From 40afbb94dbab86215e4085163643848f8ea4b657 Mon Sep 17 00:00:00 2001
From: Nihal Jain
Date: Wed, 6 Nov 2024 19:46:07 +0530
Subject: [PATCH 1/3] PHOENIX-7441 Integrate the Spotless plugin and update
 the code template

- Update dev/PhoenixCodeTemplate.xml to use the latest format, as in HBase.
- Copied the license-header file from HBase; the one Phoenix was using was a
  little different.
- Fix a file having a misplaced package block, as we do not want any manual
  code change in the commit where we run Spotless.
---
 dev/PhoenixCodeTemplate.xml                    | 724 ++++++++++--------
 .../coprocessor/DelegateRegionScanner.java     |   3 +-
 pom.xml                                        | 114 +++
 src/main/config/checkstyle/header.txt          |  17 +-
 4 files changed, 540 insertions(+), 318 deletions(-)

diff --git a/dev/PhoenixCodeTemplate.xml b/dev/PhoenixCodeTemplate.xml
index 318b30d9bff..993b616957d 100644
--- a/dev/PhoenixCodeTemplate.xml
+++ b/dev/PhoenixCodeTemplate.xml
@@ -1,312 +1,418 @@
[Eclipse code formatter profile XML rewritten wholesale; the profile markup is
not recoverable in this excerpt.]
\ No newline at end of file
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index 3d742431914..3d562d57619 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -14,7 +14,8 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
- */package org.apache.phoenix.coprocessor; + */ +package org.apache.phoenix.coprocessor; import java.io.IOException; import java.util.List; diff --git a/pom.xml b/pom.xml index 29786ee7bba..53738776290 100644 --- a/pom.xml +++ b/pom.xml @@ -168,6 +168,7 @@ 3.3.0 3.6.0 2.5.2.Final + 2.43.0 false @@ -700,6 +701,119 @@ true true + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + + **/generated/* + **/package-info.java + + + + Remove unhelpful javadoc stubs + (?m)^ *\* *@(?:param|throws|return) *\w* *\n + + + + + Purge single returns tag multi line + (?m)^ */\*\*\n *\* *@return *(.*) *\n *\*/$ + /** Returns $1 */ + + + Purge single returns tag single line + ^ */\*\* *@return *(.*) *\*/$ + /** Returns $1 */ + + + + ${session.executionRootDirectory}/dev/PhoenixCodeTemplate.xml + + + ${session.executionRootDirectory}/dev/phoenix.importorder + + + + + + + + false + + + + + + + + **/*.xml + **/*.sh + **/*.py + **/Jenkinsfile* + **/Dockerfile* + **/*.md + *.md + **/*.txt + *.txt + + + **/target/** + **/dependency-reduced-pom.xml + + + + + + + + + src/main/java/**/*.java + src/test/java/**/*.java + + + **/generated/* + **/package-info.java + + + ${session.executionRootDirectory}/src/main/config/checkstyle/header.txt + package + + + + + + false + + + diff --git a/src/main/config/checkstyle/header.txt b/src/main/config/checkstyle/header.txt index 2a4297155ea..d5519133edc 100644 --- a/src/main/config/checkstyle/header.txt +++ b/src/main/config/checkstyle/header.txt @@ -1,16 +1,17 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- */ + */ \ No newline at end of file From 078071b89739f05f0b1071a26bcd54740fed4536 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Wed, 6 Nov 2024 23:12:03 +0530 Subject: [PATCH 2/3] Downgrade to 2.30.0 as that is the last known version which works with java 8 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 53738776290..36baee073fb 100644 --- a/pom.xml +++ b/pom.xml @@ -168,7 +168,7 @@ 3.3.0 3.6.0 2.5.2.Final - 2.43.0 + 2.30.0 false From 3ae0e76a208c8b74e6c5ed72b3bce56ad0c87a18 Mon Sep 17 00:00:00 2001 From: Nihal Jain Date: Wed, 6 Nov 2024 23:17:14 +0530 Subject: [PATCH 3/3] PHOENIX-7442 Apply Spotless to reformat the entire codebase --- BUILDING.md | 12 +- Jenkinsfile | 2 +- Jenkinsfile.yetus | 2 +- README.md | 2 +- bin/end2endTest.py | 2 +- bin/pherf-standalone.py | 2 +- bin/phoenix_utils.py | 4 +- bin/psql.py | 2 +- bin/readme.txt | 9 +- bin/traceserver.py | 2 +- dev/PhoenixCodeTemplate.xml | 2 +- dev/cache-apache-project-artifact.sh | 2 +- dev/create-release/release-util.sh | 2 +- dev/jenkinsEnv.sh | 1 - dev/misc_utils/README.md | 6 +- dev/rebuild_hbase.sh | 1 - dev/smart-apply-patch.sh | 2 +- dev/test-patch.sh | 20 +- phoenix-assembly/pom.xml | 162 +- .../components/all-common-dependencies.xml | 2 +- .../src/build/package-to-tar-all.xml | 2 +- .../phoenix-client-embedded/pom.xml | 86 +- .../phoenix-client-lite/pom.xml | 86 +- phoenix-client-parent/pom.xml | 29 +- phoenix-core-client/pom.xml | 369 +- .../src/build/phoenix-core.xml | 4 +- .../apache/hadoop/hbase/PhoenixTagType.java | 14 +- .../hadoop/hbase/client/RegionInfoUtil.java | 12 +- .../ClientRpcControllerFactory.java | 60 +- .../ipc/controller/IndexRpcController.java | 40 +- ...RegionServerIndexRpcControllerFactory.java | 63 +- .../InvalidateMetadataCacheController.java | 38 +- ...alidateMetadataCacheControllerFactory.java | 26 +- .../ipc/controller/MetadataRpcController.java | 66 +- .../ServerSideRPCControllerFactory.java | 22 +- .../ServerToServerRpcController.java | 30 +- .../ServerToServerRpcControllerImpl.java | 77 +- .../org/apache/phoenix/cache/HashCache.java | 15 +- .../phoenix/cache/IndexMetaDataCache.java | 56 +- .../phoenix/cache/JodaTimezoneCache.java | 107 +- .../phoenix/cache/ServerCacheClient.java | 888 +- .../phoenix/cache/ServerMetadataCache.java | 22 +- .../cache/ServerMetadataCacheImpl.java | 199 +- .../org/apache/phoenix/cache/TenantCache.java | 22 +- .../apache/phoenix/cache/TenantCacheImpl.java | 412 +- .../org/apache/phoenix/call/CallRunner.java | 60 +- .../org/apache/phoenix/call/CallWrapper.java | 8 +- .../phoenix/compile/AggregationManager.java | 141 +- .../phoenix/compile/BaseMutationPlan.java | 104 +- .../apache/phoenix/compile/BindManager.java | 72 +- .../compile/CloseStatementCompiler.java | 48 +- .../ColumnNameTrackingExpressionCompiler.java | 27 +- .../phoenix/compile/ColumnProjector.java | 96 +- .../phoenix/compile/ColumnResolver.java | 93 +- .../phoenix/compile/CompiledOffset.java | 30 +- .../compile/CreateFunctionCompiler.java | 93 +- .../phoenix/compile/CreateIndexCompiler.java | 393 +- .../phoenix/compile/CreateSchemaCompiler.java | 84 +- .../compile/CreateSequenceCompiler.java | 399 +- .../phoenix/compile/CreateTableCompiler.java | 1320 +- .../compile/DeclareCursorCompiler.java | 83 +- .../phoenix/compile/DelegateMutationPlan.java | 122 +- .../phoenix/compile/DeleteCompiler.java | 1805 +- .../phoenix/compile/DropSequenceCompiler.java | 58 +- .../apache/phoenix/compile/ExplainPlan.java | 66 +- .../compile/ExplainPlanAttributes.java | 
1031 +- .../phoenix/compile/ExpressionCompiler.java | 2106 ++- .../phoenix/compile/ExpressionManager.java | 71 +- .../phoenix/compile/ExpressionProjector.java | 156 +- .../apache/phoenix/compile/FromCompiler.java | 2023 +- .../phoenix/compile/GroupByCompiler.java | 773 +- .../phoenix/compile/HavingCompiler.java | 253 +- .../compile/IndexExpressionCompiler.java | 74 +- .../compile/IndexStatementRewriter.java | 240 +- .../apache/phoenix/compile/JoinCompiler.java | 2653 ++- .../org/apache/phoenix/compile/KeyPart.java | 93 +- .../apache/phoenix/compile/LimitCompiler.java | 160 +- .../phoenix/compile/ListJarsQueryPlan.java | 445 +- .../MutatingParallelIteratorFactory.java | 191 +- .../apache/phoenix/compile/MutationPlan.java | 13 +- .../phoenix/compile/OffsetCompiler.java | 165 +- .../compile/OpenStatementCompiler.java | 48 +- .../phoenix/compile/OrderByCompiler.java | 359 +- .../compile/OrderPreservingTracker.java | 1076 +- .../phoenix/compile/PostDDLCompiler.java | 571 +- .../phoenix/compile/PostIndexDDLCompiler.java | 214 +- .../compile/PostLocalIndexDDLCompiler.java | 146 +- .../phoenix/compile/ProjectionCompiler.java | 1521 +- .../apache/phoenix/compile/QueryCompiler.java | 1599 +- .../org/apache/phoenix/compile/QueryPlan.java | 140 +- .../phoenix/compile/RVCOffsetCompiler.java | 597 +- .../apache/phoenix/compile/RowProjector.java | 329 +- .../apache/phoenix/compile/ScanRanges.java | 1515 +- .../phoenix/compile/SequenceManager.java | 369 +- .../compile/SequenceValueExpression.java | 143 +- .../compile/ServerBuildIndexCompiler.java | 232 +- .../ServerBuildTransformingTableCompiler.java | 105 +- .../compile/StatelessExpressionCompiler.java | 46 +- .../phoenix/compile/StatementContext.java | 748 +- .../phoenix/compile/StatementNormalizer.java | 250 +- .../apache/phoenix/compile/StatementPlan.java | 63 +- .../phoenix/compile/SubqueryRewriter.java | 1522 +- .../phoenix/compile/SubselectRewriter.java | 984 +- .../phoenix/compile/TraceQueryPlan.java | 454 +- .../compile/TupleProjectionCompiler.java | 405 +- .../apache/phoenix/compile/UnionCompiler.java | 449 +- .../phoenix/compile/UpsertCompiler.java | 2663 +-- .../apache/phoenix/compile/WhereCompiler.java | 1570 +- .../phoenix/compile/WhereOptimizer.java | 4456 ++--- .../BaseScannerRegionObserverConstants.java | 323 +- .../HashJoinCacheNotFoundException.java | 38 +- .../InvalidateServerMetadataCacheRequest.java | 49 +- .../MetaDataEndpointImplConstants.java | 16 +- .../coprocessorclient/MetaDataProtocol.java | 874 +- .../coprocessorclient/RowKeyMatcher.java | 206 +- .../ScanRegionObserverConstants.java | 14 +- .../SequenceRegionObserverConstants.java | 10 +- .../ServerCachingProtocol.java | 63 +- .../phoenix/coprocessorclient/TableInfo.java | 96 +- .../coprocessorclient/TableTTLInfo.java | 150 +- .../coprocessorclient/TableTTLInfoCache.java | 128 +- ...ngroupedAggregateRegionObserverHelper.java | 50 +- .../WhereConstantParser.java | 111 +- .../metrics/MetricsMetadataCachingSource.java | 381 +- .../MetricsMetadataCachingSourceImpl.java | 161 +- ...etricsPhoenixCoprocessorSourceFactory.java | 50 +- .../metrics/MetricsPhoenixTTLSource.java | 66 +- .../metrics/MetricsPhoenixTTLSourceImpl.java | 56 +- .../tasks/IndexRebuildTaskConstants.java | 10 +- .../DataExceedsCapacityException.java | 79 +- .../exception/FailoverSQLException.java | 19 +- .../InvalidRegionSplitPolicyException.java | 35 +- .../phoenix/exception/PhoenixIOException.java | 13 +- .../PhoenixNonRetryableRuntimeException.java | 25 +- .../exception/PhoenixParserException.java | 148 +- 
.../ResultSetOutOfScanRangeException.java | 23 +- .../exception/RetriableUpgradeException.java | 13 +- .../phoenix/exception/SQLExceptionCode.java | 1388 +- .../phoenix/exception/SQLExceptionInfo.java | 510 +- .../StaleMetadataCacheException.java | 14 +- .../exception/UndecodableByteException.java | 15 +- .../exception/UnknownFunctionException.java | 20 +- .../exception/UpgradeBlockedException.java | 12 +- .../exception/UpgradeInProgressException.java | 19 +- .../UpgradeNotRequiredException.java | 12 +- .../exception/UpgradeRequiredException.java | 29 +- .../apache/phoenix/execute/AggregatePlan.java | 561 +- .../apache/phoenix/execute/BaseQueryPlan.java | 984 +- .../phoenix/execute/ClientAggregatePlan.java | 549 +- .../phoenix/execute/ClientProcessingPlan.java | 116 +- .../phoenix/execute/ClientScanPlan.java | 230 +- .../phoenix/execute/CommitException.java | 60 +- .../phoenix/execute/CursorFetchPlan.java | 77 +- .../phoenix/execute/DelegateHTable.java | 559 +- .../phoenix/execute/DelegateQueryPlan.java | 276 +- .../DescVarLengthFastByteComparisons.java | 363 +- .../apache/phoenix/execute/HashJoinPlan.java | 1126 +- .../execute/LiteralResultIterationPlan.java | 201 +- .../apache/phoenix/execute/MutationState.java | 4485 ++--- .../PhoenixTxIndexMutationGenerator.java | 858 +- .../phoenix/execute/RuntimeContext.java | 12 +- .../phoenix/execute/RuntimeContextImpl.java | 104 +- .../org/apache/phoenix/execute/ScanPlan.java | 578 +- .../phoenix/execute/SortMergeJoinPlan.java | 1675 +- .../phoenix/execute/TupleProjectionPlan.java | 286 +- .../phoenix/execute/TupleProjector.java | 814 +- .../org/apache/phoenix/execute/UnionPlan.java | 696 +- .../phoenix/execute/UnnestArrayPlan.java | 283 +- .../execute/visitor/AvgRowWidthVisitor.java | 283 +- .../execute/visitor/ByteCountVisitor.java | 156 +- .../execute/visitor/QueryPlanVisitor.java | 45 +- .../execute/visitor/RowCountVisitor.java | 465 +- .../phoenix/expression/AddExpression.java | 40 +- .../phoenix/expression/AndExpression.java | 129 +- .../phoenix/expression/AndOrExpression.java | 125 +- .../expression/ArithmeticExpression.java | 91 +- .../ArrayConstructorExpression.java | 228 +- .../expression/BaseAddSubtractExpression.java | 41 +- .../expression/BaseCompoundExpression.java | 215 +- .../BaseDecimalAddSubtractExpression.java | 3 +- .../phoenix/expression/BaseExpression.java | 462 +- .../expression/BaseSingleExpression.java | 177 +- .../expression/BaseTerminalExpression.java | 15 +- .../expression/ByteBasedLikeExpression.java | 35 +- .../phoenix/expression/CaseExpression.java | 397 +- .../phoenix/expression/CoerceExpression.java | 336 +- .../phoenix/expression/ColumnExpression.java | 173 +- .../expression/ComparisonExpression.java | 873 +- ...orrelateVariableFieldAccessExpression.java | 80 +- .../expression/CurrentDateTimeFunction.java | 30 +- .../phoenix/expression/DateAddExpression.java | 93 +- .../expression/DateSubtractExpression.java | 94 +- .../expression/DecimalAddExpression.java | 121 +- .../expression/DecimalDivideExpression.java | 88 +- .../expression/DecimalMultiplyExpression.java | 88 +- .../expression/DecimalSubtractExpression.java | 144 +- .../expression/DelegateExpression.java | 164 +- .../phoenix/expression/Determinism.java | 39 +- .../phoenix/expression/DivideExpression.java | 105 +- .../expression/DoubleAddExpression.java | 78 +- .../expression/DoubleDivideExpression.java | 86 +- .../expression/DoubleMultiplyExpression.java | 78 +- .../expression/DoubleSubtractExpression.java | 84 +- 
.../apache/phoenix/expression/Expression.java | 131 +- .../phoenix/expression/ExpressionType.java | 434 +- .../phoenix/expression/InListExpression.java | 738 +- .../phoenix/expression/IsNullExpression.java | 223 +- .../expression/KeyValueColumnExpression.java | 212 +- .../phoenix/expression/LikeExpression.java | 589 +- .../phoenix/expression/LiteralExpression.java | 645 +- .../phoenix/expression/LongAddExpression.java | 61 +- .../expression/LongDivideExpression.java | 69 +- .../expression/LongMultiplyExpression.java | 67 +- .../expression/LongSubtractExpression.java | 93 +- .../phoenix/expression/ModulusExpression.java | 125 +- .../expression/MultiplyExpression.java | 106 +- .../phoenix/expression/NotExpression.java | 113 +- .../phoenix/expression/OrExpression.java | 60 +- .../phoenix/expression/OrderByExpression.java | 279 +- .../expression/ProjectedColumnExpression.java | 239 +- .../expression/RowKeyColumnExpression.java | 322 +- .../phoenix/expression/RowKeyExpression.java | 39 +- .../RowValueConstructorExpression.java | 479 +- .../SingleCellColumnExpression.java | 335 +- .../SingleCellConstructorExpression.java | 147 +- .../expression/StringBasedLikeExpression.java | 34 +- .../expression/StringConcatExpression.java | 129 +- .../expression/SubtractExpression.java | 45 +- .../expression/TimestampAddExpression.java | 91 +- .../TimestampSubtractExpression.java | 103 +- .../expression/aggregator/Aggregator.java | 45 +- .../expression/aggregator/Aggregators.java | 189 +- .../expression/aggregator/BaseAggregator.java | 74 +- .../BaseDecimalStddevAggregator.java | 125 +- .../aggregator/BaseStddevAggregator.java | 94 +- .../aggregator/ClientAggregators.java | 82 +- .../aggregator/CountAggregator.java | 97 +- .../DecimalStddevPopAggregator.java | 18 +- .../DecimalStddevSampAggregator.java | 18 +- .../aggregator/DecimalSumAggregator.java | 126 +- .../DistinctCountClientAggregator.java | 47 +- .../DistinctValueClientAggregator.java | 67 +- ...istinctValueWithCountClientAggregator.java | 192 +- ...istinctValueWithCountServerAggregator.java | 228 +- .../aggregator/DoubleSumAggregator.java | 120 +- .../FirstLastValueBaseClientAggregator.java | 254 +- .../FirstLastValueServerAggregator.java | 318 +- .../aggregator/IntSumAggregator.java | 25 +- .../aggregator/LongSumAggregator.java | 25 +- .../expression/aggregator/MaxAggregator.java | 35 +- .../expression/aggregator/MinAggregator.java | 113 +- .../NonSizeTrackingServerAggregators.java | 29 +- .../aggregator/NumberSumAggregator.java | 143 +- .../PercentRankClientAggregator.java | 85 +- .../PercentileClientAggregator.java | 122 +- .../PercentileDiscClientAggregator.java | 127 +- .../aggregator/ServerAggregators.java | 202 +- .../SizeTrackingServerAggregators.java | 57 +- .../aggregator/StddevPopAggregator.java | 18 +- .../aggregator/StddevSampAggregator.java | 18 +- .../aggregator/UnsignedIntSumAggregator.java | 25 +- .../aggregator/UnsignedLongSumAggregator.java | 29 +- .../expression/function/AbsFunction.java | 54 +- .../function/AggregateFunction.java | 38 +- .../ArrayAllComparisonExpression.java | 29 +- .../ArrayAnyComparisonExpression.java | 108 +- .../function/ArrayAppendFunction.java | 41 +- .../function/ArrayConcatFunction.java | 117 +- .../function/ArrayElemRefExpression.java | 88 +- .../function/ArrayFillFunction.java | 118 +- .../function/ArrayIndexFunction.java | 99 +- .../function/ArrayLengthFunction.java | 73 +- .../function/ArrayModifierFunction.java | 405 +- .../function/ArrayPrependFunction.java | 43 +- 
.../function/ArrayRemoveFunction.java | 80 +- .../function/ArrayToStringFunction.java | 97 +- .../function/AvgAggregateFunction.java | 140 +- .../BsonConditionExpressionFunction.java | 161 +- .../BsonUpdateExpressionFunction.java | 141 +- .../function/BsonValueFunction.java | 216 +- .../ByteBasedRegexpReplaceFunction.java | 38 +- .../ByteBasedRegexpSplitFunction.java | 35 +- .../ByteBasedRegexpSubstrFunction.java | 44 +- .../expression/function/CbrtFunction.java | 41 +- .../function/CeilDateExpression.java | 162 +- .../function/CeilDecimalExpression.java | 132 +- .../expression/function/CeilFunction.java | 54 +- .../function/CeilMonthExpression.java | 55 +- .../function/CeilTimestampExpression.java | 159 +- .../function/CeilWeekExpression.java | 57 +- .../function/CeilYearExpression.java | 57 +- .../expression/function/CoalesceFunction.java | 168 +- .../function/CollationKeyFunction.java | 295 +- .../function/CompositeAggregateFunction.java | 27 +- .../function/ConvertTimezoneFunction.java | 101 +- .../expression/function/CosFunction.java | 42 +- .../function/CountAggregateFunction.java | 138 +- .../function/CurrentDateFunction.java | 71 +- .../function/CurrentTimeFunction.java | 67 +- .../function/DateScalarFunction.java | 50 +- .../function/DayOfMonthFunction.java | 80 +- .../function/DayOfWeekFunction.java | 84 +- .../function/DayOfYearFunction.java | 74 +- .../expression/function/DecodeFunction.java | 198 +- .../function/DefaultValueExpression.java | 115 +- ...egateConstantToCountAggregateFunction.java | 73 +- .../DistinctCountAggregateFunction.java | 158 +- ...inctCountHyperLogLogAggregateFunction.java | 283 +- .../DistinctValueAggregateFunction.java | 66 +- ...stinctValueWithCountAggregateFunction.java | 31 +- .../expression/function/EncodeFormat.java | 10 +- .../expression/function/EncodeFunction.java | 148 +- .../expression/function/ExpFunction.java | 41 +- .../function/ExternalSqlTypeIdFunction.java | 97 +- .../function/FirstLastValueBaseFunction.java | 62 +- .../function/FirstValueFunction.java | 67 +- .../function/FirstValuesFunction.java | 95 +- .../function/FloorDateExpression.java | 179 +- .../function/FloorDecimalExpression.java | 134 +- .../expression/function/FloorFunction.java | 48 +- .../function/FloorMonthExpression.java | 55 +- .../function/FloorWeekExpression.java | 54 +- .../function/FloorYearExpression.java | 55 +- .../function/FunctionArgumentType.java | 53 +- .../function/FunctionExpression.java | 87 +- .../expression/function/GetBitFunction.java | 109 +- .../expression/function/GetByteFunction.java | 109 +- .../expression/function/HourFunction.java | 80 +- .../function/IndexStateNameFunction.java | 79 +- .../expression/function/InstrFunction.java | 172 +- .../expression/function/InvertFunction.java | 200 +- .../function/JavaMathOneArgumentFunction.java | 56 +- .../function/JavaMathTwoArgumentFunction.java | 62 +- .../function/JsonExistsFunction.java | 114 +- .../function/JsonModifyFunction.java | 134 +- .../function/JsonQueryFunction.java | 136 +- .../function/JsonValueFunction.java | 152 +- .../expression/function/LTrimFunction.java | 113 +- .../function/LastValueFunction.java | 69 +- .../function/LastValuesFunction.java | 95 +- .../expression/function/LengthFunction.java | 98 +- .../expression/function/LnFunction.java | 41 +- .../expression/function/LogFunction.java | 43 +- .../expression/function/LowerFunction.java | 118 +- .../expression/function/LpadFunction.java | 318 +- .../expression/function/MD5Function.java | 96 +- 
.../expression/function/MathPIFunction.java | 51 +- .../function/MaxAggregateFunction.java | 81 +- .../function/MinAggregateFunction.java | 102 +- .../expression/function/MinuteFunction.java | 80 +- .../expression/function/MonthFunction.java | 80 +- .../expression/function/NowFunction.java | 43 +- .../expression/function/NthValueFunction.java | 82 +- .../function/OctetLengthFunction.java | 54 +- .../PercentRankAggregateFunction.java | 57 +- .../PercentileContAggregateFunction.java | 68 +- .../PercentileDiscAggregateFunction.java | 60 +- .../function/PhoenixRowTimestampFunction.java | 159 +- .../expression/function/PowerFunction.java | 35 +- .../expression/function/PrefixFunction.java | 175 +- .../expression/function/RTrimFunction.java | 319 +- .../expression/function/RandomFunction.java | 192 +- .../function/RegexpReplaceFunction.java | 238 +- .../function/RegexpSplitFunction.java | 151 +- .../function/RegexpSubstrFunction.java | 292 +- .../expression/function/ReverseFunction.java | 94 +- .../function/RoundDateExpression.java | 586 +- .../function/RoundDecimalExpression.java | 620 +- .../expression/function/RoundFunction.java | 47 +- .../function/RoundJodaDateExpression.java | 71 +- .../function/RoundMonthExpression.java | 64 +- .../function/RoundTimestampExpression.java | 140 +- .../function/RoundWeekExpression.java | 62 +- .../function/RoundYearExpression.java | 62 +- .../function/RowKeyBytesStringFunction.java | 60 +- .../function/SQLIndexTypeFunction.java | 76 +- .../function/SQLTableTypeFunction.java | 76 +- .../function/SQLViewTypeFunction.java | 77 +- .../expression/function/ScalarFunction.java | 151 +- .../expression/function/SecondFunction.java | 80 +- .../expression/function/SetBitFunction.java | 85 +- .../expression/function/SetByteFunction.java | 83 +- .../expression/function/SignFunction.java | 69 +- .../expression/function/SinFunction.java | 42 +- .../function/SingleAggregateFunction.java | 241 +- .../function/SqlTypeNameFunction.java | 85 +- .../expression/function/SqrtFunction.java | 41 +- .../function/StddevPopFunction.java | 63 +- .../function/StddevSampFunction.java | 63 +- .../StringBasedRegexpReplaceFunction.java | 37 +- .../StringBasedRegexpSplitFunction.java | 33 +- .../StringBasedRegexpSubstrFunction.java | 38 +- .../function/StringToArrayFunction.java | 108 +- .../expression/function/SubstrFunction.java | 382 +- .../function/SumAggregateFunction.java | 185 +- .../expression/function/TanFunction.java | 42 +- .../phoenix/expression/function/TimeUnit.java | 57 +- .../function/TimezoneOffsetFunction.java | 87 +- .../expression/function/ToCharFunction.java | 260 +- .../expression/function/ToDateFunction.java | 311 +- .../expression/function/ToNumberFunction.java | 334 +- .../expression/function/ToTimeFunction.java | 76 +- .../function/ToTimestampFunction.java | 76 +- .../TransactionProviderNameFunction.java | 74 +- .../expression/function/TrimFunction.java | 113 +- .../expression/function/TruncFunction.java | 55 +- .../expression/function/UDFExpression.java | 391 +- .../expression/function/UpperFunction.java | 124 +- .../expression/function/WeekFunction.java | 80 +- .../expression/function/YearFunction.java | 76 +- ...RowValueConstructorExpressionRewriter.java | 47 +- .../bson/CommonComparisonExpressionUtils.java | 103 +- .../DocumentComparisonExpressionUtils.java | 201 +- .../bson/SQLComparisonExpressionUtils.java | 124 +- .../util/bson/UpdateExpressionUtils.java | 356 +- .../util/regex/AbstractBasePattern.java | 12 +- .../util/regex/AbstractBaseSplitter.java | 4 +- 
.../expression/util/regex/GuavaSplitter.java | 41 +- .../expression/util/regex/JONIPattern.java | 296 +- .../expression/util/regex/JavaPattern.java | 109 +- .../visitor/BaseExpressionVisitor.java | 232 +- .../visitor/CloneExpressionVisitor.java | 332 +- .../expression/visitor/ExpressionVisitor.java | 192 +- .../visitor/KeyValueExpressionVisitor.java | 18 +- .../ProjectedColumnExpressionVisitor.java | 9 +- ...ReplaceArrayFunctionExpressionVisitor.java | 32 +- .../visitor/RowKeyExpressionVisitor.java | 16 +- .../SingleAggregateFunctionVisitor.java | 17 +- ...StatelessTraverseAllExpressionVisitor.java | 290 +- .../StatelessTraverseNoExpressionVisitor.java | 290 +- .../visitor/TraverseAllExpressionVisitor.java | 55 +- .../visitor/TraverseNoExpressionVisitor.java | 10 +- .../filter/AllVersionsIndexRebuildFilter.java | 37 +- .../filter/BooleanExpressionFilter.java | 161 +- .../filter/ColumnProjectionFilter.java | 306 +- .../apache/phoenix/filter/DelegateFilter.java | 173 +- .../phoenix/filter/DistinctPrefixFilter.java | 264 +- .../phoenix/filter/EmptyColumnOnlyFilter.java | 145 +- ...codedQualifiersColumnProjectionFilter.java | 225 +- .../MultiCFCQKeyValueComparisonFilter.java | 178 +- .../MultiCQKeyValueComparisonFilter.java | 71 +- ...ultiEncodedCQKeyValueComparisonFilter.java | 680 +- .../filter/MultiKeyValueComparisonFilter.java | 439 +- .../apache/phoenix/filter/PagingFilter.java | 404 +- .../filter/RowKeyComparisonFilter.java | 215 +- .../SingleCFCQKeyValueComparisonFilter.java | 50 +- .../SingleCQKeyValueComparisonFilter.java | 45 +- .../SingleKeyValueComparisonFilter.java | 214 +- .../apache/phoenix/filter/SkipScanFilter.java | 1341 +- .../SystemCatalogViewIndexIdFilter.java | 239 +- .../phoenix/filter/UnverifiedRowFilter.java | 168 +- .../hbase/index/AbstractValueGetter.java | 41 +- .../phoenix/hbase/index/BaseIndexCodec.java | 24 +- .../phoenix/hbase/index/MultiMutation.java | 93 +- .../phoenix/hbase/index/OffsetCell.java | 225 +- .../phoenix/hbase/index/ValueGetter.java | 17 +- .../FatalIndexBuildingFailureException.java | 34 +- .../IndexBuildingFailureException.java | 6 +- .../phoenix/hbase/index/covered/Batch.java | 7 +- .../hbase/index/covered/IndexCodec.java | 141 +- .../hbase/index/covered/IndexMetaData.java | 60 +- .../hbase/index/covered/IndexUpdate.java | 14 +- .../hbase/index/covered/KeyValueStore.java | 4 +- .../hbase/index/covered/TableState.java | 25 +- .../covered/data/DelegateComparator.java | 114 +- .../index/covered/data/LazyValueGetter.java | 115 +- .../index/covered/data/LocalHBaseState.java | 31 +- .../filter/ApplyAndFilterDeletesFilter.java | 180 +- ...umnTrackingNextLargestTimestampFilter.java | 5 +- .../covered/filter/MaxTimestampFilter.java | 11 +- .../covered/filter/NewerTimestampFilter.java | 5 +- .../index/covered/update/ColumnReference.java | 138 +- .../index/covered/update/ColumnTracker.java | 17 +- .../covered/update/IndexUpdateManager.java | 29 +- .../covered/update/IndexedColumnGroup.java | 4 +- .../index/exception/IndexWriteException.java | 67 +- .../MultiIndexWriteFailureException.java | 45 +- .../SingleIndexWriteFailureException.java | 39 +- .../metrics/GlobalIndexCheckerSource.java | 121 +- .../metrics/GlobalIndexCheckerSourceImpl.java | 217 +- .../index/metrics/MetricsIndexerSource.java | 106 +- .../metrics/MetricsIndexerSourceFactory.java | 20 +- .../metrics/MetricsIndexerSourceImpl.java | 415 +- .../hbase/index/parallel/BaseTaskRunner.java | 20 +- .../index/parallel/EarlyExitFailure.java | 4 +- .../parallel/QuickFailingTaskRunner.java | 11 +- 
.../phoenix/hbase/index/parallel/Task.java | 4 +- .../hbase/index/parallel/TaskBatch.java | 8 +- .../hbase/index/parallel/TaskRunner.java | 24 +- .../index/parallel/ThreadPoolBuilder.java | 12 +- .../parallel/WaitForCompletionTaskRunner.java | 8 +- .../hbase/index/scanner/EmptyScanner.java | 14 +- .../scanner/FilteredKeyValueScanner.java | 162 +- .../index/scanner/ReseekableScanner.java | 7 +- .../phoenix/hbase/index/scanner/Scanner.java | 5 +- .../hbase/index/scanner/ScannerBuilder.java | 59 +- .../hbase/index/table/HTableFactory.java | 5 +- .../index/table/HTableInterfaceReference.java | 22 +- .../index/util/GenericKeyValueBuilder.java | 142 +- .../hbase/index/util/ImmutableBytesPtr.java | 201 +- .../hbase/index/util/IndexManagementUtil.java | 434 +- .../hbase/index/util/KeyValueBuilder.java | 98 +- .../phoenix/hbase/index/util/VersionUtil.java | 115 +- .../apache/phoenix/index/CDCTableInfo.java | 408 +- .../apache/phoenix/index/IndexMaintainer.java | 4409 ++--- .../index/IndexMetaDataCacheClient.java | 213 +- .../index/IndexMetaDataCacheFactory.java | 82 +- .../index/PhoenixIndexBuilderHelper.java | 213 +- .../phoenix/index/PhoenixIndexCodec.java | 208 +- .../PhoenixIndexFailurePolicyHelper.java | 372 +- .../phoenix/index/PhoenixIndexMetaData.java | 134 +- .../iterate/AggregatingResultIterator.java | 23 +- .../BaseGroupedAggregatingResultIterator.java | 141 +- .../phoenix/iterate/BaseResultIterator.java | 36 +- .../phoenix/iterate/BaseResultIterators.java | 3204 ++-- .../apache/phoenix/iterate/BufferedQueue.java | 643 +- .../phoenix/iterate/BufferedSortedQueue.java | 194 +- .../phoenix/iterate/BufferedTupleQueue.java | 169 +- .../iterate/ChunkedResultIterator.java | 356 +- .../ClientHashAggregatingResultIterator.java | 312 +- .../phoenix/iterate/ConcatResultIterator.java | 204 +- .../phoenix/iterate/CursorResultIterator.java | 108 +- .../iterate/DefaultParallelScanGrouper.java | 93 +- .../DefaultTableResultIteratorFactory.java | 18 +- .../iterate/DelegateResultIterator.java | 66 +- .../DistinctAggregatingResultIterator.java | 195 +- .../apache/phoenix/iterate/ExplainTable.java | 904 +- .../FilterAggregatingResultIterator.java | 112 +- .../phoenix/iterate/FilterResultIterator.java | 112 +- .../GroupedAggregatingResultIterator.java | 64 +- .../LimitingPeekingResultIterator.java | 35 +- .../iterate/LimitingResultIterator.java | 69 +- .../iterate/LookAheadResultIterator.java | 104 +- .../MaterializedComparableResultIterator.java | 85 +- .../iterate/MaterializedResultIterator.java | 143 +- .../iterate/MergeSortResultIterator.java | 113 +- .../MergeSortRowKeyResultIterator.java | 84 +- .../iterate/MergeSortTopNResultIterator.java | 186 +- .../phoenix/iterate/OffsetResultIterator.java | 144 +- .../OrderedAggregatingResultIterator.java | 55 +- .../iterate/OrderedResultIterator.java | 974 +- .../iterate/ParallelIteratorFactory.java | 22 +- .../ParallelIteratorRegionSplitter.java | 7 +- .../phoenix/iterate/ParallelIterators.java | 247 +- .../phoenix/iterate/ParallelScanGrouper.java | 57 +- .../iterate/ParallelScansCollector.java | 60 +- .../iterate/PeekingResultIterator.java | 74 +- .../apache/phoenix/iterate/PhoenixQueues.java | 100 +- .../phoenix/iterate/ResultIterator.java | 84 +- .../phoenix/iterate/ResultIterators.java | 50 +- .../iterate/RoundRobinResultIterator.java | 563 +- .../RowKeyOrderedAggregateResultIterator.java | 290 +- .../iterate/ScanningResultIterator.java | 336 +- .../ScanningResultPostDummyResultCaller.java | 22 +- .../ScanningResultPostValidResultCaller.java | 22 +- 
.../iterate/ScansWithRegionLocations.java | 45 +- .../iterate/SequenceResultIterator.java | 79 +- .../phoenix/iterate/SerialIterators.java | 352 +- .../phoenix/iterate/SizeAwareQueue.java | 4 +- .../phoenix/iterate/SizeBoundQueue.java | 114 +- .../iterate/SpoolTooBigToDiskException.java | 18 +- .../iterate/SpoolingResultIterator.java | 555 +- .../phoenix/iterate/TableResultIterator.java | 604 +- .../iterate/TableResultIteratorFactory.java | 10 +- .../iterate/TableSamplerPredicate.java | 157 +- .../UngroupedAggregatingResultIterator.java | 86 +- .../phoenix/iterate/UnionResultIterators.java | 219 +- .../jdbc/AbstractRPCConnectionInfo.java | 397 +- .../phoenix/jdbc/ClusterRoleRecord.java | 421 +- .../jdbc/ClusterRoleRecordGeneratorTool.java | 210 +- .../apache/phoenix/jdbc/ConnectionInfo.java | 984 +- .../phoenix/jdbc/DelegateConnection.java | 592 +- .../jdbc/DelegatePreparedStatement.java | 1014 +- .../phoenix/jdbc/DelegateResultSet.java | 1961 +- .../phoenix/jdbc/DelegateStatement.java | 452 +- .../jdbc/FailoverPhoenixConnection.java | 1154 +- .../apache/phoenix/jdbc/FailoverPolicy.java | 206 +- .../phoenix/jdbc/HighAvailabilityGroup.java | 1562 +- .../phoenix/jdbc/HighAvailabilityPolicy.java | 196 +- .../jdbc/LoggingPhoenixConnection.java | 188 +- .../jdbc/LoggingPhoenixPreparedStatement.java | 96 +- .../phoenix/jdbc/LoggingPhoenixResultSet.java | 45 +- .../phoenix/jdbc/LoggingPhoenixStatement.java | 100 +- .../phoenix/jdbc/MasterConnectionInfo.java | 125 +- .../jdbc/ParallelPhoenixConnection.java | 1252 +- .../phoenix/jdbc/ParallelPhoenixContext.java | 413 +- .../phoenix/jdbc/ParallelPhoenixMetrics.java | 66 +- ...ParallelPhoenixNullComparingResultSet.java | 501 +- .../ParallelPhoenixPreparedStatement.java | 1357 +- .../jdbc/ParallelPhoenixResultSet.java | 227 +- .../jdbc/ParallelPhoenixResultSetFactory.java | 90 +- .../jdbc/ParallelPhoenixStatement.java | 1120 +- .../phoenix/jdbc/ParallelPhoenixUtil.java | 520 +- .../phoenix/jdbc/PhoenixConnection.java | 2769 ++- .../phoenix/jdbc/PhoenixDatabaseMetaData.java | 3814 ++-- .../apache/phoenix/jdbc/PhoenixDriver.java | 593 +- .../phoenix/jdbc/PhoenixEmbeddedDriver.java | 279 +- .../phoenix/jdbc/PhoenixHAAdminTool.java | 1068 +- .../PhoenixHAExecutorServiceProvider.java | 524 +- .../phoenix/jdbc/PhoenixHAGroupMetrics.java | 90 +- .../phoenix/jdbc/PhoenixMetricsHolder.java | 14 +- .../phoenix/jdbc/PhoenixMetricsLog.java | 12 +- .../jdbc/PhoenixMonitoredConnection.java | 35 +- .../PhoenixMonitoredPreparedStatement.java | 6 +- .../jdbc/PhoenixMonitoredResultSet.java | 12 +- .../jdbc/PhoenixMonitoredStatement.java | 10 +- .../jdbc/PhoenixParameterMetaData.java | 221 +- .../jdbc/PhoenixPreparedStatement.java | 1095 +- .../apache/phoenix/jdbc/PhoenixResultSet.java | 3094 ++-- .../jdbc/PhoenixResultSetMetaData.java | 336 +- .../apache/phoenix/jdbc/PhoenixStatement.java | 4398 ++--- .../phoenix/jdbc/PhoenixStatementFactory.java | 4 +- .../phoenix/jdbc/RPCConnectionInfo.java | 278 +- .../apache/phoenix/jdbc/ZKConnectionInfo.java | 568 +- .../phoenix/job/AbstractRoundRobinQueue.java | 489 +- .../org/apache/phoenix/job/JobManager.java | 455 +- .../apache/phoenix/join/HashCacheClient.java | 315 +- .../apache/phoenix/join/HashCacheFactory.java | 217 +- .../org/apache/phoenix/join/HashJoinInfo.java | 393 +- .../MaxServerCacheSizeExceededException.java | 34 +- .../apache/phoenix/log/ActivityLogInfo.java | 66 +- .../apache/phoenix/log/AuditQueryLogger.java | 145 +- .../phoenix/log/BaseConnectionLimiter.java | 231 +- 
.../phoenix/log/ConnectionActivityLogger.java | 207 +- .../apache/phoenix/log/ConnectionLimiter.java | 25 +- .../phoenix/log/DefaultConnectionLimiter.java | 66 +- .../java/org/apache/phoenix/log/LogLevel.java | 7 +- .../org/apache/phoenix/log/LogWriter.java | 39 +- .../phoenix/log/LoggingConnectionLimiter.java | 281 +- .../log/QueryLogDetailsWorkHandler.java | 51 +- .../org/apache/phoenix/log/QueryLogInfo.java | 72 +- .../org/apache/phoenix/log/QueryLogger.java | 259 +- .../QueryLoggerDefaultExceptionHandler.java | 48 +- .../phoenix/log/QueryLoggerDisruptor.java | 173 +- .../apache/phoenix/log/QueryLoggerUtil.java | 57 +- .../org/apache/phoenix/log/QueryStatus.java | 6 +- .../apache/phoenix/log/RingBufferEvent.java | 122 +- .../log/RingBufferEventTranslator.java | 66 +- .../apache/phoenix/log/TableLogWriter.java | 216 +- .../mapreduce/util/ConnectionUtil.java | 112 +- .../util/PhoenixConfigurationUtilHelper.java | 295 +- .../phoenix/memory/ChildMemoryManager.java | 169 +- .../memory/DelegatingMemoryManager.java | 64 +- .../phoenix/memory/GlobalMemoryManager.java | 264 +- .../memory/InsufficientMemoryException.java | 29 +- .../apache/phoenix/memory/MemoryManager.java | 114 +- .../apache/phoenix/metrics/MetricInfo.java | 46 +- .../org/apache/phoenix/metrics/Metrics.java | 65 +- .../phoenix/monitoring/AtomicMetric.java | 88 +- .../phoenix/monitoring/CombinableMetric.java | 103 +- .../monitoring/CombinableMetricImpl.java | 144 +- .../ConnectionQueryServicesMetric.java | 20 +- .../ConnectionQueryServicesMetricImpl.java | 115 +- .../monitoring/GlobalClientMetrics.java | 347 +- .../phoenix/monitoring/GlobalMetric.java | 17 +- .../phoenix/monitoring/GlobalMetricImpl.java | 128 +- .../GlobalMetricRegistriesAdapter.java | 234 +- .../monitoring/HistogramDistribution.java | 12 +- .../monitoring/HistogramDistributionImpl.java | 81 +- .../phoenix/monitoring/JmxMetricProvider.java | 106 +- .../phoenix/monitoring/LatencyHistogram.java | 37 +- .../monitoring/MemoryMetricsHolder.java | 28 +- .../org/apache/phoenix/monitoring/Metric.java | 75 +- .../MetricPublisherSupplierFactory.java | 24 +- .../monitoring/MetricServiceResolver.java | 63 +- .../apache/phoenix/monitoring/MetricType.java | 551 +- .../apache/phoenix/monitoring/MetricUtil.java | 62 +- .../phoenix/monitoring/MetricsRegistry.java | 23 +- .../phoenix/monitoring/MetricsStopWatch.java | 75 +- .../monitoring/MutationMetricQueue.java | 436 +- .../monitoring/NoOpGlobalMetricImpl.java | 76 +- .../monitoring/NoOpTableMetricsManager.java | 44 +- .../phoenix/monitoring/NonAtomicMetric.java | 93 +- .../monitoring/OverAllQueryMetrics.java | 303 +- .../monitoring/PhoenixTableMetric.java | 21 +- .../monitoring/PhoenixTableMetricImpl.java | 113 +- .../phoenix/monitoring/RangeHistogram.java | 140 +- .../phoenix/monitoring/ReadMetricQueue.java | 291 +- .../phoenix/monitoring/ScanMetricsHolder.java | 231 +- .../phoenix/monitoring/SizeHistogram.java | 39 +- .../monitoring/SpoolingMetricsHolder.java | 33 +- .../monitoring/TableClientMetrics.java | 312 +- .../phoenix/monitoring/TableHistograms.java | 197 +- .../monitoring/TableMetricsManager.java | 808 +- .../TaskExecutionMetricsHolder.java | 68 +- .../ConnectionQueryServicesHistogram.java | 29 +- .../ConnectionQueryServicesMetrics.java | 137 +- ...nectionQueryServicesMetricsHistograms.java | 73 +- ...ConnectionQueryServicesMetricsManager.java | 504 +- ...ConnectionQueryServicesMetricsManager.java | 48 +- .../org/apache/phoenix/optimize/Cost.java | 156 +- .../GenSubqueryParamValuesRewriter.java | 210 +- 
.../phoenix/optimize/QueryOptimizer.java | 1526 +- .../phoenix/parse/AddColumnStatement.java | 71 +- .../phoenix/parse/AddJarsStatement.java | 26 +- .../apache/phoenix/parse/AddParseNode.java | 39 +- .../parse/AggregateFunctionParseNode.java | 28 +- ...AggregateFunctionWithinGroupParseNode.java | 50 +- .../org/apache/phoenix/parse/AliasedNode.java | 101 +- .../phoenix/parse/AlterIndexStatement.java | 93 +- .../phoenix/parse/AlterSessionStatement.java | 24 +- .../phoenix/parse/AlterTableStatement.java | 18 +- .../parse/AndBooleanParseNodeVisitor.java | 26 +- .../apache/phoenix/parse/AndParseNode.java | 52 +- .../AndRewriterBooleanParseNodeVisitor.java | 75 +- .../phoenix/parse/ArithmeticParseNode.java | 32 +- .../parse/ArrayAllAnyComparisonNode.java | 40 +- .../phoenix/parse/ArrayAllComparisonNode.java | 38 +- .../phoenix/parse/ArrayAnyComparisonNode.java | 38 +- .../phoenix/parse/ArrayConstructorNode.java | 53 +- .../phoenix/parse/ArrayElemRefNode.java | 40 +- .../phoenix/parse/ArrayModifierParseNode.java | 17 +- .../phoenix/parse/AvgAggregateParseNode.java | 34 +- .../phoenix/parse/BaseParseNodeVisitor.java | 67 +- .../phoenix/parse/BetweenParseNode.java | 98 +- .../apache/phoenix/parse/BinaryParseNode.java | 25 +- .../apache/phoenix/parse/BindParseNode.java | 92 +- .../apache/phoenix/parse/BindTableNode.java | 40 +- .../phoenix/parse/BindableStatement.java | 8 +- .../parse/BooleanParseNodeVisitor.java | 561 +- .../BsonConditionExpressionParseNode.java | 31 +- .../parse/BsonUpdateExpressionParseNode.java | 27 +- .../phoenix/parse/BsonValueParseNode.java | 24 +- .../apache/phoenix/parse/CaseParseNode.java | 60 +- .../apache/phoenix/parse/CastParseNode.java | 181 +- .../apache/phoenix/parse/CeilParseNode.java | 92 +- .../phoenix/parse/ChangePermsStatement.java | 145 +- .../apache/phoenix/parse/CloseStatement.java | 28 +- .../org/apache/phoenix/parse/ColumnDef.java | 495 +- .../parse/ColumnDefInPkConstraint.java | 39 +- .../apache/phoenix/parse/ColumnFamilyDef.java | 42 +- .../org/apache/phoenix/parse/ColumnName.java | 163 +- .../apache/phoenix/parse/ColumnParseNode.java | 176 +- .../phoenix/parse/ComparisonParseNode.java | 59 +- .../phoenix/parse/CompoundParseNode.java | 113 +- .../phoenix/parse/ConcreteTableNode.java | 102 +- .../phoenix/parse/CreateCDCStatement.java | 73 +- .../parse/CreateFunctionStatement.java | 46 +- .../phoenix/parse/CreateIndexStatement.java | 171 +- .../phoenix/parse/CreateSchemaStatement.java | 40 +- .../parse/CreateSequenceStatement.java | 123 +- .../phoenix/parse/CreateTableStatement.java | 280 +- .../phoenix/parse/CurrentDateParseNode.java | 18 +- .../phoenix/parse/CurrentTimeParseNode.java | 18 +- .../org/apache/phoenix/parse/CursorName.java | 35 +- .../apache/phoenix/parse/DMLStatement.java | 23 +- .../phoenix/parse/DeclareCursorStatement.java | 59 +- .../DelegateConstantToCountParseNode.java | 33 +- .../phoenix/parse/DeleteJarStatement.java | 24 +- .../apache/phoenix/parse/DeleteStatement.java | 102 +- .../phoenix/parse/DerivedTableNode.java | 81 +- ...nctCountHyperLogLogAggregateParseNode.java | 25 +- .../phoenix/parse/DistinctCountParseNode.java | 50 +- .../apache/phoenix/parse/DivideParseNode.java | 38 +- .../phoenix/parse/DropCDCStatement.java | 66 +- .../phoenix/parse/DropColumnStatement.java | 31 +- .../phoenix/parse/DropFunctionStatement.java | 35 +- .../phoenix/parse/DropIndexStatement.java | 64 +- .../phoenix/parse/DropSchemaStatement.java | 64 +- .../phoenix/parse/DropSequenceStatement.java | 58 +- .../phoenix/parse/DropTableStatement.java | 80 +- 
.../apache/phoenix/parse/EqualParseNode.java | 27 +- .../parse/ExecuteUpgradeStatement.java | 18 +- .../apache/phoenix/parse/ExistsParseNode.java | 87 +- .../phoenix/parse/ExplainStatement.java | 42 +- .../org/apache/phoenix/parse/ExplainType.java | 20 +- .../parse/FamilyWildcardParseNode.java | 108 +- .../apache/phoenix/parse/FetchStatement.java | 60 +- .../phoenix/parse/FilterableStatement.java | 25 +- .../parse/FirstValueAggregateParseNode.java | 18 +- .../parse/FirstValuesAggregateParseNode.java | 18 +- .../apache/phoenix/parse/FloorParseNode.java | 81 +- .../phoenix/parse/FunctionParseNode.java | 960 +- .../parse/GreaterThanOrEqualParseNode.java | 28 +- .../phoenix/parse/GreaterThanParseNode.java | 28 +- .../org/apache/phoenix/parse/HintNode.java | 425 +- .../apache/phoenix/parse/InListParseNode.java | 122 +- .../org/apache/phoenix/parse/InParseNode.java | 108 +- .../IndexExpressionParseNodeRewriter.java | 102 +- .../phoenix/parse/IndexKeyConstraint.java | 60 +- .../apache/phoenix/parse/IsNullParseNode.java | 91 +- .../apache/phoenix/parse/JoinTableNode.java | 195 +- .../phoenix/parse/JsonExistsParseNode.java | 30 +- .../phoenix/parse/JsonModifyParseNode.java | 32 +- .../phoenix/parse/JsonQueryParseNode.java | 30 +- .../phoenix/parse/JsonValueParseNode.java | 30 +- .../parse/LastValueAggregateParseNode.java | 18 +- .../parse/LastValuesAggregateParseNode.java | 18 +- .../parse/LessThanOrEqualParseNode.java | 28 +- .../phoenix/parse/LessThanParseNode.java | 28 +- .../apache/phoenix/parse/LikeParseNode.java | 111 +- .../org/apache/phoenix/parse/LimitNode.java | 89 +- .../phoenix/parse/ListJarsStatement.java | 18 +- .../phoenix/parse/LiteralParseNode.java | 167 +- .../phoenix/parse/MaxAggregateParseNode.java | 20 +- .../phoenix/parse/MinAggregateParseNode.java | 20 +- .../phoenix/parse/ModulusParseNode.java | 37 +- .../phoenix/parse/MultiplyParseNode.java | 37 +- .../phoenix/parse/MutableStatement.java | 10 +- .../org/apache/phoenix/parse/NamedNode.java | 79 +- .../apache/phoenix/parse/NamedParseNode.java | 106 +- .../apache/phoenix/parse/NamedTableNode.java | 163 +- .../phoenix/parse/NotEqualParseNode.java | 28 +- .../apache/phoenix/parse/NotParseNode.java | 41 +- .../parse/NthValueAggregateParseNode.java | 18 +- .../org/apache/phoenix/parse/OffsetNode.java | 70 +- .../apache/phoenix/parse/OpenStatement.java | 28 +- .../org/apache/phoenix/parse/OrParseNode.java | 51 +- .../org/apache/phoenix/parse/OrderByNode.java | 131 +- .../org/apache/phoenix/parse/PFunction.java | 437 +- .../org/apache/phoenix/parse/PSchema.java | 90 +- .../apache/phoenix/parse/ParseContext.java | 68 +- .../apache/phoenix/parse/ParseException.java | 31 +- .../org/apache/phoenix/parse/ParseNode.java | 134 +- .../phoenix/parse/ParseNodeFactory.java | 1999 +- .../phoenix/parse/ParseNodeRewriter.java | 1197 +- .../phoenix/parse/ParseNodeVisitor.java | 229 +- .../parse/PhoenixRowTimestampParseNode.java | 119 +- .../phoenix/parse/PrimaryKeyConstraint.java | 147 +- .../apache/phoenix/parse/PropertyName.java | 38 +- .../phoenix/parse/RegexpReplaceParseNode.java | 31 +- .../phoenix/parse/RegexpSplitParseNode.java | 31 +- .../phoenix/parse/RegexpSubstrParseNode.java | 31 +- .../apache/phoenix/parse/RoundParseNode.java | 82 +- .../parse/RowValueConstructorParseNode.java | 55 +- .../org/apache/phoenix/parse/SQLParser.java | 399 +- .../apache/phoenix/parse/SelectStatement.java | 681 +- .../parse/SelectStatementRewriter.java | 315 +- .../phoenix/parse/SequenceValueParseNode.java | 158 +- 
.../apache/phoenix/parse/ShowCreateTable.java | 22 +- .../parse/ShowCreateTableStatement.java | 64 +- .../phoenix/parse/ShowSchemasStatement.java | 74 +- .../apache/phoenix/parse/ShowStatement.java | 22 +- .../phoenix/parse/ShowTablesStatement.java | 112 +- .../phoenix/parse/SingleTableStatement.java | 30 +- .../StatelessTraverseAllParseNodeVisitor.java | 253 +- .../phoenix/parse/StringConcatParseNode.java | 51 +- .../phoenix/parse/SubqueryParseNode.java | 103 +- .../phoenix/parse/SubtractParseNode.java | 37 +- .../phoenix/parse/SumAggregateParseNode.java | 20 +- .../org/apache/phoenix/parse/TableName.java | 138 +- .../org/apache/phoenix/parse/TableNode.java | 39 +- .../phoenix/parse/TableNodeVisitor.java | 17 +- .../phoenix/parse/TableWildcardParseNode.java | 122 +- .../phoenix/parse/TerminalParseNode.java | 25 +- .../apache/phoenix/parse/ToCharParseNode.java | 62 +- .../apache/phoenix/parse/ToDateParseNode.java | 32 +- .../phoenix/parse/ToNumberParseNode.java | 65 +- .../apache/phoenix/parse/ToTimeParseNode.java | 32 +- .../phoenix/parse/ToTimestampParseNode.java | 32 +- .../apache/phoenix/parse/TraceStatement.java | 42 +- .../parse/TraverseAllParseNodeVisitor.java | 392 +- .../parse/TraverseNoParseNodeVisitor.java | 563 +- .../apache/phoenix/parse/UDFParseNode.java | 8 +- .../apache/phoenix/parse/UnaryParseNode.java | 11 +- .../parse/UnsupportedAllParseNodeVisitor.java | 510 +- .../parse/UpdateStatisticsStatement.java | 52 +- .../apache/phoenix/parse/UpsertStatement.java | 74 +- .../phoenix/parse/UseSchemaStatement.java | 26 +- .../phoenix/parse/WildcardParseNode.java | 99 +- .../apache/phoenix/protobuf/ProtobufUtil.java | 237 +- .../phoenix/query/AdminUtilWithFallback.java | 74 +- .../phoenix/query/BaseQueryServicesImpl.java | 80 +- .../ChildLinkMetaDataServiceCallBack.java | 83 +- .../phoenix/query/ChildQueryServices.java | 34 +- .../phoenix/query/ConfigurationFactory.java | 48 +- .../query/ConnectionQueryServices.java | 493 +- .../query/ConnectionQueryServicesImpl.java | 12761 ++++++------- .../ConnectionlessQueryServicesImpl.java | 1517 +- .../query/DefaultGuidePostsCacheFactory.java | 65 +- .../DelegateConnectionQueryServices.java | 830 +- .../phoenix/query/DelegateQueryServices.java | 68 +- .../phoenix/query/EmptyStatsLoader.java | 54 +- .../apache/phoenix/query/GuidePostsCache.java | 31 +- .../phoenix/query/GuidePostsCacheFactory.java | 52 +- .../phoenix/query/GuidePostsCacheImpl.java | 210 +- .../query/GuidePostsCacheProvider.java | 121 +- .../phoenix/query/GuidePostsCacheWrapper.java | 89 +- .../phoenix/query/HBaseFactoryProvider.java | 43 +- .../phoenix/query/HConnectionFactory.java | 33 +- .../apache/phoenix/query/HTableFactory.java | 42 +- .../query/ITGuidePostsCacheFactory.java | 83 +- .../org/apache/phoenix/query/KeyRange.java | 1390 +- .../apache/phoenix/query/MetaDataMutated.java | 31 +- .../query/PhoenixStatsCacheLoader.java | 110 +- .../phoenix/query/PhoenixStatsLoader.java | 52 +- .../query/PropertyNotAllowedException.java | 23 +- .../apache/phoenix/query/PropertyPolicy.java | 40 +- .../phoenix/query/PropertyPolicyProvider.java | 20 +- .../apache/phoenix/query/QueryConstants.java | 858 +- .../apache/phoenix/query/QueryServices.java | 1103 +- .../phoenix/query/QueryServicesImpl.java | 17 +- .../phoenix/query/QueryServicesOptions.java | 1749 +- .../apache/phoenix/query/StatsLoaderImpl.java | 115 +- .../schema/AmbiguousColumnException.java | 36 +- .../schema/AmbiguousTableException.java | 36 +- .../schema/ArgumentTypeMismatchException.java | 54 +- 
.../schema/ColumnAlreadyExistsException.java | 57 +- .../schema/ColumnFamilyNotFoundException.java | 27 +- .../phoenix/schema/ColumnMetaDataOps.java | 326 +- .../apache/phoenix/schema/ColumnModifier.java | 223 +- .../schema/ColumnNotFoundException.java | 42 +- .../org/apache/phoenix/schema/ColumnRef.java | 229 +- .../phoenix/schema/ColumnValueDecoder.java | 14 +- .../phoenix/schema/ColumnValueEncoder.java | 39 +- .../ComparisonNotSupportedException.java | 12 +- .../ConcurrentTableMutationException.java | 35 +- .../phoenix/schema/ConnectionProperty.java | 56 +- .../schema/ConstraintViolationException.java | 34 +- .../apache/phoenix/schema/DelegateColumn.java | 199 +- .../apache/phoenix/schema/DelegateDatum.java | 70 +- .../phoenix/schema/DelegateSQLException.java | 76 +- .../apache/phoenix/schema/DelegateTable.java | 846 +- .../schema/EmptySequenceCacheException.java | 21 +- .../ExecuteQueryNotApplicableException.java | 33 +- .../ExecuteUpdateNotApplicableException.java | 31 +- .../FunctionAlreadyExistsException.java | 64 +- .../schema/FunctionNotFoundException.java | 48 +- .../phoenix/schema/IllegalDataException.java | 32 +- .../schema/IndexNotFoundException.java | 30 +- .../schema/IndexUncoveredDataColumnRef.java | 104 +- .../apache/phoenix/schema/KeyValueSchema.java | 370 +- ...MaxMutationSizeBytesExceededException.java | 33 +- .../MaxMutationSizeExceededException.java | 33 +- ...MaxPhoenixColumnSizeExceededException.java | 40 +- .../apache/phoenix/schema/MetaDataClient.java | 12413 +++++++------ .../MetaDataEntityNotFoundException.java | 6 +- .../NewerFunctionAlreadyExistsException.java | 26 +- .../NewerSchemaAlreadyExistsException.java | 10 +- .../NewerTableAlreadyExistsException.java | 26 +- .../org/apache/phoenix/schema/PColumn.java | 77 +- .../apache/phoenix/schema/PColumnFamily.java | 63 +- .../phoenix/schema/PColumnFamilyImpl.java | 146 +- .../apache/phoenix/schema/PColumnImpl.java | 624 +- .../org/apache/phoenix/schema/PDatum.java | 40 +- .../apache/phoenix/schema/PIndexState.java | 149 +- .../org/apache/phoenix/schema/PMetaData.java | 32 +- .../apache/phoenix/schema/PMetaDataCache.java | 178 +- .../phoenix/schema/PMetaDataEntity.java | 4 +- .../apache/phoenix/schema/PMetaDataImpl.java | 570 +- .../java/org/apache/phoenix/schema/PName.java | 137 +- .../apache/phoenix/schema/PNameFactory.java | 57 +- .../org/apache/phoenix/schema/PNameImpl.java | 138 +- .../java/org/apache/phoenix/schema/PRow.java | 98 +- .../phoenix/schema/PSynchronizedMetaData.java | 360 +- .../org/apache/phoenix/schema/PTable.java | 1899 +- .../org/apache/phoenix/schema/PTableImpl.java | 4680 +++-- .../org/apache/phoenix/schema/PTableKey.java | 112 +- .../org/apache/phoenix/schema/PTableRef.java | 58 +- .../phoenix/schema/PTableRefFactory.java | 48 +- .../apache/phoenix/schema/PTableRefImpl.java | 33 +- .../org/apache/phoenix/schema/PTableType.java | 164 +- .../phoenix/schema/ProjectedColumn.java | 93 +- .../schema/ReadOnlyTableException.java | 47 +- .../apache/phoenix/schema/RowKeySchema.java | 829 +- .../phoenix/schema/RowKeyValueAccessor.java | 803 +- ...nstructorOffsetInternalErrorException.java | 17 +- ...uctorOffsetNotAllowedInQueryException.java | 17 +- ...onstructorOffsetNotCoercibleException.java | 17 +- .../apache/phoenix/schema/SaltingUtil.java | 154 +- .../schema/SchemaAlreadyExistsException.java | 34 +- .../schema/SchemaNotFoundException.java | 36 +- .../org/apache/phoenix/schema/Sequence.java | 1177 +- .../phoenix/schema/SequenceAllocation.java | 76 +- .../SequenceAlreadyExistsException.java | 37 
+- .../apache/phoenix/schema/SequenceInfo.java | 58 +- .../apache/phoenix/schema/SequenceKey.java | 128 +- .../schema/SequenceNotFoundException.java | 21 +- .../phoenix/schema/SerializedPTableRef.java | 41 +- .../schema/SerializedPTableRefFactory.java | 37 +- .../org/apache/phoenix/schema/SortOrder.java | 239 +- .../StaleRegionBoundaryCacheException.java | 34 +- .../schema/TableAlreadyExistsException.java | 69 +- .../schema/TableNotFoundException.java | 75 +- .../apache/phoenix/schema/TableProperty.java | 735 +- .../org/apache/phoenix/schema/TableRef.java | 312 +- .../schema/TablesNotInSyncException.java | 26 +- .../phoenix/schema/TypeMismatchException.java | 57 +- .../UpsertColumnsValuesMismatchException.java | 36 +- .../apache/phoenix/schema/ValueBitSet.java | 335 +- .../phoenix/schema/ValueRangeExcpetion.java | 24 +- .../apache/phoenix/schema/ValueSchema.java | 630 +- .../DefaultSchemaRegistryRepository.java | 101 +- .../schema/export/DefaultSchemaWriter.java | 27 +- .../phoenix/schema/export/SchemaImporter.java | 20 +- .../export/SchemaRegistryRepository.java | 76 +- .../SchemaRegistryRepositoryFactory.java | 73 +- .../phoenix/schema/export/SchemaWriter.java | 43 +- .../schema/export/SchemaWriterFactory.java | 33 +- .../schema/metrics/MetricsMetadataSource.java | 438 +- .../metrics/MetricsMetadataSourceFactory.java | 39 +- .../metrics/MetricsMetadataSourceImpl.java | 465 +- .../phoenix/schema/stats/GuidePostsInfo.java | 225 +- .../schema/stats/GuidePostsInfoBuilder.java | 197 +- .../phoenix/schema/stats/GuidePostsKey.java | 101 +- .../stats/StatisticsCollectionRunTracker.java | 220 +- .../stats/StatisticsCollectionScope.java | 16 +- .../phoenix/schema/stats/StatisticsUtil.java | 384 +- ...tsCollectionDisabledOnServerException.java | 32 +- .../phoenix/schema/task/SystemTaskParams.java | 269 +- .../org/apache/phoenix/schema/task/Task.java | 680 +- .../tool/SchemaExtractionProcessor.java | 1366 +- .../phoenix/schema/tool/SchemaProcessor.java | 4 +- .../phoenix/schema/tool/SchemaSQLUtil.java | 264 +- .../schema/tool/SchemaSynthesisProcessor.java | 358 +- .../phoenix/schema/tool/SchemaTool.java | 264 +- .../transform/SystemTransformRecord.java | 444 +- .../schema/transform/TransformClient.java | 698 +- .../schema/transform/TransformMaintainer.java | 1004 +- .../phoenix/schema/tuple/BaseTuple.java | 87 +- .../phoenix/schema/tuple/DelegateTuple.java | 86 +- .../tuple/EncodedColumnQualiferCellsList.java | 1015 +- .../schema/tuple/MultiKeyValueTuple.java | 97 +- .../PositionBasedMultiKeyValueTuple.java | 96 +- .../tuple/PositionBasedResultTuple.java | 159 +- .../phoenix/schema/tuple/ResultTuple.java | 124 +- .../schema/tuple/SingleKeyValueTuple.java | 172 +- .../apache/phoenix/schema/tuple/Tuple.java | 136 +- .../schema/tuple/ValueGetterTuple.java | 132 +- .../phoenix/schema/types/PArrayDataType.java | 2221 +-- .../schema/types/PArrayDataTypeDecoder.java | 269 +- .../schema/types/PArrayDataTypeEncoder.java | 390 +- .../apache/phoenix/schema/types/PBinary.java | 338 +- .../phoenix/schema/types/PBinaryArray.java | 108 +- .../phoenix/schema/types/PBinaryBase.java | 199 +- .../apache/phoenix/schema/types/PBoolean.java | 296 +- .../phoenix/schema/types/PBooleanArray.java | 108 +- .../apache/phoenix/schema/types/PBson.java | 182 +- .../apache/phoenix/schema/types/PChar.java | 343 +- .../phoenix/schema/types/PCharArray.java | 114 +- .../phoenix/schema/types/PDataType.java | 2251 +-- .../schema/types/PDataTypeFactory.java | 36 +- .../apache/phoenix/schema/types/PDate.java | 420 +- 
.../phoenix/schema/types/PDateArray.java | 114 +- .../apache/phoenix/schema/types/PDecimal.java | 744 +- .../phoenix/schema/types/PDecimalArray.java | 108 +- .../apache/phoenix/schema/types/PDouble.java | 76 +- .../phoenix/schema/types/PDoubleArray.java | 115 +- .../apache/phoenix/schema/types/PFloat.java | 56 +- .../phoenix/schema/types/PFloatArray.java | 115 +- .../apache/phoenix/schema/types/PInteger.java | 34 +- .../phoenix/schema/types/PIntegerArray.java | 115 +- .../apache/phoenix/schema/types/PJson.java | 159 +- .../apache/phoenix/schema/types/PLong.java | 568 +- .../phoenix/schema/types/PLongArray.java | 115 +- .../phoenix/schema/types/PNumericType.java | 57 +- .../phoenix/schema/types/PRealNumber.java | 38 +- .../phoenix/schema/types/PSmallint.java | 363 +- .../phoenix/schema/types/PSmallintArray.java | 115 +- .../apache/phoenix/schema/types/PTime.java | 82 +- .../phoenix/schema/types/PTimeArray.java | 115 +- .../phoenix/schema/types/PTimestamp.java | 554 +- .../phoenix/schema/types/PTimestampArray.java | 115 +- .../apache/phoenix/schema/types/PTinyint.java | 32 +- .../phoenix/schema/types/PTinyintArray.java | 115 +- .../phoenix/schema/types/PUnsignedDate.java | 281 +- .../schema/types/PUnsignedDateArray.java | 110 +- .../phoenix/schema/types/PUnsignedDouble.java | 269 +- .../schema/types/PUnsignedDoubleArray.java | 125 +- .../phoenix/schema/types/PUnsignedFloat.java | 15 +- .../schema/types/PUnsignedFloatArray.java | 115 +- .../phoenix/schema/types/PUnsignedInt.java | 29 +- .../schema/types/PUnsignedIntArray.java | 115 +- .../phoenix/schema/types/PUnsignedLong.java | 365 +- .../schema/types/PUnsignedLongArray.java | 115 +- .../schema/types/PUnsignedSmallint.java | 13 +- .../schema/types/PUnsignedSmallintArray.java | 117 +- .../phoenix/schema/types/PUnsignedTime.java | 22 +- .../schema/types/PUnsignedTimeArray.java | 115 +- .../schema/types/PUnsignedTimestamp.java | 87 +- .../schema/types/PUnsignedTimestampArray.java | 117 +- .../schema/types/PUnsignedTinyint.java | 11 +- .../schema/types/PUnsignedTinyintArray.java | 117 +- .../phoenix/schema/types/PVarbinary.java | 277 +- .../phoenix/schema/types/PVarbinaryArray.java | 115 +- .../schema/types/PVarbinaryEncoded.java | 290 +- .../apache/phoenix/schema/types/PVarchar.java | 334 +- .../phoenix/schema/types/PVarcharArray.java | 115 +- .../phoenix/schema/types/PWholeNumber.java | 34 +- .../phoenix/schema/types/PhoenixArray.java | 935 +- .../apache/phoenix/trace/MetricsInfoImpl.java | 32 +- .../phoenix/trace/PhoenixMetricsSink.java | 455 +- .../org/apache/phoenix/trace/TraceReader.java | 615 +- .../phoenix/trace/TraceSpanReceiver.java | 65 +- .../org/apache/phoenix/trace/TraceWriter.java | 468 +- .../apache/phoenix/trace/TracingIterator.java | 66 +- .../apache/phoenix/trace/TracingUtils.java | 65 +- .../trace/util/ConfigurationAdapter.java | 4 +- .../apache/phoenix/trace/util/NullSpan.java | 2 +- .../apache/phoenix/trace/util/Tracing.java | 474 +- .../NotAvailableTransactionProvider.java | 98 +- .../transaction/OmidTransactionContext.java | 488 +- .../transaction/OmidTransactionProvider.java | 177 +- .../transaction/OmidTransactionTable.java | 578 +- .../transaction/PhoenixTransactionClient.java | 2 +- .../PhoenixTransactionContext.java | 406 +- .../PhoenixTransactionProvider.java | 72 +- .../transaction/TransactionFactory.java | 101 +- .../apache/phoenix/util/Base62Encoder.java | 202 +- .../apache/phoenix/util/BigDecimalUtil.java | 89 +- .../java/org/apache/phoenix/util/BitSet.java | 167 +- .../org/apache/phoenix/util/ByteUtil.java | 
1482 +- .../apache/phoenix/util/CDCChangeBuilder.java | 215 +- .../java/org/apache/phoenix/util/CDCUtil.java | 186 +- .../apache/phoenix/util/CSVCommonsLoader.java | 451 +- .../org/apache/phoenix/util/ClientUtil.java | 324 +- .../org/apache/phoenix/util/Closeables.java | 212 +- .../org/apache/phoenix/util/ColumnInfo.java | 334 +- .../org/apache/phoenix/util/ConfigUtil.java | 3 +- .../org/apache/phoenix/util/CostUtil.java | 117 +- .../org/apache/phoenix/util/CursorUtil.java | 270 +- .../org/apache/phoenix/util/DateUtil.java | 824 +- .../phoenix/util/DefaultEnvironmentEdge.java | 4 +- .../phoenix/util/DeferredStringBuilder.java | 203 +- .../phoenix/util/EncodedColumnsUtil.java | 309 +- .../apache/phoenix/util/EnvironmentEdge.java | 7 +- .../phoenix/util/EnvironmentEdgeManager.java | 27 +- .../util/EquiDepthStreamHistogram.java | 803 +- .../apache/phoenix/util/ExpressionUtil.java | 877 +- .../util/FirstLastNthValueDataContainer.java | 253 +- .../org/apache/phoenix/util/IndexUtil.java | 1633 +- .../apache/phoenix/util/InstanceResolver.java | 116 +- .../org/apache/phoenix/util/JDBCUtil.java | 371 +- .../org/apache/phoenix/util/JacksonUtil.java | 38 +- .../java/org/apache/phoenix/util/LogUtil.java | 70 +- .../phoenix/util/MajorMinorVersion.java | 65 +- .../phoenix/util/ManualEnvironmentEdge.java | 28 +- .../org/apache/phoenix/util/MetaDataUtil.java | 2303 +-- .../org/apache/phoenix/util/NumberUtil.java | 105 +- .../apache/phoenix/util/ParseNodeUtil.java | 290 +- .../phoenix/util/PhoenixContextExecutor.java | 115 +- .../phoenix/util/PhoenixKeyValueUtil.java | 431 +- .../apache/phoenix/util/PhoenixRuntime.java | 2934 ++- .../apache/phoenix/util/PhoenixStopWatch.java | 104 +- .../apache/phoenix/util/PrefixByteCodec.java | 98 +- .../phoenix/util/PrefixByteDecoder.java | 113 +- .../phoenix/util/PrefixByteEncoder.java | 128 +- .../apache/phoenix/util/PropertiesUtil.java | 117 +- .../org/apache/phoenix/util/QueryBuilder.java | 351 +- .../org/apache/phoenix/util/QueryUtil.java | 1455 +- .../apache/phoenix/util/ReadOnlyProps.java | 529 +- .../org/apache/phoenix/util/ResultUtil.java | 180 +- .../org/apache/phoenix/util/SQLCloseable.java | 10 +- .../apache/phoenix/util/SQLCloseables.java | 182 +- .../org/apache/phoenix/util/ScanUtil.java | 3592 ++-- .../org/apache/phoenix/util/SchemaUtil.java | 2697 +-- .../org/apache/phoenix/util/SequenceUtil.java | 160 +- .../org/apache/phoenix/util/SizedUtil.java | 103 +- .../org/apache/phoenix/util/StringUtil.java | 651 +- .../phoenix/util/TableViewFinderResult.java | 42 +- .../util/TaskMetaDataServiceCallBack.java | 60 +- .../org/apache/phoenix/util/TimeKeeper.java | 20 +- .../apache/phoenix/util/TransactionUtil.java | 250 +- .../util/TrustedByteArrayOutputStream.java | 51 +- .../org/apache/phoenix/util/TupleUtil.java | 301 +- .../org/apache/phoenix/util/UpgradeUtil.java | 5809 +++--- .../apache/phoenix/util/UpsertExecutor.java | 213 +- .../util/ValidateLastDDLTimestampUtil.java | 378 +- .../phoenix/util/VarBinaryFormatter.java | 37 +- .../phoenix/util/ViewIndexIdRetrieveUtil.java | 61 +- .../org/apache/phoenix/util/ViewUtil.java | 1766 +- .../phoenix/util/WALAnnotationUtil.java | 60 +- .../phoenix/util/csv/CsvUpsertExecutor.java | 333 +- .../util/csv/StringToArrayConverter.java | 71 +- .../phoenix/util/i18n/LinguisticSort.java | 2145 ++- .../apache/phoenix/util/i18n/LocaleUtils.java | 111 +- .../apache/phoenix/util/i18n/OracleUpper.java | 113 +- .../phoenix/util/i18n/OracleUpperTable.java | 616 +- .../phoenix/util/json/BsonDataFormat.java | 317 +- 
.../phoenix/util/json/BsonJsonProvider.java | 411 +- .../phoenix/util/json/JsonDataFormat.java | 107 +- .../util/json/JsonDataFormatFactory.java | 20 +- .../phoenix/util/json/JsonUpsertExecutor.java | 369 +- .../util/json/ObjectToArrayConverter.java | 63 +- .../util/regex/RegexUpsertExecutor.java | 94 +- phoenix-core-server/pom.xml | 333 +- .../hadoop/hbase/ipc/PhoenixRpcScheduler.java | 456 +- .../hbase/ipc/PhoenixRpcSchedulerFactory.java | 101 +- .../org/apache/hadoop/hbase/ipc/RpcUtil.java | 17 +- ...ionServerMetadataRpcControllerFactory.java | 58 +- .../ServerRpcControllerFactory.java | 10 +- .../DataTableLocalIndexRegionScanner.java | 158 +- .../IndexHalfStoreFileReader.java | 207 +- .../IndexHalfStoreFileReaderGenerator.java | 429 +- .../IndexKeyValueSkipListSet.java | 24 +- .../regionserver/KeyValueSkipListSet.java | 41 +- .../regionserver/LocalIndexSplitter.java | 4 +- .../LocalIndexStoreFileScanner.java | 415 +- .../regionserver/ScannerContextUtil.java | 61 +- .../wal/BinaryCompatibleBaseDecoder.java | 20 +- .../regionserver/wal/IndexedHLogReader.java | 15 +- .../regionserver/wal/IndexedWALEditCodec.java | 197 +- .../org/apache/phoenix/cache/GlobalCache.java | 331 +- .../phoenix/cache/aggcache/SpillFile.java | 218 +- .../phoenix/cache/aggcache/SpillManager.java | 475 +- .../phoenix/cache/aggcache/SpillMap.java | 876 +- .../cache/aggcache/SpillableGroupByCache.java | 671 +- .../phoenix/coprocessor/AddColumnMutator.java | 726 +- .../BaseMetaDataEndpointObserver.java | 132 +- .../coprocessor/BaseRegionScanner.java | 63 +- .../BaseScannerRegionObserver.java | 905 +- .../CDCGlobalIndexRegionScanner.java | 388 +- .../ChildLinkMetaDataEndpoint.java | 126 +- .../phoenix/coprocessor/ColumnMutator.java | 63 +- .../coprocessor/CompactionScanner.java | 4592 +++-- .../DelegateRegionCoprocessorEnvironment.java | 185 +- .../coprocessor/DelegateRegionObserver.java | 546 +- .../coprocessor/DelegateRegionScanner.java | 177 +- .../coprocessor/DropColumnMutator.java | 442 +- .../coprocessor/GlobalIndexRegionScanner.java | 2797 +-- .../phoenix/coprocessor/GroupByCache.java | 19 +- .../GroupedAggregateRegionObserver.java | 1671 +- .../coprocessor/HashJoinRegionScanner.java | 657 +- .../IndexRebuildRegionScanner.java | 709 +- .../coprocessor/IndexRepairRegionScanner.java | 776 +- .../IndexToolVerificationResult.java | 1143 +- .../coprocessor/IndexerRegionScanner.java | 775 +- .../coprocessor/MetaDataEndpointImpl.java | 9635 +++++----- .../coprocessor/MetaDataEndpointObserver.java | 62 +- .../coprocessor/MetaDataRegionObserver.java | 1183 +- .../phoenix/coprocessor/OmidGCProcessor.java | 43 +- .../OmidTransactionalProcessor.java | 46 +- .../coprocessor/PagingRegionScanner.java | 479 +- .../coprocessor/PhoenixAccessController.java | 1236 +- .../coprocessor/PhoenixCoprocessor.java | 8 +- .../PhoenixMetaDataCoprocessorHost.java | 394 +- .../PhoenixRegionServerEndpoint.java | 143 +- .../coprocessor/PhoenixTTLRegionObserver.java | 533 +- .../coprocessor/ReplicationSinkEndpoint.java | 3 +- .../coprocessor/ScanRegionObserver.java | 241 +- .../coprocessor/SequenceRegionObserver.java | 781 +- .../ServerCachingEndpointImpl.java | 68 +- .../phoenix/coprocessor/SuffixFilter.java | 70 +- .../SystemCatalogRegionObserver.java | 46 +- .../phoenix/coprocessor/TTLRegionScanner.java | 406 +- .../coprocessor/TaskMetaDataEndpoint.java | 127 +- .../coprocessor/TaskRegionObserver.java | 420 +- .../TephraTransactionalProcessor.java | 15 +- .../UncoveredGlobalIndexRegionScanner.java | 311 +- .../UncoveredIndexRegionScanner.java 
| 779 +- .../UncoveredLocalIndexRegionScanner.java | 164 +- .../UngroupedAggregateRegionObserver.java | 1645 +- .../UngroupedAggregateRegionScanner.java | 1195 +- .../coprocessor/VerifyLastDDLTimestamp.java | 68 +- .../phoenix/coprocessor/tasks/BaseTask.java | 21 +- .../coprocessor/tasks/DropChildViewsTask.java | 135 +- .../coprocessor/tasks/IndexRebuildTask.java | 280 +- .../tasks/TransformMonitorTask.java | 305 +- .../hbase/index/CapturingAbortable.java | 4 +- .../hbase/index/IndexRegionObserver.java | 3268 ++-- .../hbase/index/IndexRegionSplitPolicy.java | 72 +- .../apache/phoenix/hbase/index/Indexer.java | 914 +- .../phoenix/hbase/index/LockManager.java | 296 +- .../index/balancer/IndexLoadBalancer.java | 2 +- .../hbase/index/builder/BaseIndexBuilder.java | 236 +- .../index/builder/IndexBuildManager.java | 76 +- .../hbase/index/builder/IndexBuilder.java | 72 +- .../hbase/index/covered/CoveredColumns.java | 6 +- .../hbase/index/covered/LocalTableState.java | 411 +- .../index/covered/NonTxIndexBuilder.java | 431 +- .../index/covered/data/CachedLocalTable.java | 277 +- .../index/covered/data/IndexMemStore.java | 55 +- .../index/parallel/ThreadPoolManager.java | 33 +- .../hbase/index/wal/IndexedKeyValue.java | 259 +- .../hbase/index/wal/KeyValueCodec.java | 14 +- .../AbstractParallelWriterIndexCommitter.java | 363 +- .../write/DelegateIndexFailurePolicy.java | 63 +- .../hbase/index/write/IndexCommitter.java | 18 +- .../hbase/index/write/IndexFailurePolicy.java | 12 +- .../hbase/index/write/IndexWriter.java | 103 +- .../hbase/index/write/IndexWriterUtils.java | 156 +- .../write/KillServerOnFailurePolicy.java | 13 +- .../LazyParallelWriterIndexCommitter.java | 18 +- .../write/LeaveIndexActiveFailurePolicy.java | 57 +- .../write/ParallelWriterIndexCommitter.java | 79 +- .../index/write/RecoveryIndexWriter.java | 181 +- .../TrackingParallelWriterIndexCommitter.java | 482 +- .../recovery/PerRegionIndexWriteCache.java | 18 +- .../recovery/StoreFailuresInCachePolicy.java | 13 +- .../phoenix/index/GlobalIndexChecker.java | 1153 +- .../phoenix/index/PhoenixIndexBuilder.java | 403 +- .../index/PhoenixIndexFailurePolicy.java | 597 +- .../index/PhoenixIndexMetaDataBuilder.java | 122 +- .../index/PhoenixTransactionalIndexer.java | 350 +- .../iterate/MapReduceParallelScanGrouper.java | 209 +- .../NonAggregateRegionScannerFactory.java | 1051 +- .../phoenix/iterate/RegionScannerFactory.java | 368 +- .../iterate/RegionScannerResultIterator.java | 123 +- .../phoenix/iterate/SnapshotScanner.java | 103 +- .../iterate/TableSnapshotResultIterator.java | 74 +- .../mapreduce/AbstractBulkLoadTool.java | 756 +- .../phoenix/mapreduce/CsvBulkImportUtil.java | 100 +- .../phoenix/mapreduce/CsvBulkLoadTool.java | 158 +- .../mapreduce/CsvToKeyValueMapper.java | 135 +- .../FormatToBytesWritableMapper.java | 633 +- .../mapreduce/FormatToKeyValueReducer.java | 218 +- .../ImportPreUpsertKeyValueProcessor.java | 33 +- .../phoenix/mapreduce/JsonBulkLoadTool.java | 34 +- .../mapreduce/JsonToKeyValueMapper.java | 61 +- .../mapreduce/MultiHfileOutputFormat.java | 1200 +- .../phoenix/mapreduce/OrphanViewTool.java | 1633 +- .../phoenix/mapreduce/PhoenixInputFormat.java | 446 +- .../phoenix/mapreduce/PhoenixInputSplit.java | 209 +- .../phoenix/mapreduce/PhoenixJobCounters.java | 11 +- .../PhoenixMultiViewInputFormat.java | 136 +- .../mapreduce/PhoenixMultiViewInputSplit.java | 80 +- .../mapreduce/PhoenixMultiViewReader.java | 94 +- .../mapreduce/PhoenixOutputCommitter.java | 39 +- .../mapreduce/PhoenixOutputFormat.java | 65 +- 
.../mapreduce/PhoenixRecordReader.java | 269 +- .../mapreduce/PhoenixRecordWritable.java | 274 +- .../mapreduce/PhoenixRecordWriter.java | 119 +- .../PhoenixServerBuildIndexInputFormat.java | 304 +- .../mapreduce/PhoenixTTLDeleteJobMapper.java | 375 +- .../phoenix/mapreduce/PhoenixTTLTool.java | 517 +- .../mapreduce/PhoenixTextInputFormat.java | 19 +- .../phoenix/mapreduce/RegexBulkLoadTool.java | 74 +- .../mapreduce/RegexToKeyValueMapper.java | 173 +- .../mapreduce/bulkload/TableRowkeyPair.java | 185 +- .../mapreduce/bulkload/TargetTableRef.java | 67 +- .../bulkload/TargetTableRefFunctions.java | 146 +- .../mapreduce/index/DirectHTableWriter.java | 124 +- .../mapreduce/index/IndexScrutinyMapper.java | 890 +- .../index/IndexScrutinyMapperForTest.java | 35 +- .../index/IndexScrutinyTableOutput.java | 603 +- .../mapreduce/index/IndexScrutinyTool.java | 907 +- .../phoenix/mapreduce/index/IndexTool.java | 2128 +-- .../mapreduce/index/IndexToolUtil.java | 83 +- .../mapreduce/index/IndexUpgradeTool.java | 1739 +- .../IndexVerificationOutputRepository.java | 680 +- .../index/IndexVerificationOutputRow.java | 368 +- .../IndexVerificationResultRepository.java | 694 +- .../index/PhoenixIndexDBWritable.java | 118 +- .../index/PhoenixIndexImportDirectMapper.java | 272 +- .../PhoenixIndexImportDirectReducer.java | 334 +- .../index/PhoenixIndexPartialBuildMapper.java | 261 +- .../index/PhoenixIndexToolJobCounters.java | 55 +- .../index/PhoenixScrutinyJobCounters.java | 55 +- .../PhoenixServerBuildIndexDBWritable.java | 39 +- .../index/PhoenixServerBuildIndexMapper.java | 67 +- .../index/SourceTargetColumnNames.java | 293 +- .../index/automation/PhoenixAsyncIndex.java | 81 +- .../automation/PhoenixMRJobCallable.java | 74 +- .../automation/PhoenixMRJobSubmitter.java | 494 +- .../index/automation/YarnApplication.java | 353 +- .../transform/PhoenixTransformReducer.java | 69 +- .../PhoenixTransformRepairMapper.java | 278 +- .../PhoenixTransformWithViewsInputFormat.java | 156 +- .../mapreduce/transform/TransformTool.java | 1730 +- .../ColumnInfoToStringEncoderDecoder.java | 62 +- .../DefaultMultiViewJobStatusTracker.java | 48 +- .../util/DefaultMultiViewSplitStrategy.java | 90 +- .../DefaultPhoenixMultiViewListProvider.java | 305 +- .../mapreduce/util/IndexColumnNames.java | 424 +- .../util/MultiViewJobStatusTracker.java | 8 +- .../util/MultiViewSplitStrategy.java | 10 +- .../util/PhoenixConfigurationUtil.java | 1650 +- .../mapreduce/util/PhoenixMapReduceUtil.java | 426 +- .../mapreduce/util/PhoenixMultiInputUtil.java | 154 +- .../util/PhoenixMultiViewListProvider.java | 9 +- .../mapreduce/util/ViewInfoTracker.java | 154 +- .../mapreduce/util/ViewInfoWritable.java | 59 +- .../SystemCatalogWALEntryFilter.java | 72 +- .../phoenix/schema/MetaDataSplitPolicy.java | 69 +- .../SplitOnLeadingVarCharColumnsPolicy.java | 43 +- .../schema/SystemFunctionSplitPolicy.java | 10 +- .../schema/SystemStatsSplitPolicy.java | 10 +- .../phoenix/schema/SystemTaskSplitPolicy.java | 5 +- .../stats/DefaultStatisticsCollector.java | 573 +- .../schema/stats/NoOpStatisticsCollector.java | 79 +- .../schema/stats/StatisticsCollector.java | 75 +- .../stats/StatisticsCollectorFactory.java | 75 +- .../schema/stats/StatisticsScanner.java | 287 +- .../schema/stats/StatisticsWriter.java | 434 +- .../schema/stats/UpdateStatisticsTool.java | 503 +- .../phoenix/schema/task/ServerTask.java | 144 +- .../phoenix/schema/transform/Transform.java | 851 +- .../util/MergeViewIndexIdSequencesTool.java | 133 +- .../apache/phoenix/util/PhoenixMRJobUtil.java 
| 325 +- .../org/apache/phoenix/util/RepairUtil.java | 30 +- .../apache/phoenix/util/ServerIndexUtil.java | 118 +- .../org/apache/phoenix/util/ServerUtil.java | 433 +- .../apache/phoenix/util/ServerViewUtil.java | 178 +- .../util/ZKBasedMasterElectionUtil.java | 60 +- phoenix-core/pom.xml | 888 +- .../gold_files/gold_query_add_data.txt | 2 +- .../gold_files/gold_query_add_delete.txt | 2 +- .../gold_files/gold_query_create_add.txt | 2 +- .../gold_files/gold_query_delete.txt | 2 +- ...gold_query_delete_for_splitable_syscat.txt | 2 +- .../gold_files/gold_query_orderby_nonpk.txt | 2 +- .../gold_files/gold_query_ordered_groupby.txt | 2 +- .../gold_files/gold_query_view_index.txt | 2 +- .../src/it/resources/scripts/execute_query.sh | 2 +- .../hbase/ipc/PhoenixRpcSchedulerTest.java | 306 +- .../PhoenixRpcSchedulerFactoryTest.java | 174 +- .../regionserver/wal/IndexedKeyValueTest.java | 121 +- .../wal/IndexedWALEditCodecTest.java | 30 +- .../wal/ReadWriteKeyValuesWithCodecTest.java | 34 +- .../impl/ExposedMetricCounterLong.java | 8 +- .../impl/ExposedMetricsRecordImpl.java | 10 +- .../metrics2/lib/ExposedMetricsInfoImpl.java | 18 +- .../org/apache/phoenix/SystemExitRule.java | 77 +- .../java/org/apache/phoenix/TestJVMExit.java | 35 +- .../apache/phoenix/TestSecurityManager.java | 234 +- .../phoenix/cache/JodaTimezoneCacheTest.java | 50 +- .../phoenix/cache/ServerCacheClientTest.java | 40 +- .../apache/phoenix/cache/TenantCacheTest.java | 339 +- .../compile/CreateTableCompilerTest.java | 52 +- .../phoenix/compile/CursorCompilerTest.java | 95 +- .../phoenix/compile/HavingCompilerTest.java | 307 +- .../compile/JoinQueryCompilerTest.java | 221 +- .../phoenix/compile/LimitCompilerTest.java | 155 +- .../compile/PostIndexDDLCompilerTest.java | 80 +- .../phoenix/compile/QueryCompilerTest.java | 15386 ++++++++-------- .../phoenix/compile/QueryMetaDataTest.java | 811 +- .../phoenix/compile/QueryOptimizerTest.java | 1631 +- .../compile/RVCOffsetCompilerTest.java | 261 +- .../phoenix/compile/SaltedScanRangesTest.java | 355 +- .../compile/ScanRangesIntersectTest.java | 70 +- .../phoenix/compile/ScanRangesTest.java | 977 +- .../compile/SelectStatementRewriterTest.java | 121 +- .../StatementHintsCompilationTest.java | 130 +- .../TenantSpecificViewIndexCompileTest.java | 494 +- .../phoenix/compile/ViewCompilerTest.java | 157 +- .../phoenix/compile/WhereCompilerTest.java | 2155 +-- .../phoenix/compile/WhereOptimizerTest.java | 7265 ++++---- .../coprocessor/TaskMetaDataEndpointTest.java | 316 +- .../DescVarLengthFastByteComparisonsTest.java | 38 +- .../LiteralResultIteratorPlanTest.java | 256 +- .../phoenix/execute/MutationStateTest.java | 452 +- .../phoenix/execute/SortMergeJoinTest.java | 753 +- .../phoenix/execute/UnnestArrayPlanTest.java | 215 +- .../phoenix/expression/AbsFunctionTest.java | 248 +- .../phoenix/expression/AndExpressionTest.java | 560 +- .../expression/ArithmeticOperationTest.java | 516 +- .../expression/ArrayAppendFunctionTest.java | 773 +- .../expression/ArrayConcatFunctionTest.java | 1378 +- .../ArrayConstructorExpressionTest.java | 81 +- .../expression/ArrayFillFunctionTest.java | 438 +- .../expression/ArrayPrependFunctionTest.java | 1213 +- .../expression/ArrayRemoveFunctionTest.java | 471 +- .../expression/ArrayToStringFunctionTest.java | 682 +- .../phoenix/expression/CbrtFunctionTest.java | 119 +- .../expression/CoerceExpressionTest.java | 154 +- .../expression/ColumnExpressionTest.java | 156 +- .../phoenix/expression/DeterminismTest.java | 60 +- .../phoenix/expression/ExpFunctionTest.java 
| 130 +- .../expression/GetSetByteBitFunctionTest.java | 268 +- .../expression/ILikeExpressionTest.java | 82 +- .../expression/InListExpressionTest.java | 463 +- .../expression/LikeExpressionTest.java | 118 +- .../phoenix/expression/LnLogFunctionTest.java | 192 +- .../expression/MathPIFunctionTest.java | 23 +- .../expression/MathTrigFunctionTest.java | 190 +- .../phoenix/expression/NullValueTest.java | 130 +- .../expression/OctetLengthFunctionTest.java | 60 +- .../phoenix/expression/OrExpressionTest.java | 551 +- .../phoenix/expression/PowerFunctionTest.java | 194 +- .../expression/RegexpReplaceFunctionTest.java | 85 +- .../expression/RegexpSplitFunctionTest.java | 107 +- .../expression/RegexpSubstrFunctionTest.java | 88 +- .../RoundFloorCeilExpressionsTest.java | 3472 ++-- .../phoenix/expression/SignFunctionTest.java | 125 +- .../expression/SortOrderExpressionTest.java | 596 +- .../phoenix/expression/SqrtFunctionTest.java | 130 +- .../expression/StringToArrayFunctionTest.java | 489 +- .../BuiltinFunctionConstructorTest.java | 73 +- .../function/CollationKeyFunctionTest.java | 431 +- .../ExternalSqlTypeIdFunctionTest.java | 68 +- .../function/InstrFunctionTest.java | 169 +- .../function/LowerFunctionTest.java | 107 +- .../function/UpperFunctionTest.java | 104 +- ...alueConstructorExpressionRewriterTest.java | 73 +- .../util/regex/PatternPerformanceTest.java | 219 +- .../filter/DistinctPrefixFilterTest.java | 468 +- ...dQualifiersColumnProjectionFilterTest.java | 70 +- .../phoenix/filter/SkipScanBigFilterTest.java | 1803 +- .../filter/SkipScanFilterIntersectTest.java | 1111 +- .../phoenix/filter/SkipScanFilterTest.java | 1090 +- .../phoenix/hbase/index/IndexTableName.java | 4 +- .../hbase/index/IndexTestingUtils.java | 29 +- .../phoenix/hbase/index/StubAbortable.java | 2 +- .../hbase/index/covered/ColumnGroup.java | 18 +- .../hbase/index/covered/CoveredColumn.java | 7 +- .../covered/CoveredColumnIndexCodec.java | 616 +- .../CoveredColumnIndexSpecifierBuilder.java | 46 +- .../index/covered/CoveredColumnsTest.java | 12 +- .../covered/CoveredIndexCodecForTesting.java | 66 +- .../index/covered/LocalTableStateTest.java | 93 +- .../index/covered/NonTxIndexBuilderTest.java | 605 +- .../index/covered/TestColumnTracker.java | 7 +- .../covered/TestCoveredColumnIndexCodec.java | 75 +- .../TestCoveredIndexSpecifierBuilder.java | 22 +- .../index/covered/data/TestIndexMemStore.java | 15 +- .../TestApplyAndFilterDeletesFilter.java | 14 +- .../filter/TestNewerTimestampFilter.java | 5 +- .../update/TestIndexUpdateManager.java | 4 +- .../hbase/index/parallel/TestTaskRunner.java | 75 +- .../index/parallel/TestThreadPoolBuilder.java | 10 +- .../index/parallel/TestThreadPoolManager.java | 23 +- .../index/util/TestIndexManagementUtil.java | 18 +- .../hbase/index/write/FakeTableFactory.java | 21 +- .../hbase/index/write/TestIndexWriter.java | 79 +- .../index/write/TestParalleIndexWriter.java | 44 +- .../TestParalleWriterIndexCommitter.java | 62 +- .../index/write/TestWALRecoveryCaching.java | 90 +- .../TestPerRegionIndexWriteCache.java | 127 +- .../phoenix/index/IndexMaintainerTest.java | 554 +- .../index/IndexRebuildRegionScannerTest.java | 106 +- .../index/IndexScrutinyMapperTest.java | 171 +- .../apache/phoenix/index/IndexToolTest.java | 633 +- .../phoenix/index/IndexUpgradeToolTest.java | 390 +- .../PrepareIndexMutationsForRebuildTest.java | 1448 +- .../phoenix/index/ShouldVerifyTest.java | 132 +- .../index/VerifySingleIndexRowTest.java | 1486 +- .../index/automated/MRJobSubmitterTest.java | 225 +- 
.../iterate/AggregateResultScannerTest.java | 69 +- .../iterate/ConcatResultIteratorTest.java | 199 +- ...DistinctAggregatingResultIteratorTest.java | 424 +- .../iterate/MaterializedResultIterators.java | 69 +- .../iterate/MergeSortResultIteratorTest.java | 347 +- .../iterate/OrderedResultIteratorTest.java | 103 +- ...KeyOrderedAggregateResultIteratorTest.java | 243 +- .../iterate/SpoolingResultIteratorTest.java | 68 +- .../TestingMapReduceParallelScanGrouper.java | 62 +- .../ClusterRoleRecordGeneratorToolTest.java | 95 +- .../phoenix/jdbc/ClusterRoleRecordTest.java | 358 +- .../jdbc/FailoverPhoenixConnectionTest.java | 333 +- .../ParallelPhoenixConnectionFailureTest.java | 100 +- .../jdbc/ParallelPhoenixConnectionTest.java | 756 +- .../jdbc/ParallelPhoenixContextTest.java | 175 +- ...llelPhoenixNullComparingResultSetTest.java | 310 +- .../ParallelPhoenixPreparedStatementTest.java | 176 +- .../jdbc/ParallelPhoenixResultSetTest.java | 466 +- .../phoenix/jdbc/ParallelPhoenixUtilTest.java | 282 +- .../phoenix/jdbc/PhoenixDriverTest.java | 177 +- .../jdbc/PhoenixEmbeddedDriverTest.java | 908 +- .../phoenix/jdbc/PhoenixHAAdminToolTest.java | 452 +- .../PhoenixHAExecutorServiceProviderTest.java | 169 +- .../jdbc/PhoenixPreparedStatementTest.java | 95 +- .../jdbc/PhoenixResultSetMetadataTest.java | 255 +- .../phoenix/jdbc/PhoenixStatementTest.java | 388 +- .../phoenix/jdbc/PhoenixTestDriver.java | 164 +- .../phoenix/jdbc/ReadOnlyPropertiesTest.java | 113 +- .../phoenix/mapreduce/BulkLoadToolTest.java | 77 +- .../mapreduce/CsvBulkImportUtilTest.java | 131 +- .../mapreduce/CsvToKeyValueMapperTest.java | 54 +- .../DefaultMultiViewSplitStrategyTest.java | 157 +- .../FormatToBytesWritableMapperTest.java | 139 +- .../PhoenixMultiViewInputFormatTest.java | 103 +- .../mapreduce/PhoenixMultiViewReaderTest.java | 98 +- .../phoenix/mapreduce/PhoenixTTLToolTest.java | 102 +- .../mapreduce/PhoenixTestingInputFormat.java | 24 +- .../bulkload/TestTableRowkeyPair.java | 67 +- .../mapreduce/index/BaseIndexTest.java | 93 +- .../index/IndexScrutinyTableOutputTest.java | 154 +- .../ColumnInfoToStringEncoderDecoderTest.java | 74 +- .../mapreduce/util/IndexColumnNamesTest.java | 88 +- .../util/PhoenixConfigurationUtilTest.java | 586 +- .../phoenix/memory/MemoryManagerTest.java | 302 +- .../apache/phoenix/metrics/LoggingSink.java | 48 +- .../phoenix/metrics/MetricTypeTest.java | 41 +- .../monitoring/LatencyHistogramTest.java | 126 +- .../phoenix/monitoring/MetricUtilTest.java | 71 +- .../monitoring/OverAllQueryMetricsTest.java | 279 +- .../PhoenixTableMetricImplTest.java | 124 +- .../phoenix/monitoring/SizeHistogramTest.java | 99 +- .../monitoring/TableClientMetricsTest.java | 280 +- .../monitoring/TableHistogramsTest.java | 46 +- .../monitoring/TableLevelMetricsTestData.java | 245 +- .../monitoring/TableMetricsManagerTest.java | 776 +- .../ConnectionQueryServicesHistogramTest.java | 91 +- ...ionQueryServicesMetricsHistogramsTest.java | 30 +- ...ectionQueryServicesMetricsManagerTest.java | 145 +- .../ConnectionQueryServicesMetricsTest.java | 136 +- ...onnectionQueryServicesNameMetricsTest.java | 97 +- .../parse/BuiltInFunctionInfoTest.java | 139 +- .../phoenix/parse/CastParseNodeTest.java | 65 +- .../phoenix/parse/CursorParserTest.java | 621 +- .../PhoenixRowTimestampFunctionTest.java | 69 +- .../apache/phoenix/parse/QueryParserTest.java | 1943 +- .../query/BaseConnectionlessQueryTest.java | 215 +- .../org/apache/phoenix/query/BaseTest.java | 3953 ++-- .../ConnectionQueryServicesImplTest.java | 608 +- 
.../phoenix/query/ConnectionlessTest.java | 311 +- .../apache/phoenix/query/DelegateCell.java | 235 +- .../EncodedColumnQualifierCellsListTest.java | 846 +- .../phoenix/query/ExplainPlanTextTest.java | 67 +- .../query/GuidePostsCacheProviderTest.java | 157 +- .../query/GuidePostsCacheWrapperTest.java | 145 +- .../query/HBaseFactoryProviderTest.java | 36 +- .../phoenix/query/KeyRangeClipTest.java | 203 +- .../phoenix/query/KeyRangeCoalesceTest.java | 197 +- .../phoenix/query/KeyRangeIntersectTest.java | 110 +- .../phoenix/query/KeyRangeMoreTest.java | 436 +- .../phoenix/query/KeyRangeUnionTest.java | 101 +- .../org/apache/phoenix/query/OrderByTest.java | 123 +- .../query/ParallelIteratorsSplitTest.java | 817 +- .../query/PhoenixStatsCacheLoaderTest.java | 244 +- .../PhoenixStatsCacheRemovalListenerTest.java | 46 +- .../phoenix/query/PhoenixTestBuilder.java | 3843 ++-- .../query/PropertyPolicyProviderTest.java | 58 +- .../apache/phoenix/query/QueryPlanTest.java | 534 +- .../phoenix/query/QueryServicesTestImpl.java | 200 +- .../query/ScannerLeaseRenewalTest.java | 199 +- .../phoenix/query/TestPropertyPolicy.java | 9 +- .../schema/ImmutableStorageSchemeTest.java | 605 +- .../phoenix/schema/MetaDataClientTest.java | 97 +- .../apache/phoenix/schema/MutationTest.java | 203 +- .../apache/phoenix/schema/PBaseColumn.java | 39 +- .../apache/phoenix/schema/PCharPadTest.java | 215 +- .../apache/phoenix/schema/PLongColumn.java | 10 +- .../phoenix/schema/PMetaDataImplTest.java | 346 +- .../phoenix/schema/RowKeySchemaTest.java | 307 +- .../schema/RowKeyValueAccessorTest.java | 103 +- .../phoenix/schema/SaltingUtilTest.java | 59 +- .../apache/phoenix/schema/SchemaUtilTest.java | 95 +- .../schema/SequenceAllocationTest.java | 101 +- .../apache/phoenix/schema/SortOrderTest.java | 191 +- .../phoenix/schema/SystemSplitPolicyTest.java | 118 +- .../phoenix/schema/ValueBitSetTest.java | 337 +- .../schema/stats/StatisticsScannerTest.java | 224 +- .../stats/UpdateStatisticsToolTest.java | 141 +- .../schema/tuple/SingleKeyValueTupleTest.java | 33 +- .../types/BasePhoenixArrayToStringTest.java | 122 +- ...imitiveDoublePhoenixArrayToStringTest.java | 67 +- ...ePrimitiveIntPhoenixArrayToStringTest.java | 100 +- .../schema/types/PDataTypeForArraysTest.java | 2338 ++- .../phoenix/schema/types/PDataTypeTest.java | 3791 ++-- .../schema/types/PDateArrayToStringTest.java | 118 +- .../types/PVarcharArrayToStringTest.java | 104 +- ...mitiveBooleanPhoenixArrayToStringTest.java | 103 +- ...PrimitiveBytePhoenixArrayToStringTest.java | 58 +- ...imitiveDoublePhoenixArrayToStringTest.java | 59 +- ...rimitiveFloatPhoenixArrayToStringTest.java | 60 +- .../PrimitiveIntPhoenixArrayToStringTest.java | 10 +- ...PrimitiveLongPhoenixArrayToStringTest.java | 58 +- ...rimitiveShortPhoenixArrayToStringTest.java | 59 +- .../phoenix/trace/TraceSpanReceiverTest.java | 18 +- .../NotAvailableTransactionService.java | 7 +- .../transaction/OmidTransactionService.java | 149 +- .../PhoenixTransactionService.java | 2 +- .../TransactionServiceManager.java | 23 +- .../util/AbstractUpsertExecutorTest.java | 340 +- .../apache/phoenix/util/AssertResults.java | 173 +- .../phoenix/util/Base62EncoderTest.java | 62 +- .../org/apache/phoenix/util/ByteUtilTest.java | 76 +- .../org/apache/phoenix/util/CDCUtilTest.java | 62 +- .../apache/phoenix/util/ClientUtilTest.java | 131 +- .../apache/phoenix/util/ColumnInfoTest.java | 102 +- .../org/apache/phoenix/util/DateUtilTest.java | 634 +- .../util/EquiDepthStreamHistogramTest.java | 487 +- 
.../phoenix/util/GeneratePerformanceData.java | 63 +- .../apache/phoenix/util/IndexScrutiny.java | 252 +- .../apache/phoenix/util/IndexUtilTest.java | 192 +- .../org/apache/phoenix/util/JDBCUtilTest.java | 284 +- .../phoenix/util/LikeExpressionTest.java | 25 +- .../org/apache/phoenix/util/LogUtilTest.java | 58 +- .../apache/phoenix/util/MetaDataUtilTest.java | 516 +- .../phoenix/util/MinVersionTestRunner.java | 56 +- .../util/PhoenixContextExecutorTest.java | 41 +- .../phoenix/util/PhoenixEncodeDecodeTest.java | 88 +- .../phoenix/util/PhoenixMRJobUtilTest.java | 38 +- .../phoenix/util/PhoenixRuntimeTest.java | 559 +- .../util/PrefixByteEncoderDecoderTest.java | 126 +- .../phoenix/util/PropertiesUtilTest.java | 117 +- .../util/QualifierEncodingSchemeTest.java | 177 +- .../apache/phoenix/util/QueryUtilTest.java | 274 +- .../phoenix/util/ReadOnlyPropsTest.java | 51 +- .../java/org/apache/phoenix/util/Repeat.java | 17 +- .../phoenix/util/RowKeyMatcherTest.java | 496 +- .../apache/phoenix/util/RunUntilFailure.java | 100 +- .../phoenix/util/SQLExceptionCodeTest.java | 78 +- .../org/apache/phoenix/util/ScanUtilTest.java | 1027 +- .../apache/phoenix/util/SequenceUtilTest.java | 234 +- .../apache/phoenix/util/StringUtilTest.java | 122 +- .../util/TenantIdByteConversionTest.java | 458 +- .../org/apache/phoenix/util/TestDDLUtil.java | 138 +- .../org/apache/phoenix/util/TestUtil.java | 2607 +-- .../bson/ComparisonExpressionUtilsTest.java | 1311 +- .../util/bson/UpdateExpressionUtilsTest.java | 883 +- .../util/csv/CsvUpsertExecutorTest.java | 72 +- .../util/csv/StringToArrayConverterTest.java | 75 +- .../phoenix/util/i18n/LinguisticSortTest.java | 1121 +- .../i18n/OracleUpperTableGeneratorTest.java | 695 +- .../util/json/JsonUpsertExecutorTest.java | 52 +- phoenix-hbase-compat-2.4.1/pom.xml | 9 +- .../compat/hbase/CompatDelegateHTable.java | 31 +- .../hbase/CompatIndexHalfStoreFileReader.java | 27 +- .../compat/hbase/CompatIndexedHLogReader.java | 27 +- .../CompatLocalIndexStoreFileScanner.java | 30 +- .../hbase/CompatOmidTransactionTable.java | 79 +- .../hbase/CompatPhoenixRpcScheduler.java | 29 +- .../phoenix/compat/hbase/CompatUtil.java | 80 +- .../compat/hbase/HbaseCompatCapabilities.java | 20 +- .../hbase/ReplicationSinkCompatEndpoint.java | 59 +- phoenix-hbase-compat-2.5.0/pom.xml | 9 +- .../compat/hbase/CompatDelegateHTable.java | 39 +- .../hbase/CompatIndexHalfStoreFileReader.java | 27 +- .../compat/hbase/CompatIndexedHLogReader.java | 27 +- .../CompatLocalIndexStoreFileScanner.java | 31 +- .../hbase/CompatOmidTransactionTable.java | 87 +- .../hbase/CompatPhoenixRpcScheduler.java | 36 +- .../phoenix/compat/hbase/CompatUtil.java | 70 +- .../compat/hbase/HbaseCompatCapabilities.java | 20 +- .../hbase/ReplicationSinkCompatEndpoint.java | 59 +- phoenix-hbase-compat-2.5.4/pom.xml | 9 +- .../compat/hbase/CompatDelegateHTable.java | 39 +- .../hbase/CompatIndexHalfStoreFileReader.java | 27 +- .../compat/hbase/CompatIndexedHLogReader.java | 28 +- .../CompatLocalIndexStoreFileScanner.java | 30 +- .../hbase/CompatOmidTransactionTable.java | 87 +- .../hbase/CompatPhoenixRpcScheduler.java | 36 +- .../phoenix/compat/hbase/CompatUtil.java | 70 +- .../compat/hbase/HbaseCompatCapabilities.java | 20 +- .../hbase/ReplicationSinkCompatEndpoint.java | 59 +- phoenix-hbase-compat-2.6.0/pom.xml | 9 +- .../compat/hbase/CompatDelegateHTable.java | 39 +- .../hbase/CompatIndexHalfStoreFileReader.java | 31 +- .../compat/hbase/CompatIndexedHLogReader.java | 15 +- .../CompatLocalIndexStoreFileScanner.java | 32 +- 
.../hbase/CompatOmidTransactionTable.java | 87 +- .../hbase/CompatPhoenixRpcScheduler.java | 60 +- .../phoenix/compat/hbase/CompatUtil.java | 70 +- .../compat/hbase/HbaseCompatCapabilities.java | 20 +- .../hbase/ReplicationSinkCompatEndpoint.java | 59 +- phoenix-mapreduce-byo-shaded-hbase/pom.xml | 528 +- phoenix-pherf/README.md | 38 +- .../config/scenario/user_defined_scenario.xml | 2 +- phoenix-pherf/pom.xml | 98 +- .../java/org/apache/phoenix/pherf/Pherf.java | 670 +- .../apache/phoenix/pherf/PherfConstants.java | 251 +- .../phoenix/pherf/configuration/Column.java | 359 +- .../pherf/configuration/DataModel.java | 102 +- .../pherf/configuration/DataOverride.java | 41 +- .../pherf/configuration/DataSequence.java | 31 +- .../pherf/configuration/DataTypeMapping.java | 91 +- .../phoenix/pherf/configuration/Ddl.java | 125 +- .../pherf/configuration/ExecutionType.java | 28 +- .../phoenix/pherf/configuration/IdleTime.java | 33 +- .../pherf/configuration/LoadProfile.java | 212 +- .../pherf/configuration/OperationGroup.java | 33 +- .../phoenix/pherf/configuration/Query.java | 324 +- .../phoenix/pherf/configuration/QuerySet.java | 235 +- .../phoenix/pherf/configuration/Scenario.java | 516 +- .../pherf/configuration/TenantGroup.java | 79 +- .../phoenix/pherf/configuration/Upsert.java | 217 +- .../pherf/configuration/UserDefined.java | 60 +- .../pherf/configuration/WriteParams.java | 103 +- .../pherf/configuration/XMLConfigParser.java | 298 +- .../pherf/exception/FileLoaderException.java | 41 +- .../exception/FileLoaderRuntimeException.java | 40 +- .../pherf/exception/PherfException.java | 38 +- .../exception/PherfRuntimeException.java | 38 +- .../phoenix/pherf/jmx/MonitorDetails.java | 74 +- .../phoenix/pherf/jmx/MonitorManager.java | 325 +- .../org/apache/phoenix/pherf/jmx/Stat.java | 43 +- .../jmx/monitors/CPULoadAverageMonitor.java | 40 +- .../pherf/jmx/monitors/FreeMemoryMonitor.java | 35 +- .../GarbageCollectorElapsedTimeMonitor.java | 57 +- .../pherf/jmx/monitors/HeapMemoryMonitor.java | 39 +- .../pherf/jmx/monitors/MaxMemoryMonitor.java | 35 +- .../phoenix/pherf/jmx/monitors/Monitor.java | 33 +- .../jmx/monitors/NonHeapMemoryMonitor.java | 40 +- ...ObjectPendingFinalizationCountMonitor.java | 40 +- .../pherf/jmx/monitors/ThreadMonitor.java | 40 +- .../jmx/monitors/TotalMemoryMonitor.java | 35 +- .../pherf/result/DataLoadThreadTime.java | 120 +- .../pherf/result/DataLoadTimeSummary.java | 87 +- .../phoenix/pherf/result/DataModelResult.java | 106 +- .../phoenix/pherf/result/QueryResult.java | 278 +- .../phoenix/pherf/result/QuerySetResult.java | 61 +- .../apache/phoenix/pherf/result/Result.java | 90 +- .../phoenix/pherf/result/ResultHandler.java | 53 +- .../phoenix/pherf/result/ResultManager.java | 263 +- .../phoenix/pherf/result/ResultUtil.java | 446 +- .../phoenix/pherf/result/ResultValue.java | 50 +- .../apache/phoenix/pherf/result/RunTime.java | 238 +- .../phoenix/pherf/result/ScenarioResult.java | 62 +- .../phoenix/pherf/result/ThreadTime.java | 244 +- .../phoenix/pherf/result/file/Extension.java | 50 +- .../phoenix/pherf/result/file/Header.java | 64 +- .../pherf/result/file/ResultFileDetails.java | 75 +- .../result/impl/CSVFileResultHandler.java | 120 +- .../pherf/result/impl/CSVResultHandler.java | 93 +- .../result/impl/DefaultResultHandler.java | 94 +- .../pherf/result/impl/XMLResultHandler.java | 133 +- .../apache/phoenix/pherf/rules/DataValue.java | 177 +- .../pherf/rules/RuleBasedDataGenerator.java | 39 +- .../phoenix/pherf/rules/RulesApplier.java | 1044 +- 
.../rules/SequentialDateDataGenerator.java | 96 +- .../rules/SequentialIntegerDataGenerator.java | 94 +- .../rules/SequentialListDataGenerator.java | 93 +- .../rules/SequentialVarcharDataGenerator.java | 108 +- .../phoenix/pherf/schema/SchemaReader.java | 150 +- .../pherf/util/GoogleChartGenerator.java | 683 +- .../phoenix/pherf/util/PhoenixUtil.java | 1204 +- .../phoenix/pherf/util/ResourceList.java | 294 +- .../phoenix/pherf/util/RowCalculator.java | 112 +- .../pherf/workload/MultiThreadedRunner.java | 378 +- .../pherf/workload/MultithreadedDiffer.java | 143 +- .../phoenix/pherf/workload/QueryExecutor.java | 449 +- .../phoenix/pherf/workload/QueryVerifier.java | 252 +- .../phoenix/pherf/workload/Workload.java | 3 +- .../pherf/workload/WorkloadExecutor.java | 216 +- .../phoenix/pherf/workload/WriteWorkload.java | 641 +- .../workload/mt/MultiTenantWorkload.java | 85 +- .../mt/generators/BaseLoadEventGenerator.java | 300 +- .../mt/generators/LoadEventGenerator.java | 43 +- .../generators/LoadEventGeneratorFactory.java | 21 +- .../SequentialLoadEventGenerator.java | 278 +- .../TenantLoadEventGeneratorFactory.java | 82 +- .../mt/generators/TenantOperationInfo.java | 74 +- ...UniformDistributionLoadEventGenerator.java | 159 +- .../WeightedRandomLoadEventGenerator.java | 287 +- .../mt/handlers/PherfWorkHandler.java | 8 +- .../mt/handlers/RendezvousingWorkHandler.java | 114 +- .../handlers/TenantOperationWorkHandler.java | 98 +- .../mt/operations/BaseOperationSupplier.java | 34 +- .../mt/operations/IdleTimeOperation.java | 3 +- .../operations/IdleTimeOperationSupplier.java | 75 +- .../workload/mt/operations/Operation.java | 17 +- .../mt/operations/OperationStats.java | 168 +- .../mt/operations/PreScenarioOperation.java | 7 +- .../PreScenarioOperationSupplier.java | 102 +- .../mt/operations/QueryOperation.java | 3 +- .../mt/operations/QueryOperationSupplier.java | 114 +- .../mt/operations/TenantOperationFactory.java | 583 +- .../mt/operations/UpsertOperation.java | 3 +- .../operations/UpsertOperationSupplier.java | 241 +- .../mt/operations/UserDefinedOperation.java | 3 +- .../UserDefinedOperationSupplier.java | 37 +- .../org/apache/phoenix/pherf/ColumnTest.java | 70 +- .../pherf/ConfigurationParserTest.java | 590 +- .../org/apache/phoenix/pherf/PherfTest.java | 179 +- .../apache/phoenix/pherf/ResourceTest.java | 87 +- .../apache/phoenix/pherf/ResultBaseTest.java | 70 +- .../org/apache/phoenix/pherf/ResultTest.java | 358 +- .../phoenix/pherf/RowCalculatorTest.java | 132 +- .../phoenix/pherf/RuleGeneratorTest.java | 792 +- .../apache/phoenix/pherf/TestHBaseProps.java | 38 +- .../phoenix/pherf/XMLConfigParserTest.java | 48 +- .../result/impl/XMLResultHandlerTest.java | 47 +- .../SequentialDateDataGeneratorTest.java | 102 +- .../SequentialIntegerDataGeneratorTest.java | 100 +- .../SequentialListDataGeneratorTest.java | 114 +- .../SequentialVarcharDataGeneratorTest.java | 98 +- .../phoenix/pherf/util/ResourceListTest.java | 24 +- .../workload/MultiThreadedRunnerTest.java | 182 +- .../mt/SequentialLoadEventGeneratorTest.java | 221 +- .../mt/TenantOperationFactoryTest.java | 170 +- ...ormDistributionLoadEventGeneratorTest.java | 195 +- .../WeightedRandomLoadEventGeneratorTest.java | 367 +- .../scenario/malicious_scenario_with_dtd.xml | 6 +- .../test/resources/scenario/test_scenario.xml | 30 +- phoenix-server/pom.xml | 527 +- phoenix-tracing-webapp/pom.xml | 356 +- .../tracingwebapp/http/ConnectionFactory.java | 12 +- .../tracingwebapp/http/EntityFactory.java | 24 +- 
.../phoenix/tracingwebapp/http/Main.java | 101 +- .../tracingwebapp/http/TraceServlet.java | 78 +- pom.xml | 1706 +- src/main/config/checkstyle/header.txt | 2 +- src/main/config/spotbugs/spotbugs-exclude.xml | 2 +- 1865 files changed, 270396 insertions(+), 270748 deletions(-) diff --git a/BUILDING.md b/BUILDING.md index d32692fe5b6..159b8440f1e 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -59,12 +59,12 @@ Building from source On first setup, you may need to run `$ mvn install -DskipTests` to install the local jars. This is a side-effect of multi-module maven projects -To re-generate the antlr based files: +To re-generate the antlr based files: `$ mvn process-sources` -To build the jars and the assembly tarball: +To build the jars and the assembly tarball: `$ mvn package` -and optionally, to just skip all the tests and build the jars: +and optionally, to just skip all the tests and build the jars: `$ mvn package -DskipTests` Note: javadocs are generated in target/apidocs @@ -79,7 +79,7 @@ Phoenix, even within the same HBase minor release. By default, Phoenix will be built for the latest known patch level of the latest HBase 2.x minor release that Phoenix supports. -You can specify the targeted HBase minor release by setting the `hbase.profile` system property for +You can specify the targeted HBase minor release by setting the `hbase.profile` system property for maven. You can also specify the exact HBase release to build Phoenix with by additionally @@ -102,7 +102,7 @@ Use the m2e eclipse plugin and do Import->Maven Project and just pick the root ' Running the tests ----------------- -All Unit Tests +All Unit Tests `$ mvn clean test` All Unit Tests and Integration tests (takes a few hours) @@ -133,5 +133,5 @@ as well as each of the subprojects. (not every project has all reports) Generate Apache Web Site ------------------------ -checkout https://svn.apache.org/repos/asf/phoenix +checkout https://svn.apache.org/repos/asf/phoenix `$ build.sh` diff --git a/Jenkinsfile b/Jenkinsfile index 2882405ed82..c03afad1a14 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -116,4 +116,4 @@ pipeline { } } } -} \ No newline at end of file +} diff --git a/Jenkinsfile.yetus b/Jenkinsfile.yetus index c70dc991e02..a0e9c43f654 100644 --- a/Jenkinsfile.yetus +++ b/Jenkinsfile.yetus @@ -55,4 +55,4 @@ pipeline { } } } -} \ No newline at end of file +} diff --git a/README.md b/README.md index 6f886e16e78..f6d499101ac 100644 --- a/README.md +++ b/README.md @@ -19,4 +19,4 @@ limitations under the License. [Apache Phoenix](http://phoenix.apache.org/) is a SQL skin over HBase delivered as a client-embedded JDBC driver targeting low latency queries over HBase data. Visit the Apache Phoenix website [here](http://phoenix.apache.org/). -Copyright ©2014 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved. +Copyright ©2014 [Apache Software Foundation](http://www.apache.org/). All Rights Reserved. diff --git a/bin/end2endTest.py b/bin/end2endTest.py index 7de0a0423fd..b3b119fa64c 100755 --- a/bin/end2endTest.py +++ b/bin/end2endTest.py @@ -21,7 +21,7 @@ # !!! PLEASE READ !!! # !!! Do NOT run the script against a prodcution cluster because it wipes out -# !!! existing data of the cluster +# !!! 
existing data of the cluster from __future__ import print_function import os diff --git a/bin/pherf-standalone.py b/bin/pherf-standalone.py index 14f5500fb15..62e1acb582d 100755 --- a/bin/pherf-standalone.py +++ b/bin/pherf-standalone.py @@ -40,6 +40,6 @@ phoenix_utils.phoenix_pherf_jar + \ '" -Dlog4j2.configurationFile=file:' + \ os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \ - " org.apache.phoenix.pherf.Pherf " + args + " org.apache.phoenix.pherf.Pherf " + args os.execl("/bin/sh", "/bin/sh", "-c", java_cmd) diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py index 48e7a42d5ab..4cb3182f025 100755 --- a/bin/phoenix_utils.py +++ b/bin/phoenix_utils.py @@ -33,14 +33,14 @@ def find(pattern, classPaths): # remove * if it's at the end of path if ((path is not None) and (len(path) > 0) and (path[-1] == '*')) : path = path[:-1] - + for root, dirs, files in os.walk(path): # sort the file names so *-client always precedes *-thin-client files.sort() for name in files: if fnmatch.fnmatch(name, pattern): return os.path.join(root, name) - + return "" def tryDecode(input): diff --git a/bin/psql.py b/bin/psql.py index 03d76bc8309..2a82cf68ba8 100755 --- a/bin/psql.py +++ b/bin/psql.py @@ -36,7 +36,7 @@ os.pathsep + phoenix_utils.logging_jar + \ os.pathsep + phoenix_utils.phoenix_client_embedded_jar + '" -Dlog4j2.configurationFile=file:' + \ os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \ - " org.apache.phoenix.util.PhoenixRuntime " + args + " org.apache.phoenix.util.PhoenixRuntime " + args print(java_cmd) diff --git a/bin/readme.txt b/bin/readme.txt index e9c52439da2..43499e01091 100644 --- a/bin/readme.txt +++ b/bin/readme.txt @@ -3,10 +3,10 @@ SqlLine https://github.com/julianhyde/sqlline Execute SQL from command line. Sqlline manual is available at https://julianhyde.github.io/sqlline/manual.html - - Usage: - $ sqlline.py - Example: + + Usage: + $ sqlline.py + Example: $ sqlline.py localhost $ sqlline.py localhost /STOCK_SYMBOL.sql @@ -47,4 +47,3 @@ Usage: hadoop jar phoneix-[version]-mapreduce.jar -s,--schema Phoenix schema name (optional) -t,--table Phoenix table name (mandatory) -z,--zookeeper Zookeeper quorum to connect to (optional) - diff --git a/bin/traceserver.py b/bin/traceserver.py index 706cbd4421c..68522b705e3 100755 --- a/bin/traceserver.py +++ b/bin/traceserver.py @@ -123,7 +123,7 @@ phoenix_utils.phoenix_traceserver_jar + os.pathsep + phoenix_utils.slf4j_backend_jar + os.pathsep + \ phoenix_utils.logging_jar + os.pathsep + \ phoenix_utils.phoenix_client_embedded_jar + os.pathsep + phoenix_utils.phoenix_queryserver_jar + \ - + " -Dproc_phoenixtraceserver" + \ " -Dlog4j2.configurationFile=file:" + os.path.join(phoenix_utils.current_dir, "log4j2.properties") + \ " -Dpsql.root.logger=%(root_logger)s" + \ diff --git a/dev/PhoenixCodeTemplate.xml b/dev/PhoenixCodeTemplate.xml index 993b616957d..3ad1dcb285f 100644 --- a/dev/PhoenixCodeTemplate.xml +++ b/dev/PhoenixCodeTemplate.xml @@ -415,4 +415,4 @@ - \ No newline at end of file + diff --git a/dev/cache-apache-project-artifact.sh b/dev/cache-apache-project-artifact.sh index 2b1e922fc59..58433661b93 100755 --- a/dev/cache-apache-project-artifact.sh +++ b/dev/cache-apache-project-artifact.sh @@ -138,4 +138,4 @@ echo "moving artifact into place at '${target}'" mv "${working_dir}/artifact" "${target}.copying" # attempt atomic move mv "${target}.copying" "${target}" -echo "all done!" \ No newline at end of file +echo "all done!" 
diff --git a/dev/create-release/release-util.sh b/dev/create-release/release-util.sh index af3a6556499..211fc4f813e 100755 --- a/dev/create-release/release-util.sh +++ b/dev/create-release/release-util.sh @@ -771,4 +771,4 @@ function rebuild_hbase_for_omid() { function rebuild_hbase_locally() { local hbase_version="$1" MAVEN_SETTINGS_FILE="$MAVEN_SETTINGS_FILE" "$SELF"/rebuild_hbase.sh "$hbase_version" -} \ No newline at end of file +} diff --git a/dev/jenkinsEnv.sh b/dev/jenkinsEnv.sh index 2717cb7ed6b..fc5839db887 100755 --- a/dev/jenkinsEnv.sh +++ b/dev/jenkinsEnv.sh @@ -28,4 +28,3 @@ export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:$MAVEN_HOME/bin export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData -XX:MaxPermSize=256m"}" ulimit -n - diff --git a/dev/misc_utils/README.md b/dev/misc_utils/README.md index f6946e03346..b11fd0abeab 100644 --- a/dev/misc_utils/README.md +++ b/dev/misc_utils/README.md @@ -72,11 +72,11 @@ The script also requires below inputs: Example of script execution: ``` -$ python3 dev/misc_utils/git_jira_fix_version_check.py +$ python3 dev/misc_utils/git_jira_fix_version_check.py JIRA Project Name (e.g PHOENIX / OMID / TEPHRA etc): PHOENIX First commit hash to start excluding commits from history: a2adf5e572c5a4bcccee7f8ac43bad6b84293ec6 Fix Version: 4.16.0 -Jira server url (default: https://issues.apache.org/jira): +Jira server url (default: https://issues.apache.org/jira): Path of project's working dir with release branch checked-in: /Users/{USER}/Documents/phoenix Check git status output and verify expected branch @@ -114,5 +114,3 @@ Completed diff: ############################################## ``` - - diff --git a/dev/rebuild_hbase.sh b/dev/rebuild_hbase.sh index 6e022082414..2fe927c4616 100755 --- a/dev/rebuild_hbase.sh +++ b/dev/rebuild_hbase.sh @@ -73,4 +73,3 @@ cd hbase-$HBASE_VERSION echo mvn ${SETTINGS[@]} clean install -Dhadoop.profile=3.0 -DskipTests -B $LOCALREPO mvn ${SETTINGS[@]} clean install -Dhadoop.profile=3.0 -DskipTests -B $LOCALREPO cd ${STARTDIR} - diff --git a/dev/smart-apply-patch.sh b/dev/smart-apply-patch.sh index 0b69eabaf89..26dff4b4721 100755 --- a/dev/smart-apply-patch.sh +++ b/dev/smart-apply-patch.sh @@ -52,7 +52,7 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then # correct place to put those files. # NOTE 2014/07/17: -# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash +# Temporarily disabling below check since our jenkins boxes seems to be not defaulting to bash # causing below checks to fail. Once it is fixed, we can revert the commit and enable this again. # TMP2=/tmp/tmp.paths.2.$$ diff --git a/dev/test-patch.sh b/dev/test-patch.sh index 62a6cd82d20..7fc0b791168 100755 --- a/dev/test-patch.sh +++ b/dev/test-patch.sh @@ -19,7 +19,7 @@ set -x -### Setup some variables. +### Setup some variables. ### GIT_COMMIT and BUILD_URL are set by Hudson if it is run by patch process ### Read variables from properties file bindir=$(dirname $0) @@ -160,7 +160,7 @@ parseArgs() { ### Check if $PATCH_DIR exists. If it does not exist, create a new directory if [[ ! -e "$PATCH_DIR" ]] ; then mkdir "$PATCH_DIR" - if [[ $? == 0 ]] ; then + if [[ $? 
== 0 ]] ; then echo "$PATCH_DIR has been created" else echo "Unable to create $PATCH_DIR" @@ -296,7 +296,7 @@ setup () { VERSION=${GIT_COMMIT}_${defect}_PATCH-${patchNum} findBranchNameFromPatchName ${relativePatchURL} checkoutBranch - JIRA_COMMENT="Here are the results of testing the latest attachment + JIRA_COMMENT="Here are the results of testing the latest attachment $patchURL against ${BRANCH_NAME} branch at commit ${GIT_COMMIT}. ATTACHMENT ID: ${ATTACHMENT_ID}" @@ -485,7 +485,7 @@ applyPatch () { echo "======================================================================" echo "" echo "" - + export PATCH $BASEDIR/dev/smart-apply-patch.sh $PATCH_DIR/patch if [[ $? != 0 ]] ; then @@ -786,7 +786,7 @@ checkFindbugsWarnings () { echo "======================================================================" echo "" echo "" - echo "$MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess" + echo "$MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess" export MAVEN_OPTS="${MAVEN_OPTS}" $MVN clean package findbugs:findbugs -D${PROJECT_NAME}PatchProcess -DskipTests < /dev/null @@ -797,7 +797,7 @@ checkFindbugsWarnings () { return 1 fi - collectFindbugsReports patch $BASEDIR $PATCH_DIR + collectFindbugsReports patch $BASEDIR $PATCH_DIR #this files are generated by collectFindbugsReports() named with its first argument patch_xml=$PATCH_DIR/patchFindbugsWarnings.xml trunk_xml=$PATCH_DIR/trunkFindbugsWarnings.xml @@ -884,7 +884,7 @@ runTests () { if [[ $? != 0 ]] ; then ### Find and format names of failed tests failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E " /dev/null @@ -1004,7 +1004,7 @@ $JIRA_COMMENT_FOOTER" -$comment" +$comment" if [[ $JENKINS == "true" ]] ; then echo "" @@ -1099,7 +1099,7 @@ checkLineLengths (( RESULT = RESULT + $? )) # checkSiteXml # (( RESULT = RESULT + $?)) -### Do not call these when run by a developer +### Do not call these when run by a developer if [[ $JENKINS == "true" ]] ; then runTests (( RESULT = RESULT + $? )) diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml index 0ff54a34ff3..2271018980f 100644 --- a/phoenix-assembly/pom.xml +++ b/phoenix-assembly/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - + 4.0.0 org.apache.phoenix @@ -25,9 +23,9 @@ 5.3.0-SNAPSHOT phoenix-assembly + pom Phoenix Assembly Assemble Phoenix artifacts - pom true @@ -36,26 +34,59 @@ ${project.parent.basedir} + + + + org.apache.phoenix + phoenix-server-${hbase.suffix} + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix} + + + org.apache.phoenix + phoenix-client-embedded-${hbase.suffix} + + + org.apache.phoenix + phoenix-client-lite-${hbase.suffix} + + + org.apache.phoenix + phoenix-pherf + + + org.apache.phoenix + phoenix-tracing-webapp + + + sqlline + sqlline + ${sqlline.version} + jar-with-dependencies + + + + - exec-maven-plugin org.codehaus.mojo + exec-maven-plugin embedded client without version - compile exec + compile ln ${project.basedir}/../phoenix-client-parent/phoenix-client-embedded/target -fnsv - - phoenix-client-embedded-${hbase.suffix}-${project.version}.jar - + phoenix-client-embedded-${hbase.suffix}-${project.version}.jar phoenix-client-embedded-${hbase.suffix}.jar @@ -65,18 +96,16 @@ lite client without version - compile exec + compile ln ${project.basedir}/../phoenix-client-parent/phoenix-client-lite/target -fnsv - - phoenix-client-lite-${hbase.suffix}-${project.version}.jar - + phoenix-client-lite-${hbase.suffix}-${project.version}.jar phoenix-client-lite-${hbase.suffix}.jar @@ -86,18 +115,16 @@ server without version - compile exec + compile ln ${project.basedir}/../phoenix-server/target -fnsv - - phoenix-server-${hbase.suffix}-${project.version}.jar - + phoenix-server-${hbase.suffix}-${project.version}.jar phoenix-server-${hbase.suffix}.jar @@ -107,18 +134,16 @@ mapreduce without version - compile exec + compile ln ${project.basedir}/../phoenix-mapreduce-byo-shaded-hbase/target -fnsv - - phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}-${project.version}.jar - + phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}-${project.version}.jar phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix}.jar @@ -128,21 +153,17 @@ pherf without version - compile exec + compile ln ${project.basedir}/../phoenix-pherf/target -fnsv - - phoenix-pherf-${project.version}.jar - - - phoenix-pherf.jar - + phoenix-pherf-${project.version}.jar + phoenix-pherf.jar @@ -155,8 +176,8 @@ default-jar - none + none @@ -165,12 +186,12 @@ package-to-tar - package single + package - phoenix-${hbase.suffix}-${project.version}-bin + phoenix-${hbase.suffix}-${project.version}-bin false gnu false @@ -185,66 +206,9 @@ - - - - org.apache.phoenix - phoenix-server-${hbase.suffix} - - - org.apache.phoenix - phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix} - - - org.apache.phoenix - phoenix-client-embedded-${hbase.suffix} - - - org.apache.phoenix - phoenix-client-lite-${hbase.suffix} - - - org.apache.phoenix - phoenix-pherf - - - org.apache.phoenix - phoenix-tracing-webapp - - - sqlline - sqlline - ${sqlline.version} - jar-with-dependencies - - - - coverage - - - - org.jacoco - jacoco-maven-plugin - - - report-aggregate - - report-aggregate - - verify - - ${jacocoReportDir} - ${project.build.sourceEncoding} - ${project.reporting.outputEncoding} - - - - - - org.apache.phoenix @@ -282,6 +246,28 @@ ${project.version} + + + + org.jacoco + jacoco-maven-plugin + + + report-aggregate + + report-aggregate + + verify + + ${jacocoReportDir} + ${project.build.sourceEncoding} + ${project.reporting.outputEncoding} + + + + + + diff --git a/phoenix-assembly/src/build/components/all-common-dependencies.xml b/phoenix-assembly/src/build/components/all-common-dependencies.xml index 4a5fd9bf865..1720212b1fa 100644 --- 
a/phoenix-assembly/src/build/components/all-common-dependencies.xml +++ b/phoenix-assembly/src/build/components/all-common-dependencies.xml @@ -31,4 +31,4 @@ - \ No newline at end of file + diff --git a/phoenix-assembly/src/build/package-to-tar-all.xml b/phoenix-assembly/src/build/package-to-tar-all.xml index 9683ea714b1..9dd292842a2 100644 --- a/phoenix-assembly/src/build/package-to-tar-all.xml +++ b/phoenix-assembly/src/build/package-to-tar-all.xml @@ -36,4 +36,4 @@ src/build/components/all-common-files.xml src/build/components/all-common-dependencies.xml - \ No newline at end of file + diff --git a/phoenix-client-parent/phoenix-client-embedded/pom.xml b/phoenix-client-parent/phoenix-client-embedded/pom.xml index 3a6cadce0e3..b61287a6b97 100644 --- a/phoenix-client-parent/phoenix-client-embedded/pom.xml +++ b/phoenix-client-parent/phoenix-client-embedded/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -26,6 +24,8 @@ phoenix-client-embedded-${hbase.suffix} + + jar Phoenix Client Embedded Phoenix Client without logging implementation @@ -33,41 +33,6 @@ ${project.basedir}/../.. - jar - - - - - org.apache.maven.plugins - maven-shade-plugin - - - embedded-shaded - package - - shade - - - true - false - ${shadeSources} - ${basedir}/target/pom.xml - - - *:* - - - org.apache.phoenix:phoenix-client - xom:xom - - - - - - - - - @@ -103,38 +68,71 @@ org.eclipse.jetty jetty-server - provided ${jetty.version} + provided org.eclipse.jetty jetty-util - provided ${jetty.version} + provided org.eclipse.jetty jetty-util-ajax - provided ${jetty.version} + provided org.eclipse.jetty jetty-servlet - provided ${jetty.version} + provided org.eclipse.jetty jetty-webapp - provided ${jetty.version} + provided javax.servlet javax.servlet-api - provided ${javax.servlet-api.version} + provided + + + + + org.apache.maven.plugins + maven-shade-plugin + + + embedded-shaded + + shade + + package + + true + false + ${shadeSources} + ${basedir}/target/pom.xml + + + *:* + + + org.apache.phoenix:phoenix-client + xom:xom + + + + + + + + diff --git a/phoenix-client-parent/phoenix-client-lite/pom.xml b/phoenix-client-parent/phoenix-client-lite/pom.xml index 53a85e1f3b5..25cd5f2a004 100644 --- a/phoenix-client-parent/phoenix-client-lite/pom.xml +++ b/phoenix-client-parent/phoenix-client-lite/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -26,6 +24,8 @@ phoenix-client-lite-${hbase.suffix} + + jar Phoenix Client Lite Phoenix Client without server-side code and dependencies @@ -33,41 +33,6 @@ ${project.basedir}/../.. 
- jar - - - - - org.apache.maven.plugins - maven-shade-plugin - - - lite-shaded - package - - shade - - - true - false - ${shadeSources} - ${basedir}/target/pom.xml - - - *:* - - - org.apache.phoenix:phoenix-client - xom:xom - - - - - - - - - @@ -103,38 +68,71 @@ org.eclipse.jetty jetty-server - provided ${jetty.version} + provided org.eclipse.jetty jetty-util - provided ${jetty.version} + provided org.eclipse.jetty jetty-util-ajax - provided ${jetty.version} + provided org.eclipse.jetty jetty-servlet - provided ${jetty.version} + provided org.eclipse.jetty jetty-webapp - provided ${jetty.version} + provided javax.servlet javax.servlet-api - provided ${javax.servlet-api.version} + provided + + + + + org.apache.maven.plugins + maven-shade-plugin + + + lite-shaded + + shade + + package + + true + false + ${shadeSources} + ${basedir}/target/pom.xml + + + *:* + + + org.apache.phoenix:phoenix-client + xom:xom + + + + + + + + diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml index 0b3f15b8dfd..574fbac0b0e 100644 --- a/phoenix-client-parent/pom.xml +++ b/phoenix-client-parent/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -25,9 +23,9 @@ 5.3.0-SNAPSHOT phoenix-client-parent + pom Phoenix Client Parent Common configuration for the Phoenix Client Variants - pom true @@ -66,7 +64,7 @@ * - + org.apache.maven.plugins @@ -88,7 +86,7 @@ README* - + org.apache.hadoop:hadoop-yarn-common org/apache/hadoop/yarn/factories/package-info.class @@ -115,27 +113,20 @@ - - + + csv-bulk-load-config.properties - - ${project.basedir}/../config/csv-bulk-load-config.properties - + ${project.basedir}/../config/csv-bulk-load-config.properties - + README.md ${project.basedir}/../README.md - + LICENSE.txt ${project.basedir}/../LICENSE - + NOTICE ${project.basedir}/../NOTICE diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml index ad65b74f39a..239499efff9 100644 --- a/phoenix-core-client/pom.xml +++ b/phoenix-core-client/pom.xml @@ -15,8 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - + 4.0.0 @@ -28,189 +27,6 @@ Phoenix Core Client Core Phoenix Client codebase - - - - - org.apache.maven.plugins - maven-site-plugin - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - - - import java.util.regex.Pattern; - import java.lang.Integer; - - versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$"); - versionMatcher = versionPattern.matcher("${hbase.version}"); - versionMatcher.find(); - - hbaseMajor = Integer.parseInt(versionMatcher.group(1)); - hbaseMinor = Integer.parseInt(versionMatcher.group(2)); - hbasePatch = Integer.parseInt(versionMatcher.group(3)); - - hbaseMajor == 2 && ( - ("${hbase.compat.version}".equals("2.4.1") - && hbaseMinor == 4 - && hbasePatch >=1) - || ("${hbase.compat.version}".equals("2.5.0") - && hbaseMinor == 5 - && hbasePatch >=0) - || ("${hbase.compat.version}".equals("2.5.4") - && hbaseMinor == 5 - && hbasePatch >=4) - || ("${hbase.compat.version}".equals("2.6.0") - && hbaseMinor == 6 - && hbasePatch >=0) - ) - - - - - - - check-hbase-compatibility - validate - - enforce - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-source - generate-sources - - add-source - - - - ${antlr-output.dir} - ${antlr-input.dir} - - - - - - - - org.antlr - antlr3-maven-plugin - - - - antlr - - - - - ${antlr-output.dir}/org/apache/phoenix/parse - - - - - org.apache.maven.plugins - maven-eclipse-plugin - - - org.jamon.project.templateBuilder - org.eclipse.jdt.core.javabuilder - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - - create-phoenix-generated-classpath - - build-classpath - - - ${project.build.directory}/cached_classpath.txt - - - - - copy-for-sqlline - - copy - - - - - org.apache.logging.log4j - log4j-api - - - org.apache.logging.log4j - log4j-core - - - org.apache.logging.log4j - log4j-slf4j-impl - - - org.apache.logging.log4j - log4j-1.2-api - - - sqlline - sqlline - jar-with-dependencies - - - ${project.basedir}/../lib - - - - - - org.apache.rat - apache-rat-plugin - - - src/main/java/org/apache/phoenix/coprocessor/generated/*.java - src/main/resources/META-INF/services/java.sql.Driver - src/it/resources/json/*.json - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - ${protobuf.group}:protoc:${protoc.version}:exe:${protoc.arch} - ${basedir}/src/main/protobuf/ - false - true - - - - - - - @@ -431,7 +247,7 @@ joni - org.jruby.jcodings + org.jruby.jcodings jcodings @@ -443,4 +259,185 @@ mvel2 + + + + + + org.apache.maven.plugins + maven-site-plugin + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + + import java.util.regex.Pattern; + import java.lang.Integer; + + versionPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)[^.]*$"); + versionMatcher = versionPattern.matcher("${hbase.version}"); + versionMatcher.find(); + + hbaseMajor = Integer.parseInt(versionMatcher.group(1)); + hbaseMinor = Integer.parseInt(versionMatcher.group(2)); + hbasePatch = Integer.parseInt(versionMatcher.group(3)); + + hbaseMajor == 2 && ( + ("${hbase.compat.version}".equals("2.4.1") + && hbaseMinor == 4 + && hbasePatch >=1) + || ("${hbase.compat.version}".equals("2.5.0") + && hbaseMinor == 5 + && hbasePatch >=0) + || ("${hbase.compat.version}".equals("2.5.4") + && hbaseMinor == 5 + && hbasePatch >=4) + || ("${hbase.compat.version}".equals("2.6.0") + && hbaseMinor == 6 + && hbasePatch >=0) + ) + + + + + + check-hbase-compatibility + + enforce + + validate + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source + + add-source + + 
generate-sources + + + ${antlr-output.dir} + ${antlr-input.dir} + + + + + + + + org.antlr + antlr3-maven-plugin + + ${antlr-output.dir}/org/apache/phoenix/parse + + + + + antlr + + + + + + + org.apache.maven.plugins + maven-eclipse-plugin + + + org.jamon.project.templateBuilder + org.eclipse.jdt.core.javabuilder + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + + create-phoenix-generated-classpath + + build-classpath + + + ${project.build.directory}/cached_classpath.txt + + + + + copy-for-sqlline + + copy + + + + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-1.2-api + + + sqlline + sqlline + jar-with-dependencies + + + ${project.basedir}/../lib + + + + + + org.apache.rat + apache-rat-plugin + + + src/main/java/org/apache/phoenix/coprocessor/generated/*.java + src/main/resources/META-INF/services/java.sql.Driver + src/it/resources/json/*.json + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + ${protobuf.group}:protoc:${protoc.version}:exe:${protoc.arch} + ${basedir}/src/main/protobuf/ + false + true + + + + + + diff --git a/phoenix-core-client/src/build/phoenix-core.xml b/phoenix-core-client/src/build/phoenix-core.xml index 7b8df1ef33b..383488f1472 100644 --- a/phoenix-core-client/src/build/phoenix-core.xml +++ b/phoenix-core-client/src/build/phoenix-core.xml @@ -29,10 +29,10 @@ jar false - + - true diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java index da2064e2d01..3b9f17c0bac 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/PhoenixTagType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ package org.apache.hadoop.hbase; /** - Used to persist the TagType in HBase Cell Tags. - All the type present here should be more than @{@link Tag#CUSTOM_TAG_TYPE_RANGE} which is 64. + * Used to persist the TagType in HBase Cell Tags. All the type present here should be more + * than @{@link Tag#CUSTOM_TAG_TYPE_RANGE} which is 64. **/ public final class PhoenixTagType { - /** - * Indicates the source of operation. - */ - public static final byte SOURCE_OPERATION_TAG_TYPE = (byte) 65; + /** + * Indicates the source of operation. + */ + public static final byte SOURCE_OPERATION_TAG_TYPE = (byte) 65; } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java index fd83af73765..8a00de568c9 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; - + public class RegionInfoUtil { - public static byte[] toByteArray(RegionInfo regionInfo) { - return RegionInfo.toByteArray(regionInfo); - } -} \ No newline at end of file + public static byte[] toByteArray(RegionInfo regionInfo) { + return RegionInfo.toByteArray(regionInfo); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java index f90d6401c98..a5cfe3541e0 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ClientRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,35 +26,35 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; /** - * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed - * in its own queue. + * {@link RpcControllerFactory} that sets the priority of metadata rpc calls to be processed in its + * own queue. */ public class ClientRpcControllerFactory extends RpcControllerFactory { - public ClientRpcControllerFactory(Configuration conf) { - super(conf); - } - - @Override - public HBaseRpcController newController() { - HBaseRpcController delegate = super.newController(); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(CellScanner cellScanner) { - HBaseRpcController delegate = super.newController(cellScanner); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(List cellIterables) { - HBaseRpcController delegate = super.newController(cellIterables); - return getController(delegate); - } - - private HBaseRpcController getController(HBaseRpcController delegate) { - return new MetadataRpcController(delegate, conf); - } - -} \ No newline at end of file + public ClientRpcControllerFactory(Configuration conf) { + super(conf); + } + + @Override + public HBaseRpcController newController() { + HBaseRpcController delegate = super.newController(); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(CellScanner cellScanner) { + HBaseRpcController delegate = super.newController(cellScanner); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(List cellIterables) { + HBaseRpcController delegate = super.newController(cellIterables); + return getController(delegate); + } + + private HBaseRpcController getController(HBaseRpcController delegate) { + return new MetadataRpcController(delegate, conf); + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java index 0e876fe6ae1..7d1d34c6d63 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/IndexRpcController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +23,9 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.util.IndexUtil; import com.google.protobuf.RpcController; -import org.apache.phoenix.util.IndexUtil; /** * {@link RpcController} that sets the appropriate priority of RPC calls destined for Phoenix index @@ -33,25 +33,23 @@ */ class IndexRpcController extends DelegatingHBaseRpcController { - private final int priority; - private final String tracingTableName; - - public IndexRpcController(HBaseRpcController delegate, Configuration conf) { - super(delegate); - this.priority = IndexUtil.getIndexPriority(conf); - this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, - QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); - } - - @Override - public void setPriority(final TableName tn) { - if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) { - setPriority(this.priority); - } - else { - super.setPriority(tn); - } + private final int priority; + private final String tracingTableName; + + public IndexRpcController(HBaseRpcController delegate, Configuration conf) { + super(delegate); + this.priority = IndexUtil.getIndexPriority(conf); + this.tracingTableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); + } + + @Override + public void setPriority(final TableName tn) { + if (!tn.isSystemTable() && !tn.getNameAsString().equals(tracingTableName)) { + setPriority(this.priority); + } else { + super.setPriority(tn); } - + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java index c761f9cdc54..26073080f76 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerIndexRpcControllerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,39 +26,38 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; /** - * RpcControllerFactory that should only be used when creating Table for - * making remote RPCs to the region servers hosting global mutable index table regions. - * This controller factory shouldn't be globally configured anywhere and is meant to be used - * only internally by Phoenix indexing code. + * RpcControllerFactory that should only be used when creating Table for making remote RPCs to the + * region servers hosting global mutable index table regions. 
This controller factory shouldn't be + * globally configured anywhere and is meant to be used only internally by Phoenix indexing code. */ public class InterRegionServerIndexRpcControllerFactory extends RpcControllerFactory { - public InterRegionServerIndexRpcControllerFactory(Configuration conf) { - super(conf); - } - - @Override - public HBaseRpcController newController() { - HBaseRpcController delegate = super.newController(); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(CellScanner cellScanner) { - HBaseRpcController delegate = super.newController(cellScanner); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(List cellIterables) { - HBaseRpcController delegate = super.newController(cellIterables); - return getController(delegate); - } - - private HBaseRpcController getController(HBaseRpcController delegate) { - // construct a chain of controllers: metadata, index and standard controller - IndexRpcController indexRpcController = new IndexRpcController(delegate, conf); - return new MetadataRpcController(indexRpcController, conf); - } + public InterRegionServerIndexRpcControllerFactory(Configuration conf) { + super(conf); + } + + @Override + public HBaseRpcController newController() { + HBaseRpcController delegate = super.newController(); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(CellScanner cellScanner) { + HBaseRpcController delegate = super.newController(cellScanner); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(List cellIterables) { + HBaseRpcController delegate = super.newController(cellIterables); + return getController(delegate); + } + + private HBaseRpcController getController(HBaseRpcController delegate) { + // construct a chain of controllers: metadata, index and standard controller + IndexRpcController indexRpcController = new IndexRpcController(delegate, conf); + return new MetadataRpcController(indexRpcController, conf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java index cdfa9da168f..c13439547b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,25 +27,25 @@ * Controller used to invalidate server side metadata cache RPCs. 
*/ public class InvalidateMetadataCacheController extends DelegatingHBaseRpcController { - private int priority; + private int priority; - public InvalidateMetadataCacheController(HBaseRpcController delegate, Configuration conf) { - super(delegate); - this.priority = IndexUtil.getInvalidateMetadataCachePriority(conf); - } + public InvalidateMetadataCacheController(HBaseRpcController delegate, Configuration conf) { + super(delegate); + this.priority = IndexUtil.getInvalidateMetadataCachePriority(conf); + } - @Override - public void setPriority(int priority) { - this.priority = priority; - } + @Override + public void setPriority(int priority) { + this.priority = priority; + } - @Override - public void setPriority(TableName tn) { - // Nothing - } + @Override + public void setPriority(TableName tn) { + // Nothing + } - @Override - public int getPriority() { - return this.priority; - } + @Override + public int getPriority() { + return this.priority; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java index ee6b3b24ffa..3a0c13366ec 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/InvalidateMetadataCacheControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,17 +25,17 @@ * Factory to instantiate InvalidateMetadataCacheControllers */ public class InvalidateMetadataCacheControllerFactory extends RpcControllerFactory { - public InvalidateMetadataCacheControllerFactory(Configuration conf) { - super(conf); - } + public InvalidateMetadataCacheControllerFactory(Configuration conf) { + super(conf); + } - @Override - public HBaseRpcController newController() { - HBaseRpcController delegate = super.newController(); - return getController(delegate); - } + @Override + public HBaseRpcController newController() { + HBaseRpcController delegate = super.newController(); + return getController(delegate); + } - private HBaseRpcController getController(HBaseRpcController delegate) { - return new InvalidateMetadataCacheController(delegate, conf); - } + private HBaseRpcController getController(HBaseRpcController delegate) { + return new InvalidateMetadataCacheController(delegate, conf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java index 16ad4394427..f4159bf4076 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/MetadataRpcController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.ipc.DelegatingHBaseRpcController; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import com.google.protobuf.RpcController; /** @@ -36,39 +36,37 @@ */ class MetadataRpcController extends DelegatingHBaseRpcController { - private int priority; - // list of system tables - private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder() - .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true) - .getNameAsString()) - .build(); + private int priority; + // list of system tables + private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder() + .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME).add(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME) + .add(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME) + .add(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME) + .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true) + .getNameAsString()) + .build(); - public MetadataRpcController(HBaseRpcController delegate, - Configuration conf) { - super(delegate); - this.priority = IndexUtil.getMetadataPriority(conf); - } + public MetadataRpcController(HBaseRpcController delegate, Configuration conf) { + super(delegate); + this.priority = IndexUtil.getMetadataPriority(conf); + } - @Override - public void setPriority(final TableName tn) { - if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) { - setPriority(this.priority); - } else { - super.setPriority(tn); - } - } + @Override + public void setPriority(final TableName tn) { + if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) { + setPriority(this.priority); + } else { + super.setPriority(tn); + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java 
b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java index a1a97cf6ce7..3ff84ef353d 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerSideRPCControllerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,18 +21,18 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; /** - * {@link RpcControllerFactory} that should only be used when - * making server-server remote RPCs to the region servers hosting Phoenix SYSTEM tables. + * {@link RpcControllerFactory} that should only be used when making server-server remote RPCs to + * the region servers hosting Phoenix SYSTEM tables. */ -public class ServerSideRPCControllerFactory { +public class ServerSideRPCControllerFactory { - protected final Configuration conf; + protected final Configuration conf; - public ServerSideRPCControllerFactory(Configuration conf) { - this.conf = conf; - } + public ServerSideRPCControllerFactory(Configuration conf) { + this.conf = conf; + } - public ServerToServerRpcController newController() { - return new ServerToServerRpcControllerImpl(this.conf); - } + public ServerToServerRpcController newController() { + return new ServerToServerRpcControllerImpl(this.conf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java index 4916168b9db..0bb5b4e56e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,28 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc.controller; -import com.google.protobuf.RpcController; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import com.google.protobuf.RpcController; + public interface ServerToServerRpcController extends RpcController { - /** - * @param priority Priority for this request; should fall roughly in the range - * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} - */ - void setPriority(int priority); + /** + * @param priority Priority for this request; should fall roughly in the range + * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} + */ + void setPriority(int priority); - /** - * @param tn Set priority based off the table we are going against. 
- */ - void setPriority(final TableName tn); + /** + * @param tn Set priority based off the table we are going against. + */ + void setPriority(final TableName tn); - /** - * @return The priority of this request - */ - int getPriority(); + /** Returns The priority of this request */ + int getPriority(); } diff --git a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java index 8e12d2ec2a7..28bcd50fa0f 100644 --- a/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerToServerRpcControllerImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc.controller; -import com.google.protobuf.RpcController; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.ServerRpcController; @@ -27,49 +27,48 @@ import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.SchemaUtil; -import java.util.List; +import com.google.protobuf.RpcController; /** - * {@link RpcController} that sets the appropriate priority of server-server RPC calls destined - * for Phoenix SYSTEM tables. + * {@link RpcController} that sets the appropriate priority of server-server RPC calls destined for + * Phoenix SYSTEM tables. 
*/ -public class ServerToServerRpcControllerImpl extends ServerRpcController implements - ServerToServerRpcController { +public class ServerToServerRpcControllerImpl extends ServerRpcController + implements ServerToServerRpcController { - private int priority; - // list of system tables that can possibly have server-server rpc's - private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder() - .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME) - .add(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true) - .getNameAsString()) - .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true) - .getNameAsString()) - .build(); + private int priority; + // list of system tables that can possibly have server-server rpc's + private static final List SYSTEM_TABLE_NAMES = new ImmutableList.Builder() + .add(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME) + .add(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME) + .add(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, true) + .getNameAsString()) + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true) + .getNameAsString()) + .build(); - public ServerToServerRpcControllerImpl( - Configuration conf) { - super(); - this.priority = IndexUtil.getServerSidePriority(conf); - } + public ServerToServerRpcControllerImpl(Configuration conf) { + super(); + this.priority = IndexUtil.getServerSidePriority(conf); + } - @Override - public void setPriority(final TableName tn) { - if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) { - setPriority(this.priority); - } + @Override + public void setPriority(final TableName tn) { + if (SYSTEM_TABLE_NAMES.contains(tn.getNameAsString())) { + setPriority(this.priority); } + } + @Override + public void setPriority(int priority) { + this.priority = priority; + } - @Override public void setPriority(int priority) { - this.priority = priority; - } - - - @Override public int getPriority() { - return this.priority; - } + @Override + public int getPriority() { + return this.priority; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java index 80e37ce64db..de7b950c282 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/HashCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,20 +21,19 @@ import java.io.IOException; import java.util.List; -import net.jcip.annotations.Immutable; - import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.schema.tuple.Tuple; +import net.jcip.annotations.Immutable; /** - * Encapsulate deserialized hash cache from bytes into Map. - * The Map uses the row key as the key and the row as the value. - * + * Encapsulate deserialized hash cache from bytes into Map. The Map uses the row key as the key and + * the row as the value. * @since 0.1 */ @Immutable public interface HashCache extends Closeable { - public int getClientVersion(); - public List get(ImmutableBytesPtr hashKey) throws IOException; + public int getClientVersion(); + + public List get(ImmutableBytesPtr hashKey) throws IOException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java index 9f3dd592336..b9cb21e156c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/IndexMetaDataCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.cache; import java.io.Closeable; @@ -28,29 +27,32 @@ import org.apache.phoenix.util.ScanUtil; public interface IndexMetaDataCache extends Closeable { - public static final IndexMetaDataCache EMPTY_INDEX_META_DATA_CACHE = new IndexMetaDataCache() { - - @Override - public void close() throws IOException { - } - - @Override - public List getIndexMaintainers() { - return Collections.emptyList(); - } - - @Override - public PhoenixTransactionContext getTransactionContext() { - return null; - } - - @Override - public int getClientVersion() { - return ScanUtil.UNKNOWN_CLIENT_VERSION; - } - - }; - public List getIndexMaintainers(); - public PhoenixTransactionContext getTransactionContext(); - public int getClientVersion(); + public static final IndexMetaDataCache EMPTY_INDEX_META_DATA_CACHE = new IndexMetaDataCache() { + + @Override + public void close() throws IOException { + } + + @Override + public List getIndexMaintainers() { + return Collections.emptyList(); + } + + @Override + public PhoenixTransactionContext getTransactionContext() { + return null; + } + + @Override + public int getClientVersion() { + return ScanUtil.UNKNOWN_CLIENT_VERSION; + } + + }; + + public List getIndexMaintainers(); + + public PhoenixTransactionContext getTransactionContext(); + + public int getClientVersion(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java index ea6982d00a6..2a7e2a32e74 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -17,70 +17,73 @@ */ package org.apache.phoenix.cache; -import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; -import org.apache.phoenix.thirdparty.com.google.common.cache.CacheLoader; -import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache; -import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException; import java.nio.ByteBuffer; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.IllegalDataException; +import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.phoenix.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException; import org.joda.time.DateTimeZone; public class JodaTimezoneCache { - public static final int CACHE_EXPRIRE_TIME_MINUTES = 10; - private static final LoadingCache cachedJodaTimeZones = createTimezoneCache(); + public static final int CACHE_EXPRIRE_TIME_MINUTES = 10; + private static final LoadingCache cachedJodaTimeZones = + createTimezoneCache(); - /** - * Returns joda's DateTimeZone instance from cache or create new instance and cache it. - * - * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man - * @return joda's DateTimeZone instance - * @throws IllegalDataException if unknown timezone id is passed - */ - public static DateTimeZone getInstance(ByteBuffer timezoneId) { - try { - return cachedJodaTimeZones.get(timezoneId); - } catch (ExecutionException ex) { - throw new IllegalDataException(ex); - } catch (UncheckedExecutionException e) { - throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array())); - } + /** + * Returns joda's DateTimeZone instance from cache or create new instance and cache it. + * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. + * Europe/Isle_of_Man + * @return joda's DateTimeZone instance + * @throws IllegalDataException if unknown timezone id is passed + */ + public static DateTimeZone getInstance(ByteBuffer timezoneId) { + try { + return cachedJodaTimeZones.get(timezoneId); + } catch (ExecutionException ex) { + throw new IllegalDataException(ex); + } catch (UncheckedExecutionException e) { + throw new IllegalDataException("Unknown timezone " + Bytes.toString(timezoneId.array())); } + } - /** - * Returns joda's DateTimeZone instance from cache or create new instance and cache it. - * - * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man - * @return joda's DateTimeZone instance - * @throws IllegalDataException if unknown timezone id is passed - */ - public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) { - return getInstance(ByteBuffer.wrap(timezoneId.copyBytes())); - } + /** + * Returns joda's DateTimeZone instance from cache or create new instance and cache it. + * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. 
+ * Europe/Isle_of_Man + * @return joda's DateTimeZone instance + * @throws IllegalDataException if unknown timezone id is passed + */ + public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) { + return getInstance(ByteBuffer.wrap(timezoneId.copyBytes())); + } - /** - * Returns joda's DateTimeZone instance from cache or create new instance and cache it. - * - * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. Europe/Isle_of_Man - * @return joda's DateTimeZone instance - * @throws IllegalDataException if unknown timezone id is passed - */ - public static DateTimeZone getInstance(String timezoneId) { - return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId))); - } + /** + * Returns joda's DateTimeZone instance from cache or create new instance and cache it. + * @param timezoneId Timezone Id as accepted by {@code DateTimeZone.forID()}. E.g. + * Europe/Isle_of_Man + * @return joda's DateTimeZone instance + * @throws IllegalDataException if unknown timezone id is passed + */ + public static DateTimeZone getInstance(String timezoneId) { + return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId))); + } - private static LoadingCache createTimezoneCache() { - return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, TimeUnit.MINUTES).build(new CacheLoader() { + private static LoadingCache createTimezoneCache() { + return CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, TimeUnit.MINUTES) + .build(new CacheLoader() { - @Override - public DateTimeZone load(ByteBuffer timezone) throws Exception { - return DateTimeZone.forID(Bytes.toString(timezone.array())); - } - }); - } + @Override + public DateTimeZone load(ByteBuffer timezone) throws Exception { + return DateTimeZone.forID(Bytes.toString(timezone.array())); + } + }); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java index 9c6bb116952..64ed016bff1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,14 +51,14 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.ScanRanges; -import org.apache.phoenix.coprocessorclient.MetaDataProtocol; -import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory; import org.apache.phoenix.coprocessor.generated.ServerCacheFactoryProtos; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheRequest; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.AddServerCacheResponse; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheRequest; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService; +import org.apache.phoenix.coprocessorclient.MetaDataProtocol; +import org.apache.phoenix.coprocessorclient.ServerCachingProtocol.ServerCacheFactory; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.job.JobManager.JobCallable; import org.apache.phoenix.join.HashCacheFactory; @@ -79,485 +79,497 @@ import org.slf4j.LoggerFactory; /** - * * Client for sending cache to each region server - * - * * @since 0.1 */ public class ServerCacheClient { - public static final int UUID_LENGTH = Bytes.SIZEOF_LONG; - public static final byte[] KEY_IN_FIRST_REGION = new byte[]{0}; - private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class); - private static final Random RANDOM = new Random(); - public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = "hash.join.server.cache.resend.per.server"; - private final PhoenixConnection connection; - private final Map cacheUsingTableMap = new ConcurrentHashMap(); + public static final int UUID_LENGTH = Bytes.SIZEOF_LONG; + public static final byte[] KEY_IN_FIRST_REGION = new byte[] { 0 }; + private static final Logger LOGGER = LoggerFactory.getLogger(ServerCacheClient.class); + private static final Random RANDOM = new Random(); + public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = + "hash.join.server.cache.resend.per.server"; + private final PhoenixConnection connection; + private final Map cacheUsingTableMap = new ConcurrentHashMap(); - /** - * Construct client used to create a serialized cached snapshot of a table and send it to each region server - * for caching during hash join processing. - * @param connection the client connection - * - * TODO: instead of minMaxKeyRange, have an interface for iterating through ranges as we may be sending to - * servers when we don't have to if the min is in first region and max is in last region, especially for point queries. - */ - public ServerCacheClient(PhoenixConnection connection) { - this.connection = connection; - } - - public PhoenixConnection getConnection() { - return connection; - } - - /** - * Client-side representation of a server cache. 
Call {@link #close()} when usage - * is complete to free cache up on region server - * - * - * @since 0.1 - */ - public class ServerCache implements SQLCloseable { - private final int size; - private final byte[] id; - private final Map servers; - private ImmutableBytesWritable cachePtr; - private MemoryChunk chunk; - private File outputFile; - private long maxServerCacheTTL; - - - public ServerCache(byte[] id, Set servers, ImmutableBytesWritable cachePtr, - ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException { - maxServerCacheTTL = services.getProps().getInt( - QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); - this.id = id; - this.servers = new HashMap(); - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - for(HRegionLocation loc : servers) { - this.servers.put(loc, currentTime); - } - this.size = cachePtr.getLength(); - if (storeCacheOnClient) { - try { - this.chunk = services.getMemoryManager().allocate(cachePtr.getLength()); - this.cachePtr = cachePtr; - } catch (InsufficientMemoryException e) { - this.outputFile = File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps() - .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY))); - try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) { - fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength()); - } - } - } - - } + /** + * Construct client used to create a serialized cached snapshot of a table and send it to each + * region server for caching during hash join processing. + * @param connection the client connection TODO: instead of minMaxKeyRange, have an interface for + * iterating through ranges as we may be sending to servers when we don't have + * to if the min is in first region and max is in last region, especially for + * point queries. + */ + public ServerCacheClient(PhoenixConnection connection) { + this.connection = connection; + } - public ImmutableBytesWritable getCachePtr() throws IOException { - if(this.outputFile!=null){ - try (InputStream fio = Files.newInputStream(outputFile.toPath())) { - byte[] b = new byte[this.size]; - fio.read(b); - cachePtr = new ImmutableBytesWritable(b); - } - } - return cachePtr; - } + public PhoenixConnection getConnection() { + return connection; + } - /** - * Gets the size in bytes of hash cache - */ - public int getSize() { - return size; - } + /** + * Client-side representation of a server cache. 
Call {@link #close()} when usage is complete to + * free cache up on region server + * @since 0.1 + */ + public class ServerCache implements SQLCloseable { + private final int size; + private final byte[] id; + private final Map servers; + private ImmutableBytesWritable cachePtr; + private MemoryChunk chunk; + private File outputFile; + private long maxServerCacheTTL; - /** - * Gets the unique identifier for this hash cache - */ - public byte[] getId() { - return id; + public ServerCache(byte[] id, Set servers, ImmutableBytesWritable cachePtr, + ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException { + maxServerCacheTTL = + services.getProps().getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); + this.id = id; + this.servers = new HashMap(); + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + for (HRegionLocation loc : servers) { + this.servers.put(loc, currentTime); + } + this.size = cachePtr.getLength(); + if (storeCacheOnClient) { + try { + this.chunk = services.getMemoryManager().allocate(cachePtr.getLength()); + this.cachePtr = cachePtr; + } catch (InsufficientMemoryException e) { + this.outputFile = + File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps() + .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY))); + try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) { + fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength()); + } } + } - public boolean addServer(HRegionLocation loc) { - if(this.servers.containsKey(loc)) { - return false; - } else { - this.servers.put(loc, EnvironmentEdgeManager.currentTimeMillis()); - return true; - } - } + } - public boolean isExpired(HRegionLocation loc) { - if(this.servers.containsKey(loc)) { - Long time = this.servers.get(loc); - if(EnvironmentEdgeManager.currentTimeMillis() - time > maxServerCacheTTL) - return true; // cache was send more than maxTTL ms ago, expecting that it's expired - } else { - return false; // should be on server yet. - } - return false; // Unknown region location. Need to send the cache. 
+ public ImmutableBytesWritable getCachePtr() throws IOException { + if (this.outputFile != null) { + try (InputStream fio = Files.newInputStream(outputFile.toPath())) { + byte[] b = new byte[this.size]; + fio.read(b); + cachePtr = new ImmutableBytesWritable(b); } + } + return cachePtr; + } + /** + * Gets the size in bytes of hash cache + */ + public int getSize() { + return size; + } - - /** - * Call to free up cache on region servers when no longer needed - */ - @Override - public void close() throws SQLException { - try{ - removeServerCache(this, servers.keySet()); - }finally{ - cachePtr = null; - if (chunk != null) { - chunk.close(); - } - if (outputFile != null) { - outputFile.delete(); - } - } - } + /** + * Gets the unique identifier for this hash cache + */ + public byte[] getId() { + return id; } - public ServerCache createServerCache(byte[] cacheId, QueryPlan delegate) - throws SQLException, IOException { - PTable cacheUsingTable = delegate.getTableRef().getTable(); - ConnectionQueryServices services = delegate.getContext().getConnection().getQueryServices(); - List locations = services.getAllTableRegions( - cacheUsingTable.getPhysicalName().getBytes(), - delegate.getContext().getStatement().getQueryTimeoutInMillis()); - int nRegions = locations.size(); - Set servers = new HashSet<>(nRegions); - cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable); - return new ServerCache(cacheId, servers, new ImmutableBytesWritable( - new byte[]{}), services, false); + public boolean addServer(HRegionLocation loc) { + if (this.servers.containsKey(loc)) { + return false; + } else { + this.servers.put(loc, EnvironmentEdgeManager.currentTimeMillis()); + return true; + } } - public ServerCache addServerCache( - ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, - final ServerCacheFactory cacheFactory, final PTable cacheUsingTable) - throws SQLException { - return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false); + public boolean isExpired(HRegionLocation loc) { + if (this.servers.containsKey(loc)) { + Long time = this.servers.get(loc); + if (EnvironmentEdgeManager.currentTimeMillis() - time > maxServerCacheTTL) return true; // cache + // was + // send + // more + // than + // maxTTL + // ms + // ago, + // expecting + // that + // it's + // expired + } else { + return false; // should be on server yet. + } + return false; // Unknown region location. Need to send the cache. 
} - public ServerCache addServerCache( - ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState, - final ServerCacheFactory cacheFactory, final PTable cacheUsingTable, - boolean storeCacheOnClient) throws SQLException { - final byte[] cacheId = ServerCacheClient.generateId(); - return addServerCache(keyRanges, cacheId, cachePtr, txState, cacheFactory, - cacheUsingTable, false, storeCacheOnClient); + /** + * Call to free up cache on region servers when no longer needed + */ + @Override + public void close() throws SQLException { + try { + removeServerCache(this, servers.keySet()); + } finally { + cachePtr = null; + if (chunk != null) { + chunk.close(); + } + if (outputFile != null) { + outputFile.delete(); + } + } } + } - public ServerCache addServerCache( - ScanRanges keyRanges, final byte[] cacheId, final ImmutableBytesWritable cachePtr, - final byte[] txState, final ServerCacheFactory cacheFactory, - final PTable cacheUsingTable, final boolean usePersistentCache, - boolean storeCacheOnClient) throws SQLException { - ConnectionQueryServices services = connection.getQueryServices(); - List closeables = new ArrayList(); - ServerCache hashCacheSpec = null; - SQLException firstException = null; - /** - * Execute EndPoint in parallel on each server to send compressed hash cache - */ - // TODO: generalize and package as a per region server EndPoint caller - // (ideally this would be functionality provided by the coprocessor framework) - boolean success = false; - ExecutorService executor = services.getExecutor(); - List> futures = Collections.emptyList(); - try { - int queryTimeout = connection.getQueryServices().getProps() - .getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - List locations = - services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(), - queryTimeout); - int nRegions = locations.size(); - // Size these based on worst case - futures = new ArrayList>(nRegions); - Set servers = new HashSet(nRegions); - for (HRegionLocation entry : locations) { - // Keep track of servers we've sent to and only send once - byte[] regionStartKey = entry.getRegion().getStartKey(); - byte[] regionEndKey = entry.getRegion().getEndKey(); - if ( ! servers.contains(entry) && - keyRanges.intersectRegion(regionStartKey, regionEndKey, - cacheUsingTable.getIndexType() == IndexType.LOCAL)) { - // Call RPC once per server - servers.add(entry); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(addCustomAnnotations( - "Adding cache entry to be sent for " + entry, connection)); - } - final byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); - final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes()); - closeables.add(htable); - futures.add(executor.submit(new JobCallable() { - - @Override - public Boolean call() throws Exception { - return addServerCache(htable, key, cacheUsingTable, cacheId, cachePtr, cacheFactory, txState, usePersistentCache); - } - - /** - * Defines the grouping for round robin behavior. All threads spawned to process - * this scan will be grouped together and time sliced with other simultaneously - * executing parallel scans. 
- */ - @Override - public Object getJobId() { - return ServerCacheClient.this; - } - - @Override - public TaskExecutionMetricsHolder getTaskExecutionMetric() { - return NO_OP_INSTANCE; - } - })); - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(addCustomAnnotations( - "NOT adding cache entry to be sent for " + entry + - " since one already exists for that entry", connection)); - } - } + public ServerCache createServerCache(byte[] cacheId, QueryPlan delegate) + throws SQLException, IOException { + PTable cacheUsingTable = delegate.getTableRef().getTable(); + ConnectionQueryServices services = delegate.getContext().getConnection().getQueryServices(); + List locations = + services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(), + delegate.getContext().getStatement().getQueryTimeoutInMillis()); + int nRegions = locations.size(); + Set servers = new HashSet<>(nRegions); + cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable); + return new ServerCache(cacheId, servers, new ImmutableBytesWritable(new byte[] {}), services, + false); + } + + public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, + final byte[] txState, final ServerCacheFactory cacheFactory, final PTable cacheUsingTable) + throws SQLException { + return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false); + } + + public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, + final byte[] txState, final ServerCacheFactory cacheFactory, final PTable cacheUsingTable, + boolean storeCacheOnClient) throws SQLException { + final byte[] cacheId = ServerCacheClient.generateId(); + return addServerCache(keyRanges, cacheId, cachePtr, txState, cacheFactory, cacheUsingTable, + false, storeCacheOnClient); + } + + public ServerCache addServerCache(ScanRanges keyRanges, final byte[] cacheId, + final ImmutableBytesWritable cachePtr, final byte[] txState, + final ServerCacheFactory cacheFactory, final PTable cacheUsingTable, + final boolean usePersistentCache, boolean storeCacheOnClient) throws SQLException { + ConnectionQueryServices services = connection.getQueryServices(); + List closeables = new ArrayList(); + ServerCache hashCacheSpec = null; + SQLException firstException = null; + /** + * Execute EndPoint in parallel on each server to send compressed hash cache + */ + // TODO: generalize and package as a per region server EndPoint caller + // (ideally this would be functionality provided by the coprocessor framework) + boolean success = false; + ExecutorService executor = services.getExecutor(); + List> futures = Collections.emptyList(); + try { + int queryTimeout = connection.getQueryServices().getProps().getInt( + QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + List locations = + services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes(), queryTimeout); + int nRegions = locations.size(); + // Size these based on worst case + futures = new ArrayList>(nRegions); + Set servers = new HashSet(nRegions); + for (HRegionLocation entry : locations) { + // Keep track of servers we've sent to and only send once + byte[] regionStartKey = entry.getRegion().getStartKey(); + byte[] regionEndKey = entry.getRegion().getEndKey(); + if ( + !servers.contains(entry) && keyRanges.intersectRegion(regionStartKey, regionEndKey, + cacheUsingTable.getIndexType() == IndexType.LOCAL) + ) { + // Call RPC once per server + servers.add(entry); + if (LOGGER.isDebugEnabled()) { + 
LOGGER.debug( + addCustomAnnotations("Adding cache entry to be sent for " + entry, connection)); + } + final byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); + final Table htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes()); + closeables.add(htable); + futures.add(executor.submit(new JobCallable() { + + @Override + public Boolean call() throws Exception { + return addServerCache(htable, key, cacheUsingTable, cacheId, cachePtr, cacheFactory, + txState, usePersistentCache); + } + + /** + * Defines the grouping for round robin behavior. All threads spawned to process this + * scan will be grouped together and time sliced with other simultaneously executing + * parallel scans. + */ + @Override + public Object getJobId() { + return ServerCacheClient.this; } - hashCacheSpec = new ServerCache(cacheId,servers,cachePtr, services, storeCacheOnClient); - // Execute in parallel - int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - for (Future future : futures) { - future.get(timeoutMs, TimeUnit.MILLISECONDS); + @Override + public TaskExecutionMetricsHolder getTaskExecutionMetric() { + return NO_OP_INSTANCE; } + })); + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + + " since one already exists for that entry", connection)); + } + } + } + + hashCacheSpec = new ServerCache(cacheId, servers, cachePtr, services, storeCacheOnClient); + // Execute in parallel + int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + for (Future future : futures) { + future.get(timeoutMs, TimeUnit.MILLISECONDS); + } - cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable); - success = true; - } catch (SQLException e) { - firstException = e; - } catch (Exception e) { + cacheUsingTableMap.put(Bytes.mapKey(cacheId), cacheUsingTable); + success = true; + } catch (SQLException e) { + firstException = e; + } catch (Exception e) { + firstException = new SQLException(e); + } finally { + try { + if (!success) { + if (hashCacheSpec != null) { + SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec)); + } + SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec)); + for (Future future : futures) { + future.cancel(true); + } + } + } finally { + try { + Closeables.closeAll(closeables); + } catch (IOException e) { + if (firstException == null) { firstException = new SQLException(e); + } } finally { - try { - if (!success) { - if (hashCacheSpec != null) { - SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec)); - } - SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec)); - for (Future future : futures) { - future.cancel(true); - } - } - } finally { - try { - Closeables.closeAll(closeables); - } catch (IOException e) { - if (firstException == null) { - firstException = new SQLException(e); - } - } finally { - if (firstException != null) { - throw firstException; - } - } - } + if (firstException != null) { + throw firstException; + } } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(addCustomAnnotations("Cache " + cacheId + - " successfully added to servers.", connection)); - } - return hashCacheSpec; + } } - - /** - * Remove the cached table from all region servers - * @throws SQLException - * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was 
added - */ - private void removeServerCache(final ServerCache cache, Set remainingOnServers) throws SQLException { - Table iterateOverTable = null; - final byte[] cacheId = cache.getId(); - try { - ConnectionQueryServices services = connection.getQueryServices(); - Throwable lastThrowable = null; - final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId)); - byte[] tableName = cacheUsingTable.getPhysicalName().getBytes(); - iterateOverTable = services.getTable(tableName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection)); + } + return hashCacheSpec; + } - int queryTimeout = connection.getQueryServices().getProps() - .getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - List locations = services.getAllTableRegions(tableName, queryTimeout); + /** + * Remove the cached table from all region servers + * @throws IllegalStateException if hashed table cannot be removed on any region server on which + * it was added + */ + private void removeServerCache(final ServerCache cache, Set remainingOnServers) + throws SQLException { + Table iterateOverTable = null; + final byte[] cacheId = cache.getId(); + try { + ConnectionQueryServices services = connection.getQueryServices(); + Throwable lastThrowable = null; + final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId)); + byte[] tableName = cacheUsingTable.getPhysicalName().getBytes(); + iterateOverTable = services.getTable(tableName); - /** - * Allow for the possibility that the region we based where to send our cache has split and been relocated - * to another region server *after* we sent it, but before we removed it. To accommodate this, we iterate - * through the current metadata boundaries and remove the cache once for each server that we originally sent - * to. - */ - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(addCustomAnnotations( - "Removing Cache " + cacheId + " from servers.", connection)); - } - for (HRegionLocation entry : locations) { - // Call once per server - if (remainingOnServers.contains(entry)) { + int queryTimeout = connection.getQueryServices().getProps().getInt( + QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + List locations = services.getAllTableRegions(tableName, queryTimeout); + + /** + * Allow for the possibility that the region we based where to send our cache has split and + * been relocated to another region server *after* we sent it, but before we removed it. To + * accommodate this, we iterate through the current metadata boundaries and remove the cache + * once for each server that we originally sent to. 
+ */ + if (LOGGER.isDebugEnabled()) { + LOGGER + .debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection)); + } + for (HRegionLocation entry : locations) { + // Call once per server + if (remainingOnServers.contains(entry)) { + try { + byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); + iterateOverTable.coprocessorService(ServerCachingService.class, key, key, + new Batch.Call() { + @Override + public RemoveServerCacheResponse call(ServerCachingService instance) + throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder(); + final byte[] tenantIdBytes; + if (cacheUsingTable.isMultiTenant()) { try { - byte[] key = getKeyInRegion(entry.getRegion().getStartKey()); - iterateOverTable.coprocessorService(ServerCachingService.class, key, key, - new Batch.Call() { - @Override - public RemoveServerCacheResponse call(ServerCachingService instance) - throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest - .newBuilder(); - final byte[] tenantIdBytes; - if (cacheUsingTable.isMultiTenant()) { - try { - tenantIdBytes = connection.getTenantId() == null ? null - : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), - cacheUsingTable.getBucketNum() != null, - connection.getTenantId(), - cacheUsingTable.getViewIndexId() != null); - } catch (SQLException e) { - throw new IOException(e); - } - } else { - tenantIdBytes = connection.getTenantId() == null ? null - : connection.getTenantId().getBytes(); - } - if (tenantIdBytes != null) { - builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); - } - builder.setCacheId(ByteStringer.wrap(cacheId)); - instance.removeServerCache(controller, builder.build(), rpcCallback); - if (controller.getFailedOn() != null) { throw controller.getFailedOn(); } - return rpcCallback.get(); - } - }); - remainingOnServers.remove(entry); - } catch (Throwable t) { - lastThrowable = t; - LOGGER.error(addCustomAnnotations( - "Error trying to remove hash cache for " + entry, - connection), t); + tenantIdBytes = connection.getTenantId() == null + ? null + : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), + cacheUsingTable.getBucketNum() != null, connection.getTenantId(), + cacheUsingTable.getViewIndexId() != null); + } catch (SQLException e) { + throw new IOException(e); } + } else { + tenantIdBytes = + connection.getTenantId() == null ? 
null : connection.getTenantId().getBytes(); + } + if (tenantIdBytes != null) { + builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); + } + builder.setCacheId(ByteStringer.wrap(cacheId)); + instance.removeServerCache(controller, builder.build(), rpcCallback); + if (controller.getFailedOn() != null) { + throw controller.getFailedOn(); + } + return rpcCallback.get(); } - } - if (!remainingOnServers.isEmpty()) { - LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " - + remainingOnServers, connection), - lastThrowable); - } - } finally { - cacheUsingTableMap.remove(Bytes.mapKey(cacheId)); - Closeables.closeQuietly(iterateOverTable); + }); + remainingOnServers.remove(entry); + } catch (Throwable t) { + lastThrowable = t; + LOGGER.error( + addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), + t); + } } + } + if (!remainingOnServers.isEmpty()) { + LOGGER.warn( + addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), + lastThrowable); + } + } finally { + cacheUsingTableMap.remove(Bytes.mapKey(cacheId)); + Closeables.closeQuietly(iterateOverTable); } + } - /** - * Create an ID to keep the cached information across other operations independent. - * Using simple long random number, since the length of time we need this to be unique - * is very limited. - */ - public static byte[] generateId() { - long rand = RANDOM.nextLong(); - return Bytes.toBytes(rand); + /** + * Create an ID to keep the cached information across other operations independent. Using simple + * long random number, since the length of time we need this to be unique is very limited. + */ + public static byte[] generateId() { + long rand = RANDOM.nextLong(); + return Bytes.toBytes(rand); + } + + public static String idToString(byte[] uuid) { + assert (uuid.length == Bytes.SIZEOF_LONG); + return Long.toString(Bytes.toLong(uuid)); + } + + private static byte[] getKeyInRegion(byte[] regionStartKey) { + assert (regionStartKey != null); + if (Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) { + return KEY_IN_FIRST_REGION; } - - public static String idToString(byte[] uuid) { - assert(uuid.length == Bytes.SIZEOF_LONG); - return Long.toString(Bytes.toLong(uuid)); + return regionStartKey; + } + + public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, + HashCacheFactory cacheFactory, byte[] txState, PTable pTable) throws Exception { + Table table = null; + boolean success = true; + byte[] cacheId = cache.getId(); + try { + ConnectionQueryServices services = connection.getQueryServices(); + + byte[] tableName = pTable.getPhysicalName().getBytes(); + table = services.getTable(tableName); + HRegionLocation tableRegionLocation = + services.getTableRegionLocation(tableName, startkeyOfRegion); + if (cache.isExpired(tableRegionLocation)) { + return false; + } + if ( + cache.addServer(tableRegionLocation) + || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER, false) + ) { + success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(), + cacheFactory, txState, false); + } + return success; + } finally { + Closeables.closeQuietly(table); } + } - private static byte[] getKeyInRegion(byte[] regionStartKey) { - assert (regionStartKey != null); - if (Bytes.equals(regionStartKey, HConstants.EMPTY_START_ROW)) { - return KEY_IN_FIRST_REGION; - } - return regionStartKey; + public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable, + final byte[] cacheId, final 
ImmutableBytesWritable cachePtr, + final ServerCacheFactory cacheFactory, final byte[] txState, final boolean usePersistentCache) + throws Exception { + byte[] keyInRegion = getKeyInRegion(key); + final Map results; + + AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder(); + final byte[] tenantIdBytes; + if (cacheUsingTable.isMultiTenant()) { + try { + tenantIdBytes = connection.getTenantId() == null + ? null + : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), + cacheUsingTable.getBucketNum() != null, connection.getTenantId(), + cacheUsingTable.getViewIndexId() != null); + } catch (SQLException e) { + throw new IOException(e); + } + } else { + tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); + } + if (tenantIdBytes != null) { + builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); } + builder.setCacheId(ByteStringer.wrap(cacheId)); + builder.setUsePersistentCache(usePersistentCache); + builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr)); + builder.setHasProtoBufIndexMaintainer(true); + ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = + ServerCacheFactoryProtos.ServerCacheFactory.newBuilder(); + svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName()); + builder.setCacheFactory(svrCacheFactoryBuider.build()); + builder.setTxState(ByteStringer.wrap(txState)); + builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION); + final AddServerCacheRequest request = builder.build(); - public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, HashCacheFactory cacheFactory, - byte[] txState, PTable pTable) throws Exception { - Table table = null; - boolean success = true; - byte[] cacheId = cache.getId(); - try { - ConnectionQueryServices services = connection.getQueryServices(); - - byte[] tableName = pTable.getPhysicalName().getBytes(); - table = services.getTable(tableName); - HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, startkeyOfRegion); - if(cache.isExpired(tableRegionLocation)) { - return false; + try { + results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion, + new Batch.Call() { + @Override + public AddServerCacheResponse call(ServerCachingService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + instance.addServerCache(controller, request, rpcCallback); + if (controller.getFailedOn() != null) { + throw controller.getFailedOn(); } - if (cache.addServer(tableRegionLocation) || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER,false)) { - success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(), cacheFactory, - txState, false); - } - return success; - } finally { - Closeables.closeQuietly(table); - } + return rpcCallback.get(); + } + }); + } catch (Throwable t) { + throw new Exception(t); } - - public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId, - final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final byte[] txState, final boolean usePersistentCache) - throws Exception { - byte[] keyInRegion = getKeyInRegion(key); - final Map results; - - AddServerCacheRequest.Builder builder = AddServerCacheRequest.newBuilder(); - final byte[] tenantIdBytes; - if (cacheUsingTable.isMultiTenant()) { - try { - tenantIdBytes = 
connection.getTenantId() == null ? null - : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), - cacheUsingTable.getBucketNum() != null, connection.getTenantId(), - cacheUsingTable.getViewIndexId() != null); - } catch (SQLException e) { - throw new IOException(e); - } - } else { - tenantIdBytes = connection.getTenantId() == null ? null - : connection.getTenantId().getBytes(); - } - if (tenantIdBytes != null) { - builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); - } - builder.setCacheId(ByteStringer.wrap(cacheId)); - builder.setUsePersistentCache(usePersistentCache); - builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr)); - builder.setHasProtoBufIndexMaintainer(true); - ServerCacheFactoryProtos.ServerCacheFactory.Builder svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory - .newBuilder(); - svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName()); - builder.setCacheFactory(svrCacheFactoryBuider.build()); - builder.setTxState(ByteStringer.wrap(txState)); - builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION); - final AddServerCacheRequest request = builder.build(); - - try { - results = htable.coprocessorService(ServerCachingService.class, keyInRegion, keyInRegion, - new Batch.Call() { - @Override - public AddServerCacheResponse call(ServerCachingService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - instance.addServerCache(controller, request, rpcCallback); - if (controller.getFailedOn() != null) { throw controller.getFailedOn(); } - return rpcCallback.get(); - } - }); - } catch (Throwable t) { - throw new Exception(t); - } - if (results != null && results.size() == 1) { return results.values().iterator().next().getReturn(); } - return false; + if (results != null && results.size() == 1) { + return results.values().iterator().next().getReturn(); } - + return false; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java index f251dc7123c..7cf54a4d009 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +23,8 @@ * Interface for server side metadata cache hosted on each region server. */ public interface ServerMetadataCache { - long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName) - throws SQLException; - void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName); + long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName) + throws SQLException; + + void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java index 5f9aa104556..a2beb6d8e5d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/ServerMetadataCacheImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,11 @@ */ package org.apache.phoenix.cache; +import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; + import java.sql.Connection; import java.sql.SQLException; import java.util.Properties; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,124 +34,118 @@ import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener; -import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SchemaUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; /** * This manages the cache for all the objects(data table, views, indexes) on each region server. * Currently, it only stores LAST_DDL_TIMESTAMP in the cache. 
*/ public class ServerMetadataCacheImpl implements ServerMetadataCache { - protected Configuration conf; - // key is the combination of , value is the lastDDLTimestamp - protected final Cache lastDDLTimestampMap; - private static final Logger LOGGER = LoggerFactory.getLogger(ServerMetadataCacheImpl.class); - private static final String PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE - = "phoenix.coprocessor.regionserver.cache.size"; - private static final long DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE = 10000L; - private static volatile ServerMetadataCacheImpl cacheInstance; - private MetricsMetadataCachingSource metricsSource; + protected Configuration conf; + // key is the combination of , value is the lastDDLTimestamp + protected final Cache lastDDLTimestampMap; + private static final Logger LOGGER = LoggerFactory.getLogger(ServerMetadataCacheImpl.class); + private static final String PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE = + "phoenix.coprocessor.regionserver.cache.size"; + private static final long DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE = 10000L; + private static volatile ServerMetadataCacheImpl cacheInstance; + private MetricsMetadataCachingSource metricsSource; - /** - * Creates/gets an instance of ServerMetadataCache. - * - * @param conf configuration - * @return cache - */ - public static ServerMetadataCacheImpl getInstance(Configuration conf) { - ServerMetadataCacheImpl result = cacheInstance; + /** + * Creates/gets an instance of ServerMetadataCache. + * @param conf configuration + */ + public static ServerMetadataCacheImpl getInstance(Configuration conf) { + ServerMetadataCacheImpl result = cacheInstance; + if (result == null) { + synchronized (ServerMetadataCacheImpl.class) { + result = cacheInstance; if (result == null) { - synchronized (ServerMetadataCacheImpl.class) { - result = cacheInstance; - if (result == null) { - cacheInstance = result = new ServerMetadataCacheImpl(conf); - } - } + cacheInstance = result = new ServerMetadataCacheImpl(conf); } - return result; + } } + return result; + } - public ServerMetadataCacheImpl(Configuration conf) { - this.conf = HBaseConfiguration.create(conf); - this.metricsSource = MetricsPhoenixCoprocessorSourceFactory - .getInstance().getMetadataCachingSource(); - long maxSize = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE, - DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE); - lastDDLTimestampMap = CacheBuilder.newBuilder() - .removalListener((RemovalListener) notification -> { - String key = notification.getKey().toString(); - LOGGER.debug("Expiring " + key + " because of " - + notification.getCause().name()); - }) - // maximum number of entries this cache can handle. - .maximumSize(maxSize) - .build(); - } + public ServerMetadataCacheImpl(Configuration conf) { + this.conf = HBaseConfiguration.create(conf); + this.metricsSource = + MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource(); + long maxSize = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE, + DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE); + lastDDLTimestampMap = CacheBuilder.newBuilder() + .removalListener((RemovalListener) notification -> { + String key = notification.getKey().toString(); + LOGGER.debug("Expiring " + key + " because of " + notification.getCause().name()); + }) + // maximum number of entries this cache can handle. + .maximumSize(maxSize).build(); + } - /** - * Returns the last DDL timestamp from the table. - * If not found in cache, then query SYSCAT regionserver. 
- * @param tenantID tenant id - * @param schemaName schema name - * @param tableName table name - * @return last DDL timestamp - * @throws Exception - */ - public long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName) - throws SQLException { - byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName); - ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey); - // Lookup in cache if present. - Long lastDDLTimestamp = lastDDLTimestampMap.getIfPresent(tableKeyPtr); - if (lastDDLTimestamp != null) { - metricsSource.incrementRegionServerMetadataCacheHitCount(); - LOGGER.trace("Retrieving last ddl timestamp value from cache for " + "schema: {}, " + - "table: {}", Bytes.toString(schemaName), Bytes.toString(tableName)); - return lastDDLTimestamp; - } - metricsSource.incrementRegionServerMetadataCacheMissCount(); - PTable table; - String tenantIDStr = Bytes.toString(tenantID); - if (tenantIDStr == null || tenantIDStr.isEmpty()) { - tenantIDStr = null; - } - Properties properties = new Properties(); - if (tenantIDStr != null) { - properties.setProperty(TENANT_ID_ATTRIB, tenantIDStr); - } - try (Connection connection = getConnection(properties)) { - // Using PhoenixConnection#getTableFromServerNoCache to completely bypass CQSI cache. - table = connection.unwrap(PhoenixConnection.class) - .getTableFromServerNoCache(schemaName, tableName); - // TODO PhoenixConnection#getTableFromServerNoCache can throw TableNotFoundException. - // In that case, do we want to throw non retryable exception back to the client? - // Update cache with the latest DDL timestamp from SYSCAT server. - lastDDLTimestampMap.put(tableKeyPtr, table.getLastDDLTimestamp()); - } - return table.getLastDDLTimestamp(); + /** + * Returns the last DDL timestamp from the table. If not found in cache, then query SYSCAT + * regionserver. + * @param tenantID tenant id + * @param schemaName schema name + * @param tableName table name + * @return last DDL timestamp + */ + public long getLastDDLTimestampForTable(byte[] tenantID, byte[] schemaName, byte[] tableName) + throws SQLException { + byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName); + ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey); + // Lookup in cache if present. + Long lastDDLTimestamp = lastDDLTimestampMap.getIfPresent(tableKeyPtr); + if (lastDDLTimestamp != null) { + metricsSource.incrementRegionServerMetadataCacheHitCount(); + LOGGER.trace( + "Retrieving last ddl timestamp value from cache for " + "schema: {}, " + "table: {}", + Bytes.toString(schemaName), Bytes.toString(tableName)); + return lastDDLTimestamp; } - - /** - * Invalidate cache for the given tenantID, schema name and table name. - * Guava cache is thread safe so we don't have to synchronize it explicitly. 
- * @param tenantID tenantID - * @param schemaName schemaName - * @param tableName tableName - */ - public void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName) { - LOGGER.info("Invalidating server metadata cache for tenantID: {}, schema: {}, table: {}", - Bytes.toString(tenantID), Bytes.toString(schemaName), Bytes.toString(tableName)); - byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName); - ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey); - lastDDLTimestampMap.invalidate(tableKeyPtr); + metricsSource.incrementRegionServerMetadataCacheMissCount(); + PTable table; + String tenantIDStr = Bytes.toString(tenantID); + if (tenantIDStr == null || tenantIDStr.isEmpty()) { + tenantIDStr = null; } - - protected Connection getConnection(Properties properties) throws SQLException { - return QueryUtil.getConnectionOnServer(properties, this.conf); + Properties properties = new Properties(); + if (tenantIDStr != null) { + properties.setProperty(TENANT_ID_ATTRIB, tenantIDStr); } + try (Connection connection = getConnection(properties)) { + // Using PhoenixConnection#getTableFromServerNoCache to completely bypass CQSI cache. + table = + connection.unwrap(PhoenixConnection.class).getTableFromServerNoCache(schemaName, tableName); + // TODO PhoenixConnection#getTableFromServerNoCache can throw TableNotFoundException. + // In that case, do we want to throw non retryable exception back to the client? + // Update cache with the latest DDL timestamp from SYSCAT server. + lastDDLTimestampMap.put(tableKeyPtr, table.getLastDDLTimestamp()); + } + return table.getLastDDLTimestamp(); + } + + /** + * Invalidate cache for the given tenantID, schema name and table name. Guava cache is thread safe + * so we don't have to synchronize it explicitly. + * @param tenantID tenantID + * @param schemaName schemaName + * @param tableName tableName + */ + public void invalidate(byte[] tenantID, byte[] schemaName, byte[] tableName) { + LOGGER.info("Invalidating server metadata cache for tenantID: {}, schema: {}, table: {}", + Bytes.toString(tenantID), Bytes.toString(schemaName), Bytes.toString(tableName)); + byte[] tableKey = SchemaUtil.getTableKey(tenantID, schemaName, tableName); + ImmutableBytesPtr tableKeyPtr = new ImmutableBytesPtr(tableKey); + lastDDLTimestampMap.invalidate(tableKeyPtr); + } + + protected Connection getConnection(Properties properties) throws SQLException { + return QueryUtil.getConnectionOnServer(properties, this.conf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java index e36fd09a980..11ad61a74eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,18 +25,20 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.memory.MemoryManager; - /** - * * Inteface to set and set cached values for a tenant - * - * * @since 0.1 */ public interface TenantCache { - MemoryManager getMemoryManager(); - Closeable getServerCache(ImmutableBytesPtr cacheId); - Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, boolean usePersistentCache, int clientVersion) throws SQLException; - void removeServerCache(ImmutableBytesPtr cacheId); - void removeAllServerCache(); + MemoryManager getMemoryManager(); + + Closeable getServerCache(ImmutableBytesPtr cacheId); + + Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, + byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, + boolean usePersistentCache, int clientVersion) throws SQLException; + + void removeServerCache(ImmutableBytesPtr cacheId); + + void removeAllServerCache(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java index 54afaf6c4d1..f2bb7238fed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,258 +30,256 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.memory.MemoryManager; import org.apache.phoenix.memory.MemoryManager.MemoryChunk; -import org.apache.phoenix.util.Closeables; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Ticker; import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalNotification; +import org.apache.phoenix.util.Closeables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * - * Cache per tenant on server side. Tracks memory usage for each - * tenat as well and rolling up usage to global memory manager. - * - * + * Cache per tenant on server side. Tracks memory usage for each tenat as well and rolling up usage + * to global memory manager. 
* @since 0.1 */ public class TenantCacheImpl implements TenantCache { - private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class); - private final int maxTimeToLiveMs; - private final int maxPersistenceTimeToLiveMs; - private final MemoryManager memoryManager; - private final Ticker ticker; - - // Two caches exist: the "serverCaches" cache which is used for handling live - // queries, and the "persistentServerCaches" cache which is used to store data - // between queries. If we are out of memory, attempt to clear out entries from - // the persistent cache before throwing an exception. - private volatile Cache serverCaches; - private volatile Cache persistentServerCaches; - - private final long EVICTION_MARGIN_BYTES = 10000000; - - private static class CacheEntry implements Comparable, Closeable { - private ImmutableBytesPtr cacheId; - private ImmutableBytesWritable cachePtr; - private int hits; - private int liveQueriesCount; - private boolean usePersistentCache; - private long size; - private Closeable closeable; - - public CacheEntry(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, - ServerCacheFactory cacheFactory, byte[] txState, MemoryChunk chunk, - boolean usePersistentCache, boolean useProtoForIndexMaintainer, - int clientVersion) throws SQLException { - this.cacheId = cacheId; - this.cachePtr = cachePtr; - this.size = cachePtr.getLength(); - this.hits = 0; - this.liveQueriesCount = 0; - this.usePersistentCache = usePersistentCache; - this.closeable = cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer, clientVersion); - } - - public void close() throws IOException { - this.closeable.close(); - } + private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class); + private final int maxTimeToLiveMs; + private final int maxPersistenceTimeToLiveMs; + private final MemoryManager memoryManager; + private final Ticker ticker; - synchronized public void incrementLiveQueryCount() { - liveQueriesCount++; - hits++; - } + // Two caches exist: the "serverCaches" cache which is used for handling live + // queries, and the "persistentServerCaches" cache which is used to store data + // between queries. If we are out of memory, attempt to clear out entries from + // the persistent cache before throwing an exception. 
+ private volatile Cache serverCaches; + private volatile Cache persistentServerCaches; - synchronized public void decrementLiveQueryCount() { - liveQueriesCount--; - } + private final long EVICTION_MARGIN_BYTES = 10000000; - synchronized public boolean isLive() { - return liveQueriesCount > 0; - } + private static class CacheEntry implements Comparable, Closeable { + private ImmutableBytesPtr cacheId; + private ImmutableBytesWritable cachePtr; + private int hits; + private int liveQueriesCount; + private boolean usePersistentCache; + private long size; + private Closeable closeable; - public boolean getUsePersistentCache() { - return usePersistentCache; - } + public CacheEntry(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, + ServerCacheFactory cacheFactory, byte[] txState, MemoryChunk chunk, + boolean usePersistentCache, boolean useProtoForIndexMaintainer, int clientVersion) + throws SQLException { + this.cacheId = cacheId; + this.cachePtr = cachePtr; + this.size = cachePtr.getLength(); + this.hits = 0; + this.liveQueriesCount = 0; + this.usePersistentCache = usePersistentCache; + this.closeable = + cacheFactory.newCache(cachePtr, txState, chunk, useProtoForIndexMaintainer, clientVersion); + } - public ImmutableBytesPtr getCacheId() { - return cacheId; - } + public void close() throws IOException { + this.closeable.close(); + } - private Float rank() { - return (float)hits; - } + synchronized public void incrementLiveQueryCount() { + liveQueriesCount++; + hits++; + } - @Override - public int compareTo(CacheEntry o) { - return rank().compareTo(o.rank()); - } + synchronized public void decrementLiveQueryCount() { + liveQueriesCount--; } - public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs) { - this(memoryManager, maxTimeToLiveMs, maxPersistenceTimeToLiveMs, Ticker.systemTicker()); + synchronized public boolean isLive() { + return liveQueriesCount > 0; } - - public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, int maxPersistenceTimeToLiveMs, Ticker ticker) { - this.memoryManager = memoryManager; - this.maxTimeToLiveMs = maxTimeToLiveMs; - this.maxPersistenceTimeToLiveMs = maxPersistenceTimeToLiveMs; - this.ticker = ticker; + + public boolean getUsePersistentCache() { + return usePersistentCache; } - - public Ticker getTicker() { - return ticker; + + public ImmutableBytesPtr getCacheId() { + return cacheId; } - - // For testing - public void cleanUp() { - synchronized(this) { - if (serverCaches != null) { - serverCaches.cleanUp(); - } - if (persistentServerCaches != null) { - persistentServerCaches.cleanUp(); - } - } + + private Float rank() { + return (float) hits; } - + @Override - public MemoryManager getMemoryManager() { - return memoryManager; + public int compareTo(CacheEntry o) { + return rank().compareTo(o.rank()); } + } + + public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, + int maxPersistenceTimeToLiveMs) { + this(memoryManager, maxTimeToLiveMs, maxPersistenceTimeToLiveMs, Ticker.systemTicker()); + } + + public TenantCacheImpl(MemoryManager memoryManager, int maxTimeToLiveMs, + int maxPersistenceTimeToLiveMs, Ticker ticker) { + this.memoryManager = memoryManager; + this.maxTimeToLiveMs = maxTimeToLiveMs; + this.maxPersistenceTimeToLiveMs = maxPersistenceTimeToLiveMs; + this.ticker = ticker; + } + + public Ticker getTicker() { + return ticker; + } - private Cache getServerCaches() { - /* Delay creation of this map until it's needed */ + // For testing + public void cleanUp() { 
+ synchronized (this) { + if (serverCaches != null) { + serverCaches.cleanUp(); + } + if (persistentServerCaches != null) { + persistentServerCaches.cleanUp(); + } + } + } + + @Override + public MemoryManager getMemoryManager() { + return memoryManager; + } + + private Cache getServerCaches() { + /* Delay creation of this map until it's needed */ + if (serverCaches == null) { + synchronized (this) { if (serverCaches == null) { - synchronized(this) { - if (serverCaches == null) { - serverCaches = buildCache(maxTimeToLiveMs, false); - } - } + serverCaches = buildCache(maxTimeToLiveMs, false); } - return serverCaches; + } } + return serverCaches; + } - private Cache getPersistentServerCaches() { - /* Delay creation of this map until it's needed */ + private Cache getPersistentServerCaches() { + /* Delay creation of this map until it's needed */ + if (persistentServerCaches == null) { + synchronized (this) { if (persistentServerCaches == null) { - synchronized(this) { - if (persistentServerCaches == null) { - persistentServerCaches = buildCache(maxPersistenceTimeToLiveMs, true); - } - } + persistentServerCaches = buildCache(maxPersistenceTimeToLiveMs, true); } - return persistentServerCaches; + } } + return persistentServerCaches; + } - private Cache buildCache(final int ttl, final boolean isPersistent) { - CacheBuilder builder = CacheBuilder.newBuilder(); - if (isPersistent) { - builder.expireAfterWrite(ttl, TimeUnit.MILLISECONDS); - } else { - builder.expireAfterAccess(ttl, TimeUnit.MILLISECONDS); - } - return builder - .ticker(getTicker()) - .removalListener(new RemovalListener(){ - @Override - public void onRemoval(RemovalNotification notification) { - if (isPersistent || !notification.getValue().getUsePersistentCache()) { - Closeables.closeAllQuietly(Collections.singletonList(notification.getValue())); - } - } - }) - .build(); + private Cache buildCache(final int ttl, + final boolean isPersistent) { + CacheBuilder builder = CacheBuilder.newBuilder(); + if (isPersistent) { + builder.expireAfterWrite(ttl, TimeUnit.MILLISECONDS); + } else { + builder.expireAfterAccess(ttl, TimeUnit.MILLISECONDS); } - - synchronized private void evictInactiveEntries(long bytesNeeded) { - LOGGER.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes"); - CacheEntry[] entries = getPersistentServerCaches().asMap().values().toArray(new CacheEntry[]{}); - Arrays.sort(entries); - long available = this.getMemoryManager().getAvailableMemory(); - for (int i = 0; i < entries.length && available < bytesNeeded; i++) { - CacheEntry entry = entries[i]; - ImmutableBytesPtr cacheId = entry.getCacheId(); - getPersistentServerCaches().invalidate(cacheId); - available = this.getMemoryManager().getAvailableMemory(); - LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " - + available + " bytes available"); + return builder.ticker(getTicker()) + .removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification notification) { + if (isPersistent || !notification.getValue().getUsePersistentCache()) { + Closeables.closeAllQuietly(Collections.singletonList(notification.getValue())); + } } - } + }).build(); + } - private CacheEntry getIfPresent(ImmutableBytesPtr cacheId) { - CacheEntry entry = getPersistentServerCaches().getIfPresent(cacheId); - if (entry != null) { - return entry; - } - return getServerCaches().getIfPresent(cacheId); + synchronized private void evictInactiveEntries(long bytesNeeded) { + LOGGER.debug("Trying to evict inactive cache 
entries to free up " + bytesNeeded + " bytes"); + CacheEntry[] entries = + getPersistentServerCaches().asMap().values().toArray(new CacheEntry[] {}); + Arrays.sort(entries); + long available = this.getMemoryManager().getAvailableMemory(); + for (int i = 0; i < entries.length && available < bytesNeeded; i++) { + CacheEntry entry = entries[i]; + ImmutableBytesPtr cacheId = entry.getCacheId(); + getPersistentServerCaches().invalidate(cacheId); + available = this.getMemoryManager().getAvailableMemory(); + LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " + available + + " bytes available"); } + } - @Override - public Closeable getServerCache(ImmutableBytesPtr cacheId) { - getServerCaches().cleanUp(); - CacheEntry entry = getIfPresent(cacheId); - if (entry == null) { - return null; - } - return entry.closeable; + private CacheEntry getIfPresent(ImmutableBytesPtr cacheId) { + CacheEntry entry = getPersistentServerCaches().getIfPresent(cacheId); + if (entry != null) { + return entry; } + return getServerCaches().getIfPresent(cacheId); + } - @Override - public Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, boolean usePersistentCache, int clientVersion) throws SQLException { - getServerCaches().cleanUp(); - long available = this.getMemoryManager().getAvailableMemory(); - int size = cachePtr.getLength() + txState.length; - if (size > available) { - evictInactiveEntries(size - available + EVICTION_MARGIN_BYTES); - } - MemoryChunk chunk = this.getMemoryManager().allocate(size); - boolean success = false; - try { - CacheEntry entry; - synchronized(this) { - entry = getIfPresent(cacheId); - if (entry == null) { - entry = new CacheEntry( - cacheId, cachePtr, cacheFactory, txState, chunk, - usePersistentCache, useProtoForIndexMaintainer, - clientVersion); - getServerCaches().put(cacheId, entry); - if (usePersistentCache) { - getPersistentServerCaches().put(cacheId, entry); - } - } - entry.incrementLiveQueryCount(); - } - success = true; - return entry; - } finally { - if (!success) { - Closeables.closeAllQuietly(Collections.singletonList(chunk)); - } - } + @Override + public Closeable getServerCache(ImmutableBytesPtr cacheId) { + getServerCaches().cleanUp(); + CacheEntry entry = getIfPresent(cacheId); + if (entry == null) { + return null; } + return entry.closeable; + } - @Override - synchronized public void removeServerCache(ImmutableBytesPtr cacheId) { - CacheEntry entry = getServerCaches().getIfPresent(cacheId); + @Override + public Closeable addServerCache(ImmutableBytesPtr cacheId, ImmutableBytesWritable cachePtr, + byte[] txState, ServerCacheFactory cacheFactory, boolean useProtoForIndexMaintainer, + boolean usePersistentCache, int clientVersion) throws SQLException { + getServerCaches().cleanUp(); + long available = this.getMemoryManager().getAvailableMemory(); + int size = cachePtr.getLength() + txState.length; + if (size > available) { + evictInactiveEntries(size - available + EVICTION_MARGIN_BYTES); + } + MemoryChunk chunk = this.getMemoryManager().allocate(size); + boolean success = false; + try { + CacheEntry entry; + synchronized (this) { + entry = getIfPresent(cacheId); if (entry == null) { - return; - } - entry.decrementLiveQueryCount(); - if (!entry.isLive()) { - LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get()) - + " is no longer live, invalidate it"); - getServerCaches().invalidate(cacheId); + entry = new CacheEntry(cacheId, 
cachePtr, cacheFactory, txState, chunk, + usePersistentCache, useProtoForIndexMaintainer, clientVersion); + getServerCaches().put(cacheId, entry); + if (usePersistentCache) { + getPersistentServerCaches().put(cacheId, entry); + } } + entry.incrementLiveQueryCount(); + } + success = true; + return entry; + } finally { + if (!success) { + Closeables.closeAllQuietly(Collections.singletonList(chunk)); + } } + } - @Override - public void removeAllServerCache() { - getServerCaches().invalidateAll(); - getPersistentServerCaches().invalidateAll(); + @Override + synchronized public void removeServerCache(ImmutableBytesPtr cacheId) { + CacheEntry entry = getServerCaches().getIfPresent(cacheId); + if (entry == null) { + return; + } + entry.decrementLiveQueryCount(); + if (!entry.isLive()) { + LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get()) + " is no longer live, invalidate it"); + getServerCaches().invalidate(cacheId); } + } + + @Override + public void removeAllServerCache() { + getServerCaches().invalidateAll(); + getPersistentServerCaches().invalidateAll(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java index 69807c8a6ee..5fae6199424 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,39 +27,39 @@ */ public class CallRunner { - /** - * Helper {@link Callable} that also declares the type of exception it will throw, to help with - * type safety/generics for java - * @param value type returned - * @param type of check exception thrown - */ - public static interface CallableThrowable extends Callable { - @Override - public V call() throws E; - } + /** + * Helper {@link Callable} that also declares the type of exception it will throw, to help with + * type safety/generics for java + * @param value type returned + * @param type of check exception thrown + */ + public static interface CallableThrowable extends Callable { + @Override + public V call() throws E; + } - private static final Logger LOGGER = LoggerFactory.getLogger(CallRunner.class); + private static final Logger LOGGER = LoggerFactory.getLogger(CallRunner.class); - private CallRunner() { - // no ctor for util class - } + private CallRunner() { + // no ctor for util class + } - public static > V run(T call, - CallWrapper... wrappers) throws E { + public static > V run(T call, + CallWrapper... 
wrappers) throws E { + try { + for (CallWrapper wrap : wrappers) { + wrap.before(); + } + return call.call(); + } finally { + // have to go in reverse, to match the before logic + for (int i = wrappers.length - 1; i >= 0; i--) { try { - for (CallWrapper wrap : wrappers) { - wrap.before(); - } - return call.call(); - } finally { - // have to go in reverse, to match the before logic - for (int i = wrappers.length - 1; i >= 0; i--) { - try { - wrappers[i].after(); - } catch (Exception e) { - LOGGER.error("Failed to complete wrapper " + wrappers[i], e); - } - } + wrappers[i].after(); + } catch (Exception e) { + LOGGER.error("Failed to complete wrapper " + wrappers[i], e); } + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java index b84dd5d2f3b..783c36caa3b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/call/CallWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +22,8 @@ */ public interface CallWrapper { - public void before(); + public void before(); - public void after(); + public void after(); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java index e43edb21b4d..db5a2087358 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/AggregationManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,86 +30,89 @@ import org.apache.phoenix.expression.aggregator.ServerAggregators; import org.apache.phoenix.expression.function.SingleAggregateFunction; import org.apache.phoenix.expression.visitor.SingleAggregateFunctionVisitor; - import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; /** - * * Class that manages aggregations during query compilation - * - * * @since 0.1 */ public class AggregationManager { - private ClientAggregators aggregators; - private int position = 0; - - public AggregationManager() { - } + private ClientAggregators aggregators; + private int position = 0; - public ClientAggregators getAggregators() { - return aggregators; - } - - public boolean isEmpty() { - return aggregators == null || aggregators.getAggregatorCount() == 0; - } - - /** - * @return allocate the next available zero-based positional index - * for the client-side aggregate function. - */ - protected int nextPosition() { - return position++; - } - - public void setAggregators(ClientAggregators clientAggregator) { - this.aggregators = clientAggregator; - } - /** - * Compiles projection by: - * 1) Adding RowCount aggregate function if not present when limiting rows. We need this - * to track how many rows have been scanned. 
- * 2) Reordering aggregation functions (by putting fixed length aggregates first) to - * optimize the positional access of the aggregated value. - */ - public void compile(StatementContext context, GroupByCompiler.GroupBy groupBy) throws - SQLException { - final Set aggFuncSet = Sets.newHashSetWithExpectedSize(context.getExpressionManager().getExpressionCount()); + public AggregationManager() { + } - Iterator expressions = context.getExpressionManager().getExpressions(); - while (expressions.hasNext()) { - Expression expression = expressions.next(); - expression.accept(new SingleAggregateFunctionVisitor() { - @Override - public Iterator visitEnter(SingleAggregateFunction function) { - aggFuncSet.add(function); - return Collections.emptyIterator(); - } - }); - } - if (aggFuncSet.isEmpty() && groupBy.isEmpty()) { - return; - } - List aggFuncs = new ArrayList(aggFuncSet); - Collections.sort(aggFuncs, SingleAggregateFunction.SCHEMA_COMPARATOR); + public ClientAggregators getAggregators() { + return aggregators; + } - int minNullableIndex = getMinNullableIndex(aggFuncs,groupBy.isEmpty()); - context.getScan().setAttribute(BaseScannerRegionObserverConstants.AGGREGATORS, ServerAggregators.serialize(aggFuncs, minNullableIndex)); - ClientAggregators clientAggregators = new ClientAggregators(aggFuncs, minNullableIndex); - context.getAggregationManager().setAggregators(clientAggregators); - } + public boolean isEmpty() { + return aggregators == null || aggregators.getAggregatorCount() == 0; + } + + /** + * @return allocate the next available zero-based positional index for the client-side aggregate + * function. + */ + protected int nextPosition() { + return position++; + } + + public void setAggregators(ClientAggregators clientAggregator) { + this.aggregators = clientAggregator; + } + + /** + * Compiles projection by: 1) Adding RowCount aggregate function if not present when limiting + * rows. We need this to track how many rows have been scanned. 2) Reordering aggregation + * functions (by putting fixed length aggregates first) to optimize the positional access of the + * aggregated value. + */ + public void compile(StatementContext context, GroupByCompiler.GroupBy groupBy) + throws SQLException { + final Set aggFuncSet = + Sets.newHashSetWithExpectedSize(context.getExpressionManager().getExpressionCount()); - private static int getMinNullableIndex(List aggFuncs, boolean isUngroupedAggregation) { - int minNullableIndex = aggFuncs.size(); - for (int i = 0; i < aggFuncs.size(); i++) { - SingleAggregateFunction aggFunc = aggFuncs.get(i); - if (isUngroupedAggregation ? 
aggFunc.getAggregator().isNullable() : aggFunc.getAggregatorExpression().isNullable()) { - minNullableIndex = i; - break; - } + Iterator expressions = context.getExpressionManager().getExpressions(); + while (expressions.hasNext()) { + Expression expression = expressions.next(); + expression.accept(new SingleAggregateFunctionVisitor() { + @Override + public Iterator visitEnter(SingleAggregateFunction function) { + aggFuncSet.add(function); + return Collections.emptyIterator(); } - return minNullableIndex; + }); + } + if (aggFuncSet.isEmpty() && groupBy.isEmpty()) { + return; + } + List aggFuncs = new ArrayList(aggFuncSet); + Collections.sort(aggFuncs, SingleAggregateFunction.SCHEMA_COMPARATOR); + + int minNullableIndex = getMinNullableIndex(aggFuncs, groupBy.isEmpty()); + context.getScan().setAttribute(BaseScannerRegionObserverConstants.AGGREGATORS, + ServerAggregators.serialize(aggFuncs, minNullableIndex)); + ClientAggregators clientAggregators = new ClientAggregators(aggFuncs, minNullableIndex); + context.getAggregationManager().setAggregators(clientAggregators); + } + + private static int getMinNullableIndex(List aggFuncs, + boolean isUngroupedAggregation) { + int minNullableIndex = aggFuncs.size(); + for (int i = 0; i < aggFuncs.size(); i++) { + SingleAggregateFunction aggFunc = aggFuncs.get(i); + if ( + isUngroupedAggregation + ? aggFunc.getAggregator().isNullable() + : aggFunc.getAggregatorExpression().isNullable() + ) { + minNullableIndex = i; + break; + } } + return minNullableIndex; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java index 60eb59a4c7f..11b7306b5ed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BaseMutationPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,62 +26,62 @@ import org.apache.phoenix.schema.TableRef; public abstract class BaseMutationPlan implements MutationPlan { - private final StatementContext context; - private final Operation operation; - - public BaseMutationPlan(StatementContext context, Operation operation) { - this.context = context; - this.operation = operation; - } - - @Override - public Operation getOperation() { - return operation; - } - - @Override - public StatementContext getContext() { - return context; - } + private final StatementContext context; + private final Operation operation; - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } + public BaseMutationPlan(StatementContext context, Operation operation) { + this.context = context; + this.operation = operation; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return ExplainPlan.EMPTY_PLAN; - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public TableRef getTargetRef() { - return context.getCurrentTable(); - } - - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } + @Override + public StatementContext getContext() { + return context; + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return 0l; - } + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return 0l; - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return ExplainPlan.EMPTY_PLAN; + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } + @Override + public TableRef getTargetRef() { + return context.getCurrentTable(); + } - @Override - public QueryPlan getQueryPlan() { - return null; - } + @Override + public Set getSourceRefs() { + return Collections.emptySet(); + } -} \ No newline at end of file + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return 0l; + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return 0l; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } + + @Override + public QueryPlan getQueryPlan() { + return null; + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java index f7a2a6d3dc4..732a050cc6e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/BindManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,54 +27,46 @@ import org.apache.phoenix.parse.BindParseNode; import org.apache.phoenix.schema.PDatum; - /** - * - * Class that manages binding parameters and checking type matching. There are - * two main usages: - * - * 1) the standard query case where we have the values for the binds. - * 2) the retrieve param metadata case where we don't have the bind values. - * - * In both cases, during query compilation we figure out what type the bind variable - * "should" be, based on how it's used in the query. For example {@code foo < ? } would expect - * that the bind variable type matches or can be coerced to the type of foo. For (1), - * we check that the bind value has the correct type and for (2) we set the param + * Class that manages binding parameters and checking type matching. There are two main usages: 1) + * the standard query case where we have the values for the binds. 2) the retrieve param metadata + * case where we don't have the bind values. In both cases, during query compilation we figure out + * what type the bind variable "should" be, based on how it's used in the query. For example + * {@code foo < ? } would expect that the bind variable type matches or can be coerced to the type + * of foo. For (1), we check that the bind value has the correct type and for (2) we set the param * metadata type. - * - * * @since 0.1 */ public class BindManager { - public static final Object UNBOUND_PARAMETER = new Object(); + public static final Object UNBOUND_PARAMETER = new Object(); - private final List binds; - private final PhoenixParameterMetaData bindMetaData; + private final List binds; + private final PhoenixParameterMetaData bindMetaData; - public BindManager(List binds) { - this.binds = binds; - this.bindMetaData = new PhoenixParameterMetaData(binds.size()); - } + public BindManager(List binds) { + this.binds = binds; + this.bindMetaData = new PhoenixParameterMetaData(binds.size()); + } - public ParameterMetaData getParameterMetaData() { - return bindMetaData; + public ParameterMetaData getParameterMetaData() { + return bindMetaData; + } + + public Object getBindValue(BindParseNode node) throws SQLException { + int index = node.getIndex(); + if (index < 0 || index >= binds.size()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) + .setMessage("binds size: " + binds.size() + "; index: " + index).build().buildException(); } - - public Object getBindValue(BindParseNode node) throws SQLException { - int index = node.getIndex(); - if (index < 0 || index >= binds.size()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) - .setMessage("binds size: " + binds.size() + "; index: " + index).build().buildException(); - } - Object value = binds.get(index); - if (value == UNBOUND_PARAMETER) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) - .setMessage(node.toString()).build().buildException(); - } - return value; + Object value = binds.get(index); + if (value == UNBOUND_PARAMETER) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) + .setMessage(node.toString()).build().buildException(); } + return value; + } - public void addParamMetaData(BindParseNode bind, PDatum column) throws SQLException { - 
bindMetaData.addParam(bind,column); - } + public void addParamMetaData(BindParseNode bind, PDatum column) throws SQLException { + bindMetaData.addParam(bind, column); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java index 708262c2043..37170546e5d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CloseStatementCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,29 +28,29 @@ import org.apache.phoenix.schema.MetaDataClient; public class CloseStatementCompiler { - private final PhoenixStatement statement; - private final Operation operation; + private final PhoenixStatement statement; + private final Operation operation; - public CloseStatementCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } + public CloseStatementCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } - public MutationPlan compile(final CloseStatement close) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final StatementContext context = new StatementContext(statement); - final MetaDataClient client = new MetaDataClient(connection); - - return new BaseMutationPlan(context, operation) { - @Override - public MutationState execute() throws SQLException { - return client.close(close); - } + public MutationPlan compile(final CloseStatement close) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final StatementContext context = new StatementContext(statement); + final MetaDataClient client = new MetaDataClient(connection); - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CLOSE CURSOR")); - } - }; - } -} \ No newline at end of file + return new BaseMutationPlan(context, operation) { + @Override + public MutationState execute() throws SQLException { + return client.close(close); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CLOSE CURSOR")); + } + }; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java index 3f4c83c46d7..b13c940c961 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnNameTrackingExpressionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,25 +22,24 @@ import org.apache.phoenix.parse.ColumnParseNode; import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class ColumnNameTrackingExpressionCompiler extends StatelessTraverseAllParseNodeVisitor { - private List dataColumnNames = Lists.newArrayListWithExpectedSize(10); + private List dataColumnNames = Lists.newArrayListWithExpectedSize(10); - public void reset() { - this.getDataColumnNames().clear(); - } + public void reset() { + this.getDataColumnNames().clear(); + } - @Override - public Void visit(ColumnParseNode node) throws SQLException { - getDataColumnNames().add(node.getName()); - return null; - } + @Override + public Void visit(ColumnParseNode node) throws SQLException { + getDataColumnNames().add(node.getName()); + return null; + } - public List getDataColumnNames() { - return dataColumnNames; - } + public List getDataColumnNames() { + return dataColumnNames; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java index 9cd600cdd7e..6b763e856b5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnProjector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,67 +20,59 @@ import java.sql.SQLException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; - - +import org.apache.phoenix.schema.types.PDataType; /** - * * Interface used to access the value of a projected column. - * - * * @since 0.1 */ public interface ColumnProjector { - /** - * Get the column name - * @return the database column name - */ - String getName(); + /** + * Get the column name + * @return the database column name + */ + String getName(); + + /** + * Get the expression string + * @return the label as it was referenced in the query + */ + String getLabel(); + + /** + * Get the expression + * @return the expression for the column projector + */ + public Expression getExpression(); - /** - * Get the expression string - * @return the label as it was referenced in the query - */ - String getLabel(); + // TODO: An expression may contain references to multiple tables. + /** + * Get the name of the hbase table containing the column + * @return the hbase table name + */ + String getTableName(); - /** - * Get the expression - * @return the expression for the column projector - */ - public Expression getExpression(); - - // TODO: An expression may contain references to multiple tables. 
- /** - * Get the name of the hbase table containing the column - * @return the hbase table name - */ - String getTableName(); - - /** - * Get the value of the column, coercing it if necessary to the specified type - * @param tuple the row containing the column - * @param type the type to which to coerce the binary value - * @param ptr used to retrieve the value - * @return the object representation of the column value. - * @throws SQLException - */ - Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr) throws SQLException; + /** + * Get the value of the column, coercing it if necessary to the specified type + * @param tuple the row containing the column + * @param type the type to which to coerce the binary value + * @param ptr used to retrieve the value + * @return the object representation of the column value. + */ + Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr) throws SQLException; - /** - * Get the value of the column, coercing it if necessary to the specified type - * @param tuple the row containing the column - * @param type the type to which to coerce the binary value - * @param ptr used to retrieve the value - * @param jdbcType The java type to convert to, for rs.getObject() - * @return the object representation of the column value. - * @throws SQLException - */ - Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr, Class jdbcType) - throws SQLException; + /** + * Get the value of the column, coercing it if necessary to the specified type + * @param tuple the row containing the column + * @param type the type to which to coerce the binary value + * @param ptr used to retrieve the value + * @param jdbcType The java type to convert to, for rs.getObject() + * @return the object representation of the column value. + */ + Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr, Class jdbcType) + throws SQLException; - boolean isCaseSensitive(); + boolean isCaseSensitive(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java index a3d856706d5..0f8b110f42b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ColumnResolver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,70 +20,61 @@ import java.sql.SQLException; import java.util.List; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.phoenix.parse.PFunction; import org.apache.phoenix.parse.PSchema; import org.apache.phoenix.schema.AmbiguousColumnException; -import org.apache.phoenix.schema.AmbiguousTableException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.ColumnRef; import org.apache.phoenix.schema.TableRef; - - /** - * - * Interface used to resolve column references occurring - * in the select statement. - * - * + * Interface used to resolve column references occurring in the select statement. * @since 0.1 */ public interface ColumnResolver { - - /** - * Returns the collection of resolved tables in the FROM clause. 
- */ - public List getTables(); - - /** - * Returns the collection of resolved functions. - */ - public List getFunctions(); - /** - * Resolves table using name or alias. - * @param schemaName the schema name - * @param tableName the table name or table alias - * @return the resolved TableRef - * @throws SQLException - */ - public TableRef resolveTable(String schemaName, String tableName) throws SQLException; - - /** - * Resolves column using name and alias. - * @param schemaName TODO - * @param tableName TODO - * @param colName TODO - * @return the resolved ColumnRef - * @throws ColumnNotFoundException if the column could not be resolved - * @throws AmbiguousColumnException if the column name is ambiguous - */ - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException; - - /** - * Resolves function using functionName. - * @param functionName - * @return the resolved PFunction - * @throws ColumnNotFoundException if the column could not be resolved - * @throws AmbiguousColumnException if the column name is ambiguous - */ - public PFunction resolveFunction(String functionName) throws SQLException; + /** + * Returns the collection of resolved tables in the FROM clause. + */ + public List getTables(); + + /** + * Returns the collection of resolved functions. + */ + public List getFunctions(); + + /** + * Resolves table using name or alias. + * @param schemaName the schema name + * @param tableName the table name or table alias + * @return the resolved TableRef + */ + public TableRef resolveTable(String schemaName, String tableName) throws SQLException; + + /** + * Resolves column using name and alias. + * @param schemaName TODO + * @param tableName TODO + * @param colName TODO + * @return the resolved ColumnRef + * @throws ColumnNotFoundException if the column could not be resolved + * @throws AmbiguousColumnException if the column name is ambiguous + */ + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException; + + /** + * Resolves function using functionName. + * @return the resolved PFunction + * @throws ColumnNotFoundException if the column could not be resolved + * @throws AmbiguousColumnException if the column name is ambiguous + */ + public PFunction resolveFunction(String functionName) throws SQLException; - public boolean hasUDFs(); + public boolean hasUDFs(); - public PSchema resolveSchema(String schemaName) throws SQLException; + public PSchema resolveSchema(String schemaName) throws SQLException; - public List getSchemas(); + public List getSchemas(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java index aa97f0760a8..1f2e4569906 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CompiledOffset.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,21 +23,21 @@ * CompiledOffset represents the result of the Compiler on the OFFSET clause. 
*/ public class CompiledOffset { - public static final CompiledOffset EMPTY_COMPILED_OFFSET = - new CompiledOffset(Optional.absent(), Optional.absent()); - private final Optional integerOffset; - private final Optional byteOffset; + public static final CompiledOffset EMPTY_COMPILED_OFFSET = + new CompiledOffset(Optional. absent(), Optional. absent()); + private final Optional integerOffset; + private final Optional byteOffset; - public CompiledOffset(Optional integerOffset, Optional byteOffset) { - this.integerOffset = integerOffset; - this.byteOffset = byteOffset; - } + public CompiledOffset(Optional integerOffset, Optional byteOffset) { + this.integerOffset = integerOffset; + this.byteOffset = byteOffset; + } - public Optional getIntegerOffset() { - return integerOffset; - } + public Optional getIntegerOffset() { + return integerOffset; + } - public Optional getByteOffset() { - return byteOffset; - } + public Optional getByteOffset() { + return byteOffset; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java index ea4f486e4fa..6eb67ad27af 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateFunctionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,58 +28,57 @@ public class CreateFunctionCompiler { - private final PhoenixStatement statement; - - public CreateFunctionCompiler(PhoenixStatement statement) { - this.statement = statement; - } + private final PhoenixStatement statement; - public MutationPlan compile(final CreateFunctionStatement create) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - PhoenixConnection connectionToBe = connection; - final StatementContext context = new StatementContext(statement); - final MetaDataClient client = new MetaDataClient(connectionToBe); - - return new CreateFunctionMutationPlan(context, create, client, connection); - } + public CreateFunctionCompiler(PhoenixStatement statement) { + this.statement = statement; + } - private static class CreateFunctionMutationPlan extends BaseMutationPlan { + public MutationPlan compile(final CreateFunctionStatement create) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + PhoenixConnection connectionToBe = connection; + final StatementContext context = new StatementContext(statement); + final MetaDataClient client = new MetaDataClient(connectionToBe); - private final StatementContext context; - private final CreateFunctionStatement create; - private final MetaDataClient client; - private final PhoenixConnection connection; + return new CreateFunctionMutationPlan(context, create, client, connection); + } - private CreateFunctionMutationPlan(StatementContext context, CreateFunctionStatement create, - MetaDataClient client, PhoenixConnection connection) { - super(context, create.getOperation()); - this.context = context; - this.create = create; - this.client = client; - this.connection = connection; - } + private static class 
CreateFunctionMutationPlan extends BaseMutationPlan { - @Override - public MutationState execute() throws SQLException { - try { - return client.createFunction(create); - } finally { - if (client.getConnection() != connection) { - client.getConnection().close(); - } - } - } + private final StatementContext context; + private final CreateFunctionStatement create; + private final MetaDataClient client; + private final PhoenixConnection connection; - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE" - + (create.getFunctionInfo().isReplace() ? " OR REPLACE" : "") - + " FUNCTION")); - } + private CreateFunctionMutationPlan(StatementContext context, CreateFunctionStatement create, + MetaDataClient client, PhoenixConnection connection) { + super(context, create.getOperation()); + this.context = context; + this.create = create; + this.client = client; + this.connection = connection; + } - @Override - public StatementContext getContext() { - return context; + @Override + public MutationState execute() throws SQLException { + try { + return client.createFunction(create); + } finally { + if (client.getConnection() != connection) { + client.getConnection().close(); } + } + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList( + "CREATE" + (create.getFunctionInfo().isReplace() ? " OR REPLACE" : "") + " FUNCTION")); + } + + @Override + public StatementContext getContext() { + return context; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java index c98f9a6a5c6..3b0c791c02e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateIndexCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,13 @@ */ package org.apache.phoenix.compile; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.Scan; @@ -66,216 +73,214 @@ import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - public class CreateIndexCompiler { - private final PhoenixStatement statement; - private final Operation operation; + private final PhoenixStatement statement; + private final Operation operation; - public CreateIndexCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } + public CreateIndexCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } - /** - * This is to check if the index where clause has a subquery - */ - private static class IndexWhereParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { - private boolean hasSubquery = false; + /** + * This is to check if the index where clause has a subquery + */ + private static class IndexWhereParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { + private boolean hasSubquery = false; - @Override - public Void visit(SubqueryParseNode node) throws SQLException { - hasSubquery = true; - return null; - } + @Override + public Void visit(SubqueryParseNode node) throws SQLException { + hasSubquery = true; + return null; } + } - private String getValue(PDataType type) { - if (type instanceof PNumericType) { - return "0"; - } else if (type instanceof PChar || type instanceof PVarchar) { - return "'a'"; - } else if (type instanceof PDate || type instanceof PUnsignedDate || type instanceof PTime - || type instanceof PUnsignedTime || type instanceof PTimestamp - || type instanceof PUnsignedTimestamp) { - Timestamp now = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - return "TO_DATE('" + now + "','yyyy-MM-dd HH:mm:ss.SSS', 'PST')"; - } else if (type instanceof PBoolean) { - return "TRUE"; - } else if (type instanceof PDateArray || type instanceof PUnsignedDateArray - || type instanceof PTimeArray || type instanceof PUnsignedTimeArray - || type instanceof PTimestampArray || type instanceof PUnsignedTimestampArray) { - return "ARRAY[" + getValue(PDate.INSTANCE) + "]"; - } else if (type instanceof PArrayDataType) { - return "ARRAY" + type.getSampleValue().toString(); - } else if (type instanceof PJson || type instanceof PBson) { - return "'{\"a\":\"b\"}'"; - } else { - return "0123"; - } + private String getValue(PDataType type) { + if (type instanceof PNumericType) { + return "0"; + } else if (type instanceof PChar || type instanceof PVarchar) { + return "'a'"; + } else if ( + type instanceof PDate || type instanceof PUnsignedDate || type instanceof PTime + || type instanceof PUnsignedTime || type instanceof PTimestamp + || type instanceof PUnsignedTimestamp + ) { + 
Timestamp now = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + return "TO_DATE('" + now + "','yyyy-MM-dd HH:mm:ss.SSS', 'PST')"; + } else if (type instanceof PBoolean) { + return "TRUE"; + } else if ( + type instanceof PDateArray || type instanceof PUnsignedDateArray || type instanceof PTimeArray + || type instanceof PUnsignedTimeArray || type instanceof PTimestampArray + || type instanceof PUnsignedTimestampArray + ) { + return "ARRAY[" + getValue(PDate.INSTANCE) + "]"; + } else if (type instanceof PArrayDataType) { + return "ARRAY" + type.getSampleValue().toString(); + } else if (type instanceof PJson || type instanceof PBson) { + return "'{\"a\":\"b\"}'"; + } else { + return "0123"; } + } - /** - * Verifies that index WHERE clause does not include a subquery and it can - * be evaluated on a single data table row. - * - */ - private void verifyIndexWhere(ParseNode indexWhere, StatementContext context, - TableName dataTableName) throws SQLException { - if (indexWhere == null) { - return; - } - // Verify that index WHERE clause does not include a subquery - PhoenixConnection connection = context.getConnection(); - IndexWhereParseNodeVisitor indexWhereParseNodeVisitor = new IndexWhereParseNodeVisitor(); - indexWhere.accept(indexWhereParseNodeVisitor); - if (indexWhereParseNodeVisitor.hasSubquery) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_WHERE_WITH_SUBQUERY). - build().buildException(); - } + /** + * Verifies that index WHERE clause does not include a subquery and it can be evaluated on a + * single data table row. + */ + private void verifyIndexWhere(ParseNode indexWhere, StatementContext context, + TableName dataTableName) throws SQLException { + if (indexWhere == null) { + return; + } + // Verify that index WHERE clause does not include a subquery + PhoenixConnection connection = context.getConnection(); + IndexWhereParseNodeVisitor indexWhereParseNodeVisitor = new IndexWhereParseNodeVisitor(); + indexWhere.accept(indexWhereParseNodeVisitor); + if (indexWhereParseNodeVisitor.hasSubquery) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_WHERE_WITH_SUBQUERY).build() + .buildException(); + } - // Verify that index WHERE clause can be evaluated on a single data table row + // Verify that index WHERE clause can be evaluated on a single data table row - // Convert the index WHERE ParseNode to an Expression - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - Expression indexWhereExpression = indexWhere.accept(expressionCompiler); - PTable dataTable = connection.getTable(dataTableName.toString()); + // Convert the index WHERE ParseNode to an Expression + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + Expression indexWhereExpression = indexWhere.accept(expressionCompiler); + PTable dataTable = connection.getTable(dataTableName.toString()); - // Create a full data table row. Skip generating values for view constants as they - // will be generated by the Upsert compiler - boolean autoCommit = connection.getAutoCommit(); - connection.setAutoCommit(false); - StringBuilder stringBuilder = new StringBuilder("UPSERT INTO "); - stringBuilder.append(dataTableName); - int startingColumnIndex = dataTable.getBucketNum() != null ? 1 : 0; - startingColumnIndex += dataTable.getTenantId() != null ? 
1 : 0; - stringBuilder.append(" ("); - PColumn column; - byte[] value; - int i = startingColumnIndex; - for (; i < dataTable.getColumns().size() - 1; i++) { - column = dataTable.getColumns().get(i); - value = column.getViewConstant(); - if (value == null) { - stringBuilder.append(SchemaUtil.getEscapedArgument(column.getName().getString()) - + ","); - } - } - column = dataTable.getColumns().get(i); - value = column.getViewConstant(); - if (value == null) { - stringBuilder.append(SchemaUtil.getEscapedArgument(column.getName().getString()) + ")"); - } else { - stringBuilder.append(")"); - } + // Create a full data table row. Skip generating values for view constants as they + // will be generated by the Upsert compiler + boolean autoCommit = connection.getAutoCommit(); + connection.setAutoCommit(false); + StringBuilder stringBuilder = new StringBuilder("UPSERT INTO "); + stringBuilder.append(dataTableName); + int startingColumnIndex = dataTable.getBucketNum() != null ? 1 : 0; + startingColumnIndex += dataTable.getTenantId() != null ? 1 : 0; + stringBuilder.append(" ("); + PColumn column; + byte[] value; + int i = startingColumnIndex; + for (; i < dataTable.getColumns().size() - 1; i++) { + column = dataTable.getColumns().get(i); + value = column.getViewConstant(); + if (value == null) { + stringBuilder.append(SchemaUtil.getEscapedArgument(column.getName().getString()) + ","); + } + } + column = dataTable.getColumns().get(i); + value = column.getViewConstant(); + if (value == null) { + stringBuilder.append(SchemaUtil.getEscapedArgument(column.getName().getString()) + ")"); + } else { + stringBuilder.append(")"); + } - stringBuilder.append(" Values("); - i = startingColumnIndex; - for (; i < dataTable.getColumns().size() - 1; i++) { - column = dataTable.getColumns().get(i); - value = column.getViewConstant(); - if (value == null) { - PDataType dataType = column.getDataType(); - stringBuilder.append(getValue(dataType) + ","); - } - } - column = dataTable.getColumns().get(i); - value = column.getViewConstant(); - if (value == null) { - PDataType dataType = column.getDataType(); - stringBuilder.append(getValue(dataType) + ")"); + stringBuilder.append(" Values("); + i = startingColumnIndex; + for (; i < dataTable.getColumns().size() - 1; i++) { + column = dataTable.getColumns().get(i); + value = column.getViewConstant(); + if (value == null) { + PDataType dataType = column.getDataType(); + stringBuilder.append(getValue(dataType) + ","); + } + } + column = dataTable.getColumns().get(i); + value = column.getViewConstant(); + if (value == null) { + PDataType dataType = column.getDataType(); + stringBuilder.append(getValue(dataType) + ")"); + } else { + stringBuilder.append(")"); + } + + try ( + PreparedStatement ps = context.getConnection().prepareStatement(stringBuilder.toString())) { + ps.execute(); + Iterator>> dataTableNameAndMutationIterator = + PhoenixRuntime.getUncommittedDataIterator(connection); + Pair> dataTableNameAndMutation = null; + while (dataTableNameAndMutationIterator.hasNext()) { + dataTableNameAndMutation = dataTableNameAndMutationIterator.next(); + if ( + java.util.Arrays.equals(dataTableNameAndMutation.getFirst(), + dataTable.getPhysicalName().getBytes()) + ) { + break; } else { - stringBuilder.append(")"); + dataTableNameAndMutation = null; } + } + if (dataTableNameAndMutation == null) { + throw new RuntimeException("Unexpected result from " + + "PhoenixRuntime#getUncommittedDataIterator for " + dataTableName); + } - try (PreparedStatement ps = - 
context.getConnection().prepareStatement(stringBuilder.toString())) { - ps.execute(); - Iterator>> dataTableNameAndMutationIterator = - PhoenixRuntime.getUncommittedDataIterator(connection); - Pair> dataTableNameAndMutation = null; - while (dataTableNameAndMutationIterator.hasNext()) { - dataTableNameAndMutation = dataTableNameAndMutationIterator.next(); - if (java.util.Arrays.equals(dataTableNameAndMutation.getFirst(), - dataTable.getPhysicalName().getBytes())) { - break; - } else { - dataTableNameAndMutation = null; - } - } - if (dataTableNameAndMutation == null) { - throw new RuntimeException( - "Unexpected result from " + "PhoenixRuntime#getUncommittedDataIterator for " - + dataTableName); - } + // Evaluate the WHERE expression on the data table row + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + List cols = dataTableNameAndMutation.getSecond(); + Collections.sort(cols, CellComparator.getInstance()); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(cols); + if (!indexWhereExpression.evaluate(tuple, ptr)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_EVALUATE_INDEX_WHERE).build() + .buildException(); + } + } finally { + connection.setAutoCommit(autoCommit); + } + } - // Evaluate the WHERE expression on the data table row - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - List cols = dataTableNameAndMutation.getSecond(); - Collections.sort(cols, CellComparator.getInstance()); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(cols); - if (!indexWhereExpression.evaluate(tuple, ptr)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_EVALUATE_INDEX_WHERE). - build().buildException(); - } - } finally { - connection.setAutoCommit(autoCommit); + public MutationPlan compile(final CreateIndexStatement create) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final ColumnResolver resolver = + FromCompiler.getResolverForCreateIndex(create, connection, create.getUdfParseNodes()); + Scan scan = new Scan(); + final StatementContext context = + new StatementContext(statement, resolver, scan, new SequenceManager(statement)); + verifyIndexWhere(create.getWhere(), context, create.getTable().getName()); + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + List splitNodes = create.getSplitNodes(); + if (create.getIndexType() == IndexType.LOCAL) { + if (!splitNodes.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPLIT_LOCAL_INDEX).build() + .buildException(); + } + List> list = + create.getProps() != null ? 
create.getProps().get("") : null; + if (list != null) { + for (Pair pair : list) { + if (pair.getFirst().equals(PhoenixDatabaseMetaData.SALT_BUCKETS)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SALT_LOCAL_INDEX).build() + .buildException(); + } } + } } - public MutationPlan compile(final CreateIndexStatement create) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final ColumnResolver resolver - = FromCompiler.getResolverForCreateIndex( - create, connection, create.getUdfParseNodes()); - Scan scan = new Scan(); - final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement)); - verifyIndexWhere(create.getWhere(), context, create.getTable().getName()); - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - List splitNodes = create.getSplitNodes(); - if (create.getIndexType() == IndexType.LOCAL) { - if (!splitNodes.isEmpty()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPLIT_LOCAL_INDEX) - .build().buildException(); - } - List> list = create.getProps() != null ? create.getProps().get("") : null; - if (list != null) { - for (Pair pair : list) { - if (pair.getFirst().equals(PhoenixDatabaseMetaData.SALT_BUCKETS)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SALT_LOCAL_INDEX) - .build().buildException(); - } - } - } - } - final byte[][] splits = new byte[splitNodes.size()][]; - for (int i = 0; i < splits.length; i++) { - ParseNode node = splitNodes.get(i); - if (!node.isStateless()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT) - .setMessage("Node: " + node).build().buildException(); - } - LiteralExpression expression = (LiteralExpression)node.accept(expressionCompiler); - splits[i] = expression.getBytes(); - } - final MetaDataClient client = new MetaDataClient(connection); - - return new BaseMutationPlan(context, operation) { - @Override - public MutationState execute() throws SQLException { - return client.createIndex(create, splits); - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE INDEX")); - } - - }; + final byte[][] splits = new byte[splitNodes.size()][]; + for (int i = 0; i < splits.length; i++) { + ParseNode node = splitNodes.get(i); + if (!node.isStateless()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT) + .setMessage("Node: " + node).build().buildException(); + } + LiteralExpression expression = (LiteralExpression) node.accept(expressionCompiler); + splits[i] = expression.getBytes(); } + final MetaDataClient client = new MetaDataClient(connection); + + return new BaseMutationPlan(context, operation) { + @Override + public MutationState execute() throws SQLException { + return client.createIndex(create, splits); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CREATE INDEX")); + } + + }; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java index 480b2b6ed17..ae9a046949f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSchemaCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with 
the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,54 +28,54 @@ public class CreateSchemaCompiler { - private final PhoenixStatement statement; + private final PhoenixStatement statement; - public CreateSchemaCompiler(PhoenixStatement statement) { - this.statement = statement; - } + public CreateSchemaCompiler(PhoenixStatement statement) { + this.statement = statement; + } - public MutationPlan compile(final CreateSchemaStatement create) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final StatementContext context = new StatementContext(statement); - final MetaDataClient client = new MetaDataClient(connection); - return new CreateSchemaMutationPlan(context, create, client, connection); - } + public MutationPlan compile(final CreateSchemaStatement create) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final StatementContext context = new StatementContext(statement); + final MetaDataClient client = new MetaDataClient(connection); + return new CreateSchemaMutationPlan(context, create, client, connection); + } - private static class CreateSchemaMutationPlan extends BaseMutationPlan { + private static class CreateSchemaMutationPlan extends BaseMutationPlan { - private final StatementContext context; - private final CreateSchemaStatement create; - private final MetaDataClient client; - private final PhoenixConnection connection; + private final StatementContext context; + private final CreateSchemaStatement create; + private final MetaDataClient client; + private final PhoenixConnection connection; - private CreateSchemaMutationPlan(StatementContext context, CreateSchemaStatement create, - MetaDataClient client, PhoenixConnection connection) { - super(context, create.getOperation()); - this.context = context; - this.create = create; - this.client = client; - this.connection = connection; - } + private CreateSchemaMutationPlan(StatementContext context, CreateSchemaStatement create, + MetaDataClient client, PhoenixConnection connection) { + super(context, create.getOperation()); + this.context = context; + this.create = create; + this.client = client; + this.connection = connection; + } - @Override - public MutationState execute() throws SQLException { - try { - return client.createSchema(create); - } finally { - if (client.getConnection() != connection) { - client.getConnection().close(); - } - } + @Override + public MutationState execute() throws SQLException { + try { + return client.createSchema(create); + } finally { + if (client.getConnection() != connection) { + client.getConnection().close(); } + } + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE SCHEMA")); - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CREATE SCHEMA")); + } - @Override - public StatementContext getContext() { - return context; - } + @Override + public StatementContext getContext() { + return context; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java 
index 3ff149a34ea..ea959b3c90d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateSequenceCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,207 +42,196 @@ import org.apache.phoenix.util.SequenceUtil; public class CreateSequenceCompiler { - private final PhoenixStatement statement; - private final Operation operation; - - public CreateSequenceCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } - - private static class LongDatum implements PDatum { - - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PLong.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - } - private static class IntegerDatum implements PDatum { - - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - } - private static final PDatum LONG_DATUM = new LongDatum(); - private static final PDatum INTEGER_DATUM = new IntegerDatum(); - - private void validateNodeIsStateless(CreateSequenceStatement sequence, ParseNode node, - SQLExceptionCode code) throws SQLException { - if (!node.isStateless()) { - TableName sequenceName = sequence.getSequenceName(); - throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), code); - } - } - - private long evalExpression(CreateSequenceStatement sequence, StatementContext context, - Expression expression, SQLExceptionCode code) throws SQLException { - ImmutableBytesWritable ptr = context.getTempPtr(); - expression.evaluate(null, ptr); - if (ptr.getLength() == 0 || !expression.getDataType().isCoercibleTo(PLong.INSTANCE)) { - TableName sequenceName = sequence.getSequenceName(); - throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), code); - } - return (Long) PLong.INSTANCE.toObject(ptr, expression.getDataType()); - } - - public MutationPlan compile(final CreateSequenceStatement sequence) throws SQLException { - ParseNode startsWithNode = sequence.getStartWith(); - ParseNode incrementByNode = sequence.getIncrementBy(); - ParseNode maxValueNode = sequence.getMaxValue(); - ParseNode minValueNode = sequence.getMinValue(); - ParseNode cacheNode = sequence.getCacheSize(); - - // validate parse nodes - if (startsWithNode!=null) { - validateNodeIsStateless(sequence, startsWithNode, - SQLExceptionCode.START_WITH_MUST_BE_CONSTANT); - } - validateNodeIsStateless(sequence, incrementByNode, - SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT); - validateNodeIsStateless(sequence, maxValueNode, - SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT); - 
validateNodeIsStateless(sequence, minValueNode, - SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT); - if (cacheNode != null) { - validateNodeIsStateless(sequence, cacheNode, - SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); - } - - final PhoenixConnection connection = statement.getConnection(); - final StatementContext context = new StatementContext(statement); - - // add param meta data if required - if (startsWithNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) startsWithNode, LONG_DATUM); - } - if (incrementByNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) incrementByNode, LONG_DATUM); - } - if (maxValueNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) maxValueNode, LONG_DATUM); - } - if (minValueNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) minValueNode, LONG_DATUM); - } - if (cacheNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) cacheNode, INTEGER_DATUM); - } - - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - final long incrementBy = - evalExpression(sequence, context, incrementByNode.accept(expressionCompiler), - SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT); - if (incrementBy == 0) { - throw SequenceUtil.getException(sequence.getSequenceName().getSchemaName(), sequence - .getSequenceName().getTableName(), - SQLExceptionCode.INCREMENT_BY_MUST_NOT_BE_ZERO); - } - final long maxValue = - evalExpression(sequence, context, maxValueNode.accept(expressionCompiler), - SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT); - final long minValue = - evalExpression(sequence, context, minValueNode.accept(expressionCompiler), - SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT); - if (minValue>maxValue) { - TableName sequenceName = sequence.getSequenceName(); - throw SequenceUtil.getException(sequenceName.getSchemaName(), - sequenceName.getTableName(), - SQLExceptionCode.MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE); - } - - long startsWithValue; - if (startsWithNode == null) { - startsWithValue = incrementBy > 0 ? 
minValue : maxValue; - } else { - startsWithValue = - evalExpression(sequence, context, startsWithNode.accept(expressionCompiler), - SQLExceptionCode.START_WITH_MUST_BE_CONSTANT); - if (startsWithValue < minValue || startsWithValue > maxValue) { - TableName sequenceName = sequence.getSequenceName(); - throw SequenceUtil.getException(sequenceName.getSchemaName(), - sequenceName.getTableName(), - SQLExceptionCode.STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE); - } - } - final long startsWith = startsWithValue; - - long cacheSizeValue; - if (cacheNode == null) { - cacheSizeValue = - connection - .getQueryServices() - .getProps() - .getLong(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_SEQUENCE_CACHE_SIZE); - } - else { - cacheSizeValue = - evalExpression(sequence, context, cacheNode.accept(expressionCompiler), - SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); - if (cacheSizeValue < 0) { - TableName sequenceName = sequence.getSequenceName(); - throw SequenceUtil.getException(sequenceName.getSchemaName(), - sequenceName.getTableName(), - SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); - } - } - final long cacheSize = Math.max(1L, cacheSizeValue); - - final MetaDataClient client = new MetaDataClient(connection); - return new BaseMutationPlan(context, operation) { - - @Override - public MutationState execute() throws SQLException { - return client.createSequence(sequence, startsWith, incrementBy, cacheSize, minValue, maxValue); - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE SEQUENCE")); - } - }; - } -} \ No newline at end of file + private final PhoenixStatement statement; + private final Operation operation; + + public CreateSequenceCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } + + private static class LongDatum implements PDatum { + + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + } + + private static class IntegerDatum implements PDatum { + + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + } + + private static final PDatum LONG_DATUM = new LongDatum(); + private static final PDatum INTEGER_DATUM = new IntegerDatum(); + + private void validateNodeIsStateless(CreateSequenceStatement sequence, ParseNode node, + SQLExceptionCode code) throws SQLException { + if (!node.isStateless()) { + TableName sequenceName = sequence.getSequenceName(); + throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), + code); + } + } + + private long evalExpression(CreateSequenceStatement sequence, StatementContext context, + Expression expression, SQLExceptionCode code) throws SQLException { + ImmutableBytesWritable ptr = context.getTempPtr(); + expression.evaluate(null, ptr); + if (ptr.getLength() == 0 || !expression.getDataType().isCoercibleTo(PLong.INSTANCE)) { 
+ TableName sequenceName = sequence.getSequenceName(); + throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), + code); + } + return (Long) PLong.INSTANCE.toObject(ptr, expression.getDataType()); + } + + public MutationPlan compile(final CreateSequenceStatement sequence) throws SQLException { + ParseNode startsWithNode = sequence.getStartWith(); + ParseNode incrementByNode = sequence.getIncrementBy(); + ParseNode maxValueNode = sequence.getMaxValue(); + ParseNode minValueNode = sequence.getMinValue(); + ParseNode cacheNode = sequence.getCacheSize(); + + // validate parse nodes + if (startsWithNode != null) { + validateNodeIsStateless(sequence, startsWithNode, + SQLExceptionCode.START_WITH_MUST_BE_CONSTANT); + } + validateNodeIsStateless(sequence, incrementByNode, + SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT); + validateNodeIsStateless(sequence, maxValueNode, SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT); + validateNodeIsStateless(sequence, minValueNode, SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT); + if (cacheNode != null) { + validateNodeIsStateless(sequence, cacheNode, + SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); + } + + final PhoenixConnection connection = statement.getConnection(); + final StatementContext context = new StatementContext(statement); + + // add param meta data if required + if (startsWithNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) startsWithNode, LONG_DATUM); + } + if (incrementByNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) incrementByNode, LONG_DATUM); + } + if (maxValueNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) maxValueNode, LONG_DATUM); + } + if (minValueNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) minValueNode, LONG_DATUM); + } + if (cacheNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) cacheNode, INTEGER_DATUM); + } + + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + final long incrementBy = evalExpression(sequence, context, + incrementByNode.accept(expressionCompiler), SQLExceptionCode.INCREMENT_BY_MUST_BE_CONSTANT); + if (incrementBy == 0) { + throw SequenceUtil.getException(sequence.getSequenceName().getSchemaName(), + sequence.getSequenceName().getTableName(), SQLExceptionCode.INCREMENT_BY_MUST_NOT_BE_ZERO); + } + final long maxValue = evalExpression(sequence, context, maxValueNode.accept(expressionCompiler), + SQLExceptionCode.MAXVALUE_MUST_BE_CONSTANT); + final long minValue = evalExpression(sequence, context, minValueNode.accept(expressionCompiler), + SQLExceptionCode.MINVALUE_MUST_BE_CONSTANT); + if (minValue > maxValue) { + TableName sequenceName = sequence.getSequenceName(); + throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), + SQLExceptionCode.MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE); + } + + long startsWithValue; + if (startsWithNode == null) { + startsWithValue = incrementBy > 0 ? 
minValue : maxValue; + } else { + startsWithValue = evalExpression(sequence, context, startsWithNode.accept(expressionCompiler), + SQLExceptionCode.START_WITH_MUST_BE_CONSTANT); + if (startsWithValue < minValue || startsWithValue > maxValue) { + TableName sequenceName = sequence.getSequenceName(); + throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), + SQLExceptionCode.STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE); + } + } + final long startsWith = startsWithValue; + + long cacheSizeValue; + if (cacheNode == null) { + cacheSizeValue = connection.getQueryServices().getProps().getLong( + QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_SEQUENCE_CACHE_SIZE); + } else { + cacheSizeValue = evalExpression(sequence, context, cacheNode.accept(expressionCompiler), + SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); + if (cacheSizeValue < 0) { + TableName sequenceName = sequence.getSequenceName(); + throw SequenceUtil.getException(sequenceName.getSchemaName(), sequenceName.getTableName(), + SQLExceptionCode.CACHE_MUST_BE_NON_NEGATIVE_CONSTANT); + } + } + final long cacheSize = Math.max(1L, cacheSizeValue); + + final MetaDataClient client = new MetaDataClient(connection); + return new BaseMutationPlan(context, operation) { + + @Override + public MutationState execute() throws SQLException { + return client.createSequence(sequence, startsWith, incrementBy, cacheSize, minValue, + maxValue); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CREATE SEQUENCE")); + } + }; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java index ccda4659c43..fd0a0cdf921 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAMESPACE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; +import static org.apache.phoenix.query.QueryServices.DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED; +import static org.apache.phoenix.query.QueryServices.PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED; + import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; @@ -84,712 +89,687 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAMESPACE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; -import static org.apache.phoenix.query.QueryServices.DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED; -import static org.apache.phoenix.query.QueryServices.PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED; - - public class CreateTableCompiler { - private static final Logger LOGGER = LoggerFactory.getLogger(CreateTableCompiler.class); - private static final PDatum VARBINARY_DATUM = new VarbinaryDatum(); - private final PhoenixStatement statement; - private final Operation operation; - - public CreateTableCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } - - public MutationPlan compile(CreateTableStatement create) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection); - PTableType type = create.getTableType(); - PTable parentToBe = null; - ViewType viewTypeToBe = null; - Scan scan = new Scan(); - final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement)); - // TODO: support any statement for a VIEW instead of just a WHERE clause - ParseNode whereNode = create.getWhereClause(); - String viewStatementToBe = null; - byte[][] viewColumnConstantsToBe = null; - BitSet isViewColumnReferencedToBe = null; - Set pkColumnsInWhere = new HashSet<>(); - Set nonPkColumnsInWhere = new HashSet<>(); - byte[] rowKeyMatcher = ByteUtil.EMPTY_BYTE_ARRAY; - - // Check whether column families having local index column family suffix or not if present - // don't allow creating table. - // Also validate the default values expressions. 
- List columnDefs = create.getColumnDefs(); - List overideColumnDefs = null; - PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint(); - for (int i = 0; i < columnDefs.size(); i++) { - ColumnDef columnDef = columnDefs.get(i); - if (columnDef.getColumnDefName().getFamilyName()!=null && columnDef.getColumnDefName().getFamilyName().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY) - .build().buildException(); - } - // False means we do not need the default (because it evaluated to null) - if (!columnDef.validateDefault(context, pkConstraint)) { - if (overideColumnDefs == null) { - overideColumnDefs = new ArrayList<>(columnDefs); - } - overideColumnDefs.set(i, new ColumnDef(columnDef, null)); - } - } - if (overideColumnDefs != null) { - create = new CreateTableStatement(create,overideColumnDefs); - } - final CreateTableStatement finalCreate = create; - - if (type == PTableType.VIEW) { - TableRef tableRef = resolver.getTables().get(0); - int nColumns = tableRef.getTable().getColumns().size(); - isViewColumnReferencedToBe = new BitSet(nColumns); - // Used to track column references in a view - ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe); - parentToBe = tableRef.getTable(); - - // Disallow creating views on top of SYSTEM tables. See PHOENIX-5386 - if (parentToBe.getType() == PTableType.SYSTEM) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.CANNOT_CREATE_VIEWS_ON_SYSTEM_TABLES) - .build().buildException(); - } - viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED ? ViewType.MAPPED : ViewType.UPDATABLE; - Expression where = null; - if (whereNode == null) { - if (parentToBe.getViewType() == ViewType.READ_ONLY) { - viewTypeToBe = ViewType.READ_ONLY; - } - viewStatementToBe = parentToBe.getViewStatement(); - if (viewStatementToBe != null) { - SelectStatement select = new SQLParser(viewStatementToBe).parseQuery(); - whereNode = select.getWhere(); - where = whereNode.accept(expressionCompiler); - } - } else { - whereNode = StatementNormalizer.normalize(whereNode, resolver); - if (whereNode.isStateless()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT) - .build().buildException(); - } - // If our parent has a VIEW statement, combine it with this one - if (parentToBe.getViewStatement() != null) { - SelectStatement select = new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode); - whereNode = select.getWhere(); - } - where = whereNode.accept(expressionCompiler); - if (where != null && !LiteralExpression.isTrue(where)) { - TableName baseTableName = create.getBaseTableName(); - StringBuilder buf = new StringBuilder(); - whereNode.toSQL(resolver, buf); - viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), buf.toString()); - } - if (viewTypeToBe != ViewType.MAPPED) { - viewColumnConstantsToBe = new byte[nColumns][]; - ViewWhereExpressionVisitor visitor = new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe); - where.accept(visitor); - - viewTypeToBe = visitor.isUpdatable() ? 
ViewType.UPDATABLE : ViewType.READ_ONLY; - boolean updatableViewRestrictionEnabled = connection.getQueryServices() - .getProps().getBoolean(PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED, - DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED); - if (viewTypeToBe == ViewType.UPDATABLE && updatableViewRestrictionEnabled) { - ViewWhereExpressionValidatorVisitor validatorVisitor = - new ViewWhereExpressionValidatorVisitor(parentToBe, - pkColumnsInWhere, nonPkColumnsInWhere); - where.accept(validatorVisitor); - if (!(connection.getQueryServices() - instanceof ConnectionlessQueryServicesImpl)) { - try { - viewTypeToBe = setViewTypeToBe(connection, parentToBe, - pkColumnsInWhere, nonPkColumnsInWhere); - LOGGER.info("VIEW type is set to {}. View Statement: {}, " + - "View Name: {}, " + - "Parent Table/View Name: {}", - viewTypeToBe, viewStatementToBe, - create.getTableName(), parentToBe.getName()); - } catch (IOException e) { - throw new SQLException(e); - } - } - } - - // If view is not updatable, viewColumnConstants should be empty. We will still - // inherit our parent viewConstants, but we have no additional ones. - if (viewTypeToBe != ViewType.UPDATABLE) { - viewColumnConstantsToBe = null; - } - } - } - if (viewTypeToBe == ViewType.MAPPED - && parentToBe.getPKColumns().isEmpty()) { - validateCreateViewCompilation(connection, parentToBe, - columnDefs, pkConstraint); - } else if (where != null && viewTypeToBe == ViewType.UPDATABLE) { - rowKeyMatcher = WhereOptimizer.getRowKeyMatcher(context, create.getTableName(), - parentToBe, where); - } - verifyIfAnyParentHasIndexesAndViewExtendsPk(parentToBe, columnDefs, pkConstraint); - } - final ViewType viewType = viewTypeToBe; - final String viewStatement = viewStatementToBe; - final byte[][] viewColumnConstants = viewColumnConstantsToBe; - final BitSet isViewColumnReferenced = isViewColumnReferencedToBe; - List splitNodes = create.getSplitNodes(); - final byte[][] splits = new byte[splitNodes.size()][]; - ImmutableBytesWritable ptr = context.getTempPtr(); - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - for (int i = 0; i < splits.length; i++) { - ParseNode node = splitNodes.get(i); - if (node instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM); - } - if (node.isStateless()) { - Expression expression = node.accept(expressionCompiler); - if (expression.evaluate(null, ptr)) {; - splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr); - continue; - } + private static final Logger LOGGER = LoggerFactory.getLogger(CreateTableCompiler.class); + private static final PDatum VARBINARY_DATUM = new VarbinaryDatum(); + private final PhoenixStatement statement; + private final Operation operation; + + public CreateTableCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } + + public MutationPlan compile(CreateTableStatement create) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection); + PTableType type = create.getTableType(); + PTable parentToBe = null; + ViewType viewTypeToBe = null; + Scan scan = new Scan(); + final StatementContext context = + new StatementContext(statement, resolver, scan, new SequenceManager(statement)); + // TODO: support any statement for a VIEW instead of just a WHERE clause + ParseNode whereNode = create.getWhereClause(); + String viewStatementToBe = null; + byte[][] 
viewColumnConstantsToBe = null; + BitSet isViewColumnReferencedToBe = null; + Set pkColumnsInWhere = new HashSet<>(); + Set nonPkColumnsInWhere = new HashSet<>(); + byte[] rowKeyMatcher = ByteUtil.EMPTY_BYTE_ARRAY; + + // Check whether column families having local index column family suffix or not if present + // don't allow creating table. + // Also validate the default values expressions. + List columnDefs = create.getColumnDefs(); + List overideColumnDefs = null; + PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint(); + for (int i = 0; i < columnDefs.size(); i++) { + ColumnDef columnDef = columnDefs.get(i); + if ( + columnDef.getColumnDefName().getFamilyName() != null && columnDef.getColumnDefName() + .getFamilyName().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY).build() + .buildException(); + } + // False means we do not need the default (because it evaluated to null) + if (!columnDef.validateDefault(context, pkConstraint)) { + if (overideColumnDefs == null) { + overideColumnDefs = new ArrayList<>(columnDefs); + } + overideColumnDefs.set(i, new ColumnDef(columnDef, null)); + } + } + if (overideColumnDefs != null) { + create = new CreateTableStatement(create, overideColumnDefs); + } + final CreateTableStatement finalCreate = create; + + if (type == PTableType.VIEW) { + TableRef tableRef = resolver.getTables().get(0); + int nColumns = tableRef.getTable().getColumns().size(); + isViewColumnReferencedToBe = new BitSet(nColumns); + // Used to track column references in a view + ExpressionCompiler expressionCompiler = + new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe); + parentToBe = tableRef.getTable(); + + // Disallow creating views on top of SYSTEM tables. See PHOENIX-5386 + if (parentToBe.getType() == PTableType.SYSTEM) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_VIEWS_ON_SYSTEM_TABLES) + .build().buildException(); + } + viewTypeToBe = + parentToBe.getViewType() == ViewType.MAPPED ? 
ViewType.MAPPED : ViewType.UPDATABLE; + Expression where = null; + if (whereNode == null) { + if (parentToBe.getViewType() == ViewType.READ_ONLY) { + viewTypeToBe = ViewType.READ_ONLY; + } + viewStatementToBe = parentToBe.getViewStatement(); + if (viewStatementToBe != null) { + SelectStatement select = new SQLParser(viewStatementToBe).parseQuery(); + whereNode = select.getWhere(); + where = whereNode.accept(expressionCompiler); + } + } else { + whereNode = StatementNormalizer.normalize(whereNode, resolver); + if (whereNode.isStateless()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT).build() + .buildException(); + } + // If our parent has a VIEW statement, combine it with this one + if (parentToBe.getViewStatement() != null) { + SelectStatement select = + new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode); + whereNode = select.getWhere(); + } + where = whereNode.accept(expressionCompiler); + if (where != null && !LiteralExpression.isTrue(where)) { + TableName baseTableName = create.getBaseTableName(); + StringBuilder buf = new StringBuilder(); + whereNode.toSQL(resolver, buf); + viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), + baseTableName.getTableName(), buf.toString()); + } + if (viewTypeToBe != ViewType.MAPPED) { + viewColumnConstantsToBe = new byte[nColumns][]; + ViewWhereExpressionVisitor visitor = + new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe); + where.accept(visitor); + + viewTypeToBe = visitor.isUpdatable() ? ViewType.UPDATABLE : ViewType.READ_ONLY; + boolean updatableViewRestrictionEnabled = connection.getQueryServices().getProps() + .getBoolean(PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED, + DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED); + if (viewTypeToBe == ViewType.UPDATABLE && updatableViewRestrictionEnabled) { + ViewWhereExpressionValidatorVisitor validatorVisitor = + new ViewWhereExpressionValidatorVisitor(parentToBe, pkColumnsInWhere, + nonPkColumnsInWhere); + where.accept(validatorVisitor); + if (!(connection.getQueryServices() instanceof ConnectionlessQueryServicesImpl)) { + try { + viewTypeToBe = + setViewTypeToBe(connection, parentToBe, pkColumnsInWhere, nonPkColumnsInWhere); + LOGGER.info( + "VIEW type is set to {}. View Statement: {}, " + "View Name: {}, " + + "Parent Table/View Name: {}", + viewTypeToBe, viewStatementToBe, create.getTableName(), parentToBe.getName()); + } catch (IOException e) { + throw new SQLException(e); + } } - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT) - .setMessage("Node: " + node).build().buildException(); - } - final MetaDataClient client = new MetaDataClient(connection); - final PTable parent = parentToBe; - - return new CreateTableMutationPlan(context, client, finalCreate, splits, parent, - viewStatement, viewType, rowKeyMatcher, - viewColumnConstants, isViewColumnReferenced, connection); - } - - /** - * Restrict view to be UPDATABLE if the view specification: - * 1. uses only the PK columns; - * 2. starts from the first PK column (ignore the prefix PK columns, TENANT_ID and/or - * _SALTED, if the parent table is multi-tenant and/or salted); - * 3. PK columns should be in the order they are defined; - * 4. uses the same set of PK columns as its sibling views' specification; - * Otherwise, mark the view as READ_ONLY. 
- * - * @param connection The client connection - * @param parentToBe To be parent for given view - * @param pkColumnsInWhere Set of primary key in where clause - * @param nonPkColumnsInWhere Set of non-primary key columns in where clause - * @throws IOException thrown if there is an error finding sibling views - * @throws SQLException - */ - private ViewType setViewTypeToBe(final PhoenixConnection connection, final PTable parentToBe, - final Set pkColumnsInWhere, - final Set nonPkColumnsInWhere) - throws IOException, SQLException { - // 1. Check the view specification WHERE clause uses only the PK columns - if (!nonPkColumnsInWhere.isEmpty()) { - LOGGER.info("Setting the view type as READ_ONLY because the view statement contains " + - "non-PK columns: {}", nonPkColumnsInWhere); - return ViewType.READ_ONLY; - } - if (pkColumnsInWhere.isEmpty()) { - return ViewType.UPDATABLE; - } + } + + // If view is not updatable, viewColumnConstants should be empty. We will still + // inherit our parent viewConstants, but we have no additional ones. + if (viewTypeToBe != ViewType.UPDATABLE) { + viewColumnConstantsToBe = null; + } + } + } + if (viewTypeToBe == ViewType.MAPPED && parentToBe.getPKColumns().isEmpty()) { + validateCreateViewCompilation(connection, parentToBe, columnDefs, pkConstraint); + } else if (where != null && viewTypeToBe == ViewType.UPDATABLE) { + rowKeyMatcher = + WhereOptimizer.getRowKeyMatcher(context, create.getTableName(), parentToBe, where); + } + verifyIfAnyParentHasIndexesAndViewExtendsPk(parentToBe, columnDefs, pkConstraint); + } + final ViewType viewType = viewTypeToBe; + final String viewStatement = viewStatementToBe; + final byte[][] viewColumnConstants = viewColumnConstantsToBe; + final BitSet isViewColumnReferenced = isViewColumnReferencedToBe; + List splitNodes = create.getSplitNodes(); + final byte[][] splits = new byte[splitNodes.size()][]; + ImmutableBytesWritable ptr = context.getTempPtr(); + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + for (int i = 0; i < splits.length; i++) { + ParseNode node = splitNodes.get(i); + if (node instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM); + } + if (node.isStateless()) { + Expression expression = node.accept(expressionCompiler); + if (expression.evaluate(null, ptr)) { + ; + splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr); + continue; + } + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT) + .setMessage("Node: " + node).build().buildException(); + } + final MetaDataClient client = new MetaDataClient(connection); + final PTable parent = parentToBe; + + return new CreateTableMutationPlan(context, client, finalCreate, splits, parent, viewStatement, + viewType, rowKeyMatcher, viewColumnConstants, isViewColumnReferenced, connection); + } + + /** + * Restrict view to be UPDATABLE if the view specification: 1. uses only the PK columns; 2. starts + * from the first PK column (ignore the prefix PK columns, TENANT_ID and/or _SALTED, if the parent + * table is multi-tenant and/or salted); 3. PK columns should be in the order they are defined; 4. + * uses the same set of PK columns as its sibling views' specification; Otherwise, mark the view + * as READ_ONLY. 
+ * @param connection The client connection + * @param parentToBe To be parent for given view + * @param pkColumnsInWhere Set of primary key in where clause + * @param nonPkColumnsInWhere Set of non-primary key columns in where clause + * @throws IOException thrown if there is an error finding sibling views + */ + private ViewType setViewTypeToBe(final PhoenixConnection connection, final PTable parentToBe, + final Set pkColumnsInWhere, final Set nonPkColumnsInWhere) + throws IOException, SQLException { + // 1. Check the view specification WHERE clause uses only the PK columns + if (!nonPkColumnsInWhere.isEmpty()) { + LOGGER.info("Setting the view type as READ_ONLY because the view statement contains " + + "non-PK columns: {}", nonPkColumnsInWhere); + return ViewType.READ_ONLY; + } + if (pkColumnsInWhere.isEmpty()) { + return ViewType.UPDATABLE; + } - // 2. Check the WHERE clause starts from the first PK column (ignore the prefix PK - // columns, TENANT_ID and/or _SALTED, if the parent table is multi-tenant and/or salted) - List tablePkPositions = new ArrayList<>(); - List viewPkPositions = new ArrayList<>(); - List tablePkColumns = parentToBe.getPKColumns(); - tablePkColumns.forEach(tablePkColumn -> - tablePkPositions.add(tablePkColumn.getPosition())); - pkColumnsInWhere.forEach(pkColumn -> viewPkPositions.add(pkColumn.getPosition())); - Collections.sort(viewPkPositions); - int tablePkStartIdx = 0; - if (parentToBe.isMultiTenant()) { - tablePkStartIdx++; - } - if (parentToBe.getBucketNum() != null) { - tablePkStartIdx++; - } - if (!Objects.equals(viewPkPositions.get(0), tablePkPositions.get(tablePkStartIdx))) { - LOGGER.info("Setting the view type as READ_ONLY because the view statement WHERE " + - "clause does not start from the first PK column (ignore the prefix PKs " + - "if the parent table is multi-tenant and/or salted). View PK Columns: " + - "{}, Table PK Columns: {}", pkColumnsInWhere, tablePkColumns); - return ViewType.READ_ONLY; - } + // 2. Check the WHERE clause starts from the first PK column (ignore the prefix PK + // columns, TENANT_ID and/or _SALTED, if the parent table is multi-tenant and/or salted) + List tablePkPositions = new ArrayList<>(); + List viewPkPositions = new ArrayList<>(); + List tablePkColumns = parentToBe.getPKColumns(); + tablePkColumns.forEach(tablePkColumn -> tablePkPositions.add(tablePkColumn.getPosition())); + pkColumnsInWhere.forEach(pkColumn -> viewPkPositions.add(pkColumn.getPosition())); + Collections.sort(viewPkPositions); + int tablePkStartIdx = 0; + if (parentToBe.isMultiTenant()) { + tablePkStartIdx++; + } + if (parentToBe.getBucketNum() != null) { + tablePkStartIdx++; + } + if (!Objects.equals(viewPkPositions.get(0), tablePkPositions.get(tablePkStartIdx))) { + LOGGER.info("Setting the view type as READ_ONLY because the view statement WHERE " + + "clause does not start from the first PK column (ignore the prefix PKs " + + "if the parent table is multi-tenant and/or salted). View PK Columns: " + + "{}, Table PK Columns: {}", pkColumnsInWhere, tablePkColumns); + return ViewType.READ_ONLY; + } - // 3. Check PK columns are in the order they are defined - if (!isPkColumnsInOrder(viewPkPositions, tablePkPositions, tablePkStartIdx)) { - LOGGER.info("Setting the view type as READ_ONLY because the PK columns is not in the " + - "order they are defined. View PK Columns: {}, Table PK Columns: {}", - pkColumnsInWhere, tablePkColumns); - return ViewType.READ_ONLY; - } + // 3. 
Check PK columns are in the order they are defined + if (!isPkColumnsInOrder(viewPkPositions, tablePkPositions, tablePkStartIdx)) { + LOGGER.info( + "Setting the view type as READ_ONLY because the PK columns is not in the " + + "order they are defined. View PK Columns: {}, Table PK Columns: {}", + pkColumnsInWhere, tablePkColumns); + return ViewType.READ_ONLY; + } - // 4. Check the view specification has the same set of PK column(s) as its sibling view - byte[] parentTenantIdInBytes = parentToBe.getTenantId() != null - ? parentToBe.getTenantId().getBytes() : null; - byte[] parentSchemaNameInBytes = parentToBe.getSchemaName() != null - ? parentToBe.getSchemaName().getBytes() : null; - ConnectionQueryServices queryServices = connection.getQueryServices(); - Configuration config = queryServices.getConfiguration(); - byte[] systemChildLinkTable = SchemaUtil.isNamespaceMappingEnabled(null, config) - ? SYSTEM_CHILD_LINK_NAMESPACE_BYTES - : SYSTEM_CHILD_LINK_NAME_BYTES; - try (Table childLinkTable = queryServices.getTable(systemChildLinkTable)) { - List legitimateSiblingViewList = - ViewUtil.findAllDescendantViews(childLinkTable, config, parentTenantIdInBytes, - parentSchemaNameInBytes, parentToBe.getTableName().getBytes(), - HConstants.LATEST_TIMESTAMP, true).getFirst(); - if (!legitimateSiblingViewList.isEmpty()) { - PTable siblingView = legitimateSiblingViewList.get(0); - Expression siblingViewWhere = getWhereFromView(connection, siblingView); - Set siblingViewPkColsInWhere = new HashSet<>(); - if (siblingViewWhere != null) { - ViewWhereExpressionValidatorVisitor siblingViewValidatorVisitor = - new ViewWhereExpressionValidatorVisitor(parentToBe, - siblingViewPkColsInWhere, null); - siblingViewWhere.accept(siblingViewValidatorVisitor); - } - if (!pkColumnsInWhere.equals(siblingViewPkColsInWhere)) { - LOGGER.info("Setting the view type as READ_ONLY because its set of PK " + - "columns is different from its sibling view {}'s. View PK " + - "Columns: {}, Sibling View PK Columns: {}", - siblingView.getName(), pkColumnsInWhere, siblingViewPkColsInWhere); - return ViewType.READ_ONLY; - } - } - } - return ViewType.UPDATABLE; - } - - /** - * Get the where Expression of given view. 
- * @param connection The client connection - * @param view PTable of the view - * @return A where Expression - * @throws SQLException - */ - private Expression getWhereFromView(final PhoenixConnection connection, final PTable view) - throws SQLException { - String viewStatement = view.getViewStatement(); - if (viewStatement == null) { - return null; - } - SelectStatement select = new SQLParser(viewStatement).parseQuery(); - ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection); - StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver); - BitSet isViewColumnReferencedToBe = new BitSet(view.getColumns().size()); - ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, - isViewColumnReferencedToBe); - ParseNode whereNode = select.getWhere(); - return whereNode.accept(expressionCompiler); - } - - /** - * Check if the primary key columns are in order (consecutive in position) as they are - * defined, providing their positions list - * @param viewPkPositions A positions list of view PK columns to be checked - * @param tablePkPositions The positions list of the table's PK columns to be compared - * @param tablePkStartIdx The start index of table PK position, depending on whether the - * table is multi-tenant and/or salted - * @return true if the PK columns are in order, otherwise false - */ - private boolean isPkColumnsInOrder(final List viewPkPositions, - final List tablePkPositions, - final int tablePkStartIdx) { - for (int i = 1; i < viewPkPositions.size(); i++) { - if (!Objects.equals( - viewPkPositions.get(i), - tablePkPositions.get(tablePkStartIdx + i))) { - return false; - } - } - return true; - } - - /** - * If any of the parent table/view has indexes in the parent hierarchy, and the current - * view under creation extends the primary key of the parent, throw error. - * - * @param parentToBe parent table/view of the current view under creation. - * @param columnDefs list of column definitions. - * @param pkConstraint primary key constraint. - * @throws SQLException if the view extends primary key and one of the parent view/table has - * indexes in the parent hierarchy. - */ - private void verifyIfAnyParentHasIndexesAndViewExtendsPk(PTable parentToBe, - List columnDefs, - PrimaryKeyConstraint pkConstraint) - throws SQLException { - if (viewExtendsParentPk(columnDefs, pkConstraint)) { - PTable table = parentToBe; - while (table != null) { - if (table.getIndexes().size() > 0) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode - .VIEW_CANNOT_EXTEND_PK_WITH_PARENT_INDEXES) - .build() - .buildException(); - } - if (table.getType() != PTableType.VIEW) { - return; - } - String schemaName = table.getParentSchemaName().getString(); - String tableName = table.getParentTableName().getString(); - try { - table = statement.getConnection().getTable( - SchemaUtil.getTableName(schemaName, tableName)); - } catch (TableNotFoundException e) { - table = null; - } - } - } + // 4. Check the view specification has the same set of PK column(s) as its sibling view + byte[] parentTenantIdInBytes = + parentToBe.getTenantId() != null ? parentToBe.getTenantId().getBytes() : null; + byte[] parentSchemaNameInBytes = + parentToBe.getSchemaName() != null ? parentToBe.getSchemaName().getBytes() : null; + ConnectionQueryServices queryServices = connection.getQueryServices(); + Configuration config = queryServices.getConfiguration(); + byte[] systemChildLinkTable = SchemaUtil.isNamespaceMappingEnabled(null, config) + ? 
SYSTEM_CHILD_LINK_NAMESPACE_BYTES + : SYSTEM_CHILD_LINK_NAME_BYTES; + try (Table childLinkTable = queryServices.getTable(systemChildLinkTable)) { + List legitimateSiblingViewList = ViewUtil.findAllDescendantViews(childLinkTable, + config, parentTenantIdInBytes, parentSchemaNameInBytes, + parentToBe.getTableName().getBytes(), HConstants.LATEST_TIMESTAMP, true).getFirst(); + if (!legitimateSiblingViewList.isEmpty()) { + PTable siblingView = legitimateSiblingViewList.get(0); + Expression siblingViewWhere = getWhereFromView(connection, siblingView); + Set siblingViewPkColsInWhere = new HashSet<>(); + if (siblingViewWhere != null) { + ViewWhereExpressionValidatorVisitor siblingViewValidatorVisitor = + new ViewWhereExpressionValidatorVisitor(parentToBe, siblingViewPkColsInWhere, null); + siblingViewWhere.accept(siblingViewValidatorVisitor); + } + if (!pkColumnsInWhere.equals(siblingViewPkColsInWhere)) { + LOGGER.info( + "Setting the view type as READ_ONLY because its set of PK " + + "columns is different from its sibling view {}'s. View PK " + + "Columns: {}, Sibling View PK Columns: {}", + siblingView.getName(), pkColumnsInWhere, siblingViewPkColsInWhere); + return ViewType.READ_ONLY; + } + } + } + return ViewType.UPDATABLE; + } + + /** + * Get the where Expression of given view. + * @param connection The client connection + * @param view PTable of the view + * @return A where Expression + */ + private Expression getWhereFromView(final PhoenixConnection connection, final PTable view) + throws SQLException { + String viewStatement = view.getViewStatement(); + if (viewStatement == null) { + return null; + } + SelectStatement select = new SQLParser(viewStatement).parseQuery(); + ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection); + StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver); + BitSet isViewColumnReferencedToBe = new BitSet(view.getColumns().size()); + ExpressionCompiler expressionCompiler = + new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe); + ParseNode whereNode = select.getWhere(); + return whereNode.accept(expressionCompiler); + } + + /** + * Check if the primary key columns are in order (consecutive in position) as they are defined, + * providing their positions list + * @param viewPkPositions A positions list of view PK columns to be checked + * @param tablePkPositions The positions list of the table's PK columns to be compared + * @param tablePkStartIdx The start index of table PK position, depending on whether the table is + * multi-tenant and/or salted + * @return true if the PK columns are in order, otherwise false + */ + private boolean isPkColumnsInOrder(final List viewPkPositions, + final List tablePkPositions, final int tablePkStartIdx) { + for (int i = 1; i < viewPkPositions.size(); i++) { + if (!Objects.equals(viewPkPositions.get(i), tablePkPositions.get(tablePkStartIdx + i))) { + return false; + } + } + return true; + } + + /** + * If any of the parent table/view has indexes in the parent hierarchy, and the current view under + * creation extends the primary key of the parent, throw error. + * @param parentToBe parent table/view of the current view under creation. + * @param columnDefs list of column definitions. + * @param pkConstraint primary key constraint. + * @throws SQLException if the view extends primary key and one of the parent view/table has + * indexes in the parent hierarchy. 
+ */ + private void verifyIfAnyParentHasIndexesAndViewExtendsPk(PTable parentToBe, + List columnDefs, PrimaryKeyConstraint pkConstraint) throws SQLException { + if (viewExtendsParentPk(columnDefs, pkConstraint)) { + PTable table = parentToBe; + while (table != null) { + if (table.getIndexes().size() > 0) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.VIEW_CANNOT_EXTEND_PK_WITH_PARENT_INDEXES).build().buildException(); + } + if (table.getType() != PTableType.VIEW) { + return; + } + String schemaName = table.getParentSchemaName().getString(); + String tableName = table.getParentTableName().getString(); + try { + table = + statement.getConnection().getTable(SchemaUtil.getTableName(schemaName, tableName)); + } catch (TableNotFoundException e) { + table = null; + } + } + } + } + + /** + * Validate View creation compilation. 1. If view creation syntax does not specify primary key, + * the method throws SQLException with PRIMARY_KEY_MISSING code. 2. If parent table does not + * exist, the method throws TNFE. + * @param connection The client connection + * @param parentToBe To be parent for given view + * @param columnDefs List of column defs + * @param pkConstraint PrimaryKey constraint retrieved from CreateTable statement + * @throws SQLException If view creation validation fails + */ + private void validateCreateViewCompilation(final PhoenixConnection connection, + final PTable parentToBe, final List columnDefs, + final PrimaryKeyConstraint pkConstraint) throws SQLException { + boolean isPKMissed = true; + if (pkConstraint.getColumnNames().size() > 0) { + isPKMissed = false; + } else { + for (ColumnDef columnDef : columnDefs) { + if (columnDef.isPK()) { + isPKMissed = false; + break; + } + } + } + PName fullTableName = SchemaUtil.getPhysicalHBaseTableName(parentToBe.getSchemaName(), + parentToBe.getTableName(), parentToBe.isNamespaceMapped()); + // getTableIfExists will throw TNFE if table does not exist + try (Table ignored = connection.getQueryServices().getTableIfExists(fullTableName.getBytes())) { + // empty try block + } catch (IOException e) { + throw new SQLException(e); + } + if (isPKMissed) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING).build() + .buildException(); } + } + + /** + * Returns true if the view extends the primary key of the parent table/view, returns false + * otherwise. + * @param columnDefs column def list. + * @param pkConstraint primary key constraint. + * @return true if the view extends the primary key of the parent table/view, false otherwise. + */ + private boolean viewExtendsParentPk(final List columnDefs, + final PrimaryKeyConstraint pkConstraint) { + if (pkConstraint.getColumnNames().size() > 0) { + return true; + } else { + return columnDefs.stream().anyMatch(ColumnDef::isPK); + } + } - /** - * Validate View creation compilation. - * 1. If view creation syntax does not specify primary key, the method - * throws SQLException with PRIMARY_KEY_MISSING code. - * 2. If parent table does not exist, the method throws TNFE. 
- * - * @param connection The client connection - * @param parentToBe To be parent for given view - * @param columnDefs List of column defs - * @param pkConstraint PrimaryKey constraint retrieved from CreateTable - * statement - * @throws SQLException If view creation validation fails - */ - private void validateCreateViewCompilation( - final PhoenixConnection connection, final PTable parentToBe, - final List columnDefs, - final PrimaryKeyConstraint pkConstraint) throws SQLException { - boolean isPKMissed = true; - if (pkConstraint.getColumnNames().size() > 0) { - isPKMissed = false; - } else { - for (ColumnDef columnDef : columnDefs) { - if (columnDef.isPK()) { - isPKMissed = false; - break; - } - } - } - PName fullTableName = SchemaUtil.getPhysicalHBaseTableName( - parentToBe.getSchemaName(), parentToBe.getTableName(), - parentToBe.isNamespaceMapped()); - // getTableIfExists will throw TNFE if table does not exist - try (Table ignored = - connection.getQueryServices().getTableIfExists( - fullTableName.getBytes())) { - // empty try block - } catch (IOException e) { - throw new SQLException(e); - } - if (isPKMissed) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.PRIMARY_KEY_MISSING) - .build().buildException(); - } + public static class ColumnTrackingExpressionCompiler extends ExpressionCompiler { + private final BitSet isColumnReferenced; + + public ColumnTrackingExpressionCompiler(StatementContext context, BitSet isColumnReferenced) { + super(context, true); + this.isColumnReferenced = isColumnReferenced; } - /** - * Returns true if the view extends the primary key of the parent table/view, returns false - * otherwise. - * - * @param columnDefs column def list. - * @param pkConstraint primary key constraint. - * @return true if the view extends the primary key of the parent table/view, false otherwise. 
- */ - private boolean viewExtendsParentPk( - final List columnDefs, - final PrimaryKeyConstraint pkConstraint) { - if (pkConstraint.getColumnNames().size() > 0) { - return true; - } else { - return columnDefs.stream().anyMatch(ColumnDef::isPK); - } + @Override + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef ref = super.resolveColumn(node); + isColumnReferenced.set(ref.getColumn().getPosition()); + return ref; + } + } + + public static class ViewWhereExpressionVisitor + extends StatelessTraverseNoExpressionVisitor { + private boolean isUpdatable = true; + private final PTable table; + private int position; + private final byte[][] columnValues; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + public ViewWhereExpressionVisitor(PTable table, byte[][] columnValues) { + this.table = table; + this.columnValues = columnValues; } - public static class ColumnTrackingExpressionCompiler extends ExpressionCompiler { - private final BitSet isColumnReferenced; - - public ColumnTrackingExpressionCompiler(StatementContext context, BitSet isColumnReferenced) { - super(context, true); - this.isColumnReferenced = isColumnReferenced; - } - - @Override - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef ref = super.resolveColumn(node); - isColumnReferenced.set(ref.getColumn().getPosition()); - return ref; - } + public boolean isUpdatable() { + return isUpdatable; } - - public static class ViewWhereExpressionVisitor extends StatelessTraverseNoExpressionVisitor { - private boolean isUpdatable = true; - private final PTable table; - private int position; - private final byte[][] columnValues; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - public ViewWhereExpressionVisitor (PTable table, byte[][] columnValues) { - this.table = table; - this.columnValues = columnValues; - } - - public boolean isUpdatable() { - return isUpdatable; - } - @Override - public Boolean defaultReturn(Expression node, List l) { - // We only hit this if we're trying to traverse somewhere - // in which we don't have a visitLeave that returns non null - isUpdatable = false; - return null; - } + @Override + public Boolean defaultReturn(Expression node, List l) { + // We only hit this if we're trying to traverse somewhere + // in which we don't have a visitLeave that returns non null + isUpdatable = false; + return null; + } - @Override - public Iterator visitEnter(AndExpression node) { - return node.getChildren().iterator(); - } + @Override + public Iterator visitEnter(AndExpression node) { + return node.getChildren().iterator(); + } - @Override - public Boolean visitLeave(AndExpression node, List l) { - return l.isEmpty() ? null : Boolean.TRUE; - } + @Override + public Boolean visitLeave(AndExpression node, List l) { + return l.isEmpty() ? 
null : Boolean.TRUE; + } - @Override - public Iterator visitEnter(ComparisonExpression node) { - if (node.getFilterOp() == CompareOperator.EQUAL && node.getChildren().get(1).isStateless() - && node.getChildren().get(1).getDeterminism() == Determinism.ALWAYS ) { - return Iterators.singletonIterator(node.getChildren().get(0)); - } - return super.visitEnter(node); - } + @Override + public Iterator visitEnter(ComparisonExpression node) { + if ( + node.getFilterOp() == CompareOperator.EQUAL && node.getChildren().get(1).isStateless() + && node.getChildren().get(1).getDeterminism() == Determinism.ALWAYS + ) { + return Iterators.singletonIterator(node.getChildren().get(0)); + } + return super.visitEnter(node); + } - @Override - public Boolean visitLeave(ComparisonExpression node, List l) { - if (l.isEmpty()) { - return null; - } - - node.getChildren().get(1).evaluate(null, ptr); - // Set the columnValue at the position of the column to the - // constant with which it is being compared. - // We always strip the last byte so that we can recognize null - // as a value with a single byte. - columnValues[position] = new byte [ptr.getLength() + 1]; - System.arraycopy(ptr.get(), ptr.getOffset(), columnValues[position], 0, ptr.getLength()); - return Boolean.TRUE; - } + @Override + public Boolean visitLeave(ComparisonExpression node, List l) { + if (l.isEmpty()) { + return null; + } + + node.getChildren().get(1).evaluate(null, ptr); + // Set the columnValue at the position of the column to the + // constant with which it is being compared. + // We always strip the last byte so that we can recognize null + // as a value with a single byte. + columnValues[position] = new byte[ptr.getLength() + 1]; + System.arraycopy(ptr.get(), ptr.getOffset(), columnValues[position], 0, ptr.getLength()); + return Boolean.TRUE; + } - @Override - public Iterator visitEnter(IsNullExpression node) { - return node.isNegate() ? super.visitEnter(node) : node.getChildren().iterator(); - } - - @Override - public Boolean visitLeave(IsNullExpression node, List l) { - // Nothing to do as we've already set the position to an empty byte array - return l.isEmpty() ? null : Boolean.TRUE; - } - - @Override - public Boolean visit(RowKeyColumnExpression node) { - this.position = table.getPKColumns().get(node.getPosition()).getPosition(); - return Boolean.TRUE; - } + @Override + public Iterator visitEnter(IsNullExpression node) { + return node.isNegate() ? 
super.visitEnter(node) : node.getChildren().iterator(); + } - @Override - public Boolean visit(KeyValueColumnExpression node) { - try { - this.position = table.getColumnFamily(node.getColumnFamily()).getPColumnForColumnQualifier(node.getColumnQualifier()).getPosition(); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - return Boolean.TRUE; - } - - @Override - public Boolean visit(SingleCellColumnExpression node) { - return visit(node.getKeyValueExpression()); - } - - } - - /** - * Visitor for view's where expression, which updates primary key columns and non-primary key - * columns for validating if the view is updatable - */ - public static class ViewWhereExpressionValidatorVisitor extends - StatelessTraverseNoExpressionVisitor { - private boolean isUpdatable = true; - private final PTable table; - private final Set pkColumns; - private final Set nonPKColumns; - - public ViewWhereExpressionValidatorVisitor(PTable table, Set pkColumns, - Set nonPKColumns) { - this.table = table; - this.pkColumns = pkColumns; - this.nonPKColumns = nonPKColumns; - } + @Override + public Boolean visitLeave(IsNullExpression node, List l) { + // Nothing to do as we've already set the position to an empty byte array + return l.isEmpty() ? null : Boolean.TRUE; + } - public boolean isUpdatable() { - return isUpdatable; - } + @Override + public Boolean visit(RowKeyColumnExpression node) { + this.position = table.getPKColumns().get(node.getPosition()).getPosition(); + return Boolean.TRUE; + } - @Override - public Boolean defaultReturn(Expression node, List l) { - // We only hit this if we're trying to traverse somewhere - // in which we don't have a visitLeave that returns non null - isUpdatable = false; - return null; - } + @Override + public Boolean visit(KeyValueColumnExpression node) { + try { + this.position = table.getColumnFamily(node.getColumnFamily()) + .getPColumnForColumnQualifier(node.getColumnQualifier()).getPosition(); + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } + return Boolean.TRUE; + } - @Override - public Iterator visitEnter(AndExpression node) { - return node.getChildren().iterator(); - } + @Override + public Boolean visit(SingleCellColumnExpression node) { + return visit(node.getKeyValueExpression()); + } - @Override - public Boolean visitLeave(AndExpression node, List l) { - return l.isEmpty() ? null : Boolean.TRUE; - } + } + + /** + * Visitor for view's where expression, which updates primary key columns and non-primary key + * columns for validating if the view is updatable + */ + public static class ViewWhereExpressionValidatorVisitor + extends StatelessTraverseNoExpressionVisitor { + private boolean isUpdatable = true; + private final PTable table; + private final Set pkColumns; + private final Set nonPKColumns; + + public ViewWhereExpressionValidatorVisitor(PTable table, Set pkColumns, + Set nonPKColumns) { + this.table = table; + this.pkColumns = pkColumns; + this.nonPKColumns = nonPKColumns; + } - @Override - public Iterator visitEnter(ComparisonExpression node) { - if (node.getFilterOp() == CompareOperator.EQUAL - && node.getChildren().get(1).isStateless() - && node.getChildren().get(1).getDeterminism() == Determinism.ALWAYS) { - return Iterators.singletonIterator(node.getChildren().get(0)); - } - return super.visitEnter(node); - } + public boolean isUpdatable() { + return isUpdatable; + } - @Override - public Boolean visitLeave(ComparisonExpression node, List l) { - return l.isEmpty() ? 
null : Boolean.TRUE; - } + @Override + public Boolean defaultReturn(Expression node, List l) { + // We only hit this if we're trying to traverse somewhere + // in which we don't have a visitLeave that returns non null + isUpdatable = false; + return null; + } - @Override - public Iterator visitEnter(IsNullExpression node) { - return node.isNegate() ? super.visitEnter(node) : node.getChildren().iterator(); - } + @Override + public Iterator visitEnter(AndExpression node) { + return node.getChildren().iterator(); + } - @Override - public Boolean visitLeave(IsNullExpression node, List l) { - // Nothing to do as we've already set the position to an empty byte array - return l.isEmpty() ? null : Boolean.TRUE; - } + @Override + public Boolean visitLeave(AndExpression node, List l) { + return l.isEmpty() ? null : Boolean.TRUE; + } - @Override - public Boolean visit(RowKeyColumnExpression node) { - pkColumns.add(table.getPKColumns().get(node.getPosition())); - return Boolean.TRUE; - } + @Override + public Iterator visitEnter(ComparisonExpression node) { + if ( + node.getFilterOp() == CompareOperator.EQUAL && node.getChildren().get(1).isStateless() + && node.getChildren().get(1).getDeterminism() == Determinism.ALWAYS + ) { + return Iterators.singletonIterator(node.getChildren().get(0)); + } + return super.visitEnter(node); + } - @Override - public Boolean visit(KeyValueColumnExpression node) { - try { - if (nonPKColumns != null) { - nonPKColumns.add( - table.getColumnFamily(node.getColumnFamily()) - .getPColumnForColumnQualifier(node.getColumnQualifier())); - } - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - return Boolean.TRUE; - } + @Override + public Boolean visitLeave(ComparisonExpression node, List l) { + return l.isEmpty() ? null : Boolean.TRUE; + } - @Override - public Boolean visit(SingleCellColumnExpression node) { - return visit(node.getKeyValueExpression()); - } + @Override + public Iterator visitEnter(IsNullExpression node) { + return node.isNegate() ? super.visitEnter(node) : node.getChildren().iterator(); } - private static class VarbinaryDatum implements PDatum { + @Override + public Boolean visitLeave(IsNullExpression node, List l) { + // Nothing to do as we've already set the position to an empty byte array + return l.isEmpty() ? 
null : Boolean.TRUE; + } - @Override - public boolean isNullable() { - return false; - } + @Override + public Boolean visit(RowKeyColumnExpression node) { + pkColumns.add(table.getPKColumns().get(node.getPosition())); + return Boolean.TRUE; + } - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } + @Override + public Boolean visit(KeyValueColumnExpression node) { + try { + if (nonPKColumns != null) { + nonPKColumns.add(table.getColumnFamily(node.getColumnFamily()) + .getPColumnForColumnQualifier(node.getColumnQualifier())); + } + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } + return Boolean.TRUE; + } - @Override - public Integer getMaxLength() { - return null; - } + @Override + public Boolean visit(SingleCellColumnExpression node) { + return visit(node.getKeyValueExpression()); + } + } - @Override - public Integer getScale() { - return null; - } + private static class VarbinaryDatum implements PDatum { - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - } - - private class CreateTableMutationPlan extends BaseMutationPlan { - - private final MetaDataClient client; - private final CreateTableStatement finalCreate; - private final byte[][] splits; - private final PTable parent; - private final String viewStatement; - private final ViewType viewType; - private final byte[][] viewColumnConstants; - private final BitSet isViewColumnReferenced; - - private final byte[] rowKeyMatcher; - private final PhoenixConnection connection; - - private CreateTableMutationPlan(StatementContext context, MetaDataClient client, - CreateTableStatement finalCreate, byte[][] splits, PTable parent, - String viewStatement, ViewType viewType, byte[] rowKeyMatcher, - byte[][] viewColumnConstants, BitSet isViewColumnReferenced, - PhoenixConnection connection) { - super(context, CreateTableCompiler.this.operation); - this.client = client; - this.finalCreate = finalCreate; - this.splits = splits; - this.parent = parent; - this.viewStatement = viewStatement; - this.viewType = viewType; - this.rowKeyMatcher = rowKeyMatcher; - this.viewColumnConstants = viewColumnConstants; - this.isViewColumnReferenced = isViewColumnReferenced; - this.connection = connection; - } + @Override + public boolean isNullable() { + return false; + } - @Override - public MutationState execute() throws SQLException { - try { - return client.createTable(finalCreate, splits, parent, viewStatement, - viewType, MetaDataUtil.getViewIndexIdDataType(), rowKeyMatcher, - viewColumnConstants, isViewColumnReferenced); - } finally { - if (client.getConnection() != connection) { - client.getConnection().close(); - } - } - } + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE TABLE")); - } + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + } + + private class CreateTableMutationPlan extends BaseMutationPlan { + + private final MetaDataClient client; + private final CreateTableStatement finalCreate; + private final byte[][] splits; + private final PTable parent; + private final String viewStatement; + private final ViewType viewType; + private final byte[][] viewColumnConstants; + private final BitSet isViewColumnReferenced; + + private final 
byte[] rowKeyMatcher; + private final PhoenixConnection connection; + + private CreateTableMutationPlan(StatementContext context, MetaDataClient client, + CreateTableStatement finalCreate, byte[][] splits, PTable parent, String viewStatement, + ViewType viewType, byte[] rowKeyMatcher, byte[][] viewColumnConstants, + BitSet isViewColumnReferenced, PhoenixConnection connection) { + super(context, CreateTableCompiler.this.operation); + this.client = client; + this.finalCreate = finalCreate; + this.splits = splits; + this.parent = parent; + this.viewStatement = viewStatement; + this.viewType = viewType; + this.rowKeyMatcher = rowKeyMatcher; + this.viewColumnConstants = viewColumnConstants; + this.isViewColumnReferenced = isViewColumnReferenced; + this.connection = connection; + } + + @Override + public MutationState execute() throws SQLException { + try { + return client.createTable(finalCreate, splits, parent, viewStatement, viewType, + MetaDataUtil.getViewIndexIdDataType(), rowKeyMatcher, viewColumnConstants, + isViewColumnReferenced); + } finally { + if (client.getConnection() != connection) { + client.getConnection().close(); + } + } + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CREATE TABLE")); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java index 89acfe14aaf..37f7866e2bf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeclareCursorCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,63 +17,52 @@ */ package org.apache.phoenix.compile; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; +import java.sql.SQLException; +import java.util.Collections; + import org.apache.phoenix.execute.MutationState; -import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.jdbc.PhoenixStatement.Operation; -import org.apache.phoenix.parse.CreateIndexStatement; import org.apache.phoenix.parse.DeclareCursorStatement; -import org.apache.phoenix.parse.ParseNode; -import org.apache.phoenix.util.CursorUtil; import org.apache.phoenix.schema.MetaDataClient; -import org.apache.phoenix.schema.PTable.IndexType; - -import java.sql.ParameterMetaData; -import java.sql.SQLException; -import java.util.Collections; -import java.util.List; public class DeclareCursorCompiler { - private final PhoenixStatement statement; - private final Operation operation; - private QueryPlan queryPlan; + private final PhoenixStatement statement; + private final Operation operation; + private QueryPlan queryPlan; - public DeclareCursorCompiler(PhoenixStatement statement, Operation operation, QueryPlan queryPlan) throws SQLException { - this.statement = statement; - this.operation = operation; - // See PHOENIX-5072 - // We optimize the plan inside the CursorFetchPlan here at the first place. - // Later when the next optimize is called, the original CursorFetchPlan will be selected as there won't be any better plans. - this.queryPlan = statement.getConnection().getQueryServices().getOptimizer() - .optimize(statement, queryPlan); + public DeclareCursorCompiler(PhoenixStatement statement, Operation operation, QueryPlan queryPlan) + throws SQLException { + this.statement = statement; + this.operation = operation; + // See PHOENIX-5072 + // We optimize the plan inside the CursorFetchPlan here at the first place. + // Later when the next optimize is called, the original CursorFetchPlan will be selected as + // there won't be any better plans. 
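Illustrative aside, not part of the diff: DeclareCursorCompiler backs Phoenix's cursor support, and compile() rejects a cursor whose inner SELECT still contains bind parameters. The sketch below is hedged — it assumes the usual java.sql imports, a local Phoenix JDBC URL, a hypothetical EVENTS table, and the DECLARE ... CURSOR FOR syntax.

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
      // Accepted: the cursor's SELECT is fully literal.
      stmt.execute("DECLARE events_cur CURSOR FOR SELECT ID, CREATED FROM EVENTS ORDER BY CREATED");
      // Rejected: the inner SELECT contains a "?" bind parameter, so compile()
      // throws SQLException ("Cannot declare cursor, internal SELECT statement contains bindings!").
      stmt.execute("DECLARE bad_cur CURSOR FOR SELECT ID FROM EVENTS WHERE KIND = ?");
    }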
+ this.queryPlan = + statement.getConnection().getQueryServices().getOptimizer().optimize(statement, queryPlan); + } + + public MutationPlan compile(final DeclareCursorStatement declare) throws SQLException { + if (declare.getBindCount() != 0) { + throw new SQLException("Cannot declare cursor, internal SELECT statement contains bindings!"); } - public MutationPlan compile(final DeclareCursorStatement declare) throws SQLException { - if(declare.getBindCount() != 0){ - throw new SQLException("Cannot declare cursor, internal SELECT statement contains bindings!"); - } + final PhoenixConnection connection = statement.getConnection(); + final StatementContext context = new StatementContext(statement); + final MetaDataClient client = new MetaDataClient(connection); - final PhoenixConnection connection = statement.getConnection(); - final StatementContext context = new StatementContext(statement); - final MetaDataClient client = new MetaDataClient(connection); - - return new BaseMutationPlan(context, operation) { - @Override - public MutationState execute() throws SQLException { - return client.declareCursor(declare, queryPlan); - } + return new BaseMutationPlan(context, operation) { + @Override + public MutationState execute() throws SQLException { + return client.declareCursor(declare, queryPlan); + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DECLARE CURSOR")); - } - }; - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DECLARE CURSOR")); + } + }; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java index 90eef610f6b..0b72717420c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DelegateMutationPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,65 +26,65 @@ import org.apache.phoenix.schema.TableRef; public class DelegateMutationPlan implements MutationPlan { - @Override - public MutationState execute() throws SQLException { - return plan.execute(); - } - - @Override - public StatementContext getContext() { - return plan.getContext(); - } - - @Override - public TableRef getTargetRef() { - return plan.getTargetRef(); - } - - @Override - public QueryPlan getQueryPlan() { - return plan.getQueryPlan(); - } - - @Override - public ParameterMetaData getParameterMetaData() { - return plan.getParameterMetaData(); - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return plan.getExplainPlan(); - } - - @Override - public Set getSourceRefs() { - return plan.getSourceRefs(); - } - - @Override - public Operation getOperation() { - return plan.getOperation(); - } - - private final MutationPlan plan; - - public DelegateMutationPlan(MutationPlan plan) { - this.plan = plan; - } - - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return plan.getEstimatedRowsToScan(); - } - - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return plan.getEstimatedBytesToScan(); - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return plan.getEstimateInfoTimestamp(); - } + @Override + public MutationState execute() throws SQLException { + return plan.execute(); + } + + @Override + public StatementContext getContext() { + return plan.getContext(); + } + + @Override + public TableRef getTargetRef() { + return plan.getTargetRef(); + } + + @Override + public QueryPlan getQueryPlan() { + return plan.getQueryPlan(); + } + + @Override + public ParameterMetaData getParameterMetaData() { + return plan.getParameterMetaData(); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return plan.getExplainPlan(); + } + + @Override + public Set getSourceRefs() { + return plan.getSourceRefs(); + } + + @Override + public Operation getOperation() { + return plan.getOperation(); + } + + private final MutationPlan plan; + + public DelegateMutationPlan(MutationPlan plan) { + this.plan = plan; + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return plan.getEstimatedRowsToScan(); + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return plan.getEstimatedBytesToScan(); + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return plan.getEstimateInfoTimestamp(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java index a4d087288d3..05403c7abbd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java @@ -1,4 +1,5 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file @@ -6,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,8 +36,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.cache.ServerCacheClient; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; @@ -88,943 +88,1008 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ScanUtil; - -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.SchemaUtil; import edu.umd.cs.findbugs.annotations.NonNull; -import org.apache.phoenix.util.SchemaUtil; public class DeleteCompiler { - private static ParseNodeFactory FACTORY = new ParseNodeFactory(); - - private final PhoenixStatement statement; - private final Operation operation; - - public DeleteCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } - - /** - * Handles client side deletion of rows for a DELETE statement. We determine the "best" plan to drive the query using - * our standard optimizer. The plan may be based on using an index, in which case we need to translate the index row - * key to get the data row key used to form the delete mutation. We always collect up the data table mutations, but we - * only collect and send the index mutations for global, immutable indexes. Local indexes and mutable indexes are always - * maintained on the server side. - * @param context StatementContext for the scan being executed - * @param iterator ResultIterator for the scan being executed - * @param bestPlan QueryPlan used to produce the iterator - * @param projectedTableRef TableRef containing all indexed and covered columns across all indexes on the data table - * @param otherTableRefs other TableRefs needed to be maintained apart from the one over which the scan is executing. - * Might be other index tables (if we're driving off of the data table table), the data table (if we're driving off of - * an index table), or a mix of the data table and additional index tables. - * @return MutationState representing the uncommitted data across the data table and indexes. Will be joined with the - * MutationState on the connection over which the delete is occurring. 
- * @throws SQLException - */ - private static MutationState deleteRows(StatementContext context, ResultIterator iterator, QueryPlan bestPlan, TableRef projectedTableRef, List otherTableRefs) throws SQLException { - RowProjector projector = bestPlan.getProjector(); - TableRef tableRef = bestPlan.getTableRef(); - PTable table = tableRef.getTable(); - PhoenixStatement statement = context.getStatement(); - PhoenixConnection connection = statement.getConnection(); - PName tenantId = connection.getTenantId(); - byte[] tenantIdBytes = null; - if (tenantId != null) { - tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null); - } - // we automatically flush the mutations when either auto commit is enabled, or - // the target table is transactional (in that case changes are not visible until we commit) - final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional(); - ConnectionQueryServices services = connection.getQueryServices(); - final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - final long maxSizeBytes = services.getProps() - .getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize); - MultiRowMutationState mutations = new MultiRowMutationState(batchSize); - List otherMutations = null; - // If otherTableRefs is not empty, we're deleting the rows from both the index table and - // the data table through a single query to save executing an additional one (since we - // can always get the data table row key from an index row key). + private static ParseNodeFactory FACTORY = new ParseNodeFactory(); + + private final PhoenixStatement statement; + private final Operation operation; + + public DeleteCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } + + /** + * Handles client side deletion of rows for a DELETE statement. We determine the "best" plan to + * drive the query using our standard optimizer. The plan may be based on using an index, in which + * case we need to translate the index row key to get the data row key used to form the delete + * mutation. We always collect up the data table mutations, but we only collect and send the index + * mutations for global, immutable indexes. Local indexes and mutable indexes are always + * maintained on the server side. + * @param context StatementContext for the scan being executed + * @param iterator ResultIterator for the scan being executed + * @param bestPlan QueryPlan used to produce the iterator + * @param projectedTableRef TableRef containing all indexed and covered columns across all indexes + * on the data table + * @param otherTableRefs other TableRefs needed to be maintained apart from the one over which + * the scan is executing. Might be other index tables (if we're driving + * off of the data table table), the data table (if we're driving off of + * an index table), or a mix of the data table and additional index + * tables. + * @return MutationState representing the uncommitted data across the data table and indexes. Will + * be joined with the MutationState on the connection over which the delete is occurring. 
+ */ + private static MutationState deleteRows(StatementContext context, ResultIterator iterator, + QueryPlan bestPlan, TableRef projectedTableRef, List otherTableRefs) + throws SQLException { + RowProjector projector = bestPlan.getProjector(); + TableRef tableRef = bestPlan.getTableRef(); + PTable table = tableRef.getTable(); + PhoenixStatement statement = context.getStatement(); + PhoenixConnection connection = statement.getConnection(); + PName tenantId = connection.getTenantId(); + byte[] tenantIdBytes = null; + if (tenantId != null) { + tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), + table.getBucketNum() != null, tenantId, table.getViewIndexId() != null); + } + // we automatically flush the mutations when either auto commit is enabled, or + // the target table is transactional (in that case changes are not visible until we commit) + final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional(); + ConnectionQueryServices services = connection.getQueryServices(); + final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + final long maxSizeBytes = + services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); + final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize); + MultiRowMutationState mutations = new MultiRowMutationState(batchSize); + List otherMutations = null; + // If otherTableRefs is not empty, we're deleting the rows from both the index table and + // the data table through a single query to save executing an additional one (since we + // can always get the data table row key from an index row key). + if (!otherTableRefs.isEmpty()) { + otherMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size()); + for (int i = 0; i < otherTableRefs.size(); i++) { + otherMutations.add(new MultiRowMutationState(batchSize)); + } + } + List pkColumns = table.getPKColumns(); + boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null; + boolean isSharedViewIndex = table.getViewIndexId() != null; + int offset = (table.getBucketNum() == null ? 
0 : 1); + byte[][] values = new byte[pkColumns.size()][]; + if (isSharedViewIndex) { + values[offset++] = table.getviewIndexIdType().toBytes(table.getViewIndexId()); + } + if (isMultiTenant) { + values[offset++] = tenantIdBytes; + } + try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) { + ValueGetter getter = null; + if (!otherTableRefs.isEmpty()) { + getter = new AbstractValueGetter() { + final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); + final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(); + + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) + throws IOException { + Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier()); + if (cell == null) { + return null; + } + valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return valuePtr; + } + + @Override + public byte[] getRowKey() { + rs.getCurrentRow().getKey(rowKeyPtr); + return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr); + } + }; + } + IndexMaintainer scannedIndexMaintainer = null; + IndexMaintainer[] maintainers = null; + PTable dataTable = table; + if (table.getType() == PTableType.INDEX) { if (!otherTableRefs.isEmpty()) { - otherMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size()); - for (int i = 0; i < otherTableRefs.size(); i++) { - otherMutations.add(new MultiRowMutationState(batchSize)); + // The data table is always the last one in the list if it's + // not chosen as the best of the possible plans. + dataTable = otherTableRefs.get(otherTableRefs.size() - 1).getTable(); + if (!isMaintainedOnClient(table)) { + // dataTable is a projected table and may not include all the indexed columns and so we + // need to get + // the actual data table + dataTable = + connection.getTable(SchemaUtil.getTableName(dataTable.getSchemaName().getString(), + dataTable.getTableName().getString())); + } + scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection); + } + maintainers = new IndexMaintainer[otherTableRefs.size()]; + for (int i = 0; i < otherTableRefs.size(); i++) { + // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that + // client-side + // expressions are used instead of server-side ones. + PTable otherTable = otherTableRefs.get(i).getTable(); + if (otherTable.getType() == PTableType.INDEX) { + // In this case, we'll convert from index row -> data row -> other index row + maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection); + } else { + maintainers[i] = scannedIndexMaintainer; + } + } + } else if (!otherTableRefs.isEmpty()) { + dataTable = table; + maintainers = new IndexMaintainer[otherTableRefs.size()]; + for (int i = 0; i < otherTableRefs.size(); i++) { + // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that + // client-side + // expressions are used instead of server-side ones. + maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(), + otherTableRefs.get(i).getTable(), connection); + } + + } + byte[][] viewConstants = IndexUtil.getViewConstants(dataTable); + int rowCount = 0; + while (rs.next()) { + ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a + // Map + rs.getCurrentRow().getKey(rowKeyPtr); + // When issuing deletes, we do not care about the row time ranges. Also, if the table had a + // row timestamp column, then the + // row key will already have its value. 
+ // Check for otherTableRefs being empty required when deleting directly from the index + if (otherTableRefs.isEmpty() || isMaintainedOnClient(table)) { + mutations.put(rowKeyPtr, + new RowMutationState(PRow.DELETE_MARKER, 0, + statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, + null)); + } + for (int i = 0; i < otherTableRefs.size(); i++) { + PTable otherTable = otherTableRefs.get(i).getTable(); + ImmutableBytesPtr otherRowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a + // key in a Map + // Translate the data table row to the index table row + if (table.getType() == PTableType.INDEX) { + otherRowKeyPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants)); + if (otherTable.getType() == PTableType.INDEX) { + otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, otherRowKeyPtr, null, null, + rs.getCurrentRow().getValue(0).getTimestamp())); } - } - List pkColumns = table.getPKColumns(); - boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null; - boolean isSharedViewIndex = table.getViewIndexId() != null; - int offset = (table.getBucketNum() == null ? 0 : 1); - byte[][] values = new byte[pkColumns.size()][]; - if (isSharedViewIndex) { - values[offset++] = table.getviewIndexIdType().toBytes(table.getViewIndexId()); - } - if (isMultiTenant) { - values[offset++] = tenantIdBytes; - } - try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) { - ValueGetter getter = null; - if (!otherTableRefs.isEmpty()) { - getter = new AbstractValueGetter() { - final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); - final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(); - - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException { - Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier()); - if (cell == null) { - return null; - } - valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return valuePtr; - } - - @Override - public byte[] getRowKey() { - rs.getCurrentRow().getKey(rowKeyPtr); - return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr); - } - }; + } else { + otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, + rs.getCurrentRow().getValue(0).getTimestamp())); + } + otherMutations.get(i).put(otherRowKeyPtr, + new RowMutationState(PRow.DELETE_MARKER, 0, + statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, + null)); + } + if (mutations.size() > maxSize) { + throw new IllegalArgumentException("MutationState size of " + mutations.size() + + " is bigger than max allowed size of " + maxSize); + } + rowCount++; + // Commit a batch if we are flushing automatically and we're at our batch size + if (autoFlush && rowCount % batchSize == 0) { + MutationState state = + new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection); + connection.getMutationState().join(state); + for (int i = 0; i < otherTableRefs.size(); i++) { + MutationState indexState = new MutationState(otherTableRefs.get(i), + otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); + connection.getMutationState().join(indexState); + } + connection.getMutationState().send(); + mutations.clear(); + if (otherMutations != null) { + for (MultiRowMutationState multiRowMutationState : otherMutations) { + multiRowMutationState.clear(); } - IndexMaintainer scannedIndexMaintainer = null; - IndexMaintainer[] maintainers = null; - PTable dataTable = 
table; - if (table.getType() == PTableType.INDEX) { - if (!otherTableRefs.isEmpty()) { - // The data table is always the last one in the list if it's - // not chosen as the best of the possible plans. - dataTable = otherTableRefs.get(otherTableRefs.size()-1).getTable(); - if (!isMaintainedOnClient(table)) { - // dataTable is a projected table and may not include all the indexed columns and so we need to get - // the actual data table - dataTable = connection.getTable(SchemaUtil - .getTableName(dataTable.getSchemaName().getString(), - dataTable.getTableName().getString())); - } - scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection); - } - maintainers = new IndexMaintainer[otherTableRefs.size()]; - for (int i = 0; i < otherTableRefs.size(); i++) { - // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side - // expressions are used instead of server-side ones. - PTable otherTable = otherTableRefs.get(i).getTable(); - if (otherTable.getType() == PTableType.INDEX) { - // In this case, we'll convert from index row -> data row -> other index row - maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection); - } else { - maintainers[i] = scannedIndexMaintainer; - } - } - } else if (!otherTableRefs.isEmpty()) { - dataTable = table; - maintainers = new IndexMaintainer[otherTableRefs.size()]; - for (int i = 0; i < otherTableRefs.size(); i++) { - // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side - // expressions are used instead of server-side ones. - maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(), otherTableRefs.get(i).getTable(), connection); - } + } + } + } + + // If auto flush is true, this last batch will be committed upon return + int nCommittedRows = autoFlush ? (rowCount / batchSize * batchSize) : 0; + + // tableRef can be index if the index table is selected by the query plan or if we do the + // DELETE + // directly on the index table. In other cases it refers to the data table + MutationState tableState = + new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection); + MutationState state; + if (otherTableRefs.isEmpty()) { + state = tableState; + } else { + state = new MutationState(maxSize, maxSizeBytes, connection); + // if there are other table references we need to start with an empty mutation state and + // then join the other states. We only need to count the data table rows that will be + // deleted. + // MutationState.join() correctly maintains that accounting and ignores the index table + // rows. + // This way we always return the correct number of rows that are deleted. + state.join(tableState); + } + for (int i = 0; i < otherTableRefs.size(); i++) { + MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), + 0, maxSize, maxSizeBytes, connection); + state.join(indexState); + } + return state; + } + } - } - byte[][] viewConstants = IndexUtil.getViewConstants(dataTable); - int rowCount = 0; - while (rs.next()) { - ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map - rs.getCurrentRow().getKey(rowKeyPtr); - // When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the - // row key will already have its value. 
- // Check for otherTableRefs being empty required when deleting directly from the index - if (otherTableRefs.isEmpty() || isMaintainedOnClient(table)) { - mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null)); - } - for (int i = 0; i < otherTableRefs.size(); i++) { - PTable otherTable = otherTableRefs.get(i).getTable(); - ImmutableBytesPtr otherRowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map - // Translate the data table row to the index table row - if (table.getType() == PTableType.INDEX) { - otherRowKeyPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants)); - if (otherTable.getType() == PTableType.INDEX) { - otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, otherRowKeyPtr, null, null, rs.getCurrentRow().getValue(0).getTimestamp())); - } - } else { - otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, rs.getCurrentRow().getValue(0).getTimestamp())); - } - otherMutations.get(i).put(otherRowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null)); - } - if (mutations.size() > maxSize) { - throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize); - } - rowCount++; - // Commit a batch if we are flushing automatically and we're at our batch size - if (autoFlush && rowCount % batchSize == 0) { - MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection); - connection.getMutationState().join(state); - for (int i = 0; i < otherTableRefs.size(); i++) { - MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); - connection.getMutationState().join(indexState); - } - connection.getMutationState().send(); - mutations.clear(); - if (otherMutations != null) { - for (MultiRowMutationState multiRowMutationState : otherMutations) { - multiRowMutationState.clear(); - } - } - } - } + private static class DeletingParallelIteratorFactory extends MutatingParallelIteratorFactory { + private QueryPlan queryPlan; + private List otherTableRefs; + private TableRef projectedTableRef; - // If auto flush is true, this last batch will be committed upon return - int nCommittedRows = autoFlush ? (rowCount / batchSize * batchSize) : 0; - - // tableRef can be index if the index table is selected by the query plan or if we do the DELETE - // directly on the index table. In other cases it refers to the data table - MutationState tableState = - new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection); - MutationState state; - if (otherTableRefs.isEmpty()) { - state = tableState; - } else { - state = new MutationState(maxSize, maxSizeBytes, connection); - // if there are other table references we need to start with an empty mutation state and - // then join the other states. We only need to count the data table rows that will be deleted. - // MutationState.join() correctly maintains that accounting and ignores the index table rows. - // This way we always return the correct number of rows that are deleted. 
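A worked example of the batch accounting above (the values are made up): inside the loop a batch is flushed whenever autoFlush is on and rowCount is a multiple of batchSize, so after the loop the integer division rounds nCommittedRows down to the rows already sent.

    // With batchSize = 100 and rowCount = 250, two full batches (200 rows) were
    // flushed inside the loop; the remaining 50 delete markers stay in the
    // MutationState returned to the caller.
    int batchSize = 100;
    int rowCount = 250;
    boolean autoFlush = true;
    int nCommittedRows = autoFlush ? (rowCount / batchSize * batchSize) : 0; // 200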
- state.join(tableState); - } - for (int i = 0; i < otherTableRefs.size(); i++) { - MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); - state.join(indexState); - } - return state; - } - } - - private static class DeletingParallelIteratorFactory extends MutatingParallelIteratorFactory { - private QueryPlan queryPlan; - private List otherTableRefs; - private TableRef projectedTableRef; - - private DeletingParallelIteratorFactory(PhoenixConnection connection) { - super(connection); - } - - @Override - protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException { - PhoenixStatement statement = new PhoenixStatement(connection); - /* - * We don't want to collect any read metrics within the child context. This is because any read metrics that - * need to be captured are already getting collected in the parent statement context enclosed in the result - * iterator being used for reading rows out. - */ - StatementContext context = new StatementContext(statement, false); - MutationState state = deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs); - return state; - } - - public void setQueryPlan(QueryPlan queryPlan) { - this.queryPlan = queryPlan; - } - - public void setOtherTableRefs(List otherTableRefs) { - this.otherTableRefs = otherTableRefs; - } - - public void setProjectedTableRef(TableRef projectedTableRef) { - this.projectedTableRef = projectedTableRef; - } + private DeletingParallelIteratorFactory(PhoenixConnection connection) { + super(connection); } - - private List getClientSideMaintainedIndexes(TableRef tableRef) { - PTable table = tableRef.getTable(); - if (!table.getIndexes().isEmpty()) { - List nonDisabledIndexes = Lists.newArrayListWithExpectedSize(table.getIndexes().size()); - for (PTable index : table.getIndexes()) { - if (!index.getIndexState().isDisabled() && isMaintainedOnClient(index)) { - nonDisabledIndexes.add(index); - } - } - return nonDisabledIndexes; - } - return Collections.emptyList(); - } - - /** - * Implementation of MutationPlan that is selected if - * 1) the query is strictly point lookup, and - * 2) the query has no LIMIT clause. - */ - public class MultiRowDeleteMutationPlan implements MutationPlan { - private final List plans; - private final MutationPlan firstPlan; - private final QueryPlan dataPlan; - - public MultiRowDeleteMutationPlan(QueryPlan dataPlan, @NonNull List plans) { - Preconditions.checkArgument(!plans.isEmpty()); - this.plans = plans; - this.firstPlan = plans.get(0); - this.dataPlan = dataPlan; - } - - @Override - public StatementContext getContext() { - return firstPlan.getContext(); - } - @Override - public ParameterMetaData getParameterMetaData() { - return firstPlan.getParameterMetaData(); - } + @Override + protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, + PhoenixConnection connection) throws SQLException { + PhoenixStatement statement = new PhoenixStatement(connection); + /* + * We don't want to collect any read metrics within the child context. This is because any + * read metrics that need to be captured are already getting collected in the parent statement + * context enclosed in the result iterator being used for reading rows out. 
+ */ + StatementContext context = new StatementContext(statement, false); + MutationState state = + deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs); + return state; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return firstPlan.getExplainPlan(); - } + public void setQueryPlan(QueryPlan queryPlan) { + this.queryPlan = queryPlan; + } - @Override - public MutationState execute() throws SQLException { - MutationState state = firstPlan.execute(); - statement.getConnection().getMutationState().join(state); - for (MutationPlan plan : plans.subList(1, plans.size())) { - statement.getConnection().getMutationState().join(plan.execute()); - } - return state; - } + public void setOtherTableRefs(List otherTableRefs) { + this.otherTableRefs = otherTableRefs; + } - @Override - public TableRef getTargetRef() { - return firstPlan.getTargetRef(); - } + public void setProjectedTableRef(TableRef projectedTableRef) { + this.projectedTableRef = projectedTableRef; + } + } + + private List getClientSideMaintainedIndexes(TableRef tableRef) { + PTable table = tableRef.getTable(); + if (!table.getIndexes().isEmpty()) { + List nonDisabledIndexes = + Lists.newArrayListWithExpectedSize(table.getIndexes().size()); + for (PTable index : table.getIndexes()) { + if (!index.getIndexState().isDisabled() && isMaintainedOnClient(index)) { + nonDisabledIndexes.add(index); + } + } + return nonDisabledIndexes; + } + return Collections.emptyList(); + } + + /** + * Implementation of MutationPlan that is selected if 1) the query is strictly point lookup, and + * 2) the query has no LIMIT clause. + */ + public class MultiRowDeleteMutationPlan implements MutationPlan { + private final List plans; + private final MutationPlan firstPlan; + private final QueryPlan dataPlan; + + public MultiRowDeleteMutationPlan(QueryPlan dataPlan, @NonNull List plans) { + Preconditions.checkArgument(!plans.isEmpty()); + this.plans = plans; + this.firstPlan = plans.get(0); + this.dataPlan = dataPlan; + } - @Override - public Set getSourceRefs() { - return firstPlan.getSourceRefs(); - } + @Override + public StatementContext getContext() { + return firstPlan.getContext(); + } - @Override - public Operation getOperation() { - return operation; - } - - @Override - public Long getEstimatedRowsToScan() throws SQLException { - Long estRows = null; - for (MutationPlan plan : plans) { - /* - * If any of the plan doesn't have estimate information available, then we cannot - * provide estimate for the overall plan. - */ - if (plan.getEstimatedRowsToScan() == null) { - return null; - } - estRows = add(estRows, plan.getEstimatedRowsToScan()); - } - return estRows; - } + @Override + public ParameterMetaData getParameterMetaData() { + return firstPlan.getParameterMetaData(); + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - Long estBytes = null; - for (MutationPlan plan : plans) { - /* - * If any of the plan doesn't have estimate information available, then we cannot - * provide estimate for the overall plan. 
- */ - if (plan.getEstimatedBytesToScan() == null) { - return null; - } - estBytes = add(estBytes, plan.getEstimatedBytesToScan()); - } - return estBytes; - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return firstPlan.getExplainPlan(); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - Long estInfoTimestamp = Long.MAX_VALUE; - for (MutationPlan plan : plans) { - Long timestamp = plan.getEstimateInfoTimestamp(); - /* - * If any of the plan doesn't have estimate information available, then we cannot - * provide estimate for the overall plan. - */ - if (timestamp == null) { - return timestamp; - } - estInfoTimestamp = Math.min(estInfoTimestamp, timestamp); - } - return estInfoTimestamp; - } + @Override + public MutationState execute() throws SQLException { + MutationState state = firstPlan.execute(); + statement.getConnection().getMutationState().join(state); + for (MutationPlan plan : plans.subList(1, plans.size())) { + statement.getConnection().getMutationState().join(plan.execute()); + } + return state; + } - @Override - public QueryPlan getQueryPlan() { - return dataPlan; - } + @Override + public TableRef getTargetRef() { + return firstPlan.getTargetRef(); } - public MutationPlan compile(DeleteStatement delete) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final boolean isAutoCommit = connection.getAutoCommit(); - final boolean hasPostProcessing = delete.getLimit() != null; - final ConnectionQueryServices services = connection.getQueryServices(); - List queryPlans; - boolean allowServerMutations = - services.getProps().getBoolean(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS, - QueryServicesOptions.DEFAULT_ENABLE_SERVER_SIDE_DELETE_MUTATIONS); - NamedTableNode tableNode = delete.getTable(); - String tableName = tableNode.getName().getTableName(); - String schemaName = tableNode.getName().getSchemaName(); - SelectStatement select = null; - ColumnResolver resolverToBe = null; - DeletingParallelIteratorFactory parallelIteratorFactoryToBe; - resolverToBe = FromCompiler.getResolverForMutation(delete, connection); - final TableRef targetTableRef = resolverToBe.getTables().get(0); - PTable table = targetTableRef.getTable(); - // Cannot update: - // - read-only VIEW - // - transactional table with a connection having an SCN - // TODO: SchemaUtil.isReadOnly(PTable, connection)? - if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) { - throw new ReadOnlyTableException(schemaName,tableName); - } - else if (table.isTransactional() && connection.getSCN() != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - - List clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef); - final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty(); - - boolean isSalted = table.getBucketNum() != null; - boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant(); - boolean isSharedViewIndex = table.getViewIndexId() != null; - int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 
1 : 0); - final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset; - int selectColumnCount = pkColumnCount; - for (PTable index : clientSideIndexes) { - selectColumnCount += index.getPKColumns().size() - pkColumnCount; - } - Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); - List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); - for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) { - PColumn column = table.getPKColumns().get(i); - projectedColumns.add(column); - } - for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) { - PColumn column = table.getPKColumns().get(i); - projectedColumns.add(column); - aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null))); - } - // Project all non PK indexed columns so that we can do the proper index maintenance on the indexes for which - // mutations are generated on the client side. Indexed columns are needed to identify index rows to be deleted - for (PTable index : table.getIndexes()) { - if (isMaintainedOnClient(index)) { - IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); - // Go through maintainer as it handles functional indexes correctly - for (Pair columnInfo : maintainer.getIndexedColumnInfo()) { - String familyName = columnInfo.getFirst(); - if (familyName != null) { - String columnName = columnInfo.getSecond(); - boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty(); - PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName); - if (!projectedColumns.contains(column)) { - projectedColumns.add(column); - aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null))); - } - } - } - } - } - select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(), - Collections. emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, - delete.getBindCount(), false, false, Collections. emptyList(), - delete.getUdfParseNodes()); - select = StatementNormalizer.normalize(select, resolverToBe); - - SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection); - boolean hasPreProcessing = transformedSelect != select; - if (transformedSelect != select) { - resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName()); - select = StatementNormalizer.normalize(transformedSelect, resolverToBe); - } - final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing; - boolean noQueryReqd = !hasPreOrPostProcessing; - // No limit and no sub queries, joins, etc in where clause - // Can't run on same server for transactional data, as we need the row keys for the data - // that is being upserted for conflict detection purposes. - // If we have immutable indexes, we'd increase the number of bytes scanned by executing - // separate queries against each index, so better to drive from a single table in that case. 
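To make the server-side conditions above concrete, a hedged usage sketch (the JDBC URL, table, and column are hypothetical): with auto-commit enabled, no LIMIT, a non-transactional table, and no client-maintained (global immutable) indexes, a range DELETE like the one below can be pushed entirely to the server as a single aggregate scan, provided server-side delete mutations are enabled via QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS.

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
      conn.setAutoCommit(true);
      // Qualifies for the server-side path: no LIMIT, not a point lookup,
      // EVENTS assumed non-transactional with no global immutable indexes.
      stmt.executeUpdate("DELETE FROM EVENTS WHERE CREATED < TO_DATE('2020-01-01')");
    }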
- boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasClientSideIndexes && allowServerMutations; - HintNode hint = delete.getHint(); - if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { - select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE)); - } - - parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection); - QueryOptimizer optimizer = new QueryOptimizer(services); - QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement)); - final QueryPlan dataPlan = compiler.compile(); - // TODO: the select clause should know that there's a sub query, but doesn't seem to currently - queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty() - ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.emptyList(), parallelIteratorFactoryToBe) - : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.emptyList(), parallelIteratorFactoryToBe)); - - runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX; - - // We need to have all indexed columns available in all immutable indexes in order - // to generate the delete markers from the query. We also cannot have any filters - // except for our SkipScanFilter for point lookups. - // A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause - // may have been optimized out. Instead, we check that there's a single SkipScanFilter - // If we can generate a plan for every index, that means all the required columns are available in every index, - // hence we can drive the delete from any of the plans. - noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size(); - int queryPlanIndex = 0; - while (noQueryReqd && queryPlanIndex < queryPlans.size()) { - QueryPlan plan = queryPlans.get(queryPlanIndex++); - StatementContext context = plan.getContext(); - noQueryReqd &= (!context.getScan().hasFilter() - || context.getScan().getFilter() instanceof SkipScanFilter) - && context.getScanRanges().isPointLookup(); - } + @Override + public Set getSourceRefs() { + return firstPlan.getSourceRefs(); + } - final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - final long maxSizeBytes = services.getProps() - .getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - - // If we're doing a query for a set of rows with no where clause, then we don't need to contact the server at all. - if (noQueryReqd) { - // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows - // from the data table, while the others will be for deleting rows from immutable indexes. 
- List mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size()); - for (final QueryPlan plan : queryPlans) { - mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes)); - } - return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans); - } else if (runOnServer) { - // TODO: better abstraction - final StatementContext context = dataPlan.getContext(); - Scan scan = context.getScan(); - scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, QueryConstants.TRUE); - - // Build an ungrouped aggregate query: select COUNT(*) from where - // The coprocessor will delete each row returned from the scan - // Ignoring ORDER BY, since with auto commit on and no limit makes no difference - SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint()); - RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY); - context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY); - if (dataPlan.getProjector().projectEveryRow()) { - projectorToBe = new RowProjector(projectorToBe,true); - } - final RowProjector projector = projectorToBe; - final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null, - OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan); - return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, maxSizeBytes); - } else { - final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe; - List adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size()); - final int offset = table.getBucketNum() == null ? 0 : 1; - Iterator projectedColsItr = projectedColumns.iterator(); - int i = 0; - while (projectedColsItr.hasNext()) { - final int position = i++; - adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) { - @Override - public int getPosition() { - return position + offset; - } - }); - } - PTable projectedTable = PTableImpl.builderWithColumns(table, adjustedProjectedColumns) - .setType(PTableType.PROJECTED) - .build(); - final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()); - - QueryPlan bestPlanToBe = dataPlan; - for (QueryPlan plan : queryPlans) { - PTable planTable = plan.getTableRef().getTable(); - if (planTable.getIndexState() != PIndexState.BUILDING) { - bestPlanToBe = plan; - break; - } - } - final QueryPlan bestPlan = bestPlanToBe; - final ListotherTableRefs = Lists.newArrayListWithExpectedSize(clientSideIndexes.size()); - for (PTable index : clientSideIndexes) { - if (!bestPlan.getTableRef().getTable().equals(index)) { - otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp())); - } - } - - if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) { - otherTableRefs.add(projectedTableRef); - } - return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, hasPreOrPostProcessing, - parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, maxSizeBytes, connection); - } + @Override + public Operation getOperation() { + return operation; } - /** - * Implementation of MutationPlan for composing a MultiRowDeleteMutationPlan. 
- */ - private class SingleRowDeleteMutationPlan implements MutationPlan { + @Override + public Long getEstimatedRowsToScan() throws SQLException { + Long estRows = null; + for (MutationPlan plan : plans) { + /* + * If any of the plan doesn't have estimate information available, then we cannot provide + * estimate for the overall plan. + */ + if (plan.getEstimatedRowsToScan() == null) { + return null; + } + estRows = add(estRows, plan.getEstimatedRowsToScan()); + } + return estRows; + } - private final QueryPlan dataPlan; - private final PhoenixConnection connection; - private final int maxSize; - private final StatementContext context; - private final long maxSizeBytes; + @Override + public Long getEstimatedBytesToScan() throws SQLException { + Long estBytes = null; + for (MutationPlan plan : plans) { + /* + * If any of the plan doesn't have estimate information available, then we cannot provide + * estimate for the overall plan. + */ + if (plan.getEstimatedBytesToScan() == null) { + return null; + } + estBytes = add(estBytes, plan.getEstimatedBytesToScan()); + } + return estBytes; + } - public SingleRowDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, int maxSize, long maxSizeBytes) { - this.dataPlan = dataPlan; - this.connection = connection; - this.maxSize = maxSize; - this.context = dataPlan.getContext(); - this.maxSizeBytes = maxSizeBytes; - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + Long estInfoTimestamp = Long.MAX_VALUE; + for (MutationPlan plan : plans) { + Long timestamp = plan.getEstimateInfoTimestamp(); + /* + * If any of the plan doesn't have estimate information available, then we cannot provide + * estimate for the overall plan. + */ + if (timestamp == null) { + return timestamp; + } + estInfoTimestamp = Math.min(estInfoTimestamp, timestamp); + } + return estInfoTimestamp; + } - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } + @Override + public QueryPlan getQueryPlan() { + return dataPlan; + } + } + + public MutationPlan compile(DeleteStatement delete) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final boolean isAutoCommit = connection.getAutoCommit(); + final boolean hasPostProcessing = delete.getLimit() != null; + final ConnectionQueryServices services = connection.getQueryServices(); + List queryPlans; + boolean allowServerMutations = + services.getProps().getBoolean(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS, + QueryServicesOptions.DEFAULT_ENABLE_SERVER_SIDE_DELETE_MUTATIONS); + NamedTableNode tableNode = delete.getTable(); + String tableName = tableNode.getName().getTableName(); + String schemaName = tableNode.getName().getSchemaName(); + SelectStatement select = null; + ColumnResolver resolverToBe = null; + DeletingParallelIteratorFactory parallelIteratorFactoryToBe; + resolverToBe = FromCompiler.getResolverForMutation(delete, connection); + final TableRef targetTableRef = resolverToBe.getTables().get(0); + PTable table = targetTableRef.getTable(); + // Cannot update: + // - read-only VIEW + // - transactional table with a connection having an SCN + // TODO: SchemaUtil.isReadOnly(PTable, connection)? 
+ if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) { + throw new ReadOnlyTableException(schemaName, tableName); + } else if (table.isTransactional() && connection.getSCN() != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } - @Override - public MutationState execute() throws SQLException { - // We have a point lookup, so we know we have a simple set of fully qualified - // keys for our ranges - ScanRanges ranges = context.getScanRanges(); - Iterator iterator = ranges.getPointLookupKeyIterator(); - MultiRowMutationState mutation = new MultiRowMutationState(ranges.getPointLookupCount()); - while (iterator.hasNext()) { - mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), - new RowMutationState(PRow.DELETE_MARKER, 0, - statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null)); + List clientSideIndexes = getClientSideMaintainedIndexes(targetTableRef); + final boolean hasClientSideIndexes = !clientSideIndexes.isEmpty(); + + boolean isSalted = table.getBucketNum() != null; + boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant(); + boolean isSharedViewIndex = table.getViewIndexId() != null; + int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); + final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset; + int selectColumnCount = pkColumnCount; + for (PTable index : clientSideIndexes) { + selectColumnCount += index.getPKColumns().size() - pkColumnCount; + } + Set projectedColumns = new LinkedHashSet(selectColumnCount + pkColumnOffset); + List aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount); + for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) { + PColumn column = table.getPKColumns().get(i); + projectedColumns.add(column); + } + for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) { + PColumn column = table.getPKColumns().get(i); + projectedColumns.add(column); + aliasedNodes.add(FACTORY.aliasedNode(null, + FACTORY.column(null, '"' + column.getName().getString() + '"', null))); + } + // Project all non PK indexed columns so that we can do the proper index maintenance on the + // indexes for which + // mutations are generated on the client side. Indexed columns are needed to identify index rows + // to be deleted + for (PTable index : table.getIndexes()) { + if (isMaintainedOnClient(index)) { + IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); + // Go through maintainer as it handles functional indexes correctly + for (Pair columnInfo : maintainer.getIndexedColumnInfo()) { + String familyName = columnInfo.getFirst(); + if (familyName != null) { + String columnName = columnInfo.getSecond(); + boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty(); + PColumn column = hasNoColumnFamilies + ? table.getColumnForColumnName(columnName) + : table.getColumnFamily(familyName).getPColumnForColumnName(columnName); + if (!projectedColumns.contains(column)) { + projectedColumns.add(column); + aliasedNodes.add(FACTORY.aliasedNode(null, + FACTORY.column(hasNoColumnFamilies ? 
null : TableName.create(null, familyName), + '"' + columnName + '"', null))); } - return new MutationState(dataPlan.getTableRef(), mutation, 0, maxSize, maxSizeBytes, connection); + } } + } + } + select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, + delete.getWhere(), Collections. emptyList(), null, delete.getOrderBy(), + delete.getLimit(), null, delete.getBindCount(), false, false, + Collections. emptyList(), delete.getUdfParseNodes()); + select = StatementNormalizer.normalize(select, resolverToBe); + + SelectStatement transformedSelect = + SubqueryRewriter.transform(select, resolverToBe, connection); + boolean hasPreProcessing = transformedSelect != select; + if (transformedSelect != select) { + resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, + delete.getTable().getName()); + select = StatementNormalizer.normalize(transformedSelect, resolverToBe); + } + final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing; + boolean noQueryReqd = !hasPreOrPostProcessing; + // No limit and no sub queries, joins, etc in where clause + // Can't run on same server for transactional data, as we need the row keys for the data + // that is being upserted for conflict detection purposes. + // If we have immutable indexes, we'd increase the number of bytes scanned by executing + // separate queries against each index, so better to drive from a single table in that case. + boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() + && !hasClientSideIndexes && allowServerMutations; + HintNode hint = delete.getHint(); + if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { + select = + SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE)); + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW")); - } + parallelIteratorFactoryToBe = + hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection); + QueryOptimizer optimizer = new QueryOptimizer(services); + QueryCompiler compiler = + new QueryCompiler(statement, select, resolverToBe, Collections. emptyList(), + parallelIteratorFactoryToBe, new SequenceManager(statement)); + final QueryPlan dataPlan = compiler.compile(); + // TODO: the select clause should know that there's a sub query, but doesn't seem to currently + queryPlans = Lists.newArrayList(!clientSideIndexes.isEmpty() + ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, + Collections. emptyList(), parallelIteratorFactoryToBe) + : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, + Collections. emptyList(), parallelIteratorFactoryToBe)); + + runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX; + + // We need to have all indexed columns available in all immutable indexes in order + // to generate the delete markers from the query. We also cannot have any filters + // except for our SkipScanFilter for point lookups. + // A simple check of the non existence of a where clause in the parse node is not sufficient, as + // the where clause + // may have been optimized out. Instead, we check that there's a single SkipScanFilter + // If we can generate a plan for every index, that means all the required columns are available + // in every index, + // hence we can drive the delete from any of the plans. 
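A hedged illustration of the point-lookup path described above (schema and names are hypothetical): when the WHERE clause pins every primary-key column, there is no LIMIT, and a plan can be generated for every client-maintained index, each plan's scan reduces to point lookups behind a SkipScanFilter, so no query is needed at all. The compiler then emits one SingleRowDeleteMutationPlan per plan, wrapped in a MultiRowDeleteMutationPlan, and each point key simply becomes a delete marker in the MutationState.

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
      // EVENTS assumed to have PRIMARY KEY (TENANT_ID, EVENT_ID): fully qualified
      // keys mean the delete markers can be built without contacting the server.
      stmt.executeUpdate("DELETE FROM EVENTS WHERE TENANT_ID = 'acme' AND EVENT_ID IN (1, 2, 3)");
    }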
+ noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size(); + int queryPlanIndex = 0; + while (noQueryReqd && queryPlanIndex < queryPlans.size()) { + QueryPlan plan = queryPlans.get(queryPlanIndex++); + StatementContext context = plan.getContext(); + noQueryReqd &= + (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) + && context.getScanRanges().isPointLookup(); + } - @Override - public QueryPlan getQueryPlan() { - return dataPlan; - } + final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + final long maxSizeBytes = + services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); + + // If we're doing a query for a set of rows with no where clause, then we don't need to contact + // the server at all. + if (noQueryReqd) { + // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows + // from the data table, while the others will be for deleting rows from immutable indexes. + List mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size()); + for (final QueryPlan plan : queryPlans) { + mutationPlans.add(new SingleRowDeleteMutationPlan(plan, connection, maxSize, maxSizeBytes)); + } + return new MultiRowDeleteMutationPlan(dataPlan, mutationPlans); + } else if (runOnServer) { + // TODO: better abstraction + final StatementContext context = dataPlan.getContext(); + Scan scan = context.getScan(); + scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, QueryConstants.TRUE); + + // Build an ungrouped aggregate query: select COUNT(*) from
where + // The coprocessor will delete each row returned from the scan + // Ignoring ORDER BY, since with auto commit on and no limit makes no difference + SelectStatement aggSelect = + SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint()); + RowProjector projectorToBe = + ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY); + context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY); + if (dataPlan.getProjector().projectEveryRow()) { + projectorToBe = new RowProjector(projectorToBe, true); + } + final RowProjector projector = projectorToBe; + final QueryPlan aggPlan = + new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null, + OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, dataPlan); + return new ServerSelectDeleteMutationPlan(dataPlan, connection, aggPlan, projector, maxSize, + maxSizeBytes); + } else { + final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe; + List adjustedProjectedColumns = + Lists.newArrayListWithExpectedSize(projectedColumns.size()); + final int offset = table.getBucketNum() == null ? 0 : 1; + Iterator projectedColsItr = projectedColumns.iterator(); + int i = 0; + while (projectedColsItr.hasNext()) { + final int position = i++; + adjustedProjectedColumns.add(new DelegateColumn(projectedColsItr.next()) { + @Override + public int getPosition() { + return position + offset; + } + }); + } + PTable projectedTable = PTableImpl.builderWithColumns(table, adjustedProjectedColumns) + .setType(PTableType.PROJECTED).build(); + final TableRef projectedTableRef = new TableRef(projectedTable, + targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()); + + QueryPlan bestPlanToBe = dataPlan; + for (QueryPlan plan : queryPlans) { + PTable planTable = plan.getTableRef().getTable(); + if (planTable.getIndexState() != PIndexState.BUILDING) { + bestPlanToBe = plan; + break; + } + } + final QueryPlan bestPlan = bestPlanToBe; + final List otherTableRefs = + Lists.newArrayListWithExpectedSize(clientSideIndexes.size()); + for (PTable index : clientSideIndexes) { + if (!bestPlan.getTableRef().getTable().equals(index)) { + otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), + targetTableRef.getTimeStamp())); + } + } + + if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) { + otherTableRefs.add(projectedTableRef); + } + return new ClientSelectDeleteMutationPlan(targetTableRef, dataPlan, bestPlan, + hasPreOrPostProcessing, parallelIteratorFactory, otherTableRefs, projectedTableRef, maxSize, + maxSizeBytes, connection); + } + } + + /** + * Implementation of MutationPlan for composing a MultiRowDeleteMutationPlan. 
+ */ + private class SingleRowDeleteMutationPlan implements MutationPlan { + + private final QueryPlan dataPlan; + private final PhoenixConnection connection; + private final int maxSize; + private final StatementContext context; + private final long maxSizeBytes; + + public SingleRowDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, + int maxSize, long maxSizeBytes) { + this.dataPlan = dataPlan; + this.connection = connection; + this.maxSize = maxSize; + this.context = dataPlan.getContext(); + this.maxSizeBytes = maxSizeBytes; + } - @Override - public StatementContext getContext() { - return context; - } + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } - @Override - public TableRef getTargetRef() { - return dataPlan.getTableRef(); - } + @Override + public MutationState execute() throws SQLException { + // We have a point lookup, so we know we have a simple set of fully qualified + // keys for our ranges + ScanRanges ranges = context.getScanRanges(); + Iterator iterator = ranges.getPointLookupKeyIterator(); + MultiRowMutationState mutation = new MultiRowMutationState(ranges.getPointLookupCount()); + while (iterator.hasNext()) { + mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), + new RowMutationState(PRow.DELETE_MARKER, 0, + statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, + null)); + } + return new MutationState(dataPlan.getTableRef(), mutation, 0, maxSize, maxSizeBytes, + connection); + } - @Override - public Set getSourceRefs() { - // Don't include the target - return Collections.emptySet(); - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW")); + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public QueryPlan getQueryPlan() { + return dataPlan; + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return 0l; - } + @Override + public StatementContext getContext() { + return context; + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return 0l; - } + @Override + public TableRef getTargetRef() { + return dataPlan.getTableRef(); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } + @Override + public Set getSourceRefs() { + // Don't include the target + return Collections.emptySet(); } - /** - * Implementation of MutationPlan that is selected if - * 1) there is no immutable index presented for the table, - * 2) auto commit is enabled as well as server side delete mutations are enabled, - * 3) the table is not transactional, - * 4) the query has no LIMIT clause, and - * 5) the query has WHERE clause and is not strictly point lookup. 
- */ - public class ServerSelectDeleteMutationPlan implements MutationPlan { - private final StatementContext context; - private final QueryPlan dataPlan; - private final PhoenixConnection connection; - private final QueryPlan aggPlan; - private final RowProjector projector; - private final int maxSize; - private final long maxSizeBytes; - - public ServerSelectDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, QueryPlan aggPlan, - RowProjector projector, int maxSize, long maxSizeBytes) { - this.context = dataPlan.getContext(); - this.dataPlan = dataPlan; - this.connection = connection; - this.aggPlan = aggPlan; - this.projector = projector; - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return 0l; + } - @Override - public StatementContext getContext() { - return context; - } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return 0l; + } - @Override - public TableRef getTargetRef() { - return dataPlan.getTableRef(); - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } + } + + /** + * Implementation of MutationPlan that is selected if 1) there is no immutable index presented for + * the table, 2) auto commit is enabled as well as server side delete mutations are enabled, 3) + * the table is not transactional, 4) the query has no LIMIT clause, and 5) the query has WHERE + * clause and is not strictly point lookup. + */ + public class ServerSelectDeleteMutationPlan implements MutationPlan { + private final StatementContext context; + private final QueryPlan dataPlan; + private final PhoenixConnection connection; + private final QueryPlan aggPlan; + private final RowProjector projector; + private final int maxSize; + private final long maxSizeBytes; + + public ServerSelectDeleteMutationPlan(QueryPlan dataPlan, PhoenixConnection connection, + QueryPlan aggPlan, RowProjector projector, int maxSize, long maxSizeBytes) { + this.context = dataPlan.getContext(); + this.dataPlan = dataPlan; + this.connection = connection; + this.aggPlan = aggPlan; + this.projector = projector; + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + } - @Override - public Set getSourceRefs() { - return dataPlan.getSourceRefs(); - } + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public StatementContext getContext() { + return context; + } - @Override - public MutationState execute() throws SQLException { - // TODO: share this block of code with UPSERT SELECT - ImmutableBytesWritable ptr = context.getTempPtr(); - PTable table = dataPlan.getTableRef().getTable(); - table.getIndexMaintainers(ptr, context.getConnection()); - ScanUtil.annotateScanWithMetadataAttributes(table, context.getScan()); - byte[] txState = table.isTransactional() ? 
connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY; - ServerCache cache = null; - try { - if (ptr.getLength() > 0) { - byte[] uuidValue = ServerCacheClient.generateId(); - context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get()); - context.getScan().setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); - ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION); - String sourceOfDelete = statement.getConnection().getSourceOfOperation(); - if (sourceOfDelete != null) { - context.getScan().setAttribute(QueryServices.SOURCE_OPERATION_ATTRIB, - Bytes.toBytes(sourceOfDelete)); - } - } - ResultIterator iterator = aggPlan.iterator(); - try { - Tuple row = iterator.next(); - final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr); - return new MutationState(maxSize, maxSizeBytes, connection) { - @Override - public long getUpdateCount() { - return mutationCount; - } - }; - } finally { - iterator.close(); - } - } finally { - if (cache != null) { - cache.close(); - } - } - } + @Override + public TableRef getTargetRef() { + return dataPlan.getTableRef(); + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = aggPlan.getExplainPlan(); - List queryPlanSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - List planSteps = - Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - newBuilder.setAbstractExplainPlan("DELETE ROWS SERVER SELECT"); - planSteps.add("DELETE ROWS SERVER SELECT"); - planSteps.addAll(queryPlanSteps); - return new ExplainPlan(planSteps, newBuilder.build()); - } + @Override + public Set getSourceRefs() { + return dataPlan.getSourceRefs(); + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return aggPlan.getEstimatedRowsToScan(); - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return aggPlan.getEstimatedBytesToScan(); + @Override + public MutationState execute() throws SQLException { + // TODO: share this block of code with UPSERT SELECT + ImmutableBytesWritable ptr = context.getTempPtr(); + PTable table = dataPlan.getTableRef().getTable(); + table.getIndexMaintainers(ptr, context.getConnection()); + ScanUtil.annotateScanWithMetadataAttributes(table, context.getScan()); + byte[] txState = table.isTransactional() + ? 
connection.getMutationState().encodeTransaction() + : ByteUtil.EMPTY_BYTE_ARRAY; + ServerCache cache = null; + try { + if (ptr.getLength() > 0) { + byte[] uuidValue = ServerCacheClient.generateId(); + context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get()); + context.getScan().setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); + ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION); + String sourceOfDelete = statement.getConnection().getSourceOfOperation(); + if (sourceOfDelete != null) { + context.getScan().setAttribute(QueryServices.SOURCE_OPERATION_ATTRIB, + Bytes.toBytes(sourceOfDelete)); + } + } + ResultIterator iterator = aggPlan.iterator(); + try { + Tuple row = iterator.next(); + final long mutationCount = + (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr); + return new MutationState(maxSize, maxSizeBytes, connection) { + @Override + public long getUpdateCount() { + return mutationCount; + } + }; + } finally { + iterator.close(); } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return aggPlan.getEstimateInfoTimestamp(); + } finally { + if (cache != null) { + cache.close(); } + } + } - @Override - public QueryPlan getQueryPlan() { - return aggPlan; - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = aggPlan.getExplainPlan(); + List queryPlanSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + List planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + newBuilder.setAbstractExplainPlan("DELETE ROWS SERVER SELECT"); + planSteps.add("DELETE ROWS SERVER SELECT"); + planSteps.addAll(queryPlanSteps); + return new ExplainPlan(planSteps, newBuilder.build()); } - /** - * Implementation of MutationPlan that is selected if the query doesn't match the criteria of - * ServerSelectDeleteMutationPlan. 
- */ - public class ClientSelectDeleteMutationPlan implements MutationPlan { - private final StatementContext context; - private final TableRef targetTableRef; - private final QueryPlan dataPlan; - private final QueryPlan bestPlan; - private final boolean hasPreOrPostProcessing; - private final DeletingParallelIteratorFactory parallelIteratorFactory; - private final List otherTableRefs; - private final TableRef projectedTableRef; - private final int maxSize; - private final long maxSizeBytes; - private final PhoenixConnection connection; - - public ClientSelectDeleteMutationPlan(TableRef targetTableRef, QueryPlan dataPlan, QueryPlan bestPlan, - boolean hasPreOrPostProcessing, - DeletingParallelIteratorFactory parallelIteratorFactory, - List otherTableRefs, TableRef projectedTableRef, int maxSize, - long maxSizeBytes, PhoenixConnection connection) { - this.context = bestPlan.getContext(); - this.targetTableRef = targetTableRef; - this.dataPlan = dataPlan; - this.bestPlan = bestPlan; - this.hasPreOrPostProcessing = hasPreOrPostProcessing; - this.parallelIteratorFactory = parallelIteratorFactory; - this.otherTableRefs = otherTableRefs; - this.projectedTableRef = projectedTableRef; - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - this.connection = connection; - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return aggPlan.getEstimatedRowsToScan(); + } - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return aggPlan.getEstimatedBytesToScan(); + } - @Override - public StatementContext getContext() { - return context; - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return aggPlan.getEstimateInfoTimestamp(); + } - @Override - public TableRef getTargetRef() { - return targetTableRef; - } + @Override + public QueryPlan getQueryPlan() { + return aggPlan; + } + } + + /** + * Implementation of MutationPlan that is selected if the query doesn't match the criteria of + * ServerSelectDeleteMutationPlan. 
+ */ + public class ClientSelectDeleteMutationPlan implements MutationPlan { + private final StatementContext context; + private final TableRef targetTableRef; + private final QueryPlan dataPlan; + private final QueryPlan bestPlan; + private final boolean hasPreOrPostProcessing; + private final DeletingParallelIteratorFactory parallelIteratorFactory; + private final List otherTableRefs; + private final TableRef projectedTableRef; + private final int maxSize; + private final long maxSizeBytes; + private final PhoenixConnection connection; + + public ClientSelectDeleteMutationPlan(TableRef targetTableRef, QueryPlan dataPlan, + QueryPlan bestPlan, boolean hasPreOrPostProcessing, + DeletingParallelIteratorFactory parallelIteratorFactory, List otherTableRefs, + TableRef projectedTableRef, int maxSize, long maxSizeBytes, PhoenixConnection connection) { + this.context = bestPlan.getContext(); + this.targetTableRef = targetTableRef; + this.dataPlan = dataPlan; + this.bestPlan = bestPlan; + this.hasPreOrPostProcessing = hasPreOrPostProcessing; + this.parallelIteratorFactory = parallelIteratorFactory; + this.otherTableRefs = otherTableRefs; + this.projectedTableRef = projectedTableRef; + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + this.connection = connection; + } - @Override - public Set getSourceRefs() { - return dataPlan.getSourceRefs(); - } + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public StatementContext getContext() { + return context; + } - @Override - public MutationState execute() throws SQLException { - ResultIterator iterator = bestPlan.iterator(); - try { - // If we're not doing any pre or post processing, we can produce the delete mutations directly - // in the parallel threads executed for the scan - if (!hasPreOrPostProcessing) { - Tuple tuple; - long totalRowCount = 0; - if (parallelIteratorFactory != null) { - parallelIteratorFactory.setQueryPlan(bestPlan); - parallelIteratorFactory.setOtherTableRefs(otherTableRefs); - parallelIteratorFactory.setProjectedTableRef(projectedTableRef); - } - while ((tuple=iterator.next()) != null) {// Runs query - Cell kv = tuple.getValue(0); - totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault()); - } - // Return total number of rows that have been deleted from the table. In the case of auto commit being off - // the mutations will all be in the mutation state of the current connection. - MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount); - - // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed. - state.setReadMetricQueue(context.getReadMetricsQueue()); - - return state; - } else { - // Otherwise, we have to execute the query and produce the delete mutations in the single thread - // producing the query results. 
- return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs); - } - } finally { - iterator.close(); - } - } + @Override + public TableRef getTargetRef() { + return targetTableRef; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = bestPlan.getExplainPlan(); - List queryPlanSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - List planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - newBuilder.setAbstractExplainPlan("DELETE ROWS CLIENT SELECT"); - planSteps.add("DELETE ROWS CLIENT SELECT"); - planSteps.addAll(queryPlanSteps); - return new ExplainPlan(planSteps, newBuilder.build()); - } + @Override + public Set getSourceRefs() { + return dataPlan.getSourceRefs(); + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return bestPlan.getEstimatedRowsToScan(); - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return bestPlan.getEstimatedBytesToScan(); - } + @Override + public MutationState execute() throws SQLException { + ResultIterator iterator = bestPlan.iterator(); + try { + // If we're not doing any pre or post processing, we can produce the delete mutations + // directly + // in the parallel threads executed for the scan + if (!hasPreOrPostProcessing) { + Tuple tuple; + long totalRowCount = 0; + if (parallelIteratorFactory != null) { + parallelIteratorFactory.setQueryPlan(bestPlan); + parallelIteratorFactory.setOtherTableRefs(otherTableRefs); + parallelIteratorFactory.setProjectedTableRef(projectedTableRef); + } + while ((tuple = iterator.next()) != null) {// Runs query + Cell kv = tuple.getValue(0); + totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), + kv.getValueOffset(), SortOrder.getDefault()); + } + // Return total number of rows that have been deleted from the table. In the case of auto + // commit being off + // the mutations will all be in the mutation state of the current connection. + MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount); + + // set the read metrics accumulated in the parent context so that it can be published when + // the mutations are committed. + state.setReadMetricQueue(context.getReadMetricsQueue()); + + return state; + } else { + // Otherwise, we have to execute the query and produce the delete mutations in the single + // thread + // producing the query results. 
+ return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs); + } + } finally { + iterator.close(); + } + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return bestPlan.getEstimateInfoTimestamp(); - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = bestPlan.getExplainPlan(); + List queryPlanSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + List planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + newBuilder.setAbstractExplainPlan("DELETE ROWS CLIENT SELECT"); + planSteps.add("DELETE ROWS CLIENT SELECT"); + planSteps.addAll(queryPlanSteps); + return new ExplainPlan(planSteps, newBuilder.build()); + } - @Override - public QueryPlan getQueryPlan() { - return bestPlan; - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return bestPlan.getEstimatedRowsToScan(); + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return bestPlan.getEstimatedBytesToScan(); } - - private static boolean isMaintainedOnClient(PTable table) { - // Test for not being local (rather than being GLOBAL) so that this doesn't fail - // when tested with our projected table. - return (table.getIndexType() != IndexType.LOCAL && (table.isTransactional() || table.isImmutableRows())) || - (table.getIndexType() == IndexType.LOCAL && (table.isTransactional() && - table.getTransactionProvider().getTransactionProvider().isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER) ) ); + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return bestPlan.getEstimateInfoTimestamp(); } - + + @Override + public QueryPlan getQueryPlan() { + return bestPlan; + } + } + + private static boolean isMaintainedOnClient(PTable table) { + // Test for not being local (rather than being GLOBAL) so that this doesn't fail + // when tested with our projected table. + return (table.getIndexType() != IndexType.LOCAL + && (table.isTransactional() || table.isImmutableRows())) + || (table.getIndexType() == IndexType.LOCAL + && (table.isTransactional() && table.getTransactionProvider().getTransactionProvider() + .isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER))); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java index 2785dc87f9a..882ebcc3aba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/DropSequenceCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,33 +27,31 @@ import org.apache.phoenix.parse.DropSequenceStatement; import org.apache.phoenix.schema.MetaDataClient; - public class DropSequenceCompiler { - private final PhoenixStatement statement; - private final Operation operation; - - public DropSequenceCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } - - - public MutationPlan compile(final DropSequenceStatement sequence) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final MetaDataClient client = new MetaDataClient(connection); - final StatementContext context = new StatementContext(statement); - return new BaseMutationPlan(context, operation) { - - @Override - public MutationState execute() throws SQLException { - return client.dropSequence(sequence); - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP SEQUENCE")); - } - - }; - } -} \ No newline at end of file + private final PhoenixStatement statement; + private final Operation operation; + + public DropSequenceCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } + + public MutationPlan compile(final DropSequenceStatement sequence) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final MetaDataClient client = new MetaDataClient(connection); + final StatementContext context = new StatementContext(statement); + return new BaseMutationPlan(context, operation) { + + @Override + public MutationState execute() throws SQLException { + return client.dropSequence(sequence); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP SEQUENCE")); + } + + }; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java index 10ad1518f1b..0d05068a459 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,38 +23,36 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; public class ExplainPlan { - public static final ExplainPlan EMPTY_PLAN = new ExplainPlan(Collections.emptyList()); - - private final List planSteps; - private final ExplainPlanAttributes planStepsAsAttributes; - - public ExplainPlan(List planSteps) { - this.planSteps = ImmutableList.copyOf(planSteps); - this.planStepsAsAttributes = - ExplainPlanAttributes.getDefaultExplainPlan(); - } - - public ExplainPlan(List planSteps, - ExplainPlanAttributes planStepsAsAttributes) { - this.planSteps = planSteps; - this.planStepsAsAttributes = planStepsAsAttributes; - } - - public List getPlanSteps() { - return planSteps; - } - - public ExplainPlanAttributes getPlanStepsAsAttributes() { - return planStepsAsAttributes; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(); - for (String step : planSteps) { - buf.append(step); - buf.append('\n'); - } - return buf.toString(); + public static final ExplainPlan EMPTY_PLAN = new ExplainPlan(Collections. emptyList()); + + private final List planSteps; + private final ExplainPlanAttributes planStepsAsAttributes; + + public ExplainPlan(List planSteps) { + this.planSteps = ImmutableList.copyOf(planSteps); + this.planStepsAsAttributes = ExplainPlanAttributes.getDefaultExplainPlan(); + } + + public ExplainPlan(List planSteps, ExplainPlanAttributes planStepsAsAttributes) { + this.planSteps = planSteps; + this.planStepsAsAttributes = planStepsAsAttributes; + } + + public List getPlanSteps() { + return planSteps; + } + + public ExplainPlanAttributes getPlanStepsAsAttributes() { + return planStepsAsAttributes; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + for (String step : planSteps) { + buf.append(step); + buf.append('\n'); } + return buf.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java index d6c4d0398a0..7e559db0eaa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExplainPlanAttributes.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compile; import java.util.List; @@ -28,623 +27,567 @@ import org.apache.phoenix.schema.PColumn; /** - * ExplainPlan attributes that contain individual attributes of ExplainPlan - * that we can assert against. This also makes attribute retrieval easier - * as an API rather than retrieving list of Strings containing entire plan. + * ExplainPlan attributes that contain individual attributes of ExplainPlan that we can assert + * against. 
This also makes attribute retrieval easier as an API rather than retrieving list of + * Strings containing entire plan. */ public class ExplainPlanAttributes { - private final String abstractExplainPlan; - private final Integer splitsChunk; - private final Long estimatedRows; - private final Long estimatedSizeInBytes; - private final String iteratorTypeAndScanSize; - private final Double samplingRate; - private final boolean useRoundRobinIterator; - private final String hexStringRVCOffset; - private final Consistency consistency; - private final Hint hint; - private final String serverSortedBy; - private final String explainScanType; - private final String tableName; - private final String keyRanges; - private final Long scanTimeRangeMin; - private final Long scanTimeRangeMax; - private final String serverWhereFilter; - private final String serverDistinctFilter; - private final Integer serverOffset; - private final Long serverRowLimit; - private final boolean serverArrayElementProjection; - private final String serverAggregate; - private final String clientFilterBy; - private final String clientAggregate; - private final String clientSortedBy; - private final String clientAfterAggregate; - private final String clientDistinctFilter; - private final Integer clientOffset; - private final Integer clientRowLimit; - private final Integer clientSequenceCount; - private final String clientCursorName; - private final String clientSortAlgo; - // This object represents PlanAttributes object for rhs query - // to be used only by Join queries. In case of Join query, lhs plan is - // represented by 'this' object and rhs plan is represented by - // 'rhsJoinQueryExplainPlan' object (which in turn should - // have null rhsJoinQueryExplainPlan) - // For non-Join queries related Plans, rhsJoinQueryExplainPlan will always - // be null - private final ExplainPlanAttributes rhsJoinQueryExplainPlan; - private final Set serverMergeColumns; - private final List regionLocations; - private final int numRegionLocationLookups; - - private static final ExplainPlanAttributes EXPLAIN_PLAN_INSTANCE = - new ExplainPlanAttributes(); - - private ExplainPlanAttributes() { - this.abstractExplainPlan = null; - this.splitsChunk = null; - this.estimatedRows = null; - this.estimatedSizeInBytes = null; - this.iteratorTypeAndScanSize = null; - this.samplingRate = null; - this.useRoundRobinIterator = false; - this.hexStringRVCOffset = null; - this.consistency = null; - this.hint = null; - this.serverSortedBy = null; - this.explainScanType = null; - this.tableName = null; - this.keyRanges = null; - this.scanTimeRangeMin = null; - this.scanTimeRangeMax = null; - this.serverWhereFilter = null; - this.serverDistinctFilter = null; - this.serverOffset = null; - this.serverRowLimit = null; - this.serverArrayElementProjection = false; - this.serverAggregate = null; - this.clientFilterBy = null; - this.clientAggregate = null; - this.clientSortedBy = null; - this.clientAfterAggregate = null; - this.clientDistinctFilter = null; - this.clientOffset = null; - this.clientRowLimit = null; - this.clientSequenceCount = null; - this.clientCursorName = null; - this.clientSortAlgo = null; - this.rhsJoinQueryExplainPlan = null; - this.serverMergeColumns = null; - this.regionLocations = null; - this.numRegionLocationLookups = 0; - } - - public ExplainPlanAttributes(String abstractExplainPlan, - Integer splitsChunk, Long estimatedRows, Long estimatedSizeInBytes, - String iteratorTypeAndScanSize, Double samplingRate, - boolean useRoundRobinIterator, - 
String hexStringRVCOffset, Consistency consistency, - Hint hint, String serverSortedBy, String explainScanType, - String tableName, String keyRanges, Long scanTimeRangeMin, - Long scanTimeRangeMax, String serverWhereFilter, - String serverDistinctFilter, - Integer serverOffset, Long serverRowLimit, - boolean serverArrayElementProjection, String serverAggregate, - String clientFilterBy, String clientAggregate, - String clientSortedBy, - String clientAfterAggregate, String clientDistinctFilter, - Integer clientOffset, Integer clientRowLimit, - Integer clientSequenceCount, String clientCursorName, - String clientSortAlgo, - ExplainPlanAttributes rhsJoinQueryExplainPlan, Set serverMergeColumns, - List regionLocations, int numRegionLocationLookups) { - this.abstractExplainPlan = abstractExplainPlan; - this.splitsChunk = splitsChunk; - this.estimatedRows = estimatedRows; - this.estimatedSizeInBytes = estimatedSizeInBytes; - this.iteratorTypeAndScanSize = iteratorTypeAndScanSize; - this.samplingRate = samplingRate; - this.useRoundRobinIterator = useRoundRobinIterator; - this.hexStringRVCOffset = hexStringRVCOffset; - this.consistency = consistency; - this.hint = hint; - this.serverSortedBy = serverSortedBy; - this.explainScanType = explainScanType; - this.tableName = tableName; - this.keyRanges = keyRanges; - this.scanTimeRangeMin = scanTimeRangeMin; - this.scanTimeRangeMax = scanTimeRangeMax; - this.serverWhereFilter = serverWhereFilter; - this.serverDistinctFilter = serverDistinctFilter; - this.serverOffset = serverOffset; - this.serverRowLimit = serverRowLimit; - this.serverArrayElementProjection = serverArrayElementProjection; - this.serverAggregate = serverAggregate; - this.clientFilterBy = clientFilterBy; - this.clientAggregate = clientAggregate; - this.clientSortedBy = clientSortedBy; - this.clientAfterAggregate = clientAfterAggregate; - this.clientDistinctFilter = clientDistinctFilter; - this.clientOffset = clientOffset; - this.clientRowLimit = clientRowLimit; - this.clientSequenceCount = clientSequenceCount; - this.clientCursorName = clientCursorName; - this.clientSortAlgo = clientSortAlgo; - this.rhsJoinQueryExplainPlan = rhsJoinQueryExplainPlan; - this.serverMergeColumns = serverMergeColumns; - this.regionLocations = regionLocations; - this.numRegionLocationLookups = numRegionLocationLookups; - } - - public String getAbstractExplainPlan() { - return abstractExplainPlan; + private final String abstractExplainPlan; + private final Integer splitsChunk; + private final Long estimatedRows; + private final Long estimatedSizeInBytes; + private final String iteratorTypeAndScanSize; + private final Double samplingRate; + private final boolean useRoundRobinIterator; + private final String hexStringRVCOffset; + private final Consistency consistency; + private final Hint hint; + private final String serverSortedBy; + private final String explainScanType; + private final String tableName; + private final String keyRanges; + private final Long scanTimeRangeMin; + private final Long scanTimeRangeMax; + private final String serverWhereFilter; + private final String serverDistinctFilter; + private final Integer serverOffset; + private final Long serverRowLimit; + private final boolean serverArrayElementProjection; + private final String serverAggregate; + private final String clientFilterBy; + private final String clientAggregate; + private final String clientSortedBy; + private final String clientAfterAggregate; + private final String clientDistinctFilter; + private final Integer clientOffset; + 
private final Integer clientRowLimit; + private final Integer clientSequenceCount; + private final String clientCursorName; + private final String clientSortAlgo; + // This object represents PlanAttributes object for rhs query + // to be used only by Join queries. In case of Join query, lhs plan is + // represented by 'this' object and rhs plan is represented by + // 'rhsJoinQueryExplainPlan' object (which in turn should + // have null rhsJoinQueryExplainPlan) + // For non-Join queries related Plans, rhsJoinQueryExplainPlan will always + // be null + private final ExplainPlanAttributes rhsJoinQueryExplainPlan; + private final Set serverMergeColumns; + private final List regionLocations; + private final int numRegionLocationLookups; + + private static final ExplainPlanAttributes EXPLAIN_PLAN_INSTANCE = new ExplainPlanAttributes(); + + private ExplainPlanAttributes() { + this.abstractExplainPlan = null; + this.splitsChunk = null; + this.estimatedRows = null; + this.estimatedSizeInBytes = null; + this.iteratorTypeAndScanSize = null; + this.samplingRate = null; + this.useRoundRobinIterator = false; + this.hexStringRVCOffset = null; + this.consistency = null; + this.hint = null; + this.serverSortedBy = null; + this.explainScanType = null; + this.tableName = null; + this.keyRanges = null; + this.scanTimeRangeMin = null; + this.scanTimeRangeMax = null; + this.serverWhereFilter = null; + this.serverDistinctFilter = null; + this.serverOffset = null; + this.serverRowLimit = null; + this.serverArrayElementProjection = false; + this.serverAggregate = null; + this.clientFilterBy = null; + this.clientAggregate = null; + this.clientSortedBy = null; + this.clientAfterAggregate = null; + this.clientDistinctFilter = null; + this.clientOffset = null; + this.clientRowLimit = null; + this.clientSequenceCount = null; + this.clientCursorName = null; + this.clientSortAlgo = null; + this.rhsJoinQueryExplainPlan = null; + this.serverMergeColumns = null; + this.regionLocations = null; + this.numRegionLocationLookups = 0; + } + + public ExplainPlanAttributes(String abstractExplainPlan, Integer splitsChunk, Long estimatedRows, + Long estimatedSizeInBytes, String iteratorTypeAndScanSize, Double samplingRate, + boolean useRoundRobinIterator, String hexStringRVCOffset, Consistency consistency, Hint hint, + String serverSortedBy, String explainScanType, String tableName, String keyRanges, + Long scanTimeRangeMin, Long scanTimeRangeMax, String serverWhereFilter, + String serverDistinctFilter, Integer serverOffset, Long serverRowLimit, + boolean serverArrayElementProjection, String serverAggregate, String clientFilterBy, + String clientAggregate, String clientSortedBy, String clientAfterAggregate, + String clientDistinctFilter, Integer clientOffset, Integer clientRowLimit, + Integer clientSequenceCount, String clientCursorName, String clientSortAlgo, + ExplainPlanAttributes rhsJoinQueryExplainPlan, Set serverMergeColumns, + List regionLocations, int numRegionLocationLookups) { + this.abstractExplainPlan = abstractExplainPlan; + this.splitsChunk = splitsChunk; + this.estimatedRows = estimatedRows; + this.estimatedSizeInBytes = estimatedSizeInBytes; + this.iteratorTypeAndScanSize = iteratorTypeAndScanSize; + this.samplingRate = samplingRate; + this.useRoundRobinIterator = useRoundRobinIterator; + this.hexStringRVCOffset = hexStringRVCOffset; + this.consistency = consistency; + this.hint = hint; + this.serverSortedBy = serverSortedBy; + this.explainScanType = explainScanType; + this.tableName = tableName; + this.keyRanges = 
keyRanges; + this.scanTimeRangeMin = scanTimeRangeMin; + this.scanTimeRangeMax = scanTimeRangeMax; + this.serverWhereFilter = serverWhereFilter; + this.serverDistinctFilter = serverDistinctFilter; + this.serverOffset = serverOffset; + this.serverRowLimit = serverRowLimit; + this.serverArrayElementProjection = serverArrayElementProjection; + this.serverAggregate = serverAggregate; + this.clientFilterBy = clientFilterBy; + this.clientAggregate = clientAggregate; + this.clientSortedBy = clientSortedBy; + this.clientAfterAggregate = clientAfterAggregate; + this.clientDistinctFilter = clientDistinctFilter; + this.clientOffset = clientOffset; + this.clientRowLimit = clientRowLimit; + this.clientSequenceCount = clientSequenceCount; + this.clientCursorName = clientCursorName; + this.clientSortAlgo = clientSortAlgo; + this.rhsJoinQueryExplainPlan = rhsJoinQueryExplainPlan; + this.serverMergeColumns = serverMergeColumns; + this.regionLocations = regionLocations; + this.numRegionLocationLookups = numRegionLocationLookups; + } + + public String getAbstractExplainPlan() { + return abstractExplainPlan; + } + + public Integer getSplitsChunk() { + return splitsChunk; + } + + public Long getEstimatedRows() { + return estimatedRows; + } + + public Long getEstimatedSizeInBytes() { + return estimatedSizeInBytes; + } + + public String getIteratorTypeAndScanSize() { + return iteratorTypeAndScanSize; + } + + public Double getSamplingRate() { + return samplingRate; + } + + public boolean isUseRoundRobinIterator() { + return useRoundRobinIterator; + } + + public String getHexStringRVCOffset() { + return hexStringRVCOffset; + } + + public Consistency getConsistency() { + return consistency; + } + + public Hint getHint() { + return hint; + } + + public String getServerSortedBy() { + return serverSortedBy; + } + + public String getExplainScanType() { + return explainScanType; + } + + public String getTableName() { + return tableName; + } + + public String getKeyRanges() { + return keyRanges; + } + + public Long getScanTimeRangeMin() { + return scanTimeRangeMin; + } + + public Long getScanTimeRangeMax() { + return scanTimeRangeMax; + } + + public String getServerWhereFilter() { + return serverWhereFilter; + } + + public String getServerDistinctFilter() { + return serverDistinctFilter; + } + + public Integer getServerOffset() { + return serverOffset; + } + + public Long getServerRowLimit() { + return serverRowLimit; + } + + public boolean isServerArrayElementProjection() { + return serverArrayElementProjection; + } + + public String getServerAggregate() { + return serverAggregate; + } + + public String getClientFilterBy() { + return clientFilterBy; + } + + public String getClientAggregate() { + return clientAggregate; + } + + public String getClientSortedBy() { + return clientSortedBy; + } + + public String getClientAfterAggregate() { + return clientAfterAggregate; + } + + public String getClientDistinctFilter() { + return clientDistinctFilter; + } + + public Integer getClientOffset() { + return clientOffset; + } + + public Integer getClientRowLimit() { + return clientRowLimit; + } + + public Integer getClientSequenceCount() { + return clientSequenceCount; + } + + public String getClientCursorName() { + return clientCursorName; + } + + public String getClientSortAlgo() { + return clientSortAlgo; + } + + public ExplainPlanAttributes getRhsJoinQueryExplainPlan() { + return rhsJoinQueryExplainPlan; + } + + public Set getServerMergeColumns() { + return serverMergeColumns; + } + + public List getRegionLocations() { + return 
regionLocations; + } + + public int getNumRegionLocationLookups() { + return numRegionLocationLookups; + } + + public static ExplainPlanAttributes getDefaultExplainPlan() { + return EXPLAIN_PLAN_INSTANCE; + } + + public static class ExplainPlanAttributesBuilder { + private String abstractExplainPlan; + private Integer splitsChunk; + private Long estimatedRows; + private Long estimatedSizeInBytes; + private String iteratorTypeAndScanSize; + private Double samplingRate; + private boolean useRoundRobinIterator; + private String hexStringRVCOffset; + private Consistency consistency; + private HintNode.Hint hint; + private String serverSortedBy; + private String explainScanType; + private String tableName; + private String keyRanges; + private Long scanTimeRangeMin; + private Long scanTimeRangeMax; + private String serverWhereFilter; + private String serverDistinctFilter; + private Integer serverOffset; + private Long serverRowLimit; + private boolean serverArrayElementProjection; + private String serverAggregate; + private String clientFilterBy; + private String clientAggregate; + private String clientSortedBy; + private String clientAfterAggregate; + private String clientDistinctFilter; + private Integer clientOffset; + private Integer clientRowLimit; + private Integer clientSequenceCount; + private String clientCursorName; + private String clientSortAlgo; + private ExplainPlanAttributes rhsJoinQueryExplainPlan; + private Set serverMergeColumns; + private List regionLocations; + private int numRegionLocationLookups; + + public ExplainPlanAttributesBuilder() { + // default + } + + public ExplainPlanAttributesBuilder(ExplainPlanAttributes explainPlanAttributes) { + this.abstractExplainPlan = explainPlanAttributes.getAbstractExplainPlan(); + this.splitsChunk = explainPlanAttributes.getSplitsChunk(); + this.estimatedRows = explainPlanAttributes.getEstimatedRows(); + this.estimatedSizeInBytes = explainPlanAttributes.getEstimatedSizeInBytes(); + this.iteratorTypeAndScanSize = explainPlanAttributes.getIteratorTypeAndScanSize(); + this.samplingRate = explainPlanAttributes.getSamplingRate(); + this.useRoundRobinIterator = explainPlanAttributes.isUseRoundRobinIterator(); + this.hexStringRVCOffset = explainPlanAttributes.getHexStringRVCOffset(); + this.consistency = explainPlanAttributes.getConsistency(); + this.hint = explainPlanAttributes.getHint(); + this.serverSortedBy = explainPlanAttributes.getServerSortedBy(); + this.explainScanType = explainPlanAttributes.getExplainScanType(); + this.tableName = explainPlanAttributes.getTableName(); + this.keyRanges = explainPlanAttributes.getKeyRanges(); + this.scanTimeRangeMin = explainPlanAttributes.getScanTimeRangeMin(); + this.scanTimeRangeMax = explainPlanAttributes.getScanTimeRangeMax(); + this.serverWhereFilter = explainPlanAttributes.getServerWhereFilter(); + this.serverDistinctFilter = explainPlanAttributes.getServerDistinctFilter(); + this.serverOffset = explainPlanAttributes.getServerOffset(); + this.serverRowLimit = explainPlanAttributes.getServerRowLimit(); + this.serverArrayElementProjection = explainPlanAttributes.isServerArrayElementProjection(); + this.serverAggregate = explainPlanAttributes.getServerAggregate(); + this.clientFilterBy = explainPlanAttributes.getClientFilterBy(); + this.clientAggregate = explainPlanAttributes.getClientAggregate(); + this.clientSortedBy = explainPlanAttributes.getClientSortedBy(); + this.clientAfterAggregate = explainPlanAttributes.getClientAfterAggregate(); + this.clientDistinctFilter = 
explainPlanAttributes.getClientDistinctFilter(); + this.clientOffset = explainPlanAttributes.getClientOffset(); + this.clientRowLimit = explainPlanAttributes.getClientRowLimit(); + this.clientSequenceCount = explainPlanAttributes.getClientSequenceCount(); + this.clientCursorName = explainPlanAttributes.getClientCursorName(); + this.clientSortAlgo = explainPlanAttributes.getClientSortAlgo(); + this.rhsJoinQueryExplainPlan = explainPlanAttributes.getRhsJoinQueryExplainPlan(); + this.serverMergeColumns = explainPlanAttributes.getServerMergeColumns(); + this.regionLocations = explainPlanAttributes.getRegionLocations(); + this.numRegionLocationLookups = explainPlanAttributes.getNumRegionLocationLookups(); } - public Integer getSplitsChunk() { - return splitsChunk; - } + public ExplainPlanAttributesBuilder setAbstractExplainPlan(String abstractExplainPlan) { + this.abstractExplainPlan = abstractExplainPlan; + return this; + } - public Long getEstimatedRows() { - return estimatedRows; + public ExplainPlanAttributesBuilder setSplitsChunk(Integer splitsChunk) { + this.splitsChunk = splitsChunk; + return this; } - public Long getEstimatedSizeInBytes() { - return estimatedSizeInBytes; + public ExplainPlanAttributesBuilder setEstimatedRows(Long estimatedRows) { + this.estimatedRows = estimatedRows; + return this; } - public String getIteratorTypeAndScanSize() { - return iteratorTypeAndScanSize; + public ExplainPlanAttributesBuilder setEstimatedSizeInBytes(Long estimatedSizeInBytes) { + this.estimatedSizeInBytes = estimatedSizeInBytes; + return this; } - public Double getSamplingRate() { - return samplingRate; + public ExplainPlanAttributesBuilder setIteratorTypeAndScanSize(String iteratorTypeAndScanSize) { + this.iteratorTypeAndScanSize = iteratorTypeAndScanSize; + return this; } - public boolean isUseRoundRobinIterator() { - return useRoundRobinIterator; + public ExplainPlanAttributesBuilder setSamplingRate(Double samplingRate) { + this.samplingRate = samplingRate; + return this; } - public String getHexStringRVCOffset() { - return hexStringRVCOffset; + public ExplainPlanAttributesBuilder setUseRoundRobinIterator(boolean useRoundRobinIterator) { + this.useRoundRobinIterator = useRoundRobinIterator; + return this; } - public Consistency getConsistency() { - return consistency; + public ExplainPlanAttributesBuilder setHexStringRVCOffset(String hexStringRVCOffset) { + this.hexStringRVCOffset = hexStringRVCOffset; + return this; } - public Hint getHint() { - return hint; + public ExplainPlanAttributesBuilder setConsistency(Consistency consistency) { + this.consistency = consistency; + return this; } - public String getServerSortedBy() { - return serverSortedBy; + public ExplainPlanAttributesBuilder setHint(HintNode.Hint hint) { + this.hint = hint; + return this; } - public String getExplainScanType() { - return explainScanType; + public ExplainPlanAttributesBuilder setServerSortedBy(String serverSortedBy) { + this.serverSortedBy = serverSortedBy; + return this; } - public String getTableName() { - return tableName; + public ExplainPlanAttributesBuilder setExplainScanType(String explainScanType) { + this.explainScanType = explainScanType; + return this; } - public String getKeyRanges() { - return keyRanges; + public ExplainPlanAttributesBuilder setTableName(String tableName) { + this.tableName = tableName; + return this; } - public Long getScanTimeRangeMin() { - return scanTimeRangeMin; + public ExplainPlanAttributesBuilder setKeyRanges(String keyRanges) { + this.keyRanges = keyRanges; + return this; } - 
public Long getScanTimeRangeMax() { - return scanTimeRangeMax; + public ExplainPlanAttributesBuilder setScanTimeRangeMin(Long scanTimeRangeMin) { + this.scanTimeRangeMin = scanTimeRangeMin; + return this; } - public String getServerWhereFilter() { - return serverWhereFilter; + public ExplainPlanAttributesBuilder setScanTimeRangeMax(Long scanTimeRangeMax) { + this.scanTimeRangeMax = scanTimeRangeMax; + return this; } - public String getServerDistinctFilter() { - return serverDistinctFilter; + public ExplainPlanAttributesBuilder setServerWhereFilter(String serverWhereFilter) { + this.serverWhereFilter = serverWhereFilter; + return this; } - public Integer getServerOffset() { - return serverOffset; + public ExplainPlanAttributesBuilder setServerDistinctFilter(String serverDistinctFilter) { + this.serverDistinctFilter = serverDistinctFilter; + return this; } - public Long getServerRowLimit() { - return serverRowLimit; + public ExplainPlanAttributesBuilder setServerOffset(Integer serverOffset) { + this.serverOffset = serverOffset; + return this; } - public boolean isServerArrayElementProjection() { - return serverArrayElementProjection; + public ExplainPlanAttributesBuilder setServerRowLimit(Long serverRowLimit) { + this.serverRowLimit = serverRowLimit; + return this; } - public String getServerAggregate() { - return serverAggregate; + public ExplainPlanAttributesBuilder + setServerArrayElementProjection(boolean serverArrayElementProjection) { + this.serverArrayElementProjection = serverArrayElementProjection; + return this; } - public String getClientFilterBy() { - return clientFilterBy; + public ExplainPlanAttributesBuilder setServerAggregate(String serverAggregate) { + this.serverAggregate = serverAggregate; + return this; } - public String getClientAggregate() { - return clientAggregate; + public ExplainPlanAttributesBuilder setClientFilterBy(String clientFilterBy) { + this.clientFilterBy = clientFilterBy; + return this; } - public String getClientSortedBy() { - return clientSortedBy; + public ExplainPlanAttributesBuilder setClientAggregate(String clientAggregate) { + this.clientAggregate = clientAggregate; + return this; } - public String getClientAfterAggregate() { - return clientAfterAggregate; + public ExplainPlanAttributesBuilder setClientSortedBy(String clientSortedBy) { + this.clientSortedBy = clientSortedBy; + return this; } - public String getClientDistinctFilter() { - return clientDistinctFilter; + public ExplainPlanAttributesBuilder setClientAfterAggregate(String clientAfterAggregate) { + this.clientAfterAggregate = clientAfterAggregate; + return this; } - public Integer getClientOffset() { - return clientOffset; + public ExplainPlanAttributesBuilder setClientDistinctFilter(String clientDistinctFilter) { + this.clientDistinctFilter = clientDistinctFilter; + return this; } - public Integer getClientRowLimit() { - return clientRowLimit; + public ExplainPlanAttributesBuilder setClientOffset(Integer clientOffset) { + this.clientOffset = clientOffset; + return this; } - public Integer getClientSequenceCount() { - return clientSequenceCount; + public ExplainPlanAttributesBuilder setClientRowLimit(Integer clientRowLimit) { + this.clientRowLimit = clientRowLimit; + return this; } - public String getClientCursorName() { - return clientCursorName; + public ExplainPlanAttributesBuilder setClientSequenceCount(Integer clientSequenceCount) { + this.clientSequenceCount = clientSequenceCount; + return this; } - public String getClientSortAlgo() { - return clientSortAlgo; + public 
ExplainPlanAttributesBuilder setClientCursorName(String clientCursorName) { + this.clientCursorName = clientCursorName; + return this; } - public ExplainPlanAttributes getRhsJoinQueryExplainPlan() { - return rhsJoinQueryExplainPlan; + public ExplainPlanAttributesBuilder setClientSortAlgo(String clientSortAlgo) { + this.clientSortAlgo = clientSortAlgo; + return this; } - public Set getServerMergeColumns() { - return serverMergeColumns; + public ExplainPlanAttributesBuilder + setRhsJoinQueryExplainPlan(ExplainPlanAttributes rhsJoinQueryExplainPlan) { + this.rhsJoinQueryExplainPlan = rhsJoinQueryExplainPlan; + return this; } - public List getRegionLocations() { - return regionLocations; + public ExplainPlanAttributesBuilder setServerMergeColumns(Set columns) { + this.serverMergeColumns = columns; + return this; } - public int getNumRegionLocationLookups() { - return numRegionLocationLookups; + public ExplainPlanAttributesBuilder setRegionLocations(List regionLocations) { + this.regionLocations = regionLocations; + return this; } - public static ExplainPlanAttributes getDefaultExplainPlan() { - return EXPLAIN_PLAN_INSTANCE; + public ExplainPlanAttributesBuilder setNumRegionLocationLookups(int numRegionLocationLookups) { + this.numRegionLocationLookups = numRegionLocationLookups; + return this; } - public static class ExplainPlanAttributesBuilder { - private String abstractExplainPlan; - private Integer splitsChunk; - private Long estimatedRows; - private Long estimatedSizeInBytes; - private String iteratorTypeAndScanSize; - private Double samplingRate; - private boolean useRoundRobinIterator; - private String hexStringRVCOffset; - private Consistency consistency; - private HintNode.Hint hint; - private String serverSortedBy; - private String explainScanType; - private String tableName; - private String keyRanges; - private Long scanTimeRangeMin; - private Long scanTimeRangeMax; - private String serverWhereFilter; - private String serverDistinctFilter; - private Integer serverOffset; - private Long serverRowLimit; - private boolean serverArrayElementProjection; - private String serverAggregate; - private String clientFilterBy; - private String clientAggregate; - private String clientSortedBy; - private String clientAfterAggregate; - private String clientDistinctFilter; - private Integer clientOffset; - private Integer clientRowLimit; - private Integer clientSequenceCount; - private String clientCursorName; - private String clientSortAlgo; - private ExplainPlanAttributes rhsJoinQueryExplainPlan; - private Set serverMergeColumns; - private List regionLocations; - private int numRegionLocationLookups; - - public ExplainPlanAttributesBuilder() { - // default - } - - public ExplainPlanAttributesBuilder( - ExplainPlanAttributes explainPlanAttributes) { - this.abstractExplainPlan = - explainPlanAttributes.getAbstractExplainPlan(); - this.splitsChunk = explainPlanAttributes.getSplitsChunk(); - this.estimatedRows = explainPlanAttributes.getEstimatedRows(); - this.estimatedSizeInBytes = - explainPlanAttributes.getEstimatedSizeInBytes(); - this.iteratorTypeAndScanSize = explainPlanAttributes.getIteratorTypeAndScanSize(); - this.samplingRate = explainPlanAttributes.getSamplingRate(); - this.useRoundRobinIterator = - explainPlanAttributes.isUseRoundRobinIterator(); - this.hexStringRVCOffset = - explainPlanAttributes.getHexStringRVCOffset(); - this.consistency = explainPlanAttributes.getConsistency(); - this.hint = explainPlanAttributes.getHint(); - this.serverSortedBy = 
explainPlanAttributes.getServerSortedBy(); - this.explainScanType = explainPlanAttributes.getExplainScanType(); - this.tableName = explainPlanAttributes.getTableName(); - this.keyRanges = explainPlanAttributes.getKeyRanges(); - this.scanTimeRangeMin = explainPlanAttributes.getScanTimeRangeMin(); - this.scanTimeRangeMax = explainPlanAttributes.getScanTimeRangeMax(); - this.serverWhereFilter = - explainPlanAttributes.getServerWhereFilter(); - this.serverDistinctFilter = - explainPlanAttributes.getServerDistinctFilter(); - this.serverOffset = explainPlanAttributes.getServerOffset(); - this.serverRowLimit = explainPlanAttributes.getServerRowLimit(); - this.serverArrayElementProjection = - explainPlanAttributes.isServerArrayElementProjection(); - this.serverAggregate = explainPlanAttributes.getServerAggregate(); - this.clientFilterBy = explainPlanAttributes.getClientFilterBy(); - this.clientAggregate = explainPlanAttributes.getClientAggregate(); - this.clientSortedBy = explainPlanAttributes.getClientSortedBy(); - this.clientAfterAggregate = - explainPlanAttributes.getClientAfterAggregate(); - this.clientDistinctFilter = - explainPlanAttributes.getClientDistinctFilter(); - this.clientOffset = explainPlanAttributes.getClientOffset(); - this.clientRowLimit = explainPlanAttributes.getClientRowLimit(); - this.clientSequenceCount = - explainPlanAttributes.getClientSequenceCount(); - this.clientCursorName = explainPlanAttributes.getClientCursorName(); - this.clientSortAlgo = explainPlanAttributes.getClientSortAlgo(); - this.rhsJoinQueryExplainPlan = - explainPlanAttributes.getRhsJoinQueryExplainPlan(); - this.serverMergeColumns = explainPlanAttributes.getServerMergeColumns(); - this.regionLocations = explainPlanAttributes.getRegionLocations(); - this.numRegionLocationLookups = explainPlanAttributes.getNumRegionLocationLookups(); - } - - public ExplainPlanAttributesBuilder setAbstractExplainPlan( - String abstractExplainPlan) { - this.abstractExplainPlan = abstractExplainPlan; - return this; - } - - public ExplainPlanAttributesBuilder setSplitsChunk( - Integer splitsChunk) { - this.splitsChunk = splitsChunk; - return this; - } - - public ExplainPlanAttributesBuilder setEstimatedRows( - Long estimatedRows) { - this.estimatedRows = estimatedRows; - return this; - } - - public ExplainPlanAttributesBuilder setEstimatedSizeInBytes( - Long estimatedSizeInBytes) { - this.estimatedSizeInBytes = estimatedSizeInBytes; - return this; - } - - public ExplainPlanAttributesBuilder setIteratorTypeAndScanSize( - String iteratorTypeAndScanSize) { - this.iteratorTypeAndScanSize = iteratorTypeAndScanSize; - return this; - } - - public ExplainPlanAttributesBuilder setSamplingRate( - Double samplingRate) { - this.samplingRate = samplingRate; - return this; - } - - public ExplainPlanAttributesBuilder setUseRoundRobinIterator( - boolean useRoundRobinIterator) { - this.useRoundRobinIterator = useRoundRobinIterator; - return this; - } - - public ExplainPlanAttributesBuilder setHexStringRVCOffset( - String hexStringRVCOffset) { - this.hexStringRVCOffset = hexStringRVCOffset; - return this; - } - - public ExplainPlanAttributesBuilder setConsistency( - Consistency consistency) { - this.consistency = consistency; - return this; - } - - public ExplainPlanAttributesBuilder setHint(HintNode.Hint hint) { - this.hint = hint; - return this; - } - - public ExplainPlanAttributesBuilder setServerSortedBy( - String serverSortedBy) { - this.serverSortedBy = serverSortedBy; - return this; - } - - public ExplainPlanAttributesBuilder 
setExplainScanType( - String explainScanType) { - this.explainScanType = explainScanType; - return this; - } - - public ExplainPlanAttributesBuilder setTableName(String tableName) { - this.tableName = tableName; - return this; - } - - public ExplainPlanAttributesBuilder setKeyRanges(String keyRanges) { - this.keyRanges = keyRanges; - return this; - } - - public ExplainPlanAttributesBuilder setScanTimeRangeMin( - Long scanTimeRangeMin) { - this.scanTimeRangeMin = scanTimeRangeMin; - return this; - } - - public ExplainPlanAttributesBuilder setScanTimeRangeMax( - Long scanTimeRangeMax) { - this.scanTimeRangeMax = scanTimeRangeMax; - return this; - } - - public ExplainPlanAttributesBuilder setServerWhereFilter( - String serverWhereFilter) { - this.serverWhereFilter = serverWhereFilter; - return this; - } - - public ExplainPlanAttributesBuilder setServerDistinctFilter( - String serverDistinctFilter) { - this.serverDistinctFilter = serverDistinctFilter; - return this; - } - - public ExplainPlanAttributesBuilder setServerOffset( - Integer serverOffset) { - this.serverOffset = serverOffset; - return this; - } - - public ExplainPlanAttributesBuilder setServerRowLimit( - Long serverRowLimit) { - this.serverRowLimit = serverRowLimit; - return this; - } - - public ExplainPlanAttributesBuilder setServerArrayElementProjection( - boolean serverArrayElementProjection) { - this.serverArrayElementProjection = serverArrayElementProjection; - return this; - } - - public ExplainPlanAttributesBuilder setServerAggregate( - String serverAggregate) { - this.serverAggregate = serverAggregate; - return this; - } - - public ExplainPlanAttributesBuilder setClientFilterBy( - String clientFilterBy) { - this.clientFilterBy = clientFilterBy; - return this; - } - - public ExplainPlanAttributesBuilder setClientAggregate( - String clientAggregate) { - this.clientAggregate = clientAggregate; - return this; - } - - public ExplainPlanAttributesBuilder setClientSortedBy( - String clientSortedBy) { - this.clientSortedBy = clientSortedBy; - return this; - } - - public ExplainPlanAttributesBuilder setClientAfterAggregate( - String clientAfterAggregate) { - this.clientAfterAggregate = clientAfterAggregate; - return this; - } - - public ExplainPlanAttributesBuilder setClientDistinctFilter( - String clientDistinctFilter) { - this.clientDistinctFilter = clientDistinctFilter; - return this; - } - - public ExplainPlanAttributesBuilder setClientOffset( - Integer clientOffset) { - this.clientOffset = clientOffset; - return this; - } - - public ExplainPlanAttributesBuilder setClientRowLimit( - Integer clientRowLimit) { - this.clientRowLimit = clientRowLimit; - return this; - } - - public ExplainPlanAttributesBuilder setClientSequenceCount( - Integer clientSequenceCount) { - this.clientSequenceCount = clientSequenceCount; - return this; - } - - public ExplainPlanAttributesBuilder setClientCursorName( - String clientCursorName) { - this.clientCursorName = clientCursorName; - return this; - } - - public ExplainPlanAttributesBuilder setClientSortAlgo( - String clientSortAlgo) { - this.clientSortAlgo = clientSortAlgo; - return this; - } - - public ExplainPlanAttributesBuilder setRhsJoinQueryExplainPlan( - ExplainPlanAttributes rhsJoinQueryExplainPlan) { - this.rhsJoinQueryExplainPlan = rhsJoinQueryExplainPlan; - return this; - } - - public ExplainPlanAttributesBuilder setServerMergeColumns( - Set columns) { - this.serverMergeColumns = columns; - return this; - } - - public ExplainPlanAttributesBuilder setRegionLocations( - List regionLocations) 
{ - this.regionLocations = regionLocations; - return this; - } - - public ExplainPlanAttributesBuilder setNumRegionLocationLookups( - int numRegionLocationLookups) { - this.numRegionLocationLookups = numRegionLocationLookups; - return this; - } - - public ExplainPlanAttributes build() { - return new ExplainPlanAttributes(abstractExplainPlan, splitsChunk, - estimatedRows, estimatedSizeInBytes, iteratorTypeAndScanSize, - samplingRate, useRoundRobinIterator, hexStringRVCOffset, - consistency, hint, serverSortedBy, explainScanType, tableName, - keyRanges, scanTimeRangeMin, scanTimeRangeMax, - serverWhereFilter, serverDistinctFilter, - serverOffset, serverRowLimit, - serverArrayElementProjection, serverAggregate, - clientFilterBy, clientAggregate, clientSortedBy, - clientAfterAggregate, clientDistinctFilter, clientOffset, - clientRowLimit, clientSequenceCount, clientCursorName, - clientSortAlgo, rhsJoinQueryExplainPlan, serverMergeColumns, - regionLocations, numRegionLocationLookups); - } + public ExplainPlanAttributes build() { + return new ExplainPlanAttributes(abstractExplainPlan, splitsChunk, estimatedRows, + estimatedSizeInBytes, iteratorTypeAndScanSize, samplingRate, useRoundRobinIterator, + hexStringRVCOffset, consistency, hint, serverSortedBy, explainScanType, tableName, + keyRanges, scanTimeRangeMin, scanTimeRangeMax, serverWhereFilter, serverDistinctFilter, + serverOffset, serverRowLimit, serverArrayElementProjection, serverAggregate, clientFilterBy, + clientAggregate, clientSortedBy, clientAfterAggregate, clientDistinctFilter, clientOffset, + clientRowLimit, clientSequenceCount, clientCursorName, clientSortAlgo, + rhsJoinQueryExplainPlan, serverMergeColumns, regionLocations, numRegionLocationLookups); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java index cff3d964f5e..692e2fbeea7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.compile; - import java.math.BigDecimal; import java.nio.charset.StandardCharsets; import java.sql.SQLException; @@ -142,1141 +141,1206 @@ import org.apache.phoenix.util.StringUtil; public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor { - private boolean isAggregate; - private boolean isJsonFragment; - protected ParseNode aggregateFunction; - protected final StatementContext context; - protected final GroupBy groupBy; - private int nodeCount; - private int totalNodeCount; - private final boolean resolveViewConstants; - private static final Expression NOT_NULL_STRING = LiteralExpression.newConstant(PVarchar.INSTANCE.toObject(KeyRange.IS_NOT_NULL_RANGE.getLowerRange())); + private boolean isAggregate; + private boolean isJsonFragment; + protected ParseNode aggregateFunction; + protected final StatementContext context; + protected final GroupBy groupBy; + private int nodeCount; + private int totalNodeCount; + private final boolean resolveViewConstants; + private static final Expression NOT_NULL_STRING = LiteralExpression + .newConstant(PVarchar.INSTANCE.toObject(KeyRange.IS_NOT_NULL_RANGE.getLowerRange())); + + public ExpressionCompiler(StatementContext context) { + this(context, GroupBy.EMPTY_GROUP_BY, false); + } - public ExpressionCompiler(StatementContext context) { - this(context,GroupBy.EMPTY_GROUP_BY, false); - } + ExpressionCompiler(StatementContext context, boolean resolveViewConstants) { + this(context, GroupBy.EMPTY_GROUP_BY, resolveViewConstants); + } - ExpressionCompiler(StatementContext context, boolean resolveViewConstants) { - this(context,GroupBy.EMPTY_GROUP_BY, resolveViewConstants); - } + ExpressionCompiler(StatementContext context, GroupBy groupBy) { + this(context, groupBy, false); + } - ExpressionCompiler(StatementContext context, GroupBy groupBy) { - this(context, groupBy, false); - } + ExpressionCompiler(StatementContext context, GroupBy groupBy, boolean resolveViewConstants) { + this.context = context; + this.groupBy = groupBy; + this.resolveViewConstants = resolveViewConstants; + } - ExpressionCompiler(StatementContext context, GroupBy groupBy, boolean resolveViewConstants) { - this.context = context; - this.groupBy = groupBy; - this.resolveViewConstants = resolveViewConstants; - } + public boolean isAggregate() { + return isAggregate; + } - public boolean isAggregate() { - return isAggregate; - } + public boolean isJsonFragment() { + return isJsonFragment; + } - public boolean isJsonFragment() { - return isJsonFragment; - } + public boolean isTopLevel() { + return nodeCount == 0; + } - public boolean isTopLevel() { - return nodeCount == 0; - } + public void reset() { + this.isAggregate = false; + this.nodeCount = 0; + this.totalNodeCount = 0; + this.isJsonFragment = false; + } - public void reset() { - this.isAggregate = false; - this.nodeCount = 0; - this.totalNodeCount = 0; - this.isJsonFragment = false; - } + @Override + public boolean visitEnter(ComparisonParseNode node) { + return true; + } - @Override - public boolean visitEnter(ComparisonParseNode node) { - return true; + private void addBindParamMetaData(ParseNode lhsNode, ParseNode rhsNode, Expression lhsExpr, + Expression rhsExpr) throws SQLException { + if (lhsNode 
instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) lhsNode, rhsExpr); + } + if (rhsNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) rhsNode, lhsExpr); } + } - private void addBindParamMetaData(ParseNode lhsNode, ParseNode rhsNode, Expression lhsExpr, Expression rhsExpr) throws SQLException { - if (lhsNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)lhsNode, rhsExpr); - } - if (rhsNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)rhsNode, lhsExpr); - } + @Override + public Expression visitLeave(ComparisonParseNode node, List children) + throws SQLException { + ParseNode lhsNode = node.getChildren().get(0); + ParseNode rhsNode = node.getChildren().get(1); + Expression lhsExpr = children.get(0); + Expression rhsExpr = children.get(1); + + PDataType dataTypeOfLHSExpr = lhsExpr.getDataType(); + if (dataTypeOfLHSExpr != null && !dataTypeOfLHSExpr.isComparisonSupported()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + dataTypeOfLHSExpr).build().buildException(); + } + PDataType dataTypeOfRHSExpr = rhsExpr.getDataType(); + if (dataTypeOfRHSExpr != null && !dataTypeOfRHSExpr.isComparisonSupported()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + dataTypeOfRHSExpr).build().buildException(); + } + + CompareOperator op = node.getFilterOp(); + + if ( + lhsNode instanceof RowValueConstructorParseNode + && rhsNode instanceof RowValueConstructorParseNode + ) { + int i = 0; + for (; i < Math.min(lhsExpr.getChildren().size(), rhsExpr.getChildren().size()); i++) { + addBindParamMetaData(lhsNode.getChildren().get(i), rhsNode.getChildren().get(i), + lhsExpr.getChildren().get(i), rhsExpr.getChildren().get(i)); + } + for (; i < lhsExpr.getChildren().size(); i++) { + addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), + null); + } + for (; i < rhsExpr.getChildren().size(); i++) { + addBindParamMetaData(null, rhsNode.getChildren().get(i), null, + rhsExpr.getChildren().get(i)); + } + } else if (lhsExpr instanceof RowValueConstructorExpression) { + addBindParamMetaData(lhsNode.getChildren().get(0), rhsNode, lhsExpr.getChildren().get(0), + rhsExpr); + for (int i = 1; i < lhsExpr.getChildren().size(); i++) { + addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), + null); + } + } else if (rhsExpr instanceof RowValueConstructorExpression) { + addBindParamMetaData(lhsNode, rhsNode.getChildren().get(0), lhsExpr, + rhsExpr.getChildren().get(0)); + for (int i = 1; i < rhsExpr.getChildren().size(); i++) { + addBindParamMetaData(null, rhsNode.getChildren().get(i), null, + rhsExpr.getChildren().get(i)); + } + } else { + addBindParamMetaData(lhsNode, rhsNode, lhsExpr, rhsExpr); } + return wrapGroupByExpression(ComparisonExpression.create(op, children, context.getTempPtr(), + context.getCurrentTable().getTable().rowKeyOrderOptimizable())); + } - @Override - public Expression visitLeave(ComparisonParseNode node, List children) throws SQLException { - ParseNode lhsNode = node.getChildren().get(0); - ParseNode rhsNode = node.getChildren().get(1); - Expression lhsExpr = children.get(0); - Expression rhsExpr = children.get(1); - - PDataType dataTypeOfLHSExpr = lhsExpr.getDataType(); - if (dataTypeOfLHSExpr != null && !dataTypeOfLHSExpr.isComparisonSupported()) { - throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) - .setMessage(" for type " + dataTypeOfLHSExpr).build().buildException(); - } - PDataType dataTypeOfRHSExpr = rhsExpr.getDataType(); - if (dataTypeOfRHSExpr != null && !dataTypeOfRHSExpr.isComparisonSupported()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) - .setMessage(" for type " + dataTypeOfRHSExpr).build().buildException(); - } + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + return true; + } - CompareOperator op = node.getFilterOp(); + @Override + public Expression visitLeave(AndParseNode node, List children) throws SQLException { + return wrapGroupByExpression(AndExpression.create(children)); + } - if (lhsNode instanceof RowValueConstructorParseNode && rhsNode instanceof RowValueConstructorParseNode) { - int i = 0; - for (; i < Math.min(lhsExpr.getChildren().size(),rhsExpr.getChildren().size()); i++) { - addBindParamMetaData(lhsNode.getChildren().get(i), rhsNode.getChildren().get(i), lhsExpr.getChildren().get(i), rhsExpr.getChildren().get(i)); - } - for (; i < lhsExpr.getChildren().size(); i++) { - addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), null); - } - for (; i < rhsExpr.getChildren().size(); i++) { - addBindParamMetaData(null, rhsNode.getChildren().get(i), null, rhsExpr.getChildren().get(i)); - } - } else if (lhsExpr instanceof RowValueConstructorExpression) { - addBindParamMetaData(lhsNode.getChildren().get(0), rhsNode, lhsExpr.getChildren().get(0), rhsExpr); - for (int i = 1; i < lhsExpr.getChildren().size(); i++) { - addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), null); - } - } else if (rhsExpr instanceof RowValueConstructorExpression) { - addBindParamMetaData(lhsNode, rhsNode.getChildren().get(0), lhsExpr, rhsExpr.getChildren().get(0)); - for (int i = 1; i < rhsExpr.getChildren().size(); i++) { - addBindParamMetaData(null, rhsNode.getChildren().get(i), null, rhsExpr.getChildren().get(i)); - } - } else { - addBindParamMetaData(lhsNode, rhsNode, lhsExpr, rhsExpr); - } - return wrapGroupByExpression(ComparisonExpression.create(op, children, context.getTempPtr(), context.getCurrentTable().getTable().rowKeyOrderOptimizable())); + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + return true; + } + + private Expression orExpression(List children) throws SQLException { + Iterator iterator = children.iterator(); + Determinism determinism = Determinism.ALWAYS; + while (iterator.hasNext()) { + Expression child = iterator.next(); + if (child.getDataType() != PBoolean.INSTANCE) { + throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), + child.toString()); + } + if (LiteralExpression.isFalse(child)) { + iterator.remove(); + } + if (LiteralExpression.isTrue(child)) { + return child; + } + determinism = determinism.combine(child.getDeterminism()); + } + if (children.size() == 0) { + return LiteralExpression.newConstant(false, determinism); } + if (children.size() == 1) { + return children.get(0); + } + return new OrExpression(children); + } - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - return true; + @Override + public Expression visitLeave(OrParseNode node, List children) throws SQLException { + return wrapGroupByExpression(orExpression(children)); + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + if (node instanceof JsonQueryParseNode || 
node instanceof JsonModifyParseNode) { + this.isJsonFragment = true; } + // TODO: Oracle supports nested aggregate function while other DBs don't. Should we? + if (node.isAggregate()) { + if (aggregateFunction != null) { + throw new SQLFeatureNotSupportedException("Nested aggregate functions are not supported"); + } + this.aggregateFunction = node; + this.isAggregate = true; - @Override - public Expression visitLeave(AndParseNode node, List children) throws SQLException { - return wrapGroupByExpression(AndExpression.create(children)); } + return true; + } - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - return true; + private Expression wrapGroupByExpression(Expression expression) { + // If we're in an aggregate function, don't wrap a group by expression, + // since in that case we're aggregating over the regular/ungrouped + // column. + if (aggregateFunction == null) { + int index = groupBy.getExpressions().indexOf(expression); + if (index >= 0) { + isAggregate = true; + expression = ExpressionUtil.convertGroupByExpressionToRowKeyColumnExpression(groupBy, + expression, index); + } } + return expression; + } - private Expression orExpression(List children) throws SQLException { - Iterator iterator = children.iterator(); - Determinism determinism = Determinism.ALWAYS; - while (iterator.hasNext()) { - Expression child = iterator.next(); - if (child.getDataType() != PBoolean.INSTANCE) { - throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), child.toString()); - } - if (LiteralExpression.isFalse(child)) { - iterator.remove(); - } - if (LiteralExpression.isTrue(child)) { - return child; - } - determinism = determinism.combine(child.getDeterminism()); - } - if (children.size() == 0) { - return LiteralExpression.newConstant(false, determinism); + /** + * Add expression to the expression manager, returning the same one if already used. + */ + protected Expression addExpression(Expression expression) { + return context.getExpressionManager().addIfAbsent(expression); + } + + @Override + /** + * @param node a function expression node + * @param children the child expression arguments to the function expression node. + */ + public Expression visitLeave(FunctionParseNode node, List children) + throws SQLException { + PFunction function = null; + if (node instanceof UDFParseNode) { + function = context.getResolver().resolveFunction(node.getName()); + BuiltInFunctionInfo info = new BuiltInFunctionInfo(function); + node = new UDFParseNode(node.getName(), node.getChildren(), info); + } + children = node.validate(children, context); + Expression expression = null; + if (function == null) { + expression = node.create(children, context); + } else { + expression = node.create(children, function, context); + } + ImmutableBytesWritable ptr = context.getTempPtr(); + BuiltInFunctionInfo info = node.getInfo(); + for (int i = 0; i < info.getRequiredArgCount(); i++) { + // Optimization to catch cases where a required argument is null resulting in the function + // returning null. We have to wait until after we create the function expression so that + // we can get the proper type to use. 
+ if (node.evalToNullIfParamIsNull(context, i)) { + Expression child = children.get(i); + if (ExpressionUtil.isNull(child, ptr)) { + return ExpressionUtil.getNullExpression(expression); } - if (children.size() == 1) { - return children.get(0); + } + } + if (ExpressionUtil.isConstant(expression)) { + return ExpressionUtil.getConstantExpression(expression, ptr); + } + expression = addExpression(expression); + expression = wrapGroupByExpression(expression); + if (aggregateFunction == node) { + aggregateFunction = null; // Turn back off on the way out + } + return expression; + } + + /** + * Called by visitor to resolve a column expression node into a column reference. Derived classes + * may use this as a hook to trap all column resolves. + * @param node a column expression node + * @return a resolved ColumnRef + * @throws SQLException if the column expression node does not refer to a known/unambiguous column + */ + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef ref = null; + try { + ref = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), + node.getName()); + } catch (ColumnNotFoundException e) { + // Rather than not use a local index when a column not contained by it is referenced, we + // join back to the data table in our coprocessor since this is a relatively cheap + // operation given that we know the join is local. + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(context.getCurrentTable())) { + try { + context.setUncoveredIndex(true); + return new IndexUncoveredDataColumnRef(context, context.getCurrentTable(), + node.getName()); + } catch (ColumnFamilyNotFoundException c) { + throw e; } - return new OrExpression(children); + } else { + throw e; + } + } + PTable table = ref.getTable(); + int pkPosition = ref.getPKSlotPosition(); + // Disallow explicit reference to salting column, tenant ID column, and index ID column + if (pkPosition >= 0) { + boolean isSalted = table.getBucketNum() != null; + boolean isMultiTenant = + context.getConnection().getTenantId() != null && table.isMultiTenant(); + boolean isSharedViewIndex = table.getViewIndexId() != null; + int minPosition = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); + if (pkPosition < minPosition) { + throw new ColumnNotFoundException(table.getSchemaName().getString(), + table.getTableName().getString(), null, ref.getColumn().getName().getString()); + } } + return ref; + } - @Override - public Expression visitLeave(OrParseNode node, List children) throws SQLException { - return wrapGroupByExpression(orExpression(children)); + protected void addColumn(PColumn column) { + EncodedColumnsUtil.setColumns(column, context.getCurrentTable().getTable(), context.getScan()); + } + + @Override + public Expression visit(ColumnParseNode node) throws SQLException { + ColumnRef ref = resolveColumn(node); + TableRef tableRef = ref.getTableRef(); + ImmutableBytesWritable ptr = context.getTempPtr(); + PColumn column = ref.getColumn(); + // If we have an UPDATABLE view, then we compile those view constants (i.e. columns in equality + // constraints + // in the view) to constants. This allows the optimize to optimize out reference to them in + // various scenarios. + // If the column is matched in a WHERE clause against a constant not equal to it's constant, + // then the entire + // query would become degenerate. 
+ if (!resolveViewConstants && IndexUtil.getViewConstantValue(column, ptr)) { + return LiteralExpression.newConstant( + column.getDataType().toObject(ptr, column.getSortOrder()), column.getDataType(), + column.getSortOrder()); + } + if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(column)) { // project + // only kv + // columns + addColumn(column); + } + Expression expression = + ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); + Expression wrappedExpression = wrapGroupByExpression(expression); + // If we're in an aggregate expression + // and we're not in the context of an aggregate function + // and we didn't just wrap our column reference + // then we're mixing aggregate and non aggregate expressions in the same expression. + // This catches cases like this: SELECT sum(a_integer) + a_integer FROM atable GROUP BY a_string + if (isAggregate && aggregateFunction == null && wrappedExpression == expression) { + throwNonAggExpressionInAggException(expression.toString()); + } + return wrappedExpression; + } + + @Override + public Expression visit(BindParseNode node) throws SQLException { + Object value = context.getBindManager().getBindValue(node); + return LiteralExpression.newConstant(value, Determinism.ALWAYS); + } + + @Override + public Expression visit(LiteralParseNode node) throws SQLException { + return LiteralExpression.newConstant(node.getValue(), node.getType(), Determinism.ALWAYS); + } + + @Override + public List newElementList(int size) { + nodeCount += size; + return new ArrayList(size); + } + + @Override + public void addElement(List l, Expression element) { + nodeCount--; + totalNodeCount++; + l.add(element); + } + + @Override + public boolean visitEnter(CaseParseNode node) throws SQLException { + return true; + } + + @Override + public Expression visitLeave(CaseParseNode node, List l) throws SQLException { + final Expression caseExpression = CaseExpression.create(l); + for (int i = 0; i < node.getChildren().size(); i += 2) { + ParseNode childNode = node.getChildren().get(i); + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, + new DelegateDatum(caseExpression)); + } } + return wrapGroupByExpression(caseExpression); + } - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - if (node instanceof JsonQueryParseNode || node instanceof JsonModifyParseNode) { - this.isJsonFragment = true; - } - // TODO: Oracle supports nested aggregate function while other DBs don't. Should we? 
- if (node.isAggregate()) { - if (aggregateFunction != null) { - throw new SQLFeatureNotSupportedException( - "Nested aggregate functions are not supported"); - } - this.aggregateFunction = node; - this.isAggregate = true; + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + return true; + } + @Override + public Expression visitLeave(LikeParseNode node, List children) throws SQLException { + ParseNode lhsNode = node.getChildren().get(0); + ParseNode rhsNode = node.getChildren().get(1); + Expression lhs = children.get(0); + Expression rhs = children.get(1); + if (lhs.getDataType() != null && !lhs.getDataType().isComparisonSupported()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + lhs.getDataType()).build().buildException(); + } + if (rhs.getDataType() != null && !rhs.getDataType().isComparisonSupported()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + rhs.getDataType()).build().buildException(); + } + if ( + rhs.getDataType() != null && lhs.getDataType() != null + && !lhs.getDataType().isCoercibleTo(rhs.getDataType()) + && !rhs.getDataType().isCoercibleTo(lhs.getDataType()) + ) { + throw TypeMismatchException.newException(lhs.getDataType(), rhs.getDataType(), + node.toString()); + } + if (lhsNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) lhsNode, rhs); + } + if (rhsNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) rhsNode, lhs); + } + if (rhs instanceof LiteralExpression) { + String pattern = (String) ((LiteralExpression) rhs).getValue(); + if (pattern == null || pattern.length() == 0) { + return LiteralExpression.newConstant(null, PBoolean.INSTANCE, rhs.getDeterminism()); + } + // TODO: for pattern of '%' optimize to strlength(lhs) > 0 + // We can't use lhs IS NOT NULL b/c if lhs is NULL we need + // to return NULL. + int index = LikeExpression.indexOfWildcard(pattern); + // Can't possibly be as long as the constant, then FALSE + Integer lhsMaxLength = lhs.getMaxLength(); + if (lhsMaxLength != null && lhsMaxLength < index) { + return LiteralExpression.newConstant(false, rhs.getDeterminism()); + } + if (index == -1) { + String rhsLiteral = LikeExpression.unescapeLike(pattern); + if (node.getLikeType() == LikeType.CASE_SENSITIVE) { + CompareOperator op = node.isNegate() ? CompareOperator.NOT_EQUAL : CompareOperator.EQUAL; + if (pattern.equals(rhsLiteral)) { + return new ComparisonExpression(children, op); + } else { + rhs = LiteralExpression.newConstant(rhsLiteral, PChar.INSTANCE, rhs.getDeterminism()); + return new ComparisonExpression(Arrays.asList(lhs, rhs), op); + } } - return true; - } - - private Expression wrapGroupByExpression(Expression expression) { - // If we're in an aggregate function, don't wrap a group by expression, - // since in that case we're aggregating over the regular/ungrouped - // column. 
- if (aggregateFunction == null) { - int index = groupBy.getExpressions().indexOf(expression); - if (index >= 0) { - isAggregate = true; - expression = ExpressionUtil.convertGroupByExpressionToRowKeyColumnExpression(groupBy, expression, index); - } + } else { + byte[] wildcardString = new byte[pattern.length()]; + byte[] wildcard = { StringUtil.MULTI_CHAR_LIKE }; + StringUtil.fill(wildcardString, 0, pattern.length(), wildcard, 0, 1, false); + if (pattern.equals(new String(wildcardString, StandardCharsets.UTF_8))) { + List compareChildren = Arrays.asList(lhs, NOT_NULL_STRING); + return new ComparisonExpression(compareChildren, + node.isNegate() ? CompareOperator.LESS : CompareOperator.GREATER_OR_EQUAL); } - return expression; + } + } + QueryServices services = context.getConnection().getQueryServices(); + boolean useByteBasedRegex = services.getProps().getBoolean( + QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); + Expression expression; + if (useByteBasedRegex) { + expression = ByteBasedLikeExpression.create(children, node.getLikeType()); + } else { + expression = StringBasedLikeExpression.create(children, node.getLikeType()); + } + if (ExpressionUtil.isConstant(expression)) { + ImmutableBytesWritable ptr = context.getTempPtr(); + if (!expression.evaluate(null, ptr)) { + return LiteralExpression.newConstant(null, expression.getDeterminism()); + } else { + return LiteralExpression.newConstant( + Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)) ^ node.isNegate(), + expression.getDeterminism()); + } } + if (node.isNegate()) { + expression = new NotExpression(expression); + } + return wrapGroupByExpression(expression); + } - /** - * Add expression to the expression manager, returning the same one if - * already used. - */ - protected Expression addExpression(Expression expression) { - return context.getExpressionManager().addIfAbsent(expression); + @Override + public boolean visitEnter(NotParseNode node) throws SQLException { + return true; + } + + @Override + public Expression visitLeave(NotParseNode node, List children) throws SQLException { + ParseNode childNode = node.getChildren().get(0); + Expression child = children.get(0); + if (!PBoolean.INSTANCE.isCoercibleTo(child.getDataType())) { + throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), + node.toString()); + } + if (childNode instanceof BindParseNode) { // TODO: valid/possibe? + context.getBindManager().addParamMetaData((BindParseNode) childNode, child); } + return wrapGroupByExpression(NotExpression.create(child, context.getTempPtr())); + } - @Override - /** - * @param node a function expression node - * @param children the child expression arguments to the function expression node. 
- */ - public Expression visitLeave(FunctionParseNode node, List children) throws SQLException { - PFunction function = null; - if(node instanceof UDFParseNode) { - function = context.getResolver().resolveFunction(node.getName()); - BuiltInFunctionInfo info = new BuiltInFunctionInfo(function); - node = new UDFParseNode(node.getName(), node.getChildren(), info); - } - children = node.validate(children, context); - Expression expression = null; - if (function == null) { - expression = node.create(children, context); - } else { - expression = node.create(children, function, context); - } - ImmutableBytesWritable ptr = context.getTempPtr(); - BuiltInFunctionInfo info = node.getInfo(); - for (int i = 0; i < info.getRequiredArgCount(); i++) { - // Optimization to catch cases where a required argument is null resulting in the function - // returning null. We have to wait until after we create the function expression so that - // we can get the proper type to use. - if (node.evalToNullIfParamIsNull(context, i)) { - Expression child = children.get(i); - if (ExpressionUtil.isNull(child, ptr)) { - return ExpressionUtil.getNullExpression(expression); - } - } - } - if (ExpressionUtil.isConstant(expression)) { - return ExpressionUtil.getConstantExpression(expression, ptr); - } - expression = addExpression(expression); - expression = wrapGroupByExpression(expression); - if (aggregateFunction == node) { - aggregateFunction = null; // Turn back off on the way out - } - return expression; + @Override + public boolean visitEnter(CastParseNode node) throws SQLException { + return true; + } + + // TODO: don't repeat this ugly cast logic (maybe use isCastable in the last else block. + private static Expression convertToRoundExpressionIfNeeded(PDataType fromDataType, + PDataType targetDataType, List expressions) throws SQLException { + Expression firstChildExpr = expressions.get(0); + if (fromDataType == targetDataType) { + return firstChildExpr; + } else if ( + (fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE + || fromDataType == PUnsignedTimestamp.INSTANCE) + && targetDataType.isCoercibleTo(PLong.INSTANCE) + ) { + return RoundDecimalExpression.create(expressions); + } else if ( + expressions.size() == 1 && fromDataType == PTimestamp.INSTANCE + && targetDataType.isCoercibleTo(PDate.INSTANCE) + ) { + return firstChildExpr; + } else if ( + (fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE + || fromDataType == PUnsignedTimestamp.INSTANCE) + && targetDataType.isCoercibleTo(PDate.INSTANCE) + ) { + return RoundTimestampExpression.create(expressions); + } else if (fromDataType.isCastableTo(targetDataType)) { + return firstChildExpr; + } else { + throw TypeMismatchException.newException(fromDataType, targetDataType, + firstChildExpr.toString()); } + } - /** - * Called by visitor to resolve a column expression node into a column reference. - * Derived classes may use this as a hook to trap all column resolves. 
- * @param node a column expression node - * @return a resolved ColumnRef - * @throws SQLException if the column expression node does not refer to a known/unambiguous column - */ - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef ref = null; - try { - ref = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - } catch (ColumnNotFoundException e) { - // Rather than not use a local index when a column not contained by it is referenced, we - // join back to the data table in our coprocessor since this is a relatively cheap - // operation given that we know the join is local. - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(context.getCurrentTable())) { - try { - context.setUncoveredIndex(true); - return new IndexUncoveredDataColumnRef(context, context.getCurrentTable(), - node.getName()); - } catch (ColumnFamilyNotFoundException c) { - throw e; - } - } else { - throw e; - } - } - PTable table = ref.getTable(); - int pkPosition = ref.getPKSlotPosition(); - // Disallow explicit reference to salting column, tenant ID column, and index ID column - if (pkPosition >= 0) { - boolean isSalted = table.getBucketNum() != null; - boolean isMultiTenant = context.getConnection().getTenantId() != null && table.isMultiTenant(); - boolean isSharedViewIndex = table.getViewIndexId() != null; - int minPosition = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); - if (pkPosition < minPosition) { - throw new ColumnNotFoundException(table.getSchemaName().getString(), table.getTableName().getString(), null, ref.getColumn().getName().getString()); - } - } - return ref; + @Override + public Expression visitLeave(CastParseNode node, List children) throws SQLException { + ParseNode childNode = node.getChildren().get(0); + PDataType targetDataType = node.getDataType(); + Expression childExpr = children.get(0); + PDataType fromDataType = childExpr.getDataType(); + + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, childExpr); + } + + Expression expr = childExpr; + if (fromDataType != null) { + /* + * IndexStatementRewriter creates a CAST parse node when rewriting the query to use indexed + * columns. Without this check present we wrongly and unnecessarily end up creating a + * RoundExpression. 
+ */ + if (context.getCurrentTable().getTable().getType() != PTableType.INDEX) { + expr = convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children); + } } + boolean rowKeyOrderOptimizable = context.getCurrentTable().getTable().rowKeyOrderOptimizable(); + return wrapGroupByExpression(CoerceExpression.create(expr, targetDataType, + SortOrder.getDefault(), expr.getMaxLength(), rowKeyOrderOptimizable)); + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + return true; + } - protected void addColumn(PColumn column) { - EncodedColumnsUtil.setColumns(column, context.getCurrentTable().getTable(), context.getScan()); + @Override + public Expression visitLeave(InListParseNode node, List l) throws SQLException { + List inChildren = l; + Expression firstChild = inChildren.get(0); + ImmutableBytesWritable ptr = context.getTempPtr(); + PDataType firstChildType = firstChild.getDataType(); + ParseNode firstChildNode = node.getChildren().get(0); + + if (firstChildType != null && !firstChildType.isComparisonSupported()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + firstChildType).build().buildException(); } - @Override - public Expression visit(ColumnParseNode node) throws SQLException { - ColumnRef ref = resolveColumn(node); - TableRef tableRef = ref.getTableRef(); - ImmutableBytesWritable ptr = context.getTempPtr(); - PColumn column = ref.getColumn(); - // If we have an UPDATABLE view, then we compile those view constants (i.e. columns in equality constraints - // in the view) to constants. This allows the optimize to optimize out reference to them in various scenarios. - // If the column is matched in a WHERE clause against a constant not equal to it's constant, then the entire - // query would become degenerate. - if (!resolveViewConstants && IndexUtil.getViewConstantValue(column, ptr)) { - return LiteralExpression.newConstant( - column.getDataType().toObject(ptr, column.getSortOrder()), - column.getDataType(), column.getSortOrder()); - } - if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(column)) { // project only kv columns - addColumn(column); - } - Expression expression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); - Expression wrappedExpression = wrapGroupByExpression(expression); - // If we're in an aggregate expression - // and we're not in the context of an aggregate function - // and we didn't just wrap our column reference - // then we're mixing aggregate and non aggregate expressions in the same expression. 
- // This catches cases like this: SELECT sum(a_integer) + a_integer FROM atable GROUP BY a_string - if (isAggregate && aggregateFunction == null && wrappedExpression == expression) { - throwNonAggExpressionInAggException(expression.toString()); - } - return wrappedExpression; + if (firstChildNode instanceof BindParseNode) { + PDatum datum = firstChild; + if (firstChildType == null) { + datum = inferBindDatum(inChildren); + } + context.getBindManager().addParamMetaData((BindParseNode) firstChildNode, datum); } + for (int i = 1; i < l.size(); i++) { + ParseNode childNode = node.getChildren().get(i); + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, firstChild); + } + } + return wrapGroupByExpression(InListExpression.create(inChildren, node.isNegate(), ptr, + context.getCurrentTable().getTable().rowKeyOrderOptimizable())); + } + private static final PDatum DECIMAL_DATUM = new PDatum() { @Override - public Expression visit(BindParseNode node) throws SQLException { - Object value = context.getBindManager().getBindValue(node); - return LiteralExpression.newConstant(value, Determinism.ALWAYS); + public boolean isNullable() { + return true; } @Override - public Expression visit(LiteralParseNode node) throws SQLException { - return LiteralExpression.newConstant(node.getValue(), node.getType(), Determinism.ALWAYS); + public PDataType getDataType() { + return PDecimal.INSTANCE; } @Override - public List newElementList(int size) { - nodeCount += size; - return new ArrayList(size); + public Integer getMaxLength() { + return null; } @Override - public void addElement(List l, Expression element) { - nodeCount--; - totalNodeCount++; - l.add(element); + public Integer getScale() { + return null; } @Override - public boolean visitEnter(CaseParseNode node) throws SQLException { - return true; + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; + + private static PDatum inferBindDatum(List children) { + boolean isChildTypeUnknown = false; + PDatum datum = children.get(1); + for (int i = 2; i < children.size(); i++) { + Expression child = children.get(i); + PDataType childType = child.getDataType(); + if (childType == null) { + isChildTypeUnknown = true; + } else if (datum.getDataType() == null) { + datum = child; + isChildTypeUnknown = true; + } else if (datum.getDataType() == childType || childType.isCoercibleTo(datum.getDataType())) { + continue; + } else if (datum.getDataType().isCoercibleTo(childType)) { + datum = child; + } + } + // If we found an "unknown" child type and the return type is a number + // make the return type be the most general number type of DECIMAL. + // TODO: same for TIMESTAMP for DATE/TIME? 
+ if ( + isChildTypeUnknown && datum.getDataType() != null + && datum.getDataType().isCoercibleTo(PDecimal.INSTANCE) + ) { + return DECIMAL_DATUM; } + return datum; + } - @Override - public Expression visitLeave(CaseParseNode node, List l) throws SQLException { - final Expression caseExpression = CaseExpression.create(l); - for (int i = 0; i < node.getChildren().size(); i+=2) { - ParseNode childNode = node.getChildren().get(i); - if (childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode, new DelegateDatum(caseExpression)); - } - } - return wrapGroupByExpression(caseExpression); + @Override + public boolean visitEnter(IsNullParseNode node) throws SQLException { + return true; + } + + @Override + public Expression visitLeave(IsNullParseNode node, List children) + throws SQLException { + ParseNode childNode = node.getChildren().get(0); + Expression child = children.get(0); + if (childNode instanceof BindParseNode) { // TODO: valid/possibe? + context.getBindManager().addParamMetaData((BindParseNode) childNode, child); } + return wrapGroupByExpression( + IsNullExpression.create(child, node.isNegate(), context.getTempPtr())); + } - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - return true; + private static interface ArithmeticExpressionFactory { + Expression create(ArithmeticParseNode node, List children) throws SQLException; + } + + private static interface ArithmeticExpressionBinder { + PDatum getBindMetaData(int i, List children, Expression expression); + } + + private Expression visitLeave(ArithmeticParseNode node, List children, + ArithmeticExpressionBinder binder, ArithmeticExpressionFactory factory) throws SQLException { + + boolean isNull = false; + for (Expression child : children) { + boolean isChildLiteral = (child instanceof LiteralExpression); + isNull |= isChildLiteral && ((LiteralExpression) child).getValue() == null; } - @Override - public Expression visitLeave(LikeParseNode node, List children) throws SQLException { - ParseNode lhsNode = node.getChildren().get(0); - ParseNode rhsNode = node.getChildren().get(1); - Expression lhs = children.get(0); - Expression rhs = children.get(1); - if (lhs.getDataType() != null && !lhs.getDataType().isComparisonSupported()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED).setMessage( - " for type " + lhs.getDataType()).build().buildException(); - } - if (rhs.getDataType() != null && !rhs.getDataType().isComparisonSupported()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED).setMessage( - " for type " + rhs.getDataType()).build().buildException(); - } - if ( rhs.getDataType() != null && lhs.getDataType() != null && - !lhs.getDataType().isCoercibleTo(rhs.getDataType()) && - !rhs.getDataType().isCoercibleTo(lhs.getDataType())) { - throw TypeMismatchException.newException(lhs.getDataType(), rhs.getDataType(), node.toString()); - } - if (lhsNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)lhsNode, rhs); - } - if (rhsNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)rhsNode, lhs); - } - if (rhs instanceof LiteralExpression) { - String pattern = (String)((LiteralExpression)rhs).getValue(); - if (pattern == null || pattern.length() == 0) { - return LiteralExpression.newConstant(null, PBoolean.INSTANCE, rhs.getDeterminism()); - } - // TODO: for pattern of '%' optimize to strlength(lhs) > 0 - // We can't use lhs IS NOT 
NULL b/c if lhs is NULL we need - // to return NULL. - int index = LikeExpression.indexOfWildcard(pattern); - // Can't possibly be as long as the constant, then FALSE - Integer lhsMaxLength = lhs.getMaxLength(); - if (lhsMaxLength != null && lhsMaxLength < index) { - return LiteralExpression.newConstant(false, rhs.getDeterminism()); - } - if (index == -1) { - String rhsLiteral = LikeExpression.unescapeLike(pattern); - if (node.getLikeType() == LikeType.CASE_SENSITIVE) { - CompareOperator op = node.isNegate() ? CompareOperator.NOT_EQUAL : CompareOperator.EQUAL; - if (pattern.equals(rhsLiteral)) { - return new ComparisonExpression(children, op); - } else { - rhs = LiteralExpression.newConstant(rhsLiteral, PChar.INSTANCE, rhs.getDeterminism()); - return new ComparisonExpression(Arrays.asList(lhs, rhs), op); - } - } - } else { - byte[] wildcardString = new byte[pattern.length()]; - byte[] wildcard = {StringUtil.MULTI_CHAR_LIKE}; - StringUtil.fill(wildcardString, 0, pattern.length(), wildcard, 0, 1, false); - if (pattern.equals(new String(wildcardString, StandardCharsets.UTF_8))) { - List compareChildren = Arrays.asList(lhs, NOT_NULL_STRING); - return new ComparisonExpression(compareChildren, node.isNegate() ? CompareOperator.LESS : CompareOperator.GREATER_OR_EQUAL); - } - } - } - QueryServices services = context.getConnection().getQueryServices(); - boolean useByteBasedRegex = - services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, - QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); - Expression expression; - if (useByteBasedRegex) { - expression = ByteBasedLikeExpression.create(children, node.getLikeType()); - } else { - expression = StringBasedLikeExpression.create(children, node.getLikeType()); - } - if (ExpressionUtil.isConstant(expression)) { - ImmutableBytesWritable ptr = context.getTempPtr(); - if (!expression.evaluate(null, ptr)) { - return LiteralExpression.newConstant(null, expression.getDeterminism()); - } else { - return LiteralExpression.newConstant(Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)) ^ node.isNegate(), expression.getDeterminism()); - } - } - if (node.isNegate()) { - expression = new NotExpression(expression); - } - return wrapGroupByExpression(expression); + Expression expression = factory.create(node, children); + + for (int i = 0; i < node.getChildren().size(); i++) { + ParseNode childNode = node.getChildren().get(i); + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, + binder == null ? 
expression : binder.getBindMetaData(i, children, expression)); + } } + ImmutableBytesWritable ptr = context.getTempPtr(); - @Override - public boolean visitEnter(NotParseNode node) throws SQLException { - return true; + // If all children are literals, just evaluate now + if (ExpressionUtil.isConstant(expression)) { + return ExpressionUtil.getConstantExpression(expression, ptr); + } else if (isNull) { + return LiteralExpression.newConstant(null, expression.getDataType(), + expression.getDeterminism()); } + // Otherwise create and return the expression + return wrapGroupByExpression(expression); + } - @Override - public Expression visitLeave(NotParseNode node, List children) throws SQLException { - ParseNode childNode = node.getChildren().get(0); - Expression child = children.get(0); - if (!PBoolean.INSTANCE.isCoercibleTo(child.getDataType())) { - throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), node.toString()); + @Override + public boolean visitEnter(AddParseNode node) throws SQLException { + return true; + } + + @Override + public Expression visitLeave(AddParseNode node, List children) throws SQLException { + return visitLeave(node, children, new ArithmeticExpressionBinder() { + @Override + public PDatum getBindMetaData(int i, List children, final Expression expression) { + PDataType type = expression.getDataType(); + if (type != null && type.isCoercibleTo(PDate.INSTANCE)) { + return getPDatumByExpression(expression, PDecimal.INSTANCE); } - if (childNode instanceof BindParseNode) { // TODO: valid/possibe? - context.getBindManager().addParamMetaData((BindParseNode)childNode, child); + return expression; + } + }, new ArithmeticExpressionFactory() { + @Override + public Expression create(ArithmeticParseNode node, List children) + throws SQLException { + boolean foundDate = false; + Determinism determinism = Determinism.ALWAYS; + PDataType theType = null; + for (int i = 0; i < children.size(); i++) { + Expression e = children.get(i); + determinism = determinism.combine(e.getDeterminism()); + PDataType type = e.getDataType(); + if (type == null) { + continue; + } else if (type.isCoercibleTo(PTimestamp.INSTANCE)) { + if (foundDate) { + throw TypeMismatchException.newException(type, node.toString()); + } + if ( + theType == null + || (theType != PTimestamp.INSTANCE && theType != PUnsignedTimestamp.INSTANCE) + ) { + theType = type; + } + foundDate = true; + } else if (type == PDecimal.INSTANCE) { + if (theType == null || !theType.isCoercibleTo(PTimestamp.INSTANCE)) { + theType = PDecimal.INSTANCE; + } + } else if (type.isCoercibleTo(PLong.INSTANCE)) { + if (theType == null) { + theType = PLong.INSTANCE; + } + } else if (type.isCoercibleTo(PDouble.INSTANCE)) { + if (theType == null) { + theType = PDouble.INSTANCE; + } + } else { + throw TypeMismatchException.newException(type, node.toString()); + } } - return wrapGroupByExpression(NotExpression.create(child, context.getTempPtr())); - } - - @Override - public boolean visitEnter(CastParseNode node) throws SQLException { - return true; - } - - // TODO: don't repeat this ugly cast logic (maybe use isCastable in the last else block. 
- private static Expression convertToRoundExpressionIfNeeded(PDataType fromDataType, PDataType targetDataType, List expressions) throws SQLException { - Expression firstChildExpr = expressions.get(0); - if(fromDataType == targetDataType) { - return firstChildExpr; - } else if ((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || - fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo( - PLong.INSTANCE)) { - return RoundDecimalExpression.create(expressions); - } else if (expressions.size() == 1 && fromDataType == PTimestamp.INSTANCE && - targetDataType.isCoercibleTo(PDate.INSTANCE)) { - return firstChildExpr; - } else if((fromDataType == PDecimal.INSTANCE || fromDataType == PTimestamp.INSTANCE || - fromDataType == PUnsignedTimestamp.INSTANCE) && targetDataType.isCoercibleTo( - PDate.INSTANCE)) { - return RoundTimestampExpression.create(expressions); - } else if(fromDataType.isCastableTo(targetDataType)) { - return firstChildExpr; + if (theType == PDecimal.INSTANCE) { + return new DecimalAddExpression(children); + } else if (theType == PLong.INSTANCE) { + return new LongAddExpression(children); + } else if (theType == PDouble.INSTANCE) { + return new DoubleAddExpression(children); + } else if (theType == null) { + return LiteralExpression.newConstant(null, theType, determinism); + } else if (theType == PTimestamp.INSTANCE || theType == PUnsignedTimestamp.INSTANCE) { + return new TimestampAddExpression(children); + } else if (theType.isCoercibleTo(PDate.INSTANCE)) { + return new DateAddExpression(children); } else { - throw TypeMismatchException.newException(fromDataType, targetDataType, firstChildExpr.toString()); + throw TypeMismatchException.newException(theType, node.toString()); } - } + } + }); + } - @Override - public Expression visitLeave(CastParseNode node, List children) throws SQLException { - ParseNode childNode = node.getChildren().get(0); - PDataType targetDataType = node.getDataType(); - Expression childExpr = children.get(0); - PDataType fromDataType = childExpr.getDataType(); - - if (childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode, childExpr); + @Override + public boolean visitEnter(SubtractParseNode node) throws SQLException { + return true; + } + + @Override + public Expression visitLeave(SubtractParseNode node, List children) + throws SQLException { + return visitLeave(node, children, new ArithmeticExpressionBinder() { + @Override + public PDatum getBindMetaData(int i, List children, final Expression expression) { + final PDataType type; + // If we're binding the first parameter and the second parameter + // is a date + // we know that the first parameter must be a date type too. 
+ if ( + i == 0 && (type = children.get(1).getDataType()) != null + && type.isCoercibleTo(PDate.INSTANCE) + ) { + return getPDatumByExpression(expression, type); + } else if ( + expression.getDataType() != null && expression.getDataType().isCoercibleTo(PDate.INSTANCE) + ) { + return getPDatumByExpression(expression, PDecimal.INSTANCE); } - - Expression expr = childExpr; - if(fromDataType != null) { + // Otherwise just go with what was calculated for the expression + return expression; + } + }, new ArithmeticExpressionFactory() { + @Override + public Expression create(ArithmeticParseNode node, List children) + throws SQLException { + int i = 0; + PDataType theType = null; + Expression e1 = children.get(0); + Expression e2 = children.get(1); + Determinism determinism = e1.getDeterminism().combine(e2.getDeterminism()); + PDataType type1 = e1.getDataType(); + PDataType type2 = e2.getDataType(); + // TODO: simplify this special case for DATE conversion + /** + * For date1-date2, we want to coerce to a LONG because this cannot be compared against + * another date. It has essentially become a number. For date1-5, we want to preserve the + * DATE type because this can still be compared against another date and cannot be + * multiplied or divided. Any other time occurs is an error. For example, 5-date1 is an + * error. The nulls occur if we have bind variables. + */ + boolean isType1Date = type1 != null && type1 != PTimestamp.INSTANCE + && type1 != PUnsignedTimestamp.INSTANCE && type1.isCoercibleTo(PDate.INSTANCE); + boolean isType2Date = type2 != null && type2 != PTimestamp.INSTANCE + && type2 != PUnsignedTimestamp.INSTANCE && type2.isCoercibleTo(PDate.INSTANCE); + if (isType1Date || isType2Date) { + if (isType1Date && isType2Date) { + i = 2; + theType = PDecimal.INSTANCE; + } else if (isType1Date && type2 != null && type2.isCoercibleTo(PDecimal.INSTANCE)) { + i = 2; + theType = PDate.INSTANCE; + } else if (type1 == null || type2 == null) { /* - * IndexStatementRewriter creates a CAST parse node when rewriting the query to use - * indexed columns. Without this check present we wrongly and unnecessarily - * end up creating a RoundExpression. + * FIXME: Could be either a Date or BigDecimal, but we don't know if we're comparing to + * a date or a number which would be disambiguate it. 
*/ - if (context.getCurrentTable().getTable().getType() != PTableType.INDEX) { - expr = convertToRoundExpressionIfNeeded(fromDataType, targetDataType, children); - } - } - boolean rowKeyOrderOptimizable = context.getCurrentTable().getTable().rowKeyOrderOptimizable(); - return wrapGroupByExpression(CoerceExpression.create(expr, targetDataType, SortOrder.getDefault(), expr.getMaxLength(), rowKeyOrderOptimizable)); - } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - return true; - } - - @Override - public Expression visitLeave(InListParseNode node, List l) throws SQLException { - List inChildren = l; - Expression firstChild = inChildren.get(0); - ImmutableBytesWritable ptr = context.getTempPtr(); - PDataType firstChildType = firstChild.getDataType(); - ParseNode firstChildNode = node.getChildren().get(0); - - if (firstChildType != null && !firstChildType.isComparisonSupported()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) - .setMessage(" for type " + firstChildType).build().buildException(); + i = 2; + theType = null; + } + } else if (type1 == PTimestamp.INSTANCE || type2 == PTimestamp.INSTANCE) { + i = 2; + theType = PTimestamp.INSTANCE; + } else if (type1 == PUnsignedTimestamp.INSTANCE || type2 == PUnsignedTimestamp.INSTANCE) { + i = 2; + theType = PUnsignedTimestamp.INSTANCE; } - if (firstChildNode instanceof BindParseNode) { - PDatum datum = firstChild; - if (firstChildType == null) { - datum = inferBindDatum(inChildren); + for (; i < children.size(); i++) { + // This logic finds the common type to which all child types are coercible + // without losing precision. + Expression e = children.get(i); + determinism = determinism.combine(e.getDeterminism()); + PDataType type = e.getDataType(); + if (type == null) { + continue; + } else if (type.isCoercibleTo(PLong.INSTANCE)) { + if (theType == null) { + theType = PLong.INSTANCE; } - context.getBindManager().addParamMetaData((BindParseNode)firstChildNode, datum); - } - for (int i = 1; i < l.size(); i++) { - ParseNode childNode = node.getChildren().get(i); - if (childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode, firstChild); + } else if (type == PDecimal.INSTANCE) { + // Coerce return type to DECIMAL from LONG or DOUBLE if DECIMAL child found, + // unless we're doing date arithmetic. 
+ if (theType == null || !theType.isCoercibleTo(PDate.INSTANCE)) { + theType = PDecimal.INSTANCE; } - } - return wrapGroupByExpression(InListExpression.create(inChildren, node.isNegate(), ptr, context.getCurrentTable().getTable().rowKeyOrderOptimizable())); - } - - private static final PDatum DECIMAL_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return true; - } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - - private static PDatum inferBindDatum(List children) { - boolean isChildTypeUnknown = false; - PDatum datum = children.get(1); - for (int i = 2; i < children.size(); i++) { - Expression child = children.get(i); - PDataType childType = child.getDataType(); - if (childType == null) { - isChildTypeUnknown = true; - } else if (datum.getDataType() == null) { - datum = child; - isChildTypeUnknown = true; - } else if (datum.getDataType() == childType || childType.isCoercibleTo(datum.getDataType())) { - continue; - } else if (datum.getDataType().isCoercibleTo(childType)) { - datum = child; + } else if (type.isCoercibleTo(PDouble.INSTANCE)) { + // Coerce return type to DOUBLE from LONG if DOUBLE child found, + // unless we're doing date arithmetic or we've found another child of type DECIMAL + if ( + theType == null + || (theType != PDecimal.INSTANCE && !theType.isCoercibleTo(PDate.INSTANCE)) + ) { + theType = PDouble.INSTANCE; } + } else { + throw TypeMismatchException.newException(type, node.toString()); + } } - // If we found an "unknown" child type and the return type is a number - // make the return type be the most general number type of DECIMAL. - // TODO: same for TIMESTAMP for DATE/TIME? - if (isChildTypeUnknown && datum.getDataType() != null && datum.getDataType().isCoercibleTo( - PDecimal.INSTANCE)) { - return DECIMAL_DATUM; + if (theType == PDecimal.INSTANCE) { + return new DecimalSubtractExpression(children); + } else if (theType == PLong.INSTANCE) { + return new LongSubtractExpression(children); + } else if (theType == PDouble.INSTANCE) { + return new DoubleSubtractExpression(children); + } else if (theType == null) { + return LiteralExpression.newConstant(null, theType, determinism); + } else if (theType == PTimestamp.INSTANCE || theType == PUnsignedTimestamp.INSTANCE) { + return new TimestampSubtractExpression(children); + } else if (theType.isCoercibleTo(PDate.INSTANCE)) { + return new DateSubtractExpression(children); + } else { + throw TypeMismatchException.newException(theType, node.toString()); } - return datum; - } + } + }); + } - @Override - public boolean visitEnter(IsNullParseNode node) throws SQLException { - return true; - } + @Override + public boolean visitEnter(MultiplyParseNode node) throws SQLException { + return true; + } - @Override - public Expression visitLeave(IsNullParseNode node, List children) throws SQLException { - ParseNode childNode = node.getChildren().get(0); - Expression child = children.get(0); - if (childNode instanceof BindParseNode) { // TODO: valid/possibe? 
- context.getBindManager().addParamMetaData((BindParseNode)childNode, child); + @Override + public Expression visitLeave(MultiplyParseNode node, List children) + throws SQLException { + return visitLeave(node, children, null, new ArithmeticExpressionFactory() { + @Override + public Expression create(ArithmeticParseNode node, List children) + throws SQLException { + PDataType theType = null; + Determinism determinism = Determinism.ALWAYS; + ExpressionDeterminism expressionDeterminism = + new ExpressionDeterminism(node, children, theType, determinism).invoke(); + theType = expressionDeterminism.getDataType(); + determinism = expressionDeterminism.getDeterminism(); + if (theType == PDecimal.INSTANCE) { + return new DecimalMultiplyExpression(children); + } else if (theType == PLong.INSTANCE) { + return new LongMultiplyExpression(children); + } else if (theType == PDouble.INSTANCE) { + return new DoubleMultiplyExpression(children); + } else { + return LiteralExpression.newConstant(null, theType, determinism); } - return wrapGroupByExpression(IsNullExpression.create(child, node.isNegate(), context.getTempPtr())); - } + } + }); + } - private static interface ArithmeticExpressionFactory { - Expression create(ArithmeticParseNode node, List children) throws SQLException; - } + @Override + public boolean visitEnter(DivideParseNode node) throws SQLException { + return true; + } - private static interface ArithmeticExpressionBinder { - PDatum getBindMetaData(int i, List children, Expression expression); + @Override + public Expression visitLeave(DivideParseNode node, List children) + throws SQLException { + for (int i = 1; i < children.size(); i++) { // Compile time check for divide by zero and null + Expression child = children.get(i); + if (child.getDataType() != null && child instanceof LiteralExpression) { + LiteralExpression literal = (LiteralExpression) child; + if (literal.getDataType() == PDecimal.INSTANCE) { + if (PDecimal.INSTANCE.compareTo(literal.getValue(), BigDecimal.ZERO) == 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DIVIDE_BY_ZERO).build() + .buildException(); + } + } else { + if (literal.getDataType().compareTo(literal.getValue(), 0L, PLong.INSTANCE) == 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DIVIDE_BY_ZERO).build() + .buildException(); + } + } + } } - - private Expression visitLeave(ArithmeticParseNode node, List children, ArithmeticExpressionBinder binder, ArithmeticExpressionFactory factory) - throws SQLException { - - boolean isNull = false; - for (Expression child : children) { - boolean isChildLiteral = (child instanceof LiteralExpression); - isNull |= isChildLiteral && ((LiteralExpression)child).getValue() == null; + return visitLeave(node, children, null, new ArithmeticExpressionFactory() { + @Override + public Expression create(ArithmeticParseNode node, List children) + throws SQLException { + PDataType theType = null; + Determinism determinism = Determinism.ALWAYS; + ExpressionDeterminism expressionDeterminism = + new ExpressionDeterminism(node, children, theType, determinism).invoke(); + theType = expressionDeterminism.getDataType(); + determinism = expressionDeterminism.getDeterminism(); + if (theType == PDecimal.INSTANCE) { + return new DecimalDivideExpression(children); + } else if (theType == PLong.INSTANCE) { + return new LongDivideExpression(children); + } else if (theType == PDouble.INSTANCE) { + return new DoubleDivideExpression(children); + } else { + return LiteralExpression.newConstant(null, theType, determinism); } + } + }); + } 
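
The visitLeave(DivideParseNode, ...) method above rejects a constant zero divisor at compile time, before any expression is built, while non-literal divisors are left for runtime evaluation. Below is a minimal standalone sketch of that guard; DivisorCheckSketch and checkDivisors are illustrative names, and the SQLException message is only a stand-in for SQLExceptionCode.DIVIDE_BY_ZERO.

  import java.math.BigDecimal;
  import java.sql.SQLException;
  import java.util.Arrays;
  import java.util.List;

  final class DivisorCheckSketch {
    // Operands are literal values where known, or null when only resolvable at runtime.
    static void checkDivisors(List<Object> operands) throws SQLException {
      // Index 0 is the dividend; every later operand is a divisor.
      for (int i = 1; i < operands.size(); i++) {
        Object literal = operands.get(i);
        if (literal == null) {
          continue; // bind variable or column reference: left to runtime evaluation
        }
        boolean isZero = literal instanceof BigDecimal
          ? ((BigDecimal) literal).compareTo(BigDecimal.ZERO) == 0
          : ((Number) literal).longValue() == 0L;
        if (isZero) {
          // Stand-in for SQLExceptionCode.DIVIDE_BY_ZERO in the real compiler
          throw new SQLException("Divide by zero");
        }
      }
    }

    public static void main(String[] args) throws SQLException {
      checkDivisors(List.<Object> of(10L, 2L));          // accepted
      checkDivisors(Arrays.<Object> asList(10L, null));  // accepted, checked at runtime instead
      try {
        checkDivisors(List.<Object> of(10L, new BigDecimal("0.0")));
      } catch (SQLException e) {
        System.out.println("rejected at compile time: " + e.getMessage());
      }
    }
  }
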
- Expression expression = factory.create(node, children); + @Override + public boolean visitEnter(ModulusParseNode node) throws SQLException { + return true; + } - for (int i = 0; i < node.getChildren().size(); i++) { - ParseNode childNode = node.getChildren().get(i); - if (childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode, binder == null ? expression : binder.getBindMetaData(i, children, expression)); - } + @Override + public Expression visitLeave(ModulusParseNode node, List children) + throws SQLException { + return visitLeave(node, children, null, new ArithmeticExpressionFactory() { + @Override + public Expression create(ArithmeticParseNode node, List children) + throws SQLException { + // ensure integer types + for (Expression child : children) { + PDataType type = child.getDataType(); + if (type != null && !type.isCoercibleTo(PLong.INSTANCE)) { + throw TypeMismatchException.newException(type, node.toString()); + } } - ImmutableBytesWritable ptr = context.getTempPtr(); + return new ModulusExpression(children); + } + }); + } - // If all children are literals, just evaluate now - if (ExpressionUtil.isConstant(expression)) { - return ExpressionUtil.getConstantExpression(expression, ptr); - } - else if (isNull) { - return LiteralExpression.newConstant(null, expression.getDataType(), expression.getDeterminism()); - } - // Otherwise create and return the expression - return wrapGroupByExpression(expression); - } - - @Override - public boolean visitEnter(AddParseNode node) throws SQLException { - return true; - } + @Override + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { + return true; + } - @Override - public Expression visitLeave(AddParseNode node, List children) throws SQLException { - return visitLeave(node, children, - new ArithmeticExpressionBinder() { - @Override - public PDatum getBindMetaData(int i, List children, final Expression expression) { - PDataType type = expression.getDataType(); - if (type != null && type.isCoercibleTo(PDate.INSTANCE)) { - return getPDatumByExpression(expression, PDecimal.INSTANCE); - } - return expression; - } - }, - new ArithmeticExpressionFactory() { - @Override - public Expression create(ArithmeticParseNode node, List children) throws SQLException { - boolean foundDate = false; - Determinism determinism = Determinism.ALWAYS; - PDataType theType = null; - for(int i = 0; i < children.size(); i++) { - Expression e = children.get(i); - determinism = determinism.combine(e.getDeterminism()); - PDataType type = e.getDataType(); - if (type == null) { - continue; - } else if (type.isCoercibleTo(PTimestamp.INSTANCE)) { - if (foundDate) { - throw TypeMismatchException.newException(type, node.toString()); - } - if (theType == null || (theType != PTimestamp.INSTANCE && theType != PUnsignedTimestamp.INSTANCE)) { - theType = type; - } - foundDate = true; - }else if (type == PDecimal.INSTANCE) { - if (theType == null || !theType.isCoercibleTo(PTimestamp.INSTANCE)) { - theType = PDecimal.INSTANCE; - } - } else if (type.isCoercibleTo(PLong.INSTANCE)) { - if (theType == null) { - theType = PLong.INSTANCE; - } - } else if (type.isCoercibleTo(PDouble.INSTANCE)) { - if (theType == null) { - theType = PDouble.INSTANCE; - } - } else { - throw TypeMismatchException.newException(type, node.toString()); - } - } - if (theType == PDecimal.INSTANCE) { - return new DecimalAddExpression(children); - } else if (theType == PLong.INSTANCE) { - return new LongAddExpression(children); - } else if 
(theType == PDouble.INSTANCE) { - return new DoubleAddExpression(children); - } else if (theType == null) { - return LiteralExpression.newConstant(null, theType, determinism); - } else if (theType == PTimestamp.INSTANCE || theType == PUnsignedTimestamp.INSTANCE) { - return new TimestampAddExpression(children); - } else if (theType.isCoercibleTo(PDate.INSTANCE)) { - return new DateAddExpression(children); - } else { - throw TypeMismatchException.newException(theType, node.toString()); - } - } - }); - } + @Override + public Expression visitLeave(ArrayAnyComparisonNode node, List children) + throws SQLException { + return new ArrayAnyComparisonExpression(children); + } - @Override - public boolean visitEnter(SubtractParseNode node) throws SQLException { - return true; - } + @Override + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { + return true; + } - @Override - public Expression visitLeave(SubtractParseNode node, List children) throws SQLException { - return visitLeave(node, children, new ArithmeticExpressionBinder() { - @Override - public PDatum getBindMetaData(int i, List children, - final Expression expression) { - final PDataType type; - // If we're binding the first parameter and the second parameter - // is a date - // we know that the first parameter must be a date type too. - if (i == 0 && (type = children.get(1).getDataType()) != null - && type.isCoercibleTo(PDate.INSTANCE)) { - return getPDatumByExpression(expression, type); - } else if (expression.getDataType() != null - && expression.getDataType().isCoercibleTo(PDate.INSTANCE)) { - return getPDatumByExpression(expression, PDecimal.INSTANCE); - } - // Otherwise just go with what was calculated for the expression - return expression; - } - }, new ArithmeticExpressionFactory() { - @Override - public Expression create(ArithmeticParseNode node, - List children) throws SQLException { - int i = 0; - PDataType theType = null; - Expression e1 = children.get(0); - Expression e2 = children.get(1); - Determinism determinism = e1.getDeterminism().combine(e2.getDeterminism()); - PDataType type1 = e1.getDataType(); - PDataType type2 = e2.getDataType(); - // TODO: simplify this special case for DATE conversion - /** - * For date1-date2, we want to coerce to a LONG because this - * cannot be compared against another date. It has essentially - * become a number. For date1-5, we want to preserve the DATE - * type because this can still be compared against another date - * and cannot be multiplied or divided. Any other time occurs is - * an error. For example, 5-date1 is an error. The nulls occur if - * we have bind variables. - */ - boolean isType1Date = - type1 != null - && type1 != PTimestamp.INSTANCE - && type1 != PUnsignedTimestamp.INSTANCE - && type1.isCoercibleTo(PDate.INSTANCE); - boolean isType2Date = - type2 != null - && type2 != PTimestamp.INSTANCE - && type2 != PUnsignedTimestamp.INSTANCE - && type2.isCoercibleTo(PDate.INSTANCE); - if (isType1Date || isType2Date) { - if (isType1Date && isType2Date) { - i = 2; - theType = PDecimal.INSTANCE; - } else if (isType1Date && type2 != null - && type2.isCoercibleTo(PDecimal.INSTANCE)) { - i = 2; - theType = PDate.INSTANCE; - } else if (type1 == null || type2 == null) { - /* - * FIXME: Could be either a Date or BigDecimal, but we - * don't know if we're comparing to a date or a number - * which would be disambiguate it. 
- */ - i = 2; - theType = null; - } - } else if(type1 == PTimestamp.INSTANCE || type2 == PTimestamp.INSTANCE) { - i = 2; - theType = PTimestamp.INSTANCE; - } else if(type1 == PUnsignedTimestamp.INSTANCE || type2 == PUnsignedTimestamp.INSTANCE) { - i = 2; - theType = PUnsignedTimestamp.INSTANCE; - } - - for (; i < children.size(); i++) { - // This logic finds the common type to which all child types are coercible - // without losing precision. - Expression e = children.get(i); - determinism = determinism.combine(e.getDeterminism()); - PDataType type = e.getDataType(); - if (type == null) { - continue; - } else if (type.isCoercibleTo(PLong.INSTANCE)) { - if (theType == null) { - theType = PLong.INSTANCE; - } - } else if (type == PDecimal.INSTANCE) { - // Coerce return type to DECIMAL from LONG or DOUBLE if DECIMAL child found, - // unless we're doing date arithmetic. - if (theType == null - || !theType.isCoercibleTo(PDate.INSTANCE)) { - theType = PDecimal.INSTANCE; - } - } else if (type.isCoercibleTo(PDouble.INSTANCE)) { - // Coerce return type to DOUBLE from LONG if DOUBLE child found, - // unless we're doing date arithmetic or we've found another child of type DECIMAL - if (theType == null - || (theType != PDecimal.INSTANCE && !theType.isCoercibleTo(PDate.INSTANCE) )) { - theType = PDouble.INSTANCE; - } - } else { - throw TypeMismatchException.newException(type, node.toString()); - } - } - if (theType == PDecimal.INSTANCE) { - return new DecimalSubtractExpression(children); - } else if (theType == PLong.INSTANCE) { - return new LongSubtractExpression(children); - } else if (theType == PDouble.INSTANCE) { - return new DoubleSubtractExpression(children); - } else if (theType == null) { - return LiteralExpression.newConstant(null, theType, determinism); - } else if (theType == PTimestamp.INSTANCE || theType == PUnsignedTimestamp.INSTANCE) { - return new TimestampSubtractExpression(children); - } else if (theType.isCoercibleTo(PDate.INSTANCE)) { - return new DateSubtractExpression(children); - } else { - throw TypeMismatchException.newException(theType, node.toString()); - } - } - }); - } + @Override + public boolean visitEnter(ArrayElemRefNode node) throws SQLException { + return true; + } - @Override - public boolean visitEnter(MultiplyParseNode node) throws SQLException { - return true; - } + @Override + public Expression visitLeave(ArrayElemRefNode node, List l) throws SQLException { + return new ArrayElemRefExpression(l); + } - @Override - public Expression visitLeave(MultiplyParseNode node, List children) throws SQLException { - return visitLeave(node, children, null, new ArithmeticExpressionFactory() { - @Override - public Expression create(ArithmeticParseNode node, List children) throws SQLException { - PDataType theType = null; - Determinism determinism = Determinism.ALWAYS; - ExpressionDeterminism expressionDeterminism = - new ExpressionDeterminism(node, children, theType, determinism).invoke(); - theType = expressionDeterminism.getDataType(); - determinism = expressionDeterminism.getDeterminism(); - if (theType == PDecimal.INSTANCE) { - return new DecimalMultiplyExpression( children); - } else if (theType == PLong.INSTANCE) { - return new LongMultiplyExpression( children); - } else if (theType == PDouble.INSTANCE) { - return new DoubleMultiplyExpression( children); - } else { - return LiteralExpression.newConstant(null, theType, determinism); - } - } - }); - } - - @Override - public boolean visitEnter(DivideParseNode node) throws SQLException { - return true; - } + @Override + 
public Expression visitLeave(ArrayAllComparisonNode node, List children) + throws SQLException { + return new ArrayAllComparisonExpression(children); + } - @Override - public Expression visitLeave(DivideParseNode node, List children) throws SQLException { - for (int i = 1; i < children.size(); i++) { // Compile time check for divide by zero and null - Expression child = children.get(i); - if (child.getDataType() != null && child instanceof LiteralExpression) { - LiteralExpression literal = (LiteralExpression)child; - if (literal.getDataType() == PDecimal.INSTANCE) { - if (PDecimal.INSTANCE.compareTo(literal.getValue(), BigDecimal.ZERO) == 0) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DIVIDE_BY_ZERO).build().buildException(); - } - } else { - if (literal.getDataType().compareTo(literal.getValue(), 0L, PLong.INSTANCE) == 0) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DIVIDE_BY_ZERO).build().buildException(); - } - } - } - } - return visitLeave(node, children, null, new ArithmeticExpressionFactory() { - @Override - public Expression create(ArithmeticParseNode node, List children) throws SQLException { - PDataType theType = null; - Determinism determinism = Determinism.ALWAYS; - ExpressionDeterminism expressionDeterminism = - new ExpressionDeterminism(node, children, theType, determinism).invoke(); - theType = expressionDeterminism.getDataType(); - determinism = expressionDeterminism.getDeterminism(); - if (theType == PDecimal.INSTANCE) { - return new DecimalDivideExpression( children); - } else if (theType == PLong.INSTANCE) { - return new LongDivideExpression( children); - } else if (theType == PDouble.INSTANCE) { - return new DoubleDivideExpression(children); - } else { - return LiteralExpression.newConstant(null, theType, determinism); - } - } - }); - } - - @Override - public boolean visitEnter(ModulusParseNode node) throws SQLException { - return true; - } + public static void throwNonAggExpressionInAggException(String nonAggregateExpression) + throws SQLException { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN) + .setMessage(nonAggregateExpression).build().buildException(); + } - @Override - public Expression visitLeave(ModulusParseNode node, List children) throws SQLException { - return visitLeave(node, children, null, new ArithmeticExpressionFactory() { - @Override - public Expression create(ArithmeticParseNode node, List children) throws SQLException { - // ensure integer types - for(Expression child : children) { - PDataType type = child.getDataType(); - if(type != null && !type.isCoercibleTo(PLong.INSTANCE)) { - throw TypeMismatchException.newException(type, node.toString()); - } - } - - return new ModulusExpression(children); - } - }); + @Override + public Expression visitLeave(StringConcatParseNode node, List children) + throws SQLException { + final StringConcatExpression expression = new StringConcatExpression(children); + for (int i = 0; i < children.size(); i++) { + ParseNode childNode = node.getChildren().get(i); + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, expression); + } + PDataType type = children.get(i).getDataType(); + if (type == PVarbinary.INSTANCE) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_NOT_SUPPORTED_FOR_OPERATOR) + .setMessage("Concatenation does not support " + type + " in expression" + node).build() + .buildException(); + } } - - @Override - public boolean visitEnter(ArrayAnyComparisonNode node) throws 
SQLException { - return true; + ImmutableBytesWritable ptr = context.getTempPtr(); + if (ExpressionUtil.isConstant(expression)) { + return ExpressionUtil.getConstantExpression(expression, ptr); } + return wrapGroupByExpression(expression); + } - @Override - public Expression visitLeave(ArrayAnyComparisonNode node, List children) throws SQLException { - return new ArrayAnyComparisonExpression(children); - } + @Override + public boolean visitEnter(StringConcatParseNode node) throws SQLException { + return true; + } - @Override - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { - return true; - } - - @Override - public boolean visitEnter(ArrayElemRefNode node) throws SQLException { - return true; - } - - @Override - public Expression visitLeave(ArrayElemRefNode node, List l) throws SQLException { - return new ArrayElemRefExpression(l); - } - - @Override - public Expression visitLeave(ArrayAllComparisonNode node, List children) throws SQLException { - return new ArrayAllComparisonExpression(children); - } + @Override + public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { + return true; + } - public static void throwNonAggExpressionInAggException(String nonAggregateExpression) throws SQLException { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN) - .setMessage(nonAggregateExpression).build().buildException(); - } + @Override + public Expression visitLeave(RowValueConstructorParseNode node, List l) + throws SQLException { + // Don't trim trailing nulls here, as we'd potentially be dropping bind + // variables that aren't bound yet. + return wrapGroupByExpression(new RowValueConstructorExpression(l, node.isStateless())); + } - @Override - public Expression visitLeave(StringConcatParseNode node, List children) throws SQLException { - final StringConcatExpression expression=new StringConcatExpression(children); - for (int i = 0; i < children.size(); i++) { - ParseNode childNode=node.getChildren().get(i); - if(childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode,expression); - } - PDataType type=children.get(i).getDataType(); - if(type == PVarbinary.INSTANCE){ - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_NOT_SUPPORTED_FOR_OPERATOR) - .setMessage("Concatenation does not support "+ type +" in expression" + node).build().buildException(); - } - } - ImmutableBytesWritable ptr = context.getTempPtr(); - if (ExpressionUtil.isConstant(expression)) { - return ExpressionUtil.getConstantExpression(expression, ptr); - } - return wrapGroupByExpression(expression); - } + @Override + public Expression visit(SequenceValueParseNode node) throws SQLException { + // NEXT VALUE FOR is only supported in SELECT expressions and UPSERT VALUES + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR) + .setSchemaName(node.getTableName().getSchemaName()) + .setTableName(node.getTableName().getTableName()).build().buildException(); + } - @Override - public boolean visitEnter(StringConcatParseNode node) throws SQLException { - return true; + @Override + public Expression visitLeave(ArrayConstructorNode node, List children) + throws SQLException { + boolean isChildTypeUnknown = false; + Expression arrayElemChild = null; + PDataType arrayElemDataType = children.get(0).getDataType(); + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + PDataType childType = child.getDataType(); + if (childType == null) { + 
isChildTypeUnknown = true; + } else if (arrayElemDataType == null) { + arrayElemDataType = childType; + isChildTypeUnknown = true; + arrayElemChild = child; + } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) { + continue; + } else if (arrayElemDataType.isCoercibleTo(childType)) { + arrayElemChild = child; + arrayElemDataType = childType; + } else { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) + .setMessage("Case expressions must have common type: " + arrayElemDataType + + " cannot be coerced to " + childType) + .build().buildException(); + } } - - @Override - public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { - return true; + // If we found an "unknown" child type and the return type is a number + // make the return type be the most general number type of DECIMAL. + if ( + isChildTypeUnknown && arrayElemDataType != null + && arrayElemDataType.isCoercibleTo(PDecimal.INSTANCE) + ) { + arrayElemDataType = PDecimal.INSTANCE; + } + final PDataType theArrayElemDataType = arrayElemDataType; + for (int i = 0; i < node.getChildren().size(); i++) { + ParseNode childNode = node.getChildren().get(i); + if (childNode instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) childNode, + arrayElemDataType == arrayElemChild.getDataType() + ? arrayElemChild + : new DelegateDatum(arrayElemChild) { + @Override + public PDataType getDataType() { + return theArrayElemDataType; + } + }); + } } - - @Override - public Expression visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { - // Don't trim trailing nulls here, as we'd potentially be dropping bind - // variables that aren't bound yet. - return wrapGroupByExpression(new RowValueConstructorExpression(l, node.isStateless())); + ImmutableBytesWritable ptr = context.getTempPtr(); + // the value object array type should match the java known type + Object[] elements = (Object[]) java.lang.reflect.Array + .newInstance(theArrayElemDataType.getJavaClass(), children.size()); + + boolean rowKeyOrderOptimizable = context.getCurrentTable().getTable().rowKeyOrderOptimizable(); + ArrayConstructorExpression arrayExpression = + new ArrayConstructorExpression(children, arrayElemDataType, rowKeyOrderOptimizable); + if (ExpressionUtil.isConstant(arrayExpression)) { + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + child.evaluate(null, ptr); + Object value = null; + if (child.getDataType() == null) { + value = arrayElemDataType.toObject(ptr, theArrayElemDataType, child.getSortOrder()); + } else { + value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder()); + } + elements[i] = LiteralExpression + .newConstant(value, theArrayElemDataType, child.getDeterminism()).getValue(); + } + Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements); + return LiteralExpression.newConstant(value, + PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, arrayExpression.getSortOrder(), Determinism.ALWAYS, rowKeyOrderOptimizable); } - @Override - public Expression visit(SequenceValueParseNode node) - throws SQLException { - // NEXT VALUE FOR is only supported in SELECT expressions and UPSERT VALUES - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR) - .setSchemaName(node.getTableName().getSchemaName()) - .setTableName(node.getTableName().getTableName()).build().buildException(); - } + return 
wrapGroupByExpression(arrayExpression); + } - @Override - public Expression visitLeave(ArrayConstructorNode node, List children) throws SQLException { - boolean isChildTypeUnknown = false; - Expression arrayElemChild = null; - PDataType arrayElemDataType = children.get(0).getDataType(); - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - PDataType childType = child.getDataType(); - if (childType == null) { - isChildTypeUnknown = true; - } else if (arrayElemDataType == null) { - arrayElemDataType = childType; - isChildTypeUnknown = true; - arrayElemChild = child; - } else if (arrayElemDataType == childType || childType.isCoercibleTo(arrayElemDataType)) { - continue; - } else if (arrayElemDataType.isCoercibleTo(childType)) { - arrayElemChild = child; - arrayElemDataType = childType; - } else { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage( - "Case expressions must have common type: " + arrayElemDataType - + " cannot be coerced to " + childType).build().buildException(); - } - } - // If we found an "unknown" child type and the return type is a number - // make the return type be the most general number type of DECIMAL. - if (isChildTypeUnknown && arrayElemDataType != null && arrayElemDataType.isCoercibleTo( - PDecimal.INSTANCE)) { - arrayElemDataType = PDecimal.INSTANCE; - } - final PDataType theArrayElemDataType = arrayElemDataType; - for (int i = 0; i < node.getChildren().size(); i++) { - ParseNode childNode = node.getChildren().get(i); - if (childNode instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)childNode, - arrayElemDataType == arrayElemChild.getDataType() ? arrayElemChild : - new DelegateDatum(arrayElemChild) { - @Override - public PDataType getDataType() { - return theArrayElemDataType; - } - }); - } - } - ImmutableBytesWritable ptr = context.getTempPtr(); - // the value object array type should match the java known type - Object[] elements = (Object[]) java.lang.reflect.Array.newInstance(theArrayElemDataType.getJavaClass(), children.size()); - - boolean rowKeyOrderOptimizable = context.getCurrentTable().getTable().rowKeyOrderOptimizable(); - ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, arrayElemDataType, rowKeyOrderOptimizable); - if (ExpressionUtil.isConstant(arrayExpression)) { - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - child.evaluate(null, ptr); - Object value = null; - if (child.getDataType() == null) { - value = arrayElemDataType.toObject(ptr, theArrayElemDataType, child.getSortOrder()); - } else { - value = arrayElemDataType.toObject(ptr, child.getDataType(), child.getSortOrder()); - } - elements[i] = LiteralExpression.newConstant(value, theArrayElemDataType, child.getDeterminism()).getValue(); - } - Object value = PArrayDataType.instantiatePhoenixArray(arrayElemDataType, elements); - return LiteralExpression.newConstant(value, - PDataType.fromTypeId(arrayElemDataType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, arrayExpression.getSortOrder(), Determinism.ALWAYS, rowKeyOrderOptimizable); - } - - return wrapGroupByExpression(arrayExpression); - } + @Override + public boolean visitEnter(ArrayConstructorNode node) throws SQLException { + return true; + } - @Override - public boolean visitEnter(ArrayConstructorNode node) throws SQLException { - return true; - } + @Override + public boolean visitEnter(ExistsParseNode node) throws SQLException { + return true; + } - @Override - 
public boolean visitEnter(ExistsParseNode node) throws SQLException { - return true; - } + @Override + public Expression visitLeave(ExistsParseNode node, List l) throws SQLException { + LiteralExpression child = (LiteralExpression) l.get(0); + boolean elementExists = child != null && child.getValue() != null + && ((PhoenixArray) child.getValue()).getDimensions() > 0; + return LiteralExpression.newConstant(elementExists ^ node.isNegate(), PBoolean.INSTANCE); + } - @Override - public Expression visitLeave(ExistsParseNode node, List l) throws SQLException { - LiteralExpression child = (LiteralExpression) l.get(0); - boolean elementExists = child != null - && child.getValue() != null - && ((PhoenixArray)child.getValue()).getDimensions() > 0; - return LiteralExpression.newConstant(elementExists ^ node.isNegate(), PBoolean.INSTANCE); - } + @Override + public Expression visit(SubqueryParseNode node) throws SQLException { + Object result = context.getSubqueryResult(node.getSelectNode()); + return LiteralExpression.newConstant(result); + } - @Override - public Expression visit(SubqueryParseNode node) throws SQLException { - Object result = context.getSubqueryResult(node.getSelectNode()); - return LiteralExpression.newConstant(result); - } - - public int getTotalNodeCount() { - return totalNodeCount; - } + public int getTotalNodeCount() { + return totalNodeCount; + } private PDatum getPDatumByExpression(Expression expression, PDataType pDataTypeInput) { return new PDatumImpl(expression, pDataTypeInput); @@ -1331,8 +1395,8 @@ private static class ExpressionDeterminism { private PDataType theType; private Determinism determinism; - ExpressionDeterminism(ArithmeticParseNode node, List children, - PDataType theType, Determinism determinism) { + ExpressionDeterminism(ArithmeticParseNode node, List children, PDataType theType, + Determinism determinism) { this.node = node; this.children = children; this.theType = theType; diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java index 3785846824e..84e6a7552b4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,49 +20,42 @@ import java.util.Iterator; import java.util.Map; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.expression.Expression; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; /** - * - * Class to manage list of expressions inside of a select statement by - * deduping them. - * - * + * Class to manage list of expressions inside of a select statement by deduping them. * @since 0.1 */ public class ExpressionManager { - // Use a Map instead of a Set because we need to get and return - // the existing Expression - private final Map expressionMap; - - public ExpressionManager() { - expressionMap = Maps.newHashMap(); - } - - /** - * Add the expression to the set of known expressions for the select - * clause. 
If the expression is already in the set, then the new one - * passed in is ignored. - * @param expression the new expression to add - * @return the new expression if not already present in the set and - * the existing one otherwise. - */ - public Expression addIfAbsent(Expression expression) { - Expression existingExpression = expressionMap.get(expression); - if (existingExpression == null) { - expressionMap.put(expression, expression); - return expression; - } - return existingExpression; - } - - public int getExpressionCount() { - return expressionMap.size(); - } - - public Iterator getExpressions() { - return expressionMap.keySet().iterator(); + // Use a Map instead of a Set because we need to get and return + // the existing Expression + private final Map expressionMap; + + public ExpressionManager() { + expressionMap = Maps.newHashMap(); + } + + /** + * Add the expression to the set of known expressions for the select clause. If the expression is + * already in the set, then the new one passed in is ignored. + * @param expression the new expression to add + * @return the new expression if not already present in the set and the existing one otherwise. + */ + public Expression addIfAbsent(Expression expression) { + Expression existingExpression = expressionMap.get(expression); + if (existingExpression == null) { + expressionMap.put(expression, expression); + return expression; } + return existingExpression; + } + + public int getExpressionCount() { + return expressionMap.size(); + } + + public Iterator getExpressions() { + return expressionMap.keySet().iterator(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java index bc8902df254..67f9c87a966 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ExpressionProjector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,103 +17,101 @@ */ package org.apache.phoenix.compile; - import java.sql.SQLException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; - +import org.apache.phoenix.schema.types.PDataType; /** - * * Projector for getting value from a select statement for an expression - * - * * @since 0.1 */ public class ExpressionProjector implements ColumnProjector { - private final String name; - private final Expression expression; - private final String tableName; - private final boolean isCaseSensitive; - private final String label; + private final String name; + private final Expression expression; + private final String tableName; + private final boolean isCaseSensitive; + private final String label; - public ExpressionProjector(String name, String label, String tableName, Expression expression, boolean isCaseSensitive) { - this.name = name; - this.label = label; - this.expression = expression; - this.tableName = tableName; - this.isCaseSensitive = isCaseSensitive; - } + public ExpressionProjector(String name, String label, String tableName, Expression expression, + boolean isCaseSensitive) { + this.name = name; + this.label = label; + this.expression = expression; + this.tableName = tableName; + this.isCaseSensitive = isCaseSensitive; + } - @Override - public String getTableName() { - return tableName; - } + @Override + public String getTableName() { + return tableName; + } - @Override - public Expression getExpression() { - return expression; - } + @Override + public Expression getExpression() { + return expression; + } - @Override - public String getName() { - return name; - } + @Override + public String getName() { + return name; + } - @Override - public String getLabel() { - return label; - } + @Override + public String getLabel() { + return label; + } - @Override - public final Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr) throws SQLException { - try { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return null; - } - if (ptr.getLength() == 0) { - return null; - } - return type.toObject(ptr, expression.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale()); - } catch (RuntimeException e) { - // FIXME: Expression.evaluate does not throw SQLException - // so this will unwrap throws from that. - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw e; - } + @Override + public final Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr) + throws SQLException { + try { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return null; + } + if (ptr.getLength() == 0) { + return null; + } + return type.toObject(ptr, expression.getDataType(), expression.getSortOrder(), + expression.getMaxLength(), expression.getScale()); + } catch (RuntimeException e) { + // FIXME: Expression.evaluate does not throw SQLException + // so this will unwrap throws from that. 
+ if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw e; } + } - @Override - public final Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr, - Class jdbcType) throws SQLException { - try { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return null; - } - if (ptr.getLength() == 0) { - return null; - } - return type.toObject(ptr, expression.getDataType(), expression.getSortOrder(), - expression.getMaxLength(), expression.getScale(), jdbcType); - } catch (RuntimeException e) { - // FIXME: Expression.evaluate does not throw SQLException - // so this will unwrap throws from that. - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw e; - } + @Override + public final Object getValue(Tuple tuple, PDataType type, ImmutableBytesWritable ptr, + Class jdbcType) throws SQLException { + try { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return null; + } + if (ptr.getLength() == 0) { + return null; + } + return type.toObject(ptr, expression.getDataType(), expression.getSortOrder(), + expression.getMaxLength(), expression.getScale(), jdbcType); + } catch (RuntimeException e) { + // FIXME: Expression.evaluate does not throw SQLException + // so this will unwrap throws from that. + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw e; } + } - @Override - public boolean isCaseSensitive() { - return isCaseSensitive; - } + @Override + public boolean isCaseSensitive() { + return isCaseSensitive; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java index 9d3a6b8952f..706aa2eb815 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/FromCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; + import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.util.ArrayList; @@ -34,6 +36,7 @@ import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.monitoring.TableMetricsManager; import org.apache.phoenix.parse.AliasedNode; import org.apache.phoenix.parse.BindTableNode; import org.apache.phoenix.parse.ColumnDef; @@ -86,1135 +89,1145 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.Closeables; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TransactionUtil; -import org.apache.phoenix.monitoring.TableMetricsManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; - /** * Validates FROM clause and builds a ColumnResolver for resolving column references - * - * * @since 0.1 */ public class FromCompiler { - private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class); - - public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() { + private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class); - @Override - public List getTables() { - return Collections.singletonList(TableRef.EMPTY_TABLE_REF); - } + public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() { - @Override - public List getFunctions() { - return Collections.emptyList(); - } + @Override + public List getTables() { + return Collections.singletonList(TableRef.EMPTY_TABLE_REF); + } - @Override - public TableRef resolveTable(String schemaName, String tableName) - throws SQLException { - throw new TableNotFoundException(schemaName, tableName); - } + @Override + public List getFunctions() { + return Collections.emptyList(); + } - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - throw new ColumnNotFoundException(schemaName, tableName, null, colName); - } - - @Override - public PFunction resolveFunction(String 
functionName) throws SQLException { - throw new FunctionNotFoundException(functionName); - } + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new TableNotFoundException(schemaName, tableName); + } - @Override - public boolean hasUDFs() { - return false; - } + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + throw new ColumnNotFoundException(schemaName, tableName, null, colName); + } - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - throw new SchemaNotFoundException(schemaName); - } + @Override + public PFunction resolveFunction(String functionName) throws SQLException { + throw new FunctionNotFoundException(functionName); + } - @Override - public List getSchemas() { - return Collections.emptyList(); - } + @Override + public boolean hasUDFs() { + return false; + } - }; - - public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection) - throws SQLException { - - TableName baseTable = statement.getBaseTableName(); - String schemaName; - if (SchemaUtil.isSchemaCheckRequired(statement.getTableType(), - connection.getQueryServices().getProps())) { - // To ensure schema set through properties or connection - // string exists before creating table - schemaName = statement.getTableName().getSchemaName() != null - ? statement.getTableName().getSchemaName() : connection.getSchema(); - if (schemaName != null) { - // Only create SchemaResolver object to check if constructor throws exception. - // No exception means schema exists - new SchemaResolver(connection, schemaName, true); - } - } - if (baseTable == null) { - return EMPTY_TABLE_RESOLVER; - } - NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.emptyList()); - // Always use non-tenant-specific connection here - try { - SingleTableColumnResolver visitor - = new SingleTableColumnResolver(connection, tableNode, true, true); - return visitor; - } catch (TableNotFoundException e) { - // Used for mapped VIEW, since we won't be able to resolve that. - // Instead, we create a table with just the dynamic columns. - // A tenant-specific connection may not create a mapped VIEW. 
- if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) { - ConnectionQueryServices services = connection.getQueryServices(); - boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(), connection.getQueryServices().getProps()); - byte[] fullTableName = SchemaUtil.getPhysicalHBaseTableName( - baseTable.getSchemaName(), baseTable.getTableName(), isNamespaceMapped).getBytes(); - Table htable = null; - try { - htable = services.getTable(fullTableName); - } catch (UnsupportedOperationException ignore) { - throw e; // For Connectionless - } finally { - if (htable != null) Closeables.closeQuietly(htable); - } - tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs()); - return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap(1), isNamespaceMapped); - } - throw e; - } + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + throw new SchemaNotFoundException(schemaName); } - public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection) - throws SQLException{ - return getResolverForQuery(statement, connection, false, null); + @Override + public List getSchemas() { + return Collections.emptyList(); } - /** - * Iterate through the nodes in the FROM clause to build a column resolver used to lookup a column given the name - * and alias. - * - * @param statement - * the select statement - * @return the column resolver - * @throws SQLException - * @throws SQLFeatureNotSupportedException - * if unsupported constructs appear in the FROM clause - * @throws TableNotFoundException - * if table name not found in schema + }; + + public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, + final PhoenixConnection connection) throws SQLException { + + TableName baseTable = statement.getBaseTableName(); + String schemaName; + if ( + SchemaUtil.isSchemaCheckRequired(statement.getTableType(), + connection.getQueryServices().getProps()) + ) { + // To ensure schema set through properties or connection + // string exists before creating table + schemaName = statement.getTableName().getSchemaName() != null + ? statement.getTableName().getSchemaName() + : connection.getSchema(); + if (schemaName != null) { + // Only create SchemaResolver object to check if constructor throws exception. + // No exception means schema exists + new SchemaResolver(connection, schemaName, true); + } + } + if (baseTable == null) { + return EMPTY_TABLE_RESOLVER; + } + NamedTableNode tableNode = + NamedTableNode.create(null, baseTable, Collections. emptyList()); + // Always use non-tenant-specific connection here + try { + SingleTableColumnResolver visitor = + new SingleTableColumnResolver(connection, tableNode, true, true); + return visitor; + } catch (TableNotFoundException e) { + // Used for mapped VIEW, since we won't be able to resolve that. + // Instead, we create a table with just the dynamic columns. + // A tenant-specific connection may not create a mapped VIEW. 
+ if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) { + ConnectionQueryServices services = connection.getQueryServices(); + boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(), + connection.getQueryServices().getProps()); + byte[] fullTableName = SchemaUtil.getPhysicalHBaseTableName(baseTable.getSchemaName(), + baseTable.getTableName(), isNamespaceMapped).getBytes(); + Table htable = null; + try { + htable = services.getTable(fullTableName); + } catch (UnsupportedOperationException ignore) { + throw e; // For Connectionless + } finally { + if (htable != null) Closeables.closeQuietly(htable); + } + tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs()); + return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), + new HashMap(1), isNamespaceMapped); + } + throw e; + } + } + + public static ColumnResolver getResolverForQuery(SelectStatement statement, + PhoenixConnection connection) throws SQLException { + return getResolverForQuery(statement, connection, false, null); + } + + /** + * Iterate through the nodes in the FROM clause to build a column resolver used to lookup a column + * given the name and alias. the select statement + * @return the column resolver if unsupported constructs appear in the FROM clause if table name + * not found in schema + */ + public static ColumnResolver getResolverForQuery(SelectStatement statement, + PhoenixConnection connection, boolean alwaysHitServer, TableName mutatingTableName) + throws SQLException { + TableNode fromNode = statement.getFrom(); + if (fromNode == null) + return new ColumnResolverWithUDF(connection, 1, true, statement.getUdfParseNodes()); + if (fromNode instanceof NamedTableNode) + return new SingleTableColumnResolver(connection, (NamedTableNode) fromNode, true, 1, + statement.getUdfParseNodes(), alwaysHitServer, mutatingTableName); + + MultiTableColumnResolver visitor = + new MultiTableColumnResolver(connection, 1, statement.getUdfParseNodes(), mutatingTableName); + fromNode.accept(visitor); + return visitor; + } + + /** + * Refresh the inner state of {@link MultiTableColumnResolver} for the derivedTableNode when the + * derivedTableNode is changed for some sql optimization. 
+ */ + public static TableRef refreshDerivedTableNode(ColumnResolver columnResolver, + DerivedTableNode derivedTableNode) throws SQLException { + if (!(columnResolver instanceof MultiTableColumnResolver)) { + throw new UnsupportedOperationException(); + } + return ((MultiTableColumnResolver) columnResolver).refreshDerivedTableNode(derivedTableNode); + } + + public static ColumnResolver getResolverForSchema(UseSchemaStatement statement, + PhoenixConnection connection) throws SQLException { + return new SchemaResolver(connection, SchemaUtil.normalizeIdentifier(statement.getSchemaName()), + true); + } + + public static ColumnResolver getResolverForSchema(String schema, PhoenixConnection connection) + throws SQLException { + return new SchemaResolver(connection, schema, true); + } + + public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection) + throws SQLException { + SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true); + return visitor; + } + + public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, + boolean updateCacheImmediately) throws SQLException { + SingleTableColumnResolver visitor = + new SingleTableColumnResolver(connection, tableNode, updateCacheImmediately); + return visitor; + } + + public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, + Map udfParseNodes) throws SQLException { + SingleTableColumnResolver visitor = + new SingleTableColumnResolver(connection, tableNode, true, 0, udfParseNodes); + return visitor; + } + + public static ColumnResolver getResolver(SingleTableStatement statement, + PhoenixConnection connection) throws SQLException { + SingleTableColumnResolver visitor = + new SingleTableColumnResolver(connection, statement.getTable(), true, true); + return visitor; + } + + public static ColumnResolver getIndexResolver(SingleTableStatement statement, + PhoenixConnection connection) throws SQLException { + try { + return getResolver(statement, connection); + } catch (TableNotFoundException e) { + throw new IndexNotFoundException(e.getSchemaName(), e.getTableName(), e.getTimeStamp()); + } + } + + public static ColumnResolver getResolverForCreateIndex(SingleTableStatement statement, + PhoenixConnection connection, Map udfParseNodes) throws SQLException { + // use alwaysHitServer=true to ensure client's cache is up-to-date even when client is + // validating last_ddl_timestamps and UCF = never. 
+ SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, + statement.getTable(), true, 0, udfParseNodes, true, null); + return visitor; + } + + public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, + TableRef tableRef, RowProjector projector) throws SQLException { + List projectedColumns = new ArrayList(); + PTable table = tableRef.getTable(); + for (PColumn column : table.getColumns()) { + Expression sourceExpression = + projector.getColumnProjector(column.getPosition()).getExpression(); + PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(), + sourceExpression.getDataType(), sourceExpression.getMaxLength(), + sourceExpression.getScale(), sourceExpression.isNullable(), column.getPosition(), + sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), + column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), + column.isDynamic(), column.getColumnQualifierBytes(), column.getTimestamp()); + projectedColumns.add(projectedColumn); + } + PTable t = PTableImpl.builderWithColumns(table, projectedColumns).build(); + return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, + tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols())); + } + + public static ColumnResolver getResolver(TableRef tableRef) throws SQLException { + SingleTableColumnResolver visitor = new SingleTableColumnResolver(tableRef); + return visitor; + } + + public static ColumnResolver getResolver(PhoenixConnection connection, TableRef tableRef, + Map udfParseNodes) throws SQLException { + SingleTableColumnResolver visitor = + new SingleTableColumnResolver(connection, tableRef, udfParseNodes, null); + return visitor; + } + + public static ColumnResolver getResolverForMutation(DMLStatement statement, + PhoenixConnection connection) throws SQLException { + /* + * We validate the meta data at commit time for mutations, as this allows us to do many UPSERT + * VALUES calls without hitting the server each time to check if the meta data is up-to-date. 
*/ - public static ColumnResolver getResolverForQuery(SelectStatement statement, PhoenixConnection connection, boolean alwaysHitServer, TableName mutatingTableName) - throws SQLException { - TableNode fromNode = statement.getFrom(); - if (fromNode == null) - return new ColumnResolverWithUDF(connection, 1, true, statement.getUdfParseNodes()); - if (fromNode instanceof NamedTableNode) - return new SingleTableColumnResolver(connection, (NamedTableNode) fromNode, true, 1, statement.getUdfParseNodes(), alwaysHitServer, mutatingTableName); - - MultiTableColumnResolver visitor = new MultiTableColumnResolver(connection, 1, statement.getUdfParseNodes(), mutatingTableName); - fromNode.accept(visitor); - return visitor; + SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, + statement.getTable(), false, 0, statement.getUdfParseNodes()); + return visitor; + } + + public static ColumnResolver getResolverForProjectedTable(PTable projectedTable, + PhoenixConnection connection, Map udfParseNodes) throws SQLException { + return new ProjectedTableColumnResolver(projectedTable, connection, udfParseNodes); + } + + private static class SchemaResolver extends BaseColumnResolver { + private final List schemas; + + public SchemaResolver(PhoenixConnection conn, String schemaName, boolean updateCacheImmediately) + throws SQLException { + super(conn, 0, null); + schemaName = + connection.getSchema() != null && schemaName == null ? connection.getSchema() : schemaName; + schemas = ImmutableList.of(createSchemaRef(schemaName, updateCacheImmediately)); } - /** - * Refresh the inner state of {@link MultiTableColumnResolver} for the derivedTableNode when - * the derivedTableNode is changed for some sql optimization. - * @param columnResolver - * @param derivedTableNode - * @return - * @throws SQLException - */ - public static TableRef refreshDerivedTableNode( - ColumnResolver columnResolver, DerivedTableNode derivedTableNode) throws SQLException { - if (!(columnResolver instanceof MultiTableColumnResolver)) { - throw new UnsupportedOperationException(); - } - return ((MultiTableColumnResolver)columnResolver).refreshDerivedTableNode(derivedTableNode); + @Override + public List getTables() { + throw new UnsupportedOperationException(); } - public static ColumnResolver getResolverForSchema(UseSchemaStatement statement, PhoenixConnection connection) - throws SQLException { - return new SchemaResolver(connection, SchemaUtil.normalizeIdentifier(statement.getSchemaName()), true); + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new UnsupportedOperationException(); } - public static ColumnResolver getResolverForSchema(String schema, PhoenixConnection connection) throws SQLException { - return new SchemaResolver(connection, schema, true); + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + throw new UnsupportedOperationException(); } - public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection) throws SQLException { - SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true); - return visitor; + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + return schemas.get(0); } - public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, boolean updateCacheImmediately) throws SQLException { - SingleTableColumnResolver visitor = new 
SingleTableColumnResolver(connection, tableNode, updateCacheImmediately); - return visitor; + @Override + public List getSchemas() { + return schemas; } - public static ColumnResolver getResolver(NamedTableNode tableNode, PhoenixConnection connection, Map udfParseNodes) throws SQLException { - SingleTableColumnResolver visitor = - new SingleTableColumnResolver(connection, tableNode, true, 0, udfParseNodes); - return visitor; + } + + private static class SingleTableColumnResolver extends BaseColumnResolver { + private final List tableRefs; + private final String alias; + private final List schemas; + + public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, + long timeStamp, Map udfParseNodes, boolean isNamespaceMapped) + throws SQLException { + super(connection, 0, false, udfParseNodes, null); + List families = + Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size()); + for (ColumnDef def : table.getDynamicColumns()) { + if (def.getColumnDefName().getFamilyName() != null) { + families + .add(new PColumnFamilyImpl(PNameFactory.newName(def.getColumnDefName().getFamilyName()), + Collections. emptyList()));// , NON_ENCODED_QUALIFIERS)); + } + } + Long scn = connection.getSCN(); + String schema = table.getName().getSchemaName(); + if (connection.getSchema() != null) { + schema = schema != null ? schema : connection.getSchema(); + } + + // Storage scheme and encoding scheme don't matter here since the PTable is being used only + // for the purposes of create table. + // The actual values of these two will be determined by the metadata client. + PName tenantId = connection.getTenantId(); + PTableImpl.checkTenantId(tenantId); + String tableName = table.getName().getTableName(); + PName name = PNameFactory.newName(SchemaUtil.getTableName(schema, tableName)); + PTable theTable = new PTableImpl.Builder().setTenantId(tenantId).setName(name) + .setKey(new PTableKey(tenantId, name.getString())) + .setSchemaName(PNameFactory.newName(schema)).setTableName(PNameFactory.newName(tableName)) + .setType(PTableType.VIEW).setViewType(PTable.ViewType.MAPPED) + .setTimeStamp(scn == null ? 
HConstants.LATEST_TIMESTAMP : scn) + .setPkColumns(Collections.emptyList()).setAllColumns(Collections.emptyList()) + .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA).setIndexes(Collections.emptyList()) + .setFamilyAttributes(families).setPhysicalNames(Collections.emptyList()) + .setNamespaceMapped(isNamespaceMapped).build(); + theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable); + alias = null; + tableRefs = ImmutableList + .of(new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty())); + schemas = ImmutableList.of(new PSchema(theTable.getSchemaName().toString(), timeStamp)); } - public static ColumnResolver getResolver(SingleTableStatement statement, PhoenixConnection connection) - throws SQLException { - SingleTableColumnResolver visitor - = new SingleTableColumnResolver(connection, statement.getTable(), true, true); - return visitor; + public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, + boolean updateCacheImmediately) throws SQLException { + this(connection, tableNode, updateCacheImmediately, 0, new HashMap(1)); } - public static ColumnResolver getIndexResolver(SingleTableStatement statement, - PhoenixConnection connection) throws SQLException { - try { - return getResolver(statement, connection); - } catch (TableNotFoundException e) { - throw new IndexNotFoundException(e.getSchemaName(), e.getTableName(), e.getTimeStamp()); - } + public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, + boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException { + this(connection, tableNode, updateCacheImmediately, 0, new HashMap(1), + alwaysHitServer, null); } - public static ColumnResolver getResolverForCreateIndex(SingleTableStatement statement, - PhoenixConnection connection, Map udfParseNodes) - throws SQLException { - // use alwaysHitServer=true to ensure client's cache is up-to-date even when client is - // validating last_ddl_timestamps and UCF = never. 
- SingleTableColumnResolver visitor - = new SingleTableColumnResolver(connection, statement.getTable(), true, 0, - udfParseNodes, true, null); - return visitor; - } - - public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, TableRef tableRef, RowProjector projector) - throws SQLException { - List projectedColumns = new ArrayList(); - PTable table = tableRef.getTable(); - for (PColumn column : table.getColumns()) { - Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression(); - PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(), - sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(), - column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(), - column.getTimestamp()); - projectedColumns.add(projectedColumn); - } - PTable t = PTableImpl.builderWithColumns(table, projectedColumns) - .build(); - return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols())); + public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, + boolean updateCacheImmediately, int tsAddition, Map udfParseNodes) + throws SQLException { + this(connection, tableNode, updateCacheImmediately, tsAddition, udfParseNodes, false, null); } - public static ColumnResolver getResolver(TableRef tableRef) - throws SQLException { - SingleTableColumnResolver visitor = new SingleTableColumnResolver(tableRef); - return visitor; + public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, + boolean updateCacheImmediately, int tsAddition, Map udfParseNodes, + boolean alwaysHitServer, TableName mutatingTableName) throws SQLException { + super(connection, tsAddition, updateCacheImmediately, udfParseNodes, mutatingTableName); + alias = tableNode.getAlias(); + TableRef tableRef = createTableRef(tableNode.getName().getSchemaName(), tableNode, + updateCacheImmediately, alwaysHitServer); + PSchema schema = new PSchema(tableRef.getTable().getSchemaName().toString()); + tableRefs = ImmutableList.of(tableRef); + schemas = ImmutableList.of(schema); } - public static ColumnResolver getResolver(PhoenixConnection connection, TableRef tableRef, Map udfParseNodes) - throws SQLException { - SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableRef, udfParseNodes, null); - return visitor; + public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef) { + super(connection, 0, null); + alias = tableRef.getTableAlias(); + tableRefs = ImmutableList.of(tableRef); + schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); } - public static ColumnResolver getResolverForMutation(DMLStatement statement, PhoenixConnection connection) - throws SQLException { - /* - * We validate the meta data at commit time for mutations, as this allows us to do many UPSERT VALUES calls - * without hitting the server each time to check if the meta data is up-to-date. 
- */ - SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), false, 0, statement.getUdfParseNodes()); - return visitor; + public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef, + Map udfParseNodes, TableName mutatingTableName) throws SQLException { + super(connection, 0, false, udfParseNodes, mutatingTableName); + alias = tableRef.getTableAlias(); + tableRefs = ImmutableList.of(tableRef); + schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); } - - public static ColumnResolver getResolverForProjectedTable(PTable projectedTable, PhoenixConnection connection, Map udfParseNodes) throws SQLException { - return new ProjectedTableColumnResolver(projectedTable, connection, udfParseNodes); + + public SingleTableColumnResolver(TableRef tableRef) throws SQLException { + super(null, 0, null); + alias = tableRef.getTableAlias(); + tableRefs = ImmutableList.of(tableRef); + schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); } - private static class SchemaResolver extends BaseColumnResolver { - private final List schemas; + @Override + public List getTables() { + return tableRefs; + } - public SchemaResolver(PhoenixConnection conn, String schemaName, boolean updateCacheImmediately) - throws SQLException { - super(conn, 0, null); - schemaName = connection.getSchema() != null && schemaName == null ? connection.getSchema() : schemaName; - schemas = ImmutableList.of(createSchemaRef(schemaName, updateCacheImmediately)); - } + @Override + public List getFunctions() { + throw new UnsupportedOperationException(); + } - @Override - public List getTables() { - throw new UnsupportedOperationException(); + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + TableRef tableRef = tableRefs.get(0); + /* + * The only case we can definitely verify is when both a schemaName and a tableName are + * provided. Otherwise, the tableName might be a column family. In this case, this will be + * validated by resolveColumn. 
+ */ + if (schemaName != null || tableName != null) { + String resolvedTableName = tableRef.getTable().getTableName().getString(); + String resolvedSchemaName = tableRef.getTable().getSchemaName().getString(); + if (schemaName != null && tableName != null) { + if ( + !(schemaName.equals(resolvedSchemaName) && tableName.equals(resolvedTableName)) + && !schemaName.equals(alias) + ) { + throw new TableNotFoundException(schemaName, tableName); + } } + } + return tableRef; + } - @Override - public TableRef resolveTable(String schemaName, String tableName) throws SQLException { - throw new UnsupportedOperationException(); + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + TableRef tableRef = tableRefs.get(0); + boolean resolveCF = false; + if (schemaName != null || tableName != null) { + String resolvedTableName = tableRef.getTable().getTableName().getString(); + String resolvedSchemaName = tableRef.getTable().getSchemaName().getString(); + if (schemaName != null && tableName != null) { + if (!(schemaName.equals(resolvedSchemaName) && tableName.equals(resolvedTableName))) { + if (!(resolveCF = schemaName.equals(alias))) { + throw new ColumnNotFoundException(schemaName, tableName, null, colName); + } + } + } else { // schemaName == null && tableName != null + if ( + tableName != null && !tableName.equals(alias) + && (!tableName.equals(resolvedTableName) || !resolvedSchemaName.equals("")) + ) { + resolveCF = true; + } } - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - throw new UnsupportedOperationException(); - } + } + PColumn column = resolveCF + ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) + : tableRef.getTable().getColumnForColumnName(colName); + return new ColumnRef(tableRef, column.getPosition()); + } - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - return schemas.get(0); - } + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + return schemas.get(0); + } - @Override - public List getSchemas() { - return schemas; + @Override + public List getSchemas() { + return schemas; + } + } + + private static class ColumnResolverWithUDF implements ColumnResolver { + protected final PhoenixConnection connection; + protected final MetaDataClient client; + // Fudge factor to add to current time we calculate. We need this when we do a SELECT + // on Windows because the millis timestamp granularity is so bad we sometimes won't + // get the data back that we just upsert. + protected final int tsAddition; + protected final Map functionMap; + protected List functions; + // PHOENIX-3823 : Force update cache when mutating table and select table are same + // (UpsertSelect or Delete with select on same table) + + private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition, + boolean updateCacheImmediately, Map udfParseNodes) throws SQLException { + this.connection = connection; + this.client = connection == null ? null : new MetaDataClient(connection); + this.tsAddition = tsAddition; + functionMap = new HashMap(1); + if (udfParseNodes.isEmpty()) { + functions = Collections. 
emptyList(); + } else { + functions = + createFunctionRef(new ArrayList(udfParseNodes.keySet()), updateCacheImmediately); + for (PFunction function : functions) { + functionMap.put(function.getFunctionName(), function); } - + } } - private static class SingleTableColumnResolver extends BaseColumnResolver { - private final List tableRefs; - private final String alias; - private final List schemas; - - public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, long timeStamp, Map udfParseNodes, boolean isNamespaceMapped) throws SQLException { - super(connection, 0, false, udfParseNodes, null); - List families = Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size()); - for (ColumnDef def : table.getDynamicColumns()) { - if (def.getColumnDefName().getFamilyName() != null) { - families.add(new PColumnFamilyImpl(PNameFactory.newName(def.getColumnDefName().getFamilyName()),Collections.emptyList()));//, NON_ENCODED_QUALIFIERS)); - } - } - Long scn = connection.getSCN(); - String schema = table.getName().getSchemaName(); - if (connection.getSchema() != null) { - schema = schema != null ? schema : connection.getSchema(); - } + private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition) { + this.connection = connection; + this.client = connection == null ? null : new MetaDataClient(connection); + this.tsAddition = tsAddition; + functionMap = new HashMap(1); + this.functions = Collections. emptyList(); + } - // Storage scheme and encoding scheme don't matter here since the PTable is being used only for the purposes of create table. - // The actual values of these two will be determined by the metadata client. - PName tenantId = connection.getTenantId(); - PTableImpl.checkTenantId(tenantId); - String tableName = table.getName().getTableName(); - PName name = PNameFactory.newName(SchemaUtil.getTableName(schema, tableName)); - PTable theTable = new PTableImpl.Builder() - .setTenantId(tenantId) - .setName(name) - .setKey(new PTableKey(tenantId, name.getString())) - .setSchemaName(PNameFactory.newName(schema)) - .setTableName(PNameFactory.newName(tableName)) - .setType(PTableType.VIEW) - .setViewType(PTable.ViewType.MAPPED) - .setTimeStamp(scn == null ? 
HConstants.LATEST_TIMESTAMP : scn) - .setPkColumns(Collections.emptyList()) - .setAllColumns(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .setIndexes(Collections.emptyList()) - .setFamilyAttributes(families) - .setPhysicalNames(Collections.emptyList()) - .setNamespaceMapped(isNamespaceMapped) - .build(); - theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable); - alias = null; - tableRefs = ImmutableList.of(new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty())); - schemas = ImmutableList.of(new PSchema(theTable.getSchemaName().toString(), timeStamp)); - } + @Override + public List getFunctions() { + return functions; + } - public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, boolean updateCacheImmediately) throws SQLException { - this(connection, tableNode, updateCacheImmediately, 0, new HashMap(1)); + private List createFunctionRef(List functionNames, + boolean updateCacheImmediately) throws SQLException { + long timeStamp = QueryConstants.UNSET_TIMESTAMP; + int numFunctions = functionNames.size(); + List functionsFound = new ArrayList(functionNames.size()); + if (updateCacheImmediately || connection.getAutoCommit()) { + getFunctionFromCache(functionNames, functionsFound, true); + if (functionNames.isEmpty()) { + return functionsFound; + } + MetaDataMutationResult result = client.updateCache(functionNames); + timeStamp = result.getMutationTime(); + functionsFound = result.getFunctions(); + if (functionNames.size() != functionsFound.size()) { + throw new FunctionNotFoundException( + "Some of the functions in " + functionNames.toString() + " are not found"); + } + } else { + getFunctionFromCache(functionNames, functionsFound, false); + // We always attempt to update the cache in the event of a FunctionNotFoundException + MetaDataMutationResult result = null; + if (!functionNames.isEmpty()) { + result = client.updateCache(functionNames); + } + if (result != null) { + if (!result.getFunctions().isEmpty()) { + functionsFound.addAll(result.getFunctions()); + } + if (result.wasUpdated()) { + timeStamp = result.getMutationTime(); + } + } + if (functionsFound.size() != numFunctions) { + throw new FunctionNotFoundException( + "Some of the functions in " + functionNames.toString() + " are not found", timeStamp); } - public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, - boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException { - this(connection, tableNode, updateCacheImmediately, 0, new HashMap(1), alwaysHitServer, null); } - public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, - boolean updateCacheImmediately, int tsAddition, - Map udfParseNodes) throws SQLException { - this(connection, tableNode, updateCacheImmediately, tsAddition, udfParseNodes, false, null); - } - - public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode tableNode, - boolean updateCacheImmediately, int tsAddition, - Map udfParseNodes, boolean alwaysHitServer, TableName mutatingTableName) throws SQLException { - super(connection, tsAddition, updateCacheImmediately, udfParseNodes, mutatingTableName); - alias = tableNode.getAlias(); - TableRef tableRef = createTableRef(tableNode.getName().getSchemaName(), tableNode, updateCacheImmediately, alwaysHitServer); - PSchema schema = new PSchema(tableRef.getTable().getSchemaName().toString()); - tableRefs = ImmutableList.of(tableRef); - schemas = ImmutableList.of(schema); - 
} + if (timeStamp != QueryConstants.UNSET_TIMESTAMP) { + timeStamp += tsAddition; + } - public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef) { - super(connection, 0, null); - alias = tableRef.getTableAlias(); - tableRefs = ImmutableList.of(tableRef); - schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); - } + if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, + connection)); + } + return functionsFound; + } - public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef, Map udfParseNodes, TableName mutatingTableName) throws SQLException { - super(connection, 0, false, udfParseNodes, mutatingTableName); - alias = tableRef.getTableAlias(); - tableRefs = ImmutableList.of(tableRef); - schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); + private void getFunctionFromCache(List functionNames, List functionsFound, + boolean getOnlyTemporyFunctions) { + Iterator iterator = functionNames.iterator(); + while (iterator.hasNext()) { + PFunction function = null; + String functionName = iterator.next(); + try { + function = connection.getMetaDataCache() + .getFunction(new PTableKey(connection.getTenantId(), functionName)); + } catch (FunctionNotFoundException e1) { + if (connection.getTenantId() != null) { // Check with null tenantId next + try { + function = + connection.getMetaDataCache().getFunction(new PTableKey(null, functionName)); + } catch (FunctionNotFoundException ignored) { + } + } } - - public SingleTableColumnResolver(TableRef tableRef) throws SQLException { - super(null, 0, null); - alias = tableRef.getTableAlias(); - tableRefs = ImmutableList.of(tableRef); - schemas = ImmutableList.of(new PSchema(tableRef.getTable().getSchemaName().toString())); + if (function != null) { + if (getOnlyTemporyFunctions) { + if (function.isTemporaryFunction()) { + functionsFound.add(function); + iterator.remove(); + } + } else { + functionsFound.add(function); + iterator.remove(); + } } + } + } - + @Override + public PFunction resolveFunction(String functionName) throws SQLException { + PFunction function = functionMap.get(functionName); + if (function == null) { + throw new FunctionNotFoundException(functionName); + } + return function; + } - @Override - public List getTables() { - return tableRefs; - } + @Override + public boolean hasUDFs() { + return !functions.isEmpty(); + } - @Override - public List getFunctions() { - throw new UnsupportedOperationException(); - } + @Override + public List getTables() { + return Collections.singletonList(TableRef.EMPTY_TABLE_REF); + } - @Override - public TableRef resolveTable(String schemaName, String tableName) - throws SQLException { - TableRef tableRef = tableRefs.get(0); - /* - * The only case we can definitely verify is when both a schemaName and a tableName - * are provided. Otherwise, the tableName might be a column family. In this case, - * this will be validated by resolveColumn. - */ - if (schemaName != null || tableName != null) { - String resolvedTableName = tableRef.getTable().getTableName().getString(); - String resolvedSchemaName = tableRef.getTable().getSchemaName().getString(); - if (schemaName != null && tableName != null) { - if ( ! ( schemaName.equals(resolvedSchemaName) && - tableName.equals(resolvedTableName) ) && - ! 
schemaName.equals(alias) ) { - throw new TableNotFoundException(schemaName, tableName); - } - } - } - return tableRef; - } + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new TableNotFoundException(schemaName, tableName); + } - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, - String colName) throws SQLException { - TableRef tableRef = tableRefs.get(0); - boolean resolveCF = false; - if (schemaName != null || tableName != null) { - String resolvedTableName = tableRef.getTable().getTableName().getString(); - String resolvedSchemaName = tableRef.getTable().getSchemaName().getString(); - if (schemaName != null && tableName != null) { - if ( ! ( schemaName.equals(resolvedSchemaName) && - tableName.equals(resolvedTableName) )) { - if (!(resolveCF = schemaName.equals(alias))) { - throw new ColumnNotFoundException(schemaName, tableName, null, colName); - } - } - } else { // schemaName == null && tableName != null - if (tableName != null && !tableName.equals(alias) && (!tableName.equals(resolvedTableName) || !resolvedSchemaName.equals(""))) { - resolveCF = true; - } - } - - } - PColumn column = resolveCF - ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) - : tableRef.getTable().getColumnForColumnName(colName); - return new ColumnRef(tableRef, column.getPosition()); - } - - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - return schemas.get(0); - } + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + throw new ColumnNotFoundException(schemaName, tableName, null, colName); + } - @Override - public List getSchemas() { - return schemas; - } + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + throw new SchemaNotFoundException(schemaName); } - private static class ColumnResolverWithUDF implements ColumnResolver { - protected final PhoenixConnection connection; - protected final MetaDataClient client; - // Fudge factor to add to current time we calculate. We need this when we do a SELECT - // on Windows because the millis timestamp granularity is so bad we sometimes won't - // get the data back that we just upsert. - protected final int tsAddition; - protected final Map functionMap; - protected List functions; - //PHOENIX-3823 : Force update cache when mutating table and select table are same - //(UpsertSelect or Delete with select on same table) - - private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition, - boolean updateCacheImmediately, Map udfParseNodes) throws SQLException { - this.connection = connection; - this.client = connection == null ? null : new MetaDataClient(connection); - this.tsAddition = tsAddition; - functionMap = new HashMap(1); - if (udfParseNodes.isEmpty()) { - functions = Collections. emptyList(); - } else { - functions = createFunctionRef(new ArrayList(udfParseNodes.keySet()), - updateCacheImmediately); - for (PFunction function : functions) { - functionMap.put(function.getFunctionName(), function); - } - } - } + @Override + public List getSchemas() { + return Collections.emptyList(); + } - private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition) { - this.connection = connection; - this.client = connection == null ? 
null : new MetaDataClient(connection); - this.tsAddition = tsAddition; - functionMap = new HashMap(1); - this.functions = Collections.emptyList(); - } + } - @Override - public List getFunctions() { - return functions; - } + private static abstract class BaseColumnResolver extends ColumnResolverWithUDF { + protected TableName mutatingTableName = null; - private List createFunctionRef(List functionNames, - boolean updateCacheImmediately) - throws SQLException { - long timeStamp = QueryConstants.UNSET_TIMESTAMP; - int numFunctions = functionNames.size(); - List functionsFound = new ArrayList(functionNames.size()); - if (updateCacheImmediately || connection.getAutoCommit()) { - getFunctionFromCache(functionNames, functionsFound, true); - if (functionNames.isEmpty()) { - return functionsFound; - } - MetaDataMutationResult result = client.updateCache(functionNames); - timeStamp = result.getMutationTime(); - functionsFound = result.getFunctions(); - if (functionNames.size() != functionsFound.size()) { - throw new FunctionNotFoundException("Some of the functions in " + - functionNames.toString()+" are not found"); - } - } else { - getFunctionFromCache(functionNames, functionsFound, false); - // We always attempt to update the cache in the event of a FunctionNotFoundException - MetaDataMutationResult result = null; - if (!functionNames.isEmpty()) { - result = client.updateCache(functionNames); - } - if (result != null) { - if (!result.getFunctions().isEmpty()) { - functionsFound.addAll(result.getFunctions()); - } - if (result.wasUpdated()) { - timeStamp = result.getMutationTime(); - } - } - if (functionsFound.size()!=numFunctions) { - throw new FunctionNotFoundException("Some of the functions in " + - functionNames.toString()+" are not found", timeStamp); - } - } - if (timeStamp != QueryConstants.UNSET_TIMESTAMP) { - timeStamp += tsAddition; - } + private BaseColumnResolver(PhoenixConnection connection, int tsAddition, + TableName mutatingTableName) { + super(connection, tsAddition); + this.mutatingTableName = mutatingTableName; + } - if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) { - LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + - functionNames.toString() + "at timestamp " + timeStamp, connection)); - } - return functionsFound; - } + private BaseColumnResolver(PhoenixConnection connection, int tsAddition, + boolean updateCacheImmediately, Map udfParseNodes, + TableName mutatingTableName) throws SQLException { + super(connection, tsAddition, updateCacheImmediately, udfParseNodes); + this.mutatingTableName = mutatingTableName; + } - private void getFunctionFromCache(List functionNames, - List functionsFound, - boolean getOnlyTemporyFunctions) { - Iterator iterator = functionNames.iterator(); - while (iterator.hasNext()) { - PFunction function = null; - String functionName = iterator.next(); - try { - function = connection.getMetaDataCache().getFunction( - new PTableKey(connection.getTenantId(), functionName)); - } catch (FunctionNotFoundException e1) { - if (connection.getTenantId() != null) { // Check with null tenantId next - try { - function = connection.getMetaDataCache().getFunction( - new PTableKey(null, functionName)); - } catch (FunctionNotFoundException ignored) { - } - } - } - if (function != null) { - if (getOnlyTemporyFunctions) { - if (function.isTemporaryFunction()) { - functionsFound.add(function); - iterator.remove(); - } - } else { - functionsFound.add(function); - iterator.remove(); - } - } + protected PSchema 
createSchemaRef(String schemaName, boolean updateCacheImmediately) + throws SQLException { + long timeStamp = QueryConstants.UNSET_TIMESTAMP; + PSchema theSchema = null; + MetaDataClient client = new MetaDataClient(connection); + try { + if (updateCacheImmediately) { + MetaDataMutationResult result = client.updateCache(schemaName, true); + timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); + theSchema = result.getSchema(); + if (theSchema == null) { + throw new SchemaNotFoundException(schemaName, timeStamp); + } + } else { + try { + theSchema = connection.getSchema(new PTableKey(null, schemaName)); + } catch (SchemaNotFoundException e1) { + } + // We always attempt to update the cache in the event of a + // SchemaNotFoundException + if (theSchema == null) { + MetaDataMutationResult result = client.updateCache(schemaName, true); + if (result.wasUpdated()) { + timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); + theSchema = result.getSchema(); } - } + } + if (theSchema == null) { + throw new SchemaNotFoundException(schemaName, timeStamp); + } + } + return theSchema; + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } - @Override - public PFunction resolveFunction(String functionName) throws SQLException { - PFunction function = functionMap.get(functionName); - if (function == null) { - throw new FunctionNotFoundException(functionName); + protected TableRef createTableRef(String connectionSchemaName, NamedTableNode tableNode, + boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException { + String tableName = tableNode.getName().getTableName(); + String schemaName = tableNode.getName().getSchemaName(); + schemaName = + connection.getSchema() != null && schemaName == null ? 
connection.getSchema() : schemaName; + long timeStamp = QueryConstants.UNSET_TIMESTAMP; + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + PName tenantId = connection.getTenantId(); + PTable theTable = null; + boolean error = false; + + try { + if (updateCacheImmediately) { + // Force update cache when mutating and ref table are same except for meta tables + if ( + !QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName) && mutatingTableName != null + && tableNode != null && tableNode.getName().equals(mutatingTableName) + ) { + alwaysHitServer = true; + } + + try { + MetaDataMutationResult result = + client.updateCache(tenantId, schemaName, tableName, alwaysHitServer); + timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); + theTable = result.getTable(); + MutationCode mutationCode = result.getMutationCode(); + if (theTable == null) { + throw new TableNotFoundException(schemaName, tableName, timeStamp); } - return function; + } catch (Throwable e) { + error = true; + throw e; + } + } else { + try { + theTable = connection.getTable(fullTableName); + } catch (Throwable e) { + error = true; + throw e; + } + } + // Add any dynamic columns to the table declaration + List dynamicColumns = tableNode.getDynamicColumns(); + theTable = addDynamicColumns(dynamicColumns, theTable); + if (timeStamp != QueryConstants.UNSET_TIMESTAMP) { + timeStamp += tsAddition; + } + TableRef tableRef = + new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty()); + if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) { + LOGGER.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + + tableRef.getTable().getTimeStamp() + " with " + + tableRef.getTable().getColumns().size() + " columns: " + + tableRef.getTable().getColumns(), connection)); + } + return tableRef; + } finally { + if (error) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(fullTableName, + NUM_METADATA_LOOKUP_FAILURES, 1); } + } + } - @Override - public boolean hasUDFs() { - return !functions.isEmpty(); - } + protected PTable addDynamicColumns(List dynColumns, PTable theTable) + throws SQLException { + if (!dynColumns.isEmpty()) { + List existingColumns = theTable.getColumns(); + // Need to skip the salting column, as it's handled in the PTable builder call below + List allcolumns = new ArrayList<>(theTable.getBucketNum() == null + ? 
existingColumns + : existingColumns.subList(1, existingColumns.size())); + // Position still based on with the salting columns + int position = existingColumns.size(); + PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable)); + for (ColumnDef dynColumn : dynColumns) { + PName familyName = defaultFamilyName; + PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName()); + String family = dynColumn.getColumnDefName().getFamilyName(); + if (family != null) { + theTable.getColumnFamily(family); // Verifies that column family exists + familyName = PNameFactory.newName(family); + } + allcolumns.add( + new PColumnImpl(name, familyName, dynColumn.getDataType(), dynColumn.getMaxLength(), + dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), + dynColumn.getArraySize(), null, false, dynColumn.getExpression(), false, true, + Bytes.toBytes(dynColumn.getColumnDefName().getColumnName()), + HConstants.LATEST_TIMESTAMP)); + position++; + } + theTable = PTableImpl.builderWithColumns(theTable, allcolumns).build(); + } + return theTable; + } + } + + private static class MultiTableColumnResolver extends BaseColumnResolver + implements TableNodeVisitor { + protected final ListMultimap tableMap; + protected final List tables; + private String connectionSchemaName; + + private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition) { + super(connection, tsAddition, null); + tableMap = ArrayListMultimap. create(); + tables = Lists.newArrayList(); + try { + connectionSchemaName = connection.getSchema(); + } catch (SQLException e) { + // ignore + } + } - @Override - public List getTables() { - return Collections.singletonList(TableRef.EMPTY_TABLE_REF); - } + private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition, + Map udfParseNodes, TableName mutatingTableName) throws SQLException { + super(connection, tsAddition, false, udfParseNodes, mutatingTableName); + tableMap = ArrayListMultimap. create(); + tables = Lists.newArrayList(); + } + @Override + public List getTables() { + return tables; + } - @Override - public TableRef resolveTable(String schemaName, String tableName) - throws SQLException { - throw new TableNotFoundException(schemaName, tableName); - } + @Override + public Void visit(BindTableNode boundTableNode) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) - throws SQLException { - throw new ColumnNotFoundException(schemaName, tableName, null, colName); - } + @Override + public Void visit(JoinTableNode joinNode) throws SQLException { + joinNode.getLHS().accept(this); + joinNode.getRHS().accept(this); + return null; + } - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - throw new SchemaNotFoundException(schemaName); - } + @Override + public Void visit(NamedTableNode tableNode) throws SQLException { + String alias = tableNode.getAlias(); + TableRef tableRef = createTableRef(connectionSchemaName, tableNode, true, false); + PTable theTable = tableRef.getTable(); - @Override - public List getSchemas() { - return Collections.emptyList(); - } + if (alias != null) { + tableMap.put(alias, tableRef); + } + String name = theTable.getName().getString(); + // avoid having one name mapped to two identical TableRef. 
+ if (alias == null || !alias.equals(name)) { + tableMap.put(name, tableRef); + } + tables.add(tableRef); + return null; } - private static abstract class BaseColumnResolver extends ColumnResolverWithUDF { - protected TableName mutatingTableName = null; + @Override + public Void visit(DerivedTableNode subselectNode) throws SQLException { + List selectNodes = subselectNode.getSelect().getSelect(); + List columns = new ArrayList(); + int position = 0; + for (AliasedNode aliasedNode : selectNodes) { + String alias = aliasedNode.getAlias(); + if (alias == null) { + ParseNode node = aliasedNode.getNode(); + if ( + node instanceof WildcardParseNode || node instanceof TableWildcardParseNode + || node instanceof FamilyWildcardParseNode + ) throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported."); + + alias = SchemaUtil.normalizeIdentifier(node.getAlias()); + } + if (alias == null) { + // Use position as column name for anonymous columns, which can be + // referenced by an outer wild-card select. + alias = String.valueOf(position); + } + PName name = PNameFactory.newName(alias); + PColumnImpl column = new PColumnImpl(PNameFactory.newName(alias), + PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY), null, 0, 0, true, position++, + SortOrder.ASC, null, null, false, null, false, false, name.getBytes(), + HConstants.LATEST_TIMESTAMP); + columns.add(column); + } + PTable t = new PTableImpl.Builder().setType(PTableType.SUBQUERY) + .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP).setIndexDisableTimestamp(0L) + .setSequenceNumber(PTable.INITIAL_SEQ_NUM).setImmutableRows(false).setDisableWAL(false) + .setMultiTenant(false).setStoreNulls(false).setUpdateCacheFrequency(0) + .setNamespaceMapped(SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY, + connection.getQueryServices().getProps())) + .setAppendOnlySchema(false) + .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) + .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) + .setBaseColumnCount(QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT) + .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) + .setUseStatsForParallelization(true).setExcludedColumns(ImmutableList.of()) + .setSchemaName(PName.EMPTY_NAME).setTableName(PName.EMPTY_NAME) + .setRowKeyOrderOptimizable(false).setIndexes(Collections.emptyList()) + .setPhysicalNames(ImmutableList.of()).setColumns(columns).build(); + + String alias = subselectNode.getAlias(); + TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false); + tableMap.put(alias, tableRef); + tables.add(tableRef); + return null; + } - private BaseColumnResolver(PhoenixConnection connection, int tsAddition, - TableName mutatingTableName) { - super(connection, tsAddition); - this.mutatingTableName = mutatingTableName; - } + /** + * Invoke the {@link #visit(DerivedTableNode)} again to refresh the inner state. 
+ */ + public TableRef refreshDerivedTableNode(DerivedTableNode derivedTableNode) throws SQLException { + String tableAlias = derivedTableNode.getAlias(); + List removedTableRefs = this.tableMap.removeAll(tableAlias); + if (removedTableRefs == null || removedTableRefs.isEmpty()) { + return null; + } + tables.removeAll(removedTableRefs); + this.visit(derivedTableNode); + return this.resolveTable(null, tableAlias); + } - private BaseColumnResolver(PhoenixConnection connection, int tsAddition, - boolean updateCacheImmediately, - Map udfParseNodes, - TableName mutatingTableName) throws SQLException { - super(connection, tsAddition, updateCacheImmediately, udfParseNodes); - this.mutatingTableName = mutatingTableName; - } + private static class ColumnFamilyRef { + private final TableRef tableRef; + private final PColumnFamily family; - protected PSchema createSchemaRef(String schemaName, boolean updateCacheImmediately) throws SQLException { - long timeStamp = QueryConstants.UNSET_TIMESTAMP; - PSchema theSchema = null; - MetaDataClient client = new MetaDataClient(connection); - try { - if (updateCacheImmediately) { - MetaDataMutationResult result = client.updateCache(schemaName, true); - timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); - theSchema = result.getSchema(); - if (theSchema == null) { - throw new SchemaNotFoundException(schemaName, timeStamp); - } - } else { - try { - theSchema = connection.getSchema(new PTableKey(null, schemaName)); - } catch (SchemaNotFoundException e1) { - } - // We always attempt to update the cache in the event of a - // SchemaNotFoundException - if (theSchema == null) { - MetaDataMutationResult result = client.updateCache(schemaName, true); - if (result.wasUpdated()) { - timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); - theSchema = result.getSchema(); - } - } - if (theSchema == null) { - throw new SchemaNotFoundException(schemaName, timeStamp); - } - } - return theSchema; - } catch(Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, - NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } - - protected TableRef createTableRef(String connectionSchemaName, NamedTableNode tableNode, - boolean updateCacheImmediately, boolean alwaysHitServer) throws SQLException { - String tableName = tableNode.getName().getTableName(); - String schemaName = tableNode.getName().getSchemaName(); - schemaName = connection.getSchema() != null && schemaName == null ? 
connection.getSchema() : schemaName; - long timeStamp = QueryConstants.UNSET_TIMESTAMP; - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - PName tenantId = connection.getTenantId(); - PTable theTable = null; - boolean error = false; + ColumnFamilyRef(TableRef tableRef, PColumnFamily family) { + this.tableRef = tableRef; + this.family = family; + } - try { - if (updateCacheImmediately) { - //Force update cache when mutating and ref table are same except for meta tables - if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName) && - mutatingTableName != null && tableNode != null && - tableNode.getName().equals(mutatingTableName)) { - alwaysHitServer = true; - } - - try { - MetaDataMutationResult result = client.updateCache(tenantId, schemaName, tableName, alwaysHitServer); - timeStamp = TransactionUtil.getResolvedTimestamp(connection, result); - theTable = result.getTable(); - MutationCode mutationCode = result.getMutationCode(); - if (theTable == null) { - throw new TableNotFoundException(schemaName, tableName, timeStamp); - } - } catch (Throwable e) { - error = true; - throw e; - } - } else { - try { - theTable = connection.getTable(fullTableName); - } catch (Throwable e) { - error = true; - throw e; - } - } - // Add any dynamic columns to the table declaration - List dynamicColumns = tableNode.getDynamicColumns(); - theTable = addDynamicColumns(dynamicColumns, theTable); - if (timeStamp != QueryConstants.UNSET_TIMESTAMP) { - timeStamp += tsAddition; - } - TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty()); - if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Re-resolved stale table " + fullTableName + " with seqNum " - + tableRef.getTable().getSequenceNumber() + " at timestamp " - + tableRef.getTable().getTimeStamp() + " with " - + tableRef.getTable().getColumns().size() + " columns: " - + tableRef.getTable().getColumns(), connection)); - } - return tableRef; - } finally { - if (error) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(fullTableName, - NUM_METADATA_LOOKUP_FAILURES, 1); - } - } - } + public TableRef getTableRef() { + return tableRef; + } - protected PTable addDynamicColumns(List dynColumns, PTable theTable) - throws SQLException { - if (!dynColumns.isEmpty()) { - List existingColumns = theTable.getColumns(); - // Need to skip the salting column, as it's handled in the PTable builder call below - List allcolumns = new ArrayList<>( - theTable.getBucketNum() == null ? 
existingColumns : - existingColumns.subList(1, existingColumns.size())); - // Position still based on with the salting columns - int position = existingColumns.size(); - PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable)); - for (ColumnDef dynColumn : dynColumns) { - PName familyName = defaultFamilyName; - PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName()); - String family = dynColumn.getColumnDefName().getFamilyName(); - if (family != null) { - theTable.getColumnFamily(family); // Verifies that column family exists - familyName = PNameFactory.newName(family); - } - allcolumns.add(new PColumnImpl(name, familyName, dynColumn.getDataType(), dynColumn.getMaxLength(), - dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false, dynColumn.getExpression(), false, true, Bytes.toBytes(dynColumn.getColumnDefName().getColumnName()), - HConstants.LATEST_TIMESTAMP)); - position++; - } - theTable = PTableImpl.builderWithColumns(theTable, allcolumns) - .build(); - } - return theTable; - } + public PColumnFamily getFamily() { + return family; + } } - private static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor { - protected final ListMultimap tableMap; - protected final List tables; - private String connectionSchemaName; + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + List tableRefs = tableMap.get(fullTableName); + if (tableRefs.size() == 0) { + throw new TableNotFoundException(fullTableName); + } else if (tableRefs.size() > 1) { + throw new AmbiguousTableException(tableName); + } else { + return tableRefs.get(0); + } + } - private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition) { - super(connection, tsAddition, null); - tableMap = ArrayListMultimap. create(); - tables = Lists.newArrayList(); - try { - connectionSchemaName = connection.getSchema(); - } catch (SQLException e) { - // ignore + private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) + throws SQLException { + if (tableName == null) { + ColumnFamilyRef theColumnFamilyRef = null; + Iterator iterator = tables.iterator(); + while (iterator.hasNext()) { + TableRef tableRef = iterator.next(); + try { + PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName); + if (columnFamily == null) { + throw new TableNotFoundException(cfName); } + theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily); + } catch (ColumnFamilyNotFoundException e) { + } } - - private MultiTableColumnResolver(PhoenixConnection connection, int tsAddition, Map udfParseNodes, TableName mutatingTableName) throws SQLException { - super(connection, tsAddition, false, udfParseNodes, mutatingTableName); - tableMap = ArrayListMultimap. 
create(); - tables = Lists.newArrayList(); - } - - @Override - public List getTables() { - return tables; - } - - @Override - public Void visit(BindTableNode boundTableNode) throws SQLException { - throw new SQLFeatureNotSupportedException(); + if (theColumnFamilyRef != null) { + return theColumnFamilyRef; } - - @Override - public Void visit(JoinTableNode joinNode) throws SQLException { - joinNode.getLHS().accept(this); - joinNode.getRHS().accept(this); - return null; + throw new TableNotFoundException(cfName); + } else { + TableRef tableRef = null; + try { + tableRef = resolveTable(null, tableName); + } catch (TableNotFoundException e) { + return resolveColumnFamily(null, cfName); } + PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName); + return new ColumnFamilyRef(tableRef, columnFamily); + } + } - @Override - public Void visit(NamedTableNode tableNode) throws SQLException { - String alias = tableNode.getAlias(); - TableRef tableRef = createTableRef(connectionSchemaName, tableNode, true, false); - PTable theTable = tableRef.getTable(); - - if (alias != null) { - tableMap.put(alias, tableRef); + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + if (tableName == null) { + int theColumnPosition = -1; + TableRef theTableRef = null; + Iterator iterator = tables.iterator(); + while (iterator.hasNext()) { + TableRef tableRef = iterator.next(); + try { + PColumn column = tableRef.getTable().getColumnForColumnName(colName); + if (theTableRef != null) { + throw new AmbiguousColumnException(colName); } + theTableRef = tableRef; + theColumnPosition = column.getPosition(); + } catch (ColumnNotFoundException e) { - String name = theTable.getName().getString(); - //avoid having one name mapped to two identical TableRef. - if (alias == null || !alias.equals(name)) { - tableMap.put(name, tableRef); - } - tables.add(tableRef); - return null; + } } - - @Override - public Void visit(DerivedTableNode subselectNode) throws SQLException { - List selectNodes = subselectNode.getSelect().getSelect(); - List columns = new ArrayList(); - int position = 0; - for (AliasedNode aliasedNode : selectNodes) { - String alias = aliasedNode.getAlias(); - if (alias == null) { - ParseNode node = aliasedNode.getNode(); - if (node instanceof WildcardParseNode - || node instanceof TableWildcardParseNode - || node instanceof FamilyWildcardParseNode) - throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported."); - - alias = SchemaUtil.normalizeIdentifier(node.getAlias()); - } - if (alias == null) { - // Use position as column name for anonymous columns, which can be - // referenced by an outer wild-card select. 
- alias = String.valueOf(position); - } - PName name = PNameFactory.newName(alias); - PColumnImpl column = new PColumnImpl(PNameFactory.newName(alias), - PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY), - null, 0, 0, true, position++, SortOrder.ASC, null, null, false, null, false, false, name.getBytes(), - HConstants.LATEST_TIMESTAMP); - columns.add(column); - } - PTable t = new PTableImpl.Builder() - .setType(PTableType.SUBQUERY) - .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP) - .setIndexDisableTimestamp(0L) - .setSequenceNumber(PTable.INITIAL_SEQ_NUM) - .setImmutableRows(false) - .setDisableWAL(false) - .setMultiTenant(false) - .setStoreNulls(false) - .setUpdateCacheFrequency(0) - .setNamespaceMapped(SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY, - connection.getQueryServices().getProps())) - .setAppendOnlySchema(false) - .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) - .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) - .setBaseColumnCount(QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) - .setUseStatsForParallelization(true) - .setExcludedColumns(ImmutableList.of()) - .setSchemaName(PName.EMPTY_NAME) - .setTableName(PName.EMPTY_NAME) - .setRowKeyOrderOptimizable(false) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(columns) - .build(); - - String alias = subselectNode.getAlias(); - TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false); - tableMap.put(alias, tableRef); - tables.add(tableRef); - return null; - } - - /** - * Invoke the {@link #visit(DerivedTableNode)} again to refresh the inner state. - * @param derivedTableNode - * @return - * @throws SQLException - */ - public TableRef refreshDerivedTableNode(DerivedTableNode derivedTableNode) throws SQLException { - String tableAlias = derivedTableNode.getAlias(); - List removedTableRefs = this.tableMap.removeAll(tableAlias); - if (removedTableRefs == null || removedTableRefs.isEmpty()) { - return null; - } - tables.removeAll(removedTableRefs); - this.visit(derivedTableNode); - return this.resolveTable(null, tableAlias); + if (theTableRef != null) { + return new ColumnRef(theTableRef, theColumnPosition); } - - private static class ColumnFamilyRef { - private final TableRef tableRef; - private final PColumnFamily family; - - ColumnFamilyRef(TableRef tableRef, PColumnFamily family) { - this.tableRef = tableRef; - this.family = family; - } - - public TableRef getTableRef() { - return tableRef; + throw new ColumnNotFoundException(schemaName, tableName, null, colName); + } else { + try { + TableRef tableRef = resolveTable(schemaName, tableName); + PColumn column = tableRef.getTable().getColumnForColumnName(colName); + return new ColumnRef(tableRef, column.getPosition()); + } catch (TableNotFoundException e) { + TableRef theTableRef = null; + PColumn theColumn = null; + PColumnFamily theColumnFamily = null; + if (schemaName != null) { + try { + // Try schemaName as the tableName and use tableName as column family name + theTableRef = resolveTable(null, schemaName); + theColumnFamily = theTableRef.getTable().getColumnFamily(tableName); + theColumn = theColumnFamily.getPColumnForColumnName(colName); + } catch (MetaDataEntityNotFoundException e2) { } - - public PColumnFamily getFamily() { - return family; + } + if (theColumn == null) { + // Try using the tableName as a columnFamily reference instead + // and resolve column in each 
column family. + Iterator iterator = tables.iterator(); + while (iterator.hasNext()) { + TableRef tableRef = iterator.next(); + try { + PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(tableName); + PColumn column = columnFamily.getPColumnForColumnName(colName); + if (theColumn != null) { + throw new AmbiguousColumnException(colName); + } + theTableRef = tableRef; + theColumnFamily = columnFamily; + theColumn = column; + } catch (MetaDataEntityNotFoundException e1) { + } } - } - - @Override - public TableRef resolveTable(String schemaName, String tableName) throws SQLException { - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - List tableRefs = tableMap.get(fullTableName); - if (tableRefs.size() == 0) { - throw new TableNotFoundException(fullTableName); - } else if (tableRefs.size() > 1) { - throw new AmbiguousTableException(tableName); - } else { - return tableRefs.get(0); + if (theColumn == null) { + throw new ColumnNotFoundException(colName); } + } + ColumnFamilyRef cfRef = new ColumnFamilyRef(theTableRef, theColumnFamily); + return new ColumnRef(cfRef.getTableRef(), theColumn.getPosition()); } + } + } - private ColumnFamilyRef resolveColumnFamily(String tableName, String cfName) throws SQLException { - if (tableName == null) { - ColumnFamilyRef theColumnFamilyRef = null; - Iterator iterator = tables.iterator(); - while (iterator.hasNext()) { - TableRef tableRef = iterator.next(); - try { - PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName); - if (columnFamily == null) { - throw new TableNotFoundException(cfName); - } - theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily); - } catch (ColumnFamilyNotFoundException e) {} - } - if (theColumnFamilyRef != null) { return theColumnFamilyRef; } - throw new TableNotFoundException(cfName); - } else { - TableRef tableRef = null; - try { - tableRef = resolveTable(null, tableName); - } catch (TableNotFoundException e) { - return resolveColumnFamily(null, cfName); - } - PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName); - return new ColumnFamilyRef(tableRef, columnFamily); - } - } + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + // TODO Auto-generated method stub + return null; + } - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - if (tableName == null) { - int theColumnPosition = -1; - TableRef theTableRef = null; - Iterator iterator = tables.iterator(); - while (iterator.hasNext()) { - TableRef tableRef = iterator.next(); - try { - PColumn column = tableRef.getTable().getColumnForColumnName(colName); - if (theTableRef != null) { throw new AmbiguousColumnException(colName); } - theTableRef = tableRef; - theColumnPosition = column.getPosition(); - } catch (ColumnNotFoundException e) { - - } - } - if (theTableRef != null) { return new ColumnRef(theTableRef, theColumnPosition); } - throw new ColumnNotFoundException(schemaName, tableName, null, colName); - } else { - try { - TableRef tableRef = resolveTable(schemaName, tableName); - PColumn column = tableRef.getTable().getColumnForColumnName(colName); - return new ColumnRef(tableRef, column.getPosition()); - } catch (TableNotFoundException e) { - TableRef theTableRef = null; - PColumn theColumn = null; - PColumnFamily theColumnFamily = null; - if (schemaName != null) { - try { - // Try schemaName as the tableName and use tableName as column family name - theTableRef = resolveTable(null, schemaName); 
- theColumnFamily = theTableRef.getTable().getColumnFamily(tableName); - theColumn = theColumnFamily.getPColumnForColumnName(colName); - } catch (MetaDataEntityNotFoundException e2) { - } - } - if (theColumn == null) { - // Try using the tableName as a columnFamily reference instead - // and resolve column in each column family. - Iterator iterator = tables.iterator(); - while (iterator.hasNext()) { - TableRef tableRef = iterator.next(); - try { - PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(tableName); - PColumn column = columnFamily.getPColumnForColumnName(colName); - if (theColumn != null) { - throw new AmbiguousColumnException(colName); - } - theTableRef = tableRef; - theColumnFamily = columnFamily; - theColumn = column; - } catch (MetaDataEntityNotFoundException e1) { - } - } - if (theColumn == null) { - throw new ColumnNotFoundException(colName); - } - } - ColumnFamilyRef cfRef = new ColumnFamilyRef(theTableRef, theColumnFamily); - return new ColumnRef(cfRef.getTableRef(), theColumn.getPosition()); - } - } - } + @Override + public List getSchemas() { + // TODO Auto-generated method stub + return null; + } + } + + private static class ProjectedTableColumnResolver extends MultiTableColumnResolver { + private final boolean isIndex; + private final List theTableRefs; + private final Map columnRefMap; + + private ProjectedTableColumnResolver(PTable projectedTable, PhoenixConnection conn, + Map udfParseNodes) throws SQLException { + super(conn, 0, udfParseNodes, null); + Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED); + this.isIndex = + projectedTable.getIndexType() == IndexType.LOCAL || IndexUtil.isGlobalIndex(projectedTable); + this.columnRefMap = new HashMap(); + long ts = Long.MAX_VALUE; + for (int i = projectedTable.getBucketNum() == null ? 
0 : 1; i + < projectedTable.getColumns().size(); i++) { + PColumn column = projectedTable.getColumns().get(i); + ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef(); + TableRef tableRef = colRef.getTableRef(); + if (!tables.contains(tableRef)) { + String alias = tableRef.getTableAlias(); + if (alias != null) { + this.tableMap.put(alias, tableRef); + } + String name = tableRef.getTable().getName().getString(); + if (alias == null || !alias.equals(name)) { + tableMap.put(name, tableRef); + } + tables.add(tableRef); + if (tableRef.getLowerBoundTimeStamp() < ts) { + ts = tableRef.getLowerBoundTimeStamp(); + } + } + this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()), + column.getPosition()); + } + this.theTableRefs = ImmutableList + .of(new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false)); - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - // TODO Auto-generated method stub - return null; - } + } - @Override - public List getSchemas() { - // TODO Auto-generated method stub - return null; - } + @Override + public List getTables() { + return theTableRefs; } - - private static class ProjectedTableColumnResolver extends MultiTableColumnResolver { - private final boolean isIndex; - private final List theTableRefs; - private final Map columnRefMap; - private ProjectedTableColumnResolver(PTable projectedTable, PhoenixConnection conn, Map udfParseNodes) throws SQLException { - super(conn, 0, udfParseNodes, null); - Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED); - this.isIndex = projectedTable.getIndexType() == IndexType.LOCAL - || IndexUtil.isGlobalIndex(projectedTable); - this.columnRefMap = new HashMap(); - long ts = Long.MAX_VALUE; - for (int i = projectedTable.getBucketNum() == null ? 0 : 1; i < projectedTable.getColumns().size(); i++) { - PColumn column = projectedTable.getColumns().get(i); - ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef(); - TableRef tableRef = colRef.getTableRef(); - if (!tables.contains(tableRef)) { - String alias = tableRef.getTableAlias(); - if (alias != null) { - this.tableMap.put(alias, tableRef); - } - String name = tableRef.getTable().getName().getString(); - if (alias == null || !alias.equals(name)) { - tableMap.put(name, tableRef); - } - tables.add(tableRef); - if (tableRef.getLowerBoundTimeStamp() < ts) { - ts = tableRef.getLowerBoundTimeStamp(); - } - } - this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()), column.getPosition()); - } - this.theTableRefs = ImmutableList.of(new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false)); - - } - - @Override - public List getTables() { - return theTableRefs; - } - - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - ColumnRef colRef; - try { - colRef = super.resolveColumn(schemaName, tableName, colName); - } catch (ColumnNotFoundException e) { - // This could be a ColumnRef for index data column. - TableRef tableRef = isIndex ? 
super.getTables().get(0) - : super.resolveTable(schemaName, tableName); - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - try { - TableRef parentTableRef = super.resolveTable( - tableRef.getTable().getSchemaName().getString(), - tableRef.getTable().getParentTableName().getString()); - colRef = new ColumnRef(parentTableRef, - IndexUtil.getDataColumnFamilyName(colName), - IndexUtil.getDataColumnName(colName)); - } catch (TableNotFoundException te) { - throw e; - } - } else { - throw e; - } - } - Integer position = columnRefMap.get(colRef); - if (position == null) - throw new ColumnNotFoundException(schemaName, tableName, null, colName); - - return new ColumnRef(theTableRefs.get(0), position); + + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + ColumnRef colRef; + try { + colRef = super.resolveColumn(schemaName, tableName, colName); + } catch (ColumnNotFoundException e) { + // This could be a ColumnRef for index data column. + TableRef tableRef = + isIndex ? super.getTables().get(0) : super.resolveTable(schemaName, tableName); + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + try { + TableRef parentTableRef = + super.resolveTable(tableRef.getTable().getSchemaName().getString(), + tableRef.getTable().getParentTableName().getString()); + colRef = new ColumnRef(parentTableRef, IndexUtil.getDataColumnFamilyName(colName), + IndexUtil.getDataColumnName(colName)); + } catch (TableNotFoundException te) { + throw e; + } + } else { + throw e; } + } + Integer position = columnRefMap.get(colRef); + if (position == null) throw new ColumnNotFoundException(schemaName, tableName, null, colName); + + return new ColumnRef(theTableRefs.get(0), position); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java index d348eafcc24..863a9fa266d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,11 +23,8 @@ import java.util.Comparator; import java.util.List; -import net.jcip.annotations.Immutable; - import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.OrderPreservingTracker.Info; import org.apache.phoenix.compile.OrderPreservingTracker.Ordering; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; @@ -44,438 +41,426 @@ import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.util.IndexUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.IndexUtil; + +import net.jcip.annotations.Immutable; /** - * - * Validates GROUP BY clause and builds a {@link GroupBy} instance to encapsulate the - * group by expressions. - * - * + * Validates GROUP BY clause and builds a {@link GroupBy} instance to encapsulate the group by + * expressions. * @since 0.1 */ public class GroupByCompiler { - @Immutable - public static class GroupBy { - private final List expressions; - private final List keyExpressions; - private final boolean isOrderPreserving; - private final int orderPreservingColumnCount; - private final boolean isUngroupedAggregate; - private final List orderPreservingTrackInfos; - public static final GroupByCompiler.GroupBy EMPTY_GROUP_BY = new GroupBy(new GroupByBuilder()) { - @Override - public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException { - return this; - } - - @Override - public void explain(List planSteps, Integer limit) { - } + @Immutable + public static class GroupBy { + private final List expressions; + private final List keyExpressions; + private final boolean isOrderPreserving; + private final int orderPreservingColumnCount; + private final boolean isUngroupedAggregate; + private final List orderPreservingTrackInfos; + public static final GroupByCompiler.GroupBy EMPTY_GROUP_BY = new GroupBy(new GroupByBuilder()) { + @Override + public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, + Expression whereExpression) throws SQLException { + return this; + } - @Override - public void explain(List planSteps, Integer limit, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps, Integer limit) { + } - @Override - public String getScanAttribName() { - return null; - } - }; + @Override + public void explain(List planSteps, Integer limit, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } - public static final GroupByCompiler.GroupBy UNGROUPED_GROUP_BY = new GroupBy(new GroupByBuilder().setIsOrderPreserving(true).setIsUngroupedAggregate(true)) { - @Override - public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException { - return this; - } + @Override + public String getScanAttribName() { + return null; + } + 
}; - @Override - public void explain(List planSteps, Integer limit) { - planSteps.add(" SERVER AGGREGATE INTO SINGLE ROW"); - } + public static final GroupByCompiler.GroupBy UNGROUPED_GROUP_BY = + new GroupBy(new GroupByBuilder().setIsOrderPreserving(true).setIsUngroupedAggregate(true)) { + @Override + public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, + Expression whereExpression) throws SQLException { + return this; + } - @Override - public void explain(List planSteps, Integer limit, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - planSteps.add(" SERVER AGGREGATE INTO SINGLE ROW"); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerAggregate( - "SERVER AGGREGATE INTO SINGLE ROW"); - } - } + @Override + public void explain(List planSteps, Integer limit) { + planSteps.add(" SERVER AGGREGATE INTO SINGLE ROW"); + } - @Override - public String getScanAttribName() { - return BaseScannerRegionObserverConstants.UNGROUPED_AGG; - } - }; - - private GroupBy(GroupByBuilder builder) { - this.expressions = ImmutableList.copyOf(builder.expressions); - this.keyExpressions = builder.expressions == builder.keyExpressions ? - this.expressions : builder.keyExpressions == null ? null : - ImmutableList.copyOf(builder.keyExpressions); - this.isOrderPreserving = builder.isOrderPreserving; - this.orderPreservingColumnCount = builder.orderPreservingColumnCount; - this.isUngroupedAggregate = builder.isUngroupedAggregate; - this.orderPreservingTrackInfos = builder.orderPreservingTrackInfos; + @Override + public void explain(List planSteps, Integer limit, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + planSteps.add(" SERVER AGGREGATE INTO SINGLE ROW"); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerAggregate("SERVER AGGREGATE INTO SINGLE ROW"); + } } - - public List getExpressions() { - return expressions; + + @Override + public String getScanAttribName() { + return BaseScannerRegionObserverConstants.UNGROUPED_AGG; } - - public List getKeyExpressions() { - return keyExpressions; + }; + + private GroupBy(GroupByBuilder builder) { + this.expressions = ImmutableList.copyOf(builder.expressions); + this.keyExpressions = builder.expressions == builder.keyExpressions ? this.expressions + : builder.keyExpressions == null ? 
null + : ImmutableList.copyOf(builder.keyExpressions); + this.isOrderPreserving = builder.isOrderPreserving; + this.orderPreservingColumnCount = builder.orderPreservingColumnCount; + this.isUngroupedAggregate = builder.isUngroupedAggregate; + this.orderPreservingTrackInfos = builder.orderPreservingTrackInfos; + } + + public List getExpressions() { + return expressions; + } + + public List getKeyExpressions() { + return keyExpressions; + } + + public String getScanAttribName() { + if (isUngroupedAggregate) { + return BaseScannerRegionObserverConstants.UNGROUPED_AGG; + } else if (isOrderPreserving) { + return BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS; + } else { + return BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS; + } + } + + public boolean isEmpty() { + return expressions.isEmpty(); + } + + public boolean isOrderPreserving() { + return isOrderPreserving; + } + + public boolean isUngroupedAggregate() { + return isUngroupedAggregate; + } + + /** + * This value represents the row key column count corresponding to longest continuous ordering + * columns returned by {@link GroupBy#getOrderPreservingTrackInfos}, it may not equal to the + * size of {@link GroupBy#getOrderPreservingTrackInfos}. + */ + public int getOrderPreservingColumnCount() { + return orderPreservingColumnCount; + } + + public List getOrderPreservingTrackInfos() { + return orderPreservingTrackInfos; + } + + public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, + Expression whereExpression) throws SQLException { + boolean isOrderPreserving = this.isOrderPreserving; + int orderPreservingColumnCount = 0; + if (isOrderPreserving) { + OrderPreservingTracker tracker = new OrderPreservingTracker(context, GroupBy.EMPTY_GROUP_BY, + Ordering.UNORDERED, expressions.size(), null, innerQueryPlan, whereExpression); + for (int i = 0; i < expressions.size(); i++) { + Expression expression = expressions.get(i); + tracker.track(expression); } - - public String getScanAttribName() { - if (isUngroupedAggregate) { - return BaseScannerRegionObserverConstants.UNGROUPED_AGG; - } else if (isOrderPreserving) { - return BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS; + + // This is true if the GROUP BY is composed of only PK columns. We further check here that + // there are no "gaps" in the PK columns positions used (i.e. we start with the first PK + // column and use each subsequent one in PK order). 
+ isOrderPreserving = tracker.isOrderPreserving(); + orderPreservingColumnCount = tracker.getOrderPreservingColumnCount(); + if (isOrderPreserving) { + // reorder the groupby expressions following pk columns + List orderPreservingTrackInfos = tracker.getOrderPreservingTrackInfos(); + List newExpressions = Info.extractExpressions(orderPreservingTrackInfos); + assert newExpressions.size() == expressions.size(); + return new GroupBy.GroupByBuilder(this).setIsOrderPreserving(isOrderPreserving) + .setOrderPreservingColumnCount(orderPreservingColumnCount) + .setExpressions(newExpressions).setKeyExpressions(newExpressions) + .setOrderPreservingTrackInfos(orderPreservingTrackInfos).build(); + } + } + + if (isUngroupedAggregate) { + return new GroupBy.GroupByBuilder(this).setIsOrderPreserving(isOrderPreserving) + .setOrderPreservingColumnCount(orderPreservingColumnCount).build(); + } + List expressions = Lists.newArrayListWithExpectedSize(this.expressions.size()); + List keyExpressions = expressions; + List> groupBys = + Lists.newArrayListWithExpectedSize(this.expressions.size()); + for (int i = 0; i < this.expressions.size(); i++) { + Expression expression = this.expressions.get(i); + groupBys.add(new Pair(i, expression)); + } + /* + * If we're not ordered along the PK axis, our coprocessor needs to collect all distinct + * groups within a region, sort them, and hold on to them until the scan completes. Put fixed + * length nullables at the end, so that we can represent null by the absence of the trailing + * value in the group by key. If there is more than one, we'll need to convert the ones not at + * the end into a Decimal so that we can use an empty byte array as our representation for + * null (which correctly maintains the sort order). We convert the Decimal back to the + * appropriate type (Integer or Long) when it's retrieved from the result set. More + * specifically, order into the following buckets: 1) non nullable fixed width 2) variable + * width 3) nullable fixed width Within each bucket, order based on the column position in the + * schema. Putting the fixed width values in the beginning optimizes access to subsequent + * values. + */ + Collections.sort(groupBys, new Comparator>() { + @Override + public int compare(Pair gb1, Pair gb2) { + Expression e1 = gb1.getSecond(); + Expression e2 = gb2.getSecond(); + PDataType t1 = e1.getDataType(); + PDataType t2 = e2.getDataType(); + boolean isFixed1 = t1.isFixedWidth(); + boolean isFixed2 = t2.isFixedWidth(); + boolean isFixedNullable1 = e1.isNullable() && isFixed1; + boolean isFixedNullable2 = e2.isNullable() && isFixed2; + boolean oae1 = onlyAtEndType(e1); + boolean oae2 = onlyAtEndType(e2); + if (oae1 == oae2) { + if (isFixedNullable1 == isFixedNullable2) { + if (isFixed1 == isFixed2) { + // Not strictly necessary, but forces the order to match the schema + // column order (with PK columns before value columns). 
+ // return o1.getColumnPosition() - o2.getColumnPosition(); + return gb1.getFirst() - gb2.getFirst(); + } else if (isFixed1) { + return -1; + } else { + return 1; + } + } else if (isFixedNullable1) { + return 1; } else { - return BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS; + return -1; } + } else if (oae1) { + return 1; + } else { + return -1; + } } - - public boolean isEmpty() { - return expressions.isEmpty(); + }); + boolean foundOnlyAtEndType = false; + for (Pair groupBy : groupBys) { + Expression e = groupBy.getSecond(); + if (onlyAtEndType(e)) { + if (foundOnlyAtEndType) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS) + .setMessage(e.toString()).build().buildException(); + } + foundOnlyAtEndType = true; } - - public boolean isOrderPreserving() { - return isOrderPreserving; + expressions.add(e); + } + for (int i = expressions.size() - 2; i >= 0; i--) { + Expression expression = expressions.get(i); + PDataType keyType = getGroupByDataType(expression); + if (keyType == expression.getDataType()) { + continue; } - - public boolean isUngroupedAggregate() { - return isUngroupedAggregate; + // Copy expressions only when keyExpressions will be different than expressions + if (keyExpressions == expressions) { + keyExpressions = new ArrayList(expressions); } + // Wrap expression in an expression that coerces the expression to the required type.. + // This is done so that we have a way of expressing null as an empty key when more + // than one fixed and nullable types are used in a group by clause + keyExpressions.set(i, CoerceExpression.create(expression, keyType)); + } - /** - * This value represents the row key column count corresponding to longest continuous - * ordering columns returned by {@link GroupBy#getOrderPreservingTrackInfos}, it may - * not equal to the size of {@link GroupBy#getOrderPreservingTrackInfos}. - */ - public int getOrderPreservingColumnCount() { - return orderPreservingColumnCount; - } + GroupBy groupBy = new GroupBy.GroupByBuilder().setIsOrderPreserving(isOrderPreserving) + .setExpressions(expressions).setKeyExpressions(keyExpressions).build(); + return groupBy; + } - public List getOrderPreservingTrackInfos() { - return orderPreservingTrackInfos; - } + public static class GroupByBuilder { + private boolean isOrderPreserving; + private int orderPreservingColumnCount; + private List expressions = Collections.emptyList(); + private List keyExpressions = Collections.emptyList(); + private boolean isUngroupedAggregate; + private List orderPreservingTrackInfos = Collections.emptyList(); - public GroupBy compile(StatementContext context, QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException { - boolean isOrderPreserving = this.isOrderPreserving; - int orderPreservingColumnCount = 0; - if (isOrderPreserving) { - OrderPreservingTracker tracker = new OrderPreservingTracker( - context, - GroupBy.EMPTY_GROUP_BY, - Ordering.UNORDERED, - expressions.size(), - null, - innerQueryPlan, - whereExpression); - for (int i = 0; i < expressions.size(); i++) { - Expression expression = expressions.get(i); - tracker.track(expression); - } - - // This is true if the GROUP BY is composed of only PK columns. We further check here that - // there are no "gaps" in the PK columns positions used (i.e. we start with the first PK - // column and use each subsequent one in PK order). 
- isOrderPreserving = tracker.isOrderPreserving(); - orderPreservingColumnCount = tracker.getOrderPreservingColumnCount(); - if(isOrderPreserving) { - //reorder the groupby expressions following pk columns - List orderPreservingTrackInfos = tracker.getOrderPreservingTrackInfos(); - List newExpressions = Info.extractExpressions(orderPreservingTrackInfos); - assert newExpressions.size() == expressions.size(); - return new GroupBy.GroupByBuilder(this) - .setIsOrderPreserving(isOrderPreserving) - .setOrderPreservingColumnCount(orderPreservingColumnCount) - .setExpressions(newExpressions) - .setKeyExpressions(newExpressions) - .setOrderPreservingTrackInfos(orderPreservingTrackInfos) - .build(); - } - } + public GroupByBuilder() { + } - if (isUngroupedAggregate) { - return new GroupBy.GroupByBuilder(this) - .setIsOrderPreserving(isOrderPreserving) - .setOrderPreservingColumnCount(orderPreservingColumnCount) - .build(); - } - List expressions = Lists.newArrayListWithExpectedSize(this.expressions.size()); - List keyExpressions = expressions; - List> groupBys = Lists.newArrayListWithExpectedSize(this.expressions.size()); - for (int i = 0; i < this.expressions.size(); i++) { - Expression expression = this.expressions.get(i); - groupBys.add(new Pair(i,expression)); - } - /* - * If we're not ordered along the PK axis, our coprocessor needs to collect all distinct groups within - * a region, sort them, and hold on to them until the scan completes. - * Put fixed length nullables at the end, so that we can represent null by the absence of the trailing - * value in the group by key. If there is more than one, we'll need to convert the ones not at the end - * into a Decimal so that we can use an empty byte array as our representation for null (which correctly - * maintains the sort order). We convert the Decimal back to the appropriate type (Integer or Long) when - * it's retrieved from the result set. - * - * More specifically, order into the following buckets: - * 1) non nullable fixed width - * 2) variable width - * 3) nullable fixed width - * Within each bucket, order based on the column position in the schema. Putting the fixed width values - * in the beginning optimizes access to subsequent values. - */ - Collections.sort(groupBys, new Comparator>() { - @Override - public int compare(Pair gb1, Pair gb2) { - Expression e1 = gb1.getSecond(); - Expression e2 = gb2.getSecond(); - PDataType t1 = e1.getDataType(); - PDataType t2 = e2.getDataType(); - boolean isFixed1 = t1.isFixedWidth(); - boolean isFixed2 = t2.isFixedWidth(); - boolean isFixedNullable1 = e1.isNullable() &&isFixed1; - boolean isFixedNullable2 = e2.isNullable() && isFixed2; - boolean oae1 = onlyAtEndType(e1); - boolean oae2 = onlyAtEndType(e2); - if (oae1 == oae2) { - if (isFixedNullable1 == isFixedNullable2) { - if (isFixed1 == isFixed2) { - // Not strictly necessary, but forces the order to match the schema - // column order (with PK columns before value columns). 
- //return o1.getColumnPosition() - o2.getColumnPosition(); - return gb1.getFirst() - gb2.getFirst(); - } else if (isFixed1) { - return -1; - } else { - return 1; - } - } else if (isFixedNullable1) { - return 1; - } else { - return -1; - } - } else if (oae1) { - return 1; - } else { - return -1; - } - } - }); - boolean foundOnlyAtEndType = false; - for (Pair groupBy : groupBys) { - Expression e = groupBy.getSecond(); - if (onlyAtEndType(e)) { - if (foundOnlyAtEndType) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS) - .setMessage(e.toString()).build().buildException(); - } - foundOnlyAtEndType = true; - } - expressions.add(e); - } - for (int i = expressions.size()-2; i >= 0; i--) { - Expression expression = expressions.get(i); - PDataType keyType = getGroupByDataType(expression); - if (keyType == expression.getDataType()) { - continue; - } - // Copy expressions only when keyExpressions will be different than expressions - if (keyExpressions == expressions) { - keyExpressions = new ArrayList(expressions); - } - // Wrap expression in an expression that coerces the expression to the required type.. - // This is done so that we have a way of expressing null as an empty key when more - // than one fixed and nullable types are used in a group by clause - keyExpressions.set(i, CoerceExpression.create(expression, keyType)); - } + public GroupByBuilder(GroupBy groupBy) { + this.isOrderPreserving = groupBy.isOrderPreserving; + this.orderPreservingColumnCount = groupBy.orderPreservingColumnCount; + this.expressions = groupBy.expressions; + this.keyExpressions = groupBy.keyExpressions; + this.isUngroupedAggregate = groupBy.isUngroupedAggregate; + } - GroupBy groupBy = new GroupBy.GroupByBuilder().setIsOrderPreserving(isOrderPreserving).setExpressions(expressions).setKeyExpressions(keyExpressions).build(); - return groupBy; - } - - public static class GroupByBuilder { - private boolean isOrderPreserving; - private int orderPreservingColumnCount; - private List expressions = Collections.emptyList(); - private List keyExpressions = Collections.emptyList(); - private boolean isUngroupedAggregate; - private List orderPreservingTrackInfos = Collections.emptyList(); - - public GroupByBuilder() { - } - - public GroupByBuilder(GroupBy groupBy) { - this.isOrderPreserving = groupBy.isOrderPreserving; - this.orderPreservingColumnCount = groupBy.orderPreservingColumnCount; - this.expressions = groupBy.expressions; - this.keyExpressions = groupBy.keyExpressions; - this.isUngroupedAggregate = groupBy.isUngroupedAggregate; - } - - public GroupByBuilder setExpressions(List expressions) { - this.expressions = expressions; - return this; - } - - public GroupByBuilder setKeyExpressions(List keyExpressions) { - this.keyExpressions = keyExpressions; - return this; - } - - public GroupByBuilder setIsOrderPreserving(boolean isOrderPreserving) { - this.isOrderPreserving = isOrderPreserving; - return this; - } + public GroupByBuilder setExpressions(List expressions) { + this.expressions = expressions; + return this; + } - public GroupByBuilder setIsUngroupedAggregate(boolean isUngroupedAggregate) { - this.isUngroupedAggregate = isUngroupedAggregate; - return this; - } + public GroupByBuilder setKeyExpressions(List keyExpressions) { + this.keyExpressions = keyExpressions; + return this; + } - public GroupByBuilder setOrderPreservingColumnCount(int orderPreservingColumnCount) { - this.orderPreservingColumnCount = orderPreservingColumnCount; - return this; - } + public GroupByBuilder 
setIsOrderPreserving(boolean isOrderPreserving) { + this.isOrderPreserving = isOrderPreserving; + return this; + } - public GroupByBuilder setOrderPreservingTrackInfos(List orderPreservingTrackInfos) { - this.orderPreservingTrackInfos = orderPreservingTrackInfos; - return this; - } + public GroupByBuilder setIsUngroupedAggregate(boolean isUngroupedAggregate) { + this.isUngroupedAggregate = isUngroupedAggregate; + return this; + } - public GroupBy build() { - return new GroupBy(this); - } - } + public GroupByBuilder setOrderPreservingColumnCount(int orderPreservingColumnCount) { + this.orderPreservingColumnCount = orderPreservingColumnCount; + return this; + } - public void explain(List planSteps, Integer limit) { - explainUtil(planSteps, limit, null); - } + public GroupByBuilder setOrderPreservingTrackInfos(List orderPreservingTrackInfos) { + this.orderPreservingTrackInfos = orderPreservingTrackInfos; + return this; + } - private void explainUtil(List planSteps, Integer limit, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - String serverAggregate; - if (isUngroupedAggregate) { - serverAggregate = "SERVER AGGREGATE INTO SINGLE ROW"; - } else { - String groupLimit = limit == null ? "" : (" LIMIT " + limit - + " GROUP" + (limit == 1 ? "" : "S")); - if (isOrderPreserving) { - serverAggregate = "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY " - + getExpressions() + groupLimit; - } else { - serverAggregate = "SERVER AGGREGATE INTO DISTINCT ROWS BY " - + getExpressions() + groupLimit; - } - } - planSteps.add(" " + serverAggregate); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerAggregate(serverAggregate); - } - } + public GroupBy build() { + return new GroupBy(this); + } + } - public void explain(List planSteps, Integer limit, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - explainUtil(planSteps, limit, explainPlanAttributesBuilder); + public void explain(List planSteps, Integer limit) { + explainUtil(planSteps, limit, null); + } + + private void explainUtil(List planSteps, Integer limit, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + String serverAggregate; + if (isUngroupedAggregate) { + serverAggregate = "SERVER AGGREGATE INTO SINGLE ROW"; + } else { + String groupLimit = + limit == null ? "" : (" LIMIT " + limit + " GROUP" + (limit == 1 ? "" : "S")); + if (isOrderPreserving) { + serverAggregate = + "SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY " + getExpressions() + groupLimit; + } else { + serverAggregate = + "SERVER AGGREGATE INTO DISTINCT ROWS BY " + getExpressions() + groupLimit; } + } + planSteps.add(" " + serverAggregate); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerAggregate(serverAggregate); + } + } + + public void explain(List planSteps, Integer limit, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + explainUtil(planSteps, limit, explainPlanAttributesBuilder); } + } + /** + * Get list of columns in the GROUP BY clause. 
+ * @param context query context kept between compilation of different query clauses + * @param statement SQL statement being compiled + * @return the {@link GroupBy} instance encapsulating the group by clause + * @throws ColumnNotFoundException if column name could not be resolved + * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple + * tables + */ + public static GroupBy compile(StatementContext context, SelectStatement statement) + throws SQLException { + List groupByNodes = statement.getGroupBy(); /** - * Get list of columns in the GROUP BY clause. - * @param context query context kept between compilation of different query clauses - * @param statement SQL statement being compiled - * @return the {@link GroupBy} instance encapsulating the group by clause - * @throws ColumnNotFoundException if column name could not be resolved - * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables + * Distinct can use an aggregate plan if there's no group by. Otherwise, we need to insert a + * step after the Merge that dedups. Order by only allowed on columns in the select distinct */ - public static GroupBy compile(StatementContext context, SelectStatement statement) throws SQLException { - List groupByNodes = statement.getGroupBy(); - /** - * Distinct can use an aggregate plan if there's no group by. - * Otherwise, we need to insert a step after the Merge that dedups. - * Order by only allowed on columns in the select distinct - */ - boolean isUngroupedAggregate = false; - if (groupByNodes.isEmpty()) { - if (statement.isAggregate()) { - // do not optimize if - // 1. we were asked not to optimize - // 2. there's any HAVING clause - // TODO: PHOENIX-2989 suggests some ways to optimize the latter case - if (statement.getHint().hasHint(Hint.RANGE_SCAN) || - statement.getHaving() != null) { - return GroupBy.UNGROUPED_GROUP_BY; - } - groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size()); - for (AliasedNode aliasedNode : statement.getSelect()) { - if (aliasedNode.getNode() instanceof DistinctCountParseNode) { - // only add children of DistinctCount nodes - groupByNodes.addAll(aliasedNode.getNode().getChildren()); - } else { - // if we found anything else, do not attempt any further optimization - return GroupBy.UNGROUPED_GROUP_BY; - } - } - isUngroupedAggregate = true; - } else if (statement.isDistinct()) { - groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size()); - for (AliasedNode aliasedNode : statement.getSelect()) { - // for distinct at all select expression as group by conditions - groupByNodes.add(aliasedNode.getNode()); - } - } else { - return GroupBy.EMPTY_GROUP_BY; - } + boolean isUngroupedAggregate = false; + if (groupByNodes.isEmpty()) { + if (statement.isAggregate()) { + // do not optimize if + // 1. we were asked not to optimize + // 2. 
there's any HAVING clause + // TODO: PHOENIX-2989 suggests some ways to optimize the latter case + if (statement.getHint().hasHint(Hint.RANGE_SCAN) || statement.getHaving() != null) { + return GroupBy.UNGROUPED_GROUP_BY; } - - // Accumulate expressions in GROUP BY - ExpressionCompiler compiler = - new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY); - List expressions = Lists.newArrayListWithExpectedSize(groupByNodes.size()); - for (int i = 0; i < groupByNodes.size(); i++) { - ParseNode node = groupByNodes.get(i); - Expression expression = node.accept(compiler); - if (!expression.isStateless()) { - if (compiler.isAggregate()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_GROUP_BY) - .setMessage(expression.toString()).build().buildException(); - } - expressions.add(expression); - } - compiler.reset(); + groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size()); + for (AliasedNode aliasedNode : statement.getSelect()) { + if (aliasedNode.getNode() instanceof DistinctCountParseNode) { + // only add children of DistinctCount nodes + groupByNodes.addAll(aliasedNode.getNode().getChildren()); + } else { + // if we found anything else, do not attempt any further optimization + return GroupBy.UNGROUPED_GROUP_BY; + } } - - if (expressions.isEmpty()) { - return GroupBy.EMPTY_GROUP_BY; + isUngroupedAggregate = true; + } else if (statement.isDistinct()) { + groupByNodes = Lists.newArrayListWithExpectedSize(statement.getSelect().size()); + for (AliasedNode aliasedNode : statement.getSelect()) { + // for distinct at all select expression as group by conditions + groupByNodes.add(aliasedNode.getNode()); } - GroupBy groupBy = new GroupBy.GroupByBuilder() - .setIsOrderPreserving(OrderByCompiler.isTrackOrderByPreserving(statement)) - .setExpressions(expressions).setKeyExpressions(expressions) - .setIsUngroupedAggregate(isUngroupedAggregate).build(); - return groupBy; - } - - private static boolean onlyAtEndType(Expression expression) { - // Due to the encoding schema of these types, they may only be - // used once in a group by and are located at the end of the - // group by row key. 
- PDataType type = getGroupByDataType(expression); - return type.isArrayType() || type == PVarbinary.INSTANCE; + } else { + return GroupBy.EMPTY_GROUP_BY; + } } - - private static PDataType getGroupByDataType(Expression expression) { - return IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType()); + + // Accumulate expressions in GROUP BY + ExpressionCompiler compiler = new ExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY); + List expressions = Lists.newArrayListWithExpectedSize(groupByNodes.size()); + for (int i = 0; i < groupByNodes.size(); i++) { + ParseNode node = groupByNodes.get(i); + Expression expression = node.accept(compiler); + if (!expression.isStateless()) { + if (compiler.isAggregate()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_GROUP_BY) + .setMessage(expression.toString()).build().buildException(); + } + expressions.add(expression); + } + compiler.reset(); } - - private GroupByCompiler() { + + if (expressions.isEmpty()) { + return GroupBy.EMPTY_GROUP_BY; } + GroupBy groupBy = new GroupBy.GroupByBuilder() + .setIsOrderPreserving(OrderByCompiler.isTrackOrderByPreserving(statement)) + .setExpressions(expressions).setKeyExpressions(expressions) + .setIsUngroupedAggregate(isUngroupedAggregate).build(); + return groupBy; + } + + private static boolean onlyAtEndType(Expression expression) { + // Due to the encoding schema of these types, they may only be + // used once in a group by and are located at the end of the + // group by row key. + PDataType type = getGroupByDataType(expression); + return type.isArrayType() || type == PVarbinary.INSTANCE; + } + + private static PDataType getGroupByDataType(Expression expression) { + return IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType()); + } + + private GroupByCompiler() { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java index 9ccd2f0c560..7163d88d174 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/HavingCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,148 +38,145 @@ import org.apache.phoenix.schema.TypeMismatchException; import org.apache.phoenix.schema.types.PBoolean; - public class HavingCompiler { - private HavingCompiler() { + private HavingCompiler() { + } + + public static Expression compile(StatementContext context, SelectStatement statement, + GroupBy groupBy) throws SQLException { + ParseNode having = statement.getHaving(); + if (having == null) { + return null; + } + ExpressionCompiler expressionBuilder = new ExpressionCompiler(context, groupBy); + Expression expression = having.accept(expressionBuilder); + if (expression.getDataType() != PBoolean.INSTANCE) { + throw TypeMismatchException.newException(PBoolean.INSTANCE, expression.getDataType(), + expression.toString()); + } + if (LiteralExpression.isBooleanFalseOrNull(expression)) { + context.setScanRanges(ScanRanges.NOTHING); + return null; + } else if (LiteralExpression.isTrue(expression)) { + return null; + } + if (!expressionBuilder.isAggregate()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_AGGREGATE_IN_HAVING_CLAUSE).build() + .buildException(); } + return expression; + } - public static Expression compile(StatementContext context, SelectStatement statement, GroupBy groupBy) throws SQLException { - ParseNode having = statement.getHaving(); - if (having == null) { - return null; - } - ExpressionCompiler expressionBuilder = new ExpressionCompiler(context, groupBy); - Expression expression = having.accept(expressionBuilder); - if (expression.getDataType() != PBoolean.INSTANCE) { - throw TypeMismatchException.newException(PBoolean.INSTANCE, expression.getDataType(), expression.toString()); - } - if (LiteralExpression.isBooleanFalseOrNull(expression)) { - context.setScanRanges(ScanRanges.NOTHING); - return null; - } else if (LiteralExpression.isTrue(expression)) { - return null; - } - if (!expressionBuilder.isAggregate()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_AGGREGATE_IN_HAVING_CLAUSE).build().buildException(); - } - return expression; + public static SelectStatement rewrite(StatementContext context, SelectStatement statement, + GroupBy groupBy) throws SQLException { + ParseNode having = statement.getHaving(); + if (having == null) { + return statement; } + HavingClauseVisitor visitor = new HavingClauseVisitor(context, groupBy); + having.accept(visitor); + statement = SelectStatementRewriter.moveFromHavingToWhereClause(statement, + visitor.getMoveToWhereClauseExpressions()); + return statement; + } - public static SelectStatement rewrite(StatementContext context, SelectStatement statement, GroupBy groupBy) throws SQLException { - ParseNode having = statement.getHaving(); - if (having == null) { - return statement; - } - HavingClauseVisitor visitor = new HavingClauseVisitor(context, groupBy); - having.accept(visitor); - statement = SelectStatementRewriter.moveFromHavingToWhereClause(statement, visitor.getMoveToWhereClauseExpressions()); - return statement; + /** + * Visitor that figures out if an expression can be moved from the HAVING clause to the WHERE + * clause, since it's more optimal to pre-filter instead of post-filter. The visitor traverses + * through AND expressions only and into comparison expresssions. 
If a comparison expression uses + * a GROUP BY column and does not use any aggregate functions, then it's moved. For example, these + * HAVING expressions would be moved: select count(1) from atable group by a_string having + * a_string = 'foo' select count(1) from atable group by a_date having round(a_date,'hour') > ? + * select count(1) from atable group by a_date,a_string having a_date > ? or a_string = 'a' select + * count(1) from atable group by a_string,b_string having a_string = 'a' and b_string = 'b' while + * these would not be moved: select count(1) from atable having min(a_integer) < 5 select count(1) + * from atable group by a_string having count(a_string) >= 1 select count(1) from atable group by + * a_date,a_string having a_date > ? or min(a_string) = 'a' select count(1) from atable group by + * a_date having round(min(a_date),'hour') < ? + * @since 0.1 + */ + private static class HavingClauseVisitor extends BooleanParseNodeVisitor { + private ParseNode topNode = null; + private boolean hasNoAggregateFunctions = true; + private Boolean hasOnlyAggregateColumns; + private final StatementContext context; + private final GroupBy groupBy; + private final Set moveToWhereClause = new LinkedHashSet(); + + HavingClauseVisitor(StatementContext context, GroupBy groupBy) { + this.context = context; + this.groupBy = groupBy; } - /** - * - * Visitor that figures out if an expression can be moved from the HAVING clause to - * the WHERE clause, since it's more optimal to pre-filter instead of post-filter. - * - * The visitor traverses through AND expressions only and into comparison expresssions. - * If a comparison expression uses a GROUP BY column and does not use any aggregate - * functions, then it's moved. For example, these HAVING expressions would be moved: - * - * select count(1) from atable group by a_string having a_string = 'foo' - * select count(1) from atable group by a_date having round(a_date,'hour') > ? - * select count(1) from atable group by a_date,a_string having a_date > ? or a_string = 'a' - * select count(1) from atable group by a_string,b_string having a_string = 'a' and b_string = 'b' - * - * while these would not be moved: - * - * select count(1) from atable having min(a_integer) < 5 - * select count(1) from atable group by a_string having count(a_string) >= 1 - * select count(1) from atable group by a_date,a_string having a_date > ? or min(a_string) = 'a' - * select count(1) from atable group by a_date having round(min(a_date),'hour') < ? 
- * - * - * @since 0.1 - */ - private static class HavingClauseVisitor extends BooleanParseNodeVisitor { - private ParseNode topNode = null; - private boolean hasNoAggregateFunctions = true; - private Boolean hasOnlyAggregateColumns; - private final StatementContext context; - private final GroupBy groupBy; - private final Set moveToWhereClause = new LinkedHashSet(); - - HavingClauseVisitor(StatementContext context, GroupBy groupBy) { - this.context = context; - this.groupBy = groupBy; - } - - public Set getMoveToWhereClauseExpressions() { - return moveToWhereClause; - } + public Set getMoveToWhereClauseExpressions() { + return moveToWhereClause; + } - @Override - protected boolean enterBooleanNode(ParseNode node) throws SQLException { - if (topNode == null) { - topNode = node; - } - - return true; - } + @Override + protected boolean enterBooleanNode(ParseNode node) throws SQLException { + if (topNode == null) { + topNode = node; + } - @Override - protected Void leaveBooleanNode(ParseNode node, List l) throws SQLException { - if (topNode == node) { - if ( hasNoAggregateFunctions && !Boolean.FALSE.equals(hasOnlyAggregateColumns)) { - moveToWhereClause.add(node); - } - hasNoAggregateFunctions = true; - hasOnlyAggregateColumns = null; - topNode = null; - } - - return null; - } + return true; + } - @Override - protected boolean enterNonBooleanNode(ParseNode node) throws SQLException { - return true; + @Override + protected Void leaveBooleanNode(ParseNode node, List l) throws SQLException { + if (topNode == node) { + if (hasNoAggregateFunctions && !Boolean.FALSE.equals(hasOnlyAggregateColumns)) { + moveToWhereClause.add(node); } + hasNoAggregateFunctions = true; + hasOnlyAggregateColumns = null; + topNode = null; + } - @Override - protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - return true; - } + return null; + } - @Override - public Void visitLeave(AndParseNode node, List l) throws SQLException { - return null; - } + @Override + protected boolean enterNonBooleanNode(ParseNode node) throws SQLException { + return true; + } - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - boolean isAggregate = node.isAggregate(); - this.hasNoAggregateFunctions = this.hasNoAggregateFunctions && !isAggregate; - return !isAggregate && super.visitEnter(node); - } + @Override + protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { + return null; + } - @Override - public Void visit(ColumnParseNode node) throws SQLException { - ColumnRef ref = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - boolean isAggregateColumn = groupBy.getExpressions().indexOf(ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive())) >= 0; - if (hasOnlyAggregateColumns == null) { - hasOnlyAggregateColumns = isAggregateColumn; - } else { - hasOnlyAggregateColumns &= isAggregateColumn; - } - - return null; - } - + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + return true; } + + @Override + public Void visitLeave(AndParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + boolean isAggregate = node.isAggregate(); + this.hasNoAggregateFunctions = this.hasNoAggregateFunctions && !isAggregate; + return !isAggregate && super.visitEnter(node); + } + + 
@Override + public Void visit(ColumnParseNode node) throws SQLException { + ColumnRef ref = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), + node.getName()); + boolean isAggregateColumn = groupBy.getExpressions().indexOf( + ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive())) >= 0; + if (hasOnlyAggregateColumns == null) { + hasOnlyAggregateColumns = isAggregateColumn; + } else { + hasOnlyAggregateColumns &= isAggregateColumn; + } + + return null; + } + + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java index b4a4168adaf..ef873412cea 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexExpressionCompiler.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.compile; @@ -20,34 +28,32 @@ */ public class IndexExpressionCompiler extends ExpressionCompiler { - // - private ColumnRef columnRef; - - public IndexExpressionCompiler(StatementContext context) { - super(context); - this.columnRef = null; - } - - @Override - public void reset() { - super.reset(); - this.columnRef = null; + // + private ColumnRef columnRef; + + public IndexExpressionCompiler(StatementContext context) { + super(context); + this.columnRef = null; + } + + @Override + public void reset() { + super.reset(); + this.columnRef = null; + } + + @Override + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef columnRef = super.resolveColumn(node); + if (isTopLevel()) { + this.columnRef = columnRef; } + return columnRef; + } - @Override - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef columnRef = super.resolveColumn(node); - if (isTopLevel()) { - this.columnRef = columnRef; - } - return columnRef; - } - - /** - * @return if the expression being compiled is a regular column the column ref, else is null - */ - public ColumnRef getColumnRef() { - return columnRef; - } + /** Returns if the expression being compiled is a regular column the column ref, else is null */ + public ColumnRef getColumnRef() { + return columnRef; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java index 1985d0f8226..3978412054c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/IndexStatementRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compile; import java.sql.SQLException; @@ -39,128 +38,129 @@ import org.apache.phoenix.util.IndexUtil; public class IndexStatementRewriter extends ParseNodeRewriter { - private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); - - private Map multiTableRewriteMap; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - private final boolean setTableAlias; - - public IndexStatementRewriter(ColumnResolver dataResolver, Map multiTableRewriteMap, boolean setTableAlias) { - super(dataResolver); - this.multiTableRewriteMap = multiTableRewriteMap; - this.setTableAlias = setTableAlias; - } - - /** - * Rewrite the parse node by translating all data table column references to - * references to the corresponding index column. - * @param node the parse node - * @param dataResolver the column resolver - * @return new parse node or the same one if nothing was rewritten. 
- * @throws SQLException - */ - public static ParseNode translate(ParseNode node, ColumnResolver dataResolver) throws SQLException { - return rewrite(node, new IndexStatementRewriter(dataResolver, null, false)); - } - - /** - * Rewrite the select statement by translating all data table column references to - * references to the corresponding index column. - * @param statement the select statement - * @param dataResolver the column resolver - * @return new select statement or the same one if nothing was rewritten. - * @throws SQLException - */ - public static SelectStatement translate(SelectStatement statement, ColumnResolver dataResolver) throws SQLException { - return translate(statement, dataResolver, null); - } - - /** - * Rewrite the select statement containing multiple tables by translating all - * data table column references to references to the corresponding index column. - * @param statement the select statement - * @param dataResolver the column resolver - * @param multiTableRewriteMap the data table to index table map - * @return new select statement or the same one if nothing was rewritten. - * @throws SQLException - */ - public static SelectStatement translate(SelectStatement statement, ColumnResolver dataResolver, Map multiTableRewriteMap) throws SQLException { - return rewrite(statement, new IndexStatementRewriter(dataResolver, multiTableRewriteMap, false)); - } + private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); - @Override - public ParseNode visit(ColumnParseNode node) throws SQLException { - ColumnRef dataColRef = getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - PColumn dataCol = dataColRef.getColumn(); - TableRef dataTableRef = dataColRef.getTableRef(); - // Rewrite view constants as literals, as they won't be in the schema for - // an index on the view. Our view may be READ_ONLY yet still have inherited - // view constants if based on an UPDATABLE view - if (dataCol.getViewConstant() != null) { - byte[] viewConstant = dataCol.getViewConstant(); - // Ignore last byte, as it's only there so we can have a way to differentiate null - // from the absence of a value. - ptr.set(viewConstant, 0, viewConstant.length-1); - Object literal = dataCol.getDataType().toObject(ptr, dataCol.getSortOrder()); - return new LiteralParseNode(literal, dataCol.getDataType()); - } - TableName tName = getReplacedTableName(dataTableRef); - if (multiTableRewriteMap != null && tName == null) - return node; - - String indexColName = IndexUtil.getIndexColumnName(dataCol); - ParseNode indexColNode = new ColumnParseNode(tName, '"' + indexColName + '"', node.getAlias()); - PDataType indexColType = IndexUtil.getIndexColumnDataType(dataCol); - PDataType dataColType = dataColRef.getColumn().getDataType(); - - // Coerce index column reference back to same type as data column so that - // expression behave exactly the same. No need to invert, as this will be done - // automatically as needed. If node is used at the top level, do not convert, as - // otherwise the wrapper gets in the way in the group by clause. For example, - // an INTEGER column in a GROUP BY gets doubly wrapped like this: - // CAST CAST int_col AS INTEGER AS DECIMAL - // This is unnecessary and problematic in the case of a null value. 
- // TODO: test case for this - if (!isTopLevel() && indexColType != dataColType) { - indexColNode = FACTORY.cast(indexColNode, dataColType, null, null); - } - return indexColNode; - } + private Map multiTableRewriteMap; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + private final boolean setTableAlias; - @Override - public ParseNode visit(WildcardParseNode node) throws SQLException { - return multiTableRewriteMap != null ? node : WildcardParseNode.REWRITE_INSTANCE; - } + public IndexStatementRewriter(ColumnResolver dataResolver, + Map multiTableRewriteMap, boolean setTableAlias) { + super(dataResolver); + this.multiTableRewriteMap = multiTableRewriteMap; + this.setTableAlias = setTableAlias; + } - @Override - public ParseNode visit(TableWildcardParseNode node) throws SQLException { - TableName tName = getReplacedTableName(getResolver().resolveTable(node.getTableName().getSchemaName(), node.getTableName().getTableName())); - return tName == null ? node : TableWildcardParseNode.create(tName, true); - } + /** + * Rewrite the parse node by translating all data table column references to references to the + * corresponding index column. + * @param node the parse node + * @param dataResolver the column resolver + * @return new parse node or the same one if nothing was rewritten. + */ + public static ParseNode translate(ParseNode node, ColumnResolver dataResolver) + throws SQLException { + return rewrite(node, new IndexStatementRewriter(dataResolver, null, false)); + } + + /** + * Rewrite the select statement by translating all data table column references to references to + * the corresponding index column. + * @param statement the select statement + * @param dataResolver the column resolver + * @return new select statement or the same one if nothing was rewritten. + */ + public static SelectStatement translate(SelectStatement statement, ColumnResolver dataResolver) + throws SQLException { + return translate(statement, dataResolver, null); + } - @Override - public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { - return multiTableRewriteMap != null ? node : new FamilyWildcardParseNode(node, true); + /** + * Rewrite the select statement containing multiple tables by translating all data table column + * references to references to the corresponding index column. + * @param statement the select statement + * @param dataResolver the column resolver + * @param multiTableRewriteMap the data table to index table map + * @return new select statement or the same one if nothing was rewritten. + */ + public static SelectStatement translate(SelectStatement statement, ColumnResolver dataResolver, + Map multiTableRewriteMap) throws SQLException { + return rewrite(statement, + new IndexStatementRewriter(dataResolver, multiTableRewriteMap, false)); + } + + @Override + public ParseNode visit(ColumnParseNode node) throws SQLException { + ColumnRef dataColRef = + getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); + PColumn dataCol = dataColRef.getColumn(); + TableRef dataTableRef = dataColRef.getTableRef(); + // Rewrite view constants as literals, as they won't be in the schema for + // an index on the view. Our view may be READ_ONLY yet still have inherited + // view constants if based on an UPDATABLE view + if (dataCol.getViewConstant() != null) { + byte[] viewConstant = dataCol.getViewConstant(); + // Ignore last byte, as it's only there so we can have a way to differentiate null + // from the absence of a value. 
+ ptr.set(viewConstant, 0, viewConstant.length - 1); + Object literal = dataCol.getDataType().toObject(ptr, dataCol.getSortOrder()); + return new LiteralParseNode(literal, dataCol.getDataType()); } - - private TableName getReplacedTableName(TableRef origRef) { - // if the setTableAlias flag is true and the original table has an alias we use that as the table name - if (setTableAlias && origRef.getTableAlias() != null) - return TableName.create(null, origRef.getTableAlias()); - - if (multiTableRewriteMap == null) - return null; - - TableRef tableRef = multiTableRewriteMap.get(origRef); - if (tableRef == null) - return null; - - if (origRef.getTableAlias() != null) - return TableName.create(null, origRef.getTableAlias()); - - String schemaName = tableRef.getTable().getSchemaName().getString(); - return TableName.create(schemaName.length() == 0 ? null : schemaName, tableRef.getTable().getTableName().getString()); + TableName tName = getReplacedTableName(dataTableRef); + if (multiTableRewriteMap != null && tName == null) return node; + + String indexColName = IndexUtil.getIndexColumnName(dataCol); + ParseNode indexColNode = new ColumnParseNode(tName, '"' + indexColName + '"', node.getAlias()); + PDataType indexColType = IndexUtil.getIndexColumnDataType(dataCol); + PDataType dataColType = dataColRef.getColumn().getDataType(); + + // Coerce index column reference back to same type as data column so that + // expression behave exactly the same. No need to invert, as this will be done + // automatically as needed. If node is used at the top level, do not convert, as + // otherwise the wrapper gets in the way in the group by clause. For example, + // an INTEGER column in a GROUP BY gets doubly wrapped like this: + // CAST CAST int_col AS INTEGER AS DECIMAL + // This is unnecessary and problematic in the case of a null value. + // TODO: test case for this + if (!isTopLevel() && indexColType != dataColType) { + indexColNode = FACTORY.cast(indexColNode, dataColType, null, null); } - -} + return indexColNode; + } + + @Override + public ParseNode visit(WildcardParseNode node) throws SQLException { + return multiTableRewriteMap != null ? node : WildcardParseNode.REWRITE_INSTANCE; + } + + @Override + public ParseNode visit(TableWildcardParseNode node) throws SQLException { + TableName tName = getReplacedTableName(getResolver() + .resolveTable(node.getTableName().getSchemaName(), node.getTableName().getTableName())); + return tName == null ? node : TableWildcardParseNode.create(tName, true); + } + + @Override + public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { + return multiTableRewriteMap != null ? node : new FamilyWildcardParseNode(node, true); + } + + private TableName getReplacedTableName(TableRef origRef) { + // if the setTableAlias flag is true and the original table has an alias we use that as the + // table name + if (setTableAlias && origRef.getTableAlias() != null) + return TableName.create(null, origRef.getTableAlias()); + if (multiTableRewriteMap == null) return null; + + TableRef tableRef = multiTableRewriteMap.get(origRef); + if (tableRef == null) return null; + + if (origRef.getTableAlias() != null) return TableName.create(null, origRef.getTableAlias()); + + String schemaName = tableRef.getTable().getSchemaName().getString(); + return TableName.create(schemaName.length() == 0 ? 
null : schemaName, + tableRef.getTable().getTableName().getString()); + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java index c3295d2b0f7..296964f0a99 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/JoinCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,8 +34,6 @@ import java.util.Map; import java.util.Set; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; @@ -77,10 +75,9 @@ import org.apache.phoenix.schema.IndexUncoveredDataColumnRef; import org.apache.phoenix.schema.MetaDataEntityNotFoundException; import org.apache.phoenix.schema.PColumn; -import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PName; +import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.ProjectedColumn; @@ -98,1492 +95,1472 @@ import org.apache.phoenix.schema.types.PTinyint; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ParseNodeUtil; import org.apache.phoenix.util.ParseNodeUtil.RewriteResult; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - - public class JoinCompiler { - public enum Strategy { - HASH_BUILD_LEFT, - HASH_BUILD_RIGHT, - SORT_MERGE, - } - - public enum ColumnRefType { - JOINLOCAL, - GENERAL, + public enum Strategy { + HASH_BUILD_LEFT, + HASH_BUILD_RIGHT, + SORT_MERGE, + } + + public enum ColumnRefType { + JOINLOCAL, + GENERAL, + } + + private final PhoenixStatement phoenixStatement; + /** + * The original join sql for current {@link JoinCompiler}. 
+ */ + private final SelectStatement originalJoinSelectStatement; + private final ColumnResolver origResolver; + private final boolean useStarJoin; + private final Map columnRefs; + private final Map columnNodes; + private final boolean useSortMergeJoin; + + private JoinCompiler(PhoenixStatement statement, SelectStatement select, + ColumnResolver resolver) { + this.phoenixStatement = statement; + this.originalJoinSelectStatement = select; + this.origResolver = resolver; + this.useStarJoin = !select.getHint().hasHint(Hint.NO_STAR_JOIN); + this.columnRefs = new HashMap(); + this.columnNodes = new HashMap(); + this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN); + } + + /** + * After this method is called, the inner state of the parameter resolver may be changed by + * {@link FromCompiler#refreshDerivedTableNode} because of some sql optimization, see also + * {@link Table#pruneSubselectAliasedNodes()}. + */ + public static JoinTable compile(PhoenixStatement statement, SelectStatement select, + ColumnResolver resolver) throws SQLException { + JoinCompiler compiler = new JoinCompiler(statement, select, resolver); + JoinTableConstructor constructor = compiler.new JoinTableConstructor(); + Pair> res = select.getFrom().accept(constructor); + JoinTable joinTable = res.getSecond() == null + ? compiler.new JoinTable(res.getFirst()) + : compiler.new JoinTable(res.getFirst(), res.getSecond()); + if (select.getWhere() != null) { + joinTable.pushDownFilter(select.getWhere()); } - private final PhoenixStatement phoenixStatement; - /** - * The original join sql for current {@link JoinCompiler}. - */ - private final SelectStatement originalJoinSelectStatement; - private final ColumnResolver origResolver; - private final boolean useStarJoin; - private final Map columnRefs; - private final Map columnNodes; - private final boolean useSortMergeJoin; - - private JoinCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) { - this.phoenixStatement = statement; - this.originalJoinSelectStatement = select; - this.origResolver = resolver; - this.useStarJoin = !select.getHint().hasHint(Hint.NO_STAR_JOIN); - this.columnRefs = new HashMap(); - this.columnNodes = new HashMap(); - this.useSortMergeJoin = select.getHint().hasHint(Hint.USE_SORT_MERGE_JOIN); - } - - /** - * After this method is called, the inner state of the parameter resolver may be changed by - * {@link FromCompiler#refreshDerivedTableNode} because of some sql optimization, - * see also {@link Table#pruneSubselectAliasedNodes()}. - * @param statement - * @param select - * @param resolver - * @return - * @throws SQLException - */ - public static JoinTable compile(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver) throws SQLException { - JoinCompiler compiler = new JoinCompiler(statement, select, resolver); - JoinTableConstructor constructor = compiler.new JoinTableConstructor(); - Pair> res = select.getFrom().accept(constructor); - JoinTable joinTable = res.getSecond() == null ? 
compiler.new JoinTable(res.getFirst()) : compiler.new JoinTable(res.getFirst(), res.getSecond()); - if (select.getWhere() != null) { - joinTable.pushDownFilter(select.getWhere()); - } - - ColumnRefParseNodeVisitor generalRefVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection()); - ColumnRefParseNodeVisitor joinLocalRefVisitor = new ColumnRefParseNodeVisitor(resolver, statement.getConnection()); - - joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor); + ColumnRefParseNodeVisitor generalRefVisitor = + new ColumnRefParseNodeVisitor(resolver, statement.getConnection()); + ColumnRefParseNodeVisitor joinLocalRefVisitor = + new ColumnRefParseNodeVisitor(resolver, statement.getConnection()); - ParseNodeUtil.applyParseNodeVisitor(select, generalRefVisitor, false); + joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor); - compiler.columnNodes.putAll(joinLocalRefVisitor.getColumnRefMap()); - compiler.columnNodes.putAll(generalRefVisitor.getColumnRefMap()); + ParseNodeUtil.applyParseNodeVisitor(select, generalRefVisitor, false); - for (ColumnRef ref : generalRefVisitor.getColumnRefMap().keySet()) { - compiler.columnRefs.put(ref, ColumnRefType.GENERAL); - } - for (ColumnRef ref : joinLocalRefVisitor.getColumnRefMap().keySet()) { - if (!compiler.columnRefs.containsKey(ref)) - compiler.columnRefs.put(ref, ColumnRefType.JOINLOCAL); - } + compiler.columnNodes.putAll(joinLocalRefVisitor.getColumnRefMap()); + compiler.columnNodes.putAll(generalRefVisitor.getColumnRefMap()); - /** - * After {@link ColumnRefParseNodeVisitor} is pushed down, - * pruning columns for each {@link JoinCompiler.Table} if - * {@link @link JoinCompiler.Table#isSubselect()}. - */ - joinTable.pruneSubselectAliasedNodes(); - return joinTable; + for (ColumnRef ref : generalRefVisitor.getColumnRefMap().keySet()) { + compiler.columnRefs.put(ref, ColumnRefType.GENERAL); + } + for (ColumnRef ref : joinLocalRefVisitor.getColumnRefMap().keySet()) { + if (!compiler.columnRefs.containsKey(ref)) + compiler.columnRefs.put(ref, ColumnRefType.JOINLOCAL); } - private class JoinTableConstructor implements TableNodeVisitor>> { - - private TableRef resolveTable(String alias, TableName name) throws SQLException { - if (alias != null) - return origResolver.resolveTable(null, alias); - - return origResolver.resolveTable(name.getSchemaName(), name.getTableName()); - } - - @Override - public Pair> visit(BindTableNode boundTableNode) throws SQLException { - TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName()); - boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); - Table table = new Table(boundTableNode, isWildCard, Collections.emptyList(), boundTableNode.getTableSamplingRate(), tableRef); - return new Pair>(table, null); - } - - @Override - public Pair> visit(JoinTableNode joinNode) throws SQLException { - Pair> lhs = joinNode.getLHS().accept(this); - Pair> rhs = joinNode.getRHS().accept(this); - JoinTable joinTable = rhs.getSecond() == null ? 
new JoinTable(rhs.getFirst()) : new JoinTable(rhs.getFirst(), rhs.getSecond()); - List joinSpecs = lhs.getSecond(); - if (joinSpecs == null) { - joinSpecs = new ArrayList(); - } - joinSpecs.add(new JoinSpec(joinNode.getType(), joinNode.getOnNode(), joinTable, joinNode.isSingleValueOnly(), origResolver)); + /** + * After {@link ColumnRefParseNodeVisitor} is pushed down, pruning columns for each + * {@link JoinCompiler.Table} if {@link @link JoinCompiler.Table#isSubselect()}. + */ + joinTable.pruneSubselectAliasedNodes(); + return joinTable; + } - return new Pair>(lhs.getFirst(), joinSpecs); - } + private class JoinTableConstructor implements TableNodeVisitor>> { - @Override - public Pair> visit(NamedTableNode namedTableNode) - throws SQLException { - TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName()); - boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); - Table table = new Table(namedTableNode, isWildCard, namedTableNode.getDynamicColumns(), namedTableNode.getTableSamplingRate(), tableRef); - return new Pair>(table, null); - } + private TableRef resolveTable(String alias, TableName name) throws SQLException { + if (alias != null) return origResolver.resolveTable(null, alias); - @Override - public Pair> visit(DerivedTableNode subselectNode) - throws SQLException { - TableRef tableRef = resolveTable(subselectNode.getAlias(), null); - boolean isWildCard = isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); - Table table = new Table(subselectNode, isWildCard, tableRef); - return new Pair>(table, null); - } + return origResolver.resolveTable(name.getSchemaName(), name.getTableName()); } - public class JoinTable { - private final Table leftTable; - private final List joinSpecs; - private List postFilters; - private final List
allTables; - private final List allTableRefs; - private final boolean allLeftJoin; - private final boolean isPrefilterAccepted; - private final List prefilterAcceptedTables; - - private JoinTable(Table table) { - this.leftTable = table; - this.joinSpecs = Collections.emptyList(); - this.postFilters = Collections.EMPTY_LIST; - this.allTables = Collections.
singletonList(table); - this.allTableRefs = Collections.singletonList(table.getTableRef()); - this.allLeftJoin = false; - this.isPrefilterAccepted = true; - this.prefilterAcceptedTables = Collections.emptyList(); - } + @Override + public Pair> visit(BindTableNode boundTableNode) throws SQLException { + TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName()); + boolean isWildCard = + isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); + Table table = new Table(boundTableNode, isWildCard, Collections. emptyList(), + boundTableNode.getTableSamplingRate(), tableRef); + return new Pair>(table, null); + } - private JoinTable(Table table, List joinSpecs) { - this.leftTable = table; - this.joinSpecs = joinSpecs; - this.postFilters = new ArrayList(); - this.allTables = new ArrayList
(); - this.allTableRefs = new ArrayList(); - this.allTables.add(table); - boolean allLeftJoin = true; - int lastRightJoinIndex = -1; - boolean hasFullJoin = false; - for (int i = 0; i < joinSpecs.size(); i++) { - JoinSpec joinSpec = joinSpecs.get(i); - this.allTables.addAll(joinSpec.getRhsJoinTable().getAllTables()); - allLeftJoin = allLeftJoin && joinSpec.getType() == JoinType.Left; - hasFullJoin = hasFullJoin || joinSpec.getType() == JoinType.Full; - if (joinSpec.getType() == JoinType.Right) { - lastRightJoinIndex = i; - } - } - for (Table t : this.allTables) { - this.allTableRefs.add(t.getTableRef()); - } - this.allLeftJoin = allLeftJoin; - this.isPrefilterAccepted = !hasFullJoin && lastRightJoinIndex == -1; - this.prefilterAcceptedTables = new ArrayList(); - for (int i = lastRightJoinIndex == -1 ? 0 : lastRightJoinIndex; i < joinSpecs.size(); i++) { - JoinSpec joinSpec = joinSpecs.get(i); - if (joinSpec.getType() != JoinType.Left && joinSpec.getType() != JoinType.Anti && joinSpec.getType() != JoinType.Full) { - prefilterAcceptedTables.add(joinSpec); - } - } - } + @Override + public Pair> visit(JoinTableNode joinNode) throws SQLException { + Pair> lhs = joinNode.getLHS().accept(this); + Pair> rhs = joinNode.getRHS().accept(this); + JoinTable joinTable = rhs.getSecond() == null + ? new JoinTable(rhs.getFirst()) + : new JoinTable(rhs.getFirst(), rhs.getSecond()); + List joinSpecs = lhs.getSecond(); + if (joinSpecs == null) { + joinSpecs = new ArrayList(); + } + joinSpecs.add(new JoinSpec(joinNode.getType(), joinNode.getOnNode(), joinTable, + joinNode.isSingleValueOnly(), origResolver)); + + return new Pair>(lhs.getFirst(), joinSpecs); + } - public Table getLeftTable() { - return leftTable; - } + @Override + public Pair> visit(NamedTableNode namedTableNode) throws SQLException { + TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName()); + boolean isWildCard = + isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); + Table table = new Table(namedTableNode, isWildCard, namedTableNode.getDynamicColumns(), + namedTableNode.getTableSamplingRate(), tableRef); + return new Pair>(table, null); + } - public List getJoinSpecs() { - return joinSpecs; - } + @Override + public Pair> visit(DerivedTableNode subselectNode) throws SQLException { + TableRef tableRef = resolveTable(subselectNode.getAlias(), null); + boolean isWildCard = + isWildCardSelectForTable(originalJoinSelectStatement.getSelect(), tableRef, origResolver); + Table table = new Table(subselectNode, isWildCard, tableRef); + return new Pair>(table, null); + } + } + + public class JoinTable { + private final Table leftTable; + private final List joinSpecs; + private List postFilters; + private final List
allTables; + private final List allTableRefs; + private final boolean allLeftJoin; + private final boolean isPrefilterAccepted; + private final List prefilterAcceptedTables; + + private JoinTable(Table table) { + this.leftTable = table; + this.joinSpecs = Collections. emptyList(); + this.postFilters = Collections.EMPTY_LIST; + this.allTables = Collections.
singletonList(table); + this.allTableRefs = Collections. singletonList(table.getTableRef()); + this.allLeftJoin = false; + this.isPrefilterAccepted = true; + this.prefilterAcceptedTables = Collections. emptyList(); + } - public List
getAllTables() { - return allTables; - } + private JoinTable(Table table, List joinSpecs) { + this.leftTable = table; + this.joinSpecs = joinSpecs; + this.postFilters = new ArrayList(); + this.allTables = new ArrayList
(); + this.allTableRefs = new ArrayList(); + this.allTables.add(table); + boolean allLeftJoin = true; + int lastRightJoinIndex = -1; + boolean hasFullJoin = false; + for (int i = 0; i < joinSpecs.size(); i++) { + JoinSpec joinSpec = joinSpecs.get(i); + this.allTables.addAll(joinSpec.getRhsJoinTable().getAllTables()); + allLeftJoin = allLeftJoin && joinSpec.getType() == JoinType.Left; + hasFullJoin = hasFullJoin || joinSpec.getType() == JoinType.Full; + if (joinSpec.getType() == JoinType.Right) { + lastRightJoinIndex = i; + } + } + for (Table t : this.allTables) { + this.allTableRefs.add(t.getTableRef()); + } + this.allLeftJoin = allLeftJoin; + this.isPrefilterAccepted = !hasFullJoin && lastRightJoinIndex == -1; + this.prefilterAcceptedTables = new ArrayList(); + for (int i = lastRightJoinIndex == -1 ? 0 : lastRightJoinIndex; i < joinSpecs.size(); i++) { + JoinSpec joinSpec = joinSpecs.get(i); + if ( + joinSpec.getType() != JoinType.Left && joinSpec.getType() != JoinType.Anti + && joinSpec.getType() != JoinType.Full + ) { + prefilterAcceptedTables.add(joinSpec); + } + } + } - public List getAllTableRefs() { - return allTableRefs; - } + public Table getLeftTable() { + return leftTable; + } - public List getLeftTableRef() { - return Collections.singletonList(leftTable.getTableRef()); - } + public List getJoinSpecs() { + return joinSpecs; + } - public boolean isAllLeftJoin() { - return allLeftJoin; - } + public List
getAllTables() { + return allTables; + } - public SelectStatement getOriginalJoinSelectStatement() { - return originalJoinSelectStatement; - } + public List getAllTableRefs() { + return allTableRefs; + } - public ColumnResolver getOriginalResolver() { - return origResolver; - } + public List getLeftTableRef() { + return Collections. singletonList(leftTable.getTableRef()); + } - public Map getColumnRefs() { - return columnRefs; - } + public boolean isAllLeftJoin() { + return allLeftJoin; + } - public ParseNode getPostFiltersCombined() { - return combine(postFilters); - } + public SelectStatement getOriginalJoinSelectStatement() { + return originalJoinSelectStatement; + } - public void addPostJoinFilter(ParseNode parseNode) { - if(this.postFilters == Collections.EMPTY_LIST) { - this.postFilters = new ArrayList(); - } - this.postFilters.add(parseNode); - } + public ColumnResolver getOriginalResolver() { + return origResolver; + } - public void addLeftTableFilter(ParseNode parseNode) throws SQLException { - if (isPrefilterAccepted) { - leftTable.addFilter(parseNode); - } else { - addPostJoinFilter(parseNode); - } - } + public Map getColumnRefs() { + return columnRefs; + } - public List getPrefilterAcceptedJoinSpecs() { - return this.prefilterAcceptedTables; - } + public ParseNode getPostFiltersCombined() { + return combine(postFilters); + } - /** - * try to decompose filter and push down to single table. - * @param filter - * @throws SQLException - */ - public void pushDownFilter(ParseNode filter) throws SQLException { - if (joinSpecs.isEmpty()) { - leftTable.addFilter(filter); - return; - } + public void addPostJoinFilter(ParseNode parseNode) { + if (this.postFilters == Collections.EMPTY_LIST) { + this.postFilters = new ArrayList(); + } + this.postFilters.add(parseNode); + } - WhereNodeVisitor visitor = new WhereNodeVisitor( - origResolver, - this, - phoenixStatement.getConnection()); - filter.accept(visitor); - } + public void addLeftTableFilter(ParseNode parseNode) throws SQLException { + if (isPrefilterAccepted) { + leftTable.addFilter(parseNode); + } else { + addPostJoinFilter(parseNode); + } + } - public void pushDownColumnRefVisitors( - ColumnRefParseNodeVisitor generalRefVisitor, - ColumnRefParseNodeVisitor joinLocalRefVisitor) throws SQLException { - for (ParseNode node : leftTable.getPostFilterParseNodes()) { - node.accept(generalRefVisitor); - } - for (ParseNode node : postFilters) { - node.accept(generalRefVisitor); - } - for (JoinSpec joinSpec : joinSpecs) { - JoinTable joinTable = joinSpec.getRhsJoinTable(); - boolean hasSubJoin = !joinTable.getJoinSpecs().isEmpty(); - for (EqualParseNode node : joinSpec.getOnConditions()) { - node.getLHS().accept(generalRefVisitor); - if (hasSubJoin) { - node.getRHS().accept(generalRefVisitor); - } else { - node.getRHS().accept(joinLocalRefVisitor); - } - } - joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor); - } - } + public List getPrefilterAcceptedJoinSpecs() { + return this.prefilterAcceptedTables; + } - /** - * Pruning columns for each {@link JoinCompiler.Table} if - * {@link JoinCompiler.Table#isSubselect()}. - * @throws SQLException - */ - public void pruneSubselectAliasedNodes() throws SQLException { - this.leftTable.pruneSubselectAliasedNodes(); - for (JoinSpec joinSpec : joinSpecs) { - JoinTable rhsJoinTablesContext = joinSpec.getRhsJoinTable();; - rhsJoinTablesContext.pruneSubselectAliasedNodes(); - } - } + /** + * try to decompose filter and push down to single table. 
+ */ + public void pushDownFilter(ParseNode filter) throws SQLException { + if (joinSpecs.isEmpty()) { + leftTable.addFilter(filter); + return; + } + + WhereNodeVisitor visitor = + new WhereNodeVisitor(origResolver, this, phoenixStatement.getConnection()); + filter.accept(visitor); + } - public Expression compilePostFilterExpression(StatementContext context) throws SQLException { - List filtersCombined = Lists. newArrayList(postFilters); - return JoinCompiler.compilePostFilterExpression(context, filtersCombined); + public void pushDownColumnRefVisitors(ColumnRefParseNodeVisitor generalRefVisitor, + ColumnRefParseNodeVisitor joinLocalRefVisitor) throws SQLException { + for (ParseNode node : leftTable.getPostFilterParseNodes()) { + node.accept(generalRefVisitor); + } + for (ParseNode node : postFilters) { + node.accept(generalRefVisitor); + } + for (JoinSpec joinSpec : joinSpecs) { + JoinTable joinTable = joinSpec.getRhsJoinTable(); + boolean hasSubJoin = !joinTable.getJoinSpecs().isEmpty(); + for (EqualParseNode node : joinSpec.getOnConditions()) { + node.getLHS().accept(generalRefVisitor); + if (hasSubJoin) { + node.getRHS().accept(generalRefVisitor); + } else { + node.getRHS().accept(joinLocalRefVisitor); + } } + joinTable.pushDownColumnRefVisitors(generalRefVisitor, joinLocalRefVisitor); + } + } - /** - * Return a list of all applicable join strategies. The order of the strategies in the - * returned list is based on the static rule below. However, the caller can decide on - * an optimal join strategy by evaluating and comparing the costs. - * 1. If hint USE_SORT_MERGE_JOIN is specified, - * return a singleton list containing only SORT_MERGE. - * 2. If 1) matches pattern "A LEFT/INNER/SEMI/ANTI JOIN B"; or - * 2) matches pattern "A LEFT/INNER/SEMI/ANTI JOIN B (LEFT/INNER/SEMI/ANTI JOIN C)+" - * and hint NO_STAR_JOIN is not specified, - * add BUILD_RIGHT to the returned list. - * 3. If matches pattern "A RIGHT/INNER JOIN B", where B is either a named table reference - * or a flat sub-query, - * add BUILD_LEFT to the returned list. - * 4. add SORT_MERGE to the returned list. - * @throws SQLException - */ - public List getApplicableJoinStrategies() throws SQLException { - List strategies = Lists.newArrayList(); - if (useSortMergeJoin) { - strategies.add(Strategy.SORT_MERGE); - } else { - if (getStarJoinVector() != null) { - strategies.add(Strategy.HASH_BUILD_RIGHT); - } - JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); - JoinType type = lastJoinSpec.getType(); - if ((type == JoinType.Right || type == JoinType.Inner) - && lastJoinSpec.getRhsJoinTable().getJoinSpecs().isEmpty() - && lastJoinSpec.getRhsJoinTable().getLeftTable().isCouldPushToServerAsHashJoinProbeSide()) { - strategies.add(Strategy.HASH_BUILD_LEFT); - } - strategies.add(Strategy.SORT_MERGE); - } - - return strategies; - } + /** + * Pruning columns for each {@link JoinCompiler.Table} if + * {@link JoinCompiler.Table#isSubselect()}. + */ + public void pruneSubselectAliasedNodes() throws SQLException { + this.leftTable.pruneSubselectAliasedNodes(); + for (JoinSpec joinSpec : joinSpecs) { + JoinTable rhsJoinTablesContext = joinSpec.getRhsJoinTable(); + ; + rhsJoinTablesContext.pruneSubselectAliasedNodes(); + } + } - /** - * Returns a boolean vector indicating whether the evaluation of join expressions - * can be evaluated at an early stage if the input JoinSpec can be taken as a - * star join. Otherwise returns null. - * @return a boolean vector for a star join; or null for non star join. 
- * @throws SQLException - */ - public boolean[] getStarJoinVector() throws SQLException { - int count = joinSpecs.size(); - if (!leftTable.isCouldPushToServerAsHashJoinProbeSide() || - (!useStarJoin - && count > 1 - && joinSpecs.get(count - 1).getType() != JoinType.Left - && joinSpecs.get(count - 1).getType() != JoinType.Semi - && joinSpecs.get(count - 1).getType() != JoinType.Anti - && !joinSpecs.get(count - 1).isSingleValueOnly())) - return null; - - boolean[] vector = new boolean[count]; - for (int i = 0; i < count; i++) { - JoinSpec joinSpec = joinSpecs.get(i); - if (joinSpec.getType() != JoinType.Left - && joinSpec.getType() != JoinType.Inner - && joinSpec.getType() != JoinType.Semi - && joinSpec.getType() != JoinType.Anti) - return null; - vector[i] = true; - Iterator iter = joinSpec.getDependentTableRefs().iterator(); - while (vector[i] == true && iter.hasNext()) { - TableRef tableRef = iter.next(); - if (!tableRef.equals(leftTable.getTableRef())) { - vector[i] = false; - } - } - } + public Expression compilePostFilterExpression(StatementContext context) throws SQLException { + List filtersCombined = Lists. newArrayList(postFilters); + return JoinCompiler.compilePostFilterExpression(context, filtersCombined); + } - return vector; - } + /** + * Return a list of all applicable join strategies. The order of the strategies in the returned + * list is based on the static rule below. However, the caller can decide on an optimal join + * strategy by evaluating and comparing the costs. 1. If hint USE_SORT_MERGE_JOIN is specified, + * return a singleton list containing only SORT_MERGE. 2. If 1) matches pattern "A + * LEFT/INNER/SEMI/ANTI JOIN B"; or 2) matches pattern "A LEFT/INNER/SEMI/ANTI JOIN B + * (LEFT/INNER/SEMI/ANTI JOIN C)+" and hint NO_STAR_JOIN is not specified, add BUILD_RIGHT to + * the returned list. 3. If matches pattern "A RIGHT/INNER JOIN B", where B is either a named + * table reference or a flat sub-query, add BUILD_LEFT to the returned list. 4. add SORT_MERGE + * to the returned list. + */ + public List getApplicableJoinStrategies() throws SQLException { + List strategies = Lists.newArrayList(); + if (useSortMergeJoin) { + strategies.add(Strategy.SORT_MERGE); + } else { + if (getStarJoinVector() != null) { + strategies.add(Strategy.HASH_BUILD_RIGHT); + } + JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); + JoinType type = lastJoinSpec.getType(); + if ( + (type == JoinType.Right || type == JoinType.Inner) + && lastJoinSpec.getRhsJoinTable().getJoinSpecs().isEmpty() + && lastJoinSpec.getRhsJoinTable().getLeftTable() + .isCouldPushToServerAsHashJoinProbeSide() + ) { + strategies.add(Strategy.HASH_BUILD_LEFT); + } + strategies.add(Strategy.SORT_MERGE); + } + + return strategies; + } - /** - * create a new {@link JoinTable} exclude the last {@link JoinSpec}, - * and try to push {@link #postFilters} to the new {@link JoinTable}. - * @param phoenixConnection - * @return - * @throws SQLException - */ - public JoinTable createSubJoinTable( - PhoenixConnection phoenixConnection) throws SQLException { - assert joinSpecs.size() > 0; - JoinTable newJoinTablesContext = joinSpecs.size() > 1 ? 
- new JoinTable(leftTable, joinSpecs.subList(0, joinSpecs.size() - 1)) : - new JoinTable(leftTable); - JoinType rightmostJoinType = joinSpecs.get(joinSpecs.size() - 1).getType(); - if(rightmostJoinType == JoinType.Right || rightmostJoinType == JoinType.Full) { - return newJoinTablesContext; - } + /** + * Returns a boolean vector indicating whether the evaluation of join expressions can be + * evaluated at an early stage if the input JoinSpec can be taken as a star join. Otherwise + * returns null. + * @return a boolean vector for a star join; or null for non star join. + */ + public boolean[] getStarJoinVector() throws SQLException { + int count = joinSpecs.size(); + if ( + !leftTable.isCouldPushToServerAsHashJoinProbeSide() + || (!useStarJoin && count > 1 && joinSpecs.get(count - 1).getType() != JoinType.Left + && joinSpecs.get(count - 1).getType() != JoinType.Semi + && joinSpecs.get(count - 1).getType() != JoinType.Anti + && !joinSpecs.get(count - 1).isSingleValueOnly()) + ) return null; + + boolean[] vector = new boolean[count]; + for (int i = 0; i < count; i++) { + JoinSpec joinSpec = joinSpecs.get(i); + if ( + joinSpec.getType() != JoinType.Left && joinSpec.getType() != JoinType.Inner + && joinSpec.getType() != JoinType.Semi && joinSpec.getType() != JoinType.Anti + ) return null; + vector[i] = true; + Iterator iter = joinSpec.getDependentTableRefs().iterator(); + while (vector[i] == true && iter.hasNext()) { + TableRef tableRef = iter.next(); + if (!tableRef.equals(leftTable.getTableRef())) { + vector[i] = false; + } + } + } + + return vector; + } - if(this.postFilters.isEmpty()) { - return newJoinTablesContext; - } + /** + * create a new {@link JoinTable} exclude the last {@link JoinSpec}, and try to push + * {@link #postFilters} to the new {@link JoinTable}. + */ + public JoinTable createSubJoinTable(PhoenixConnection phoenixConnection) throws SQLException { + assert joinSpecs.size() > 0; + JoinTable newJoinTablesContext = joinSpecs.size() > 1 + ? 
new JoinTable(leftTable, joinSpecs.subList(0, joinSpecs.size() - 1)) + : new JoinTable(leftTable); + JoinType rightmostJoinType = joinSpecs.get(joinSpecs.size() - 1).getType(); + if (rightmostJoinType == JoinType.Right || rightmostJoinType == JoinType.Full) { + return newJoinTablesContext; + } + + if (this.postFilters.isEmpty()) { + return newJoinTablesContext; + } + + PushDownPostFilterParseNodeVisitor pushDownPostFilterNodeVistor = + new PushDownPostFilterParseNodeVisitor(JoinCompiler.this.origResolver, newJoinTablesContext, + phoenixConnection); + int index = 0; + List newPostFilterParseNodes = null; + for (ParseNode postFilterParseNode : this.postFilters) { + ParseNode newPostFilterParseNode = postFilterParseNode.accept(pushDownPostFilterNodeVistor); + if (newPostFilterParseNode != postFilterParseNode && newPostFilterParseNodes == null) { + newPostFilterParseNodes = new ArrayList(this.postFilters.subList(0, index)); + } + if (newPostFilterParseNodes != null && newPostFilterParseNode != null) { + newPostFilterParseNodes.add(newPostFilterParseNode); + } + index++; + } + if (newPostFilterParseNodes != null) { + this.postFilters = newPostFilterParseNodes; + } + return newJoinTablesContext; + } - PushDownPostFilterParseNodeVisitor pushDownPostFilterNodeVistor = - new PushDownPostFilterParseNodeVisitor( - JoinCompiler.this.origResolver, - newJoinTablesContext, - phoenixConnection); - int index = 0; - List newPostFilterParseNodes = null; - for(ParseNode postFilterParseNode : this.postFilters) { - ParseNode newPostFilterParseNode = - postFilterParseNode.accept(pushDownPostFilterNodeVistor); - if(newPostFilterParseNode != postFilterParseNode && - newPostFilterParseNodes == null) { - newPostFilterParseNodes = - new ArrayList(this.postFilters.subList(0, index)); - } - if(newPostFilterParseNodes != null && newPostFilterParseNode != null) { - newPostFilterParseNodes.add(newPostFilterParseNode); - } - index++; - } - if(newPostFilterParseNodes != null) { - this.postFilters = newPostFilterParseNodes; - } - return newJoinTablesContext; - } + public SelectStatement getAsSingleSubquery(SelectStatement query, boolean asSubquery) + throws SQLException { + assert (isCouldPushToServerAsHashJoinProbeSide(query)); - public SelectStatement getAsSingleSubquery(SelectStatement query, boolean asSubquery) throws SQLException { - assert (isCouldPushToServerAsHashJoinProbeSide(query)); + if (asSubquery) return query; - if (asSubquery) - return query; + return NODE_FACTORY.select(originalJoinSelectStatement, query.getFrom(), query.getWhere()); + } - return NODE_FACTORY.select(originalJoinSelectStatement, query.getFrom(), query.getWhere()); + public boolean hasPostReference() { + for (Table table : allTables) { + if (table.isWildCardSelect()) { + return true; } + } - public boolean hasPostReference() { - for (Table table : allTables) { - if (table.isWildCardSelect()) { - return true; - } - } - - for (Map.Entry e : columnRefs.entrySet()) { - if (e.getValue() == ColumnRefType.GENERAL && - allTableRefs.contains(e.getKey().getTableRef())) { - return true; - } - } - - return false; + for (Map.Entry e : columnRefs.entrySet()) { + if ( + e.getValue() == ColumnRefType.GENERAL && allTableRefs.contains(e.getKey().getTableRef()) + ) { + return true; } + } - public boolean hasFilters() { - if (!postFilters.isEmpty()) - return true; - - if (isPrefilterAccepted && leftTable.hasFilters()) - return true; - - for (JoinSpec joinSpec : prefilterAcceptedTables) { - if (joinSpec.getRhsJoinTable().hasFilters()) - return true; - } - - return 
false; - } + return false; } - public class JoinSpec { - private final JoinType type; - private final List onConditions; - private final JoinTable rhsJoinTable; - private final boolean singleValueOnly; - private Set dependentTableRefs; - private OnNodeVisitor onNodeVisitor; - - private JoinSpec(JoinType type, ParseNode onNode, JoinTable joinTable, - boolean singleValueOnly, ColumnResolver resolver) throws SQLException { - this.type = type; - this.onConditions = new ArrayList(); - this.rhsJoinTable = joinTable; - this.singleValueOnly = singleValueOnly; - this.dependentTableRefs = new HashSet(); - this.onNodeVisitor = new OnNodeVisitor(resolver, this, phoenixStatement.getConnection()); - if (onNode != null) { - this.pushDownOnCondition(onNode); - } - } - - /** - *
-         * 1.in {@link JoinSpec} ctor,try to push the filter in join on clause to where clause,
-         *   eg. for "a join b on a.id = b.id and b.code = 1 where a.name is not null", try to
-         *   push "b.code =1" in join on clause to where clause.
-         * 2.in{@link WhereNodeVisitor#visitLeave(ComparisonParseNode, List)}, for inner join,
-         *   try to push the join on condition in where clause to join on clause,
-         *   eg. for "a join b on a.id = b.id where a.name = b.name", try to push "a.name=b.name"
-         *   in where clause to join on clause.
-         * 
- * @param node - * @throws SQLException - */ - public void pushDownOnCondition(ParseNode node) throws SQLException { - node.accept(onNodeVisitor); - } + public boolean hasFilters() { + if (!postFilters.isEmpty()) return true; - public JoinType getType() { - return type; - } + if (isPrefilterAccepted && leftTable.hasFilters()) return true; - public List getOnConditions() { - return onConditions; - } + for (JoinSpec joinSpec : prefilterAcceptedTables) { + if (joinSpec.getRhsJoinTable().hasFilters()) return true; + } - public JoinTable getRhsJoinTable() { - return rhsJoinTable; - } - - public List getRhsJoinTableRefs() { - return this.rhsJoinTable.getAllTableRefs(); - } + return false; + } + } + + public class JoinSpec { + private final JoinType type; + private final List onConditions; + private final JoinTable rhsJoinTable; + private final boolean singleValueOnly; + private Set dependentTableRefs; + private OnNodeVisitor onNodeVisitor; + + private JoinSpec(JoinType type, ParseNode onNode, JoinTable joinTable, boolean singleValueOnly, + ColumnResolver resolver) throws SQLException { + this.type = type; + this.onConditions = new ArrayList(); + this.rhsJoinTable = joinTable; + this.singleValueOnly = singleValueOnly; + this.dependentTableRefs = new HashSet(); + this.onNodeVisitor = new OnNodeVisitor(resolver, this, phoenixStatement.getConnection()); + if (onNode != null) { + this.pushDownOnCondition(onNode); + } + } - public void pushDownFilterToRhsJoinTable(ParseNode parseNode) throws SQLException { - this.rhsJoinTable.pushDownFilter(parseNode); - } + /** + *
+     * 1. In the {@link JoinSpec} ctor, try to push a filter in the join on clause to the where
+     *   clause, e.g. for "a join b on a.id = b.id and b.code = 1 where a.name is not null",
+     *   try to push "b.code = 1" from the join on clause to the where clause.
+     * 2. In {@link WhereNodeVisitor#visitLeave(ComparisonParseNode, List)}, for an inner join,
+     *   try to push a join condition in the where clause to the join on clause, e.g. for
+     *   "a join b on a.id = b.id where a.name = b.name", try to push "a.name = b.name" from the
+     *   where clause to the join on clause.
+     * 
+ */ + public void pushDownOnCondition(ParseNode node) throws SQLException { + node.accept(onNodeVisitor); + } - public void addOnCondition(EqualParseNode equalParseNode) { - this.onConditions.add(equalParseNode); - } + public JoinType getType() { + return type; + } - public void addDependentTableRefs(Collection tableRefs) { - this.dependentTableRefs.addAll(tableRefs); - } + public List getOnConditions() { + return onConditions; + } - public boolean isSingleValueOnly() { - return singleValueOnly; - } + public JoinTable getRhsJoinTable() { + return rhsJoinTable; + } - public Set getDependentTableRefs() { - return dependentTableRefs; - } + public List getRhsJoinTableRefs() { + return this.rhsJoinTable.getAllTableRefs(); + } - public Pair, List> compileJoinConditions(StatementContext lhsCtx, StatementContext rhsCtx, Strategy strategy) throws SQLException { - if (onConditions.isEmpty()) { - return new Pair, List>( - Collections. singletonList(LiteralExpression.newConstant(1)), - Collections. singletonList(LiteralExpression.newConstant(1))); - } + public void pushDownFilterToRhsJoinTable(ParseNode parseNode) throws SQLException { + this.rhsJoinTable.pushDownFilter(parseNode); + } - List> compiled = Lists.> newArrayListWithExpectedSize(onConditions.size()); - ExpressionCompiler lhsCompiler = new ExpressionCompiler(lhsCtx); - ExpressionCompiler rhsCompiler = new ExpressionCompiler(rhsCtx); - for (EqualParseNode condition : onConditions) { - lhsCompiler.reset(); - Expression left = condition.getLHS().accept(lhsCompiler); - rhsCompiler.reset(); - Expression right = condition.getRHS().accept(rhsCompiler); - PDataType toType = getCommonType(left.getDataType(), right.getDataType()); - SortOrder toSortOrder = strategy == Strategy.SORT_MERGE ? SortOrder.ASC : (strategy == Strategy.HASH_BUILD_LEFT ? right.getSortOrder() : left.getSortOrder()); - if (left.getDataType() != toType || left.getSortOrder() != toSortOrder) { - left = CoerceExpression.create(left, toType, toSortOrder, left.getMaxLength()); - } - if (right.getDataType() != toType || right.getSortOrder() != toSortOrder) { - right = CoerceExpression.create(right, toType, toSortOrder, right.getMaxLength()); - } - compiled.add(new Pair(left, right)); - } - // TODO PHOENIX-4618: - // For Stategy.SORT_MERGE, we probably need to re-order the join keys based on the - // specific ordering required by the join's parent, or re-order the following way - // to align with group-by expressions' re-ordering. - if (strategy != Strategy.SORT_MERGE) { - Collections.sort(compiled, new Comparator>() { - @Override - public int compare(Pair o1, Pair o2) { - Expression e1 = o1.getFirst(); - Expression e2 = o2.getFirst(); - boolean isFixed1 = e1.getDataType().isFixedWidth(); - boolean isFixed2 = e2.getDataType().isFixedWidth(); - boolean isFixedNullable1 = e1.isNullable() &&isFixed1; - boolean isFixedNullable2 = e2.isNullable() && isFixed2; - if (isFixedNullable1 == isFixedNullable2) { - if (isFixed1 == isFixed2) { - return 0; - } else if (isFixed1) { - return -1; - } else { - return 1; - } - } else if (isFixedNullable1) { - return 1; - } else { - return -1; - } - } - }); - } - List lConditions = Lists. newArrayListWithExpectedSize(compiled.size()); - List rConditions = Lists. 
newArrayListWithExpectedSize(compiled.size()); - for (Pair pair : compiled) { - lConditions.add(pair.getFirst()); - rConditions.add(pair.getSecond()); - } + public void addOnCondition(EqualParseNode equalParseNode) { + this.onConditions.add(equalParseNode); + } - return new Pair, List>(lConditions, rConditions); - } + public void addDependentTableRefs(Collection tableRefs) { + this.dependentTableRefs.addAll(tableRefs); + } - private PDataType getCommonType(PDataType lType, PDataType rType) throws SQLException { - if (lType == rType) - return lType; + public boolean isSingleValueOnly() { + return singleValueOnly; + } - if (!lType.isComparableTo(rType)) - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage("On-clause LHS expression and RHS expression must be comparable. LHS type: " + lType + ", RHS type: " + rType) - .build().buildException(); + public Set getDependentTableRefs() { + return dependentTableRefs; + } - if (lType.isCoercibleTo(PTinyint.INSTANCE) - && (rType == null || rType.isCoercibleTo(PTinyint.INSTANCE))) { - return lType; // to preserve UNSIGNED type - } - if (lType.isCoercibleTo(PSmallint.INSTANCE) - && (rType == null || rType.isCoercibleTo(PSmallint.INSTANCE))) { - return lType; // to preserve UNSIGNED type - } - if (lType.isCoercibleTo(PInteger.INSTANCE) - && (rType == null || rType.isCoercibleTo(PInteger.INSTANCE))) { - return lType; // to preserve UNSIGNED type - } - if (lType.isCoercibleTo(PLong.INSTANCE) - && (rType == null || rType.isCoercibleTo(PLong.INSTANCE))) { - return lType; // to preserve UNSIGNED type - } - if (lType.isCoercibleTo(PDouble.INSTANCE) - && (rType == null || rType.isCoercibleTo(PDouble.INSTANCE))) { - return lType; // to preserve UNSIGNED type - } - if (lType.isCoercibleTo(PDecimal.INSTANCE) - && (rType == null || rType.isCoercibleTo(PDecimal.INSTANCE))) { - return PDecimal.INSTANCE; - } - if (lType.isCoercibleTo(PDate.INSTANCE) - && (rType == null || rType.isCoercibleTo(PDate.INSTANCE))) { - return lType; - } - if (lType.isCoercibleTo(PTimestamp.INSTANCE) - && (rType == null || rType.isCoercibleTo(PTimestamp.INSTANCE))) { - return lType; - } - if (lType.isCoercibleTo(PVarchar.INSTANCE) - && (rType == null || rType.isCoercibleTo(PVarchar.INSTANCE))) { - return PVarchar.INSTANCE; - } - if (lType.isCoercibleTo(PBoolean.INSTANCE) - && (rType == null || rType.isCoercibleTo(PBoolean.INSTANCE))) { - return PBoolean.INSTANCE; + public Pair, List> compileJoinConditions(StatementContext lhsCtx, + StatementContext rhsCtx, Strategy strategy) throws SQLException { + if (onConditions.isEmpty()) { + return new Pair, List>( + Collections. singletonList(LiteralExpression.newConstant(1)), + Collections. singletonList(LiteralExpression.newConstant(1))); + } + + List> compiled = + Lists.> newArrayListWithExpectedSize(onConditions.size()); + ExpressionCompiler lhsCompiler = new ExpressionCompiler(lhsCtx); + ExpressionCompiler rhsCompiler = new ExpressionCompiler(rhsCtx); + for (EqualParseNode condition : onConditions) { + lhsCompiler.reset(); + Expression left = condition.getLHS().accept(lhsCompiler); + rhsCompiler.reset(); + Expression right = condition.getRHS().accept(rhsCompiler); + PDataType toType = getCommonType(left.getDataType(), right.getDataType()); + SortOrder toSortOrder = strategy == Strategy.SORT_MERGE + ? SortOrder.ASC + : (strategy == Strategy.HASH_BUILD_LEFT ? 
right.getSortOrder() : left.getSortOrder()); + if (left.getDataType() != toType || left.getSortOrder() != toSortOrder) { + left = CoerceExpression.create(left, toType, toSortOrder, left.getMaxLength()); + } + if (right.getDataType() != toType || right.getSortOrder() != toSortOrder) { + right = CoerceExpression.create(right, toType, toSortOrder, right.getMaxLength()); + } + compiled.add(new Pair(left, right)); + } + // TODO PHOENIX-4618: + // For Stategy.SORT_MERGE, we probably need to re-order the join keys based on the + // specific ordering required by the join's parent, or re-order the following way + // to align with group-by expressions' re-ordering. + if (strategy != Strategy.SORT_MERGE) { + Collections.sort(compiled, new Comparator>() { + @Override + public int compare(Pair o1, Pair o2) { + Expression e1 = o1.getFirst(); + Expression e2 = o2.getFirst(); + boolean isFixed1 = e1.getDataType().isFixedWidth(); + boolean isFixed2 = e2.getDataType().isFixedWidth(); + boolean isFixedNullable1 = e1.isNullable() && isFixed1; + boolean isFixedNullable2 = e2.isNullable() && isFixed2; + if (isFixedNullable1 == isFixedNullable2) { + if (isFixed1 == isFixed2) { + return 0; + } else if (isFixed1) { + return -1; + } else { + return 1; + } + } else if (isFixedNullable1) { + return 1; + } else { + return -1; } - return PVarbinary.INSTANCE; - } + } + }); + } + List lConditions = + Lists. newArrayListWithExpectedSize(compiled.size()); + List rConditions = + Lists. newArrayListWithExpectedSize(compiled.size()); + for (Pair pair : compiled) { + lConditions.add(pair.getFirst()); + rConditions.add(pair.getSecond()); + } + + return new Pair, List>(lConditions, rConditions); } - public class Table { - private TableNode tableNode; - private final boolean isWildcard; - private final List dynamicColumns; - private final Double tableSamplingRate; - private SelectStatement subselectStatement; - private TableRef tableRef; - /** - * Which could as this {@link Table}'s where conditions. - * Note: for {@link #isSubselect()}, added preFilterParseNode - * is at first rewritten by - * {@link SubselectRewriter#rewritePreFilterForSubselect}. - */ - private final List preFilterParseNodes; - /** - * Only make sense for {@link #isSubselect()}. - * {@link #postFilterParseNodes} could not as this - * {@link Table}'s where conditions, but need to filter after - * {@link #getSelectStatementByApplyPreFiltersForSubselect()} - * is executed. - */ - private final List postFilterParseNodes; - /** - * Determined by {@link SubselectRewriter#isFilterCanPushDownToSelect}. 
- * Only make sense for {@link #isSubselect()}, - */ - private final boolean filterCanPushDownToSubselect; - - private Table(TableNode tableNode, boolean isWildcard, List dynamicColumns, - Double tableSamplingRate, TableRef tableRef) { - this.tableNode = tableNode; - this.isWildcard = isWildcard; - this.dynamicColumns = dynamicColumns; - this.tableSamplingRate=tableSamplingRate; - this.subselectStatement = null; - this.tableRef = tableRef; - this.preFilterParseNodes = new ArrayList(); - this.postFilterParseNodes = Collections.emptyList(); - this.filterCanPushDownToSubselect = false; - } - - private Table(DerivedTableNode tableNode, boolean isWildcard, TableRef tableRef) throws SQLException { - this.tableNode = tableNode; - this.isWildcard = isWildcard; - this.dynamicColumns = Collections.emptyList(); - this.tableSamplingRate=ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE; - this.subselectStatement = SubselectRewriter.flatten(tableNode.getSelect(), phoenixStatement.getConnection()); - this.tableRef = tableRef; - this.preFilterParseNodes = new ArrayList(); - this.postFilterParseNodes = new ArrayList(); - this.filterCanPushDownToSubselect = SubselectRewriter.isFilterCanPushDownToSelect(subselectStatement); - } + private PDataType getCommonType(PDataType lType, PDataType rType) throws SQLException { + if (lType == rType) return lType; + + if ( + !lType.isComparableTo(rType) + ) throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) + .setMessage("On-clause LHS expression and RHS expression must be comparable. LHS type: " + + lType + ", RHS type: " + rType) + .build().buildException(); + + if ( + lType.isCoercibleTo(PTinyint.INSTANCE) + && (rType == null || rType.isCoercibleTo(PTinyint.INSTANCE)) + ) { + return lType; // to preserve UNSIGNED type + } + if ( + lType.isCoercibleTo(PSmallint.INSTANCE) + && (rType == null || rType.isCoercibleTo(PSmallint.INSTANCE)) + ) { + return lType; // to preserve UNSIGNED type + } + if ( + lType.isCoercibleTo(PInteger.INSTANCE) + && (rType == null || rType.isCoercibleTo(PInteger.INSTANCE)) + ) { + return lType; // to preserve UNSIGNED type + } + if ( + lType.isCoercibleTo(PLong.INSTANCE) + && (rType == null || rType.isCoercibleTo(PLong.INSTANCE)) + ) { + return lType; // to preserve UNSIGNED type + } + if ( + lType.isCoercibleTo(PDouble.INSTANCE) + && (rType == null || rType.isCoercibleTo(PDouble.INSTANCE)) + ) { + return lType; // to preserve UNSIGNED type + } + if ( + lType.isCoercibleTo(PDecimal.INSTANCE) + && (rType == null || rType.isCoercibleTo(PDecimal.INSTANCE)) + ) { + return PDecimal.INSTANCE; + } + if ( + lType.isCoercibleTo(PDate.INSTANCE) + && (rType == null || rType.isCoercibleTo(PDate.INSTANCE)) + ) { + return lType; + } + if ( + lType.isCoercibleTo(PTimestamp.INSTANCE) + && (rType == null || rType.isCoercibleTo(PTimestamp.INSTANCE)) + ) { + return lType; + } + if ( + lType.isCoercibleTo(PVarchar.INSTANCE) + && (rType == null || rType.isCoercibleTo(PVarchar.INSTANCE)) + ) { + return PVarchar.INSTANCE; + } + if ( + lType.isCoercibleTo(PBoolean.INSTANCE) + && (rType == null || rType.isCoercibleTo(PBoolean.INSTANCE)) + ) { + return PBoolean.INSTANCE; + } + return PVarbinary.INSTANCE; + } + } + + public class Table { + private TableNode tableNode; + private final boolean isWildcard; + private final List dynamicColumns; + private final Double tableSamplingRate; + private SelectStatement subselectStatement; + private TableRef tableRef; + /** + * Which could as this {@link Table}'s where conditions. 
Note: for {@link #isSubselect()}, added + * preFilterParseNode is at first rewritten by + * {@link SubselectRewriter#rewritePreFilterForSubselect}. + */ + private final List preFilterParseNodes; + /** + * Only make sense for {@link #isSubselect()}. {@link #postFilterParseNodes} could not as this + * {@link Table}'s where conditions, but need to filter after + * {@link #getSelectStatementByApplyPreFiltersForSubselect()} is executed. + */ + private final List postFilterParseNodes; + /** + * Determined by {@link SubselectRewriter#isFilterCanPushDownToSelect}. Only make sense for + * {@link #isSubselect()}, + */ + private final boolean filterCanPushDownToSubselect; + + private Table(TableNode tableNode, boolean isWildcard, List dynamicColumns, + Double tableSamplingRate, TableRef tableRef) { + this.tableNode = tableNode; + this.isWildcard = isWildcard; + this.dynamicColumns = dynamicColumns; + this.tableSamplingRate = tableSamplingRate; + this.subselectStatement = null; + this.tableRef = tableRef; + this.preFilterParseNodes = new ArrayList(); + this.postFilterParseNodes = Collections. emptyList(); + this.filterCanPushDownToSubselect = false; + } - public TableNode getTableNode() { - return tableNode; - } + private Table(DerivedTableNode tableNode, boolean isWildcard, TableRef tableRef) + throws SQLException { + this.tableNode = tableNode; + this.isWildcard = isWildcard; + this.dynamicColumns = Collections. emptyList(); + this.tableSamplingRate = ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE; + this.subselectStatement = + SubselectRewriter.flatten(tableNode.getSelect(), phoenixStatement.getConnection()); + this.tableRef = tableRef; + this.preFilterParseNodes = new ArrayList(); + this.postFilterParseNodes = new ArrayList(); + this.filterCanPushDownToSubselect = + SubselectRewriter.isFilterCanPushDownToSelect(subselectStatement); + } - public List getDynamicColumns() { - return dynamicColumns; - } - - public Double getTableSamplingRate() { - return tableSamplingRate; - } + public TableNode getTableNode() { + return tableNode; + } - public boolean isSubselect() { - return subselectStatement != null; - } + public List getDynamicColumns() { + return dynamicColumns; + } - public SelectStatement getSubselectStatement() { - return this.subselectStatement; - } + public Double getTableSamplingRate() { + return tableSamplingRate; + } - /** - * Pruning columns if {@link #isSubselect()}. - * Note: If some columns are pruned, the {@link JoinCompiler#origResolver} should be refreshed. - * @throws SQLException - */ - public void pruneSubselectAliasedNodes() throws SQLException { - if(!this.isSubselect()) { - return; - } - Set referencedColumnNames = this.getReferencedColumnNames(); - SelectStatement newSubselectStatement = - SubselectRewriter.pruneSelectAliasedNodes( - this.subselectStatement, - referencedColumnNames, - phoenixStatement.getConnection()); - if(!newSubselectStatement.getSelect().equals(this.subselectStatement.getSelect())) { - /** - * The columns are pruned, so {@link ColumnResolver} should be refreshed. 
- */ - DerivedTableNode newDerivedTableNode = - NODE_FACTORY.derivedTable(this.tableNode.getAlias(), newSubselectStatement); - TableRef newTableRef = - FromCompiler.refreshDerivedTableNode(origResolver, newDerivedTableNode); - assert newTableRef != null; - this.subselectStatement = newSubselectStatement; - this.tableRef = newTableRef; - this.tableNode = newDerivedTableNode; - } - } + public boolean isSubselect() { + return subselectStatement != null; + } - /** - * Collect the referenced columns of this {@link Table} - * according to {@link JoinCompiler#columnNodes}. - * @return - * @throws SQLException - */ - private Set getReferencedColumnNames() throws SQLException { - assert(this.isSubselect()); - if (isWildCardSelect()) { - return null; - } - Set referencedColumnNames = new HashSet(); - for (Map.Entry entry : columnNodes.entrySet()) { - if (tableRef.equals(entry.getKey().getTableRef())) { - ColumnParseNode columnParseNode = entry.getValue(); - String normalizedColumnName = SchemaUtil.getNormalizedColumnName(columnParseNode); - referencedColumnNames.add(normalizedColumnName); - } - } - return referencedColumnNames; - } + public SelectStatement getSubselectStatement() { + return this.subselectStatement; + } + /** + * Pruning columns if {@link #isSubselect()}. Note: If some columns are pruned, the + * {@link JoinCompiler#origResolver} should be refreshed. + */ + public void pruneSubselectAliasedNodes() throws SQLException { + if (!this.isSubselect()) { + return; + } + Set referencedColumnNames = this.getReferencedColumnNames(); + SelectStatement newSubselectStatement = SubselectRewriter.pruneSelectAliasedNodes( + this.subselectStatement, referencedColumnNames, phoenixStatement.getConnection()); + if (!newSubselectStatement.getSelect().equals(this.subselectStatement.getSelect())) { /** - * Returns all the basic select nodes, no aggregation. + * The columns are pruned, so {@link ColumnResolver} should be refreshed. */ - public List getSelectAliasedNodes() { - if (isWildCardSelect()) { - return Collections.singletonList(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.wildcard())); - } - - List ret = new ArrayList(); - for (Map.Entry entry : columnNodes.entrySet()) { - if (tableRef.equals(entry.getKey().getTableRef())) { - ret.add(NODE_FACTORY.aliasedNode(null, entry.getValue())); - } - } - if (ret.isEmpty()) { - ret.add(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.literal(1))); - } - return ret; - } - - public List getPreFilterParseNodes() { - return preFilterParseNodes; - } + DerivedTableNode newDerivedTableNode = + NODE_FACTORY.derivedTable(this.tableNode.getAlias(), newSubselectStatement); + TableRef newTableRef = + FromCompiler.refreshDerivedTableNode(origResolver, newDerivedTableNode); + assert newTableRef != null; + this.subselectStatement = newSubselectStatement; + this.tableRef = newTableRef; + this.tableNode = newDerivedTableNode; + } + } - public List getPostFilterParseNodes() { - return postFilterParseNodes; - } + /** + * Collect the referenced columns of this {@link Table} according to + * {@link JoinCompiler#columnNodes}. 
+ */ + private Set getReferencedColumnNames() throws SQLException { + assert (this.isSubselect()); + if (isWildCardSelect()) { + return null; + } + Set referencedColumnNames = new HashSet(); + for (Map.Entry entry : columnNodes.entrySet()) { + if (tableRef.equals(entry.getKey().getTableRef())) { + ColumnParseNode columnParseNode = entry.getValue(); + String normalizedColumnName = SchemaUtil.getNormalizedColumnName(columnParseNode); + referencedColumnNames.add(normalizedColumnName); + } + } + return referencedColumnNames; + } - public TableRef getTableRef() { - return tableRef; - } + /** + * Returns all the basic select nodes, no aggregation. + */ + public List getSelectAliasedNodes() { + if (isWildCardSelect()) { + return Collections.singletonList(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.wildcard())); + } + + List ret = new ArrayList(); + for (Map.Entry entry : columnNodes.entrySet()) { + if (tableRef.equals(entry.getKey().getTableRef())) { + ret.add(NODE_FACTORY.aliasedNode(null, entry.getValue())); + } + } + if (ret.isEmpty()) { + ret.add(NODE_FACTORY.aliasedNode(null, NODE_FACTORY.literal(1))); + } + return ret; + } - public void addFilter(ParseNode filter) throws SQLException { - if (!isSubselect() || filterCanPushDownToSubselect) { - this.addPreFilter(filter); - } else { - postFilterParseNodes.add(filter); - } - } + public List getPreFilterParseNodes() { + return preFilterParseNodes; + } - /** - * If {@link #isSubselect()}, preFilterParseNode is at first rewritten by - * {@link SubselectRewriter#rewritePreFilterForSubselect} - * @param preFilterParseNode - * @throws SQLException - */ - private void addPreFilter(ParseNode preFilterParseNode) throws SQLException { - if(this.isSubselect()) { - preFilterParseNode = - SubselectRewriter.rewritePreFilterForSubselect( - preFilterParseNode, - this.subselectStatement, - tableNode.getAlias()); - } - preFilterParseNodes.add(preFilterParseNode); - } + public List getPostFilterParseNodes() { + return postFilterParseNodes; + } - public ParseNode getCombinedPreFilterParseNodes() { - return combine(preFilterParseNodes); - } + public TableRef getTableRef() { + return tableRef; + } - /** - * Get this {@link Table}'s new {@link SelectStatement} by applying {@link #preFilterParseNodes}, - * {@link #postFilterParseNodes} and additional newOrderByNodes. - * @param newOrderByNodes - * @return - * @throws SQLException - */ - public SelectStatement getAsSubquery(List newOrderByNodes) throws SQLException { - if (isSubselect()) { - return SubselectRewriter.applyOrderByAndPostFilters( - this.getSelectStatementByApplyPreFiltersForSubselect(), - newOrderByNodes, - tableNode.getAlias(), - postFilterParseNodes); - } + public void addFilter(ParseNode filter) throws SQLException { + if (!isSubselect() || filterCanPushDownToSubselect) { + this.addPreFilter(filter); + } else { + postFilterParseNodes.add(filter); + } + } - /** - * For flat table, {@link #postFilterParseNodes} is empty , because it can safely pushed down as - * {@link #preFilterParseNodes}. - */ - assert postFilterParseNodes == null || postFilterParseNodes.isEmpty(); - return NODE_FACTORY.select( - tableNode, - originalJoinSelectStatement.getHint(), - false, - getSelectAliasedNodes(), - getCombinedPreFilterParseNodes(), - null, - null, - newOrderByNodes, - null, - null, - 0, - false, - originalJoinSelectStatement.hasSequence(), - Collections. 
emptyList(), - originalJoinSelectStatement.getUdfParseNodes()); - } + /** + * If {@link #isSubselect()}, preFilterParseNode is at first rewritten by + * {@link SubselectRewriter#rewritePreFilterForSubselect} + */ + private void addPreFilter(ParseNode preFilterParseNode) throws SQLException { + if (this.isSubselect()) { + preFilterParseNode = SubselectRewriter.rewritePreFilterForSubselect(preFilterParseNode, + this.subselectStatement, tableNode.getAlias()); + } + preFilterParseNodes.add(preFilterParseNode); + } - public SelectStatement getAsSubqueryForOptimization(boolean applyGroupByOrOrderBy) throws SQLException { - assert (!isSubselect()); - - SelectStatement query = getAsSubquery(null); - if (!applyGroupByOrOrderBy) - return query; - - boolean addGroupBy = false; - boolean addOrderBy = false; - if (originalJoinSelectStatement.getGroupBy() != null && !originalJoinSelectStatement.getGroupBy().isEmpty()) { - ColumnRefParseNodeVisitor groupByVisitor = new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection()); - for (ParseNode node : originalJoinSelectStatement.getGroupBy()) { - node.accept(groupByVisitor); - } - Set set = groupByVisitor.getTableRefSet(); - if (set.size() == 1 && tableRef.equals(set.iterator().next())) { - addGroupBy = true; - } - } else if (originalJoinSelectStatement.getOrderBy() != null && !originalJoinSelectStatement.getOrderBy().isEmpty()) { - ColumnRefParseNodeVisitor orderByVisitor = new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection()); - for (OrderByNode node : originalJoinSelectStatement.getOrderBy()) { - node.getNode().accept(orderByVisitor); - } - Set set = orderByVisitor.getTableRefSet(); - if (set.size() == 1 && tableRef.equals(set.iterator().next())) { - addOrderBy = true; - } - } + public ParseNode getCombinedPreFilterParseNodes() { + return combine(preFilterParseNodes); + } - if (!addGroupBy && !addOrderBy) - return query; - - List selectList = query.getSelect(); - if (addGroupBy) { - assert (!isWildCardSelect()); - selectList = new ArrayList(query.getSelect().size()); - for (AliasedNode aliasedNode : query.getSelect()) { - ParseNode node = NODE_FACTORY.function( - MinAggregateFunction.NAME, Collections.singletonList(aliasedNode.getNode())); - selectList.add(NODE_FACTORY.aliasedNode(null, node)); - } - } + /** + * Get this {@link Table}'s new {@link SelectStatement} by applying + * {@link #preFilterParseNodes}, {@link #postFilterParseNodes} and additional newOrderByNodes. + */ + public SelectStatement getAsSubquery(List newOrderByNodes) throws SQLException { + if (isSubselect()) { + return SubselectRewriter.applyOrderByAndPostFilters( + this.getSelectStatementByApplyPreFiltersForSubselect(), newOrderByNodes, + tableNode.getAlias(), postFilterParseNodes); + } + + /** + * For flat table, {@link #postFilterParseNodes} is empty , because it can safely pushed down + * as {@link #preFilterParseNodes}. + */ + assert postFilterParseNodes == null || postFilterParseNodes.isEmpty(); + return NODE_FACTORY.select(tableNode, originalJoinSelectStatement.getHint(), false, + getSelectAliasedNodes(), getCombinedPreFilterParseNodes(), null, null, newOrderByNodes, + null, null, 0, false, originalJoinSelectStatement.hasSequence(), + Collections. emptyList(), originalJoinSelectStatement.getUdfParseNodes()); + } - return NODE_FACTORY.select(query.getFrom(), query.getHint(), query.isDistinct(), selectList, - query.getWhere(), addGroupBy ? originalJoinSelectStatement.getGroupBy() : query.getGroupBy(), - addGroupBy ? 
null : query.getHaving(), addOrderBy ? originalJoinSelectStatement.getOrderBy() : query.getOrderBy(), - query.getLimit(), query.getOffset(), query.getBindCount(), addGroupBy, query.hasSequence(), - query.getSelects(), query.getUdfParseNodes()); - } + public SelectStatement getAsSubqueryForOptimization(boolean applyGroupByOrOrderBy) + throws SQLException { + assert (!isSubselect()); + + SelectStatement query = getAsSubquery(null); + if (!applyGroupByOrOrderBy) return query; + + boolean addGroupBy = false; + boolean addOrderBy = false; + if ( + originalJoinSelectStatement.getGroupBy() != null + && !originalJoinSelectStatement.getGroupBy().isEmpty() + ) { + ColumnRefParseNodeVisitor groupByVisitor = + new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection()); + for (ParseNode node : originalJoinSelectStatement.getGroupBy()) { + node.accept(groupByVisitor); + } + Set set = groupByVisitor.getTableRefSet(); + if (set.size() == 1 && tableRef.equals(set.iterator().next())) { + addGroupBy = true; + } + } else if ( + originalJoinSelectStatement.getOrderBy() != null + && !originalJoinSelectStatement.getOrderBy().isEmpty() + ) { + ColumnRefParseNodeVisitor orderByVisitor = + new ColumnRefParseNodeVisitor(origResolver, phoenixStatement.getConnection()); + for (OrderByNode node : originalJoinSelectStatement.getOrderBy()) { + node.getNode().accept(orderByVisitor); + } + Set set = orderByVisitor.getTableRefSet(); + if (set.size() == 1 && tableRef.equals(set.iterator().next())) { + addOrderBy = true; + } + } + + if (!addGroupBy && !addOrderBy) return query; + + List selectList = query.getSelect(); + if (addGroupBy) { + assert (!isWildCardSelect()); + selectList = new ArrayList(query.getSelect().size()); + for (AliasedNode aliasedNode : query.getSelect()) { + ParseNode node = NODE_FACTORY.function(MinAggregateFunction.NAME, + Collections.singletonList(aliasedNode.getNode())); + selectList.add(NODE_FACTORY.aliasedNode(null, node)); + } + } + + return NODE_FACTORY.select(query.getFrom(), query.getHint(), query.isDistinct(), selectList, + query.getWhere(), + addGroupBy ? originalJoinSelectStatement.getGroupBy() : query.getGroupBy(), + addGroupBy ? null : query.getHaving(), + addOrderBy ? originalJoinSelectStatement.getOrderBy() : query.getOrderBy(), + query.getLimit(), query.getOffset(), query.getBindCount(), addGroupBy, query.hasSequence(), + query.getSelects(), query.getUdfParseNodes()); + } - public boolean hasFilters() { - return isSubselect() ? - (!postFilterParseNodes.isEmpty() || subselectStatement.getWhere() != null || subselectStatement.getHaving() != null) : - !preFilterParseNodes.isEmpty(); - } + public boolean hasFilters() { + return isSubselect() + ? (!postFilterParseNodes.isEmpty() || subselectStatement.getWhere() != null + || subselectStatement.getHaving() != null) + : !preFilterParseNodes.isEmpty(); + } - /** - * Check if this {@link Table} could be pushed to RegionServer - * {@link HashJoinRegionScanner} as the probe side of Hash join. - * @return - * @throws SQLException - */ - public boolean isCouldPushToServerAsHashJoinProbeSide() throws SQLException { - /** - * If {@link #postFilterParseNodes} is not empty, obviously this {@link Table} - * should execute {@link #postFilterParseNodes} before join. - */ - if(this.postFilterParseNodes != null && !this.postFilterParseNodes.isEmpty()) { - return false; - } + /** + * Check if this {@link Table} could be pushed to RegionServer {@link HashJoinRegionScanner} as + * the probe side of Hash join. 
+ */ + public boolean isCouldPushToServerAsHashJoinProbeSide() throws SQLException { + /** + * If {@link #postFilterParseNodes} is not empty, obviously this {@link Table} should execute + * {@link #postFilterParseNodes} before join. + */ + if (this.postFilterParseNodes != null && !this.postFilterParseNodes.isEmpty()) { + return false; + } - SelectStatement selectStatementToUse = this.getAsSubquery(null); - RewriteResult rewriteResult = - ParseNodeUtil.rewrite(selectStatementToUse, phoenixStatement.getConnection()); - return JoinCompiler.isCouldPushToServerAsHashJoinProbeSide( - rewriteResult.getRewrittenSelectStatement()); - } + SelectStatement selectStatementToUse = this.getAsSubquery(null); + RewriteResult rewriteResult = + ParseNodeUtil.rewrite(selectStatementToUse, phoenixStatement.getConnection()); + return JoinCompiler + .isCouldPushToServerAsHashJoinProbeSide(rewriteResult.getRewrittenSelectStatement()); + } - /** - * Get this {@link Table}'s new {@link SelectStatement} only applying - * {@link #preFilterParseNodes} for {@link #isSubselect()}. - * @return - */ - private SelectStatement getSelectStatementByApplyPreFiltersForSubselect() { - return SubselectRewriter.applyPreFiltersForSubselect( - subselectStatement, - preFilterParseNodes, - tableNode.getAlias()); + /** + * Get this {@link Table}'s new {@link SelectStatement} only applying + * {@link #preFilterParseNodes} for {@link #isSubselect()}. + */ + private SelectStatement getSelectStatementByApplyPreFiltersForSubselect() { + return SubselectRewriter.applyPreFiltersForSubselect(subselectStatement, preFilterParseNodes, + tableNode.getAlias()); - } + } - protected boolean isWildCardSelect() { - return isWildcard; - } + protected boolean isWildCardSelect() { + return isWildcard; + } - public void projectColumns(Scan scan) { - assert(!isSubselect()); - if (isWildCardSelect()) { - scan.getFamilyMap().clear(); - return; - } - for (ColumnRef columnRef : columnRefs.keySet()) { - if (columnRef.getTableRef().equals(tableRef) - && !SchemaUtil.isPKColumn(columnRef.getColumn()) - && !(columnRef instanceof LocalIndexColumnRef)) { - EncodedColumnsUtil.setColumns(columnRef.getColumn(), tableRef.getTable(), scan); - } - } - } + public void projectColumns(Scan scan) { + assert (!isSubselect()); + if (isWildCardSelect()) { + scan.getFamilyMap().clear(); + return; + } + for (ColumnRef columnRef : columnRefs.keySet()) { + if ( + columnRef.getTableRef().equals(tableRef) && !SchemaUtil.isPKColumn(columnRef.getColumn()) + && !(columnRef instanceof LocalIndexColumnRef) + ) { + EncodedColumnsUtil.setColumns(columnRef.getColumn(), tableRef.getTable(), scan); + } + } + } - public PTable createProjectedTable(boolean retainPKColumns, StatementContext context) throws SQLException { - assert(!isSubselect()); - List sourceColumns = new ArrayList(); - PTable table = tableRef.getTable(); - if (retainPKColumns) { - for (PColumn column : table.getPKColumns()) { - sourceColumns.add(new ColumnRef(tableRef, column.getPosition())); - } - } - if (isWildCardSelect()) { - for (PColumn column : table.getColumns()) { - if (!retainPKColumns || !SchemaUtil.isPKColumn(column)) { - sourceColumns.add(new ColumnRef(tableRef, column.getPosition())); - } - } + public PTable createProjectedTable(boolean retainPKColumns, StatementContext context) + throws SQLException { + assert (!isSubselect()); + List sourceColumns = new ArrayList(); + PTable table = tableRef.getTable(); + if (retainPKColumns) { + for (PColumn column : table.getPKColumns()) { + sourceColumns.add(new 
ColumnRef(tableRef, column.getPosition())); + } + } + if (isWildCardSelect()) { + for (PColumn column : table.getColumns()) { + if (!retainPKColumns || !SchemaUtil.isPKColumn(column)) { + sourceColumns.add(new ColumnRef(tableRef, column.getPosition())); + } + } + } else { + for (Map.Entry e : columnRefs.entrySet()) { + ColumnRef columnRef = e.getKey(); + if ( + columnRef.getTableRef().equals(tableRef) + && (!retainPKColumns || !SchemaUtil.isPKColumn(columnRef.getColumn())) + ) { + if (columnRef instanceof LocalIndexColumnRef) { + sourceColumns.add(new IndexUncoveredDataColumnRef(context, tableRef, + IndexUtil.getIndexColumnName(columnRef.getColumn()))); } else { - for (Map.Entry e : columnRefs.entrySet()) { - ColumnRef columnRef = e.getKey(); - if (columnRef.getTableRef().equals(tableRef) - && (!retainPKColumns || !SchemaUtil.isPKColumn(columnRef.getColumn()))) { - if (columnRef instanceof LocalIndexColumnRef) { - sourceColumns.add(new IndexUncoveredDataColumnRef(context, tableRef, - IndexUtil.getIndexColumnName(columnRef.getColumn()))); - } else { - sourceColumns.add(columnRef); - } - } - } + sourceColumns.add(columnRef); } - - return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, retainPKColumns); + } } + } - public PTable createProjectedTable(RowProjector rowProjector) throws SQLException { - assert(isSubselect()); - TableRef tableRef = FromCompiler.getResolverForCompiledDerivedTable(phoenixStatement.getConnection(), this.tableRef, rowProjector).getTables().get(0); - List sourceColumns = new ArrayList(); - PTable table = tableRef.getTable(); - for (PColumn column : table.getColumns()) { - sourceColumns.add(new ColumnRef(tableRef, column.getPosition())); - } - return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, false); - } + return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, retainPKColumns); } + public PTable createProjectedTable(RowProjector rowProjector) throws SQLException { + assert (isSubselect()); + TableRef tableRef = + FromCompiler.getResolverForCompiledDerivedTable(phoenixStatement.getConnection(), + this.tableRef, rowProjector).getTables().get(0); + List sourceColumns = new ArrayList(); + PTable table = tableRef.getTable(); + for (PColumn column : table.getColumns()) { + sourceColumns.add(new ColumnRef(tableRef, column.getPosition())); + } + return TupleProjectionCompiler.createProjectedTable(tableRef, sourceColumns, false); + } + } + + /** + * Push down {@link JoinTable#postFilters} of Outermost-JoinTable to {@link JoinTable#postFilters} + * of Sub-JoinTable + */ + private static class PushDownPostFilterParseNodeVisitor + extends AndRewriterBooleanParseNodeVisitor { + private ColumnRefParseNodeVisitor columnRefParseNodeVisitor; /** - * Push down {@link JoinTable#postFilters} of Outermost-JoinTable to - * {@link JoinTable#postFilters} of Sub-JoinTable + * Sub-JoinTable to accept pushed down PostFilters. */ - private static class PushDownPostFilterParseNodeVisitor extends AndRewriterBooleanParseNodeVisitor { - private ColumnRefParseNodeVisitor columnRefParseNodeVisitor; - /** - * Sub-JoinTable to accept pushed down PostFilters. 
- */ - private JoinTable joinTable; - - public PushDownPostFilterParseNodeVisitor( - ColumnResolver resolver, - JoinTable joinTablesContext, - PhoenixConnection connection) { - super(NODE_FACTORY); - this.joinTable = joinTablesContext; - this.columnRefParseNodeVisitor = new ColumnRefParseNodeVisitor(resolver, connection); - } + private JoinTable joinTable; - @Override - protected ParseNode leaveBooleanNode( - ParseNode parentParseNode, List childParseNodes) throws SQLException { - columnRefParseNodeVisitor.reset(); - parentParseNode.accept(columnRefParseNodeVisitor); - ColumnRefParseNodeVisitor.ColumnRefType columnRefType = - columnRefParseNodeVisitor.getContentType( - this.joinTable.getAllTableRefs()); - if(columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.NONE || - columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY){ - this.joinTable.postFilters.add(parentParseNode); - return null; - } - return parentParseNode; - } + public PushDownPostFilterParseNodeVisitor(ColumnResolver resolver, JoinTable joinTablesContext, + PhoenixConnection connection) { + super(NODE_FACTORY); + this.joinTable = joinTablesContext; + this.columnRefParseNodeVisitor = new ColumnRefParseNodeVisitor(resolver, connection); } - private static class WhereNodeVisitor extends AndBooleanParseNodeVisitor { - private ColumnRefParseNodeVisitor columnRefVisitor; - private JoinTable joinTable; - - public WhereNodeVisitor( - ColumnResolver resolver, - JoinTable joinTablesContext, - PhoenixConnection connection) { - this.joinTable = joinTablesContext; - this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection); - } - - @Override - protected Void leaveBooleanNode(ParseNode node, - List l) throws SQLException { - columnRefVisitor.reset(); - node.accept(columnRefVisitor); - ColumnRefParseNodeVisitor.ColumnRefType type = - columnRefVisitor.getContentType(this.joinTable.getLeftTableRef()); - switch (type) { - case NONE: - case SELF_ONLY: - this.joinTable.addLeftTableFilter(node); - break; - case FOREIGN_ONLY: - JoinTable matched = null; - for (JoinSpec joinSpec : this.joinTable.getPrefilterAcceptedJoinSpecs()) { - if (columnRefVisitor.getContentType( - joinSpec.getRhsJoinTable().getAllTableRefs()) == - ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) { - matched = joinSpec.getRhsJoinTable(); - break; - } - } - if (matched != null) { - matched.pushDownFilter(node); - } else { - this.joinTable.addPostJoinFilter(node); - } - break; - default: - this.joinTable.addPostJoinFilter(node); - break; - } - return null; - } + @Override + protected ParseNode leaveBooleanNode(ParseNode parentParseNode, List childParseNodes) + throws SQLException { + columnRefParseNodeVisitor.reset(); + parentParseNode.accept(columnRefParseNodeVisitor); + ColumnRefParseNodeVisitor.ColumnRefType columnRefType = + columnRefParseNodeVisitor.getContentType(this.joinTable.getAllTableRefs()); + if ( + columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.NONE + || columnRefType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + ) { + this.joinTable.postFilters.add(parentParseNode); + return null; + } + return parentParseNode; + } + } - @Override - protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { - return null; - } + private static class WhereNodeVisitor extends AndBooleanParseNodeVisitor { + private ColumnRefParseNodeVisitor columnRefVisitor; + private JoinTable joinTable; - @Override - public Void visitLeave(AndParseNode node, List l) throws SQLException { - return null; - } + public 
WhereNodeVisitor(ColumnResolver resolver, JoinTable joinTablesContext, + PhoenixConnection connection) { + this.joinTable = joinTablesContext; + this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection); + } - @Override - public Void visitLeave(ComparisonParseNode node, List l) - throws SQLException { - if (!(node instanceof EqualParseNode)) - return leaveBooleanNode(node, l); - - List prefilterAcceptedJoinSpecs = - this.joinTable.getPrefilterAcceptedJoinSpecs(); - ListIterator iter = - prefilterAcceptedJoinSpecs.listIterator(prefilterAcceptedJoinSpecs.size()); - while (iter.hasPrevious()) { - JoinSpec joinSpec = iter.previous(); - if (joinSpec.getType() != JoinType.Inner || joinSpec.isSingleValueOnly()) { - continue; - } - - try { - joinSpec.pushDownOnCondition(node); - return null; - } catch (SQLException e) { - } + @Override + protected Void leaveBooleanNode(ParseNode node, List l) throws SQLException { + columnRefVisitor.reset(); + node.accept(columnRefVisitor); + ColumnRefParseNodeVisitor.ColumnRefType type = + columnRefVisitor.getContentType(this.joinTable.getLeftTableRef()); + switch (type) { + case NONE: + case SELF_ONLY: + this.joinTable.addLeftTableFilter(node); + break; + case FOREIGN_ONLY: + JoinTable matched = null; + for (JoinSpec joinSpec : this.joinTable.getPrefilterAcceptedJoinSpecs()) { + if ( + columnRefVisitor.getContentType(joinSpec.getRhsJoinTable().getAllTableRefs()) + == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + ) { + matched = joinSpec.getRhsJoinTable(); + break; } + } + if (matched != null) { + matched.pushDownFilter(node); + } else { + this.joinTable.addPostJoinFilter(node); + } + break; + default: + this.joinTable.addPostJoinFilter(node); + break; + } + return null; + } - return leaveBooleanNode(node, l); - } + @Override + protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { + return null; } - private static class OnNodeVisitor extends AndBooleanParseNodeVisitor { - private final ColumnRefParseNodeVisitor columnRefVisitor; - private final JoinSpec joinSpec; + @Override + public Void visitLeave(AndParseNode node, List l) throws SQLException { + return null; + } - public OnNodeVisitor( - ColumnResolver resolver, JoinSpec joinSpec, PhoenixConnection connection) { - this.joinSpec = joinSpec; - this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection); - } + @Override + public Void visitLeave(ComparisonParseNode node, List l) throws SQLException { + if (!(node instanceof EqualParseNode)) return leaveBooleanNode(node, l); - @Override - protected Void leaveBooleanNode(ParseNode node, - List l) throws SQLException { - columnRefVisitor.reset(); - node.accept(columnRefVisitor); - ColumnRefParseNodeVisitor.ColumnRefType type = - columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); - if (type == ColumnRefParseNodeVisitor.ColumnRefType.NONE - || type == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) { - this.joinSpec.pushDownFilterToRhsJoinTable(node); - } else { - throwAmbiguousJoinConditionException(); - } - return null; + List prefilterAcceptedJoinSpecs = this.joinTable.getPrefilterAcceptedJoinSpecs(); + ListIterator iter = + prefilterAcceptedJoinSpecs.listIterator(prefilterAcceptedJoinSpecs.size()); + while (iter.hasPrevious()) { + JoinSpec joinSpec = iter.previous(); + if (joinSpec.getType() != JoinType.Inner || joinSpec.isSingleValueOnly()) { + continue; } - @Override - protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { - return null; + try { + 
joinSpec.pushDownOnCondition(node); + return null; + } catch (SQLException e) { } + } - @Override - public Void visitLeave(AndParseNode node, List l) throws SQLException { - return null; - } + return leaveBooleanNode(node, l); + } + } - @Override - public Void visitLeave(ComparisonParseNode node, List l) - throws SQLException { - if (!(node instanceof EqualParseNode)) - return leaveBooleanNode(node, l); - columnRefVisitor.reset(); - node.getLHS().accept(columnRefVisitor); - ColumnRefParseNodeVisitor.ColumnRefType lhsType = - columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); - Set lhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet()); - columnRefVisitor.reset(); - node.getRHS().accept(columnRefVisitor); - ColumnRefParseNodeVisitor.ColumnRefType rhsType = - columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); - Set rhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet()); - if ((lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY || lhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE) - && (rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY || rhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE)) { - this.joinSpec.pushDownFilterToRhsJoinTable(node); - } else if (lhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY - && rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) { - this.joinSpec.addOnCondition((EqualParseNode) node); - this.joinSpec.addDependentTableRefs(lhsTableRefSet); - } else if (rhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY - && lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY) { - this.joinSpec.addOnCondition(NODE_FACTORY.equal(node.getRHS(), node.getLHS())); - this.joinSpec.addDependentTableRefs(rhsTableRefSet); - } else { - throwAmbiguousJoinConditionException(); - } - return null; - } + private static class OnNodeVisitor extends AndBooleanParseNodeVisitor { + private final ColumnRefParseNodeVisitor columnRefVisitor; + private final JoinSpec joinSpec; - /* - * Conditions in the ON clause can only be: - * 1) an equal test between a self table expression and a foreign - * table expression. - * 2) a boolean condition referencing to the self table only. - * Otherwise, it can be ambiguous. 
- */ - public void throwAmbiguousJoinConditionException() throws SQLException { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AMBIGUOUS_JOIN_CONDITION).build().buildException(); - } + public OnNodeVisitor(ColumnResolver resolver, JoinSpec joinSpec, PhoenixConnection connection) { + this.joinSpec = joinSpec; + this.columnRefVisitor = new ColumnRefParseNodeVisitor(resolver, connection); } - private static class LocalIndexColumnRef extends ColumnRef { - private final TableRef indexTableRef; - - public LocalIndexColumnRef(TableRef tableRef, String familyName, - String columnName, TableRef indexTableRef) throws MetaDataEntityNotFoundException { - super(tableRef, familyName, columnName); - this.indexTableRef = indexTableRef; - } + @Override + protected Void leaveBooleanNode(ParseNode node, List l) throws SQLException { + columnRefVisitor.reset(); + node.accept(columnRefVisitor); + ColumnRefParseNodeVisitor.ColumnRefType type = + columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); + if ( + type == ColumnRefParseNodeVisitor.ColumnRefType.NONE + || type == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + ) { + this.joinSpec.pushDownFilterToRhsJoinTable(node); + } else { + throwAmbiguousJoinConditionException(); + } + return null; + } - @Override - public TableRef getTableRef() { - return indexTableRef; - } + @Override + protected Void leaveNonBooleanNode(ParseNode node, List l) throws SQLException { + return null; } - private static class ColumnRefParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { - public enum ColumnRefType {NONE, SELF_ONLY, FOREIGN_ONLY, COMPLEX}; + @Override + public Void visitLeave(AndParseNode node, List l) throws SQLException { + return null; + } - private final ColumnResolver resolver; - private final PhoenixConnection connection; - private final Set tableRefSet; - private final Map columnRefMap; + @Override + public Void visitLeave(ComparisonParseNode node, List l) throws SQLException { + if (!(node instanceof EqualParseNode)) return leaveBooleanNode(node, l); + columnRefVisitor.reset(); + node.getLHS().accept(columnRefVisitor); + ColumnRefParseNodeVisitor.ColumnRefType lhsType = + columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); + Set lhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet()); + columnRefVisitor.reset(); + node.getRHS().accept(columnRefVisitor); + ColumnRefParseNodeVisitor.ColumnRefType rhsType = + columnRefVisitor.getContentType(this.joinSpec.getRhsJoinTableRefs()); + Set rhsTableRefSet = Sets.newHashSet(columnRefVisitor.getTableRefSet()); + if ( + (lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + || lhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE) + && (rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + || rhsType == ColumnRefParseNodeVisitor.ColumnRefType.NONE) + ) { + this.joinSpec.pushDownFilterToRhsJoinTable(node); + } else if ( + lhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY + && rhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + ) { + this.joinSpec.addOnCondition((EqualParseNode) node); + this.joinSpec.addDependentTableRefs(lhsTableRefSet); + } else if ( + rhsType == ColumnRefParseNodeVisitor.ColumnRefType.FOREIGN_ONLY + && lhsType == ColumnRefParseNodeVisitor.ColumnRefType.SELF_ONLY + ) { + this.joinSpec.addOnCondition(NODE_FACTORY.equal(node.getRHS(), node.getLHS())); + this.joinSpec.addDependentTableRefs(rhsTableRefSet); + } else { + throwAmbiguousJoinConditionException(); + } + return null; + } - public 
ColumnRefParseNodeVisitor(ColumnResolver resolver, PhoenixConnection connection) { - this.resolver = resolver; - this.tableRefSet = new HashSet(); - this.columnRefMap = new HashMap(); - this.connection = connection; - } + /* + * Conditions in the ON clause can only be: 1) an equal test between a self table expression and + * a foreign table expression. 2) a boolean condition referencing to the self table only. + * Otherwise, it can be ambiguous. + */ + public void throwAmbiguousJoinConditionException() throws SQLException { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AMBIGUOUS_JOIN_CONDITION).build() + .buildException(); + } + } - public void reset() { - this.tableRefSet.clear(); - this.columnRefMap.clear(); - } + private static class LocalIndexColumnRef extends ColumnRef { + private final TableRef indexTableRef; - @Override - public Void visit(ColumnParseNode node) throws SQLException { - ColumnRef columnRef = null; - try { - columnRef = resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - } catch (ColumnNotFoundException e) { - // This could be an IndexUncoveredDataColumnRef. If so, the table name must have - // been appended by the IndexStatementRewriter, and we can convert it into. - TableRef tableRef = resolver.resolveTable(node.getSchemaName(), node.getTableName()); - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - TableRef parentTableRef = FromCompiler.getResolver( - NODE_FACTORY.namedTable(null, TableName.create(tableRef.getTable() - .getSchemaName().getString(), tableRef.getTable() - .getParentTableName().getString())), connection).resolveTable( - tableRef.getTable().getSchemaName().getString(), - tableRef.getTable().getParentTableName().getString()); - columnRef = new LocalIndexColumnRef(parentTableRef, - IndexUtil.getDataColumnFamilyName(node.getName()), - IndexUtil.getDataColumnName(node.getName()), tableRef); - } else { - throw e; - } - } - columnRefMap.put(columnRef, node); - tableRefSet.add(columnRef.getTableRef()); - return null; - } + public LocalIndexColumnRef(TableRef tableRef, String familyName, String columnName, + TableRef indexTableRef) throws MetaDataEntityNotFoundException { + super(tableRef, familyName, columnName); + this.indexTableRef = indexTableRef; + } - public Set getTableRefSet() { - return tableRefSet; - } + @Override + public TableRef getTableRef() { + return indexTableRef; + } + } - public Map getColumnRefMap() { - return columnRefMap; - } + private static class ColumnRefParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { + public enum ColumnRefType { + NONE, + SELF_ONLY, + FOREIGN_ONLY, + COMPLEX + }; + + private final ColumnResolver resolver; + private final PhoenixConnection connection; + private final Set tableRefSet; + private final Map columnRefMap; + + public ColumnRefParseNodeVisitor(ColumnResolver resolver, PhoenixConnection connection) { + this.resolver = resolver; + this.tableRefSet = new HashSet(); + this.columnRefMap = new HashMap(); + this.connection = connection; + } - public ColumnRefType getContentType(List selfTableRefs) { - if (tableRefSet.isEmpty()) - return ColumnRefType.NONE; - - ColumnRefType ret = ColumnRefType.NONE; - for (TableRef tRef : tableRefSet) { - boolean isSelf = selfTableRefs.contains(tRef); - switch (ret) { - case NONE: - ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.FOREIGN_ONLY; - break; - case SELF_ONLY: - ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.COMPLEX; - break; - case FOREIGN_ONLY: - ret = isSelf ? 
ColumnRefType.COMPLEX : ColumnRefType.FOREIGN_ONLY; - break; - default: // COMPLEX do nothing - break; - } - - if (ret == ColumnRefType.COMPLEX) { - break; - } - } + public void reset() { + this.tableRefSet.clear(); + this.columnRefMap.clear(); + } - return ret; + @Override + public Void visit(ColumnParseNode node) throws SQLException { + ColumnRef columnRef = null; + try { + columnRef = + resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); + } catch (ColumnNotFoundException e) { + // This could be an IndexUncoveredDataColumnRef. If so, the table name must have + // been appended by the IndexStatementRewriter, and we can convert it into. + TableRef tableRef = resolver.resolveTable(node.getSchemaName(), node.getTableName()); + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + TableRef parentTableRef = FromCompiler + .getResolver(NODE_FACTORY.namedTable(null, + TableName.create(tableRef.getTable().getSchemaName().getString(), + tableRef.getTable().getParentTableName().getString())), + connection) + .resolveTable(tableRef.getTable().getSchemaName().getString(), + tableRef.getTable().getParentTableName().getString()); + columnRef = new LocalIndexColumnRef(parentTableRef, + IndexUtil.getDataColumnFamilyName(node.getName()), + IndexUtil.getDataColumnName(node.getName()), tableRef); + } else { + throw e; } + } + columnRefMap.put(columnRef, node); + tableRefSet.add(columnRef.getTableRef()); + return null; } - // for creation of new statements - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - /** - * Check if this {@link Table} could be pushed to RegionServer - * {@link HashJoinRegionScanner} as the probe side of Hash join. - * Note: the {@link SelectStatement} parameter must be rewritten by - * {@link ParseNodeUtil#rewrite} before this method. - * {@link SelectStatement} parameter could has NonCorrelated subquery, - * but for Correlated subquery, {@link ParseNodeUtil#rewrite} rewrite - * it as join. - * Note: {@link SelectStatement} could also have {@link OrderBy},but we - * could ignore the {@link OrderBy} because we do not guarantee the {@link OrderBy} - * after join. 
- * @param selectStatement - * @return - */ - private static boolean isCouldPushToServerAsHashJoinProbeSide(SelectStatement selectStatement) { - return !selectStatement.isJoin() - && !selectStatement.isAggregate() - && !selectStatement.isDistinct() - && !(selectStatement.getFrom() instanceof DerivedTableNode) - && selectStatement.getLimit() == null - && selectStatement.getOffset() == null; + public Set getTableRefSet() { + return tableRefSet; } - private static ParseNode combine(List nodes) { - if (nodes.isEmpty()) - return null; - - if (nodes.size() == 1) - return nodes.get(0); - - return NODE_FACTORY.and(nodes); + public Map getColumnRefMap() { + return columnRefMap; } - private boolean isWildCardSelectForTable(List select, TableRef tableRef, ColumnResolver resolver) throws SQLException { - for (AliasedNode aliasedNode : select) { - ParseNode node = aliasedNode.getNode(); - if (node instanceof TableWildcardParseNode) { - TableName tableName = ((TableWildcardParseNode) node).getTableName(); - if (tableRef.equals(resolver.resolveTable(tableName.getSchemaName(), tableName.getTableName()))) { - return true; - } + public ColumnRefType getContentType(List selfTableRefs) { + if (tableRefSet.isEmpty()) return ColumnRefType.NONE; - } + ColumnRefType ret = ColumnRefType.NONE; + for (TableRef tRef : tableRefSet) { + boolean isSelf = selfTableRefs.contains(tRef); + switch (ret) { + case NONE: + ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.FOREIGN_ONLY; + break; + case SELF_ONLY: + ret = isSelf ? ColumnRefType.SELF_ONLY : ColumnRefType.COMPLEX; + break; + case FOREIGN_ONLY: + ret = isSelf ? ColumnRefType.COMPLEX : ColumnRefType.FOREIGN_ONLY; + break; + default: // COMPLEX do nothing + break; } - return false; - } - private static Expression compilePostFilterExpression(StatementContext context, List postFilters) throws SQLException { - if (postFilters.isEmpty()) - return null; - - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - List expressions = new ArrayList(postFilters.size()); - for (ParseNode postFilter : postFilters) { - expressionCompiler.reset(); - Expression expression = postFilter.accept(expressionCompiler); - expressions.add(expression); + if (ret == ColumnRefType.COMPLEX) { + break; } + } - if (expressions.size() == 1) - return expressions.get(0); - - return AndExpression.create(expressions); + return ret; + } + } + + // for creation of new statements + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + + /** + * Check if this {@link Table} could be pushed to RegionServer {@link HashJoinRegionScanner} as + * the probe side of Hash join. Note: the {@link SelectStatement} parameter must be rewritten by + * {@link ParseNodeUtil#rewrite} before this method. {@link SelectStatement} parameter could has + * NonCorrelated subquery, but for Correlated subquery, {@link ParseNodeUtil#rewrite} rewrite it + * as join. Note: {@link SelectStatement} could also have {@link OrderBy},but we could ignore the + * {@link OrderBy} because we do not guarantee the {@link OrderBy} after join. 
+ */ + private static boolean isCouldPushToServerAsHashJoinProbeSide(SelectStatement selectStatement) { + return !selectStatement.isJoin() && !selectStatement.isAggregate() + && !selectStatement.isDistinct() && !(selectStatement.getFrom() instanceof DerivedTableNode) + && selectStatement.getLimit() == null && selectStatement.getOffset() == null; + } + + private static ParseNode combine(List nodes) { + if (nodes.isEmpty()) return null; + + if (nodes.size() == 1) return nodes.get(0); + + return NODE_FACTORY.and(nodes); + } + + private boolean isWildCardSelectForTable(List select, TableRef tableRef, + ColumnResolver resolver) throws SQLException { + for (AliasedNode aliasedNode : select) { + ParseNode node = aliasedNode.getNode(); + if (node instanceof TableWildcardParseNode) { + TableName tableName = ((TableWildcardParseNode) node).getTableName(); + if ( + tableRef + .equals(resolver.resolveTable(tableName.getSchemaName(), tableName.getTableName())) + ) { + return true; + } + + } + } + return false; + } + + private static Expression compilePostFilterExpression(StatementContext context, + List postFilters) throws SQLException { + if (postFilters.isEmpty()) return null; + + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + List expressions = new ArrayList(postFilters.size()); + for (ParseNode postFilter : postFilters) { + expressionCompiler.reset(); + Expression expression = postFilter.accept(expressionCompiler); + expressions.add(expression); } - public static PTable joinProjectedTables(PTable left, PTable right, JoinType type) throws SQLException { - Preconditions.checkArgument(left.getType() == PTableType.PROJECTED); - Preconditions.checkArgument(right.getType() == PTableType.PROJECTED); - List merged = Lists. newArrayList(); - int startingPosition = left.getBucketNum() == null ? 0 : 1; - if (type == JoinType.Full) { - for (int i = startingPosition; i < left.getColumns().size(); i++) { - PColumn c = left.getColumns().get(i); - merged.add(new ProjectedColumn(c.getName(), c.getFamilyName(), - c.getPosition(), true, ((ProjectedColumn) c).getSourceColumnRef(), SchemaUtil.isPKColumn(c) ? null : c.getName().getBytes())); - } - } else { - merged.addAll(left.getColumns()); - if (left.getBucketNum() != null) { - merged.remove(0); - } - } - int position = merged.size() + startingPosition; - for (PColumn c : right.getColumns()) { - if (!SchemaUtil.isPKColumn(c)) { - PColumn column = new ProjectedColumn(c.getName(), c.getFamilyName(), - position++, type == JoinType.Inner ? 
c.isNullable() : true, - ((ProjectedColumn) c).getSourceColumnRef(), c.getName().getBytes()); - merged.add(column); - } - } - return new PTableImpl.Builder() - .setType(left.getType()) - .setState(left.getIndexState()) - .setTimeStamp(left.getTimeStamp()) - .setIndexDisableTimestamp(left.getIndexDisableTimestamp()) - .setSequenceNumber(left.getSequenceNumber()) - .setImmutableRows(left.isImmutableRows()) - .setDisableWAL(PTable.DEFAULT_DISABLE_WAL) - .setMultiTenant(left.isMultiTenant()) - .setStoreNulls(left.getStoreNulls()) - .setViewType(left.getViewType()) - .setViewIndexIdType(left.getviewIndexIdType()) - .setViewIndexId(left.getViewIndexId()) - .setIndexType(left.getIndexType()) - .setTransactionProvider(left.getTransactionProvider()) - .setUpdateCacheFrequency(left.getUpdateCacheFrequency()) - .setNamespaceMapped(left.isNamespaceMapped()) - .setAutoPartitionSeqName(left.getAutoPartitionSeqName()) - .setAppendOnlySchema(left.isAppendOnlySchema()) - .setImmutableStorageScheme(ONE_CELL_PER_COLUMN) - .setQualifierEncodingScheme(NON_ENCODED_QUALIFIERS) - .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) - .setUseStatsForParallelization(left.useStatsForParallelization()) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(left.getTenantId()) - .setSchemaName(left.getSchemaName()) - .setTableName(PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(), - right.getName().getString()))) - .setPkName(left.getPKName()) - .setRowKeyOrderOptimizable(left.rowKeyOrderOptimizable()) - .setBucketNum(left.getBucketNum()) - .setIndexes(left.getIndexes() == null ? Collections.emptyList() : left.getIndexes()) - .setParentSchemaName(left.getParentSchemaName()) - .setParentTableName(left.getParentTableName()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(merged) - .build(); + if (expressions.size() == 1) return expressions.get(0); + + return AndExpression.create(expressions); + } + + public static PTable joinProjectedTables(PTable left, PTable right, JoinType type) + throws SQLException { + Preconditions.checkArgument(left.getType() == PTableType.PROJECTED); + Preconditions.checkArgument(right.getType() == PTableType.PROJECTED); + List merged = Lists. newArrayList(); + int startingPosition = left.getBucketNum() == null ? 0 : 1; + if (type == JoinType.Full) { + for (int i = startingPosition; i < left.getColumns().size(); i++) { + PColumn c = left.getColumns().get(i); + merged.add(new ProjectedColumn(c.getName(), c.getFamilyName(), c.getPosition(), true, + ((ProjectedColumn) c).getSourceColumnRef(), + SchemaUtil.isPKColumn(c) ? null : c.getName().getBytes())); + } + } else { + merged.addAll(left.getColumns()); + if (left.getBucketNum() != null) { + merged.remove(0); + } + } + int position = merged.size() + startingPosition; + for (PColumn c : right.getColumns()) { + if (!SchemaUtil.isPKColumn(c)) { + PColumn column = new ProjectedColumn(c.getName(), c.getFamilyName(), position++, + type == JoinType.Inner ? 
c.isNullable() : true, + ((ProjectedColumn) c).getSourceColumnRef(), c.getName().getBytes()); + merged.add(column); + } } + return new PTableImpl.Builder().setType(left.getType()).setState(left.getIndexState()) + .setTimeStamp(left.getTimeStamp()).setIndexDisableTimestamp(left.getIndexDisableTimestamp()) + .setSequenceNumber(left.getSequenceNumber()).setImmutableRows(left.isImmutableRows()) + .setDisableWAL(PTable.DEFAULT_DISABLE_WAL).setMultiTenant(left.isMultiTenant()) + .setStoreNulls(left.getStoreNulls()).setViewType(left.getViewType()) + .setViewIndexIdType(left.getviewIndexIdType()).setViewIndexId(left.getViewIndexId()) + .setIndexType(left.getIndexType()).setTransactionProvider(left.getTransactionProvider()) + .setUpdateCacheFrequency(left.getUpdateCacheFrequency()) + .setNamespaceMapped(left.isNamespaceMapped()) + .setAutoPartitionSeqName(left.getAutoPartitionSeqName()) + .setAppendOnlySchema(left.isAppendOnlySchema()).setImmutableStorageScheme(ONE_CELL_PER_COLUMN) + .setQualifierEncodingScheme(NON_ENCODED_QUALIFIERS) + .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) + .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) + .setUseStatsForParallelization(left.useStatsForParallelization()) + .setExcludedColumns(ImmutableList.of()).setTenantId(left.getTenantId()) + .setSchemaName(left.getSchemaName()) + .setTableName(PNameFactory + .newName(SchemaUtil.getTableName(left.getName().getString(), right.getName().getString()))) + .setPkName(left.getPKName()).setRowKeyOrderOptimizable(left.rowKeyOrderOptimizable()) + .setBucketNum(left.getBucketNum()) + .setIndexes(left.getIndexes() == null ? Collections.emptyList() : left.getIndexes()) + .setParentSchemaName(left.getParentSchemaName()).setParentTableName(left.getParentTableName()) + .setPhysicalNames(ImmutableList. of()).setColumns(merged).build(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java index 2828129f06f..81e515dd180 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/KeyPart.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.compile; -import java.util.List; import java.util.Set; import org.apache.hadoop.hbase.CompareOperator; @@ -27,56 +26,44 @@ import org.apache.phoenix.schema.PTable; /** - * - * Interface that determines how a key part contributes to - * the forming of the key (start/stop of scan and SkipScanFilter) - * for each part of a multi-part primary key. It acts as the glue - * between a built-in function and the setting of the scan key - * during query compilation. - * + * Interface that determines how a key part contributes to the forming of the key (start/stop of + * scan and SkipScanFilter) for each part of a multi-part primary key. It acts as the glue between a + * built-in function and the setting of the scan key during query compilation. */ public interface KeyPart { - /** - * Calculate the key range given an operator and the key on - * the RHS of an expression. 
For example, given the expression - * SUBSTR(foo,1,3) = 'bar', the key range would be ['bar','bas'), - * and if foo was fixed length, the upper and lower key range - * bytes would be filled out to the fixed length. - * @param op comparison operator {@code (=, <=, <, >=, >, !=) } - * @param rhs the constant on the RHS of an expression. - * @return the key range that encompasses the range for the - * expression for which this keyPart is associated - * - * @see org.apache.phoenix.expression.function.ScalarFunction#newKeyPart(KeyPart) - */ - public KeyRange getKeyRange(CompareOperator op, Expression rhs); - - /** - * Determines whether an expression gets extracted from the - * WHERE clause if it contributes toward the building of the - * scan key. For example, the SUBSTR built-in function may - * be extracted, since it may be completely represented - * through a key range. However, the REGEXP_SUBSTR must be - * left in the WHERE clause, since only the constant prefix - * part of the evaluation can be represented through a key - * range (i.e. rows may pass through that will fail when - * the REGEXP_SUBSTR is evaluated). - * - * @return an empty set if the expression should remain in - * the WHERE clause for post filtering or a singleton set - * containing the expression if it should be removed. - */ - public Set getExtractNodes(); - - /** - * Gets the primary key column associated with the start of this key part. - * @return the primary key column for this key part - */ - public PColumn getColumn(); - - /** - * Gets the table metadata object associated with this key part - * @return the table for this key part - */ - public PTable getTable(); -} \ No newline at end of file + /** + * Calculate the key range given an operator and the key on the RHS of an expression. For example, + * given the expression SUBSTR(foo,1,3) = 'bar', the key range would be ['bar','bas'), and if foo + * was fixed length, the upper and lower key range bytes would be filled out to the fixed length. + * @param op comparison operator {@code (=, <=, <, >=, >, !=) } + * @param rhs the constant on the RHS of an expression. + * @return the key range that encompasses the range for the expression for which this keyPart is + * associated + * @see org.apache.phoenix.expression.function.ScalarFunction#newKeyPart(KeyPart) + */ + public KeyRange getKeyRange(CompareOperator op, Expression rhs); + + /** + * Determines whether an expression gets extracted from the WHERE clause if it contributes toward + * the building of the scan key. For example, the SUBSTR built-in function may be extracted, since + * it may be completely represented through a key range. However, the REGEXP_SUBSTR must be left + * in the WHERE clause, since only the constant prefix part of the evaluation can be represented + * through a key range (i.e. rows may pass through that will fail when the REGEXP_SUBSTR is + * evaluated). + * @return an empty set if the expression should remain in the WHERE clause for post filtering or + * a singleton set containing the expression if it should be removed. + */ + public Set getExtractNodes(); + + /** + * Gets the primary key column associated with the start of this key part. 
+ * @return the primary key column for this key part + */ + public PColumn getColumn(); + + /** + * Gets the table metadata object associated with this key part + * @return the table for this key part + */ + public PTable getTable(); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java index 06caad9558e..59a016fa89a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/LimitCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,91 +25,95 @@ import org.apache.phoenix.parse.LiteralParseNode; import org.apache.phoenix.parse.ParseNodeFactory; import org.apache.phoenix.parse.TraverseNoParseNodeVisitor; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.PDatum; -import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; public class LimitCompiler { - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - public static final PDatum LIMIT_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - - private LimitCompiler() { + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + + public static final PDatum LIMIT_DATUM = new PDatum() { + @Override + public boolean isNullable() { + return false; } - public static Integer compile(StatementContext context, FilterableStatement statement) throws SQLException { - LimitNode limitNode = statement.getLimit(); - if (limitNode == null) { - return null; - } - LimitParseNodeVisitor visitor = new LimitParseNodeVisitor(context); - limitNode.getLimitParseNode().accept(visitor); - return visitor.getLimit(); + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; } - - private static class LimitParseNodeVisitor extends TraverseNoParseNodeVisitor { - private final StatementContext context; - private Integer limit; - - private LimitParseNodeVisitor(StatementContext context) { - this.context = context; - } - - public Integer getLimit() { - return limit; - } - - @Override - public Void visit(LiteralParseNode node) throws SQLException { - Object limitValue = node.getValue(); - // If limit is null, leave this.limit set to zero - // This means that we've bound limit to null for the purpose of - // collecting parameter metadata. 
- if (limitValue != null) { - Integer limit = (Integer)LIMIT_DATUM.getDataType().toObject(limitValue, node.getType()); - if (limit.intValue() >= 0) { // TODO: handle LIMIT 0 - this.limit = limit; - } - } - return null; - } - - @Override - public Void visit(BindParseNode node) throws SQLException { - // This is for static evaluation in SubselectRewriter. - if (context == null) - return null; - - Object value = context.getBindManager().getBindValue(node); - context.getBindManager().addParamMetaData(node, LIMIT_DATUM); - // Resolve the bind value, create a LiteralParseNode, and call the visit method for it. - // In this way, we can deal with just having a literal on one side of the expression. - visit(NODE_FACTORY.literal(value, LIMIT_DATUM.getDataType())); - return null; + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; + + private LimitCompiler() { + } + + public static Integer compile(StatementContext context, FilterableStatement statement) + throws SQLException { + LimitNode limitNode = statement.getLimit(); + if (limitNode == null) { + return null; + } + LimitParseNodeVisitor visitor = new LimitParseNodeVisitor(context); + limitNode.getLimitParseNode().accept(visitor); + return visitor.getLimit(); + } + + private static class LimitParseNodeVisitor extends TraverseNoParseNodeVisitor { + private final StatementContext context; + private Integer limit; + + private LimitParseNodeVisitor(StatementContext context) { + this.context = context; + } + + public Integer getLimit() { + return limit; + } + + @Override + public Void visit(LiteralParseNode node) throws SQLException { + Object limitValue = node.getValue(); + // If limit is null, leave this.limit set to zero + // This means that we've bound limit to null for the purpose of + // collecting parameter metadata. + if (limitValue != null) { + Integer limit = (Integer) LIMIT_DATUM.getDataType().toObject(limitValue, node.getType()); + if (limit.intValue() >= 0) { // TODO: handle LIMIT 0 + this.limit = limit; } - + } + return null; } + @Override + public Void visit(BindParseNode node) throws SQLException { + // This is for static evaluation in SubselectRewriter. + if (context == null) return null; + + Object value = context.getBindManager().getBindValue(node); + context.getBindManager().addParamMetaData(node, LIMIT_DATUM); + // Resolve the bind value, create a LiteralParseNode, and call the visit method for it. + // In this way, we can deal with just having a literal on one side of the expression. + visit(NODE_FACTORY.literal(value, LIMIT_DATUM.getDataType())); + return null; + } + + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java index 61bab5e50a2..78c9b90ba26 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,8 +35,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.execute.visitor.QueryPlanVisitor; @@ -74,227 +73,221 @@ public class ListJarsQueryPlan implements QueryPlan { - private PhoenixStatement stmt = null; - private StatementContext context = null; - private boolean first = true; - - private static final RowProjector JARS_PROJECTOR; - - static { - List projectedColumns = new ArrayList(); - PName colName = PNameFactory.newName("jar_location"); - PColumn column = - new PColumnImpl(colName, null, - PVarchar.INSTANCE, null, null, false, 0, SortOrder.getDefault(), 0, null, - false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - List columns = new ArrayList(); - columns.add(column); - Expression expression = - new RowKeyColumnExpression(column, new RowKeyValueAccessor(columns, 0)); - projectedColumns.add(new ExpressionProjector("jar_location", "jar_location", "", expression, - true)); - int estimatedByteSize = SizedUtil.KEY_VALUE_SIZE; - JARS_PROJECTOR = new RowProjector(projectedColumns, estimatedByteSize, false); - } - - public ListJarsQueryPlan(PhoenixStatement stmt) { - this.stmt = stmt; - this.context = new StatementContext(stmt); - } - - @Override - public StatementContext getContext() { - return this.context; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return ExplainPlan.EMPTY_PLAN; - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan s) throws SQLException { - return iterator(scanGrouper); - } - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return new ResultIterator() { - private RemoteIterator listFiles = null; - - @Override - public void close() throws SQLException { - + private PhoenixStatement stmt = null; + private StatementContext context = null; + private boolean first = true; + + private static final RowProjector JARS_PROJECTOR; + + static { + List projectedColumns = new ArrayList(); + PName colName = PNameFactory.newName("jar_location"); + PColumn column = new PColumnImpl(colName, null, PVarchar.INSTANCE, null, null, false, 0, + SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), + HConstants.LATEST_TIMESTAMP); + List columns = new ArrayList(); + columns.add(column); + Expression expression = new RowKeyColumnExpression(column, new RowKeyValueAccessor(columns, 0)); + projectedColumns + .add(new ExpressionProjector("jar_location", "jar_location", "", expression, true)); + int estimatedByteSize = SizedUtil.KEY_VALUE_SIZE; + JARS_PROJECTOR = new RowProjector(projectedColumns, estimatedByteSize, false); + } + + public ListJarsQueryPlan(PhoenixStatement stmt) { + this.stmt = stmt; + this.context = new StatementContext(stmt); + } + + @Override + 
public StatementContext getContext() { + return this.context; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return ExplainPlan.EMPTY_PLAN; + } + + @Override + public ResultIterator iterator() throws SQLException { + return iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan s) throws SQLException { + return iterator(scanGrouper); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return new ResultIterator() { + private RemoteIterator listFiles = null; + + @Override + public void close() throws SQLException { + + } + + @Override + public Tuple next() throws SQLException { + try { + if (first) { + String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps() + .get(QueryServices.DYNAMIC_JARS_DIR_KEY); + if (dynamicJarsDir == null) { + throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + + " is not configured for the listing the jars."); } - - @Override - public Tuple next() throws SQLException { - try { - if(first) { - String dynamicJarsDir = - stmt.getConnection().getQueryServices().getProps() - .get(QueryServices.DYNAMIC_JARS_DIR_KEY); - if(dynamicJarsDir == null) { - throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY - + " is not configured for the listing the jars."); - } - dynamicJarsDir = - dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/'; - Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - Path dynamicJarsDirPath = new Path(dynamicJarsDir); - FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); - listFiles = fs.listFiles(dynamicJarsDirPath, true); - first = false; - } - if(listFiles == null || !listFiles.hasNext()) return null; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ParseNodeFactory factory = new ParseNodeFactory(); - LiteralParseNode literal = - factory.literal(listFiles.next().getPath().toString()); - LiteralExpression expression = - LiteralExpression.newConstant(literal.getValue(), PVarchar.INSTANCE, - Determinism.ALWAYS); - expression.evaluate(null, ptr); - byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr); - Cell cell = - PhoenixKeyValueUtil.newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(), - HConstants.EMPTY_BYTE_ARRAY); - List cells = new ArrayList(1); - cells.add(cell); - return new ResultTuple(Result.create(cells)); - } catch (IOException e) { - throw new SQLException(e); - } - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - }; - } - - @Override - public long getEstimatedSize() { - return PVarchar.INSTANCE.getByteSize(); - } - - @Override - public Cost getCost() { - return Cost.ZERO; - } - - @Override - public TableRef getTableRef() { - return null; - } - - @Override - public RowProjector getProjector() { - return JARS_PROJECTOR; - } - - @Override - public Integer getLimit() { - return null; - } - - @Override - public Integer getOffset() { - return null; - } - - @Override - public OrderBy getOrderBy() { - return OrderBy.EMPTY_ORDER_BY; - } - - @Override - public GroupBy getGroupBy() { - return GroupBy.EMPTY_GROUP_BY; - } - - @Override - public List 
getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public FilterableStatement getStatement() { - return null; - } - - @Override - public boolean isDegenerate() { - return false; - } - - @Override - public boolean isRowKeyOrdered() { - return false; - } - - @Override - public boolean useRoundRobinIterator() { - return false; - } - - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); - } - - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } - - @Override - public Operation getOperation() { - return stmt.getUpdateOperation(); - } - - @Override - public Long getEstimatedRowsToScan() { - return 0l; - } - - @Override - public Long getEstimatedBytesToScan() { - return 0l; - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } - - @Override - public List getOutputOrderBys() { - return Collections. emptyList(); - } - - @Override - public boolean isApplicable() { - return true; - } -} \ No newline at end of file + dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/'; + Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + Path dynamicJarsDirPath = new Path(dynamicJarsDir); + FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); + listFiles = fs.listFiles(dynamicJarsDirPath, true); + first = false; + } + if (listFiles == null || !listFiles.hasNext()) return null; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ParseNodeFactory factory = new ParseNodeFactory(); + LiteralParseNode literal = factory.literal(listFiles.next().getPath().toString()); + LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), + PVarchar.INSTANCE, Determinism.ALWAYS); + expression.evaluate(null, ptr); + byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr); + Cell cell = PhoenixKeyValueUtil.newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(), + HConstants.EMPTY_BYTE_ARRAY); + List cells = new ArrayList(1); + cells.add(cell); + return new ResultTuple(Result.create(cells)); + } catch (IOException e) { + throw new SQLException(e); + } + } + + @Override + public void explain(List planSteps) { + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + }; + } + + @Override + public long getEstimatedSize() { + return PVarchar.INSTANCE.getByteSize(); + } + + @Override + public Cost getCost() { + return Cost.ZERO; + } + + @Override + public TableRef getTableRef() { + return null; + } + + @Override + public RowProjector getProjector() { + return JARS_PROJECTOR; + } + + @Override + public Integer getLimit() { + return null; + } + + @Override + public Integer getOffset() { + return null; + } + + @Override + public OrderBy getOrderBy() { + return OrderBy.EMPTY_ORDER_BY; + } + + @Override + public GroupBy getGroupBy() { + return GroupBy.EMPTY_GROUP_BY; + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public FilterableStatement getStatement() { + return null; + } + + @Override + public boolean isDegenerate() { + return false; + } + + @Override + public boolean isRowKeyOrdered() { + return false; + } + + @Override + public boolean useRoundRobinIterator() { + return false; 
+ } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public Set getSourceRefs() { + return Collections. emptySet(); + } + + @Override + public Operation getOperation() { + return stmt.getUpdateOperation(); + } + + @Override + public Long getEstimatedRowsToScan() { + return 0l; + } + + @Override + public Long getEstimatedBytesToScan() { + return 0l; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } + + @Override + public List getOutputOrderBys() { + return Collections. emptyList(); + } + + @Override + public boolean isApplicable() { + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java index 2c5af7a84b7..9c87f838886 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,8 +27,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Scan; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.execute.MutationState; import org.apache.phoenix.iterate.ParallelIteratorFactory; import org.apache.phoenix.iterate.PeekingResultIterator; @@ -46,102 +45,98 @@ */ public abstract class MutatingParallelIteratorFactory implements ParallelIteratorFactory { - private static final Logger LOGGER = LoggerFactory.getLogger( - MutatingParallelIteratorFactory.class); - protected final PhoenixConnection connection; + private static final Logger LOGGER = + LoggerFactory.getLogger(MutatingParallelIteratorFactory.class); + protected final PhoenixConnection connection; - protected MutatingParallelIteratorFactory(PhoenixConnection connection) { - this.connection = connection; - } - - /** - * Method that does the actual mutation work - */ - abstract protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, - PhoenixConnection connection) throws SQLException; - - @Override - public PeekingResultIterator newIterator(final StatementContext parentContext, - ResultIterator iterator, Scan scan, String tableName, - QueryPlan plan) throws SQLException { - - final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection); - connection.addChildConnection(clonedConnection); - try { - MutationState state = mutate(parentContext, iterator, clonedConnection); - - final long totalRowCount = state.getUpdateCount(); - final boolean autoFlush = connection.getAutoCommit() || - plan.getTableRef().getTable().isTransactional(); - if (autoFlush) { - clonedConnection.getMutationState().join(state); - state = clonedConnection.getMutationState(); - } - final MutationState finalState = state; - - byte[] value = PLong.INSTANCE.toBytes(totalRowCount); - Cell keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, 
- SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); - final Tuple tuple = new SingleKeyValueTuple(keyValue); - return new PeekingResultIterator() { - private boolean done = false; - - @Override - public Tuple next() { - if (done) { - return null; - } - done = true; - return tuple; - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - @Override - public void close() throws SQLException { - try { - /* - * Join the child mutation states in close, since this is called in a single - * threaded manner after the parallel results have been processed. - * If auto-commit is on for the cloned child connection, then the finalState - * here is an empty mutation state (with no mutations). However, it still - * has the metrics for mutation work done by the mutating-iterator. - * Joining the mutation state makes sure those metrics are passed over - * to the parent connection. - */ - MutatingParallelIteratorFactory.this.connection.getMutationState() - .join(finalState); - } finally { - //Removing to be closed connection from the parent connection queue. - connection.removeChildConnection(clonedConnection); - clonedConnection.close(); - } - } - - @Override - public Tuple peek() { - return done ? null : tuple; - } - }; - } catch (Throwable ex) { - // Catch just to make sure we close the cloned connection and then rethrow - try { - //Removing to be closed connection from the parent connection queue. - connection.removeChildConnection(clonedConnection); - // closeQuietly only handles IOException - clonedConnection.close(); - } catch (SQLException sqlEx) { - LOGGER.error("Closing cloned Phoenix connection inside iterator, failed with: ", - sqlEx); - } - throw ex; + protected MutatingParallelIteratorFactory(PhoenixConnection connection) { + this.connection = connection; + } + + /** + * Method that does the actual mutation work + */ + abstract protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, + PhoenixConnection connection) throws SQLException; + + @Override + public PeekingResultIterator newIterator(final StatementContext parentContext, + ResultIterator iterator, Scan scan, String tableName, QueryPlan plan) throws SQLException { + + final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection); + connection.addChildConnection(clonedConnection); + try { + MutationState state = mutate(parentContext, iterator, clonedConnection); + + final long totalRowCount = state.getUpdateCount(); + final boolean autoFlush = + connection.getAutoCommit() || plan.getTableRef().getTable().isTransactional(); + if (autoFlush) { + clonedConnection.getMutationState().join(state); + state = clonedConnection.getMutationState(); + } + final MutationState finalState = state; + + byte[] value = PLong.INSTANCE.toBytes(totalRowCount); + Cell keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, + SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); + final Tuple tuple = new SingleKeyValueTuple(keyValue); + return new PeekingResultIterator() { + private boolean done = false; + + @Override + public Tuple next() { + if (done) { + return null; + } + done = true; + return tuple; + } + + @Override + public void explain(List planSteps) { + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + + @Override + public void close() throws 
SQLException { + try { + /* + * Join the child mutation states in close, since this is called in a single threaded + * manner after the parallel results have been processed. If auto-commit is on for the + * cloned child connection, then the finalState here is an empty mutation state (with no + * mutations). However, it still has the metrics for mutation work done by the + * mutating-iterator. Joining the mutation state makes sure those metrics are passed + * over to the parent connection. + */ + MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState); + } finally { + // Removing to be closed connection from the parent connection queue. + connection.removeChildConnection(clonedConnection); + clonedConnection.close(); + } + } + + @Override + public Tuple peek() { + return done ? null : tuple; } + }; + } catch (Throwable ex) { + // Catch just to make sure we close the cloned connection and then rethrow + try { + // Removing to be closed connection from the parent connection queue. + connection.removeChildConnection(clonedConnection); + // closeQuietly only handles IOException + clonedConnection.close(); + } catch (SQLException sqlEx) { + LOGGER.error("Closing cloned Phoenix connection inside iterator, failed with: ", sqlEx); + } + throw ex; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java index 97f3f3d0ee2..d8d0cf86cc0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/MutationPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,9 +22,10 @@ import org.apache.phoenix.execute.MutationState; import org.apache.phoenix.schema.TableRef; - public interface MutationPlan extends StatementPlan { - MutationState execute() throws SQLException; - TableRef getTargetRef(); - QueryPlan getQueryPlan(); -} \ No newline at end of file + MutationState execute() throws SQLException; + + TableRef getTargetRef(); + + QueryPlan getQueryPlan(); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java index f83aa30d396..848a030a45f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OffsetCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,109 +29,110 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; - import org.apache.phoenix.thirdparty.com.google.common.base.Optional; public class OffsetCompiler { - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - private static final PDatum OFFSET_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return false; - } + private static final PDatum OFFSET_DATUM = new PDatum() { + @Override + public boolean isNullable() { + return false; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - @Override - public Integer getMaxLength() { - return null; - } + @Override + public Integer getMaxLength() { + return null; + } - @Override - public Integer getScale() { - return null; - } + @Override + public Integer getScale() { + return null; + } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; - private final RVCOffsetCompiler rvcOffsetCompiler = RVCOffsetCompiler.getInstance(); + private final RVCOffsetCompiler rvcOffsetCompiler = RVCOffsetCompiler.getInstance(); - private OffsetCompiler() {} + private OffsetCompiler() { + } - // eager initialization - final private static OffsetCompiler OFFSET_COMPILER = getInstance(); + // eager initialization + final private static OffsetCompiler OFFSET_COMPILER = getInstance(); - private static OffsetCompiler getInstance() { - return new OffsetCompiler(); - } + private static OffsetCompiler getInstance() { + return new OffsetCompiler(); + } - public static OffsetCompiler getOffsetCompiler() { - return OFFSET_COMPILER; - } + public static OffsetCompiler getOffsetCompiler() { + return OFFSET_COMPILER; + } - public CompiledOffset compile(StatementContext context, FilterableStatement statement, boolean inJoin, boolean inUnion) throws SQLException { - OffsetNode offsetNode = statement.getOffset(); - if (offsetNode == null) { - return CompiledOffset.EMPTY_COMPILED_OFFSET; - } - if (offsetNode.isIntegerOffset()) { - OffsetParseNodeVisitor visitor = new OffsetParseNodeVisitor(context); - offsetNode.getOffsetParseNode().accept(visitor); - Integer offset = visitor.getOffset(); - return new CompiledOffset(Optional.fromNullable(offset), Optional.absent()); - } else { //Must be a RVC Offset - return rvcOffsetCompiler.getRVCOffset(context, statement, inJoin, inUnion, offsetNode); - } + public CompiledOffset compile(StatementContext context, FilterableStatement statement, + boolean inJoin, boolean inUnion) throws SQLException { + OffsetNode offsetNode = statement.getOffset(); + if (offsetNode == null) { + return CompiledOffset.EMPTY_COMPILED_OFFSET; } + if (offsetNode.isIntegerOffset()) { + OffsetParseNodeVisitor visitor = new OffsetParseNodeVisitor(context); + offsetNode.getOffsetParseNode().accept(visitor); + Integer offset = visitor.getOffset(); + return new CompiledOffset(Optional.fromNullable(offset), Optional. 
absent()); + } else { // Must be a RVC Offset + return rvcOffsetCompiler.getRVCOffset(context, statement, inJoin, inUnion, offsetNode); + } + } - private static class OffsetParseNodeVisitor extends TraverseNoParseNodeVisitor { - private final StatementContext context; - private Integer offset; - - OffsetParseNodeVisitor(StatementContext context) { - this.context = context; - } + private static class OffsetParseNodeVisitor extends TraverseNoParseNodeVisitor { + private final StatementContext context; + private Integer offset; - Integer getOffset() { - return offset; - } + OffsetParseNodeVisitor(StatementContext context) { + this.context = context; + } - @Override - public Void visit(LiteralParseNode node) throws SQLException { - Object offsetValue = node.getValue(); - if (offsetValue != null) { - Integer offset = (Integer)OFFSET_DATUM.getDataType().toObject(offsetValue, node.getType()); - if (offset >= 0) { - this.offset = offset; - } - } - return null; - } + Integer getOffset() { + return offset; + } - @Override - public Void visit(BindParseNode node) throws SQLException { - // This is for static evaluation in SubselectRewriter. - if (context == null) return null; - - Object value = context.getBindManager().getBindValue(node); - context.getBindManager().addParamMetaData(node, OFFSET_DATUM); - // Resolve the bind value, create a LiteralParseNode, and call the - // visit method for it. - // In this way, we can deal with just having a literal on one side - // of the expression. - visit(NODE_FACTORY.literal(value, OFFSET_DATUM.getDataType())); - return null; + @Override + public Void visit(LiteralParseNode node) throws SQLException { + Object offsetValue = node.getValue(); + if (offsetValue != null) { + Integer offset = (Integer) OFFSET_DATUM.getDataType().toObject(offsetValue, node.getType()); + if (offset >= 0) { + this.offset = offset; } + } + return null; + } + @Override + public Void visit(BindParseNode node) throws SQLException { + // This is for static evaluation in SubselectRewriter. + if (context == null) return null; + + Object value = context.getBindManager().getBindValue(node); + context.getBindManager().addParamMetaData(node, OFFSET_DATUM); + // Resolve the bind value, create a LiteralParseNode, and call the + // visit method for it. + // In this way, we can deal with just having a literal on one side + // of the expression. + visit(NODE_FACTORY.literal(value, OFFSET_DATUM.getDataType())); + return null; } + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java index bed8fd1e350..11bc454a062 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OpenStatementCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,29 +28,29 @@ import org.apache.phoenix.schema.MetaDataClient; public class OpenStatementCompiler { - private final PhoenixStatement statement; - private final Operation operation; + private final PhoenixStatement statement; + private final Operation operation; - public OpenStatementCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } + public OpenStatementCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } - public MutationPlan compile(final OpenStatement open) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - final StatementContext context = new StatementContext(statement); - final MetaDataClient client = new MetaDataClient(connection); - - return new BaseMutationPlan(context, operation) { - @Override - public MutationState execute() throws SQLException { - return client.open(open); - } + public MutationPlan compile(final OpenStatement open) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + final StatementContext context = new StatementContext(statement); + final MetaDataClient client = new MetaDataClient(connection); - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("OPEN CURSOR")); - } - }; - } -} \ No newline at end of file + return new BaseMutationPlan(context, operation) { + @Override + public MutationState execute() throws SQLException { + return client.open(open); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("OPEN CURSOR")); + } + }; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java index 7faf1dbcbdf..5046d33aba1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.compile; - import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; @@ -40,213 +39,209 @@ import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.RowValueConstructorOffsetNotAllowedInQueryException; import org.apache.phoenix.schema.RowValueConstructorOffsetNotCoercibleException; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; /** * Validates ORDER BY clause and builds up a list of referenced columns. 
- * - * * @since 0.1 */ public class OrderByCompiler { - public static class OrderBy { - public static final OrderBy EMPTY_ORDER_BY = new OrderBy(Collections.emptyList()); - /** - * Used to indicate that there was an ORDER BY, but it was optimized out because - * rows are already returned in this order. - */ - public static final OrderBy FWD_ROW_KEY_ORDER_BY = new OrderBy(Collections.emptyList()); - public static final OrderBy REV_ROW_KEY_ORDER_BY = new OrderBy(Collections.emptyList()); - - private final List orderByExpressions; - - public OrderBy(List orderByExpressions) { - this.orderByExpressions = ImmutableList.copyOf(orderByExpressions); - } + public static class OrderBy { + public static final OrderBy EMPTY_ORDER_BY = + new OrderBy(Collections. emptyList()); + /** + * Used to indicate that there was an ORDER BY, but it was optimized out because rows are + * already returned in this order. + */ + public static final OrderBy FWD_ROW_KEY_ORDER_BY = + new OrderBy(Collections. emptyList()); + public static final OrderBy REV_ROW_KEY_ORDER_BY = + new OrderBy(Collections. emptyList()); - public List getOrderByExpressions() { - return orderByExpressions; - } + private final List orderByExpressions; - public boolean isEmpty() { - return this.orderByExpressions == null || this.orderByExpressions.isEmpty(); - } + public OrderBy(List orderByExpressions) { + this.orderByExpressions = ImmutableList.copyOf(orderByExpressions); + } - public static List wrapForOutputOrderBys(OrderBy orderBy) { - assert orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; - if(orderBy == null || orderBy == OrderBy.EMPTY_ORDER_BY) { - return Collections. emptyList(); - } - return Collections. singletonList(orderBy); - } + public List getOrderByExpressions() { + return orderByExpressions; + } - /** - * When we compile {@link OrderByNode} in {@link OrderByCompiler#compile}, we invoke {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc} - * to get the compiled {@link OrderByExpression} for using it in {@link OrderedResultIterator}, but for {@link QueryPlan#getOutputOrderBys()}, - * the returned {@link OrderByExpression} is used for {@link OrderPreservingTracker}, so we should invoke {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc} - * again to the actual {@link OrderByExpression}. - * @return - */ - public static OrderBy convertCompiledOrderByToOutputOrderBy(OrderBy orderBy) { - if(orderBy.isEmpty()) { - return orderBy; - } - List orderByExpressions = orderBy.getOrderByExpressions(); - List newOrderByExpressions = new ArrayList(orderByExpressions.size()); - for(OrderByExpression orderByExpression : orderByExpressions) { - OrderByExpression newOrderByExpression = - OrderByExpression.convertIfExpressionSortOrderDesc(orderByExpression); - newOrderByExpressions.add(newOrderByExpression); - } - return new OrderBy(newOrderByExpressions); - } + public boolean isEmpty() { + return this.orderByExpressions == null || this.orderByExpressions.isEmpty(); + } - public static boolean equalsForOutputOrderBy(OrderBy orderBy1, OrderBy orderBy2) { - return Objects.equals(orderBy1.orderByExpressions, orderBy2.orderByExpressions); - } + public static List wrapForOutputOrderBys(OrderBy orderBy) { + assert orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; + if (orderBy == null || orderBy == OrderBy.EMPTY_ORDER_BY) { + return Collections. emptyList(); + } + return Collections. 
singletonList(orderBy); } + /** - * Gets a list of columns in the ORDER BY clause - * @param context the query context for tracking various states - * associated with the given select statement - * @param statement TODO - * @param groupBy the list of columns in the GROUP BY clause - * @param limit the row limit or null if no limit - * @return the compiled ORDER BY clause - * @throws SQLException + * When we compile {@link OrderByNode} in {@link OrderByCompiler#compile}, we invoke + * {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc} to get the compiled + * {@link OrderByExpression} for using it in {@link OrderedResultIterator}, but for + * {@link QueryPlan#getOutputOrderBys()}, the returned {@link OrderByExpression} is used for + * {@link OrderPreservingTracker}, so we should invoke + * {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc} again to the actual + * {@link OrderByExpression}. */ - public static OrderBy compile(StatementContext context, - SelectStatement statement, - GroupBy groupBy, - Integer limit, - CompiledOffset offset, - RowProjector rowProjector, - QueryPlan innerQueryPlan, - Expression whereExpression) throws SQLException { - List orderByNodes = statement.getOrderBy(); - if (orderByNodes.isEmpty()) { - return OrderBy.EMPTY_ORDER_BY; - } - // for ungroupedAggregates as GROUP BY expression, check against an empty group by - ExpressionCompiler compiler; - if (groupBy.isUngroupedAggregate()) { - compiler = new StatelessExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY); - } else { - compiler = new ExpressionCompiler(context, groupBy); - } - OrderPreservingTracker tracker = null; - if(isTrackOrderByPreserving(statement)) { - // accumulate columns in ORDER BY - tracker = new OrderPreservingTracker( - context, - groupBy, - Ordering.ORDERED, - orderByNodes.size(), - null, - innerQueryPlan, - whereExpression); - } + public static OrderBy convertCompiledOrderByToOutputOrderBy(OrderBy orderBy) { + if (orderBy.isEmpty()) { + return orderBy; + } + List orderByExpressions = orderBy.getOrderByExpressions(); + List newOrderByExpressions = + new ArrayList(orderByExpressions.size()); + for (OrderByExpression orderByExpression : orderByExpressions) { + OrderByExpression newOrderByExpression = + OrderByExpression.convertIfExpressionSortOrderDesc(orderByExpression); + newOrderByExpressions.add(newOrderByExpression); + } + return new OrderBy(newOrderByExpressions); + } - LinkedHashSet orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size()); - for (OrderByNode node : orderByNodes) { - Expression expression = null; - if (node.isIntegerLiteral()) { - if (rowProjector == null) { - throw new IllegalStateException( - "rowProjector is null when there is LiteralParseNode in orderByNodes"); - } - Integer index = node.getValueIfIntegerLiteral(); - assert index != null; - int size = rowProjector.getColumnProjectors().size(); - if (index > size || index <= 0 ) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) - .build().buildException(); - } - expression = rowProjector.getColumnProjector(index-1).getExpression(); - } else { - expression = node.getNode().accept(compiler); - // Detect mix of aggregate and non aggregates (i.e. 
ORDER BY txns, SUM(txns) - if (!expression.isStateless() && !compiler.isAggregate()) { - if (statement.isAggregate() || statement.isDistinct()) { - // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x - if (statement.isDistinct()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT) - .setMessage(expression.toString()).build().buildException(); - } - ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString()); - } - } - } - if (!expression.isStateless()) { - boolean isAscending = node.isAscending(); - boolean isNullsLast = node.isNullsLast(); - if(tracker != null) { - tracker.track(expression, isAscending, isNullsLast); - } - /** - * If we have a schema where column A is DESC, reverse the sort order - * since this is the order they actually are in. - * Reverse is required because the compiled OrderByExpression is used in {@link OrderedResultIterator}, - * {@link OrderedResultIterator} implements the compare based on binary representation, not the decoded value of corresponding dataType. - */ - OrderByExpression orderByExpression = OrderByExpression.createByCheckIfExpressionSortOrderDesc( - expression, - isNullsLast, - isAscending); - orderByExpressions.add(orderByExpression); - } - compiler.reset(); - } + public static boolean equalsForOutputOrderBy(OrderBy orderBy1, OrderBy orderBy2) { + return Objects.equals(orderBy1.orderByExpressions, orderBy2.orderByExpressions); + } + } - //If we are not ordered we shouldn't be using RVC Offset - //I think this makes sense for the pagination case but perhaps we can relax this for - //other use cases. - //Note If the table is salted we still mark as row ordered in this code path - if (offset.getByteOffset().isPresent() && orderByExpressions.isEmpty()) { - throw new RowValueConstructorOffsetNotAllowedInQueryException( - "RVC OFFSET requires either forceRowKeyOrder or explict ORDERBY with row key order"); - } + /** + * Gets a list of columns in the ORDER BY clause + * @param context the query context for tracking various states associated with the given select + * statement + * @param statement TODO + * @param groupBy the list of columns in the GROUP BY clause + * @param limit the row limit or null if no limit + * @return the compiled ORDER BY clause + */ + public static OrderBy compile(StatementContext context, SelectStatement statement, + GroupBy groupBy, Integer limit, CompiledOffset offset, RowProjector rowProjector, + QueryPlan innerQueryPlan, Expression whereExpression) throws SQLException { + List orderByNodes = statement.getOrderBy(); + if (orderByNodes.isEmpty()) { + return OrderBy.EMPTY_ORDER_BY; + } + // for ungroupedAggregates as GROUP BY expression, check against an empty group by + ExpressionCompiler compiler; + if (groupBy.isUngroupedAggregate()) { + compiler = new StatelessExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY); + } else { + compiler = new ExpressionCompiler(context, groupBy); + } + OrderPreservingTracker tracker = null; + if (isTrackOrderByPreserving(statement)) { + // accumulate columns in ORDER BY + tracker = new OrderPreservingTracker(context, groupBy, Ordering.ORDERED, orderByNodes.size(), + null, innerQueryPlan, whereExpression); + } - // we can remove ORDER BY clauses in case of only COUNT(DISTINCT...) 
clauses - if (orderByExpressions.isEmpty() || groupBy.isUngroupedAggregate()) { - return OrderBy.EMPTY_ORDER_BY; + LinkedHashSet orderByExpressions = + Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size()); + for (OrderByNode node : orderByNodes) { + Expression expression = null; + if (node.isIntegerLiteral()) { + if (rowProjector == null) { + throw new IllegalStateException( + "rowProjector is null when there is LiteralParseNode in orderByNodes"); + } + Integer index = node.getValueIfIntegerLiteral(); + assert index != null; + int size = rowProjector.getColumnProjectors().size(); + if (index > size || index <= 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND).build() + .buildException(); } - // If we're ordering by the order returned by the scan, we don't need an order by - if (tracker != null && tracker.isOrderPreserving()) { - if (tracker.isReverse()) { - // Don't use reverse scan if: - // 1) we're using a skip scan, as our skip scan doesn't support this yet. - // 2) we have the FORWARD_SCAN hint set to choose to keep loading of column - // families on demand versus doing a reverse scan - // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types. - if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN) - && !context.getScanRanges().useSkipScanFilter() - && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED - && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY - && !statement.getHint().hasHint(Hint.FORWARD_SCAN)) { - if(offset.getByteOffset().isPresent()){ - throw new SQLException("Do not allow non-pk ORDER BY with RVC OFFSET"); - } - return OrderBy.REV_ROW_KEY_ORDER_BY; - } - } else { - return OrderBy.FWD_ROW_KEY_ORDER_BY; + expression = rowProjector.getColumnProjector(index - 1).getExpression(); + } else { + expression = node.getNode().accept(compiler); + // Detect mix of aggregate and non aggregates (i.e. ORDER BY txns, SUM(txns) + if (!expression.isStateless() && !compiler.isAggregate()) { + if (statement.isAggregate() || statement.isDistinct()) { + // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x + if (statement.isDistinct()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT) + .setMessage(expression.toString()).build().buildException(); } + ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString()); + } } - //If we were in row order this would be optimized out above - if(offset.getByteOffset().isPresent()){ - throw new RowValueConstructorOffsetNotCoercibleException("Do not allow non-pk ORDER BY with RVC OFFSET"); + } + if (!expression.isStateless()) { + boolean isAscending = node.isAscending(); + boolean isNullsLast = node.isNullsLast(); + if (tracker != null) { + tracker.track(expression, isAscending, isNullsLast); } - return new OrderBy(Lists.newArrayList(orderByExpressions.iterator())); + /** + * If we have a schema where column A is DESC, reverse the sort order since this is the + * order they actually are in. Reverse is required because the compiled OrderByExpression is + * used in {@link OrderedResultIterator}, {@link OrderedResultIterator} implements the + * compare based on binary representation, not the decoded value of corresponding dataType. 
+ */ + OrderByExpression orderByExpression = OrderByExpression + .createByCheckIfExpressionSortOrderDesc(expression, isNullsLast, isAscending); + orderByExpressions.add(orderByExpression); + } + compiler.reset(); } - public static boolean isTrackOrderByPreserving(SelectStatement selectStatement) { - return !selectStatement.isUnion(); + // If we are not ordered we shouldn't be using RVC Offset + // I think this makes sense for the pagination case but perhaps we can relax this for + // other use cases. + // Note If the table is salted we still mark as row ordered in this code path + if (offset.getByteOffset().isPresent() && orderByExpressions.isEmpty()) { + throw new RowValueConstructorOffsetNotAllowedInQueryException( + "RVC OFFSET requires either forceRowKeyOrder or explict ORDERBY with row key order"); } - private OrderByCompiler() { + // we can remove ORDER BY clauses in case of only COUNT(DISTINCT...) clauses + if (orderByExpressions.isEmpty() || groupBy.isUngroupedAggregate()) { + return OrderBy.EMPTY_ORDER_BY; } + // If we're ordering by the order returned by the scan, we don't need an order by + if (tracker != null && tracker.isOrderPreserving()) { + if (tracker.isReverse()) { + // Don't use reverse scan if: + // 1) we're using a skip scan, as our skip scan doesn't support this yet. + // 2) we have the FORWARD_SCAN hint set to choose to keep loading of column + // families on demand versus doing a reverse scan + // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it + // for such table types. + if ( + context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN) + && !context.getScanRanges().useSkipScanFilter() + && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED + && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY + && !statement.getHint().hasHint(Hint.FORWARD_SCAN) + ) { + if (offset.getByteOffset().isPresent()) { + throw new SQLException("Do not allow non-pk ORDER BY with RVC OFFSET"); + } + return OrderBy.REV_ROW_KEY_ORDER_BY; + } + } else { + return OrderBy.FWD_ROW_KEY_ORDER_BY; + } + } + // If we were in row order this would be optimized out above + if (offset.getByteOffset().isPresent()) { + throw new RowValueConstructorOffsetNotCoercibleException( + "Do not allow non-pk ORDER BY with RVC OFFSET"); + } + return new OrderBy(Lists.newArrayList(orderByExpressions.iterator())); + } + + public static boolean isTrackOrderByPreserving(SelectStatement selectStatement) { + return !selectStatement.isUnion(); + } + + private OrderByCompiler() { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java index 03c864bbde1..794a5eb9843 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/OrderPreservingTracker.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.compile; @@ -38,11 +46,10 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.TableRef; -import org.apache.phoenix.util.ExpressionUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterators; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ExpressionUtil; /** *
@@ -52,7 +59,7 @@
  * keeping track of each distinct group. We can only do this optimization if all the rows
  * for each group will be contiguous. For ORDER BY, we can drop the ORDER BY statement if
  * the order is preserved.
- * 
+ *
  * There are mainly four changes for refactoring this class after PHOENIX-5148:
  * 1.Add a {@link #getInputOrderBys} method to determine the input OrderBys through
  *   innerQueryPlan's output OrderBys, GroupBy of current QueryPlan or the rowKeyColumns of
@@ -78,584 +85,583 @@
  * 
*/ public class OrderPreservingTracker { - public enum Ordering {ORDERED, UNORDERED}; - - public static class Info { - private final OrderPreserving orderPreserving; - private final int pkPosition; - private final int slotSpan; - private final boolean ascending; - private final boolean nullsLast; - private TrackingOrderByExpression trackingOrderByExpression; - - public Info(int pkPosition, boolean ascending, boolean nullsLast) { - this.pkPosition = pkPosition; - this.orderPreserving = OrderPreserving.YES; - this.slotSpan = 1; - this.ascending = ascending; - this.nullsLast = nullsLast; - } - - public Info(int rowKeyColumnPosition, int rowKeySlotSpan, OrderPreserving orderPreserving, boolean ascending, boolean nullsLast) { - this.pkPosition = rowKeyColumnPosition; - this.slotSpan = rowKeySlotSpan; - this.orderPreserving = orderPreserving; - this.ascending = ascending; - this.nullsLast = nullsLast; - } + public enum Ordering { + ORDERED, + UNORDERED + }; + + public static class Info { + private final OrderPreserving orderPreserving; + private final int pkPosition; + private final int slotSpan; + private final boolean ascending; + private final boolean nullsLast; + private TrackingOrderByExpression trackingOrderByExpression; + + public Info(int pkPosition, boolean ascending, boolean nullsLast) { + this.pkPosition = pkPosition; + this.orderPreserving = OrderPreserving.YES; + this.slotSpan = 1; + this.ascending = ascending; + this.nullsLast = nullsLast; + } - public static List extractExpressions(List orderPreservingTrackInfos) { - List newExpressions = new ArrayList(orderPreservingTrackInfos.size()); - for(Info trackInfo : orderPreservingTrackInfos) { - newExpressions.add(trackInfo.getExpression()); - } - return newExpressions; - } + public Info(int rowKeyColumnPosition, int rowKeySlotSpan, OrderPreserving orderPreserving, + boolean ascending, boolean nullsLast) { + this.pkPosition = rowKeyColumnPosition; + this.slotSpan = rowKeySlotSpan; + this.orderPreserving = orderPreserving; + this.ascending = ascending; + this.nullsLast = nullsLast; + } - public Expression getExpression() { - return this.trackingOrderByExpression.expression; - } + public static List extractExpressions(List orderPreservingTrackInfos) { + List newExpressions = new ArrayList(orderPreservingTrackInfos.size()); + for (Info trackInfo : orderPreservingTrackInfos) { + newExpressions.add(trackInfo.getExpression()); + } + return newExpressions; + } - public boolean isAscending() { - return ascending; - } + public Expression getExpression() { + return this.trackingOrderByExpression.expression; + } - public boolean isNullsLast() { - return nullsLast; - } + public boolean isAscending() { + return ascending; } - private final StatementContext context; - private final GroupBy groupBy; - private final Ordering ordering; - private final int pkPositionOffset; - private Expression whereExpression; - private List trackingOrderByExpressions = - new LinkedList(); - private final List trackOrderByContexts; - private TrackOrderByContext selectedTrackOrderByContext = null; - private final List inputOrderBys; - - public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering, int nNodes) throws SQLException { - this(context, groupBy, ordering, nNodes, null, null, null); - } - - public OrderPreservingTracker( - StatementContext context, - GroupBy groupBy, - Ordering ordering, - int nNodes, - List inputOrderBys, - QueryPlan innerQueryPlan, - Expression whereExpression) throws SQLException { - - this.context = context; - 
this.groupBy = groupBy; - this.ordering = ordering; - this.whereExpression = whereExpression; - if (inputOrderBys != null) { - this.inputOrderBys = inputOrderBys; - this.pkPositionOffset = 0; - } else { - Pair, Integer> orderBysAndRowKeyColumnOffset = - getInputOrderBys(innerQueryPlan, groupBy, context); - this.inputOrderBys = orderBysAndRowKeyColumnOffset.getFirst(); - this.pkPositionOffset = orderBysAndRowKeyColumnOffset.getSecond(); - } - if (this.inputOrderBys.isEmpty()) { - this.trackOrderByContexts = Collections.emptyList(); - return; - } + public boolean isNullsLast() { + return nullsLast; + } + } + + private final StatementContext context; + private final GroupBy groupBy; + private final Ordering ordering; + private final int pkPositionOffset; + private Expression whereExpression; + private List trackingOrderByExpressions = + new LinkedList(); + private final List trackOrderByContexts; + private TrackOrderByContext selectedTrackOrderByContext = null; + private final List inputOrderBys; + + public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering, + int nNodes) throws SQLException { + this(context, groupBy, ordering, nNodes, null, null, null); + } + + public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering, + int nNodes, List inputOrderBys, QueryPlan innerQueryPlan, Expression whereExpression) + throws SQLException { + + this.context = context; + this.groupBy = groupBy; + this.ordering = ordering; + this.whereExpression = whereExpression; + if (inputOrderBys != null) { + this.inputOrderBys = inputOrderBys; + this.pkPositionOffset = 0; + } else { + Pair, Integer> orderBysAndRowKeyColumnOffset = + getInputOrderBys(innerQueryPlan, groupBy, context); + this.inputOrderBys = orderBysAndRowKeyColumnOffset.getFirst(); + this.pkPositionOffset = orderBysAndRowKeyColumnOffset.getSecond(); + } - this.trackOrderByContexts = new ArrayList(this.inputOrderBys.size()); - for(OrderBy inputOrderBy : this.inputOrderBys) { - this.trackOrderByContexts.add( - new TrackOrderByContext(nNodes, inputOrderBy)); - } + if (this.inputOrderBys.isEmpty()) { + this.trackOrderByContexts = Collections.emptyList(); + return; } - /** - * Infer input OrderBys, if the innerQueryPlan is null, we make the OrderBys from the pk columns of {@link PTable}. - */ - private static Pair, Integer> getInputOrderBys( - QueryPlan innerQueryPlan, - GroupBy groupBy, - StatementContext statementContext) throws SQLException { - if (!groupBy.isEmpty()) { - return Pair.newPair( - Collections.singletonList( - ExpressionUtil.convertGroupByToOrderBy(groupBy, false)), 0); - } - if (innerQueryPlan != null) { - return Pair.newPair(innerQueryPlan.getOutputOrderBys(), 0); - } + this.trackOrderByContexts = new ArrayList(this.inputOrderBys.size()); + for (OrderBy inputOrderBy : this.inputOrderBys) { + this.trackOrderByContexts.add(new TrackOrderByContext(nNodes, inputOrderBy)); + } + } + + /** + * Infer input OrderBys, if the innerQueryPlan is null, we make the OrderBys from the pk columns + * of {@link PTable}. 
+ */ + private static Pair, Integer> getInputOrderBys(QueryPlan innerQueryPlan, + GroupBy groupBy, StatementContext statementContext) throws SQLException { + if (!groupBy.isEmpty()) { + return Pair.newPair( + Collections.singletonList(ExpressionUtil.convertGroupByToOrderBy(groupBy, false)), 0); + } + if (innerQueryPlan != null) { + return Pair.newPair(innerQueryPlan.getOutputOrderBys(), 0); + } - TableRef tableRef = statementContext.getResolver().getTables().get(0); - if (!tableRef.getTable().rowKeyOrderOptimizable()) { - return Pair.newPair(Collections.emptyList(), 0); - } - PhoenixConnection phoenixConnection = statementContext.getConnection(); - Pair orderByAndRowKeyColumnOffset = - ExpressionUtil.getOrderByFromTable(tableRef, phoenixConnection, false); - OrderBy orderBy = orderByAndRowKeyColumnOffset.getFirst(); - Integer rowKeyColumnOffset = orderByAndRowKeyColumnOffset.getSecond(); - return Pair.newPair( - orderBy != OrderBy.EMPTY_ORDER_BY - ? Collections.singletonList(orderBy) - : Collections.emptyList(), - rowKeyColumnOffset); - } - - private class TrackOrderByContext { - private final List orderPreservingTrackedInfos; - private boolean isOrderPreserving = true; - private Boolean isReverse = null; - private int orderPreservingColumnCount = 0; - private int orderedTrackedInfosCount = 0; - private final TrackOrderPreservingExpressionVisitor trackOrderPreservingExpressionVisitor; - private final OrderBy inputOrderBy; - private int trackingOrderByExpressionCount = 0; - private boolean isOrderPreservingCalled = false; - - TrackOrderByContext(int orderByNodeCount, OrderBy inputOrderBy) { - this.trackOrderPreservingExpressionVisitor = new TrackOrderPreservingExpressionVisitor(inputOrderBy); - this.orderPreservingTrackedInfos = Lists.newArrayListWithExpectedSize(orderByNodeCount); - this.inputOrderBy = inputOrderBy; - } + TableRef tableRef = statementContext.getResolver().getTables().get(0); + if (!tableRef.getTable().rowKeyOrderOptimizable()) { + return Pair.newPair(Collections. emptyList(), 0); + } + PhoenixConnection phoenixConnection = statementContext.getConnection(); + Pair orderByAndRowKeyColumnOffset = + ExpressionUtil.getOrderByFromTable(tableRef, phoenixConnection, false); + OrderBy orderBy = orderByAndRowKeyColumnOffset.getFirst(); + Integer rowKeyColumnOffset = orderByAndRowKeyColumnOffset.getSecond(); + return Pair.newPair(orderBy != OrderBy.EMPTY_ORDER_BY + ? Collections.singletonList(orderBy) + : Collections. 
emptyList(), rowKeyColumnOffset); + } + + private class TrackOrderByContext { + private final List orderPreservingTrackedInfos; + private boolean isOrderPreserving = true; + private Boolean isReverse = null; + private int orderPreservingColumnCount = 0; + private int orderedTrackedInfosCount = 0; + private final TrackOrderPreservingExpressionVisitor trackOrderPreservingExpressionVisitor; + private final OrderBy inputOrderBy; + private int trackingOrderByExpressionCount = 0; + private boolean isOrderPreservingCalled = false; + + TrackOrderByContext(int orderByNodeCount, OrderBy inputOrderBy) { + this.trackOrderPreservingExpressionVisitor = + new TrackOrderPreservingExpressionVisitor(inputOrderBy); + this.orderPreservingTrackedInfos = Lists.newArrayListWithExpectedSize(orderByNodeCount); + this.inputOrderBy = inputOrderBy; + } - public void track(List trackingOrderByExpressions) { - this.trackingOrderByExpressionCount = trackingOrderByExpressions.size(); - trackingOrderByExpressions.forEach(trackingOrderByExpression -> { - Expression expression = trackingOrderByExpression.expression; - Info trackedInfo = expression.accept(trackOrderPreservingExpressionVisitor); - if (trackedInfo != null) { - trackedInfo.trackingOrderByExpression = trackingOrderByExpression; - orderPreservingTrackedInfos.add(trackedInfo); - } - }); + public void track(List trackingOrderByExpressions) { + this.trackingOrderByExpressionCount = trackingOrderByExpressions.size(); + trackingOrderByExpressions.forEach(trackingOrderByExpression -> { + Expression expression = trackingOrderByExpression.expression; + Info trackedInfo = expression.accept(trackOrderPreservingExpressionVisitor); + if (trackedInfo != null) { + trackedInfo.trackingOrderByExpression = trackingOrderByExpression; + orderPreservingTrackedInfos.add(trackedInfo); } + }); + } - private void checkAscendingAndNullsLast(Info trackedInfo) { - TrackingOrderByExpression trackingOrderByExpression = - trackedInfo.trackingOrderByExpression; - Expression expression = trackingOrderByExpression.expression; - Boolean isAscending = trackingOrderByExpression.isAscending; - Boolean isNullsLast = trackingOrderByExpression.isNullsLast; - - // If the expression is sorted in a different order than the specified sort order - // then the expressions are not order preserving. - if (isAscending != null && trackedInfo.ascending != isAscending.booleanValue()) { - if (isReverse == null) { - isReverse = true; - } else if (!isReverse){ - isOrderPreserving = false; - isReverse = false; - return; - } - } else { - if (isReverse == null) { - isReverse = false; - } else if (isReverse){ - isOrderPreserving = false; - isReverse = false; - return; - } - } - - assert isReverse != null; - if (isNullsLast != null && expression.isNullable()) { - if (trackedInfo.nullsLast == isNullsLast.booleanValue() - && isReverse.booleanValue() - || trackedInfo.nullsLast != isNullsLast.booleanValue() - && !isReverse.booleanValue()) { - isOrderPreserving = false; - isReverse = false; - return; - } - } - } + private void checkAscendingAndNullsLast(Info trackedInfo) { + TrackingOrderByExpression trackingOrderByExpression = trackedInfo.trackingOrderByExpression; + Expression expression = trackingOrderByExpression.expression; + Boolean isAscending = trackingOrderByExpression.isAscending; + Boolean isNullsLast = trackingOrderByExpression.isNullsLast; + + // If the expression is sorted in a different order than the specified sort order + // then the expressions are not order preserving. 
+ if (isAscending != null && trackedInfo.ascending != isAscending.booleanValue()) { + if (isReverse == null) { + isReverse = true; + } else if (!isReverse) { + isOrderPreserving = false; + isReverse = false; + return; + } + } else { + if (isReverse == null) { + isReverse = false; + } else if (isReverse) { + isOrderPreserving = false; + isReverse = false; + return; + } + } + + assert isReverse != null; + if (isNullsLast != null && expression.isNullable()) { + if ( + trackedInfo.nullsLast == isNullsLast.booleanValue() && isReverse.booleanValue() + || trackedInfo.nullsLast != isNullsLast.booleanValue() && !isReverse.booleanValue() + ) { + isOrderPreserving = false; + isReverse = false; + return; + } + } + } - /** - * Only valid AFTER call to isOrderPreserving. - * This value represents the input column count of {@link TrackOrderByContext#inputOrderBy} - * corresponding to longest continuous ordering columns returned by - * {@link TrackOrderByContext#getOrderPreservingTrackInfos}, it may not equal to the size - * of {@link TrackOrderByContext#getOrderPreservingTrackInfos}. - */ - public int getOrderPreservingColumnCount() { - if (!isOrderPreservingCalled) { - throw new IllegalStateException( - "getOrderPreservingColumnCount must be called after isOrderPreserving is called!"); - } - return orderPreservingColumnCount; - } + /** + * Only valid AFTER call to isOrderPreserving. This value represents the input column count of + * {@link TrackOrderByContext#inputOrderBy} corresponding to longest continuous ordering columns + * returned by {@link TrackOrderByContext#getOrderPreservingTrackInfos}, it may not equal to the + * size of {@link TrackOrderByContext#getOrderPreservingTrackInfos}. + */ + public int getOrderPreservingColumnCount() { + if (!isOrderPreservingCalled) { + throw new IllegalStateException( + "getOrderPreservingColumnCount must be called after isOrderPreserving is called!"); + } + return orderPreservingColumnCount; + } - /** - * Only valid AFTER call to isOrderPreserving - */ - public List getOrderPreservingTrackInfos() { - if (!isOrderPreservingCalled) { - throw new IllegalStateException( - "getOrderPreservingTrackInfos must be called after isOrderPreserving is called!"); - } - if (this.isOrderPreserving) { - return ImmutableList.copyOf(this.orderPreservingTrackedInfos); - } - if (this.orderedTrackedInfosCount <= 0) { - return Collections. emptyList(); - } - return ImmutableList.copyOf( - this.orderPreservingTrackedInfos.subList( - 0, this.orderedTrackedInfosCount)); - } + /** + * Only valid AFTER call to isOrderPreserving + */ + public List getOrderPreservingTrackInfos() { + if (!isOrderPreservingCalled) { + throw new IllegalStateException( + "getOrderPreservingTrackInfos must be called after isOrderPreserving is called!"); + } + if (this.isOrderPreserving) { + return ImmutableList.copyOf(this.orderPreservingTrackedInfos); + } + if (this.orderedTrackedInfosCount <= 0) { + return Collections. 
emptyList(); + } + return ImmutableList + .copyOf(this.orderPreservingTrackedInfos.subList(0, this.orderedTrackedInfosCount)); + } - public boolean isOrderPreserving() { - if (this.isOrderPreservingCalled) { - return isOrderPreserving; - } - - if (ordering == Ordering.UNORDERED) { - // Sort by position - Collections.sort(orderPreservingTrackedInfos, new Comparator() { - @Override - public int compare(Info o1, Info o2) { - int cmp = o1.pkPosition - o2.pkPosition; - if (cmp != 0) return cmp; - // After pk position, sort on reverse OrderPreserving ordinal: NO, YES_IF_LAST, YES - // In this way, if we have an ORDER BY over a YES_IF_LAST followed by a YES, we'll - // allow subsequent columns to be ordered. - return o2.orderPreserving.ordinal() - o1.orderPreserving.ordinal(); - } - }); - } - // Determine if there are any gaps in the PK columns (in which case we don't need - // to sort in the coprocessor because the keys will already naturally be in sorted - // order. - int prevSlotSpan = 1; - int prevPos = -1; - OrderPreserving prevOrderPreserving = OrderPreserving.YES; - this.orderedTrackedInfosCount = 0; - for (int i = 0; i < orderPreservingTrackedInfos.size(); i++) { - Info entry = orderPreservingTrackedInfos.get(i); - int pos = entry.pkPosition; - this.checkAscendingAndNullsLast(entry); - isOrderPreserving = isOrderPreserving && - entry.orderPreserving != OrderPreserving.NO && - prevOrderPreserving == OrderPreserving.YES && - (pos == prevPos || - pos - prevSlotSpan == prevPos || - hasEqualityConstraints(prevPos + prevSlotSpan, pos)); - if (!isOrderPreserving) { - break; - } - this.orderedTrackedInfosCount++; - prevPos = pos; - prevSlotSpan = entry.slotSpan; - prevOrderPreserving = entry.orderPreserving; - } - isOrderPreserving = isOrderPreserving - && this.orderPreservingTrackedInfos.size() - == this.trackingOrderByExpressionCount; - orderPreservingColumnCount = prevPos + prevSlotSpan + pkPositionOffset; - this.isOrderPreservingCalled = true; - return isOrderPreserving; - } + public boolean isOrderPreserving() { + if (this.isOrderPreservingCalled) { + return isOrderPreserving; + } + + if (ordering == Ordering.UNORDERED) { + // Sort by position + Collections.sort(orderPreservingTrackedInfos, new Comparator() { + @Override + public int compare(Info o1, Info o2) { + int cmp = o1.pkPosition - o2.pkPosition; + if (cmp != 0) return cmp; + // After pk position, sort on reverse OrderPreserving ordinal: NO, YES_IF_LAST, YES + // In this way, if we have an ORDER BY over a YES_IF_LAST followed by a YES, we'll + // allow subsequent columns to be ordered. + return o2.orderPreserving.ordinal() - o1.orderPreserving.ordinal(); + } + }); + } + // Determine if there are any gaps in the PK columns (in which case we don't need + // to sort in the coprocessor because the keys will already naturally be in sorted + // order. 
+ int prevSlotSpan = 1; + int prevPos = -1; + OrderPreserving prevOrderPreserving = OrderPreserving.YES; + this.orderedTrackedInfosCount = 0; + for (int i = 0; i < orderPreservingTrackedInfos.size(); i++) { + Info entry = orderPreservingTrackedInfos.get(i); + int pos = entry.pkPosition; + this.checkAscendingAndNullsLast(entry); + isOrderPreserving = isOrderPreserving && entry.orderPreserving != OrderPreserving.NO + && prevOrderPreserving == OrderPreserving.YES + && (pos == prevPos || pos - prevSlotSpan == prevPos + || hasEqualityConstraints(prevPos + prevSlotSpan, pos)); + if (!isOrderPreserving) { + break; + } + this.orderedTrackedInfosCount++; + prevPos = pos; + prevSlotSpan = entry.slotSpan; + prevOrderPreserving = entry.orderPreserving; + } + isOrderPreserving = isOrderPreserving + && this.orderPreservingTrackedInfos.size() == this.trackingOrderByExpressionCount; + orderPreservingColumnCount = prevPos + prevSlotSpan + pkPositionOffset; + this.isOrderPreservingCalled = true; + return isOrderPreserving; + } - private boolean hasEqualityConstraints(int startPos, int endPos) { - ScanRanges ranges = context.getScanRanges(); - // If a GROUP BY is being done, then the rows are ordered according to the GROUP BY key, - // not by the original row key order of the table (see PHOENIX-3451). - // We check each GROUP BY expression to see if it only references columns that are - // matched by equality constraints, in which case the expression itself would be constant. - for (int pos = startPos; pos < endPos; pos++) { - Expression expressionToCheckConstant = this.getExpressionToCheckConstant(pos); - IsConstantVisitor visitor = new IsConstantVisitor(ranges, whereExpression); - Boolean isConstant = expressionToCheckConstant.accept(visitor); - if (!Boolean.TRUE.equals(isConstant)) { - return false; - } - } - return true; - } + private boolean hasEqualityConstraints(int startPos, int endPos) { + ScanRanges ranges = context.getScanRanges(); + // If a GROUP BY is being done, then the rows are ordered according to the GROUP BY key, + // not by the original row key order of the table (see PHOENIX-3451). + // We check each GROUP BY expression to see if it only references columns that are + // matched by equality constraints, in which case the expression itself would be constant. 
+ for (int pos = startPos; pos < endPos; pos++) { + Expression expressionToCheckConstant = this.getExpressionToCheckConstant(pos); + IsConstantVisitor visitor = new IsConstantVisitor(ranges, whereExpression); + Boolean isConstant = expressionToCheckConstant.accept(visitor); + if (!Boolean.TRUE.equals(isConstant)) { + return false; + } + } + return true; + } - public boolean isReverse() { - if (!isOrderPreservingCalled) { - throw new IllegalStateException( - "isReverse must be called after isOrderPreserving is called!"); - } - if (!isOrderPreserving) { - throw new IllegalStateException( - "isReverse should only be called when isOrderPreserving is true!"); - } - return Boolean.TRUE.equals(isReverse); - } + public boolean isReverse() { + if (!isOrderPreservingCalled) { + throw new IllegalStateException( + "isReverse must be called after isOrderPreserving is called!"); + } + if (!isOrderPreserving) { + throw new IllegalStateException( + "isReverse should only be called when isOrderPreserving is true!"); + } + return Boolean.TRUE.equals(isReverse); + } - private Expression getExpressionToCheckConstant(int columnIndex) { - if (!groupBy.isEmpty()) { - List groupByExpressions = groupBy.getExpressions(); - assert columnIndex < groupByExpressions.size(); - return groupByExpressions.get(columnIndex); - } + private Expression getExpressionToCheckConstant(int columnIndex) { + if (!groupBy.isEmpty()) { + List groupByExpressions = groupBy.getExpressions(); + assert columnIndex < groupByExpressions.size(); + return groupByExpressions.get(columnIndex); + } - assert columnIndex < inputOrderBy.getOrderByExpressions().size(); - return inputOrderBy.getOrderByExpressions().get(columnIndex).getExpression(); - } + assert columnIndex < inputOrderBy.getOrderByExpressions().size(); + return inputOrderBy.getOrderByExpressions().get(columnIndex).getExpression(); } + } - private static class TrackingOrderByExpression - { - private Expression expression; - private Boolean isAscending; - private Boolean isNullsLast; + private static class TrackingOrderByExpression { + private Expression expression; + private Boolean isAscending; + private Boolean isNullsLast; - TrackingOrderByExpression( - Expression expression, Boolean isAscending, Boolean isNullsLast) { - this.expression = expression; - this.isAscending = isAscending; - this.isNullsLast = isNullsLast; - } + TrackingOrderByExpression(Expression expression, Boolean isAscending, Boolean isNullsLast) { + this.expression = expression; + this.isAscending = isAscending; + this.isNullsLast = isNullsLast; + } + } + + public void track(Expression expression) { + track(expression, null, null); + } + + public void track(Expression expression, Boolean isAscending, Boolean isNullsLast) { + TrackingOrderByExpression trackingOrderByExpression = + new TrackingOrderByExpression(expression, isAscending, isNullsLast); + this.trackingOrderByExpressions.add(trackingOrderByExpression); + } + + /** + * Only valid AFTER call to isOrderPreserving. This value represents the input column count + * corresponding to longest continuous ordering columns returned by + * {@link OrderPreservingTracker#getOrderPreservingTrackInfos}, it may not equal to the size of + * {@link OrderPreservingTracker#getOrderPreservingTrackInfos}. 
+ */ + public int getOrderPreservingColumnCount() { + if (this.selectedTrackOrderByContext == null) { + return 0; } + return this.selectedTrackOrderByContext.getOrderPreservingColumnCount(); + } + + /** + * Only valid AFTER call to isOrderPreserving + */ + public List getOrderPreservingTrackInfos() { + if (this.selectedTrackOrderByContext == null) { + return Collections. emptyList(); + } + return this.selectedTrackOrderByContext.getOrderPreservingTrackInfos(); + } - public void track(Expression expression) { - track(expression, null, null); + public boolean isOrderPreserving() { + if (this.selectedTrackOrderByContext != null) { + return this.selectedTrackOrderByContext.isOrderPreserving(); } - public void track(Expression expression, Boolean isAscending, Boolean isNullsLast) { - TrackingOrderByExpression trackingOrderByExpression = - new TrackingOrderByExpression(expression, isAscending, isNullsLast); - this.trackingOrderByExpressions.add(trackingOrderByExpression); + if (this.trackOrderByContexts.isEmpty()) { + return false; } - /** - * Only valid AFTER call to isOrderPreserving. - * This value represents the input column count corresponding to longest continuous ordering - * columns returned by {@link OrderPreservingTracker#getOrderPreservingTrackInfos}, it may not - * equal to the size of {@link OrderPreservingTracker#getOrderPreservingTrackInfos}. - */ - public int getOrderPreservingColumnCount() { - if(this.selectedTrackOrderByContext == null) { - return 0; - } - return this.selectedTrackOrderByContext.getOrderPreservingColumnCount(); + if (this.trackingOrderByExpressions.isEmpty()) { + return false; } /** - * Only valid AFTER call to isOrderPreserving + * at most only one TrackOrderByContext can meet isOrderPreserving is true */ - public List getOrderPreservingTrackInfos() { - if(this.selectedTrackOrderByContext == null) { - return Collections. emptyList(); - } - return this.selectedTrackOrderByContext.getOrderPreservingTrackInfos(); + for (TrackOrderByContext trackOrderByContext : this.trackOrderByContexts) { + trackOrderByContext.track(trackingOrderByExpressions); + if (trackOrderByContext.isOrderPreserving()) { + this.selectedTrackOrderByContext = trackOrderByContext; + break; + } + + if (this.selectedTrackOrderByContext == null) { + this.selectedTrackOrderByContext = trackOrderByContext; + } } + return this.selectedTrackOrderByContext.isOrderPreserving(); + } - public boolean isOrderPreserving() { - if (this.selectedTrackOrderByContext != null) { - return this.selectedTrackOrderByContext.isOrderPreserving(); - } - - if (this.trackOrderByContexts.isEmpty()) { - return false; - } - - if (this.trackingOrderByExpressions.isEmpty()) { - return false; - } + public boolean isReverse() { + if (this.selectedTrackOrderByContext == null) { + throw new IllegalStateException( + "isReverse should only be called when isOrderPreserving is true!"); + } + return this.selectedTrackOrderByContext.isReverse(); + } + + /** + * Determines if an expression is held constant. Only works for columns in the PK currently, as we + * only track whether PK columns are held constant. 
+ */ + private static class IsConstantVisitor extends StatelessTraverseAllExpressionVisitor { + private final ScanRanges scanRanges; + private final Expression whereExpression; + + public IsConstantVisitor(ScanRanges scanRanges, Expression whereExpression) { + this.scanRanges = scanRanges; + this.whereExpression = whereExpression; + } - /** - * at most only one TrackOrderByContext can meet isOrderPreserving is true - */ - for(TrackOrderByContext trackOrderByContext : this.trackOrderByContexts) { - trackOrderByContext.track(trackingOrderByExpressions); - if(trackOrderByContext.isOrderPreserving()) { - this.selectedTrackOrderByContext = trackOrderByContext; - break; - } - - if(this.selectedTrackOrderByContext == null) { - this.selectedTrackOrderByContext = trackOrderByContext; - } - } - return this.selectedTrackOrderByContext.isOrderPreserving(); + @Override + public Boolean defaultReturn(Expression node, List returnValues) { + if ( + !ExpressionUtil.isContantForStatement(node) + || returnValues.size() < node.getChildren().size() + ) { + return Boolean.FALSE; + } + for (Boolean returnValue : returnValues) { + if (!returnValue) { + return Boolean.FALSE; + } + } + return Boolean.TRUE; } - public boolean isReverse() { - if(this.selectedTrackOrderByContext == null) { - throw new IllegalStateException("isReverse should only be called when isOrderPreserving is true!"); - } - return this.selectedTrackOrderByContext.isReverse(); + @Override + public Boolean visit(RowKeyColumnExpression node) { + return scanRanges.hasEqualityConstraint(node.getPosition()); } - /** - * - * Determines if an expression is held constant. Only works for columns in the PK currently, - * as we only track whether PK columns are held constant. - * - */ - private static class IsConstantVisitor extends StatelessTraverseAllExpressionVisitor { - private final ScanRanges scanRanges; - private final Expression whereExpression; - - public IsConstantVisitor(ScanRanges scanRanges, Expression whereExpression) { - this.scanRanges = scanRanges; - this.whereExpression = whereExpression; - } - - @Override - public Boolean defaultReturn(Expression node, List returnValues) { - if (!ExpressionUtil.isContantForStatement(node) || - returnValues.size() < node.getChildren().size()) { - return Boolean.FALSE; - } - for (Boolean returnValue : returnValues) { - if (!returnValue) { - return Boolean.FALSE; - } - } - return Boolean.TRUE; - } + @Override + public Boolean visit(LiteralExpression node) { + return Boolean.TRUE; + } - @Override - public Boolean visit(RowKeyColumnExpression node) { - return scanRanges.hasEqualityConstraint(node.getPosition()); - } + @Override + public Boolean visit(KeyValueColumnExpression keyValueColumnExpression) { + return ExpressionUtil.isColumnExpressionConstant(keyValueColumnExpression, whereExpression); + } - @Override - public Boolean visit(LiteralExpression node) { - return Boolean.TRUE; - } + @Override + public Boolean visit(ProjectedColumnExpression projectedColumnExpression) { + return ExpressionUtil.isColumnExpressionConstant(projectedColumnExpression, whereExpression); + } + } + + /** + * Visitor used to determine if order is preserved across a list of expressions (GROUP BY or ORDER + * BY expressions) + */ + private static class TrackOrderPreservingExpressionVisitor + extends StatelessTraverseNoExpressionVisitor { + private Map> expressionToPositionAndOrderByExpression; + + public TrackOrderPreservingExpressionVisitor(OrderBy orderBy) { + if (orderBy.isEmpty()) { + this.expressionToPositionAndOrderByExpression 
= + Collections.> emptyMap(); + return; + } + List orderByExpressions = orderBy.getOrderByExpressions(); + this.expressionToPositionAndOrderByExpression = + new HashMap>(orderByExpressions.size()); + int index = 0; + for (OrderByExpression orderByExpression : orderByExpressions) { + this.expressionToPositionAndOrderByExpression.put(orderByExpression.getExpression(), + new Pair(index++, orderByExpression)); + } + } - @Override - public Boolean visit(KeyValueColumnExpression keyValueColumnExpression) { - return ExpressionUtil.isColumnExpressionConstant(keyValueColumnExpression, whereExpression); - } - @Override - public Boolean visit(ProjectedColumnExpression projectedColumnExpression) { - return ExpressionUtil.isColumnExpressionConstant(projectedColumnExpression, whereExpression); - } + @Override + public Info defaultReturn(Expression expression, List childInfos) { + return match(expression); } - /** - * - * Visitor used to determine if order is preserved across a list of expressions (GROUP BY or ORDER BY expressions) - * - */ - private static class TrackOrderPreservingExpressionVisitor extends StatelessTraverseNoExpressionVisitor { - private Map> expressionToPositionAndOrderByExpression; - - public TrackOrderPreservingExpressionVisitor(OrderBy orderBy) { - if(orderBy.isEmpty()) { - this.expressionToPositionAndOrderByExpression = Collections.> emptyMap(); - return; - } - List orderByExpressions = orderBy.getOrderByExpressions(); - this.expressionToPositionAndOrderByExpression = new HashMap>(orderByExpressions.size()); - int index = 0; - for(OrderByExpression orderByExpression : orderByExpressions) { - this.expressionToPositionAndOrderByExpression.put( - orderByExpression.getExpression(), - new Pair(index++, orderByExpression)); - } - } - @Override - public Info defaultReturn(Expression expression, List childInfos) { - return match(expression); - } + @Override + public Info visit(RowKeyColumnExpression rowKeyColumnExpression) { + return match(rowKeyColumnExpression); + } - @Override - public Info visit(RowKeyColumnExpression rowKeyColumnExpression) { - return match(rowKeyColumnExpression); - } + @Override + public Info visit(KeyValueColumnExpression keyValueColumnExpression) { + return match(keyValueColumnExpression); + } - @Override - public Info visit(KeyValueColumnExpression keyValueColumnExpression) { - return match(keyValueColumnExpression); - } + @Override + public Info visit(ProjectedColumnExpression projectedColumnExpression) { + return match(projectedColumnExpression); + } - @Override - public Info visit(ProjectedColumnExpression projectedColumnExpression) { - return match(projectedColumnExpression); - } + private Info match(Expression expression) { + Pair positionAndOrderByExpression = + this.expressionToPositionAndOrderByExpression.get(expression); + if (positionAndOrderByExpression == null) { + return null; + } + return new Info(positionAndOrderByExpression.getFirst(), + positionAndOrderByExpression.getSecond().isAscending(), + positionAndOrderByExpression.getSecond().isNullsLast()); + } - private Info match(Expression expression) - { - Pair positionAndOrderByExpression = this.expressionToPositionAndOrderByExpression.get(expression); - if(positionAndOrderByExpression == null) { - return null; - } - return new Info( - positionAndOrderByExpression.getFirst(), - positionAndOrderByExpression.getSecond().isAscending(), - positionAndOrderByExpression.getSecond().isNullsLast()); - } + @Override + public Iterator visitEnter(ScalarFunction node) { + return node.preservesOrder() == 
OrderPreserving.NO + ? Collections. emptyIterator() + : Iterators.singletonIterator(node.getChildren().get(node.getKeyFormationTraversalIndex())); + } - @Override - public Iterator visitEnter(ScalarFunction node) { - return node.preservesOrder() == OrderPreserving.NO ? Collections. emptyIterator() : Iterators - .singletonIterator(node.getChildren().get(node.getKeyFormationTraversalIndex())); - } + @Override + public Info visitLeave(ScalarFunction node, List l) { + if (l.isEmpty()) { + return null; + } + Info info = l.get(0); + // Keep the minimum value between this function and the current value, + // so that we never increase OrderPreserving from NO or YES_IF_LAST. + OrderPreserving orderPreserving = OrderPreserving.values()[Math + .min(node.preservesOrder().ordinal(), info.orderPreserving.ordinal())]; + Expression childExpression = node.getChildren().get(node.getKeyFormationTraversalIndex()); + boolean sortOrderIsSame = node.getSortOrder() == childExpression.getSortOrder(); + if (orderPreserving == info.orderPreserving && sortOrderIsSame) { + return info; + } + return new Info(info.pkPosition, info.slotSpan, orderPreserving, + sortOrderIsSame ? info.ascending : !info.ascending, info.nullsLast); + } - @Override - public Info visitLeave(ScalarFunction node, List l) { - if (l.isEmpty()) { return null; } - Info info = l.get(0); - // Keep the minimum value between this function and the current value, - // so that we never increase OrderPreserving from NO or YES_IF_LAST. - OrderPreserving orderPreserving = OrderPreserving.values()[Math.min(node.preservesOrder().ordinal(), info.orderPreserving.ordinal())]; - Expression childExpression = node.getChildren().get( - node.getKeyFormationTraversalIndex()); - boolean sortOrderIsSame = node.getSortOrder() == childExpression.getSortOrder(); - if (orderPreserving == info.orderPreserving && sortOrderIsSame) { - return info; - } - return new Info( - info.pkPosition, - info.slotSpan, - orderPreserving, - sortOrderIsSame ? 
info.ascending : !info.ascending, - info.nullsLast); - } + @Override + public Iterator visitEnter(CoerceExpression node) { + return node.getChildren().iterator(); + } - @Override - public Iterator visitEnter(CoerceExpression node) { - return node.getChildren().iterator(); - } + @Override + public Info visitLeave(CoerceExpression node, List l) { + if (l.isEmpty()) { + return null; + } + return l.get(0); + } - @Override - public Info visitLeave(CoerceExpression node, List l) { - if (l.isEmpty()) { return null; } - return l.get(0); - } - - @Override - public Iterator visitEnter(RowValueConstructorExpression node) { - return node.getChildren().iterator(); - } + @Override + public Iterator visitEnter(RowValueConstructorExpression node) { + return node.getChildren().iterator(); + } - @Override - public Info visitLeave(RowValueConstructorExpression node, List l) { - // Child expression returned null and was filtered, so not order preserving - if (l.size() != node.getChildren().size()) { return null; } - Info firstInfo = l.get(0); - Info lastInfo = firstInfo; - // Check that pkPos are consecutive which is the only way a RVC can be order preserving - for (int i = 1; i < l.size(); i++) { - // not order preserving since it's not last - if (lastInfo.orderPreserving == OrderPreserving.YES_IF_LAST) { return null; } - Info info = l.get(i); - // not order preserving since there's a gap in the pk - if (info.pkPosition != lastInfo.pkPosition + 1) { - return null; - } - if(info.ascending != lastInfo.ascending) { - return null; - } - if(info.nullsLast != lastInfo.nullsLast) { - return null; - } - lastInfo = info; - } - return new Info(firstInfo.pkPosition, l.size(), lastInfo.orderPreserving, lastInfo.ascending, lastInfo.nullsLast); - } + @Override + public Info visitLeave(RowValueConstructorExpression node, List l) { + // Child expression returned null and was filtered, so not order preserving + if (l.size() != node.getChildren().size()) { + return null; + } + Info firstInfo = l.get(0); + Info lastInfo = firstInfo; + // Check that pkPos are consecutive which is the only way a RVC can be order preserving + for (int i = 1; i < l.size(); i++) { + // not order preserving since it's not last + if (lastInfo.orderPreserving == OrderPreserving.YES_IF_LAST) { + return null; + } + Info info = l.get(i); + // not order preserving since there's a gap in the pk + if (info.pkPosition != lastInfo.pkPosition + 1) { + return null; + } + if (info.ascending != lastInfo.ascending) { + return null; + } + if (info.nullsLast != lastInfo.nullsLast) { + return null; + } + lastInfo = info; + } + return new Info(firstInfo.pkPosition, l.size(), lastInfo.orderPreserving, lastInfo.ascending, + lastInfo.nullsLast); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java index e7f82757382..ddc98882025 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -50,323 +50,322 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.TransactionUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** - * - * Class that compiles plan to update data values after a DDL command - * executes. - * - * TODO: get rid of this ugly code and just go through the standard APIs. - * The only time we may still need this is to manage updating the empty - * key value, as we sometimes need to "go back through time" to adjust + * Class that compiles plan to update data values after a DDL command executes. TODO: get rid of + * this ugly code and just go through the standard APIs. The only time we may still need this is to + * manage updating the empty key value, as we sometimes need to "go back through time" to adjust * this. - * * @since 0.1 */ public class PostDDLCompiler { - private final PhoenixConnection connection; - private final Scan scan; - - public PostDDLCompiler(PhoenixConnection connection) { - this(connection, new Scan()); + private final PhoenixConnection connection; + private final Scan scan; + + public PostDDLCompiler(PhoenixConnection connection) { + this(connection, new Scan()); + } + + public PostDDLCompiler(PhoenixConnection connection, Scan scan) { + this.connection = connection; + this.scan = scan; + scan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, QueryConstants.TRUE); + } + + public MutationPlan compile(final List tableRefs, final byte[] emptyCF, + final List projectCFs, final List deleteList, final long timestamp) + throws SQLException { + PhoenixStatement statement = new PhoenixStatement(connection); + final StatementContext context = new StatementContext(statement, + new MultipleTableRefColumnResolver(tableRefs), scan, new SequenceManager(statement)); + return new PostDDLMutationPlan(context, tableRefs, timestamp, emptyCF, deleteList, projectCFs); + } + + private static class MultipleTableRefColumnResolver implements ColumnResolver { + + private final List tableRefs; + + public MultipleTableRefColumnResolver(List tableRefs) { + this.tableRefs = tableRefs; } - public PostDDLCompiler(PhoenixConnection connection, Scan scan) { - this.connection = connection; - this.scan = scan; - scan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, QueryConstants.TRUE); + @Override + public List getTables() { + return tableRefs; } - public MutationPlan compile(final List tableRefs, final byte[] emptyCF, final List projectCFs, final List deleteList, - final long timestamp) throws SQLException { - PhoenixStatement statement = new PhoenixStatement(connection); - final StatementContext context = new StatementContext( - statement, - new MultipleTableRefColumnResolver(tableRefs), - scan, - new SequenceManager(statement)); - return new PostDDLMutationPlan(context, tableRefs, timestamp, emptyCF, deleteList, projectCFs); + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new 
UnsupportedOperationException(); } - private static class MultipleTableRefColumnResolver implements ColumnResolver { - - private final List tableRefs; - - public MultipleTableRefColumnResolver(List tableRefs) { - this.tableRefs = tableRefs; - } - - @Override - public List getTables() { - return tableRefs; - } - - @Override - public TableRef resolveTable(String schemaName, String tableName) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) - throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public List getFunctions() { - return Collections.emptyList(); - } + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + throw new UnsupportedOperationException(); + } - @Override - public PFunction resolveFunction(String functionName) - throws SQLException { - throw new FunctionNotFoundException(functionName); - } + @Override + public List getFunctions() { + return Collections. emptyList(); + } - @Override - public boolean hasUDFs() { - return false; - } + @Override + public PFunction resolveFunction(String functionName) throws SQLException { + throw new FunctionNotFoundException(functionName); + } - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - throw new SchemaNotFoundException(schemaName); - } + @Override + public boolean hasUDFs() { + return false; + } - @Override - public List getSchemas() { - throw new UnsupportedOperationException(); - } + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + throw new SchemaNotFoundException(schemaName); + } + @Override + public List getSchemas() { + throw new UnsupportedOperationException(); } - private class PostDDLMutationPlan extends BaseMutationPlan { - - private final StatementContext context; - private final List tableRefs; - private final long timestamp; - private final byte[] emptyCF; - private final List deleteList; - private final List projectCFs; - - public PostDDLMutationPlan(StatementContext context, List tableRefs, long timestamp, byte[] emptyCF, List deleteList, List projectCFs) { - super(context, Operation.UPSERT); - this.context = context; - this.tableRefs = tableRefs; - this.timestamp = timestamp; - this.emptyCF = emptyCF; - this.deleteList = deleteList; - this.projectCFs = projectCFs; - } + } + + private class PostDDLMutationPlan extends BaseMutationPlan { + + private final StatementContext context; + private final List tableRefs; + private final long timestamp; + private final byte[] emptyCF; + private final List deleteList; + private final List projectCFs; + + public PostDDLMutationPlan(StatementContext context, List tableRefs, long timestamp, + byte[] emptyCF, List deleteList, List projectCFs) { + super(context, Operation.UPSERT); + this.context = context; + this.tableRefs = tableRefs; + this.timestamp = timestamp; + this.emptyCF = emptyCF; + this.deleteList = deleteList; + this.projectCFs = projectCFs; + } - @Override - public MutationState execute() throws SQLException { - if (tableRefs.isEmpty()) { - return new MutationState(0, 1000, connection); - } - boolean wasAutoCommit = connection.getAutoCommit(); - try { - connection.setAutoCommit(true); - SQLException sqlE = null; + @Override + public MutationState execute() throws SQLException { + if (tableRefs.isEmpty()) { + return new MutationState(0, 1000, connection); + } + boolean wasAutoCommit = 
connection.getAutoCommit(); + try { + connection.setAutoCommit(true); + SQLException sqlE = null; + /* + * Handles: 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows + * for a DROP INDEX; 2) deletion of all column values for a ALTER TABLE DROP COLUMN 3) + * updating the necessary rows to have an empty KV 4) updating table stats + */ + long totalMutationCount = 0; + for (final TableRef tableRef : tableRefs) { + Scan scan = ScanUtil.newScan(context.getScan()); + SelectStatement select = SelectStatement.COUNT_ONE; + // We need to use this tableRef + ColumnResolver resolver = new SingleTableRefColumnResolver(tableRef); + PhoenixStatement statement = new PhoenixStatement(connection); + StatementContext context = + new StatementContext(statement, resolver, scan, new SequenceManager(statement)); + long ts = timestamp; + // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server + // timestamp. + // Not sure what the fix should be. We don't need conflict detection nor filtering of + // invalid transactions + // in this case, so maybe this is ok. + if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) { + ts = TransactionUtil.convertToNanoseconds(ts); + } + ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts); + if (emptyCF != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_CF, emptyCF); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER, + EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst()); + } + ServerCache cache = null; + try { + if (deleteList != null) { + if (deleteList.isEmpty()) { + scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, + QueryConstants.TRUE); + // In the case of a row deletion, add index metadata so mutable secondary indexing + // works /* - * Handles: - * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX; - * 2) deletion of all column values for a ALTER TABLE DROP COLUMN - * 3) updating the necessary rows to have an empty KV - * 4) updating table stats + * TODO: we currently manually run a scan to delete the index data here + * ImmutableBytesWritable ptr = context.getTempPtr(); + * tableRef.getTable().getIndexMaintainers(ptr); if (ptr.getLength() > 0) { + * IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, + * tableRef); cache = client.addIndexMetadataCache(context.getScanRanges(), ptr); + * byte[] uuidValue = cache.getId(); scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, + * uuidValue); } */ - long totalMutationCount = 0; - for (final TableRef tableRef : tableRefs) { - Scan scan = ScanUtil.newScan(context.getScan()); - SelectStatement select = SelectStatement.COUNT_ONE; - // We need to use this tableRef - ColumnResolver resolver = new SingleTableRefColumnResolver(tableRef); - PhoenixStatement statement = new PhoenixStatement(connection); - StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement)); - long ts = timestamp; - // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp. - // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid transactions - // in this case, so maybe this is ok. 
- if (ts!= HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) { - ts = TransactionUtil.convertToNanoseconds(ts); - } - ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts); - if (emptyCF != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_CF, emptyCF); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst()); - } - ServerCache cache = null; - try { - if (deleteList != null) { - if (deleteList.isEmpty()) { - scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_AGG, QueryConstants.TRUE); - // In the case of a row deletion, add index metadata so mutable secondary indexing works - /* TODO: we currently manually run a scan to delete the index data here - ImmutableBytesWritable ptr = context.getTempPtr(); - tableRef.getTable().getIndexMaintainers(ptr); - if (ptr.getLength() > 0) { - IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef); - cache = client.addIndexMetadataCache(context.getScanRanges(), ptr); - byte[] uuidValue = cache.getId(); - scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - } - */ - } else { - // In the case of the empty key value column family changing, do not send the index - // metadata, as we're currently managing this from the client. It's possible for the - // data empty column family to stay the same, while the index empty column family - // changes. - PColumn column = deleteList.get(0); - byte[] cq = column.getColumnQualifierBytes(); - if (emptyCF == null) { - scan.addColumn(column.getFamilyName().getBytes(), cq); - } - scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CF, column.getFamilyName().getBytes()); - scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CQ, cq); - } - } - List columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size()); - if (projectCFs == null) { - for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) { - columnFamilies.add(family.getName().getBytes()); - } - } else { - for (byte[] projectCF : projectCFs) { - columnFamilies.add(projectCF); - } - } - // Need to project all column families into the scan, since we haven't yet created our empty key value - RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY); - context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY); - // Explicitly project these column families and don't project the empty key value, - // since at this point we haven't added the empty key value everywhere. - if (columnFamilies != null) { - scan.getFamilyMap().clear(); - for (byte[] family : columnFamilies) { - scan.addFamily(family); - } - projector = new RowProjector(projector,false); - } - // Ignore exceptions due to not being able to resolve any view columns, - // as this just means the view is invalid. Continue on and try to perform - // any other Post DDL operations. - try { - // Since dropping a VIEW does not affect the underlying data, we do - // not need to pass through the view statement here. 
- WhereCompiler.compile(context, select); // Push where clause into scan - } catch (ColumnFamilyNotFoundException e) { - continue; - } catch (ColumnNotFoundException e) { - continue; - } catch (AmbiguousColumnException e) { - continue; - } - QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, - OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null); - try { - ResultIterator iterator = plan.iterator(); - try { - Tuple row = iterator.next(); - ImmutableBytesWritable ptr = context.getTempPtr(); - totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr); - } catch (SQLException e) { - sqlE = e; - } finally { - try { - iterator.close(); - } catch (SQLException e) { - if (sqlE == null) { - sqlE = e; - } else { - sqlE.setNextException(e); - } - } finally { - if (sqlE != null) { - throw sqlE; - } - } - } - } catch (TableNotFoundException e) { - // Ignore and continue, as HBase throws when table hasn't been written to - // FIXME: Remove if this is fixed in 0.96 - } - } finally { - if (cache != null) { // Remove server cache if there is one - cache.close(); - } - } - + } else { + // In the case of the empty key value column family changing, do not send the index + // metadata, as we're currently managing this from the client. It's possible for the + // data empty column family to stay the same, while the index empty column family + // changes. + PColumn column = deleteList.get(0); + byte[] cq = column.getColumnQualifierBytes(); + if (emptyCF == null) { + scan.addColumn(column.getFamilyName().getBytes(), cq); } - final long count = totalMutationCount; - return new MutationState(1, 1000, connection) { - @Override - public long getUpdateCount() { - return count; - } - }; - } finally { - if (!wasAutoCommit) connection.setAutoCommit(wasAutoCommit); + scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CF, + column.getFamilyName().getBytes()); + scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_CQ, cq); + } } - } - - private class SingleTableRefColumnResolver implements ColumnResolver { - private final TableRef tableRef; - - public SingleTableRefColumnResolver(TableRef tableRef) { - this.tableRef = tableRef; - } - - @Override - public List getTables() { - return Collections.singletonList(tableRef); + List columnFamilies = + Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size()); + if (projectCFs == null) { + for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) { + columnFamilies.add(family.getName().getBytes()); + } + } else { + for (byte[] projectCF : projectCFs) { + columnFamilies.add(projectCF); + } } - - @Override - public List getFunctions() { - return Collections.emptyList(); + // Need to project all column families into the scan, since we haven't yet created our + // empty key value + RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, + GroupBy.EMPTY_GROUP_BY); + context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY); + // Explicitly project these column families and don't project the empty key value, + // since at this point we haven't added the empty key value everywhere. 
+ if (columnFamilies != null) { + scan.getFamilyMap().clear(); + for (byte[] family : columnFamilies) { + scan.addFamily(family); + } + projector = new RowProjector(projector, false); } - - ; - - @Override - public TableRef resolveTable(String schemaName, String tableName) - throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - PColumn column = tableName != null - ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) - : tableRef.getTable().getColumnForColumnName(colName); - return new ColumnRef(tableRef, column.getPosition()); - } - - @Override - public PFunction resolveFunction(String functionName) throws SQLException { - throw new UnsupportedOperationException(); + // Ignore exceptions due to not being able to resolve any view columns, + // as this just means the view is invalid. Continue on and try to perform + // any other Post DDL operations. + try { + // Since dropping a VIEW does not affect the underlying data, we do + // not need to pass through the view statement here. + WhereCompiler.compile(context, select); // Push where clause into scan + } catch (ColumnFamilyNotFoundException e) { + continue; + } catch (ColumnNotFoundException e) { + continue; + } catch (AmbiguousColumnException e) { + continue; } - - @Override - public boolean hasUDFs() { - return false; + QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, + OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null); + try { + ResultIterator iterator = plan.iterator(); + try { + Tuple row = iterator.next(); + ImmutableBytesWritable ptr = context.getTempPtr(); + totalMutationCount += + (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr); + } catch (SQLException e) { + sqlE = e; + } finally { + try { + iterator.close(); + } catch (SQLException e) { + if (sqlE == null) { + sqlE = e; + } else { + sqlE.setNextException(e); + } + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + } catch (TableNotFoundException e) { + // Ignore and continue, as HBase throws when table hasn't been written to + // FIXME: Remove if this is fixed in 0.96 } - - @Override - public List getSchemas() { - throw new UnsupportedOperationException(); + } finally { + if (cache != null) { // Remove server cache if there is one + cache.close(); } + } - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - throw new SchemaNotFoundException(schemaName); - } } + final long count = totalMutationCount; + return new MutationState(1, 1000, connection) { + @Override + public long getUpdateCount() { + return count; + } + }; + } finally { + if (!wasAutoCommit) connection.setAutoCommit(wasAutoCommit); + } + } + + private class SingleTableRefColumnResolver implements ColumnResolver { + private final TableRef tableRef; + + public SingleTableRefColumnResolver(TableRef tableRef) { + this.tableRef = tableRef; + } + + @Override + public List getTables() { + return Collections.singletonList(tableRef); + } + + @Override + public List getFunctions() { + return Collections.emptyList(); + } + + ; + + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + PColumn column = tableName != null + ? 
tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) + : tableRef.getTable().getColumnForColumnName(colName); + return new ColumnRef(tableRef, column.getPosition()); + } + + @Override + public PFunction resolveFunction(String functionName) throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasUDFs() { + return false; + } + + @Override + public List getSchemas() { + throw new UnsupportedOperationException(); + } + + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + throw new SchemaNotFoundException(schemaName); + } } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java index ec6fcf3d716..02091108699 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.util.IndexUtil.INDEX_COLUMN_NAME_SEP; + import java.sql.SQLException; import java.util.List; @@ -27,125 +29,127 @@ import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import static org.apache.phoenix.util.IndexUtil.INDEX_COLUMN_NAME_SEP; - - /** - * Class that compiles plan to generate initial data values after a DDL command for - * index table. + * Class that compiles plan to generate initial data values after a DDL command for index table. 
*/ public class PostIndexDDLCompiler { - private final PhoenixConnection connection; - private final TableRef dataTableRef; - private List indexColumnNames; - private List dataColumnNames; - private String selectQuery; - private boolean forTransform = false; - - public PostIndexDDLCompiler(PhoenixConnection connection, TableRef dataTableRef) { - this.connection = connection; - this.dataTableRef = dataTableRef; - indexColumnNames = Lists.newArrayList(); - dataColumnNames = Lists.newArrayList(); - } + private final PhoenixConnection connection; + private final TableRef dataTableRef; + private List indexColumnNames; + private List dataColumnNames; + private String selectQuery; + private boolean forTransform = false; + + public PostIndexDDLCompiler(PhoenixConnection connection, TableRef dataTableRef) { + this.connection = connection; + this.dataTableRef = dataTableRef; + indexColumnNames = Lists.newArrayList(); + dataColumnNames = Lists.newArrayList(); + } - public PostIndexDDLCompiler(PhoenixConnection connection, TableRef dataTableRef, boolean forTransform) { - this(connection, dataTableRef); - this.forTransform = forTransform; + public PostIndexDDLCompiler(PhoenixConnection connection, TableRef dataTableRef, + boolean forTransform) { + this(connection, dataTableRef); + this.forTransform = forTransform; + } + + public MutationPlan compile(final PTable indexTable) throws SQLException { + /* + * Compiles an UPSERT SELECT command to read from the data table and populate the index table + */ + StringBuilder indexColumns = new StringBuilder(); + StringBuilder dataColumns = new StringBuilder(); + + // Add the pk index columns + List indexPKColumns = indexTable.getPKColumns(); + int nIndexPKColumns = indexTable.getPKColumns().size(); + boolean isSalted = indexTable.getBucketNum() != null; + boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant(); + boolean isViewIndex = indexTable.getViewIndexId() != null; + int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 1 : 0); + for (int i = posOffset; i < nIndexPKColumns; i++) { + PColumn col = indexPKColumns.get(i); + String indexColName = col.getName().getString(); + // need to escape backslash as this used in the SELECT statement + String dataColName = col.getExpressionStr() == null + ? col.getName().getString() + : StringUtil.escapeBackslash(col.getExpressionStr()); + dataColumns.append(dataColName).append(","); + indexColumns.append('"').append(indexColName).append("\","); + indexColumnNames.add(indexColName); + dataColumnNames.add(dataColName); } - public MutationPlan compile(final PTable indexTable) throws SQLException { - /* - * Compiles an UPSERT SELECT command to read from the data table and populate the index table - */ - StringBuilder indexColumns = new StringBuilder(); - StringBuilder dataColumns = new StringBuilder(); - - // Add the pk index columns - List indexPKColumns = indexTable.getPKColumns(); - int nIndexPKColumns = indexTable.getPKColumns().size(); - boolean isSalted = indexTable.getBucketNum() != null; - boolean isMultiTenant = connection.getTenantId() != null && indexTable.isMultiTenant(); - boolean isViewIndex = indexTable.getViewIndexId()!=null; - int posOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isViewIndex ? 
1 : 0); - for (int i = posOffset; i < nIndexPKColumns; i++) { - PColumn col = indexPKColumns.get(i); - String indexColName = col.getName().getString(); - // need to escape backslash as this used in the SELECT statement - String dataColName = col.getExpressionStr() == null ? col.getName().getString() - : StringUtil.escapeBackslash(col.getExpressionStr()); - dataColumns.append(dataColName).append(","); - indexColumns.append('"').append(indexColName).append("\","); - indexColumnNames.add(indexColName); - dataColumnNames.add(dataColName); - } - - // Add the covered columns - for (PColumnFamily family : indexTable.getColumnFamilies()) { - for (PColumn col : family.getColumns()) { - if (col.getViewConstant() == null) { - String indexColName = col.getName().getString(); - // Transforming tables also behave like indexes but they don't have index_col_name_sep. So we use family name directly. - String dataFamilyName = indexColName.indexOf(INDEX_COLUMN_NAME_SEP)!=-1 ? IndexUtil.getDataColumnFamilyName(indexColName) : - col.getFamilyName().getString(); - String dataColumnName = IndexUtil.getDataColumnName(indexColName); - if (!dataFamilyName.equals("")) { - dataColumns.append('"').append(dataFamilyName).append("\"."); - if (forTransform) { - // transforming table columns have the same family name - indexColumns.append('"').append(dataFamilyName).append("\"."); - } - } - dataColumns.append('"').append(dataColumnName).append("\","); - indexColumns.append('"').append(indexColName).append("\","); - indexColumnNames.add(indexColName); - dataColumnNames.add(dataColumnName); - } + // Add the covered columns + for (PColumnFamily family : indexTable.getColumnFamilies()) { + for (PColumn col : family.getColumns()) { + if (col.getViewConstant() == null) { + String indexColName = col.getName().getString(); + // Transforming tables also behave like indexes but they don't have index_col_name_sep. So + // we use family name directly. + String dataFamilyName = indexColName.indexOf(INDEX_COLUMN_NAME_SEP) != -1 + ? IndexUtil.getDataColumnFamilyName(indexColName) + : col.getFamilyName().getString(); + String dataColumnName = IndexUtil.getDataColumnName(indexColName); + if (!dataFamilyName.equals("")) { + dataColumns.append('"').append(dataFamilyName).append("\"."); + if (forTransform) { + // transforming table columns have the same family name + indexColumns.append('"').append(dataFamilyName).append("\"."); } + } + dataColumns.append('"').append(dataColumnName).append("\","); + indexColumns.append('"').append(indexColName).append("\","); + indexColumnNames.add(indexColName); + dataColumnNames.add(dataColumnName); } - - final PTable dataTable = dataTableRef.getTable(); - dataColumns.setLength(dataColumns.length()-1); - indexColumns.setLength(indexColumns.length()-1); - String schemaName = dataTable.getSchemaName().getString(); - String tableName = indexTable.getTableName().getString(); - - StringBuilder updateStmtStr = new StringBuilder(); - updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO ").append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"').append(tableName).append("\"(") - .append(indexColumns).append(") "); - final StringBuilder selectQueryBuilder = new StringBuilder(); - selectQueryBuilder.append(" SELECT /*+ NO_INDEX */ ").append(dataColumns).append(" FROM ") - .append(schemaName.length() == 0 ? 
"" : '"' + schemaName + "\".").append('"').append(dataTable.getTableName().getString()).append('"'); - this.selectQuery = selectQueryBuilder.toString(); - updateStmtStr.append(this.selectQuery); - - try (final PhoenixStatement statement = new PhoenixStatement(connection)) { - DelegateMutationPlan delegate = new DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) { - @Override - public MutationState execute() throws SQLException { - connection.getMutationState().commitDDLFence(dataTable); - return super.execute(); - } - }; - return delegate; - } + } } - public List getIndexColumnNames() { - return indexColumnNames; - } + final PTable dataTable = dataTableRef.getTable(); + dataColumns.setLength(dataColumns.length() - 1); + indexColumns.setLength(indexColumns.length() - 1); + String schemaName = dataTable.getSchemaName().getString(); + String tableName = indexTable.getTableName().getString(); - public List getDataColumnNames() { - return dataColumnNames; - } + StringBuilder updateStmtStr = new StringBuilder(); + updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO ") + .append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"') + .append(tableName).append("\"(").append(indexColumns).append(") "); + final StringBuilder selectQueryBuilder = new StringBuilder(); + selectQueryBuilder.append(" SELECT /*+ NO_INDEX */ ").append(dataColumns).append(" FROM ") + .append(schemaName.length() == 0 ? "" : '"' + schemaName + "\".").append('"') + .append(dataTable.getTableName().getString()).append('"'); + this.selectQuery = selectQueryBuilder.toString(); + updateStmtStr.append(this.selectQuery); - public String getSelectQuery() { - return selectQuery; + try (final PhoenixStatement statement = new PhoenixStatement(connection)) { + DelegateMutationPlan delegate = + new DelegateMutationPlan(statement.compileMutation(updateStmtStr.toString())) { + @Override + public MutationState execute() throws SQLException { + connection.getMutationState().commitDDLFence(dataTable); + return super.execute(); + } + }; + return delegate; } + } + + public List getIndexColumnNames() { + return indexColumnNames; + } + + public List getDataColumnNames() { + return dataColumnNames; + } + + public String getSelectQuery() { + return selectQuery; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java index 01cf620c8d5..bab39f2ef86 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,66 +36,70 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.util.ByteUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ByteUtil; /** - * For local indexes, we optimize the initial index population by *not* sending - * Puts over the wire for the index rows, as we don't need to do that. Instead, - * we tap into our region observer to generate the index rows based on the data - * rows as we scan + * For local indexes, we optimize the initial index population by *not* sending Puts over the wire + * for the index rows, as we don't need to do that. Instead, we tap into our region observer to + * generate the index rows based on the data rows as we scan */ public class PostLocalIndexDDLCompiler { - private final PhoenixConnection connection; - private final String tableName; - - public PostLocalIndexDDLCompiler(PhoenixConnection connection, String tableName) { - this.connection = connection; - this.tableName = tableName; - } + private final PhoenixConnection connection; + private final String tableName; - public MutationPlan compile(PTable index) throws SQLException { - try (final PhoenixStatement statement = new PhoenixStatement(connection)) { - String query = "SELECT count(*) FROM " + tableName; - final QueryPlan plan = statement.compileQuery(query); - TableRef tableRef = plan.getTableRef(); - Scan scan = plan.getContext().getScan(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - final PTable dataTable = tableRef.getTable(); - List indexes = Lists.newArrayListWithExpectedSize(1); - for (PTable indexTable : dataTable.getIndexes()) { - if (indexTable.getKey().equals(index.getKey())) { - index = indexTable; - break; - } - } - // Only build newly created index. - indexes.add(index); - IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection()); - // Set attribute on scan that UngroupedAggregateRegionObserver will switch on. - // We'll detect that this attribute was set the server-side and write the index - // rows per region as a result. The value of the attribute will be our persisted - // index maintainers. - // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver - scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr)); - // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*). - // However, in this case, we need to project all of the data columns that contribute to the index. 
- IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection); - for (ColumnReference columnRef : indexMaintainer.getAllColumns()) { - if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - scan.addFamily(columnRef.getFamily()); - } else { - scan.addColumn(columnRef.getFamily(), columnRef.getQualifier()); - } - } - if (dataTable.isTransactional()) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, connection.getMutationState().encodeTransaction()); - } + public PostLocalIndexDDLCompiler(PhoenixConnection connection, String tableName) { + this.connection = connection; + this.tableName = tableName; + } - // Go through MutationPlan abstraction so that we can create local indexes - // with a connectionless connection (which makes testing easier). - return new PostLocalIndexDDLMutationPlan(plan, dataTable); + public MutationPlan compile(PTable index) throws SQLException { + try (final PhoenixStatement statement = new PhoenixStatement(connection)) { + String query = "SELECT count(*) FROM " + tableName; + final QueryPlan plan = statement.compileQuery(query); + TableRef tableRef = plan.getTableRef(); + Scan scan = plan.getContext().getScan(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + final PTable dataTable = tableRef.getTable(); + List indexes = Lists.newArrayListWithExpectedSize(1); + for (PTable indexTable : dataTable.getIndexes()) { + if (indexTable.getKey().equals(index.getKey())) { + index = indexTable; + break; + } + } + // Only build newly created index. + indexes.add(index); + IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection()); + // Set attribute on scan that UngroupedAggregateRegionObserver will switch on. + // We'll detect that this attribute was set the server-side and write the index + // rows per region as a result. The value of the attribute will be our persisted + // index maintainers. + // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver + scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, + ByteUtil.copyKeyBytesIfNecessary(ptr)); + // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for + // count(*). + // However, in this case, we need to project all of the data columns that contribute to the + // index. + IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection); + for (ColumnReference columnRef : indexMaintainer.getAllColumns()) { + if ( + index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ) { + scan.addFamily(columnRef.getFamily()); + } else { + scan.addColumn(columnRef.getFamily(), columnRef.getQualifier()); + } + } + if (dataTable.isTransactional()) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, + connection.getMutationState().encodeTransaction()); + } + + // Go through MutationPlan abstraction so that we can create local indexes + // with a connectionless connection (which makes testing easier). 
+ return new PostLocalIndexDDLMutationPlan(plan, dataTable); } } @@ -105,27 +109,27 @@ private class PostLocalIndexDDLMutationPlan extends BaseMutationPlan { private final PTable dataTable; private PostLocalIndexDDLMutationPlan(QueryPlan plan, PTable dataTable) { - super(plan.getContext(), Operation.UPSERT); - this.plan = plan; - this.dataTable = dataTable; + super(plan.getContext(), Operation.UPSERT); + this.plan = plan; + this.dataTable = dataTable; } @Override public MutationState execute() throws SQLException { - connection.getMutationState().commitDDLFence(dataTable); - Tuple tuple = plan.iterator().next(); - long rowCount = 0; - if (tuple != null) { - Cell kv = tuple.getValue(0); - ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), - kv.getValueOffset(), kv.getValueLength()); - // A single Cell will be returned with the count(*) - we decode that here - rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); - } - // The contract is to return a MutationState that contains the number of rows modified. - // In this case, it's the number of rows in the data table which corresponds to the - // number of index rows that were added. - return new MutationState(0, 0, connection, rowCount); + connection.getMutationState().commitDDLFence(dataTable); + Tuple tuple = plan.iterator().next(); + long rowCount = 0; + if (tuple != null) { + Cell kv = tuple.getValue(0); + ImmutableBytesWritable tmpPtr = + new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + // A single Cell will be returned with the count(*) - we decode that here + rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); + } + // The contract is to return a MutationState that contains the number of rows modified. + // In this case, it's the number of rows in the data table which corresponds to the + // number of index rows that were added. + return new MutationState(0, 0, connection, rowCount); } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java index 96906389326..1f235d76670 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -87,792 +87,849 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.ValueBitSet; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PBson; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PJson; -import org.apache.phoenix.schema.types.PBson; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.SizedUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** - * - * Class that iterates through expressions in SELECT clause and adds projected - * columns to scan. 
- * - * + * Class that iterates through expressions in SELECT clause and adds projected columns to scan. * @since 0.1 */ public class ProjectionCompiler { - private static final Expression NULL_EXPRESSION = LiteralExpression.newConstant(null); - private ProjectionCompiler() { + private static final Expression NULL_EXPRESSION = LiteralExpression.newConstant(null); + + private ProjectionCompiler() { + } + + private static void projectColumnFamily(PTable table, Scan scan, byte[] family) { + // Will project all colmuns for given CF + scan.addFamily(family); + } + + public static RowProjector compile(StatementContext context, SelectStatement statement, + GroupBy groupBy) throws SQLException { + boolean wildcardIncludesDynamicCols = + context.getConnection().getQueryServices().getConfiguration() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + return compile(context, statement, groupBy, Collections. emptyList(), + // Pass null expression because we don't want empty key value to be projected + NULL_EXPRESSION, wildcardIncludesDynamicCols); + } + + private static int getMinPKOffset(PTable table, PName tenantId) { + // In SELECT *, don't include tenant column or index ID column for tenant connection + int posOffset = table.getBucketNum() == null ? 0 : 1; + if (table.isMultiTenant() && tenantId != null) { + posOffset++; } - - private static void projectColumnFamily(PTable table, Scan scan, byte[] family) { - // Will project all colmuns for given CF - scan.addFamily(family); + if (table.getViewIndexId() != null) { + posOffset++; } - - public static RowProjector compile(StatementContext context, SelectStatement statement, GroupBy groupBy) throws SQLException { - boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - return compile(context, statement, groupBy, Collections.emptyList(), - // Pass null expression because we don't want empty key value to be projected - NULL_EXPRESSION, - wildcardIncludesDynamicCols); - } - - private static int getMinPKOffset(PTable table, PName tenantId) { - // In SELECT *, don't include tenant column or index ID column for tenant connection - int posOffset = table.getBucketNum() == null ? 0 : 1; - if (table.isMultiTenant() && tenantId != null) { - posOffset++; - } - if (table.getViewIndexId() != null) { - posOffset++; + return posOffset; + } + + private static void projectAllTableColumns(StatementContext context, TableRef tableRef, + boolean resolveColumn, List projectedExpressions, + List projectedColumns, List targetColumns) + throws SQLException { + ColumnResolver resolver = context.getResolver(); + PTable table = tableRef.getTable(); + int projectedOffset = projectedExpressions.size(); + int posOffset = table.getBucketNum() == null ? 
0 : 1; + int minPKOffset = getMinPKOffset(table, context.getConnection().getTenantId()); + for (int i = posOffset, j = posOffset; i < table.getColumns().size(); i++) { + PColumn column = table.getColumns().get(i); + // Skip tenant ID column (which may not be the first column, but is the first PK column) + if (SchemaUtil.isPKColumn(column) && j++ < minPKOffset) { + posOffset++; + continue; + } + ColumnRef ref = new ColumnRef(tableRef, i); + String colName = ref.getColumn().getName().getString(); + String tableAlias = tableRef.getTableAlias(); + if (resolveColumn) { + try { + if (tableAlias != null) { + ref = resolver.resolveColumn(null, tableAlias, colName); + } else { + String schemaName = table.getSchemaName().getString(); + ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, + table.getTableName().getString(), colName); + } + // The freshly revolved column's family better be the same as the original one. + // If not, trigger the disambiguation logic. Also see + // PTableImpl.getColumnForColumnName(...) + if ( + column.getFamilyName() != null + && !column.getFamilyName().equals(ref.getColumn().getFamilyName()) + ) { + throw new AmbiguousColumnException(); + } + } catch (AmbiguousColumnException e) { + if (column.getFamilyName() != null) { + ref = resolver.resolveColumn( + tableAlias != null ? tableAlias : table.getTableName().getString(), + column.getFamilyName().getString(), colName); + } else { + throw e; + } } - return posOffset; + } + Expression expression = ref.newColumnExpression(); + expression = coerceIfNecessary(i - posOffset + projectedOffset, targetColumns, expression); + ImmutableBytesWritable ptr = context.getTempPtr(); + if (IndexUtil.getViewConstantValue(column, ptr)) { + expression = + LiteralExpression.newConstant(column.getDataType().toObject(ptr, column.getSortOrder()), + expression.getDataType(), column.getSortOrder()); + } + projectedExpressions.add(expression); + boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); + projectedColumns.add(new ExpressionProjector(colName, colName, + tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), + expression, isCaseSensitive)); } - - private static void projectAllTableColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List projectedExpressions, List projectedColumns, List targetColumns) throws SQLException { - ColumnResolver resolver = context.getResolver(); - PTable table = tableRef.getTable(); - int projectedOffset = projectedExpressions.size(); - int posOffset = table.getBucketNum() == null ? 0 : 1; - int minPKOffset = getMinPKOffset(table, context.getConnection().getTenantId()); - for (int i = posOffset, j = posOffset; i < table.getColumns().size(); i++) { - PColumn column = table.getColumns().get(i); - // Skip tenant ID column (which may not be the first column, but is the first PK column) - if (SchemaUtil.isPKColumn(column) && j++ < minPKOffset) { - posOffset++; - continue; - } - ColumnRef ref = new ColumnRef(tableRef,i); - String colName = ref.getColumn().getName().getString(); - String tableAlias = tableRef.getTableAlias(); - if (resolveColumn) { - try { - if (tableAlias != null) { - ref = resolver.resolveColumn(null, tableAlias, colName); - } else { - String schemaName = table.getSchemaName().getString(); - ref = resolver.resolveColumn(schemaName.length() == 0 ? 
null : schemaName, table.getTableName().getString(), colName); - } - // The freshly revolved column's family better be the same as the original one. - // If not, trigger the disambiguation logic. Also see PTableImpl.getColumnForColumnName(...) - if (column.getFamilyName() != null && !column.getFamilyName().equals(ref.getColumn().getFamilyName())) { - throw new AmbiguousColumnException(); - } - } catch (AmbiguousColumnException e) { - if (column.getFamilyName() != null) { - ref = resolver.resolveColumn(tableAlias != null ? tableAlias : table.getTableName().getString(), column.getFamilyName().getString(), colName); - } else { - throw e; - } - } - } - Expression expression = ref.newColumnExpression(); - expression = coerceIfNecessary(i-posOffset+projectedOffset, targetColumns, expression); - ImmutableBytesWritable ptr = context.getTempPtr(); - if (IndexUtil.getViewConstantValue(column, ptr)) { - expression = LiteralExpression.newConstant( - column.getDataType().toObject(ptr, column.getSortOrder()), - expression.getDataType(), - column.getSortOrder()); - } - projectedExpressions.add(expression); - boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); - projectedColumns.add(new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive)); - } + } + + private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, + boolean resolveColumn, List projectedExpressions, + List projectedColumns, List targetColumns) + throws SQLException { + ColumnResolver resolver = context.getResolver(); + PTable index = tableRef.getTable(); + int projectedOffset = projectedExpressions.size(); + PhoenixConnection conn = context.getConnection(); + PName tenantId = conn.getTenantId(); + String dataTableName = index.getParentName().getString(); + PTable dataTable = conn.getTable(dataTableName); + int tableOffset = dataTable.getBucketNum() == null ? 0 : 1; + int minTablePKOffset = getMinPKOffset(dataTable, tenantId); + int minIndexPKOffset = getMinPKOffset(index, tenantId); + if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + if ( + index.getColumns().size() - minIndexPKOffset + != dataTable.getColumns().size() - minTablePKOffset + ) { + // We'll end up not using this by the optimizer, so just throw + String schemaNameStr = + dataTable.getSchemaName() == null ? null : dataTable.getSchemaName().getString(); + String tableNameStr = + dataTable.getTableName() == null ? null : dataTable.getTableName().getString(); + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, + WildcardParseNode.INSTANCE.toString()); + } } - - private static void projectAllIndexColumns(StatementContext context, TableRef tableRef, boolean resolveColumn, List projectedExpressions, List projectedColumns, List targetColumns) throws SQLException { - ColumnResolver resolver = context.getResolver(); - PTable index = tableRef.getTable(); - int projectedOffset = projectedExpressions.size(); - PhoenixConnection conn = context.getConnection(); - PName tenantId = conn.getTenantId(); - String dataTableName = index.getParentName().getString(); - PTable dataTable = conn.getTable(dataTableName); - int tableOffset = dataTable.getBucketNum() == null ? 
0 : 1; - int minTablePKOffset = getMinPKOffset(dataTable, tenantId); - int minIndexPKOffset = getMinPKOffset(index, tenantId); - if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - if (index.getColumns().size()-minIndexPKOffset != dataTable.getColumns().size()-minTablePKOffset) { - // We'll end up not using this by the optimizer, so just throw - String schemaNameStr = dataTable.getSchemaName()==null?null:dataTable.getSchemaName().getString(); - String tableNameStr = dataTable.getTableName()==null?null:dataTable.getTableName().getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr,null, WildcardParseNode.INSTANCE.toString()); - } + // At this point, the index table is either fully covered, or we are projecting uncovered + // columns + // The easy thing would be to just call projectAllTableColumns on the projected table, + // but its columns are not in the same order as the data column, so we have to map them to + // the data column order + TableRef projectedTableRef = + new TableRef(resolver.getTables().get(0), tableRef.getTableAlias()); + for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) { + PColumn column = dataTable.getColumns().get(i); + // Skip tenant ID column (which may not be the first column, but is the first PK column) + if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) { + tableOffset++; + continue; + } + PColumn dataTableColumn = dataTable.getColumns().get(i); + String indexColName = IndexUtil.getIndexColumnName(dataTableColumn); + PColumn indexColumn = null; + ColumnRef ref = null; + try { + indexColumn = index.getColumnForColumnName(indexColName); + // TODO: Should we do this more efficiently than catching the exception ? + } catch (ColumnNotFoundException e) { + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + // Projected columns have the same name as in the data table + String familyName = dataTableColumn.getFamilyName() == null + ? null + : dataTableColumn.getFamilyName().getString(); + ref = resolver.resolveColumn(familyName, + tableRef.getTableAlias() == null + ? tableRef.getTable().getName().getString() + : tableRef.getTableAlias(), + indexColName); + indexColumn = ref.getColumn(); + } else { + throw e; } - // At this point, the index table is either fully covered, or we are projecting uncovered - // columns - // The easy thing would be to just call projectAllTableColumns on the projected table, - // but its columns are not in the same order as the data column, so we have to map them to - // the data column order - TableRef projectedTableRef = - new TableRef(resolver.getTables().get(0), tableRef.getTableAlias()); - for (int i = tableOffset, j = tableOffset; i < dataTable.getColumns().size(); i++) { - PColumn column = dataTable.getColumns().get(i); - // Skip tenant ID column (which may not be the first column, but is the first PK column) - if (SchemaUtil.isPKColumn(column) && j++ < minTablePKOffset) { - tableOffset++; - continue; - } - PColumn dataTableColumn = dataTable.getColumns().get(i); - String indexColName = IndexUtil.getIndexColumnName(dataTableColumn); - PColumn indexColumn = null; - ColumnRef ref = null; - try { - indexColumn = index.getColumnForColumnName(indexColName); - // TODO: Should we do this more efficiently than catching the exception ? 
- } catch (ColumnNotFoundException e) { - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - //Projected columns have the same name as in the data table - String familyName = - dataTableColumn.getFamilyName() == null ? null - : dataTableColumn.getFamilyName().getString(); - ref = - resolver.resolveColumn(familyName, - tableRef.getTableAlias() == null - ? tableRef.getTable().getName().getString() - : tableRef.getTableAlias(), - indexColName); - indexColumn = ref.getColumn(); - } else { - throw e; - } - } - ref = new ColumnRef(projectedTableRef, indexColumn.getPosition()); - String colName = dataTableColumn.getName().getString(); - String tableAlias = tableRef.getTableAlias(); - if (resolveColumn) { - try { - if (tableAlias != null) { - ref = resolver.resolveColumn(null, tableAlias, indexColName); - } else { - String schemaName = index.getSchemaName().getString(); - ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, index.getTableName().getString(), indexColName); - } - } catch (AmbiguousColumnException e) { - if (indexColumn.getFamilyName() != null) { - ref = resolver.resolveColumn(tableAlias != null ? tableAlias : index.getTableName().getString(), indexColumn.getFamilyName().getString(), indexColName); - } else { - throw e; - } - } - } - Expression expression = ref.newColumnExpression(); - expression = coerceIfNecessary(i-tableOffset+projectedOffset, targetColumns, expression); - // We do not need to check if the column is a viewConstant, because view constants never - // appear as a column in an index - projectedExpressions.add(expression); - boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); - ExpressionProjector projector = new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ? dataTable.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive); - projectedColumns.add(projector); + } + ref = new ColumnRef(projectedTableRef, indexColumn.getPosition()); + String colName = dataTableColumn.getName().getString(); + String tableAlias = tableRef.getTableAlias(); + if (resolveColumn) { + try { + if (tableAlias != null) { + ref = resolver.resolveColumn(null, tableAlias, indexColName); + } else { + String schemaName = index.getSchemaName().getString(); + ref = resolver.resolveColumn(schemaName.length() == 0 ? null : schemaName, + index.getTableName().getString(), indexColName); + } + } catch (AmbiguousColumnException e) { + if (indexColumn.getFamilyName() != null) { + ref = resolver.resolveColumn( + tableAlias != null ? tableAlias : index.getTableName().getString(), + indexColumn.getFamilyName().getString(), indexColName); + } else { + throw e; + } } + } + Expression expression = ref.newColumnExpression(); + expression = coerceIfNecessary(i - tableOffset + projectedOffset, targetColumns, expression); + // We do not need to check if the column is a viewConstant, because view constants never + // appear as a column in an index + projectedExpressions.add(expression); + boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); + ExpressionProjector projector = new ExpressionProjector(colName, colName, + tableRef.getTableAlias() == null + ? 
dataTable.getName().getString() + : tableRef.getTableAlias(), + expression, isCaseSensitive); + projectedColumns.add(projector); } - - private static void projectTableColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List projectedExpressions, List projectedColumns) throws SQLException { - PTable table = tableRef.getTable(); - PColumnFamily pfamily = table.getColumnFamily(cfName); - for (PColumn column : pfamily.getColumns()) { - ColumnRef ref = new ColumnRef(tableRef, column.getPosition()); - if (resolveColumn) { - ref = context.getResolver().resolveColumn(table.getTableName().getString(), cfName, column.getName().getString()); - } - Expression expression = ref.newColumnExpression(); - projectedExpressions.add(expression); - String colName = column.getName().toString(); - boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); - projectedColumns.add(new ExpressionProjector(colName, colName, tableRef.getTableAlias() == null ? - table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive)); - } + } + + private static void projectTableColumnFamily(StatementContext context, String cfName, + TableRef tableRef, boolean resolveColumn, List projectedExpressions, + List projectedColumns) throws SQLException { + PTable table = tableRef.getTable(); + PColumnFamily pfamily = table.getColumnFamily(cfName); + for (PColumn column : pfamily.getColumns()) { + ColumnRef ref = new ColumnRef(tableRef, column.getPosition()); + if (resolveColumn) { + ref = context.getResolver().resolveColumn(table.getTableName().getString(), cfName, + column.getName().getString()); + } + Expression expression = ref.newColumnExpression(); + projectedExpressions.add(expression); + String colName = column.getName().toString(); + boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); + projectedColumns.add(new ExpressionProjector(colName, colName, + tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), + expression, isCaseSensitive)); } - - private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, boolean resolveColumn, List projectedExpressions, List projectedColumns) throws SQLException { - ColumnResolver resolver = context.getResolver(); - PTable index = tableRef.getTable(); - PhoenixConnection conn = context.getConnection(); - String dataTableName = index.getParentName().getString(); - PTable dataTable = conn.getTable(dataTableName); - PColumnFamily pfamily = dataTable.getColumnFamily(cfName); - TableRef projectedTableRef = - new TableRef(resolver.getTables().get(0), tableRef.getTableAlias()); - PTable projectedIndex = projectedTableRef.getTable(); - for (PColumn column : pfamily.getColumns()) { - String indexColName = IndexUtil.getIndexColumnName(column); - PColumn indexColumn = null; - ColumnRef ref = null; - String indexColumnFamily = null; - try { - indexColumn = index.getColumnForColumnName(indexColName); - ref = new ColumnRef(projectedTableRef, indexColumn.getPosition()); - indexColumnFamily = - indexColumn.getFamilyName() == null ? null - : indexColumn.getFamilyName().getString(); - } catch (ColumnNotFoundException e) { - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - try { - //Projected columns have the same name as in the data table - String colName = column.getName().getString(); - String familyName = - column.getFamilyName() == null ? 
null - : column.getFamilyName().getString(); - resolver.resolveColumn(familyName, - tableRef.getTableAlias() == null - ? tableRef.getTable().getName().getString() - : tableRef.getTableAlias(), - indexColName); - indexColumn = projectedIndex.getColumnForColumnName(colName); - } catch (ColumnFamilyNotFoundException c) { - throw e; - } - } else { - throw e; - } - } - if (resolveColumn) { - ref = - resolver.resolveColumn(index.getTableName().getString(), indexColumnFamily, - indexColName); - } - Expression expression = ref.newColumnExpression(); - projectedExpressions.add(expression); - String colName = column.getName().toString(); - boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); - projectedColumns.add(new ExpressionProjector(colName, colName, - tableRef.getTableAlias() == null ? dataTable.getName().getString() - : tableRef.getTableAlias(), - expression, isCaseSensitive)); + } + + private static void projectIndexColumnFamily(StatementContext context, String cfName, + TableRef tableRef, boolean resolveColumn, List projectedExpressions, + List projectedColumns) throws SQLException { + ColumnResolver resolver = context.getResolver(); + PTable index = tableRef.getTable(); + PhoenixConnection conn = context.getConnection(); + String dataTableName = index.getParentName().getString(); + PTable dataTable = conn.getTable(dataTableName); + PColumnFamily pfamily = dataTable.getColumnFamily(cfName); + TableRef projectedTableRef = + new TableRef(resolver.getTables().get(0), tableRef.getTableAlias()); + PTable projectedIndex = projectedTableRef.getTable(); + for (PColumn column : pfamily.getColumns()) { + String indexColName = IndexUtil.getIndexColumnName(column); + PColumn indexColumn = null; + ColumnRef ref = null; + String indexColumnFamily = null; + try { + indexColumn = index.getColumnForColumnName(indexColName); + ref = new ColumnRef(projectedTableRef, indexColumn.getPosition()); + indexColumnFamily = + indexColumn.getFamilyName() == null ? null : indexColumn.getFamilyName().getString(); + } catch (ColumnNotFoundException e) { + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + try { + // Projected columns have the same name as in the data table + String colName = column.getName().getString(); + String familyName = + column.getFamilyName() == null ? null : column.getFamilyName().getString(); + resolver.resolveColumn(familyName, + tableRef.getTableAlias() == null + ? tableRef.getTable().getName().getString() + : tableRef.getTableAlias(), + indexColName); + indexColumn = projectedIndex.getColumnForColumnName(colName); + } catch (ColumnFamilyNotFoundException c) { + throw e; + } + } else { + throw e; } + } + if (resolveColumn) { + ref = + resolver.resolveColumn(index.getTableName().getString(), indexColumnFamily, indexColName); + } + Expression expression = ref.newColumnExpression(); + projectedExpressions.add(expression); + String colName = column.getName().toString(); + boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName); + projectedColumns.add(new ExpressionProjector(colName, colName, + tableRef.getTableAlias() == null + ? 
dataTable.getName().getString() + : tableRef.getTableAlias(), + expression, isCaseSensitive)); } - - private static Expression coerceIfNecessary(int index, List targetColumns, Expression expression) throws SQLException { - if (index < targetColumns.size()) { - PDatum targetColumn = targetColumns.get(index); - if (targetColumn.getDataType() != expression.getDataType()) { - PDataType targetType = targetColumn.getDataType(); - // Check if coerce allowed using more relaxed isCastableTo check, since we promote INTEGER to LONG - // during expression evaluation and then convert back to INTEGER on UPSERT SELECT (and we don't have - // (an actual value we can specifically check against). - if (expression.getDataType() != null && !expression.getDataType().isCastableTo(targetType)) { - throw new ArgumentTypeMismatchException(targetType, expression.getDataType(), "column: " + targetColumn); - } - expression = CoerceExpression.create(expression, targetType, targetColumn.getSortOrder(), targetColumn.getMaxLength()); - } + } + + private static Expression coerceIfNecessary(int index, List targetColumns, + Expression expression) throws SQLException { + if (index < targetColumns.size()) { + PDatum targetColumn = targetColumns.get(index); + if (targetColumn.getDataType() != expression.getDataType()) { + PDataType targetType = targetColumn.getDataType(); + // Check if coerce allowed using more relaxed isCastableTo check, since we promote INTEGER + // to LONG + // during expression evaluation and then convert back to INTEGER on UPSERT SELECT (and we + // don't have + // (an actual value we can specifically check against). + if ( + expression.getDataType() != null && !expression.getDataType().isCastableTo(targetType) + ) { + throw new ArgumentTypeMismatchException(targetType, expression.getDataType(), + "column: " + targetColumn); } - return expression; + expression = CoerceExpression.create(expression, targetType, targetColumn.getSortOrder(), + targetColumn.getMaxLength()); + } } - /** - * Builds the projection for the scan - * @param context query context kept between compilation of different query clauses - * @param statement the statement being compiled - * @param groupBy compiled GROUP BY clause - * @param targetColumns list of columns, parallel to aliasedNodes, that are being set for an - * UPSERT SELECT statement. Used to coerce expression types to the expected target type. 
- * @param where the where clause expression - * @param wildcardIncludesDynamicCols true if wildcard queries should include dynamic columns - * @return projector used to access row values during scan - * @throws SQLException - */ - public static RowProjector compile(StatementContext context, SelectStatement statement, - GroupBy groupBy, List targetColumns, Expression where, - boolean wildcardIncludesDynamicCols) throws SQLException { - List serverParsedKVRefs = new ArrayList<>(); - List serverParsedProjectedColumnRefs = new ArrayList<>(); - List serverParsedKVFuncs = new ArrayList<>(); - List serverParsedOldFuncs = new ArrayList<>(); - Map serverParsedExpressionCounts = new HashMap<>(); - List aliasedNodes = statement.getSelect(); - // Setup projected columns in Scan - SelectClauseVisitor - selectVisitor = - new SelectClauseVisitor(context, groupBy, serverParsedKVRefs, serverParsedKVFuncs, - serverParsedExpressionCounts, serverParsedProjectedColumnRefs, - serverParsedOldFuncs, statement); - List projectedColumns = new ArrayList<>(); - ColumnResolver resolver = context.getResolver(); - TableRef tableRef = context.getCurrentTable(); - PTable table = tableRef.getTable(); - boolean resolveColumn = !tableRef.equals(resolver.getTables().get(0)); - boolean isWildcard = false; - Scan scan = context.getScan(); - int index = 0; - List projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size()); - List projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size()); - for (AliasedNode aliasedNode : aliasedNodes) { - ParseNode node = aliasedNode.getNode(); - // TODO: visitor? - if (node instanceof WildcardParseNode) { - if (statement.isAggregate()) { - ExpressionCompiler.throwNonAggExpressionInAggException(node.toString()); - } - if (tableRef == TableRef.EMPTY_TABLE_REF) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException(); - } - isWildcard = true; - if (tableRef.getTable().getType() == PTableType.INDEX && ((WildcardParseNode)node).isRewrite()) { - projectAllIndexColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns); - } else { - projectAllTableColumns(context, tableRef, resolveColumn, projectedExpressions, projectedColumns, targetColumns); - } - } else if (node instanceof TableWildcardParseNode) { - TableName tName = ((TableWildcardParseNode) node).getTableName(); - TableRef tRef = resolver.resolveTable(tName.getSchemaName(), tName.getTableName()); - if (tRef.equals(tableRef)) { - isWildcard = true; - } - if (tRef.getTable().getType() == PTableType.INDEX && ((TableWildcardParseNode)node).isRewrite()) { - projectAllIndexColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns); - } else { - projectAllTableColumns(context, tRef, true, projectedExpressions, projectedColumns, targetColumns); - } - } else if (node instanceof FamilyWildcardParseNode) { - if (tableRef == TableRef.EMPTY_TABLE_REF) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException(); - } - // Project everything for SELECT cf.* - String cfName = ((FamilyWildcardParseNode) node).getName(); - // Delay projecting to scan, as when any other column in the column family gets - // added to the scan, it overwrites that we want to project the entire column - // family. Instead, we do the projection at the end. 
- // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work - // around this, as this code depends on this function being the last place where - // columns are projected (which is currently true, but could change). - projectedFamilies.add(Bytes.toBytes(cfName)); - if (tableRef.getTable().getType() == PTableType.INDEX && ((FamilyWildcardParseNode)node).isRewrite()) { - projectIndexColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns); - } else { - projectTableColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, projectedColumns); - } - } else { - if (node instanceof PhoenixRowTimestampParseNode) { - if (statement.isAggregate()) { - ExpressionCompiler.throwNonAggExpressionInAggException(node.toString()); - } - } - Expression expression = node.accept(selectVisitor); - projectedExpressions.add(expression); - expression = coerceIfNecessary(index, targetColumns, expression); - if (node instanceof BindParseNode) { - context.getBindManager().addParamMetaData((BindParseNode)node, expression); - } - if (!node.isStateless()) { - if (!selectVisitor.isAggregate() && statement.isAggregate()) { - ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString()); - } - } - - String tableName = tableRef.getTableAlias() == null ? - (table.getName() == null ? - "" : - table.getName().getString()) : - tableRef.getTableAlias(); - String colName = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias()); - String name = colName == null ? expression.toString() : colName; - boolean isCaseSensitive = aliasedNode.getAlias() != null ? - aliasedNode.isCaseSensitve() : - (colName != null ? - SchemaUtil.isCaseSensitive(aliasedNode.getNode().getAlias()) : - selectVisitor.isCaseSensitive); - if (null != aliasedNode.getAlias()){ - projectedColumns.add(new ExpressionProjector(name, aliasedNode.getAlias(), tableName, expression, isCaseSensitive)); - } else { - projectedColumns.add(new ExpressionProjector(name, name, tableName, expression, isCaseSensitive)); - } - } - - selectVisitor.reset(); - index++; + return expression; + } + + /** + * Builds the projection for the scan + * @param context query context kept between compilation of different query + * clauses + * @param statement the statement being compiled + * @param groupBy compiled GROUP BY clause + * @param targetColumns list of columns, parallel to aliasedNodes, that are being + * set for an UPSERT SELECT statement. Used to coerce + * expression types to the expected target type. 
+ * @param where the where clause expression + * @param wildcardIncludesDynamicCols true if wildcard queries should include dynamic columns + * @return projector used to access row values during scan + */ + public static RowProjector compile(StatementContext context, SelectStatement statement, + GroupBy groupBy, List targetColumns, Expression where, + boolean wildcardIncludesDynamicCols) throws SQLException { + List serverParsedKVRefs = new ArrayList<>(); + List serverParsedProjectedColumnRefs = new ArrayList<>(); + List serverParsedKVFuncs = new ArrayList<>(); + List serverParsedOldFuncs = new ArrayList<>(); + Map serverParsedExpressionCounts = new HashMap<>(); + List aliasedNodes = statement.getSelect(); + // Setup projected columns in Scan + SelectClauseVisitor selectVisitor = new SelectClauseVisitor(context, groupBy, + serverParsedKVRefs, serverParsedKVFuncs, serverParsedExpressionCounts, + serverParsedProjectedColumnRefs, serverParsedOldFuncs, statement); + List projectedColumns = new ArrayList<>(); + ColumnResolver resolver = context.getResolver(); + TableRef tableRef = context.getCurrentTable(); + PTable table = tableRef.getTable(); + boolean resolveColumn = !tableRef.equals(resolver.getTables().get(0)); + boolean isWildcard = false; + Scan scan = context.getScan(); + int index = 0; + List projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size()); + List projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size()); + for (AliasedNode aliasedNode : aliasedNodes) { + ParseNode node = aliasedNode.getNode(); + // TODO: visitor? + if (node instanceof WildcardParseNode) { + if (statement.isAggregate()) { + ExpressionCompiler.throwNonAggExpressionInAggException(node.toString()); + } + if (tableRef == TableRef.EMPTY_TABLE_REF) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException(); + } + isWildcard = true; + if ( + tableRef.getTable().getType() == PTableType.INDEX + && ((WildcardParseNode) node).isRewrite() + ) { + projectAllIndexColumns(context, tableRef, resolveColumn, projectedExpressions, + projectedColumns, targetColumns); + } else { + projectAllTableColumns(context, tableRef, resolveColumn, projectedExpressions, + projectedColumns, targetColumns); + } + } else if (node instanceof TableWildcardParseNode) { + TableName tName = ((TableWildcardParseNode) node).getTableName(); + TableRef tRef = resolver.resolveTable(tName.getSchemaName(), tName.getTableName()); + if (tRef.equals(tableRef)) { + isWildcard = true; + } + if ( + tRef.getTable().getType() == PTableType.INDEX + && ((TableWildcardParseNode) node).isRewrite() + ) { + projectAllIndexColumns(context, tRef, true, projectedExpressions, projectedColumns, + targetColumns); + } else { + projectAllTableColumns(context, tRef, true, projectedExpressions, projectedColumns, + targetColumns); + } + } else if (node instanceof FamilyWildcardParseNode) { + if (tableRef == TableRef.EMPTY_TABLE_REF) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT).build().buildException(); + } + // Project everything for SELECT cf.* + String cfName = ((FamilyWildcardParseNode) node).getName(); + // Delay projecting to scan, as when any other column in the column family gets + // added to the scan, it overwrites that we want to project the entire column + // family. Instead, we do the projection at the end. 
+ // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work + // around this, as this code depends on this function being the last place where + // columns are projected (which is currently true, but could change). + projectedFamilies.add(Bytes.toBytes(cfName)); + if ( + tableRef.getTable().getType() == PTableType.INDEX + && ((FamilyWildcardParseNode) node).isRewrite() + ) { + projectIndexColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, + projectedColumns); + } else { + projectTableColumnFamily(context, cfName, tableRef, resolveColumn, projectedExpressions, + projectedColumns); + } + } else { + if (node instanceof PhoenixRowTimestampParseNode) { + if (statement.isAggregate()) { + ExpressionCompiler.throwNonAggExpressionInAggException(node.toString()); + } + } + Expression expression = node.accept(selectVisitor); + projectedExpressions.add(expression); + expression = coerceIfNecessary(index, targetColumns, expression); + if (node instanceof BindParseNode) { + context.getBindManager().addParamMetaData((BindParseNode) node, expression); + } + if (!node.isStateless()) { + if (!selectVisitor.isAggregate() && statement.isAggregate()) { + ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString()); + } } - for (int i = serverParsedProjectedColumnRefs.size() - 1; i >= 0; i--) { - Expression expression = serverParsedProjectedColumnRefs.get(i); - Integer count = serverParsedExpressionCounts.get(expression); - if (count != 0) { - serverParsedKVRefs.remove(i); - serverParsedKVFuncs.remove(i); - serverParsedOldFuncs.remove(i); - } + String tableName = tableRef.getTableAlias() == null + ? (table.getName() == null ? "" : table.getName().getString()) + : tableRef.getTableAlias(); + String colName = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias()); + String name = colName == null ? expression.toString() : colName; + boolean isCaseSensitive = aliasedNode.getAlias() != null + ? aliasedNode.isCaseSensitve() + : (colName != null + ? SchemaUtil.isCaseSensitive(aliasedNode.getNode().getAlias()) + : selectVisitor.isCaseSensitive); + if (null != aliasedNode.getAlias()) { + projectedColumns.add(new ExpressionProjector(name, aliasedNode.getAlias(), tableName, + expression, isCaseSensitive)); + } else { + projectedColumns + .add(new ExpressionProjector(name, name, tableName, expression, isCaseSensitive)); } + } - if (serverParsedKVFuncs.size() > 0 && serverParsedKVRefs.size() > 0) { - String[] - scanAttributes = - new String[] { BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX, - BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION, - BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION, - BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION}; - Map attributeToFunctionMap = new HashMap() {{ - put(scanAttributes[0], ArrayIndexFunction.class); - put(scanAttributes[1], JsonValueFunction.class); - put(scanAttributes[2], JsonQueryFunction.class); - put(scanAttributes[3], BsonValueFunction.class); - }}; - // This map is to keep track of the positions that get swapped with rearranging - // the functions in the serialized data to server. 
- Map initialToShuffledPositionMap = new HashMap<>(); - Map> - serverAttributeToFuncExpressionMap = - new HashMap>() {{ - for (String attribute : attributeToFunctionMap.keySet()) { - put(attribute, new ArrayList<>()); - } - }}; - Map> - serverAttributeToKVExpressionMap = - new HashMap>() {{ - for (String attribute : attributeToFunctionMap.keySet()) { - put(attribute, new ArrayList<>()); - } - }}; - int counter = 0; - for (String attribute : scanAttributes) { - for (int i = 0; i < serverParsedKVFuncs.size(); i++) { - if (attributeToFunctionMap.get(attribute) - .isInstance(serverParsedKVFuncs.get(i))) { - initialToShuffledPositionMap.put(i, counter++); - serverAttributeToFuncExpressionMap.get(attribute) - .add(serverParsedKVFuncs.get(i)); - serverAttributeToKVExpressionMap.get(attribute) - .add(serverParsedKVRefs.get(i)); - } - } - } - for (Map.Entry entry : attributeToFunctionMap.entrySet()) { - if (serverAttributeToFuncExpressionMap.get(entry.getKey()).size() > 0) { - serializeServerParsedExpressionInformationAndSetInScan(context, entry.getKey(), - serverAttributeToFuncExpressionMap.get(entry.getKey()), - serverAttributeToKVExpressionMap.get(entry.getKey())); - } - } - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - for (Expression expression : serverParsedKVRefs) { - builder.addField(expression); - } - KeyValueSchema kvSchema = builder.build(); - ValueBitSet arrayIndexesBitSet = ValueBitSet.newInstance(kvSchema); - builder = new KeyValueSchemaBuilder(0); - for (Expression expression : serverParsedKVFuncs) { - builder.addField(expression); - } - KeyValueSchema arrayIndexesSchema = builder.build(); + selectVisitor.reset(); + index++; + } - Map replacementMap = new HashMap<>(); - for (int i = 0; i < serverParsedOldFuncs.size(); i++) { - Expression function = serverParsedKVFuncs.get(i); - replacementMap.put(serverParsedOldFuncs.get(i), - new ArrayIndexExpression(initialToShuffledPositionMap.get(i), - function.getDataType(), arrayIndexesBitSet, arrayIndexesSchema)); + for (int i = serverParsedProjectedColumnRefs.size() - 1; i >= 0; i--) { + Expression expression = serverParsedProjectedColumnRefs.get(i); + Integer count = serverParsedExpressionCounts.get(expression); + if (count != 0) { + serverParsedKVRefs.remove(i); + serverParsedKVFuncs.remove(i); + serverParsedOldFuncs.remove(i); + } + } + if (serverParsedKVFuncs.size() > 0 && serverParsedKVRefs.size() > 0) { + String[] scanAttributes = + new String[] { BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX, + BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION, + BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION, + BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION }; + Map attributeToFunctionMap = new HashMap() { + { + put(scanAttributes[0], ArrayIndexFunction.class); + put(scanAttributes[1], JsonValueFunction.class); + put(scanAttributes[2], JsonQueryFunction.class); + put(scanAttributes[3], BsonValueFunction.class); + } + }; + // This map is to keep track of the positions that get swapped with rearranging + // the functions in the serialized data to server. 
+ Map initialToShuffledPositionMap = new HashMap<>(); + Map> serverAttributeToFuncExpressionMap = + new HashMap>() { + { + for (String attribute : attributeToFunctionMap.keySet()) { + put(attribute, new ArrayList<>()); } - - ReplaceArrayFunctionExpressionVisitor - visitor = - new ReplaceArrayFunctionExpressionVisitor(replacementMap); - for (int i = 0; i < projectedColumns.size(); i++) { - ExpressionProjector projector = projectedColumns.get(i); - projectedColumns.set(i, - new ExpressionProjector(projector.getName(), projector.getLabel(), - tableRef.getTableAlias() == null ? - (table.getName() == null ? - "" : - table.getName().getString()) : - tableRef.getTableAlias(), - projector.getExpression().accept(visitor), - projector.isCaseSensitive())); + } + }; + Map> serverAttributeToKVExpressionMap = + new HashMap>() { + { + for (String attribute : attributeToFunctionMap.keySet()) { + put(attribute, new ArrayList<>()); } + } + }; + int counter = 0; + for (String attribute : scanAttributes) { + for (int i = 0; i < serverParsedKVFuncs.size(); i++) { + if (attributeToFunctionMap.get(attribute).isInstance(serverParsedKVFuncs.get(i))) { + initialToShuffledPositionMap.put(i, counter++); + serverAttributeToFuncExpressionMap.get(attribute).add(serverParsedKVFuncs.get(i)); + serverAttributeToKVExpressionMap.get(attribute).add(serverParsedKVRefs.get(i)); + } + } + } + for (Map.Entry entry : attributeToFunctionMap.entrySet()) { + if (serverAttributeToFuncExpressionMap.get(entry.getKey()).size() > 0) { + serializeServerParsedExpressionInformationAndSetInScan(context, entry.getKey(), + serverAttributeToFuncExpressionMap.get(entry.getKey()), + serverAttributeToKVExpressionMap.get(entry.getKey())); } + } + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + for (Expression expression : serverParsedKVRefs) { + builder.addField(expression); + } + KeyValueSchema kvSchema = builder.build(); + ValueBitSet arrayIndexesBitSet = ValueBitSet.newInstance(kvSchema); + builder = new KeyValueSchemaBuilder(0); + for (Expression expression : serverParsedKVFuncs) { + builder.addField(expression); + } + KeyValueSchema arrayIndexesSchema = builder.build(); + + Map replacementMap = new HashMap<>(); + for (int i = 0; i < serverParsedOldFuncs.size(); i++) { + Expression function = serverParsedKVFuncs.get(i); + replacementMap.put(serverParsedOldFuncs.get(i), + new ArrayIndexExpression(initialToShuffledPositionMap.get(i), function.getDataType(), + arrayIndexesBitSet, arrayIndexesSchema)); + + } + + ReplaceArrayFunctionExpressionVisitor visitor = + new ReplaceArrayFunctionExpressionVisitor(replacementMap); + for (int i = 0; i < projectedColumns.size(); i++) { + ExpressionProjector projector = projectedColumns.get(i); + projectedColumns.set(i, + new ExpressionProjector(projector.getName(), projector.getLabel(), + tableRef.getTableAlias() == null + ? (table.getName() == null ? 
"" : table.getName().getString()) + : tableRef.getTableAlias(), + projector.getExpression().accept(visitor), projector.isCaseSensitive())); + } + } - boolean isProjectEmptyKeyValue = false; - // Don't project known/declared column families into the scan if we want to support - // surfacing dynamic columns in wildcard queries - if (isWildcard && !wildcardIncludesDynamicCols) { - projectAllColumnFamilies(table, scan); - } else { - isProjectEmptyKeyValue = where == null || LiteralExpression.isTrue(where) || where.requiresFinalEvaluation(); - for (byte[] family : projectedFamilies) { - try { - if (table.getColumnFamily(family) != null) { - projectColumnFamily(table, scan, family); - } - } catch (ColumnFamilyNotFoundException e) { - if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { - throw e; - } - } - } + boolean isProjectEmptyKeyValue = false; + // Don't project known/declared column families into the scan if we want to support + // surfacing dynamic columns in wildcard queries + if (isWildcard && !wildcardIncludesDynamicCols) { + projectAllColumnFamilies(table, scan); + } else { + isProjectEmptyKeyValue = + where == null || LiteralExpression.isTrue(where) || where.requiresFinalEvaluation(); + for (byte[] family : projectedFamilies) { + try { + if (table.getColumnFamily(family) != null) { + projectColumnFamily(table, scan, family); + } + } catch (ColumnFamilyNotFoundException e) { + if (!IndexUtil.shouldIndexBeUsedForUncoveredQuery(tableRef)) { + throw e; + } } - - // TODO make estimatedByteSize more accurate by counting the joined columns. - int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength(); - int estimatedByteSize = 0; - for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { - try { - PColumnFamily family = table.getColumnFamily(entry.getKey()); - if (entry.getValue() == null) { - for (PColumn column : family.getColumns()) { - Integer maxLength = column.getMaxLength(); - int byteSize = column.getDataType().isFixedWidth() ? maxLength == null ? column.getDataType().getByteSize() : maxLength : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE; - estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize; - } - } else { - for (byte[] cq : entry.getValue()) { - PColumn column = family.getPColumnForColumnQualifier(cq); - // Continue: If an EMPTY_COLUMN is in the projection list, - // since the table column list does not contain the EMPTY_COLUMN - // no value is returned. - if (column == null) { - continue; - } - Integer maxLength = column.getMaxLength(); - int byteSize = column.getDataType().isFixedWidth() ? maxLength == null ? column.getDataType().getByteSize() : maxLength : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE; - estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize; - } - } - } catch (ColumnFamilyNotFoundException e) { - // Ignore as this can happen for local indexes when the data table has a column family, but there are no covered columns in the family + } + } + + // TODO make estimatedByteSize more accurate by counting the joined columns. + int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength(); + int estimatedByteSize = 0; + for (Map.Entry> entry : scan.getFamilyMap().entrySet()) { + try { + PColumnFamily family = table.getColumnFamily(entry.getKey()); + if (entry.getValue() == null) { + for (PColumn column : family.getColumns()) { + Integer maxLength = column.getMaxLength(); + int byteSize = column.getDataType().isFixedWidth() + ? maxLength == null ? 
column.getDataType().getByteSize() : maxLength + : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE; + estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize; + } + } else { + for (byte[] cq : entry.getValue()) { + PColumn column = family.getPColumnForColumnQualifier(cq); + // Continue: If an EMPTY_COLUMN is in the projection list, + // since the table column list does not contain the EMPTY_COLUMN + // no value is returned. + if (column == null) { + continue; } + Integer maxLength = column.getMaxLength(); + int byteSize = column.getDataType().isFixedWidth() + ? maxLength == null ? column.getDataType().getByteSize() : maxLength + : RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE; + estimatedByteSize += SizedUtil.KEY_VALUE_SIZE + estimatedKeySize + byteSize; + } } - return new RowProjector(projectedColumns, Math.max(estimatedKeySize, estimatedByteSize), - isProjectEmptyKeyValue, resolver.hasUDFs(), isWildcard, - wildcardIncludesDynamicCols); + } catch (ColumnFamilyNotFoundException e) { + // Ignore as this can happen for local indexes when the data table has a column family, but + // there are no covered columns in the family + } + } + return new RowProjector(projectedColumns, Math.max(estimatedKeySize, estimatedByteSize), + isProjectEmptyKeyValue, resolver.hasUDFs(), isWildcard, wildcardIncludesDynamicCols); + } + + private static void projectAllColumnFamilies(PTable table, Scan scan) { + // Will project all known/declared column families + scan.getFamilyMap().clear(); + for (PColumnFamily family : table.getColumnFamilies()) { + scan.addFamily(family.getName().getBytes()); + } + } + + // A replaced ArrayIndex function that retrieves the exact array value retrieved from the server + static class ArrayIndexExpression extends BaseTerminalExpression { + private final int position; + private final PDataType type; + private final ValueBitSet arrayIndexesBitSet; + private final KeyValueSchema arrayIndexesSchema; + + public ArrayIndexExpression(int position, PDataType type, ValueBitSet arrayIndexesBitSet, + KeyValueSchema arrayIndexesSchema) { + this.position = position; + this.type = type; + this.arrayIndexesBitSet = arrayIndexesBitSet; + this.arrayIndexesSchema = arrayIndexesSchema; } - private static void projectAllColumnFamilies(PTable table, Scan scan) { - // Will project all known/declared column families - scan.getFamilyMap().clear(); - for (PColumnFamily family : table.getColumnFamilies()) { - scan.addFamily(family.getName().getBytes()); - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if ( + !tuple.getValue(QueryConstants.ARRAY_VALUE_COLUMN_FAMILY, + QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER, ptr) + ) { + return false; + } + int maxOffset = ptr.getOffset() + ptr.getLength(); + arrayIndexesBitSet.or(ptr); + arrayIndexesSchema.iterator(ptr, position, arrayIndexesBitSet); + Boolean hasValue = arrayIndexesSchema.next(ptr, position, maxOffset, arrayIndexesBitSet); + arrayIndexesBitSet.clear(); + if (hasValue == null) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } + return true; } - // A replaced ArrayIndex function that retrieves the exact array value retrieved from the server - static class ArrayIndexExpression extends BaseTerminalExpression { - private final int position; - private final PDataType type; - private final ValueBitSet arrayIndexesBitSet; - private final KeyValueSchema arrayIndexesSchema; - - public ArrayIndexExpression(int position, PDataType type, ValueBitSet arrayIndexesBitSet, KeyValueSchema arrayIndexesSchema) { - this.position 
= position; - this.type = type; - this.arrayIndexesBitSet = arrayIndexesBitSet; - this.arrayIndexesSchema = arrayIndexesSchema; - } + @Override + public PDataType getDataType() { + return this.type; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!tuple.getValue(QueryConstants.ARRAY_VALUE_COLUMN_FAMILY, QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER, - ptr)) { - return false; - } - int maxOffset = ptr.getOffset() + ptr.getLength(); - arrayIndexesBitSet.or(ptr); - arrayIndexesSchema.iterator(ptr, position, arrayIndexesBitSet); - Boolean hasValue = arrayIndexesSchema.next(ptr, position, maxOffset, arrayIndexesBitSet); - arrayIndexesBitSet.clear(); - if (hasValue == null) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - return true; - } + @Override + public T accept(ExpressionVisitor visitor) { + // TODO Auto-generated method stub + return null; + } + } + + private static void serializeServerParsedExpressionInformationAndSetInScan( + StatementContext context, String serverParsedExpressionAttribute, + List serverParsedKVFuncs, List serverParsedKVRefs) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + // Write the KVRef size followed by the keyvalues that needs to be of + // type arrayindex or json function based on serverParsedExpressionAttribute + WritableUtils.writeVInt(output, serverParsedKVRefs.size()); + for (Expression expression : serverParsedKVRefs) { + expression.write(output); + } + // then write the number of arrayindex or json functions followed + // by the expression itself + WritableUtils.writeVInt(output, serverParsedKVFuncs.size()); + for (Expression expression : serverParsedKVFuncs) { + expression.write(output); + } + + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + context.getScan().setAttribute(serverParsedExpressionAttribute, stream.toByteArray()); + } - @Override - public PDataType getDataType() { - return this.type; - } + private static class SelectClauseVisitor extends ExpressionCompiler { - @Override - public T accept(ExpressionVisitor visitor) { - // TODO Auto-generated method stub - return null; - } + /** + * Track whether or not the projection expression is case sensitive. We use this information to + * determine whether or not we normalize the column name passed + */ + private boolean isCaseSensitive; + private int elementCount; + // Looks at PHOENIX-2160 for the context and use of the below variables. 
+ // These are used for reference counting and converting to KeyValueColumnExpressions + private List serverParsedKVRefs; + private List serverParsedKVFuncs; + private List serverParsedOldFuncs; + private List serverParsedProjectedColumnRefs; + private Map serverParsedExpressionCounts; + private SelectStatement statement; + + private SelectClauseVisitor(StatementContext context, GroupBy groupBy, + List serverParsedKVRefs, List serverParsedKVFuncs, + Map serverParsedExpressionCounts, + List serverParsedProjectedColumnRefs, + List serverParsedOldFuncs, SelectStatement statement) { + super(context, groupBy); + this.serverParsedKVRefs = serverParsedKVRefs; + this.serverParsedKVFuncs = serverParsedKVFuncs; + this.serverParsedOldFuncs = serverParsedOldFuncs; + this.serverParsedExpressionCounts = serverParsedExpressionCounts; + this.serverParsedProjectedColumnRefs = serverParsedProjectedColumnRefs; + this.statement = statement; + reset(); } - private static void serializeServerParsedExpressionInformationAndSetInScan( - StatementContext context, String serverParsedExpressionAttribute, - List serverParsedKVFuncs, - List serverParsedKVRefs) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - // Write the KVRef size followed by the keyvalues that needs to be of - // type arrayindex or json function based on serverParsedExpressionAttribute - WritableUtils.writeVInt(output, serverParsedKVRefs.size()); - for (Expression expression : serverParsedKVRefs) { - expression.write(output); - } - // then write the number of arrayindex or json functions followed - // by the expression itself - WritableUtils.writeVInt(output, serverParsedKVFuncs.size()); - for (Expression expression : serverParsedKVFuncs) { - expression.write(output); - } - - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - context.getScan().setAttribute(serverParsedExpressionAttribute, stream.toByteArray()); + @Override + public void reset() { + super.reset(); + elementCount = 0; + isCaseSensitive = true; } - private static class SelectClauseVisitor extends ExpressionCompiler { - - /** - * Track whether or not the projection expression is case sensitive. We use this - * information to determine whether or not we normalize the column name passed - */ - private boolean isCaseSensitive; - private int elementCount; - // Looks at PHOENIX-2160 for the context and use of the below variables. 
- // These are used for reference counting and converting to KeyValueColumnExpressions - private List serverParsedKVRefs; - private List serverParsedKVFuncs; - private List serverParsedOldFuncs; - private List serverParsedProjectedColumnRefs; - private Map serverParsedExpressionCounts; - private SelectStatement statement; - - private SelectClauseVisitor(StatementContext context, GroupBy groupBy, - List serverParsedKVRefs, - List serverParsedKVFuncs, - Map serverParsedExpressionCounts, - List serverParsedProjectedColumnRefs, - List serverParsedOldFuncs, SelectStatement statement) { - super(context, groupBy); - this.serverParsedKVRefs = serverParsedKVRefs; - this.serverParsedKVFuncs = serverParsedKVFuncs; - this.serverParsedOldFuncs = serverParsedOldFuncs; - this.serverParsedExpressionCounts = serverParsedExpressionCounts; - this.serverParsedProjectedColumnRefs = serverParsedProjectedColumnRefs; - this.statement = statement; - reset(); - } + @Override + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef ref = super.resolveColumn(node); + isCaseSensitive = isCaseSensitive && node.isCaseSensitive(); + return ref; + } - @Override - public void reset() { - super.reset(); - elementCount = 0; - isCaseSensitive = true; - } - - @Override - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef ref = super.resolveColumn(node); - isCaseSensitive = isCaseSensitive && node.isCaseSensitive(); - return ref; - } + @Override + public Expression visit(ColumnParseNode node) throws SQLException { + Expression expression = super.visit(node); + if (parseOnServer(expression)) { + Integer count = serverParsedExpressionCounts.get(expression); + serverParsedExpressionCounts.put(expression, count != null ? (count + 1) : 1); + } + return expression; + } - @Override - public Expression visit(ColumnParseNode node) throws SQLException { - Expression expression = super.visit(node); - if (parseOnServer(expression)) { - Integer count = serverParsedExpressionCounts.get(expression); - serverParsedExpressionCounts.put(expression, count != null ? (count + 1) : 1); - } - return expression; - } + private static boolean parseOnServer(Expression expression) { + return expression.getDataType().isArrayType() + || expression.getDataType().equals(PJson.INSTANCE) + || expression.getDataType().equals(PBson.INSTANCE); + } - private static boolean parseOnServer(Expression expression) { - return expression.getDataType().isArrayType() || expression.getDataType() - .equals(PJson.INSTANCE) || expression.getDataType().equals(PBson.INSTANCE); - } + @Override + public void addElement(List l, Expression element) { + elementCount++; + isCaseSensitive &= elementCount == 1; + super.addElement(l, element); + } - @Override - public void addElement(List l, Expression element) { - elementCount++; - isCaseSensitive &= elementCount == 1; - super.addElement(l, element); - } - - @Override - public Expression visit(SequenceValueParseNode node) throws SQLException { - if (aggregateFunction != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR) - .setSchemaName(node.getTableName().getSchemaName()) - .setTableName(node.getTableName().getTableName()).build().buildException(); - } - return context.getSequenceManager().newSequenceReference(node); - } - - @Override - public Expression visitLeave(FunctionParseNode node, final List children) throws SQLException { - - // this need not be done for group by clause with array or json. 
Hence, the below check - if (!statement.isAggregate() && (ArrayIndexFunction.NAME.equals( - node.getName()) || isJsonFunction(node) || isBsonFunction(node)) && - children.get(0) instanceof ProjectedColumnExpression) { - final List indexKVs = Lists.newArrayList(); - final List indexProjectedColumns = Lists.newArrayList(); - final List copyOfChildren = new ArrayList<>(children); - // Create anon visitor to find reference to array or json in a generic way - children.get(0).accept(new ProjectedColumnExpressionVisitor() { - @Override - public Void visit(ProjectedColumnExpression expression) { - if (expression.getDataType().isArrayType() || expression.getDataType() - .equals(PJson.INSTANCE) || expression.getDataType() - .equals(PBson.INSTANCE)) { - indexProjectedColumns.add(expression); - PColumn col = expression.getColumn(); - // hack'ish... For covered columns with local indexes we defer to the server. - if (col instanceof ProjectedColumn && ((ProjectedColumn) col).getSourceColumnRef() instanceof IndexUncoveredDataColumnRef) { - return null; - } - PTable table = context.getCurrentTable().getTable(); - KeyValueColumnExpression keyValueColumnExpression; - if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - keyValueColumnExpression = - new SingleCellColumnExpression(col, - col.getName().getString(), - table.getEncodingScheme(), - table.getImmutableStorageScheme()); - } else { - keyValueColumnExpression = new KeyValueColumnExpression(col); - } - indexKVs.add(keyValueColumnExpression); - copyOfChildren.set(0, keyValueColumnExpression); - Integer count = serverParsedExpressionCounts.get(expression); - serverParsedExpressionCounts.put(expression, - count != null ? (count - 1) : -1); - } - return null; - } - }); - - Expression func = super.visitLeave(node, children); - // Add the keyvalues which is of type array or json - if (!indexKVs.isEmpty()) { - serverParsedKVRefs.addAll(indexKVs); - serverParsedProjectedColumnRefs.addAll(indexProjectedColumns); - Expression funcModified = super.visitLeave(node, copyOfChildren); - // Track the array index or json function also - serverParsedKVFuncs.add(funcModified); - serverParsedOldFuncs.add(func); - } - return func; - } else { - return super.visitLeave(node, children); + @Override + public Expression visit(SequenceValueParseNode node) throws SQLException { + if (aggregateFunction != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR) + .setSchemaName(node.getTableName().getSchemaName()) + .setTableName(node.getTableName().getTableName()).build().buildException(); + } + return context.getSequenceManager().newSequenceReference(node); + } + + @Override + public Expression visitLeave(FunctionParseNode node, final List children) + throws SQLException { + + // this need not be done for group by clause with array or json. 
Hence, the below check + if ( + !statement.isAggregate() && (ArrayIndexFunction.NAME.equals(node.getName()) + || isJsonFunction(node) || isBsonFunction(node)) + && children.get(0) instanceof ProjectedColumnExpression + ) { + final List indexKVs = Lists.newArrayList(); + final List indexProjectedColumns = Lists.newArrayList(); + final List copyOfChildren = new ArrayList<>(children); + // Create anon visitor to find reference to array or json in a generic way + children.get(0).accept(new ProjectedColumnExpressionVisitor() { + @Override + public Void visit(ProjectedColumnExpression expression) { + if ( + expression.getDataType().isArrayType() + || expression.getDataType().equals(PJson.INSTANCE) + || expression.getDataType().equals(PBson.INSTANCE) + ) { + indexProjectedColumns.add(expression); + PColumn col = expression.getColumn(); + // hack'ish... For covered columns with local indexes we defer to the server. + if ( + col instanceof ProjectedColumn && ((ProjectedColumn) col) + .getSourceColumnRef() instanceof IndexUncoveredDataColumnRef + ) { + return null; + } + PTable table = context.getCurrentTable().getTable(); + KeyValueColumnExpression keyValueColumnExpression; + if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + keyValueColumnExpression = + new SingleCellColumnExpression(col, col.getName().getString(), + table.getEncodingScheme(), table.getImmutableStorageScheme()); + } else { + keyValueColumnExpression = new KeyValueColumnExpression(col); + } + indexKVs.add(keyValueColumnExpression); + copyOfChildren.set(0, keyValueColumnExpression); + Integer count = serverParsedExpressionCounts.get(expression); + serverParsedExpressionCounts.put(expression, count != null ? (count - 1) : -1); } + return null; + } + }); + + Expression func = super.visitLeave(node, children); + // Add the keyvalues which is of type array or json + if (!indexKVs.isEmpty()) { + serverParsedKVRefs.addAll(indexKVs); + serverParsedProjectedColumnRefs.addAll(indexProjectedColumns); + Expression funcModified = super.visitLeave(node, copyOfChildren); + // Track the array index or json function also + serverParsedKVFuncs.add(funcModified); + serverParsedOldFuncs.add(func); } + return func; + } else { + return super.visitLeave(node, children); + } } + } - private static boolean isJsonFunction(FunctionParseNode node) { - return JsonValueFunction.NAME.equals(node.getName()) || JsonQueryFunction.NAME.equals( - node.getName()); - } + private static boolean isJsonFunction(FunctionParseNode node) { + return JsonValueFunction.NAME.equals(node.getName()) + || JsonQueryFunction.NAME.equals(node.getName()); + } - private static boolean isBsonFunction(FunctionParseNode node) { - return BsonValueFunction.NAME.equals(node.getName()); - } + private static boolean isBsonFunction(FunctionParseNode node) { + return BsonValueFunction.NAME.equals(node.getName()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java index 2d152067899..078d2f6aa4a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,13 +28,7 @@ import java.util.Map; import java.util.Set; -import org.apache.phoenix.expression.function.PhoenixRowTimestampFunction; -import org.apache.phoenix.parse.HintNode; -import org.apache.phoenix.parse.NamedTableNode; -import org.apache.phoenix.parse.TerminalParseNode; -import org.apache.phoenix.schema.PTableType; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; @@ -60,6 +54,7 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.expression.RowValueConstructorExpression; +import org.apache.phoenix.expression.function.PhoenixRowTimestampFunction; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.iterate.ParallelIteratorFactory; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -68,8 +63,10 @@ import org.apache.phoenix.optimize.Cost; import org.apache.phoenix.parse.AliasedNode; import org.apache.phoenix.parse.EqualParseNode; +import org.apache.phoenix.parse.HintNode; import org.apache.phoenix.parse.HintNode.Hint; import org.apache.phoenix.parse.JoinTableNode.JoinType; +import org.apache.phoenix.parse.NamedTableNode; import org.apache.phoenix.parse.OrderByNode; import org.apache.phoenix.parse.ParseNode; import org.apache.phoenix.parse.ParseNodeFactory; @@ -77,6 +74,7 @@ import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.parse.SubqueryParseNode; import org.apache.phoenix.parse.TableNode; +import org.apache.phoenix.parse.TerminalParseNode; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; @@ -84,847 +82,834 @@ import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.PDatum; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.RowValueConstructorOffsetNotCoercibleException; +import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.CDCUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.ParseNodeUtil; import org.apache.phoenix.util.ParseNodeUtil.RewriteResult; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.util.MetaDataUtil; -import org.apache.hadoop.conf.Configuration; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - /** - * * Class used to build an executable query plan - * - * * @since 0.1 */ public class QueryCompiler { - private static final 
ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - private final PhoenixStatement statement; - private final Scan scan; - private final Scan originalScan; - private final ColumnResolver resolver; - private final BindManager bindManager; - private final SelectStatement select; - private final List targetColumns; - private final ParallelIteratorFactory parallelIteratorFactory; - private final SequenceManager sequenceManager; - private final boolean projectTuples; - private final boolean noChildParentJoinOptimization; - private final boolean usePersistentCache; - private final boolean optimizeSubquery; - private final Map dataPlans; - private final boolean costBased; - private final StatementContext parentContext; - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, boolean projectTuples, boolean optimizeSubquery, Map dataPlans) throws SQLException { - this(statement, select, resolver, Collections.emptyList(), null, new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans); - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, BindManager bindManager, boolean projectTuples, boolean optimizeSubquery, Map dataPlans) throws SQLException { - this(statement, select, resolver, bindManager, Collections.emptyList(), null, new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans, null); - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map dataPlans) throws SQLException { - this(statement, select, resolver, new BindManager(statement.getParameters()), targetColumns, parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans, null); - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, BindManager bindManager, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map dataPlans) throws SQLException { - this(statement, select, resolver, bindManager, targetColumns, parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans, null); - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map dataPlans, StatementContext parentContext) throws SQLException { - this(statement, select, resolver, new BindManager(statement.getParameters()), targetColumns, parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans, parentContext); - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, BindManager bindManager, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, Map dataPlans, StatementContext parentContext) throws SQLException { - this.statement = statement; - this.select = select; - this.resolver = resolver; - this.bindManager = bindManager; - this.scan = new Scan(); - this.targetColumns = targetColumns; - this.parallelIteratorFactory = parallelIteratorFactory; - this.sequenceManager = sequenceManager; - 
this.projectTuples = projectTuples; - this.noChildParentJoinOptimization = select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION) || select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); - this.usePersistentCache = select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); - ConnectionQueryServices services = statement.getConnection().getQueryServices(); - this.costBased = services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED); - scan.setLoadColumnFamiliesOnDemand(true); - if (select.getHint().hasHint(Hint.NO_CACHE)) { - scan.setCacheBlocks(false); - } - - scan.setCaching(statement.getFetchSize()); - this.originalScan = ScanUtil.newScan(scan); - this.optimizeSubquery = optimizeSubquery; - this.dataPlans = dataPlans == null ? Collections.emptyMap() : dataPlans; - this.parentContext = parentContext; - } - - public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager) throws SQLException { - this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true, false, null); - } - - /** - * Builds an executable query plan from a parsed SQL statement - * @return executable query plan - * @throws SQLException if mismatched types are found, bind value do not match binds, - * or invalid function arguments are encountered. - * @throws SQLFeatureNotSupportedException if an unsupported construct is encountered - * @throws TableNotFoundException if table name not found in schema - * @throws ColumnNotFoundException if column name could not be resolved - * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables - */ - public QueryPlan compile() throws SQLException{ - verifySCN(); - QueryPlan plan; - if (select.isUnion()) { - plan = compileUnionAll(select); - } else { - plan = compileSelect(select); - } - return plan; + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + private final PhoenixStatement statement; + private final Scan scan; + private final Scan originalScan; + private final ColumnResolver resolver; + private final BindManager bindManager; + private final SelectStatement select; + private final List targetColumns; + private final ParallelIteratorFactory parallelIteratorFactory; + private final SequenceManager sequenceManager; + private final boolean projectTuples; + private final boolean noChildParentJoinOptimization; + private final boolean usePersistentCache; + private final boolean optimizeSubquery; + private final Map dataPlans; + private final boolean costBased; + private final StatementContext parentContext; + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + boolean projectTuples, boolean optimizeSubquery, Map dataPlans) + throws SQLException { + this(statement, select, resolver, Collections. emptyList(), null, + new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans); + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + BindManager bindManager, boolean projectTuples, boolean optimizeSubquery, + Map dataPlans) throws SQLException { + this(statement, select, resolver, bindManager, Collections. 
emptyList(), null, + new SequenceManager(statement), projectTuples, optimizeSubquery, dataPlans, null); + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, + Map dataPlans) throws SQLException { + this(statement, select, resolver, new BindManager(statement.getParameters()), targetColumns, + parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans, null); + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + BindManager bindManager, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, + boolean projectTuples, boolean optimizeSubquery, Map dataPlans) + throws SQLException { + this(statement, select, resolver, bindManager, targetColumns, parallelIteratorFactory, + sequenceManager, projectTuples, optimizeSubquery, dataPlans, null); + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + SequenceManager sequenceManager, boolean projectTuples, boolean optimizeSubquery, + Map dataPlans, StatementContext parentContext) throws SQLException { + this(statement, select, resolver, new BindManager(statement.getParameters()), targetColumns, + parallelIteratorFactory, sequenceManager, projectTuples, optimizeSubquery, dataPlans, + parentContext); + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + BindManager bindManager, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory, SequenceManager sequenceManager, + boolean projectTuples, boolean optimizeSubquery, Map dataPlans, + StatementContext parentContext) throws SQLException { + this.statement = statement; + this.select = select; + this.resolver = resolver; + this.bindManager = bindManager; + this.scan = new Scan(); + this.targetColumns = targetColumns; + this.parallelIteratorFactory = parallelIteratorFactory; + this.sequenceManager = sequenceManager; + this.projectTuples = projectTuples; + this.noChildParentJoinOptimization = + select.getHint().hasHint(Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION) + || select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); + this.usePersistentCache = select.getHint().hasHint(Hint.USE_PERSISTENT_CACHE); + ConnectionQueryServices services = statement.getConnection().getQueryServices(); + this.costBased = services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, + QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED); + scan.setLoadColumnFamiliesOnDemand(true); + if (select.getHint().hasHint(Hint.NO_CACHE)) { + scan.setCacheBlocks(false); } - private void verifySCN() throws SQLException { - PhoenixConnection conn = statement.getConnection(); - if (conn.isRunningUpgrade()) { - // PHOENIX-6179 : if upgrade is going on, we don't need to - // perform MaxLookBackAge check - return; - } - Long scn = conn.getSCN(); - if (scn == null) { - return; - } - List involvedTables = resolver.getTables(); - Long maxLookBackAgeInMillis = null; - for(TableRef tableRef: involvedTables) { - PTable table = tableRef.getTable(); - if (maxLookBackAgeInMillis == null) { - maxLookBackAgeInMillis = table.getMaxLookbackAge(); - } - else if (table.getMaxLookbackAge() != null) { - maxLookBackAgeInMillis = 
Long.min(maxLookBackAgeInMillis, table.getMaxLookbackAge()); - } - } - Configuration conf = conn.getQueryServices().getConfiguration(); - maxLookBackAgeInMillis = MetaDataUtil.getMaxLookbackAge(conf, maxLookBackAgeInMillis); - long now = EnvironmentEdgeManager.currentTimeMillis(); - if (maxLookBackAgeInMillis > 0 && now - maxLookBackAgeInMillis > scn){ - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_QUERY_TABLE_WITH_SCN_OLDER_THAN_MAX_LOOKBACK_AGE) - .build().buildException(); - } + scan.setCaching(statement.getFetchSize()); + this.originalScan = ScanUtil.newScan(scan); + this.optimizeSubquery = optimizeSubquery; + this.dataPlans = dataPlans == null ? Collections. emptyMap() : dataPlans; + this.parentContext = parentContext; + } + + public QueryCompiler(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + SequenceManager sequenceManager) throws SQLException { + this(statement, select, resolver, targetColumns, parallelIteratorFactory, sequenceManager, true, + false, null); + } + + /** + * Builds an executable query plan from a parsed SQL statement + * @return executable query plan + * @throws SQLException if mismatched types are found, bind value do not match + * binds, or invalid function arguments are encountered. + * @throws SQLFeatureNotSupportedException if an unsupported construct is encountered + * @throws TableNotFoundException if table name not found in schema + * @throws ColumnNotFoundException if column name could not be resolved + * @throws AmbiguousColumnException if an unaliased column name is ambiguous across + * multiple tables + */ + public QueryPlan compile() throws SQLException { + verifySCN(); + QueryPlan plan; + if (select.isUnion()) { + plan = compileUnionAll(select); + } else { + plan = compileSelect(select); } - - public QueryPlan compileUnionAll(SelectStatement select) throws SQLException { - List unionAllSelects = select.getSelects(); - List plans = new ArrayList(); - - for (int i=0; i < unionAllSelects.size(); i++ ) { - SelectStatement subSelect = unionAllSelects.get(i); - // Push down order-by and limit into sub-selects. - if (!select.getOrderBy().isEmpty() || select.getLimit() != null) { - if (select.getOffset() == null) { - subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), select.getLimit(), null); - } else { - subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), null, null); - } - } - QueryPlan subPlan = compileSubquery(subSelect, true); - plans.add(subPlan); - } - - TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans, - select.hasWildcard() ? 
null : select.getSelect()); - ColumnResolver resolver = FromCompiler.getResolver(tableRef); - StatementContext context = new StatementContext(statement, resolver, bindManager, scan, sequenceManager); - plans = UnionCompiler.convertToTupleProjectionPlan(plans, tableRef, context); - QueryPlan plan = compileSingleFlatQuery( - context, - select, - false, - false, - null, - false, - true); - plan = new UnionPlan(context, select, tableRef, plan.getProjector(), plan.getLimit(), - plan.getOffset(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, plans, - context.getBindManager().getParameterMetaData()); - return plan; - } - - private QueryPlan getExistingDataPlanForCDC() { - if (dataPlans != null) { - for (QueryPlan plan : dataPlans.values()) { - if (plan.getTableRef().getTable().getType() == PTableType.CDC) { - return plan; - } - } - } - return null; + return plan; + } + + private void verifySCN() throws SQLException { + PhoenixConnection conn = statement.getConnection(); + if (conn.isRunningUpgrade()) { + // PHOENIX-6179 : if upgrade is going on, we don't need to + // perform MaxLookBackAge check + return; } - - public QueryPlan compileSelect(SelectStatement select) throws SQLException{ - StatementContext context = createStatementContext(); - if (parentContext != null) { - parentContext.addSubStatementContext(context); - } - QueryPlan dataPlanForCDC = getExistingDataPlanForCDC(); - if (dataPlanForCDC != null) { - TableRef cdcTableRef = dataPlanForCDC.getTableRef(); - PTable cdcTable = cdcTableRef.getTable(); - NamedTableNode cdcDataTableName = NODE_FACTORY.namedTable(null, - NODE_FACTORY.table(cdcTable.getSchemaName().getString(), - cdcTable.getParentTableName().getString()), - select.getTableSamplingRate()); - ColumnResolver dataTableResolver = FromCompiler.getResolver(cdcDataTableName, - statement.getConnection()); - TableRef cdcDataTableRef = dataTableResolver.getTables().get(0); - Set cdcIncludeScopes = - cdcTable.getCDCIncludeScopes(); - String cdcHint = select.getHint().getHint(Hint.CDC_INCLUDE); - if (cdcHint != null && cdcHint.startsWith(HintNode.PREFIX)) { - cdcIncludeScopes = CDCUtil.makeChangeScopeEnumsFromString(cdcHint.substring(1, - cdcHint.length() - 1)); - } - context.setCDCDataTableRef(cdcDataTableRef); - context.setCDCTableRef(cdcTableRef); - context.setCDCIncludeScopes(cdcIncludeScopes); - } - if (select.isJoin()) { - JoinTable joinTable = JoinCompiler.compile(statement, select, context.getResolver()); - return compileJoinQuery(context, joinTable, false, false, null); + Long scn = conn.getSCN(); + if (scn == null) { + return; + } + List involvedTables = resolver.getTables(); + Long maxLookBackAgeInMillis = null; + for (TableRef tableRef : involvedTables) { + PTable table = tableRef.getTable(); + if (maxLookBackAgeInMillis == null) { + maxLookBackAgeInMillis = table.getMaxLookbackAge(); + } else if (table.getMaxLookbackAge() != null) { + maxLookBackAgeInMillis = Long.min(maxLookBackAgeInMillis, table.getMaxLookbackAge()); + } + } + Configuration conf = conn.getQueryServices().getConfiguration(); + maxLookBackAgeInMillis = MetaDataUtil.getMaxLookbackAge(conf, maxLookBackAgeInMillis); + long now = EnvironmentEdgeManager.currentTimeMillis(); + if (maxLookBackAgeInMillis > 0 && now - maxLookBackAgeInMillis > scn) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_QUERY_TABLE_WITH_SCN_OLDER_THAN_MAX_LOOKBACK_AGE).build() + .buildException(); + } + } + + public QueryPlan compileUnionAll(SelectStatement select) throws SQLException { + List unionAllSelects = 
select.getSelects(); + List plans = new ArrayList(); + + for (int i = 0; i < unionAllSelects.size(); i++) { + SelectStatement subSelect = unionAllSelects.get(i); + // Push down order-by and limit into sub-selects. + if (!select.getOrderBy().isEmpty() || select.getLimit() != null) { + if (select.getOffset() == null) { + subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), select.getLimit(), null); } else { - return compileSingleQuery(context, select, false, true); + subSelect = NODE_FACTORY.select(subSelect, select.getOrderBy(), null, null); } + } + QueryPlan subPlan = compileSubquery(subSelect, true); + plans.add(subPlan); } - private StatementContext createStatementContext() { - return new StatementContext(statement, resolver, bindManager, scan, sequenceManager); - } - - /** - * Call compileJoinQuery() for join queries recursively down to the leaf JoinTable nodes. - * If it is a leaf node, call compileSingleFlatQuery() or compileSubquery(), otherwise: - * 1) If option COST_BASED_OPTIMIZER_ENABLED is on and stats are available, return the - * join plan with the best cost. Note that the "best" plan is only locally optimal, - * and might or might not be globally optimal. - * 2) Otherwise, return the join plan compiled with the default strategy. - * @see JoinCompiler.JoinTable#getApplicableJoinStrategies() - */ - protected QueryPlan compileJoinQuery(StatementContext context, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List orderBy) throws SQLException { - if (joinTable.getJoinSpecs().isEmpty()) { - Table table = joinTable.getLeftTable(); - SelectStatement subquery = table.getAsSubquery(orderBy); - if (!table.isSubselect()) { - context.setCurrentTable(table.getTableRef()); - PTable projectedTable = table.createProjectedTable(!projectPKColumns, context); - TupleProjector projector = new TupleProjector(projectedTable); - boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - TupleProjector.serializeProjectorIntoScan(context.getScan(), projector, - wildcardIncludesDynamicCols); - context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes())); - table.projectColumns(context.getScan()); - return compileSingleFlatQuery( - context, - subquery, - asSubquery, - !asSubquery, - null, - true, - false); - } - QueryPlan plan = compileSubquery(subquery, false); - PTable projectedTable = table.createProjectedTable(plan.getProjector()); - context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), subquery.getUdfParseNodes())); - return new TupleProjectionPlan( - plan, - new TupleProjector(plan.getProjector()), - context, - null); - } - - List strategies = joinTable.getApplicableJoinStrategies(); - assert strategies.size() > 0; - if (!costBased || strategies.size() == 1) { - return compileJoinQuery( - strategies.get(0), context, joinTable, asSubquery, projectPKColumns, orderBy); - } + TableRef tableRef = UnionCompiler.contructSchemaTable(statement, plans, + select.hasWildcard() ? 
null : select.getSelect()); + ColumnResolver resolver = FromCompiler.getResolver(tableRef); + StatementContext context = + new StatementContext(statement, resolver, bindManager, scan, sequenceManager); + plans = UnionCompiler.convertToTupleProjectionPlan(plans, tableRef, context); + QueryPlan plan = compileSingleFlatQuery(context, select, false, false, null, false, true); + plan = new UnionPlan(context, select, tableRef, plan.getProjector(), plan.getLimit(), + plan.getOffset(), plan.getOrderBy(), GroupBy.EMPTY_GROUP_BY, plans, + context.getBindManager().getParameterMetaData()); + return plan; + } + + private QueryPlan getExistingDataPlanForCDC() { + if (dataPlans != null) { + for (QueryPlan plan : dataPlans.values()) { + if (plan.getTableRef().getTable().getType() == PTableType.CDC) { + return plan; + } + } + } + return null; + } - QueryPlan bestPlan = null; - Cost bestCost = null; - for (JoinCompiler.Strategy strategy : strategies) { - StatementContext newContext = new StatementContext( - context.getStatement(), context.getResolver(), context.getBindManager(), new Scan(), context.getSequenceManager()); - QueryPlan plan = compileJoinQuery( - strategy, newContext, joinTable, asSubquery, projectPKColumns, orderBy); - Cost cost = plan.getCost(); - if (bestPlan == null || cost.compareTo(bestCost) < 0) { - bestPlan = plan; - bestCost = cost; - } - } - context.setResolver(bestPlan.getContext().getResolver()); - context.setCurrentTable(bestPlan.getContext().getCurrentTable()); - return bestPlan; - } - - protected QueryPlan compileJoinQuery(JoinCompiler.Strategy strategy, StatementContext context, JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List orderBy) throws SQLException { - byte[] emptyByteArray = new byte[0]; - List joinSpecs = joinTable.getJoinSpecs(); - boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - switch (strategy) { - case HASH_BUILD_RIGHT: { - boolean[] starJoinVector = joinTable.getStarJoinVector(); - Table table = joinTable.getLeftTable(); - PTable initialProjectedTable; - TableRef tableRef; - SelectStatement query; - TupleProjector tupleProjector; - if (!table.isSubselect()) { - context.setCurrentTable(table.getTableRef()); - initialProjectedTable = table.createProjectedTable(!projectPKColumns, context); - tableRef = table.getTableRef(); - table.projectColumns(context.getScan()); - query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery); - tupleProjector = new TupleProjector(initialProjectedTable); - } else { - SelectStatement subquery = table.getAsSubquery(orderBy); - QueryPlan plan = compileSubquery(subquery, false); - initialProjectedTable = table.createProjectedTable(plan.getProjector()); - tableRef = plan.getTableRef(); - context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap()); - query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery); - tupleProjector = new TupleProjector(plan.getProjector()); - } - context.setCurrentTable(tableRef); - PTable projectedTable = initialProjectedTable; - int count = joinSpecs.size(); - ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count]; - List[] joinExpressions = new List[count]; - JoinType[] joinTypes = new JoinType[count]; - PTable[] tables = new PTable[count]; - int[] fieldPositions = new int[count]; - StatementContext[] subContexts = new StatementContext[count]; - QueryPlan[] subPlans = new 
QueryPlan[count]; - HashSubPlan[] hashPlans = new HashSubPlan[count]; - fieldPositions[0] = projectedTable.getColumns().size() - projectedTable.getPKColumns().size(); - for (int i = 0; i < count; i++) { - JoinSpec joinSpec = joinSpecs.get(i); - Scan subScan = ScanUtil.newScan(originalScan); - subContexts[i] = new StatementContext(statement, context.getResolver(), context.getBindManager(), subScan, new SequenceManager(statement)); - subPlans[i] = compileJoinQuery( - subContexts[i], - joinSpec.getRhsJoinTable(), - true, - true, - null); - boolean hasPostReference = joinSpec.getRhsJoinTable().hasPostReference(); - if (hasPostReference) { - tables[i] = subContexts[i].getResolver().getTables().get(0).getTable(); - projectedTable = JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType()); - } else { - tables[i] = null; - } - } - for (int i = 0; i < count; i++) { - JoinSpec joinSpec = joinSpecs.get(i); - context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), query.getUdfParseNodes())); - joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder - Pair, List> joinConditions = joinSpec.compileJoinConditions(context, subContexts[i], strategy); - joinExpressions[i] = joinConditions.getFirst(); - List hashExpressions = joinConditions.getSecond(); - Pair keyRangeExpressions = new Pair(null, null); - boolean optimized = getKeyExpressionCombinations( - keyRangeExpressions, - context, - joinTable.getOriginalJoinSelectStatement(), - tableRef, - joinSpec.getType(), - joinExpressions[i], - hashExpressions); - Expression keyRangeLhsExpression = keyRangeExpressions.getFirst(); - Expression keyRangeRhsExpression = keyRangeExpressions.getSecond(); - joinTypes[i] = joinSpec.getType(); - if (i < count - 1) { - fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null ? 0 : (tables[i].getColumns().size() - tables[i].getPKColumns().size())); - } - hashPlans[i] = new HashSubPlan(i, subPlans[i], optimized ? 
null : hashExpressions, joinSpec.isSingleValueOnly(), usePersistentCache, keyRangeLhsExpression, keyRangeRhsExpression); - } - TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector, - wildcardIncludesDynamicCols); - QueryPlan plan = compileSingleFlatQuery( - context, - query, - asSubquery, - !asSubquery && joinTable.isAllLeftJoin(), - null, true, false); - Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context); - Integer limit = null; - Integer offset = null; - if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) { - limit = plan.getLimit(); - offset = plan.getOffset(); - } - HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, joinTypes, - starJoinVector, tables, fieldPositions, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset)); - return HashJoinPlan.create(joinTable.getOriginalJoinSelectStatement(), plan, joinInfo, hashPlans); - } - case HASH_BUILD_LEFT: { - JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); - JoinType type = lastJoinSpec.getType(); - JoinTable rhsJoinTable = lastJoinSpec.getRhsJoinTable(); - Table rhsTable = rhsJoinTable.getLeftTable(); - JoinTable lhsJoin = joinTable.createSubJoinTable(statement.getConnection()); - Scan subScan = ScanUtil.newScan(originalScan); - StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), subScan, new SequenceManager(statement)); - QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, true, null); - PTable rhsProjTable; - TableRef rhsTableRef; - SelectStatement rhs; - TupleProjector tupleProjector; - if (!rhsTable.isSubselect()) { - context.setCurrentTable(rhsTable.getTableRef()); - rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context); - rhsTableRef = rhsTable.getTableRef(); - rhsTable.projectColumns(context.getScan()); - rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery); - tupleProjector = new TupleProjector(rhsProjTable); - } else { - SelectStatement subquery = rhsTable.getAsSubquery(orderBy); - QueryPlan plan = compileSubquery(subquery, false); - rhsProjTable = rhsTable.createProjectedTable(plan.getProjector()); - rhsTableRef = plan.getTableRef(); - context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap()); - rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery); - tupleProjector = new TupleProjector(plan.getProjector()); - } - context.setCurrentTable(rhsTableRef); - context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, context.getConnection(), rhs.getUdfParseNodes())); - ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[]{new ImmutableBytesPtr(emptyByteArray)}; - Pair, List> joinConditions = lastJoinSpec.compileJoinConditions(lhsCtx, context, strategy); - List joinExpressions = joinConditions.getSecond(); - List hashExpressions = joinConditions.getFirst(); - boolean needsMerge = lhsJoin.hasPostReference(); - PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null; - int fieldPosition = needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0; - PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, type == JoinType.Right ? 
JoinType.Left : type) : rhsProjTable; - TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector, - wildcardIncludesDynamicCols); - context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), rhs.getUdfParseNodes())); - QueryPlan rhsPlan = compileSingleFlatQuery( - context, - rhs, - asSubquery, - !asSubquery && type == JoinType.Right, - null, - true, - false); - Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context); - Integer limit = null; - Integer offset = null; - if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) { - limit = rhsPlan.getLimit(); - offset = rhsPlan.getOffset(); - } - HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, new List[]{joinExpressions}, - new JoinType[]{type == JoinType.Right ? JoinType.Left : type}, new boolean[]{true}, - new PTable[]{lhsTable}, new int[]{fieldPosition}, postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset)); - boolean usePersistentCache = joinTable.getOriginalJoinSelectStatement().getHint().hasHint(Hint.USE_PERSISTENT_CACHE); - Pair keyRangeExpressions = new Pair(null, null); - getKeyExpressionCombinations( - keyRangeExpressions, - context, - joinTable.getOriginalJoinSelectStatement(), - rhsTableRef, - type, - joinExpressions, - hashExpressions); - return HashJoinPlan.create( - joinTable.getOriginalJoinSelectStatement(), - rhsPlan, - joinInfo, - new HashSubPlan[]{ - new HashSubPlan( - 0, - lhsPlan, - hashExpressions, - false, - usePersistentCache, - keyRangeExpressions.getFirst(), - keyRangeExpressions.getSecond())}); - } - case SORT_MERGE: { - JoinTable lhsJoin = joinTable.createSubJoinTable(statement.getConnection()); - JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); - JoinType type = lastJoinSpec.getType(); - JoinTable rhsJoin = lastJoinSpec.getRhsJoinTable(); - if (type == JoinType.Right) { - JoinTable temp = lhsJoin; - lhsJoin = rhsJoin; - rhsJoin = temp; - } - - List joinConditionNodes = lastJoinSpec.getOnConditions(); - List lhsOrderBy = Lists.newArrayListWithExpectedSize(joinConditionNodes.size()); - List rhsOrderBy = Lists.newArrayListWithExpectedSize(joinConditionNodes.size()); - for (EqualParseNode condition : joinConditionNodes) { - lhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true)); - rhsOrderBy.add(NODE_FACTORY.orderBy(type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true)); - } - - Scan lhsScan = ScanUtil.newScan(originalScan); - StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), lhsScan, new SequenceManager(statement)); - boolean preserveRowkey = !projectPKColumns && type != JoinType.Full; - QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, !preserveRowkey, lhsOrderBy); - PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable(); - - Scan rhsScan = ScanUtil.newScan(originalScan); - StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), context.getBindManager(), rhsScan, new SequenceManager(statement)); - QueryPlan rhsPlan = compileJoinQuery(rhsCtx, rhsJoin, true, true, rhsOrderBy); - PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable(); - - Pair, List> joinConditions = lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, type == JoinType.Right ? lhsCtx : rhsCtx, strategy); - List lhsKeyExpressions = type == JoinType.Right ? 
joinConditions.getSecond() : joinConditions.getFirst(); - List rhsKeyExpressions = type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond(); - - boolean needsMerge = rhsJoin.hasPostReference(); - int fieldPosition = needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0; - PTable projectedTable = needsMerge ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, type == JoinType.Right ? JoinType.Left : type) : lhsProjTable; - - ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), joinTable.getOriginalJoinSelectStatement().getUdfParseNodes()); - TableRef tableRef = resolver.getTables().get(0); - StatementContext subCtx = new StatementContext(statement, resolver, context.getBindManager(), ScanUtil.newScan(originalScan), new SequenceManager(statement)); - subCtx.setCurrentTable(tableRef); - QueryPlan innerPlan = new SortMergeJoinPlan( - subCtx, - joinTable.getOriginalJoinSelectStatement(), - tableRef, - type == JoinType.Right ? JoinType.Left : type, - lhsPlan, - rhsPlan, - new Pair,List>(lhsKeyExpressions, rhsKeyExpressions), - rhsKeyExpressions, - projectedTable, - lhsProjTable, - needsMerge ? rhsProjTable : null, - fieldPosition, - lastJoinSpec.isSingleValueOnly(), - new Pair,List>(lhsOrderBy, rhsOrderBy)); - context.setCurrentTable(tableRef); - context.setResolver(resolver); - TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), tableRef.getTable().getTableName().getString())); - ParseNode where = joinTable.getPostFiltersCombined(); - SelectStatement select = asSubquery ? - NODE_FACTORY.select( - from, - joinTable.getOriginalJoinSelectStatement().getHint(), - false, - Collections.emptyList(), - where, - null, - null, - orderBy, - null, - null, - 0, - false, - joinTable.getOriginalJoinSelectStatement().hasSequence(), - Collections.emptyList(), - joinTable.getOriginalJoinSelectStatement().getUdfParseNodes()) : - NODE_FACTORY.select( - joinTable.getOriginalJoinSelectStatement(), - from, - where); - - return compileSingleFlatQuery( - context, - select, - asSubquery, - false, - innerPlan, - true, - false); - } - default: - throw new IllegalArgumentException("Invalid join strategy '" + strategy + "'"); - } + public QueryPlan compileSelect(SelectStatement select) throws SQLException { + StatementContext context = createStatementContext(); + if (parentContext != null) { + parentContext.addSubStatementContext(context); + } + QueryPlan dataPlanForCDC = getExistingDataPlanForCDC(); + if (dataPlanForCDC != null) { + TableRef cdcTableRef = dataPlanForCDC.getTableRef(); + PTable cdcTable = cdcTableRef.getTable(); + NamedTableNode cdcDataTableName = + NODE_FACTORY.namedTable(null, NODE_FACTORY.table(cdcTable.getSchemaName().getString(), + cdcTable.getParentTableName().getString()), select.getTableSamplingRate()); + ColumnResolver dataTableResolver = + FromCompiler.getResolver(cdcDataTableName, statement.getConnection()); + TableRef cdcDataTableRef = dataTableResolver.getTables().get(0); + Set cdcIncludeScopes = cdcTable.getCDCIncludeScopes(); + String cdcHint = select.getHint().getHint(Hint.CDC_INCLUDE); + if (cdcHint != null && cdcHint.startsWith(HintNode.PREFIX)) { + cdcIncludeScopes = + CDCUtil.makeChangeScopeEnumsFromString(cdcHint.substring(1, cdcHint.length() - 1)); + } + context.setCDCDataTableRef(cdcDataTableRef); + context.setCDCTableRef(cdcTableRef); + context.setCDCIncludeScopes(cdcIncludeScopes); 
+ } + if (select.isJoin()) { + JoinTable joinTable = JoinCompiler.compile(statement, select, context.getResolver()); + return compileJoinQuery(context, joinTable, false, false, null); + } else { + return compileSingleQuery(context, select, false, true); + } + } + + private StatementContext createStatementContext() { + return new StatementContext(statement, resolver, bindManager, scan, sequenceManager); + } + + /** + * Call compileJoinQuery() for join queries recursively down to the leaf JoinTable nodes. If it is + * a leaf node, call compileSingleFlatQuery() or compileSubquery(), otherwise: 1) If option + * COST_BASED_OPTIMIZER_ENABLED is on and stats are available, return the join plan with the best + * cost. Note that the "best" plan is only locally optimal, and might or might not be globally + * optimal. 2) Otherwise, return the join plan compiled with the default strategy. + * @see JoinCompiler.JoinTable#getApplicableJoinStrategies() + */ + protected QueryPlan compileJoinQuery(StatementContext context, JoinTable joinTable, + boolean asSubquery, boolean projectPKColumns, List orderBy) throws SQLException { + if (joinTable.getJoinSpecs().isEmpty()) { + Table table = joinTable.getLeftTable(); + SelectStatement subquery = table.getAsSubquery(orderBy); + if (!table.isSubselect()) { + context.setCurrentTable(table.getTableRef()); + PTable projectedTable = table.createProjectedTable(!projectPKColumns, context); + TupleProjector projector = new TupleProjector(projectedTable); + boolean wildcardIncludesDynamicCols = + context.getConnection().getQueryServices().getConfiguration().getBoolean( + WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + TupleProjector.serializeProjectorIntoScan(context.getScan(), projector, + wildcardIncludesDynamicCols); + context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), subquery.getUdfParseNodes())); + table.projectColumns(context.getScan()); + return compileSingleFlatQuery(context, subquery, asSubquery, !asSubquery, null, true, + false); + } + QueryPlan plan = compileSubquery(subquery, false); + PTable projectedTable = table.createProjectedTable(plan.getProjector()); + context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), subquery.getUdfParseNodes())); + return new TupleProjectionPlan(plan, new TupleProjector(plan.getProjector()), context, null); } - private boolean getKeyExpressionCombinations(Pair combination, StatementContext context, SelectStatement select, TableRef table, JoinType type, final List joinExpressions, final List hashExpressions) throws SQLException { - if ((type != JoinType.Inner && type != JoinType.Semi) || this.noChildParentJoinOptimization) - return false; - - Scan scanCopy = ScanUtil.newScan(context.getScan()); - StatementContext contextCopy = new StatementContext(statement, context.getResolver(), context.getBindManager(), scanCopy, new SequenceManager(statement)); - contextCopy.setCurrentTable(table); - List lhsCombination = Lists. 
newArrayList(); - boolean complete = WhereOptimizer.getKeyExpressionCombination(lhsCombination, contextCopy, select, joinExpressions); - if (lhsCombination.isEmpty()) - return false; - - List rhsCombination = Lists.newArrayListWithExpectedSize(lhsCombination.size()); - for (int i = 0; i < lhsCombination.size(); i++) { - Expression lhs = lhsCombination.get(i); - for (int j = 0; j < joinExpressions.size(); j++) { - if (lhs == joinExpressions.get(j)) { - rhsCombination.add(hashExpressions.get(j)); - break; - } - } - } + List strategies = joinTable.getApplicableJoinStrategies(); + assert strategies.size() > 0; + if (!costBased || strategies.size() == 1) { + return compileJoinQuery(strategies.get(0), context, joinTable, asSubquery, projectPKColumns, + orderBy); + } - if (lhsCombination.size() == 1) { - combination.setFirst(lhsCombination.get(0)); - combination.setSecond(rhsCombination.get(0)); + QueryPlan bestPlan = null; + Cost bestCost = null; + for (JoinCompiler.Strategy strategy : strategies) { + StatementContext newContext = new StatementContext(context.getStatement(), + context.getResolver(), context.getBindManager(), new Scan(), context.getSequenceManager()); + QueryPlan plan = + compileJoinQuery(strategy, newContext, joinTable, asSubquery, projectPKColumns, orderBy); + Cost cost = plan.getCost(); + if (bestPlan == null || cost.compareTo(bestCost) < 0) { + bestPlan = plan; + bestCost = cost; + } + } + context.setResolver(bestPlan.getContext().getResolver()); + context.setCurrentTable(bestPlan.getContext().getCurrentTable()); + return bestPlan; + } + + protected QueryPlan compileJoinQuery(JoinCompiler.Strategy strategy, StatementContext context, + JoinTable joinTable, boolean asSubquery, boolean projectPKColumns, List orderBy) + throws SQLException { + byte[] emptyByteArray = new byte[0]; + List joinSpecs = joinTable.getJoinSpecs(); + boolean wildcardIncludesDynamicCols = + context.getConnection().getQueryServices().getConfiguration() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + switch (strategy) { + case HASH_BUILD_RIGHT: { + boolean[] starJoinVector = joinTable.getStarJoinVector(); + Table table = joinTable.getLeftTable(); + PTable initialProjectedTable; + TableRef tableRef; + SelectStatement query; + TupleProjector tupleProjector; + if (!table.isSubselect()) { + context.setCurrentTable(table.getTableRef()); + initialProjectedTable = table.createProjectedTable(!projectPKColumns, context); + tableRef = table.getTableRef(); + table.projectColumns(context.getScan()); + query = joinTable.getAsSingleSubquery(table.getAsSubquery(orderBy), asSubquery); + tupleProjector = new TupleProjector(initialProjectedTable); } else { - combination.setFirst(new RowValueConstructorExpression(lhsCombination, false)); - combination.setSecond(new RowValueConstructorExpression(rhsCombination, false)); + SelectStatement subquery = table.getAsSubquery(orderBy); + QueryPlan plan = compileSubquery(subquery, false); + initialProjectedTable = table.createProjectedTable(plan.getProjector()); + tableRef = plan.getTableRef(); + context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap()); + query = joinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery); + tupleProjector = new TupleProjector(plan.getProjector()); } + context.setCurrentTable(tableRef); + PTable projectedTable = initialProjectedTable; + int count = joinSpecs.size(); + ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count]; + List[] joinExpressions = new 
List[count]; + JoinType[] joinTypes = new JoinType[count]; + PTable[] tables = new PTable[count]; + int[] fieldPositions = new int[count]; + StatementContext[] subContexts = new StatementContext[count]; + QueryPlan[] subPlans = new QueryPlan[count]; + HashSubPlan[] hashPlans = new HashSubPlan[count]; + fieldPositions[0] = + projectedTable.getColumns().size() - projectedTable.getPKColumns().size(); + for (int i = 0; i < count; i++) { + JoinSpec joinSpec = joinSpecs.get(i); + Scan subScan = ScanUtil.newScan(originalScan); + subContexts[i] = new StatementContext(statement, context.getResolver(), + context.getBindManager(), subScan, new SequenceManager(statement)); + subPlans[i] = + compileJoinQuery(subContexts[i], joinSpec.getRhsJoinTable(), true, true, null); + boolean hasPostReference = joinSpec.getRhsJoinTable().hasPostReference(); + if (hasPostReference) { + tables[i] = subContexts[i].getResolver().getTables().get(0).getTable(); + projectedTable = + JoinCompiler.joinProjectedTables(projectedTable, tables[i], joinSpec.getType()); + } else { + tables[i] = null; + } + } + for (int i = 0; i < count; i++) { + JoinSpec joinSpec = joinSpecs.get(i); + context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), query.getUdfParseNodes())); + joinIds[i] = new ImmutableBytesPtr(emptyByteArray); // place-holder + Pair, List> joinConditions = + joinSpec.compileJoinConditions(context, subContexts[i], strategy); + joinExpressions[i] = joinConditions.getFirst(); + List hashExpressions = joinConditions.getSecond(); + Pair keyRangeExpressions = + new Pair(null, null); + boolean optimized = getKeyExpressionCombinations(keyRangeExpressions, context, + joinTable.getOriginalJoinSelectStatement(), tableRef, joinSpec.getType(), + joinExpressions[i], hashExpressions); + Expression keyRangeLhsExpression = keyRangeExpressions.getFirst(); + Expression keyRangeRhsExpression = keyRangeExpressions.getSecond(); + joinTypes[i] = joinSpec.getType(); + if (i < count - 1) { + fieldPositions[i + 1] = fieldPositions[i] + (tables[i] == null + ? 0 + : (tables[i].getColumns().size() - tables[i].getPKColumns().size())); + } + hashPlans[i] = new HashSubPlan(i, subPlans[i], optimized ? 
null : hashExpressions, + joinSpec.isSingleValueOnly(), usePersistentCache, keyRangeLhsExpression, + keyRangeRhsExpression); + } + TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector, + wildcardIncludesDynamicCols); + QueryPlan plan = compileSingleFlatQuery(context, query, asSubquery, + !asSubquery && joinTable.isAllLeftJoin(), null, true, false); + Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context); + Integer limit = null; + Integer offset = null; + if (!query.isAggregate() && !query.isDistinct() && query.getOrderBy().isEmpty()) { + limit = plan.getLimit(); + offset = plan.getOffset(); + } + HashJoinInfo joinInfo = new HashJoinInfo(projectedTable, joinIds, joinExpressions, + joinTypes, starJoinVector, tables, fieldPositions, postJoinFilterExpression, + QueryUtil.getOffsetLimit(limit, offset)); + return HashJoinPlan.create(joinTable.getOriginalJoinSelectStatement(), plan, joinInfo, + hashPlans); + } + case HASH_BUILD_LEFT: { + JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); + JoinType type = lastJoinSpec.getType(); + JoinTable rhsJoinTable = lastJoinSpec.getRhsJoinTable(); + Table rhsTable = rhsJoinTable.getLeftTable(); + JoinTable lhsJoin = joinTable.createSubJoinTable(statement.getConnection()); + Scan subScan = ScanUtil.newScan(originalScan); + StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), + context.getBindManager(), subScan, new SequenceManager(statement)); + QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, true, null); + PTable rhsProjTable; + TableRef rhsTableRef; + SelectStatement rhs; + TupleProjector tupleProjector; + if (!rhsTable.isSubselect()) { + context.setCurrentTable(rhsTable.getTableRef()); + rhsProjTable = rhsTable.createProjectedTable(!projectPKColumns, context); + rhsTableRef = rhsTable.getTableRef(); + rhsTable.projectColumns(context.getScan()); + rhs = rhsJoinTable.getAsSingleSubquery(rhsTable.getAsSubquery(orderBy), asSubquery); + tupleProjector = new TupleProjector(rhsProjTable); + } else { + SelectStatement subquery = rhsTable.getAsSubquery(orderBy); + QueryPlan plan = compileSubquery(subquery, false); + rhsProjTable = rhsTable.createProjectedTable(plan.getProjector()); + rhsTableRef = plan.getTableRef(); + context.getScan().setFamilyMap(plan.getContext().getScan().getFamilyMap()); + rhs = rhsJoinTable.getAsSingleSubquery((SelectStatement) plan.getStatement(), asSubquery); + tupleProjector = new TupleProjector(plan.getProjector()); + } + context.setCurrentTable(rhsTableRef); + context.setResolver(FromCompiler.getResolverForProjectedTable(rhsProjTable, + context.getConnection(), rhs.getUdfParseNodes())); + ImmutableBytesPtr[] joinIds = + new ImmutableBytesPtr[] { new ImmutableBytesPtr(emptyByteArray) }; + Pair, List> joinConditions = + lastJoinSpec.compileJoinConditions(lhsCtx, context, strategy); + List joinExpressions = joinConditions.getSecond(); + List hashExpressions = joinConditions.getFirst(); + boolean needsMerge = lhsJoin.hasPostReference(); + PTable lhsTable = needsMerge ? lhsCtx.getResolver().getTables().get(0).getTable() : null; + int fieldPosition = + needsMerge ? rhsProjTable.getColumns().size() - rhsProjTable.getPKColumns().size() : 0; + PTable projectedTable = needsMerge + ? JoinCompiler.joinProjectedTables(rhsProjTable, lhsTable, + type == JoinType.Right ? 
JoinType.Left : type) + : rhsProjTable; + TupleProjector.serializeProjectorIntoScan(context.getScan(), tupleProjector, + wildcardIncludesDynamicCols); + context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), rhs.getUdfParseNodes())); + QueryPlan rhsPlan = compileSingleFlatQuery(context, rhs, asSubquery, + !asSubquery && type == JoinType.Right, null, true, false); + Expression postJoinFilterExpression = joinTable.compilePostFilterExpression(context); + Integer limit = null; + Integer offset = null; + if (!rhs.isAggregate() && !rhs.isDistinct() && rhs.getOrderBy().isEmpty()) { + limit = rhsPlan.getLimit(); + offset = rhsPlan.getOffset(); + } + HashJoinInfo joinInfo = + new HashJoinInfo(projectedTable, joinIds, new List[] { joinExpressions }, + new JoinType[] { type == JoinType.Right ? JoinType.Left : type }, + new boolean[] { true }, new PTable[] { lhsTable }, new int[] { fieldPosition }, + postJoinFilterExpression, QueryUtil.getOffsetLimit(limit, offset)); + boolean usePersistentCache = + joinTable.getOriginalJoinSelectStatement().getHint().hasHint(Hint.USE_PERSISTENT_CACHE); + Pair keyRangeExpressions = + new Pair(null, null); + getKeyExpressionCombinations(keyRangeExpressions, context, + joinTable.getOriginalJoinSelectStatement(), rhsTableRef, type, joinExpressions, + hashExpressions); + return HashJoinPlan.create(joinTable.getOriginalJoinSelectStatement(), rhsPlan, joinInfo, + new HashSubPlan[] { new HashSubPlan(0, lhsPlan, hashExpressions, false, + usePersistentCache, keyRangeExpressions.getFirst(), keyRangeExpressions.getSecond()) }); + } + case SORT_MERGE: { + JoinTable lhsJoin = joinTable.createSubJoinTable(statement.getConnection()); + JoinSpec lastJoinSpec = joinSpecs.get(joinSpecs.size() - 1); + JoinType type = lastJoinSpec.getType(); + JoinTable rhsJoin = lastJoinSpec.getRhsJoinTable(); + if (type == JoinType.Right) { + JoinTable temp = lhsJoin; + lhsJoin = rhsJoin; + rhsJoin = temp; + } + + List joinConditionNodes = lastJoinSpec.getOnConditions(); + List lhsOrderBy = + Lists. newArrayListWithExpectedSize(joinConditionNodes.size()); + List rhsOrderBy = + Lists. newArrayListWithExpectedSize(joinConditionNodes.size()); + for (EqualParseNode condition : joinConditionNodes) { + lhsOrderBy.add(NODE_FACTORY.orderBy( + type == JoinType.Right ? condition.getRHS() : condition.getLHS(), false, true)); + rhsOrderBy.add(NODE_FACTORY.orderBy( + type == JoinType.Right ? condition.getLHS() : condition.getRHS(), false, true)); + } + + Scan lhsScan = ScanUtil.newScan(originalScan); + StatementContext lhsCtx = new StatementContext(statement, context.getResolver(), + context.getBindManager(), lhsScan, new SequenceManager(statement)); + boolean preserveRowkey = !projectPKColumns && type != JoinType.Full; + QueryPlan lhsPlan = compileJoinQuery(lhsCtx, lhsJoin, true, !preserveRowkey, lhsOrderBy); + PTable lhsProjTable = lhsCtx.getResolver().getTables().get(0).getTable(); + + Scan rhsScan = ScanUtil.newScan(originalScan); + StatementContext rhsCtx = new StatementContext(statement, context.getResolver(), + context.getBindManager(), rhsScan, new SequenceManager(statement)); + QueryPlan rhsPlan = compileJoinQuery(rhsCtx, rhsJoin, true, true, rhsOrderBy); + PTable rhsProjTable = rhsCtx.getResolver().getTables().get(0).getTable(); + + Pair, List> joinConditions = + lastJoinSpec.compileJoinConditions(type == JoinType.Right ? rhsCtx : lhsCtx, + type == JoinType.Right ? lhsCtx : rhsCtx, strategy); + List lhsKeyExpressions = + type == JoinType.Right ? 
joinConditions.getSecond() : joinConditions.getFirst(); + List rhsKeyExpressions = + type == JoinType.Right ? joinConditions.getFirst() : joinConditions.getSecond(); + + boolean needsMerge = rhsJoin.hasPostReference(); + int fieldPosition = + needsMerge ? lhsProjTable.getColumns().size() - lhsProjTable.getPKColumns().size() : 0; + PTable projectedTable = needsMerge + ? JoinCompiler.joinProjectedTables(lhsProjTable, rhsProjTable, + type == JoinType.Right ? JoinType.Left : type) + : lhsProjTable; + + ColumnResolver resolver = FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), joinTable.getOriginalJoinSelectStatement().getUdfParseNodes()); + TableRef tableRef = resolver.getTables().get(0); + StatementContext subCtx = new StatementContext(statement, resolver, + context.getBindManager(), ScanUtil.newScan(originalScan), new SequenceManager(statement)); + subCtx.setCurrentTable(tableRef); + QueryPlan innerPlan = + new SortMergeJoinPlan(subCtx, joinTable.getOriginalJoinSelectStatement(), tableRef, + type == JoinType.Right ? JoinType.Left : type, lhsPlan, rhsPlan, + new Pair, List>(lhsKeyExpressions, rhsKeyExpressions), + rhsKeyExpressions, projectedTable, lhsProjTable, needsMerge ? rhsProjTable : null, + fieldPosition, lastJoinSpec.isSingleValueOnly(), + new Pair, List>(lhsOrderBy, rhsOrderBy)); + context.setCurrentTable(tableRef); + context.setResolver(resolver); + TableNode from = NODE_FACTORY.namedTable(tableRef.getTableAlias(), + NODE_FACTORY.table(tableRef.getTable().getSchemaName().getString(), + tableRef.getTable().getTableName().getString())); + ParseNode where = joinTable.getPostFiltersCombined(); + SelectStatement select = asSubquery + ? NODE_FACTORY.select(from, joinTable.getOriginalJoinSelectStatement().getHint(), false, + Collections. emptyList(), where, null, null, orderBy, null, null, 0, false, + joinTable.getOriginalJoinSelectStatement().hasSequence(), + Collections. emptyList(), + joinTable.getOriginalJoinSelectStatement().getUdfParseNodes()) + : NODE_FACTORY.select(joinTable.getOriginalJoinSelectStatement(), from, where); + + return compileSingleFlatQuery(context, select, asSubquery, false, innerPlan, true, false); + } + default: + throw new IllegalArgumentException("Invalid join strategy '" + strategy + "'"); + } + } + + private boolean getKeyExpressionCombinations(Pair combination, + StatementContext context, SelectStatement select, TableRef table, JoinType type, + final List joinExpressions, final List hashExpressions) + throws SQLException { + if ((type != JoinType.Inner && type != JoinType.Semi) || this.noChildParentJoinOptimization) + return false; + + Scan scanCopy = ScanUtil.newScan(context.getScan()); + StatementContext contextCopy = new StatementContext(statement, context.getResolver(), + context.getBindManager(), scanCopy, new SequenceManager(statement)); + contextCopy.setCurrentTable(table); + List lhsCombination = Lists. 
newArrayList(); + boolean complete = WhereOptimizer.getKeyExpressionCombination(lhsCombination, contextCopy, + select, joinExpressions); + if (lhsCombination.isEmpty()) return false; + + List rhsCombination = Lists.newArrayListWithExpectedSize(lhsCombination.size()); + for (int i = 0; i < lhsCombination.size(); i++) { + Expression lhs = lhsCombination.get(i); + for (int j = 0; j < joinExpressions.size(); j++) { + if (lhs == joinExpressions.get(j)) { + rhsCombination.add(hashExpressions.get(j)); + break; + } + } + } - return type == JoinType.Semi && complete; - } - - protected QueryPlan compileSubquery(SelectStatement subquery, boolean pushDownMaxRows) throws SQLException { - return compileSubquery(subquery, pushDownMaxRows, null); - } - - protected QueryPlan compileSubquery( - SelectStatement subquerySelectStatement, - boolean pushDownMaxRows, StatementContext parentContext) throws SQLException { - PhoenixConnection phoenixConnection = this.statement.getConnection(); - RewriteResult rewriteResult = - ParseNodeUtil.rewrite(subquerySelectStatement, phoenixConnection); - int maxRows = this.statement.getMaxRows(); - this.statement.setMaxRows(pushDownMaxRows ? maxRows : 0); // overwrite maxRows to avoid its impact on inner queries. - QueryPlan queryPlan = new QueryCompiler( - this.statement, - rewriteResult.getRewrittenSelectStatement(), - rewriteResult.getColumnResolver(), - bindManager, - false, - optimizeSubquery, - null).compile(); - if (optimizeSubquery) { - queryPlan = statement.getConnection().getQueryServices().getOptimizer().optimize( - statement, - queryPlan); - } - if (parentContext != null) { - parentContext.addSubStatementContext(queryPlan.getContext()); - } - this.statement.setMaxRows(maxRows); // restore maxRows. - return queryPlan; + if (lhsCombination.size() == 1) { + combination.setFirst(lhsCombination.get(0)); + combination.setSecond(rhsCombination.get(0)); + } else { + combination.setFirst(new RowValueConstructorExpression(lhsCombination, false)); + combination.setSecond(new RowValueConstructorExpression(rhsCombination, false)); } - protected QueryPlan compileSingleQuery(StatementContext context, SelectStatement select, boolean asSubquery, boolean allowPageFilter) throws SQLException{ - SelectStatement innerSelect = select.getInnerSelectStatement(); - if (innerSelect == null) { - return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, null, false, false); - } + return type == JoinType.Semi && complete; + } + + protected QueryPlan compileSubquery(SelectStatement subquery, boolean pushDownMaxRows) + throws SQLException { + return compileSubquery(subquery, pushDownMaxRows, null); + } + + protected QueryPlan compileSubquery(SelectStatement subquerySelectStatement, + boolean pushDownMaxRows, StatementContext parentContext) throws SQLException { + PhoenixConnection phoenixConnection = this.statement.getConnection(); + RewriteResult rewriteResult = ParseNodeUtil.rewrite(subquerySelectStatement, phoenixConnection); + int maxRows = this.statement.getMaxRows(); + this.statement.setMaxRows(pushDownMaxRows ? maxRows : 0); // overwrite maxRows to avoid its + // impact on inner queries. 
+ QueryPlan queryPlan = + new QueryCompiler(this.statement, rewriteResult.getRewrittenSelectStatement(), + rewriteResult.getColumnResolver(), bindManager, false, optimizeSubquery, null).compile(); + if (optimizeSubquery) { + queryPlan = + statement.getConnection().getQueryServices().getOptimizer().optimize(statement, queryPlan); + } + if (parentContext != null) { + parentContext.addSubStatementContext(queryPlan.getContext()); + } + this.statement.setMaxRows(maxRows); // restore maxRows. + return queryPlan; + } + + protected QueryPlan compileSingleQuery(StatementContext context, SelectStatement select, + boolean asSubquery, boolean allowPageFilter) throws SQLException { + SelectStatement innerSelect = select.getInnerSelectStatement(); + if (innerSelect == null) { + return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, null, false, + false); + } - if((innerSelect.getOffset() != null && (!innerSelect.getOffset().isIntegerOffset()) || - select.getOffset() != null && !select.getOffset().isIntegerOffset())) { - throw new SQLException("RVC Offset not allowed with subqueries."); - } + if ( + (innerSelect.getOffset() != null && (!innerSelect.getOffset().isIntegerOffset()) + || select.getOffset() != null && !select.getOffset().isIntegerOffset()) + ) { + throw new SQLException("RVC Offset not allowed with subqueries."); + } - QueryPlan innerPlan = compileSubquery(innerSelect, false, context); - if (innerPlan instanceof UnionPlan) { - UnionCompiler.optimizeUnionOrderByIfPossible( - (UnionPlan) innerPlan, - select, - this::createStatementContext); - } - RowProjector innerQueryPlanRowProjector = innerPlan.getProjector(); - TupleProjector tupleProjector = new TupleProjector(innerQueryPlanRowProjector); + QueryPlan innerPlan = compileSubquery(innerSelect, false, context); + if (innerPlan instanceof UnionPlan) { + UnionCompiler.optimizeUnionOrderByIfPossible((UnionPlan) innerPlan, select, + this::createStatementContext); + } + RowProjector innerQueryPlanRowProjector = innerPlan.getProjector(); + TupleProjector tupleProjector = new TupleProjector(innerQueryPlanRowProjector); + + // Replace the original resolver and table with those having compiled type info. + TableRef tableRef = context.getResolver().getTables().get(0); + ColumnResolver resolver = FromCompiler.getResolverForCompiledDerivedTable( + statement.getConnection(), tableRef, innerQueryPlanRowProjector); + context.setResolver(resolver); + tableRef = resolver.getTables().get(0); + context.setCurrentTable(tableRef); + innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, context, null); + + return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, innerPlan, false, + false); + } + + protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select, + boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan, boolean inJoin, + boolean inUnion) throws SQLException { + boolean isApplicable = true; + PTable projectedTable = null; + if (this.projectTuples) { + projectedTable = TupleProjectionCompiler.createProjectedTable(select, context); + if (projectedTable != null) { + context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, + context.getConnection(), select.getUdfParseNodes())); + } + } - // Replace the original resolver and table with those having compiled type info. 
- TableRef tableRef = context.getResolver().getTables().get(0); - ColumnResolver resolver = FromCompiler.getResolverForCompiledDerivedTable(statement.getConnection(), tableRef, innerQueryPlanRowProjector); - context.setResolver(resolver); - tableRef = resolver.getTables().get(0); - context.setCurrentTable(tableRef); - innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, context, null); - - return compileSingleFlatQuery(context, select, asSubquery, allowPageFilter, innerPlan, false, false); - } - - protected QueryPlan compileSingleFlatQuery( - StatementContext context, - SelectStatement select, - boolean asSubquery, - boolean allowPageFilter, - QueryPlan innerPlan, - boolean inJoin, - boolean inUnion) throws SQLException { - boolean isApplicable = true; - PTable projectedTable = null; - if (this.projectTuples) { - projectedTable = TupleProjectionCompiler.createProjectedTable(select, context); - if (projectedTable != null) { - context.setResolver(FromCompiler.getResolverForProjectedTable(projectedTable, context.getConnection(), select.getUdfParseNodes())); - } - } - - ColumnResolver resolver = context.getResolver(); - TableRef tableRef = context.getCurrentTable(); - PTable table = tableRef.getTable(); - - if (table.getType() == PTableType.CDC) { - List selectNodes = select.getSelect(); - // For CDC queries, if a single wildcard projection is used, automatically insert - // PHOENIX_ROW_TIMESTAMP() as a project at the beginning. - ParseNode selectNode = selectNodes.size() == 1 ? selectNodes.get(0).getNode() : null; - if (selectNode instanceof TerminalParseNode - && ((TerminalParseNode) selectNode).isWildcardNode()) { - List tmpSelectNodes = Lists.newArrayListWithExpectedSize( - selectNodes.size() + 1); - tmpSelectNodes.add(NODE_FACTORY.aliasedNode(null, - NODE_FACTORY.function(PhoenixRowTimestampFunction.NAME, - Collections.emptyList()))); - tmpSelectNodes.add(NODE_FACTORY.aliasedNode(null, - ((TerminalParseNode) selectNode).getRewritten())); - selectNodes = tmpSelectNodes; - } - List orderByNodes = select.getOrderBy(); - // For CDC queries, if no ORDER BY is specified, add default ordering. - if (orderByNodes.size() == 0) { - orderByNodes = Lists.newArrayListWithExpectedSize(1); - orderByNodes.add(NODE_FACTORY.orderBy( - NODE_FACTORY.function(PhoenixRowTimestampFunction.NAME, - Collections.emptyList()), - false, SortOrder.getDefault() == SortOrder.ASC)); - } - select = NODE_FACTORY.select(select.getFrom(), - select.getHint(), select.isDistinct(), selectNodes, select.getWhere(), - select.getGroupBy(), select.getHaving(), orderByNodes, select.getLimit(), - select.getOffset(), select.getBindCount(), select.isAggregate(), - select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); - } + ColumnResolver resolver = context.getResolver(); + TableRef tableRef = context.getCurrentTable(); + PTable table = tableRef.getTable(); + + if (table.getType() == PTableType.CDC) { + List selectNodes = select.getSelect(); + // For CDC queries, if a single wildcard projection is used, automatically insert + // PHOENIX_ROW_TIMESTAMP() as a project at the beginning. + ParseNode selectNode = selectNodes.size() == 1 ? 
selectNodes.get(0).getNode() : null; + if ( + selectNode instanceof TerminalParseNode && ((TerminalParseNode) selectNode).isWildcardNode() + ) { + List tmpSelectNodes = + Lists.newArrayListWithExpectedSize(selectNodes.size() + 1); + tmpSelectNodes.add(NODE_FACTORY.aliasedNode(null, + NODE_FACTORY.function(PhoenixRowTimestampFunction.NAME, Collections.emptyList()))); + tmpSelectNodes + .add(NODE_FACTORY.aliasedNode(null, ((TerminalParseNode) selectNode).getRewritten())); + selectNodes = tmpSelectNodes; + } + List orderByNodes = select.getOrderBy(); + // For CDC queries, if no ORDER BY is specified, add default ordering. + if (orderByNodes.size() == 0) { + orderByNodes = Lists.newArrayListWithExpectedSize(1); + orderByNodes.add(NODE_FACTORY.orderBy( + NODE_FACTORY.function(PhoenixRowTimestampFunction.NAME, Collections.emptyList()), false, + SortOrder.getDefault() == SortOrder.ASC)); + } + select = NODE_FACTORY.select(select.getFrom(), select.getHint(), select.isDistinct(), + selectNodes, select.getWhere(), select.getGroupBy(), select.getHaving(), orderByNodes, + select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), + select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); + } - ParseNode viewWhere = null; - if (table.getViewStatement() != null) { - viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere(); - } - Integer limit = LimitCompiler.compile(context, select); + ParseNode viewWhere = null; + if (table.getViewStatement() != null) { + viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere(); + } + Integer limit = LimitCompiler.compile(context, select); + + CompiledOffset compiledOffset = null; + Integer offset = null; + try { + compiledOffset = OffsetCompiler.getOffsetCompiler().compile(context, select, inJoin, inUnion); + offset = compiledOffset.getIntegerOffset().orNull(); + } catch (RowValueConstructorOffsetNotCoercibleException e) { + // This current plan is not executable + compiledOffset = new CompiledOffset(Optional. absent(), Optional. absent()); + isApplicable = false; + } - CompiledOffset compiledOffset = null; - Integer offset = null; - try { - compiledOffset = OffsetCompiler.getOffsetCompiler().compile(context, select, inJoin, inUnion); - offset = compiledOffset.getIntegerOffset().orNull(); - } catch(RowValueConstructorOffsetNotCoercibleException e){ - //This current plan is not executable - compiledOffset = new CompiledOffset(Optional.absent(),Optional.absent()); - isApplicable = false; + GroupBy groupBy = GroupByCompiler.compile(context, select); + // Optimize the HAVING clause by finding any group by expressions that can be moved + // to the WHERE clause + select = HavingCompiler.rewrite(context, select, groupBy); + Expression having = HavingCompiler.compile(context, select, groupBy); + // Don't pass groupBy when building where clause expression, because we do not want to wrap + // these + // expressions as group by key expressions since they're pre, not post filtered. + if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) { + context.setResolver( + FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes())); + } + Set subqueries = Sets. newHashSet(); + Expression where = + WhereCompiler.compile(context, select, viewWhere, subqueries, compiledOffset.getByteOffset()); + // Recompile GROUP BY now that we've figured out our ScanRanges so we know + // definitively whether or not we'll traverse in row key order. 
+ groupBy = groupBy.compile(context, innerPlan, where); + context.setResolver(resolver); // recover resolver + boolean wildcardIncludesDynamicCols = + context.getConnection().getQueryServices().getConfiguration() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, + asSubquery ? Collections.emptyList() : targetColumns, where, wildcardIncludesDynamicCols); + OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit, compiledOffset, + projector, innerPlan, where); + context.getAggregationManager().compile(context, groupBy); + // Final step is to build the query plan + if (!asSubquery) { + int maxRows = statement.getMaxRows(); + if (maxRows > 0) { + if (limit != null) { + limit = Math.min(limit, maxRows); + } else { + limit = maxRows; } + } + } - GroupBy groupBy = GroupByCompiler.compile(context, select); - // Optimize the HAVING clause by finding any group by expressions that can be moved - // to the WHERE clause - select = HavingCompiler.rewrite(context, select, groupBy); - Expression having = HavingCompiler.compile(context, select, groupBy); - // Don't pass groupBy when building where clause expression, because we do not want to wrap these - // expressions as group by key expressions since they're pre, not post filtered. - if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) { - context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes())); - } - Set subqueries = Sets. newHashSet(); - Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries, compiledOffset.getByteOffset()); - // Recompile GROUP BY now that we've figured out our ScanRanges so we know - // definitively whether or not we'll traverse in row key order. - groupBy = groupBy.compile(context, innerPlan, where); - context.setResolver(resolver); // recover resolver - boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, - asSubquery ? Collections.emptyList() : targetColumns, where, - wildcardIncludesDynamicCols); - OrderBy orderBy = OrderByCompiler.compile( - context, - select, - groupBy, - limit, - compiledOffset, - projector, - innerPlan, - where); - context.getAggregationManager().compile(context, groupBy); - // Final step is to build the query plan - if (!asSubquery) { - int maxRows = statement.getMaxRows(); - if (maxRows > 0) { - if (limit != null) { - limit = Math.min(limit, maxRows); - } else { - limit = maxRows; - } - } - } + if (projectedTable != null) { + TupleProjector.serializeProjectorIntoScan(context.getScan(), + new TupleProjector(projectedTable), + wildcardIncludesDynamicCols && projector.projectDynColsInWildcardQueries()); + } - if (projectedTable != null) { - TupleProjector.serializeProjectorIntoScan(context.getScan(), - new TupleProjector(projectedTable), wildcardIncludesDynamicCols && - projector.projectDynColsInWildcardQueries()); - } - - QueryPlan plan = innerPlan; - QueryPlan dataPlan = dataPlans.get(tableRef); - if (plan == null) { - ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory; - plan = select.getFrom() == null - ? 
new LiteralResultIterationPlan(context, select, tableRef, projector, limit, offset, orderBy, - parallelIteratorFactory) - : (select.isAggregate() || select.isDistinct() - ? new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy, - parallelIteratorFactory, groupBy, having, dataPlan) - : new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy, - parallelIteratorFactory, allowPageFilter, dataPlan, compiledOffset.getByteOffset())); - } - SelectStatement planSelect = asSubquery ? select : this.select; - if (!subqueries.isEmpty()) { - int count = subqueries.size(); - WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count]; - int i = 0; - for (SubqueryParseNode subqueryNode : subqueries) { - SelectStatement stmt = subqueryNode.getSelectNode(); - subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, subqueryNode.expectSingleRow()); - } - plan = HashJoinPlan.create(planSelect, plan, null, subPlans); - } + QueryPlan plan = innerPlan; + QueryPlan dataPlan = dataPlans.get(tableRef); + if (plan == null) { + ParallelIteratorFactory parallelIteratorFactory = + asSubquery ? null : this.parallelIteratorFactory; + plan = select.getFrom() == null + ? new LiteralResultIterationPlan(context, select, tableRef, projector, limit, offset, + orderBy, parallelIteratorFactory) + : (select.isAggregate() || select.isDistinct() + ? new AggregatePlan(context, select, tableRef, projector, limit, offset, orderBy, + parallelIteratorFactory, groupBy, having, dataPlan) + : new ScanPlan(context, select, tableRef, projector, limit, offset, orderBy, + parallelIteratorFactory, allowPageFilter, dataPlan, compiledOffset.getByteOffset())); + } + SelectStatement planSelect = asSubquery ? select : this.select; + if (!subqueries.isEmpty()) { + int count = subqueries.size(); + WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count]; + int i = 0; + for (SubqueryParseNode subqueryNode : subqueries) { + SelectStatement stmt = subqueryNode.getSelectNode(); + subPlans[i++] = new WhereClauseSubPlan(compileSubquery(stmt, false), stmt, + subqueryNode.expectSingleRow()); + } + plan = HashJoinPlan.create(planSelect, plan, null, subPlans); + } - if (innerPlan != null) { - if (LiteralExpression.isTrue(where)) { - where = null; // we do not pass "true" as filter - } - plan = select.isAggregate() || select.isDistinct() - ? new ClientAggregatePlan(context, planSelect, tableRef, projector, limit, offset, where, orderBy, - groupBy, having, plan) - : new ClientScanPlan(context, planSelect, tableRef, projector, limit, offset, where, orderBy, plan); + if (innerPlan != null) { + if (LiteralExpression.isTrue(where)) { + where = null; // we do not pass "true" as filter + } + plan = select.isAggregate() || select.isDistinct() + ? 
new ClientAggregatePlan(context, planSelect, tableRef, projector, limit, offset, where, + orderBy, groupBy, having, plan) + : new ClientScanPlan(context, planSelect, tableRef, projector, limit, offset, where, + orderBy, plan); - } + } - if(plan instanceof BaseQueryPlan){ - ((BaseQueryPlan) plan).setApplicable(isApplicable); - } - return plan; + if (plan instanceof BaseQueryPlan) { + ((BaseQueryPlan) plan).setApplicable(isApplicable); } + return plan; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java index 4764f5979f6..2039ce7e9eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/QueryPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,82 +34,74 @@ import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.TableRef; - - /** - * * Interface for an executable query plan - * - * * @since 0.1 */ public interface QueryPlan extends StatementPlan { - /** - * Get a result iterator to iterate over the results - * @return result iterator for iterating over the results - * @throws SQLException - */ - public ResultIterator iterator() throws SQLException; - - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException; - - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException; - - public long getEstimatedSize(); - - public Cost getCost(); - - // TODO: change once joins are supported - TableRef getTableRef(); - /** - * Returns projector used to formulate resultSet row - */ - RowProjector getProjector(); - - Integer getLimit(); - - Integer getOffset(); - - /** - * Return the compiled Order By clause of {@link SelectStatement}. - */ - OrderBy getOrderBy(); - - GroupBy getGroupBy(); - - List getSplits(); - - List> getScans(); - - FilterableStatement getStatement(); - - public boolean isDegenerate(); - - public boolean isRowKeyOrdered(); - - boolean isApplicable(); - - /** - * - * @return whether underlying {@link ResultScanner} can be picked up in a round-robin - * fashion. Generally, selecting scanners in such a fashion is possible if rows don't - * have to be returned back in a certain order. - * @throws SQLException - */ - public boolean useRoundRobinIterator() throws SQLException; - - T accept(QueryPlanVisitor visitor); - - /** - *
-     * Get the actual OrderBys of this queryPlan, which may be different from {@link #getOrderBy()},
-     * because {@link #getOrderBy()} is only the compiled result of {@link SelectStatement}.
-     * The return type is List because we can get multiple OrderBys for the query result of {@link SortMergeJoinPlan},
-     * eg. for the sql:
-     * SELECT  * FROM T1 JOIN T2 ON T1.a = T2.a and T1.b = T2.b
-     * The result of the sort-merge-join is sorted on (T1.a, T1.b) and (T2.a, T2.b) at the same time.
-     * 
- * @return - */ - public List getOutputOrderBys() ; + /** + * Get a result iterator to iterate over the results + * @return result iterator for iterating over the results + */ + public ResultIterator iterator() throws SQLException; + + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException; + + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException; + + public long getEstimatedSize(); + + public Cost getCost(); + + // TODO: change once joins are supported + TableRef getTableRef(); + + /** + * Returns projector used to formulate resultSet row + */ + RowProjector getProjector(); + + Integer getLimit(); + + Integer getOffset(); + + /** + * Return the compiled Order By clause of {@link SelectStatement}. + */ + OrderBy getOrderBy(); + + GroupBy getGroupBy(); + + List getSplits(); + + List> getScans(); + + FilterableStatement getStatement(); + + public boolean isDegenerate(); + + public boolean isRowKeyOrdered(); + + boolean isApplicable(); + + /** + * @return whether underlying {@link ResultScanner} can be picked up in a round-robin fashion. + * Generally, selecting scanners in such a fashion is possible if rows don't have to be + * returned back in a certain order. + */ + public boolean useRoundRobinIterator() throws SQLException; + + T accept(QueryPlanVisitor visitor); + + /** + *
+   * Get the actual OrderBys of this queryPlan, which may be different from {@link #getOrderBy()},
+   * because {@link #getOrderBy()} is only the compiled result of {@link SelectStatement}.
+   * The return type is List because we can get multiple OrderBys for the query result of {@link SortMergeJoinPlan},
+   * e.g. for the SQL:
+   * SELECT * FROM T1 JOIN T2 ON T1.a = T2.a and T1.b = T2.b
+   * The result of the sort-merge-join is sorted on (T1.a, T1.b) and (T2.a, T2.b) at the same time.
+   * 
+ */ + public List getOutputOrderBys(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java index 9eb0191a0f4..911fbaf698b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RVCOffsetCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,11 @@ */ package org.apache.phoenix.compile; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + import org.apache.commons.lang3.StringUtils; import org.apache.phoenix.expression.AndExpression; import org.apache.phoenix.expression.CoerceExpression; @@ -44,349 +46,354 @@ import org.apache.phoenix.schema.RowValueConstructorOffsetNotAllowedInQueryException; import org.apache.phoenix.schema.RowValueConstructorOffsetNotCoercibleException; import org.apache.phoenix.schema.TypeMismatchException; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ScanUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - public class RVCOffsetCompiler { - private final static Logger LOGGER = LoggerFactory.getLogger(RVCOffsetCompiler.class); + private final static Logger LOGGER = LoggerFactory.getLogger(RVCOffsetCompiler.class); - private final static RVCOffsetCompiler INSTANCE = new RVCOffsetCompiler(); + private final static RVCOffsetCompiler INSTANCE = new RVCOffsetCompiler(); - private RVCOffsetCompiler() { - } + private RVCOffsetCompiler() { + } - public static RVCOffsetCompiler getInstance() { - return INSTANCE; - } + public static RVCOffsetCompiler getInstance() { + return INSTANCE; + } - public CompiledOffset getRVCOffset(StatementContext context, FilterableStatement statement, - boolean inJoin, boolean inUnion, OffsetNode offsetNode) throws SQLException { - // We have a RVC offset. See PHOENIX-4845 + public CompiledOffset getRVCOffset(StatementContext context, FilterableStatement statement, + boolean inJoin, boolean inUnion, OffsetNode offsetNode) throws SQLException { + // We have a RVC offset. 
See PHOENIX-4845 - // This is a EqualParseNode with LHS and RHS RowValueConstructorParseNodes - // This is enforced as part of the grammar - EqualParseNode equalParseNode = (EqualParseNode) offsetNode.getOffsetParseNode(); + // This is a EqualParseNode with LHS and RHS RowValueConstructorParseNodes + // This is enforced as part of the grammar + EqualParseNode equalParseNode = (EqualParseNode) offsetNode.getOffsetParseNode(); - RowValueConstructorParseNode - rvcColumnsParseNode = - (RowValueConstructorParseNode) equalParseNode.getLHS(); - RowValueConstructorParseNode - rvcConstantParseNode = - (RowValueConstructorParseNode) equalParseNode.getRHS(); + RowValueConstructorParseNode rvcColumnsParseNode = + (RowValueConstructorParseNode) equalParseNode.getLHS(); + RowValueConstructorParseNode rvcConstantParseNode = + (RowValueConstructorParseNode) equalParseNode.getRHS(); - // disallow use with aggregations - if (statement.isAggregate()) { - throw new RowValueConstructorOffsetNotAllowedInQueryException("RVC Offset not allowed in Aggregates"); - } + // disallow use with aggregations + if (statement.isAggregate()) { + throw new RowValueConstructorOffsetNotAllowedInQueryException( + "RVC Offset not allowed in Aggregates"); + } - // Get the Select Type should not be join/union - // Note cannot use the SelectStatement as for Left/Right joins we won't get passed in the join context - if (inJoin || inUnion) { - throw new RowValueConstructorOffsetNotAllowedInQueryException("RVC Offset not allowed in Joins or Unions"); - } + // Get the Select Type should not be join/union + // Note cannot use the SelectStatement as for Left/Right joins we won't get passed in the join + // context + if (inJoin || inUnion) { + throw new RowValueConstructorOffsetNotAllowedInQueryException( + "RVC Offset not allowed in Joins or Unions"); + } - // Get the tables primary keys - if (context.getResolver().getTables().size() != 1) { - throw new RowValueConstructorOffsetNotAllowedInQueryException("RVC Offset not allowed with zero or multiple tables"); - } + // Get the tables primary keys + if (context.getResolver().getTables().size() != 1) { + throw new RowValueConstructorOffsetNotAllowedInQueryException( + "RVC Offset not allowed with zero or multiple tables"); + } - PTable pTable = context.getCurrentTable().getTable(); + PTable pTable = context.getCurrentTable().getTable(); - List columns = pTable.getPKColumns(); + List columns = pTable.getPKColumns(); - int numUserColumns = columns.size(); // columns specified by the user - int userColumnIndex = 0; // index into the ordered list, columns, of where user specified start + int numUserColumns = columns.size(); // columns specified by the user + int userColumnIndex = 0; // index into the ordered list, columns, of where user specified start - // if we are salted we need to take a subset of the pk - Integer buckets = pTable.getBucketNum(); - if (buckets != null && buckets > 0) { // We are salted - numUserColumns--; - userColumnIndex++; - } + // if we are salted we need to take a subset of the pk + Integer buckets = pTable.getBucketNum(); + if (buckets != null && buckets > 0) { // We are salted + numUserColumns--; + userColumnIndex++; + } - if (pTable.isMultiTenant() && context.getConnection().getTenantId() != null) { - // the tenantId is one of the pks and will be handled automatically - numUserColumns--; - userColumnIndex++; - } + if (pTable.isMultiTenant() && context.getConnection().getTenantId() != null) { + // the tenantId is one of the pks and will be handled automatically + 
numUserColumns--; + userColumnIndex++; + } - boolean isIndex = false; - if (PTableType.INDEX.equals(pTable.getType())) { - isIndex = true; - // If we are a view index we have to handle the idxId column - // Note that viewIndexId comes before tenantId (what about salt byte?) - if (pTable.getViewIndexId() != null) { - numUserColumns--; - userColumnIndex++; - } - } + boolean isIndex = false; + if (PTableType.INDEX.equals(pTable.getType())) { + isIndex = true; + // If we are a view index we have to handle the idxId column + // Note that viewIndexId comes before tenantId (what about salt byte?) + if (pTable.getViewIndexId() != null) { + numUserColumns--; + userColumnIndex++; + } + } - // Sanity check that they are providing all the user defined keys to this table - if (numUserColumns != rvcConstantParseNode.getChildren().size()) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must exactly cover the tables PK."); - } + // Sanity check that they are providing all the user defined keys to this table + if (numUserColumns != rvcConstantParseNode.getChildren().size()) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must exactly cover the tables PK."); + } - // Make sure the order is the same and all the user defined columns are mentioned in the column RVC - if (numUserColumns != rvcColumnsParseNode.getChildren().size()) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the tables PKs."); - } + // Make sure the order is the same and all the user defined columns are mentioned in the column + // RVC + if (numUserColumns != rvcColumnsParseNode.getChildren().size()) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the tables PKs."); + } - List - rvcColumnParseNodeList = buildListOfColumnParseNodes(rvcColumnsParseNode, isIndex); + List rvcColumnParseNodeList = + buildListOfColumnParseNodes(rvcColumnsParseNode, isIndex); - // Make sure we have all column parse nodes for the left hand - if (rvcColumnParseNodeList.size() != numUserColumns) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the tables PKs."); - } + // Make sure we have all column parse nodes for the left hand + if (rvcColumnParseNodeList.size() != numUserColumns) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the tables PKs."); + } - // We resolve the mini-where now so we can compare to tables pks PColumns and to produce a row offset - // Construct a mini where clause - ParseNode miniWhere = equalParseNode; - - Set originalHints = statement.getHint().getHints(); - WhereCompiler.WhereExpressionCompiler whereCompiler = new WhereCompiler.WhereExpressionCompiler(context); - - Expression whereExpression; - try { - whereExpression = miniWhere.accept(whereCompiler); - }catch(TypeMismatchException e) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset could not be coerced to the tables PKs. 
" + e.getMessage()); - } catch (Exception e) { - LOGGER.error("Unexpected error while compiling RVC Offset, got null expression.",e); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset unexpected failure."); - } + // We resolve the mini-where now so we can compare to tables pks PColumns and to produce a row + // offset + // Construct a mini where clause + ParseNode miniWhere = equalParseNode; + + Set originalHints = statement.getHint().getHints(); + WhereCompiler.WhereExpressionCompiler whereCompiler = + new WhereCompiler.WhereExpressionCompiler(context); + + Expression whereExpression; + try { + whereExpression = miniWhere.accept(whereCompiler); + } catch (TypeMismatchException e) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset could not be coerced to the tables PKs. " + e.getMessage()); + } catch (Exception e) { + LOGGER.error("Unexpected error while compiling RVC Offset, got null expression.", e); + throw new RowValueConstructorOffsetInternalErrorException("RVC Offset unexpected failure."); + } - if (whereExpression == null) { - LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset unexpected failure."); - } + if (whereExpression == null) { + LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); + throw new RowValueConstructorOffsetInternalErrorException("RVC Offset unexpected failure."); + } - Expression expression; - try { - expression = - WhereOptimizer - .pushKeyExpressionsToScan(context, originalHints, whereExpression, null, - Optional.absent()); - } catch (Exception e) { - LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset unexpected failure."); - } + Expression expression; + try { + expression = WhereOptimizer.pushKeyExpressionsToScan(context, originalHints, whereExpression, + null, Optional. 
absent()); + } catch (Exception e) { + LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); + throw new RowValueConstructorOffsetInternalErrorException("RVC Offset unexpected failure."); + } - //If the whereExpression is a single term comparison/isNull it will be entirely removed - if (expression == null && whereExpression instanceof AndExpression) { - LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset unexpected failure."); - } + // If the whereExpression is a single term comparison/isNull it will be entirely removed + if (expression == null && whereExpression instanceof AndExpression) { + LOGGER.error("Unexpected error while compiling RVC Offset, got null expression."); + throw new RowValueConstructorOffsetInternalErrorException("RVC Offset unexpected failure."); + } - // Now that columns etc have been resolved lets check to make sure they match the pk order - RowKeyColumnExpressionOutput rowKeyColumnExpressionOutput = - buildListOfRowKeyColumnExpressions(whereExpression, isIndex); + // Now that columns etc have been resolved lets check to make sure they match the pk order + RowKeyColumnExpressionOutput rowKeyColumnExpressionOutput = + buildListOfRowKeyColumnExpressions(whereExpression, isIndex); - List - rowKeyColumnExpressionList = rowKeyColumnExpressionOutput.getRowKeyColumnExpressions(); + List rowKeyColumnExpressionList = + rowKeyColumnExpressionOutput.getRowKeyColumnExpressions(); - if (rowKeyColumnExpressionList.size() != numUserColumns) { - LOGGER.warn("Unexpected error while compiling RVC Offset, expected " + numUserColumns - + " found " + rowKeyColumnExpressionList.size()); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset must specify the table's PKs."); - } + if (rowKeyColumnExpressionList.size() != numUserColumns) { + LOGGER.warn("Unexpected error while compiling RVC Offset, expected " + numUserColumns + + " found " + rowKeyColumnExpressionList.size()); + throw new RowValueConstructorOffsetInternalErrorException( + "RVC Offset must specify the table's PKs."); + } - for (int i = 0; i < numUserColumns; i++) { - PColumn column = columns.get(i + userColumnIndex); + for (int i = 0; i < numUserColumns; i++) { + PColumn column = columns.get(i + userColumnIndex); - ColumnParseNode columnParseNode = rvcColumnParseNodeList.get(i); + ColumnParseNode columnParseNode = rvcColumnParseNodeList.get(i); - String columnParseNodeString = columnParseNode.getFullName(); - if (isIndex) { - columnParseNodeString = IndexUtil.getDataColumnName(columnParseNodeString); - } + String columnParseNodeString = columnParseNode.getFullName(); + if (isIndex) { + columnParseNodeString = IndexUtil.getDataColumnName(columnParseNodeString); + } - RowKeyColumnExpression rowKeyColumnExpression = rowKeyColumnExpressionList.get(i); - String expressionName = rowKeyColumnExpression.getName(); + RowKeyColumnExpression rowKeyColumnExpression = rowKeyColumnExpressionList.get(i); + String expressionName = rowKeyColumnExpression.getName(); - // Not sure why it is getting quoted - expressionName = expressionName.replace("\"", ""); + // Not sure why it is getting quoted + expressionName = expressionName.replace("\"", ""); - if (isIndex) { - expressionName = IndexUtil.getDataColumnName(expressionName); - } + if (isIndex) { + expressionName = IndexUtil.getDataColumnName(expressionName); + } - if (!StringUtils.equals(expressionName, columnParseNodeString)) { - throw new 
RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the table's PKs."); - } + if (!StringUtils.equals(expressionName, columnParseNodeString)) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the table's PKs."); + } - String columnString = column.getName().getString(); - if (isIndex) { - columnString = IndexUtil.getDataColumnName(columnString); - } - if (!StringUtils.equals(expressionName, columnString)) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the table's PKs."); - } - } + String columnString = column.getName().getString(); + if (isIndex) { + columnString = IndexUtil.getDataColumnName(columnString); + } + if (!StringUtils.equals(expressionName, columnString)) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the table's PKs."); + } + } - byte[] key; - - // check to see if this was a single key expression - ScanRanges scanRanges = context.getScanRanges(); - - //We do not generate a point lookup today in phoenix if the rowkey has a trailing null, we generate a range scan. - if (!scanRanges.isPointLookup()) { - //Since we use a range scan to guarantee we get only the null value and the upper bound is unset this suffices - //sanity check - if (!rowKeyColumnExpressionOutput.isTrailingNull()) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must be a point lookup."); - } - key = scanRanges.getScanRange().getUpperRange(); - } else { - RowKeySchema.RowKeySchemaBuilder builder = new RowKeySchema.RowKeySchemaBuilder(columns.size()); - - for (PColumn column : columns) { - builder.addField(column, column.isNullable(), column.getSortOrder()); - } - - RowKeySchema rowKeySchema = builder.build(); - - //we make a ScanRange with 1 keyslots that cover the entire PK to reuse the code - KeyRange pointKeyRange = scanRanges.getScanRange(); - KeyRange keyRange = KeyRange.getKeyRange(pointKeyRange.getLowerRange(), false, KeyRange.UNBOUND, true); - List myRangeList = Lists.newArrayList(keyRange); - List> slots = new ArrayList<>(); - slots.add(myRangeList); - int[] slotSpan = new int[1]; - - //subtract 1 see ScanUtil.SINGLE_COLUMN_SLOT_SPAN is 0 - slotSpan[0] = columns.size() - 1; - key = ScanUtil.getMinKey(rowKeySchema, slots, slotSpan); - } + byte[] key; + + // check to see if this was a single key expression + ScanRanges scanRanges = context.getScanRanges(); + + // We do not generate a point lookup today in phoenix if the rowkey has a trailing null, we + // generate a range scan. 
+ if (!scanRanges.isPointLookup()) { + // Since we use a range scan to guarantee we get only the null value and the upper bound is + // unset this suffices + // sanity check + if (!rowKeyColumnExpressionOutput.isTrailingNull()) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must be a point lookup."); + } + key = scanRanges.getScanRange().getUpperRange(); + } else { + RowKeySchema.RowKeySchemaBuilder builder = + new RowKeySchema.RowKeySchemaBuilder(columns.size()); + + for (PColumn column : columns) { + builder.addField(column, column.isNullable(), column.getSortOrder()); + } + + RowKeySchema rowKeySchema = builder.build(); + + // we make a ScanRange with 1 keyslots that cover the entire PK to reuse the code + KeyRange pointKeyRange = scanRanges.getScanRange(); + KeyRange keyRange = + KeyRange.getKeyRange(pointKeyRange.getLowerRange(), false, KeyRange.UNBOUND, true); + List myRangeList = Lists.newArrayList(keyRange); + List> slots = new ArrayList<>(); + slots.add(myRangeList); + int[] slotSpan = new int[1]; + + // subtract 1 see ScanUtil.SINGLE_COLUMN_SLOT_SPAN is 0 + slotSpan[0] = columns.size() - 1; + key = ScanUtil.getMinKey(rowKeySchema, slots, slotSpan); + } - // Note the use of ByteUtil.nextKey() to generate exclusive offset - CompiledOffset - compiledOffset = - new CompiledOffset(Optional.absent(), - Optional.of(key)); + // Note the use of ByteUtil.nextKey() to generate exclusive offset + CompiledOffset compiledOffset = + new CompiledOffset(Optional. absent(), Optional.of(key)); - return compiledOffset; - } + return compiledOffset; + } - @VisibleForTesting - static class RowKeyColumnExpressionOutput { - public RowKeyColumnExpressionOutput(List rowKeyColumnExpressions, boolean trailingNull) { - this.rowKeyColumnExpressions = rowKeyColumnExpressions; - this.trailingNull = trailingNull; - } + @VisibleForTesting + static class RowKeyColumnExpressionOutput { + public RowKeyColumnExpressionOutput(List rowKeyColumnExpressions, + boolean trailingNull) { + this.rowKeyColumnExpressions = rowKeyColumnExpressions; + this.trailingNull = trailingNull; + } - private final List rowKeyColumnExpressions; - private final boolean trailingNull; + private final List rowKeyColumnExpressions; + private final boolean trailingNull; - public List getRowKeyColumnExpressions() { - return rowKeyColumnExpressions; - } + public List getRowKeyColumnExpressions() { + return rowKeyColumnExpressions; + } - public boolean isTrailingNull() { - return trailingNull; - } + public boolean isTrailingNull() { + return trailingNull; + } + } + + @VisibleForTesting + RowKeyColumnExpressionOutput buildListOfRowKeyColumnExpressions(Expression whereExpression, + boolean isIndex) throws RowValueConstructorOffsetNotCoercibleException, + RowValueConstructorOffsetInternalErrorException { + + boolean trailingNull = false; + List expressions; + if ((whereExpression instanceof AndExpression)) { + expressions = whereExpression.getChildren(); + } else if ( + whereExpression instanceof ComparisonExpression || whereExpression instanceof IsNullExpression + ) { + expressions = Lists.newArrayList(whereExpression); + } else { + LOGGER.warn( + "Unexpected error while compiling RVC Offset, expected either a Comparison/IsNull Expression of a AndExpression got " + + whereExpression.getClass().getName()); + throw new RowValueConstructorOffsetInternalErrorException( + "RVC Offset must specify the tables PKs."); } - @VisibleForTesting - RowKeyColumnExpressionOutput buildListOfRowKeyColumnExpressions ( - Expression 
whereExpression, boolean isIndex) - throws RowValueConstructorOffsetNotCoercibleException, RowValueConstructorOffsetInternalErrorException { - - boolean trailingNull = false; - List expressions; - if((whereExpression instanceof AndExpression)) { - expressions = whereExpression.getChildren(); - } else if (whereExpression instanceof ComparisonExpression || whereExpression instanceof IsNullExpression) { - expressions = Lists.newArrayList(whereExpression); - } else { - LOGGER.warn("Unexpected error while compiling RVC Offset, expected either a Comparison/IsNull Expression of a AndExpression got " - + whereExpression.getClass().getName()); - throw new RowValueConstructorOffsetInternalErrorException( - "RVC Offset must specify the tables PKs."); + List rowKeyColumnExpressionList = + new ArrayList(); + for (int i = 0; i < expressions.size(); i++) { + Expression child = expressions.get(i); + if (!(child instanceof ComparisonExpression || child instanceof IsNullExpression)) { + LOGGER.warn("Unexpected error while compiling RVC Offset"); + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the tables PKs."); + } + + // if this is the last position + if (i == expressions.size() - 1) { + if (child instanceof IsNullExpression) { + trailingNull = true; } - - List - rowKeyColumnExpressionList = - new ArrayList(); - for (int i = 0; i < expressions.size(); i++) { - Expression child = expressions.get(i); - if (!(child instanceof ComparisonExpression || child instanceof IsNullExpression)) { - LOGGER.warn("Unexpected error while compiling RVC Offset"); - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the tables PKs."); - } - - //if this is the last position - if(i == expressions.size() - 1) { - if(child instanceof IsNullExpression) { - trailingNull = true; - } - } - - //For either case comparison/isNull the first child should be the rowkey - Expression possibleRowKeyColumnExpression = child.getChildren().get(0); - - // Note that since we store indexes in variable length form there may be casts from fixed types to - // variable length - if (isIndex) { - if (possibleRowKeyColumnExpression instanceof CoerceExpression) { - // Cast today has 1 child - possibleRowKeyColumnExpression = - ((CoerceExpression) possibleRowKeyColumnExpression).getChild(); - } - } - - if (!(possibleRowKeyColumnExpression instanceof RowKeyColumnExpression)) { - LOGGER.warn("Unexpected error while compiling RVC Offset"); - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the tables PKs."); - } - rowKeyColumnExpressionList.add((RowKeyColumnExpression) possibleRowKeyColumnExpression); + } + + // For either case comparison/isNull the first child should be the rowkey + Expression possibleRowKeyColumnExpression = child.getChildren().get(0); + + // Note that since we store indexes in variable length form there may be casts from fixed + // types to + // variable length + if (isIndex) { + if (possibleRowKeyColumnExpression instanceof CoerceExpression) { + // Cast today has 1 child + possibleRowKeyColumnExpression = + ((CoerceExpression) possibleRowKeyColumnExpression).getChild(); } - return new RowKeyColumnExpressionOutput(rowKeyColumnExpressionList,trailingNull); + } + + if (!(possibleRowKeyColumnExpression instanceof RowKeyColumnExpression)) { + LOGGER.warn("Unexpected error while compiling RVC Offset"); + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the tables PKs."); + } + 
rowKeyColumnExpressionList.add((RowKeyColumnExpression) possibleRowKeyColumnExpression); } - - @VisibleForTesting List buildListOfColumnParseNodes( - RowValueConstructorParseNode rvcColumnsParseNode, boolean isIndex) - throws RowValueConstructorOffsetNotCoercibleException { - List nodes = new ArrayList(); - for (ParseNode node : rvcColumnsParseNode.getChildren()) { - // Note that since we store indexes in variable length form there may be casts from fixed types to - // variable length - if (isIndex) { - if (node instanceof CastParseNode) { - // Cast today has 1 child - node = node.getChildren().get(0); - } - } - - if (!(node instanceof ColumnParseNode)) { - throw new RowValueConstructorOffsetNotCoercibleException( - "RVC Offset must specify the tables PKs."); - } else { - nodes.add((ColumnParseNode) node); - } + return new RowKeyColumnExpressionOutput(rowKeyColumnExpressionList, trailingNull); + } + + @VisibleForTesting + List + buildListOfColumnParseNodes(RowValueConstructorParseNode rvcColumnsParseNode, boolean isIndex) + throws RowValueConstructorOffsetNotCoercibleException { + List nodes = new ArrayList(); + for (ParseNode node : rvcColumnsParseNode.getChildren()) { + // Note that since we store indexes in variable length form there may be casts from fixed + // types to + // variable length + if (isIndex) { + if (node instanceof CastParseNode) { + // Cast today has 1 child + node = node.getChildren().get(0); } - return nodes; + } + + if (!(node instanceof ColumnParseNode)) { + throw new RowValueConstructorOffsetNotCoercibleException( + "RVC Offset must specify the tables PKs."); + } else { + nodes.add((ColumnParseNode) node); + } } -} \ No newline at end of file + return nodes; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java index 1d997cb8a59..543fde01c70 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/RowProjector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,194 +25,185 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.visitor.CloneExpressionVisitor; import org.apache.phoenix.schema.ColumnNotFoundException; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; - +import org.apache.phoenix.util.SchemaUtil; /** - * - * Class that manages a set of projected columns accessed through the zero-based - * column index for a SELECT clause projection. The column index may be looked up - * via the name using {@link #getColumnIndex(String)}. - * - * + * Class that manages a set of projected columns accessed through the zero-based column index for a + * SELECT clause projection. The column index may be looked up via the name using + * {@link #getColumnIndex(String)}. 
* @since 0.1 */ public class RowProjector { - public static final RowProjector EMPTY_PROJECTOR = new RowProjector(Collections.emptyList(),0, true); - - private final List columnProjectors; - private final ListMultimap reverseIndex; - private final boolean allCaseSensitive; - private final boolean someCaseSensitive; - private final int estimatedSize; - private final boolean isProjectAll; - private final boolean isProjectEmptyKeyValue; - private final boolean cloneRequired; - private final boolean hasUDFs; - private final boolean isProjectDynColsInWildcardQueries; - - public RowProjector(RowProjector projector, boolean isProjectEmptyKeyValue) { - this(projector.getColumnProjectors(), projector.getEstimatedRowByteSize(), - isProjectEmptyKeyValue, projector.hasUDFs, projector.isProjectAll, - projector.isProjectDynColsInWildcardQueries); - } - /** - * Construct RowProjector based on a list of ColumnProjectors. - * @param columnProjectors ordered list of ColumnProjectors corresponding to projected columns in SELECT clause - * aggregating coprocessor. Only required in the case of an aggregate query with a limit clause and otherwise may - * be null. - * @param estimatedRowSize - * @param isProjectEmptyKeyValue - */ - public RowProjector(List columnProjectors, int estimatedRowSize, boolean isProjectEmptyKeyValue) { - this(columnProjectors, estimatedRowSize, isProjectEmptyKeyValue, false, false, false); - } - /** - * Construct RowProjector based on a list of ColumnProjectors. - * @param columnProjectors ordered list of ColumnProjectors corresponding to projected columns in SELECT clause - * aggregating coprocessor. Only required in the case of an aggregate query with a limit clause and otherwise may - * be null. - * @param estimatedRowSize - * @param isProjectEmptyKeyValue - * @param hasUDFs - * @param isProjectAll - * @param isProjectDynColsInWildcardQueries - */ - public RowProjector(List columnProjectors, int estimatedRowSize, - boolean isProjectEmptyKeyValue, boolean hasUDFs, boolean isProjectAll, - boolean isProjectDynColsInWildcardQueries) { - this.columnProjectors = Collections.unmodifiableList(columnProjectors); - int position = columnProjectors.size(); - reverseIndex = ArrayListMultimap.create(); - boolean allCaseSensitive = true; - boolean someCaseSensitive = false; - for (--position; position >= 0; position--) { - ColumnProjector colProjector = columnProjectors.get(position); - allCaseSensitive &= colProjector.isCaseSensitive(); - someCaseSensitive |= colProjector.isCaseSensitive(); - reverseIndex.put(colProjector.getLabel(), position); - if (!colProjector.getTableName().isEmpty()) { - reverseIndex.put(SchemaUtil.getColumnName(colProjector.getTableName(), colProjector.getLabel()), position); - } - } - this.allCaseSensitive = allCaseSensitive; - this.someCaseSensitive = someCaseSensitive; - this.estimatedSize = estimatedRowSize; - this.isProjectEmptyKeyValue = isProjectEmptyKeyValue; - this.isProjectAll = isProjectAll; - this.hasUDFs = hasUDFs; - boolean cloneRequired = false; - if (!hasUDFs) { - for (int i = 0; i < this.columnProjectors.size(); i++) { - Expression expression = this.columnProjectors.get(i).getExpression(); - if (expression.isCloneExpression()) { - cloneRequired = true; - break; - } - } - } - this.cloneRequired = cloneRequired || hasUDFs; - this.isProjectDynColsInWildcardQueries = isProjectDynColsInWildcardQueries; - } + public static final RowProjector EMPTY_PROJECTOR = + new RowProjector(Collections. 
emptyList(), 0, true); - public RowProjector cloneIfNecessary() { - if (!cloneRequired) { - return this; - } - List clonedColProjectors = new ArrayList(columnProjectors.size()); - for (int i = 0; i < this.columnProjectors.size(); i++) { - ColumnProjector colProjector = columnProjectors.get(i); - Expression expression = colProjector.getExpression(); - if (expression.isCloneExpression()) { - CloneExpressionVisitor visitor = new CloneExpressionVisitor(); - Expression clonedExpression = expression.accept(visitor); - clonedColProjectors.add(new ExpressionProjector(colProjector.getName(), - colProjector.getLabel(), - colProjector.getTableName(), - clonedExpression, - colProjector.isCaseSensitive())); - } else { - clonedColProjectors.add(colProjector); - } + private final List columnProjectors; + private final ListMultimap reverseIndex; + private final boolean allCaseSensitive; + private final boolean someCaseSensitive; + private final int estimatedSize; + private final boolean isProjectAll; + private final boolean isProjectEmptyKeyValue; + private final boolean cloneRequired; + private final boolean hasUDFs; + private final boolean isProjectDynColsInWildcardQueries; + + public RowProjector(RowProjector projector, boolean isProjectEmptyKeyValue) { + this(projector.getColumnProjectors(), projector.getEstimatedRowByteSize(), + isProjectEmptyKeyValue, projector.hasUDFs, projector.isProjectAll, + projector.isProjectDynColsInWildcardQueries); + } + + /** + * Construct RowProjector based on a list of ColumnProjectors. + * @param columnProjectors ordered list of ColumnProjectors corresponding to projected columns in + * SELECT clause aggregating coprocessor. Only required in the case of an + * aggregate query with a limit clause and otherwise may be null. + */ + public RowProjector(List columnProjectors, int estimatedRowSize, + boolean isProjectEmptyKeyValue) { + this(columnProjectors, estimatedRowSize, isProjectEmptyKeyValue, false, false, false); + } + + /** + * Construct RowProjector based on a list of ColumnProjectors. + * @param columnProjectors ordered list of ColumnProjectors corresponding to projected columns in + * SELECT clause aggregating coprocessor. Only required in the case of an + * aggregate query with a limit clause and otherwise may be null. + */ + public RowProjector(List columnProjectors, int estimatedRowSize, + boolean isProjectEmptyKeyValue, boolean hasUDFs, boolean isProjectAll, + boolean isProjectDynColsInWildcardQueries) { + this.columnProjectors = Collections.unmodifiableList(columnProjectors); + int position = columnProjectors.size(); + reverseIndex = ArrayListMultimap. 
create(); + boolean allCaseSensitive = true; + boolean someCaseSensitive = false; + for (--position; position >= 0; position--) { + ColumnProjector colProjector = columnProjectors.get(position); + allCaseSensitive &= colProjector.isCaseSensitive(); + someCaseSensitive |= colProjector.isCaseSensitive(); + reverseIndex.put(colProjector.getLabel(), position); + if (!colProjector.getTableName().isEmpty()) { + reverseIndex.put( + SchemaUtil.getColumnName(colProjector.getTableName(), colProjector.getLabel()), position); + } + } + this.allCaseSensitive = allCaseSensitive; + this.someCaseSensitive = someCaseSensitive; + this.estimatedSize = estimatedRowSize; + this.isProjectEmptyKeyValue = isProjectEmptyKeyValue; + this.isProjectAll = isProjectAll; + this.hasUDFs = hasUDFs; + boolean cloneRequired = false; + if (!hasUDFs) { + for (int i = 0; i < this.columnProjectors.size(); i++) { + Expression expression = this.columnProjectors.get(i).getExpression(); + if (expression.isCloneExpression()) { + cloneRequired = true; + break; } - return new RowProjector(clonedColProjectors, - this.estimatedSize, this.isProjectEmptyKeyValue, this.hasUDFs, this.isProjectAll, - this.isProjectDynColsInWildcardQueries); + } } + this.cloneRequired = cloneRequired || hasUDFs; + this.isProjectDynColsInWildcardQueries = isProjectDynColsInWildcardQueries; + } - public boolean projectEveryRow() { - return isProjectEmptyKeyValue; + public RowProjector cloneIfNecessary() { + if (!cloneRequired) { + return this; } - - public boolean projectEverything() { - return isProjectAll; + List clonedColProjectors = + new ArrayList(columnProjectors.size()); + for (int i = 0; i < this.columnProjectors.size(); i++) { + ColumnProjector colProjector = columnProjectors.get(i); + Expression expression = colProjector.getExpression(); + if (expression.isCloneExpression()) { + CloneExpressionVisitor visitor = new CloneExpressionVisitor(); + Expression clonedExpression = expression.accept(visitor); + clonedColProjectors + .add(new ExpressionProjector(colProjector.getName(), colProjector.getLabel(), + colProjector.getTableName(), clonedExpression, colProjector.isCaseSensitive())); + } else { + clonedColProjectors.add(colProjector); + } } + return new RowProjector(clonedColProjectors, this.estimatedSize, this.isProjectEmptyKeyValue, + this.hasUDFs, this.isProjectAll, this.isProjectDynColsInWildcardQueries); + } - public boolean hasUDFs() { - return hasUDFs; - } + public boolean projectEveryRow() { + return isProjectEmptyKeyValue; + } - public boolean projectDynColsInWildcardQueries() { - return isProjectDynColsInWildcardQueries; - } - - public List getColumnProjectors() { - return columnProjectors; - } - - public int getColumnIndex(String name) throws SQLException { - if (!someCaseSensitive) { - name = SchemaUtil.normalizeIdentifier(name); - } - List index = reverseIndex.get(name); - if (index.isEmpty()) { - if (!allCaseSensitive && someCaseSensitive) { - name = SchemaUtil.normalizeIdentifier(name); - index = reverseIndex.get(name); - } - if (index.isEmpty()) { - throw new ColumnNotFoundException(name); - } - } - - return index.get(0); + public boolean projectEverything() { + return isProjectAll; + } + + public boolean hasUDFs() { + return hasUDFs; + } + + public boolean projectDynColsInWildcardQueries() { + return isProjectDynColsInWildcardQueries; + } + + public List getColumnProjectors() { + return columnProjectors; + } + + public int getColumnIndex(String name) throws SQLException { + if (!someCaseSensitive) { + name = 
SchemaUtil.normalizeIdentifier(name); } - - public ColumnProjector getColumnProjector(int index) { - return columnProjectors.get(index); + List index = reverseIndex.get(name); + if (index.isEmpty()) { + if (!allCaseSensitive && someCaseSensitive) { + name = SchemaUtil.normalizeIdentifier(name); + index = reverseIndex.get(name); + } + if (index.isEmpty()) { + throw new ColumnNotFoundException(name); + } } - - public int getColumnCount() { - return columnProjectors.size(); + + return index.get(0); + } + + public ColumnProjector getColumnProjector(int index) { + return columnProjectors.get(index); + } + + public int getColumnCount() { + return columnProjectors.size(); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("["); + for (ColumnProjector projector : columnProjectors) { + buf.append(projector.getExpression()); + buf.append(','); } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("["); - for (ColumnProjector projector : columnProjectors) { - buf.append(projector.getExpression()); - buf.append(','); - } - if (buf.length() > 1) { - buf.setLength(buf.length()-1); - } - buf.append(']'); - return buf.toString(); + if (buf.length() > 1) { + buf.setLength(buf.length() - 1); } + buf.append(']'); + return buf.toString(); + } - public int getEstimatedRowByteSize() { - return estimatedSize; - } + public int getEstimatedRowByteSize() { + return estimatedSize; + } - /** - * allow individual expressions to reset their state between rows - */ - public void reset() { - for (ColumnProjector projector : columnProjectors) { - projector.getExpression().reset(); - } + /** + * allow individual expressions to reset their state between rows + */ + public void reset() { + for (ColumnProjector projector : columnProjectors) { + projector.getExpression().reset(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java index 18030fa8465..bf2c3bcdcf2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ScanRanges.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,8 +26,6 @@ import java.util.Iterator; import java.util.List; -import org.apache.phoenix.schema.types.PVarbinaryEncoded; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; @@ -44,754 +42,785 @@ import org.apache.phoenix.schema.ValueSchema.Field; import org.apache.phoenix.schema.types.PDataType.PDataCodec; import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.util.SchemaUtil; - +import org.apache.phoenix.schema.types.PVarbinaryEncoded; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - +import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.ScanUtil; +import org.apache.phoenix.util.SchemaUtil; public class ScanRanges { - private static final List> EVERYTHING_RANGES = Collections.>emptyList(); - private static final List> NOTHING_RANGES = Collections.>singletonList(Collections.singletonList(KeyRange.EMPTY_RANGE)); - public static final ScanRanges EVERYTHING = new ScanRanges(null,ScanUtil.SINGLE_COLUMN_SLOT_SPAN,EVERYTHING_RANGES, KeyRange.EVERYTHING_RANGE, false, false, null, null); - public static final ScanRanges NOTHING = new ScanRanges(null,ScanUtil.SINGLE_COLUMN_SLOT_SPAN,NOTHING_RANGES, KeyRange.EMPTY_RANGE, false, false, null, null); - private static final Scan HAS_INTERSECTION = new Scan(); - - public static ScanRanges createPointLookup(List keys) { - return ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1); - } - - // For testing - public static ScanRanges createSingleSpan(RowKeySchema schema, List> ranges) { - return create(schema, ranges, ScanUtil.getDefaultSlotSpans(ranges.size()), null, true, -1); - } - - public static ScanRanges createSingleSpan(RowKeySchema schema, List> ranges, Integer nBuckets, boolean useSkipSan) { - return create(schema, ranges, ScanUtil.getDefaultSlotSpans(ranges.size()), nBuckets, useSkipSan, -1); - } - - public static ScanRanges create(RowKeySchema schema, List> ranges, int[] slotSpan, Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex) { - return create(schema,ranges,slotSpan,nBuckets,useSkipScan,rowTimestampColIndex,Optional.absent()); - } - - public static ScanRanges create(RowKeySchema schema, List> ranges, int[] slotSpan, Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex, Optional scanMinOffset) { - int offset = nBuckets == null ? 
0 : SaltingUtil.NUM_SALTING_BYTES; - int nSlots = ranges.size(); - - if (nSlots == offset && !scanMinOffset.isPresent()) { - return EVERYTHING; - } else if ((nSlots == 1 + offset && ranges.get(offset).size() == 1 && ranges.get(offset).get(0) == KeyRange.EMPTY_RANGE)) { - return NOTHING; - } - TimeRange rowTimestampRange = getRowTimestampColumnRange(ranges, schema, rowTimestampColIndex); - boolean isPointLookup = isPointLookup(schema, ranges, slotSpan, useSkipScan); - if (isPointLookup) { - // TODO: consider keeping original to use for serialization as it would be smaller? - List keys = ScanRanges.getPointKeys(ranges, slotSpan, schema, nBuckets); - List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); - // We have full keys here, so use field from our varbinary schema - for (byte[] key : keys) { - keyRanges.add(KeyRange.getKeyRange(key)); - } - // while doing a point look up if after intersecting with the MinMaxrange there are - // no more keyranges left then just return - if (keyRanges.isEmpty()) { - return NOTHING; - } - ranges = Collections.singletonList(keyRanges); - useSkipScan = keyRanges.size() > 1; - // Treat as binary if descending because we've got a separator byte at the end - // which is not part of the value. - if (keys.size() > 1 || hasTrailingDescSeparatorByte(schema)) { - schema = SchemaUtil.VAR_BINARY_SCHEMA; - slotSpan = ScanUtil.SINGLE_COLUMN_SLOT_SPAN; - } else { - // Keep original schema and don't use skip scan as it's not necessary - // when there's a single key. - slotSpan = new int[] {schema.getMaxFields()-1}; - } - } - - List> sortedRanges = Lists.newArrayListWithExpectedSize(ranges.size()); - for (int i = 0; i < ranges.size(); i++) { - Field f = schema.getField(i); - List sorted = Lists.newArrayList(ranges.get(i)); - Collections.sort(sorted, f.getSortOrder() == SortOrder.ASC ? KeyRange.COMPARATOR : KeyRange.DESC_COMPARATOR); - sortedRanges.add(ImmutableList.copyOf(sorted)); - } - - - // Don't set minMaxRange for point lookup because it causes issues during intersect - // by going across region boundaries - KeyRange scanRange = KeyRange.EVERYTHING_RANGE; - // if (!isPointLookup && (nBuckets == null || !useSkipScanFilter)) { - // if (! 
( isPointLookup || (nBuckets != null && useSkipScanFilter) ) ) { - // if (nBuckets == null || (nBuckets != null && (!isPointLookup || !useSkipScanFilter))) { - if (nBuckets == null || !isPointLookup || !useSkipScan) { - byte[] minKey = ScanUtil.getMinKey(schema, sortedRanges, slotSpan); - byte[] maxKey = ScanUtil.getMaxKey(schema, sortedRanges, slotSpan); - // If the maxKey has crossed the salt byte boundary, then we do not - // have anything to filter at the upper end of the range - if (ScanUtil.crossesPrefixBoundary(maxKey, ScanUtil.getPrefix(minKey, offset), offset)) { - maxKey = KeyRange.UNBOUND; - } - // We won't filter anything at the low end of the range if we just have the salt byte - if (minKey.length <= offset) { - minKey = KeyRange.UNBOUND; - } - - //Handle the offset by pushing it into the scanRange - if(scanMinOffset.isPresent()){ - - byte[] minOffset = scanMinOffset.get(); - //If we are salted we have to - //This should be safe for RVC Offset since we specify a full rowkey which - // is by definition unique so duplicating the salt bucket is fine - if(nBuckets != null && nBuckets > 0) { - minOffset[0] = 0; //We use 0 for salt bucket for scans - } - - //If the offset is more selective than the existing ranges - if(Bytes.BYTES_COMPARATOR.compare(minOffset,minKey) > 0){ - minKey=minOffset; - } - } - - scanRange = KeyRange.getKeyRange(minKey, maxKey); - } - - if (scanRange == KeyRange.EMPTY_RANGE) { - return NOTHING; - } - return new ScanRanges(schema, slotSpan, sortedRanges, scanRange, useSkipScan, isPointLookup, nBuckets, rowTimestampRange); - } - - private static boolean hasTrailingDescSeparatorByte(RowKeySchema schema) { - return - (schema.getField(schema.getFieldCount() - 1).getDataType() != PVarbinaryEncoded.INSTANCE - && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, - schema.getField(schema.getFieldCount() - 1)) == QueryConstants.DESC_SEPARATOR_BYTE) - || (schema.getField(schema.getFieldCount() - 1).getDataType() - == PVarbinaryEncoded.INSTANCE && SchemaUtil - .getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(), false, - schema.getField(schema.getFieldCount() - 1).getSortOrder()) - == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES); - } - - private SkipScanFilter filter; - private final List> ranges; - private final int[] slotSpan; - private final RowKeySchema schema; - private final boolean isPointLookup; - private final boolean isSalted; - private final boolean useSkipScanFilter; - private final KeyRange scanRange; - private final TimeRange rowTimestampRange; - - private ScanRanges (RowKeySchema schema, int[] slotSpan, List> ranges, KeyRange scanRange, boolean useSkipScanFilter, boolean isPointLookup, Integer bucketNum, TimeRange rowTimestampRange) { - this.isPointLookup = isPointLookup; - this.isSalted = bucketNum != null; - this.useSkipScanFilter = useSkipScanFilter; - this.scanRange = scanRange; - this.rowTimestampRange = rowTimestampRange; - - if (isSalted && !isPointLookup) { - ranges.set(0, SaltingUtil.generateAllSaltingRanges(bucketNum)); - } - this.ranges = ImmutableList.copyOf(ranges); - this.slotSpan = slotSpan; - this.schema = schema; - if (schema != null && !ranges.isEmpty()) { - if (!this.useSkipScanFilter) { - int boundSlotCount = this.getBoundSlotCount(); - ranges = ranges.subList(0, boundSlotCount); - slotSpan = Arrays.copyOf(slotSpan, boundSlotCount); - } - this.filter = new SkipScanFilter(ranges, slotSpan, this.schema, isPointLookup); - } - } - - public void initializeScan(Scan scan) { - 
scan.withStartRow(scanRange.getLowerRange()); - scan.withStopRow(scanRange.getUpperRange()); - } - - public static byte[] prefixKey(byte[] key, int keyOffset, byte[] prefixKey, int prefixKeyOffset) { - return prefixKey(key, keyOffset, key.length, prefixKey, prefixKeyOffset); - } - - public static byte[] prefixKey(byte[] key, int keyOffset, int keyLength, byte[] prefixKey, - int prefixKeyOffset) { - if (keyLength > 0) { - byte[] newKey = new byte[keyLength + prefixKeyOffset]; - int totalKeyOffset = keyOffset + prefixKeyOffset; - if (prefixKey.length >= totalKeyOffset) { // otherwise it's null padded - System.arraycopy(prefixKey, 0, newKey, 0, totalKeyOffset); - } - System.arraycopy(key, keyOffset, newKey, totalKeyOffset, keyLength - keyOffset); - return newKey; - } - return key; - } - - private static byte[] replaceSaltByte(byte[] key, byte[] saltKey) { - if (key.length == 0) { - return key; - } - byte[] temp = new byte[key.length]; - if (saltKey.length >= SaltingUtil.NUM_SALTING_BYTES) { // Otherwise it's null padded - System.arraycopy(saltKey, 0, temp, 0, SaltingUtil.NUM_SALTING_BYTES); - } - System.arraycopy(key, SaltingUtil.NUM_SALTING_BYTES, temp, SaltingUtil.NUM_SALTING_BYTES, key.length - SaltingUtil.NUM_SALTING_BYTES); - return temp; - } - - public static byte[] stripPrefix(byte[] key, int keyOffset) { - if (key.length == 0) { - return key; - } - byte[] temp = new byte[key.length - keyOffset]; - System.arraycopy(key, keyOffset, temp, 0, key.length - keyOffset); - return temp; - } - - // This variant adds synthetic scan boundaries at potentially missing salt bucket boundaries - // and won't return null Scans - public List intersectScan(Scan scan, final byte[] originalStartKey, - final byte[] originalStopKey, final int keyOffset, byte[] splitPostfix, Integer buckets, - boolean crossesRegionBoundary) { - // FIXME Both the salted status and the pre-computed bucket list should be available in - // this object, but in some cases they get overwritten, so we cannot use that. - List newScans = new ArrayList(); - if (buckets != null && buckets > 0) { - byte[] wrkStartKey = originalStartKey; - do { - boolean lastBucket = false; - byte[] nextBucketStart = null; - byte[] nextBucketByte = null; - if (wrkStartKey.length > 0 && Byte.toUnsignedInt(wrkStartKey[0]) >= buckets - 1) { - lastBucket = true; - } else { - // This includes the zero bytes from the minimum PK - nextBucketStart = bucketEnd(wrkStartKey, splitPostfix); - // These is the start of the next bucket in byte[], without the PK suffix - nextBucketByte = new byte[] { nextBucketStart[0] }; - } - if (lastBucket || Bytes.compareTo(originalStopKey, nextBucketStart) <= 0) { - // either we don't need to add synthetic guideposts, or we already have, and - // are at the last bucket of the original scan - addIfNotNull(newScans, intersectScan(scan, wrkStartKey, originalStopKey, - keyOffset, crossesRegionBoundary)); - break; - } - // This is where we add the synthetic guidepost. - // We skip [nextBucketByte, nextBucketStart), but it's guaranteed that there are no - // rows there. 
- addIfNotNull(newScans, - intersectScan(scan, wrkStartKey, nextBucketByte, keyOffset, false)); - wrkStartKey = nextBucketStart; - } while (true); - } else { - // Definitely Not crossing buckets - addIfNotNull(newScans, intersectScan(scan, originalStartKey, originalStopKey, keyOffset, - crossesRegionBoundary)); - } - return newScans; - } - - private void addIfNotNull(List scans, Scan newScan) { - if (newScan != null) { - scans.add(newScan); - } - } - - // The split (presplit for salted tables) code extends the split point to the minimum PK length. - // Adding the same postfix here avoids creating and extra [n,n\x00\x00\x00..\x00) scan for each - // bucket - private byte[] bucketEnd(byte[] key, byte[] splitPostfix) { - byte startByte = key.length > 0 ? key[0] : 0; - int nextBucket = Byte.toUnsignedInt(startByte) + 1; - byte[] bucketEnd = new byte[splitPostfix.length + 1]; - bucketEnd[0] = (byte) nextBucket; - System.arraycopy(splitPostfix, 0, bucketEnd, 1, splitPostfix.length); - return bucketEnd; - } - - //TODO split this for normal, salted and local index variants - public Scan intersectScan(Scan scan, final byte[] originalStartKey, final byte[] originalStopKey, final int keyOffset, boolean crossesRegionBoundary) { - byte[] startKey = originalStartKey; - byte[] stopKey = originalStopKey; - if (stopKey.length > 0 && Bytes.compareTo(startKey, stopKey) >= 0) { - return null; - } - // Keep the keys as they are if we have a point lookup, as we've already resolved the - // salt bytes in that case. - final int scanKeyOffset = this.isSalted && !this.isPointLookup ? SaltingUtil.NUM_SALTING_BYTES : 0; - assert (scanKeyOffset == 0 || keyOffset == 0); - // Total offset for startKey/stopKey. Either 1 for salted tables or the prefix length - // of the current region for local indexes. We'll never have a case where a table is - // both salted and local. - final int totalKeyOffset = scanKeyOffset + keyOffset; - byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY; - if (totalKeyOffset > 0) { - prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset); - /* - * If our startKey to stopKey crosses a region boundary consider everything after the startKey as our scan - * is always done within a single region. This prevents us from having to prefix the key prior to knowing - * whether or not there may be an intersection. We can't calculate whether or not we've crossed a region - * boundary for local indexes, because we don't know the key offset of the next region, but only for the - * current one (which is the one passed in). If the next prefix happened to be a subset of the previous - * prefix, then this wouldn't detect that we crossed a region boundary. - */ - if (crossesRegionBoundary) { - stopKey = ByteUtil.EMPTY_BYTE_ARRAY; - } - } - int scanStartKeyOffset = scanKeyOffset; - byte[] scanStartKey = scan == null ? this.scanRange.getLowerRange() : scan.getStartRow(); - // Compare ignoring key prefix and salt byte - if (scanStartKey.length - scanKeyOffset > 0) { - if (startKey.length - totalKeyOffset > 0) { - if (Bytes.compareTo(scanStartKey, scanKeyOffset, scanStartKey.length - scanKeyOffset, startKey, totalKeyOffset, startKey.length - totalKeyOffset) < 0) { - scanStartKey = startKey; - scanStartKeyOffset = totalKeyOffset; - } - } + private static final List> EVERYTHING_RANGES = + Collections.> emptyList(); + private static final List> NOTHING_RANGES = Collections.< + List> singletonList(Collections. 
singletonList(KeyRange.EMPTY_RANGE)); + public static final ScanRanges EVERYTHING = new ScanRanges(null, ScanUtil.SINGLE_COLUMN_SLOT_SPAN, + EVERYTHING_RANGES, KeyRange.EVERYTHING_RANGE, false, false, null, null); + public static final ScanRanges NOTHING = new ScanRanges(null, ScanUtil.SINGLE_COLUMN_SLOT_SPAN, + NOTHING_RANGES, KeyRange.EMPTY_RANGE, false, false, null, null); + private static final Scan HAS_INTERSECTION = new Scan(); + + public static ScanRanges createPointLookup(List keys) { + return ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), + ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1); + } + + // For testing + public static ScanRanges createSingleSpan(RowKeySchema schema, List> ranges) { + return create(schema, ranges, ScanUtil.getDefaultSlotSpans(ranges.size()), null, true, -1); + } + + public static ScanRanges createSingleSpan(RowKeySchema schema, List> ranges, + Integer nBuckets, boolean useSkipSan) { + return create(schema, ranges, ScanUtil.getDefaultSlotSpans(ranges.size()), nBuckets, useSkipSan, + -1); + } + + public static ScanRanges create(RowKeySchema schema, List> ranges, int[] slotSpan, + Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex) { + return create(schema, ranges, slotSpan, nBuckets, useSkipScan, rowTimestampColIndex, + Optional. absent()); + } + + public static ScanRanges create(RowKeySchema schema, List> ranges, int[] slotSpan, + Integer nBuckets, boolean useSkipScan, int rowTimestampColIndex, + Optional scanMinOffset) { + int offset = nBuckets == null ? 0 : SaltingUtil.NUM_SALTING_BYTES; + int nSlots = ranges.size(); + + if (nSlots == offset && !scanMinOffset.isPresent()) { + return EVERYTHING; + } else if ( + (nSlots == 1 + offset && ranges.get(offset).size() == 1 + && ranges.get(offset).get(0) == KeyRange.EMPTY_RANGE) + ) { + return NOTHING; + } + TimeRange rowTimestampRange = getRowTimestampColumnRange(ranges, schema, rowTimestampColIndex); + boolean isPointLookup = isPointLookup(schema, ranges, slotSpan, useSkipScan); + if (isPointLookup) { + // TODO: consider keeping original to use for serialization as it would be smaller? + List keys = ScanRanges.getPointKeys(ranges, slotSpan, schema, nBuckets); + List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); + // We have full keys here, so use field from our varbinary schema + for (byte[] key : keys) { + keyRanges.add(KeyRange.getKeyRange(key)); + } + // while doing a point look up if after intersecting with the MinMaxrange there are + // no more keyranges left then just return + if (keyRanges.isEmpty()) { + return NOTHING; + } + ranges = Collections.singletonList(keyRanges); + useSkipScan = keyRanges.size() > 1; + // Treat as binary if descending because we've got a separator byte at the end + // which is not part of the value. + if (keys.size() > 1 || hasTrailingDescSeparatorByte(schema)) { + schema = SchemaUtil.VAR_BINARY_SCHEMA; + slotSpan = ScanUtil.SINGLE_COLUMN_SLOT_SPAN; + } else { + // Keep original schema and don't use skip scan as it's not necessary + // when there's a single key. + slotSpan = new int[] { schema.getMaxFields() - 1 }; + } + } + + List> sortedRanges = Lists.newArrayListWithExpectedSize(ranges.size()); + for (int i = 0; i < ranges.size(); i++) { + Field f = schema.getField(i); + List sorted = Lists.newArrayList(ranges.get(i)); + Collections.sort(sorted, + f.getSortOrder() == SortOrder.ASC ? 
KeyRange.COMPARATOR : KeyRange.DESC_COMPARATOR); + sortedRanges.add(ImmutableList.copyOf(sorted)); + } + + // Don't set minMaxRange for point lookup because it causes issues during intersect + // by going across region boundaries + KeyRange scanRange = KeyRange.EVERYTHING_RANGE; + // if (!isPointLookup && (nBuckets == null || !useSkipScanFilter)) { + // if (! ( isPointLookup || (nBuckets != null && useSkipScanFilter) ) ) { + // if (nBuckets == null || (nBuckets != null && (!isPointLookup || !useSkipScanFilter))) { + if (nBuckets == null || !isPointLookup || !useSkipScan) { + byte[] minKey = ScanUtil.getMinKey(schema, sortedRanges, slotSpan); + byte[] maxKey = ScanUtil.getMaxKey(schema, sortedRanges, slotSpan); + // If the maxKey has crossed the salt byte boundary, then we do not + // have anything to filter at the upper end of the range + if (ScanUtil.crossesPrefixBoundary(maxKey, ScanUtil.getPrefix(minKey, offset), offset)) { + maxKey = KeyRange.UNBOUND; + } + // We won't filter anything at the low end of the range if we just have the salt byte + if (minKey.length <= offset) { + minKey = KeyRange.UNBOUND; + } + + // Handle the offset by pushing it into the scanRange + if (scanMinOffset.isPresent()) { + + byte[] minOffset = scanMinOffset.get(); + // If we are salted we have to + // This should be safe for RVC Offset since we specify a full rowkey which + // is by definition unique so duplicating the salt bucket is fine + if (nBuckets != null && nBuckets > 0) { + minOffset[0] = 0; // We use 0 for salt bucket for scans + } + + // If the offset is more selective than the existing ranges + if (Bytes.BYTES_COMPARATOR.compare(minOffset, minKey) > 0) { + minKey = minOffset; + } + } + + scanRange = KeyRange.getKeyRange(minKey, maxKey); + } + + if (scanRange == KeyRange.EMPTY_RANGE) { + return NOTHING; + } + return new ScanRanges(schema, slotSpan, sortedRanges, scanRange, useSkipScan, isPointLookup, + nBuckets, rowTimestampRange); + } + + private static boolean hasTrailingDescSeparatorByte(RowKeySchema schema) { + return (schema.getField(schema.getFieldCount() - 1).getDataType() != PVarbinaryEncoded.INSTANCE + && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, + schema.getField(schema.getFieldCount() - 1)) == QueryConstants.DESC_SEPARATOR_BYTE) + || (schema.getField(schema.getFieldCount() - 1).getDataType() == PVarbinaryEncoded.INSTANCE + && SchemaUtil.getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(), false, + schema.getField(schema.getFieldCount() - 1).getSortOrder()) + == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES); + } + + private SkipScanFilter filter; + private final List> ranges; + private final int[] slotSpan; + private final RowKeySchema schema; + private final boolean isPointLookup; + private final boolean isSalted; + private final boolean useSkipScanFilter; + private final KeyRange scanRange; + private final TimeRange rowTimestampRange; + + private ScanRanges(RowKeySchema schema, int[] slotSpan, List> ranges, + KeyRange scanRange, boolean useSkipScanFilter, boolean isPointLookup, Integer bucketNum, + TimeRange rowTimestampRange) { + this.isPointLookup = isPointLookup; + this.isSalted = bucketNum != null; + this.useSkipScanFilter = useSkipScanFilter; + this.scanRange = scanRange; + this.rowTimestampRange = rowTimestampRange; + + if (isSalted && !isPointLookup) { + ranges.set(0, SaltingUtil.generateAllSaltingRanges(bucketNum)); + } + this.ranges = ImmutableList.copyOf(ranges); + this.slotSpan = slotSpan; + this.schema = schema; + if (schema 
!= null && !ranges.isEmpty()) { + if (!this.useSkipScanFilter) { + int boundSlotCount = this.getBoundSlotCount(); + ranges = ranges.subList(0, boundSlotCount); + slotSpan = Arrays.copyOf(slotSpan, boundSlotCount); + } + this.filter = new SkipScanFilter(ranges, slotSpan, this.schema, isPointLookup); + } + } + + public void initializeScan(Scan scan) { + scan.withStartRow(scanRange.getLowerRange()); + scan.withStopRow(scanRange.getUpperRange()); + } + + public static byte[] prefixKey(byte[] key, int keyOffset, byte[] prefixKey, int prefixKeyOffset) { + return prefixKey(key, keyOffset, key.length, prefixKey, prefixKeyOffset); + } + + public static byte[] prefixKey(byte[] key, int keyOffset, int keyLength, byte[] prefixKey, + int prefixKeyOffset) { + if (keyLength > 0) { + byte[] newKey = new byte[keyLength + prefixKeyOffset]; + int totalKeyOffset = keyOffset + prefixKeyOffset; + if (prefixKey.length >= totalKeyOffset) { // otherwise it's null padded + System.arraycopy(prefixKey, 0, newKey, 0, totalKeyOffset); + } + System.arraycopy(key, keyOffset, newKey, totalKeyOffset, keyLength - keyOffset); + return newKey; + } + return key; + } + + private static byte[] replaceSaltByte(byte[] key, byte[] saltKey) { + if (key.length == 0) { + return key; + } + byte[] temp = new byte[key.length]; + if (saltKey.length >= SaltingUtil.NUM_SALTING_BYTES) { // Otherwise it's null padded + System.arraycopy(saltKey, 0, temp, 0, SaltingUtil.NUM_SALTING_BYTES); + } + System.arraycopy(key, SaltingUtil.NUM_SALTING_BYTES, temp, SaltingUtil.NUM_SALTING_BYTES, + key.length - SaltingUtil.NUM_SALTING_BYTES); + return temp; + } + + public static byte[] stripPrefix(byte[] key, int keyOffset) { + if (key.length == 0) { + return key; + } + byte[] temp = new byte[key.length - keyOffset]; + System.arraycopy(key, keyOffset, temp, 0, key.length - keyOffset); + return temp; + } + + // This variant adds synthetic scan boundaries at potentially missing salt bucket boundaries + // and won't return null Scans + public List intersectScan(Scan scan, final byte[] originalStartKey, + final byte[] originalStopKey, final int keyOffset, byte[] splitPostfix, Integer buckets, + boolean crossesRegionBoundary) { + // FIXME Both the salted status and the pre-computed bucket list should be available in + // this object, but in some cases they get overwritten, so we cannot use that. + List newScans = new ArrayList(); + if (buckets != null && buckets > 0) { + byte[] wrkStartKey = originalStartKey; + do { + boolean lastBucket = false; + byte[] nextBucketStart = null; + byte[] nextBucketByte = null; + if (wrkStartKey.length > 0 && Byte.toUnsignedInt(wrkStartKey[0]) >= buckets - 1) { + lastBucket = true; } else { - scanStartKey = startKey; - scanStartKeyOffset = totalKeyOffset; - } - int scanStopKeyOffset = scanKeyOffset; - byte[] scanStopKey = scan == null ? 
this.scanRange.getUpperRange() : scan.getStopRow(); - if (scanStopKey.length - scanKeyOffset > 0) { - if (stopKey.length - totalKeyOffset > 0) { - if (Bytes.compareTo(scanStopKey, scanKeyOffset, scanStopKey.length - scanKeyOffset, stopKey, totalKeyOffset, stopKey.length - totalKeyOffset) > 0) { - scanStopKey = stopKey; - scanStopKeyOffset = totalKeyOffset; - } - } - } else { - scanStopKey = stopKey; - scanStopKeyOffset = totalKeyOffset; - } - - // If not scanning anything, return null - if (scanStopKey.length - scanStopKeyOffset > 0 && - Bytes.compareTo(scanStartKey, scanStartKeyOffset, scanStartKey.length - scanStartKeyOffset, - scanStopKey, scanStopKeyOffset, scanStopKey.length - scanStopKeyOffset) >= 0) { - return null; - } - if (originalStopKey.length != 0 && scanStopKey.length == 0) { - scanStopKey = originalStopKey; - } - Filter newFilter = null; - // Only if the scan is using skip scan filter, intersect and replace the filter. - // For example, we may be forcing a range scan, in which case we do not want to - // intersect the start/stop with the filter. Instead, we rely only on the scan - // start/stop or the scanRanges start/stop. - if (this.useSkipScanFilter()) { - byte[] skipScanStartKey = scanStartKey; - byte[] skipScanStopKey = scanStopKey; - // If we have a keyOffset and we've used the startKey/stopKey that - // were passed in (which have the prefix) for the above range check, - // we need to remove the prefix before running our intersect method. - if (scanKeyOffset > 0) { - if (skipScanStartKey != originalStartKey) { // original already has correct salt byte - skipScanStartKey = replaceSaltByte(skipScanStartKey, prefixBytes); - } - if (skipScanStopKey != originalStopKey) { - skipScanStopKey = replaceSaltByte(skipScanStopKey, prefixBytes); - } - } else if (keyOffset > 0) { - if (skipScanStartKey == originalStartKey) { - skipScanStartKey = stripPrefix(skipScanStartKey, keyOffset); - } - if (skipScanStopKey == originalStopKey) { - skipScanStopKey = stripPrefix(skipScanStopKey, keyOffset); - } - } - if (scan == null) { - return filter.hasIntersect(skipScanStartKey, skipScanStopKey) ? HAS_INTERSECTION : null; - } - Filter filter = scan.getFilter(); - SkipScanFilter newSkipScanFilter = null; - if (filter instanceof SkipScanFilter) { - SkipScanFilter oldSkipScanFilter = (SkipScanFilter)filter; - newFilter = newSkipScanFilter = oldSkipScanFilter.intersect(skipScanStartKey, skipScanStopKey); - if (newFilter == null) { - return null; - } - } else if (filter instanceof FilterList) { - FilterList oldList = (FilterList)filter; - FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL); - newFilter = newList; - for (Filter f : oldList.getFilters()) { - if (f instanceof SkipScanFilter) { - newSkipScanFilter = ((SkipScanFilter)f).intersect(skipScanStartKey, skipScanStopKey); - if (newSkipScanFilter == null) { - return null; - } - newList.addFilter(newSkipScanFilter); - } else { - newList.addFilter(f); - } - } - } - // TODO: it seems that our SkipScanFilter or HBase runs into problems if we don't - // have an enclosing range when we do a point lookup. 
- if (isPointLookup) { - scanStartKey = ScanUtil.getMinKey(schema, newSkipScanFilter.getSlots(), slotSpan); - scanStopKey = ScanUtil.getMaxKey(schema, newSkipScanFilter.getSlots(), slotSpan); - } - } - // If we've got this far, we know we have an intersection - if (scan == null) { - return HAS_INTERSECTION; - } + // This includes the zero bytes from the minimum PK + nextBucketStart = bucketEnd(wrkStartKey, splitPostfix); + // These is the start of the next bucket in byte[], without the PK suffix + nextBucketByte = new byte[] { nextBucketStart[0] }; + } + if (lastBucket || Bytes.compareTo(originalStopKey, nextBucketStart) <= 0) { + // either we don't need to add synthetic guideposts, or we already have, and + // are at the last bucket of the original scan + addIfNotNull(newScans, + intersectScan(scan, wrkStartKey, originalStopKey, keyOffset, crossesRegionBoundary)); + break; + } + // This is where we add the synthetic guidepost. + // We skip [nextBucketByte, nextBucketStart), but it's guaranteed that there are no + // rows there. + addIfNotNull(newScans, intersectScan(scan, wrkStartKey, nextBucketByte, keyOffset, false)); + wrkStartKey = nextBucketStart; + } while (true); + } else { + // Definitely Not crossing buckets + addIfNotNull(newScans, + intersectScan(scan, originalStartKey, originalStopKey, keyOffset, crossesRegionBoundary)); + } + return newScans; + } + + private void addIfNotNull(List scans, Scan newScan) { + if (newScan != null) { + scans.add(newScan); + } + } + + // The split (presplit for salted tables) code extends the split point to the minimum PK length. + // Adding the same postfix here avoids creating and extra [n,n\x00\x00\x00..\x00) scan for each + // bucket + private byte[] bucketEnd(byte[] key, byte[] splitPostfix) { + byte startByte = key.length > 0 ? key[0] : 0; + int nextBucket = Byte.toUnsignedInt(startByte) + 1; + byte[] bucketEnd = new byte[splitPostfix.length + 1]; + bucketEnd[0] = (byte) nextBucket; + System.arraycopy(splitPostfix, 0, bucketEnd, 1, splitPostfix.length); + return bucketEnd; + } + + // TODO split this for normal, salted and local index variants + public Scan intersectScan(Scan scan, final byte[] originalStartKey, final byte[] originalStopKey, + final int keyOffset, boolean crossesRegionBoundary) { + byte[] startKey = originalStartKey; + byte[] stopKey = originalStopKey; + if (stopKey.length > 0 && Bytes.compareTo(startKey, stopKey) >= 0) { + return null; + } + // Keep the keys as they are if we have a point lookup, as we've already resolved the + // salt bytes in that case. + final int scanKeyOffset = + this.isSalted && !this.isPointLookup ? SaltingUtil.NUM_SALTING_BYTES : 0; + assert (scanKeyOffset == 0 || keyOffset == 0); + // Total offset for startKey/stopKey. Either 1 for salted tables or the prefix length + // of the current region for local indexes. We'll never have a case where a table is + // both salted and local. + final int totalKeyOffset = scanKeyOffset + keyOffset; + byte[] prefixBytes = ByteUtil.EMPTY_BYTE_ARRAY; + if (totalKeyOffset > 0) { + prefixBytes = ScanUtil.getPrefix(startKey, totalKeyOffset); + /* + * If our startKey to stopKey crosses a region boundary consider everything after the startKey + * as our scan is always done within a single region. This prevents us from having to prefix + * the key prior to knowing whether or not there may be an intersection. 
We can't calculate + * whether or not we've crossed a region boundary for local indexes, because we don't know the + * key offset of the next region, but only for the current one (which is the one passed in). + * If the next prefix happened to be a subset of the previous prefix, then this wouldn't + * detect that we crossed a region boundary. + */ + if (crossesRegionBoundary) { + stopKey = ByteUtil.EMPTY_BYTE_ARRAY; + } + } + int scanStartKeyOffset = scanKeyOffset; + byte[] scanStartKey = scan == null ? this.scanRange.getLowerRange() : scan.getStartRow(); + // Compare ignoring key prefix and salt byte + if (scanStartKey.length - scanKeyOffset > 0) { + if (startKey.length - totalKeyOffset > 0) { + if ( + Bytes.compareTo(scanStartKey, scanKeyOffset, scanStartKey.length - scanKeyOffset, + startKey, totalKeyOffset, startKey.length - totalKeyOffset) < 0 + ) { + scanStartKey = startKey; + scanStartKeyOffset = totalKeyOffset; + } + } + } else { + scanStartKey = startKey; + scanStartKeyOffset = totalKeyOffset; + } + int scanStopKeyOffset = scanKeyOffset; + byte[] scanStopKey = scan == null ? this.scanRange.getUpperRange() : scan.getStopRow(); + if (scanStopKey.length - scanKeyOffset > 0) { + if (stopKey.length - totalKeyOffset > 0) { + if ( + Bytes.compareTo(scanStopKey, scanKeyOffset, scanStopKey.length - scanKeyOffset, stopKey, + totalKeyOffset, stopKey.length - totalKeyOffset) > 0 + ) { + scanStopKey = stopKey; + scanStopKeyOffset = totalKeyOffset; + } + } + } else { + scanStopKey = stopKey; + scanStopKeyOffset = totalKeyOffset; + } + + // If not scanning anything, return null + if ( + scanStopKey.length - scanStopKeyOffset > 0 && Bytes.compareTo(scanStartKey, + scanStartKeyOffset, scanStartKey.length - scanStartKeyOffset, scanStopKey, + scanStopKeyOffset, scanStopKey.length - scanStopKeyOffset) >= 0 + ) { + return null; + } + if (originalStopKey.length != 0 && scanStopKey.length == 0) { + scanStopKey = originalStopKey; + } + Filter newFilter = null; + // Only if the scan is using skip scan filter, intersect and replace the filter. + // For example, we may be forcing a range scan, in which case we do not want to + // intersect the start/stop with the filter. Instead, we rely only on the scan + // start/stop or the scanRanges start/stop. + if (this.useSkipScanFilter()) { + byte[] skipScanStartKey = scanStartKey; + byte[] skipScanStopKey = scanStopKey; + // If we have a keyOffset and we've used the startKey/stopKey that + // were passed in (which have the prefix) for the above range check, + // we need to remove the prefix before running our intersect method. + if (scanKeyOffset > 0) { + if (skipScanStartKey != originalStartKey) { // original already has correct salt byte + skipScanStartKey = replaceSaltByte(skipScanStartKey, prefixBytes); + } + if (skipScanStopKey != originalStopKey) { + skipScanStopKey = replaceSaltByte(skipScanStopKey, prefixBytes); + } + } else if (keyOffset > 0) { + if (skipScanStartKey == originalStartKey) { + skipScanStartKey = stripPrefix(skipScanStartKey, keyOffset); + } + if (skipScanStopKey == originalStopKey) { + skipScanStopKey = stripPrefix(skipScanStopKey, keyOffset); + } + } + if (scan == null) { + return filter.hasIntersect(skipScanStartKey, skipScanStopKey) ? 
HAS_INTERSECTION : null; + } + Filter filter = scan.getFilter(); + SkipScanFilter newSkipScanFilter = null; + if (filter instanceof SkipScanFilter) { + SkipScanFilter oldSkipScanFilter = (SkipScanFilter) filter; + newFilter = + newSkipScanFilter = oldSkipScanFilter.intersect(skipScanStartKey, skipScanStopKey); if (newFilter == null) { - newFilter = scan.getFilter(); - } - Scan newScan = ScanUtil.newScan(scan); - newScan.setFilter(newFilter); - // If we have an offset (salted table or local index), we need to make sure to - // prefix our scan start/stop row by the prefix of the startKey or stopKey that - // were passed in. Our scan either doesn't have the prefix or has a placeholder - // for it. - if (totalKeyOffset > 0) { - if (scanStartKey != originalStartKey) { - scanStartKey = prefixKey(scanStartKey, scanKeyOffset, prefixBytes, keyOffset); - } - if (scanStopKey != originalStopKey) { - scanStopKey = prefixKey(scanStopKey, scanKeyOffset, prefixBytes, keyOffset); - } - } - // Don't let the stopRow of the scan go beyond the originalStopKey - if (originalStopKey.length > 0 && Bytes.compareTo(scanStopKey, originalStopKey) > 0) { - scanStopKey = originalStopKey; - } - if (scanStopKey.length > 0 && Bytes.compareTo(scanStartKey, scanStopKey) >= 0) { - return null; - } - newScan.setAttribute(SCAN_ACTUAL_START_ROW, scanStartKey); - newScan.withStartRow(scanStartKey); - newScan.withStopRow(scanStopKey); - return newScan; - } - - /** - * Return true if the region with the start and end key - * intersects with the scan ranges and false otherwise. - * @param regionStartKey lower inclusive key - * @param regionEndKey upper exclusive key - * @param isLocalIndex true if the table being scanned is a local index - * @return true if the scan range intersects with the specified lower/upper key - * range - */ - public boolean intersectRegion(byte[] regionStartKey, byte[] regionEndKey, boolean isLocalIndex) { - if (isEverything()) { - return true; - } - if (isDegenerate()) { - return false; - } - // Every range intersects all regions of a local index table - if (isLocalIndex) { - return true; - } - - boolean crossesSaltBoundary = isSalted && ScanUtil.crossesPrefixBoundary(regionEndKey, - ScanUtil.getPrefix(regionStartKey, SaltingUtil.NUM_SALTING_BYTES), - SaltingUtil.NUM_SALTING_BYTES); - return intersectScan(null, regionStartKey, regionEndKey, 0, crossesSaltBoundary) == HAS_INTERSECTION; - } - - public SkipScanFilter getSkipScanFilter() { - return filter; - } - - public List> getRanges() { - return ranges; - } - - public List> getBoundRanges() { - return ranges.subList(0, getBoundSlotCount()); - } - - public RowKeySchema getSchema() { - return schema; - } - - public boolean isEverything() { - return this == EVERYTHING || (!ranges.isEmpty() && ranges.get(0).get(0) == KeyRange.EVERYTHING_RANGE); - } - - public boolean isDegenerate() { - return this == NOTHING; - } - - /** - * Use SkipScanFilter under two circumstances: - * 1) If we have multiple ranges for a given key slot (use of IN) - * 2) If we have a range (i.e. not a single/point key) that is - * not the last key slot - */ - public boolean useSkipScanFilter() { - return useSkipScanFilter; - } - - /** - * Finds the total number of row keys spanned by this ranges / slotSpan pair. - * This accounts for slots in the ranges that may span more than on row key. - * @param ranges the KeyRange slots paired with this slotSpan. corresponds to {@link ScanRanges#ranges} - * @param slotSpan the extra span per skip scan slot. 
corresponds to {@link ScanRanges#slotSpan} - * @return the total number of row keys spanned yb this ranges / slotSpan pair. - */ - private static int getBoundPkSpan(List> ranges, int[] slotSpan) { - int count = 0; - boolean hasUnbound = false; - int nRanges = ranges.size(); - - for(int i = 0; i < nRanges && !hasUnbound; i++) { - List orRanges = ranges.get(i); - for (KeyRange range : orRanges) { - if (range == KeyRange.EVERYTHING_RANGE) { - return count; - } - if (range.isUnbound()) { - hasUnbound = true; - } - } - count += slotSpan[i] + 1; - } - - return count; - } - - private static boolean isFullyQualified(RowKeySchema schema, List> ranges, int[] slotSpan) { - return getBoundPkSpan(ranges, slotSpan) == schema.getMaxFields(); - } - - private static boolean isPointLookup(RowKeySchema schema, List> ranges, int[] slotSpan, boolean useSkipScan) { - if (!isFullyQualified(schema, ranges, slotSpan)) { - return false; - } - int lastIndex = ranges.size()-1; - for (int i = lastIndex; i >= 0; i--) { - List orRanges = ranges.get(i); - if (!useSkipScan && orRanges.size() > 1) { - return false; - } - for (KeyRange keyRange : orRanges) { - // Special case for single trailing IS NULL. We cannot consider this as a point key because - // we strip trailing nulls when we form the key. - if (!keyRange.isSingleKey() || (i == lastIndex && keyRange == KeyRange.IS_NULL_RANGE)) { - return false; - } - } - } - return true; - } - - - private static boolean incrementKey(List> slots, int[] position) { - int idx = slots.size() - 1; - while (idx >= 0 && (position[idx] = (position[idx] + 1) % slots.get(idx).size()) == 0) { - idx--; - } - return idx >= 0; - } - - private static List getPointKeys(List> ranges, int[] slotSpan, RowKeySchema schema, Integer bucketNum) { - if (ranges == null || ranges.isEmpty()) { - return Collections.emptyList(); - } - boolean isSalted = bucketNum != null; - int count = 1; - int offset = isSalted ? 1 : 0; - // Skip salt byte range in the first position if salted - for (int i = offset; i < ranges.size(); i++) { - count *= ranges.get(i).size(); - } - List keys = Lists.newArrayListWithExpectedSize(count); - int[] position = new int[ranges.size()]; - int maxKeyLength = SchemaUtil.getMaxKeyLength(schema, ranges); - int length; - byte[] key = new byte[maxKeyLength]; - do { - length = ScanUtil.setKey(schema, ranges, slotSpan, position, Bound.LOWER, key, offset, offset, ranges.size(), offset); - if (isSalted) { - key[0] = SaltingUtil.getSaltingByte(key, offset, length, bucketNum); + return null; + } + } else if (filter instanceof FilterList) { + FilterList oldList = (FilterList) filter; + FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL); + newFilter = newList; + for (Filter f : oldList.getFilters()) { + if (f instanceof SkipScanFilter) { + newSkipScanFilter = ((SkipScanFilter) f).intersect(skipScanStartKey, skipScanStopKey); + if (newSkipScanFilter == null) { + return null; } - keys.add(Arrays.copyOf(key, length + offset)); - } while (incrementKey(ranges, position)); - return keys; - } - - /** - * @return true if this represents a set of complete keys - */ - public boolean isPointLookup() { - return isPointLookup; - } - - /** - * @return true if this range is salted - i.e. has a salt range - */ - public boolean isSalted() { - return isSalted; - } - - public int getPointLookupCount() { - return getPointLookupCount(isPointLookup, ranges); - } - - private static int getPointLookupCount(boolean isPointLookup, List> ranges) { - return isPointLookup ? 
ranges.get(0).size() : 0; - } - - public Iterator getPointLookupKeyIterator() { - return isPointLookup ? ranges.get(0).iterator() : Collections.emptyIterator(); - } - - public int getBoundPkColumnCount() { - return getBoundPkSpan(ranges, slotSpan); - } - - public int getBoundSlotCount() { - int count = 0; - boolean hasUnbound = false; - int nRanges = ranges.size(); - - for(int i = 0; i < nRanges && !hasUnbound; i++) { - List orRanges = ranges.get(i); - for (KeyRange range : orRanges) { - if (range == KeyRange.EVERYTHING_RANGE) { - return count; - } - if (range.isUnbound()) { - hasUnbound = true; - } - } - count++; - } - - return count; - } - - @Override - public String toString() { - return "ScanRanges[" + ranges.toString() + "]"; - } - - public int[] getSlotSpans() { - return slotSpan; - } - - public KeyRange getScanRange() { - return scanRange; - } - - public boolean hasEqualityConstraint(int pkPosition) { - int pkOffset = 0; - int nRanges = ranges.size(); - - for(int i = 0; i < nRanges; i++) { - if (pkOffset + slotSpan[i] >= pkPosition) { - List range = ranges.get(i); - return range.size() == 1 && range.get(0).isSingleKey(); - } - pkOffset += slotSpan[i] + 1; - } - + newList.addFilter(newSkipScanFilter); + } else { + newList.addFilter(f); + } + } + } + // TODO: it seems that our SkipScanFilter or HBase runs into problems if we don't + // have an enclosing range when we do a point lookup. + if (isPointLookup) { + scanStartKey = ScanUtil.getMinKey(schema, newSkipScanFilter.getSlots(), slotSpan); + scanStopKey = ScanUtil.getMaxKey(schema, newSkipScanFilter.getSlots(), slotSpan); + } + } + // If we've got this far, we know we have an intersection + if (scan == null) { + return HAS_INTERSECTION; + } + if (newFilter == null) { + newFilter = scan.getFilter(); + } + Scan newScan = ScanUtil.newScan(scan); + newScan.setFilter(newFilter); + // If we have an offset (salted table or local index), we need to make sure to + // prefix our scan start/stop row by the prefix of the startKey or stopKey that + // were passed in. Our scan either doesn't have the prefix or has a placeholder + // for it. + if (totalKeyOffset > 0) { + if (scanStartKey != originalStartKey) { + scanStartKey = prefixKey(scanStartKey, scanKeyOffset, prefixBytes, keyOffset); + } + if (scanStopKey != originalStopKey) { + scanStopKey = prefixKey(scanStopKey, scanKeyOffset, prefixBytes, keyOffset); + } + } + // Don't let the stopRow of the scan go beyond the originalStopKey + if (originalStopKey.length > 0 && Bytes.compareTo(scanStopKey, originalStopKey) > 0) { + scanStopKey = originalStopKey; + } + if (scanStopKey.length > 0 && Bytes.compareTo(scanStartKey, scanStopKey) >= 0) { + return null; + } + newScan.setAttribute(SCAN_ACTUAL_START_ROW, scanStartKey); + newScan.withStartRow(scanStartKey); + newScan.withStopRow(scanStopKey); + return newScan; + } + + /** + * Return true if the region with the start and end key intersects with the scan ranges and false + * otherwise. 
+ * @param regionStartKey lower inclusive key + * @param regionEndKey upper exclusive key + * @param isLocalIndex true if the table being scanned is a local index + * @return true if the scan range intersects with the specified lower/upper key range + */ + public boolean intersectRegion(byte[] regionStartKey, byte[] regionEndKey, boolean isLocalIndex) { + if (isEverything()) { + return true; + } + if (isDegenerate()) { + return false; + } + // Every range intersects all regions of a local index table + if (isLocalIndex) { + return true; + } + + boolean crossesSaltBoundary = isSalted && ScanUtil.crossesPrefixBoundary(regionEndKey, + ScanUtil.getPrefix(regionStartKey, SaltingUtil.NUM_SALTING_BYTES), + SaltingUtil.NUM_SALTING_BYTES); + return intersectScan(null, regionStartKey, regionEndKey, 0, crossesSaltBoundary) + == HAS_INTERSECTION; + } + + public SkipScanFilter getSkipScanFilter() { + return filter; + } + + public List> getRanges() { + return ranges; + } + + public List> getBoundRanges() { + return ranges.subList(0, getBoundSlotCount()); + } + + public RowKeySchema getSchema() { + return schema; + } + + public boolean isEverything() { + return this == EVERYTHING + || (!ranges.isEmpty() && ranges.get(0).get(0) == KeyRange.EVERYTHING_RANGE); + } + + public boolean isDegenerate() { + return this == NOTHING; + } + + /** + * Use SkipScanFilter under two circumstances: 1) If we have multiple ranges for a given key slot + * (use of IN) 2) If we have a range (i.e. not a single/point key) that is not the last key slot + */ + public boolean useSkipScanFilter() { + return useSkipScanFilter; + } + + /** + * Finds the total number of row keys spanned by this ranges / slotSpan pair. This accounts for + * slots in the ranges that may span more than on row key. + * @param ranges the KeyRange slots paired with this slotSpan. corresponds to + * {@link ScanRanges#ranges} + * @param slotSpan the extra span per skip scan slot. corresponds to {@link ScanRanges#slotSpan} + * @return the total number of row keys spanned yb this ranges / slotSpan pair. + */ + private static int getBoundPkSpan(List> ranges, int[] slotSpan) { + int count = 0; + boolean hasUnbound = false; + int nRanges = ranges.size(); + + for (int i = 0; i < nRanges && !hasUnbound; i++) { + List orRanges = ranges.get(i); + for (KeyRange range : orRanges) { + if (range == KeyRange.EVERYTHING_RANGE) { + return count; + } + if (range.isUnbound()) { + hasUnbound = true; + } + } + count += slotSpan[i] + 1; + } + + return count; + } + + private static boolean isFullyQualified(RowKeySchema schema, List> ranges, + int[] slotSpan) { + return getBoundPkSpan(ranges, slotSpan) == schema.getMaxFields(); + } + + private static boolean isPointLookup(RowKeySchema schema, List> ranges, + int[] slotSpan, boolean useSkipScan) { + if (!isFullyQualified(schema, ranges, slotSpan)) { + return false; + } + int lastIndex = ranges.size() - 1; + for (int i = lastIndex; i >= 0; i--) { + List orRanges = ranges.get(i); + if (!useSkipScan && orRanges.size() > 1) { return false; - - } - - private static TimeRange getRowTimestampColumnRange(List> ranges, RowKeySchema schema, int rowTimestampColPos) { - try { - if (rowTimestampColPos != -1) { - if (ranges != null && ranges.size() > rowTimestampColPos) { - List rowTimestampColRange = ranges.get(rowTimestampColPos); - List sortedRange = new ArrayList<>(rowTimestampColRange); - Field f = schema.getField(rowTimestampColPos); - Collections.sort(sortedRange, f.getSortOrder() == SortOrder.ASC ? 
KeyRange.COMPARATOR : KeyRange.DESC_COMPARATOR); - SortOrder order = f.getSortOrder(); - KeyRange lowestRange = sortedRange.get(0); - KeyRange highestRange = sortedRange.get(rowTimestampColRange.size() - 1); - if (order == SortOrder.DESC) { - return getDescTimeRange(lowestRange, highestRange, f); - } - return getAscTimeRange( lowestRange, highestRange, f); - } - } - } catch (IOException e) { - Throwables.propagate(e); - } - return null; - } - - private static TimeRange getAscTimeRange(KeyRange lowestRange, KeyRange highestRange, Field f) - throws IOException { - long low; - long high; - PDataCodec codec = PLong.INSTANCE.getCodec(); - if (lowestRange.lowerUnbound()) { - low = 0; - } else { - long lowerRange = codec.decodeLong(lowestRange.getLowerRange(), 0, SortOrder.ASC); - low = lowestRange.isLowerInclusive() ? lowerRange : safelyIncrement(lowerRange); - } - if (highestRange.upperUnbound()) { - high = HConstants.LATEST_TIMESTAMP; - } else { - long upperRange = codec.decodeLong(highestRange.getUpperRange(), 0, SortOrder.ASC); - if (highestRange.isUpperInclusive()) { - high = safelyIncrement(upperRange); - } else { - high = upperRange; - } - } - return TimeRange.between(low, high); - } - - public static TimeRange getDescTimeRange(KeyRange lowestKeyRange, KeyRange highestKeyRange, Field f) throws IOException { - boolean lowerUnbound = lowestKeyRange.lowerUnbound(); - boolean lowerInclusive = lowestKeyRange.isLowerInclusive(); - boolean upperUnbound = highestKeyRange.upperUnbound(); - boolean upperInclusive = highestKeyRange.isUpperInclusive(); - PDataCodec codec = PLong.INSTANCE.getCodec(); - long low = lowerUnbound ? -1 : codec.decodeLong(lowestKeyRange.getLowerRange(), 0, SortOrder.DESC); - long high = upperUnbound ? -1 : codec.decodeLong(highestKeyRange.getUpperRange(), 0, SortOrder.DESC); - long newHigh; - long newLow; - if (!lowerUnbound && !upperUnbound) { - newHigh = lowerInclusive ? safelyIncrement(low) : low; - newLow = upperInclusive ? high : safelyIncrement(high); - return TimeRange.between(newLow, newHigh); - } else if (!lowerUnbound && upperUnbound) { - newHigh = lowerInclusive ? safelyIncrement(low) : low; - newLow = 0; - return TimeRange.between(newLow, newHigh); - } else if (lowerUnbound && !upperUnbound) { - newLow = upperInclusive ? high : safelyIncrement(high); - newHigh = HConstants.LATEST_TIMESTAMP; - return TimeRange.between(newLow, newHigh); - } else { - newLow = 0; - newHigh = HConstants.LATEST_TIMESTAMP; - return TimeRange.between(newLow, newHigh); - } - } - - private static long safelyIncrement(long value) { - return value < HConstants.LATEST_TIMESTAMP ? (value + 1) : HConstants.LATEST_TIMESTAMP; - } - - public TimeRange getRowTimestampRange() { - return rowTimestampRange; - } + } + for (KeyRange keyRange : orRanges) { + // Special case for single trailing IS NULL. We cannot consider this as a point key because + // we strip trailing nulls when we form the key. 
+ if (!keyRange.isSingleKey() || (i == lastIndex && keyRange == KeyRange.IS_NULL_RANGE)) { + return false; + } + } + } + return true; + } + + private static boolean incrementKey(List> slots, int[] position) { + int idx = slots.size() - 1; + while (idx >= 0 && (position[idx] = (position[idx] + 1) % slots.get(idx).size()) == 0) { + idx--; + } + return idx >= 0; + } + + private static List getPointKeys(List> ranges, int[] slotSpan, + RowKeySchema schema, Integer bucketNum) { + if (ranges == null || ranges.isEmpty()) { + return Collections.emptyList(); + } + boolean isSalted = bucketNum != null; + int count = 1; + int offset = isSalted ? 1 : 0; + // Skip salt byte range in the first position if salted + for (int i = offset; i < ranges.size(); i++) { + count *= ranges.get(i).size(); + } + List keys = Lists.newArrayListWithExpectedSize(count); + int[] position = new int[ranges.size()]; + int maxKeyLength = SchemaUtil.getMaxKeyLength(schema, ranges); + int length; + byte[] key = new byte[maxKeyLength]; + do { + length = ScanUtil.setKey(schema, ranges, slotSpan, position, Bound.LOWER, key, offset, offset, + ranges.size(), offset); + if (isSalted) { + key[0] = SaltingUtil.getSaltingByte(key, offset, length, bucketNum); + } + keys.add(Arrays.copyOf(key, length + offset)); + } while (incrementKey(ranges, position)); + return keys; + } + + /** Returns true if this represents a set of complete keys */ + public boolean isPointLookup() { + return isPointLookup; + } + + /** Returns true if this range is salted - i.e. has a salt range */ + public boolean isSalted() { + return isSalted; + } + + public int getPointLookupCount() { + return getPointLookupCount(isPointLookup, ranges); + } + + private static int getPointLookupCount(boolean isPointLookup, List> ranges) { + return isPointLookup ? ranges.get(0).size() : 0; + } + + public Iterator getPointLookupKeyIterator() { + return isPointLookup ? ranges.get(0).iterator() : Collections. emptyIterator(); + } + + public int getBoundPkColumnCount() { + return getBoundPkSpan(ranges, slotSpan); + } + + public int getBoundSlotCount() { + int count = 0; + boolean hasUnbound = false; + int nRanges = ranges.size(); + + for (int i = 0; i < nRanges && !hasUnbound; i++) { + List orRanges = ranges.get(i); + for (KeyRange range : orRanges) { + if (range == KeyRange.EVERYTHING_RANGE) { + return count; + } + if (range.isUnbound()) { + hasUnbound = true; + } + } + count++; + } + + return count; + } + + @Override + public String toString() { + return "ScanRanges[" + ranges.toString() + "]"; + } + + public int[] getSlotSpans() { + return slotSpan; + } + + public KeyRange getScanRange() { + return scanRange; + } + + public boolean hasEqualityConstraint(int pkPosition) { + int pkOffset = 0; + int nRanges = ranges.size(); + + for (int i = 0; i < nRanges; i++) { + if (pkOffset + slotSpan[i] >= pkPosition) { + List range = ranges.get(i); + return range.size() == 1 && range.get(0).isSingleKey(); + } + pkOffset += slotSpan[i] + 1; + } + + return false; + + } + + private static TimeRange getRowTimestampColumnRange(List> ranges, + RowKeySchema schema, int rowTimestampColPos) { + try { + if (rowTimestampColPos != -1) { + if (ranges != null && ranges.size() > rowTimestampColPos) { + List rowTimestampColRange = ranges.get(rowTimestampColPos); + List sortedRange = new ArrayList<>(rowTimestampColRange); + Field f = schema.getField(rowTimestampColPos); + Collections.sort(sortedRange, + f.getSortOrder() == SortOrder.ASC ? 
KeyRange.COMPARATOR : KeyRange.DESC_COMPARATOR); + SortOrder order = f.getSortOrder(); + KeyRange lowestRange = sortedRange.get(0); + KeyRange highestRange = sortedRange.get(rowTimestampColRange.size() - 1); + if (order == SortOrder.DESC) { + return getDescTimeRange(lowestRange, highestRange, f); + } + return getAscTimeRange(lowestRange, highestRange, f); + } + } + } catch (IOException e) { + Throwables.propagate(e); + } + return null; + } + + private static TimeRange getAscTimeRange(KeyRange lowestRange, KeyRange highestRange, Field f) + throws IOException { + long low; + long high; + PDataCodec codec = PLong.INSTANCE.getCodec(); + if (lowestRange.lowerUnbound()) { + low = 0; + } else { + long lowerRange = codec.decodeLong(lowestRange.getLowerRange(), 0, SortOrder.ASC); + low = lowestRange.isLowerInclusive() ? lowerRange : safelyIncrement(lowerRange); + } + if (highestRange.upperUnbound()) { + high = HConstants.LATEST_TIMESTAMP; + } else { + long upperRange = codec.decodeLong(highestRange.getUpperRange(), 0, SortOrder.ASC); + if (highestRange.isUpperInclusive()) { + high = safelyIncrement(upperRange); + } else { + high = upperRange; + } + } + return TimeRange.between(low, high); + } + + public static TimeRange getDescTimeRange(KeyRange lowestKeyRange, KeyRange highestKeyRange, + Field f) throws IOException { + boolean lowerUnbound = lowestKeyRange.lowerUnbound(); + boolean lowerInclusive = lowestKeyRange.isLowerInclusive(); + boolean upperUnbound = highestKeyRange.upperUnbound(); + boolean upperInclusive = highestKeyRange.isUpperInclusive(); + PDataCodec codec = PLong.INSTANCE.getCodec(); + long low = + lowerUnbound ? -1 : codec.decodeLong(lowestKeyRange.getLowerRange(), 0, SortOrder.DESC); + long high = + upperUnbound ? -1 : codec.decodeLong(highestKeyRange.getUpperRange(), 0, SortOrder.DESC); + long newHigh; + long newLow; + if (!lowerUnbound && !upperUnbound) { + newHigh = lowerInclusive ? safelyIncrement(low) : low; + newLow = upperInclusive ? high : safelyIncrement(high); + return TimeRange.between(newLow, newHigh); + } else if (!lowerUnbound && upperUnbound) { + newHigh = lowerInclusive ? safelyIncrement(low) : low; + newLow = 0; + return TimeRange.between(newLow, newHigh); + } else if (lowerUnbound && !upperUnbound) { + newLow = upperInclusive ? high : safelyIncrement(high); + newHigh = HConstants.LATEST_TIMESTAMP; + return TimeRange.between(newLow, newHigh); + } else { + newLow = 0; + newHigh = HConstants.LATEST_TIMESTAMP; + return TimeRange.between(newLow, newHigh); + } + } + + private static long safelyIncrement(long value) { + return value < HConstants.LATEST_TIMESTAMP ? (value + 1) : HConstants.LATEST_TIMESTAMP; + } + + public TimeRange getRowTimestampRange() { + return rowTimestampRange; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceManager.java index 0f50443d14a..4723acd8c6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,204 +41,207 @@ import org.apache.phoenix.schema.tuple.DelegateTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.util.SequenceUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.SequenceUtil; public class SequenceManager { - private final PhoenixStatement statement; - private int[] sequencePosition; - private List nextSequences; - private List currentSequences; - private final Map sequenceMap = Maps.newHashMap(); - private final BitSet isNextSequence = new BitSet(); - - public SequenceManager(PhoenixStatement statement) { - this.statement = statement; - } - - public int getSequenceCount() { - return sequenceMap == null ? 0 : sequenceMap.size(); - } - - private void setSequenceValues(long[] srcSequenceValues, long[] dstSequenceValues, SQLException[] sqlExceptions) throws SQLException { - SQLException eTop = null; - for (int i = 0; i < sqlExceptions.length; i++) { - SQLException e = sqlExceptions[i]; - if (e != null) { - if (eTop == null) { - eTop = e; - } else { - e.setNextException(eTop.getNextException()); - eTop.setNextException(e); - } - } else { - dstSequenceValues[sequencePosition[i]] = srcSequenceValues[i]; - } - } - if (eTop != null) { - throw eTop; + private final PhoenixStatement statement; + private int[] sequencePosition; + private List nextSequences; + private List currentSequences; + private final Map sequenceMap = Maps.newHashMap(); + private final BitSet isNextSequence = new BitSet(); + + public SequenceManager(PhoenixStatement statement) { + this.statement = statement; + } + + public int getSequenceCount() { + return sequenceMap == null ? 0 : sequenceMap.size(); + } + + private void setSequenceValues(long[] srcSequenceValues, long[] dstSequenceValues, + SQLException[] sqlExceptions) throws SQLException { + SQLException eTop = null; + for (int i = 0; i < sqlExceptions.length; i++) { + SQLException e = sqlExceptions[i]; + if (e != null) { + if (eTop == null) { + eTop = e; + } else { + e.setNextException(eTop.getNextException()); + eTop.setNextException(e); } + } else { + dstSequenceValues[sequencePosition[i]] = srcSequenceValues[i]; + } } - - public Tuple newSequenceTuple(Tuple tuple) throws SQLException { - return new SequenceTuple(tuple); + if (eTop != null) { + throw eTop; } - - private class SequenceTuple extends DelegateTuple { - private final long[] srcSequenceValues; - private final long[] dstSequenceValues; - private final SQLException[] sqlExceptions; - - public SequenceTuple(Tuple delegate) throws SQLException { - super(delegate); - int maxSize = sequenceMap.size(); - dstSequenceValues = new long[maxSize]; - srcSequenceValues = new long[nextSequences.size()]; - sqlExceptions = new SQLException[nextSequences.size()]; - incrementSequenceValues(); - } - - private void incrementSequenceValues() throws SQLException { - if (sequenceMap == null) { - return; - } - Long scn = statement.getConnection().getSCN(); - long timestamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; - ConnectionQueryServices services = statement.getConnection().getQueryServices(); - services.incrementSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions); - setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions); - int offset = nextSequences.size(); - for (int i = 0; i < currentSequences.size(); i++) { - dstSequenceValues[sequencePosition[offset+i]] = services.currentSequenceValue(currentSequences.get(i), timestamp); - } - } - - @Override - public long getSequenceValue(int index) { - return dstSequenceValues[index]; - } + } + + public Tuple newSequenceTuple(Tuple tuple) throws SQLException { + return new SequenceTuple(tuple); + } + + private class SequenceTuple extends DelegateTuple { + private final long[] srcSequenceValues; + private final long[] dstSequenceValues; + private final SQLException[] sqlExceptions; + + public SequenceTuple(Tuple delegate) throws SQLException { + super(delegate); + int maxSize = sequenceMap.size(); + dstSequenceValues = new long[maxSize]; + srcSequenceValues = new long[nextSequences.size()]; + sqlExceptions = new SQLException[nextSequences.size()]; + incrementSequenceValues(); } - public SequenceValueExpression newSequenceReference(SequenceValueParseNode node) throws SQLException { - PName tenantName = statement.getConnection().getTenantId(); - String tenantId = tenantName == null ? null : tenantName.getString(); - TableName tableName = node.getTableName(); - if (tableName.getSchemaName() == null && statement.getConnection().getSchema() != null) { - tableName = TableName.create(statement.getConnection().getSchema(), tableName.getTableName()); - } - int nSaltBuckets = statement.getConnection().getQueryServices().getSequenceSaltBuckets(); - ParseNode numToAllocateNode = node.getNumToAllocateNode(); - - Expression numToAllocateExp = numToAllocateExpression(tableName, numToAllocateNode); - SequenceKey key = new SequenceKey(tenantId, tableName.getSchemaName(), tableName.getTableName(), nSaltBuckets); - - SequenceValueExpression expression = sequenceMap.get(key); - if (expression == null) { - int index = sequenceMap.size(); - expression = new SequenceValueExpression(key, node.getOp(), index, numToAllocateExp); - } else { - // Add the new numToAllocateExp to the expression - SequenceValueExpression oldExpression = expression; - expression = new SequenceValueExpression(oldExpression, node.getOp(), numToAllocateExp); - } - sequenceMap.put(key, expression); + private void incrementSequenceValues() throws SQLException { + if (sequenceMap == null) { + return; + } + Long scn = statement.getConnection().getSCN(); + long timestamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; + ConnectionQueryServices services = statement.getConnection().getQueryServices(); + services.incrementSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions); + setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions); + int offset = nextSequences.size(); + for (int i = 0; i < currentSequences.size(); i++) { + dstSequenceValues[sequencePosition[offset + i]] = + services.currentSequenceValue(currentSequences.get(i), timestamp); + } + } - // If we see a NEXT and a CURRENT, treat the CURRENT just like a NEXT - if (node.getOp() == Op.NEXT_VALUE) { - isNextSequence.set(expression.getIndex()); - } + @Override + public long getSequenceValue(int index) { + return dstSequenceValues[index]; + } + } + + public SequenceValueExpression newSequenceReference(SequenceValueParseNode node) + throws SQLException { + PName tenantName = statement.getConnection().getTenantId(); + String tenantId = tenantName == null ? null : tenantName.getString(); + TableName tableName = node.getTableName(); + if (tableName.getSchemaName() == null && statement.getConnection().getSchema() != null) { + tableName = TableName.create(statement.getConnection().getSchema(), tableName.getTableName()); + } + int nSaltBuckets = statement.getConnection().getQueryServices().getSequenceSaltBuckets(); + ParseNode numToAllocateNode = node.getNumToAllocateNode(); + + Expression numToAllocateExp = numToAllocateExpression(tableName, numToAllocateNode); + SequenceKey key = + new SequenceKey(tenantId, tableName.getSchemaName(), tableName.getTableName(), nSaltBuckets); + + SequenceValueExpression expression = sequenceMap.get(key); + if (expression == null) { + int index = sequenceMap.size(); + expression = new SequenceValueExpression(key, node.getOp(), index, numToAllocateExp); + } else { + // Add the new numToAllocateExp to the expression + SequenceValueExpression oldExpression = expression; + expression = new SequenceValueExpression(oldExpression, node.getOp(), numToAllocateExp); + } + sequenceMap.put(key, expression); - return expression; + // If we see a NEXT and a CURRENT, treat the CURRENT just like a NEXT + if (node.getOp() == Op.NEXT_VALUE) { + isNextSequence.set(expression.getIndex()); } - private Expression numToAllocateExpression(TableName tableName, ParseNode numToAllocateNode) throws SQLException { - if (numToAllocateNode != null) { - final StatementContext context = new StatementContext(statement); - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - return numToAllocateNode.accept(expressionCompiler); - } else { - // Standard Sequence Allocation Behavior - return LiteralExpression.newConstant(SequenceUtil.DEFAULT_NUM_SLOTS_TO_ALLOCATE); - } + return expression; + } + + private Expression numToAllocateExpression(TableName tableName, ParseNode numToAllocateNode) + throws SQLException { + if (numToAllocateNode != null) { + final StatementContext context = new StatementContext(statement); + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + return numToAllocateNode.accept(expressionCompiler); + } else { + // Standard Sequence Allocation Behavior + return LiteralExpression.newConstant(SequenceUtil.DEFAULT_NUM_SLOTS_TO_ALLOCATE); + } + } + + /** + * If caller specified used NEXT VALUES FOR expression then we have set the + * numToAllocate. If numToAllocate is > 1 we treat this as a bulk reservation of a block of + * sequence slots. 
+ * @throws a SQLException if we can't evaluate the expression + */ + private long determineNumToAllocate(SequenceValueExpression expression) throws SQLException { + + final StatementContext context = new StatementContext(statement); + long maxNumToAllocate = 0; + for (Expression numToAllocateExp : expression.getNumToAllocateExpressions()) { + ImmutableBytesWritable ptr = context.getTempPtr(); + numToAllocateExp.evaluate(null, ptr); + if (ptr.getLength() == 0 || !numToAllocateExp.getDataType().isCoercibleTo(PLong.INSTANCE)) { + throw SequenceUtil.getException(expression.getKey().getSchemaName(), + expression.getKey().getSequenceName(), + SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT); + } + + // Parse and make sure it is greater than 0. We don't support allocating 0 or negative + // values! + long numToAllocate = (long) PLong.INSTANCE.toObject(ptr, numToAllocateExp.getDataType()); + if (numToAllocate < 1) { + throw SequenceUtil.getException(expression.getKey().getSchemaName(), + expression.getKey().getSequenceName(), + SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT); + } + if (numToAllocate > maxNumToAllocate) { + maxNumToAllocate = numToAllocate; + } } - /** - * If caller specified used NEXT VALUES FOR expression then we have set the numToAllocate. - * If numToAllocate is > 1 we treat this as a bulk reservation of a block of sequence slots. - * - * @throws a SQLException if we can't evaluate the expression - */ - private long determineNumToAllocate(SequenceValueExpression expression) - throws SQLException { - - final StatementContext context = new StatementContext(statement); - long maxNumToAllocate = 0; - for (Expression numToAllocateExp : expression.getNumToAllocateExpressions()) { - ImmutableBytesWritable ptr = context.getTempPtr(); - numToAllocateExp.evaluate(null, ptr); - if (ptr.getLength() == 0 || !numToAllocateExp.getDataType().isCoercibleTo(PLong.INSTANCE)) { - throw SequenceUtil.getException(expression.getKey().getSchemaName(), - expression.getKey().getSequenceName(), - SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT); - } - - // Parse and make sure it is greater than 0. We don't support allocating 0 or negative values! 
- long numToAllocate = (long) PLong.INSTANCE.toObject(ptr, numToAllocateExp.getDataType()); - if (numToAllocate < 1) { - throw SequenceUtil.getException(expression.getKey().getSchemaName(), - expression.getKey().getSequenceName(), - SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT); - } - if (numToAllocate > maxNumToAllocate) { - maxNumToAllocate = numToAllocate; - } - } + return maxNumToAllocate; + } - return maxNumToAllocate; + public void validateSequences(Sequence.ValueOp action) throws SQLException { + if (action == Sequence.ValueOp.NOOP) { + return; + } + if (sequenceMap.isEmpty()) { + return; } + int maxSize = sequenceMap.size(); + long[] dstSequenceValues = new long[maxSize]; + sequencePosition = new int[maxSize]; + nextSequences = Lists.newArrayListWithExpectedSize(maxSize); + currentSequences = Lists.newArrayListWithExpectedSize(maxSize); + for (Map.Entry entry : sequenceMap.entrySet()) { + if (isNextSequence.get(entry.getValue().getIndex())) { + nextSequences + .add(new SequenceAllocation(entry.getKey(), determineNumToAllocate(entry.getValue()))); + } else { + currentSequences.add(entry.getKey()); + } + } + long[] srcSequenceValues = new long[nextSequences.size()]; + SQLException[] sqlExceptions = new SQLException[nextSequences.size()]; - public void validateSequences(Sequence.ValueOp action) throws SQLException { - if (action == Sequence.ValueOp.NOOP) { - return; - } - if (sequenceMap.isEmpty()) { - return; - } - int maxSize = sequenceMap.size(); - long[] dstSequenceValues = new long[maxSize]; - sequencePosition = new int[maxSize]; - nextSequences = Lists.newArrayListWithExpectedSize(maxSize); - currentSequences = Lists.newArrayListWithExpectedSize(maxSize); - for (Map.Entry entry : sequenceMap.entrySet()) { - if (isNextSequence.get(entry.getValue().getIndex())) { - nextSequences.add(new SequenceAllocation(entry.getKey(), - determineNumToAllocate(entry.getValue()))); - } else { - currentSequences.add(entry.getKey()); - } - } - long[] srcSequenceValues = new long[nextSequences.size()]; - SQLException[] sqlExceptions = new SQLException[nextSequences.size()]; - - // Sort the next sequences to prevent deadlocks - Collections.sort(nextSequences); - - // Create reverse indexes - for (int i = 0; i < nextSequences.size(); i++) { - sequencePosition[i] = sequenceMap.get(nextSequences.get(i).getSequenceKey()).getIndex(); - } - int offset = nextSequences.size(); - for (int i = 0; i < currentSequences.size(); i++) { - sequencePosition[i+offset] = sequenceMap.get(currentSequences.get(i)).getIndex(); - } - ConnectionQueryServices services = this.statement.getConnection().getQueryServices(); - Long scn = statement.getConnection().getSCN(); - long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - services.validateSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions, action); - setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions); + // Sort the next sequences to prevent deadlocks + Collections.sort(nextSequences); + + // Create reverse indexes + for (int i = 0; i < nextSequences.size(); i++) { + sequencePosition[i] = sequenceMap.get(nextSequences.get(i).getSequenceKey()).getIndex(); + } + int offset = nextSequences.size(); + for (int i = 0; i < currentSequences.size(); i++) { + sequencePosition[i + offset] = sequenceMap.get(currentSequences.get(i)).getIndex(); } - -} - + ConnectionQueryServices services = this.statement.getConnection().getQueryServices(); + Long scn = statement.getConnection().getSCN(); + long timestamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; + services.validateSequences(nextSequences, timestamp, srcSequenceValues, sqlExceptions, action); + setSequenceValues(srcSequenceValues, dstSequenceValues, sqlExceptions); + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceValueExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceValueExpression.java index 18ec87c2ad2..1db23d71f16 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceValueExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SequenceValueExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,80 +34,81 @@ import org.apache.phoenix.util.SchemaUtil; public class SequenceValueExpression extends BaseTerminalExpression { - private final SequenceKey key; - final Op op; - private final int index; - private final Set numToAllocateExpressions = new HashSet<>(); + private final SequenceKey key; + final Op op; + private final int index; + private final Set numToAllocateExpressions = new HashSet<>(); - public SequenceValueExpression(SequenceKey key, Op op, int index, Expression numToAllocateExp) { - this.key = key; - this.op = op; - this.index = index; - this.numToAllocateExpressions.add(numToAllocateExp); - } + public SequenceValueExpression(SequenceKey key, Op op, int index, Expression numToAllocateExp) { + this.key = key; + this.op = op; + this.index = index; + this.numToAllocateExpressions.add(numToAllocateExp); + } - public SequenceValueExpression(SequenceValueExpression seqIn, Op op, Expression numToAllocateExp) { - this.key = seqIn.getKey(); - this.op = op; - this.index = seqIn.getIndex(); - this.numToAllocateExpressions.addAll(seqIn.numToAllocateExpressions); - this.numToAllocateExpressions.add(numToAllocateExp); - } + public SequenceValueExpression(SequenceValueExpression seqIn, Op op, + Expression numToAllocateExp) { + this.key = seqIn.getKey(); + this.op = op; + this.index = seqIn.getIndex(); + this.numToAllocateExpressions.addAll(seqIn.numToAllocateExpressions); + this.numToAllocateExpressions.add(numToAllocateExp); + } - public Set getNumToAllocateExpressions() { - return numToAllocateExpressions; - } - - public SequenceKey getKey() { - return key; - } - - public int getIndex() { - return index; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()]; - PLong.INSTANCE.getCodec().encodeLong(tuple.getSequenceValue(index), valueBuffer, 0); - ptr.set(valueBuffer); - return true; - } + public Set getNumToAllocateExpressions() { + return numToAllocateExpressions; + } - @Override - public PDataType getDataType() { - return PLong.INSTANCE; - } - - @Override - public boolean isNullable() { - return false; - } - - @Override - public Determinism getDeterminism() { - return Determinism.PER_ROW; - } - - @Override - public boolean isStateless() { - return true; - } + public SequenceKey getKey() { + return key; + } - @Override - public String toString() { - String sequenceQualifiedName = - SchemaUtil.getTableName(key.getSchemaName(), key.getSequenceName()); - if (op == Op.CURRENT_VALUE) { - return 
op.getName() + " VALUE " + "FOR " + sequenceQualifiedName; - } else { - return op.getName() + Arrays.toString(getNumToAllocateExpressions().toArray()) - + " VALUE(S) " + "FOR " + sequenceQualifiedName; - } - } + public int getIndex() { + return index; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()]; + PLong.INSTANCE.getCodec().encodeLong(tuple.getSequenceValue(index), valueBuffer, 0); + ptr.set(valueBuffer); + return true; + } + + @Override + public PDataType getDataType() { + return PLong.INSTANCE; + } - @Override - public T accept(ExpressionVisitor visitor) { - return visitor.visit(this); + @Override + public boolean isNullable() { + return false; + } + + @Override + public Determinism getDeterminism() { + return Determinism.PER_ROW; + } + + @Override + public boolean isStateless() { + return true; + } + + @Override + public String toString() { + String sequenceQualifiedName = + SchemaUtil.getTableName(key.getSchemaName(), key.getSequenceName()); + if (op == Op.CURRENT_VALUE) { + return op.getName() + " VALUE " + "FOR " + sequenceQualifiedName; + } else { + return op.getName() + Arrays.toString(getNumToAllocateExpressions().toArray()) + " VALUE(S) " + + "FOR " + sequenceQualifiedName; } -} \ No newline at end of file + } + + @Override + public T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildIndexCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildIndexCompiler.java index bd8dcd480ca..59080e6b214 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildIndexCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildIndexCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,13 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; +import static org.apache.phoenix.util.ScanUtil.addEmptyColumnToScan; + +import java.sql.SQLException; +import java.util.Collections; +import java.util.Set; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -40,123 +47,130 @@ import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ScanUtil; -import java.sql.SQLException; -import java.util.Collections; -import java.util.Set; - -import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; -import static org.apache.phoenix.util.ScanUtil.addEmptyColumnToScan; - - /** - * Class that compiles plan to generate initial data values after a DDL command for - * index table. + * Class that compiles plan to generate initial data values after a DDL command for index table. 
*/ public class ServerBuildIndexCompiler { - protected final PhoenixConnection connection; - protected final String tableName; - protected PTable dataTable; - protected QueryPlan plan; + protected final PhoenixConnection connection; + protected final String tableName; + protected PTable dataTable; + protected QueryPlan plan; - protected class RowCountMutationPlan extends BaseMutationPlan { - protected RowCountMutationPlan(StatementContext context, PhoenixStatement.Operation operation) { - super(context, operation); - } - @Override - public MutationState execute() throws SQLException { - connection.getMutationState().commitDDLFence(dataTable); - Tuple tuple = plan.iterator().next(); - long rowCount = 0; - if (tuple != null) { - Cell kv = tuple.getValue(0); - ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - // A single Cell will be returned with the count(*) - we decode that here - rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); - } - // The contract is to return a MutationState that contains the number of rows modified. In this - // case, it's the number of rows in the data table which corresponds to the number of index - // rows that were added. - return new MutationState(0, 0, connection, rowCount); - } + protected class RowCountMutationPlan extends BaseMutationPlan { + protected RowCountMutationPlan(StatementContext context, PhoenixStatement.Operation operation) { + super(context, operation); + } - @Override - public QueryPlan getQueryPlan() { - return plan; - } - }; - - public ServerBuildIndexCompiler(PhoenixConnection connection, String tableName) { - this.connection = connection; - this.tableName = tableName; + @Override + public MutationState execute() throws SQLException { + connection.getMutationState().commitDDLFence(dataTable); + Tuple tuple = plan.iterator().next(); + long rowCount = 0; + if (tuple != null) { + Cell kv = tuple.getValue(0); + ImmutableBytesWritable tmpPtr = + new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + // A single Cell will be returned with the count(*) - we decode that here + rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); + } + // The contract is to return a MutationState that contains the number of rows modified. In + // this + // case, it's the number of rows in the data table which corresponds to the number of index + // rows that were added. 
+ return new MutationState(0, 0, connection, rowCount); } - private static void addColumnsToScan(Set columns, Scan scan, PTable index) { - for (ColumnReference columnRef : columns) { - if (index.getImmutableStorageScheme() == - PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - scan.addFamily(columnRef.getFamily()); - } else { - scan.addColumn(columnRef.getFamily(), columnRef.getQualifier()); - } - } + @Override + public QueryPlan getQueryPlan() { + return plan; + } + }; + + public ServerBuildIndexCompiler(PhoenixConnection connection, String tableName) { + this.connection = connection; + this.tableName = tableName; + } + + private static void addColumnsToScan(Set columns, Scan scan, PTable index) { + for (ColumnReference columnRef : columns) { + if ( + index.getImmutableStorageScheme() + == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ) { + scan.addFamily(columnRef.getFamily()); + } else { + scan.addColumn(columnRef.getFamily(), columnRef.getQualifier()); + } } - public MutationPlan compile(PTable index) throws SQLException { - try (final PhoenixStatement statement = new PhoenixStatement(connection)) { - String query = "SELECT /*+ NO_INDEX */ count(*) FROM " + tableName; - this.plan = statement.compileQuery(query); - TableRef tableRef = plan.getTableRef(); - Scan scan = plan.getContext().getScan(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - dataTable = tableRef.getTable(); - if (IndexUtil.isGlobalIndex(index) && dataTable.isTransactional()) { - throw new IllegalArgumentException( - "ServerBuildIndexCompiler does not support global indexes on transactional tables"); - } - IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection); - // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*). - // However, in this case, we need to project all of the data columns that contribute to the index. - addColumnsToScan(indexMaintainer.getAllColumns(), scan, index); - if (indexMaintainer.getIndexWhereColumns() != null) { - addColumnsToScan(indexMaintainer.getIndexWhereColumns(), scan, index); - } - IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(index), plan.getContext().getConnection()); - scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, - index.getTableName().getBytes()); - ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); - // Set the scan attributes that UngroupedAggregateRegionObserver will switch on. - // For local indexes, the BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO attribute, and - // for global indexes PhoenixIndexCodec.INDEX_PROTO_MD attribute is set to the serialized form of index - // metadata to build index rows from data table rows. 
For global indexes, we also need to set (1) the - // BaseScannerRegionObserver.REBUILD_INDEXES attribute in order to signal UngroupedAggregateRegionObserver - // that this scan is for building global indexes and (2) the MetaDataProtocol.PHOENIX_VERSION attribute - // that will be passed as a mutation attribute for the scanned mutations that will be applied on - // the index table possibly remotely - if (index.getIndexType() == PTable.IndexType.LOCAL) { - scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr)); - } else { - scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); - scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - ScanUtil.setScanAttributeForMaxLookbackAge(scan, dataTable.getMaxLookbackAge()); - ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); - // Serialize page row size only if we're overriding, else use server side value - String rebuildPageRowSize = - connection.getQueryServices().getProps() - .get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); - if (rebuildPageRowSize != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, - Bytes.toBytes(Long.parseLong(rebuildPageRowSize))); - } - BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); - addEmptyColumnToScan(scan, indexMaintainer.getDataEmptyKeyValueCF(), indexMaintainer.getEmptyKeyValueQualifier()); - } - if (dataTable.isTransactional()) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, connection.getMutationState().encodeTransaction()); - } + } - // Go through MutationPlan abstraction so that we can create local indexes - // with a connectionless connection (which makes testing easier). - return new RowCountMutationPlan(plan.getContext(), PhoenixStatement.Operation.UPSERT); + public MutationPlan compile(PTable index) throws SQLException { + try (final PhoenixStatement statement = new PhoenixStatement(connection)) { + String query = "SELECT /*+ NO_INDEX */ count(*) FROM " + tableName; + this.plan = statement.compileQuery(query); + TableRef tableRef = plan.getTableRef(); + Scan scan = plan.getContext().getScan(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + dataTable = tableRef.getTable(); + if (IndexUtil.isGlobalIndex(index) && dataTable.isTransactional()) { + throw new IllegalArgumentException( + "ServerBuildIndexCompiler does not support global indexes on transactional tables"); + } + IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection); + // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for + // count(*). + // However, in this case, we need to project all of the data columns that contribute to the + // index. + addColumnsToScan(indexMaintainer.getAllColumns(), scan, index); + if (indexMaintainer.getIndexWhereColumns() != null) { + addColumnsToScan(indexMaintainer.getIndexWhereColumns(), scan, index); + } + IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(index), + plan.getContext().getConnection()); + scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, + index.getTableName().getBytes()); + ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); + // Set the scan attributes that UngroupedAggregateRegionObserver will switch on. 
+ // For local indexes, the BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO attribute, and + // for global indexes PhoenixIndexCodec.INDEX_PROTO_MD attribute is set to the serialized form + // of index + // metadata to build index rows from data table rows. For global indexes, we also need to set + // (1) the + // BaseScannerRegionObserver.REBUILD_INDEXES attribute in order to signal + // UngroupedAggregateRegionObserver + // that this scan is for building global indexes and (2) the MetaDataProtocol.PHOENIX_VERSION + // attribute + // that will be passed as a mutation attribute for the scanned mutations that will be applied + // on + // the index table possibly remotely + if (index.getIndexType() == PTable.IndexType.LOCAL) { + scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, + ByteUtil.copyKeyBytesIfNecessary(ptr)); + } else { + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); + scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); + ScanUtil.setScanAttributeForMaxLookbackAge(scan, dataTable.getMaxLookbackAge()); + ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); + // Serialize page row size only if we're overriding, else use server side value + String rebuildPageRowSize = connection.getQueryServices().getProps() + .get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); + if (rebuildPageRowSize != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, + Bytes.toBytes(Long.parseLong(rebuildPageRowSize))); } + BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); + addEmptyColumnToScan(scan, indexMaintainer.getDataEmptyKeyValueCF(), + indexMaintainer.getEmptyKeyValueQualifier()); + } + if (dataTable.isTransactional()) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, + connection.getMutationState().encodeTransaction()); + } + + // Go through MutationPlan abstraction so that we can create local indexes + // with a connectionless connection (which makes testing easier). + return new RowCountMutationPlan(plan.getContext(), PhoenixStatement.Operation.UPSERT); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildTransformingTableCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildTransformingTableCompiler.java index 71f50beef6b..4d47f825437 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildTransformingTableCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/ServerBuildTransformingTableCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; +import static org.apache.phoenix.util.ScanUtil.addEmptyColumnToScan; + +import java.sql.SQLException; + import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -36,65 +41,61 @@ import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; -import java.sql.SQLException; - -import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; -import static org.apache.phoenix.util.ScanUtil.addEmptyColumnToScan; - - /** - * Class that compiles plan to generate initial data values after a DDL command for - * transforming table (new table). + * Class that compiles plan to generate initial data values after a DDL command for transforming + * table (new table). */ public class ServerBuildTransformingTableCompiler extends ServerBuildIndexCompiler { - public ServerBuildTransformingTableCompiler(PhoenixConnection connection, String tableName) { - super(connection, tableName); - } + public ServerBuildTransformingTableCompiler(PhoenixConnection connection, String tableName) { + super(connection, tableName); + } - public MutationPlan compile(PTable newTable) throws SQLException { - try (final PhoenixStatement statement = new PhoenixStatement(connection)) { - String query = "SELECT /*+ NO_INDEX */ count(*) FROM " + tableName; - this.plan = statement.compileQuery(query); - TableRef tableRef = plan.getTableRef(); - Scan scan = plan.getContext().getScan(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - dataTable = tableRef.getTable(); + public MutationPlan compile(PTable newTable) throws SQLException { + try (final PhoenixStatement statement = new PhoenixStatement(connection)) { + String query = "SELECT /*+ NO_INDEX */ count(*) FROM " + tableName; + this.plan = statement.compileQuery(query); + TableRef tableRef = plan.getTableRef(); + Scan scan = plan.getContext().getScan(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + dataTable = tableRef.getTable(); - // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*). - // However, in this case, we need to project all of the data columns - for (PColumnFamily family : dataTable.getColumnFamilies()) { - scan.addFamily(family.getName().getBytes()); - } + // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for + // count(*). 
+ // However, in this case, we need to project all of the data columns + for (PColumnFamily family : dataTable.getColumnFamilies()) { + scan.addFamily(family.getName().getBytes()); + } - scan.setAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING, TRUE_BYTES); - TransformMaintainer.serialize(dataTable, ptr, newTable, plan.getContext().getConnection()); + scan.setAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING, TRUE_BYTES); + TransformMaintainer.serialize(dataTable, ptr, newTable, plan.getContext().getConnection()); - ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); - scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); - scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - ScanUtil.setScanAttributeForMaxLookbackAge(scan, dataTable.getMaxLookbackAge()); - ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); - // Serialize page row size only if we're overriding, else use server side value - String rebuildPageRowSize = - connection.getQueryServices().getProps() - .get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); - if (rebuildPageRowSize != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, - Bytes.toBytes(Long.valueOf(rebuildPageRowSize))); - } - BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); - PTable.QualifierEncodingScheme encodingScheme = newTable.getEncodingScheme(); - addEmptyColumnToScan(scan, SchemaUtil.getEmptyColumnFamily(newTable), EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst()); + ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); + scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); + ScanUtil.setScanAttributeForMaxLookbackAge(scan, dataTable.getMaxLookbackAge()); + ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); + // Serialize page row size only if we're overriding, else use server side value + String rebuildPageRowSize = + connection.getQueryServices().getProps().get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); + if (rebuildPageRowSize != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, + Bytes.toBytes(Long.valueOf(rebuildPageRowSize))); + } + BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); + PTable.QualifierEncodingScheme encodingScheme = newTable.getEncodingScheme(); + addEmptyColumnToScan(scan, SchemaUtil.getEmptyColumnFamily(newTable), + EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst()); - if (dataTable.isTransactional()) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, connection.getMutationState().encodeTransaction()); - } + if (dataTable.isTransactional()) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, + connection.getMutationState().encodeTransaction()); + } - // Go through MutationPlan abstraction so that we can create local indexes - // with a connectionless connection (which makes testing easier). - return new RowCountMutationPlan(plan.getContext(), PhoenixStatement.Operation.UPSERT); - } + // Go through MutationPlan abstraction so that we can create local indexes + // with a connectionless connection (which makes testing easier). 
+ return new RowCountMutationPlan(plan.getContext(), PhoenixStatement.Operation.UPSERT); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatelessExpressionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatelessExpressionCompiler.java index 1245532c88d..34b6193be14 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatelessExpressionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatelessExpressionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,35 +23,33 @@ /** * A ExpressionCompiler which does not pollute {@link StatementContext} - * */ -public class StatelessExpressionCompiler extends ExpressionCompiler{ +public class StatelessExpressionCompiler extends ExpressionCompiler { - public StatelessExpressionCompiler(StatementContext context, - boolean resolveViewConstants) { - super(context, resolveViewConstants); - } + public StatelessExpressionCompiler(StatementContext context, boolean resolveViewConstants) { + super(context, resolveViewConstants); + } - public StatelessExpressionCompiler(StatementContext context, - GroupBy groupBy, boolean resolveViewConstants) { - super(context, groupBy, resolveViewConstants); - } + public StatelessExpressionCompiler(StatementContext context, GroupBy groupBy, + boolean resolveViewConstants) { + super(context, groupBy, resolveViewConstants); + } - public StatelessExpressionCompiler(StatementContext context, GroupBy groupBy) { - super(context, groupBy); - } + public StatelessExpressionCompiler(StatementContext context, GroupBy groupBy) { + super(context, groupBy); + } - public StatelessExpressionCompiler(StatementContext context) { - super(context); - } + public StatelessExpressionCompiler(StatementContext context) { + super(context); + } - @Override - protected Expression addExpression(Expression expression) { - return expression; - } + @Override + protected Expression addExpression(Expression expression) { + return expression; + } - @Override - protected void addColumn(PColumn column) { + @Override + protected void addColumn(PColumn column) { - } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementContext.java index 3226da69fda..9389b81e60d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TimeZone; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.client.Scan; @@ -53,380 +52,387 @@ import org.apache.phoenix.util.NumberUtil; import org.apache.phoenix.util.ReadOnlyProps; - /** - * - * Class that keeps common state used across processing the various clauses in a - * top level JDBC statement such as SELECT, UPSERT, DELETE, etc. - * - * + * Class that keeps common state used across processing the various clauses in a top level JDBC + * statement such as SELECT, UPSERT, DELETE, etc. * @since 0.1 */ public class StatementContext { - private ColumnResolver resolver; - private final PhoenixConnection connection; - private final BindManager binds; - private final Scan scan; - private final ExpressionManager expressions; - private final AggregationManager aggregates; - private final String numberFormat; - private final ImmutableBytesWritable tempPtr; - private final PhoenixStatement statement; - private final Map dataColumns; - private Map retryingPersistentCache; - - private long currentTime = QueryConstants.UNSET_TIMESTAMP; - private ScanRanges scanRanges = ScanRanges.EVERYTHING; - private final SequenceManager sequences; - - private TableRef currentTable; - private List> whereConditionColumns; - private Map subqueryResults; - private final ReadMetricQueue readMetricsQueue; - private final OverAllQueryMetrics overAllQueryMetrics; - private QueryLogger queryLogger; - private boolean isClientSideUpsertSelect; - private boolean isUncoveredIndex; - private String cdcIncludeScopes; - private TableRef cdcTableRef; - private TableRef cdcDataTableRef; - private AtomicBoolean hasFirstValidResult; - private Set subStatementContexts; - - public StatementContext(PhoenixStatement statement) { - this(statement, new Scan()); - } - public StatementContext(StatementContext context) { - this.resolver = context.resolver; - this.connection = context.connection; - this.binds = context.binds; - this.scan = context.scan; - this.expressions = context.expressions; - this.aggregates = context.aggregates; - this.numberFormat = context.numberFormat; - this.tempPtr = context.tempPtr; - this.statement = context.statement; - this.dataColumns = context.dataColumns; - this.retryingPersistentCache = context.retryingPersistentCache; - this.currentTime = context.currentTime; - this.scanRanges = context.scanRanges; - this.sequences = context.sequences; - this.currentTable = context.currentTable; - this.whereConditionColumns = context.whereConditionColumns; - this.subqueryResults = context.subqueryResults; - this.readMetricsQueue = context.readMetricsQueue; - this.overAllQueryMetrics = context.overAllQueryMetrics; - this.queryLogger = context.queryLogger; - this.isClientSideUpsertSelect = context.isClientSideUpsertSelect; - this.isUncoveredIndex = context.isUncoveredIndex; - this.hasFirstValidResult = new AtomicBoolean(context.getHasFirstValidResult()); - this.subStatementContexts = Sets.newHashSet(); - } - /** - * Constructor that lets you override whether or not to collect request level metrics. 
- */ - public StatementContext(PhoenixStatement statement, boolean collectRequestLevelMetrics) { - this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new BindManager(statement.getParameters()), new Scan(), new SequenceManager(statement), collectRequestLevelMetrics); - } - - public StatementContext(PhoenixStatement statement, Scan scan) { - this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new BindManager(statement.getParameters()), scan, new SequenceManager(statement)); - } - - public StatementContext(PhoenixStatement statement, ColumnResolver resolver) { - this(statement, resolver, new BindManager(statement.getParameters()), new Scan(), new SequenceManager(statement)); - } - - public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, SequenceManager seqManager) { - this(statement, resolver, new BindManager(statement.getParameters()), scan, seqManager); - } - - public StatementContext(PhoenixStatement statement, ColumnResolver resolver, BindManager binds, Scan scan, SequenceManager seqManager) { - this(statement, resolver, binds, scan, seqManager, statement.getConnection().isRequestLevelMetricsEnabled()); - } - - public StatementContext(PhoenixStatement statement, ColumnResolver resolver, BindManager binds, Scan scan, SequenceManager seqManager, boolean isRequestMetricsEnabled) { - this.statement = statement; - this.resolver = resolver; - this.scan = scan; - this.sequences = seqManager; - this.binds = binds; - this.aggregates = new AggregationManager(); - this.expressions = new ExpressionManager(); - this.connection = statement.getConnection(); - ReadOnlyProps props = connection.getQueryServices().getProps(); - this.numberFormat = props.get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT); - this.tempPtr = new ImmutableBytesWritable(); - this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null; - this.whereConditionColumns = new ArrayList>(); - this.dataColumns = this.currentTable == null ? Collections. emptyMap() : Maps - . newLinkedHashMap(); - this.subqueryResults = Maps. newConcurrentMap(); - this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled,connection.getLogLevel()); - this.overAllQueryMetrics = new OverAllQueryMetrics(isRequestMetricsEnabled,connection.getLogLevel()); - this.retryingPersistentCache = Maps. newHashMap(); - this.hasFirstValidResult = new AtomicBoolean(false); - this.subStatementContexts = Sets.newHashSet(); - } - - /** - * build map from dataColumn to what will be its position in single KeyValue value bytes - * returned from the coprocessor that joins from the index row back to the data row. - * @param column - * @return - */ - public int getDataColumnPosition(PColumn column) { - Integer pos = dataColumns.get(column); - if (pos == null) { - pos = dataColumns.size(); - dataColumns.put(column, pos); - } - return pos; - } - - /** - * @return return set of data columns. - */ - public Set getDataColumns() { - return dataColumns.keySet(); - } - - /** - * @return map of data columns and their positions. 
- */ - public Map getDataColumnsMap() { - return dataColumns; - } - - public String getDateFormatTimeZoneId() { - return connection.getDateFormatTimeZoneId(); - } - - public String getDateFormat() { - return connection.getDatePattern(); - } - - public Format getDateFormatter() { - return connection.getFormatter(PDate.INSTANCE); - } - - public String getTimeFormat() { - return connection.getTimePattern(); - } - - public Format getTimeFormatter() { - return connection.getFormatter(PTime.INSTANCE); - } - - public String getTimestampFormat() { - return connection.getTimestampPattern(); - } - - public Format getTimestampFormatter() { - return connection.getFormatter(PTimestamp.INSTANCE); - } - - public String getNumberFormat() { - return numberFormat; - } - - public Scan getScan() { - return scan; - } - - public BindManager getBindManager() { - return binds; - } - - public TableRef getCurrentTable() { - return currentTable; - } - - public boolean getHasFirstValidResult() { - return hasFirstValidResult.get(); - } - - public void setHasFirstValidResult(boolean hasValidResult) { - hasFirstValidResult.set(hasValidResult); - } - - public void setCurrentTable(TableRef table) { - this.currentTable = table; - } - - public AggregationManager getAggregationManager() { - return aggregates; - } - - public ColumnResolver getResolver() { - return resolver; - } - - public void setResolver(ColumnResolver resolver) { - this.resolver = resolver; - } - - public ExpressionManager getExpressionManager() { - return expressions; - } - - - public ImmutableBytesWritable getTempPtr() { - return tempPtr; - } - - public ScanRanges getScanRanges() { - return this.scanRanges; - } - - public void setScanRanges(ScanRanges scanRanges) { - this.scanRanges = scanRanges; - scanRanges.initializeScan(scan); - } - - public PhoenixConnection getConnection() { - return connection; - } - - public PhoenixStatement getStatement() { - return statement; - } - - public long getCurrentTime() throws SQLException { - long ts = this.getCurrentTable().getTimeStamp(); - // if the table is transactional then it is only resolved once per query, so we can't use the table timestamp - if (this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY - && this.getCurrentTable().getTable().getType() != PTableType.PROJECTED - && !this.getCurrentTable().getTable().isTransactional() - && ts != QueryConstants.UNSET_TIMESTAMP) { - return ts; - } - if (currentTime != QueryConstants.UNSET_TIMESTAMP) { - return currentTime; - } - /* - * For an UPSERT VALUES where autocommit off, we won't hit the server until the commit. - * However, if the statement has a CURRENT_DATE() call as a value, we need to know the - * current time at execution time. In that case, we'll call MetaDataClient.updateCache - * purely to bind the current time based on the server time. 
- */ - PTable table = this.getCurrentTable().getTable(); - MetaDataClient client = new MetaDataClient(connection); - currentTime = client.getCurrentTime(table.getSchemaName().getString(), table.getTableName().getString()); - return currentTime; - } - - public long getCurrentTimeWithDisplacement() throws SQLException { - if (connection.isApplyTimeZoneDisplacement()) { - return DateUtil.applyInputDisplacement(new java.sql.Date(getCurrentTime()), - statement.getLocalCalendar().getTimeZone()).getTime(); - } else { - return getCurrentTime(); - } + private ColumnResolver resolver; + private final PhoenixConnection connection; + private final BindManager binds; + private final Scan scan; + private final ExpressionManager expressions; + private final AggregationManager aggregates; + private final String numberFormat; + private final ImmutableBytesWritable tempPtr; + private final PhoenixStatement statement; + private final Map dataColumns; + private Map retryingPersistentCache; + + private long currentTime = QueryConstants.UNSET_TIMESTAMP; + private ScanRanges scanRanges = ScanRanges.EVERYTHING; + private final SequenceManager sequences; + + private TableRef currentTable; + private List> whereConditionColumns; + private Map subqueryResults; + private final ReadMetricQueue readMetricsQueue; + private final OverAllQueryMetrics overAllQueryMetrics; + private QueryLogger queryLogger; + private boolean isClientSideUpsertSelect; + private boolean isUncoveredIndex; + private String cdcIncludeScopes; + private TableRef cdcTableRef; + private TableRef cdcDataTableRef; + private AtomicBoolean hasFirstValidResult; + private Set subStatementContexts; + + public StatementContext(PhoenixStatement statement) { + this(statement, new Scan()); + } + + public StatementContext(StatementContext context) { + this.resolver = context.resolver; + this.connection = context.connection; + this.binds = context.binds; + this.scan = context.scan; + this.expressions = context.expressions; + this.aggregates = context.aggregates; + this.numberFormat = context.numberFormat; + this.tempPtr = context.tempPtr; + this.statement = context.statement; + this.dataColumns = context.dataColumns; + this.retryingPersistentCache = context.retryingPersistentCache; + this.currentTime = context.currentTime; + this.scanRanges = context.scanRanges; + this.sequences = context.sequences; + this.currentTable = context.currentTable; + this.whereConditionColumns = context.whereConditionColumns; + this.subqueryResults = context.subqueryResults; + this.readMetricsQueue = context.readMetricsQueue; + this.overAllQueryMetrics = context.overAllQueryMetrics; + this.queryLogger = context.queryLogger; + this.isClientSideUpsertSelect = context.isClientSideUpsertSelect; + this.isUncoveredIndex = context.isUncoveredIndex; + this.hasFirstValidResult = new AtomicBoolean(context.getHasFirstValidResult()); + this.subStatementContexts = Sets.newHashSet(); + } + + /** + * Constructor that lets you override whether or not to collect request level metrics. 
+ */ + public StatementContext(PhoenixStatement statement, boolean collectRequestLevelMetrics) { + this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new BindManager(statement.getParameters()), + new Scan(), new SequenceManager(statement), collectRequestLevelMetrics); + } + + public StatementContext(PhoenixStatement statement, Scan scan) { + this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new BindManager(statement.getParameters()), + scan, new SequenceManager(statement)); + } + + public StatementContext(PhoenixStatement statement, ColumnResolver resolver) { + this(statement, resolver, new BindManager(statement.getParameters()), new Scan(), + new SequenceManager(statement)); + } + + public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, + SequenceManager seqManager) { + this(statement, resolver, new BindManager(statement.getParameters()), scan, seqManager); + } + + public StatementContext(PhoenixStatement statement, ColumnResolver resolver, BindManager binds, + Scan scan, SequenceManager seqManager) { + this(statement, resolver, binds, scan, seqManager, + statement.getConnection().isRequestLevelMetricsEnabled()); + } + + public StatementContext(PhoenixStatement statement, ColumnResolver resolver, BindManager binds, + Scan scan, SequenceManager seqManager, boolean isRequestMetricsEnabled) { + this.statement = statement; + this.resolver = resolver; + this.scan = scan; + this.sequences = seqManager; + this.binds = binds; + this.aggregates = new AggregationManager(); + this.expressions = new ExpressionManager(); + this.connection = statement.getConnection(); + ReadOnlyProps props = connection.getQueryServices().getProps(); + this.numberFormat = + props.get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT); + this.tempPtr = new ImmutableBytesWritable(); + this.currentTable = + resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null; + this.whereConditionColumns = new ArrayList>(); + this.dataColumns = this.currentTable == null + ? Collections. emptyMap() + : Maps. newLinkedHashMap(); + this.subqueryResults = Maps. newConcurrentMap(); + this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled, connection.getLogLevel()); + this.overAllQueryMetrics = + new OverAllQueryMetrics(isRequestMetricsEnabled, connection.getLogLevel()); + this.retryingPersistentCache = Maps. newHashMap(); + this.hasFirstValidResult = new AtomicBoolean(false); + this.subStatementContexts = Sets.newHashSet(); + } + + /** + * build map from dataColumn to what will be its position in single KeyValue value bytes returned + * from the coprocessor that joins from the index row back to the data row. + */ + public int getDataColumnPosition(PColumn column) { + Integer pos = dataColumns.get(column); + if (pos == null) { + pos = dataColumns.size(); + dataColumns.put(column, pos); + } + return pos; + } + + /** Returns return set of data columns. */ + public Set getDataColumns() { + return dataColumns.keySet(); + } + + /** Returns map of data columns and their positions. 
*/ + public Map getDataColumnsMap() { + return dataColumns; + } + + public String getDateFormatTimeZoneId() { + return connection.getDateFormatTimeZoneId(); + } + + public String getDateFormat() { + return connection.getDatePattern(); + } + + public Format getDateFormatter() { + return connection.getFormatter(PDate.INSTANCE); + } + + public String getTimeFormat() { + return connection.getTimePattern(); + } + + public Format getTimeFormatter() { + return connection.getFormatter(PTime.INSTANCE); + } + + public String getTimestampFormat() { + return connection.getTimestampPattern(); + } + + public Format getTimestampFormatter() { + return connection.getFormatter(PTimestamp.INSTANCE); + } + + public String getNumberFormat() { + return numberFormat; + } + + public Scan getScan() { + return scan; + } + + public BindManager getBindManager() { + return binds; + } + + public TableRef getCurrentTable() { + return currentTable; + } + + public boolean getHasFirstValidResult() { + return hasFirstValidResult.get(); + } + + public void setHasFirstValidResult(boolean hasValidResult) { + hasFirstValidResult.set(hasValidResult); + } + + public void setCurrentTable(TableRef table) { + this.currentTable = table; + } + + public AggregationManager getAggregationManager() { + return aggregates; + } + + public ColumnResolver getResolver() { + return resolver; + } + + public void setResolver(ColumnResolver resolver) { + this.resolver = resolver; + } + + public ExpressionManager getExpressionManager() { + return expressions; + } + + public ImmutableBytesWritable getTempPtr() { + return tempPtr; + } + + public ScanRanges getScanRanges() { + return this.scanRanges; + } + + public void setScanRanges(ScanRanges scanRanges) { + this.scanRanges = scanRanges; + scanRanges.initializeScan(scan); + } + + public PhoenixConnection getConnection() { + return connection; + } + + public PhoenixStatement getStatement() { + return statement; + } + + public long getCurrentTime() throws SQLException { + long ts = this.getCurrentTable().getTimeStamp(); + // if the table is transactional then it is only resolved once per query, so we can't use the + // table timestamp + if ( + this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY + && this.getCurrentTable().getTable().getType() != PTableType.PROJECTED + && !this.getCurrentTable().getTable().isTransactional() + && ts != QueryConstants.UNSET_TIMESTAMP + ) { + return ts; + } + if (currentTime != QueryConstants.UNSET_TIMESTAMP) { + return currentTime; } - - public SequenceManager getSequenceManager(){ - return sequences; - } - - public void addWhereConditionColumn(byte[] cf, byte[] q) { - whereConditionColumns.add(new Pair(cf, q)); - } - - public List> getWhereConditionColumns() { - return whereConditionColumns; - } - - public boolean isSubqueryResultAvailable(SelectStatement select) { - return subqueryResults.containsKey(select); - } - - public Object getSubqueryResult(SelectStatement select) { - return subqueryResults.get(select); - } - - public void setSubqueryResult(SelectStatement select, Object result) { - subqueryResults.put(select, result); - } - - public ReadMetricQueue getReadMetricsQueue() { - return readMetricsQueue; - } - - public OverAllQueryMetrics getOverallQueryMetrics() { - return overAllQueryMetrics; - } - - public void setQueryLogger(QueryLogger queryLogger) { - this.queryLogger=queryLogger; - } - - public QueryLogger getQueryLogger() { - return queryLogger; - } - - public boolean isClientSideUpsertSelect() { - return isClientSideUpsertSelect; - } - - 
public void setClientSideUpsertSelect(boolean isClientSideUpsertSelect) { - this.isClientSideUpsertSelect = isClientSideUpsertSelect; - } - - public boolean isUncoveredIndex() { - return isUncoveredIndex; - } - - public void setUncoveredIndex(boolean isUncoveredIndex) { - this.isUncoveredIndex = isUncoveredIndex; - } - /* - * setRetryingPersistentCache can be used to override the USE_PERSISTENT_CACHE hint and disable the use of the - * persistent cache for a specific cache ID. This can be used to retry queries that failed when using the persistent - * cache. + * For an UPSERT VALUES where autocommit off, we won't hit the server until the commit. However, + * if the statement has a CURRENT_DATE() call as a value, we need to know the current time at + * execution time. In that case, we'll call MetaDataClient.updateCache purely to bind the + * current time based on the server time. */ - public void setRetryingPersistentCache(long cacheId) { - retryingPersistentCache.put(cacheId, true); - } - - public boolean getRetryingPersistentCache(long cacheId) { - Boolean retrying = retryingPersistentCache.get(cacheId); - if (retrying == null) { - return false; - } else { - return retrying; - } - } - public String getEncodedCdcIncludeScopes() { - return cdcIncludeScopes; - } - - public void setCDCIncludeScopes(Set cdcIncludeScopes) { - this.cdcIncludeScopes = CDCUtil.makeChangeScopeStringFromEnums(cdcIncludeScopes); - } - - public TableRef getCDCDataTableRef() { - return cdcDataTableRef; - } - - public void setCDCDataTableRef(TableRef cdcDataTableRef) { - this.cdcDataTableRef = cdcDataTableRef; - } - - public TableRef getCDCTableRef() { - return cdcTableRef; - } - - public void setCDCTableRef(TableRef cdcTableRef) { - this.cdcTableRef = cdcTableRef; - } - - public void addSubStatementContext(StatementContext sub) { - subStatementContexts.add(sub); - } - - public Set getSubStatementContexts() { - return subStatementContexts; - } + PTable table = this.getCurrentTable().getTable(); + MetaDataClient client = new MetaDataClient(connection); + currentTime = + client.getCurrentTime(table.getSchemaName().getString(), table.getTableName().getString()); + return currentTime; + } + + public long getCurrentTimeWithDisplacement() throws SQLException { + if (connection.isApplyTimeZoneDisplacement()) { + return DateUtil.applyInputDisplacement(new java.sql.Date(getCurrentTime()), + statement.getLocalCalendar().getTimeZone()).getTime(); + } else { + return getCurrentTime(); + } + } + + public SequenceManager getSequenceManager() { + return sequences; + } + + public void addWhereConditionColumn(byte[] cf, byte[] q) { + whereConditionColumns.add(new Pair(cf, q)); + } + + public List> getWhereConditionColumns() { + return whereConditionColumns; + } + + public boolean isSubqueryResultAvailable(SelectStatement select) { + return subqueryResults.containsKey(select); + } + + public Object getSubqueryResult(SelectStatement select) { + return subqueryResults.get(select); + } + + public void setSubqueryResult(SelectStatement select, Object result) { + subqueryResults.put(select, result); + } + + public ReadMetricQueue getReadMetricsQueue() { + return readMetricsQueue; + } + + public OverAllQueryMetrics getOverallQueryMetrics() { + return overAllQueryMetrics; + } + + public void setQueryLogger(QueryLogger queryLogger) { + this.queryLogger = queryLogger; + } + + public QueryLogger getQueryLogger() { + return queryLogger; + } + + public boolean isClientSideUpsertSelect() { + return isClientSideUpsertSelect; + } + + public void 
setClientSideUpsertSelect(boolean isClientSideUpsertSelect) { + this.isClientSideUpsertSelect = isClientSideUpsertSelect; + } + + public boolean isUncoveredIndex() { + return isUncoveredIndex; + } + + public void setUncoveredIndex(boolean isUncoveredIndex) { + this.isUncoveredIndex = isUncoveredIndex; + } + + /* + * setRetryingPersistentCache can be used to override the USE_PERSISTENT_CACHE hint and disable + * the use of the persistent cache for a specific cache ID. This can be used to retry queries that + * failed when using the persistent cache. + */ + public void setRetryingPersistentCache(long cacheId) { + retryingPersistentCache.put(cacheId, true); + } + + public boolean getRetryingPersistentCache(long cacheId) { + Boolean retrying = retryingPersistentCache.get(cacheId); + if (retrying == null) { + return false; + } else { + return retrying; + } + } + + public String getEncodedCdcIncludeScopes() { + return cdcIncludeScopes; + } + + public void setCDCIncludeScopes(Set cdcIncludeScopes) { + this.cdcIncludeScopes = CDCUtil.makeChangeScopeStringFromEnums(cdcIncludeScopes); + } + + public TableRef getCDCDataTableRef() { + return cdcDataTableRef; + } + + public void setCDCDataTableRef(TableRef cdcDataTableRef) { + this.cdcDataTableRef = cdcDataTableRef; + } + + public TableRef getCDCTableRef() { + return cdcTableRef; + } + + public void setCDCTableRef(TableRef cdcTableRef) { + this.cdcTableRef = cdcTableRef; + } + + public void addSubStatementContext(StatementContext sub) { + subStatementContexts.add(sub); + } + + public Set getSubStatementContexts() { + return subStatementContexts; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java index 2491c7413f2..986fda9ff0c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementNormalizer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,7 +30,6 @@ import org.apache.phoenix.parse.DerivedTableNode; import org.apache.phoenix.parse.FamilyWildcardParseNode; import org.apache.phoenix.parse.JoinTableNode; -import org.apache.phoenix.parse.NamedNode; import org.apache.phoenix.parse.JoinTableNode.JoinType; import org.apache.phoenix.parse.LessThanOrEqualParseNode; import org.apache.phoenix.parse.NamedTableNode; @@ -41,148 +40,153 @@ import org.apache.phoenix.parse.TableNodeVisitor; import org.apache.phoenix.parse.TableWildcardParseNode; import org.apache.phoenix.parse.WildcardParseNode; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - +import org.apache.phoenix.util.SchemaUtil; /** - * - * Class that creates a new select statement ensuring that a literal always occurs - * on the RHS (i.e. if literal found on the LHS, then the operator is reversed and - * the literal is put on the RHS) - * - * + * Class that creates a new select statement ensuring that a literal always occurs on the RHS (i.e. 
+ * if literal found on the LHS, then the operator is reversed and the literal is put on the RHS) * @since 0.1 */ public class StatementNormalizer extends ParseNodeRewriter { - private boolean multiTable; - - public StatementNormalizer(ColumnResolver resolver, int expectedAliasCount, boolean multiTable) { - super(resolver, expectedAliasCount); - this.multiTable = multiTable; - } + private boolean multiTable; - public static ParseNode normalize(ParseNode where, ColumnResolver resolver) throws SQLException { - return rewrite(where, new StatementNormalizer(resolver, 0, false)); - } - - /** - * Rewrite the select statement by switching any constants to the right hand side - * of the expression. - * @param statement the select statement - * @param resolver - * @return new select statement - * @throws SQLException - */ - public static SelectStatement normalize(SelectStatement statement, ColumnResolver resolver) throws SQLException { - boolean multiTable = statement.isJoin(); - // Replace WildcardParse with a list of TableWildcardParseNode for multi-table queries - if (multiTable) { - List selectNodes = statement.getSelect(); - List normSelectNodes = selectNodes; - for (int i = 0; i < selectNodes.size(); i++) { - AliasedNode aliasedNode = selectNodes.get(i); - ParseNode selectNode = aliasedNode.getNode(); - if (selectNode == WildcardParseNode.INSTANCE) { - if (selectNodes == normSelectNodes) { - normSelectNodes = Lists.newArrayList(selectNodes.subList(0, i)); - } - List tableNames = statement.getFrom().accept(new TableNameVisitor()); - for (TableName tableName : tableNames) { - TableWildcardParseNode node = NODE_FACTORY.tableWildcard(tableName); - normSelectNodes.add(NODE_FACTORY.aliasedNode(null, node)); - } - } else if (selectNodes != normSelectNodes) { - normSelectNodes.add(aliasedNode); - } - } - if (selectNodes != normSelectNodes) { - statement = NODE_FACTORY.select(statement.getFrom(), statement.getHint(), statement.isDistinct(), - normSelectNodes, statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), - statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - } - - return rewrite(statement, new StatementNormalizer(resolver, statement.getSelect().size(), multiTable)); - } + public StatementNormalizer(ColumnResolver resolver, int expectedAliasCount, boolean multiTable) { + super(resolver, expectedAliasCount); + this.multiTable = multiTable; + } - private static class TableNameVisitor implements TableNodeVisitor> { + public static ParseNode normalize(ParseNode where, ColumnResolver resolver) throws SQLException { + return rewrite(where, new StatementNormalizer(resolver, 0, false)); + } - @Override - public List visit(BindTableNode boundTableNode) throws SQLException { - TableName name = boundTableNode.getAlias() == null ? boundTableNode.getName() : TableName.create(null, boundTableNode.getAlias()); - return Collections.singletonList(name); + /** + * Rewrite the select statement by switching any constants to the right hand side of the + * expression. 
+ * @param statement the select statement + * @return new select statement + */ + public static SelectStatement normalize(SelectStatement statement, ColumnResolver resolver) + throws SQLException { + boolean multiTable = statement.isJoin(); + // Replace WildcardParse with a list of TableWildcardParseNode for multi-table queries + if (multiTable) { + List selectNodes = statement.getSelect(); + List normSelectNodes = selectNodes; + for (int i = 0; i < selectNodes.size(); i++) { + AliasedNode aliasedNode = selectNodes.get(i); + ParseNode selectNode = aliasedNode.getNode(); + if (selectNode == WildcardParseNode.INSTANCE) { + if (selectNodes == normSelectNodes) { + normSelectNodes = Lists.newArrayList(selectNodes.subList(0, i)); + } + List tableNames = statement.getFrom().accept(new TableNameVisitor()); + for (TableName tableName : tableNames) { + TableWildcardParseNode node = NODE_FACTORY.tableWildcard(tableName); + normSelectNodes.add(NODE_FACTORY.aliasedNode(null, node)); + } + } else if (selectNodes != normSelectNodes) { + normSelectNodes.add(aliasedNode); } + } + if (selectNodes != normSelectNodes) { + statement = NODE_FACTORY.select(statement.getFrom(), statement.getHint(), + statement.isDistinct(), normSelectNodes, statement.getWhere(), statement.getGroupBy(), + statement.getHaving(), statement.getOrderBy(), statement.getLimit(), + statement.getOffset(), statement.getBindCount(), statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + } + } - @Override - public List visit(JoinTableNode joinNode) throws SQLException { - List lhs = joinNode.getLHS().accept(this); - List rhs = joinNode.getType() == JoinType.Semi || joinNode.getType() == JoinType.Anti ? Collections. emptyList() : joinNode.getRHS().accept(this); - List ret = Lists.newArrayListWithExpectedSize(lhs.size() + rhs.size()); - ret.addAll(lhs); - ret.addAll(rhs); - return ret; - } + return rewrite(statement, + new StatementNormalizer(resolver, statement.getSelect().size(), multiTable)); + } - @Override - public List visit(NamedTableNode namedTableNode) - throws SQLException { - TableName name = namedTableNode.getAlias() == null ? namedTableNode.getName() : TableName.create(null, namedTableNode.getAlias()); - return Collections.singletonList(name); - } + private static class TableNameVisitor implements TableNodeVisitor> { - @Override - public List visit(DerivedTableNode subselectNode) - throws SQLException { - TableName name = TableName.create(null, subselectNode.getAlias()); - return Collections.singletonList(name); - } - }; - @Override - public ParseNode visitLeave(ComparisonParseNode node, List nodes) throws SQLException { - if (nodes.get(0).isStateless() && !nodes.get(1).isStateless() - && !(nodes.get(1) instanceof ArrayElemRefNode)) { - List normNodes = Lists.newArrayListWithExpectedSize(2); - normNodes.add(nodes.get(1)); - normNodes.add(nodes.get(0)); - nodes = normNodes; - node = NODE_FACTORY.comparison(node.getInvertFilterOp(), nodes.get(0), nodes.get(1)); - } - return super.visitLeave(node, nodes); + public List visit(BindTableNode boundTableNode) throws SQLException { + TableName name = boundTableNode.getAlias() == null + ? 
boundTableNode.getName() + : TableName.create(null, boundTableNode.getAlias()); + return Collections.singletonList(name); } @Override - public ParseNode visitLeave(final BetweenParseNode node, List nodes) throws SQLException { - - LessThanOrEqualParseNode lhsNode = NODE_FACTORY.lte(node.getChildren().get(1), node.getChildren().get(0)); - LessThanOrEqualParseNode rhsNode = NODE_FACTORY.lte(node.getChildren().get(0), node.getChildren().get(2)); - List parseNodes = Lists.newArrayListWithExpectedSize(2); - parseNodes.add(this.visitLeave(lhsNode, lhsNode.getChildren())); - parseNodes.add(this.visitLeave(rhsNode, rhsNode.getChildren())); - return super.visitLeave(node, parseNodes); + public List visit(JoinTableNode joinNode) throws SQLException { + List lhs = joinNode.getLHS().accept(this); + List rhs = + joinNode.getType() == JoinType.Semi || joinNode.getType() == JoinType.Anti + ? Collections. emptyList() + : joinNode.getRHS().accept(this); + List ret = Lists. newArrayListWithExpectedSize(lhs.size() + rhs.size()); + ret.addAll(lhs); + ret.addAll(rhs); + return ret; } @Override - public ParseNode visit(ColumnParseNode node) throws SQLException { - if (multiTable - && node.getAlias() != null - && node.getTableName() != null - && SchemaUtil.normalizeIdentifier(node.getAlias()).equals(node.getName())) { - node = NODE_FACTORY.column(TableName.create(node.getSchemaName(), node.getTableName()), - node.isCaseSensitive() ? '"' + node.getName() + '"' : node.getName(), - node.isCaseSensitive() ? '"' + node.getFullName() + '"' : node.getFullName()); - } - return super.visit(node); + public List visit(NamedTableNode namedTableNode) throws SQLException { + TableName name = namedTableNode.getAlias() == null + ? namedTableNode.getName() + : TableName.create(null, namedTableNode.getAlias()); + return Collections.singletonList(name); } - + @Override - public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { - if (!multiTable) - return super.visit(node); - - return super.visit(NODE_FACTORY.tableWildcard(NODE_FACTORY.table(null, node.isCaseSensitive() ? 
'"' + node.getName() + '"' : node.getName()))); + public List visit(DerivedTableNode subselectNode) throws SQLException { + TableName name = TableName.create(null, subselectNode.getAlias()); + return Collections.singletonList(name); } -} + }; + + @Override + public ParseNode visitLeave(ComparisonParseNode node, List nodes) throws SQLException { + if ( + nodes.get(0).isStateless() && !nodes.get(1).isStateless() + && !(nodes.get(1) instanceof ArrayElemRefNode) + ) { + List normNodes = Lists.newArrayListWithExpectedSize(2); + normNodes.add(nodes.get(1)); + normNodes.add(nodes.get(0)); + nodes = normNodes; + node = NODE_FACTORY.comparison(node.getInvertFilterOp(), nodes.get(0), nodes.get(1)); + } + return super.visitLeave(node, nodes); + } + @Override + public ParseNode visitLeave(final BetweenParseNode node, List nodes) + throws SQLException { + + LessThanOrEqualParseNode lhsNode = + NODE_FACTORY.lte(node.getChildren().get(1), node.getChildren().get(0)); + LessThanOrEqualParseNode rhsNode = + NODE_FACTORY.lte(node.getChildren().get(0), node.getChildren().get(2)); + List parseNodes = Lists.newArrayListWithExpectedSize(2); + parseNodes.add(this.visitLeave(lhsNode, lhsNode.getChildren())); + parseNodes.add(this.visitLeave(rhsNode, rhsNode.getChildren())); + return super.visitLeave(node, parseNodes); + } + + @Override + public ParseNode visit(ColumnParseNode node) throws SQLException { + if ( + multiTable && node.getAlias() != null && node.getTableName() != null + && SchemaUtil.normalizeIdentifier(node.getAlias()).equals(node.getName()) + ) { + node = NODE_FACTORY.column(TableName.create(node.getSchemaName(), node.getTableName()), + node.isCaseSensitive() ? '"' + node.getName() + '"' : node.getName(), + node.isCaseSensitive() ? '"' + node.getFullName() + '"' : node.getFullName()); + } + return super.visit(node); + } + + @Override + public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { + if (!multiTable) return super.visit(node); + + return super.visit(NODE_FACTORY.tableWildcard(NODE_FACTORY.table(null, + node.isCaseSensitive() ? '"' + node.getName() + '"' : node.getName()))); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementPlan.java index c74b1c085e5..60bcf9058f9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/StatementPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,35 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import org.apache.phoenix.schema.TableRef; - public interface StatementPlan { - StatementContext getContext(); - /** - * Returns the ParameterMetaData for the statement - */ - ParameterMetaData getParameterMetaData(); - - ExplainPlan getExplainPlan() throws SQLException; - public Set getSourceRefs(); - Operation getOperation(); - - /** - * @return estimated number of rows that will be scanned when this statement plan is been executed. - * Returns null if the estimate cannot be provided. 
- * @throws SQLException - */ - public Long getEstimatedRowsToScan() throws SQLException; - - /** - * @return estimated number of bytes that will be scanned when this statement plan is been executed. - * Returns null if the estimate cannot be provided. - */ - public Long getEstimatedBytesToScan() throws SQLException; - - /** - * @return timestamp at which the estimate information (estimated bytes and estimated rows) was - * computed. executed. Returns null if the information cannot be provided. - */ - public Long getEstimateInfoTimestamp() throws SQLException; -} \ No newline at end of file + StatementContext getContext(); + + /** + * Returns the ParameterMetaData for the statement + */ + ParameterMetaData getParameterMetaData(); + + ExplainPlan getExplainPlan() throws SQLException; + + public Set getSourceRefs(); + + Operation getOperation(); + + /** + * @return estimated number of rows that will be scanned when this statement plan is been + * executed. Returns null if the estimate cannot be provided. + */ + public Long getEstimatedRowsToScan() throws SQLException; + + /** + * @return estimated number of bytes that will be scanned when this statement plan is been + * executed. Returns null if the estimate cannot be provided. + */ + public Long getEstimatedBytesToScan() throws SQLException; + + /** + * @return timestamp at which the estimate information (estimated bytes and estimated rows) was + * computed. executed. Returns null if the information cannot be provided. + */ + public Long getEstimateInfoTimestamp() throws SQLException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java index 22e187d4352..6b5d0283625 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -54,834 +54,796 @@ import org.apache.phoenix.schema.ColumnFamilyNotFoundException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.TableNotFoundException; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * Class for rewriting where-clause sub-queries into join queries. - * - * If the where-clause sub-query is one of those top-node conditions (being - * the only condition node or direct descendant of AND nodes), we convert the - * sub-query directly into semi-joins, anti-joins or inner-joins, and meanwhile - * remove the original condition node from the where clause. - * Otherwise, we convert the sub-query into left-joins and change the original - * condition node into a null test of a join table field (ONE if matched, NULL - * if not matched). + * Class for rewriting where-clause sub-queries into join queries. If the where-clause sub-query is + * one of those top-node conditions (being the only condition node or direct descendant of AND + * nodes), we convert the sub-query directly into semi-joins, anti-joins or inner-joins, and + * meanwhile remove the original condition node from the where clause. 
Otherwise, we convert the + * sub-query into left-joins and change the original condition node into a null test of a join table + * field (ONE if matched, NULL if not matched). */ public class SubqueryRewriter extends ParseNodeRewriter { - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - private final ColumnResolver columnResolver; - private final PhoenixConnection connection; - private TableNode tableNode; - private ParseNode topNode; - - public static SelectStatement transform(SelectStatement select, ColumnResolver resolver, PhoenixConnection connection) throws SQLException { - ParseNode where = select.getWhere(); - if (where == null) - return select; - - SubqueryRewriter rewriter = new SubqueryRewriter(select, resolver, connection); - ParseNode normWhere = rewrite(where, rewriter); - if (normWhere == where) - return select; - - return NODE_FACTORY.select(select, rewriter.tableNode, normWhere); - } - - protected SubqueryRewriter(SelectStatement select, ColumnResolver resolver, PhoenixConnection connection) { - this.columnResolver = resolver; - this.connection = connection; - this.tableNode = select.getFrom(); - this.topNode = null; - } - - @Override - protected void enterParseNode(ParseNode node) { - if (topNode == null) { - topNode = node; - } - super.enterParseNode(node); + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + + private final ColumnResolver columnResolver; + private final PhoenixConnection connection; + private TableNode tableNode; + private ParseNode topNode; + + public static SelectStatement transform(SelectStatement select, ColumnResolver resolver, + PhoenixConnection connection) throws SQLException { + ParseNode where = select.getWhere(); + if (where == null) return select; + + SubqueryRewriter rewriter = new SubqueryRewriter(select, resolver, connection); + ParseNode normWhere = rewrite(where, rewriter); + if (normWhere == where) return select; + + return NODE_FACTORY.select(select, rewriter.tableNode, normWhere); + } + + protected SubqueryRewriter(SelectStatement select, ColumnResolver resolver, + PhoenixConnection connection) { + this.columnResolver = resolver; + this.connection = connection; + this.tableNode = select.getFrom(); + this.topNode = null; + } + + @Override + protected void enterParseNode(ParseNode node) { + if (topNode == null) { + topNode = node; } - - @Override - protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, ParseNodeRewriter.CompoundNodeFactory factory) { - if (topNode == node) { - topNode = null; - } - - return super.leaveCompoundNode(node, children, factory); + super.enterParseNode(node); + } + + @Override + protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, + ParseNodeRewriter.CompoundNodeFactory factory) { + if (topNode == node) { + topNode = null; } - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - return true; + return super.leaveCompoundNode(node, children, factory); + } + + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + return true; + } + + @Override + public ParseNode visitLeave(AndParseNode node, List l) throws SQLException { + return leaveCompoundNode(node, l, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + if (children.isEmpty()) { + return null; + } + if (children.size() == 1) { + return children.get(0); + } + return NODE_FACTORY.and(children); + } + }); + } + + /** + *
+   * {@code
+   * Rewrite the In Subquery to semi/anti/left join for both NonCorrelated and Correlated subqueries.
+   *
+   * 1.If the {@link InParseNode} is the only node in where clause or is the ANDed part of the where clause,
+   *   then we would rewrite the In Subquery to semi/anti join:
+   *   For  NonCorrelated subquery, an example is:
+   *    SELECT item_id, name FROM item i WHERE i.item_id IN
+   *    (SELECT item_id FROM order o  where o.price > 8)
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
+   *    (SELECT DISTINCT 1 $35,ITEM_ID $36 FROM order O  WHERE O.PRICE > 8) $34
+   *     ON (I.ITEM_ID = $34.$36)
+   *
+   *   For Correlated subquery, an example is:
+   *    SELECT item_id, name FROM item i WHERE i.item_id IN
+   *    (SELECT item_id FROM order o  where o.price = i.price)
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
+   *    (SELECT DISTINCT 1 $3,ITEM_ID $4,O.PRICE $2 FROM order O ) $1
+   *    ON ((I.ITEM_ID = $1.$4 AND $1.$2 = I.PRICE))
+   *
+   * 2.If the {@link InParseNode} is the ORed part of the where clause, then we would rewrite the In Subquery to
+   *   Left Join.
+   *
+   *   For  NonCorrelated subquery, an example is:
+   *    SELECT item_id, name FROM item i WHERE i.item_id IN
+   *    (SELECT max(item_id) FROM order o  where o.price > 8 group by o.customer_id,o.item_id) or i.discount1 > 10
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  Left JOIN
+   *    (SELECT DISTINCT 1 $56, MAX(ITEM_ID) $57 FROM order O  WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $55
+   *    ON (I.ITEM_ID = $55.$57) WHERE ($55.$56 IS NOT NULL  OR I.DISCOUNT1 > 10)
+   *
+   *   For  Correlated subquery, an example is:
+   *     SELECT item_id, name FROM item i WHERE i.item_id IN
+   *     (SELECT max(item_id) FROM order o  where o.price = i.price group by o.customer_id) or i.discount1 > 10;
+   *
+   *     The above sql would be rewritten as:
+   *     SELECT ITEM_ID,NAME FROM item I  Left JOIN
+   *     (SELECT DISTINCT 1 $28, MAX(ITEM_ID) $29,O.PRICE $27 FROM order O  GROUP BY O.PRICE,O.CUSTOMER_ID) $26
+   *     ON ((I.ITEM_ID = $26.$29 AND $26.$27 = I.PRICE)) WHERE ($26.$28 IS NOT NULL  OR I.DISCOUNT1 > 10)
+   * }
+   * 
+ */ + @Override + public ParseNode visitLeave(InParseNode inParseNode, List childParseNodes) + throws SQLException { + boolean isTopNode = topNode == inParseNode; + if (isTopNode) { + topNode = null; } - @Override - public ParseNode visitLeave(AndParseNode node, List l) throws SQLException { - return leaveCompoundNode(node, l, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - if (children.isEmpty()) { - return null; - } - if (children.size() == 1) { - return children.get(0); - } - return NODE_FACTORY.and(children); - } - }); + SubqueryParseNode subqueryParseNode = (SubqueryParseNode) childParseNodes.get(1); + SelectStatement subquerySelectStatementToUse = + fixSubqueryStatement(subqueryParseNode.getSelectNode()); + String subqueryTableTempAlias = ParseNodeFactory.createTempAlias(); + + JoinConditionExtractor joinConditionExtractor = new JoinConditionExtractor( + subquerySelectStatementToUse, columnResolver, connection, subqueryTableTempAlias); + + List newSubquerySelectAliasedNodes = null; + ParseNode extractedJoinConditionParseNode = null; + int extractedSelectAliasNodeCount = 0; + List oldSubqueryAliasedNodes = subquerySelectStatementToUse.getSelect(); + ParseNode whereParseNodeAfterExtract = subquerySelectStatementToUse.getWhere() == null + ? null + : subquerySelectStatementToUse.getWhere().accept(joinConditionExtractor); + if (whereParseNodeAfterExtract == subquerySelectStatementToUse.getWhere()) { + /** + * It is an NonCorrelated subquery. + */ + newSubquerySelectAliasedNodes = + Lists. newArrayListWithExpectedSize(oldSubqueryAliasedNodes.size() + 1); + + newSubquerySelectAliasedNodes + .add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), LiteralParseNode.ONE)); + this.addNewAliasedNodes(newSubquerySelectAliasedNodes, oldSubqueryAliasedNodes); + subquerySelectStatementToUse = + NODE_FACTORY.select(subquerySelectStatementToUse, !inParseNode.isSubqueryDistinct(), + newSubquerySelectAliasedNodes, whereParseNodeAfterExtract); + } else { + /** + * It is an Correlated subquery. + */ + List extractedAdditionalSelectAliasNodes = + joinConditionExtractor.getAdditionalSubselectSelectAliasedNodes(); + extractedSelectAliasNodeCount = extractedAdditionalSelectAliasNodes.size(); + newSubquerySelectAliasedNodes = Lists. newArrayListWithExpectedSize( + oldSubqueryAliasedNodes.size() + 1 + extractedAdditionalSelectAliasNodes.size()); + + newSubquerySelectAliasedNodes + .add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), LiteralParseNode.ONE)); + this.addNewAliasedNodes(newSubquerySelectAliasedNodes, oldSubqueryAliasedNodes); + newSubquerySelectAliasedNodes.addAll(extractedAdditionalSelectAliasNodes); + extractedJoinConditionParseNode = joinConditionExtractor.getJoinConditionParseNode(); + + boolean isAggregate = subquerySelectStatementToUse.isAggregate(); + if (!isAggregate) { + subquerySelectStatementToUse = + NODE_FACTORY.select(subquerySelectStatementToUse, !inParseNode.isSubqueryDistinct(), + newSubquerySelectAliasedNodes, whereParseNodeAfterExtract); + } else { + /** + * If exists AggregateFunction,we must add the correlated join condition to both the groupBy + * clause and select lists of the subquery. 
+ */ + List newGroupByParseNodes = this.createNewGroupByParseNodes( + extractedAdditionalSelectAliasNodes, subquerySelectStatementToUse); + + subquerySelectStatementToUse = + NODE_FACTORY.select(subquerySelectStatementToUse, !inParseNode.isSubqueryDistinct(), + newSubquerySelectAliasedNodes, whereParseNodeAfterExtract, newGroupByParseNodes, true); + } } + ParseNode joinOnConditionParseNode = + getJoinConditionNodeForInSubquery(childParseNodes.get(0), newSubquerySelectAliasedNodes, + subqueryTableTempAlias, extractedJoinConditionParseNode, extractedSelectAliasNodeCount); + DerivedTableNode subqueryDerivedTableNode = + NODE_FACTORY.derivedTable(subqueryTableTempAlias, subquerySelectStatementToUse); + JoinType joinType = + isTopNode ? (inParseNode.isNegate() ? JoinType.Anti : JoinType.Semi) : JoinType.Left; + ParseNode resultWhereParseNode = isTopNode + ? null + : NODE_FACTORY.isNull(NODE_FACTORY.column(NODE_FACTORY.table(null, subqueryTableTempAlias), + newSubquerySelectAliasedNodes.get(0).getAlias(), null), !inParseNode.isNegate()); + tableNode = NODE_FACTORY.join(joinType, tableNode, subqueryDerivedTableNode, + joinOnConditionParseNode, false); + + return resultWhereParseNode; + } + + /** + *
+   * {@code
+   * Rewrite the Exists Subquery to semi/anti/left join for both NonCorrelated and Correlated subqueries.
+   *
+   * 1.If the {@link ExistsParseNode} is a NonCorrelated subquery, then just add LIMIT 1.
+   *    an example is:
+   *    SELECT item_id, name FROM item i WHERE exists
+   *    (SELECT 1 FROM order o  where o.price > 8)
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  WHERE  EXISTS
+   *    (SELECT 1 FROM ORDER_TABLE O  WHERE O.PRICE > 8 LIMIT 1)
+   *
+   *   another example is:
+   *   SELECT item_id, name FROM item i WHERE exists
+   *   (SELECT 1 FROM order o  where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)
+   *    or i.discount1 > 10
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  WHERE
+   *    ( EXISTS (SELECT 1 FROM ORDER_TABLE O  WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING  COUNT(ORDER_ID) > 1 LIMIT 1)
+   *    OR I.DISCOUNT1 > 10)
+   *
+   * 2.If the {@link ExistsParseNode} is a Correlated subquery and is the only node in the where clause or
+   *   is the ANDed part of the where clause, then we would rewrite the Exists Subquery to semi/anti join:
+   *   an example is:
+   *    SELECT item_id, name FROM item i WHERE exists
+   *    (SELECT 1 FROM order o where o.price = i.price and o.quantity = 5 )
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
+   *    (SELECT DISTINCT 1 $3,O.PRICE $2 FROM ORDER_TABLE O  WHERE O.QUANTITY = 5) $1
+   *    ON ($1.$2 = I.PRICE)
+   *
+   *   another example with AggregateFunction and groupBy is
+   *   SELECT item_id, name FROM item i WHERE exists
+   *   (SELECT 1 FROM order o  where o.item_id = i.item_id group by customer_id having count(order_id) > 1)
+   *
+   *    The above sql would be rewritten as:
+   *     SELECT ITEM_ID,NAME FROM item I  Semi JOIN
+   *     (SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM order O  GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING  COUNT(ORDER_ID) > 1) $1
+   *     ON ($1.$2 = I.ITEM_ID)
+   *
+   * 3.If the {@link ExistsParseNode} is a Correlated subquery and is the ORed part of the where clause,
+   *   then we would rewrite the Exists Subquery to Left Join.
+   *   an example is:
+   *   SELECT item_id, name FROM item i WHERE exists
+   *   (SELECT 1 FROM order o  where o.item_id = i.item_id group by customer_id having count(order_id) > 1)
+   *   or i.discount1 > 10
+   *
+   *    The above sql would be rewritten as:
+   *    SELECT ITEM_ID,NAME FROM item I  Left JOIN
+   *    (SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM order O  GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING  COUNT(ORDER_ID) > 1) $1
+   *    ON ($1.$2 = I.ITEM_ID) WHERE ($1.$3 IS NOT NULL  OR I.DISCOUNT1 > 10)
+   * }
+   * 
+ */ + @Override + public ParseNode visitLeave(ExistsParseNode existsParseNode, List childParseNodes) + throws SQLException { + + boolean isTopNode = topNode == existsParseNode; + if (isTopNode) { + topNode = null; + } + + SubqueryParseNode subqueryParseNode = (SubqueryParseNode) childParseNodes.get(0); + SelectStatement subquerySelectStatementToUse = + fixSubqueryStatement(subqueryParseNode.getSelectNode()); + String subqueryTableTempAlias = ParseNodeFactory.createTempAlias(); + JoinConditionExtractor joinConditionExtractor = new JoinConditionExtractor( + subquerySelectStatementToUse, columnResolver, connection, subqueryTableTempAlias); + ParseNode whereParseNodeAfterExtract = subquerySelectStatementToUse.getWhere() == null + ? null + : subquerySelectStatementToUse.getWhere().accept(joinConditionExtractor); + if (whereParseNodeAfterExtract == subquerySelectStatementToUse.getWhere()) { + /** + * It is non-correlated EXISTS subquery, add LIMIT 1 + */ + subquerySelectStatementToUse = NODE_FACTORY.select(subquerySelectStatementToUse, + NODE_FACTORY.limit(NODE_FACTORY.literal(1))); + subqueryParseNode = NODE_FACTORY.subquery(subquerySelectStatementToUse, false); + existsParseNode = NODE_FACTORY.exists(subqueryParseNode, existsParseNode.isNegate()); + return super.visitLeave(existsParseNode, + Collections. singletonList(subqueryParseNode)); + } + + List extractedAdditionalSelectAliasNodes = + joinConditionExtractor.getAdditionalSubselectSelectAliasedNodes(); + List newSubquerySelectAliasedNodes = + Lists.newArrayListWithExpectedSize(extractedAdditionalSelectAliasNodes.size() + 1); /** - *
-     * {@code
-     * Rewrite the In Subquery to semi/anti/left join for both NonCorrelated and Correlated subquery.
-     *
-     * 1.If the {@link InParseNode} is the only node in where clause or is the ANDed part of the where clause,
-     *   then we would rewrite the In Subquery to semi/anti join:
-     *   For  NonCorrelated subquery, an example is:
-     *    SELECT item_id, name FROM item i WHERE i.item_id IN
-     *    (SELECT item_id FROM order o  where o.price > 8)
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
-     *    (SELECT DISTINCT 1 $35,ITEM_ID $36 FROM order O  WHERE O.PRICE > 8) $34
-     *     ON (I.ITEM_ID = $34.$36)
-     *
-     *   For Correlated subquery, an example is:
-     *    SELECT item_id, name FROM item i WHERE i.item_id IN
-     *    (SELECT item_id FROM order o  where o.price = i.price)
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
-     *    (SELECT DISTINCT 1 $3,ITEM_ID $4,O.PRICE $2 FROM order O ) $1
-     *    ON ((I.ITEM_ID = $1.$4 AND $1.$2 = I.PRICE))
-     *
-     * 2.If the {@link InParseNode} is the ORed part of the where clause,then we would rewrite the In Subquery to
-     *   Left Join.
-     *
-     *   For  NonCorrelated subquery, an example is:
-     *    SELECT item_id, name FROM item i WHERE i.item_id IN
-     *    (SELECT max(item_id) FROM order o  where o.price > 8 group by o.customer_id,o.item_id) or i.discount1 > 10
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  Left JOIN
-     *    (SELECT DISTINCT 1 $56, MAX(ITEM_ID) $57 FROM order O  WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $55
-     *    ON (I.ITEM_ID = $55.$57) WHERE ($55.$56 IS NOT NULL  OR I.DISCOUNT1 > 10)
-     *
-     *   For  Correlated subquery, an example is:
-     *     SELECT item_id, name FROM item i WHERE i.item_id IN
-     *     (SELECT max(item_id) FROM order o  where o.price = i.price group by o.customer_id) or i.discount1 > 10;
-     *
-     *     The above sql would be rewritten as:
-     *     SELECT ITEM_ID,NAME FROM item I  Left JOIN
-     *     (SELECT DISTINCT 1 $28, MAX(ITEM_ID) $29,O.PRICE $27 FROM order O  GROUP BY O.PRICE,O.CUSTOMER_ID) $26
-     *     ON ((I.ITEM_ID = $26.$29 AND $26.$27 = I.PRICE)) WHERE ($26.$28 IS NOT NULL  OR I.DISCOUNT1 > 10)
-     * }
-     * 
+ * Just overwrite original subquery selectAliasNodes. */ - @Override - public ParseNode visitLeave(InParseNode inParseNode, List childParseNodes) throws SQLException { - boolean isTopNode = topNode == inParseNode; - if (isTopNode) { - topNode = null; - } + newSubquerySelectAliasedNodes + .add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), LiteralParseNode.ONE)); + newSubquerySelectAliasedNodes.addAll(extractedAdditionalSelectAliasNodes); + + boolean isAggregate = subquerySelectStatementToUse.isAggregate(); + if (!isAggregate) { + subquerySelectStatementToUse = NODE_FACTORY.select(subquerySelectStatementToUse, true, + newSubquerySelectAliasedNodes, whereParseNodeAfterExtract); + } else { + /** + * If exists AggregateFunction,we must add the correlated join condition to both the groupBy + * clause and select lists of the subquery. + */ + List newGroupByParseNodes = this.createNewGroupByParseNodes( + extractedAdditionalSelectAliasNodes, subquerySelectStatementToUse); + + subquerySelectStatementToUse = NODE_FACTORY.select(subquerySelectStatementToUse, true, + newSubquerySelectAliasedNodes, whereParseNodeAfterExtract, newGroupByParseNodes, true); + } + ParseNode joinOnConditionParseNode = joinConditionExtractor.getJoinConditionParseNode(); + DerivedTableNode subqueryDerivedTableNode = + NODE_FACTORY.derivedTable(subqueryTableTempAlias, subquerySelectStatementToUse); + JoinType joinType = + isTopNode ? (existsParseNode.isNegate() ? JoinType.Anti : JoinType.Semi) : JoinType.Left; + ParseNode resultWhereParseNode = isTopNode + ? null + : NODE_FACTORY.isNull(NODE_FACTORY.column(NODE_FACTORY.table(null, subqueryTableTempAlias), + newSubquerySelectAliasedNodes.get(0).getAlias(), null), !existsParseNode.isNegate()); + tableNode = NODE_FACTORY.join(joinType, tableNode, subqueryDerivedTableNode, + joinOnConditionParseNode, false); + return resultWhereParseNode; + } + + @Override + public ParseNode visitLeave(ComparisonParseNode node, List l) throws SQLException { + boolean isTopNode = topNode == node; + if (isTopNode) { + topNode = null; + } - SubqueryParseNode subqueryParseNode = (SubqueryParseNode) childParseNodes.get(1); - SelectStatement subquerySelectStatementToUse = fixSubqueryStatement(subqueryParseNode.getSelectNode()); - String subqueryTableTempAlias = ParseNodeFactory.createTempAlias(); - - JoinConditionExtractor joinConditionExtractor = new JoinConditionExtractor( - subquerySelectStatementToUse, - columnResolver, - connection, - subqueryTableTempAlias); - - List newSubquerySelectAliasedNodes = null; - ParseNode extractedJoinConditionParseNode = null; - int extractedSelectAliasNodeCount = 0; - List oldSubqueryAliasedNodes = subquerySelectStatementToUse.getSelect(); - ParseNode whereParseNodeAfterExtract = - subquerySelectStatementToUse.getWhere() == null ? - null : - subquerySelectStatementToUse.getWhere().accept(joinConditionExtractor); - if (whereParseNodeAfterExtract == subquerySelectStatementToUse.getWhere()) { - /** - * It is an NonCorrelated subquery. - */ - newSubquerySelectAliasedNodes = Lists. 
newArrayListWithExpectedSize( - oldSubqueryAliasedNodes.size() + 1); - - newSubquerySelectAliasedNodes.add( - NODE_FACTORY.aliasedNode( - ParseNodeFactory.createTempAlias(), - LiteralParseNode.ONE)); - this.addNewAliasedNodes(newSubquerySelectAliasedNodes, oldSubqueryAliasedNodes); - subquerySelectStatementToUse = NODE_FACTORY.select( - subquerySelectStatementToUse, - !inParseNode.isSubqueryDistinct(), - newSubquerySelectAliasedNodes, - whereParseNodeAfterExtract); - } else { - /** - * It is an Correlated subquery. - */ - List extractedAdditionalSelectAliasNodes = - joinConditionExtractor.getAdditionalSubselectSelectAliasedNodes(); - extractedSelectAliasNodeCount = extractedAdditionalSelectAliasNodes.size(); - newSubquerySelectAliasedNodes = Lists. newArrayListWithExpectedSize( - oldSubqueryAliasedNodes.size() + 1 + - extractedAdditionalSelectAliasNodes.size()); - - newSubquerySelectAliasedNodes.add(NODE_FACTORY.aliasedNode( - ParseNodeFactory.createTempAlias(), - LiteralParseNode.ONE)); - this.addNewAliasedNodes(newSubquerySelectAliasedNodes, oldSubqueryAliasedNodes); - newSubquerySelectAliasedNodes.addAll(extractedAdditionalSelectAliasNodes); - extractedJoinConditionParseNode = - joinConditionExtractor.getJoinConditionParseNode(); - - boolean isAggregate = subquerySelectStatementToUse.isAggregate(); - if (!isAggregate) { - subquerySelectStatementToUse = - NODE_FACTORY.select( - subquerySelectStatementToUse, - !inParseNode.isSubqueryDistinct(), - newSubquerySelectAliasedNodes, - whereParseNodeAfterExtract); - } else { - /** - * If exists AggregateFunction,we must add the correlated join condition to both the - * groupBy clause and select lists of the subquery. - */ - List newGroupByParseNodes = this.createNewGroupByParseNodes( - extractedAdditionalSelectAliasNodes, - subquerySelectStatementToUse); - - subquerySelectStatementToUse = NODE_FACTORY.select( - subquerySelectStatementToUse, - !inParseNode.isSubqueryDistinct(), - newSubquerySelectAliasedNodes, - whereParseNodeAfterExtract, - newGroupByParseNodes, - true); - } - } + ParseNode secondChild = l.get(1); + if (!(secondChild instanceof SubqueryParseNode)) { + return super.visitLeave(node, l); + } - ParseNode joinOnConditionParseNode = getJoinConditionNodeForInSubquery( - childParseNodes.get(0), - newSubquerySelectAliasedNodes, - subqueryTableTempAlias, - extractedJoinConditionParseNode, - extractedSelectAliasNodeCount); - DerivedTableNode subqueryDerivedTableNode = NODE_FACTORY.derivedTable( - subqueryTableTempAlias, - subquerySelectStatementToUse); - JoinType joinType = isTopNode ? - (inParseNode.isNegate() ? JoinType.Anti : JoinType.Semi) : - JoinType.Left; - ParseNode resultWhereParseNode = isTopNode ? - null : - NODE_FACTORY.isNull( - NODE_FACTORY.column( - NODE_FACTORY.table(null, subqueryTableTempAlias), - newSubquerySelectAliasedNodes.get(0).getAlias(), - null), - !inParseNode.isNegate()); - tableNode = NODE_FACTORY.join( - joinType, - tableNode, - subqueryDerivedTableNode, - joinOnConditionParseNode, - false); - - return resultWhereParseNode; + SubqueryParseNode subqueryNode = (SubqueryParseNode) secondChild; + SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode()); + String rhsTableAlias = ParseNodeFactory.createTempAlias(); + JoinConditionExtractor conditionExtractor = + new JoinConditionExtractor(subquery, columnResolver, connection, rhsTableAlias); + ParseNode where = + subquery.getWhere() == null ? 
null : subquery.getWhere().accept(conditionExtractor); + if (where == subquery.getWhere()) { // non-correlated comparison subquery, add LIMIT 2, + // expectSingleRow = true + subquery = NODE_FACTORY.select(subquery, NODE_FACTORY.limit(NODE_FACTORY.literal(2))); + subqueryNode = NODE_FACTORY.subquery(subquery, true); + l = Lists.newArrayList(l.get(0), subqueryNode); + node = NODE_FACTORY.comparison(node.getFilterOp(), l.get(0), l.get(1)); + return super.visitLeave(node, l); } - /** - *
-     * {@code
-     * Rewrite the Exists Subquery to semi/anti/left join for both NonCorrelated and Correlated subquery.
-     *
-     * 1.If the {@link ExistsParseNode} is NonCorrelated subquery,the just add LIMIT 1.
-     *    an example is:
-     *    SELECT item_id, name FROM item i WHERE exists
-     *    (SELECT 1 FROM order o  where o.price > 8)
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  WHERE  EXISTS
-     *    (SELECT 1 FROM ORDER_TABLE O  WHERE O.PRICE > 8 LIMIT 1)
-     *
-     *   another example is:
-     *   SELECT item_id, name FROM item i WHERE exists
-     *   (SELECT 1 FROM order o  where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)
-     *    or i.discount1 > 10
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  WHERE
-     *    ( EXISTS (SELECT 1 FROM ORDER_TABLE O  WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING  COUNT(ORDER_ID) > 1 LIMIT 1)
-     *    OR I.DISCOUNT1 > 10)
-     *
-     * 2.If the {@link ExistsParseNode} is Correlated subquery and is the only node in where clause or
-     *   is the ANDed part of the where clause, then we would rewrite the Exists Subquery to semi/anti join:
-     *   an example is:
-     *    SELECT item_id, name FROM item i WHERE exists
-     *    (SELECT 1 FROM order o where o.price = i.price and o.quantity = 5 )
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  Semi JOIN
-     *    (SELECT DISTINCT 1 $3,O.PRICE $2 FROM ORDER_TABLE O  WHERE O.QUANTITY = 5) $1
-     *    ON ($1.$2 = I.PRICE)
-     *
-     *   another example with AggregateFunction and groupBy is
-     *   SELECT item_id, name FROM item i WHERE exists
-     *   (SELECT 1 FROM order o  where o.item_id = i.item_id group by customer_id having count(order_id) > 1)
-     *
-     *    The above sql would be rewritten as:
-     *     SELECT ITEM_ID,NAME FROM item I  Semi JOIN
-     *     (SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM order O  GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING  COUNT(ORDER_ID) > 1) $1
-     *     ON ($1.$2 = I.ITEM_ID)
-     *
-     * 3.If the {@link ExistsParseNode} is Correlated subquery and is the ORed part of the where clause,
-     *   then we would rewrite the Exists Subquery to Left Join.
-     *   an example is:
-     *   SELECT item_id, name FROM item i WHERE exists
-     *   (SELECT 1 FROM order o  where o.item_id = i.item_id group by customer_id having count(order_id) > 1)
-     *   or i.discount1 > 10
-     *
-     *    The above sql would be rewritten as:
-     *    SELECT ITEM_ID,NAME FROM item I  Left JOIN
-     *    (SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM order O  GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING  COUNT(ORDER_ID) > 1) $1
-     *    ON ($1.$2 = I.ITEM_ID) WHERE ($1.$3 IS NOT NULL  OR I.DISCOUNT1 > 10)
-     * }
-     * 
- */ - @Override - public ParseNode visitLeave( - ExistsParseNode existsParseNode, - List childParseNodes) throws SQLException { + ParseNode rhsNode = null; + boolean isGroupby = !subquery.getGroupBy().isEmpty(); + boolean isAggregate = subquery.isAggregate(); + List aliasedNodes = subquery.getSelect(); + if (aliasedNodes.size() == 1) { + rhsNode = aliasedNodes.get(0).getNode(); + } else { + List nodes = Lists. newArrayListWithExpectedSize(aliasedNodes.size()); + for (AliasedNode aliasedNode : aliasedNodes) { + nodes.add(aliasedNode.getNode()); + } + rhsNode = NODE_FACTORY.rowValueConstructor(nodes); + } - boolean isTopNode = topNode == existsParseNode; - if (isTopNode) { - topNode = null; - } - - SubqueryParseNode subqueryParseNode = (SubqueryParseNode) childParseNodes.get(0); - SelectStatement subquerySelectStatementToUse = - fixSubqueryStatement(subqueryParseNode.getSelectNode()); - String subqueryTableTempAlias = ParseNodeFactory.createTempAlias(); - JoinConditionExtractor joinConditionExtractor = - new JoinConditionExtractor( - subquerySelectStatementToUse, - columnResolver, - connection, - subqueryTableTempAlias); - ParseNode whereParseNodeAfterExtract = - subquerySelectStatementToUse.getWhere() == null ? - null : - subquerySelectStatementToUse.getWhere().accept(joinConditionExtractor); - if (whereParseNodeAfterExtract == subquerySelectStatementToUse.getWhere()) { - /** - * It is non-correlated EXISTS subquery, add LIMIT 1 - */ - subquerySelectStatementToUse = - NODE_FACTORY.select( - subquerySelectStatementToUse, - NODE_FACTORY.limit(NODE_FACTORY.literal(1))); - subqueryParseNode = NODE_FACTORY.subquery(subquerySelectStatementToUse, false); - existsParseNode = NODE_FACTORY.exists(subqueryParseNode, existsParseNode.isNegate()); - return super.visitLeave( - existsParseNode, - Collections.singletonList(subqueryParseNode)); - } + List additionalSelectNodes = + conditionExtractor.getAdditionalSubselectSelectAliasedNodes(); + List selectNodes = + Lists.newArrayListWithExpectedSize(additionalSelectNodes.size() + 1); + selectNodes.add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), rhsNode)); + selectNodes.addAll(additionalSelectNodes); + + if (!isAggregate) { + subquery = NODE_FACTORY.select(subquery, subquery.isDistinct(), selectNodes, where); + } else { + List groupbyNodes = + this.createNewGroupByParseNodes(additionalSelectNodes, subquery); + subquery = NODE_FACTORY.select(subquery, subquery.isDistinct(), selectNodes, where, + groupbyNodes, true); + } - List extractedAdditionalSelectAliasNodes = - joinConditionExtractor.getAdditionalSubselectSelectAliasedNodes(); - List newSubquerySelectAliasedNodes = Lists.newArrayListWithExpectedSize( - extractedAdditionalSelectAliasNodes.size() + 1); - /** - * Just overwrite original subquery selectAliasNodes. - */ - newSubquerySelectAliasedNodes.add( - NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), LiteralParseNode.ONE)); - newSubquerySelectAliasedNodes.addAll(extractedAdditionalSelectAliasNodes); - - boolean isAggregate = subquerySelectStatementToUse.isAggregate(); - if (!isAggregate) { - subquerySelectStatementToUse = NODE_FACTORY.select( - subquerySelectStatementToUse, - true, - newSubquerySelectAliasedNodes, - whereParseNodeAfterExtract); - } else { - /** - * If exists AggregateFunction,we must add the correlated join condition to both the - * groupBy clause and select lists of the subquery. 
- */ - List newGroupByParseNodes = this.createNewGroupByParseNodes( - extractedAdditionalSelectAliasNodes, - subquerySelectStatementToUse); - - subquerySelectStatementToUse = NODE_FACTORY.select( - subquerySelectStatementToUse, - true, - newSubquerySelectAliasedNodes, - whereParseNodeAfterExtract, - newGroupByParseNodes, - true); - } - ParseNode joinOnConditionParseNode = joinConditionExtractor.getJoinConditionParseNode(); - DerivedTableNode subqueryDerivedTableNode = NODE_FACTORY.derivedTable( - subqueryTableTempAlias, - subquerySelectStatementToUse); - JoinType joinType = isTopNode ? - (existsParseNode.isNegate() ? JoinType.Anti : JoinType.Semi) : - JoinType.Left; - ParseNode resultWhereParseNode = isTopNode ? - null : - NODE_FACTORY.isNull( - NODE_FACTORY.column( - NODE_FACTORY.table(null, subqueryTableTempAlias), - newSubquerySelectAliasedNodes.get(0).getAlias(), - null), - !existsParseNode.isNegate()); - tableNode = NODE_FACTORY.join( - joinType, - tableNode, - subqueryDerivedTableNode, - joinOnConditionParseNode, - false); - return resultWhereParseNode; + ParseNode onNode = conditionExtractor.getJoinConditionParseNode(); + TableNode rhsTable = NODE_FACTORY.derivedTable(rhsTableAlias, subquery); + JoinType joinType = isTopNode ? JoinType.Inner : JoinType.Left; + ParseNode ret = NODE_FACTORY.comparison(node.getFilterOp(), l.get(0), NODE_FACTORY + .column(NODE_FACTORY.table(null, rhsTableAlias), selectNodes.get(0).getAlias(), null)); + tableNode = NODE_FACTORY.join(joinType, tableNode, rhsTable, onNode, !isAggregate || isGroupby); + + return ret; + } + + @Override + public ParseNode visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + List children = leaveArrayComparisonNode(node, l); + if (children == l) return super.visitLeave(node, l); + + node = NODE_FACTORY.arrayAny(children.get(0), (ComparisonParseNode) children.get(1)); + return node; + } + + @Override + public ParseNode visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + List children = leaveArrayComparisonNode(node, l); + if (children == l) return super.visitLeave(node, l); + + node = NODE_FACTORY.arrayAll(children.get(0), (ComparisonParseNode) children.get(1)); + return node; + } + + protected List leaveArrayComparisonNode(ParseNode node, List l) + throws SQLException { + boolean isTopNode = topNode == node; + if (isTopNode) { + topNode = null; } - @Override - public ParseNode visitLeave(ComparisonParseNode node, List l) throws SQLException { - boolean isTopNode = topNode == node; - if (isTopNode) { - topNode = null; - } - - ParseNode secondChild = l.get(1); - if (!(secondChild instanceof SubqueryParseNode)) { - return super.visitLeave(node, l); - } - - SubqueryParseNode subqueryNode = (SubqueryParseNode) secondChild; - SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode()); - String rhsTableAlias = ParseNodeFactory.createTempAlias(); - JoinConditionExtractor conditionExtractor = new JoinConditionExtractor(subquery, columnResolver, connection, rhsTableAlias); - ParseNode where = subquery.getWhere() == null ? 
null : subquery.getWhere().accept(conditionExtractor); - if (where == subquery.getWhere()) { // non-correlated comparison subquery, add LIMIT 2, expectSingleRow = true - subquery = NODE_FACTORY.select(subquery, NODE_FACTORY.limit(NODE_FACTORY.literal(2))); - subqueryNode = NODE_FACTORY.subquery(subquery, true); - l = Lists.newArrayList(l.get(0), subqueryNode); - node = NODE_FACTORY.comparison(node.getFilterOp(), l.get(0), l.get(1)); - return super.visitLeave(node, l); - } - - ParseNode rhsNode = null; - boolean isGroupby = !subquery.getGroupBy().isEmpty(); - boolean isAggregate = subquery.isAggregate(); - List aliasedNodes = subquery.getSelect(); - if (aliasedNodes.size() == 1) { - rhsNode = aliasedNodes.get(0).getNode(); - } else { - List nodes = Lists. newArrayListWithExpectedSize(aliasedNodes.size()); - for (AliasedNode aliasedNode : aliasedNodes) { - nodes.add(aliasedNode.getNode()); - } - rhsNode = NODE_FACTORY.rowValueConstructor(nodes); - } - - List additionalSelectNodes = - conditionExtractor.getAdditionalSubselectSelectAliasedNodes(); - List selectNodes = - Lists.newArrayListWithExpectedSize(additionalSelectNodes.size() + 1); - selectNodes.add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), rhsNode)); - selectNodes.addAll(additionalSelectNodes); - - if (!isAggregate) { - subquery = NODE_FACTORY.select(subquery, subquery.isDistinct(), selectNodes, where); - } else { - List groupbyNodes = this.createNewGroupByParseNodes( - additionalSelectNodes, - subquery); - subquery = NODE_FACTORY.select(subquery, subquery.isDistinct(), selectNodes, where, groupbyNodes, true); - } - - ParseNode onNode = conditionExtractor.getJoinConditionParseNode(); - TableNode rhsTable = NODE_FACTORY.derivedTable(rhsTableAlias, subquery); - JoinType joinType = isTopNode ? JoinType.Inner : JoinType.Left; - ParseNode ret = NODE_FACTORY.comparison(node.getFilterOp(), l.get(0), NODE_FACTORY.column(NODE_FACTORY.table(null, rhsTableAlias), selectNodes.get(0).getAlias(), null)); - tableNode = NODE_FACTORY.join(joinType, tableNode, rhsTable, onNode, !isAggregate || isGroupby); - - return ret; + ParseNode firstChild = l.get(0); + if (!(firstChild instanceof SubqueryParseNode)) { + return l; } - @Override - public ParseNode visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { - List children = leaveArrayComparisonNode(node, l); - if (children == l) - return super.visitLeave(node, l); - - node = NODE_FACTORY.arrayAny(children.get(0), (ComparisonParseNode) children.get(1)); - return node; + SubqueryParseNode subqueryNode = (SubqueryParseNode) firstChild; + SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode()); + String rhsTableAlias = ParseNodeFactory.createTempAlias(); + JoinConditionExtractor conditionExtractor = + new JoinConditionExtractor(subquery, columnResolver, connection, rhsTableAlias); + ParseNode where = + subquery.getWhere() == null ? 
null : subquery.getWhere().accept(conditionExtractor); + if (where == subquery.getWhere()) { // non-correlated any/all comparison subquery + return l; } - @Override - public ParseNode visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { - List children = leaveArrayComparisonNode(node, l); - if (children == l) - return super.visitLeave(node, l); - - node = NODE_FACTORY.arrayAll(children.get(0), (ComparisonParseNode) children.get(1)); - return node; + ParseNode rhsNode = null; + boolean isNonGroupByAggregate = subquery.getGroupBy().isEmpty() && subquery.isAggregate(); + List aliasedNodes = subquery.getSelect(); + String derivedTableAlias = null; + if (!subquery.getGroupBy().isEmpty()) { + derivedTableAlias = ParseNodeFactory.createTempAlias(); + aliasedNodes = createNewAliasedNodes(aliasedNodes); } - - protected List leaveArrayComparisonNode(ParseNode node, List l) throws SQLException { - boolean isTopNode = topNode == node; - if (isTopNode) { - topNode = null; - } - ParseNode firstChild = l.get(0); - if (!(firstChild instanceof SubqueryParseNode)) { - return l; - } - - SubqueryParseNode subqueryNode = (SubqueryParseNode) firstChild; - SelectStatement subquery = fixSubqueryStatement(subqueryNode.getSelectNode()); - String rhsTableAlias = ParseNodeFactory.createTempAlias(); - JoinConditionExtractor conditionExtractor = new JoinConditionExtractor(subquery, columnResolver, connection, rhsTableAlias); - ParseNode where = subquery.getWhere() == null ? null : subquery.getWhere().accept(conditionExtractor); - if (where == subquery.getWhere()) { // non-correlated any/all comparison subquery - return l; - } - - ParseNode rhsNode = null; - boolean isNonGroupByAggregate = subquery.getGroupBy().isEmpty() && subquery.isAggregate(); - List aliasedNodes = subquery.getSelect(); - String derivedTableAlias = null; - if (!subquery.getGroupBy().isEmpty()) { - derivedTableAlias = ParseNodeFactory.createTempAlias(); - aliasedNodes = createNewAliasedNodes(aliasedNodes); - } - - if (aliasedNodes.size() == 1) { - rhsNode = derivedTableAlias == null ? aliasedNodes.get(0).getNode() : NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), aliasedNodes.get(0).getAlias(), null); - } else { - List nodes = Lists. newArrayListWithExpectedSize(aliasedNodes.size()); - for (AliasedNode aliasedNode : aliasedNodes) { - nodes.add(derivedTableAlias == null ? 
aliasedNode.getNode() : NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), aliasedNode.getAlias(), null)); - } - rhsNode = NODE_FACTORY.rowValueConstructor(nodes); - } - - if (!isNonGroupByAggregate) { - rhsNode = NODE_FACTORY.function(DistinctValueAggregateFunction.NAME, Collections.singletonList(rhsNode)); - } - - List additionalSelectNodes = conditionExtractor.getAdditionalSubselectSelectAliasedNodes(); - List selectNodes = Lists.newArrayListWithExpectedSize(additionalSelectNodes.size() + 1); - selectNodes.add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), rhsNode)); - selectNodes.addAll(additionalSelectNodes); - List groupbyNodes = Lists.newArrayListWithExpectedSize(additionalSelectNodes.size()); - for (AliasedNode aliasedNode : additionalSelectNodes) { - groupbyNodes.add(aliasedNode.getNode()); - } - - if (derivedTableAlias == null) { - subquery = NODE_FACTORY.select(subquery, false, selectNodes, where, groupbyNodes, true); - } else { - List derivedTableGroupBy = Lists.newArrayListWithExpectedSize(subquery.getGroupBy().size() + groupbyNodes.size()); - derivedTableGroupBy.addAll(groupbyNodes); - derivedTableGroupBy.addAll(subquery.getGroupBy()); - List derivedTableSelect = Lists.newArrayListWithExpectedSize(aliasedNodes.size() + selectNodes.size() - 1); - derivedTableSelect.addAll(aliasedNodes); - for (int i = 1; i < selectNodes.size(); i++) { - AliasedNode aliasedNode = selectNodes.get(i); - String alias = ParseNodeFactory.createTempAlias(); - derivedTableSelect.add(NODE_FACTORY.aliasedNode(alias, aliasedNode.getNode())); - aliasedNode = NODE_FACTORY.aliasedNode(aliasedNode.getAlias(), NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), alias, null)); - selectNodes.set(i, aliasedNode); - groupbyNodes.set(i - 1, aliasedNode.getNode()); - } - SelectStatement derivedTableStmt = NODE_FACTORY.select(subquery, subquery.isDistinct(), derivedTableSelect, where, derivedTableGroupBy, true); - subquery = NODE_FACTORY.select(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt), - subquery.getHint(), false, selectNodes, null, groupbyNodes, null, - Collections. emptyList(), null, null, subquery.getBindCount(), true, false, - Collections. emptyList(), subquery.getUdfParseNodes()); - } - - ParseNode onNode = conditionExtractor.getJoinConditionParseNode(); - TableNode rhsTable = NODE_FACTORY.derivedTable(rhsTableAlias, subquery); - JoinType joinType = isTopNode ? JoinType.Inner : JoinType.Left; - tableNode = NODE_FACTORY.join(joinType, tableNode, rhsTable, onNode, false); - - firstChild = NODE_FACTORY.column(NODE_FACTORY.table(null, rhsTableAlias), selectNodes.get(0).getAlias(), null); - if (isNonGroupByAggregate) { - firstChild = NODE_FACTORY.upsertStmtArrayNode(Collections.singletonList(firstChild)); - } - ComparisonParseNode secondChild = (ComparisonParseNode) l.get(1); - secondChild = NODE_FACTORY.comparison(secondChild.getFilterOp(), secondChild.getLHS(), NODE_FACTORY.elementRef(Lists.newArrayList(firstChild, NODE_FACTORY.literal(1)))); - - return Lists.newArrayList(firstChild, secondChild); - } - - private SelectStatement fixSubqueryStatement(SelectStatement select) { - if (!select.isUnion()) - return select; - - // Wrap as a derived table. - return NODE_FACTORY.select(NODE_FACTORY.derivedTable(ParseNodeFactory.createTempAlias(), select), - HintNode.EMPTY_HINT_NODE, false, select.getSelect(), null, null, null, null, null, null, - select.getBindCount(), false, false, Collections. 
emptyList(), - select.getUdfParseNodes()); + if (aliasedNodes.size() == 1) { + rhsNode = derivedTableAlias == null + ? aliasedNodes.get(0).getNode() + : NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), + aliasedNodes.get(0).getAlias(), null); + } else { + List nodes = Lists. newArrayListWithExpectedSize(aliasedNodes.size()); + for (AliasedNode aliasedNode : aliasedNodes) { + nodes.add(derivedTableAlias == null + ? aliasedNode.getNode() + : NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), aliasedNode.getAlias(), + null)); + } + rhsNode = NODE_FACTORY.rowValueConstructor(nodes); } - /** - * Create new {@link AliasedNode}s by every {@link ParseNode} in subquerySelectAliasedNodes and generate new aliases - * by {@link ParseNodeFactory#createTempAlias}. - * and generate new Aliases for subquerySelectAliasedNodes, - * @param subquerySelectAliasedNodes - * @param addSelectOne - * @return - */ - private List createNewAliasedNodes(List subquerySelectAliasedNodes) { - List newAliasedNodes = Lists. newArrayListWithExpectedSize( - subquerySelectAliasedNodes.size()); + if (!isNonGroupByAggregate) { + rhsNode = NODE_FACTORY.function(DistinctValueAggregateFunction.NAME, + Collections.singletonList(rhsNode)); + } - this.addNewAliasedNodes(newAliasedNodes, subquerySelectAliasedNodes); - return newAliasedNodes; + List additionalSelectNodes = + conditionExtractor.getAdditionalSubselectSelectAliasedNodes(); + List selectNodes = + Lists.newArrayListWithExpectedSize(additionalSelectNodes.size() + 1); + selectNodes.add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), rhsNode)); + selectNodes.addAll(additionalSelectNodes); + List groupbyNodes = Lists.newArrayListWithExpectedSize(additionalSelectNodes.size()); + for (AliasedNode aliasedNode : additionalSelectNodes) { + groupbyNodes.add(aliasedNode.getNode()); } - /** - * Add every {@link ParseNode} in oldSelectAliasedNodes to newSelectAliasedNodes and generate new aliases by - * {@link ParseNodeFactory#createTempAlias}. 
- * @param oldSelectAliasedNodes - * @param addSelectOne - * @return - */ - private void addNewAliasedNodes(List newSelectAliasedNodes, List oldSelectAliasedNodes) { - for (int index = 0; index < oldSelectAliasedNodes.size(); index++) { - AliasedNode oldSelectAliasedNode = oldSelectAliasedNodes.get(index); - newSelectAliasedNodes.add(NODE_FACTORY.aliasedNode( - ParseNodeFactory.createTempAlias(), - oldSelectAliasedNode.getNode())); - } - + if (derivedTableAlias == null) { + subquery = NODE_FACTORY.select(subquery, false, selectNodes, where, groupbyNodes, true); + } else { + List derivedTableGroupBy = + Lists.newArrayListWithExpectedSize(subquery.getGroupBy().size() + groupbyNodes.size()); + derivedTableGroupBy.addAll(groupbyNodes); + derivedTableGroupBy.addAll(subquery.getGroupBy()); + List derivedTableSelect = + Lists.newArrayListWithExpectedSize(aliasedNodes.size() + selectNodes.size() - 1); + derivedTableSelect.addAll(aliasedNodes); + for (int i = 1; i < selectNodes.size(); i++) { + AliasedNode aliasedNode = selectNodes.get(i); + String alias = ParseNodeFactory.createTempAlias(); + derivedTableSelect.add(NODE_FACTORY.aliasedNode(alias, aliasedNode.getNode())); + aliasedNode = NODE_FACTORY.aliasedNode(aliasedNode.getAlias(), + NODE_FACTORY.column(NODE_FACTORY.table(null, derivedTableAlias), alias, null)); + selectNodes.set(i, aliasedNode); + groupbyNodes.set(i - 1, aliasedNode.getNode()); + } + SelectStatement derivedTableStmt = NODE_FACTORY.select(subquery, subquery.isDistinct(), + derivedTableSelect, where, derivedTableGroupBy, true); + subquery = NODE_FACTORY.select(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt), + subquery.getHint(), false, selectNodes, null, groupbyNodes, null, + Collections. emptyList(), null, null, subquery.getBindCount(), true, false, + Collections. emptyList(), subquery.getUdfParseNodes()); } - /** - * Get the join conditions in order to rewrite InSubquery to Join. - * @param lhsParseNode - * @param rhsSubquerySelectAliasedNodes the first element is {@link LiteralParseNode#ONE}. - * @param rhsSubqueryTableAlias - * @param extractedJoinConditionParseNode For NonCorrelated subquery, it is null. - * @param extractedSelectAliasNodeCount For NonCorrelated subquery, it is 0. - * @throws SQLException - */ - private ParseNode getJoinConditionNodeForInSubquery( - ParseNode lhsParseNode, - List rhsSubquerySelectAliasedNodes, - String rhsSubqueryTableAlias, - ParseNode extractedJoinConditionParseNode, - int extractedSelectAliasNodeCount) throws SQLException { - List lhsParseNodes; - if (lhsParseNode instanceof RowValueConstructorParseNode) { - lhsParseNodes = ((RowValueConstructorParseNode) lhsParseNode).getChildren(); - } else { - lhsParseNodes = Collections.singletonList(lhsParseNode); - } + ParseNode onNode = conditionExtractor.getJoinConditionParseNode(); + TableNode rhsTable = NODE_FACTORY.derivedTable(rhsTableAlias, subquery); + JoinType joinType = isTopNode ? 
JoinType.Inner : JoinType.Left; + tableNode = NODE_FACTORY.join(joinType, tableNode, rhsTable, onNode, false); - if (lhsParseNodes.size() != - (rhsSubquerySelectAliasedNodes.size() - 1 - extractedSelectAliasNodeCount)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.SUBQUERY_RETURNS_DIFFERENT_NUMBER_OF_FIELDS) - .build().buildException(); - } + firstChild = NODE_FACTORY.column(NODE_FACTORY.table(null, rhsTableAlias), + selectNodes.get(0).getAlias(), null); + if (isNonGroupByAggregate) { + firstChild = NODE_FACTORY.upsertStmtArrayNode(Collections.singletonList(firstChild)); + } + ComparisonParseNode secondChild = (ComparisonParseNode) l.get(1); + secondChild = NODE_FACTORY.comparison(secondChild.getFilterOp(), secondChild.getLHS(), + NODE_FACTORY.elementRef(Lists.newArrayList(firstChild, NODE_FACTORY.literal(1)))); + + return Lists.newArrayList(firstChild, secondChild); + } + + private SelectStatement fixSubqueryStatement(SelectStatement select) { + if (!select.isUnion()) return select; + + // Wrap as a derived table. + return NODE_FACTORY.select( + NODE_FACTORY.derivedTable(ParseNodeFactory.createTempAlias(), select), + HintNode.EMPTY_HINT_NODE, false, select.getSelect(), null, null, null, null, null, null, + select.getBindCount(), false, false, Collections. emptyList(), + select.getUdfParseNodes()); + } + + /** + * Create new {@link AliasedNode}s by every {@link ParseNode} in subquerySelectAliasedNodes and + * generate new aliases by {@link ParseNodeFactory#createTempAlias}. and generate new Aliases for + * subquerySelectAliasedNodes, + */ + private List createNewAliasedNodes(List subquerySelectAliasedNodes) { + List newAliasedNodes = + Lists. newArrayListWithExpectedSize(subquerySelectAliasedNodes.size()); + + this.addNewAliasedNodes(newAliasedNodes, subquerySelectAliasedNodes); + return newAliasedNodes; + } + + /** + * Add every {@link ParseNode} in oldSelectAliasedNodes to newSelectAliasedNodes and generate new + * aliases by {@link ParseNodeFactory#createTempAlias}. + */ + private void addNewAliasedNodes(List newSelectAliasedNodes, + List oldSelectAliasedNodes) { + for (int index = 0; index < oldSelectAliasedNodes.size(); index++) { + AliasedNode oldSelectAliasedNode = oldSelectAliasedNodes.get(index); + newSelectAliasedNodes.add(NODE_FACTORY.aliasedNode(ParseNodeFactory.createTempAlias(), + oldSelectAliasedNode.getNode())); + } - int count = lhsParseNodes.size(); - TableName rhsSubqueryTableName = NODE_FACTORY.table(null, rhsSubqueryTableAlias); - List joinEqualParseNodes = Lists.newArrayListWithExpectedSize( - count + (extractedJoinConditionParseNode == null ? 0: 1)); - for (int index = 0; index < count; index++) { - /** - * The +1 is to skip the first {@link LiteralParseNode#ONE} - */ - ParseNode rhsNode = NODE_FACTORY.column( - rhsSubqueryTableName, - rhsSubquerySelectAliasedNodes.get(index + 1).getAlias(), - null); - joinEqualParseNodes.add(NODE_FACTORY.equal(lhsParseNodes.get(index), rhsNode)); - } + } + + /** + * Get the join conditions in order to rewrite InSubquery to Join. + * @param rhsSubquerySelectAliasedNodes the first element is {@link LiteralParseNode#ONE}. + * @param extractedJoinConditionParseNode For NonCorrelated subquery, it is null. + * @param extractedSelectAliasNodeCount For NonCorrelated subquery, it is 0. 
+ */ + private ParseNode getJoinConditionNodeForInSubquery(ParseNode lhsParseNode, + List rhsSubquerySelectAliasedNodes, String rhsSubqueryTableAlias, + ParseNode extractedJoinConditionParseNode, int extractedSelectAliasNodeCount) + throws SQLException { + List lhsParseNodes; + if (lhsParseNode instanceof RowValueConstructorParseNode) { + lhsParseNodes = ((RowValueConstructorParseNode) lhsParseNode).getChildren(); + } else { + lhsParseNodes = Collections.singletonList(lhsParseNode); + } - if(extractedJoinConditionParseNode != null) { - joinEqualParseNodes.add(extractedJoinConditionParseNode); - } + if ( + lhsParseNodes.size() + != (rhsSubquerySelectAliasedNodes.size() - 1 - extractedSelectAliasNodeCount) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.SUBQUERY_RETURNS_DIFFERENT_NUMBER_OF_FIELDS).build().buildException(); + } - return joinEqualParseNodes.size() == 1 ? joinEqualParseNodes.get(0) : NODE_FACTORY.and(joinEqualParseNodes); + int count = lhsParseNodes.size(); + TableName rhsSubqueryTableName = NODE_FACTORY.table(null, rhsSubqueryTableAlias); + List joinEqualParseNodes = + Lists.newArrayListWithExpectedSize(count + (extractedJoinConditionParseNode == null ? 0 : 1)); + for (int index = 0; index < count; index++) { + /** + * The +1 is to skip the first {@link LiteralParseNode#ONE} + */ + ParseNode rhsNode = NODE_FACTORY.column(rhsSubqueryTableName, + rhsSubquerySelectAliasedNodes.get(index + 1).getAlias(), null); + joinEqualParseNodes.add(NODE_FACTORY.equal(lhsParseNodes.get(index), rhsNode)); } - - /** - * Combine every {@link ParseNode} in extractedAdditionalSelectAliasNodes and GroupBy clause of the - * subquerySelectStatementToUse to get new GroupBy ParseNodes. - * @param extractedAdditionalSelectAliasNodes - * @param subquerySelectStatementToUse - * @return - */ - private List createNewGroupByParseNodes( - List extractedAdditionalSelectAliasNodes, - SelectStatement subquerySelectStatementToUse) { - List newGroupByParseNodes = Lists.newArrayListWithExpectedSize( - extractedAdditionalSelectAliasNodes.size() + - subquerySelectStatementToUse.getGroupBy().size()); - - for (AliasedNode aliasedNode : extractedAdditionalSelectAliasNodes) { - newGroupByParseNodes.add(aliasedNode.getNode()); - } - newGroupByParseNodes.addAll(subquerySelectStatementToUse.getGroupBy()); - return newGroupByParseNodes; - } - - private static class JoinConditionExtractor extends AndRewriterBooleanParseNodeVisitor { - private final TableName tableName; - private ColumnResolveVisitor columnResolveVisitor; - private List additionalSubselectSelectAliasedNodes; - private List joinConditionParseNodes; - - public JoinConditionExtractor(SelectStatement subquery, ColumnResolver outerResolver, - PhoenixConnection connection, String tableAlias) throws SQLException { - super(NODE_FACTORY); - this.tableName = NODE_FACTORY.table(null, tableAlias); - ColumnResolver localResolver = FromCompiler.getResolverForQuery(subquery, connection); - this.columnResolveVisitor = new ColumnResolveVisitor(localResolver, outerResolver); - this.additionalSubselectSelectAliasedNodes = Lists.newArrayList(); - this.joinConditionParseNodes = Lists.newArrayList(); - } - - public List getAdditionalSubselectSelectAliasedNodes() { - return this.additionalSubselectSelectAliasedNodes; - } - - public ParseNode getJoinConditionParseNode() { - if (this.joinConditionParseNodes.isEmpty()) { - return null; - } - if (this.joinConditionParseNodes.size() == 1) { - return this.joinConditionParseNodes.get(0); - } + if 
(extractedJoinConditionParseNode != null) { + joinEqualParseNodes.add(extractedJoinConditionParseNode); + } - return NODE_FACTORY.and(this.joinConditionParseNodes); - } + return joinEqualParseNodes.size() == 1 + ? joinEqualParseNodes.get(0) + : NODE_FACTORY.and(joinEqualParseNodes); + } + + /** + * Combine every {@link ParseNode} in extractedAdditionalSelectAliasNodes and GroupBy clause of + * the subquerySelectStatementToUse to get new GroupBy ParseNodes. + */ + private List createNewGroupByParseNodes( + List extractedAdditionalSelectAliasNodes, + SelectStatement subquerySelectStatementToUse) { + List newGroupByParseNodes = + Lists.newArrayListWithExpectedSize(extractedAdditionalSelectAliasNodes.size() + + subquerySelectStatementToUse.getGroupBy().size()); + + for (AliasedNode aliasedNode : extractedAdditionalSelectAliasNodes) { + newGroupByParseNodes.add(aliasedNode.getNode()); + } + newGroupByParseNodes.addAll(subquerySelectStatementToUse.getGroupBy()); + return newGroupByParseNodes; + } + + private static class JoinConditionExtractor extends AndRewriterBooleanParseNodeVisitor { + private final TableName tableName; + private ColumnResolveVisitor columnResolveVisitor; + private List additionalSubselectSelectAliasedNodes; + private List joinConditionParseNodes; + + public JoinConditionExtractor(SelectStatement subquery, ColumnResolver outerResolver, + PhoenixConnection connection, String tableAlias) throws SQLException { + super(NODE_FACTORY); + this.tableName = NODE_FACTORY.table(null, tableAlias); + ColumnResolver localResolver = FromCompiler.getResolverForQuery(subquery, connection); + this.columnResolveVisitor = new ColumnResolveVisitor(localResolver, outerResolver); + this.additionalSubselectSelectAliasedNodes = Lists. newArrayList(); + this.joinConditionParseNodes = Lists. 
newArrayList(); + } - @Override - protected ParseNode leaveBooleanNode(ParseNode node, List l) - throws SQLException { - columnResolveVisitor.reset(); - node.accept(columnResolveVisitor); - ColumnResolveVisitor.ColumnResolveType type = columnResolveVisitor.getColumnResolveType(); - if (type != ColumnResolveVisitor.ColumnResolveType.NONE - && type != ColumnResolveVisitor.ColumnResolveType.LOCAL) - throw new SQLFeatureNotSupportedException("Does not support non-standard or non-equi correlated-subquery conditions."); - - return node; - } + public List getAdditionalSubselectSelectAliasedNodes() { + return this.additionalSubselectSelectAliasedNodes; + } - @Override - public ParseNode visitLeave(ComparisonParseNode node, List l) throws SQLException { - if (node.getFilterOp() != CompareOperator.EQUAL) - return leaveBooleanNode(node, l); - - columnResolveVisitor.reset(); - node.getLHS().accept(columnResolveVisitor); - ColumnResolveVisitor.ColumnResolveType lhsType = columnResolveVisitor.getColumnResolveType(); - columnResolveVisitor.reset(); - node.getRHS().accept(columnResolveVisitor); - ColumnResolveVisitor.ColumnResolveType rhsType = columnResolveVisitor.getColumnResolveType(); - if ((lhsType == ColumnResolveVisitor.ColumnResolveType.NONE || lhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL) - && (rhsType == ColumnResolveVisitor.ColumnResolveType.NONE || rhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL)) { - return node; - } - if (lhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL && rhsType == ColumnResolveVisitor.ColumnResolveType.OUTER) { - String alias = ParseNodeFactory.createTempAlias(); - this.additionalSubselectSelectAliasedNodes.add( - NODE_FACTORY.aliasedNode(alias, node.getLHS())); - ParseNode lhsNode = NODE_FACTORY.column(tableName, alias, null); - this.joinConditionParseNodes.add(NODE_FACTORY.equal(lhsNode, node.getRHS())); - return null; - } - if (lhsType == ColumnResolveVisitor.ColumnResolveType.OUTER && rhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL) { - String alias = ParseNodeFactory.createTempAlias(); - this.additionalSubselectSelectAliasedNodes.add( - NODE_FACTORY.aliasedNode(alias, node.getRHS())); - ParseNode rhsNode = NODE_FACTORY.column(tableName, alias, null); - this.joinConditionParseNodes.add(NODE_FACTORY.equal(node.getLHS(), rhsNode)); - return null; - } - - throw new SQLFeatureNotSupportedException("Does not support non-standard or non-equi correlated-subquery conditions."); - } + public ParseNode getJoinConditionParseNode() { + if (this.joinConditionParseNodes.isEmpty()) { + return null; + } + + if (this.joinConditionParseNodes.size() == 1) { + return this.joinConditionParseNodes.get(0); + } + + return NODE_FACTORY.and(this.joinConditionParseNodes); } - /* - * Class for resolving inner query column references - */ - private static class ColumnResolveVisitor extends StatelessTraverseAllParseNodeVisitor { - public enum ColumnResolveType {NONE, LOCAL, OUTER, MIXED}; + @Override + protected ParseNode leaveBooleanNode(ParseNode node, List l) throws SQLException { + columnResolveVisitor.reset(); + node.accept(columnResolveVisitor); + ColumnResolveVisitor.ColumnResolveType type = columnResolveVisitor.getColumnResolveType(); + if ( + type != ColumnResolveVisitor.ColumnResolveType.NONE + && type != ColumnResolveVisitor.ColumnResolveType.LOCAL + ) throw new SQLFeatureNotSupportedException( + "Does not support non-standard or non-equi correlated-subquery conditions."); + + return node; + } + + @Override + public ParseNode 
visitLeave(ComparisonParseNode node, List l) throws SQLException { + if (node.getFilterOp() != CompareOperator.EQUAL) return leaveBooleanNode(node, l); + + columnResolveVisitor.reset(); + node.getLHS().accept(columnResolveVisitor); + ColumnResolveVisitor.ColumnResolveType lhsType = columnResolveVisitor.getColumnResolveType(); + columnResolveVisitor.reset(); + node.getRHS().accept(columnResolveVisitor); + ColumnResolveVisitor.ColumnResolveType rhsType = columnResolveVisitor.getColumnResolveType(); + if ( + (lhsType == ColumnResolveVisitor.ColumnResolveType.NONE + || lhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL) + && (rhsType == ColumnResolveVisitor.ColumnResolveType.NONE + || rhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL) + ) { + return node; + } + if ( + lhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL + && rhsType == ColumnResolveVisitor.ColumnResolveType.OUTER + ) { + String alias = ParseNodeFactory.createTempAlias(); + this.additionalSubselectSelectAliasedNodes + .add(NODE_FACTORY.aliasedNode(alias, node.getLHS())); + ParseNode lhsNode = NODE_FACTORY.column(tableName, alias, null); + this.joinConditionParseNodes.add(NODE_FACTORY.equal(lhsNode, node.getRHS())); + return null; + } + if ( + lhsType == ColumnResolveVisitor.ColumnResolveType.OUTER + && rhsType == ColumnResolveVisitor.ColumnResolveType.LOCAL + ) { + String alias = ParseNodeFactory.createTempAlias(); + this.additionalSubselectSelectAliasedNodes + .add(NODE_FACTORY.aliasedNode(alias, node.getRHS())); + ParseNode rhsNode = NODE_FACTORY.column(tableName, alias, null); + this.joinConditionParseNodes.add(NODE_FACTORY.equal(node.getLHS(), rhsNode)); + return null; + } + + throw new SQLFeatureNotSupportedException( + "Does not support non-standard or non-equi correlated-subquery conditions."); + } + } + + /* + * Class for resolving inner query column references + */ + private static class ColumnResolveVisitor extends StatelessTraverseAllParseNodeVisitor { + public enum ColumnResolveType { + NONE, + LOCAL, + OUTER, + MIXED + }; + + private final ColumnResolver localResolver; + private final ColumnResolver outerResolver; + private ColumnResolveType type; + + public ColumnResolveVisitor(ColumnResolver localResolver, ColumnResolver outerResolver) { + this.localResolver = localResolver; + this.outerResolver = outerResolver; + this.type = ColumnResolveType.NONE; + } - private final ColumnResolver localResolver; - private final ColumnResolver outerResolver; - private ColumnResolveType type; + public void reset() { + this.type = ColumnResolveType.NONE; + } - public ColumnResolveVisitor(ColumnResolver localResolver, ColumnResolver outerResolver) { - this.localResolver = localResolver; - this.outerResolver = outerResolver; - this.type = ColumnResolveType.NONE; - } + public ColumnResolveType getColumnResolveType() { + return this.type; + } - public void reset() { - this.type = ColumnResolveType.NONE; - } - - public ColumnResolveType getColumnResolveType() { - return this.type; - } + @Override + public Void visit(ColumnParseNode node) throws SQLException { + // Inner query column definitions should shade those of outer query. 
+ try { + localResolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); + addType(true); + return null; + } catch (TableNotFoundException e) { + } catch (ColumnNotFoundException e) { + } catch (ColumnFamilyNotFoundException e) { + } + + outerResolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); + addType(false); + return null; + } - @Override - public Void visit(ColumnParseNode node) throws SQLException { - // Inner query column definitions should shade those of outer query. - try { - localResolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - addType(true); - return null; - } catch (TableNotFoundException e) { - } catch (ColumnNotFoundException e) { - } catch (ColumnFamilyNotFoundException e) { - } - - outerResolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - addType(false); - return null; - } - - private void addType(boolean isLocal) { - switch (this.type) { - case NONE: - this.type = isLocal ? ColumnResolveType.LOCAL : ColumnResolveType.OUTER; - break; - case LOCAL: - this.type = isLocal ? ColumnResolveType.LOCAL : ColumnResolveType.MIXED; - break; - case OUTER: - this.type = isLocal ? ColumnResolveType.MIXED : ColumnResolveType.OUTER; - break; - default: // MIXED do nothing - break; - } - } + private void addType(boolean isLocal) { + switch (this.type) { + case NONE: + this.type = isLocal ? ColumnResolveType.LOCAL : ColumnResolveType.OUTER; + break; + case LOCAL: + this.type = isLocal ? ColumnResolveType.LOCAL : ColumnResolveType.MIXED; + break; + case OUTER: + this.type = isLocal ? ColumnResolveType.MIXED : ColumnResolveType.OUTER; + break; + default: // MIXED do nothing + break; + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java index 93dadae7505..a183e2d1d6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/SubselectRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compile; import java.sql.SQLException; @@ -45,559 +44,522 @@ import org.apache.phoenix.parse.TableNode; import org.apache.phoenix.parse.TableWildcardParseNode; import org.apache.phoenix.parse.WildcardParseNode; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ParseNodeUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /* * Class for flattening derived-tables when possible. A derived-table can be * flattened if the merged statement preserves the same semantics as the original * statement. 
*/ public class SubselectRewriter extends ParseNodeRewriter { - private final String tableAlias; - private final Map aliasMap; - private boolean removeAlias = false; + private final String tableAlias; + private final Map aliasMap; + private boolean removeAlias = false; + + /** + * Add the preFilterParseNodes to Where statement or Having statement of the subselectStatement, + * depending on whether having GroupBy statement. Note: the preFilterParseNodes parameter must + * have already been rewritten by {@link #rewritePreFilterForSubselect}. + */ + public static SelectStatement applyPreFiltersForSubselect(SelectStatement subselectStatement, + List preFilterParseNodes, String subselectAlias) { + + if (preFilterParseNodes.isEmpty()) { + return subselectStatement; + } - /** - * Add the preFilterParseNodes to Where statement or Having statement of the subselectStatement, - * depending on whether having GroupBy statement. - * Note: the preFilterParseNodes parameter must have already been rewritten by {@link #rewritePreFilterForSubselect}. - * @param subselectStatement - * @param preFilterParseNodes - * @param subselectAlias - * @return - */ - public static SelectStatement applyPreFiltersForSubselect( - SelectStatement subselectStatement, - List preFilterParseNodes, - String subselectAlias) { + assert (isFilterCanPushDownToSelect(subselectStatement)); - if (preFilterParseNodes.isEmpty()) { - return subselectStatement; - } + List newFilterParseNodes = Lists. newArrayList(preFilterParseNodes); + if (subselectStatement.getGroupBy().isEmpty()) { + ParseNode where = subselectStatement.getWhere(); + if (where != null) { + newFilterParseNodes.add(where); + } + return NODE_FACTORY.select(subselectStatement, combine(newFilterParseNodes)); + } + + ParseNode having = subselectStatement.getHaving(); + if (having != null) { + newFilterParseNodes.add(having); + } + return NODE_FACTORY.select(subselectStatement, subselectStatement.getWhere(), + combine(newFilterParseNodes)); + } + + public static ParseNode rewritePreFilterForSubselect(ParseNode preFilterParseNode, + SelectStatement subselectStatement, String subselectAlias) throws SQLException { + SubselectRewriter subselectRewriter = + new SubselectRewriter(null, subselectStatement.getSelect(), subselectAlias); + return preFilterParseNode.accept(subselectRewriter); + } + + /** + * Check if a filter can push down to the statement as a preFilter, if true, the filter can be + * rewritten by {@link #rewritePreFilterForSubselect} and added to the statement by + * {@link #applyPreFiltersForSubselect}. + */ + public static boolean isFilterCanPushDownToSelect(SelectStatement statement) { + return statement.getLimit() == null + && (!statement.isAggregate() || !statement.getGroupBy().isEmpty()); + } + + /** + *
+   * Only append orderByNodes and postFilters; the optimization is left to {@link #flatten(SelectStatement, SelectStatement)}.
+   * An example:
+   * when the subselectStatement is: (SELECT reverse(loc_id), \"supplier_id\", name FROM " + JOIN_SUPPLIER_TABLE + " LIMIT 5) AS supp
+   * orderByNodes is: supp.\"supplier_id\"
+   * postFilterParseNodes is: supp.name != 'S1'
+   * we rewrite the subselectStatement as:
+   * (SELECT $2.$3,$2."supplier_id",$2.NAME FROM (SELECT  REVERSE(LOC_ID) $3,"supplier_id",NAME FROM SUPPLIERTABLE  LIMIT 5) $2 WHERE $2.NAME != 'S1' ORDER BY $2."supplier_id") AS supp
+   *
+ */ + public static SelectStatement applyOrderByAndPostFilters(SelectStatement subselectStatement, + List orderByNodes, String subselectTableAliasName, + List postFilterParseNodes) throws SQLException { + + if (orderByNodes == null) { + orderByNodes = Collections.emptyList(); + } + + if (postFilterParseNodes == null) { + postFilterParseNodes = Collections.emptyList(); + } - assert(isFilterCanPushDownToSelect(subselectStatement)); + if (orderByNodes.isEmpty() && postFilterParseNodes.isEmpty()) { + return subselectStatement; + } - List newFilterParseNodes = Lists. newArrayList(preFilterParseNodes); - if (subselectStatement.getGroupBy().isEmpty()) { - ParseNode where = subselectStatement.getWhere(); - if (where != null) { - newFilterParseNodes.add(where); - } - return NODE_FACTORY.select(subselectStatement, combine(newFilterParseNodes)); + List subselectAliasedNodes = subselectStatement.getSelect(); + List newOuterSelectAliasedNodes = + new ArrayList(subselectAliasedNodes.size()); + Map subselectAliasFullNameToNewColumnParseNode = + new HashMap(); + + String newSubselectTableAliasName = ParseNodeFactory.createTempAlias(); + List newSubselectAliasedNodes = null; + int index = 0; + for (AliasedNode subselectAliasedNode : subselectAliasedNodes) { + String aliasName = subselectAliasedNode.getAlias(); + ParseNode aliasParseNode = subselectAliasedNode.getNode(); + if (aliasName == null) { + aliasName = aliasParseNode.getAlias(); + } + if (aliasName == null) { + // if there is no alias,we generate a new alias, + // and added the new alias to the old subselectAliasedNodes + aliasName = ParseNodeFactory.createTempAlias(); + if (newSubselectAliasedNodes == null) { + newSubselectAliasedNodes = new ArrayList(subselectAliasedNodes.size()); + if (index > 0) { + newSubselectAliasedNodes.addAll(subselectAliasedNodes.subList(0, index)); + } } - - ParseNode having = subselectStatement.getHaving(); - if (having != null) { - newFilterParseNodes.add(having); + newSubselectAliasedNodes.add(NODE_FACTORY.aliasedNode(aliasName, aliasParseNode)); + } else { + if (newSubselectAliasedNodes != null) { + newSubselectAliasedNodes.add(subselectAliasedNode); } - return NODE_FACTORY.select(subselectStatement, subselectStatement.getWhere(), combine(newFilterParseNodes)); + } + + ColumnParseNode newColumnParseNode = NODE_FACTORY + .column(NODE_FACTORY.table(null, newSubselectTableAliasName), aliasName, aliasName); + subselectAliasFullNameToNewColumnParseNode.put(SchemaUtil.getColumnName( + subselectTableAliasName, SchemaUtil.normalizeIdentifier(aliasName)), newColumnParseNode); + /** + * The alias of AliasedNode is set to the same as newColumnParseNode, so when the rewritten + * selectStatement is flattened by {@link SubselectRewriter#flatten} later,the + * {@link AliasedNode#getAlias} could remain the same even if the {@link AliasedNode#getNode} + * is rewritten by {@link SubselectRewriter#flatten}. 
+ */ + AliasedNode newOuterSelectAliasNode = NODE_FACTORY.aliasedNode(aliasName, newColumnParseNode); + newOuterSelectAliasedNodes.add(newOuterSelectAliasNode); + index++; } - public static ParseNode rewritePreFilterForSubselect(ParseNode preFilterParseNode, SelectStatement subselectStatement, String subselectAlias) throws SQLException { - SubselectRewriter subselectRewriter = new SubselectRewriter( - null, - subselectStatement.getSelect(), - subselectAlias); - return preFilterParseNode.accept(subselectRewriter); + SubselectRewriter subselectRewriter = + new SubselectRewriter(subselectAliasFullNameToNewColumnParseNode); + List rewrittenOrderByNodes = null; + if (orderByNodes.size() > 0) { + rewrittenOrderByNodes = new ArrayList(orderByNodes.size()); + for (OrderByNode orderByNode : orderByNodes) { + ParseNode parseNode = orderByNode.getNode(); + rewrittenOrderByNodes.add(NODE_FACTORY.orderBy(parseNode.accept(subselectRewriter), + orderByNode.isNullsLast(), orderByNode.isAscending())); + } } - /** - * Check if a filter can push down to the statement as a preFilter, - * if true, the filter can be rewritten by {@link #rewritePreFilterForSubselect} and - * added to the statement by {@link #applyPreFiltersForSubselect}. - * @param statement - * @return - */ - public static boolean isFilterCanPushDownToSelect(SelectStatement statement) { - return statement.getLimit() == null && - (!statement.isAggregate() || !statement.getGroupBy().isEmpty()); - } - - /** - *
-     * Only append orderByNodes and postFilters, the optimization is left to {@link #flatten(SelectStatement, SelectStatement)}.
-     * an example :
-     * when the subselectStatment is : (SELECT reverse(loc_id), \"supplier_id\", name FROM " + JOIN_SUPPLIER_TABLE + " LIMIT 5) AS supp
-     * orderByNodes is  : supp.\"supplier_id\"
-     * postFilterParseNodes is : supp.name != 'S1'
-     * we rewrite the subselectStatment as :
-     * (SELECT $2.$3,$2."supplier_id",$2.NAME FROM (SELECT  REVERSE(LOC_ID) $3,"supplier_id",NAME FROM SUPPLIERTABLE  LIMIT 5) $2 WHERE $2.NAME != 'S1' ORDER BY $2."supplier_id") AS supp
-     *
-     *
- * @param subselectStatement - * @param orderByNodes - * @param subselectTableAliasName - * @param postFilterParseNodes - * @return - * @throws SQLException - */ - public static SelectStatement applyOrderByAndPostFilters( - SelectStatement subselectStatement, - List orderByNodes, - String subselectTableAliasName, - List postFilterParseNodes) throws SQLException { - - if(orderByNodes == null) { - orderByNodes = Collections.emptyList(); - } - - if(postFilterParseNodes == null) { - postFilterParseNodes = Collections.emptyList(); - } - - if(orderByNodes.isEmpty() && postFilterParseNodes.isEmpty()) { - return subselectStatement; - } - - List subselectAliasedNodes = subselectStatement.getSelect(); - List newOuterSelectAliasedNodes = new ArrayList(subselectAliasedNodes.size()); - Map subselectAliasFullNameToNewColumnParseNode = new HashMap(); - - String newSubselectTableAliasName = ParseNodeFactory.createTempAlias(); - List newSubselectAliasedNodes = null; - int index = 0; - for (AliasedNode subselectAliasedNode : subselectAliasedNodes) { - String aliasName = subselectAliasedNode.getAlias(); - ParseNode aliasParseNode = subselectAliasedNode.getNode(); - if (aliasName == null) { - aliasName = aliasParseNode.getAlias(); - } - if(aliasName == null) { - //if there is no alias,we generate a new alias, - //and added the new alias to the old subselectAliasedNodes - aliasName = ParseNodeFactory.createTempAlias(); - if(newSubselectAliasedNodes == null) { - newSubselectAliasedNodes = new ArrayList(subselectAliasedNodes.size()); - if(index > 0) { - newSubselectAliasedNodes.addAll(subselectAliasedNodes.subList(0, index)); - } - } - newSubselectAliasedNodes.add(NODE_FACTORY.aliasedNode(aliasName, aliasParseNode)); - } else { - if(newSubselectAliasedNodes != null) { - newSubselectAliasedNodes.add(subselectAliasedNode); - } - } - - ColumnParseNode newColumnParseNode = NODE_FACTORY.column( - NODE_FACTORY.table(null, newSubselectTableAliasName), - aliasName, - aliasName); - subselectAliasFullNameToNewColumnParseNode.put( - SchemaUtil.getColumnName(subselectTableAliasName, SchemaUtil.normalizeIdentifier(aliasName)), - newColumnParseNode); - /** - * The alias of AliasedNode is set to the same as newColumnParseNode, so when the rewritten - * selectStatement is flattened by {@link SubselectRewriter#flatten} later,the {@link AliasedNode#getAlias} - * could remain the same even if the {@link AliasedNode#getNode} is rewritten by {@link SubselectRewriter#flatten}. 
- */ - AliasedNode newOuterSelectAliasNode = NODE_FACTORY.aliasedNode(aliasName, newColumnParseNode); - newOuterSelectAliasedNodes.add(newOuterSelectAliasNode); - index++; - } - - SubselectRewriter subselectRewriter = new SubselectRewriter(subselectAliasFullNameToNewColumnParseNode); - List rewrittenOrderByNodes = null; - if(orderByNodes.size() > 0) { - rewrittenOrderByNodes = new ArrayList(orderByNodes.size()); - for (OrderByNode orderByNode : orderByNodes) { - ParseNode parseNode = orderByNode.getNode(); - rewrittenOrderByNodes.add(NODE_FACTORY.orderBy( - parseNode.accept(subselectRewriter), - orderByNode.isNullsLast(), - orderByNode.isAscending())); - } - } - - ParseNode newWhereParseNode = null; - if(postFilterParseNodes.size() > 0) { - List rewrittenPostFilterParseNodes = - new ArrayList(postFilterParseNodes.size()); - for(ParseNode postFilterParseNode : postFilterParseNodes) { - rewrittenPostFilterParseNodes.add(postFilterParseNode.accept(subselectRewriter)); - } - newWhereParseNode = combine(rewrittenPostFilterParseNodes); - } - - SelectStatement subselectStatementToUse = subselectStatement; - if(newSubselectAliasedNodes != null) { - subselectStatementToUse = NODE_FACTORY.select(subselectStatement, subselectStatement.isDistinct(), newSubselectAliasedNodes); - } - - return NODE_FACTORY.select( - NODE_FACTORY.derivedTable(newSubselectTableAliasName, subselectStatementToUse), - HintNode.EMPTY_HINT_NODE, - false, - newOuterSelectAliasedNodes, - newWhereParseNode, - null, - null, - rewrittenOrderByNodes, - null, - null, - 0, - false, - subselectStatementToUse.hasSequence(), - Collections. emptyList(), - subselectStatementToUse.getUdfParseNodes()); + ParseNode newWhereParseNode = null; + if (postFilterParseNodes.size() > 0) { + List rewrittenPostFilterParseNodes = + new ArrayList(postFilterParseNodes.size()); + for (ParseNode postFilterParseNode : postFilterParseNodes) { + rewrittenPostFilterParseNodes.add(postFilterParseNode.accept(subselectRewriter)); + } + newWhereParseNode = combine(rewrittenPostFilterParseNodes); } - /** - * If the selectStatement has a DerivedTableNode, pruning column of the - * {@link DerivedTableNode#getSelect()}. 
- * @param selectStatement - * @param pheonixConnection - * @return - * @throws SQLException - */ - private static SelectStatement pruneInnerSubselectAliasedNodes( - SelectStatement selectStatement, - PhoenixConnection pheonixConnection) throws SQLException { - TableNode fromTableNode = selectStatement.getFrom(); - if (fromTableNode == null || !(fromTableNode instanceof DerivedTableNode)) { - return selectStatement; - } + SelectStatement subselectStatementToUse = subselectStatement; + if (newSubselectAliasedNodes != null) { + subselectStatementToUse = NODE_FACTORY.select(subselectStatement, + subselectStatement.isDistinct(), newSubselectAliasedNodes); + } - DerivedTableNode derivedTableNode = (DerivedTableNode) fromTableNode; - SelectStatement subSelectStatement = derivedTableNode.getSelect(); - if (subSelectStatement.isUnion()) { - return selectStatement; - } - Set referencedColumnNames = - ParseNodeUtil.collectReferencedColumnNamesForSingleTable(selectStatement); - SelectStatement newSubselectStatement = pruneSelectAliasedNodes(subSelectStatement, referencedColumnNames, pheonixConnection); - if(newSubselectStatement != subSelectStatement) { - return NODE_FACTORY.select( - selectStatement, - NODE_FACTORY.derivedTable(derivedTableNode.getAlias(), newSubselectStatement)); - } - return selectStatement; + return NODE_FACTORY.select( + NODE_FACTORY.derivedTable(newSubselectTableAliasName, subselectStatementToUse), + HintNode.EMPTY_HINT_NODE, false, newOuterSelectAliasedNodes, newWhereParseNode, null, null, + rewrittenOrderByNodes, null, null, 0, false, subselectStatementToUse.hasSequence(), + Collections. emptyList(), subselectStatementToUse.getUdfParseNodes()); + } + + /** + * If the selectStatement has a DerivedTableNode, pruning column of the + * {@link DerivedTableNode#getSelect()}. + */ + private static SelectStatement pruneInnerSubselectAliasedNodes(SelectStatement selectStatement, + PhoenixConnection pheonixConnection) throws SQLException { + TableNode fromTableNode = selectStatement.getFrom(); + if (fromTableNode == null || !(fromTableNode instanceof DerivedTableNode)) { + return selectStatement; } + DerivedTableNode derivedTableNode = (DerivedTableNode) fromTableNode; + SelectStatement subSelectStatement = derivedTableNode.getSelect(); + if (subSelectStatement.isUnion()) { + return selectStatement; + } + Set referencedColumnNames = + ParseNodeUtil.collectReferencedColumnNamesForSingleTable(selectStatement); + SelectStatement newSubselectStatement = + pruneSelectAliasedNodes(subSelectStatement, referencedColumnNames, pheonixConnection); + if (newSubselectStatement != subSelectStatement) { + return NODE_FACTORY.select(selectStatement, + NODE_FACTORY.derivedTable(derivedTableNode.getAlias(), newSubselectStatement)); + } + return selectStatement; + } + + /** + * Pruning selectAliasedNodes according to referencedColumnNames, Note: the selectStatement is + * supposed to be a {@link DerivedTableNode} of an Outer SelectStatement, so according to + * FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode) , wildcard in selectAliasedNode + * is not supported. 
+ */ + public static SelectStatement pruneSelectAliasedNodes(SelectStatement selectStatement, + Set referencedColumnNames, PhoenixConnection phoenixConnection) throws SQLException { + + if (referencedColumnNames == null || referencedColumnNames.isEmpty()) { + return selectStatement; + } + if (selectStatement.isDistinct()) { + return selectStatement; + } /** - * Pruning selectAliasedNodes according to referencedColumnNames, - * Note: the selectStatement is supposed to be a {@link DerivedTableNode} of an Outer SelectStatement, - * so according to FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode) , - * wildcard in selectAliasedNode is not supported. - * @param selectStatement - * @param referencedColumnNames - * @param phoenixConnection - * @return - * @throws SQLException + * We must resolve the inner alias at first before column pruning, because the resolve may fail + * if the column is pruned. */ - public static SelectStatement pruneSelectAliasedNodes( - SelectStatement selectStatement, - Set referencedColumnNames, - PhoenixConnection phoenixConnection) throws SQLException { - - if(referencedColumnNames == null || referencedColumnNames.isEmpty()) { - return selectStatement; - } - if(selectStatement.isDistinct()) { - return selectStatement; - } + selectStatement = ParseNodeRewriter.resolveInternalAlias(selectStatement, phoenixConnection); + List selectAliasedNodes = selectStatement.getSelect(); + List newSelectAliasedNodes = new ArrayList(selectAliasedNodes.size()); + for (AliasedNode selectAliasedNode : selectAliasedNodes) { + String aliasName = selectAliasedNode.getAlias(); + ParseNode aliasParseNode = selectAliasedNode.getNode(); + if ( + aliasParseNode instanceof WildcardParseNode + || aliasParseNode instanceof TableWildcardParseNode + || aliasParseNode instanceof FamilyWildcardParseNode + ) { /** - * We must resolve the inner alias at first before column pruning, because the resolve may fail - * if the column is pruned. + * Wildcard in subselect is not supported. See also + * {@link FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode)}. */ - selectStatement = ParseNodeRewriter.resolveInternalAlias(selectStatement, phoenixConnection); - List selectAliasedNodes = selectStatement.getSelect(); - List newSelectAliasedNodes = new ArrayList(selectAliasedNodes.size()); - for (AliasedNode selectAliasedNode : selectAliasedNodes) { - String aliasName = selectAliasedNode.getAlias(); - ParseNode aliasParseNode = selectAliasedNode.getNode(); - if (aliasParseNode instanceof WildcardParseNode || - aliasParseNode instanceof TableWildcardParseNode || - aliasParseNode instanceof FamilyWildcardParseNode) { - /** - * Wildcard in subselect is not supported. - * See also {@link FromCompiler.MultiTableColumnResolver#visit(DerivedTableNode)}. 
- */ - throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported."); - } - if (aliasName == null) { - aliasName = aliasParseNode.getAlias(); - } - if(aliasName != null) { - aliasName = SchemaUtil.normalizeIdentifier(aliasName); - if(referencedColumnNames.contains(aliasName)) { - newSelectAliasedNodes.add(selectAliasedNode); - } - } + throw new SQLFeatureNotSupportedException("Wildcard in subqueries not supported."); + } + if (aliasName == null) { + aliasName = aliasParseNode.getAlias(); + } + if (aliasName != null) { + aliasName = SchemaUtil.normalizeIdentifier(aliasName); + if (referencedColumnNames.contains(aliasName)) { + newSelectAliasedNodes.add(selectAliasedNode); } + } + } - if(newSelectAliasedNodes.isEmpty() || newSelectAliasedNodes.equals(selectAliasedNodes)) { - //if the newSelectAliasedNodes.isEmpty(), the outer select may be wildcard or constant, - //so remain the same. - return selectStatement; - } - return NODE_FACTORY.select( - selectStatement, - selectStatement.isDistinct(), - newSelectAliasedNodes); - } - - public static SelectStatement flatten(SelectStatement select, PhoenixConnection connection) throws SQLException { - TableNode from = select.getFrom(); - while (from != null && from instanceof DerivedTableNode) { - DerivedTableNode derivedTable = (DerivedTableNode) from; - SelectStatement subselect = derivedTable.getSelect(); - if (subselect.isUnion()) { - break; - } - ColumnResolver resolver = FromCompiler.getResolverForQuery(subselect, connection); - SubselectRewriter rewriter = new SubselectRewriter(resolver, subselect.getSelect(), derivedTable.getAlias()); - SelectStatement ret = rewriter.flatten(select, subselect); - if (ret == select) { - break; - } - select = ret; - from = select.getFrom(); - } - /** - * Pruning column for subselect after flatten. - */ - return pruneInnerSubselectAliasedNodes(select, connection); - } - - private SubselectRewriter(ColumnResolver resolver, List aliasedNodes, String tableAlias) { - super(resolver, aliasedNodes.size()); - this.tableAlias = tableAlias; - this.aliasMap = new HashMap(); - for (AliasedNode aliasedNode : aliasedNodes) { - String alias = aliasedNode.getAlias(); - ParseNode node = aliasedNode.getNode(); - if (alias == null) { - alias = SchemaUtil.normalizeIdentifier(node.getAlias()); - } - if (alias != null) { - aliasMap.put(SchemaUtil.getColumnName(tableAlias, alias), node); - } - } + if (newSelectAliasedNodes.isEmpty() || newSelectAliasedNodes.equals(selectAliasedNodes)) { + // if the newSelectAliasedNodes.isEmpty(), the outer select may be wildcard or constant, + // so remain the same. 
+ return selectStatement; } - - private SubselectRewriter(Map selectAliasFullNameToAliasParseNode) { - super(null, selectAliasFullNameToAliasParseNode.size()); - this.tableAlias = null; - this.aliasMap = selectAliasFullNameToAliasParseNode; + return NODE_FACTORY.select(selectStatement, selectStatement.isDistinct(), + newSelectAliasedNodes); + } + + public static SelectStatement flatten(SelectStatement select, PhoenixConnection connection) + throws SQLException { + TableNode from = select.getFrom(); + while (from != null && from instanceof DerivedTableNode) { + DerivedTableNode derivedTable = (DerivedTableNode) from; + SelectStatement subselect = derivedTable.getSelect(); + if (subselect.isUnion()) { + break; + } + ColumnResolver resolver = FromCompiler.getResolverForQuery(subselect, connection); + SubselectRewriter rewriter = + new SubselectRewriter(resolver, subselect.getSelect(), derivedTable.getAlias()); + SelectStatement ret = rewriter.flatten(select, subselect); + if (ret == select) { + break; + } + select = ret; + from = select.getFrom(); } - /** - * if the OrderBy of outerSelectStatement is prefix of innerSelectStatement, - * we can remove the OrderBy of outerSelectStatement. - * @param outerSelectStatement - * @param innerSelectStatement - * @return - * @throws SQLException + * Pruning column for subselect after flatten. */ - private SelectStatement removeOuterSelectStatementOrderByIfNecessary( - SelectStatement outerSelectStatement, SelectStatement innerSelectStatement) throws SQLException { - if(outerSelectStatement.isDistinct() || - outerSelectStatement.isAggregate() || - (outerSelectStatement.getGroupBy() != null && !outerSelectStatement.getGroupBy().isEmpty()) || - outerSelectStatement.isJoin() || - outerSelectStatement.isUnion()) { - return outerSelectStatement; - } + return pruneInnerSubselectAliasedNodes(select, connection); + } + + private SubselectRewriter(ColumnResolver resolver, List aliasedNodes, + String tableAlias) { + super(resolver, aliasedNodes.size()); + this.tableAlias = tableAlias; + this.aliasMap = new HashMap(); + for (AliasedNode aliasedNode : aliasedNodes) { + String alias = aliasedNode.getAlias(); + ParseNode node = aliasedNode.getNode(); + if (alias == null) { + alias = SchemaUtil.normalizeIdentifier(node.getAlias()); + } + if (alias != null) { + aliasMap.put(SchemaUtil.getColumnName(tableAlias, alias), node); + } + } + } + + private SubselectRewriter(Map selectAliasFullNameToAliasParseNode) { + super(null, selectAliasFullNameToAliasParseNode.size()); + this.tableAlias = null; + this.aliasMap = selectAliasFullNameToAliasParseNode; + } + + /** + * if the OrderBy of outerSelectStatement is prefix of innerSelectStatement, we can remove the + * OrderBy of outerSelectStatement. 
+ */ + private SelectStatement removeOuterSelectStatementOrderByIfNecessary( + SelectStatement outerSelectStatement, SelectStatement innerSelectStatement) + throws SQLException { + if ( + outerSelectStatement.isDistinct() || outerSelectStatement.isAggregate() + || (outerSelectStatement.getGroupBy() != null + && !outerSelectStatement.getGroupBy().isEmpty()) + || outerSelectStatement.isJoin() || outerSelectStatement.isUnion() + ) { + return outerSelectStatement; + } - List outerOrderByNodes = outerSelectStatement.getOrderBy(); - if(outerOrderByNodes == null || outerOrderByNodes.isEmpty()) { - return outerSelectStatement; - } + List outerOrderByNodes = outerSelectStatement.getOrderBy(); + if (outerOrderByNodes == null || outerOrderByNodes.isEmpty()) { + return outerSelectStatement; + } - if(this.isOuterOrderByNodesPrefixOfInner(innerSelectStatement.getOrderBy(), outerOrderByNodes)) { - return NODE_FACTORY.select(outerSelectStatement, (List)null); - } - return outerSelectStatement; + if ( + this.isOuterOrderByNodesPrefixOfInner(innerSelectStatement.getOrderBy(), outerOrderByNodes) + ) { + return NODE_FACTORY.select(outerSelectStatement, (List) null); } + return outerSelectStatement; + } - /** - * check if outerOrderByNodes is prefix of innerOrderByNodes. - * @param selectStatement - * @param outerOrderByNodes - * @return - */ - private boolean isOuterOrderByNodesPrefixOfInner( - List innerOrderByNodes, - List outerOrderByNodes) throws SQLException { + /** + * check if outerOrderByNodes is prefix of innerOrderByNodes. + */ + private boolean isOuterOrderByNodesPrefixOfInner(List innerOrderByNodes, + List outerOrderByNodes) throws SQLException { - assert outerOrderByNodes != null && outerOrderByNodes.size() > 0; + assert outerOrderByNodes != null && outerOrderByNodes.size() > 0; - if(innerOrderByNodes == null || outerOrderByNodes.size() > innerOrderByNodes.size()) { - return false; - } + if (innerOrderByNodes == null || outerOrderByNodes.size() > innerOrderByNodes.size()) { + return false; + } - Iterator innerOrderByNodeIter = innerOrderByNodes.iterator(); - for(OrderByNode outerOrderByNode : outerOrderByNodes) { - ParseNode outerOrderByParseNode = outerOrderByNode.getNode(); - OrderByNode rewrittenOuterOrderByNode = NODE_FACTORY.orderBy( - outerOrderByParseNode.accept(this), - outerOrderByNode.isNullsLast(), - outerOrderByNode.isAscending()); - assert innerOrderByNodeIter.hasNext(); - OrderByNode innerOrderByNode = innerOrderByNodeIter.next(); - if(!innerOrderByNode.equals(rewrittenOuterOrderByNode)) { - return false; - } - } - return true; - } - - private SelectStatement flatten(SelectStatement select, SelectStatement subselect) throws SQLException { - // Replace aliases in sub-select first. 
- subselect = ParseNodeRewriter.rewrite(subselect, this); - - ParseNode whereRewrite = subselect.getWhere(); - List groupByRewrite = subselect.getGroupBy(); - ParseNode havingRewrite = subselect.getHaving(); - List orderByRewrite = subselect.getOrderBy(); - LimitNode limitRewrite = subselect.getLimit(); - OffsetNode offsetRewrite = subselect.getOffset(); - HintNode hintRewrite = subselect.getHint(); - boolean isDistinctRewrite = subselect.isDistinct(); - boolean isAggregateRewrite = subselect.isAggregate(); - - ParseNode where = select.getWhere(); - if (where != null) { - if (subselect.getLimit() != null || (subselect.isAggregate() && subselect.getGroupBy().isEmpty())) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - ParseNode postFilter = where.accept(this); - if (subselect.getGroupBy().isEmpty()) { - whereRewrite = whereRewrite == null ? postFilter : NODE_FACTORY.and(Arrays.asList(whereRewrite, postFilter)); - } else { - havingRewrite = havingRewrite == null ? postFilter : NODE_FACTORY.and(Arrays.asList(havingRewrite, postFilter)); - } - } - - if (select.isDistinct()) { - if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - isDistinctRewrite = true; - orderByRewrite = null; - } - - if (select.isAggregate()) { - if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - isAggregateRewrite = true; - orderByRewrite = null; - } - - List groupBy = select.getGroupBy(); - if (!groupBy.isEmpty()) { - if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - groupByRewrite = Lists.newArrayListWithExpectedSize(groupBy.size()); - for (ParseNode node : groupBy) { - groupByRewrite.add(node.accept(this)); - } - if (select.getHaving() != null) { - havingRewrite = select.getHaving().accept(this); - } - orderByRewrite = null; - } - - List selectNodes = select.getSelect(); - List selectNodesRewrite = Lists.newArrayListWithExpectedSize(selectNodes.size()); - for (AliasedNode aliasedNode : selectNodes) { - ParseNode node = aliasedNode.getNode(); - if (node instanceof WildcardParseNode - || (node instanceof TableWildcardParseNode - && ((TableWildcardParseNode) node).getTableName().toString(). - equals(tableAlias))) { - for (AliasedNode aNode : subselect.getSelect()) { - String alias = aNode.getAlias(); - String aliasRewrite = alias == null ? 
null : SchemaUtil.getColumnName(tableAlias, alias); - selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasRewrite, aNode.getNode())); - } - } else { - selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasedNode.getAlias(), node.accept(this))); - } - } - - List orderBy = select.getOrderBy(); - if (!orderBy.isEmpty()) { - if (subselect.getLimit() != null) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - orderByRewrite = Lists.newArrayListWithExpectedSize(orderBy.size()); - for (OrderByNode orderByNode : orderBy) { - ParseNode node = orderByNode.getNode(); - orderByRewrite.add(NODE_FACTORY.orderBy(node.accept(this), orderByNode.isNullsLast(), orderByNode.isAscending())); - } + Iterator innerOrderByNodeIter = innerOrderByNodes.iterator(); + for (OrderByNode outerOrderByNode : outerOrderByNodes) { + ParseNode outerOrderByParseNode = outerOrderByNode.getNode(); + OrderByNode rewrittenOuterOrderByNode = + NODE_FACTORY.orderBy(outerOrderByParseNode.accept(this), outerOrderByNode.isNullsLast(), + outerOrderByNode.isAscending()); + assert innerOrderByNodeIter.hasNext(); + OrderByNode innerOrderByNode = innerOrderByNodeIter.next(); + if (!innerOrderByNode.equals(rewrittenOuterOrderByNode)) { + return false; + } + } + return true; + } + + private SelectStatement flatten(SelectStatement select, SelectStatement subselect) + throws SQLException { + // Replace aliases in sub-select first. + subselect = ParseNodeRewriter.rewrite(subselect, this); + + ParseNode whereRewrite = subselect.getWhere(); + List groupByRewrite = subselect.getGroupBy(); + ParseNode havingRewrite = subselect.getHaving(); + List orderByRewrite = subselect.getOrderBy(); + LimitNode limitRewrite = subselect.getLimit(); + OffsetNode offsetRewrite = subselect.getOffset(); + HintNode hintRewrite = subselect.getHint(); + boolean isDistinctRewrite = subselect.isDistinct(); + boolean isAggregateRewrite = subselect.isAggregate(); + + ParseNode where = select.getWhere(); + if (where != null) { + if ( + subselect.getLimit() != null + || (subselect.isAggregate() && subselect.getGroupBy().isEmpty()) + ) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } + ParseNode postFilter = where.accept(this); + if (subselect.getGroupBy().isEmpty()) { + whereRewrite = whereRewrite == null + ? postFilter + : NODE_FACTORY.and(Arrays. asList(whereRewrite, postFilter)); + } else { + havingRewrite = havingRewrite == null + ? postFilter + : NODE_FACTORY.and(Arrays. asList(havingRewrite, postFilter)); + } + } + + if (select.isDistinct()) { + if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } + isDistinctRewrite = true; + orderByRewrite = null; + } + + if (select.isAggregate()) { + if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } + isAggregateRewrite = true; + orderByRewrite = null; + } + + List groupBy = select.getGroupBy(); + if (!groupBy.isEmpty()) { + if (subselect.getLimit() != null || subselect.isAggregate() || subselect.isDistinct()) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } + groupByRewrite = Lists. 
newArrayListWithExpectedSize(groupBy.size()); + for (ParseNode node : groupBy) { + groupByRewrite.add(node.accept(this)); + } + if (select.getHaving() != null) { + havingRewrite = select.getHaving().accept(this); + } + orderByRewrite = null; + } + + List selectNodes = select.getSelect(); + List selectNodesRewrite = Lists.newArrayListWithExpectedSize(selectNodes.size()); + for (AliasedNode aliasedNode : selectNodes) { + ParseNode node = aliasedNode.getNode(); + if ( + node instanceof WildcardParseNode || (node instanceof TableWildcardParseNode + && ((TableWildcardParseNode) node).getTableName().toString().equals(tableAlias)) + ) { + for (AliasedNode aNode : subselect.getSelect()) { + String alias = aNode.getAlias(); + String aliasRewrite = alias == null ? null : SchemaUtil.getColumnName(tableAlias, alias); + selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasRewrite, aNode.getNode())); } - - OffsetNode offset = select.getOffset(); - if (offsetRewrite != null || (limitRewrite != null && offset != null)) { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); + } else { + selectNodesRewrite.add(NODE_FACTORY.aliasedNode(aliasedNode.getAlias(), node.accept(this))); + } + } + + List orderBy = select.getOrderBy(); + if (!orderBy.isEmpty()) { + if (subselect.getLimit() != null) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } + orderByRewrite = Lists.newArrayListWithExpectedSize(orderBy.size()); + for (OrderByNode orderByNode : orderBy) { + ParseNode node = orderByNode.getNode(); + orderByRewrite.add(NODE_FACTORY.orderBy(node.accept(this), orderByNode.isNullsLast(), + orderByNode.isAscending())); + } + } + + OffsetNode offset = select.getOffset(); + if (offsetRewrite != null || (limitRewrite != null && offset != null)) { + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); + } else { + offsetRewrite = offset; + } + + LimitNode limit = select.getLimit(); + if (limit != null) { + if (limitRewrite == null) { + limitRewrite = limit; + } else { + Integer limitValue = LimitCompiler.compile(null, select); + Integer limitValueSubselect = LimitCompiler.compile(null, subselect); + if (limitValue != null && limitValueSubselect != null) { + limitRewrite = limitValue < limitValueSubselect ? limit : limitRewrite; } else { - offsetRewrite = offset; - } - - LimitNode limit = select.getLimit(); - if (limit != null) { - if (limitRewrite == null) { - limitRewrite = limit; - } else { - Integer limitValue = LimitCompiler.compile(null, select); - Integer limitValueSubselect = LimitCompiler.compile(null, subselect); - if (limitValue != null && limitValueSubselect != null) { - limitRewrite = limitValue < limitValueSubselect ? limit : limitRewrite; - } else { - return removeOuterSelectStatementOrderByIfNecessary(select,subselect); - } - } + return removeOuterSelectStatementOrderByIfNecessary(select, subselect); } + } + } - HintNode hint = select.getHint(); - if (hint != null) { - hintRewrite = hintRewrite == null ? 
hint : HintNode.combine(hint, hintRewrite); - } - - SelectStatement stmt = NODE_FACTORY.select(subselect.getFrom(), hintRewrite, isDistinctRewrite, - selectNodesRewrite, whereRewrite, groupByRewrite, havingRewrite, orderByRewrite, limitRewrite, - offsetRewrite, select.getBindCount(), isAggregateRewrite, select.hasSequence(), select.getSelects(), - select.getUdfParseNodes()); - if (tableAlias != null) { - this.removeAlias = true; - stmt = ParseNodeRewriter.rewrite(stmt, this); - } - return stmt; - } - - @Override - public ParseNode visit(ColumnParseNode node) throws SQLException { - if (node.getTableName() == null) - return super.visit(node); - - if (removeAlias) { - if (node.getTableName().equals(tableAlias)) { - return NODE_FACTORY.column(null, node.getName(), node.getAlias()); - } - return super.visit(node); - } - - ParseNode aliasedNode = aliasMap.get(node.getFullName()); - if (aliasedNode != null) { - return aliasedNode; - } - return node; - } - - private static ParseNode combine(List nodes) { - if (nodes.isEmpty()) - return null; - - if (nodes.size() == 1) - return nodes.get(0); - - return NODE_FACTORY.and(nodes); + HintNode hint = select.getHint(); + if (hint != null) { + hintRewrite = hintRewrite == null ? hint : HintNode.combine(hint, hintRewrite); + } + + SelectStatement stmt = NODE_FACTORY.select(subselect.getFrom(), hintRewrite, isDistinctRewrite, + selectNodesRewrite, whereRewrite, groupByRewrite, havingRewrite, orderByRewrite, limitRewrite, + offsetRewrite, select.getBindCount(), isAggregateRewrite, select.hasSequence(), + select.getSelects(), select.getUdfParseNodes()); + if (tableAlias != null) { + this.removeAlias = true; + stmt = ParseNodeRewriter.rewrite(stmt, this); + } + return stmt; + } + + @Override + public ParseNode visit(ColumnParseNode node) throws SQLException { + if (node.getTableName() == null) return super.visit(node); + + if (removeAlias) { + if (node.getTableName().equals(tableAlias)) { + return NODE_FACTORY.column(null, node.getName(), node.getAlias()); + } + return super.visit(node); } + + ParseNode aliasedNode = aliasMap.get(node.getFullName()); + if (aliasedNode != null) { + return aliasedNode; + } + return node; + } + + private static ParseNode combine(List nodes) { + if (nodes.isEmpty()) return null; + + if (nodes.size() == 1) return nodes.get(0); + + return NODE_FACTORY.and(nodes); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java index d8238c05be1..10e9431633b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,8 +31,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.htrace.Sampler; import org.apache.htrace.TraceScope; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.execute.visitor.QueryPlanVisitor; @@ -71,248 +70,241 @@ public class TraceQueryPlan implements QueryPlan { - private TraceStatement traceStatement = null; - private PhoenixStatement stmt = null; - private StatementContext context = null; - private boolean first = true; - - private static final RowProjector TRACE_PROJECTOR; - static { - List projectedColumns = new ArrayList(); - PName colName = PNameFactory.newName(MetricInfo.TRACE.columnName); - PColumn column = - new PColumnImpl(PNameFactory.newName(MetricInfo.TRACE.columnName), null, - PLong.INSTANCE, null, null, false, 0, SortOrder.getDefault(), 0, null, - false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - List columns = new ArrayList(); - columns.add(column); - Expression expression = - new RowKeyColumnExpression(column, new RowKeyValueAccessor(columns, 0)); - projectedColumns.add(new ExpressionProjector(MetricInfo.TRACE.columnName, MetricInfo.TRACE.columnName, "", expression, - true)); - int estimatedByteSize = SizedUtil.KEY_VALUE_SIZE + PLong.INSTANCE.getByteSize(); - TRACE_PROJECTOR = new RowProjector(projectedColumns, estimatedByteSize, false); + private TraceStatement traceStatement = null; + private PhoenixStatement stmt = null; + private StatementContext context = null; + private boolean first = true; + + private static final RowProjector TRACE_PROJECTOR; + static { + List projectedColumns = new ArrayList(); + PName colName = PNameFactory.newName(MetricInfo.TRACE.columnName); + PColumn column = new PColumnImpl(PNameFactory.newName(MetricInfo.TRACE.columnName), null, + PLong.INSTANCE, null, null, false, 0, SortOrder.getDefault(), 0, null, false, null, false, + false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); + List columns = new ArrayList(); + columns.add(column); + Expression expression = new RowKeyColumnExpression(column, new RowKeyValueAccessor(columns, 0)); + projectedColumns.add(new ExpressionProjector(MetricInfo.TRACE.columnName, + MetricInfo.TRACE.columnName, "", expression, true)); + int estimatedByteSize = SizedUtil.KEY_VALUE_SIZE + PLong.INSTANCE.getByteSize(); + TRACE_PROJECTOR = new RowProjector(projectedColumns, estimatedByteSize, false); + } + + public TraceQueryPlan(TraceStatement traceStatement, PhoenixStatement stmt) { + this.traceStatement = traceStatement; + this.stmt = stmt; + this.context = new StatementContext(stmt); + } + + @Override + public Operation getOperation() { + return traceStatement.getOperation(); + } + + @Override + public StatementContext getContext() { + return this.context; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } + + @Override + public ResultIterator iterator() throws SQLException { + return 
iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + return iterator(scanGrouper); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + final PhoenixConnection conn = stmt.getConnection(); + if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) { + return ResultIterator.EMPTY_ITERATOR; } - - public TraceQueryPlan(TraceStatement traceStatement, PhoenixStatement stmt ) { - this.traceStatement = traceStatement; - this.stmt = stmt; - this.context = new StatementContext(stmt); - } - - @Override - public Operation getOperation() { - return traceStatement.getOperation(); - } - - @Override - public StatementContext getContext() { - return this.context; + return new TraceQueryResultIterator(conn); + } + + @Override + public long getEstimatedSize() { + return PLong.INSTANCE.getByteSize(); + } + + @Override + public Cost getCost() { + return Cost.ZERO; + } + + @Override + public Set getSourceRefs() { + return Collections.emptySet(); + } + + @Override + public TableRef getTableRef() { + return null; + } + + @Override + public RowProjector getProjector() { + return TRACE_PROJECTOR; + } + + @Override + public Integer getLimit() { + return null; + } + + @Override + public Integer getOffset() { + return null; + } + + @Override + public OrderBy getOrderBy() { + return OrderBy.EMPTY_ORDER_BY; + } + + @Override + public GroupBy getGroupBy() { + return GroupBy.EMPTY_GROUP_BY; + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public FilterableStatement getStatement() { + return null; + } + + @Override + public boolean isDegenerate() { + return false; + } + + @Override + public boolean isRowKeyOrdered() { + return false; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return ExplainPlan.EMPTY_PLAN; + } + + @Override + public boolean useRoundRobinIterator() { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public Long getEstimatedRowsToScan() { + return 0l; + } + + @Override + public Long getEstimatedBytesToScan() { + return 0l; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } + + @Override + public List getOutputOrderBys() { + return Collections. 
emptyList(); + } + + @Override + public boolean isApplicable() { + return true; + } + + private class TraceQueryResultIterator implements ResultIterator { + + private final PhoenixConnection conn; + + public TraceQueryResultIterator(PhoenixConnection conn) { + this.conn = conn; } @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); + public void close() throws SQLException { } @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return iterator(scanGrouper); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - final PhoenixConnection conn = stmt.getConnection(); - if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) { - return ResultIterator.EMPTY_ITERATOR; + public Tuple next() throws SQLException { + if (!first) return null; + TraceScope traceScope = conn.getTraceScope(); + if (traceStatement.isTraceOn()) { + conn.setSampler(Tracing.getConfiguredSampler(traceStatement)); + if (conn.getSampler() == Sampler.NEVER) { + closeTraceScope(conn); } - return new TraceQueryResultIterator(conn); - } - - @Override - public long getEstimatedSize() { - return PLong.INSTANCE.getByteSize(); - } - - @Override - public Cost getCost() { - return Cost.ZERO; - } - - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } - - @Override - public TableRef getTableRef() { - return null; - } - - @Override - public RowProjector getProjector() { - return TRACE_PROJECTOR; - } - - @Override - public Integer getLimit() { - return null; - } - - @Override - public Integer getOffset() { - return null; - } - - @Override - public OrderBy getOrderBy() { - return OrderBy.EMPTY_ORDER_BY; - } - - @Override - public GroupBy getGroupBy() { - return GroupBy.EMPTY_GROUP_BY; - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public FilterableStatement getStatement() { - return null; - } - - @Override - public boolean isDegenerate() { - return false; - } - - @Override - public boolean isRowKeyOrdered() { - return false; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return ExplainPlan.EMPTY_PLAN; - } - - @Override - public boolean useRoundRobinIterator() { - return false; - } - - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); - } - - @Override - public Long getEstimatedRowsToScan() { - return 0l; - } - - @Override - public Long getEstimatedBytesToScan() { - return 0l; + if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) { + traceScope = Tracing.startNewSpan(conn, "Enabling trace"); + if (traceScope.getSpan() != null) { + conn.setTraceScope(traceScope); + } else { + closeTraceScope(conn); + } + } + } else { + closeTraceScope(conn); + conn.setSampler(Sampler.NEVER); + } + if (traceScope == null || traceScope.getSpan() == null) return null; + first = false; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ParseNodeFactory factory = new ParseNodeFactory(); + LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId()); + LiteralExpression expression = + LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS); + 
expression.evaluate(null, ptr); + byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr); + Cell cell = PhoenixKeyValueUtil.newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(), + HConstants.EMPTY_BYTE_ARRAY); + List cells = new ArrayList(1); + cells.add(cell); + return new ResultTuple(Result.create(cells)); } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; + private void closeTraceScope(final PhoenixConnection conn) { + if (conn.getTraceScope() != null) { + conn.getTraceScope().close(); + conn.setTraceScope(null); + } } @Override - public List getOutputOrderBys() { - return Collections. emptyList(); + public void explain(List planSteps) { } @Override - public boolean isApplicable() { - return true; - } - - private class TraceQueryResultIterator implements ResultIterator { - - private final PhoenixConnection conn; - - public TraceQueryResultIterator(PhoenixConnection conn) { - this.conn = conn; - } - - @Override - public void close() throws SQLException { - } - - @Override - public Tuple next() throws SQLException { - if(!first) return null; - TraceScope traceScope = conn.getTraceScope(); - if (traceStatement.isTraceOn()) { - conn.setSampler(Tracing.getConfiguredSampler(traceStatement)); - if (conn.getSampler() == Sampler.NEVER) { - closeTraceScope(conn); - } - if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) { - traceScope = Tracing.startNewSpan(conn, "Enabling trace"); - if (traceScope.getSpan() != null) { - conn.setTraceScope(traceScope); - } else { - closeTraceScope(conn); - } - } - } else { - closeTraceScope(conn); - conn.setSampler(Sampler.NEVER); - } - if (traceScope == null || traceScope.getSpan() == null) return null; - first = false; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ParseNodeFactory factory = new ParseNodeFactory(); - LiteralParseNode literal = - factory.literal(traceScope.getSpan().getTraceId()); - LiteralExpression expression = - LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, - Determinism.ALWAYS); - expression.evaluate(null, ptr); - byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr); - Cell cell = - PhoenixKeyValueUtil - .newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, - EnvironmentEdgeManager.currentTimeMillis(), - HConstants.EMPTY_BYTE_ARRAY); - List cells = new ArrayList(1); - cells.add(cell); - return new ResultTuple(Result.create(cells)); - } - - private void closeTraceScope(final PhoenixConnection conn) { - if(conn.getTraceScope()!=null) { - conn.getTraceScope().close(); - conn.setTraceScope(null); - } - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java index 780d91891e9..502dac8215f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +16,9 @@ * limitations under the License. */ package org.apache.phoenix.compile; -import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY; + import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; +import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY; import java.sql.SQLException; import java.util.ArrayList; @@ -28,7 +29,6 @@ import java.util.List; import java.util.Set; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.parse.AliasedNode; import org.apache.phoenix.parse.ColumnParseNode; import org.apache.phoenix.parse.FamilyWildcardParseNode; @@ -53,225 +53,218 @@ import org.apache.phoenix.schema.ProjectedColumn; import org.apache.phoenix.schema.SaltingUtil; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class TupleProjectionCompiler { - public static final PName PROJECTED_TABLE_SCHEMA = PNameFactory.newName("."); - public static final EnumSet PROJECTED_TABLE_TYPES = EnumSet.of(PTableType.TABLE, - PTableType.INDEX, PTableType.VIEW, PTableType.CDC); - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - public static PTable createProjectedTable(SelectStatement select, StatementContext context) throws SQLException { - Preconditions.checkArgument(!select.isJoin()); - // Non-group-by or group-by aggregations will create its own projected result. - if (select.getInnerSelectStatement() != null - || select.getFrom() == null - || select.isAggregate() - || select.isDistinct() - || ! 
PROJECTED_TABLE_TYPES.contains( - context.getResolver().getTables().get(0).getTable().getType())) { - return null; - } - - List projectedColumns = new ArrayList(); - boolean isWildcard = false; - Set families = new HashSet(); - ColumnRefVisitor visitor = new ColumnRefVisitor(context); - TableRef tableRef = context.getCurrentTable(); - PTable table = tableRef.getTable(); + public static final PName PROJECTED_TABLE_SCHEMA = PNameFactory.newName("."); + public static final EnumSet PROJECTED_TABLE_TYPES = + EnumSet.of(PTableType.TABLE, PTableType.INDEX, PTableType.VIEW, PTableType.CDC); + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - for (AliasedNode aliasedNode : select.getSelect()) { - ParseNode node = aliasedNode.getNode(); - if (node instanceof WildcardParseNode) { - if (((WildcardParseNode) node).isRewrite()) { - TableRef parentTableRef = FromCompiler.getResolver( - NODE_FACTORY.namedTable(null, TableName.create(table.getSchemaName().getString(), - table.getParentTableName().getString())), context.getConnection()).resolveTable( - table.getSchemaName().getString(), - table.getParentTableName().getString()); - for (PColumn column : parentTableRef.getTable().getColumns()) { - // don't attempt to rewrite the parents SALTING COLUMN - if (column == SaltingUtil.SALTING_COLUMN) { - continue; - } - NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor); - } - } - isWildcard = true; - } else if (node instanceof FamilyWildcardParseNode) { - FamilyWildcardParseNode familyWildcardNode = (FamilyWildcardParseNode) node; - String familyName = familyWildcardNode.getName(); - if (familyWildcardNode.isRewrite()) { - TableRef parentTableRef = FromCompiler.getResolver( - NODE_FACTORY.namedTable(null, TableName.create(table.getSchemaName().getString(), - table.getParentTableName().getString())), context.getConnection()).resolveTable( - table.getSchemaName().getString(), - table.getParentTableName().getString()); - for (PColumn column : parentTableRef.getTable().getColumnFamily(familyName).getColumns()) { - NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null).accept(visitor); - } - }else{ - for (PColumn column : table.getColumnFamily(familyName).getColumns()) { - NODE_FACTORY.column(TableName.create(null, familyName), '"' + column.getName().getString() + '"', null).accept(visitor); - } - } - families.add(familyName); - } else { - node.accept(visitor); + public static PTable createProjectedTable(SelectStatement select, StatementContext context) + throws SQLException { + Preconditions.checkArgument(!select.isJoin()); + // Non-group-by or group-by aggregations will create its own projected result. 
+ if ( + select.getInnerSelectStatement() != null || select.getFrom() == null || select.isAggregate() + || select.isDistinct() + || !PROJECTED_TABLE_TYPES + .contains(context.getResolver().getTables().get(0).getTable().getType()) + ) { + return null; + } + + List projectedColumns = new ArrayList(); + boolean isWildcard = false; + Set families = new HashSet(); + ColumnRefVisitor visitor = new ColumnRefVisitor(context); + TableRef tableRef = context.getCurrentTable(); + PTable table = tableRef.getTable(); + + for (AliasedNode aliasedNode : select.getSelect()) { + ParseNode node = aliasedNode.getNode(); + if (node instanceof WildcardParseNode) { + if (((WildcardParseNode) node).isRewrite()) { + TableRef parentTableRef = FromCompiler.getResolver(NODE_FACTORY.namedTable(null, + TableName.create(table.getSchemaName().getString(), + table.getParentTableName().getString())), + context.getConnection()).resolveTable(table.getSchemaName().getString(), + table.getParentTableName().getString()); + for (PColumn column : parentTableRef.getTable().getColumns()) { + // don't attempt to rewrite the parents SALTING COLUMN + if (column == SaltingUtil.SALTING_COLUMN) { + continue; } + NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null) + .accept(visitor); + } } - if (!isWildcard) { - for (OrderByNode orderBy : select.getOrderBy()) { - orderBy.getNode().accept(visitor); - } + isWildcard = true; + } else if (node instanceof FamilyWildcardParseNode) { + FamilyWildcardParseNode familyWildcardNode = (FamilyWildcardParseNode) node; + String familyName = familyWildcardNode.getName(); + if (familyWildcardNode.isRewrite()) { + TableRef parentTableRef = FromCompiler.getResolver(NODE_FACTORY.namedTable(null, + TableName.create(table.getSchemaName().getString(), + table.getParentTableName().getString())), + context.getConnection()).resolveTable(table.getSchemaName().getString(), + table.getParentTableName().getString()); + for (PColumn column : parentTableRef.getTable().getColumnFamily(familyName) + .getColumns()) { + NODE_FACTORY.column(null, '"' + IndexUtil.getIndexColumnName(column) + '"', null) + .accept(visitor); + } + } else { + for (PColumn column : table.getColumnFamily(familyName).getColumns()) { + NODE_FACTORY.column(TableName.create(null, familyName), + '"' + column.getName().getString() + '"', null).accept(visitor); + } } + families.add(familyName); + } else { + node.accept(visitor); + } + } + if (!isWildcard) { + for (OrderByNode orderBy : select.getOrderBy()) { + orderBy.getNode().accept(visitor); + } + } - boolean hasSaltingColumn = table.getBucketNum() != null; - int position = hasSaltingColumn ? 1 : 0; - // Always project PK columns first in case there are some PK columns added by alter table. - for (int i = position; i < table.getPKColumns().size(); i++) { - PColumn sourceColumn = table.getPKColumns().get(i); - ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition()); - PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), - position++, sourceColumn.isNullable(), sourceColumnRef, null); - projectedColumns.add(column); - } + boolean hasSaltingColumn = table.getBucketNum() != null; + int position = hasSaltingColumn ? 1 : 0; + // Always project PK columns first in case there are some PK columns added by alter table. 
+ for (int i = position; i < table.getPKColumns().size(); i++) { + PColumn sourceColumn = table.getPKColumns().get(i); + ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition()); + PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), + position++, sourceColumn.isNullable(), sourceColumnRef, null); + projectedColumns.add(column); + } - List nonPkColumnRefList = new ArrayList(visitor.nonPkColumnRefSet); - for (PColumn sourceColumn : table.getColumns()) { - if (SchemaUtil.isPKColumn(sourceColumn)) - continue; - ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition()); - if (!isWildcard - && !visitor.nonPkColumnRefSet.contains(sourceColumnRef) - && !families.contains(sourceColumn.getFamilyName().getString())) - continue; + List nonPkColumnRefList = new ArrayList(visitor.nonPkColumnRefSet); + for (PColumn sourceColumn : table.getColumns()) { + if (SchemaUtil.isPKColumn(sourceColumn)) continue; + ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition()); + if ( + !isWildcard && !visitor.nonPkColumnRefSet.contains(sourceColumnRef) + && !families.contains(sourceColumn.getFamilyName().getString()) + ) continue; - PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), - visitor.nonPkColumnRefSet.contains(sourceColumnRef) - ? position + nonPkColumnRefList.indexOf(sourceColumnRef) : position++, - sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes()); + PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(), + visitor.nonPkColumnRefSet.contains(sourceColumnRef) + ? position + nonPkColumnRefList.indexOf(sourceColumnRef) + : position++, + sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes()); - projectedColumns.add(column); - // Wildcard or FamilyWildcard will be handled by ProjectionCompiler. - if (!isWildcard && !families.contains(sourceColumn.getFamilyName().toString())) { - EncodedColumnsUtil.setColumns(column, table, context.getScan()); - } - } - // add IndexUncoveredDataColumnRef - position = projectedColumns.size() + (hasSaltingColumn ? 1 : 0); - for (IndexUncoveredDataColumnRef sourceColumnRef : visitor.indexColumnRefSet) { - PColumn column = new ProjectedColumn(sourceColumnRef.getColumn().getName(), - sourceColumnRef.getColumn().getFamilyName(), position++, - sourceColumnRef.getColumn().isNullable(), sourceColumnRef, sourceColumnRef.getColumn().getColumnQualifierBytes()); - projectedColumns.add(column); - } - if (!visitor.indexColumnRefSet.isEmpty() - && tableRef.isHinted()) { - context.setUncoveredIndex(true); - } - return PTableImpl.builderWithColumns(table, projectedColumns) - .setType(PTableType.PROJECTED) - .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) - .setExcludedColumns(ImmutableList.of()) - .setPhysicalNames(ImmutableList.of()) - .build(); + projectedColumns.add(column); + // Wildcard or FamilyWildcard will be handled by ProjectionCompiler. + if (!isWildcard && !families.contains(sourceColumn.getFamilyName().toString())) { + EncodedColumnsUtil.setColumns(column, table, context.getScan()); + } } - - public static PTable createProjectedTable(TableRef tableRef, List sourceColumnRefs, boolean retainPKColumns) throws SQLException { - PTable table = tableRef.getTable(); - List projectedColumns = new ArrayList(); - int position = table.getBucketNum() != null ? 1 : 0; - for (int i = retainPKColumns ? 
position : 0; i < sourceColumnRefs.size(); i++) { - ColumnRef sourceColumnRef = sourceColumnRefs.get(i); - PColumn sourceColumn = sourceColumnRef.getColumn(); - String colName = sourceColumn.getName().getString(); - String aliasedName = tableRef.getTableAlias() == null ? - SchemaUtil.getColumnName(table.getName().getString(), colName) - : SchemaUtil.getColumnName(tableRef.getTableAlias(), colName); - PName familyName = SchemaUtil.isPKColumn(sourceColumn) ? (retainPKColumns ? null : PNameFactory.newName(VALUE_COLUMN_FAMILY)) : sourceColumn.getFamilyName(); - // If we're not retaining the PK columns, then we should switch columns to be nullable - PColumn column = new ProjectedColumn(PNameFactory.newName(aliasedName), familyName, - position++, sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes()); - projectedColumns.add(column); - } - EncodedCQCounter cqCounter = EncodedCQCounter.NULL_COUNTER; - if (EncodedColumnsUtil.usesEncodedColumnNames(table)) { - cqCounter = EncodedCQCounter.copy(table.getEncodedCQCounter()); - } - return new PTableImpl.Builder() - .setType(PTableType.PROJECTED) - .setTimeStamp(table.getTimeStamp()) - .setIndexDisableTimestamp(table.getIndexDisableTimestamp()) - .setSequenceNumber(table.getSequenceNumber()) - .setImmutableRows(table.isImmutableRows()) - .setDisableWAL(table.isWALDisabled()) - .setMultiTenant(table.isMultiTenant()) - .setStoreNulls(table.getStoreNulls()) - .setViewType(table.getViewType()) - .setViewIndexIdType(table.getviewIndexIdType()) - .setViewIndexId(table.getViewIndexId()) - .setTransactionProvider(table.getTransactionProvider()) - .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) - .setAppendOnlySchema(table.isAppendOnlySchema()) - .setImmutableStorageScheme(table.getImmutableStorageScheme()) - .setQualifierEncodingScheme(table.getEncodingScheme()) - .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(cqCounter) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(table.getTenantId()) - .setSchemaName(PROJECTED_TABLE_SCHEMA) - .setTableName(table.getTableName()) - .setPkName(table.getPKName()) - .setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()) - .setBucketNum(table.getBucketNum()) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(projectedColumns) - .build(); + // add IndexUncoveredDataColumnRef + position = projectedColumns.size() + (hasSaltingColumn ? 
1 : 0); + for (IndexUncoveredDataColumnRef sourceColumnRef : visitor.indexColumnRefSet) { + PColumn column = new ProjectedColumn(sourceColumnRef.getColumn().getName(), + sourceColumnRef.getColumn().getFamilyName(), position++, + sourceColumnRef.getColumn().isNullable(), sourceColumnRef, + sourceColumnRef.getColumn().getColumnQualifierBytes()); + projectedColumns.add(column); + } + if (!visitor.indexColumnRefSet.isEmpty() && tableRef.isHinted()) { + context.setUncoveredIndex(true); } + return PTableImpl.builderWithColumns(table, projectedColumns).setType(PTableType.PROJECTED) + .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT).setExcludedColumns(ImmutableList.of()) + .setPhysicalNames(ImmutableList.of()).build(); + } - // For extracting column references from single select statement - private static class ColumnRefVisitor extends StatelessTraverseAllParseNodeVisitor { - private final StatementContext context; - private final LinkedHashSet nonPkColumnRefSet; - private final LinkedHashSet indexColumnRefSet; - - private ColumnRefVisitor(StatementContext context) { - this.context = context; - this.nonPkColumnRefSet = new LinkedHashSet(); - this.indexColumnRefSet = new LinkedHashSet(); - } + public static PTable createProjectedTable(TableRef tableRef, List sourceColumnRefs, + boolean retainPKColumns) throws SQLException { + PTable table = tableRef.getTable(); + List projectedColumns = new ArrayList(); + int position = table.getBucketNum() != null ? 1 : 0; + for (int i = retainPKColumns ? position : 0; i < sourceColumnRefs.size(); i++) { + ColumnRef sourceColumnRef = sourceColumnRefs.get(i); + PColumn sourceColumn = sourceColumnRef.getColumn(); + String colName = sourceColumn.getName().getString(); + String aliasedName = tableRef.getTableAlias() == null + ? SchemaUtil.getColumnName(table.getName().getString(), colName) + : SchemaUtil.getColumnName(tableRef.getTableAlias(), colName); + PName familyName = SchemaUtil.isPKColumn(sourceColumn) + ? (retainPKColumns ? 
null : PNameFactory.newName(VALUE_COLUMN_FAMILY)) + : sourceColumn.getFamilyName(); + // If we're not retaining the PK columns, then we should switch columns to be nullable + PColumn column = + new ProjectedColumn(PNameFactory.newName(aliasedName), familyName, position++, + sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes()); + projectedColumns.add(column); + } + EncodedCQCounter cqCounter = EncodedCQCounter.NULL_COUNTER; + if (EncodedColumnsUtil.usesEncodedColumnNames(table)) { + cqCounter = EncodedCQCounter.copy(table.getEncodedCQCounter()); + } + return new PTableImpl.Builder().setType(PTableType.PROJECTED).setTimeStamp(table.getTimeStamp()) + .setIndexDisableTimestamp(table.getIndexDisableTimestamp()) + .setSequenceNumber(table.getSequenceNumber()).setImmutableRows(table.isImmutableRows()) + .setDisableWAL(table.isWALDisabled()).setMultiTenant(table.isMultiTenant()) + .setStoreNulls(table.getStoreNulls()).setViewType(table.getViewType()) + .setViewIndexIdType(table.getviewIndexIdType()).setViewIndexId(table.getViewIndexId()) + .setTransactionProvider(table.getTransactionProvider()) + .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) + .setNamespaceMapped(table.isNamespaceMapped()) + .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) + .setAppendOnlySchema(table.isAppendOnlySchema()) + .setImmutableStorageScheme(table.getImmutableStorageScheme()) + .setQualifierEncodingScheme(table.getEncodingScheme()) + .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT).setEncodedCQCounter(cqCounter) + .setUseStatsForParallelization(table.useStatsForParallelization()) + .setExcludedColumns(ImmutableList.of()).setTenantId(table.getTenantId()) + .setSchemaName(PROJECTED_TABLE_SCHEMA).setTableName(table.getTableName()) + .setPkName(table.getPKName()).setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()) + .setBucketNum(table.getBucketNum()).setIndexes(Collections.emptyList()) + .setPhysicalNames(ImmutableList.of()).setColumns(projectedColumns).build(); + } - @Override - public Void visit(ColumnParseNode node) throws SQLException { - try { - ColumnRef resolveColumn = context.getResolver().resolveColumn(node.getSchemaName(), node.getTableName(), - node.getName()); - if (!SchemaUtil.isPKColumn(resolveColumn.getColumn())) { - nonPkColumnRefSet.add(resolveColumn); - } - } catch (ColumnNotFoundException e) { - if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(context.getCurrentTable())) { - try { - context.setUncoveredIndex(true); - indexColumnRefSet.add(new IndexUncoveredDataColumnRef(context, - context.getCurrentTable(), node.getName())); - } catch (ColumnFamilyNotFoundException c) { - throw e; - } - } else { - throw e; - } - } - return null; - } + // For extracting column references from single select statement + private static class ColumnRefVisitor extends StatelessTraverseAllParseNodeVisitor { + private final StatementContext context; + private final LinkedHashSet nonPkColumnRefSet; + private final LinkedHashSet indexColumnRefSet; + + private ColumnRefVisitor(StatementContext context) { + this.context = context; + this.nonPkColumnRefSet = new LinkedHashSet(); + this.indexColumnRefSet = new LinkedHashSet(); + } + + @Override + public Void visit(ColumnParseNode node) throws SQLException { + try { + ColumnRef resolveColumn = context.getResolver().resolveColumn(node.getSchemaName(), + node.getTableName(), node.getName()); + if (!SchemaUtil.isPKColumn(resolveColumn.getColumn())) { + nonPkColumnRefSet.add(resolveColumn); + } + } catch (ColumnNotFoundException e) 
{ + if (IndexUtil.shouldIndexBeUsedForUncoveredQuery(context.getCurrentTable())) { + try { + context.setUncoveredIndex(true); + indexColumnRefSet.add( + new IndexUncoveredDataColumnRef(context, context.getCurrentTable(), node.getName())); + } catch (ColumnFamilyNotFoundException c) { + throw e; + } + } else { + throw e; + } + } + return null; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UnionCompiler.java index 97597aaf8b3..412752aaf12 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UnionCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UnionCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,6 @@ import java.util.List; import java.util.function.Supplier; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.hbase.HConstants; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; @@ -52,272 +51,248 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.util.SchemaUtil; public class UnionCompiler { - private static final PName UNION_FAMILY_NAME = PNameFactory.newName("unionFamilyName"); - private static final PName UNION_SCHEMA_NAME = PNameFactory.newName("unionSchemaName"); - private static final PName UNION_TABLE_NAME = PNameFactory.newName("unionTableName"); + private static final PName UNION_FAMILY_NAME = PNameFactory.newName("unionFamilyName"); + private static final PName UNION_SCHEMA_NAME = PNameFactory.newName("unionSchemaName"); + private static final PName UNION_TABLE_NAME = PNameFactory.newName("unionTableName"); - private static List checkProjectionNumAndExpressions( - List selectPlans) throws SQLException { - int columnCount = selectPlans.get(0).getProjector().getColumnCount(); - List targetTypes = new ArrayList(columnCount); + private static List + checkProjectionNumAndExpressions(List selectPlans) throws SQLException { + int columnCount = selectPlans.get(0).getProjector().getColumnCount(); + List targetTypes = new ArrayList(columnCount); - for (int i = 0; i < columnCount; i++) { - for (QueryPlan plan : selectPlans) { - if (columnCount !=plan.getProjector().getColumnCount()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode - .SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS).setMessage("1st query has " + columnCount + " columns whereas 2nd " + - "query has " + plan.getProjector().getColumnCount()) - .build().buildException(); - } - ColumnProjector colproj = plan.getProjector().getColumnProjector(i); - if(targetTypes.size() < i+1 ) { - targetTypes.add(new TargetDataExpression(colproj.getExpression())); - } else { - compareExperssions(i, colproj.getExpression(), targetTypes); - } - } + for (int i = 0; i < columnCount; i++) { + for (QueryPlan plan : selectPlans) { + if (columnCount != plan.getProjector().getColumnCount()) { + throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS) + .setMessage("1st query has " + columnCount + " columns whereas 2nd " + "query has " + + plan.getProjector().getColumnCount()) + .build().buildException(); } - return targetTypes; - } - - public static TableRef contructSchemaTable(PhoenixStatement statement, List plans, - List selectNodes) throws SQLException { - List targetTypes = checkProjectionNumAndExpressions(plans); - QueryPlan plan = plans.get(0); - List projectedColumns = new ArrayList(); - for (int i = 0; i < plan.getProjector().getColumnCount(); i++) { - ColumnProjector colProj = plan.getProjector().getColumnProjector(i); - String name = selectNodes == null ? colProj.getLabel() : selectNodes.get(i).getAlias(); - PName colName = PNameFactory.newName(name); - PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(name), - UNION_FAMILY_NAME, targetTypes.get(i).getType(), targetTypes.get(i).getMaxLength(), - targetTypes.get(i).getScale(), colProj.getExpression().isNullable(), i, - targetTypes.get(i).getSortOrder(), 500, null, false, - colProj.getExpression().toString(), false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - projectedColumns.add(projectedColumn); + ColumnProjector colproj = plan.getProjector().getColumnProjector(i); + if (targetTypes.size() < i + 1) { + targetTypes.add(new TargetDataExpression(colproj.getExpression())); + } else { + compareExperssions(i, colproj.getExpression(), targetTypes); } - Long scn = statement.getConnection().getSCN(); - PTable tempTable = new PTableImpl.Builder() - .setType(PTableType.SUBQUERY) - .setTimeStamp(HConstants.LATEST_TIMESTAMP) - .setIndexDisableTimestamp(0L) - .setSequenceNumber(scn == null ? HConstants.LATEST_TIMESTAMP : scn) - .setImmutableRows(true) - .setDisableWAL(true) - .setMultiTenant(true) - .setStoreNulls(true) - .setUpdateCacheFrequency(0) - .setNamespaceMapped(SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY, - statement.getConnection().getQueryServices().getProps())) - .setAppendOnlySchema(false) - .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) - .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) - .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) - .setUseStatsForParallelization(true) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(statement.getConnection().getTenantId()) - .setSchemaName(UNION_SCHEMA_NAME) - .setTableName(UNION_TABLE_NAME) - .setRowKeyOrderOptimizable(false) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(projectedColumns) - .build(); - return new TableRef(null, tempTable, 0, false); + } } + return targetTypes; + } - private static void compareExperssions(int i, Expression expression, - List targetTypes) throws SQLException { - PDataType type = expression.getDataType(); - if (type != null && type.isCoercibleTo(targetTypes.get(i).getType())) { - ; - } - else if (targetTypes.get(i).getType() == null || targetTypes.get(i).getType().isCoercibleTo(type)) { - targetTypes.get(i).setType(type); - } else { - throw new SQLExceptionInfo.Builder(SQLExceptionCode - .SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS).setMessage("Column # " + i + " is " - + targetTypes.get(i).getType().getSqlTypeName() + " in 1st query where as it is " - + type.getSqlTypeName() + " in 2nd query") - .build().buildException(); - } - Integer len = expression.getMaxLength(); - if (len != null && 
(targetTypes.get(i).getMaxLength() == null || - len > targetTypes.get(i).getMaxLength())) { - targetTypes.get(i).setMaxLength(len); - } - Integer scale = expression.getScale(); - if (scale != null && (targetTypes.get(i).getScale() == null || - scale > targetTypes.get(i).getScale())){ - targetTypes.get(i).setScale(scale); - } - SortOrder sortOrder = expression.getSortOrder(); - if (sortOrder != null && (!sortOrder.equals(targetTypes.get(i).getSortOrder()))) - targetTypes.get(i).setSortOrder(SortOrder.getDefault()); + public static TableRef contructSchemaTable(PhoenixStatement statement, List plans, + List selectNodes) throws SQLException { + List targetTypes = checkProjectionNumAndExpressions(plans); + QueryPlan plan = plans.get(0); + List projectedColumns = new ArrayList(); + for (int i = 0; i < plan.getProjector().getColumnCount(); i++) { + ColumnProjector colProj = plan.getProjector().getColumnProjector(i); + String name = selectNodes == null ? colProj.getLabel() : selectNodes.get(i).getAlias(); + PName colName = PNameFactory.newName(name); + PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(name), UNION_FAMILY_NAME, + targetTypes.get(i).getType(), targetTypes.get(i).getMaxLength(), + targetTypes.get(i).getScale(), colProj.getExpression().isNullable(), i, + targetTypes.get(i).getSortOrder(), 500, null, false, colProj.getExpression().toString(), + false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); + projectedColumns.add(projectedColumn); } + Long scn = statement.getConnection().getSCN(); + PTable tempTable = new PTableImpl.Builder().setType(PTableType.SUBQUERY) + .setTimeStamp(HConstants.LATEST_TIMESTAMP).setIndexDisableTimestamp(0L) + .setSequenceNumber(scn == null ? HConstants.LATEST_TIMESTAMP : scn).setImmutableRows(true) + .setDisableWAL(true).setMultiTenant(true).setStoreNulls(true).setUpdateCacheFrequency(0) + .setNamespaceMapped(SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY, + statement.getConnection().getQueryServices().getProps())) + .setAppendOnlySchema(false) + .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) + .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) + .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) + .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER).setUseStatsForParallelization(true) + .setExcludedColumns(ImmutableList.of()).setTenantId(statement.getConnection().getTenantId()) + .setSchemaName(UNION_SCHEMA_NAME).setTableName(UNION_TABLE_NAME) + .setRowKeyOrderOptimizable(false).setIndexes(Collections.emptyList()) + .setPhysicalNames(ImmutableList.of()).setColumns(projectedColumns).build(); + return new TableRef(null, tempTable, 0, false); + } - private static TupleProjector getTupleProjector(RowProjector rowProj, - List columns) throws SQLException { - Expression[] exprs = new Expression[columns.size()]; - int i = 0; - for (ColumnProjector colProj : rowProj.getColumnProjectors()) { - exprs[i] = CoerceExpression.create(colProj.getExpression(), - columns.get(i).getDataType(), columns.get(i).getSortOrder(), - columns.get(i).getMaxLength()); - i++; - } - return new TupleProjector(exprs); + private static void compareExperssions(int i, Expression expression, + List targetTypes) throws SQLException { + PDataType type = expression.getDataType(); + if (type != null && type.isCoercibleTo(targetTypes.get(i).getType())) { + ; + } else if ( + targetTypes.get(i).getType() == null || targetTypes.get(i).getType().isCoercibleTo(type) + ) { + targetTypes.get(i).setType(type); + } else 
{ + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS) + .setMessage("Column # " + i + " is " + targetTypes.get(i).getType().getSqlTypeName() + + " in 1st query where as it is " + type.getSqlTypeName() + " in 2nd query") + .build().buildException(); } + Integer len = expression.getMaxLength(); + if ( + len != null + && (targetTypes.get(i).getMaxLength() == null || len > targetTypes.get(i).getMaxLength()) + ) { + targetTypes.get(i).setMaxLength(len); + } + Integer scale = expression.getScale(); + if ( + scale != null + && (targetTypes.get(i).getScale() == null || scale > targetTypes.get(i).getScale()) + ) { + targetTypes.get(i).setScale(scale); + } + SortOrder sortOrder = expression.getSortOrder(); + if (sortOrder != null && (!sortOrder.equals(targetTypes.get(i).getSortOrder()))) + targetTypes.get(i).setSortOrder(SortOrder.getDefault()); + } - private static class TargetDataExpression { - private PDataType type; - private Integer maxLength; - private Integer scale; - private SortOrder sortOrder; + private static TupleProjector getTupleProjector(RowProjector rowProj, List columns) + throws SQLException { + Expression[] exprs = new Expression[columns.size()]; + int i = 0; + for (ColumnProjector colProj : rowProj.getColumnProjectors()) { + exprs[i] = CoerceExpression.create(colProj.getExpression(), columns.get(i).getDataType(), + columns.get(i).getSortOrder(), columns.get(i).getMaxLength()); + i++; + } + return new TupleProjector(exprs); + } - public TargetDataExpression(Expression expr) { - this.type = expr.getDataType(); - this.maxLength = expr.getMaxLength(); - this.scale = expr.getScale(); - this.sortOrder = expr.getSortOrder(); - } + private static class TargetDataExpression { + private PDataType type; + private Integer maxLength; + private Integer scale; + private SortOrder sortOrder; - public PDataType getType() { - return type; - } + public TargetDataExpression(Expression expr) { + this.type = expr.getDataType(); + this.maxLength = expr.getMaxLength(); + this.scale = expr.getScale(); + this.sortOrder = expr.getSortOrder(); + } - public void setType(PDataType type) { - this.type = type; - } + public PDataType getType() { + return type; + } - public Integer getMaxLength() { - return maxLength; - } + public void setType(PDataType type) { + this.type = type; + } - public void setMaxLength(Integer maxLength) { - this.maxLength = maxLength; - } + public Integer getMaxLength() { + return maxLength; + } - public Integer getScale() { - return scale; - } + public void setMaxLength(Integer maxLength) { + this.maxLength = maxLength; + } - public void setScale(Integer scale) { - this.scale = scale; - } + public Integer getScale() { + return scale; + } - public SortOrder getSortOrder() { - return sortOrder; - } + public void setScale(Integer scale) { + this.scale = scale; + } - public void setSortOrder(SortOrder sortOrder) { - this.sortOrder = sortOrder; - } + public SortOrder getSortOrder() { + return sortOrder; } - static List convertToTupleProjectionPlan( - List plans, - TableRef tableRef, - StatementContext statementContext) throws SQLException { - List columns = tableRef.getTable().getColumns(); - for (int i = 0; i < plans.size(); i++) { - QueryPlan subPlan = plans.get(i); - TupleProjector projector = getTupleProjector(subPlan.getProjector(), columns); - subPlan = new TupleProjectionPlan(subPlan, projector, statementContext, null); - plans.set(i, subPlan); - } - return plans; + public void setSortOrder(SortOrder sortOrder) { + this.sortOrder = sortOrder; } 
+ } - /** - * If every subquery in {@link UnionPlan} is ordered, and {@link QueryPlan#getOutputOrderBys} - * of each subquery are equal(absolute equality or the same column name is unnecessary, just - * column types are compatible and columns count is same), and at the same time the outer - * query of {@link UnionPlan} has group by or order by, we would further examine whether - * maintaining this order for the entire {@link UnionPlan} can compile out the outer query's - * group by or order by. If it is sure, then {@link UnionPlan} just to perform a simple - * merge on the output of each subquery to ensure the overall order of the union all; - * otherwise, {@link UnionPlan} would not perform any special processing on the output - * of the subqueries. - */ - static void optimizeUnionOrderByIfPossible( - UnionPlan innerUnionPlan, - SelectStatement outerSelectStatement, - Supplier statementContextCreator) throws SQLException { - innerUnionPlan.enableCheckSupportOrderByOptimize(); - if (!innerUnionPlan.isSupportOrderByOptimize()) { - return; - } + static List convertToTupleProjectionPlan(List plans, TableRef tableRef, + StatementContext statementContext) throws SQLException { + List columns = tableRef.getTable().getColumns(); + for (int i = 0; i < plans.size(); i++) { + QueryPlan subPlan = plans.get(i); + TupleProjector projector = getTupleProjector(subPlan.getProjector(), columns); + subPlan = new TupleProjectionPlan(subPlan, projector, statementContext, null); + plans.set(i, subPlan); + } + return plans; + } - if (!isOptimizeUnionOrderByDeserved( - innerUnionPlan, outerSelectStatement, statementContextCreator)) { - // If maintain the order for the entire UnionPlan(by merge on the output of each - // subquery) could not compile out the outer query's group by or order by, we would - // not perform any special processing on the output of the subqueries. - innerUnionPlan.disableSupportOrderByOptimize(); - } + /** + * If every subquery in {@link UnionPlan} is ordered, and {@link QueryPlan#getOutputOrderBys} of + * each subquery are equal(absolute equality or the same column name is unnecessary, just column + * types are compatible and columns count is same), and at the same time the outer query of + * {@link UnionPlan} has group by or order by, we would further examine whether maintaining this + * order for the entire {@link UnionPlan} can compile out the outer query's group by or order by. + * If it is sure, then {@link UnionPlan} just to perform a simple merge on the output of each + * subquery to ensure the overall order of the union all; otherwise, {@link UnionPlan} would not + * perform any special processing on the output of the subqueries. + */ + static void optimizeUnionOrderByIfPossible(UnionPlan innerUnionPlan, + SelectStatement outerSelectStatement, Supplier statementContextCreator) + throws SQLException { + innerUnionPlan.enableCheckSupportOrderByOptimize(); + if (!innerUnionPlan.isSupportOrderByOptimize()) { + return; } - /** - * If group by or order by in outerSelectStatement could be compiled out, - * this optimization is deserved. 
- */ - private static boolean isOptimizeUnionOrderByDeserved( - UnionPlan innerUnionPlan, - SelectStatement outerSelectStatement, - Supplier statementContextCreator) throws SQLException { - if (!outerSelectStatement.haveGroupBy() && !outerSelectStatement.haveOrderBy()) { - return false; - } + if ( + !isOptimizeUnionOrderByDeserved(innerUnionPlan, outerSelectStatement, statementContextCreator) + ) { + // If maintain the order for the entire UnionPlan(by merge on the output of each + // subquery) could not compile out the outer query's group by or order by, we would + // not perform any special processing on the output of the subqueries. + innerUnionPlan.disableSupportOrderByOptimize(); + } + } - // Just to avoid additional ProjectionCompiler.compile, make the compilation of order by - // as simple as possible. - if (!outerSelectStatement.haveGroupBy() - && outerSelectStatement.getOrderBy().stream().anyMatch(OrderByNode::isIntegerLiteral)) { - return false; - } - StatementContext statementContext = statementContextCreator.get(); - ColumnResolver columResover = innerUnionPlan.getContext().getResolver(); - TableRef tableRef = innerUnionPlan.getTableRef(); - statementContext.setResolver(columResover); - statementContext.setCurrentTable(tableRef); + /** + * If group by or order by in outerSelectStatement could be compiled out, this optimization is + * deserved. + */ + private static boolean isOptimizeUnionOrderByDeserved(UnionPlan innerUnionPlan, + SelectStatement outerSelectStatement, Supplier statementContextCreator) + throws SQLException { + if (!outerSelectStatement.haveGroupBy() && !outerSelectStatement.haveOrderBy()) { + return false; + } - if (outerSelectStatement.haveGroupBy()) { - // For outer query has group by, we check whether groupBy.isOrderPreserving is true. - GroupBy groupBy = GroupByCompiler.compile(statementContext, outerSelectStatement); - outerSelectStatement = - HavingCompiler.rewrite(statementContext, outerSelectStatement, groupBy); - Expression where = WhereCompiler.compile( - statementContext, - outerSelectStatement, - null, - null, - CompiledOffset.EMPTY_COMPILED_OFFSET.getByteOffset()); - groupBy = groupBy.compile(statementContext, innerUnionPlan, where); - return groupBy.isOrderPreserving(); - } + // Just to avoid additional ProjectionCompiler.compile, make the compilation of order by + // as simple as possible. + if ( + !outerSelectStatement.haveGroupBy() + && outerSelectStatement.getOrderBy().stream().anyMatch(OrderByNode::isIntegerLiteral) + ) { + return false; + } + StatementContext statementContext = statementContextCreator.get(); + ColumnResolver columResover = innerUnionPlan.getContext().getResolver(); + TableRef tableRef = innerUnionPlan.getTableRef(); + statementContext.setResolver(columResover); + statementContext.setCurrentTable(tableRef); - assert outerSelectStatement.haveOrderBy(); - Expression where = WhereCompiler.compile( - statementContext, - outerSelectStatement, - null, - null, - CompiledOffset.EMPTY_COMPILED_OFFSET.getByteOffset()); - // For outer query has order by, we check whether orderBy is OrderBy.FWD_ROW_KEY_ORDER_BY. - OrderBy orderBy = OrderByCompiler.compile( - statementContext, - outerSelectStatement, - GroupBy.EMPTY_GROUP_BY, - null, - CompiledOffset.EMPTY_COMPILED_OFFSET, - null, - innerUnionPlan, - where); - return orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY; + if (outerSelectStatement.haveGroupBy()) { + // For outer query has group by, we check whether groupBy.isOrderPreserving is true. 
+ GroupBy groupBy = GroupByCompiler.compile(statementContext, outerSelectStatement); + outerSelectStatement = + HavingCompiler.rewrite(statementContext, outerSelectStatement, groupBy); + Expression where = WhereCompiler.compile(statementContext, outerSelectStatement, null, null, + CompiledOffset.EMPTY_COMPILED_OFFSET.getByteOffset()); + groupBy = groupBy.compile(statementContext, innerUnionPlan, where); + return groupBy.isOrderPreserving(); } + + assert outerSelectStatement.haveOrderBy(); + Expression where = WhereCompiler.compile(statementContext, outerSelectStatement, null, null, + CompiledOffset.EMPTY_COMPILED_OFFSET.getByteOffset()); + // For outer query has order by, we check whether orderBy is OrderBy.FWD_ROW_KEY_ORDER_BY. + OrderBy orderBy = + OrderByCompiler.compile(statementContext, outerSelectStatement, GroupBy.EMPTY_GROUP_BY, null, + CompiledOffset.EMPTY_COMPILED_OFFSET, null, innerUnionPlan, where); + return orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java index 5529ec08af9..2193cc8beb6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,21 @@ */ package org.apache.phoenix.compile; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.phoenix.thirdparty.com.google.common.collect.Lists.newArrayListWithCapacity; + +import java.sql.ParameterMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.Scan; @@ -104,1373 +119,1407 @@ import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; -import java.sql.ParameterMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.BitSet; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.phoenix.thirdparty.com.google.common.collect.Lists.newArrayListWithCapacity; - public class UpsertCompiler { - private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, - PTable table, MultiRowMutationState mutation, PhoenixStatement statement, boolean useServerTimestamp, - IndexMaintainer maintainer, byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns, - int maxHBaseClientKeyValueSize) throws SQLException { - long columnValueSize = 0; - Map columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length); - byte[][] 
pkValues = new byte[table.getPKColumns().size()][]; - // If the table uses salting, the first byte is the salting byte, set to an empty array - // here and we will fill in the byte later in PRowImpl. - if (table.getBucketNum() != null) { - pkValues[0] = new byte[] {0}; - } - for(int i = 0; i < numSplColumns; i++) { - pkValues[i + (table.getBucketNum() != null ? 1 : 0)] = values[i]; - } - Long rowTimestamp = null; // case when the table doesn't have a row timestamp column - RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp); - for (int i = 0, j = numSplColumns; j < values.length; j++, i++) { - byte[] value = values[j]; - if (value == null) { - continue; - } - PColumn column = table.getColumns().get(columnIndexes[i]); - if (value.length >= maxHBaseClientKeyValueSize && - table.getImmutableStorageScheme() == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - String columnInfo = getExceedMaxHBaseClientKeyValueAllowanceColumnInfo(table, column.getName().getString()); - throw new MaxPhoenixColumnSizeExceededException(columnInfo, maxHBaseClientKeyValueSize, value.length); - } + private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, + PTable table, MultiRowMutationState mutation, PhoenixStatement statement, + boolean useServerTimestamp, IndexMaintainer maintainer, byte[][] viewConstants, + byte[] onDupKeyBytes, int numSplColumns, int maxHBaseClientKeyValueSize) throws SQLException { + long columnValueSize = 0; + Map columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length); + byte[][] pkValues = new byte[table.getPKColumns().size()][]; + // If the table uses salting, the first byte is the salting byte, set to an empty array + // here and we will fill in the byte later in PRowImpl. + if (table.getBucketNum() != null) { + pkValues[0] = new byte[] { 0 }; + } + for (int i = 0; i < numSplColumns; i++) { + pkValues[i + (table.getBucketNum() != null ? 
1 : 0)] = values[i]; + } + Long rowTimestamp = null; // case when the table doesn't have a row timestamp column + RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp); + for (int i = 0, j = numSplColumns; j < values.length; j++, i++) { + byte[] value = values[j]; + if (value == null) { + continue; + } + PColumn column = table.getColumns().get(columnIndexes[i]); + if ( + value.length >= maxHBaseClientKeyValueSize + && table.getImmutableStorageScheme() == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN + ) { + String columnInfo = + getExceedMaxHBaseClientKeyValueAllowanceColumnInfo(table, column.getName().getString()); + throw new MaxPhoenixColumnSizeExceededException(columnInfo, maxHBaseClientKeyValueSize, + value.length); + } - if (SchemaUtil.isPKColumn(column)) { - pkValues[pkSlotIndex[i]] = value; - if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) { - if (!useServerTimestamp) { - PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); - rowTimestamp = PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder()); - if (rowTimestamp < 0) { - throw new IllegalDataException("Value of a column designated as ROW_TIMESTAMP cannot be less than zero"); - } - rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp); - } - } - } else { - columnValues.put(column, value); - columnValueSize += (column.getEstimatedSize() + value.length); - } - } - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - table.newKey(ptr, pkValues); - if (table.getIndexType() == IndexType.LOCAL && maintainer != null) { - byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants); - HRegionLocation region = - statement.getConnection().getQueryServices() - .getTableRegionLocation(table.getParentName().getBytes(), rowKey); - byte[] regionPrefix = - region.getRegion().getStartKey().length == 0 ? 
new byte[region - .getRegion().getEndKey().length] : region.getRegion() - .getStartKey(); - if (regionPrefix.length != 0) { - ptr.set(ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix, - regionPrefix.length)); - } - } - mutation.put(ptr, new RowMutationState(columnValues, columnValueSize, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes)); - } - - public static String getExceedMaxHBaseClientKeyValueAllowanceColumnInfo(PTable table, String columnName) { - return String.format("Upsert data to table %s on Column %s exceed max HBase client keyvalue size allowance", - SchemaUtil.getTableName(table.getSchemaName().toString(), table.getTableName().toString()), - columnName); - } - - public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, - RowProjector projector, ResultIterator iterator, int[] columnIndexes, - int[] pkSlotIndexes, boolean useServerTimestamp, - boolean prefixSysColValues) throws SQLException { - PhoenixStatement statement = childContext.getStatement(); - PhoenixConnection connection = statement.getConnection(); - ConnectionQueryServices services = connection.getQueryServices(); - int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - long maxSizeBytes = - services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - int maxHBaseClientKeyValueSize = - services.getProps().getInt(QueryServices.HBASE_CLIENT_KEYVALUE_MAXSIZE, - QueryServicesOptions.DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE); - int batchSize = Math.min(connection.getMutateBatchSize(), maxSize); - // we automatically flush the mutations when either auto commit is enabled, or - // the target table is transactional (in that case changes are not visible until we commit) - final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional(); - int sizeOffset = 0; - int numSplColumns = - (tableRef.getTable().isMultiTenant() ? 1 : 0) - + (tableRef.getTable().getViewIndexId() != null ? 
1 : 0); - byte[][] values = new byte[columnIndexes.length + numSplColumns][]; - if(prefixSysColValues) { - int i = 0; - if(tableRef.getTable().isMultiTenant()) { - values[i++] = connection.getTenantId().getBytes(); - } - if(tableRef.getTable().getViewIndexId() != null) { - values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId()); + if (SchemaUtil.isPKColumn(column)) { + pkValues[pkSlotIndex[i]] = value; + if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) { + if (!useServerTimestamp) { + PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); + rowTimestamp = + PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder()); + if (rowTimestamp < 0) { + throw new IllegalDataException( + "Value of a column designated as ROW_TIMESTAMP cannot be less than zero"); } + rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp); + } } - int rowCount = 0; - MultiRowMutationState mutation = new MultiRowMutationState(batchSize); - PTable table = tableRef.getTable(); - IndexMaintainer indexMaintainer = null; - byte[][] viewConstants = null; - if (table.getIndexType() == IndexType.LOCAL) { - PTable parentTable = - statement - .getConnection() - .getMetaDataCache() - .getTableRef( - new PTableKey(statement.getConnection().getTenantId(), table - .getParentName().getString())).getTable(); - indexMaintainer = table.getIndexMaintainer(parentTable, connection); - viewConstants = IndexUtil.getViewConstants(parentTable); - } - try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - while (rs.next()) { - for (int i = 0, j = numSplColumns; j < values.length; j++, i++) { - PColumn column = table.getColumns().get(columnIndexes[i]); - byte[] bytes = rs.getBytes(i + 1); - ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes); - Object value = rs.getObject(i + 1); - int rsPrecision = rs.getMetaData().getPrecision(i + 1); - Integer precision = rsPrecision == 0 ? null : rsPrecision; - int rsScale = rs.getMetaData().getScale(i + 1); - Integer scale = rsScale == 0 ? null : rsScale; - // We are guaranteed that the two column will have compatible types, - // as we checked that before. 
- if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), - SortOrder.getDefault(), precision, - scale, column.getMaxLength(), column.getScale())) { - throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), - column.getScale(), column.getName().getString()); - } - column.getDataType().coerceBytes(ptr, value, column.getDataType(), - precision, scale, SortOrder.getDefault(), - column.getMaxLength(), column.getScale(), column.getSortOrder(), - table.rowKeyOrderOptimizable()); - values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr); - } - setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, - useServerTimestamp, indexMaintainer, viewConstants, null, - numSplColumns, maxHBaseClientKeyValueSize); - rowCount++; - // Commit a batch if auto commit is true and we're at our batch size - if (autoFlush && rowCount % batchSize == 0) { - MutationState state = new MutationState(tableRef, mutation, 0, - maxSize, maxSizeBytes, connection); - connection.getMutationState().join(state); - connection.getMutationState().send(); - mutation.clear(); - } - } + } else { + columnValues.put(column, value); + columnValueSize += (column.getEstimatedSize() + value.length); + } + } + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + table.newKey(ptr, pkValues); + if (table.getIndexType() == IndexType.LOCAL && maintainer != null) { + byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants); + HRegionLocation region = statement.getConnection().getQueryServices() + .getTableRegionLocation(table.getParentName().getBytes(), rowKey); + byte[] regionPrefix = region.getRegion().getStartKey().length == 0 + ? new byte[region.getRegion().getEndKey().length] + : region.getRegion().getStartKey(); + if (regionPrefix.length != 0) { + ptr.set( + ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix, regionPrefix.length)); + } + } + mutation.put(ptr, new RowMutationState(columnValues, columnValueSize, + statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes)); + } - if (autoFlush) { - // If auto commit is true, this last batch will be committed upon return - sizeOffset = rowCount / batchSize * batchSize; - } - return new MutationState(tableRef, mutation, sizeOffset, maxSize, - maxSizeBytes, connection); - } + public static String getExceedMaxHBaseClientKeyValueAllowanceColumnInfo(PTable table, + String columnName) { + return String.format( + "Upsert data to table %s on Column %s exceed max HBase client keyvalue size allowance", + SchemaUtil.getTableName(table.getSchemaName().toString(), table.getTableName().toString()), + columnName); + } + + public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, + RowProjector projector, ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, + boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException { + PhoenixStatement statement = childContext.getStatement(); + PhoenixConnection connection = statement.getConnection(); + ConnectionQueryServices services = connection.getQueryServices(); + int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + long maxSizeBytes = + services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); + int maxHBaseClientKeyValueSize = + services.getProps().getInt(QueryServices.HBASE_CLIENT_KEYVALUE_MAXSIZE, + 
QueryServicesOptions.DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE); + int batchSize = Math.min(connection.getMutateBatchSize(), maxSize); + // we automatically flush the mutations when either auto commit is enabled, or + // the target table is transactional (in that case changes are not visible until we commit) + final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional(); + int sizeOffset = 0; + int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0) + + (tableRef.getTable().getViewIndexId() != null ? 1 : 0); + byte[][] values = new byte[columnIndexes.length + numSplColumns][]; + if (prefixSysColValues) { + int i = 0; + if (tableRef.getTable().isMultiTenant()) { + values[i++] = connection.getTenantId().getBytes(); + } + if (tableRef.getTable().getViewIndexId() != null) { + values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId()); + } + } + int rowCount = 0; + MultiRowMutationState mutation = new MultiRowMutationState(batchSize); + PTable table = tableRef.getTable(); + IndexMaintainer indexMaintainer = null; + byte[][] viewConstants = null; + if (table.getIndexType() == IndexType.LOCAL) { + PTable parentTable = statement.getConnection().getMetaDataCache() + .getTableRef( + new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())) + .getTable(); + indexMaintainer = table.getIndexMaintainer(parentTable, connection); + viewConstants = IndexUtil.getViewConstants(parentTable); } + try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + while (rs.next()) { + for (int i = 0, j = numSplColumns; j < values.length; j++, i++) { + PColumn column = table.getColumns().get(columnIndexes[i]); + byte[] bytes = rs.getBytes(i + 1); + ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes); + Object value = rs.getObject(i + 1); + int rsPrecision = rs.getMetaData().getPrecision(i + 1); + Integer precision = rsPrecision == 0 ? null : rsPrecision; + int rsScale = rs.getMetaData().getScale(i + 1); + Integer scale = rsScale == 0 ? null : rsScale; + // We are guaranteed that the two column will have compatible types, + // as we checked that before. 
+ if ( + !column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), + SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale()) + ) { + throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), + column.getScale(), column.getName().getString()); + } + column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale, + SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder(), + table.rowKeyOrderOptimizable()); + values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr); + } + setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, + useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns, + maxHBaseClientKeyValueSize); + rowCount++; + // Commit a batch if auto commit is true and we're at our batch size + if (autoFlush && rowCount % batchSize == 0) { + MutationState state = + new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection); + connection.getMutationState().join(state); + connection.getMutationState().send(); + mutation.clear(); + } + } - private static class UpsertingParallelIteratorFactory extends MutatingParallelIteratorFactory { - private RowProjector projector; - private int[] columnIndexes; - private int[] pkSlotIndexes; - private final TableRef tableRef; - private final boolean useSeverTimestamp; + if (autoFlush) { + // If auto commit is true, this last batch will be committed upon return + sizeOffset = rowCount / batchSize * batchSize; + } + return new MutationState(tableRef, mutation, sizeOffset, maxSize, maxSizeBytes, connection); + } + } - private UpsertingParallelIteratorFactory (PhoenixConnection connection, TableRef tableRef, boolean useServerTimestamp) { - super(connection); - this.tableRef = tableRef; - this.useSeverTimestamp = useServerTimestamp; - } + private static class UpsertingParallelIteratorFactory extends MutatingParallelIteratorFactory { + private RowProjector projector; + private int[] columnIndexes; + private int[] pkSlotIndexes; + private final TableRef tableRef; + private final boolean useSeverTimestamp; - @Override - protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException { - if (parentContext.getSequenceManager().getSequenceCount() > 0) { - throw new IllegalStateException("Cannot pipeline upsert when sequence is referenced"); - } - PhoenixStatement statement = new PhoenixStatement(connection); - /* - * We don't want to collect any read metrics within the child context. This is because any read metrics that - * need to be captured are already getting collected in the parent statement context enclosed in the result - * iterator being used for reading rows out. - */ - StatementContext childContext = new StatementContext(statement, false); - // Clone the row projector as it's not thread safe and would be used simultaneously by - // multiple threads otherwise. 
- return upsertSelect(childContext, tableRef, projector.cloneIfNecessary(), iterator, - columnIndexes, pkSlotIndexes, useSeverTimestamp, false); - } - - public void setRowProjector(RowProjector projector) { - this.projector = projector; - } - public void setColumnIndexes(int[] columnIndexes) { - this.columnIndexes = columnIndexes; - } - public void setPkSlotIndexes(int[] pkSlotIndexes) { - this.pkSlotIndexes = pkSlotIndexes; - } + private UpsertingParallelIteratorFactory(PhoenixConnection connection, TableRef tableRef, + boolean useServerTimestamp) { + super(connection); + this.tableRef = tableRef; + this.useSeverTimestamp = useServerTimestamp; } - - private final PhoenixStatement statement; - private final Operation operation; - - public UpsertCompiler(PhoenixStatement statement, Operation operation) { - this.statement = statement; - this.operation = operation; - } - - private static LiteralParseNode getNodeForRowTimestampColumn(PColumn col) { - PDataType type = col.getDataType(); - long dummyValue = 0L; - if (type.isCoercibleTo(PTimestamp.INSTANCE)) { - return new LiteralParseNode(new Timestamp(dummyValue), PTimestamp.INSTANCE); - } else if (type == PLong.INSTANCE || type == PUnsignedLong.INSTANCE) { - return new LiteralParseNode(dummyValue, PLong.INSTANCE); - } - throw new IllegalArgumentException(); - } - - public MutationPlan compile(UpsertStatement upsert) throws SQLException { - final PhoenixConnection connection = statement.getConnection(); - ConnectionQueryServices services = connection.getQueryServices(); - final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - final long maxSizeBytes = services.getProps() - .getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - List columnNodes = upsert.getColumns(); - TableRef tableRefToBe = null; - PTable table = null; - Set addViewColumnsToBe = Collections.emptySet(); - Set overlapViewColumnsToBe = Collections.emptySet(); - List allColumnsToBe = Collections.emptyList(); - boolean isTenantSpecific = false; - boolean isSharedViewIndex = false; - String tenantIdStr = null; - ColumnResolver resolver = null; - int[] columnIndexesToBe; - int nColumnsToSet = 0; - int[] pkSlotIndexesToBe; - List valueNodes = upsert.getValues(); - List targetColumns; - NamedTableNode tableNode = upsert.getTable(); - String tableName = tableNode.getName().getTableName(); - String schemaName = tableNode.getName().getSchemaName(); - QueryPlan queryPlanToBe = null; - int nValuesToSet; - boolean sameTable = false; - boolean runOnServer = false; - boolean serverUpsertSelectEnabled = - services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, - QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT); - boolean allowServerMutations = - services.getProps().getBoolean(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS, - QueryServicesOptions.DEFAULT_ENABLE_SERVER_SIDE_UPSERT_MUTATIONS); - UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null; - boolean useServerTimestampToBe = false; - - - resolver = FromCompiler.getResolverForMutation(upsert, connection); - tableRefToBe = resolver.getTables().get(0); - table = tableRefToBe.getTable(); - // Cannot update: - // - read-only VIEW - // - transactional table with a connection having an SCN - // - table with indexes and SCN set - // - tables with ROW_TIMESTAMP columns - if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) { - throw new 
ReadOnlyTableException(schemaName,tableName); - } else if (connection.isBuildingIndex() && table.getType() != PTableType.INDEX) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN) - .setSchemaName(schemaName) - .setTableName(tableName) - .build().buildException(); - } else if (table.isTransactional() && connection.getSCN() != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode - .CANNOT_SPECIFY_SCN_FOR_TXN_TABLE) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } else if (connection.getSCN() != null && !table.getIndexes().isEmpty() - && !connection.isRunningUpgrade() && !connection.isBuildingIndex()) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode - .CANNOT_UPSERT_WITH_SCN_FOR_TABLE_WITH_INDEXES) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } else if(connection.getSCN() != null && !connection.isRunningUpgrade() - && !connection.isBuildingIndex() && table.getRowTimestampColPos() >= 0) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode - .CANNOT_UPSERT_WITH_SCN_FOR_ROW_TIMESTAMP_COLUMN) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - boolean isSalted = table.getBucketNum() != null; - isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null; - isSharedViewIndex = table.getViewIndexId() != null; - tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null; - int posOffset = isSalted ? 1 : 0; - // Setup array of column indexes parallel to values that are going to be set - allColumnsToBe = table.getColumns(); - - nColumnsToSet = 0; - if (table.getViewType() == ViewType.UPDATABLE) { - addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size()); - for (PColumn column : allColumnsToBe) { - if (column.getViewConstant() != null) { - addViewColumnsToBe.add(column); - } - } - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - // Allow full row upsert if no columns or only dynamic ones are specified and values count match - if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) { - nColumnsToSet = allColumnsToBe.size() - posOffset; - columnIndexesToBe = new int[nColumnsToSet]; - pkSlotIndexesToBe = new int[columnIndexesToBe.length]; - targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length); - targetColumns.addAll(Collections.nCopies(columnIndexesToBe.length, null)); - int minPKPos = 0; - if (isSharedViewIndex) { - PColumn indexIdColumn = table.getPKColumns().get(minPKPos); - columnIndexesToBe[minPKPos] = indexIdColumn.getPosition(); - targetColumns.set(minPKPos, indexIdColumn); - minPKPos++; - } - if (isTenantSpecific) { - PColumn tenantColumn = table.getPKColumns().get(minPKPos); - columnIndexesToBe[minPKPos] = tenantColumn.getPosition(); - targetColumns.set(minPKPos, tenantColumn); - minPKPos++; - } - for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) { - PColumn column = allColumnsToBe.get(i); - if (SchemaUtil.isPKColumn(column)) { - pkSlotIndexesToBe[i-posOffset] = j + posOffset; - if (j++ < minPKPos) { // Skip, as it's already been set above - continue; - } - minPKPos = 0; - } - columnIndexesToBe[i-posOffset+minPKPos] = i; - targetColumns.set(i-posOffset+minPKPos, column); - } - if (!addViewColumnsToBe.isEmpty()) { - // All view columns overlap in this case - overlapViewColumnsToBe = addViewColumnsToBe; - addViewColumnsToBe = Collections.emptySet(); - } - } else { - // Size for 
worse case - int numColsInUpsert = columnNodes.size(); - nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + + (isSharedViewIndex ? 1 : 0); - columnIndexesToBe = new int[nColumnsToSet]; - pkSlotIndexesToBe = new int[columnIndexesToBe.length]; - targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length); - targetColumns.addAll(Collections.nCopies(columnIndexesToBe.length, null)); - Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced - Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced - BitSet columnsBeingSet = new BitSet(table.getColumns().size()); - int i = 0; - if (isSharedViewIndex) { - PColumn indexIdColumn = table.getPKColumns().get(i + posOffset); - columnsBeingSet.set(columnIndexesToBe[i] = indexIdColumn.getPosition()); - pkSlotIndexesToBe[i] = i + posOffset; - targetColumns.set(i, indexIdColumn); - i++; - } - // Add tenant column directly, as we don't want to resolve it as this will fail - if (isTenantSpecific) { - PColumn tenantColumn = table.getPKColumns().get(i + posOffset); - columnsBeingSet.set(columnIndexesToBe[i] = tenantColumn.getPosition()); - pkSlotIndexesToBe[i] = i + posOffset; - targetColumns.set(i, tenantColumn); - i++; - } - for (ColumnName colName : columnNodes) { - ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()); - PColumn column = ref.getColumn(); - if (IndexUtil.getViewConstantValue(column, ptr)) { - if (overlapViewColumnsToBe.isEmpty()) { - overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size()); - } - nColumnsToSet--; - overlapViewColumnsToBe.add(column); - addViewColumnsToBe.remove(column); - } - columnsBeingSet.set(columnIndexesToBe[i] = ref.getColumnPosition()); - targetColumns.set(i, column); - if (SchemaUtil.isPKColumn(column)) { - pkSlotIndexesToBe[i] = ref.getPKSlotPosition(); - } - i++; - } - for (PColumn column : addViewColumnsToBe) { - columnsBeingSet.set(columnIndexesToBe[i] = column.getPosition()); - targetColumns.set(i, column); - if (SchemaUtil.isPKColumn(column)) { - pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column); - } - i++; - } - // If a table has rowtimestamp col, then we always set it. - useServerTimestampToBe = table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table); - if (useServerTimestampToBe) { - PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); - // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column. - columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1); - pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1); - columnsBeingSet.set(columnIndexesToBe[i] = rowTimestampCol.getPosition()); - pkSlotIndexesToBe[i] = table.getRowTimestampColPos(); - targetColumns.add(rowTimestampCol); - if (valueNodes != null && !valueNodes.isEmpty()) { - valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol)); - } - nColumnsToSet++; - } - for (i = posOffset; i < table.getColumns().size(); i++) { - PColumn column = table.getColumns().get(i); - if (!columnsBeingSet.get(i) && !column.isNullable() && column.getExpressionStr() == null) { - throw new ConstraintViolationException(table.getName().getString() + "." 
- + SchemaUtil.getColumnDisplayName(column) + " may not be null"); - } - } - } - boolean isAutoCommit = connection.getAutoCommit(); - if (valueNodes == null) { - SelectStatement select = upsert.getSelect(); - assert(select != null); - select = SubselectRewriter.flatten(select, connection); - ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName()); - select = StatementNormalizer.normalize(select, selectResolver); - select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, useServerTimestampToBe); - SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver, connection); - if (transformedSelect != select) { - selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, upsert.getTable().getName()); - select = StatementNormalizer.normalize(transformedSelect, selectResolver); - } - sameTable = !select.isJoin() - && tableRefToBe.equals(selectResolver.getTables().get(0)); - /* We can run the upsert in a coprocessor if: - * 1) from has only 1 table or server UPSERT SELECT is enabled - * 2) the select query isn't doing aggregation (which requires a client-side final merge) - * 3) autoCommit is on - * 4) the table is not immutable with indexes, as the client is the one that figures out the additional - * puts for index tables. - * 5) no limit clause, as the limit clause requires client-side post processing - * 6) no sequences, as sequences imply that the order of upsert must match the order of - * selection. TODO: change this and only force client side if there's a ORDER BY on the sequence value - * Otherwise, run the query to pull the data from the server - * and populate the MutationState (upto a limit). - */ - if (! (select.isAggregate() || select.isDistinct() || select.getLimit() != null || select.hasSequence()) ) { - // We can pipeline the upsert select instead of spooling everything to disk first, - // if we don't have any post processing that's required. - parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe); - // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query, - // so we might be able to run it entirely on the server side. - // region space managed by region servers. So we bail out on executing on server side. - // Disable running upsert select on server side if a table has global mutable secondary indexes on it - boolean hasGlobalMutableIndexes = SchemaUtil.hasGlobalIndex(table) && !table.isImmutableRows(); - boolean hasWhereSubquery = select.getWhere() != null && select.getWhere().hasSubquery(); - runOnServer = (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes)) && isAutoCommit - // We can run the upsert select for initial index population on server side for transactional - // tables since the writes do not need to be done transactionally, since we gate the index - // usage on successfully writing all data rows. - && (!table.isTransactional() || table.getType() == PTableType.INDEX) - && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) - && !select.isJoin() && !hasWhereSubquery && table.getRowTimestampColPos() == -1; - } - runOnServer &= allowServerMutations; - // If we may be able to run on the server, add a hint that favors using the data table - // if all else is equal. 
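The comments in the block above enumerate when an UPSERT ... SELECT may be pushed to a coprocessor (auto-commit on, no aggregation/DISTINCT/LIMIT/sequences, not an immutable table with indexes, and so on). As a hedged, illustrative sketch only (connection URL, table and column names are hypothetical, not taken from this patch), this is the client-side shape of statement those checks apply to:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UpsertSelectSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical ZooKeeper quorum and table name.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
            Statement stmt = conn.createStatement()) {
          conn.setAutoCommit(true); // auto-commit is one precondition for the server-side path
          // Same source and target table, no aggregation, DISTINCT, LIMIT or sequences,
          // so the compiler may (subject to the other checks above) run this on the server.
          stmt.executeUpdate("UPSERT INTO T SELECT ID, VAL + 1 FROM T");
        }
      }
    }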
- // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing, - // as this would disallow running on the server. We currently use the row projector we - // get back to figure this out. - HintNode hint = upsert.getHint(); - if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { - hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE); - } - select = SelectStatement.create(select, hint); - // Pass scan through if same table in upsert and select so that projection is computed correctly - // Use optimizer to choose the best plan - QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), true, false, null); - queryPlanToBe = compiler.compile(); - - if (sameTable) { - // in the UPSERT INTO X ... SELECT FROM X case enforce the source tableRef's TS - // as max TS, so that the query can safely restarted and still work of a snapshot - // (so it won't see its own data in case of concurrent splits) - // see PHOENIX-4849 - long serverTime = selectResolver.getTables().get(0).getTimeStamp(); - if (serverTime == QueryConstants.UNSET_TIMESTAMP) { - // if this is the first time this table is resolved the ref's current time might not be defined, yet - // in that case force an RPC to get the server time - serverTime = new MetaDataClient(connection).getCurrentTime(schemaName, tableName); - } - Scan scan = queryPlanToBe.getContext().getScan(); - ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), serverTime); - } - // This is post-fix: if the tableRef is a projected table, this means there are post-processing - // steps and parallelIteratorFactory did not take effect. - if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) { - parallelIteratorFactoryToBe = null; - } - nValuesToSet = queryPlanToBe.getProjector().getColumnCount(); - // Cannot auto commit if doing aggregation or topN or salted - // Salted causes problems because the row may end up living on a different region - } else { - nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0); - } - // Resize down to allow a subset of columns to be specifiable - if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) { - nColumnsToSet = nValuesToSet; - columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet); - pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet); - for (int i = posOffset + nValuesToSet; i < table.getColumns().size(); i++) { - PColumn column = table.getColumns().get(i); - if (!column.isNullable() && column.getExpressionStr() == null) { - throw new ConstraintViolationException(table.getName().getString() + "." - + SchemaUtil.getColumnDisplayName(column) + " may not be null"); - } + + @Override + protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, + PhoenixConnection connection) throws SQLException { + if (parentContext.getSequenceManager().getSequenceCount() > 0) { + throw new IllegalStateException("Cannot pipeline upsert when sequence is referenced"); + } + PhoenixStatement statement = new PhoenixStatement(connection); + /* + * We don't want to collect any read metrics within the child context. 
This is because any + * read metrics that need to be captured are already getting collected in the parent statement + * context enclosed in the result iterator being used for reading rows out. + */ + StatementContext childContext = new StatementContext(statement, false); + // Clone the row projector as it's not thread safe and would be used simultaneously by + // multiple threads otherwise. + return upsertSelect(childContext, tableRef, projector.cloneIfNecessary(), iterator, + columnIndexes, pkSlotIndexes, useSeverTimestamp, false); + } + + public void setRowProjector(RowProjector projector) { + this.projector = projector; + } + + public void setColumnIndexes(int[] columnIndexes) { + this.columnIndexes = columnIndexes; + } + + public void setPkSlotIndexes(int[] pkSlotIndexes) { + this.pkSlotIndexes = pkSlotIndexes; + } + } + + private final PhoenixStatement statement; + private final Operation operation; + + public UpsertCompiler(PhoenixStatement statement, Operation operation) { + this.statement = statement; + this.operation = operation; + } + + private static LiteralParseNode getNodeForRowTimestampColumn(PColumn col) { + PDataType type = col.getDataType(); + long dummyValue = 0L; + if (type.isCoercibleTo(PTimestamp.INSTANCE)) { + return new LiteralParseNode(new Timestamp(dummyValue), PTimestamp.INSTANCE); + } else if (type == PLong.INSTANCE || type == PUnsignedLong.INSTANCE) { + return new LiteralParseNode(dummyValue, PLong.INSTANCE); + } + throw new IllegalArgumentException(); + } + + public MutationPlan compile(UpsertStatement upsert) throws SQLException { + final PhoenixConnection connection = statement.getConnection(); + ConnectionQueryServices services = connection.getQueryServices(); + final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + final long maxSizeBytes = + services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); + List columnNodes = upsert.getColumns(); + TableRef tableRefToBe = null; + PTable table = null; + Set addViewColumnsToBe = Collections.emptySet(); + Set overlapViewColumnsToBe = Collections.emptySet(); + List allColumnsToBe = Collections.emptyList(); + boolean isTenantSpecific = false; + boolean isSharedViewIndex = false; + String tenantIdStr = null; + ColumnResolver resolver = null; + int[] columnIndexesToBe; + int nColumnsToSet = 0; + int[] pkSlotIndexesToBe; + List valueNodes = upsert.getValues(); + List targetColumns; + NamedTableNode tableNode = upsert.getTable(); + String tableName = tableNode.getName().getTableName(); + String schemaName = tableNode.getName().getSchemaName(); + QueryPlan queryPlanToBe = null; + int nValuesToSet; + boolean sameTable = false; + boolean runOnServer = false; + boolean serverUpsertSelectEnabled = + services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, + QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT); + boolean allowServerMutations = + services.getProps().getBoolean(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS, + QueryServicesOptions.DEFAULT_ENABLE_SERVER_SIDE_UPSERT_MUTATIONS); + UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null; + boolean useServerTimestampToBe = false; + + resolver = FromCompiler.getResolverForMutation(upsert, connection); + tableRefToBe = resolver.getTables().get(0); + table = tableRefToBe.getTable(); + // Cannot update: + // - read-only VIEW + // - transactional table with a connection having 
an SCN + // - table with indexes and SCN set + // - tables with ROW_TIMESTAMP columns + if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) { + throw new ReadOnlyTableException(schemaName, tableName); + } else if (connection.isBuildingIndex() && table.getType() != PTableType.INDEX) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } else if (table.isTransactional() && connection.getSCN() != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } else if ( + connection.getSCN() != null && !table.getIndexes().isEmpty() && !connection.isRunningUpgrade() + && !connection.isBuildingIndex() + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_UPSERT_WITH_SCN_FOR_TABLE_WITH_INDEXES).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } else if ( + connection.getSCN() != null && !connection.isRunningUpgrade() && !connection.isBuildingIndex() + && table.getRowTimestampColPos() >= 0 + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_UPSERT_WITH_SCN_FOR_ROW_TIMESTAMP_COLUMN).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + boolean isSalted = table.getBucketNum() != null; + isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null; + isSharedViewIndex = table.getViewIndexId() != null; + tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null; + int posOffset = isSalted ? 1 : 0; + // Setup array of column indexes parallel to values that are going to be set + allColumnsToBe = table.getColumns(); + + nColumnsToSet = 0; + if (table.getViewType() == ViewType.UPDATABLE) { + addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size()); + for (PColumn column : allColumnsToBe) { + if (column.getViewConstant() != null) { + addViewColumnsToBe.add(column); + } + } + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + // Allow full row upsert if no columns or only dynamic ones are specified and values count match + if ( + columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size() + ) { + nColumnsToSet = allColumnsToBe.size() - posOffset; + columnIndexesToBe = new int[nColumnsToSet]; + pkSlotIndexesToBe = new int[columnIndexesToBe.length]; + targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length); + targetColumns.addAll(Collections. 
nCopies(columnIndexesToBe.length, null)); + int minPKPos = 0; + if (isSharedViewIndex) { + PColumn indexIdColumn = table.getPKColumns().get(minPKPos); + columnIndexesToBe[minPKPos] = indexIdColumn.getPosition(); + targetColumns.set(minPKPos, indexIdColumn); + minPKPos++; + } + if (isTenantSpecific) { + PColumn tenantColumn = table.getPKColumns().get(minPKPos); + columnIndexesToBe[minPKPos] = tenantColumn.getPosition(); + targetColumns.set(minPKPos, tenantColumn); + minPKPos++; + } + for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) { + PColumn column = allColumnsToBe.get(i); + if (SchemaUtil.isPKColumn(column)) { + pkSlotIndexesToBe[i - posOffset] = j + posOffset; + if (j++ < minPKPos) { // Skip, as it's already been set above + continue; + } + minPKPos = 0; + } + columnIndexesToBe[i - posOffset + minPKPos] = i; + targetColumns.set(i - posOffset + minPKPos, column); + } + if (!addViewColumnsToBe.isEmpty()) { + // All view columns overlap in this case + overlapViewColumnsToBe = addViewColumnsToBe; + addViewColumnsToBe = Collections.emptySet(); + } + } else { + // Size for worse case + int numColsInUpsert = columnNodes.size(); + nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + + +(isSharedViewIndex ? 1 : 0); + columnIndexesToBe = new int[nColumnsToSet]; + pkSlotIndexesToBe = new int[columnIndexesToBe.length]; + targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length); + targetColumns.addAll(Collections. nCopies(columnIndexesToBe.length, null)); + Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's + // not replaced + Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's + // not replaced + BitSet columnsBeingSet = new BitSet(table.getColumns().size()); + int i = 0; + if (isSharedViewIndex) { + PColumn indexIdColumn = table.getPKColumns().get(i + posOffset); + columnsBeingSet.set(columnIndexesToBe[i] = indexIdColumn.getPosition()); + pkSlotIndexesToBe[i] = i + posOffset; + targetColumns.set(i, indexIdColumn); + i++; + } + // Add tenant column directly, as we don't want to resolve it as this will fail + if (isTenantSpecific) { + PColumn tenantColumn = table.getPKColumns().get(i + posOffset); + columnsBeingSet.set(columnIndexesToBe[i] = tenantColumn.getPosition()); + pkSlotIndexesToBe[i] = i + posOffset; + targetColumns.set(i, tenantColumn); + i++; + } + for (ColumnName colName : columnNodes) { + ColumnRef ref = + resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()); + PColumn column = ref.getColumn(); + if (IndexUtil.getViewConstantValue(column, ptr)) { + if (overlapViewColumnsToBe.isEmpty()) { + overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size()); + } + nColumnsToSet--; + overlapViewColumnsToBe.add(column); + addViewColumnsToBe.remove(column); + } + columnsBeingSet.set(columnIndexesToBe[i] = ref.getColumnPosition()); + targetColumns.set(i, column); + if (SchemaUtil.isPKColumn(column)) { + pkSlotIndexesToBe[i] = ref.getPKSlotPosition(); + } + i++; + } + for (PColumn column : addViewColumnsToBe) { + columnsBeingSet.set(columnIndexesToBe[i] = column.getPosition()); + targetColumns.set(i, column); + if (SchemaUtil.isPKColumn(column)) { + pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column); + } + i++; + } + // If a table has rowtimestamp col, then we always set it. 
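The branch above resolves an explicit column list (plus any tenant, view-index and ROW_TIMESTAMP columns) into the parallel columnIndexesToBe/pkSlotIndexesToBe arrays, while the earlier branch handles the full-row form. A minimal sketch of the two statement shapes being compiled, assuming a hypothetical table T with columns ID (PK), C1 and C2:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UpsertColumnListSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
            Statement stmt = conn.createStatement()) {
          // Full-row form: columns are mapped positionally (first branch above).
          stmt.executeUpdate("UPSERT INTO T VALUES ('k1', 'a', 10)");
          // Explicit column list: only the named columns (plus implicit PK/view columns)
          // are resolved into columnIndexes/pkSlotIndexes (second branch above).
          stmt.executeUpdate("UPSERT INTO T (ID, C2) VALUES ('k2', 20)");
          conn.commit();
        }
      }
    }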
+ useServerTimestampToBe = + table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table); + if (useServerTimestampToBe) { + PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); + // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column. + columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1); + pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1); + columnsBeingSet.set(columnIndexesToBe[i] = rowTimestampCol.getPosition()); + pkSlotIndexesToBe[i] = table.getRowTimestampColPos(); + targetColumns.add(rowTimestampCol); + if (valueNodes != null && !valueNodes.isEmpty()) { + valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol)); + } + nColumnsToSet++; + } + for (i = posOffset; i < table.getColumns().size(); i++) { + PColumn column = table.getColumns().get(i); + if (!columnsBeingSet.get(i) && !column.isNullable() && column.getExpressionStr() == null) { + throw new ConstraintViolationException(table.getName().getString() + "." + + SchemaUtil.getColumnDisplayName(column) + " may not be null"); + } + } + } + boolean isAutoCommit = connection.getAutoCommit(); + if (valueNodes == null) { + SelectStatement select = upsert.getSelect(); + assert (select != null); + select = SubselectRewriter.flatten(select, connection); + ColumnResolver selectResolver = + FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName()); + select = StatementNormalizer.normalize(select, selectResolver); + select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, + useServerTimestampToBe); + SelectStatement transformedSelect = + SubqueryRewriter.transform(select, selectResolver, connection); + if (transformedSelect != select) { + selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, + upsert.getTable().getName()); + select = StatementNormalizer.normalize(transformedSelect, selectResolver); + } + sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0)); + /* + * We can run the upsert in a coprocessor if: 1) from has only 1 table or server UPSERT SELECT + * is enabled 2) the select query isn't doing aggregation (which requires a client-side final + * merge) 3) autoCommit is on 4) the table is not immutable with indexes, as the client is the + * one that figures out the additional puts for index tables. 5) no limit clause, as the limit + * clause requires client-side post processing 6) no sequences, as sequences imply that the + * order of upsert must match the order of selection. TODO: change this and only force client + * side if there's a ORDER BY on the sequence value Otherwise, run the query to pull the data + * from the server and populate the MutationState (upto a limit). + */ + if ( + !(select.isAggregate() || select.isDistinct() || select.getLimit() != null + || select.hasSequence()) + ) { + // We can pipeline the upsert select instead of spooling everything to disk first, + // if we don't have any post processing that's required. + parallelIteratorFactoryToBe = + new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe); + // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using + // query, + // so we might be able to run it entirely on the server side. + // region space managed by region servers. So we bail out on executing on server side. 
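useServerTimestampToBe above is set when the table declares a ROW_TIMESTAMP primary-key column that the statement does not populate, in which case the server assigns the value. A hedged illustration, assuming standard Phoenix ROW_TIMESTAMP DDL and hypothetical table/column names:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class RowTimestampSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
            Statement stmt = conn.createStatement()) {
          stmt.executeUpdate("CREATE TABLE IF NOT EXISTS EVENTS ("
              + " CREATED DATE NOT NULL,"
              + " ID VARCHAR NOT NULL,"
              + " PAYLOAD VARCHAR"
              + " CONSTRAINT PK PRIMARY KEY (CREATED ROW_TIMESTAMP, ID))");
          // CREATED is omitted, so useServerTimestamp is true and the
          // row timestamp is taken from the server-assigned cell timestamp.
          stmt.executeUpdate("UPSERT INTO EVENTS (ID, PAYLOAD) VALUES ('e1', 'hello')");
          conn.commit();
        }
      }
    }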
+ // Disable running upsert select on server side if a table has global mutable secondary + // indexes on it + boolean hasGlobalMutableIndexes = + SchemaUtil.hasGlobalIndex(table) && !table.isImmutableRows(); + boolean hasWhereSubquery = select.getWhere() != null && select.getWhere().hasSubquery(); + runOnServer = + (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes)) && isAutoCommit + // We can run the upsert select for initial index population on server side for + // transactional + // tables since the writes do not need to be done transactionally, since we gate the index + // usage on successfully writing all data rows. + && (!table.isTransactional() || table.getType() == PTableType.INDEX) + && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) && !select.isJoin() + && !hasWhereSubquery && table.getRowTimestampColPos() == -1; + } + runOnServer &= allowServerMutations; + // If we may be able to run on the server, add a hint that favors using the data table + // if all else is equal. + // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing, + // as this would disallow running on the server. We currently use the row projector we + // get back to figure this out. + HintNode hint = upsert.getHint(); + if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) { + hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE); + } + select = SelectStatement.create(select, hint); + // Pass scan through if same table in upsert and select so that projection is computed + // correctly + // Use optimizer to choose the best plan + QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, + parallelIteratorFactoryToBe, new SequenceManager(statement), true, false, null); + queryPlanToBe = compiler.compile(); + + if (sameTable) { + // in the UPSERT INTO X ... SELECT FROM X case enforce the source tableRef's TS + // as max TS, so that the query can safely restarted and still work of a snapshot + // (so it won't see its own data in case of concurrent splits) + // see PHOENIX-4849 + long serverTime = selectResolver.getTables().get(0).getTimeStamp(); + if (serverTime == QueryConstants.UNSET_TIMESTAMP) { + // if this is the first time this table is resolved the ref's current time might not be + // defined, yet + // in that case force an RPC to get the server time + serverTime = new MetaDataClient(connection).getCurrentTime(schemaName, tableName); + } + Scan scan = queryPlanToBe.getContext().getScan(); + ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), serverTime); + } + // This is post-fix: if the tableRef is a projected table, this means there are + // post-processing + // steps and parallelIteratorFactory did not take effect. + if ( + queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED + || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY + ) { + parallelIteratorFactoryToBe = null; + } + nValuesToSet = queryPlanToBe.getProjector().getColumnCount(); + // Cannot auto commit if doing aggregation or topN or salted + // Salted causes problems because the row may end up living on a different region + } else { + nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + + (isSharedViewIndex ? 
1 : 0); + } + // Resize down to allow a subset of columns to be specifiable + if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) { + nColumnsToSet = nValuesToSet; + columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet); + pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet); + for (int i = posOffset + nValuesToSet; i < table.getColumns().size(); i++) { + PColumn column = table.getColumns().get(i); + if (!column.isNullable() && column.getExpressionStr() == null) { + throw new ConstraintViolationException(table.getName().getString() + "." + + SchemaUtil.getColumnDisplayName(column) + " may not be null"); + } + } + } + + if (nValuesToSet != nColumnsToSet) { + // We might have added columns, so refresh cache and try again if stale. + // We have logic to catch MetaNotFoundException and refresh cache in PhoenixStatement + // Note that this check is not really sufficient, as a column could have + // been removed and the added back and we wouldn't detect that here. + throw new UpsertColumnsValuesMismatchException(schemaName, tableName, + "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet); + } + final QueryPlan originalQueryPlan = queryPlanToBe; + RowProjector projectorToBe = null; + // Optimize only after all checks have been performed + if (valueNodes == null) { + queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, + parallelIteratorFactoryToBe); + projectorToBe = queryPlanToBe.getProjector(); + } + final List allColumns = allColumnsToBe; + final RowProjector projector = projectorToBe; + final QueryPlan queryPlan = queryPlanToBe; + final TableRef tableRef = tableRefToBe; + final Set addViewColumns = addViewColumnsToBe; + final Set overlapViewColumns = overlapViewColumnsToBe; + final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe; + final int[] columnIndexes = columnIndexesToBe; + final int[] pkSlotIndexes = pkSlotIndexesToBe; + final boolean useServerTimestamp = useServerTimestampToBe; + if (table.getRowTimestampColPos() == -1 && useServerTimestamp) { + throw new IllegalStateException( + "For a table without row timestamp column, useServerTimestamp cannot be true"); + } + // TODO: break this up into multiple functions + //////////////////////////////////////////////////////////////////// + // UPSERT SELECT + ///////////////////////////////////////////////////////////////////// + if (valueNodes == null) { + // Before we re-order, check that for updatable view columns + // the projected expression either matches the column name or + // is a constant with the same required value. 
+ throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable); + + //////////////////////////////////////////////////////////////////// + // UPSERT SELECT run server-side (maybe) + ///////////////////////////////////////////////////////////////////// + if (runOnServer) { + // At most this array will grow bigger by the number of PK columns + int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet); + int[] reverseColumnIndexes = new int[table.getColumns().size()]; + List projectedExpressions = + Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length); + Arrays.fill(reverseColumnIndexes, -1); + for (int i = 0; i < nValuesToSet; i++) { + projectedExpressions.add(projector.getColumnProjector(i).getExpression()); + reverseColumnIndexes[columnIndexes[i]] = i; + } + /* + * Order projected columns and projected expressions with PK columns leading order by slot + * position + */ + int offset = table.getBucketNum() == null ? 0 : 1; + for (int i = 0; i < table.getPKColumns().size() - offset; i++) { + PColumn column = table.getPKColumns().get(i + offset); + int pos = reverseColumnIndexes[column.getPosition()]; + if (pos == -1) { + // Last PK column may be fixed width and nullable + // We don't want to insert a null expression b/c + // it's not valid to set a fixed width type to null. + if (column.getDataType().isFixedWidth()) { + continue; } + // Add literal null for missing PK columns + pos = projectedExpressions.size(); + Expression literalNull = + LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS); + projectedExpressions.add(literalNull); + allColumnsIndexes[pos] = column.getPosition(); + } + // Swap select expression at pos with i + Collections.swap(projectedExpressions, i, pos); + // Swap column indexes and reverse column indexes too + int tempPos = allColumnsIndexes[i]; + allColumnsIndexes[i] = allColumnsIndexes[pos]; + allColumnsIndexes[pos] = tempPos; + reverseColumnIndexes[tempPos] = pos; + reverseColumnIndexes[i] = i; + } + // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on + // server + if ( + !serverUpsertSelectEnabled + && ExpressionUtil.isPkPositionChanging(new TableRef(table), projectedExpressions) + ) { + runOnServer = false; } - - if (nValuesToSet != nColumnsToSet) { - // We might have added columns, so refresh cache and try again if stale. - // We have logic to catch MetaNotFoundException and refresh cache in PhoenixStatement - // Note that this check is not really sufficient, as a column could have - // been removed and the added back and we wouldn't detect that here. - throw new UpsertColumnsValuesMismatchException(schemaName, tableName, - "Numbers of columns: " + nColumnsToSet + ". 
Number of values: " + nValuesToSet); - } - final QueryPlan originalQueryPlan = queryPlanToBe; - RowProjector projectorToBe = null; - // Optimize only after all checks have been performed - if (valueNodes == null) { - queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, parallelIteratorFactoryToBe); - projectorToBe = queryPlanToBe.getProjector(); - } - final List allColumns = allColumnsToBe; - final RowProjector projector = projectorToBe; - final QueryPlan queryPlan = queryPlanToBe; - final TableRef tableRef = tableRefToBe; - final Set addViewColumns = addViewColumnsToBe; - final Set overlapViewColumns = overlapViewColumnsToBe; - final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe; - final int[] columnIndexes = columnIndexesToBe; - final int[] pkSlotIndexes = pkSlotIndexesToBe; - final boolean useServerTimestamp = useServerTimestampToBe; - if (table.getRowTimestampColPos() == -1 && useServerTimestamp) { - throw new IllegalStateException("For a table without row timestamp column, useServerTimestamp cannot be true"); - } - // TODO: break this up into multiple functions //////////////////////////////////////////////////////////////////// - // UPSERT SELECT + // UPSERT SELECT run server-side ///////////////////////////////////////////////////////////////////// - if (valueNodes == null) { - // Before we re-order, check that for updatable view columns - // the projected expression either matches the column name or - // is a constant with the same required value. - throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable); - - //////////////////////////////////////////////////////////////////// - // UPSERT SELECT run server-side (maybe) - ///////////////////////////////////////////////////////////////////// - if (runOnServer) { - // At most this array will grow bigger by the number of PK columns - int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet); - int[] reverseColumnIndexes = new int[table.getColumns().size()]; - List projectedExpressions = Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length); - Arrays.fill(reverseColumnIndexes, -1); - for (int i =0; i < nValuesToSet; i++) { - projectedExpressions.add(projector.getColumnProjector(i).getExpression()); - reverseColumnIndexes[columnIndexes[i]] = i; - } - /* - * Order projected columns and projected expressions with PK columns - * leading order by slot position - */ - int offset = table.getBucketNum() == null ? 0 : 1; - for (int i = 0; i < table.getPKColumns().size() - offset; i++) { - PColumn column = table.getPKColumns().get(i + offset); - int pos = reverseColumnIndexes[column.getPosition()]; - if (pos == -1) { - // Last PK column may be fixed width and nullable - // We don't want to insert a null expression b/c - // it's not valid to set a fixed width type to null. 
- if (column.getDataType().isFixedWidth()) { - continue; - } - // Add literal null for missing PK columns - pos = projectedExpressions.size(); - Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS); - projectedExpressions.add(literalNull); - allColumnsIndexes[pos] = column.getPosition(); - } - // Swap select expression at pos with i - Collections.swap(projectedExpressions, i, pos); - // Swap column indexes and reverse column indexes too - int tempPos = allColumnsIndexes[i]; - allColumnsIndexes[i] = allColumnsIndexes[pos]; - allColumnsIndexes[pos] = tempPos; - reverseColumnIndexes[tempPos] = pos; - reverseColumnIndexes[i] = i; - } - // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server - if (!serverUpsertSelectEnabled && ExpressionUtil - .isPkPositionChanging(new TableRef(table), projectedExpressions)) { - runOnServer = false; - } - //////////////////////////////////////////////////////////////////// - // UPSERT SELECT run server-side - ///////////////////////////////////////////////////////////////////// - if (runOnServer) { - // Iterate through columns being projected - List projectedColumns = Lists.newArrayListWithExpectedSize(projectedExpressions.size()); - int posOff = table.getBucketNum() != null ? 1 : 0; - for (int i = 0 ; i < projectedExpressions.size(); i++) { - // Must make new column if position has changed - PColumn column = allColumns.get(allColumnsIndexes[i]); - projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff)); - } - // Build table from projectedColumns - // Hack to add default column family to be used on server in case no value column is projected. - PTable projectedTable = PTableImpl.builderWithColumns(table, projectedColumns) - .setExcludedColumns(ImmutableList.of()) - .setDefaultFamilyName(PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table))) - .build(); - - SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint()); - StatementContext statementContext = queryPlan.getContext(); - RowProjector aggProjectorToBe = ProjectionCompiler.compile(statementContext, select, GroupBy - .EMPTY_GROUP_BY); - statementContext.getAggregationManager().compile(queryPlan.getContext() - ,GroupBy.EMPTY_GROUP_BY); - if (queryPlan.getProjector().projectEveryRow()) { - aggProjectorToBe = new RowProjector(aggProjectorToBe,true); - } - final RowProjector aggProjector = aggProjectorToBe; - - /* - * Transfer over PTable representing subset of columns selected, but all PK columns. - * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones. - * Transfer over List for projection. 
- * In region scan, evaluate expressions in order, collecting first n columns for PK and collection non PK in mutation Map - * Create the PRow and get the mutations, adding them to the batch - */ - final StatementContext context = queryPlan.getContext(); - final Scan scan = context.getScan(); - scan.setAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE, UngroupedAggregateRegionObserverHelper.serialize(projectedTable)); - scan.setAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_EXPRS, UngroupedAggregateRegionObserverHelper.serialize(projectedExpressions)); - // Ignore order by - it has no impact - final QueryPlan aggPlan = new AggregatePlan(context, select, statementContext.getCurrentTable(), aggProjector, null,null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, originalQueryPlan); - return new ServerUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, context, connection, scan, aggPlan, aggProjector, maxSize, maxSizeBytes); - } - } - //////////////////////////////////////////////////////////////////// - // UPSERT SELECT run client-side - ///////////////////////////////////////////////////////////////////// - return new ClientUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, parallelIteratorFactory, projector, columnIndexes, pkSlotIndexes, useServerTimestamp, maxSize, maxSizeBytes); - } + if (runOnServer) { + // Iterate through columns being projected + List projectedColumns = + Lists.newArrayListWithExpectedSize(projectedExpressions.size()); + int posOff = table.getBucketNum() != null ? 1 : 0; + for (int i = 0; i < projectedExpressions.size(); i++) { + // Must make new column if position has changed + PColumn column = allColumns.get(allColumnsIndexes[i]); + projectedColumns.add( + column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff)); + } + // Build table from projectedColumns + // Hack to add default column family to be used on server in case no value column is + // projected. + PTable projectedTable = PTableImpl.builderWithColumns(table, projectedColumns) + .setExcludedColumns(ImmutableList.of()) + .setDefaultFamilyName(PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table))) + .build(); - - //////////////////////////////////////////////////////////////////// - // UPSERT VALUES - ///////////////////////////////////////////////////////////////////// - final byte[][] values = new byte[nValuesToSet][]; - int nodeIndex = 0; - if (isSharedViewIndex) { - values[nodeIndex++] = table.getviewIndexIdType().toBytes(table.getViewIndexId()); - } - if (isTenantSpecific) { - PName tenantId = connection.getTenantId(); - values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, isSharedViewIndex); - } - - final int nodeIndexOffset = nodeIndex; - // Allocate array based on size of all columns in table, - // since some values may not be set (if they're nullable). 
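The UPSERT VALUES path above compiles each value into a constant (or bind) expression per target column; value nodes that are neither stateless nor JSON expressions are rejected with VALUE_IN_UPSERT_NOT_CONSTANT. A hedged sketch of the bind-parameter form it accepts, with hypothetical table and column names:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class UpsertValuesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
            PreparedStatement ps =
                conn.prepareStatement("UPSERT INTO T (ID, C2) VALUES (?, ?)")) {
          // Bind parameters and literals are stateless value nodes, so they compile to
          // constant expressions; a value that references another column would be
          // rejected as non-constant.
          ps.setString(1, "k1");
          ps.setInt(2, 42);
          ps.executeUpdate();
          conn.commit();
        }
      }
    }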
- final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement)); - UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context); - final List constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size()); - // First build all the expressions, as with sequences we want to collect them all first - // and initialize them in one batch - List> jsonExpressions = Lists.newArrayList(); - List> nonPKColumns = Lists.newArrayList(); - for (ParseNode valueNode : valueNodes) { - if (!valueNode.hasJsonExpression() && !valueNode.isStateless()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException(); - } - PColumn column = allColumns.get(columnIndexes[nodeIndex]); - expressionBuilder.setColumn(column); - Expression expression = valueNode.accept(expressionBuilder); - if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) { - throw TypeMismatchException.newException( - expression.getDataType(), column.getDataType(), "expression: " - + expression.toString() + " in column " + column); - } - if (!SchemaUtil.isPKColumn(column) && !valueNode.hasJsonExpression()) { - nonPKColumns.add(new Pair<>( - ColumnName.caseSensitiveColumnName(column.getFamilyName().getString(), - column.getName().getString()), valueNode)); - } else if (valueNode.hasJsonExpression()) { - jsonExpressions.add(new Pair<>( - ColumnName.caseSensitiveColumnName(column.getFamilyName().getString(), - column.getName().getString()), valueNode)); - } - constantExpressions.add(expression); - nodeIndex++; - } - if (nonPKColumns.size() > 0 && jsonExpressions.size() > 0) { - jsonExpressions.addAll(nonPKColumns); - nonPKColumns.clear(); - } - byte[] onDupKeyBytesToBe = null; - List> onDupKeyPairs = upsert.getOnDupKeyPairs(); - if (onDupKeyPairs != null) { - if (table.isImmutableRows()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build().buildException(); - } - if (table.isTransactional()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build().buildException(); - } - if (connection.getSCN() != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build().buildException(); - } - if (onDupKeyPairs.isEmpty()) { // ON DUPLICATE KEY IGNORE - onDupKeyBytesToBe = PhoenixIndexBuilderHelper.serializeOnDupKeyIgnore(); - } else { // ON DUPLICATE KEY UPDATE; - onDupKeyBytesToBe = getOnDuplicateKeyBytes(table, context, onDupKeyPairs, resolver); - } - } else if (!jsonExpressions.isEmpty()) { - onDupKeyBytesToBe = getOnDuplicateKeyBytes(table, context, jsonExpressions, resolver); - } - final byte[] onDupKeyBytes = onDupKeyBytesToBe; - - return new UpsertValuesMutationPlan(context, tableRef, nodeIndexOffset, constantExpressions, - allColumns, columnIndexes, overlapViewColumns, values, addViewColumns, - connection, pkSlotIndexes, useServerTimestamp, onDupKeyBytes, maxSize, maxSizeBytes); - } - - private static byte[] getOnDuplicateKeyBytes(PTable table, StatementContext context, - List> onDupKeyPairs, ColumnResolver resolver) - throws SQLException 
{ - byte[] onDupKeyBytesToBe; - int position = table.getBucketNum() == null ? 0 : 1; - UpdateColumnCompiler compiler = new UpdateColumnCompiler(context); - int nColumns = onDupKeyPairs.size(); - List updateExpressions = Lists.newArrayListWithExpectedSize(nColumns); - LinkedHashSet updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1); - updateColumns.add(new PColumnImpl(table.getPKColumns().get(position).getName(), - // Use first PK column name as we know it won't conflict with others - null, PVarbinary.INSTANCE, null, null, false, position, SortOrder.getDefault(), 0, - null, false, null, false, false, null, - table.getPKColumns().get(position).getTimestamp())); - position++; - for (Pair columnPair : onDupKeyPairs) { - ColumnName colName = columnPair.getFirst(); - PColumn - updateColumn = - resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()) - .getColumn(); - if (SchemaUtil.isPKColumn(updateColumn)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY).setSchemaName( - table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .setColumnName(updateColumn.getName().getString()).build().buildException(); - } - final int columnPosition = position++; - if (!updateColumns.add(new DelegateColumn(updateColumn) { - @Override - public int getPosition() { - return columnPosition; - } - })) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY).setSchemaName( - table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .setColumnName(updateColumn.getName().getString()).build().buildException(); - } - ParseNode updateNode = columnPair.getSecond(); - compiler.setColumn(updateColumn); - Expression updateExpression = updateNode.accept(compiler); - // Check that updateExpression is coercible to updateColumn - if (updateExpression.getDataType() != null && !updateExpression.getDataType() - .isCastableTo(updateColumn.getDataType())) { - throw TypeMismatchException.newException(updateExpression.getDataType(), - updateColumn.getDataType(), - "expression: " + updateExpression + " for column " + updateColumn); - } - if (compiler.isAggregate()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY).setSchemaName( - table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .setColumnName(updateColumn.getName().getString()).build().buildException(); - } - updateExpressions.add(updateExpression); - } - PTable onDupKeyTable = PTableImpl.builderWithColumns(table, updateColumns).build(); - onDupKeyBytesToBe = - PhoenixIndexBuilderHelper.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions); - return onDupKeyBytesToBe; - } - - private static boolean isRowTimestampSet(int[] pkSlotIndexes, PTable table) { - checkArgument(table.getRowTimestampColPos() != -1, "Call this method only for tables with row timestamp column"); - int rowTimestampColPKSlot = table.getRowTimestampColPos(); - for (int pkSlot : pkSlotIndexes) { - if (pkSlot == rowTimestampColPKSlot) { - return true; - } - } - return false; - } - - private static class UpdateColumnCompiler extends ExpressionCompiler { - private PColumn column; - - private UpdateColumnCompiler(StatementContext context) { - super(context); - } + SelectStatement select = + SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint()); + StatementContext statementContext = queryPlan.getContext(); + RowProjector aggProjectorToBe = + 
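getOnDuplicateKeyBytes above serializes the ON DUPLICATE KEY clause (or its JSON-expression equivalent) so the server can apply it atomically; the surrounding checks reject immutable and transactional tables, SCN connections, and updates to PK columns. A hedged sketch of the two supported clause forms, assuming a hypothetical mutable, non-transactional table COUNTERS (ID VARCHAR primary key, CNT BIGINT):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class OnDupKeySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk-host:2181");
            Statement stmt = conn.createStatement()) {
          // ON DUPLICATE KEY IGNORE: keep the existing row if the key is already present.
          stmt.executeUpdate(
              "UPSERT INTO COUNTERS (ID, CNT) VALUES ('k1', 1) ON DUPLICATE KEY IGNORE");
          // ON DUPLICATE KEY UPDATE: atomic read-modify-write of a non-PK column on the server.
          stmt.executeUpdate(
              "UPSERT INTO COUNTERS (ID, CNT) VALUES ('k1', 1) ON DUPLICATE KEY UPDATE CNT = CNT + 1");
          conn.commit();
        }
      }
    }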
ProjectionCompiler.compile(statementContext, select, GroupBy.EMPTY_GROUP_BY); + statementContext.getAggregationManager().compile(queryPlan.getContext(), + GroupBy.EMPTY_GROUP_BY); + if (queryPlan.getProjector().projectEveryRow()) { + aggProjectorToBe = new RowProjector(aggProjectorToBe, true); + } + final RowProjector aggProjector = aggProjectorToBe; - public void setColumn(PColumn column) { - this.column = column; - } - - @Override - public Expression visit(BindParseNode node) throws SQLException { - if (isTopLevel()) { - context.getBindManager().addParamMetaData(node, column); - Object value = context.getBindManager().getBindValue(node); - return LiteralExpression.newConstant(value, column.getDataType(), column.getSortOrder(), Determinism.ALWAYS); - } - return super.visit(node); - } - - @Override - public Expression visit(LiteralParseNode node) throws SQLException { - if (isTopLevel()) { - return LiteralExpression.newConstant(node.getValue(), column.getDataType(), column.getSortOrder(), Determinism.ALWAYS); - } - return super.visit(node); - } + /* + * Transfer over PTable representing subset of columns selected, but all PK columns. Move + * columns setting PK first in pkSlot order, adding LiteralExpression of null for any + * missing ones. Transfer over List for projection. In region scan, evaluate + * expressions in order, collecting first n columns for PK and collection non PK in + * mutation Map Create the PRow and get the mutations, adding them to the batch + */ + final StatementContext context = queryPlan.getContext(); + final Scan scan = context.getScan(); + scan.setAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE, + UngroupedAggregateRegionObserverHelper.serialize(projectedTable)); + scan.setAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_EXPRS, + UngroupedAggregateRegionObserverHelper.serialize(projectedExpressions)); + // Ignore order by - it has no impact + final QueryPlan aggPlan = new AggregatePlan(context, select, + statementContext.getCurrentTable(), aggProjector, null, null, OrderBy.EMPTY_ORDER_BY, + null, GroupBy.EMPTY_GROUP_BY, null, originalQueryPlan); + return new ServerUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, context, + connection, scan, aggPlan, aggProjector, maxSize, maxSizeBytes); + } + } + //////////////////////////////////////////////////////////////////// + // UPSERT SELECT run client-side + ///////////////////////////////////////////////////////////////////// + return new ClientUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, + parallelIteratorFactory, projector, columnIndexes, pkSlotIndexes, useServerTimestamp, + maxSize, maxSizeBytes); } - - private static class UpsertValuesCompiler extends UpdateColumnCompiler { - private UpsertValuesCompiler(StatementContext context) { - super(context); - } - + + //////////////////////////////////////////////////////////////////// + // UPSERT VALUES + ///////////////////////////////////////////////////////////////////// + final byte[][] values = new byte[nValuesToSet][]; + int nodeIndex = 0; + if (isSharedViewIndex) { + values[nodeIndex++] = table.getviewIndexIdType().toBytes(table.getViewIndexId()); + } + if (isTenantSpecific) { + PName tenantId = connection.getTenantId(); + values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), + table.getBucketNum() != null, tenantId, isSharedViewIndex); + } + + final int nodeIndexOffset = nodeIndex; + // Allocate array based on size of all columns in table, + // since some values may not be set (if 
they're nullable). + final StatementContext context = + new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement)); + UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context); + final List constantExpressions = + Lists.newArrayListWithExpectedSize(valueNodes.size()); + // First build all the expressions, as with sequences we want to collect them all first + // and initialize them in one batch + List> jsonExpressions = Lists.newArrayList(); + List> nonPKColumns = Lists.newArrayList(); + for (ParseNode valueNode : valueNodes) { + if (!valueNode.hasJsonExpression() && !valueNode.isStateless()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build() + .buildException(); + } + PColumn column = allColumns.get(columnIndexes[nodeIndex]); + expressionBuilder.setColumn(column); + Expression expression = valueNode.accept(expressionBuilder); + if ( + expression.getDataType() != null + && !expression.getDataType().isCastableTo(column.getDataType()) + ) { + throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(), + "expression: " + expression.toString() + " in column " + column); + } + if (!SchemaUtil.isPKColumn(column) && !valueNode.hasJsonExpression()) { + nonPKColumns + .add(new Pair<>(ColumnName.caseSensitiveColumnName(column.getFamilyName().getString(), + column.getName().getString()), valueNode)); + } else if (valueNode.hasJsonExpression()) { + jsonExpressions + .add(new Pair<>(ColumnName.caseSensitiveColumnName(column.getFamilyName().getString(), + column.getName().getString()), valueNode)); + } + constantExpressions.add(expression); + nodeIndex++; + } + if (nonPKColumns.size() > 0 && jsonExpressions.size() > 0) { + jsonExpressions.addAll(nonPKColumns); + nonPKColumns.clear(); + } + byte[] onDupKeyBytesToBe = null; + List> onDupKeyPairs = upsert.getOnDupKeyPairs(); + if (onDupKeyPairs != null) { + if (table.isImmutableRows()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + if (table.isTransactional()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + if (connection.getSCN() != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + if (onDupKeyPairs.isEmpty()) { // ON DUPLICATE KEY IGNORE + onDupKeyBytesToBe = PhoenixIndexBuilderHelper.serializeOnDupKeyIgnore(); + } else { // ON DUPLICATE KEY UPDATE; + onDupKeyBytesToBe = getOnDuplicateKeyBytes(table, context, onDupKeyPairs, resolver); + } + } else if (!jsonExpressions.isEmpty()) { + onDupKeyBytesToBe = getOnDuplicateKeyBytes(table, context, jsonExpressions, resolver); + } + final byte[] onDupKeyBytes = onDupKeyBytesToBe; + + return new UpsertValuesMutationPlan(context, tableRef, nodeIndexOffset, constantExpressions, + allColumns, columnIndexes, overlapViewColumns, values, addViewColumns, connection, + pkSlotIndexes, useServerTimestamp, onDupKeyBytes, maxSize, maxSizeBytes); + } + + private static byte[] getOnDuplicateKeyBytes(PTable table, StatementContext context, + List> onDupKeyPairs, ColumnResolver resolver) 
throws SQLException { + byte[] onDupKeyBytesToBe; + int position = table.getBucketNum() == null ? 0 : 1; + UpdateColumnCompiler compiler = new UpdateColumnCompiler(context); + int nColumns = onDupKeyPairs.size(); + List updateExpressions = Lists.newArrayListWithExpectedSize(nColumns); + LinkedHashSet updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1); + updateColumns.add(new PColumnImpl(table.getPKColumns().get(position).getName(), + // Use first PK column name as we know it won't conflict with others + null, PVarbinary.INSTANCE, null, null, false, position, SortOrder.getDefault(), 0, null, + false, null, false, false, null, table.getPKColumns().get(position).getTimestamp())); + position++; + for (Pair columnPair : onDupKeyPairs) { + ColumnName colName = columnPair.getFirst(); + PColumn updateColumn = + resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn(); + if (SchemaUtil.isPKColumn(updateColumn)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()) + .setColumnName(updateColumn.getName().getString()).build().buildException(); + } + final int columnPosition = position++; + if (!updateColumns.add(new DelegateColumn(updateColumn) { @Override - public Expression visit(SequenceValueParseNode node) throws SQLException { - return context.getSequenceManager().newSequenceReference(node); - } + public int getPosition() { + return columnPosition; + } + })) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()) + .setColumnName(updateColumn.getName().getString()).build().buildException(); + } + ParseNode updateNode = columnPair.getSecond(); + compiler.setColumn(updateColumn); + Expression updateExpression = updateNode.accept(compiler); + // Check that updateExpression is coercible to updateColumn + if ( + updateExpression.getDataType() != null + && !updateExpression.getDataType().isCastableTo(updateColumn.getDataType()) + ) { + throw TypeMismatchException.newException(updateExpression.getDataType(), + updateColumn.getDataType(), + "expression: " + updateExpression + " for column " + updateColumn); + } + if (compiler.isAggregate()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()) + .setColumnName(updateColumn.getName().getString()).build().buildException(); + } + updateExpressions.add(updateExpression); } - + PTable onDupKeyTable = PTableImpl.builderWithColumns(table, updateColumns).build(); + onDupKeyBytesToBe = + PhoenixIndexBuilderHelper.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions); + return onDupKeyBytesToBe; + } - private static SelectStatement prependTenantAndViewConstants(PTable table, SelectStatement select, String tenantId, Set addViewColumns, boolean useServerTimestamp) { - if ((!table.isMultiTenant() || tenantId == null) && table.getViewIndexId() == null && addViewColumns.isEmpty() && !useServerTimestamp) { - return select; - } - List selectNodes = newArrayListWithCapacity(select.getSelect().size() + 1 + addViewColumns.size()); - if (table.getViewIndexId() != null) { - selectNodes.add(new AliasedNode(null, new LiteralParseNode(table.getViewIndexId()))); - } - if (table.isMultiTenant() && tenantId != null) { - 
selectNodes.add(new AliasedNode(null, new LiteralParseNode(tenantId))); - } - selectNodes.addAll(select.getSelect()); - for (PColumn column : addViewColumns) { - byte[] byteValue = column.getViewConstant(); - Object value = column.getDataType().toObject(byteValue, 0, byteValue.length-1); - selectNodes.add(new AliasedNode(null, new LiteralParseNode(value))); - } - if (useServerTimestamp) { - PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); - selectNodes.add(new AliasedNode(null, getNodeForRowTimestampColumn(rowTimestampCol))); - } - return SelectStatement.create(select, selectNodes); - } - - /** - * Check that none of no columns in our updatable VIEW are changing values. - * @param tableRef - * @param overlapViewColumns - * @param targetColumns - * @param projector - * @throws SQLException - */ - private static void throwIfNotUpdatable(TableRef tableRef, Set overlapViewColumns, - List targetColumns, RowProjector projector, boolean sameTable) throws SQLException { - PTable table = tableRef.getTable(); - if (table.getViewType() == ViewType.UPDATABLE && !overlapViewColumns.isEmpty()) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - for (int i = 0; i < targetColumns.size(); i++) { - PColumn targetColumn = targetColumns.get(i); - if (overlapViewColumns.contains(targetColumn)) { - Expression source = projector.getColumnProjector(i).getExpression(); - if (source.isStateless()) { - source.evaluate(null, ptr); - if (Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(), targetColumn.getViewConstant(), 0, targetColumn.getViewConstant().length-1) == 0) { - continue; - } - } - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN) - .setColumnName(targetColumn.getName().getString()) - .build().buildException(); - } + private static boolean isRowTimestampSet(int[] pkSlotIndexes, PTable table) { + checkArgument(table.getRowTimestampColPos() != -1, + "Call this method only for tables with row timestamp column"); + int rowTimestampColPKSlot = table.getRowTimestampColPos(); + for (int pkSlot : pkSlotIndexes) { + if (pkSlot == rowTimestampColPKSlot) { + return true; + } + } + return false; + } + + private static class UpdateColumnCompiler extends ExpressionCompiler { + private PColumn column; + + private UpdateColumnCompiler(StatementContext context) { + super(context); + } + + public void setColumn(PColumn column) { + this.column = column; + } + + @Override + public Expression visit(BindParseNode node) throws SQLException { + if (isTopLevel()) { + context.getBindManager().addParamMetaData(node, column); + Object value = context.getBindManager().getBindValue(node); + return LiteralExpression.newConstant(value, column.getDataType(), column.getSortOrder(), + Determinism.ALWAYS); + } + return super.visit(node); + } + + @Override + public Expression visit(LiteralParseNode node) throws SQLException { + if (isTopLevel()) { + return LiteralExpression.newConstant(node.getValue(), column.getDataType(), + column.getSortOrder(), Determinism.ALWAYS); + } + return super.visit(node); + } + } + + private static class UpsertValuesCompiler extends UpdateColumnCompiler { + private UpsertValuesCompiler(StatementContext context) { + super(context); + } + + @Override + public Expression visit(SequenceValueParseNode node) throws SQLException { + return context.getSequenceManager().newSequenceReference(node); + } + } + + private static SelectStatement prependTenantAndViewConstants(PTable table, SelectStatement select, + String tenantId, Set 
addViewColumns, boolean useServerTimestamp) { + if ( + (!table.isMultiTenant() || tenantId == null) && table.getViewIndexId() == null + && addViewColumns.isEmpty() && !useServerTimestamp + ) { + return select; + } + List selectNodes = + newArrayListWithCapacity(select.getSelect().size() + 1 + addViewColumns.size()); + if (table.getViewIndexId() != null) { + selectNodes.add(new AliasedNode(null, new LiteralParseNode(table.getViewIndexId()))); + } + if (table.isMultiTenant() && tenantId != null) { + selectNodes.add(new AliasedNode(null, new LiteralParseNode(tenantId))); + } + selectNodes.addAll(select.getSelect()); + for (PColumn column : addViewColumns) { + byte[] byteValue = column.getViewConstant(); + Object value = column.getDataType().toObject(byteValue, 0, byteValue.length - 1); + selectNodes.add(new AliasedNode(null, new LiteralParseNode(value))); + } + if (useServerTimestamp) { + PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos()); + selectNodes.add(new AliasedNode(null, getNodeForRowTimestampColumn(rowTimestampCol))); + } + return SelectStatement.create(select, selectNodes); + } + + /** + * Check that none of no columns in our updatable VIEW are changing values. + */ + private static void throwIfNotUpdatable(TableRef tableRef, Set overlapViewColumns, + List targetColumns, RowProjector projector, boolean sameTable) throws SQLException { + PTable table = tableRef.getTable(); + if (table.getViewType() == ViewType.UPDATABLE && !overlapViewColumns.isEmpty()) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + for (int i = 0; i < targetColumns.size(); i++) { + PColumn targetColumn = targetColumns.get(i); + if (overlapViewColumns.contains(targetColumn)) { + Expression source = projector.getColumnProjector(i).getExpression(); + if (source.isStateless()) { + source.evaluate(null, ptr); + if ( + Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(), + targetColumn.getViewConstant(), 0, targetColumn.getViewConstant().length - 1) == 0 + ) { + continue; } + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN) + .setColumnName(targetColumn.getName().getString()).build().buildException(); } + } } + } - public class ServerUpsertSelectMutationPlan implements MutationPlan { - private final QueryPlan queryPlan; - private final TableRef tableRef; - private final QueryPlan originalQueryPlan; - private final StatementContext context; - private final PhoenixConnection connection; - private final Scan scan; - private final QueryPlan aggPlan; - private final RowProjector aggProjector; - private final int maxSize; - private final long maxSizeBytes; - - public ServerUpsertSelectMutationPlan(QueryPlan queryPlan, TableRef tableRef, QueryPlan originalQueryPlan, - StatementContext context, PhoenixConnection connection, - Scan scan, QueryPlan aggPlan, RowProjector aggProjector, - int maxSize, long maxSizeBytes) { - this.queryPlan = queryPlan; - this.tableRef = tableRef; - this.originalQueryPlan = originalQueryPlan; - this.context = context; - this.connection = connection; - this.scan = scan; - this.aggPlan = aggPlan; - this.aggProjector = aggProjector; - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - } + public class ServerUpsertSelectMutationPlan implements MutationPlan { + private final QueryPlan queryPlan; + private final TableRef tableRef; + private final QueryPlan originalQueryPlan; + private final StatementContext context; + private final PhoenixConnection connection; + private final Scan scan; + private final 
QueryPlan aggPlan; + private final RowProjector aggProjector; + private final int maxSize; + private final long maxSizeBytes; - @Override - public ParameterMetaData getParameterMetaData() { - return queryPlan.getContext().getBindManager().getParameterMetaData(); - } + public ServerUpsertSelectMutationPlan(QueryPlan queryPlan, TableRef tableRef, + QueryPlan originalQueryPlan, StatementContext context, PhoenixConnection connection, + Scan scan, QueryPlan aggPlan, RowProjector aggProjector, int maxSize, long maxSizeBytes) { + this.queryPlan = queryPlan; + this.tableRef = tableRef; + this.originalQueryPlan = originalQueryPlan; + this.context = context; + this.connection = connection; + this.scan = scan; + this.aggPlan = aggPlan; + this.aggProjector = aggProjector; + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + } - @Override - public StatementContext getContext() { - return queryPlan.getContext(); - } + @Override + public ParameterMetaData getParameterMetaData() { + return queryPlan.getContext().getBindManager().getParameterMetaData(); + } - @Override - public TableRef getTargetRef() { - return tableRef; - } + @Override + public StatementContext getContext() { + return queryPlan.getContext(); + } - @Override - public QueryPlan getQueryPlan() { - return aggPlan; - } + @Override + public TableRef getTargetRef() { + return tableRef; + } - @Override - public Set getSourceRefs() { - return originalQueryPlan.getSourceRefs(); - } + @Override + public QueryPlan getQueryPlan() { + return aggPlan; + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public Set getSourceRefs() { + return originalQueryPlan.getSourceRefs(); + } - @Override - public MutationState execute() throws SQLException { - ImmutableBytesWritable ptr = context.getTempPtr(); - PTable table = tableRef.getTable(); - table.getIndexMaintainers(ptr, context.getConnection()); - ScanUtil.annotateScanWithMetadataAttributes(table, scan); - byte[] txState = table.isTransactional() ? - connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY; - - ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); - if (aggPlan.getTableRef().getTable().isTransactional() - || (table.getType() == PTableType.INDEX && table.isTransactional())) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); - } - if (ptr.getLength() > 0) { - byte[] uuidValue = ServerCacheClient.generateId(); - scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get()); - } - ResultIterator iterator = aggPlan.iterator(); - try { - Tuple row = iterator.next(); - final long mutationCount = (Long) aggProjector.getColumnProjector(0).getValue(row, - PLong.INSTANCE, ptr); - return new MutationState(maxSize, maxSizeBytes, connection) { - @Override - public long getUpdateCount() { - return mutationCount; - } - }; - } finally { - iterator.close(); - } + @Override + public Operation getOperation() { + return operation; + } - } + @Override + public MutationState execute() throws SQLException { + ImmutableBytesWritable ptr = context.getTempPtr(); + PTable table = tableRef.getTable(); + table.getIndexMaintainers(ptr, context.getConnection()); + ScanUtil.annotateScanWithMetadataAttributes(table, scan); + byte[] txState = table.isTransactional() + ? 
connection.getMutationState().encodeTransaction() + : ByteUtil.EMPTY_BYTE_ARRAY; - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = aggPlan.getExplainPlan(); - List queryPlanSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - List planSteps = - Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - newBuilder.setAbstractExplainPlan("UPSERT ROWS"); - planSteps.add("UPSERT ROWS"); - planSteps.addAll(queryPlanSteps); - return new ExplainPlan(planSteps, newBuilder.build()); - } + ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); + if ( + aggPlan.getTableRef().getTable().isTransactional() + || (table.getType() == PTableType.INDEX && table.isTransactional()) + ) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); + } + if (ptr.getLength() > 0) { + byte[] uuidValue = ServerCacheClient.generateId(); + scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get()); + } + ResultIterator iterator = aggPlan.iterator(); + try { + Tuple row = iterator.next(); + final long mutationCount = + (Long) aggProjector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr); + return new MutationState(maxSize, maxSizeBytes, connection) { + @Override + public long getUpdateCount() { + return mutationCount; + } + }; + } finally { + iterator.close(); + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return aggPlan.getEstimatedRowsToScan(); - } + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return aggPlan.getEstimatedBytesToScan(); - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = aggPlan.getExplainPlan(); + List queryPlanSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + List planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + newBuilder.setAbstractExplainPlan("UPSERT ROWS"); + planSteps.add("UPSERT ROWS"); + planSteps.addAll(queryPlanSteps); + return new ExplainPlan(planSteps, newBuilder.build()); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return aggPlan.getEstimateInfoTimestamp(); - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return aggPlan.getEstimatedRowsToScan(); } - private class UpsertValuesMutationPlan implements MutationPlan { - private final StatementContext context; - private final TableRef tableRef; - private final int nodeIndexOffset; - private final List constantExpressions; - private final List allColumns; - private final int[] columnIndexes; - private final Set overlapViewColumns; - private final byte[][] values; - private final Set addViewColumns; - private final PhoenixConnection connection; - private final int[] pkSlotIndexes; - private final boolean useServerTimestamp; - private final byte[] onDupKeyBytes; - private final int maxSize; - private final long maxSizeBytes; - - public UpsertValuesMutationPlan(StatementContext context, TableRef tableRef, int nodeIndexOffset, - List constantExpressions, List allColumns, - int[] columnIndexes, Set overlapViewColumns, byte[][] values, - 
Set addViewColumns, PhoenixConnection connection, - int[] pkSlotIndexes, boolean useServerTimestamp, byte[] onDupKeyBytes, - int maxSize, long maxSizeBytes) { - this.context = context; - this.tableRef = tableRef; - this.nodeIndexOffset = nodeIndexOffset; - this.constantExpressions = constantExpressions; - this.allColumns = allColumns; - this.columnIndexes = columnIndexes; - this.overlapViewColumns = overlapViewColumns; - this.values = values; - this.addViewColumns = addViewColumns; - this.connection = connection; - this.pkSlotIndexes = pkSlotIndexes; - this.useServerTimestamp = useServerTimestamp; - this.onDupKeyBytes = onDupKeyBytes; - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return aggPlan.getEstimatedBytesToScan(); + } - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return aggPlan.getEstimateInfoTimestamp(); + } + } - @Override - public StatementContext getContext() { - return context; - } + private class UpsertValuesMutationPlan implements MutationPlan { + private final StatementContext context; + private final TableRef tableRef; + private final int nodeIndexOffset; + private final List constantExpressions; + private final List allColumns; + private final int[] columnIndexes; + private final Set overlapViewColumns; + private final byte[][] values; + private final Set addViewColumns; + private final PhoenixConnection connection; + private final int[] pkSlotIndexes; + private final boolean useServerTimestamp; + private final byte[] onDupKeyBytes; + private final int maxSize; + private final long maxSizeBytes; - @Override - public TableRef getTargetRef() { - return tableRef; - } + public UpsertValuesMutationPlan(StatementContext context, TableRef tableRef, + int nodeIndexOffset, List constantExpressions, List allColumns, + int[] columnIndexes, Set overlapViewColumns, byte[][] values, + Set addViewColumns, PhoenixConnection connection, int[] pkSlotIndexes, + boolean useServerTimestamp, byte[] onDupKeyBytes, int maxSize, long maxSizeBytes) { + this.context = context; + this.tableRef = tableRef; + this.nodeIndexOffset = nodeIndexOffset; + this.constantExpressions = constantExpressions; + this.allColumns = allColumns; + this.columnIndexes = columnIndexes; + this.overlapViewColumns = overlapViewColumns; + this.values = values; + this.addViewColumns = addViewColumns; + this.connection = connection; + this.pkSlotIndexes = pkSlotIndexes; + this.useServerTimestamp = useServerTimestamp; + this.onDupKeyBytes = onDupKeyBytes; + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + } - @Override - public QueryPlan getQueryPlan() { - return null; - } + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } + @Override + public StatementContext getContext() { + return context; + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public TableRef getTargetRef() { + return tableRef; + } - @Override - public MutationState execute() throws SQLException { - ImmutableBytesWritable ptr = context.getTempPtr(); - final SequenceManager sequenceManager = context.getSequenceManager(); - // Next evaluate all the expressions - int nodeIndex = nodeIndexOffset; - PTable table = 
tableRef.getTable(); - Tuple tuple = sequenceManager.getSequenceCount() == 0 ? null : - sequenceManager.newSequenceTuple(null); - for (Expression constantExpression : constantExpressions) { - if (!constantExpression.isStateless()) { - nodeIndex++; - continue; - } - PColumn column = allColumns.get(columnIndexes[nodeIndex]); - constantExpression.evaluate(tuple, ptr); - Object value = null; - if (constantExpression.getDataType() != null) { - value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(), - constantExpression.getMaxLength(), constantExpression.getScale()); - if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) { - throw TypeMismatchException.newException( - constantExpression.getDataType(), column.getDataType(), "expression: " - + constantExpression.toString() + " in column " + column); - } - if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(), - constantExpression.getSortOrder(), constantExpression.getMaxLength(), - constantExpression.getScale(), column.getMaxLength(), column.getScale())) { - throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), - column.getScale(), column.getName().getString()); - } - } - column.getDataType().coerceBytes(ptr, value, constantExpression.getDataType(), - constantExpression.getMaxLength(), constantExpression.getScale(), constantExpression.getSortOrder(), - column.getMaxLength(), column.getScale(),column.getSortOrder(), - table.rowKeyOrderOptimizable()); - if (overlapViewColumns.contains(column) && Bytes.compareTo(ptr.get(), ptr.getOffset(), ptr.getLength(), column.getViewConstant(), 0, column.getViewConstant().length-1) != 0) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN) - .setColumnName(column.getName().getString()) - .setMessage("value=" + constantExpression.toString()).build().buildException(); - } - values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr); - nodeIndex++; - } - // Add columns based on view - for (PColumn column : addViewColumns) { - if (IndexUtil.getViewConstantValue(column, ptr)) { - values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr); - } else { - throw new IllegalStateException(); - } - } - MultiRowMutationState mutation = new MultiRowMutationState(1); - IndexMaintainer indexMaintainer = null; - byte[][] viewConstants = null; - if (table.getIndexType() == IndexType.LOCAL) { - PTable parentTable = - statement - .getConnection() - .getMetaDataCache() - .getTableRef( - new PTableKey(statement.getConnection().getTenantId(), - table.getParentName().getString())).getTable(); - indexMaintainer = table.getIndexMaintainer(parentTable, connection); - viewConstants = IndexUtil.getViewConstants(parentTable); - } - int maxHBaseClientKeyValueSize = statement.getConnection().getQueryServices().getProps(). 
- getInt(QueryServices.HBASE_CLIENT_KEYVALUE_MAXSIZE, - QueryServicesOptions.DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE); - setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, - indexMaintainer, viewConstants, onDupKeyBytes, 0, maxHBaseClientKeyValueSize); - return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection); - } + @Override + public QueryPlan getQueryPlan() { + return null; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - List planSteps = Lists.newArrayListWithExpectedSize(2); - if (context.getSequenceManager().getSequenceCount() > 0) { - planSteps.add("CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES"); - } - planSteps.add("PUT SINGLE ROW"); - return new ExplainPlan(planSteps); - } + @Override + public Set getSourceRefs() { + return Collections.emptySet(); + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return 0l; - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return 0l; - } + @Override + public MutationState execute() throws SQLException { + ImmutableBytesWritable ptr = context.getTempPtr(); + final SequenceManager sequenceManager = context.getSequenceManager(); + // Next evaluate all the expressions + int nodeIndex = nodeIndexOffset; + PTable table = tableRef.getTable(); + Tuple tuple = + sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null); + for (Expression constantExpression : constantExpressions) { + if (!constantExpression.isStateless()) { + nodeIndex++; + continue; + } + PColumn column = allColumns.get(columnIndexes[nodeIndex]); + constantExpression.evaluate(tuple, ptr); + Object value = null; + if (constantExpression.getDataType() != null) { + value = constantExpression.getDataType().toObject(ptr, constantExpression.getSortOrder(), + constantExpression.getMaxLength(), constantExpression.getScale()); + if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) { + throw TypeMismatchException.newException(constantExpression.getDataType(), + column.getDataType(), + "expression: " + constantExpression.toString() + " in column " + column); + } + if ( + !column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(), + constantExpression.getSortOrder(), constantExpression.getMaxLength(), + constantExpression.getScale(), column.getMaxLength(), column.getScale()) + ) { + throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), + column.getScale(), column.getName().getString()); + } + } + column.getDataType().coerceBytes(ptr, value, constantExpression.getDataType(), + constantExpression.getMaxLength(), constantExpression.getScale(), + constantExpression.getSortOrder(), column.getMaxLength(), column.getScale(), + column.getSortOrder(), table.rowKeyOrderOptimizable()); + if ( + overlapViewColumns.contains(column) && Bytes.compareTo(ptr.get(), ptr.getOffset(), + ptr.getLength(), column.getViewConstant(), 0, column.getViewConstant().length - 1) != 0 + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN) + .setColumnName(column.getName().getString()) + .setMessage("value=" + constantExpression.toString()).build().buildException(); + } + values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr); + nodeIndex++; + } + // Add columns based on view + for (PColumn column : addViewColumns) { + if 
(IndexUtil.getViewConstantValue(column, ptr)) { + values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr); + } else { + throw new IllegalStateException(); + } + } + MultiRowMutationState mutation = new MultiRowMutationState(1); + IndexMaintainer indexMaintainer = null; + byte[][] viewConstants = null; + if (table.getIndexType() == IndexType.LOCAL) { + PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef( + new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())) + .getTable(); + indexMaintainer = table.getIndexMaintainer(parentTable, connection); + viewConstants = IndexUtil.getViewConstants(parentTable); + } + int maxHBaseClientKeyValueSize = statement.getConnection().getQueryServices().getProps() + .getInt(QueryServices.HBASE_CLIENT_KEYVALUE_MAXSIZE, + QueryServicesOptions.DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE); + setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, + useServerTimestamp, indexMaintainer, viewConstants, onDupKeyBytes, 0, + maxHBaseClientKeyValueSize); + return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + List planSteps = Lists.newArrayListWithExpectedSize(2); + if (context.getSequenceManager().getSequenceCount() > 0) { + planSteps + .add("CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES"); + } + planSteps.add("PUT SINGLE ROW"); + return new ExplainPlan(planSteps); } - private class ClientUpsertSelectMutationPlan implements MutationPlan { - private final QueryPlan queryPlan; - private final TableRef tableRef; - private final QueryPlan originalQueryPlan; - private final UpsertingParallelIteratorFactory parallelIteratorFactory; - private final RowProjector projector; - private final int[] columnIndexes; - private final int[] pkSlotIndexes; - private final boolean useServerTimestamp; - private final int maxSize; - private final long maxSizeBytes; - - public ClientUpsertSelectMutationPlan(QueryPlan queryPlan, TableRef tableRef, QueryPlan originalQueryPlan, UpsertingParallelIteratorFactory parallelIteratorFactory, RowProjector projector, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, int maxSize, long maxSizeBytes) { - this.queryPlan = queryPlan; - this.tableRef = tableRef; - this.originalQueryPlan = originalQueryPlan; - this.parallelIteratorFactory = parallelIteratorFactory; - this.projector = projector; - this.columnIndexes = columnIndexes; - this.pkSlotIndexes = pkSlotIndexes; - this.useServerTimestamp = useServerTimestamp; - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - queryPlan.getContext().setClientSideUpsertSelect(true); - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return 0l; + } - @Override - public ParameterMetaData getParameterMetaData() { - return queryPlan.getContext().getBindManager().getParameterMetaData(); - } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return 0l; + } - @Override - public StatementContext getContext() { - return queryPlan.getContext(); - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } + } - @Override - public TableRef getTargetRef() { - return tableRef; - } + private class ClientUpsertSelectMutationPlan implements MutationPlan { + private final QueryPlan queryPlan; + private final 
TableRef tableRef; + private final QueryPlan originalQueryPlan; + private final UpsertingParallelIteratorFactory parallelIteratorFactory; + private final RowProjector projector; + private final int[] columnIndexes; + private final int[] pkSlotIndexes; + private final boolean useServerTimestamp; + private final int maxSize; + private final long maxSizeBytes; - @Override - public QueryPlan getQueryPlan() { - return queryPlan; - } + public ClientUpsertSelectMutationPlan(QueryPlan queryPlan, TableRef tableRef, + QueryPlan originalQueryPlan, UpsertingParallelIteratorFactory parallelIteratorFactory, + RowProjector projector, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, + int maxSize, long maxSizeBytes) { + this.queryPlan = queryPlan; + this.tableRef = tableRef; + this.originalQueryPlan = originalQueryPlan; + this.parallelIteratorFactory = parallelIteratorFactory; + this.projector = projector; + this.columnIndexes = columnIndexes; + this.pkSlotIndexes = pkSlotIndexes; + this.useServerTimestamp = useServerTimestamp; + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + queryPlan.getContext().setClientSideUpsertSelect(true); + } - @Override - public Set getSourceRefs() { - return originalQueryPlan.getSourceRefs(); - } + @Override + public ParameterMetaData getParameterMetaData() { + return queryPlan.getContext().getBindManager().getParameterMetaData(); + } - @Override - public Operation getOperation() { - return operation; - } + @Override + public StatementContext getContext() { + return queryPlan.getContext(); + } - @Override - public MutationState execute() throws SQLException { - ResultIterator iterator = queryPlan.iterator(); - if (parallelIteratorFactory == null) { - return upsertSelect(new StatementContext(statement, queryPlan.getContext().getScan()), tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false); - } - try { - parallelIteratorFactory.setRowProjector(projector); - parallelIteratorFactory.setColumnIndexes(columnIndexes); - parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes); - Tuple tuple; - long totalRowCount = 0; - StatementContext context = queryPlan.getContext(); - while ((tuple=iterator.next()) != null) {// Runs query - Cell kv = tuple.getValue(0); - totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault()); - } - // Return total number of rows that have been updated. In the case of auto commit being off - // the mutations will all be in the mutation state of the current connection. - MutationState mutationState = new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount); - /* - * All the metrics collected for measuring the reads done by the parallel mutating iterators - * is included in the ReadMetricHolder of the statement context. Include these metrics in the - * returned mutation state so they can be published on commit. 
- */ - mutationState.setReadMetricQueue(context.getReadMetricsQueue()); - return mutationState; - } finally { - iterator.close(); - } - } + @Override + public TableRef getTargetRef() { + return tableRef; + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = queryPlan.getExplainPlan(); - List queryPlanSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - List planSteps = - Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - newBuilder.setAbstractExplainPlan("UPSERT SELECT"); - planSteps.add("UPSERT SELECT"); - planSteps.addAll(queryPlanSteps); - return new ExplainPlan(planSteps, newBuilder.build()); - } + @Override + public QueryPlan getQueryPlan() { + return queryPlan; + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return queryPlan.getEstimatedRowsToScan(); - } + @Override + public Set getSourceRefs() { + return originalQueryPlan.getSourceRefs(); + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return queryPlan.getEstimatedBytesToScan(); - } + @Override + public Operation getOperation() { + return operation; + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return queryPlan.getEstimateInfoTimestamp(); - } + @Override + public MutationState execute() throws SQLException { + ResultIterator iterator = queryPlan.iterator(); + if (parallelIteratorFactory == null) { + return upsertSelect(new StatementContext(statement, queryPlan.getContext().getScan()), + tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false); + } + try { + parallelIteratorFactory.setRowProjector(projector); + parallelIteratorFactory.setColumnIndexes(columnIndexes); + parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes); + Tuple tuple; + long totalRowCount = 0; + StatementContext context = queryPlan.getContext(); + while ((tuple = iterator.next()) != null) {// Runs query + Cell kv = tuple.getValue(0); + totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), + kv.getValueOffset(), SortOrder.getDefault()); + } + // Return total number of rows that have been updated. In the case of auto commit being off + // the mutations will all be in the mutation state of the current connection. + MutationState mutationState = + new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount); + /* + * All the metrics collected for measuring the reads done by the parallel mutating iterators + * is included in the ReadMetricHolder of the statement context. Include these metrics in + * the returned mutation state so they can be published on commit. 
+ */ + mutationState.setReadMetricQueue(context.getReadMetricsQueue()); + return mutationState; + } finally { + iterator.close(); + } + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = queryPlan.getExplainPlan(); + List queryPlanSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + List planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + newBuilder.setAbstractExplainPlan("UPSERT SELECT"); + planSteps.add("UPSERT SELECT"); + planSteps.addAll(queryPlanSteps); + return new ExplainPlan(planSteps, newBuilder.build()); + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return queryPlan.getEstimatedRowsToScan(); + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return queryPlan.getEstimatedBytesToScan(); + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return queryPlan.getEstimateInfoTimestamp(); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereCompiler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereCompiler.java index 4aad5e9b7af..3ebd491d70d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereCompiler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereCompiler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,6 +37,13 @@ import java.util.Set; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.WritableUtils; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.expression.AddExpression; import org.apache.phoenix.expression.AndExpression; import org.apache.phoenix.expression.ArrayConstructorExpression; @@ -70,7 +77,14 @@ import org.apache.phoenix.expression.function.ArrayElemRefExpression; import org.apache.phoenix.expression.function.ScalarFunction; import org.apache.phoenix.expression.function.SingleAggregateFunction; +import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor; import org.apache.phoenix.expression.visitor.TraverseAllExpressionVisitor; +import org.apache.phoenix.filter.MultiCFCQKeyValueComparisonFilter; +import org.apache.phoenix.filter.MultiCQKeyValueComparisonFilter; +import org.apache.phoenix.filter.MultiEncodedCQKeyValueComparisonFilter; +import org.apache.phoenix.filter.RowKeyComparisonFilter; +import org.apache.phoenix.filter.SingleCFCQKeyValueComparisonFilter; +import org.apache.phoenix.filter.SingleCQKeyValueComparisonFilter; import org.apache.phoenix.parse.ColumnParseNode; import org.apache.phoenix.parse.FilterableStatement; import org.apache.phoenix.parse.HintNode; @@ -79,35 +93,21 @@ import 
org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; import org.apache.phoenix.parse.SubqueryParseNode; +import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.AmbiguousColumnException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.ColumnRef; import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTableType; -import org.apache.phoenix.schema.TableRef; -import org.apache.phoenix.schema.TypeMismatchException; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor; -import org.apache.phoenix.filter.MultiCFCQKeyValueComparisonFilter; -import org.apache.phoenix.filter.MultiCQKeyValueComparisonFilter; -import org.apache.phoenix.filter.MultiEncodedCQKeyValueComparisonFilter; -import org.apache.phoenix.filter.RowKeyComparisonFilter; -import org.apache.phoenix.filter.SingleCFCQKeyValueComparisonFilter; -import org.apache.phoenix.filter.SingleCQKeyValueComparisonFilter; -import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; import org.apache.phoenix.schema.PTable.ViewType; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.schema.TypeMismatchException; import org.apache.phoenix.schema.types.PBoolean; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; @@ -119,840 +119,868 @@ import org.apache.phoenix.util.SchemaUtil; /** - * * Class to build the filter of a scan - * - * * @since 0.1 */ public class WhereCompiler { - protected static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + protected static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + + private WhereCompiler() { + } + + public static Expression compile(StatementContext context, FilterableStatement statement) + throws SQLException { + return compile(context, statement, null, null, Optional. absent()); + } + + public static Expression compile(StatementContext context, ParseNode whereNode) + throws SQLException { + WhereExpressionCompiler viewWhereCompiler = new WhereExpressionCompiler(context, true); + return whereNode.accept(viewWhereCompiler); + } + + /** + * Pushes where clause filter expressions into scan by building and setting a filter. + * @param context the shared context during query compilation + * @param statement TODO + * @throws SQLException if mismatched types are found, bind value do not match + * binds, or invalid function arguments are encountered. + * @throws SQLFeatureNotSupportedException if an unsupported expression is encountered. 
+ * @throws ColumnNotFoundException if column name could not be resolved + * @throws AmbiguousColumnException if an unaliased column name is ambiguous across + * multiple tables + */ + public static Expression compile(StatementContext context, FilterableStatement statement, + ParseNode viewWhere, Set subqueryNodes, Optional minOffset) + throws SQLException { + return compile(context, statement, viewWhere, Collections. emptyList(), + subqueryNodes, minOffset); + } + + /** + * Optimize scan ranges by applying dynamically generated filter expressions. + * @param context the shared context during query compilation + * @param statement TODO + * @throws SQLException if mismatched types are found, bind value do not match + * binds, or invalid function arguments are encountered. + * @throws SQLFeatureNotSupportedException if an unsupported expression is encountered. + * @throws ColumnNotFoundException if column name could not be resolved + * @throws AmbiguousColumnException if an unaliased column name is ambiguous across + * multiple tables + */ + public static Expression compile(StatementContext context, FilterableStatement statement, + ParseNode viewWhere, List dynamicFilters, Set subqueryNodes, + Optional minOffset) throws SQLException { + ParseNode where = statement.getWhere(); + if (subqueryNodes != null) { // if the subqueryNodes passed in is null, we assume there will be + // no sub-queries in the WHERE clause. + SubqueryParseNodeVisitor subqueryVisitor = + new SubqueryParseNodeVisitor(context, subqueryNodes); + if (where != null) { + where.accept(subqueryVisitor); + } + if (viewWhere != null) { + viewWhere.accept(subqueryVisitor); + } + if (!subqueryNodes.isEmpty()) { + return null; + } + } - private WhereCompiler() { + Set extractedNodes = Sets. newHashSet(); + WhereExpressionCompiler whereCompiler = new WhereExpressionCompiler(context); + Expression expression = where == null + ? 
LiteralExpression.newConstant(true, PBoolean.INSTANCE, Determinism.ALWAYS) + : where.accept(whereCompiler); + if (whereCompiler.isAggregate()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_WHERE).build() + .buildException(); + } + if (expression.getDataType() != PBoolean.INSTANCE) { + throw TypeMismatchException.newException(PBoolean.INSTANCE, expression.getDataType(), + expression.toString()); + } + if (viewWhere != null) { + WhereExpressionCompiler viewWhereCompiler = new WhereExpressionCompiler(context, true); + Expression viewExpression = viewWhere.accept(viewWhereCompiler); + expression = AndExpression.create(Lists.newArrayList(expression, viewExpression)); + } + if (!dynamicFilters.isEmpty()) { + List filters = Lists.newArrayList(expression); + filters.addAll(dynamicFilters); + expression = AndExpression.create(filters); } - public static Expression compile(StatementContext context, FilterableStatement statement) throws SQLException { - return compile(context, statement, null, null, Optional.absent()); + if ( + context.getCurrentTable().getTable().getType() != PTableType.PROJECTED + && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY + ) { + Set hints = null; + if (statement.getHint() != null) { + hints = statement.getHint().getHints(); + } + expression = WhereOptimizer.pushKeyExpressionsToScan(context, hints, expression, + extractedNodes, minOffset); } + setScanFilter(context, statement, expression, whereCompiler.disambiguateWithFamily); + + return expression; + } - public static Expression compile(StatementContext context, ParseNode whereNode) throws SQLException { - WhereExpressionCompiler viewWhereCompiler = new WhereExpressionCompiler(context, true); - return whereNode.accept(viewWhereCompiler); + public static class WhereExpressionCompiler extends ExpressionCompiler { + private boolean disambiguateWithFamily; + + public WhereExpressionCompiler(StatementContext context) { + super(context, true); } - - /** - * Pushes where clause filter expressions into scan by building and setting a filter. - * @param context the shared context during query compilation - * @param statement TODO - * @throws SQLException if mismatched types are found, bind value do not match binds, - * or invalid function arguments are encountered. - * @throws SQLFeatureNotSupportedException if an unsupported expression is encountered. - * @throws ColumnNotFoundException if column name could not be resolved - * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables - */ - public static Expression compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, Set subqueryNodes, Optional minOffset) throws SQLException { - return compile(context, statement, viewWhere, Collections.emptyList(), subqueryNodes, minOffset); + + WhereExpressionCompiler(StatementContext context, boolean resolveViewConstants) { + super(context, resolveViewConstants); } - /** - * Optimize scan ranges by applying dynamically generated filter expressions. - * @param context the shared context during query compilation - * @param statement TODO - * @throws SQLException if mismatched types are found, bind value do not match binds, - * or invalid function arguments are encountered. - * @throws SQLFeatureNotSupportedException if an unsupported expression is encountered. 
- * @throws ColumnNotFoundException if column name could not be resolved - * @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables - */ - public static Expression compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, List dynamicFilters, Set subqueryNodes, Optional minOffset) throws SQLException { - ParseNode where = statement.getWhere(); - if (subqueryNodes != null) { // if the subqueryNodes passed in is null, we assume there will be no sub-queries in the WHERE clause. - SubqueryParseNodeVisitor subqueryVisitor = new SubqueryParseNodeVisitor(context, subqueryNodes); - if (where != null) { - where.accept(subqueryVisitor); - } - if (viewWhere != null) { - viewWhere.accept(subqueryVisitor); - } - if (!subqueryNodes.isEmpty()) { - return null; - } - } - - Set extractedNodes = Sets.newHashSet(); - WhereExpressionCompiler whereCompiler = new WhereExpressionCompiler(context); - Expression expression = where == null ? LiteralExpression.newConstant(true, PBoolean.INSTANCE,Determinism.ALWAYS) : where.accept(whereCompiler); - if (whereCompiler.isAggregate()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_IN_WHERE).build().buildException(); - } - if (expression.getDataType() != PBoolean.INSTANCE) { - throw TypeMismatchException.newException(PBoolean.INSTANCE, expression.getDataType(), expression.toString()); - } - if (viewWhere != null) { - WhereExpressionCompiler viewWhereCompiler = new WhereExpressionCompiler(context, true); - Expression viewExpression = viewWhere.accept(viewWhereCompiler); - expression = AndExpression.create(Lists.newArrayList(expression, viewExpression)); - } - if (!dynamicFilters.isEmpty()) { - List filters = Lists.newArrayList(expression); - filters.addAll(dynamicFilters); - expression = AndExpression.create(filters); - } - - if (context.getCurrentTable().getTable().getType() != PTableType.PROJECTED && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) { - Set hints = null; - if(statement.getHint() != null){ - hints = statement.getHint().getHints(); - } - expression = WhereOptimizer.pushKeyExpressionsToScan(context, hints, expression, extractedNodes, minOffset); - } - setScanFilter(context, statement, expression, whereCompiler.disambiguateWithFamily); + @Override + public Expression visit(ColumnParseNode node) throws SQLException { + ColumnRef ref = resolveColumn(node); + TableRef tableRef = ref.getTableRef(); + Expression newColumnExpression = + ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); + if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(ref.getColumn())) { + byte[] cq = tableRef.getTable().getImmutableStorageScheme() + == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES + : ref.getColumn().getColumnQualifierBytes(); + // track the where condition columns. Later we need to ensure the Scan in HRS scans these + // column CFs + context.addWhereConditionColumn(ref.getColumn().getFamilyName().getBytes(), cq); + } + return newColumnExpression; + } - return expression; + @Override + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef ref = super.resolveColumn(node); + if (disambiguateWithFamily) { + return ref; + } + PTable table = ref.getTable(); + // Track if we need to compare KeyValue during filter evaluation + // using column family. If the column qualifier is enough, we + // just use that. 
+ if (!SchemaUtil.isPKColumn(ref.getColumn())) { + if (!EncodedColumnsUtil.usesEncodedColumnNames(table) || ref.getColumn().isDynamic()) { + try { + table.getColumnForColumnName(ref.getColumn().getName().getString()); + } catch (AmbiguousColumnException e) { + disambiguateWithFamily = true; + } + } else { + for (PColumnFamily columnFamily : table.getColumnFamilies()) { + if (columnFamily.getName().equals(ref.getColumn().getFamilyName())) { + continue; + } + try { + table.getColumnForColumnQualifier(columnFamily.getName().getBytes(), + ref.getColumn().getColumnQualifierBytes()); + // If we find the same qualifier name with different columnFamily, + // then set disambiguateWithFamily to true + disambiguateWithFamily = true; + break; + } catch (ColumnNotFoundException ignore) { + } + } + } + } + return ref; + } + } + + private static final class Counter { + public enum Count { + NONE, + SINGLE, + MULTIPLE + }; + + private Count count = Count.NONE; + private KeyValueColumnExpression column; + + public void increment(KeyValueColumnExpression column) { + switch (count) { + case NONE: + count = Count.SINGLE; + this.column = column; + break; + case SINGLE: + count = column.equals(this.column) ? Count.SINGLE : Count.MULTIPLE; + break; + case MULTIPLE: + break; + + } } - - public static class WhereExpressionCompiler extends ExpressionCompiler { - private boolean disambiguateWithFamily; - public WhereExpressionCompiler(StatementContext context) { - super(context, true); - } + public Count getCount() { + return count; + } - WhereExpressionCompiler(StatementContext context, boolean resolveViewConstants) { - super(context, resolveViewConstants); - } + public KeyValueColumnExpression getColumn() { + return column; + } + } + + /** + * Sets the start/stop key range based on the whereClause expression. + * @param context the shared context during query compilation + * @param whereClause the final where clause expression. 
+ */ + private static void setScanFilter(StatementContext context, FilterableStatement statement, + Expression whereClause, boolean disambiguateWithFamily) { + Scan scan = context.getScan(); + + if (LiteralExpression.isBooleanFalseOrNull(whereClause)) { + context.setScanRanges(ScanRanges.NOTHING); + } else if ( + context.getCurrentTable().getTable().getIndexType() == IndexType.LOCAL + || (IndexUtil.isGlobalIndex(context.getCurrentTable().getTable()) + && context.isUncoveredIndex()) + ) { + if (whereClause != null && !ExpressionUtil.evaluatesToTrue(whereClause)) { + // pass any extra where as scan attribute so it can be evaluated after all + // columns from the main CF have been merged in + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, ExpressionType.valueOf(whereClause).ordinal()); + whereClause.write(output); + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER, stream.toByteArray()); + + // this is needed just for ExplainTable, since de-serializing an expression does not restore + // its display properties, and that cannot be changed, due to backwards compatibility + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER_STR, + Bytes.toBytes(whereClause.toString())); + } + } else if (whereClause != null && !ExpressionUtil.evaluatesToTrue(whereClause)) { + Filter filter = null; + final Counter counter = new Counter(); + whereClause.accept(new KeyValueExpressionVisitor() { @Override - public Expression visit(ColumnParseNode node) throws SQLException { - ColumnRef ref = resolveColumn(node); - TableRef tableRef = ref.getTableRef(); - Expression newColumnExpression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); - if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(ref.getColumn())) { - byte[] cq = tableRef.getTable().getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS - ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : ref.getColumn().getColumnQualifierBytes(); - // track the where condition columns. Later we need to ensure the Scan in HRS scans these column CFs - context.addWhereConditionColumn(ref.getColumn().getFamilyName().getBytes(), cq); - } - return newColumnExpression; + public Iterator defaultIterator(Expression node) { + // Stop traversal once we've found multiple KeyValue columns + if (counter.getCount() == Counter.Count.MULTIPLE) { + return Collections.emptyIterator(); + } + return super.defaultIterator(node); } @Override - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef ref = super.resolveColumn(node); - if (disambiguateWithFamily) { - return ref; - } - PTable table = ref.getTable(); - // Track if we need to compare KeyValue during filter evaluation - // using column family. If the column qualifier is enough, we - // just use that. 
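/*
 * A standalone sketch of the idea behind Counter above: count how many distinct non-PK columns
 * the WHERE expression touches and use that to pick a filter strategy (row-key-only,
 * single-column, or multi-column comparison filter), analogous to RowKeyComparisonFilter,
 * SingleCQKeyValueComparisonFilter, and MultiCQKeyValueComparisonFilter. The FilterChoice type
 * and choose() method are hypothetical, not Phoenix APIs.
 */
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class FilterChoiceSketch {
  enum FilterChoice { ROW_KEY_ONLY, SINGLE_COLUMN, MULTI_COLUMN }

  static FilterChoice choose(List<String> referencedColumns) {
    // distinct column references decide the strategy, mirroring Counter's NONE/SINGLE/MULTIPLE
    Set<String> distinct = new LinkedHashSet<>(referencedColumns);
    if (distinct.isEmpty()) {
      return FilterChoice.ROW_KEY_ONLY;
    } else if (distinct.size() == 1) {
      return FilterChoice.SINGLE_COLUMN;
    }
    return FilterChoice.MULTI_COLUMN;
  }

  public static void main(String[] args) {
    System.out.println(choose(List.of()));               // ROW_KEY_ONLY
    System.out.println(choose(List.of("CF.B", "CF.B"))); // SINGLE_COLUMN
    System.out.println(choose(List.of("CF.B", "CF.C"))); // MULTI_COLUMN
  }
}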
- if (!SchemaUtil.isPKColumn(ref.getColumn())) { - if (!EncodedColumnsUtil.usesEncodedColumnNames(table) - || ref.getColumn().isDynamic()) { - try { - table.getColumnForColumnName(ref.getColumn().getName().getString()); - } catch (AmbiguousColumnException e) { - disambiguateWithFamily = true; - } - } else { - for (PColumnFamily columnFamily : table.getColumnFamilies()) { - if (columnFamily.getName().equals(ref.getColumn().getFamilyName())) { - continue; - } - try { - table.getColumnForColumnQualifier(columnFamily.getName().getBytes(), - ref.getColumn().getColumnQualifierBytes()); - // If we find the same qualifier name with different columnFamily, - // then set disambiguateWithFamily to true - disambiguateWithFamily = true; - break; - } catch (ColumnNotFoundException ignore) { - } - } - } - } - return ref; - } - } - - private static final class Counter { - public enum Count {NONE, SINGLE, MULTIPLE}; - private Count count = Count.NONE; - private KeyValueColumnExpression column; - - public void increment(KeyValueColumnExpression column) { - switch (count) { - case NONE: - count = Count.SINGLE; - this.column = column; - break; - case SINGLE: - count = column.equals(this.column) ? Count.SINGLE : Count.MULTIPLE; - break; - case MULTIPLE: - break; + public Void visit(KeyValueColumnExpression expression) { + counter.increment(expression); + return null; + } + }); + PTable table = context.getCurrentTable().getTable(); + QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); + ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); + Counter.Count count = counter.getCount(); + boolean allCFs = false; + byte[] essentialCF = null; + if (counter.getCount() == Counter.Count.SINGLE && whereClause.requiresFinalEvaluation()) { + if (table.getViewType() == ViewType.MAPPED) { + allCFs = true; + } else { + byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); + if (Bytes.compareTo(emptyCF, counter.getColumn().getColumnFamily()) != 0) { + essentialCF = emptyCF; + count = Counter.Count.MULTIPLE; + } + } + } + switch (count) { + case NONE: + essentialCF = table.getType() == PTableType.VIEW + ? ByteUtil.EMPTY_BYTE_ARRAY + : SchemaUtil.getEmptyColumnFamily(table); + filter = new RowKeyComparisonFilter(whereClause, essentialCF); + break; + case SINGLE: + filter = disambiguateWithFamily + ? new SingleCFCQKeyValueComparisonFilter(whereClause) + : new SingleCQKeyValueComparisonFilter(whereClause); + break; + case MULTIPLE: + filter = isPossibleToUseEncodedCQFilter(encodingScheme, storageScheme) + ? new MultiEncodedCQKeyValueComparisonFilter(whereClause, encodingScheme, allCFs, + essentialCF) + : (disambiguateWithFamily + ? 
new MultiCFCQKeyValueComparisonFilter(whereClause, allCFs, essentialCF) + : new MultiCQKeyValueComparisonFilter(whereClause, allCFs, essentialCF)); + break; + } + scan.setFilter(filter); + } - } - } - - public Count getCount() { - return count; - } - - public KeyValueColumnExpression getColumn() { - return column; - } + ScanRanges scanRanges = context.getScanRanges(); + if (scanRanges.useSkipScanFilter()) { + ScanUtil.andFilterAtBeginning(scan, scanRanges.getSkipScanFilter()); } + } + public static Expression transformDNF(ParseNode where, StatementContext statementContext) + throws SQLException { + if (where == null) { + return null; + } + StatementContext context = new StatementContext(statementContext); + context.setResolver(FromCompiler.getResolver(context.getCurrentTable())); + Expression expression = where.accept(new WhereExpressionCompiler(context)); + Expression dnf = expression.accept(new DNFExpressionRewriter()); + return dnf; + } + + /** + * Rewrites an expression in DNF (Disjunctive Normal Form). To do that (1) it transforms operators + * like RVC, IN, and BETWEEN to their AND/OR equivalents, (2) eliminate double negations and apply + * DeMorgan rule, i.e., NOT (A AND B) = NOT A OR NOT B and NOT (A OR B) = NOT A AND NOT B, and (3) + * distributes AND over OR, i.e., (A OR B) AND (C OR D) = (A AND C) OR (A AND D) OR (B AND C) OR + * (B AND D). + */ + public static class DNFExpressionRewriter extends TraverseAllExpressionVisitor { /** - * Sets the start/stop key range based on the whereClause expression. - * @param context the shared context during query compilation - * @param whereClause the final where clause expression. + * Flattens nested AND expressions. For example A > 10 AND (B = 10 AND C > 0) is an + * AndExpression with two children that are A > 10 and (B = 10 AND C > 0). Note the second child + * is another AndExpression. This is flattened as an AndExpression ( A > 10 AND B = 10 AND C > + * 0) with three children that are A > 10, B = 10, and C > 0. 
*/ - private static void setScanFilter(StatementContext context, FilterableStatement statement, Expression whereClause, boolean disambiguateWithFamily) { - Scan scan = context.getScan(); - - if (LiteralExpression.isBooleanFalseOrNull(whereClause)) { - context.setScanRanges(ScanRanges.NOTHING); - } else if (context.getCurrentTable().getTable().getIndexType() == IndexType.LOCAL - || (IndexUtil.isGlobalIndex(context.getCurrentTable().getTable()) - && context.isUncoveredIndex())) { - if (whereClause != null && !ExpressionUtil.evaluatesToTrue(whereClause)) { - // pass any extra where as scan attribute so it can be evaluated after all - // columns from the main CF have been merged in - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, ExpressionType.valueOf(whereClause).ordinal()); - whereClause.write(output); - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER, stream.toByteArray()); - - // this is needed just for ExplainTable, since de-serializing an expression does not restore - // its display properties, and that cannot be changed, due to backwards compatibility - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER_STR, - Bytes.toBytes(whereClause.toString())); - } - } else if (whereClause != null && !ExpressionUtil.evaluatesToTrue(whereClause)) { - Filter filter = null; - final Counter counter = new Counter(); - whereClause.accept(new KeyValueExpressionVisitor() { - - @Override - public Iterator defaultIterator(Expression node) { - // Stop traversal once we've found multiple KeyValue columns - if (counter.getCount() == Counter.Count.MULTIPLE) { - return Collections.emptyIterator(); - } - return super.defaultIterator(node); - } - - @Override - public Void visit(KeyValueColumnExpression expression) { - counter.increment(expression); - return null; - } - }); - PTable table = context.getCurrentTable().getTable(); - QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); - ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); - Counter.Count count = counter.getCount(); - boolean allCFs = false; - byte[] essentialCF = null; - if (counter.getCount() == Counter.Count.SINGLE && whereClause.requiresFinalEvaluation() ) { - if (table.getViewType() == ViewType.MAPPED) { - allCFs = true; - } else { - byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); - if (Bytes.compareTo(emptyCF, counter.getColumn().getColumnFamily()) != 0) { - essentialCF = emptyCF; - count = Counter.Count.MULTIPLE; - } - } - } - switch (count) { - case NONE: - essentialCF = table.getType() == PTableType.VIEW - ? ByteUtil.EMPTY_BYTE_ARRAY - : SchemaUtil.getEmptyColumnFamily(table); - filter = new RowKeyComparisonFilter(whereClause, essentialCF); - break; - case SINGLE: - filter = disambiguateWithFamily - ? new SingleCFCQKeyValueComparisonFilter(whereClause) - : new SingleCQKeyValueComparisonFilter(whereClause); - break; - case MULTIPLE: - filter = isPossibleToUseEncodedCQFilter(encodingScheme, storageScheme) - ? new MultiEncodedCQKeyValueComparisonFilter(whereClause, encodingScheme, allCFs, essentialCF) - : (disambiguateWithFamily - ? 
new MultiCFCQKeyValueComparisonFilter( whereClause, allCFs, essentialCF) - : new MultiCQKeyValueComparisonFilter(whereClause, allCFs, essentialCF)); - break; - } - scan.setFilter(filter); - } - - ScanRanges scanRanges = context.getScanRanges(); - if (scanRanges.useSkipScanFilter()) { - ScanUtil.andFilterAtBeginning(scan, scanRanges.getSkipScanFilter()); - } - } - - public static Expression transformDNF(ParseNode where, StatementContext statementContext) - throws SQLException { - if (where == null) { - return null; + private static AndExpression flattenAnd(List l) { + for (Expression e : l) { + if (e instanceof AndExpression) { + List flattenedList = new ArrayList<>(l.size() + e.getChildren().size()); + for (Expression child : l) { + if (child instanceof AndExpression) { + flattenedList.addAll(child.getChildren()); + } else { + flattenedList.add(child); + } + } + return new AndExpression(flattenedList); } - StatementContext context = new StatementContext(statementContext); - context.setResolver(FromCompiler.getResolver(context.getCurrentTable())); - Expression expression = where.accept(new WhereExpressionCompiler(context)); - Expression dnf = expression.accept(new DNFExpressionRewriter()); - return dnf; + } + return new AndExpression(l); } /** - * Rewrites an expression in DNF (Disjunctive Normal Form). To do that - * (1) it transforms operators like RVC, IN, and BETWEEN to their AND/OR equivalents, - * (2) eliminate double negations and apply DeMorgan rule, i.e., - * NOT (A AND B) = NOT A OR NOT B and NOT (A OR B) = NOT A AND NOT B, and - * (3) distributes AND over OR, i.e., - * (A OR B) AND (C OR D) = (A AND C) OR (A AND D) OR (B AND C) OR (B AND D). + * Flattens nested OR expressions. For example A > 10 OR (B = 10 OR C > 0) is an OrExpression + * with two children that are A > 10 and (B = 10 OR C > 0). Note the second child is another + * OrExpression. This is flattened as an OrExpression ( A > 10 OR B = 10 OR C > 0) with three + * children that are A > 10, B = 10, and C > 0. */ - public static class DNFExpressionRewriter extends TraverseAllExpressionVisitor { - /** - * Flattens nested AND expressions. - * For example A > 10 AND (B = 10 AND C > 0) is an AndExpression with two children that are - * A > 10 and (B = 10 AND C > 0). Note the second child is another AndExpression. This is - * flattened as an AndExpression ( A > 10 AND B = 10 AND C > 0) with three - * children that are A > 10, B = 10, and C > 0. - * - */ - - private static AndExpression flattenAnd(List l) { - for (Expression e : l) { - if (e instanceof AndExpression) { - List flattenedList = new ArrayList<>(l.size() - + e.getChildren().size()); - for (Expression child : l) { - if (child instanceof AndExpression) { - flattenedList.addAll(child.getChildren()); - } else { - flattenedList.add(child); - } - } - return new AndExpression(flattenedList); - } - } - return new AndExpression(l); - } - - /** - * Flattens nested OR expressions. - * For example A > 10 OR (B = 10 OR C > 0) is an OrExpression with two children that are - * A > 10 and (B = 10 OR C > 0). Note the second child is another OrExpression. This is - * flattened as an OrExpression ( A > 10 OR B = 10 OR C > 0) with three - * children that are A > 10, B = 10, and C > 0. 
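/*
 * A standalone sketch of the distribution step described in the DNFExpressionRewriter javadoc:
 * (A OR B) AND (C OR D) = (A AND C) OR (A AND D) OR (B AND C) OR (B AND D). A DNF expression is
 * modelled here as an OR-list of AND-lists of plain strings; this is not the Phoenix Expression
 * model.
 */
import java.util.ArrayList;
import java.util.List;

public class DnfSketch {
  // Each inner List<String> is one conjunct (an AND of simple terms);
  // the outer list is the OR of those conjuncts.
  static List<List<String>> and(List<List<String>> a, List<List<String>> b) {
    List<List<String>> out = new ArrayList<>();
    for (List<String> x : a) {
      for (List<String> y : b) {
        List<String> conjunct = new ArrayList<>(x);
        conjunct.addAll(y);
        out.add(conjunct); // (x AND y) becomes one disjunct of the result
      }
    }
    return out;
  }

  public static void main(String[] args) {
    // (A OR B) AND (C OR D)
    List<List<String>> left = List.of(List.of("A"), List.of("B"));
    List<List<String>> right = List.of(List.of("C"), List.of("D"));
    // prints [[A, C], [A, D], [B, C], [B, D]]
    System.out.println(and(left, right));
  }
}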
- * - */ - private static OrExpression flattenOr(List l) { - for (Expression e : l) { - if (e instanceof OrExpression) { - List flattenedList = new ArrayList<>(l.size() - + e.getChildren().size()); - for (Expression child : l) { - if (child instanceof OrExpression) { - flattenedList.addAll(child.getChildren()); - } else { - flattenedList.add(child); - } - } - return new OrExpression(flattenedList); - } + private static OrExpression flattenOr(List l) { + for (Expression e : l) { + if (e instanceof OrExpression) { + List flattenedList = new ArrayList<>(l.size() + e.getChildren().size()); + for (Expression child : l) { + if (child instanceof OrExpression) { + flattenedList.addAll(child.getChildren()); + } else { + flattenedList.add(child); } - return new OrExpression(l); + } + return new OrExpression(flattenedList); } + } + return new OrExpression(l); + } - /** - * Flattens nested AND expressions and then distributes AND over OR. - * - */ - @Override public Expression visitLeave(AndExpression node, List l) { - AndExpression andExpression = flattenAnd(l); - - boolean foundOrChild = false; - int i; - Expression child = null; - List andChildren = andExpression.getChildren(); - for (i = 0; i < andChildren.size(); i++) { - child = andChildren.get(i); - if (child instanceof OrExpression) { - foundOrChild = true; - break; - } - } + /** + * Flattens nested AND expressions and then distributes AND over OR. + */ + @Override + public Expression visitLeave(AndExpression node, List l) { + AndExpression andExpression = flattenAnd(l); + + boolean foundOrChild = false; + int i; + Expression child = null; + List andChildren = andExpression.getChildren(); + for (i = 0; i < andChildren.size(); i++) { + child = andChildren.get(i); + if (child instanceof OrExpression) { + foundOrChild = true; + break; + } + } + + if (foundOrChild) { + List flattenedList = new ArrayList<>(andChildren.size() - 1); + for (int j = 0; j < andChildren.size(); j++) { + if (i != j) { + flattenedList.add(andChildren.get(j)); + } + } + List orList = new ArrayList<>(child.getChildren().size()); + for (Expression grandChild : child.getChildren()) { + List andList = new ArrayList<>(l.size()); + andList.addAll(flattenedList); + andList.add(grandChild); + orList.add(visitLeave(new AndExpression(andList), andList)); + } + return visitLeave(new OrExpression(orList), orList); + } + return andExpression; + } - if (foundOrChild) { - List flattenedList = new ArrayList<>(andChildren.size() - 1); - for (int j = 0; j < andChildren.size(); j++) { - if (i != j) { - flattenedList.add(andChildren.get(j)); - } - } - List orList = new ArrayList<>(child.getChildren().size()); - for (Expression grandChild : child.getChildren()) { - List andList = new ArrayList<>(l.size()); - andList.addAll(flattenedList); - andList.add(grandChild); - orList.add(visitLeave(new AndExpression(andList), andList)); - } - return visitLeave(new OrExpression(orList), orList); - } - return andExpression; - } - @Override public Expression visitLeave(OrExpression node, List l) { - return flattenOr(l); - } + @Override + public Expression visitLeave(OrExpression node, List l) { + return flattenOr(l); + } - @Override public Expression visitLeave(ScalarFunction node, List l) { - return node; - } + @Override + public Expression visitLeave(ScalarFunction node, List l) { + return node; + } - private static ComparisonExpression createComparisonExpression(CompareOperator op, - Expression lhs, Expression rhs) { - List children = new ArrayList<>(2); - children.add(lhs); - children.add(rhs); - return 
new ComparisonExpression(children, op); - } + private static ComparisonExpression createComparisonExpression(CompareOperator op, + Expression lhs, Expression rhs) { + List children = new ArrayList<>(2); + children.add(lhs); + children.add(rhs); + return new ComparisonExpression(children, op); + } - @Override public Expression visitLeave(ComparisonExpression node, List l) { - if (l == null || l.isEmpty()) { - return node; - } - Expression lhs = l.get(0); - Expression rhs = l.get(1); - if (!(lhs instanceof RowValueConstructorExpression) - || !(rhs instanceof RowValueConstructorExpression)) { - return new ComparisonExpression(l, node.getFilterOp()); - } + @Override + public Expression visitLeave(ComparisonExpression node, List l) { + if (l == null || l.isEmpty()) { + return node; + } + Expression lhs = l.get(0); + Expression rhs = l.get(1); + if ( + !(lhs instanceof RowValueConstructorExpression) + || !(rhs instanceof RowValueConstructorExpression) + ) { + return new ComparisonExpression(l, node.getFilterOp()); + } + + // Rewrite RVC in DNF (Disjunctive Normal Form) + // For example + // (A, B, C ) op (a, b, c) where op is == or != equals to + // (A != a and B != b and C != c) + // (A, B, C ) op (a, b, c) where op is <, <=, >, or >= is equals to + // (A == a and B == b and C op c) or (A == a and B op b) or A op c + + int childCount = lhs.getChildren().size(); + if (node.getFilterOp() == EQUAL || node.getFilterOp() == NOT_EQUAL) { + List andList = new ArrayList<>(childCount); + for (int i = 0; i < childCount; i++) { + andList.add(createComparisonExpression(node.getFilterOp(), lhs.getChildren().get(i), + rhs.getChildren().get(i))); + } + return new AndExpression(andList); + } + List orList = new ArrayList<>(childCount); + for (int i = 0; i < childCount; i++) { + List andList = new ArrayList<>(childCount); + int j; + for (j = 0; j < childCount - i - 1; j++) { + andList.add( + createComparisonExpression(EQUAL, lhs.getChildren().get(j), rhs.getChildren().get(j))); + } + andList.add(createComparisonExpression(node.getFilterOp(), lhs.getChildren().get(j), + rhs.getChildren().get(j))); + orList.add(new AndExpression(andList)); + } + return new OrExpression(orList); + } - // Rewrite RVC in DNF (Disjunctive Normal Form) - // For example - // (A, B, C ) op (a, b, c) where op is == or != equals to - // (A != a and B != b and C != c) - // (A, B, C ) op (a, b, c) where op is <, <=, >, or >= is equals to - // (A == a and B == b and C op c) or (A == a and B op b) or A op c - - int childCount = lhs.getChildren().size(); - if (node.getFilterOp() == EQUAL - || node.getFilterOp() == NOT_EQUAL) { - List andList = new ArrayList<>(childCount); - for (int i = 0; i < childCount; i++) { - andList.add(createComparisonExpression(node.getFilterOp(), - lhs.getChildren().get(i), - rhs.getChildren().get(i))); - } - return new AndExpression(andList); - } - List orList = new ArrayList<>(childCount); - for (int i = 0; i < childCount; i++) { - List andList = new ArrayList<>(childCount); - int j; - for (j = 0; j < childCount - i - 1; j++) { - andList.add(createComparisonExpression(EQUAL, lhs.getChildren().get(j), - rhs.getChildren().get(j))); - } - andList.add(createComparisonExpression(node.getFilterOp(), lhs.getChildren().get(j), - rhs.getChildren().get(j))); - orList.add(new AndExpression(andList)); - } - return new OrExpression(orList); - } + @Override + public Expression visitLeave(LikeExpression node, List l) { + return node; + } - @Override public Expression visitLeave(LikeExpression node, List l) { - return node; - } + 
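/*
 * A standalone sketch of the row value constructor rewrite performed by
 * visitLeave(ComparisonExpression) above for inequality operators:
 * (A, B, C) < (a, b, c) becomes (A = a AND B = b AND C < c) OR (A = a AND B < b) OR (A < a).
 * Plain strings stand in for expressions; this is not the Phoenix Expression model.
 */
import java.util.ArrayList;
import java.util.List;

public class RvcRewriteSketch {
  static String rewrite(String[] lhs, String[] rhs, String op) {
    int n = lhs.length;
    List<String> orList = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
      List<String> andList = new ArrayList<>(n);
      int j;
      for (j = 0; j < n - i - 1; j++) {
        andList.add(lhs[j] + " = " + rhs[j]);        // leading columns pinned to equality
      }
      andList.add(lhs[j] + " " + op + " " + rhs[j]); // first column allowed to differ
      orList.add("(" + String.join(" AND ", andList) + ")");
    }
    return String.join(" OR ", orList);
  }

  public static void main(String[] args) {
    // prints (A = a AND B = b AND C < c) OR (A = a AND B < b) OR (A < a)
    System.out.println(rewrite(new String[] { "A", "B", "C" },
      new String[] { "a", "b", "c" }, "<"));
  }
}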
@Override + public Expression visitLeave(SingleAggregateFunction node, List l) { + return node; + } - @Override public Expression visitLeave(SingleAggregateFunction node, List l) { - return node; - } + @Override + public Expression visitLeave(CaseExpression node, List l) { + return node; + } - @Override public Expression visitLeave(CaseExpression node, List l) { - return node; - } + private static Expression negate(ComparisonExpression node) { + CompareOperator op = node.getFilterOp(); + Expression lhs = node.getChildren().get(0); + Expression rhs = node.getChildren().get(1); + switch (op) { + case LESS: + return createComparisonExpression(GREATER_OR_EQUAL, lhs, rhs); + case LESS_OR_EQUAL: + return createComparisonExpression(GREATER, lhs, rhs); + case EQUAL: + return createComparisonExpression(NOT_EQUAL, lhs, rhs); + case NOT_EQUAL: + return createComparisonExpression(EQUAL, lhs, rhs); + case GREATER_OR_EQUAL: + return createComparisonExpression(LESS, lhs, rhs); + case GREATER: + return createComparisonExpression(LESS_OR_EQUAL, lhs, rhs); + default: + throw new IllegalArgumentException("Unexpected CompareOp of " + op); + } + } - private static Expression negate(ComparisonExpression node) { - CompareOperator op = node.getFilterOp(); - Expression lhs = node.getChildren().get(0); - Expression rhs = node.getChildren().get(1); - switch (op) { - case LESS: - return createComparisonExpression(GREATER_OR_EQUAL, lhs, rhs); - case LESS_OR_EQUAL: - return createComparisonExpression(GREATER, lhs, rhs); - case EQUAL: - return createComparisonExpression(NOT_EQUAL, lhs, rhs); - case NOT_EQUAL: - return createComparisonExpression(EQUAL, lhs, rhs); - case GREATER_OR_EQUAL: - return createComparisonExpression(LESS, lhs, rhs); - case GREATER: - return createComparisonExpression(LESS_OR_EQUAL, lhs, rhs); - default: - throw new IllegalArgumentException("Unexpected CompareOp of " + op); - } - } - private static List negateChildren(List children) { - List list = new ArrayList<>(children.size()); - for (Expression child : children) { - if (child instanceof ComparisonExpression) { - list.add(negate((ComparisonExpression) child)); - } else if (child instanceof OrExpression) { - list.add(negate((OrExpression) child)); - } else if (child instanceof AndExpression) { - list.add(negate((AndExpression) child)); - } else if (child instanceof ColumnExpression) { - list.add(new NotExpression(child)); - } else if (child instanceof NotExpression) { - list.add(child.getChildren().get(0)); - } else { - throw new IllegalArgumentException("Unexpected Instance of " + child); - } - } - return list; - } - private static Expression negate(OrExpression node) { - return new AndExpression(negateChildren(node.getChildren())); + private static List negateChildren(List children) { + List list = new ArrayList<>(children.size()); + for (Expression child : children) { + if (child instanceof ComparisonExpression) { + list.add(negate((ComparisonExpression) child)); + } else if (child instanceof OrExpression) { + list.add(negate((OrExpression) child)); + } else if (child instanceof AndExpression) { + list.add(negate((AndExpression) child)); + } else if (child instanceof ColumnExpression) { + list.add(new NotExpression(child)); + } else if (child instanceof NotExpression) { + list.add(child.getChildren().get(0)); + } else { + throw new IllegalArgumentException("Unexpected Instance of " + child); } + } + return list; + } - private static Expression negate(AndExpression node) { - return new OrExpression(negateChildren(node.getChildren())); - } - 
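/*
 * A standalone sketch of the negation rules used by negate(...) above: a NOT in front of a
 * comparison flips its operator, and De Morgan turns NOT(x OR y) into NOT x AND NOT y and
 * NOT(x AND y) into NOT x OR NOT y. The operator table and class name are illustrative only.
 */
import java.util.Map;

public class NegationSketch {
  private static final Map<String, String> INVERSE = Map.of(
    "<", ">=", "<=", ">", "=", "!=", "!=", "=", ">=", "<", ">", "<=");

  static String negateComparison(String lhs, String op, String rhs) {
    return lhs + " " + INVERSE.get(op) + " " + rhs;
  }

  public static void main(String[] args) {
    // NOT (A > 5)           ->  A <= 5
    System.out.println(negateComparison("A", ">", "5"));
    // NOT (A > 5 OR B = 3)  ->  A <= 5 AND B != 3   (De Morgan)
    System.out.println(negateComparison("A", ">", "5") + " AND "
      + negateComparison("B", "=", "3"));
  }
}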
@Override public Expression visitLeave(NotExpression node, List l) { - Expression child = l.get(0); - if (child instanceof OrExpression) { - return negate((OrExpression) child); - } else if (child instanceof AndExpression) { - return negate((AndExpression) child); - } else if (child instanceof ComparisonExpression) { - return negate((ComparisonExpression) child); - } else if (child instanceof NotExpression) { - return child.getChildren().get(0); - } else if (child instanceof IsNullExpression) { - return new IsNullExpression(ImmutableList.of(l.get(0).getChildren().get(0)), - !((IsNullExpression) child).isNegate()); - } else { - return new NotExpression(child); - } - } + private static Expression negate(OrExpression node) { + return new AndExpression(negateChildren(node.getChildren())); + } - private Expression transformInList(InListExpression node, boolean negate, - List l) { - List list = new ArrayList<>(node.getKeyExpressions().size()); - for (Expression element : node.getKeyExpressions()) { - if (negate) { - list.add(createComparisonExpression(NOT_EQUAL, l.get(0), element)); - } else { - list.add(createComparisonExpression(EQUAL, l.get(0), element)); - } - } - if (negate) { - return new AndExpression(list); - } else { - return new OrExpression(list); - } - } + private static Expression negate(AndExpression node) { + return new OrExpression(negateChildren(node.getChildren())); + } - @Override public Expression visitLeave(InListExpression node, List l) { - Expression inList = transformInList(node, false, l); - Expression firstElement = inList.getChildren().get(0); - // Check if inList includes RVC expressions. If so, rewrite them - if (firstElement instanceof ComparisonExpression - && firstElement.getChildren().get(0) instanceof RowValueConstructorExpression) { - List list = new ArrayList<>(node.getKeyExpressions().size()); - for (Expression e : inList.getChildren()) { - list.add(visitLeave((ComparisonExpression) e, e.getChildren())); - } - if (inList instanceof OrExpression) { - return visitLeave(new OrExpression(list), list); - } else { - return visitLeave(new AndExpression(list), list); - } - } else { - return inList; - } - } + @Override + public Expression visitLeave(NotExpression node, List l) { + Expression child = l.get(0); + if (child instanceof OrExpression) { + return negate((OrExpression) child); + } else if (child instanceof AndExpression) { + return negate((AndExpression) child); + } else if (child instanceof ComparisonExpression) { + return negate((ComparisonExpression) child); + } else if (child instanceof NotExpression) { + return child.getChildren().get(0); + } else if (child instanceof IsNullExpression) { + return new IsNullExpression(ImmutableList.of(l.get(0).getChildren().get(0)), + !((IsNullExpression) child).isNegate()); + } else { + return new NotExpression(child); + } + } - @Override public Expression visitLeave(IsNullExpression node, List l) { - return node; - } + private Expression transformInList(InListExpression node, boolean negate, List l) { + List list = new ArrayList<>(node.getKeyExpressions().size()); + for (Expression element : node.getKeyExpressions()) { + if (negate) { + list.add(createComparisonExpression(NOT_EQUAL, l.get(0), element)); + } else { + list.add(createComparisonExpression(EQUAL, l.get(0), element)); + } + } + if (negate) { + return new AndExpression(list); + } else { + return new OrExpression(list); + } + } - @Override public Expression visitLeave(SubtractExpression node, List l) { - return node; + @Override + public Expression 
visitLeave(InListExpression node, List l) { + Expression inList = transformInList(node, false, l); + Expression firstElement = inList.getChildren().get(0); + // Check if inList includes RVC expressions. If so, rewrite them + if ( + firstElement instanceof ComparisonExpression + && firstElement.getChildren().get(0) instanceof RowValueConstructorExpression + ) { + List list = new ArrayList<>(node.getKeyExpressions().size()); + for (Expression e : inList.getChildren()) { + list.add(visitLeave((ComparisonExpression) e, e.getChildren())); + } + if (inList instanceof OrExpression) { + return visitLeave(new OrExpression(list), list); + } else { + return visitLeave(new AndExpression(list), list); } + } else { + return inList; + } + } - @Override public Expression visitLeave(MultiplyExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(IsNullExpression node, List l) { + return node; + } - @Override public Expression visitLeave(AddExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(SubtractExpression node, List l) { + return node; + } - @Override public Expression visitLeave(DivideExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(MultiplyExpression node, List l) { + return node; + } - @Override public Expression visitLeave(CoerceExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(AddExpression node, List l) { + return node; + } - @Override - public Expression visitLeave(ArrayConstructorExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(DivideExpression node, List l) { + return node; + } - @Override - public Expression visitLeave(SingleCellConstructorExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(CoerceExpression node, List l) { + return node; + } - @Override public Expression visit(CorrelateVariableFieldAccessExpression node) { - return node; - } + @Override + public Expression visitLeave(ArrayConstructorExpression node, List l) { + return node; + } - @Override public Expression visit(LiteralExpression node) { - return node; - } + @Override + public Expression visitLeave(SingleCellConstructorExpression node, List l) { + return node; + } - @Override public Expression visit(RowKeyColumnExpression node) { - return node; - } + @Override + public Expression visit(CorrelateVariableFieldAccessExpression node) { + return node; + } - @Override public Expression visit(KeyValueColumnExpression node) { - return node; - } + @Override + public Expression visit(LiteralExpression node) { + return node; + } - @Override public Expression visit(SingleCellColumnExpression node) { - return node; - } + @Override + public Expression visit(RowKeyColumnExpression node) { + return node; + } - @Override public Expression visit(ProjectedColumnExpression node) { - return node; - } + @Override + public Expression visit(KeyValueColumnExpression node) { + return node; + } - @Override public Expression visit(SequenceValueExpression node) { - return node; - } + @Override + public Expression visit(SingleCellColumnExpression node) { + return node; + } - @Override public Expression visitLeave(StringConcatExpression node, List l) { - return node; - } + @Override + public Expression visit(ProjectedColumnExpression node) { + return node; + } - @Override - public Expression visitLeave(RowValueConstructorExpression node, List l) { - return node; - } + @Override + public Expression visit(SequenceValueExpression node) 
{ + return node; + } - @Override public Expression visitLeave(ModulusExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(StringConcatExpression node, List l) { + return node; + } - @Override - public Expression visitLeave(ArrayAnyComparisonExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(RowValueConstructorExpression node, List l) { + return node; + } - @Override public Expression visitLeave(ArrayElemRefExpression node, List l) { - return node; - } + @Override + public Expression visitLeave(ModulusExpression node, List l) { + return node; } - public static LiteralExpression getLiteralExpression(Expression node) { - while (!node.getChildren().isEmpty()) { - node = node.getChildren().get(0); - } - if (node instanceof LiteralExpression) { - return (LiteralExpression) node; - } - throw new IllegalArgumentException("Unexpected instance type for " + node); + @Override + public Expression visitLeave(ArrayAnyComparisonExpression node, List l) { + return node; } + @Override + public Expression visitLeave(ArrayElemRefExpression node, List l) { + return node; + } + } - public static BaseTerminalExpression getBaseTerminalExpression(Expression node) { - while (!node.getChildren().isEmpty()) { - node = node.getChildren().get(0); - } - if (node instanceof BaseTerminalExpression) { - return (BaseTerminalExpression) node; - } - throw new IllegalArgumentException("Unexpected instance type for " + node); + public static LiteralExpression getLiteralExpression(Expression node) { + while (!node.getChildren().isEmpty()) { + node = node.getChildren().get(0); } + if (node instanceof LiteralExpression) { + return (LiteralExpression) node; + } + throw new IllegalArgumentException("Unexpected instance type for " + node); + } - /** - * Determines if nodeA is contained by nodeB. - * - * nodeB contains nodeA if every conjunct of nodeB contains at least one conjunct of nodeA. - * - * Example 1: nodeA is contained by nodeB where - * nodeA = (A > 5) and (A < 10) and (B > 0) and C = 5, and - * nodeB = (A > 0) - * - * Example 2: nodeA is not contained by nodeB since C < 0 does not contain any of A's conjuncts - * where - * nodeA = (A > 5) and (A < 10) and (B > 0) and C = 5, and - * nodeB = (A > 0) and (C < 0) - * - * @param nodeA is a simple term or AndExpression constructed from simple terms - * @param nodeB is a simple term or AndExpression constructed from simple terms - * @return true if nodeA is contained by nodeB. - */ - private static boolean contained(Expression nodeA, Expression nodeB) { - if (nodeB instanceof AndExpression) { - for (Expression childB : nodeB.getChildren()) { - if (nodeA instanceof AndExpression) { - boolean contains = false; - for (Expression childA : nodeA.getChildren()) { - if (childB.contains(childA)) { - contains = true; - break; - } - } - if (!contains) { - return false; - } - } else { - // node A is a simple term - if (!childB.contains(nodeA)) { - return false; - } - } - } + public static BaseTerminalExpression getBaseTerminalExpression(Expression node) { + while (!node.getChildren().isEmpty()) { + node = node.getChildren().get(0); + } + if (node instanceof BaseTerminalExpression) { + return (BaseTerminalExpression) node; + } + throw new IllegalArgumentException("Unexpected instance type for " + node); + } + + /** + * Determines if nodeA is contained by nodeB. nodeB contains nodeA if every conjunct of nodeB + * contains at least one conjunct of nodeA. 
Example 1: nodeA is contained by nodeB where nodeA = + * (A > 5) and (A < 10) and (B > 0) and C = 5, and nodeB = (A > 0) Example 2: nodeA is not + * contained by nodeB since C < 0 does not contain any of A's conjuncts where nodeA = (A > 5) and + * (A < 10) and (B > 0) and C = 5, and nodeB = (A > 0) and (C < 0) + * @param nodeA is a simple term or AndExpression constructed from simple terms + * @param nodeB is a simple term or AndExpression constructed from simple terms + * @return true if nodeA is contained by nodeB. + */ + private static boolean contained(Expression nodeA, Expression nodeB) { + if (nodeB instanceof AndExpression) { + for (Expression childB : nodeB.getChildren()) { + if (nodeA instanceof AndExpression) { + boolean contains = false; + for (Expression childA : nodeA.getChildren()) { + if (childB.contains(childA)) { + contains = true; + break; + } + } + if (!contains) { + return false; + } } else { - // node B is a simple term - if (nodeA instanceof AndExpression) { - boolean contains = false; - for (Expression childA : nodeA.getChildren()) { - if (nodeB.contains(childA)) { - contains = true; - break; - } - } - if (!contains) { - return false; - } - } else { - // Both nodeA and nodeB are simple terms - if (!nodeB.contains(nodeA)) { - return false; - } - } - } + // node A is a simple term + if (!childB.contains(nodeA)) { + return false; + } + } + } + } else { + // node B is a simple term + if (nodeA instanceof AndExpression) { + boolean contains = false; + for (Expression childA : nodeA.getChildren()) { + if (nodeB.contains(childA)) { + contains = true; + break; + } + } + if (!contains) { + return false; + } + } else { + // Both nodeA and nodeB are simple terms + if (!nodeB.contains(nodeA)) { + return false; + } + } + } + return true; + } + + /** + * Determines if node is contained in one of the elements of l + * @param node is a simple term or AndExpression constructed from simple terms + * @param l is a list of nodes where a node is a simple term or AndExpression constructed from + * simple terms + * @return true if an element of the list contains node + */ + private static boolean contained(Expression node, List l) { + for (Expression e : l) { + if (contained(node, e)) { return true; + } } - /** - * Determines if node is contained in one of the elements of l - * - * @param node is a simple term or AndExpression constructed from simple terms - * @param l is a list of nodes where a node is a simple term or AndExpression constructed from - * simple terms - * @return true if an element of the list contains node - */ - private static boolean contained(Expression node, List l) { - for (Expression e : l) { - if (contained(node, e)) { - return true; - } - } + return false; + } + + private static boolean containsDisjunct(Expression nodeA, Expression nodeB) throws SQLException { + // nodeB is a disjunct, that is, either an AND expression or a simple term + if (nodeA instanceof OrExpression) { + // node A is an OR expression. The following check if nodeB is contained by + // any of the disjuncts of nodeA + if (!contained(nodeB, nodeA.getChildren())) { + return false; + } + } else { + // Both nodeA and nodeB are either an AND expression or a simple term (e.g., C < 5) + if (!contained(nodeB, nodeA)) { return false; + } } - - private static boolean containsDisjunct(Expression nodeA, Expression nodeB) - throws SQLException { - // nodeB is a disjunct, that is, either an AND expression or a simple term - if (nodeA instanceof OrExpression) { - // node A is an OR expression. 
The following check if nodeB is contained by - // any of the disjuncts of nodeA - if (!contained(nodeB, nodeA.getChildren())) { - return false; - } - } else { - // Both nodeA and nodeB are either an AND expression or a simple term (e.g., C < 5) - if (!contained(nodeB, nodeA)) { - return false; - } - } - return true; + return true; + } + + /** + * Determines if nodeA contains/implies nodeB. Both nodeA and B are DNF (Disjunctive Normal Form) + * expressions. nodeA contains nodeB if every disjunct of nodeB is contained by a nodeA disjunct. + * A disjunct x contains another disjunct y if every conjunct of x contains at least one conjunct + * of y. Example: nodeA: (A > 0 AND B > 0) OR C < 5 nodeB: (A = 5 AND B > 1) OR (A = 3 AND C = 1) + * Disjuncts of nodeA: (A > 0 AND B > 0) and C < 5 Disjuncts of nodeB: (A = 5 AND B > 1) and (A = + * 3 AND C = 1) Conjuncts of (A > 0 AND B > 0): A > 0 and B > 0 Conjuncts of C < 5 : C < 5 nodeA + * contains node B because every disjunct of nodeB is contained by a nodeA disjunct. The first + * disjunct (A = 5 AND B > 1) is contained by the disjunct (A > 0 AND B > 0). The second disjunct + * (A = 3 AND C = 1) is contained by C < 5. Please node a disjunct x contains another disjunct y + * if every conjunct of x contains at least one conjunct of y as in the example above. + * @param nodeA is an expression in DNF + * @param nodeB is an expression in DNF + * @return true if nodeA contains/implies nodeB + */ + public static boolean contains(Expression nodeA, Expression nodeB) throws SQLException { + if (nodeA == null) { + return true; + } else if (nodeB == null) { + return false; } - /** - * Determines if nodeA contains/implies nodeB. Both nodeA and B are DNF (Disjunctive Normal - * Form) expressions. nodeA contains nodeB if every disjunct of nodeB is contained - * by a nodeA disjunct. A disjunct x contains another disjunct y if every conjunct of x - * contains at least one conjunct of y. - * - * Example: - * nodeA: (A > 0 AND B > 0) OR C < 5 - * nodeB: (A = 5 AND B > 1) OR (A = 3 AND C = 1) - * - * Disjuncts of nodeA: (A > 0 AND B > 0) and C < 5 - * Disjuncts of nodeB: (A = 5 AND B > 1) and (A = 3 AND C = 1) - * - * Conjuncts of (A > 0 AND B > 0): A > 0 and B > 0 - * Conjuncts of C < 5 : C < 5 - * - * nodeA contains node B because every disjunct of nodeB is contained - * by a nodeA disjunct. The first disjunct (A = 5 AND B > 1) is contained by the disjunct - * (A > 0 AND B > 0). The second disjunct (A = 3 AND C = 1) is contained by C < 5. Please node - * a disjunct x contains another disjunct y if every conjunct of x contains at least one - * conjunct of y as in the example above. 
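/*
 * A standalone sketch of the containment test described above: a DNF expression nodeA contains
 * nodeB when every disjunct of nodeB is contained by some disjunct of nodeA, and a disjunct x
 * contains a disjunct y when every conjunct of x contains at least one conjunct of y. Simple
 * terms are modelled as integer range constraints; Term, disjunctContains, and dnfContains are
 * hypothetical names, not Phoenix APIs.
 */
import java.util.List;

public class ContainmentSketch {
  // A simple term: low <= var <= high over integers (null bound = unbounded).
  record Term(String var, Integer low, Integer high) {
    // this term contains 'other' if it constrains the same variable with a wider range
    boolean contains(Term other) {
      return var.equals(other.var)
        && (low == null || (other.low != null && low <= other.low))
        && (high == null || (other.high != null && high >= other.high));
    }
  }

  // A disjunct x contains a disjunct y if every conjunct of x contains at least one conjunct of y.
  static boolean disjunctContains(List<Term> x, List<Term> y) {
    return x.stream().allMatch(cx -> y.stream().anyMatch(cx::contains));
  }

  // DNF a contains DNF b if every disjunct of b is contained by some disjunct of a.
  static boolean dnfContains(List<List<Term>> a, List<List<Term>> b) {
    return b.stream().allMatch(db -> a.stream().anyMatch(da -> disjunctContains(da, db)));
  }

  public static void main(String[] args) {
    // nodeA: (A > 0 AND B > 0) OR C < 5
    List<List<Term>> nodeA = List.of(
      List.of(new Term("A", 1, null), new Term("B", 1, null)),
      List.of(new Term("C", null, 4)));
    // nodeB: (A = 5 AND B > 1) OR (A = 3 AND C = 1)
    List<List<Term>> nodeB = List.of(
      List.of(new Term("A", 5, 5), new Term("B", 2, null)),
      List.of(new Term("A", 3, 3), new Term("C", 1, 1)));
    System.out.println(dnfContains(nodeA, nodeB)); // true, matching the javadoc example
  }
}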
- * - * @param nodeA is an expression in DNF - * @param nodeB is an expression in DNF - * @return true if nodeA contains/implies nodeB - * @throws SQLException - */ - public static boolean contains(Expression nodeA, Expression nodeB) throws SQLException { - if (nodeA == null) { - return true; - } else if (nodeB == null) { - return false; - } - if (nodeB instanceof OrExpression) { - // Check if every disjunct of nodeB is contained by a nodeA disjunct - for (Expression childB : nodeB.getChildren()) { - if (!containsDisjunct(nodeA, childB)) { - return false; - } - } - return true; - } else { - // nodeB is either an AND expression or a simple term - return containsDisjunct(nodeA, nodeB); - } + if (nodeB instanceof OrExpression) { + // Check if every disjunct of nodeB is contained by a nodeA disjunct + for (Expression childB : nodeB.getChildren()) { + if (!containsDisjunct(nodeA, childB)) { + return false; + } + } + return true; + } else { + // nodeB is either an AND expression or a simple term + return containsDisjunct(nodeA, nodeB); } + } - private static class SubqueryParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { - private final StatementContext context; - private final Set subqueryNodes; - - SubqueryParseNodeVisitor(StatementContext context, Set subqueryNodes) { - this.context = context; - this.subqueryNodes = subqueryNodes; - } - - @Override - public Void visit(SubqueryParseNode node) throws SQLException { - SelectStatement select = node.getSelectNode(); - if (!context.isSubqueryResultAvailable(select)) { - this.subqueryNodes.add(node); - } - return null; - } - + private static class SubqueryParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { + private final StatementContext context; + private final Set subqueryNodes; + + SubqueryParseNodeVisitor(StatementContext context, Set subqueryNodes) { + this.context = context; + this.subqueryNodes = subqueryNodes; } + + @Override + public Void visit(SubqueryParseNode node) throws SQLException { + SelectStatement select = node.getSelectNode(); + if (!context.isSubqueryResultAvailable(select)) { + this.subqueryNodes.add(node); + } + return null; + } + + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java index 5d476ffa883..2f396b9c8b8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,20 @@ */ package org.apache.phoenix.compile; -import edu.umd.cs.findbugs.annotations.NonNull; +import java.math.BigInteger; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -46,7 +59,6 @@ import org.apache.phoenix.parse.FilterableStatement; import org.apache.phoenix.parse.HintNode.Hint; import org.apache.phoenix.parse.LikeParseNode.LikeType; -import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.parse.TableName; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryConstants; @@ -76,2331 +88,2381 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.math.BigInteger; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; +import edu.umd.cs.findbugs.annotations.NonNull; /** - * - * Class that pushes row key expressions from the where clause to form the start/stop - * key of the scan and removes the expressions from the where clause when possible. - * - * + * Class that pushes row key expressions from the where clause to form the start/stop key of the + * scan and removes the expressions from the where clause when possible. * @since 0.1 */ public class WhereOptimizer { - private static final Logger LOGGER = LoggerFactory.getLogger(WhereOptimizer.class); - private static final List EVERYTHING_RANGES = - Collections. singletonList(KeyRange.EVERYTHING_RANGE); - private static final List SALT_PLACEHOLDER = - Collections.singletonList( - PChar.INSTANCE.getKeyRange(QueryConstants.SEPARATOR_BYTE_ARRAY, SortOrder.ASC)); + private static final Logger LOGGER = LoggerFactory.getLogger(WhereOptimizer.class); + private static final List EVERYTHING_RANGES = + Collections. singletonList(KeyRange.EVERYTHING_RANGE); + private static final List SALT_PLACEHOLDER = Collections + .singletonList(PChar.INSTANCE.getKeyRange(QueryConstants.SEPARATOR_BYTE_ARRAY, SortOrder.ASC)); + + private WhereOptimizer() { + } + + /** + * Pushes row key expressions from the where clause into the start/stop key of the scan. + * @param context the shared context during query compilation + * @param hints the set, possibly empty, of hints in this statement + * @param whereClause the where clause expression + * @return the new where clause with the key expressions removed + */ + public static Expression pushKeyExpressionsToScan(StatementContext context, Set hints, + Expression whereClause) throws SQLException { + return pushKeyExpressionsToScan(context, hints, whereClause, null, Optional. 
absent()); + } + + // For testing so that the extractedNodes can be verified + public static Expression pushKeyExpressionsToScan(StatementContext context, Set hints, + Expression whereClause, Set extractNodes, Optional minOffset) + throws SQLException { + PName tenantId = context.getConnection().getTenantId(); + byte[] tenantIdBytes = null; + PTable table = context.getCurrentTable().getTable(); + Integer nBuckets = table.getBucketNum(); + boolean isSalted = nBuckets != null; + RowKeySchema schema = table.getRowKeySchema(); + boolean isMultiTenant = tenantId != null && table.isMultiTenant(); + boolean isSharedIndex = table.getViewIndexId() != null; + ImmutableBytesWritable ptr = context.getTempPtr(); + int maxInListSkipScanSize = context.getConnection().getQueryServices().getConfiguration() + .getInt(QueryServices.MAX_IN_LIST_SKIP_SCAN_SIZE, + QueryServicesOptions.DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE); + + if (isMultiTenant) { + tenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isSharedIndex); + } - private WhereOptimizer() { + if ( + whereClause == null && (tenantId == null || !table.isMultiTenant()) + && table.getViewIndexId() == null && !minOffset.isPresent() + ) { + context.setScanRanges(ScanRanges.EVERYTHING); + return whereClause; + } + if (LiteralExpression.isBooleanFalseOrNull(whereClause)) { + context.setScanRanges(ScanRanges.NOTHING); + return null; + } + KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, table); + KeyExpressionVisitor.KeySlots keySlots = null; + if (whereClause != null) { + // TODO:: When we only have one where clause, the keySlots returns as a single slot object, + // instead of an array of slots for the corresponding column. Change the behavior so it + // becomes consistent. + keySlots = whereClause.accept(visitor); + + if ( + keySlots == null && (tenantId == null || !table.isMultiTenant()) + && table.getViewIndexId() == null && !minOffset.isPresent() + ) { + // FIXME this overwrites salting info in the scanRange + context.setScanRanges(ScanRanges.EVERYTHING); + return whereClause; + } + // If a parameter is bound to null (as will be the case for calculating ResultSetMetaData and + // ParameterMetaData), this will be the case. It can also happen for an equality comparison + // for unequal lengths. + if (keySlots == KeyExpressionVisitor.EMPTY_KEY_SLOTS) { + context.setScanRanges(ScanRanges.NOTHING); + return null; + } + } + if (keySlots == null) { + keySlots = KeyExpressionVisitor.EMPTY_KEY_SLOTS; } - /** - * Pushes row key expressions from the where clause into the start/stop key of the scan. 
- * @param context the shared context during query compilation - * @param hints the set, possibly empty, of hints in this statement - * @param whereClause the where clause expression - * @return the new where clause with the key expressions removed - */ - public static Expression pushKeyExpressionsToScan(StatementContext context, Set hints, Expression whereClause) - throws SQLException{ - return pushKeyExpressionsToScan(context, hints, whereClause, null, Optional.absent()); - } - - // For testing so that the extractedNodes can be verified - public static Expression pushKeyExpressionsToScan(StatementContext context, Set hints, - Expression whereClause, Set extractNodes, Optional minOffset) throws SQLException { - PName tenantId = context.getConnection().getTenantId(); - byte[] tenantIdBytes = null; - PTable table = context.getCurrentTable().getTable(); - Integer nBuckets = table.getBucketNum(); - boolean isSalted = nBuckets != null; - RowKeySchema schema = table.getRowKeySchema(); - boolean isMultiTenant = tenantId != null && table.isMultiTenant(); - boolean isSharedIndex = table.getViewIndexId() != null; - ImmutableBytesWritable ptr = context.getTempPtr(); - int maxInListSkipScanSize = context.getConnection().getQueryServices().getConfiguration() - .getInt(QueryServices.MAX_IN_LIST_SKIP_SCAN_SIZE, - QueryServicesOptions.DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE); + if (extractNodes == null) { + extractNodes = new HashSet(table.getPKColumns().size()); + } - if (isMultiTenant) { - tenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isSharedIndex); - } + int pkPos = 0; + int nPKColumns = table.getPKColumns().size(); + int[] slotSpanArray = new int[nPKColumns]; + List> cnf = Lists.newArrayListWithExpectedSize(schema.getMaxFields()); + boolean hasViewIndex = table.getViewIndexId() != null; + Iterator iterator = keySlots.getSlots().iterator(); + // Add placeholder for salt byte ranges + if (isSalted) { + cnf.add(SALT_PLACEHOLDER); + // Increment the pkPos, as the salt column is in the row schema + // Do not increment the iterator, though, as there will never be + // an expression in the keySlots for the salt column + pkPos++; + } - if (whereClause == null && (tenantId == null || !table.isMultiTenant()) && table.getViewIndexId() == null && !minOffset.isPresent()) { - context.setScanRanges(ScanRanges.EVERYTHING); - return whereClause; - } - if (LiteralExpression.isBooleanFalseOrNull(whereClause)) { - context.setScanRanges(ScanRanges.NOTHING); - return null; - } - KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, table); - KeyExpressionVisitor.KeySlots keySlots = null; - if (whereClause != null) { - // TODO:: When we only have one where clause, the keySlots returns as a single slot object, - // instead of an array of slots for the corresponding column. Change the behavior so it - // becomes consistent. - keySlots = whereClause.accept(visitor); - - if (keySlots == null && (tenantId == null || !table.isMultiTenant()) && table.getViewIndexId() == null && !minOffset.isPresent()) { - // FIXME this overwrites salting info in the scanRange - context.setScanRanges(ScanRanges.EVERYTHING); - return whereClause; - } - // If a parameter is bound to null (as will be the case for calculating ResultSetMetaData and - // ParameterMetaData), this will be the case. It can also happen for an equality comparison - // for unequal lengths. 
- if (keySlots == KeyExpressionVisitor.EMPTY_KEY_SLOTS) { - context.setScanRanges(ScanRanges.NOTHING); - return null; - } - } - if (keySlots == null) { - keySlots = KeyExpressionVisitor.EMPTY_KEY_SLOTS; - } + // Add unique index ID for shared indexes on views. This ensures + // that different indexes don't interleave. + if (hasViewIndex) { + byte[] viewIndexBytes = table.getviewIndexIdType().toBytes(table.getViewIndexId()); + KeyRange indexIdKeyRange = KeyRange.getKeyRange(viewIndexBytes); + cnf.add(Collections.singletonList(indexIdKeyRange)); + pkPos++; + } + + // Add tenant data isolation for tenant-specific tables + if (isMultiTenant) { + KeyRange tenantIdKeyRange = KeyRange.getKeyRange(tenantIdBytes); + cnf.add(Collections.singletonList(tenantIdKeyRange)); + pkPos++; + } + + boolean forcedSkipScan = hints.contains(Hint.SKIP_SCAN); + boolean forcedRangeScan = hints.contains(Hint.RANGE_SCAN); + boolean hasUnboundedRange = false; + boolean hasMultiRanges = false; + boolean hasRangeKey = false; + boolean useSkipScan = false; + boolean checkMaxSkipScanCardinality = false; + BigInteger inListSkipScanCardinality = BigInteger.ONE; // using BigInteger to avoid overflow + // issues + + // Concat byte arrays of literals to form scan start key + while (iterator.hasNext()) { + KeyExpressionVisitor.KeySlot slot = iterator.next(); + // If the position of the pk columns in the query skips any part of the row k + // then we have to handle in the next phase through a key filter. + // If the slot is null this means we have no entry for this pk position. + if (slot == null || slot.getKeyRanges().isEmpty()) { + continue; + } + if (slot.getPKPosition() < pkPos) { + continue; + } + if (slot.getPKPosition() != pkPos) { + hasUnboundedRange = hasRangeKey = true; + for (int i = pkPos; i < slot.getPKPosition(); i++) { + cnf.add(Collections.singletonList(KeyRange.EVERYTHING_RANGE)); + } + } + KeyPart keyPart = slot.getKeyPart(); + List keyRanges = slot.getKeyRanges(); + SortOrder prevSortOrder = null; + int slotOffset = 0; + int clipLeftSpan = 0; + boolean onlySplittedRVCLeftValid = false; + boolean stopExtracting = false; + // Iterate through all spans of this slot + boolean areAllSingleKey = KeyRange.areAllSingleKey(keyRanges); + boolean isInList = false; + int cnfStartPos = cnf.size(); + + // TODO: + // Using keyPart.getExtractNodes() to determine whether the keyPart has a IN List + // is not guaranteed, since the IN LIST slot may not have any extracted nodes. 
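/*
 * A standalone sketch of how the leading key slots are seeded above before any WHERE-derived
 * ranges are added: an optional salt-byte placeholder, then the view index id for shared view
 * indexes, then the tenant id for multi-tenant tables, each as a single-value "range". Strings
 * stand in for KeyRange and the example values are illustrative only.
 */
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class LeadingSlotsSketch {
  static List<List<String>> seedSlots(boolean salted, String viewIndexId, String tenantId) {
    List<List<String>> cnf = new ArrayList<>();
    if (salted) {
      cnf.add(Collections.singletonList("<salt placeholder>")); // filled in later per bucket
    }
    if (viewIndexId != null) {
      cnf.add(Collections.singletonList(viewIndexId)); // keeps shared view indexes apart
    }
    if (tenantId != null) {
      cnf.add(Collections.singletonList(tenantId));    // tenant data isolation
    }
    return cnf;
  }

  public static void main(String[] args) {
    // a salted, multi-tenant view index would get three leading slots
    System.out.println(seedSlots(true, "32768", "tenant1"));
  }
}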
+ if ( + keyPart.getExtractNodes() != null && keyPart.getExtractNodes().size() > 0 + && keyPart.getExtractNodes().iterator().next() instanceof InListExpression + ) { + isInList = true; + } + while (true) { + SortOrder sortOrder = schema.getField(slot.getPKPosition() + slotOffset).getSortOrder(); + if (prevSortOrder == null) { + prevSortOrder = sortOrder; + } else if (prevSortOrder != sortOrder || (prevSortOrder == SortOrder.DESC && isInList)) { + // Consider the Universe of keys to be [0,7]+ on the leading column A + // and [0,7]+ on trailing column B, with a padbyte of 0 for ASC and 7 for DESC + // if our key range for ASC keys is leading [2,*] and trailing [3,*], + // → [x203 - x777] + // for this particular plan the leading key is descending (ie index desc) + // consider the data + // (3,2) ORDER BY A,B→ x302 → ORDER BY A DESC,B → x472 + // (3,3) ORDER BY A,B→ x303 → ORDER BY A DESC,B → x473 + // (3,4) ORDER BY A,B→ x304 → ORDER BY A DESC,B → x474 + // (2,3) ORDER BY A,B→ x203 → ORDER BY A DESC,B → x573 + // (2,7) ORDER BY A,B→ x207 → ORDER BY A DESC,B → x577 + // And the logical expression (A,B) > (2,3) + // In the DESC A order the selected values are not contiguous, + // (2,7),(3,2),(3,3),(3,4) + // In the normal ASC order by the values are all contiguous + // Therefore the key cannot be extracted out and a full filter must be applied + // In addition, the boundary of the scan is tricky as the values are not bound + // by (2,3) it is instead bound by (2,7), this should map to, [x000,x577] + // FUTURE: May be able to perform a type of skip scan for this case. + + // If the sort order changes, we must clip the portion with the same sort order + // and invert the key ranges and swap the upper and lower bounds. + List leftRanges = clipLeft(schema, + slot.getPKPosition() + slotOffset - clipLeftSpan, clipLeftSpan, keyRanges, ptr); + keyRanges = + clipRight(schema, slot.getPKPosition() + slotOffset - 1, keyRanges, leftRanges, ptr); + leftRanges = KeyRange.coalesce(leftRanges); + keyRanges = KeyRange.coalesce(keyRanges); + if (prevSortOrder == SortOrder.DESC) { + leftRanges = invertKeyRanges(leftRanges); + } + slotSpanArray[cnf.size()] = clipLeftSpan - 1; + cnf.add(leftRanges); + pkPos = slot.getPKPosition() + slotOffset; + clipLeftSpan = 0; + prevSortOrder = sortOrder; + // If we had an IN clause with mixed sort ordering then we need to check the possibility + // of + // skip scan key generation explosion. + checkMaxSkipScanCardinality |= isInList; + // since we have to clip the portion with the same sort order, we can no longer + // extract the nodes from the where clause + // for eg. for the schema A VARCHAR DESC, B VARCHAR ASC and query + // WHERE (A,B) < ('a','b') + // the range (* - a\xFFb) is converted to [~a-*)(*-b) + // so we still need to filter on A,B + stopExtracting = true; + if (!areAllSingleKey) { + // for cnf, we only add [~a-*) to it, (*-b) is skipped. + // but for all single key, we can continue. 
+ onlySplittedRVCLeftValid = true; + break; + } + } + clipLeftSpan++; + slotOffset++; + if (slotOffset >= slot.getPKSpan()) { + break; + } + } + + if (onlySplittedRVCLeftValid) { + keyRanges = cnf.get(cnf.size() - 1); + } else { + if ( + schema.getField(slot.getPKPosition() + slotOffset - 1).getSortOrder() == SortOrder.DESC + ) { + keyRanges = invertKeyRanges(keyRanges); + } + pkPos = slot.getPKPosition() + slotOffset; + slotSpanArray[cnf.size()] = clipLeftSpan - 1; + cnf.add(keyRanges); + } + + // Do not use the skipScanFilter when there is a large IN clause (for e.g > 50k elements) + // Since the generation of point keys for skip scan filter will blow up the memory usage. + // See ScanRanges.getPointKeys(...) where using the various slot key ranges + // to generate point keys will lead to combinatorial explosion. + // The following check will ensure the cardinality of generated point keys + // is below the configured max (maxInListSkipScanSize). + // We shall force a range scan if the configured max is exceeded. + // cnfStartPos => is the start slot of this IN list + if (checkMaxSkipScanCardinality) { + for (int i = cnfStartPos; i < cnf.size(); i++) { + // using int can result in overflow + inListSkipScanCardinality = + inListSkipScanCardinality.multiply(BigInteger.valueOf(cnf.get(i).size())); + } + // If the maxInListSkipScanSize <= 0 then the feature (to force range scan) is turned off + if (maxInListSkipScanSize > 0) { + forcedRangeScan = + inListSkipScanCardinality.compareTo(BigInteger.valueOf(maxInListSkipScanSize)) == 1 + ? true + : false; + } + // Reset the check flag for the next IN list clause + checkMaxSkipScanCardinality = false; + } + + // TODO: when stats are available, we may want to use a skip scan if the + // cardinality of this slot is low. + /** + * We use skip scan when: 1.previous slot has unbound and force skip scan and 2.not force + * Range Scan and 3.previous rowkey slot has range or current rowkey slot have multiple + * ranges. Once we can not use skip scan and we have a non-contiguous range, we can not remove + * the whereExpressions of current rowkey slot from the current {@link SelectStatement#where}, + * because the {@link Scan#startRow} and {@link Scan#endRow} could not exactly represent + * currentRowKeySlotRanges. So we should stop extracting whereExpressions of current rowkey + * slot once we encounter: 1. we now use range scan and 2. previous rowkey slot has unbound or + * previous rowkey slot has range or current rowkey slot have multiple ranges. + */ + hasMultiRanges |= keyRanges.size() > 1; + useSkipScan |= (!hasUnboundedRange || forcedSkipScan) && !forcedRangeScan + && (hasRangeKey || hasMultiRanges); + + stopExtracting |= !useSkipScan && (hasUnboundedRange || hasRangeKey || hasMultiRanges); + + for (int i = 0; (!hasUnboundedRange || !hasRangeKey) && i < keyRanges.size(); i++) { + KeyRange range = keyRanges.get(i); + if (range.isUnbound()) { + hasUnboundedRange = hasRangeKey = true; + } else if (!range.isSingleKey()) { + hasRangeKey = true; + } + } + // Will be null in cases for which only part of the expression was factored out here + // to set the start/end key. An example would be LIKE 'foo%bar' where we can + // set the start key to 'foo' but still need to match the regex at filter time. + // Don't extract expressions if we're forcing a range scan and we've already come + // across a multi-range for a prior slot. The reason is that we have an inexact range after + // that, so must filter on the remaining conditions (see issue #467). 
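The BigInteger check in this hunk guards against a skip scan over a very large IN list: ScanRanges.getPointKeys(...) would otherwise enumerate the cross product of the per-slot ranges. The standalone sketch below models the same guard with plain collections; InListCardinalityGuard and its parameter names are made up for illustration and are not part of this patch.

    import java.math.BigInteger;
    import java.util.List;

    final class InListCardinalityGuard {
      /** Returns true when the skip scan would enumerate more point keys than allowed. */
      static boolean shouldForceRangeScan(List<List<?>> inListSlotRanges,
          long maxInListSkipScanSize) {
        if (maxInListSkipScanSize <= 0) {
          return false; // a non-positive limit disables the guard, as in the patch
        }
        BigInteger cardinality = BigInteger.ONE;
        for (List<?> ranges : inListSlotRanges) {
          // The point-key count is the product of the per-slot range counts,
          // hence BigInteger to avoid integer overflow.
          cardinality = cardinality.multiply(BigInteger.valueOf(ranges.size()));
        }
        return cardinality.compareTo(BigInteger.valueOf(maxInListSkipScanSize)) > 0;
      }
    }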
+ if (!stopExtracting) { + Set nodesToExtract = keyPart.getExtractNodes(); + extractNodes.addAll(nodesToExtract); + } + } + // If we have fully qualified point keys with multi-column spans (i.e. RVC), + // we can still use our skip scan. The ScanRanges.create() call will explode + // out the keys. + slotSpanArray = Arrays.copyOf(slotSpanArray, cnf.size()); + ScanRanges scanRanges = ScanRanges.create(schema, cnf, slotSpanArray, nBuckets, useSkipScan, + table.getRowTimestampColPos(), minOffset); + context.setScanRanges(scanRanges); + if (whereClause == null) { + return null; + } else { + return whereClause.accept(new RemoveExtractedNodesVisitor(extractNodes)); + } + } + + private static KeyRange getTrailingRange(RowKeySchema rowKeySchema, int clippedPkPos, + KeyRange range, KeyRange clippedResult, ImmutableBytesWritable ptr) { + // We are interested in the clipped part's Seperator. Since we combined first part, we need to + // remove its separator from the trailing parts' start + int clippedSepLength = rowKeySchema.getField(clippedPkPos).getDataType().isFixedWidth() ? 0 : 1; + byte[] lowerRange = KeyRange.UNBOUND; + boolean lowerInclusive = false; + // Lower range of trailing part of RVC must be true, so we can form a new range to intersect + // going forward + if ( + !range.lowerUnbound() && range.getLowerRange().length > clippedResult.getLowerRange().length + && Bytes.startsWith(range.getLowerRange(), clippedResult.getLowerRange()) + ) { + lowerRange = range.getLowerRange(); + int offset = clippedResult.getLowerRange().length + clippedSepLength; + ptr.set(lowerRange, offset, lowerRange.length - offset); + lowerRange = ptr.copyBytes(); + lowerInclusive = range.isLowerInclusive(); + } + byte[] upperRange = KeyRange.UNBOUND; + boolean upperInclusive = false; + if ( + !range.upperUnbound() && range.getUpperRange().length > clippedResult.getUpperRange().length + && Bytes.startsWith(range.getUpperRange(), clippedResult.getUpperRange()) + ) { + upperRange = range.getUpperRange(); + int offset = clippedResult.getUpperRange().length + clippedSepLength; + ptr.set(upperRange, offset, upperRange.length - offset); + upperRange = ptr.copyBytes(); + upperInclusive = range.isUpperInclusive(); + } + return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + } + + private static List clipRight(RowKeySchema schema, int pkPos, List keyRanges, + List leftRanges, ImmutableBytesWritable ptr) { + List clippedKeyRanges = Lists.newArrayListWithExpectedSize(keyRanges.size()); + for (int i = 0; i < leftRanges.size(); i++) { + KeyRange leftRange = leftRanges.get(i); + KeyRange range = keyRanges.get(i); + KeyRange clippedKeyRange = getTrailingRange(schema, pkPos, range, leftRange, ptr); + clippedKeyRanges.add(clippedKeyRange); + } + return clippedKeyRanges; + } + + private static List clipLeft(RowKeySchema schema, int pkPos, int clipLeftSpan, + List keyRanges, ImmutableBytesWritable ptr) { + List clippedKeyRanges = Lists.newArrayListWithExpectedSize(keyRanges.size()); + for (KeyRange keyRange : keyRanges) { + KeyRange clippedKeyRange = schema.clipLeft(pkPos, keyRange, clipLeftSpan, ptr); + clippedKeyRanges.add(clippedKeyRange); + } + return clippedKeyRanges; + } + + private static List invertKeyRanges(List keyRanges) { + keyRanges = new ArrayList(keyRanges); + for (int i = 0; i < keyRanges.size(); i++) { + KeyRange range = keyRanges.get(i); + range = range.invert(); + keyRanges.set(i, range); + } + return keyRanges; + } + + public static byte[] getRowKeyMatcher(final StatementContext 
context, + final TableName tableNameNode, final PTable parentTable, final Expression viewWhereExpression) + throws SQLException { + RowKeySchema schema = parentTable.getRowKeySchema(); + List> rowKeySlotRangesList = new ArrayList<>(); + Integer nBuckets = parentTable.getBucketNum(); + boolean isSalted = nBuckets != null; + PName tenantId = context.getConnection().getTenantId(); + boolean isMultiTenant = tenantId != null && parentTable.isMultiTenant(); + + byte[] tenantIdBytes = tenantId == null + ? ByteUtil.EMPTY_BYTE_ARRAY + : ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isMultiTenant, false); + if (tenantIdBytes.length != 0) { + rowKeySlotRangesList.add(Arrays.asList(KeyRange.POINT.apply(tenantIdBytes))); + } + KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, parentTable); + KeyExpressionVisitor.KeySlots keySlots = viewWhereExpression.accept(visitor); + if (keySlots == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; + } + for (KeyExpressionVisitor.KeySlot slot : keySlots.getSlots()) { + if (slot != null) { + if (schema.getField(slot.getPKPosition()).getSortOrder() == SortOrder.DESC) { + rowKeySlotRangesList.add(invertKeyRanges(slot.getKeyRanges())); + continue; + } + rowKeySlotRangesList.add(slot.getKeyRanges()); + } + } + ScanRanges scanRange = ScanRanges.createSingleSpan(schema, rowKeySlotRangesList, null, false); + byte[] rowKeyMatcher = scanRange.getScanRange().getLowerRange(); + if (LOGGER.isTraceEnabled()) { + String rowKeyMatcherStr = Bytes.toStringBinary(rowKeyMatcher); + String rowKeyMatcherHex = Bytes.toHex(rowKeyMatcher); + byte[] rowKeyMatcherFromHex = Bytes.fromHex(rowKeyMatcherHex); + assert Bytes.compareTo(rowKeyMatcher, rowKeyMatcherFromHex) == 0; + LOGGER.trace(String.format( + "View info view-name = %s, view-stmt-name (parent) = %s, " + + "primary-keys = %d, key-ranges: size = %d, list = %s ", + tableNameNode.toString(), parentTable.getName().toString(), + parentTable.getPKColumns().size(), rowKeySlotRangesList.size(), + rowKeySlotRangesList.isEmpty() ? "null" : rowKeySlotRangesList.toString())); + LOGGER.trace(String.format("RowKey Matcher info Hex-value = %s,StringBinary value = %s", + rowKeyMatcherHex, rowKeyMatcherStr)); + + } + return rowKeyMatcher; + } + + @VisibleForTesting + public static byte[] getRowKeyMatcher(final PhoenixConnection connection, + final TableName tableNameNode, final PTable parentTable, final byte[][] viewColumnConstantsToBe, + final BitSet isViewColumnReferencedToBe) throws SQLException { + + RowKeySchema schema = parentTable.getRowKeySchema(); + Integer nBuckets = parentTable.getBucketNum(); + boolean isSalted = nBuckets != null; + + List> rowKeySlotRangesList = new ArrayList<>(); + PName tenantId = connection.getTenantId(); + boolean isMultiTenant = tenantId != null && parentTable.isMultiTenant(); + byte[] tenantIdBytes = tenantId == null + ? 
ByteUtil.EMPTY_BYTE_ARRAY + : ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isMultiTenant, false); + + if (tenantIdBytes.length != 0) { + rowKeySlotRangesList.add(Arrays.asList(KeyRange.POINT.apply(tenantIdBytes))); + } - if (extractNodes == null) { - extractNodes = new HashSet(table.getPKColumns().size()); + int pkPos = 0; + for (int i = 0; viewColumnConstantsToBe != null && i < viewColumnConstantsToBe.length; i++) { + if (isViewColumnReferencedToBe.get(i)) { + pkPos++; + ValueSchema.Field field = schema.getField(pkPos); + SortOrder fieldSortOrder = schema.getField(pkPos).getSortOrder(); + byte[] viewColumnConstants = + Bytes.copy(viewColumnConstantsToBe[i], 0, viewColumnConstantsToBe[i].length - 1); + KeyRange keyRange = ByteUtil.getKeyRange(viewColumnConstants, fieldSortOrder, + CompareOperator.EQUAL, field.getDataType()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + String.format("Field: pos = %d, name = %s, schema = %s, " + "referenced-column %d, %s ", + pkPos, parentTable.getPKColumns().get(pkPos), schema.getField(pkPos).toString(), i, + Bytes.toHex(viewColumnConstantsToBe[i]))); } + rowKeySlotRangesList.add(Arrays.asList(keyRange)); + } - int pkPos = 0; - int nPKColumns = table.getPKColumns().size(); - int[] slotSpanArray = new int[nPKColumns]; - List> cnf = Lists.newArrayListWithExpectedSize(schema.getMaxFields()); - boolean hasViewIndex = table.getViewIndexId() != null; - Iterator iterator = keySlots.getSlots().iterator(); - // Add placeholder for salt byte ranges - if (isSalted) { - cnf.add(SALT_PLACEHOLDER); - // Increment the pkPos, as the salt column is in the row schema - // Do not increment the iterator, though, as there will never be - // an expression in the keySlots for the salt column - pkPos++; - } - - // Add unique index ID for shared indexes on views. This ensures - // that different indexes don't interleave. - if (hasViewIndex) { - byte[] viewIndexBytes = table.getviewIndexIdType().toBytes(table.getViewIndexId()); - KeyRange indexIdKeyRange = KeyRange.getKeyRange(viewIndexBytes); - cnf.add(Collections.singletonList(indexIdKeyRange)); - pkPos++; - } - - // Add tenant data isolation for tenant-specific tables - if (isMultiTenant) { - KeyRange tenantIdKeyRange = KeyRange.getKeyRange(tenantIdBytes); - cnf.add(Collections.singletonList(tenantIdKeyRange)); - pkPos++; - } - - boolean forcedSkipScan = hints.contains(Hint.SKIP_SCAN); - boolean forcedRangeScan = hints.contains(Hint.RANGE_SCAN); - boolean hasUnboundedRange = false; - boolean hasMultiRanges = false; - boolean hasRangeKey = false; - boolean useSkipScan = false; - boolean checkMaxSkipScanCardinality = false; - BigInteger inListSkipScanCardinality = BigInteger.ONE; // using BigInteger to avoid overflow issues - - - // Concat byte arrays of literals to form scan start key - while (iterator.hasNext()) { - KeyExpressionVisitor.KeySlot slot = iterator.next(); - // If the position of the pk columns in the query skips any part of the row k - // then we have to handle in the next phase through a key filter. - // If the slot is null this means we have no entry for this pk position. 
- if (slot == null || slot.getKeyRanges().isEmpty()) { - continue; - } - if(slot.getPKPosition() < pkPos) { - continue; - } - if (slot.getPKPosition() != pkPos) { - hasUnboundedRange = hasRangeKey = true; - for (int i= pkPos; i < slot.getPKPosition(); i++) { - cnf.add(Collections.singletonList(KeyRange.EVERYTHING_RANGE)); - } - } - KeyPart keyPart = slot.getKeyPart(); - List keyRanges = slot.getKeyRanges(); - SortOrder prevSortOrder = null; - int slotOffset = 0; - int clipLeftSpan = 0; - boolean onlySplittedRVCLeftValid = false; - boolean stopExtracting = false; - // Iterate through all spans of this slot - boolean areAllSingleKey = KeyRange.areAllSingleKey(keyRanges); - boolean isInList = false; - int cnfStartPos = cnf.size(); - - // TODO: - // Using keyPart.getExtractNodes() to determine whether the keyPart has a IN List - // is not guaranteed, since the IN LIST slot may not have any extracted nodes. - if (keyPart.getExtractNodes() != null && keyPart.getExtractNodes().size() > 0 - && keyPart.getExtractNodes().iterator().next() instanceof InListExpression){ - isInList = true; - } - while (true) { - SortOrder sortOrder = - schema.getField(slot.getPKPosition() + slotOffset).getSortOrder(); - if (prevSortOrder == null) { - prevSortOrder = sortOrder; - } else if (prevSortOrder != sortOrder || (prevSortOrder == SortOrder.DESC && isInList)) { - //Consider the Universe of keys to be [0,7]+ on the leading column A - // and [0,7]+ on trailing column B, with a padbyte of 0 for ASC and 7 for DESC - //if our key range for ASC keys is leading [2,*] and trailing [3,*], - // → [x203 - x777] - //for this particular plan the leading key is descending (ie index desc) - // consider the data - // (3,2) ORDER BY A,B→ x302 → ORDER BY A DESC,B → x472 - // (3,3) ORDER BY A,B→ x303 → ORDER BY A DESC,B → x473 - // (3,4) ORDER BY A,B→ x304 → ORDER BY A DESC,B → x474 - // (2,3) ORDER BY A,B→ x203 → ORDER BY A DESC,B → x573 - // (2,7) ORDER BY A,B→ x207 → ORDER BY A DESC,B → x577 - // And the logical expression (A,B) > (2,3) - // In the DESC A order the selected values are not contiguous, - // (2,7),(3,2),(3,3),(3,4) - // In the normal ASC order by the values are all contiguous - // Therefore the key cannot be extracted out and a full filter must be applied - // In addition, the boundary of the scan is tricky as the values are not bound - // by (2,3) it is instead bound by (2,7), this should map to, [x000,x577] - // FUTURE: May be able to perform a type of skip scan for this case. - - // If the sort order changes, we must clip the portion with the same sort order - // and invert the key ranges and swap the upper and lower bounds. - List leftRanges = clipLeft(schema, slot.getPKPosition() - + slotOffset - clipLeftSpan, clipLeftSpan, keyRanges, ptr); - keyRanges = - clipRight(schema, slot.getPKPosition() + slotOffset - 1, keyRanges, - leftRanges, ptr); - leftRanges = KeyRange.coalesce(leftRanges); - keyRanges = KeyRange.coalesce(keyRanges); - if (prevSortOrder == SortOrder.DESC) { - leftRanges = invertKeyRanges(leftRanges); - } - slotSpanArray[cnf.size()] = clipLeftSpan-1; - cnf.add(leftRanges); - pkPos = slot.getPKPosition() + slotOffset; - clipLeftSpan = 0; - prevSortOrder = sortOrder; - // If we had an IN clause with mixed sort ordering then we need to check the possibility of - // skip scan key generation explosion. - checkMaxSkipScanCardinality |= isInList; - // since we have to clip the portion with the same sort order, we can no longer - // extract the nodes from the where clause - // for eg. 
for the schema A VARCHAR DESC, B VARCHAR ASC and query - // WHERE (A,B) < ('a','b') - // the range (* - a\xFFb) is converted to [~a-*)(*-b) - // so we still need to filter on A,B - stopExtracting = true; - if(!areAllSingleKey) { - //for cnf, we only add [~a-*) to it, (*-b) is skipped. - //but for all single key, we can continue. - onlySplittedRVCLeftValid = true; - break; - } - } - clipLeftSpan++; - slotOffset++; - if (slotOffset >= slot.getPKSpan()) { - break; - } - } + } - if(onlySplittedRVCLeftValid) { - keyRanges = cnf.get(cnf.size()-1); - } else { - if (schema.getField( - slot.getPKPosition() + slotOffset - 1).getSortOrder() == SortOrder.DESC) { - keyRanges = invertKeyRanges(keyRanges); - } - pkPos = slot.getPKPosition() + slotOffset; - slotSpanArray[cnf.size()] = clipLeftSpan-1; - cnf.add(keyRanges); - } + ScanRanges scanRange = ScanRanges.createSingleSpan(schema, rowKeySlotRangesList, null, false); + byte[] rowKeyMatcher = scanRange.getScanRange().getLowerRange(); - // Do not use the skipScanFilter when there is a large IN clause (for e.g > 50k elements) - // Since the generation of point keys for skip scan filter will blow up the memory usage. - // See ScanRanges.getPointKeys(...) where using the various slot key ranges - // to generate point keys will lead to combinatorial explosion. - // The following check will ensure the cardinality of generated point keys - // is below the configured max (maxInListSkipScanSize). - // We shall force a range scan if the configured max is exceeded. - // cnfStartPos => is the start slot of this IN list - if (checkMaxSkipScanCardinality) { - for (int i = cnfStartPos; i < cnf.size(); i++) { - // using int can result in overflow - inListSkipScanCardinality = - inListSkipScanCardinality.multiply(BigInteger.valueOf(cnf.get(i).size())); - } - // If the maxInListSkipScanSize <= 0 then the feature (to force range scan) is turned off - if (maxInListSkipScanSize > 0) { - forcedRangeScan = - inListSkipScanCardinality.compareTo(BigInteger.valueOf(maxInListSkipScanSize)) == 1 ? true : false; - } - // Reset the check flag for the next IN list clause - checkMaxSkipScanCardinality = false; - } + if (LOGGER.isTraceEnabled()) { + String rowKeyMatcherStr = Bytes.toStringBinary(rowKeyMatcher); + String rowKeyMatcherHex = Bytes.toHex(rowKeyMatcher); + byte[] rowKeyMatcherFromHex = Bytes.fromHex(rowKeyMatcherHex); + assert Bytes.compareTo(rowKeyMatcher, rowKeyMatcherFromHex) == 0; - // TODO: when stats are available, we may want to use a skip scan if the - // cardinality of this slot is low. - /** - * We use skip scan when: - * 1.previous slot has unbound and force skip scan and - * 2.not force Range Scan and - * 3.previous rowkey slot has range or current rowkey slot have multiple ranges. - * - * Once we can not use skip scan and we have a non-contiguous range, we can not remove - * the whereExpressions of current rowkey slot from the current {@link SelectStatement#where}, - * because the {@link Scan#startRow} and {@link Scan#endRow} could not exactly represent - * currentRowKeySlotRanges. - * So we should stop extracting whereExpressions of current rowkey slot once we encounter: - * 1. we now use range scan and - * 2. previous rowkey slot has unbound or - * previous rowkey slot has range or - * current rowkey slot have multiple ranges. 
- */ - hasMultiRanges |= keyRanges.size() > 1; - useSkipScan |= - (!hasUnboundedRange || forcedSkipScan) && - !forcedRangeScan && - (hasRangeKey || hasMultiRanges); - - stopExtracting |= - !useSkipScan && - (hasUnboundedRange || hasRangeKey || hasMultiRanges); - - for (int i = 0; (!hasUnboundedRange || !hasRangeKey) && i < keyRanges.size(); i++) { - KeyRange range = keyRanges.get(i); - if (range.isUnbound()) { - hasUnboundedRange = hasRangeKey = true; - } else if (!range.isSingleKey()) { - hasRangeKey = true; - } - } - // Will be null in cases for which only part of the expression was factored out here - // to set the start/end key. An example would be LIKE 'foo%bar' where we can - // set the start key to 'foo' but still need to match the regex at filter time. - // Don't extract expressions if we're forcing a range scan and we've already come - // across a multi-range for a prior slot. The reason is that we have an inexact range after - // that, so must filter on the remaining conditions (see issue #467). - if (!stopExtracting) { - Set nodesToExtract = keyPart.getExtractNodes(); - extractNodes.addAll(nodesToExtract); - } + LOGGER.trace(String.format( + "View info view-name = %s, view-stmt-name (parent) = %s, " + + "primary-keys = %d, key-ranges: size = %d, list = %s ", + tableNameNode.toString(), parentTable.getName().toString(), + parentTable.getPKColumns().size(), rowKeySlotRangesList.size(), + rowKeySlotRangesList.isEmpty() ? "null" : rowKeySlotRangesList.toString())); + LOGGER.trace(String.format("RowKey Matcher info Hex-value = %s,StringBinary value = %s", + rowKeyMatcherHex, rowKeyMatcherStr)); + + } + return rowKeyMatcher; + + } + + /** + * Get an optimal combination of key expressions for hash join key range optimization. + * @return returns true if the entire combined expression is covered by key range optimization + * @param result the optimal combination of key expressions + * @param context the temporary context to get scan ranges set by pushKeyExpressionsToScan() + * @param statement the statement being compiled + * @param expressions the join key expressions + * @return the optimal list of key expressions + */ + public static boolean getKeyExpressionCombination(List result, + StatementContext context, FilterableStatement statement, List expressions) + throws SQLException { + List candidateIndexes = Lists.newArrayList(); + final List pkPositions = Lists.newArrayList(); + PTable table = context.getCurrentTable().getTable(); + for (int i = 0; i < expressions.size(); i++) { + Expression expression = expressions.get(i); + KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, table); + KeyExpressionVisitor.KeySlots keySlots = expression.accept(visitor); + int minPkPos = Integer.MAX_VALUE; + if (keySlots != null) { + Iterator iterator = keySlots.getSlots().iterator(); + while (iterator.hasNext()) { + KeyExpressionVisitor.KeySlot slot = iterator.next(); + if (slot.getPKPosition() < minPkPos) { + minPkPos = slot.getPKPosition(); + } } - // If we have fully qualified point keys with multi-column spans (i.e. RVC), - // we can still use our skip scan. The ScanRanges.create() call will explode - // out the keys. 
- slotSpanArray = Arrays.copyOf(slotSpanArray, cnf.size()); - ScanRanges scanRanges = ScanRanges.create(schema, cnf, slotSpanArray, nBuckets, useSkipScan, table.getRowTimestampColPos(), minOffset); - context.setScanRanges(scanRanges); - if (whereClause == null) { - return null; - } else { - return whereClause.accept(new RemoveExtractedNodesVisitor(extractNodes)); - } - } - - private static KeyRange getTrailingRange(RowKeySchema rowKeySchema, int clippedPkPos, KeyRange range, KeyRange clippedResult, ImmutableBytesWritable ptr) { - // We are interested in the clipped part's Seperator. Since we combined first part, we need to - // remove its separator from the trailing parts' start - int clippedSepLength= rowKeySchema.getField(clippedPkPos).getDataType().isFixedWidth() ? 0 : 1; - byte[] lowerRange = KeyRange.UNBOUND; - boolean lowerInclusive = false; - // Lower range of trailing part of RVC must be true, so we can form a new range to intersect going forward - if (!range.lowerUnbound() - && range.getLowerRange().length > clippedResult.getLowerRange().length - && Bytes.startsWith(range.getLowerRange(), clippedResult.getLowerRange())) { - lowerRange = range.getLowerRange(); - int offset = clippedResult.getLowerRange().length + clippedSepLength; - ptr.set(lowerRange, offset, lowerRange.length - offset); - lowerRange = ptr.copyBytes(); - lowerInclusive = range.isLowerInclusive(); - } - byte[] upperRange = KeyRange.UNBOUND; - boolean upperInclusive = false; - if (!range.upperUnbound() - && range.getUpperRange().length > clippedResult.getUpperRange().length - && Bytes.startsWith(range.getUpperRange(), clippedResult.getUpperRange())) { - upperRange = range.getUpperRange(); - int offset = clippedResult.getUpperRange().length + clippedSepLength; - ptr.set(upperRange, offset, upperRange.length - offset); - upperRange = ptr.copyBytes(); - upperInclusive = range.isUpperInclusive(); - } - return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - } - - private static List clipRight(RowKeySchema schema, int pkPos, List keyRanges, - List leftRanges, ImmutableBytesWritable ptr) { - List clippedKeyRanges = Lists.newArrayListWithExpectedSize(keyRanges.size()); - for (int i = 0; i < leftRanges.size(); i++) { - KeyRange leftRange = leftRanges.get(i); - KeyRange range = keyRanges.get(i); - KeyRange clippedKeyRange = getTrailingRange(schema, pkPos, range, leftRange, ptr); - clippedKeyRanges.add(clippedKeyRange); - } - return clippedKeyRanges; - } - - private static List clipLeft(RowKeySchema schema, int pkPos, int clipLeftSpan, List keyRanges, ImmutableBytesWritable ptr) { - List clippedKeyRanges = Lists.newArrayListWithExpectedSize(keyRanges.size()); - for (KeyRange keyRange : keyRanges) { - KeyRange clippedKeyRange = schema.clipLeft(pkPos, keyRange, clipLeftSpan, ptr); - clippedKeyRanges.add(clippedKeyRange); - } - return clippedKeyRanges; - } - - private static List invertKeyRanges(List keyRanges) { - keyRanges = new ArrayList(keyRanges); - for (int i = 0; i < keyRanges.size(); i++) { - KeyRange range = keyRanges.get(i); - range = range.invert(); - keyRanges.set(i, range); + if (minPkPos != Integer.MAX_VALUE) { + candidateIndexes.add(i); + pkPositions.add(minPkPos); } - return keyRanges; + } } - public static byte[] getRowKeyMatcher( - final StatementContext context, - final TableName tableNameNode, - final PTable parentTable, - final Expression viewWhereExpression - ) throws SQLException { - RowKeySchema schema = parentTable.getRowKeySchema(); - List> rowKeySlotRangesList = new 
ArrayList<>(); - Integer nBuckets = parentTable.getBucketNum(); - boolean isSalted = nBuckets != null; - PName tenantId = context.getConnection().getTenantId(); - boolean isMultiTenant = tenantId != null && parentTable.isMultiTenant(); - - byte[] tenantIdBytes = tenantId == null - ? ByteUtil.EMPTY_BYTE_ARRAY : - ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isMultiTenant, false); - if (tenantIdBytes.length != 0) { - rowKeySlotRangesList.add(Arrays.asList(KeyRange.POINT.apply(tenantIdBytes))); - } - KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, parentTable); - KeyExpressionVisitor.KeySlots keySlots = viewWhereExpression.accept(visitor); - if (keySlots == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - for (KeyExpressionVisitor.KeySlot slot : keySlots.getSlots()) { - if (slot != null) { - if (schema.getField(slot.getPKPosition()).getSortOrder() == SortOrder.DESC) { - rowKeySlotRangesList.add(invertKeyRanges(slot.getKeyRanges())); - continue; - } - rowKeySlotRangesList.add(slot.getKeyRanges()); - } - } - ScanRanges scanRange = ScanRanges.createSingleSpan( - schema, rowKeySlotRangesList, null, false); - byte[] rowKeyMatcher = scanRange.getScanRange().getLowerRange(); - if (LOGGER.isTraceEnabled()) { - String rowKeyMatcherStr = Bytes.toStringBinary(rowKeyMatcher); - String rowKeyMatcherHex = Bytes.toHex(rowKeyMatcher); - byte[] rowKeyMatcherFromHex = Bytes.fromHex(rowKeyMatcherHex); - assert Bytes.compareTo(rowKeyMatcher, rowKeyMatcherFromHex) == 0; - LOGGER.trace(String.format("View info view-name = %s, view-stmt-name (parent) = %s, " - + "primary-keys = %d, key-ranges: size = %d, list = %s ", - tableNameNode.toString(), parentTable.getName().toString(), - parentTable.getPKColumns().size(), rowKeySlotRangesList.size(), - rowKeySlotRangesList.isEmpty() ? "null" : rowKeySlotRangesList.toString())); - LOGGER.trace(String.format("RowKey Matcher info Hex-value = %s,StringBinary value = %s", - rowKeyMatcherHex, rowKeyMatcherStr)); - - } - return rowKeyMatcher; - } - - - @VisibleForTesting - public static byte[] getRowKeyMatcher( - final PhoenixConnection connection, - final TableName tableNameNode, - final PTable parentTable, - final byte[][] viewColumnConstantsToBe, - final BitSet isViewColumnReferencedToBe - ) throws SQLException { - - RowKeySchema schema = parentTable.getRowKeySchema(); - Integer nBuckets = parentTable.getBucketNum(); - boolean isSalted = nBuckets != null; - - List> rowKeySlotRangesList = new ArrayList<>(); - PName tenantId = connection.getTenantId(); - boolean isMultiTenant = tenantId != null && parentTable.isMultiTenant(); - byte[] tenantIdBytes = tenantId == null - ? 
ByteUtil.EMPTY_BYTE_ARRAY : - ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isMultiTenant, false); - - if (tenantIdBytes.length != 0) { - rowKeySlotRangesList.add(Arrays.asList(KeyRange.POINT.apply(tenantIdBytes))); - } - - int pkPos = 0; - for (int i = 0; viewColumnConstantsToBe != null && i < viewColumnConstantsToBe.length; i++) { - if (isViewColumnReferencedToBe.get(i)) { - pkPos++; - ValueSchema.Field field = schema.getField(pkPos); - SortOrder fieldSortOrder = schema.getField(pkPos).getSortOrder(); - byte[] viewColumnConstants = Bytes.copy( - viewColumnConstantsToBe[i], - 0, - viewColumnConstantsToBe[i].length - 1); - KeyRange keyRange = ByteUtil.getKeyRange( - viewColumnConstants, - fieldSortOrder, - CompareOperator.EQUAL, - field.getDataType()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Field: pos = %d, name = %s, schema = %s, " - + "referenced-column %d, %s ", - pkPos, parentTable.getPKColumns().get(pkPos), - schema.getField(pkPos).toString(), - i, Bytes.toHex(viewColumnConstantsToBe[i]))); - } - rowKeySlotRangesList.add(Arrays.asList(keyRange)); - } + if (candidateIndexes.isEmpty()) return false; - } + Collections.sort(candidateIndexes, new Comparator() { + @Override + public int compare(Integer left, Integer right) { + return pkPositions.get(left) - pkPositions.get(right); + } + }); + + List candidates = Lists.newArrayList(); + List> sampleValues = Lists.newArrayList(); + for (Integer index : candidateIndexes) { + candidates.add(expressions.get(index)); + } + for (int i = 0; i < 2; i++) { + List group = Lists.newArrayList(); + for (Expression expression : candidates) { + PDataType type = expression.getDataType(); + group.add(LiteralExpression.newConstant(type.getSampleValue(), type)); + } + sampleValues.add(group); + } - ScanRanges scanRange = ScanRanges.createSingleSpan( - schema, rowKeySlotRangesList, null, false); - byte[] rowKeyMatcher = scanRange.getScanRange().getLowerRange(); + int count = 0; + int offset = table.getBucketNum() == null ? 0 : SaltingUtil.NUM_SALTING_BYTES; + int maxPkSpan = 0; + Expression remaining = null; + while (count < candidates.size()) { + Expression lhs = count == 0 + ? candidates.get(0) + : new RowValueConstructorExpression(candidates.subList(0, count + 1), false); + Expression firstRhs = count == 0 + ? sampleValues.get(0).get(0) + : new RowValueConstructorExpression(sampleValues.get(0).subList(0, count + 1), true); + Expression secondRhs = count == 0 + ? 
sampleValues.get(1).get(0) + : new RowValueConstructorExpression(sampleValues.get(1).subList(0, count + 1), true); + Expression testExpression = + InListExpression.create(Lists.newArrayList(lhs, firstRhs, secondRhs), false, + context.getTempPtr(), context.getCurrentTable().getTable().rowKeyOrderOptimizable()); + Set hints = new HashSet<>(); + if (statement.getHint() != null) { + hints = statement.getHint().getHints(); + } + + remaining = pushKeyExpressionsToScan(context, hints, testExpression); + if (context.getScanRanges().isPointLookup()) { + count++; + break; // found the best match + } + int pkSpan = context.getScanRanges().getBoundPkColumnCount() - offset; + if (pkSpan <= maxPkSpan) { + break; + } + maxPkSpan = pkSpan; + count++; + } - if (LOGGER.isTraceEnabled()) { - String rowKeyMatcherStr = Bytes.toStringBinary(rowKeyMatcher); - String rowKeyMatcherHex = Bytes.toHex(rowKeyMatcher); - byte[] rowKeyMatcherFromHex = Bytes.fromHex(rowKeyMatcherHex); - assert Bytes.compareTo(rowKeyMatcher, rowKeyMatcherFromHex) == 0; + result.addAll(candidates.subList(0, count)); - LOGGER.trace(String.format("View info view-name = %s, view-stmt-name (parent) = %s, " - + "primary-keys = %d, key-ranges: size = %d, list = %s ", - tableNameNode.toString(), parentTable.getName().toString(), - parentTable.getPKColumns().size(), rowKeySlotRangesList.size(), - rowKeySlotRangesList.isEmpty() ? "null" : rowKeySlotRangesList.toString())); - LOGGER.trace(String.format("RowKey Matcher info Hex-value = %s,StringBinary value = %s", - rowKeyMatcherHex, rowKeyMatcherStr)); + return count == candidates.size() + && (context.getScanRanges().isPointLookup() || context.getScanRanges().useSkipScanFilter()) + && (remaining == null + || remaining.equals(LiteralExpression.newConstant(true, Determinism.ALWAYS))); + } - } - return rowKeyMatcher; + private static class RemoveExtractedNodesVisitor + extends StatelessTraverseNoExpressionVisitor { + private final Set nodesToRemove; + private RemoveExtractedNodesVisitor(Set nodesToRemove) { + this.nodesToRemove = nodesToRemove; } + @Override + public Expression defaultReturn(Expression node, List e) { + return nodesToRemove.contains(node) ? null : node; + } - /** - * Get an optimal combination of key expressions for hash join key range optimization. 
- * @return returns true if the entire combined expression is covered by key range optimization - * @param result the optimal combination of key expressions - * @param context the temporary context to get scan ranges set by pushKeyExpressionsToScan() - * @param statement the statement being compiled - * @param expressions the join key expressions - * @return the optimal list of key expressions - */ - public static boolean getKeyExpressionCombination(List result, StatementContext context, FilterableStatement statement, List expressions) throws SQLException { - List candidateIndexes = Lists.newArrayList(); - final List pkPositions = Lists.newArrayList(); - PTable table = context.getCurrentTable().getTable(); - for (int i = 0; i < expressions.size(); i++) { - Expression expression = expressions.get(i); - KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, table); - KeyExpressionVisitor.KeySlots keySlots = expression.accept(visitor); - int minPkPos = Integer.MAX_VALUE; - if (keySlots != null) { - Iterator iterator = keySlots.getSlots().iterator(); - while (iterator.hasNext()) { - KeyExpressionVisitor.KeySlot slot = iterator.next(); - if (slot.getPKPosition() < minPkPos) { - minPkPos = slot.getPKPosition(); - } - } - if (minPkPos != Integer.MAX_VALUE) { - candidateIndexes.add(i); - pkPositions.add(minPkPos); - } - } - } + @Override + public Iterator visitEnter(OrExpression node) { + return node.getChildren().iterator(); + } - if (candidateIndexes.isEmpty()) - return false; + @Override + public Iterator visitEnter(AndExpression node) { + return node.getChildren().iterator(); + } - Collections.sort(candidateIndexes, new Comparator() { - @Override - public int compare(Integer left, Integer right) { - return pkPositions.get(left) - pkPositions.get(right); - } - }); - - List candidates = Lists.newArrayList(); - List> sampleValues = Lists.newArrayList(); - for (Integer index : candidateIndexes) { - candidates.add(expressions.get(index)); - } - for (int i = 0; i < 2; i++) { - List group = Lists.newArrayList(); - for (Expression expression : candidates) { - PDataType type = expression.getDataType(); - group.add(LiteralExpression.newConstant(type.getSampleValue(), type)); - } - sampleValues.add(group); - } - - int count = 0; - int offset = table.getBucketNum() == null ? 0 : SaltingUtil.NUM_SALTING_BYTES; - int maxPkSpan = 0; - Expression remaining = null; - while (count < candidates.size()) { - Expression lhs = count == 0 ? candidates.get(0) : new RowValueConstructorExpression(candidates.subList(0, count + 1), false); - Expression firstRhs = count == 0 ? sampleValues.get(0).get(0) : new RowValueConstructorExpression(sampleValues.get(0).subList(0, count + 1), true); - Expression secondRhs = count == 0 ? sampleValues.get(1).get(0) : new RowValueConstructorExpression(sampleValues.get(1).subList(0, count + 1), true); - Expression testExpression = InListExpression.create(Lists.newArrayList(lhs, firstRhs, secondRhs), false, context.getTempPtr(), context.getCurrentTable().getTable().rowKeyOrderOptimizable()); - Set hints = new HashSet<>(); - if(statement.getHint() != null){ - hints = statement.getHint().getHints(); - } + @Override + public Expression visit(LiteralExpression node) { + return nodesToRemove.contains(node) ? 
null : node; + } - remaining = pushKeyExpressionsToScan(context, hints, testExpression); - if (context.getScanRanges().isPointLookup()) { - count++; - break; // found the best match - } - int pkSpan = context.getScanRanges().getBoundPkColumnCount() - offset; - if (pkSpan <= maxPkSpan) { - break; - } - maxPkSpan = pkSpan; - count++; + @Override + public Expression visitLeave(AndExpression node, List l) { + if (!l.equals(node.getChildren())) { + if (l.isEmpty()) { + // Don't return null here, because then our defaultReturn will kick in + return LiteralExpression.newConstant(true, Determinism.ALWAYS); + } + if (l.size() == 1) { + return l.get(0); + } + try { + return AndExpression.create(l); + } catch (SQLException e) { + // shouldn't happen + throw new RuntimeException(e); } + } + return node; + } + } + + /* + * TODO: We could potentially rewrite simple expressions to move constants to the RHS such that we + * can form a start/stop key for a scan. For example, rewrite this: WHEREH a + 1 < 5 to this + * instead: WHERE a < 5 - 1 Currently the first case would not be optimized. This includes other + * arithmetic operators, CASE statements, and string concatenation. + */ + public static class KeyExpressionVisitor + extends StatelessTraverseNoExpressionVisitor { + private static final KeySlots EMPTY_KEY_SLOTS = new KeySlots() { + @Override + public boolean isPartialExtraction() { + return false; + } + + @Override + public List getSlots() { + return Collections.emptyList(); + } + }; + + private static boolean isDegenerate(List keyRanges) { + return keyRanges == null || keyRanges.isEmpty() + || (keyRanges.size() == 1 && keyRanges.get(0) == KeyRange.EMPTY_RANGE); + } - result.addAll(candidates.subList(0, count)); + private KeySlots newKeyParts(KeySlot slot, Expression extractNode, KeyRange keyRange) { + if (keyRange == null) { + return EMPTY_KEY_SLOTS; + } - return count == candidates.size() - && (context.getScanRanges().isPointLookup() || context.getScanRanges().useSkipScanFilter()) - && (remaining == null || remaining.equals(LiteralExpression.newConstant(true, Determinism.ALWAYS))); + List keyRanges = Collections. singletonList(keyRange); + return newKeyParts(slot, extractNode, keyRanges); } - private static class RemoveExtractedNodesVisitor extends StatelessTraverseNoExpressionVisitor { - private final Set nodesToRemove; - - private RemoveExtractedNodesVisitor(Set nodesToRemove) { - this.nodesToRemove = nodesToRemove; - } + private KeySlots newKeyParts(KeySlot slot, Expression extractNode, List keyRanges) { + if (isDegenerate(keyRanges)) { + return EMPTY_KEY_SLOTS; + } + + Set extractNodes = + extractNode == null || slot.getKeyPart().getExtractNodes().isEmpty() + ? Collections.emptySet() + : new LinkedHashSet<>(Collections. singleton(extractNode)); + return new SingleKeySlot(new BaseKeyPart(table, slot.getKeyPart().getColumn(), extractNodes), + slot.getPKPosition(), slot.getPKSpan(), keyRanges, slot.getOrderPreserving()); + } - @Override - public Expression defaultReturn(Expression node, List e) { - return nodesToRemove.contains(node) ? 
null : node; - } + private KeySlots newKeyParts(KeySlot slot, Set extractNodes, + List keyRanges) { + if (isDegenerate(keyRanges)) { + return EMPTY_KEY_SLOTS; + } - @Override - public Iterator visitEnter(OrExpression node) { - return node.getChildren().iterator(); - } + return new SingleKeySlot(new BaseKeyPart(table, slot.getKeyPart().getColumn(), extractNodes), + slot.getPKPosition(), slot.getPKSpan(), keyRanges, slot.getOrderPreserving()); + } - @Override - public Iterator visitEnter(AndExpression node) { - return node.getChildren().iterator(); - } + private KeySlots newRowValueConstructorKeyParts(RowValueConstructorExpression rvc, + List childSlots) { + if (childSlots.isEmpty() || rvc.isStateless()) { + return null; + } + + int position = -1; + int initialPosition = -1; + for (int i = 0; i < childSlots.size(); i++) { + KeySlots slots = childSlots.get(i); + KeySlot keySlot = slots.getSlots().iterator().next(); + Set childExtractNodes = keySlot.getKeyPart().getExtractNodes(); + // Stop if there was a gap in extraction of RVC elements. This is required if the leading + // RVC has not row key columns, as we'll still get childSlots if the RVC has trailing row + // key columns. We can't rule the RVC out completely when the childSlots is less the the + // RVC length, as a partial, *leading* match is optimizable. + if ( + childExtractNodes.size() != 1 || !childExtractNodes.contains(rvc.getChildren().get(i)) + ) { + break; + } + int pkPosition = keySlot.getPKPosition(); + if (pkPosition < 0) { // break for non PK columns + break; + } + // Continue while we have consecutive pk columns + if (position == -1) { + position = initialPosition = pkPosition; + } else if (pkPosition != position) { + break; + } + position++; + + // If we come to a point where we're not preserving order completely + // then stop. We will never get a NO here, but we might get a YES_IF_LAST + // if the child expression is only using part of the underlying pk column. + // (for example, in the case of SUBSTR). In this case, we must stop building + // the row key constructor at that point. + assert (keySlot.getOrderPreserving() != OrderPreserving.NO); + if (keySlot.getOrderPreserving() == OrderPreserving.YES_IF_LAST) { + break; + } + } + if (position > 0) { + int span = position - initialPosition; + return new SingleKeySlot( + new RowValueConstructorKeyPart(table.getPKColumns().get(initialPosition), rvc, span, + childSlots), + initialPosition, span, EVERYTHING_RANGES); + } + return null; + } - @Override - public Expression visit(LiteralExpression node) { - return nodesToRemove.contains(node) ? 
null : node; - } + private KeySlots newScalarFunctionKeyPart(KeySlot slot, ScalarFunction node) { + if (isDegenerate(slot.getKeyRanges())) { + return EMPTY_KEY_SLOTS; + } + KeyPart part = node.newKeyPart(slot.getKeyPart()); + if (part == null) { + return null; + } + + // Scalar function always returns primitive and never a row value constructor, so span is + // always 1 + return new SingleKeySlot(part, slot.getPKPosition(), slot.getKeyRanges(), + node.preservesOrder()); + } - @Override - public Expression visitLeave(AndExpression node, List l) { - if (!l.equals(node.getChildren())) { - if (l.isEmpty()) { - // Don't return null here, because then our defaultReturn will kick in - return LiteralExpression.newConstant(true, Determinism.ALWAYS); - } - if (l.size() == 1) { - return l.get(0); - } - try { - return AndExpression.create(l); - } catch (SQLException e) { - //shouldn't happen - throw new RuntimeException(e); - } - } - return node; - } + private KeySlots newCoerceKeyPart(KeySlot slot, final CoerceExpression node) { + if (isDegenerate(slot.getKeyRanges())) { + return EMPTY_KEY_SLOTS; + } + final Set extractNodes = + new LinkedHashSet<>(Collections. singletonList(node)); + final KeyPart childPart = slot.getKeyPart(); + final ImmutableBytesWritable ptr = context.getTempPtr(); + return new SingleKeySlot(new CoerceKeySlot(childPart, ptr, node, extractNodes), + slot.getPKPosition(), slot.getKeyRanges()); } - /* - * TODO: We could potentially rewrite simple expressions to move constants to the RHS - * such that we can form a start/stop key for a scan. For example, rewrite this: - * WHEREH a + 1 < 5 - * to this instead: - * WHERE a < 5 - 1 - * Currently the first case would not be optimized. This includes other arithmetic - * operators, CASE statements, and string concatenation. + /** + * Iterates through all combinations of KeyRanges for a given PK column (based on its slot + * position). Useful when expressions are ORed together and subsequently ANDed. For example: + * WHERE (pk1 = 1 OR pk1 = 2) AND (pk2 = 3 OR pk2 = 4) would iterate through and produce + * [1,3],[1,4],[2,3],[2,4]. 
*/ - public static class KeyExpressionVisitor extends StatelessTraverseNoExpressionVisitor { - private static final KeySlots EMPTY_KEY_SLOTS = new KeySlots() { - @Override - public boolean isPartialExtraction() { - return false; - } + static class SlotsIterator { + public final int pkPos; + private List childSlots; + private List slotRangesIterator; + private boolean firstCall = true; + + SlotsIterator(List childSlots, int pkPos) { + this.childSlots = childSlots; + this.pkPos = pkPos; + this.slotRangesIterator = Lists.newArrayListWithExpectedSize(childSlots.size() * 3 / 2); + for (int i = 0; i < childSlots.size(); i++) { + SlotRangesIterator iterator = new SlotRangesIterator(i); + slotRangesIterator.add(iterator); + iterator.initialize(); + } + } + + public KeySlot getSlot(int index) { + SlotRangesIterator slotRanges = slotRangesIterator.get(index); + return slotRanges.getSlot(); + } + + public KeyRange getRange(int index) { + SlotRangesIterator slotRanges = slotRangesIterator.get(index); + return slotRanges.getRange(); + } + + public boolean next() { + if (firstCall) { + boolean hasAny = false; + for (int i = 0; i < childSlots.size(); i++) { + hasAny |= this.slotRangesIterator.get(i).initialize(); + } + firstCall = false; + return hasAny; + } + int i = 0; + while (i < childSlots.size() && !slotRangesIterator.get(i).next()) { + i++; + } + for (i = 0; i < childSlots.size(); i++) { + if (!this.slotRangesIterator.get(i).isWrapped()) { + return true; + } + } + return false; + } - @Override - public List getSlots() { - return Collections.emptyList(); - } - }; + private class SlotRangesIterator { + public int slotIndex; + public int rangeIndex; + public final KeySlots slots; + public boolean wrapped; - private static boolean isDegenerate(List keyRanges) { - return keyRanges == null || keyRanges.isEmpty() || (keyRanges.size() == 1 && keyRanges.get(0) == KeyRange.EMPTY_RANGE); + public SlotRangesIterator(int slotsIndex) { + this.slots = childSlots.get(slotsIndex); } - private KeySlots newKeyParts(KeySlot slot, Expression extractNode, KeyRange keyRange) { - if (keyRange == null) { - return EMPTY_KEY_SLOTS; - } + public boolean isWrapped() { + return wrapped || !hasAny(); + } - List keyRanges = Collections.singletonList(keyRange); - return newKeyParts(slot, extractNode, keyRanges); + private boolean initialize() { + slotIndex = 0; + rangeIndex = 0; + while ( + slotIndex < slots.getSlots().size() && (slots.getSlots().get(slotIndex) == null + || slots.getSlots().get(slotIndex).getKeyRanges().isEmpty() + || slots.getSlots().get(slotIndex).getPKPosition() != pkPos) + ) { + slotIndex++; + } + return hasAny(); } - private KeySlots newKeyParts(KeySlot slot, Expression extractNode, List keyRanges) { - if (isDegenerate(keyRanges)) { - return EMPTY_KEY_SLOTS; - } + private boolean hasAny() { + return slotIndex < slots.getSlots().size(); + } - Set extractNodes = extractNode == null || slot.getKeyPart().getExtractNodes().isEmpty() - ? 
Collections.emptySet() - : new LinkedHashSet<>(Collections.singleton(extractNode)); - return new SingleKeySlot(new BaseKeyPart(table, slot.getKeyPart().getColumn(), extractNodes), slot.getPKPosition(), slot.getPKSpan(), keyRanges, slot.getOrderPreserving()); + public KeySlot getSlot() { + if (!hasAny()) return null; + return slots.getSlots().get(slotIndex); } - private KeySlots newKeyParts(KeySlot slot, Set extractNodes, List keyRanges) { - if (isDegenerate(keyRanges)) { - return EMPTY_KEY_SLOTS; - } + public KeyRange getRange() { + if (!hasAny()) return null; + return getSlot().getKeyRanges().get(rangeIndex); + } - return new SingleKeySlot(new BaseKeyPart(table, slot.getKeyPart().getColumn(), extractNodes), slot.getPKPosition(), slot.getPKSpan(), keyRanges, slot.getOrderPreserving()); + public boolean next() { + if (!hasAny()) { + return false; + } + List ranges = getSlot().getKeyRanges(); + if ((rangeIndex = (rangeIndex + 1) % ranges.size()) == 0) { + do { + if (((slotIndex = (slotIndex + 1) % slots.getSlots().size()) == 0)) { + initialize(); + wrapped = true; + return false; + } + } while ( + getSlot() == null || getSlot().getKeyRanges().isEmpty() + || getSlot().getPKPosition() != pkPos + ); + } + + return true; } + } + } - private KeySlots newRowValueConstructorKeyParts(RowValueConstructorExpression rvc, List childSlots) { - if (childSlots.isEmpty() || rvc.isStateless()) { - return null; + /** + * Ands together an arbitrary set of compiled expressions (represented as a list of KeySlots) by + * intersecting each unique combination among the childSlots. + * @param andExpression expressions being anded together + * @param childSlots compiled form of child expressions being anded together. + */ + private KeySlots andKeySlots(AndExpression andExpression, List childSlots) { + + if (childSlots.isEmpty()) { + return null; + } + // Exit early if it's already been determined that one of the child slots cannot + // possibly be true. + boolean partialExtraction = andExpression.getChildren().size() != childSlots.size(); + + int nChildSlots = childSlots.size(); + for (int i = 0; i < nChildSlots; i++) { + KeySlots childSlot = childSlots.get(i); + if (childSlot == EMPTY_KEY_SLOTS) { + return EMPTY_KEY_SLOTS; + } + // If any child slots represent partially extracted expressions, then carry + // that forward. An example of a partially extracted expression would be a + // RVC of (K1, K2, NonK3) in which only leading PK columns are extracted + // from the RVC. + partialExtraction |= childSlot.isPartialExtraction(); + } + boolean mayExtractNodes = true; + ImmutableBytesWritable ptr = context.getTempPtr(); + RowKeySchema rowKeySchema = table.getRowKeySchema(); + int nPkColumns = table.getPKColumns().size(); + KeySlot[] keySlotArray = new KeySlot[nPkColumns]; + int initPkPos = (table.getBucketNum() == null ? 0 : 1) + + (this.context.getConnection().getTenantId() != null && table.isMultiTenant() ? 1 : 0) + + (table.getViewIndexId() == null ? 0 : 1); + + List>> slotsTrailingRanges = + Lists.newArrayListWithExpectedSize(nPkColumns); + // Process all columns being ANDed in position order to guarantee + // we have all information for leading PK columns before we attempt + // to intersect them. For example: + // (A, B, C) >= (1, 2, 3) AND (B, C) < (4, 5) AND A = 1 + // will processing slot 0 (i.e PK column A) across all children first, + // followed by slot 1 (PK column B), and finally slot 2 (C). 
This is + // done because we carry forward any constraints from preceding PK + // columns which may impact following PK columns. In the above example + // we'd carry forward that (B,C) >= (2,3) since we know that A is 1. + for (int pkPos = initPkPos; pkPos < nPkColumns; pkPos++) { + SlotsIterator iterator = new SlotsIterator(childSlots, pkPos); + OrderPreserving orderPreserving = null; + Set visitedKeyParts = Sets.newHashSet(); + Set extractNodes = new LinkedHashSet<>(); + List keyRanges = Lists.newArrayList(); + // This is the information carried forward as we process in PK order. + // It's parallel with the list of keyRanges. + List trailingRangesList = Lists. newArrayList(); + KeyRange result = null; + TrailingRangeIterator trailingRangeIterator = + new TrailingRangeIterator(initPkPos, pkPos, slotsTrailingRanges); + // Iterate through all combinations (i.e. constraints) for the PK slot being processed. + // For example, with (A = 1 OR A = 2) AND (A,B) > (1,2) AND C = 3, we'd process the + // following two combinations: + // A=1,(A,B) > (1,2) + // A=2,(A,B) > (1,2) + // If we have no constraints for a PK, then we still must iterate through the information + // that may have been rolled up based on the processing of previous PK slots. For example, + // in the above ANDed expressions, we have no constraint on B, but we would end up with + // rolled up information based on the B part of the (A,B) constraint. + while ( + iterator.next() || (trailingRangeIterator.hasNext() && result != KeyRange.EMPTY_RANGE) + ) { + result = null; + KeyRange[] trailingRanges = newTrailingRange(); + for (int i = 0; i < nChildSlots && result != KeyRange.EMPTY_RANGE; i++) { + KeySlot slot = iterator.getSlot(i); + // Rollup the order preserving and concatenate the extracted expressions. + // Extracted expressions end up being removed from the AND expression at + // the top level call (pushKeyExpressionsToScan) with anything remaining + // ending up as a Filter (rather than contributing to the start/stop row + // of the scan. + if (slot != null) { + KeyRange otherRange = iterator.getRange(i); + KeyRange range = result; + if (slot.getOrderPreserving() != null) { + orderPreserving = slot.getOrderPreserving().combine(orderPreserving); + } + // Extract once per iteration, when there are large number + // of OR clauses (for e.g N > 100k). + // The extractNodes.addAll method can get called N times. + if ( + visitedKeyParts.add(slot.getKeyPart()) + && slot.getKeyPart().getExtractNodes() != null + ) { + extractNodes.addAll(slot.getKeyPart().getExtractNodes()); + } + // Keep a running intersection of the ranges we see. Note that the + // ranges are derived from constants appearing on the RHS of a comparison + // expression. For example, the expression A > 5 would produce a keyRange + // of (5, *) for slot 0 (assuming A is the leading PK column) If the result + // ends up as an empty key, that combination is ruled out. This is essentially + // doing constant reduction. + result = intersectRanges(pkPos, range, otherRange, trailingRanges); + } + } + + if (result != KeyRange.EMPTY_RANGE) { + Map> results = Maps.newHashMap(); + trailingRangeIterator.init(); + // Process all constraints that have been rolled up from previous + // processing of PK slots. This occurs for RVCs which span PK slots + // in which the leading part of the RVC is determined to be equal + // to a constant on the RHS. + while (trailingRangeIterator.hasNext()) { + // Loop through all combinations of values for all previously + // calculated slots. 
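The per-column combination walk that andKeySlots drives through SlotsIterator is easiest to see with concrete values. The self-contained toy below enumerates the same cross product the SlotsIterator javadoc earlier in this hunk describes for WHERE (pk1 = 1 OR pk1 = 2) AND (pk2 = 3 OR pk2 = 4); the class and method names are invented for illustration only.

    import java.util.Arrays;
    import java.util.List;

    final class SlotCombinations {
      static void printCombinations(List<List<Integer>> valuesPerPkColumn) {
        int[] idx = new int[valuesPerPkColumn.size()];
        while (true) {
          StringBuilder combo = new StringBuilder("[");
          for (int i = 0; i < idx.length; i++) {
            combo.append(valuesPerPkColumn.get(i).get(idx[i]))
                 .append(i == idx.length - 1 ? "]" : ",");
          }
          System.out.println(combo);
          int col = idx.length - 1;
          while (col >= 0 && ++idx[col] == valuesPerPkColumn.get(col).size()) {
            idx[col--] = 0; // wrap this column and carry into the previous one
          }
          if (col < 0) {
            return; // every combination has been visited
          }
        }
      }

      public static void main(String[] args) {
        // WHERE (pk1 = 1 OR pk1 = 2) AND (pk2 = 3 OR pk2 = 4)
        printCombinations(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4)));
        // prints [1,3] [1,4] [2,3] [2,4]
      }
    }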
+ do { + // Loop through all combinations of range constraints for the + // current combinations of values. If no valid combinations + // are found, we can rule out the result. We can also end up + // modifying the result if it has an intersection with the + // range constraints. + do { + KeyRange priorTrailingRange = trailingRangeIterator.getRange(); + if (priorTrailingRange != KeyRange.EVERYTHING_RANGE) { + KeyRange[] intTrailingRanges = + Arrays.copyOf(trailingRanges, trailingRanges.length); + // Intersect the current result with each range constraint. We essentially + // rule out the result when we find a constraint that has no intersection + KeyRange intResult = + intersectRanges(pkPos, result, priorTrailingRange, intTrailingRanges); + if (intResult != KeyRange.EMPTY_RANGE) { + addResult(intResult, intTrailingRanges, results); + } + } + } while (trailingRangeIterator.nextTrailingRange()); + } while (trailingRangeIterator.nextRange()); } - - int position = -1; - int initialPosition = -1; - for (int i = 0; i < childSlots.size(); i++) { - KeySlots slots = childSlots.get(i); - KeySlot keySlot = slots.getSlots().iterator().next(); - Set childExtractNodes = keySlot.getKeyPart().getExtractNodes(); - // Stop if there was a gap in extraction of RVC elements. This is required if the leading - // RVC has not row key columns, as we'll still get childSlots if the RVC has trailing row - // key columns. We can't rule the RVC out completely when the childSlots is less the the - // RVC length, as a partial, *leading* match is optimizable. - if (childExtractNodes.size() != 1 || !childExtractNodes.contains(rvc.getChildren().get(i))) { - break; - } - int pkPosition = keySlot.getPKPosition(); - if (pkPosition < 0) { // break for non PK columns - break; - } - // Continue while we have consecutive pk columns - if (position == -1) { - position = initialPosition = pkPosition; - } else if (pkPosition != position) { - break; - } - position++; - - // If we come to a point where we're not preserving order completely - // then stop. We will never get a NO here, but we might get a YES_IF_LAST - // if the child expression is only using part of the underlying pk column. - // (for example, in the case of SUBSTR). In this case, we must stop building - // the row key constructor at that point. 
- assert(keySlot.getOrderPreserving() != OrderPreserving.NO); - if (keySlot.getOrderPreserving() == OrderPreserving.YES_IF_LAST) { - break; + if (results.isEmpty() && result != null) { // No trailing range constraints + keyRanges.add(result); + trailingRangesList.add(trailingRanges); + } else { + mayExtractNodes &= results.size() <= 1; + for (Map.Entry> entry : results.entrySet()) { + // Add same KeyRange with each KeyRange[] since the two lists are parallel + for (KeyRange[] trailingRange : entry.getValue()) { + keyRanges.add(entry.getKey()); + trailingRangesList.add(trailingRange); } + } } - if (position > 0) { - int span = position - initialPosition; - return new SingleKeySlot(new RowValueConstructorKeyPart(table.getPKColumns().get(initialPosition), rvc, span, childSlots), initialPosition, span, EVERYTHING_RANGES); - } - return null; + } } - private KeySlots newScalarFunctionKeyPart(KeySlot slot, ScalarFunction node) { - if (isDegenerate(slot.getKeyRanges())) { - return EMPTY_KEY_SLOTS; - } - KeyPart part = node.newKeyPart(slot.getKeyPart()); - if (part == null) { - return null; - } + if (result == null && keyRanges.isEmpty()) { + slotsTrailingRanges.add(Collections.> emptyList()); + } else { + // If we encountered a result for this slot and + // there are no ranges, this is the degenerate case. + if (keyRanges.isEmpty()) { + return EMPTY_KEY_SLOTS; + } + // Similar to KeyRange.coalesce(), except we must combine together + // any rolled up constraints (as a list of KeyRanges) for a + // particular value (as they're coalesced together). We maintain + // these KeyRange constraints as a parallel list between keyRanges + // and trailingRangesList. + keyRanges = + coalesceKeyRangesAndTrailingRanges(keyRanges, trailingRangesList, slotsTrailingRanges); + int maxSpan = 1; + for (KeyRange aRange : keyRanges) { + int span = rowKeySchema.computeMaxSpan(pkPos, aRange, context.getTempPtr()); + if (span > maxSpan) { + maxSpan = span; + } + } + keySlotArray[pkPos] = new KeySlot( + new BaseKeyPart(table, table.getPKColumns().get(pkPos), + mayExtractNodes ? extractNodes : Collections. emptySet()), + pkPos, maxSpan, keyRanges, orderPreserving); + } + } + + // Filters trailing part of RVC based on ranges from PK columns after the one we're + // currently processing that may overlap with this range. For example, with a PK + // columns A,B,C and a range of A from [(1,2,3) - (4,5,6)] and B from (6-*), we + // can filter the trailing part of the RVC for A, because the trailing part of + // the RVC (2,3)-(5,6) does not intersect with (6-*). By removing the trailing + // part of the RVC, we end up with a range of A from [1-4] and B from (6-*) which + // enables us to use a skip scan. 
+ for (int i = 0; i < keySlotArray.length; i++) { + KeySlot keySlot = keySlotArray[i]; + if (keySlot == null) continue; + int pkSpan = keySlot.getPKSpan(); + int pkPos = keySlot.getPKPosition(); + boolean slotWasIntersected = false; + List keyRanges = keySlot.getKeyRanges(); + List slotTrimmedResults = Lists.newArrayListWithExpectedSize(keyRanges.size()); + for (KeyRange result : keyRanges) { + boolean resultWasIntersected = false; + Set trimmedResults = Sets.newHashSetWithExpectedSize(keyRanges.size()); + for (int trailingPkPos = pkPos + 1; trailingPkPos < pkPos + pkSpan + && trailingPkPos < nPkColumns; trailingPkPos++) { + KeySlot nextKeySlot = keySlotArray[trailingPkPos]; + if (nextKeySlot == null) continue; + for (KeyRange trailingRange : nextKeySlot.getKeyRanges()) { + resultWasIntersected = true; + KeyRange intResult = intersectTrailing(result, pkPos, trailingRange, trailingPkPos); + if (intResult != KeyRange.EMPTY_RANGE) { + trimmedResults.add(intResult); + } + } + } + if (resultWasIntersected) { + slotWasIntersected = true; + slotTrimmedResults.addAll(trimmedResults); + mayExtractNodes &= trimmedResults.size() <= 1; + } else { + slotTrimmedResults.add(result); + } + } + if (slotTrimmedResults.isEmpty()) { + return EMPTY_KEY_SLOTS; + } + if (slotWasIntersected) { + // Re-coalesce the ranges and recalc the max span since the ranges may have changed + slotTrimmedResults = KeyRange.coalesce(slotTrimmedResults); + pkSpan = 1; + for (KeyRange trimmedResult : slotTrimmedResults) { + pkSpan = Math.max(pkSpan, rowKeySchema.computeMaxSpan(pkPos, trimmedResult, ptr)); + } + } + + Set extractNodes = mayExtractNodes + ? keySlotArray[pkPos].getKeyPart().getExtractNodes() + : new LinkedHashSet<>(); + keySlotArray[pkPos] = + new KeySlot(new BaseKeyPart(table, table.getPKColumns().get(pkPos), extractNodes), pkPos, + pkSpan, slotTrimmedResults, keySlotArray[pkPos].getOrderPreserving()); + } + List keySlots = Arrays.asList(keySlotArray); + // If we have a salt column, skip that slot because + // they'll never be an expression that uses it directly. + keySlots = keySlots.subList(initPkPos, keySlots.size()); + return new MultiKeySlot(keySlots, partialExtraction); + } - // Scalar function always returns primitive and never a row value constructor, so span is always 1 - return new SingleKeySlot(part, slot.getPKPosition(), slot.getKeyRanges(), node.preservesOrder()); - } + private KeyRange[] newTrailingRange() { + KeyRange[] trailingRanges = new KeyRange[table.getPKColumns().size()]; + for (int i = 0; i < trailingRanges.length; i++) { + trailingRanges[i] = KeyRange.EVERYTHING_RANGE; + } + return trailingRanges; + } - private KeySlots newCoerceKeyPart(KeySlot slot, final CoerceExpression node) { - if (isDegenerate(slot.getKeyRanges())) { - return EMPTY_KEY_SLOTS; - } - final Set extractNodes = new LinkedHashSet<>(Collections.singletonList(node)); - final KeyPart childPart = slot.getKeyPart(); - final ImmutableBytesWritable ptr = context.getTempPtr(); - return new SingleKeySlot(new CoerceKeySlot( - childPart, ptr, node, extractNodes), slot.getPKPosition(), slot.getKeyRanges()); - } - - /** - * - * Iterates through all combinations of KeyRanges for a given - * PK column (based on its slot position). Useful when expressions - * are ORed together and subsequently ANDed. For example: - * WHERE (pk1 = 1 OR pk1 = 2) AND (pk2 = 3 OR pk2 = 4) - * would iterate through and produce [1,3],[1,4],[2,3],[2,4]. 
- * - */ - static class SlotsIterator { - public final int pkPos; - private List childSlots; - private List slotRangesIterator; - private boolean firstCall = true; - - SlotsIterator(List childSlots, int pkPos) { - this.childSlots = childSlots; - this.pkPos = pkPos; - this.slotRangesIterator = Lists.newArrayListWithExpectedSize(childSlots.size() * 3 / 2); - for (int i = 0; i < childSlots.size(); i++) { - SlotRangesIterator iterator = new SlotRangesIterator(i); - slotRangesIterator.add(iterator); - iterator.initialize(); - } - } + private static void addResult(KeyRange result, KeyRange[] trailingRange, + Map> results) { + List trailingRanges = Lists. newArrayList(trailingRange); + List priorTrailingRanges = results.put(result, trailingRanges); + if (priorTrailingRanges != null) { + // This is tricky case. We may have multiple possible values based on the rolled up range + // constraints from previous slots. We track unique ranges and concatenate together the + // trailing range data. If there's more than one element in the set (i.e. more than one + // possible result), we'll end up have more combinations than there actually are because + // the constraint only apply for a single value, not for *all* combinations (which is a + // limitation of our representation derived from what can be handled by our SkipScanFilter). + // For example, if we we've gathered these ranges so far in a three PK table: (1,2), (A,B) + // and have X as a constraint for value A and Y as a constraint for value B, we have the + // following possible combinations: 1AX, 2AX, 1BY, 2BY. However, our SkipScanFilter only + // supports identifying combinations for *all* combinations of (1,2),(A,B),(X,Y) or + // AX, 1AY, 1BX, 1BY, 2AX, 2AY, 2BX, 2BY. See + // WhereOptimizerTest.testNotRepresentableBySkipScan() + // for an example. 
+ trailingRanges.addAll(priorTrailingRanges); + } + } - public KeySlot getSlot(int index) { - SlotRangesIterator slotRanges = slotRangesIterator.get(index); - return slotRanges.getSlot(); - } + private List coalesceKeyRangesAndTrailingRanges(List keyRanges, + List trailingRangesList, List>> slotsTrailingRanges) { + List>> pairs = coalesce(keyRanges, trailingRangesList); + List> trailingRanges = Lists.newArrayListWithExpectedSize(pairs.size()); + List coalescedKeyRanges = Lists.newArrayListWithExpectedSize(pairs.size()); + for (Pair> pair : pairs) { + coalescedKeyRanges.add(pair.getFirst()); + trailingRanges.add(pair.getSecond()); + } + slotsTrailingRanges.add(trailingRanges); + return coalescedKeyRanges; + } - public KeyRange getRange(int index) { - SlotRangesIterator slotRanges = slotRangesIterator.get(index); - return slotRanges.getRange(); - } + public static final Comparator>> KEY_RANGE_PAIR_COMPARATOR = + new Comparator>>() { + @Override + public int compare(Pair> o1, + Pair> o2) { + return KeyRange.COMPARATOR.compare(o1.getFirst(), o2.getFirst()); + } + }; - public boolean next() { - if (firstCall) { - boolean hasAny = false; - for (int i = 0; i < childSlots.size(); i++) { - hasAny |= this.slotRangesIterator.get(i).initialize(); - } - firstCall = false; - return hasAny; - } - int i = 0; - while (i < childSlots.size() && !slotRangesIterator.get(i).next()) { - i++; - } - for (i = 0; i < childSlots.size(); i++) { - if (!this.slotRangesIterator.get(i).isWrapped()) { - return true; - } - } - return false; - } + private static boolean isEverythingRanges(KeyRange[] ranges) { + for (KeyRange range : ranges) { + if (range != KeyRange.EVERYTHING_RANGE) { + return false; + } + } + return true; + } - private class SlotRangesIterator { - public int slotIndex; - public int rangeIndex; - public final KeySlots slots; - public boolean wrapped; + private static List concat(List list1, List list2) { + if (list1.size() == 1 && isEverythingRanges(list1.get(0))) { + if (list2.size() == 1 && isEverythingRanges(list1.get(0))) { + return Collections.emptyList(); + } + return list2; + } + if (list2.size() == 1 && isEverythingRanges(list2.get(0))) { + return list1; + } + + List newList = + Lists. newArrayListWithExpectedSize(list1.size() + list2.size()); + newList.addAll(list1); + newList.addAll(list2); + return newList; + } - public SlotRangesIterator(int slotsIndex) { - this.slots = childSlots.get(slotsIndex); - } + /** + * Similar to KeyRange.coelesce, but con + */ + @NonNull + public static List>> coalesce(List keyRanges, + List trailingRangesList) { + List>> tmp = + Lists.newArrayListWithExpectedSize(keyRanges.size()); + int nKeyRanges = keyRanges.size(); + for (int i = 0; i < nKeyRanges; i++) { + KeyRange keyRange = keyRanges.get(i); + KeyRange[] trailingRange = trailingRangesList.get(i); + Pair> pair = new Pair>(keyRange, + Lists. 
newArrayList(trailingRange)); + tmp.add(pair); + } + Collections.sort(tmp, KEY_RANGE_PAIR_COMPARATOR); + List>> tmp2 = + Lists.>> newArrayListWithExpectedSize(tmp.size()); + Pair> range = tmp.get(0); + for (int i = 1; i < tmp.size(); i++) { + Pair> otherRange = tmp.get(i); + KeyRange intersect = range.getFirst().intersect(otherRange.getFirst()); + if (KeyRange.EMPTY_RANGE == intersect) { + tmp2.add(range); + range = otherRange; + } else { + KeyRange newRange = range.getFirst().union(otherRange.getFirst()); + range = new Pair>(newRange, + concat(range.getSecond(), otherRange.getSecond())); + } + } + tmp2.add(range); + List>> tmp3 = + Lists.>> newArrayListWithExpectedSize(tmp2.size()); + range = tmp2.get(0); + for (int i = 1; i < tmp2.size(); i++) { + Pair> otherRange = tmp2.get(i); + assert !range.getFirst().upperUnbound(); + assert !otherRange.getFirst().lowerUnbound(); + if ( + range.getFirst().isUpperInclusive() != otherRange.getFirst().isLowerInclusive() + && Bytes.equals(range.getFirst().getUpperRange(), otherRange.getFirst().getLowerRange()) + ) { + KeyRange newRange = KeyRange.getKeyRange(range.getFirst().getLowerRange(), + range.getFirst().isLowerInclusive(), otherRange.getFirst().getUpperRange(), + otherRange.getFirst().isUpperInclusive()); + range = new Pair>(newRange, + concat(range.getSecond(), otherRange.getSecond())); + } else { + tmp3.add(range); + range = otherRange; + } + } + tmp3.add(range); - public boolean isWrapped() { - return wrapped || !hasAny(); - } + return tmp3; + } - private boolean initialize() { - slotIndex = 0; - rangeIndex = 0; - while (slotIndex < slots.getSlots().size() - && (slots.getSlots().get(slotIndex) == null - || slots.getSlots().get(slotIndex).getKeyRanges().isEmpty() - || slots.getSlots().get(slotIndex).getPKPosition() != pkPos)) { - slotIndex++; - } - return hasAny(); - } + /** + * Iterates over all unique combinations of the List representing the constraints + * from previous slot positions. For example, if we have a RVC of (A,B) = (2,1), then if A=2, we + * know that B must be 1. 
+ */ + static class TrailingRangeIterator { + private final List>> slotTrailingRangesList; + private final int[] rangePos; + private final int[] trailingRangePos; + private final int initPkPos; + private final int pkPos; + private int trailingRangePosIndex; + private int rangePosIndex; + private boolean hasMore = true; + + TrailingRangeIterator(int initPkPos, int pkPos, + List>> slotsTrailingRangesList) { + this.slotTrailingRangesList = slotsTrailingRangesList; + int nSlots = pkPos - initPkPos; + rangePos = new int[nSlots]; + trailingRangePos = new int[nSlots]; + this.initPkPos = initPkPos; + this.pkPos = pkPos; + init(); + } + + public void init() { + Arrays.fill(rangePos, 0); + Arrays.fill(trailingRangePos, 0); + rangePosIndex = rangePos.length - 1; + trailingRangePosIndex = trailingRangePos.length - 1; + this.hasMore = pkPos > initPkPos && skipEmpty(); + } + + public boolean hasNext() { + return hasMore && skipEmpty(); + } + + public KeyRange getRange() { + if (!hasMore) { + throw new NoSuchElementException(); + } + KeyRange priorTrailingRange = KeyRange.EVERYTHING_RANGE; + for (int priorPkPos = initPkPos; priorPkPos < pkPos; priorPkPos++) { + List> trailingKeyRangesList = + slotTrailingRangesList.get(priorPkPos - initPkPos); + if (!trailingKeyRangesList.isEmpty()) { + List slotTrailingRanges = + trailingKeyRangesList.get(rangePos[priorPkPos - initPkPos]); + if (!slotTrailingRanges.isEmpty()) { + KeyRange[] slotTrailingRange = + slotTrailingRanges.get(trailingRangePos[priorPkPos - initPkPos]); + priorTrailingRange = priorTrailingRange.intersect(slotTrailingRange[pkPos]); + } + } + } + + return priorTrailingRange; + } + + private boolean skipEmptyTrailingRanges() { + while ( + trailingRangePosIndex >= 0 && (slotTrailingRangesList.get(trailingRangePosIndex).isEmpty() + || slotTrailingRangesList.get(trailingRangePosIndex) + .get(rangePos[trailingRangePosIndex]).isEmpty()) + ) { + trailingRangePosIndex--; + } + if (trailingRangePosIndex >= 0) { + return true; + } + return false; + } + + private boolean skipEmptyRanges() { + trailingRangePosIndex = trailingRangePos.length - 1; + while (rangePosIndex >= 0 && (slotTrailingRangesList.get(rangePosIndex).isEmpty())) { + rangePosIndex--; + } + return rangePosIndex >= 0; + } + + private boolean skipEmpty() { + if (!hasMore || slotTrailingRangesList.isEmpty() || rangePosIndex < 0) { + return hasMore = false; + } + do { + if (skipEmptyTrailingRanges()) { + return true; + } + } while (skipEmptyRanges()); + return hasMore = rangePosIndex >= 0; + } + + public boolean nextRange() { + trailingRangePosIndex = trailingRangePos.length - 1; + while ( + rangePosIndex >= 0 + && (slotTrailingRangesList.get(rangePosIndex).isEmpty() || (rangePos[rangePosIndex] = + (rangePos[rangePosIndex] + 1) % slotTrailingRangesList.get(rangePosIndex).size()) + == 0) + ) { + rangePosIndex--; + } + return rangePosIndex >= 0; + } + + public boolean nextTrailingRange() { + while ( + trailingRangePosIndex >= 0 && (slotTrailingRangesList.get(trailingRangePosIndex).isEmpty() + || slotTrailingRangesList.get(trailingRangePosIndex) + .get(rangePos[trailingRangePosIndex]).isEmpty() + || (trailingRangePos[trailingRangePosIndex] = + (trailingRangePos[trailingRangePosIndex] + 1) % slotTrailingRangesList + .get(trailingRangePosIndex).get(rangePos[trailingRangePosIndex]).size()) == 0) + ) { + trailingRangePosIndex--; + } + if (trailingRangePosIndex >= 0) { + return true; + } + return false; + } + } - private boolean hasAny() { - return slotIndex < slots.getSlots().size(); - } + private 
KeyRange intersectRanges(int pkPos, KeyRange range, KeyRange otherRange, + KeyRange[] trailingRanges) { + // We need to initialize result to the other range rather than + // initializing it to EVERYTHING_RANGE to handle the IS NULL case. + // Otherwise EVERYTHING_RANGE intersected below with NULL_RANGE + // becomes an EMPTY_RANGE. + if (range == null) { + range = otherRange; + } + KeyRange result = range; + ImmutableBytesWritable ptr = context.getTempPtr(); + RowKeySchema rowKeySchema = table.getRowKeySchema(); + int minSpan = rowKeySchema.computeMinSpan(pkPos, result, ptr); + int otherMinSpan = rowKeySchema.computeMinSpan(pkPos, otherRange, ptr); + KeyRange otherClippedRange = otherRange; + KeyRange clippedRange = result; + if ( + minSpan != otherMinSpan && result != KeyRange.EVERYTHING_RANGE + && otherRange != KeyRange.EVERYTHING_RANGE + ) { + if (otherMinSpan > minSpan) { + otherClippedRange = rowKeySchema.clipLeft(pkPos, otherRange, minSpan, ptr); + } else if (minSpan > otherMinSpan) { + clippedRange = rowKeySchema.clipLeft(pkPos, result, otherMinSpan, ptr); + } + } + + // intersect result with otherRange + result = clippedRange.intersect(otherClippedRange); + if (result == KeyRange.EMPTY_RANGE) { + return result; + } + if (minSpan != otherMinSpan) { + // If trailing ranges are of different spans, intersect them at the common + // span and add remaining part of range used to trailing ranges + // Without the special case for single key values, the trailing ranges + // code doesn't work correctly for WhereOptimizerTest.testMultiSlotTrailingIntersect() + if (result.isSingleKey() && !(range.isSingleKey() && otherRange.isSingleKey())) { + int trailingPkPos = pkPos + Math.min(minSpan, otherMinSpan); + KeyRange trailingRange = getTrailingRange(rowKeySchema, pkPos, + minSpan > otherMinSpan ? range : otherRange, result, ptr); + trailingRanges[trailingPkPos] = trailingRanges[trailingPkPos].intersect(trailingRange); + } else { + // Add back clipped part of range + if (otherMinSpan > minSpan) { + result = concatSuffix(result, otherRange); + } else if (minSpan > otherMinSpan) { + result = concatSuffix(result, range); + } + } + } + return result; + } - public KeySlot getSlot() { - if (!hasAny()) return null; - return slots.getSlots().get(slotIndex); - } + private static KeyRange concatSuffix(KeyRange result, KeyRange otherRange) { + byte[] lowerRange = result.getLowerRange(); + byte[] clippedLowerRange = lowerRange; + byte[] fullLowerRange = otherRange.getLowerRange(); + if (!result.lowerUnbound() && Bytes.startsWith(fullLowerRange, clippedLowerRange)) { + lowerRange = fullLowerRange; + } + byte[] upperRange = result.getUpperRange(); + byte[] clippedUpperRange = upperRange; + byte[] fullUpperRange = otherRange.getUpperRange(); + if (!result.lowerUnbound() && Bytes.startsWith(fullUpperRange, clippedUpperRange)) { + upperRange = fullUpperRange; + } + if (lowerRange == clippedLowerRange && upperRange == clippedUpperRange) { + return result; + } + return KeyRange.getKeyRange(lowerRange, result.isLowerInclusive(), upperRange, + result.isUpperInclusive()); + } - public KeyRange getRange() { - if (!hasAny()) return null; - return getSlot().getKeyRanges().get(rangeIndex); - } + /** + * Intersects an RVC that starts at pkPos with an overlapping range that starts at otherPKPos. 
+ * For example, ((A, B) - (J, K)) intersected with (F - *) would return ((A,F) - (J, K)) ((A, B) + * - (J, K)) intersected with (M - P) would return (A-J) since both of the trailing part of the + * RVC, B and K, do not intersect with B and K. + * @param result an RVC expression starting from pkPos and with length of at least + * otherPKPos - pkPos. + * @param pkPos the PK position of the leading part of the RVC expression + * @param otherRange the other range to intersect with the overlapping part of the RVC. + * @param otherPKPos the PK position of the leading part of the other range + * @return resulting KeyRange from the intersection, potentially an empty range if the result + * RVC is a single key and the trailing part of the key does not intersect with the RVC. + */ + private KeyRange intersectTrailing(KeyRange result, int pkPos, KeyRange otherRange, + int otherPKPos) { + RowKeySchema rowKeySchema = table.getRowKeySchema(); + ImmutableBytesWritable ptr = context.getTempPtr(); + int separatorLength = + table.getPKColumns().get(otherPKPos - 1).getDataType().isFixedWidth() ? 0 : 1; + boolean lowerInclusive = result.isLowerInclusive(); + byte[] lowerRange = result.getLowerRange(); + ptr.set(lowerRange); + // Position ptr at the point at which the two ranges overlap + if (rowKeySchema.position(ptr, pkPos, otherPKPos)) { + int lowerOffset = ptr.getOffset(); + // Increase the length of the ptr to include the entire trailing bytes + ptr.set(ptr.get(), lowerOffset, lowerRange.length - lowerOffset); + byte[] trailingBytes = ptr.copyBytes(); + + // Special case for single key since single keys of different span lengths + // will never overlap. We do not need to process both the lower and upper + // ranges since they are the same. + if (result.isSingleKey() && otherRange.isSingleKey()) { + int minSpan = rowKeySchema.computeMinSpan(pkPos, result, ptr); + int otherMinSpan = rowKeySchema.computeMinSpan(otherPKPos, otherRange, ptr); + byte[] otherLowerRange; + boolean isFixedWidthAtEnd; + if (pkPos + minSpan <= otherPKPos + otherMinSpan) { + otherLowerRange = otherRange.getLowerRange(); + isFixedWidthAtEnd = + table.getPKColumns().get(pkPos + minSpan - 1).getDataType().isFixedWidth(); + } else { + otherLowerRange = trailingBytes; + trailingBytes = otherRange.getLowerRange(); + isFixedWidthAtEnd = + table.getPKColumns().get(otherPKPos + otherMinSpan - 1).getDataType().isFixedWidth(); + } + // If the otherRange starts with the overlapping trailing byte *and* we're comparing + // the entire key (i.e. not just a leading subset), then we have an intersection. 
+ if ( + Bytes.startsWith(otherLowerRange, trailingBytes) + && (isFixedWidthAtEnd || otherLowerRange.length == trailingBytes.length + || otherLowerRange[trailingBytes.length] == QueryConstants.SEPARATOR_BYTE) + ) { + return result; + } + // Otherwise, there is no overlap + return KeyRange.EMPTY_RANGE; + } + // If we're not dealing with single keys, then we can use our normal intersection + // however, if we truncate a span then we need to change exclusive to inclusive + if (otherRange.intersect(KeyRange.getKeyRange(trailingBytes)) == KeyRange.EMPTY_RANGE) { + // Exit early since the upper range is the same as the lower range + if (result.isSingleKey()) { + return KeyRange.EMPTY_RANGE; + } + ptr.set(result.getLowerRange(), 0, lowerOffset - separatorLength); + lowerRange = ptr.copyBytes(); + if (pkPos < otherPKPos && !lowerInclusive) { + lowerInclusive = true; + } + } + } + boolean upperInclusive = result.isUpperInclusive(); + byte[] upperRange = result.getUpperRange(); + ptr.set(upperRange); + if (rowKeySchema.position(ptr, pkPos, otherPKPos)) { + int upperOffset = ptr.getOffset(); + ptr.set(ptr.get(), upperOffset, upperRange.length - upperOffset); + if (otherRange.intersect(KeyRange.getKeyRange(ptr.copyBytes())) == KeyRange.EMPTY_RANGE) { + ptr.set(ptr.get(), 0, upperOffset - separatorLength); + upperRange = ptr.copyBytes(); + } + } + if (lowerRange == result.getLowerRange() && upperRange == result.getUpperRange()) { + return result; + } + KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + return range; + } - public boolean next() { - if (!hasAny()) { - return false; - } - List ranges = getSlot().getKeyRanges(); - if ((rangeIndex = (rangeIndex + 1) % ranges.size()) == 0) { - do { - if (((slotIndex = (slotIndex + 1) % slots.getSlots().size()) == 0)) { - initialize(); - wrapped = true; - return false; - } - } while (getSlot() == null || getSlot().getKeyRanges().isEmpty() || getSlot().getPKPosition() != pkPos); - } + private KeySlots orKeySlots(OrExpression orExpression, List childSlots) { + // If any children were filtered out, filter out the entire + // OR expression because we don't have enough information to + // constraint the scan start/stop key. An example would be: + // WHERE organization_id=? OR key_value_column = 'x' + // In this case, we cannot simply filter the key_value_column, + // because we end up bubbling up only the organization_id=? + // expression to form the start/stop key which is obviously wrong. + // For an OR expression, you need to be able to extract + // everything or nothing. + if (orExpression.getChildren().size() != childSlots.size()) { + return null; + } + int initialPos = (table.getBucketNum() == null ? 0 : 1) + + (this.context.getConnection().getTenantId() != null && table.isMultiTenant() ? 1 : 0) + + (table.getViewIndexId() == null ? 0 : 1); + KeySlot theSlot = null; + Set slotExtractNodes = new LinkedHashSet<>(); + int thePosition = -1; + boolean partialExtraction = false; + // TODO: Have separate list for single span versus multi span + // For multi-span, we only need to keep a single range. + List slotRanges = Lists.newArrayList(); + for (KeySlots childSlot : childSlots) { + if (childSlot == EMPTY_KEY_SLOTS) { + // TODO: can this ever happen and can we safely filter the expression tree? + continue; + } + // When we OR together expressions, we can only extract the entire OR expression + // if all sub-expressions have been completely extracted. Otherwise, we must + // leave the OR as a post filter. 
+ partialExtraction |= childSlot.isPartialExtraction(); + // TODO: Do the same optimization that we do for IN if the childSlots specify a fully + // qualified row key + for (KeySlot slot : childSlot.getSlots()) { + if (slot == null) { + continue; + } + /* + * If we see a different PK column than before, we can't optimize it because our + * SkipScanFilter only handles top level expressions that are ANDed together (where in the + * same column expressions may be ORed together). For example, WHERE a=1 OR b=2 cannot be + * handled, while WHERE (a=1 OR a=2) AND (b=2 OR b=3) can be handled. TODO: We could + * potentially handle these cases through multiple, nested SkipScanFilters, where each OR + * expression is handled by its own SkipScanFilter and the outer one increments the child + * ones and picks the one with the smallest key. + */ + if (thePosition == -1) { + theSlot = slot; + thePosition = slot.getPKPosition(); + } else if (thePosition != slot.getPKPosition()) { + return null; + } + slotExtractNodes.addAll(slot.getKeyPart().getExtractNodes()); + slotRanges.addAll(slot.getKeyRanges()); + } + } + + if (thePosition == -1) { + return null; + } + if (theSlot == null) { + theSlot = new KeySlot( + new BaseKeyPart(table, table.getPKColumns().get(initialPos), slotExtractNodes), + initialPos, 1, EVERYTHING_RANGES, null); + } + return newKeyParts(theSlot, + partialExtraction + ? slotExtractNodes + : new LinkedHashSet<>(Collections. singletonList(orExpression)), + slotRanges.isEmpty() ? EVERYTHING_RANGES : KeyRange.coalesce(slotRanges)); + } - return true; - } - } - } + private final PTable table; + private final StatementContext context; - /** - * Ands together an arbitrary set of compiled expressions (represented as a list of KeySlots) - * by intersecting each unique combination among the childSlots. - * @param andExpression expressions being anded together - * @param childSlots compiled form of child expressions being anded together. - * @return - */ - private KeySlots andKeySlots(AndExpression andExpression, List childSlots) { + public KeyExpressionVisitor(StatementContext context, PTable table) { + this.context = context; + this.table = table; + } - if(childSlots.isEmpty()) { - return null; - } - // Exit early if it's already been determined that one of the child slots cannot - // possibly be true. - boolean partialExtraction = andExpression.getChildren().size() != childSlots.size(); - - int nChildSlots = childSlots.size(); - for (int i = 0; i < nChildSlots; i++) { - KeySlots childSlot = childSlots.get(i); - if (childSlot == EMPTY_KEY_SLOTS) { - return EMPTY_KEY_SLOTS; - } - // If any child slots represent partially extracted expressions, then carry - // that forward. An example of a partially extracted expression would be a - // RVC of (K1, K2, NonK3) in which only leading PK columns are extracted - // from the RVC. - partialExtraction |= childSlot.isPartialExtraction(); - } - boolean mayExtractNodes = true; - ImmutableBytesWritable ptr = context.getTempPtr(); - RowKeySchema rowKeySchema = table.getRowKeySchema(); - int nPkColumns = table.getPKColumns().size(); - KeySlot[] keySlotArray = new KeySlot[nPkColumns]; - int initPkPos = (table.getBucketNum() ==null ? 0 : 1) + (this.context.getConnection().getTenantId() != null && table.isMultiTenant() ? 1 : 0) + (table.getViewIndexId() == null ? 
0 : 1); - - List>> slotsTrailingRanges = Lists.newArrayListWithExpectedSize(nPkColumns); - // Process all columns being ANDed in position order to guarantee - // we have all information for leading PK columns before we attempt - // to intersect them. For example: - // (A, B, C) >= (1, 2, 3) AND (B, C) < (4, 5) AND A = 1 - // will processing slot 0 (i.e PK column A) across all children first, - // followed by slot 1 (PK column B), and finally slot 2 (C). This is - // done because we carry forward any constraints from preceding PK - // columns which may impact following PK columns. In the above example - // we'd carry forward that (B,C) >= (2,3) since we know that A is 1. - for (int pkPos = initPkPos; pkPos < nPkColumns; pkPos++) { - SlotsIterator iterator = new SlotsIterator(childSlots, pkPos); - OrderPreserving orderPreserving = null; - Set visitedKeyParts = Sets.newHashSet(); - Set extractNodes = new LinkedHashSet<>(); - List keyRanges = Lists.newArrayList(); - // This is the information carried forward as we process in PK order. - // It's parallel with the list of keyRanges. - List trailingRangesList = Lists.newArrayList(); - KeyRange result = null; - TrailingRangeIterator trailingRangeIterator = new TrailingRangeIterator(initPkPos, pkPos, slotsTrailingRanges); - // Iterate through all combinations (i.e. constraints) for the PK slot being processed. - // For example, with (A = 1 OR A = 2) AND (A,B) > (1,2) AND C = 3, we'd process the - // following two combinations: - // A=1,(A,B) > (1,2) - // A=2,(A,B) > (1,2) - // If we have no constraints for a PK, then we still must iterate through the information - // that may have been rolled up based on the processing of previous PK slots. For example, - // in the above ANDed expressions, we have no constraint on B, but we would end up with - // rolled up information based on the B part of the (A,B) constraint. - while (iterator.next() || (trailingRangeIterator.hasNext() && result != KeyRange.EMPTY_RANGE)) { - result = null; - KeyRange[] trailingRanges = newTrailingRange(); - for (int i = 0; i < nChildSlots && result != KeyRange.EMPTY_RANGE; i++) { - KeySlot slot = iterator.getSlot(i); - // Rollup the order preserving and concatenate the extracted expressions. - // Extracted expressions end up being removed from the AND expression at - // the top level call (pushKeyExpressionsToScan) with anything remaining - // ending up as a Filter (rather than contributing to the start/stop row - // of the scan. - if (slot != null) { - KeyRange otherRange = iterator.getRange(i); - KeyRange range = result; - if (slot.getOrderPreserving() != null) { - orderPreserving = slot.getOrderPreserving().combine(orderPreserving); - } - // Extract once per iteration, when there are large number - // of OR clauses (for e.g N > 100k). - // The extractNodes.addAll method can get called N times. - if (visitedKeyParts.add(slot.getKeyPart()) && slot.getKeyPart().getExtractNodes() != null) { - extractNodes.addAll(slot.getKeyPart().getExtractNodes()); - } - // Keep a running intersection of the ranges we see. Note that the - // ranges are derived from constants appearing on the RHS of a comparison - // expression. For example, the expression A > 5 would produce a keyRange - // of (5, *) for slot 0 (assuming A is the leading PK column) If the result - // ends up as an empty key, that combination is ruled out. This is essentially - // doing constant reduction. 
- result = intersectRanges(pkPos, range, otherRange, trailingRanges); - } - } + @Override + public Iterator visitEnter(CoerceExpression node) { + return node.getChildren().iterator(); + } - if (result != KeyRange.EMPTY_RANGE) { - Map> results = Maps.newHashMap(); - trailingRangeIterator.init(); - // Process all constraints that have been rolled up from previous - // processing of PK slots. This occurs for RVCs which span PK slots - // in which the leading part of the RVC is determined to be equal - // to a constant on the RHS. - while (trailingRangeIterator.hasNext()) { - // Loop through all combinations of values for all previously - // calculated slots. - do { - // Loop through all combinations of range constraints for the - // current combinations of values. If no valid combinations - // are found, we can rule out the result. We can also end up - // modifying the result if it has an intersection with the - // range constraints. - do { - KeyRange priorTrailingRange = trailingRangeIterator.getRange(); - if (priorTrailingRange != KeyRange.EVERYTHING_RANGE) { - KeyRange[] intTrailingRanges = Arrays.copyOf(trailingRanges, trailingRanges.length); - // Intersect the current result with each range constraint. We essentially - // rule out the result when we find a constraint that has no intersection - KeyRange intResult = intersectRanges(pkPos, result, priorTrailingRange, intTrailingRanges); - if (intResult != KeyRange.EMPTY_RANGE) { - addResult(intResult, intTrailingRanges, results); - } - } - } while (trailingRangeIterator.nextTrailingRange()); - } while (trailingRangeIterator.nextRange()); - } - if (results.isEmpty() && result != null) { // No trailing range constraints - keyRanges.add(result); - trailingRangesList.add(trailingRanges); - } else { - mayExtractNodes &= results.size() <= 1; - for (Map.Entry> entry : results.entrySet()) { - // Add same KeyRange with each KeyRange[] since the two lists are parallel - for (KeyRange[] trailingRange : entry.getValue()) { - keyRanges.add(entry.getKey()); - trailingRangesList.add(trailingRange); - } - } - } - } - } + @Override + public KeySlots visitLeave(CoerceExpression node, List childParts) { + if (childParts.isEmpty()) { + return null; + } + return newCoerceKeyPart(childParts.get(0).getSlots().get(0), node); + } - if (result == null && keyRanges.isEmpty()) { - slotsTrailingRanges.add(Collections.>emptyList()); - } else { - // If we encountered a result for this slot and - // there are no ranges, this is the degenerate case. - if (keyRanges.isEmpty()) { - return EMPTY_KEY_SLOTS; - } - // Similar to KeyRange.coalesce(), except we must combine together - // any rolled up constraints (as a list of KeyRanges) for a - // particular value (as they're coalesced together). We maintain - // these KeyRange constraints as a parallel list between keyRanges - // and trailingRangesList. - keyRanges = coalesceKeyRangesAndTrailingRanges(keyRanges, trailingRangesList, slotsTrailingRanges); - int maxSpan = 1; - for (KeyRange aRange : keyRanges) { - int span = rowKeySchema.computeMaxSpan(pkPos, aRange, context.getTempPtr()); - if (span > maxSpan) { - maxSpan = span; - } - } - keySlotArray[pkPos] = new KeySlot( - new BaseKeyPart(table, table.getPKColumns().get(pkPos), mayExtractNodes ? 
extractNodes : Collections.emptySet()), - pkPos, - maxSpan, - keyRanges, - orderPreserving); - } - } + @Override + public Iterator visitEnter(AndExpression node) { + return node.getChildren().iterator(); + } - // Filters trailing part of RVC based on ranges from PK columns after the one we're - // currently processing that may overlap with this range. For example, with a PK - // columns A,B,C and a range of A from [(1,2,3) - (4,5,6)] and B from (6-*), we - // can filter the trailing part of the RVC for A, because the trailing part of - // the RVC (2,3)-(5,6) does not intersect with (6-*). By removing the trailing - // part of the RVC, we end up with a range of A from [1-4] and B from (6-*) which - // enables us to use a skip scan. - for (int i = 0; i < keySlotArray.length; i++) { - KeySlot keySlot = keySlotArray[i]; - if (keySlot == null) continue; - int pkSpan = keySlot.getPKSpan(); - int pkPos = keySlot.getPKPosition(); - boolean slotWasIntersected = false; - List keyRanges = keySlot.getKeyRanges(); - List slotTrimmedResults = Lists.newArrayListWithExpectedSize(keyRanges.size()); - for (KeyRange result : keyRanges) { - boolean resultWasIntersected = false; - Set trimmedResults = Sets.newHashSetWithExpectedSize(keyRanges.size()); - for (int trailingPkPos = pkPos+1; trailingPkPos < pkPos+pkSpan && trailingPkPos < nPkColumns; trailingPkPos++) { - KeySlot nextKeySlot = keySlotArray[trailingPkPos]; - if (nextKeySlot == null) continue; - for (KeyRange trailingRange : nextKeySlot.getKeyRanges()) { - resultWasIntersected = true; - KeyRange intResult = intersectTrailing(result, pkPos, trailingRange, trailingPkPos); - if (intResult != KeyRange.EMPTY_RANGE) { - trimmedResults.add(intResult); - } - } - } - if (resultWasIntersected) { - slotWasIntersected = true; - slotTrimmedResults.addAll(trimmedResults); - mayExtractNodes &= trimmedResults.size() <= 1; - } else { - slotTrimmedResults.add(result); - } - } - if (slotTrimmedResults.isEmpty()) { - return EMPTY_KEY_SLOTS; - } - if (slotWasIntersected) { - // Re-coalesce the ranges and recalc the max span since the ranges may have changed - slotTrimmedResults = KeyRange.coalesce(slotTrimmedResults); - pkSpan = 1; - for (KeyRange trimmedResult : slotTrimmedResults) { - pkSpan = Math.max(pkSpan, rowKeySchema.computeMaxSpan(pkPos, trimmedResult, ptr)); - } - } - - Set extractNodes = mayExtractNodes ? - keySlotArray[pkPos].getKeyPart().getExtractNodes() : new LinkedHashSet<>(); - keySlotArray[pkPos] = new KeySlot( - new BaseKeyPart(table, table.getPKColumns().get(pkPos), extractNodes), - pkPos, - pkSpan, - slotTrimmedResults, - keySlotArray[pkPos].getOrderPreserving()); - } - List keySlots = Arrays.asList(keySlotArray); - // If we have a salt column, skip that slot because - // they'll never be an expression that uses it directly. - keySlots = keySlots.subList(initPkPos, keySlots.size()); - return new MultiKeySlot(keySlots, partialExtraction); - } - - private KeyRange[] newTrailingRange() { - KeyRange[] trailingRanges = new KeyRange[table.getPKColumns().size()]; - for (int i = 0; i < trailingRanges.length; i++) { - trailingRanges[i] = KeyRange.EVERYTHING_RANGE; - } - return trailingRanges; - } - - private static void addResult(KeyRange result, KeyRange[] trailingRange, Map> results) { - List trailingRanges = Lists.newArrayList(trailingRange); - List priorTrailingRanges = results.put(result, trailingRanges); - if (priorTrailingRanges != null) { - // This is tricky case. 
We may have multiple possible values based on the rolled up range - // constraints from previous slots. We track unique ranges and concatenate together the - // trailing range data. If there's more than one element in the set (i.e. more than one - // possible result), we'll end up have more combinations than there actually are because - // the constraint only apply for a single value, not for *all* combinations (which is a - // limitation of our representation derived from what can be handled by our SkipScanFilter). - // For example, if we we've gathered these ranges so far in a three PK table: (1,2), (A,B) - // and have X as a constraint for value A and Y as a constraint for value B, we have the - // following possible combinations: 1AX, 2AX, 1BY, 2BY. However, our SkipScanFilter only - // supports identifying combinations for *all* combinations of (1,2),(A,B),(X,Y) or - // AX, 1AY, 1BX, 1BY, 2AX, 2AY, 2BX, 2BY. See WhereOptimizerTest.testNotRepresentableBySkipScan() - // for an example. - trailingRanges.addAll(priorTrailingRanges); - } - } - - private List coalesceKeyRangesAndTrailingRanges(List keyRanges, - List trailingRangesList, List>> slotsTrailingRanges) { - List>> pairs = coalesce(keyRanges, trailingRangesList); - List> trailingRanges = Lists.newArrayListWithExpectedSize(pairs.size()); - ListcoalescedKeyRanges = Lists.newArrayListWithExpectedSize(pairs.size()); - for (Pair> pair : pairs) { - coalescedKeyRanges.add(pair.getFirst()); - trailingRanges.add(pair.getSecond()); - } - slotsTrailingRanges.add(trailingRanges); - return coalescedKeyRanges; - } - - public static final Comparator>> KEY_RANGE_PAIR_COMPARATOR = new Comparator>>() { - @Override public int compare(Pair> o1, Pair> o2) { - return KeyRange.COMPARATOR.compare(o1.getFirst(), o2.getFirst()); - } - }; - - private static boolean isEverythingRanges(KeyRange[] ranges) { - for (KeyRange range : ranges) { - if (range != KeyRange.EVERYTHING_RANGE) { - return false; - } - } - return true; - } - - private static List concat(List list1, List list2) { - if (list1.size() == 1 && isEverythingRanges(list1.get(0))) { - if (list2.size() == 1 && isEverythingRanges(list1.get(0))) { - return Collections.emptyList(); - } - return list2; - } - if (list2.size() == 1 && isEverythingRanges(list2.get(0))) { - return list1; - } - - List newList = Lists.newArrayListWithExpectedSize(list1.size()+list2.size()); - newList.addAll(list1); - newList.addAll(list2); - return newList; - } + @Override + public KeySlots visitLeave(AndExpression node, List l) { + KeySlots keyExpr = andKeySlots(node, l); + return keyExpr; + } - /** - * Similar to KeyRange.coelesce, but con - */ - @NonNull - public static List>> coalesce(List keyRanges, List trailingRangesList) { - List>> tmp = Lists.newArrayListWithExpectedSize(keyRanges.size()); - int nKeyRanges = keyRanges.size(); - for (int i = 0; i < nKeyRanges; i++) { - KeyRange keyRange = keyRanges.get(i); - KeyRange[] trailingRange = trailingRangesList.get(i); - Pair> pair = new Pair>(keyRange,Lists.newArrayList(trailingRange)); - tmp.add(pair); - } - Collections.sort(tmp, KEY_RANGE_PAIR_COMPARATOR); - List>> tmp2 = Lists.>>newArrayListWithExpectedSize(tmp.size()); - Pair> range = tmp.get(0); - for (int i=1; i> otherRange = tmp.get(i); - KeyRange intersect = range.getFirst().intersect(otherRange.getFirst()); - if (KeyRange.EMPTY_RANGE == intersect) { - tmp2.add(range); - range = otherRange; - } else { - KeyRange newRange = range.getFirst().union(otherRange.getFirst()); - range = new 
Pair>(newRange,concat(range.getSecond(),otherRange.getSecond())); - } - } - tmp2.add(range); - List>> tmp3 = Lists.>>newArrayListWithExpectedSize(tmp2.size()); - range = tmp2.get(0); - for (int i=1; i> otherRange = tmp2.get(i); - assert !range.getFirst().upperUnbound(); - assert !otherRange.getFirst().lowerUnbound(); - if (range.getFirst().isUpperInclusive() != otherRange.getFirst().isLowerInclusive() - && Bytes.equals(range.getFirst().getUpperRange(), otherRange.getFirst().getLowerRange())) { - KeyRange newRange = KeyRange.getKeyRange( - range.getFirst().getLowerRange(), range.getFirst().isLowerInclusive(), - otherRange.getFirst().getUpperRange(), otherRange.getFirst().isUpperInclusive()); - range = new Pair>(newRange,concat(range.getSecond(),otherRange.getSecond())); - } else { - tmp3.add(range); - range = otherRange; - } - } - tmp3.add(range); + @Override + public Iterator visitEnter(OrExpression node) { + return node.getChildren().iterator(); + } - return tmp3; - } + @Override + public KeySlots visitLeave(OrExpression node, List l) { + KeySlots keySlots = orKeySlots(node, l); + if (keySlots == null) { + return null; + } + return keySlots; + } - /** - * - * Iterates over all unique combinations of the List representing - * the constraints from previous slot positions. For example, if we have - * a RVC of (A,B) = (2,1), then if A=2, we know that B must be 1. - * - */ - static class TrailingRangeIterator { - private final List>> slotTrailingRangesList; - private final int[] rangePos; - private final int[] trailingRangePos; - private final int initPkPos; - private final int pkPos; - private int trailingRangePosIndex; - private int rangePosIndex; - private boolean hasMore = true; - - TrailingRangeIterator (int initPkPos, int pkPos, List>> slotsTrailingRangesList) { - this.slotTrailingRangesList = slotsTrailingRangesList; - int nSlots = pkPos - initPkPos; - rangePos = new int[nSlots]; - trailingRangePos = new int[nSlots]; - this.initPkPos = initPkPos; - this.pkPos = pkPos; - init(); - } + @Override + public Iterator visitEnter(RowValueConstructorExpression node) { + return node.getChildren().iterator(); + } - public void init() { - Arrays.fill(rangePos, 0); - Arrays.fill(trailingRangePos, 0); - rangePosIndex = rangePos.length - 1; - trailingRangePosIndex = trailingRangePos.length - 1; - this.hasMore = pkPos > initPkPos && skipEmpty(); - } + @Override + public KeySlots visitLeave(RowValueConstructorExpression node, List childSlots) { + return newRowValueConstructorKeyParts(node, childSlots); + } - public boolean hasNext() { - return hasMore && skipEmpty(); - } + @Override + public KeySlots visit(RowKeyColumnExpression node) { + PColumn column = table.getPKColumns().get(node.getPosition()); + return new SingleKeySlot( + new BaseKeyPart(table, column, + new LinkedHashSet<>(Collections. 
singletonList(node))), + node.getPosition(), 1, EVERYTHING_RANGES); + } - public KeyRange getRange() { - if (!hasMore) { - throw new NoSuchElementException(); - } - KeyRange priorTrailingRange = KeyRange.EVERYTHING_RANGE; - for (int priorPkPos = initPkPos; priorPkPos < pkPos; priorPkPos++) { - List>trailingKeyRangesList = slotTrailingRangesList.get(priorPkPos-initPkPos); - if (!trailingKeyRangesList.isEmpty()) { - List slotTrailingRanges = trailingKeyRangesList.get(rangePos[priorPkPos-initPkPos]); - if (!slotTrailingRanges.isEmpty()) { - KeyRange[] slotTrailingRange = slotTrailingRanges.get(trailingRangePos[priorPkPos-initPkPos]); - priorTrailingRange = priorTrailingRange.intersect(slotTrailingRange[pkPos]); - } - } - } + @Override + public Iterator visitEnter(ComparisonExpression node) { + Expression rhs = node.getChildren().get(1); + if (!rhs.isStateless() || node.getFilterOp() == CompareOperator.NOT_EQUAL) { + return Collections.emptyIterator(); + } + return Iterators.singletonIterator(node.getChildren().get(0)); + } - return priorTrailingRange; - } + @Override + public KeySlots visitLeave(ComparisonExpression node, List childParts) { + // Delay adding to extractedNodes, until we're done traversing, + // since we can't yet tell whether or not the PK column references + // are contiguous + if (childParts.isEmpty()) { + return null; + } + Expression rhs = node.getChildren().get(1); + KeySlots childSlots = childParts.get(0); + KeySlot childSlot = childSlots.getSlots().get(0); + KeyPart childPart = childSlot.getKeyPart(); + // SortOrder sortOrder = childPart.getColumn().getSortOrder(); + CompareOperator op = node.getFilterOp(); + // CompareOperator op = sortOrder.transform(node.getFilterOp()); + KeyRange keyRange = childPart.getKeyRange(op, rhs); + return newKeyParts(childSlot, node, keyRange); + } - private boolean skipEmptyTrailingRanges() { - while (trailingRangePosIndex >= 0 && - (slotTrailingRangesList.get(trailingRangePosIndex).isEmpty() - || slotTrailingRangesList.get(trailingRangePosIndex).get(rangePos[trailingRangePosIndex]).isEmpty())) { - trailingRangePosIndex--; - } - if (trailingRangePosIndex >= 0) { - return true; - } - return false; - } + // TODO: consider supporting expression substitution in the PK for pre-joined tables + // You'd need to register the expression for a given PK and substitute with a column + // reference for this during ExpressionBuilder. + @Override + public Iterator visitEnter(ScalarFunction node) { + int index = node.getKeyFormationTraversalIndex(); + if (index < 0) { + return Collections.emptyIterator(); + } + return Iterators.singletonIterator(node.getChildren().get(index)); + } - private boolean skipEmptyRanges() { - trailingRangePosIndex = trailingRangePos.length - 1; - while (rangePosIndex >= 0 && - (slotTrailingRangesList.get(rangePosIndex).isEmpty())) { - rangePosIndex--; - } - return rangePosIndex >= 0; - } + @Override + public KeySlots visitLeave(ScalarFunction node, List childParts) { + if (childParts.isEmpty()) { + return null; + } + return newScalarFunctionKeyPart(childParts.get(0).getSlots().get(0), node); + } - private boolean skipEmpty() { - if (!hasMore || slotTrailingRangesList.isEmpty() || rangePosIndex < 0) { - return hasMore=false; - } - do { - if (skipEmptyTrailingRanges()) { - return true; - } - } while (skipEmptyRanges()); - return hasMore = rangePosIndex >= 0; - } + @Override + public Iterator visitEnter(LikeExpression node) { + // TODO: can we optimize something that starts with '_' like this: foo LIKE '_a%' ? 
+ if (node.getLikeType() == LikeType.CASE_INSENSITIVE || // TODO: remove this when we optimize + // ILIKE + !(node.getChildren().get(1) instanceof LiteralExpression) || node.startsWithWildcard() + ) { + return Collections.emptyIterator(); + } + + return Iterators.singletonIterator(node.getChildren().get(0)); + } - public boolean nextRange() { - trailingRangePosIndex = trailingRangePos.length - 1; - while (rangePosIndex >= 0 && - (slotTrailingRangesList.get(rangePosIndex).isEmpty() - || (rangePos[rangePosIndex] = (rangePos[rangePosIndex] + 1) - % slotTrailingRangesList.get(rangePosIndex).size()) == 0)) { - rangePosIndex--; - } - return rangePosIndex >= 0; - } + @Override + public KeySlots visitLeave(LikeExpression node, List childParts) { + // TODO: optimize ILIKE by creating two ranges for the literal prefix: one with lower case, + // one with upper case + if (childParts.isEmpty()) { + return null; + } + // for SUBSTR(,1,3) LIKE 'foo%' + KeySlots childSlots = childParts.get(0); + KeySlot childSlot = childSlots.getSlots().get(0); + final String startsWith = node.getLiteralPrefix(); + // TODO: is there a case where we'd need to go through the childPart to calculate the key + // range? + PColumn column = childSlot.getKeyPart().getColumn(); + PDataType type = column.getDataType(); + byte[] key = PVarchar.INSTANCE.toBytes(startsWith, SortOrder.ASC); + // If the expression is an equality expression against a fixed length column + // and the key length doesn't match the column length, the expression can + // never be true. + // An zero length byte literal is null which can never be compared against as true + Expression firstChild = node.getChildren().get(0); + Integer childNodeFixedLength = + firstChild.getDataType().isFixedWidth() ? firstChild.getMaxLength() : null; + if (childNodeFixedLength != null && key.length > childNodeFixedLength) { + return EMPTY_KEY_SLOTS; + } + byte[] lowerRange = key; + byte[] upperRange = ByteUtil.nextKey(key); + Integer columnFixedLength = column.getMaxLength(); + if (type.isFixedWidth()) { + if (columnFixedLength != null) { // Sanity check - should always be non null + // Always use minimum byte to fill as otherwise our key is bigger + // that it should be when the sort order is descending. + lowerRange = type.pad(lowerRange, columnFixedLength, SortOrder.ASC); + upperRange = type.pad(upperRange, columnFixedLength, SortOrder.ASC); + } + } + KeyRange range = type.getKeyRange(lowerRange, true, upperRange, false, SortOrder.ASC); + // Only extract LIKE expression if pattern ends with a wildcard and everything else was + // extracted + return newKeyParts(childSlot, node.endsWithOnlyWildcard() ? 
node : null, range); + } - public boolean nextTrailingRange() { - while (trailingRangePosIndex >= 0 && - (slotTrailingRangesList.get(trailingRangePosIndex).isEmpty() - || slotTrailingRangesList.get(trailingRangePosIndex).get(rangePos[trailingRangePosIndex]).isEmpty() - || (trailingRangePos[trailingRangePosIndex] = (trailingRangePos[trailingRangePosIndex] + 1) - % slotTrailingRangesList.get(trailingRangePosIndex).get(rangePos[trailingRangePosIndex]).size()) == 0)) { - trailingRangePosIndex--; - } - if (trailingRangePosIndex >= 0) { - return true; - } - return false; - } - } + @Override + public Iterator visitEnter(InListExpression node) { + return Iterators.singletonIterator(node.getChildren().get(0)); + } - private KeyRange intersectRanges(int pkPos, KeyRange range, KeyRange otherRange, KeyRange[] trailingRanges) { - // We need to initialize result to the other range rather than - // initializing it to EVERYTHING_RANGE to handle the IS NULL case. - // Otherwise EVERYTHING_RANGE intersected below with NULL_RANGE - // becomes an EMPTY_RANGE. - if (range == null) { - range = otherRange; - } - KeyRange result = range; - ImmutableBytesWritable ptr = context.getTempPtr(); - RowKeySchema rowKeySchema = table.getRowKeySchema(); - int minSpan = rowKeySchema.computeMinSpan(pkPos, result, ptr); - int otherMinSpan = rowKeySchema.computeMinSpan(pkPos, otherRange, ptr); - KeyRange otherClippedRange = otherRange; - KeyRange clippedRange = result; - if (minSpan != otherMinSpan && result != KeyRange.EVERYTHING_RANGE && otherRange != KeyRange.EVERYTHING_RANGE) { - if (otherMinSpan > minSpan) { - otherClippedRange = rowKeySchema.clipLeft(pkPos, otherRange, minSpan, ptr); - } else if (minSpan > otherMinSpan) { - clippedRange = rowKeySchema.clipLeft(pkPos, result, otherMinSpan, ptr); - } - } + @Override + public KeySlots visitLeave(InListExpression node, List childParts) { + if (childParts.isEmpty()) { + return null; + } + + List keyExpressions = node.getKeyExpressions(); + Set ranges = Sets.newHashSetWithExpectedSize(keyExpressions.size()); + KeySlot childSlot = childParts.get(0).getSlots().get(0); + KeyPart childPart = childSlot.getKeyPart(); + // Handles cases like WHERE substr(foo,1,3) IN ('aaa','bbb') + for (Expression key : keyExpressions) { + KeyRange range = childPart.getKeyRange(CompareOperator.EQUAL, key); + if (range == null) { + return null; + } + if (range != KeyRange.EMPTY_RANGE) { // null means it can't possibly be in range + ranges.add(range); + } + } + return newKeyParts(childSlot, node, new ArrayList(ranges)); + } - // intersect result with otherRange - result = clippedRange.intersect(otherClippedRange); - if (result == KeyRange.EMPTY_RANGE) { - return result; - } - if (minSpan != otherMinSpan) { - // If trailing ranges are of different spans, intersect them at the common - // span and add remaining part of range used to trailing ranges - // Without the special case for single key values, the trailing ranges - // code doesn't work correctly for WhereOptimizerTest.testMultiSlotTrailingIntersect() - if (result.isSingleKey() && !(range.isSingleKey() && otherRange.isSingleKey())) { - int trailingPkPos = pkPos + Math.min(minSpan, otherMinSpan); - KeyRange trailingRange = getTrailingRange(rowKeySchema, pkPos, minSpan > otherMinSpan ? 
range : otherRange, result, ptr); - trailingRanges[trailingPkPos] = trailingRanges[trailingPkPos].intersect(trailingRange); - } else { - // Add back clipped part of range - if (otherMinSpan > minSpan) { - result = concatSuffix(result, otherRange); - } else if (minSpan > otherMinSpan) { - result = concatSuffix(result, range); - } - } - } - return result; - } + @Override + public Iterator visitEnter(IsNullExpression node) { + return Iterators.singletonIterator(node.getChildren().get(0)); + } - private static KeyRange concatSuffix(KeyRange result, KeyRange otherRange) { - byte[] lowerRange = result.getLowerRange(); - byte[] clippedLowerRange = lowerRange; - byte[] fullLowerRange = otherRange.getLowerRange(); - if (!result.lowerUnbound() && Bytes.startsWith(fullLowerRange, clippedLowerRange)) { - lowerRange = fullLowerRange; - } - byte[] upperRange = result.getUpperRange(); - byte[] clippedUpperRange = upperRange; - byte[] fullUpperRange = otherRange.getUpperRange(); - if (!result.lowerUnbound() && Bytes.startsWith(fullUpperRange, clippedUpperRange)) { - upperRange = fullUpperRange; - } - if (lowerRange == clippedLowerRange && upperRange == clippedUpperRange) { - return result; - } - return KeyRange.getKeyRange(lowerRange, result.isLowerInclusive(), upperRange, result.isUpperInclusive()); - } - - /** - * Intersects an RVC that starts at pkPos with an overlapping range that starts at otherPKPos. - * For example, ((A, B) - (J, K)) intersected with (F - *) would return ((A,F) - (J, K)) - * ((A, B) - (J, K)) intersected with (M - P) would return (A-J) since both of the trailing - * part of the RVC, B and K, do not intersect with B and K. - * @param result an RVC expression starting from pkPos and with length of at least otherPKPos - pkPos. - * @param pkPos the PK position of the leading part of the RVC expression - * @param otherRange the other range to intersect with the overlapping part of the RVC. - * @param otherPKPos the PK position of the leading part of the other range - * @return resulting KeyRange from the intersection, potentially an empty range if the result RVC - * is a single key and the trailing part of the key does not intersect with the RVC. - */ - private KeyRange intersectTrailing(KeyRange result, int pkPos, KeyRange otherRange, int otherPKPos) { - RowKeySchema rowKeySchema = table.getRowKeySchema(); - ImmutableBytesWritable ptr = context.getTempPtr(); - int separatorLength = table.getPKColumns().get(otherPKPos-1).getDataType().isFixedWidth() ? 0 : 1; - boolean lowerInclusive = result.isLowerInclusive(); - byte[] lowerRange = result.getLowerRange(); - ptr.set(lowerRange); - // Position ptr at the point at which the two ranges overlap - if (rowKeySchema.position(ptr, pkPos, otherPKPos)) { - int lowerOffset = ptr.getOffset(); - // Increase the length of the ptr to include the entire trailing bytes - ptr.set(ptr.get(), lowerOffset, lowerRange.length - lowerOffset); - byte[] trailingBytes = ptr.copyBytes(); - - // Special case for single key since single keys of different span lengths - // will never overlap. We do not need to process both the lower and upper - // ranges since they are the same. 
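The single-key special case above boils the overlap test down to byte arithmetic: the trailing bytes of the longer key must be a prefix of the other key's lower range, and the match has to end at the end of that key, at a separator byte, or on a fixed-width trailing column. A minimal, JDK-only sketch of the check used in the branch that follows (the helper names and the 0x00 separator value are illustrative, not taken from the patch):

  import java.util.Arrays;

  public class TrailingOverlapSketch {
    private static final byte SEPARATOR_BYTE = 0x00; // illustrative separator value

    // True if 'key' begins with 'prefix'.
    static boolean startsWith(byte[] key, byte[] prefix) {
      return prefix.length <= key.length
        && Arrays.equals(Arrays.copyOf(key, prefix.length), prefix);
    }

    // The trailing bytes of the longer single key must be a prefix of the other
    // key's lower range, and the match must end at the key end, at a separator,
    // or on a fixed-width trailing column for the two keys to overlap.
    static boolean singleKeysOverlap(byte[] otherLowerRange, byte[] trailingBytes,
        boolean isFixedWidthAtEnd) {
      return startsWith(otherLowerRange, trailingBytes)
        && (isFixedWidthAtEnd
          || otherLowerRange.length == trailingBytes.length
          || otherLowerRange[trailingBytes.length] == SEPARATOR_BYTE);
    }
  }
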
- if (result.isSingleKey() && otherRange.isSingleKey()) { - int minSpan = rowKeySchema.computeMinSpan(pkPos, result, ptr); - int otherMinSpan = - rowKeySchema.computeMinSpan(otherPKPos, otherRange, ptr); - byte[] otherLowerRange; - boolean isFixedWidthAtEnd; - if (pkPos + minSpan <= otherPKPos + otherMinSpan) { - otherLowerRange = otherRange.getLowerRange(); - isFixedWidthAtEnd = table.getPKColumns().get(pkPos + minSpan -1).getDataType().isFixedWidth(); - } else { - otherLowerRange = trailingBytes; - trailingBytes = otherRange.getLowerRange(); - isFixedWidthAtEnd = table.getPKColumns().get(otherPKPos + otherMinSpan -1).getDataType().isFixedWidth(); - } - // If the otherRange starts with the overlapping trailing byte *and* we're comparing - // the entire key (i.e. not just a leading subset), then we have an intersection. - if (Bytes.startsWith(otherLowerRange, trailingBytes) && - (isFixedWidthAtEnd || - otherLowerRange.length == trailingBytes.length || - otherLowerRange[trailingBytes.length] == QueryConstants.SEPARATOR_BYTE)) { - return result; - } - // Otherwise, there is no overlap - return KeyRange.EMPTY_RANGE; - } - // If we're not dealing with single keys, then we can use our normal intersection - // however, if we truncate a span then we need to change exclusive to inclusive - if (otherRange.intersect(KeyRange.getKeyRange(trailingBytes)) == KeyRange.EMPTY_RANGE) { - // Exit early since the upper range is the same as the lower range - if (result.isSingleKey()) { - return KeyRange.EMPTY_RANGE; - } - ptr.set(result.getLowerRange(), 0, lowerOffset - separatorLength); - lowerRange = ptr.copyBytes(); - if(pkPos < otherPKPos && !lowerInclusive) { - lowerInclusive = true; - } - } - } - boolean upperInclusive = result.isUpperInclusive(); - byte[] upperRange = result.getUpperRange(); - ptr.set(upperRange); - if (rowKeySchema.position(ptr, pkPos, otherPKPos)) { - int upperOffset = ptr.getOffset(); - ptr.set(ptr.get(), upperOffset, upperRange.length - upperOffset); - if (otherRange.intersect(KeyRange.getKeyRange(ptr.copyBytes())) == KeyRange.EMPTY_RANGE) { - ptr.set(ptr.get(), 0, upperOffset - separatorLength); - upperRange = ptr.copyBytes(); - } - } - if (lowerRange == result.getLowerRange() && upperRange == result.getUpperRange()) { - return result; - } - KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - return range; - } - - private KeySlots orKeySlots(OrExpression orExpression, List childSlots) { - // If any children were filtered out, filter out the entire - // OR expression because we don't have enough information to - // constraint the scan start/stop key. An example would be: - // WHERE organization_id=? OR key_value_column = 'x' - // In this case, we cannot simply filter the key_value_column, - // because we end up bubbling up only the organization_id=? - // expression to form the start/stop key which is obviously wrong. - // For an OR expression, you need to be able to extract - // everything or nothing. - if (orExpression.getChildren().size() != childSlots.size()) { - return null; - } - int initialPos = (table.getBucketNum() ==null ? 0 : 1) + (this.context.getConnection().getTenantId() != null && table.isMultiTenant() ? 1 : 0) + (table.getViewIndexId() == null ? 0 : 1); - KeySlot theSlot = null; - Set slotExtractNodes = new LinkedHashSet<>(); - int thePosition = -1; - boolean partialExtraction = false; - // TODO: Have separate list for single span versus multi span - // For multi-span, we only need to keep a single range. 
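The TODO above sits next to the core restriction in orKeySlots: ORed children can only feed the skip-scan when every one of them binds the same primary-key position, in which case their ranges are simply collected and coalesced; otherwise the whole OR stays a post filter. A rough JDK-only sketch of that position check (plain ints and strings stand in for KeySlot and KeyRange; nothing here is the patch's API):

  import java.util.ArrayList;
  import java.util.List;

  public class OrSlotSketch {
    // Returns the collected ranges when every ORed child binds the same
    // primary-key position, or null when the OR cannot drive the scan key.
    static List<String> orRanges(int[] pkPositions, List<String> ranges) {
      int position = -1;
      List<String> result = new ArrayList<>();
      for (int i = 0; i < pkPositions.length; i++) {
        if (position == -1) {
          position = pkPositions[i];
        } else if (position != pkPositions[i]) {
          return null; // e.g. WHERE a = 1 OR b = 2 cannot form a start/stop key
        }
        result.add(ranges.get(i));
      }
      return result;
    }
  }
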
- List slotRanges = Lists.newArrayList(); - for (KeySlots childSlot : childSlots) { - if (childSlot == EMPTY_KEY_SLOTS) { - // TODO: can this ever happen and can we safely filter the expression tree? - continue; - } - // When we OR together expressions, we can only extract the entire OR expression - // if all sub-expressions have been completely extracted. Otherwise, we must - // leave the OR as a post filter. - partialExtraction |= childSlot.isPartialExtraction(); - // TODO: Do the same optimization that we do for IN if the childSlots specify a fully qualified row key - for (KeySlot slot : childSlot.getSlots()) { - if (slot == null) { - continue; - } - /* - * If we see a different PK column than before, we can't - * optimize it because our SkipScanFilter only handles - * top level expressions that are ANDed together (where in - * the same column expressions may be ORed together). - * For example, WHERE a=1 OR b=2 cannot be handled, while - * WHERE (a=1 OR a=2) AND (b=2 OR b=3) can be handled. - * TODO: We could potentially handle these cases through - * multiple, nested SkipScanFilters, where each OR expression - * is handled by its own SkipScanFilter and the outer one - * increments the child ones and picks the one with the smallest - * key. - */ - if (thePosition == -1) { - theSlot = slot; - thePosition = slot.getPKPosition(); - } else if (thePosition != slot.getPKPosition()) { - return null; - } - slotExtractNodes.addAll(slot.getKeyPart().getExtractNodes()); - slotRanges.addAll(slot.getKeyRanges()); - } - } + @Override + public KeySlots visitLeave(IsNullExpression node, List childParts) { + if (childParts.isEmpty()) { + return null; + } + KeySlots childSlots = childParts.get(0); + KeySlot childSlot = childSlots.getSlots().get(0); + PColumn column = childSlot.getKeyPart().getColumn(); + PDataType type = column.getDataType(); + boolean isFixedWidth = type.isFixedWidth(); + // Nothing changes for IS NULL and IS NOT NULL when DESC since + // we represent NULL the same way for ASC and DESC + if (isFixedWidth) { // if column can't be null + return node.isNegate() + ? null + : newKeyParts(childSlot, node, + type.getKeyRange(new byte[SchemaUtil.getFixedByteSize(column)], true, KeyRange.UNBOUND, + true, SortOrder.ASC)); + } else { + KeyRange keyRange = node.isNegate() ? KeyRange.IS_NOT_NULL_RANGE : KeyRange.IS_NULL_RANGE; + return newKeyParts(childSlot, node, keyRange); + } + } - if (thePosition == -1) { - return null; - } - if (theSlot == null) { - theSlot = new KeySlot(new BaseKeyPart(table, table.getPKColumns().get(initialPos), slotExtractNodes), initialPos, 1, EVERYTHING_RANGES, null); - } - return newKeyParts( - theSlot, - partialExtraction ? slotExtractNodes : new LinkedHashSet<>(Collections.singletonList(orExpression)), - slotRanges.isEmpty() ? EVERYTHING_RANGES : KeyRange.coalesce(slotRanges)); - } + /** + * Top level data structure used to drive the formation of the start/stop row of scans, + * essentially taking the expression tree of a WHERE clause and producing the ScanRanges + * instance during query compilation. + */ + public static interface KeySlots { + + /** + * List of slots that store binding of constant values for primary key columns. For example: + * WHERE pk1 = 'foo' and pk2 = 'bar' would produce two KeySlot instances that store that pk1 = + * 'foo' and pk2 = 'bar'. + */ + public List getSlots(); + + /** + * Tracks whether or not the contained KeySlot(s) contain a slot that includes only a partial + * extraction of the involved expressions. 
For example: (A AND B) in the case of A being a PK + * column and B being a KV column, the KeySlots representing the AND would return true for + * isPartialExtraction. + * @return true if a partial expression extraction was done and false otherwise. + */ + public boolean isPartialExtraction(); + } - private final PTable table; - private final StatementContext context; + /** + * Used during query compilation to represent the constant value of a primary key column based + * on expressions in the WHERE clause. These are combined together during the compilation of + * ANDs and ORs to to produce the start and stop scan range. + */ + public static final class KeySlot { + private final int pkPosition; // Position in primary key + private final int pkSpan; // Will be > 1 for RVC + private final KeyPart keyPart; // Used to produce the KeyRanges below + // Multiple ranges means values that have been ORed together + private final List keyRanges; + // If order rows returned from scan will match desired order declared in query + private final OrderPreserving orderPreserving; + + KeySlot(KeyPart keyPart, int pkPosition, int pkSpan, List keyRanges, + OrderPreserving orderPreserving) { + this.pkPosition = pkPosition; + this.pkSpan = pkSpan; + this.keyPart = keyPart; + this.keyRanges = keyRanges; + this.orderPreserving = orderPreserving; + } + + public KeyPart getKeyPart() { + return keyPart; + } + + public int getPKPosition() { + return pkPosition; + } + + public int getPKSpan() { + return pkSpan; + } + + public List getKeyRanges() { + return keyRanges; + } + + public final KeySlot concatExtractNodes(Set extractNodes) { + return new KeySlot( + new BaseKeyPart(this.getKeyPart().getTable(), this.getKeyPart().getColumn(), + SchemaUtil.concat(this.getKeyPart().getExtractNodes(), extractNodes)), + this.getPKPosition(), this.getPKSpan(), this.getKeyRanges(), this.getOrderPreserving()); + } + + public OrderPreserving getOrderPreserving() { + return orderPreserving; + } + } - public KeyExpressionVisitor(StatementContext context, PTable table) { - this.context = context; - this.table = table; - } + /** + * Implementation of KeySlots for AND and OR expressions. The {@code List } will be in + * PK order. 
+ */ + public static class MultiKeySlot implements KeySlots { + private final List childSlots; + private final boolean partialExtraction; + + private MultiKeySlot(List childSlots, boolean partialExtraction) { + this.childSlots = childSlots; + this.partialExtraction = partialExtraction; + } + + @Override + public List getSlots() { + return childSlots; + } + + @Override + public boolean isPartialExtraction() { + return partialExtraction; + } + } - @Override - public Iterator visitEnter(CoerceExpression node) { - return node.getChildren().iterator(); - } + /** + * Implementation of KeySlots for a constant value, + */ + public static class SingleKeySlot implements KeySlots { + private final List slots; + + SingleKeySlot(KeyPart part, int pkPosition, List ranges) { + this(part, pkPosition, 1, ranges); + } + + private SingleKeySlot(KeyPart part, int pkPosition, List ranges, + OrderPreserving orderPreserving) { + this(part, pkPosition, 1, ranges, orderPreserving); + } + + private SingleKeySlot(KeyPart part, int pkPosition, int pkSpan, List ranges) { + this(part, pkPosition, pkSpan, ranges, null); + } + + private SingleKeySlot(KeyPart part, int pkPosition, int pkSpan, List ranges, + OrderPreserving orderPreserving) { + this.slots = + Collections.singletonList(new KeySlot(part, pkPosition, pkSpan, ranges, orderPreserving)); + } + + @Override + public List getSlots() { + return slots; + } + + @Override + public boolean isPartialExtraction() { + return this.slots.get(0).getKeyPart().getExtractNodes().isEmpty(); + } - @Override - public KeySlots visitLeave(CoerceExpression node, List childParts) { - if (childParts.isEmpty()) { - return null; - } - return newCoerceKeyPart(childParts.get(0).getSlots().get(0), node); - } + } - @Override - public Iterator visitEnter(AndExpression node) { - return node.getChildren().iterator(); - } + public static class BaseKeyPart implements KeyPart { + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + rhs.evaluate(null, ptr); + // If the column is fixed width, fill is up to it's byte size + PDataType type = getColumn().getDataType(); + if (type.isFixedWidth()) { + Integer length = getColumn().getMaxLength(); + if (length != null) { + // Go through type to pad as the fill character depends on the type. + type.pad(ptr, length, SortOrder.ASC); + } + } + byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); + KeyRange range = ByteUtil.getKeyRange(key, rhs.getSortOrder(), op, type); + // Constants will have been inverted, so we invert them back here so that + // RVC comparisons work correctly (see PHOENIX-3383). 
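The PHOENIX-3383 note above, and the invert() call that follows, exist because a constant compared against a DESC column is stored with its bytes inverted, which reverses how the bounds sort. A hedged illustration of that effect, using one's-complement inversion purely as a stand-in for the DESC encoding:

  import java.util.Arrays;

  public class DescInvertSketch {
    // Stand-in for a DESC row-key encoding: one's complement of each byte.
    static byte[] invert(byte[] ascEncoded) {
      byte[] out = Arrays.copyOf(ascEncoded, ascEncoded.length);
      for (int i = 0; i < out.length; i++) {
        out[i] = (byte) ~out[i];
      }
      return out;
    }

    // Unsigned lexicographic comparison, as row keys are compared byte by byte.
    static int compareUnsigned(byte[] a, byte[] b) {
      for (int i = 0; i < Math.min(a.length, b.length); i++) {
        int cmp = (a[i] & 0xff) - (b[i] & 0xff);
        if (cmp != 0) {
          return cmp;
        }
      }
      return a.length - b.length;
    }

    public static void main(String[] args) {
      byte[] a = { 'a' };
      byte[] b = { 'b' };
      // ASC: 'a' sorts before 'b'; under the inverted encoding the order flips,
      // which is why a range built from inverted constants must be flipped back.
      System.out.println(compareUnsigned(a, b) < 0);                 // true
      System.out.println(compareUnsigned(invert(a), invert(b)) < 0); // false
    }
  }
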
+ if (rhs.getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; + } + + private final PTable table; + private final PColumn column; + private final Set nodes; + + private BaseKeyPart(PTable table, PColumn column, Set nodes) { + this.table = table; + this.column = column; + this.nodes = nodes; + } + + @Override + public Set getExtractNodes() { + return nodes; + } + + @Override + public PColumn getColumn() { + return column; + } + + @Override + public PTable getTable() { + return table; + } + } - @Override - public KeySlots visitLeave(AndExpression node, List l) { - KeySlots keyExpr = andKeySlots(node, l); - return keyExpr; - } + private static class CoerceKeySlot implements KeyPart { + + private final KeyPart childPart; + private final ImmutableBytesWritable ptr; + private final CoerceExpression node; + private final Set extractNodes; + + public CoerceKeySlot(KeyPart childPart, ImmutableBytesWritable ptr, CoerceExpression node, + Set extractNodes) { + this.childPart = childPart; + this.ptr = ptr; + this.node = node; + this.extractNodes = extractNodes; + } + + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + KeyRange range = childPart.getKeyRange(op, rhs); + byte[] lower = range.getLowerRange(); + if (!range.lowerUnbound()) { + ptr.set(lower); + /*** + * Do the reverse translation so we can optimize out the coerce expression For the actual + * type of the coerceBytes call, we use the node type instead of the rhs type, because for + * IN, the rhs type will be VARBINARY and no coerce will be done in that case (and we need + * it to be done). + */ + node.getChild().getDataType().coerceBytes(ptr, node.getDataType(), rhs.getSortOrder(), + SortOrder.ASC); + lower = ByteUtil.copyKeyBytesIfNecessary(ptr); + } + byte[] upper = range.getUpperRange(); + if (!range.upperUnbound()) { + ptr.set(upper); + // Do the reverse translation so we can optimize out the coerce expression + node.getChild().getDataType().coerceBytes(ptr, node.getDataType(), rhs.getSortOrder(), + SortOrder.ASC); + upper = ByteUtil.copyKeyBytesIfNecessary(ptr); + } + range = + KeyRange.getKeyRange(lower, range.isLowerInclusive(), upper, range.isUpperInclusive()); + return range; + } + + @Override + public Set getExtractNodes() { + return extractNodes; + } + + @Override + public PColumn getColumn() { + return childPart.getColumn(); + } + + @Override + public PTable getTable() { + return childPart.getTable(); + } + } - @Override - public Iterator visitEnter(OrExpression node) { - return node.getChildren().iterator(); - } + private class RowValueConstructorKeyPart implements KeyPart { + private final RowValueConstructorExpression rvc; + private final PColumn column; + private final Set nodes; + private final List childSlots; + + private RowValueConstructorKeyPart(PColumn column, RowValueConstructorExpression rvc, + int span, List childSlots) { + this.column = column; + if (span == rvc.getChildren().size()) { + this.rvc = rvc; + this.nodes = new LinkedHashSet<>(Collections.singletonList(rvc)); + this.childSlots = childSlots; + } else { + this.rvc = new RowValueConstructorExpression(rvc.getChildren().subList(0, span), + rvc.isStateless()); + this.nodes = new LinkedHashSet<>(); + this.childSlots = childSlots.subList(0, span); + } + } + + @Override + public Set getExtractNodes() { + return nodes; + } + + @Override + public PColumn getColumn() { + return column; + } + + @Override + public PTable getTable() { + return table; + } + + @Override + public KeyRange 
getKeyRange(CompareOperator op, Expression rhs) { + // With row value constructors, we need to convert the operator for any transformation we do + // on individual values + // to prevent keys from being increased to the next key as would be done for fixed width + // values. The next key is + // done to compensate for the start key (lower range) always being inclusive (thus we + // convert > to >=) and the + // end key (upper range) always being exclusive (thus we convert <= to <). + boolean usedAllOfLHS = !nodes.isEmpty(); + final CompareOperator rvcElementOp = + op == CompareOperator.LESS_OR_EQUAL ? CompareOperator.LESS + : op == CompareOperator.GREATER ? CompareOperator.GREATER_OR_EQUAL + : op; + if (op != CompareOperator.EQUAL) { + // We need to transform the comparison operator for a LHS row value constructor + // that is shorter than a RHS row value constructor when we're extracting it. + // For example: a < (1,2) is true if a = 1, so we need to switch + // the compare op to <= like this: a <= 1. Since we strip trailing nulls + // in the rvc, we don't need to worry about the a < (1,null) case. + if (usedAllOfLHS) { + if (rvc.getChildren().size() < rhs.getChildren().size()) { + if (op == CompareOperator.LESS) { + op = CompareOperator.LESS_OR_EQUAL; + } else if (op == CompareOperator.GREATER_OR_EQUAL) { + op = CompareOperator.GREATER; + } + } + } else { + // If we're not using all of the LHS, we need to expand the range on either + // side to take into account the rest of the LHS. For example: + // WHERE (pk1, pk3) > ('a',1) AND pk1 = 'a'. In this case, we'll end up + // only using (pk1) and ('a'), so if we use a > operator the expression + // would end up as degenerate since we'd have a non inclusive range for + // ('a'). By switching the operator to extend the range, we end up with + // an ('a') inclusive range which is correct. + if (rvc.getChildren().size() < rhs.getChildren().size()) { + if (op == CompareOperator.LESS) { + op = CompareOperator.LESS_OR_EQUAL; + } else if (op == CompareOperator.GREATER) { + op = CompareOperator.GREATER_OR_EQUAL; + } + } + } + } + if (!usedAllOfLHS || rvc.getChildren().size() != rhs.getChildren().size()) { + // We know that rhs was converted to a row value constructor and that it's a constant + rhs = new RowValueConstructorExpression(rhs.getChildren().subList(0, + Math.min(rvc.getChildren().size(), rhs.getChildren().size())), rhs.isStateless()); + } + /* + * Recursively transform the RHS row value constructor by applying the same logic as is done + * elsewhere during WHERE optimization: optimizing out LHS functions by applying the + * appropriate transformation to the RHS key. + */ + // Child slot iterator parallel with child expressions of the LHS row value constructor + final Iterator keySlotsIterator = childSlots.iterator(); + try { + // Call our static row value expression constructor with the current LHS row value + // constructor and + // the current RHS (which has already been coerced to match the LHS expression). We pass + // through an + // implementation of ExpressionComparabilityWrapper that transforms the RHS key to match + // the row key + // structure of the LHS column. This is essentially optimizing out the expressions on the + // LHS by + // applying the appropriate transformations to the RHS (through the KeyPart#getKeyRange + // method). + // For example, with WHERE (invert(a),b) < ('abc',5), the 'abc' would be inverted by going + // through the + // childPart.getKeyRange defined for the invert function. 
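The operator rewrite earlier in this getKeyRange is the subtle part: when only a prefix of the row value constructor is usable, strict bounds have to be adjusted so the shared prefix still falls inside the range before the coerce call below runs. A small self-contained sketch of that rewrite for the fully-extracted case (the local Op enum is illustrative; the patch itself works with CompareOperator):

  public class RvcOpSketch {
    enum Op { EQUAL, LESS, LESS_OR_EQUAL, GREATER, GREATER_OR_EQUAL }

    // Mirrors the fully-extracted branch above: when the usable LHS row value
    // constructor is shorter than the RHS, LESS widens to LESS_OR_EQUAL and
    // GREATER_OR_EQUAL tightens to GREATER, e.g. (pk1) < (1, 2) behaves like pk1 <= 1.
    static Op adjustForShorterLhs(Op op, int lhsSize, int rhsSize) {
      if (op == Op.EQUAL || lhsSize >= rhsSize) {
        return op;
      }
      switch (op) {
        case LESS:
          return Op.LESS_OR_EQUAL;
        case GREATER_OR_EQUAL:
          return Op.GREATER;
        default:
          return op;
      }
    }
  }
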
+ rhs = BaseExpression.coerce(rvc, rhs, new ExpressionComparabilityWrapper() { - @Override - public KeySlots visitLeave(OrExpression node, List l) { - KeySlots keySlots = orKeySlots(node, l); - if (keySlots == null) { - return null; + @Override + public Expression wrap(final Expression lhs, final Expression rhs, + boolean rowKeyOrderOptimizable) throws SQLException { + final KeyPart childPart = keySlotsIterator.next().getSlots().get(0).getKeyPart(); + // TODO: DelegateExpression + return new BaseTerminalExpressionWrap(childPart, rhs, rvcElementOp, lhs); } - return keySlots; - } - @Override - public Iterator visitEnter(RowValueConstructorExpression node) { - return node.getChildren().iterator(); + }, table.rowKeyOrderOptimizable()); + } catch (SQLException e) { + return null; // Shouldn't happen } - - @Override - public KeySlots visitLeave(RowValueConstructorExpression node, List childSlots) { - return newRowValueConstructorKeyParts(node, childSlots); - } - - @Override - public KeySlots visit(RowKeyColumnExpression node) { - PColumn column = table.getPKColumns().get(node.getPosition()); - return new SingleKeySlot(new BaseKeyPart(table, column, new LinkedHashSet<>(Collections.singletonList(node))), node.getPosition(), 1, EVERYTHING_RANGES); - } - - @Override - public Iterator visitEnter(ComparisonExpression node) { - Expression rhs = node.getChildren().get(1); - if (!rhs.isStateless() || node.getFilterOp() == CompareOperator.NOT_EQUAL) { - return Collections.emptyIterator(); - } - return Iterators.singletonIterator(node.getChildren().get(0)); - } - - @Override - public KeySlots visitLeave(ComparisonExpression node, List childParts) { - // Delay adding to extractedNodes, until we're done traversing, - // since we can't yet tell whether or not the PK column references - // are contiguous - if (childParts.isEmpty()) { - return null; - } - Expression rhs = node.getChildren().get(1); - KeySlots childSlots = childParts.get(0); - KeySlot childSlot = childSlots.getSlots().get(0); - KeyPart childPart = childSlot.getKeyPart(); - //SortOrder sortOrder = childPart.getColumn().getSortOrder(); - CompareOperator op = node.getFilterOp(); - //CompareOperator op = sortOrder.transform(node.getFilterOp()); - KeyRange keyRange = childPart.getKeyRange(op, rhs); - return newKeyParts(childSlot, node, keyRange); - } - - // TODO: consider supporting expression substitution in the PK for pre-joined tables - // You'd need to register the expression for a given PK and substitute with a column - // reference for this during ExpressionBuilder. 
- @Override - public Iterator visitEnter(ScalarFunction node) { - int index = node.getKeyFormationTraversalIndex(); - if (index < 0) { - return Collections.emptyIterator(); - } - return Iterators.singletonIterator(node.getChildren().get(index)); + ImmutableBytesWritable ptr = context.getTempPtr(); + if (!rhs.evaluate(null, ptr)) { // Don't return if evaluated to null + return null; + } + byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); + KeyRange range = ByteUtil.getKeyRange(key, SortOrder.ASC, + /* rvc.getChildren().get(rhs.getChildren().size()-1).getSortOrder().transform(op) */op, + PVarbinary.INSTANCE); + return range; + } + + private class BaseTerminalExpressionWrap extends BaseTerminalExpression { + private final KeyPart childPart; + private final Expression rhs; + private final CompareOperator rvcElementOp; + private final Expression lhs; + + public BaseTerminalExpressionWrap(KeyPart childPart, Expression rhs, + CompareOperator rvcElementOp, Expression lhs) { + this.childPart = childPart; + this.rhs = rhs; + this.rvcElementOp = rvcElementOp; + this.lhs = lhs; } @Override - public KeySlots visitLeave(ScalarFunction node, List childParts) { - if (childParts.isEmpty()) { - return null; - } - return newScalarFunctionKeyPart(childParts.get(0).getSlots().get(0), node); + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (childPart == null) { + return rhs.evaluate(tuple, ptr); + } + if (!rhs.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; + } + // The op used to compute rvcElementOp did not take into account the sort order, + // and thus we need to transform it here before delegating to the child part + // which will do the required inversion. + KeyRange range = childPart.getKeyRange(rhs.getSortOrder().transform(rvcElementOp), rhs); + // Swap the upper and lower range if descending to compensate for the transform + // we did above of the rvcElementOp. + if (rhs.getSortOrder() == SortOrder.DESC) { + range = KeyRange.getKeyRange(range.getUpperRange(), range.isUpperInclusive(), + range.getLowerRange(), range.isLowerInclusive()); + } + // This can happen when an EQUAL operator is used and the expression cannot + // possibly match. + if (range == KeyRange.EMPTY_RANGE) { + return false; + } + /** + * We have to take the range and condense it down to a single key. We use which ever part + * of the range is inclusive (which implies being bound as well). This works in all cases, + * including this substring one, which produces a lower inclusive range and an upper non + * inclusive range. (a, substr(b,1,1)) IN (('a','b'), ('c','d')) + */ + byte[] key = range.isLowerInclusive() ? range.getLowerRange() : range.getUpperRange(); + /** + * FIXME: this is kind of a hack. The above call will fill a fixed width key,but we don't + * want to fill the key yet because it can throw off our the logic we use to compute the + * next key when we evaluate the RHS row value constructor below. We could create a new + * childPart with a delegate column that returns null for getByteSize(). + */ + if ( + lhs.getDataType().isFixedWidth() && lhs.getMaxLength() != null + && key.length > lhs.getMaxLength() + ) { + // Don't use PDataType.pad(), as this only grows the value, + // while this is shrinking it. 
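The shrink-not-pad comment above, together with the Arrays.copyOf call that follows, condenses the child range to a single key and then truncates it when a fixed-width LHS would otherwise be exceeded. A JDK-only sketch of those two steps (parameter names are illustrative):

  import java.util.Arrays;

  public class CondenseKeySketch {
    // Condense a range down to one key: use whichever bound is inclusive (and thus
    // bound), then shrink it to the fixed-width LHS length rather than padding it out.
    static byte[] condense(byte[] lower, boolean lowerInclusive, byte[] upper,
        Integer lhsMaxLength, boolean lhsFixedWidth) {
      byte[] key = lowerInclusive ? lower : upper;
      if (lhsFixedWidth && lhsMaxLength != null && key.length > lhsMaxLength) {
        key = Arrays.copyOf(key, lhsMaxLength); // shrink, the opposite of padding
      }
      return key;
    }
  }
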
+ key = Arrays.copyOf(key, lhs.getMaxLength()); + } + ptr.set(key); + return true; } @Override - public Iterator visitEnter(LikeExpression node) { - // TODO: can we optimize something that starts with '_' like this: foo LIKE '_a%' ? - if (node.getLikeType() == LikeType.CASE_INSENSITIVE || // TODO: remove this when we optimize ILIKE - ! (node.getChildren().get(1) instanceof LiteralExpression) || node.startsWithWildcard()) { - return Collections.emptyIterator(); - } - - return Iterators.singletonIterator(node.getChildren().get(0)); + public PDataType getDataType() { + return childPart.getColumn().getDataType(); } @Override - public KeySlots visitLeave(LikeExpression node, List childParts) { - // TODO: optimize ILIKE by creating two ranges for the literal prefix: one with lower case, one with upper case - if (childParts.isEmpty()) { - return null; - } - // for SUBSTR(,1,3) LIKE 'foo%' - KeySlots childSlots = childParts.get(0); - KeySlot childSlot = childSlots.getSlots().get(0); - final String startsWith = node.getLiteralPrefix(); - // TODO: is there a case where we'd need to go through the childPart to calculate the key range? - PColumn column = childSlot.getKeyPart().getColumn(); - PDataType type = column.getDataType(); - byte[] key = PVarchar.INSTANCE.toBytes(startsWith, SortOrder.ASC); - // If the expression is an equality expression against a fixed length column - // and the key length doesn't match the column length, the expression can - // never be true. - // An zero length byte literal is null which can never be compared against as true - Expression firstChild = node.getChildren().get(0); - Integer childNodeFixedLength = firstChild.getDataType().isFixedWidth() ? firstChild.getMaxLength() : null; - if (childNodeFixedLength != null && key.length > childNodeFixedLength) { - return EMPTY_KEY_SLOTS; - } - byte[] lowerRange = key; - byte[] upperRange = ByteUtil.nextKey(key); - Integer columnFixedLength = column.getMaxLength(); - if (type.isFixedWidth()) { - if (columnFixedLength != null) { // Sanity check - should always be non null - // Always use minimum byte to fill as otherwise our key is bigger - // that it should be when the sort order is descending. - lowerRange = type.pad(lowerRange, columnFixedLength, SortOrder.ASC); - upperRange = type.pad(upperRange, columnFixedLength, SortOrder.ASC); - } - } - KeyRange range = type.getKeyRange(lowerRange, true, upperRange, false, - SortOrder.ASC); - // Only extract LIKE expression if pattern ends with a wildcard and everything else was extracted - return newKeyParts(childSlot, node.endsWithOnlyWildcard() ? 
node : null, range); + public boolean isNullable() { + return childPart.getColumn().isNullable(); } @Override - public Iterator visitEnter(InListExpression node) { - return Iterators.singletonIterator(node.getChildren().get(0)); + public Integer getMaxLength() { + return lhs.getMaxLength(); } @Override - public KeySlots visitLeave(InListExpression node, List childParts) { - if (childParts.isEmpty()) { - return null; - } - - List keyExpressions = node.getKeyExpressions(); - Set ranges = Sets.newHashSetWithExpectedSize(keyExpressions.size()); - KeySlot childSlot = childParts.get(0).getSlots().get(0); - KeyPart childPart = childSlot.getKeyPart(); - // Handles cases like WHERE substr(foo,1,3) IN ('aaa','bbb') - for (Expression key : keyExpressions) { - KeyRange range = childPart.getKeyRange(CompareOperator.EQUAL, key); - if (range == null) { - return null; - } - if (range != KeyRange.EMPTY_RANGE) { // null means it can't possibly be in range - ranges.add(range); - } - } - return newKeyParts(childSlot, node, new ArrayList(ranges)); + public Integer getScale() { + return childPart.getColumn().getScale(); } @Override - public Iterator visitEnter(IsNullExpression node) { - return Iterators.singletonIterator(node.getChildren().get(0)); + public SortOrder getSortOrder() { + // See PHOENIX-4969: Clean up and unify code paths for RVCs with + // respect to Optimizations for SortOrder + // Handle the different paths for InList vs Normal Comparison + // The code paths in InList assume the sortOrder is ASC for + // their optimizations + // The code paths for Comparisons on RVC rewrite equality, + // for the non-equality cases return actual sort order + // This work around should work + // but a more general approach can be taken. + // This optimization causes PHOENIX-6662 (when desc pk used with in clause) + // if(rvcElementOp == CompareOperator.EQUAL || + // rvcElementOp == CompareOperator.NOT_EQUAL){ + // return SortOrder.ASC; + // } + return childPart.getColumn().getSortOrder(); } @Override - public KeySlots visitLeave(IsNullExpression node, List childParts) { - if (childParts.isEmpty()) { - return null; - } - KeySlots childSlots = childParts.get(0); - KeySlot childSlot = childSlots.getSlots().get(0); - PColumn column = childSlot.getKeyPart().getColumn(); - PDataType type = column.getDataType(); - boolean isFixedWidth = type.isFixedWidth(); - // Nothing changes for IS NULL and IS NOT NULL when DESC since - // we represent NULL the same way for ASC and DESC - if (isFixedWidth) { // if column can't be null - return node.isNegate() ? null : - newKeyParts(childSlot, node, - type.getKeyRange(new byte[SchemaUtil.getFixedByteSize(column)], true, - KeyRange.UNBOUND, true, SortOrder.ASC)); - } else { - KeyRange keyRange = node.isNegate() ? KeyRange.IS_NOT_NULL_RANGE : KeyRange.IS_NULL_RANGE; - return newKeyParts(childSlot, node, keyRange); - } - } - - /** - * - * Top level data structure used to drive the formation - * of the start/stop row of scans, essentially taking the - * expression tree of a WHERE clause and producing the - * ScanRanges instance during query compilation. - * - */ - public static interface KeySlots { - - /** - * List of slots that store binding of constant values - * for primary key columns. For example: - * WHERE pk1 = 'foo' and pk2 = 'bar' - * would produce two KeySlot instances that store that - * pk1 = 'foo' and pk2 = 'bar'. 
- * @return - */ - public List getSlots(); - /** - * Tracks whether or not the contained KeySlot(s) contain - * a slot that includes only a partial extraction of the - * involved expressions. For example: (A AND B) in the case - * of A being a PK column and B being a KV column, the - * KeySlots representing the AND would return true for - * isPartialExtraction. - * @return true if a partial expression extraction was - * done and false otherwise. - */ - public boolean isPartialExtraction(); - } - - /** - * - * Used during query compilation to represent the constant value of a - * primary key column based on expressions in the WHERE clause. These - * are combined together during the compilation of ANDs and ORs to - * to produce the start and stop scan range. - * - */ - public static final class KeySlot { - private final int pkPosition; // Position in primary key - private final int pkSpan; // Will be > 1 for RVC - private final KeyPart keyPart; // Used to produce the KeyRanges below - // Multiple ranges means values that have been ORed together - private final List keyRanges; - // If order rows returned from scan will match desired order declared in query - private final OrderPreserving orderPreserving; - - KeySlot(KeyPart keyPart, int pkPosition, int pkSpan, List keyRanges, OrderPreserving orderPreserving) { - this.pkPosition = pkPosition; - this.pkSpan = pkSpan; - this.keyPart = keyPart; - this.keyRanges = keyRanges; - this.orderPreserving = orderPreserving; - } - - public KeyPart getKeyPart() { - return keyPart; - } - - public int getPKPosition() { - return pkPosition; - } - - public int getPKSpan() { - return pkSpan; - } - - public List getKeyRanges() { - return keyRanges; - } - - public final KeySlot concatExtractNodes(Set extractNodes) { - return new KeySlot( - new BaseKeyPart(this.getKeyPart().getTable(), this.getKeyPart().getColumn(), - SchemaUtil.concat(this.getKeyPart().getExtractNodes(),extractNodes)), - this.getPKPosition(), - this.getPKSpan(), - this.getKeyRanges(), - this.getOrderPreserving()); - } - - public OrderPreserving getOrderPreserving() { - return orderPreserving; - } - } - - /** - * - * Implementation of KeySlots for AND and OR expressions. The - * {@code List } will be in PK order. 
- * - */ - public static class MultiKeySlot implements KeySlots { - private final List childSlots; - private final boolean partialExtraction; - - private MultiKeySlot(List childSlots, boolean partialExtraction) { - this.childSlots = childSlots; - this.partialExtraction = partialExtraction; - } - - @Override - public List getSlots() { - return childSlots; - } - - @Override - public boolean isPartialExtraction() { - return partialExtraction; - } - } - - /** - * - * Implementation of KeySlots for a constant value, - * - */ - public static class SingleKeySlot implements KeySlots { - private final List slots; - - SingleKeySlot(KeyPart part, int pkPosition, List ranges) { - this(part, pkPosition, 1, ranges); - } - - private SingleKeySlot(KeyPart part, int pkPosition, List ranges, OrderPreserving orderPreserving) { - this(part, pkPosition, 1, ranges, orderPreserving); - } - - private SingleKeySlot(KeyPart part, int pkPosition, int pkSpan, List ranges) { - this(part,pkPosition,pkSpan,ranges, null); - } - - private SingleKeySlot(KeyPart part, int pkPosition, int pkSpan, List ranges, OrderPreserving orderPreserving) { - this.slots = Collections.singletonList(new KeySlot(part, pkPosition, pkSpan, ranges, orderPreserving)); - } - - @Override - public List getSlots() { - return slots; - } - - @Override - public boolean isPartialExtraction() { - return this.slots.get(0).getKeyPart().getExtractNodes().isEmpty(); - } - - } - - public static class BaseKeyPart implements KeyPart { - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - rhs.evaluate(null, ptr); - // If the column is fixed width, fill is up to it's byte size - PDataType type = getColumn().getDataType(); - if (type.isFixedWidth()) { - Integer length = getColumn().getMaxLength(); - if (length != null) { - // Go through type to pad as the fill character depends on the type. - type.pad(ptr, length, SortOrder.ASC); - } - } - byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); - KeyRange range = ByteUtil.getKeyRange(key, rhs.getSortOrder(), op, type); - // Constants will have been inverted, so we invert them back here so that - // RVC comparisons work correctly (see PHOENIX-3383). 
- if (rhs.getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; - } - - private final PTable table; - private final PColumn column; - private final Set nodes; - - private BaseKeyPart(PTable table, PColumn column, Set nodes) { - this.table = table; - this.column = column; - this.nodes = nodes; - } - - @Override - public Set getExtractNodes() { - return nodes; - } - - @Override - public PColumn getColumn() { - return column; - } - - @Override - public PTable getTable() { - return table; - } - } - - private static class CoerceKeySlot implements KeyPart { - - private final KeyPart childPart; - private final ImmutableBytesWritable ptr; - private final CoerceExpression node; - private final Set extractNodes; - - public CoerceKeySlot(KeyPart childPart, ImmutableBytesWritable ptr, - CoerceExpression node, Set extractNodes) { - this.childPart = childPart; - this.ptr = ptr; - this.node = node; - this.extractNodes = extractNodes; - } - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - KeyRange range = childPart.getKeyRange(op, rhs); - byte[] lower = range.getLowerRange(); - if (!range.lowerUnbound()) { - ptr.set(lower); - /*** - Do the reverse translation so we can optimize out the coerce expression - For the actual type of the coerceBytes call, we use the node type instead of - the rhs type, because for IN, the rhs type will be VARBINARY and no coerce - will be done in that case (and we need it to be done). - */ - node.getChild().getDataType().coerceBytes(ptr, node.getDataType(), - rhs.getSortOrder(), SortOrder.ASC); - lower = ByteUtil.copyKeyBytesIfNecessary(ptr); - } - byte[] upper = range.getUpperRange(); - if (!range.upperUnbound()) { - ptr.set(upper); - // Do the reverse translation so we can optimize out the coerce expression - node.getChild().getDataType().coerceBytes(ptr, node.getDataType(), - rhs.getSortOrder(), SortOrder.ASC); - upper = ByteUtil.copyKeyBytesIfNecessary(ptr); - } - range = KeyRange.getKeyRange(lower, range.isLowerInclusive(), upper, - range.isUpperInclusive()); - return range; - } - - @Override - public Set getExtractNodes() { - return extractNodes; - } - - @Override - public PColumn getColumn() { - return childPart.getColumn(); - } - - @Override - public PTable getTable() { - return childPart.getTable(); - } - } - - private class RowValueConstructorKeyPart implements KeyPart { - private final RowValueConstructorExpression rvc; - private final PColumn column; - private final Set nodes; - private final List childSlots; - - private RowValueConstructorKeyPart(PColumn column, RowValueConstructorExpression rvc, int span, List childSlots) { - this.column = column; - if (span == rvc.getChildren().size()) { - this.rvc = rvc; - this.nodes = new LinkedHashSet<>(Collections.singletonList(rvc)); - this.childSlots = childSlots; - } else { - this.rvc = new RowValueConstructorExpression(rvc.getChildren().subList(0, span),rvc.isStateless()); - this.nodes = new LinkedHashSet<>(); - this.childSlots = childSlots.subList(0, span); - } - } - - @Override - public Set getExtractNodes() { - return nodes; - } - - @Override - public PColumn getColumn() { - return column; - } - - @Override - public PTable getTable() { - return table; - } - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - // With row value constructors, we need to convert the operator for any transformation we do on individual values - // to prevent keys from being increased to the next key as would be done for fixed width values. 
The next key is - // done to compensate for the start key (lower range) always being inclusive (thus we convert > to >=) and the - // end key (upper range) always being exclusive (thus we convert <= to <). - boolean usedAllOfLHS = !nodes.isEmpty(); - final CompareOperator rvcElementOp = op == CompareOperator.LESS_OR_EQUAL ? CompareOperator.LESS : op == CompareOperator.GREATER ? CompareOperator.GREATER_OR_EQUAL : op; - if (op != CompareOperator.EQUAL) { - // We need to transform the comparison operator for a LHS row value constructor - // that is shorter than a RHS row value constructor when we're extracting it. - // For example: a < (1,2) is true if a = 1, so we need to switch - // the compare op to <= like this: a <= 1. Since we strip trailing nulls - // in the rvc, we don't need to worry about the a < (1,null) case. - if (usedAllOfLHS) { - if (rvc.getChildren().size() < rhs.getChildren().size()) { - if (op == CompareOperator.LESS) { - op = CompareOperator.LESS_OR_EQUAL; - } else if (op == CompareOperator.GREATER_OR_EQUAL) { - op = CompareOperator.GREATER; - } - } - } else { - // If we're not using all of the LHS, we need to expand the range on either - // side to take into account the rest of the LHS. For example: - // WHERE (pk1, pk3) > ('a',1) AND pk1 = 'a'. In this case, we'll end up - // only using (pk1) and ('a'), so if we use a > operator the expression - // would end up as degenerate since we'd have a non inclusive range for - // ('a'). By switching the operator to extend the range, we end up with - // an ('a') inclusive range which is correct. - if (rvc.getChildren().size() < rhs.getChildren().size()) { - if (op == CompareOperator.LESS) { - op = CompareOperator.LESS_OR_EQUAL; - } else if (op == CompareOperator.GREATER) { - op = CompareOperator.GREATER_OR_EQUAL; - } - } - } - } - if (!usedAllOfLHS || rvc.getChildren().size() != rhs.getChildren().size()) { - // We know that rhs was converted to a row value constructor and that it's a constant - rhs= new RowValueConstructorExpression(rhs.getChildren().subList(0, Math.min(rvc.getChildren().size(), rhs.getChildren().size())), rhs.isStateless()); - } - /* - * Recursively transform the RHS row value constructor by applying the same logic as - * is done elsewhere during WHERE optimization: optimizing out LHS functions by applying - * the appropriate transformation to the RHS key. - */ - // Child slot iterator parallel with child expressions of the LHS row value constructor - final Iterator keySlotsIterator = childSlots.iterator(); - try { - // Call our static row value expression constructor with the current LHS row value constructor and - // the current RHS (which has already been coerced to match the LHS expression). We pass through an - // implementation of ExpressionComparabilityWrapper that transforms the RHS key to match the row key - // structure of the LHS column. This is essentially optimizing out the expressions on the LHS by - // applying the appropriate transformations to the RHS (through the KeyPart#getKeyRange method). - // For example, with WHERE (invert(a),b) < ('abc',5), the 'abc' would be inverted by going through the - // childPart.getKeyRange defined for the invert function. 
- rhs = BaseExpression.coerce(rvc, rhs, new ExpressionComparabilityWrapper() { - - @Override - public Expression wrap(final Expression lhs, final Expression rhs, boolean rowKeyOrderOptimizable) throws SQLException { - final KeyPart childPart = keySlotsIterator.next().getSlots().get(0).getKeyPart(); - // TODO: DelegateExpression - return new BaseTerminalExpressionWrap(childPart, rhs, rvcElementOp, - lhs); - } - - }, table.rowKeyOrderOptimizable()); - } catch (SQLException e) { - return null; // Shouldn't happen - } - ImmutableBytesWritable ptr = context.getTempPtr(); - if (!rhs.evaluate(null, ptr)) { // Don't return if evaluated to null - return null; - } - byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); - KeyRange range = ByteUtil.getKeyRange(key, SortOrder.ASC, /*rvc.getChildren().get(rhs.getChildren().size()-1).getSortOrder().transform(op)*/op, PVarbinary.INSTANCE); - return range; - } - - private class BaseTerminalExpressionWrap extends BaseTerminalExpression { - private final KeyPart childPart; - private final Expression rhs; - private final CompareOperator rvcElementOp; - private final Expression lhs; - - public BaseTerminalExpressionWrap(KeyPart childPart, Expression rhs, - CompareOperator rvcElementOp, Expression lhs) { - this.childPart = childPart; - this.rhs = rhs; - this.rvcElementOp = rvcElementOp; - this.lhs = lhs; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (childPart == null) { - return rhs.evaluate(tuple, ptr); - } - if (!rhs.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - // The op used to compute rvcElementOp did not take into account the sort order, - // and thus we need to transform it here before delegating to the child part - // which will do the required inversion. - KeyRange range = childPart.getKeyRange( - rhs.getSortOrder().transform(rvcElementOp), rhs); - // Swap the upper and lower range if descending to compensate for the transform - // we did above of the rvcElementOp. - if (rhs.getSortOrder() == SortOrder.DESC) { - range = KeyRange.getKeyRange(range.getUpperRange(), - range.isUpperInclusive(), range.getLowerRange(), - range.isLowerInclusive()); - } - // This can happen when an EQUAL operator is used and the expression cannot - // possibly match. - if (range == KeyRange.EMPTY_RANGE) { - return false; - } - /** - We have to take the range and condense it down to a single key. We use which - ever part of the range is inclusive (which implies being bound as well). This - works in all cases, including this substring one, which produces a lower - inclusive range and an upper non inclusive range. - (a, substr(b,1,1)) IN (('a','b'), ('c','d')) - */ - byte[] key = range.isLowerInclusive() ? - range.getLowerRange() : range.getUpperRange(); - /** - FIXME: - this is kind of a hack. The above call will fill a fixed width key,but - we don't want to fill the key yet because it can throw off our the logic we - use to compute the next key when we evaluate the RHS row value constructor - below. We could create a new childPart with a delegate column that returns - null for getByteSize(). - */ - if (lhs.getDataType().isFixedWidth() && - lhs.getMaxLength() != null && key.length > lhs.getMaxLength()) { - // Don't use PDataType.pad(), as this only grows the value, - // while this is shrinking it. 
- key = Arrays.copyOf(key, lhs.getMaxLength()); - } - ptr.set(key); - return true; - } - - @Override - public PDataType getDataType() { - return childPart.getColumn().getDataType(); - } - - @Override - public boolean isNullable() { - return childPart.getColumn().isNullable(); - } - - @Override - public Integer getMaxLength() { - return lhs.getMaxLength(); - } - - @Override - public Integer getScale() { - return childPart.getColumn().getScale(); - } - - @Override - public SortOrder getSortOrder() { - //See PHOENIX-4969: Clean up and unify code paths for RVCs with - // respect to Optimizations for SortOrder - //Handle the different paths for InList vs Normal Comparison - //The code paths in InList assume the sortOrder is ASC for - // their optimizations - //The code paths for Comparisons on RVC rewrite equality, - // for the non-equality cases return actual sort order - //This work around should work - // but a more general approach can be taken. - //This optimization causes PHOENIX-6662 (when desc pk used with in clause) -// if(rvcElementOp == CompareOperator.EQUAL || -// rvcElementOp == CompareOperator.NOT_EQUAL){ -// return SortOrder.ASC; -// } - return childPart.getColumn().getSortOrder(); - } - - @Override - public T accept(ExpressionVisitor visitor) { - return null; - } - } + public T accept(ExpressionVisitor visitor) { + return null; } + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/BaseScannerRegionObserverConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/BaseScannerRegionObserverConstants.java index 212592dbe2a..50f2c21bd1e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/BaseScannerRegionObserverConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/BaseScannerRegionObserverConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,173 +22,184 @@ import org.apache.phoenix.schema.types.PUnsignedTinyint; public class BaseScannerRegionObserverConstants { - public enum ReplayWrite { - TABLE_AND_INDEX, - INDEX_ONLY, - REBUILD_INDEX_ONLY; + public enum ReplayWrite { + TABLE_AND_INDEX, + INDEX_ONLY, + REBUILD_INDEX_ONLY; - public static ReplayWrite fromBytes(byte[] replayWriteBytes) { - if (replayWriteBytes == null) { - return null; - } - if (Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_TABLE_AND_INDEX_WRITES, replayWriteBytes) == 0) { - return TABLE_AND_INDEX; - } - if (Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES, replayWriteBytes) == 0) { - return INDEX_ONLY; - } - if (Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES, replayWriteBytes) == 0) { - return REBUILD_INDEX_ONLY; - } - throw new IllegalArgumentException("Unknown ReplayWrite code of " + Bytes.toStringBinary(replayWriteBytes)); - } + public static ReplayWrite fromBytes(byte[] replayWriteBytes) { + if (replayWriteBytes == null) { + return null; + } + if ( + Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_TABLE_AND_INDEX_WRITES, + replayWriteBytes) == 0 + ) { + return TABLE_AND_INDEX; + } + if ( + Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES, + replayWriteBytes) == 0 + ) { + return INDEX_ONLY; + } + if ( + Bytes.compareTo(BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES, + replayWriteBytes) == 0 + ) { + return REBUILD_INDEX_ONLY; + } + throw new IllegalArgumentException( + "Unknown ReplayWrite code of " + Bytes.toStringBinary(replayWriteBytes)); } + } - public static long getMaxLookbackInMillis(Configuration conf){ - //config param is in seconds, switch to millis - return conf.getLong(BaseScannerRegionObserverConstants.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, - BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000; - } + public static long getMaxLookbackInMillis(Configuration conf) { + // config param is in seconds, switch to millis + return conf.getLong(BaseScannerRegionObserverConstants.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, + BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000; + } - public static final String AGGREGATORS = "_Aggs"; - public static final String UNORDERED_GROUP_BY_EXPRESSIONS = "_UnorderedGroupByExpressions"; - public static final String KEY_ORDERED_GROUP_BY_EXPRESSIONS = "_OrderedGroupByExpressions"; - public static final String ESTIMATED_DISTINCT_VALUES = "_EstDistinctValues"; - public static final String NON_AGGREGATE_QUERY = "_NonAggregateQuery"; - public static final String TOPN = "_TopN"; - public static final String UNGROUPED_AGG = "_UngroupedAgg"; - public static final String DELETE_AGG = "_DeleteAgg"; - public static final String UPSERT_SELECT_TABLE = "_UpsertSelectTable"; - public static final String UPSERT_SELECT_EXPRS = "_UpsertSelectExprs"; - public static final String DELETE_CQ = "_DeleteCQ"; - public static final String DELETE_CF = "_DeleteCF"; - public static final String UPSERT_STATUS_CQ = "_UpsertStatusCQ"; - public static final String UPSERT_CF = "_UpsertCF"; - public static final String EMPTY_CF = "_EmptyCF"; - public static final String EMPTY_COLUMN_QUALIFIER = "_EmptyColumnQualifier"; - 
public static final String SPECIFIC_ARRAY_INDEX = "_SpecificArrayIndex"; - public static final String GROUP_BY_LIMIT = "_GroupByLimit"; - public static final String LOCAL_INDEX = "_LocalIndex"; - public static final String LOCAL_INDEX_BUILD = "_LocalIndexBuild"; - public static final String UNCOVERED_GLOBAL_INDEX = "_UncoveredGlobalIndex"; - public static final String INDEX_REBUILD_PAGING = "_IndexRebuildPaging"; - // The number of index rows to be rebuild in one RPC call - public static final String INDEX_REBUILD_PAGE_ROWS = "_IndexRebuildPageRows"; - public static final String INDEX_PAGE_ROWS = "_IndexPageRows"; - public static final String SERVER_PAGE_SIZE_MS = "_ServerPageSizeMs"; - // Index verification type done by the index tool - public static final String INDEX_REBUILD_VERIFY_TYPE = "_IndexRebuildVerifyType"; - public static final String INDEX_RETRY_VERIFY = "_IndexRetryVerify"; - public static final String INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE = - "_IndexRebuildDisableLoggingVerifyType"; - public static final String INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE = - "_IndexRebuildDisableLoggingBeyondMaxLookbackAge"; - @Deprecated - public static final String LOCAL_INDEX_FILTER = "_LocalIndexFilter"; - @Deprecated - public static final String LOCAL_INDEX_LIMIT = "_LocalIndexLimit"; - @Deprecated - public static final String LOCAL_INDEX_FILTER_STR = "_LocalIndexFilterStr"; - public static final String INDEX_FILTER = "_IndexFilter"; - public static final String INDEX_LIMIT = "_IndexLimit"; - public static final String INDEX_FILTER_STR = "_IndexFilterStr"; - public static final String JSON_VALUE_FUNCTION = "_JsonValueFunction"; - public static final String JSON_QUERY_FUNCTION = "_JsonQueryFunction"; - public static final String BSON_VALUE_FUNCTION = "_BsonValueFunction"; + public static final String AGGREGATORS = "_Aggs"; + public static final String UNORDERED_GROUP_BY_EXPRESSIONS = "_UnorderedGroupByExpressions"; + public static final String KEY_ORDERED_GROUP_BY_EXPRESSIONS = "_OrderedGroupByExpressions"; + public static final String ESTIMATED_DISTINCT_VALUES = "_EstDistinctValues"; + public static final String NON_AGGREGATE_QUERY = "_NonAggregateQuery"; + public static final String TOPN = "_TopN"; + public static final String UNGROUPED_AGG = "_UngroupedAgg"; + public static final String DELETE_AGG = "_DeleteAgg"; + public static final String UPSERT_SELECT_TABLE = "_UpsertSelectTable"; + public static final String UPSERT_SELECT_EXPRS = "_UpsertSelectExprs"; + public static final String DELETE_CQ = "_DeleteCQ"; + public static final String DELETE_CF = "_DeleteCF"; + public static final String UPSERT_STATUS_CQ = "_UpsertStatusCQ"; + public static final String UPSERT_CF = "_UpsertCF"; + public static final String EMPTY_CF = "_EmptyCF"; + public static final String EMPTY_COLUMN_QUALIFIER = "_EmptyColumnQualifier"; + public static final String SPECIFIC_ARRAY_INDEX = "_SpecificArrayIndex"; + public static final String GROUP_BY_LIMIT = "_GroupByLimit"; + public static final String LOCAL_INDEX = "_LocalIndex"; + public static final String LOCAL_INDEX_BUILD = "_LocalIndexBuild"; + public static final String UNCOVERED_GLOBAL_INDEX = "_UncoveredGlobalIndex"; + public static final String INDEX_REBUILD_PAGING = "_IndexRebuildPaging"; + // The number of index rows to be rebuild in one RPC call + public static final String INDEX_REBUILD_PAGE_ROWS = "_IndexRebuildPageRows"; + public static final String INDEX_PAGE_ROWS = "_IndexPageRows"; + public static final String SERVER_PAGE_SIZE_MS = 
"_ServerPageSizeMs"; + // Index verification type done by the index tool + public static final String INDEX_REBUILD_VERIFY_TYPE = "_IndexRebuildVerifyType"; + public static final String INDEX_RETRY_VERIFY = "_IndexRetryVerify"; + public static final String INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE = + "_IndexRebuildDisableLoggingVerifyType"; + public static final String INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE = + "_IndexRebuildDisableLoggingBeyondMaxLookbackAge"; + @Deprecated + public static final String LOCAL_INDEX_FILTER = "_LocalIndexFilter"; + @Deprecated + public static final String LOCAL_INDEX_LIMIT = "_LocalIndexLimit"; + @Deprecated + public static final String LOCAL_INDEX_FILTER_STR = "_LocalIndexFilterStr"; + public static final String INDEX_FILTER = "_IndexFilter"; + public static final String INDEX_LIMIT = "_IndexLimit"; + public static final String INDEX_FILTER_STR = "_IndexFilterStr"; + public static final String JSON_VALUE_FUNCTION = "_JsonValueFunction"; + public static final String JSON_QUERY_FUNCTION = "_JsonQueryFunction"; + public static final String BSON_VALUE_FUNCTION = "_BsonValueFunction"; - /* - * Attribute to denote that the index maintainer has been serialized using its proto-buf presentation. - * Needed for backward compatibility purposes. TODO: get rid of this in next major release. - */ - public static final String LOCAL_INDEX_BUILD_PROTO = "_LocalIndexBuild"; - public static final String LOCAL_INDEX_JOIN_SCHEMA = "_LocalIndexJoinSchema"; - public static final String DATA_TABLE_COLUMNS_TO_JOIN = "_DataTableColumnsToJoin"; - public static final String COLUMNS_STORED_IN_SINGLE_CELL = "_ColumnsStoredInSingleCell"; - public static final String VIEW_CONSTANTS = "_ViewConstants"; - public static final String EXPECTED_UPPER_REGION_KEY = "_ExpectedUpperRegionKey"; - public static final String REVERSE_SCAN = "_ReverseScan"; - public static final String ANALYZE_TABLE = "_ANALYZETABLE"; - public static final String REBUILD_INDEXES = "_RebuildIndexes"; - public static final String DO_TRANSFORMING = "_DoTransforming"; - public static final String TX_STATE = "_TxState"; - public static final String GUIDEPOST_WIDTH_BYTES = "_GUIDEPOST_WIDTH_BYTES"; - public static final String GUIDEPOST_PER_REGION = "_GUIDEPOST_PER_REGION"; - public static final String UPGRADE_DESC_ROW_KEY = "_UPGRADE_DESC_ROW_KEY"; - public static final String SCAN_REGION_SERVER = "_SCAN_REGION_SERVER"; - public static final String RUN_UPDATE_STATS_ASYNC_ATTRIB = "_RunUpdateStatsAsync"; - public static final String SKIP_REGION_BOUNDARY_CHECK = "_SKIP_REGION_BOUNDARY_CHECK"; - public static final String TX_SCN = "_TxScn"; - public static final String TTL = "_TTL"; - public static final String MASK_PHOENIX_TTL_EXPIRED = "_MASK_TTL_EXPIRED"; - public static final String DELETE_PHOENIX_TTL_EXPIRED = "_DELETE_TTL_EXPIRED"; - public static final String PHOENIX_TTL_SCAN_TABLE_NAME = "_PhoenixTTLScanTableName"; - public static final String SCAN_ACTUAL_START_ROW = "_ScanActualStartRow"; - public static final String REPLAY_WRITES = "_IGNORE_NEWER_MUTATIONS"; - public final static String SCAN_OFFSET = "_RowOffset"; - public static final String SCAN_START_ROW_SUFFIX = "_ScanStartRowSuffix"; - public static final String SCAN_STOP_ROW_SUFFIX = "_ScanStopRowSuffix"; - public final static String MIN_QUALIFIER = "_MinQualifier"; - public final static String MAX_QUALIFIER = "_MaxQualifier"; - public final static String USE_NEW_VALUE_COLUMN_QUALIFIER = "_UseNewValueColumnQualifier"; - public final static String 
QUALIFIER_ENCODING_SCHEME = "_QualifierEncodingScheme"; - public final static String IMMUTABLE_STORAGE_ENCODING_SCHEME = "_ImmutableStorageEncodingScheme"; - public final static String USE_ENCODED_COLUMN_QUALIFIER_LIST = "_UseEncodedColumnQualifierList"; - public static final String CLIENT_VERSION = "_ClientVersion"; - public static final String CHECK_VERIFY_COLUMN = "_CheckVerifyColumn"; - public static final String PHYSICAL_DATA_TABLE_NAME = "_PhysicalDataTableName"; - public static final String EMPTY_COLUMN_FAMILY_NAME = "_EmptyCFName"; - public static final String EMPTY_COLUMN_QUALIFIER_NAME = "_EmptyCQName"; - public static final String INDEX_ROW_KEY = "_IndexRowKey"; - public static final String READ_REPAIR_TRANSFORMING_TABLE = "_ReadRepairTransformingTable"; - public static final String CDC_DATA_TABLE_DEF = "_CdcDataTableDef"; - public static final String IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM = "_IsPhoenixScanTableSystem"; + /* + * Attribute to denote that the index maintainer has been serialized using its proto-buf + * presentation. Needed for backward compatibility purposes. TODO: get rid of this in next major + * release. + */ + public static final String LOCAL_INDEX_BUILD_PROTO = "_LocalIndexBuild"; + public static final String LOCAL_INDEX_JOIN_SCHEMA = "_LocalIndexJoinSchema"; + public static final String DATA_TABLE_COLUMNS_TO_JOIN = "_DataTableColumnsToJoin"; + public static final String COLUMNS_STORED_IN_SINGLE_CELL = "_ColumnsStoredInSingleCell"; + public static final String VIEW_CONSTANTS = "_ViewConstants"; + public static final String EXPECTED_UPPER_REGION_KEY = "_ExpectedUpperRegionKey"; + public static final String REVERSE_SCAN = "_ReverseScan"; + public static final String ANALYZE_TABLE = "_ANALYZETABLE"; + public static final String REBUILD_INDEXES = "_RebuildIndexes"; + public static final String DO_TRANSFORMING = "_DoTransforming"; + public static final String TX_STATE = "_TxState"; + public static final String GUIDEPOST_WIDTH_BYTES = "_GUIDEPOST_WIDTH_BYTES"; + public static final String GUIDEPOST_PER_REGION = "_GUIDEPOST_PER_REGION"; + public static final String UPGRADE_DESC_ROW_KEY = "_UPGRADE_DESC_ROW_KEY"; + public static final String SCAN_REGION_SERVER = "_SCAN_REGION_SERVER"; + public static final String RUN_UPDATE_STATS_ASYNC_ATTRIB = "_RunUpdateStatsAsync"; + public static final String SKIP_REGION_BOUNDARY_CHECK = "_SKIP_REGION_BOUNDARY_CHECK"; + public static final String TX_SCN = "_TxScn"; + public static final String TTL = "_TTL"; + public static final String MASK_PHOENIX_TTL_EXPIRED = "_MASK_TTL_EXPIRED"; + public static final String DELETE_PHOENIX_TTL_EXPIRED = "_DELETE_TTL_EXPIRED"; + public static final String PHOENIX_TTL_SCAN_TABLE_NAME = "_PhoenixTTLScanTableName"; + public static final String SCAN_ACTUAL_START_ROW = "_ScanActualStartRow"; + public static final String REPLAY_WRITES = "_IGNORE_NEWER_MUTATIONS"; + public final static String SCAN_OFFSET = "_RowOffset"; + public static final String SCAN_START_ROW_SUFFIX = "_ScanStartRowSuffix"; + public static final String SCAN_STOP_ROW_SUFFIX = "_ScanStopRowSuffix"; + public final static String MIN_QUALIFIER = "_MinQualifier"; + public final static String MAX_QUALIFIER = "_MaxQualifier"; + public final static String USE_NEW_VALUE_COLUMN_QUALIFIER = "_UseNewValueColumnQualifier"; + public final static String QUALIFIER_ENCODING_SCHEME = "_QualifierEncodingScheme"; + public final static String IMMUTABLE_STORAGE_ENCODING_SCHEME = "_ImmutableStorageEncodingScheme"; + public final static String 
USE_ENCODED_COLUMN_QUALIFIER_LIST = "_UseEncodedColumnQualifierList"; + public static final String CLIENT_VERSION = "_ClientVersion"; + public static final String CHECK_VERIFY_COLUMN = "_CheckVerifyColumn"; + public static final String PHYSICAL_DATA_TABLE_NAME = "_PhysicalDataTableName"; + public static final String EMPTY_COLUMN_FAMILY_NAME = "_EmptyCFName"; + public static final String EMPTY_COLUMN_QUALIFIER_NAME = "_EmptyCQName"; + public static final String INDEX_ROW_KEY = "_IndexRowKey"; + public static final String READ_REPAIR_TRANSFORMING_TABLE = "_ReadRepairTransformingTable"; + public static final String CDC_DATA_TABLE_DEF = "_CdcDataTableDef"; + public static final String IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM = "_IsPhoenixScanTableSystem"; - public static final String MAX_LOOKBACK_AGE = "MAX_LOOKBACK_AGE"; - /** - * The scan attribute to provide the scan start rowkey for analyze table queries. - */ - public static final String SCAN_ANALYZE_ACTUAL_START_ROW = "_ScanAnalyzeActualStartRow"; + public static final String MAX_LOOKBACK_AGE = "MAX_LOOKBACK_AGE"; + /** + * The scan attribute to provide the scan start rowkey for analyze table queries. + */ + public static final String SCAN_ANALYZE_ACTUAL_START_ROW = "_ScanAnalyzeActualStartRow"; - /** - * The scan attribute to provide the scan stop rowkey for analyze table queries. - */ - public static final String SCAN_ANALYZE_ACTUAL_STOP_ROW = "_ScanAnalyzeActualStopRow"; + /** + * The scan attribute to provide the scan stop rowkey for analyze table queries. + */ + public static final String SCAN_ANALYZE_ACTUAL_STOP_ROW = "_ScanAnalyzeActualStopRow"; - /** - * The scan attribute to provide the scan start rowkey include boolean value for analyze table - * queries. - */ - public static final String SCAN_ANALYZE_INCLUDE_START_ROW = "_ScanAnalyzeIncludeStartRow"; + /** + * The scan attribute to provide the scan start rowkey include boolean value for analyze table + * queries. + */ + public static final String SCAN_ANALYZE_INCLUDE_START_ROW = "_ScanAnalyzeIncludeStartRow"; - /** - * The scan attribute to provide the scan stop rowkey include boolean value for analyze table - * queries. - */ - public static final String SCAN_ANALYZE_INCLUDE_STOP_ROW = "_ScanAnalyzeIncludeStopRow"; + /** + * The scan attribute to provide the scan stop rowkey include boolean value for analyze table + * queries. + */ + public static final String SCAN_ANALYZE_INCLUDE_STOP_ROW = "_ScanAnalyzeIncludeStopRow"; - /** - * The scan attribute to determine whether client changes are compatible to consume - * new format changes sent by the server. This attribute is mainly used to address - * data integrity issues related to region moves (PHOENIX-7106). - */ - public static final String SCAN_SERVER_RETURN_VALID_ROW_KEY = "_ScanServerValidRowKey"; + /** + * The scan attribute to determine whether client changes are compatible to consume new format + * changes sent by the server. This attribute is mainly used to address data integrity issues + * related to region moves (PHOENIX-7106). + */ + public static final String SCAN_SERVER_RETURN_VALID_ROW_KEY = "_ScanServerValidRowKey"; - public final static byte[] REPLAY_TABLE_AND_INDEX_WRITES = PUnsignedTinyint.INSTANCE.toBytes(1); - public final static byte[] REPLAY_ONLY_INDEX_WRITES = PUnsignedTinyint.INSTANCE.toBytes(2); - // In case of Index Write failure, we need to determine that Index mutation - // is part of normal client write or Index Rebuilder. 
# PHOENIX-5080 - public final static byte[] REPLAY_INDEX_REBUILD_WRITES = PUnsignedTinyint.INSTANCE.toBytes(3); + public final static byte[] REPLAY_TABLE_AND_INDEX_WRITES = PUnsignedTinyint.INSTANCE.toBytes(1); + public final static byte[] REPLAY_ONLY_INDEX_WRITES = PUnsignedTinyint.INSTANCE.toBytes(2); + // In case of Index Write failure, we need to determine that Index mutation + // is part of normal client write or Index Rebuilder. # PHOENIX-5080 + public final static byte[] REPLAY_INDEX_REBUILD_WRITES = PUnsignedTinyint.INSTANCE.toBytes(3); - public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY = - "phoenix.max.lookback.age.seconds"; - public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0; + public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY = "phoenix.max.lookback.age.seconds"; + public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0; - /** - * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations - * are used to augment log lines emitted by Phoenix. See https://issues.apache.org/jira/browse/PHOENIX-1198. - */ - public static final String CUSTOM_ANNOTATIONS = "_Annot"; + /** + * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom + * annotations are used to augment log lines emitted by Phoenix. See + * https://issues.apache.org/jira/browse/PHOENIX-1198. + */ + public static final String CUSTOM_ANNOTATIONS = "_Annot"; - /** Exposed for testing */ - public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on server"; + /** Exposed for testing */ + public static final String SCANNER_OPENED_TRACE_INFO = "Scanner opened on server"; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/HashJoinCacheNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/HashJoinCacheNotFoundException.java index 05355521d35..ef53bcc516b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/HashJoinCacheNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/HashJoinCacheNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,24 +22,24 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; -public class HashJoinCacheNotFoundException extends SQLException{ - private static final long serialVersionUID = 1L; - private Long cacheId; - private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND; - public HashJoinCacheNotFoundException() { - this(null); - } +public class HashJoinCacheNotFoundException extends SQLException { + private static final long serialVersionUID = 1L; + private Long cacheId; + private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND; - public HashJoinCacheNotFoundException(Long cacheId) { - super(new SQLExceptionInfo.Builder(ERROR_CODE).setMessage("joinId: " + cacheId - + ". 
The cache might have expired and have been removed.").build().toString(), - ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); - this.cacheId=cacheId; - } - - public Long getCacheId(){ - return this.cacheId; - } - + public HashJoinCacheNotFoundException() { + this(null); + } + + public HashJoinCacheNotFoundException(Long cacheId) { + super(new SQLExceptionInfo.Builder(ERROR_CODE) + .setMessage("joinId: " + cacheId + ". The cache might have expired and have been removed.") + .build().toString(), ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); + this.cacheId = cacheId; + } + + public Long getCacheId() { + return this.cacheId; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/InvalidateServerMetadataCacheRequest.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/InvalidateServerMetadataCacheRequest.java index 7ac94c8968d..fd4717fe6cb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/InvalidateServerMetadataCacheRequest.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/InvalidateServerMetadataCacheRequest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,33 +21,32 @@ import org.apache.phoenix.util.SchemaUtil; public class InvalidateServerMetadataCacheRequest { - private final byte[] tenantId; - private final byte[] schemaName; - private final byte[] tableName; + private final byte[] tenantId; + private final byte[] schemaName; + private final byte[] tableName; - public InvalidateServerMetadataCacheRequest(byte[] tenantId, byte[] schemaName, - byte[] tableName) { - this.tenantId = tenantId; - this.schemaName = schemaName; - this.tableName = tableName; - } + public InvalidateServerMetadataCacheRequest(byte[] tenantId, byte[] schemaName, + byte[] tableName) { + this.tenantId = tenantId; + this.schemaName = schemaName; + this.tableName = tableName; + } - public byte[] getTenantId() { - return tenantId; - } + public byte[] getTenantId() { + return tenantId; + } - public byte[] getSchemaName() { - return schemaName; - } + public byte[] getSchemaName() { + return schemaName; + } - public byte[] getTableName() { - return tableName; - } + public byte[] getTableName() { + return tableName; + } - @Override - public String toString() { - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - return "tenantId = " + Bytes.toString(tenantId) - + ", table name = " + fullTableName; - } + @Override + public String toString() { + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + return "tenantId = " + Bytes.toString(tenantId) + ", table name = " + fullTableName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataEndpointImplConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataEndpointImplConstants.java index 6eddb4adbd2..d0f8e511c0f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataEndpointImplConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataEndpointImplConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use 
this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,10 +26,12 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; public class MetaDataEndpointImplConstants { - // Column to track tables that have been upgraded based on PHOENIX-2067 - public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE"; - public static final byte[] ROW_KEY_ORDER_OPTIMIZABLE_BYTES = Bytes.toBytes(ROW_KEY_ORDER_OPTIMIZABLE); - // Used to add a tag to a cell when a view modifies a table property to indicate that this - // property should not be derived from the base table - public static final byte[] VIEW_MODIFIED_PROPERTY_BYTES = TagUtil.fromList(ImmutableList.of(new ArrayBackedTag(VIEW_MODIFIED_PROPERTY_TAG_TYPE, Bytes.toBytes(1)))); + // Column to track tables that have been upgraded based on PHOENIX-2067 + public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE"; + public static final byte[] ROW_KEY_ORDER_OPTIMIZABLE_BYTES = + Bytes.toBytes(ROW_KEY_ORDER_OPTIMIZABLE); + // Used to add a tag to a cell when a view modifies a table property to indicate that this + // property should not be derived from the base table + public static final byte[] VIEW_MODIFIED_PROPERTY_BYTES = TagUtil.fromList( + ImmutableList. of(new ArrayBackedTag(VIEW_MODIFIED_PROPERTY_TAG_TYPE, Bytes.toBytes(1)))); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java index 46d7888beb1..5d49a724fab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/MetaDataProtocol.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,485 +39,497 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.MetaDataUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import com.google.protobuf.ByteString; /** - * - * Coprocessor protocol for Phoenix DDL. Phoenix stores the table metadata in - * an HBase table named SYSTEM.TABLE. Each table is represented by: - * - one row for the table - * - one row per column in the tabe - * Upto #DEFAULT_MAX_META_DATA_VERSIONS versions are kept. The time - * stamp of the metadata must always be increasing. The timestamp of the key - * values in the data row corresponds to the schema that it's using. 
- * - * TODO: dynamically prune number of schema version kept based on whether or - * not the data table still uses it (based on the min time stamp of the data - * table). - * - * + * Coprocessor protocol for Phoenix DDL. Phoenix stores the table metadata in an HBase table named + * SYSTEM.TABLE. Each table is represented by: - one row for the table - one row per column in the + * tabe Upto #DEFAULT_MAX_META_DATA_VERSIONS versions are kept. The time stamp of the metadata must + * always be increasing. The timestamp of the key values in the data row corresponds to the schema + * that it's using. TODO: dynamically prune number of schema version kept based on whether or not + * the data table still uses it (based on the min time stamp of the data table). * @since 0.1 */ public abstract class MetaDataProtocol extends MetaDataService { - public static final int PHOENIX_MAJOR_VERSION = 5; - public static final int PHOENIX_MINOR_VERSION = 3; - - public static final int PHOENIX_PATCH_NUMBER = 0; - public static final int PHOENIX_VERSION = - VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER); - - public static final long MIN_TABLE_TIMESTAMP = 0; - public static final long MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP = 0; - public static final String MIGRATION_IN_PROGRESS = "MigrationInProgress"; - - public static final int DEFAULT_LOG_TTL = 7 * 24 * 60 * 60; // 7 days - - // Min system table timestamps for every release. - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 = MIN_TABLE_TIMESTAMP + 3; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0 = MIN_TABLE_TIMESTAMP + 4; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1 = MIN_TABLE_TIMESTAMP + 5; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = MIN_TABLE_TIMESTAMP + 7; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = MIN_TABLE_TIMESTAMP + 8; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0 = MIN_TABLE_TIMESTAMP + 9; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = MIN_TABLE_TIMESTAMP + 15; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 = MIN_TABLE_TIMESTAMP + 18; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_1 = MIN_TABLE_TIMESTAMP + 18; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0 = MIN_TABLE_TIMESTAMP + 20; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 = MIN_TABLE_TIMESTAMP + 25; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 27; - // Since there's no upgrade code, keep the version the same as the previous version - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = MIN_TABLE_TIMESTAMP + 28; - - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_0_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = MIN_TABLE_TIMESTAMP + 29; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = MIN_TABLE_TIMESTAMP + 33; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_2_0 = MIN_TABLE_TIMESTAMP + 38; - public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 = MIN_TABLE_TIMESTAMP + 42; - // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants - 
public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0; - - // Version below which we should disallow usage of mutable secondary indexing. - public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10"); - public static final int MAX_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "8"); - public static final int MIN_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "6"); - public static final int MIN_RENEW_LEASE_VERSION = VersionUtil.encodeVersion("1", "1", "3"); - public static final int MIN_NAMESPACE_MAPPED_PHOENIX_VERSION = VersionUtil.encodeVersion("4", "8", "0"); - public static final int MIN_PENDING_ACTIVE_INDEX = VersionUtil.encodeVersion("4", "12", "0"); - public static final int MIN_CLIENT_RETRY_INDEX_WRITES = VersionUtil.encodeVersion("4", "14", "0"); - public static final int MIN_TX_CLIENT_SIDE_MAINTENANCE = VersionUtil.encodeVersion("4", "14", "0"); - public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0"); - // Version below which we should turn off essential column family. - public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7"); - /** Version below which we fall back on the generic KeyValueBuilder */ - public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14"); - // Version at which we allow SYSTEM.CATALOG to split - public static final int MIN_SPLITTABLE_SYSTEM_CATALOG = VersionUtil.encodeVersion("5", "1", "0"); - public static final String MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION = "5.1.0"; - - // Version at and after which we will no longer expect client to serialize thresholdBytes for - // spooling into the scan - public static final int MIN_5_x_DISABLE_SERVER_SPOOL_THRESHOLD = - VersionUtil.encodeVersion("5", "1", "0"); - public static final int MIN_4_x_DISABLE_SERVER_SPOOL_THRESHOLD = - VersionUtil.encodeVersion("4", "15", "0"); - - // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). - // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string. 
- private static final NavigableMap TIMESTAMP_VERSION_MAP = new TreeMap<>(); - static { - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, MIGRATION_IN_PROGRESS); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0, "4.1.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0, "4.2.0"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1, "4.2.1"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, "4.3.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, "4.5.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, "4.6.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, "4.7.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, "4.9.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, "4.10.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0, "4.12.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0, "4.13.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_0_0, "5.0.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0, "5.1.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_2_0, "5.2.x"); - TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0, "5.3.x"); - } - - public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; - - - // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need - // a different code for every type of error. - // ENTITY_ALREADY_EXISTS, ENTITY_NOT_FOUND, NEWER_ENTITY_FOUND, ENTITY_NOT_IN_REGION, CONCURRENT_MODIFICATION - // ILLEGAL_MUTATION (+ sql code) - public enum MutationCode { - TABLE_ALREADY_EXISTS, - TABLE_NOT_FOUND, - COLUMN_NOT_FOUND, - COLUMN_ALREADY_EXISTS, - CONCURRENT_TABLE_MUTATION, - TABLE_NOT_IN_REGION, - NEWER_TABLE_FOUND, - UNALLOWED_TABLE_MUTATION, - NO_PK_COLUMNS, - PARENT_TABLE_NOT_FOUND, - FUNCTION_ALREADY_EXISTS, - FUNCTION_NOT_FOUND, - NEWER_FUNCTION_FOUND, - FUNCTION_NOT_IN_REGION, - SCHEMA_ALREADY_EXISTS, - NEWER_SCHEMA_FOUND, - SCHEMA_NOT_FOUND, - SCHEMA_NOT_IN_REGION, - TABLES_EXIST_ON_SCHEMA, - UNALLOWED_SCHEMA_MUTATION, - AUTO_PARTITION_SEQUENCE_NOT_FOUND, - CANNOT_COERCE_AUTO_PARTITION_ID, - TOO_MANY_INDEXES, - UNABLE_TO_CREATE_CHILD_LINK, - UNABLE_TO_UPDATE_PARENT_TABLE, - UNABLE_TO_DELETE_CHILD_LINK, - UNABLE_TO_UPSERT_TASK, - ERROR_WRITING_TO_SCHEMA_REGISTRY, - NO_OP, - } + public static final int PHOENIX_MAJOR_VERSION = 5; + public static final int PHOENIX_MINOR_VERSION = 3; + + public static final int PHOENIX_PATCH_NUMBER = 0; + public static final int PHOENIX_VERSION = + VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER); + + public static final long MIN_TABLE_TIMESTAMP = 0; + public static final long MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP = 0; + public static final String MIGRATION_IN_PROGRESS = "MigrationInProgress"; + + public static final int DEFAULT_LOG_TTL = 7 * 24 * 60 * 60; // 7 days + + // Min system table timestamps for every release. 
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 = MIN_TABLE_TIMESTAMP + 3; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0 = MIN_TABLE_TIMESTAMP + 4; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1 = MIN_TABLE_TIMESTAMP + 5; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = MIN_TABLE_TIMESTAMP + 7; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = MIN_TABLE_TIMESTAMP + 8; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0 = MIN_TABLE_TIMESTAMP + 9; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 = MIN_TABLE_TIMESTAMP + 15; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 = MIN_TABLE_TIMESTAMP + 18; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_8_1 = MIN_TABLE_TIMESTAMP + 18; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0 = MIN_TABLE_TIMESTAMP + 20; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 = MIN_TABLE_TIMESTAMP + 25; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 27; + // Since there's no upgrade code, keep the version the same as the previous version + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0 = MIN_TABLE_TIMESTAMP + 28; + + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_0_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0 = MIN_TABLE_TIMESTAMP + 29; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 = MIN_TABLE_TIMESTAMP + 33; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_2_0 = MIN_TABLE_TIMESTAMP + 38; + public static final long MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 = MIN_TABLE_TIMESTAMP + 42; + // MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* + // constants + public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0; + + // Version below which we should disallow usage of mutable secondary indexing. + public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10"); + public static final int MAX_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "8"); + public static final int MIN_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "6"); + public static final int MIN_RENEW_LEASE_VERSION = VersionUtil.encodeVersion("1", "1", "3"); + public static final int MIN_NAMESPACE_MAPPED_PHOENIX_VERSION = + VersionUtil.encodeVersion("4", "8", "0"); + public static final int MIN_PENDING_ACTIVE_INDEX = VersionUtil.encodeVersion("4", "12", "0"); + public static final int MIN_CLIENT_RETRY_INDEX_WRITES = VersionUtil.encodeVersion("4", "14", "0"); + public static final int MIN_TX_CLIENT_SIDE_MAINTENANCE = + VersionUtil.encodeVersion("4", "14", "0"); + public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0"); + // Version below which we should turn off essential column family. 
+ public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = + VersionUtil.encodeVersion("0", "94", "7"); + /** Version below which we fall back on the generic KeyValueBuilder */ + public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = + VersionUtil.encodeVersion("0", "94", "14"); + // Version at which we allow SYSTEM.CATALOG to split + public static final int MIN_SPLITTABLE_SYSTEM_CATALOG = VersionUtil.encodeVersion("5", "1", "0"); + public static final String MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION = "5.1.0"; + + // Version at and after which we will no longer expect client to serialize thresholdBytes for + // spooling into the scan + public static final int MIN_5_x_DISABLE_SERVER_SPOOL_THRESHOLD = + VersionUtil.encodeVersion("5", "1", "0"); + public static final int MIN_4_x_DISABLE_SERVER_SPOOL_THRESHOLD = + VersionUtil.encodeVersion("4", "15", "0"); + + // ALWAYS update this map whenever rolling out a new release (major, minor or patch release). + // Key is the SYSTEM.CATALOG timestamp for the version and value is the version string. + private static final NavigableMap TIMESTAMP_VERSION_MAP = new TreeMap<>(); + static { + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, MIGRATION_IN_PROGRESS); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0, "4.1.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0, "4.2.0"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1, "4.2.1"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, "4.3.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, "4.5.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, "4.6.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0, "4.7.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, "4.8.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, "4.9.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, "4.10.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0, "4.12.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0, "4.13.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_0_0, "5.0.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0, "5.1.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_2_0, "5.2.x"); + TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0, "5.3.x"); + } + + public static final String CURRENT_CLIENT_VERSION = + PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER; + + // TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, + // nor should we need + // a different code for every type of error. 
+ // ENTITY_ALREADY_EXISTS, ENTITY_NOT_FOUND, NEWER_ENTITY_FOUND, ENTITY_NOT_IN_REGION, + // CONCURRENT_MODIFICATION + // ILLEGAL_MUTATION (+ sql code) + public enum MutationCode { + TABLE_ALREADY_EXISTS, + TABLE_NOT_FOUND, + COLUMN_NOT_FOUND, + COLUMN_ALREADY_EXISTS, + CONCURRENT_TABLE_MUTATION, + TABLE_NOT_IN_REGION, + NEWER_TABLE_FOUND, + UNALLOWED_TABLE_MUTATION, + NO_PK_COLUMNS, + PARENT_TABLE_NOT_FOUND, + FUNCTION_ALREADY_EXISTS, + FUNCTION_NOT_FOUND, + NEWER_FUNCTION_FOUND, + FUNCTION_NOT_IN_REGION, + SCHEMA_ALREADY_EXISTS, + NEWER_SCHEMA_FOUND, + SCHEMA_NOT_FOUND, + SCHEMA_NOT_IN_REGION, + TABLES_EXIST_ON_SCHEMA, + UNALLOWED_SCHEMA_MUTATION, + AUTO_PARTITION_SEQUENCE_NOT_FOUND, + CANNOT_COERCE_AUTO_PARTITION_ID, + TOO_MANY_INDEXES, + UNABLE_TO_CREATE_CHILD_LINK, + UNABLE_TO_UPDATE_PARENT_TABLE, + UNABLE_TO_DELETE_CHILD_LINK, + UNABLE_TO_UPSERT_TASK, + ERROR_WRITING_TO_SCHEMA_REGISTRY, + NO_OP, + } public static class SharedTableState { - private PName tenantId; - private PName schemaName; - private PName tableName; - private List columns; - private List physicalNames; - private PDataType viewIndexIdType; - private Long viewIndexId; - - public SharedTableState(PTable table) { - this.tenantId = table.getTenantId(); - this.schemaName = table.getSchemaName(); - this.tableName = table.getTableName(); - this.columns = table.getColumns(); - this.physicalNames = table.getPhysicalNames(); - this.viewIndexIdType = table.getviewIndexIdType(); - this.viewIndexId = table.getViewIndexId(); - } - - public SharedTableState( - org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState sharedTable) { - this.tenantId = sharedTable.hasTenantId() ? PNameFactory.newName(sharedTable.getTenantId().toByteArray()) : null; - this.schemaName = PNameFactory.newName(sharedTable.getSchemaName().toByteArray()); - this.tableName = PNameFactory.newName(sharedTable.getTableName().toByteArray()); - this.columns = Lists.transform(sharedTable.getColumnsList(), - new Function() { - @Override - public PColumn apply(org.apache.phoenix.coprocessor.generated.PTableProtos.PColumn column) { - return PColumnImpl.createFromProto(column); - } - }); - this.physicalNames = Lists.transform(sharedTable.getPhysicalNamesList(), - new Function() { - @Override - public PName apply(ByteString physicalName) { - return PNameFactory.newName(physicalName.toByteArray()); - } - }); - this.viewIndexId = sharedTable.getViewIndexId(); - this.viewIndexIdType = sharedTable.hasViewIndexIdType() - ? PDataType.fromTypeId(sharedTable.getViewIndexIdType()) - : MetaDataUtil.getLegacyViewIndexIdDataType(); - } + private PName tenantId; + private PName schemaName; + private PName tableName; + private List columns; + private List physicalNames; + private PDataType viewIndexIdType; + private Long viewIndexId; + + public SharedTableState(PTable table) { + this.tenantId = table.getTenantId(); + this.schemaName = table.getSchemaName(); + this.tableName = table.getTableName(); + this.columns = table.getColumns(); + this.physicalNames = table.getPhysicalNames(); + this.viewIndexIdType = table.getviewIndexIdType(); + this.viewIndexId = table.getViewIndexId(); + } - public PName getTenantId() { - return tenantId; - } + public SharedTableState( + org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState sharedTable) { + this.tenantId = sharedTable.hasTenantId() + ? 
PNameFactory.newName(sharedTable.getTenantId().toByteArray()) + : null; + this.schemaName = PNameFactory.newName(sharedTable.getSchemaName().toByteArray()); + this.tableName = PNameFactory.newName(sharedTable.getTableName().toByteArray()); + this.columns = Lists.transform(sharedTable.getColumnsList(), + new Function() { + @Override + public PColumn + apply(org.apache.phoenix.coprocessor.generated.PTableProtos.PColumn column) { + return PColumnImpl.createFromProto(column); + } + }); + this.physicalNames = + Lists.transform(sharedTable.getPhysicalNamesList(), new Function() { + @Override + public PName apply(ByteString physicalName) { + return PNameFactory.newName(physicalName.toByteArray()); + } + }); + this.viewIndexId = sharedTable.getViewIndexId(); + this.viewIndexIdType = sharedTable.hasViewIndexIdType() + ? PDataType.fromTypeId(sharedTable.getViewIndexIdType()) + : MetaDataUtil.getLegacyViewIndexIdDataType(); + } - public PName getSchemaName() { - return schemaName; - } + public PName getTenantId() { + return tenantId; + } - public PName getTableName() { - return tableName; - } + public PName getSchemaName() { + return schemaName; + } - public List getColumns() { - return columns; - } + public PName getTableName() { + return tableName; + } - public List getPhysicalNames() { - return physicalNames; - } + public List getColumns() { + return columns; + } - public Long getViewIndexId() { - return viewIndexId; - } + public List getPhysicalNames() { + return physicalNames; + } - public PDataType getViewIndexIdType() { - return viewIndexIdType; - } + public Long getViewIndexId() { + return viewIndexId; + } + + public PDataType getViewIndexIdType() { + return viewIndexIdType; + } } - + public static class MetaDataMutationResult { - private MutationCode returnCode; - private long mutationTime; - private PTable table; - private List tableNamesToDelete; - private List sharedTablesToDelete; - private byte[] columnName; - private byte[] familyName; - private boolean wasUpdated; - private PSchema schema; - private Long viewIndexId; - private PDataType viewIndexIdType; - private List functions = new ArrayList(1); - private long autoPartitionNum; - - public MetaDataMutationResult() { - } + private MutationCode returnCode; + private long mutationTime; + private PTable table; + private List tableNamesToDelete; + private List sharedTablesToDelete; + private byte[] columnName; + private byte[] familyName; + private boolean wasUpdated; + private PSchema schema; + private Long viewIndexId; + private PDataType viewIndexIdType; + private List functions = new ArrayList(1); + private long autoPartitionNum; + + public MetaDataMutationResult() { + } - public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, PColumn column) { - this(returnCode, currentTime, table); - if(column != null){ - this.columnName = column.getName().getBytes(); - this.familyName = column.getFamilyName().getBytes(); - } - } + public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, + PColumn column) { + this(returnCode, currentTime, table); + if (column != null) { + this.columnName = column.getName().getBytes(); + this.familyName = column.getFamilyName().getBytes(); + } + } - public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table) { - this(returnCode, currentTime, table, Collections. emptyList()); - } + public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table) { + this(returnCode, currentTime, table, Collections. 
emptyList()); + } - public MetaDataMutationResult(MutationCode returnCode, long currentTime, List functions, boolean wasUpdated) { - this.returnCode = returnCode; - this.mutationTime = currentTime; - this.functions = functions; - this.wasUpdated = wasUpdated; - } - - public MetaDataMutationResult(MutationCode returnCode, PSchema schema, long currentTime) { - this.returnCode = returnCode; - this.mutationTime = currentTime; - this.schema = schema; - } + public MetaDataMutationResult(MutationCode returnCode, long currentTime, + List functions, boolean wasUpdated) { + this.returnCode = returnCode; + this.mutationTime = currentTime; + this.functions = functions; + this.wasUpdated = wasUpdated; + } - // For testing, so that connectionless can set wasUpdated so ColumnResolver doesn't complain - public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, boolean wasUpdated) { - this(returnCode, currentTime, table, Collections. emptyList()); - this.wasUpdated = wasUpdated; - } - - public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, List tableNamesToDelete) { - this.returnCode = returnCode; - this.mutationTime = currentTime; - this.table = table; - this.tableNamesToDelete = tableNamesToDelete; - } - - public MetaDataMutationResult(MutationCode returnCode, int currentTime, PTable table, long viewIndexId, PDataType viewIndexIdType) { - this(returnCode, currentTime, table, Collections. emptyList()); - this.viewIndexId = viewIndexId; - this.viewIndexIdType = viewIndexIdType; - } - - public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, List tableNamesToDelete, List sharedTablesToDelete) { - this(returnCode, currentTime, table, tableNamesToDelete); - this.sharedTablesToDelete = sharedTablesToDelete; - } + public MetaDataMutationResult(MutationCode returnCode, PSchema schema, long currentTime) { + this.returnCode = returnCode; + this.mutationTime = currentTime; + this.schema = schema; + } - public MutationCode getMutationCode() { - return returnCode; - } + // For testing, so that connectionless can set wasUpdated so ColumnResolver doesn't complain + public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, + boolean wasUpdated) { + this(returnCode, currentTime, table, Collections. emptyList()); + this.wasUpdated = wasUpdated; + } - public long getMutationTime() { - return mutationTime; - } + public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, + List tableNamesToDelete) { + this.returnCode = returnCode; + this.mutationTime = currentTime; + this.table = table; + this.tableNamesToDelete = tableNamesToDelete; + } - public boolean wasUpdated() { - return wasUpdated; - } + public MetaDataMutationResult(MutationCode returnCode, int currentTime, PTable table, + long viewIndexId, PDataType viewIndexIdType) { + this(returnCode, currentTime, table, Collections. 
emptyList()); + this.viewIndexId = viewIndexId; + this.viewIndexIdType = viewIndexIdType; + } - public PTable getTable() { - return table; - } + public MetaDataMutationResult(MutationCode returnCode, long currentTime, PTable table, + List tableNamesToDelete, List sharedTablesToDelete) { + this(returnCode, currentTime, table, tableNamesToDelete); + this.sharedTablesToDelete = sharedTablesToDelete; + } - public void setTable(PTable table) { - this.table = table; - } - - public void setFunction(PFunction function) { - this.functions.add(function); - } + public MutationCode getMutationCode() { + return returnCode; + } - public List getTableNamesToDelete() { - return tableNamesToDelete; - } + public long getMutationTime() { + return mutationTime; + } - public byte[] getColumnName() { - return columnName; - } + public boolean wasUpdated() { + return wasUpdated; + } - public byte[] getFamilyName() { - return familyName; - } + public PTable getTable() { + return table; + } - public List getFunctions() { - return functions; - } - - public List getSharedTablesToDelete() { - return sharedTablesToDelete; - } + public void setTable(PTable table) { + this.table = table; + } + + public void setFunction(PFunction function) { + this.functions.add(function); + } + + public List getTableNamesToDelete() { + return tableNamesToDelete; + } + + public byte[] getColumnName() { + return columnName; + } + + public byte[] getFamilyName() { + return familyName; + } + + public List getFunctions() { + return functions; + } - public long getAutoPartitionNum() { - return autoPartitionNum; + public List getSharedTablesToDelete() { + return sharedTablesToDelete; + } + + public long getAutoPartitionNum() { + return autoPartitionNum; + } + + public Long getViewIndexId() { + return viewIndexId; + } + + public PDataType getViewIndexIdType() { + return viewIndexIdType; + } + + public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) { + MetaDataMutationResult result = new MetaDataMutationResult(); + result.returnCode = MutationCode.values()[proto.getReturnCode().getNumber()]; + result.mutationTime = proto.getMutationTime(); + if (proto.hasTable()) { + result.wasUpdated = true; + result.table = PTableImpl.createFromProto(proto.getTable()); + } + if (proto.getFunctionCount() > 0) { + result.wasUpdated = true; + for (PFunctionProtos.PFunction function : proto.getFunctionList()) + result.functions.add(PFunction.createFromProto(function)); + } + if (proto.getTablesToDeleteCount() > 0) { + result.tableNamesToDelete = + Lists.newArrayListWithExpectedSize(proto.getTablesToDeleteCount()); + for (ByteString tableName : proto.getTablesToDeleteList()) { + result.tableNamesToDelete.add(tableName.toByteArray()); } - - public Long getViewIndexId() { - return viewIndexId; + } + result.columnName = ByteUtil.EMPTY_BYTE_ARRAY; + if (proto.hasColumnName()) { + result.columnName = proto.getColumnName().toByteArray(); + } + if (proto.hasFamilyName()) { + result.familyName = proto.getFamilyName().toByteArray(); + } + if (proto.getSharedTablesToDeleteCount() > 0) { + result.sharedTablesToDelete = + Lists.newArrayListWithExpectedSize(proto.getSharedTablesToDeleteCount()); + for (org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState sharedTable : proto + .getSharedTablesToDeleteList()) { + result.sharedTablesToDelete.add(new SharedTableState(sharedTable)); } - - public PDataType getViewIndexIdType() { - return viewIndexIdType; + } + if (proto.hasSchema()) { + result.schema = 
PSchema.createFromProto(proto.getSchema()); + } + if (proto.hasAutoPartitionNum()) { + result.autoPartitionNum = proto.getAutoPartitionNum(); + } + if (proto.hasViewIndexId()) { + result.viewIndexId = proto.getViewIndexId(); } - public static MetaDataMutationResult constructFromProto(MetaDataResponse proto) { - MetaDataMutationResult result = new MetaDataMutationResult(); - result.returnCode = MutationCode.values()[proto.getReturnCode().getNumber()]; - result.mutationTime = proto.getMutationTime(); - if (proto.hasTable()) { - result.wasUpdated = true; - result.table = PTableImpl.createFromProto(proto.getTable()); - } - if (proto.getFunctionCount() > 0) { - result.wasUpdated = true; - for (PFunctionProtos.PFunction function: proto.getFunctionList()) - result.functions.add(PFunction.createFromProto(function)); - } - if (proto.getTablesToDeleteCount() > 0) { - result.tableNamesToDelete = - Lists.newArrayListWithExpectedSize(proto.getTablesToDeleteCount()); - for (ByteString tableName : proto.getTablesToDeleteList()) { - result.tableNamesToDelete.add(tableName.toByteArray()); - } - } - result.columnName = ByteUtil.EMPTY_BYTE_ARRAY; - if (proto.hasColumnName()){ - result.columnName = proto.getColumnName().toByteArray(); - } - if (proto.hasFamilyName()){ - result.familyName = proto.getFamilyName().toByteArray(); - } - if (proto.getSharedTablesToDeleteCount() > 0) { - result.sharedTablesToDelete = - Lists.newArrayListWithExpectedSize(proto.getSharedTablesToDeleteCount()); - for (org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState sharedTable : - proto.getSharedTablesToDeleteList()) { - result.sharedTablesToDelete.add(new SharedTableState(sharedTable)); - } - } - if (proto.hasSchema()) { - result.schema = PSchema.createFromProto(proto.getSchema()); - } - if (proto.hasAutoPartitionNum()) { - result.autoPartitionNum = proto.getAutoPartitionNum(); - } - if (proto.hasViewIndexId()) { - result.viewIndexId = proto.getViewIndexId(); - } + result.viewIndexIdType = proto.hasViewIndexIdType() + ? PDataType.fromTypeId(proto.getViewIndexIdType()) + : MetaDataUtil.getLegacyViewIndexIdDataType(); + return result; + } - result.viewIndexIdType = proto.hasViewIndexIdType() - ? 
PDataType.fromTypeId(proto.getViewIndexIdType()) - : MetaDataUtil.getLegacyViewIndexIdDataType(); - return result; + public static MetaDataResponse toProto(MetaDataMutationResult result) { + MetaDataProtos.MetaDataResponse.Builder builder = + MetaDataProtos.MetaDataResponse.newBuilder(); + if (result != null) { + builder + .setReturnCode(MetaDataProtos.MutationCode.values()[result.getMutationCode().ordinal()]); + builder.setMutationTime(result.getMutationTime()); + if (result.table != null) { + builder.setTable(PTableImpl.toProto(result.table)); + } + if (result.getTableNamesToDelete() != null) { + for (byte[] tableName : result.tableNamesToDelete) { + builder.addTablesToDelete(ByteStringer.wrap(tableName)); + } } - - public static MetaDataResponse toProto(MetaDataMutationResult result) { - MetaDataProtos.MetaDataResponse.Builder builder = - MetaDataProtos.MetaDataResponse.newBuilder(); - if (result != null) { - builder.setReturnCode(MetaDataProtos.MutationCode.values()[result.getMutationCode() - .ordinal()]); - builder.setMutationTime(result.getMutationTime()); - if (result.table != null) { - builder.setTable(PTableImpl.toProto(result.table)); - } - if (result.getTableNamesToDelete() != null) { - for (byte[] tableName : result.tableNamesToDelete) { - builder.addTablesToDelete(ByteStringer.wrap(tableName)); - } - } - if (result.getColumnName() != null){ - builder.setColumnName(ByteStringer.wrap(result.getColumnName())); - } - if (result.getFamilyName() != null){ - builder.setFamilyName(ByteStringer.wrap(result.getFamilyName())); - } - if (result.getSharedTablesToDelete() !=null){ - for (SharedTableState sharedTableState : result.sharedTablesToDelete) { - org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState.Builder sharedTableStateBuilder = - org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState.newBuilder(); - for (PColumn col : sharedTableState.getColumns()) { - sharedTableStateBuilder.addColumns(PColumnImpl.toProto(col)); - } - for (PName physicalName : sharedTableState.getPhysicalNames()) { - sharedTableStateBuilder.addPhysicalNames(ByteStringer.wrap(physicalName.getBytes())); - } - if (sharedTableState.getTenantId()!=null) { - sharedTableStateBuilder.setTenantId(ByteStringer.wrap(sharedTableState.getTenantId().getBytes())); - } - sharedTableStateBuilder.setSchemaName(ByteStringer.wrap(sharedTableState.getSchemaName().getBytes())); - sharedTableStateBuilder.setTableName(ByteStringer.wrap(sharedTableState.getTableName().getBytes())); - sharedTableStateBuilder.setViewIndexId(sharedTableState.getViewIndexId()); - sharedTableStateBuilder.setViewIndexIdType(sharedTableState.viewIndexIdType.getSqlType()); - builder.addSharedTablesToDelete(sharedTableStateBuilder.build()); - } + if (result.getColumnName() != null) { + builder.setColumnName(ByteStringer.wrap(result.getColumnName())); + } + if (result.getFamilyName() != null) { + builder.setFamilyName(ByteStringer.wrap(result.getFamilyName())); + } + if (result.getSharedTablesToDelete() != null) { + for (SharedTableState sharedTableState : result.sharedTablesToDelete) { + org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState.Builder sharedTableStateBuilder = + org.apache.phoenix.coprocessor.generated.MetaDataProtos.SharedTableState.newBuilder(); + for (PColumn col : sharedTableState.getColumns()) { + sharedTableStateBuilder.addColumns(PColumnImpl.toProto(col)); } - if (result.getSchema() != null) { - builder.setSchema(PSchema.toProto(result.schema)); + for (PName physicalName : 
sharedTableState.getPhysicalNames()) { + sharedTableStateBuilder.addPhysicalNames(ByteStringer.wrap(physicalName.getBytes())); } - builder.setAutoPartitionNum(result.getAutoPartitionNum()); - if (result.getViewIndexId() != null) { - builder.setViewIndexId(result.getViewIndexId()); + if (sharedTableState.getTenantId() != null) { + sharedTableStateBuilder + .setTenantId(ByteStringer.wrap(sharedTableState.getTenantId().getBytes())); } - builder.setViewIndexIdType(result.getViewIndexIdType() == null - ? MetaDataUtil.getLegacyViewIndexIdDataType().getSqlType() - : result.getViewIndexIdType().getSqlType()); + sharedTableStateBuilder + .setSchemaName(ByteStringer.wrap(sharedTableState.getSchemaName().getBytes())); + sharedTableStateBuilder + .setTableName(ByteStringer.wrap(sharedTableState.getTableName().getBytes())); + sharedTableStateBuilder.setViewIndexId(sharedTableState.getViewIndexId()); + sharedTableStateBuilder + .setViewIndexIdType(sharedTableState.viewIndexIdType.getSqlType()); + builder.addSharedTablesToDelete(sharedTableStateBuilder.build()); } - return builder.build(); } - - public PSchema getSchema() { - return schema; + if (result.getSchema() != null) { + builder.setSchema(PSchema.toProto(result.schema)); } - } - - public static long getPriorVersion() { - Iterator iterator = TIMESTAMP_VERSION_MAP.descendingKeySet().iterator(); - if (!iterator.hasNext()) { - return -1; + builder.setAutoPartitionNum(result.getAutoPartitionNum()); + if (result.getViewIndexId() != null) { + builder.setViewIndexId(result.getViewIndexId()); } - return iterator.next(); + builder.setViewIndexIdType(result.getViewIndexIdType() == null + ? MetaDataUtil.getLegacyViewIndexIdDataType().getSqlType() + : result.getViewIndexIdType().getSqlType()); + } + return builder.build(); } - - public static long getPriorUpgradeVersion() { - return TIMESTAMP_VERSION_MAP.lowerKey(TIMESTAMP_VERSION_MAP.lastKey()); + + public PSchema getSchema() { + return schema; } + } - public static String getVersion(long serverTimestamp) { - /* - * It is possible that when clients are trying to run upgrades concurrently, we could be at an intermediate - * server timestamp. Using floorKey provides us a range based lookup where the timestamp range for a release is - * [timeStampForRelease, timestampForNextRelease). - */ - String version = TIMESTAMP_VERSION_MAP.get(TIMESTAMP_VERSION_MAP.floorKey(serverTimestamp)); - return version; + public static long getPriorVersion() { + Iterator iterator = TIMESTAMP_VERSION_MAP.descendingKeySet().iterator(); + if (!iterator.hasNext()) { + return -1; } + return iterator.next(); + } + + public static long getPriorUpgradeVersion() { + return TIMESTAMP_VERSION_MAP.lowerKey(TIMESTAMP_VERSION_MAP.lastKey()); + } + + public static String getVersion(long serverTimestamp) { + /* + * It is possible that when clients are trying to run upgrades concurrently, we could be at an + * intermediate server timestamp. Using floorKey provides us a range based lookup where the + * timestamp range for a release is [timeStampForRelease, timestampForNextRelease). 
+ */ + String version = TIMESTAMP_VERSION_MAP.get(TIMESTAMP_VERSION_MAP.floorKey(serverTimestamp)); + return version; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/RowKeyMatcher.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/RowKeyMatcher.java index 68a37707dd7..f35f2725889 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/RowKeyMatcher.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/RowKeyMatcher.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,136 +15,132 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.StampedLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** - * This class holds the index, mapping row-key matcher patterns to tableIds. - * Assumes byte[] are UTF-8 encoded. - * This class is thread safe. + * This class holds the index, mapping row-key matcher patterns to tableIds. Assumes byte[] are + * UTF-8 encoded. This class is thread safe. */ public class RowKeyMatcher { - private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyMatcher.class); - - public static final int R = 256; - private TrieNode root = new TrieNode(); - private final AtomicInteger numEntries = new AtomicInteger(0); - - // Basic Trie node implementation - class TrieNode { - private Integer tableId = null; - TrieNode[] next = new TrieNode[R]; - private final StampedLock sl = new StampedLock(); - private TrieNode tryOptimisticGet(int pos) { - long stamp = sl.tryOptimisticRead(); - TrieNode nextNode = this.next[pos]; - if (!sl.validate(stamp)) { - stamp = sl.readLock(); - try { - nextNode = this.next[pos]; - } finally { - sl.unlockRead(stamp); - } - } - return nextNode; + private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyMatcher.class); + + public static final int R = 256; + private TrieNode root = new TrieNode(); + private final AtomicInteger numEntries = new AtomicInteger(0); + + // Basic Trie node implementation + class TrieNode { + private Integer tableId = null; + TrieNode[] next = new TrieNode[R]; + private final StampedLock sl = new StampedLock(); + + private TrieNode tryOptimisticGet(int pos) { + long stamp = sl.tryOptimisticRead(); + TrieNode nextNode = this.next[pos]; + if (!sl.validate(stamp)) { + stamp = sl.readLock(); + try { + nextNode = this.next[pos]; + } finally { + sl.unlockRead(stamp); } + } + return nextNode; + } - protected void put(int pos, byte[] key, int val, int depth) { - long stamp = sl.writeLock(); - try { - this.next[pos] = RowKeyMatcher.this.put(this.next[pos], key, val, depth, true); - } - finally { - sl.unlock(stamp); - } + protected void put(int pos, byte[] key, int val, int depth) { + long stamp = sl.writeLock(); + try { + this.next[pos] = RowKeyMatcher.this.put(this.next[pos], key, val, depth, true); + } finally { + sl.unlock(stamp); + } - } + } - protected void registerTableId(int tableId) { - long stamp = 
sl.writeLock(); - try { - if (this.tableId == null) { - this.tableId = tableId; - numEntries.incrementAndGet(); - } - } - finally { - sl.unlock(stamp); - } + protected void registerTableId(int tableId) { + long stamp = sl.writeLock(); + try { + if (this.tableId == null) { + this.tableId = tableId; + numEntries.incrementAndGet(); } + } finally { + sl.unlock(stamp); + } } - - // return the number of prefixes that this index has. - public int getNumEntries() { - return numEntries.get(); + } + + // return the number of prefixes that this index has. + public int getNumEntries() { + return numEntries.get(); + } + + // return the Id associated with the rowkey. + public Integer match(byte[] rowkey, int offset) { + return get(rowkey, offset); + } + + public Integer get(byte[] key, int offset) { + TrieNode node = get(root, key, offset); + if (node == null) return null; + return node.tableId; + } + + private TrieNode get(TrieNode node, byte[] key, int depth) { + if (node == null) { + return null; } - // return the Id associated with the rowkey. - public Integer match(byte[] rowkey, int offset) { - return get(rowkey, offset); + if (node.tableId != null) { + return node; } - - public Integer get(byte[] key, int offset) { - TrieNode node = get(root, key, offset); - if (node == null) - return null; - return node.tableId; + if (key.length == depth) { + return node; } - private TrieNode get(TrieNode node, byte[] key, int depth) { - if (node == null) { - return null; - } - - if (node.tableId != null) { - return node; - } - if (key.length == depth) { - return node; - } - - int index = key[depth] & 0xFF; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("depth = %d, index = %d", depth, index)); - } - return get(node.tryOptimisticGet(index), key, depth + 1); + int index = key[depth] & 0xFF; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("depth = %d, index = %d", depth, index)); } - // Associate prefix key with the supplied Id. - public void put(byte[] key, int tableId) { - root = put(root, key, tableId, 0, false); - } + return get(node.tryOptimisticGet(index), key, depth + 1); + } - // helper method to recursively add the key to trie. - private TrieNode put(TrieNode node, byte[] key, int tableId, int depth, boolean isLocked) { + // Associate prefix key with the supplied Id. + public void put(byte[] key, int tableId) { + root = put(root, key, tableId, 0, false); + } - if (node == null) { - node = new TrieNode(); - } + // helper method to recursively add the key to trie. 
+ private TrieNode put(TrieNode node, byte[] key, int tableId, int depth, boolean isLocked) { - if (key.length == depth) { - node.registerTableId(tableId); - return node; - } + if (node == null) { + node = new TrieNode(); + } - int index = key[depth] & 0xFF; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("depth = %d, index = %d", depth, index)); - } + if (key.length == depth) { + node.registerTableId(tableId); + return node; + } - if (!isLocked && node.next[index] == null) { - node.put(index, key, tableId, depth + 1); - } - else { - node.next[index] = put(node.next[index], key, tableId, depth + 1, isLocked); - } + int index = key[depth] & 0xFF; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("depth = %d, index = %d", depth, index)); + } - return node; + if (!isLocked && node.next[index] == null) { + node.put(index, key, tableId, depth + 1); + } else { + node.next[index] = put(node.next[index], key, tableId, depth + 1, isLocked); } + + return node; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ScanRegionObserverConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ScanRegionObserverConstants.java index 71c35911aa7..0a36692cce0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ScanRegionObserverConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ScanRegionObserverConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,10 +20,10 @@ import org.apache.hadoop.hbase.util.Bytes; public class ScanRegionObserverConstants { - public static final byte[] DYN_COLS_METADATA_CELL_QUALIFIER = Bytes.toBytes("D#"); - public static final String DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION = - "_DynColsMetadataStoredForMutation"; - // Scan attribute that is set in case we want to project dynamic columns - public static final String WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS = - "_WildcardScanIncludesDynCols"; + public static final byte[] DYN_COLS_METADATA_CELL_QUALIFIER = Bytes.toBytes("D#"); + public static final String DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION = + "_DynColsMetadataStoredForMutation"; + // Scan attribute that is set in case we want to project dynamic columns + public static final String WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS = + "_WildcardScanIncludesDynCols"; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/SequenceRegionObserverConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/SequenceRegionObserverConstants.java index 7b97dd114cb..55706801986 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/SequenceRegionObserverConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/SequenceRegionObserverConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,8 @@ package org.apache.phoenix.coprocessorclient; public class SequenceRegionObserverConstants { - public static final String OPERATION_ATTRIB = "SEQUENCE_OPERATION"; - public static final String MAX_TIMERANGE_ATTRIB = "MAX_TIMERANGE"; - public static final String CURRENT_VALUE_ATTRIB = "CURRENT_VALUE"; - public static final String NUM_TO_ALLOCATE = "NUM_TO_ALLOCATE"; + public static final String OPERATION_ATTRIB = "SEQUENCE_OPERATION"; + public static final String MAX_TIMERANGE_ATTRIB = "MAX_TIMERANGE"; + public static final String CURRENT_VALUE_ATTRIB = "CURRENT_VALUE"; + public static final String NUM_TO_ALLOCATE = "NUM_TO_ALLOCATE"; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ServerCachingProtocol.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ServerCachingProtocol.java index dc85816306e..e146e8130f9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ServerCachingProtocol.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/ServerCachingProtocol.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,40 +22,39 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.Writable; - import org.apache.phoenix.memory.MemoryManager.MemoryChunk; /** - * - * EndPoint coprocessor to send a cache to a region server. - * Used for: - * a) hash joins, to send the smaller side of the join to each region server - * b) secondary indexes, to send the necessary meta data to each region server - * + * EndPoint coprocessor to send a cache to a region server. Used for: a) hash joins, to send the + * smaller side of the join to each region server b) secondary indexes, to send the necessary meta + * data to each region server * @since 0.1 */ public interface ServerCachingProtocol { - public static interface ServerCacheFactory extends Writable { - public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException; - } - /** - * Add the cache to the region server cache. - * @param tenantId the tenantId or null if not applicable - * @param cacheId unique identifier of the cache - * @param cachePtr pointer to the byte array of the cache - * @param txState TODO - * @param cacheFactory factory that converts from byte array to object representation on the server side - * @return true on success and otherwise throws - * @throws SQLException - */ - public boolean addServerCache(byte[] tenantId, byte[] cacheId, ImmutableBytesWritable cachePtr, byte[] txState, ServerCacheFactory cacheFactory) throws SQLException; - /** - * Remove the cache from the region server cache. Called upon completion of - * the operation when cache is no longer needed. 
- * @param tenantId the tenantId or null if not applicable - * @param cacheId unique identifier of the cache - * @return true on success and otherwise throws - * @throws SQLException - */ - public boolean removeServerCache(byte[] tenantId, byte[] cacheId) throws SQLException; -} \ No newline at end of file + public static interface ServerCacheFactory extends Writable { + public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, + boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException; + } + + /** + * Add the cache to the region server cache. + * @param tenantId the tenantId or null if not applicable + * @param cacheId unique identifier of the cache + * @param cachePtr pointer to the byte array of the cache + * @param txState TODO + * @param cacheFactory factory that converts from byte array to object representation on the + * server side + * @return true on success and otherwise throws + */ + public boolean addServerCache(byte[] tenantId, byte[] cacheId, ImmutableBytesWritable cachePtr, + byte[] txState, ServerCacheFactory cacheFactory) throws SQLException; + + /** + * Remove the cache from the region server cache. Called upon completion of the operation when + * cache is no longer needed. + * @param tenantId the tenantId or null if not applicable + * @param cacheId unique identifier of the cache + * @return true on success and otherwise throws + */ + public boolean removeServerCache(byte[] tenantId, byte[] cacheId) throws SQLException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableInfo.java index f92bcb45cab..8751f8cdcc8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,56 +24,56 @@ public class TableInfo { - private final byte[] tenantId; - private final byte[] schema; - private final byte[] name; + private final byte[] tenantId; + private final byte[] schema; + private final byte[] name; - public TableInfo(byte[] tenantId, byte[] schema, byte[] name) { - this.tenantId = tenantId; - this.schema = schema; - this.name = name; - } - - public byte[] getRowKeyPrefix() { - return SchemaUtil.getTableKey(tenantId, schema, name); - } + public TableInfo(byte[] tenantId, byte[] schema, byte[] name) { + this.tenantId = tenantId; + this.schema = schema; + this.name = name; + } - @Override - public String toString() { - return Bytes.toStringBinary(getRowKeyPrefix()); - } - - public byte[] getTenantId() { - return tenantId; - } + public byte[] getRowKeyPrefix() { + return SchemaUtil.getTableKey(tenantId, schema, name); + } - public byte[] getSchemaName() { - return schema; - } + @Override + public String toString() { + return Bytes.toStringBinary(getRowKeyPrefix()); + } - public byte[] getTableName() { - return name; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Arrays.hashCode(name); - result = prime * result + Arrays.hashCode(schema); - result = prime * result + Arrays.hashCode(tenantId); - return result; - } + public byte[] getTenantId() { + return tenantId; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - TableInfo other = (TableInfo) obj; - if (!Arrays.equals(name, other.name)) return false; - if (!Arrays.equals(schema, other.schema)) return false; - if (!Arrays.equals(tenantId, other.tenantId)) return false; - return true; - } + public byte[] getSchemaName() { + return schema; + } + + public byte[] getTableName() { + return name; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(name); + result = prime * result + Arrays.hashCode(schema); + result = prime * result + Arrays.hashCode(tenantId); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + TableInfo other = (TableInfo) obj; + if (!Arrays.equals(name, other.name)) return false; + if (!Arrays.equals(schema, other.schema)) return false; + if (!Arrays.equals(tenantId, other.tenantId)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfo.java index fa9e50b3fbf..0fe3bc68a9f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,101 +15,97 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient; -import org.apache.hadoop.hbase.util.Bytes; - import java.nio.charset.StandardCharsets; import java.util.Arrays; +import org.apache.hadoop.hbase.util.Bytes; + /** * Simple POJO class to hold TTL info */ public class TableTTLInfo implements Comparable { - private final byte[] physicalTableName; - private final byte[] tenantId; - private final byte[] entityName; - private final byte[] matchPattern; - private final int ttl; + private final byte[] physicalTableName; + private final byte[] tenantId; + private final byte[] entityName; + private final byte[] matchPattern; + private final int ttl; - public TableTTLInfo(String physicalTableName, String tenantId, String entityName, String matchPattern, int ttl) { - super(); - this.physicalTableName = physicalTableName.getBytes(StandardCharsets.UTF_8); - this.tenantId = tenantId.getBytes(StandardCharsets.UTF_8); - this.entityName = entityName.getBytes(StandardCharsets.UTF_8); - this.matchPattern = matchPattern.getBytes(StandardCharsets.UTF_8); - this.ttl = ttl; - } + public TableTTLInfo(String physicalTableName, String tenantId, String entityName, + String matchPattern, int ttl) { + super(); + this.physicalTableName = physicalTableName.getBytes(StandardCharsets.UTF_8); + this.tenantId = tenantId.getBytes(StandardCharsets.UTF_8); + this.entityName = entityName.getBytes(StandardCharsets.UTF_8); + this.matchPattern = matchPattern.getBytes(StandardCharsets.UTF_8); + this.ttl = ttl; + } - public TableTTLInfo(byte[] physicalTableName, byte[] tenantId, byte[] entityName, byte[] matchPattern, int ttl) { - super(); - this.physicalTableName = physicalTableName; - this.tenantId = tenantId; - this.matchPattern = matchPattern; - this.entityName = entityName; - this.ttl = ttl; - } + public TableTTLInfo(byte[] physicalTableName, byte[] tenantId, byte[] entityName, + byte[] matchPattern, int ttl) { + super(); + this.physicalTableName = physicalTableName; + this.tenantId = tenantId; + this.matchPattern = matchPattern; + this.entityName = entityName; + this.ttl = ttl; + } - public int getTTL() { - return ttl; - } - public byte[] getTenantId() { - return tenantId; - } + public int getTTL() { + return ttl; + } - public byte[] getEntityName() { - return entityName; - } + public byte[] getTenantId() { + return tenantId; + } - public byte[] getMatchPattern() { - return matchPattern; - } - public byte[] getPhysicalTableName() { - return physicalTableName; - } + public byte[] getEntityName() { + return entityName; + } + public byte[] getMatchPattern() { + return matchPattern; + } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TableTTLInfo that = (TableTTLInfo) o; - return Arrays.equals(physicalTableName, that.physicalTableName) && - Arrays.equals(tenantId, that.tenantId) && - Arrays.equals(entityName, that.entityName); - } + public byte[] getPhysicalTableName() { + return physicalTableName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + 
TableTTLInfo that = (TableTTLInfo) o; + return Arrays.equals(physicalTableName, that.physicalTableName) + && Arrays.equals(tenantId, that.tenantId) && Arrays.equals(entityName, that.entityName); + } - @Override - public int hashCode() { - return Arrays.hashCode(tenantId) + Arrays.hashCode(entityName); + @Override + public int hashCode() { + return Arrays.hashCode(tenantId) + Arrays.hashCode(entityName); + } + + @Override + public int compareTo(Object obj) { + if (this == obj) return 0; + if (obj == null) throw new NullPointerException(); + TableTTLInfo other = (TableTTLInfo) obj; + int result = Bytes.BYTES_COMPARATOR.compare(this.physicalTableName, other.physicalTableName); + if (result == 0) { + result = Bytes.BYTES_COMPARATOR.compare(this.entityName, other.entityName); } - @Override - public int compareTo(Object obj) { - if (this == obj) - return 0; - if (obj == null) - throw new NullPointerException(); - TableTTLInfo other = (TableTTLInfo) obj; - int result = Bytes.BYTES_COMPARATOR.compare(this.physicalTableName,other.physicalTableName); - if (result == 0) { - result = Bytes.BYTES_COMPARATOR.compare(this.entityName,other.entityName); - } - if (result == 0) { - result = Bytes.BYTES_COMPARATOR.compare(this.tenantId, other.tenantId); - } - return result; + if (result == 0) { + result = Bytes.BYTES_COMPARATOR.compare(this.tenantId, other.tenantId); } + return result; + } - @Override - public String toString() { - return "TableTTLInfo { " + - "physicalTableName=" + Bytes.toString(physicalTableName) + - ", tenantId=" + Bytes.toString(tenantId) + - ", entityName=" + Bytes.toString(entityName) + - ", matchPattern=" + Bytes.toStringBinary(matchPattern) + - ", ttl=" + ttl + - " }"; - } + @Override + public String toString() { + return "TableTTLInfo { " + "physicalTableName=" + Bytes.toString(physicalTableName) + + ", tenantId=" + Bytes.toString(tenantId) + ", entityName=" + Bytes.toString(entityName) + + ", matchPattern=" + Bytes.toStringBinary(matchPattern) + ", ttl=" + ttl + " }"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfoCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfoCache.java index 57bed2e5ea1..edd767f54ab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfoCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/TableTTLInfoCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,94 +15,90 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient; import java.util.*; import java.util.concurrent.locks.StampedLock; /** - * Holds a cache of TableTTLInfo objects. - * Maps TableTTLInfo to a generated tableId. - * The generated tableId is a positive number. (>= 0) - * This class is thread-safe. + * Holds a cache of TableTTLInfo objects. Maps TableTTLInfo to a generated tableId. The generated + * tableId is a positive number. (>= 0) This class is thread-safe. 
*/ public class TableTTLInfoCache { - // Forward mapping from ttlInfo -> tableId (integer) - private final Map tableToTableIdMap = new HashMap(); + // Forward mapping from ttlInfo -> tableId (integer) + private final Map tableToTableIdMap = new HashMap(); + + // Reverse mapping from tableId (integer position) -> ttlInfo + private final List cachedInfo = new ArrayList(); + private final StampedLock lock = new StampedLock(); + private int nextId; - // Reverse mapping from tableId (integer position) -> ttlInfo - private final List cachedInfo = new ArrayList(); - private final StampedLock lock = new StampedLock(); - private int nextId; + public int addTable(TableTTLInfo tableRow) { + return putTableIfAbsent(tableRow); + } - public int addTable(TableTTLInfo tableRow) { - return putTableIfAbsent(tableRow); + public int getNumTablesInCache() { + if (cachedInfo.size() != tableToTableIdMap.keySet().size()) { + throw new IllegalStateException(); } + return cachedInfo.size(); + } - public int getNumTablesInCache() { - if (cachedInfo.size() != tableToTableIdMap.keySet().size()) { - throw new IllegalStateException(); - } - return cachedInfo.size(); + public Set getAllTables() { + if (cachedInfo.size() != tableToTableIdMap.keySet().size()) { + throw new IllegalStateException(); } - public Set getAllTables() { - if (cachedInfo.size() != tableToTableIdMap.keySet().size()) { - throw new IllegalStateException(); - } + Set tables = new HashSet(); + tables.addAll(cachedInfo); + return tables; + } - Set tables = new HashSet(); - tables.addAll(cachedInfo); - return tables; + public TableTTLInfo getTableById(Integer id) { + if (id == null) { + return null; } + return cachedInfo.get(id); + } - public TableTTLInfo getTableById(Integer id) { - if (id == null) { - return null; - } - return cachedInfo.get(id); + private Integer tryOptimisticGet(TableTTLInfo newRow) { + long stamp = lock.tryOptimisticRead(); + Integer tableId = tableToTableIdMap.get(newRow); + if (!lock.validate(stamp)) { + stamp = lock.readLock(); + try { + tableId = tableToTableIdMap.get(newRow); + } finally { + lock.unlockRead(stamp); + } } + return tableId; + } - private Integer tryOptimisticGet(TableTTLInfo newRow) { - long stamp = lock.tryOptimisticRead(); - Integer tableId = tableToTableIdMap.get(newRow); - if (!lock.validate(stamp)) { - stamp = lock.readLock(); - try { - tableId = tableToTableIdMap.get(newRow); - } finally { - lock.unlockRead(stamp); - } - } - return tableId; + private int putTableIfAbsent(TableTTLInfo newRow) { + if (newRow == null) { + throw new IllegalArgumentException(); } - private int putTableIfAbsent(TableTTLInfo newRow) { - if (newRow == null) { - throw new IllegalArgumentException(); - } - - // if key does not exists in the forward mapping create one - Integer tableId = tryOptimisticGet(newRow); + // if key does not exists in the forward mapping create one + Integer tableId = tryOptimisticGet(newRow); + if (tableId == null) { + long writeStamp = lock.writeLock(); + try { + tableId = tableToTableIdMap.get(newRow); if (tableId == null) { - long writeStamp = lock.writeLock(); - try { - tableId = tableToTableIdMap.get(newRow); - if (tableId == null) { - tableId = nextId++; - cachedInfo.add(newRow); - if (nextId != cachedInfo.size()) { - throw new IllegalStateException(); - } - tableToTableIdMap.put(newRow, tableId); - } - } - finally { - lock.unlock(writeStamp); - } + tableId = nextId++; + cachedInfo.add(newRow); + if (nextId != cachedInfo.size()) { + throw new IllegalStateException(); + } + tableToTableIdMap.put(newRow, 
tableId); } - return tableId; + } finally { + lock.unlock(writeStamp); + } } + return tableId; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/UngroupedAggregateRegionObserverHelper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/UngroupedAggregateRegionObserverHelper.java index afb5034ade7..fd64b8cc4cf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/UngroupedAggregateRegionObserverHelper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/UngroupedAggregateRegionObserverHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,30 +30,30 @@ import org.apache.phoenix.schema.PTableImpl; public class UngroupedAggregateRegionObserverHelper { - public static byte[] serialize(PTable projectedTable) { - PTableProtos.PTable ptableProto = PTableImpl.toProto(projectedTable); - return ptableProto.toByteArray(); - } + public static byte[] serialize(PTable projectedTable) { + PTableProtos.PTable ptableProto = PTableImpl.toProto(projectedTable); + return ptableProto.toByteArray(); + } - public static byte[] serialize(List selectExpressions) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, selectExpressions.size()); - for (int i = 0; i < selectExpressions.size(); i++) { - Expression expression = selectExpressions.get(i); - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - return stream.toByteArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + public static byte[] serialize(List selectExpressions) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, selectExpressions.size()); + for (int i = 0; i < selectExpressions.size(); i++) { + Expression expression = selectExpressions.get(i); + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } + return stream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/WhereConstantParser.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/WhereConstantParser.java index ab710baf9f5..d787212e502 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/WhereConstantParser.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/WhereConstantParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -42,69 +44,68 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.TableRef; -import org.apache.phoenix.util.MetaDataUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - +import org.apache.phoenix.util.MetaDataUtil; public class WhereConstantParser { - public static PTable addViewInfoToPColumnsIfNeeded(PTable view) throws SQLException { - byte[][] viewColumnConstantsToBe = new byte[view.getColumns().size()][]; - if (view.getViewStatement() == null) { - return view; - } - SelectStatement select = new SQLParser(view.getViewStatement()).parseQuery(); - ParseNode whereNode = select.getWhere(); - ColumnResolver resolver = FromCompiler.getResolver(new TableRef(view)); + public static PTable addViewInfoToPColumnsIfNeeded(PTable view) throws SQLException { + byte[][] viewColumnConstantsToBe = new byte[view.getColumns().size()][]; + if (view.getViewStatement() == null) { + return view; + } + SelectStatement select = new SQLParser(view.getViewStatement()).parseQuery(); + ParseNode whereNode = select.getWhere(); + ColumnResolver resolver = FromCompiler.getResolver(new TableRef(view)); - try (PhoenixConnection conn = getConnectionlessConnection()) { - StatementContext context = new StatementContext(new PhoenixStatement(conn), resolver); + try (PhoenixConnection conn = getConnectionlessConnection()) { + StatementContext context = new StatementContext(new PhoenixStatement(conn), resolver); - Expression expression; - try { - expression = WhereCompiler.compile(context, whereNode); - } catch (ColumnNotFoundException e) { - // if we could not find a column used in the view statement - // (which means its was dropped) this view is not valid any more - return null; - } - CreateTableCompiler.ViewWhereExpressionVisitor visitor = new CreateTableCompiler - .ViewWhereExpressionVisitor(view, viewColumnConstantsToBe); - expression.accept(visitor); + Expression expression; + try { + expression = WhereCompiler.compile(context, whereNode); + } catch (ColumnNotFoundException e) { + // if we could not find a column used in the view statement + // (which means its was dropped) this view is not valid any more + return null; + } + CreateTableCompiler.ViewWhereExpressionVisitor visitor = + new CreateTableCompiler.ViewWhereExpressionVisitor(view, viewColumnConstantsToBe); + expression.accept(visitor); - BitSet isViewColumnReferencedToBe = new BitSet(view.getColumns().size()); - // Used to track column references in a view - ExpressionCompiler expressionCompiler = new CreateTableCompiler - .ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe); - whereNode.accept(expressionCompiler); + BitSet isViewColumnReferencedToBe = new BitSet(view.getColumns().size()); + // Used to track column references in a view + ExpressionCompiler expressionCompiler = + new CreateTableCompiler.ColumnTrackingExpressionCompiler(context, + isViewColumnReferencedToBe); + 
whereNode.accept(expressionCompiler); - List result = Lists.newArrayList(); - for (PColumn column : PTableImpl.getColumnsToClone(view)) { - boolean isViewReferenced = isViewColumnReferencedToBe.get(column.getPosition()); - if ((visitor.isUpdatable() || view.getPKColumns() - .get(MetaDataUtil.getAutoPartitionColIndex(view)).equals(column)) - && viewColumnConstantsToBe[column.getPosition()] != null) { - result.add(new PColumnImpl(column, - viewColumnConstantsToBe[column.getPosition()], isViewReferenced)); - } - // If view is not updatable, viewColumnConstants should be empty. We will still - // inherit our parent viewConstants, but we have no additional ones. - else if (isViewReferenced ){ - result.add(new PColumnImpl(column, column.getViewConstant(), isViewReferenced)); - } else { - result.add(column); - } - } - return PTableImpl.builderWithColumns(view, result) - .build(); + List result = Lists.newArrayList(); + for (PColumn column : PTableImpl.getColumnsToClone(view)) { + boolean isViewReferenced = isViewColumnReferencedToBe.get(column.getPosition()); + if ( + (visitor.isUpdatable() + || view.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(view)).equals(column)) + && viewColumnConstantsToBe[column.getPosition()] != null + ) { + result.add(new PColumnImpl(column, viewColumnConstantsToBe[column.getPosition()], + isViewReferenced)); } + // If view is not updatable, viewColumnConstants should be empty. We will still + // inherit our parent viewConstants, but we have no additional ones. + else if (isViewReferenced) { + result.add(new PColumnImpl(column, column.getViewConstant(), isViewReferenced)); + } else { + result.add(column); + } + } + return PTableImpl.builderWithColumns(view, result).build(); } + } - private static PhoenixConnection getConnectionlessConnection() throws SQLException { - return DriverManager - .getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS) - .unwrap(PhoenixConnection.class); - } + private static PhoenixConnection getConnectionlessConnection() throws SQLException { + return DriverManager.getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS) + .unwrap(PhoenixConnection.class); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSource.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSource.java index 2d810d5fa48..ede603bebf2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSource.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSource.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,207 +24,204 @@ * Interface for metrics about Distributed Metadata Caching */ public interface MetricsMetadataCachingSource extends BaseSource { - // Metrics2 and JMX constants - String METRICS_NAME = "MetadataCaching"; - String METRICS_CONTEXT = "phoenix"; - String METRICS_DESCRIPTION = "Metrics about Distributed Metadata Caching"; - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - String REGIONSERVER_METADATA_CACHE_HITS = "numRegionServerMetadataCacheHits"; - String REGIONSERVER_METADATA_CACHE_HITS_DESC - = "Number of cache hits in PhoenixRegionServerEndpoint " - + "when serving validate ddl timestamp requests."; - - String REGIONSERVER_METADATA_CACHE_MISSES = "numRegionServerMetadataCacheMisses"; - String REGIONSERVER_METADATA_CACHE_MISSES_DESC - = "Number of cache misses in PhoenixRegionServerEndpoint " - + "when serving validate ddl timestamp requests."; - - String VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS = "numValidateLastDDLTimestampRequests"; - String VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC - = "Number of validate last ddl timestamp requests."; - - String METADATA_CACHE_INVALIDATION_OPERATIONS = "numMetadataCacheInvalidationOps"; - String METADATA_CACHE_INVALIDATION_OPERATIONS_DESC = "Number of times we invoke " - + "cache invalidation within a DDL operation"; - - String METADATA_CACHE_INVALIDATION_SUCCESS = "numMetadataCacheInvalidationOpsSuccess"; - String METADATA_CACHE_INVALIDATION_SUCCESS_DESC - = "Number of times cache invalidation was successful."; - - String METADATA_CACHE_INVALIDATION_FAILURE = "numMetadataCacheInvalidationOpsFailure"; - String METADATA_CACHE_INVALIDATION_FAILURE_DESC = "Number of times cache invalidation failed."; - - String METADATA_CACHE_INVALIDATION_RPC_TIME = "metadataCacheInvalidationRpcTimeMs"; - String METADATA_CACHE_INVALIDATION_RPC_TIME_DESC = "Histogram for the time in milliseconds for" - + " cache invalidation RPC"; - String METADATA_CACHE_INVALIDATION_TOTAL_TIME = "metadataCacheInvalidationTotalTimeMs"; - String METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC - = "Histogram for the total time in milliseconds " - + "for cache invalidation on all regionservers"; - - /** - * Report the number of cache hits when validating last ddl timestamps. 
- */ - void incrementRegionServerMetadataCacheHitCount(); + // Metrics2 and JMX constants + String METRICS_NAME = "MetadataCaching"; + String METRICS_CONTEXT = "phoenix"; + String METRICS_DESCRIPTION = "Metrics about Distributed Metadata Caching"; + String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + + String REGIONSERVER_METADATA_CACHE_HITS = "numRegionServerMetadataCacheHits"; + String REGIONSERVER_METADATA_CACHE_HITS_DESC = + "Number of cache hits in PhoenixRegionServerEndpoint " + + "when serving validate ddl timestamp requests."; + + String REGIONSERVER_METADATA_CACHE_MISSES = "numRegionServerMetadataCacheMisses"; + String REGIONSERVER_METADATA_CACHE_MISSES_DESC = + "Number of cache misses in PhoenixRegionServerEndpoint " + + "when serving validate ddl timestamp requests."; + + String VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS = "numValidateLastDDLTimestampRequests"; + String VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC = + "Number of validate last ddl timestamp requests."; + + String METADATA_CACHE_INVALIDATION_OPERATIONS = "numMetadataCacheInvalidationOps"; + String METADATA_CACHE_INVALIDATION_OPERATIONS_DESC = + "Number of times we invoke " + "cache invalidation within a DDL operation"; + + String METADATA_CACHE_INVALIDATION_SUCCESS = "numMetadataCacheInvalidationOpsSuccess"; + String METADATA_CACHE_INVALIDATION_SUCCESS_DESC = + "Number of times cache invalidation was successful."; + + String METADATA_CACHE_INVALIDATION_FAILURE = "numMetadataCacheInvalidationOpsFailure"; + String METADATA_CACHE_INVALIDATION_FAILURE_DESC = "Number of times cache invalidation failed."; + + String METADATA_CACHE_INVALIDATION_RPC_TIME = "metadataCacheInvalidationRpcTimeMs"; + String METADATA_CACHE_INVALIDATION_RPC_TIME_DESC = + "Histogram for the time in milliseconds for" + " cache invalidation RPC"; + String METADATA_CACHE_INVALIDATION_TOTAL_TIME = "metadataCacheInvalidationTotalTimeMs"; + String METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC = + "Histogram for the total time in milliseconds " + "for cache invalidation on all regionservers"; + + /** + * Report the number of cache hits when validating last ddl timestamps. + */ + void incrementRegionServerMetadataCacheHitCount(); + + /** + * Report the number of cache misses when validating last ddl timestamps. + */ + void incrementRegionServerMetadataCacheMissCount(); + + /** + * Report the number of requests for validating last ddl timestamps. + */ + void incrementValidateTimestampRequestCount(); + + /** + * Report number of cache invalidations performed. + */ + void incrementMetadataCacheInvalidationOperationsCount(); + + /** + * Report number of cache invalidations which were successful. + */ + void incrementMetadataCacheInvalidationSuccessCount(); + + /** + * Report number of cache invalidations which failed. + */ + void incrementMetadataCacheInvalidationFailureCount(); + + /** + * Add to the cache invalidation rpc time histogram. + */ + void addMetadataCacheInvalidationRpcTime(long t); + + /** + * Add to the cache invalidation total time histogram. + */ + void addMetadataCacheInvalidationTotalTime(long t); + + /** + * Return current values of all metrics. + * @return {@link MetadataCachingMetricValues} object + */ + @VisibleForTesting + MetadataCachingMetricValues getCurrentMetricValues(); + + /** + * Class to represent values of all metrics related to server metadata caching. 
+ */ + @VisibleForTesting + class MetadataCachingMetricValues { + private long cacheHitCount; + private long cacheMissCount; + private long validateDDLTimestampRequestsCount; + private long cacheInvalidationOpsCount; + private long cacheInvalidationSuccessCount; + private long cacheInvalidationFailureCount; + private long cacheInvalidationRpcTimeCount; + private long cacheInvalidationTotalTimeCount; + + MetadataCachingMetricValues(Builder builder) { + this.cacheHitCount = builder.cacheHitCount; + this.cacheMissCount = builder.cacheMissCount; + this.validateDDLTimestampRequestsCount = builder.validateDDLTimestampRequestsCount; + this.cacheInvalidationOpsCount = builder.cacheInvalidationOpsCount; + this.cacheInvalidationSuccessCount = builder.cacheInvalidationSuccessCount; + this.cacheInvalidationFailureCount = builder.cacheInvalidationFailureCount; + this.cacheInvalidationRpcTimeCount = builder.cacheInvalidationRpcTimeCount; + this.cacheInvalidationTotalTimeCount = builder.cacheInvalidationTotalTimeCount; + } - /** - * Report the number of cache misses when validating last ddl timestamps. - */ - void incrementRegionServerMetadataCacheMissCount(); + public long getCacheHitCount() { + return cacheHitCount; + } - /** - * Report the number of requests for validating last ddl timestamps. - */ - void incrementValidateTimestampRequestCount(); + public long getCacheMissCount() { + return cacheMissCount; + } - /** - * Report number of cache invalidations performed. - */ - void incrementMetadataCacheInvalidationOperationsCount(); + public long getValidateDDLTimestampRequestsCount() { + return validateDDLTimestampRequestsCount; + } - /** - * Report number of cache invalidations which were successful. - */ - void incrementMetadataCacheInvalidationSuccessCount(); + public long getCacheInvalidationOpsCount() { + return cacheInvalidationOpsCount; + } - /** - * Report number of cache invalidations which failed. - */ - void incrementMetadataCacheInvalidationFailureCount(); + public long getCacheInvalidationSuccessCount() { + return cacheInvalidationSuccessCount; + } - /** - * Add to the cache invalidation rpc time histogram. - */ - void addMetadataCacheInvalidationRpcTime(long t); + public long getCacheInvalidationFailureCount() { + return cacheInvalidationFailureCount; + } - /** - * Add to the cache invalidation total time histogram. - * @param t - */ - void addMetadataCacheInvalidationTotalTime(long t); + public long getCacheInvalidationRpcTimeCount() { + return cacheInvalidationRpcTimeCount; + } - /** - * Return current values of all metrics. - * @return {@link MetadataCachingMetricValues} object - */ - @VisibleForTesting - MetadataCachingMetricValues getCurrentMetricValues(); + public long getCacheInvalidationTotalTimeCount() { + return cacheInvalidationTotalTimeCount; + } /** - * Class to represent values of all metrics related to server metadata caching. 
+ * Builder for {@link MetadataCachingMetricValues} */ - @VisibleForTesting - class MetadataCachingMetricValues { - private long cacheHitCount; - private long cacheMissCount; - private long validateDDLTimestampRequestsCount; - private long cacheInvalidationOpsCount; - private long cacheInvalidationSuccessCount; - private long cacheInvalidationFailureCount; - private long cacheInvalidationRpcTimeCount; - private long cacheInvalidationTotalTimeCount; - - MetadataCachingMetricValues(Builder builder) { - this.cacheHitCount = builder.cacheHitCount; - this.cacheMissCount = builder.cacheMissCount; - this.validateDDLTimestampRequestsCount = builder.validateDDLTimestampRequestsCount; - this.cacheInvalidationOpsCount = builder.cacheInvalidationOpsCount; - this.cacheInvalidationSuccessCount = builder.cacheInvalidationSuccessCount; - this.cacheInvalidationFailureCount = builder.cacheInvalidationFailureCount; - this.cacheInvalidationRpcTimeCount = builder.cacheInvalidationRpcTimeCount; - this.cacheInvalidationTotalTimeCount = builder.cacheInvalidationTotalTimeCount; - } - - public long getCacheHitCount() { - return cacheHitCount; - } - - public long getCacheMissCount() { - return cacheMissCount; - } - - public long getValidateDDLTimestampRequestsCount() { - return validateDDLTimestampRequestsCount; - } - - public long getCacheInvalidationOpsCount() { - return cacheInvalidationOpsCount; - } - - public long getCacheInvalidationSuccessCount() { - return cacheInvalidationSuccessCount; - } - - public long getCacheInvalidationFailureCount() { - return cacheInvalidationFailureCount; - } - - public long getCacheInvalidationRpcTimeCount() { - return cacheInvalidationRpcTimeCount; - } - - public long getCacheInvalidationTotalTimeCount() { - return cacheInvalidationTotalTimeCount; - } - - /** - * Builder for {@link MetadataCachingMetricValues} - */ - public static class Builder { - private long cacheHitCount; - private long cacheMissCount; - private long validateDDLTimestampRequestsCount; - private long cacheInvalidationOpsCount; - private long cacheInvalidationSuccessCount; - private long cacheInvalidationFailureCount; - private long cacheInvalidationRpcTimeCount; - private long cacheInvalidationTotalTimeCount; - - public MetadataCachingMetricValues build() { - return new MetadataCachingMetricValues(this); - } - - public Builder setCacheHitCount(long c) { - this.cacheHitCount = c; - return this; - } - public Builder setCacheMissCount(long cacheMissCount) { - this.cacheMissCount = cacheMissCount; - return this; - } - - public Builder setValidateDDLTimestampRequestsCount( - long validateDDLTimestampRequestsCount) { - this.validateDDLTimestampRequestsCount = validateDDLTimestampRequestsCount; - return this; - } - - public Builder setCacheInvalidationOpsCount(long cacheInvalidationOpsCount) { - this.cacheInvalidationOpsCount = cacheInvalidationOpsCount; - return this; - } - - public Builder setCacheInvalidationSuccessCount(long cacheInvalidationSuccessCount) { - this.cacheInvalidationSuccessCount = cacheInvalidationSuccessCount; - return this; - } - - public Builder setCacheInvalidationFailureCount(long cacheInvalidationFailureCount) { - this.cacheInvalidationFailureCount = cacheInvalidationFailureCount; - return this; - } - - public Builder setCacheInvalidationRpcTimeCount(long cacheInvalidationRpcTimeCount) { - this.cacheInvalidationRpcTimeCount = cacheInvalidationRpcTimeCount; - return this; - } - - public Builder setCacheInvalidationTotalTimeCount( - long cacheInvalidationTotalTimeCount) { - 
this.cacheInvalidationTotalTimeCount = cacheInvalidationTotalTimeCount; - return this; - } - } + public static class Builder { + private long cacheHitCount; + private long cacheMissCount; + private long validateDDLTimestampRequestsCount; + private long cacheInvalidationOpsCount; + private long cacheInvalidationSuccessCount; + private long cacheInvalidationFailureCount; + private long cacheInvalidationRpcTimeCount; + private long cacheInvalidationTotalTimeCount; + + public MetadataCachingMetricValues build() { + return new MetadataCachingMetricValues(this); + } + + public Builder setCacheHitCount(long c) { + this.cacheHitCount = c; + return this; + } + + public Builder setCacheMissCount(long cacheMissCount) { + this.cacheMissCount = cacheMissCount; + return this; + } + + public Builder setValidateDDLTimestampRequestsCount(long validateDDLTimestampRequestsCount) { + this.validateDDLTimestampRequestsCount = validateDDLTimestampRequestsCount; + return this; + } + + public Builder setCacheInvalidationOpsCount(long cacheInvalidationOpsCount) { + this.cacheInvalidationOpsCount = cacheInvalidationOpsCount; + return this; + } + + public Builder setCacheInvalidationSuccessCount(long cacheInvalidationSuccessCount) { + this.cacheInvalidationSuccessCount = cacheInvalidationSuccessCount; + return this; + } + + public Builder setCacheInvalidationFailureCount(long cacheInvalidationFailureCount) { + this.cacheInvalidationFailureCount = cacheInvalidationFailureCount; + return this; + } + + public Builder setCacheInvalidationRpcTimeCount(long cacheInvalidationRpcTimeCount) { + this.cacheInvalidationRpcTimeCount = cacheInvalidationRpcTimeCount; + return this; + } + + public Builder setCacheInvalidationTotalTimeCount(long cacheInvalidationTotalTimeCount) { + this.cacheInvalidationTotalTimeCount = cacheInvalidationTotalTimeCount; + return this; + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSourceImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSourceImpl.java index 1fc249bcc2c..dadfaac162d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSourceImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsMetadataCachingSourceImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,99 +24,92 @@ /** * Implementation for tracking Distributed Metadata Caching metrics. 
*/ -public class MetricsMetadataCachingSourceImpl - extends BaseSourceImpl - implements MetricsMetadataCachingSource { +public class MetricsMetadataCachingSourceImpl extends BaseSourceImpl + implements MetricsMetadataCachingSource { - private final MutableFastCounter cacheHitCounter; - private final MutableFastCounter cacheMissCounter; - private final MutableFastCounter validateDDLTimestampRequestCounter; - private final MutableFastCounter cacheInvalidationOpsCounter; - private final MutableFastCounter cacheInvalidationSuccessCounter; - private final MutableFastCounter cacheInvalidationFailureCounter; - private final MetricHistogram cacheInvalidationRpcTimeHistogram; - private final MetricHistogram cacheInvalidationTotalTimeHistogram; + private final MutableFastCounter cacheHitCounter; + private final MutableFastCounter cacheMissCounter; + private final MutableFastCounter validateDDLTimestampRequestCounter; + private final MutableFastCounter cacheInvalidationOpsCounter; + private final MutableFastCounter cacheInvalidationSuccessCounter; + private final MutableFastCounter cacheInvalidationFailureCounter; + private final MetricHistogram cacheInvalidationRpcTimeHistogram; + private final MetricHistogram cacheInvalidationTotalTimeHistogram; - public MetricsMetadataCachingSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } + public MetricsMetadataCachingSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } - public MetricsMetadataCachingSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - cacheHitCounter = getMetricsRegistry().newCounter( - REGIONSERVER_METADATA_CACHE_HITS, REGIONSERVER_METADATA_CACHE_HITS_DESC, 0L); - cacheMissCounter = getMetricsRegistry().newCounter( - REGIONSERVER_METADATA_CACHE_MISSES, REGIONSERVER_METADATA_CACHE_MISSES_DESC, 0L); - validateDDLTimestampRequestCounter = getMetricsRegistry().newCounter( - VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS, VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC, 0L); - cacheInvalidationOpsCounter = getMetricsRegistry().newCounter( - METADATA_CACHE_INVALIDATION_OPERATIONS, - METADATA_CACHE_INVALIDATION_OPERATIONS_DESC, 0L); - cacheInvalidationSuccessCounter = getMetricsRegistry().newCounter( - METADATA_CACHE_INVALIDATION_SUCCESS, METADATA_CACHE_INVALIDATION_SUCCESS_DESC, 0L); - cacheInvalidationFailureCounter = getMetricsRegistry().newCounter( - METADATA_CACHE_INVALIDATION_FAILURE, METADATA_CACHE_INVALIDATION_FAILURE_DESC, 0L); - cacheInvalidationRpcTimeHistogram = getMetricsRegistry().newHistogram( - METADATA_CACHE_INVALIDATION_RPC_TIME, METADATA_CACHE_INVALIDATION_RPC_TIME_DESC); - cacheInvalidationTotalTimeHistogram = getMetricsRegistry().newHistogram( - METADATA_CACHE_INVALIDATION_TOTAL_TIME, METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC); - } + public MetricsMetadataCachingSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + cacheHitCounter = getMetricsRegistry().newCounter(REGIONSERVER_METADATA_CACHE_HITS, + REGIONSERVER_METADATA_CACHE_HITS_DESC, 0L); + cacheMissCounter = getMetricsRegistry().newCounter(REGIONSERVER_METADATA_CACHE_MISSES, + REGIONSERVER_METADATA_CACHE_MISSES_DESC, 0L); + validateDDLTimestampRequestCounter = getMetricsRegistry().newCounter( + 
VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS, VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC, 0L); + cacheInvalidationOpsCounter = getMetricsRegistry().newCounter( + METADATA_CACHE_INVALIDATION_OPERATIONS, METADATA_CACHE_INVALIDATION_OPERATIONS_DESC, 0L); + cacheInvalidationSuccessCounter = getMetricsRegistry().newCounter( + METADATA_CACHE_INVALIDATION_SUCCESS, METADATA_CACHE_INVALIDATION_SUCCESS_DESC, 0L); + cacheInvalidationFailureCounter = getMetricsRegistry().newCounter( + METADATA_CACHE_INVALIDATION_FAILURE, METADATA_CACHE_INVALIDATION_FAILURE_DESC, 0L); + cacheInvalidationRpcTimeHistogram = getMetricsRegistry().newHistogram( + METADATA_CACHE_INVALIDATION_RPC_TIME, METADATA_CACHE_INVALIDATION_RPC_TIME_DESC); + cacheInvalidationTotalTimeHistogram = getMetricsRegistry().newHistogram( + METADATA_CACHE_INVALIDATION_TOTAL_TIME, METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC); + } - @Override - public void incrementRegionServerMetadataCacheHitCount() { - cacheHitCounter.incr(); - } + @Override + public void incrementRegionServerMetadataCacheHitCount() { + cacheHitCounter.incr(); + } - @Override - public void incrementRegionServerMetadataCacheMissCount() { - cacheMissCounter.incr(); - } + @Override + public void incrementRegionServerMetadataCacheMissCount() { + cacheMissCounter.incr(); + } - @Override - public void incrementValidateTimestampRequestCount() { - validateDDLTimestampRequestCounter.incr(); - } + @Override + public void incrementValidateTimestampRequestCount() { + validateDDLTimestampRequestCounter.incr(); + } - @Override - public void addMetadataCacheInvalidationRpcTime(long t) { - cacheInvalidationRpcTimeHistogram.add(t); - } + @Override + public void addMetadataCacheInvalidationRpcTime(long t) { + cacheInvalidationRpcTimeHistogram.add(t); + } - @Override - public void addMetadataCacheInvalidationTotalTime(long t) { - cacheInvalidationTotalTimeHistogram.add(t); - } + @Override + public void addMetadataCacheInvalidationTotalTime(long t) { + cacheInvalidationTotalTimeHistogram.add(t); + } - @Override - public void incrementMetadataCacheInvalidationOperationsCount() { - cacheInvalidationOpsCounter.incr(); - } + @Override + public void incrementMetadataCacheInvalidationOperationsCount() { + cacheInvalidationOpsCounter.incr(); + } - @Override - public void incrementMetadataCacheInvalidationSuccessCount() { - cacheInvalidationSuccessCounter.incr(); - } + @Override + public void incrementMetadataCacheInvalidationSuccessCount() { + cacheInvalidationSuccessCounter.incr(); + } - @Override - public void incrementMetadataCacheInvalidationFailureCount() { - cacheInvalidationFailureCounter.incr(); - } + @Override + public void incrementMetadataCacheInvalidationFailureCount() { + cacheInvalidationFailureCounter.incr(); + } - @Override - public MetadataCachingMetricValues getCurrentMetricValues() { - return new MetadataCachingMetricValues - .Builder() - .setCacheHitCount(cacheHitCounter.value()) - .setCacheMissCount(cacheMissCounter.value()) - .setValidateDDLTimestampRequestsCount(validateDDLTimestampRequestCounter.value()) - .setCacheInvalidationRpcTimeCount(cacheInvalidationRpcTimeHistogram.getCount()) - .setCacheInvalidationTotalTimeCount(cacheInvalidationTotalTimeHistogram.getCount()) - .setCacheInvalidationOpsCount(cacheInvalidationOpsCounter.value()) - .setCacheInvalidationSuccessCount(cacheInvalidationSuccessCounter.value()) - .setCacheInvalidationFailureCount(cacheInvalidationFailureCounter.value()) - .build(); - } + @Override + public MetadataCachingMetricValues getCurrentMetricValues() { + return new 
MetadataCachingMetricValues.Builder().setCacheHitCount(cacheHitCounter.value()) + .setCacheMissCount(cacheMissCounter.value()) + .setValidateDDLTimestampRequestsCount(validateDDLTimestampRequestCounter.value()) + .setCacheInvalidationRpcTimeCount(cacheInvalidationRpcTimeHistogram.getCount()) + .setCacheInvalidationTotalTimeCount(cacheInvalidationTotalTimeHistogram.getCount()) + .setCacheInvalidationOpsCount(cacheInvalidationOpsCounter.value()) + .setCacheInvalidationSuccessCount(cacheInvalidationSuccessCounter.value()) + .setCacheInvalidationFailureCount(cacheInvalidationFailureCounter.value()).build(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixCoprocessorSourceFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixCoprocessorSourceFactory.java index 280a116f686..84921baf5ce 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixCoprocessorSourceFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixCoprocessorSourceFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,43 +15,43 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient.metrics; + /** * Factory object to create various metric sources for phoenix related coprocessors. */ public class MetricsPhoenixCoprocessorSourceFactory { - private static final MetricsPhoenixCoprocessorSourceFactory - INSTANCE = new MetricsPhoenixCoprocessorSourceFactory(); - // Holds the PHOENIX_TTL related metrics. - private static volatile MetricsPhoenixTTLSource phoenixTTLSource; - private static volatile MetricsMetadataCachingSource metadataCachingSource; + private static final MetricsPhoenixCoprocessorSourceFactory INSTANCE = + new MetricsPhoenixCoprocessorSourceFactory(); + // Holds the PHOENIX_TTL related metrics. + private static volatile MetricsPhoenixTTLSource phoenixTTLSource; + private static volatile MetricsMetadataCachingSource metadataCachingSource; - public static MetricsPhoenixCoprocessorSourceFactory getInstance() { - return INSTANCE; - } + public static MetricsPhoenixCoprocessorSourceFactory getInstance() { + return INSTANCE; + } - // return the metric source for PHOENIX_TTL coproc. - public MetricsPhoenixTTLSource getPhoenixTTLSource() { + // return the metric source for PHOENIX_TTL coproc. 
+ public MetricsPhoenixTTLSource getPhoenixTTLSource() { + if (INSTANCE.phoenixTTLSource == null) { + synchronized (MetricsPhoenixTTLSource.class) { if (INSTANCE.phoenixTTLSource == null) { - synchronized (MetricsPhoenixTTLSource.class) { - if (INSTANCE.phoenixTTLSource == null) { - INSTANCE.phoenixTTLSource = new MetricsPhoenixTTLSourceImpl(); - } - } + INSTANCE.phoenixTTLSource = new MetricsPhoenixTTLSourceImpl(); } - return INSTANCE.phoenixTTLSource; + } } + return INSTANCE.phoenixTTLSource; + } - public MetricsMetadataCachingSource getMetadataCachingSource() { + public MetricsMetadataCachingSource getMetadataCachingSource() { + if (INSTANCE.metadataCachingSource == null) { + synchronized (MetricsMetadataCachingSource.class) { if (INSTANCE.metadataCachingSource == null) { - synchronized (MetricsMetadataCachingSource.class) { - if (INSTANCE.metadataCachingSource == null) { - INSTANCE.metadataCachingSource = new MetricsMetadataCachingSourceImpl(); - } - } + INSTANCE.metadataCachingSource = new MetricsMetadataCachingSourceImpl(); } - return INSTANCE.metadataCachingSource; + } } + return INSTANCE.metadataCachingSource; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSource.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSource.java index 39d0e87bc1d..0e7774257f7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSource.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSource.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient.metrics; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -25,35 +24,38 @@ */ public interface MetricsPhoenixTTLSource extends BaseSource { - // Metrics2 and JMX constants - String METRICS_NAME = "PhoenixTTLProcessor"; - String METRICS_CONTEXT = "phoenix"; - String METRICS_DESCRIPTION = "Metrics about the Phoenix TTL Coprocessor"; - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - - String PHOENIX_TTL_MASK_EXPIRED_REQUESTS = "phoenixMaskTTLExpiredRequests"; - String PHOENIX_TTL_MASK_EXPIRED_REQUESTS_DESC = "The number of scan requests to mask PHOENIX TTL expired rows"; - - String PHOENIX_TTL_DELETE_EXPIRED_REQUESTS = "phoenixDeleteTTLExpiredRequests"; - String PHOENIX_TTL_DELETE_EXPIRED_REQUESTS_DESC = "The number of delete requests to delete PHOENIX TTL expired rows"; - - /** - * Report the number of requests to mask TTL expired rows. - */ - long getMaskExpiredRequestCount(); - /** - * Keeps track of the number of requests to mask TTL expired rows. - */ - void incrementMaskExpiredRequestCount(); - - /** - * Report the number of requests to mask TTL expired rows. - */ - long getDeleteExpiredRequestCount(); - /** - * Keeps track of the number of requests to delete TTL expired rows. 
- */ - void incrementDeleteExpiredRequestCount(); + // Metrics2 and JMX constants + String METRICS_NAME = "PhoenixTTLProcessor"; + String METRICS_CONTEXT = "phoenix"; + String METRICS_DESCRIPTION = "Metrics about the Phoenix TTL Coprocessor"; + String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + + String PHOENIX_TTL_MASK_EXPIRED_REQUESTS = "phoenixMaskTTLExpiredRequests"; + String PHOENIX_TTL_MASK_EXPIRED_REQUESTS_DESC = + "The number of scan requests to mask PHOENIX TTL expired rows"; + + String PHOENIX_TTL_DELETE_EXPIRED_REQUESTS = "phoenixDeleteTTLExpiredRequests"; + String PHOENIX_TTL_DELETE_EXPIRED_REQUESTS_DESC = + "The number of delete requests to delete PHOENIX TTL expired rows"; + + /** + * Report the number of requests to mask TTL expired rows. + */ + long getMaskExpiredRequestCount(); + + /** + * Keeps track of the number of requests to mask TTL expired rows. + */ + void incrementMaskExpiredRequestCount(); + + /** + * Report the number of requests to mask TTL expired rows. + */ + long getDeleteExpiredRequestCount(); + + /** + * Keeps track of the number of requests to delete TTL expired rows. + */ + void incrementDeleteExpiredRequestCount(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSourceImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSourceImpl.java index b0e08dca011..a26ee13f945 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSourceImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/metrics/MetricsPhoenixTTLSourceImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,44 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessorclient.metrics; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.lib.MutableFastCounter; /** - * Implementation for tracking {@link org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver} metrics. + * Implementation for tracking {@link org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver} + * metrics. 
*/ public class MetricsPhoenixTTLSourceImpl extends BaseSourceImpl implements MetricsPhoenixTTLSource { - private final MutableFastCounter maskExpiredRequests; - private final MutableFastCounter deleteExpiredRequests; + private final MutableFastCounter maskExpiredRequests; + private final MutableFastCounter deleteExpiredRequests; - public MetricsPhoenixTTLSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } + public MetricsPhoenixTTLSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } - public MetricsPhoenixTTLSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + public MetricsPhoenixTTLSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - maskExpiredRequests = getMetricsRegistry().newCounter(PHOENIX_TTL_MASK_EXPIRED_REQUESTS, - PHOENIX_TTL_MASK_EXPIRED_REQUESTS_DESC, 0L); + maskExpiredRequests = getMetricsRegistry().newCounter(PHOENIX_TTL_MASK_EXPIRED_REQUESTS, + PHOENIX_TTL_MASK_EXPIRED_REQUESTS_DESC, 0L); - deleteExpiredRequests = getMetricsRegistry().newCounter(PHOENIX_TTL_DELETE_EXPIRED_REQUESTS, - PHOENIX_TTL_DELETE_EXPIRED_REQUESTS_DESC, 0L); + deleteExpiredRequests = getMetricsRegistry().newCounter(PHOENIX_TTL_DELETE_EXPIRED_REQUESTS, + PHOENIX_TTL_DELETE_EXPIRED_REQUESTS_DESC, 0L); - } + } - @Override public void incrementMaskExpiredRequestCount() { - maskExpiredRequests.incr(); - } + @Override + public void incrementMaskExpiredRequestCount() { + maskExpiredRequests.incr(); + } - @Override public long getMaskExpiredRequestCount() { - return maskExpiredRequests.value(); - } + @Override + public long getMaskExpiredRequestCount() { + return maskExpiredRequests.value(); + } - @Override public long getDeleteExpiredRequestCount() { return deleteExpiredRequests.value(); } + @Override + public long getDeleteExpiredRequestCount() { + return deleteExpiredRequests.value(); + } - @Override public void incrementDeleteExpiredRequestCount() { deleteExpiredRequests.incr(); } + @Override + public void incrementDeleteExpiredRequestCount() { + deleteExpiredRequests.incr(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/tasks/IndexRebuildTaskConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/tasks/IndexRebuildTaskConstants.java index 8e603010f4e..37b52aae31f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/tasks/IndexRebuildTaskConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/coprocessorclient/tasks/IndexRebuildTaskConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,8 +18,8 @@ package org.apache.phoenix.coprocessorclient.tasks; public class IndexRebuildTaskConstants { - public static final String INDEX_NAME = "IndexName"; - public static final String JOB_ID = "JobID"; - public static final String DISABLE_BEFORE = "DisableBefore"; - public static final String REBUILD_ALL = "RebuildAll"; + public static final String INDEX_NAME = "IndexName"; + public static final String JOB_ID = "JobID"; + public static final String DISABLE_BEFORE = "DisableBefore"; + public static final String REBUILD_ALL = "RebuildAll"; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java index 89f0fe4aeba..fd09cf316b4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,41 +22,46 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.SchemaUtil; - public class DataExceedsCapacityException extends IllegalDataException { - private static final long serialVersionUID = 1L; - - public DataExceedsCapacityException(String message) { - super(new SQLExceptionInfo.Builder( - SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setMessage(message).build().buildException()); - } - - public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale, - String columnName) { - super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY) - .setMessage((columnName == null ? "" : columnName + " ") - + getTypeDisplayString(type, precision, scale)) - .build().buildException()); - } - - private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale) { - return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale)) + ")"; - } - - @Deprecated - public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale, String columnName, ImmutableBytesWritable value) { - super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY) - .setMessage((columnName == null ? "" : columnName + " ") + getTypeDisplayString(type, precision, scale, value)) - .build().buildException()); - } - - @Deprecated - public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale) { - this(type, precision, scale, null, null); - } - - @Deprecated - private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale, ImmutableBytesWritable value) { - return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale)) + ")" + (value == null || value.getLength() == 0 ? 
"" : (" value="+SchemaUtil.toString(type, value))); - } + private static final long serialVersionUID = 1L; + + public DataExceedsCapacityException(String message) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY) + .setMessage(message).build().buildException()); + } + + public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale, + String columnName) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY) + .setMessage( + (columnName == null ? "" : columnName + " ") + getTypeDisplayString(type, precision, scale)) + .build().buildException()); + } + + private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale) { + return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale)) + ")"; + } + + @Deprecated + public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale, + String columnName, ImmutableBytesWritable value) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY) + .setMessage((columnName == null ? "" : columnName + " ") + + getTypeDisplayString(type, precision, scale, value)) + .build().buildException()); + } + + @Deprecated + public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale) { + this(type, precision, scale, null, null); + } + + @Deprecated + private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale, + ImmutableBytesWritable value) { + return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale)) + ")" + + (value == null || value.getLength() == 0 + ? "" + : (" value=" + SchemaUtil.toString(type, value))); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/FailoverSQLException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/FailoverSQLException.java index f8269e9e762..4f9c6a035a6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/FailoverSQLException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/FailoverSQLException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.exception; import java.sql.SQLException; @@ -24,14 +23,14 @@ * A subclass of SQException thrown in case of failover errors. 
*/ public class FailoverSQLException extends SQLException { - private final String haGroupInfo; + private final String haGroupInfo; - public FailoverSQLException(String reason, String haGroupInfo, Throwable cause) { - super("reason=" + reason + ", haGroupInfo=" + haGroupInfo, cause); - this.haGroupInfo = haGroupInfo; - } + public FailoverSQLException(String reason, String haGroupInfo, Throwable cause) { + super("reason=" + reason + ", haGroupInfo=" + haGroupInfo, cause); + this.haGroupInfo = haGroupInfo; + } - public String getFailoverGroup() { - return haGroupInfo; - } + public String getFailoverGroup() { + return haGroupInfo; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/InvalidRegionSplitPolicyException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/InvalidRegionSplitPolicyException.java index ddfa3ee4f46..fdfa2c798b5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/InvalidRegionSplitPolicyException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/InvalidRegionSplitPolicyException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.exception; import java.sql.SQLException; @@ -27,23 +25,20 @@ */ public class InvalidRegionSplitPolicyException extends SQLException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - private static final SQLExceptionCode EXCEPTION_CODE = - SQLExceptionCode.INVALID_REGION_SPLIT_POLICY; - private static final String ERROR_MSG = "Region split policy for table %s" - + " is expected to be among: %s , actual split policy: %s"; + private static final SQLExceptionCode EXCEPTION_CODE = + SQLExceptionCode.INVALID_REGION_SPLIT_POLICY; + private static final String ERROR_MSG = + "Region split policy for table %s" + " is expected to be among: %s , actual split policy: %s"; - public InvalidRegionSplitPolicyException(final String schemaName, - final String tableName, final List expectedSplitPolicies, - final String actualSplitPolicy) { - super(new SQLExceptionInfo.Builder(EXCEPTION_CODE) - .setSchemaName(schemaName) - .setTableName(tableName) - .setMessage(String.format(ERROR_MSG, tableName, - expectedSplitPolicies, actualSplitPolicy)) - .build().toString(), - EXCEPTION_CODE.getSQLState(), EXCEPTION_CODE.getErrorCode(), null); - } + public InvalidRegionSplitPolicyException(final String schemaName, final String tableName, + final List expectedSplitPolicies, final String actualSplitPolicy) { + super( + new SQLExceptionInfo.Builder(EXCEPTION_CODE).setSchemaName(schemaName).setTableName(tableName) + .setMessage(String.format(ERROR_MSG, tableName, expectedSplitPolicies, actualSplitPolicy)) + .build().toString(), + EXCEPTION_CODE.getSQLState(), EXCEPTION_CODE.getErrorCode(), null); + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixIOException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixIOException.java index a277a2043a1..1daa48088e7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixIOException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixIOException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,13 +19,12 @@ import java.sql.SQLException; - public class PhoenixIOException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.IO_EXCEPTION; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.IO_EXCEPTION; - public PhoenixIOException(Throwable e) { - super(e.getMessage(), code.getSQLState(), code.getErrorCode(), e); - } + public PhoenixIOException(Throwable e) { + super(e.getMessage(), code.getSQLState(), code.getErrorCode(), e); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixNonRetryableRuntimeException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixNonRetryableRuntimeException.java index 89f7c06e703..d36b4358abb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixNonRetryableRuntimeException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixNonRetryableRuntimeException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,17 +18,18 @@ package org.apache.phoenix.exception; public class PhoenixNonRetryableRuntimeException extends RuntimeException { - public PhoenixNonRetryableRuntimeException() { } + public PhoenixNonRetryableRuntimeException() { + } - public PhoenixNonRetryableRuntimeException(String msg) { - super(msg); - } + public PhoenixNonRetryableRuntimeException(String msg) { + super(msg); + } - public PhoenixNonRetryableRuntimeException(String msg, Throwable throwable) { - super(msg, throwable); - } + public PhoenixNonRetryableRuntimeException(String msg, Throwable throwable) { + super(msg, throwable); + } - public PhoenixNonRetryableRuntimeException(Throwable throwable) { - super(throwable); - } + public PhoenixNonRetryableRuntimeException(Throwable throwable) { + super(throwable); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixParserException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixParserException.java index 7e84194dbec..13b2f1eb6bf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixParserException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/PhoenixParserException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,86 +25,86 @@ import org.antlr.runtime.Token; import org.antlr.runtime.UnwantedTokenException; - public class PhoenixParserException extends SQLSyntaxErrorException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public static final PhoenixParserException newException(Throwable cause, String[] tokens) { - return new PhoenixParserException(getErrorMessage(cause, tokens), cause); - } - - public PhoenixParserException(String msg, Throwable throwable) { - super(new SQLExceptionInfo.Builder(getErrorCode(throwable)).setRootCause(throwable) - .setMessage(msg).build().toString(), - getErrorCode(throwable).getSQLState(), getErrorCode(throwable).getErrorCode(), throwable); - } + public static final PhoenixParserException newException(Throwable cause, String[] tokens) { + return new PhoenixParserException(getErrorMessage(cause, tokens), cause); + } - public static String getLine(RecognitionException e) { - return Integer.toString(e.token.getLine()); - } + public PhoenixParserException(String msg, Throwable throwable) { + super( + new SQLExceptionInfo.Builder(getErrorCode(throwable)).setRootCause(throwable).setMessage(msg) + .build().toString(), + getErrorCode(throwable).getSQLState(), getErrorCode(throwable).getErrorCode(), throwable); + } - public static String getColumn(RecognitionException e) { - return Integer.toString(e.token.getCharPositionInLine() + 1); - } + public static String getLine(RecognitionException e) { + return Integer.toString(e.token.getLine()); + } - public static String getTokenLocation(RecognitionException e) { - return "line " + getLine(e) + ", column " + getColumn(e) + "."; - } + public static String getColumn(RecognitionException e) { + return Integer.toString(e.token.getCharPositionInLine() + 1); + } + + public static String getTokenLocation(RecognitionException e) { + return "line " + getLine(e) + ", column " + getColumn(e) + "."; + } - public static String getErrorMessage(Throwable e, String[] tokenNames) { - String msg; - if (e instanceof MissingTokenException) { - MissingTokenException mte = (MissingTokenException)e; - String tokenName; - if (mte.expecting== Token.EOF) { - tokenName = "EOF"; - } else { - tokenName = tokenNames[mte.expecting]; - } - msg = "Missing \""+ tokenName +"\" at "+ getTokenLocation(mte); - } else if (e instanceof UnwantedTokenException) { - UnwantedTokenException ute = (UnwantedTokenException)e; - String tokenName; - if (ute.expecting== Token.EOF) { - tokenName = "EOF"; - } else { - tokenName = tokenNames[ute.expecting]; - } - msg = "Unexpected input. Expecting \"" + tokenName + "\", got \"" + ute.getUnexpectedToken().getText() - + "\" at " + getTokenLocation(ute); - } else if (e instanceof MismatchedTokenException) { - MismatchedTokenException mte = (MismatchedTokenException)e; - String tokenName; - if (mte.expecting== Token.EOF) { - tokenName = "EOF"; - } else { - tokenName = tokenNames[mte.expecting]; - } - msg = "Mismatched input. 
Expecting \"" + tokenName + "\", got \"" + mte.token.getText() - + "\" at " + getTokenLocation(mte); - } else if (e instanceof RecognitionException){ - RecognitionException re = (RecognitionException) e; - msg = "Encountered \"" + re.token.getText() + "\" at " + getTokenLocation(re); - } else if (e instanceof UnknownFunctionException) { - UnknownFunctionException ufe = (UnknownFunctionException) e; - msg = "Unknown function: \"" + ufe.getFuncName() + "\"."; - } else { - msg = e.getMessage(); - } - return msg; + public static String getErrorMessage(Throwable e, String[] tokenNames) { + String msg; + if (e instanceof MissingTokenException) { + MissingTokenException mte = (MissingTokenException) e; + String tokenName; + if (mte.expecting == Token.EOF) { + tokenName = "EOF"; + } else { + tokenName = tokenNames[mte.expecting]; + } + msg = "Missing \"" + tokenName + "\" at " + getTokenLocation(mte); + } else if (e instanceof UnwantedTokenException) { + UnwantedTokenException ute = (UnwantedTokenException) e; + String tokenName; + if (ute.expecting == Token.EOF) { + tokenName = "EOF"; + } else { + tokenName = tokenNames[ute.expecting]; + } + msg = "Unexpected input. Expecting \"" + tokenName + "\", got \"" + + ute.getUnexpectedToken().getText() + "\" at " + getTokenLocation(ute); + } else if (e instanceof MismatchedTokenException) { + MismatchedTokenException mte = (MismatchedTokenException) e; + String tokenName; + if (mte.expecting == Token.EOF) { + tokenName = "EOF"; + } else { + tokenName = tokenNames[mte.expecting]; + } + msg = "Mismatched input. Expecting \"" + tokenName + "\", got \"" + mte.token.getText() + + "\" at " + getTokenLocation(mte); + } else if (e instanceof RecognitionException) { + RecognitionException re = (RecognitionException) e; + msg = "Encountered \"" + re.token.getText() + "\" at " + getTokenLocation(re); + } else if (e instanceof UnknownFunctionException) { + UnknownFunctionException ufe = (UnknownFunctionException) e; + msg = "Unknown function: \"" + ufe.getFuncName() + "\"."; + } else { + msg = e.getMessage(); } + return msg; + } - public static SQLExceptionCode getErrorCode(Throwable e) { - if (e instanceof MissingTokenException) { - return SQLExceptionCode.MISSING_TOKEN; - } else if (e instanceof UnwantedTokenException) { - return SQLExceptionCode.UNWANTED_TOKEN; - } else if (e instanceof MismatchedTokenException) { - return SQLExceptionCode.MISMATCHED_TOKEN; - } else if (e instanceof UnknownFunctionException) { - return SQLExceptionCode.UNKNOWN_FUNCTION; - } else { - return SQLExceptionCode.PARSER_ERROR; - } + public static SQLExceptionCode getErrorCode(Throwable e) { + if (e instanceof MissingTokenException) { + return SQLExceptionCode.MISSING_TOKEN; + } else if (e instanceof UnwantedTokenException) { + return SQLExceptionCode.UNWANTED_TOKEN; + } else if (e instanceof MismatchedTokenException) { + return SQLExceptionCode.MISMATCHED_TOKEN; + } else if (e instanceof UnknownFunctionException) { + return SQLExceptionCode.UNKNOWN_FUNCTION; + } else { + return SQLExceptionCode.PARSER_ERROR; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/ResultSetOutOfScanRangeException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/ResultSetOutOfScanRangeException.java index 3aa9347f563..53f73e568a5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/ResultSetOutOfScanRangeException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/ResultSetOutOfScanRangeException.java @@ -7,7 
+7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.exception; import java.sql.SQLException; /** - * The Exception is thrown when the returned row key by the given Scanner does not satisfy the - * Scan boundaries i.e. scan start and end keys. + * The Exception is thrown when the returned row key by the given Scanner does not satisfy the Scan + * boundaries i.e. scan start and end keys. */ public class ResultSetOutOfScanRangeException extends SQLException { - private static final SQLExceptionCode EXCEPTION_CODE = - SQLExceptionCode.ROW_KEY_OUT_OF_SCAN_RANGE; + private static final SQLExceptionCode EXCEPTION_CODE = SQLExceptionCode.ROW_KEY_OUT_OF_SCAN_RANGE; - public ResultSetOutOfScanRangeException(String message) { - super(new SQLExceptionInfo - .Builder(EXCEPTION_CODE) - .setMessage(message) - .build() - .toString(), - EXCEPTION_CODE.getSQLState(), - EXCEPTION_CODE.getErrorCode()); - } + public ResultSetOutOfScanRangeException(String message) { + super(new SQLExceptionInfo.Builder(EXCEPTION_CODE).setMessage(message).build().toString(), + EXCEPTION_CODE.getSQLState(), EXCEPTION_CODE.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/RetriableUpgradeException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/RetriableUpgradeException.java index b0f747d7d21..46b062c6aa7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/RetriableUpgradeException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/RetriableUpgradeException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,11 @@ import java.sql.SQLException; /** - * - * Super class for upgrade related exceptions whose occurrence shouldn't prevent the - * client from retrying or reestablishing connection. + * Super class for upgrade related exceptions whose occurrence shouldn't prevent the client from + * retrying or reestablishing connection. 
*/ public abstract class RetriableUpgradeException extends SQLException { - public RetriableUpgradeException(String message, String sqlState, int sqlExceptionCode) { - super(message, sqlState, sqlExceptionCode); - } + public RetriableUpgradeException(String message, String sqlState, int sqlExceptionCode) { + super(message, sqlState, sqlExceptionCode); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java index 48eff59658b..bb0396ad9ae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.exception; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; + import java.sql.BatchUpdateException; import java.sql.SQLException; import java.sql.SQLTimeoutException; @@ -51,677 +54,798 @@ import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.TypeMismatchException; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.MetaDataUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; - +import org.apache.phoenix.util.MetaDataUtil; /** * Various SQLException Information. Including a vendor-specific errorcode and a standard SQLState. - * - * * @since 1.0 */ public enum SQLExceptionCode { - /** - * Connection Exception (errorcode 01, sqlstate 08) - */ - IO_EXCEPTION(101, "08000", "Unexpected IO exception."), - MALFORMED_CONNECTION_URL(102, "08001", "Malformed connection url."), - CANNOT_ESTABLISH_CONNECTION(103, "08004", "Unable to establish connection."), - - /** - * Data Exception (errorcode 02, sqlstate 22) - */ - ILLEGAL_DATA(201, "22000", "Illegal data."), - DIVIDE_BY_ZERO(202, "22012", "Divide by zero."), - TYPE_MISMATCH(203, "22005", "Type mismatch.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new TypeMismatchException(info.getMessage()); - } - }), - VALUE_IN_UPSERT_NOT_CONSTANT(204, "22008", "Values in UPSERT must evaluate to a constant."), - MALFORMED_URL(205, "22009", "Malformed URL."), - DATA_EXCEEDS_MAX_CAPACITY(206, "22003", "The data exceeds the max capacity for the data type."), - MISSING_MAX_LENGTH(207, "22004", "Max length must be specified for type."), - NONPOSITIVE_MAX_LENGTH(208, "22006", "Max length must have a positive length for type."), - DECIMAL_PRECISION_OUT_OF_RANGE(209, "22003", "Decimal precision outside of range. 
Should be within 1 and " + PDataType.MAX_PRECISION + "."), - SERVER_ARITHMETIC_ERROR(212, "22012", "Arithmetic error on server."), - VALUE_OUTSIDE_RANGE(213,"22003","Value outside range."), - VALUE_IN_LIST_NOT_CONSTANT(214, "22008", "Values in IN must evaluate to a constant."), - SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS(215, "22015", "Single-row sub-query returns more than one row."), - SUBQUERY_RETURNS_DIFFERENT_NUMBER_OF_FIELDS(216, "22016", "Sub-query must return the same number of fields as the left-hand-side expression of 'IN'."), - AMBIGUOUS_JOIN_CONDITION(217, "22017", "Ambiguous or non-equi join condition specified. Consider using table list with where clause."), - CONSTRAINT_VIOLATION(218, "23018", "Constraint violation."), - SUBQUERY_SELECT_LIST_COLUMN_MUST_HAS_ALIAS(219,"23019","Every column in subquery select lists must has alias when used for join."), - ROW_KEY_OUT_OF_SCAN_RANGE(220, "23020", "Row key is out of scan start/stop key boundaries"), - - CONCURRENT_TABLE_MUTATION(301, "23000", "Concurrent modification to table.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new ConcurrentTableMutationException(info.getSchemaName(), info.getTableName()); - } - }), - CANNOT_INDEX_COLUMN_ON_TYPE(302, "23100", "The column cannot be index due to its type."), - INVALID_INDEX_WHERE_WITH_SUBQUERY(303, "23101", - " Index where clause cannot include a subquery."), - CANNOT_EVALUATE_INDEX_WHERE(304, "23102", - "Invalid index where clause. It cannot be evaluated on a data table row."), - /** - * Invalid Cursor State (errorcode 04, sqlstate 24) - */ - CURSOR_BEFORE_FIRST_ROW(401, "24015","Cursor before first row."), - CURSOR_PAST_LAST_ROW(402, "24016", "Cursor past last row."), - - /** - * Syntax Error or Access Rule Violation (errorcode 05, sqlstate 42) - */ - AMBIGUOUS_TABLE(501, "42000", "Table name exists in more than one table schema and is used without being qualified.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new AmbiguousTableException(info.getTableName(), info.getRootCause()); - } - }), - AMBIGUOUS_COLUMN(502, "42702", "Column reference ambiguous or duplicate names.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new AmbiguousColumnException(info.getColumnName(), info.getRootCause()); - } - }), - INDEX_MISSING_PK_COLUMNS(503, "42602", "Index table missing PK Columns."), - COLUMN_NOT_FOUND(504, "42703", "Undefined column.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new ColumnNotFoundException(info.getSchemaName(), info.getTableName(), info.getFamilyName(), info.getColumnName()); - } - }), - READ_ONLY_TABLE(505, "42000", "Table is read only.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new ReadOnlyTableException(info.getMessage(), info.getSchemaName(), info.getTableName(), info.getFamilyName()); - } - }), - CANNOT_DROP_PK(506, "42817", "Primary key column may not be dropped."), - PRIMARY_KEY_MISSING(509, "42888", "The table does not have a primary key."), - PRIMARY_KEY_ALREADY_EXISTS(510, "42889", "The table already has a primary key."), - ORDER_BY_NOT_IN_SELECT_DISTINCT(511, "42890", "All ORDER BY expressions must appear in SELECT DISTINCT:"), - INVALID_PRIMARY_KEY_CONSTRAINT(512, "42891", "Invalid column reference in primary key constraint."), - ARRAY_NOT_ALLOWED_IN_PRIMARY_KEY(513, "42892", "Array type not allowed as 
primary key constraint."), - COLUMN_EXIST_IN_DEF(514, "42892", "A duplicate column name was detected in the object definition or ALTER TABLE/VIEW statement.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new ColumnAlreadyExistsException(info.getSchemaName(), info.getTableName(), info.getColumnName()); - } - }), - ORDER_BY_ARRAY_NOT_SUPPORTED(515, "42893", "ORDER BY of an array type is not allowed."), - NON_EQUALITY_ARRAY_COMPARISON(516, "42894", "Array types may only be compared using = or !=."), - /** - * Invalid Transaction State (errorcode 05, sqlstate 25) - */ - READ_ONLY_CONNECTION(518,"25502","Mutations are not permitted for a read-only connection."), - - VARBINARY_ARRAY_NOT_SUPPORTED(519, "42896", "VARBINARY ARRAY is not supported."), - - /** - * Expression Index exceptions. - */ - AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", "Aggregate expression not allowed in an index."), - NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", "Non-deterministic expression not allowed in an index."), - STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", "Stateless expression not allowed in an index."), - - /** - * Transaction exceptions. - */ - TRANSACTION_CONFLICT_EXCEPTION(523, "42900", "Transaction aborted due to conflict with other mutations."), - TRANSACTION_EXCEPTION(524, "42901", "Transaction aborted due to error."), - - /** - * Union All related errors - */ - SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS(525, "42902", "SELECT column number differs in a Union All query is not allowed."), - SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS(526, "42903", "SELECT column types differ in a Union All query is not allowed."), - - /** - * Row timestamp column related errors - */ - ROWTIMESTAMP_ONE_PK_COL_ONLY(527, "42904", "Only one column that is part of the primary key can be declared as a ROW_TIMESTAMP."), - ROWTIMESTAMP_PK_COL_ONLY(528, "42905", "Only columns part of the primary key can be declared as a ROW_TIMESTAMP."), - ROWTIMESTAMP_CREATE_ONLY(529, "42906", "A column can be added as ROW_TIMESTAMP only in CREATE TABLE."), - ROWTIMESTAMP_COL_INVALID_TYPE(530, "42907", "A column can be added as ROW_TIMESTAMP only if it is of type DATE, BIGINT, TIME OR TIMESTAMP."), - ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW(531, "42908", "Declaring a column as row_timestamp is not allowed for views."), - INVALID_SCN(532, "42909", "Value of SCN cannot be less than zero."), - INVALID_REPLAY_AT(533, "42910", "Value of REPLAY_AT cannot be less than zero."), - UNEQUAL_SCN_AND_BUILD_INDEX_AT(534, "42911", "If both specified, values of CURRENT_SCN and BUILD_INDEX_AT must be equal."), - ONLY_INDEX_UPDATABLE_AT_SCN(535, "42912", "Only an index may be updated when the BUILD_INDEX_AT property is specified"), - PARENT_TABLE_NOT_FOUND(536, "42913", "Can't drop the index because the parent table in the DROP statement is incorrect."), - CANNOT_QUERY_TABLE_WITH_SCN_OLDER_THAN_MAX_LOOKBACK_AGE(538, "42915", - "Cannot use SCN to look further back in the past beyond the configured max lookback age"), - - COMPARISON_UNSUPPORTED(539, "42915", "Comparison not supported for the datatype."), - INVALID_JSON_DATA(540, "42916", "Invalid json data."), - JSON_FRAGMENT_NOT_ALLOWED_IN_INDEX_EXPRESSION(541, "42917", - "Functions returning JSON fragments are not allowed in Index Expression."), - - /** - * HBase and Phoenix specific implementation defined sub-classes. - * Column family related exceptions. - * - * For the following exceptions, use errorcode 10. 
- */ - SINGLE_PK_MAY_NOT_BE_NULL(1000, "42I00", "Single column primary key may not be NULL."), - COLUMN_FAMILY_NOT_FOUND(1001, "42I01", "Undefined column family.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new ColumnFamilyNotFoundException(info.getSchemaName(), info.getTableName(), info.getFamilyName()); - } - }), - PROPERTIES_FOR_FAMILY(1002, "42I02","Properties may not be defined for an unused family name."), - // Primary/row key related exceptions. - PRIMARY_KEY_WITH_FAMILY_NAME(1003, "42J01", "Primary key columns must not have a family name."), - PRIMARY_KEY_OUT_OF_ORDER(1004, "42J02", "Order of columns in primary key constraint must match the order in which they're declared."), - VARBINARY_IN_ROW_KEY(1005, "42J03", - "The VARBINARY/ARRAY type can only be used as the last part of a multi-part row key. " - + "For Binary types, you can use VARBINARY_ENCODED for early part of multi-part row key."), - NOT_NULLABLE_COLUMN_IN_ROW_KEY(1006, "42J04", "Only nullable columns may be added to primary key."), - VARBINARY_LAST_PK(1015, "42J04", "Cannot add column to table when the last PK column is of type VARBINARY or ARRAY."), - NULLABLE_FIXED_WIDTH_LAST_PK(1023, "42J04", "Cannot add column to table when the last PK column is nullable and fixed width."), - CANNOT_MODIFY_VIEW_PK(1036, "42J04", "Cannot modify the primary key of a VIEW if last PK column of parent is variable length."), - BASE_TABLE_COLUMN(1037, "42J04", "Cannot modify columns of base table used by tenant-specific tables."), - UNALLOWED_COLUMN_FAMILY(1090, "42J04", "Column family names should not contain local index column prefix: "+QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX), - // Key/value column related errors - KEY_VALUE_NOT_NULL(1007, "42K01", "A non primary key column may only be declared as not null on tables with immutable rows."), - // View related errors. - VIEW_WITH_TABLE_CONFIG(1008, "42L01", "A view may not contain table configuration properties."), - VIEW_WITH_PROPERTIES(1009, "42L02", "Properties may not be defined for a view."), - // Table related errors that are not in standard code. 
- CANNOT_MUTATE_TABLE(1010, "42M01", "Not allowed to mutate table."), - UNEXPECTED_MUTATION_CODE(1011, "42M02", "Unexpected mutation code."), - TABLE_UNDEFINED(1012, "42M03", "Table undefined.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new TableNotFoundException(info.getSchemaName(), info.getTableName()); - } - }), - INDEX_UNDEFINED(1042, "42M06", "Index undefined.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new IndexNotFoundException(info.getSchemaName(), info.getTableName()); - } - }), - TABLE_ALREADY_EXIST(1013, "42M04", "Table already exists.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new TableAlreadyExistsException(info.getSchemaName(), info.getTableName()); - } - }), - TABLES_NOT_IN_SYNC(1140, "42M05", "Tables not in sync for some properties."), - - // High Availability Errors - HA_CLOSED_AFTER_FAILOVER(1984, "F1Q84", "Connection closed after failover happened.", - i -> new FailoverSQLException(i.getMessage(), i.getHaGroupInfo(), i.getRootCause())), - HA_NO_ACTIVE_CLUSTER(1985, "F1Q85", "No ACTIVE HBase cluster found.", - i -> new FailoverSQLException(i.getMessage(), i.getHaGroupInfo(), i.getRootCause())), - HA_READ_FROM_CLUSTER_FAILED_ON_NULL(1986, "F1Q86", "Unable to read from cluster for null."), - HA_INVALID_PROPERTIES(1987, "F1Q87", "Invalid properties to get a Phoenix HA connection."), - HA_CLUSTER_CAN_NOT_CONNECT(1988, "F1Q88", "Cluster can not serve any requests for this HA group"), - - - // Syntax error - TYPE_NOT_SUPPORTED_FOR_OPERATOR(1014, "42Y01", "The operator does not support the operand type."), - AGGREGATE_IN_GROUP_BY(1016, "42Y26", "Aggregate expressions may not be used in GROUP BY."), - AGGREGATE_IN_WHERE(1017, "42Y26", "Aggregate may not be used in WHERE."), - AGGREGATE_WITH_NOT_GROUP_BY_COLUMN(1018, "42Y27", "Aggregate may not contain columns not in GROUP BY."), - ONLY_AGGREGATE_IN_HAVING_CLAUSE(1019, "42Y26", "Only aggregate maybe used in the HAVING clause."), - UPSERT_COLUMN_NUMBERS_MISMATCH(1020, "42Y60", "Number of columns upserting must match number of values."), - // Table properties exception. 
- INVALID_BUCKET_NUM(1021, "42Y80", "Salt bucket numbers should be with 1 and 256."), - NO_SPLITS_ON_SALTED_TABLE(1022, "42Y81", "Should not specify split points on salted table with default row key order."), - SALT_ONLY_ON_CREATE_TABLE(1024, "42Y82", "Salt bucket number may only be specified when creating a table."), - NO_NORMALIZER_ON_SALTED_TABLE(1147, "42Y86", "Should not enable normalizer on salted table."), - SET_UNSUPPORTED_PROP_ON_ALTER_TABLE(1025, "42Y83", "Unsupported property set in ALTER TABLE command."), - CANNOT_ADD_NOT_NULLABLE_COLUMN(1038, "42Y84", "Only nullable columns may be added for a pre-existing table."), - NO_MUTABLE_INDEXES(1026, "42Y85", "Mutable secondary indexes are only supported for HBase version " + MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) + " and above."), - INVALID_INDEX_STATE_TRANSITION(1028, "42Y87", "Invalid index state transition."), - INVALID_MUTABLE_INDEX_CONFIG(1029, "42Y88", "Mutable secondary indexes must have the " - + IndexManagementUtil.WAL_EDIT_CODEC_CLASS_KEY + " property set to " - + IndexManagementUtil.INDEX_WAL_EDIT_CODEC_CLASS_NAME + " in the hbase-sites.xml of every region server."), - CANNOT_CREATE_DEFAULT(1031, "42Y90", "Cannot create column with a stateful default value."), - CANNOT_CREATE_DEFAULT_ROWTIMESTAMP(1032, "42Y90", "Cannot create ROW_TIMESTAMP column with a default value."), - - CANNOT_CREATE_TENANT_SPECIFIC_TABLE(1030, "42Y89", "Cannot create table for tenant-specific connection."), - DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE(1034, "42Y93", "Default column family may only be specified when creating a table."), - INSUFFICIENT_MULTI_TENANT_COLUMNS(1040, "42Y96", "A MULTI_TENANT table must have two or more PK columns with the first column being NOT NULL."), - TENANTID_IS_OF_WRONG_TYPE(1041, "42Y97", "The TenantId could not be converted to correct format for this table."), - VIEW_WHERE_IS_CONSTANT(1045, "43A02", "WHERE clause in VIEW should not evaluate to a constant."), - CANNOT_UPDATE_VIEW_COLUMN(1046, "43A03", "Column updated in VIEW may not differ from value specified in WHERE clause."), - TOO_MANY_INDEXES(1047, "43A04", "Too many indexes have already been created on the physical table."), - NO_LOCAL_INDEX_ON_TABLE_WITH_IMMUTABLE_ROWS(1048,"43A05","Local indexes aren't allowed on tables with immutable rows."), - COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY(1049, "43A06", "Column family not allowed for table properties."), - COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY(1050, "43A07", "Setting or altering any of the following properties: " - + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.toString() - + " for a column family is not supported since they must be kept in sync. 
You can only set these properties for the entire table."), - CANNOT_ALTER_PROPERTY(1051, "43A08", "Property can be specified or changed only when creating a table."), - CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", "Property cannot be specified for a column family that is not being added or modified."), - CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", "Table level property cannot be set when adding a column."), - - NO_LOCAL_INDEXES(1054, "43A11", "Local secondary indexes are not supported for HBase versions " + - MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MIN_LOCAL_SI_VERSION_DISALLOW) + " through " + MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MAX_LOCAL_SI_VERSION_DISALLOW) + " inclusive."), - UNALLOWED_LOCAL_INDEXES(1055, "43A12", "Local secondary indexes are configured to not be allowed."), - - DESC_VARBINARY_NOT_SUPPORTED(1056, "43A13", "Descending VARBINARY columns not supported."), - NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT(1057, "42Y10", "No table specified for wildcard select."), - UNSUPPORTED_GROUP_BY_EXPRESSIONS(1058, "43A14", "Only a single VARBINARY, ARRAY, or nullable BINARY type may be referenced in a GROUP BY."), - - DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE(1069, "43A69", "Default column family not allowed on VIEW or shared INDEX."), - ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL(1070, "44A01", "Only tables may be declared as transactional."), - TX_MAY_NOT_SWITCH_TO_NON_TX(1071, "44A02", "A transactional table may not be switched to non transactional."), - STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL(1072, "44A03", "Store nulls must be true when a table is transactional."), - CANNOT_START_TRANSACTION_WITH_SCN_SET(1073, "44A04", "Cannot start a transaction on a connection with SCN set."), - TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE(1074, "44A05", "A transactional table must define VERSION of greater than one."), - CANNOT_SPECIFY_SCN_FOR_TXN_TABLE(1075, "44A06", "Cannot use a connection with SCN set for a transactional table."), - NULL_TRANSACTION_CONTEXT(1076, "44A07", "No Transaction Context available."), - TRANSACTION_FAILED(1077, "44A08", "Transaction Failure "), - CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED(1078, "44A09", "Cannot create a transactional table if transactions are disabled."), - CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED(1079, "44A10", "Cannot alter table to be transactional table if transactions are disabled."), - CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP(1080, "44A11", "Cannot create a transactional" + - " table with ROW_TIMESTAMP column."), - CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP(1081, "44A12", "Cannot alter table to be transactional table if transactions are disabled."), - TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT(1082, "44A13", "Cannot set transaction context if transactions are disabled."), - TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH(1083, "44A14", "Cannot set auto flush if transactions are disabled."), - TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL(1084, "44A15", "Cannot set isolation level to TRANSACTION_REPEATABLE_READ if transactions are disabled."), - TX_UNABLE_TO_GET_WRITE_FENCE(1085, "44A16", "Unable to obtain write fence for DDL operation."), - - SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN(1086, "44A17", "Sequence Value not castable to auto-partition id column"), - CANNOT_COERCE_AUTO_PARTITION_ID(1087, "44A18", "Auto-partition id cannot be coerced"), - CANNOT_CREATE_INDEX_ON_MUTABLE_TABLE_WITH_ROWTIMESTAMP(1088, "44A19", "Cannot create an index on a mutable table that has a ROW_TIMESTAMP column."), - 
UNKNOWN_TRANSACTION_PROVIDER(1089,"44A20", "Unknown TRANSACTION_PROVIDER: "), - CANNOT_START_TXN_IF_TXN_DISABLED(1091, "44A22", "Cannot start transaction if transactions are disabled."), - CANNOT_MIX_TXN_PROVIDERS(1092, "44A23", "Cannot mix transaction providers: "), - CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL(1093, "44A24", "Cannot alter table from non transactional to transactional for "), - UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER(1094, "44A25", "Column encoding is not supported for"), - UNSUPPORTED_STORAGE_FORMAT_FOR_TXN_PROVIDER(1095, "44A26", "Only ONE_CELL_PER_COLUMN storage scheme is supported for"), - CANNOT_SWITCH_TXN_PROVIDERS(1096, "44A27", "Cannot switch transaction providers."), - TTL_UNSUPPORTED_FOR_TXN_TABLE(10947, "44A28", "TTL is not supported for"), - CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE(10948, "44A29", "Local indexes cannot be created for"), - CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX(10949, "44A30", "Cannot set or alter the following properties on an index: " - + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.toString()), - CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX(10950, "44A31", "Cannot set or alter " - + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " on an index"), - @Deprecated - PHOENIX_LEVEL_TTL_SUPPORTED_FOR_VIEWS_ONLY(10951, "44A32", PhoenixDatabaseMetaData.TTL - + " property can only be set for views"), - @Deprecated - CANNOT_SET_OR_ALTER_PHOENIX_LEVEL_TTL_FOR_TABLE_WITH_TTL(10952, "44A33", "Cannot set or alter " - + TTL + " property on an table with TTL,"), - ABOVE_INDEX_NON_ASYNC_THRESHOLD(1097, "44A34", "The estimated read size for index creation " - + "is higher than " + QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD+ ". You can edit the" - + " limit or create ASYNC index."), - CANNOT_SET_OR_ALTER_TTL(10953, "44A35", "Cannot set or alter " - + PhoenixDatabaseMetaData.TTL + " property on an view when parent/child " - + "view has TTL set,"), - CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY(10954, "44A36", - CHANGE_DETECTION_ENABLED + " is only supported on tables and views"), - CANNOT_CREATE_INDEX_CHILD_VIEWS_EXTEND_PK(10955, "44A37", "Index can be created " - + "only if none of the child views extends primary key"), - VIEW_CANNOT_EXTEND_PK_WITH_PARENT_INDEXES(10956, "44A38", "View can extend parent primary key" - + " only if none of the parents have indexes in the parent hierarchy"), - MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY(10957, "44A39", "Max lookback age can only be set for tables"), - UNKNOWN_INCLUDE_CHANGE_SCOPE(10958, "44A40", "Unknown change scope for CDC INCLUDE"), - TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY(10959, "44A41", TTL - + "property can only be set for tables and updatable views only"), - - TTL_ALREADY_DEFINED_IN_HIERARCHY(10960, "44A42", TTL - + " property is already defined in hierarchy for this entity"), - VIEW_TTL_NOT_ENABLED(10961,"44A43", TTL + - " property can not be set on views as phoenix.view.ttl.enabled is false"), - - /** Sequence related */ - SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SequenceAlreadyExistsException(info.getSchemaName(), info.getTableName()); - } - }), - SEQUENCE_UNDEFINED(1201, "42Z01", "Sequence undefined.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SequenceNotFoundException(info.getSchemaName(), info.getTableName()); - } - }), - START_WITH_MUST_BE_CONSTANT(1202, "42Z02", "Sequence START WITH value 
must be an integer or long constant."), - INCREMENT_BY_MUST_BE_CONSTANT(1203, "42Z03", "Sequence INCREMENT BY value must be an integer or long constant."), - CACHE_MUST_BE_NON_NEGATIVE_CONSTANT(1204, "42Z04", "Sequence CACHE value must be a non negative integer constant."), - INVALID_USE_OF_NEXT_VALUE_FOR(1205, "42Z05", "NEXT VALUE FOR may only be used as in a SELECT or an UPSERT VALUES expression."), - CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE(1206, "42Z06", "NEXT VALUE FOR must be called before CURRENT VALUE FOR is called."), - EMPTY_SEQUENCE_CACHE(1207, "42Z07", "No more cached sequence values."), - MINVALUE_MUST_BE_CONSTANT(1208, "42Z08", "Sequence MINVALUE must be an integer or long constant."), - MAXVALUE_MUST_BE_CONSTANT(1209, "42Z09", "Sequence MAXVALUE must be an integer or long constant."), - MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE(1210, "42Z10", "Sequence MINVALUE must be less than or equal to MAXVALUE."), - STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE(1211, "42Z11", - "STARTS WITH value must be greater than or equal to MINVALUE and less than or equal to MAXVALUE."), - SEQUENCE_VAL_REACHED_MAX_VALUE(1212, "42Z12", "Reached MAXVALUE of sequence."), - SEQUENCE_VAL_REACHED_MIN_VALUE(1213, "42Z13", "Reached MINVALUE of sequence."), - INCREMENT_BY_MUST_NOT_BE_ZERO(1214, "42Z14", "Sequence INCREMENT BY value cannot be zero."), - NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT(1215, "42Z15", "Sequence NEXT n VALUES FOR must be a positive integer or constant." ), - NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED(1216, "42Z16", "Sequence NEXT n VALUES FOR is not supported for Sequences with the CYCLE flag." ), - AUTO_PARTITION_SEQUENCE_UNDEFINED(1217, "42Z17", "Auto Partition Sequence undefined", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SequenceNotFoundException(info.getSchemaName(), info.getTableName()); - } - }), - CANNOT_UPDATE_PK_ON_DUP_KEY(1218, "42Z18", "Primary key columns may not be udpated in ON DUPLICATE KEY UPDATE clause." ), - CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE(1219, "42Z19", "The ON DUPLICATE KEY UPDATE clause may not be used for immutable tables." ), - CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL(1220, "42Z20", "The ON DUPLICATE KEY UPDATE clause may not be used for transactional tables." ), - DUPLICATE_COLUMN_IN_ON_DUP_KEY(1221, "42Z21", "Duplicate column in ON DUPLICATE KEY UPDATE." ), - AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY(1222, "42Z22", "Aggregation in ON DUPLICATE KEY UPDATE is not allowed." ), - CANNOT_SET_SCN_IN_ON_DUP_KEY(1223, "42Z23", "The CURRENT_SCN may not be set for statement using ON DUPLICATE KEY." ), - CANNOT_USE_ON_DUP_KEY_WITH_GLOBAL_IDX(1224, "42Z24", "The ON DUPLICATE KEY clause may not be used when a table has a global index." ), - - /** Parser error. (errorcode 06, sqlState 42P) */ - PARSER_ERROR(601, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), - MISSING_TOKEN(602, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), - UNWANTED_TOKEN(603, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), - MISMATCHED_TOKEN(604, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), - UNKNOWN_FUNCTION(605, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), - - /** - * Implementation defined class. Execution exceptions (errorcode 11, sqlstate XCL). 
- */ - RESULTSET_CLOSED(1101, "XCL01", "ResultSet is closed."), - GET_TABLE_REGIONS_FAIL(1102, "XCL02", "Cannot get all table regions."), - EXECUTE_QUERY_NOT_APPLICABLE(1103, "XCL03", "executeQuery may not be used."), - EXECUTE_UPDATE_NOT_APPLICABLE(1104, "XCL04", "executeUpdate may not be used."), - SPLIT_POINT_NOT_CONSTANT(1105, "XCL05", "Split points must be constants."), - BATCH_EXCEPTION(1106, "XCL06", "Exception while executing batch."), - EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH(1107, "XCL07", "An executeUpdate is prohibited when the batch is not empty. Use clearBatch to empty the batch first."), - STALE_REGION_BOUNDARY_CACHE(1108, "XCL08", "Cache of region boundaries are out of date.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new StaleRegionBoundaryCacheException(info.getSchemaName(), info.getTableName()); - } - }), - CANNOT_SPLIT_LOCAL_INDEX(1109,"XCL09", "Local index may not be pre-split."), - CANNOT_SALT_LOCAL_INDEX(1110,"XCL10", "Local index may not be salted."), - CONNECTION_CLOSED(1111, "XCL11", "Connectioin is closed."), - - INDEX_FAILURE_BLOCK_WRITE(1120, "XCL20", "Writes to table blocked until index can be updated."), - INDEX_WRITE_FAILURE(1121, "XCL21", "Write to the index failed."), - - UPDATE_CACHE_FREQUENCY_INVALID(1130, "XCL30", "UPDATE_CACHE_FREQUENCY cannot be set to ALWAYS if APPEND_ONLY_SCHEMA is true."), - CANNOT_DROP_COL_APPEND_ONLY_SCHEMA(1131, "XCL31", "Cannot drop column from table that with append only schema."), - CANNOT_DROP_VIEW_REFERENCED_COL(1132, "XCL32", "Cannot drop column that is referenced in view where clause."), - - CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY(1133, "XCL33", "IMMUTABLE_ROWS property can be changed only if the table storage scheme is ONE_CELL_PER_KEYVALUE_COLUMN"), - CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW(1134, "XCL34", "Altering this table property on a view is not allowed"), - - CANNOT_DROP_CDC_INDEX(1153, "XCL53", - "Cannot drop the index associated with CDC"), - IMMUTABLE_TABLE_PROPERTY_INVALID(1135, "XCL35", "IMMUTABLE table property cannot be used with CREATE IMMUTABLE TABLE statement "), - - MAX_COLUMNS_EXCEEDED(1136, "XCL36", "The number of columns exceed the maximum supported by the table's qualifier encoding scheme"), - INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES(1137, "XCL37", "If IMMUTABLE_STORAGE_SCHEME property is not set to ONE_CELL_PER_COLUMN COLUMN_ENCODED_BYTES cannot be 0"), - INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE(1138, "XCL38", "IMMUTABLE_STORAGE_SCHEME property cannot be changed from/to ONE_CELL_PER_COLUMN "), - CANNOT_SET_GUIDE_POST_WIDTH(1139, "XCL39", "Guide post width can only be set on base data tables"), - CANNOT_CREATE_VIEWS_ON_SYSTEM_TABLES(1141, "XCL41", "Cannot create views on tables of type" + - PTableType.SYSTEM), - UNABLE_TO_CREATE_CHILD_LINK(1142, "XCL42", "Error creating parent-child link (Link type=" + - PTable.LinkType.CHILD_TABLE + ") for view"), - UNABLE_TO_UPDATE_PARENT_TABLE(1143, "XCL43", "Error Updating the parent table"), - UNABLE_TO_DELETE_CHILD_LINK(1144, "XCL44", "Error deleting parent-child link (Link type=" + - PTable.LinkType.CHILD_TABLE + ") for view"), - TABLE_NOT_IN_REGION(1145, "XCL45", "No modifications allowed on this table. 
" - + "Table not in this region."), - UNABLE_TO_UPSERT_TASK(1146, "XCL46", - "Error upserting records in SYSTEM.TASK table"), - INVALID_CQ(1148, "XCL48", - "ENCODED_QUALIFIER is less than INITIAL_VALUE."), - DUPLICATE_CQ(1149, "XCL49", - "Duplicate ENCODED_QUALIFIER."), - MISSING_CQ(1150, "XCL49", - "Missing ENCODED_QUALIFIER."), - EXECUTE_BATCH_FOR_STMT_WITH_RESULT_SET(1151, "XCL51", "A batch operation can't include a " - + "statement that produces result sets.", Factory.BATCH_UPDATE_ERROR), - SPLITS_AND_SPLIT_FILE_EXISTS(1152, "XCL52", "Both splits and split file are passed"), - // 1153 code is taken by CANNOT_DROP_CDC_INDEX - SPLIT_FILE_DONT_EXIST(1154, "XCL54", "Either split file don't exist or is not a file"), - UNABLE_TO_OPEN_SPLIT_FILE(1155, "XCL55", "Exception occurred while opening splits file"), - - /** - * Implementation defined class. Phoenix internal error. (errorcode 20, sqlstate INT). - */ - CANNOT_CALL_METHOD_ON_TYPE(2001, "INT01", "Cannot call method on the argument type."), - CLASS_NOT_UNWRAPPABLE(2002, "INT03", "Class not unwrappable."), - PARAM_INDEX_OUT_OF_BOUND(2003, "INT04", "Parameter position is out of range."), - PARAM_VALUE_UNBOUND(2004, "INT05", "Parameter value unbound."), - INTERRUPTED_EXCEPTION(2005, "INT07", "Interrupted exception."), - INCOMPATIBLE_CLIENT_SERVER_JAR(2006, "INT08", "Incompatible jars detected between client and server."), - OUTDATED_JARS(2007, "INT09", "Outdated jars."), - INDEX_METADATA_NOT_FOUND(2008, "INT10", "Unable to find cached index metadata. "), - UNKNOWN_ERROR_CODE(2009, "INT11", "Unknown error code."), - CONCURRENT_UPGRADE_IN_PROGRESS(2010, "INT12", ""), - UPGRADE_REQUIRED(2011, "INT13", ""), - UPGRADE_NOT_REQUIRED(2012, "INT14", ""), - GET_TABLE_ERROR(2013, "INT15", "MetadataEndpointImpl doGetTable called for table not present " + - "on region"), - ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_COERCIBLE(2014, "INT16", "Row Value Constructor Offset Not Coercible to a Primary or Indexed RowKey."), - ROW_VALUE_CONSTRUCTOR_OFFSET_INTERNAL_ERROR(2015, "INT17", "Row Value Constructor Offset had an Unexpected Error."), - ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_ALLOWED_IN_QUERY(2016, "INT18", "Row Value Constructor Offset Not Allowed In Query."), - - UPGRADE_BLOCKED(2017, "INT19", ""), - - OPERATION_TIMED_OUT(6000, "TIM01", "Operation timed out.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - final String reason = info.getMessage() != null - ? 
info.getMessage() : OPERATION_TIMED_OUT.getMessage(); - return new SQLTimeoutException(reason, - OPERATION_TIMED_OUT.getSQLState(), - OPERATION_TIMED_OUT.getErrorCode(), - info.getRootCause()); - } - }), - FUNCTION_UNDEFINED(6001, "42F01", "Function undefined.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new FunctionNotFoundException(info.getFunctionName()); - } + /** + * Connection Exception (errorcode 01, sqlstate 08) + */ + IO_EXCEPTION(101, "08000", "Unexpected IO exception."), + MALFORMED_CONNECTION_URL(102, "08001", "Malformed connection url."), + CANNOT_ESTABLISH_CONNECTION(103, "08004", "Unable to establish connection."), + + /** + * Data Exception (errorcode 02, sqlstate 22) + */ + ILLEGAL_DATA(201, "22000", "Illegal data."), + DIVIDE_BY_ZERO(202, "22012", "Divide by zero."), + TYPE_MISMATCH(203, "22005", "Type mismatch.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new TypeMismatchException(info.getMessage()); + } + }), + VALUE_IN_UPSERT_NOT_CONSTANT(204, "22008", "Values in UPSERT must evaluate to a constant."), + MALFORMED_URL(205, "22009", "Malformed URL."), + DATA_EXCEEDS_MAX_CAPACITY(206, "22003", "The data exceeds the max capacity for the data type."), + MISSING_MAX_LENGTH(207, "22004", "Max length must be specified for type."), + NONPOSITIVE_MAX_LENGTH(208, "22006", "Max length must have a positive length for type."), + DECIMAL_PRECISION_OUT_OF_RANGE(209, "22003", + "Decimal precision outside of range. Should be within 1 and " + PDataType.MAX_PRECISION + "."), + SERVER_ARITHMETIC_ERROR(212, "22012", "Arithmetic error on server."), + VALUE_OUTSIDE_RANGE(213, "22003", "Value outside range."), + VALUE_IN_LIST_NOT_CONSTANT(214, "22008", "Values in IN must evaluate to a constant."), + SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS(215, "22015", + "Single-row sub-query returns more than one row."), + SUBQUERY_RETURNS_DIFFERENT_NUMBER_OF_FIELDS(216, "22016", + "Sub-query must return the same number of fields as the left-hand-side expression of 'IN'."), + AMBIGUOUS_JOIN_CONDITION(217, "22017", + "Ambiguous or non-equi join condition specified. Consider using table list with where clause."), + CONSTRAINT_VIOLATION(218, "23018", "Constraint violation."), + SUBQUERY_SELECT_LIST_COLUMN_MUST_HAS_ALIAS(219, "23019", + "Every column in subquery select lists must has alias when used for join."), + ROW_KEY_OUT_OF_SCAN_RANGE(220, "23020", "Row key is out of scan start/stop key boundaries"), + + CONCURRENT_TABLE_MUTATION(301, "23000", "Concurrent modification to table.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new ConcurrentTableMutationException(info.getSchemaName(), info.getTableName()); + } + }), + CANNOT_INDEX_COLUMN_ON_TYPE(302, "23100", "The column cannot be index due to its type."), + INVALID_INDEX_WHERE_WITH_SUBQUERY(303, "23101", " Index where clause cannot include a subquery."), + CANNOT_EVALUATE_INDEX_WHERE(304, "23102", + "Invalid index where clause. 
It cannot be evaluated on a data table row."), + /** + * Invalid Cursor State (errorcode 04, sqlstate 24) + */ + CURSOR_BEFORE_FIRST_ROW(401, "24015", "Cursor before first row."), + CURSOR_PAST_LAST_ROW(402, "24016", "Cursor past last row."), + + /** + * Syntax Error or Access Rule Violation (errorcode 05, sqlstate 42) + */ + AMBIGUOUS_TABLE(501, "42000", + "Table name exists in more than one table schema and is used without being qualified.", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new AmbiguousTableException(info.getTableName(), info.getRootCause()); + } }), - FUNCTION_ALREADY_EXIST(6002, "42F02", "Function already exists.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new FunctionAlreadyExistsException(info.getSchemaName(), info.getTableName()); - } + AMBIGUOUS_COLUMN(502, "42702", "Column reference ambiguous or duplicate names.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new AmbiguousColumnException(info.getColumnName(), info.getRootCause()); + } + }), + INDEX_MISSING_PK_COLUMNS(503, "42602", "Index table missing PK Columns."), + COLUMN_NOT_FOUND(504, "42703", "Undefined column.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new ColumnNotFoundException(info.getSchemaName(), info.getTableName(), + info.getFamilyName(), info.getColumnName()); + } + }), + READ_ONLY_TABLE(505, "42000", "Table is read only.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new ReadOnlyTableException(info.getMessage(), info.getSchemaName(), + info.getTableName(), info.getFamilyName()); + } + }), + CANNOT_DROP_PK(506, "42817", "Primary key column may not be dropped."), + PRIMARY_KEY_MISSING(509, "42888", "The table does not have a primary key."), + PRIMARY_KEY_ALREADY_EXISTS(510, "42889", "The table already has a primary key."), + ORDER_BY_NOT_IN_SELECT_DISTINCT(511, "42890", + "All ORDER BY expressions must appear in SELECT DISTINCT:"), + INVALID_PRIMARY_KEY_CONSTRAINT(512, "42891", + "Invalid column reference in primary key constraint."), + ARRAY_NOT_ALLOWED_IN_PRIMARY_KEY(513, "42892", + "Array type not allowed as primary key constraint."), + COLUMN_EXIST_IN_DEF(514, "42892", + "A duplicate column name was detected in the object definition or ALTER TABLE/VIEW statement.", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new ColumnAlreadyExistsException(info.getSchemaName(), info.getTableName(), + info.getColumnName()); + } }), - UNALLOWED_USER_DEFINED_FUNCTIONS(6003, "42F03", - "User defined functions are configured to not be allowed. 
To allow configure " - + QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB + " to true."), - - SCHEMA_ALREADY_EXISTS(721, "42M04", "Schema with given name already exists", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SchemaAlreadyExistsException(info.getSchemaName()); - } + ORDER_BY_ARRAY_NOT_SUPPORTED(515, "42893", "ORDER BY of an array type is not allowed."), + NON_EQUALITY_ARRAY_COMPARISON(516, "42894", "Array types may only be compared using = or !=."), + /** + * Invalid Transaction State (errorcode 05, sqlstate 25) + */ + READ_ONLY_CONNECTION(518, "25502", "Mutations are not permitted for a read-only connection."), + + VARBINARY_ARRAY_NOT_SUPPORTED(519, "42896", "VARBINARY ARRAY is not supported."), + + /** + * Expression Index exceptions. + */ + AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX(520, "42897", + "Aggregate expression not allowed in an index."), + NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX(521, "42898", + "Non-deterministic expression not allowed in an index."), + STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX(522, "42899", + "Stateless expression not allowed in an index."), + + /** + * Transaction exceptions. + */ + TRANSACTION_CONFLICT_EXCEPTION(523, "42900", + "Transaction aborted due to conflict with other mutations."), + TRANSACTION_EXCEPTION(524, "42901", "Transaction aborted due to error."), + + /** + * Union All related errors + */ + SELECT_COLUMN_NUM_IN_UNIONALL_DIFFS(525, "42902", + "SELECT column number differs in a Union All query is not allowed."), + SELECT_COLUMN_TYPE_IN_UNIONALL_DIFFS(526, "42903", + "SELECT column types differ in a Union All query is not allowed."), + + /** + * Row timestamp column related errors + */ + ROWTIMESTAMP_ONE_PK_COL_ONLY(527, "42904", + "Only one column that is part of the primary key can be declared as a ROW_TIMESTAMP."), + ROWTIMESTAMP_PK_COL_ONLY(528, "42905", + "Only columns part of the primary key can be declared as a ROW_TIMESTAMP."), + ROWTIMESTAMP_CREATE_ONLY(529, "42906", + "A column can be added as ROW_TIMESTAMP only in CREATE TABLE."), + ROWTIMESTAMP_COL_INVALID_TYPE(530, "42907", + "A column can be added as ROW_TIMESTAMP only if it is of type DATE, BIGINT, TIME OR TIMESTAMP."), + ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW(531, "42908", + "Declaring a column as row_timestamp is not allowed for views."), + INVALID_SCN(532, "42909", "Value of SCN cannot be less than zero."), + INVALID_REPLAY_AT(533, "42910", "Value of REPLAY_AT cannot be less than zero."), + UNEQUAL_SCN_AND_BUILD_INDEX_AT(534, "42911", + "If both specified, values of CURRENT_SCN and BUILD_INDEX_AT must be equal."), + ONLY_INDEX_UPDATABLE_AT_SCN(535, "42912", + "Only an index may be updated when the BUILD_INDEX_AT property is specified"), + PARENT_TABLE_NOT_FOUND(536, "42913", + "Can't drop the index because the parent table in the DROP statement is incorrect."), + CANNOT_QUERY_TABLE_WITH_SCN_OLDER_THAN_MAX_LOOKBACK_AGE(538, "42915", + "Cannot use SCN to look further back in the past beyond the configured max lookback age"), + + COMPARISON_UNSUPPORTED(539, "42915", "Comparison not supported for the datatype."), + INVALID_JSON_DATA(540, "42916", "Invalid json data."), + JSON_FRAGMENT_NOT_ALLOWED_IN_INDEX_EXPRESSION(541, "42917", + "Functions returning JSON fragments are not allowed in Index Expression."), + + /** + * HBase and Phoenix specific implementation defined sub-classes. Column family related + * exceptions. For the following exceptions, use errorcode 10. 
+ */ + SINGLE_PK_MAY_NOT_BE_NULL(1000, "42I00", "Single column primary key may not be NULL."), + COLUMN_FAMILY_NOT_FOUND(1001, "42I01", "Undefined column family.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new ColumnFamilyNotFoundException(info.getSchemaName(), info.getTableName(), + info.getFamilyName()); + } + }), + PROPERTIES_FOR_FAMILY(1002, "42I02", "Properties may not be defined for an unused family name."), + // Primary/row key related exceptions. + PRIMARY_KEY_WITH_FAMILY_NAME(1003, "42J01", "Primary key columns must not have a family name."), + PRIMARY_KEY_OUT_OF_ORDER(1004, "42J02", + "Order of columns in primary key constraint must match the order in which they're declared."), + VARBINARY_IN_ROW_KEY(1005, "42J03", + "The VARBINARY/ARRAY type can only be used as the last part of a multi-part row key. " + + "For Binary types, you can use VARBINARY_ENCODED for early part of multi-part row key."), + NOT_NULLABLE_COLUMN_IN_ROW_KEY(1006, "42J04", + "Only nullable columns may be added to primary key."), + VARBINARY_LAST_PK(1015, "42J04", + "Cannot add column to table when the last PK column is of type VARBINARY or ARRAY."), + NULLABLE_FIXED_WIDTH_LAST_PK(1023, "42J04", + "Cannot add column to table when the last PK column is nullable and fixed width."), + CANNOT_MODIFY_VIEW_PK(1036, "42J04", + "Cannot modify the primary key of a VIEW if last PK column of parent is variable length."), + BASE_TABLE_COLUMN(1037, "42J04", + "Cannot modify columns of base table used by tenant-specific tables."), + UNALLOWED_COLUMN_FAMILY(1090, "42J04", + "Column family names should not contain local index column prefix: " + + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX), + // Key/value column related errors + KEY_VALUE_NOT_NULL(1007, "42K01", + "A non primary key column may only be declared as not null on tables with immutable rows."), + // View related errors. + VIEW_WITH_TABLE_CONFIG(1008, "42L01", "A view may not contain table configuration properties."), + VIEW_WITH_PROPERTIES(1009, "42L02", "Properties may not be defined for a view."), + // Table related errors that are not in standard code. 
+ CANNOT_MUTATE_TABLE(1010, "42M01", "Not allowed to mutate table."), + UNEXPECTED_MUTATION_CODE(1011, "42M02", "Unexpected mutation code."), + TABLE_UNDEFINED(1012, "42M03", "Table undefined.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new TableNotFoundException(info.getSchemaName(), info.getTableName()); + } + }), + INDEX_UNDEFINED(1042, "42M06", "Index undefined.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new IndexNotFoundException(info.getSchemaName(), info.getTableName()); + } + }), + TABLE_ALREADY_EXIST(1013, "42M04", "Table already exists.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new TableAlreadyExistsException(info.getSchemaName(), info.getTableName()); + } + }), + TABLES_NOT_IN_SYNC(1140, "42M05", "Tables not in sync for some properties."), + + // High Availability Errors + HA_CLOSED_AFTER_FAILOVER(1984, "F1Q84", "Connection closed after failover happened.", + i -> new FailoverSQLException(i.getMessage(), i.getHaGroupInfo(), i.getRootCause())), + HA_NO_ACTIVE_CLUSTER(1985, "F1Q85", "No ACTIVE HBase cluster found.", + i -> new FailoverSQLException(i.getMessage(), i.getHaGroupInfo(), i.getRootCause())), + HA_READ_FROM_CLUSTER_FAILED_ON_NULL(1986, "F1Q86", "Unable to read from cluster for null."), + HA_INVALID_PROPERTIES(1987, "F1Q87", "Invalid properties to get a Phoenix HA connection."), + HA_CLUSTER_CAN_NOT_CONNECT(1988, "F1Q88", "Cluster can not serve any requests for this HA group"), + + // Syntax error + TYPE_NOT_SUPPORTED_FOR_OPERATOR(1014, "42Y01", "The operator does not support the operand type."), + AGGREGATE_IN_GROUP_BY(1016, "42Y26", "Aggregate expressions may not be used in GROUP BY."), + AGGREGATE_IN_WHERE(1017, "42Y26", "Aggregate may not be used in WHERE."), + AGGREGATE_WITH_NOT_GROUP_BY_COLUMN(1018, "42Y27", + "Aggregate may not contain columns not in GROUP BY."), + ONLY_AGGREGATE_IN_HAVING_CLAUSE(1019, "42Y26", "Only aggregate maybe used in the HAVING clause."), + UPSERT_COLUMN_NUMBERS_MISMATCH(1020, "42Y60", + "Number of columns upserting must match number of values."), + // Table properties exception. 
+ INVALID_BUCKET_NUM(1021, "42Y80", "Salt bucket numbers should be with 1 and 256."), + NO_SPLITS_ON_SALTED_TABLE(1022, "42Y81", + "Should not specify split points on salted table with default row key order."), + SALT_ONLY_ON_CREATE_TABLE(1024, "42Y82", + "Salt bucket number may only be specified when creating a table."), + NO_NORMALIZER_ON_SALTED_TABLE(1147, "42Y86", "Should not enable normalizer on salted table."), + SET_UNSUPPORTED_PROP_ON_ALTER_TABLE(1025, "42Y83", + "Unsupported property set in ALTER TABLE command."), + CANNOT_ADD_NOT_NULLABLE_COLUMN(1038, "42Y84", + "Only nullable columns may be added for a pre-existing table."), + NO_MUTABLE_INDEXES(1026, "42Y85", + "Mutable secondary indexes are only supported for HBase version " + + MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) + + " and above."), + INVALID_INDEX_STATE_TRANSITION(1028, "42Y87", "Invalid index state transition."), + INVALID_MUTABLE_INDEX_CONFIG(1029, "42Y88", + "Mutable secondary indexes must have the " + IndexManagementUtil.WAL_EDIT_CODEC_CLASS_KEY + + " property set to " + IndexManagementUtil.INDEX_WAL_EDIT_CODEC_CLASS_NAME + + " in the hbase-sites.xml of every region server."), + CANNOT_CREATE_DEFAULT(1031, "42Y90", "Cannot create column with a stateful default value."), + CANNOT_CREATE_DEFAULT_ROWTIMESTAMP(1032, "42Y90", + "Cannot create ROW_TIMESTAMP column with a default value."), + + CANNOT_CREATE_TENANT_SPECIFIC_TABLE(1030, "42Y89", + "Cannot create table for tenant-specific connection."), + DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE(1034, "42Y93", + "Default column family may only be specified when creating a table."), + INSUFFICIENT_MULTI_TENANT_COLUMNS(1040, "42Y96", + "A MULTI_TENANT table must have two or more PK columns with the first column being NOT NULL."), + TENANTID_IS_OF_WRONG_TYPE(1041, "42Y97", + "The TenantId could not be converted to correct format for this table."), + VIEW_WHERE_IS_CONSTANT(1045, "43A02", "WHERE clause in VIEW should not evaluate to a constant."), + CANNOT_UPDATE_VIEW_COLUMN(1046, "43A03", + "Column updated in VIEW may not differ from value specified in WHERE clause."), + TOO_MANY_INDEXES(1047, "43A04", + "Too many indexes have already been created on the physical table."), + NO_LOCAL_INDEX_ON_TABLE_WITH_IMMUTABLE_ROWS(1048, "43A05", + "Local indexes aren't allowed on tables with immutable rows."), + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY(1049, "43A06", + "Column family not allowed for table properties."), + COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY(1050, "43A07", + "Setting or altering any of the following properties: " + + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.toString() + + " for a column family is not supported since they must be kept in sync. 
You can only set these properties for the entire table."), + CANNOT_ALTER_PROPERTY(1051, "43A08", + "Property can be specified or changed only when creating a table."), + CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED(1052, "43A09", + "Property cannot be specified for a column family that is not being added or modified."), + CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN(1053, "43A10", + "Table level property cannot be set when adding a column."), + + NO_LOCAL_INDEXES(1054, "43A11", + "Local secondary indexes are not supported for HBase versions " + + MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MIN_LOCAL_SI_VERSION_DISALLOW) + + " through " + + MetaDataUtil.decodeHBaseVersionAsString(MetaDataProtocol.MAX_LOCAL_SI_VERSION_DISALLOW) + + " inclusive."), + UNALLOWED_LOCAL_INDEXES(1055, "43A12", + "Local secondary indexes are configured to not be allowed."), + + DESC_VARBINARY_NOT_SUPPORTED(1056, "43A13", "Descending VARBINARY columns not supported."), + NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT(1057, "42Y10", "No table specified for wildcard select."), + UNSUPPORTED_GROUP_BY_EXPRESSIONS(1058, "43A14", + "Only a single VARBINARY, ARRAY, or nullable BINARY type may be referenced in a GROUP BY."), + + DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE(1069, "43A69", + "Default column family not allowed on VIEW or shared INDEX."), + ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL(1070, "44A01", + "Only tables may be declared as transactional."), + TX_MAY_NOT_SWITCH_TO_NON_TX(1071, "44A02", + "A transactional table may not be switched to non transactional."), + STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL(1072, "44A03", + "Store nulls must be true when a table is transactional."), + CANNOT_START_TRANSACTION_WITH_SCN_SET(1073, "44A04", + "Cannot start a transaction on a connection with SCN set."), + TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE(1074, "44A05", + "A transactional table must define VERSION of greater than one."), + CANNOT_SPECIFY_SCN_FOR_TXN_TABLE(1075, "44A06", + "Cannot use a connection with SCN set for a transactional table."), + NULL_TRANSACTION_CONTEXT(1076, "44A07", "No Transaction Context available."), + TRANSACTION_FAILED(1077, "44A08", "Transaction Failure "), + CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED(1078, "44A09", + "Cannot create a transactional table if transactions are disabled."), + CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED(1079, "44A10", + "Cannot alter table to be transactional table if transactions are disabled."), + CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP(1080, "44A11", + "Cannot create a transactional" + " table with ROW_TIMESTAMP column."), + CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP(1081, "44A12", + "Cannot alter table to be transactional table if transactions are disabled."), + TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT(1082, "44A13", + "Cannot set transaction context if transactions are disabled."), + TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH(1083, "44A14", + "Cannot set auto flush if transactions are disabled."), + TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL(1084, "44A15", + "Cannot set isolation level to TRANSACTION_REPEATABLE_READ if transactions are disabled."), + TX_UNABLE_TO_GET_WRITE_FENCE(1085, "44A16", "Unable to obtain write fence for DDL operation."), + + SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN(1086, "44A17", + "Sequence Value not castable to auto-partition id column"), + CANNOT_COERCE_AUTO_PARTITION_ID(1087, "44A18", "Auto-partition id cannot be coerced"), + CANNOT_CREATE_INDEX_ON_MUTABLE_TABLE_WITH_ROWTIMESTAMP(1088, "44A19", + "Cannot create an index on a mutable table that has a 
ROW_TIMESTAMP column."), + UNKNOWN_TRANSACTION_PROVIDER(1089, "44A20", "Unknown TRANSACTION_PROVIDER: "), + CANNOT_START_TXN_IF_TXN_DISABLED(1091, "44A22", + "Cannot start transaction if transactions are disabled."), + CANNOT_MIX_TXN_PROVIDERS(1092, "44A23", "Cannot mix transaction providers: "), + CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL(1093, "44A24", + "Cannot alter table from non transactional to transactional for "), + UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER(1094, "44A25", + "Column encoding is not supported for"), + UNSUPPORTED_STORAGE_FORMAT_FOR_TXN_PROVIDER(1095, "44A26", + "Only ONE_CELL_PER_COLUMN storage scheme is supported for"), + CANNOT_SWITCH_TXN_PROVIDERS(1096, "44A27", "Cannot switch transaction providers."), + TTL_UNSUPPORTED_FOR_TXN_TABLE(10947, "44A28", "TTL is not supported for"), + CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE(10948, "44A29", "Local indexes cannot be created for"), + CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX(10949, "44A30", + "Cannot set or alter the following properties on an index: " + + MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.toString()), + CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX(10950, "44A31", + "Cannot set or alter " + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " on an index"), + @Deprecated + PHOENIX_LEVEL_TTL_SUPPORTED_FOR_VIEWS_ONLY(10951, "44A32", + PhoenixDatabaseMetaData.TTL + " property can only be set for views"), + @Deprecated + CANNOT_SET_OR_ALTER_PHOENIX_LEVEL_TTL_FOR_TABLE_WITH_TTL(10952, "44A33", + "Cannot set or alter " + TTL + " property on an table with TTL,"), + ABOVE_INDEX_NON_ASYNC_THRESHOLD(1097, "44A34", + "The estimated read size for index creation " + "is higher than " + + QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD + ". You can edit the" + + " limit or create ASYNC index."), + CANNOT_SET_OR_ALTER_TTL(10953, "44A35", + "Cannot set or alter " + PhoenixDatabaseMetaData.TTL + " property on an view when parent/child " + + "view has TTL set,"), + CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY(10954, "44A36", + CHANGE_DETECTION_ENABLED + " is only supported on tables and views"), + CANNOT_CREATE_INDEX_CHILD_VIEWS_EXTEND_PK(10955, "44A37", + "Index can be created " + "only if none of the child views extends primary key"), + VIEW_CANNOT_EXTEND_PK_WITH_PARENT_INDEXES(10956, "44A38", + "View can extend parent primary key" + + " only if none of the parents have indexes in the parent hierarchy"), + MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY(10957, "44A39", + "Max lookback age can only be set for tables"), + UNKNOWN_INCLUDE_CHANGE_SCOPE(10958, "44A40", "Unknown change scope for CDC INCLUDE"), + TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY(10959, "44A41", + TTL + "property can only be set for tables and updatable views only"), + + TTL_ALREADY_DEFINED_IN_HIERARCHY(10960, "44A42", + TTL + " property is already defined in hierarchy for this entity"), + VIEW_TTL_NOT_ENABLED(10961, "44A43", + TTL + " property can not be set on views as phoenix.view.ttl.enabled is false"), + + /** Sequence related */ + SEQUENCE_ALREADY_EXIST(1200, "42Z00", "Sequence already exists.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SequenceAlreadyExistsException(info.getSchemaName(), info.getTableName()); + } + }), + SEQUENCE_UNDEFINED(1201, "42Z01", "Sequence undefined.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SequenceNotFoundException(info.getSchemaName(), info.getTableName()); + } + }), + 
START_WITH_MUST_BE_CONSTANT(1202, "42Z02", + "Sequence START WITH value must be an integer or long constant."), + INCREMENT_BY_MUST_BE_CONSTANT(1203, "42Z03", + "Sequence INCREMENT BY value must be an integer or long constant."), + CACHE_MUST_BE_NON_NEGATIVE_CONSTANT(1204, "42Z04", + "Sequence CACHE value must be a non negative integer constant."), + INVALID_USE_OF_NEXT_VALUE_FOR(1205, "42Z05", + "NEXT VALUE FOR may only be used as in a SELECT or an UPSERT VALUES expression."), + CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE(1206, "42Z06", + "NEXT VALUE FOR must be called before CURRENT VALUE FOR is called."), + EMPTY_SEQUENCE_CACHE(1207, "42Z07", "No more cached sequence values."), + MINVALUE_MUST_BE_CONSTANT(1208, "42Z08", + "Sequence MINVALUE must be an integer or long constant."), + MAXVALUE_MUST_BE_CONSTANT(1209, "42Z09", + "Sequence MAXVALUE must be an integer or long constant."), + MINVALUE_MUST_BE_LESS_THAN_OR_EQUAL_TO_MAXVALUE(1210, "42Z10", + "Sequence MINVALUE must be less than or equal to MAXVALUE."), + STARTS_WITH_MUST_BE_BETWEEN_MIN_MAX_VALUE(1211, "42Z11", + "STARTS WITH value must be greater than or equal to MINVALUE and less than or equal to MAXVALUE."), + SEQUENCE_VAL_REACHED_MAX_VALUE(1212, "42Z12", "Reached MAXVALUE of sequence."), + SEQUENCE_VAL_REACHED_MIN_VALUE(1213, "42Z13", "Reached MINVALUE of sequence."), + INCREMENT_BY_MUST_NOT_BE_ZERO(1214, "42Z14", "Sequence INCREMENT BY value cannot be zero."), + NUM_SEQ_TO_ALLOCATE_MUST_BE_CONSTANT(1215, "42Z15", + "Sequence NEXT n VALUES FOR must be a positive integer or constant."), + NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED(1216, "42Z16", + "Sequence NEXT n VALUES FOR is not supported for Sequences with the CYCLE flag."), + AUTO_PARTITION_SEQUENCE_UNDEFINED(1217, "42Z17", "Auto Partition Sequence undefined", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SequenceNotFoundException(info.getSchemaName(), info.getTableName()); + } }), - SCHEMA_NOT_FOUND(722, "43M05", "Schema does not exist", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SchemaNotFoundException(info.getSchemaName()); - } + CANNOT_UPDATE_PK_ON_DUP_KEY(1218, "42Z18", + "Primary key columns may not be udpated in ON DUPLICATE KEY UPDATE clause."), + CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE(1219, "42Z19", + "The ON DUPLICATE KEY UPDATE clause may not be used for immutable tables."), + CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL(1220, "42Z20", + "The ON DUPLICATE KEY UPDATE clause may not be used for transactional tables."), + DUPLICATE_COLUMN_IN_ON_DUP_KEY(1221, "42Z21", "Duplicate column in ON DUPLICATE KEY UPDATE."), + AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY(1222, "42Z22", + "Aggregation in ON DUPLICATE KEY UPDATE is not allowed."), + CANNOT_SET_SCN_IN_ON_DUP_KEY(1223, "42Z23", + "The CURRENT_SCN may not be set for statement using ON DUPLICATE KEY."), + CANNOT_USE_ON_DUP_KEY_WITH_GLOBAL_IDX(1224, "42Z24", + "The ON DUPLICATE KEY clause may not be used when a table has a global index."), + + /** Parser error. (errorcode 06, sqlState 42P) */ + PARSER_ERROR(601, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), + MISSING_TOKEN(602, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), + UNWANTED_TOKEN(603, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), + MISMATCHED_TOKEN(604, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), + UNKNOWN_FUNCTION(605, "42P00", "Syntax error.", Factory.SYNTAX_ERROR), + + /** + * Implementation defined class. Execution exceptions (errorcode 11, sqlstate XCL). 
+ */ + RESULTSET_CLOSED(1101, "XCL01", "ResultSet is closed."), + GET_TABLE_REGIONS_FAIL(1102, "XCL02", "Cannot get all table regions."), + EXECUTE_QUERY_NOT_APPLICABLE(1103, "XCL03", "executeQuery may not be used."), + EXECUTE_UPDATE_NOT_APPLICABLE(1104, "XCL04", "executeUpdate may not be used."), + SPLIT_POINT_NOT_CONSTANT(1105, "XCL05", "Split points must be constants."), + BATCH_EXCEPTION(1106, "XCL06", "Exception while executing batch."), + EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH(1107, "XCL07", + "An executeUpdate is prohibited when the batch is not empty. Use clearBatch to empty the batch first."), + STALE_REGION_BOUNDARY_CACHE(1108, "XCL08", "Cache of region boundaries are out of date.", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new StaleRegionBoundaryCacheException(info.getSchemaName(), info.getTableName()); + } }), - CANNOT_MUTATE_SCHEMA(723, "43M06", "Cannot mutate schema as schema has existing tables"), - SCHEMA_NOT_ALLOWED(724, "43M07", "Schema name not allowed!!"), - CREATE_SCHEMA_NOT_ALLOWED(725, "43M08", "Cannot create schema because config " - + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " for enabling name space mapping isn't enabled."), - INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES(726, "43M10", " Inconsistent namespace mapping properties."), - ASYNC_NOT_ALLOWED(727, "43M11", " ASYNC option is not allowed."), - NEW_CONNECTION_THROTTLED(728, "410M1", "Could not create connection " + - "because this client already has the maximum number" + - " of connections to the target cluster."), - MAX_MUTATION_SIZE_EXCEEDED(729, "LIM01", "MutationState size is bigger" + - " than maximum allowed number of rows, try upserting rows in smaller batches or " + - "using autocommit on for deletes.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new MaxMutationSizeExceededException( - info.getMaxMutationSize(), info.getMutationSize()); - } + CANNOT_SPLIT_LOCAL_INDEX(1109, "XCL09", "Local index may not be pre-split."), + CANNOT_SALT_LOCAL_INDEX(1110, "XCL10", "Local index may not be salted."), + CONNECTION_CLOSED(1111, "XCL11", "Connectioin is closed."), + + INDEX_FAILURE_BLOCK_WRITE(1120, "XCL20", "Writes to table blocked until index can be updated."), + INDEX_WRITE_FAILURE(1121, "XCL21", "Write to the index failed."), + + UPDATE_CACHE_FREQUENCY_INVALID(1130, "XCL30", + "UPDATE_CACHE_FREQUENCY cannot be set to ALWAYS if APPEND_ONLY_SCHEMA is true."), + CANNOT_DROP_COL_APPEND_ONLY_SCHEMA(1131, "XCL31", + "Cannot drop column from table that with append only schema."), + CANNOT_DROP_VIEW_REFERENCED_COL(1132, "XCL32", + "Cannot drop column that is referenced in view where clause."), + + CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY(1133, "XCL33", + "IMMUTABLE_ROWS property can be changed only if the table storage scheme is ONE_CELL_PER_KEYVALUE_COLUMN"), + CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW(1134, "XCL34", + "Altering this table property on a view is not allowed"), + + CANNOT_DROP_CDC_INDEX(1153, "XCL53", "Cannot drop the index associated with CDC"), + IMMUTABLE_TABLE_PROPERTY_INVALID(1135, "XCL35", + "IMMUTABLE table property cannot be used with CREATE IMMUTABLE TABLE statement "), + + MAX_COLUMNS_EXCEEDED(1136, "XCL36", + "The number of columns exceed the maximum supported by the table's qualifier encoding scheme"), + INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES(1137, "XCL37", + "If IMMUTABLE_STORAGE_SCHEME property is not set to ONE_CELL_PER_COLUMN COLUMN_ENCODED_BYTES cannot be 0"), + 
INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE(1138, "XCL38", + "IMMUTABLE_STORAGE_SCHEME property cannot be changed from/to ONE_CELL_PER_COLUMN "), + CANNOT_SET_GUIDE_POST_WIDTH(1139, "XCL39", + "Guide post width can only be set on base data tables"), + CANNOT_CREATE_VIEWS_ON_SYSTEM_TABLES(1141, "XCL41", + "Cannot create views on tables of type" + PTableType.SYSTEM), + UNABLE_TO_CREATE_CHILD_LINK(1142, "XCL42", + "Error creating parent-child link (Link type=" + PTable.LinkType.CHILD_TABLE + ") for view"), + UNABLE_TO_UPDATE_PARENT_TABLE(1143, "XCL43", "Error Updating the parent table"), + UNABLE_TO_DELETE_CHILD_LINK(1144, "XCL44", + "Error deleting parent-child link (Link type=" + PTable.LinkType.CHILD_TABLE + ") for view"), + TABLE_NOT_IN_REGION(1145, "XCL45", + "No modifications allowed on this table. " + "Table not in this region."), + UNABLE_TO_UPSERT_TASK(1146, "XCL46", "Error upserting records in SYSTEM.TASK table"), + INVALID_CQ(1148, "XCL48", "ENCODED_QUALIFIER is less than INITIAL_VALUE."), + DUPLICATE_CQ(1149, "XCL49", "Duplicate ENCODED_QUALIFIER."), + MISSING_CQ(1150, "XCL49", "Missing ENCODED_QUALIFIER."), + EXECUTE_BATCH_FOR_STMT_WITH_RESULT_SET(1151, "XCL51", + "A batch operation can't include a " + "statement that produces result sets.", + Factory.BATCH_UPDATE_ERROR), + SPLITS_AND_SPLIT_FILE_EXISTS(1152, "XCL52", "Both splits and split file are passed"), + // 1153 code is taken by CANNOT_DROP_CDC_INDEX + SPLIT_FILE_DONT_EXIST(1154, "XCL54", "Either split file don't exist or is not a file"), + UNABLE_TO_OPEN_SPLIT_FILE(1155, "XCL55", "Exception occurred while opening splits file"), + + /** + * Implementation defined class. Phoenix internal error. (errorcode 20, sqlstate INT). + */ + CANNOT_CALL_METHOD_ON_TYPE(2001, "INT01", "Cannot call method on the argument type."), + CLASS_NOT_UNWRAPPABLE(2002, "INT03", "Class not unwrappable."), + PARAM_INDEX_OUT_OF_BOUND(2003, "INT04", "Parameter position is out of range."), + PARAM_VALUE_UNBOUND(2004, "INT05", "Parameter value unbound."), + INTERRUPTED_EXCEPTION(2005, "INT07", "Interrupted exception."), + INCOMPATIBLE_CLIENT_SERVER_JAR(2006, "INT08", + "Incompatible jars detected between client and server."), + OUTDATED_JARS(2007, "INT09", "Outdated jars."), + INDEX_METADATA_NOT_FOUND(2008, "INT10", "Unable to find cached index metadata. "), + UNKNOWN_ERROR_CODE(2009, "INT11", "Unknown error code."), + CONCURRENT_UPGRADE_IN_PROGRESS(2010, "INT12", ""), + UPGRADE_REQUIRED(2011, "INT13", ""), + UPGRADE_NOT_REQUIRED(2012, "INT14", ""), + GET_TABLE_ERROR(2013, "INT15", + "MetadataEndpointImpl doGetTable called for table not present " + "on region"), + ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_COERCIBLE(2014, "INT16", + "Row Value Constructor Offset Not Coercible to a Primary or Indexed RowKey."), + ROW_VALUE_CONSTRUCTOR_OFFSET_INTERNAL_ERROR(2015, "INT17", + "Row Value Constructor Offset had an Unexpected Error."), + ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_ALLOWED_IN_QUERY(2016, "INT18", + "Row Value Constructor Offset Not Allowed In Query."), + + UPGRADE_BLOCKED(2017, "INT19", ""), + + OPERATION_TIMED_OUT(6000, "TIM01", "Operation timed out.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + final String reason = + info.getMessage() != null ? 
info.getMessage() : OPERATION_TIMED_OUT.getMessage(); + return new SQLTimeoutException(reason, OPERATION_TIMED_OUT.getSQLState(), + OPERATION_TIMED_OUT.getErrorCode(), info.getRootCause()); + } + }), + FUNCTION_UNDEFINED(6001, "42F01", "Function undefined.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new FunctionNotFoundException(info.getFunctionName()); + } + }), + FUNCTION_ALREADY_EXIST(6002, "42F02", "Function already exists.", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new FunctionAlreadyExistsException(info.getSchemaName(), info.getTableName()); + } + }), + UNALLOWED_USER_DEFINED_FUNCTIONS(6003, "42F03", + "User defined functions are configured to not be allowed. To allow configure " + + QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB + " to true."), + + SCHEMA_ALREADY_EXISTS(721, "42M04", "Schema with given name already exists", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SchemaAlreadyExistsException(info.getSchemaName()); + } + }), + SCHEMA_NOT_FOUND(722, "43M05", "Schema does not exist", new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SchemaNotFoundException(info.getSchemaName()); + } + }), + CANNOT_MUTATE_SCHEMA(723, "43M06", "Cannot mutate schema as schema has existing tables"), + SCHEMA_NOT_ALLOWED(724, "43M07", "Schema name not allowed!!"), + CREATE_SCHEMA_NOT_ALLOWED(725, "43M08", + "Cannot create schema because config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + + " for enabling name space mapping isn't enabled."), + INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES(726, "43M10", + " Inconsistent namespace mapping properties."), + ASYNC_NOT_ALLOWED(727, "43M11", " ASYNC option is not allowed."), + NEW_CONNECTION_THROTTLED(728, "410M1", + "Could not create connection " + "because this client already has the maximum number" + + " of connections to the target cluster."), + MAX_MUTATION_SIZE_EXCEEDED(729, "LIM01", + "MutationState size is bigger" + + " than maximum allowed number of rows, try upserting rows in smaller batches or " + + "using autocommit on for deletes.", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new MaxMutationSizeExceededException(info.getMaxMutationSize(), + info.getMutationSize()); + } }), - MAX_MUTATION_SIZE_BYTES_EXCEEDED(730, "LIM02", "MutationState size is " + - "bigger than maximum allowed number of bytes, try upserting rows in smaller batches " + - "or using autocommit on for deletes.", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new MaxMutationSizeBytesExceededException(info.getMaxMutationSizeBytes(), - info.getMutationSizeBytes()); - } + MAX_MUTATION_SIZE_BYTES_EXCEEDED(730, "LIM02", + "MutationState size is " + + "bigger than maximum allowed number of bytes, try upserting rows in smaller batches " + + "or using autocommit on for deletes.", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new MaxMutationSizeBytesExceededException(info.getMaxMutationSizeBytes(), + info.getMutationSizeBytes()); + } }), - NEW_INTERNAL_CONNECTION_THROTTLED(731, "410M1", "Could not create connection " + - "because the internal connections already has the maximum number" + - " of connections to the target cluster."), - MAX_HBASE_CLIENT_KEYVALUE_MAXSIZE_EXCEEDED(732, - "LIM03", "The Phoenix Column size is 
bigger than maximum " + - "HBase client key value allowed size for ONE_CELL_PER_COLUMN table, " + - "try upserting column in smaller value", new Factory() { - @Override - public SQLException newException(SQLExceptionInfo info) { - return new MaxPhoenixColumnSizeExceededException(info.getMessage(), info.getMaxPhoenixColumnSizeBytes(), - info.getPhoenixColumnSizeBytes()); - } + NEW_INTERNAL_CONNECTION_THROTTLED(731, "410M1", + "Could not create connection " + + "because the internal connections already has the maximum number" + + " of connections to the target cluster."), + MAX_HBASE_CLIENT_KEYVALUE_MAXSIZE_EXCEEDED(732, "LIM03", + "The Phoenix Column size is bigger than maximum " + + "HBase client key value allowed size for ONE_CELL_PER_COLUMN table, " + + "try upserting column in smaller value", + new Factory() { + @Override + public SQLException newException(SQLExceptionInfo info) { + return new MaxPhoenixColumnSizeExceededException(info.getMessage(), + info.getMaxPhoenixColumnSizeBytes(), info.getPhoenixColumnSizeBytes()); + } }), - INSUFFICIENT_MEMORY(999, "50M01", "Unable to allocate enough memory."), - HASH_JOIN_CACHE_NOT_FOUND(900, "HJ01", "Hash Join cache not found"), + INSUFFICIENT_MEMORY(999, "50M01", "Unable to allocate enough memory."), + HASH_JOIN_CACHE_NOT_FOUND(900, "HJ01", "Hash Join cache not found"), - STATS_COLLECTION_DISABLED_ON_SERVER(1401, "STS01", "Stats collection attempted but is disabled on server"), + STATS_COLLECTION_DISABLED_ON_SERVER(1401, "STS01", + "Stats collection attempted but is disabled on server"), - CANNOT_UPSERT_WITH_SCN_FOR_ROW_TIMESTAMP_COLUMN(901,"43M12", - "Cannot use a connection with SCN set to upsert data for " + - "table with ROW_TIMESTAMP column."), - CANNOT_UPSERT_WITH_SCN_FOR_TABLE_WITH_INDEXES(903,"43M14", - "Cannot use a connection with SCN set to upsert data for a table with indexes."), + CANNOT_UPSERT_WITH_SCN_FOR_ROW_TIMESTAMP_COLUMN(901, "43M12", + "Cannot use a connection with SCN set to upsert data for " + + "table with ROW_TIMESTAMP column."), + CANNOT_UPSERT_WITH_SCN_FOR_TABLE_WITH_INDEXES(903, "43M14", + "Cannot use a connection with SCN set to upsert data for a table with indexes."), - CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS(904, "43M15", - "Cannot perform DDL with pending mutations. Commit or rollback mutations before performing DDL"), + CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS(904, "43M15", + "Cannot perform DDL with pending mutations. 
Commit or rollback mutations before performing DDL"), - NOT_SUPPORTED_CASCADE_FEATURE_PK(905, "43M16", "CASCADE INDEX feature is not supported to add new PK column in INDEX"), + NOT_SUPPORTED_CASCADE_FEATURE_PK(905, "43M16", + "CASCADE INDEX feature is not supported to add new PK column in INDEX"), - INCORRECT_INDEX_NAME(906, "43M17", "The list contains one or more incorrect index name(s)"), + INCORRECT_INDEX_NAME(906, "43M17", "The list contains one or more incorrect index name(s)"), - NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX(907, "43M18", - "CASCADE INDEX feature is not supported for local index"), + NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX(907, "43M18", + "CASCADE INDEX feature is not supported for local index"), - INVALID_REGION_SPLIT_POLICY(908, "43M19", - "REGION SPLIT POLICY is incorrect."), - ERROR_WRITING_TO_SCHEMA_REGISTRY(909, "4320", - "Error writing DDL change to external schema registry"), + INVALID_REGION_SPLIT_POLICY(908, "43M19", "REGION SPLIT POLICY is incorrect."), + ERROR_WRITING_TO_SCHEMA_REGISTRY(909, "4320", + "Error writing DDL change to external schema registry"), - CANNOT_TRANSFORM_ALREADY_TRANSFORMING_TABLE(910, "43M21", - "Cannot transform an index or a table who is already going through a transform."), + CANNOT_TRANSFORM_ALREADY_TRANSFORMING_TABLE(910, "43M21", + "Cannot transform an index or a table who is already going through a transform."), - CANNOT_TRANSFORM_LOCAL_OR_VIEW_INDEX(911, "43M22", "Cannot transform a view index or a local index. For view index, consider creating a new view index."), + CANNOT_TRANSFORM_LOCAL_OR_VIEW_INDEX(911, "43M22", + "Cannot transform a view index or a local index. For view index, consider creating a new view index."), - CANNOT_TRANSFORM_TABLE_WITH_LOCAL_INDEX(912, "43M23", "Cannot transform a table with a local index."), + CANNOT_TRANSFORM_TABLE_WITH_LOCAL_INDEX(912, "43M23", + "Cannot transform a table with a local index."), - CANNOT_TRANSFORM_TABLE_WITH_APPEND_ONLY_SCHEMA(913, "43M24", "Cannot transform a table with append-only schema."), + CANNOT_TRANSFORM_TABLE_WITH_APPEND_ONLY_SCHEMA(913, "43M24", + "Cannot transform a table with append-only schema."), - CANNOT_TRANSFORM_TRANSACTIONAL_TABLE(914, "43M25", "Cannot transform a transactional table."), + CANNOT_TRANSFORM_TRANSACTIONAL_TABLE(914, "43M25", "Cannot transform a transactional table."), - STALE_METADATA_CACHE_EXCEPTION(915, "43M26", "Stale metadata cache exception", - info -> new StaleMetadataCacheException(info.getMessage())), + STALE_METADATA_CACHE_EXCEPTION(915, "43M26", "Stale metadata cache exception", + info -> new StaleMetadataCacheException(info.getMessage())), - AUTO_COMMIT_NOT_ENABLED(916, "43M27", "Connection does not have auto-commit enabled"), + AUTO_COMMIT_NOT_ENABLED(916, "43M27", "Connection does not have auto-commit enabled"), - //SQLCode for testing exceptions - FAILED_KNOWINGLY_FOR_TEST(7777, "TEST", "Exception was thrown to test something"); + // SQLCode for testing exceptions + FAILED_KNOWINGLY_FOR_TEST(7777, "TEST", "Exception was thrown to test something"); - private final int errorCode; - private final String sqlState; - private final String message; - private final Factory factory; + private final int errorCode; + private final String sqlState; + private final String message; + private final Factory factory; - private SQLExceptionCode(int errorCode, String sqlState, String message) { - this(errorCode, sqlState, message, Factory.DEFAULT); - } + private SQLExceptionCode(int errorCode, String sqlState, String message) { + this(errorCode, 
sqlState, message, Factory.DEFAULT); + } - private SQLExceptionCode(int errorCode, String sqlState, String message, Factory factory) { - this.errorCode = errorCode; - this.sqlState = sqlState; - this.message = message; - this.factory = factory; - } + private SQLExceptionCode(int errorCode, String sqlState, String message, Factory factory) { + this.errorCode = errorCode; + this.sqlState = sqlState; + this.message = message; + this.factory = factory; + } - public String getSQLState() { - return sqlState; - } + public String getSQLState() { + return sqlState; + } - public String getMessage() { - return message; - } + public String getMessage() { + return message; + } - public int getErrorCode() { - return errorCode; - } + public int getErrorCode() { + return errorCode; + } - @Override - public String toString() { - return "ERROR " + errorCode + " (" + sqlState + "): " + message; - } + @Override + public String toString() { + return "ERROR " + errorCode + " (" + sqlState + "): " + message; + } - public Factory getExceptionFactory() { - return factory; - } + public Factory getExceptionFactory() { + return factory; + } - public static interface Factory { - Factory DEFAULT = new Factory() { - - @Override - public SQLException newException(SQLExceptionInfo info) { - return new SQLException(info.toString(), info.getCode().getSQLState(), - info.getCode().getErrorCode(), info.getRootCause()); - } - - }; - Factory SYNTAX_ERROR = new Factory() { - - @Override - public SQLException newException(SQLExceptionInfo info) { - return new PhoenixParserException(info.getMessage(), info.getRootCause()); - } - - }; - Factory BATCH_UPDATE_ERROR = new Factory() { - - @Override - public SQLException newException(SQLExceptionInfo info) { - return new BatchUpdateException(info.toString(), info.getCode().getSQLState(), - info.getCode().getErrorCode(), (int[]) null, info.getRootCause()); - } - - }; - SQLException newException(SQLExceptionInfo info); - } - - private static final Map errorCodeMap = Maps.newHashMapWithExpectedSize(SQLExceptionCode.values().length); - static { - for (SQLExceptionCode code : SQLExceptionCode.values()) { - SQLExceptionCode otherCode = errorCodeMap.put(code.getErrorCode(), code); - if (otherCode != null) { - throw new IllegalStateException("Duplicate error code for " + code + " and " + otherCode); - } - } + public static interface Factory { + Factory DEFAULT = new Factory() { + + @Override + public SQLException newException(SQLExceptionInfo info) { + return new SQLException(info.toString(), info.getCode().getSQLState(), + info.getCode().getErrorCode(), info.getRootCause()); + } + + }; + Factory SYNTAX_ERROR = new Factory() { + + @Override + public SQLException newException(SQLExceptionInfo info) { + return new PhoenixParserException(info.getMessage(), info.getRootCause()); + } + + }; + Factory BATCH_UPDATE_ERROR = new Factory() { + + @Override + public SQLException newException(SQLExceptionInfo info) { + return new BatchUpdateException(info.toString(), info.getCode().getSQLState(), + info.getCode().getErrorCode(), (int[]) null, info.getRootCause()); + } + + }; + + SQLException newException(SQLExceptionInfo info); + } + + private static final Map errorCodeMap = + Maps.newHashMapWithExpectedSize(SQLExceptionCode.values().length); + static { + for (SQLExceptionCode code : SQLExceptionCode.values()) { + SQLExceptionCode otherCode = errorCodeMap.put(code.getErrorCode(), code); + if (otherCode != null) { + throw new IllegalStateException("Duplicate error code for " + code + " and " + otherCode); + } 
} - - public static SQLExceptionCode fromErrorCode(int errorCode) { - SQLExceptionCode code = errorCodeMap.get(errorCode); - if (code == null) { - return SQLExceptionCode.UNKNOWN_ERROR_CODE; - } - return code; + } + + public static SQLExceptionCode fromErrorCode(int errorCode) { + SQLExceptionCode code = errorCodeMap.get(errorCode); + if (code == null) { + return SQLExceptionCode.UNKNOWN_ERROR_CODE; } + return code; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java index 3e27dc75c18..4ed1b46f775 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,306 +19,304 @@ import java.sql.SQLException; -import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.util.SchemaUtil; - /** * Object serves as a closure of all coordinate information for SQLException messages. - * - * * @since 1.0 */ public class SQLExceptionInfo { - /** - * Constants used in naming exception location. - */ - public static final String SCHEMA_NAME = "schemaName"; - public static final String TABLE_NAME = "tableName"; - public static final String TABLE_TYPE = "tableType"; - public static final String FAMILY_NAME = "familyName"; - public static final String COLUMN_NAME = "columnName"; - public static final String FUNCTION_NAME = "functionName"; - public static final String MAX_MUTATION_SIZE = "maxMutationSize"; - public static final String MUTATION_SIZE = "mutationSize"; - public static final String MAX_MUTATION_SIZE_BYTES = "maxMutationSizeBytes"; - public static final String MUTATION_SIZE_BYTES = "mutationSizeBytes"; - public static final String MAX_PHOENIX_COLUMN_SIZE_BYTES = "maxPhoenixColumnSizeBytes"; - public static final String PHOENIX_COLUMN_SIZE_BYTES = "phoenixColumnSizeBytes"; - public static final String HA_GROUP_INFO = "haGroupInfo"; - - private final Throwable rootCause; - private final SQLExceptionCode code; // Should always have one. - private final String message; - private final String schemaName; - private final String tableName; - private final PTableType tableType; - private final String familyName; - private final String columnName; - private final String functionName; - private final int maxMutationSize; - private final int mutationSize; - private final long maxMutationSizeBytes; - private final long mutationSizeBytes; - private final int phoenixColumnSizeBytes; - private final int maxPhoenixColumnSizeBytes; - private final String haGroupInfo; - private final String cdcChangeScope; - - public static class Builder { - private Throwable rootCause; - private SQLExceptionCode code; // Should always have one. 
- private String message; - private String schemaName; - private String tableName; - private String familyName; - private String columnName; - private String functionName; - private int maxMutationSize; - private int mutationSize; - private long maxMutationSizeBytes; - private long mutationSizeBytes; - private int phoenixColumnSizeBytes; - private int maxPhoenixColumnSizeBytes; - private String haGroupInfo; - private PTableType tableType; - private String cdcChangeScope; - - public Builder(SQLExceptionCode code) { - this.code = code; - } - - public Builder setRootCause(Throwable t) { - this.rootCause = t; - return this; - } - - public Builder setMessage(String message) { - this.message = message; - return this; - } - - public Builder setSchemaName(String schemaName) { - this.schemaName = schemaName; - return this; - } - - public Builder setTableName(String tableName) { - this.tableName = tableName; - return this; - } - - public Builder setTableType(PTableType tableType) { - this.tableType = tableType; - return this; - } - - public Builder setFamilyName(String familyName) { - this.familyName = familyName; - return this; - } - - public Builder setColumnName(String columnName) { - this.columnName = columnName; - return this; - } - - public Builder setFunctionName(String functionName) { - this.functionName = functionName; - return this; - } - - public Builder setMaxMutationSize(int maxMutationSize) { - this.maxMutationSize = maxMutationSize; - return this; - } - - public Builder setMutationSize(int mutationSize) { - this.mutationSize = mutationSize; - return this; - } - - public Builder setMaxMutationSizeBytes(long maxMutationSizeBytes) { - this.maxMutationSizeBytes = maxMutationSizeBytes; - return this; - } - - public Builder setMutationSizeBytes(long mutationSizeBytes) { - this.mutationSizeBytes = mutationSizeBytes; - return this; - } - - public Builder setPhoenixColumnSizeBytes(int phoenixColumnSizeBytes) { - this.phoenixColumnSizeBytes = phoenixColumnSizeBytes; - return this; - } - - public Builder setMaxPhoenixColumnSizeBytes(int maxPhoenixColumnSizeBytes) { - this.maxPhoenixColumnSizeBytes = maxPhoenixColumnSizeBytes; - return this; - } - - public Builder setHaGroupInfo(String haGroupInfo) { - this.haGroupInfo = haGroupInfo; - return this; - } - - public Builder setCdcChangeScope(String cdcChangeScope) { - this.cdcChangeScope = cdcChangeScope; - return this; - } - - public SQLExceptionInfo build() { - return new SQLExceptionInfo(this); - } - - @Override - public String toString() { - return code.toString(); - } + /** + * Constants used in naming exception location. 
+ */ + public static final String SCHEMA_NAME = "schemaName"; + public static final String TABLE_NAME = "tableName"; + public static final String TABLE_TYPE = "tableType"; + public static final String FAMILY_NAME = "familyName"; + public static final String COLUMN_NAME = "columnName"; + public static final String FUNCTION_NAME = "functionName"; + public static final String MAX_MUTATION_SIZE = "maxMutationSize"; + public static final String MUTATION_SIZE = "mutationSize"; + public static final String MAX_MUTATION_SIZE_BYTES = "maxMutationSizeBytes"; + public static final String MUTATION_SIZE_BYTES = "mutationSizeBytes"; + public static final String MAX_PHOENIX_COLUMN_SIZE_BYTES = "maxPhoenixColumnSizeBytes"; + public static final String PHOENIX_COLUMN_SIZE_BYTES = "phoenixColumnSizeBytes"; + public static final String HA_GROUP_INFO = "haGroupInfo"; + + private final Throwable rootCause; + private final SQLExceptionCode code; // Should always have one. + private final String message; + private final String schemaName; + private final String tableName; + private final PTableType tableType; + private final String familyName; + private final String columnName; + private final String functionName; + private final int maxMutationSize; + private final int mutationSize; + private final long maxMutationSizeBytes; + private final long mutationSizeBytes; + private final int phoenixColumnSizeBytes; + private final int maxPhoenixColumnSizeBytes; + private final String haGroupInfo; + private final String cdcChangeScope; + + public static class Builder { + private Throwable rootCause; + private SQLExceptionCode code; // Should always have one. + private String message; + private String schemaName; + private String tableName; + private String familyName; + private String columnName; + private String functionName; + private int maxMutationSize; + private int mutationSize; + private long maxMutationSizeBytes; + private long mutationSizeBytes; + private int phoenixColumnSizeBytes; + private int maxPhoenixColumnSizeBytes; + private String haGroupInfo; + private PTableType tableType; + private String cdcChangeScope; + + public Builder(SQLExceptionCode code) { + this.code = code; } - private SQLExceptionInfo(Builder builder) { - code = builder.code; - rootCause = builder.rootCause; - message = builder.message; - schemaName = builder.schemaName; - tableName = builder.tableName; - tableType = builder.tableType; - familyName = builder.familyName; - columnName = builder.columnName; - functionName = builder.functionName; - maxMutationSize = builder.maxMutationSize; - mutationSize = builder.mutationSize; - maxMutationSizeBytes = builder.maxMutationSizeBytes; - mutationSizeBytes = builder.mutationSizeBytes; - maxPhoenixColumnSizeBytes = builder.maxPhoenixColumnSizeBytes; - phoenixColumnSizeBytes = builder.phoenixColumnSizeBytes; - haGroupInfo = builder.haGroupInfo; - cdcChangeScope = builder.cdcChangeScope; + public Builder setRootCause(Throwable t) { + this.rootCause = t; + return this; } - @Override - public String toString() { - String baseMessage = code.toString(); - StringBuilder builder = new StringBuilder(baseMessage); - if (message != null) { - if (message.startsWith(baseMessage)) { - builder.append(message.substring(baseMessage.length())); - } else { - builder.append(" ").append(message); - } - } - if (functionName != null) { - builder.append(" ").append(FUNCTION_NAME).append("=").append(functionName); - return builder.toString(); - } - String columnDisplayName = SchemaUtil.getMetaDataEntityName(schemaName, 
tableName, familyName, columnName); - if (columnName != null) { - builder.append(" ").append(COLUMN_NAME).append("=").append(columnDisplayName); - } else if (familyName != null) { - builder.append(" ").append(FAMILY_NAME).append("=").append(columnDisplayName); - } else if (tableName != null) { - builder.append(" ").append(TABLE_NAME).append("=").append(columnDisplayName); - } else if (schemaName != null) { - builder.append(" ").append(SCHEMA_NAME).append("=").append(columnDisplayName); - } - if (tableType != null) { - builder.append(" ").append(TABLE_TYPE).append("=").append(tableType); - } - if (maxMutationSize != 0) { - builder.append(" ").append(MAX_MUTATION_SIZE).append("=").append(maxMutationSize); - builder.append(" ").append(MUTATION_SIZE).append("=").append(mutationSize); - } else if (maxMutationSizeBytes != 0) { - builder.append(" ").append(MAX_MUTATION_SIZE_BYTES).append("="). - append(maxMutationSizeBytes); - builder.append(" ").append(MUTATION_SIZE_BYTES).append("=").append(mutationSizeBytes); - } - if (maxPhoenixColumnSizeBytes != 0) { - builder.append(" ").append(MAX_PHOENIX_COLUMN_SIZE_BYTES).append("=").append(maxPhoenixColumnSizeBytes); - builder.append(" ").append(PHOENIX_COLUMN_SIZE_BYTES).append("=").append(phoenixColumnSizeBytes); - } - if (haGroupInfo != null) { - builder.append(" ").append(HA_GROUP_INFO).append("=").append(haGroupInfo); - } - if (cdcChangeScope != null) { - builder.append(": ").append(cdcChangeScope); - } - - return builder.toString(); + public Builder setMessage(String message) { + this.message = message; + return this; } - public SQLException buildException() { - return code.getExceptionFactory().newException(this); + public Builder setSchemaName(String schemaName) { + this.schemaName = schemaName; + return this; } - public Throwable getRootCause() { - return rootCause; + public Builder setTableName(String tableName) { + this.tableName = tableName; + return this; } - public String getSchemaName() { - return schemaName; + public Builder setTableType(PTableType tableType) { + this.tableType = tableType; + return this; } - public String getTableName() { - return tableName; + public Builder setFamilyName(String familyName) { + this.familyName = familyName; + return this; } - public PTableType getTableType() { - return tableType; + public Builder setColumnName(String columnName) { + this.columnName = columnName; + return this; } - public String getFamilyName() { - return familyName; + public Builder setFunctionName(String functionName) { + this.functionName = functionName; + return this; } - public String getColumnName() { - return columnName; + public Builder setMaxMutationSize(int maxMutationSize) { + this.maxMutationSize = maxMutationSize; + return this; } - public String getFunctionName() { - return functionName; - } - - public SQLExceptionCode getCode() { - return code; + public Builder setMutationSize(int mutationSize) { + this.mutationSize = mutationSize; + return this; } - public String getMessage() { - return message; + public Builder setMaxMutationSizeBytes(long maxMutationSizeBytes) { + this.maxMutationSizeBytes = maxMutationSizeBytes; + return this; } - public int getMaxMutationSize() { - return maxMutationSize; + public Builder setMutationSizeBytes(long mutationSizeBytes) { + this.mutationSizeBytes = mutationSizeBytes; + return this; } - public int getMutationSize() { - return mutationSize; + public Builder setPhoenixColumnSizeBytes(int phoenixColumnSizeBytes) { + this.phoenixColumnSizeBytes = phoenixColumnSizeBytes; + return this; } - 
public long getMaxMutationSizeBytes() { - return maxMutationSizeBytes; + public Builder setMaxPhoenixColumnSizeBytes(int maxPhoenixColumnSizeBytes) { + this.maxPhoenixColumnSizeBytes = maxPhoenixColumnSizeBytes; + return this; } - public long getMutationSizeBytes() { - return mutationSizeBytes; + public Builder setHaGroupInfo(String haGroupInfo) { + this.haGroupInfo = haGroupInfo; + return this; } - public int getMaxPhoenixColumnSizeBytes() { - return maxPhoenixColumnSizeBytes; + public Builder setCdcChangeScope(String cdcChangeScope) { + this.cdcChangeScope = cdcChangeScope; + return this; } - public int getPhoenixColumnSizeBytes() { - return phoenixColumnSizeBytes; + public SQLExceptionInfo build() { + return new SQLExceptionInfo(this); } - public String getHaGroupInfo() { - return haGroupInfo; + @Override + public String toString() { + return code.toString(); } - - public String getCdcChangeScope() { - return cdcChangeScope; + } + + private SQLExceptionInfo(Builder builder) { + code = builder.code; + rootCause = builder.rootCause; + message = builder.message; + schemaName = builder.schemaName; + tableName = builder.tableName; + tableType = builder.tableType; + familyName = builder.familyName; + columnName = builder.columnName; + functionName = builder.functionName; + maxMutationSize = builder.maxMutationSize; + mutationSize = builder.mutationSize; + maxMutationSizeBytes = builder.maxMutationSizeBytes; + mutationSizeBytes = builder.mutationSizeBytes; + maxPhoenixColumnSizeBytes = builder.maxPhoenixColumnSizeBytes; + phoenixColumnSizeBytes = builder.phoenixColumnSizeBytes; + haGroupInfo = builder.haGroupInfo; + cdcChangeScope = builder.cdcChangeScope; + } + + @Override + public String toString() { + String baseMessage = code.toString(); + StringBuilder builder = new StringBuilder(baseMessage); + if (message != null) { + if (message.startsWith(baseMessage)) { + builder.append(message.substring(baseMessage.length())); + } else { + builder.append(" ").append(message); + } + } + if (functionName != null) { + builder.append(" ").append(FUNCTION_NAME).append("=").append(functionName); + return builder.toString(); + } + String columnDisplayName = + SchemaUtil.getMetaDataEntityName(schemaName, tableName, familyName, columnName); + if (columnName != null) { + builder.append(" ").append(COLUMN_NAME).append("=").append(columnDisplayName); + } else if (familyName != null) { + builder.append(" ").append(FAMILY_NAME).append("=").append(columnDisplayName); + } else if (tableName != null) { + builder.append(" ").append(TABLE_NAME).append("=").append(columnDisplayName); + } else if (schemaName != null) { + builder.append(" ").append(SCHEMA_NAME).append("=").append(columnDisplayName); } + if (tableType != null) { + builder.append(" ").append(TABLE_TYPE).append("=").append(tableType); + } + if (maxMutationSize != 0) { + builder.append(" ").append(MAX_MUTATION_SIZE).append("=").append(maxMutationSize); + builder.append(" ").append(MUTATION_SIZE).append("=").append(mutationSize); + } else if (maxMutationSizeBytes != 0) { + builder.append(" ").append(MAX_MUTATION_SIZE_BYTES).append("=").append(maxMutationSizeBytes); + builder.append(" ").append(MUTATION_SIZE_BYTES).append("=").append(mutationSizeBytes); + } + if (maxPhoenixColumnSizeBytes != 0) { + builder.append(" ").append(MAX_PHOENIX_COLUMN_SIZE_BYTES).append("=") + .append(maxPhoenixColumnSizeBytes); + builder.append(" ").append(PHOENIX_COLUMN_SIZE_BYTES).append("=") + .append(phoenixColumnSizeBytes); + } + if (haGroupInfo != null) { + builder.append(" 
").append(HA_GROUP_INFO).append("=").append(haGroupInfo); + } + if (cdcChangeScope != null) { + builder.append(": ").append(cdcChangeScope); + } + + return builder.toString(); + } + + public SQLException buildException() { + return code.getExceptionFactory().newException(this); + } + + public Throwable getRootCause() { + return rootCause; + } + + public String getSchemaName() { + return schemaName; + } + + public String getTableName() { + return tableName; + } + + public PTableType getTableType() { + return tableType; + } + + public String getFamilyName() { + return familyName; + } + + public String getColumnName() { + return columnName; + } + + public String getFunctionName() { + return functionName; + } + + public SQLExceptionCode getCode() { + return code; + } + + public String getMessage() { + return message; + } + + public int getMaxMutationSize() { + return maxMutationSize; + } + + public int getMutationSize() { + return mutationSize; + } + + public long getMaxMutationSizeBytes() { + return maxMutationSizeBytes; + } + + public long getMutationSizeBytes() { + return mutationSizeBytes; + } + + public int getMaxPhoenixColumnSizeBytes() { + return maxPhoenixColumnSizeBytes; + } + + public int getPhoenixColumnSizeBytes() { + return phoenixColumnSizeBytes; + } + + public String getHaGroupInfo() { + return haGroupInfo; + } + + public String getCdcChangeScope() { + return cdcChangeScope; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/StaleMetadataCacheException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/StaleMetadataCacheException.java index 584e33e0b86..f96d13b3afc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/StaleMetadataCacheException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/StaleMetadataCacheException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,10 +23,10 @@ * Indicates metadata cache is stale. */ public class StaleMetadataCacheException extends SQLException { - private static final long serialVersionUID = 1L; - private static final SQLExceptionCode code = SQLExceptionCode.STALE_METADATA_CACHE_EXCEPTION; + private static final long serialVersionUID = 1L; + private static final SQLExceptionCode code = SQLExceptionCode.STALE_METADATA_CACHE_EXCEPTION; - public StaleMetadataCacheException(String message) { - super(message, code.getSQLState(), code.getErrorCode()); - } -} \ No newline at end of file + public StaleMetadataCacheException(String message) { + super(message, code.getSQLState(), code.getErrorCode()); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UndecodableByteException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UndecodableByteException.java index fe4f4c14ed4..c744aa9e611 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UndecodableByteException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UndecodableByteException.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -17,11 +17,10 @@ */ package org.apache.phoenix.exception; - public class UndecodableByteException extends RuntimeException { - public UndecodableByteException(Byte b) { - super("Undecodable byte: " + b); - } - + public UndecodableByteException(Byte b) { + super("Undecodable byte: " + b); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UnknownFunctionException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UnknownFunctionException.java index 4fc4b9b3fc0..3e0c2508d7d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UnknownFunctionException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UnknownFunctionException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,15 +21,15 @@ * Thrown by ParseNodeFactory when it could not identify a node as a valid function. */ public class UnknownFunctionException extends RuntimeException { - private static final long serialVersionUID = 1L; - private final String funcName; + private static final long serialVersionUID = 1L; + private final String funcName; - public UnknownFunctionException(String funcName) { - super(); - this.funcName = funcName; - } + public UnknownFunctionException(String funcName) { + super(); + this.funcName = funcName; + } - public String getFuncName() { - return funcName; - } + public String getFuncName() { + return funcName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java index 6313879a83b..f7cedfc119e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeBlockedException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,10 @@ */ package org.apache.phoenix.exception; - public class UpgradeBlockedException extends RetriableUpgradeException { - public UpgradeBlockedException() { - super("Upgrade is BLOCKED by a SYSTEM.MUTEX row", SQLExceptionCode.UPGRADE_BLOCKED - .getSQLState(), SQLExceptionCode.UPGRADE_BLOCKED.getErrorCode()); - } + public UpgradeBlockedException() { + super("Upgrade is BLOCKED by a SYSTEM.MUTEX row", + SQLExceptionCode.UPGRADE_BLOCKED.getSQLState(), + SQLExceptionCode.UPGRADE_BLOCKED.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java index 9950aa467ea..9addae59342 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +17,16 @@ */ package org.apache.phoenix.exception; - import org.apache.phoenix.coprocessorclient.MetaDataProtocol; public class UpgradeInProgressException extends RetriableUpgradeException { - public UpgradeInProgressException(String upgradeFrom, String upgradeTo) { - super((upgradeFrom.equals(MetaDataProtocol.MIGRATION_IN_PROGRESS) ? - "System Tables are concurrently being migrated to system namespace" : - "Cluster is being concurrently upgraded from " + upgradeFrom + " to " + upgradeTo) - + ". Please retry establishing connection.", SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS - .getSQLState(), SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS.getErrorCode()); - } + public UpgradeInProgressException(String upgradeFrom, String upgradeTo) { + super( + (upgradeFrom.equals(MetaDataProtocol.MIGRATION_IN_PROGRESS) + ? "System Tables are concurrently being migrated to system namespace" + : "Cluster is being concurrently upgraded from " + upgradeFrom + " to " + upgradeTo) + + ". Please retry establishing connection.", + SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS.getSQLState(), + SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeNotRequiredException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeNotRequiredException.java index 7e94977deea..799a4ee5b9a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeNotRequiredException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeNotRequiredException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,10 @@ */ package org.apache.phoenix.exception; - public class UpgradeNotRequiredException extends RetriableUpgradeException { - public UpgradeNotRequiredException() { - super("Operation not allowed since cluster has already been upgraded. ", SQLExceptionCode.UPGRADE_NOT_REQUIRED - .getSQLState(), SQLExceptionCode.UPGRADE_NOT_REQUIRED.getErrorCode()); - } + public UpgradeNotRequiredException() { + super("Operation not allowed since cluster has already been upgraded. ", + SQLExceptionCode.UPGRADE_NOT_REQUIRED.getSQLState(), + SQLExceptionCode.UPGRADE_NOT_REQUIRED.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java index 05217f45c06..2db9f0874c9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/exception/UpgradeRequiredException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,23 +17,22 @@ */ package org.apache.phoenix.exception; -import org.apache.hadoop.hbase.HConstants; - public class UpgradeRequiredException extends RetriableUpgradeException { - private final long systemCatalogTimestamp; + private final long systemCatalogTimestamp; - public UpgradeRequiredException() { - this(Long.MIN_VALUE); - } + public UpgradeRequiredException() { + this(Long.MIN_VALUE); + } - public UpgradeRequiredException(long systemCatalogTimeStamp) { - super("Operation not allowed since cluster hasn't been upgraded. Call EXECUTE UPGRADE. ", - SQLExceptionCode.UPGRADE_REQUIRED.getSQLState(), SQLExceptionCode.UPGRADE_REQUIRED.getErrorCode()); - this.systemCatalogTimestamp = systemCatalogTimeStamp; - } + public UpgradeRequiredException(long systemCatalogTimeStamp) { + super("Operation not allowed since cluster hasn't been upgraded. Call EXECUTE UPGRADE. ", + SQLExceptionCode.UPGRADE_REQUIRED.getSQLState(), + SQLExceptionCode.UPGRADE_REQUIRED.getErrorCode()); + this.systemCatalogTimestamp = systemCatalogTimeStamp; + } - public long getSystemCatalogTimeStamp() { - return systemCatalogTimestamp; - } + public long getSystemCatalogTimeStamp() { + return systemCatalogTimestamp; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/AggregatePlan.java index 635131b37eb..8a84addbb80 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/AggregatePlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/AggregatePlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.execute; - import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -83,323 +82,321 @@ import org.slf4j.LoggerFactory; /** - * * Query plan for aggregating queries - * - * * @since 0.1 */ public class AggregatePlan extends BaseQueryPlan { - private final Aggregators aggregators; - private final Expression having; - private List splits; - private List> scans; - private static final Logger LOGGER = LoggerFactory.getLogger(AggregatePlan.class); - private boolean isSerial; - private OrderBy actualOutputOrderBy; + private final Aggregators aggregators; + private final Expression having; + private List splits; + private List> scans; + private static final Logger LOGGER = LoggerFactory.getLogger(AggregatePlan.class); + private boolean isSerial; + private OrderBy actualOutputOrderBy; - public AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, - RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, - ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having, - QueryPlan dataPlan) throws SQLException { - super(context, statement, table, projector, context.getBindManager().getParameterMetaData(), limit, offset, - orderBy, groupBy, parallelIteratorFactory, dataPlan); - this.having = having; - this.aggregators = context.getAggregationManager().getAggregators(); - boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); - boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context); - if (hasSerialHint && !canBeExecutedSerially) { - LOGGER.warn("This query cannot be executed serially. Ignoring the hint"); - } - this.isSerial = hasSerialHint && canBeExecutedSerially; - this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context); + public AggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, + RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, + ParallelIteratorFactory parallelIteratorFactory, GroupBy groupBy, Expression having, + QueryPlan dataPlan) throws SQLException { + super(context, statement, table, projector, context.getBindManager().getParameterMetaData(), + limit, offset, orderBy, groupBy, parallelIteratorFactory, dataPlan); + this.having = having; + this.aggregators = context.getAggregationManager().getAggregators(); + boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); + boolean canBeExecutedSerially = + ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context); + if (hasSerialHint && !canBeExecutedSerially) { + LOGGER.warn("This query cannot be executed serially. 
Ignoring the hint"); } + this.isSerial = hasSerialHint && canBeExecutedSerially; + this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context); + } - public Expression getHaving() { - return having; - } + public Expression getHaving() { + return having; + } - @Override - public Cost getCost() { - Double outputBytes = this.accept(new ByteCountVisitor()); - Double rowWidth = this.accept(new AvgRowWidthVisitor()); - Long inputRows = null; - try { - inputRows = getEstimatedRowsToScan(); - } catch (SQLException e) { - // ignored. - } - if (inputRows == null || outputBytes == null || rowWidth == null) { - return Cost.UNKNOWN; - } - double inputBytes = inputRows * rowWidth; - double rowsBeforeHaving = RowCountVisitor.aggregate( - RowCountVisitor.filter( - inputRows.doubleValue(), - RowCountVisitor.stripSkipScanFilter( - context.getScan().getFilter())), - groupBy); - double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, having); - double bytesBeforeHaving = rowWidth * rowsBeforeHaving; - double bytesAfterHaving = rowWidth * rowsAfterHaving; + @Override + public Cost getCost() { + Double outputBytes = this.accept(new ByteCountVisitor()); + Double rowWidth = this.accept(new AvgRowWidthVisitor()); + Long inputRows = null; + try { + inputRows = getEstimatedRowsToScan(); + } catch (SQLException e) { + // ignored. + } + if (inputRows == null || outputBytes == null || rowWidth == null) { + return Cost.UNKNOWN; + } + double inputBytes = inputRows * rowWidth; + double rowsBeforeHaving = + RowCountVisitor.aggregate(RowCountVisitor.filter(inputRows.doubleValue(), + RowCountVisitor.stripSkipScanFilter(context.getScan().getFilter())), groupBy); + double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, having); + double bytesBeforeHaving = rowWidth * rowsBeforeHaving; + double bytesAfterHaving = rowWidth * rowsAfterHaving; - int parallelLevel = CostUtil.estimateParallelLevel( - true, context.getConnection().getQueryServices()); - Cost cost = new Cost(0, 0, inputBytes); - Cost aggCost = CostUtil.estimateAggregateCost( - inputBytes, bytesBeforeHaving, groupBy, parallelLevel); - cost = cost.plus(aggCost); - if (!orderBy.getOrderByExpressions().isEmpty()) { - parallelLevel = CostUtil.estimateParallelLevel( - false, context.getConnection().getQueryServices()); - Cost orderByCost = CostUtil.estimateOrderByCost( - bytesAfterHaving, outputBytes, parallelLevel); - cost = cost.plus(orderByCost); - } - return cost; + int parallelLevel = + CostUtil.estimateParallelLevel(true, context.getConnection().getQueryServices()); + Cost cost = new Cost(0, 0, inputBytes); + Cost aggCost = + CostUtil.estimateAggregateCost(inputBytes, bytesBeforeHaving, groupBy, parallelLevel); + cost = cost.plus(aggCost); + if (!orderBy.getOrderByExpressions().isEmpty()) { + parallelLevel = + CostUtil.estimateParallelLevel(false, context.getConnection().getQueryServices()); + Cost orderByCost = CostUtil.estimateOrderByCost(bytesAfterHaving, outputBytes, parallelLevel); + cost = cost.plus(orderByCost); } + return cost; + } - @Override - public List getSplits() { - if (splits == null) - return Collections.emptyList(); - else - return splits; + @Override + public List getSplits() { + if (splits == null) return Collections.emptyList(); + else return splits; + } + + @Override + public List> getScans() { + if (scans == null) return Collections.emptyList(); + else return scans; + } + + private static class OrderingResultIteratorFactory implements ParallelIteratorFactory { + private final QueryServices services; + 
private final OrderBy orderBy; + + public OrderingResultIteratorFactory(QueryServices services, OrderBy orderBy) { + this.services = services; + this.orderBy = orderBy; } @Override - public List> getScans() { - if (scans == null) - return Collections.emptyList(); - else - return scans; + public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, + Scan scan, String tableName, QueryPlan plan) throws SQLException { + /** + * Sort the result tuples by the GroupBy expressions. When orderByReverse is false,if some + * GroupBy expression is SortOrder.DESC, then sorted results on that expression are DESC, not + * ASC. When orderByReverse is true,if some GroupBy expression is SortOrder.DESC, then sorted + * results on that expression are ASC, not DESC. + */ + OrderByExpression orderByExpression = OrderByExpression.createByCheckIfOrderByReverse( + RowKeyExpression.INSTANCE, false, true, this.orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + long threshold = + services.getProps().getLongBytes(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + boolean spoolingEnabled = + services.getProps().getBoolean(QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); + return new OrderedResultIterator(scanner, + Collections. singletonList(orderByExpression), spoolingEnabled, + threshold); } + } - private static class OrderingResultIteratorFactory implements ParallelIteratorFactory { - private final QueryServices services; - private final OrderBy orderBy; - - public OrderingResultIteratorFactory(QueryServices services,OrderBy orderBy) { - this.services = services; - this.orderBy=orderBy; - } - @Override - public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException { - /** - * Sort the result tuples by the GroupBy expressions. - * When orderByReverse is false,if some GroupBy expression is SortOrder.DESC, then sorted results on that expression are DESC, not ASC. - * When orderByReverse is true,if some GroupBy expression is SortOrder.DESC, then sorted results on that expression are ASC, not DESC. - */ - OrderByExpression orderByExpression = - OrderByExpression.createByCheckIfOrderByReverse( - RowKeyExpression.INSTANCE, - false, - true, - this.orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); - long threshold = - services.getProps() - .getLongBytes(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - boolean spoolingEnabled = - services.getProps().getBoolean( - QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); - return new OrderedResultIterator(scanner, - Collections. 
singletonList(orderByExpression), - spoolingEnabled, threshold); - } - } + private static class WrappingResultIteratorFactory implements ParallelIteratorFactory { + private final ParallelIteratorFactory innerFactory; + private final ParallelIteratorFactory outerFactory; - private static class WrappingResultIteratorFactory implements ParallelIteratorFactory { - private final ParallelIteratorFactory innerFactory; - private final ParallelIteratorFactory outerFactory; - - public WrappingResultIteratorFactory(ParallelIteratorFactory innerFactory, ParallelIteratorFactory outerFactory) { - this.innerFactory = innerFactory; - this.outerFactory = outerFactory; - } - @Override - public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException { - PeekingResultIterator iterator = innerFactory.newIterator(context, scanner, scan, tableName, plan); - return outerFactory.newIterator(context, iterator, scan, tableName, plan); - } + public WrappingResultIteratorFactory(ParallelIteratorFactory innerFactory, + ParallelIteratorFactory outerFactory) { + this.innerFactory = innerFactory; + this.outerFactory = outerFactory; } - private ParallelIteratorFactory wrapParallelIteratorFactory () { - ParallelIteratorFactory innerFactory; - QueryServices services = context.getConnection().getQueryServices(); - if (groupBy.isEmpty() || groupBy.isOrderPreserving()) { - if (ScanUtil.isPacingScannersPossible(context)) { - innerFactory = ParallelIteratorFactory.NOOP_FACTORY; - } else { - innerFactory = new SpoolingResultIterator.SpoolingResultIteratorFactory(services); - } - } else { - innerFactory = new OrderingResultIteratorFactory(services,this.getOrderBy()); - } - if (parallelIteratorFactory == null) { - return innerFactory; - } - // wrap any existing parallelIteratorFactory - return new WrappingResultIteratorFactory(innerFactory, parallelIteratorFactory); + @Override + public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, + Scan scan, String tableName, QueryPlan plan) throws SQLException { + PeekingResultIterator iterator = + innerFactory.newIterator(context, scanner, scan, tableName, plan); + return outerFactory.newIterator(context, iterator, scan, tableName, plan); } - public void serializeGroupedAggregateRegionObserverIntoScan(Scan scan, String attribName, - List groupByExpressions) { - ByteArrayOutputStream stream = - new ByteArrayOutputStream(Math.max(1, groupByExpressions.size() * 10)); - try { - if (groupByExpressions.isEmpty()) { // FIXME ? 
- stream.write(QueryConstants.TRUE); - } else { - DataOutputStream output = new DataOutputStream(stream); - for (Expression expression : groupByExpressions) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - } - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - scan.setAttribute(attribName, stream.toByteArray()); + } + private ParallelIteratorFactory wrapParallelIteratorFactory() { + ParallelIteratorFactory innerFactory; + QueryServices services = context.getConnection().getQueryServices(); + if (groupBy.isEmpty() || groupBy.isOrderPreserving()) { + if (ScanUtil.isPacingScannersPossible(context)) { + innerFactory = ParallelIteratorFactory.NOOP_FACTORY; + } else { + innerFactory = new SpoolingResultIterator.SpoolingResultIteratorFactory(services); + } + } else { + innerFactory = new OrderingResultIteratorFactory(services, this.getOrderBy()); } - - public void serializeUngroupedAggregateRegionObserverIntoScan(Scan scan) { - scan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, QueryConstants.TRUE); + if (parallelIteratorFactory == null) { + return innerFactory; } + // wrap any existing parallelIteratorFactory + return new WrappingResultIteratorFactory(innerFactory, parallelIteratorFactory); + } - @Override - protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, Map caches) throws SQLException { - if (groupBy.isEmpty()) { - serializeUngroupedAggregateRegionObserverIntoScan(scan); - } else { - // Set attribute with serialized expressions for coprocessor - serializeGroupedAggregateRegionObserverIntoScan(scan, groupBy.getScanAttribName(), groupBy.getKeyExpressions()); - if (limit != null && orderBy.getOrderByExpressions().isEmpty() && having == null - && ( ( statement.isDistinct() && ! statement.isAggregate() ) - || ( ! statement.isDistinct() && ( context.getAggregationManager().isEmpty() - || BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS.equals(groupBy.getScanAttribName()) ) ) ) ) { - /* - * Optimization to early exit from the scan for a GROUP BY or DISTINCT with a LIMIT. - * We may exit early according to the LIMIT specified if the query has: - * 1) No ORDER BY clause (or the ORDER BY was optimized out). We cannot exit - * early if there's an ORDER BY because the first group may be found last - * in the scan. - * 2) No HAVING clause, since we execute the HAVING on the client side. The LIMIT - * needs to be evaluated *after* the HAVING. - * 3) DISTINCT clause with no GROUP BY. We cannot exit early if there's a - * GROUP BY, as the GROUP BY is processed on the client-side post aggregation - * if a DISTNCT has a GROUP BY. Otherwise, since there are no aggregate - * functions in a DISTINCT, we can exit early regardless of if the - * groups are in row key order or unordered. - * 4) GROUP BY clause with no aggregate functions. This is in the same category - * as (3). If we're using aggregate functions, we need to look at all the - * rows, as otherwise we'd exit early with incorrect aggregate function - * calculations. - * 5) GROUP BY clause along the pk axis, as the rows are processed in row key - * order, so we can early exit, even when aggregate functions are used, as - * the rows in the group are contiguous. - */ - scan.setAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT, - PInteger.INSTANCE.toBytes(limit + (offset == null ? 
0 : offset))); - } + public void serializeGroupedAggregateRegionObserverIntoScan(Scan scan, String attribName, + List groupByExpressions) { + ByteArrayOutputStream stream = + new ByteArrayOutputStream(Math.max(1, groupByExpressions.size() * 10)); + try { + if (groupByExpressions.isEmpty()) { // FIXME ? + stream.write(QueryConstants.TRUE); + } else { + DataOutputStream output = new DataOutputStream(stream); + for (Expression expression : groupByExpressions) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); } - BaseResultIterators iterators = isSerial - ? new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper, scan, caches, dataPlan) - : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false, caches, dataPlan); - estimatedRows = iterators.getEstimatedRowCount(); - estimatedSize = iterators.getEstimatedByteCount(); - estimateInfoTimestamp = iterators.getEstimateInfoTimestamp(); - splits = iterators.getSplits(); - scans = iterators.getScans(); + } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + scan.setAttribute(attribName, stream.toByteArray()); - AggregatingResultIterator aggResultIterator; - // No need to merge sort for ungrouped aggregation - if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) { - aggResultIterator = new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators); - // If salted or local index we still need a merge sort as we'll potentially have multiple group by keys that aren't contiguous. - } else if (groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL)) { - aggResultIterator = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); - } else { - aggResultIterator = new GroupedAggregatingResultIterator( - new MergeSortRowKeyResultIterator(iterators, 0, this.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY),aggregators); - } + } - if (having != null) { - aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having); - } - - if (statement.isDistinct() && statement.isAggregate()) { // Dedup on client if select distinct and aggregation - aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector()); - } + public void serializeUngroupedAggregateRegionObserverIntoScan(Scan scan) { + scan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, QueryConstants.TRUE); + } - ResultIterator resultScanner = aggResultIterator; - if (orderBy.getOrderByExpressions().isEmpty()) { - if (offset != null) { - resultScanner = new OffsetResultIterator(aggResultIterator, offset); - } - if (limit != null) { - resultScanner = new LimitingResultIterator(resultScanner, limit); - } - } else { - long thresholdBytes = - context.getConnection().getQueryServices().getProps().getLongBytes( - QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - boolean spoolingEnabled = - context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); - resultScanner = - new OrderedAggregatingResultIterator(aggResultIterator, - orderBy.getOrderByExpressions(), spoolingEnabled, thresholdBytes, limit, - 
offset); - } - if (context.getSequenceManager().getSequenceCount() > 0) { - resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager()); - } - return resultScanner; + @Override + protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, + Map caches) throws SQLException { + if (groupBy.isEmpty()) { + serializeUngroupedAggregateRegionObserverIntoScan(scan); + } else { + // Set attribute with serialized expressions for coprocessor + serializeGroupedAggregateRegionObserverIntoScan(scan, groupBy.getScanAttribName(), + groupBy.getKeyExpressions()); + if ( + limit != null && orderBy.getOrderByExpressions().isEmpty() && having == null + && ((statement.isDistinct() && !statement.isAggregate()) + || (!statement.isDistinct() && (context.getAggregationManager().isEmpty() + || BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS + .equals(groupBy.getScanAttribName())))) + ) { + /* + * Optimization to early exit from the scan for a GROUP BY or DISTINCT with a LIMIT. We may + * exit early according to the LIMIT specified if the query has: 1) No ORDER BY clause (or + * the ORDER BY was optimized out). We cannot exit early if there's an ORDER BY because the + * first group may be found last in the scan. 2) No HAVING clause, since we execute the + * HAVING on the client side. The LIMIT needs to be evaluated *after* the HAVING. 3) + * DISTINCT clause with no GROUP BY. We cannot exit early if there's a GROUP BY, as the + * GROUP BY is processed on the client-side post aggregation if a DISTNCT has a GROUP BY. + * Otherwise, since there are no aggregate functions in a DISTINCT, we can exit early + * regardless of if the groups are in row key order or unordered. 4) GROUP BY clause with no + * aggregate functions. This is in the same category as (3). If we're using aggregate + * functions, we need to look at all the rows, as otherwise we'd exit early with incorrect + * aggregate function calculations. 5) GROUP BY clause along the pk axis, as the rows are + * processed in row key order, so we can early exit, even when aggregate functions are used, + * as the rows in the group are contiguous. + */ + scan.setAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT, + PInteger.INSTANCE.toBytes(limit + (offset == null ? 0 : offset))); + } } + BaseResultIterators iterators = isSerial + ? new SerialIterators(this, null, null, wrapParallelIteratorFactory(), scanGrouper, scan, + caches, dataPlan) + : new ParallelIterators(this, null, wrapParallelIteratorFactory(), scan, false, caches, + dataPlan); + estimatedRows = iterators.getEstimatedRowCount(); + estimatedSize = iterators.getEstimatedByteCount(); + estimateInfoTimestamp = iterators.getEstimateInfoTimestamp(); + splits = iterators.getSplits(); + scans = iterators.getScans(); - @Override - public boolean useRoundRobinIterator() throws SQLException { - return false; + AggregatingResultIterator aggResultIterator; + // No need to merge sort for ungrouped aggregation + if (groupBy.isEmpty() || groupBy.isUngroupedAggregate()) { + aggResultIterator = + new UngroupedAggregatingResultIterator(new ConcatResultIterator(iterators), aggregators); + // If salted or local index we still need a merge sort as we'll potentially have multiple + // group by keys that aren't contiguous. 
+ } else if ( + groupBy.isOrderPreserving() && !(this.getTableRef().getTable().getBucketNum() != null + || this.getTableRef().getTable().getIndexType() == IndexType.LOCAL) + ) { + aggResultIterator = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); + } else { + aggResultIterator = + new GroupedAggregatingResultIterator(new MergeSortRowKeyResultIterator(iterators, 0, + this.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY), aggregators); } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + if (having != null) { + aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having); } - private static OrderBy convertActualOutputOrderBy(OrderBy orderBy, GroupBy groupBy, StatementContext statementContext) { - if(!orderBy.isEmpty()) { - return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); - } - return ExpressionUtil.convertGroupByToOrderBy(groupBy, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + if (statement.isDistinct() && statement.isAggregate()) { // Dedup on client if select distinct + // and aggregation + aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector()); } - @Override - public List getOutputOrderBys() { - return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + ResultIterator resultScanner = aggResultIterator; + if (orderBy.getOrderByExpressions().isEmpty()) { + if (offset != null) { + resultScanner = new OffsetResultIterator(aggResultIterator, offset); + } + if (limit != null) { + resultScanner = new LimitingResultIterator(resultScanner, limit); + } + } else { + long thresholdBytes = context.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + boolean spoolingEnabled = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); + resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, + orderBy.getOrderByExpressions(), spoolingEnabled, thresholdBytes, limit, offset); + } + if (context.getSequenceManager().getSequenceCount() > 0) { + resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager()); } + return resultScanner; + } - @Override - protected void setScanReversedWhenOrderByIsReversed(Scan scan) { - /** - * For {@link AggregatePlan}, when {@link GroupBy#isOrderPreserving} is false, we have no - * need to set the scan as reversed scan because we have to hash-aggregate the scanned - * results from HBase in RegionServer Coprocessor before sending them to client, only when - * {@link GroupBy#isOrderPreserving} is true and we depend on the original HBase scanned - * order to get the query result, we need to set the scan as reversed scan. 
- */ - if (this.groupBy.isOrderPreserving()) { - super.setScanReversedWhenOrderByIsReversed(scan); - } + @Override + public boolean useRoundRobinIterator() throws SQLException { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + private static OrderBy convertActualOutputOrderBy(OrderBy orderBy, GroupBy groupBy, + StatementContext statementContext) { + if (!orderBy.isEmpty()) { + return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); + } + return ExpressionUtil.convertGroupByToOrderBy(groupBy, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + } + + @Override + public List getOutputOrderBys() { + return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + } + + @Override + protected void setScanReversedWhenOrderByIsReversed(Scan scan) { + /** + * For {@link AggregatePlan}, when {@link GroupBy#isOrderPreserving} is false, we have no need + * to set the scan as reversed scan because we have to hash-aggregate the scanned results from + * HBase in RegionServer Coprocessor before sending them to client, only when + * {@link GroupBy#isOrderPreserving} is true and we depend on the original HBase scanned order + * to get the query result, we need to set the scan as reversed scan. + */ + if (this.groupBy.isOrderPreserving()) { + super.setScanReversedWhenOrderByIsReversed(scan); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java index bee410e6af4..557cb11b9d2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,8 +38,7 @@ import org.apache.phoenix.cache.ServerCacheClient.ServerCache; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.FromCompiler; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; @@ -86,504 +85,509 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * * Query plan that has no child plans - * - * * @since 0.1 */ public abstract class BaseQueryPlan implements QueryPlan { - private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryPlan.class); - protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K - - protected final TableRef tableRef; - protected final Set tableRefs; - protected final StatementContext context; - protected final FilterableStatement statement; - protected final RowProjector projection; - protected final ParameterMetaData paramMetaData; - protected final Integer limit; - protected final Integer offset; - protected final OrderBy orderBy; - protected final GroupBy groupBy; - protected final ParallelIteratorFactory parallelIteratorFactory; - protected final QueryPlan dataPlan; - protected Long estimatedRows; - protected Long estimatedSize; - protected Long estimateInfoTimestamp; - private boolean getEstimatesCalled; - protected boolean isApplicable = true; - - protected BaseQueryPlan( - StatementContext context, FilterableStatement statement, TableRef table, - RowProjector projection, ParameterMetaData paramMetaData, Integer limit, Integer offset, OrderBy orderBy, - GroupBy groupBy, ParallelIteratorFactory parallelIteratorFactory, - QueryPlan dataPlan) { - this.context = context; - this.statement = statement; - this.tableRef = table; - this.tableRefs = ImmutableSet.of(table); - this.projection = projection; - this.paramMetaData = paramMetaData; - this.limit = limit; - this.offset = offset; - this.orderBy = orderBy; - this.groupBy = groupBy; - this.parallelIteratorFactory = parallelIteratorFactory; - this.dataPlan = dataPlan; - } - - @Override - public Operation getOperation() { - return Operation.QUERY; - } - - @Override - public boolean isDegenerate() { - return context.getScanRanges() == ScanRanges.NOTHING; - - } - - @Override - public GroupBy getGroupBy() { - return groupBy; - } - - - @Override - public OrderBy getOrderBy() { - return orderBy; - } - - @Override - public TableRef getTableRef() { - return tableRef; - } - - @Override - public Set getSourceRefs() { - return tableRefs; - } - - @Override - public Integer getLimit() { - return limit; - } - - @Override - public Integer getOffset() { - return offset; - } - - @Override - public RowProjector getProjector() { - return projection; - } - -// /** -// * Sets up an id used to do round robin queue processing on the server -// * @param scan -// */ -// private void setProducer(Scan scan) { -// byte[] producer = Bytes.toBytes(UUID.randomUUID().toString()); -// scan.setAttribute(HBaseServer.CALL_QUEUE_PRODUCER_ATTRIB_NAME, producer); -// } - - @Override - public final 
ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); - } - - @Override - public final ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return iterator(scanGrouper, null); - } - - @Override - public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return iterator(Collections.emptyMap(), scanGrouper, scan); - } - - private ResultIterator getWrappedIterator(final Map dependencies, - ResultIterator iterator) { - ResultIterator wrappedIterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) { - @Override - public void close() throws SQLException { - try { - super.close(); - } finally { - SQLCloseables.closeAll(dependencies.values()); - } - } - }; - return wrappedIterator; - } - - protected void setScanReversedWhenOrderByIsReversed(Scan scan) { - ScanUtil.setReversed(scan); - } - - public final ResultIterator iterator(final Map caches, - ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - if (scan == null) { - scan = context.getScan(); - } - - ScanRanges scanRanges = context.getScanRanges(); - - /* - * For aggregate queries, we still need to let the AggregationPlan to - * proceed so that we can give proper aggregates even if there are no - * row to be scanned. - */ - if (scanRanges == ScanRanges.NOTHING && !getStatement().isAggregate()) { - return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR); - } - - if (tableRef == TableRef.EMPTY_TABLE_REF) { - return newIterator(scanGrouper, scan, caches); - } - - ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); - - // Set miscellaneous scan attributes. This is the last chance to set them before we - // clone the scan for each parallelized chunk. 
- TableRef tableRef = context.getCurrentTable(); - PTable table = tableRef.getTable(); - - if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { - setScanReversedWhenOrderByIsReversed(scan); - // After HBASE-16296 is resolved, we no longer need to set - // scan caching - } - - - PhoenixConnection connection = context.getConnection(); - final int smallScanThreshold = connection.getQueryServices().getProps().getInt(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, - QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD); - - if (statement.getHint().hasHint(Hint.SMALL) || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold)) { - scan.setReadType(Scan.ReadType.PREAD); + private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryPlan.class); + protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K + + protected final TableRef tableRef; + protected final Set tableRefs; + protected final StatementContext context; + protected final FilterableStatement statement; + protected final RowProjector projection; + protected final ParameterMetaData paramMetaData; + protected final Integer limit; + protected final Integer offset; + protected final OrderBy orderBy; + protected final GroupBy groupBy; + protected final ParallelIteratorFactory parallelIteratorFactory; + protected final QueryPlan dataPlan; + protected Long estimatedRows; + protected Long estimatedSize; + protected Long estimateInfoTimestamp; + private boolean getEstimatesCalled; + protected boolean isApplicable = true; + + protected BaseQueryPlan(StatementContext context, FilterableStatement statement, TableRef table, + RowProjector projection, ParameterMetaData paramMetaData, Integer limit, Integer offset, + OrderBy orderBy, GroupBy groupBy, ParallelIteratorFactory parallelIteratorFactory, + QueryPlan dataPlan) { + this.context = context; + this.statement = statement; + this.tableRef = table; + this.tableRefs = ImmutableSet.of(table); + this.projection = projection; + this.paramMetaData = paramMetaData; + this.limit = limit; + this.offset = offset; + this.orderBy = orderBy; + this.groupBy = groupBy; + this.parallelIteratorFactory = parallelIteratorFactory; + this.dataPlan = dataPlan; + } + + @Override + public Operation getOperation() { + return Operation.QUERY; + } + + @Override + public boolean isDegenerate() { + return context.getScanRanges() == ScanRanges.NOTHING; + + } + + @Override + public GroupBy getGroupBy() { + return groupBy; + } + + @Override + public OrderBy getOrderBy() { + return orderBy; + } + + @Override + public TableRef getTableRef() { + return tableRef; + } + + @Override + public Set getSourceRefs() { + return tableRefs; + } + + @Override + public Integer getLimit() { + return limit; + } + + @Override + public Integer getOffset() { + return offset; + } + + @Override + public RowProjector getProjector() { + return projection; + } + + // /** + // * Sets up an id used to do round robin queue processing on the server + // * @param scan + // */ + // private void setProducer(Scan scan) { + // byte[] producer = Bytes.toBytes(UUID.randomUUID().toString()); + // scan.setAttribute(HBaseServer.CALL_QUEUE_PRODUCER_ATTRIB_NAME, producer); + // } + + @Override + public final ResultIterator iterator() throws SQLException { + return iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public final ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return iterator(scanGrouper, null); + } + + @Override + public final ResultIterator 
iterator(ParallelScanGrouper scanGrouper, Scan scan) + throws SQLException { + return iterator(Collections.emptyMap(), scanGrouper, scan); + } + + private ResultIterator getWrappedIterator(final Map dependencies, + ResultIterator iterator) { + ResultIterator wrappedIterator = + dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) { + @Override + public void close() throws SQLException { + try { + super.close(); + } finally { + SQLCloseables.closeAll(dependencies.values()); + } } - - - // set read consistency - if (table.getType() != PTableType.SYSTEM) { - scan.setConsistency(connection.getConsistency()); + }; + return wrappedIterator; + } + + protected void setScanReversedWhenOrderByIsReversed(Scan scan) { + ScanUtil.setReversed(scan); + } + + public final ResultIterator iterator(final Map caches, + ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + if (scan == null) { + scan = context.getScan(); + } + + ScanRanges scanRanges = context.getScanRanges(); + + /* + * For aggregate queries, we still need to let the AggregationPlan to proceed so that we can + * give proper aggregates even if there are no row to be scanned. + */ + if (scanRanges == ScanRanges.NOTHING && !getStatement().isAggregate()) { + return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR); + } + + if (tableRef == TableRef.EMPTY_TABLE_REF) { + return newIterator(scanGrouper, scan, caches); + } + + ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); + + // Set miscellaneous scan attributes. This is the last chance to set them before we + // clone the scan for each parallelized chunk. + TableRef tableRef = context.getCurrentTable(); + PTable table = tableRef.getTable(); + + if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { + setScanReversedWhenOrderByIsReversed(scan); + // After HBASE-16296 is resolved, we no longer need to set + // scan caching + } + + PhoenixConnection connection = context.getConnection(); + final int smallScanThreshold = connection.getQueryServices().getProps().getInt( + QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD); + + if ( + statement.getHint().hasHint(Hint.SMALL) + || (scanRanges.isPointLookup() && scanRanges.getPointLookupCount() < smallScanThreshold) + ) { + scan.setReadType(Scan.ReadType.PREAD); + } + + // set read consistency + if (table.getType() != PTableType.SYSTEM) { + scan.setConsistency(connection.getConsistency()); + } + // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables + if (!table.isTransactional()) { + // Get the time range of row_timestamp column + TimeRange rowTimestampRange = scanRanges.getRowTimestampRange(); + // Get the already existing time range on the scan. + TimeRange scanTimeRange = scan.getTimeRange(); + Long scn = connection.getSCN(); + if (scn == null) { + // Always use latest timestamp unless scn is set or transactional (see PHOENIX-4089) + scn = HConstants.LATEST_TIMESTAMP; + } + try { + TimeRange timeRangeToUse = + ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn); + if (timeRangeToUse == null) { + return ResultIterator.EMPTY_ITERATOR; } - // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables - if (!table.isTransactional()) { - // Get the time range of row_timestamp column - TimeRange rowTimestampRange = scanRanges.getRowTimestampRange(); - // Get the already existing time range on the scan. 
- TimeRange scanTimeRange = scan.getTimeRange(); - Long scn = connection.getSCN(); - if (scn == null) { - // Always use latest timestamp unless scn is set or transactional (see PHOENIX-4089) - scn = HConstants.LATEST_TIMESTAMP; - } - try { - TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn); - if (timeRangeToUse == null) { - return ResultIterator.EMPTY_ITERATOR; - } - scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - byte[] tenantIdBytes; - if( table.isMultiTenant() == true ) { - tenantIdBytes = connection.getTenantId() == null ? null : - ScanUtil.getTenantIdBytes( - table.getRowKeySchema(), - table.getBucketNum() != null, - connection.getTenantId(), table.getViewIndexId() != null); + scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + byte[] tenantIdBytes; + if (table.isMultiTenant() == true) { + tenantIdBytes = connection.getTenantId() == null + ? null + : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, + connection.getTenantId(), table.getViewIndexId() != null); + } else { + tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); + } + + ScanUtil.setTenantId(scan, tenantIdBytes); + String customAnnotations = LogUtil.customAnnotationsToString(connection); + ScanUtil.setCustomAnnotations(scan, + customAnnotations == null ? null : customAnnotations.getBytes()); + // Set index related scan attributes. + if (table.getType() == PTableType.INDEX) { + if (table.getIndexType() == IndexType.LOCAL) { + ScanUtil.setLocalIndex(scan); + } else if (context.isUncoveredIndex()) { + ScanUtil.setUncoveredGlobalIndex(scan); + } + + PTable dataTable = null; + Set dataColumns = context.getDataColumns(); + // If any data columns to join back from data table are present then we set following + // attributes + // 1. data columns to be projected and their key value schema. + // 2. index maintainer and view constants if exists to build data row key from index row key. + // TODO: can have an hint to skip joining back to data table, in that case if any column to + // project is not present in the index then we need to skip this plan. + if (!dataColumns.isEmpty()) { + // Set data columns to be join back from data table. + PTable parentTable = context.getCurrentTable().getTable(); + String parentSchemaName = parentTable.getParentSchemaName().getString(); + if (context.getCDCTableRef() != null) { + dataTable = context.getCDCTableRef().getTable(); } else { - tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); - } - - ScanUtil.setTenantId(scan, tenantIdBytes); - String customAnnotations = LogUtil.customAnnotationsToString(connection); - ScanUtil.setCustomAnnotations(scan, - customAnnotations == null ? null : customAnnotations.getBytes()); - // Set index related scan attributes. - if (table.getType() == PTableType.INDEX) { - if (table.getIndexType() == IndexType.LOCAL) { - ScanUtil.setLocalIndex(scan); - } else if (context.isUncoveredIndex()) { - ScanUtil.setUncoveredGlobalIndex(scan); - } - - PTable dataTable = null; - Set dataColumns = context.getDataColumns(); - // If any data columns to join back from data table are present then we set following attributes - // 1. data columns to be projected and their key value schema. - // 2. 
index maintainer and view constants if exists to build data row key from index row key. - // TODO: can have an hint to skip joining back to data table, in that case if any column to - // project is not present in the index then we need to skip this plan. - if (!dataColumns.isEmpty()) { - // Set data columns to be join back from data table. - PTable parentTable = context.getCurrentTable().getTable(); - String parentSchemaName = parentTable.getParentSchemaName().getString(); - if (context.getCDCTableRef() != null) { - dataTable = context.getCDCTableRef().getTable(); - } - else { - String parentTableName = parentTable.getParentTableName().getString(); - final ParseNodeFactory FACTORY = new ParseNodeFactory(); - TableRef dataTableRef = - FromCompiler.getResolver( - FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), - context.getConnection()).resolveTable(parentSchemaName, parentTableName); - dataTable = dataTableRef.getTable(); - } - } - if (! dataColumns.isEmpty()) { - // Set data columns to be join back from data table. - serializeDataTableColumnsToJoin(scan, dataColumns, dataTable); - KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns); - // Set key value schema of the data columns. - serializeSchemaIntoScan(scan, schema); - if (table.getIndexType() == IndexType.LOCAL) { - // Set index maintainer of the local index. - serializeIndexMaintainerIntoScan(scan, dataTable); - // Set view constants if exists. - serializeViewConstantsIntoScan(scan, dataTable); - } - } - } - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Scan on table " + context.getCurrentTable().getTable().getName() + " ready for iteration: " + scan, connection)); - } - - ResultIterator iterator = newIterator(scanGrouper, scan, caches); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Iterator for table " + context.getCurrentTable().getTable().getName() + " ready: " + iterator, connection)); - } - - // wrap the iterator so we start/end tracing as we expect - if (Tracing.isTracing()) { - TraceScope scope = Tracing.startNewSpan(context.getConnection(), - "Creating basic query for " + getPlanSteps(iterator)); - if (scope.getSpan() != null) return new TracingIterator(scope, iterator); + String parentTableName = parentTable.getParentTableName().getString(); + final ParseNodeFactory FACTORY = new ParseNodeFactory(); + TableRef dataTableRef = FromCompiler.getResolver( + FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)), + context.getConnection()).resolveTable(parentSchemaName, parentTableName); + dataTable = dataTableRef.getTable(); } - return iterator; - } - - private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) throws SQLException { - PName name = context.getCurrentTable().getTable().getName(); - List indexes = Lists.newArrayListWithExpectedSize(1); - for (PTable index : dataTable.getIndexes()) { - if (index.getName().equals(name) && ( - index.getIndexType() == IndexType.LOCAL - || dataTable.getType() == PTableType.CDC)) { - indexes.add(index); - break; - } + } + if (!dataColumns.isEmpty()) { + // Set data columns to be join back from data table. + serializeDataTableColumnsToJoin(scan, dataColumns, dataTable); + KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns); + // Set key value schema of the data columns. + serializeSchemaIntoScan(scan, schema); + if (table.getIndexType() == IndexType.LOCAL) { + // Set index maintainer of the local index. 
+ serializeIndexMaintainerIntoScan(scan, dataTable); + // Set view constants if exists. + serializeViewConstantsIntoScan(scan, dataTable); } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection()); - scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr)); - if (dataTable.isTransactional()) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, context.getConnection().getMutationState().encodeTransaction()); + } + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Scan on table " + + context.getCurrentTable().getTable().getName() + " ready for iteration: " + scan, + connection)); + } + + ResultIterator iterator = newIterator(scanGrouper, scan, caches); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Iterator for table " + + context.getCurrentTable().getTable().getName() + " ready: " + iterator, connection)); + } + + // wrap the iterator so we start/end tracing as we expect + if (Tracing.isTracing()) { + TraceScope scope = Tracing.startNewSpan(context.getConnection(), + "Creating basic query for " + getPlanSteps(iterator)); + if (scope.getSpan() != null) return new TracingIterator(scope, iterator); + } + return iterator; + } + + private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) throws SQLException { + PName name = context.getCurrentTable().getTable().getName(); + List indexes = Lists.newArrayListWithExpectedSize(1); + for (PTable index : dataTable.getIndexes()) { + if ( + index.getName().equals(name) + && (index.getIndexType() == IndexType.LOCAL || dataTable.getType() == PTableType.CDC) + ) { + indexes.add(index); + break; + } + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection()); + scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_BUILD_PROTO, + ByteUtil.copyKeyBytesIfNecessary(ptr)); + if (dataTable.isTransactional()) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, + context.getConnection().getMutationState().encodeTransaction()); + } + } + + public static void serializeViewConstantsIntoScan(Scan scan, PTable dataTable) { + int dataPosOffset = + (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0); + int nViewConstants = 0; + if (dataTable.getType() == PTableType.VIEW) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + List dataPkColumns = dataTable.getPKColumns(); + for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { + PColumn dataPKColumn = dataPkColumns.get(i); + if (dataPKColumn.getViewConstant() != null) { + nViewConstants++; } - } - - public static void serializeViewConstantsIntoScan(Scan scan, PTable dataTable) { - int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 
1 : 0); - int nViewConstants = 0; - if (dataTable.getType() == PTableType.VIEW) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - List dataPkColumns = dataTable.getPKColumns(); - for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { - PColumn dataPKColumn = dataPkColumns.get(i); - if (dataPKColumn.getViewConstant() != null) { - nViewConstants++; - } - } - if (nViewConstants > 0) { - byte[][] viewConstants = new byte[nViewConstants][]; - int j = 0; - for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { - PColumn dataPkColumn = dataPkColumns.get(i); - if (dataPkColumn.getViewConstant() != null) { - if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) { - viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr); - } else { - throw new IllegalStateException(); - } - } - } - serializeViewConstantsIntoScan(viewConstants, scan); + } + if (nViewConstants > 0) { + byte[][] viewConstants = new byte[nViewConstants][]; + int j = 0; + for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { + PColumn dataPkColumn = dataPkColumns.get(i); + if (dataPkColumn.getViewConstant() != null) { + if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) { + viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr); + } else { + throw new IllegalStateException(); } + } } - } - - private static void serializeViewConstantsIntoScan(byte[][] viewConstants, Scan scan) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, viewConstants.length); - for (byte[] viewConstant : viewConstants) { - Bytes.writeByteArray(output, viewConstant); - } - scan.setAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS, stream.toByteArray()); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - private void serializeDataTableColumnsToJoin(Scan scan, Set dataColumns, PTable dataTable) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - boolean storeColsInSingleCell = dataTable.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; - if (storeColsInSingleCell) { - // if storeColsInSingleCell is true all columns of a given column family are stored in a single cell - scan.setAttribute(BaseScannerRegionObserverConstants.COLUMNS_STORED_IN_SINGLE_CELL, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - } - WritableUtils.writeVInt(output, dataColumns.size()); - for (PColumn column : dataColumns) { - byte[] cf = column.getFamilyName().getBytes(); - byte[] cq = column.getColumnQualifierBytes(); - Bytes.writeByteArray(output, cf); - Bytes.writeByteArray(output, cq); - } - scan.setAttribute(BaseScannerRegionObserverConstants.DATA_TABLE_COLUMNS_TO_JOIN, stream.toByteArray()); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - private void serializeSchemaIntoScan(Scan scan, KeyValueSchema schema) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(schema.getEstimatedByteSize()); - try { - DataOutputStream output = new DataOutputStream(stream); - schema.write(output); - scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_JOIN_SCHEMA, stream.toByteArray()); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - 
try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - abstract protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, Map caches) throws SQLException; - - @Override - public long getEstimatedSize() { - return DEFAULT_ESTIMATED_SIZE; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return paramMetaData; - } - - @Override - public FilterableStatement getStatement() { - return statement; - } - - @Override - public StatementContext getContext() { - return context; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - if (context.getScanRanges() == ScanRanges.NOTHING) { - return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString())); - } - - ResultIterator iterator = iterator(); - Pair, ExplainPlanAttributes> planSteps = - getPlanStepsV2(iterator); - ExplainPlan explainPlan = new ExplainPlan(planSteps.getLeft(), - planSteps.getRight()); - iterator.close(); - return explainPlan; - } - - private List getPlanSteps(ResultIterator iterator) { - List planSteps = Lists.newArrayListWithExpectedSize(5); - iterator.explain(planSteps); - return planSteps; - } - - private Pair, ExplainPlanAttributes> getPlanStepsV2( - ResultIterator iterator) { - List planSteps = Lists.newArrayListWithExpectedSize(5); - ExplainPlanAttributesBuilder builder = - new ExplainPlanAttributesBuilder(); - iterator.explain(planSteps, builder); - return Pair.of(planSteps, builder.build()); - } - - @Override - public boolean isRowKeyOrdered() { - return groupBy.isEmpty() ? orderBy.getOrderByExpressions().isEmpty() : groupBy.isOrderPreserving(); - } - - @Override - public Long getEstimatedRowsToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedRows; - } - - @Override - public Long getEstimatedBytesToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedSize; - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimateInfoTimestamp; - } - - public boolean isApplicable(){ - return isApplicable; - } - - public void setApplicable(boolean isApplicable){ - this.isApplicable = isApplicable; - } - - private void getEstimates() throws SQLException { - getEstimatesCalled = true; - // Initialize a dummy iterator to get the estimates based on stats. 
- ResultIterator iterator = iterator(); - iterator.close(); - } + serializeViewConstantsIntoScan(viewConstants, scan); + } + } + } + + private static void serializeViewConstantsIntoScan(byte[][] viewConstants, Scan scan) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, viewConstants.length); + for (byte[] viewConstant : viewConstants) { + Bytes.writeByteArray(output, viewConstant); + } + scan.setAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS, stream.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private void serializeDataTableColumnsToJoin(Scan scan, Set dataColumns, + PTable dataTable) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + boolean storeColsInSingleCell = dataTable.getImmutableStorageScheme() + == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; + if (storeColsInSingleCell) { + // if storeColsInSingleCell is true all columns of a given column family are stored in a + // single cell + scan.setAttribute(BaseScannerRegionObserverConstants.COLUMNS_STORED_IN_SINGLE_CELL, + QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + } + WritableUtils.writeVInt(output, dataColumns.size()); + for (PColumn column : dataColumns) { + byte[] cf = column.getFamilyName().getBytes(); + byte[] cq = column.getColumnQualifierBytes(); + Bytes.writeByteArray(output, cf); + Bytes.writeByteArray(output, cq); + } + scan.setAttribute(BaseScannerRegionObserverConstants.DATA_TABLE_COLUMNS_TO_JOIN, + stream.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private void serializeSchemaIntoScan(Scan scan, KeyValueSchema schema) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(schema.getEstimatedByteSize()); + try { + DataOutputStream output = new DataOutputStream(stream); + schema.write(output); + scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_JOIN_SCHEMA, + stream.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + abstract protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, + Map caches) throws SQLException; + + @Override + public long getEstimatedSize() { + return DEFAULT_ESTIMATED_SIZE; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return paramMetaData; + } + + @Override + public FilterableStatement getStatement() { + return statement; + } + + @Override + public StatementContext getContext() { + return context; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + if (context.getScanRanges() == ScanRanges.NOTHING) { + return new ExplainPlan(Collections + .singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString())); + } + + ResultIterator iterator = iterator(); + Pair, ExplainPlanAttributes> planSteps = getPlanStepsV2(iterator); + ExplainPlan explainPlan = new ExplainPlan(planSteps.getLeft(), planSteps.getRight()); + iterator.close(); + return explainPlan; + } + + private List getPlanSteps(ResultIterator iterator) { + List planSteps = 
Lists.newArrayListWithExpectedSize(5); + iterator.explain(planSteps); + return planSteps; + } + + private Pair, ExplainPlanAttributes> getPlanStepsV2(ResultIterator iterator) { + List planSteps = Lists.newArrayListWithExpectedSize(5); + ExplainPlanAttributesBuilder builder = new ExplainPlanAttributesBuilder(); + iterator.explain(planSteps, builder); + return Pair.of(planSteps, builder.build()); + } + + @Override + public boolean isRowKeyOrdered() { + return groupBy.isEmpty() + ? orderBy.getOrderByExpressions().isEmpty() + : groupBy.isOrderPreserving(); + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedRows; + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedSize; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimateInfoTimestamp; + } + + public boolean isApplicable() { + return isApplicable; + } + + public void setApplicable(boolean isApplicable) { + this.isApplicable = isApplicable; + } + + private void getEstimates() throws SQLException { + getEstimatesCalled = true; + // Initialize a dummy iterator to get the estimates based on stats. + ResultIterator iterator = iterator(); + iterator.close(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java index b9a34c217e3..5a2a5ffb252 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.execute; -import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.AGGREGATORS; +import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY; import java.io.IOException; import java.sql.SQLException; @@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.QueryPlan; @@ -71,315 +70,317 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.CostUtil; import org.apache.phoenix.util.ExpressionUtil; import org.apache.phoenix.util.TupleUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class ClientAggregatePlan extends ClientProcessingPlan { - private final GroupBy groupBy; - private final Expression having; - private final ServerAggregators serverAggregators; - private final ClientAggregators clientAggregators; - private final boolean useHashAgg; - private OrderBy actualOutputOrderBy; - - public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, - Integer limit, Integer offset, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { - super(context, statement, table, projector, limit, offset, where, orderBy, delegate); - this.groupBy = groupBy; - this.having = having; - this.clientAggregators = context.getAggregationManager().getAggregators(); - // We must deserialize rather than clone based off of client aggregators because - // upon deserialization we create the server-side aggregators instead of the client-side - // aggregators. We use the Configuration directly here to avoid the expense of creating - // another one. - this.serverAggregators = ServerAggregators.deserialize(context.getScan() - .getAttribute(AGGREGATORS), context.getConnection().getQueryServices().getConfiguration(), null); - - // Extract hash aggregate hint, if any. 
- HintNode hints = statement.getHint(); - useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); - this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context); + private final GroupBy groupBy; + private final Expression having; + private final ServerAggregators serverAggregators; + private final ClientAggregators clientAggregators; + private final boolean useHashAgg; + private OrderBy actualOutputOrderBy; + + public ClientAggregatePlan(StatementContext context, FilterableStatement statement, + TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, + OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) { + super(context, statement, table, projector, limit, offset, where, orderBy, delegate); + this.groupBy = groupBy; + this.having = having; + this.clientAggregators = context.getAggregationManager().getAggregators(); + // We must deserialize rather than clone based off of client aggregators because + // upon deserialization we create the server-side aggregators instead of the client-side + // aggregators. We use the Configuration directly here to avoid the expense of creating + // another one. + this.serverAggregators = + ServerAggregators.deserialize(context.getScan().getAttribute(AGGREGATORS), + context.getConnection().getQueryServices().getConfiguration(), null); + + // Extract hash aggregate hint, if any. + HintNode hints = statement.getHint(); + useHashAgg = hints != null && hints.hasHint(HintNode.Hint.HASH_AGGREGATE); + this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context); + } + + @Override + public Cost getCost() { + Double outputBytes = this.accept(new ByteCountVisitor()); + Double inputRows = this.getDelegate().accept(new RowCountVisitor()); + Double rowWidth = this.accept(new AvgRowWidthVisitor()); + if (inputRows == null || outputBytes == null || rowWidth == null) { + return Cost.UNKNOWN; } + double inputBytes = inputRows * rowWidth; + double rowsBeforeHaving = + RowCountVisitor.aggregate(RowCountVisitor.filter(inputRows.doubleValue(), + RowCountVisitor.stripSkipScanFilter(context.getScan().getFilter())), groupBy); + double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, having); + double bytesBeforeHaving = rowWidth * rowsBeforeHaving; + double bytesAfterHaving = rowWidth * rowsAfterHaving; - @Override - public Cost getCost() { - Double outputBytes = this.accept(new ByteCountVisitor()); - Double inputRows = this.getDelegate().accept(new RowCountVisitor()); - Double rowWidth = this.accept(new AvgRowWidthVisitor()); - if (inputRows == null || outputBytes == null || rowWidth == null) { - return Cost.UNKNOWN; - } - double inputBytes = inputRows * rowWidth; - double rowsBeforeHaving = RowCountVisitor.aggregate( - RowCountVisitor.filter( - inputRows.doubleValue(), - RowCountVisitor.stripSkipScanFilter( - context.getScan().getFilter())), - groupBy); - double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, having); - double bytesBeforeHaving = rowWidth * rowsBeforeHaving; - double bytesAfterHaving = rowWidth * rowsAfterHaving; - - int parallelLevel = CostUtil.estimateParallelLevel( - false, context.getConnection().getQueryServices()); - Cost cost = CostUtil.estimateAggregateCost( - inputBytes, bytesBeforeHaving, groupBy, parallelLevel); - if (!orderBy.getOrderByExpressions().isEmpty()) { - Cost orderByCost = CostUtil.estimateOrderByCost( - bytesAfterHaving, outputBytes, parallelLevel); - cost = cost.plus(orderByCost); - } - return 
super.getCost().plus(cost); + int parallelLevel = + CostUtil.estimateParallelLevel(false, context.getConnection().getQueryServices()); + Cost cost = + CostUtil.estimateAggregateCost(inputBytes, bytesBeforeHaving, groupBy, parallelLevel); + if (!orderBy.getOrderByExpressions().isEmpty()) { + Cost orderByCost = CostUtil.estimateOrderByCost(bytesAfterHaving, outputBytes, parallelLevel); + cost = cost.plus(orderByCost); } + return super.getCost().plus(cost); + } - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - ResultIterator iterator = delegate.iterator(scanGrouper, scan); - if (where != null) { - iterator = new FilterResultIterator(iterator, where); - } - - AggregatingResultIterator aggResultIterator; - if (groupBy.isEmpty()) { - aggResultIterator = new ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators); - aggResultIterator = new UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); - } else { - List keyExpressions = groupBy.getKeyExpressions(); - if (groupBy.isOrderPreserving()) { - aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); - } else { - long thresholdBytes = - context.getConnection().getQueryServices().getProps().getLongBytes( - QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - boolean spoolingEnabled = - context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); - List keyExpressionOrderBy = Lists.newArrayListWithExpectedSize(keyExpressions.size()); - for (Expression keyExpression : keyExpressions) { - /** - * Sort the result tuples by the GroupBy expressions. - * If some GroupBy expression is SortOrder.DESC, then sorted results on that expression are DESC, not ASC. 
- * for ClientAggregatePlan,the orderBy should not be OrderBy.REV_ROW_KEY_ORDER_BY, which is different from {@link AggregatePlan.OrderingResultIteratorFactory#newIterator} - **/ - keyExpressionOrderBy.add(OrderByExpression.createByCheckIfOrderByReverse(keyExpression, false, true, false)); - } - - if (useHashAgg) { - // Pass in orderBy to apply any sort that has been optimized away - aggResultIterator = new ClientHashAggregatingResultIterator(context, iterator, serverAggregators, keyExpressions, orderBy); - } else { - iterator = - new OrderedResultIterator(iterator, keyExpressionOrderBy, - spoolingEnabled, thresholdBytes, null, null, - projector.getEstimatedRowByteSize()); - aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); - } - } - aggResultIterator = new GroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); - } + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + ResultIterator iterator = delegate.iterator(scanGrouper, scan); + if (where != null) { + iterator = new FilterResultIterator(iterator, where); + } - if (having != null) { - aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having); - } - - if (statement.isDistinct() && statement.isAggregate()) { // Dedup on client if select distinct and aggregation - aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector()); + AggregatingResultIterator aggResultIterator; + if (groupBy.isEmpty()) { + aggResultIterator = new ClientUngroupedAggregatingResultIterator( + LookAheadResultIterator.wrap(iterator), serverAggregators); + aggResultIterator = new UngroupedAggregatingResultIterator( + LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); + } else { + List keyExpressions = groupBy.getKeyExpressions(); + if (groupBy.isOrderPreserving()) { + aggResultIterator = new ClientGroupedAggregatingResultIterator( + LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); + } else { + long thresholdBytes = context.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + boolean spoolingEnabled = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); + List keyExpressionOrderBy = + Lists.newArrayListWithExpectedSize(keyExpressions.size()); + for (Expression keyExpression : keyExpressions) { + /** + * Sort the result tuples by the GroupBy expressions. If some GroupBy expression is + * SortOrder.DESC, then sorted results on that expression are DESC, not ASC. 
for + * ClientAggregatePlan,the orderBy should not be OrderBy.REV_ROW_KEY_ORDER_BY, which is + * different from {@link AggregatePlan.OrderingResultIteratorFactory#newIterator} + **/ + keyExpressionOrderBy.add( + OrderByExpression.createByCheckIfOrderByReverse(keyExpression, false, true, false)); } - ResultIterator resultScanner = aggResultIterator; - if (orderBy.getOrderByExpressions().isEmpty()) { - if (offset != null) { - resultScanner = new OffsetResultIterator(resultScanner, offset); - } - if (limit != null) { - resultScanner = new LimitingResultIterator(resultScanner, limit); - } + if (useHashAgg) { + // Pass in orderBy to apply any sort that has been optimized away + aggResultIterator = new ClientHashAggregatingResultIterator(context, iterator, + serverAggregators, keyExpressions, orderBy); } else { - long thresholdBytes = - context.getConnection().getQueryServices().getProps().getLongBytes( - QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - boolean spoolingEnabled = - context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); - resultScanner = - new OrderedAggregatingResultIterator(aggResultIterator, - orderBy.getOrderByExpressions(), spoolingEnabled, thresholdBytes, limit, - offset); + iterator = new OrderedResultIterator(iterator, keyExpressionOrderBy, spoolingEnabled, + thresholdBytes, null, null, projector.getEstimatedRowByteSize()); + aggResultIterator = new ClientGroupedAggregatingResultIterator( + LookAheadResultIterator.wrap(iterator), serverAggregators, keyExpressions); } - if (context.getSequenceManager().getSequenceCount() > 0) { - resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager()); - } - - return resultScanner; + } + aggResultIterator = new GroupedAggregatingResultIterator( + LookAheadResultIterator.wrap(aggResultIterator), clientAggregators); } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = delegate.getExplainPlan(); - List planSteps = Lists.newArrayList(explainPlan.getPlanSteps()); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - if (where != null) { - planSteps.add("CLIENT FILTER BY " + where.toString()); - newBuilder.setClientFilterBy(where.toString()); - } - if (groupBy.isEmpty()) { - planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW"); - newBuilder.setClientAggregate("CLIENT AGGREGATE INTO SINGLE ROW"); - } else if (groupBy.isOrderPreserving()) { - planSteps.add("CLIENT AGGREGATE INTO ORDERED DISTINCT ROWS BY " - + groupBy.getExpressions().toString()); - newBuilder.setClientAggregate("CLIENT AGGREGATE INTO ORDERED DISTINCT ROWS BY " - + groupBy.getExpressions().toString()); - } else if (useHashAgg) { - planSteps.add("CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); - newBuilder.setClientAggregate("CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " - + groupBy.getExpressions().toString()); - if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { - planSteps.add("CLIENT SORTED BY " + groupBy.getKeyExpressions().toString()); - newBuilder.setClientSortedBy( - groupBy.getKeyExpressions().toString()); - } - } else { - planSteps.add("CLIENT SORTED BY " + 
groupBy.getKeyExpressions().toString()); - planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); - newBuilder.setClientSortedBy(groupBy.getKeyExpressions().toString()); - newBuilder.setClientAggregate("CLIENT AGGREGATE INTO DISTINCT ROWS BY " - + groupBy.getExpressions().toString()); - } - if (having != null) { - planSteps.add("CLIENT AFTER-AGGREGATION FILTER BY " + having.toString()); - newBuilder.setClientAfterAggregate("CLIENT AFTER-AGGREGATION FILTER BY " - + having.toString()); - } - if (statement.isDistinct() && statement.isAggregate()) { - planSteps.add("CLIENT DISTINCT ON " + projector.toString()); - newBuilder.setClientDistinctFilter(projector.toString()); - } - if (offset != null) { - planSteps.add("CLIENT OFFSET " + offset); - newBuilder.setClientOffset(offset); - } - if (orderBy.getOrderByExpressions().isEmpty()) { - if (limit != null) { - planSteps.add("CLIENT " + limit + " ROW LIMIT"); - newBuilder.setClientRowLimit(limit); - } - } else { - planSteps.add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString()); - newBuilder.setClientRowLimit(limit); - newBuilder.setClientSortedBy( - orderBy.getOrderByExpressions().toString()); - } - if (context.getSequenceManager().getSequenceCount() > 0) { - int nSequences = context.getSequenceManager().getSequenceCount(); - planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S")); - newBuilder.setClientSequenceCount(nSequences); - } - - return new ExplainPlan(planSteps, newBuilder.build()); + if (having != null) { + aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having); } - @Override - public GroupBy getGroupBy() { - return groupBy; + if (statement.isDistinct() && statement.isAggregate()) { // Dedup on client if select distinct + // and aggregation + aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector()); } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + ResultIterator resultScanner = aggResultIterator; + if (orderBy.getOrderByExpressions().isEmpty()) { + if (offset != null) { + resultScanner = new OffsetResultIterator(resultScanner, offset); + } + if (limit != null) { + resultScanner = new LimitingResultIterator(resultScanner, limit); + } + } else { + long thresholdBytes = context.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + boolean spoolingEnabled = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); + resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, + orderBy.getOrderByExpressions(), spoolingEnabled, thresholdBytes, limit, offset); } + if (context.getSequenceManager().getSequenceCount() > 0) { + resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager()); + } + + return resultScanner; + } - public Expression getHaving() { - return having; + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = delegate.getExplainPlan(); + List planSteps = Lists.newArrayList(explainPlan.getPlanSteps()); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + 
ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + if (where != null) { + planSteps.add("CLIENT FILTER BY " + where.toString()); + newBuilder.setClientFilterBy(where.toString()); + } + if (groupBy.isEmpty()) { + planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW"); + newBuilder.setClientAggregate("CLIENT AGGREGATE INTO SINGLE ROW"); + } else if (groupBy.isOrderPreserving()) { + planSteps.add( + "CLIENT AGGREGATE INTO ORDERED DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + newBuilder.setClientAggregate( + "CLIENT AGGREGATE INTO ORDERED DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + } else if (useHashAgg) { + planSteps + .add("CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + newBuilder.setClientAggregate( + "CLIENT HASH AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { + planSteps.add("CLIENT SORTED BY " + groupBy.getKeyExpressions().toString()); + newBuilder.setClientSortedBy(groupBy.getKeyExpressions().toString()); + } + } else { + planSteps.add("CLIENT SORTED BY " + groupBy.getKeyExpressions().toString()); + planSteps + .add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + newBuilder.setClientSortedBy(groupBy.getKeyExpressions().toString()); + newBuilder.setClientAggregate( + "CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString()); + } + if (having != null) { + planSteps.add("CLIENT AFTER-AGGREGATION FILTER BY " + having.toString()); + newBuilder.setClientAfterAggregate("CLIENT AFTER-AGGREGATION FILTER BY " + having.toString()); + } + if (statement.isDistinct() && statement.isAggregate()) { + planSteps.add("CLIENT DISTINCT ON " + projector.toString()); + newBuilder.setClientDistinctFilter(projector.toString()); + } + if (offset != null) { + planSteps.add("CLIENT OFFSET " + offset); + newBuilder.setClientOffset(offset); + } + if (orderBy.getOrderByExpressions().isEmpty()) { + if (limit != null) { + planSteps.add("CLIENT " + limit + " ROW LIMIT"); + newBuilder.setClientRowLimit(limit); + } + } else { + planSteps + .add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + + " SORTED BY " + orderBy.getOrderByExpressions().toString()); + newBuilder.setClientRowLimit(limit); + newBuilder.setClientSortedBy(orderBy.getOrderByExpressions().toString()); + } + if (context.getSequenceManager().getSequenceCount() > 0) { + int nSequences = context.getSequenceManager().getSequenceCount(); + planSteps.add( + "CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? 
"" : "S")); + newBuilder.setClientSequenceCount(nSequences); } - private static class ClientGroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator { - private final List groupByExpressions; + return new ExplainPlan(planSteps, newBuilder.build()); + } - public ClientGroupedAggregatingResultIterator(PeekingResultIterator iterator, Aggregators aggregators, List groupByExpressions) { - super(iterator, aggregators); - this.groupByExpressions = groupByExpressions; - } + @Override + public GroupBy getGroupBy() { + return groupBy; + } - @Override - protected ImmutableBytesWritable getGroupingKey(Tuple tuple, - ImmutableBytesWritable ptr) throws SQLException { - try { - ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(tuple, groupByExpressions); - ptr.set(key.get(), key.getOffset(), key.getLength()); - return ptr; - } catch (IOException e) { - throw new SQLException(e); - } - } + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } - @Override - protected Tuple wrapKeyValueAsResult(Cell keyValue) { - return new MultiKeyValueTuple(Collections. singletonList(keyValue)); - } + public Expression getHaving() { + return having; + } - @Override - public String toString() { - return "ClientGroupedAggregatingResultIterator [resultIterator=" - + resultIterator + ", aggregators=" + aggregators + ", groupByExpressions=" - + groupByExpressions + "]"; - } + private static class ClientGroupedAggregatingResultIterator + extends BaseGroupedAggregatingResultIterator { + private final List groupByExpressions; + + public ClientGroupedAggregatingResultIterator(PeekingResultIterator iterator, + Aggregators aggregators, List groupByExpressions) { + super(iterator, aggregators); + this.groupByExpressions = groupByExpressions; } - private static class ClientUngroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator { + @Override + protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) + throws SQLException { + try { + ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(tuple, groupByExpressions); + ptr.set(key.get(), key.getOffset(), key.getLength()); + return ptr; + } catch (IOException e) { + throw new SQLException(e); + } + } - public ClientUngroupedAggregatingResultIterator(PeekingResultIterator iterator, Aggregators aggregators) { - super(iterator, aggregators); - } + @Override + protected Tuple wrapKeyValueAsResult(Cell keyValue) { + return new MultiKeyValueTuple(Collections. singletonList(keyValue)); + } - @Override - protected ImmutableBytesWritable getGroupingKey(Tuple tuple, - ImmutableBytesWritable ptr) throws SQLException { - ptr.set(UNGROUPED_AGG_ROW_KEY); - return ptr; - } + @Override + public String toString() { + return "ClientGroupedAggregatingResultIterator [resultIterator=" + resultIterator + + ", aggregators=" + aggregators + ", groupByExpressions=" + groupByExpressions + "]"; + } + } - @Override - protected Tuple wrapKeyValueAsResult(Cell keyValue) - throws SQLException { - return new MultiKeyValueTuple(Collections. 
singletonList(keyValue)); - } + private static class ClientUngroupedAggregatingResultIterator + extends BaseGroupedAggregatingResultIterator { - @Override - public String toString() { - return "ClientUngroupedAggregatingResultIterator [resultIterator=" - + resultIterator + ", aggregators=" + aggregators + "]"; - } + public ClientUngroupedAggregatingResultIterator(PeekingResultIterator iterator, + Aggregators aggregators) { + super(iterator, aggregators); } - private OrderBy convertActualOutputOrderBy(OrderBy orderBy, GroupBy groupBy, StatementContext statementContext) { - if(!orderBy.isEmpty()) { - return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); - } - - if(this.useHashAgg && - !groupBy.isEmpty() && - !groupBy.isOrderPreserving() && - orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && - orderBy != OrderBy.REV_ROW_KEY_ORDER_BY) { - return OrderBy.EMPTY_ORDER_BY; - } + @Override + protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) + throws SQLException { + ptr.set(UNGROUPED_AGG_ROW_KEY); + return ptr; + } - return ExpressionUtil.convertGroupByToOrderBy(groupBy, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + @Override + protected Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException { + return new MultiKeyValueTuple(Collections. singletonList(keyValue)); } @Override - public List getOutputOrderBys() { - return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + public String toString() { + return "ClientUngroupedAggregatingResultIterator [resultIterator=" + resultIterator + + ", aggregators=" + aggregators + "]"; + } + } + + private OrderBy convertActualOutputOrderBy(OrderBy orderBy, GroupBy groupBy, + StatementContext statementContext) { + if (!orderBy.isEmpty()) { + return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); } + + if ( + this.useHashAgg && !groupBy.isEmpty() && !groupBy.isOrderPreserving() + && orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && orderBy != OrderBy.REV_ROW_KEY_ORDER_BY + ) { + return OrderBy.EMPTY_ORDER_BY; + } + + return ExpressionUtil.convertGroupByToOrderBy(groupBy, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + } + + @Override + public List getOutputOrderBys() { + return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java index 75ba8f2b071..adb96819184 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,76 +17,78 @@ */ package org.apache.phoenix.execute; +import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.RowProjector; import org.apache.phoenix.compile.StatementContext; -import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FilterableStatement; import org.apache.phoenix.schema.TableRef; /** - * Query plan that does where, order-by limit at client side, which is - * for derived-table queries that cannot be flattened by SubselectRewriter. + * Query plan that does where, order-by limit at client side, which is for derived-table queries + * that cannot be flattened by SubselectRewriter. */ public abstract class ClientProcessingPlan extends DelegateQueryPlan { - protected final StatementContext context; - protected final FilterableStatement statement; - protected final TableRef table; - protected final RowProjector projector; - protected final Integer limit; - protected final Integer offset; - protected final Expression where; - protected final OrderBy orderBy; - public ClientProcessingPlan(StatementContext context, FilterableStatement statement, TableRef table, - RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, QueryPlan delegate) { - super(delegate); - this.context = context; - this.statement = statement; - this.table = table; - this.projector = projector; - this.limit = limit; - this.where = where; - this.orderBy = orderBy; - this.offset = offset; - } - - @Override - public StatementContext getContext() { - return context; - } + protected final StatementContext context; + protected final FilterableStatement statement; + protected final TableRef table; + protected final RowProjector projector; + protected final Integer limit; + protected final Integer offset; + protected final Expression where; + protected final OrderBy orderBy; + + public ClientProcessingPlan(StatementContext context, FilterableStatement statement, + TableRef table, RowProjector projector, Integer limit, Integer offset, Expression where, + OrderBy orderBy, QueryPlan delegate) { + super(delegate); + this.context = context; + this.statement = statement; + this.table = table; + this.projector = projector; + this.limit = limit; + this.where = where; + this.orderBy = orderBy; + this.offset = offset; + } + + @Override + public StatementContext getContext() { + return context; + } + + @Override + public TableRef getTableRef() { + return table; + } - @Override - public TableRef getTableRef() { - return table; - } + @Override + public RowProjector getProjector() { + return projector; + } - @Override - public RowProjector getProjector() { - return projector; - } + @Override + public Integer getLimit() { + return limit; + } - @Override - public Integer getLimit() { - return limit; - } - - @Override - public Integer getOffset() { - return offset; - } + @Override + public Integer getOffset() { + return offset; + } - @Override - public OrderBy getOrderBy() { - return orderBy; - } + @Override + public OrderBy getOrderBy() { + return orderBy; + } - @Override - public FilterableStatement getStatement() { - return statement; - } + @Override + public 
FilterableStatement getStatement() { + return statement; + } - public Expression getWhere() { - return where; - } + public Expression getWhere() { + return where; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java index caec1b69d2d..f0b2650c352 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,8 +24,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.RowProjector; @@ -45,140 +44,131 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.TableRef; -import org.apache.phoenix.util.CostUtil; -import org.apache.phoenix.util.ExpressionUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.CostUtil; public class ClientScanPlan extends ClientProcessingPlan { - private List actualOutputOrderBys; + private List actualOutputOrderBys; + + public ClientScanPlan(StatementContext context, FilterableStatement statement, TableRef table, + RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, + QueryPlan delegate) { + super(context, statement, table, projector, limit, offset, where, orderBy, delegate); + this.actualOutputOrderBys = convertActualOutputOrderBy(orderBy, delegate, context); + } + + @Override + public Cost getCost() { + Double inputBytes = this.getDelegate().accept(new ByteCountVisitor()); + Double outputBytes = this.accept(new ByteCountVisitor()); - public ClientScanPlan(StatementContext context, FilterableStatement statement, TableRef table, - RowProjector projector, Integer limit, Integer offset, Expression where, OrderBy orderBy, - QueryPlan delegate) { - super(context, statement, table, projector, limit, offset, where, orderBy, delegate); - this.actualOutputOrderBys = convertActualOutputOrderBy(orderBy, delegate, context); + if (inputBytes == null || outputBytes == null) { + return Cost.UNKNOWN; } - @Override - public Cost getCost() { - Double inputBytes = this.getDelegate().accept(new ByteCountVisitor()); - Double outputBytes = this.accept(new ByteCountVisitor()); - - if (inputBytes == null || outputBytes == null) { - return Cost.UNKNOWN; - } - - int parallelLevel = CostUtil.estimateParallelLevel( - false, context.getConnection().getQueryServices()); - Cost cost = new Cost(0, 0, 0); - if (!orderBy.getOrderByExpressions().isEmpty()) { - Cost orderByCost = - CostUtil.estimateOrderByCost(inputBytes, outputBytes, parallelLevel); - cost = cost.plus(orderByCost); - } - return 
super.getCost().plus(cost); + int parallelLevel = + CostUtil.estimateParallelLevel(false, context.getConnection().getQueryServices()); + Cost cost = new Cost(0, 0, 0); + if (!orderBy.getOrderByExpressions().isEmpty()) { + Cost orderByCost = CostUtil.estimateOrderByCost(inputBytes, outputBytes, parallelLevel); + cost = cost.plus(orderByCost); + } + return super.getCost().plus(cost); + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + ResultIterator iterator = delegate.iterator(scanGrouper, scan); + if (where != null) { + iterator = new FilterResultIterator(iterator, where); } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + if (!orderBy.getOrderByExpressions().isEmpty()) { // TopN + long thresholdBytes = context.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + boolean spoolingEnabled = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); + iterator = new OrderedResultIterator(iterator, orderBy.getOrderByExpressions(), + spoolingEnabled, thresholdBytes, limit, offset, projector.getEstimatedRowByteSize()); + } else { + if (offset != null) { + iterator = new OffsetResultIterator(iterator, offset); + } + if (limit != null) { + iterator = new LimitingResultIterator(iterator, limit); + } } - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - ResultIterator iterator = delegate.iterator(scanGrouper, scan); - if (where != null) { - iterator = new FilterResultIterator(iterator, where); - } - - if (!orderBy.getOrderByExpressions().isEmpty()) { // TopN - long thresholdBytes = - context.getConnection().getQueryServices().getProps().getLongBytes( - QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - boolean spoolingEnabled = - context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED); - iterator = - new OrderedResultIterator(iterator, orderBy.getOrderByExpressions(), - spoolingEnabled, thresholdBytes, limit, offset, - projector.getEstimatedRowByteSize()); - } else { - if (offset != null) { - iterator = new OffsetResultIterator(iterator, offset); - } - if (limit != null) { - iterator = new LimitingResultIterator(iterator, limit); - } - } - - if (context.getSequenceManager().getSequenceCount() > 0) { - iterator = new SequenceResultIterator(iterator, context.getSequenceManager()); - } - - return iterator; + if (context.getSequenceManager().getSequenceCount() > 0) { + iterator = new SequenceResultIterator(iterator, context.getSequenceManager()); } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = delegate.getExplainPlan(); - List currentPlanSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - List planSteps = Lists.newArrayList(currentPlanSteps); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - if (where != null) { - 
planSteps.add("CLIENT FILTER BY " + where.toString()); - newBuilder.setClientFilterBy(where.toString()); - } - if (!orderBy.getOrderByExpressions().isEmpty()) { - if (offset != null) { - planSteps.add("CLIENT OFFSET " + offset); - newBuilder.setClientOffset(offset); - } - planSteps.add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) - + " SORTED BY " + orderBy.getOrderByExpressions().toString()); - newBuilder.setClientRowLimit(limit); - newBuilder.setClientSortedBy( - orderBy.getOrderByExpressions().toString()); - } else { - if (offset != null) { - planSteps.add("CLIENT OFFSET " + offset); - newBuilder.setClientOffset(offset); - } - if (limit != null) { - planSteps.add("CLIENT " + limit + " ROW LIMIT"); - newBuilder.setClientRowLimit(limit); - } - } - if (context.getSequenceManager().getSequenceCount() > 0) { - int nSequences = context.getSequenceManager().getSequenceCount(); - planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S")); - newBuilder.setClientSequenceCount(nSequences); - } - - return new ExplainPlan(planSteps, newBuilder.build()); + return iterator; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = delegate.getExplainPlan(); + List currentPlanSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + List planSteps = Lists.newArrayList(currentPlanSteps); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + if (where != null) { + planSteps.add("CLIENT FILTER BY " + where.toString()); + newBuilder.setClientFilterBy(where.toString()); + } + if (!orderBy.getOrderByExpressions().isEmpty()) { + if (offset != null) { + planSteps.add("CLIENT OFFSET " + offset); + newBuilder.setClientOffset(offset); + } + planSteps + .add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + + " SORTED BY " + orderBy.getOrderByExpressions().toString()); + newBuilder.setClientRowLimit(limit); + newBuilder.setClientSortedBy(orderBy.getOrderByExpressions().toString()); + } else { + if (offset != null) { + planSteps.add("CLIENT OFFSET " + offset); + newBuilder.setClientOffset(offset); + } + if (limit != null) { + planSteps.add("CLIENT " + limit + " ROW LIMIT"); + newBuilder.setClientRowLimit(limit); + } + } + if (context.getSequenceManager().getSequenceCount() > 0) { + int nSequences = context.getSequenceManager().getSequenceCount(); + planSteps.add( + "CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? 
"" : "S")); + newBuilder.setClientSequenceCount(nSequences); } - private static List convertActualOutputOrderBy( - OrderBy orderBy, - QueryPlan targetQueryPlan, - StatementContext statementContext) { + return new ExplainPlan(planSteps, newBuilder.build()); + } - if(!orderBy.isEmpty()) { - return Collections.singletonList(OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy)); - } + private static List convertActualOutputOrderBy(OrderBy orderBy, + QueryPlan targetQueryPlan, StatementContext statementContext) { - assert orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; - return targetQueryPlan.getOutputOrderBys(); + if (!orderBy.isEmpty()) { + return Collections.singletonList(OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy)); } - @Override - public List getOutputOrderBys() { - return this.actualOutputOrderBys; - } + assert orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; + return targetQueryPlan.getOutputOrderBys(); + } + + @Override + public List getOutputOrderBys() { + return this.actualOutputOrderBys; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CommitException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CommitException.java index b0d22d310a1..40615632b83 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CommitException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CommitException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,35 +22,35 @@ import org.apache.phoenix.jdbc.PhoenixConnection; public class CommitException extends SQLException { - private static final long serialVersionUID = 2L; - private final int[] uncommittedStatementIndexes; - private final long serverTimestamp; + private static final long serialVersionUID = 2L; + private final int[] uncommittedStatementIndexes; + private final long serverTimestamp; - public CommitException(Exception e, int[] uncommittedStatementIndexes, long serverTimestamp) { - super(e); - this.uncommittedStatementIndexes = uncommittedStatementIndexes; - this.serverTimestamp = serverTimestamp; - } - - public long getServerTimestamp() { - return this.serverTimestamp; - } + public CommitException(Exception e, int[] uncommittedStatementIndexes, long serverTimestamp) { + super(e); + this.uncommittedStatementIndexes = uncommittedStatementIndexes; + this.serverTimestamp = serverTimestamp; + } - /** - * Returns indexes of UPSERT and DELETE statements that have failed. Indexes returned - * correspond to each failed statement's order of creation within a {@link PhoenixConnection} up to - * commit/rollback. - *

- * Statements whose index is returned in this set correspond to one or more HBase mutations that have failed. - *

- * Statement indexes are maintained correctly for connections that mutate and query - * data (DELETE, UPSERT and SELECT) only. Statement (and their subsequent failure) order - * is undefined for connections that execute metadata operations due to the fact that Phoenix rolls - * back connections after metadata mutations. - * - * @see PhoenixConnection#getStatementExecutionCounter() - */ - public int[] getUncommittedStatementIndexes() { - return uncommittedStatementIndexes; - } + public long getServerTimestamp() { + return this.serverTimestamp; + } + + /** + * Returns indexes of UPSERT and DELETE statements that have failed. Indexes returned correspond + * to each failed statement's order of creation within a {@link PhoenixConnection} up to + * commit/rollback. + *

+ * Statements whose index is returned in this set correspond to one or more HBase mutations that + * have failed. + *

+ * Statement indexes are maintained correctly for connections that mutate and query data + * (DELETE, UPSERT and SELECT) only. Statement (and their subsequent failure) order is undefined + * for connections that execute metadata operations due to the fact that Phoenix rolls back + * connections after metadata mutations. + * @see PhoenixConnection#getStatementExecutionCounter() + */ + public int[] getUncommittedStatementIndexes() { + return uncommittedStatementIndexes; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CursorFetchPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CursorFetchPlan.java index c6678cb58f6..eee8d09ca1f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CursorFetchPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/CursorFetchPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.execute; import java.sql.SQLException; + import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.QueryPlan; @@ -31,47 +31,48 @@ public class CursorFetchPlan extends DelegateQueryPlan { - private CursorResultIterator resultIterator; - private int fetchSize; - private boolean isAggregate; - private String cursorName; + private CursorResultIterator resultIterator; + private int fetchSize; + private boolean isAggregate; + private String cursorName; - public CursorFetchPlan(QueryPlan cursorQueryPlan,String cursorName) { - super(cursorQueryPlan); - this.isAggregate = delegate.getStatement().isAggregate() || delegate.getStatement().isDistinct(); - this.cursorName = cursorName; - } + public CursorFetchPlan(QueryPlan cursorQueryPlan, String cursorName) { + super(cursorQueryPlan); + this.isAggregate = + delegate.getStatement().isAggregate() || delegate.getStatement().isDistinct(); + this.cursorName = cursorName; + } - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - StatementContext context = delegate.getContext(); - if (resultIterator == null) { - context.getOverallQueryMetrics().startQuery(); - resultIterator = new CursorResultIterator(LookAheadResultIterator.wrap(delegate.iterator(scanGrouper, scan)),cursorName); - } - return resultIterator; - } + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + StatementContext context = delegate.getContext(); + if (resultIterator == null) { + context.getOverallQueryMetrics().startQuery(); + resultIterator = new CursorResultIterator( + LookAheadResultIterator.wrap(delegate.iterator(scanGrouper, scan)), cursorName); + } + return resultIterator; + } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); - } + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return delegate.getExplainPlan(); + } - @Override - public ExplainPlan getExplainPlan() throws 
SQLException { - return delegate.getExplainPlan(); - } - - public void setFetchSize(int fetchSize){ - this.fetchSize = fetchSize; - } + public void setFetchSize(int fetchSize) { + this.fetchSize = fetchSize; + } - public int getFetchSize() { - return fetchSize; - } + public int getFetchSize() { + return fetchSize; + } - public boolean isAggregate(){ - return this.isAggregate; - } + public boolean isAggregate() { + return this.isAggregate; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateHTable.java index d099bd1680a..235a150b9f0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateHTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateHTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -55,282 +55,283 @@ public class DelegateHTable extends CompatDelegateHTable implements Table { - public DelegateHTable(Table delegate) { - super(delegate); - } - - @Override - public TableName getName() { - return delegate.getName(); - } - - @Override - public Configuration getConfiguration() { - return delegate.getConfiguration(); - } - - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return delegate.getTableDescriptor(); - } - - @Override - public boolean exists(Get get) throws IOException { - return delegate.exists(get); - } - - @Override - public boolean[] existsAll(List gets) throws IOException { - return delegate.existsAll(gets); - } - - @Override - public void batch(List actions, Object[] results) throws IOException, - InterruptedException { - delegate.batch(actions, results); - } - - @Override - public void batchCallback(List actions, Object[] results, - Callback callback) throws IOException, InterruptedException { - delegate.batchCallback(actions, results, callback); - } - - @Override - public Result get(Get get) throws IOException { - return delegate.get(get); - } - - @Override - public Result[] get(List gets) throws IOException { - return delegate.get(gets); - } - - @Override - public ResultScanner getScanner(Scan scan) throws IOException { - return delegate.getScanner(scan); - } - - @Override - public ResultScanner getScanner(byte[] family) throws IOException { - return delegate.getScanner(family); - } - - @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { - return delegate.getScanner(family, qualifier); - } - - @Override - public void put(Put put) throws IOException { - delegate.put(put); - } - - @Override - public void put(List puts) throws IOException { - delegate.put(puts); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) - throws IOException { - return delegate.checkAndPut(row, family, qualifier, value, put); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, - byte[] value, Put put) throws IOException { - return delegate.checkAndPut(row, family, qualifier, compareOp, value, put); - } - - @Override - public void delete(Delete delete) throws IOException { - 
delegate.delete(delete); - } - - @Override - public void delete(List deletes) throws IOException { - delegate.delete(deletes); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, - Delete delete) throws IOException { - return delegate.checkAndDelete(row, family, qualifier, value, delete); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, - byte[] value, Delete delete) throws IOException { - return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete); - } - - @Override - public Result append(Append append) throws IOException { - return delegate.append(append); - } - - @Override - public Result increment(Increment increment) throws IOException { - return delegate.increment(increment); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { - return delegate.incrementColumnValue(row, family, qualifier, amount); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, - Durability durability) throws IOException { - return delegate.incrementColumnValue(row, family, qualifier, amount, durability); - } - - @Override - public void close() throws IOException { - delegate.close(); - } - - @Override - public CoprocessorRpcChannel coprocessorService(byte[] row) { - return delegate.coprocessorService(row); - } - - @Override - public Map coprocessorService(Class service, - byte[] startKey, byte[] endKey, Call callable) throws ServiceException, Throwable { - return delegate.coprocessorService(service, startKey, endKey, callable); - } - - @Override - public void coprocessorService(Class service, byte[] startKey, - byte[] endKey, Call callable, Callback callback) throws ServiceException, - Throwable { - delegate.coprocessorService(service, startKey, endKey, callable, callback); - - } - - @Override - public Map batchCoprocessorService( - MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, - R responsePrototype) throws ServiceException, Throwable { - return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype); - } - - @Override - public void batchCoprocessorService(MethodDescriptor methodDescriptor, - Message request, byte[] startKey, byte[] endKey, R responsePrototype, - Callback callback) throws ServiceException, Throwable { - delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype); - } - - @Override - public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException { - return delegate.checkAndMutate(checkAndMutate); - } - - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, - byte[] value, RowMutations mutation) throws IOException { - return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation); - } - - @Override - public void setOperationTimeout(int operationTimeout) { - delegate.setOperationTimeout(operationTimeout); - } - - @Override - public int getOperationTimeout() { - return delegate.getOperationTimeout(); - } - - @Override - public int getRpcTimeout() { - return delegate.getRpcTimeout(); - } - - @Override - public void setRpcTimeout(int rpcTimeout) { - delegate.setRpcTimeout(rpcTimeout); - } - - @Override - public TableDescriptor getDescriptor() throws IOException { - return delegate.getDescriptor(); - } - 
- @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - byte[] value, Put put) throws IOException { - return delegate.checkAndPut(row, family, qualifier, op, value, put); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - byte[] value, Delete delete) throws IOException { - return delegate.checkAndDelete(row, family, qualifier, op, value, delete); - } - - @Override - public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { - return delegate.checkAndMutate(row, family); - } - - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, - byte[] value, RowMutations mutation) throws IOException { - return delegate.checkAndMutate(row, family, qualifier, op, value, mutation); - } - - @Override - public int getReadRpcTimeout() { - return delegate.getReadRpcTimeout(); - } - - @Override - public void setReadRpcTimeout(int readRpcTimeout) { - delegate.setReadRpcTimeout(readRpcTimeout); - } - - @Override - public int getWriteRpcTimeout() { - return delegate.getWriteRpcTimeout(); - } - - @Override - public void setWriteRpcTimeout(int writeRpcTimeout) { - delegate.setWriteRpcTimeout(writeRpcTimeout); - } - - @Override - public boolean[] exists(List gets) throws IOException { - return delegate.exists(gets); - } - - @Override - public long getRpcTimeout(TimeUnit unit) { - return delegate.getRpcTimeout(unit); - } - - @Override - public long getReadRpcTimeout(TimeUnit unit) { - return delegate.getReadRpcTimeout(unit); - } - - @Override - public long getWriteRpcTimeout(TimeUnit unit) { - return delegate.getWriteRpcTimeout(unit); - } - - @Override - public long getOperationTimeout(TimeUnit unit) { - return delegate.getOperationTimeout(unit); - } - - @Override - public RegionLocator getRegionLocator() throws IOException { - return delegate.getRegionLocator(); - } + public DelegateHTable(Table delegate) { + super(delegate); + } + + @Override + public TableName getName() { + return delegate.getName(); + } + + @Override + public Configuration getConfiguration() { + return delegate.getConfiguration(); + } + + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return delegate.getTableDescriptor(); + } + + @Override + public boolean exists(Get get) throws IOException { + return delegate.exists(get); + } + + @Override + public boolean[] existsAll(List gets) throws IOException { + return delegate.existsAll(gets); + } + + @Override + public void batch(List actions, Object[] results) + throws IOException, InterruptedException { + delegate.batch(actions, results); + } + + @Override + public void batchCallback(List actions, Object[] results, Callback callback) + throws IOException, InterruptedException { + delegate.batchCallback(actions, results, callback); + } + + @Override + public Result get(Get get) throws IOException { + return delegate.get(get); + } + + @Override + public Result[] get(List gets) throws IOException { + return delegate.get(gets); + } + + @Override + public ResultScanner getScanner(Scan scan) throws IOException { + return delegate.getScanner(scan); + } + + @Override + public ResultScanner getScanner(byte[] family) throws IOException { + return delegate.getScanner(family); + } + + @Override + public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { + return delegate.getScanner(family, qualifier); + } + + @Override + public void put(Put put) throws IOException { + 
delegate.put(put); + } + + @Override + public void put(List puts) throws IOException { + delegate.put(puts); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) + throws IOException { + return delegate.checkAndPut(row, family, qualifier, value, put); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { + return delegate.checkAndPut(row, family, qualifier, compareOp, value, put); + } + + @Override + public void delete(Delete delete) throws IOException { + delegate.delete(delete); + } + + @Override + public void delete(List deletes) throws IOException { + delegate.delete(deletes); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { + return delegate.checkAndDelete(row, family, qualifier, value, delete); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { + return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete); + } + + @Override + public Result append(Append append) throws IOException { + return delegate.append(append); + } + + @Override + public Result increment(Increment increment) throws IOException { + return delegate.increment(increment); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) + throws IOException { + return delegate.incrementColumnValue(row, family, qualifier, amount); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { + return delegate.incrementColumnValue(row, family, qualifier, amount, durability); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public CoprocessorRpcChannel coprocessorService(byte[] row) { + return delegate.coprocessorService(row); + } + + @Override + public Map coprocessorService(Class service, byte[] startKey, + byte[] endKey, Call callable) throws ServiceException, Throwable { + return delegate.coprocessorService(service, startKey, endKey, callable); + } + + @Override + public void coprocessorService(Class service, byte[] startKey, + byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { + delegate.coprocessorService(service, startKey, endKey, callable, callback); + + } + + @Override + public Map batchCoprocessorService( + MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { + return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, + responsePrototype); + } + + @Override + public void batchCoprocessorService(MethodDescriptor methodDescriptor, + Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) + throws ServiceException, Throwable { + delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, + responsePrototype); + } + + @Override + public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException { + return delegate.checkAndMutate(checkAndMutate); + } + + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations mutation) throws 
IOException { + return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation); + } + + @Override + public void setOperationTimeout(int operationTimeout) { + delegate.setOperationTimeout(operationTimeout); + } + + @Override + public int getOperationTimeout() { + return delegate.getOperationTimeout(); + } + + @Override + public int getRpcTimeout() { + return delegate.getRpcTimeout(); + } + + @Override + public void setRpcTimeout(int rpcTimeout) { + delegate.setRpcTimeout(rpcTimeout); + } + + @Override + public TableDescriptor getDescriptor() throws IOException { + return delegate.getDescriptor(); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, Put put) throws IOException { + return delegate.checkAndPut(row, family, qualifier, op, value, put); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, Delete delete) throws IOException { + return delegate.checkAndDelete(row, family, qualifier, op, value, delete); + } + + @Override + public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { + return delegate.checkAndMutate(row, family); + } + + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, RowMutations mutation) throws IOException { + return delegate.checkAndMutate(row, family, qualifier, op, value, mutation); + } + + @Override + public int getReadRpcTimeout() { + return delegate.getReadRpcTimeout(); + } + + @Override + public void setReadRpcTimeout(int readRpcTimeout) { + delegate.setReadRpcTimeout(readRpcTimeout); + } + + @Override + public int getWriteRpcTimeout() { + return delegate.getWriteRpcTimeout(); + } + + @Override + public void setWriteRpcTimeout(int writeRpcTimeout) { + delegate.setWriteRpcTimeout(writeRpcTimeout); + } + + @Override + public boolean[] exists(List gets) throws IOException { + return delegate.exists(gets); + } + + @Override + public long getRpcTimeout(TimeUnit unit) { + return delegate.getRpcTimeout(unit); + } + + @Override + public long getReadRpcTimeout(TimeUnit unit) { + return delegate.getReadRpcTimeout(unit); + } + + @Override + public long getWriteRpcTimeout(TimeUnit unit) { + return delegate.getWriteRpcTimeout(unit); + } + + @Override + public long getOperationTimeout(TimeUnit unit) { + return delegate.getOperationTimeout(unit); + } + + @Override + public RegionLocator getRegionLocator() throws IOException { + return delegate.getRegionLocator(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java index ba65fbe45e3..17bb3e1dd2a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DelegateQueryPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,12 +25,12 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; -import org.apache.phoenix.iterate.DefaultParallelScanGrouper; -import org.apache.phoenix.iterate.ParallelScanGrouper; -import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.RowProjector; import org.apache.phoenix.compile.StatementContext; +import org.apache.phoenix.iterate.DefaultParallelScanGrouper; +import org.apache.phoenix.iterate.ParallelScanGrouper; +import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import org.apache.phoenix.optimize.Cost; import org.apache.phoenix.parse.FilterableStatement; @@ -38,136 +38,138 @@ import org.apache.phoenix.schema.TableRef; public abstract class DelegateQueryPlan implements QueryPlan { - protected final QueryPlan delegate; - - public DelegateQueryPlan(QueryPlan delegate) { - this.delegate = delegate; - } - - @Override - public StatementContext getContext() { - return delegate.getContext(); - } - - @Override - public ParameterMetaData getParameterMetaData() { - return delegate.getParameterMetaData(); - } - - @Override - public long getEstimatedSize() { - return delegate.getEstimatedSize(); - } - - @Override - public Cost getCost() { - return delegate.getCost(); - } - - @Override - public TableRef getTableRef() { - return delegate.getTableRef(); - } - - @Override - public Set getSourceRefs() { - return delegate.getSourceRefs(); - } - - @Override - public RowProjector getProjector() { - return delegate.getProjector(); - } - - @Override - public Integer getLimit() { - return delegate.getLimit(); - } - - @Override - public OrderBy getOrderBy() { - return delegate.getOrderBy(); - } - - @Override - public GroupBy getGroupBy() { - return delegate.getGroupBy(); - } - - @Override - public List getSplits() { - return delegate.getSplits(); - } - - @Override - public List> getScans() { - return delegate.getScans(); - } - - @Override - public FilterableStatement getStatement() { - return delegate.getStatement(); - } - - @Override - public boolean isDegenerate() { - return delegate.isDegenerate(); - } - - @Override - public boolean isRowKeyOrdered() { - return delegate.isRowKeyOrdered(); - } - - @Override - public boolean useRoundRobinIterator() throws SQLException { - return delegate.useRoundRobinIterator(); - } - - @Override - public Operation getOperation() { - return delegate.getOperation(); - } - - @Override - public Integer getOffset() { - return delegate.getOffset(); - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return iterator(scanGrouper, null); - } - - public QueryPlan getDelegate() { - return delegate; - } - - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return delegate.getEstimatedRowsToScan(); - } - - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return delegate.getEstimatedBytesToScan(); - } - - @Override - public Long 
getEstimateInfoTimestamp() throws SQLException { - return delegate.getEstimateInfoTimestamp(); - } - - @Override - public List getOutputOrderBys() { - return delegate.getOutputOrderBys(); - } - - @Override - public boolean isApplicable() { return delegate.isApplicable(); } -} \ No newline at end of file + protected final QueryPlan delegate; + + public DelegateQueryPlan(QueryPlan delegate) { + this.delegate = delegate; + } + + @Override + public StatementContext getContext() { + return delegate.getContext(); + } + + @Override + public ParameterMetaData getParameterMetaData() { + return delegate.getParameterMetaData(); + } + + @Override + public long getEstimatedSize() { + return delegate.getEstimatedSize(); + } + + @Override + public Cost getCost() { + return delegate.getCost(); + } + + @Override + public TableRef getTableRef() { + return delegate.getTableRef(); + } + + @Override + public Set getSourceRefs() { + return delegate.getSourceRefs(); + } + + @Override + public RowProjector getProjector() { + return delegate.getProjector(); + } + + @Override + public Integer getLimit() { + return delegate.getLimit(); + } + + @Override + public OrderBy getOrderBy() { + return delegate.getOrderBy(); + } + + @Override + public GroupBy getGroupBy() { + return delegate.getGroupBy(); + } + + @Override + public List getSplits() { + return delegate.getSplits(); + } + + @Override + public List> getScans() { + return delegate.getScans(); + } + + @Override + public FilterableStatement getStatement() { + return delegate.getStatement(); + } + + @Override + public boolean isDegenerate() { + return delegate.isDegenerate(); + } + + @Override + public boolean isRowKeyOrdered() { + return delegate.isRowKeyOrdered(); + } + + @Override + public boolean useRoundRobinIterator() throws SQLException { + return delegate.useRoundRobinIterator(); + } + + @Override + public Operation getOperation() { + return delegate.getOperation(); + } + + @Override + public Integer getOffset() { + return delegate.getOffset(); + } + + @Override + public ResultIterator iterator() throws SQLException { + return iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return iterator(scanGrouper, null); + } + + public QueryPlan getDelegate() { + return delegate; + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return delegate.getEstimatedRowsToScan(); + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return delegate.getEstimatedBytesToScan(); + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return delegate.getEstimateInfoTimestamp(); + } + + @Override + public List getOutputOrderBys() { + return delegate.getOutputOrderBys(); + } + + @Override + public boolean isApplicable() { + return delegate.isApplicable(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisons.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisons.java index 5310c5729a8..d57442200dd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisons.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisons.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,210 +22,217 @@ import java.security.AccessController; import java.security.PrivilegedAction; -import sun.misc.Unsafe; - import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; import org.apache.phoenix.thirdparty.com.google.common.primitives.UnsignedBytes; +import sun.misc.Unsafe; + /** - * Utility code to do optimized byte-array comparison. - * This is borrowed from org.apache.hadoop.io.FastByteComparisons - * which was borrowed and slightly modified from Guava's {@link UnsignedBytes} - * class to be able to compare arrays that start at non-zero offsets. - * - * The only difference is that we sort a smaller length bytes as *larger* - * than longer length bytes when all the bytes are the same. + * Utility code to do optimized byte-array comparison. This is borrowed from + * org.apache.hadoop.io.FastByteComparisons which was borrowed and slightly modified from Guava's + * {@link UnsignedBytes} class to be able to compare arrays that start at non-zero offsets. The only + * difference is that we sort a smaller length bytes as *larger* than longer length bytes when all + * the bytes are the same. */ @SuppressWarnings("restriction") public class DescVarLengthFastByteComparisons { - private DescVarLengthFastByteComparisons() {} + private DescVarLengthFastByteComparisons() { + } + + /** + * Lexicographically compare two byte arrays. + */ + public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + return LexicographicalComparerHolder.BEST_COMPARER.compareTo(b1, s1, l1, b2, s2, l2); + } + + private interface Comparer { + abstract public int compareTo(T buffer1, int offset1, int length1, T buffer2, int offset2, + int length2); + } + + private static Comparer lexicographicalComparerJavaImpl() { + return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; + } + + /** + * Provides a lexicographical comparer implementation; either a Java implementation or a faster + * implementation based on {@link Unsafe}. + *

+ * Uses reflection to gracefully fall back to the Java implementation if {@code Unsafe} isn't + * available. + */ + private static class LexicographicalComparerHolder { + static final String UNSAFE_COMPARER_NAME = + LexicographicalComparerHolder.class.getName() + "$UnsafeComparer"; + + static final Comparer BEST_COMPARER = getBestComparer(); /** - * Lexicographically compare two byte arrays. + * Returns the Unsafe-using Comparer, or falls back to the pure-Java implementation if unable to + * do so. */ - public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - return LexicographicalComparerHolder.BEST_COMPARER.compareTo(b1, s1, l1, b2, s2, l2); + static Comparer getBestComparer() { + try { + Class theClass = Class.forName(UNSAFE_COMPARER_NAME); + + // yes, UnsafeComparer does implement Comparer + @SuppressWarnings("unchecked") + Comparer comparer = (Comparer) theClass.getEnumConstants()[0]; + return comparer; + } catch (Throwable t) { // ensure we really catch *everything* + return lexicographicalComparerJavaImpl(); + } } - private interface Comparer { - abstract public int compareTo(T buffer1, int offset1, int length1, T buffer2, int offset2, int length2); - } + private enum PureJavaComparer implements Comparer { + INSTANCE; - private static Comparer lexicographicalComparerJavaImpl() { - return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; + @Override + public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, + int length2) { + // Short circuit equal case + if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { + return 0; + } + if (length1 == 0 && length2 != 0) { // nulls sort first, even for descending + return -1; + } + if (length2 == 0 && length1 != 0) { // nulls sort first, even for descending + return 1; + } + // Bring WritableComparator code local + int end1 = offset1 + length1; + int end2 = offset2 + length2; + for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { + int a = (buffer1[i] & 0xff); + int b = (buffer2[j] & 0xff); + if (a != b) { + return a - b; + } + } + return length2 - length1; + } } - /** - * Provides a lexicographical comparer implementation; either a Java implementation or a faster implementation based - * on {@link Unsafe}. - *

- * Uses reflection to gracefully fall back to the Java implementation if {@code Unsafe} isn't available. - */ - private static class LexicographicalComparerHolder { - static final String UNSAFE_COMPARER_NAME = LexicographicalComparerHolder.class.getName() + "$UnsafeComparer"; + @SuppressWarnings("unused") + // used via reflection + private enum UnsafeComparer implements Comparer { + INSTANCE; - static final Comparer BEST_COMPARER = getBestComparer(); + static final Unsafe theUnsafe; - /** - * Returns the Unsafe-using Comparer, or falls back to the pure-Java implementation if unable to do so. - */ - static Comparer getBestComparer() { + /** The offset to the first element in a byte array. */ + static final int BYTE_ARRAY_BASE_OFFSET; + + static { + theUnsafe = (Unsafe) AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Object run() { try { - Class theClass = Class.forName(UNSAFE_COMPARER_NAME); - - // yes, UnsafeComparer does implement Comparer - @SuppressWarnings("unchecked") - Comparer comparer = (Comparer)theClass.getEnumConstants()[0]; - return comparer; - } catch (Throwable t) { // ensure we really catch *everything* - return lexicographicalComparerJavaImpl(); + Field f = Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return f.get(null); + } catch (NoSuchFieldException e) { + // It doesn't matter what we throw; + // it's swallowed in getBestComparer(). + throw new Error(); + } catch (IllegalAccessException e) { + throw new Error(); } - } + } + }); - private enum PureJavaComparer implements Comparer { - INSTANCE; - - @Override - public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { - // Short circuit equal case - if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; } - if (length1 == 0 && length2 != 0) { // nulls sort first, even for descending - return -1; - } - if (length2 == 0 && length1 != 0) { // nulls sort first, even for descending - return 1; - } - // Bring WritableComparator code local - int end1 = offset1 + length1; - int end2 = offset2 + length2; - for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { - int a = (buffer1[i] & 0xff); - int b = (buffer2[j] & 0xff); - if (a != b) { return a - b; } - } - return length2 - length1; - } - } + BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); - @SuppressWarnings("unused") - // used via reflection - private enum UnsafeComparer implements Comparer { - INSTANCE; - - static final Unsafe theUnsafe; - - /** The offset to the first element in a byte array. */ - static final int BYTE_ARRAY_BASE_OFFSET; - - static { - theUnsafe = (Unsafe)AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Object run() { - try { - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - return f.get(null); - } catch (NoSuchFieldException e) { - // It doesn't matter what we throw; - // it's swallowed in getBestComparer(). 
- throw new Error(); - } catch (IllegalAccessException e) { - throw new Error(); - } - } - }); - - BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); - - // sanity check - this should never fail - if (theUnsafe.arrayIndexScale(byte[].class) != 1) { throw new AssertionError(); } + // sanity check - this should never fail + if (theUnsafe.arrayIndexScale(byte[].class) != 1) { + throw new AssertionError(); + } + } + + static final boolean littleEndian = ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN); + + /** + * Returns true if x1 is less than x2, when both values are treated as unsigned. + */ + static boolean lessThanUnsigned(long x1, long x2) { + return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE); + } + + /** + * Lexicographically compare two arrays. left operand right operand Where to start comparing + * in the left buffer Where to start comparing in the right buffer How much to compare from + * the left buffer How much to compare from the right buffer + * @return 0 if equal, < 0 if left is less than right, etc. + */ + @Override + public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, + int length2) { + // Short circuit equal case + if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { + return 0; + } + if (length1 == 0 && length2 != 0) { // nulls sort first, even for descending + return -1; + } + if (length2 == 0 && length1 != 0) { // nulls sort first, even for descending + return 1; + } + int minLength = Math.min(length1, length2); + int minWords = minLength / Longs.BYTES; + int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; + int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; + + /* + * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a time is no slower + * than comparing 4 bytes at a time even on 32-bit. On the other hand, it is substantially + * faster on 64-bit. + */ + for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) { + long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i); + long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i); + long diff = lw ^ rw; + + if (diff != 0) { + if (!littleEndian) { + return lessThanUnsigned(lw, rw) ? -1 : 1; } - static final boolean littleEndian = ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN); + // Use binary search + int n = 0; + int y; + int x = (int) diff; + if (x == 0) { + x = (int) (diff >>> 32); + n = 32; + } - /** - * Returns true if x1 is less than x2, when both values are treated as unsigned. - */ - static boolean lessThanUnsigned(long x1, long x2) { - return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE); + y = x << 16; + if (y == 0) { + n += 16; + } else { + x = y; } - /** - * Lexicographically compare two arrays. - * - * @param buffer1 - * left operand - * @param buffer2 - * right operand - * @param offset1 - * Where to start comparing in the left buffer - * @param offset2 - * Where to start comparing in the right buffer - * @param length1 - * How much to compare from the left buffer - * @param length2 - * How much to compare from the right buffer - * @return 0 if equal, < 0 if left is less than right, etc. 
- */ - @Override - public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { - // Short circuit equal case - if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; } - if (length1 == 0 && length2 != 0) { // nulls sort first, even for descending - return -1; - } - if (length2 == 0 && length1 != 0) { // nulls sort first, even for descending - return 1; - } - int minLength = Math.min(length1, length2); - int minWords = minLength / Longs.BYTES; - int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET; - int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET; - - /* - * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a time is no slower than comparing - * 4 bytes at a time even on 32-bit. On the other hand, it is substantially faster on 64-bit. - */ - for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) { - long lw = theUnsafe.getLong(buffer1, offset1Adj + (long)i); - long rw = theUnsafe.getLong(buffer2, offset2Adj + (long)i); - long diff = lw ^ rw; - - if (diff != 0) { - if (!littleEndian) { return lessThanUnsigned(lw, rw) ? -1 : 1; } - - // Use binary search - int n = 0; - int y; - int x = (int)diff; - if (x == 0) { - x = (int)(diff >>> 32); - n = 32; - } - - y = x << 16; - if (y == 0) { - n += 16; - } else { - x = y; - } - - y = x << 8; - if (y == 0) { - n += 8; - } - return (int)(((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); - } - } - - // The epilogue to cover the last (minLength % 8) elements. - for (int i = minWords * Longs.BYTES; i < minLength; i++) { - int result = UnsignedBytes.compare(buffer1[offset1 + i], buffer2[offset2 + i]); - if (result != 0) { return result; } - } - return length2 - length1; + y = x << 8; + if (y == 0) { + n += 8; } + return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL)); + } + } + + // The epilogue to cover the last (minLength % 8) elements. + for (int i = minWords * Longs.BYTES; i < minLength; i++) { + int result = UnsignedBytes.compare(buffer1[offset1 + i], buffer2[offset2 + i]); + if (result != 0) { + return result; + } } + return length2 - length1; + } } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java index 1ea56706a82..dbe56c63f9a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,7 +34,6 @@ import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -81,6 +80,10 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CostUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; @@ -88,616 +91,625 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - public class HashJoinPlan extends DelegateQueryPlan { - private static final Logger LOGGER = LoggerFactory.getLogger(HashJoinPlan.class); - private static final Random RANDOM = new Random(); - - private final SelectStatement statement; - private final HashJoinInfo joinInfo; - private final SubPlan[] subPlans; - private final boolean recompileWhereClause; - private final Set tableRefs; - private final int maxServerCacheTimeToLive; - private final long serverCacheLimit; - private final Map dependencies = Maps.newHashMap(); - private HashCacheClient hashClient; - private AtomicLong firstJobEndTime; - private List keyRangeExpressions; - private Long estimatedRows; - private Long estimatedBytes; - private Long estimateInfoTs; - private boolean getEstimatesCalled; - private boolean hasSubPlansWithPersistentCache; - - public static HashJoinPlan create(SelectStatement statement, - QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans) throws SQLException { - if (!(plan instanceof HashJoinPlan)) - return new HashJoinPlan(statement, plan, joinInfo, subPlans, joinInfo == null, Collections.emptyMap()); - - HashJoinPlan hashJoinPlan = (HashJoinPlan) plan; - assert (hashJoinPlan.joinInfo == null && hashJoinPlan.delegate instanceof BaseQueryPlan); - SubPlan[] mergedSubPlans = new SubPlan[hashJoinPlan.subPlans.length + subPlans.length]; - int i = 0; - for (SubPlan subPlan : hashJoinPlan.subPlans) { - mergedSubPlans[i++] = subPlan; - } - for (SubPlan subPlan : subPlans) { - mergedSubPlans[i++] = subPlan; - } - return new HashJoinPlan(statement, hashJoinPlan.delegate, joinInfo, mergedSubPlans, true, hashJoinPlan.dependencies); - } - - private HashJoinPlan(SelectStatement statement, - QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans, boolean recompileWhereClause, Map dependencies) throws SQLException { - super(plan); - this.dependencies.putAll(dependencies); - this.statement = statement; - this.joinInfo = joinInfo; - this.subPlans = subPlans; - this.recompileWhereClause = recompileWhereClause; - this.tableRefs = 
Sets.newHashSetWithExpectedSize(subPlans.length + plan.getSourceRefs().size()); - this.tableRefs.addAll(plan.getSourceRefs()); - this.hasSubPlansWithPersistentCache = false; - for (SubPlan subPlan : subPlans) { - tableRefs.addAll(subPlan.getInnerPlan().getSourceRefs()); - if (subPlan instanceof HashSubPlan && ((HashSubPlan)subPlan).usePersistentCache) { - this.hasSubPlansWithPersistentCache = true; - } - } - QueryServices services = plan.getContext().getConnection().getQueryServices(); - this.maxServerCacheTimeToLive = services.getProps().getInt( - QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); - this.serverCacheLimit = services.getProps().getLongBytes( - QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE); - for (SubPlan subPlan: subPlans) { - this.getContext().addSubStatementContext(subPlan.getInnerPlan().getContext()); - } + private static final Logger LOGGER = LoggerFactory.getLogger(HashJoinPlan.class); + private static final Random RANDOM = new Random(); + + private final SelectStatement statement; + private final HashJoinInfo joinInfo; + private final SubPlan[] subPlans; + private final boolean recompileWhereClause; + private final Set tableRefs; + private final int maxServerCacheTimeToLive; + private final long serverCacheLimit; + private final Map dependencies = Maps.newHashMap(); + private HashCacheClient hashClient; + private AtomicLong firstJobEndTime; + private List keyRangeExpressions; + private Long estimatedRows; + private Long estimatedBytes; + private Long estimateInfoTs; + private boolean getEstimatesCalled; + private boolean hasSubPlansWithPersistentCache; + + public static HashJoinPlan create(SelectStatement statement, QueryPlan plan, + HashJoinInfo joinInfo, SubPlan[] subPlans) throws SQLException { + if (!(plan instanceof HashJoinPlan)) return new HashJoinPlan(statement, plan, joinInfo, + subPlans, joinInfo == null, Collections. emptyMap()); + + HashJoinPlan hashJoinPlan = (HashJoinPlan) plan; + assert (hashJoinPlan.joinInfo == null && hashJoinPlan.delegate instanceof BaseQueryPlan); + SubPlan[] mergedSubPlans = new SubPlan[hashJoinPlan.subPlans.length + subPlans.length]; + int i = 0; + for (SubPlan subPlan : hashJoinPlan.subPlans) { + mergedSubPlans[i++] = subPlan; } - - @Override - public Set getSourceRefs() { - return tableRefs; + for (SubPlan subPlan : subPlans) { + mergedSubPlans[i++] = subPlan; } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - if (scan == null) { - scan = delegate.getContext().getScan(); - } - - int count = subPlans.length; - PhoenixConnection connection = getContext().getConnection(); - ConnectionQueryServices services = connection.getQueryServices(); - ExecutorService executor = services.getExecutor(); - List> futures = Lists.newArrayListWithExpectedSize(count); - if (joinInfo != null) { - hashClient = hashClient != null ? 
- hashClient - : new HashCacheClient(delegate.getContext().getConnection()); - firstJobEndTime = new AtomicLong(0); - keyRangeExpressions = new CopyOnWriteArrayList(); - } - - for (int i = 0; i < count; i++) { - final int index = i; - futures.add(executor.submit(new JobCallable() { - - @Override - public ServerCache call() throws Exception { - ServerCache cache = subPlans[index].execute(HashJoinPlan.this); - return cache; - } - - @Override - public Object getJobId() { - return HashJoinPlan.this; - } - - @Override - public TaskExecutionMetricsHolder getTaskExecutionMetric() { - return NO_OP_INSTANCE; - } - })); - } - - SQLException firstException = null; - for (int i = 0; i < count; i++) { - try { - ServerCache result = futures.get(i).get(); - if (result != null) { - dependencies.put(new ImmutableBytesPtr(result.getId()),result); - } - subPlans[i].postProcess(result, this); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - if (firstException == null) { - firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException(); - } - } catch (ExecutionException e) { - if (firstException == null) { - firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", - e.getCause()); - } - } - } - if (firstException != null) { - SQLCloseables.closeAllQuietly(dependencies.values()); - throw firstException; - } + return new HashJoinPlan(statement, hashJoinPlan.delegate, joinInfo, mergedSubPlans, true, + hashJoinPlan.dependencies); + } + + private HashJoinPlan(SelectStatement statement, QueryPlan plan, HashJoinInfo joinInfo, + SubPlan[] subPlans, boolean recompileWhereClause, + Map dependencies) throws SQLException { + super(plan); + this.dependencies.putAll(dependencies); + this.statement = statement; + this.joinInfo = joinInfo; + this.subPlans = subPlans; + this.recompileWhereClause = recompileWhereClause; + this.tableRefs = Sets.newHashSetWithExpectedSize(subPlans.length + plan.getSourceRefs().size()); + this.tableRefs.addAll(plan.getSourceRefs()); + this.hasSubPlansWithPersistentCache = false; + for (SubPlan subPlan : subPlans) { + tableRefs.addAll(subPlan.getInnerPlan().getSourceRefs()); + if (subPlan instanceof HashSubPlan && ((HashSubPlan) subPlan).usePersistentCache) { + this.hasSubPlansWithPersistentCache = true; + } + } + QueryServices services = plan.getContext().getConnection().getQueryServices(); + this.maxServerCacheTimeToLive = + services.getProps().getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); + this.serverCacheLimit = + services.getProps().getLongBytes(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE); + for (SubPlan subPlan : subPlans) { + this.getContext().addSubStatementContext(subPlan.getInnerPlan().getContext()); + } + } - Expression postFilter = null; - boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty(); - if (recompileWhereClause || hasKeyRangeExpressions) { - StatementContext context = delegate.getContext(); - // Since we are going to compile the WHERE conditions all over again, we will clear - // the old filter, otherwise there would be conflicts and would cause PHOENIX-4692. - context.getScan().setFilter(null); - PTable table = context.getCurrentTable().getTable(); - ParseNode viewWhere = table.getViewStatement() == null ? 
null : new SQLParser(table.getViewStatement()).parseQuery().getWhere(); - context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection())); - if (recompileWhereClause) { - postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null, Optional.absent()); - } - if (hasKeyRangeExpressions) { - WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, null, Optional.absent()); - } - } + @Override + public Set getSourceRefs() { + return tableRefs; + } - if (joinInfo != null) { - HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo); - } - - ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan); - if (statement.getInnerSelectStatement() != null && postFilter != null) { - iterator = new FilterResultIterator(iterator, postFilter); + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + if (scan == null) { + scan = delegate.getContext().getScan(); + } + + int count = subPlans.length; + PhoenixConnection connection = getContext().getConnection(); + ConnectionQueryServices services = connection.getQueryServices(); + ExecutorService executor = services.getExecutor(); + List> futures = Lists.newArrayListWithExpectedSize(count); + if (joinInfo != null) { + hashClient = hashClient != null + ? hashClient + : new HashCacheClient(delegate.getContext().getConnection()); + firstJobEndTime = new AtomicLong(0); + keyRangeExpressions = new CopyOnWriteArrayList(); + } + + for (int i = 0; i < count; i++) { + final int index = i; + futures.add(executor.submit(new JobCallable() { + + @Override + public ServerCache call() throws Exception { + ServerCache cache = subPlans[index].execute(HashJoinPlan.this); + return cache; } - if (hasSubPlansWithPersistentCache) { - return peekForPersistentCache(iterator, scanGrouper, scan); - } else { - return iterator; + @Override + public Object getJobId() { + return HashJoinPlan.this; } - } - private ResultIterator peekForPersistentCache(ResultIterator iterator, ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - // The persistent subquery is optimistic and assumes caches are present on region - // servers. We verify that this is the case by peeking at one result. If there is - // a cache missing exception, we retry the query with the persistent cache disabled - // for that specific cache ID. 
- PeekingResultIterator peeking = LookAheadResultIterator.wrap(iterator); - try { - peeking.peek(); - } catch (Exception e) { - try { - throw ClientUtil.parseServerException(e); - } catch (HashJoinCacheNotFoundException e2) { - Long cacheId = e2.getCacheId(); - if (delegate.getContext().getRetryingPersistentCache(cacheId)) { - throw e2; - } - delegate.getContext().setRetryingPersistentCache(cacheId); - return iterator(scanGrouper, scan); - } + @Override + public TaskExecutionMetricsHolder getTaskExecutionMetric() { + return NO_OP_INSTANCE; } - return peeking; + })); } - private Expression createKeyRangeExpression(Expression lhsExpression, - Expression rhsExpression, List rhsValues, - ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { - if (rhsValues.isEmpty()) - return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS); - - rhsValues.add(0, lhsExpression); - - return InListExpression.create(rhsValues, false, ptr, rowKeyOrderOptimizable); + SQLException firstException = null; + for (int i = 0; i < count; i++) { + try { + ServerCache result = futures.get(i).get(); + if (result != null) { + dependencies.put(new ImmutableBytesPtr(result.getId()), result); + } + subPlans[i].postProcess(result, this); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + if (firstException == null) { + firstException = + new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException(); + } + } catch (ExecutionException e) { + if (firstException == null) { + firstException = new SQLException( + "Encountered exception in sub plan [" + i + "] execution.", e.getCause()); + } + } + } + if (firstException != null) { + SQLCloseables.closeAllQuietly(dependencies.values()); + throw firstException; } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - // TODO : Support ExplainPlanAttributes for HashJoinPlan - List planSteps = Lists.newArrayList(delegate.getExplainPlan().getPlanSteps()); - int count = subPlans.length; - for (int i = 0; i < count; i++) { - planSteps.addAll(subPlans[i].getPreSteps(this)); - } - for (int i = 0; i < count; i++) { - planSteps.addAll(subPlans[i].getPostSteps(this)); - } - - if (joinInfo != null && joinInfo.getPostJoinFilterExpression() != null) { - planSteps.add(" AFTER-JOIN SERVER FILTER BY " + joinInfo.getPostJoinFilterExpression().toString()); - } - if (joinInfo != null && joinInfo.getLimit() != null) { - planSteps.add(" JOIN-SCANNER " + joinInfo.getLimit() + " ROW LIMIT"); - } - return new ExplainPlan(planSteps); + Expression postFilter = null; + boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty(); + if (recompileWhereClause || hasKeyRangeExpressions) { + StatementContext context = delegate.getContext(); + // Since we are going to compile the WHERE conditions all over again, we will clear + // the old filter, otherwise there would be conflicts and would cause PHOENIX-4692. + context.getScan().setFilter(null); + PTable table = context.getCurrentTable().getTable(); + ParseNode viewWhere = table.getViewStatement() == null + ? 
null + : new SQLParser(table.getViewStatement()).parseQuery().getWhere(); + context.setResolver(FromCompiler.getResolverForQuery( + (SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection())); + if (recompileWhereClause) { + postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), + viewWhere, null, Optional. absent()); + } + if (hasKeyRangeExpressions) { + WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, + keyRangeExpressions, null, Optional. absent()); + } } - @Override - public FilterableStatement getStatement() { - return statement; + if (joinInfo != null) { + HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo); } - public HashJoinInfo getJoinInfo() { - return joinInfo; + ResultIterator iterator = joinInfo == null + ? delegate.iterator(scanGrouper, scan) + : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan); + if (statement.getInnerSelectStatement() != null && postFilter != null) { + iterator = new FilterResultIterator(iterator, postFilter); } - public SubPlan[] getSubPlans() { - return subPlans; + if (hasSubPlansWithPersistentCache) { + return peekForPersistentCache(iterator, scanGrouper, scan); + } else { + return iterator; + } + } + + private ResultIterator peekForPersistentCache(ResultIterator iterator, + ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + // The persistent subquery is optimistic and assumes caches are present on region + // servers. We verify that this is the case by peeking at one result. If there is + // a cache missing exception, we retry the query with the persistent cache disabled + // for that specific cache ID. + PeekingResultIterator peeking = LookAheadResultIterator.wrap(iterator); + try { + peeking.peek(); + } catch (Exception e) { + try { + throw ClientUtil.parseServerException(e); + } catch (HashJoinCacheNotFoundException e2) { + Long cacheId = e2.getCacheId(); + if (delegate.getContext().getRetryingPersistentCache(cacheId)) { + throw e2; + } + delegate.getContext().setRetryingPersistentCache(cacheId); + return iterator(scanGrouper, scan); + } + } + return peeking; + } + + private Expression createKeyRangeExpression(Expression lhsExpression, Expression rhsExpression, + List rhsValues, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) + throws SQLException { + if (rhsValues.isEmpty()) + return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS); + + rhsValues.add(0, lhsExpression); + + return InListExpression.create(rhsValues, false, ptr, rowKeyOrderOptimizable); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + // TODO : Support ExplainPlanAttributes for HashJoinPlan + List planSteps = Lists.newArrayList(delegate.getExplainPlan().getPlanSteps()); + int count = subPlans.length; + for (int i = 0; i < count; i++) { + planSteps.addAll(subPlans[i].getPreSteps(this)); + } + for (int i = 0; i < count; i++) { + planSteps.addAll(subPlans[i].getPostSteps(this)); } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + if (joinInfo != null && joinInfo.getPostJoinFilterExpression() != null) { + planSteps.add( + " AFTER-JOIN SERVER FILTER BY " + joinInfo.getPostJoinFilterExpression().toString()); } + if (joinInfo != null && joinInfo.getLimit() != null) { + planSteps.add(" JOIN-SCANNER " + joinInfo.getLimit() + " ROW LIMIT"); + } + return new ExplainPlan(planSteps); + } + + @Override + public FilterableStatement getStatement() { + return statement; + 
} + + public HashJoinInfo getJoinInfo() { + return joinInfo; + } + + public SubPlan[] getSubPlans() { + return subPlans; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public Cost getCost() { + try { + Long r = delegate.getEstimatedRowsToScan(); + Double w = delegate.accept(new AvgRowWidthVisitor()); + if (r == null || w == null) { + return Cost.UNKNOWN; + } + + int parallelLevel = + CostUtil.estimateParallelLevel(true, getContext().getConnection().getQueryServices()); + + double rowWidth = w; + double rows = RowCountVisitor.filter(r.doubleValue(), + RowCountVisitor.stripSkipScanFilter(delegate.getContext().getScan().getFilter())); + double bytes = rowWidth * rows; + Cost cost = Cost.ZERO; + double rhsByteSum = 0.0; + for (int i = 0; i < subPlans.length; i++) { + double lhsBytes = bytes; + Double rhsRows = subPlans[i].getInnerPlan().accept(new RowCountVisitor()); + Double rhsWidth = subPlans[i].getInnerPlan().accept(new AvgRowWidthVisitor()); + if (rhsRows == null || rhsWidth == null) { + return Cost.UNKNOWN; + } + double rhsBytes = rhsWidth * rhsRows; + rows = RowCountVisitor.join(rows, rhsRows, joinInfo.getJoinTypes()[i]); + rowWidth = AvgRowWidthVisitor.join(rowWidth, rhsWidth, joinInfo.getJoinTypes()[i]); + bytes = rowWidth * rows; + cost = cost.plus(CostUtil.estimateHashJoinCost(lhsBytes, rhsBytes, bytes, + subPlans[i].hasKeyRangeExpression(), parallelLevel)); + rhsByteSum += rhsBytes; + } + + if (rhsByteSum > serverCacheLimit) { + return Cost.UNKNOWN; + } + + // Calculate the cost of aggregation and ordering that is performed with the HashJoinPlan + if (delegate instanceof AggregatePlan) { + AggregatePlan aggPlan = (AggregatePlan) delegate; + double rowsBeforeHaving = RowCountVisitor.aggregate(rows, aggPlan.getGroupBy()); + double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, aggPlan.getHaving()); + double bytesBeforeHaving = rowWidth * rowsBeforeHaving; + double bytesAfterHaving = rowWidth * rowsAfterHaving; + Cost aggCost = CostUtil.estimateAggregateCost(bytes, bytesBeforeHaving, + aggPlan.getGroupBy(), parallelLevel); + cost = cost.plus(aggCost); + rows = rowsAfterHaving; + bytes = bytesAfterHaving; + } + double outputRows = RowCountVisitor.limit(rows, delegate.getLimit()); + double outputBytes = rowWidth * outputRows; + if (!delegate.getOrderBy().getOrderByExpressions().isEmpty()) { + Cost orderByCost = CostUtil.estimateOrderByCost(bytes, outputBytes, parallelLevel); + cost = cost.plus(orderByCost); + } + + // Calculate the cost of child nodes + Cost lhsCost = new Cost(0, 0, r.doubleValue() * w); + Cost rhsCost = Cost.ZERO; + for (SubPlan subPlan : subPlans) { + rhsCost = rhsCost.plus(subPlan.getInnerPlan().getCost()); + } + return cost.plus(lhsCost).plus(rhsCost); + } catch (SQLException e) { + } + return Cost.UNKNOWN; + } - @Override - public Cost getCost() { - try { - Long r = delegate.getEstimatedRowsToScan(); - Double w = delegate.accept(new AvgRowWidthVisitor()); - if (r == null || w == null) { - return Cost.UNKNOWN; - } + public interface SubPlan { + public ServerCache execute(HashJoinPlan parent) throws SQLException; - int parallelLevel = CostUtil.estimateParallelLevel( - true, getContext().getConnection().getQueryServices()); - - double rowWidth = w; - double rows = RowCountVisitor.filter( - r.doubleValue(), - RowCountVisitor.stripSkipScanFilter( - delegate.getContext().getScan().getFilter())); - double bytes = rowWidth * rows; - Cost cost = Cost.ZERO; - double rhsByteSum = 0.0; - for (int i 
= 0; i < subPlans.length; i++) { - double lhsBytes = bytes; - Double rhsRows = subPlans[i].getInnerPlan().accept(new RowCountVisitor()); - Double rhsWidth = subPlans[i].getInnerPlan().accept(new AvgRowWidthVisitor()); - if (rhsRows == null || rhsWidth == null) { - return Cost.UNKNOWN; - } - double rhsBytes = rhsWidth * rhsRows; - rows = RowCountVisitor.join(rows, rhsRows, joinInfo.getJoinTypes()[i]); - rowWidth = AvgRowWidthVisitor.join(rowWidth, rhsWidth, joinInfo.getJoinTypes()[i]); - bytes = rowWidth * rows; - cost = cost.plus(CostUtil.estimateHashJoinCost( - lhsBytes, rhsBytes, bytes, subPlans[i].hasKeyRangeExpression(), parallelLevel)); - rhsByteSum += rhsBytes; - } + public void postProcess(ServerCache result, HashJoinPlan parent) throws SQLException; - if (rhsByteSum > serverCacheLimit) { - return Cost.UNKNOWN; - } + public List getPreSteps(HashJoinPlan parent) throws SQLException; - // Calculate the cost of aggregation and ordering that is performed with the HashJoinPlan - if (delegate instanceof AggregatePlan) { - AggregatePlan aggPlan = (AggregatePlan) delegate; - double rowsBeforeHaving = RowCountVisitor.aggregate(rows, aggPlan.getGroupBy()); - double rowsAfterHaving = RowCountVisitor.filter(rowsBeforeHaving, aggPlan.getHaving()); - double bytesBeforeHaving = rowWidth * rowsBeforeHaving; - double bytesAfterHaving = rowWidth * rowsAfterHaving; - Cost aggCost = CostUtil.estimateAggregateCost( - bytes, bytesBeforeHaving, aggPlan.getGroupBy(), parallelLevel); - cost = cost.plus(aggCost); - rows = rowsAfterHaving; - bytes = bytesAfterHaving; - } - double outputRows = RowCountVisitor.limit(rows, delegate.getLimit()); - double outputBytes = rowWidth * outputRows; - if (!delegate.getOrderBy().getOrderByExpressions().isEmpty()) { - Cost orderByCost = CostUtil.estimateOrderByCost( - bytes, outputBytes, parallelLevel); - cost = cost.plus(orderByCost); - } + public List getPostSteps(HashJoinPlan parent) throws SQLException; - // Calculate the cost of child nodes - Cost lhsCost = new Cost(0, 0, r.doubleValue() * w); - Cost rhsCost = Cost.ZERO; - for (SubPlan subPlan : subPlans) { - rhsCost = rhsCost.plus(subPlan.getInnerPlan().getCost()); - } - return cost.plus(lhsCost).plus(rhsCost); - } catch (SQLException e) { - } - return Cost.UNKNOWN; - } + public QueryPlan getInnerPlan(); - public interface SubPlan { - public ServerCache execute(HashJoinPlan parent) throws SQLException; - public void postProcess(ServerCache result, HashJoinPlan parent) throws SQLException; - public List getPreSteps(HashJoinPlan parent) throws SQLException; - public List getPostSteps(HashJoinPlan parent) throws SQLException; - public QueryPlan getInnerPlan(); - public boolean hasKeyRangeExpression(); - } - - public static class WhereClauseSubPlan implements SubPlan { - private final QueryPlan plan; - private final SelectStatement select; - private final boolean expectSingleRow; - - public WhereClauseSubPlan(QueryPlan plan, SelectStatement select, boolean expectSingleRow) { - this.plan = plan; - this.select = select; - this.expectSingleRow = expectSingleRow; - } + public boolean hasKeyRangeExpression(); + } - @Override - public ServerCache execute(HashJoinPlan parent) throws SQLException { - List values = Lists. 
newArrayList(); - ResultIterator iterator = plan.iterator(); - try { - RowProjector projector = plan.getProjector(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int columnCount = projector.getColumnCount(); - int rowCount = 0; - PDataType baseType = PVarbinary.INSTANCE; - for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) { - if (expectSingleRow && rowCount >= 1) - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException(); - - if (columnCount == 1) { - ColumnProjector columnProjector = projector.getColumnProjector(0); - baseType = columnProjector.getExpression().getDataType(); - Object value = columnProjector.getValue(tuple, baseType, ptr); - values.add(value); - } else { - List expressions = Lists.newArrayListWithExpectedSize(columnCount); - for (int i = 0; i < columnCount; i++) { - ColumnProjector columnProjector = projector.getColumnProjector(i); - PDataType type = columnProjector.getExpression().getDataType(); - Object value = columnProjector.getValue(tuple, type, ptr); - expressions.add(LiteralExpression.newConstant(value, type)); - } - Expression expression = new RowValueConstructorExpression(expressions, true); - baseType = expression.getDataType(); - expression.evaluate(null, ptr); - values.add(baseType.toObject(ptr)); - } - rowCount++; - } - - Object result = expectSingleRow ? (values.isEmpty() ? null : values.get(0)) : PArrayDataType.instantiatePhoenixArray(baseType, values.toArray()); - if (result != null) { - parent.getContext().setSubqueryResult(select, result); - } - return null; - } finally { - iterator.close(); - } - } + public static class WhereClauseSubPlan implements SubPlan { + private final QueryPlan plan; + private final SelectStatement select; + private final boolean expectSingleRow; - @Override - public void postProcess(ServerCache result, HashJoinPlan parent) throws SQLException { - } + public WhereClauseSubPlan(QueryPlan plan, SelectStatement select, boolean expectSingleRow) { + this.plan = plan; + this.select = select; + this.expectSingleRow = expectSingleRow; + } - @Override - public List getPreSteps(HashJoinPlan parent) throws SQLException { - List steps = Lists.newArrayList(); - steps.add(" EXECUTE " + (expectSingleRow ? "SINGLE" : "MULTIPLE") + "-ROW SUBQUERY"); - for (String step : plan.getExplainPlan().getPlanSteps()) { - steps.add(" " + step); + @Override + public ServerCache execute(HashJoinPlan parent) throws SQLException { + List values = Lists. newArrayList(); + ResultIterator iterator = plan.iterator(); + try { + RowProjector projector = plan.getProjector(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int columnCount = projector.getColumnCount(); + int rowCount = 0; + PDataType baseType = PVarbinary.INSTANCE; + for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) { + if (expectSingleRow && rowCount >= 1) throw new SQLExceptionInfo.Builder( + SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException(); + + if (columnCount == 1) { + ColumnProjector columnProjector = projector.getColumnProjector(0); + baseType = columnProjector.getExpression().getDataType(); + Object value = columnProjector.getValue(tuple, baseType, ptr); + values.add(value); + } else { + List expressions = + Lists. 
newArrayListWithExpectedSize(columnCount); + for (int i = 0; i < columnCount; i++) { + ColumnProjector columnProjector = projector.getColumnProjector(i); + PDataType type = columnProjector.getExpression().getDataType(); + Object value = columnProjector.getValue(tuple, type, ptr); + expressions.add(LiteralExpression.newConstant(value, type)); } - return steps; - } + Expression expression = new RowValueConstructorExpression(expressions, true); + baseType = expression.getDataType(); + expression.evaluate(null, ptr); + values.add(baseType.toObject(ptr)); + } + rowCount++; + } + + Object result = expectSingleRow + ? (values.isEmpty() ? null : values.get(0)) + : PArrayDataType.instantiatePhoenixArray(baseType, values.toArray()); + if (result != null) { + parent.getContext().setSubqueryResult(select, result); + } + return null; + } finally { + iterator.close(); + } + } - @Override - public List getPostSteps(HashJoinPlan parent) throws SQLException { - return Collections.emptyList(); - } + @Override + public void postProcess(ServerCache result, HashJoinPlan parent) throws SQLException { + } - @Override - public QueryPlan getInnerPlan() { - return plan; - } + @Override + public List getPreSteps(HashJoinPlan parent) throws SQLException { + List steps = Lists.newArrayList(); + steps.add(" EXECUTE " + (expectSingleRow ? "SINGLE" : "MULTIPLE") + "-ROW SUBQUERY"); + for (String step : plan.getExplainPlan().getPlanSteps()) { + steps.add(" " + step); + } + return steps; + } - @Override - public boolean hasKeyRangeExpression() { - return false; - } + @Override + public List getPostSteps(HashJoinPlan parent) throws SQLException { + return Collections. emptyList(); } - - public static class HashSubPlan implements SubPlan { - private final int index; - private final QueryPlan plan; - private final List hashExpressions; - private final boolean singleValueOnly; - private final boolean usePersistentCache; - private final Expression keyRangeLhsExpression; - private final Expression keyRangeRhsExpression; - private final MessageDigest digest; - - public HashSubPlan(int index, QueryPlan subPlan, - List hashExpressions, - boolean singleValueOnly, - boolean usePersistentCache, - Expression keyRangeLhsExpression, - Expression keyRangeRhsExpression) { - this.index = index; - this.plan = subPlan; - this.hashExpressions = hashExpressions; - this.singleValueOnly = singleValueOnly; - this.usePersistentCache = usePersistentCache; - this.keyRangeLhsExpression = keyRangeLhsExpression; - this.keyRangeRhsExpression = keyRangeRhsExpression; - try { - this.digest = MessageDigest.getInstance("SHA-256"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - } - @Override - public ServerCache execute(HashJoinPlan parent) throws SQLException { - ScanRanges ranges = parent.delegate.getContext().getScanRanges(); - List keyRangeRhsValues = null; - if (keyRangeRhsExpression != null) { - keyRangeRhsValues = Lists.newArrayList(); - } - ServerCache cache = null; - if (hashExpressions != null) { - ResultIterator iterator = plan.iterator(); - try { - final byte[] cacheId; - String queryString = plan.getStatement().toString().replaceAll("\\$[0-9]+", "\\$"); - if (usePersistentCache) { - cacheId = Arrays.copyOfRange(digest.digest( - queryString.getBytes(StandardCharsets.UTF_8)), 0, 8); - boolean retrying = parent.delegate.getContext().getRetryingPersistentCache(Bytes.toLong(cacheId)); - if (!retrying) { - try { - cache = parent.hashClient.createServerCache(cacheId, parent.delegate); - } catch (IOException e) { - throw 
new RuntimeException(e); - } - } - } else { - cacheId = Bytes.toBytes(RANDOM.nextLong()); - } - LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + - " for " + queryString); - if (cache == null) { - LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId)); - cache = parent.hashClient.addHashCache(ranges, cacheId, iterator, - plan.getEstimatedSize(), hashExpressions, singleValueOnly, usePersistentCache, - parent.delegate.getTableRef().getTable(), keyRangeRhsExpression, - keyRangeRhsValues); - long endTime = EnvironmentEdgeManager.currentTimeMillis(); - boolean isSet = parent.firstJobEndTime.compareAndSet(0, endTime); - if (!isSet && (endTime - - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) { - LOGGER.warn(addCustomAnnotations( - "Hash plan [" + index - + "] execution seems too slow. Earlier" + - " hash cache(s) might have expired on servers.", - parent.delegate.getContext().getConnection())); - } - } - } finally { - iterator.close(); - } - } else { - assert (keyRangeRhsExpression != null); - ResultIterator iterator = plan.iterator(); - try { - for (Tuple result = iterator.next(); result != null; result = iterator.next()) { - // Evaluate key expressions for hash join key range optimization. - keyRangeRhsValues.add(HashCacheClient.evaluateKeyExpression( - keyRangeRhsExpression, result, plan.getContext().getTempPtr())); - } - } finally { - iterator.close(); - } - } - if (keyRangeRhsValues != null) { - parent.keyRangeExpressions.add(parent.createKeyRangeExpression(keyRangeLhsExpression, keyRangeRhsExpression, keyRangeRhsValues, plan.getContext().getTempPtr(), plan.getContext().getCurrentTable().getTable().rowKeyOrderOptimizable())); - } - return cache; - } + @Override + public QueryPlan getInnerPlan() { + return plan; + } - @Override - public void postProcess(ServerCache result, HashJoinPlan parent) - throws SQLException { - ServerCache cache = result; - if (cache != null) { - parent.joinInfo.getJoinIds()[index].set(cache.getId()); - } - } + @Override + public boolean hasKeyRangeExpression() { + return false; + } + } + + public static class HashSubPlan implements SubPlan { + private final int index; + private final QueryPlan plan; + private final List hashExpressions; + private final boolean singleValueOnly; + private final boolean usePersistentCache; + private final Expression keyRangeLhsExpression; + private final Expression keyRangeRhsExpression; + private final MessageDigest digest; + + public HashSubPlan(int index, QueryPlan subPlan, List hashExpressions, + boolean singleValueOnly, boolean usePersistentCache, Expression keyRangeLhsExpression, + Expression keyRangeRhsExpression) { + this.index = index; + this.plan = subPlan; + this.hashExpressions = hashExpressions; + this.singleValueOnly = singleValueOnly; + this.usePersistentCache = usePersistentCache; + this.keyRangeLhsExpression = keyRangeLhsExpression; + this.keyRangeRhsExpression = keyRangeRhsExpression; + try { + this.digest = MessageDigest.getInstance("SHA-256"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } - @Override - public List getPreSteps(HashJoinPlan parent) throws SQLException { - List steps = Lists.newArrayList(); - boolean earlyEvaluation = parent.joinInfo.earlyEvaluation()[index]; - boolean skipMerge = parent.joinInfo.getSchemas()[index].getFieldCount() == 0; - if (hashExpressions != null) { - steps.add(" PARALLEL " + parent.joinInfo.getJoinTypes()[index].toString().toUpperCase() - + "-JOIN TABLE " + index + (earlyEvaluation ? 
"" : "(DELAYED EVALUATION)") + (skipMerge ? " (SKIP MERGE)" : "")); - } - else { - steps.add(" SKIP-SCAN-JOIN TABLE " + index); + @Override + public ServerCache execute(HashJoinPlan parent) throws SQLException { + ScanRanges ranges = parent.delegate.getContext().getScanRanges(); + List keyRangeRhsValues = null; + if (keyRangeRhsExpression != null) { + keyRangeRhsValues = Lists. newArrayList(); + } + ServerCache cache = null; + if (hashExpressions != null) { + ResultIterator iterator = plan.iterator(); + try { + final byte[] cacheId; + String queryString = plan.getStatement().toString().replaceAll("\\$[0-9]+", "\\$"); + if (usePersistentCache) { + cacheId = + Arrays.copyOfRange(digest.digest(queryString.getBytes(StandardCharsets.UTF_8)), 0, 8); + boolean retrying = + parent.delegate.getContext().getRetryingPersistentCache(Bytes.toLong(cacheId)); + if (!retrying) { + try { + cache = parent.hashClient.createServerCache(cacheId, parent.delegate); + } catch (IOException e) { + throw new RuntimeException(e); + } } - for (String step : plan.getExplainPlan().getPlanSteps()) { - steps.add(" " + step); + } else { + cacheId = Bytes.toBytes(RANDOM.nextLong()); + } + LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString); + if (cache == null) { + LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId)); + cache = parent.hashClient.addHashCache(ranges, cacheId, iterator, + plan.getEstimatedSize(), hashExpressions, singleValueOnly, usePersistentCache, + parent.delegate.getTableRef().getTable(), keyRangeRhsExpression, keyRangeRhsValues); + long endTime = EnvironmentEdgeManager.currentTimeMillis(); + boolean isSet = parent.firstJobEndTime.compareAndSet(0, endTime); + if ( + !isSet && (endTime - parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive + ) { + LOGGER.warn(addCustomAnnotations( + "Hash plan [" + index + "] execution seems too slow. Earlier" + + " hash cache(s) might have expired on servers.", + parent.delegate.getContext().getConnection())); } - return steps; + } + } finally { + iterator.close(); } + } else { + assert (keyRangeRhsExpression != null); + ResultIterator iterator = plan.iterator(); + try { + for (Tuple result = iterator.next(); result != null; result = iterator.next()) { + // Evaluate key expressions for hash join key range optimization. + keyRangeRhsValues.add(HashCacheClient.evaluateKeyExpression(keyRangeRhsExpression, + result, plan.getContext().getTempPtr())); + } + } finally { + iterator.close(); + } + } + if (keyRangeRhsValues != null) { + parent.keyRangeExpressions.add(parent.createKeyRangeExpression(keyRangeLhsExpression, + keyRangeRhsExpression, keyRangeRhsValues, plan.getContext().getTempPtr(), + plan.getContext().getCurrentTable().getTable().rowKeyOrderOptimizable())); + } + return cache; + } - @Override - public List getPostSteps(HashJoinPlan parent) throws SQLException { - if (keyRangeLhsExpression == null) - return Collections. emptyList(); - - String step = " DYNAMIC SERVER FILTER BY " + keyRangeLhsExpression.toString() - + " IN (" + keyRangeRhsExpression.toString() + ")"; - return Collections. 
singletonList(step); - } + @Override + public void postProcess(ServerCache result, HashJoinPlan parent) throws SQLException { + ServerCache cache = result; + if (cache != null) { + parent.joinInfo.getJoinIds()[index].set(cache.getId()); + } + } + @Override + public List getPreSteps(HashJoinPlan parent) throws SQLException { + List steps = Lists.newArrayList(); + boolean earlyEvaluation = parent.joinInfo.earlyEvaluation()[index]; + boolean skipMerge = parent.joinInfo.getSchemas()[index].getFieldCount() == 0; + if (hashExpressions != null) { + steps.add(" PARALLEL " + parent.joinInfo.getJoinTypes()[index].toString().toUpperCase() + + "-JOIN TABLE " + index + (earlyEvaluation ? "" : "(DELAYED EVALUATION)") + + (skipMerge ? " (SKIP MERGE)" : "")); + } else { + steps.add(" SKIP-SCAN-JOIN TABLE " + index); + } + for (String step : plan.getExplainPlan().getPlanSteps()) { + steps.add(" " + step); + } + return steps; + } - @Override - public QueryPlan getInnerPlan() { - return plan; - } + @Override + public List getPostSteps(HashJoinPlan parent) throws SQLException { + if (keyRangeLhsExpression == null) return Collections. emptyList(); - @Override - public boolean hasKeyRangeExpression() { - return keyRangeLhsExpression != null; - } + String step = " DYNAMIC SERVER FILTER BY " + keyRangeLhsExpression.toString() + " IN (" + + keyRangeRhsExpression.toString() + ")"; + return Collections. singletonList(step); } @Override - public Long getEstimatedRowsToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedRows; + public QueryPlan getInnerPlan() { + return plan; } @Override - public Long getEstimatedBytesToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedBytes; + public boolean hasKeyRangeExpression() { + return keyRangeLhsExpression != null; } + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimateInfoTs; - } - - private void getEstimates() throws SQLException { - getEstimatesCalled = true; - for (SubPlan subPlan : subPlans) { - if (subPlan.getInnerPlan().getEstimatedBytesToScan() == null - || subPlan.getInnerPlan().getEstimatedRowsToScan() == null - || subPlan.getInnerPlan().getEstimateInfoTimestamp() == null) { - /* - * If any of the sub plans doesn't have the estimate info available, then we don't - * provide estimate for the overall plan - */ - estimatedBytes = null; - estimatedRows = null; - estimateInfoTs = null; - break; - } else { - estimatedBytes = - add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan()); - estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan()); - estimateInfoTs = - getMin(estimateInfoTs, subPlan.getInnerPlan().getEstimateInfoTimestamp()); - } - } + @Override + public Long getEstimatedRowsToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); } -} + return estimatedRows; + } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedBytes; + } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimateInfoTs; + } + + private void getEstimates() throws SQLException { + getEstimatesCalled = true; + for (SubPlan subPlan : subPlans) { + if ( + subPlan.getInnerPlan().getEstimatedBytesToScan() == null + || subPlan.getInnerPlan().getEstimatedRowsToScan() == null + || 
subPlan.getInnerPlan().getEstimateInfoTimestamp() == null + ) { + /* + * If any of the sub plans doesn't have the estimate info available, then we don't provide + * estimate for the overall plan + */ + estimatedBytes = null; + estimatedRows = null; + estimateInfoTs = null; + break; + } else { + estimatedBytes = add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan()); + estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan()); + estimateInfoTs = getMin(estimateInfoTs, subPlan.getInnerPlan().getEstimateInfoTimestamp()); + } + } + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java index df17e737528..d3f9370982d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/LiteralResultIterationPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,8 +26,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.RowProjector; @@ -47,111 +46,113 @@ import org.apache.phoenix.util.SQLCloseables; public class LiteralResultIterationPlan extends BaseQueryPlan { - protected final Iterable tuples; + protected final Iterable tuples; + + public LiteralResultIterationPlan(StatementContext context, FilterableStatement statement, + TableRef tableRef, RowProjector projection, Integer limit, Integer offset, OrderBy orderBy, + ParallelIteratorFactory parallelIteratorFactory) throws SQLException { + this(Collections. 
singletonList(new SingleKeyValueTuple(KeyValue.LOWESTKEY)), context, + statement, tableRef, projection, limit, offset, orderBy, parallelIteratorFactory); + } + + public LiteralResultIterationPlan(Iterable tuples, StatementContext context, + FilterableStatement statement, TableRef tableRef, RowProjector projection, Integer limit, + Integer offset, OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory) + throws SQLException { + super(context, statement, tableRef, projection, context.getBindManager().getParameterMetaData(), + limit, offset, orderBy, GroupBy.EMPTY_GROUP_BY, parallelIteratorFactory, null); + this.tuples = tuples; + } + + @Override + public Cost getCost() { + return Cost.ZERO; + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public boolean useRoundRobinIterator() throws SQLException { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, + final Map caches) throws SQLException { + ResultIterator scanner = new ResultIterator() { + private final Iterator tupleIterator = tuples.iterator(); + private boolean closed = false; + private int count = 0; + private int offsetCount = 0; + + @Override + public void close() throws SQLException { + SQLCloseables.closeAll(caches.values()); + this.closed = true; + } + + @Override + public Tuple next() throws SQLException { + while ( + !this.closed && (offset != null && offsetCount < offset) && tupleIterator.hasNext() + ) { + offsetCount++; + tupleIterator.next(); + } + if (!this.closed && (limit == null || count++ < limit) && tupleIterator.hasNext()) { + return tupleIterator.next(); + } + return null; + } - public LiteralResultIterationPlan(StatementContext context, - FilterableStatement statement, TableRef tableRef, RowProjector projection, - Integer limit, Integer offset, OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - this(Collections. 
singletonList(new SingleKeyValueTuple(KeyValue.LOWESTKEY)), - context, statement, tableRef, projection, limit, offset, orderBy, parallelIteratorFactory); - } + @Override + public void explain(List planSteps) { + } - public LiteralResultIterationPlan(Iterable tuples, StatementContext context, - FilterableStatement statement, TableRef tableRef, RowProjector projection, - Integer limit, Integer offset, OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - super(context, statement, tableRef, projection, context.getBindManager().getParameterMetaData(), limit, offset, orderBy, GroupBy.EMPTY_GROUP_BY, parallelIteratorFactory, null); - this.tuples = tuples; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } - @Override - public Cost getCost() { - return Cost.ZERO; - } + }; - @Override - public List getSplits() { - return Collections.emptyList(); + if (context.getSequenceManager().getSequenceCount() > 0) { + scanner = new SequenceResultIterator(scanner, context.getSequenceManager()); } - @Override - public List> getScans() { - return Collections.emptyList(); - } + return scanner; + } - @Override - public boolean useRoundRobinIterator() throws SQLException { - return false; - } + @Override + public Long getEstimatedRowsToScan() { + return 0l; + } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); - } + @Override + public Long getEstimatedBytesToScan() { + return 0l; + } - @Override - protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, final Map caches) - throws SQLException { - ResultIterator scanner = new ResultIterator() { - private final Iterator tupleIterator = tuples.iterator(); - private boolean closed = false; - private int count = 0; - private int offsetCount = 0; - - @Override - public void close() throws SQLException { - SQLCloseables.closeAll(caches.values()); - this.closed = true; - } - - @Override - public Tuple next() throws SQLException { - while (!this.closed && (offset != null && offsetCount < offset) && tupleIterator.hasNext()) { - offsetCount++; - tupleIterator.next(); - } - if (!this.closed - && (limit == null || count++ < limit) - && tupleIterator.hasNext()) { - return tupleIterator.next(); - } - return null; - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - }; - - if (context.getSequenceManager().getSequenceCount() > 0) { - scanner = new SequenceResultIterator(scanner, context.getSequenceManager()); - } - - return scanner; - } - - @Override - public Long getEstimatedRowsToScan() { - return 0l; - } - - @Override - public Long getEstimatedBytesToScan() { - return 0l; - } + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } - - @Override - public List getOutputOrderBys() { - return Collections. emptyList(); - } + @Override + public List getOutputOrderBys() { + return Collections. 
emptyList(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java index 6d3c9df480a..962bc02ce86 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,23 +19,23 @@ import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.UPSERT_CF; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.UPSERT_STATUS_CQ; -import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_FAILURE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_FAILURE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; -import static org.apache.phoenix.query.QueryServices.INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB; -import static org.apache.phoenix.query.QueryServices.SOURCE_OPERATION_ATTRIB; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_FAILED_COUNT; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_SIZE; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BYTES; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_COMMIT_TIME; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_INDEX_COMMIT_FAILURE_COUNT; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_SYSCAT_TIME; +import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_FAILURE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_SUCCESS_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_FAILURE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER; +import static org.apache.phoenix.query.QueryServices.INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB; +import static org.apache.phoenix.query.QueryServices.SOURCE_OPERATION_ATTRIB; import static org.apache.phoenix.query.QueryServices.WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES; import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.io.IOException; import java.sql.SQLException; @@ -117,14 +117,20 @@ import 
org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PTimestamp; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterators; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.transaction.TransactionFactory.Provider; -import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CDCUtil; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.IndexUtil; @@ -139,2329 +145,2366 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterators; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - /** * Tracks the uncommitted state */ public class MutationState implements SQLCloseable { - private static final Logger LOGGER = LoggerFactory.getLogger(MutationState.class); - private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0]; - private static final int MAX_COMMIT_RETRIES = 3; - - private final PhoenixConnection connection; - private final int maxSize; - private final long maxSizeBytes; - private final long batchSize; - private final long batchSizeBytes; - private long batchCount = 0L; - // For each table, maintain a list of mutation batches. Each element in the - // list is a set of row mutations which can be sent in a single commit batch. - // A regular upsert and a conditional upsert on the same row conflict with - // each other so they are split and send separately in different commit batches. - private final Map> mutationsMap; - private final Set uncommittedPhysicalNames = Sets.newHashSetWithExpectedSize(10); - - private long sizeOffset; - private int numRows = 0; - private int numUpdatedRowsForAutoCommit = 0; - private long estimatedSize = 0; - private int[] uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY; - private boolean isExternalTxContext = false; - private boolean validateLastDdlTimestamp; - private Map> txMutations = Collections.emptyMap(); - - private PhoenixTransactionContext phoenixTransactionContext = PhoenixTransactionContext.NULL_CONTEXT; - - private final MutationMetricQueue mutationMetricQueue; - private ReadMetricQueue readMetricQueue; - - private Map timeInExecuteMutationMap = new HashMap<>(); - private static boolean allUpsertsMutations = true; - private static boolean allDeletesMutations = true; - - private final boolean indexRegionObserverEnabledAllTables; - - /** - * Return result back to client. 
To be used when client needs to read the whole row - * or some specific attributes of the row back. As of PHOENIX-7398, returning whole - * row is supported. This is used to allow client to set Mutation attribute that is read - * by server for it to atomically read the row and return it back. - */ - private ReturnResult returnResult; - private Result result; - - public static void resetAllMutationState(){ - allDeletesMutations = true; - allUpsertsMutations = true; - } - - public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection) { - this(maxSize, maxSizeBytes, connection, false, null); - } - - public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, - PhoenixTransactionContext txContext) { - this(maxSize, maxSizeBytes, connection, false, txContext); - } - - public MutationState(MutationState mutationState) { - this(mutationState, mutationState.connection); - } - - public MutationState(MutationState mutationState, PhoenixConnection connection) { - this(mutationState.maxSize, mutationState.maxSizeBytes, connection, true, mutationState - .getPhoenixTransactionContext()); - } - - public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, - long sizeOffset) { - this(maxSize, maxSizeBytes, connection, false, null, sizeOffset); - } - - private MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, - boolean subTask, PhoenixTransactionContext txContext) { - this(maxSize, maxSizeBytes, connection, subTask, txContext, 0); - } - - private MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, - boolean subTask, PhoenixTransactionContext txContext, long sizeOffset) { - this(maxSize, maxSizeBytes, connection, Maps.> newHashMapWithExpectedSize(5), - subTask, txContext); - this.sizeOffset = sizeOffset; - } - - MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, - Map> mutationsMap, boolean subTask, PhoenixTransactionContext txContext) { - this.maxSize = maxSize; - this.maxSizeBytes = maxSizeBytes; - this.connection = connection; - this.batchSize = connection.getMutateBatchSize(); - this.batchSizeBytes = connection.getMutateBatchSizeBytes(); - this.mutationsMap = mutationsMap; - boolean isMetricsEnabled = connection.isRequestLevelMetricsEnabled(); - this.mutationMetricQueue = isMetricsEnabled ? 
new MutationMetricQueue() - : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE; - this.validateLastDdlTimestamp = ValidateLastDDLTimestampUtil - .getValidateLastDdlTimestampEnabled(this.connection); - if (subTask) { - // this code path is only used while running child scans, we can't pass the txContext to child scans - // as it is not thread safe, so we use the tx member variable - phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask); - } else if (txContext != null) { - isExternalTxContext = true; - phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask); - } - this.indexRegionObserverEnabledAllTables = Boolean.parseBoolean( - this.connection.getQueryServices().getConfiguration().get( - INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB, - DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES)); - } - - public MutationState(TableRef table, MultiRowMutationState mutations, long sizeOffset, - int maxSize, long maxSizeBytes, PhoenixConnection connection) throws SQLException { - this(maxSize, maxSizeBytes, connection, false, null, sizeOffset); - if (!mutations.isEmpty()) { - addMutations(this.mutationsMap, table, mutations); - } - this.numRows = mutations.size(); - this.estimatedSize = PhoenixKeyValueUtil.getEstimatedRowMutationSizeWithBatch(this.mutationsMap); - - throwIfTooBig(); - } - - // add a new batch of row mutations - private void addMutations(Map> mutationMap, TableRef table, - MultiRowMutationState mutations) { - List batches = mutationMap.get(table); - if (batches == null) { - batches = Lists.newArrayListWithExpectedSize(1); - } - batches.add(mutations); - mutationMap.put(table, batches); - } - - // remove a batch of mutations which have been committed - private void removeMutations(Map> mutationMap, TableRef table){ - List batches = mutationMap.get(table); - if (batches == null || batches.isEmpty()) { - mutationMap.remove(table); - return; - } - - // mutation batches are committed in FIFO order so always remove from the head - batches.remove(0); - if (batches.isEmpty()) { - mutationMap.remove(table); - } - } - - public long getEstimatedSize() { - return estimatedSize; - } - - public int getMaxSize() { - return maxSize; - } - - public long getMaxSizeBytes() { - return maxSizeBytes; - } - - public PhoenixTransactionContext getPhoenixTransactionContext() { - return phoenixTransactionContext; - } - - /** - * Commit a write fence when creating an index so that we can detect when a data table transaction is started before - * the create index but completes after it. In this case, we need to rerun the data table transaction after the - * index creation so that the index rows are generated. - * - * @param dataTable - * the data table upon which an index is being added - * @throws SQLException - */ - public void commitDDLFence(PTable dataTable) throws SQLException { - // Is this still useful after PHOENIX-6627? - if (dataTable.isTransactional()) { - try { - phoenixTransactionContext.commitDDLFence(dataTable); - } finally { - // The client expects a transaction to be in progress on the txContext while the - // VisibilityFence.prepareWait() starts a new tx and finishes/aborts it. After it's - // finished, we start a new one here. - // TODO: seems like an autonomous tx capability would be useful here. 
- phoenixTransactionContext.begin(); + private static final Logger LOGGER = LoggerFactory.getLogger(MutationState.class); + private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0]; + private static final int MAX_COMMIT_RETRIES = 3; + + private final PhoenixConnection connection; + private final int maxSize; + private final long maxSizeBytes; + private final long batchSize; + private final long batchSizeBytes; + private long batchCount = 0L; + // For each table, maintain a list of mutation batches. Each element in the + // list is a set of row mutations which can be sent in a single commit batch. + // A regular upsert and a conditional upsert on the same row conflict with + // each other so they are split and send separately in different commit batches. + private final Map> mutationsMap; + private final Set uncommittedPhysicalNames = Sets.newHashSetWithExpectedSize(10); + + private long sizeOffset; + private int numRows = 0; + private int numUpdatedRowsForAutoCommit = 0; + private long estimatedSize = 0; + private int[] uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY; + private boolean isExternalTxContext = false; + private boolean validateLastDdlTimestamp; + private Map> txMutations = Collections.emptyMap(); + + private PhoenixTransactionContext phoenixTransactionContext = + PhoenixTransactionContext.NULL_CONTEXT; + + private final MutationMetricQueue mutationMetricQueue; + private ReadMetricQueue readMetricQueue; + + private Map timeInExecuteMutationMap = new HashMap<>(); + private static boolean allUpsertsMutations = true; + private static boolean allDeletesMutations = true; + + private final boolean indexRegionObserverEnabledAllTables; + + /** + * Return result back to client. To be used when client needs to read the whole row or some + * specific attributes of the row back. As of PHOENIX-7398, returning whole row is supported. This + * is used to allow client to set Mutation attribute that is read by server for it to atomically + * read the row and return it back. 
+ */ + private ReturnResult returnResult; + private Result result; + + public static void resetAllMutationState() { + allDeletesMutations = true; + allUpsertsMutations = true; + } + + public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection) { + this(maxSize, maxSizeBytes, connection, false, null); + } + + public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, + PhoenixTransactionContext txContext) { + this(maxSize, maxSizeBytes, connection, false, txContext); + } + + public MutationState(MutationState mutationState) { + this(mutationState, mutationState.connection); + } + + public MutationState(MutationState mutationState, PhoenixConnection connection) { + this(mutationState.maxSize, mutationState.maxSizeBytes, connection, true, + mutationState.getPhoenixTransactionContext()); + } + + public MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, + long sizeOffset) { + this(maxSize, maxSizeBytes, connection, false, null, sizeOffset); + } + + private MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, + boolean subTask, PhoenixTransactionContext txContext) { + this(maxSize, maxSizeBytes, connection, subTask, txContext, 0); + } + + private MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, + boolean subTask, PhoenixTransactionContext txContext, long sizeOffset) { + this(maxSize, maxSizeBytes, connection, + Maps.> newHashMapWithExpectedSize(5), subTask, + txContext); + this.sizeOffset = sizeOffset; + } + + MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection, + Map> mutationsMap, boolean subTask, + PhoenixTransactionContext txContext) { + this.maxSize = maxSize; + this.maxSizeBytes = maxSizeBytes; + this.connection = connection; + this.batchSize = connection.getMutateBatchSize(); + this.batchSizeBytes = connection.getMutateBatchSizeBytes(); + this.mutationsMap = mutationsMap; + boolean isMetricsEnabled = connection.isRequestLevelMetricsEnabled(); + this.mutationMetricQueue = isMetricsEnabled + ? 
new MutationMetricQueue() + : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE; + this.validateLastDdlTimestamp = + ValidateLastDDLTimestampUtil.getValidateLastDdlTimestampEnabled(this.connection); + if (subTask) { + // this code path is only used while running child scans, we can't pass the txContext to child + // scans + // as it is not thread safe, so we use the tx member variable + phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask); + } else if (txContext != null) { + isExternalTxContext = true; + phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask); + } + this.indexRegionObserverEnabledAllTables = Boolean.parseBoolean(this.connection + .getQueryServices().getConfiguration().get(INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB, + DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES)); + } + + public MutationState(TableRef table, MultiRowMutationState mutations, long sizeOffset, + int maxSize, long maxSizeBytes, PhoenixConnection connection) throws SQLException { + this(maxSize, maxSizeBytes, connection, false, null, sizeOffset); + if (!mutations.isEmpty()) { + addMutations(this.mutationsMap, table, mutations); + } + this.numRows = mutations.size(); + this.estimatedSize = + PhoenixKeyValueUtil.getEstimatedRowMutationSizeWithBatch(this.mutationsMap); + + throwIfTooBig(); + } + + // add a new batch of row mutations + private void addMutations(Map> mutationMap, TableRef table, + MultiRowMutationState mutations) { + List batches = mutationMap.get(table); + if (batches == null) { + batches = Lists.newArrayListWithExpectedSize(1); + } + batches.add(mutations); + mutationMap.put(table, batches); + } + + // remove a batch of mutations which have been committed + private void removeMutations(Map> mutationMap, + TableRef table) { + List batches = mutationMap.get(table); + if (batches == null || batches.isEmpty()) { + mutationMap.remove(table); + return; + } + + // mutation batches are committed in FIFO order so always remove from the head + batches.remove(0); + if (batches.isEmpty()) { + mutationMap.remove(table); + } + } + + public long getEstimatedSize() { + return estimatedSize; + } + + public int getMaxSize() { + return maxSize; + } + + public long getMaxSizeBytes() { + return maxSizeBytes; + } + + public PhoenixTransactionContext getPhoenixTransactionContext() { + return phoenixTransactionContext; + } + + /** + * Commit a write fence when creating an index so that we can detect when a data table transaction + * is started before the create index but completes after it. In this case, we need to rerun the + * data table transaction after the index creation so that the index rows are generated. the data + * table upon which an index is being added + */ + public void commitDDLFence(PTable dataTable) throws SQLException { + // Is this still useful after PHOENIX-6627? + if (dataTable.isTransactional()) { + try { + phoenixTransactionContext.commitDDLFence(dataTable); + } finally { + // The client expects a transaction to be in progress on the txContext while the + // VisibilityFence.prepareWait() starts a new tx and finishes/aborts it. After it's + // finished, we start a new one here. + // TODO: seems like an autonomous tx capability would be useful here. 
+ phoenixTransactionContext.begin(); + } + } + } + + public boolean checkpointIfNeccessary(MutationPlan plan) throws SQLException { + if ( + !phoenixTransactionContext.isTransactionRunning() || plan.getTargetRef() == null + || plan.getTargetRef().getTable() == null + || !plan.getTargetRef().getTable().isTransactional() + ) { + return false; + } + Set sources = plan.getSourceRefs(); + if (sources.isEmpty()) { + return false; + } + // For a DELETE statement, we're always querying the table being deleted from. This isn't + // a problem, but it potentially could be if there are other references to the same table + // nested in the DELETE statement (as a sub query or join, for example). + TableRef ignoreForExcludeCurrent = + plan.getOperation() == Operation.DELETE && sources.size() == 1 ? plan.getTargetRef() : null; + boolean excludeCurrent = false; + String targetPhysicalName = plan.getTargetRef().getTable().getPhysicalName().getString(); + for (TableRef source : sources) { + if (source.getTable().isTransactional() && !source.equals(ignoreForExcludeCurrent)) { + String sourcePhysicalName = source.getTable().getPhysicalName().getString(); + if (targetPhysicalName.equals(sourcePhysicalName)) { + excludeCurrent = true; + break; + } + } + } + // If we're querying the same table we're updating, we must exclude our writes to + // it from being visible. + if (excludeCurrent) { + // If any source tables have uncommitted data prior to last checkpoint, + // then we must create a new checkpoint. + boolean hasUncommittedData = false; + for (TableRef source : sources) { + String sourcePhysicalName = source.getTable().getPhysicalName().getString(); + // Tracking uncommitted physical table names is an optimization that prevents us from + // having to do a checkpoint if no data has yet been written. If we're using an + // external transaction context, it's possible that data was already written at the + // current transaction timestamp, so we always checkpoint in that case is we're + // reading and writing to the same table. + if ( + source.getTable().isTransactional() + && (isExternalTxContext || uncommittedPhysicalNames.contains(sourcePhysicalName)) + ) { + hasUncommittedData = true; + break; + } + } + + phoenixTransactionContext.checkpoint(hasUncommittedData); + + if (hasUncommittedData) { + uncommittedPhysicalNames.clear(); + } + return true; + } + return false; + } + + // Though MutationState is not thread safe in general, this method should be because it may + // be called by TableResultIterator in a multi-threaded manner. Since we do not want to expose + // the Transaction outside of MutationState, this seems reasonable, as the member variables + // would not change as these threads are running. We also clone mutationState to ensure that + // the transaction context won't change due to a commit when auto commit is true. 
+ public Table getHTable(PTable table) throws SQLException { + Table htable = + this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes()); + if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) { + // We're only using this table for reading, so we want it wrapped even if it's an index + htable = phoenixTransactionContext.getTransactionalTable(htable, + table.isImmutableRows() || table.getType() == PTableType.INDEX); + } + return htable; + } + + public PhoenixConnection getConnection() { + return connection; + } + + public boolean isTransactionStarted() { + return phoenixTransactionContext.isTransactionRunning(); + } + + public long getInitialWritePointer() { + return phoenixTransactionContext.getTransactionId(); // First write pointer - won't change with + // checkpointing + } + + // For testing + public long getWritePointer() { + return phoenixTransactionContext.getWritePointer(); + } + + // For testing + public PhoenixVisibilityLevel getVisibilityLevel() { + return phoenixTransactionContext.getVisibilityLevel(); + } + + public boolean startTransaction(Provider provider) throws SQLException { + if (provider == null) { + return false; + } + if ( + !connection.getQueryServices().getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, + QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_START_TXN_IF_TXN_DISABLED).build() + .buildException(); + } + if (connection.getSCN() != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_START_TRANSACTION_WITH_SCN_SET) + .build().buildException(); + } + + if (phoenixTransactionContext == PhoenixTransactionContext.NULL_CONTEXT) { + phoenixTransactionContext = + provider.getTransactionProvider().getTransactionContext(connection); + } else { + if (provider != phoenixTransactionContext.getProvider()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MIX_TXN_PROVIDERS) + .setMessage(phoenixTransactionContext.getProvider().name() + " and " + provider.name()) + .build().buildException(); + } + } + if (!isTransactionStarted()) { + // Clear any transactional state in case transaction was ended outside + // of Phoenix so we don't carry the old transaction state forward. We + // cannot call reset() here due to the case of having mutations and + // then transitioning from non transactional to transactional (which + // would end up clearing our uncommitted state). 
+ resetTransactionalState(); + phoenixTransactionContext.begin(); + return true; + } + + return false; + } + + public static MutationState emptyMutationState(int maxSize, long maxSizeBytes, + PhoenixConnection connection) { + MutationState state = new MutationState(maxSize, maxSizeBytes, connection, + Collections.> emptyMap(), false, null); + state.sizeOffset = 0; + return state; + } + + private void throwIfTooBig() throws SQLException { + if (numRows > maxSize) { + int mutationSize = numRows; + resetState(); + throw new MaxMutationSizeExceededException(maxSize, mutationSize); + } + if (estimatedSize > maxSizeBytes) { + long mutationSizeByte = estimatedSize; + resetState(); + throw new MaxMutationSizeBytesExceededException(maxSizeBytes, mutationSizeByte); + } + } + + public long getUpdateCount() { + return sizeOffset + numRows; + } + + public int getNumUpdatedRowsForAutoCommit() { + return numUpdatedRowsForAutoCommit; + } + + public int getNumRows() { + return numRows; + } + + public Result getResult() { + return this.result; + } + + public void clearResult() { + this.result = null; + } + + private MultiRowMutationState + getLastMutationBatch(Map> mutations, TableRef tableRef) { + List mutationBatches = mutations.get(tableRef); + if (mutationBatches == null || mutationBatches.isEmpty()) { + return null; + } + return mutationBatches.get(mutationBatches.size() - 1); + } + + private void joinMutationState(TableRef tableRef, MultiRowMutationState srcRows, + Map> dstMutations) { + PTable table = tableRef.getTable(); + boolean isIndex = table.getType() == PTableType.INDEX; + boolean incrementRowCount = dstMutations == this.mutationsMap; + // we only need to check if the new mutation batch (srcRows) conflicts with the + // last mutation batch since we try to merge it with that only + MultiRowMutationState existingRows = getLastMutationBatch(dstMutations, tableRef); + + if (existingRows == null) { // no rows found for this table + // Size new map at batch size as that's what it'll likely grow to. 
+ MultiRowMutationState newRows = new MultiRowMutationState(connection.getMutateBatchSize()); + newRows.putAll(srcRows); + addMutations(dstMutations, tableRef, newRows); + if (incrementRowCount && !isIndex) { + numRows += srcRows.size(); + // if we added all the rows from newMutationState we can just increment the + // estimatedSize by newMutationState.estimatedSize + estimatedSize += srcRows.estimatedSize; + } + return; + } + + // for conflicting rows + MultiRowMutationState conflictingRows = + new MultiRowMutationState(connection.getMutateBatchSize()); + + // Rows for this table already exist, check for conflicts + for (Map.Entry rowEntry : srcRows.entrySet()) { + ImmutableBytesPtr key = rowEntry.getKey(); + RowMutationState newRowMutationState = rowEntry.getValue(); + RowMutationState existingRowMutationState = existingRows.get(key); + if (existingRowMutationState == null) { + existingRows.put(key, newRowMutationState); + if (incrementRowCount && !isIndex) { // Don't count index rows in row count + numRows++; + // increment estimated size by the size of the new row + estimatedSize += newRowMutationState.calculateEstimatedSize(); + } + continue; + } + Map existingValues = existingRowMutationState.getColumnValues(); + Map newValues = newRowMutationState.getColumnValues(); + if (existingValues != PRow.DELETE_MARKER && newValues != PRow.DELETE_MARKER) { + // Check if we can merge existing column values with new column values + long beforeMergeSize = existingRowMutationState.calculateEstimatedSize(); + boolean isMerged = existingRowMutationState.join(rowEntry.getValue()); + if (isMerged) { + // decrement estimated size by the size of the old row + estimatedSize -= beforeMergeSize; + // increment estimated size by the size of the new row + estimatedSize += existingRowMutationState.calculateEstimatedSize(); + } else { + // cannot merge regular upsert and conditional upsert + // conflicting row is not a new row so no need to increment numRows + conflictingRows.put(key, newRowMutationState); + } + } else { + existingRows.put(key, newRowMutationState); + } + } + + if (!conflictingRows.isEmpty()) { + addMutations(dstMutations, tableRef, conflictingRows); + } + } + + private void joinMutationState(Map> srcMutations, + Map> dstMutations) { + // Merge newMutation with this one, keeping state from newMutation for any overlaps + for (Map.Entry> entry : srcMutations.entrySet()) { + TableRef tableRef = entry.getKey(); + for (MultiRowMutationState srcRows : entry.getValue()) { + // Replace existing entries for the table with new entries + joinMutationState(tableRef, srcRows, dstMutations); + } + } + } + + /** + * Combine a newer mutation with this one, where in the event of overlaps, the newer one will take + * precedence. Combine any metrics collected for the newer mutation. 
the newer mutation state + */ + public void join(MutationState newMutationState) throws SQLException { + if (this == newMutationState) { // Doesn't make sense + return; + } + + phoenixTransactionContext.join(newMutationState.getPhoenixTransactionContext()); + + this.sizeOffset += newMutationState.sizeOffset; + joinMutationState(newMutationState.mutationsMap, this.mutationsMap); + if (!newMutationState.txMutations.isEmpty()) { + if (txMutations.isEmpty()) { + txMutations = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); + } + joinMutationState(newMutationState.txMutations, this.txMutations); + } + mutationMetricQueue.combineMetricQueues(newMutationState.mutationMetricQueue); + if (readMetricQueue == null) { + readMetricQueue = newMutationState.readMetricQueue; + } else if (readMetricQueue != null && newMutationState.readMetricQueue != null) { + readMetricQueue.combineReadMetrics(newMutationState.readMetricQueue); + } + throwIfTooBig(); + } + + private static ImmutableBytesPtr getNewRowKeyWithRowTimestamp(ImmutableBytesPtr ptr, + long rowTimestamp, PTable table) { + RowKeySchema schema = table.getRowKeySchema(); + int rowTimestampColPos = table.getRowTimestampColPos(); + Field rowTimestampField = schema.getField(rowTimestampColPos); + byte[] rowTimestampBytes = rowTimestampField.getDataType() == PTimestamp.INSTANCE + ? PTimestamp.INSTANCE.toBytes(new Timestamp(rowTimestamp), rowTimestampField.getSortOrder()) + : PLong.INSTANCE.toBytes(rowTimestamp, rowTimestampField.getSortOrder()); + int oldOffset = ptr.getOffset(); + int oldLength = ptr.getLength(); + // Move the pointer to the start byte of the row timestamp pk + schema.position(ptr, 0, rowTimestampColPos); + byte[] b = ptr.get(); + int newOffset = ptr.getOffset(); + int length = ptr.getLength(); + for (int i = newOffset; i < newOffset + length; i++) { + // modify the underlying bytes array with the bytes of the row timestamp + b[i] = rowTimestampBytes[i - newOffset]; + } + // move the pointer back to where it was before. + ptr.set(ptr.get(), oldOffset, oldLength); + return ptr; + } + + private List getCDCDeleteMutations(PTable table, PTable index, Long mutationTimestamp, + List mutationList) throws SQLException { + final ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); + List indexMutations = Lists.newArrayListWithExpectedSize(mutationList.size()); + for (final Mutation mutation : mutationList) { + // Only generate extra row mutations for DELETE + if (mutation instanceof Delete) { + ptr.set(mutation.getRow()); + ValueGetter getter = new AbstractValueGetter() { + @Override + public byte[] getRowKey() { + return mutation.getRow(); + } + + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { + // Always return null for our empty key value, as this will cause the index + // maintainer to always treat this Put as a new row. + if (IndexUtil.isEmptyKeyValue(table, ref)) { + return null; } - } - } - - public boolean checkpointIfNeccessary(MutationPlan plan) throws SQLException { - if (!phoenixTransactionContext.isTransactionRunning() || plan.getTargetRef() == null - || plan.getTargetRef().getTable() == null || !plan.getTargetRef().getTable().isTransactional()) { return false; } - Set sources = plan.getSourceRefs(); - if (sources.isEmpty()) { return false; } - // For a DELETE statement, we're always querying the table being deleted from. 
This isn't - // a problem, but it potentially could be if there are other references to the same table - // nested in the DELETE statement (as a sub query or join, for example). - TableRef ignoreForExcludeCurrent = plan.getOperation() == Operation.DELETE && sources.size() == 1 ? plan - .getTargetRef() : null; - boolean excludeCurrent = false; - String targetPhysicalName = plan.getTargetRef().getTable().getPhysicalName().getString(); - for (TableRef source : sources) { - if (source.getTable().isTransactional() && !source.equals(ignoreForExcludeCurrent)) { - String sourcePhysicalName = source.getTable().getPhysicalName().getString(); - if (targetPhysicalName.equals(sourcePhysicalName)) { - excludeCurrent = true; - break; - } + byte[] family = ref.getFamily(); + byte[] qualifier = ref.getQualifier(); + Map> familyMap = mutation.getFamilyCellMap(); + List kvs = familyMap.get(family); + if (kvs == null) { + return null; } - } - // If we're querying the same table we're updating, we must exclude our writes to - // it from being visible. - if (excludeCurrent) { - // If any source tables have uncommitted data prior to last checkpoint, - // then we must create a new checkpoint. - boolean hasUncommittedData = false; - for (TableRef source : sources) { - String sourcePhysicalName = source.getTable().getPhysicalName().getString(); - // Tracking uncommitted physical table names is an optimization that prevents us from - // having to do a checkpoint if no data has yet been written. If we're using an - // external transaction context, it's possible that data was already written at the - // current transaction timestamp, so we always checkpoint in that case is we're - // reading and writing to the same table. - if (source.getTable().isTransactional() - && (isExternalTxContext || uncommittedPhysicalNames.contains(sourcePhysicalName))) { - hasUncommittedData = true; - break; - } - } - - phoenixTransactionContext.checkpoint(hasUncommittedData); - - if (hasUncommittedData) { - uncommittedPhysicalNames.clear(); + for (Cell kv : kvs) { + if ( + Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + family, 0, family.length) == 0 + && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0 + ) { + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + connection.getKeyValueBuilder().getValueAsPtr(kv, ptr); + return ptr; + } } - return true; - } - return false; - } - - // Though MutationState is not thread safe in general, this method should be because it may - // be called by TableResultIterator in a multi-threaded manner. Since we do not want to expose - // the Transaction outside of MutationState, this seems reasonable, as the member variables - // would not change as these threads are running. We also clone mutationState to ensure that - // the transaction context won't change due to a commit when auto commit is true. 
- public Table getHTable(PTable table) throws SQLException { - Table htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes()); - if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) { - // We're only using this table for reading, so we want it wrapped even if it's an index - htable = phoenixTransactionContext.getTransactionalTable(htable, table.isImmutableRows() || table.getType() == PTableType.INDEX); - } - return htable; - } - - public PhoenixConnection getConnection() { - return connection; - } - - public boolean isTransactionStarted() { - return phoenixTransactionContext.isTransactionRunning(); - } - - public long getInitialWritePointer() { - return phoenixTransactionContext.getTransactionId(); // First write pointer - won't change with checkpointing - } - - // For testing - public long getWritePointer() { - return phoenixTransactionContext.getWritePointer(); - } - - // For testing - public PhoenixVisibilityLevel getVisibilityLevel() { - return phoenixTransactionContext.getVisibilityLevel(); - } - - public boolean startTransaction(Provider provider) throws SQLException { - if (provider == null) { return false; } - if (!connection.getQueryServices().getProps() - .getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_START_TXN_IF_TXN_DISABLED).build().buildException(); } - if (connection.getSCN() != null) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_START_TRANSACTION_WITH_SCN_SET).build().buildException(); } - - if (phoenixTransactionContext == PhoenixTransactionContext.NULL_CONTEXT) { - phoenixTransactionContext = provider.getTransactionProvider().getTransactionContext(connection); - } else { - if (provider != phoenixTransactionContext.getProvider()) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_MIX_TXN_PROVIDERS) - .setMessage(phoenixTransactionContext.getProvider().name() + " and " + provider.name()).build() - .buildException(); } - } - if (!isTransactionStarted()) { - // Clear any transactional state in case transaction was ended outside - // of Phoenix so we don't carry the old transaction state forward. We - // cannot call reset() here due to the case of having mutations and - // then transitioning from non transactional to transactional (which - // would end up clearing our uncommitted state). 
- resetTransactionalState(); - phoenixTransactionContext.begin(); - return true; - } - - return false; - } - - public static MutationState emptyMutationState(int maxSize, long maxSizeBytes, - PhoenixConnection connection) { - MutationState state = new MutationState(maxSize, maxSizeBytes, connection, - Collections.> emptyMap(), false, null); - state.sizeOffset = 0; - return state; - } - - private void throwIfTooBig() throws SQLException { - if (numRows > maxSize) { - int mutationSize = numRows; - resetState(); - throw new MaxMutationSizeExceededException(maxSize, mutationSize); - } - if (estimatedSize > maxSizeBytes) { - long mutationSizeByte = estimatedSize; - resetState(); - throw new MaxMutationSizeBytesExceededException(maxSizeBytes, mutationSizeByte); - } - } - - public long getUpdateCount() { - return sizeOffset + numRows; - } - - public int getNumUpdatedRowsForAutoCommit() { - return numUpdatedRowsForAutoCommit; - } - - public int getNumRows() { - return numRows; - } - - public Result getResult() { - return this.result; - } - - public void clearResult() { - this.result = null; - } - - private MultiRowMutationState getLastMutationBatch(Map> mutations, TableRef tableRef) { - List mutationBatches = mutations.get(tableRef); - if (mutationBatches == null || mutationBatches.isEmpty()) { return null; - } - return mutationBatches.get(mutationBatches.size() - 1); - } - - private void joinMutationState(TableRef tableRef, MultiRowMutationState srcRows, - Map> dstMutations) { - PTable table = tableRef.getTable(); - boolean isIndex = table.getType() == PTableType.INDEX; - boolean incrementRowCount = dstMutations == this.mutationsMap; - // we only need to check if the new mutation batch (srcRows) conflicts with the - // last mutation batch since we try to merge it with that only - MultiRowMutationState existingRows = getLastMutationBatch(dstMutations, tableRef); - - if (existingRows == null) { // no rows found for this table - // Size new map at batch size as that's what it'll likely grow to. - MultiRowMutationState newRows = new MultiRowMutationState(connection.getMutateBatchSize()); - newRows.putAll(srcRows); - addMutations(dstMutations, tableRef, newRows); - if (incrementRowCount && !isIndex) { - numRows += srcRows.size(); - // if we added all the rows from newMutationState we can just increment the - // estimatedSize by newMutationState.estimatedSize - estimatedSize += srcRows.estimatedSize; - } - return; - } + } + }; + ImmutableBytesPtr key = + new ImmutableBytesPtr(maintainer.buildRowKey(getter, ptr, null, null, mutationTimestamp)); + PRow row = index.newRow(connection.getKeyValueBuilder(), mutationTimestamp, key, false); + row.delete(); + indexMutations.addAll(row.toRowMutations()); + } + } + return indexMutations; + } + + private Iterator>> addRowMutations(final TableRef tableRef, + final MultiRowMutationState values, final long mutationTimestamp, final long serverTimestamp, + boolean includeAllIndexes, final boolean sendAll) { + final PTable table = tableRef.getTable(); + final List indexList = includeAllIndexes + ? Lists.newArrayList(IndexMaintainer.maintainedIndexes(table.getIndexes().iterator())) + : IndexUtil.getClientMaintainedIndexes(table); + final Iterator indexes = indexList.iterator(); + final List mutationList = Lists.newArrayListWithExpectedSize(values.size()); + final List mutationsPertainingToIndex = + indexes.hasNext() ? 
Lists.newArrayListWithExpectedSize(values.size()) : null; + generateMutations(tableRef, mutationTimestamp, serverTimestamp, values, mutationList, + mutationsPertainingToIndex); + return new Iterator>>() { + boolean isFirst = true; + Map> indexMutationsMap = null; + + @Override + public boolean hasNext() { + return isFirst || indexes.hasNext(); + } + + @Override + public Pair> next() { + if (isFirst) { + isFirst = false; + return new Pair<>(table, mutationList); + } + + PTable index = indexes.next(); + + List indexMutations = null; + try { - // for conflicting rows - MultiRowMutationState conflictingRows = new MultiRowMutationState(connection.getMutateBatchSize()); - - // Rows for this table already exist, check for conflicts - for (Map.Entry rowEntry : srcRows.entrySet()) { - ImmutableBytesPtr key = rowEntry.getKey(); - RowMutationState newRowMutationState = rowEntry.getValue(); - RowMutationState existingRowMutationState = existingRows.get(key); - if (existingRowMutationState == null) { - existingRows.put(key, newRowMutationState); - if (incrementRowCount && !isIndex) { // Don't count index rows in row count - numRows++; - // increment estimated size by the size of the new row - estimatedSize += newRowMutationState.calculateEstimatedSize(); - } - continue; - } - Map existingValues = existingRowMutationState.getColumnValues(); - Map newValues = newRowMutationState.getColumnValues(); - if (existingValues != PRow.DELETE_MARKER && newValues != PRow.DELETE_MARKER) { - // Check if we can merge existing column values with new column values - long beforeMergeSize = existingRowMutationState.calculateEstimatedSize(); - boolean isMerged = existingRowMutationState.join(rowEntry.getValue()); - if (isMerged) { - // decrement estimated size by the size of the old row - estimatedSize -= beforeMergeSize; - // increment estimated size by the size of the new row - estimatedSize += existingRowMutationState.calculateEstimatedSize(); - } else { - // cannot merge regular upsert and conditional upsert - // conflicting row is not a new row so no need to increment numRows - conflictingRows.put(key, newRowMutationState); + if (!mutationsPertainingToIndex.isEmpty()) { + if (table.isTransactional()) { + if (indexMutationsMap == null) { + PhoenixTxIndexMutationGenerator generator = + PhoenixTxIndexMutationGenerator.newGenerator(connection, table, indexList, + mutationsPertainingToIndex.get(0).getAttributesMap()); + try (Table htable = + connection.getQueryServices().getTable(table.getPhysicalName().getBytes())) { + Collection> allMutations = + generator.getIndexUpdates(htable, mutationsPertainingToIndex.iterator()); + indexMutationsMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + for (Pair mutation : allMutations) { + List mutations = indexMutationsMap.get(mutation.getSecond()); + if (mutations == null) { + mutations = Lists.newArrayList(); + indexMutationsMap.put(mutation.getSecond(), mutations); + } + mutations.add(mutation.getFirst()); + } } + } + indexMutations = indexMutationsMap.get(index.getPhysicalName().getBytes()); } else { - existingRows.put(key, newRowMutationState); + indexMutations = IndexUtil.generateIndexData(table, index, values, + mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection); } - } - - if (!conflictingRows.isEmpty()) { - addMutations(dstMutations, tableRef, conflictingRows); - } - } - - private void joinMutationState(Map> srcMutations, - Map> dstMutations) { - // Merge newMutation with this one, keeping state from newMutation for any overlaps - for (Map.Entry> entry : 
srcMutations.entrySet()) { - TableRef tableRef = entry.getKey(); - for (MultiRowMutationState srcRows : entry.getValue()) { - // Replace existing entries for the table with new entries - joinMutationState(tableRef, srcRows, dstMutations); + } + + // we may also have to include delete mutations for immutable tables if we are not + // processing all + // the tables in the mutations map + if (!sendAll) { + TableRef key = new TableRef(index); + List multiRowMutationState = mutationsMap.remove(key); + if (multiRowMutationState != null) { + final List deleteMutations = Lists.newArrayList(); + // for index table there will only be 1 mutation batch in the list + generateMutations(key, mutationTimestamp, serverTimestamp, + multiRowMutationState.get(0), deleteMutations, null); + if (indexMutations == null) { + indexMutations = deleteMutations; + } else { + indexMutations.addAll(deleteMutations); + } } - } - } - - /** - * Combine a newer mutation with this one, where in the event of overlaps, the newer one will take precedence. - * Combine any metrics collected for the newer mutation. - * - * @param newMutationState - * the newer mutation state - */ - public void join(MutationState newMutationState) throws SQLException { - if (this == newMutationState) { // Doesn't make sense - return; - } - - phoenixTransactionContext.join(newMutationState.getPhoenixTransactionContext()); - - this.sizeOffset += newMutationState.sizeOffset; - joinMutationState(newMutationState.mutationsMap, this.mutationsMap); - if (!newMutationState.txMutations.isEmpty()) { - if (txMutations.isEmpty()) { - txMutations = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); + } + + if (CDCUtil.isCDCIndex(index)) { + List cdcMutations = + getCDCDeleteMutations(table, index, mutationTimestamp, mutationList); + if (cdcMutations.size() > 0) { + if (indexMutations == null) { + indexMutations = cdcMutations; + } else { + indexMutations.addAll(cdcMutations); + } } - joinMutationState(newMutationState.txMutations, this.txMutations); - } - mutationMetricQueue.combineMetricQueues(newMutationState.mutationMetricQueue); - if (readMetricQueue == null) { - readMetricQueue = newMutationState.readMetricQueue; - } else if (readMetricQueue != null && newMutationState.readMetricQueue != null) { - readMetricQueue.combineReadMetrics(newMutationState.readMetricQueue); - } - throwIfTooBig(); - } - - private static ImmutableBytesPtr getNewRowKeyWithRowTimestamp(ImmutableBytesPtr ptr, long rowTimestamp, PTable table) { - RowKeySchema schema = table.getRowKeySchema(); - int rowTimestampColPos = table.getRowTimestampColPos(); - Field rowTimestampField = schema.getField(rowTimestampColPos); - byte[] rowTimestampBytes = rowTimestampField.getDataType() == PTimestamp.INSTANCE ? - PTimestamp.INSTANCE.toBytes(new Timestamp(rowTimestamp), rowTimestampField.getSortOrder()) : - PLong.INSTANCE.toBytes(rowTimestamp, rowTimestampField.getSortOrder()); - int oldOffset = ptr.getOffset(); - int oldLength = ptr.getLength(); - // Move the pointer to the start byte of the row timestamp pk - schema.position(ptr, 0, rowTimestampColPos); - byte[] b = ptr.get(); - int newOffset = ptr.getOffset(); - int length = ptr.getLength(); - for (int i = newOffset; i < newOffset + length; i++) { - // modify the underlying bytes array with the bytes of the row timestamp - b[i] = rowTimestampBytes[i - newOffset]; - } - // move the pointer back to where it was before. 
- ptr.set(ptr.get(), oldOffset, oldLength); - return ptr; - } - - private List getCDCDeleteMutations(PTable table, PTable index, - Long mutationTimestamp, - List mutationList) throws - SQLException { - final ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); - List indexMutations = Lists.newArrayListWithExpectedSize(mutationList.size()); - for (final Mutation mutation : mutationList) { - // Only generate extra row mutations for DELETE - if (mutation instanceof Delete) { - ptr.set(mutation.getRow()); - ValueGetter getter = new AbstractValueGetter() { - @Override - public byte[] getRowKey() { - return mutation.getRow(); - } - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { - // Always return null for our empty key value, as this will cause the index - // maintainer to always treat this Put as a new row. - if (IndexUtil.isEmptyKeyValue(table, ref)) { - return null; - } - byte[] family = ref.getFamily(); - byte[] qualifier = ref.getQualifier(); - Map> familyMap = mutation.getFamilyCellMap(); - List kvs = familyMap.get(family); - if (kvs == null) { - return null; - } - for (Cell kv : kvs) { - if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), - kv.getFamilyLength(), family, 0, family.length) == 0 - && Bytes.compareTo(kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength(), - qualifier, 0, qualifier.length) == 0) { - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - connection.getKeyValueBuilder().getValueAsPtr(kv, ptr); - return ptr; - } - } - return null; - } - }; - ImmutableBytesPtr key = new ImmutableBytesPtr(maintainer.buildRowKey( - getter, ptr, null, null, mutationTimestamp)); - PRow row = index.newRow( - connection.getKeyValueBuilder(), mutationTimestamp, key, false); - row.delete(); - indexMutations.addAll(row.toRowMutations()); + } + + } catch (SQLException | IOException e) { + throw new IllegalDataException(e); + } + return new Pair>(index, + indexMutations == null ? Collections. emptyList() : indexMutations); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + private void generateMutations(final TableRef tableRef, final long mutationTimestamp, + final long serverTimestamp, final MultiRowMutationState values, + final List mutationList, final List mutationsPertainingToIndex) { + final PTable table = tableRef.getTable(); + boolean tableWithRowTimestampCol = table.getRowTimestampColPos() != -1; + Iterator> iterator = + values.entrySet().iterator(); + long timestampToUse = mutationTimestamp; + MultiRowMutationState modifiedValues = new MultiRowMutationState(16); + boolean wildcardIncludesDynamicCols = connection.getQueryServices().getProps() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + while (iterator.hasNext()) { + Map.Entry rowEntry = iterator.next(); + byte[] onDupKeyBytes = rowEntry.getValue().getOnDupKeyBytes(); + boolean hasOnDupKey = onDupKeyBytes != null; + ImmutableBytesPtr key = rowEntry.getKey(); + RowMutationState state = rowEntry.getValue(); + if (tableWithRowTimestampCol) { + RowTimestampColInfo rowTsColInfo = state.getRowTimestampColInfo(); + if (rowTsColInfo.useServerTimestamp()) { + // regenerate the key with this timestamp. 
+ key = getNewRowKeyWithRowTimestamp(key, serverTimestamp, table); + // since we are about to modify the byte[] stored in key (which changes its hashcode) + // we need to remove the entry from the values map and add a new entry with the modified + // byte[] + modifiedValues.put(key, state); + iterator.remove(); + timestampToUse = serverTimestamp; + } else { + if (rowTsColInfo.getTimestamp() != null) { + timestampToUse = rowTsColInfo.getTimestamp(); + } + } + } + PRow row = table.newRow(connection.getKeyValueBuilder(), timestampToUse, key, hasOnDupKey); + List rowMutations, rowMutationsPertainingToIndex; + if (rowEntry.getValue().getColumnValues() == PRow.DELETE_MARKER) { // means delete + row.delete(); + rowMutations = row.toRowMutations(); + String sourceOfDelete = getConnection().getSourceOfOperation(); + if (sourceOfDelete != null) { + byte[] sourceOfDeleteBytes = Bytes.toBytes(sourceOfDelete); + // Set the source of operation attribute. + for (Mutation mutation : rowMutations) { + mutation.setAttribute(SOURCE_OPERATION_ATTRIB, sourceOfDeleteBytes); + } + } + if (this.returnResult != null) { + if (this.returnResult == ReturnResult.ROW) { + for (Mutation mutation : rowMutations) { + mutation.setAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT, + PhoenixIndexBuilderHelper.RETURN_RESULT_ROW); } + } } - return indexMutations; - } - - private Iterator>> addRowMutations(final TableRef tableRef, - final MultiRowMutationState values, final long mutationTimestamp, final long serverTimestamp, - boolean includeAllIndexes, final boolean sendAll) { - final PTable table = tableRef.getTable(); - final List indexList = includeAllIndexes ? - Lists.newArrayList(IndexMaintainer.maintainedIndexes(table.getIndexes().iterator())) : - IndexUtil.getClientMaintainedIndexes(table); - final Iterator indexes = indexList.iterator(); - final List mutationList = Lists.newArrayListWithExpectedSize(values.size()); - final List mutationsPertainingToIndex = indexes.hasNext() ? 
Lists - .newArrayListWithExpectedSize(values.size()) : null; - generateMutations(tableRef, mutationTimestamp, serverTimestamp, values, mutationList, - mutationsPertainingToIndex); - return new Iterator>>() { - boolean isFirst = true; - Map> indexMutationsMap = null; - - @Override - public boolean hasNext() { - return isFirst || indexes.hasNext(); - } - - @Override - public Pair> next() { - if (isFirst) { - isFirst = false; - return new Pair<>(table, mutationList); - } - - PTable index = indexes.next(); - - List indexMutations = null; - try { - - if (!mutationsPertainingToIndex.isEmpty()) { - if (table.isTransactional()) { - if (indexMutationsMap == null) { - PhoenixTxIndexMutationGenerator generator = PhoenixTxIndexMutationGenerator.newGenerator(connection, table, - indexList, mutationsPertainingToIndex.get(0).getAttributesMap()); - try (Table htable = connection.getQueryServices().getTable( - table.getPhysicalName().getBytes())) { - Collection> allMutations = generator.getIndexUpdates(htable, - mutationsPertainingToIndex.iterator()); - indexMutationsMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - for (Pair mutation : allMutations) { - List mutations = indexMutationsMap.get(mutation.getSecond()); - if (mutations == null) { - mutations = Lists.newArrayList(); - indexMutationsMap.put(mutation.getSecond(), mutations); - } - mutations.add(mutation.getFirst()); - } - } - } - indexMutations = indexMutationsMap.get(index.getPhysicalName().getBytes()); - } else { - indexMutations = IndexUtil.generateIndexData(table, index, values, - mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection); - } - } - - // we may also have to include delete mutations for immutable tables if we are not processing all - // the tables in the mutations map - if (!sendAll) { - TableRef key = new TableRef(index); - List multiRowMutationState = mutationsMap.remove(key); - if (multiRowMutationState != null) { - final List deleteMutations = Lists.newArrayList(); - // for index table there will only be 1 mutation batch in the list - generateMutations(key, mutationTimestamp, serverTimestamp, multiRowMutationState.get(0), deleteMutations, null); - if (indexMutations == null) { - indexMutations = deleteMutations; - } else { - indexMutations.addAll(deleteMutations); - } - } - } + // The DeleteCompiler already generates the deletes for indexes, so no need to do it again + rowMutationsPertainingToIndex = Collections.emptyList(); - if (CDCUtil.isCDCIndex(index)) { - List cdcMutations = getCDCDeleteMutations( - table, index, mutationTimestamp, mutationList); - if (cdcMutations.size() > 0) { - if (indexMutations == null) { - indexMutations = cdcMutations; - } else { - indexMutations.addAll(cdcMutations); - } - } - } - - } catch (SQLException | IOException e) { - throw new IllegalDataException(e); - } - return new Pair>(index, - indexMutations == null ? Collections. 
emptyList() - : indexMutations); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); + } else { + for (Map.Entry valueEntry : rowEntry.getValue().getColumnValues() + .entrySet()) { + row.setValue(valueEntry.getKey(), valueEntry.getValue()); + } + if (wildcardIncludesDynamicCols && row.setAttributesForDynamicColumnsIfReqd()) { + row.setAttributeToProcessDynamicColumnsMetadata(); + } + rowMutations = row.toRowMutations(); + // Pass through ON DUPLICATE KEY info through mutations + // In the case of the same clause being used on many statements, this will be + // inefficient because we're transmitting the same information for each mutation. + // TODO: use our ServerCache + for (Mutation mutation : rowMutations) { + if (onDupKeyBytes != null) { + mutation.setAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB, onDupKeyBytes); + } + if (this.returnResult != null) { + if (this.returnResult == ReturnResult.ROW) { + mutation.setAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT, + PhoenixIndexBuilderHelper.RETURN_RESULT_ROW); } + } + } + rowMutationsPertainingToIndex = rowMutations; + } + annotateMutationsWithMetadata(table, rowMutations); + mutationList.addAll(rowMutations); + if (mutationsPertainingToIndex != null) + mutationsPertainingToIndex.addAll(rowMutationsPertainingToIndex); + } + values.putAll(modifiedValues); + } + + private void annotateMutationsWithMetadata(PTable table, List rowMutations) { + if (table == null) { + return; + } + // Annotate each mutation with enough phoenix metadata so that anyone interested can + // deterministically figure out exactly what Phoenix schema object created the mutation + // Server-side we can annotate the HBase WAL with these. + for (Mutation mutation : rowMutations) { + annotateMutationWithMetadata(table, mutation); + } + + // only annotate external schema id if the change detection flag is on the table. + if (!table.isChangeDetectionEnabled()) { + return; + } + // annotate each mutation with enough metadata so that anyone interested can + // deterministically figure out exactly what Phoenix schema object created the mutation + // Server-side we can annotate the HBase WAL with these. + for (Mutation mutation : rowMutations) { + annotateMutationWithMetadataWithExternalSchemaId(table, mutation); + } + + } + + private void annotateMutationWithMetadataWithExternalSchemaId(PTable table, Mutation mutation) { + byte[] externalSchemaRegistryId = + table.getExternalSchemaId() != null ? Bytes.toBytes(table.getExternalSchemaId()) : null; + WALAnnotationUtil.annotateMutation(mutation, externalSchemaRegistryId); + } + + private void annotateMutationWithMetadata(PTable table, Mutation mutation) { + byte[] tenantId = table.getTenantId() != null ? table.getTenantId().getBytes() : null; + byte[] schemaName = table.getSchemaName() != null ? table.getSchemaName().getBytes() : null; + byte[] tableName = table.getTableName() != null ? table.getTableName().getBytes() : null; + byte[] tableType = table.getType().getValue().getBytes(); + byte[] externalSchemaRegistryId = + table.getExternalSchemaId() != null ? Bytes.toBytes(table.getExternalSchemaId()) : null; + byte[] lastDDLTimestamp = + table.getLastDDLTimestamp() != null ? Bytes.toBytes(table.getLastDDLTimestamp()) : null; + WALAnnotationUtil.annotateMutation(mutation, tenantId, schemaName, tableName, tableType, + lastDDLTimestamp); + } + + /** + * Get the unsorted list of HBase mutations for the tables with uncommitted data. + * @return list of HBase mutations for uncommitted data. 
+ */ + public Iterator>> toMutations(Long timestamp) { + return toMutations(false, timestamp); + } + + public Iterator>> toMutations() { + return toMutations(false, null); + } + + public Iterator>> toMutations(final boolean includeMutableIndexes) { + return toMutations(includeMutableIndexes, null); + } + + public Iterator>> toMutations(final boolean includeMutableIndexes, + final Long tableTimestamp) { + final Iterator>> iterator = + this.mutationsMap.entrySet().iterator(); + if (!iterator.hasNext()) { + return Collections.emptyIterator(); + } + Long scn = connection.getSCN(); + final long serverTimestamp = getTableTimestamp(tableTimestamp, scn); + final long mutationTimestamp = getMutationTimestamp(scn); + return new Iterator>>() { + private Map.Entry> current = iterator.next(); + private int batchOffset = 0; + private Iterator>> innerIterator = init(); + + private Iterator>> init() { + final Iterator>> mutationIterator = + addRowMutations(current.getKey(), current.getValue().get(batchOffset), mutationTimestamp, + serverTimestamp, includeMutableIndexes, true); + return new Iterator>>() { + @Override + public boolean hasNext() { + return mutationIterator.hasNext(); + } + + @Override + public Pair> next() { + Pair> pair = mutationIterator.next(); + return new Pair>(pair.getFirst().getPhysicalName().getBytes(), + pair.getSecond()); + } + + @Override + public void remove() { + mutationIterator.remove(); + } }; - } - - private void generateMutations(final TableRef tableRef, final long mutationTimestamp, final long serverTimestamp, - final MultiRowMutationState values, final List mutationList, - final List mutationsPertainingToIndex) { - final PTable table = tableRef.getTable(); - boolean tableWithRowTimestampCol = table.getRowTimestampColPos() != -1; - Iterator> iterator = values.entrySet().iterator(); - long timestampToUse = mutationTimestamp; - MultiRowMutationState modifiedValues = new MultiRowMutationState(16); - boolean wildcardIncludesDynamicCols = connection.getQueryServices().getProps().getBoolean( - WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - while (iterator.hasNext()) { - Map.Entry rowEntry = iterator.next(); - byte[] onDupKeyBytes = rowEntry.getValue().getOnDupKeyBytes(); - boolean hasOnDupKey = onDupKeyBytes != null; - ImmutableBytesPtr key = rowEntry.getKey(); - RowMutationState state = rowEntry.getValue(); - if (tableWithRowTimestampCol) { - RowTimestampColInfo rowTsColInfo = state.getRowTimestampColInfo(); - if (rowTsColInfo.useServerTimestamp()) { - // regenerate the key with this timestamp. 
- key = getNewRowKeyWithRowTimestamp(key, serverTimestamp, table); - // since we are about to modify the byte[] stored in key (which changes its hashcode) - // we need to remove the entry from the values map and add a new entry with the modified byte[] - modifiedValues.put(key, state); - iterator.remove(); - timestampToUse = serverTimestamp; - } else { - if (rowTsColInfo.getTimestamp() != null) { - timestampToUse = rowTsColInfo.getTimestamp(); - } + } + + @Override + public boolean hasNext() { + return innerIterator.hasNext() || batchOffset + 1 < current.getValue().size() + || iterator.hasNext(); + } + + @Override + public Pair> next() { + if (!innerIterator.hasNext()) { + ++batchOffset; + if (batchOffset == current.getValue().size()) { + current = iterator.next(); + batchOffset = 0; + } + innerIterator = init(); + } + return innerIterator.next(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + public static long getTableTimestamp(final Long tableTimestamp, Long scn) { + return (tableTimestamp != null && tableTimestamp != QueryConstants.UNSET_TIMESTAMP) + ? tableTimestamp + : (scn == null ? HConstants.LATEST_TIMESTAMP : scn); + } + + public static long getMutationTimestamp(final Long scn) { + return scn == null ? HConstants.LATEST_TIMESTAMP : scn; + } + + /** + * Validates that the meta data is valid against the server meta data if we haven't yet done so. + * Otherwise, for every UPSERT VALUES call, we'd need to hit the server to see if the meta data + * has changed. + * @return the server time to use for the upsert if the table or any columns no longer exist + */ + private long[] validateAll(Map commitBatch) throws SQLException { + int i = 0; + long[] timeStamps = new long[commitBatch.size()]; + for (Map.Entry entry : commitBatch.entrySet()) { + TableRef tableRef = entry.getKey(); + timeStamps[i++] = validateAndGetServerTimestamp(tableRef, entry.getValue()); + } + return timeStamps; + } + + private long validateAndGetServerTimestamp(TableRef tableRef, + MultiRowMutationState rowKeyToColumnMap) throws SQLException { + MetaDataClient client = new MetaDataClient(connection); + long serverTimeStamp = tableRef.getTimeStamp(); + PTable table = null; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + try { + // If we're auto committing, we've already validated the schema when we got the + // ColumnResolver, + // so no need to do it again here. + table = tableRef.getTable(); + + // We generally don't re-resolve SYSTEM tables, but if it relies on ROW_TIMESTAMP, we must + // get the latest timestamp in order to upsert data with the correct server-side timestamp + // in case the ROW_TIMESTAMP is not provided in the UPSERT statement. + boolean hitServerForLatestTimestamp = + table.getRowTimestampColPos() != -1 && table.getType() == PTableType.SYSTEM; + MetaDataMutationResult result = client.updateCache(table.getSchemaName().getString(), + table.getTableName().getString(), hitServerForLatestTimestamp); + PTable resolvedTable = result.getTable(); + if (resolvedTable == null) { + throw new TableNotFoundException(table.getSchemaName().getString(), + table.getTableName().getString()); + } + // Always update tableRef table as the one we've cached may be out of date since when we + // executed + // the UPSERT VALUES call and updated in the cache before this. 
+ tableRef.setTable(resolvedTable); + List indexes = resolvedTable.getIndexes(); + for (PTable idxTtable : indexes) { + // If index is still active, but has a non zero INDEX_DISABLE_TIMESTAMP value, then infer + // that + // our failure mode is block writes on index failure. + if ( + (idxTtable.getIndexState() == PIndexState.ACTIVE + || idxTtable.getIndexState() == PIndexState.PENDING_ACTIVE) + && idxTtable.getIndexDisableTimestamp() > 0 + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_FAILURE_BLOCK_WRITE) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + } + long timestamp = result.getMutationTime(); + serverTimeStamp = timestamp; + + /* + * when last_ddl_timestamp validation is enabled, we don't know if this table's cache result + * was force updated during the validation, so always validate columns + */ + if ( + (timestamp != QueryConstants.UNSET_TIMESTAMP && result.wasUpdated()) + || this.validateLastDdlTimestamp + ) { + List columns = Lists.newArrayListWithExpectedSize(table.getColumns().size()); + for (Map.Entry rowEntry : rowKeyToColumnMap + .entrySet()) { + RowMutationState valueEntry = rowEntry.getValue(); + if (valueEntry != null) { + Map colValues = valueEntry.getColumnValues(); + if (colValues != PRow.DELETE_MARKER) { + for (PColumn column : colValues.keySet()) { + if (!column.isDynamic()) { + columns.add(column); } + } } - PRow row = table.newRow(connection.getKeyValueBuilder(), timestampToUse, key, hasOnDupKey); - List rowMutations, rowMutationsPertainingToIndex; - if (rowEntry.getValue().getColumnValues() == PRow.DELETE_MARKER) { // means delete - row.delete(); - rowMutations = row.toRowMutations(); - String sourceOfDelete = getConnection().getSourceOfOperation(); - if (sourceOfDelete != null) { - byte[] sourceOfDeleteBytes = Bytes.toBytes(sourceOfDelete); - // Set the source of operation attribute. - for (Mutation mutation: rowMutations) { - mutation.setAttribute(SOURCE_OPERATION_ATTRIB, sourceOfDeleteBytes); - } - } - if (this.returnResult != null) { - if (this.returnResult == ReturnResult.ROW) { - for (Mutation mutation : rowMutations) { - mutation.setAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT, - PhoenixIndexBuilderHelper.RETURN_RESULT_ROW); - } - } - } - // The DeleteCompiler already generates the deletes for indexes, so no need to do it again - rowMutationsPertainingToIndex = Collections.emptyList(); - - } else { - for (Map.Entry valueEntry : rowEntry.getValue().getColumnValues().entrySet()) { - row.setValue(valueEntry.getKey(), valueEntry.getValue()); - } - if (wildcardIncludesDynamicCols && row.setAttributesForDynamicColumnsIfReqd()) { - row.setAttributeToProcessDynamicColumnsMetadata(); - } - rowMutations = row.toRowMutations(); - // Pass through ON DUPLICATE KEY info through mutations - // In the case of the same clause being used on many statements, this will be - // inefficient because we're transmitting the same information for each mutation. 
- // TODO: use our ServerCache - for (Mutation mutation : rowMutations) { - if (onDupKeyBytes != null) { - mutation.setAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB, onDupKeyBytes); - } - if (this.returnResult != null) { - if (this.returnResult == ReturnResult.ROW) { - mutation.setAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT, - PhoenixIndexBuilderHelper.RETURN_RESULT_ROW); - } - } - } - rowMutationsPertainingToIndex = rowMutations; - } - annotateMutationsWithMetadata(table, rowMutations); - mutationList.addAll(rowMutations); - if (mutationsPertainingToIndex != null) mutationsPertainingToIndex.addAll(rowMutationsPertainingToIndex); + } + } + for (PColumn column : columns) { + if (column != null) { + resolvedTable.getColumnFamily(column.getFamilyName().getString()) + .getPColumnForColumnName(column.getName().getString()); + } + } + } + } catch (Throwable e) { + if (table != null) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod( + table.getTableName().toString(), NUM_METADATA_LOOKUP_FAILURES, 1); + } + throw e; + } finally { + long endTime = EnvironmentEdgeManager.currentTimeMillis(); + GLOBAL_MUTATION_SYSCAT_TIME.update(endTime - startTime); + } + return serverTimeStamp == QueryConstants.UNSET_TIMESTAMP + ? HConstants.LATEST_TIMESTAMP + : serverTimeStamp; + } + + static MutationBytes calculateMutationSize(List mutations, + boolean updateGlobalClientMetrics) { + long byteSize = 0; + long temp; + long deleteSize = 0, deleteCounter = 0; + long upsertsize = 0, upsertCounter = 0; + long atomicUpsertsize = 0; + if (GlobalClientMetrics.isMetricsEnabled()) { + for (Mutation mutation : mutations) { + temp = PhoenixKeyValueUtil.calculateMutationDiskSize(mutation); + byteSize += temp; + if (mutation instanceof Delete) { + deleteSize += temp; + deleteCounter++; + allUpsertsMutations = false; + } else if (mutation instanceof Put) { + upsertsize += temp; + upsertCounter++; + if (mutation.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB) != null) { + atomicUpsertsize += temp; + } + allDeletesMutations = false; + } else { + allUpsertsMutations = false; + allDeletesMutations = false; } - values.putAll(modifiedValues); + } } - - private void annotateMutationsWithMetadata(PTable table, List rowMutations) { - if (table == null) { - return; - } - // Annotate each mutation with enough phoenix metadata so that anyone interested can - // deterministically figure out exactly what Phoenix schema object created the mutation - // Server-side we can annotate the HBase WAL with these. - for (Mutation mutation : rowMutations) { - annotateMutationWithMetadata(table, mutation); - } - - //only annotate external schema id if the change detection flag is on the table. - if (!table.isChangeDetectionEnabled()) { - return; - } - //annotate each mutation with enough metadata so that anyone interested can - // deterministically figure out exactly what Phoenix schema object created the mutation - // Server-side we can annotate the HBase WAL with these. - for (Mutation mutation : rowMutations) { - annotateMutationWithMetadataWithExternalSchemaId(table, mutation); - } - + if (updateGlobalClientMetrics) { + GLOBAL_MUTATION_BYTES.update(byteSize); } + return new MutationBytes(deleteCounter, deleteSize, byteSize, upsertCounter, upsertsize, + atomicUpsertsize); + } - private void annotateMutationWithMetadataWithExternalSchemaId(PTable table, Mutation mutation) { - byte[] externalSchemaRegistryId = table.getExternalSchemaId() != null ? 
- Bytes.toBytes(table.getExternalSchemaId()) : null; - WALAnnotationUtil.annotateMutation(mutation, externalSchemaRegistryId); - } + public long getBatchSizeBytes() { + return batchSizeBytes; + } - private void annotateMutationWithMetadata(PTable table, Mutation mutation) { - byte[] tenantId = table.getTenantId() != null ? table.getTenantId().getBytes() : null; - byte[] schemaName = table.getSchemaName() != null ? table.getSchemaName().getBytes() : null; - byte[] tableName = table.getTableName() != null ? table.getTableName().getBytes() : null; - byte[] tableType = table.getType().getValue().getBytes(); - byte[] externalSchemaRegistryId = table.getExternalSchemaId() != null ? - Bytes.toBytes(table.getExternalSchemaId()) : null; - byte[] lastDDLTimestamp = - table.getLastDDLTimestamp() != null ? Bytes.toBytes(table.getLastDDLTimestamp()) : null; - WALAnnotationUtil.annotateMutation(mutation, tenantId, schemaName, tableName, tableType, lastDDLTimestamp); - } + public long getBatchCount() { + return batchCount; + } - /** - * Get the unsorted list of HBase mutations for the tables with uncommitted data. - * - * @return list of HBase mutations for uncommitted data. - */ - public Iterator>> toMutations(Long timestamp) { - return toMutations(false, timestamp); - } + public void setReturnResult(ReturnResult returnResult) { + this.returnResult = returnResult; + } - public Iterator>> toMutations() { - return toMutations(false, null); - } + public static final class MutationBytes { - public Iterator>> toMutations(final boolean includeMutableIndexes) { - return toMutations(includeMutableIndexes, null); - } + private long deleteMutationCounter; + private long deleteMutationBytes; + private long totalMutationBytes; + private long upsertMutationCounter; + private long upsertMutationBytes; + private long atomicUpsertMutationBytes; // needed to calculate atomic upsert commit time - public Iterator>> toMutations(final boolean includeMutableIndexes, - final Long tableTimestamp) { - final Iterator>> iterator = this.mutationsMap.entrySet().iterator(); - if (!iterator.hasNext()) { return Collections.emptyIterator(); } - Long scn = connection.getSCN(); - final long serverTimestamp = getTableTimestamp(tableTimestamp, scn); - final long mutationTimestamp = getMutationTimestamp(scn); - return new Iterator>>() { - private Map.Entry> current = iterator.next(); - private int batchOffset = 0; - private Iterator>> innerIterator = init(); - - private Iterator>> init() { - final Iterator>> mutationIterator = - addRowMutations(current.getKey(), current.getValue().get(batchOffset), - mutationTimestamp, serverTimestamp, includeMutableIndexes, true); - - return new Iterator>>() { - @Override - public boolean hasNext() { - return mutationIterator.hasNext(); - } - - @Override - public Pair> next() { - Pair> pair = mutationIterator.next(); - return new Pair>(pair.getFirst().getPhysicalName() - .getBytes(), pair.getSecond()); - } - - @Override - public void remove() { - mutationIterator.remove(); - } - }; - } - - @Override - public boolean hasNext() { - return innerIterator.hasNext() || - batchOffset + 1 < current.getValue().size() || - iterator.hasNext(); - } - - @Override - public Pair> next() { - if (!innerIterator.hasNext()) { - ++batchOffset; - if (batchOffset == current.getValue().size()) { - current = iterator.next(); - batchOffset = 0; - } - innerIterator = init(); - } - return innerIterator.next(); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - }; + public 
MutationBytes(long deleteMutationCounter, long deleteMutationBytes, + long totalMutationBytes, long upsertMutationCounter, long upsertMutationBytes, + long atomicUpsertMutationBytes) { + this.deleteMutationCounter = deleteMutationCounter; + this.deleteMutationBytes = deleteMutationBytes; + this.totalMutationBytes = totalMutationBytes; + this.upsertMutationCounter = upsertMutationCounter; + this.upsertMutationBytes = upsertMutationBytes; + this.atomicUpsertMutationBytes = atomicUpsertMutationBytes; } - public static long getTableTimestamp(final Long tableTimestamp, Long scn) { - return (tableTimestamp != null && tableTimestamp != QueryConstants.UNSET_TIMESTAMP) ? tableTimestamp - : (scn == null ? HConstants.LATEST_TIMESTAMP : scn); + public long getDeleteMutationCounter() { + return deleteMutationCounter; } - public static long getMutationTimestamp(final Long scn) { - return scn == null ? HConstants.LATEST_TIMESTAMP : scn; + public long getDeleteMutationBytes() { + return deleteMutationBytes; } - /** - * Validates that the meta data is valid against the server meta data if we haven't yet done so. Otherwise, for - * every UPSERT VALUES call, we'd need to hit the server to see if the meta data has changed. - * - * @return the server time to use for the upsert - * @throws SQLException - * if the table or any columns no longer exist - */ - private long[] validateAll(Map commitBatch) throws SQLException { - int i = 0; - long[] timeStamps = new long[commitBatch.size()]; - for (Map.Entry entry : commitBatch.entrySet()) { - TableRef tableRef = entry.getKey(); - timeStamps[i++] = validateAndGetServerTimestamp(tableRef, entry.getValue()); - } - return timeStamps; + public long getTotalMutationBytes() { + return totalMutationBytes; } - private long validateAndGetServerTimestamp(TableRef tableRef, MultiRowMutationState rowKeyToColumnMap) - throws SQLException { - MetaDataClient client = new MetaDataClient(connection); - long serverTimeStamp = tableRef.getTimeStamp(); - PTable table = null; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - try { - // If we're auto committing, we've already validated the schema when we got the ColumnResolver, - // so no need to do it again here. - table = tableRef.getTable(); - - // We generally don't re-resolve SYSTEM tables, but if it relies on ROW_TIMESTAMP, we must - // get the latest timestamp in order to upsert data with the correct server-side timestamp - // in case the ROW_TIMESTAMP is not provided in the UPSERT statement. - boolean hitServerForLatestTimestamp = - table.getRowTimestampColPos() != -1 && table.getType() == PTableType.SYSTEM; - MetaDataMutationResult result = client.updateCache(table.getSchemaName().getString(), - table.getTableName().getString(), hitServerForLatestTimestamp); - PTable resolvedTable = result.getTable(); - if (resolvedTable == null) { throw new TableNotFoundException(table.getSchemaName().getString(), table - .getTableName().getString()); } - // Always update tableRef table as the one we've cached may be out of date since when we executed - // the UPSERT VALUES call and updated in the cache before this. - tableRef.setTable(resolvedTable); - List indexes = resolvedTable.getIndexes(); - for (PTable idxTtable : indexes) { - // If index is still active, but has a non zero INDEX_DISABLE_TIMESTAMP value, then infer that - // our failure mode is block writes on index failure. 
- if ((idxTtable.getIndexState() == PIndexState.ACTIVE || idxTtable.getIndexState() == PIndexState.PENDING_ACTIVE) - && idxTtable.getIndexDisableTimestamp() > 0) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INDEX_FAILURE_BLOCK_WRITE).setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()).build().buildException(); } - } - long timestamp = result.getMutationTime(); - serverTimeStamp = timestamp; - - /* when last_ddl_timestamp validation is enabled, - we don't know if this table's cache result was force updated - during the validation, so always validate columns */ - if ((timestamp != QueryConstants.UNSET_TIMESTAMP && result.wasUpdated()) - || this.validateLastDdlTimestamp) { - List columns - = Lists.newArrayListWithExpectedSize(table.getColumns().size()); - for (Map.Entry - rowEntry : rowKeyToColumnMap.entrySet()) { - RowMutationState valueEntry = rowEntry.getValue(); - if (valueEntry != null) { - Map colValues = valueEntry.getColumnValues(); - if (colValues != PRow.DELETE_MARKER) { - for (PColumn column : colValues.keySet()) { - if (!column.isDynamic()) { - columns.add(column); - } - } - } - } - } - for (PColumn column : columns) { - if (column != null) { - resolvedTable.getColumnFamily(column.getFamilyName().getString()) - .getPColumnForColumnName(column.getName().getString()); - } - } - } - } catch(Throwable e) { - if (table != null) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(table.getTableName().toString(), - NUM_METADATA_LOOKUP_FAILURES, 1); - } - throw e; - } finally { - long endTime = EnvironmentEdgeManager.currentTimeMillis(); - GLOBAL_MUTATION_SYSCAT_TIME.update(endTime - startTime); - } - return serverTimeStamp == QueryConstants.UNSET_TIMESTAMP ? HConstants.LATEST_TIMESTAMP : serverTimeStamp; - } - - static MutationBytes calculateMutationSize(List mutations, - boolean updateGlobalClientMetrics) { - long byteSize = 0; - long temp; - long deleteSize = 0, deleteCounter = 0; - long upsertsize = 0, upsertCounter = 0; - long atomicUpsertsize = 0; - if (GlobalClientMetrics.isMetricsEnabled()) { - for (Mutation mutation : mutations) { - temp = PhoenixKeyValueUtil.calculateMutationDiskSize(mutation); - byteSize += temp; - if (mutation instanceof Delete) { - deleteSize += temp; - deleteCounter++; - allUpsertsMutations = false; - } else if (mutation instanceof Put) { - upsertsize += temp; - upsertCounter++; - if (mutation.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB) != null) { - atomicUpsertsize += temp; - } - allDeletesMutations = false; - } else { - allUpsertsMutations = false; - allDeletesMutations = false; - } - } - } - if (updateGlobalClientMetrics) { - GLOBAL_MUTATION_BYTES.update(byteSize); - } - return new MutationBytes(deleteCounter, deleteSize, byteSize, upsertCounter, upsertsize, atomicUpsertsize); - } - - public long getBatchSizeBytes() { - return batchSizeBytes; + public long getUpsertMutationCounter() { + return upsertMutationCounter; } - public long getBatchCount() { - return batchCount; + public long getUpsertMutationBytes() { + return upsertMutationBytes; } - public void setReturnResult(ReturnResult returnResult) { - this.returnResult = returnResult; + public long getAtomicUpsertMutationBytes() { + return atomicUpsertMutationBytes; } + } - public static final class MutationBytes { - - private long deleteMutationCounter; - private long deleteMutationBytes; - private long totalMutationBytes; - private long upsertMutationCounter; - private long upsertMutationBytes; - private long 
atomicUpsertMutationBytes; // needed to calculate atomic upsert commit time + public enum MutationMetadataType { + TENANT_ID, + SCHEMA_NAME, + LOGICAL_TABLE_NAME, + TIMESTAMP, + TABLE_TYPE, + EXTERNAL_SCHEMA_ID + } - public MutationBytes(long deleteMutationCounter, long deleteMutationBytes, long totalMutationBytes, - long upsertMutationCounter, long upsertMutationBytes, long atomicUpsertMutationBytes) { - this.deleteMutationCounter = deleteMutationCounter; - this.deleteMutationBytes = deleteMutationBytes; - this.totalMutationBytes = totalMutationBytes; - this.upsertMutationCounter = upsertMutationCounter; - this.upsertMutationBytes = upsertMutationBytes; - this.atomicUpsertMutationBytes = atomicUpsertMutationBytes; - } - - - public long getDeleteMutationCounter() { - return deleteMutationCounter; - } + private static class TableInfo { - public long getDeleteMutationBytes() { - return deleteMutationBytes; - } - - public long getTotalMutationBytes() { - return totalMutationBytes; - } + private final boolean isDataTable; + @Nonnull + private final PName hTableName; + @Nonnull + private final TableRef origTableRef; + private final PTable pTable; - public long getUpsertMutationCounter() { - return upsertMutationCounter; - } - - public long getUpsertMutationBytes() { - return upsertMutationBytes; - } - - public long getAtomicUpsertMutationBytes() { return atomicUpsertMutationBytes; } + public TableInfo(boolean isDataTable, PName hTableName, TableRef origTableRef, PTable pTable) { + super(); + checkNotNull(hTableName); + checkNotNull(origTableRef); + this.isDataTable = isDataTable; + this.hTableName = hTableName; + this.origTableRef = origTableRef; + this.pTable = pTable; } - public enum MutationMetadataType { - TENANT_ID, - SCHEMA_NAME, - LOGICAL_TABLE_NAME, - TIMESTAMP, - TABLE_TYPE, - EXTERNAL_SCHEMA_ID + public boolean isDataTable() { + return isDataTable; } - private static class TableInfo { - - private final boolean isDataTable; - @Nonnull - private final PName hTableName; - @Nonnull - private final TableRef origTableRef; - private final PTable pTable; - - public TableInfo(boolean isDataTable, PName hTableName, TableRef origTableRef, PTable pTable) { - super(); - checkNotNull(hTableName); - checkNotNull(origTableRef); - this.isDataTable = isDataTable; - this.hTableName = hTableName; - this.origTableRef = origTableRef; - this.pTable = pTable; - } - - public boolean isDataTable() { - return isDataTable; - } - - public PName getHTableName() { - return hTableName; - } - - public TableRef getOrigTableRef() { - return origTableRef; - } - - public PTable getPTable() { - return pTable; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + hTableName.hashCode(); - result = prime * result + (isDataTable ? 1231 : 1237); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - TableInfo other = (TableInfo)obj; - if (!hTableName.equals(other.hTableName)) return false; - if (isDataTable != other.isDataTable) return false; - if (!pTable.equals(other.pTable)) return false; - - return true; - } - + public PName getHTableName() { + return hTableName; } - /** - * Split the mutation batches for each table into separate commit batches. - * Each commit batch contains only one mutation batch (MultiRowMutationState) for a table. 
- * @param tableRefIterator - * @return List of commit batches - */ - private List> createCommitBatches(Iterator tableRefIterator) { - List> commitBatches = Lists.newArrayList(); - while (tableRefIterator.hasNext()) { - final TableRef tableRef = tableRefIterator.next(); - List batches = this.mutationsMap.get(tableRef); - if (batches == null) { - continue; - } - for (MultiRowMutationState batch : batches) { - // get the first commit batch which doesn't have any mutations for the table - Map nextCommitBatch = getNextCommitBatchForTable(commitBatches, tableRef); - // add the next mutation batch of the table to the commit batch - nextCommitBatch.put(tableRef, batch); - } - } - return commitBatches; + public TableRef getOrigTableRef() { + return origTableRef; } - // visible for testing - List> createCommitBatches() { - return createCommitBatches(this.mutationsMap.keySet().iterator()); + public PTable getPTable() { + return pTable; } - /** - * Return the first commit batch which doesn't have any mutations for the passed table. - * If no such commit batch exists, creates a new commit batch, adds it to the list of - * commit batches and returns it. - * @param commitBatchesList current list of commit batches - * @param tableRef - * @return commit batch - */ - private Map getNextCommitBatchForTable(List> commitBatchesList, - TableRef tableRef) { - Map nextCommitBatch = null; - for (Map commitBatch : commitBatchesList) { - if (commitBatch.get(tableRef) == null) { - nextCommitBatch = commitBatch; - break; - } - } - if (nextCommitBatch == null) { - // create a new commit batch and add it to the list of commit batches - nextCommitBatch = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); - commitBatchesList.add(nextCommitBatch); - } - return nextCommitBatch; + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + hTableName.hashCode(); + result = prime * result + (isDataTable ? 1231 : 1237); + return result; } - private void send(Iterator tableRefIterator) throws SQLException { - boolean sendAll = false; - boolean validateServerTimestamps = false; - List> commitBatches; - if (tableRefIterator == null) { - commitBatches = createCommitBatches(this.mutationsMap.keySet().iterator()); - sendAll = true; - validateServerTimestamps = true; - } else { - commitBatches = createCommitBatches(tableRefIterator); - } - - //if enabled, validate last ddl timestamps for all tables in the mutationsMap - //for now, force update client cache for all tables if StaleMetadataCacheException is seen - //mutationsMap can be empty, for e.g. 
during a DDL operation - if (this.validateLastDdlTimestamp && !this.mutationsMap.isEmpty()) { - List tableRefs = new ArrayList<>(this.mutationsMap.keySet()); - try { - ValidateLastDDLTimestampUtil.validateLastDDLTimestamp( - connection, tableRefs, true); - } catch (StaleMetadataCacheException e) { - GlobalClientMetrics - .GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER.increment(); - MetaDataClient mc = new MetaDataClient(connection); - PName tenantId = connection.getTenantId(); - LOGGER.debug("Force updating client metadata cache for {}", - ValidateLastDDLTimestampUtil.getInfoString(tenantId, tableRefs)); - for (TableRef tableRef : tableRefs) { - String schemaName = tableRef.getTable().getSchemaName().toString(); - String tableName = tableRef.getTable().getTableName().toString(); - mc.updateCache(tenantId, schemaName, tableName, true); - } - } - } - - for (Map commitBatch : commitBatches) { - long [] serverTimestamps = validateServerTimestamps ? validateAll(commitBatch) : null; - sendBatch(commitBatch, serverTimestamps, sendAll); - } - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + TableInfo other = (TableInfo) obj; + if (!hTableName.equals(other.hTableName)) return false; + if (isDataTable != other.isDataTable) return false; + if (!pTable.equals(other.pTable)) return false; + + return true; + } + + } + + /** + * Split the mutation batches for each table into separate commit batches. Each commit batch + * contains only one mutation batch (MultiRowMutationState) for a table. + * @return List of commit batches + */ + private List> + createCommitBatches(Iterator tableRefIterator) { + List> commitBatches = Lists.newArrayList(); + while (tableRefIterator.hasNext()) { + final TableRef tableRef = tableRefIterator.next(); + List batches = this.mutationsMap.get(tableRef); + if (batches == null) { + continue; + } + for (MultiRowMutationState batch : batches) { + // get the first commit batch which doesn't have any mutations for the table + Map nextCommitBatch = + getNextCommitBatchForTable(commitBatches, tableRef); + // add the next mutation batch of the table to the commit batch + nextCommitBatch.put(tableRef, batch); + } + } + return commitBatches; + } + + // visible for testing + List> createCommitBatches() { + return createCommitBatches(this.mutationsMap.keySet().iterator()); + } + + /** + * Return the first commit batch which doesn't have any mutations for the passed table. If no such + * commit batch exists, creates a new commit batch, adds it to the list of commit batches and + * returns it. 
+ * @param commitBatchesList current list of commit batches + * @return commit batch + */ + private Map getNextCommitBatchForTable( + List> commitBatchesList, TableRef tableRef) { + Map nextCommitBatch = null; + for (Map commitBatch : commitBatchesList) { + if (commitBatch.get(tableRef) == null) { + nextCommitBatch = commitBatch; + break; + } + } + if (nextCommitBatch == null) { + // create a new commit batch and add it to the list of commit batches + nextCommitBatch = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); + commitBatchesList.add(nextCommitBatch); + } + return nextCommitBatch; + } + + private void send(Iterator tableRefIterator) throws SQLException { + boolean sendAll = false; + boolean validateServerTimestamps = false; + List> commitBatches; + if (tableRefIterator == null) { + commitBatches = createCommitBatches(this.mutationsMap.keySet().iterator()); + sendAll = true; + validateServerTimestamps = true; + } else { + commitBatches = createCommitBatches(tableRefIterator); + } + + // if enabled, validate last ddl timestamps for all tables in the mutationsMap + // for now, force update client cache for all tables if StaleMetadataCacheException is seen + // mutationsMap can be empty, for e.g. during a DDL operation + if (this.validateLastDdlTimestamp && !this.mutationsMap.isEmpty()) { + List tableRefs = new ArrayList<>(this.mutationsMap.keySet()); + try { + ValidateLastDDLTimestampUtil.validateLastDDLTimestamp(connection, tableRefs, true); + } catch (StaleMetadataCacheException e) { + GlobalClientMetrics.GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER.increment(); + MetaDataClient mc = new MetaDataClient(connection); + PName tenantId = connection.getTenantId(); + LOGGER.debug("Force updating client metadata cache for {}", + ValidateLastDDLTimestampUtil.getInfoString(tenantId, tableRefs)); + for (TableRef tableRef : tableRefs) { + String schemaName = tableRef.getTable().getSchemaName().toString(); + String tableName = tableRef.getTable().getTableName().toString(); + mc.updateCache(tenantId, schemaName, tableName, true); + } + } + } + + for (Map commitBatch : commitBatches) { + long[] serverTimestamps = validateServerTimestamps ? validateAll(commitBatch) : null; + sendBatch(commitBatch, serverTimestamps, sendAll); + } + } + + private void sendBatch(Map commitBatch, long[] serverTimeStamps, + boolean sendAll) throws SQLException { + int i = 0; + Map> physicalTableMutationMap = Maps.newLinkedHashMap(); + + // add tracing for this operation + try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) { + Span span = trace.getSpan(); + ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(); + for (Map.Entry entry : commitBatch.entrySet()) { + // at this point we are going through mutations for each table + final TableRef tableRef = entry.getKey(); + MultiRowMutationState multiRowMutationState = entry.getValue(); + if (multiRowMutationState == null || multiRowMutationState.isEmpty()) { + continue; + } + // Validate as we go if transactional since we can undo if a problem occurs (which is + // unlikely) + long serverTimestamp = serverTimeStamps == null + ? validateAndGetServerTimestamp(tableRef, multiRowMutationState) + : serverTimeStamps[i++]; + final PTable table = tableRef.getTable(); + Long scn = connection.getSCN(); + long mutationTimestamp = scn == null + ? (table.isTransactional() == true + ? 
HConstants.LATEST_TIMESTAMP + : EnvironmentEdgeManager.currentTimeMillis()) + : scn; + Iterator>> mutationsIterator = addRowMutations(tableRef, + multiRowMutationState, mutationTimestamp, serverTimestamp, false, sendAll); + // build map from physical table to mutation list + boolean isDataTable = true; + while (mutationsIterator.hasNext()) { + Pair> pair = mutationsIterator.next(); + PTable logicalTable = pair.getFirst(); + List mutationList = pair.getSecond(); + + TableInfo tableInfo = + new TableInfo(isDataTable, logicalTable.getPhysicalName(), tableRef, logicalTable); + + List oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList); + if (oldMutationList != null) mutationList.addAll(0, oldMutationList); + isDataTable = false; + } + // For transactions, track the statement indexes as we send data + // over because our CommitException should include all statements + // involved in the transaction since none of them would have been + // committed in the event of a failure. + if (table.isTransactional()) { + addUncommittedStatementIndexes(multiRowMutationState.values()); + if (txMutations.isEmpty()) { + txMutations = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); + } + // Keep all mutations we've encountered until a commit or rollback. + // This is not ideal, but there's not good way to get the values back + // in the event that we need to replay the commit. + // Copy TableRef so we have the original PTable and know when the + // indexes have changed. + joinMutationState(new TableRef(tableRef), multiRowMutationState, txMutations); + } + } + + Map> unverifiedIndexMutations = new LinkedHashMap<>(); + Map> verifiedOrDeletedIndexMutations = new LinkedHashMap<>(); + filterIndexCheckerMutations(physicalTableMutationMap, unverifiedIndexMutations, + verifiedOrDeletedIndexMutations); + + // Phase 1: Send index mutations with the empty column value = "unverified" + sendMutations(unverifiedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, false); + + // Phase 2: Send data table and other indexes + sendMutations(physicalTableMutationMap.entrySet().iterator(), span, indexMetaDataPtr, false); + + // Phase 3: Send put index mutations with the empty column value = "verified" and/or delete + // index mutations + try { + sendMutations(verifiedOrDeletedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, + true); + } catch (SQLException ex) { + LOGGER.warn( + "Ignoring exception that happened during setting index verified value to verified=TRUE ", + ex); + } + } + } + + private void sendMutations(Iterator>> mutationsIterator, + Span span, ImmutableBytesWritable indexMetaDataPtr, boolean isVerifiedPhase) + throws SQLException { + while (mutationsIterator.hasNext()) { + Entry> pair = mutationsIterator.next(); + TableInfo tableInfo = pair.getKey(); + byte[] htableName = tableInfo.getHTableName().getBytes(); + String htableNameStr = tableInfo.getHTableName().getString(); + List mutationList = pair.getValue(); + List> mutationBatchList = + getMutationBatchList(batchSize, batchSizeBytes, mutationList); + + // create a span per target table + // TODO maybe we can be smarter about the table name to string here? 
+ Span child = + Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName)); + + int retryCount = 0; + boolean shouldRetry = false; + long numMutations = 0; + long mutationSizeBytes = 0; + long mutationCommitTime = 0; + long numFailedMutations = 0; + long numFailedPhase3Mutations = 0; + + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + MutationBytes totalMutationBytesObject = null; + boolean shouldRetryIndexedMutation = false; + IndexWriteException iwe = null; + do { + TableRef origTableRef = tableInfo.getOrigTableRef(); + PTable table = origTableRef.getTable(); + table.getIndexMaintainers(indexMetaDataPtr, connection); + final ServerCache cache = tableInfo.isDataTable() + ? IndexMetaDataCacheClient.setMetaDataOnMutations(connection, table, mutationList, + indexMetaDataPtr) + : null; + // If we haven't retried yet, retry for this case only, as it's possible that + // a split will occur after we send the index metadata cache to all known + // region servers. + shouldRetry = cache != null; + SQLException sqlE = null; + Table hTable = connection.getQueryServices().getTable(htableName); + List currentMutationBatch = null; + boolean areAllBatchesSuccessful = false; + Object[] resultObjects = null; - private void sendBatch(Map commitBatch, long[] serverTimeStamps, boolean sendAll) throws SQLException { - int i = 0; - Map> physicalTableMutationMap = Maps.newLinkedHashMap(); - - // add tracing for this operation - try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) { - Span span = trace.getSpan(); - ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(); - for (Map.Entry entry : commitBatch.entrySet()) { - // at this point we are going through mutations for each table - final TableRef tableRef = entry.getKey(); - MultiRowMutationState multiRowMutationState = entry.getValue(); - if (multiRowMutationState == null || multiRowMutationState.isEmpty()) { - continue; - } - // Validate as we go if transactional since we can undo if a problem occurs (which is unlikely) - long - serverTimestamp = - serverTimeStamps == null ? - validateAndGetServerTimestamp(tableRef, multiRowMutationState) : - serverTimeStamps[i++]; - final PTable table = tableRef.getTable(); - Long scn = connection.getSCN(); - long mutationTimestamp = scn == null ? - (table.isTransactional() == true ? HConstants.LATEST_TIMESTAMP : EnvironmentEdgeManager.currentTimeMillis()) - : scn; - Iterator>> - mutationsIterator = - addRowMutations(tableRef, multiRowMutationState, mutationTimestamp, - serverTimestamp, false, sendAll); - // build map from physical table to mutation list - boolean isDataTable = true; - while (mutationsIterator.hasNext()) { - Pair> pair = mutationsIterator.next(); - PTable logicalTable = pair.getFirst(); - List mutationList = pair.getSecond(); - - TableInfo tableInfo = new TableInfo(isDataTable, logicalTable.getPhysicalName(), - tableRef, logicalTable); - - List - oldMutationList = - physicalTableMutationMap.put(tableInfo, mutationList); - if (oldMutationList != null) mutationList.addAll(0, oldMutationList); - isDataTable = false; - } - // For transactions, track the statement indexes as we send data - // over because our CommitException should include all statements - // involved in the transaction since none of them would have been - // committed in the event of a failure. 
- if (table.isTransactional()) { - addUncommittedStatementIndexes(multiRowMutationState.values()); - if (txMutations.isEmpty()) { - txMutations = Maps.newHashMapWithExpectedSize(this.mutationsMap.size()); - } - // Keep all mutations we've encountered until a commit or rollback. - // This is not ideal, but there's not good way to get the values back - // in the event that we need to replay the commit. - // Copy TableRef so we have the original PTable and know when the - // indexes have changed. - joinMutationState(new TableRef(tableRef), multiRowMutationState, txMutations); - } + try { + if (table.isTransactional()) { + // Track tables to which we've sent uncommitted data + if (tableInfo.isDataTable()) { + uncommittedPhysicalNames.add(table.getPhysicalName().getString()); + phoenixTransactionContext.markDMLFence(table); } - - Map> unverifiedIndexMutations = new LinkedHashMap<>(); - Map> verifiedOrDeletedIndexMutations = new LinkedHashMap<>(); - filterIndexCheckerMutations(physicalTableMutationMap, unverifiedIndexMutations, - verifiedOrDeletedIndexMutations); - - // Phase 1: Send index mutations with the empty column value = "unverified" - sendMutations(unverifiedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, false); - - // Phase 2: Send data table and other indexes - sendMutations(physicalTableMutationMap.entrySet().iterator(), span, indexMetaDataPtr, false); - - // Phase 3: Send put index mutations with the empty column value = "verified" and/or delete index mutations - try { - sendMutations(verifiedOrDeletedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr, true); - } catch (SQLException ex) { - LOGGER.warn( - "Ignoring exception that happened during setting index verified value to verified=TRUE ", - ex); + // Only pass true for last argument if the index is being written to on it's own (i.e. + // initial + // index population), not if it's being written to for normal maintenance due to writes + // to + // the data table. This case is different because the initial index population does not + // need + // to be done transactionally since the index is only made active after all writes have + // occurred successfully. + hTable = phoenixTransactionContext.getTransactionalTableWriter(connection, table, + hTable, tableInfo.isDataTable() && table.getType() == PTableType.INDEX); + } + numMutations = mutationList.size(); + GLOBAL_MUTATION_BATCH_SIZE.update(numMutations); + totalMutationBytesObject = calculateMutationSize(mutationList, true); + + child.addTimelineAnnotation("Attempt " + retryCount); + Iterator> itrListMutation = mutationBatchList.iterator(); + while (itrListMutation.hasNext()) { + final List mutationBatch = itrListMutation.next(); + currentMutationBatch = mutationBatch; + if (connection.getAutoCommit() && mutationBatch.size() == 1) { + resultObjects = new Object[mutationBatch.size()]; } - } - } - - private void sendMutations(Iterator>> mutationsIterator, Span span, ImmutableBytesWritable indexMetaDataPtr, boolean isVerifiedPhase) - throws SQLException { - while (mutationsIterator.hasNext()) { - Entry> pair = mutationsIterator.next(); - TableInfo tableInfo = pair.getKey(); - byte[] htableName = tableInfo.getHTableName().getBytes(); - String htableNameStr = tableInfo.getHTableName().getString(); - List mutationList = pair.getValue(); - List> mutationBatchList = - getMutationBatchList(batchSize, batchSizeBytes, mutationList); - - // create a span per target table - // TODO maybe we can be smarter about the table name to string here? 
- Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName)); - - int retryCount = 0; - boolean shouldRetry = false; - long numMutations = 0; - long mutationSizeBytes = 0; - long mutationCommitTime = 0; - long numFailedMutations = 0; - long numFailedPhase3Mutations = 0; - - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - MutationBytes totalMutationBytesObject = null; - boolean shouldRetryIndexedMutation = false; - IndexWriteException iwe = null; - do { - TableRef origTableRef = tableInfo.getOrigTableRef(); - PTable table = origTableRef.getTable(); - table.getIndexMaintainers(indexMetaDataPtr, connection); - final ServerCache cache = tableInfo.isDataTable() ? - IndexMetaDataCacheClient.setMetaDataOnMutations(connection, table, - mutationList, indexMetaDataPtr) : null; - // If we haven't retried yet, retry for this case only, as it's possible that - // a split will occur after we send the index metadata cache to all known - // region servers. - shouldRetry = cache != null; - SQLException sqlE = null; - Table hTable = connection.getQueryServices().getTable(htableName); - List currentMutationBatch = null; - boolean areAllBatchesSuccessful = false; - Object[] resultObjects = null; - - try { - if (table.isTransactional()) { - // Track tables to which we've sent uncommitted data - if (tableInfo.isDataTable()) { - uncommittedPhysicalNames.add(table.getPhysicalName().getString()); - phoenixTransactionContext.markDMLFence(table); - } - // Only pass true for last argument if the index is being written to on it's own (i.e. initial - // index population), not if it's being written to for normal maintenance due to writes to - // the data table. This case is different because the initial index population does not need - // to be done transactionally since the index is only made active after all writes have - // occurred successfully. 
- hTable = phoenixTransactionContext.getTransactionalTableWriter(connection, table, hTable, tableInfo.isDataTable() && table.getType() == PTableType.INDEX); - } - numMutations = mutationList.size(); - GLOBAL_MUTATION_BATCH_SIZE.update(numMutations); - totalMutationBytesObject = calculateMutationSize(mutationList, true); - - child.addTimelineAnnotation("Attempt " + retryCount); - Iterator> itrListMutation = mutationBatchList.iterator(); - while (itrListMutation.hasNext()) { - final List mutationBatch = itrListMutation.next(); - currentMutationBatch = mutationBatch; - if (connection.getAutoCommit() && mutationBatch.size() == 1) { - resultObjects = new Object[mutationBatch.size()]; - } - if (shouldRetryIndexedMutation) { - // if there was an index write failure, retry the mutation in a loop - final Table finalHTable = hTable; - final ImmutableBytesWritable finalindexMetaDataPtr = - indexMetaDataPtr; - final PTable finalPTable = table; - final Object[] finalResultObjects = resultObjects; - PhoenixIndexFailurePolicyHelper.doBatchWithRetries(new MutateCommand() { - @Override - public void doMutation() throws IOException { - try { - finalHTable.batch(mutationBatch, finalResultObjects); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException(e); - } catch (IOException e) { - e = updateTableRegionCacheIfNecessary(e); - throw e; - } - } - - @Override - public List getMutationList() { - return mutationBatch; - } - - private IOException - updateTableRegionCacheIfNecessary(IOException ioe) { - SQLException sqlE = - ClientUtil.parseLocalOrRemoteServerException(ioe); - if (sqlE != null - && sqlE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND - .getErrorCode()) { - try { - connection.getQueryServices().clearTableRegionCache( - finalHTable.getName()); - IndexMetaDataCacheClient.setMetaDataOnMutations( - connection, finalPTable, mutationBatch, - finalindexMetaDataPtr); - } catch (SQLException e) { - return ClientUtil.createIOException( - "Exception during updating index meta data cache", - ioe); - } - } - return ioe; - } - }, iwe, connection, connection.getQueryServices().getProps()); - shouldRetryIndexedMutation = false; - } else { - hTable.batch(mutationBatch, resultObjects); - } - - if (resultObjects != null) { - Result result = (Result) resultObjects[0]; - if (result != null && !result.isEmpty()) { - Cell cell = result.getColumnLatestCell( - Bytes.toBytes(UPSERT_CF), Bytes.toBytes(UPSERT_STATUS_CQ)); - numUpdatedRowsForAutoCommit = PInteger.INSTANCE.getCodec() - .decodeInt(cell.getValueArray(), cell.getValueOffset(), - SortOrder.getDefault()); - if (this.returnResult != null) { - if (this.returnResult == ReturnResult.ROW) { - this.result = result; - } - } - } else { - numUpdatedRowsForAutoCommit = 1; - } - } - - // remove each batch from the list once it gets applied - // so when failures happens for any batch we only start - // from that batch only instead of doing duplicate reply of already - // applied batches from entire list, also we can set - // REPLAY_ONLY_INDEX_WRITES for first batch - // only in case of 1121 SQLException - itrListMutation.remove(); - - batchCount++; - if (LOGGER.isDebugEnabled()) - LOGGER.debug("Sent batch of " + mutationBatch.size() + " for " - + Bytes.toString(htableName)); - } - child.stop(); - child.stop(); - shouldRetry = false; - numFailedMutations = 0; - - // Remove batches as we process them - removeMutations(this.mutationsMap, origTableRef); - if (tableInfo.isDataTable()) { - numRows -= numMutations; - // 
recalculate the estimated size - estimatedSize = PhoenixKeyValueUtil.getEstimatedRowMutationSizeWithBatch(this.mutationsMap); - } - areAllBatchesSuccessful = true; - } catch (Exception e) { - long serverTimestamp = ClientUtil.parseServerTimestamp(e); - SQLException inferredE = ClientUtil.parseServerExceptionOrNull(e); - if (inferredE != null) { - if (shouldRetry - && retryCount == 0 - && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND - .getErrorCode()) { - // Swallow this exception once, as it's possible that we split after sending the index - // metadata - // and one of the region servers doesn't have it. This will cause it to have it the next - // go around. - // If it fails again, we don't retry. - String msg = "Swallowing exception and retrying after clearing meta cache on connection. " - + inferredE; - LOGGER.warn(LogUtil.addCustomAnnotations(msg, connection)); - connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName)); - - // add a new child span as this one failed - child.addTimelineAnnotation(msg); - child.stop(); - child = Tracing.child(span, "Failed batch, attempting retry"); - - continue; - } else if (inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) { - iwe = PhoenixIndexFailurePolicyHelper.getIndexWriteException(inferredE); - if (iwe != null && !shouldRetryIndexedMutation) { - // For an index write failure, the data table write succeeded, - // so when we retry we need to set REPLAY_WRITES - // for first batch in list only. - for (Mutation m : mutationBatchList.get(0)) { - if (!PhoenixIndexMetaData.isIndexRebuild( - m.getAttributesMap())){ - m.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, - BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES - ); - } - PhoenixKeyValueUtil.setTimestamp(m, serverTimestamp); - } - shouldRetry = true; - shouldRetryIndexedMutation = true; - continue; - } - } - e = inferredE; - } - // Throw to client an exception that indicates the statements that - // were not committed successfully. 
- int[] uncommittedStatementIndexes = getUncommittedStatementIndexes(); - sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp); - - numFailedMutations = uncommittedStatementIndexes.length; - - if (isVerifiedPhase) { - numFailedPhase3Mutations = numFailedMutations; - GLOBAL_MUTATION_INDEX_COMMIT_FAILURE_COUNT.update(numFailedPhase3Mutations); - } - } finally { - mutationCommitTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime); - MutationMetric failureMutationMetrics = MutationMetric.EMPTY_METRIC; - if (!areAllBatchesSuccessful) { - failureMutationMetrics = - updateMutationBatchFailureMetrics(currentMutationBatch, - htableNameStr, numFailedMutations, - table.isTransactional()); - } + if (shouldRetryIndexedMutation) { + // if there was an index write failure, retry the mutation in a loop + final Table finalHTable = hTable; + final ImmutableBytesWritable finalindexMetaDataPtr = indexMetaDataPtr; + final PTable finalPTable = table; + final Object[] finalResultObjects = resultObjects; + PhoenixIndexFailurePolicyHelper.doBatchWithRetries(new MutateCommand() { + @Override + public void doMutation() throws IOException { + try { + finalHTable.batch(mutationBatch, finalResultObjects); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } catch (IOException e) { + e = updateTableRegionCacheIfNecessary(e); + throw e; + } + } - MutationMetric committedMutationsMetric = - getCommittedMutationsMetric( - totalMutationBytesObject, - mutationBatchList, - numMutations, - numFailedMutations, - numFailedPhase3Mutations, - mutationCommitTime); - // Combine failure mutation metrics with committed ones for the final picture - committedMutationsMetric.combineMetric(failureMutationMetrics); - mutationMetricQueue.addMetricsForTable(htableNameStr, committedMutationsMetric); - - if (allUpsertsMutations ^ allDeletesMutations) { - //success cases are updated for both cases autoCommit=true and conn.commit explicit - if (areAllBatchesSuccessful){ - TableMetricsManager - .updateMetricsMethod(htableNameStr, allUpsertsMutations ? UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER : - DELETE_AGGREGATE_SUCCESS_SQL_COUNTER, 1); - } - //Failures cases are updated only for conn.commit explicit case. - if (!areAllBatchesSuccessful && !connection.getAutoCommit()){ - TableMetricsManager.updateMetricsMethod(htableNameStr, allUpsertsMutations ? UPSERT_AGGREGATE_FAILURE_SQL_COUNTER : - DELETE_AGGREGATE_FAILURE_SQL_COUNTER, 1); - } - // Update size and latency histogram metrics. 
- TableMetricsManager.updateSizeHistogramMetricsForMutations(htableNameStr, - committedMutationsMetric.getTotalMutationsSizeBytes().getValue(), allUpsertsMutations); - Long latency = timeInExecuteMutationMap.get(htableNameStr); - if (latency == null) { - latency = 0l; - } - latency += mutationCommitTime; - TableMetricsManager.updateLatencyHistogramForMutations(htableNameStr, - latency, allUpsertsMutations); - } - resetAllMutationState(); + @Override + public List getMutationList() { + return mutationBatch; + } + private IOException updateTableRegionCacheIfNecessary(IOException ioe) { + SQLException sqlE = ClientUtil.parseLocalOrRemoteServerException(ioe); + if ( + sqlE != null && sqlE.getErrorCode() + == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode() + ) { try { - if (cache != null) cache.close(); - } finally { - try { - hTable.close(); - } catch (IOException e) { - if (sqlE != null) { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } else { - sqlE = ClientUtil.parseServerException(e); - } - } - if (sqlE != null) { throw sqlE; } + connection.getQueryServices().clearTableRegionCache(finalHTable.getName()); + IndexMetaDataCacheClient.setMetaDataOnMutations(connection, finalPTable, + mutationBatch, finalindexMetaDataPtr); + } catch (SQLException e) { + return ClientUtil + .createIOException("Exception during updating index meta data cache", ioe); } + } + return ioe; } - } while (shouldRetry && retryCount++ < 1); - } - } - - /** - * Update metrics related to failed mutations - * @param failedMutationBatch the batch of mutations that failed - * @param tableName table that was to be mutated - * @param numFailedMutations total number of failed mutations - * @param isTransactional true if the table is transactional - */ - public static MutationMetricQueue.MutationMetric updateMutationBatchFailureMetrics( - List failedMutationBatch, - String tableName, - long numFailedMutations, - boolean isTransactional) { - - if (failedMutationBatch == null || failedMutationBatch.isEmpty() || - Strings.isNullOrEmpty(tableName)) { - return MutationMetricQueue.MutationMetric.EMPTY_METRIC; - } - - long numUpsertMutationsInBatch = 0L; - long numDeleteMutationsInBatch = 0L; - - for (Mutation m : failedMutationBatch) { - if (m instanceof Put) { - numUpsertMutationsInBatch++; - } else if (m instanceof Delete) { - numDeleteMutationsInBatch++; + }, iwe, connection, connection.getQueryServices().getProps()); + shouldRetryIndexedMutation = false; + } else { + hTable.batch(mutationBatch, resultObjects); } - } - - long totalFailedMutation = numUpsertMutationsInBatch + numDeleteMutationsInBatch; - //this case should not happen but the if condition makes sense if this ever happens - if (totalFailedMutation < numFailedMutations) { - LOGGER.warn( - "total failed mutation less than num of failed mutation. This is not expected."); - totalFailedMutation = numFailedMutations; - } - - long totalNumFailedMutations = allDeletesMutations && !isTransactional - ? 
numDeleteMutationsInBatch : totalFailedMutation; - GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(totalNumFailedMutations); - - // Update the MUTATION_BATCH_FAILED_SIZE counter with the number of failed delete mutations - // in case we are dealing with all deletes for a non-transactional table, since there is a - // bug in sendMutations where we don't get the correct value for numFailedMutations when - // we don't use transactions - return new MutationMetricQueue.MutationMetric(0, 0, 0, 0, 0, 0, - totalNumFailedMutations, - 0, 0, 0, 0, - numUpsertMutationsInBatch, - allUpsertsMutations ? 1 : 0, - numDeleteMutationsInBatch, - allDeletesMutations ? 1 : 0); - } - /** - * Get mutation metrics that correspond to committed mutations only - * @param totalMutationBytesObject MutationBytes object corresponding to all the mutations we - * attempted to commit including those that failed, those that - * were already sent and those that were unsent - * @param unsentMutationBatchList list of mutation batches that are unsent - * @param numMutations total number of mutations - * @param numFailedMutations number of failed mutations in the most recent failed batch - * @param numFailedPhase3Mutations number of mutations failed in phase 3 of index commits - * @param mutationCommitTime time taken for committing all mutations - * @return mutation metric object just accounting for mutations that are already - * successfully committed - */ - static MutationMetric getCommittedMutationsMetric( - MutationBytes totalMutationBytesObject, List> unsentMutationBatchList, - long numMutations, long numFailedMutations, - long numFailedPhase3Mutations, long mutationCommitTime) { - long committedUpsertMutationBytes = totalMutationBytesObject == null ? 0 : - totalMutationBytesObject.getUpsertMutationBytes(); - long committedAtomicUpsertMutationBytes = totalMutationBytesObject == null ? 0: - totalMutationBytesObject.getAtomicUpsertMutationBytes(); - long committedDeleteMutationBytes = totalMutationBytesObject == null ? 0 : - totalMutationBytesObject.getDeleteMutationBytes(); - long committedUpsertMutationCounter = totalMutationBytesObject == null ? 0 : - totalMutationBytesObject.getUpsertMutationCounter(); - long committedDeleteMutationCounter = totalMutationBytesObject == null ? 0 : - totalMutationBytesObject.getDeleteMutationCounter(); - long committedTotalMutationBytes = totalMutationBytesObject == null ? 
0 : - totalMutationBytesObject.getTotalMutationBytes(); - long upsertMutationCommitTime = 0L; - long atomicUpsertMutationCommitTime = 0L; - long deleteMutationCommitTime = 0L; - - if (totalMutationBytesObject != null && numFailedMutations != 0) { - List uncommittedMutationsList = new ArrayList<>(); - for (List mutationBatch : unsentMutationBatchList) { - uncommittedMutationsList.addAll(mutationBatch); + if (resultObjects != null) { + Result result = (Result) resultObjects[0]; + if (result != null && !result.isEmpty()) { + Cell cell = result.getColumnLatestCell(Bytes.toBytes(UPSERT_CF), + Bytes.toBytes(UPSERT_STATUS_CQ)); + numUpdatedRowsForAutoCommit = PInteger.INSTANCE.getCodec() + .decodeInt(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault()); + if (this.returnResult != null) { + if (this.returnResult == ReturnResult.ROW) { + this.result = result; + } + } + } else { + numUpdatedRowsForAutoCommit = 1; + } } - // Calculate the uncommitted mutations - MutationBytes uncommittedMutationBytesObject = - calculateMutationSize(uncommittedMutationsList, false); - committedUpsertMutationBytes -= - uncommittedMutationBytesObject.getUpsertMutationBytes(); - committedAtomicUpsertMutationBytes -= - uncommittedMutationBytesObject.getAtomicUpsertMutationBytes(); - committedDeleteMutationBytes -= - uncommittedMutationBytesObject.getDeleteMutationBytes(); - committedUpsertMutationCounter -= - uncommittedMutationBytesObject.getUpsertMutationCounter(); - committedDeleteMutationCounter -= - uncommittedMutationBytesObject.getDeleteMutationCounter(); - committedTotalMutationBytes -= - uncommittedMutationBytesObject.getTotalMutationBytes(); - } - // TODO: For V1, we don't expect mixed upserts and deletes so this is fine, - // but we may need to support it later, at which point we should segregate upsert - // mutation time vs delete mutation time - if (committedTotalMutationBytes > 0) { - upsertMutationCommitTime = - (long)Math.floor((double)(committedUpsertMutationBytes * mutationCommitTime)/ - committedTotalMutationBytes); - atomicUpsertMutationCommitTime = - (long)Math.floor((double)(committedAtomicUpsertMutationBytes * mutationCommitTime)/ - committedTotalMutationBytes); - deleteMutationCommitTime = - (long)Math.ceil((double)(committedDeleteMutationBytes * mutationCommitTime)/ - committedTotalMutationBytes); - } - return new MutationMetric(numMutations, - committedUpsertMutationBytes, - committedDeleteMutationBytes, - upsertMutationCommitTime, - atomicUpsertMutationCommitTime, - deleteMutationCommitTime, - 0, // num failed mutations have been counted already in updateMutationBatchFailureMetrics() - committedUpsertMutationCounter, - committedDeleteMutationCounter, - committedTotalMutationBytes, - numFailedPhase3Mutations, - 0, 0, 0, 0 ); - } - - private void filterIndexCheckerMutations(Map> mutationMap, - Map> unverifiedIndexMutations, - Map> verifiedOrDeletedIndexMutations) throws SQLException { - Iterator>> mapIter = mutationMap.entrySet().iterator(); - - while (mapIter.hasNext()) { - Entry> pair = mapIter.next(); - TableInfo tableInfo = pair.getKey(); - if (!tableInfo.getPTable().getType().equals(PTableType.INDEX)) { - continue; - } - PTable logicalTable = tableInfo.getPTable(); - if (tableInfo.getOrigTableRef().getTable().isImmutableRows() && - (this.indexRegionObserverEnabledAllTables || - IndexUtil.isGlobalIndexCheckerEnabled(connection, tableInfo.getHTableName())) + // remove each batch from the list once it gets applied + // so when failures happens for any batch we only start + // 
from that batch only instead of doing duplicate reply of already + // applied batches from entire list, also we can set + // REPLAY_ONLY_INDEX_WRITES for first batch + // only in case of 1121 SQLException + itrListMutation.remove(); + + batchCount++; + if (LOGGER.isDebugEnabled()) LOGGER.debug( + "Sent batch of " + mutationBatch.size() + " for " + Bytes.toString(htableName)); + } + child.stop(); + child.stop(); + shouldRetry = false; + numFailedMutations = 0; + + // Remove batches as we process them + removeMutations(this.mutationsMap, origTableRef); + if (tableInfo.isDataTable()) { + numRows -= numMutations; + // recalculate the estimated size + estimatedSize = + PhoenixKeyValueUtil.getEstimatedRowMutationSizeWithBatch(this.mutationsMap); + } + areAllBatchesSuccessful = true; + } catch (Exception e) { + long serverTimestamp = ClientUtil.parseServerTimestamp(e); + SQLException inferredE = ClientUtil.parseServerExceptionOrNull(e); + if (inferredE != null) { + if ( + shouldRetry && retryCount == 0 + && inferredE.getErrorCode() + == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode() ) { - - byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(logicalTable); - byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(logicalTable).getFirst(); - List mutations = pair.getValue(); - - for (Mutation m : mutations) { - if (m == null) { - continue; - } - if (m instanceof Delete) { - if (tableInfo.getOrigTableRef().getTable().getIndexType() - != PTable.IndexType.UNCOVERED_GLOBAL) { - Put put = new Put(m.getRow()); - put.addColumn(emptyCF, emptyCQ, IndexUtil.getMaxTimestamp(m), - QueryConstants.UNVERIFIED_BYTES); - // The Delete gets marked as unverified in Phase 1 and gets deleted on Phase 3. - addToMap(unverifiedIndexMutations, tableInfo, put); - } - // Uncovered indexes do not use two phase commit write and thus do not use - // the verified status stored in the empty column. The index rows are - // deleted only after their data table rows are deleted - addToMap(verifiedOrDeletedIndexMutations, tableInfo, m); - } else if (m instanceof Put) { - long timestamp = IndexUtil.getMaxTimestamp(m); - - // Phase 1 index mutations are set to unverified - // Send entire mutation with the unverified status - // Remove the empty column prepared by Index codec as we need to change its value - IndexUtil.removeEmptyColumn(m, emptyCF, emptyCQ); - ((Put) m).addColumn(emptyCF, emptyCQ, timestamp, - QueryConstants.UNVERIFIED_BYTES); - addToMap(unverifiedIndexMutations, tableInfo, m); - - // Phase 3 mutations are verified - // Uncovered indexes do not use two phase commit write. The index rows are - // updated only once and before their data table rows are updated. So - // index rows do not have phase-3 put mutations - if (tableInfo.getOrigTableRef().getTable().getIndexType() - != PTable.IndexType.UNCOVERED_GLOBAL) { - Put verifiedPut = new Put(m.getRow()); - verifiedPut.addColumn(emptyCF, emptyCQ, timestamp, - QueryConstants.VERIFIED_BYTES); - addToMap(verifiedOrDeletedIndexMutations, tableInfo, verifiedPut); - } - } else { - addToMap(unverifiedIndexMutations, tableInfo, m); + // Swallow this exception once, as it's possible that we split after sending the index + // metadata + // and one of the region servers doesn't have it. This will cause it to have it the + // next + // go around. + // If it fails again, we don't retry. + String msg = + "Swallowing exception and retrying after clearing meta cache on connection. 
" + + inferredE; + LOGGER.warn(LogUtil.addCustomAnnotations(msg, connection)); + connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName)); + + // add a new child span as this one failed + child.addTimelineAnnotation(msg); + child.stop(); + child = Tracing.child(span, "Failed batch, attempting retry"); + + continue; + } else + if (inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) { + iwe = PhoenixIndexFailurePolicyHelper.getIndexWriteException(inferredE); + if (iwe != null && !shouldRetryIndexedMutation) { + // For an index write failure, the data table write succeeded, + // so when we retry we need to set REPLAY_WRITES + // for first batch in list only. + for (Mutation m : mutationBatchList.get(0)) { + if (!PhoenixIndexMetaData.isIndexRebuild(m.getAttributesMap())) { + m.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); } + PhoenixKeyValueUtil.setTimestamp(m, serverTimestamp); + } + shouldRetry = true; + shouldRetryIndexedMutation = true; + continue; } - - mapIter.remove(); - } - - } - } - - private void addToMap(Map> map, TableInfo tableInfo, Mutation mutation) { - List mutations = null; - if (map.containsKey(tableInfo)) { - mutations = map.get(tableInfo); - } else { - mutations = Lists.newArrayList(); - } - mutations.add(mutation); - map.put(tableInfo, mutations); - } - - /** - * - * Split the list of mutations into multiple lists. since a single row update can contain multiple mutations, - * we only check if the current batch has exceeded the row or size limit for different rows, - * so that mutations for a single row don't end up in different batches. - * - * @param allMutationList - * List of HBase mutations - * @return List of lists of mutations - */ - public static List> getMutationBatchList(long batchSize, long batchSizeBytes, List allMutationList) { - Preconditions.checkArgument(batchSize> 1, - "Mutation types are put or delete, for one row all mutations must be in one batch."); - Preconditions.checkArgument(batchSizeBytes > 0, "Batch size must be larger than 0"); - List> mutationBatchList = Lists.newArrayList(); - List currentList = Lists.newArrayList(); - List sameRowList = Lists.newArrayList(); - long currentBatchSizeBytes = 0L; - for (int i = 0; i < allMutationList.size(); ) { - long sameRowBatchSize = 1L; - Mutation mutation = allMutationList.get(i); - long sameRowMutationSizeBytes = PhoenixKeyValueUtil.calculateMutationDiskSize(mutation); - sameRowList.add(mutation); - while (i + 1 < allMutationList.size() && - Bytes.compareTo(allMutationList.get(i + 1).getRow(), mutation.getRow()) == 0) { - Mutation sameRowMutation = allMutationList.get(i + 1); - sameRowList.add(sameRowMutation); - sameRowMutationSizeBytes += PhoenixKeyValueUtil.calculateMutationDiskSize(sameRowMutation); - sameRowBatchSize++; - i++; + } + e = inferredE; + } + // Throw to client an exception that indicates the statements that + // were not committed successfully. 
+ int[] uncommittedStatementIndexes = getUncommittedStatementIndexes(); + sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp); + + numFailedMutations = uncommittedStatementIndexes.length; + + if (isVerifiedPhase) { + numFailedPhase3Mutations = numFailedMutations; + GLOBAL_MUTATION_INDEX_COMMIT_FAILURE_COUNT.update(numFailedPhase3Mutations); + } + } finally { + mutationCommitTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime); + MutationMetric failureMutationMetrics = MutationMetric.EMPTY_METRIC; + if (!areAllBatchesSuccessful) { + failureMutationMetrics = updateMutationBatchFailureMetrics(currentMutationBatch, + htableNameStr, numFailedMutations, table.isTransactional()); + } + + MutationMetric committedMutationsMetric = + getCommittedMutationsMetric(totalMutationBytesObject, mutationBatchList, numMutations, + numFailedMutations, numFailedPhase3Mutations, mutationCommitTime); + // Combine failure mutation metrics with committed ones for the final picture + committedMutationsMetric.combineMetric(failureMutationMetrics); + mutationMetricQueue.addMetricsForTable(htableNameStr, committedMutationsMetric); + + if (allUpsertsMutations ^ allDeletesMutations) { + // success cases are updated for both cases autoCommit=true and conn.commit explicit + if (areAllBatchesSuccessful) { + TableMetricsManager.updateMetricsMethod(htableNameStr, + allUpsertsMutations + ? UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER + : DELETE_AGGREGATE_SUCCESS_SQL_COUNTER, + 1); } - - if (currentList.size() + sameRowBatchSize > batchSize || - currentBatchSizeBytes + sameRowMutationSizeBytes > batchSizeBytes) { - if (currentList.size() > 0) { - mutationBatchList.add(currentList); - currentList = Lists.newArrayList(); - currentBatchSizeBytes = 0L; - } + // Failures cases are updated only for conn.commit explicit case. + if (!areAllBatchesSuccessful && !connection.getAutoCommit()) { + TableMetricsManager.updateMetricsMethod(htableNameStr, + allUpsertsMutations + ? UPSERT_AGGREGATE_FAILURE_SQL_COUNTER + : DELETE_AGGREGATE_FAILURE_SQL_COUNTER, + 1); } - - currentList.addAll(sameRowList); - currentBatchSizeBytes += sameRowMutationSizeBytes; - sameRowList.clear(); - i++; - } - - if (currentList.size() > 0) { - mutationBatchList.add(currentList); - } - return mutationBatchList; - } - - public byte[] encodeTransaction() throws SQLException { - return phoenixTransactionContext.encodeTransaction(); - } - - private void addUncommittedStatementIndexes(Collection rowMutations) { - for (RowMutationState rowMutationState : rowMutations) { - uncommittedStatementIndexes = joinSortedIntArrays(uncommittedStatementIndexes, - rowMutationState.getStatementIndexes()); - } - } - - private int[] getUncommittedStatementIndexes() { - for (List batches : mutationsMap.values()) { - for (MultiRowMutationState rowMutationMap : batches) { - addUncommittedStatementIndexes(rowMutationMap.values()); + // Update size and latency histogram metrics. 
+ TableMetricsManager.updateSizeHistogramMetricsForMutations(htableNameStr, + committedMutationsMetric.getTotalMutationsSizeBytes().getValue(), + allUpsertsMutations); + Long latency = timeInExecuteMutationMap.get(htableNameStr); + if (latency == null) { + latency = 0l; } - } - return uncommittedStatementIndexes; - } - - @Override - public void close() throws SQLException {} - - private void resetState() { - numRows = 0; - estimatedSize = 0; - this.mutationsMap.clear(); - phoenixTransactionContext = PhoenixTransactionContext.NULL_CONTEXT; - this.returnResult = null; - } - - private void resetTransactionalState() { - phoenixTransactionContext.reset(); - txMutations = Collections.emptyMap(); - uncommittedPhysicalNames.clear(); - uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY; - } - - public void rollback() throws SQLException { - try { - phoenixTransactionContext.abort(); - } finally { - resetState(); - } - } - - public void commit() throws SQLException { - Map> txMutations = Collections.emptyMap(); - int retryCount = 0; - do { - boolean sendSuccessful = false; - boolean retryCommit = false; - SQLException sqlE = null; + latency += mutationCommitTime; + TableMetricsManager.updateLatencyHistogramForMutations(htableNameStr, latency, + allUpsertsMutations); + } + resetAllMutationState(); + + try { + if (cache != null) cache.close(); + } finally { try { - send(); - txMutations = this.txMutations; - sendSuccessful = true; - } catch (SQLException e) { - sqlE = e; - } finally { - try { - boolean finishSuccessful = false; - try { - if (sendSuccessful) { - phoenixTransactionContext.commit(); - finishSuccessful = true; - } - } catch (SQLException e) { - if (LOGGER.isInfoEnabled()) - LOGGER.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() - + " with retry count of " + retryCount); - retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION - .getErrorCode() && retryCount < MAX_COMMIT_RETRIES); - if (sqlE == null) { - sqlE = e; - } else { - sqlE.setNextException(e); - } - } finally { - // If send fails or finish fails, abort the tx - if (!finishSuccessful) { - try { - phoenixTransactionContext.abort(); - if (LOGGER.isInfoEnabled()) LOGGER.info("Abort successful"); - } catch (SQLException e) { - if (LOGGER.isInfoEnabled()) LOGGER.info("Abort failed with " + e); - if (sqlE == null) { - sqlE = e; - } else { - sqlE.setNextException(e); - } - } - } - } - } finally { - TransactionFactory.Provider provider = phoenixTransactionContext.getProvider(); - try { - resetState(); - } finally { - if (retryCommit) { - startTransaction(provider); - // Add back read fences - Set txTableRefs = txMutations.keySet(); - for (TableRef tableRef : txTableRefs) { - PTable dataTable = tableRef.getTable(); - phoenixTransactionContext.markDMLFence(dataTable); - } - try { - // Only retry if an index was added - retryCommit = shouldResubmitTransaction(txTableRefs); - } catch (SQLException e) { - retryCommit = false; - if (sqlE == null) { - sqlE = e; - } else { - sqlE.setNextException(e); - } - } - } - if (sqlE != null && !retryCommit) { throw sqlE; } - } - } + hTable.close(); + } catch (IOException e) { + if (sqlE != null) { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } else { + sqlE = ClientUtil.parseServerException(e); + } } - // Retry commit once if conflict occurred and index was added - if (!retryCommit) { - break; + if (sqlE != null) { + throw sqlE; } - retryCount++; - mutationsMap.putAll(txMutations); - } while (true); - } - - /** - * Determines 
whether indexes were added to mutated tables while the transaction was in progress. - * - * @return true if indexes were added and false otherwise. - * @throws SQLException - */ - private boolean shouldResubmitTransaction(Set txTableRefs) throws SQLException { - if (LOGGER.isInfoEnabled()) LOGGER.info("Checking for index updates as of " + getInitialWritePointer()); - MetaDataClient client = new MetaDataClient(connection); - PMetaData cache = connection.getMetaDataCache(); - boolean addedAnyIndexes = false; - boolean allImmutableTables = !txTableRefs.isEmpty(); - PTable dataTable = null; + } + } + } while (shouldRetry && retryCount++ < 1); + } + } + + /** + * Update metrics related to failed mutations + * @param failedMutationBatch the batch of mutations that failed + * @param tableName table that was to be mutated + * @param numFailedMutations total number of failed mutations + * @param isTransactional true if the table is transactional + */ + public static MutationMetricQueue.MutationMetric updateMutationBatchFailureMetrics( + List failedMutationBatch, String tableName, long numFailedMutations, + boolean isTransactional) { + + if ( + failedMutationBatch == null || failedMutationBatch.isEmpty() + || Strings.isNullOrEmpty(tableName) + ) { + return MutationMetricQueue.MutationMetric.EMPTY_METRIC; + } + + long numUpsertMutationsInBatch = 0L; + long numDeleteMutationsInBatch = 0L; + + for (Mutation m : failedMutationBatch) { + if (m instanceof Put) { + numUpsertMutationsInBatch++; + } else if (m instanceof Delete) { + numDeleteMutationsInBatch++; + } + } + + long totalFailedMutation = numUpsertMutationsInBatch + numDeleteMutationsInBatch; + // this case should not happen but the if condition makes sense if this ever happens + if (totalFailedMutation < numFailedMutations) { + LOGGER.warn("total failed mutation less than num of failed mutation. This is not expected."); + totalFailedMutation = numFailedMutations; + } + + long totalNumFailedMutations = + allDeletesMutations && !isTransactional ? numDeleteMutationsInBatch : totalFailedMutation; + GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(totalNumFailedMutations); + + // Update the MUTATION_BATCH_FAILED_SIZE counter with the number of failed delete mutations + // in case we are dealing with all deletes for a non-transactional table, since there is a + // bug in sendMutations where we don't get the correct value for numFailedMutations when + // we don't use transactions + return new MutationMetricQueue.MutationMetric(0, 0, 0, 0, 0, 0, totalNumFailedMutations, 0, 0, + 0, 0, numUpsertMutationsInBatch, allUpsertsMutations ? 1 : 0, numDeleteMutationsInBatch, + allDeletesMutations ? 
1 : 0); + } + + /** + * Get mutation metrics that correspond to committed mutations only + * @param totalMutationBytesObject MutationBytes object corresponding to all the mutations we + * attempted to commit including those that failed, those that + * were already sent and those that were unsent + * @param unsentMutationBatchList list of mutation batches that are unsent + * @param numMutations total number of mutations + * @param numFailedMutations number of failed mutations in the most recent failed batch + * @param numFailedPhase3Mutations number of mutations failed in phase 3 of index commits + * @param mutationCommitTime time taken for committing all mutations + * @return mutation metric object just accounting for mutations that are already successfully + * committed + */ + static MutationMetric getCommittedMutationsMetric(MutationBytes totalMutationBytesObject, + List> unsentMutationBatchList, long numMutations, long numFailedMutations, + long numFailedPhase3Mutations, long mutationCommitTime) { + long committedUpsertMutationBytes = + totalMutationBytesObject == null ? 0 : totalMutationBytesObject.getUpsertMutationBytes(); + long committedAtomicUpsertMutationBytes = totalMutationBytesObject == null + ? 0 + : totalMutationBytesObject.getAtomicUpsertMutationBytes(); + long committedDeleteMutationBytes = + totalMutationBytesObject == null ? 0 : totalMutationBytesObject.getDeleteMutationBytes(); + long committedUpsertMutationCounter = + totalMutationBytesObject == null ? 0 : totalMutationBytesObject.getUpsertMutationCounter(); + long committedDeleteMutationCounter = + totalMutationBytesObject == null ? 0 : totalMutationBytesObject.getDeleteMutationCounter(); + long committedTotalMutationBytes = + totalMutationBytesObject == null ? 0 : totalMutationBytesObject.getTotalMutationBytes(); + long upsertMutationCommitTime = 0L; + long atomicUpsertMutationCommitTime = 0L; + long deleteMutationCommitTime = 0L; + + if (totalMutationBytesObject != null && numFailedMutations != 0) { + List uncommittedMutationsList = new ArrayList<>(); + for (List mutationBatch : unsentMutationBatchList) { + uncommittedMutationsList.addAll(mutationBatch); + } + // Calculate the uncommitted mutations + MutationBytes uncommittedMutationBytesObject = + calculateMutationSize(uncommittedMutationsList, false); + committedUpsertMutationBytes -= uncommittedMutationBytesObject.getUpsertMutationBytes(); + committedAtomicUpsertMutationBytes -= + uncommittedMutationBytesObject.getAtomicUpsertMutationBytes(); + committedDeleteMutationBytes -= uncommittedMutationBytesObject.getDeleteMutationBytes(); + committedUpsertMutationCounter -= uncommittedMutationBytesObject.getUpsertMutationCounter(); + committedDeleteMutationCounter -= uncommittedMutationBytesObject.getDeleteMutationCounter(); + committedTotalMutationBytes -= uncommittedMutationBytesObject.getTotalMutationBytes(); + } + + // TODO: For V1, we don't expect mixed upserts and deletes so this is fine, + // but we may need to support it later, at which point we should segregate upsert + // mutation time vs delete mutation time + if (committedTotalMutationBytes > 0) { + upsertMutationCommitTime = (long) Math.floor( + (double) (committedUpsertMutationBytes * mutationCommitTime) / committedTotalMutationBytes); + atomicUpsertMutationCommitTime = + (long) Math.floor((double) (committedAtomicUpsertMutationBytes * mutationCommitTime) + / committedTotalMutationBytes); + deleteMutationCommitTime = (long) Math.ceil( + (double) (committedDeleteMutationBytes * mutationCommitTime) / 
committedTotalMutationBytes); + } + return new MutationMetric(numMutations, committedUpsertMutationBytes, + committedDeleteMutationBytes, upsertMutationCommitTime, atomicUpsertMutationCommitTime, + deleteMutationCommitTime, 0, // num failed mutations have been counted already in + // updateMutationBatchFailureMetrics() + committedUpsertMutationCounter, committedDeleteMutationCounter, committedTotalMutationBytes, + numFailedPhase3Mutations, 0, 0, 0, 0); + } + + private void filterIndexCheckerMutations(Map> mutationMap, + Map> unverifiedIndexMutations, + Map> verifiedOrDeletedIndexMutations) throws SQLException { + Iterator>> mapIter = mutationMap.entrySet().iterator(); + + while (mapIter.hasNext()) { + Entry> pair = mapIter.next(); + TableInfo tableInfo = pair.getKey(); + if (!tableInfo.getPTable().getType().equals(PTableType.INDEX)) { + continue; + } + PTable logicalTable = tableInfo.getPTable(); + if ( + tableInfo.getOrigTableRef().getTable().isImmutableRows() + && (this.indexRegionObserverEnabledAllTables + || IndexUtil.isGlobalIndexCheckerEnabled(connection, tableInfo.getHTableName())) + ) { + + byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(logicalTable); + byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(logicalTable).getFirst(); + List mutations = pair.getValue(); + + for (Mutation m : mutations) { + if (m == null) { + continue; + } + if (m instanceof Delete) { + if ( + tableInfo.getOrigTableRef().getTable().getIndexType() + != PTable.IndexType.UNCOVERED_GLOBAL + ) { + Put put = new Put(m.getRow()); + put.addColumn(emptyCF, emptyCQ, IndexUtil.getMaxTimestamp(m), + QueryConstants.UNVERIFIED_BYTES); + // The Delete gets marked as unverified in Phase 1 and gets deleted on Phase 3. + addToMap(unverifiedIndexMutations, tableInfo, put); + } + // Uncovered indexes do not use two phase commit write and thus do not use + // the verified status stored in the empty column. The index rows are + // deleted only after their data table rows are deleted + addToMap(verifiedOrDeletedIndexMutations, tableInfo, m); + } else if (m instanceof Put) { + long timestamp = IndexUtil.getMaxTimestamp(m); + + // Phase 1 index mutations are set to unverified + // Send entire mutation with the unverified status + // Remove the empty column prepared by Index codec as we need to change its value + IndexUtil.removeEmptyColumn(m, emptyCF, emptyCQ); + ((Put) m).addColumn(emptyCF, emptyCQ, timestamp, QueryConstants.UNVERIFIED_BYTES); + addToMap(unverifiedIndexMutations, tableInfo, m); + + // Phase 3 mutations are verified + // Uncovered indexes do not use two phase commit write. The index rows are + // updated only once and before their data table rows are updated. So + // index rows do not have phase-3 put mutations + if ( + tableInfo.getOrigTableRef().getTable().getIndexType() + != PTable.IndexType.UNCOVERED_GLOBAL + ) { + Put verifiedPut = new Put(m.getRow()); + verifiedPut.addColumn(emptyCF, emptyCQ, timestamp, QueryConstants.VERIFIED_BYTES); + addToMap(verifiedOrDeletedIndexMutations, tableInfo, verifiedPut); + } + } else { + addToMap(unverifiedIndexMutations, tableInfo, m); + } + } + + mapIter.remove(); + } + + } + } + + private void addToMap(Map> map, TableInfo tableInfo, + Mutation mutation) { + List mutations = null; + if (map.containsKey(tableInfo)) { + mutations = map.get(tableInfo); + } else { + mutations = Lists.newArrayList(); + } + mutations.add(mutation); + map.put(tableInfo, mutations); + } + + /** + * Split the list of mutations into multiple lists. 
since a single row update can contain multiple + * mutations, we only check if the current batch has exceeded the row or size limit for different + * rows, so that mutations for a single row don't end up in different batches. List of HBase + * mutations + * @return List of lists of mutations + */ + public static List> getMutationBatchList(long batchSize, long batchSizeBytes, + List allMutationList) { + Preconditions.checkArgument(batchSize > 1, + "Mutation types are put or delete, for one row all mutations must be in one batch."); + Preconditions.checkArgument(batchSizeBytes > 0, "Batch size must be larger than 0"); + List> mutationBatchList = Lists.newArrayList(); + List currentList = Lists.newArrayList(); + List sameRowList = Lists.newArrayList(); + long currentBatchSizeBytes = 0L; + for (int i = 0; i < allMutationList.size();) { + long sameRowBatchSize = 1L; + Mutation mutation = allMutationList.get(i); + long sameRowMutationSizeBytes = PhoenixKeyValueUtil.calculateMutationDiskSize(mutation); + sameRowList.add(mutation); + while ( + i + 1 < allMutationList.size() + && Bytes.compareTo(allMutationList.get(i + 1).getRow(), mutation.getRow()) == 0 + ) { + Mutation sameRowMutation = allMutationList.get(i + 1); + sameRowList.add(sameRowMutation); + sameRowMutationSizeBytes += PhoenixKeyValueUtil.calculateMutationDiskSize(sameRowMutation); + sameRowBatchSize++; + i++; + } + + if ( + currentList.size() + sameRowBatchSize > batchSize + || currentBatchSizeBytes + sameRowMutationSizeBytes > batchSizeBytes + ) { + if (currentList.size() > 0) { + mutationBatchList.add(currentList); + currentList = Lists.newArrayList(); + currentBatchSizeBytes = 0L; + } + } + + currentList.addAll(sameRowList); + currentBatchSizeBytes += sameRowMutationSizeBytes; + sameRowList.clear(); + i++; + } + + if (currentList.size() > 0) { + mutationBatchList.add(currentList); + } + return mutationBatchList; + } + + public byte[] encodeTransaction() throws SQLException { + return phoenixTransactionContext.encodeTransaction(); + } + + private void addUncommittedStatementIndexes(Collection rowMutations) { + for (RowMutationState rowMutationState : rowMutations) { + uncommittedStatementIndexes = + joinSortedIntArrays(uncommittedStatementIndexes, rowMutationState.getStatementIndexes()); + } + } + + private int[] getUncommittedStatementIndexes() { + for (List batches : mutationsMap.values()) { + for (MultiRowMutationState rowMutationMap : batches) { + addUncommittedStatementIndexes(rowMutationMap.values()); + } + } + return uncommittedStatementIndexes; + } + + @Override + public void close() throws SQLException { + } + + private void resetState() { + numRows = 0; + estimatedSize = 0; + this.mutationsMap.clear(); + phoenixTransactionContext = PhoenixTransactionContext.NULL_CONTEXT; + this.returnResult = null; + } + + private void resetTransactionalState() { + phoenixTransactionContext.reset(); + txMutations = Collections.emptyMap(); + uncommittedPhysicalNames.clear(); + uncommittedStatementIndexes = EMPTY_STATEMENT_INDEX_ARRAY; + } + + public void rollback() throws SQLException { + try { + phoenixTransactionContext.abort(); + } finally { + resetState(); + } + } + + public void commit() throws SQLException { + Map> txMutations = Collections.emptyMap(); + int retryCount = 0; + do { + boolean sendSuccessful = false; + boolean retryCommit = false; + SQLException sqlE = null; + try { + send(); + txMutations = this.txMutations; + sendSuccessful = true; + } catch (SQLException e) { + sqlE = e; + } finally { try { - for (TableRef tableRef : 
txTableRefs) { - dataTable = tableRef.getTable(); - List oldIndexes; - PTableRef ptableRef = cache.getTableRef(dataTable.getKey()); - oldIndexes = ptableRef.getTable().getIndexes(); - // Always check at server for metadata change, as it's possible that the table is configured to not check - // for metadata changes - // but in this case, the tx manager is telling us it's likely that there has been a change. - MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(), dataTable.getSchemaName() - .getString(), dataTable.getTableName().getString(), true); - long timestamp = TransactionUtil.getResolvedTime(connection, result); - tableRef.setTimeStamp(timestamp); - PTable updatedDataTable = result.getTable(); - if (updatedDataTable == null) { - throw new TableNotFoundException(dataTable.getSchemaName().getString(), - dataTable.getTableName().getString()); + boolean finishSuccessful = false; + try { + if (sendSuccessful) { + phoenixTransactionContext.commit(); + finishSuccessful = true; + } + } catch (SQLException e) { + if (LOGGER.isInfoEnabled()) LOGGER.info(e.getClass().getName() + " at timestamp " + + getInitialWritePointer() + " with retry count of " + retryCount); + retryCommit = + (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode() + && retryCount < MAX_COMMIT_RETRIES); + if (sqlE == null) { + sqlE = e; + } else { + sqlE.setNextException(e); + } + } finally { + // If send fails or finish fails, abort the tx + if (!finishSuccessful) { + try { + phoenixTransactionContext.abort(); + if (LOGGER.isInfoEnabled()) LOGGER.info("Abort successful"); + } catch (SQLException e) { + if (LOGGER.isInfoEnabled()) LOGGER.info("Abort failed with " + e); + if (sqlE == null) { + sqlE = e; + } else { + sqlE.setNextException(e); } - allImmutableTables &= updatedDataTable.isImmutableRows(); - tableRef.setTable(updatedDataTable); - if (!addedAnyIndexes) { - // TODO: in theory we should do a deep equals check here, as it's possible - // that an index was dropped and recreated with the same name but different - // indexed/covered columns. - addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes())); - if (LOGGER.isInfoEnabled()) - LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to " - + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes()); + } + } + } + } finally { + TransactionFactory.Provider provider = phoenixTransactionContext.getProvider(); + try { + resetState(); + } finally { + if (retryCommit) { + startTransaction(provider); + // Add back read fences + Set txTableRefs = txMutations.keySet(); + for (TableRef tableRef : txTableRefs) { + PTable dataTable = tableRef.getTable(); + phoenixTransactionContext.markDMLFence(dataTable); + } + try { + // Only retry if an index was added + retryCommit = shouldResubmitTransaction(txTableRefs); + } catch (SQLException e) { + retryCommit = false; + if (sqlE == null) { + sqlE = e; + } else { + sqlE.setNextException(e); } + } } - if (LOGGER.isInfoEnabled()) - LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer() - + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables")); - // If all tables are immutable, we know the conflict we got was due to our DDL/DML fence. - // If any indexes were added, then the conflict might be due to DDL/DML fence. 
- return allImmutableTables || addedAnyIndexes; - } catch (Throwable e) { - if (dataTable != null) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod( - dataTable.getTableName().toString(), NUM_METADATA_LOOKUP_FAILURES, 1); + if (sqlE != null && !retryCommit) { + throw sqlE; } - throw e; - } + } + } + } + // Retry commit once if conflict occurred and index was added + if (!retryCommit) { + break; + } + retryCount++; + mutationsMap.putAll(txMutations); + } while (true); + } + + /** + * Determines whether indexes were added to mutated tables while the transaction was in progress. + * @return true if indexes were added and false otherwise. + */ + private boolean shouldResubmitTransaction(Set txTableRefs) throws SQLException { + if (LOGGER.isInfoEnabled()) + LOGGER.info("Checking for index updates as of " + getInitialWritePointer()); + MetaDataClient client = new MetaDataClient(connection); + PMetaData cache = connection.getMetaDataCache(); + boolean addedAnyIndexes = false; + boolean allImmutableTables = !txTableRefs.isEmpty(); + PTable dataTable = null; + try { + for (TableRef tableRef : txTableRefs) { + dataTable = tableRef.getTable(); + List oldIndexes; + PTableRef ptableRef = cache.getTableRef(dataTable.getKey()); + oldIndexes = ptableRef.getTable().getIndexes(); + // Always check at server for metadata change, as it's possible that the table is configured + // to not check + // for metadata changes + // but in this case, the tx manager is telling us it's likely that there has been a change. + MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(), + dataTable.getSchemaName().getString(), dataTable.getTableName().getString(), true); + long timestamp = TransactionUtil.getResolvedTime(connection, result); + tableRef.setTimeStamp(timestamp); + PTable updatedDataTable = result.getTable(); + if (updatedDataTable == null) { + throw new TableNotFoundException(dataTable.getSchemaName().getString(), + dataTable.getTableName().getString()); + } + allImmutableTables &= updatedDataTable.isImmutableRows(); + tableRef.setTable(updatedDataTable); + if (!addedAnyIndexes) { + // TODO: in theory we should do a deep equals check here, as it's possible + // that an index was dropped and recreated with the same name but different + // indexed/covered columns. + addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes())); + if ( + LOGGER.isInfoEnabled() + ) LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + + " to " + updatedDataTable.getName().getString() + " with indexes " + + updatedDataTable.getIndexes()); + } + } + if ( + LOGGER.isInfoEnabled() + ) LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + + getInitialWritePointer() + " over " + + (allImmutableTables ? " all immutable tables" : " some mutable tables")); + // If all tables are immutable, we know the conflict we got was due to our DDL/DML fence. + // If any indexes were added, then the conflict might be due to DDL/DML fence. + return allImmutableTables || addedAnyIndexes; + } catch (Throwable e) { + if (dataTable != null) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod( + dataTable.getTableName().toString(), NUM_METADATA_LOOKUP_FAILURES, 1); + } + throw e; + } + } + + /** + * Send to HBase any uncommitted data for transactional tables. + * @return true if any data was sent and false otherwise. 
+ */ + public boolean sendUncommitted() throws SQLException { + return sendUncommitted(mutationsMap.keySet().iterator()); + } + + /** + * Support read-your-own-write semantics by sending uncommitted data to HBase prior to running a + * query. In this way, they are visible to subsequent reads but are not actually committed until + * commit is called. + * @return true if any data was sent and false otherwise. + */ + public boolean sendUncommitted(Iterator tableRefs) throws SQLException { + + if (phoenixTransactionContext.isTransactionRunning()) { + // Initialize visibility so that transactions see their own writes. + // The checkpoint() method will set it to not see writes if necessary. + phoenixTransactionContext.setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT); + } + + Iterator filteredTableRefs = Iterators.filter(tableRefs, new Predicate() { + @Override + public boolean apply(TableRef tableRef) { + return tableRef.getTable().isTransactional(); + } + }); + if (filteredTableRefs.hasNext()) { + // FIXME: strip table alias to prevent equality check from failing due to alias mismatch on + // null alias. + // We really should be keying the tables based on the physical table name. + List strippedAliases = + Lists.newArrayListWithExpectedSize(mutationsMap.keySet().size()); + while (filteredTableRefs.hasNext()) { + TableRef tableRef = filteredTableRefs.next(); + // REVIEW: unclear if we need this given we start transactions when resolving a table + if (tableRef.getTable().isTransactional()) { + startTransaction(tableRef.getTable().getTransactionProvider()); + } + strippedAliases.add(new TableRef(null, tableRef.getTable(), tableRef.getTimeStamp(), + tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols())); + } + send(strippedAliases.iterator()); + return true; + } + return false; + } + + public void send() throws SQLException { + send(null); + } + + public static int[] joinSortedIntArrays(int[] a, int[] b) { + int[] result = new int[a.length + b.length]; + int i = 0, j = 0, k = 0, current; + while (i < a.length && j < b.length) { + current = a[i] < b[j] ? a[i++] : b[j++]; + for (; i < a.length && a[i] == current; i++) + ; + for (; j < b.length && b[j] == current; j++) + ; + result[k++] = current; + } + while (i < a.length) { + for (current = a[i++]; i < a.length && a[i] == current; i++) + ; + result[k++] = current; + } + while (j < b.length) { + for (current = b[j++]; j < b.length && b[j] == current; j++) + ; + result[k++] = current; + } + return Arrays.copyOf(result, k); + } + + @Immutable + public static class RowTimestampColInfo { + private final boolean useServerTimestamp; + private final Long rowTimestamp; + + public static final RowTimestampColInfo NULL_ROWTIMESTAMP_INFO = + new RowTimestampColInfo(false, null); + + public RowTimestampColInfo(boolean autoGenerate, Long value) { + this.useServerTimestamp = autoGenerate; + this.rowTimestamp = value; + } + + public boolean useServerTimestamp() { + return useServerTimestamp; + } + + public Long getTimestamp() { + return rowTimestamp; } - - /** - * Send to HBase any uncommitted data for transactional tables. - * - * @return true if any data was sent and false otherwise. 
- * @throws SQLException - */ - public boolean sendUncommitted() throws SQLException { - return sendUncommitted(mutationsMap.keySet().iterator()); + } + + public static class MultiRowMutationState { + private Map rowKeyToRowMutationState; + private long estimatedSize; + + public MultiRowMutationState(int size) { + this.rowKeyToRowMutationState = Maps.newHashMapWithExpectedSize(size); + this.estimatedSize = 0; } - /** - * Support read-your-own-write semantics by sending uncommitted data to HBase prior to running a query. In this way, - * they are visible to subsequent reads but are not actually committed until commit is called. - * - * @param tableRefs - * @return true if any data was sent and false otherwise. - * @throws SQLException - */ - public boolean sendUncommitted(Iterator tableRefs) throws SQLException { - - if (phoenixTransactionContext.isTransactionRunning()) { - // Initialize visibility so that transactions see their own writes. - // The checkpoint() method will set it to not see writes if necessary. - phoenixTransactionContext.setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT); - } - - Iterator filteredTableRefs = Iterators.filter(tableRefs, new Predicate() { - @Override - public boolean apply(TableRef tableRef) { - return tableRef.getTable().isTransactional(); - } - }); - if (filteredTableRefs.hasNext()) { - // FIXME: strip table alias to prevent equality check from failing due to alias mismatch on null alias. - // We really should be keying the tables based on the physical table name. - List strippedAliases = Lists.newArrayListWithExpectedSize(mutationsMap.keySet().size()); - while (filteredTableRefs.hasNext()) { - TableRef tableRef = filteredTableRefs.next(); - // REVIEW: unclear if we need this given we start transactions when resolving a table - if (tableRef.getTable().isTransactional()) { - startTransaction(tableRef.getTable().getTransactionProvider()); - } - strippedAliases.add(new TableRef(null, tableRef.getTable(), tableRef.getTimeStamp(), tableRef - .getLowerBoundTimeStamp(), tableRef.hasDynamicCols())); - } - send(strippedAliases.iterator()); - return true; - } - return false; + public RowMutationState put(ImmutableBytesPtr ptr, RowMutationState rowMutationState) { + estimatedSize += rowMutationState.calculateEstimatedSize(); + return rowKeyToRowMutationState.put(ptr, rowMutationState); } - public void send() throws SQLException { - send(null); + public RowMutationState get(ImmutableBytesPtr ptr) { + return rowKeyToRowMutationState.get(ptr); } - public static int[] joinSortedIntArrays(int[] a, int[] b) { - int[] result = new int[a.length + b.length]; - int i = 0, j = 0, k = 0, current; - while (i < a.length && j < b.length) { - current = a[i] < b[j] ? 
a[i++] : b[j++]; - for (; i < a.length && a[i] == current; i++) - ; - for (; j < b.length && b[j] == current; j++) - ; - result[k++] = current; - } - while (i < a.length) { - for (current = a[i++]; i < a.length && a[i] == current; i++) - ; - result[k++] = current; - } - while (j < b.length) { - for (current = b[j++]; j < b.length && b[j] == current; j++) - ; - result[k++] = current; - } - return Arrays.copyOf(result, k); + public void putAll(MultiRowMutationState other) { + estimatedSize += other.estimatedSize; + rowKeyToRowMutationState.putAll(other.rowKeyToRowMutationState); } - @Immutable - public static class RowTimestampColInfo { - private final boolean useServerTimestamp; - private final Long rowTimestamp; - - public static final RowTimestampColInfo NULL_ROWTIMESTAMP_INFO = new RowTimestampColInfo(false, null); - - public RowTimestampColInfo(boolean autoGenerate, Long value) { - this.useServerTimestamp = autoGenerate; - this.rowTimestamp = value; - } - - public boolean useServerTimestamp() { - return useServerTimestamp; - } - - public Long getTimestamp() { - return rowTimestamp; - } + public boolean isEmpty() { + return rowKeyToRowMutationState.isEmpty(); } - public static class MultiRowMutationState { - private Map rowKeyToRowMutationState; - private long estimatedSize; - - public MultiRowMutationState(int size) { - this.rowKeyToRowMutationState = Maps.newHashMapWithExpectedSize(size); - this.estimatedSize = 0; - } - - public RowMutationState put(ImmutableBytesPtr ptr, RowMutationState rowMutationState) { - estimatedSize += rowMutationState.calculateEstimatedSize(); - return rowKeyToRowMutationState.put(ptr, rowMutationState); - } - - public RowMutationState get(ImmutableBytesPtr ptr) { - return rowKeyToRowMutationState.get(ptr); - } - - public void putAll(MultiRowMutationState other) { - estimatedSize += other.estimatedSize; - rowKeyToRowMutationState.putAll(other.rowKeyToRowMutationState); - } - - public boolean isEmpty() { - return rowKeyToRowMutationState.isEmpty(); - } - - public int size() { - return rowKeyToRowMutationState.size(); - } - - public Set> entrySet() { - return rowKeyToRowMutationState.entrySet(); - } - - public void clear() { - rowKeyToRowMutationState.clear(); - estimatedSize = 0; - } - - public Collection values() { - return rowKeyToRowMutationState.values(); - } + public int size() { + return rowKeyToRowMutationState.size(); } - public static class RowMutationState { - @Nonnull - private Map columnValues; - private int[] statementIndexes; - @Nonnull - private final RowTimestampColInfo rowTsColInfo; - private byte[] onDupKeyBytes; - private long colValuesSize; - - public RowMutationState(@Nonnull Map columnValues, long colValuesSize, int statementIndex, - @Nonnull RowTimestampColInfo rowTsColInfo, byte[] onDupKeyBytes) { - checkNotNull(columnValues); - checkNotNull(rowTsColInfo); - this.columnValues = columnValues; - this.statementIndexes = new int[] { statementIndex }; - this.rowTsColInfo = rowTsColInfo; - this.onDupKeyBytes = onDupKeyBytes; - this.colValuesSize = colValuesSize; - } - - public long calculateEstimatedSize() { - return colValuesSize + statementIndexes.length * SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE - + (onDupKeyBytes != null ? onDupKeyBytes.length : 0); - } - - byte[] getOnDupKeyBytes() { - return onDupKeyBytes; - } - - public Map getColumnValues() { - return columnValues; - } - - int[] getStatementIndexes() { - return statementIndexes; - } - - /** - * Join the newRow with the current row if it doesn't conflict with it. 
- * A regular upsert conflicts with a conditional upsert - * @param newRow - * @return True if the rows were successfully joined else False - */ - boolean join(RowMutationState newRow) { - if (isConflicting(newRow)) { - return false; - } - // If we already have a row and the new row has an ON DUPLICATE KEY clause - // ignore the new values (as that's what the server will do). - if (newRow.onDupKeyBytes == null) { - // increment the column value size by the new row column value size - colValuesSize += newRow.colValuesSize; - for (Map.Entry entry : newRow.columnValues.entrySet()) { - PColumn col = entry.getKey(); - byte[] oldValue = columnValues.put(col, entry.getValue()); - if (oldValue != null) { - // decrement column value size by the size of all column values that were replaced - colValuesSize -= (col.getEstimatedSize() + oldValue.length); - } - } - } - // Concatenate ON DUPLICATE KEY bytes to allow multiple - // increments of the same row in the same commit batch. - this.onDupKeyBytes = PhoenixIndexBuilderHelper.combineOnDupKey(this.onDupKeyBytes, newRow.onDupKeyBytes); - statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes()); - return true; - } - - @Nonnull - RowTimestampColInfo getRowTimestampColInfo() { - return rowTsColInfo; - } - - public boolean isConflicting(RowMutationState newRowMutationState) { - return (this.onDupKeyBytes != null && newRowMutationState.onDupKeyBytes == null || - this.onDupKeyBytes == null && newRowMutationState.onDupKeyBytes != null); - } + public Set> entrySet() { + return rowKeyToRowMutationState.entrySet(); } - public ReadMetricQueue getReadMetricQueue() { - return readMetricQueue; + public void clear() { + rowKeyToRowMutationState.clear(); + estimatedSize = 0; } - public void setReadMetricQueue(ReadMetricQueue readMetricQueue) { - this.readMetricQueue = readMetricQueue; + public Collection values() { + return rowKeyToRowMutationState.values(); } + } - public MutationMetricQueue getMutationMetricQueue() { - return mutationMetricQueue; + public static class RowMutationState { + @Nonnull + private Map columnValues; + private int[] statementIndexes; + @Nonnull + private final RowTimestampColInfo rowTsColInfo; + private byte[] onDupKeyBytes; + private long colValuesSize; + + public RowMutationState(@Nonnull Map columnValues, long colValuesSize, + int statementIndex, @Nonnull RowTimestampColInfo rowTsColInfo, byte[] onDupKeyBytes) { + checkNotNull(columnValues); + checkNotNull(rowTsColInfo); + this.columnValues = columnValues; + this.statementIndexes = new int[] { statementIndex }; + this.rowTsColInfo = rowTsColInfo; + this.onDupKeyBytes = onDupKeyBytes; + this.colValuesSize = colValuesSize; } - public void addExecuteMutationTime(long time, String tableName) { - Long timeSpent = timeInExecuteMutationMap.get(tableName); - if (timeSpent == null) { - timeSpent = 0l; - } - timeSpent += time; - timeInExecuteMutationMap.put(tableName, timeSpent); + public long calculateEstimatedSize() { + return colValuesSize + statementIndexes.length * SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE + + (onDupKeyBytes != null ? onDupKeyBytes.length : 0); } - public void resetExecuteMutationTimeMap() { - timeInExecuteMutationMap.clear(); + byte[] getOnDupKeyBytes() { + return onDupKeyBytes; } - public boolean isEmpty() { - return mutationsMap != null ? 
mutationsMap.isEmpty() : true; + public Map getColumnValues() { + return columnValues; } - public enum ReturnResult { - ROW + int[] getStatementIndexes() { + return statementIndexes; } + /** + * Join the newRow with the current row if it doesn't conflict with it. A regular upsert + * conflicts with a conditional upsert + * @return True if the rows were successfully joined else False + */ + boolean join(RowMutationState newRow) { + if (isConflicting(newRow)) { + return false; + } + // If we already have a row and the new row has an ON DUPLICATE KEY clause + // ignore the new values (as that's what the server will do). + if (newRow.onDupKeyBytes == null) { + // increment the column value size by the new row column value size + colValuesSize += newRow.colValuesSize; + for (Map.Entry entry : newRow.columnValues.entrySet()) { + PColumn col = entry.getKey(); + byte[] oldValue = columnValues.put(col, entry.getValue()); + if (oldValue != null) { + // decrement column value size by the size of all column values that were replaced + colValuesSize -= (col.getEstimatedSize() + oldValue.length); + } + } + } + // Concatenate ON DUPLICATE KEY bytes to allow multiple + // increments of the same row in the same commit batch. + this.onDupKeyBytes = + PhoenixIndexBuilderHelper.combineOnDupKey(this.onDupKeyBytes, newRow.onDupKeyBytes); + statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes()); + return true; + } + + @Nonnull + RowTimestampColInfo getRowTimestampColInfo() { + return rowTsColInfo; + } + + public boolean isConflicting(RowMutationState newRowMutationState) { + return (this.onDupKeyBytes != null && newRowMutationState.onDupKeyBytes == null + || this.onDupKeyBytes == null && newRowMutationState.onDupKeyBytes != null); + } + } + + public ReadMetricQueue getReadMetricQueue() { + return readMetricQueue; + } + + public void setReadMetricQueue(ReadMetricQueue readMetricQueue) { + this.readMetricQueue = readMetricQueue; + } + + public MutationMetricQueue getMutationMetricQueue() { + return mutationMetricQueue; + } + + public void addExecuteMutationTime(long time, String tableName) { + Long timeSpent = timeInExecuteMutationMap.get(tableName); + if (timeSpent == null) { + timeSpent = 0l; + } + timeSpent += time; + timeInExecuteMutationMap.put(tableName, timeSpent); + } + + public void resetExecuteMutationTimeMap() { + timeInExecuteMutationMap.clear(); + } + + public boolean isEmpty() { + return mutationsMap != null ? mutationsMap.isEmpty() : true; + } + + public enum ReturnResult { + ROW + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java index 0b4423ba0fa..39c5bdf697b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -66,457 +66,493 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; +public class PhoenixTxIndexMutationGenerator { + private final PhoenixIndexCodec codec; + private final PhoenixIndexMetaData indexMetaData; + private final ConnectionQueryServices services; + private final byte[] regionStartKey; + private final byte[] regionEndKey; + private final byte[] tableName; + + private PhoenixTxIndexMutationGenerator(ConnectionQueryServices services, Configuration conf, + PhoenixIndexMetaData indexMetaData, byte[] tableName, byte[] regionStartKey, + byte[] regionEndKey) { + this.services = services; + this.indexMetaData = indexMetaData; + this.regionStartKey = regionStartKey; + this.regionEndKey = regionEndKey; + this.tableName = tableName; + this.codec = new PhoenixIndexCodec(conf, tableName); + } + + public PhoenixTxIndexMutationGenerator(Configuration conf, PhoenixIndexMetaData indexMetaData, + byte[] tableName, byte[] regionStartKey, byte[] regionEndKey) { + this(null, conf, indexMetaData, tableName, regionStartKey, regionEndKey); + } + + public PhoenixTxIndexMutationGenerator(ConnectionQueryServices services, + PhoenixIndexMetaData indexMetaData, byte[] tableName) { + this(services, services.getConfiguration(), indexMetaData, tableName, null, null); + } + + private static void addMutation(Map mutations, + ImmutableBytesPtr row, Mutation m) { + MultiMutation stored = mutations.get(row); + // we haven't seen this row before, so add it + if (stored == null) { + stored = new MultiMutation(row); + mutations.put(row, stored); + } + stored.addAll(m); + } + public Collection> getIndexUpdates(Table htable, + Iterator mutationIterator) throws IOException, SQLException { -public class PhoenixTxIndexMutationGenerator { - private final PhoenixIndexCodec codec; - private final PhoenixIndexMetaData indexMetaData; - private final ConnectionQueryServices services; - private final byte[] regionStartKey; - private final byte[] regionEndKey; - private final byte[] tableName; - - private PhoenixTxIndexMutationGenerator(ConnectionQueryServices services, Configuration conf, PhoenixIndexMetaData indexMetaData, byte[] tableName, byte[] regionStartKey, byte[] regionEndKey) { - this.services = services; - this.indexMetaData = indexMetaData; - this.regionStartKey = regionStartKey; - this.regionEndKey = regionEndKey; - 
this.tableName = tableName; - this.codec = new PhoenixIndexCodec(conf, tableName); + if (!mutationIterator.hasNext()) { + return Collections.emptyList(); } - public PhoenixTxIndexMutationGenerator(Configuration conf, PhoenixIndexMetaData indexMetaData, byte[] tableName, byte[] regionStartKey, byte[] regionEndKey) { - this(null, conf, indexMetaData, tableName, regionStartKey, regionEndKey); + List indexMaintainers = indexMetaData.getIndexMaintainers(); + ResultScanner currentScanner = null; + // Collect up all mutations in batch + Map mutations = + new HashMap(); + // Collect the set of mutable ColumnReferences so that we can first + // run a scan to get the current state. We'll need this to delete + // the existing index rows. + int estimatedSize = indexMaintainers.size() * 10; + Set mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize); + for (IndexMaintainer indexMaintainer : indexMaintainers) { + // For transactional tables, we use an index maintainer + // to aid in rollback if there's a KeyValue column in the index. The alternative would be + // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the + // client side. + Set allColumns = indexMaintainer.getAllColumns(); + mutableColumns.addAll(allColumns); } - public PhoenixTxIndexMutationGenerator(ConnectionQueryServices services, PhoenixIndexMetaData indexMetaData, byte[] tableName) { - this(services, services.getConfiguration(), indexMetaData, tableName, null, null); + Mutation m = mutationIterator.next(); + Map updateAttributes = m.getAttributesMap(); + byte[] txRollbackAttribute = + updateAttributes.get(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY); + boolean isRollback = txRollbackAttribute != null; + + boolean isImmutable = indexMetaData.isImmutableRows(); + Map findPriorValueMutations; + if (isImmutable && !isRollback) { + findPriorValueMutations = new HashMap(); + } else { + findPriorValueMutations = mutations; } - private static void addMutation(Map mutations, ImmutableBytesPtr row, Mutation m) { - MultiMutation stored = mutations.get(row); - // we haven't seen this row before, so add it - if (stored == null) { - stored = new MultiMutation(row); - mutations.put(row, stored); - } - stored.addAll(m); + while (true) { + // add the mutation to the batch set + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + // if we have no non PK columns, no need to find the prior values + if (mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m)) { + addMutation(findPriorValueMutations, row, m); + } + addMutation(mutations, row, m); + + if (!mutationIterator.hasNext()) { + break; + } + m = mutationIterator.next(); } - public Collection> getIndexUpdates(Table htable, Iterator mutationIterator) throws IOException, SQLException { + Collection> indexUpdates = + new ArrayList>(mutations.size() * 2 * indexMaintainers.size()); + // Track if we have row keys with Delete mutations. If there are none, we don't need to do the + // scan for + // prior versions, if there are, we do. Since rollbacks always have delete mutations, + // this logic will work there too. 
+ if (!findPriorValueMutations.isEmpty()) { + List keys = Lists.newArrayListWithExpectedSize(mutations.size()); + for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) { + keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary(), SortOrder.ASC)); + } + Scan scan = new Scan(); + // Project all mutable columns + for (ColumnReference ref : mutableColumns) { + scan.addColumn(ref.getFamily(), ref.getQualifier()); + } + /* + * Indexes inherit the storage scheme of the data table which means all the indexes have the + * same storage scheme and empty key value qualifier. Note that this assumption would be + * broken if we start supporting new indexes over existing data tables to have a different + * storage scheme than the data table. + */ + byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier(); + + // Project empty key value column + scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier); + ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, + Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1); + scanRanges.initializeScan(scan); + Table txTable = indexMetaData.getTransactionContext().getTransactionalTable(htable, true); + // For rollback, we need to see all versions, including + // the last committed version as there may be multiple + // checkpointed versions. + SkipScanFilter filter = scanRanges.getSkipScanFilter(); + if (isRollback) { + filter = new SkipScanFilter(filter, true, false); + indexMetaData.getTransactionContext() + .setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL); + } + scan.setFilter(filter); + currentScanner = txTable.getScanner(scan); + } + if (isRollback) { + processRollback(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, + indexUpdates, mutations); + } else { + processMutation(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, + indexUpdates, mutations, findPriorValueMutations); + } - if (!mutationIterator.hasNext()) { - return Collections.emptyList(); + return indexUpdates; + } + + private void processMutation(PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute, + ResultScanner scanner, Set upsertColumns, + Collection> indexUpdates, + Map mutations, + Map mutationsToFindPreviousValue) throws IOException { + List indexMaintainers = indexMetaData.getIndexMaintainers(); + if (scanner != null) { + Result result; + ColumnReference emptyColRef = + new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), + indexMaintainers.get(0).getEmptyKeyValueQualifier()); + // Process existing data table rows by removing the old index row and adding the new index row + while ((result = scanner.next()) != null) { + Mutation m = mutationsToFindPreviousValue.remove(new ImmutableBytesPtr(result.getRow())); + TxTableState state = new TxTableState(upsertColumns, + indexMetaData.getTransactionContext().getWritePointer(), m, emptyColRef, result); + generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); + generatePuts(indexMetaData, indexUpdates, state); + } + } + // Process new data table by adding new index rows + for (Mutation m : mutations.values()) { + TxTableState state = + new TxTableState(upsertColumns, indexMetaData.getTransactionContext().getWritePointer(), m); + generatePuts(indexMetaData, indexUpdates, state); + generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); + } + } + + private void processRollback(PhoenixIndexMetaData indexMetaData, byte[] 
txRollbackAttribute, + ResultScanner scanner, Set mutableColumns, + Collection> indexUpdates, + Map mutations) throws IOException { + if (scanner != null) { + long readPtr = indexMetaData.getTransactionContext().getReadPointer(); + Result result; + // Loop through last committed row state plus all new rows associated with current transaction + // to generate point delete markers for all index rows that were added. + // Note: After PHOENIX-6627 is it worth revisiting managing index rows in change sets? + ColumnReference emptyColRef = + new ColumnReference(indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(), + indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier()); + while ((result = scanner.next()) != null) { + Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow())); + // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest + // (as if we're "replaying" them in time order). + List cells = result.listCells(); + Collections.sort(cells, new Comparator() { + + @Override + public int compare(Cell o1, Cell o2) { + int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp()); + if (c != 0) return c; + c = o1.getType().getCode() - o2.getType().getCode(); + if (c != 0) return c; + c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(), + o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength()); + if (c != 0) return c; + return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(), + o1.getQualifierLength(), o1.getQualifierArray(), o1.getQualifierOffset(), + o1.getQualifierLength()); + } + + }); + int i = 0; + int nCells = cells.size(); + Result oldResult = null, newResult; + do { + boolean hasPuts = false; + LinkedList singleTimeCells = Lists.newLinkedList(); + long writePtr; + Cell cell = cells.get(i); + do { + hasPuts |= cell.getType() == Cell.Type.Put; + writePtr = cell.getTimestamp(); + ListIterator it = singleTimeCells.listIterator(); + do { + // Add at the beginning of the list to match the expected HBase + // newest to oldest sort order (which TxTableState relies on + // with the Result.getLatestColumnValue() calls). However, we + // still want to add Cells in the expected order for each time + // bound as otherwise we won't find it in our old state. + it.add(cell); + } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr); + } while (i < nCells && cell.getTimestamp() <= readPtr); + + // Generate point delete markers for the prior row deletion of the old index value. + // The write timestamp is the next timestamp, not the current timestamp, + // as the earliest cells are the current values for the row (and we don't + // want to delete the current row). + if (oldResult != null) { + TxTableState state = + new TxTableState(mutableColumns, writePtr, m, emptyColRef, oldResult); + generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); + } + // Generate point delete markers for the new index value. + // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not + // generate deletes. We would have generated the delete above based on the state + // of the previous row. The delete markers do not give us the state we need to + // delete. 
+ if (hasPuts) { + newResult = Result.create(singleTimeCells); + // First row may represent the current state which we don't want to delete + if (writePtr > readPtr) { + TxTableState state = + new TxTableState(mutableColumns, writePtr, m, emptyColRef, newResult); + generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); + } + oldResult = newResult; + } else { + oldResult = null; + } + } while (i < nCells); + } + } + } + + private void generateDeletes(PhoenixIndexMetaData indexMetaData, + Collection> indexUpdates, byte[] attribValue, TxTableState state) + throws IOException { + byte[] regionStartKey = this.regionStartKey; + byte[] regionEndKey = this.regionEndKey; + if (services != null && indexMetaData.hasLocalIndexes()) { + try { + HRegionLocation tableRegionLocation = + services.getTableRegionLocation(tableName, state.getCurrentRowKey()); + regionStartKey = tableRegionLocation.getRegion().getStartKey(); + regionEndKey = tableRegionLocation.getRegion().getEndKey(); + } catch (SQLException e) { + throw new IOException(e); + } + } + Iterable deletes = + codec.getIndexDeletes(state, indexMetaData, regionStartKey, regionEndKey); + for (IndexUpdate delete : deletes) { + if (delete.isValid()) { + delete.getUpdate().setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, + attribValue); + indexUpdates.add(new Pair(delete.getUpdate(), delete.getTableName())); + } + } + } + + private boolean generatePuts(PhoenixIndexMetaData indexMetaData, + Collection> indexUpdates, TxTableState state) throws IOException { + state.applyMutation(); + byte[] regionStartKey = this.regionStartKey; + byte[] regionEndKey = this.regionEndKey; + if (services != null && indexMetaData.hasLocalIndexes()) { + try { + HRegionLocation tableRegionLocation = + services.getTableRegionLocation(tableName, state.getCurrentRowKey()); + regionStartKey = tableRegionLocation.getRegion().getStartKey(); + regionEndKey = tableRegionLocation.getRegion().getEndKey(); + } catch (SQLException e) { + throw new IOException(e); + } + } + Iterable puts = + codec.getIndexUpserts(state, indexMetaData, regionStartKey, regionEndKey, true); + boolean validPut = false; + for (IndexUpdate put : puts) { + if (put.isValid()) { + indexUpdates.add(new Pair(put.getUpdate(), put.getTableName())); + validPut = true; + } + } + return validPut; + } + + private static class TxTableState implements TableState { + private final Mutation mutation; + private final long currentTimestamp; + private final List pendingUpdates; + private final Set indexedColumns; + private final Map valueMap; + + private TxTableState(Set indexedColumns, long currentTimestamp, + Mutation mutation) { + this.currentTimestamp = currentTimestamp; + this.indexedColumns = indexedColumns; + this.mutation = mutation; + int estimatedSize = indexedColumns.size(); + this.valueMap = Maps.newHashMapWithExpectedSize(estimatedSize); + this.pendingUpdates = Lists.newArrayListWithExpectedSize(estimatedSize); + try { + CellScanner scanner = mutation.cellScanner(); + while (scanner.advance()) { + Cell cell = scanner.current(); + pendingUpdates.add(PhoenixKeyValueUtil.maybeCopyCell(cell)); } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + } - List indexMaintainers = indexMetaData.getIndexMaintainers(); - ResultScanner currentScanner = null; - // Collect up all mutations in batch - Map mutations = - new HashMap(); - // Collect the set of mutable ColumnReferences so that we can first - // run a scan to get the current state. 
We'll need this to delete - // the existing index rows. - int estimatedSize = indexMaintainers.size() * 10; - Set mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize); - for (IndexMaintainer indexMaintainer : indexMaintainers) { - // For transactional tables, we use an index maintainer - // to aid in rollback if there's a KeyValue column in the index. The alternative would be - // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the - // client side. - Set allColumns = indexMaintainer.getAllColumns(); - mutableColumns.addAll(allColumns); - } + public TxTableState(Set indexedColumns, long currentTimestamp, Mutation m, + ColumnReference emptyColRef, Result r) { + this(indexedColumns, currentTimestamp, m); - Mutation m = mutationIterator.next(); - Map updateAttributes = m.getAttributesMap(); - byte[] txRollbackAttribute = updateAttributes.get(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY); - boolean isRollback = txRollbackAttribute!=null; - - boolean isImmutable = indexMetaData.isImmutableRows(); - Map findPriorValueMutations; - if (isImmutable && !isRollback) { - findPriorValueMutations = new HashMap(); - } else { - findPriorValueMutations = mutations; - } - - while (true) { - // add the mutation to the batch set - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - // if we have no non PK columns, no need to find the prior values - if ( mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m) ) { - addMutation(findPriorValueMutations, row, m); - } - addMutation(mutations, row, m); - - if (!mutationIterator.hasNext()) { - break; - } - m = mutationIterator.next(); - } - - Collection> indexUpdates = new ArrayList>(mutations.size() * 2 * indexMaintainers.size()); - // Track if we have row keys with Delete mutations. If there are none, we don't need to do the scan for - // prior versions, if there are, we do. Since rollbacks always have delete mutations, - // this logic will work there too. - if (!findPriorValueMutations.isEmpty()) { - List keys = Lists.newArrayListWithExpectedSize(mutations.size()); - for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) { - keys.add( - PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary(), SortOrder.ASC)); - } - Scan scan = new Scan(); - // Project all mutable columns - for (ColumnReference ref : mutableColumns) { - scan.addColumn(ref.getFamily(), ref.getQualifier()); - } - /* - * Indexes inherit the storage scheme of the data table which means all the indexes have the same - * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start - * supporting new indexes over existing data tables to have a different storage scheme than the data - * table. - */ - byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier(); - - // Project empty key value column - scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier); - ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1); - scanRanges.initializeScan(scan); - Table txTable = indexMetaData.getTransactionContext().getTransactionalTable(htable, true); - // For rollback, we need to see all versions, including - // the last committed version as there may be multiple - // checkpointed versions. 
- SkipScanFilter filter = scanRanges.getSkipScanFilter(); - if (isRollback) { - filter = new SkipScanFilter(filter,true, false); - indexMetaData.getTransactionContext().setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL); - } - scan.setFilter(filter); - currentScanner = txTable.getScanner(scan); + for (ColumnReference ref : indexedColumns) { + Cell cell = r.getColumnLatestCell(ref.getFamily(), ref.getQualifier()); + if (cell != null) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + valueMap.put(ref, ptr); } - if (isRollback) { - processRollback(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations); - } else { - processMutation(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations, findPriorValueMutations); - } - - return indexUpdates; + } } - private void processMutation(PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute, - ResultScanner scanner, - Set upsertColumns, - Collection> indexUpdates, - Map mutations, - Map mutationsToFindPreviousValue) throws IOException { - List indexMaintainers = indexMetaData.getIndexMaintainers(); - if (scanner != null) { - Result result; - ColumnReference emptyColRef = new ColumnReference(indexMaintainers.get(0) - .getDataEmptyKeyValueCF(), indexMaintainers.get(0).getEmptyKeyValueQualifier()); - // Process existing data table rows by removing the old index row and adding the new index row - while ((result = scanner.next()) != null) { - Mutation m = mutationsToFindPreviousValue.remove(new ImmutableBytesPtr(result.getRow())); - TxTableState state = new TxTableState(upsertColumns, indexMetaData.getTransactionContext().getWritePointer(), m, emptyColRef, result); - generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); - generatePuts(indexMetaData, indexUpdates, state); - } - } - // Process new data table by adding new index rows - for (Mutation m : mutations.values()) { - TxTableState state = new TxTableState(upsertColumns, indexMetaData.getTransactionContext().getWritePointer(), m); - generatePuts(indexMetaData, indexUpdates, state); - generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); - } + @Override + public long getCurrentTimestamp() { + return currentTimestamp; } - private void processRollback(PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute, - ResultScanner scanner, - Set mutableColumns, - Collection> indexUpdates, - Map mutations) throws IOException { - if (scanner != null) { - long readPtr = indexMetaData.getTransactionContext().getReadPointer(); - Result result; - // Loop through last committed row state plus all new rows associated with current transaction - // to generate point delete markers for all index rows that were added. - // Note: After PHOENIX-6627 is it worth revisiting managing index rows in change sets? - ColumnReference emptyColRef = new ColumnReference(indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(), indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier()); - while ((result = scanner.next()) != null) { - Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow())); - // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest - // (as if we're "replaying" them in time order). 
- List cells = result.listCells(); - Collections.sort(cells, new Comparator() { - - @Override - public int compare(Cell o1, Cell o2) { - int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp()); - if (c != 0) return c; - c = o1.getType().getCode() - o2.getType().getCode(); - if (c != 0) return c; - c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(), o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength()); - if (c != 0) return c; - return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(), o1.getQualifierLength(), o1.getQualifierArray(), o1.getQualifierOffset(), o1.getQualifierLength()); - } - - }); - int i = 0; - int nCells = cells.size(); - Result oldResult = null, newResult; - do { - boolean hasPuts = false; - LinkedList singleTimeCells = Lists.newLinkedList(); - long writePtr; - Cell cell = cells.get(i); - do { - hasPuts |= cell.getType() == Cell.Type.Put; - writePtr = cell.getTimestamp(); - ListIterator it = singleTimeCells.listIterator(); - do { - // Add at the beginning of the list to match the expected HBase - // newest to oldest sort order (which TxTableState relies on - // with the Result.getLatestColumnValue() calls). However, we - // still want to add Cells in the expected order for each time - // bound as otherwise we won't find it in our old state. - it.add(cell); - } while (++i < nCells && (cell=cells.get(i)).getTimestamp() == writePtr); - } while (i < nCells && cell.getTimestamp() <= readPtr); - - // Generate point delete markers for the prior row deletion of the old index value. - // The write timestamp is the next timestamp, not the current timestamp, - // as the earliest cells are the current values for the row (and we don't - // want to delete the current row). - if (oldResult != null) { - TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef, oldResult); - generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); - } - // Generate point delete markers for the new index value. - // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not - // generate deletes. We would have generated the delete above based on the state - // of the previous row. The delete markers do not give us the state we need to - // delete. 
- if (hasPuts) { - newResult = Result.create(singleTimeCells); - // First row may represent the current state which we don't want to delete - if (writePtr > readPtr) { - TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef, newResult); - generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state); - } - oldResult = newResult; - } else { - oldResult = null; - } - } while (i < nCells); - } - } + @Override + public byte[] getCurrentRowKey() { + return mutation.getRow(); } - private void generateDeletes(PhoenixIndexMetaData indexMetaData, - Collection> indexUpdates, - byte[] attribValue, TxTableState state) throws IOException { - byte[] regionStartKey = this.regionStartKey; - byte[] regionEndKey = this.regionEndKey; - if (services != null && indexMetaData.hasLocalIndexes()) { - try { - HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, state.getCurrentRowKey()); - regionStartKey = tableRegionLocation.getRegion().getStartKey(); - regionEndKey = tableRegionLocation.getRegion().getEndKey(); - } catch (SQLException e) { - throw new IOException(e); - } - } - Iterable deletes = codec.getIndexDeletes(state, indexMetaData, regionStartKey, regionEndKey); - for (IndexUpdate delete : deletes) { - if (delete.isValid()) { - delete.getUpdate().setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, attribValue); - indexUpdates.add(new Pair(delete.getUpdate(),delete.getTableName())); - } - } + @Override + public List getIndexColumnHints() { + return Collections.emptyList(); } - private boolean generatePuts( - PhoenixIndexMetaData indexMetaData, - Collection> indexUpdates, - TxTableState state) - throws IOException { - state.applyMutation(); - byte[] regionStartKey = this.regionStartKey; - byte[] regionEndKey = this.regionEndKey; - if (services != null && indexMetaData.hasLocalIndexes()) { - try { - HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, state.getCurrentRowKey()); - regionStartKey = tableRegionLocation.getRegion().getStartKey(); - regionEndKey = tableRegionLocation.getRegion().getEndKey(); - } catch (SQLException e) { - throw new IOException(e); - } - } - Iterable puts = - codec.getIndexUpserts(state, indexMetaData, regionStartKey, regionEndKey, true); - boolean validPut = false; - for (IndexUpdate put : puts) { - if (put.isValid()) { - indexUpdates.add(new Pair(put.getUpdate(),put.getTableName())); - validPut = true; + private void applyMutation() { + for (Cell cell : pendingUpdates) { + if (cell.getType() == Cell.Type.Delete || cell.getType() == Cell.Type.DeleteColumn) { + ColumnReference ref = new ColumnReference(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + valueMap.remove(ref); + } else if ( + cell.getType() == Cell.Type.DeleteFamily + || cell.getType() == Cell.Type.DeleteFamilyVersion + ) { + for (ColumnReference ref : indexedColumns) { + if ( + ref.matchesFamily(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength()) + ) { + valueMap.remove(ref); } + } + } else if (cell.getType() == Cell.Type.Put) { + ColumnReference ref = new ColumnReference(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + if (indexedColumns.contains(ref)) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength()); + valueMap.put(ref, ptr); + } + } else { + throw new IllegalStateException("Unexpected mutation type for " + cell); } - return validPut; + } } + @Override + public Collection getPendingUpdate() { + return pendingUpdates; + } - private static class TxTableState implements TableState { - private final Mutation mutation; - private final long currentTimestamp; - private final List pendingUpdates; - private final Set indexedColumns; - private final Map valueMap; - - private TxTableState(Set indexedColumns, long currentTimestamp, Mutation mutation) { - this.currentTimestamp = currentTimestamp; - this.indexedColumns = indexedColumns; - this.mutation = mutation; - int estimatedSize = indexedColumns.size(); - this.valueMap = Maps.newHashMapWithExpectedSize(estimatedSize); - this.pendingUpdates = Lists.newArrayListWithExpectedSize(estimatedSize); - try { - CellScanner scanner = mutation.cellScanner(); - while (scanner.advance()) { - Cell cell = scanner.current(); - pendingUpdates.add(PhoenixKeyValueUtil.maybeCopyCell(cell)); - } - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - } - - public TxTableState(Set indexedColumns, long currentTimestamp, Mutation m, ColumnReference emptyColRef, Result r) { - this(indexedColumns, currentTimestamp, m); - - for (ColumnReference ref : indexedColumns) { - Cell cell = r.getColumnLatestCell(ref.getFamily(), ref.getQualifier()); - if (cell != null) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - valueMap.put(ref, ptr); - } - } - } - - @Override - public long getCurrentTimestamp() { - return currentTimestamp; - } - - @Override - public byte[] getCurrentRowKey() { - return mutation.getRow(); - } + @Override + public Pair getIndexUpdateState( + Collection indexedColumns, boolean ignoreNewerMutations, + boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData) throws IOException { + // TODO: creating these objects over and over again is wasteful + ColumnTracker tracker = new ColumnTracker(indexedColumns); + ValueGetter getter = new AbstractValueGetter() { @Override - public List getIndexColumnHints() { - return Collections.emptyList(); + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) + throws IOException { + return valueMap.get(ref); } - private void applyMutation() { - for (Cell cell : pendingUpdates) { - if (cell.getType() == Cell.Type.Delete || cell.getType() == Cell.Type.DeleteColumn) { - ColumnReference ref = new ColumnReference(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); - valueMap.remove(ref); - } else if (cell.getType() == Cell.Type.DeleteFamily || cell.getType() == Cell.Type.DeleteFamilyVersion) { - for (ColumnReference ref : indexedColumns) { - if (ref.matchesFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) { - valueMap.remove(ref); - } - } - } else if (cell.getType() == Cell.Type.Put){ - ColumnReference ref = new ColumnReference(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); - if (indexedColumns.contains(ref)) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - valueMap.put(ref, ptr); - } - } else { - throw new IllegalStateException("Unexpected 
mutation type for " + cell); - } - } - } - @Override - public Collection getPendingUpdate() { - return pendingUpdates; + public byte[] getRowKey() { + return mutation.getRow(); } - @Override - public Pair getIndexUpdateState(Collection indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData) - throws IOException { - // TODO: creating these objects over and over again is wasteful - ColumnTracker tracker = new ColumnTracker(indexedColumns); - ValueGetter getter = new AbstractValueGetter() { - - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException { - return valueMap.get(ref); - } - - @Override - public byte[] getRowKey() { - return mutation.getRow(); - } - - }; - Pair pair = new Pair(getter, new IndexUpdate(tracker)); - return pair; - } + }; + Pair pair = + new Pair(getter, new IndexUpdate(tracker)); + return pair; } - - public static PhoenixTxIndexMutationGenerator newGenerator(final PhoenixConnection connection, - PTable table, List indexes, Map attributes) - throws SQLException { - final List indexMaintainers = Lists.newArrayListWithExpectedSize(indexes.size()); - for (PTable index : indexes) { - IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); - indexMaintainers.add(maintainer); - } - IndexMetaDataCache indexMetaDataCache = new IndexMetaDataCache() { - - @Override - public void close() throws IOException {} - - @Override - public List getIndexMaintainers() { - return indexMaintainers; - } - - @Override - public PhoenixTransactionContext getTransactionContext() { - PhoenixTransactionContext context = connection.getMutationState().getPhoenixTransactionContext(); - return context.newTransactionContext(context, true); - } - - @Override - public int getClientVersion() { - return MetaDataProtocol.PHOENIX_VERSION; - } - - }; - try { - PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaData(indexMetaDataCache, attributes); - return new PhoenixTxIndexMutationGenerator(connection.getQueryServices(),connection.getQueryServices().getConfiguration(), indexMetaData, - table.getPhysicalName().getBytes(), null, null); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + } + + public static PhoenixTxIndexMutationGenerator newGenerator(final PhoenixConnection connection, + PTable table, List indexes, Map attributes) throws SQLException { + final List indexMaintainers = + Lists.newArrayListWithExpectedSize(indexes.size()); + for (PTable index : indexes) { + IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); + indexMaintainers.add(maintainer); + } + IndexMetaDataCache indexMetaDataCache = new IndexMetaDataCache() { + + @Override + public void close() throws IOException { + } + + @Override + public List getIndexMaintainers() { + return indexMaintainers; + } + + @Override + public PhoenixTransactionContext getTransactionContext() { + PhoenixTransactionContext context = + connection.getMutationState().getPhoenixTransactionContext(); + return context.newTransactionContext(context, true); + } + + @Override + public int getClientVersion() { + return MetaDataProtocol.PHOENIX_VERSION; + } + + }; + try { + PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaData(indexMetaDataCache, attributes); + return new PhoenixTxIndexMutationGenerator(connection.getQueryServices(), + connection.getQueryServices().getConfiguration(), indexMetaData, + table.getPhysicalName().getBytes(), null, null); + } catch (IOException e) { + throw 
new RuntimeException(e); // Impossible } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContext.java index 89dd082881b..ebe0b274142 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,12 +22,12 @@ public interface RuntimeContext { - public abstract void defineCorrelateVariable(String variableId, TableRef def); + public abstract void defineCorrelateVariable(String variableId, TableRef def); - public abstract TableRef getCorrelateVariableDef(String variableId); + public abstract TableRef getCorrelateVariableDef(String variableId); - public abstract void setCorrelateVariableValue(String variableId, Tuple value); + public abstract void setCorrelateVariableValue(String variableId, Tuple value); - public abstract Tuple getCorrelateVariableValue(String variableId); + public abstract Tuple getCorrelateVariableValue(String variableId); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContextImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContextImpl.java index 5787e330b78..4a8e396502d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContextImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/RuntimeContextImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,66 +21,62 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; public class RuntimeContextImpl implements RuntimeContext { - Map correlateVariables; + Map correlateVariables; - public RuntimeContextImpl() { - this.correlateVariables = Maps.newHashMap(); - } - - @Override - public void defineCorrelateVariable(String variableId, TableRef def) { - this.correlateVariables.put(variableId, new VariableEntry(def)); - } - - @Override - public TableRef getCorrelateVariableDef(String variableId) { - VariableEntry entry = this.correlateVariables.get(variableId); - if (entry == null) - throw new RuntimeException("Variable '" + variableId + "' undefined."); - - return entry.getDef(); + public RuntimeContextImpl() { + this.correlateVariables = Maps.newHashMap(); + } + + @Override + public void defineCorrelateVariable(String variableId, TableRef def) { + this.correlateVariables.put(variableId, new VariableEntry(def)); + } + + @Override + public TableRef getCorrelateVariableDef(String variableId) { + VariableEntry entry = this.correlateVariables.get(variableId); + if (entry == null) throw new RuntimeException("Variable '" + variableId + "' undefined."); + + return entry.getDef(); + } + + @Override + public void setCorrelateVariableValue(String variableId, Tuple value) { + VariableEntry entry = this.correlateVariables.get(variableId); + if (entry == null) throw new RuntimeException("Variable '" + variableId + "' undefined."); + + entry.setValue(value); + } + + @Override + public Tuple getCorrelateVariableValue(String variableId) { + VariableEntry entry = this.correlateVariables.get(variableId); + if (entry == null) throw new RuntimeException("Variable '" + variableId + "' undefined."); + + return entry.getValue(); + } + + private static class VariableEntry { + private final TableRef def; + private Tuple value; + + VariableEntry(TableRef def) { + this.def = def; } - - @Override - public void setCorrelateVariableValue(String variableId, Tuple value) { - VariableEntry entry = this.correlateVariables.get(variableId); - if (entry == null) - throw new RuntimeException("Variable '" + variableId + "' undefined."); - - entry.setValue(value); + + TableRef getDef() { + return def; } - @Override - public Tuple getCorrelateVariableValue(String variableId) { - VariableEntry entry = this.correlateVariables.get(variableId); - if (entry == null) - throw new RuntimeException("Variable '" + variableId + "' undefined."); - - return entry.getValue(); + Tuple getValue() { + return value; } - - private static class VariableEntry { - private final TableRef def; - private Tuple value; - - VariableEntry(TableRef def) { - this.def = def; - } - - TableRef getDef() { - return def; - } - - Tuple getValue() { - return value; - } - - void setValue(Tuple value) { - this.value = value; - } + + void setValue(Tuple value) { + this.value = value; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ScanPlan.java index ed01ab882fc..b61e10cdff5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ScanPlan.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/ScanPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.execute; - import static org.apache.phoenix.util.ScanUtil.isPacingScannersPossible; import static org.apache.phoenix.util.ScanUtil.isRoundRobinPossible; @@ -29,12 +28,10 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.expression.OrderByExpression; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; @@ -45,6 +42,7 @@ import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.execute.visitor.ByteCountVisitor; import org.apache.phoenix.execute.visitor.QueryPlanVisitor; +import org.apache.phoenix.expression.OrderByExpression; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.iterate.BaseResultIterators; import org.apache.phoenix.iterate.ChunkedResultIterator; @@ -74,6 +72,7 @@ import org.apache.phoenix.schema.SaltingUtil; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.stats.StatisticsUtil; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; import org.apache.phoenix.util.CostUtil; import org.apache.phoenix.util.ExpressionUtil; import org.apache.phoenix.util.QueryUtil; @@ -82,324 +81,337 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * * Query plan for a basic table scan - * - * * @since 0.1 */ public class ScanPlan extends BaseQueryPlan { - private static final Logger LOGGER = LoggerFactory.getLogger(ScanPlan.class); - private List splits; - private List> scans; - private boolean allowPageFilter; - private boolean isSerial; - private boolean isDataToScanWithinThreshold; - private Long serialRowsEstimate; - private Long serialBytesEstimate; - private Long serialEstimateInfoTs; - private OrderBy actualOutputOrderBy; - private Optional rowOffset; + private static final Logger LOGGER = LoggerFactory.getLogger(ScanPlan.class); + private List splits; + private List> scans; + private boolean allowPageFilter; + private boolean isSerial; + private boolean isDataToScanWithinThreshold; + private Long serialRowsEstimate; + private Long serialBytesEstimate; + private Long serialEstimateInfoTs; + private OrderBy actualOutputOrderBy; + private Optional rowOffset; - public ScanPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, Integer limit, Integer offset, - OrderBy orderBy, ParallelIteratorFactory parallelIteratorFactory, boolean allowPageFilter, QueryPlan dataPlan, Optional rowOffset) throws SQLException { - super(context, statement, table, projector, context.getBindManager().getParameterMetaData(), limit,offset, orderBy, GroupBy.EMPTY_GROUP_BY, - parallelIteratorFactory != null ? 
parallelIteratorFactory : - buildResultIteratorFactory(context, statement, table, orderBy, limit, offset, allowPageFilter), dataPlan); - this.allowPageFilter = allowPageFilter; - boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty(); - if (isOrdered) { // TopN - serializeScanRegionObserverIntoScan(context.getScan(), - limit == null ? -1 : QueryUtil.getOffsetLimit(limit, offset), - orderBy.getOrderByExpressions(), projector.getEstimatedRowByteSize()); - ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION); - } - Integer perScanLimit = !allowPageFilter || isOrdered ? null : limit; - perScanLimit = QueryUtil.getOffsetLimit(perScanLimit, offset); - Pair estimate = getEstimateOfDataSizeToScanIfWithinThreshold(context, table.getTable(), perScanLimit); - this.isDataToScanWithinThreshold = estimate != null; - this.isSerial = isSerial(context, statement, tableRef, orderBy, isDataToScanWithinThreshold); - if (isSerial) { - serialBytesEstimate = estimate.getFirst(); - serialRowsEstimate = estimate.getSecond(); - serialEstimateInfoTs = StatisticsUtil.NOT_STATS_BASED_TS; - } - this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, context); - this.rowOffset = rowOffset; + public ScanPlan(StatementContext context, FilterableStatement statement, TableRef table, + RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, + ParallelIteratorFactory parallelIteratorFactory, boolean allowPageFilter, QueryPlan dataPlan, + Optional rowOffset) throws SQLException { + super(context, statement, table, projector, context.getBindManager().getParameterMetaData(), + limit, offset, orderBy, GroupBy.EMPTY_GROUP_BY, + parallelIteratorFactory != null + ? parallelIteratorFactory + : buildResultIteratorFactory(context, statement, table, orderBy, limit, offset, + allowPageFilter), + dataPlan); + this.allowPageFilter = allowPageFilter; + boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty(); + if (isOrdered) { // TopN + serializeScanRegionObserverIntoScan(context.getScan(), + limit == null ? -1 : QueryUtil.getOffsetLimit(limit, offset), + orderBy.getOrderByExpressions(), projector.getEstimatedRowByteSize()); + ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION); + } + Integer perScanLimit = !allowPageFilter || isOrdered ? null : limit; + perScanLimit = QueryUtil.getOffsetLimit(perScanLimit, offset); + Pair estimate = + getEstimateOfDataSizeToScanIfWithinThreshold(context, table.getTable(), perScanLimit); + this.isDataToScanWithinThreshold = estimate != null; + this.isSerial = isSerial(context, statement, tableRef, orderBy, isDataToScanWithinThreshold); + if (isSerial) { + serialBytesEstimate = estimate.getFirst(); + serialRowsEstimate = estimate.getSecond(); + serialEstimateInfoTs = StatisticsUtil.NOT_STATS_BASED_TS; } + this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, context); + this.rowOffset = rowOffset; + } - // Static because called from tests - public static void serializeScanRegionObserverIntoScan(Scan scan, int limit, - List orderByExpressions, int estimatedRowSize) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); // TODO: size? 
- try { - DataOutputStream output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, limit); - WritableUtils.writeVInt(output, estimatedRowSize); - WritableUtils.writeVInt(output, orderByExpressions.size()); - for (OrderByExpression orderingCol : orderByExpressions) { - orderingCol.write(output); - } - scan.setAttribute(BaseScannerRegionObserverConstants.TOPN, stream.toByteArray()); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + // Static because called from tests + public static void serializeScanRegionObserverIntoScan(Scan scan, int limit, + List orderByExpressions, int estimatedRowSize) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); // TODO: size? + try { + DataOutputStream output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, limit); + WritableUtils.writeVInt(output, estimatedRowSize); + WritableUtils.writeVInt(output, orderByExpressions.size()); + for (OrderByExpression orderingCol : orderByExpressions) { + orderingCol.write(output); + } + scan.setAttribute(BaseScannerRegionObserverConstants.TOPN, stream.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } - private static boolean isSerial(StatementContext context, FilterableStatement statement, - TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException { - if (isDataWithinThreshold) { - PTable table = tableRef.getTable(); - boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); - boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); - if (!canBeExecutedSerially) { - if (hasSerialHint) { - LOGGER.warn("This query cannot be executed serially. Ignoring the hint"); - } - return false; - } - return true; + private static boolean isSerial(StatementContext context, FilterableStatement statement, + TableRef tableRef, OrderBy orderBy, boolean isDataWithinThreshold) throws SQLException { + if (isDataWithinThreshold) { + PTable table = tableRef.getTable(); + boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL); + boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context); + if (!canBeExecutedSerially) { + if (hasSerialHint) { + LOGGER.warn("This query cannot be executed serially. Ignoring the hint"); } return false; + } + return true; } - - /** - * @return Pair of numbers in which the first part is estimated number of bytes that will be - * scanned and the second part is estimated number of rows. Returned value is null if - * estimated size of data to scan is beyond a threshold. - * @throws SQLException - */ - private static Pair getEstimateOfDataSizeToScanIfWithinThreshold(StatementContext context, PTable table, Integer perScanLimit) throws SQLException { - Scan scan = context.getScan(); - ConnectionQueryServices services = context.getConnection().getQueryServices(); - long estRowSize = SchemaUtil.estimateRowSize(table); - long regionSize = services.getProps().getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - if (perScanLimit == null || scan.getFilter() != null) { - /* - * If a limit is not provided or if we have a filter, then we are not able to decide whether - * the amount of data we need to scan is less than the threshold. 
- */ - return null; - } - float factor = - services.getProps().getFloat(QueryServices.LIMITED_QUERY_SERIAL_THRESHOLD, - QueryServicesOptions.DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD); - long threshold = (long)(factor * regionSize); - long estimatedBytes = perScanLimit * estRowSize; - long estimatedRows = perScanLimit; - return (perScanLimit * estRowSize < threshold) ? new Pair<>(estimatedBytes, estimatedRows) : null; - } - - @SuppressWarnings("deprecation") - private static ParallelIteratorFactory buildResultIteratorFactory(StatementContext context, FilterableStatement statement, - TableRef tableRef, OrderBy orderBy, Integer limit,Integer offset, boolean allowPageFilter) throws SQLException { - - if ((isSerial(context, statement, tableRef, orderBy, getEstimateOfDataSizeToScanIfWithinThreshold(context, tableRef.getTable(), QueryUtil.getOffsetLimit(limit, offset)) != null) - || isRoundRobinPossible(orderBy, context) || isPacingScannersPossible(context))) { - return ParallelIteratorFactory.NOOP_FACTORY; - } - ParallelIteratorFactory spoolingResultIteratorFactory = - new SpoolingResultIterator.SpoolingResultIteratorFactory( - context.getConnection().getQueryServices()); + return false; + } - // If we're doing an order by then we need the full result before we can do anything, - // so we don't bother chunking it. If we're just doing a simple scan then we chunk - // the scan to have a quicker initial response. - if (!orderBy.getOrderByExpressions().isEmpty()) { - return spoolingResultIteratorFactory; - } else { - return new ChunkedResultIterator.ChunkedResultIteratorFactory( - spoolingResultIteratorFactory, context.getConnection().getMutationState(), tableRef); - } + /** + * @return Pair of numbers in which the first part is estimated number of bytes that will be + * scanned and the second part is estimated number of rows. Returned value is null if + * estimated size of data to scan is beyond a threshold. + */ + private static Pair getEstimateOfDataSizeToScanIfWithinThreshold( + StatementContext context, PTable table, Integer perScanLimit) throws SQLException { + Scan scan = context.getScan(); + ConnectionQueryServices services = context.getConnection().getQueryServices(); + long estRowSize = SchemaUtil.estimateRowSize(table); + long regionSize = services.getProps().getLong(HConstants.HREGION_MAX_FILESIZE, + HConstants.DEFAULT_MAX_FILE_SIZE); + if (perScanLimit == null || scan.getFilter() != null) { + /* + * If a limit is not provided or if we have a filter, then we are not able to decide whether + * the amount of data we need to scan is less than the threshold. + */ + return null; } + float factor = services.getProps().getFloat(QueryServices.LIMITED_QUERY_SERIAL_THRESHOLD, + QueryServicesOptions.DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD); + long threshold = (long) (factor * regionSize); + long estimatedBytes = perScanLimit * estRowSize; + long estimatedRows = perScanLimit; + return (perScanLimit * estRowSize < threshold) + ? new Pair<>(estimatedBytes, estimatedRows) + : null; + } - @Override - public Cost getCost() { - Long byteCount = null; - try { - byteCount = getEstimatedBytesToScan(); - } catch (SQLException e) { - // ignored. 
- } - Double outputBytes = this.accept(new ByteCountVisitor()); + @SuppressWarnings("deprecation") + private static ParallelIteratorFactory buildResultIteratorFactory(StatementContext context, + FilterableStatement statement, TableRef tableRef, OrderBy orderBy, Integer limit, + Integer offset, boolean allowPageFilter) throws SQLException { - if (byteCount == null || outputBytes == null) { - return Cost.UNKNOWN; - } + if ( + (isSerial(context, statement, tableRef, orderBy, + getEstimateOfDataSizeToScanIfWithinThreshold(context, tableRef.getTable(), + QueryUtil.getOffsetLimit(limit, offset)) != null) + || isRoundRobinPossible(orderBy, context) || isPacingScannersPossible(context)) + ) { + return ParallelIteratorFactory.NOOP_FACTORY; + } + ParallelIteratorFactory spoolingResultIteratorFactory = + new SpoolingResultIterator.SpoolingResultIteratorFactory( + context.getConnection().getQueryServices()); - int parallelLevel = CostUtil.estimateParallelLevel( - true, context.getConnection().getQueryServices()); - Cost cost = new Cost(0, 0, byteCount); - if (!orderBy.getOrderByExpressions().isEmpty()) { - Cost orderByCost = CostUtil.estimateOrderByCost(byteCount, outputBytes, parallelLevel); - cost = cost.plus(orderByCost); - } - return cost; + // If we're doing an order by then we need the full result before we can do anything, + // so we don't bother chunking it. If we're just doing a simple scan then we chunk + // the scan to have a quicker initial response. + if (!orderBy.getOrderByExpressions().isEmpty()) { + return spoolingResultIteratorFactory; + } else { + return new ChunkedResultIterator.ChunkedResultIteratorFactory(spoolingResultIteratorFactory, + context.getConnection().getMutationState(), tableRef); } + } - @Override - public List getSplits() { - if (splits == null) - return Collections.emptyList(); - else - return splits; + @Override + public Cost getCost() { + Long byteCount = null; + try { + byteCount = getEstimatedBytesToScan(); + } catch (SQLException e) { + // ignored. } + Double outputBytes = this.accept(new ByteCountVisitor()); - @Override - public List> getScans() { - if (scans == null) - return Collections.emptyList(); - else - return scans; + if (byteCount == null || outputBytes == null) { + return Cost.UNKNOWN; } - private static boolean isOffsetPossibleOnServer(StatementContext context, OrderBy orderBy, Integer offset, - boolean isSalted, IndexType indexType) { - return offset != null && orderBy.getOrderByExpressions().isEmpty() - && !((isSalted || indexType == IndexType.LOCAL) - && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)); + int parallelLevel = + CostUtil.estimateParallelLevel(true, context.getConnection().getQueryServices()); + Cost cost = new Cost(0, 0, byteCount); + if (!orderBy.getOrderByExpressions().isEmpty()) { + Cost orderByCost = CostUtil.estimateOrderByCost(byteCount, outputBytes, parallelLevel); + cost = cost.plus(orderByCost); } + return cost; + } - @Override - protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, Map caches) throws SQLException { - // Set any scan attributes before creating the scanner, as it will be too late afterwards - scan.setAttribute(BaseScannerRegionObserverConstants.NON_AGGREGATE_QUERY, QueryConstants.TRUE); - ResultIterator scanner; - TableRef tableRef = this.getTableRef(); - PTable table = tableRef.getTable(); - boolean isSalted = table.getBucketNum() != null; - /* If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if - * limit is provided, run query serially. 
+ @Override + public List getSplits() { + if (splits == null) return Collections.emptyList(); + else return splits; + } + + @Override + public List> getScans() { + if (scans == null) return Collections.emptyList(); + else return scans; + } + + private static boolean isOffsetPossibleOnServer(StatementContext context, OrderBy orderBy, + Integer offset, boolean isSalted, IndexType indexType) { + return offset != null && orderBy.getOrderByExpressions().isEmpty() + && !((isSalted || indexType == IndexType.LOCAL) + && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)); + } + + @Override + protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, + Map caches) throws SQLException { + // Set any scan attributes before creating the scanner, as it will be too late afterwards + scan.setAttribute(BaseScannerRegionObserverConstants.NON_AGGREGATE_QUERY, QueryConstants.TRUE); + ResultIterator scanner; + TableRef tableRef = this.getTableRef(); + PTable table = tableRef.getTable(); + boolean isSalted = table.getBucketNum() != null; + /* + * If no limit or topN, use parallel iterator so that we get results faster. Otherwise, if limit + * is provided, run query serially. + */ + boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty(); + Integer perScanLimit = + !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset); + boolean isOffsetOnServer = + isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType()); + /* + * For queries that are doing a row key order by and are not possibly querying more than a + * threshold worth of data, then we only need to initialize scanners corresponding to the first + * (or last, if reverse) scan per region. + */ + boolean initFirstScanOnly = + (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) + && isDataToScanWithinThreshold; + BaseResultIterators iterators; + if (isOffsetOnServer) { + iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, + scanGrouper, scan, caches, dataPlan); + } else if (isSerial) { + iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, + scanGrouper, scan, caches, dataPlan); + } else { + iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, + scan, initFirstScanOnly, caches, dataPlan); + } + estimatedRows = iterators.getEstimatedRowCount(); + estimatedSize = iterators.getEstimatedByteCount(); + estimateInfoTimestamp = iterators.getEstimateInfoTimestamp(); + splits = iterators.getSplits(); + scans = iterators.getScans(); + if (isOffsetOnServer) { + scanner = new ConcatResultIterator(iterators); + if (limit != null) { + scanner = new LimitingResultIterator(scanner, limit); + } + } else if (isOrdered) { + scanner = + new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions()); + } else { + if ( + (isSalted || table.getIndexType() == IndexType.LOCAL) + && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context) + ) { + /* + * For salted tables or local index, a merge sort is needed if: 1) The config + * phoenix.query.force.rowkeyorder is set to true 2) Or if the query has an order by that + * wants to sort the results by the row key (forward or reverse ordering) */ - boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty(); - Integer perScanLimit = !allowPageFilter || isOrdered ? 
null : QueryUtil.getOffsetLimit(limit, offset); - boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType()); + scanner = new MergeSortRowKeyResultIterator(iterators, + isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); + } else if (useRoundRobinIterator()) { /* - * For queries that are doing a row key order by and are not possibly querying more than a - * threshold worth of data, then we only need to initialize scanners corresponding to the - * first (or last, if reverse) scan per region. + * For any kind of tables, round robin is possible if there is no ordering of rows needed. */ - boolean initFirstScanOnly = - (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) - && isDataToScanWithinThreshold; - BaseResultIterators iterators; - if (isOffsetOnServer) { - iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan, caches, dataPlan); - } else if (isSerial) { - iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan, caches, dataPlan); - } else { - iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly, caches, dataPlan); - } - estimatedRows = iterators.getEstimatedRowCount(); - estimatedSize = iterators.getEstimatedByteCount(); - estimateInfoTimestamp = iterators.getEstimateInfoTimestamp(); - splits = iterators.getSplits(); - scans = iterators.getScans(); - if (isOffsetOnServer) { - scanner = new ConcatResultIterator(iterators); - if (limit != null) { - scanner = new LimitingResultIterator(scanner, limit); - } - } else if (isOrdered) { - scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions()); - } else { - if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) { - /* - * For salted tables or local index, a merge sort is needed if: - * 1) The config phoenix.query.force.rowkeyorder is set to true - * 2) Or if the query has an order by that wants to sort - * the results by the row key (forward or reverse ordering) - */ - scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY); - } else if (useRoundRobinIterator()) { - /* - * For any kind of tables, round robin is possible if there is - * no ordering of rows needed. 
- */ - scanner = new RoundRobinResultIterator(iterators, this); - } else { - scanner = new ConcatResultIterator(iterators); - } - if (offset != null) { - scanner = new OffsetResultIterator(scanner, offset); - } - if (limit != null) { - scanner = new LimitingResultIterator(scanner, limit); - } - } - - if (context.getSequenceManager().getSequenceCount() > 0) { - scanner = new SequenceResultIterator(scanner, context.getSequenceManager()); - } - return scanner; - } - - @Override - public boolean useRoundRobinIterator() throws SQLException { - return ScanUtil.isRoundRobinPossible(orderBy, context); + scanner = new RoundRobinResultIterator(iterators, this); + } else { + scanner = new ConcatResultIterator(iterators); + } + if (offset != null) { + scanner = new OffsetResultIterator(scanner, offset); + } + if (limit != null) { + scanner = new LimitingResultIterator(scanner, limit); + } } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + if (context.getSequenceManager().getSequenceCount() > 0) { + scanner = new SequenceResultIterator(scanner, context.getSequenceManager()); } + return scanner; + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - if (isSerial) { - return serialRowsEstimate; - } - return super.getEstimatedRowsToScan(); - } + @Override + public boolean useRoundRobinIterator() throws SQLException { + return ScanUtil.isRoundRobinPossible(orderBy, context); + } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - if (isSerial) { - return serialBytesEstimate; - } - return super.getEstimatedBytesToScan(); - } + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - if (isSerial) { - return serialEstimateInfoTs; - } - return super.getEstimateInfoTimestamp(); + @Override + public Long getEstimatedRowsToScan() throws SQLException { + if (isSerial) { + return serialRowsEstimate; } + return super.getEstimatedRowsToScan(); + } - private static OrderBy convertActualOutputOrderBy(OrderBy orderBy, StatementContext statementContext) throws SQLException { - if(!orderBy.isEmpty()) { - return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); - } - - if(!ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, statementContext)) { - return OrderBy.EMPTY_ORDER_BY; - } + @Override + public Long getEstimatedBytesToScan() throws SQLException { + if (isSerial) { + return serialBytesEstimate; + } + return super.getEstimatedBytesToScan(); + } - TableRef tableRef = statementContext.getResolver().getTables().get(0); - return ExpressionUtil.getOrderByFromTable( - tableRef, - statementContext.getConnection(), - orderBy == OrderBy.REV_ROW_KEY_ORDER_BY).getFirst(); + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + if (isSerial) { + return serialEstimateInfoTs; } + return super.getEstimateInfoTimestamp(); + } - @Override - public List getOutputOrderBys() { - return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + private static OrderBy convertActualOutputOrderBy(OrderBy orderBy, + StatementContext statementContext) throws SQLException { + if (!orderBy.isEmpty()) { + return OrderBy.convertCompiledOrderByToOutputOrderBy(orderBy); } - public Optional getRowOffset() { - return this.rowOffset; + if (!ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, statementContext)) { + return OrderBy.EMPTY_ORDER_BY; } -} \ No newline at end of file + + TableRef tableRef = 
statementContext.getResolver().getTables().get(0); + return ExpressionUtil.getOrderByFromTable(tableRef, statementContext.getConnection(), + orderBy == OrderBy.REV_ROW_KEY_ORDER_BY).getFirst(); + } + + @Override + public List getOutputOrderBys() { + return OrderBy.wrapForOutputOrderBys(this.actualOutputOrderBy); + } + + public Optional getRowOffset() { + return this.rowOffset; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java index c22ac0ff18d..d47dcfdf651 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,8 +40,7 @@ import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.QueryCompiler; @@ -62,12 +61,11 @@ import org.apache.phoenix.iterate.PhoenixQueues; import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.iterate.SizeAwareQueue; -import org.apache.phoenix.jdbc.PhoenixParameterMetaData; import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import org.apache.phoenix.optimize.Cost; import org.apache.phoenix.parse.FilterableStatement; -import org.apache.phoenix.parse.OrderByNode; import org.apache.phoenix.parse.JoinTableNode.JoinType; +import org.apache.phoenix.parse.OrderByNode; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; @@ -81,927 +79,900 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.ValueBitSet; import org.apache.phoenix.schema.tuple.Tuple; -import org.apache.phoenix.util.SchemaUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.util.SchemaUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class SortMergeJoinPlan implements QueryPlan { - private static final Logger LOGGER = LoggerFactory.getLogger(SortMergeJoinPlan.class); - private static final byte[] EMPTY_PTR = new byte[0]; - - private final StatementContext context; - private final FilterableStatement statement; - private final TableRef table; - /** - * In {@link QueryCompiler#compileJoinQuery},{@link JoinType#Right} is converted - * to {@link JoinType#Left}. 
- */ - private final JoinType joinType; - private final QueryPlan lhsPlan; - private final QueryPlan rhsPlan; - private final List lhsKeyExpressions; - private final List rhsKeyExpressions; - private final KeyValueSchema joinedSchema; - private final KeyValueSchema lhsSchema; - private final KeyValueSchema rhsSchema; - private final int rhsFieldPosition; - private final boolean isSingleValueOnly; - private final Set tableRefs; - private final long thresholdBytes; - private final boolean spoolingEnabled; - private Long estimatedBytes; - private Long estimatedRows; - private Long estimateInfoTs; - private boolean getEstimatesCalled; - private List actualOutputOrderBys; - - public SortMergeJoinPlan( - StatementContext context, - FilterableStatement statement, - TableRef table, - JoinType type, - QueryPlan lhsPlan, - QueryPlan rhsPlan, - Pair,List> lhsAndRhsKeyExpressions, - List rhsKeyExpressions, - PTable joinedTable, - PTable lhsTable, - PTable rhsTable, - int rhsFieldPosition, - boolean isSingleValueOnly, - Pair,List> lhsAndRhsOrderByNodes) throws SQLException { - if (type == JoinType.Right) throw new IllegalArgumentException("JoinType should not be " + type); - this.context = context; - this.statement = statement; - this.table = table; - this.joinType = type; - this.lhsPlan = lhsPlan; - this.rhsPlan = rhsPlan; - this.lhsKeyExpressions = lhsAndRhsKeyExpressions.getFirst(); - this.rhsKeyExpressions = lhsAndRhsKeyExpressions.getSecond(); - this.joinedSchema = buildSchema(joinedTable); - this.lhsSchema = buildSchema(lhsTable); - this.rhsSchema = buildSchema(rhsTable); - this.rhsFieldPosition = rhsFieldPosition; - this.isSingleValueOnly = isSingleValueOnly; - this.tableRefs = Sets.newHashSetWithExpectedSize(lhsPlan.getSourceRefs().size() + rhsPlan.getSourceRefs().size()); - this.tableRefs.addAll(lhsPlan.getSourceRefs()); - this.tableRefs.addAll(rhsPlan.getSourceRefs()); - this.thresholdBytes = - context.getConnection().getQueryServices().getProps().getLongBytes( - QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); - this.spoolingEnabled = - context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED); - this.actualOutputOrderBys = convertActualOutputOrderBy(lhsAndRhsOrderByNodes.getFirst(), lhsAndRhsOrderByNodes.getSecond(), context); - } - - @Override - public Operation getOperation() { - return statement.getOperation(); - } - - private static KeyValueSchema buildSchema(PTable table) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - if (table != null) { - for (PColumn column : table.getColumns()) { - if (!SchemaUtil.isPKColumn(column)) { - builder.addField(column); - } - } - } - return builder.build(); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return iterator(scanGrouper, null); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return joinType == JoinType.Semi || joinType == JoinType.Anti ? 
- new SemiAntiJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper)) : - new BasicJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper)); - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - List steps = Lists.newArrayList(); - steps.add("SORT-MERGE-JOIN (" + joinType.toString().toUpperCase() + ") TABLES"); - ExplainPlan lhsExplainPlan = lhsPlan.getExplainPlan(); - List lhsPlanSteps = lhsExplainPlan.getPlanSteps(); - ExplainPlanAttributes lhsPlanAttributes = - lhsExplainPlan.getPlanStepsAsAttributes(); - ExplainPlanAttributesBuilder lhsPlanBuilder = - new ExplainPlanAttributesBuilder(lhsPlanAttributes); - lhsPlanBuilder.setAbstractExplainPlan("SORT-MERGE-JOIN (" - + joinType.toString().toUpperCase() + ")"); - - for (String step : lhsPlanSteps) { - steps.add(" " + step); - } - steps.add("AND" + (rhsSchema.getFieldCount() == 0 ? " (SKIP MERGE)" : "")); - - ExplainPlan rhsExplainPlan = rhsPlan.getExplainPlan(); - List rhsPlanSteps = rhsExplainPlan.getPlanSteps(); - ExplainPlanAttributes rhsPlanAttributes = - rhsExplainPlan.getPlanStepsAsAttributes(); - ExplainPlanAttributesBuilder rhsPlanBuilder = - new ExplainPlanAttributesBuilder(rhsPlanAttributes); - - lhsPlanBuilder.setRhsJoinQueryExplainPlan(rhsPlanBuilder.build()); - - for (String step : rhsPlanSteps) { - steps.add(" " + step); - } - return new ExplainPlan(steps, lhsPlanBuilder.build()); + private static final Logger LOGGER = LoggerFactory.getLogger(SortMergeJoinPlan.class); + private static final byte[] EMPTY_PTR = new byte[0]; + + private final StatementContext context; + private final FilterableStatement statement; + private final TableRef table; + /** + * In {@link QueryCompiler#compileJoinQuery},{@link JoinType#Right} is converted to + * {@link JoinType#Left}. 
+ */ + private final JoinType joinType; + private final QueryPlan lhsPlan; + private final QueryPlan rhsPlan; + private final List lhsKeyExpressions; + private final List rhsKeyExpressions; + private final KeyValueSchema joinedSchema; + private final KeyValueSchema lhsSchema; + private final KeyValueSchema rhsSchema; + private final int rhsFieldPosition; + private final boolean isSingleValueOnly; + private final Set tableRefs; + private final long thresholdBytes; + private final boolean spoolingEnabled; + private Long estimatedBytes; + private Long estimatedRows; + private Long estimateInfoTs; + private boolean getEstimatesCalled; + private List actualOutputOrderBys; + + public SortMergeJoinPlan(StatementContext context, FilterableStatement statement, TableRef table, + JoinType type, QueryPlan lhsPlan, QueryPlan rhsPlan, + Pair, List> lhsAndRhsKeyExpressions, + List rhsKeyExpressions, PTable joinedTable, PTable lhsTable, PTable rhsTable, + int rhsFieldPosition, boolean isSingleValueOnly, + Pair, List> lhsAndRhsOrderByNodes) throws SQLException { + if (type == JoinType.Right) + throw new IllegalArgumentException("JoinType should not be " + type); + this.context = context; + this.statement = statement; + this.table = table; + this.joinType = type; + this.lhsPlan = lhsPlan; + this.rhsPlan = rhsPlan; + this.lhsKeyExpressions = lhsAndRhsKeyExpressions.getFirst(); + this.rhsKeyExpressions = lhsAndRhsKeyExpressions.getSecond(); + this.joinedSchema = buildSchema(joinedTable); + this.lhsSchema = buildSchema(lhsTable); + this.rhsSchema = buildSchema(rhsTable); + this.rhsFieldPosition = rhsFieldPosition; + this.isSingleValueOnly = isSingleValueOnly; + this.tableRefs = Sets + .newHashSetWithExpectedSize(lhsPlan.getSourceRefs().size() + rhsPlan.getSourceRefs().size()); + this.tableRefs.addAll(lhsPlan.getSourceRefs()); + this.tableRefs.addAll(rhsPlan.getSourceRefs()); + this.thresholdBytes = context.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES); + this.spoolingEnabled = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED); + this.actualOutputOrderBys = convertActualOutputOrderBy(lhsAndRhsOrderByNodes.getFirst(), + lhsAndRhsOrderByNodes.getSecond(), context); + } + + @Override + public Operation getOperation() { + return statement.getOperation(); + } + + private static KeyValueSchema buildSchema(PTable table) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + if (table != null) { + for (PColumn column : table.getColumns()) { + if (!SchemaUtil.isPKColumn(column)) { + builder.addField(column); + } + } + } + return builder.build(); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return iterator(scanGrouper, null); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + return joinType == JoinType.Semi || joinType == JoinType.Anti + ? 
new SemiAntiJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper)) + : new BasicJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper)); + } + + @Override + public ResultIterator iterator() throws SQLException { + return iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + List steps = Lists.newArrayList(); + steps.add("SORT-MERGE-JOIN (" + joinType.toString().toUpperCase() + ") TABLES"); + ExplainPlan lhsExplainPlan = lhsPlan.getExplainPlan(); + List lhsPlanSteps = lhsExplainPlan.getPlanSteps(); + ExplainPlanAttributes lhsPlanAttributes = lhsExplainPlan.getPlanStepsAsAttributes(); + ExplainPlanAttributesBuilder lhsPlanBuilder = + new ExplainPlanAttributesBuilder(lhsPlanAttributes); + lhsPlanBuilder + .setAbstractExplainPlan("SORT-MERGE-JOIN (" + joinType.toString().toUpperCase() + ")"); + + for (String step : lhsPlanSteps) { + steps.add(" " + step); + } + steps.add("AND" + (rhsSchema.getFieldCount() == 0 ? " (SKIP MERGE)" : "")); + + ExplainPlan rhsExplainPlan = rhsPlan.getExplainPlan(); + List rhsPlanSteps = rhsExplainPlan.getPlanSteps(); + ExplainPlanAttributes rhsPlanAttributes = rhsExplainPlan.getPlanStepsAsAttributes(); + ExplainPlanAttributesBuilder rhsPlanBuilder = + new ExplainPlanAttributesBuilder(rhsPlanAttributes); + + lhsPlanBuilder.setRhsJoinQueryExplainPlan(rhsPlanBuilder.build()); + + for (String step : rhsPlanSteps) { + steps.add(" " + step); + } + return new ExplainPlan(steps, lhsPlanBuilder.build()); + } + + @Override + public Cost getCost() { + Double byteCount = this.accept(new ByteCountVisitor()); + + if (byteCount == null) { + return Cost.UNKNOWN; + } + + Cost cost = new Cost(0, 0, byteCount); + return cost.plus(lhsPlan.getCost()).plus(rhsPlan.getCost()); + } + + @Override + public StatementContext getContext() { + return context; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return context.getBindManager().getParameterMetaData(); + } + + @Override + public long getEstimatedSize() { + return lhsPlan.getEstimatedSize() + rhsPlan.getEstimatedSize(); + } + + @Override + public TableRef getTableRef() { + return table; + } + + @Override + public RowProjector getProjector() { + return null; + } + + @Override + public Integer getLimit() { + return null; + } + + @Override + public Integer getOffset() { + return null; + } + + @Override + public OrderBy getOrderBy() { + return null; + } + + @Override + public GroupBy getGroupBy() { + return null; + } + + @Override + public List getSplits() { + return Collections. emptyList(); + } + + @Override + public List> getScans() { + return Collections.> emptyList(); + } + + @Override + public FilterableStatement getStatement() { + return statement; + } + + @Override + public boolean isDegenerate() { + return false; + } + + @Override + public boolean isRowKeyOrdered() { + return false; + } + + public JoinType getJoinType() { + return joinType; + } + + private static SQLException closeIterators(ResultIterator lhsIterator, + ResultIterator rhsIterator) { + SQLException e = null; + try { + lhsIterator.close(); + } catch (Throwable e1) { + e = e1 instanceof SQLException ? (SQLException) e1 : new SQLException(e1); + } + try { + rhsIterator.close(); + } catch (Throwable e2) { + SQLException e22 = e2 instanceof SQLException ? 
(SQLException) e2 : new SQLException(e2); + if (e != null) { + e.setNextException(e22); + } else { + e = e22; + } + } + return e; + } + + /** + * close the futures and threadPoolExecutor,ignore exception. + */ + private static void clearThreadPoolExecutor(ExecutorService threadPoolExecutor, + List> futures) { + for (Future future : futures) { + try { + future.cancel(true); + } catch (Throwable ignore) { + LOGGER.error("cancel future error", ignore); + } + } + + try { + threadPoolExecutor.shutdownNow(); + } catch (Throwable ignore) { + LOGGER.error("shutdownNow threadPoolExecutor error", ignore); + } + } + + @VisibleForTesting + public class BasicJoinIterator implements ResultIterator { + private final ResultIterator lhsIterator; + private final ResultIterator rhsIterator; + private boolean initialized; + private Tuple lhsTuple; + private Tuple rhsTuple; + private JoinKey lhsKey; + private JoinKey rhsKey; + private Tuple nextLhsTuple; + private Tuple nextRhsTuple; + private JoinKey nextLhsKey; + private JoinKey nextRhsKey; + private ValueBitSet destBitSet; + private ValueBitSet lhsBitSet; + private ValueBitSet rhsBitSet; + private byte[] emptyProjectedValue; + private SizeAwareQueue queue; + private Iterator queueIterator; + private boolean joinResultNullBecauseOneSideNull = false; + + public BasicJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) { + this.lhsIterator = lhsIterator; + this.rhsIterator = rhsIterator; + this.initialized = false; + this.lhsTuple = null; + this.rhsTuple = null; + this.lhsKey = new JoinKey(lhsKeyExpressions); + this.rhsKey = new JoinKey(rhsKeyExpressions); + this.nextLhsTuple = null; + this.nextRhsTuple = null; + this.nextLhsKey = new JoinKey(lhsKeyExpressions); + this.nextRhsKey = new JoinKey(rhsKeyExpressions); + this.destBitSet = ValueBitSet.newInstance(joinedSchema); + this.lhsBitSet = ValueBitSet.newInstance(lhsSchema); + this.rhsBitSet = ValueBitSet.newInstance(rhsSchema); + lhsBitSet.clear(); + int len = lhsBitSet.getEstimatedLength(); + this.emptyProjectedValue = new byte[len]; + lhsBitSet.toBytes(emptyProjectedValue, 0); + this.queue = PhoenixQueues.newTupleQueue(spoolingEnabled, thresholdBytes); + this.queueIterator = null; + } + + public boolean isJoinResultNullBecauseOneSideNull() { + return this.joinResultNullBecauseOneSideNull; + } + + public boolean isInitialized() { + return this.initialized; } @Override - public Cost getCost() { - Double byteCount = this.accept(new ByteCountVisitor()); - - if (byteCount == null) { - return Cost.UNKNOWN; + public void close() throws SQLException { + SQLException sqlException = closeIterators(lhsIterator, rhsIterator); + try { + queue.close(); + } catch (IOException t) { + if (sqlException != null) { + sqlException.setNextException( + new SQLException("Also encountered exception while closing queue", t)); + } else { + sqlException = new SQLException("Error while closing queue", t); } - - Cost cost = new Cost(0, 0, byteCount); - return cost.plus(lhsPlan.getCost()).plus(rhsPlan.getCost()); - } - - @Override - public StatementContext getContext() { - return context; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return context.getBindManager().getParameterMetaData(); - } - - @Override - public long getEstimatedSize() { - return lhsPlan.getEstimatedSize() + rhsPlan.getEstimatedSize(); - } - - @Override - public TableRef getTableRef() { - return table; - } - - @Override - public RowProjector getProjector() { - return null; - } - - @Override - public Integer getLimit() { - 
return null; + } + if (sqlException != null) { + LOGGER.error("BasicJoinIterator close error!", sqlException); + } } @Override - public Integer getOffset() { - return null; - } - - @Override - public OrderBy getOrderBy() { - return null; - } + public Tuple next() throws SQLException { + if (!initialized) { + init(); + } - @Override - public GroupBy getGroupBy() { + if (this.joinResultNullBecauseOneSideNull) { return null; - } - - @Override - public List getSplits() { - return Collections. emptyList(); - } - - @Override - public List> getScans() { - return Collections.> emptyList(); - } + } + + Tuple next = null; + while (next == null && !isEnd()) { + if (queueIterator != null) { + if (queueIterator.hasNext()) { + next = join(lhsTuple, queueIterator.next()); + } else { + boolean eq = nextLhsTuple != null && lhsKey.equals(nextLhsKey); + advance(true); + if (eq) { + queueIterator = queue.iterator(); + } else { + queue.clear(); + queueIterator = null; + } + } + } else if (lhsTuple != null) { + if (rhsTuple != null) { + if (lhsKey.equals(rhsKey)) { + next = join(lhsTuple, rhsTuple); + if (nextLhsTuple != null && lhsKey.equals(nextLhsKey)) { + try { + queue.add(rhsTuple); + } catch (IllegalStateException e) { + throw new PhoenixIOException(e); + } + if (nextRhsTuple == null || !rhsKey.equals(nextRhsKey)) { + queueIterator = queue.iterator(); + advance(true); + } else if (isSingleValueOnly) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build() + .buildException(); + } + } else if (nextRhsTuple == null || !rhsKey.equals(nextRhsKey)) { + advance(true); + } else if (isSingleValueOnly) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build() + .buildException(); + } + advance(false); + } else if (lhsKey.compareTo(rhsKey) < 0) { + if (joinType == JoinType.Full || joinType == JoinType.Left) { + next = join(lhsTuple, null); + } + advance(true); + } else { + if (joinType == JoinType.Full) { + next = join(null, rhsTuple); + } + advance(false); + } + } else { // left-join or full-join + next = join(lhsTuple, null); + advance(true); + } + } else { // full-join + next = join(null, rhsTuple); + advance(false); + } + } - @Override - public FilterableStatement getStatement() { - return statement; + return next; } @Override - public boolean isDegenerate() { - return false; + public void explain(List planSteps) { } @Override - public boolean isRowKeyOrdered() { - return false; + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { } - public JoinType getJoinType() { - return joinType; - } - - private static SQLException closeIterators(ResultIterator lhsIterator, ResultIterator rhsIterator) { - SQLException e = null; - try { - lhsIterator.close(); - } catch (Throwable e1) { - e = e1 instanceof SQLException ? (SQLException)e1 : new SQLException(e1); + private void doInit(boolean lhs) throws SQLException { + if (lhs) { + nextLhsTuple = lhsIterator.next(); + if (nextLhsTuple != null) { + nextLhsKey.evaluate(nextLhsTuple); } - try { - rhsIterator.close(); - } catch (Throwable e2) { - SQLException e22 = e2 instanceof SQLException ? 
(SQLException)e2 : new SQLException(e2); - if (e != null) { - e.setNextException(e22); - } else { - e = e22; - } + advance(true); + } else { + nextRhsTuple = rhsIterator.next(); + if (nextRhsTuple != null) { + nextRhsKey.evaluate(nextRhsTuple); } - return e; + advance(false); + } } /** - * close the futures and threadPoolExecutor,ignore exception. - * @param threadPoolExecutor - * @param futures + * Parallel init, when: 1. {@link #lhsTuple} is null for inner join or left join. 2. + * {@link #rhsTuple} is null for inner join. we could conclude that the join result is null + * early, set {@link #joinResultNullBecauseOneSideNull} true. */ - private static void clearThreadPoolExecutor( - ExecutorService threadPoolExecutor, - List> futures) { - for(Future future : futures) { - try { - future.cancel(true); - } catch(Throwable ignore) { - LOGGER.error("cancel future error", ignore); - } - } - - try { - threadPoolExecutor.shutdownNow(); - } catch(Throwable ignore) { - LOGGER.error("shutdownNow threadPoolExecutor error", ignore); - } - } - - @VisibleForTesting - public class BasicJoinIterator implements ResultIterator { - private final ResultIterator lhsIterator; - private final ResultIterator rhsIterator; - private boolean initialized; - private Tuple lhsTuple; - private Tuple rhsTuple; - private JoinKey lhsKey; - private JoinKey rhsKey; - private Tuple nextLhsTuple; - private Tuple nextRhsTuple; - private JoinKey nextLhsKey; - private JoinKey nextRhsKey; - private ValueBitSet destBitSet; - private ValueBitSet lhsBitSet; - private ValueBitSet rhsBitSet; - private byte[] emptyProjectedValue; - private SizeAwareQueue queue; - private Iterator queueIterator; - private boolean joinResultNullBecauseOneSideNull = false; - - public BasicJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) { - this.lhsIterator = lhsIterator; - this.rhsIterator = rhsIterator; - this.initialized = false; - this.lhsTuple = null; - this.rhsTuple = null; - this.lhsKey = new JoinKey(lhsKeyExpressions); - this.rhsKey = new JoinKey(rhsKeyExpressions); - this.nextLhsTuple = null; - this.nextRhsTuple = null; - this.nextLhsKey = new JoinKey(lhsKeyExpressions); - this.nextRhsKey = new JoinKey(rhsKeyExpressions); - this.destBitSet = ValueBitSet.newInstance(joinedSchema); - this.lhsBitSet = ValueBitSet.newInstance(lhsSchema); - this.rhsBitSet = ValueBitSet.newInstance(rhsSchema); - lhsBitSet.clear(); - int len = lhsBitSet.getEstimatedLength(); - this.emptyProjectedValue = new byte[len]; - lhsBitSet.toBytes(emptyProjectedValue, 0); - this.queue = PhoenixQueues.newTupleQueue(spoolingEnabled, thresholdBytes); - this.queueIterator = null; - } - - public boolean isJoinResultNullBecauseOneSideNull() { - return this.joinResultNullBecauseOneSideNull; - } - - public boolean isInitialized() { - return this.initialized; - } - + private void init() throws SQLException { + ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2); + ExecutorCompletionService executorCompletionService = + new ExecutorCompletionService(threadPoolExecutor); + List> futures = new ArrayList>(2); + futures.add(executorCompletionService.submit(new Callable() { @Override - public void close() throws SQLException { - SQLException sqlException = closeIterators(lhsIterator, rhsIterator); - try { - queue.close(); - } catch (IOException t) { - if (sqlException != null) { - sqlException.setNextException( - new SQLException("Also encountered exception while closing queue", t)); - } else { - sqlException = new SQLException("Error while closing queue",t); 
- } - } - if (sqlException != null) { - LOGGER.error("BasicJoinIterator close error!", sqlException); - } + public Boolean call() throws Exception { + doInit(true); + return lhsTuple == null && ((joinType == JoinType.Inner) || (joinType == JoinType.Left)); } + })); + futures.add(executorCompletionService.submit(new Callable() { @Override - public Tuple next() throws SQLException { - if (!initialized) { - init(); - } - - if(this.joinResultNullBecauseOneSideNull) { - return null; - } - - Tuple next = null; - while (next == null && !isEnd()) { - if (queueIterator != null) { - if (queueIterator.hasNext()) { - next = join(lhsTuple, queueIterator.next()); - } else { - boolean eq = nextLhsTuple != null && lhsKey.equals(nextLhsKey); - advance(true); - if (eq) { - queueIterator = queue.iterator(); - } else { - queue.clear(); - queueIterator = null; - } - } - } else if (lhsTuple != null) { - if (rhsTuple != null) { - if (lhsKey.equals(rhsKey)) { - next = join(lhsTuple, rhsTuple); - if (nextLhsTuple != null && lhsKey.equals(nextLhsKey)) { - try { - queue.add(rhsTuple); - } catch (IllegalStateException e) { - throw new PhoenixIOException(e); - } - if (nextRhsTuple == null || !rhsKey.equals(nextRhsKey)) { - queueIterator = queue.iterator(); - advance(true); - } else if (isSingleValueOnly) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException(); - } - } else if (nextRhsTuple == null || !rhsKey.equals(nextRhsKey)) { - advance(true); - } else if (isSingleValueOnly) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException(); - } - advance(false); - } else if (lhsKey.compareTo(rhsKey) < 0) { - if (joinType == JoinType.Full || joinType == JoinType.Left) { - next = join(lhsTuple, null); - } - advance(true); - } else { - if (joinType == JoinType.Full) { - next = join(null, rhsTuple); - } - advance(false); - } - } else { // left-join or full-join - next = join(lhsTuple, null); - advance(true); - } - } else { // full-join - next = join(null, rhsTuple); - advance(false); - } - } + public Boolean call() throws Exception { + doInit(false); + return rhsTuple == null && joinType == JoinType.Inner; + } + })); + + try { + Future future = executorCompletionService.take(); + if (future.get()) { + this.joinResultNullBecauseOneSideNull = true; + this.initialized = true; + return; + } + + future = executorCompletionService.take(); + if (future.get()) { + this.joinResultNullBecauseOneSideNull = true; + } + initialized = true; + } catch (Throwable throwable) { + throw new SQLException("failed in init join iterators", throwable); + } finally { + clearThreadPoolExecutor(threadPoolExecutor, futures); + } + } + + private void advance(boolean lhs) throws SQLException { + if (lhs) { + lhsTuple = nextLhsTuple; + lhsKey.set(nextLhsKey); + if (lhsTuple != null) { + nextLhsTuple = lhsIterator.next(); + if (nextLhsTuple != null) { + nextLhsKey.evaluate(nextLhsTuple); + } else { + nextLhsKey.clear(); + } + } + } else { + rhsTuple = nextRhsTuple; + rhsKey.set(nextRhsKey); + if (rhsTuple != null) { + nextRhsTuple = rhsIterator.next(); + if (nextRhsTuple != null) { + nextRhsKey.evaluate(nextRhsTuple); + } else { + nextRhsKey.clear(); + } + } + } + } + + private boolean isEnd() { + return (lhsTuple == null && (rhsTuple == null || joinType != JoinType.Full)) + || (queueIterator == null && rhsTuple == null && joinType == JoinType.Inner); + } + + private Tuple join(Tuple lhs, Tuple rhs) throws SQLException { + try 
{ + ProjectedValueTuple t = null; + if (lhs == null) { + t = new ProjectedValueTuple(rhs, rhs.getValue(0).getTimestamp(), this.emptyProjectedValue, + 0, this.emptyProjectedValue.length, this.emptyProjectedValue.length); + } else if (lhs instanceof ProjectedValueTuple) { + t = (ProjectedValueTuple) lhs; + } else { + ImmutableBytesWritable ptr = context.getTempPtr(); + TupleProjector.decodeProjectedValue(lhs, ptr); + lhsBitSet.clear(); + lhsBitSet.or(ptr); + int bitSetLen = lhsBitSet.getEstimatedLength(); + t = new ProjectedValueTuple(lhs, lhs.getValue(0).getTimestamp(), ptr.get(), + ptr.getOffset(), ptr.getLength(), bitSetLen); + + } + return rhsBitSet == ValueBitSet.EMPTY_VALUE_BITSET + ? t + : TupleProjector.mergeProjectedValue(t, destBitSet, rhs, rhsBitSet, rhsFieldPosition, + true); + } catch (IOException e) { + throw new SQLException(e); + } + } + } + + @VisibleForTesting + public class SemiAntiJoinIterator implements ResultIterator { + private final ResultIterator lhsIterator; + private final ResultIterator rhsIterator; + private final boolean isSemi; + private boolean initialized; + private Tuple lhsTuple; + private Tuple rhsTuple; + private JoinKey lhsKey; + private JoinKey rhsKey; + private boolean joinResultNullBecauseOneSideNull = false; + + public SemiAntiJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) { + if (joinType != JoinType.Semi && joinType != JoinType.Anti) { + throw new IllegalArgumentException( + "Type " + joinType + " is not allowed by " + SemiAntiJoinIterator.class.getName()); + } + this.lhsIterator = lhsIterator; + this.rhsIterator = rhsIterator; + this.isSemi = joinType == JoinType.Semi; + this.initialized = false; + this.lhsTuple = null; + this.rhsTuple = null; + this.lhsKey = new JoinKey(lhsKeyExpressions); + this.rhsKey = new JoinKey(rhsKeyExpressions); + } + + public boolean isJoinResultNullBecauseOneSideNull() { + return this.joinResultNullBecauseOneSideNull; + } + + public boolean isInitialized() { + return this.initialized; + } - return next; - } + @Override + public void close() throws SQLException { + SQLException sqlException = closeIterators(lhsIterator, rhsIterator); + if (sqlException != null) { + LOGGER.error("SemiAntiJoinIterator close error!", sqlException); + } + } + /** + * Parallel init, when: 1. {@link #lhsTuple} is null. 2. {@link #rhsTuple} is null for left semi + * join. we could conclude that the join result is null early, set + * {@link #joinResultNullBecauseOneSideNull} true. 
+ */ + private void init() throws SQLException { + ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2); + ExecutorCompletionService executorCompletionService = + new ExecutorCompletionService(threadPoolExecutor); + List> futures = new ArrayList>(2); + futures.add(executorCompletionService.submit(new Callable() { @Override - public void explain(List planSteps) { + public Boolean call() throws Exception { + advance(true); + return lhsTuple == null; } + })); + futures.add(executorCompletionService.submit(new Callable() { @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - private void doInit(boolean lhs) throws SQLException { - if(lhs) { - nextLhsTuple = lhsIterator.next(); - if (nextLhsTuple != null) { - nextLhsKey.evaluate(nextLhsTuple); - } - advance(true); - } else { - nextRhsTuple = rhsIterator.next(); - if (nextRhsTuple != null) { - nextRhsKey.evaluate(nextRhsTuple); - } - advance(false); - } + public Boolean call() throws Exception { + advance(false); + return (rhsTuple == null && isSemi); } + })); - /** - * Parallel init, when: - * 1. {@link #lhsTuple} is null for inner join or left join. - * 2. {@link #rhsTuple} is null for inner join. - * we could conclude that the join result is null early, set {@link #joinResultNullBecauseOneSideNull} true. - * @throws SQLException - */ - private void init() throws SQLException { - ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2); - ExecutorCompletionService executorCompletionService = - new ExecutorCompletionService(threadPoolExecutor); - List> futures = new ArrayList>(2); - futures.add(executorCompletionService.submit(new Callable() { - @Override - public Boolean call() throws Exception { - doInit(true); - return lhsTuple == null && - ((joinType == JoinType.Inner) || (joinType == JoinType.Left)); - } - })); - - futures.add(executorCompletionService.submit(new Callable() { - @Override - public Boolean call() throws Exception { - doInit(false); - return rhsTuple == null && joinType == JoinType.Inner; - } - })); - - try { - Future future = executorCompletionService.take(); - if(future.get()) { - this.joinResultNullBecauseOneSideNull = true; - this.initialized = true; - return; - } - - future = executorCompletionService.take(); - if(future.get()) { - this.joinResultNullBecauseOneSideNull = true; - } - initialized = true; - } catch (Throwable throwable) { - throw new SQLException("failed in init join iterators", throwable); - } finally { - clearThreadPoolExecutor(threadPoolExecutor, futures); - } + try { + Future future = executorCompletionService.take(); + if (future.get()) { + this.joinResultNullBecauseOneSideNull = true; + this.initialized = true; + return; } - private void advance(boolean lhs) throws SQLException { - if (lhs) { - lhsTuple = nextLhsTuple; - lhsKey.set(nextLhsKey); - if (lhsTuple != null) { - nextLhsTuple = lhsIterator.next(); - if (nextLhsTuple != null) { - nextLhsKey.evaluate(nextLhsTuple); - } else { - nextLhsKey.clear(); - } - } - } else { - rhsTuple = nextRhsTuple; - rhsKey.set(nextRhsKey); - if (rhsTuple != null) { - nextRhsTuple = rhsIterator.next(); - if (nextRhsTuple != null) { - nextRhsKey.evaluate(nextRhsTuple); - } else { - nextRhsKey.clear(); - } - } - } - } - - private boolean isEnd() { - return (lhsTuple == null && (rhsTuple == null || joinType != JoinType.Full)) - || (queueIterator == null && rhsTuple == null && joinType == JoinType.Inner); - } - - private Tuple join(Tuple lhs, Tuple rhs) throws SQLException { - 
try { - ProjectedValueTuple t = null; - if (lhs == null) { - t = new ProjectedValueTuple(rhs, rhs.getValue(0).getTimestamp(), - this.emptyProjectedValue, 0, this.emptyProjectedValue.length, - this.emptyProjectedValue.length); - } else if (lhs instanceof ProjectedValueTuple) { - t = (ProjectedValueTuple) lhs; - } else { - ImmutableBytesWritable ptr = context.getTempPtr(); - TupleProjector.decodeProjectedValue(lhs, ptr); - lhsBitSet.clear(); - lhsBitSet.or(ptr); - int bitSetLen = lhsBitSet.getEstimatedLength(); - t = new ProjectedValueTuple(lhs, lhs.getValue(0).getTimestamp(), - ptr.get(), ptr.getOffset(), ptr.getLength(), bitSetLen); - - } - return rhsBitSet == ValueBitSet.EMPTY_VALUE_BITSET ? - t : TupleProjector.mergeProjectedValue(t, destBitSet, - rhs, rhsBitSet, rhsFieldPosition, true); - } catch (IOException e) { - throw new SQLException(e); - } + future = executorCompletionService.take(); + if (future.get()) { + this.joinResultNullBecauseOneSideNull = true; } + initialized = true; + } catch (Throwable throwable) { + throw new SQLException("failed in init join iterators", throwable); + } finally { + clearThreadPoolExecutor(threadPoolExecutor, futures); + } } - @VisibleForTesting - public class SemiAntiJoinIterator implements ResultIterator { - private final ResultIterator lhsIterator; - private final ResultIterator rhsIterator; - private final boolean isSemi; - private boolean initialized; - private Tuple lhsTuple; - private Tuple rhsTuple; - private JoinKey lhsKey; - private JoinKey rhsKey; - private boolean joinResultNullBecauseOneSideNull = false; - - public SemiAntiJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) { - if (joinType != JoinType.Semi && joinType != JoinType.Anti) { - throw new IllegalArgumentException("Type " + joinType + " is not allowed by " + SemiAntiJoinIterator.class.getName()); - } - this.lhsIterator = lhsIterator; - this.rhsIterator = rhsIterator; - this.isSemi = joinType == JoinType.Semi; - this.initialized = false; - this.lhsTuple = null; - this.rhsTuple = null; - this.lhsKey = new JoinKey(lhsKeyExpressions); - this.rhsKey = new JoinKey(rhsKeyExpressions); - } - - public boolean isJoinResultNullBecauseOneSideNull() { - return this.joinResultNullBecauseOneSideNull; - } - - public boolean isInitialized() { - return this.initialized; - } - - @Override - public void close() throws SQLException { - SQLException sqlException = closeIterators(lhsIterator, rhsIterator); - if (sqlException != null) { - LOGGER.error("SemiAntiJoinIterator close error!", sqlException); - } - } - - /** - * Parallel init, when: - * 1. {@link #lhsTuple} is null. - * 2. {@link #rhsTuple} is null for left semi join. - * we could conclude that the join result is null early, set {@link #joinResultNullBecauseOneSideNull} true. 
- * @throws SQLException - */ - private void init() throws SQLException { - ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2); - ExecutorCompletionService executorCompletionService = - new ExecutorCompletionService(threadPoolExecutor); - List> futures = new ArrayList>(2); - futures.add(executorCompletionService.submit(new Callable() { - @Override - public Boolean call() throws Exception { - advance(true); - return lhsTuple == null; - } - })); - - futures.add(executorCompletionService.submit(new Callable() { - @Override - public Boolean call() throws Exception { - advance(false); - return (rhsTuple == null && isSemi); - } - })); - - try { - Future future = executorCompletionService.take(); - if(future.get()) { - this.joinResultNullBecauseOneSideNull = true; - this.initialized = true; - return; - } - - future = executorCompletionService.take(); - if(future.get()) { - this.joinResultNullBecauseOneSideNull = true; - } - initialized = true; - } catch (Throwable throwable) { - throw new SQLException("failed in init join iterators", throwable); - } finally { - clearThreadPoolExecutor(threadPoolExecutor, futures); - } - } - - @Override - public Tuple next() throws SQLException { - if (!initialized) { - init(); - } + @Override + public Tuple next() throws SQLException { + if (!initialized) { + init(); + } - if(this.joinResultNullBecauseOneSideNull) { - return null; + if (this.joinResultNullBecauseOneSideNull) { + return null; + } + + Tuple next = null; + while (next == null && !isEnd()) { + if (rhsTuple != null) { + if (lhsKey.equals(rhsKey)) { + if (isSemi) { + next = lhsTuple; } - - Tuple next = null; - while (next == null && !isEnd()) { - if (rhsTuple != null) { - if (lhsKey.equals(rhsKey)) { - if (isSemi) { - next = lhsTuple; - } - advance(true); - } else if (lhsKey.compareTo(rhsKey) < 0) { - if (!isSemi) { - next = lhsTuple; - } - advance(true); - } else { - advance(false); - } - } else { - if (!isSemi) { - next = lhsTuple; - } - advance(true); - } + advance(true); + } else if (lhsKey.compareTo(rhsKey) < 0) { + if (!isSemi) { + next = lhsTuple; } - - return next; - } - - /** - * Check if the {@link #next} could exit early when the {@link #lhsTuple} - * or {@link #rhsTuple} is null. 
- */ - @VisibleForTesting - public boolean isEnd() { - return (this.lhsTuple == null) || - (this.rhsTuple == null && this.isSemi); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + advance(true); + } else { + advance(false); + } + } else { + if (!isSemi) { + next = lhsTuple; + } + advance(true); } + } - private void advance(boolean lhs) throws SQLException { - if (lhs) { - lhsTuple = lhsIterator.next(); - if (lhsTuple != null) { - lhsKey.evaluate(lhsTuple); - } else { - lhsKey.clear(); - } - } else { - rhsTuple = rhsIterator.next(); - if (rhsTuple != null) { - rhsKey.evaluate(rhsTuple); - } else { - rhsKey.clear(); - } - } - } - } - - private static class JoinKey implements Comparable { - private final List expressions; - private final List keys; - - public JoinKey(List expressions) { - this.expressions = expressions; - this.keys = Lists.newArrayListWithExpectedSize(expressions.size()); - for (int i = 0; i < expressions.size(); i++) { - this.keys.add(new ImmutableBytesWritable(EMPTY_PTR)); - } - } - - public void evaluate(Tuple tuple) { - for (int i = 0; i < keys.size(); i++) { - if (!expressions.get(i).evaluate(tuple, keys.get(i))) { - keys.get(i).set(EMPTY_PTR); - } - } - } - - public void set(JoinKey other) { - for (int i = 0; i < keys.size(); i++) { - ImmutableBytesWritable key = other.keys.get(i); - this.keys.get(i).set(key.get(), key.getOffset(), key.getLength()); - } - } - - public void clear() { - for (int i = 0; i < keys.size(); i++) { - this.keys.get(i).set(EMPTY_PTR); - } - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof JoinKey)) - return false; - return this.compareTo((JoinKey) other) == 0; - } - - @Override - public int compareTo(JoinKey other) { - for (int i = 0; i < keys.size(); i++) { - int comp = this.keys.get(i).compareTo(other.keys.get(i)); - if (comp != 0) - return comp; - } - - return 0; - } + return next; } - - - @Override - public boolean useRoundRobinIterator() { - return false; + + /** + * Check if the {@link #next} could exit early when the {@link #lhsTuple} or {@link #rhsTuple} + * is null. 
+ */ + @VisibleForTesting + public boolean isEnd() { + return (this.lhsTuple == null) || (this.rhsTuple == null && this.isSemi); } @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + public void explain(List planSteps) { } @Override - public Set getSourceRefs() { - return tableRefs; + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { } - public QueryPlan getLhsPlan() { - return lhsPlan; + private void advance(boolean lhs) throws SQLException { + if (lhs) { + lhsTuple = lhsIterator.next(); + if (lhsTuple != null) { + lhsKey.evaluate(lhsTuple); + } else { + lhsKey.clear(); + } + } else { + rhsTuple = rhsIterator.next(); + if (rhsTuple != null) { + rhsKey.evaluate(rhsTuple); + } else { + rhsKey.clear(); + } + } } + } - public QueryPlan getRhsPlan() { - return rhsPlan; - } + private static class JoinKey implements Comparable { + private final List expressions; + private final List keys; - @Override - public Long getEstimatedRowsToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedRows; + public JoinKey(List expressions) { + this.expressions = expressions; + this.keys = Lists.newArrayListWithExpectedSize(expressions.size()); + for (int i = 0; i < expressions.size(); i++) { + this.keys.add(new ImmutableBytesWritable(EMPTY_PTR)); + } } - @Override - public Long getEstimatedBytesToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); + public void evaluate(Tuple tuple) { + for (int i = 0; i < keys.size(); i++) { + if (!expressions.get(i).evaluate(tuple, keys.get(i))) { + keys.get(i).set(EMPTY_PTR); } - return estimatedBytes; + } } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimateInfoTs; - } - - private void getEstimates() throws SQLException { - getEstimatesCalled = true; - if ((lhsPlan.getEstimatedBytesToScan() == null || rhsPlan.getEstimatedBytesToScan() == null) - || (lhsPlan.getEstimatedRowsToScan() == null - || rhsPlan.getEstimatedRowsToScan() == null) - || (lhsPlan.getEstimateInfoTimestamp() == null - || rhsPlan.getEstimateInfoTimestamp() == null)) { - /* - * If any of the sub plans doesn't have the estimate info available, then we don't - * provide estimate for the overall plan - */ - estimatedBytes = null; - estimatedRows = null; - estimateInfoTs = null; - } else { - estimatedBytes = - add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()), - rhsPlan.getEstimatedBytesToScan()); - estimatedRows = - add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()), - rhsPlan.getEstimatedRowsToScan()); - estimateInfoTs = - getMin(lhsPlan.getEstimateInfoTimestamp(), rhsPlan.getEstimateInfoTimestamp()); - } + public void set(JoinKey other) { + for (int i = 0; i < keys.size(); i++) { + ImmutableBytesWritable key = other.keys.get(i); + this.keys.get(i).set(key.get(), key.getOffset(), key.getLength()); + } } - /** - * We do not use {@link #lhsKeyExpressions} and {@link #rhsKeyExpressions} directly because {@link #lhsKeyExpressions} is compiled by the - * {@link ColumnResolver} of lhs and {@link #rhsKeyExpressions} is compiled by the {@link ColumnResolver} of rhs, so we must recompile use - * the {@link ColumnResolver} of joinProjectedTables. 
- * @param lhsOrderByNodes - * @param rhsOrderByNodes - * @param statementContext - * @return - * @throws SQLException - */ - private static List convertActualOutputOrderBy( - List lhsOrderByNodes, - List rhsOrderByNodes, - StatementContext statementContext) throws SQLException { - - List orderBys = new ArrayList(2); - List lhsOrderByExpressions = - compileOutputOrderByExpressions(lhsOrderByNodes, statementContext); - if(!lhsOrderByExpressions.isEmpty()) { - orderBys.add(new OrderBy(lhsOrderByExpressions)); - } - - List rhsOrderByExpressions = - compileOutputOrderByExpressions(rhsOrderByNodes, statementContext); - if(!rhsOrderByExpressions.isEmpty()) { - orderBys.add(new OrderBy(rhsOrderByExpressions)); - } - if(orderBys.isEmpty()) { - return Collections. emptyList(); - } - return orderBys; - } - - private static List compileOutputOrderByExpressions( - List orderByNodes, - StatementContext statementContext) throws SQLException { - /** - * If there is TableNotFoundException or ColumnNotFoundException, it means that the orderByNodes is not referenced by other parts of the sql, - * so could be ignored. - */ - StatelessExpressionCompiler expressionCompiler = new StatelessExpressionCompiler(statementContext); - List orderByExpressions = new ArrayList(orderByNodes.size()); - for(OrderByNode orderByNode : orderByNodes) { - expressionCompiler.reset(); - Expression expression = null; - try { - expression = orderByNode.getNode().accept(expressionCompiler); - } catch(TableNotFoundException exception) { - return orderByExpressions; - } catch(ColumnNotFoundException exception) { - return orderByExpressions; - } catch(ColumnFamilyNotFoundException exception) { - return orderByExpressions; - } - assert expression != null; - // Note here we don't like OrderByCompiler#compile method to reverse the - // OrderByExpression#isAscending if expression#sortOrder is SortOrder.DESC. That's - // because we compile it for QueryPlan#getOutputOrderBys and the compiled - // OrderByExpression is used for OrderPreservingTracker, not used in - // OrderedResultIterator to compare based on binary representation. - // TODO: We should make a explicit distinction between OrderByExpression for - // OrderPreservingTracker and OrderByExpression for OrderedResultIterator computation. 
- orderByExpressions.add( - OrderByExpression.createByCheckIfOrderByReverse( - expression, - orderByNode.isNullsLast(), - orderByNode.isAscending(), - false)); - } - return orderByExpressions; + public void clear() { + for (int i = 0; i < keys.size(); i++) { + this.keys.get(i).set(EMPTY_PTR); + } } @Override - public List getOutputOrderBys() { - return this.actualOutputOrderBys; + public boolean equals(Object other) { + if (!(other instanceof JoinKey)) return false; + return this.compareTo((JoinKey) other) == 0; } @Override - public boolean isApplicable() { - return true; - } + public int compareTo(JoinKey other) { + for (int i = 0; i < keys.size(); i++) { + int comp = this.keys.get(i).compareTo(other.keys.get(i)); + if (comp != 0) return comp; + } + + return 0; + } + } + + @Override + public boolean useRoundRobinIterator() { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public Set getSourceRefs() { + return tableRefs; + } + + public QueryPlan getLhsPlan() { + return lhsPlan; + } + + public QueryPlan getRhsPlan() { + return rhsPlan; + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedRows; + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedBytes; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimateInfoTs; + } + + private void getEstimates() throws SQLException { + getEstimatesCalled = true; + if ( + (lhsPlan.getEstimatedBytesToScan() == null || rhsPlan.getEstimatedBytesToScan() == null) + || (lhsPlan.getEstimatedRowsToScan() == null || rhsPlan.getEstimatedRowsToScan() == null) + || (lhsPlan.getEstimateInfoTimestamp() == null + || rhsPlan.getEstimateInfoTimestamp() == null) + ) { + /* + * If any of the sub plans doesn't have the estimate info available, then we don't provide + * estimate for the overall plan + */ + estimatedBytes = null; + estimatedRows = null; + estimateInfoTs = null; + } else { + estimatedBytes = add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()), + rhsPlan.getEstimatedBytesToScan()); + estimatedRows = + add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()), rhsPlan.getEstimatedRowsToScan()); + estimateInfoTs = + getMin(lhsPlan.getEstimateInfoTimestamp(), rhsPlan.getEstimateInfoTimestamp()); + } + } + + /** + * We do not use {@link #lhsKeyExpressions} and {@link #rhsKeyExpressions} directly because + * {@link #lhsKeyExpressions} is compiled by the {@link ColumnResolver} of lhs and + * {@link #rhsKeyExpressions} is compiled by the {@link ColumnResolver} of rhs, so we must + * recompile use the {@link ColumnResolver} of joinProjectedTables. + */ + private static List convertActualOutputOrderBy(List lhsOrderByNodes, + List rhsOrderByNodes, StatementContext statementContext) throws SQLException { + + List orderBys = new ArrayList(2); + List lhsOrderByExpressions = + compileOutputOrderByExpressions(lhsOrderByNodes, statementContext); + if (!lhsOrderByExpressions.isEmpty()) { + orderBys.add(new OrderBy(lhsOrderByExpressions)); + } + + List rhsOrderByExpressions = + compileOutputOrderByExpressions(rhsOrderByNodes, statementContext); + if (!rhsOrderByExpressions.isEmpty()) { + orderBys.add(new OrderBy(rhsOrderByExpressions)); + } + if (orderBys.isEmpty()) { + return Collections. 
emptyList(); + } + return orderBys; + } + + private static List compileOutputOrderByExpressions( + List orderByNodes, StatementContext statementContext) throws SQLException { + /** + * If there is TableNotFoundException or ColumnNotFoundException, it means that the orderByNodes + * is not referenced by other parts of the sql, so could be ignored. + */ + StatelessExpressionCompiler expressionCompiler = + new StatelessExpressionCompiler(statementContext); + List orderByExpressions = + new ArrayList(orderByNodes.size()); + for (OrderByNode orderByNode : orderByNodes) { + expressionCompiler.reset(); + Expression expression = null; + try { + expression = orderByNode.getNode().accept(expressionCompiler); + } catch (TableNotFoundException exception) { + return orderByExpressions; + } catch (ColumnNotFoundException exception) { + return orderByExpressions; + } catch (ColumnFamilyNotFoundException exception) { + return orderByExpressions; + } + assert expression != null; + // Note here we don't like OrderByCompiler#compile method to reverse the + // OrderByExpression#isAscending if expression#sortOrder is SortOrder.DESC. That's + // because we compile it for QueryPlan#getOutputOrderBys and the compiled + // OrderByExpression is used for OrderPreservingTracker, not used in + // OrderedResultIterator to compare based on binary representation. + // TODO: We should make a explicit distinction between OrderByExpression for + // OrderPreservingTracker and OrderByExpression for OrderedResultIterator computation. + orderByExpressions.add(OrderByExpression.createByCheckIfOrderByReverse(expression, + orderByNode.isNullsLast(), orderByNode.isAscending(), false)); + } + return orderByExpressions; + } + + @Override + public List getOutputOrderBys() { + return this.actualOutputOrderBys; + } + + @Override + public boolean isApplicable() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java index a9324469368..04555a9f946 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,14 +28,13 @@ import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; -import org.apache.phoenix.compile.OrderPreservingTracker; -import org.apache.phoenix.compile.OrderPreservingTracker.Info; -import org.apache.phoenix.compile.QueryPlan; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.compile.OrderPreservingTracker; +import org.apache.phoenix.compile.OrderPreservingTracker.Info; import org.apache.phoenix.compile.OrderPreservingTracker.Ordering; +import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.execute.visitor.QueryPlanVisitor; import org.apache.phoenix.expression.Expression; @@ -48,165 +47,148 @@ import org.apache.phoenix.schema.ColumnRef; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class TupleProjectionPlan extends DelegateQueryPlan { - private final TupleProjector tupleProjector; - private final Expression postFilter; - private final ColumnResolver columnResolver; - private final List actualOutputOrderBys; - - public TupleProjectionPlan( - QueryPlan plan, - TupleProjector tupleProjector, - StatementContext statementContext, - Expression postFilter) throws SQLException { - super(plan); - if (tupleProjector == null) { - throw new IllegalArgumentException("tupleProjector is null"); - } - this.tupleProjector = tupleProjector; - this.postFilter = postFilter; - if (statementContext != null) { - this.columnResolver = statementContext.getResolver(); - this.actualOutputOrderBys = this.convertInputOrderBys(plan); - } else { - this.columnResolver = null; - this.actualOutputOrderBys = Collections.emptyList(); - } - } + private final TupleProjector tupleProjector; + private final Expression postFilter; + private final ColumnResolver columnResolver; + private final List actualOutputOrderBys; - /** - * Map the expressions in the actualOutputOrderBys of targetQueryPlan to {@link ProjectedColumnExpression}. - * @param targetQueryPlan - * @return - * @throws SQLException - */ - private List convertInputOrderBys(QueryPlan targetQueryPlan) throws SQLException { - List inputOrderBys = targetQueryPlan.getOutputOrderBys(); - if(inputOrderBys.isEmpty()) { - return Collections. 
emptyList(); - } - Expression[] selectColumnExpressions = this.tupleProjector.getExpressions(); - Map selectColumnExpressionToIndex = - new HashMap(selectColumnExpressions.length); - int columnIndex = 0; - for(Expression selectColumnExpression : selectColumnExpressions) { - selectColumnExpressionToIndex.put(selectColumnExpression, columnIndex++); - } - List newOrderBys = new ArrayList(inputOrderBys.size()); - for(OrderBy inputOrderBy : inputOrderBys) { - OrderBy newOrderBy = this.convertSingleInputOrderBy( - targetQueryPlan, - selectColumnExpressionToIndex, - selectColumnExpressions, - inputOrderBy); - if(newOrderBy != OrderBy.EMPTY_ORDER_BY) { - newOrderBys.add(newOrderBy); - } - } - if(newOrderBys.isEmpty()) { - return Collections. emptyList(); - } - return newOrderBys; + public TupleProjectionPlan(QueryPlan plan, TupleProjector tupleProjector, + StatementContext statementContext, Expression postFilter) throws SQLException { + super(plan); + if (tupleProjector == null) { + throw new IllegalArgumentException("tupleProjector is null"); } - - private OrderBy convertSingleInputOrderBy( - QueryPlan targetQueryPlan, - Map selectColumnExpressionToIndex, - Expression[] selectColumnExpressions, - OrderBy inputOrderBy) throws SQLException { - //Here we track targetQueryPlan's output so we use targetQueryPlan's StatementContext - OrderPreservingTracker orderPreservingTracker = new OrderPreservingTracker( - targetQueryPlan.getContext(), - GroupBy.EMPTY_GROUP_BY, - Ordering.UNORDERED, - selectColumnExpressions.length, - Collections.singletonList(inputOrderBy), - null, - null); - for(Expression selectColumnExpression : selectColumnExpressions) { - orderPreservingTracker.track(selectColumnExpression); - } - orderPreservingTracker.isOrderPreserving(); - List orderPreservingTrackInfos = orderPreservingTracker.getOrderPreservingTrackInfos(); - if(orderPreservingTrackInfos.isEmpty()) { - return OrderBy.EMPTY_ORDER_BY; - } - List newOrderByExpressions = new ArrayList(orderPreservingTrackInfos.size()); - for(Info orderPreservingTrackInfo : orderPreservingTrackInfos) { - Expression expression = orderPreservingTrackInfo.getExpression(); - Integer index = selectColumnExpressionToIndex.get(expression); - assert index != null; - ProjectedColumnExpression projectedValueColumnExpression = this.getProjectedValueColumnExpression(index); - OrderByExpression newOrderByExpression = OrderByExpression.createByCheckIfOrderByReverse( - projectedValueColumnExpression, - orderPreservingTrackInfo.isNullsLast(), - orderPreservingTrackInfo.isAscending(), - false); - newOrderByExpressions.add(newOrderByExpression); - } - return new OrderBy(newOrderByExpressions); + this.tupleProjector = tupleProjector; + this.postFilter = postFilter; + if (statementContext != null) { + this.columnResolver = statementContext.getResolver(); + this.actualOutputOrderBys = this.convertInputOrderBys(plan); + } else { + this.columnResolver = null; + this.actualOutputOrderBys = Collections. emptyList(); } + } - private ProjectedColumnExpression getProjectedValueColumnExpression(int columnIndex) throws SQLException { - assert this.columnResolver != null; - TableRef tableRef = this.columnResolver.getTables().get(0); - ColumnRef columnRef = new ColumnRef(tableRef, columnIndex); - return (ProjectedColumnExpression)columnRef.newColumnExpression(); + /** + * Map the expressions in the actualOutputOrderBys of targetQueryPlan to + * {@link ProjectedColumnExpression}. 
+ */ + private List convertInputOrderBys(QueryPlan targetQueryPlan) throws SQLException { + List inputOrderBys = targetQueryPlan.getOutputOrderBys(); + if (inputOrderBys.isEmpty()) { + return Collections. emptyList(); } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = delegate.getExplainPlan(); - List planSteps = Lists.newArrayList(explainPlan.getPlanSteps()); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - if (postFilter != null) { - planSteps.add("CLIENT FILTER BY " + postFilter.toString()); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - newBuilder.setClientFilterBy(postFilter.toString()); - explainPlanAttributes = newBuilder.build(); - } - - return new ExplainPlan(planSteps, explainPlanAttributes); + Expression[] selectColumnExpressions = this.tupleProjector.getExpressions(); + Map selectColumnExpressionToIndex = + new HashMap(selectColumnExpressions.length); + int columnIndex = 0; + for (Expression selectColumnExpression : selectColumnExpressions) { + selectColumnExpressionToIndex.put(selectColumnExpression, columnIndex++); + } + List newOrderBys = new ArrayList(inputOrderBys.size()); + for (OrderBy inputOrderBy : inputOrderBys) { + OrderBy newOrderBy = this.convertSingleInputOrderBy(targetQueryPlan, + selectColumnExpressionToIndex, selectColumnExpressions, inputOrderBy); + if (newOrderBy != OrderBy.EMPTY_ORDER_BY) { + newOrderBys.add(newOrderBy); + } + } + if (newOrderBys.isEmpty()) { + return Collections. emptyList(); } + return newOrderBys; + } - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - ResultIterator iterator = new DelegateResultIterator(delegate.iterator(scanGrouper, scan)) { - - @Override - public Tuple next() throws SQLException { - Tuple tuple = super.next(); - if (tuple == null) - return null; - - return tupleProjector.projectResults(tuple); - } - - @Override - public String toString() { - return "TupleProjectionResultIterator [projector=" + tupleProjector + "]"; - } - }; - - if (postFilter != null) { - iterator = new FilterResultIterator(iterator, postFilter); - } - - return iterator; + private OrderBy convertSingleInputOrderBy(QueryPlan targetQueryPlan, + Map selectColumnExpressionToIndex, Expression[] selectColumnExpressions, + OrderBy inputOrderBy) throws SQLException { + // Here we track targetQueryPlan's output so we use targetQueryPlan's StatementContext + OrderPreservingTracker orderPreservingTracker = new OrderPreservingTracker( + targetQueryPlan.getContext(), GroupBy.EMPTY_GROUP_BY, Ordering.UNORDERED, + selectColumnExpressions.length, Collections.singletonList(inputOrderBy), null, null); + for (Expression selectColumnExpression : selectColumnExpressions) { + orderPreservingTracker.track(selectColumnExpression); } + orderPreservingTracker.isOrderPreserving(); + List orderPreservingTrackInfos = orderPreservingTracker.getOrderPreservingTrackInfos(); + if (orderPreservingTrackInfos.isEmpty()) { + return OrderBy.EMPTY_ORDER_BY; + } + List newOrderByExpressions = + new ArrayList(orderPreservingTrackInfos.size()); + for (Info orderPreservingTrackInfo : orderPreservingTrackInfos) { + Expression expression = orderPreservingTrackInfo.getExpression(); + Integer index = selectColumnExpressionToIndex.get(expression); + assert index != null; + ProjectedColumnExpression projectedValueColumnExpression = + this.getProjectedValueColumnExpression(index); + 
OrderByExpression newOrderByExpression = + OrderByExpression.createByCheckIfOrderByReverse(projectedValueColumnExpression, + orderPreservingTrackInfo.isNullsLast(), orderPreservingTrackInfo.isAscending(), false); + newOrderByExpressions.add(newOrderByExpression); + } + return new OrderBy(newOrderByExpressions); + } + + private ProjectedColumnExpression getProjectedValueColumnExpression(int columnIndex) + throws SQLException { + assert this.columnResolver != null; + TableRef tableRef = this.columnResolver.getTables().get(0); + ColumnRef columnRef = new ColumnRef(tableRef, columnIndex); + return (ProjectedColumnExpression) columnRef.newColumnExpression(); + } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = delegate.getExplainPlan(); + List planSteps = Lists.newArrayList(explainPlan.getPlanSteps()); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + if (postFilter != null) { + planSteps.add("CLIENT FILTER BY " + postFilter.toString()); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + newBuilder.setClientFilterBy(postFilter.toString()); + explainPlanAttributes = newBuilder.build(); } - @Override - public List getOutputOrderBys() { - return this.actualOutputOrderBys; + return new ExplainPlan(planSteps, explainPlanAttributes); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + ResultIterator iterator = new DelegateResultIterator(delegate.iterator(scanGrouper, scan)) { + + @Override + public Tuple next() throws SQLException { + Tuple tuple = super.next(); + if (tuple == null) return null; + + return tupleProjector.projectResults(tuple); + } + + @Override + public String toString() { + return "TupleProjectionResultIterator [projector=" + tupleProjector + "]"; + } + }; + + if (postFilter != null) { + iterator = new FilterResultIterator(iterator, postFilter); } + + return iterator; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public List getOutputOrderBys() { + return this.actualOutputOrderBys; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjector.java index 03c8b00c27d..011281c2694 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjector.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/TupleProjector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.execute; -import static org.apache.phoenix.coprocessorclient.ScanRegionObserverConstants.WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS; import static org.apache.phoenix.coprocessorclient.ScanRegionObserverConstants.DYN_COLS_METADATA_CELL_QUALIFIER; +import static org.apache.phoenix.coprocessorclient.ScanRegionObserverConstants.WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS; import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY; import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_QUALIFIER; import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; @@ -35,7 +35,6 @@ import java.util.List; import java.util.Set; -import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Scan; @@ -59,437 +58,446 @@ import org.apache.phoenix.schema.ValueBitSet; import org.apache.phoenix.schema.tuple.BaseTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; -public class TupleProjector { - private static final String SCAN_PROJECTOR = "scanProjector"; - - private final KeyValueSchema schema; - private final Expression[] expressions; - private ValueBitSet valueSet; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - private static final byte[] OLD_VALUE_COLUMN_QUALIFIER = new byte[0]; - - public TupleProjector(RowProjector rowProjector) { - List columnProjectors = rowProjector.getColumnProjectors(); - int count = columnProjectors.size(); - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - expressions = new Expression[count]; - for (int i = 0; i < count; i++) { - Expression expression = columnProjectors.get(i).getExpression(); - builder.addField(expression); - expressions[i] = expression; - } - schema = builder.build(); - valueSet = ValueBitSet.newInstance(schema); +public class TupleProjector { + private static final String SCAN_PROJECTOR = "scanProjector"; + + private final KeyValueSchema schema; + private final Expression[] expressions; + private ValueBitSet valueSet; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + private static final byte[] OLD_VALUE_COLUMN_QUALIFIER = new byte[0]; + + public TupleProjector(RowProjector rowProjector) { + List columnProjectors = rowProjector.getColumnProjectors(); + int count = columnProjectors.size(); + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + expressions = new Expression[count]; + for (int i = 0; i < count; i++) { + Expression expression = columnProjectors.get(i).getExpression(); + builder.addField(expression); + expressions[i] = expression; } + schema = builder.build(); + valueSet = ValueBitSet.newInstance(schema); + } - public TupleProjector(Expression[] expressions) { - this.expressions = expressions; - KeyValueSchemaBuilder builder 
= new KeyValueSchemaBuilder(0); - for (int i = 0; i < expressions.length; i++) { - builder.addField(expressions[i]); - } - schema = builder.build(); - valueSet = ValueBitSet.newInstance(schema); - } - - public TupleProjector(PTable projectedTable) throws SQLException { - Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED); - List columns = projectedTable.getColumns(); - this.expressions = new Expression[columns.size() - projectedTable.getPKColumns().size()]; - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - int i = 0; - for (PColumn column : columns) { - if (!SchemaUtil.isPKColumn(column)) { - builder.addField(column); - expressions[i++] = ((ProjectedColumn) column).getSourceColumnRef().newColumnExpression(); - } - } - schema = builder.build(); - valueSet = ValueBitSet.newInstance(schema); - } - - public TupleProjector(KeyValueSchema schema, Expression[] expressions) { - this.schema = schema; - this.expressions = expressions; - this.valueSet = ValueBitSet.newInstance(schema); - } - - public void setValueBitSet(ValueBitSet bitSet) { - this.valueSet = bitSet; - } - - public static void serializeProjectorIntoScan(Scan scan, TupleProjector projector, - boolean projectDynColsInWildcardQueries) { - scan.setAttribute(SCAN_PROJECTOR, serializeProjectorIntoBytes(projector)); - if (projectDynColsInWildcardQueries) { - scan.setAttribute(WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS, TRUE_BYTES); - } + public TupleProjector(Expression[] expressions) { + this.expressions = expressions; + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + for (int i = 0; i < expressions.length; i++) { + builder.addField(expressions[i]); } + schema = builder.build(); + valueSet = ValueBitSet.newInstance(schema); + } - /** - * Serialize the projector into a byte array - * @param projector projector to serialize - * @return byte array - */ - public static byte[] serializeProjectorIntoBytes(TupleProjector projector) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - projector.schema.write(output); - int count = projector.expressions.length; - WritableUtils.writeVInt(output, count); - for (int i = 0; i < count; i++) { - WritableUtils.writeVInt(output, - ExpressionType.valueOf(projector.expressions[i]).ordinal()); - projector.expressions[i].write(output); - } - return stream.toByteArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + public TupleProjector(PTable projectedTable) throws SQLException { + Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED); + List columns = projectedTable.getColumns(); + this.expressions = new Expression[columns.size() - projectedTable.getPKColumns().size()]; + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + int i = 0; + for (PColumn column : columns) { + if (!SchemaUtil.isPKColumn(column)) { + builder.addField(column); + expressions[i++] = ((ProjectedColumn) column).getSourceColumnRef().newColumnExpression(); + } } - - public static TupleProjector deserializeProjectorFromScan(Scan scan) { - return deserializeProjectorFromBytes(scan.getAttribute(SCAN_PROJECTOR)); + schema = builder.build(); + valueSet = ValueBitSet.newInstance(schema); + } + + public TupleProjector(KeyValueSchema schema, Expression[] expressions) { + this.schema = schema; + this.expressions = expressions; + this.valueSet = 
ValueBitSet.newInstance(schema); + } + + public void setValueBitSet(ValueBitSet bitSet) { + this.valueSet = bitSet; + } + + public static void serializeProjectorIntoScan(Scan scan, TupleProjector projector, + boolean projectDynColsInWildcardQueries) { + scan.setAttribute(SCAN_PROJECTOR, serializeProjectorIntoBytes(projector)); + if (projectDynColsInWildcardQueries) { + scan.setAttribute(WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS, TRUE_BYTES); } + } - /** - * Deserialize the byte array to form a projector - * @param proj byte array to deserialize - * @return projector - */ - public static TupleProjector deserializeProjectorFromBytes(byte[] proj) { - if (proj == null) { - return null; - } - ByteArrayInputStream stream = new ByteArrayInputStream(proj); - try { - DataInputStream input = new DataInputStream(stream); - KeyValueSchema schema = new KeyValueSchema(); - schema.readFields(input); - int count = WritableUtils.readVInt(input); - Expression[] expressions = new Expression[count]; - for (int i = 0; i < count; i++) { - int ordinal = WritableUtils.readVInt(input); - expressions[i] = ExpressionType.values()[ordinal].newInstance(); - expressions[i].readFields(input); - } - return new TupleProjector(schema, expressions); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + /** + * Serialize the projector into a byte array + * @param projector projector to serialize + * @return byte array + */ + public static byte[] serializeProjectorIntoBytes(TupleProjector projector) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + projector.schema.write(output); + int count = projector.expressions.length; + WritableUtils.writeVInt(output, count); + for (int i = 0; i < count; i++) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(projector.expressions[i]).ordinal()); + projector.expressions[i].write(output); + } + return stream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } - /** - * Iterate over the list of cells returned from the scan and return a tuple projector for the - * dynamic columns by parsing the metadata stored for the list of dynamic columns - * @param result list of cells - * @param dynCols list of dynamic columns to be populated - * @param dynColCells list of cells corresponding to dynamic columns to be populated - * @return The tuple projector corresponding to dynamic columns or null if there are no dynamic - * columns to process - * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf - */ - public static TupleProjector getDynamicColumnsTupleProjector(List result, - List dynCols, List dynColCells) throws InvalidProtocolBufferException { - Set> dynColCellQualifiers = new HashSet<>(); - populateDynColsFromResult(result, dynCols, dynColCellQualifiers); - if (dynCols.isEmpty()) { - return null; - } - populateDynamicColumnCells(result, dynColCellQualifiers, dynColCells); - if (dynColCells.isEmpty()) { - return null; - } - KeyValueSchema dynColsSchema = PhoenixRuntime.buildKeyValueSchema(dynCols); - Expression[] expressions = new Expression[dynCols.size()]; - for (int i = 0; i < dynCols.size(); i++) { - expressions[i] = new KeyValueColumnExpression(dynCols.get(i)); - } - return new TupleProjector(dynColsSchema, 
expressions); - } - - /** - * Populate cells corresponding to dynamic columns - * @param result list of cells - * @param dynColCellQualifiers Set of pairs corresponding to - * cells of dynamic columns - * @param dynColCells Populated list of cells corresponding to dynamic columns - */ - private static void populateDynamicColumnCells(List result, - Set> dynColCellQualifiers, List dynColCells) { - for (Cell c : result) { - Pair famQualPair = new Pair<>(ByteBuffer.wrap(CellUtil.cloneFamily(c)), - ByteBuffer.wrap(CellUtil.cloneQualifier(c))); - if (dynColCellQualifiers.contains(famQualPair)) { - dynColCells.add(c); - } - } + public static TupleProjector deserializeProjectorFromScan(Scan scan) { + return deserializeProjectorFromBytes(scan.getAttribute(SCAN_PROJECTOR)); + } + + /** + * Deserialize the byte array to form a projector + * @param proj byte array to deserialize + */ + public static TupleProjector deserializeProjectorFromBytes(byte[] proj) { + if (proj == null) { + return null; + } + ByteArrayInputStream stream = new ByteArrayInputStream(proj); + try { + DataInputStream input = new DataInputStream(stream); + KeyValueSchema schema = new KeyValueSchema(); + schema.readFields(input); + int count = WritableUtils.readVInt(input); + Expression[] expressions = new Expression[count]; + for (int i = 0; i < count; i++) { + int ordinal = WritableUtils.readVInt(input); + expressions[i] = ExpressionType.values()[ordinal].newInstance(); + expressions[i].readFields(input); + } + return new TupleProjector(schema, expressions); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } - /** - * Iterate over the list of cells and populate dynamic columns - * @param result list of cells - * @param dynCols Populated list of PColumns corresponding to dynamic columns - * @param dynColCellQualifiers Populated set of pairs - * for the cells in the list, which correspond to dynamic columns - * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf - */ - private static void populateDynColsFromResult(List result, List dynCols, - Set> dynColCellQualifiers) - throws InvalidProtocolBufferException { - for (Cell c : result) { - byte[] qual = CellUtil.cloneQualifier(c); - byte[] fam = CellUtil.cloneFamily(c); - int index = Bytes.indexOf(qual, DYN_COLS_METADATA_CELL_QUALIFIER); - - // Contains dynamic column metadata, so add it to the list of dynamic columns - if (index != -1) { - byte[] dynColMetaDataProto = CellUtil.cloneValue(c); - dynCols.add(PColumnImpl.createFromProto( - PTableProtos.PColumn.parseFrom(dynColMetaDataProto))); - // Add the pair for the actual dynamic column. The column qualifier - // of the dynamic column is got by parsing out the known bytes from the shadow cell - // containing the metadata for that column i.e. 
- // DYN_COLS_METADATA_CELL_QUALIFIER - byte[] dynColQual = Arrays.copyOfRange(qual, - index + DYN_COLS_METADATA_CELL_QUALIFIER.length, qual.length); - dynColCellQualifiers.add( - new Pair<>(ByteBuffer.wrap(fam), ByteBuffer.wrap(dynColQual))); - } - } + /** + * Iterate over the list of cells returned from the scan and return a tuple projector for the + * dynamic columns by parsing the metadata stored for the list of dynamic columns + * @param result list of cells + * @param dynCols list of dynamic columns to be populated + * @param dynColCells list of cells corresponding to dynamic columns to be populated + * @return The tuple projector corresponding to dynamic columns or null if there are no dynamic + * columns to process + * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf + */ + public static TupleProjector getDynamicColumnsTupleProjector(List result, + List dynCols, List dynColCells) throws InvalidProtocolBufferException { + Set> dynColCellQualifiers = new HashSet<>(); + populateDynColsFromResult(result, dynCols, dynColCellQualifiers); + if (dynCols.isEmpty()) { + return null; } - - public static class ProjectedValueTuple extends BaseTuple { - ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(); - long timestamp; - ImmutableBytesWritable projectedValue = new ImmutableBytesWritable(); - int bitSetLen; - Cell keyValue; - - public ProjectedValueTuple(Tuple keyBase, long timestamp, byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) { - keyBase.getKey(this.keyPtr); - this.timestamp = timestamp; - this.projectedValue.set(projectedValue, valueOffset, valueLength); - this.bitSetLen = bitSetLen; - } + populateDynamicColumnCells(result, dynColCellQualifiers, dynColCells); + if (dynColCells.isEmpty()) { + return null; + } + KeyValueSchema dynColsSchema = PhoenixRuntime.buildKeyValueSchema(dynCols); + Expression[] expressions = new Expression[dynCols.size()]; + for (int i = 0; i < dynCols.size(); i++) { + expressions[i] = new KeyValueColumnExpression(dynCols.get(i)); + } + return new TupleProjector(dynColsSchema, expressions); + } - public ProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) { - this.keyPtr.set(keyBuffer, keyOffset, keyLength); - this.timestamp = timestamp; - this.projectedValue.set(projectedValue, valueOffset, valueLength); - this.bitSetLen = bitSetLen; - } - - public ImmutableBytesWritable getKeyPtr() { - return keyPtr; - } - - public long getTimestamp() { - return timestamp; - } - - public ImmutableBytesWritable getProjectedValue() { - return projectedValue; - } - - public int getBitSetLength() { - return bitSetLen; - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()); - } + /** + * Populate cells corresponding to dynamic columns + * @param result list of cells + * @param dynColCellQualifiers Set of pairs corresponding to + * cells of dynamic columns + * @param dynColCells Populated list of cells corresponding to dynamic columns + */ + private static void populateDynamicColumnCells(List result, + Set> dynColCellQualifiers, List dynColCells) { + for (Cell c : result) { + Pair famQualPair = new Pair<>(ByteBuffer.wrap(CellUtil.cloneFamily(c)), + ByteBuffer.wrap(CellUtil.cloneQualifier(c))); + if (dynColCellQualifiers.contains(famQualPair)) { + dynColCells.add(c); + } + } + } - @Override - public Cell 
mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { - if (index != 0) { - throw new IndexOutOfBoundsException(Integer.toString(index)); - } - if (dynColsList == null || dynColsList.length == 0) { - return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER); - } - // We put the known reserved bytes before the serialized list of dynamic column - // PColumns to easily parse out the column list on the client - byte[] concatBytes = ByteUtil.concat(projectedValue.get(), - DYN_COLS_METADATA_CELL_QUALIFIER, dynColsList); - ImmutableBytesWritable projectedValueWithDynColsListBytes = - new ImmutableBytesWritable(concatBytes); - keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), - keyPtr.getLength(), VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, - projectedValueWithDynColsListBytes.get(), - projectedValueWithDynColsListBytes.getOffset(), - projectedValueWithDynColsListBytes.getLength()); - return keyValue; - } + /** + * Iterate over the list of cells and populate dynamic columns + * @param result list of cells + * @param dynCols Populated list of PColumns corresponding to dynamic columns + * @param dynColCellQualifiers Populated set of pairs for the + * cells in the list, which correspond to dynamic columns + * @throws InvalidProtocolBufferException Thrown if there is an error parsing byte[] to protobuf + */ + private static void populateDynColsFromResult(List result, List dynCols, + Set> dynColCellQualifiers) throws InvalidProtocolBufferException { + for (Cell c : result) { + byte[] qual = CellUtil.cloneQualifier(c); + byte[] fam = CellUtil.cloneFamily(c); + int index = Bytes.indexOf(qual, DYN_COLS_METADATA_CELL_QUALIFIER); - @Override - public Cell getValue(int index) { - if (index != 0) { - throw new IndexOutOfBoundsException(Integer.toString(index)); - } - return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER); - } + // Contains dynamic column metadata, so add it to the list of dynamic columns + if (index != -1) { + byte[] dynColMetaDataProto = CellUtil.cloneValue(c); + dynCols + .add(PColumnImpl.createFromProto(PTableProtos.PColumn.parseFrom(dynColMetaDataProto))); + // Add the pair for the actual dynamic column. The column qualifier + // of the dynamic column is got by parsing out the known bytes from the shadow cell + // containing the metadata for that column i.e. 
+ // DYN_COLS_METADATA_CELL_QUALIFIER + byte[] dynColQual = + Arrays.copyOfRange(qual, index + DYN_COLS_METADATA_CELL_QUALIFIER.length, qual.length); + dynColCellQualifiers.add(new Pair<>(ByteBuffer.wrap(fam), ByteBuffer.wrap(dynColQual))); + } + } + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - if (keyValue == null) { - keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), - VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength()); - } - return keyValue; - } + public static class ProjectedValueTuple extends BaseTuple { + ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(); + long timestamp; + ImmutableBytesWritable projectedValue = new ImmutableBytesWritable(); + int bitSetLen; + Cell keyValue; - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - ptr.set(projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength()); - return true; - } + public ProjectedValueTuple(Tuple keyBase, long timestamp, byte[] projectedValue, + int valueOffset, int valueLength, int bitSetLen) { + keyBase.getKey(this.keyPtr); + this.timestamp = timestamp; + this.projectedValue.set(projectedValue, valueOffset, valueLength); + this.bitSetLen = bitSetLen; + } - @Override - public boolean isImmutable() { - return true; - } + public ProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, + byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) { + this.keyPtr.set(keyBuffer, keyOffset, keyLength); + this.timestamp = timestamp; + this.projectedValue.set(projectedValue, valueOffset, valueLength); + this.bitSetLen = bitSetLen; + } - @Override - public int size() { - return 1; - } + public ImmutableBytesWritable getKeyPtr() { + return keyPtr; } - - public static class OldProjectedValueTuple extends ProjectedValueTuple { - public OldProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, - byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) { - super(keyBuffer, keyOffset, keyLength, timestamp, projectedValue, valueOffset, valueLength, bitSetLen); - } + public long getTimestamp() { + return timestamp; + } - public OldProjectedValueTuple(Tuple keyBase, long timestamp, byte[] projectedValue, int valueOffset, - int valueLength, int bitSetLen) { - super(keyBase, timestamp, projectedValue, valueOffset, valueLength, bitSetLen); - } + public ImmutableBytesWritable getProjectedValue() { + return projectedValue; + } - @Override - public Cell getValue(int index) { - if (index != 0) { throw new IndexOutOfBoundsException(Integer.toString(index)); } - return getValue(VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER); - } + public int getBitSetLength() { + return bitSetLen; + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - if (keyValue == null) { - keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), - VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER, timestamp, projectedValue.get(), - projectedValue.getOffset(), projectedValue.getLength()); - } - return keyValue; - } - - } - - public ProjectedValueTuple projectResults(Tuple tuple) { - byte[] bytesValue = schema.toBytes(tuple, getExpressions(), valueSet, ptr); - Cell base = tuple.getValue(0); - return new ProjectedValueTuple(base.getRowArray(), base.getRowOffset(), base.getRowLength(), base.getTimestamp(), 
bytesValue, 0, bytesValue.length, valueSet.getEstimatedLength()); - } - - public ProjectedValueTuple projectResults(Tuple tuple, boolean useNewValueQualifier) { - long maxTS = tuple.getValue(0).getTimestamp(); - int nCells = tuple.size(); - for (int i = 1; i < nCells; i++) { - long ts = tuple.getValue(i).getTimestamp(); - if (ts > maxTS) { - maxTS = ts; - } - } - byte[] bytesValue = schema.toBytes(tuple, getExpressions(), valueSet, ptr); - Cell base = tuple.getValue(0); - if (useNewValueQualifier) { - return new ProjectedValueTuple(base.getRowArray(), base.getRowOffset(), base.getRowLength(), maxTS, bytesValue, 0, bytesValue.length, valueSet.getEstimatedLength()); - } else { - return new OldProjectedValueTuple(base.getRowArray(), base.getRowOffset(), base.getRowLength(), maxTS, bytesValue, 0, bytesValue.length, valueSet.getEstimatedLength()); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()); } - - public static void decodeProjectedValue(Tuple tuple, ImmutableBytesWritable ptr) throws IOException { - boolean b = tuple.getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, ptr); - if (!b) { - // fall back to use the old value column qualifier for backward compatibility - b = tuple.getValue(VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER, ptr); - } - if (!b) throw new IOException("Trying to decode a non-projected value."); - } - - public static ProjectedValueTuple mergeProjectedValue(ProjectedValueTuple dest, - ValueBitSet destBitSet, Tuple src, ValueBitSet srcBitSet, int offset, - boolean useNewValueColumnQualifier) throws IOException { - ImmutableBytesWritable destValue = dest.getProjectedValue(); - int origDestBitSetLen = dest.getBitSetLength(); - destBitSet.clear(); - destBitSet.or(destValue, origDestBitSetLen); - ImmutableBytesWritable srcValue = null; - int srcValueLen = 0; - if (src != null) { - srcValue = new ImmutableBytesWritable(); - decodeProjectedValue(src, srcValue); - srcBitSet.clear(); - srcBitSet.or(srcValue); - int origSrcBitSetLen = srcBitSet.getEstimatedLength(); - for (int i = 0; i <= srcBitSet.getMaxSetBit(); i++) { - if (srcBitSet.get(i)) { - destBitSet.set(offset + i); - } - } - srcValueLen = srcValue.getLength() - origSrcBitSetLen; - } - int destBitSetLen = destBitSet.getEstimatedLength(); - byte[] merged = new byte[destValue.getLength() - origDestBitSetLen + srcValueLen + destBitSetLen]; - int o = Bytes.putBytes(merged, 0, destValue.get(), destValue.getOffset(), destValue.getLength() - origDestBitSetLen); - if (src != null) { - o = Bytes.putBytes(merged, o, srcValue.get(), srcValue.getOffset(), srcValueLen); - } - destBitSet.toBytes(merged, o); - return useNewValueColumnQualifier ? 
new ProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, destBitSetLen) : - new OldProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, destBitSetLen); - } - - public KeyValueSchema getSchema() { - return schema; - } - - public Expression[] getExpressions() { - return expressions; - } - - public ValueBitSet getValueBitSet() { - return valueSet; - } - + @Override - public String toString() { - return "TUPLE-PROJECTOR {" + Arrays.toString(expressions) + " ==> " + schema.toString() + "}"; + public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { + if (index != 0) { + throw new IndexOutOfBoundsException(Integer.toString(index)); + } + if (dynColsList == null || dynColsList.length == 0) { + return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER); + } + // We put the known reserved bytes before the serialized list of dynamic column + // PColumns to easily parse out the column list on the client + byte[] concatBytes = + ByteUtil.concat(projectedValue.get(), DYN_COLS_METADATA_CELL_QUALIFIER, dynColsList); + ImmutableBytesWritable projectedValueWithDynColsListBytes = + new ImmutableBytesWritable(concatBytes); + keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), + keyPtr.getLength(), VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, + projectedValueWithDynColsListBytes.get(), projectedValueWithDynColsListBytes.getOffset(), + projectedValueWithDynColsListBytes.getLength()); + return keyValue; } -} + @Override + public Cell getValue(int index) { + if (index != 0) { + throw new IndexOutOfBoundsException(Integer.toString(index)); + } + return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER); + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + if (keyValue == null) { + keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), + keyPtr.getLength(), VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, + projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength()); + } + return keyValue; + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + ptr.set(projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength()); + return true; + } + + @Override + public boolean isImmutable() { + return true; + } + + @Override + public int size() { + return 1; + } + } + + public static class OldProjectedValueTuple extends ProjectedValueTuple { + + public OldProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, + byte[] projectedValue, int valueOffset, int valueLength, int bitSetLen) { + super(keyBuffer, keyOffset, keyLength, timestamp, projectedValue, valueOffset, valueLength, + bitSetLen); + } + + public OldProjectedValueTuple(Tuple keyBase, long timestamp, byte[] projectedValue, + int valueOffset, int valueLength, int bitSetLen) { + super(keyBase, timestamp, projectedValue, valueOffset, valueLength, bitSetLen); + } + + @Override + public Cell getValue(int index) { + if (index != 0) { + throw new IndexOutOfBoundsException(Integer.toString(index)); + } + return getValue(VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER); + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + if (keyValue == null) { + keyValue = PhoenixKeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), + keyPtr.getLength(), VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER, timestamp, + projectedValue.get(), projectedValue.getOffset(), projectedValue.getLength()); + 
} + return keyValue; + } + + } + + public ProjectedValueTuple projectResults(Tuple tuple) { + byte[] bytesValue = schema.toBytes(tuple, getExpressions(), valueSet, ptr); + Cell base = tuple.getValue(0); + return new ProjectedValueTuple(base.getRowArray(), base.getRowOffset(), base.getRowLength(), + base.getTimestamp(), bytesValue, 0, bytesValue.length, valueSet.getEstimatedLength()); + } + + public ProjectedValueTuple projectResults(Tuple tuple, boolean useNewValueQualifier) { + long maxTS = tuple.getValue(0).getTimestamp(); + int nCells = tuple.size(); + for (int i = 1; i < nCells; i++) { + long ts = tuple.getValue(i).getTimestamp(); + if (ts > maxTS) { + maxTS = ts; + } + } + byte[] bytesValue = schema.toBytes(tuple, getExpressions(), valueSet, ptr); + Cell base = tuple.getValue(0); + if (useNewValueQualifier) { + return new ProjectedValueTuple(base.getRowArray(), base.getRowOffset(), base.getRowLength(), + maxTS, bytesValue, 0, bytesValue.length, valueSet.getEstimatedLength()); + } else { + return new OldProjectedValueTuple(base.getRowArray(), base.getRowOffset(), + base.getRowLength(), maxTS, bytesValue, 0, bytesValue.length, + valueSet.getEstimatedLength()); + } + } + + public static void decodeProjectedValue(Tuple tuple, ImmutableBytesWritable ptr) + throws IOException { + boolean b = tuple.getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, ptr); + if (!b) { + // fall back to use the old value column qualifier for backward compatibility + b = tuple.getValue(VALUE_COLUMN_FAMILY, OLD_VALUE_COLUMN_QUALIFIER, ptr); + } + if (!b) throw new IOException("Trying to decode a non-projected value."); + } + + public static ProjectedValueTuple mergeProjectedValue(ProjectedValueTuple dest, + ValueBitSet destBitSet, Tuple src, ValueBitSet srcBitSet, int offset, + boolean useNewValueColumnQualifier) throws IOException { + ImmutableBytesWritable destValue = dest.getProjectedValue(); + int origDestBitSetLen = dest.getBitSetLength(); + destBitSet.clear(); + destBitSet.or(destValue, origDestBitSetLen); + ImmutableBytesWritable srcValue = null; + int srcValueLen = 0; + if (src != null) { + srcValue = new ImmutableBytesWritable(); + decodeProjectedValue(src, srcValue); + srcBitSet.clear(); + srcBitSet.or(srcValue); + int origSrcBitSetLen = srcBitSet.getEstimatedLength(); + for (int i = 0; i <= srcBitSet.getMaxSetBit(); i++) { + if (srcBitSet.get(i)) { + destBitSet.set(offset + i); + } + } + srcValueLen = srcValue.getLength() - origSrcBitSetLen; + } + int destBitSetLen = destBitSet.getEstimatedLength(); + byte[] merged = + new byte[destValue.getLength() - origDestBitSetLen + srcValueLen + destBitSetLen]; + int o = Bytes.putBytes(merged, 0, destValue.get(), destValue.getOffset(), + destValue.getLength() - origDestBitSetLen); + if (src != null) { + o = Bytes.putBytes(merged, o, srcValue.get(), srcValue.getOffset(), srcValueLen); + } + destBitSet.toBytes(merged, o); + return useNewValueColumnQualifier + ? 
new ProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, destBitSetLen) + : new OldProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, + destBitSetLen); + } + + public KeyValueSchema getSchema() { + return schema; + } + + public Expression[] getExpressions() { + return expressions; + } + + public ValueBitSet getValueBitSet() { + return valueSet; + } + + @Override + public String toString() { + return "TUPLE-PROJECTOR {" + Arrays.toString(expressions) + " ==> " + schema.toString() + "}"; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnionPlan.java index e1f08858866..8bdb55fcb35 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnionPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnionPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.compile.ExplainPlan; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.QueryPlan; @@ -53,352 +52,355 @@ import org.apache.phoenix.parse.FilterableStatement; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.TableRef; - import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - public class UnionPlan implements QueryPlan { - private static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K - - private final TableRef tableRef; - private final FilterableStatement statement; - private final ParameterMetaData paramMetaData; - private final OrderBy orderBy; - private final StatementContext parentContext; - private final Integer limit; - private final Integer offset; - private final GroupBy groupBy; - private final RowProjector projector; - private final boolean isDegenerate; - private final List plans; - private UnionResultIterators iterators; - private Long estimatedRows; - private Long estimatedBytes; - private Long estimateInfoTs; - private boolean getEstimatesCalled; - private boolean supportOrderByOptimize = false; - private List outputOrderBys = null; - - public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector, - Integer limit, Integer offset, OrderBy orderBy, GroupBy groupBy, List plans, ParameterMetaData paramMetaData) throws SQLException { - this.parentContext = context; - this.statement = statement; - this.tableRef = table; - this.projector = projector; - this.limit = limit; - this.orderBy = orderBy; - this.groupBy = groupBy; - this.plans = plans; - this.offset= offset; - this.paramMetaData = paramMetaData; - boolean isDegen = true; - for (QueryPlan plan : plans) { - if (plan.getContext().getScanRanges() != ScanRanges.NOTHING) { - isDegen = false; - break; - } - } - this.isDegenerate = isDegen; - } - - /** - * If every subquery in {@link UnionPlan} is ordered, and {@link 
QueryPlan#getOutputOrderBys} - * of each subquery are equal(absolute equality or the same column name is unnecessary, just - * column types are compatible and columns count is same), then it just needs to perform a - * simple merge on the subquery results to ensure the overall order of the union all, see - * comments on {@link QueryCompiler#optimizeUnionOrderByIfPossible}. - */ - private boolean checkIfSupportOrderByOptimize() { - if (!this.orderBy.isEmpty()) { - return false; - } - if (plans.isEmpty()) { - return false; - } - OrderBy prevOrderBy = null; - for (QueryPlan queryPlan : plans) { - List orderBys = queryPlan.getOutputOrderBys(); - if (orderBys.isEmpty() || orderBys.size() > 1) { - return false; - } - OrderBy orderBy = orderBys.get(0); - if (prevOrderBy != null && !OrderBy.equalsForOutputOrderBy(prevOrderBy, orderBy)) { - return false; - } - prevOrderBy = orderBy; - } - return true; - } - - public boolean isSupportOrderByOptimize() { - return this.supportOrderByOptimize; - } - - public void enableCheckSupportOrderByOptimize() { - this.supportOrderByOptimize = checkIfSupportOrderByOptimize(); - this.outputOrderBys = null; - } - - public void disableSupportOrderByOptimize() { - if (!this.supportOrderByOptimize) { - return; - } - this.outputOrderBys = null; - this.supportOrderByOptimize = false; - } - - @Override - public boolean isDegenerate() { - return isDegenerate; - } - - @Override - public List getSplits() { - if (iterators == null) - return null; - return iterators.getSplits(); - } - - @Override - public List> getScans() { - if (iterators == null) - return null; - return iterators.getScans(); - } - - public List getSubPlans() { - return plans; - } - - @Override - public GroupBy getGroupBy() { - return groupBy; - } - - @Override - public OrderBy getOrderBy() { - return orderBy; - } - - @Override - public TableRef getTableRef() { - return tableRef; - } - - @Override - public Integer getLimit() { - return limit; - } - - @Override - public Integer getOffset() { - return offset; - } - - @Override - public RowProjector getProjector() { - return projector; - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator(DefaultParallelScanGrouper.getInstance()); - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return iterator(scanGrouper, null); - } - - @Override - public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - this.iterators = new UnionResultIterators(plans, parentContext); - ResultIterator scanner; - - if (!orderBy.isEmpty()) { // TopN - scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions()); - } else if (this.supportOrderByOptimize) { - //Every subquery is ordered - scanner = new MergeSortTopNResultIterator( - iterators, limit, offset, getOrderByExpressionsWhenSupportOrderByOptimize()); - } else { - scanner = new ConcatResultIterator(iterators); - if (offset != null) { - scanner = new OffsetResultIterator(scanner, offset); - } - if (limit != null) { - scanner = new LimitingResultIterator(scanner, limit); - } - } - return scanner; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - List steps = new ArrayList(); - ExplainPlanAttributesBuilder builder = new ExplainPlanAttributesBuilder(); - String abstractExplainPlan = "UNION ALL OVER " + this.plans.size() - + " QUERIES"; - builder.setAbstractExplainPlan(abstractExplainPlan); - steps.add(abstractExplainPlan); - 
ResultIterator iterator = iterator(); - iterator.explain(steps, builder); - // Indent plans steps nested under union, except last client-side merge/concat step (if there is one) - int offset = !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0; - for (int i = 1 ; i < steps.size()-offset; i++) { - steps.set(i, " " + steps.get(i)); - } - return new ExplainPlan(steps, builder.build()); - } - - - @Override - public long getEstimatedSize() { - return DEFAULT_ESTIMATED_SIZE; - } - - @Override - public Cost getCost() { - Cost cost = Cost.ZERO; - for (QueryPlan plan : plans) { - cost = cost.plus(plan.getCost()); - } - return cost; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return paramMetaData; - } - - @Override - public FilterableStatement getStatement() { - return statement; - } - - @Override - public StatementContext getContext() { - return parentContext; - } - - @Override - public boolean isRowKeyOrdered() { - return groupBy.isEmpty() ? orderBy.getOrderByExpressions().isEmpty() : groupBy.isOrderPreserving(); - } - - public List getPlans() { - return this.plans; - } - - @Override - public boolean useRoundRobinIterator() throws SQLException { + private static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K + + private final TableRef tableRef; + private final FilterableStatement statement; + private final ParameterMetaData paramMetaData; + private final OrderBy orderBy; + private final StatementContext parentContext; + private final Integer limit; + private final Integer offset; + private final GroupBy groupBy; + private final RowProjector projector; + private final boolean isDegenerate; + private final List plans; + private UnionResultIterators iterators; + private Long estimatedRows; + private Long estimatedBytes; + private Long estimateInfoTs; + private boolean getEstimatesCalled; + private boolean supportOrderByOptimize = false; + private List outputOrderBys = null; + + public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, + RowProjector projector, Integer limit, Integer offset, OrderBy orderBy, GroupBy groupBy, + List plans, ParameterMetaData paramMetaData) throws SQLException { + this.parentContext = context; + this.statement = statement; + this.tableRef = table; + this.projector = projector; + this.limit = limit; + this.orderBy = orderBy; + this.groupBy = groupBy; + this.plans = plans; + this.offset = offset; + this.paramMetaData = paramMetaData; + boolean isDegen = true; + for (QueryPlan plan : plans) { + if (plan.getContext().getScanRanges() != ScanRanges.NOTHING) { + isDegen = false; + break; + } + } + this.isDegenerate = isDegen; + } + + /** + * If every subquery in {@link UnionPlan} is ordered, and {@link QueryPlan#getOutputOrderBys} of + * each subquery are equal(absolute equality or the same column name is unnecessary, just column + * types are compatible and columns count is same), then it just needs to perform a simple merge + * on the subquery results to ensure the overall order of the union all, see comments on + * {@link QueryCompiler#optimizeUnionOrderByIfPossible}. 
+ */ + private boolean checkIfSupportOrderByOptimize() { + if (!this.orderBy.isEmpty()) { + return false; + } + if (plans.isEmpty()) { + return false; + } + OrderBy prevOrderBy = null; + for (QueryPlan queryPlan : plans) { + List orderBys = queryPlan.getOutputOrderBys(); + if (orderBys.isEmpty() || orderBys.size() > 1) { return false; - } - - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); - } - - @Override - public Operation getOperation() { - return statement.getOperation(); - } - - @Override - public Set getSourceRefs() { - // TODO is this correct? - Set sources = Sets.newHashSetWithExpectedSize(plans.size()); - for (QueryPlan plan : plans) { - sources.addAll(plan.getSourceRefs()); - } - return sources; - } - - @Override - public Long getEstimatedRowsToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedRows; - } - - @Override - public Long getEstimatedBytesToScan() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimatedBytes; - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - if (!getEstimatesCalled) { - getEstimates(); - } - return estimateInfoTs; - } - - private void getEstimates() throws SQLException { - getEstimatesCalled = true; - for (QueryPlan plan : plans) { - if (plan.getEstimatedBytesToScan() == null || plan.getEstimatedRowsToScan() == null - || plan.getEstimateInfoTimestamp() == null) { - /* - * If any of the sub plans doesn't have the estimate info available, then we don't - * provide estimate for the overall plan - */ - estimatedBytes = null; - estimatedRows = null; - estimateInfoTs = null; - break; - } else { - estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan()); - estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan()); - estimateInfoTs = getMin(estimateInfoTs, plan.getEstimateInfoTimestamp()); - } - } - } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "EI_EXPOSE_REP", - justification = "getOutputOrderBys designed to work this way.") - @Override - public List getOutputOrderBys() { - if (this.outputOrderBys != null) { - return this.outputOrderBys; - } - return this.outputOrderBys = convertToOutputOrderBys(); - } - - private List convertToOutputOrderBys() { - assert this.groupBy == GroupBy.EMPTY_GROUP_BY; - assert this.orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && this.orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; - if(!this.orderBy.isEmpty()) { - return Collections. singletonList( - OrderBy.convertCompiledOrderByToOutputOrderBy(this.orderBy)); - } - if (this.supportOrderByOptimize) { - assert this.plans.size() > 0; - return this.plans.get(0).getOutputOrderBys(); - } - return Collections. 
emptyList(); - } - - private List getOrderByExpressionsWhenSupportOrderByOptimize() { - assert this.supportOrderByOptimize; - assert this.plans.size() > 0; - assert this.orderBy.isEmpty(); - List outputOrderBys = this.plans.get(0).getOutputOrderBys(); - assert outputOrderBys != null && outputOrderBys.size() == 1; - List orderByExpressions = outputOrderBys.get(0).getOrderByExpressions(); - assert orderByExpressions != null && orderByExpressions.size() > 0; - return orderByExpressions.stream().map(OrderByExpression::convertIfExpressionSortOrderDesc) - .collect(Collectors.toList()); - } - - @Override - public boolean isApplicable() { - return true; - } + } + OrderBy orderBy = orderBys.get(0); + if (prevOrderBy != null && !OrderBy.equalsForOutputOrderBy(prevOrderBy, orderBy)) { + return false; + } + prevOrderBy = orderBy; + } + return true; + } + + public boolean isSupportOrderByOptimize() { + return this.supportOrderByOptimize; + } + + public void enableCheckSupportOrderByOptimize() { + this.supportOrderByOptimize = checkIfSupportOrderByOptimize(); + this.outputOrderBys = null; + } + + public void disableSupportOrderByOptimize() { + if (!this.supportOrderByOptimize) { + return; + } + this.outputOrderBys = null; + this.supportOrderByOptimize = false; + } + + @Override + public boolean isDegenerate() { + return isDegenerate; + } + + @Override + public List getSplits() { + if (iterators == null) return null; + return iterators.getSplits(); + } + + @Override + public List> getScans() { + if (iterators == null) return null; + return iterators.getScans(); + } + + public List getSubPlans() { + return plans; + } + + @Override + public GroupBy getGroupBy() { + return groupBy; + } + + @Override + public OrderBy getOrderBy() { + return orderBy; + } + + @Override + public TableRef getTableRef() { + return tableRef; + } + + @Override + public Integer getLimit() { + return limit; + } + + @Override + public Integer getOffset() { + return offset; + } + + @Override + public RowProjector getProjector() { + return projector; + } + + @Override + public ResultIterator iterator() throws SQLException { + return iterator(DefaultParallelScanGrouper.getInstance()); + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return iterator(scanGrouper, null); + } + + @Override + public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) + throws SQLException { + this.iterators = new UnionResultIterators(plans, parentContext); + ResultIterator scanner; + + if (!orderBy.isEmpty()) { // TopN + scanner = + new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions()); + } else if (this.supportOrderByOptimize) { + // Every subquery is ordered + scanner = new MergeSortTopNResultIterator(iterators, limit, offset, + getOrderByExpressionsWhenSupportOrderByOptimize()); + } else { + scanner = new ConcatResultIterator(iterators); + if (offset != null) { + scanner = new OffsetResultIterator(scanner, offset); + } + if (limit != null) { + scanner = new LimitingResultIterator(scanner, limit); + } + } + return scanner; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + List steps = new ArrayList(); + ExplainPlanAttributesBuilder builder = new ExplainPlanAttributesBuilder(); + String abstractExplainPlan = "UNION ALL OVER " + this.plans.size() + " QUERIES"; + builder.setAbstractExplainPlan(abstractExplainPlan); + steps.add(abstractExplainPlan); + ResultIterator iterator = iterator(); + iterator.explain(steps, 
builder); + // Indent plans steps nested under union, except last client-side merge/concat step (if there is + // one) + int offset = + !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0; + for (int i = 1; i < steps.size() - offset; i++) { + steps.set(i, " " + steps.get(i)); + } + return new ExplainPlan(steps, builder.build()); + } + + @Override + public long getEstimatedSize() { + return DEFAULT_ESTIMATED_SIZE; + } + + @Override + public Cost getCost() { + Cost cost = Cost.ZERO; + for (QueryPlan plan : plans) { + cost = cost.plus(plan.getCost()); + } + return cost; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return paramMetaData; + } + + @Override + public FilterableStatement getStatement() { + return statement; + } + + @Override + public StatementContext getContext() { + return parentContext; + } + + @Override + public boolean isRowKeyOrdered() { + return groupBy.isEmpty() + ? orderBy.getOrderByExpressions().isEmpty() + : groupBy.isOrderPreserving(); + } + + public List getPlans() { + return this.plans; + } + + @Override + public boolean useRoundRobinIterator() throws SQLException { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + @Override + public Operation getOperation() { + return statement.getOperation(); + } + + @Override + public Set getSourceRefs() { + // TODO is this correct? + Set sources = Sets.newHashSetWithExpectedSize(plans.size()); + for (QueryPlan plan : plans) { + sources.addAll(plan.getSourceRefs()); + } + return sources; + } + + @Override + public Long getEstimatedRowsToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedRows; + } + + @Override + public Long getEstimatedBytesToScan() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimatedBytes; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + if (!getEstimatesCalled) { + getEstimates(); + } + return estimateInfoTs; + } + + private void getEstimates() throws SQLException { + getEstimatesCalled = true; + for (QueryPlan plan : plans) { + if ( + plan.getEstimatedBytesToScan() == null || plan.getEstimatedRowsToScan() == null + || plan.getEstimateInfoTimestamp() == null + ) { + /* + * If any of the sub plans doesn't have the estimate info available, then we don't provide + * estimate for the overall plan + */ + estimatedBytes = null; + estimatedRows = null; + estimateInfoTs = null; + break; + } else { + estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan()); + estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan()); + estimateInfoTs = getMin(estimateInfoTs, plan.getEstimateInfoTimestamp()); + } + } + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EI_EXPOSE_REP", + justification = "getOutputOrderBys designed to work this way.") + @Override + public List getOutputOrderBys() { + if (this.outputOrderBys != null) { + return this.outputOrderBys; + } + return this.outputOrderBys = convertToOutputOrderBys(); + } + + private List convertToOutputOrderBys() { + assert this.groupBy == GroupBy.EMPTY_GROUP_BY; + assert this.orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY + && this.orderBy != OrderBy.REV_ROW_KEY_ORDER_BY; + if (!this.orderBy.isEmpty()) { + return Collections.< + OrderBy> singletonList(OrderBy.convertCompiledOrderByToOutputOrderBy(this.orderBy)); + } + if (this.supportOrderByOptimize) { + assert this.plans.size() > 0; + return 
this.plans.get(0).getOutputOrderBys(); + } + return Collections. emptyList(); + } + + private List getOrderByExpressionsWhenSupportOrderByOptimize() { + assert this.supportOrderByOptimize; + assert this.plans.size() > 0; + assert this.orderBy.isEmpty(); + List outputOrderBys = this.plans.get(0).getOutputOrderBys(); + assert outputOrderBys != null && outputOrderBys.size() == 1; + List orderByExpressions = outputOrderBys.get(0).getOrderByExpressions(); + assert orderByExpressions != null && orderByExpressions.size() > 0; + return orderByExpressions.stream().map(OrderByExpression::convertIfExpressionSortOrderDesc) + .collect(Collectors.toList()); + } + + @Override + public boolean isApplicable() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java index 19c93405d18..cb1d2baa63b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,10 +25,9 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; -import org.apache.phoenix.compile.QueryPlan; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.execute.visitor.QueryPlanVisitor; import org.apache.phoenix.expression.BaseSingleExpression; import org.apache.phoenix.expression.BaseTerminalExpression; @@ -44,164 +43,166 @@ import org.apache.phoenix.schema.types.PInteger; public class UnnestArrayPlan extends DelegateQueryPlan { - private final Expression arrayExpression; - private final boolean withOrdinality; - - public UnnestArrayPlan(QueryPlan delegate, Expression arrayExpression, boolean withOrdinality) { - super(delegate); - this.arrayExpression = arrayExpression; - this.withOrdinality = withOrdinality; + private final Expression arrayExpression; + private final boolean withOrdinality; + + public UnnestArrayPlan(QueryPlan delegate, Expression arrayExpression, boolean withOrdinality) { + super(delegate); + this.arrayExpression = arrayExpression; + this.withOrdinality = withOrdinality; + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { + return new UnnestArrayResultIterator(delegate.iterator(scanGrouper, scan)); + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + ExplainPlan explainPlan = delegate.getExplainPlan(); + List planSteps = explainPlan.getPlanSteps(); + ExplainPlanAttributes explainPlanAttributes = explainPlan.getPlanStepsAsAttributes(); + ExplainPlanAttributesBuilder newBuilder = + new ExplainPlanAttributesBuilder(explainPlanAttributes); + planSteps.add("UNNEST"); + newBuilder.setAbstractExplainPlan("UNNEST"); + return new 
ExplainPlan(planSteps, newBuilder.build()); + } + + @Override + public Integer getLimit() { + return null; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.visit(this); + } + + public class UnnestArrayResultIterator extends DelegateResultIterator { + private final UnnestArrayElemRefExpression elemRefExpression; + private final UnnestArrayElemIndexExpression elemIndexExpression; + private final TupleProjector projector; + private Tuple current; + private ImmutableBytesWritable arrayPtr; + private int length; + private int index; + private boolean closed; + + public UnnestArrayResultIterator(ResultIterator iterator) { + super(iterator); + this.elemRefExpression = new UnnestArrayElemRefExpression(arrayExpression); + this.elemIndexExpression = withOrdinality ? new UnnestArrayElemIndexExpression() : null; + this.projector = new TupleProjector(withOrdinality + ? new Expression[] { elemRefExpression, elemIndexExpression } + : new Expression[] { elemRefExpression }); + this.arrayPtr = new ImmutableBytesWritable(); + this.length = 0; + this.index = 0; + this.closed = false; } @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return new UnnestArrayResultIterator(delegate.iterator(scanGrouper, scan)); + public Tuple next() throws SQLException { + if (closed) return null; + + while (index >= length) { + this.current = super.next(); + if (current == null) { + this.closed = true; + return null; + } + if (arrayExpression.evaluate(current, arrayPtr)) { + this.length = PArrayDataType.getArrayLength(arrayPtr, elemRefExpression.getDataType(), + arrayExpression.getMaxLength()); + this.index = 0; + this.elemRefExpression.setArrayPtr(arrayPtr); + } + } + elemRefExpression.setIndex(index); + if (elemIndexExpression != null) { + elemIndexExpression.setIndex(index); + } + index++; + return projector.projectResults(current); } @Override - public ExplainPlan getExplainPlan() throws SQLException { - ExplainPlan explainPlan = delegate.getExplainPlan(); - List planSteps = explainPlan.getPlanSteps(); - ExplainPlanAttributes explainPlanAttributes = - explainPlan.getPlanStepsAsAttributes(); - ExplainPlanAttributesBuilder newBuilder = - new ExplainPlanAttributesBuilder(explainPlanAttributes); - planSteps.add("UNNEST"); - newBuilder.setAbstractExplainPlan("UNNEST"); - return new ExplainPlan(planSteps, newBuilder.build()); + public void close() throws SQLException { + super.close(); + closed = true; } - - @Override - public Integer getLimit() { - return null; + } + + @SuppressWarnings("rawtypes") + private static class UnnestArrayElemRefExpression extends BaseSingleExpression { + private final PDataType type; + private int index = 0; + private ImmutableBytesWritable arrayPtr = new ImmutableBytesWritable(); + + public UnnestArrayElemRefExpression(Expression arrayExpression) { + super(arrayExpression); + this.type = PDataType + .fromTypeId(arrayExpression.getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.visit(this); + public void setIndex(int index) { + this.index = index; } - public class UnnestArrayResultIterator extends DelegateResultIterator { - private final UnnestArrayElemRefExpression elemRefExpression; - private final UnnestArrayElemIndexExpression elemIndexExpression; - private final TupleProjector projector; - private Tuple current; - private ImmutableBytesWritable arrayPtr; - private int length; - private int index; - private boolean closed; 
- - public UnnestArrayResultIterator(ResultIterator iterator) { - super(iterator); - this.elemRefExpression = new UnnestArrayElemRefExpression(arrayExpression); - this.elemIndexExpression = withOrdinality ? new UnnestArrayElemIndexExpression() : null; - this.projector = new TupleProjector(withOrdinality ? new Expression[] {elemRefExpression, elemIndexExpression} : new Expression[] {elemRefExpression}); - this.arrayPtr = new ImmutableBytesWritable(); - this.length = 0; - this.index = 0; - this.closed = false; - } - - @Override - public Tuple next() throws SQLException { - if (closed) - return null; - - while (index >= length) { - this.current = super.next(); - if (current == null) { - this.closed = true; - return null; - } - if (arrayExpression.evaluate(current, arrayPtr)) { - this.length = PArrayDataType.getArrayLength(arrayPtr, elemRefExpression.getDataType(), arrayExpression.getMaxLength()); - this.index = 0; - this.elemRefExpression.setArrayPtr(arrayPtr); - } - } - elemRefExpression.setIndex(index); - if (elemIndexExpression != null) { - elemIndexExpression.setIndex(index); - } - index++; - return projector.projectResults(current); - } + public void setArrayPtr(ImmutableBytesWritable arrayPtr) { + this.arrayPtr.set(arrayPtr.get(), arrayPtr.getOffset(), arrayPtr.getLength()); + } - @Override - public void close() throws SQLException { - super.close(); - closed = true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + ptr.set(arrayPtr.get(), arrayPtr.getOffset(), arrayPtr.getLength()); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, index++, getDataType(), getMaxLength()); + return true; } - - @SuppressWarnings("rawtypes") - private static class UnnestArrayElemRefExpression extends BaseSingleExpression { - private final PDataType type; - private int index = 0; - private ImmutableBytesWritable arrayPtr = new ImmutableBytesWritable(); - - public UnnestArrayElemRefExpression(Expression arrayExpression) { - super(arrayExpression); - this.type = PDataType.fromTypeId(arrayExpression.getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); - } - - public void setIndex(int index) { - this.index = index; - } - - public void setArrayPtr(ImmutableBytesWritable arrayPtr) { - this.arrayPtr.set(arrayPtr.get(), arrayPtr.getOffset(), arrayPtr.getLength()); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - ptr.set(arrayPtr.get(), arrayPtr.getOffset(), arrayPtr.getLength()); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, index++, getDataType(), getMaxLength()); - return true; - } - @Override - public T accept(ExpressionVisitor visitor) { - // This Expression class is only used at runtime. - return null; - } + @Override + public T accept(ExpressionVisitor visitor) { + // This Expression class is only used at runtime. 
+ return null; + } - @Override - public PDataType getDataType() { - return type; - } + @Override + public PDataType getDataType() { + return type; } - - @SuppressWarnings("rawtypes") - private static class UnnestArrayElemIndexExpression extends BaseTerminalExpression { - private int index = 0; - - public void setIndex(int index) { - this.index = index; - } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - byte[] lengthBuf = new byte[PInteger.INSTANCE.getByteSize()]; - PInteger.INSTANCE.getCodec().encodeInt(index + 1, lengthBuf, 0); - ptr.set(lengthBuf); - return true; - } + @SuppressWarnings("rawtypes") + private static class UnnestArrayElemIndexExpression extends BaseTerminalExpression { + private int index = 0; - @Override - public T accept(ExpressionVisitor visitor) { - // This Expression class is only used at runtime. - return null; - } + public void setIndex(int index) { + this.index = index; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + byte[] lengthBuf = new byte[PInteger.INSTANCE.getByteSize()]; + PInteger.INSTANCE.getCodec().encodeInt(index + 1, lengthBuf, 0); + ptr.set(lengthBuf); + return true; } @Override - public List getOutputOrderBys() { - return Collections. emptyList(); + public T accept(ExpressionVisitor visitor) { + // This Expression class is only used at runtime. + return null; } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + } + + @Override + public List getOutputOrderBys() { + return Collections. emptyList(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/AvgRowWidthVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/AvgRowWidthVisitor.java index ada866490b4..e3da0bde83e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/AvgRowWidthVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/AvgRowWidthVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.execute.visitor; +import java.sql.SQLException; + import org.apache.phoenix.compile.ListJarsQueryPlan; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.TraceQueryPlan; @@ -33,167 +35,164 @@ import org.apache.phoenix.execute.UnnestArrayPlan; import org.apache.phoenix.parse.JoinTableNode; -import java.sql.SQLException; - /** - * Implementation of QueryPlanVisitor used to get the average number of bytes each - * row for a QueryPlan. + * Implementation of QueryPlanVisitor used to get the average number of bytes each row for a + * QueryPlan. 
*/ public class AvgRowWidthVisitor implements QueryPlanVisitor { - @Override - public Double defaultReturn(QueryPlan plan) { - return null; - } - - @Override - public Double visit(AggregatePlan plan) { - try { - Long byteCount = plan.getEstimatedBytesToScan(); - Long rowCount = plan.getEstimatedRowsToScan(); - if (byteCount != null && rowCount != null) { - if (byteCount == 0) { - return 0.0; - } - if (rowCount != 0) { - return ((double) byteCount) / rowCount; - } - } - } catch (SQLException e) { + @Override + public Double defaultReturn(QueryPlan plan) { + return null; + } + + @Override + public Double visit(AggregatePlan plan) { + try { + Long byteCount = plan.getEstimatedBytesToScan(); + Long rowCount = plan.getEstimatedRowsToScan(); + if (byteCount != null && rowCount != null) { + if (byteCount == 0) { + return 0.0; } - - return null; - } - - @Override - public Double visit(ScanPlan plan) { - try { - Long byteCount = plan.getEstimatedBytesToScan(); - Long rowCount = plan.getEstimatedRowsToScan(); - if (byteCount != null && rowCount != null) { - if (byteCount == 0) { - return 0.0; - } - if (rowCount != 0) { - return ((double) byteCount) / rowCount; - } - } - } catch (SQLException e) { + if (rowCount != 0) { + return ((double) byteCount) / rowCount; } - - return null; - } - - @Override - public Double visit(ClientAggregatePlan plan) { - return plan.getDelegate().accept(this); - } - - @Override - public Double visit(ClientScanPlan plan) { - return plan.getDelegate().accept(this); + } + } catch (SQLException e) { } - @Override - public Double visit(LiteralResultIterationPlan plan) { - return (double) plan.getEstimatedSize(); - } - - @Override - public Double visit(TupleProjectionPlan plan) { - return plan.getDelegate().accept(this); - } - - @Override - public Double visit(HashJoinPlan plan) { - Double lhsWidth = plan.getDelegate().accept(this); - if (lhsWidth == null) { - return null; + return null; + } + + @Override + public Double visit(ScanPlan plan) { + try { + Long byteCount = plan.getEstimatedBytesToScan(); + Long rowCount = plan.getEstimatedRowsToScan(); + if (byteCount != null && rowCount != null) { + if (byteCount == 0) { + return 0.0; } - JoinTableNode.JoinType[] joinTypes = plan.getJoinInfo().getJoinTypes(); - HashJoinPlan.SubPlan[] subPlans = plan.getSubPlans(); - Double width = lhsWidth; - for (int i = 0; i < joinTypes.length; i++) { - Double rhsWidth = subPlans[i].getInnerPlan().accept(this); - if (rhsWidth == null) { - return null; - } - width = join(width, rhsWidth, joinTypes[i]); + if (rowCount != 0) { + return ((double) byteCount) / rowCount; } - - return width; + } + } catch (SQLException e) { } - @Override - public Double visit(SortMergeJoinPlan plan) { - Double lhsWidth = plan.getLhsPlan().accept(this); - Double rhsWidth = plan.getRhsPlan().accept(this); - if (lhsWidth == null || rhsWidth == null) { - return null; - } - - return join(lhsWidth, rhsWidth, plan.getJoinType()); + return null; + } + + @Override + public Double visit(ClientAggregatePlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(ClientScanPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(LiteralResultIterationPlan plan) { + return (double) plan.getEstimatedSize(); + } + + @Override + public Double visit(TupleProjectionPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(HashJoinPlan plan) { + Double lhsWidth = plan.getDelegate().accept(this); + if (lhsWidth == null) { + return 
null; } - - @Override - public Double visit(UnionPlan plan) { - Double sum = 0.0; - for (QueryPlan subPlan : plan.getSubPlans()) { - Double avgWidth = subPlan.accept(this); - if (avgWidth == null) { - return null; - } - sum += avgWidth; - } - - return sum / plan.getSubPlans().size(); + JoinTableNode.JoinType[] joinTypes = plan.getJoinInfo().getJoinTypes(); + HashJoinPlan.SubPlan[] subPlans = plan.getSubPlans(); + Double width = lhsWidth; + for (int i = 0; i < joinTypes.length; i++) { + Double rhsWidth = subPlans[i].getInnerPlan().accept(this); + if (rhsWidth == null) { + return null; + } + width = join(width, rhsWidth, joinTypes[i]); } - @Override - public Double visit(UnnestArrayPlan plan) { - return plan.getDelegate().accept(this); - } + return width; + } - @Override - public Double visit(CursorFetchPlan plan) { - return plan.getDelegate().accept(this); + @Override + public Double visit(SortMergeJoinPlan plan) { + Double lhsWidth = plan.getLhsPlan().accept(this); + Double rhsWidth = plan.getRhsPlan().accept(this); + if (lhsWidth == null || rhsWidth == null) { + return null; } - @Override - public Double visit(ListJarsQueryPlan plan) { - return (double) plan.getEstimatedSize(); - } + return join(lhsWidth, rhsWidth, plan.getJoinType()); + } - @Override - public Double visit(TraceQueryPlan plan) { - return (double) plan.getEstimatedSize(); + @Override + public Double visit(UnionPlan plan) { + Double sum = 0.0; + for (QueryPlan subPlan : plan.getSubPlans()) { + Double avgWidth = subPlan.accept(this); + if (avgWidth == null) { + return null; + } + sum += avgWidth; } - - /* - * The below methods provide estimation of row width based on the input row width as well as - * the operator. - */ - - public static double join(double lhsWidth, double rhsWidth, JoinTableNode.JoinType type) { - double width; - switch (type) { - case Inner: - case Left: - case Right: - case Full: { - width = lhsWidth + rhsWidth; - break; - } - case Semi: - case Anti: { - width = lhsWidth; - break; - } - default: { - throw new IllegalArgumentException("Invalid join type: " + type); - } - } - return width; + return sum / plan.getSubPlans().size(); + } + + @Override + public Double visit(UnnestArrayPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(CursorFetchPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(ListJarsQueryPlan plan) { + return (double) plan.getEstimatedSize(); + } + + @Override + public Double visit(TraceQueryPlan plan) { + return (double) plan.getEstimatedSize(); + } + + /* + * The below methods provide estimation of row width based on the input row width as well as the + * operator. 
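+   * For example, join() below adds the two input widths for inner and outer joins, and keeps
+   * only the left-hand width for semi and anti joins.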
+ */ + + public static double join(double lhsWidth, double rhsWidth, JoinTableNode.JoinType type) { + double width; + switch (type) { + case Inner: + case Left: + case Right: + case Full: { + width = lhsWidth + rhsWidth; + break; + } + case Semi: + case Anti: { + width = lhsWidth; + break; + } + default: { + throw new IllegalArgumentException("Invalid join type: " + type); + } } + return width; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/ByteCountVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/ByteCountVisitor.java index 7e9e88bb4dd..e5d9d71f7c2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/ByteCountVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/ByteCountVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,83 +37,83 @@ */ public class ByteCountVisitor implements QueryPlanVisitor { - @Override - public Double defaultReturn(QueryPlan plan) { - return null; + @Override + public Double defaultReturn(QueryPlan plan) { + return null; + } + + @Override + public Double visit(AggregatePlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(ScanPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(ClientAggregatePlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(ClientScanPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(LiteralResultIterationPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(TupleProjectionPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(HashJoinPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(SortMergeJoinPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(UnionPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(UnnestArrayPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(CursorFetchPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(ListJarsQueryPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + @Override + public Double visit(TraceQueryPlan plan) { + return getByteCountFromRowCountAndRowWidth(plan); + } + + protected Double getByteCountFromRowCountAndRowWidth(QueryPlan plan) { + Double rowCount = plan.accept(new RowCountVisitor()); + Double rowWidth = plan.accept(new AvgRowWidthVisitor()); + if (rowCount == null || rowWidth == null) { + return null; } - @Override - public Double visit(AggregatePlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(ScanPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(ClientAggregatePlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - 
@Override - public Double visit(ClientScanPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(LiteralResultIterationPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(TupleProjectionPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(HashJoinPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(SortMergeJoinPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(UnionPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(UnnestArrayPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(CursorFetchPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(ListJarsQueryPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - @Override - public Double visit(TraceQueryPlan plan) { - return getByteCountFromRowCountAndRowWidth(plan); - } - - protected Double getByteCountFromRowCountAndRowWidth(QueryPlan plan) { - Double rowCount = plan.accept(new RowCountVisitor()); - Double rowWidth = plan.accept(new AvgRowWidthVisitor()); - if (rowCount == null || rowWidth == null) { - return null; - } - - return rowCount * rowWidth; - } + return rowCount * rowWidth; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/QueryPlanVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/QueryPlanVisitor.java index 9229f9fbf02..307712136e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/QueryPlanVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/QueryPlanVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,23 +23,34 @@ import org.apache.phoenix.execute.*; /** - * * Visitor for a QueryPlan (which may contain other nested query-plans) - * */ public interface QueryPlanVisitor { - E defaultReturn(QueryPlan plan); - E visit(AggregatePlan plan); - E visit(ScanPlan plan); - E visit(ClientAggregatePlan plan); - E visit(ClientScanPlan plan); - E visit(LiteralResultIterationPlan plan); - E visit(TupleProjectionPlan plan); - E visit(HashJoinPlan plan); - E visit(SortMergeJoinPlan plan); - E visit(UnionPlan plan); - E visit(UnnestArrayPlan plan); - E visit(CursorFetchPlan plan); - E visit(ListJarsQueryPlan plan); - E visit(TraceQueryPlan plan); + E defaultReturn(QueryPlan plan); + + E visit(AggregatePlan plan); + + E visit(ScanPlan plan); + + E visit(ClientAggregatePlan plan); + + E visit(ClientScanPlan plan); + + E visit(LiteralResultIterationPlan plan); + + E visit(TupleProjectionPlan plan); + + E visit(HashJoinPlan plan); + + E visit(SortMergeJoinPlan plan); + + E visit(UnionPlan plan); + + E visit(UnnestArrayPlan plan); + + E visit(CursorFetchPlan plan); + + E visit(ListJarsQueryPlan plan); + + E visit(TraceQueryPlan plan); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/RowCountVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/RowCountVisitor.java index 81c39a20346..fbb786827de 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/RowCountVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/visitor/RowCountVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,10 @@ */ package org.apache.phoenix.execute.visitor; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.phoenix.compile.GroupByCompiler; @@ -38,287 +42,268 @@ import org.apache.phoenix.filter.BooleanExpressionFilter; import org.apache.phoenix.parse.JoinTableNode; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - /** * Implementation of QueryPlanVisitor used to get the number of output rows for a QueryPlan. */ public class RowCountVisitor implements QueryPlanVisitor { - // An estimate of the ratio of result data from group-by against the input data. - private final static double GROUPING_FACTOR = 0.1; - - private final static double OUTER_JOIN_FACTOR = 1.15; - private final static double INNER_JOIN_FACTOR = 0.85; - private final static double SEMI_OR_ANTI_JOIN_FACTOR = 0.5; - - private final static double UNION_DISTINCT_FACTOR = 0.8; - - @Override - public Double defaultReturn(QueryPlan plan) { - return null; + // An estimate of the ratio of result data from group-by against the input data. 
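+  // For example, 1000 input rows grouped on a key are estimated to yield about 100 output rows;
+  // an ungrouped aggregate always collapses to a single row (see aggregate() below).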
+ private final static double GROUPING_FACTOR = 0.1; + + private final static double OUTER_JOIN_FACTOR = 1.15; + private final static double INNER_JOIN_FACTOR = 0.85; + private final static double SEMI_OR_ANTI_JOIN_FACTOR = 0.5; + + private final static double UNION_DISTINCT_FACTOR = 0.8; + + @Override + public Double defaultReturn(QueryPlan plan) { + return null; + } + + @Override + public Double visit(AggregatePlan plan) { + try { + Long b = plan.getEstimatedRowsToScan(); + if (b != null) { + return limit(filter(aggregate( + filter(b.doubleValue(), stripSkipScanFilter(plan.getContext().getScan().getFilter())), + plan.getGroupBy()), plan.getHaving()), plan.getLimit()); + } + } catch (SQLException e) { } - @Override - public Double visit(AggregatePlan plan) { - try { - Long b = plan.getEstimatedRowsToScan(); - if (b != null) { - return limit( - filter( - aggregate( - filter( - b.doubleValue(), - stripSkipScanFilter( - plan.getContext().getScan().getFilter())), - plan.getGroupBy()), - plan.getHaving()), - plan.getLimit()); - } - } catch (SQLException e) { - } - - return null; + return null; + } + + @Override + public Double visit(ScanPlan plan) { + try { + Long b = plan.getEstimatedRowsToScan(); + if (b != null) { + return limit( + filter(b.doubleValue(), stripSkipScanFilter(plan.getContext().getScan().getFilter())), + plan.getLimit()); + } + } catch (SQLException e) { } - @Override - public Double visit(ScanPlan plan) { - try { - Long b = plan.getEstimatedRowsToScan(); - if (b != null) { - return limit( - filter( - b.doubleValue(), - stripSkipScanFilter(plan.getContext().getScan().getFilter())), - plan.getLimit()); - } - } catch (SQLException e) { - } + return null; + } - return null; + @Override + public Double visit(ClientAggregatePlan plan) { + Double b = plan.getDelegate().accept(this); + if (b != null) { + return limit(filter(aggregate(filter(b.doubleValue(), plan.getWhere()), plan.getGroupBy()), + plan.getHaving()), plan.getLimit()); } - @Override - public Double visit(ClientAggregatePlan plan) { - Double b = plan.getDelegate().accept(this); - if (b != null) { - return limit( - filter( - aggregate( - filter(b.doubleValue(), plan.getWhere()), - plan.getGroupBy()), - plan.getHaving()), - plan.getLimit()); - } + return null; + } - return null; + @Override + public Double visit(ClientScanPlan plan) { + if (plan.getLimit() != null) { + return (double) plan.getLimit(); + } + Double b = plan.getDelegate().accept(this); + if (b != null) { + return limit(filter(b.doubleValue(), plan.getWhere()), plan.getLimit()); } - @Override - public Double visit(ClientScanPlan plan) { - if (plan.getLimit() != null) { - return (double) plan.getLimit(); - } - Double b = plan.getDelegate().accept(this); - if (b != null) { - return limit( - filter(b.doubleValue(), plan.getWhere()), - plan.getLimit()); - } - + return null; + } + + @Override + public Double visit(LiteralResultIterationPlan plan) { + return 1.0; + } + + @Override + public Double visit(TupleProjectionPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public Double visit(HashJoinPlan plan) { + try { + QueryPlan lhsPlan = plan.getDelegate(); + Long b = lhsPlan.getEstimatedRowsToScan(); + if (b == null) { return null; + } + + Double rows = + filter(b.doubleValue(), stripSkipScanFilter(lhsPlan.getContext().getScan().getFilter())); + JoinTableNode.JoinType[] joinTypes = plan.getJoinInfo().getJoinTypes(); + HashJoinPlan.SubPlan[] subPlans = plan.getSubPlans(); + for (int i = 0; i < joinTypes.length; i++) { + Double rhsRows = 
subPlans[i].getInnerPlan().accept(this); + if (rhsRows == null) { + return null; + } + rows = join(rows, rhsRows.doubleValue(), joinTypes[i]); + } + if (lhsPlan instanceof AggregatePlan) { + AggregatePlan aggPlan = (AggregatePlan) lhsPlan; + rows = filter(aggregate(rows, aggPlan.getGroupBy()), aggPlan.getHaving()); + } + return limit(rows, lhsPlan.getLimit()); + } catch (SQLException e) { } - @Override - public Double visit(LiteralResultIterationPlan plan) { - return 1.0; - } + return null; + } - @Override - public Double visit(TupleProjectionPlan plan) { - return plan.getDelegate().accept(this); + @Override + public Double visit(SortMergeJoinPlan plan) { + Double lhsRows = plan.getLhsPlan().accept(this); + Double rhsRows = plan.getRhsPlan().accept(this); + if (lhsRows != null && rhsRows != null) { + return join(lhsRows, rhsRows, plan.getJoinType()); } - @Override - public Double visit(HashJoinPlan plan) { - try { - QueryPlan lhsPlan = plan.getDelegate(); - Long b = lhsPlan.getEstimatedRowsToScan(); - if (b == null) { - return null; - } - - Double rows = filter(b.doubleValue(), - stripSkipScanFilter(lhsPlan.getContext().getScan().getFilter())); - JoinTableNode.JoinType[] joinTypes = plan.getJoinInfo().getJoinTypes(); - HashJoinPlan.SubPlan[] subPlans = plan.getSubPlans(); - for (int i = 0; i < joinTypes.length; i++) { - Double rhsRows = subPlans[i].getInnerPlan().accept(this); - if (rhsRows == null) { - return null; - } - rows = join(rows, rhsRows.doubleValue(), joinTypes[i]); - } - if (lhsPlan instanceof AggregatePlan) { - AggregatePlan aggPlan = (AggregatePlan) lhsPlan; - rows = filter(aggregate(rows, aggPlan.getGroupBy()), aggPlan.getHaving()); - } - return limit(rows, lhsPlan.getLimit()); - } catch (SQLException e) { - } - + return null; + } + + @Override + public Double visit(UnionPlan plan) { + int count = plan.getSubPlans().size(); + double[] inputRows = new double[count]; + for (int i = 0; i < count; i++) { + Double b = plan.getSubPlans().get(i).accept(this); + if (b != null) { + inputRows[i] = b.doubleValue(); + } else { return null; + } } - @Override - public Double visit(SortMergeJoinPlan plan) { - Double lhsRows = plan.getLhsPlan().accept(this); - Double rhsRows = plan.getRhsPlan().accept(this); - if (lhsRows != null && rhsRows != null) { - return join(lhsRows, rhsRows, plan.getJoinType()); - } + return limit(union(true, inputRows), plan.getLimit()); + } - return null; - } + @Override + public Double visit(UnnestArrayPlan plan) { + return plan.getDelegate().accept(this); + } - @Override - public Double visit(UnionPlan plan) { - int count = plan.getSubPlans().size(); - double[] inputRows = new double[count]; - for (int i = 0; i < count; i++) { - Double b = plan.getSubPlans().get(i).accept(this); - if (b != null) { - inputRows[i] = b.doubleValue(); - } else { - return null; - } - } + @Override + public Double visit(CursorFetchPlan plan) { + return plan.getDelegate().accept(this); + } - return limit(union(true, inputRows),plan.getLimit()); - } + @Override + public Double visit(ListJarsQueryPlan plan) { + return 0.0; + } - @Override - public Double visit(UnnestArrayPlan plan) { - return plan.getDelegate().accept(this); - } + @Override + public Double visit(TraceQueryPlan plan) { + return 0.0; + } - @Override - public Double visit(CursorFetchPlan plan) { - return plan.getDelegate().accept(this); + public static Filter stripSkipScanFilter(Filter filter) { + if (filter == null) { + return null; } - - @Override - public Double visit(ListJarsQueryPlan plan) { - return 0.0; + if 
(!(filter instanceof FilterList)) { + return filter instanceof BooleanExpressionFilter ? filter : null; } - - @Override - public Double visit(TraceQueryPlan plan) { - return 0.0; + FilterList filterList = (FilterList) filter; + if (filterList.getOperator() != FilterList.Operator.MUST_PASS_ALL) { + return filter; } - - public static Filter stripSkipScanFilter(Filter filter) { - if (filter == null) { - return null; - } - if (!(filter instanceof FilterList)) { - return filter instanceof BooleanExpressionFilter ? filter : null; - } - FilterList filterList = (FilterList) filter; - if (filterList.getOperator() != FilterList.Operator.MUST_PASS_ALL) { - return filter; - } - List list = new ArrayList<>(); - for (Filter f : filterList.getFilters()) { - Filter stripped = stripSkipScanFilter(f); - if (stripped != null) { - list.add(stripped); - } - } - return list.isEmpty() ? null : (list.size() == 1 ? list.get(0) : new FilterList(FilterList.Operator.MUST_PASS_ALL, list)); + List list = new ArrayList<>(); + for (Filter f : filterList.getFilters()) { + Filter stripped = stripSkipScanFilter(f); + if (stripped != null) { + list.add(stripped); + } } - - - /* - * The below methods provide estimation of row count based on the input row count as well as - * the operator. They should be replaced by more accurate calculation based on histogram and - * a logical operator layer is expect to facilitate this. - */ - - public static double filter(double inputRows, Filter filter) { - if (filter == null) { - return inputRows; - } - return 0.5 * inputRows; + return list.isEmpty() + ? null + : (list.size() == 1 ? list.get(0) : new FilterList(FilterList.Operator.MUST_PASS_ALL, list)); + } + + /* + * The below methods provide estimation of row count based on the input row count as well as the + * operator. They should be replaced by more accurate calculation based on histogram and a logical + * operator layer is expect to facilitate this. 
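+   * As currently written, a filter is assumed to halve its input, a grouped aggregate to keep
+   * GROUPING_FACTOR of it, joins to scale by the join-type factors above, and UNION DISTINCT to
+   * keep UNION_DISTINCT_FACTOR of the summed branch counts.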
+ */ + + public static double filter(double inputRows, Filter filter) { + if (filter == null) { + return inputRows; } + return 0.5 * inputRows; + } - public static double filter(double inputRows, Expression filter) { - if (filter == null) { - return inputRows; - } - return 0.5 * inputRows; + public static double filter(double inputRows, Expression filter) { + if (filter == null) { + return inputRows; } + return 0.5 * inputRows; + } - public static double aggregate(double inputRows, GroupByCompiler.GroupBy groupBy) { - if (groupBy.isUngroupedAggregate()) { - return 1.0; - } - return GROUPING_FACTOR * inputRows; + public static double aggregate(double inputRows, GroupByCompiler.GroupBy groupBy) { + if (groupBy.isUngroupedAggregate()) { + return 1.0; } + return GROUPING_FACTOR * inputRows; + } - public static double limit(double inputRows, Integer limit) { - if (limit == null) { - return inputRows; - } - return limit; + public static double limit(double inputRows, Integer limit) { + if (limit == null) { + return inputRows; } - - public static double join(double lhsRows, double[] rhsRows, JoinTableNode.JoinType[] types) { - assert rhsRows.length == types.length; - double rows = lhsRows; - for (int i = 0; i < rhsRows.length; i++) { - rows = join(rows, rhsRows[i], types[i]); - } - return rows; + return limit; + } + + public static double join(double lhsRows, double[] rhsRows, JoinTableNode.JoinType[] types) { + assert rhsRows.length == types.length; + double rows = lhsRows; + for (int i = 0; i < rhsRows.length; i++) { + rows = join(rows, rhsRows[i], types[i]); } - - public static double join(double lhsRows, double rhsRows, JoinTableNode.JoinType type) { - double rows; - switch (type) { - case Inner: { - rows = Math.min(lhsRows, rhsRows); - rows = rows * INNER_JOIN_FACTOR; - break; - } - case Left: - case Right: - case Full: { - rows = Math.max(lhsRows, rhsRows); - rows = rows * OUTER_JOIN_FACTOR; - break; - } - case Semi: - case Anti: { - rows = lhsRows * SEMI_OR_ANTI_JOIN_FACTOR; - break; - } - default: { - throw new IllegalArgumentException("Invalid join type: " + type); - } - } - return rows; + return rows; + } + + public static double join(double lhsRows, double rhsRows, JoinTableNode.JoinType type) { + double rows; + switch (type) { + case Inner: { + rows = Math.min(lhsRows, rhsRows); + rows = rows * INNER_JOIN_FACTOR; + break; + } + case Left: + case Right: + case Full: { + rows = Math.max(lhsRows, rhsRows); + rows = rows * OUTER_JOIN_FACTOR; + break; + } + case Semi: + case Anti: { + rows = lhsRows * SEMI_OR_ANTI_JOIN_FACTOR; + break; + } + default: { + throw new IllegalArgumentException("Invalid join type: " + type); + } } + return rows; + } - public static double union(boolean all, double... inputRows) { - double rows = 0.0; - for (double d : inputRows) { - rows += d; - } - if (!all) { - rows *= UNION_DISTINCT_FACTOR; - } - return rows; + public static double union(boolean all, double... 
inputRows) { + double rows = 0.0; + for (double d : inputRows) { + rows += d; + } + if (!all) { + rows *= UNION_DISTINCT_FACTOR; } + return rows; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AddExpression.java index 247b86cae98..025bc13b409 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,34 +21,30 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor; - /** - * * Subtract expression implementation - * - * * @since 0.1 */ public abstract class AddExpression extends BaseAddSubtractExpression { - public AddExpression() { - } + public AddExpression() { + } - public AddExpression(List children) { - super(children); - } + public AddExpression(List children) { + super(children); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } - @Override - public String getOperatorString() { - return " + "; - } + @Override + public String getOperatorString() { + return " + "; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndExpression.java index 2aa182769d6..18f47ec6d96 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,85 +22,82 @@ import java.util.List; import org.apache.phoenix.expression.visitor.ExpressionVisitor; -import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.TypeMismatchException; - +import org.apache.phoenix.schema.types.PBoolean; /** - * * AND expression implementation - * - * * @since 0.1 */ public class AndExpression extends AndOrExpression { - private static final String AND = "AND"; - - public static Expression create(List children) throws SQLException { - Determinism determinism = Determinism.ALWAYS; - Iterator iterator = children.iterator(); - while (iterator.hasNext()) { - Expression child = iterator.next(); - if (child.getDataType() != PBoolean.INSTANCE) { - throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), child.toString()); - } - if (LiteralExpression.isFalse(child)) { - return child; - } - if (LiteralExpression.isTrue(child)) { - iterator.remove(); - } - determinism.combine(child.getDeterminism()); - } - if (children.size() == 0) { - return LiteralExpression.newConstant(true, determinism); - } - if (children.size() == 1) { - return children.get(0); - } - return new AndExpression(children); + private static final String AND = "AND"; + + public static Expression create(List children) throws SQLException { + Determinism determinism = Determinism.ALWAYS; + Iterator iterator = children.iterator(); + while (iterator.hasNext()) { + Expression child = iterator.next(); + if (child.getDataType() != PBoolean.INSTANCE) { + throw TypeMismatchException.newException(PBoolean.INSTANCE, child.getDataType(), + child.toString()); + } + if (LiteralExpression.isFalse(child)) { + return child; + } + if (LiteralExpression.isTrue(child)) { + iterator.remove(); + } + determinism.combine(child.getDeterminism()); } - - public static String combine(String expression1, String expression2) { - if (expression1 == null) { - return expression2; - } - if (expression2 == null) { - return expression1; - } - return "(" + expression1 + ") " + AND + " (" + expression2 + ")"; + if (children.size() == 0) { + return LiteralExpression.newConstant(true, determinism); } - - public AndExpression() { + if (children.size() == 1) { + return children.get(0); } + return new AndExpression(children); + } - public AndExpression(List children) { - super(children); + public static String combine(String expression1, String expression2) { + if (expression1 == null) { + return expression2; } - - @Override - protected boolean isStopValue(Boolean value) { - return Boolean.FALSE.equals(value); + if (expression2 == null) { + return expression1; } + return "(" + expression1 + ") " + AND + " (" + expression2 + ")"; + } + + public AndExpression() { + } + + public AndExpression(List children) { + super(children); + } - @Override - public String toString() { - StringBuilder buf = new StringBuilder("("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + " " + AND + " "); - } - buf.append(children.get(children.size()-1)); - buf.append(')'); - return buf.toString(); + @Override + protected boolean isStopValue(Boolean value) { + return Boolean.FALSE.equals(value); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("("); + for (int i = 0; i < 
children.size() - 1; i++) { + buf.append(children.get(i) + " " + AND + " "); } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + buf.append(children.get(children.size() - 1)); + buf.append(')'); + return buf.toString(); + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndOrExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndOrExpression.java index 07b07a2c348..89c6a9cc386 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndOrExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/AndOrExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,84 +21,81 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; /** - * * Abstract expression implementation for compound AND and OR expressions - * - * * @since 0.1 */ public abstract class AndOrExpression extends BaseCompoundExpression { - // Remember evaluation of child expression for partial evaluation - private BitSet partialEvalState; - // true if we have seen NULL as the value of some child expression - private boolean seenNull = false; - - public AndOrExpression() { - } - - public AndOrExpression(List children) { - super(children); - } - - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + // Remember evaluation of child expression for partial evaluation + private BitSet partialEvalState; + // true if we have seen NULL as the value of some child expression + private boolean seenNull = false; + + public AndOrExpression() { + } + + public AndOrExpression(List children) { + super(children); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public void reset() { + if (partialEvalState == null) { + partialEvalState = new BitSet(children.size()); + } else { + partialEvalState.clear(); } + seenNull = false; + super.reset(); + } - @Override - public void reset() { - if (partialEvalState == null) { - partialEvalState = new BitSet(children.size()); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + boolean childFailed = false; + for (int i = 0; i < children.size(); i++) { + // If partial state is available, then use that to know we've already evaluated this + // child expression and do not need to do so again. 
+ if (partialEvalState == null || !partialEvalState.get(i)) { + Expression child = children.get(i); + // Call through to child evaluate method matching parent call to allow child to optimize + // evaluate versus getValue code path. + if (child.evaluate(tuple, ptr)) { + // Short circuit if we see our stop value + if (isStopValue((Boolean) PBoolean.INSTANCE.toObject(ptr, child.getDataType()))) { + return true; + } + if (ptr.getLength() == 0) { + seenNull = true; + } + if (partialEvalState != null) { + partialEvalState.set(i); + } } else { - partialEvalState.clear(); + childFailed = true; } - seenNull = false; - super.reset(); + } + } + if (childFailed) { + return false; } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - boolean childFailed = false; - for (int i = 0; i < children.size(); i++) { - // If partial state is available, then use that to know we've already evaluated this - // child expression and do not need to do so again. - if (partialEvalState == null || !partialEvalState.get(i)) { - Expression child = children.get(i); - // Call through to child evaluate method matching parent call to allow child to optimize - // evaluate versus getValue code path. - if (child.evaluate(tuple, ptr)) { - // Short circuit if we see our stop value - if (isStopValue((Boolean) PBoolean.INSTANCE.toObject(ptr, child.getDataType()))) { - return true; - } - if (ptr.getLength() == 0) { - seenNull = true; - } - if (partialEvalState != null) { - partialEvalState.set(i); - } - } else { - childFailed = true; - } - } - } - if (childFailed) { - return false; - } - if (seenNull) { - // Some child evaluated to NULL and we never saw a stop value. - // The expression evaluates as NULL even if the last child evaluated was non-NULL. - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - return true; + if (seenNull) { + // Some child evaluated to NULL and we never saw a stop value. + // The expression evaluates as NULL even if the last child evaluated was non-NULL. + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); } + return true; + } - protected abstract boolean isStopValue(Boolean value); + protected abstract boolean isStopValue(Boolean value); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArithmeticExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArithmeticExpression.java index 2500cbbfc6f..cc0b719ce52 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArithmeticExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArithmeticExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,54 +23,55 @@ public abstract class ArithmeticExpression extends BaseCompoundExpression { - public ArithmeticExpression() { - } + public ArithmeticExpression() { + } + + public ArithmeticExpression(List children) { + super(children); + } + + abstract public ArithmeticExpression clone(List children); - public ArithmeticExpression(List children) { - super(children); + @Override + public String toString() { + StringBuilder buf = new StringBuilder("("); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + getOperatorString()); } + buf.append(children.get(children.size() - 1)); + buf.append(')'); + return buf.toString(); + } - abstract public ArithmeticExpression clone(List children); - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + getOperatorString()); - } - buf.append(children.get(children.size()-1)); - buf.append(')'); - return buf.toString(); + protected Integer getScale(Expression e) { + Integer scale = e.getScale(); + if (scale != null) { + return scale; } - - protected Integer getScale(Expression e) { - Integer scale = e.getScale(); - if (scale != null) { - return scale; - } - PDataType dataType = e.getDataType(); - if (dataType != null) { - scale = dataType.getScale(null); - if (scale != null) { - return scale; - } - } - return null; + PDataType dataType = e.getDataType(); + if (dataType != null) { + scale = dataType.getScale(null); + if (scale != null) { + return scale; + } } - protected int getPrecision(Expression e) { - Integer precision = e.getMaxLength(); - if (precision != null) { - return precision; - } - PDataType dataType = e.getDataType(); - if (dataType != null) { - precision = dataType.getMaxLength(null); - if (precision != null) { - return precision; - } - } - return PDataType.MAX_PRECISION; + return null; + } + + protected int getPrecision(Expression e) { + Integer precision = e.getMaxLength(); + if (precision != null) { + return precision; + } + PDataType dataType = e.getDataType(); + if (dataType != null) { + precision = dataType.getMaxLength(null); + if (precision != null) { + return precision; + } } - - abstract protected String getOperatorString(); + return PDataType.MAX_PRECISION; + } + + abstract protected String getOperatorString(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java index 8b83bf7525e..68b15d33ebb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ArrayConstructorExpression.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.expression; @@ -30,122 +38,122 @@ * Creates an expression for Upsert with Values/Select using ARRAY */ public class ArrayConstructorExpression extends BaseCompoundExpression { - private PDataType baseType; - private int position = -1; - private Object[] elements; - private final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); - private int estimatedSize = 0; - private boolean rowKeyOrderOptimizable; - - public ArrayConstructorExpression() { - } + private PDataType baseType; + private int position = -1; + private Object[] elements; + private final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); + private int estimatedSize = 0; + private boolean rowKeyOrderOptimizable; - public ArrayConstructorExpression(List children, PDataType baseType, boolean rowKeyOrderOptimizable) { - super(children); - init(baseType, rowKeyOrderOptimizable); - } + public ArrayConstructorExpression() { + } - public ArrayConstructorExpression clone(List children) { - return new ArrayConstructorExpression(children, this.baseType, this.rowKeyOrderOptimizable); - } - - private void init(PDataType baseType, boolean rowKeyOrderOptimizable) { - this.baseType = baseType; - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - elements = new Object[getChildren().size()]; - valuePtr.set(ByteUtil.EMPTY_BYTE_ARRAY); - estimatedSize = PArrayDataType.estimateSize(this.children.size(), this.baseType); - } + public ArrayConstructorExpression(List children, PDataType baseType, + boolean rowKeyOrderOptimizable) { + super(children); + init(baseType, rowKeyOrderOptimizable); + } - @Override - public PDataType getDataType() { - return PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); - } + public ArrayConstructorExpression clone(List children) { + return new ArrayConstructorExpression(children, this.baseType, this.rowKeyOrderOptimizable); + } - @Override - public void reset() { - super.reset(); - position = 0; - Arrays.fill(elements, null); - valuePtr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } + private void init(PDataType baseType, boolean rowKeyOrderOptimizable) { + this.baseType = baseType; + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + elements = new Object[getChildren().size()]; + valuePtr.set(ByteUtil.EMPTY_BYTE_ARRAY); + estimatedSize = 
PArrayDataType.estimateSize(this.children.size(), this.baseType); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (position == elements.length) { - ptr.set(valuePtr.get(), valuePtr.getOffset(), valuePtr.getLength()); - return true; - } - TrustedByteArrayOutputStream byteStream = new TrustedByteArrayOutputStream(estimatedSize); - DataOutputStream oStream = new DataOutputStream(byteStream); - PArrayDataTypeEncoder builder = - new PArrayDataTypeEncoder(byteStream, oStream, children.size(), baseType, getSortOrder(), rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); - for (int i = position >= 0 ? position : 0; i < elements.length; i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - if (tuple != null && !tuple.isImmutable()) { - if (position >= 0) position = i; - return false; - } - } else { - builder.appendValue(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - } - if (position >= 0) position = elements.length; - byte[] bytes = builder.encode(); - ptr.set(bytes, 0, bytes.length); - valuePtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - return true; - } + @Override + public PDataType getDataType() { + return PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); + } + @Override + public void reset() { + super.reset(); + position = 0; + Arrays.fill(elements, null); + valuePtr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - boolean rowKeyOrderOptimizable = false; - int baseTypeOrdinal = WritableUtils.readVInt(input); - if (baseTypeOrdinal < 0) { - rowKeyOrderOptimizable = true; - baseTypeOrdinal = -(baseTypeOrdinal+1); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (position == elements.length) { + ptr.set(valuePtr.get(), valuePtr.getOffset(), valuePtr.getLength()); + return true; + } + TrustedByteArrayOutputStream byteStream = new TrustedByteArrayOutputStream(estimatedSize); + DataOutputStream oStream = new DataOutputStream(byteStream); + PArrayDataTypeEncoder builder = + new PArrayDataTypeEncoder(byteStream, oStream, children.size(), baseType, getSortOrder(), + rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); + for (int i = position >= 0 ? 
position : 0; i < elements.length; i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + if (tuple != null && !tuple.isImmutable()) { + if (position >= 0) position = i; + return false; } - init(PDataType.values()[baseTypeOrdinal], rowKeyOrderOptimizable); + } else { + builder.appendValue(ptr.get(), ptr.getOffset(), ptr.getLength()); + } } + if (position >= 0) position = elements.length; + byte[] bytes = builder.encode(); + ptr.set(bytes, 0, bytes.length); + valuePtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + return true; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - if (rowKeyOrderOptimizable) { - WritableUtils.writeVInt(output, -(baseType.ordinal()+1)); - } else { - WritableUtils.writeVInt(output, baseType.ordinal()); - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + boolean rowKeyOrderOptimizable = false; + int baseTypeOrdinal = WritableUtils.readVInt(input); + if (baseTypeOrdinal < 0) { + rowKeyOrderOptimizable = true; + baseTypeOrdinal = -(baseTypeOrdinal + 1); } - - @Override - public boolean requiresFinalEvaluation() { - return true; + init(PDataType.values()[baseTypeOrdinal], rowKeyOrderOptimizable); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + if (rowKeyOrderOptimizable) { + WritableUtils.writeVInt(output, -(baseType.ordinal() + 1)); + } else { + WritableUtils.writeVInt(output, baseType.ordinal()); } + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public boolean requiresFinalEvaluation() { + return true; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "["); - if (children.size()==0) - return buf.append("]").toString(); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ","); - } - buf.append(children.get(children.size()-1) + "]"); - return buf.toString(); + return t; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "["); + if (children.size() == 0) return buf.append("]").toString(); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + ","); } + buf.append(children.get(children.size() - 1) + "]"); + return buf.toString(); + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseAddSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseAddSubtractExpression.java index 43654b7ddda..eea04f9a97f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseAddSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseAddSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,30 +21,29 @@ import org.apache.phoenix.schema.types.PDataType; - abstract public class BaseAddSubtractExpression extends ArithmeticExpression { - public BaseAddSubtractExpression() { - } + public BaseAddSubtractExpression() { + } - public BaseAddSubtractExpression(List children) { - super(children); - } + public BaseAddSubtractExpression(List children) { + super(children); + } - protected static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { - if (ls == null || rs == null) { - return PDataType.MAX_PRECISION; - } - int val = getScale(lp, rp, ls, rs) + Math.max(lp - ls, rp - rs) + 1; - return Math.min(PDataType.MAX_PRECISION, val); + protected static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { + if (ls == null || rs == null) { + return PDataType.MAX_PRECISION; } + int val = getScale(lp, rp, ls, rs) + Math.max(lp - ls, rp - rs) + 1; + return Math.min(PDataType.MAX_PRECISION, val); + } - protected static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { - // If we are adding a decimal with scale and precision to a decimal - // with no precision nor scale, the scale system does not apply. - if (ls == null || rs == null) { - return null; - } - int val = Math.max(ls, rs); - return Math.min(PDataType.MAX_PRECISION, val); + protected static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { + // If we are adding a decimal with scale and precision to a decimal + // with no precision nor scale, the scale system does not apply. + if (ls == null || rs == null) { + return null; } + int val = Math.max(ls, rs); + return Math.min(PDataType.MAX_PRECISION, val); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseCompoundExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseCompoundExpression.java index 20db0cb7913..ab132742aff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseCompoundExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseCompoundExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,127 +25,124 @@ import java.util.List; import org.apache.hadoop.io.WritableUtils; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - public abstract class BaseCompoundExpression extends BaseExpression { - protected List children; - private boolean isNullable; - private boolean isStateless; - private Determinism determinism; - private boolean requiresFinalEvaluation; - private boolean cloneExpression; - - public BaseCompoundExpression() { - init(Collections.emptyList()); - } - - public BaseCompoundExpression(List children) { - init(children); - } - - private void init(List children) { - this.children = ImmutableList.copyOf(children); - boolean isStateless = true; - boolean isNullable = false; - boolean requiresFinalEvaluation = false; - boolean cloneExpression = false; - this.determinism = Determinism.ALWAYS; - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - isNullable |= child.isNullable(); - isStateless &= child.isStateless(); - this.determinism = this.determinism.combine(child.getDeterminism()); - requiresFinalEvaluation |= child.requiresFinalEvaluation(); - cloneExpression |= child.isCloneExpression(); - } - this.isStateless = isStateless; - this.isNullable = isNullable; - this.requiresFinalEvaluation = requiresFinalEvaluation; - this.cloneExpression = cloneExpression; - } - - @Override - public List getChildren() { - return children; - } - - - @Override - public Determinism getDeterminism() { - return determinism; - } + protected List children; + private boolean isNullable; + private boolean isStateless; + private Determinism determinism; + private boolean requiresFinalEvaluation; + private boolean cloneExpression; - @Override - public boolean isCloneExpression() { - return this.cloneExpression; - } + public BaseCompoundExpression() { + init(Collections. 
emptyList()); + } - @Override - public boolean isStateless() { - return isStateless; - } + public BaseCompoundExpression(List children) { + init(children); + } - @Override - public boolean isNullable() { - return isNullable; + private void init(List children) { + this.children = ImmutableList.copyOf(children); + boolean isStateless = true; + boolean isNullable = false; + boolean requiresFinalEvaluation = false; + boolean cloneExpression = false; + this.determinism = Determinism.ALWAYS; + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + isNullable |= child.isNullable(); + isStateless &= child.isStateless(); + this.determinism = this.determinism.combine(child.getDeterminism()); + requiresFinalEvaluation |= child.requiresFinalEvaluation(); + cloneExpression |= child.isCloneExpression(); } + this.isStateless = isStateless; + this.isNullable = isNullable; + this.requiresFinalEvaluation = requiresFinalEvaluation; + this.cloneExpression = cloneExpression; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + children.hashCode(); - return result; - } + @Override + public List getChildren() { + return children; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - BaseCompoundExpression other = (BaseCompoundExpression)obj; - if (!children.equals(other.children)) return false; - return true; - } + @Override + public Determinism getDeterminism() { + return determinism; + } - @Override - public void readFields(DataInput input) throws IOException { - int len = WritableUtils.readVInt(input); - Listchildren = new ArrayList(len); - for (int i = 0; i < len; i++) { - Expression child = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - child.readFields(input); - children.add(child); - } - init(children); - } + @Override + public boolean isCloneExpression() { + return this.cloneExpression; + } - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, children.size()); - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - WritableUtils.writeVInt(output, ExpressionType.valueOf(child).ordinal()); - child.write(output); - } - } + @Override + public boolean isStateless() { + return isStateless; + } + + @Override + public boolean isNullable() { + return isNullable; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + children.hashCode(); + return result; + } - @Override - public void reset() { - for (int i = 0; i < children.size(); i++) { - children.get(i).reset(); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + BaseCompoundExpression other = (BaseCompoundExpression) obj; + if (!children.equals(other.children)) return false; + return true; + } + + @Override + public void readFields(DataInput input) throws IOException { + int len = WritableUtils.readVInt(input); + List children = new ArrayList(len); + for (int i = 0; i < len; i++) { + Expression child = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + child.readFields(input); + children.add(child); } - - @Override - public String toString() { - return this.getClass().getName() + " [children=" + children + "]"; + init(children); + } + + @Override + public 
void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, children.size()); + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + WritableUtils.writeVInt(output, ExpressionType.valueOf(child).ordinal()); + child.write(output); } - - @Override - public boolean requiresFinalEvaluation() { - return requiresFinalEvaluation; + } + + @Override + public void reset() { + for (int i = 0; i < children.size(); i++) { + children.get(i).reset(); } + } + + @Override + public String toString() { + return this.getClass().getName() + " [children=" + children + "]"; + } + + @Override + public boolean requiresFinalEvaluation() { + return requiresFinalEvaluation; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java index 9c81cf0072e..b3cf4a9505f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseDecimalAddSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression; public class BaseDecimalAddSubtractExpression { diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseExpression.java index 3b21b508959..c019f6fc4d5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,243 +39,273 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PUnsignedTimestamp; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Base class for Expression hierarchy that provides common - * default implementations for most methods - * - * + * Base class for Expression hierarchy that provides common default implementations for most methods * @since 0.1 */ public abstract class BaseExpression implements Expression { - - public static interface ExpressionComparabilityWrapper { - public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) throws SQLException; - } - - /* - * Used to coerce the RHS to the expected type based on the LHS. In some circumstances, - * we may need to round the value up or down. 
For example: - * WHERE (a,b) < (2.4, 'foo') - * We take the ceiling of 2.4 to make it 3 if a is an INTEGER to prevent needing to coerce - * every time during evaluation. - */ - private static ExpressionComparabilityWrapper[] WRAPPERS = new ExpressionComparabilityWrapper[CompareOperator.values().length]; - static { - WRAPPERS[CompareOperator.LESS.ordinal()] = new ExpressionComparabilityWrapper() { - - @Override - public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) throws SQLException { - Expression e = rhs; - PDataType rhsType = rhs.getDataType(); - PDataType lhsType = lhs.getDataType(); - if (rhsType == PDecimal.INSTANCE && lhsType != PDecimal.INSTANCE) { - e = FloorDecimalExpression.create(rhs); - } else if ((rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) && (lhsType != PTimestamp.INSTANCE && lhsType != PUnsignedTimestamp.INSTANCE)) { - e = FloorDateExpression.create(rhs, TimeUnit.MILLISECOND); - } - e = CoerceExpression.create(e, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), rowKeyOrderOptimizable); - return e; - } - - }; - WRAPPERS[CompareOperator.LESS_OR_EQUAL.ordinal()] = WRAPPERS[CompareOperator.LESS.ordinal()]; - - WRAPPERS[CompareOperator.GREATER.ordinal()] = new ExpressionComparabilityWrapper() { - - @Override - public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) throws SQLException { - Expression e = rhs; - PDataType rhsType = rhs.getDataType(); - PDataType lhsType = lhs.getDataType(); - if (rhsType == PDecimal.INSTANCE && lhsType != PDecimal.INSTANCE) { - e = CeilDecimalExpression.create(rhs); - } else if ((rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) && (lhsType != PTimestamp.INSTANCE && lhsType != PUnsignedTimestamp.INSTANCE)) { - e = CeilTimestampExpression.create(rhs); - } - e = CoerceExpression.create(e, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), rowKeyOrderOptimizable); - return e; - } - - }; - WRAPPERS[CompareOperator.GREATER_OR_EQUAL.ordinal()] = WRAPPERS[CompareOperator.GREATER.ordinal()]; - WRAPPERS[CompareOperator.EQUAL.ordinal()] = new ExpressionComparabilityWrapper() { - - @Override - public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) throws SQLException { - PDataType lhsType = lhs.getDataType(); - Expression e = CoerceExpression.create(rhs, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), rowKeyOrderOptimizable); - return e; - } - - }; - } - - private static ExpressionComparabilityWrapper getWrapper(CompareOperator op) { - ExpressionComparabilityWrapper wrapper = WRAPPERS[op.ordinal()]; - if (wrapper == null) { - throw new IllegalStateException("Unexpected compare op of " + op + " for row value constructor"); - } - return wrapper; - } - - /** - * Coerce the RHS to match the LHS type, throwing if the types are incompatible. - * @param lhs left hand side expression - * @param rhs right hand side expression - * @param op operator being used to compare the expressions, which can affect rounding we may need to do. 
- * @param rowKeyOrderOptimizable - * @return the newly coerced expression - * @throws SQLException - */ - public static Expression coerce(Expression lhs, Expression rhs, CompareOperator op, boolean rowKeyOrderOptimizable) throws SQLException { - return coerce(lhs, rhs, getWrapper(op), rowKeyOrderOptimizable); - } - - public static Expression coerce(Expression lhs, Expression rhs, ExpressionComparabilityWrapper wrapper, boolean rowKeyOrderOptimizable) throws SQLException { - - if (lhs instanceof RowValueConstructorExpression && rhs instanceof RowValueConstructorExpression) { - int i = 0; - List coercedNodes = Lists.newArrayListWithExpectedSize(Math.max(lhs.getChildren().size(), rhs.getChildren().size())); - for (; i < Math.min(lhs.getChildren().size(),rhs.getChildren().size()); i++) { - coercedNodes.add(coerce(lhs.getChildren().get(i), rhs.getChildren().get(i), wrapper, rowKeyOrderOptimizable)); - } - for (; i < lhs.getChildren().size(); i++) { - coercedNodes.add(coerce(lhs.getChildren().get(i), null, wrapper, rowKeyOrderOptimizable)); - } - for (; i < rhs.getChildren().size(); i++) { - coercedNodes.add(coerce(null, rhs.getChildren().get(i), wrapper, rowKeyOrderOptimizable)); - } - trimTrailingNulls(coercedNodes); - return coercedNodes.equals(rhs.getChildren()) ? rhs : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); - } else if (lhs instanceof RowValueConstructorExpression) { - List coercedNodes = Lists.newArrayListWithExpectedSize(Math.max(rhs.getChildren().size(), lhs.getChildren().size())); - coercedNodes.add(coerce(lhs.getChildren().get(0), rhs, wrapper, rowKeyOrderOptimizable)); - for (int i = 1; i < lhs.getChildren().size(); i++) { - coercedNodes.add(coerce(lhs.getChildren().get(i), null, wrapper, rowKeyOrderOptimizable)); - } - trimTrailingNulls(coercedNodes); - return coercedNodes.equals(rhs.getChildren()) ? rhs : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); - } else if (rhs instanceof RowValueConstructorExpression) { - List coercedNodes = Lists.newArrayListWithExpectedSize(Math.max(rhs.getChildren().size(), lhs.getChildren().size())); - coercedNodes.add(coerce(lhs, rhs.getChildren().get(0), wrapper, rowKeyOrderOptimizable)); - for (int i = 1; i < rhs.getChildren().size(); i++) { - coercedNodes.add(coerce(null, rhs.getChildren().get(i), wrapper, rowKeyOrderOptimizable)); - } - trimTrailingNulls(coercedNodes); - return coercedNodes.equals(rhs.getChildren()) ? rhs : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); - } else if (lhs == null) { - return rhs; - } else if (rhs == null) { - return LiteralExpression.newConstant(null, lhs.getDataType(), lhs.getDeterminism()); - } else { - if (rhs.getDataType() != null && lhs.getDataType() != null && !rhs.getDataType().isCastableTo(lhs.getDataType())) { - throw TypeMismatchException.newException(lhs.getDataType(), rhs.getDataType()); - } - return wrapper.wrap(lhs, rhs, rowKeyOrderOptimizable); + + public static interface ExpressionComparabilityWrapper { + public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) + throws SQLException; + } + + /* + * Used to coerce the RHS to the expected type based on the LHS. In some circumstances, we may + * need to round the value up or down. For example: WHERE (a,b) < (2.4, 'foo') We take the ceiling + * of 2.4 to make it 3 if a is an INTEGER to prevent needing to coerce every time during + * evaluation. 
+ */ + private static ExpressionComparabilityWrapper[] WRAPPERS = + new ExpressionComparabilityWrapper[CompareOperator.values().length]; + static { + WRAPPERS[CompareOperator.LESS.ordinal()] = new ExpressionComparabilityWrapper() { + + @Override + public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) + throws SQLException { + Expression e = rhs; + PDataType rhsType = rhs.getDataType(); + PDataType lhsType = lhs.getDataType(); + if (rhsType == PDecimal.INSTANCE && lhsType != PDecimal.INSTANCE) { + e = FloorDecimalExpression.create(rhs); + } else if ( + (rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) + && (lhsType != PTimestamp.INSTANCE && lhsType != PUnsignedTimestamp.INSTANCE) + ) { + e = FloorDateExpression.create(rhs, TimeUnit.MILLISECOND); } - } - - private static void trimTrailingNulls(List expressions) { - for (int i = expressions.size() - 1; i >= 0; i--) { - Expression e = expressions.get(i); - if (e instanceof LiteralExpression && ((LiteralExpression)e).getValue() == null) { - expressions.remove(i); - } else { - break; - } + e = CoerceExpression.create(e, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), + rowKeyOrderOptimizable); + return e; + } + + }; + WRAPPERS[CompareOperator.LESS_OR_EQUAL.ordinal()] = WRAPPERS[CompareOperator.LESS.ordinal()]; + + WRAPPERS[CompareOperator.GREATER.ordinal()] = new ExpressionComparabilityWrapper() { + + @Override + public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) + throws SQLException { + Expression e = rhs; + PDataType rhsType = rhs.getDataType(); + PDataType lhsType = lhs.getDataType(); + if (rhsType == PDecimal.INSTANCE && lhsType != PDecimal.INSTANCE) { + e = CeilDecimalExpression.create(rhs); + } else if ( + (rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) + && (lhsType != PTimestamp.INSTANCE && lhsType != PUnsignedTimestamp.INSTANCE) + ) { + e = CeilTimestampExpression.create(rhs); } - } + e = CoerceExpression.create(e, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), + rowKeyOrderOptimizable); + return e; + } - @Override - public boolean isNullable() { - return false; - } + }; + WRAPPERS[CompareOperator.GREATER_OR_EQUAL.ordinal()] = + WRAPPERS[CompareOperator.GREATER.ordinal()]; + WRAPPERS[CompareOperator.EQUAL.ordinal()] = new ExpressionComparabilityWrapper() { - @Override - public Integer getMaxLength() { - return null; - } + @Override + public Expression wrap(Expression lhs, Expression rhs, boolean rowKeyOrderOptimizable) + throws SQLException { + PDataType lhsType = lhs.getDataType(); + Expression e = CoerceExpression.create(rhs, lhsType, lhs.getSortOrder(), lhs.getMaxLength(), + rowKeyOrderOptimizable); + return e; + } - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - @Override - public void readFields(DataInput input) throws IOException { - } + }; + } - @Override - public void write(DataOutput output) throws IOException { + private static ExpressionComparabilityWrapper getWrapper(CompareOperator op) { + ExpressionComparabilityWrapper wrapper = WRAPPERS[op.ordinal()]; + if (wrapper == null) { + throw new IllegalStateException( + "Unexpected compare op of " + op + " for row value constructor"); } + return wrapper; + } - @Override - public void reset() { - } - - protected final List acceptChildren(ExpressionVisitor visitor, Iterator iterator) { - if (iterator == null) { - iterator = visitor.defaultIterator(this); - } 
+ /** + * Coerce the RHS to match the LHS type, throwing if the types are incompatible. + * @param lhs left hand side expression + * @param rhs right hand side expression + * @param op operator being used to compare the expressions, which can affect rounding we may + * need to do. + * @return the newly coerced expression + */ + public static Expression coerce(Expression lhs, Expression rhs, CompareOperator op, + boolean rowKeyOrderOptimizable) throws SQLException { + return coerce(lhs, rhs, getWrapper(op), rowKeyOrderOptimizable); + } - // PHOENIX-6669 Sort RVCs together and first so that where optimizer intersectrages work correctly - List children = new ArrayList<>(); - while (iterator.hasNext()) { - Expression child = iterator.next(); - if (child != null && child.getChildren() != null && child.getChildren().size() > 1 && - child.getChildren().get(1) instanceof RowValueConstructorExpression) { - children.add(0, child); - } else { - children.add(child); - } - } + public static Expression coerce(Expression lhs, Expression rhs, + ExpressionComparabilityWrapper wrapper, boolean rowKeyOrderOptimizable) throws SQLException { - List l = Collections.emptyList(); - for (Expression child : children) { - T t = child.accept(visitor); - if (t != null) { - if (l.isEmpty()) { - l = new ArrayList(getChildren().size()); - } - l.add(t); - } - } - return l; + if ( + lhs instanceof RowValueConstructorExpression && rhs instanceof RowValueConstructorExpression + ) { + int i = 0; + List coercedNodes = Lists + .newArrayListWithExpectedSize(Math.max(lhs.getChildren().size(), rhs.getChildren().size())); + for (; i < Math.min(lhs.getChildren().size(), rhs.getChildren().size()); i++) { + coercedNodes.add(coerce(lhs.getChildren().get(i), rhs.getChildren().get(i), wrapper, + rowKeyOrderOptimizable)); + } + for (; i < lhs.getChildren().size(); i++) { + coercedNodes.add(coerce(lhs.getChildren().get(i), null, wrapper, rowKeyOrderOptimizable)); + } + for (; i < rhs.getChildren().size(); i++) { + coercedNodes.add(coerce(null, rhs.getChildren().get(i), wrapper, rowKeyOrderOptimizable)); + } + trimTrailingNulls(coercedNodes); + return coercedNodes.equals(rhs.getChildren()) + ? rhs + : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); + } else if (lhs instanceof RowValueConstructorExpression) { + List coercedNodes = Lists + .newArrayListWithExpectedSize(Math.max(rhs.getChildren().size(), lhs.getChildren().size())); + coercedNodes.add(coerce(lhs.getChildren().get(0), rhs, wrapper, rowKeyOrderOptimizable)); + for (int i = 1; i < lhs.getChildren().size(); i++) { + coercedNodes.add(coerce(lhs.getChildren().get(i), null, wrapper, rowKeyOrderOptimizable)); + } + trimTrailingNulls(coercedNodes); + return coercedNodes.equals(rhs.getChildren()) + ? rhs + : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); + } else if (rhs instanceof RowValueConstructorExpression) { + List coercedNodes = Lists + .newArrayListWithExpectedSize(Math.max(rhs.getChildren().size(), lhs.getChildren().size())); + coercedNodes.add(coerce(lhs, rhs.getChildren().get(0), wrapper, rowKeyOrderOptimizable)); + for (int i = 1; i < rhs.getChildren().size(); i++) { + coercedNodes.add(coerce(null, rhs.getChildren().get(i), wrapper, rowKeyOrderOptimizable)); + } + trimTrailingNulls(coercedNodes); + return coercedNodes.equals(rhs.getChildren()) + ? 
rhs + : new RowValueConstructorExpression(coercedNodes, rhs.isStateless()); + } else if (lhs == null) { + return rhs; + } else if (rhs == null) { + return LiteralExpression.newConstant(null, lhs.getDataType(), lhs.getDeterminism()); + } else { + if ( + rhs.getDataType() != null && lhs.getDataType() != null + && !rhs.getDataType().isCastableTo(lhs.getDataType()) + ) { + throw TypeMismatchException.newException(lhs.getDataType(), rhs.getDataType()); + } + return wrapper.wrap(lhs, rhs, rowKeyOrderOptimizable); } - - @Override - public Determinism getDeterminism() { - return Determinism.ALWAYS; - } - - @Override - public boolean isStateless() { - return false; + } + + private static void trimTrailingNulls(List expressions) { + for (int i = expressions.size() - 1; i >= 0; i--) { + Expression e = expressions.get(i); + if (e instanceof LiteralExpression && ((LiteralExpression) e).getValue() == null) { + expressions.remove(i); + } else { + break; + } } - - @Override - public boolean requiresFinalEvaluation() { - return false; + } + + @Override + public boolean isNullable() { + return false; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + @Override + public void readFields(DataInput input) throws IOException { + } + + @Override + public void write(DataOutput output) throws IOException { + } + + @Override + public void reset() { + } + + protected final List acceptChildren(ExpressionVisitor visitor, + Iterator iterator) { + if (iterator == null) { + iterator = visitor.defaultIterator(this); } - @Override - public boolean isCloneExpression() { - return isCloneExpressionByDeterminism(this); + // PHOENIX-6669 Sort RVCs together and first so that where optimizer intersectrages work + // correctly + List children = new ArrayList<>(); + while (iterator.hasNext()) { + Expression child = iterator.next(); + if ( + child != null && child.getChildren() != null && child.getChildren().size() > 1 + && child.getChildren().get(1) instanceof RowValueConstructorExpression + ) { + children.add(0, child); + } else { + children.add(child); + } } - protected static boolean isCloneExpressionByDeterminism(BaseExpression expression) { - if(expression.getDeterminism() == Determinism.PER_INVOCATION) { - return true; + List l = Collections.emptyList(); + for (Expression child : children) { + T t = child.accept(visitor); + if (t != null) { + if (l.isEmpty()) { + l = new ArrayList(getChildren().size()); } - return false; + l.add(t); + } + } + return l; + } + + @Override + public Determinism getDeterminism() { + return Determinism.ALWAYS; + } + + @Override + public boolean isStateless() { + return false; + } + + @Override + public boolean requiresFinalEvaluation() { + return false; + } + + @Override + public boolean isCloneExpression() { + return isCloneExpressionByDeterminism(this); + } + + protected static boolean isCloneExpressionByDeterminism(BaseExpression expression) { + if (expression.getDeterminism() == Determinism.PER_INVOCATION) { + return true; } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java index 5d9252919ea..26fe7ea7366 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseSingleExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,103 +24,98 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.expression.visitor.ExpressionVisitor; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - /** - * * Base class for expressions which have a single child expression - * - * * @since 0.1 */ public abstract class BaseSingleExpression extends BaseExpression { - protected List children; - - public BaseSingleExpression() { - } - - public BaseSingleExpression(Expression expression) { - this(ImmutableList.of(expression)); - } - - public BaseSingleExpression(List children) { - this.children = children; - } - - @Override - public List getChildren() { - return children; - } - - @Override - public void readFields(DataInput input) throws IOException { - Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); - children = ImmutableList.of(expression); - } - - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, ExpressionType.valueOf(children.get(0)).ordinal()); - children.get(0).write(output); - } - - @Override - public boolean isNullable() { - return children.get(0).isNullable(); - } - - @Override - public void reset() { - children.get(0).reset(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + children.get(0).hashCode(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - BaseSingleExpression other = (BaseSingleExpression)obj; - if (!children.get(0).equals(other.children.get(0))) return false; - return true; - } - - @Override - public T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, null); - if (l.isEmpty()) { - return visitor.defaultReturn(this, l); - } - return l.get(0); - } - - public Expression getChild() { - return children.get(0); - } - - @Override - public boolean requiresFinalEvaluation() { - return children.get(0).requiresFinalEvaluation(); - } - - @Override - public Determinism getDeterminism() { - return children.get(0).getDeterminism(); - } - - @Override - public boolean isCloneExpression() { - return children.get(0).isCloneExpression(); + protected List children; + + public BaseSingleExpression() { + } + + public BaseSingleExpression(Expression expression) { + this(ImmutableList.of(expression)); + } + + public BaseSingleExpression(List children) { + this.children = children; + } + + @Override + public List getChildren() { + return children; + } + + @Override + public void readFields(DataInput input) throws IOException { + Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + children = ImmutableList.of(expression); + } + + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, ExpressionType.valueOf(children.get(0)).ordinal()); + 
children.get(0).write(output); + } + + @Override + public boolean isNullable() { + return children.get(0).isNullable(); + } + + @Override + public void reset() { + children.get(0).reset(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + children.get(0).hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + BaseSingleExpression other = (BaseSingleExpression) obj; + if (!children.get(0).equals(other.children.get(0))) return false; + return true; + } + + @Override + public T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, null); + if (l.isEmpty()) { + return visitor.defaultReturn(this, l); } + return l.get(0); + } + + public Expression getChild() { + return children.get(0); + } + + @Override + public boolean requiresFinalEvaluation() { + return children.get(0).requiresFinalEvaluation(); + } + + @Override + public Determinism getDeterminism() { + return children.get(0).getDeterminism(); + } + + @Override + public boolean isCloneExpression() { + return children.get(0).isCloneExpression(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseTerminalExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseTerminalExpression.java index 832c95c447c..95155a4a457 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseTerminalExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/BaseTerminalExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,18 +20,13 @@ import java.util.Collections; import java.util.List; - - /** - * * Grouping class for expression that have no expression children - * - * * @since 0.1 */ public abstract class BaseTerminalExpression extends BaseExpression { - @Override - public List getChildren() { - return Collections.emptyList(); - } + @Override + public List getChildren() { + return Collections.emptyList(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java index 5b1dd2e2700..f868f706387 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ByteBasedLikeExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,6 @@ package org.apache.phoenix.expression; import java.util.List; -import java.util.regex.Pattern; import org.apache.phoenix.expression.util.regex.AbstractBasePattern; import org.apache.phoenix.expression.util.regex.JONIPattern; @@ -27,24 +26,24 @@ public class ByteBasedLikeExpression extends LikeExpression { - public ByteBasedLikeExpression() { - } + public ByteBasedLikeExpression() { + } - public ByteBasedLikeExpression(List children) { - super(children); - } + public ByteBasedLikeExpression(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JONIPattern(value, Option.MULTILINE); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JONIPattern(value, Option.MULTILINE); + } - public static LikeExpression create(List children, LikeType likeType) { - return new ByteBasedLikeExpression(addLikeTypeChild(children, likeType)); - } + public static LikeExpression create(List children, LikeType likeType) { + return new ByteBasedLikeExpression(addLikeTypeChild(children, likeType)); + } - @Override - public LikeExpression clone(List children) { - return new ByteBasedLikeExpression(children); - } + @Override + public LikeExpression clone(List children) { + return new ByteBasedLikeExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CaseExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CaseExpression.java index 966dc634b44..f41a688b177 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CaseExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CaseExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,215 +35,212 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.ExpressionUtil; - /** - * * CASE/WHEN expression implementation - * - * * @since 0.1 */ public class CaseExpression extends BaseCompoundExpression { - private static final int FULLY_EVALUATE = -1; - - private short evalIndex = FULLY_EVALUATE; - private boolean foundIndex; - private PDataType returnType; - - public CaseExpression() { - } - - public static Expression create(List children) throws SQLException { - CaseExpression caseExpression = new CaseExpression(coerceIfNecessary(children)); - if (ExpressionUtil.isConstant(caseExpression)) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int index = caseExpression.evaluateIndexOf(null, ptr); - if (index < 0) { - return LiteralExpression.newConstant(null, caseExpression.getDeterminism()); - } - return caseExpression.getChildren().get(index); - } - return caseExpression; - } - - private static List coerceIfNecessary(List children) throws SQLException { - boolean isChildTypeUnknown = false; - PDataType returnType = children.get(0).getDataType(); - for (int i = 2; i < children.size(); i+=2) { - Expression child = children.get(i); - PDataType childType = child.getDataType(); - if (childType == null) { - isChildTypeUnknown = true; - } else if (returnType == null) { - returnType = childType; - isChildTypeUnknown = true; - } else if (returnType == childType || childType.isCoercibleTo(returnType)) { - continue; - } else if (returnType.isCoercibleTo(childType)) { - returnType = childType; - } else { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage("Case expressions must have common type: " + returnType + " cannot be coerced to " + childType) - .build().buildException(); - } - } - // If we found an "unknown" child type and the return type is a number - // make the return type be the most general number type of DECIMAL. - if (isChildTypeUnknown && returnType != null && returnType.isCoercibleTo(PDecimal.INSTANCE)) { - returnType = PDecimal.INSTANCE; - } - List newChildren = children; - for (int i = 0; i < children.size(); i+=2) { - Expression child = children.get(i); - PDataType childType = child.getDataType(); - if (childType != returnType) { - if (newChildren == children) { - newChildren = new ArrayList(children); - } - newChildren.set(i, CoerceExpression.create(child, returnType)); - } - } - return newChildren; - } - /** - * Construct CASE/WHEN expression - * @param children list of expressions in the form of: - * {@code ((, )+, []) } - */ - public CaseExpression(List children) { - super(children); - returnType = children.get(0).getDataType(); - } - - private boolean isPartiallyEvaluating() { - return evalIndex != FULLY_EVALUATE; - } - - public boolean hasElse() { - return children.size() % 2 != 0; - } - - @Override - public boolean isNullable() { - // If any expression is nullable or there's no else clause - // return true since null may be returned. 
- if (super.isNullable() || !hasElse()) { - return true; - } - return children.get(children.size()-1).isNullable(); - } + private static final int FULLY_EVALUATE = -1; - @Override - public PDataType getDataType() { - return returnType; - } + private short evalIndex = FULLY_EVALUATE; + private boolean foundIndex; + private PDataType returnType; - @Override - public void reset() { - foundIndex = false; - evalIndex = 0; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.returnType = PDataType.values()[WritableUtils.readVInt(input)]; - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeVInt(output, this.returnType.ordinal()); - } - - public int evaluateIndexOf(Tuple tuple, ImmutableBytesWritable ptr) { - if (foundIndex) { - return evalIndex; - } - int size = children.size(); - // If we're doing partial evaluation, start where we left off - for (int i = isPartiallyEvaluating() ? evalIndex : 0; i < size; i+=2) { - // Short circuit if we see our stop value - if (i+1 == size) { - return i; - } - // If we get null, we have to re-evaluate from that point (special case this in filter, like is null) - // We may only run this when we're done/have all values - boolean evaluated = children.get(i+1).evaluate(tuple, ptr); - if (evaluated && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr))) { - if (isPartiallyEvaluating()) { - foundIndex = true; - } - return i; - } - if (isPartiallyEvaluating()) { - if (evaluated || tuple.isImmutable()) { - evalIndex+=2; - } else { - /* - * Return early here if incrementally evaluating and we don't - * have all the key values yet. We can't continue because we'd - * potentially be bypassing cases which we could later evaluate - * once we have more column values. - */ - return -1; - } - } - } - // No conditions matched, return size to indicate that we were able - // to evaluate all cases, but didn't find any matches. - return size; - } - - /** - * Only expression that currently uses the isPartial flag. The IS NULL - * expression will use it too. TODO: We could alternatively have a non interface - * method, like setIsPartial in which we set to false prior to calling - * evaluate. 
- */ - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - int index = evaluateIndexOf(tuple, ptr); - if (index < 0) { - return false; - } else if (index == children.size()) { - ptr.set(PDataType.NULL_BYTES); - return true; - } - if (children.get(index).evaluate(tuple, ptr)) { - return true; - } - return false; - } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); + public CaseExpression() { + } + + public static Expression create(List children) throws SQLException { + CaseExpression caseExpression = new CaseExpression(coerceIfNecessary(children)); + if (ExpressionUtil.isConstant(caseExpression)) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int index = caseExpression.evaluateIndexOf(null, ptr); + if (index < 0) { + return LiteralExpression.newConstant(null, caseExpression.getDeterminism()); + } + return caseExpression.getChildren().get(index); + } + return caseExpression; + } + + private static List coerceIfNecessary(List children) throws SQLException { + boolean isChildTypeUnknown = false; + PDataType returnType = children.get(0).getDataType(); + for (int i = 2; i < children.size(); i += 2) { + Expression child = children.get(i); + PDataType childType = child.getDataType(); + if (childType == null) { + isChildTypeUnknown = true; + } else if (returnType == null) { + returnType = childType; + isChildTypeUnknown = true; + } else if (returnType == childType || childType.isCoercibleTo(returnType)) { + continue; + } else if (returnType.isCoercibleTo(childType)) { + returnType = childType; + } else { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) + .setMessage("Case expressions must have common type: " + returnType + + " cannot be coerced to " + childType) + .build().buildException(); + } + } + // If we found an "unknown" child type and the return type is a number + // make the return type be the most general number type of DECIMAL. + if (isChildTypeUnknown && returnType != null && returnType.isCoercibleTo(PDecimal.INSTANCE)) { + returnType = PDecimal.INSTANCE; + } + List newChildren = children; + for (int i = 0; i < children.size(); i += 2) { + Expression child = children.get(i); + PDataType childType = child.getDataType(); + if (childType != returnType) { + if (newChildren == children) { + newChildren = new ArrayList(children); } - return t; + newChildren.set(i, CoerceExpression.create(child, returnType)); + } } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("CASE "); - for (int i = 0; i < children.size() - 1; i+=2) { - buf.append("WHEN "); - buf.append(children.get(i+1)); - buf.append(" THEN "); - buf.append(children.get(i)); + return newChildren; + } + + /** + * Construct CASE/WHEN expression + * @param children list of expressions in the form of: + * {@code ((, )+, []) } + */ + public CaseExpression(List children) { + super(children); + returnType = children.get(0).getDataType(); + } + + private boolean isPartiallyEvaluating() { + return evalIndex != FULLY_EVALUATE; + } + + public boolean hasElse() { + return children.size() % 2 != 0; + } + + @Override + public boolean isNullable() { + // If any expression is nullable or there's no else clause + // return true since null may be returned. 
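+    // For example, "CASE WHEN k = 1 THEN 'a' END" (no ELSE) is nullable even though the
+    // literal 'a' is not, because rows where no WHEN condition matches evaluate to NULL.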
+ if (super.isNullable() || !hasElse()) { + return true; + } + return children.get(children.size() - 1).isNullable(); + } + + @Override + public PDataType getDataType() { + return returnType; + } + + @Override + public void reset() { + foundIndex = false; + evalIndex = 0; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.returnType = PDataType.values()[WritableUtils.readVInt(input)]; + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeVInt(output, this.returnType.ordinal()); + } + + public int evaluateIndexOf(Tuple tuple, ImmutableBytesWritable ptr) { + if (foundIndex) { + return evalIndex; + } + int size = children.size(); + // If we're doing partial evaluation, start where we left off + for (int i = isPartiallyEvaluating() ? evalIndex : 0; i < size; i += 2) { + // Short circuit if we see our stop value + if (i + 1 == size) { + return i; + } + // If we get null, we have to re-evaluate from that point (special case this in filter, like + // is null) + // We may only run this when we're done/have all values + boolean evaluated = children.get(i + 1).evaluate(tuple, ptr); + if (evaluated && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr))) { + if (isPartiallyEvaluating()) { + foundIndex = true; } - if (hasElse()) { - buf.append(" ELSE " + children.get(children.size()-1)); + return i; + } + if (isPartiallyEvaluating()) { + if (evaluated || tuple.isImmutable()) { + evalIndex += 2; + } else { + /* + * Return early here if incrementally evaluating and we don't have all the key values yet. + * We can't continue because we'd potentially be bypassing cases which we could later + * evaluate once we have more column values. + */ + return -1; } - buf.append(" END"); - return buf.toString(); - } - - @Override - public boolean requiresFinalEvaluation() { - return super.requiresFinalEvaluation() || this.hasElse(); + } } + // No conditions matched, return size to indicate that we were able + // to evaluate all cases, but didn't find any matches. + return size; + } + + /** + * Only expression that currently uses the isPartial flag. The IS NULL expression will use it too. + * TODO: We could alternatively have a non interface method, like setIsPartial in which we set to + * false prior to calling evaluate. 
+ */ + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + int index = evaluateIndexOf(tuple, ptr); + if (index < 0) { + return false; + } else if (index == children.size()) { + ptr.set(PDataType.NULL_BYTES); + return true; + } + if (children.get(index).evaluate(tuple, ptr)) { + return true; + } + return false; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); + } + return t; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("CASE "); + for (int i = 0; i < children.size() - 1; i += 2) { + buf.append("WHEN "); + buf.append(children.get(i + 1)); + buf.append(" THEN "); + buf.append(children.get(i)); + } + if (hasElse()) { + buf.append(" ELSE " + children.get(children.size() - 1)); + } + buf.append(" END"); + return buf.toString(); + } + + @Override + public boolean requiresFinalEvaluation() { + return super.requiresFinalEvaluation() || this.hasElse(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CoerceExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CoerceExpression.java index 548426cfef5..0e142e06cb8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CoerceExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CoerceExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,184 +29,186 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; - import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarbinaryEncoded; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - public class CoerceExpression extends BaseSingleExpression { - private PDataType toType; - private SortOrder toSortOrder; - private Integer maxLength; - private boolean rowKeyOrderOptimizable; - - public CoerceExpression() { - } + private PDataType toType; + private SortOrder toSortOrder; + private Integer maxLength; + private boolean rowKeyOrderOptimizable; - public static Expression create(Expression expression, PDataType toType) throws SQLException { - if (toType == expression.getDataType()) { - return expression; - } - return new CoerceExpression(expression, toType); - } - - public static Expression create(Expression expression, PDataType toType, SortOrder toSortOrder, Integer maxLength) throws SQLException { - return create(expression, toType, toSortOrder, maxLength, true); - } - - public static Expression create(Expression expression, PDataType toType, SortOrder toSortOrder, Integer maxLength, boolean rowKeyOrderOptimizable) throws SQLException { - if ( toType == expression.getDataType() && - toSortOrder == expression.getSortOrder() && - (maxLength == null || maxLength.equals(expression.getMaxLength())) ) { - return expression; - } - return new CoerceExpression(expression, toType, toSortOrder, maxLength, 
rowKeyOrderOptimizable); - } - - //Package protected for tests - CoerceExpression(Expression expression, PDataType toType) { - this(expression, toType, expression.getSortOrder(), null, true); - } - - CoerceExpression(Expression expression, PDataType toType, SortOrder toSortOrder, Integer maxLength, boolean rowKeyOrderOptimizable) { - this(ImmutableList.of(expression), toType, toSortOrder, maxLength, rowKeyOrderOptimizable); - } + public CoerceExpression() { + } - public CoerceExpression(List children, PDataType toType, SortOrder toSortOrder, Integer maxLength, boolean rowKeyOrderOptimizable) { - super(children); - Preconditions.checkNotNull(toSortOrder); - this.toType = toType; - this.toSortOrder = toSortOrder; - this.maxLength = maxLength; - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - } - - public CoerceExpression clone(List children) { - return new CoerceExpression(children, this.getDataType(), this.getSortOrder(), this.getMaxLength(), this.rowKeyOrderOptimizable); - } - - @Override - public Integer getMaxLength() { - return maxLength; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((maxLength == null) ? 0 : maxLength.hashCode()); - result = prime * result + ((toSortOrder == null) ? 0 : toSortOrder.hashCode()); - result = prime * result + ((toType == null) ? 0 : toType.hashCode()); - return result; + public static Expression create(Expression expression, PDataType toType) throws SQLException { + if (toType == expression.getDataType()) { + return expression; } + return new CoerceExpression(expression, toType); + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - CoerceExpression other = (CoerceExpression)obj; - if (maxLength == null) { - if (other.maxLength != null) return false; - } else if (!maxLength.equals(other.maxLength)) return false; - if (toSortOrder != other.toSortOrder) return false; - if (toType == null) { - if (other.toType != null) return false; - } else if (!toType.equals(other.toType)) return false; - return rowKeyOrderOptimizable == other.rowKeyOrderOptimizable; - } + public static Expression create(Expression expression, PDataType toType, SortOrder toSortOrder, + Integer maxLength) throws SQLException { + return create(expression, toType, toSortOrder, maxLength, true); + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - int ordinal = WritableUtils.readVInt(input); - rowKeyOrderOptimizable = false; - if (ordinal < 0) { - rowKeyOrderOptimizable = true; - ordinal = -(ordinal+1); - } - toType = PDataType.values()[ordinal]; - toSortOrder = SortOrder.fromSystemValue(WritableUtils.readVInt(input)); - int byteSize = WritableUtils.readVInt(input); - this.maxLength = byteSize == -1 ? 
null : byteSize; - } + public static Expression create(Expression expression, PDataType toType, SortOrder toSortOrder, + Integer maxLength, boolean rowKeyOrderOptimizable) throws SQLException { + if ( + toType == expression.getDataType() && toSortOrder == expression.getSortOrder() + && (maxLength == null || maxLength.equals(expression.getMaxLength())) + ) { + return expression; + } + return new CoerceExpression(expression, toType, toSortOrder, maxLength, rowKeyOrderOptimizable); + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - if (rowKeyOrderOptimizable) { - WritableUtils.writeVInt(output, -(toType.ordinal()+1)); - } else { - WritableUtils.writeVInt(output, toType.ordinal()); - } - WritableUtils.writeVInt(output, toSortOrder.getSystemValue()); - WritableUtils.writeVInt(output, maxLength == null ? -1 : maxLength); - } + // Package protected for tests + CoerceExpression(Expression expression, PDataType toType) { + this(expression, toType, expression.getSortOrder(), null, true); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // For CoerceExpression evaluation, lhs is coerced to rhs literal expression. However, - // in case of variable length binary literal expression, literal value by default - // gets VARBINARY data type. If lhs expression is of type VARBINARY_ENCODED, we should - // encode rhs literal value to VARBINARY_ENCODED type. This makes the eventual coerce - // evaluation successful. - if (getChild() instanceof LiteralExpression - && getChild().getDataType() == PVarbinary.INSTANCE - && getDataType() == PVarbinaryEncoded.INSTANCE) { - Expression expression; - try { - expression = - LiteralExpression.newConstant(((LiteralExpression) getChild()).getValue(), - PVarbinaryEncoded.INSTANCE); - } catch (SQLException e) { - throw new RuntimeException(e); - } - if (expression.evaluate(tuple, ptr)) { - getDataType().coerceBytes(ptr, null, expression.getDataType(), - expression.getMaxLength(), null, expression.getSortOrder(), maxLength, null, - getSortOrder(), rowKeyOrderOptimizable); - return true; - } - } else { - if (getChild().evaluate(tuple, ptr)) { - getDataType().coerceBytes(ptr, null, getChild().getDataType(), - getChild().getMaxLength(), null, getChild().getSortOrder(), maxLength, null, - getSortOrder(), rowKeyOrderOptimizable); - return true; - } - } - return false; - } + CoerceExpression(Expression expression, PDataType toType, SortOrder toSortOrder, + Integer maxLength, boolean rowKeyOrderOptimizable) { + this(ImmutableList.of(expression), toType, toSortOrder, maxLength, rowKeyOrderOptimizable); + } - @Override - public PDataType getDataType() { - return toType; - } - - @Override - public SortOrder getSortOrder() { - return toSortOrder; - } - - @Override - public T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("TO_" + toType.toString() + "("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ", "); - } - buf.append(children.get(children.size()-1) + ")"); - return buf.toString(); - } + public CoerceExpression(List children, PDataType toType, SortOrder toSortOrder, + Integer maxLength, boolean rowKeyOrderOptimizable) { + super(children); + Preconditions.checkNotNull(toSortOrder); + this.toType = toType; + 
this.toSortOrder = toSortOrder; + this.maxLength = maxLength; + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + } + + public CoerceExpression clone(List children) { + return new CoerceExpression(children, this.getDataType(), this.getSortOrder(), + this.getMaxLength(), this.rowKeyOrderOptimizable); + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((maxLength == null) ? 0 : maxLength.hashCode()); + result = prime * result + ((toSortOrder == null) ? 0 : toSortOrder.hashCode()); + result = prime * result + ((toType == null) ? 0 : toType.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + CoerceExpression other = (CoerceExpression) obj; + if (maxLength == null) { + if (other.maxLength != null) return false; + } else if (!maxLength.equals(other.maxLength)) return false; + if (toSortOrder != other.toSortOrder) return false; + if (toType == null) { + if (other.toType != null) return false; + } else if (!toType.equals(other.toType)) return false; + return rowKeyOrderOptimizable == other.rowKeyOrderOptimizable; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + int ordinal = WritableUtils.readVInt(input); + rowKeyOrderOptimizable = false; + if (ordinal < 0) { + rowKeyOrderOptimizable = true; + ordinal = -(ordinal + 1); + } + toType = PDataType.values()[ordinal]; + toSortOrder = SortOrder.fromSystemValue(WritableUtils.readVInt(input)); + int byteSize = WritableUtils.readVInt(input); + this.maxLength = byteSize == -1 ? null : byteSize; + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + if (rowKeyOrderOptimizable) { + WritableUtils.writeVInt(output, -(toType.ordinal() + 1)); + } else { + WritableUtils.writeVInt(output, toType.ordinal()); + } + WritableUtils.writeVInt(output, toSortOrder.getSystemValue()); + WritableUtils.writeVInt(output, maxLength == null ? -1 : maxLength); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // For CoerceExpression evaluation, lhs is coerced to rhs literal expression. However, + // in case of variable length binary literal expression, literal value by default + // gets VARBINARY data type. If lhs expression is of type VARBINARY_ENCODED, we should + // encode rhs literal value to VARBINARY_ENCODED type. This makes the eventual coerce + // evaluation successful. 
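+    // For example, a VARBINARY_ENCODED column compared against a byte-array literal: the
+    // literal defaults to VARBINARY, so it is rebuilt here as a VARBINARY_ENCODED constant
+    // before coerceBytes() runs, letting the coercion below succeed.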
+ if ( + getChild() instanceof LiteralExpression && getChild().getDataType() == PVarbinary.INSTANCE + && getDataType() == PVarbinaryEncoded.INSTANCE + ) { + Expression expression; + try { + expression = LiteralExpression.newConstant(((LiteralExpression) getChild()).getValue(), + PVarbinaryEncoded.INSTANCE); + } catch (SQLException e) { + throw new RuntimeException(e); + } + if (expression.evaluate(tuple, ptr)) { + getDataType().coerceBytes(ptr, null, expression.getDataType(), expression.getMaxLength(), + null, expression.getSortOrder(), maxLength, null, getSortOrder(), rowKeyOrderOptimizable); + return true; + } + } else { + if (getChild().evaluate(tuple, ptr)) { + getDataType().coerceBytes(ptr, null, getChild().getDataType(), getChild().getMaxLength(), + null, getChild().getSortOrder(), maxLength, null, getSortOrder(), rowKeyOrderOptimizable); + return true; + } + } + return false; + } + + @Override + public PDataType getDataType() { + return toType; + } + + @Override + public SortOrder getSortOrder() { + return toSortOrder; + } + + @Override + public T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); + } + return t; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("TO_" + toType.toString() + "("); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + ", "); + } + buf.append(children.get(children.size() - 1) + ")"); + return buf.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ColumnExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ColumnExpression.java index 2c9d3780a01..7e21945aa45 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ColumnExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ColumnExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,109 +22,108 @@ import java.io.IOException; import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.PDatum; import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.types.PDataType; /** - * * Common base class for column value accessors - * - * * @since 0.1 */ abstract public class ColumnExpression extends BaseTerminalExpression { - protected PDataType type; - private boolean isNullable; - private Integer maxLength; - private Integer scale; - private SortOrder sortOrder; + protected PDataType type; + private boolean isNullable; + private Integer maxLength; + private Integer scale; + private SortOrder sortOrder; - public ColumnExpression() { - } + public ColumnExpression() { + } - // TODO: review, as the hashCode() and equals() here seem unnecessary - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (isNullable() ? 1231 : 1237); - PDataType type = this.getDataType(); - result = prime * result + ((type == null) ? 
0 : type.hashCode()); - return result; - } + // TODO: review, as the hashCode() and equals() here seem unnecessary + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (isNullable() ? 1231 : 1237); + PDataType type = this.getDataType(); + result = prime * result + ((type == null) ? 0 : type.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ColumnExpression other = (ColumnExpression)obj; - if (this.isNullable() != other.isNullable()) return false; - if (this.getDataType() != other.getDataType()) return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ColumnExpression other = (ColumnExpression) obj; + if (this.isNullable() != other.isNullable()) return false; + if (this.getDataType() != other.getDataType()) return false; + return true; + } - public ColumnExpression(PDatum datum) { - this.type = datum.getDataType(); - this.isNullable = datum.isNullable(); - this.maxLength = datum.getMaxLength(); - this.scale = datum.getScale(); - this.sortOrder = datum.getSortOrder(); - } + public ColumnExpression(PDatum datum) { + this.type = datum.getDataType(); + this.isNullable = datum.isNullable(); + this.maxLength = datum.getMaxLength(); + this.scale = datum.getScale(); + this.sortOrder = datum.getSortOrder(); + } - @Override - public boolean isNullable() { - return isNullable; - } - - @Override - public PDataType getDataType() { - return type; - } - - @Override - public SortOrder getSortOrder() { - return sortOrder; - } + @Override + public boolean isNullable() { + return isNullable; + } - @Override - public Integer getMaxLength() { - return maxLength; - } + @Override + public PDataType getDataType() { + return type; + } - @Override - public Integer getScale() { - return scale; - } + @Override + public SortOrder getSortOrder() { + return sortOrder; + } - @Override - public void readFields(DataInput input) throws IOException { - // read/write type ordinal, maxLength presence, scale presence and isNullable bit together to save space - int typeAndFlag = WritableUtils.readVInt(input); - isNullable = (typeAndFlag & 0x01) != 0; - if ((typeAndFlag & 0x02) != 0) { - scale = WritableUtils.readVInt(input); - } - if ((typeAndFlag & 0x04) != 0) { - maxLength = WritableUtils.readVInt(input); - } - type = PDataType.values()[typeAndFlag >>> 3]; - sortOrder = SortOrder.fromSystemValue(WritableUtils.readVInt(input)); + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public Integer getScale() { + return scale; + } + + @Override + public void readFields(DataInput input) throws IOException { + // read/write type ordinal, maxLength presence, scale presence and isNullable bit together to + // save space + int typeAndFlag = WritableUtils.readVInt(input); + isNullable = (typeAndFlag & 0x01) != 0; + if ((typeAndFlag & 0x02) != 0) { + scale = WritableUtils.readVInt(input); } + if ((typeAndFlag & 0x04) != 0) { + maxLength = WritableUtils.readVInt(input); + } + type = PDataType.values()[typeAndFlag >>> 3]; + sortOrder = SortOrder.fromSystemValue(WritableUtils.readVInt(input)); + } - @Override - public void write(DataOutput output) throws IOException { - // read/write type ordinal, maxLength presence, scale presence and isNullable bit together to save 
space - int typeAndFlag = (isNullable ? 1 : 0) | ((scale != null ? 1 : 0) << 1) | ((maxLength != null ? 1 : 0) << 2) - | (type.ordinal() << 3); - WritableUtils.writeVInt(output,typeAndFlag); - if (scale != null) { - WritableUtils.writeVInt(output, scale); - } - if (maxLength != null) { - WritableUtils.writeVInt(output, maxLength); - } - WritableUtils.writeVInt(output, sortOrder.getSystemValue()); + @Override + public void write(DataOutput output) throws IOException { + // read/write type ordinal, maxLength presence, scale presence and isNullable bit together to + // save space + int typeAndFlag = (isNullable ? 1 : 0) | ((scale != null ? 1 : 0) << 1) + | ((maxLength != null ? 1 : 0) << 2) | (type.ordinal() << 3); + WritableUtils.writeVInt(output, typeAndFlag); + if (scale != null) { + WritableUtils.writeVInt(output, scale); + } + if (maxLength != null) { + WritableUtils.writeVInt(output, maxLength); } + WritableUtils.writeVInt(output, sortOrder.getSystemValue()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java index 6879cc93569..2cc351018f3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ComparisonExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,13 @@ */ package org.apache.phoenix.expression; +import static org.apache.hadoop.hbase.CompareOperator.EQUAL; +import static org.apache.hadoop.hbase.CompareOperator.GREATER; +import static org.apache.hadoop.hbase.CompareOperator.GREATER_OR_EQUAL; +import static org.apache.hadoop.hbase.CompareOperator.LESS; +import static org.apache.hadoop.hbase.CompareOperator.LESS_OR_EQUAL; +import static org.apache.hadoop.hbase.CompareOperator.NOT_EQUAL; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -27,9 +34,9 @@ import java.util.List; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.phoenix.compile.WhereCompiler; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; +import org.apache.phoenix.compile.WhereCompiler; import org.apache.phoenix.expression.function.ArrayElemRefExpression; import org.apache.phoenix.expression.rewrite.RowValueConstructorExpressionRewriter; import org.apache.phoenix.expression.visitor.ExpressionVisitor; @@ -44,477 +51,497 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ExpressionUtil; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import static org.apache.hadoop.hbase.CompareOperator.EQUAL; -import static org.apache.hadoop.hbase.CompareOperator.GREATER; -import static org.apache.hadoop.hbase.CompareOperator.GREATER_OR_EQUAL; -import static 
org.apache.hadoop.hbase.CompareOperator.LESS; -import static org.apache.hadoop.hbase.CompareOperator.LESS_OR_EQUAL; -import static org.apache.hadoop.hbase.CompareOperator.NOT_EQUAL; - /** - * * Implementation for {@code <,<=,>,>=,=,!= } comparison expressions - * * @since 0.1 */ public class ComparisonExpression extends BaseCompoundExpression { - private CompareOperator op; - - private static void addEqualityExpression(Expression lhs, Expression rhs, List andNodes, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { - boolean isLHSNull = ExpressionUtil.isNull(lhs, ptr); - boolean isRHSNull = ExpressionUtil.isNull(rhs, ptr); - if (isLHSNull && isRHSNull) { // null == null will end up making the query degenerate - andNodes.add(LiteralExpression.newConstant(false, PBoolean.INSTANCE)); - } else if (isLHSNull) { // AND rhs IS NULL - andNodes.add(IsNullExpression.create(rhs, false, ptr)); - } else if (isRHSNull) { // AND lhs IS NULL - andNodes.add(IsNullExpression.create(lhs, false, ptr)); - } else { // AND lhs = rhs - andNodes.add(ComparisonExpression.create(CompareOperator.EQUAL, Arrays.asList(lhs, rhs), ptr, rowKeyOrderOptimizable)); - } - } - - /** - * Rewrites expressions of the form (a, b, c) = (1, 2) as a = 1 and b = 2 and c is null - * as this is equivalent and already optimized - * @param lhs - * @param rhs - * @param andNodes - * @throws SQLException - */ - private static void rewriteRVCAsEqualityExpression(Expression lhs, Expression rhs, List andNodes, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { - if (lhs instanceof RowValueConstructorExpression && rhs instanceof RowValueConstructorExpression) { - int i = 0; - for (; i < Math.min(lhs.getChildren().size(),rhs.getChildren().size()); i++) { - addEqualityExpression(lhs.getChildren().get(i), rhs.getChildren().get(i), andNodes, ptr, rowKeyOrderOptimizable); - } - for (; i < lhs.getChildren().size(); i++) { - addEqualityExpression(lhs.getChildren().get(i), LiteralExpression.newConstant(null, lhs.getChildren().get(i).getDataType()), andNodes, ptr, rowKeyOrderOptimizable); - } - for (; i < rhs.getChildren().size(); i++) { - addEqualityExpression(LiteralExpression.newConstant(null, rhs.getChildren().get(i).getDataType()), rhs.getChildren().get(i), andNodes, ptr, rowKeyOrderOptimizable); - } - } else if (lhs instanceof RowValueConstructorExpression) { - addEqualityExpression(lhs.getChildren().get(0), rhs, andNodes, ptr, rowKeyOrderOptimizable); - for (int i = 1; i < lhs.getChildren().size(); i++) { - addEqualityExpression(lhs.getChildren().get(i), LiteralExpression.newConstant(null, lhs.getChildren().get(i).getDataType()), andNodes, ptr, rowKeyOrderOptimizable); - } - } else if (rhs instanceof RowValueConstructorExpression) { - addEqualityExpression(lhs, rhs.getChildren().get(0), andNodes, ptr, rowKeyOrderOptimizable); - for (int i = 1; i < rhs.getChildren().size(); i++) { - addEqualityExpression(LiteralExpression.newConstant(null, rhs.getChildren().get(i).getDataType()), rhs.getChildren().get(i), andNodes, ptr, rowKeyOrderOptimizable); - } - } + private CompareOperator op; + + private static void addEqualityExpression(Expression lhs, Expression rhs, + List andNodes, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) + throws SQLException { + boolean isLHSNull = ExpressionUtil.isNull(lhs, ptr); + boolean isRHSNull = ExpressionUtil.isNull(rhs, ptr); + if (isLHSNull && isRHSNull) { // null == null will end up making the query degenerate + 
andNodes.add(LiteralExpression.newConstant(false, PBoolean.INSTANCE)); + } else if (isLHSNull) { // AND rhs IS NULL + andNodes.add(IsNullExpression.create(rhs, false, ptr)); + } else if (isRHSNull) { // AND lhs IS NULL + andNodes.add(IsNullExpression.create(lhs, false, ptr)); + } else { // AND lhs = rhs + andNodes.add(ComparisonExpression.create(CompareOperator.EQUAL, Arrays.asList(lhs, rhs), ptr, + rowKeyOrderOptimizable)); } - - public static Expression create(CompareOperator op, List children, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { - Expression lhsExpr = children.get(0); - Expression rhsExpr = children.get(1); - PDataType lhsExprDataType = lhsExpr.getDataType(); - PDataType rhsExprDataType = rhsExpr.getDataType(); + } - if ((lhsExpr instanceof RowValueConstructorExpression || rhsExpr instanceof RowValueConstructorExpression) && !(lhsExpr instanceof ArrayElemRefExpression) && !(rhsExpr instanceof ArrayElemRefExpression)) { - if (op == CompareOperator.EQUAL || op == CompareOperator.NOT_EQUAL) { - List andNodes = Lists.newArrayListWithExpectedSize(Math.max(lhsExpr.getChildren().size(), rhsExpr.getChildren().size())); - rewriteRVCAsEqualityExpression(lhsExpr, rhsExpr, andNodes, ptr, rowKeyOrderOptimizable); - Expression expr = AndExpression.create(andNodes); - if (op == CompareOperator.NOT_EQUAL) { - expr = NotExpression.create(expr, ptr); - } - return expr; - } - rhsExpr = RowValueConstructorExpression.coerce(lhsExpr, rhsExpr, op, rowKeyOrderOptimizable); - // Always wrap both sides in row value constructor, so we don't have to consider comparing - // a non rvc with a rvc. - if ( ! ( lhsExpr instanceof RowValueConstructorExpression ) ) { - lhsExpr = new RowValueConstructorExpression(Collections.singletonList(lhsExpr), lhsExpr.isStateless()); - } + /** + * Rewrites expressions of the form (a, b, c) = (1, 2) as a = 1 and b = 2 and c is null as this is + * equivalent and already optimized + */ + private static void rewriteRVCAsEqualityExpression(Expression lhs, Expression rhs, + List andNodes, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) + throws SQLException { + if ( + lhs instanceof RowValueConstructorExpression && rhs instanceof RowValueConstructorExpression + ) { + int i = 0; + for (; i < Math.min(lhs.getChildren().size(), rhs.getChildren().size()); i++) { + addEqualityExpression(lhs.getChildren().get(i), rhs.getChildren().get(i), andNodes, ptr, + rowKeyOrderOptimizable); + } + for (; i < lhs.getChildren().size(); i++) { + addEqualityExpression(lhs.getChildren().get(i), + LiteralExpression.newConstant(null, lhs.getChildren().get(i).getDataType()), andNodes, + ptr, rowKeyOrderOptimizable); + } + for (; i < rhs.getChildren().size(); i++) { + addEqualityExpression( + LiteralExpression.newConstant(null, rhs.getChildren().get(i).getDataType()), + rhs.getChildren().get(i), andNodes, ptr, rowKeyOrderOptimizable); + } + } else if (lhs instanceof RowValueConstructorExpression) { + addEqualityExpression(lhs.getChildren().get(0), rhs, andNodes, ptr, rowKeyOrderOptimizable); + for (int i = 1; i < lhs.getChildren().size(); i++) { + addEqualityExpression(lhs.getChildren().get(i), + LiteralExpression.newConstant(null, lhs.getChildren().get(i).getDataType()), andNodes, + ptr, rowKeyOrderOptimizable); + } + } else if (rhs instanceof RowValueConstructorExpression) { + addEqualityExpression(lhs, rhs.getChildren().get(0), andNodes, ptr, rowKeyOrderOptimizable); + for (int i = 1; i < rhs.getChildren().size(); i++) { + addEqualityExpression( + 
LiteralExpression.newConstant(null, rhs.getChildren().get(i).getDataType()), + rhs.getChildren().get(i), andNodes, ptr, rowKeyOrderOptimizable); + } + } + } - /* - At this point both sides should be in the same row format. - We add the inverts so the filtering can be done properly for mixed sort type RVCs. - The entire RVC has to be in ASC for the actual compare to work since compare simply does - a varbyte compare. See PHOENIX-4841 - */ - RowValueConstructorExpressionRewriter rvcRewriter = - RowValueConstructorExpressionRewriter.getSingleton(); - lhsExpr = rvcRewriter.rewriteAllChildrenAsc((RowValueConstructorExpression) lhsExpr); - rhsExpr = rvcRewriter.rewriteAllChildrenAsc((RowValueConstructorExpression) rhsExpr); + public static Expression create(CompareOperator op, List children, + ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { + Expression lhsExpr = children.get(0); + Expression rhsExpr = children.get(1); + PDataType lhsExprDataType = lhsExpr.getDataType(); + PDataType rhsExprDataType = rhsExpr.getDataType(); - children = Arrays.asList(lhsExpr, rhsExpr); - } else if(lhsExprDataType != null && rhsExprDataType != null && !lhsExprDataType.isComparableTo(rhsExprDataType)) { - throw TypeMismatchException.newException(lhsExprDataType, rhsExprDataType, - toString(op, children)); + if ( + (lhsExpr instanceof RowValueConstructorExpression + || rhsExpr instanceof RowValueConstructorExpression) + && !(lhsExpr instanceof ArrayElemRefExpression) + && !(rhsExpr instanceof ArrayElemRefExpression) + ) { + if (op == CompareOperator.EQUAL || op == CompareOperator.NOT_EQUAL) { + List andNodes = Lists. newArrayListWithExpectedSize( + Math.max(lhsExpr.getChildren().size(), rhsExpr.getChildren().size())); + rewriteRVCAsEqualityExpression(lhsExpr, rhsExpr, andNodes, ptr, rowKeyOrderOptimizable); + Expression expr = AndExpression.create(andNodes); + if (op == CompareOperator.NOT_EQUAL) { + expr = NotExpression.create(expr, ptr); } - Determinism determinism = lhsExpr.getDeterminism().combine(rhsExpr.getDeterminism()); - - Object lhsValue = null; - // Can't use lhsNode.isConstant(), because we have cases in which we don't know - // in advance if a function evaluates to null (namely when bind variables are used) - // TODO: use lhsExpr.isStateless instead - if (lhsExpr instanceof LiteralExpression) { - lhsValue = ((LiteralExpression)lhsExpr).getValue(); - if (lhsValue == null) { - return LiteralExpression.newConstant(null, PBoolean.INSTANCE, lhsExpr.getDeterminism()); - } - } - Object rhsValue = null; - // TODO: use lhsExpr.isStateless instead - if (rhsExpr instanceof LiteralExpression) { - rhsValue = ((LiteralExpression)rhsExpr).getValue(); - if (rhsValue == null) { - return LiteralExpression.newConstant(null, PBoolean.INSTANCE, rhsExpr.getDeterminism()); + return expr; + } + rhsExpr = RowValueConstructorExpression.coerce(lhsExpr, rhsExpr, op, rowKeyOrderOptimizable); + // Always wrap both sides in row value constructor, so we don't have to consider comparing + // a non rvc with a rvc. + if (!(lhsExpr instanceof RowValueConstructorExpression)) { + lhsExpr = new RowValueConstructorExpression(Collections.singletonList(lhsExpr), + lhsExpr.isStateless()); + } + + /* + * At this point both sides should be in the same row format. We add the inverts so the + * filtering can be done properly for mixed sort type RVCs. The entire RVC has to be in ASC + * for the actual compare to work since compare simply does a varbyte compare. 
See + * PHOENIX-4841 + */ + RowValueConstructorExpressionRewriter rvcRewriter = + RowValueConstructorExpressionRewriter.getSingleton(); + lhsExpr = rvcRewriter.rewriteAllChildrenAsc((RowValueConstructorExpression) lhsExpr); + rhsExpr = rvcRewriter.rewriteAllChildrenAsc((RowValueConstructorExpression) rhsExpr); + + children = Arrays.asList(lhsExpr, rhsExpr); + } else if ( + lhsExprDataType != null && rhsExprDataType != null + && !lhsExprDataType.isComparableTo(rhsExprDataType) + ) { + throw TypeMismatchException.newException(lhsExprDataType, rhsExprDataType, + toString(op, children)); + } + Determinism determinism = lhsExpr.getDeterminism().combine(rhsExpr.getDeterminism()); + + Object lhsValue = null; + // Can't use lhsNode.isConstant(), because we have cases in which we don't know + // in advance if a function evaluates to null (namely when bind variables are used) + // TODO: use lhsExpr.isStateless instead + if (lhsExpr instanceof LiteralExpression) { + lhsValue = ((LiteralExpression) lhsExpr).getValue(); + if (lhsValue == null) { + return LiteralExpression.newConstant(null, PBoolean.INSTANCE, lhsExpr.getDeterminism()); + } + } + Object rhsValue = null; + // TODO: use lhsExpr.isStateless instead + if (rhsExpr instanceof LiteralExpression) { + rhsValue = ((LiteralExpression) rhsExpr).getValue(); + if (rhsValue == null) { + return LiteralExpression.newConstant(null, PBoolean.INSTANCE, rhsExpr.getDeterminism()); + } + } + if (lhsValue != null && rhsValue != null) { + return LiteralExpression.newConstant( + ByteUtil.compare(op, lhsExprDataType.compareTo(lhsValue, rhsValue, rhsExprDataType)), + determinism); + } + // Coerce constant to match type of lhs so that we don't need to + // convert at filter time. Since we normalize the select statement + // to put constants on the LHS, we don't need to check the RHS. + if (rhsValue != null) { + // Comparing an unsigned int/long against a negative int/long would be an example. We just + // need to take + // into account the comparison operator. + if ( + rhsExprDataType != lhsExprDataType || rhsExpr.getSortOrder() != lhsExpr.getSortOrder() + || (rhsExprDataType.isFixedWidth() && rhsExpr.getMaxLength() != null + && lhsExprDataType.isFixedWidth() && lhsExpr.getMaxLength() != null + && rhsExpr.getMaxLength() < lhsExpr.getMaxLength()) + ) { + // TODO: if lengths are unequal and fixed width? + if (rhsExprDataType.isCoercibleTo(lhsExprDataType, rhsValue)) { // will convert 2.0 -> 2 + children = Arrays.asList(children.get(0), + LiteralExpression.newConstant(rhsValue, lhsExprDataType, lhsExpr.getMaxLength(), null, + lhsExpr.getSortOrder(), determinism, rowKeyOrderOptimizable)); + } else if (op == CompareOperator.EQUAL) { + return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS); + } else if (op == CompareOperator.NOT_EQUAL) { + return LiteralExpression.newConstant(true, PBoolean.INSTANCE, Determinism.ALWAYS); + } else { // TODO: generalize this with PDataType.getMinValue(), PDataTypeType.getMaxValue() + // methods + if (rhsExprDataType == PDecimal.INSTANCE) { + /* + * We're comparing an int/long to a constant decimal with a fraction part. We need the + * types to match in case this is used to form a key. To form the start/stop key, we + * need to adjust the decimal by truncating it or taking its ceiling, depending on the + * comparison operator, to get a whole number. 
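+             * For example, with a LONG column and the positive literal 2.5: "col >= 2.5" and
+             * "col < 2.5" take the next whole number and become "col >= 3" and "col < 3",
+             * while "col > 2.5" and "col <= 2.5" truncate to "col > 2" and "col <= 2",
+             * preserving the meaning of the original comparison for whole-valued columns.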
+ */ + int increment = 0; + switch (op) { + case GREATER_OR_EQUAL: + case LESS: // get next whole number + increment = 1; + default: // Else, we truncate the value + BigDecimal bd = (BigDecimal) rhsValue; + rhsValue = bd.longValue() + increment; + children = Arrays.asList(lhsExpr, LiteralExpression.newConstant(rhsValue, + lhsExprDataType, lhsExpr.getSortOrder(), rhsExpr.getDeterminism())); + break; } - } - if (lhsValue != null && rhsValue != null) { - return LiteralExpression.newConstant(ByteUtil.compare(op,lhsExprDataType.compareTo(lhsValue, rhsValue, rhsExprDataType)), determinism); - } - // Coerce constant to match type of lhs so that we don't need to - // convert at filter time. Since we normalize the select statement - // to put constants on the LHS, we don't need to check the RHS. - if (rhsValue != null) { - // Comparing an unsigned int/long against a negative int/long would be an example. We just need to take - // into account the comparison operator. - if (rhsExprDataType != lhsExprDataType - || rhsExpr.getSortOrder() != lhsExpr.getSortOrder() - || (rhsExprDataType.isFixedWidth() && rhsExpr.getMaxLength() != null && - lhsExprDataType.isFixedWidth() && lhsExpr.getMaxLength() != null && - rhsExpr.getMaxLength() < lhsExpr.getMaxLength())) { - // TODO: if lengths are unequal and fixed width? - if (rhsExprDataType.isCoercibleTo(lhsExprDataType, rhsValue)) { // will convert 2.0 -> 2 - children = Arrays.asList(children.get(0), LiteralExpression.newConstant(rhsValue, lhsExprDataType, - lhsExpr.getMaxLength(), null, lhsExpr.getSortOrder(), determinism, rowKeyOrderOptimizable)); - } else if (op == CompareOperator.EQUAL) { - return LiteralExpression.newConstant(false, PBoolean.INSTANCE, Determinism.ALWAYS); - } else if (op == CompareOperator.NOT_EQUAL) { - return LiteralExpression.newConstant(true, PBoolean.INSTANCE, Determinism.ALWAYS); - } else { // TODO: generalize this with PDataType.getMinValue(), PDataTypeType.getMaxValue() methods - if (rhsExprDataType == PDecimal.INSTANCE) { - /* - * We're comparing an int/long to a constant decimal with a fraction part. - * We need the types to match in case this is used to form a key. To form the start/stop key, - * we need to adjust the decimal by truncating it or taking its ceiling, depending on the comparison - * operator, to get a whole number. - */ - int increment = 0; - switch (op) { - case GREATER_OR_EQUAL: - case LESS: // get next whole number - increment = 1; - default: // Else, we truncate the value - BigDecimal bd = (BigDecimal)rhsValue; - rhsValue = bd.longValue() + increment; - children = Arrays.asList(lhsExpr, LiteralExpression.newConstant(rhsValue, lhsExprDataType, lhsExpr.getSortOrder(), rhsExpr.getDeterminism())); - break; - } - } else if (rhsExprDataType == PLong.INSTANCE) { - /* - * We are comparing an int, unsigned_int to a long, or an unsigned_long to a negative long. - * int has range of -2147483648 to 2147483647, and unsigned_int has a value range of 0 to 4294967295. - * - * If lhs is int or unsigned_int, since we already determined that we cannot coerce the rhs - * to become the lhs, we know the value on the rhs is greater than lhs if it's positive, or smaller than - * lhs if it's negative. - * - * If lhs is an unsigned_long, then we know the rhs is definitely a negative long. rhs in this case - * will always be bigger than rhs. 
- */ - if (lhsExprDataType == PInteger.INSTANCE || - lhsExprDataType == PUnsignedInt.INSTANCE) { - switch (op) { - case LESS: - case LESS_OR_EQUAL: - if ((Long)rhsValue > 0) { - return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); - } else { - return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); - } - case GREATER: - case GREATER_OR_EQUAL: - if ((Long)rhsValue > 0) { - return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); - } else { - return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); - } - default: - break; - } - } else if (lhsExprDataType == PUnsignedLong.INSTANCE) { - switch (op) { - case LESS: - case LESS_OR_EQUAL: - return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); - case GREATER: - case GREATER_OR_EQUAL: - return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); - default: - break; - } - } - children = Arrays.asList(lhsExpr, LiteralExpression.newConstant(rhsValue, rhsExprDataType, lhsExpr.getSortOrder(), determinism)); + } else if (rhsExprDataType == PLong.INSTANCE) { + /* + * We are comparing an int, unsigned_int to a long, or an unsigned_long to a negative + * long. int has range of -2147483648 to 2147483647, and unsigned_int has a value range + * of 0 to 4294967295. If lhs is int or unsigned_int, since we already determined that + * we cannot coerce the rhs to become the lhs, we know the value on the rhs is greater + * than lhs if it's positive, or smaller than lhs if it's negative. If lhs is an + * unsigned_long, then we know the rhs is definitely a negative long. rhs in this case + * will always be bigger than rhs. + */ + if (lhsExprDataType == PInteger.INSTANCE || lhsExprDataType == PUnsignedInt.INSTANCE) { + switch (op) { + case LESS: + case LESS_OR_EQUAL: + if ((Long) rhsValue > 0) { + return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); + } else { + return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); } - } - } - - // Determine if we know the expression must be TRUE or FALSE based on the max size of - // a fixed length expression. - if (children.get(1).getMaxLength() != null && lhsExpr.getMaxLength() != null && lhsExpr.getMaxLength() < children.get(1).getMaxLength()) { - switch (op) { - case EQUAL: + case GREATER: + case GREATER_OR_EQUAL: + if ((Long) rhsValue > 0) { return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); - case NOT_EQUAL: + } else { return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); + } + default: + break; + } + } else if (lhsExprDataType == PUnsignedLong.INSTANCE) { + switch (op) { + case LESS: + case LESS_OR_EQUAL: + return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); + case GREATER: + case GREATER_OR_EQUAL: + return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); default: - break; - } + break; + } } + children = Arrays.asList(lhsExpr, LiteralExpression.newConstant(rhsValue, + rhsExprDataType, lhsExpr.getSortOrder(), determinism)); + } + } + } + + // Determine if we know the expression must be TRUE or FALSE based on the max size of + // a fixed length expression. 
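+    // For example, a CHAR(3) column compared for equality against the longer constant
+    // 'ABCDE' can never match, so EQUAL folds to constant FALSE and NOT_EQUAL to constant
+    // TRUE without evaluating anything at runtime.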
+ if ( + children.get(1).getMaxLength() != null && lhsExpr.getMaxLength() != null + && lhsExpr.getMaxLength() < children.get(1).getMaxLength() + ) { + switch (op) { + case EQUAL: + return LiteralExpression.newConstant(false, PBoolean.INSTANCE, determinism); + case NOT_EQUAL: + return LiteralExpression.newConstant(true, PBoolean.INSTANCE, determinism); + default: + break; } - return new ComparisonExpression(children, op); + } } - - public ComparisonExpression() { + return new ComparisonExpression(children, op); + } + + public ComparisonExpression() { + } + + public ComparisonExpression(List children, CompareOperator op) { + super(children); + if (op == null) { + throw new NullPointerException(); } + this.op = op; + } - public ComparisonExpression(List children, CompareOperator op) { - super(children); - if (op == null) { - throw new NullPointerException(); - } - this.op = op; + public ComparisonExpression clone(List children) { + return new ComparisonExpression(children, this.getFilterOp()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + op.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + ComparisonExpression other = (ComparisonExpression) obj; + if (op != other.op) return false; + return true; + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!children.get(0).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { // null comparison evals to null + return true; + } + byte[] lhsBytes = ptr.get(); + int lhsOffset = ptr.getOffset(); + int lhsLength = ptr.getLength(); + PDataType lhsDataType = children.get(0).getDataType(); + SortOrder lhsSortOrder = children.get(0).getSortOrder(); + + if (!children.get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { // null comparison evals to null + return true; } - public ComparisonExpression clone(List children) { - return new ComparisonExpression(children, this.getFilterOp()); + byte[] rhsBytes = ptr.get(); + int rhsOffset = ptr.getOffset(); + int rhsLength = ptr.getLength(); + PDataType rhsDataType = children.get(1).getDataType(); + SortOrder rhsSortOrder = children.get(1).getSortOrder(); + if (rhsDataType == PChar.INSTANCE) { + rhsLength = StringUtil.getUnpaddedCharLength(rhsBytes, rhsOffset, rhsLength, rhsSortOrder); } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + op.hashCode(); - return result; + if (lhsDataType == PChar.INSTANCE) { + lhsLength = StringUtil.getUnpaddedCharLength(lhsBytes, lhsOffset, lhsLength, lhsSortOrder); } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - ComparisonExpression other = (ComparisonExpression)obj; - if (op != other.op) return false; - return true; + int comparisonResult = lhsDataType.compareTo(lhsBytes, lhsOffset, lhsLength, lhsSortOrder, + rhsBytes, rhsOffset, rhsLength, rhsSortOrder, rhsDataType); + ptr.set(ByteUtil.compare(op, comparisonResult) ? 
PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + return true; + } + + @Override + public boolean contains(Expression other) { + if (!(other instanceof ComparisonExpression || other instanceof IsNullExpression)) { + return false; + } + if (other instanceof IsNullExpression) { + return !((IsNullExpression) other).isNegate(); } - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + BaseTerminalExpression lhsA = + WhereCompiler.getBaseTerminalExpression(this.getChildren().get(0)); + BaseTerminalExpression lhsB = + WhereCompiler.getBaseTerminalExpression(other.getChildren().get(0)); + if (!lhsA.equals(lhsB)) { + return false; + } + CompareOperator opA = this.getFilterOp(); + CompareOperator opB = ((ComparisonExpression) other).getFilterOp(); + BaseTerminalExpression rhs = WhereCompiler.getBaseTerminalExpression(this.getChildren().get(1)); + if (rhs instanceof ColumnExpression) { + BaseTerminalExpression rhsB = + WhereCompiler.getBaseTerminalExpression(other.getChildren().get(1)); + if (!rhs.equals(rhsB)) { + return false; + } + switch (opA) { + case LESS_OR_EQUAL: + if (opB == LESS || opB == LESS_OR_EQUAL || opB == EQUAL) { + return true; + } + return false; + case LESS: + case EQUAL: + case NOT_EQUAL: + case GREATER: + if (opA == opB) { + return true; + } + return false; + case GREATER_OR_EQUAL: + if (opB == GREATER || opB == GREATER_OR_EQUAL || opB == EQUAL) { + return true; + } + return false; + default: + throw new IllegalArgumentException("Unexpected CompareOp " + opA); + } } + LiteralExpression rhsA = WhereCompiler.getLiteralExpression(this.getChildren().get(1)); + LiteralExpression rhsB = WhereCompiler.getLiteralExpression(other.getChildren().get(1)); + Object valA = rhsA.getValue(); + Object valB = rhsB.getValue(); - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!children.get(0).evaluate(tuple, ptr)) { - return false; + PDataType typeA = rhsA.getDataType(); + PDataType typeB = rhsB.getDataType(); + switch (opA) { + case LESS: + if (opB == GREATER_OR_EQUAL || opB == GREATER || opB == NOT_EQUAL) { + return false; + } + if (opB == LESS) { + if (typeA.compareTo(valA, valB, typeB) >= 0) { + return true; + } + return false; } - if (ptr.getLength() == 0) { // null comparison evals to null + if (opB == LESS_OR_EQUAL || opB == EQUAL) { + if (typeA.compareTo(valA, valB, typeB) > 0) { return true; + } + return false; } - byte[] lhsBytes = ptr.get(); - int lhsOffset = ptr.getOffset(); - int lhsLength = ptr.getLength(); - PDataType lhsDataType = children.get(0).getDataType(); - SortOrder lhsSortOrder = children.get(0).getSortOrder(); - - if (!children.get(1).evaluate(tuple, ptr)) { - return false; + return false; + case LESS_OR_EQUAL: + if (opB == GREATER_OR_EQUAL || opB == GREATER || opB == NOT_EQUAL) { + return false; } - if (ptr.getLength() == 0) { // null comparison evals to null + if (opB == LESS_OR_EQUAL || opB == LESS || opB == EQUAL) { + if (typeA.compareTo(valA, valB, typeB) >= 0) { return true; + } + return false; } - - byte[] rhsBytes = ptr.get(); - int rhsOffset = ptr.getOffset(); - int rhsLength = ptr.getLength(); - PDataType rhsDataType = children.get(1).getDataType(); - SortOrder rhsSortOrder = children.get(1).getSortOrder(); - if (rhsDataType == PChar.INSTANCE) { - rhsLength = StringUtil.getUnpaddedCharLength(rhsBytes, rhsOffset, rhsLength, rhsSortOrder); + return false; + case EQUAL: + case NOT_EQUAL: + if (opA != opB) { + return false; } - if (lhsDataType == PChar.INSTANCE) { - lhsLength = 
StringUtil.getUnpaddedCharLength(lhsBytes, lhsOffset, lhsLength, lhsSortOrder); + if (typeA.compareTo(valA, valB, typeB) == 0) { + return true; } - - - int comparisonResult = lhsDataType.compareTo(lhsBytes, lhsOffset, lhsLength, lhsSortOrder, - rhsBytes, rhsOffset, rhsLength, rhsSortOrder, rhsDataType); - ptr.set(ByteUtil.compare(op, comparisonResult) ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); - return true; - } - - @Override - public boolean contains(Expression other) { - if (!(other instanceof ComparisonExpression || other instanceof IsNullExpression)) { - return false; + return false; + case GREATER_OR_EQUAL: + if (opB == LESS_OR_EQUAL || opB == LESS || opB == NOT_EQUAL) { + return false; } - if (other instanceof IsNullExpression) { - return !((IsNullExpression) other).isNegate(); + if (opB == GREATER_OR_EQUAL || opB == GREATER || opB == EQUAL) { + if (typeA.compareTo(valA, valB, typeB) <= 0) { + return true; + } + return false; } - - BaseTerminalExpression lhsA = - WhereCompiler.getBaseTerminalExpression(this.getChildren().get(0)); - BaseTerminalExpression lhsB = - WhereCompiler.getBaseTerminalExpression(other.getChildren().get(0)); - if (!lhsA.equals(lhsB)) { - return false; + return false; + case GREATER: + if (opB == LESS_OR_EQUAL || opB == LESS || opB == NOT_EQUAL) { + return false; } - CompareOperator opA = this.getFilterOp(); - CompareOperator opB = ((ComparisonExpression) other).getFilterOp(); - BaseTerminalExpression rhs = WhereCompiler.getBaseTerminalExpression( - this.getChildren().get(1)); - if (rhs instanceof ColumnExpression) { - BaseTerminalExpression rhsB = WhereCompiler.getBaseTerminalExpression( - other.getChildren().get(1)); - if (!rhs.equals(rhsB)) { - return false; - } - switch (opA) { - case LESS_OR_EQUAL: - if (opB == LESS || opB == LESS_OR_EQUAL || opB == EQUAL) { - return true; - } - return false; - case LESS: - case EQUAL: - case NOT_EQUAL: - case GREATER: - if (opA == opB) { - return true; - } - return false; - case GREATER_OR_EQUAL: - if (opB == GREATER || opB == GREATER_OR_EQUAL || opB == EQUAL) { - return true; - } - return false; - default: - throw new IllegalArgumentException("Unexpected CompareOp " + opA); - } + if (opB == GREATER) { + if (typeA.compareTo(valA, valB, typeB) <= 0) { + return true; + } + return false; } - LiteralExpression rhsA = WhereCompiler.getLiteralExpression(this.getChildren().get(1)); - LiteralExpression rhsB = WhereCompiler.getLiteralExpression(other.getChildren().get(1)); - Object valA = rhsA.getValue(); - Object valB = rhsB.getValue(); - - PDataType typeA = rhsA.getDataType(); - PDataType typeB = rhsB.getDataType(); - switch (opA){ - case LESS: - if (opB == GREATER_OR_EQUAL || opB == GREATER || opB == NOT_EQUAL) { - return false; - } - if (opB == LESS) { - if (typeA.compareTo(valA, valB, typeB) >= 0) { - return true; - } - return false; - } - if (opB == LESS_OR_EQUAL || opB == EQUAL) { - if (typeA.compareTo(valA, valB, typeB) > 0) { - return true; - } - return false; - } - return false; - case LESS_OR_EQUAL: - if (opB == GREATER_OR_EQUAL || opB == GREATER || opB ==NOT_EQUAL) { - return false; - } - if (opB == LESS_OR_EQUAL || opB == LESS || opB == EQUAL) { - if (typeA.compareTo(valA, valB, typeB) >= 0) { - return true; - } - return false; - } - return false; - case EQUAL: - case NOT_EQUAL: - if (opA != opB) { - return false; - } - if (typeA.compareTo(valA, valB, typeB) == 0) { - return true; - } - return false; - case GREATER_OR_EQUAL: - if (opB == LESS_OR_EQUAL || opB == LESS || opB ==NOT_EQUAL) { - return false; - } - if 
(opB == GREATER_OR_EQUAL || opB == GREATER || opB == EQUAL) { - if (typeA.compareTo(valA, valB, typeB) <= 0) { - return true; - } - return false; - } - return false; - case GREATER: - if (opB == LESS_OR_EQUAL || opB == LESS || opB ==NOT_EQUAL) { - return false; - } - if (opB == GREATER) { - if (typeA.compareTo(valA, valB, typeB) <= 0) { - return true; - } - return false; - } - if (opB == GREATER_OR_EQUAL || opB == EQUAL) { - if (typeA.compareTo(valA, valB, typeB) < 0) { - return true; - } - return false; - } - return false; - default: - throw new IllegalArgumentException("Unexpected CompareOp of " + opA); + if (opB == GREATER_OR_EQUAL || opB == EQUAL) { + if (typeA.compareTo(valA, valB, typeB) < 0) { + return true; + } + return false; } + return false; + default: + throw new IllegalArgumentException("Unexpected CompareOp of " + opA); } - @Override - public void readFields(DataInput input) throws IOException { - op = CompareOperator.values()[WritableUtils.readVInt(input)]; - super.readFields(input); - } + } - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, op.ordinal()); - super.write(output); - } + @Override + public void readFields(DataInput input) throws IOException { + op = CompareOperator.values()[WritableUtils.readVInt(input)]; + super.readFields(input); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; - } + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, op.ordinal()); + super.write(output); + } - public CompareOperator getFilterOp() { - return op; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } - - public static String toString(CompareOperator op, List children) { - return (children.get(0) + " " + QueryUtil.toSQL(op) + " " + children.get(1)); - } - - @Override - public String toString() { - return toString(getFilterOp(), children); - } + return t; + } + + public CompareOperator getFilterOp() { + return op; + } + + public static String toString(CompareOperator op, List children) { + return (children.get(0) + " " + QueryUtil.toSQL(op) + " " + children.get(1)); + } + + @Override + public String toString() { + return toString(getFilterOp(), children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CorrelateVariableFieldAccessExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CorrelateVariableFieldAccessExpression.java index 7ba43c72294..8fece13f65b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CorrelateVariableFieldAccessExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CorrelateVariableFieldAccessExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,48 +28,48 @@ import org.apache.phoenix.schema.types.PDataType; public class CorrelateVariableFieldAccessExpression extends BaseTerminalExpression { - private final RuntimeContext runtimeContext; - private final String variableId; - private final Expression fieldAccessExpression; - - public CorrelateVariableFieldAccessExpression(RuntimeContext context, String variableId, Expression fieldAccessExpression) { - super(); - this.runtimeContext = context; - this.variableId = variableId; - this.fieldAccessExpression = fieldAccessExpression; - } + private final RuntimeContext runtimeContext; + private final String variableId; + private final Expression fieldAccessExpression; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Tuple variable = runtimeContext.getCorrelateVariableValue(variableId); - if (variable == null) - throw new RuntimeException("Variable '" + variableId + "' not set."); - - return fieldAccessExpression.evaluate(variable, ptr); - } + public CorrelateVariableFieldAccessExpression(RuntimeContext context, String variableId, + Expression fieldAccessExpression) { + super(); + this.runtimeContext = context; + this.variableId = variableId; + this.fieldAccessExpression = fieldAccessExpression; + } - @Override - public T accept(ExpressionVisitor visitor) { - return visitor.visit(this); - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Tuple variable = runtimeContext.getCorrelateVariableValue(variableId); + if (variable == null) throw new RuntimeException("Variable '" + variableId + "' not set."); - @Override - public void write(DataOutput output) throws IOException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean success = evaluate(null, ptr); - Object value = success ? getDataType().toObject(ptr) : null; - try { - LiteralExpression expr = LiteralExpression.newConstant(value, getDataType()); - expr.write(output); - } catch (SQLException e) { - throw new IOException(e); - } - } - - @SuppressWarnings("rawtypes") - @Override - public PDataType getDataType() { - return this.fieldAccessExpression.getDataType(); + return fieldAccessExpression.evaluate(variable, ptr); + } + + @Override + public T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } + + @Override + public void write(DataOutput output) throws IOException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean success = evaluate(null, ptr); + Object value = success ? 
getDataType().toObject(ptr) : null; + try { + LiteralExpression expr = LiteralExpression.newConstant(value, getDataType()); + expr.write(output); + } catch (SQLException e) { + throw new IOException(e); } + } + + @SuppressWarnings("rawtypes") + @Override + public PDataType getDataType() { + return this.fieldAccessExpression.getDataType(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CurrentDateTimeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CurrentDateTimeFunction.java index 0b5faa2f534..131ea35ac41 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CurrentDateTimeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/CurrentDateTimeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,20 +23,20 @@ public abstract class CurrentDateTimeFunction extends ScalarFunction { - public CurrentDateTimeFunction() { - } + public CurrentDateTimeFunction() { + } - public CurrentDateTimeFunction(List children) { - super(children); - } - - @Override - public boolean isStateless() { - return true; - } + public CurrentDateTimeFunction(List children) { + super(children); + } - @Override - public Determinism getDeterminism() { - return Determinism.PER_STATEMENT; - } + @Override + public boolean isStateless() { + return true; + } + + @Override + public Determinism getDeterminism() { + return Determinism.PER_STATEMENT; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateAddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateAddExpression.java index e9ffe90a00c..a7f1209c759 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateAddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateAddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,57 +30,58 @@ import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PLong; - public class DateAddExpression extends AddExpression { - static private final BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY); - - public DateAddExpression() { - } + static private final BigDecimal BD_MILLIS_IN_DAY = + BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY); - public DateAddExpression(List children) { - super(children); - } + public DateAddExpression() { + } + + public DateAddExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=0; - - for(int i=0;i children) { - return new DateAddExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new DateAddExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateSubtractExpression.java index 2c0bec9a1d0..c8383b0125f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DateSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,57 +30,57 @@ import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PLong; - public class DateSubtractExpression extends SubtractExpression { - - public DateSubtractExpression() { - } - public DateSubtractExpression(List children) { - super(children); - } + public DateSubtractExpression() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=0; - - for(int i=0;i children) { + super(children); + } - @Override - public final PDataType getDataType() { - return PDate.INSTANCE; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + long finalResult = 0; - @Override - public ArithmeticExpression clone(List children) { - return new DateSubtractExpression(children); + for (int i = 0; i < children.size(); i++) { + if (!children.get(i).evaluate(tuple, ptr) || ptr.getLength() == 0) { + return false; + } + long value; + PDataType type = children.get(i).getDataType(); + SortOrder sortOrder = children.get(i).getSortOrder(); + if (type == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, type, sortOrder); + value = bd.multiply(BD_MILLIS_IN_DAY).longValue(); + } else if (type.isCoercibleTo(PLong.INSTANCE)) { + value = type.getCodec().decodeLong(ptr, sortOrder) * QueryConstants.MILLIS_IN_DAY; + } else if (type.isCoercibleTo(PDouble.INSTANCE)) { + value = + (long) (type.getCodec().decodeDouble(ptr, sortOrder) * QueryConstants.MILLIS_IN_DAY); + } else { + value = type.getCodec().decodeLong(ptr, sortOrder); + } + if (i == 0) { + 
finalResult = value; + } else { + finalResult -= value; + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeLong(finalResult, resultPtr, 0); + ptr.set(resultPtr); + return true; + } + + @Override + public final PDataType getDataType() { + return PDate.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new DateSubtractExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalAddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalAddExpression.java index efc66c36d9e..d66c50407cb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalAddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalAddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,75 +28,74 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.NumberUtil; - public class DecimalAddExpression extends AddExpression { - private Integer maxLength; - private Integer scale; + private Integer maxLength; + private Integer scale; - public DecimalAddExpression() { - } + public DecimalAddExpression() { + } - public DecimalAddExpression(List children) { - super(children); - Expression firstChild = children.get(0); - maxLength = getPrecision(firstChild); - scale = getScale(firstChild); - for (int i=1; i children) { + super(children); + Expression firstChild = children.get(0); + maxLength = getPrecision(firstChild); + scale = getScale(firstChild); + for (int i = 1; i < children.size(); i++) { + Expression childExpr = children.get(i); + maxLength = getPrecision(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); + scale = getScale(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal result = null; - for (int i=0; i children) { - return new DecimalAddExpression(children); + if (result == null) { + throw new DataExceedsCapacityException(PDecimal.INSTANCE, maxLength, scale, null); } + ptr.set(PDecimal.INSTANCE.toBytes(result)); + return true; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public Integer getScale() { + return scale; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public ArithmeticExpression clone(List children) { + return new DecimalAddExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalDivideExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalDivideExpression.java index 78441a6424b..4691bb1526a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalDivideExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalDivideExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,56 +28,54 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.NumberUtil; - public class DecimalDivideExpression extends DivideExpression { - public DecimalDivideExpression() { - } + public DecimalDivideExpression() { + } - public DecimalDivideExpression(List children) { - super(children); - } + public DecimalDivideExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal result = null; - for (int i=0; i children) { - return new DecimalDivideExpression(children); + if (result == null) { + result = bd; + } else { + result = result.divide(bd, PDataType.DEFAULT_MATH_CONTEXT); + } } + if (getMaxLength() != null || getScale() != null) { + result = NumberUtil.setDecimalWidthAndScale(result, getMaxLength(), getScale()); + } + if (result == null) { + throw new DataExceedsCapacityException(PDecimal.INSTANCE, getMaxLength(), getScale(), null); + } + ptr.set(PDecimal.INSTANCE.toBytes(result)); + return true; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new DecimalDivideExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalMultiplyExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalMultiplyExpression.java index 536c1a84ac9..9e317d4d41e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalMultiplyExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalMultiplyExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,56 +28,54 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.NumberUtil; - public class DecimalMultiplyExpression extends MultiplyExpression { - public DecimalMultiplyExpression() { - } + public DecimalMultiplyExpression() { + } - public DecimalMultiplyExpression(List children) { - super(children); - } + public DecimalMultiplyExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal result = null; - for (int i=0; i children) { - return new DecimalMultiplyExpression(children); + if (result == null) { + result = bd; + } else { + result = result.multiply(bd); + } } + if (getMaxLength() != null || getScale() != null) { + result = NumberUtil.setDecimalWidthAndScale(result, getMaxLength(), getScale()); + } + if (result == null) { + throw new DataExceedsCapacityException(PDecimal.INSTANCE, getMaxLength(), getScale(), null); + } + ptr.set(PDecimal.INSTANCE.toBytes(result)); + return true; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new DecimalMultiplyExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalSubtractExpression.java index 70754fd0b0e..bf95022d571 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DecimalSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,92 +29,88 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.NumberUtil; - /** - * * Subtract expression implementation - * - * * @since 0.1 */ public class DecimalSubtractExpression extends SubtractExpression { - private Integer maxLength; - private Integer scale; + private Integer maxLength; + private Integer scale; - public DecimalSubtractExpression() { - } + public DecimalSubtractExpression() { + } - public DecimalSubtractExpression(List children) { - super(children); - Expression firstChild = children.get(0); - maxLength = getPrecision(firstChild); - scale = getScale(firstChild); - for (int i=1; i children) { + super(children); + Expression firstChild = children.get(0); + maxLength = getPrecision(firstChild); + scale = getScale(firstChild); + for (int i = 1; i < children.size(); i++) { + Expression childExpr = children.get(i); + maxLength = getPrecision(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); + scale = getScale(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal result = null; - for (int i=0; i children) { - return new DecimalSubtractExpression(children); + if (result == null) { + throw new DataExceedsCapacityException(PDecimal.INSTANCE, maxLength, scale, null); } + ptr.set(PDecimal.INSTANCE.toBytes(result)); + return true; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public Integer getScale() { + return scale; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public ArithmeticExpression clone(List children) { + return new DecimalSubtractExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DelegateExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DelegateExpression.java index fd783c30008..b5c63c00c87 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DelegateExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DelegateExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,90 +24,90 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.visitor.ExpressionVisitor; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; public class DelegateExpression implements Expression { - private final Expression delegate; - - public DelegateExpression(Expression delegate) { - this.delegate = delegate; - } - - @Override - public boolean isNullable() { - return delegate.isNullable(); - } - - @Override - public PDataType getDataType() { - return delegate.getDataType(); - } - - @Override - public Integer getMaxLength() { - return delegate.getMaxLength(); - } - - @Override - public Integer getScale() { - return delegate.getScale(); - } - - @Override - public SortOrder getSortOrder() { - return delegate.getSortOrder(); - } - - @Override - public void readFields(DataInput input) throws IOException { - delegate.readFields(input); - } - - @Override - public void write(DataOutput output) throws IOException { - delegate.write(output); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return delegate.evaluate(tuple, ptr); - } - - @Override - public T accept(ExpressionVisitor visitor) { - return delegate.accept(visitor); - } - - @Override - public List getChildren() { - return delegate.getChildren(); - } - - @Override - public void reset() { - delegate.reset(); - } - - @Override - public boolean isStateless() { - return delegate.isStateless(); - } - - @Override - public Determinism getDeterminism() { - return delegate.getDeterminism(); - } - - @Override - public boolean requiresFinalEvaluation() { - return delegate.requiresFinalEvaluation(); - } - - @Override - public boolean isCloneExpression() { - return delegate.isCloneExpression(); - } + private final Expression delegate; + + public DelegateExpression(Expression delegate) { + this.delegate = delegate; + } + + @Override + public boolean isNullable() { + return delegate.isNullable(); + } + + @Override + public PDataType getDataType() { + return delegate.getDataType(); + } + + @Override + public Integer getMaxLength() { + return delegate.getMaxLength(); + } + + @Override + public Integer getScale() { + return delegate.getScale(); + } + + @Override + public SortOrder getSortOrder() { + return delegate.getSortOrder(); + } + + @Override + public void readFields(DataInput input) throws IOException { + delegate.readFields(input); + } + + @Override + public void write(DataOutput output) throws IOException { + delegate.write(output); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return delegate.evaluate(tuple, ptr); + } + + @Override + public T accept(ExpressionVisitor visitor) { + return delegate.accept(visitor); + } + + @Override + public List getChildren() { + return delegate.getChildren(); + } + + @Override + public void reset() { + delegate.reset(); + } + + @Override + public boolean isStateless() { + return delegate.isStateless(); + } + + @Override + public Determinism getDeterminism() { + return delegate.getDeterminism(); + } + + @Override + public boolean requiresFinalEvaluation() { + return 
delegate.requiresFinalEvaluation(); + } + + @Override + public boolean isCloneExpression() { + return delegate.isCloneExpression(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Determinism.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Determinism.java index 5acbfc73ff4..9feef67a762 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Determinism.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Determinism.java @@ -1,19 +1,30 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.expression; public enum Determinism { - - ALWAYS, PER_STATEMENT, PER_ROW, PER_INVOCATION; - - public Determinism combine (Determinism that) { - return Determinism.values()[Math.max(this.ordinal(), that.ordinal())]; - } -} \ No newline at end of file + + ALWAYS, + PER_STATEMENT, + PER_ROW, + PER_INVOCATION; + + public Determinism combine(Determinism that) { + return Determinism.values()[Math.max(this.ordinal(), that.ordinal())]; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DivideExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DivideExpression.java index d5ff956c9bf..0ac089a08bf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DivideExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DivideExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,74 +22,69 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor; import org.apache.phoenix.schema.types.PDataType; - /** - * * Divide expression implementation - * - * * @since 0.1 */ public abstract class DivideExpression extends ArithmeticExpression { - private Integer maxLength; - private Integer scale; + private Integer maxLength; + private Integer scale; - public DivideExpression() { - } + public DivideExpression() { + } - public DivideExpression(List children) { - super(children); - Expression firstChild = children.get(0); - maxLength = getPrecision(firstChild); - scale = getScale(firstChild); - for (int i=1; i children) { + super(children); + Expression firstChild = children.get(0); + maxLength = getPrecision(firstChild); + scale = getScale(firstChild); + for (int i = 1; i < children.size(); i++) { + Expression childExpr = children.get(i); + maxLength = getPrecision(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); + scale = getScale(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); } + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } - @Override - public String getOperatorString() { - return " / "; - } - - private static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { - if (ls == null || rs == null) { - return PDataType.MAX_PRECISION; - } - int val = getScale(lp, rp, ls, rs) + lp - ls + rp; - return Math.min(PDataType.MAX_PRECISION, val); - } + @Override + public String getOperatorString() { + return " / "; + } - private static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { - // If we are adding a decimal with scale and precision to a decimal - // with no precision nor scale, the scale system does not apply. - if (ls == null || rs == null) { - return null; - } - int val = Math.max(PDataType.MAX_PRECISION - lp + ls - rs, 0); - return Math.min(PDataType.MAX_PRECISION, val); + private static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { + if (ls == null || rs == null) { + return PDataType.MAX_PRECISION; } + int val = getScale(lp, rp, ls, rs) + lp - ls + rp; + return Math.min(PDataType.MAX_PRECISION, val); + } - - @Override - public Integer getScale() { - return scale; + private static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { + // If we are adding a decimal with scale and precision to a decimal + // with no precision nor scale, the scale system does not apply. 
+ if (ls == null || rs == null) { + return null; } + int val = Math.max(PDataType.MAX_PRECISION - lp + ls - rs, 0); + return Math.min(PDataType.MAX_PRECISION, val); + } - @Override - public Integer getMaxLength() { - return maxLength; - } + @Override + public Integer getScale() { + return scale; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleAddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleAddExpression.java index 5a6d839531e..dcd9a202aba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleAddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleAddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,48 +26,48 @@ public class DoubleAddExpression extends AddExpression { - public DoubleAddExpression() { - } + public DoubleAddExpression() { + } - public DoubleAddExpression(List children) { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - double result = 0.0; - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - double childvalue = child.getDataType().getCodec() - .decodeDouble(ptr, child.getSortOrder()); - if (!Double.isNaN(childvalue) - && childvalue != Double.NEGATIVE_INFINITY - && childvalue != Double.POSITIVE_INFINITY) { - result += childvalue; - } else { - return false; - } - } - byte[] resultPtr = new byte[getDataType().getByteSize()]; - getDataType().getCodec().encodeDouble(result, resultPtr, 0); - ptr.set(resultPtr); + public DoubleAddExpression(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + double result = 0.0; + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + double childvalue = child.getDataType().getCodec().decodeDouble(ptr, child.getSortOrder()); + if ( + !Double.isNaN(childvalue) && childvalue != Double.NEGATIVE_INFINITY + && childvalue != Double.POSITIVE_INFINITY + ) { + result += childvalue; + } else { + return false; + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeDouble(result, resultPtr, 0); + ptr.set(resultPtr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } - @Override - public ArithmeticExpression clone(List children) { - return new DoubleAddExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new DoubleAddExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleDivideExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleDivideExpression.java index f153fd91832..e4d335c2596 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleDivideExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleDivideExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,52 +26,52 @@ public class DoubleDivideExpression extends DivideExpression { - public DoubleDivideExpression() { - } + public DoubleDivideExpression() { + } - public DoubleDivideExpression(List children) { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - double result = 0.0; - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - double childvalue = child.getDataType().getCodec() - .decodeDouble(ptr, child.getSortOrder()); - if (!Double.isNaN(childvalue) - && childvalue != Double.NEGATIVE_INFINITY - && childvalue != Double.POSITIVE_INFINITY) { - if (i == 0) { - result = childvalue; - } else { - result /= childvalue; - } - } else { - return false; - } - } - byte[] resultPtr = new byte[getDataType().getByteSize()]; - getDataType().getCodec().encodeDouble(result, resultPtr, 0); - ptr.set(resultPtr); + public DoubleDivideExpression(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + double result = 0.0; + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + double childvalue = child.getDataType().getCodec().decodeDouble(ptr, child.getSortOrder()); + if ( + !Double.isNaN(childvalue) && childvalue != Double.NEGATIVE_INFINITY + && childvalue != Double.POSITIVE_INFINITY + ) { + if (i == 0) { + result = childvalue; + } else { + result /= childvalue; + } + } else { + return false; + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeDouble(result, resultPtr, 0); + ptr.set(resultPtr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } - @Override - public ArithmeticExpression clone(List children) { - return new DoubleDivideExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new DoubleDivideExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleMultiplyExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleMultiplyExpression.java index 9daf3fbe78b..ffe41bb2af9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleMultiplyExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleMultiplyExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,48 +26,48 @@ public class DoubleMultiplyExpression extends MultiplyExpression { - public DoubleMultiplyExpression() { - } + public DoubleMultiplyExpression() { + } - public DoubleMultiplyExpression(List children) { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - double result = 1.0; - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - double childvalue = child.getDataType().getCodec() - .decodeDouble(ptr, child.getSortOrder()); - if (!Double.isNaN(childvalue) - && childvalue != Double.NEGATIVE_INFINITY - && childvalue != Double.POSITIVE_INFINITY) { - result *= childvalue; - } else { - return false; - } - } - byte[] resultPtr = new byte[getDataType().getByteSize()]; - getDataType().getCodec().encodeDouble(result, resultPtr, 0); - ptr.set(resultPtr); + public DoubleMultiplyExpression(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + double result = 1.0; + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + double childvalue = child.getDataType().getCodec().decodeDouble(ptr, child.getSortOrder()); + if ( + !Double.isNaN(childvalue) && childvalue != Double.NEGATIVE_INFINITY + && childvalue != Double.POSITIVE_INFINITY + ) { + result *= childvalue; + } else { + return false; + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeDouble(result, resultPtr, 0); + ptr.set(resultPtr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } - @Override - public ArithmeticExpression clone(List children) { - return new DoubleMultiplyExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new DoubleMultiplyExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleSubtractExpression.java index ae014e449ba..29c5529c89e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/DoubleSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,52 +26,52 @@ public class DoubleSubtractExpression extends SubtractExpression { - public DoubleSubtractExpression() { - } + public DoubleSubtractExpression() { + } - public DoubleSubtractExpression(List children) { - super(children); - } + public DoubleSubtractExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - double result = 0.0; - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - double childvalue = child.getDataType().getCodec() - .decodeDouble(ptr, child.getSortOrder()); - if (!Double.isNaN(childvalue) - && childvalue != Double.NEGATIVE_INFINITY - && childvalue != Double.POSITIVE_INFINITY) { - if (i == 0) { - result = childvalue; - } else { - result -= childvalue; - } - } else { - return false; - } - } - byte[] resultPtr = new byte[getDataType().getByteSize()]; - getDataType().getCodec().encodeDouble(result, resultPtr, 0); - ptr.set(resultPtr); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + double result = 0.0; + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + double childvalue = child.getDataType().getCodec().decodeDouble(ptr, child.getSortOrder()); + if ( + !Double.isNaN(childvalue) && childvalue != Double.NEGATIVE_INFINITY + && childvalue != Double.POSITIVE_INFINITY + ) { + if (i == 0) { + result = childvalue; + } else { + result -= childvalue; + } + } else { + return false; + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeDouble(result, resultPtr, 0); + ptr.set(resultPtr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } - @Override - public ArithmeticExpression clone(List children) { - return new DoubleSubtractExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new DoubleSubtractExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Expression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Expression.java index b744a6e5049..fe44b62112b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Expression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/Expression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,83 +25,68 @@ import org.apache.phoenix.schema.PDatum; import org.apache.phoenix.schema.tuple.Tuple; - /** - * * Interface for general expression evaluation - * - * * @since 0.1 */ public interface Expression extends PDatum, Writable { - - /** - * Access the value by setting a pointer to it (as opposed to making - * a copy of it which can be expensive) - * @param tuple Single row result during scan iteration - * @param ptr Pointer to byte value being accessed - * @return true if the expression could be evaluated (i.e. ptr was set) - * and false otherwise - */ - boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr); - - /** - * Means of traversing expression tree through visitor. - * @param visitor - */ - T accept(ExpressionVisitor visitor); - - /** - * @return the child expressions - */ - List getChildren(); - - /** - * Resets the state of a expression back to its initial state and - * enables the expession to be evaluated incrementally (which - * occurs during filter evaluation where we see one key value at - * a time; it's possible to evaluate immediately rather than - * wait until all key values have been seen). Note that when - * evaluating incrementally, you must call this method before - * processing a new row. - */ - void reset(); - - /** - * @return true if the expression can be evaluated on the client - * side with out server state. If a sequence is involved, you - * still need a Tuple from a {@link org.apache.phoenix.iterate.SequenceResultIterator}, - * but otherwise the Tuple may be null. - */ - boolean isStateless(); - - /** - * @return Determinism enum - */ - Determinism getDeterminism(); - - /** - * Determines if an evaluate is required after partial evaluation - * is run. For example, in the case of an IS NULL expression, we - * only know we can return TRUE after all KeyValues have been seen - * while an expression is used in the context of a filter. - * @return - */ - boolean requiresFinalEvaluation(); - /** - * Determines if expression needs to be cloned in {@link org.apache.phoenix.compile.RowProjector} - * @return - */ - boolean isCloneExpression(); + /** + * Access the value by setting a pointer to it (as opposed to making a copy of it which can be + * expensive) + * @param tuple Single row result during scan iteration + * @param ptr Pointer to byte value being accessed + * @return true if the expression could be evaluated (i.e. ptr was set) and false otherwise + */ + boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr); + + /** + * Means of traversing expression tree through visitor. + */ + T accept(ExpressionVisitor visitor); + + /** Returns the child expressions */ + List getChildren(); + + /** + * Resets the state of a expression back to its initial state and enables the expession to be + * evaluated incrementally (which occurs during filter evaluation where we see one key value at a + * time; it's possible to evaluate immediately rather than wait until all key values have been + * seen). Note that when evaluating incrementally, you must call this method before processing a + * new row. + */ + void reset(); + + /** + * @return true if the expression can be evaluated on the client side with out server state. 
If a + * sequence is involved, you still need a Tuple from a + * {@link org.apache.phoenix.iterate.SequenceResultIterator}, but otherwise the Tuple may + * be null. + */ + boolean isStateless(); + + /** Returns Determinism enum */ + Determinism getDeterminism(); + + /** + * Determines if an evaluate is required after partial evaluation is run. For example, in the case + * of an IS NULL expression, we only know we can return TRUE after all KeyValues have been seen + * while an expression is used in the context of a filter. + */ + boolean requiresFinalEvaluation(); + + /** + * Determines if expression needs to be cloned in {@link org.apache.phoenix.compile.RowProjector} + */ + boolean isCloneExpression(); - /** - * Determines if this contains/implies other. For example A > 0 contains A >= 5. - * @param other is an expression with the lhs (left-hand side) column having the same type of - * the lhs column of other; - * @return true if this contains other. - */ - default boolean contains(Expression other) { - return this.equals(other); - } + /** + * Determines if this contains/implies other. For example A > 0 contains A >= 5. + * @param other is an expression with the lhs (left-hand side) column having the same type of the + * lhs column of other; + * @return true if this contains other. + */ + default boolean contains(Expression other) { + return this.equals(other); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ExpressionType.java index d71c07185ac..d62de41a970 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ExpressionType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ExpressionType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,240 +20,236 @@ import java.util.Map; import org.apache.phoenix.expression.function.*; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; /** - * - * Enumeration of all Expression types that will be looked up. They may be evaluated on the server-side. - * Used during serialization and deserialization to pass Expression between client - * and server. - * - * - * + * Enumeration of all Expression types that will be looked up. They may be evaluated on the + * server-side. Used during serialization and deserialization to pass Expression between client and + * server. 
* @since 0.1 */ -// Important : When you want to add new Types make sure to add those towards the end, not changing the existing type's +// Important : When you want to add new Types make sure to add those towards the end, not changing +// the existing type's // ordinal public enum ExpressionType { - ReverseFunction(ReverseFunction.class), - RowKey(RowKeyColumnExpression.class), - KeyValue(KeyValueColumnExpression.class), - LiteralValue(LiteralExpression.class), - RoundFunction(RoundFunction.class), - FloorFunction(FloorFunction.class), - CeilFunction(CeilFunction.class), - RoundDateExpression(RoundDateExpression.class), - FloorDateExpression(FloorDateExpression.class), - CeilDateExpression(CeilDateExpression.class), - RoundTimestampExpression(RoundTimestampExpression.class), - CeilTimestampExpression(CeilTimestampExpression.class), - RoundDecimalExpression(RoundDecimalExpression.class), - FloorDecimalExpression(FloorDecimalExpression.class), - CeilDecimalExpression(CeilDecimalExpression.class), - TruncFunction(TruncFunction.class), - ToDateFunction(ToDateFunction.class), - ToCharFunction(ToCharFunction.class), - ToNumberFunction(ToNumberFunction.class), - CoerceFunction(CoerceExpression.class), - SubstrFunction(SubstrFunction.class), - AndExpression(AndExpression.class), - OrExpression(OrExpression.class), - ComparisonExpression(ComparisonExpression.class), - CountAggregateFunction(CountAggregateFunction.class), - SumAggregateFunction(SumAggregateFunction.class), - MinAggregateFunction(MinAggregateFunction.class), - MaxAggregateFunction(MaxAggregateFunction.class), - StringBasedLikeExpression(StringBasedLikeExpression.class), - NotExpression(NotExpression.class), - CaseExpression(CaseExpression.class), - InListExpression(InListExpression.class), - IsNullExpression(IsNullExpression.class), - LongSubtractExpression(LongSubtractExpression.class), - DateSubtractExpression(DateSubtractExpression.class), - DecimalSubtractExpression(DecimalSubtractExpression.class), - LongAddExpression(LongAddExpression.class), - DecimalAddExpression(DecimalAddExpression.class), - DateAddExpression(DateAddExpression.class), - LongMultiplyExpression(LongMultiplyExpression.class), - DecimalMultiplyExpression(DecimalMultiplyExpression.class), - LongDivideExpression(LongDivideExpression.class), - DecimalDivideExpression(DecimalDivideExpression.class), - CoalesceFunction(CoalesceFunction.class), - StringBasedRegexpReplaceFunction(StringBasedRegexpReplaceFunction.class), - SQLTypeNameFunction(SqlTypeNameFunction.class), - StringBasedRegexpSubstrFunction(StringBasedRegexpSubstrFunction.class), - StringConcatExpression(StringConcatExpression.class), - LengthFunction(LengthFunction.class), - LTrimFunction(LTrimFunction.class), - RTrimFunction(RTrimFunction.class), - UpperFunction(UpperFunction.class), - LowerFunction(LowerFunction.class), - TrimFunction(TrimFunction.class), - DistinctCountAggregateFunction(DistinctCountAggregateFunction.class), - PercentileContAggregateFunction(PercentileContAggregateFunction.class), - PercentRankAggregateFunction(PercentRankAggregateFunction.class), - StddevPopFunction(StddevPopFunction.class), - StddevSampFunction(StddevSampFunction.class), - PercentileDiscAggregateFunction(PercentileDiscAggregateFunction.class), - DoubleAddExpression(DoubleAddExpression.class), - DoubleSubtractExpression(DoubleSubtractExpression.class), - DoubleMultiplyExpression(DoubleMultiplyExpression.class), - DoubleDivideExpression(DoubleDivideExpression.class), - 
RowValueConstructorExpression(RowValueConstructorExpression.class), - MD5Function(MD5Function.class), - SQLTableTypeFunction(SQLTableTypeFunction.class), - IndexStateName(IndexStateNameFunction.class), - InvertFunction(InvertFunction.class), - ProjectedColumnExpression(ProjectedColumnExpression.class), - TimestampAddExpression(TimestampAddExpression.class), - TimestampSubtractExpression(TimestampSubtractExpression.class), - ArrayIndexFunction(ArrayIndexFunction.class), - ArrayLengthFunction(ArrayLengthFunction.class), - ArrayConstructorExpression(ArrayConstructorExpression.class), - SQLViewTypeFunction(SQLViewTypeFunction.class), - ExternalSqlTypeIdFunction(ExternalSqlTypeIdFunction.class), - ConvertTimezoneFunction(ConvertTimezoneFunction.class), - DecodeFunction(DecodeFunction.class), - TimezoneOffsetFunction(TimezoneOffsetFunction.class), - EncodeFunction(EncodeFunction.class), - LpadFunction(LpadFunction.class), - NthValueFunction(NthValueFunction.class), - FirstValueFunction(FirstValueFunction.class), - LastValueFunction(LastValueFunction.class), - ArrayAnyComparisonExpression(ArrayAnyComparisonExpression.class), - ArrayAllComparisonExpression(ArrayAllComparisonExpression.class), - InlineArrayElemRefExpression(ArrayElemRefExpression.class), - SQLIndexTypeFunction(SQLIndexTypeFunction.class), - ModulusExpression(ModulusExpression.class), - DistinctValueAggregateFunction(DistinctValueAggregateFunction.class), - StringBasedRegexpSplitFunction(StringBasedRegexpSplitFunction.class), - RandomFunction(RandomFunction.class), - ToTimeFunction(ToTimeFunction.class), - ToTimestampFunction(ToTimestampFunction.class), - ByteBasedLikeExpression(ByteBasedLikeExpression.class), - ByteBasedRegexpReplaceFunction(ByteBasedRegexpReplaceFunction.class), - ByteBasedRegexpSubstrFunction(ByteBasedRegexpSubstrFunction.class), - ByteBasedRegexpSplitFunction(ByteBasedRegexpSplitFunction.class), - LikeExpression(LikeExpression.class), - RegexpReplaceFunction(RegexpReplaceFunction.class), - RegexpSubstrFunction(RegexpSubstrFunction.class), - RegexpSplitFunction(RegexpSplitFunction.class), - SignFunction(SignFunction.class), - YearFunction(YearFunction.class), - MonthFunction(MonthFunction.class), - SecondFunction(SecondFunction.class), - WeekFunction(WeekFunction.class), - HourFunction(HourFunction.class), - NowFunction(NowFunction.class), - InstrFunction(InstrFunction.class), - MinuteFunction(MinuteFunction.class), - DayOfMonthFunction(DayOfMonthFunction.class), - ArrayAppendFunction(ArrayAppendFunction.class), - UDFExpression(UDFExpression.class), - ArrayPrependFunction(ArrayPrependFunction.class), - SqrtFunction(SqrtFunction.class), - AbsFunction(AbsFunction.class), - CbrtFunction(CbrtFunction.class), - LnFunction(LnFunction.class), - LogFunction(LogFunction.class), - ExpFunction(ExpFunction.class), - PowerFunction(PowerFunction.class), - ArrayConcatFunction(ArrayConcatFunction.class), - ArrayFillFunction(ArrayFillFunction.class), - ArrayToStringFunction(ArrayToStringFunction.class), - StringToArrayFunction(StringToArrayFunction.class), - GetByteFunction(GetByteFunction.class), - SetByteFunction(SetByteFunction.class), - GetBitFunction(GetBitFunction.class), - SetBitFunction(SetBitFunction.class), - OctetLengthFunction(OctetLengthFunction.class), - RoundWeekExpression(RoundWeekExpression.class), - RoundMonthExpression(RoundMonthExpression.class), - RoundYearExpression(RoundYearExpression.class), - FloorWeekExpression(FloorWeekExpression.class), - FloorMonthExpression(FloorMonthExpression.class), - 
FloorYearExpression(FloorYearExpression.class), - CeilWeekExpression(CeilWeekExpression.class), - CeilMonthExpression(CeilMonthExpression.class), - CeilYearExpression(CeilYearExpression.class), - DayOfWeekFunction(DayOfWeekFunction.class), - DayOfYearFunction(DayOfYearFunction.class), - DefaultValueExpression(DefaultValueExpression.class), - ArrayColumnExpression(SingleCellColumnExpression.class), - FirstValuesFunction(FirstValuesFunction.class), - LastValuesFunction(LastValuesFunction.class), - DistinctCountHyperLogLogAggregateFunction(DistinctCountHyperLogLogAggregateFunction.class), - CollationKeyFunction(CollationKeyFunction.class), - ArrayRemoveFunction(ArrayRemoveFunction.class), - TransactionProviderNameFunction(TransactionProviderNameFunction.class), - MathPIFunction(MathPIFunction.class), - SinFunction(SinFunction.class), - CosFunction(CosFunction.class), - TanFunction(TanFunction.class), - RowKeyBytesStringFunction(RowKeyBytesStringFunction.class), - PhoenixRowTimestampFunction(PhoenixRowTimestampFunction.class), - JsonValueFunction(JsonValueFunction.class), - JsonQueryFunction(JsonQueryFunction.class), - JsonExistsFunction(JsonExistsFunction.class), - JsonModifyFunction(JsonModifyFunction.class), - BsonConditionExpressionFunction(BsonConditionExpressionFunction.class), - BsonUpdateExpressionFunction(BsonUpdateExpressionFunction.class), - BsonValueFunction(BsonValueFunction.class); + ReverseFunction(ReverseFunction.class), + RowKey(RowKeyColumnExpression.class), + KeyValue(KeyValueColumnExpression.class), + LiteralValue(LiteralExpression.class), + RoundFunction(RoundFunction.class), + FloorFunction(FloorFunction.class), + CeilFunction(CeilFunction.class), + RoundDateExpression(RoundDateExpression.class), + FloorDateExpression(FloorDateExpression.class), + CeilDateExpression(CeilDateExpression.class), + RoundTimestampExpression(RoundTimestampExpression.class), + CeilTimestampExpression(CeilTimestampExpression.class), + RoundDecimalExpression(RoundDecimalExpression.class), + FloorDecimalExpression(FloorDecimalExpression.class), + CeilDecimalExpression(CeilDecimalExpression.class), + TruncFunction(TruncFunction.class), + ToDateFunction(ToDateFunction.class), + ToCharFunction(ToCharFunction.class), + ToNumberFunction(ToNumberFunction.class), + CoerceFunction(CoerceExpression.class), + SubstrFunction(SubstrFunction.class), + AndExpression(AndExpression.class), + OrExpression(OrExpression.class), + ComparisonExpression(ComparisonExpression.class), + CountAggregateFunction(CountAggregateFunction.class), + SumAggregateFunction(SumAggregateFunction.class), + MinAggregateFunction(MinAggregateFunction.class), + MaxAggregateFunction(MaxAggregateFunction.class), + StringBasedLikeExpression(StringBasedLikeExpression.class), + NotExpression(NotExpression.class), + CaseExpression(CaseExpression.class), + InListExpression(InListExpression.class), + IsNullExpression(IsNullExpression.class), + LongSubtractExpression(LongSubtractExpression.class), + DateSubtractExpression(DateSubtractExpression.class), + DecimalSubtractExpression(DecimalSubtractExpression.class), + LongAddExpression(LongAddExpression.class), + DecimalAddExpression(DecimalAddExpression.class), + DateAddExpression(DateAddExpression.class), + LongMultiplyExpression(LongMultiplyExpression.class), + DecimalMultiplyExpression(DecimalMultiplyExpression.class), + LongDivideExpression(LongDivideExpression.class), + DecimalDivideExpression(DecimalDivideExpression.class), + CoalesceFunction(CoalesceFunction.class), + 
StringBasedRegexpReplaceFunction(StringBasedRegexpReplaceFunction.class), + SQLTypeNameFunction(SqlTypeNameFunction.class), + StringBasedRegexpSubstrFunction(StringBasedRegexpSubstrFunction.class), + StringConcatExpression(StringConcatExpression.class), + LengthFunction(LengthFunction.class), + LTrimFunction(LTrimFunction.class), + RTrimFunction(RTrimFunction.class), + UpperFunction(UpperFunction.class), + LowerFunction(LowerFunction.class), + TrimFunction(TrimFunction.class), + DistinctCountAggregateFunction(DistinctCountAggregateFunction.class), + PercentileContAggregateFunction(PercentileContAggregateFunction.class), + PercentRankAggregateFunction(PercentRankAggregateFunction.class), + StddevPopFunction(StddevPopFunction.class), + StddevSampFunction(StddevSampFunction.class), + PercentileDiscAggregateFunction(PercentileDiscAggregateFunction.class), + DoubleAddExpression(DoubleAddExpression.class), + DoubleSubtractExpression(DoubleSubtractExpression.class), + DoubleMultiplyExpression(DoubleMultiplyExpression.class), + DoubleDivideExpression(DoubleDivideExpression.class), + RowValueConstructorExpression(RowValueConstructorExpression.class), + MD5Function(MD5Function.class), + SQLTableTypeFunction(SQLTableTypeFunction.class), + IndexStateName(IndexStateNameFunction.class), + InvertFunction(InvertFunction.class), + ProjectedColumnExpression(ProjectedColumnExpression.class), + TimestampAddExpression(TimestampAddExpression.class), + TimestampSubtractExpression(TimestampSubtractExpression.class), + ArrayIndexFunction(ArrayIndexFunction.class), + ArrayLengthFunction(ArrayLengthFunction.class), + ArrayConstructorExpression(ArrayConstructorExpression.class), + SQLViewTypeFunction(SQLViewTypeFunction.class), + ExternalSqlTypeIdFunction(ExternalSqlTypeIdFunction.class), + ConvertTimezoneFunction(ConvertTimezoneFunction.class), + DecodeFunction(DecodeFunction.class), + TimezoneOffsetFunction(TimezoneOffsetFunction.class), + EncodeFunction(EncodeFunction.class), + LpadFunction(LpadFunction.class), + NthValueFunction(NthValueFunction.class), + FirstValueFunction(FirstValueFunction.class), + LastValueFunction(LastValueFunction.class), + ArrayAnyComparisonExpression(ArrayAnyComparisonExpression.class), + ArrayAllComparisonExpression(ArrayAllComparisonExpression.class), + InlineArrayElemRefExpression(ArrayElemRefExpression.class), + SQLIndexTypeFunction(SQLIndexTypeFunction.class), + ModulusExpression(ModulusExpression.class), + DistinctValueAggregateFunction(DistinctValueAggregateFunction.class), + StringBasedRegexpSplitFunction(StringBasedRegexpSplitFunction.class), + RandomFunction(RandomFunction.class), + ToTimeFunction(ToTimeFunction.class), + ToTimestampFunction(ToTimestampFunction.class), + ByteBasedLikeExpression(ByteBasedLikeExpression.class), + ByteBasedRegexpReplaceFunction(ByteBasedRegexpReplaceFunction.class), + ByteBasedRegexpSubstrFunction(ByteBasedRegexpSubstrFunction.class), + ByteBasedRegexpSplitFunction(ByteBasedRegexpSplitFunction.class), + LikeExpression(LikeExpression.class), + RegexpReplaceFunction(RegexpReplaceFunction.class), + RegexpSubstrFunction(RegexpSubstrFunction.class), + RegexpSplitFunction(RegexpSplitFunction.class), + SignFunction(SignFunction.class), + YearFunction(YearFunction.class), + MonthFunction(MonthFunction.class), + SecondFunction(SecondFunction.class), + WeekFunction(WeekFunction.class), + HourFunction(HourFunction.class), + NowFunction(NowFunction.class), + InstrFunction(InstrFunction.class), + MinuteFunction(MinuteFunction.class), + 
DayOfMonthFunction(DayOfMonthFunction.class), + ArrayAppendFunction(ArrayAppendFunction.class), + UDFExpression(UDFExpression.class), + ArrayPrependFunction(ArrayPrependFunction.class), + SqrtFunction(SqrtFunction.class), + AbsFunction(AbsFunction.class), + CbrtFunction(CbrtFunction.class), + LnFunction(LnFunction.class), + LogFunction(LogFunction.class), + ExpFunction(ExpFunction.class), + PowerFunction(PowerFunction.class), + ArrayConcatFunction(ArrayConcatFunction.class), + ArrayFillFunction(ArrayFillFunction.class), + ArrayToStringFunction(ArrayToStringFunction.class), + StringToArrayFunction(StringToArrayFunction.class), + GetByteFunction(GetByteFunction.class), + SetByteFunction(SetByteFunction.class), + GetBitFunction(GetBitFunction.class), + SetBitFunction(SetBitFunction.class), + OctetLengthFunction(OctetLengthFunction.class), + RoundWeekExpression(RoundWeekExpression.class), + RoundMonthExpression(RoundMonthExpression.class), + RoundYearExpression(RoundYearExpression.class), + FloorWeekExpression(FloorWeekExpression.class), + FloorMonthExpression(FloorMonthExpression.class), + FloorYearExpression(FloorYearExpression.class), + CeilWeekExpression(CeilWeekExpression.class), + CeilMonthExpression(CeilMonthExpression.class), + CeilYearExpression(CeilYearExpression.class), + DayOfWeekFunction(DayOfWeekFunction.class), + DayOfYearFunction(DayOfYearFunction.class), + DefaultValueExpression(DefaultValueExpression.class), + ArrayColumnExpression(SingleCellColumnExpression.class), + FirstValuesFunction(FirstValuesFunction.class), + LastValuesFunction(LastValuesFunction.class), + DistinctCountHyperLogLogAggregateFunction(DistinctCountHyperLogLogAggregateFunction.class), + CollationKeyFunction(CollationKeyFunction.class), + ArrayRemoveFunction(ArrayRemoveFunction.class), + TransactionProviderNameFunction(TransactionProviderNameFunction.class), + MathPIFunction(MathPIFunction.class), + SinFunction(SinFunction.class), + CosFunction(CosFunction.class), + TanFunction(TanFunction.class), + RowKeyBytesStringFunction(RowKeyBytesStringFunction.class), + PhoenixRowTimestampFunction(PhoenixRowTimestampFunction.class), + JsonValueFunction(JsonValueFunction.class), + JsonQueryFunction(JsonQueryFunction.class), + JsonExistsFunction(JsonExistsFunction.class), + JsonModifyFunction(JsonModifyFunction.class), + BsonConditionExpressionFunction(BsonConditionExpressionFunction.class), + BsonUpdateExpressionFunction(BsonUpdateExpressionFunction.class), + BsonValueFunction(BsonValueFunction.class); - ExpressionType(Class clazz) { - this.clazz = clazz; - } + ExpressionType(Class clazz) { + this.clazz = clazz; + } - public Class getExpressionClass() { - return clazz; - } + public Class getExpressionClass() { + return clazz; + } - private final Class clazz; + private final Class clazz; - private static final Map,ExpressionType> classToEnumMap = Maps.newHashMapWithExpectedSize(3); - static { - for (ExpressionType type : ExpressionType.values()) { - classToEnumMap.put(type.clazz, type); - } + private static final Map, ExpressionType> classToEnumMap = + Maps.newHashMapWithExpectedSize(3); + static { + for (ExpressionType type : ExpressionType.values()) { + classToEnumMap.put(type.clazz, type); } + } - /** - * Return the ExpressionType for a given Expression instance - */ - public static ExpressionType valueOf(Expression expression) { - ExpressionType type = valueOfOrNull(expression); - if (type == null) { // FIXME: this exception gets swallowed and retries happen - throw new IllegalArgumentException("No ExpressionType 
for " + expression.getClass()); - } - return type; + /** + * Return the ExpressionType for a given Expression instance + */ + public static ExpressionType valueOf(Expression expression) { + ExpressionType type = valueOfOrNull(expression); + if (type == null) { // FIXME: this exception gets swallowed and retries happen + throw new IllegalArgumentException("No ExpressionType for " + expression.getClass()); } + return type; + } - /** - * Return the ExpressionType for a given Expression instance - * or null if none exists. - */ - public static ExpressionType valueOfOrNull(Expression expression) { - Class clazz = expression.getClass(); - // We will not have CorrelateVariableFieldAccessExpression on the server side, - // it will be evaluated at client side and will be serialized as - // LiteralExpression instead. - if (clazz == CorrelateVariableFieldAccessExpression.class) { - clazz = LiteralExpression.class; - } - return classToEnumMap.get(clazz); + /** + * Return the ExpressionType for a given Expression instance or null if none exists. + */ + public static ExpressionType valueOfOrNull(Expression expression) { + Class clazz = expression.getClass(); + // We will not have CorrelateVariableFieldAccessExpression on the server side, + // it will be evaluated at client side and will be serialized as + // LiteralExpression instead. + if (clazz == CorrelateVariableFieldAccessExpression.class) { + clazz = LiteralExpression.class; } + return classToEnumMap.get(clazz); + } - /** - * Instantiates a DataAccessor based on its DataAccessorType - */ - public Expression newInstance() { - try { - return clazz.newInstance(); - } catch (InstantiationException e) { - throw new RuntimeException(e); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } + /** + * Instantiates a DataAccessor based on its DataAccessorType + */ + public Expression newInstance() { + try { + return clazz.newInstance(); + } catch (InstantiationException e) { + throw new RuntimeException(e); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/InListExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/InListExpression.java index d4156ea1580..b085750defa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/InListExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/InListExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,12 +21,12 @@ import java.io.DataOutput; import java.io.IOException; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; -import java.util.ArrayList; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -38,13 +38,12 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ExpressionUtil; -import org.apache.phoenix.util.StringUtil; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.ExpressionUtil; +import org.apache.phoenix.util.StringUtil; /* * Implementation of a SQL foo IN (a,b,c) expression. Other than the first @@ -52,402 +51,413 @@ * */ public class InListExpression extends BaseSingleExpression { - private Set values; - private ImmutableBytesPtr minValue; - private ImmutableBytesPtr maxValue; - private int valuesByteLength; - private int fixedWidth = -1; - private List keyExpressions; // client side only - private boolean rowKeyOrderOptimizable; // client side only - - // reduce hashCode() complexity - private int hashCode = -1; - private boolean hashCodeSet = false; - - public static Expression create (List children, boolean isNegate, ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { - if (children.size() == 1) { - throw new SQLException("No element in the IN list"); - } - - Expression firstChild = children.get(0); + private Set values; + private ImmutableBytesPtr minValue; + private ImmutableBytesPtr maxValue; + private int valuesByteLength; + private int fixedWidth = -1; + private List keyExpressions; // client side only + private boolean rowKeyOrderOptimizable; // client side only + + // reduce hashCode() complexity + private int hashCode = -1; + private boolean hashCodeSet = false; + + public static Expression create(List children, boolean isNegate, + ImmutableBytesWritable ptr, boolean rowKeyOrderOptimizable) throws SQLException { + if (children.size() == 1) { + throw new SQLException("No element in the IN list"); + } - if (firstChild.isStateless() && (!firstChild.evaluate(null, ptr) || ptr.getLength() == 0)) { - return LiteralExpression.newConstant(null, PBoolean.INSTANCE, firstChild.getDeterminism()); - } + Expression firstChild = children.get(0); - List childrenWithoutNulls = Lists.newArrayList(); - for (Expression child : children){ - if(!child.equals(LiteralExpression.newConstant(null))){ - childrenWithoutNulls.add(child); - } - } - if (childrenWithoutNulls.size() <= 1 ) { - // In case of after removing nulls there is no remaining element in the IN list - return LiteralExpression.newConstant(false); - } + if (firstChild.isStateless() && (!firstChild.evaluate(null, ptr) || ptr.getLength() == 0)) { + return LiteralExpression.newConstant(null, PBoolean.INSTANCE, 
firstChild.getDeterminism()); + } - if (firstChild instanceof RowValueConstructorExpression) { - List inListColumnKeyValuePairList = - getSortedInListColumnKeyValuePair(childrenWithoutNulls); - if (inListColumnKeyValuePairList != null) { - childrenWithoutNulls = getSortedRowValueConstructorExpressionList( - inListColumnKeyValuePairList, firstChild.isStateless(),children.size() - 1); - firstChild = childrenWithoutNulls.get(0); - } - } + List childrenWithoutNulls = Lists.newArrayList(); + for (Expression child : children) { + if (!child.equals(LiteralExpression.newConstant(null))) { + childrenWithoutNulls.add(child); + } + } + if (childrenWithoutNulls.size() <= 1) { + // In case of after removing nulls there is no remaining element in the IN list + return LiteralExpression.newConstant(false); + } - boolean nullInList = children.size() != childrenWithoutNulls.size(); + if (firstChild instanceof RowValueConstructorExpression) { + List inListColumnKeyValuePairList = + getSortedInListColumnKeyValuePair(childrenWithoutNulls); + if (inListColumnKeyValuePairList != null) { + childrenWithoutNulls = getSortedRowValueConstructorExpressionList( + inListColumnKeyValuePairList, firstChild.isStateless(), children.size() - 1); + firstChild = childrenWithoutNulls.get(0); + } + } - if (childrenWithoutNulls.size() == 2 && !nullInList) { - return ComparisonExpression.create(isNegate ? CompareOperator.NOT_EQUAL : CompareOperator.EQUAL, - childrenWithoutNulls, ptr, rowKeyOrderOptimizable); - } + boolean nullInList = children.size() != childrenWithoutNulls.size(); - SQLException sqlE = null; - List coercedKeyExpressions = Lists.newArrayListWithExpectedSize(childrenWithoutNulls.size()); - coercedKeyExpressions.add(firstChild); - for (int i = 1; i < childrenWithoutNulls.size(); i++) { - try { - Expression rhs = BaseExpression.coerce(firstChild, childrenWithoutNulls.get(i), - CompareOperator.EQUAL, rowKeyOrderOptimizable); - coercedKeyExpressions.add(rhs); - } catch (SQLException e) { - // Type mismatch exception or invalid data exception. - // Ignore and filter the element from the list and it means it cannot possibly - // be in the list. If list is empty, we'll throw the last exception we ignored, - // as this is an error condition. - sqlE = e; - } - } - if (coercedKeyExpressions.size() <= 1 ) { - if(nullInList || sqlE == null){ - // In case of after removing nulls there is no remaining element in the IN list - return LiteralExpression.newConstant(false); - } else { - throw sqlE; - } - } - if (coercedKeyExpressions.size() == 2) { - return ComparisonExpression.create(isNegate ? CompareOperator.NOT_EQUAL : CompareOperator.EQUAL, - coercedKeyExpressions, ptr, rowKeyOrderOptimizable); - } - Expression expression = new InListExpression(coercedKeyExpressions, rowKeyOrderOptimizable); - if (isNegate) { - expression = NotExpression.create(expression, ptr); - } - if (ExpressionUtil.isConstant(expression)) { - return ExpressionUtil.getConstantExpression(expression, ptr); - } - return expression; + if (childrenWithoutNulls.size() == 2 && !nullInList) { + return ComparisonExpression.create( + isNegate ? 
CompareOperator.NOT_EQUAL : CompareOperator.EQUAL, childrenWithoutNulls, ptr, + rowKeyOrderOptimizable); } - public InListExpression() { + SQLException sqlE = null; + List coercedKeyExpressions = + Lists.newArrayListWithExpectedSize(childrenWithoutNulls.size()); + coercedKeyExpressions.add(firstChild); + for (int i = 1; i < childrenWithoutNulls.size(); i++) { + try { + Expression rhs = BaseExpression.coerce(firstChild, childrenWithoutNulls.get(i), + CompareOperator.EQUAL, rowKeyOrderOptimizable); + coercedKeyExpressions.add(rhs); + } catch (SQLException e) { + // Type mismatch exception or invalid data exception. + // Ignore and filter the element from the list and it means it cannot possibly + // be in the list. If list is empty, we'll throw the last exception we ignored, + // as this is an error condition. + sqlE = e; + } } - - @VisibleForTesting - protected InListExpression(List values) { - this.children = Collections.emptyList(); - this.values = Sets.newHashSet(values); + if (coercedKeyExpressions.size() <= 1) { + if (nullInList || sqlE == null) { + // In case of after removing nulls there is no remaining element in the IN list + return LiteralExpression.newConstant(false); + } else { + throw sqlE; + } } - - public InListExpression(List keyExpressions, boolean rowKeyOrderOptimizable) { - super(keyExpressions.get(0)); - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - Expression firstChild = keyExpressions.get(0); - this.keyExpressions = keyExpressions.subList(1, keyExpressions.size()); - Set values = Sets.newHashSetWithExpectedSize(keyExpressions.size()-1); - Integer maxLength = firstChild.getDataType().isFixedWidth() ? firstChild.getMaxLength() : null; - int fixedWidth = -1; - boolean isFixedLength = true; - for (int i = 1; i < keyExpressions.size(); i++) { - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - Expression child = keyExpressions.get(i); - child.evaluate(null, ptr); - if (ptr.getLength() > 0) { // filter null as it has no impact - if (rowKeyOrderOptimizable) { - firstChild.getDataType().pad(ptr, maxLength, firstChild.getSortOrder()); - } else if (maxLength != null) { - byte[] paddedBytes = StringUtil.padChar(ByteUtil.copyKeyBytesIfNecessary(ptr), maxLength); - ptr.set(paddedBytes); - } - if (values.add(ptr)) { - int length = ptr.getLength(); - if (fixedWidth == -1) { - fixedWidth = length; - } else { - isFixedLength &= fixedWidth == length; - } - - valuesByteLength += ptr.getLength(); - } - } - } - this.fixedWidth = isFixedLength ? fixedWidth : -1; - // Sort values by byte value so we can get min/max easily - ImmutableBytesPtr[] valuesArray = values.toArray(new ImmutableBytesPtr[values.size()]); - Arrays.sort(valuesArray, ByteUtil.BYTES_PTR_COMPARATOR); - if (values.isEmpty()) { - this.minValue = ByteUtil.EMPTY_BYTE_ARRAY_PTR; - this.maxValue = ByteUtil.EMPTY_BYTE_ARRAY_PTR; - this.values = Collections.emptySet(); - } else { - this.minValue = valuesArray[0]; - this.maxValue = valuesArray[valuesArray.length-1]; - // Use LinkedHashSet on client-side so that we don't need to serialize the - // minValue and maxValue but can infer them based on the first and last position. - this.values = new LinkedHashSet(Arrays.asList(valuesArray)); - } - this.hashCodeSet = false; + if (coercedKeyExpressions.size() == 2) { + return ComparisonExpression.create( + isNegate ? 
CompareOperator.NOT_EQUAL : CompareOperator.EQUAL, coercedKeyExpressions, ptr, + rowKeyOrderOptimizable); } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getChild().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { // null IN (...) is always null - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; + Expression expression = new InListExpression(coercedKeyExpressions, rowKeyOrderOptimizable); + if (isNegate) { + expression = NotExpression.create(expression, ptr); + } + if (ExpressionUtil.isConstant(expression)) { + return ExpressionUtil.getConstantExpression(expression, ptr); + } + return expression; + } + + public InListExpression() { + } + + @VisibleForTesting + protected InListExpression(List values) { + this.children = Collections.emptyList(); + this.values = Sets.newHashSet(values); + } + + public InListExpression(List keyExpressions, boolean rowKeyOrderOptimizable) { + super(keyExpressions.get(0)); + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + Expression firstChild = keyExpressions.get(0); + this.keyExpressions = keyExpressions.subList(1, keyExpressions.size()); + Set values = Sets.newHashSetWithExpectedSize(keyExpressions.size() - 1); + Integer maxLength = firstChild.getDataType().isFixedWidth() ? firstChild.getMaxLength() : null; + int fixedWidth = -1; + boolean isFixedLength = true; + for (int i = 1; i < keyExpressions.size(); i++) { + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + Expression child = keyExpressions.get(i); + child.evaluate(null, ptr); + if (ptr.getLength() > 0) { // filter null as it has no impact + if (rowKeyOrderOptimizable) { + firstChild.getDataType().pad(ptr, maxLength, firstChild.getSortOrder()); + } else if (maxLength != null) { + byte[] paddedBytes = StringUtil.padChar(ByteUtil.copyKeyBytesIfNecessary(ptr), maxLength); + ptr.set(paddedBytes); } - if (values.contains(ptr)) { - ptr.set(PDataType.TRUE_BYTES); - return true; + if (values.add(ptr)) { + int length = ptr.getLength(); + if (fixedWidth == -1) { + fixedWidth = length; + } else { + isFixedLength &= fixedWidth == length; + } + + valuesByteLength += ptr.getLength(); } - ptr.set(PDataType.FALSE_BYTES); - return true; + } } - - @Override - public int hashCode() { - if (!hashCodeSet) { - final int prime = 31; - int result = 1; - result = prime * result + children.hashCode() + values.hashCode(); - hashCode = result; - hashCodeSet = true; - } - return hashCode; + this.fixedWidth = isFixedLength ? fixedWidth : -1; + // Sort values by byte value so we can get min/max easily + ImmutableBytesPtr[] valuesArray = values.toArray(new ImmutableBytesPtr[values.size()]); + Arrays.sort(valuesArray, ByteUtil.BYTES_PTR_COMPARATOR); + if (values.isEmpty()) { + this.minValue = ByteUtil.EMPTY_BYTE_ARRAY_PTR; + this.maxValue = ByteUtil.EMPTY_BYTE_ARRAY_PTR; + this.values = Collections.emptySet(); + } else { + this.minValue = valuesArray[0]; + this.maxValue = valuesArray[valuesArray.length - 1]; + // Use LinkedHashSet on client-side so that we don't need to serialize the + // minValue and maxValue but can infer them based on the first and last position. 
+ this.values = new LinkedHashSet(Arrays.asList(valuesArray)); } + this.hashCodeSet = false; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - InListExpression other = (InListExpression)obj; - if (!children.equals(other.children) || !values.equals(other.values)) return false; - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getChild().evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + if (ptr.getLength() == 0) { // null IN (...) is always null + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - - private int readValue(DataInput input, byte[] valuesBytes, int offset, ImmutableBytesPtr ptr) throws IOException { - int valueLen = fixedWidth == -1 ? WritableUtils.readVInt(input) : fixedWidth; - values.add(new ImmutableBytesPtr(valuesBytes,offset,valueLen)); - return offset + valueLen; + if (values.contains(ptr)) { + ptr.set(PDataType.TRUE_BYTES); + return true; } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release - fixedWidth = WritableUtils.readVInt(input); - byte[] valuesBytes = Bytes.readByteArray(input); - valuesByteLength = valuesBytes.length; - int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth; - // TODO: consider using a regular HashSet as we never serialize from the server-side - values = Sets.newLinkedHashSetWithExpectedSize(len); - hashCodeSet = false; - int offset = 0; - int i = 0; - if (i < len) { - offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr()); - while (++i < len-1) { - offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr()); - } - if (i < len) { - offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr()); - } else { - maxValue = minValue; - } - } else { - minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY); - } + ptr.set(PDataType.FALSE_BYTES); + return true; + } + + @Override + public int hashCode() { + if (!hashCodeSet) { + final int prime = 31; + int result = 1; + result = prime * result + children.hashCode() + values.hashCode(); + hashCode = result; + hashCodeSet = true; } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - output.writeBoolean(false); // Unused, but left for b/w compat. 
TODO: remove in next major release - WritableUtils.writeVInt(output, fixedWidth); - WritableUtils.writeVInt(output, valuesByteLength); - for (ImmutableBytesPtr ptr : values) { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - if (fixedWidth == -1) { - WritableUtils.writeVInt(output, values.size()); - for (ImmutableBytesPtr ptr : values) { - WritableUtils.writeVInt(output, ptr.getLength()); - } - } + return hashCode; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + InListExpression other = (InListExpression) obj; + if (!children.equals(other.children) || !values.equals(other.values)) return false; + return true; + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + private int readValue(DataInput input, byte[] valuesBytes, int offset, ImmutableBytesPtr ptr) + throws IOException { + int valueLen = fixedWidth == -1 ? WritableUtils.readVInt(input) : fixedWidth; + values.add(new ImmutableBytesPtr(valuesBytes, offset, valueLen)); + return offset + valueLen; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release + fixedWidth = WritableUtils.readVInt(input); + byte[] valuesBytes = Bytes.readByteArray(input); + valuesByteLength = valuesBytes.length; + int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth; + // TODO: consider using a regular HashSet as we never serialize from the server-side + values = Sets.newLinkedHashSetWithExpectedSize(len); + hashCodeSet = false; + int offset = 0; + int i = 0; + if (i < len) { + offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr()); + while (++i < len - 1) { + offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr()); + } + if (i < len) { + offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr()); + } else { + maxValue = minValue; + } + } else { + minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY); } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + output.writeBoolean(false); // Unused, but left for b/w compat. 
TODO: remove in next major + // release + WritableUtils.writeVInt(output, fixedWidth); + WritableUtils.writeVInt(output, valuesByteLength); + for (ImmutableBytesPtr ptr : values) { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); } - - public List getKeyExpressions() { - return keyExpressions; + if (fixedWidth == -1) { + WritableUtils.writeVInt(output, values.size()); + for (ImmutableBytesPtr ptr : values) { + WritableUtils.writeVInt(output, ptr.getLength()); + } } - - public ImmutableBytesWritable getMinKey() { - return minValue; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } - - public ImmutableBytesWritable getMaxKey() { - return maxValue; + return t; + } + + public List getKeyExpressions() { + return keyExpressions; + } + + public ImmutableBytesWritable getMinKey() { + return minValue; + } + + public ImmutableBytesWritable getMaxKey() { + return maxValue; + } + + @Override + public String toString() { + int maxToStringLen = 200; + Expression firstChild = children.get(0); + PDataType type = firstChild.getDataType(); + StringBuilder buf = new StringBuilder(firstChild + " IN ("); + for (ImmutableBytesPtr value : values) { + ImmutableBytesWritable currValue = value; + if ( + firstChild.getSortOrder() != null + && !firstChild.getSortOrder().equals(SortOrder.getDefault()) + ) { + // if we have to invert the bytes create a new ImmutableBytesWritable so that the + // original value is not changed + currValue = new ImmutableBytesWritable(value); + type.coerceBytes(currValue, type, firstChild.getSortOrder(), SortOrder.getDefault()); + } + buf.append(type.toStringLiteral(currValue, null)); + buf.append(','); + if (buf.length() >= maxToStringLen) { + buf.append("... "); + break; + } } + buf.setCharAt(buf.length() - 1, ')'); + return buf.toString(); + } + + public InListExpression clone(List l) { + return new InListExpression(l, this.rowKeyOrderOptimizable); + } + + /** + * get list of InListColumnKeyValuePair with a PK ordered structure + * @param children children from rvc + * @return the list of InListColumnKeyValuePair + */ + public static List + getSortedInListColumnKeyValuePair(List children) { + List inListColumnKeyValuePairList = new ArrayList<>(); + int numberOfColumns = 0; + + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (i == 0) { + numberOfColumns = child.getChildren().size(); + for (int j = 0; j < child.getChildren().size(); j++) { + if (child.getChildren().get(j) instanceof RowKeyColumnExpression) { + RowKeyColumnExpression rowKeyColumnExpression = + (RowKeyColumnExpression) child.getChildren().get(j); + InListColumnKeyValuePair inListColumnKeyValuePair = + new InListColumnKeyValuePair(rowKeyColumnExpression); + inListColumnKeyValuePairList.add(inListColumnKeyValuePair); + } else { + // if one of the columns is not part of the pk, we ignore. + return null; + } + } + } else { + if (numberOfColumns != child.getChildren().size()) { + // if the number of the PK columns doesn't match number of values, + // it should not sort it in PK position. 
+ return null; + } - @Override - public String toString() { - int maxToStringLen = 200; - Expression firstChild = children.get(0); - PDataType type = firstChild.getDataType(); - StringBuilder buf = new StringBuilder(firstChild + " IN ("); - for (ImmutableBytesPtr value : values) { - ImmutableBytesWritable currValue = value; - if (firstChild.getSortOrder() != null && !firstChild.getSortOrder().equals(SortOrder.getDefault())) { - // if we have to invert the bytes create a new ImmutableBytesWritable so that the - // original value is not changed - currValue = new ImmutableBytesWritable(value); - type.coerceBytes(currValue, type, firstChild.getSortOrder(), - SortOrder.getDefault()); - } - buf.append(type.toStringLiteral(currValue, null)); - buf.append(','); - if (buf.length() >= maxToStringLen) { - buf.append("... "); - break; - } + for (int j = 0; j < child.getChildren().size(); j++) { + LiteralExpression literalExpression = (LiteralExpression) child.getChildren().get(j); + inListColumnKeyValuePairList.get(j).addToLiteralExpressionList(literalExpression); } - buf.setCharAt(buf.length()-1,')'); - return buf.toString(); + } } - - public InListExpression clone(List l) { - return new InListExpression(l, this.rowKeyOrderOptimizable); + Collections.sort(inListColumnKeyValuePairList); + return inListColumnKeyValuePairList; + } + + /** + * get a PK ordered Expression RowValueConstructor + * @param inListColumnKeyValuePairList the object stores RowKeyColumnExpression and List of + * LiteralExpression + * @param numberOfRows number of literalExpressions + * @return the new RowValueConstructorExpression with PK ordered expressions + */ + public static List getSortedRowValueConstructorExpressionList( + List inListColumnKeyValuePairList, boolean isStateless, + int numberOfRows) { + List l = new ArrayList<>(); + // reconstruct columns + List keyExpressions = new ArrayList<>(); + for (int i = 0; i < inListColumnKeyValuePairList.size(); i++) { + keyExpressions.add(inListColumnKeyValuePairList.get(i).getRowKeyColumnExpression()); } + l.add(new RowValueConstructorExpression(keyExpressions, isStateless)); - /** - * get list of InListColumnKeyValuePair with a PK ordered structure - * @param children children from rvc - * @return the list of InListColumnKeyValuePair - */ - public static List getSortedInListColumnKeyValuePair(List children) { - List inListColumnKeyValuePairList = new ArrayList<>(); - int numberOfColumns = 0; - - for (int i = 0; i < children.size(); i++) { - Expression child = children.get(i); - if (i == 0) { - numberOfColumns = child.getChildren().size(); - for (int j = 0; j < child.getChildren().size(); j++) { - if (child.getChildren().get(j) instanceof RowKeyColumnExpression) { - RowKeyColumnExpression rowKeyColumnExpression = - (RowKeyColumnExpression)child.getChildren().get(j); - InListColumnKeyValuePair inListColumnKeyValuePair = - new InListColumnKeyValuePair(rowKeyColumnExpression); - inListColumnKeyValuePairList.add(inListColumnKeyValuePair); - } else { - // if one of the columns is not part of the pk, we ignore. - return null; - } - } - } else { - if (numberOfColumns != child.getChildren().size()) { - // if the number of the PK columns doesn't match number of values, - // it should not sort it in PK position. 
- return null; - } - - for (int j = 0; j < child.getChildren().size(); j++) { - LiteralExpression literalExpression = (LiteralExpression) child.getChildren().get(j); - inListColumnKeyValuePairList.get(j).addToLiteralExpressionList(literalExpression); - } - } - } - Collections.sort(inListColumnKeyValuePairList); - return inListColumnKeyValuePairList; - } + // reposition to corresponding values + List> valueExpressionsList = new ArrayList<>(); - /** - * get a PK ordered Expression RowValueConstructor - * @param inListColumnKeyValuePairList the object stores RowKeyColumnExpression and List of LiteralExpression - * @param isStateless - * @param numberOfRows number of literalExpressions - * @return the new RowValueConstructorExpression with PK ordered expressions - */ - public static List getSortedRowValueConstructorExpressionList( - List inListColumnKeyValuePairList, boolean isStateless, int numberOfRows) { - List l = new ArrayList<>(); - //reconstruct columns - List keyExpressions = new ArrayList<>(); - for (int i = 0; i < inListColumnKeyValuePairList.size(); i++) { - keyExpressions.add(inListColumnKeyValuePairList.get(i).getRowKeyColumnExpression()); + for (int j = 0; j < inListColumnKeyValuePairList.size(); j++) { + List valueList = + inListColumnKeyValuePairList.get(j).getLiteralExpressionList(); + for (int i = 0; i < numberOfRows; i++) { + if (j == 0) { + valueExpressionsList.add(new ArrayList()); } - l.add(new RowValueConstructorExpression(keyExpressions,isStateless)); - - //reposition to corresponding values - List> valueExpressionsList = new ArrayList<>(); - - for (int j = 0; j < inListColumnKeyValuePairList.size(); j++) { - List valueList = inListColumnKeyValuePairList.get(j).getLiteralExpressionList(); - for (int i = 0; i < numberOfRows; i++) { - if (j == 0) { - valueExpressionsList.add(new ArrayList()); - } - valueExpressionsList.get(i).add(valueList.get(i)); - } - } - for (List valueExpressions: valueExpressionsList) { - l.add(new RowValueConstructorExpression(valueExpressions, isStateless)); - } - return l; + valueExpressionsList.get(i).add(valueList.get(i)); + } } + for (List valueExpressions : valueExpressionsList) { + l.add(new RowValueConstructorExpression(valueExpressions, isStateless)); + } + return l; + } - public static class InListColumnKeyValuePair implements Comparable { - RowKeyColumnExpression rowKeyColumnExpression; - List literalExpressionList; + public static class InListColumnKeyValuePair implements Comparable { + RowKeyColumnExpression rowKeyColumnExpression; + List literalExpressionList; - public InListColumnKeyValuePair(RowKeyColumnExpression rowKeyColumnExpression) { - this.rowKeyColumnExpression = rowKeyColumnExpression; - this.literalExpressionList = new ArrayList<>(); - } + public InListColumnKeyValuePair(RowKeyColumnExpression rowKeyColumnExpression) { + this.rowKeyColumnExpression = rowKeyColumnExpression; + this.literalExpressionList = new ArrayList<>(); + } - public RowKeyColumnExpression getRowKeyColumnExpression() { - return this.rowKeyColumnExpression; - } + public RowKeyColumnExpression getRowKeyColumnExpression() { + return this.rowKeyColumnExpression; + } - public void addToLiteralExpressionList(LiteralExpression literalExpression) { - this.literalExpressionList.add(literalExpression); - } + public void addToLiteralExpressionList(LiteralExpression literalExpression) { + this.literalExpressionList.add(literalExpression); + } - public List getLiteralExpressionList() { - return this.literalExpressionList; - } + public List getLiteralExpressionList() 
{ + return this.literalExpressionList; + } - @Override - public int compareTo(InListColumnKeyValuePair o) { - return rowKeyColumnExpression.getPosition() - o.getRowKeyColumnExpression().getPosition(); - } + @Override + public int compareTo(InListColumnKeyValuePair o) { + return rowKeyColumnExpression.getPosition() - o.getRowKeyColumnExpression().getPosition(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/IsNullExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/IsNullExpression.java index 09f9a6e36d8..9230a6f306e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/IsNullExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/IsNullExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,132 +30,131 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.ExpressionUtil; - /** - * * Implementation of IS NULL and IS NOT NULL expression - * */ public class IsNullExpression extends BaseSingleExpression { - private boolean isNegate; - - public static Expression create(Expression child, boolean negate, ImmutableBytesWritable ptr) throws SQLException { - if (!child.isNullable()) { - return LiteralExpression.newConstant(negate, PBoolean.INSTANCE, child.getDeterminism()); - } - if (ExpressionUtil.isConstant(child)) { - boolean evaluated = child.evaluate(null, ptr); - return LiteralExpression.newConstant(negate ^ (!evaluated || ptr.getLength() == 0), PBoolean.INSTANCE, child.getDeterminism()); - } - return new IsNullExpression(child, negate); - } - - public IsNullExpression() { - } - - private IsNullExpression(Expression expression, boolean negate) { - super(expression); - this.isNegate = negate; - } + private boolean isNegate; - public IsNullExpression(List children, boolean negate) { - super(children); - this.isNegate = negate; - } + public static Expression create(Expression child, boolean negate, ImmutableBytesWritable ptr) + throws SQLException { + if (!child.isNullable()) { + return LiteralExpression.newConstant(negate, PBoolean.INSTANCE, child.getDeterminism()); + } + if (ExpressionUtil.isConstant(child)) { + boolean evaluated = child.evaluate(null, ptr); + return LiteralExpression.newConstant(negate ^ (!evaluated || ptr.getLength() == 0), + PBoolean.INSTANCE, child.getDeterminism()); + } + return new IsNullExpression(child, negate); + } - public IsNullExpression clone(List children) { - return new IsNullExpression(children, this.isNegate()); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - boolean evaluated = getChild().evaluate(tuple, ptr); - if (evaluated) { - ptr.set(isNegate ^ ptr.getLength() == 0 ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); - return true; - } - if (tuple.isImmutable()) { - ptr.set(isNegate ? 
PDataType.FALSE_BYTES : PDataType.TRUE_BYTES); - return true; - } - - return false; - } + public IsNullExpression() { + } - public boolean isNegate() { - return isNegate; - } + private IsNullExpression(Expression expression, boolean negate) { + super(expression); + this.isNegate = negate; + } - @Override - public boolean contains(Expression other) { - if (!(other instanceof ComparisonExpression || other instanceof IsNullExpression)) { - return false; - } - if (!this.getChildren().get(0).equals(other.getChildren().get(0))) { - return false; - } - if (other instanceof ComparisonExpression) { - return isNegate; - } - return isNegate == ((IsNullExpression) other).isNegate; - } + public IsNullExpression(List children, boolean negate) { + super(children); + this.isNegate = negate; + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - isNegate = input.readBoolean(); - } + public IsNullExpression clone(List children) { + return new IsNullExpression(children, this.isNegate()); + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - output.writeBoolean(isNegate); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + boolean evaluated = getChild().evaluate(tuple, ptr); + if (evaluated) { + ptr.set(isNegate ^ ptr.getLength() == 0 ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + return true; } - - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + if (tuple.isImmutable()) { + ptr.set(isNegate ? PDataType.FALSE_BYTES : PDataType.TRUE_BYTES); + return true; } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + + return false; + } + + public boolean isNegate() { + return isNegate; + } + + @Override + public boolean contains(Expression other) { + if (!(other instanceof ComparisonExpression || other instanceof IsNullExpression)) { + return false; } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(children.get(0).toString()); - if (isNegate) { - buf.append(" IS NOT NULL"); - } else { - buf.append(" IS NULL"); - } - return buf.toString(); + if (!this.getChildren().get(0).equals(other.getChildren().get(0))) { + return false; } - - @Override - public boolean requiresFinalEvaluation() { - return super.requiresFinalEvaluation() || !this.isNegate(); + if (other instanceof ComparisonExpression) { + return isNegate; } + return isNegate == ((IsNullExpression) other).isNegate; + } - @Override - public boolean equals(Object o) { - if (!super.equals(o)) { - return false; - } - IsNullExpression that = (IsNullExpression) o; - return isNegate == that.isNegate; - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + isNegate = input.readBoolean(); + } - @Override - public int hashCode() { - int result = super.hashCode(); - result = 31 * result + (isNegate ? 
1 : 0); - return result; - } + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + output.writeBoolean(isNegate); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); + } + return t; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(children.get(0).toString()); + if (isNegate) { + buf.append(" IS NOT NULL"); + } else { + buf.append(" IS NULL"); + } + return buf.toString(); + } + + @Override + public boolean requiresFinalEvaluation() { + return super.requiresFinalEvaluation() || !this.isNegate(); + } + + @Override + public boolean equals(Object o) { + if (!super.equals(o)) { + return false; + } + IsNullExpression that = (IsNullExpression) o; + return isNegate == that.isNegate; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (isNegate ? 1 : 0); + return result; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java index f6a9e046162..8f9b49aa0cc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,116 +31,118 @@ import org.apache.phoenix.schema.tuple.ValueGetterTuple; import org.apache.phoenix.util.SchemaUtil; - /** - * * Class to access a column value stored in a KeyValue - * - * * @since 0.1 */ public class KeyValueColumnExpression extends ColumnExpression { - private byte[] cf; - private byte[] cq; - private String displayName; // client-side only. - - public KeyValueColumnExpression() { - } - - public KeyValueColumnExpression(final byte[] cf, final byte[] cq) { - this.cf = cf; - this.cq = cq; - } - - public KeyValueColumnExpression(PColumn column) { - super(column); - this.cf = column.getFamilyName().getBytes(); - // for backward compatibility since older tables won't have columnQualifierBytes in their metadata - this.cq = column.getColumnQualifierBytes() != null ? column.getColumnQualifierBytes() : column.getName().getBytes(); - this.displayName = column.getName().getString(); - } - - public KeyValueColumnExpression(PColumn column, String displayName) { - super(column); - this.cf = column.getFamilyName().getBytes(); - // for backward compatibility since older tables won't have columnQualifierBytes in their metadata - this.cq = column.getColumnQualifierBytes() != null ? 
column.getColumnQualifierBytes() : column.getName().getBytes(); - this.displayName = displayName; - } - - public KeyValueColumnExpression(PDatum column, byte[] cf, byte[] cq) { - super(column); - this.cf = cf; - this.cq = cq; - } - - public byte[] getColumnFamily() { - return cf; - } - - public byte[] getColumnQualifier() { - return cq; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Arrays.hashCode(cf); - result = prime * result + Arrays.hashCode(cq); - return result; - } - - // TODO: assumes single table - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - KeyValueColumnExpression other = (KeyValueColumnExpression)obj; - if (!Arrays.equals(cf, other.cf)) return false; - if (!Arrays.equals(cq, other.cq)) return false; - return true; - } - - @Override - public String toString() { - if (displayName == null) { - displayName = SchemaUtil.getColumnDisplayName(cf, cq); - } - return displayName; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return tuple.getValue(cf, cq, ptr); + private byte[] cf; + private byte[] cq; + private String displayName; // client-side only. + + public KeyValueColumnExpression() { + } + + public KeyValueColumnExpression(final byte[] cf, final byte[] cq) { + this.cf = cf; + this.cq = cq; + } + + public KeyValueColumnExpression(PColumn column) { + super(column); + this.cf = column.getFamilyName().getBytes(); + // for backward compatibility since older tables won't have columnQualifierBytes in their + // metadata + this.cq = column.getColumnQualifierBytes() != null + ? column.getColumnQualifierBytes() + : column.getName().getBytes(); + this.displayName = column.getName().getString(); + } + + public KeyValueColumnExpression(PColumn column, String displayName) { + super(column); + this.cf = column.getFamilyName().getBytes(); + // for backward compatibility since older tables won't have columnQualifierBytes in their + // metadata + this.cq = column.getColumnQualifierBytes() != null + ? 
column.getColumnQualifierBytes() + : column.getName().getBytes(); + this.displayName = displayName; + } + + public KeyValueColumnExpression(PDatum column, byte[] cf, byte[] cq) { + super(column); + this.cf = cf; + this.cq = cq; + } + + public byte[] getColumnFamily() { + return cf; + } + + public byte[] getColumnQualifier() { + return cq; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(cf); + result = prime * result + Arrays.hashCode(cq); + return result; + } + + // TODO: assumes single table + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + KeyValueColumnExpression other = (KeyValueColumnExpression) obj; + if (!Arrays.equals(cf, other.cf)) return false; + if (!Arrays.equals(cq, other.cq)) return false; + return true; + } + + @Override + public String toString() { + if (displayName == null) { + displayName = SchemaUtil.getColumnDisplayName(cf, cq); } - - public boolean evaluateUnsafe(Tuple tuple, ImmutableBytesWritable ptr) { - if (tuple instanceof ValueGetterTuple) { - return ((ValueGetterTuple) tuple).getValueUnsafe(cf, cq, ptr); - } else { - return tuple.getValue(cf, cq, ptr); - } - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - cf = Bytes.readByteArray(input); - cq = Bytes.readByteArray(input); - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - Bytes.writeByteArray(output, cf); - Bytes.writeByteArray(output, cq); - } - - @Override - public T accept(ExpressionVisitor visitor) { - return visitor.visit(this); + return displayName; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return tuple.getValue(cf, cq, ptr); + } + + public boolean evaluateUnsafe(Tuple tuple, ImmutableBytesWritable ptr) { + if (tuple instanceof ValueGetterTuple) { + return ((ValueGetterTuple) tuple).getValueUnsafe(cf, cq, ptr); + } else { + return tuple.getValue(cf, cq, ptr); } + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + cf = Bytes.readByteArray(input); + cq = Bytes.readByteArray(input); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + Bytes.writeByteArray(output, cf); + Bytes.writeByteArray(output, cq); + } + + @Override + public T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LikeExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LikeExpression.java index 6386a2372d1..f0cc9d0185c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LikeExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LikeExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,335 +31,328 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** - * - * Implementation for LIKE operation where the first child expression is the string - * and the second is the pattern. The pattern supports '_' character for single - * character wildcard match and '%' for zero or more character match. where these - * characters may be escaped by preceding them with a '\'. - * - * Example: foo LIKE 'ab%' will match a row in which foo starts with 'ab' - * - * + * Implementation for LIKE operation where the first child expression is the string and the second + * is the pattern. The pattern supports '_' character for single character wildcard match and '%' + * for zero or more character match. where these characters may be escaped by preceding them with a + * '\'. Example: foo LIKE 'ab%' will match a row in which foo starts with 'ab' * @since 0.1 */ public abstract class LikeExpression extends BaseCompoundExpression { - private static final Logger LOGGER = LoggerFactory.getLogger(LikeExpression.class); - - private static final String ZERO_OR_MORE = "\\E.*\\Q"; - private static final String ANY_ONE = "\\E.\\Q"; - - private static final String[] LIKE_ESCAPE_SEQS; - private static final String[] LIKE_UNESCAPED_SEQS; - - static { - LIKE_ESCAPE_SEQS = StringUtil.getLikeEscapeSeqs(); - LIKE_UNESCAPED_SEQS = StringUtil.getLikeUnescapedSeqs(); + private static final Logger LOGGER = LoggerFactory.getLogger(LikeExpression.class); + + private static final String ZERO_OR_MORE = "\\E.*\\Q"; + private static final String ANY_ONE = "\\E.\\Q"; + + private static final String[] LIKE_ESCAPE_SEQS; + private static final String[] LIKE_UNESCAPED_SEQS; + + static { + LIKE_ESCAPE_SEQS = StringUtil.getLikeEscapeSeqs(); + LIKE_UNESCAPED_SEQS = StringUtil.getLikeUnescapedSeqs(); + } + + /** + * Store whether this like expression has to be case sensitive or not. + */ + private LikeType likeType; + + public static String unescapeLike(String s) { + return StringUtil.replace(s, LIKE_ESCAPE_SEQS, LIKE_UNESCAPED_SEQS); + } + + /** + * @return the substring of s for which we have a literal string that we can potentially use to + * set the start/end key, or null if there is none. + */ + public static String getStartsWithPrefix(String s) { + int i = indexOfWildcard(s); + return i == -1 ? s : s.substring(0, i); + } + + public static boolean hasWildcards(String s) { + return indexOfWildcard(s) != -1; + } + + /** + * Replace unescaped '*' and '?' in s with '%' and '_' respectively such that the returned string + * may be used in a LIKE expression. Provides an alternate way of expressing a LIKE pattern which + * is more friendly for wildcard matching when the source string is likely to contain an '%' or + * '_' character. + * @param s wildcard pattern that may use '*' for multi character match and '?' 
for single + * character match, escaped by the backslash character + */ + public static String wildCardToLike(String s) { + s = StringUtil.escapeLike(s); + StringBuilder buf = new StringBuilder(); + // Look for another unprotected * or ? in the middle + int i = 0; + int j = 0; + while (true) { + int pctPos = s.indexOf(StringUtil.MULTI_CHAR_WILDCARD, i); + int underPos = s.indexOf(StringUtil.SINGLE_CHAR_WILDCARD, i); + if (pctPos == -1 && underPos == -1) { + return i == 0 ? s : buf.append(s.substring(i)).toString(); + } + i = pctPos; + if (underPos != -1 && (i == -1 || underPos < i)) { + i = underPos; + } + + if (i > 0 && s.charAt(i - 1) == '\\') { + // If we found protection then keep looking + buf.append(s.substring(j, i - 1)); + buf.append(s.charAt(i)); + } else { + // We found an unprotected % or _ in the middle + buf.append(s.substring(j, i)); + buf.append(s.charAt(i) == StringUtil.MULTI_CHAR_WILDCARD + ? StringUtil.MULTI_CHAR_LIKE + : StringUtil.SINGLE_CHAR_LIKE); + } + j = ++i; } + } - /** - * Store whether this like expression has to be case sensitive or not. - */ - private LikeType likeType; - - public static String unescapeLike(String s) { - return StringUtil.replace(s, LIKE_ESCAPE_SEQS, LIKE_UNESCAPED_SEQS); + public static int indexOfWildcard(String s) { + // Look for another unprotected % or _ in the middle + if (s == null) { + return -1; } - - /** - * @return the substring of s for which we have a literal string - * that we can potentially use to set the start/end key, or null - * if there is none. - */ - public static String getStartsWithPrefix(String s) { - int i = indexOfWildcard(s); - return i == -1 ? s : s.substring(0,i); + int i = 0; + while (true) { + int pctPos = s.indexOf(StringUtil.MULTI_CHAR_LIKE, i); + int underPos = s.indexOf(StringUtil.SINGLE_CHAR_LIKE, i); + if (pctPos == -1 && underPos == -1) { + return -1; + } + i = pctPos; + if (underPos != -1 && (i == -1 || underPos < i)) { + i = underPos; + } + + if (i > 0 && s.charAt(i - 1) == '\\') { + // If we found protection then keep looking + i++; + } else { + // We found an unprotected % or _ in the middle + return i; + } } - - public static boolean hasWildcards(String s) { - return indexOfWildcard(s) != -1; + } + + public static String toPattern(String s) { + StringBuilder sb = new StringBuilder(s.length()); + + // From the JDK doc: \Q and \E protect everything between them + sb.append("\\Q"); + boolean wasSlash = false; + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + if (wasSlash) { + sb.append(c); + wasSlash = false; + } else if (c == StringUtil.SINGLE_CHAR_LIKE) { + sb.append(ANY_ONE); + } else if (c == StringUtil.MULTI_CHAR_LIKE) { + sb.append(ZERO_OR_MORE); + } else if (c == '\\') { + wasSlash = true; + } else { + sb.append(c); + } } - - /** - * Replace unescaped '*' and '?' in s with '%' and '_' respectively - * such that the returned string may be used in a LIKE expression. - * Provides an alternate way of expressing a LIKE pattern which is - * more friendly for wildcard matching when the source string is - * likely to contain an '%' or '_' character. - * @param s wildcard pattern that may use '*' for multi character - * match and '?' for single character match, escaped by the backslash - * character - * @return replaced - */ - public static String wildCardToLike(String s) { - s = StringUtil.escapeLike(s); - StringBuilder buf = new StringBuilder(); - // Look for another unprotected * or ? 
in the middle - int i = 0; - int j = 0; - while (true) { - int pctPos = s.indexOf(StringUtil.MULTI_CHAR_WILDCARD, i); - int underPos = s.indexOf(StringUtil.SINGLE_CHAR_WILDCARD, i); - if (pctPos == -1 && underPos == -1) { - return i == 0 ? s : buf.append(s.substring(i)).toString(); - } - i = pctPos; - if (underPos != -1 && (i == -1 || underPos < i)) { - i = underPos; - } - - if (i > 0 && s.charAt(i - 1) == '\\') { - // If we found protection then keep looking - buf.append(s.substring(j,i-1)); - buf.append(s.charAt(i)); - } else { - // We found an unprotected % or _ in the middle - buf.append(s.substring(j,i)); - buf.append(s.charAt(i) == StringUtil.MULTI_CHAR_WILDCARD ? StringUtil.MULTI_CHAR_LIKE : StringUtil.SINGLE_CHAR_LIKE); - } - j = ++i; - } + sb.append("\\E"); + // Found nothing interesting + return sb.toString(); + } + + // private static String fromPattern(String s) { + // StringBuilder sb = new StringBuilder(s.length()); + // + // for (int i = 0; i < s.length(); i++) { + // if (s.substring(i).startsWith("\\Q")) { + // while (s.substring(i + "\\Q".length()).startsWith("\\E")) { + // sb.append(s.charAt(i++ + "\\Q".length())); + // } + // i+= "\\E".length(); + // } + // if (s.charAt(i) == '.') { + // if (s.charAt(i+1) == '*') { + // sb.append('%'); + // i+=2; + // } else { + // sb.append('_'); + // i++; + // } + // } + // } + // return sb.toString(); + // } + + private static final int LIKE_TYPE_INDEX = 2; + private static final LiteralExpression[] LIKE_TYPE_LITERAL = + new LiteralExpression[LikeType.values().length]; + static { + for (LikeType likeType : LikeType.values()) { + LIKE_TYPE_LITERAL[likeType.ordinal()] = LiteralExpression.newConstant(likeType.name()); } - - public static int indexOfWildcard(String s) { - // Look for another unprotected % or _ in the middle - if (s == null) { - return -1; - } - int i = 0; - while (true) { - int pctPos = s.indexOf(StringUtil.MULTI_CHAR_LIKE, i); - int underPos = s.indexOf(StringUtil.SINGLE_CHAR_LIKE, i); - if (pctPos == -1 && underPos == -1) { - return -1; - } - i = pctPos; - if (underPos != -1 && (i == -1 || underPos < i)) { - i = underPos; - } - - if (i > 0 && s.charAt(i - 1) == '\\') { - // If we found protection then keep looking - i++; - } else { - // We found an unprotected % or _ in the middle - return i; - } - } + } + private AbstractBasePattern pattern; + + public LikeExpression() { + } + + protected static List addLikeTypeChild(List children, LikeType likeType) { + List newChildren = Lists.newArrayListWithExpectedSize(children.size() + 1); + newChildren.addAll(children); + newChildren.add(LIKE_TYPE_LITERAL[likeType.ordinal()]); + return newChildren; + } + + public LikeExpression(List children) { + super(children); + init(); + } + + public LikeType getLikeType() { + return likeType; + } + + public boolean startsWithWildcard() { + return pattern != null && pattern.pattern().startsWith("\\Q\\E"); + } + + private void init() { + List children = getChildren(); + if (children.size() <= LIKE_TYPE_INDEX) { + this.likeType = LikeType.CASE_SENSITIVE; + } else { + LiteralExpression likeTypeExpression = (LiteralExpression) children.get(LIKE_TYPE_INDEX); + this.likeType = LikeType.valueOf((String) likeTypeExpression.getValue()); } - - public static String toPattern(String s) { - StringBuilder sb = new StringBuilder(s.length()); - - // From the JDK doc: \Q and \E protect everything between them - sb.append("\\Q"); - boolean wasSlash = false; - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - if (wasSlash) { - sb.append(c); - 
wasSlash = false; - } else if (c == StringUtil.SINGLE_CHAR_LIKE) { - sb.append(ANY_ONE); - } else if (c == StringUtil.MULTI_CHAR_LIKE) { - sb.append(ZERO_OR_MORE); - } else if (c == '\\') { - wasSlash = true; - } else { - sb.append(c); - } - } - sb.append("\\E"); - // Found nothing interesting - return sb.toString(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Expression e = getPatternExpression(); + if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, ptr)) { + String value = (String) PVarchar.INSTANCE.toObject(ptr, e.getDataType(), e.getSortOrder()); + pattern = compilePattern(value); } + } -// private static String fromPattern(String s) { -// StringBuilder sb = new StringBuilder(s.length()); -// -// for (int i = 0; i < s.length(); i++) { -// if (s.substring(i).startsWith("\\Q")) { -// while (s.substring(i + "\\Q".length()).startsWith("\\E")) { -// sb.append(s.charAt(i++ + "\\Q".length())); -// } -// i+= "\\E".length(); -// } -// if (s.charAt(i) == '.') { -// if (s.charAt(i+1) == '*') { -// sb.append('%'); -// i+=2; -// } else { -// sb.append('_'); -// i++; -// } -// } -// } -// return sb.toString(); -// } - - private static final int LIKE_TYPE_INDEX = 2; - private static final LiteralExpression[] LIKE_TYPE_LITERAL = new LiteralExpression[LikeType.values().length]; - static { - for (LikeType likeType : LikeType.values()) { - LIKE_TYPE_LITERAL[likeType.ordinal()] = LiteralExpression.newConstant(likeType.name()); - } - } - private AbstractBasePattern pattern; + protected abstract AbstractBasePattern compilePatternSpec(String value); - public LikeExpression() { + protected AbstractBasePattern compilePattern(String value) { + if (likeType == LikeType.CASE_SENSITIVE) { + return compilePatternSpec(toPattern(value)); + } else { + return compilePatternSpec("(?i)" + toPattern(value)); } + } - protected static List addLikeTypeChild(List children, LikeType likeType) { - List newChildren = Lists.newArrayListWithExpectedSize(children.size()+1); - newChildren.addAll(children); - newChildren.add(LIKE_TYPE_LITERAL[likeType.ordinal()]); - return newChildren; - } - - public LikeExpression(List children) { - super(children); - init(); - } - - public LikeType getLikeType () { - return likeType; - } + private Expression getStrExpression() { + return children.get(0); + } - public boolean startsWithWildcard() { - return pattern != null && pattern.pattern().startsWith("\\Q\\E"); - } + private Expression getPatternExpression() { + return children.get(1); + } - private void init() { - List children = getChildren(); - if (children.size() <= LIKE_TYPE_INDEX) { - this.likeType = LikeType.CASE_SENSITIVE; - } else { - LiteralExpression likeTypeExpression = (LiteralExpression)children.get(LIKE_TYPE_INDEX); - this.likeType = LikeType.valueOf((String)likeTypeExpression.getValue()); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Expression e = getPatternExpression(); - if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, ptr)) { - String value = (String) PVarchar.INSTANCE.toObject(ptr, e.getDataType(), e.getSortOrder()); - pattern = compilePattern(value); - } - } - - protected abstract AbstractBasePattern compilePatternSpec(String value); - - protected AbstractBasePattern compilePattern(String value) { - if (likeType == LikeType.CASE_SENSITIVE) { - return compilePatternSpec(toPattern(value)); - } else { - return compilePatternSpec("(?i)" + toPattern(value)); - } - } - - private Expression getStrExpression() { - return 
children.get(0); - } - - private Expression getPatternExpression() { - return children.get(1); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - AbstractBasePattern pattern = this.pattern; - if (pattern == null) { // TODO: don't allow? this is going to be slooowwww - if (!getPatternExpression().evaluate(tuple, ptr)) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("LIKE is FALSE: pattern is null"); - } - return false; - } - String value = (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder()); - pattern = compilePattern(value); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("LIKE pattern is expression: " + pattern.pattern()); - } - } - - Expression strExpression = getStrExpression(); - SortOrder strSortOrder = strExpression.getSortOrder(); - PVarchar strDataType = PVarchar.INSTANCE; - if (!strExpression.evaluate(tuple, ptr)) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("LIKE is FALSE: child expression is null"); - } - return false; - } - - String value = null; - if (LOGGER.isTraceEnabled()) { - value = (String) strDataType.toObject(ptr, strSortOrder); - } - strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC); - pattern.matches(ptr); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + AbstractBasePattern pattern = this.pattern; + if (pattern == null) { // TODO: don't allow? this is going to be slooowwww + if (!getPatternExpression().evaluate(tuple, ptr)) { if (LOGGER.isTraceEnabled()) { - boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue(); - LOGGER.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched); + LOGGER.trace("LIKE is FALSE: pattern is null"); } - return true; + return false; + } + String value = + (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder()); + pattern = compilePattern(value); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("LIKE pattern is expression: " + pattern.pattern()); + } } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); + Expression strExpression = getStrExpression(); + SortOrder strSortOrder = strExpression.getSortOrder(); + PVarchar strDataType = PVarchar.INSTANCE; + if (!strExpression.evaluate(tuple, ptr)) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("LIKE is FALSE: child expression is null"); + } + return false; } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); + String value = null; + if (LOGGER.isTraceEnabled()) { + value = (String) strDataType.toObject(ptr, strSortOrder); } - - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; - } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC); + pattern.matches(ptr); + if (LOGGER.isTraceEnabled()) { + boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue(); + LOGGER.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched); } - - public String getLiteralPrefix() { - if (pattern == null) { - return ""; - } - String pattern = this.pattern.pattern(); - int fromIndex = "\\Q".length(); - return pattern.substring(fromIndex, pattern.indexOf("\\E", fromIndex)); + return true; + } + + 
@Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } - public boolean endsWithOnlyWildcard() { - if (pattern == null) { - return false; - } - String pattern = this.pattern.pattern(); - String endsWith = ZERO_OR_MORE + "\\E"; - return pattern.endsWith(endsWith) && - pattern.lastIndexOf(ANY_ONE, pattern.length() - endsWith.length() - 1) == -1 && - pattern.lastIndexOf(ZERO_OR_MORE, pattern.length() - endsWith.length() - 1) == -1; + public String getLiteralPrefix() { + if (pattern == null) { + return ""; } - - @Override - public String toString() { - return (children.get(0) + " LIKE " + children.get(1)); + String pattern = this.pattern.pattern(); + int fromIndex = "\\Q".length(); + return pattern.substring(fromIndex, pattern.indexOf("\\E", fromIndex)); + } + + public boolean endsWithOnlyWildcard() { + if (pattern == null) { + return false; } - - abstract public LikeExpression clone(List children); + String pattern = this.pattern.pattern(); + String endsWith = ZERO_OR_MORE + "\\E"; + return pattern.endsWith(endsWith) + && pattern.lastIndexOf(ANY_ONE, pattern.length() - endsWith.length() - 1) == -1 + && pattern.lastIndexOf(ZERO_OR_MORE, pattern.length() - endsWith.length() - 1) == -1; + } + + @Override + public String toString() { + return (children.get(0) + " LIKE " + children.get(1)); + } + + abstract public LikeExpression clone(List children); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LiteralExpression.java index c00c7878822..fbe560db75c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LiteralExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LiteralExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,354 +34,375 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PDate; -import org.apache.phoenix.schema.types.PTime; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PhoenixArray; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - - - /** - * * Accessor for a literal value. 
- * - * * @since 0.1 */ public class LiteralExpression extends BaseTerminalExpression { - private static final LiteralExpression[] NULL_EXPRESSIONS = new LiteralExpression[Determinism.values().length]; - private static final LiteralExpression[] TYPED_NULL_EXPRESSIONS = new LiteralExpression[PDataType.values().length * Determinism.values().length]; - private static final LiteralExpression[] BOOLEAN_EXPRESSIONS = new LiteralExpression[2 * Determinism.values().length]; - - static { - for (Determinism determinism : Determinism.values()) { - NULL_EXPRESSIONS[determinism.ordinal()] = new LiteralExpression(null, determinism); - for (int i = 0; i < PDataType.values().length; i++) { - TYPED_NULL_EXPRESSIONS[i+PDataType.values().length*determinism.ordinal()] = new LiteralExpression(PDataType.values()[i], determinism); - } - BOOLEAN_EXPRESSIONS[determinism.ordinal()] = new LiteralExpression(Boolean.FALSE, - PBoolean.INSTANCE, PBoolean.INSTANCE.toBytes(Boolean.FALSE), determinism); - BOOLEAN_EXPRESSIONS[Determinism.values().length+determinism.ordinal()] = new LiteralExpression(Boolean.TRUE, PBoolean.INSTANCE, PBoolean.INSTANCE.toBytes(Boolean.TRUE), determinism); - } - } - - private Object value; - private PDataType type; - private Determinism determinism; - private byte[] byteValue; - private Integer maxLength; - private Integer scale; - private SortOrder sortOrder; - - private static LiteralExpression getNullLiteralExpression(Determinism determinism) { - return NULL_EXPRESSIONS[determinism.ordinal()] ; - } - - private static LiteralExpression getTypedNullLiteralExpression(PDataType type, Determinism determinism){ - return TYPED_NULL_EXPRESSIONS[type.ordinal()+PDataType.values().length*determinism.ordinal()]; - } - - private static LiteralExpression getBooleanLiteralExpression(Boolean bool, Determinism determinism){ - return BOOLEAN_EXPRESSIONS[ (Boolean.FALSE.equals(bool) ? 
0 : Determinism.values().length) + determinism.ordinal()]; - } + private static final LiteralExpression[] NULL_EXPRESSIONS = + new LiteralExpression[Determinism.values().length]; + private static final LiteralExpression[] TYPED_NULL_EXPRESSIONS = + new LiteralExpression[PDataType.values().length * Determinism.values().length]; + private static final LiteralExpression[] BOOLEAN_EXPRESSIONS = + new LiteralExpression[2 * Determinism.values().length]; - public static boolean isFalse(Expression child) { - if (child!=null) { - return child == BOOLEAN_EXPRESSIONS[child.getDeterminism().ordinal()]; - } - return false; + static { + for (Determinism determinism : Determinism.values()) { + NULL_EXPRESSIONS[determinism.ordinal()] = new LiteralExpression(null, determinism); + for (int i = 0; i < PDataType.values().length; i++) { + TYPED_NULL_EXPRESSIONS[i + PDataType.values().length * determinism.ordinal()] = + new LiteralExpression(PDataType.values()[i], determinism); + } + BOOLEAN_EXPRESSIONS[determinism.ordinal()] = new LiteralExpression(Boolean.FALSE, + PBoolean.INSTANCE, PBoolean.INSTANCE.toBytes(Boolean.FALSE), determinism); + BOOLEAN_EXPRESSIONS[Determinism.values().length + determinism.ordinal()] = + new LiteralExpression(Boolean.TRUE, PBoolean.INSTANCE, + PBoolean.INSTANCE.toBytes(Boolean.TRUE), determinism); + } + } + + private Object value; + private PDataType type; + private Determinism determinism; + private byte[] byteValue; + private Integer maxLength; + private Integer scale; + private SortOrder sortOrder; + + private static LiteralExpression getNullLiteralExpression(Determinism determinism) { + return NULL_EXPRESSIONS[determinism.ordinal()]; + } + + private static LiteralExpression getTypedNullLiteralExpression(PDataType type, + Determinism determinism) { + return TYPED_NULL_EXPRESSIONS[type.ordinal() + + PDataType.values().length * determinism.ordinal()]; + } + + private static LiteralExpression getBooleanLiteralExpression(Boolean bool, + Determinism determinism) { + return BOOLEAN_EXPRESSIONS[(Boolean.FALSE.equals(bool) ? 
0 : Determinism.values().length) + + determinism.ordinal()]; + } + + public static boolean isFalse(Expression child) { + if (child != null) { + return child == BOOLEAN_EXPRESSIONS[child.getDeterminism().ordinal()]; } - - public static boolean isTrue(Expression child) { - if (child!=null) { - return child == BOOLEAN_EXPRESSIONS[Determinism.values().length+child.getDeterminism().ordinal()]; - } - return false; + return false; + } + + public static boolean isTrue(Expression child) { + if (child != null) { + return child + == BOOLEAN_EXPRESSIONS[Determinism.values().length + child.getDeterminism().ordinal()]; } + return false; + } - public static boolean isBooleanNull(Expression child) { - if (child!=null) { - return child == TYPED_NULL_EXPRESSIONS[PBoolean.INSTANCE.ordinal()+PDataType.values().length*child.getDeterminism().ordinal()]; - } - return false; + public static boolean isBooleanNull(Expression child) { + if (child != null) { + return child == TYPED_NULL_EXPRESSIONS[PBoolean.INSTANCE.ordinal() + + PDataType.values().length * child.getDeterminism().ordinal()]; } + return false; + } + + public static boolean isBooleanFalseOrNull(Expression child) { + if (child != null) { + return child == BOOLEAN_EXPRESSIONS[child.getDeterminism().ordinal()] + || child == TYPED_NULL_EXPRESSIONS[PBoolean.INSTANCE.ordinal() + + PDataType.values().length * child.getDeterminism().ordinal()]; + } + return false; + } + + public static LiteralExpression newConstant(Object value) { + return newConstant(value, Determinism.ALWAYS); + } + + // TODO: cache? + public static LiteralExpression newConstant(Object value, Determinism determinism) { + if (value instanceof Boolean) { + return getBooleanLiteralExpression((Boolean) value, determinism); + } else if (value == null) { + return getNullLiteralExpression(determinism); + } + PDataType type = PDataType.fromLiteral(value); + byte[] b = type.toBytes(value); + if (type.isNull(b)) { + return getTypedNullLiteralExpression(type, determinism); + } + if (type == PVarchar.INSTANCE) { + String s = (String) value; + if (s.length() == b.length) { // single byte characters only + type = PChar.INSTANCE; + } + } + return new LiteralExpression(value, type, b, determinism); + } - public static boolean isBooleanFalseOrNull(Expression child) { - if (child!=null) { - return child == BOOLEAN_EXPRESSIONS[child.getDeterminism().ordinal()] - || child == TYPED_NULL_EXPRESSIONS[PBoolean.INSTANCE.ordinal()+PDataType.values().length*child.getDeterminism().ordinal()]; + public static LiteralExpression newConstant(Object value, PDataType type) throws SQLException { + return newConstant(value, type, Determinism.ALWAYS); + } + + public static LiteralExpression newConstant(Object value, PDataType type, Determinism determinism) + throws SQLException { + return newConstant(value, type, SortOrder.getDefault(), determinism); + } + + public static LiteralExpression newConstant(Object value, PDataType type, SortOrder sortOrder) + throws SQLException { + return newConstant(value, type, null, null, sortOrder, Determinism.ALWAYS); + } + + public static LiteralExpression newConstant(Object value, PDataType type, SortOrder sortOrder, + Determinism determinism) throws SQLException { + return newConstant(value, type, null, null, sortOrder, determinism); + } + + public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, + Integer scale) throws SQLException { + return newConstant(value, type, maxLength, scale, SortOrder.getDefault(), Determinism.ALWAYS); + } + + public static 
LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, + Integer scale, Determinism determinism) throws SQLException { // remove? + return newConstant(value, type, maxLength, scale, SortOrder.getDefault(), determinism); + } + + public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, + Integer scale, SortOrder sortOrder, Determinism determinism) throws SQLException { + return newConstant(value, type, maxLength, scale, sortOrder, determinism, true); + } + + // TODO: cache? + public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, + Integer scale, SortOrder sortOrder, Determinism determinism, boolean rowKeyOrderOptimizable) + throws SQLException { + if (value == null) { + return (type == null) + ? getNullLiteralExpression(determinism) + : getTypedNullLiteralExpression(type, determinism); + } else if (value instanceof Boolean) { + return getBooleanLiteralExpression((Boolean) value, determinism); + } + PDataType actualType = PDataType.fromLiteral(value); + type = type == null ? actualType : type; + try { + value = type.toObject(value, actualType); + } catch (IllegalDataException e) { + throw TypeMismatchException.newException(type, actualType, value.toString()); + } + byte[] b = type.isArrayType() + ? ((PArrayDataType) type).toBytes(value, PArrayDataType.arrayBaseType(type), sortOrder, + rowKeyOrderOptimizable) + : type.toBytes(value, sortOrder); + if (type == PVarchar.INSTANCE || type == PChar.INSTANCE) { + if (type == PChar.INSTANCE && maxLength != null && b.length < maxLength) { + if (rowKeyOrderOptimizable) { + b = type.pad(b, maxLength, sortOrder); + } else { + b = StringUtil.padChar(b, maxLength); } - return false; + } else if (value != null) { + maxLength = ((String) value).length(); + } + } else if (type.isArrayType()) { + maxLength = ((PhoenixArray) value).getMaxLength(); } - - public static LiteralExpression newConstant(Object value) { - return newConstant(value, Determinism.ALWAYS); + if (b.length == 0) { + return getTypedNullLiteralExpression(type, determinism); } - - // TODO: cache? - public static LiteralExpression newConstant(Object value, Determinism determinism) { - if (value instanceof Boolean) { - return getBooleanLiteralExpression((Boolean)value, determinism); - } - else if (value == null) { - return getNullLiteralExpression(determinism); - } - PDataType type = PDataType.fromLiteral(value); - byte[] b = type.toBytes(value); - if (type.isNull(b)) { - return getTypedNullLiteralExpression(type, determinism); - } - if (type == PVarchar.INSTANCE) { - String s = (String) value; - if (s.length() == b.length) { // single byte characters only - type = PChar.INSTANCE; - } - } - return new LiteralExpression(value, type, b, determinism); + if (maxLength == null) { + maxLength = type.isFixedWidth() ? 
type.getMaxLength(value) : null; } + return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism); + } - public static LiteralExpression newConstant(Object value, PDataType type) throws SQLException { - return newConstant(value, type, Determinism.ALWAYS); - } - - public static LiteralExpression newConstant(Object value, PDataType type, Determinism determinism) throws SQLException { - return newConstant(value, type, SortOrder.getDefault(), determinism); - } - - public static LiteralExpression newConstant(Object value, PDataType type, SortOrder sortOrder) throws SQLException { - return newConstant(value, type, null, null, sortOrder, Determinism.ALWAYS); - } - - public static LiteralExpression newConstant(Object value, PDataType type, SortOrder sortOrder, Determinism determinism) throws SQLException { - return newConstant(value, type, null, null, sortOrder, determinism); - } - - public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, Integer scale) throws SQLException { - return newConstant(value, type, maxLength, scale, SortOrder.getDefault(), Determinism.ALWAYS); - } - - public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, Integer scale, Determinism determinism) throws SQLException { // remove? - return newConstant(value, type, maxLength, scale, SortOrder.getDefault(), determinism); - } + public LiteralExpression() { + } - public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, Integer scale, SortOrder sortOrder, Determinism determinism) - throws SQLException { - return newConstant(value, type, maxLength, scale, sortOrder, determinism, true); - } - - // TODO: cache? - public static LiteralExpression newConstant(Object value, PDataType type, Integer maxLength, Integer scale, SortOrder sortOrder, Determinism determinism, boolean rowKeyOrderOptimizable) - throws SQLException { - if (value == null) { - return (type == null) ? getNullLiteralExpression(determinism) : getTypedNullLiteralExpression(type, determinism); - } - else if (value instanceof Boolean) { - return getBooleanLiteralExpression((Boolean)value, determinism); - } - PDataType actualType = PDataType.fromLiteral(value); - type = type == null ? actualType : type; - try { - value = type.toObject(value, actualType); - } catch (IllegalDataException e) { - throw TypeMismatchException.newException(type, actualType, value.toString()); - } - byte[] b = type.isArrayType() ? ((PArrayDataType)type).toBytes(value, PArrayDataType.arrayBaseType(type), sortOrder, rowKeyOrderOptimizable) : - type.toBytes(value, sortOrder); - if (type == PVarchar.INSTANCE || type == PChar.INSTANCE) { - if (type == PChar.INSTANCE && maxLength != null && b.length < maxLength) { - if (rowKeyOrderOptimizable) { - b = type.pad(b, maxLength, sortOrder); - } else { - b = StringUtil.padChar(b, maxLength); - } - } else if (value != null) { - maxLength = ((String)value).length(); - } - } else if (type.isArrayType()) { - maxLength = ((PhoenixArray)value).getMaxLength(); - } - if (b.length == 0) { - return getTypedNullLiteralExpression(type, determinism); - } - if (maxLength == null) { - maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null; - } - return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism); - } + public LiteralExpression(byte[] byteValue) { + this.byteValue = byteValue != null ? 
byteValue : ByteUtil.EMPTY_BYTE_ARRAY; + this.determinism = Determinism.ALWAYS; + } - public LiteralExpression() { - } - - public LiteralExpression(byte[] byteValue) { - this.byteValue = byteValue!=null ? byteValue : ByteUtil.EMPTY_BYTE_ARRAY; - this.determinism = Determinism.ALWAYS; - } + private LiteralExpression(PDataType type, Determinism determinism) { + this(null, type, ByteUtil.EMPTY_BYTE_ARRAY, determinism); + } - private LiteralExpression(PDataType type, Determinism determinism) { - this(null, type, ByteUtil.EMPTY_BYTE_ARRAY, determinism); - } + private LiteralExpression(Object value, PDataType type, byte[] byteValue, + Determinism determinism) { + this(value, type, byteValue, + type == null || !type.isFixedWidth() ? null : type.getMaxLength(value), null, + SortOrder.getDefault(), determinism); + } - private LiteralExpression(Object value, PDataType type, byte[] byteValue, Determinism determinism) { - this(value, type, byteValue, type == null || !type.isFixedWidth() ? null : type.getMaxLength(value), null, SortOrder.getDefault(), determinism); - } + private LiteralExpression(Object value, PDataType type, byte[] byteValue, Integer maxLength, + Integer scale, SortOrder sortOrder, Determinism deterministic) { + Preconditions.checkNotNull(sortOrder); + this.value = value; + this.type = type; + this.byteValue = byteValue; + this.maxLength = maxLength; + this.scale = scale != null ? scale : type == null ? null : type.getScale(value); + this.sortOrder = sortOrder; + this.determinism = deterministic; + } - private LiteralExpression(Object value, PDataType type, byte[] byteValue, - Integer maxLength, Integer scale, SortOrder sortOrder, Determinism deterministic) { - Preconditions.checkNotNull(sortOrder); - this.value = value; - this.type = type; - this.byteValue = byteValue; - this.maxLength = maxLength; - this.scale = scale != null ? scale : type == null ? null : type.getScale(value); - this.sortOrder = sortOrder; - this.determinism = deterministic; - } + @Override + public Determinism getDeterminism() { + return determinism; + } - @Override - public Determinism getDeterminism() { - return determinism; - } - - @Override - public String toString() { - if (value == null && byteValue!=null) { - return Bytes.toStringBinary(byteValue); - } - else if (value == null) { - return "null"; - } - // TODO: move into PDataType? - if (type.isCoercibleTo(PTimestamp.INSTANCE)) { - return type + " " + type.toStringLiteral(value, null); - } - return type.toStringLiteral(value, null); - } + @Override + public String toString() { + if (value == null && byteValue != null) { + return Bytes.toStringBinary(byteValue); + } else if (value == null) { + return "null"; + } + // TODO: move into PDataType? + if (type.isCoercibleTo(PTimestamp.INSTANCE)) { + return type + " " + type.toStringLiteral(value, null); + } + return type.toStringLiteral(value, null); + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((value == null) ? 0 : value.hashCode()); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((value == null) ? 
0 : value.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - LiteralExpression other = (LiteralExpression)obj; - if (value == null) { - if (other.value != null) return false; - } else if (!value.equals(other.value)) return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + LiteralExpression other = (LiteralExpression) obj; + if (value == null) { + if (other.value != null) return false; + } else if (!value.equals(other.value)) return false; + return true; + } - @Override - public void readFields(DataInput input) throws IOException { - int encodedByteLengthAndBool = WritableUtils.readVInt(input); - int byteLength = Math.abs(encodedByteLengthAndBool)-1; - this.byteValue = new byte[byteLength]; - input.readFully(byteValue, 0, byteLength); - int sortOrderAndDeterminism = WritableUtils.readVInt(input); - if (sortOrderAndDeterminism<=2) { - //client is on an older version - this.determinism = encodedByteLengthAndBool > 0 ? Determinism.ALWAYS : Determinism.PER_ROW; - this.sortOrder = SortOrder.fromSystemValue(sortOrderAndDeterminism);; - } - else { - int determinismOrdinal = (sortOrderAndDeterminism>>2)-1; - this.determinism = Determinism.values()[determinismOrdinal]; - int sortOrderValue = sortOrderAndDeterminism & ((1 << 2) - 1); //get the least 2 significant bits - this.sortOrder = SortOrder.fromSystemValue(sortOrderValue); - } - int typeOrdinal = WritableUtils.readVInt(input); - if (typeOrdinal < 0) { - this.type = null; - } else { - this.type = PDataType.values()[typeOrdinal]; - } - if (this.byteValue.length == 0) { - this.value = null; - } else { - this.value = this.type.toObject(byteValue, 0, byteValue.length, this.type, sortOrder); - } - } + @Override + public void readFields(DataInput input) throws IOException { + int encodedByteLengthAndBool = WritableUtils.readVInt(input); + int byteLength = Math.abs(encodedByteLengthAndBool) - 1; + this.byteValue = new byte[byteLength]; + input.readFully(byteValue, 0, byteLength); + int sortOrderAndDeterminism = WritableUtils.readVInt(input); + if (sortOrderAndDeterminism <= 2) { + // client is on an older version + this.determinism = encodedByteLengthAndBool > 0 ? Determinism.ALWAYS : Determinism.PER_ROW; + this.sortOrder = SortOrder.fromSystemValue(sortOrderAndDeterminism); + ; + } else { + int determinismOrdinal = (sortOrderAndDeterminism >> 2) - 1; + this.determinism = Determinism.values()[determinismOrdinal]; + int sortOrderValue = sortOrderAndDeterminism & ((1 << 2) - 1); // get the least 2 significant + // bits + this.sortOrder = SortOrder.fromSystemValue(sortOrderValue); + } + int typeOrdinal = WritableUtils.readVInt(input); + if (typeOrdinal < 0) { + this.type = null; + } else { + this.type = PDataType.values()[typeOrdinal]; + } + if (this.byteValue.length == 0) { + this.value = null; + } else { + this.value = this.type.toObject(byteValue, 0, byteValue.length, this.type, sortOrder); + } + } - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, (byteValue.length + 1) * (this.determinism==Determinism.ALWAYS ? 
1 : -1)); - output.write(byteValue); - // since we need to support clients of a lower version, serialize the determinism enum ordinal in the int used to - // serialize sort order system value (which is either 1 or 2) - int sortOrderAndDeterminism = ((this.determinism.ordinal()+1)<<2) + sortOrder.getSystemValue(); - WritableUtils.writeVInt(output, sortOrderAndDeterminism); - WritableUtils.writeVInt(output, this.type == null ? -1 : this.type.ordinal()); - } + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, + (byteValue.length + 1) * (this.determinism == Determinism.ALWAYS ? 1 : -1)); + output.write(byteValue); + // since we need to support clients of a lower version, serialize the determinism enum ordinal + // in the int used to + // serialize sort order system value (which is either 1 or 2) + int sortOrderAndDeterminism = + ((this.determinism.ordinal() + 1) << 2) + sortOrder.getSystemValue(); + WritableUtils.writeVInt(output, sortOrderAndDeterminism); + WritableUtils.writeVInt(output, this.type == null ? -1 : this.type.ordinal()); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Literal always evaluates, even when it returns null - ptr.set(byteValue); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Literal always evaluates, even when it returns null + ptr.set(byteValue); + return true; + } - @Override - public PDataType getDataType() { - return type; - } + @Override + public PDataType getDataType() { + return type; + } - @Override - public Integer getMaxLength() { - // For literals representing arrays of CHAR or BINARY, the byte size is null and the max - // length of the expression is also null, so we must get the max length of the - // actual underlying array - if (maxLength == null && getDataType() != null && getDataType().isArrayType() && - PDataType.arrayBaseType(getDataType()).getByteSize() == null) { - Object value = getValue(); - if (value instanceof PhoenixArray) { - // Return the max length of the underlying PhoenixArray data - return ((PhoenixArray) value).getMaxLength(); - } - } - return maxLength; - } + @Override + public Integer getMaxLength() { + // For literals representing arrays of CHAR or BINARY, the byte size is null and the max + // length of the expression is also null, so we must get the max length of the + // actual underlying array + if ( + maxLength == null && getDataType() != null && getDataType().isArrayType() + && PDataType.arrayBaseType(getDataType()).getByteSize() == null + ) { + Object value = getValue(); + if (value instanceof PhoenixArray) { + // Return the max length of the underlying PhoenixArray data + return ((PhoenixArray) value).getMaxLength(); + } + } + return maxLength; + } - @Override - public Integer getScale() { - return scale; - } - - @Override - public SortOrder getSortOrder() { - return sortOrder; - } + @Override + public Integer getScale() { + return scale; + } - @Override - public boolean isNullable() { - return value == null; - } + @Override + public SortOrder getSortOrder() { + return sortOrder; + } - public Object getValue() { - return value; - } + @Override + public boolean isNullable() { + return value == null; + } - public byte[] getBytes() { - return byteValue; - } - - @Override - public final T accept(ExpressionVisitor visitor) { - return visitor.visit(this); - } - - @Override - public boolean isStateless() { - return true; - } + public Object getValue() { + return value; + } + + 
public byte[] getBytes() { + return byteValue; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } + + @Override + public boolean isStateless() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongAddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongAddExpression.java index d498b829f89..c24ec3695ae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongAddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongAddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,42 +24,41 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; - public class LongAddExpression extends AddExpression { - public LongAddExpression() { - } + public LongAddExpression() { + } - public LongAddExpression(List children) { - super(children); - } + public LongAddExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=0; - - for(int i=0;i children) { - return new LongAddExpression(children); - } + @Override + public final PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new LongAddExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongDivideExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongDivideExpression.java index e59594e3470..48300981b46 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongDivideExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongDivideExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,46 +24,45 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; - public class LongDivideExpression extends DivideExpression { - public LongDivideExpression() { - } + public LongDivideExpression() { + } - public LongDivideExpression(List children) { - super(children); - } + public LongDivideExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=0; - - for(int i=0;i children) { - return new LongDivideExpression(children); - } + @Override + public final PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new LongDivideExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongMultiplyExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongMultiplyExpression.java index fa30029149e..852ec834524 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongMultiplyExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongMultiplyExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,45 +24,44 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; - public class LongMultiplyExpression extends MultiplyExpression { - public LongMultiplyExpression() { - } + public LongMultiplyExpression() { + } - public LongMultiplyExpression(List children) { - super(children); - } + public LongMultiplyExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=1; - - for(int i=0;i children) { - return new LongMultiplyExpression(children); - } + @Override + public final PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public ArithmeticExpression clone(List children) { + return new LongMultiplyExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongSubtractExpression.java index 95505a697cd..e0f240a23c6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/LongSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,62 +26,57 @@ import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PLong; - - /** - * * Subtract expression implementation - * - * * @since 0.1 */ public class LongSubtractExpression extends SubtractExpression { - public LongSubtractExpression() { - } + public LongSubtractExpression() { + } - public LongSubtractExpression(List children) { - super(children); + public LongSubtractExpression(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + long finalResult = 0; + + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr) || ptr.getLength() == 0) { + return false; + } + PDataType childType = child.getDataType(); + boolean isDate = childType.isCoercibleTo(PDate.INSTANCE); + long childvalue = childType.getCodec().decodeLong(ptr, child.getSortOrder()); + if (i == 0) { + finalResult = childvalue; + } else { + finalResult -= childvalue; + /* + * Special case for date subtraction - note that only first two expression may be dates. We + * need to convert the date to a unit of "days" because that's what sql expects. + */ + if (isDate) { + finalResult /= QueryConstants.MILLIS_IN_DAY; + } + } } + byte[] resultPtr = new byte[getDataType().getByteSize()]; + getDataType().getCodec().encodeLong(finalResult, resultPtr, 0); + ptr.set(resultPtr); + return true; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - long finalResult=0; - - for(int i=0;i children) { + return new LongSubtractExpression(children); + } - @Override - public ArithmeticExpression clone(List children) { - return new LongSubtractExpression(children); - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ModulusExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ModulusExpression.java index 99006c026db..5a6382442eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ModulusExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ModulusExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,86 +25,87 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; - /** * {@code * Implementation of the LENGTH() build-in function. is the string * of characters we want to find the length of. If is NULL or empty, null * is returned. 
* } - * * @since 0.1 */ public class ModulusExpression extends ArithmeticExpression { - public ModulusExpression() { } + public ModulusExpression() { + } - public ModulusExpression(List children) { - super(children); - } + public ModulusExpression(List children) { + super(children); + } + + private Expression getDividendExpression() { + return children.get(0); + } - private Expression getDividendExpression() { - return children.get(0); + private Expression getDivisorExpression() { + return children.get(1); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get the dividend + Expression dividendExpression = getDividendExpression(); + if (!dividendExpression.evaluate(tuple, ptr)) { + return false; } - - private Expression getDivisorExpression() { - return children.get(1); + if (ptr.getLength() == 0) { + return true; } + long dividend = dividendExpression.getDataType().getCodec().decodeLong(ptr, + dividendExpression.getSortOrder()); - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get the dividend - Expression dividendExpression = getDividendExpression(); - if (!dividendExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long dividend = dividendExpression.getDataType().getCodec().decodeLong(ptr, dividendExpression.getSortOrder()); - - // get the divisor - Expression divisorExpression = getDivisorExpression(); - if (!divisorExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long divisor = divisorExpression.getDataType().getCodec().decodeLong(ptr, divisorExpression.getSortOrder()); - - // actually perform modulus - long remainder = dividend % divisor; - - // return the result, use encodeLong to avoid extra Long allocation - byte[] resultPtr=new byte[PLong.INSTANCE.getByteSize()]; - getDataType().getCodec().encodeLong(remainder, resultPtr, 0); - ptr.set(resultPtr); - return true; + // get the divisor + Expression divisorExpression = getDivisorExpression(); + if (!divisorExpression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PLong.INSTANCE; + if (ptr.getLength() == 0) { + return true; } + long divisor = + divisorExpression.getDataType().getCodec().decodeLong(ptr, divisorExpression.getSortOrder()); - @Override - protected String getOperatorString() { - return " % "; - } + // actually perform modulus + long remainder = dividend % divisor; - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; - } + // return the result, use encodeLong to avoid extra Long allocation + byte[] resultPtr = new byte[PLong.INSTANCE.getByteSize()]; + getDataType().getCodec().encodeLong(remainder, resultPtr, 0); + ptr.set(resultPtr); + return true; + } + + @Override + public PDataType getDataType() { + return PLong.INSTANCE; + } - @Override - public ArithmeticExpression clone(List children) { - return new ModulusExpression(children); + @Override + protected String getOperatorString() { + return " % "; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } + + @Override + public ArithmeticExpression clone(List children) 
{ + return new ModulusExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/MultiplyExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/MultiplyExpression.java index fa669e9c719..5f6dc0779b7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/MultiplyExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/MultiplyExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,73 +22,69 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor; import org.apache.phoenix.schema.types.PDataType; - /** - * * Subtract expression implementation - * - * * @since 0.1 */ public abstract class MultiplyExpression extends ArithmeticExpression { - private Integer maxLength; - private Integer scale; + private Integer maxLength; + private Integer scale; - public MultiplyExpression() { - } + public MultiplyExpression() { + } - public MultiplyExpression(List children) { - super(children); - Expression firstChild = children.get(0); - maxLength = getPrecision(firstChild); - scale = getScale(firstChild); - for (int i=1; i children) { + super(children); + Expression firstChild = children.get(0); + maxLength = getPrecision(firstChild); + scale = getScale(firstChild); + for (int i = 1; i < children.size(); i++) { + Expression childExpr = children.get(i); + maxLength = getPrecision(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); + scale = getScale(maxLength, getPrecision(childExpr), scale, getScale(childExpr)); } + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } - @Override - public String getOperatorString() { - return " * "; - } - - private static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { - if (ls == null || rs == null) { - return PDataType.MAX_PRECISION; - } - int val = lp + rp; - return Math.min(PDataType.MAX_PRECISION, val); - } + @Override + public String getOperatorString() { + return " * "; + } - private static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { - // If we are adding a decimal with scale and precision to a decimal - // with no precision nor scale, the scale system does not apply. 
- if (ls == null || rs == null) { - return null; - } - int val = ls + rs; - return Math.min(PDataType.MAX_PRECISION, val); - } - - @Override - public Integer getScale() { - return scale; + private static Integer getPrecision(Integer lp, Integer rp, Integer ls, Integer rs) { + if (ls == null || rs == null) { + return PDataType.MAX_PRECISION; } + int val = lp + rp; + return Math.min(PDataType.MAX_PRECISION, val); + } - @Override - public Integer getMaxLength() { - return maxLength; + private static Integer getScale(Integer lp, Integer rp, Integer ls, Integer rs) { + // If we are adding a decimal with scale and precision to a decimal + // with no precision nor scale, the scale system does not apply. + if (ls == null || rs == null) { + return null; } + int val = ls + rs; + return Math.min(PDataType.MAX_PRECISION, val); + } + + @Override + public Integer getScale() { + return scale; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/NotExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/NotExpression.java index 4bbdcc4fc04..613204b3651 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/NotExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/NotExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,74 +27,73 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; - /** - * - * Implementation of the NOT operator that negates it's - * single boolean child expression. - * - * + * Implementation of the NOT operator that negates it's single boolean child expression. 
* @since 0.1 */ public class NotExpression extends BaseSingleExpression { - public static Expression create(Expression child, ImmutableBytesWritable ptr) throws SQLException { - if (child.getDataType() != PBoolean.INSTANCE) { - throw TypeMismatchException.newException(child.getDataType(), PBoolean.INSTANCE, "NOT"); - } - if (child.isStateless()) { - if (!child.evaluate(null, ptr) || ptr.getLength() == 0) { - return LiteralExpression.newConstant(null, PBoolean.INSTANCE, child.getDeterminism()); - } - return LiteralExpression.newConstant(!(Boolean) PBoolean.INSTANCE.toObject(ptr), PBoolean.INSTANCE, child.getDeterminism()); - } - return new NotExpression(child); + public static Expression create(Expression child, ImmutableBytesWritable ptr) + throws SQLException { + if (child.getDataType() != PBoolean.INSTANCE) { + throw TypeMismatchException.newException(child.getDataType(), PBoolean.INSTANCE, "NOT"); } - - public NotExpression() { + if (child.isStateless()) { + if (!child.evaluate(null, ptr) || ptr.getLength() == 0) { + return LiteralExpression.newConstant(null, PBoolean.INSTANCE, child.getDeterminism()); + } + return LiteralExpression.newConstant(!(Boolean) PBoolean.INSTANCE.toObject(ptr), + PBoolean.INSTANCE, child.getDeterminism()); } + return new NotExpression(child); + } - public NotExpression(Expression expression) { - super(expression); - } + public NotExpression() { + } - public NotExpression(List l) { - super(l); - } + public NotExpression(Expression expression) { + super(expression); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getChild().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - - ptr.set(Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)) ? PDataType.FALSE_BYTES : PDataType.TRUE_BYTES); - return true; - } + public NotExpression(List l) { + super(l); + } - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getChild().evaluate(tuple, ptr)) { + return false; } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + if (ptr.getLength() == 0) { + return true; } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("NOT ("); - buf.append(children.get(0).toString()); - buf.append(")"); - return buf.toString(); + + ptr.set(Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)) + ? 
PDataType.FALSE_BYTES + : PDataType.TRUE_BYTES); + return true; + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("NOT ("); + buf.append(children.get(0).toString()); + buf.append(")"); + return buf.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrExpression.java index 5b1b62e7da7..fa17494eafc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,45 +21,41 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor; - /** - * * OR expression implementation - * - * * @since 0.1 */ public class OrExpression extends AndOrExpression { - public OrExpression() { - } + public OrExpression() { + } - public OrExpression(List children) { - super(children); - } + public OrExpression(List children) { + super(children); + } - @Override - protected boolean isStopValue(Boolean value) { - return Boolean.TRUE.equals(value); - } + @Override + protected boolean isStopValue(Boolean value) { + return Boolean.TRUE.equals(value); + } - @Override - public String toString() { - StringBuilder buf = new StringBuilder("("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + " OR "); - } - buf.append(children.get(children.size()-1)); - buf.append(')'); - return buf.toString(); + @Override + public String toString() { + StringBuilder buf = new StringBuilder("("); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + " OR "); } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + buf.append(children.get(children.size() - 1)); + buf.append(')'); + return buf.toString(); + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrderByExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrderByExpression.java index 0ea8ef4eb07..5d718508fc7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrderByExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/OrderByExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression; import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; @@ -26,172 +25,156 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.compile.OrderByCompiler; import org.apache.phoenix.compile.OrderPreservingTracker.Info; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.execute.AggregatePlan; /** * A container for a column that appears in ORDER BY clause. */ public class OrderByExpression implements Writable { - private Expression expression; - private boolean isNullsLast; - private boolean isAscending; - - public OrderByExpression() { - } + private Expression expression; + private boolean isNullsLast; + private boolean isAscending; - private OrderByExpression(Expression expression, boolean isNullsLast, boolean isAcending) { - checkNotNull(expression); - this.expression = expression; - this.isNullsLast = isNullsLast; - this.isAscending = isAcending; - } + public OrderByExpression() { + } - /** - * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC}, reverse the isAscending, but isNullsLast is untouched. - * @param orderByExpression - * @return - */ - public static OrderByExpression convertIfExpressionSortOrderDesc(OrderByExpression orderByExpression) { - if (orderByExpression.getExpression().getSortOrder() != SortOrder.DESC) { - return orderByExpression; - } - return new OrderByExpression( - orderByExpression.getExpression(), - orderByExpression.isNullsLast(), - !orderByExpression.isAscending()); - } + private OrderByExpression(Expression expression, boolean isNullsLast, boolean isAcending) { + checkNotNull(expression); + this.expression = expression; + this.isNullsLast = isNullsLast; + this.isAscending = isAcending; + } - /** - * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC},reverse the isAscending,but isNullsLast is untouched. - * A typical case is in OrderByCompiler#compile to get the compiled OrderByExpression to used for OrderedResultIterator. - * @param expression - * @param isNullsLast - * @param isAscending - * @return - */ - public static OrderByExpression createByCheckIfExpressionSortOrderDesc(Expression expression, boolean isNullsLast, boolean isAscending) { - if(expression.getSortOrder() == SortOrder.DESC) { - isAscending = !isAscending; - } - return new OrderByExpression(expression, isNullsLast, isAscending); + /** + * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC}, reverse the isAscending, but + * isNullsLast is untouched. + */ + public static OrderByExpression + convertIfExpressionSortOrderDesc(OrderByExpression orderByExpression) { + if (orderByExpression.getExpression().getSortOrder() != SortOrder.DESC) { + return orderByExpression; } + return new OrderByExpression(orderByExpression.getExpression(), orderByExpression.isNullsLast(), + !orderByExpression.isAscending()); + } - /** - * If orderByReverse is true, reverse the isNullsLast and isAscending. 
- * A typical case is in AggregatePlan.OrderingResultIteratorFactory#newIterator - * @param expression - * @param isNullsLast - * @param isAscending - * @param orderByReverse - * @return - */ - public static OrderByExpression createByCheckIfOrderByReverse(Expression expression, boolean isNullsLast, boolean isAscending, boolean orderByReverse) { - if(orderByReverse) { - isNullsLast = !isNullsLast; - isAscending = !isAscending; - } - return new OrderByExpression(expression, isNullsLast, isAscending); + /** + * If {@link Expression#getSortOrder()} is {@link SortOrder#DESC},reverse the isAscending,but + * isNullsLast is untouched. A typical case is in OrderByCompiler#compile to get the compiled + * OrderByExpression to used for OrderedResultIterator. + */ + public static OrderByExpression createByCheckIfExpressionSortOrderDesc(Expression expression, + boolean isNullsLast, boolean isAscending) { + if (expression.getSortOrder() == SortOrder.DESC) { + isAscending = !isAscending; } + return new OrderByExpression(expression, isNullsLast, isAscending); + } - /** - * Create OrderByExpression from expression,isNullsLast is the default value "false",isAscending is based on {@link Expression#getSortOrder()}. - * If orderByReverse is true, reverses the isNullsLast and isAscending. - * @param expression - * @param orderByReverse - * @return - */ - public static OrderByExpression convertExpressionToOrderByExpression(Expression expression, boolean orderByReverse) { - return convertExpressionToOrderByExpression(expression, null, orderByReverse); + /** + * If orderByReverse is true, reverse the isNullsLast and isAscending. A typical case is in + * AggregatePlan.OrderingResultIteratorFactory#newIterator + */ + public static OrderByExpression createByCheckIfOrderByReverse(Expression expression, + boolean isNullsLast, boolean isAscending, boolean orderByReverse) { + if (orderByReverse) { + isNullsLast = !isNullsLast; + isAscending = !isAscending; } + return new OrderByExpression(expression, isNullsLast, isAscending); + } - /** - * Create OrderByExpression from expression, if the orderPreservingTrackInfo is not null, use isNullsLast and isAscending from orderPreservingTrackInfo. - * If orderByReverse is true, reverses the isNullsLast and isAscending. - * @param expression - * @param orderPreservingTrackInfo - * @param orderByReverse - * @return - */ - public static OrderByExpression convertExpressionToOrderByExpression( - Expression expression, - Info orderPreservingTrackInfo, - boolean orderByReverse) { - boolean isNullsLast = false; - boolean isAscending = expression.getSortOrder() == SortOrder.ASC; - if(orderPreservingTrackInfo != null) { - isNullsLast = orderPreservingTrackInfo.isNullsLast(); - isAscending = orderPreservingTrackInfo.isAscending(); - } - return OrderByExpression.createByCheckIfOrderByReverse(expression, isNullsLast, isAscending, orderByReverse); - } + /** + * Create OrderByExpression from expression,isNullsLast is the default value "false",isAscending + * is based on {@link Expression#getSortOrder()}. If orderByReverse is true, reverses the + * isNullsLast and isAscending. 
+ */ + public static OrderByExpression convertExpressionToOrderByExpression(Expression expression, + boolean orderByReverse) { + return convertExpressionToOrderByExpression(expression, null, orderByReverse); + } - public Expression getExpression() { - return expression; - } - - public boolean isNullsLast() { - return isNullsLast; - } - - public boolean isAscending() { - return isAscending; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o != null && this.getClass() == o.getClass()) { - OrderByExpression that = (OrderByExpression)o; - return isNullsLast == that.isNullsLast - && isAscending == that.isAscending - && expression.equals(that.expression); - } - return false; + /** + * Create OrderByExpression from expression, if the orderPreservingTrackInfo is not null, use + * isNullsLast and isAscending from orderPreservingTrackInfo. If orderByReverse is true, reverses + * the isNullsLast and isAscending. + */ + public static OrderByExpression convertExpressionToOrderByExpression(Expression expression, + Info orderPreservingTrackInfo, boolean orderByReverse) { + boolean isNullsLast = false; + boolean isAscending = expression.getSortOrder() == SortOrder.ASC; + if (orderPreservingTrackInfo != null) { + isNullsLast = orderPreservingTrackInfo.isNullsLast(); + isAscending = orderPreservingTrackInfo.isAscending(); } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (isNullsLast ? 0 : 1); - result = prime * result + (isAscending ? 0 : 1); - result = prime * result + expression.hashCode(); - return result; - } - - @Override - public String toString() { - Expression e = this.getExpression(); - boolean isNullsLast = this.isNullsLast; - boolean isAscending = this.isAscending; - // Flip back here based on sort order, as the compiler - // flips this, but we want to display the original back - // to the user. - if (e.getSortOrder() == SortOrder.DESC) { - isAscending = !isAscending; - } - return e + (isAscending ? "" : " DESC") + (isNullsLast ? " NULLS LAST" : ""); + return OrderByExpression.createByCheckIfOrderByReverse(expression, isNullsLast, isAscending, + orderByReverse); + } + + public Expression getExpression() { + return expression; + } + + public boolean isNullsLast() { + return isNullsLast; + } + + public boolean isAscending() { + return isAscending; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; } - - @Override - public void readFields(DataInput input) throws IOException { - this.isNullsLast = input.readBoolean(); - this.isAscending = input.readBoolean(); - expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); + if (o != null && this.getClass() == o.getClass()) { + OrderByExpression that = (OrderByExpression) o; + return isNullsLast == that.isNullsLast && isAscending == that.isAscending + && expression.equals(that.expression); } + return false; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (isNullsLast ? 0 : 1); + result = prime * result + (isAscending ? 
0 : 1); + result = prime * result + expression.hashCode(); + return result; + } - @Override - public void write(DataOutput output) throws IOException { - output.writeBoolean(isNullsLast); - output.writeBoolean(isAscending); - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); + @Override + public String toString() { + Expression e = this.getExpression(); + boolean isNullsLast = this.isNullsLast; + boolean isAscending = this.isAscending; + // Flip back here based on sort order, as the compiler + // flips this, but we want to display the original back + // to the user. + if (e.getSortOrder() == SortOrder.DESC) { + isAscending = !isAscending; } + return e + (isAscending ? "" : " DESC") + (isNullsLast ? " NULLS LAST" : ""); + } + + @Override + public void readFields(DataInput input) throws IOException { + this.isNullsLast = input.readBoolean(); + this.isAscending = input.readBoolean(); + expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + } + + @Override + public void write(DataOutput output) throws IOException { + output.writeBoolean(isNullsLast); + output.writeBoolean(isAscending); + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java index 21eb2fe359b..af0f454b263 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,133 +36,136 @@ import org.apache.phoenix.util.SchemaUtil; public class ProjectedColumnExpression extends ColumnExpression implements Cloneable { - private KeyValueSchema schema; - private ValueBitSet bitSet; - private int position; - private String displayName; - private final Collection columns; - private PColumn column; - - public ProjectedColumnExpression() { - this.columns = Collections.emptyList(); - } - - public ProjectedColumnExpression(PColumn column, PTable table, String displayName) { - this(column, table.getColumns(), column.getPosition() - table.getPKColumns().size(), displayName); - } - - public ProjectedColumnExpression(PColumn column, Collection columns, int position, String displayName) { - super(column); - this.column = column; - this.columns = columns; - this.position = position; - this.displayName = displayName; - } - - public static KeyValueSchema buildSchema(Collection columns) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - for (PColumn column : columns) { - if (!SchemaUtil.isPKColumn(column)) { - builder.addField(column); - } - } - return builder.build(); - } - - public KeyValueSchema getSchema() { - if (this.schema == null) { - this.schema = buildSchema(columns); - this.bitSet = ValueBitSet.newInstance(schema); - } - return schema; - } - - public int getPosition() { - return position; - } + private KeyValueSchema schema; + private ValueBitSet bitSet; + private int position; + private String displayName; + private final Collection columns; + private PColumn column; - public Collection getColumns() { - return columns; - } - - @Override - public String toString() { - return displayName; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + position; - return result; - } + public ProjectedColumnExpression() { + this.columns = Collections.emptyList(); + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - ProjectedColumnExpression other = (ProjectedColumnExpression)obj; - if (position != other.position) return false; - return true; - } + public ProjectedColumnExpression(PColumn column, PTable table, String displayName) { + this(column, table.getColumns(), column.getPosition() - table.getPKColumns().size(), + displayName); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - try { - KeyValueSchema schema = getSchema(); - TupleProjector.decodeProjectedValue(tuple, ptr); - bitSet.clear(); - bitSet.or(ptr); - int maxOffset = ptr.getOffset() + ptr.getLength() - bitSet.getEstimatedLength(); - schema.iterator(ptr, position, bitSet); - Boolean hasValue = schema.next(ptr, position, maxOffset, bitSet); - if (hasValue == null || !hasValue.booleanValue()) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - } catch (IOException e) { - return false; - } - - return true; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - schema = new KeyValueSchema(); - schema.readFields(input); - bitSet = ValueBitSet.newInstance(schema); - position = input.readInt(); - displayName = input.readUTF(); - } + 
public ProjectedColumnExpression(PColumn column, Collection columns, int position, + String displayName) { + super(column); + this.column = column; + this.columns = columns; + this.position = position; + this.displayName = displayName; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - getSchema().write(output); - output.writeInt(position); - output.writeUTF(displayName); + public static KeyValueSchema buildSchema(Collection columns) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + for (PColumn column : columns) { + if (!SchemaUtil.isPKColumn(column)) { + builder.addField(column); + } } + return builder.build(); + } - @Override - public final T accept(ExpressionVisitor visitor) { - return visitor.visit(this); + public KeyValueSchema getSchema() { + if (this.schema == null) { + this.schema = buildSchema(columns); + this.bitSet = ValueBitSet.newInstance(schema); } + return schema; + } - public PColumn getColumn() { - return column; - } + public int getPosition() { + return position; + } - @Override - public ProjectedColumnExpression clone() { - return new ProjectedColumnExpression(this.column, this.columns, this.position, this.displayName); - } + public Collection getColumns() { + return columns; + } + + @Override + public String toString() { + return displayName; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + position; + return result; + } - @Override - public boolean isCloneExpression() { + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + ProjectedColumnExpression other = (ProjectedColumnExpression) obj; + if (position != other.position) return false; + return true; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + try { + KeyValueSchema schema = getSchema(); + TupleProjector.decodeProjectedValue(tuple, ptr); + bitSet.clear(); + bitSet.or(ptr); + int maxOffset = ptr.getOffset() + ptr.getLength() - bitSet.getEstimatedLength(); + schema.iterator(ptr, position, bitSet); + Boolean hasValue = schema.next(ptr, position, maxOffset, bitSet); + if (hasValue == null || !hasValue.booleanValue()) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); return true; + } + } catch (IOException e) { + return false; } + + return true; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + schema = new KeyValueSchema(); + schema.readFields(input); + bitSet = ValueBitSet.newInstance(schema); + position = input.readInt(); + displayName = input.readUTF(); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + getSchema().write(output); + output.writeInt(position); + output.writeUTF(displayName); + } + + @Override + public final T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } + + public PColumn getColumn() { + return column; + } + + @Override + public ProjectedColumnExpression clone() { + return new ProjectedColumnExpression(this.column, this.columns, this.position, + this.displayName); + } + + @Override + public boolean isCloneExpression() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java index 60ad23d953d..2aaa837874c 100644 
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyColumnExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,176 +31,176 @@ import org.apache.phoenix.util.ByteUtil; /** - * * Class to access a value stored in the row key - * - * * @since 0.1 */ -public class RowKeyColumnExpression extends ColumnExpression { - private PDataType fromType; - private RowKeyValueAccessor accessor; - protected final String name; - private int offset; - - public RowKeyColumnExpression() { - name = null; // Only on client - } - - private RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, PDataType fromType, String name) { - super(datum); - this.accessor = accessor; - this.fromType = fromType; - this.name = name; - } - - public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor) { - this(datum, accessor, datum.getDataType(), datum.toString()); - } - - public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, String name) { - this(datum, accessor, datum.getDataType(), name); - } - - public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, PDataType fromType) { - this(datum, accessor, fromType, datum.toString()); - } - - /** - * Used to set an offset to be skipped from the start of a the row key. Used by - * local indexing to skip the region start key bytes. - * @param offset the number of bytes to offset accesses to row key columns - */ - public void setOffset(int offset) { - this.offset = offset; - } - - public int getPosition() { - return accessor.getIndex(); - } - - public String getName() { - return name; - } +public class RowKeyColumnExpression extends ColumnExpression { + private PDataType fromType; + private RowKeyValueAccessor accessor; + protected final String name; + private int offset; - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((accessor == null) ? 0 : accessor.hashCode()); - return result; - } + public RowKeyColumnExpression() { + name = null; // Only on client + } - @Override - public String toString() { - return name == null ? "PK[" + accessor.getIndex() + "]" : name; - } + private RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, PDataType fromType, + String name) { + super(datum); + this.accessor = accessor; + this.fromType = fromType; + this.name = name; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - RowKeyColumnExpression other = (RowKeyColumnExpression)obj; - return accessor.equals(other.accessor); - } + public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor) { + this(datum, accessor, datum.getDataType(), datum.toString()); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - tuple.getKey(ptr); - int offset = - accessor.getOffset(ptr.get(), ptr.getOffset() + this.offset); - // Null is represented in the last expression of a multi-part key - // by the bytes not being present. 
- int maxOffset = ptr.getOffset() + ptr.getLength(); - if (offset < maxOffset) { - byte[] buffer = ptr.get(); - int byteSize = -1; - // FIXME: fixedByteSize <= maxByteSize ? fixedByteSize : 0 required because HBase passes bogus keys to filter to position scan (HBASE-6562) - if (fromType.isFixedWidth()) { - Integer maxLength = getMaxLength(); - byteSize = fromType.getByteSize() == null ? maxLength : fromType.getByteSize(); - byteSize = byteSize <= maxOffset ? byteSize : 0; - } - int length = byteSize >= 0 ? byteSize - : accessor.getLength(buffer, offset, maxOffset, type, getSortOrder()); - // In the middle of the key, an empty variable length byte array represents null - if (length > 0) { - ptr.set(buffer,offset,length); - type.coerceBytes(ptr, fromType, getSortOrder(), getSortOrder()); - } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - // Always return true because we're always able to evaluate a row key column - return true; - } + public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, String name) { + this(datum, accessor, datum.getDataType(), name); + } - public int evaluateAndGetNextOffset(Tuple tuple, ImmutableBytesWritable ptr, int offset) { - tuple.getKey(ptr); - int maxOffset = ptr.getOffset() + ptr.getLength(); - if (offset < maxOffset) { - byte[] buffer = ptr.get(); - int byteSize = -1; - // FIXME: fixedByteSize <= maxByteSize ? fixedByteSize : 0 required because HBase passes bogus keys to filter to position scan (HBASE-6562) - if (fromType.isFixedWidth()) { - Integer maxLength = getMaxLength(); - byteSize = fromType.getByteSize() == null ? maxLength : fromType.getByteSize(); - byteSize = byteSize <= maxOffset ? byteSize : 0; - } - int length = byteSize >= 0 ? byteSize - : accessor.getLength(buffer, offset, maxOffset, type, getSortOrder()); - // In the middle of the key, an empty variable length byte array represents null - if (length > 0) { - ptr.set(buffer,offset,length); - type.coerceBytes(ptr, fromType, getSortOrder(), getSortOrder()); - if (fromType.isFixedWidth()) { - return offset + length; - } else if (fromType == PVarbinaryEncoded.INSTANCE) { - return offset + length + 2; - } else { - return offset + length + 1; - } - } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return offset + 1; - } + public RowKeyColumnExpression(PDatum datum, RowKeyValueAccessor accessor, PDataType fromType) { + this(datum, accessor, fromType, datum.toString()); + } + + /** + * Used to set an offset to be skipped from the start of a the row key. Used by local indexing to + * skip the region start key bytes. + * @param offset the number of bytes to offset accesses to row key columns + */ + public void setOffset(int offset) { + this.offset = offset; + } + + public int getPosition() { + return accessor.getIndex(); + } + + public String getName() { + return name; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((accessor == null) ? 0 : accessor.hashCode()); + return result; + } + + @Override + public String toString() { + return name == null ? 
"PK[" + accessor.getIndex() + "]" : name; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + RowKeyColumnExpression other = (RowKeyColumnExpression) obj; + return accessor.equals(other.accessor); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + tuple.getKey(ptr); + int offset = accessor.getOffset(ptr.get(), ptr.getOffset() + this.offset); + // Null is represented in the last expression of a multi-part key + // by the bytes not being present. + int maxOffset = ptr.getOffset() + ptr.getLength(); + if (offset < maxOffset) { + byte[] buffer = ptr.get(); + int byteSize = -1; + // FIXME: fixedByteSize <= maxByteSize ? fixedByteSize : 0 required because HBase passes bogus + // keys to filter to position scan (HBASE-6562) + if (fromType.isFixedWidth()) { + Integer maxLength = getMaxLength(); + byteSize = fromType.getByteSize() == null ? maxLength : fromType.getByteSize(); + byteSize = byteSize <= maxOffset ? byteSize : 0; + } + int length = byteSize >= 0 + ? byteSize + : accessor.getLength(buffer, offset, maxOffset, type, getSortOrder()); + // In the middle of the key, an empty variable length byte array represents null + if (length > 0) { + ptr.set(buffer, offset, length); + type.coerceBytes(ptr, fromType, getSortOrder(), getSortOrder()); + } else { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } + } else { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } + // Always return true because we're always able to evaluate a row key column + return true; + } + + public int evaluateAndGetNextOffset(Tuple tuple, ImmutableBytesWritable ptr, int offset) { + tuple.getKey(ptr); + int maxOffset = ptr.getOffset() + ptr.getLength(); + if (offset < maxOffset) { + byte[] buffer = ptr.get(); + int byteSize = -1; + // FIXME: fixedByteSize <= maxByteSize ? fixedByteSize : 0 required because HBase passes bogus + // keys to filter to position scan (HBASE-6562) + if (fromType.isFixedWidth()) { + Integer maxLength = getMaxLength(); + byteSize = fromType.getByteSize() == null ? maxLength : fromType.getByteSize(); + byteSize = byteSize <= maxOffset ? byteSize : 0; + } + int length = byteSize >= 0 + ? 
byteSize + : accessor.getLength(buffer, offset, maxOffset, type, getSortOrder()); + // In the middle of the key, an empty variable length byte array represents null + if (length > 0) { + ptr.set(buffer, offset, length); + type.coerceBytes(ptr, fromType, getSortOrder(), getSortOrder()); + if (fromType.isFixedWidth()) { + return offset + length; + } else if (fromType == PVarbinaryEncoded.INSTANCE) { + return offset + length + 2; } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return maxOffset; + return offset + length + 1; } - } + } else { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return offset + 1; + } + } else { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return maxOffset; + } + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - accessor = new RowKeyValueAccessor(); - accessor.readFields(input); - fromType = type; // fromType only needed on client side - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + accessor = new RowKeyValueAccessor(); + accessor.readFields(input); + fromType = type; // fromType only needed on client side + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - accessor.write(output); - } - - @Override - public final T accept(ExpressionVisitor visitor) { - return visitor.visit(this); - } - - /** - * Since we may never have encountered a key value column of interest, but the - * expression may evaluate to true just based on the row key columns, we need - * to do a final evaluation. An example of when this would be required is: - * SELECT a FROM t WHERE a = 5 OR b = 2 - * in the case where a is a PK column, b is a KV column and no b KV is found. - */ - @Override - public boolean requiresFinalEvaluation() { - return true; - } + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + accessor.write(output); + } + + @Override + public final T accept(ExpressionVisitor visitor) { + return visitor.visit(this); + } + + /** + * Since we may never have encountered a key value column of interest, but the expression may + * evaluate to true just based on the row key columns, we need to do a final evaluation. An + * example of when this would be required is: SELECT a FROM t WHERE a = 5 OR b = 2 in the case + * where a is a PK column, b is a KV column and no b KV is found. + */ + @Override + public boolean requiresFinalEvaluation() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyExpression.java index 97001e87fcd..bb11d78b422 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowKeyExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -25,26 +24,26 @@ import org.apache.phoenix.schema.types.PVarbinary; public class RowKeyExpression extends BaseTerminalExpression { - public static final RowKeyExpression INSTANCE = new RowKeyExpression(); - - private RowKeyExpression() { - } + public static final RowKeyExpression INSTANCE = new RowKeyExpression(); + + private RowKeyExpression() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - tuple.getKey(ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + tuple.getKey(ptr); + return true; + } - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } - @Override - public T accept(ExpressionVisitor visitor) { - // TODO Auto-generated method stub - return null; - } + @Override + public T accept(ExpressionVisitor visitor) { + // TODO Auto-generated method stub + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java index eaab87b4529..1cedeffa69f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -/** - * Implementation for row value constructor (a,b,c) expression. - * - * - * @since 0.1 - */ package org.apache.phoenix.expression; import java.io.DataInput; @@ -44,264 +37,266 @@ import org.apache.phoenix.util.TrustedByteArrayOutputStream; public class RowValueConstructorExpression extends BaseCompoundExpression { - - private ImmutableBytesWritable ptrs[]; - private ImmutableBytesWritable literalExprPtr; - private int partialEvalIndex = -1; - private int estimatedByteSize; - // The boolean field that indicated the object is a literal constant, - // has been repurposed to a bitset and now holds additional information. - // This is to facilitate b/w compat to 4.13 clients. - // @see PHOENIX-5122 - private BitSet extraFields; - - // Important : When you want to add new bits make sure to add those towards the end, - // else will break b/w compat again. - private enum ExtraFieldPosition { - - LITERAL_CONSTANT(0), - STRIP_TRAILING_SEPARATOR_BYTE(1); - - private int bitPosition; + private ImmutableBytesWritable ptrs[]; + private ImmutableBytesWritable literalExprPtr; + private int partialEvalIndex = -1; + private int estimatedByteSize; - private ExtraFieldPosition(int position) { - bitPosition = position; - } - - private int getBitPosition() { - return bitPosition; - } - } + // The boolean field that indicated the object is a literal constant, + // has been repurposed to a bitset and now holds additional information. + // This is to facilitate b/w compat to 4.13 clients. 
+ // @see PHOENIX-5122 + private BitSet extraFields; - public RowValueConstructorExpression() { - } - - public RowValueConstructorExpression(List children, boolean isConstant) { - super(children); - extraFields = new BitSet(8); - extraFields.set(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition()); - if (isConstant) { - extraFields.set(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition()); - } - estimatedByteSize = 0; - init(); + // Important : When you want to add new bits make sure to add those towards the end, + // else will break b/w compat again. + private enum ExtraFieldPosition { + + LITERAL_CONSTANT(0), + STRIP_TRAILING_SEPARATOR_BYTE(1); + + private int bitPosition; + + private ExtraFieldPosition(int position) { + bitPosition = position; } - public RowValueConstructorExpression clone(List children) { - return new RowValueConstructorExpression(children, literalExprPtr != null); + private int getBitPosition() { + return bitPosition; } - - public int getEstimatedSize() { - return estimatedByteSize; + } + + public RowValueConstructorExpression() { + } + + public RowValueConstructorExpression(List children, boolean isConstant) { + super(children); + extraFields = new BitSet(8); + extraFields.set(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition()); + if (isConstant) { + extraFields.set(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition()); } - - @Override - public boolean isStateless() { - return literalExprPtr != null; + estimatedByteSize = 0; + init(); + } + + public RowValueConstructorExpression clone(List children) { + return new RowValueConstructorExpression(children, literalExprPtr != null); + } + + public int getEstimatedSize() { + return estimatedByteSize; + } + + @Override + public boolean isStateless() { + return literalExprPtr != null; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } - - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + return t; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + extraFields = BitSet.valueOf(new byte[] { input.readByte() }); + init(); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + byte[] b = extraFields.toByteArray(); + output.writeByte((b.length > 0 ? 
b[0] & 0xff : 0)); + } + + private void init() { + this.ptrs = new ImmutableBytesWritable[children.size()]; + if (isConstant()) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + this.evaluate(null, ptr); + literalExprPtr = ptr; } + } + + private boolean isConstant() { + return extraFields.get(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition()); + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - extraFields = BitSet.valueOf(new byte[] {input.readByte()}); - init(); + private boolean isStripTrailingSepByte() { + return extraFields.get(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition()); + } + + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + @Override + public void reset() { + partialEvalIndex = 0; + estimatedByteSize = 0; + Arrays.fill(ptrs, null); + super.reset(); + } + + private static int getExpressionByteCount(Expression e) { + PDataType childType = e.getDataType(); + if (childType != null && !childType.isFixedWidth()) { + return childType != PVarbinaryEncoded.INSTANCE ? 1 : 2; + } else { + // Write at least one null byte in the case of the child being null with a childType of null + return childType == null ? 1 : SchemaUtil.getFixedByteSize(e); } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - byte[] b = extraFields.toByteArray(); - output.writeByte((b.length > 0 ? b[0] & 0xff : 0)); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (literalExprPtr != null) { + // if determined during construction that the row value constructor is just comprised of + // literal expressions, + // let's just return the ptr we have already computed and be done with evaluation. + ptr.set(literalExprPtr.get(), literalExprPtr.getOffset(), literalExprPtr.getLength()); + return true; } - - private void init() { - this.ptrs = new ImmutableBytesWritable[children.size()]; - if (isConstant()) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - this.evaluate(null, ptr); - literalExprPtr = ptr; + try { + boolean isPartialEval = this.partialEvalIndex >= 0; + int evalIndex = isPartialEval ? this.partialEvalIndex : 0; + int expressionCount = evalIndex; + for (; evalIndex < ptrs.length; evalIndex++) { + final Expression expression = children.get(evalIndex); + // TODO: handle overflow and underflow + if (expression.evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + estimatedByteSize += getExpressionByteCount(expression); + } else { + expressionCount = evalIndex + 1; + ptrs[evalIndex] = new ImmutableBytesWritable(); + ptrs[evalIndex].set(ptr.get(), ptr.getOffset(), ptr.getLength()); + estimatedByteSize += ptr.getLength() + + (expression.getDataType().isFixedWidth() ? 0 : getSeparatorBytesLength(expression)); // 1 + // extra + // for + // the + // separator + // byte. 
+ } + } else if (tuple == null || tuple.isImmutable()) { + estimatedByteSize += getExpressionByteCount(expression); + } else { // Cannot yet be evaluated + return false; } - } - - private boolean isConstant() { - return extraFields.get(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition()); - } - - private boolean isStripTrailingSepByte() { - return extraFields.get(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition()); - } - - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - @Override - public void reset() { - partialEvalIndex = 0; - estimatedByteSize = 0; - Arrays.fill(ptrs, null); - super.reset(); - } - - private static int getExpressionByteCount(Expression e) { - PDataType childType = e.getDataType(); - if (childType != null && !childType.isFixedWidth()) { - return childType != PVarbinaryEncoded.INSTANCE ? 1 : 2; - } else { - // Write at least one null byte in the case of the child being null with a childType of null - return childType == null ? 1 : SchemaUtil.getFixedByteSize(e); + } + if (isPartialEval) { + this.partialEvalIndex = evalIndex; // Move counter forward + } + + if (evalIndex == ptrs.length) { + if (expressionCount == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if(literalExprPtr != null) { - // if determined during construction that the row value constructor is just comprised of literal expressions, - // let's just return the ptr we have already computed and be done with evaluation. - ptr.set(literalExprPtr.get(), literalExprPtr.getOffset(), literalExprPtr.getLength()); - return true; + if (expressionCount == 1) { + ptr.set(ptrs[0].get(), ptrs[0].getOffset(), ptrs[0].getLength()); + return true; } + TrustedByteArrayOutputStream output = new TrustedByteArrayOutputStream(estimatedByteSize); try { - boolean isPartialEval = this.partialEvalIndex >= 0; - int evalIndex = isPartialEval ? this.partialEvalIndex : 0; - int expressionCount = evalIndex; - for(; evalIndex < ptrs.length; evalIndex++) { - final Expression expression = children.get(evalIndex); - // TODO: handle overflow and underflow - if (expression.evaluate(tuple, ptr)) { - if (ptr.getLength() == 0) { - estimatedByteSize += getExpressionByteCount(expression); - } else { - expressionCount = evalIndex+1; - ptrs[evalIndex] = new ImmutableBytesWritable(); - ptrs[evalIndex].set(ptr.get(), ptr.getOffset(), ptr.getLength()); - estimatedByteSize += - ptr.getLength() + (expression.getDataType().isFixedWidth() ? - 0 : - getSeparatorBytesLength( - expression)); // 1 extra for the separator byte. - } - } else if (tuple == null || tuple.isImmutable()) { - estimatedByteSize += getExpressionByteCount(expression); - } else { // Cannot yet be evaluated - return false; + boolean previousCarryOver = false; + for (int i = 0; i < expressionCount; i++) { + Expression child = getChildren().get(i); + PDataType childType = child.getDataType(); + ImmutableBytesWritable tempPtr = ptrs[i]; + if (tempPtr == null) { + // Since we have a null and have no representation for null, + // we must decrement the value of the current. Otherwise, + // we'd have an ambiguity if this value happened to be the + // min possible value. 
+ previousCarryOver = childType == null || childType.isFixedWidth(); + if (childType == PVarbinaryEncoded.INSTANCE) { + output.write(QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES); + } else { + int bytesToWrite = getExpressionByteCount(child); + for (int m = 0; m < bytesToWrite; m++) { + output.write(QueryConstants.SEPARATOR_BYTE); } + } + } else { + output.write(tempPtr.get(), tempPtr.getOffset(), tempPtr.getLength()); + if (!childType.isFixedWidth()) { + output.write( + SchemaUtil.getSeparatorBytes(childType, true, false, child.getSortOrder())); + } + if (previousCarryOver) { + previousCarryOver = !ByteUtil.previousKey(output.getBuffer(), output.size()); + } } - if (isPartialEval) { - this.partialEvalIndex = evalIndex; // Move counter forward + } + int outputSize = output.size(); + byte[] outputBytes = output.getBuffer(); + // Don't remove trailing separator byte unless it's the one for ASC + // as otherwise we need it to ensure sort order is correct. + // Additionally for b/w compat with clients older than 4.14.1 - + // If SortOorder.ASC then always strip trailing separator byte (as before) + // else only strip for >= 4.14 client (when STRIP_TRAILING_SEPARATOR_BYTE bit is set) + for (int k = expressionCount - 1; k >= 0 && getChildren().get(k).getDataType() != null + && !getChildren().get(k).getDataType().isFixedWidth() + && hasSeparatorBytes(outputBytes, outputSize, k) + && (getChildren().get(k).getSortOrder() == SortOrder.ASC + || isStripTrailingSepByte()); k--) { + outputSize--; + if (getChildren().get(k).getDataType() == PVarbinaryEncoded.INSTANCE) { + outputSize--; } - - if (evalIndex == ptrs.length) { - if (expressionCount == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - if (expressionCount == 1) { - ptr.set(ptrs[0].get(), ptrs[0].getOffset(), ptrs[0].getLength()); - return true; - } - TrustedByteArrayOutputStream output = new TrustedByteArrayOutputStream(estimatedByteSize); - try { - boolean previousCarryOver = false; - for (int i = 0; i< expressionCount; i++) { - Expression child = getChildren().get(i); - PDataType childType = child.getDataType(); - ImmutableBytesWritable tempPtr = ptrs[i]; - if (tempPtr == null) { - // Since we have a null and have no representation for null, - // we must decrement the value of the current. Otherwise, - // we'd have an ambiguity if this value happened to be the - // min possible value. - previousCarryOver = childType == null || childType.isFixedWidth(); - if (childType == PVarbinaryEncoded.INSTANCE) { - output.write(QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES); - } else { - int bytesToWrite = getExpressionByteCount(child); - for (int m = 0; m < bytesToWrite; m++) { - output.write(QueryConstants.SEPARATOR_BYTE); - } - } - } else { - output.write(tempPtr.get(), tempPtr.getOffset(), tempPtr.getLength()); - if (!childType.isFixedWidth()) { - output.write(SchemaUtil.getSeparatorBytes(childType, true, false, - child.getSortOrder())); - } - if (previousCarryOver) { - previousCarryOver = !ByteUtil.previousKey(output.getBuffer(), output.size()); - } - } - } - int outputSize = output.size(); - byte[] outputBytes = output.getBuffer(); - // Don't remove trailing separator byte unless it's the one for ASC - // as otherwise we need it to ensure sort order is correct. 
- // Additionally for b/w compat with clients older than 4.14.1 - - // If SortOorder.ASC then always strip trailing separator byte (as before) - // else only strip for >= 4.14 client (when STRIP_TRAILING_SEPARATOR_BYTE bit is set) - for (int k = expressionCount - 1; k >= 0 - && getChildren().get(k).getDataType() != null - && !getChildren().get(k).getDataType().isFixedWidth() - && hasSeparatorBytes(outputBytes, outputSize, k) - && (getChildren().get(k).getSortOrder() == SortOrder.ASC - || isStripTrailingSepByte()); k--) { - outputSize--; - if (getChildren().get(k).getDataType() == PVarbinaryEncoded.INSTANCE) { - outputSize--; - } - } - ptr.set(outputBytes, 0, outputSize); - return true; - } finally { - output.close(); - } - } - return false; - } catch (IOException e) { - throw new RuntimeException(e); //Impossible. + } + ptr.set(outputBytes, 0, outputSize); + return true; + } finally { + output.close(); } + } + return false; + } catch (IOException e) { + throw new RuntimeException(e); // Impossible. } + } - private boolean hasSeparatorBytes(byte[] outputBytes, int outputSize, int k) { - if (getChildren().get(k).getDataType() != PVarbinaryEncoded.INSTANCE) { - return outputBytes[outputSize - 1] == SchemaUtil.getSeparatorByte(true, false, - getChildren().get(k)); - } else { - byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded(true, false, - getChildren().get(k).getSortOrder()); - return outputSize >= 2 && outputBytes[outputSize - 1] == sepBytes[1] - && outputBytes[outputSize - 2] == sepBytes[0]; - } + private boolean hasSeparatorBytes(byte[] outputBytes, int outputSize, int k) { + if (getChildren().get(k).getDataType() != PVarbinaryEncoded.INSTANCE) { + return outputBytes[outputSize - 1] + == SchemaUtil.getSeparatorByte(true, false, getChildren().get(k)); + } else { + byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded(true, false, + getChildren().get(k).getSortOrder()); + return outputSize >= 2 && outputBytes[outputSize - 1] == sepBytes[1] + && outputBytes[outputSize - 2] == sepBytes[0]; } + } - private static int getSeparatorBytesLength(Expression expression) { - return expression.getDataType() != PVarbinaryEncoded.INSTANCE ? 1 : 2; - } + private static int getSeparatorBytesLength(Expression expression) { + return expression.getDataType() != PVarbinaryEncoded.INSTANCE ? 
1 : 2; + } - @Override - public final String toString() { - StringBuilder buf = new StringBuilder("("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ", "); - } - buf.append(children.get(children.size()-1) + ")"); - return buf.toString(); - } - - @Override - public boolean requiresFinalEvaluation() { - return true; + @Override + public final String toString() { + StringBuilder buf = new StringBuilder("("); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + ", "); } + buf.append(children.get(children.size() - 1) + ")"); + return buf.toString(); + } + + @Override + public boolean requiresFinalEvaluation() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellColumnExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellColumnExpression.java index 115e4e6d6de..474a14987b4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellColumnExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellColumnExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,178 +37,177 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.util.SchemaUtil; /** - * - * Class to access a column that is stored in a Cell that contains all - * columns for a given column family (stored in a serialized array). - * + * Class to access a column that is stored in a Cell that contains all columns for a given column + * family (stored in a serialized array). 
*/ public class SingleCellColumnExpression extends KeyValueColumnExpression { - - private int decodedColumnQualifier; - private String arrayColDisplayName; - private KeyValueColumnExpression keyValueColumnExpression; - private QualifierEncodingScheme encodingScheme; - private ImmutableStorageScheme immutableStorageScheme; - - public SingleCellColumnExpression() { - } - - public SingleCellColumnExpression(ImmutableStorageScheme immutableStorageScheme) { - this.immutableStorageScheme = immutableStorageScheme; - } - - public SingleCellColumnExpression(PDatum column, byte[] cf, byte[] cq, - QualifierEncodingScheme encodingScheme, ImmutableStorageScheme immutableStorageScheme) { - super(column, cf, SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); - this.immutableStorageScheme = immutableStorageScheme; - Preconditions.checkNotNull(encodingScheme); - Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS); - this.decodedColumnQualifier = encodingScheme.decode(cq); - this.encodingScheme = encodingScheme; - setKeyValueExpression(); - } - - public SingleCellColumnExpression(PColumn column, String displayName, QualifierEncodingScheme encodingScheme, ImmutableStorageScheme immutableStorageScheme) { - super(column, column.getFamilyName().getBytes(), SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); - this.immutableStorageScheme = immutableStorageScheme; - Preconditions.checkNotNull(encodingScheme); - Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS); - this.arrayColDisplayName = displayName; - this.decodedColumnQualifier = encodingScheme.decode(column.getColumnQualifierBytes()); - this.encodingScheme = encodingScheme; - setKeyValueExpression(); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!super.evaluate(tuple, ptr)) { - return false; - } - return evaluate(ptr); - } - - @Override - public boolean evaluateUnsafe(Tuple tuple, ImmutableBytesWritable ptr) { - if (!super.evaluateUnsafe(tuple, ptr)) { - return false; - } - return evaluate(ptr); - } - public boolean evaluate(ImmutableBytesWritable ptr) { - if (ptr.getLength() == 0) { - return true; - } - // the first position is reserved and we offset maxEncodedColumnQualifier by - // ENCODED_CQ_COUNTER_INITIAL_VALUE (which is the minimum encoded column qualifier) - int index = decodedColumnQualifier - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 1; - // Given a ptr to the entire array, set ptr to point to a particular element - // within that array - ColumnValueDecoder encoderDecoder = immutableStorageScheme.getDecoder(); - return encoderDecoder.decode(ptr, index); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.decodedColumnQualifier = WritableUtils.readVInt(input); - int serializedEncodingScheme = WritableUtils.readVInt(input); - // prior to PHOENIX-4432 we weren't writing out the immutableStorageScheme in write(), - // so we use the decodedColumnQualifier sign to determine whether it's there - if (Integer.signum(serializedEncodingScheme) == -1) { - this.immutableStorageScheme = - ImmutableStorageScheme - .fromSerializedValue((byte) WritableUtils.readVInt(input)); - serializedEncodingScheme = -serializedEncodingScheme; - } else { - this.immutableStorageScheme = ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; - } - this.encodingScheme = QualifierEncodingScheme.values()[serializedEncodingScheme]; - setKeyValueExpression(); - } - - @Override - public void write(DataOutput output) throws IOException { - 
super.write(output); - WritableUtils.writeVInt(output, decodedColumnQualifier); - WritableUtils.writeVInt(output, -encodingScheme.ordinal()); //negative since PHOENIX-4432 - WritableUtils.writeVInt(output, immutableStorageScheme.getSerializedMetadataValue()); - } - - public KeyValueColumnExpression getKeyValueExpression() { - return keyValueColumnExpression; - } - - private void setKeyValueExpression() { - final boolean isNullable = isNullable(); - final SortOrder sortOrder = getSortOrder(); - final Integer scale = getScale(); - final Integer maxLength = getMaxLength(); - final PDataType datatype = getDataType(); - this.keyValueColumnExpression = new KeyValueColumnExpression(new PDatum() { - @Override - public boolean isNullable() { - return isNullable; - } - - @Override - public SortOrder getSortOrder() { - return sortOrder; - } - - @Override - public Integer getScale() { - return scale; - } - - @Override - public Integer getMaxLength() { - return maxLength; - } - - @Override - public PDataType getDataType() { - return datatype; - } - }, getColumnFamily(), getPositionInArray()); - } - - @Override - public String toString() { - if (arrayColDisplayName == null) { - arrayColDisplayName = SchemaUtil.getColumnDisplayName(getColumnFamily(), getColumnQualifier()); - } - return arrayColDisplayName; - } - - public byte[] getPositionInArray() { - return encodingScheme.encode(decodedColumnQualifier); - } - - @Override - public T accept(ExpressionVisitor visitor) { - //FIXME: this is ugly but can't think of a good solution. - if (visitor instanceof ViewWhereExpressionVisitor) { - return visitor.visit(this); - } else { - return super.accept(visitor); - } - } - - @Override - public boolean equals(Object obj) { - if (obj.getClass() != SingleCellColumnExpression.class) return false; - return keyValueColumnExpression.equals(((SingleCellColumnExpression)obj).getKeyValueExpression()); - } - - @Override - public int hashCode() { - return keyValueColumnExpression.hashCode(); - } + private int decodedColumnQualifier; + private String arrayColDisplayName; + private KeyValueColumnExpression keyValueColumnExpression; + private QualifierEncodingScheme encodingScheme; + private ImmutableStorageScheme immutableStorageScheme; + + public SingleCellColumnExpression() { + } + + public SingleCellColumnExpression(ImmutableStorageScheme immutableStorageScheme) { + this.immutableStorageScheme = immutableStorageScheme; + } + + public SingleCellColumnExpression(PDatum column, byte[] cf, byte[] cq, + QualifierEncodingScheme encodingScheme, ImmutableStorageScheme immutableStorageScheme) { + super(column, cf, SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); + this.immutableStorageScheme = immutableStorageScheme; + Preconditions.checkNotNull(encodingScheme); + Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS); + this.decodedColumnQualifier = encodingScheme.decode(cq); + this.encodingScheme = encodingScheme; + setKeyValueExpression(); + } + + public SingleCellColumnExpression(PColumn column, String displayName, + QualifierEncodingScheme encodingScheme, ImmutableStorageScheme immutableStorageScheme) { + super(column, column.getFamilyName().getBytes(), SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); + this.immutableStorageScheme = immutableStorageScheme; + Preconditions.checkNotNull(encodingScheme); + Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS); + this.arrayColDisplayName = displayName; + this.decodedColumnQualifier = encodingScheme.decode(column.getColumnQualifierBytes()); + this.encodingScheme = 
encodingScheme; + setKeyValueExpression(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!super.evaluate(tuple, ptr)) { + return false; + } + return evaluate(ptr); + } + + @Override + public boolean evaluateUnsafe(Tuple tuple, ImmutableBytesWritable ptr) { + if (!super.evaluateUnsafe(tuple, ptr)) { + return false; + } + return evaluate(ptr); + } + + public boolean evaluate(ImmutableBytesWritable ptr) { + if (ptr.getLength() == 0) { + return true; + } + // the first position is reserved and we offset maxEncodedColumnQualifier by + // ENCODED_CQ_COUNTER_INITIAL_VALUE (which is the minimum encoded column qualifier) + int index = decodedColumnQualifier - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 1; + // Given a ptr to the entire array, set ptr to point to a particular element + // within that array + ColumnValueDecoder encoderDecoder = immutableStorageScheme.getDecoder(); + return encoderDecoder.decode(ptr, index); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.decodedColumnQualifier = WritableUtils.readVInt(input); + int serializedEncodingScheme = WritableUtils.readVInt(input); + // prior to PHOENIX-4432 we weren't writing out the immutableStorageScheme in write(), + // so we use the decodedColumnQualifier sign to determine whether it's there + if (Integer.signum(serializedEncodingScheme) == -1) { + this.immutableStorageScheme = + ImmutableStorageScheme.fromSerializedValue((byte) WritableUtils.readVInt(input)); + serializedEncodingScheme = -serializedEncodingScheme; + } else { + this.immutableStorageScheme = ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; + } + this.encodingScheme = QualifierEncodingScheme.values()[serializedEncodingScheme]; + setKeyValueExpression(); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeVInt(output, decodedColumnQualifier); + WritableUtils.writeVInt(output, -encodingScheme.ordinal()); // negative since PHOENIX-4432 + WritableUtils.writeVInt(output, immutableStorageScheme.getSerializedMetadataValue()); + } + + public KeyValueColumnExpression getKeyValueExpression() { + return keyValueColumnExpression; + } + + private void setKeyValueExpression() { + final boolean isNullable = isNullable(); + final SortOrder sortOrder = getSortOrder(); + final Integer scale = getScale(); + final Integer maxLength = getMaxLength(); + final PDataType datatype = getDataType(); + this.keyValueColumnExpression = new KeyValueColumnExpression(new PDatum() { + @Override + public boolean isNullable() { + return isNullable; + } + + @Override + public SortOrder getSortOrder() { + return sortOrder; + } + + @Override + public Integer getScale() { + return scale; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public PDataType getDataType() { + return datatype; + } + }, getColumnFamily(), getPositionInArray()); + } + + @Override + public String toString() { + if (arrayColDisplayName == null) { + arrayColDisplayName = + SchemaUtil.getColumnDisplayName(getColumnFamily(), getColumnQualifier()); + } + return arrayColDisplayName; + } + + public byte[] getPositionInArray() { + return encodingScheme.encode(decodedColumnQualifier); + } + + @Override + public T accept(ExpressionVisitor visitor) { + // FIXME: this is ugly but can't think of a good solution. 
+ if (visitor instanceof ViewWhereExpressionVisitor) { + return visitor.visit(this); + } else { + return super.accept(visitor); + } + } + + @Override + public boolean equals(Object obj) { + if (obj.getClass() != SingleCellColumnExpression.class) return false; + return keyValueColumnExpression + .equals(((SingleCellColumnExpression) obj).getKeyValueExpression()); + } + + @Override + public int hashCode() { + return keyValueColumnExpression.hashCode(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellConstructorExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellConstructorExpression.java index 48485be4591..34258819d5f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellConstructorExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/SingleCellConstructorExpression.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.expression; @@ -27,76 +35,75 @@ * Expression used to create a single cell containing all the column values for a column family */ public class SingleCellConstructorExpression extends BaseCompoundExpression { - - private ImmutableStorageScheme immutableStorageScheme; - - public SingleCellConstructorExpression(ImmutableStorageScheme immutableStorageScheme, List children) { - super(children); - this.immutableStorageScheme = immutableStorageScheme; - } - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } + private ImmutableStorageScheme immutableStorageScheme; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - ColumnValueEncoder encoderDecoder = immutableStorageScheme.getEncoder(children.size()); - for (int i=0; i < children.size(); i++) { - Expression child = children.get(i); - if (!child.evaluate(tuple, ptr)) { - encoderDecoder.appendAbsentValue(); - } else { - encoderDecoder.appendValue(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - } - byte[] bytes = encoderDecoder.encode(); - ptr.set(bytes, 0, bytes.length); - return true; - } + public SingleCellConstructorExpression(ImmutableStorageScheme immutableStorageScheme, + List children) { + super(children); + this.immutableStorageScheme = immutableStorageScheme; + } + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.immutableStorageScheme = WritableUtils.readEnum(input, ImmutableStorageScheme.class); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + ColumnValueEncoder encoderDecoder = immutableStorageScheme.getEncoder(children.size()); + for (int i = 0; i < children.size(); i++) { + Expression child = children.get(i); + if (!child.evaluate(tuple, ptr)) { + encoderDecoder.appendAbsentValue(); + } else { + encoderDecoder.appendValue(ptr.get(), ptr.getOffset(), ptr.getLength()); + } } + byte[] bytes = encoderDecoder.encode(); + ptr.set(bytes, 0, bytes.length); + return true; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeEnum(output, immutableStorageScheme); - } - - @Override - public boolean requiresFinalEvaluation() { - return true; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("["); - if (children.size()==0) - return buf.append("]").toString(); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ","); - } - buf.append(children.get(children.size()-1) + "]"); - return buf.toString(); - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.immutableStorageScheme = WritableUtils.readEnum(input, ImmutableStorageScheme.class); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeEnum(output, immutableStorageScheme); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public boolean requiresFinalEvaluation() { + return true; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("["); + if (children.size() == 0) return buf.append("]").toString(); + for (int i = 0; i < children.size() - 1; i++) { 
+ buf.append(children.get(i) + ","); } + buf.append(children.get(children.size() - 1) + "]"); + return buf.toString(); + } - public SingleCellConstructorExpression clone(List children) { - return new SingleCellConstructorExpression(immutableStorageScheme, children); + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } -} \ No newline at end of file + return t; + } + + public SingleCellConstructorExpression clone(List children) { + return new SingleCellConstructorExpression(immutableStorageScheme, children); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java index 10c5fd4ca7c..7624f646613 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringBasedLikeExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,24 +26,24 @@ public class StringBasedLikeExpression extends LikeExpression { - public StringBasedLikeExpression() { - } + public StringBasedLikeExpression() { + } - public StringBasedLikeExpression(List children) { - super(children); - } + public StringBasedLikeExpression(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JavaPattern(value, Pattern.DOTALL); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JavaPattern(value, Pattern.DOTALL); + } - public static LikeExpression create(List children, LikeType likeType) { - return new StringBasedLikeExpression(addLikeTypeChild(children, likeType)); - } + public static LikeExpression create(List children, LikeType likeType) { + return new StringBasedLikeExpression(addLikeTypeChild(children, likeType)); + } - @Override - public LikeExpression clone(List children) { - return new StringBasedLikeExpression(children); - } + @Override + public LikeExpression clone(List children) { + return new StringBasedLikeExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringConcatExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringConcatExpression.java index dc9868df914..f928c35a7c3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringConcatExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/StringConcatExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.expression; - import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -28,83 +27,81 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.ByteUtil; - /** - * * Implementation for || string concatenation expression. - * * @since 0.1 */ public class StringConcatExpression extends BaseCompoundExpression { - public StringConcatExpression() { - } + public StringConcatExpression() { + } - public StringConcatExpression(List children) { - super(children); - } + public StringConcatExpression(List children) { + super(children); + } - @Override - public String toString() { - StringBuilder buf = new StringBuilder("("); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + " || "); - } - buf.append(children.get(children.size()-1)); - buf.append(')'); - return buf.toString(); + @Override + public String toString() { + StringBuilder buf = new StringBuilder("("); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + " || "); } + buf.append(children.get(children.size() - 1)); + buf.append(')'); + return buf.toString(); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } - @Override - public boolean requiresFinalEvaluation() { - return true; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - byte[] result = ByteUtil.EMPTY_BYTE_ARRAY; - for (int i=0; i children) { - super(children); - } + public SubtractExpression(List children) { + super(children); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; - } - - @Override - public String getOperatorString() { - return " - "; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } + + @Override + public String getOperatorString() { + return " - "; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampAddExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampAddExpression.java index 6d1f214ce5b..067cf0c6398 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampAddExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampAddExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,62 +34,61 @@ import org.apache.phoenix.util.DateUtil; /** - * * Class to encapsulate addition arithmetic for {@link org.apache.phoenix.schema.types.PTimestamp}. - * - * * @since 2.1.3 */ public class TimestampAddExpression extends AddExpression { - public TimestampAddExpression() { - } + public TimestampAddExpression() { + } - public TimestampAddExpression(List children) { - super(children); - } + public TimestampAddExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal finalResult = BigDecimal.ZERO; - - for(int i=0; i children) { - return new TimestampAddExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new TimestampAddExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampSubtractExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampSubtractExpression.java index 115bc16c2d2..0b1accad2b0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampSubtractExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/TimestampSubtractExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,65 +31,66 @@ import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.util.DateUtil; + /** - * - * Class to encapsulate subtraction arithmetic for {@link org.apache.phoenix.schema.types.PTimestamp}. - * - * + * Class to encapsulate subtraction arithmetic for + * {@link org.apache.phoenix.schema.types.PTimestamp}. * @since 2.1.3 */ public class TimestampSubtractExpression extends SubtractExpression { - public TimestampSubtractExpression() { - } + public TimestampSubtractExpression() { + } - public TimestampSubtractExpression(List children) { - super(children); - } + public TimestampSubtractExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal finalResult = BigDecimal.ZERO; - - for(int i=0; i children) { - return new TimestampSubtractExpression(children); - } + @Override + public ArithmeticExpression clone(List children) { + return new TimestampSubtractExpression(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregator.java index 6e570254cbf..76a1bb242c9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,33 +21,28 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.schema.tuple.Tuple; - /** - * * Interface to abstract the incremental calculation of an aggregated value. - * - * * @since 0.1 */ public interface Aggregator extends Expression { - - /** - * Incrementally aggregate the value with the current row - * @param tuple the result containing all the key values of the row - * @param ptr the bytes pointer to the underlying result - */ - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr); - - /** - * Get the size in bytes - */ - public int getSize(); - - /** - * Determines whether or not we should track the heap size as - * this aggregator is executing on the server side. - * @return true if the size should be tracked and false - * otherwise. - */ - public boolean trackSize(); + + /** + * Incrementally aggregate the value with the current row + * @param tuple the result containing all the key values of the row + * @param ptr the bytes pointer to the underlying result + */ + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr); + + /** + * Get the size in bytes + */ + public int getSize(); + + /** + * Determines whether or not we should track the heap size as this aggregator is executing on the + * server side. + * @return true if the size should be tracked and false otherwise. + */ + public boolean trackSize(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregators.java index b1dc6586506..08dac017814 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/Aggregators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,110 +25,107 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.SizedUtil; - /** - * * Represents an ordered list of Aggregators - * - * * @since 0.1 */ abstract public class Aggregators { - protected final int estimatedByteSize; - protected final KeyValueSchema schema; - protected final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - protected final ValueBitSet valueSet; - protected final Aggregator[] aggregators; - protected final SingleAggregateFunction[] functions; - - public int getEstimatedByteSize() { - return estimatedByteSize; - } - - public Aggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, int minNullableIndex) { - this.functions = functions; - this.aggregators = aggregators; - this.estimatedByteSize = calculateSize(aggregators); - this.schema = newValueSchema(aggregators, minNullableIndex); - this.valueSet = ValueBitSet.newInstance(schema); - } - - public KeyValueSchema getValueSchema() { - return schema; - } - - public int getMinNullableIndex() { - return schema.getMinNullable(); - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(this.getClass().getName() + " [" + functions.length + "]:"); - for (int i = 0; i < functions.length; i++) { - SingleAggregateFunction function = functions[i]; - buf.append("\t" + i + ") " + function ); - } - return buf.toString(); - } - - /** - * Return the aggregate functions - */ - public SingleAggregateFunction[] getFunctions() { - return functions; - } - - /** - * Aggregate over aggregators - * @param result the single row Result from scan iteration - */ - abstract public void aggregate(Aggregator[] aggregators, Tuple result); - - protected static int calculateSize(Aggregator[] aggregators) { - - int size = SizedUtil.ARRAY_SIZE /*aggregators[]*/ + (SizedUtil.POINTER_SIZE * aggregators.length); - for (Aggregator aggregator : aggregators) { - size += aggregator.getSize(); - } - return size; - } - - /** - * Get the ValueSchema for the Aggregators - */ - private static KeyValueSchema newValueSchema(Aggregator[] aggregators, int minNullableIndex) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(minNullableIndex); - for (int i = 0; i < aggregators.length; i++) { - Aggregator aggregator = aggregators[i]; - builder.addField(aggregator); - } - return builder.build(); - } + protected final int estimatedByteSize; + protected final KeyValueSchema schema; + protected final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + protected final ValueBitSet valueSet; + protected final Aggregator[] aggregators; + protected final SingleAggregateFunction[] functions; - /** - * @return byte representation of the ValueSchema - */ - public byte[] toBytes(Aggregator[] aggregators) { - return schema.toBytes(aggregators, valueSet, ptr); - } - - public int getAggregatorCount() { - return aggregators.length; + public int getEstimatedByteSize() { + return estimatedByteSize; + } + + public Aggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, + int minNullableIndex) { + this.functions = functions; + this.aggregators = aggregators; + this.estimatedByteSize = calculateSize(aggregators); + this.schema = newValueSchema(aggregators, minNullableIndex); + 
this.valueSet = ValueBitSet.newInstance(schema); + } + + public KeyValueSchema getValueSchema() { + return schema; + } + + public int getMinNullableIndex() { + return schema.getMinNullable(); + } + + @Override + public String toString() { + StringBuilder buf = + new StringBuilder(this.getClass().getName() + " [" + functions.length + "]:"); + for (int i = 0; i < functions.length; i++) { + SingleAggregateFunction function = functions[i]; + buf.append("\t" + i + ") " + function); } + return buf.toString(); + } + + /** + * Return the aggregate functions + */ + public SingleAggregateFunction[] getFunctions() { + return functions; + } - public Aggregator[] getAggregators() { - return aggregators; + /** + * Aggregate over aggregators + * @param result the single row Result from scan iteration + */ + abstract public void aggregate(Aggregator[] aggregators, Tuple result); + + protected static int calculateSize(Aggregator[] aggregators) { + + int size = + SizedUtil.ARRAY_SIZE /* aggregators[] */ + (SizedUtil.POINTER_SIZE * aggregators.length); + for (Aggregator aggregator : aggregators) { + size += aggregator.getSize(); } - - abstract public Aggregator[] newAggregators(); - - public void reset(Aggregator[] aggregators) { - for (int i = 0; i < aggregators.length; i++) { - aggregators[i].reset(); - } + return size; + } + + /** + * Get the ValueSchema for the Aggregators + */ + private static KeyValueSchema newValueSchema(Aggregator[] aggregators, int minNullableIndex) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(minNullableIndex); + for (int i = 0; i < aggregators.length; i++) { + Aggregator aggregator = aggregators[i]; + builder.addField(aggregator); } - - protected Aggregator getAggregator(int position) { - return aggregators[position]; + return builder.build(); + } + + /** Returns byte representation of the ValueSchema */ + public byte[] toBytes(Aggregator[] aggregators) { + return schema.toBytes(aggregators, valueSet, ptr); + } + + public int getAggregatorCount() { + return aggregators.length; + } + + public Aggregator[] getAggregators() { + return aggregators; + } + + abstract public Aggregator[] newAggregators(); + + public void reset(Aggregator[] aggregators) { + for (int i = 0; i < aggregators.length; i++) { + aggregators[i].reset(); } + } + + protected Aggregator getAggregator(int position) { + return aggregators[position]; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseAggregator.java index 903b2ec1e8d..23164671a9d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,54 +17,50 @@ */ package org.apache.phoenix.expression.aggregator; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.BaseTerminalExpression; import org.apache.phoenix.expression.visitor.ExpressionVisitor; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.util.SizedUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.util.SizedUtil; /** * Base class for Aggregator implementations - * - * * @since 0.1 */ public abstract class BaseAggregator extends BaseTerminalExpression implements Aggregator { - - protected final SortOrder sortOrder; - - public BaseAggregator(SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - this.sortOrder = sortOrder; - } - - @Override - public boolean isNullable() { - return true; - } - - @Override - public int getSize() { - return SizedUtil.OBJECT_SIZE; - } - - ImmutableBytesWritable evalClientAggs(Aggregator clientAgg) { - CountAggregator ca = (CountAggregator)clientAgg; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ca.evaluate(null, ptr); - return ptr; - } - - @Override - public T accept(ExpressionVisitor visitor) { - return null; - } - @Override - public boolean trackSize() { - return false; - } + protected final SortOrder sortOrder; + + public BaseAggregator(SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + this.sortOrder = sortOrder; + } + + @Override + public boolean isNullable() { + return true; + } + + @Override + public int getSize() { + return SizedUtil.OBJECT_SIZE; + } + + ImmutableBytesWritable evalClientAggs(Aggregator clientAgg) { + CountAggregator ca = (CountAggregator) clientAgg; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ca.evaluate(null, ptr); + return ptr; + } + + @Override + public T accept(ExpressionVisitor visitor) { + return null; + } + + @Override + public boolean trackSize() { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseDecimalStddevAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseDecimalStddevAggregator.java index 0583f3550ca..560200b71b3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseDecimalStddevAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseDecimalStddevAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,85 +26,84 @@ import org.apache.phoenix.expression.ColumnExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.BigDecimalUtil; import org.apache.phoenix.util.BigDecimalUtil.Operation; /** - * - * * @since 1.2.1 */ public abstract class BaseDecimalStddevAggregator extends DistinctValueWithCountClientAggregator { - private int colPrecision; - private int colScale; + private int colPrecision; + private int colScale; - public BaseDecimalStddevAggregator(List exps, SortOrder sortOrder) { - super(sortOrder); - ColumnExpression stdDevColExp = (ColumnExpression)exps.get(0); - this.colPrecision = stdDevColExp.getMaxLength(); - this.colScale = stdDevColExp.getScale(); - } + public BaseDecimalStddevAggregator(List exps, SortOrder sortOrder) { + super(sortOrder); + ColumnExpression stdDevColExp = (ColumnExpression) exps.get(0); + this.colPrecision = stdDevColExp.getMaxLength(); + this.colScale = stdDevColExp.getScale(); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (cachedResult == null) { - BigDecimal ssd = sumSquaredDeviation(); - ssd = ssd.divide(new BigDecimal(getDataPointsCount()), PDataType.DEFAULT_MATH_CONTEXT); - // Calculate the precision for the stddev result. - // There are totalCount #Decimal values for which we are calculating the stddev - // The resultant precision depends on precision and scale of all these values. (See - // BigDecimalUtil.getResultPrecisionScale) - // As of now we are not using the actual precision and scale of individual values but just using the table - // column's max length(precision) and scale for each of the values. - int resultPrecision = colPrecision; - for (int i = 1; i < this.totalCount; i++) { - // Max precision that we can support is 38 See PDataType.MAX_PRECISION - if (resultPrecision >= PDataType.MAX_PRECISION) break; - Pair precisionScale = BigDecimalUtil.getResultPrecisionScale(this.colPrecision, - this.colScale, this.colPrecision, this.colScale, Operation.OTHERS); - resultPrecision = precisionScale.getFirst(); - } - BigDecimal result = new BigDecimal(Math.sqrt(ssd.doubleValue()), new MathContext(resultPrecision, - RoundingMode.HALF_UP)); - cachedResult = result.setScale(this.colScale, RoundingMode.HALF_UP); - } - if (buffer == null) { - initBuffer(); - } - buffer = PDecimal.INSTANCE.toBytes(cachedResult); - ptr.set(buffer); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (cachedResult == null) { + BigDecimal ssd = sumSquaredDeviation(); + ssd = ssd.divide(new BigDecimal(getDataPointsCount()), PDataType.DEFAULT_MATH_CONTEXT); + // Calculate the precision for the stddev result. + // There are totalCount #Decimal values for which we are calculating the stddev + // The resultant precision depends on precision and scale of all these values. 
(See + // BigDecimalUtil.getResultPrecisionScale) + // As of now we are not using the actual precision and scale of individual values but just + // using the table + // column's max length(precision) and scale for each of the values. + int resultPrecision = colPrecision; + for (int i = 1; i < this.totalCount; i++) { + // Max precision that we can support is 38 See PDataType.MAX_PRECISION + if (resultPrecision >= PDataType.MAX_PRECISION) break; + Pair precisionScale = BigDecimalUtil.getResultPrecisionScale( + this.colPrecision, this.colScale, this.colPrecision, this.colScale, Operation.OTHERS); + resultPrecision = precisionScale.getFirst(); + } + BigDecimal result = new BigDecimal(Math.sqrt(ssd.doubleValue()), + new MathContext(resultPrecision, RoundingMode.HALF_UP)); + cachedResult = result.setScale(this.colScale, RoundingMode.HALF_UP); + } + if (buffer == null) { + initBuffer(); } + buffer = PDecimal.INSTANCE.toBytes(cachedResult); + ptr.set(buffer); + return true; + } - protected abstract long getDataPointsCount(); + protected abstract long getDataPointsCount(); - private BigDecimal sumSquaredDeviation() { - BigDecimal m = mean(); - BigDecimal result = BigDecimal.ZERO; - for (Entry entry : valueVsCount.entrySet()) { - BigDecimal colValue = (BigDecimal) PDecimal.INSTANCE.toObject(entry.getKey()); - BigDecimal delta = colValue.subtract(m); - result = result.add(delta.multiply(delta).multiply(new BigDecimal(entry.getValue()))); - } - return result; + private BigDecimal sumSquaredDeviation() { + BigDecimal m = mean(); + BigDecimal result = BigDecimal.ZERO; + for (Entry entry : valueVsCount.entrySet()) { + BigDecimal colValue = (BigDecimal) PDecimal.INSTANCE.toObject(entry.getKey()); + BigDecimal delta = colValue.subtract(m); + result = result.add(delta.multiply(delta).multiply(new BigDecimal(entry.getValue()))); } + return result; + } - private BigDecimal mean() { - BigDecimal sum = BigDecimal.ZERO; - for (Entry entry : valueVsCount.entrySet()) { - BigDecimal colValue = (BigDecimal) PDecimal.INSTANCE.toObject(entry.getKey()); - sum = sum.add(colValue.multiply(new BigDecimal(entry.getValue()))); - } - return sum.divide(new BigDecimal(totalCount), PDataType.DEFAULT_MATH_CONTEXT); + private BigDecimal mean() { + BigDecimal sum = BigDecimal.ZERO; + for (Entry entry : valueVsCount.entrySet()) { + BigDecimal colValue = (BigDecimal) PDecimal.INSTANCE.toObject(entry.getKey()); + sum = sum.add(colValue.multiply(new BigDecimal(entry.getValue()))); } + return sum.divide(new BigDecimal(totalCount), PDataType.DEFAULT_MATH_CONTEXT); + } - @Override - protected PDataType getResultDataType() { - return PDecimal.INSTANCE; - } + @Override + protected PDataType getResultDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseStddevAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseStddevAggregator.java index cb9861a1730..449b623f731 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseStddevAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/BaseStddevAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,65 +24,65 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.schema.types.PDouble; /** - * - * * @since 1.2.1 */ public abstract class BaseStddevAggregator extends DistinctValueWithCountClientAggregator { - protected Expression stdDevColExp; + protected Expression stdDevColExp; - public BaseStddevAggregator(List exps, SortOrder sortOrder) { - super(sortOrder); - this.stdDevColExp = exps.get(0); - } + public BaseStddevAggregator(List exps, SortOrder sortOrder) { + super(sortOrder); + this.stdDevColExp = exps.get(0); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (cachedResult == null) { - double ssd = sumSquaredDeviation(); - double result = Math.sqrt(ssd / getDataPointsCount()); - cachedResult = new BigDecimal(result); - } - if (buffer == null) { - initBuffer(); - } - buffer = PDecimal.INSTANCE.toBytes(cachedResult); - ptr.set(buffer); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (cachedResult == null) { + double ssd = sumSquaredDeviation(); + double result = Math.sqrt(ssd / getDataPointsCount()); + cachedResult = new BigDecimal(result); } - - protected abstract long getDataPointsCount(); - - private double sumSquaredDeviation() { - double m = mean(); - double result = 0.0; - for (Entry entry : valueVsCount.entrySet()) { - double colValue = (Double) PDouble.INSTANCE.toObject(entry.getKey(), this.stdDevColExp.getDataType()); - double delta = colValue - m; - result += (delta * delta) * entry.getValue(); - } - return result; + if (buffer == null) { + initBuffer(); } + buffer = PDecimal.INSTANCE.toBytes(cachedResult); + ptr.set(buffer); + return true; + } + + protected abstract long getDataPointsCount(); - private double mean() { - double sum = 0.0; - for (Entry entry : valueVsCount.entrySet()) { - double colValue = (Double) PDouble.INSTANCE.toObject(entry.getKey(), this.stdDevColExp.getDataType()); - sum += colValue * entry.getValue(); - } - return sum / totalCount; + private double sumSquaredDeviation() { + double m = mean(); + double result = 0.0; + for (Entry entry : valueVsCount.entrySet()) { + double colValue = + (Double) PDouble.INSTANCE.toObject(entry.getKey(), this.stdDevColExp.getDataType()); + double delta = colValue - m; + result += (delta * delta) * entry.getValue(); } - - @Override - protected PDataType getResultDataType() { - return PDecimal.INSTANCE; + return result; + } + + private double mean() { + double sum = 0.0; + for (Entry entry : valueVsCount.entrySet()) { + double colValue = + (Double) PDouble.INSTANCE.toObject(entry.getKey(), this.stdDevColExp.getDataType()); + sum += colValue * entry.getValue(); } + return sum / totalCount; + } + + @Override + protected PDataType getResultDataType() { + return PDecimal.INSTANCE; 
+ } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ClientAggregators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ClientAggregators.java index 54d569028a2..9d0fe177fdd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ClientAggregators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ClientAggregators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,55 +24,51 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.TupleUtil; - - /** - * * Aggregators that execute on the client-side - * - * * @since 0.1 */ public class ClientAggregators extends Aggregators { - private final ValueBitSet tempValueSet; - - private static Aggregator[] getAggregators(List aggFuncs) { - Aggregator[] aggregators = new Aggregator[aggFuncs.size()]; - for (int i = 0; i < aggregators.length; i++) { - aggregators[i] = aggFuncs.get(i).getAggregator(); - } - return aggregators; - } - - public ClientAggregators(List functions, int minNullableIndex) { - super(functions.toArray(new SingleAggregateFunction[functions.size()]), getAggregators(functions), minNullableIndex); - this.tempValueSet = ValueBitSet.newInstance(schema); + private final ValueBitSet tempValueSet; + + private static Aggregator[] getAggregators(List aggFuncs) { + Aggregator[] aggregators = new Aggregator[aggFuncs.size()]; + for (int i = 0; i < aggregators.length; i++) { + aggregators[i] = aggFuncs.get(i).getAggregator(); } - - @Override - public void aggregate(Aggregator[] aggregators, Tuple result) { - TupleUtil.getAggregateValue(result, ptr); - tempValueSet.clear(); - tempValueSet.or(ptr); + return aggregators; + } - int i = 0, maxOffset = ptr.getOffset() + ptr.getLength(); - Boolean hasValue; - schema.iterator(ptr); - while ((hasValue=schema.next(ptr, i, maxOffset, tempValueSet)) != null) { - if (hasValue) { - aggregators[i].aggregate(result, ptr); - } - i++; - } + public ClientAggregators(List functions, int minNullableIndex) { + super(functions.toArray(new SingleAggregateFunction[functions.size()]), + getAggregators(functions), minNullableIndex); + this.tempValueSet = ValueBitSet.newInstance(schema); + } + + @Override + public void aggregate(Aggregator[] aggregators, Tuple result) { + TupleUtil.getAggregateValue(result, ptr); + tempValueSet.clear(); + tempValueSet.or(ptr); + + int i = 0, maxOffset = ptr.getOffset() + ptr.getLength(); + Boolean hasValue; + schema.iterator(ptr); + while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) { + if (hasValue) { + aggregators[i].aggregate(result, ptr); + } + i++; } - - @Override - public Aggregator[] newAggregators() { - Aggregator[] aggregators = new Aggregator[functions.length]; - for (int i = 0; i < functions.length; i++) { - aggregators[i] = functions[i].newClientAggregator(); - } - return aggregators; + } + + @Override + public Aggregator[] newAggregators() { + Aggregator[] aggregators = new Aggregator[functions.length]; + for (int i = 0; i < functions.length; i++) { + aggregators[i] = functions[i].newClientAggregator(); } - + return aggregators; + } + } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/CountAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/CountAggregator.java index bd6725bfc6f..bbfa7e3a816 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/CountAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/CountAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,74 +18,71 @@ package org.apache.phoenix.expression.aggregator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.SizedUtil; /** - * * Aggregator for COUNT aggregations - * - * * @since 0.1 */ public class CountAggregator extends BaseAggregator { - private long count = 0; - private byte[] buffer = null; + private long count = 0; + private byte[] buffer = null; - public CountAggregator() { - super(SortOrder.getDefault()); - } + public CountAggregator() { + super(SortOrder.getDefault()); + } - public CountAggregator(LongSumAggregator clientAgg) { - this(); - count = clientAgg.getSum(); - } + public CountAggregator(LongSumAggregator clientAgg) { + this(); + count = clientAgg.getSum(); + } - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - count++; - } + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + count++; + } - @Override - public boolean isNullable() { - return false; - } + @Override + public boolean isNullable() { + return false; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (buffer == null) { - buffer = new byte[getDataType().getByteSize()]; - } - getDataType().getCodec().encodeLong(count, buffer, 0); - ptr.set(buffer); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (buffer == null) { + buffer = new byte[getDataType().getByteSize()]; } + getDataType().getCodec().encodeLong(count, buffer, 0); + ptr.set(buffer); + return true; + } - @Override - public final PDataType getDataType() { - return PLong.INSTANCE; - } + @Override + public final PDataType getDataType() { + return PLong.INSTANCE; + } - @Override - public void reset() { - count = 0; - buffer = null; - super.reset(); - } + @Override + public void reset() { + count = 0; + buffer = null; + super.reset(); + } - @Override - public String toString() { - return "COUNT [count=" + count + "]"; - } + @Override + public String toString() { + return "COUNT [count=" + count + "]"; + } - @Override - public int getSize() { - return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE - + getDataType().getByteSize(); - } + @Override + public int getSize() { + return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE + + getDataType().getByteSize(); + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevPopAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevPopAggregator.java index 7372c0385f2..18e0d25a29d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevPopAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevPopAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,16 @@ /** * Client side Aggregator for STDDEV_POP aggregations for DECIMAL data type. - * - * * @since 1.2.1 */ public class DecimalStddevPopAggregator extends BaseDecimalStddevAggregator { - public DecimalStddevPopAggregator(List exps, SortOrder sortOrder) { - super(exps, sortOrder); - } + public DecimalStddevPopAggregator(List exps, SortOrder sortOrder) { + super(exps, sortOrder); + } - @Override - protected long getDataPointsCount() { - return totalCount; - } + @Override + protected long getDataPointsCount() { + return totalCount; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevSampAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevSampAggregator.java index 7962c682559..20d5178b997 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevSampAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalStddevSampAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,16 @@ /** * Client side Aggregator for STDDEV_SAMP aggregations for DECIMAL data type. - * - * * @since 1.2.1 */ public class DecimalStddevSampAggregator extends BaseDecimalStddevAggregator { - public DecimalStddevSampAggregator(List exps, SortOrder sortOrder) { - super(exps, sortOrder); - } + public DecimalStddevSampAggregator(List exps, SortOrder sortOrder) { + super(exps, sortOrder); + } - @Override - protected long getDataPointsCount() { - return totalCount - 1; - } + @Override + protected long getDataPointsCount() { + return totalCount - 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalSumAggregator.java index 4fa46b137b1..b24cc1de967 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DecimalSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,83 +20,79 @@ import java.math.BigDecimal; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.SizedUtil; - /** - * * Aggregator that sums BigDecimal values - * - * * @since 0.1 */ public class DecimalSumAggregator extends BaseAggregator { - private BigDecimal sum = BigDecimal.ZERO; - private byte[] sumBuffer; - - public DecimalSumAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { - super(sortOrder); - if (ptr != null) { - initBuffer(); - sum = (BigDecimal) PDecimal.INSTANCE.toObject(ptr); - } - } - - private PDataType getInputDataType() { - return PDecimal.INSTANCE; - } - - private int getBufferLength() { - return getDataType().getByteSize(); - } + private BigDecimal sum = BigDecimal.ZERO; + private byte[] sumBuffer; - private void initBuffer() { - sumBuffer = new byte[getBufferLength()]; - } - - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - BigDecimal value = (BigDecimal)getDataType().toObject(ptr, getInputDataType(), sortOrder); - sum = sum.add(value); - if (sumBuffer == null) { - sumBuffer = new byte[getDataType().getByteSize()]; - } - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (sumBuffer == null) { - return false; - } - int len = getDataType().toBytes(sum, sumBuffer, 0); - ptr.set(sumBuffer, 0, len); - return true; - } - - @Override - public final PDataType getDataType() { - return PDecimal.INSTANCE; - } - - @Override - public void reset() { - sum = BigDecimal.ZERO; - sumBuffer = null; - super.reset(); + public DecimalSumAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { + super(sortOrder); + if (ptr != null) { + initBuffer(); + sum = (BigDecimal) PDecimal.INSTANCE.toObject(ptr); } + } + + private PDataType getInputDataType() { + return PDecimal.INSTANCE; + } + + private int getBufferLength() { + return getDataType().getByteSize(); + } - @Override - public String toString() { - return "DECIMAL SUM [sum=" + sum + "]"; + private void initBuffer() { + sumBuffer = new byte[getBufferLength()]; + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + BigDecimal value = (BigDecimal) getDataType().toObject(ptr, getInputDataType(), sortOrder); + sum = sum.add(value); + if (sumBuffer == null) { + sumBuffer = new byte[getDataType().getByteSize()]; } + } - @Override - public int getSize() { - return super.getSize() + SizedUtil.BIG_DECIMAL_SIZE + SizedUtil.ARRAY_SIZE + getDataType().getByteSize(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (sumBuffer == null) { + return false; } + int len = getDataType().toBytes(sum, sumBuffer, 0); + ptr.set(sumBuffer, 0, len); + return true; + } + + @Override + public final PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public void reset() { + sum = BigDecimal.ZERO; + sumBuffer = null; + super.reset(); + } + + @Override + public String toString() { + return 
"DECIMAL SUM [sum=" + sum + "]"; + } + + @Override + public int getSize() { + return super.getSize() + SizedUtil.BIG_DECIMAL_SIZE + SizedUtil.ARRAY_SIZE + + getDataType().getByteSize(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctCountClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctCountClientAggregator.java index 9ef6cc03ebe..7377a7fb174 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctCountClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctCountClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,40 +18,37 @@ package org.apache.phoenix.expression.aggregator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; /** * Client side Aggregator for DISTINCT COUNT aggregations - * - * * @since 1.2.1 */ public class DistinctCountClientAggregator extends DistinctValueWithCountClientAggregator { - public DistinctCountClientAggregator(SortOrder sortOrder) { - super(sortOrder); - } + public DistinctCountClientAggregator(SortOrder sortOrder) { + super(sortOrder); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (buffer == null) { - initBuffer(); - } - if (cachedResult != null) { - buffer = PLong.INSTANCE.toBytes(cachedResult); - } else { - buffer = PLong.INSTANCE.toBytes(this.valueVsCount.size()); - } - ptr.set(buffer); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (buffer == null) { + initBuffer(); } - - @Override - protected PDataType getResultDataType() { - return PLong.INSTANCE; + if (cachedResult != null) { + buffer = PLong.INSTANCE.toBytes(cachedResult); + } else { + buffer = PLong.INSTANCE.toBytes(this.valueVsCount.size()); } + ptr.set(buffer); + return true; + } + + @Override + protected PDataType getResultDataType() { + return PLong.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java index d9d66d16b9d..d7e09cf3edf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,44 +19,45 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.schema.types.PArrayDataType; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PArrayDataType; +import org.apache.phoenix.schema.types.PDataType; public class DistinctValueClientAggregator extends DistinctValueWithCountClientAggregator { - private final PDataType valueType; - private final PDataType resultType; - - public DistinctValueClientAggregator(SortOrder sortOrder, PDataType valueType, PDataType resultType) { - super(sortOrder); - this.valueType = valueType; - this.resultType = resultType; - } + private final PDataType valueType; + private final PDataType resultType; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (cachedResult == null) { - Object[] values = new Object[valueVsCount.size()]; - int i = 0; - for (ImmutableBytesPtr key : valueVsCount.keySet()) { - values[i++] = valueType.toObject(key, sortOrder); - } - cachedResult = PArrayDataType.instantiatePhoenixArray(valueType, values); - } - buffer = resultType.toBytes(cachedResult, sortOrder); - ptr.set(buffer); - return true; - } + public DistinctValueClientAggregator(SortOrder sortOrder, PDataType valueType, + PDataType resultType) { + super(sortOrder); + this.valueType = valueType; + this.resultType = resultType; + } - @Override - protected PDataType getResultDataType() { - return resultType; - } - - @Override - protected int getBufferLength() { - return 0; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (cachedResult == null) { + Object[] values = new Object[valueVsCount.size()]; + int i = 0; + for (ImmutableBytesPtr key : valueVsCount.keySet()) { + values[i++] = valueType.toObject(key, sortOrder); + } + cachedResult = PArrayDataType.instantiatePhoenixArray(valueType, values); } + buffer = resultType.toBytes(cachedResult, sortOrder); + ptr.set(buffer); + return true; + } + + @Override + protected PDataType getResultDataType() { + return resultType; + } + + @Override + protected int getBufferLength() { + return 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java index cea6d49c636..55288e7539b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,120 +31,124 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PVarbinary; import org.iq80.snappy.Snappy; /** - * Client side Aggregator which will aggregate data and find distinct values with number of occurrences for each. - * - * + * Client side Aggregator which will aggregate data and find distinct values with number of + * occurrences for each. * @since 1.2.1 */ public abstract class DistinctValueWithCountClientAggregator extends BaseAggregator { - protected Map valueVsCount = new HashMap(); - protected byte[] buffer; - protected long totalCount = 0L; - protected Object cachedResult; + protected Map valueVsCount = + new HashMap(); + protected byte[] buffer; + protected long totalCount = 0L; + protected Object cachedResult; - public DistinctValueWithCountClientAggregator(SortOrder sortOrder) { - super(sortOrder); - } + public DistinctValueWithCountClientAggregator(SortOrder sortOrder) { + super(sortOrder); + } - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - if (tuple instanceof SingleKeyValueTuple) { - // Case when scanners do look ahead and re-aggregate result row.The result is already available in the ptr - PDataType resultDataType = getResultDataType(); - cachedResult = resultDataType.toObject(ptr, resultDataType, sortOrder); + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + if (tuple instanceof SingleKeyValueTuple) { + // Case when scanners do look ahead and re-aggregate result row.The result is already + // available in the ptr + PDataType resultDataType = getResultDataType(); + cachedResult = resultDataType.toObject(ptr, resultDataType, sortOrder); + } else { + InputStream is; + try { + if ( + Bytes.equals(ptr.get(), ptr.getOffset(), 1, + DistinctValueWithCountServerAggregator.COMPRESS_MARKER, 0, 1) + ) { + // This reads the uncompressed length from the front of the compressed input + int uncompressedLength = Snappy.getUncompressedLength(ptr.get(), ptr.getOffset() + 1); + byte[] uncompressed = new byte[uncompressedLength]; + // This will throw CorruptionException, a RuntimeException if the snappy data is invalid. + // We're making a RuntimeException out of a checked IOException below so assume it's ok + // to let any CorruptionException escape. 
+ Snappy.uncompress(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1, uncompressed, 0); + is = new ByteArrayInputStream(uncompressed, 0, uncompressedLength); } else { - InputStream is; - try { - if (Bytes.equals(ptr.get(), ptr.getOffset(), 1, DistinctValueWithCountServerAggregator.COMPRESS_MARKER, - 0, 1)) { - // This reads the uncompressed length from the front of the compressed input - int uncompressedLength = Snappy.getUncompressedLength(ptr.get(), ptr.getOffset() + 1); - byte[] uncompressed = new byte[uncompressedLength]; - // This will throw CorruptionException, a RuntimeException if the snappy data is invalid. - // We're making a RuntimeException out of a checked IOException below so assume it's ok - // to let any CorruptionException escape. - Snappy.uncompress(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1, uncompressed, 0); - is = new ByteArrayInputStream(uncompressed, 0, uncompressedLength); - } else { - is = new ByteArrayInputStream(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1); - } - DataInputStream in = new DataInputStream(is); - int mapSize = WritableUtils.readVInt(in); - for (int i = 0; i < mapSize; i++) { - int keyLen = WritableUtils.readVInt(in); - byte[] keyBytes = new byte[keyLen]; - in.read(keyBytes, 0, keyLen); - ImmutableBytesPtr key = new ImmutableBytesPtr(keyBytes); - int value = WritableUtils.readVInt(in); - Integer curCount = valueVsCount.get(key); - if (curCount == null) { - valueVsCount.put(key, value); - } else { - valueVsCount.put(key, curCount + value); - } - totalCount += value; - } - } catch (IOException ioe) { - throw new RuntimeException(ioe); // Impossible as we're using a ByteArrayInputStream - } + is = new ByteArrayInputStream(ptr.get(), ptr.getOffset() + 1, ptr.getLength() - 1); } - if (buffer == null) { - initBuffer(); + DataInputStream in = new DataInputStream(is); + int mapSize = WritableUtils.readVInt(in); + for (int i = 0; i < mapSize; i++) { + int keyLen = WritableUtils.readVInt(in); + byte[] keyBytes = new byte[keyLen]; + in.read(keyBytes, 0, keyLen); + ImmutableBytesPtr key = new ImmutableBytesPtr(keyBytes); + int value = WritableUtils.readVInt(in); + Integer curCount = valueVsCount.get(key); + if (curCount == null) { + valueVsCount.put(key, value); + } else { + valueVsCount.put(key, curCount + value); + } + totalCount += value; } + } catch (IOException ioe) { + throw new RuntimeException(ioe); // Impossible as we're using a ByteArrayInputStream + } } - - protected void initBuffer() { - buffer = new byte[getBufferLength()]; + if (buffer == null) { + initBuffer(); } + } - @Override - public boolean isNullable() { - return false; - } + protected void initBuffer() { + buffer = new byte[getBufferLength()]; + } - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } + @Override + public boolean isNullable() { + return false; + } - @Override - public void reset() { - valueVsCount = new HashMap(); - buffer = null; - totalCount = 0L; - cachedResult = null; - super.reset(); - } - - protected Map getSortedValueVsCount(final boolean ascending, final PDataType type) { - // To sort the valueVsCount - Comparator comparator = new Comparator() { - @Override - public int compare(Object o1, Object o2) { - if (ascending) { - return type.compareTo(o1, o2); - } - return type.compareTo(o2, o1); - } - }; - Map sorted = new TreeMap(comparator); - for (Entry entry : valueVsCount.entrySet()) { - sorted.put(type.toObject(entry.getKey(), sortOrder), entry.getValue()); + @Override + public PDataType getDataType() { + return 
PVarbinary.INSTANCE; + } + + @Override + public void reset() { + valueVsCount = new HashMap(); + buffer = null; + totalCount = 0L; + cachedResult = null; + super.reset(); + } + + protected Map getSortedValueVsCount(final boolean ascending, + final PDataType type) { + // To sort the valueVsCount + Comparator comparator = new Comparator() { + @Override + public int compare(Object o1, Object o2) { + if (ascending) { + return type.compareTo(o1, o2); } - return sorted; + return type.compareTo(o2, o1); + } + }; + Map sorted = new TreeMap(comparator); + for (Entry entry : valueVsCount.entrySet()) { + sorted.put(type.toObject(entry.getKey(), sortOrder), entry.getValue()); } + return sorted; + } - protected int getBufferLength() { - return getResultDataType().getByteSize(); - } + protected int getBufferLength() { + return getResultDataType().getByteSize(); + } - protected abstract PDataType getResultDataType(); + protected abstract PDataType getResultDataType(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java index af649004b74..272b235e3f1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueWithCountServerAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,131 +36,135 @@ import org.iq80.snappy.Snappy; /** - * Server side Aggregator which will aggregate data and find distinct values with number of occurrences for each. - * - * + * Server side Aggregator which will aggregate data and find distinct values with number of + * occurrences for each. 
* @since 1.2.1 */ public class DistinctValueWithCountServerAggregator extends BaseAggregator { - public static final int DEFAULT_ESTIMATED_DISTINCT_VALUES = 10000; - public static final byte[] COMPRESS_MARKER = new byte[] { (byte)1 }; - // copy a key unless it uses at least 10% of the backing array - private static final int COPY_THRESHOLD = 100/10; - // copy key only (make a new array) if the backing array is at least this size - // (to avoid ending up using _more_ memory) - private static final int FIXED_COPY_THRESHOLD = SizedUtil.ARRAY_SIZE * 2; - - private int compressThreshold; - private int heapSize = 0; - private byte[] buffer = null; - protected Map valueVsCount = new HashMap(); - - public DistinctValueWithCountServerAggregator(Configuration conf) { - super(SortOrder.getDefault()); - compressThreshold = conf.getInt(QueryServices.DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB, - QueryServicesOptions.DEFAULT_DISTINCT_VALUE_COMPRESS_THRESHOLD); - } + public static final int DEFAULT_ESTIMATED_DISTINCT_VALUES = 10000; + public static final byte[] COMPRESS_MARKER = new byte[] { (byte) 1 }; + // copy a key unless it uses at least 10% of the backing array + private static final int COPY_THRESHOLD = 100 / 10; + // copy key only (make a new array) if the backing array is at least this size + // (to avoid ending up using _more_ memory) + private static final int FIXED_COPY_THRESHOLD = SizedUtil.ARRAY_SIZE * 2; - public DistinctValueWithCountServerAggregator(Configuration conf, DistinctValueWithCountClientAggregator clientAgg) { - this(conf); - valueVsCount = clientAgg.valueVsCount; - } + private int compressThreshold; + private int heapSize = 0; + private byte[] buffer = null; + protected Map valueVsCount = + new HashMap(); - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - ImmutableBytesPtr key = ptr.get().length > FIXED_COPY_THRESHOLD && - ptr.get().length > ptr.getLength() * COPY_THRESHOLD ? - new ImmutableBytesPtr(ptr.copyBytes()) : - new ImmutableBytesPtr(ptr); - Integer count = this.valueVsCount.get(key); - if (count == null) { - this.valueVsCount.put(key, 1); - heapSize += SizedUtil.MAP_ENTRY_SIZE + // entry - Bytes.SIZEOF_INT + // key size - key.getLength() + SizedUtil.ARRAY_SIZE; // value size - } else { - this.valueVsCount.put(key, ++count); - } - } + public DistinctValueWithCountServerAggregator(Configuration conf) { + super(SortOrder.getDefault()); + compressThreshold = conf.getInt(QueryServices.DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB, + QueryServicesOptions.DEFAULT_DISTINCT_VALUE_COMPRESS_THRESHOLD); + } - @Override - public boolean isNullable() { - return false; - } + public DistinctValueWithCountServerAggregator(Configuration conf, + DistinctValueWithCountClientAggregator clientAgg) { + this(conf); + valueVsCount = clientAgg.valueVsCount; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // This serializes the Map. The format is as follows - // Map size(VInt ie. 1 to 5 bytes) + - // ( key length [VInt ie. 1 to 5 bytes] + key bytes + value [VInt ie. 
1 to 5 bytes] )* - int serializationSize = countMapSerializationSize(); - buffer = new byte[serializationSize]; - int offset = 1; - offset += ByteUtil.vintToBytes(buffer, offset, this.valueVsCount.size()); - for (Entry entry : this.valueVsCount.entrySet()) { - ImmutableBytesPtr key = entry.getKey(); - offset += ByteUtil.vintToBytes(buffer, offset, key.getLength()); - System.arraycopy(key.get(), key.getOffset(), buffer, offset, key.getLength()); - offset += key.getLength(); - offset += ByteUtil.vintToBytes(buffer, offset, entry.getValue().intValue()); - } - if (serializationSize > compressThreshold) { - // The size for the map serialization is above the threshold. We will do the Snappy compression here. - byte[] compressed = new byte[COMPRESS_MARKER.length + Snappy.maxCompressedLength(buffer.length)]; - System.arraycopy(COMPRESS_MARKER, 0, compressed, 0, COMPRESS_MARKER.length); - int compressedLen = Snappy.compress(buffer, 1, buffer.length - 1, compressed, COMPRESS_MARKER.length); - ptr.set(compressed, 0, compressedLen + 1); - return true; - } - ptr.set(buffer, 0, offset); - return true; + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + ImmutableBytesPtr key = + ptr.get().length > FIXED_COPY_THRESHOLD && ptr.get().length > ptr.getLength() * COPY_THRESHOLD + ? new ImmutableBytesPtr(ptr.copyBytes()) + : new ImmutableBytesPtr(ptr); + Integer count = this.valueVsCount.get(key); + if (count == null) { + this.valueVsCount.put(key, 1); + heapSize += SizedUtil.MAP_ENTRY_SIZE + // entry + Bytes.SIZEOF_INT + // key size + key.getLength() + SizedUtil.ARRAY_SIZE; // value size + } else { + this.valueVsCount.put(key, ++count); } + } - // The #bytes required to serialize the count map. - // Here let us assume to use 4 bytes for each of the int items. Normally it will consume lesser - // bytes as we will use vints. - // TODO Do we need to consider 5 as the number of bytes for each of the int field? Else there is - // a chance of ArrayIndexOutOfBoundsException when all the int fields are having very large - // values. Will that ever occur? - private int countMapSerializationSize() { - int size = Bytes.SIZEOF_INT;// Write the number of entries in the Map - for (ImmutableBytesPtr key : this.valueVsCount.keySet()) { - // Add up the key and key's lengths (Int) and the value - size += key.getLength() + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT; - } - return size; - } + @Override + public boolean isNullable() { + return false; + } - // The heap size which will be taken by the count map. - private int countMapHeapSize() { - return heapSize; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // This serializes the Map. The format is as follows + // Map size(VInt ie. 1 to 5 bytes) + + // ( key length [VInt ie. 1 to 5 bytes] + key bytes + value [VInt ie. 
1 to 5 bytes] )* + int serializationSize = countMapSerializationSize(); + buffer = new byte[serializationSize]; + int offset = 1; + offset += ByteUtil.vintToBytes(buffer, offset, this.valueVsCount.size()); + for (Entry entry : this.valueVsCount.entrySet()) { + ImmutableBytesPtr key = entry.getKey(); + offset += ByteUtil.vintToBytes(buffer, offset, key.getLength()); + System.arraycopy(key.get(), key.getOffset(), buffer, offset, key.getLength()); + offset += key.getLength(); + offset += ByteUtil.vintToBytes(buffer, offset, entry.getValue().intValue()); } - - @Override - public final PDataType getDataType() { - return PVarbinary.INSTANCE; + if (serializationSize > compressThreshold) { + // The size for the map serialization is above the threshold. We will do the Snappy + // compression here. + byte[] compressed = + new byte[COMPRESS_MARKER.length + Snappy.maxCompressedLength(buffer.length)]; + System.arraycopy(COMPRESS_MARKER, 0, compressed, 0, COMPRESS_MARKER.length); + int compressedLen = + Snappy.compress(buffer, 1, buffer.length - 1, compressed, COMPRESS_MARKER.length); + ptr.set(compressed, 0, compressedLen + 1); + return true; } + ptr.set(buffer, 0, offset); + return true; + } - @Override - public void reset() { - valueVsCount = new HashMap(); - heapSize = 0; - buffer = null; - super.reset(); + // The #bytes required to serialize the count map. + // Here let us assume to use 4 bytes for each of the int items. Normally it will consume lesser + // bytes as we will use vints. + // TODO Do we need to consider 5 as the number of bytes for each of the int field? Else there is + // a chance of ArrayIndexOutOfBoundsException when all the int fields are having very large + // values. Will that ever occur? + private int countMapSerializationSize() { + int size = Bytes.SIZEOF_INT;// Write the number of entries in the Map + for (ImmutableBytesPtr key : this.valueVsCount.keySet()) { + // Add up the key and key's lengths (Int) and the value + size += key.getLength() + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT; } + return size; + } - @Override - public String toString() { - return "DISTINCT VALUE vs COUNT"; - } + // The heap size which will be taken by the count map. + private int countMapHeapSize() { + return heapSize; + } - @Override - public int getSize() { - return super.getSize() + SizedUtil.ARRAY_SIZE + countMapHeapSize(); - } - - @Override - public boolean trackSize() { - return true; - } + @Override + public final PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + @Override + public void reset() { + valueVsCount = new HashMap(); + heapSize = 0; + buffer = null; + super.reset(); + } + + @Override + public String toString() { + return "DISTINCT VALUE vs COUNT"; + } + + @Override + public int getSize() { + return super.getSize() + SizedUtil.ARRAY_SIZE + countMapHeapSize(); + } + + @Override + public boolean trackSize() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DoubleSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DoubleSumAggregator.java index 3f953e61d2f..704b7039707 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DoubleSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/DoubleSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,76 +18,76 @@ package org.apache.phoenix.expression.aggregator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - -import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.util.SizedUtil; public class DoubleSumAggregator extends BaseAggregator { - - private double sum = 0; - private byte[] buffer; - public DoubleSumAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { - super(sortOrder); - if (ptr != null) { - initBuffer(); - sum = PDouble.INSTANCE.getCodec().decodeDouble(ptr, sortOrder); - } - } - - protected PDataType getInputDataType() { - return PDouble.INSTANCE; - } - - private void initBuffer() { - buffer = new byte[getDataType().getByteSize()]; - } + private double sum = 0; + private byte[] buffer; - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - double value = getInputDataType().getCodec().decodeDouble(ptr, sortOrder); - sum += value; - if (buffer == null) { - initBuffer(); - } + public DoubleSumAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { + super(sortOrder); + if (ptr != null) { + initBuffer(); + sum = PDouble.INSTANCE.getCodec().decodeDouble(ptr, sortOrder); } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (buffer == null) { - if (isNullable()) { - return false; - } - initBuffer(); - } - getDataType().getCodec().encodeDouble(sum, buffer, 0); - ptr.set(buffer); - return true; - } + protected PDataType getInputDataType() { + return PDouble.INSTANCE; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } - - @Override - public String toString() { - return "SUM [sum=" + sum + "]"; - } - - @Override - public void reset() { - sum = 0; - buffer = null; - super.reset(); + private void initBuffer() { + buffer = new byte[getDataType().getByteSize()]; + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + double value = getInputDataType().getCodec().decodeDouble(ptr, sortOrder); + sum += value; + if (buffer == null) { + initBuffer(); } - - @Override - public int getSize() { - return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE + getDataType().getByteSize(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (buffer == null) { + if (isNullable()) { + return false; + } + initBuffer(); } + getDataType().getCodec().encodeDouble(sum, buffer, 0); + ptr.set(buffer); + return true; + } + + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } + + @Override + public String toString() { + return "SUM [sum=" + sum + "]"; + } + + @Override + public void reset() { + sum = 0; + buffer = null; + super.reset(); + } + + @Override + public int getSize() { + return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE + + getDataType().getByteSize(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueBaseClientAggregator.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueBaseClientAggregator.java index 5c49312b7e8..40b188d8335 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueBaseClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueBaseClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,155 +38,149 @@ /** * Base client aggregator for (FIRST|LAST|NTH)_VALUE and (FIRST|LAST)_VALUES functions - * */ public class FirstLastValueBaseClientAggregator extends BaseAggregator { - protected boolean useOffset = false; - protected int offset = -1; - protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); - protected byte[] topValue = null; - protected TreeMap> topValues = new TreeMap>(new ByteArrayComparator()); - protected boolean isAscending; - protected PDataType dataType; - - // Set to true for retrieving multiple top values for FIRST_VALUES or LAST_VALUES - protected boolean isArrayReturnType = false; + protected boolean useOffset = false; + protected int offset = -1; + protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); + protected byte[] topValue = null; + protected TreeMap> topValues = + new TreeMap>(new ByteArrayComparator()); + protected boolean isAscending; + protected PDataType dataType; + + // Set to true for retrieving multiple top values for FIRST_VALUES or LAST_VALUES + protected boolean isArrayReturnType = false; + + public FirstLastValueBaseClientAggregator() { + super(SortOrder.getDefault()); + this.dataType = PVarbinary.INSTANCE; + } + + public FirstLastValueBaseClientAggregator(PDataType type) { + super(SortOrder.getDefault()); + this.dataType = (type == null) ? 
PVarbinary.INSTANCE : type; + } + + @Override + public void reset() { + topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); + topValue = null; + topValues.clear(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (useOffset) { + if (topValues.size() == 0) { + return false; + } + + Set>> entrySet; + if (isAscending) { + entrySet = topValues.entrySet(); + } else { + entrySet = topValues.descendingMap().entrySet(); + } + + int counter = 0; + ImmutableBytesWritable arrPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + for (Map.Entry> entry : entrySet) { + ListIterator it = entry.getValue().listIterator(); + while (it.hasNext()) { + if (isArrayReturnType) { + ImmutableBytesWritable newArrPtr = new ImmutableBytesWritable(it.next()); + PArrayDataType.appendItemToArray(newArrPtr, arrPtr.getLength(), arrPtr.getOffset(), + arrPtr.get(), PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE), + counter, null, sortOrder); + arrPtr = newArrPtr; + + if (++counter == offset) { + break; + } + } else { + if (++counter == offset) { + ptr.set(it.next()); + return true; + } + it.next(); + } + } + } - public FirstLastValueBaseClientAggregator() { - super(SortOrder.getDefault()); - this.dataType = PVarbinary.INSTANCE; - } + if (isArrayReturnType) { + ptr.set(arrPtr.get()); + return true; + } - public FirstLastValueBaseClientAggregator(PDataType type) { - super(SortOrder.getDefault()); - this.dataType = (type == null) ? PVarbinary.INSTANCE : type; + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - @Override - public void reset() { - topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); - topValue = null; - topValues.clear(); + if (topValue == null) { + return false; } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (useOffset) { - if (topValues.size() == 0) { - return false; - } - - Set>> entrySet; - if (isAscending) { - entrySet = topValues.entrySet(); - } else { - entrySet = topValues.descendingMap().entrySet(); - } - - int counter = 0; - ImmutableBytesWritable arrPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - for (Map.Entry> entry : entrySet) { - ListIterator it = entry.getValue().listIterator(); - while (it.hasNext()) { - if (isArrayReturnType) { - ImmutableBytesWritable newArrPtr = new ImmutableBytesWritable(it.next()); - PArrayDataType.appendItemToArray( - newArrPtr, - arrPtr.getLength(), - arrPtr.getOffset(), - arrPtr.get(), - PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE), - counter, - null, - sortOrder); - arrPtr = newArrPtr; - - if (++counter == offset) { - break; - } - } else { - if (++counter == offset) { - ptr.set(it.next()); - return true; - } - it.next(); - } - } - } - - if (isArrayReturnType) { - ptr.set(arrPtr.get()); - return true; - } - - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } + ptr.set(topValue); + return true; + } - if (topValue == null) { - return false; - } + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - ptr.set(topValue); - return true; + // if is called cause aggregation in ORDER BY clause + if (tuple instanceof SingleKeyValueTuple) { + topValue = ptr.copyBytes(); + return; } - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - - //if is called cause aggregation in ORDER BY clause - if (tuple instanceof SingleKeyValueTuple) { - topValue = ptr.copyBytes(); - return; - } - - FirstLastNthValueDataContainer payload = new 
FirstLastNthValueDataContainer(); + FirstLastNthValueDataContainer payload = new FirstLastNthValueDataContainer(); - payload.setPayload(ptr.copyBytes()); - isAscending = payload.getIsAscending(); - TreeMap> serverAggregatorResult = payload.getData(); + payload.setPayload(ptr.copyBytes()); + isAscending = payload.getIsAscending(); + TreeMap> serverAggregatorResult = payload.getData(); - if (useOffset) { - //merge topValues - for (Entry> entry : serverAggregatorResult.entrySet()) { - byte[] itemKey = entry.getKey(); - LinkedList itemList = entry.getValue(); + if (useOffset) { + // merge topValues + for (Entry> entry : serverAggregatorResult.entrySet()) { + byte[] itemKey = entry.getKey(); + LinkedList itemList = entry.getValue(); - if (topValues.containsKey(itemKey)) { - topValues.get(itemKey).addAll(itemList); - } else { - topValues.put(itemKey, itemList); - } - } + if (topValues.containsKey(itemKey)) { + topValues.get(itemKey).addAll(itemList); } else { - Entry> valueEntry = serverAggregatorResult.firstEntry(); - byte[] currentOrder = valueEntry.getKey(); - - boolean isBetter; - if (isAscending) { - isBetter = topOrder.compareTo(currentOrder) > 0; - } else { - isBetter = topOrder.compareTo(currentOrder) < 0; //desc - } - if (topOrder.getValue().length < 1 || isBetter) { - topOrder = new BinaryComparator(currentOrder); - topValue = valueEntry.getValue().getFirst(); - } + topValues.put(itemKey, itemList); } + } + } else { + Entry> valueEntry = serverAggregatorResult.firstEntry(); + byte[] currentOrder = valueEntry.getKey(); + + boolean isBetter; + if (isAscending) { + isBetter = topOrder.compareTo(currentOrder) > 0; + } else { + isBetter = topOrder.compareTo(currentOrder) < 0; // desc + } + if (topOrder.getValue().length < 1 || isBetter) { + topOrder = new BinaryComparator(currentOrder); + topValue = valueEntry.getValue().getFirst(); + } } + } - @Override - public PDataType getDataType() { - return dataType; - } + @Override + public PDataType getDataType() { + return dataType; + } - public void init(int offset, boolean isArrayReturnType) { - if (offset > 0) { - useOffset = true; - this.offset = offset; - } - - this.isArrayReturnType = isArrayReturnType; + public void init(int offset, boolean isArrayReturnType) { + if (offset > 0) { + useOffset = true; + this.offset = offset; } + + this.isArrayReturnType = isArrayReturnType; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java index 298877ed7cc..d77b0d81470 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,188 +39,190 @@ /** * Base server aggregator for (FIRST|LAST|NTH)_VALUE functions - * */ public class FirstLastValueServerAggregator extends BaseAggregator { - private static final Logger LOGGER = LoggerFactory.getLogger(FirstLastValueServerAggregator.class); - protected List children; - protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); - protected byte[] topValue; - protected boolean useOffset = false; - protected int offset = -1; - protected TreeMap> topValues = new TreeMap>(new Bytes.ByteArrayComparator()); - protected boolean isAscending; - protected boolean hasValueDescSortOrder; - protected Expression orderByColumn; - protected Expression dataColumn; - protected int topValuesCount = 0; - - public FirstLastValueServerAggregator() { - super(SortOrder.getDefault()); - } - - @Override - public void reset() { - topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); - topValue = null; - topValues.clear(); - topValuesCount = 0; - } - - @Override - public int getSize() { - return super.getSize() + SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE; + private static final Logger LOGGER = + LoggerFactory.getLogger(FirstLastValueServerAggregator.class); + protected List children; + protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); + protected byte[] topValue; + protected boolean useOffset = false; + protected int offset = -1; + protected TreeMap> topValues = + new TreeMap>(new Bytes.ByteArrayComparator()); + protected boolean isAscending; + protected boolean hasValueDescSortOrder; + protected Expression orderByColumn; + protected Expression dataColumn; + protected int topValuesCount = 0; + + public FirstLastValueServerAggregator() { + super(SortOrder.getDefault()); + } + + @Override + public void reset() { + topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY); + topValue = null; + topValues.clear(); + topValuesCount = 0; + } + + @Override + public int getSize() { + return super.getSize() + SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE; + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + // set pointer to ordering by field + orderByColumn.evaluate(tuple, ptr); + byte[] currentOrder = ptr.copyBytes(); + + if (!dataColumn.evaluate(tuple, ptr)) { + return; } - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - //set pointer to ordering by field - orderByColumn.evaluate(tuple, ptr); - byte[] currentOrder = ptr.copyBytes(); - - if (!dataColumn.evaluate(tuple, ptr)) { - return; + if (useOffset) { + boolean addFlag = false; + if (topValuesCount < offset) { + try { + addFlag = true; + } catch (Exception e) { + LOGGER.error(e.getMessage()); } - - if (useOffset) { - boolean addFlag = false; - if (topValuesCount < offset) { - try { - addFlag = true; - } catch (Exception e) { - LOGGER.error(e.getMessage()); - } - } else { - if (isAscending) { - if (removeLastElement(currentOrder, topValues.lastKey(), -1)) { - addFlag = true; - topValuesCount--; - } - } else { - if (removeLastElement(currentOrder, topValues.firstKey(), 1)) { - addFlag = true; - topValuesCount--; - } - } - } - if (addFlag) { - topValuesCount++; - if (!topValues.containsKey(currentOrder)) { - topValues.put(currentOrder, new 
LinkedList()); - } - //invert bytes if is SortOrder set - if (hasValueDescSortOrder) { - topValues.get(currentOrder).push(SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength())); - } else { - topValues.get(currentOrder).push(ptr.copyBytes()); - } - } + } else { + if (isAscending) { + if (removeLastElement(currentOrder, topValues.lastKey(), -1)) { + addFlag = true; + topValuesCount--; + } } else { - boolean isHigher; - if (isAscending) { - isHigher = topOrder.compareTo(currentOrder) > 0; - } else { - isHigher = topOrder.compareTo(currentOrder) < 0;//desc - } - if (topOrder.getValue().length < 1 || isHigher) { - if (hasValueDescSortOrder) { - topValue = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()); - } else { - topValue = ptr.copyBytes(); - } - - topOrder = new BinaryComparator(currentOrder); - } + if (removeLastElement(currentOrder, topValues.firstKey(), 1)) { + addFlag = true; + topValuesCount--; + } } - - } - - @Override - public String toString() { - StringBuilder out = new StringBuilder("FirstLastValueServerAggregator" - + " is ascending: " + isAscending + " value="); - if (useOffset) { - for (byte[] key : topValues.keySet()) { - out.append(Arrays.asList(topValues.get(key))); - } - out.append(" offset = ").append(offset); + } + if (addFlag) { + topValuesCount++; + if (!topValues.containsKey(currentOrder)) { + topValues.put(currentOrder, new LinkedList()); + } + // invert bytes if is SortOrder set + if (hasValueDescSortOrder) { + topValues.get(currentOrder) + .push(SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength())); } else { - out.append(Arrays.asList(topValue)); + topValues.get(currentOrder).push(ptr.copyBytes()); } + } + } else { + boolean isHigher; + if (isAscending) { + isHigher = topOrder.compareTo(currentOrder) > 0; + } else { + isHigher = topOrder.compareTo(currentOrder) < 0;// desc + } + if (topOrder.getValue().length < 1 || isHigher) { + if (hasValueDescSortOrder) { + topValue = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()); + } else { + topValue = ptr.copyBytes(); + } + + topOrder = new BinaryComparator(currentOrder); + } + } - return out.toString(); + } + + @Override + public String toString() { + StringBuilder out = new StringBuilder( + "FirstLastValueServerAggregator" + " is ascending: " + isAscending + " value="); + if (useOffset) { + for (byte[] key : topValues.keySet()) { + out.append(Arrays.asList(topValues.get(key))); + } + out.append(" offset = ").append(offset); + } else { + out.append(Arrays.asList(topValue)); } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return out.toString(); + } - FirstLastNthValueDataContainer payload = new FirstLastNthValueDataContainer(); - payload.setIsAscending(isAscending); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - payload.setFixedWidthOrderValues(orderByColumn.getDataType().isFixedWidth()); - payload.setFixedWidthDataValues(dataColumn.getDataType().isFixedWidth()); + FirstLastNthValueDataContainer payload = new FirstLastNthValueDataContainer(); + payload.setIsAscending(isAscending); - if (useOffset) { - payload.setOffset(offset); + payload.setFixedWidthOrderValues(orderByColumn.getDataType().isFixedWidth()); + payload.setFixedWidthDataValues(dataColumn.getDataType().isFixedWidth()); - if (topValuesCount == 0) { - return false; - } - } else { - if (topValue == null) { - return false; - } + if (useOffset) { + payload.setOffset(offset); - LinkedList topValueList = new LinkedList(); - topValueList.push(topValue); - 
topValues.put(topOrder.getValue(), topValueList); - } - payload.setData(topValues); + if (topValuesCount == 0) { + return false; + } + } else { + if (topValue == null) { + return false; + } - try { - ptr.set(payload.getPayload()); - } catch (IOException ex) { - LOGGER.error(ex.getMessage()); - return false; - } - return true; + LinkedList topValueList = new LinkedList(); + topValueList.push(topValue); + topValues.put(topOrder.getValue(), topValueList); } + payload.setData(topValues); - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; + try { + ptr.set(payload.getPayload()); + } catch (IOException ex) { + LOGGER.error(ex.getMessage()); + return false; + } + return true; + } + + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + public void init(List children, boolean isAscending, int offset) { + this.children = children; + this.offset = offset; + if (offset > 0) { + useOffset = true; } - public void init(List children, boolean isAscending, int offset) { - this.children = children; - this.offset = offset; - if (offset > 0) { - useOffset = true; - } - - orderByColumn = children.get(0); - dataColumn = children.get(2); + orderByColumn = children.get(0); + dataColumn = children.get(2); - //set order if modified - hasValueDescSortOrder = (dataColumn.getSortOrder() == SortOrder.DESC); + // set order if modified + hasValueDescSortOrder = (dataColumn.getSortOrder() == SortOrder.DESC); - if (orderByColumn.getSortOrder() == SortOrder.DESC) { - this.isAscending = !isAscending; - } else { - this.isAscending = isAscending; - } + if (orderByColumn.getSortOrder() == SortOrder.DESC) { + this.isAscending = !isAscending; + } else { + this.isAscending = isAscending; } - - private boolean removeLastElement(byte[] currentOrder, byte[] lowestKey, int sortOrderInt) { - if (Bytes.compareTo(currentOrder, lowestKey) * sortOrderInt >= 0) { - if (topValues.get(lowestKey).size() == 1) { - topValues.remove(lowestKey); - } else { - topValues.get(lowestKey).pollFirst(); - } - return true; - } - return false; + } + + private boolean removeLastElement(byte[] currentOrder, byte[] lowestKey, int sortOrderInt) { + if (Bytes.compareTo(currentOrder, lowestKey) * sortOrderInt >= 0) { + if (topValues.get(lowestKey).size() == 1) { + topValues.remove(lowestKey); + } else { + topValues.get(lowestKey).pollFirst(); + } + return true; } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/IntSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/IntSumAggregator.java index ef62b27c403..b99d0104c18 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/IntSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/IntSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,26 +17,23 @@ */ package org.apache.phoenix.expression.aggregator; -import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; /** - * * Aggregator that sums integer values - * - * * @since 0.1 */ public class IntSumAggregator extends NumberSumAggregator { - - public IntSumAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - @Override - protected PDataType getInputDataType() { - return PInteger.INSTANCE; - } + + public IntSumAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + @Override + protected PDataType getInputDataType() { + return PInteger.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/LongSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/LongSumAggregator.java index 4007bb48123..3591faff0da 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/LongSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/LongSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,25 +17,22 @@ */ package org.apache.phoenix.expression.aggregator; -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; /** - * * Aggregator that sums long values - * - * * @since 0.1 */ public class LongSumAggregator extends NumberSumAggregator { - - public LongSumAggregator() { - super(SortOrder.getDefault()); - } - - @Override - protected PDataType getInputDataType() { - return PLong.INSTANCE; - } + + public LongSumAggregator() { + super(SortOrder.getDefault()); + } + + @Override + protected PDataType getInputDataType() { + return PLong.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MaxAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MaxAggregator.java index 17dd49b9e9b..4d63b8e7883 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MaxAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MaxAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,30 +19,27 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.schema.SortOrder; - /** * Aggregator that finds the max of values. Inverse of {@link MinAggregator}. - * - * * @since 0.1 */ abstract public class MaxAggregator extends MinAggregator { - - public MaxAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - @Override - protected boolean keepFirst(ImmutableBytesWritable ibw1, ImmutableBytesWritable ibw2) { - return !super.keepFirst(ibw1, ibw2); - } - - @Override - public String toString() { - return "MAX [value=" + Bytes.toStringBinary(value.get(),value.getOffset(),value.getLength()) + "]"; - } + + public MaxAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + @Override + protected boolean keepFirst(ImmutableBytesWritable ibw1, ImmutableBytesWritable ibw2) { + return !super.keepFirst(ibw1, ibw2); + } + + @Override + public String toString() { + return "MAX [value=" + Bytes.toStringBinary(value.get(), value.getOffset(), value.getLength()) + + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MinAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MinAggregator.java index be12df21a23..792063ee1a5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MinAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/MinAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,78 +19,75 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.SizedUtil; - /** * Aggregator that finds the min of values. Inverse of {@link MaxAggregator}. - * - * * @since 0.1 */ abstract public class MinAggregator extends BaseAggregator { - /** Used to store the accumulate the results of the MIN function */ - protected final ImmutableBytesWritable value = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - - public MinAggregator(SortOrder sortOrder) { - super(sortOrder); - } + /** Used to store the accumulate the results of the MIN function */ + protected final ImmutableBytesWritable value = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - @Override - public void reset() { - value.set(ByteUtil.EMPTY_BYTE_ARRAY); - super.reset(); - } + public MinAggregator(SortOrder sortOrder) { + super(sortOrder); + } - @Override - public int getSize() { - return super.getSize() + /*value*/ SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE; - } + @Override + public void reset() { + value.set(ByteUtil.EMPTY_BYTE_ARRAY); + super.reset(); + } - /** - * Compares two bytes writables, and returns true if the first one should be - * kept, and false otherwise. 
For the MIN function, this method will return - * true if the first bytes writable is less than the second. - * - * @param ibw1 the first bytes writable - * @param ibw2 the second bytes writable - * @return true if the first bytes writable should be kept - */ - protected boolean keepFirst(ImmutableBytesWritable ibw1, ImmutableBytesWritable ibw2) { - return 0 >= getDataType().compareTo(ibw1, sortOrder, ibw2, sortOrder, getDataType()); - } + @Override + public int getSize() { + return super.getSize() + /* value */ SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE; + } - private boolean isNull() { - return value.get() == ByteUtil.EMPTY_BYTE_ARRAY; - } - - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - if (isNull()) { - value.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - } else { - if (!keepFirst(value, ptr)) { - // replace the value with the new value - value.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - } - } - - @Override - public String toString() { - return "MIN [value=" + Bytes.toStringBinary(value.get(),value.getOffset(),value.getLength()) + "]"; + /** + * Compares two bytes writables, and returns true if the first one should be kept, and false + * otherwise. For the MIN function, this method will return true if the first bytes writable is + * less than the second. + * @param ibw1 the first bytes writable + * @param ibw2 the second bytes writable + * @return true if the first bytes writable should be kept + */ + protected boolean keepFirst(ImmutableBytesWritable ibw1, ImmutableBytesWritable ibw2) { + return 0 >= getDataType().compareTo(ibw1, sortOrder, ibw2, sortOrder, getDataType()); + } + + private boolean isNull() { + return value.get() == ByteUtil.EMPTY_BYTE_ARRAY; + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + if (isNull()) { + value.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + } else { + if (!keepFirst(value, ptr)) { + // replace the value with the new value + value.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + } } + } + + @Override + public String toString() { + return "MIN [value=" + Bytes.toStringBinary(value.get(), value.getOffset(), value.getLength()) + + "]"; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (isNull()) { - return false; - } - ptr.set(value.get(), value.getOffset(), value.getLength()); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (isNull()) { + return false; } + ptr.set(value.get(), value.getOffset(), value.getLength()); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NonSizeTrackingServerAggregators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NonSizeTrackingServerAggregators.java index 8836c4544aa..8276af1e747 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NonSizeTrackingServerAggregators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NonSizeTrackingServerAggregators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,21 +22,22 @@ import org.apache.phoenix.schema.tuple.Tuple; public class NonSizeTrackingServerAggregators extends ServerAggregators { - public static final ServerAggregators EMPTY_AGGREGATORS = new NonSizeTrackingServerAggregators(new SingleAggregateFunction[0], new Aggregator[0], new Expression[0], 0); + public static final ServerAggregators EMPTY_AGGREGATORS = new NonSizeTrackingServerAggregators( + new SingleAggregateFunction[0], new Aggregator[0], new Expression[0], 0); - public NonSizeTrackingServerAggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, - Expression[] expressions, int minNullableIndex) { - super(functions, aggregators, expressions, minNullableIndex); - } + public NonSizeTrackingServerAggregators(SingleAggregateFunction[] functions, + Aggregator[] aggregators, Expression[] expressions, int minNullableIndex) { + super(functions, aggregators, expressions, minNullableIndex); + } - @Override - public void aggregate(Aggregator[] aggregators, Tuple result) { - for (int i = 0; i < expressions.length; i++) { - if (expressions[i].evaluate(result, ptr) && ptr.getLength() != 0) { - aggregators[i].aggregate(result, ptr); - } - expressions[i].reset(); - } + @Override + public void aggregate(Aggregator[] aggregators, Tuple result) { + for (int i = 0; i < expressions.length; i++) { + if (expressions[i].evaluate(result, ptr) && ptr.getLength() != 0) { + aggregators[i].aggregate(result, ptr); + } + expressions[i].reset(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NumberSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NumberSumAggregator.java index bfcecd16499..e0da51c8f28 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NumberSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/NumberSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,96 +18,89 @@ package org.apache.phoenix.expression.aggregator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.SizedUtil; /** - * * Aggregator that sums integral number values - * - * * @since 0.1 */ abstract public class NumberSumAggregator extends BaseAggregator { - private long sum = 0; - private byte[] buffer; - - public NumberSumAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - public NumberSumAggregator(SortOrder sortOrder, - ImmutableBytesWritable ptr) { - this(sortOrder); - if (ptr != null) { - initBuffer(); - sum = PLong.INSTANCE.getCodec().decodeLong(ptr, sortOrder); - } - } - - public long getSum() { - return sum; + private long sum = 0; + private byte[] buffer; + + public NumberSumAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + public NumberSumAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { + this(sortOrder); + if (ptr != null) { + initBuffer(); + sum = PLong.INSTANCE.getCodec().decodeLong(ptr, sortOrder); } + } - abstract protected PDataType getInputDataType(); + public long getSum() { + return sum; + } - private int getBufferLength() { - return getDataType().getByteSize(); - } - - private void initBuffer() { - buffer = new byte[getBufferLength()]; - } - - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - // Get either IntNative or LongNative depending on input type - long value = getInputDataType().getCodec().decodeLong(ptr, - sortOrder); - sum += value; - if (buffer == null) { - initBuffer(); - } - } + abstract protected PDataType getInputDataType(); - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (buffer == null) { - if (isNullable()) { - return false; - } - initBuffer(); - } - getDataType().getCodec().encodeLong(sum, buffer, 0); - ptr.set(buffer); - return true; - } + private int getBufferLength() { + return getDataType().getByteSize(); + } - @Override - public final PDataType getDataType() { - return PLong.INSTANCE; - } + private void initBuffer() { + buffer = new byte[getBufferLength()]; + } - @Override - public void reset() { - sum = 0; - buffer = null; - super.reset(); + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + // Get either IntNative or LongNative depending on input type + long value = getInputDataType().getCodec().decodeLong(ptr, sortOrder); + sum += value; + if (buffer == null) { + initBuffer(); } - - @Override - public String toString() { - return "SUM [sum=" + sum + "]"; - } - - @Override - public int getSize() { - return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE - + getBufferLength(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (buffer == null) { + if (isNullable()) { + return false; + } + initBuffer(); } + getDataType().getCodec().encodeLong(sum, buffer, 0); + ptr.set(buffer); + return true; + } + + @Override + public final PDataType getDataType() { + 
return PLong.INSTANCE; + } + + @Override + public void reset() { + sum = 0; + buffer = null; + super.reset(); + } + + @Override + public String toString() { + return "SUM [sum=" + sum + "]"; + } + + @Override + public int getSize() { + return super.getSize() + SizedUtil.LONG_SIZE + SizedUtil.ARRAY_SIZE + getBufferLength(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentRankClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentRankClientAggregator.java index fceb1625616..442c4ec2e61 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentRankClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentRankClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,61 +23,60 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.*; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** * Client side Aggregator for PERCENT_RANK aggregations - * - * * @since 1.2.1 */ public class PercentRankClientAggregator extends DistinctValueWithCountClientAggregator { - private final List exps; + private final List exps; - public PercentRankClientAggregator(List exps, SortOrder sortOrder) { - super(sortOrder); - this.exps = exps; - } + public PercentRankClientAggregator(List exps, SortOrder sortOrder) { + super(sortOrder); + this.exps = exps; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (cachedResult == null) { - ColumnExpression columnExp = (ColumnExpression)exps.get(0); - // Second exp will be a LiteralExpression of Boolean type indicating whether the ordering to - // be ASC/DESC - LiteralExpression isAscendingExpression = (LiteralExpression)exps.get(1); - boolean isAscending = (Boolean)isAscendingExpression.getValue(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (cachedResult == null) { + ColumnExpression columnExp = (ColumnExpression) exps.get(0); + // Second exp will be a LiteralExpression of Boolean type indicating whether the ordering to + // be ASC/DESC + LiteralExpression isAscendingExpression = (LiteralExpression) exps.get(1); + boolean isAscending = (Boolean) isAscendingExpression.getValue(); - // Third expression will be LiteralExpression - LiteralExpression valueExp = (LiteralExpression)exps.get(2); - Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); - long distinctCountsSum = 0; - Object value = valueExp.getValue(); - for (Entry entry : sorted.entrySet()) { - Object colValue = entry.getKey(); - int compareResult = columnExp.getDataType().compareTo(colValue, value, valueExp.getDataType()); - boolean done = isAscending ? 
compareResult > 0 : compareResult <= 0; - if (done) break; - distinctCountsSum += entry.getValue(); - } + // Third expression will be LiteralExpression + LiteralExpression valueExp = (LiteralExpression) exps.get(2); + Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); + long distinctCountsSum = 0; + Object value = valueExp.getValue(); + for (Entry entry : sorted.entrySet()) { + Object colValue = entry.getKey(); + int compareResult = + columnExp.getDataType().compareTo(colValue, value, valueExp.getDataType()); + boolean done = isAscending ? compareResult > 0 : compareResult <= 0; + if (done) break; + distinctCountsSum += entry.getValue(); + } - float result = (float)distinctCountsSum / totalCount; - this.cachedResult = new BigDecimal(result); - } - if (buffer == null) { - initBuffer(); - } - buffer = PDecimal.INSTANCE.toBytes(this.cachedResult); - ptr.set(buffer); - return true; + float result = (float) distinctCountsSum / totalCount; + this.cachedResult = new BigDecimal(result); } - - @Override - protected PDataType getResultDataType() { - return PDecimal.INSTANCE; + if (buffer == null) { + initBuffer(); } + buffer = PDecimal.INSTANCE.toBytes(this.cachedResult); + ptr.set(buffer); + return true; + } + + @Override + protected PDataType getResultDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileClientAggregator.java index 9a6b2c84977..b5df9e9c416 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,83 +23,81 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.*; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.util.ByteUtil; /** * Client side Aggregator for PERCENTILE_CONT aggregations - * - * * @since 1.2.1 */ public class PercentileClientAggregator extends DistinctValueWithCountClientAggregator { - private final List exps; + private final List exps; - public PercentileClientAggregator(List exps, SortOrder sortOrder) { - super(sortOrder); - this.exps = exps; - } + public PercentileClientAggregator(List exps, SortOrder sortOrder) { + super(sortOrder); + this.exps = exps; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (cachedResult == null) { - ColumnExpression columnExp = (ColumnExpression)exps.get(0); - // Second exp will be a LiteralExpression of Boolean type indicating whether the ordering to - // be ASC/DESC - LiteralExpression isAscendingExpression = (LiteralExpression)exps.get(1); - boolean isAscending = (Boolean)isAscendingExpression.getValue(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (cachedResult == null) { + ColumnExpression columnExp = (ColumnExpression) exps.get(0); + // Second exp will be a LiteralExpression of Boolean type indicating whether the ordering to + // be ASC/DESC + LiteralExpression isAscendingExpression = (LiteralExpression) exps.get(1); + boolean isAscending = (Boolean) isAscendingExpression.getValue(); - // Third expression will be LiteralExpression - LiteralExpression percentileExp = (LiteralExpression)exps.get(2); - float p = ((Number)percentileExp.getValue()).floatValue(); - Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); - float i = (p * this.totalCount) + 0.5F; - long k = (long)i; - float f = i - k; - Object o1 = null; - Object o2 = null; - long distinctCountsSum = 0; - for (Entry entry : sorted.entrySet()) { - if (o1 != null) { - o2 = entry.getKey(); - break; - } - distinctCountsSum += entry.getValue(); - if (distinctCountsSum == k) { - o1 = entry.getKey(); - } else if (distinctCountsSum > k) { - o1 = o2 = entry.getKey(); - break; - } - } - - double result = 0.0; - Number n1 = (Number)o1; - if (o1 == null && o2 == null) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } else if (o2 == null || o1 == o2) { - result = n1.doubleValue(); - } else { - Number n2 = (Number)o2; - result = (n1.doubleValue() * (1.0F - f)) + (n2.doubleValue() * f); - } - this.cachedResult = new BigDecimal(result); + // Third expression will be LiteralExpression + LiteralExpression percentileExp = (LiteralExpression) exps.get(2); + float p = ((Number) percentileExp.getValue()).floatValue(); + Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); + float i = (p * this.totalCount) + 0.5F; + long k = (long) i; + float f = i - k; + Object o1 = null; + Object o2 = null; + long distinctCountsSum = 0; + for (Entry entry : sorted.entrySet()) { + if (o1 != null) 
{ + o2 = entry.getKey(); + break; } - if (buffer == null) { - initBuffer(); + distinctCountsSum += entry.getValue(); + if (distinctCountsSum == k) { + o1 = entry.getKey(); + } else if (distinctCountsSum > k) { + o1 = o2 = entry.getKey(); + break; } - buffer = PDecimal.INSTANCE.toBytes(this.cachedResult); - ptr.set(buffer); + } + + double result = 0.0; + Number n1 = (Number) o1; + if (o1 == null && o2 == null) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); return true; + } else if (o2 == null || o1 == o2) { + result = n1.doubleValue(); + } else { + Number n2 = (Number) o2; + result = (n1.doubleValue() * (1.0F - f)) + (n2.doubleValue() * f); + } + this.cachedResult = new BigDecimal(result); } - - @Override - protected PDataType getResultDataType() { - return PDecimal.INSTANCE; + if (buffer == null) { + initBuffer(); } + buffer = PDecimal.INSTANCE.toBytes(this.cachedResult); + ptr.set(buffer); + return true; + } + + @Override + protected PDataType getResultDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java index 25897955371..60379ae5ad0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/PercentileDiscClientAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,80 +22,79 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.*; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** - * - * Built-in function for {@code PERCENTILE_DISC() WITHIN GROUP (ORDER BY ASC/DESC) } aggregate function - * - * + * Built-in function for + * {@code PERCENTILE_DISC() WITHIN GROUP (ORDER BY ASC/DESC) } aggregate + * function * @since 1.2.1 */ public class PercentileDiscClientAggregator extends DistinctValueWithCountClientAggregator { - private final List exps; - ColumnExpression columnExp = null; + private final List exps; + ColumnExpression columnExp = null; - public PercentileDiscClientAggregator(List exps, SortOrder sortOrder) { - super(sortOrder); - this.exps = exps; - } + public PercentileDiscClientAggregator(List exps, SortOrder sortOrder) { + super(sortOrder); + this.exps = exps; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Reset buffer so that it gets initialized with the current datatype of the column - buffer = null; - if (cachedResult == null) { - columnExp = (ColumnExpression)exps.get(0); - // Second exp will be a LiteralExpression of Boolean type indicating - // whether the ordering to be ASC/DESC - LiteralExpression isAscendingExpression = (LiteralExpression) exps - .get(1); - boolean isAscending = (Boolean) isAscendingExpression.getValue(); + @Override + public 
boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Reset buffer so that it gets initialized with the current datatype of the column + buffer = null; + if (cachedResult == null) { + columnExp = (ColumnExpression) exps.get(0); + // Second exp will be a LiteralExpression of Boolean type indicating + // whether the ordering to be ASC/DESC + LiteralExpression isAscendingExpression = (LiteralExpression) exps.get(1); + boolean isAscending = (Boolean) isAscendingExpression.getValue(); - // Third expression will be LiteralExpression - LiteralExpression percentileExp = (LiteralExpression) exps.get(2); - float p = ((Number) percentileExp.getValue()).floatValue(); - Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); - int currValue = 0; - Object result = null; - // Here the Percentile_disc returns the cum_dist() that is greater or equal to the - // Percentile (p) specified in the query. So the result set will be of that of the - // datatype of the column being selected - for (Entry entry : sorted.entrySet()) { - result = entry.getKey(); - Integer value = entry.getValue(); - currValue += value; - float cum_dist = (float) currValue / (float) totalCount; - if (cum_dist >= p) { - break; - } - } - this.cachedResult = result; - } - if (buffer == null) { - // Initialize based on the datatype - // columnExp cannot be null - buffer = new byte[columnExp.getDataType().getByteSize()]; - } - // Copy the result to the buffer. - System.arraycopy(columnExp.getDataType().toBytes(this.cachedResult), 0, buffer, 0, buffer.length); - ptr.set(buffer); - return true; - } + // Third expression will be LiteralExpression + LiteralExpression percentileExp = (LiteralExpression) exps.get(2); + float p = ((Number) percentileExp.getValue()).floatValue(); + Map sorted = getSortedValueVsCount(isAscending, columnExp.getDataType()); + int currValue = 0; + Object result = null; + // Here the Percentile_disc returns the cum_dist() that is greater or equal to the + // Percentile (p) specified in the query. So the result set will be of that of the + // datatype of the column being selected + for (Entry entry : sorted.entrySet()) { + result = entry.getKey(); + Integer value = entry.getValue(); + currValue += value; + float cum_dist = (float) currValue / (float) totalCount; + if (cum_dist >= p) { + break; + } + } + this.cachedResult = result; + } + if (buffer == null) { + // Initialize based on the datatype + // columnExp cannot be null + buffer = new byte[columnExp.getDataType().getByteSize()]; + } + // Copy the result to the buffer. 
+ System.arraycopy(columnExp.getDataType().toBytes(this.cachedResult), 0, buffer, 0, + buffer.length); + ptr.set(buffer); + return true; + } - @Override - protected int getBufferLength() { - // Will be used in the aggregate() call - return PDecimal.INSTANCE.getByteSize(); - } + @Override + protected int getBufferLength() { + // Will be used in the aggregate() call + return PDecimal.INSTANCE.getByteSize(); + } + + @Override + protected PDataType getResultDataType() { + return columnExp.getDataType(); + } - @Override - protected PDataType getResultDataType() { - return columnExp.getDataType(); - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ServerAggregators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ServerAggregators.java index ef9ca0ff911..9ec937e4a2a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ServerAggregators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/ServerAggregators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,115 +35,117 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.tuple.Tuple; - /** - * * Aggregators that execute on the server-side - * */ public abstract class ServerAggregators extends Aggregators { - protected final Expression[] expressions; - - protected ServerAggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, Expression[] expressions, int minNullableIndex) { - super(functions, aggregators, minNullableIndex); - if (aggregators.length != expressions.length) { - throw new IllegalArgumentException("Number of aggregators (" + aggregators.length - + ") must match the number of expressions (" + Arrays.toString(expressions) + ")"); - } - this.expressions = expressions; - } - - @Override - public abstract void aggregate(Aggregator[] aggregators, Tuple result); - - /** - * Serialize an Aggregator into a byte array - * @param aggFuncs list of aggregator to serialize - * @return serialized byte array respresentation of aggregator - */ - public static byte[] serialize(List aggFuncs, int minNullableIndex) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, minNullableIndex); - WritableUtils.writeVInt(output, aggFuncs.size()); - for (int i = 0; i < aggFuncs.size(); i++) { - SingleAggregateFunction aggFunc = aggFuncs.get(i); - WritableUtils.writeVInt(output, ExpressionType.valueOf(aggFunc).ordinal()); - aggFunc.write(output); - } - return stream.toByteArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + protected final Expression[] expressions; + + protected ServerAggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, + Expression[] expressions, int minNullableIndex) { + super(functions, aggregators, minNullableIndex); + if (aggregators.length != expressions.length) { + throw new IllegalArgumentException("Number of aggregators (" + aggregators.length + + ") must match 
the number of expressions (" + Arrays.toString(expressions) + ")"); } + this.expressions = expressions; + } - @Override - public Aggregator[] newAggregators() { - return newAggregators(null); + @Override + public abstract void aggregate(Aggregator[] aggregators, Tuple result); + + /** + * Serialize an Aggregator into a byte array + * @param aggFuncs list of aggregator to serialize + * @return serialized byte array respresentation of aggregator + */ + public static byte[] serialize(List aggFuncs, int minNullableIndex) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, minNullableIndex); + WritableUtils.writeVInt(output, aggFuncs.size()); + for (int i = 0; i < aggFuncs.size(); i++) { + SingleAggregateFunction aggFunc = aggFuncs.get(i); + WritableUtils.writeVInt(output, ExpressionType.valueOf(aggFunc).ordinal()); + aggFunc.write(output); + } + return stream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } - public Aggregator[] newAggregators(Configuration conf) { - Aggregator[] aggregators = new Aggregator[functions.length]; - for (int i = 0; i < functions.length; i++) { - aggregators[i] = functions[i].newServerAggregator(conf); - } - return aggregators; + @Override + public Aggregator[] newAggregators() { + return newAggregators(null); + } + + public Aggregator[] newAggregators(Configuration conf) { + Aggregator[] aggregators = new Aggregator[functions.length]; + for (int i = 0; i < functions.length; i++) { + aggregators[i] = functions[i].newServerAggregator(conf); } + return aggregators; + } - /** - * Deserialize aggregators from the serialized byte array representation - * @param b byte array representation of a list of Aggregators - * @param conf Server side configuration used by HBase - * @return newly instantiated Aggregators instance - */ - public static ServerAggregators deserialize(byte[] b, Configuration conf, MemoryChunk chunk) { - if (b == null) { - return NonSizeTrackingServerAggregators.EMPTY_AGGREGATORS; - } - ByteArrayInputStream stream = new ByteArrayInputStream(b); - try { - DataInputStream input = new DataInputStream(stream); - int minNullableIndex = WritableUtils.readVInt(input); - int len = WritableUtils.readVInt(input); - Aggregator[] aggregators = new Aggregator[len]; - Expression[] expressions = new Expression[len]; - SingleAggregateFunction[] functions = new SingleAggregateFunction[len]; - for (int i = 0; i < aggregators.length; i++) { - SingleAggregateFunction aggFunc = (SingleAggregateFunction)ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - aggFunc.readFields(input, conf); - functions[i] = aggFunc; - aggregators[i] = aggFunc.getAggregator(); - expressions[i] = aggFunc.getAggregatorExpression(); - } - boolean trackSize = false; - if (chunk != null) { - for (Aggregator aggregator : aggregators) { - if (aggregator.trackSize()) { - trackSize = true; - break; - } - } - } - return trackSize ? 
- new SizeTrackingServerAggregators(functions, aggregators,expressions, minNullableIndex, chunk, - conf.getInt(QueryServices.AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB, - QueryServicesOptions.DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE)) : - new NonSizeTrackingServerAggregators(functions, aggregators,expressions, minNullableIndex); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } + /** + * Deserialize aggregators from the serialized byte array representation + * @param b byte array representation of a list of Aggregators + * @param conf Server side configuration used by HBase + * @return newly instantiated Aggregators instance + */ + public static ServerAggregators deserialize(byte[] b, Configuration conf, MemoryChunk chunk) { + if (b == null) { + return NonSizeTrackingServerAggregators.EMPTY_AGGREGATORS; + } + ByteArrayInputStream stream = new ByteArrayInputStream(b); + try { + DataInputStream input = new DataInputStream(stream); + int minNullableIndex = WritableUtils.readVInt(input); + int len = WritableUtils.readVInt(input); + Aggregator[] aggregators = new Aggregator[len]; + Expression[] expressions = new Expression[len]; + SingleAggregateFunction[] functions = new SingleAggregateFunction[len]; + for (int i = 0; i < aggregators.length; i++) { + SingleAggregateFunction aggFunc = + (SingleAggregateFunction) ExpressionType.values()[WritableUtils.readVInt(input)] + .newInstance(); + aggFunc.readFields(input, conf); + functions[i] = aggFunc; + aggregators[i] = aggFunc.getAggregator(); + expressions[i] = aggFunc.getAggregatorExpression(); + } + boolean trackSize = false; + if (chunk != null) { + for (Aggregator aggregator : aggregators) { + if (aggregator.trackSize()) { + trackSize = true; + break; + } } + } + return trackSize + ? new SizeTrackingServerAggregators(functions, aggregators, expressions, minNullableIndex, + chunk, + conf.getInt(QueryServices.AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB, + QueryServicesOptions.DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE)) + : new NonSizeTrackingServerAggregators(functions, aggregators, expressions, + minNullableIndex); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java index e0571737ff9..9967290bc3b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,35 +25,36 @@ import org.slf4j.LoggerFactory; public class SizeTrackingServerAggregators extends ServerAggregators { - private static final Logger LOGGER = LoggerFactory.getLogger(SizeTrackingServerAggregators.class); + private static final Logger LOGGER = LoggerFactory.getLogger(SizeTrackingServerAggregators.class); - private final MemoryChunk chunk; - private final int sizeIncrease; - private long memoryUsed = 0; + private final MemoryChunk chunk; + private final int sizeIncrease; + private long memoryUsed = 0; - public SizeTrackingServerAggregators(SingleAggregateFunction[] functions, Aggregator[] aggregators, - Expression[] expressions, int minNullableIndex, MemoryChunk chunk, int sizeIncrease) { - super(functions, aggregators, expressions, minNullableIndex); - this.chunk = chunk; - this.sizeIncrease = sizeIncrease; - } + public SizeTrackingServerAggregators(SingleAggregateFunction[] functions, + Aggregator[] aggregators, Expression[] expressions, int minNullableIndex, MemoryChunk chunk, + int sizeIncrease) { + super(functions, aggregators, expressions, minNullableIndex); + this.chunk = chunk; + this.sizeIncrease = sizeIncrease; + } - @Override - public void aggregate(Aggregator[] aggregators, Tuple result) { - long dsize = memoryUsed; - for (int i = 0; i < expressions.length; i++) { - if (expressions[i].evaluate(result, ptr) && ptr.getLength() != 0) { - dsize -= aggregators[i].getSize(); - aggregators[i].aggregate(result, ptr); - dsize += aggregators[i].getSize(); - } - expressions[i].reset(); - } - while(dsize > chunk.getSize()) { - LOGGER.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize()); - chunk.resize(chunk.getSize() + sizeIncrease); - } - memoryUsed = dsize; + @Override + public void aggregate(Aggregator[] aggregators, Tuple result) { + long dsize = memoryUsed; + for (int i = 0; i < expressions.length; i++) { + if (expressions[i].evaluate(result, ptr) && ptr.getLength() != 0) { + dsize -= aggregators[i].getSize(); + aggregators[i].aggregate(result, ptr); + dsize += aggregators[i].getSize(); + } + expressions[i].reset(); + } + while (dsize > chunk.getSize()) { + LOGGER.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize()); + chunk.resize(chunk.getSize() + sizeIncrease); } - + memoryUsed = dsize; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevPopAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevPopAggregator.java index afad511d070..88754a0e1d7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevPopAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevPopAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,16 @@ /** * Client side Aggregator for STDDEV_POP aggregations - * - * * @since 1.2.1 */ public class StddevPopAggregator extends BaseStddevAggregator { - public StddevPopAggregator(List exps, SortOrder sortOrder) { - super(exps, sortOrder); - } + public StddevPopAggregator(List exps, SortOrder sortOrder) { + super(exps, sortOrder); + } - @Override - protected long getDataPointsCount() { - return totalCount; - } + @Override + protected long getDataPointsCount() { + return totalCount; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevSampAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevSampAggregator.java index 26e44376f02..0425f13f4ab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevSampAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/StddevSampAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,16 @@ /** * Client side Aggregator for STDDEV_SAMP aggregations - * - * * @since 1.2.1 */ public class StddevSampAggregator extends BaseStddevAggregator { - public StddevSampAggregator(List exps, SortOrder sortOrder) { - super(exps, sortOrder); - } + public StddevSampAggregator(List exps, SortOrder sortOrder) { + super(exps, sortOrder); + } - @Override - protected long getDataPointsCount() { - return totalCount - 1; - } + @Override + protected long getDataPointsCount() { + return totalCount - 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedIntSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedIntSumAggregator.java index ed08cb2ca60..eed0eae2619 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedIntSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedIntSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,26 +17,23 @@ */ package org.apache.phoenix.expression.aggregator; -import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PUnsignedInt; /** - * * Aggregator that sums unsigned integer values - * - * * @since 0.12 */ public class UnsignedIntSumAggregator extends NumberSumAggregator { - - public UnsignedIntSumAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - @Override - protected PDataType getInputDataType() { - return PUnsignedInt.INSTANCE; - } + + public UnsignedIntSumAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + @Override + protected PDataType getInputDataType() { + return PUnsignedInt.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedLongSumAggregator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedLongSumAggregator.java index 3c474c67613..dee476e1d63 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedLongSumAggregator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/aggregator/UnsignedLongSumAggregator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,27 +17,24 @@ */ package org.apache.phoenix.expression.aggregator; -import org.apache.phoenix.schema.types.PUnsignedLong; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PUnsignedLong; /** - * - * Aggregator that sums unsigned long values - * TODO: create these classes dynamically based on the type passed through - * - * + * Aggregator that sums unsigned long values TODO: create these classes dynamically based on the + * type passed through * @since 0.12 */ public class UnsignedLongSumAggregator extends NumberSumAggregator { - - public UnsignedLongSumAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - @Override - protected PDataType getInputDataType() { - return PUnsignedLong.INSTANCE; - } + + public UnsignedLongSumAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + @Override + protected PDataType getInputDataType() { + return PUnsignedLong.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java index 6ef1b3823a9..326e7a2e881 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,36 +31,36 @@ @BuiltInFunction(name = AbsFunction.NAME, args = { @Argument(allowedTypes = PDecimal.class) }) public class AbsFunction extends ScalarFunction { - public static final String NAME = "ABS"; + public static final String NAME = "ABS"; - public AbsFunction() { - } + public AbsFunction() { + } - public AbsFunction(List children) { - super(children); - } + public AbsFunction(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression childExpr = children.get(0); - PDataType dataType = childExpr.getDataType(); - if (childExpr.evaluate(tuple, ptr)) { - byte[] bytes = ptr.get(); - int offset = ptr.getOffset(), length = ptr.getLength(); - ptr.set(new byte[getDataType().getByteSize()]); - ((PNumericType) dataType).abs(bytes, offset, length, childExpr.getSortOrder(), ptr); - return true; - } - return false; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression childExpr = children.get(0); + PDataType dataType = childExpr.getDataType(); + if (childExpr.evaluate(tuple, ptr)) { + byte[] bytes = ptr.get(); + int offset = ptr.getOffset(), length = ptr.getLength(); + ptr.set(new byte[getDataType().getByteSize()]); + ((PNumericType) dataType).abs(bytes, offset, length, childExpr.getSortOrder(), ptr); + return true; } + return false; + } - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); - } + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } - @Override - public String getName() { - return AbsFunction.NAME; - } + @Override + public String getName() { + return AbsFunction.NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AggregateFunction.java index 32cae195233..110c9268233 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,32 +22,26 @@ import org.apache.phoenix.expression.Determinism; import org.apache.phoenix.expression.Expression; - - - /** - * - * Compiled representation of a built-in aggregate function - * - * + * Compiled representation of a built-in aggregate function * @since 0.1 */ abstract public class AggregateFunction extends FunctionExpression { - public AggregateFunction() { - } + public AggregateFunction() { + } + + public AggregateFunction(List children) { + super(children); + } - public AggregateFunction(List children) { - super(children); - } - - @Override - public boolean isStateless() { - return false; - } + @Override + public boolean isStateless() { + return false; + } - @Override - public Determinism getDeterminism() { - return Determinism.PER_ROW; - } + @Override + public Determinism getDeterminism() { + return Determinism.PER_ROW; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAllComparisonExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAllComparisonExpression.java index 22d9f0e11b1..7cf239f42f2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAllComparisonExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAllComparisonExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,20 +26,23 @@ public class ArrayAllComparisonExpression extends ArrayAnyComparisonExpression { - public ArrayAllComparisonExpression() {} + public ArrayAllComparisonExpression() { + } - public ArrayAllComparisonExpression(List children) { - super(children); - } + public ArrayAllComparisonExpression(List children) { + super(children); + } - @Override - protected boolean resultFound(ImmutableBytesWritable ptr) { - if (Bytes.equals(ptr.get(), PDataType.FALSE_BYTES)) { return true; } - return false; + @Override + protected boolean resultFound(ImmutableBytesWritable ptr) { + if (Bytes.equals(ptr.get(), PDataType.FALSE_BYTES)) { + return true; } + return false; + } - @Override - protected boolean result() { - return false; - } + @Override + protected boolean result() { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAnyComparisonExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAnyComparisonExpression.java index e6702d0df90..17190ba9f96 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAnyComparisonExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAnyComparisonExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,60 +31,68 @@ import org.apache.phoenix.schema.types.PDataType; public class ArrayAnyComparisonExpression extends BaseCompoundExpression { - public ArrayAnyComparisonExpression () { - } - public ArrayAnyComparisonExpression(List children) { - super(children); - } + public ArrayAnyComparisonExpression() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arrayKVExpression = children.get(0); - if (!arrayKVExpression.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { return true; } - int length = PArrayDataType.getArrayLength(ptr, - PDataType.fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE), - arrayKVExpression.getMaxLength()); - boolean elementAvailable = false; - for (int i = 0; i < length; i++) { - Expression comparisonExpr = children.get(1); - Expression arrayElemRef = ((ComparisonExpression)comparisonExpr).getChildren().get(1); - ((ArrayElemRefExpression)arrayElemRef).setIndex(i + 1); - comparisonExpr.evaluate(tuple, ptr); - if (expectedReturnResult(resultFound(ptr))) { return result(); } - elementAvailable = true; - } - if (!elementAvailable) { return false; } - return true; - } - protected boolean resultFound(ImmutableBytesWritable ptr) { - if(Bytes.equals(ptr.get(), PDataType.TRUE_BYTES)) { - return true; - } - return false; + public ArrayAnyComparisonExpression(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arrayKVExpression = children.get(0); + if (!arrayKVExpression.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } - - protected boolean result() { - return true; + int length = PArrayDataType.getArrayLength(ptr, + PDataType.fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE), + arrayKVExpression.getMaxLength()); + boolean elementAvailable = false; + for (int i = 0; i < length; i++) { + Expression comparisonExpr = children.get(1); + Expression arrayElemRef = ((ComparisonExpression) comparisonExpr).getChildren().get(1); + ((ArrayElemRefExpression) arrayElemRef).setIndex(i + 1); + comparisonExpr.evaluate(tuple, ptr); + if (expectedReturnResult(resultFound(ptr))) { + return result(); + } + elementAvailable = true; } - - protected boolean expectedReturnResult(boolean result) { - return true == result; + if (!elementAvailable) { + return false; } + return true; + } - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + protected boolean resultFound(ImmutableBytesWritable ptr) { + if (Bytes.equals(ptr.get(), PDataType.TRUE_BYTES)) { + return true; } + return false; + } + + protected boolean result() { + return true; + } + + protected boolean expectedReturnResult(boolean result) { + return true == result; + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, 
visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } -} \ No newline at end of file + return t; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java index d0c0b3b504a..d7a9e04581a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,27 +30,32 @@ import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarbinaryArray; -@FunctionParseNode.BuiltInFunction(name = ArrayAppendFunction.NAME, nodeClass=ArrayModifierParseNode.class, args = { - @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class})}) +@FunctionParseNode.BuiltInFunction(name = ArrayAppendFunction.NAME, + nodeClass = ArrayModifierParseNode.class, + args = { + @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarbinary.class }) }) public class ArrayAppendFunction extends ArrayModifierFunction { - public static final String NAME = "ARRAY_APPEND"; + public static final String NAME = "ARRAY_APPEND"; - public ArrayAppendFunction() { - } + public ArrayAppendFunction() { + } - public ArrayAppendFunction(List children) throws TypeMismatchException { - super(children); - } + public ArrayAppendFunction(List children) throws TypeMismatchException { + super(children); + } - @Override - protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength, Expression arrayExp) { - return PArrayDataType.appendItemToArray(ptr, len, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(), arrayExp.getSortOrder()); - } + @Override + protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, + byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength, + Expression arrayExp) { + return PArrayDataType.appendItemToArray(ptr, len, offset, arrayBytes, baseDataType, arrayLength, + getMaxLength(), arrayExp.getSortOrder()); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java index 5e4dba98923..76dbe12f0b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,70 +31,79 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinaryArray; -@FunctionParseNode.BuiltInFunction(name = ArrayConcatFunction.NAME, nodeClass=ArrayModifierParseNode.class, args = { - @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class}), - @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class})}) +@FunctionParseNode.BuiltInFunction(name = ArrayConcatFunction.NAME, + nodeClass = ArrayModifierParseNode.class, + args = { + @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), + @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }) }) public class ArrayConcatFunction extends ArrayModifierFunction { - public static final String NAME = "ARRAY_CAT"; + public static final String NAME = "ARRAY_CAT"; - public ArrayConcatFunction() { - } - - public ArrayConcatFunction(List children) throws TypeMismatchException { - super(children); - } + public ArrayConcatFunction() { + } + public ArrayConcatFunction(List children) throws TypeMismatchException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getLHSExpr().evaluate(tuple, ptr)){ - return false; - } - boolean isLHSRowKeyOrderOptimized = PArrayDataType.isRowKeyOrderOptimized(getLHSExpr().getDataType(), getLHSExpr().getSortOrder(), ptr); + if (!getLHSExpr().evaluate(tuple, ptr)) { + return false; + } + boolean isLHSRowKeyOrderOptimized = PArrayDataType + .isRowKeyOrderOptimized(getLHSExpr().getDataType(), getLHSExpr().getSortOrder(), ptr); - SortOrder sortOrder = getRHSExpr().getSortOrder(); - int actualLengthOfArray1 = Math.abs(PArrayDataType.getArrayLength(ptr, getLHSBaseType(), getLHSExpr().getMaxLength())); - int lengthArray1 = ptr.getLength(); - int offsetArray1 = ptr.getOffset(); - byte[] array1Bytes = ptr.get(); - if (!getRHSExpr().evaluate(tuple, ptr)) { - return false; - } - // If second array is null, return first array - if (ptr.getLength() == 0){ - ptr.set(array1Bytes, offsetArray1, lengthArray1); - return true; - } + SortOrder sortOrder = getRHSExpr().getSortOrder(); + int actualLengthOfArray1 = + Math.abs(PArrayDataType.getArrayLength(ptr, getLHSBaseType(), getLHSExpr().getMaxLength())); + int lengthArray1 = ptr.getLength(); + int offsetArray1 = ptr.getOffset(); + byte[] array1Bytes = ptr.get(); + if (!getRHSExpr().evaluate(tuple, ptr)) { + return false; + } + // If second array is null, return first array + if (ptr.getLength() == 0) { + ptr.set(array1Bytes, offsetArray1, lengthArray1); + return true; + } - checkSizeCompatibility(ptr, sortOrder, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(),getRHSExpr().getDataType()); + checkSizeCompatibility(ptr, sortOrder, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(), + getRHSExpr().getDataType()); - // FIXME: calling version of coerceBytes that takes into account the separator used by LHS - // If the RHS does not have the same separator, it'll be coerced to use it. It's unclear - // if we should do the same for all classes derived from the base class. 
- // Coerce RHS to LHS type - getLHSExpr().getDataType().coerceBytes(ptr, null, getRHSExpr().getDataType(), getRHSExpr().getMaxLength(), - getRHSExpr().getScale(), getRHSExpr().getSortOrder(), getLHSExpr().getMaxLength(), - getLHSExpr().getScale(), getLHSExpr().getSortOrder(), isLHSRowKeyOrderOptimized); - if (lengthArray1 == 0) { - return true; - } - return modifierFunction(ptr, lengthArray1, offsetArray1, array1Bytes, getLHSBaseType(), actualLengthOfArray1, getMaxLength(), getLHSExpr()); + // FIXME: calling version of coerceBytes that takes into account the separator used by LHS + // If the RHS does not have the same separator, it'll be coerced to use it. It's unclear + // if we should do the same for all classes derived from the base class. + // Coerce RHS to LHS type + getLHSExpr().getDataType().coerceBytes(ptr, null, getRHSExpr().getDataType(), + getRHSExpr().getMaxLength(), getRHSExpr().getScale(), getRHSExpr().getSortOrder(), + getLHSExpr().getMaxLength(), getLHSExpr().getScale(), getLHSExpr().getSortOrder(), + isLHSRowKeyOrderOptimized); + if (lengthArray1 == 0) { + return true; } + return modifierFunction(ptr, lengthArray1, offsetArray1, array1Bytes, getLHSBaseType(), + actualLengthOfArray1, getMaxLength(), getLHSExpr()); + } - @Override - protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, - byte[] array1Bytes, PDataType baseDataType, int actualLengthOfArray1, Integer maxLength, - Expression array1Exp) { - int actualLengthOfArray2 = Math.abs(PArrayDataType.getArrayLength(ptr, baseDataType, array1Exp.getMaxLength())); - // FIXME: concatArrays will be fine if it's copying the separator bytes, including the terminating bytes. - return PArrayDataType.concatArrays(ptr, len, offset, array1Bytes, baseDataType, actualLengthOfArray1, actualLengthOfArray2); - } + @Override + protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, + byte[] array1Bytes, PDataType baseDataType, int actualLengthOfArray1, Integer maxLength, + Expression array1Exp) { + int actualLengthOfArray2 = + Math.abs(PArrayDataType.getArrayLength(ptr, baseDataType, array1Exp.getMaxLength())); + // FIXME: concatArrays will be fine if it's copying the separator bytes, including the + // terminating bytes. + return PArrayDataType.concatArrays(ptr, len, offset, array1Bytes, baseDataType, + actualLengthOfArray1, actualLengthOfArray2); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayElemRefExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayElemRefExpression.java index 06bbced2eb1..6cc05728f57 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayElemRefExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayElemRefExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,52 +32,54 @@ public class ArrayElemRefExpression extends BaseCompoundExpression { - private int index; + private int index; - public ArrayElemRefExpression() { - } - - public ArrayElemRefExpression(List children) { - super(children); - } + public ArrayElemRefExpression() { + } - public void setIndex(int index) { - this.index = index; - } + public ArrayElemRefExpression(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arrayExpr = children.get(0); - return PArrayDataTypeDecoder.positionAtArrayElement(tuple, ptr, index, arrayExpr, getDataType(), getMaxLength()); - } + public void setIndex(int index) { + this.index = index; + } - @Override - public Integer getMaxLength() { - return this.children.get(0).getMaxLength(); - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arrayExpr = children.get(0); + return PArrayDataTypeDecoder.positionAtArrayElement(tuple, ptr, index, arrayExpr, getDataType(), + getMaxLength()); + } - @Override - public PDataType getDataType() { - return PDataType.fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - } + @Override + public Integer getMaxLength() { + return this.children.get(0).getMaxLength(); + } + + @Override + public PDataType getDataType() { + return PDataType + .fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + } - @Override - public final T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; + @Override + public final T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } -} \ No newline at end of file + return t; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java index 996be3dda1b..6988ee33c2f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,69 +28,79 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.*; -@FunctionParseNode.BuiltInFunction(name = ArrayFillFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}), - @FunctionParseNode.Argument(allowedTypes = {PInteger.class})}) +@FunctionParseNode.BuiltInFunction(name = ArrayFillFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }) }) public class ArrayFillFunction extends ScalarFunction { - public static final String NAME = "ARRAY_FILL"; + public static final String NAME = "ARRAY_FILL"; - public ArrayFillFunction() { - } + public ArrayFillFunction() { + } - public ArrayFillFunction(List children) throws TypeMismatchException { - super(children); - } + public ArrayFillFunction(List children) throws TypeMismatchException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getElementExpr().evaluate(tuple, ptr)) { - return false; - } - Object element = getElementExpr().getDataType().toObject(ptr, getElementExpr().getSortOrder(), getElementExpr().getMaxLength(), getElementExpr().getScale()); - if (!getLengthExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) { - return false; - } - int length = (Integer) getLengthExpr().getDataType().toObject(ptr, getLengthExpr().getSortOrder(), getLengthExpr().getMaxLength(), getLengthExpr().getScale()); - if (length <= 0) { - throw new IllegalArgumentException("Array length should be greater than 0"); - } - Object[] elements = new Object[length]; - Arrays.fill(elements, element); - PhoenixArray array = PDataType.instantiatePhoenixArray(getElementExpr().getDataType(), elements); - //When max length of a char array is not the max length of the element passed in - if (getElementExpr().getDataType().isFixedWidth() && getMaxLength() != null && !getMaxLength().equals(array.getMaxLength())) { - array = new PhoenixArray(array, getMaxLength()); - } - ptr.set(((PArrayDataType) getDataType()).toBytes(array, getElementExpr().getDataType(), getElementExpr().getSortOrder())); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getElementExpr().evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + Object element = getElementExpr().getDataType().toObject(ptr, getElementExpr().getSortOrder(), + getElementExpr().getMaxLength(), getElementExpr().getScale()); + if (!getLengthExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) { + return false; } - - @Override - public PDataType getDataType() { - return PArrayDataType.fromTypeId(PDataType.ARRAY_TYPE_BASE + getElementExpr().getDataType().getSqlType()); + int length = (Integer) getLengthExpr().getDataType().toObject(ptr, + getLengthExpr().getSortOrder(), getLengthExpr().getMaxLength(), getLengthExpr().getScale()); + if (length <= 0) { + throw new IllegalArgumentException("Array length should be greater than 0"); } - - @Override - public Integer getMaxLength() { - return getElementExpr().getDataType().getByteSize() == null ? 
getElementExpr().getMaxLength() : null; + Object[] elements = new Object[length]; + Arrays.fill(elements, element); + PhoenixArray array = + PDataType.instantiatePhoenixArray(getElementExpr().getDataType(), elements); + // When max length of a char array is not the max length of the element passed in + if ( + getElementExpr().getDataType().isFixedWidth() && getMaxLength() != null + && !getMaxLength().equals(array.getMaxLength()) + ) { + array = new PhoenixArray(array, getMaxLength()); } + ptr.set(((PArrayDataType) getDataType()).toBytes(array, getElementExpr().getDataType(), + getElementExpr().getSortOrder())); + return true; + } - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } + @Override + public String getName() { + return NAME; + } - public Expression getElementExpr() { - return children.get(0); - } + @Override + public PDataType getDataType() { + return PArrayDataType + .fromTypeId(PDataType.ARRAY_TYPE_BASE + getElementExpr().getDataType().getSqlType()); + } - public Expression getLengthExpr() { - return children.get(1); - } + @Override + public Integer getMaxLength() { + return getElementExpr().getDataType().getByteSize() == null + ? getElementExpr().getMaxLength() + : null; + } + + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } + + public Expression getElementExpr() { + return children.get(0); + } + + public Expression getLengthExpr() { + return children.get(1); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayIndexFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayIndexFunction.java index 0f3c40c58f8..d44b656399c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayIndexFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayIndexFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,66 +24,65 @@ import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.ParseException; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PArrayDataTypeDecoder; import org.apache.phoenix.schema.types.PBinaryArray; -import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinaryArray; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.tuple.Tuple; -@BuiltInFunction(name = ArrayIndexFunction.NAME, args = { - @Argument(allowedTypes = { PBinaryArray.class, - PVarbinaryArray.class }), - @Argument(allowedTypes = { PInteger.class }) }) +@BuiltInFunction(name = ArrayIndexFunction.NAME, + args = { @Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), + @Argument(allowedTypes = { PInteger.class }) }) public class ArrayIndexFunction extends ScalarFunction { - public static final String NAME = "ARRAY_ELEM"; + public static final String NAME = "ARRAY_ELEM"; - public ArrayIndexFunction() { - } + public ArrayIndexFunction() { + } - public ArrayIndexFunction(List children) { - super(children); - } + public ArrayIndexFunction(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression indexExpr = children.get(1); - if (!indexExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - // Use Codec to prevent Integer object allocation - int index = PInteger.INSTANCE.getCodec().decodeInt(ptr, indexExpr.getSortOrder()); - if(index < 0) { - throw new ParseException("Index cannot be negative :" + index); - } - Expression arrayExpr = children.get(0); - return PArrayDataTypeDecoder.positionAtArrayElement(tuple, ptr, index, arrayExpr, getDataType(), - getMaxLength()); - } - - @Override - public PDataType getDataType() { - return PDataType.fromTypeId(children.get(0).getDataType().getSqlType() - - PDataType.ARRAY_TYPE_BASE); - } - - @Override - public Integer getMaxLength() { - return this.children.get(0).getMaxLength(); - } - - @Override - public String getName() { - return NAME; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression indexExpr = children.get(1); + if (!indexExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } - - @Override - public SortOrder getSortOrder() { - return this.children.get(0).getSortOrder(); + // Use Codec to prevent Integer object allocation + int index = PInteger.INSTANCE.getCodec().decodeInt(ptr, indexExpr.getSortOrder()); + if (index < 0) { + throw new ParseException("Index cannot be negative :" + index); } + Expression arrayExpr = children.get(0); + return PArrayDataTypeDecoder.positionAtArrayElement(tuple, ptr, index, arrayExpr, getDataType(), + getMaxLength()); + } + + @Override + public PDataType getDataType() { + return PDataType + .fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); + } + + @Override + public 
Integer getMaxLength() { + return this.children.get(0).getMaxLength(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public SortOrder getSortOrder() { + return this.children.get(0).getSortOrder(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayLengthFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayLengthFunction.java index 3f7fe22ec0f..32233d8e1c4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayLengthFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayLengthFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,52 +23,51 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PBinaryArray; -import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PArrayDataType; +import org.apache.phoenix.schema.types.PBinaryArray; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinaryArray; -import org.apache.phoenix.schema.tuple.Tuple; -@BuiltInFunction(name = ArrayLengthFunction.NAME, args = { @Argument(allowedTypes = { - PBinaryArray.class, PVarbinaryArray.class }) }) +@BuiltInFunction(name = ArrayLengthFunction.NAME, + args = { @Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }) }) public class ArrayLengthFunction extends ScalarFunction { - public static final String NAME = "ARRAY_LENGTH"; + public static final String NAME = "ARRAY_LENGTH"; - public ArrayLengthFunction() { - } + public ArrayLengthFunction() { + } - public ArrayLengthFunction(List children) { - super(children); - } + public ArrayLengthFunction(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arrayExpr = children.get(0); - if (!arrayExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - PDataType baseType = PDataType.fromTypeId(children.get(0).getDataType() - .getSqlType() - - PDataType.ARRAY_TYPE_BASE); - int length = Math.abs(PArrayDataType.getArrayLength(ptr, baseType, arrayExpr.getMaxLength())); - byte[] lengthBuf = new byte[PInteger.INSTANCE.getByteSize()]; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arrayExpr = children.get(0); + if (!arrayExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; + } + PDataType baseType = + PDataType.fromTypeId(children.get(0).getDataType().getSqlType() - PDataType.ARRAY_TYPE_BASE); + int length = Math.abs(PArrayDataType.getArrayLength(ptr, baseType, arrayExpr.getMaxLength())); + byte[] lengthBuf = new byte[PInteger.INSTANCE.getByteSize()]; PInteger.INSTANCE.getCodec().encodeInt(length, lengthBuf, 0); - ptr.set(lengthBuf); - return true; - } + ptr.set(lengthBuf); + return true; + } 
- @Override - public PDataType getDataType() { - // Array length will return an Integer - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + // Array length will return an Integer + return PInteger.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java index b69f26c1034..e718fcc6cc8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.function; import java.io.DataInput; @@ -34,193 +33,217 @@ import org.apache.phoenix.util.ExpressionUtil; public abstract class ArrayModifierFunction extends ScalarFunction { - private boolean isNullArray; - - public ArrayModifierFunction() { - } - - public ArrayModifierFunction(List children) throws TypeMismatchException { - super(children); - Expression arrayExpr = null; - PDataType baseDataType = null; - Expression otherExpr = null; - PDataType otherExpressionType = null; - if (getLHSExpr().getDataType().isArrayType()) { - arrayExpr = getLHSExpr(); - baseDataType = getLHSBaseType(); - otherExpr = getRHSExpr(); - otherExpressionType = getRHSBaseType(); - } else { - arrayExpr = getRHSExpr(); - baseDataType = getRHSBaseType(); - otherExpr = getLHSExpr(); - otherExpressionType = getLHSBaseType(); - } - if (getDataType() != null && !(otherExpr instanceof LiteralExpression && otherExpr.isNullable()) && !otherExpressionType.isCoercibleTo(baseDataType)) { - throw TypeMismatchException.newException(baseDataType, otherExpressionType); - } - - // If the base type of an element is fixed width, make sure the element - // being appended will fit - if (getDataType() != null && otherExpressionType.getByteSize() == null - && otherExpressionType != null && baseDataType.isFixedWidth() - && otherExpressionType.isFixedWidth() && arrayExpr.getMaxLength() != null - && otherExpr.getMaxLength() != null - && otherExpr.getMaxLength() > arrayExpr.getMaxLength()) { - throw new DataExceedsCapacityException("Values are not size compatible"); - } - // If the base type has a scale, make sure the element being appended has a - // scale less than or equal to it - if (getDataType() != null && arrayExpr.getScale() != null && otherExpr.getScale() != null - && otherExpr.getScale() > arrayExpr.getScale()) { - throw new DataExceedsCapacityException(baseDataType, arrayExpr.getMaxLength(), - arrayExpr.getScale(), null); - } - init(); - } - - private void init() { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - if (getLHSExpr().getDataType().isArrayType()) { - isNullArray = ExpressionUtil.isNull(getLHSExpr(), ptr); - } else { - isNullArray = ExpressionUtil.isNull(getRHSExpr(), 
ptr); - } - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arrayExpr = null; - PDataType baseDataType = null; - Expression otherExpr = null; - PDataType otherExpressionType = null; - if (getLHSExpr().getDataType().isArrayType()) { - arrayExpr = getLHSExpr(); - baseDataType = getLHSBaseType(); - otherExpr = getRHSExpr(); - otherExpressionType = getRHSBaseType(); - } else { - arrayExpr = getRHSExpr(); - baseDataType = getRHSBaseType(); - otherExpr = getLHSExpr(); - otherExpressionType = getLHSBaseType(); - } - if (!arrayExpr.evaluate(tuple, ptr)) { - return false; - } - int arrayLength = PArrayDataType.getArrayLength(ptr, baseDataType, arrayExpr.getMaxLength()); - - int length = ptr.getLength(); - int offset = ptr.getOffset(); - byte[] arrayBytes = ptr.get(); - - otherExpr.evaluate(tuple, ptr); - - checkSizeCompatibility(ptr, otherExpr.getSortOrder(), arrayExpr, baseDataType, otherExpr, otherExpressionType); - coerceBytes(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType); - return modifierFunction(ptr, length, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(), - arrayExpr); - } - - // Override this method for various function implementations - protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, - byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength, - Expression arrayExp) { - return false; - } - - protected void checkSizeCompatibility(ImmutableBytesWritable ptr, SortOrder sortOrder, Expression arrayExpr, - PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) { - if (!baseDataType.isSizeCompatible(ptr, null, otherExpressionType, - sortOrder, otherExpr.getMaxLength(), otherExpr.getScale(), - arrayExpr.getMaxLength(), arrayExpr.getScale())) { - throw new DataExceedsCapacityException("Values are not size compatible"); - } - } - - - protected void coerceBytes(ImmutableBytesWritable ptr, Expression arrayExpr, - PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) { - baseDataType.coerceBytes(ptr, null, otherExpressionType, otherExpr.getMaxLength(), - otherExpr.getScale(), otherExpr.getSortOrder(), arrayExpr.getMaxLength(), - arrayExpr.getScale(), arrayExpr.getSortOrder()); - } - - public Expression getRHSExpr() { - return this.children.get(1); - } - - public Expression getLHSExpr() { - return this.children.get(0); - } - - public PDataType getLHSBaseType() { - if (getLHSExpr().getDataType().isArrayType()) { - // Use RHS type if we have a null constant to get the correct array type - return isNullArray ? getRHSExpr().getDataType() : PDataType.arrayBaseType(getLHSExpr().getDataType()); - } else { - return getLHSExpr().getDataType(); - } - } - - public PDataType getRHSBaseType() { - if (getRHSExpr().getDataType().isArrayType()) { - // Use LHS type if we have a null constant to get the correct array type - return isNullArray ? getLHSExpr().getDataType() : PDataType.arrayBaseType(getRHSExpr().getDataType()); - } else { - return getRHSExpr().getDataType(); - } - } - - @Override - public PDataType getDataType() { - if (getLHSExpr().getDataType().isArrayType()) { - // Use array of RHS type if we have a null constant since otherwise we'd use binary - return isNullArray ? getRHSExpr().getDataType().isArrayType() ? getRHSExpr().getDataType() : PDataType.fromTypeId(getRHSExpr().getDataType().getSqlType() + PDataType.ARRAY_TYPE_BASE) : getLHSExpr().getDataType(); - } else { - return isNullArray ? 
getLHSExpr().getDataType().isArrayType() ? getLHSExpr().getDataType() : PDataType.fromTypeId(getLHSExpr().getDataType().getSqlType() + PDataType.ARRAY_TYPE_BASE) : getRHSExpr().getDataType(); - } - } - - private Integer getMaxLength(Expression expression) { - PDataType type = expression.getDataType(); - if (type.isFixedWidth() && type.getByteSize() != null) { - return type.getByteSize(); - } - return expression.getMaxLength(); - } - - @Override - public Integer getMaxLength() { - if (getLHSExpr().getDataType().isArrayType()) { - // Use max length of RHS if we have a null constant since otherwise we'd use null (which breaks fixed types) - return getMaxLength(isNullArray ? getRHSExpr() : getLHSExpr()); - } else { - return getMaxLength(isNullArray ? getLHSExpr() : getRHSExpr()); - } - } - - @Override - public SortOrder getSortOrder() { - if (getLHSExpr().getDataType().isArrayType()) { - return isNullArray ? getRHSExpr().getSortOrder() : getLHSExpr().getSortOrder(); - } else { - return isNullArray ? getLHSExpr().getSortOrder() : getRHSExpr().getSortOrder(); - } - } - - @Override - public Integer getScale() { - if (getLHSExpr().getDataType().isArrayType()) { - return isNullArray ? getRHSExpr().getScale() : getLHSExpr().getScale(); - } else { - return isNullArray ? getLHSExpr().getScale() : getRHSExpr().getScale(); - } - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); - } + private boolean isNullArray; + + public ArrayModifierFunction() { + } + + public ArrayModifierFunction(List children) throws TypeMismatchException { + super(children); + Expression arrayExpr = null; + PDataType baseDataType = null; + Expression otherExpr = null; + PDataType otherExpressionType = null; + if (getLHSExpr().getDataType().isArrayType()) { + arrayExpr = getLHSExpr(); + baseDataType = getLHSBaseType(); + otherExpr = getRHSExpr(); + otherExpressionType = getRHSBaseType(); + } else { + arrayExpr = getRHSExpr(); + baseDataType = getRHSBaseType(); + otherExpr = getLHSExpr(); + otherExpressionType = getLHSBaseType(); + } + if ( + getDataType() != null && !(otherExpr instanceof LiteralExpression && otherExpr.isNullable()) + && !otherExpressionType.isCoercibleTo(baseDataType) + ) { + throw TypeMismatchException.newException(baseDataType, otherExpressionType); + } + + // If the base type of an element is fixed width, make sure the element + // being appended will fit + if ( + getDataType() != null && otherExpressionType.getByteSize() == null + && otherExpressionType != null && baseDataType.isFixedWidth() + && otherExpressionType.isFixedWidth() && arrayExpr.getMaxLength() != null + && otherExpr.getMaxLength() != null && otherExpr.getMaxLength() > arrayExpr.getMaxLength() + ) { + throw new DataExceedsCapacityException("Values are not size compatible"); + } + // If the base type has a scale, make sure the element being appended has a + // scale less than or equal to it + if ( + getDataType() != null && arrayExpr.getScale() != null && otherExpr.getScale() != null + && otherExpr.getScale() > arrayExpr.getScale() + ) { + throw new DataExceedsCapacityException(baseDataType, arrayExpr.getMaxLength(), + arrayExpr.getScale(), null); + } + init(); + } + + private void init() { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + if (getLHSExpr().getDataType().isArrayType()) { + isNullArray = ExpressionUtil.isNull(getLHSExpr(), ptr); + } else { + isNullArray = ExpressionUtil.isNull(getRHSExpr(), ptr); + } + } + + @Override + public boolean evaluate(Tuple 
tuple, ImmutableBytesWritable ptr) { + Expression arrayExpr = null; + PDataType baseDataType = null; + Expression otherExpr = null; + PDataType otherExpressionType = null; + if (getLHSExpr().getDataType().isArrayType()) { + arrayExpr = getLHSExpr(); + baseDataType = getLHSBaseType(); + otherExpr = getRHSExpr(); + otherExpressionType = getRHSBaseType(); + } else { + arrayExpr = getRHSExpr(); + baseDataType = getRHSBaseType(); + otherExpr = getLHSExpr(); + otherExpressionType = getLHSBaseType(); + } + if (!arrayExpr.evaluate(tuple, ptr)) { + return false; + } + int arrayLength = PArrayDataType.getArrayLength(ptr, baseDataType, arrayExpr.getMaxLength()); + + int length = ptr.getLength(); + int offset = ptr.getOffset(); + byte[] arrayBytes = ptr.get(); + + otherExpr.evaluate(tuple, ptr); + + checkSizeCompatibility(ptr, otherExpr.getSortOrder(), arrayExpr, baseDataType, otherExpr, + otherExpressionType); + coerceBytes(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType); + return modifierFunction(ptr, length, offset, arrayBytes, baseDataType, arrayLength, + getMaxLength(), arrayExpr); + } + + // Override this method for various function implementations + protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, + byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength, + Expression arrayExp) { + return false; + } + + protected void checkSizeCompatibility(ImmutableBytesWritable ptr, SortOrder sortOrder, + Expression arrayExpr, PDataType baseDataType, Expression otherExpr, + PDataType otherExpressionType) { + if ( + !baseDataType.isSizeCompatible(ptr, null, otherExpressionType, sortOrder, + otherExpr.getMaxLength(), otherExpr.getScale(), arrayExpr.getMaxLength(), + arrayExpr.getScale()) + ) { + throw new DataExceedsCapacityException("Values are not size compatible"); + } + } + + protected void coerceBytes(ImmutableBytesWritable ptr, Expression arrayExpr, + PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) { + baseDataType.coerceBytes(ptr, null, otherExpressionType, otherExpr.getMaxLength(), + otherExpr.getScale(), otherExpr.getSortOrder(), arrayExpr.getMaxLength(), + arrayExpr.getScale(), arrayExpr.getSortOrder()); + } + + public Expression getRHSExpr() { + return this.children.get(1); + } + + public Expression getLHSExpr() { + return this.children.get(0); + } + + public PDataType getLHSBaseType() { + if (getLHSExpr().getDataType().isArrayType()) { + // Use RHS type if we have a null constant to get the correct array type + return isNullArray + ? getRHSExpr().getDataType() + : PDataType.arrayBaseType(getLHSExpr().getDataType()); + } else { + return getLHSExpr().getDataType(); + } + } + + public PDataType getRHSBaseType() { + if (getRHSExpr().getDataType().isArrayType()) { + // Use LHS type if we have a null constant to get the correct array type + return isNullArray + ? getLHSExpr().getDataType() + : PDataType.arrayBaseType(getRHSExpr().getDataType()); + } else { + return getRHSExpr().getDataType(); + } + } + + @Override + public PDataType getDataType() { + if (getLHSExpr().getDataType().isArrayType()) { + // Use array of RHS type if we have a null constant since otherwise we'd use binary + return isNullArray + ? getRHSExpr().getDataType().isArrayType() + ? getRHSExpr().getDataType() + : PDataType + .fromTypeId(getRHSExpr().getDataType().getSqlType() + PDataType.ARRAY_TYPE_BASE) + : getLHSExpr().getDataType(); + } else { + return isNullArray + ? getLHSExpr().getDataType().isArrayType() + ? 
getLHSExpr().getDataType() + : PDataType + .fromTypeId(getLHSExpr().getDataType().getSqlType() + PDataType.ARRAY_TYPE_BASE) + : getRHSExpr().getDataType(); + } + } + + private Integer getMaxLength(Expression expression) { + PDataType type = expression.getDataType(); + if (type.isFixedWidth() && type.getByteSize() != null) { + return type.getByteSize(); + } + return expression.getMaxLength(); + } + + @Override + public Integer getMaxLength() { + if (getLHSExpr().getDataType().isArrayType()) { + // Use max length of RHS if we have a null constant since otherwise we'd use null (which + // breaks fixed types) + return getMaxLength(isNullArray ? getRHSExpr() : getLHSExpr()); + } else { + return getMaxLength(isNullArray ? getLHSExpr() : getRHSExpr()); + } + } + + @Override + public SortOrder getSortOrder() { + if (getLHSExpr().getDataType().isArrayType()) { + return isNullArray ? getRHSExpr().getSortOrder() : getLHSExpr().getSortOrder(); + } else { + return isNullArray ? getLHSExpr().getSortOrder() : getRHSExpr().getSortOrder(); + } + } + + @Override + public Integer getScale() { + if (getLHSExpr().getDataType().isArrayType()) { + return isNullArray ? getRHSExpr().getScale() : getLHSExpr().getScale(); + } else { + return isNullArray ? getLHSExpr().getScale() : getRHSExpr().getScale(); + } + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java index 41fdf2a4dfd..56973573e65 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-
 package org.apache.phoenix.expression.function;
 
 import java.util.List;
@@ -31,29 +30,31 @@
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarbinaryArray;
 
-@FunctionParseNode.BuiltInFunction(name = ArrayPrependFunction.NAME, nodeClass=ArrayModifierParseNode.class, args = {
-        @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}),
-        @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class})})
+@FunctionParseNode.BuiltInFunction(name = ArrayPrependFunction.NAME,
+    nodeClass = ArrayModifierParseNode.class,
+    args = { @FunctionParseNode.Argument(allowedTypes = { PVarbinary.class }),
+      @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }) })
 public class ArrayPrependFunction extends ArrayModifierFunction {
 
-    public static final String NAME = "ARRAY_PREPEND";
+  public static final String NAME = "ARRAY_PREPEND";
 
-    public ArrayPrependFunction() {
-    }
+  public ArrayPrependFunction() {
+  }
 
-    public ArrayPrependFunction(List<Expression> children) throws TypeMismatchException {
-        super(children);
-    }
+  public ArrayPrependFunction(List<Expression> children) throws TypeMismatchException {
+    super(children);
+  }
 
-    @Override
-    protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
-            byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength,
-            Expression arrayExp) {
-        return PArrayDataType.prependItemToArray(ptr, len, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(), arrayExp.getSortOrder());
-    }
+  @Override
+  protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
+    byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength,
+    Expression arrayExp) {
+    return PArrayDataType.prependItemToArray(ptr, len, offset, arrayBytes, baseDataType,
+      arrayLength, getMaxLength(), arrayExp.getSortOrder());
+  }
 
-    @Override
-    public String getName() {
-        return NAME;
-    }
+  @Override
+  public String getName() {
+    return NAME;
+  }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayRemoveFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayRemoveFunction.java
index d71cc23e21a..119f48b0829 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayRemoveFunction.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayRemoveFunction.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *   http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
 * limitations under the License.
*/ - package org.apache.phoenix.expression.function; import java.util.List; @@ -35,54 +34,57 @@ import org.apache.phoenix.schema.types.PVarbinaryArray; import org.apache.phoenix.util.StringUtil; -@FunctionParseNode.BuiltInFunction(name = ArrayRemoveFunction.NAME, nodeClass = ArrayModifierParseNode.class, args = { - @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarbinary.class }) }) +@FunctionParseNode.BuiltInFunction(name = ArrayRemoveFunction.NAME, + nodeClass = ArrayModifierParseNode.class, + args = { + @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarbinary.class }) }) public class ArrayRemoveFunction extends ArrayModifierFunction { - public static final String NAME = "ARRAY_REMOVE"; + public static final String NAME = "ARRAY_REMOVE"; - public ArrayRemoveFunction() { - } + public ArrayRemoveFunction() { + } - public ArrayRemoveFunction(List children) throws TypeMismatchException { - super(children); - } + public ArrayRemoveFunction(List children) throws TypeMismatchException { + super(children); + } - @Override - protected boolean modifierFunction(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, - PDataType baseType, int arrayLength, Integer maxLength, Expression arrayExp) { - SortOrder sortOrder = arrayExp.getSortOrder(); + @Override + protected boolean modifierFunction(ImmutableBytesWritable ptr, int length, int offset, + byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, + Expression arrayExp) { + SortOrder sortOrder = arrayExp.getSortOrder(); - if (ptr.getLength() == 0 || arrayBytes.length == 0) { - ptr.set(arrayBytes, offset, length); - return true; - } + if (ptr.getLength() == 0 || arrayBytes.length == 0) { + ptr.set(arrayBytes, offset, length); + return true; + } - PArrayDataTypeEncoder arrayDataTypeEncoder = new PArrayDataTypeEncoder(baseType, sortOrder); + PArrayDataTypeEncoder arrayDataTypeEncoder = new PArrayDataTypeEncoder(baseType, sortOrder); - if (getRHSBaseType().equals(PChar.INSTANCE)) { - int unpaddedCharLength = StringUtil.getUnpaddedCharLength(ptr.get(), ptr.getOffset(), ptr.getLength(), - sortOrder); - ptr.set(ptr.get(), offset, unpaddedCharLength); - } + if (getRHSBaseType().equals(PChar.INSTANCE)) { + int unpaddedCharLength = + StringUtil.getUnpaddedCharLength(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); + ptr.set(ptr.get(), offset, unpaddedCharLength); + } - for (int arrayIndex = 0; arrayIndex < arrayLength; arrayIndex++) { - ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(arrayBytes, offset, length); - PArrayDataTypeDecoder.positionAtArrayElement(ptr2, arrayIndex, baseType, maxLength); - if (baseType.compareTo(ptr2, sortOrder, ptr, sortOrder, baseType) != 0) { - arrayDataTypeEncoder.appendValue(ptr2.get(), ptr2.getOffset(), ptr2.getLength()); - } - } + for (int arrayIndex = 0; arrayIndex < arrayLength; arrayIndex++) { + ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(arrayBytes, offset, length); + PArrayDataTypeDecoder.positionAtArrayElement(ptr2, arrayIndex, baseType, maxLength); + if (baseType.compareTo(ptr2, sortOrder, ptr, sortOrder, baseType) != 0) { + arrayDataTypeEncoder.appendValue(ptr2.get(), ptr2.getOffset(), ptr2.getLength()); + } + } - ptr.set(arrayDataTypeEncoder.encode()); + ptr.set(arrayDataTypeEncoder.encode()); - return true; - } + return true; + } - @Override - public String getName() { 
- return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayToStringFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayToStringFunction.java index 9102df403b8..6c467760a1b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayToStringFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ArrayToStringFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,59 +26,64 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.*; -@FunctionParseNode.BuiltInFunction(name = ArrayToStringFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class, PChar.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class, PChar.class}, defaultValue = "null")}) +@FunctionParseNode.BuiltInFunction(name = ArrayToStringFunction.NAME, + args = { + @FunctionParseNode.Argument(allowedTypes = { PBinaryArray.class, PVarbinaryArray.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class, PChar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class, PChar.class }, + defaultValue = "null") }) public class ArrayToStringFunction extends ScalarFunction { - public static final String NAME = "ARRAY_TO_STRING"; + public static final String NAME = "ARRAY_TO_STRING"; - public ArrayToStringFunction() { - } - - public ArrayToStringFunction(List children) { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression delimiterExpr = children.get(1); - if (!delimiterExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - String delimiter = (String) delimiterExpr.getDataType().toObject(ptr, delimiterExpr.getSortOrder(), delimiterExpr.getMaxLength(), delimiterExpr.getScale()); - - Expression arrayExpr = children.get(0); - if (!arrayExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - PhoenixArray array = (PhoenixArray) arrayExpr.getDataType().toObject(ptr, arrayExpr.getSortOrder(), arrayExpr.getMaxLength(), arrayExpr.getScale()); + public ArrayToStringFunction() { + } - Expression nullExpr = children.get(2); - String nullString = null; - if (nullExpr.evaluate(tuple, ptr) && ptr.getLength() != 0) { - nullString = (String) nullExpr.getDataType().toObject(ptr, nullExpr.getSortOrder(), nullExpr.getMaxLength(), nullExpr.getScale()); - } + public ArrayToStringFunction(List children) { + super(children); + } - return PArrayDataType.arrayToString(ptr, array, delimiter, nullString, getSortOrder()); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression delimiterExpr = children.get(1); + if (!delimiterExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } + String delimiter = (String) delimiterExpr.getDataType().toObject(ptr, + delimiterExpr.getSortOrder(), 
delimiterExpr.getMaxLength(), delimiterExpr.getScale()); - @Override - public String getName() { - return NAME; + Expression arrayExpr = children.get(0); + if (!arrayExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } + PhoenixArray array = (PhoenixArray) arrayExpr.getDataType().toObject(ptr, + arrayExpr.getSortOrder(), arrayExpr.getMaxLength(), arrayExpr.getScale()); - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + Expression nullExpr = children.get(2); + String nullString = null; + if (nullExpr.evaluate(tuple, ptr) && ptr.getLength() != 0) { + nullString = (String) nullExpr.getDataType().toObject(ptr, nullExpr.getSortOrder(), + nullExpr.getMaxLength(), nullExpr.getScale()); } - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } + return PArrayDataType.arrayToString(ptr, array, delimiter, nullString, getSortOrder()); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AvgAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AvgAggregateFunction.java index ccba14fdbb8..d0cc60300ba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AvgAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/AvgAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,87 +26,89 @@ import org.apache.phoenix.parse.AvgAggregateParseNode; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; - -@BuiltInFunction(name=AvgAggregateFunction.NAME, nodeClass=AvgAggregateParseNode.class, args= {@Argument(allowedTypes={PDecimal.class})} ) +@BuiltInFunction(name = AvgAggregateFunction.NAME, nodeClass = AvgAggregateParseNode.class, + args = { @Argument(allowedTypes = { PDecimal.class }) }) public class AvgAggregateFunction extends CompositeAggregateFunction { - public static final String NAME = "AVG"; - private final CountAggregateFunction countFunc; - private final SumAggregateFunction sumFunc; - private Integer scale; + public static final String NAME = "AVG"; + private final CountAggregateFunction countFunc; + private final SumAggregateFunction sumFunc; + private Integer scale; - // TODO: remove when not required at built-in func register time - public AvgAggregateFunction(List children) { - super(children); - this.countFunc = null; - this.sumFunc = null; - setScale(children); - } + // TODO: remove when not required at built-in func register time + public AvgAggregateFunction(List children) { + super(children); + 
this.countFunc = null; + this.sumFunc = null; + setScale(children); + } - public AvgAggregateFunction(List children, CountAggregateFunction countFunc, SumAggregateFunction sumFunc) { - super(children); - this.countFunc = countFunc; - this.sumFunc = sumFunc; - setScale(children); - } + public AvgAggregateFunction(List children, CountAggregateFunction countFunc, + SumAggregateFunction sumFunc) { + super(children); + this.countFunc = countFunc; + this.sumFunc = sumFunc; + setScale(children); + } - private void setScale(List children) { - scale = PDataType.MIN_DECIMAL_AVG_SCALE; // At least 4; - for (Expression child: children) { - if (child.getScale() != null) { - scale = Math.max(scale, child.getScale()); - } - } + private void setScale(List children) { + scale = PDataType.MIN_DECIMAL_AVG_SCALE; // At least 4; + for (Expression child : children) { + if (child.getScale() != null) { + scale = Math.max(scale, child.getScale()); + } } + } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!countFunc.evaluate(tuple, ptr)) { - return false; - } - long count = countFunc.getDataType().getCodec().decodeLong(ptr, SortOrder.getDefault()); - if (count == 0) { - return false; - } - - // Normal case where a column reference was used as the argument to AVG - if (!countFunc.isConstantExpression()) { - sumFunc.evaluate(tuple, ptr); - BigDecimal sum = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, sumFunc.getDataType()); - // For the final column projection, we divide the sum by the count, both coerced to BigDecimal. - // TODO: base the precision on column metadata instead of constant - BigDecimal avg = sum.divide(BigDecimal.valueOf(count), PDataType.DEFAULT_MATH_CONTEXT); - avg = avg.setScale(scale, BigDecimal.ROUND_DOWN); - ptr.set(PDecimal.INSTANCE.toBytes(avg)); - return true; - } - BigDecimal value = (BigDecimal) ((LiteralExpression)countFunc.getChildren().get(0)).getValue(); - value = value.setScale(scale, BigDecimal.ROUND_DOWN); - ptr.set(PDecimal.INSTANCE.toBytes(value)); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!countFunc.evaluate(tuple, ptr)) { + return false; } - - @Override - public boolean isNullable() { - return sumFunc != null && sumFunc.isNullable(); + long count = countFunc.getDataType().getCodec().decodeLong(ptr, SortOrder.getDefault()); + if (count == 0) { + return false; } - @Override - public String getName() { - return NAME; + // Normal case where a column reference was used as the argument to AVG + if (!countFunc.isConstantExpression()) { + sumFunc.evaluate(tuple, ptr); + BigDecimal sum = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, sumFunc.getDataType()); + // For the final column projection, we divide the sum by the count, both coerced to + // BigDecimal. 
+ // TODO: base the precision on column metadata instead of constant + BigDecimal avg = sum.divide(BigDecimal.valueOf(count), PDataType.DEFAULT_MATH_CONTEXT); + avg = avg.setScale(scale, BigDecimal.ROUND_DOWN); + ptr.set(PDecimal.INSTANCE.toBytes(avg)); + return true; } + BigDecimal value = (BigDecimal) ((LiteralExpression) countFunc.getChildren().get(0)).getValue(); + value = value.setScale(scale, BigDecimal.ROUND_DOWN); + ptr.set(PDecimal.INSTANCE.toBytes(value)); + return true; + } - @Override - public Integer getScale() { - return scale; - } + @Override + public boolean isNullable() { + return sumFunc != null && sumFunc.isNullable(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public Integer getScale() { + return scale; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonConditionExpressionFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonConditionExpressionFunction.java index f9cd26c84e4..aa4bd758b5c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonConditionExpressionFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonConditionExpressionFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,111 +19,106 @@ import java.util.List; -import org.apache.phoenix.expression.util.bson.DocumentComparisonExpressionUtils; -import org.bson.BsonDocument; -import org.bson.BsonString; -import org.bson.BsonValue; -import org.bson.RawBsonDocument; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; +import org.apache.phoenix.expression.util.bson.DocumentComparisonExpressionUtils; import org.apache.phoenix.expression.util.bson.SQLComparisonExpressionUtils; -import org.apache.phoenix.parse.FunctionParseNode; import org.apache.phoenix.parse.BsonConditionExpressionParseNode; +import org.apache.phoenix.parse.FunctionParseNode; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PBoolean; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PBson; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; @FunctionParseNode.BuiltInFunction(name = BsonConditionExpressionFunction.NAME, nodeClass = BsonConditionExpressionParseNode.class, - args = - { - @FunctionParseNode.Argument(allowedTypes = {PBson.class, PVarbinary.class}), - @FunctionParseNode.Argument(allowedTypes = {PBson.class, PVarbinary.class}, - isConstant = true) - }) + args = { @FunctionParseNode.Argument(allowedTypes = { PBson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PBson.class, PVarbinary.class }, + isConstant = true) }) public class BsonConditionExpressionFunction extends ScalarFunction { - public static final String NAME = "BSON_CONDITION_EXPRESSION"; + public 
static final String NAME = "BSON_CONDITION_EXPRESSION"; - public BsonConditionExpressionFunction() { - } - - public BsonConditionExpressionFunction(List children) { - super(children); - Preconditions.checkNotNull(getChildren().get(1)); - } + public BsonConditionExpressionFunction() { + } - @Override - public String getName() { - return NAME; - } + public BsonConditionExpressionFunction(List children) { + super(children); + Preconditions.checkNotNull(getChildren().get(1)); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Evaluate the BSON cell value - if (!getChildren().get(0).evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } + @Override + public String getName() { + return NAME; + } - RawBsonDocument rawBsonDocument = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Evaluate the BSON cell value + if (!getChildren().get(0).evaluate(tuple, ptr)) { + return false; + } + if (ptr == null || ptr.getLength() == 0) { + return false; + } - // Evaluate condition expression - if (!getChildren().get(1).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return false; - } + RawBsonDocument rawBsonDocument = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); - final RawBsonDocument conditionExpressionBsonDoc; - if (getChildren().get(1).getDataType() == PVarchar.INSTANCE) { - String conditionExpression = - (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); - if (conditionExpression == null || conditionExpression.isEmpty()) { - ptr.set(PBoolean.INSTANCE.toBytes(true)); - return true; - } - conditionExpressionBsonDoc = RawBsonDocument.parse(conditionExpression); - } else { - conditionExpressionBsonDoc = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); - if (conditionExpressionBsonDoc == null || conditionExpressionBsonDoc.isEmpty()) { - ptr.set(PBoolean.INSTANCE.toBytes(true)); - return true; - } - } + // Evaluate condition expression + if (!getChildren().get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return false; + } - BsonValue conditionExp = conditionExpressionBsonDoc.get("$EXPR"); - BsonValue exprValues = conditionExpressionBsonDoc.get("$VAL"); - if (conditionExp != null && exprValues != null) { - if (conditionExp.isString() && exprValues.isDocument()) { - SQLComparisonExpressionUtils sqlComparisonExpressionUtils = - new SQLComparisonExpressionUtils(rawBsonDocument, - (BsonDocument) exprValues); - boolean result = sqlComparisonExpressionUtils.evaluateConditionExpression( - ((BsonString) conditionExp).getValue()); - ptr.set(PBoolean.INSTANCE.toBytes(result)); - return true; - } - throw new IllegalArgumentException( - "Condition Expression should contain valid expression and values"); - } else { - boolean result = DocumentComparisonExpressionUtils.evaluateConditionExpression( - rawBsonDocument, conditionExpressionBsonDoc); - ptr.set(PBoolean.INSTANCE.toBytes(result)); - return true; - } + final RawBsonDocument conditionExpressionBsonDoc; + if (getChildren().get(1).getDataType() == PVarchar.INSTANCE) { + String conditionExpression = + (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); + if (conditionExpression == null || conditionExpression.isEmpty()) { + ptr.set(PBoolean.INSTANCE.toBytes(true)); + return true; + } + conditionExpressionBsonDoc = RawBsonDocument.parse(conditionExpression); + } else { + 
conditionExpressionBsonDoc = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); + if (conditionExpressionBsonDoc == null || conditionExpressionBsonDoc.isEmpty()) { + ptr.set(PBoolean.INSTANCE.toBytes(true)); + return true; + } } - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + BsonValue conditionExp = conditionExpressionBsonDoc.get("$EXPR"); + BsonValue exprValues = conditionExpressionBsonDoc.get("$VAL"); + if (conditionExp != null && exprValues != null) { + if (conditionExp.isString() && exprValues.isDocument()) { + SQLComparisonExpressionUtils sqlComparisonExpressionUtils = + new SQLComparisonExpressionUtils(rawBsonDocument, (BsonDocument) exprValues); + boolean result = sqlComparisonExpressionUtils + .evaluateConditionExpression(((BsonString) conditionExp).getValue()); + ptr.set(PBoolean.INSTANCE.toBytes(result)); + return true; + } + throw new IllegalArgumentException( + "Condition Expression should contain valid expression and values"); + } else { + boolean result = DocumentComparisonExpressionUtils + .evaluateConditionExpression(rawBsonDocument, conditionExpressionBsonDoc); + ptr.set(PBoolean.INSTANCE.toBytes(result)); + return true; } + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonUpdateExpressionFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonUpdateExpressionFunction.java index a12b308110c..9ce4c3a885a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonUpdateExpressionFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonUpdateExpressionFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,13 +20,6 @@ import java.nio.ByteBuffer; import java.util.List; -import org.bson.BsonBinaryReader; -import org.bson.BsonDocument; -import org.bson.RawBsonDocument; -import org.bson.codecs.BsonDocumentCodec; -import org.bson.codecs.DecoderContext; -import org.bson.io.ByteBufferBsonInput; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.bson.UpdateExpressionUtils; @@ -38,86 +31,86 @@ import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.bson.BsonBinaryReader; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.io.ByteBufferBsonInput; @FunctionParseNode.BuiltInFunction(name = BsonUpdateExpressionFunction.NAME, nodeClass = BsonUpdateExpressionParseNode.class, - args = - { - @FunctionParseNode.Argument(allowedTypes = {PBson.class, PVarbinary.class}), - @FunctionParseNode.Argument(allowedTypes = {PBson.class, PVarbinary.class}, - isConstant = true) - }) + args = { @FunctionParseNode.Argument(allowedTypes = { PBson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PBson.class, PVarbinary.class }, + isConstant = true) }) public class BsonUpdateExpressionFunction extends ScalarFunction { - public static final String NAME = "BSON_UPDATE_EXPRESSION"; + public static final String NAME = "BSON_UPDATE_EXPRESSION"; - public BsonUpdateExpressionFunction() { - } + public BsonUpdateExpressionFunction() { + } - public BsonUpdateExpressionFunction(List children) { - super(children); - Preconditions.checkNotNull(getChildren().get(1)); - } + public BsonUpdateExpressionFunction(List children) { + super(children); + Preconditions.checkNotNull(getChildren().get(1)); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Evaluate the BSON cell value - if (!getChildren().get(0).evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Evaluate the BSON cell value + if (!getChildren().get(0).evaluate(tuple, ptr)) { + return false; + } + if (ptr == null || ptr.getLength() == 0) { + return false; + } - RawBsonDocument rawBsonDocument = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); - BsonDocument bsonDocument; - try (BsonBinaryReader bsonReader = new BsonBinaryReader( - new ByteBufferBsonInput(rawBsonDocument.getByteBuffer()))) { - bsonDocument = - new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); - } + RawBsonDocument rawBsonDocument = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); + BsonDocument bsonDocument; + try (BsonBinaryReader bsonReader = + new BsonBinaryReader(new ByteBufferBsonInput(rawBsonDocument.getByteBuffer()))) { + bsonDocument = new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); + } - // Evaluate update expression - if 
(!getChildren().get(1).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return false; - } + // Evaluate update expression + if (!getChildren().get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return false; + } - final RawBsonDocument updateExpressionBsonDoc; - if (getChildren().get(1).getDataType() == PVarchar.INSTANCE) { - String updateExpression = - (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); - if (updateExpression == null || updateExpression.isEmpty()) { - return true; - } - updateExpressionBsonDoc = RawBsonDocument.parse(updateExpression); - } else { - updateExpressionBsonDoc = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); - if (updateExpressionBsonDoc == null || updateExpressionBsonDoc.isEmpty()) { - return true; - } - } + final RawBsonDocument updateExpressionBsonDoc; + if (getChildren().get(1).getDataType() == PVarchar.INSTANCE) { + String updateExpression = + (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); + if (updateExpression == null || updateExpression.isEmpty()) { + return true; + } + updateExpressionBsonDoc = RawBsonDocument.parse(updateExpression); + } else { + updateExpressionBsonDoc = (RawBsonDocument) PBson.INSTANCE.toObject(ptr); + if (updateExpressionBsonDoc == null || updateExpressionBsonDoc.isEmpty()) { + return true; + } + } - UpdateExpressionUtils.updateExpression(updateExpressionBsonDoc, - bsonDocument); + UpdateExpressionUtils.updateExpression(updateExpressionBsonDoc, bsonDocument); - RawBsonDocument updatedDocument = - new RawBsonDocument(bsonDocument, new BsonDocumentCodec()); - ByteBuffer buffer = updatedDocument.getByteBuffer().asNIO(); + RawBsonDocument updatedDocument = new RawBsonDocument(bsonDocument, new BsonDocumentCodec()); + ByteBuffer buffer = updatedDocument.getByteBuffer().asNIO(); - ptr.set(buffer.array(), buffer.arrayOffset(), buffer.limit()); - return true; - } + ptr.set(buffer.array(), buffer.arrayOffset(), buffer.limit()); + return true; + } - @Override - public PDataType getDataType() { - return PBson.INSTANCE; - } + @Override + public PDataType getDataType() { + return PBson.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonValueFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonValueFunction.java index b7ff1088c4b..731c2b849b8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonValueFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/BsonValueFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,14 +20,6 @@ import java.util.Date; import java.util.List; -import org.bson.BsonBinary; -import org.bson.BsonBoolean; -import org.bson.BsonDateTime; -import org.bson.BsonNumber; -import org.bson.BsonString; -import org.bson.BsonValue; -import org.bson.RawBsonDocument; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; @@ -45,129 +37,129 @@ import org.apache.phoenix.schema.types.PJson; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; -//import org.apache.phoenix.schema.types.PVarbinaryEncoded; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; +import org.bson.BsonBinary; +import org.bson.BsonBoolean; +import org.bson.BsonDateTime; +import org.bson.BsonNumber; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; /** * BSON_VALUE function to retrieve the value of any field in BSON. This can be used for any - * top-level or nested Bson fields. - * 1. The first argument represents BSON Object on which the function performs scan. - * 2. The second argument represents the field key. The field key can represent any top level or - * nested fields within the document. The caller should use "." notation for accessing nested - * document elements and "[n]" notation for accessing nested array elements. - * Top level fields do not require any additional character. - * 3. The third argument represents the data type that the client expects the value of the - * field to be converted to while returning the value. + * top-level or nested Bson fields. 1. The first argument represents BSON Object on which the + * function performs scan. 2. The second argument represents the field key. The field key can + * represent any top level or nested fields within the document. The caller should use "." notation + * for accessing nested document elements and "[n]" notation for accessing nested array elements. + * Top level fields do not require any additional character. 3. The third argument represents the + * data type that the client expects the value of the field to be converted to while returning the + * value. 
*/ -@FunctionParseNode.BuiltInFunction( - name = BsonValueFunction.NAME, +@FunctionParseNode.BuiltInFunction(name = BsonValueFunction.NAME, nodeClass = BsonValueParseNode.class, args = { - @FunctionParseNode.Argument(allowedTypes = {PJson.class, PBson.class, PVarbinary.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class}, isConstant = true), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class}, isConstant = true), - } -) + @FunctionParseNode.Argument(allowedTypes = { PJson.class, PBson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, isConstant = true), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, isConstant = true), }) public class BsonValueFunction extends ScalarFunction { - public static final String NAME = "BSON_VALUE"; + public static final String NAME = "BSON_VALUE"; - public BsonValueFunction() { - // no-op - } + public BsonValueFunction() { + // no-op + } - public BsonValueFunction(List children) { - super(children); - Preconditions.checkNotNull(getChildren().get(1)); - Preconditions.checkNotNull(getChildren().get(2)); - } + public BsonValueFunction(List children) { + super(children); + Preconditions.checkNotNull(getChildren().get(1)); + Preconditions.checkNotNull(getChildren().get(2)); + } - private PDataType getPDataType() { - String dataType = (String) ((LiteralExpression) getChildren().get(2)).getValue(); - return PDataType.fromSqlTypeName(dataType); - } - - @Override - public String getName() { - return NAME; - } + private PDataType getPDataType() { + String dataType = (String) ((LiteralExpression) getChildren().get(2)).getValue(); + return PDataType.fromSqlTypeName(dataType); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getChildren().get(0).evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } + @Override + public String getName() { + return NAME; + } - Object object = PBson.INSTANCE.toObject(ptr, getChildren().get(0).getSortOrder()); - RawBsonDocument rawBsonDocument = (RawBsonDocument) object; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getChildren().get(0).evaluate(tuple, ptr)) { + return false; + } + if (ptr == null || ptr.getLength() == 0) { + return false; + } - if (!getChildren().get(1).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return false; - } + Object object = PBson.INSTANCE.toObject(ptr, getChildren().get(0).getSortOrder()); + RawBsonDocument rawBsonDocument = (RawBsonDocument) object; - String documentFieldKey = - (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); - if (documentFieldKey == null) { - return false; - } + if (!getChildren().get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return false; + } - PDataType bsonValueDataType = getPDataType(); - BsonValue bsonValue = - CommonComparisonExpressionUtils.getFieldFromDocument(documentFieldKey, rawBsonDocument); - if (bsonValue == null) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - if (bsonValueDataType == PVarchar.INSTANCE) { - if (bsonValue instanceof BsonString) { - ptr.set(PVarchar.INSTANCE.toBytes(((BsonString) bsonValue).getValue())); - } else if (bsonValue instanceof BsonNumber) { - ptr.set(PVarchar.INSTANCE.toBytes( - String.valueOf(((BsonNumber) bsonValue).doubleValue()))); - } else if (bsonValue instanceof BsonBoolean) { - ptr.set(PVarchar.INSTANCE.toBytes( - 
String.valueOf(((BsonBoolean) bsonValue).getValue()))); - } else if (bsonValue instanceof BsonBinary) { - ptr.set(PVarchar.INSTANCE.toBytes(((BsonBinary) bsonValue).getData().toString())); - } else if (bsonValue instanceof BsonDateTime) { - ptr.set(PVarchar.INSTANCE.toBytes( - new Date(((BsonDateTime) bsonValue).getValue()).toString())); - } - } else if (bsonValueDataType == PInteger.INSTANCE && bsonValue instanceof BsonNumber) { - ptr.set(PInteger.INSTANCE.toBytes(((BsonNumber) bsonValue).intValue())); - } else if (bsonValueDataType == PLong.INSTANCE && bsonValue instanceof BsonNumber) { - ptr.set(PLong.INSTANCE.toBytes(((BsonNumber) bsonValue).longValue())); - } else if (bsonValueDataType == PDouble.INSTANCE && bsonValue instanceof BsonNumber) { - ptr.set(PDouble.INSTANCE.toBytes(((BsonNumber) bsonValue).doubleValue())); - } else if (bsonValueDataType == PDecimal.INSTANCE && bsonValue instanceof BsonNumber) { - ptr.set(PDecimal.INSTANCE.toBytes(((BsonNumber) bsonValue).decimal128Value())); - } else if (bsonValueDataType == PBoolean.INSTANCE && bsonValue instanceof BsonBoolean) { - ptr.set(PBoolean.INSTANCE.toBytes(((BsonBoolean) bsonValue).getValue())); - } else if (bsonValueDataType == PVarbinary.INSTANCE && bsonValue instanceof BsonBinary) { - ptr.set(PVarbinary.INSTANCE.toBytes(((BsonBinary) bsonValue).getData())); -// TODO : uncomment after PHOENIX-7357 -// } else if (bsonValueDataType == PVarbinaryEncoded.INSTANCE -// && bsonValue instanceof BsonBinary) { -// ptr.set(PVarbinaryEncoded.INSTANCE.toBytes(((BsonBinary) bsonValue).getData())); - } else if (bsonValueDataType == PDate.INSTANCE && bsonValue instanceof BsonDateTime) { - ptr.set(PDate.INSTANCE.toBytes(new Date(((BsonDateTime) bsonValue).getValue()))); - } else { - throw new IllegalArgumentException( - "The function data type does not match with actual data type"); - } - return true; + String documentFieldKey = + (String) PVarchar.INSTANCE.toObject(ptr, getChildren().get(1).getSortOrder()); + if (documentFieldKey == null) { + return false; } - @Override - public PDataType getDataType() { - return getPDataType(); + PDataType bsonValueDataType = getPDataType(); + BsonValue bsonValue = + CommonComparisonExpressionUtils.getFieldFromDocument(documentFieldKey, rawBsonDocument); + if (bsonValue == null) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; + } + if (bsonValueDataType == PVarchar.INSTANCE) { + if (bsonValue instanceof BsonString) { + ptr.set(PVarchar.INSTANCE.toBytes(((BsonString) bsonValue).getValue())); + } else if (bsonValue instanceof BsonNumber) { + ptr.set(PVarchar.INSTANCE.toBytes(String.valueOf(((BsonNumber) bsonValue).doubleValue()))); + } else if (bsonValue instanceof BsonBoolean) { + ptr.set(PVarchar.INSTANCE.toBytes(String.valueOf(((BsonBoolean) bsonValue).getValue()))); + } else if (bsonValue instanceof BsonBinary) { + ptr.set(PVarchar.INSTANCE.toBytes(((BsonBinary) bsonValue).getData().toString())); + } else if (bsonValue instanceof BsonDateTime) { + ptr.set( + PVarchar.INSTANCE.toBytes(new Date(((BsonDateTime) bsonValue).getValue()).toString())); + } + } else if (bsonValueDataType == PInteger.INSTANCE && bsonValue instanceof BsonNumber) { + ptr.set(PInteger.INSTANCE.toBytes(((BsonNumber) bsonValue).intValue())); + } else if (bsonValueDataType == PLong.INSTANCE && bsonValue instanceof BsonNumber) { + ptr.set(PLong.INSTANCE.toBytes(((BsonNumber) bsonValue).longValue())); + } else if (bsonValueDataType == PDouble.INSTANCE && bsonValue instanceof BsonNumber) { + 
ptr.set(PDouble.INSTANCE.toBytes(((BsonNumber) bsonValue).doubleValue())); + } else if (bsonValueDataType == PDecimal.INSTANCE && bsonValue instanceof BsonNumber) { + ptr.set(PDecimal.INSTANCE.toBytes(((BsonNumber) bsonValue).decimal128Value())); + } else if (bsonValueDataType == PBoolean.INSTANCE && bsonValue instanceof BsonBoolean) { + ptr.set(PBoolean.INSTANCE.toBytes(((BsonBoolean) bsonValue).getValue())); + } else if (bsonValueDataType == PVarbinary.INSTANCE && bsonValue instanceof BsonBinary) { + ptr.set(PVarbinary.INSTANCE.toBytes(((BsonBinary) bsonValue).getData())); + // TODO : uncomment after PHOENIX-7357 + // } else if (bsonValueDataType == PVarbinaryEncoded.INSTANCE + // && bsonValue instanceof BsonBinary) { + // ptr.set(PVarbinaryEncoded.INSTANCE.toBytes(((BsonBinary) bsonValue).getData())); + } else if (bsonValueDataType == PDate.INSTANCE && bsonValue instanceof BsonDateTime) { + ptr.set(PDate.INSTANCE.toBytes(new Date(((BsonDateTime) bsonValue).getValue()))); + } else { + throw new IllegalArgumentException( + "The function data type does not match with actual data type"); } + return true; + } + + @Override + public PDataType getDataType() { + return getPDataType(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java index 45ab5b0d4a6..71554e164f3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpReplaceFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,31 +22,29 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBasePattern; import org.apache.phoenix.expression.util.regex.JONIPattern; -import org.joni.Option; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PVarchar; +import org.joni.Option; -@BuiltInFunction(name=RegexpReplaceFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},defaultValue="null")}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpReplaceFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "null") }, + classType = FunctionClassType.DERIVED) public class ByteBasedRegexpReplaceFunction extends RegexpReplaceFunction { - public ByteBasedRegexpReplaceFunction() { - } + public ByteBasedRegexpReplaceFunction() { + } - public ByteBasedRegexpReplaceFunction(List children) { - super(children); - } + public ByteBasedRegexpReplaceFunction(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JONIPattern(value, Option.MULTILINE); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JONIPattern(value, Option.MULTILINE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java index 3ed100ac5a1..c395ab47aa5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSplitFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,28 +22,25 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter; import org.apache.phoenix.expression.util.regex.JONIPattern; -import org.joni.Option; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PVarchar; +import org.joni.Option; -@BuiltInFunction(name=RegexpSplitFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class})}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpSplitFunction.NAME, args = { + @Argument(allowedTypes = { PVarchar.class }), @Argument(allowedTypes = { PVarchar.class }) }, + classType = FunctionClassType.DERIVED) public class ByteBasedRegexpSplitFunction extends RegexpSplitFunction { - public ByteBasedRegexpSplitFunction() { - } + public ByteBasedRegexpSplitFunction() { + } - public ByteBasedRegexpSplitFunction(List children) { - super(children); - } + public ByteBasedRegexpSplitFunction(List children) { + super(children); + } - @Override - protected AbstractBaseSplitter compilePatternSpec(String value) { - return new JONIPattern(value, Option.MULTILINE); - } + @Override + protected AbstractBaseSplitter compilePatternSpec(String value) { + return new JONIPattern(value, Option.MULTILINE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java index ea4ea2a33c1..42045ffd49b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ByteBasedRegexpSubstrFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,34 +22,28 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBasePattern; import org.apache.phoenix.expression.util.regex.JONIPattern; -import org.joni.Option; -import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.util.regex.AbstractBasePattern; -import org.apache.phoenix.expression.util.regex.JONIPattern; -import org.joni.Option; -import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.schema.types.PVarchar; +import org.joni.Option; -@BuiltInFunction(name=RegexpSubstrFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PLong.class}, defaultValue="1")}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpSubstrFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PLong.class }, defaultValue = "1") }, + classType = FunctionClassType.DERIVED) public class ByteBasedRegexpSubstrFunction extends RegexpSubstrFunction { - public ByteBasedRegexpSubstrFunction() { - } + public ByteBasedRegexpSubstrFunction() { + } - public ByteBasedRegexpSubstrFunction(List children) { - super(children); - } + public ByteBasedRegexpSubstrFunction(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JONIPattern(value, Option.MULTILINE); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JONIPattern(value, Option.MULTILINE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java index 1c1392417da..c51994ef24f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,30 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = CbrtFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) +@BuiltInFunction(name = CbrtFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class CbrtFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "CBRT"; + public static final String NAME = "CBRT"; - public CbrtFunction() { - } + public CbrtFunction() { + } - public CbrtFunction(List children) throws SQLException { - super(children); - } + public CbrtFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.cbrt(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.cbrt(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java index 4730d005418..a727d4e6938 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDateExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,101 +24,97 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.FunctionParseNode.Argument; -import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; - +import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * * Class encapsulating ceil operation on org.apache.phoenix.schema.types.PDataType#DATE. 
- * - * * @since 3.0.0 */ @BuiltInFunction(name = CeilFunction.NAME, - args = { - @Argument(allowedTypes={PDate.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PDate.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class CeilDateExpression extends RoundDateExpression { - - public CeilDateExpression() {} - - /** - * @param timeUnit - unit of time to round up to. - * Creates a {@link CeilDateExpression} with default multiplier of 1. - */ - public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { - return create(expr, timeUnit, 1); - } - - /** - * @param timeUnit - unit of time to round up to - * @param multiplier - determines the roll up window size. - * Create a {@link CeilDateExpression}. - */ - public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) throws SQLException { - Expression timeUnitExpr = getTimeUnitExpr(timeUnit); - Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); - List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); - return CeilDateExpression.create(expressions); - } - - public static Expression create(List children) throws SQLException { - Object timeUnitValue = ((LiteralExpression)children.get(1)).getValue(); - TimeUnit timeUnit = TimeUnit.getTimeUnit(timeUnitValue != null ? timeUnitValue.toString() : null); - switch(timeUnit) { - case WEEK: - return new CeilWeekExpression(children); - case MONTH: - return new CeilMonthExpression(children); - case YEAR: - return new CeilYearExpression(children); - default: - return new CeilDateExpression(children); - } - - } - - public CeilDateExpression(List children) { - super(children); - } - - @Override - protected long getRoundUpAmount() { - return divBy - 1; - } - - @Override - public String getName() { - return CeilFunction.NAME; + + public CeilDateExpression() { + } + + /** + * @param timeUnit - unit of time to round up to. Creates a {@link CeilDateExpression} with + * default multiplier of 1. + */ + public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { + return create(expr, timeUnit, 1); + } + + /** + * @param timeUnit - unit of time to round up to + * @param multiplier - determines the roll up window size. Create a {@link CeilDateExpression}. + */ + public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) + throws SQLException { + Expression timeUnitExpr = getTimeUnitExpr(timeUnit); + Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); + List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); + return CeilDateExpression.create(expressions); + } + + public static Expression create(List children) throws SQLException { + Object timeUnitValue = ((LiteralExpression) children.get(1)).getValue(); + TimeUnit timeUnit = + TimeUnit.getTimeUnit(timeUnitValue != null ? 
timeUnitValue.toString() : null); + switch (timeUnit) { + case WEEK: + return new CeilWeekExpression(children); + case MONTH: + return new CeilMonthExpression(children); + case YEAR: + return new CeilYearExpression(children); + default: + return new CeilDateExpression(children); } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength() == 0) { - return true; // child evaluated to null - } - PDataType dataType = getDataType(); - long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); - long value = roundTime(time); - Date d = new Date(value); - byte[] byteValue = dataType.toBytes(d); - ptr.set(byteValue); - return true; - } - return false; + + } + + public CeilDateExpression(List children) { + super(children); + } + + @Override + protected long getRoundUpAmount() { + return divBy - 1; + } + + @Override + public String getName() { + return CeilFunction.NAME; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; // child evaluated to null + } + PDataType dataType = getDataType(); + long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); + long value = roundTime(time); + Date d = new Date(value); + byte[] byteValue = dataType.toBytes(d); + ptr.set(byteValue); + return true; } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDecimalExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDecimalExpression.java index 7cb8e164a39..c8c57f4536b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDecimalExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilDecimalExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,96 +25,90 @@ import org.apache.phoenix.expression.Determinism; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.FunctionParseNode.Argument; -import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * * Class encapsulating the CEIL operation on a {@link org.apache.phoenix.schema.types.PDecimal} - * - * * @since 3.0.0 */ @BuiltInFunction(name = CeilFunction.NAME, - args = { - @Argument(allowedTypes={PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class CeilDecimalExpression extends RoundDecimalExpression { - public CeilDecimalExpression() {} + public CeilDecimalExpression() { + } - public CeilDecimalExpression(List children) { - super(children); - } + public CeilDecimalExpression(List children) { + super(children); + } - /** - * Creates a {@link CeilDecimalExpression} with rounding scale given by @param scale. - * - */ - public static Expression create(Expression expr, int scale) throws SQLException { - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - Expression scaleExpr = LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); - List expressions = Lists.newArrayList(expr, scaleExpr); - return new CeilDecimalExpression(expressions); + /** + * Creates a {@link CeilDecimalExpression} with rounding scale given by @param scale. 
+ */ + public static Expression create(Expression expr, int scale) throws SQLException { + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } + Expression scaleExpr = + LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); + List expressions = Lists.newArrayList(expr, scaleExpr); + return new CeilDecimalExpression(expressions); + } - public static Expression create(List exprs) throws SQLException { - Expression expr = exprs.get(0); - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - if (exprs.size() == 1) { - Expression scaleExpr = LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); - exprs = Lists.newArrayList(expr, scaleExpr); - } - return new CeilDecimalExpression(exprs); + public static Expression create(List exprs) throws SQLException { + Expression expr = exprs.get(0); + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } - - /** - * Creates a {@link CeilDecimalExpression} with a default scale of 0 used for rounding. - * - */ - public static Expression create(Expression expr) throws SQLException { - return create(expr, 0); + if (exprs.size() == 1) { + Expression scaleExpr = + LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); + exprs = Lists.newArrayList(expr, scaleExpr); } + return new CeilDecimalExpression(exprs); + } - @Override - protected RoundingMode getRoundingMode() { - return RoundingMode.CEILING; - } + /** + * Creates a {@link CeilDecimalExpression} with a default scale of 0 used for rounding. + */ + public static Expression create(Expression expr) throws SQLException { + return create(expr, 0); + } - @Override - public String getName() { - return CeilFunction.NAME; - } - - /** - * {@inheritDoc } - */ - @Override - protected KeyRange getInputRangeProducing(BigDecimal result) { - if(!hasEnoughPrecisionToProduce(result)) { - throw new IllegalArgumentException("Cannot produce input range for decimal " + result - + ", not enough precision with scale " + getRoundingScale()); - } - byte[] lowerRange = PDecimal.INSTANCE.toBytes(stepPrevInScale(result)); - byte[] upperRange = PDecimal.INSTANCE.toBytes(result); - return KeyRange.getKeyRange(lowerRange, false, upperRange, true); + @Override + protected RoundingMode getRoundingMode() { + return RoundingMode.CEILING; + } + + @Override + public String getName() { + return CeilFunction.NAME; + } + + /** + * {@inheritDoc } + */ + @Override + protected KeyRange getInputRangeProducing(BigDecimal result) { + if (!hasEnoughPrecisionToProduce(result)) { + throw new IllegalArgumentException("Cannot produce input range for decimal " + result + + ", not enough precision with scale " + getRoundingScale()); } + byte[] lowerRange = PDecimal.INSTANCE.toBytes(stepPrevInScale(result)); + byte[] upperRange = PDecimal.INSTANCE.toBytes(result); + return KeyRange.getKeyRange(lowerRange, false, upperRange, true); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilFunction.java index 28fdc15030c..9a56d8570b0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.expression.function; -import java.sql.SQLException; import java.util.List; import org.apache.phoenix.expression.Expression; @@ -25,45 +24,36 @@ import org.apache.phoenix.parse.FunctionParseNode; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.TypeMismatchException; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.schema.types.PVarchar; /** - * * Base class for built-in CEIL function. - * - * * @since 3.0.0 */ -@BuiltInFunction(name = CeilFunction.NAME, - nodeClass = CeilParseNode.class, - args = { - @Argument(allowedTypes={PTimestamp.class, PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {CeilDateExpression.class, CeilTimestampExpression.class, CeilDecimalExpression.class} - ) +@BuiltInFunction(name = CeilFunction.NAME, nodeClass = CeilParseNode.class, + args = { @Argument(allowedTypes = { PTimestamp.class, PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, derivedFunctions = { + CeilDateExpression.class, CeilTimestampExpression.class, CeilDecimalExpression.class }) public abstract class CeilFunction extends ScalarFunction { - - public static final String NAME = "CEIL"; - public CeilFunction() {} - - public CeilFunction(List children) { - super(children); - } - - @Override - public String getName() { - return NAME; - } - + public static final String NAME = "CEIL"; + + public CeilFunction() { + } + + public CeilFunction(List children) { + super(children); + } + + @Override + public String getName() { + return NAME; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilMonthExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilMonthExpression.java index e35a0f348bd..2485ef0d78a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilMonthExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilMonthExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * - * Ceil function that rounds up the {@link DateTime} to next month. + * Ceil function that rounds up the {@link DateTime} to next month. */ public class CeilMonthExpression extends RoundJodaDateExpression { - public CeilMonthExpression() { - super(); - } - - public CeilMonthExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.monthOfYear().roundCeilingCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor(time - 1) + 1 - return (new DateTime(time - 1, GJChronology.getInstanceUTC())).monthOfYear() - .roundFloorCopy().getMillis() + 1; - } - - @Override - public long rangeUpper(long time) { - // ceil - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } + public CeilMonthExpression() { + super(); + } + + public CeilMonthExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.monthOfYear().roundCeilingCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor(time - 1) + 1 + return (new DateTime(time - 1, GJChronology.getInstanceUTC())).monthOfYear().roundFloorCopy() + .getMillis() + 1; + } + + @Override + public long rangeUpper(long time) { + // ceil + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilTimestampExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilTimestampExpression.java index 0a28c191e32..501a6c25917 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilTimestampExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilTimestampExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,106 +25,101 @@ import org.apache.phoenix.expression.CoerceExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDataType.PDataCodec; import org.apache.phoenix.schema.types.PDate; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PUnsignedDate; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.FunctionParseNode.Argument; -import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Class encapsulating the CEIL operation on {@link org.apache.phoenix.schema.types.PTimestamp} - * This class only supports CEIL {@link TimeUnit#MILLISECOND}. If you want more options of CEIL like + * Class encapsulating the CEIL operation on {@link org.apache.phoenix.schema.types.PTimestamp} This + * class only supports CEIL {@link TimeUnit#MILLISECOND}. If you want more options of CEIL like * using {@link TimeUnit#HOUR} use {@link CeilDateExpression} - * - * * @since 3.0.0 */ @BuiltInFunction(name = CeilFunction.NAME, - args = { - @Argument(allowedTypes={PTimestamp.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PTimestamp.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class CeilTimestampExpression extends CeilDateExpression { - - public CeilTimestampExpression() {} - - public CeilTimestampExpression(List children) { - super(children); - } - - /** - * Creates a {@link CeilTimestampExpression} that uses {@link TimeUnit#MILLISECOND} - * as the time unit for rounding. 
- */ - public static CeilTimestampExpression create(Expression expr, int multiplier) throws SQLException { - List childExprs = Lists.newArrayList(expr, getTimeUnitExpr(TimeUnit.MILLISECOND), getMultiplierExpr(multiplier)); - return new CeilTimestampExpression(childExprs); - } - - public static Expression create(List children) throws SQLException { - Expression firstChild = children.get(0); - PDataType firstChildDataType = firstChild.getDataType(); - String timeUnit = (String)((LiteralExpression)children.get(1)).getValue(); - if(TimeUnit.MILLISECOND.toString().equalsIgnoreCase(timeUnit)) { - return new CeilTimestampExpression(children); - } - // Coerce TIMESTAMP to DATE, as the nanos has no affect - List newChildren = Lists.newArrayListWithExpectedSize(children.size()); - newChildren.add(CoerceExpression.create(firstChild, firstChildDataType == PTimestamp.INSTANCE ? - PDate.INSTANCE : PUnsignedDate.INSTANCE)); - newChildren.addAll(children.subList(1, children.size())); - return CeilDateExpression.create(newChildren); - } - - /** - * Creates a {@link CeilTimestampExpression} that uses {@link TimeUnit#MILLISECOND} - * as the time unit for rounding. - */ - public static CeilTimestampExpression create (Expression expr) throws SQLException { - return create(expr, 1); + + public CeilTimestampExpression() { + } + + public CeilTimestampExpression(List children) { + super(children); + } + + /** + * Creates a {@link CeilTimestampExpression} that uses {@link TimeUnit#MILLISECOND} as the time + * unit for rounding. + */ + public static CeilTimestampExpression create(Expression expr, int multiplier) + throws SQLException { + List childExprs = Lists.newArrayList(expr, getTimeUnitExpr(TimeUnit.MILLISECOND), + getMultiplierExpr(multiplier)); + return new CeilTimestampExpression(childExprs); + } + + public static Expression create(List children) throws SQLException { + Expression firstChild = children.get(0); + PDataType firstChildDataType = firstChild.getDataType(); + String timeUnit = (String) ((LiteralExpression) children.get(1)).getValue(); + if (TimeUnit.MILLISECOND.toString().equalsIgnoreCase(timeUnit)) { + return new CeilTimestampExpression(children); } + // Coerce TIMESTAMP to DATE, as the nanos has no affect + List newChildren = Lists.newArrayListWithExpectedSize(children.size()); + newChildren.add(CoerceExpression.create(firstChild, + firstChildDataType == PTimestamp.INSTANCE ? PDate.INSTANCE : PUnsignedDate.INSTANCE)); + newChildren.addAll(children.subList(1, children.size())); + return CeilDateExpression.create(newChildren); + } + + /** + * Creates a {@link CeilTimestampExpression} that uses {@link TimeUnit#MILLISECOND} as the time + * unit for rounding. + */ + public static CeilTimestampExpression create(Expression expr) throws SQLException { + return create(expr, 1); + } + + @Override + protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { + return columnDataType == PTimestamp.INSTANCE ? PDate.INSTANCE.getCodec() + : columnDataType == PUnsignedTimestamp.INSTANCE ? PUnsignedDate.INSTANCE.getCodec() + : super.getKeyRangeCodec(columnDataType); + } - @Override - protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { - return columnDataType == PTimestamp.INSTANCE - ? PDate.INSTANCE.getCodec() - : columnDataType == PUnsignedTimestamp.INSTANCE - ? 
PUnsignedDate.INSTANCE.getCodec() - : super.getKeyRangeCodec(columnDataType); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; // child evaluated to null + } + SortOrder sortOrder = children.get(0).getSortOrder(); + PDataType dataType = getDataType(); + int nanos = dataType.getNanos(ptr, sortOrder); + if (nanos > 0) { + long millis = dataType.getMillis(ptr, sortOrder); + Timestamp roundedTs = new Timestamp(millis + 1); + byte[] byteValue = dataType.toBytes(roundedTs); + ptr.set(byteValue); + } + return true; // for timestamp we only support rounding up the milliseconds. } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength() == 0) { - return true; // child evaluated to null - } - SortOrder sortOrder = children.get(0).getSortOrder(); - PDataType dataType = getDataType(); - int nanos = dataType.getNanos(ptr, sortOrder); - if (nanos > 0) { - long millis = dataType.getMillis(ptr, sortOrder); - Timestamp roundedTs = new Timestamp(millis + 1); - byte[] byteValue = dataType.toBytes(roundedTs); - ptr.set(byteValue); - } - return true; // for timestamp we only support rounding up the milliseconds. - } - return false; - } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilWeekExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilWeekExpression.java index 658eb402bb4..1dea177c4c1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilWeekExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilWeekExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * - * Ceil function that rounds up the {@link DateTime} to next week. + * Ceil function that rounds up the {@link DateTime} to next week. 
*/ public class CeilWeekExpression extends RoundJodaDateExpression { - - public CeilWeekExpression() { - super(); - } - - public CeilWeekExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.weekOfWeekyear().roundCeilingCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor(time - 1) + 1 - return (new DateTime(time - 1, GJChronology.getInstanceUTC())).weekOfWeekyear() - .roundFloorCopy().getMillis() + 1; - } - - @Override - public long rangeUpper(long time) { - // ceil - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } + + public CeilWeekExpression() { + super(); + } + + public CeilWeekExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.weekOfWeekyear().roundCeilingCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor(time - 1) + 1 + return (new DateTime(time - 1, GJChronology.getInstanceUTC())).weekOfWeekyear().roundFloorCopy() + .getMillis() + 1; + } + + @Override + public long rangeUpper(long time) { + // ceil + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilYearExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilYearExpression.java index c09e56a31bc..f873f2c0236 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilYearExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CeilYearExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * - * Ceil function that rounds up the {@link DateTime} to next year. + * Ceil function that rounds up the {@link DateTime} to next year. 
*/ public class CeilYearExpression extends RoundJodaDateExpression { - - public CeilYearExpression() { - super(); - } - - public CeilYearExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.year().roundCeilingCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor(time - 1) + 1 - return (new DateTime(time - 1, GJChronology.getInstanceUTC())).year().roundFloorCopy() - .getMillis() + 1; - } - - @Override - public long rangeUpper(long time) { - // ceil - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } + + public CeilYearExpression() { + super(); + } + + public CeilYearExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.year().roundCeilingCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor(time - 1) + 1 + return (new DateTime(time - 1, GJChronology.getInstanceUTC())).year().roundFloorCopy() + .getMillis() + 1; + } + + @Override + public long rangeUpper(long time) { + // ceil + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java index 71d51563e10..434c5cb6f75 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CoalesceFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,98 +32,94 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.ExpressionUtil; - /** - * - * Function used to provide an alternative value when the first argument is null. - * Usage: - * COALESCE(expr1,expr2) - * If expr1 is not null, then it is returned, otherwise expr2 is returned. - * - * TODO: better bind parameter type matching, since arg2 must be coercible - * to arg1. consider allowing a common base type? - * + * Function used to provide an alternative value when the first argument is null. Usage: + * COALESCE(expr1,expr2) If expr1 is not null, then it is returned, otherwise expr2 is returned. + * TODO: better bind parameter type matching, since arg2 must be coercible to arg1. consider + * allowing a common base type? 
* @since 0.1 */ -@BuiltInFunction(name=CoalesceFunction.NAME, args= { - @Argument(), - @Argument()} ) +@BuiltInFunction(name = CoalesceFunction.NAME, args = { @Argument(), @Argument() }) public class CoalesceFunction extends ScalarFunction { - public static final String NAME = "COALESCE"; - - public CoalesceFunction() { - } - - public CoalesceFunction(List children) throws SQLException { - super(children); - - Expression firstChild = children.get(0); - Expression secondChild = children.get(1); - - if (ExpressionUtil.isConstant(secondChild)) { // is literal - - ImmutableBytesWritable ptr = new ImmutableBytesPtr(); - secondChild.evaluate(null, ptr); - - if (ptr.getLength()!=0 && !secondChild.getDataType().isCoercibleTo(firstChild.getDataType(), secondChild.getDataType().toObject(ptr))) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage(getName() + " expected " + firstChild.getDataType() + ", but got " + secondChild.getDataType()) - .build().buildException(); - } - } else { // second parameter is expression - if (!secondChild.getDataType().isCoercibleTo(getDataType())) { - // cast explicitly - children.add(1, CoerceExpression.create(secondChild, firstChild.getDataType())); - } - } + public static final String NAME = "COALESCE"; + + public CoalesceFunction() { + } + + public CoalesceFunction(List children) throws SQLException { + super(children); + + Expression firstChild = children.get(0); + Expression secondChild = children.get(1); + + if (ExpressionUtil.isConstant(secondChild)) { // is literal + + ImmutableBytesWritable ptr = new ImmutableBytesPtr(); + secondChild.evaluate(null, ptr); + + if ( + ptr.getLength() != 0 && !secondChild.getDataType().isCoercibleTo(firstChild.getDataType(), + secondChild.getDataType().toObject(ptr)) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH).setMessage(getName() + + " expected " + firstChild.getDataType() + ", but got " + secondChild.getDataType()) + .build().buildException(); + } + } else { // second parameter is expression + if (!secondChild.getDataType().isCoercibleTo(getDataType())) { + // cast explicitly + children.add(1, CoerceExpression.create(secondChild, firstChild.getDataType())); + } } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - boolean evaluated = children.get(0).evaluate(tuple, ptr); - if (evaluated && ptr.getLength() > 0) { - return true; - } - if (evaluated || tuple.isImmutable()) { - Expression secondChild = children.get(1); - if (secondChild.evaluate(tuple, ptr)) { - // Coerce the type of the second child to the type of the first child - getDataType().coerceBytes(ptr, secondChild.getDataType(), secondChild.getSortOrder(), getSortOrder()); - return true; - } - } - return false; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + boolean evaluated = children.get(0).evaluate(tuple, ptr); + if (evaluated && ptr.getLength() > 0) { + return true; } - - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); - } - - @Override - public Integer getMaxLength() { - Integer maxLength1 = children.get(0).getMaxLength(); - if (maxLength1 != null) { - Integer maxLength2 = children.get(1).getMaxLength(); - if (maxLength2 != null) { - return maxLength1 > maxLength2 ? 
maxLength1 : maxLength2; - } - } - return null; - } - - @Override - public boolean isNullable() { - return children.get(0).isNullable() && children.get(1).isNullable(); - } - - @Override - public String getName() { - return NAME; - } - - @Override - public boolean requiresFinalEvaluation() { + if (evaluated || tuple.isImmutable()) { + Expression secondChild = children.get(1); + if (secondChild.evaluate(tuple, ptr)) { + // Coerce the type of the second child to the type of the first child + getDataType().coerceBytes(ptr, secondChild.getDataType(), secondChild.getSortOrder(), + getSortOrder()); return true; + } + } + return false; + } + + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } + + @Override + public Integer getMaxLength() { + Integer maxLength1 = children.get(0).getMaxLength(); + if (maxLength1 != null) { + Integer maxLength2 = children.get(1).getMaxLength(); + if (maxLength2 != null) { + return maxLength1 > maxLength2 ? maxLength1 : maxLength2; + } } + return null; + } + + @Override + public boolean isNullable() { + return children.get(0).isNullable() && children.get(1).isNullable(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean requiresFinalEvaluation() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java index 676b6460df3..1c7c34a6c25 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,162 +40,153 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * A Phoenix Function that calculates a collation key for an input string based - * on a caller-provided locale and collator strength and decomposition settings. - * - * The locale should be specified as xx_yy_variant where xx is the ISO 639-1 - * 2-letter language code, yy is the the ISO 3166 2-letter country code. Both - * countryCode and variant are optional. For example, zh_TW_STROKE, zh_TW and zh - * are all valid locale representations. Note the language code, country code - * and variant are used as arguments to the constructor of java.util.Locale. - * - * This function originally used the open-source i18n-util package to obtain the - * collators it needs from the provided locale. As i18n-util is not maintained - * anymore, the relevant parts from it were copied into Phoenix. - * See: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * The LinguisticSort implementation from i18n-util encapsulates sort-related - * functionality for a substantive list of locales. For each locale, it provides - * a collator and an Oracle-specific database function that can be used to sort - * strings according to the natural language rules of that locale. - * - * This function uses the collator returned by LinguisticSort.getCollator to - * produce a collation key for its input string. 
A user can expect that the - * sorting semantics of this function for a given locale is equivalent to the - * sorting behaviour of an Oracle query that is constructed using the Oracle - * functions returned by LinguisticSort for that locale. - * - * The optional third argument to the function is a boolean that specifies - * whether to use the upper-case collator (case-insensitive) returned by - * LinguisticSort.getUpperCaseCollator. - * - * The optional fourth and fifth arguments are used to set respectively the - * strength and composition of the collator returned by LinguisticSort using the + * A Phoenix Function that calculates a collation key for an input string based on a caller-provided + * locale and collator strength and decomposition settings. The locale should be specified as + * xx_yy_variant where xx is the ISO 639-1 2-letter language code, yy is the the ISO 3166 2-letter + * country code. Both countryCode and variant are optional. For example, zh_TW_STROKE, zh_TW and zh + * are all valid locale representations. Note the language code, country code and variant are used + * as arguments to the constructor of java.util.Locale. This function originally used the + * open-source i18n-util package to obtain the collators it needs from the provided locale. As + * i18n-util is not maintained anymore, the relevant parts from it were copied into Phoenix. See: + * https://issues.apache.org/jira/browse/PHOENIX-6818 The LinguisticSort implementation from + * i18n-util encapsulates sort-related functionality for a substantive list of locales. For each + * locale, it provides a collator and an Oracle-specific database function that can be used to sort + * strings according to the natural language rules of that locale. This function uses the collator + * returned by LinguisticSort.getCollator to produce a collation key for its input string. A user + * can expect that the sorting semantics of this function for a given locale is equivalent to the + * sorting behaviour of an Oracle query that is constructed using the Oracle functions returned by + * LinguisticSort for that locale. The optional third argument to the function is a boolean that + * specifies whether to use the upper-case collator (case-insensitive) returned by + * LinguisticSort.getUpperCaseCollator. The optional fourth and fifth arguments are used to set + * respectively the strength and composition of the collator returned by LinguisticSort using the * setStrength and setDecomposition methods of java.text.Collator. 
- * */ @FunctionParseNode.BuiltInFunction(name = CollationKeyFunction.NAME, args = { - // input string - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), - // ISO Code for Locale - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, isConstant = true), - // whether to use special upper case collator - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, defaultValue = "false", isConstant = true), - // collator strength - @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true), - // collator decomposition - @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true) }) + // input string + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + // ISO Code for Locale + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, isConstant = true), + // whether to use special upper case collator + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, defaultValue = "false", + isConstant = true), + // collator strength + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", + isConstant = true), + // collator decomposition + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", + isConstant = true) }) public class CollationKeyFunction extends ScalarFunction { - private static final Logger LOGGER = LoggerFactory.getLogger(CollationKeyFunction.class); - - public static final String NAME = "COLLATION_KEY"; - - private Collator collator; - - public CollationKeyFunction() { - } - - public CollationKeyFunction(List children) throws SQLException { - super(children); - initialize(); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - initialize(); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildren().get(0); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("CollationKey inputString: " + inputString); - } - - if (inputString == null) { - return true; - } - - byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray(); - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("CollationKey bytes: " + - VarBinaryFormatter.INSTANCE.format(collationKeyByteArray)); - } - - ptr.set(collationKeyByteArray); - return true; - } - - private void initialize() { - String localeISOCode = getLiteralValue(1, String.class); - Boolean useSpecialUpperCaseCollator = getLiteralValue(2, Boolean.class); - Integer collatorStrength = getLiteralValue(3, Integer.class); - Integer collatorDecomposition = getLiteralValue(4, Integer.class); - - if (LOGGER.isTraceEnabled()) { - StringBuilder logInputsMessage = new StringBuilder(); - logInputsMessage.append("Input (literal) arguments:").append("localeISOCode: " + localeISOCode) - .append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator) - .append(", collatorStrength: " + collatorStrength) - .append(", collatorDecomposition: " + collatorDecomposition); - LOGGER.trace(logInputsMessage.toString()); - } - - Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Locale: " + locale.toLanguageTag())); - } - - LinguisticSort linguisticSort = LinguisticSort.get(locale); - - collator = 
BooleanUtils.isTrue(useSpecialUpperCaseCollator) ? linguisticSort.getUpperCaseCollator(false) - : linguisticSort.getCollator(); - - if (collatorStrength != null) { - collator.setStrength(collatorStrength); - } - - if (collatorDecomposition != null) { - collator.setDecomposition(collatorDecomposition); - } - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format( - "Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s", - collator.getStrength(), collator.getDecomposition(), - BooleanUtils.isTrue(useSpecialUpperCaseCollator))); - } - } - - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - @Override - public String getName() { - return NAME; - } - - @Override - public boolean isThreadSafe() { - // ICU4J Collators are not thread-safe unless they are frozen. - // TODO: Look into calling freeze() on them to be able return true here. - return false; - } - - @Override - public boolean isNullable() { - return getChildren().get(0).isNullable(); + private static final Logger LOGGER = LoggerFactory.getLogger(CollationKeyFunction.class); + + public static final String NAME = "COLLATION_KEY"; + + private Collator collator; + + public CollationKeyFunction() { + } + + public CollationKeyFunction(List children) throws SQLException { + super(children); + initialize(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + initialize(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildren().get(0); + if (!expression.evaluate(tuple, ptr)) { + return false; + } + String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("CollationKey inputString: " + inputString); + } + + if (inputString == null) { + return true; + } + + byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray(); + + if (LOGGER.isTraceEnabled()) { + LOGGER + .trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray)); + } + + ptr.set(collationKeyByteArray); + return true; + } + + private void initialize() { + String localeISOCode = getLiteralValue(1, String.class); + Boolean useSpecialUpperCaseCollator = getLiteralValue(2, Boolean.class); + Integer collatorStrength = getLiteralValue(3, Integer.class); + Integer collatorDecomposition = getLiteralValue(4, Integer.class); + + if (LOGGER.isTraceEnabled()) { + StringBuilder logInputsMessage = new StringBuilder(); + logInputsMessage.append("Input (literal) arguments:") + .append("localeISOCode: " + localeISOCode) + .append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator) + .append(", collatorStrength: " + collatorStrength) + .append(", collatorDecomposition: " + collatorDecomposition); + LOGGER.trace(logInputsMessage.toString()); + } + + Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Locale: " + locale.toLanguageTag())); + } + + LinguisticSort linguisticSort = LinguisticSort.get(locale); + + collator = BooleanUtils.isTrue(useSpecialUpperCaseCollator) + ? 
linguisticSort.getUpperCaseCollator(false) + : linguisticSort.getCollator(); + + if (collatorStrength != null) { + collator.setStrength(collatorStrength); + } + + if (collatorDecomposition != null) { + collator.setDecomposition(collatorDecomposition); + } + + if (LOGGER.isTraceEnabled()) { + LOGGER + .trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s", + collator.getStrength(), collator.getDecomposition(), + BooleanUtils.isTrue(useSpecialUpperCaseCollator))); } + } + + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean isThreadSafe() { + // ICU4J Collators are not thread-safe unless they are frozen. + // TODO: Look into calling freeze() on them to be able return true here. + return false; + } + + @Override + public boolean isNullable() { + return getChildren().get(0).isNullable(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CompositeAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CompositeAggregateFunction.java index f5adb6f4a93..7a4bda1e380 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CompositeAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CompositeAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,24 +22,19 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.visitor.ExpressionVisitor; - /** - * - * Base class for aggregation functions which are composed of other - * aggregation functions (for example, AVG is modeled as a SUM aggregate - * function and a COUNT aggregate function). - * - * + * Base class for aggregation functions which are composed of other aggregation functions (for + * example, AVG is modeled as a SUM aggregate function and a COUNT aggregate function). * @since 0.1 */ abstract public class CompositeAggregateFunction extends AggregateFunction { - public CompositeAggregateFunction(List children) { - super(children); - } - - @Override - public final T accept(ExpressionVisitor visitor) { - return null; - } + public CompositeAggregateFunction(List children) { + super(children); + } + + @Override + public final T accept(ExpressionVisitor visitor) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java index 56f65e0d69f..7b344a92a7a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,65 +32,64 @@ import org.joda.time.DateTimeZone; /** - * Build in function CONVERT_TZ(date, 'timezone_from', 'timezone_to). Convert date from one timezone to - * another - * + * Build in function CONVERT_TZ(date, 'timezone_from', 'timezone_to). Convert date from one timezone + * to another */ -@FunctionParseNode.BuiltInFunction(name = ConvertTimezoneFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = { PTimestamp.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class })}) +@FunctionParseNode.BuiltInFunction(name = ConvertTimezoneFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PTimestamp.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) public class ConvertTimezoneFunction extends ScalarFunction { - public static final String NAME = "CONVERT_TZ"; - - public ConvertTimezoneFunction() { - } - - public ConvertTimezoneFunction(List children) throws SQLException { - super(children); - } + public static final String NAME = "CONVERT_TZ"; - @Override - public String getName() { - return NAME; - } + public ConvertTimezoneFunction() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!children.get(0).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); + public ConvertTimezoneFunction(List children) throws SQLException { + super(children); + } - if (!children.get(1).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - DateTimeZone timezoneFrom = JodaTimezoneCache.getInstance(ptr); + @Override + public String getName() { + return NAME; + } - if (!children.get(2).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - DateTimeZone timezoneTo = JodaTimezoneCache.getInstance(ptr); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!children.get(0).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; + } + long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); - long convertedDate = date - timezoneFrom.getOffset(date) + timezoneTo.getOffset(date); - byte[] outBytes = new byte[8]; - PDate.INSTANCE.getCodec().encodeLong(convertedDate, outBytes, 0); - ptr.set(outBytes); - return true; + if (!children.get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; } + DateTimeZone timezoneFrom = JodaTimezoneCache.getInstance(ptr); - @Override - public PDataType getDataType() { - return PDate.INSTANCE; + if (!children.get(2).evaluate(tuple, ptr)) { + return false; } + if (ptr.getLength() == 0) { + return true; + } + DateTimeZone timezoneTo = JodaTimezoneCache.getInstance(ptr); + + long convertedDate = date - timezoneFrom.getOffset(date) + timezoneTo.getOffset(date); + byte[] outBytes = new byte[8]; + PDate.INSTANCE.getCodec().encodeLong(convertedDate, outBytes, 0); + ptr.set(outBytes); + return true; + } + + @Override + public PDataType 
getDataType() { + return PDate.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CosFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CosFunction.java index b6532d8689e..fd1d48c4e38 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CosFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CosFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,31 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = CosFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, - PDecimal.class }) }) +@BuiltInFunction(name = CosFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class CosFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "COS"; + public static final String NAME = "COS"; - public CosFunction() { - } + public CosFunction() { + } - public CosFunction(List children) throws SQLException { - super(children); - } + public CosFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.cos(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.cos(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java index 9f6fe273e53..ef4581b09e5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CountAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,86 +30,84 @@ import org.apache.phoenix.expression.aggregator.LongSumAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.SchemaUtil; - /** - * - * Built-in function for {@code COUNT() } aggregate function, - * for example COUNT(foo), COUNT(1), COUNT(*) - * - * + * Built-in function for {@code COUNT() } aggregate function, for example COUNT(foo), + * COUNT(1), COUNT(*) * @since 0.1 */ -@BuiltInFunction(name=CountAggregateFunction.NAME, args= {@Argument()} ) +@BuiltInFunction(name = CountAggregateFunction.NAME, args = { @Argument() }) public class CountAggregateFunction extends SingleAggregateFunction { - public static final String NAME = "COUNT"; - public static final List STAR = Arrays.asList(LiteralExpression.newConstant(1, Determinism.ALWAYS)); - public static final String NORMALIZED_NAME = SchemaUtil.normalizeIdentifier(NAME); - - public CountAggregateFunction() { - } - - public CountAggregateFunction(List childExpressions) { - super(childExpressions); - } + public static final String NAME = "COUNT"; + public static final List STAR = + Arrays. asList(LiteralExpression.newConstant(1, Determinism.ALWAYS)); + public static final String NORMALIZED_NAME = SchemaUtil.normalizeIdentifier(NAME); + + public CountAggregateFunction() { + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - CountAggregateFunction other = (CountAggregateFunction)obj; - return (isConstantExpression() && other.isConstantExpression()) || children.equals(other.getChildren()); - } + public CountAggregateFunction(List childExpressions) { + super(childExpressions); + } - @Override - public int hashCode() { - return isConstantExpression() ? 0 : super.hashCode(); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + CountAggregateFunction other = (CountAggregateFunction) obj; + return (isConstantExpression() && other.isConstantExpression()) + || children.equals(other.getChildren()); + } - /** - * The COUNT function never returns null - */ - @Override - public boolean isNullable() { + @Override + public int hashCode() { + return isConstantExpression() ? 0 : super.hashCode(); + } + + /** + * The COUNT function never returns null + */ + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public LongSumAggregator newClientAggregator() { + // Since COUNT can never be null, ensure the aggregator is not nullable. + // This allows COUNT(*) to return 0 with the initial state of ClientAggregators + // when no rows are returned. 
+ return new LongSumAggregator() { + @Override + public boolean isNullable() { return false; - } - - @Override - public PDataType getDataType() { - return PLong.INSTANCE; - } + } + }; + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new CountAggregator(); + } - @Override - public LongSumAggregator newClientAggregator() { - // Since COUNT can never be null, ensure the aggregator is not nullable. - // This allows COUNT(*) to return 0 with the initial state of ClientAggregators - // when no rows are returned. - return new LongSumAggregator() { - @Override - public boolean isNullable() { - return false; - } - }; - } - - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new CountAggregator(); - } - - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { - LongSumAggregator sumAgg = newClientAggregator(); - sumAgg.aggregate(null, ptr); - return new CountAggregator(sumAgg); - } + @Override + public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { + LongSumAggregator sumAgg = newClientAggregator(); + sumAgg.aggregate(null, ptr); + return new CountAggregator(sumAgg); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentDateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentDateFunction.java index 0c30e810328..88460621073 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentDateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentDateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,49 +31,46 @@ import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.util.EnvironmentEdgeManager; - /** - * - * Function that returns the current date accurate to the millisecond. Note that this - * function is never evaluated on the server-side, instead the server side date is - * retrieved (piggy-backed on the call to check that the metadata is up-to-date) and - * passed into this function at create time. - * - * + * Function that returns the current date accurate to the millisecond. Note that this function is + * never evaluated on the server-side, instead the server side date is retrieved (piggy-backed on + * the call to check that the metadata is up-to-date) and passed into this function at create time. 
* @since 0.1 */ -@BuiltInFunction(name=CurrentDateFunction.NAME, nodeClass=CurrentDateParseNode.class, args= {} ) +@BuiltInFunction(name = CurrentDateFunction.NAME, nodeClass = CurrentDateParseNode.class, args = {}) public class CurrentDateFunction extends CurrentDateTimeFunction { - public static final String NAME = "CURRENT_DATE"; - private final ImmutableBytesWritable currentDate = new ImmutableBytesWritable(new byte[PDate.INSTANCE.getByteSize()]); - - public CurrentDateFunction() { - this(EnvironmentEdgeManager.currentTimeMillis()); - } + public static final String NAME = "CURRENT_DATE"; + private final ImmutableBytesWritable currentDate = + new ImmutableBytesWritable(new byte[PDate.INSTANCE.getByteSize()]); + + public CurrentDateFunction() { + this(EnvironmentEdgeManager.currentTimeMillis()); + } - public CurrentDateFunction(List children, StatementContext context) throws SQLException { - // Note that according to the standard Date is always WITHOUT TIMEZONE, but we don't - // implement real dates - this(context.getCurrentTimeWithDisplacement()); - } + public CurrentDateFunction(List children, StatementContext context) + throws SQLException { + // Note that according to the standard Date is always WITHOUT TIMEZONE, but we don't + // implement real dates + this(context.getCurrentTimeWithDisplacement()); + } - public CurrentDateFunction(long timeStamp) { - getDataType().getCodec().encodeLong(timeStamp, currentDate); - } + public CurrentDateFunction(long timeStamp) { + getDataType().getCodec().encodeLong(timeStamp, currentDate); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - ptr.set(currentDate.get(), 0, PDate.INSTANCE.getByteSize()); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + ptr.set(currentDate.get(), 0, PDate.INSTANCE.getByteSize()); + return true; + } - @Override - public final PDataType getDataType() { - return PDate.INSTANCE; - } + @Override + public final PDataType getDataType() { + return PDate.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentTimeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentTimeFunction.java index ef76cc027f3..5a190502531 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentTimeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/CurrentTimeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,47 +31,44 @@ import org.apache.phoenix.schema.types.PTime; import org.apache.phoenix.util.EnvironmentEdgeManager; - /** - * - * Function that returns the current date accurate to the millisecond. Note that this - * function is never evaluated on the server-side, instead the server side date is - * retrieved (piggy-backed on the call to check that the metadata is up-to-date) and - * passed into this function at create time. - * - * + * Function that returns the current date accurate to the millisecond. 
Note that this function is + * never evaluated on the server-side, instead the server side date is retrieved (piggy-backed on + * the call to check that the metadata is up-to-date) and passed into this function at create time. * @since 0.1 */ -@BuiltInFunction(name=CurrentTimeFunction.NAME, nodeClass=CurrentTimeParseNode.class, args={} ) +@BuiltInFunction(name = CurrentTimeFunction.NAME, nodeClass = CurrentTimeParseNode.class, args = {}) public class CurrentTimeFunction extends CurrentDateTimeFunction { - public static final String NAME = "CURRENT_TIME"; - private final ImmutableBytesWritable currentDate = new ImmutableBytesWritable(new byte[PTime.INSTANCE.getByteSize()]); - - public CurrentTimeFunction() { - this(EnvironmentEdgeManager.currentTimeMillis()); - } + public static final String NAME = "CURRENT_TIME"; + private final ImmutableBytesWritable currentDate = + new ImmutableBytesWritable(new byte[PTime.INSTANCE.getByteSize()]); + + public CurrentTimeFunction() { + this(EnvironmentEdgeManager.currentTimeMillis()); + } - public CurrentTimeFunction(List children, StatementContext context) throws SQLException { - this(context.getCurrentTimeWithDisplacement()); - } + public CurrentTimeFunction(List children, StatementContext context) + throws SQLException { + this(context.getCurrentTimeWithDisplacement()); + } - public CurrentTimeFunction(long timeStamp) { - getDataType().getCodec().encodeLong(timeStamp, currentDate); - } + public CurrentTimeFunction(long timeStamp) { + getDataType().getCodec().encodeLong(timeStamp, currentDate); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - ptr.set(currentDate.get(), 0, PTime.INSTANCE.getByteSize()); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + ptr.set(currentDate.get(), 0, PTime.INSTANCE.getByteSize()); + return true; + } - @Override - public final PDataType getDataType() { - return PTime.INSTANCE; - } + @Override + public final PDataType getDataType() { + return PTime.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DateScalarFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DateScalarFunction.java index aa56a0f6b19..e523c3fe169 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DateScalarFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DateScalarFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,28 +27,28 @@ import org.apache.phoenix.util.DateUtil; public abstract class DateScalarFunction extends ScalarFunction { - protected PDataCodec inputCodec; - - public DateScalarFunction() { - } - - public DateScalarFunction(List children) { - super(children); - init(); - } - - protected final PDataCodec getInputCodec() { - return inputCodec; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); - } - - private void init() { - PDataType returnType = getChildren().get(0).getDataType(); - inputCodec = DateUtil.getCodecFor(returnType); - } + protected PDataCodec inputCodec; + + public DateScalarFunction() { + } + + public DateScalarFunction(List children) { + super(children); + init(); + } + + protected final PDataCodec getInputCodec() { + return inputCodec; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + private void init() { + PDataType returnType = getChildren().get(0).getDataType(); + inputCodec = DateUtil.getCodecFor(returnType); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java index 721f8e6599b..c9df8a69396 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfMonthFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the DayOfMonth() buildin. Input Date/Timestamp. - * An integer from 1 to 31 representing the day of the month in date - * + * Implementation of the DayOfMonth() buildin. Input Date/Timestamp. 
An integer from 1 to 31 + * representing the day of the month in date */ -@BuiltInFunction(name=DayOfMonthFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = DayOfMonthFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class DayOfMonthFunction extends DateScalarFunction { - public static final String NAME = "DAYOFMONTH"; + public static final String NAME = "DAYOFMONTH"; - public DayOfMonthFunction() { - } + public DayOfMonthFunction() { + } - public DayOfMonthFunction(List children) throws SQLException { - super(children); - } + public DayOfMonthFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int day = dt.getDayOfMonth(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(day, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int day = dt.getDayOfMonth(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(day, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfWeekFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfWeekFunction.java index 1dbcc10ad9c..27c534bce22 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfWeekFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfWeekFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,57 +31,49 @@ import org.joda.time.chrono.GJChronology; /** - * Implementation of DayOfWeekFunction(Date/Timestamp) - * - * Returns an integer from 1 to 7. 
Each represents a day of the week as follows : - * MONDAY = 1; - * TUESDAY = 2; - * WEDNESDAY = 3; - * THURSDAY = 4; - * FRIDAY = 5; - * SATURDAY = 6; - * SUNDAY = 7; - * + * Implementation of DayOfWeekFunction(Date/Timestamp) Returns an integer from 1 to 7. Each + * represents a day of the week as follows : MONDAY = 1; TUESDAY = 2; WEDNESDAY = 3; THURSDAY = 4; + * FRIDAY = 5; SATURDAY = 6; SUNDAY = 7; */ -@BuiltInFunction(name=DayOfWeekFunction.NAME, - args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = DayOfWeekFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class DayOfWeekFunction extends DateScalarFunction { - public static final String NAME = "DAYOFWEEK"; + public static final String NAME = "DAYOFWEEK"; - public DayOfWeekFunction(){ + public DayOfWeekFunction() { - } + } - public DayOfWeekFunction(List children){ - super(children); - } - @Override - public String getName() { - return NAME; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arg = getChildren().get(0); - if (!arg.evaluate(tuple,ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long dateTime = inputCodec.decodeLong(ptr, arg.getSortOrder()); - DateTime jodaDT = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int day = jodaDT.getDayOfWeek(); - PDataType returnDataType = getDataType(); - byte[] byteValue = new byte[returnDataType.getByteSize()]; - returnDataType.getCodec().encodeInt(day, byteValue, 0); - ptr.set(byteValue); - return true; - } + public DayOfWeekFunction(List children) { + super(children); + } + @Override + public String getName() { + return NAME; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arg = getChildren().get(0); + if (!arg.evaluate(tuple, ptr)) { + return false; } + if (ptr.getLength() == 0) { + return true; + } + long dateTime = inputCodec.decodeLong(ptr, arg.getSortOrder()); + DateTime jodaDT = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int day = jodaDT.getDayOfWeek(); + PDataType returnDataType = getDataType(); + byte[] byteValue = new byte[returnDataType.getByteSize()]; + returnDataType.getCodec().encodeInt(day, byteValue, 0); + ptr.set(byteValue); + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfYearFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfYearFunction.java index 2cde41e4a17..d8eba68156a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfYearFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DayOfYearFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,49 +31,47 @@ import org.joda.time.chrono.GJChronology; /** - * Implementation of DayOfYearFunction(Date/Timestamp) - * - * Returns an integer from 1 to 365 (for each day of the week). Returns 366 in a leap year. 
- * + * Implementation of DayOfYearFunction(Date/Timestamp) Returns an integer from 1 to 365 (for each + * day of the week). Returns 366 in a leap year. */ -@BuiltInFunction(name=DayOfYearFunction.NAME, - args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = DayOfYearFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class DayOfYearFunction extends DateScalarFunction { - public static final String NAME = "DAYOFYEAR"; + public static final String NAME = "DAYOFYEAR"; - public DayOfYearFunction() { - } + public DayOfYearFunction() { + } - public DayOfYearFunction(List children) { - super(children); - } + public DayOfYearFunction(List children) { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arg = getChildren().get(0); - if (!arg.evaluate(tuple,ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long dateTime = inputCodec.decodeLong(ptr, arg.getSortOrder()); - DateTime jodaDT = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int day = jodaDT.getDayOfYear(); - PDataType returnDataType = getDataType(); - byte[] byteValue = new byte[returnDataType.getByteSize()]; - returnDataType.getCodec().encodeInt(day, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arg = getChildren().get(0); + if (!arg.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; } + long dateTime = inputCodec.decodeLong(ptr, arg.getSortOrder()); + DateTime jodaDT = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int day = jodaDT.getDayOfYear(); + PDataType returnDataType = getDataType(); + byte[] byteValue = new byte[returnDataType.getByteSize()]; + returnDataType.getCodec().encodeInt(day, byteValue, 0); + ptr.set(byteValue); + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DecodeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DecodeFunction.java index 7b6ef382467..563d3d9f06c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DecodeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DecodeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,110 +26,112 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode; import org.apache.phoenix.schema.IllegalDataException; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; /** * Convert string to bytes */ -@FunctionParseNode.BuiltInFunction(name = DecodeFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), - @FunctionParseNode.Argument(enumeration = "EncodeFormat")}) +@FunctionParseNode.BuiltInFunction(name = DecodeFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(enumeration = "EncodeFormat") }) public class DecodeFunction extends ScalarFunction { - public static final String NAME = "DECODE"; - - public DecodeFunction() { - } - - public DecodeFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; // expression was evaluated, but evaluated to null - } - - PDataType type = expression.getDataType(); - String stringToDecode = (String) type.toObject(ptr); - - Expression encodingExpression = getEncodingExpression(); - if (!encodingExpression.evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) - .setMessage("Missing bytes encoding").build().buildException()); - } - - type = encodingExpression.getDataType(); - String encoding = ((String) type.toObject(ptr)).toUpperCase(); - - byte out[]; - - EncodeFormat format = EncodeFormat.valueOf(encoding); - switch (format) { - case HEX: - out = decodeHex(stringToDecode); - break; - default: - throw new IllegalDataException("Unsupported encoding \"" + encoding + "\""); - } - ptr.set(out); - - return true; - } - - private byte[] decodeHex(String hexStr) { - byte[] out = new byte[hexStr.length() / 2]; - for (int i = 0; i < hexStr.length(); i = i + 2) { - try { - out[i / 2] = (byte) Integer.parseInt(hexStr.substring(i, i + 2), 16); - } catch (NumberFormatException ex) { - throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) - .setMessage("Value " + hexStr.substring(i, i + 2) + " cannot be cast to hex number").build().buildException()); - } catch (StringIndexOutOfBoundsException ex) { - throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) - .setMessage("Invalid value length, cannot cast to hex number (" + hexStr + ")").build().buildException()); - } - } - return out; - } - - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - @Override - public boolean isNullable() { - return getExpression().isNullable(); - } - - private Expression getExpression() { - return children.get(0); - } - - private Expression getEncodingExpression() { - return children.get(1); - } - - @Override - public 
String getName() { - return NAME; - } - - @Override - public Integer getMaxLength() { - return getExpression().getMaxLength(); - } + public static final String NAME = "DECODE"; + + public DecodeFunction() { + } + + public DecodeFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; // expression was evaluated, but evaluated to null + } + + PDataType type = expression.getDataType(); + String stringToDecode = (String) type.toObject(ptr); + + Expression encodingExpression = getEncodingExpression(); + if (!encodingExpression.evaluate(tuple, ptr)) { + return false; + } + + if (ptr.getLength() == 0) { + throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage("Missing bytes encoding").build().buildException()); + } + + type = encodingExpression.getDataType(); + String encoding = ((String) type.toObject(ptr)).toUpperCase(); + + byte out[]; + + EncodeFormat format = EncodeFormat.valueOf(encoding); + switch (format) { + case HEX: + out = decodeHex(stringToDecode); + break; + default: + throw new IllegalDataException("Unsupported encoding \"" + encoding + "\""); + } + ptr.set(out); + + return true; + } + + private byte[] decodeHex(String hexStr) { + byte[] out = new byte[hexStr.length() / 2]; + for (int i = 0; i < hexStr.length(); i = i + 2) { + try { + out[i / 2] = (byte) Integer.parseInt(hexStr.substring(i, i + 2), 16); + } catch (NumberFormatException ex) { + throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage("Value " + hexStr.substring(i, i + 2) + " cannot be cast to hex number") + .build().buildException()); + } catch (StringIndexOutOfBoundsException ex) { + throw new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage("Invalid value length, cannot cast to hex number (" + hexStr + ")").build() + .buildException()); + } + } + return out; + } + + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + @Override + public boolean isNullable() { + return getExpression().isNullable(); + } + + private Expression getExpression() { + return children.get(0); + } + + private Expression getEncodingExpression() { + return children.get(1); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public Integer getMaxLength() { + return getExpression().getMaxLength(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DefaultValueExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DefaultValueExpression.java index bf27df4cc7d..e1bf70f722c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DefaultValueExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DefaultValueExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,75 +27,70 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; - /** - * - * Internal function used to get the default value for a column not specified in UPSERT. - * If expr1 is evaluated (can be null), then it is returned, otherwise expr2 is returned. - * + * Internal function used to get the default value for a column not specified in UPSERT. If expr1 is + * evaluated (can be null), then it is returned, otherwise expr2 is returned. */ public class DefaultValueExpression extends ScalarFunction { - public static final String NAME = "DEFAULT"; + public static final String NAME = "DEFAULT"; - public DefaultValueExpression() { - } + public DefaultValueExpression() { + } - public DefaultValueExpression(List children) throws SQLException { - super(children); - } + public DefaultValueExpression(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression firstChild = children.get(0); - boolean evaluated; - if (firstChild instanceof SingleCellColumnExpression) { - evaluated = ((SingleCellColumnExpression) firstChild).evaluateUnsafe(tuple, ptr); - } else if (firstChild instanceof KeyValueColumnExpression) { - evaluated = ((KeyValueColumnExpression) firstChild).evaluateUnsafe(tuple, ptr); - } else { - evaluated = children.get(0).evaluate(tuple, ptr); - } - if (evaluated) { - // Will potentially evaluate to null without evaluating the second expression - return true; - } - if (tuple.isImmutable()) {// True for the last time an evaluation is happening on the row - Expression secondChild = children.get(1); - if (secondChild.evaluate(tuple, ptr)) { - // Coerce the type of the second child to the type of the first child - getDataType().coerceBytes(ptr, null, secondChild.getDataType(), - secondChild.getMaxLength(), secondChild.getScale(), - secondChild.getSortOrder(), - getMaxLength(), getScale(), - getSortOrder()); - return true; - } - } - return false; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression firstChild = children.get(0); + boolean evaluated; + if (firstChild instanceof SingleCellColumnExpression) { + evaluated = ((SingleCellColumnExpression) firstChild).evaluateUnsafe(tuple, ptr); + } else if (firstChild instanceof KeyValueColumnExpression) { + evaluated = ((KeyValueColumnExpression) firstChild).evaluateUnsafe(tuple, ptr); + } else { + evaluated = children.get(0).evaluate(tuple, ptr); } - - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); + if (evaluated) { + // Will potentially evaluate to null without evaluating the second expression + return true; } - - @Override - public Integer getMaxLength() { - return children.get(0).getMaxLength(); + if (tuple.isImmutable()) {// True for the last time an evaluation is happening on the row + Expression secondChild = children.get(1); + if (secondChild.evaluate(tuple, ptr)) { + // Coerce the type of the second child to the type of the first child + getDataType().coerceBytes(ptr, null, secondChild.getDataType(), secondChild.getMaxLength(), + secondChild.getScale(), secondChild.getSortOrder(), getMaxLength(), getScale(), + getSortOrder()); + return true; + } } + 
return false; + } - @Override - public boolean isNullable() { - return children.get(0).isNullable() && children.get(1).isNullable(); - } + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } - @Override - public String getName() { - return NAME; - } + @Override + public Integer getMaxLength() { + return children.get(0).getMaxLength(); + } - @Override - public boolean requiresFinalEvaluation() { - return true; - } + @Override + public boolean isNullable() { + return children.get(0).isNullable() && children.get(1).isNullable(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean requiresFinalEvaluation() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DelegateConstantToCountAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DelegateConstantToCountAggregateFunction.java index e6532c690fb..3f82cd2a46f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DelegateConstantToCountAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DelegateConstantToCountAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,54 +20,49 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.tuple.Tuple; - +import org.apache.phoenix.schema.types.PLong; /** - * - * Base class for non composite aggregation functions that optimize aggregation by - * delegating to {@link CountAggregateFunction} when the child expression is a - * constant. - * - * + * Base class for non composite aggregation functions that optimize aggregation by delegating to + * {@link CountAggregateFunction} when the child expression is a constant. * @since 0.1 */ abstract public class DelegateConstantToCountAggregateFunction extends SingleAggregateFunction { - private static final ImmutableBytesWritable ZERO = new ImmutableBytesWritable(PLong.INSTANCE.toBytes(0L)); - private CountAggregateFunction delegate; - - public DelegateConstantToCountAggregateFunction() { - } - - public DelegateConstantToCountAggregateFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions); - // Using a delegate here causes us to optimize the number of aggregators - // by sharing the CountAggregator across functions. On the server side, - // this will always be null, since if it had not been null on the client, - // the function would not have been transfered over (the delegate would - // have instead). 
- this.delegate = delegate; - } + private static final ImmutableBytesWritable ZERO = + new ImmutableBytesWritable(PLong.INSTANCE.toBytes(0L)); + private CountAggregateFunction delegate; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (delegate == null) { - return super.evaluate(tuple, ptr); - } - delegate.evaluate(tuple, ptr); - if (PLong.INSTANCE.compareTo(ptr,ZERO) == 0) { - return false; - } - return true; - } + public DelegateConstantToCountAggregateFunction() { + } + public DelegateConstantToCountAggregateFunction(List childExpressions, + CountAggregateFunction delegate) { + super(childExpressions); + // Using a delegate here causes us to optimize the number of aggregators + // by sharing the CountAggregator across functions. On the server side, + // this will always be null, since if it had not been null on the client, + // the function would not have been transfered over (the delegate would + // have instead). + this.delegate = delegate; + } - @Override - protected SingleAggregateFunction getDelegate() { - return delegate != null ? delegate : super.getDelegate(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (delegate == null) { + return super.evaluate(tuple, ptr); } + delegate.evaluate(tuple, ptr); + if (PLong.INSTANCE.compareTo(ptr, ZERO) == 0) { + return false; + } + return true; + } + + @Override + protected SingleAggregateFunction getDelegate() { + return delegate != null ? delegate : super.getDelegate(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java index 5f73fb0baa8..896a58ec6c3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,96 +34,94 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.util.SchemaUtil; - /** - * * Built-in function for {@code COUNT(distinct ) } aggregate function, - * - * * @since 1.2.1 */ -@BuiltInFunction(name=DistinctCountAggregateFunction.NAME, nodeClass=DistinctCountParseNode.class, args= {@Argument()} ) +@BuiltInFunction(name = DistinctCountAggregateFunction.NAME, + nodeClass = DistinctCountParseNode.class, args = { @Argument() }) public class DistinctCountAggregateFunction extends DelegateConstantToCountAggregateFunction { - public static final String NAME = "DISTINCT_COUNT"; - public static final String NORMALIZED_NAME = SchemaUtil.normalizeIdentifier(NAME); - public final static byte[] ZERO = PLong.INSTANCE.toBytes(0L); - public final static byte[] ONE = PLong.INSTANCE.toBytes(1L); - - public DistinctCountAggregateFunction() { - } + public static final String NAME = "DISTINCT_COUNT"; + public static final String NORMALIZED_NAME = SchemaUtil.normalizeIdentifier(NAME); + public final static byte[] ZERO = PLong.INSTANCE.toBytes(0L); + public final static byte[] ONE = PLong.INSTANCE.toBytes(1L); - public DistinctCountAggregateFunction(List childExpressions) { - this(childExpressions, null); - } + public DistinctCountAggregateFunction() { + } - public DistinctCountAggregateFunction(List childExpressions, - CountAggregateFunction delegate) { - super(childExpressions, delegate); - assert childExpressions.size() == 1; - } - - @Override - public int hashCode() { - return isConstantExpression() ? 0 : super.hashCode(); - } + public DistinctCountAggregateFunction(List childExpressions) { + this(childExpressions, null); + } - /** - * The COUNT function never returns null - */ - @Override - public boolean isNullable() { - return false; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - DistinctCountAggregateFunction other = (DistinctCountAggregateFunction)obj; - return (isConstantExpression() && other.isConstantExpression()) || children.equals(other.getChildren()); - } + public DistinctCountAggregateFunction(List childExpressions, + CountAggregateFunction delegate) { + super(childExpressions, delegate); + assert childExpressions.size() == 1; + } - @Override - public PDataType getDataType() { - return PLong.INSTANCE; - } + @Override + public int hashCode() { + return isConstantExpression() ? 
0 : super.hashCode(); + } - @Override - public DistinctCountClientAggregator newClientAggregator() { - return new DistinctCountClientAggregator(getAggregatorExpression().getSortOrder()); - } - - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - for (Expression child : getChildren()) { - if (child.getDataType() != null && !child.getDataType().isComparisonSupported()) { - throw new ComparisonNotSupportedException(child.getDataType()); - } - } - // TODO: optimize query plan of this to run scan serially for a limit of one row - if (!super.evaluate(tuple, ptr)) { - ptr.set(ZERO); // If evaluate returns false, then no rows were found, so result is 0 - } else if (isConstantExpression()) { - ptr.set(ONE); // Otherwise, we found one or more rows, so a distinct on a constant is 1 - } - return true; // Always evaluates to a LONG value + /** + * The COUNT function never returns null + */ + @Override + public boolean isNullable() { + return false; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + DistinctCountAggregateFunction other = (DistinctCountAggregateFunction) obj; + return (isConstantExpression() && other.isConstantExpression()) + || children.equals(other.getChildren()); + } + + @Override + public PDataType getDataType() { + return PLong.INSTANCE; + } + + @Override + public DistinctCountClientAggregator newClientAggregator() { + return new DistinctCountClientAggregator(getAggregatorExpression().getSortOrder()); + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + for (Expression child : getChildren()) { + if (child.getDataType() != null && !child.getDataType().isComparisonSupported()) { + throw new ComparisonNotSupportedException(child.getDataType()); + } } - - @Override - public String getName() { - return NAME; + // TODO: optimize query plan of this to run scan serially for a limit of one row + if (!super.evaluate(tuple, ptr)) { + ptr.set(ZERO); // If evaluate returns false, then no rows were found, so result is 0 + } else if (isConstantExpression()) { + ptr.set(ONE); // Otherwise, we found one or more rows, so a distinct on a constant is 1 } + return true; // Always evaluates to a LONG value + } - @Override - public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { - DistinctCountClientAggregator clientAgg = newClientAggregator(); - clientAgg.aggregate(null, ptr); - return new DistinctValueWithCountServerAggregator(config, clientAgg); - } + @Override + public String getName() { + return NAME; + } + + @Override + public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { + DistinctCountClientAggregator clientAgg = newClientAggregator(); + clientAgg.aggregate(null, ptr); + return new DistinctValueWithCountServerAggregator(config, clientAgg); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountHyperLogLogAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountHyperLogLogAggregateFunction.java index 7e18afdeaa6..ecc91b58351 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountHyperLogLogAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctCountHyperLogLogAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,171 +22,160 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.BaseAggregator; import org.apache.phoenix.expression.aggregator.DistinctCountClientAggregator; +import org.apache.phoenix.parse.DistinctCountHyperLogLogAggregateParseNode; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.DistinctCountHyperLogLogAggregateParseNode; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.util.ByteUtil; import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * Built-in function for Distinct Count Aggregation - * function in approximation. - * This aggregator is implemented using HyperLogLog. - * Please refer to PHOENIX-418 - * https://issues.apache.org/jira/browse/PHOENIX-418 - * - * - * 1, Accuracy input is not a customizeable. In HyperLogLog - * accuracy is propertional to 1/sqrt(m), m is the size of - * the hll hash. Also, this process is irrelavent to runtime - * or space complexity. - * - * 2, The two parameters that requires during HLL initialization. - * i.e., the precision value for the normal set and the precision - * value for the sparse set, is hard coded as static final - * variable. Any change of them requires re-deployment of the - * phoenix server coprocessors. - * + * Built-in function for Distinct Count Aggregation function in approximation. This aggregator is + * implemented using HyperLogLog. Please refer to PHOENIX-418 + * https://issues.apache.org/jira/browse/PHOENIX-418 1, Accuracy input is not a customizeable. In + * HyperLogLog accuracy is propertional to 1/sqrt(m), m is the size of the hll hash. Also, this + * process is irrelavent to runtime or space complexity. 2, The two parameters that requires during + * HLL initialization. i.e., the precision value for the normal set and the precision value for the + * sparse set, is hard coded as static final variable. Any change of them requires re-deployment of + * the phoenix server coprocessors. 
*/ -@BuiltInFunction(name=DistinctCountHyperLogLogAggregateFunction.NAME, nodeClass=DistinctCountHyperLogLogAggregateParseNode.class, args= {@Argument()} ) +@BuiltInFunction(name = DistinctCountHyperLogLogAggregateFunction.NAME, + nodeClass = DistinctCountHyperLogLogAggregateParseNode.class, args = { @Argument() }) public class DistinctCountHyperLogLogAggregateFunction extends DistinctCountAggregateFunction { - public static final String NAME = "APPROX_COUNT_DISTINCT"; - public static final int NormalSetPrecision = 16; - public static final int SparseSetPrecision = 25; - - public DistinctCountHyperLogLogAggregateFunction() { - } - - public DistinctCountHyperLogLogAggregateFunction(List childExpressions){ - super(childExpressions, null); - } - - public DistinctCountHyperLogLogAggregateFunction(List childExpressions, CountAggregateFunction delegate){ - super(childExpressions, delegate); - } - - @Override - public DistinctCountClientAggregator newClientAggregator() { - return new HyperLogLogClientAggregator(SortOrder.getDefault()); - } - - @Override - public Aggregator newServerAggregator(Configuration conf) { - final Expression child = getAggregatorExpression(); - return new HyperLogLogServerAggregator(child.getSortOrder()){ - @Override - protected PDataType getInputDataType() { - return child.getDataType(); - } - }; - } - - @Override - public Aggregator newServerAggregator(Configuration conf, ImmutableBytesWritable ptr) { - final Expression child = getAggregatorExpression(); - return new HyperLogLogServerAggregator(child.getSortOrder(), ptr) { - @Override - protected PDataType getInputDataType() { - return child.getDataType(); - } - }; - } - - @Override - public String getName() { - return NAME; - } + public static final String NAME = "APPROX_COUNT_DISTINCT"; + public static final int NormalSetPrecision = 16; + public static final int SparseSetPrecision = 25; + + public DistinctCountHyperLogLogAggregateFunction() { + } + + public DistinctCountHyperLogLogAggregateFunction(List childExpressions) { + super(childExpressions, null); + } + + public DistinctCountHyperLogLogAggregateFunction(List childExpressions, + CountAggregateFunction delegate) { + super(childExpressions, delegate); + } + + @Override + public DistinctCountClientAggregator newClientAggregator() { + return new HyperLogLogClientAggregator(SortOrder.getDefault()); + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + final Expression child = getAggregatorExpression(); + return new HyperLogLogServerAggregator(child.getSortOrder()) { + @Override + protected PDataType getInputDataType() { + return child.getDataType(); + } + }; + } + + @Override + public Aggregator newServerAggregator(Configuration conf, ImmutableBytesWritable ptr) { + final Expression child = getAggregatorExpression(); + return new HyperLogLogServerAggregator(child.getSortOrder(), ptr) { + @Override + protected PDataType getInputDataType() { + return child.getDataType(); + } + }; + } + + @Override + public String getName() { + return NAME; + } } - /** -* ClientSide HyperLogLogAggregator -* It will be called when server side aggregator has finished -* Method aggregate is called for every new server aggregator returned -* Method evaluate is called when the aggregate is done. 
-* the return of evaluate will be send back to user as -* counted result of expression.evaluate -*/ -class HyperLogLogClientAggregator extends DistinctCountClientAggregator{ - private HyperLogLogPlus hll = new HyperLogLogPlus(DistinctCountHyperLogLogAggregateFunction.NormalSetPrecision, DistinctCountHyperLogLogAggregateFunction.SparseSetPrecision); - - public HyperLogLogClientAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - try { - hll.addAll(HyperLogLogPlus.Builder.build(ByteUtil.copyKeyBytesIfNecessary(ptr))); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - byte[] buffer = new byte[PLong.INSTANCE.getByteSize()]; - PLong.INSTANCE.getCodec().encodeLong(hll.cardinality(), buffer, 0); - ptr.set(buffer); - return true; - } + * ClientSide HyperLogLogAggregator It will be called when server side aggregator has finished + * Method aggregate is called for every new server aggregator returned Method evaluate is called + * when the aggregate is done. the return of evaluate will be send back to user as counted result of + * expression.evaluate + */ +class HyperLogLogClientAggregator extends DistinctCountClientAggregator { + private HyperLogLogPlus hll = + new HyperLogLogPlus(DistinctCountHyperLogLogAggregateFunction.NormalSetPrecision, + DistinctCountHyperLogLogAggregateFunction.SparseSetPrecision); + + public HyperLogLogClientAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + try { + hll.addAll(HyperLogLogPlus.Builder.build(ByteUtil.copyKeyBytesIfNecessary(ptr))); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + byte[] buffer = new byte[PLong.INSTANCE.getByteSize()]; + PLong.INSTANCE.getCodec().encodeLong(hll.cardinality(), buffer, 0); + ptr.set(buffer); + return true; + } } - /** - * ServerSide HyperLogLogAggregator - * It will be serialized and dispatched to region server - * Method aggregate is called for every new row scanned - * Method evaluate is called when this remote scan is over. - * the return of evaluate will be send back to ClientSideAggregator.aggregate + * ServerSide HyperLogLogAggregator It will be serialized and dispatched to region server Method + * aggregate is called for every new row scanned Method evaluate is called when this remote scan is + * over. 
the return of evaluate will be send back to ClientSideAggregator.aggregate */ -abstract class HyperLogLogServerAggregator extends BaseAggregator{ - private HyperLogLogPlus hll = new HyperLogLogPlus(DistinctCountHyperLogLogAggregateFunction.NormalSetPrecision, DistinctCountHyperLogLogAggregateFunction.SparseSetPrecision); - protected final ImmutableBytesWritable valueByteArray = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - - public HyperLogLogServerAggregator(SortOrder sortOrder) { - super(sortOrder); - } - - public HyperLogLogServerAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { - this(sortOrder); - if(ptr !=null){ - hll.offer(ptr); - } - } - - @Override - public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { - hll.offer(ptr); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - try { - valueByteArray.set(hll.getBytes(), 0, hll.getBytes().length); - ptr.set(ByteUtil.copyKeyBytesIfNecessary(valueByteArray)); - } catch (IOException e) { - throw new RuntimeException(e); - } - return true; - } - - @Override - public final PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - abstract protected PDataType getInputDataType(); -} \ No newline at end of file +abstract class HyperLogLogServerAggregator extends BaseAggregator { + private HyperLogLogPlus hll = + new HyperLogLogPlus(DistinctCountHyperLogLogAggregateFunction.NormalSetPrecision, + DistinctCountHyperLogLogAggregateFunction.SparseSetPrecision); + protected final ImmutableBytesWritable valueByteArray = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + + public HyperLogLogServerAggregator(SortOrder sortOrder) { + super(sortOrder); + } + + public HyperLogLogServerAggregator(SortOrder sortOrder, ImmutableBytesWritable ptr) { + this(sortOrder); + if (ptr != null) { + hll.offer(ptr); + } + } + + @Override + public void aggregate(Tuple tuple, ImmutableBytesWritable ptr) { + hll.offer(ptr); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + try { + valueByteArray.set(hll.getBytes(), 0, hll.getBytes().length); + ptr.set(ByteUtil.copyKeyBytesIfNecessary(valueByteArray)); + } catch (IOException e) { + throw new RuntimeException(e); + } + return true; + } + + @Override + public final PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + abstract protected PDataType getInputDataType(); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueAggregateFunction.java index a48e523e3a7..2f2196df0ba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,39 +30,43 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; -@BuiltInFunction(name=DistinctValueAggregateFunction.NAME, args= {@Argument()} ) +@BuiltInFunction(name = DistinctValueAggregateFunction.NAME, args = { @Argument() }) public class DistinctValueAggregateFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "COLLECTDISTINCT"; - - public DistinctValueAggregateFunction() { - } - - public DistinctValueAggregateFunction(List children) { - super(children); - } + public static final String NAME = "COLLECTDISTINCT"; - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } + public DistinctValueAggregateFunction() { + } - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - PDataType baseType = getAggregatorExpression().getDataType().isArrayType() ? - PVarbinary.INSTANCE : getAggregatorExpression().getDataType(); - PDataType resultType = PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); - return new DistinctValueClientAggregator(getAggregatorExpression().getSortOrder(), baseType, resultType); - } - - @Override - public String getName() { - return NAME; - } + public DistinctValueAggregateFunction(List children) { + super(children); + } - @Override - public PDataType getDataType() { - PDataType baseType = getAggregatorExpression().getDataType().isArrayType() ? PVarbinary.INSTANCE : getAggregatorExpression().getDataType(); - return PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } + + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + PDataType baseType = getAggregatorExpression().getDataType().isArrayType() + ? PVarbinary.INSTANCE + : getAggregatorExpression().getDataType(); + PDataType resultType = PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); + return new DistinctValueClientAggregator(getAggregatorExpression().getSortOrder(), baseType, + resultType); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public PDataType getDataType() { + PDataType baseType = getAggregatorExpression().getDataType().isArrayType() + ? PVarbinary.INSTANCE + : getAggregatorExpression().getDataType(); + return PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueWithCountAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueWithCountAggregateFunction.java index c4110f1bb95..0d786e3cfa9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueWithCountAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/DistinctValueWithCountAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +21,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DistinctValueWithCountClientAggregator; @@ -29,20 +28,20 @@ public abstract class DistinctValueWithCountAggregateFunction extends SingleAggregateFunction { - public DistinctValueWithCountAggregateFunction() { - } + public DistinctValueWithCountAggregateFunction() { + } + + public DistinctValueWithCountAggregateFunction(List children) { + super(children); + } - public DistinctValueWithCountAggregateFunction(List children) { - super(children); - } + @Override + abstract public DistinctValueWithCountClientAggregator newClientAggregator(); - @Override - abstract public DistinctValueWithCountClientAggregator newClientAggregator(); - - @Override - public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { - DistinctValueWithCountClientAggregator clientAgg = newClientAggregator(); - clientAgg.aggregate(null, ptr); - return new DistinctValueWithCountServerAggregator(config, clientAgg); - } + @Override + public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { + DistinctValueWithCountClientAggregator clientAgg = newClientAggregator(); + clientAgg.aggregate(null, ptr); + return new DistinctValueWithCountServerAggregator(config, clientAgg); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java index 8130228baab..27890edf585 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ public enum EncodeFormat { - HEX, //format for encoding HEX value to bytes - BASE62, //format for encoding a base 10 long value to base 62 string - BASE64, //format for encoding a base 10 long value to base 64 string - ASCII // Plain Text + HEX, // format for encoding HEX value to bytes + BASE62, // format for encoding a base 10 long value to base 62 string + BASE64, // format for encoding a base 10 long value to base 64 string + ASCII // Plain Text }; diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFunction.java index 1078566086a..a76d29867bc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/EncodeFunction.java @@ -1,18 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.expression.function; @@ -30,80 +31,79 @@ import org.apache.phoenix.util.Base62Encoder; /** - * Implementation of ENCODE(input number, format encodeformat) - * - * Converts the given base 10 number to a base 62 number and returns a string representing the number. + * Implementation of ENCODE(input number, format encodeformat) Converts the given base 10 number to + * a base 62 number and returns a string representing the number. 
*/ -@BuiltInFunction(name = EncodeFunction.NAME, args = { @Argument(allowedTypes = { PLong.class }), - @Argument(enumeration = "EncodeFormat") }) +@BuiltInFunction(name = EncodeFunction.NAME, + args = { @Argument(allowedTypes = { PLong.class }), @Argument(enumeration = "EncodeFormat") }) public class EncodeFunction extends ScalarFunction { - public static final String NAME = "ENCODE"; - - public EncodeFunction() { - } + public static final String NAME = "ENCODE"; - public EncodeFunction(List children) { - super(children); - } + public EncodeFunction() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression numExpr = getNumExpr(); - if (!numExpr.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long num = numExpr.getDataType().getCodec().decodeLong(ptr, numExpr.getSortOrder()); - - Expression encodingExpression = getEncodingExpr(); - if (!encodingExpression.evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - throw new IllegalDataException(getMissingEncodeFormatMsg()); - } - - PDataType type = encodingExpression.getDataType(); - String encodingFormat = ((String) type.toObject(ptr)).toUpperCase(); - EncodeFormat format = EncodeFormat.valueOf(encodingFormat); - switch (format) { - case BASE62: - String encodedString = Base62Encoder.toString(num); - ptr.set(PVarchar.INSTANCE.toBytes(encodedString)); - break; - default: - throw new IllegalDataException(getUnsupportedEncodeFormatMsg(encodingFormat)); - } - return true; - } - - public static String getMissingEncodeFormatMsg() { - return "Missing Encode Format"; + public EncodeFunction(List children) { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression numExpr = getNumExpr(); + if (!numExpr.evaluate(tuple, ptr)) { + return false; } - - public static String getUnsupportedEncodeFormatMsg(String encodeFormat) { - return "Unsupported Encode Format : " + encodeFormat; + if (ptr.getLength() == 0) { + return true; } + long num = numExpr.getDataType().getCodec().decodeLong(ptr, numExpr.getSortOrder()); - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + Expression encodingExpression = getEncodingExpr(); + if (!encodingExpression.evaluate(tuple, ptr)) { + return false; } - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + throw new IllegalDataException(getMissingEncodeFormatMsg()); } - private Expression getNumExpr() { - return children.get(0); + PDataType type = encodingExpression.getDataType(); + String encodingFormat = ((String) type.toObject(ptr)).toUpperCase(); + EncodeFormat format = EncodeFormat.valueOf(encodingFormat); + switch (format) { + case BASE62: + String encodedString = Base62Encoder.toString(num); + ptr.set(PVarchar.INSTANCE.toBytes(encodedString)); + break; + default: + throw new IllegalDataException(getUnsupportedEncodeFormatMsg(encodingFormat)); } + return true; + } - private Expression getEncodingExpr() { - return children.get(1); - } + public static String getMissingEncodeFormatMsg() { + return "Missing Encode Format"; + } + + public static String getUnsupportedEncodeFormatMsg(String encodeFormat) { + return "Unsupported Encode Format : " + encodeFormat; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + + private Expression getNumExpr() { + return children.get(0); + } + + private Expression 
getEncodingExpr() { + return children.get(1); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java index 5c0ca7234db..1e34772e129 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,30 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = ExpFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) +@BuiltInFunction(name = ExpFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class ExpFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "EXP"; + public static final String NAME = "EXP"; - public ExpFunction() { - } + public ExpFunction() { + } - public ExpFunction(List children) throws SQLException { - super(children); - } + public ExpFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.exp(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.exp(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java index d8b300b5f05..218185c3f95 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,72 +17,65 @@ */ package org.apache.phoenix.expression.function; +import java.sql.SQLException; +import java.util.List; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.schema.IllegalDataException; -import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.util.ByteUtil; -import java.sql.SQLException; -import java.util.List; - - /** - * - * Function used to get the external SQL type id from the internal SQL type integer. - * Typically the external and internal ids are the same, but for some types (e.g. arrays) - * there is are multiple specific internal types to represent multiple external types. - * - * Usage: - * ExternalSqlTypeId(12) - * will return 12 based on {@link java.sql.Types#VARCHAR} being 12 - * - * + * Function used to get the external SQL type id from the internal SQL type integer. Typically the + * external and internal ids are the same, but for some types (e.g. arrays) there is are multiple + * specific internal types to represent multiple external types. Usage: ExternalSqlTypeId(12) will + * return 12 based on {@link java.sql.Types#VARCHAR} being 12 * @since 3.0 */ -@BuiltInFunction(name=ExternalSqlTypeIdFunction.NAME, args= { - @Argument(allowedTypes= PInteger.class )} ) +@BuiltInFunction(name = ExternalSqlTypeIdFunction.NAME, + args = { @Argument(allowedTypes = PInteger.class) }) public class ExternalSqlTypeIdFunction extends ScalarFunction { - public static final String NAME = "ExternalSqlTypeId"; + public static final String NAME = "ExternalSqlTypeId"; - public ExternalSqlTypeIdFunction() { - } + public ExternalSqlTypeIdFunction() { + } - public ExternalSqlTypeIdFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - int sqlType = child.getDataType().getCodec().decodeInt(ptr, child.getSortOrder()); - try { - byte[] externalIdTypeBytes = PInteger.INSTANCE.toBytes( - PDataType.fromTypeId(sqlType).getResultSetSqlType()); - ptr.set(externalIdTypeBytes); - } catch (IllegalDataException e) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - return true; - } + public ExternalSqlTypeIdFunction(List children) throws SQLException { + super(children); + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; } - - @Override - public String getName() { - return NAME; + int sqlType = child.getDataType().getCodec().decodeInt(ptr, child.getSortOrder()); + try { + byte[] externalIdTypeBytes = + 
PInteger.INSTANCE.toBytes(PDataType.fromTypeId(sqlType).getResultSetSqlType()); + ptr.set(externalIdTypeBytes); + } catch (IllegalDataException e) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); } + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstLastValueBaseFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstLastValueBaseFunction.java index 61a196d4438..c07278ed2c8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstLastValueBaseFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstLastValueBaseFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,41 +26,41 @@ /** * (FIRST|LAST|NTH)_VALUE build in function interface - * */ abstract public class FirstLastValueBaseFunction extends DelegateConstantToCountAggregateFunction { - public FirstLastValueBaseFunction() { - } + public FirstLastValueBaseFunction() { + } - public FirstLastValueBaseFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - boolean wasEvaluated = super.evaluate(tuple, ptr); - if (!wasEvaluated) { - return false; - } - if (isConstantExpression()) { - getAggregatorExpression().evaluate(tuple, ptr); - } - return true; - } + public FirstLastValueBaseFunction(List childExpressions, + CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - @Override - public PDataType getDataType() { - return children.get(2).getDataType(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + boolean wasEvaluated = super.evaluate(tuple, ptr); + if (!wasEvaluated) { + return false; } - - @Override - public Integer getMaxLength() { - return children.get(2).getMaxLength(); - } - - @Override - public Integer getScale() { - return children.get(2).getScale(); + if (isConstantExpression()) { + getAggregatorExpression().evaluate(tuple, ptr); } + return true; + } + + @Override + public PDataType getDataType() { + return children.get(2).getDataType(); + } + + @Override + public Integer getMaxLength() { + return children.get(2).getMaxLength(); + } + + @Override + public Integer getScale() { + return children.get(2).getScale(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java index 09871d98222..b12613e7647 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValueFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,49 +30,50 @@ import org.apache.phoenix.schema.types.PBoolean; /** - * Built-in function for {@code FIRST_VALUE() WITHIN GROUP (ORDER BY ASC/DESC) aggregate } + * Built-in function for + * {@code FIRST_VALUE() WITHIN GROUP (ORDER BY ASC/DESC) aggregate } * function - * */ -@FunctionParseNode.BuiltInFunction(name = FirstValueFunction.NAME, nodeClass = FirstValueAggregateParseNode.class, args = { - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @FunctionParseNode.Argument()}) +@FunctionParseNode.BuiltInFunction(name = FirstValueFunction.NAME, + nodeClass = FirstValueAggregateParseNode.class, + args = { @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @FunctionParseNode.Argument() }) public class FirstValueFunction extends FirstLastValueBaseFunction { - public static final String NAME = "FIRST_VALUE"; + public static final String NAME = "FIRST_VALUE"; - public FirstValueFunction() { - } + public FirstValueFunction() { + } - public FirstValueFunction(List childExpressions) { - this(childExpressions, null); - } + public FirstValueFunction(List childExpressions) { + this(childExpressions, null); + } - public FirstValueFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public FirstValueFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); + @Override + public Aggregator newServerAggregator(Configuration conf) { + FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); - boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); - aggregator.init(children, order, 0); + boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); + aggregator.init(children, order, 0); - return aggregator; - } + return aggregator; + } - @Override - public Aggregator newClientAggregator() { - FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); - aggregator.init(0, false); + @Override + public Aggregator newClientAggregator() { + FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); + aggregator.init(0, false); - return aggregator; - } + return aggregator; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java index b9114a227c0..67e65efff89 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FirstValuesFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,65 +33,68 @@ import org.apache.phoenix.schema.types.PInteger; /** - * Built-in function for {@code FIRST_VALUES(, ) WITHIN GROUP (ORDER BY ASC/DESC) aggregate } + * Built-in function for + * {@code FIRST_VALUES(, ) WITHIN GROUP (ORDER BY ASC/DESC) aggregate } * function - * */ -@FunctionParseNode.BuiltInFunction(name = FirstValuesFunction.NAME, nodeClass = FirstValuesAggregateParseNode.class, args = { - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true)}) +@FunctionParseNode.BuiltInFunction(name = FirstValuesFunction.NAME, + nodeClass = FirstValuesAggregateParseNode.class, + args = { @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true) }) public class FirstValuesFunction extends FirstLastValueBaseFunction { - public static final String NAME = "FIRST_VALUES"; - private int offset; - - public FirstValuesFunction() { - } + public static final String NAME = "FIRST_VALUES"; + private int offset; - public FirstValuesFunction(List childExpressions) { - this(childExpressions, null); - } + public FirstValuesFunction() { + } - public FirstValuesFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public FirstValuesFunction(List childExpressions) { + this(childExpressions, null); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); + public FirstValuesFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); - boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); + @Override + public Aggregator newServerAggregator(Configuration conf) { + FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); - aggregator.init(children, order, offset); + offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); + boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); - return aggregator; - } + aggregator.init(children, order, offset); - @Override - public Aggregator newClientAggregator() { - FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(getDataType()); + return aggregator; + } - if (children.size() < 3) { - aggregator.init(offset, true); - } else { - aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), true); - } + @Override + public Aggregator newClientAggregator() { + FirstLastValueBaseClientAggregator aggregator = + new FirstLastValueBaseClientAggregator(getDataType()); - return aggregator; + if (children.size() < 3) { + aggregator.init(offset, true); + } else { + aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), true); } - @Override - public String getName() { - return NAME; - } + 
return aggregator; + } + + @Override + public String getName() { + return NAME; + } - @Override - public PDataType getDataType() { - if (children.size() < 3) { - return null; - } - return PDataType.fromTypeId(children.get(2).getDataType().getSqlType() + PArrayDataType.ARRAY_TYPE_BASE); + @Override + public PDataType getDataType() { + if (children.size() < 3) { + return null; } + return PDataType + .fromTypeId(children.get(2).getDataType().getSqlType() + PArrayDataType.ARRAY_TYPE_BASE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDateExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDateExpression.java index 257430e187f..4633ab0a17d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDateExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDateExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,103 +36,102 @@ import org.apache.phoenix.schema.types.PUnsignedDate; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.schema.types.PVarchar; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Class encapsulating the FLOOR operation on - * a column/literal of type {@link org.apache.phoenix.schema.types.PDate}. - * - * + * Class encapsulating the FLOOR operation on a column/literal of type + * {@link org.apache.phoenix.schema.types.PDate}. * @since 3.0.0 */ @BuiltInFunction(name = FloorFunction.NAME, - args = { - @Argument(allowedTypes={PTimestamp.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED - ) + args = { @Argument(allowedTypes = { PTimestamp.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class FloorDateExpression extends RoundDateExpression { - - public FloorDateExpression() {} - - public FloorDateExpression(List children) { - super(children); - } - - public static Expression create(List children) throws SQLException { - Expression firstChild = children.get(0); - PDataType firstChildDataType = firstChild.getDataType(); - if (firstChildDataType == PTimestamp.INSTANCE || firstChildDataType == PUnsignedTimestamp.INSTANCE){ - // Coerce TIMESTAMP to DATE, as the nanos has no affect - List newChildren = Lists.newArrayListWithExpectedSize(children.size()); - newChildren.add(CoerceExpression.create(firstChild, firstChildDataType == PTimestamp.INSTANCE ? PDate.INSTANCE : PUnsignedDate.INSTANCE)); - newChildren.addAll(children.subList(1, children.size())); - children = newChildren; - } - - Object timeUnitValue = ((LiteralExpression)children.get(1)).getValue(); - TimeUnit timeUnit = TimeUnit.getTimeUnit(timeUnitValue != null ? 
timeUnitValue.toString() : null); - switch(timeUnit) { - case WEEK: - return new FloorWeekExpression(children); - case MONTH: - return new FloorMonthExpression(children); - case YEAR: - return new FloorYearExpression(children); - default: - return new FloorDateExpression(children); - } - - } - - /** - * @param timeUnit - unit of time to round up to. - * Creates a {@link FloorDateExpression} with default multiplier of 1. - */ - public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { - return create(expr, timeUnit, 1); - } - - /** - * @param timeUnit - unit of time to round up to - * @param multiplier - determines the roll up window size. - * Create a {@link FloorDateExpression}. - */ - public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) throws SQLException { - Expression timeUnitExpr = getTimeUnitExpr(timeUnit); - Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); - List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); - return create(expressions); - } - - @Override - protected long getRoundUpAmount() { - return 0; + + public FloorDateExpression() { + } + + public FloorDateExpression(List children) { + super(children); + } + + public static Expression create(List children) throws SQLException { + Expression firstChild = children.get(0); + PDataType firstChildDataType = firstChild.getDataType(); + if ( + firstChildDataType == PTimestamp.INSTANCE || firstChildDataType == PUnsignedTimestamp.INSTANCE + ) { + // Coerce TIMESTAMP to DATE, as the nanos has no affect + List newChildren = Lists.newArrayListWithExpectedSize(children.size()); + newChildren.add(CoerceExpression.create(firstChild, + firstChildDataType == PTimestamp.INSTANCE ? PDate.INSTANCE : PUnsignedDate.INSTANCE)); + newChildren.addAll(children.subList(1, children.size())); + children = newChildren; } - - @Override - public String getName() { - return FloorFunction.NAME; + + Object timeUnitValue = ((LiteralExpression) children.get(1)).getValue(); + TimeUnit timeUnit = + TimeUnit.getTimeUnit(timeUnitValue != null ? timeUnitValue.toString() : null); + switch (timeUnit) { + case WEEK: + return new FloorWeekExpression(children); + case MONTH: + return new FloorMonthExpression(children); + case YEAR: + return new FloorYearExpression(children); + default: + return new FloorDateExpression(children); } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength()==0) { - return true; - } - PDataType dataType = getDataType(); - long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); - long value = roundTime(time); - Date d = new Date(value); - byte[] byteValue = dataType.toBytes(d); - ptr.set(byteValue); - return true; - } - return false; + + } + + /** + * @param timeUnit - unit of time to round up to. Creates a {@link FloorDateExpression} with + * default multiplier of 1. + */ + public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { + return create(expr, timeUnit, 1); + } + + /** + * @param timeUnit - unit of time to round up to + * @param multiplier - determines the roll up window size. Create a {@link FloorDateExpression}. 
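For orientation only (not part of the patch): a minimal sketch of how the factory methods above are meant to be used to build a month-granularity floor. Here dateColumnExpr is a hypothetical date-typed Expression standing in for a real column reference.

  // Illustration only; dateColumnExpr is a hypothetical date-typed Expression.
  Expression floorToMonth = FloorDateExpression.create(dateColumnExpr, TimeUnit.MONTH);
  // Per the TimeUnit switch above this dispatches to FloorMonthExpression, broadly
  // the same path the SQL form FLOOR(date_col, 'MONTH') takes through create(children).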
+ */ + public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) + throws SQLException { + Expression timeUnitExpr = getTimeUnitExpr(timeUnit); + Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); + List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); + return create(expressions); + } + + @Override + protected long getRoundUpAmount() { + return 0; + } + + @Override + public String getName() { + return FloorFunction.NAME; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; + } + PDataType dataType = getDataType(); + long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); + long value = roundTime(time); + Date d = new Date(value); + byte[] byteValue = dataType.toBytes(d); + ptr.set(byteValue); + return true; } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDecimalExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDecimalExpression.java index e1a9e368444..47eab72030d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDecimalExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorDecimalExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,97 +25,91 @@ import org.apache.phoenix.expression.Determinism; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.FunctionParseNode.Argument; -import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Class encapsulating the FLOOR operation on - * a column/literal of type {@link org.apache.phoenix.schema.types.PDecimal}. - * - * + * Class encapsulating the FLOOR operation on a column/literal of type + * {@link org.apache.phoenix.schema.types.PDecimal}. 
* @since 3.0.0 */ @BuiltInFunction(name = FloorFunction.NAME, - args = { - @Argument(allowedTypes={PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class FloorDecimalExpression extends RoundDecimalExpression { - public FloorDecimalExpression() {} + public FloorDecimalExpression() { + } - public FloorDecimalExpression(List children) { - super(children); - } + public FloorDecimalExpression(List children) { + super(children); + } - /** - * Creates a {@link FloorDecimalExpression} with rounding scale given by @param scale. - * - */ - public static Expression create(Expression expr, int scale) throws SQLException { - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - Expression scaleExpr = LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); - List expressions = Lists.newArrayList(expr, scaleExpr); - return new FloorDecimalExpression(expressions); + /** + * Creates a {@link FloorDecimalExpression} with rounding scale given by @param scale. + */ + public static Expression create(Expression expr, int scale) throws SQLException { + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } + Expression scaleExpr = + LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); + List expressions = Lists.newArrayList(expr, scaleExpr); + return new FloorDecimalExpression(expressions); + } - public static Expression create(List exprs) throws SQLException { - Expression expr = exprs.get(0); - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - if (exprs.size() == 1) { - Expression scaleExpr = LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); - exprs = Lists.newArrayList(expr, scaleExpr); - } - return new FloorDecimalExpression(exprs); + public static Expression create(List exprs) throws SQLException { + Expression expr = exprs.get(0); + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } - - /** - * Creates a {@link FloorDecimalExpression} with a default scale of 0 used for rounding. - * - */ - public static Expression create(Expression expr) throws SQLException { - return create(expr, 0); + if (exprs.size() == 1) { + Expression scaleExpr = + LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); + exprs = Lists.newArrayList(expr, scaleExpr); } + return new FloorDecimalExpression(exprs); + } - @Override - protected RoundingMode getRoundingMode() { - return RoundingMode.FLOOR; - } + /** + * Creates a {@link FloorDecimalExpression} with a default scale of 0 used for rounding. 
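As a standalone aside (not part of the patch): the FLOOR-at-a-scale semantics this class selects via RoundingMode.FLOOR can be reproduced with the plain JDK, which may help when reading the rounding-scale handling around it.

  // Plain-JDK illustration of FLOOR rounding at a given scale.
  import java.math.BigDecimal;
  import java.math.RoundingMode;

  public class FloorScaleDemo {
    public static void main(String[] args) {
      System.out.println(new BigDecimal("3.789").setScale(1, RoundingMode.FLOOR));  // 3.7
      System.out.println(new BigDecimal("3.789").setScale(0, RoundingMode.FLOOR));  // 3 (the default scale of 0)
      System.out.println(new BigDecimal("-3.781").setScale(1, RoundingMode.FLOOR)); // -3.8, floor goes toward negative infinity
    }
  }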
+ */ + public static Expression create(Expression expr) throws SQLException { + return create(expr, 0); + } - @Override - public String getName() { - return FloorFunction.NAME; - } + @Override + protected RoundingMode getRoundingMode() { + return RoundingMode.FLOOR; + } + + @Override + public String getName() { + return FloorFunction.NAME; + } - /** - * {@inheritDoc } - */ - @Override - protected KeyRange getInputRangeProducing(BigDecimal result) { - if(!hasEnoughPrecisionToProduce(result)) { - throw new IllegalArgumentException("Cannot produce input range for decimal " + result - + ", not enough precision with scale " + getRoundingScale()); - } - byte[] lowerRange = PDecimal.INSTANCE.toBytes(result); - byte[] upperRange = PDecimal.INSTANCE.toBytes(stepNextInScale(result)); - return KeyRange.getKeyRange(lowerRange, upperRange); + /** + * {@inheritDoc } + */ + @Override + protected KeyRange getInputRangeProducing(BigDecimal result) { + if (!hasEnoughPrecisionToProduce(result)) { + throw new IllegalArgumentException("Cannot produce input range for decimal " + result + + ", not enough precision with scale " + getRoundingScale()); } + byte[] lowerRange = PDecimal.INSTANCE.toBytes(result); + byte[] upperRange = PDecimal.INSTANCE.toBytes(stepNextInScale(result)); + return KeyRange.getKeyRange(lowerRange, upperRange); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorFunction.java index 2072a4a2ca9..5c47f84201e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,32 +30,28 @@ import org.apache.phoenix.schema.types.PVarchar; /** - * * Base class for built-in FLOOR function. 
- * */ -@BuiltInFunction(name = FloorFunction.NAME, - nodeClass = FloorParseNode.class, - args = { - @Argument(allowedTypes={PTimestamp.class, PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {FloorDateExpression.class, FloorDecimalExpression.class} - ) +@BuiltInFunction(name = FloorFunction.NAME, nodeClass = FloorParseNode.class, + args = { @Argument(allowedTypes = { PTimestamp.class, PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, + derivedFunctions = { FloorDateExpression.class, FloorDecimalExpression.class }) public abstract class FloorFunction extends ScalarFunction { - - public static final String NAME = "FLOOR"; - - public FloorFunction() {} - - public FloorFunction(List children) { - super(children); - } - - @Override - public String getName() { - return NAME; - } + + public static final String NAME = "FLOOR"; + + public FloorFunction() { + } + + public FloorFunction(List children) { + super(children); + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorMonthExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorMonthExpression.java index 1d5733d8763..997765bacae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorMonthExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorMonthExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * - * Floor function that rounds up the {@link DateTime} to start of month. + * Floor function that rounds up the {@link DateTime} to start of month. 
*/ public class FloorMonthExpression extends RoundJodaDateExpression { - public FloorMonthExpression() { - super(); - } - - public FloorMonthExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime datetime) { - return datetime.monthOfYear().roundFloorCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } - - @Override - public long rangeUpper(long time) { - // ceil(time + 1) -1 - return (new DateTime(time + 1, GJChronology.getInstanceUTC())).monthOfYear() - .roundCeilingCopy().getMillis() - 1; - } + public FloorMonthExpression() { + super(); + } + + public FloorMonthExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime datetime) { + return datetime.monthOfYear().roundFloorCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } + + @Override + public long rangeUpper(long time) { + // ceil(time + 1) -1 + return (new DateTime(time + 1, GJChronology.getInstanceUTC())).monthOfYear().roundCeilingCopy() + .getMillis() - 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorWeekExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorWeekExpression.java index 73cd048ae71..9bf72afa8d8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorWeekExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorWeekExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,33 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * Floor function that rounds up the {@link DateTime} to start of week. + * Floor function that rounds up the {@link DateTime} to start of week. 
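A standalone joda-time sketch (not part of the patch) of the floor/ceiling rounding that FloorMonthExpression above, and the week/year variants below, delegate to; it assumes joda-time 2.x on the classpath, which these classes already require.

  import org.joda.time.DateTime;
  import org.joda.time.chrono.GJChronology;

  public class JodaFloorDemo {
    public static void main(String[] args) {
      DateTime dt = new DateTime(2024, 11, 6, 19, 46, 0, 0, GJChronology.getInstanceUTC());
      // Start of month, as in FloorMonthExpression.roundDateTime
      System.out.println(dt.monthOfYear().roundFloorCopy());      // 2024-11-01T00:00:00.000Z
      // rangeUpper uses ceil(time + 1) - 1 to land on the last millisecond of the bucket
      long upper = new DateTime(dt.getMillis() + 1, GJChronology.getInstanceUTC())
          .monthOfYear().roundCeilingCopy().getMillis() - 1;
      System.out.println(new DateTime(upper, GJChronology.getInstanceUTC())); // 2024-11-30T23:59:59.999Z
    }
  }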
*/ public class FloorWeekExpression extends RoundJodaDateExpression { - public FloorWeekExpression() { - super(); - } - - public FloorWeekExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime datetime) { - return datetime.weekOfWeekyear().roundFloorCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } - - @Override - public long rangeUpper(long time) { - // ceil(time + 1) -1 - return (new DateTime(time + 1, GJChronology.getInstanceUTC())).weekOfWeekyear() - .roundCeilingCopy().getMillis() - 1; - } + public FloorWeekExpression() { + super(); + } + + public FloorWeekExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime datetime) { + return datetime.weekOfWeekyear().roundFloorCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } + + @Override + public long rangeUpper(long time) { + // ceil(time + 1) -1 + return (new DateTime(time + 1, GJChronology.getInstanceUTC())).weekOfWeekyear() + .roundCeilingCopy().getMillis() - 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorYearExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorYearExpression.java index 77c926dfdc5..54c89631d9a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorYearExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FloorYearExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,33 @@ import org.joda.time.chrono.GJChronology; /** - * - * Floor function that rounds up the {@link DateTime} to start of year. + * Floor function that rounds up the {@link DateTime} to start of year. 
*/ public class FloorYearExpression extends RoundJodaDateExpression { - public FloorYearExpression() { - super(); - } - - public FloorYearExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime datetime) { - return datetime.year().roundFloorCopy().getMillis(); - } - - @Override - public long rangeLower(long time) { - // floor - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); - } - - @Override - public long rangeUpper(long time) { - // ceil(time + 1) -1 - return (new DateTime(time + 1, GJChronology.getInstanceUTC())).year().roundCeilingCopy() - .getMillis() - 1; - } + public FloorYearExpression() { + super(); + } + + public FloorYearExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime datetime) { + return datetime.year().roundFloorCopy().getMillis(); + } + + @Override + public long rangeLower(long time) { + // floor + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } + + @Override + public long rangeUpper(long time) { + // ceil(time + 1) -1 + return (new DateTime(time + 1, GJChronology.getInstanceUTC())).year().roundCeilingCopy() + .getMillis() - 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionArgumentType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionArgumentType.java index faa473e8625..572394118ee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionArgumentType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionArgumentType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.function; import java.text.DecimalFormat; @@ -24,30 +23,30 @@ import org.apache.phoenix.util.DateUtil; public enum FunctionArgumentType { - TEMPORAL { - @Override - public Format getFormatter(String format) { - return DateUtil.getDateFormatter(format); - } - }, - NUMERIC { - @Override - public Format getFormatter(String format) { - return new DecimalFormat(format); - } - }, - CHAR { - @Override - public Format getFormatter(String format) { - return getDecimalFormat(format); - } - }; - - public abstract Format getFormatter(String format); - - private static DecimalFormat getDecimalFormat(String formatString) { - DecimalFormat result = new DecimalFormat(formatString); - result.setParseBigDecimal(true); - return result; + TEMPORAL { + @Override + public Format getFormatter(String format) { + return DateUtil.getDateFormatter(format); + } + }, + NUMERIC { + @Override + public Format getFormatter(String format) { + return new DecimalFormat(format); } + }, + CHAR { + @Override + public Format getFormatter(String format) { + return getDecimalFormat(format); + } + }; + + public abstract Format getFormatter(String format); + + private static DecimalFormat getDecimalFormat(String formatString) { + DecimalFormat result = new DecimalFormat(formatString); + result.setParseBigDecimal(true); + return result; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java index bc9fa9fec1a..7182e4c06b6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,57 +23,54 @@ import org.apache.phoenix.expression.Expression; /** - * * Compiled representation of a built-in function - * - * * @since 0.1 */ public abstract class FunctionExpression extends BaseCompoundExpression { - public enum OrderPreserving {NO, YES_IF_LAST, YES; + public enum OrderPreserving { + NO, + YES_IF_LAST, + YES; public OrderPreserving combine(OrderPreserving that) { - if (that == null) { - return this; - } - return OrderPreserving.values()[Math.min(this.ordinal(), that.ordinal())]; - }}; - - public FunctionExpression() { - } - - public FunctionExpression(List children) { - super(children); - } - - /** - * Determines whether or not the result of the function invocation - * will be ordered in the same way as the input to the function. - * Returning YES enables an optimization to occur when a - * GROUP BY contains function invocations using the leading PK - * column(s). - * @return YES if the function invocation will always preserve order for - * the inputs versus the outputs and false otherwise, YES_IF_LAST if the - * function preserves order, but any further column reference would not - * continue to preserve order, and NO if the function does not preserve - * order. 
- */ - public OrderPreserving preservesOrder() { - return OrderPreserving.NO; + if (that == null) { + return this; + } + return OrderPreserving.values()[Math.min(this.ordinal(), that.ordinal())]; } + }; + + public FunctionExpression() { + } - abstract public String getName(); - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(getName() + "("); - if (children.size()==0) - return buf.append(")").toString(); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ", "); - } - buf.append(children.get(children.size()-1) + ")"); - return buf.toString(); + public FunctionExpression(List children) { + super(children); + } + + /** + * Determines whether or not the result of the function invocation will be ordered in the same way + * as the input to the function. Returning YES enables an optimization to occur when a GROUP BY + * contains function invocations using the leading PK column(s). + * @return YES if the function invocation will always preserve order for the inputs versus the + * outputs and false otherwise, YES_IF_LAST if the function preserves order, but any + * further column reference would not continue to preserve order, and NO if the function + * does not preserve order. + */ + public OrderPreserving preservesOrder() { + return OrderPreserving.NO; + } + + abstract public String getName(); + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(getName() + "("); + if (children.size() == 0) return buf.append(")").toString(); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + ", "); } - + buf.append(children.get(children.size() - 1) + ")"); + return buf.toString(); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetBitFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetBitFunction.java index b6f2ac187a9..8538d70e79e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetBitFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetBitFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
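Aside (not part of the patch): OrderPreserving.combine above always keeps the weaker of the two guarantees, because it takes the smaller ordinal (NO < YES_IF_LAST < YES). A quick illustrative check:

  FunctionExpression.OrderPreserving a = FunctionExpression.OrderPreserving.YES;
  FunctionExpression.OrderPreserving b = FunctionExpression.OrderPreserving.YES_IF_LAST;
  System.out.println(a.combine(b));    // YES_IF_LAST, the weaker guarantee wins
  System.out.println(b.combine(null)); // YES_IF_LAST, null means "no other opinion"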
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,66 +32,69 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; -@BuiltInFunction(name = GetBitFunction.NAME, args = { - @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), - @Argument(allowedTypes = { PInteger.class }) }) +@BuiltInFunction(name = GetBitFunction.NAME, + args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), + @Argument(allowedTypes = { PInteger.class }) }) public class GetBitFunction extends PrefixFunction { - public static final String NAME = "GET_BIT"; + public static final String NAME = "GET_BIT"; - private Integer offsetPreCompute; + private Integer offsetPreCompute; - public GetBitFunction() { - } + public GetBitFunction() { + } - public GetBitFunction(List children) throws SQLException { - super(children); - init(); - } + public GetBitFunction(List children) throws SQLException { + super(children); + init(); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get offset parameter - int offset; - if (offsetPreCompute == null) { - Expression offsetExpr = children.get(1); - if (!offsetExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - } else offset = offsetPreCompute; - // get binary data parameter - Expression dataExpr = children.get(0); - if (!dataExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - int len = ptr.getLength() * Byte.SIZE; - offset = (offset % len + len) % len; - // set result - ((PBinaryBase) dataExpr.getDataType()).getBit(ptr, dataExpr.getSortOrder(), offset, ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get offset parameter + int offset; + if (offsetPreCompute == null) { + Expression offsetExpr = children.get(1); + if (!offsetExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + } else offset = offsetPreCompute; + // get binary data parameter + Expression dataExpr = children.get(0); + if (!dataExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int len = ptr.getLength() * Byte.SIZE; + offset = (offset % len + len) % len; + // set result + ((PBinaryBase) dataExpr.getDataType()).getBit(ptr, dataExpr.getSortOrder(), offset, ptr); + return true; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private void init() { - Expression offsetExpr = children.get(1); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - if (offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS - && offsetExpr.evaluate(null, ptr)) { - offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - } else offsetPreCompute = null; - } + private void init() { + Expression offsetExpr = children.get(1); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); 
+ if ( + offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS + && offsetExpr.evaluate(null, ptr) + ) { + offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + } else offsetPreCompute = null; + } - @Override - public OrderPreserving preservesOrder() { - return (offsetPreCompute != null && offsetPreCompute == 0) ? OrderPreserving.YES_IF_LAST - : OrderPreserving.NO; - } + @Override + public OrderPreserving preservesOrder() { + return (offsetPreCompute != null && offsetPreCompute == 0) + ? OrderPreserving.YES_IF_LAST + : OrderPreserving.NO; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetByteFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetByteFunction.java index 44f61a8950e..db1b13cf9af 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetByteFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/GetByteFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,66 +32,69 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; -@BuiltInFunction(name = GetByteFunction.NAME, args = { - @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), - @Argument(allowedTypes = { PInteger.class }) }) +@BuiltInFunction(name = GetByteFunction.NAME, + args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), + @Argument(allowedTypes = { PInteger.class }) }) public class GetByteFunction extends PrefixFunction { - public static final String NAME = "GET_BYTE"; + public static final String NAME = "GET_BYTE"; - private Integer offsetPreCompute; + private Integer offsetPreCompute; - public GetByteFunction() { - } + public GetByteFunction() { + } - public GetByteFunction(List children) throws SQLException { - super(children); - init(); - } + public GetByteFunction(List children) throws SQLException { + super(children); + init(); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get offset parameter - int offset; - if (offsetPreCompute == null) { - Expression offsetExpr = children.get(1); - if (!offsetExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - } else offset = offsetPreCompute; - // get binary data parameter - Expression dataExpr = children.get(0); - if (!dataExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - int len = ptr.getLength(); - offset = (offset % len + len) % len; - // set result - ((PBinaryBase) dataExpr.getDataType()).getByte(ptr, dataExpr.getSortOrder(), offset, ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get offset parameter + int offset; + if (offsetPreCompute == null) { + Expression offsetExpr = children.get(1); + if (!offsetExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + 
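Aside (not part of the patch): the offset normalization used by GET_BIT and GET_BYTE above, ((offset % len) + len) % len, maps negative and out-of-range offsets into [0, len), so negative offsets count from the end of the value. A plain-Java check:

  public class OffsetWrapDemo {
    public static void main(String[] args) {
      int len = 4; // e.g. a 4-byte value
      System.out.println(((-1 % len) + len) % len); // 3 -> last position, counted from the end
      System.out.println(((5 % len) + len) % len);  // 1 -> wraps past the end
      System.out.println(((2 % len) + len) % len);  // 2 -> in-range offsets are unchanged
    }
  }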
offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + } else offset = offsetPreCompute; + // get binary data parameter + Expression dataExpr = children.get(0); + if (!dataExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int len = ptr.getLength(); + offset = (offset % len + len) % len; + // set result + ((PBinaryBase) dataExpr.getDataType()).getByte(ptr, dataExpr.getSortOrder(), offset, ptr); + return true; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private void init() { - Expression offsetExpr = children.get(1); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - if (offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS - && offsetExpr.evaluate(null, ptr)) { - offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - } else offsetPreCompute = null; - } + private void init() { + Expression offsetExpr = children.get(1); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + if ( + offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS + && offsetExpr.evaluate(null, ptr) + ) { + offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + } else offsetPreCompute = null; + } - @Override - public OrderPreserving preservesOrder() { - return (offsetPreCompute != null && offsetPreCompute == 0) ? OrderPreserving.YES_IF_LAST - : OrderPreserving.NO; - } + @Override + public OrderPreserving preservesOrder() { + return (offsetPreCompute != null && offsetPreCompute == 0) + ? OrderPreserving.YES_IF_LAST + : OrderPreserving.NO; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/HourFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/HourFunction.java index f9ceb68637a..7a320b7af7f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/HourFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/HourFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the HOUR() buildin. Input Date/Timestamp/Time. - * Returns an integer from 0 to 23 representing the hour component of time - * + * Implementation of the HOUR() buildin. Input Date/Timestamp/Time. 
Returns an integer from 0 to 23 + * representing the hour component of time */ -@BuiltInFunction(name=HourFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = HourFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class HourFunction extends DateScalarFunction { - public static final String NAME = "HOUR"; + public static final String NAME = "HOUR"; - public HourFunction() { - } + public HourFunction() { + } - public HourFunction(List children) throws SQLException { - super(children); - } + public HourFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int hour = dt.getHourOfDay(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(hour, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int hour = dt.getHourOfDay(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(hour, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/IndexStateNameFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/IndexStateNameFunction.java index d8440df3dc7..e64f63577f6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/IndexStateNameFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/IndexStateNameFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,61 +21,54 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Function used to get the index state name from the serialized byte value - * Usage: - * IndexStateName('a') - * will return 'ACTIVE' - * - * + * Function used to get the index state name from the serialized byte value Usage: + * IndexStateName('a') will return 'ACTIVE' * @since 2.1 */ -@BuiltInFunction(name=IndexStateNameFunction.NAME, args= { - @Argument(allowedTypes= PChar.class)} ) +@BuiltInFunction(name = IndexStateNameFunction.NAME, + args = { @Argument(allowedTypes = PChar.class) }) public class IndexStateNameFunction extends ScalarFunction { - public static final String NAME = "IndexStateName"; + public static final String NAME = "IndexStateName"; - public IndexStateNameFunction() { - } - - public IndexStateNameFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - byte serializedByte = ptr.get()[ptr.getOffset()]; - PIndexState indexState = PIndexState.fromSerializedValue(serializedByte); - ptr.set(indexState.toBytes()); - return true; - } + public IndexStateNameFunction() { + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + public IndexStateNameFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + byte serializedByte = ptr.get()[ptr.getOffset()]; + PIndexState indexState = PIndexState.fromSerializedValue(serializedByte); + ptr.set(indexState.toBytes()); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java index e6b4c16317c..e0855b2bb22 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,97 +31,95 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; -@BuiltInFunction(name=InstrFunction.NAME, args={ - @Argument(allowedTypes={ PVarchar.class }), - @Argument(allowedTypes={ PVarchar.class })}) -public class InstrFunction extends ScalarFunction{ - - public static final String NAME = "INSTR"; - - private String literalSourceStr = null; - private String literalSearchStr = null; - - public InstrFunction() { } - - public InstrFunction(List children) { - super(children); - init(); - } - - private void init() { - literalSourceStr = maybeExtractLiteralString(getChildren().get(0)); - literalSearchStr = maybeExtractLiteralString(getChildren().get(1)); - } +@BuiltInFunction(name = InstrFunction.NAME, args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }) }) +public class InstrFunction extends ScalarFunction { + + public static final String NAME = "INSTR"; + + private String literalSourceStr = null; + private String literalSearchStr = null; + + public InstrFunction() { + } - /** - * Extracts the string-representation of {@code expr} only if {@code expr} is a - * non-null {@link LiteralExpression}. - * - * @param expr An Expression. - * @return The string value for the expression or null - */ - private String maybeExtractLiteralString(Expression expr) { - if (expr instanceof LiteralExpression) { - // Whether the value is null or non-null, we can give it back right away - return (String) ((LiteralExpression) expr).getValue(); - } - return null; + public InstrFunction(List children) { + super(children); + init(); + } + + private void init() { + literalSourceStr = maybeExtractLiteralString(getChildren().get(0)); + literalSearchStr = maybeExtractLiteralString(getChildren().get(1)); + } + + /** + * Extracts the string-representation of {@code expr} only if {@code expr} is a non-null + * {@link LiteralExpression}. + * @param expr An Expression. 
+ * @return The string value for the expression or null + */ + private String maybeExtractLiteralString(Expression expr) { + if (expr instanceof LiteralExpression) { + // Whether the value is null or non-null, we can give it back right away + return (String) ((LiteralExpression) expr).getValue(); } - - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - String sourceStr = literalSourceStr; - if (sourceStr == null) { - Expression child = getChildren().get(0); - - if (!child.evaluate(tuple, ptr)) { - return false; - } - - // We need something non-empty to search against - if (ptr.getLength() == 0) { - return true; - } - - sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, child.getSortOrder()); - } - - String searchStr = literalSearchStr; - // A literal was not provided, try to evaluate the expression to a literal - if (searchStr == null){ - Expression child = getChildren().get(1); - - if (!child.evaluate(tuple, ptr)) { - return false; - } - - // A null (or zero-length) search string - if (ptr.getLength() == 0) { - return true; - } - - searchStr = (String) PVarchar.INSTANCE.toObject(ptr, child.getSortOrder()); - } - - int position = sourceStr.indexOf(searchStr) + 1; - ptr.set(PInteger.INSTANCE.toBytes(position)); + return null; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + String sourceStr = literalSourceStr; + if (sourceStr == null) { + Expression child = getChildren().get(0); + + if (!child.evaluate(tuple, ptr)) { + return false; + } + + // We need something non-empty to search against + if (ptr.getLength() == 0) { return true; - } + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, child.getSortOrder()); } - @Override - public String getName() { - return NAME; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); + String searchStr = literalSearchStr; + // A literal was not provided, try to evaluate the expression to a literal + if (searchStr == null) { + Expression child = getChildren().get(1); + + if (!child.evaluate(tuple, ptr)) { + return false; + } + + // A null (or zero-length) search string + if (ptr.getLength() == 0) { + return true; + } + + searchStr = (String) PVarchar.INSTANCE.toObject(ptr, child.getSortOrder()); } + + int position = sourceStr.indexOf(searchStr) + 1; + ptr.set(PInteger.INSTANCE.toBytes(position)); + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java index 47652f94e73..acadf8c2aa9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
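Aside (not part of the patch): the position INSTR computes above is simply String.indexOf plus one, so matches are reported 1-based and a miss yields 0:

  System.out.println("foobarbaz".indexOf("bar") + 1); // 4
  System.out.println("foobarbaz".indexOf("zzz") + 1); // 0 -> not found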
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,118 +36,124 @@ @BuiltInFunction(name = InvertFunction.NAME, args = { @Argument() }) public class InvertFunction extends ScalarFunction { - public static final String NAME = "INVERT"; + public static final String NAME = "INVERT"; - public InvertFunction() throws SQLException {} + public InvertFunction() throws SQLException { + } - public InvertFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getChildExpression().evaluate(tuple, ptr)) { return false; } - if (ptr.getLength() == 0) { return true; } - PDataType type = getDataType(); - // FIXME: losing rowKeyOrderOptimizable here - type.coerceBytes(ptr, type, getChildExpression().getSortOrder(), getSortOrder()); - return true; - } - - @Override - public SortOrder getSortOrder() { - return getChildExpression().getSortOrder() == SortOrder.ASC ? SortOrder.DESC : SortOrder.ASC; - } + public InvertFunction(List children) throws SQLException { + super(children); + } - @Override - public PDataType getDataType() { - return getChildExpression().getDataType(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getChildExpression().evaluate(tuple, ptr)) { + return false; } - - @Override - public Integer getMaxLength() { - return getChildExpression().getMaxLength(); + if (ptr.getLength() == 0) { + return true; } - - @Override - public boolean isNullable() { - return getChildExpression().isNullable(); + PDataType type = getDataType(); + // FIXME: losing rowKeyOrderOptimizable here + type.coerceBytes(ptr, type, getChildExpression().getSortOrder(), getSortOrder()); + return true; + } + + @Override + public SortOrder getSortOrder() { + return getChildExpression().getSortOrder() == SortOrder.ASC ? 
SortOrder.DESC : SortOrder.ASC; + } + + @Override + public PDataType getDataType() { + return getChildExpression().getDataType(); + } + + @Override + public Integer getMaxLength() { + return getChildExpression().getMaxLength(); + } + + @Override + public boolean isNullable() { + return getChildExpression().isNullable(); + } + + @Override + public String getName() { + return NAME; + } + + /** + * INVERT may be optimized through + */ + @Override + public int getKeyFormationTraversalIndex() { + return 0; + } + + /** + * Invert the childPart key range + */ + @Override + public KeyPart newKeyPart(final KeyPart childPart) { + return new InvertKeyPart(childPart); + } + + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } + + private Expression getChildExpression() { + return children.get(0); + } + + private static class InvertKeyPart implements KeyPart { + + private final KeyPart childPart; + + public InvertKeyPart(KeyPart childPart) { + this.childPart = childPart; } @Override - public String getName() { - return NAME; + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + KeyRange range = childPart.getKeyRange(op, rhs); + byte[] lower = range.getLowerRange(); + if (!range.lowerUnbound()) { + lower = SortOrder.invert(lower, 0, lower.length); + } + byte[] upper; + if (range.isSingleKey()) { + upper = lower; + } else { + upper = range.getUpperRange(); + if (!range.upperUnbound()) { + upper = SortOrder.invert(upper, 0, upper.length); + } + } + range = + KeyRange.getKeyRange(lower, range.isLowerInclusive(), upper, range.isUpperInclusive()); + if (getColumn().getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; } - /** - * INVERT may be optimized through - */ @Override - public int getKeyFormationTraversalIndex() { - return 0; + public Set getExtractNodes() { + return childPart.getExtractNodes(); } - /** - * Invert the childPart key range - */ @Override - public KeyPart newKeyPart(final KeyPart childPart) { - return new InvertKeyPart(childPart); + public PColumn getColumn() { + return childPart.getColumn(); } @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } - - private Expression getChildExpression() { - return children.get(0); - } - - private static class InvertKeyPart implements KeyPart { - - private final KeyPart childPart; - - public InvertKeyPart(KeyPart childPart) { - this.childPart = childPart; - } - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - KeyRange range = childPart.getKeyRange(op, rhs); - byte[] lower = range.getLowerRange(); - if (!range.lowerUnbound()) { - lower = SortOrder.invert(lower, 0, lower.length); - } - byte[] upper; - if (range.isSingleKey()) { - upper = lower; - } else { - upper = range.getUpperRange(); - if (!range.upperUnbound()) { - upper = SortOrder.invert(upper, 0, upper.length); - } - } - range = KeyRange.getKeyRange(lower, range.isLowerInclusive(), upper, range.isUpperInclusive()); - if (getColumn().getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; - } - - @Override - public Set getExtractNodes() { - return childPart.getExtractNodes(); - } - - @Override - public PColumn getColumn() { - return childPart.getColumn(); - } - - @Override - public PTable getTable() { - return childPart.getTable(); - } + public PTable getTable() { + return childPart.getTable(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java index 733f6fc7e08..3756705b30f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,39 +30,39 @@ public abstract class JavaMathOneArgumentFunction extends ScalarFunction { - public JavaMathOneArgumentFunction() { - } + public JavaMathOneArgumentFunction() { + } - public JavaMathOneArgumentFunction(List children) throws SQLException { - super(children); - } + public JavaMathOneArgumentFunction(List children) throws SQLException { + super(children); + } - protected abstract double compute(double firstArg); + protected abstract double compute(double firstArg); - static double getArg(Expression exp, ImmutableBytesWritable ptr) { - if (exp.getDataType() == PDecimal.INSTANCE) { - return ((BigDecimal) exp.getDataType().toObject(ptr, exp.getSortOrder())).doubleValue(); - } else { - return exp.getDataType().getCodec().decodeDouble(ptr, exp.getSortOrder()); - } + static double getArg(Expression exp, ImmutableBytesWritable ptr) { + if (exp.getDataType() == PDecimal.INSTANCE) { + return ((BigDecimal) exp.getDataType().toObject(ptr, exp.getSortOrder())).doubleValue(); + } else { + return exp.getDataType().getCodec().decodeDouble(ptr, exp.getSortOrder()); } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - PDataType returnType = getDataType(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + PDataType returnType = getDataType(); - Expression arg1Expr = children.get(0); - if (!arg1Expr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - double arg1 = getArg(arg1Expr, ptr); + Expression arg1Expr = children.get(0); + if (!arg1Expr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + double arg1 = getArg(arg1Expr, ptr); - ptr.set(new byte[returnType.getByteSize()]); - returnType.getCodec().encodeDouble(compute(arg1), ptr); - return true; - } + ptr.set(new byte[returnType.getByteSize()]); + returnType.getCodec().encodeDouble(compute(arg1), ptr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java index 0d857973926..59f68457cb2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
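For context (not part of the patch, and purely hypothetical): the template above is filled in by overriding compute() and getName(); a minimal subclass would look roughly like this. A real built-in would also carry the @BuiltInFunction annotation and the constructor taking the children list, as the other functions in this patch do.

  // Hypothetical example subclass; the class and function name are made up for illustration.
  public class AbsValueFunction extends JavaMathOneArgumentFunction {
    public static final String NAME = "ABS_VALUE"; // illustrative name only

    @Override
    protected double compute(double firstArg) {
      return Math.abs(firstArg);
    }

    @Override
    public String getName() {
      return NAME;
    }
  }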
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,41 +29,41 @@ public abstract class JavaMathTwoArgumentFunction extends ScalarFunction { - public JavaMathTwoArgumentFunction() { - } - - public JavaMathTwoArgumentFunction(List children) throws SQLException { - super(children); - } + public JavaMathTwoArgumentFunction() { + } - protected abstract double compute(double firstArg, double secondArg); + public JavaMathTwoArgumentFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - PDataType returnType = getDataType(); + protected abstract double compute(double firstArg, double secondArg); - Expression arg1Expr = children.get(0); - if (!arg1Expr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - double arg1 = JavaMathOneArgumentFunction.getArg(arg1Expr, ptr); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + PDataType returnType = getDataType(); - Expression arg2Expr = (children.size() <= 1) ? null : children.get(1); - double arg2; - if (arg2Expr != null && !arg2Expr.evaluate(tuple, ptr)) return false; - if (arg2Expr == null || ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } else { - arg2 = JavaMathOneArgumentFunction.getArg(arg2Expr, ptr); - } + Expression arg1Expr = children.get(0); + if (!arg1Expr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + double arg1 = JavaMathOneArgumentFunction.getArg(arg1Expr, ptr); - ptr.set(new byte[returnType.getByteSize()]); - returnType.getCodec().encodeDouble(compute(arg1, arg2), ptr); - return true; + Expression arg2Expr = (children.size() <= 1) ? null : children.get(1); + double arg2; + if (arg2Expr != null && !arg2Expr.evaluate(tuple, ptr)) return false; + if (arg2Expr == null || ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; + } else { + arg2 = JavaMathOneArgumentFunction.getArg(arg2Expr, ptr); } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + ptr.set(new byte[returnType.getByteSize()]); + returnType.getCodec().encodeDouble(compute(arg1, arg2), ptr); + return true; + } + + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonExistsFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonExistsFunction.java index 5b82361eeb4..9bd42402386 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonExistsFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonExistsFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.expression.function; +import java.util.List; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode; @@ -31,80 +33,76 @@ import org.apache.phoenix.util.json.JsonDataFormat; import org.apache.phoenix.util.json.JsonDataFormatFactory; -import java.util.List; - /** * Built-in function for JSON_EXISTS JSON_EXISTS(, ) JSON_EXISTS * determines whether a JSON value satisfies a search criterion. */ @FunctionParseNode.BuiltInFunction(name = JsonExistsFunction.NAME, - nodeClass = JsonExistsParseNode.class, - args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarbinary.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) + nodeClass = JsonExistsParseNode.class, + args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) public class JsonExistsFunction extends ScalarFunction { - public static final String NAME = "JSON_EXISTS"; - private final JsonDataFormat - jsonDataFormat = - JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); + public static final String NAME = "JSON_EXISTS"; + private final JsonDataFormat jsonDataFormat = + JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); - // This is called from ExpressionType newInstance - public JsonExistsFunction() { + // This is called from ExpressionType newInstance + public JsonExistsFunction() { - } + } - public JsonExistsFunction(List children) { - super(children); - Preconditions.checkNotNull(getJSONPathExpr()); - } + public JsonExistsFunction(List children) { + super(children); + Preconditions.checkNotNull(getJSONPathExpr()); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getColValExpr().evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } - - // Column name or JSON string - Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - - if (!getJSONPathExpr().evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - return false; - } - - String - jsonPathExprStr = - (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); - if (jsonPathExprStr == null) { - return false; - } - - boolean isPathValid = jsonDataFormat.isPathValid(top, jsonPathExprStr); - ptr.set(PBoolean.INSTANCE.toBytes(isPathValid)); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getColValExpr().evaluate(tuple, ptr)) { + return false; } + if (ptr == null || ptr.getLength() == 0) { + return false; + } + + // Column name or JSON string + Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - private Expression getColValExpr() { - return getChildren().get(0); + if (!getJSONPathExpr().evaluate(tuple, ptr)) { + return false; } - private Expression getJSONPathExpr() { - return getChildren().get(1); + if (ptr.getLength() == 0) { + 
return false; } - @Override - public PDataType getDataType() { - return PBoolean.INSTANCE; + String jsonPathExprStr = + (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); + if (jsonPathExprStr == null) { + return false; } + + boolean isPathValid = jsonDataFormat.isPathValid(top, jsonPathExprStr); + ptr.set(PBoolean.INSTANCE.toBytes(isPathValid)); + return true; + } + + private Expression getColValExpr() { + return getChildren().get(0); + } + + private Expression getJSONPathExpr() { + return getChildren().get(1); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonModifyFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonModifyFunction.java index a66f67be440..b78c90da569 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonModifyFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonModifyFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.expression.function; +import java.nio.ByteBuffer; +import java.util.List; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode; @@ -29,92 +32,87 @@ import org.apache.phoenix.util.json.JsonDataFormat; import org.apache.phoenix.util.json.JsonDataFormatFactory; -import java.nio.ByteBuffer; -import java.util.List; - /** * Built-in function for JSON_MODIFY JSON_MODIFY(, [returning * ], newValue) Updates the value of a property in a JSON string and returns the updated JSON * string. 
*/ @FunctionParseNode.BuiltInFunction(name = JsonModifyFunction.NAME, - nodeClass = JsonModifyParseNode.class, - args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarchar.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) + nodeClass = JsonModifyParseNode.class, + args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) public class JsonModifyFunction extends ScalarFunction { - public static final String NAME = "JSON_MODIFY"; - private final JsonDataFormat - jsonDataFormat = - JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); + public static final String NAME = "JSON_MODIFY"; + private final JsonDataFormat jsonDataFormat = + JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); - // This is called from ExpressionType newInstance - public JsonModifyFunction() { + // This is called from ExpressionType newInstance + public JsonModifyFunction() { - } + } - public JsonModifyFunction(List children) { - super(children); - Preconditions.checkNotNull(getJSONPathExpr()); - } + public JsonModifyFunction(List children) { + super(children); + Preconditions.checkNotNull(getJSONPathExpr()); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getColValExpr().evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } - - // Column name or JSON string - Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - - if (!getJSONPathExpr().evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - return false; - } - - String - jsonPathExprStr = - (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); - if (jsonPathExprStr == null) { - return false; - } - - if (!getNewValueExpr().evaluate(tuple, ptr)) { - return false; - } - - String newVal = (String) PVarchar.INSTANCE.toObject(ptr, getNewValueExpr().getSortOrder()); - ByteBuffer buffer = jsonDataFormat.updateValue(top, jsonPathExprStr, newVal); - ptr.set(buffer.array(), buffer.arrayOffset(), buffer.limit()); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getColValExpr().evaluate(tuple, ptr)) { + return false; } + if (ptr == null || ptr.getLength() == 0) { + return false; + } + + // Column name or JSON string + Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - private Expression getNewValueExpr() { - return getChildren().get(2); + if (!getJSONPathExpr().evaluate(tuple, ptr)) { + return false; } - private Expression getColValExpr() { - return getChildren().get(0); + if (ptr.getLength() == 0) { + return false; } - private Expression getJSONPathExpr() { - return getChildren().get(1); + String jsonPathExprStr = + (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); + if (jsonPathExprStr == null) { + return false; } - @Override - public PDataType getDataType() { - return PJson.INSTANCE; + if (!getNewValueExpr().evaluate(tuple, ptr)) { + return false; } -} \ No newline at end of file + + String newVal = (String) PVarchar.INSTANCE.toObject(ptr, getNewValueExpr().getSortOrder()); 
+ ByteBuffer buffer = jsonDataFormat.updateValue(top, jsonPathExprStr, newVal); + ptr.set(buffer.array(), buffer.arrayOffset(), buffer.limit()); + return true; + } + + private Expression getNewValueExpr() { + return getChildren().get(2); + } + + private Expression getColValExpr() { + return getChildren().get(0); + } + + private Expression getJSONPathExpr() { + return getChildren().get(1); + } + + @Override + public PDataType getDataType() { + return PJson.INSTANCE; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonQueryFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonQueryFunction.java index a2a3d00e42b..555403a8969 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonQueryFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonQueryFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.expression.function; +import java.sql.Types; +import java.util.List; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode; @@ -30,91 +33,86 @@ import org.apache.phoenix.util.json.JsonDataFormat; import org.apache.phoenix.util.json.JsonDataFormatFactory; -import java.sql.Types; -import java.util.List; - /** * Built-in function for JSON_QUERY JSON_QUERY(, [returning * ]) Extracts an object or an array from a JSON string. 
*/ @FunctionParseNode.BuiltInFunction(name = JsonQueryFunction.NAME, - nodeClass = JsonQueryParseNode.class, - args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarbinary.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) + nodeClass = JsonQueryParseNode.class, + args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) public class JsonQueryFunction extends ScalarFunction { - public static final String NAME = "JSON_QUERY"; - private final JsonDataFormat - jsonDataFormat = - JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); + public static final String NAME = "JSON_QUERY"; + private final JsonDataFormat jsonDataFormat = + JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); - // This is called from ExpressionType newInstance - public JsonQueryFunction() { + // This is called from ExpressionType newInstance + public JsonQueryFunction() { - } + } - public JsonQueryFunction(List children) { - super(children); - Preconditions.checkNotNull(getJSONPathExpr()); - } + public JsonQueryFunction(List children) { + super(children); + Preconditions.checkNotNull(getJSONPathExpr()); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getColValExpr().evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } - - // Column name or JSON string - Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - - if (!getJSONPathExpr().evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - return false; - } - - String - jsonPathExprStr = - (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); - if (jsonPathExprStr == null) { - return false; - } - Object value = jsonDataFormat.getValue(top, jsonPathExprStr); - int valueType = jsonDataFormat.getValueType(top, jsonPathExprStr); - if (value != null) { - switch (valueType) { - case Types.ARRAY: - case Types.NVARCHAR: - ptr.set(PVarchar.INSTANCE.toBytes(value)); - break; - default: - return false; - } - } - - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getColValExpr().evaluate(tuple, ptr)) { + return false; + } + if (ptr == null || ptr.getLength() == 0) { + return false; } - private Expression getColValExpr() { - return getChildren().get(0); + // Column name or JSON string + Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); + + if (!getJSONPathExpr().evaluate(tuple, ptr)) { + return false; } - private Expression getJSONPathExpr() { - return getChildren().get(1); + if (ptr.getLength() == 0) { + return false; } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + String jsonPathExprStr = + (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); + if (jsonPathExprStr == null) { + return false; + } + Object value = jsonDataFormat.getValue(top, jsonPathExprStr); + int valueType = jsonDataFormat.getValueType(top, jsonPathExprStr); + if (value != null) { + switch (valueType) { + case Types.ARRAY: + case Types.NVARCHAR: + ptr.set(PVarchar.INSTANCE.toBytes(value)); + break; + default: + return false; + } } + + return true; + } + + private Expression getColValExpr() { + return 
getChildren().get(0); + } + + private Expression getJSONPathExpr() { + return getChildren().get(1); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonValueFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonValueFunction.java index 941666841e1..d06673f03c1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonValueFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/JsonValueFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.expression.function; +import java.sql.Types; +import java.util.List; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode; @@ -31,9 +34,6 @@ import org.apache.phoenix.util.json.JsonDataFormat; import org.apache.phoenix.util.json.JsonDataFormatFactory; -import java.sql.Types; -import java.util.List; - /** * Built-in function for JSON_VALUE JSON_VALUE(, [returning * ]) Extracts a scalar JSON value—everything except object and array—and returns it as a @@ -41,92 +41,90 @@ * JSON_VALUE returns a string. */ @FunctionParseNode.BuiltInFunction(name = JsonValueFunction.NAME, - nodeClass = JsonValueParseNode.class, - args = { @FunctionParseNode.Argument(allowedTypes = { PJson.class, PBson.class, - PVarbinary.class }), - @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) + nodeClass = JsonValueParseNode.class, + args = { + @FunctionParseNode.Argument(allowedTypes = { PJson.class, PBson.class, PVarbinary.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }) public class JsonValueFunction extends ScalarFunction { - public static final String NAME = "JSON_VALUE"; - private final JsonDataFormat - jsonDataFormat = - JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); + public static final String NAME = "JSON_VALUE"; + private final JsonDataFormat jsonDataFormat = + JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); - // This is called from ExpressionType newInstance - public JsonValueFunction() { + // This is called from ExpressionType newInstance + public JsonValueFunction() { - } + } - public JsonValueFunction(List children) { - super(children); - Preconditions.checkNotNull(getJSONPathExpr()); - } + public JsonValueFunction(List children) { + super(children); + Preconditions.checkNotNull(getJSONPathExpr()); + } - @Override - public String getName() { - return NAME; + @Override + public String getName() { + return NAME; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getColValExpr().evaluate(tuple, ptr)) { + return false; + } + if (ptr == null || ptr.getLength() == 0) { + return false; } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getColValExpr().evaluate(tuple, ptr)) { - return false; - } - if (ptr == null || ptr.getLength() == 0) { - return false; - } - - // Column name or JSON 
string - Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); - - if (!getJSONPathExpr().evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - return false; - } - - String - jsonPathExprStr = - (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); - if (jsonPathExprStr == null) { - return false; - } - - Object value = jsonDataFormat.getValue(top, jsonPathExprStr); - int valueType = jsonDataFormat.getValueType(top, jsonPathExprStr); - if (value != null) { - switch (valueType) { - case Types.INTEGER: - case Types.BOOLEAN: - case Types.DOUBLE: - case Types.VARCHAR: - case Types.BIGINT: - case Types.BINARY: - case Types.DATE: - ptr.set(PVarchar.INSTANCE.toBytes(String.valueOf(value))); - break; - default: - return false; - } - } else { - ptr.set(PVarchar.INSTANCE.toBytes(null)); - } - - return true; + // Column name or JSON string + Object top = PJson.INSTANCE.toObject(ptr, getColValExpr().getSortOrder()); + + if (!getJSONPathExpr().evaluate(tuple, ptr)) { + return false; } - private Expression getColValExpr() { - return getChildren().get(0); + if (ptr.getLength() == 0) { + return false; } - private Expression getJSONPathExpr() { - return getChildren().get(1); + String jsonPathExprStr = + (String) PVarchar.INSTANCE.toObject(ptr, getJSONPathExpr().getSortOrder()); + if (jsonPathExprStr == null) { + return false; } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + Object value = jsonDataFormat.getValue(top, jsonPathExprStr); + int valueType = jsonDataFormat.getValueType(top, jsonPathExprStr); + if (value != null) { + switch (valueType) { + case Types.INTEGER: + case Types.BOOLEAN: + case Types.DOUBLE: + case Types.VARCHAR: + case Types.BIGINT: + case Types.BINARY: + case Types.DATE: + ptr.set(PVarchar.INSTANCE.toBytes(String.valueOf(value))); + break; + default: + return false; + } + } else { + ptr.set(PVarchar.INSTANCE.toBytes(null)); } + + return true; + } + + private Expression getColValExpr() { + return getChildren().get(0); + } + + private Expression getJSONPathExpr() { + return getChildren().get(1); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java index fb2706cab13..dcb40af94dc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LTrimFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,83 +24,78 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; - /** - * * Implementation of the {@code LTrim() } build-in function. It removes from the left end of - * {@code } space character and other function bytes in single byte utf8 characters - * set. - * - * + * {@code } space character and other function bytes in single byte utf8 characters set. * @since 0.1 */ -@BuiltInFunction(name=LTrimFunction.NAME, args={ - @Argument(allowedTypes={PVarchar.class})}) +@BuiltInFunction(name = LTrimFunction.NAME, args = { @Argument(allowedTypes = { PVarchar.class }) }) public class LTrimFunction extends ScalarFunction { - public static final String NAME = "LTRIM"; + public static final String NAME = "LTRIM"; - public LTrimFunction() { } + public LTrimFunction() { + } - public LTrimFunction(List children) throws SQLException { - super(children); - } + public LTrimFunction(List children) throws SQLException { + super(children); + } - private Expression getStringExpression() { - return children.get(0); - } + private Expression getStringExpression() { + return children.get(0); + } - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Starting from the front of the byte, look for all single bytes at the end of the string - // that is below SPACE_UTF8 (space and control characters) or 0x7f (control chars). - if (!getStringExpression().evaluate(tuple, ptr)) { - return false; - } - - if (ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - byte[] string = ptr.get(); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - - SortOrder sortOrder = getStringExpression().getSortOrder(); - int i = StringUtil.getFirstNonBlankCharIdxFromStart(string, offset, length, sortOrder); - if (i == offset + length) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - - ptr.set(string, i, offset + length - i); - return true; - } - - @Override - public Integer getMaxLength() { - return getStringExpression().getMaxLength(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Starting from the front of the byte, look for all single bytes at the end of the string + // that is below SPACE_UTF8 (space and control characters) or 0x7f (control chars). 
+ if (!getStringExpression().evaluate(tuple, ptr)) { + return false; } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + if (ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } + byte[] string = ptr.get(); + int offset = ptr.getOffset(); + int length = ptr.getLength(); - @Override - public String getName() { - return NAME; + SortOrder sortOrder = getStringExpression().getSortOrder(); + int i = StringUtil.getFirstNonBlankCharIdxFromStart(string, offset, length, sortOrder); + if (i == offset + length) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } + ptr.set(string, i, offset + length - i); + return true; + } + + @Override + public Integer getMaxLength() { + return getStringExpression().getMaxLength(); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java index dd2a4b67302..bc3d620e480 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValueFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,52 +30,53 @@ import org.apache.phoenix.schema.types.PBoolean; /** - * Built-in function for {@code LAST_VALUE() WITHIN GROUP (ORDER BY ASC/DESC) aggregate } + * Built-in function for + * {@code LAST_VALUE() WITHIN GROUP (ORDER BY ASC/DESC) aggregate } * function - * */ -@FunctionParseNode.BuiltInFunction(name = LastValueFunction.NAME, nodeClass = LastValueAggregateParseNode.class, args = { - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class}, isConstant = true), - @FunctionParseNode.Argument()}) +@FunctionParseNode.BuiltInFunction(name = LastValueFunction.NAME, + nodeClass = LastValueAggregateParseNode.class, + args = { @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @FunctionParseNode.Argument() }) public class LastValueFunction extends FirstLastValueBaseFunction { - public static final String NAME = "LAST_VALUE"; + public static final String NAME = "LAST_VALUE"; - public LastValueFunction() { - } + public LastValueFunction() { + } - public LastValueFunction(List childExpressions) { - this(childExpressions, null); - } + public LastValueFunction(List childExpressions) { + this(childExpressions, null); + } - public LastValueFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public LastValueFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); + @Override + public Aggregator newServerAggregator(Configuration conf) { + FirstLastValueServerAggregator aggregator = new 
FirstLastValueServerAggregator(); - //invert order for LAST_BY function cause it is inverted version of FIRST_BY - boolean order = !(Boolean) ((LiteralExpression) children.get(1)).getValue(); - aggregator.init(children, order, 0); + // invert order for LAST_BY function cause it is inverted version of FIRST_BY + boolean order = !(Boolean) ((LiteralExpression) children.get(1)).getValue(); + aggregator.init(children, order, 0); - return aggregator; - } + return aggregator; + } - @Override - public Aggregator newClientAggregator() { + @Override + public Aggregator newClientAggregator() { - FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); - aggregator.init(0, false); + FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); + aggregator.init(0, false); - return aggregator; - } + return aggregator; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java index 21cb0824fb8..c08cd41c466 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LastValuesFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,65 +33,68 @@ import org.apache.phoenix.schema.types.PInteger; /** - * Built-in function for {@code FIRST_VALUES(, ) WITHIN GROUP (ORDER BY ASC/DESC) aggregate } + * Built-in function for + * {@code FIRST_VALUES(, ) WITHIN GROUP (ORDER BY ASC/DESC) aggregate } * function - * */ -@FunctionParseNode.BuiltInFunction(name = LastValuesFunction.NAME, nodeClass = LastValuesAggregateParseNode.class, args = { - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true)}) +@FunctionParseNode.BuiltInFunction(name = LastValuesFunction.NAME, + nodeClass = LastValuesAggregateParseNode.class, + args = { @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true) }) public class LastValuesFunction extends FirstLastValueBaseFunction { - public static final String NAME = "LAST_VALUES"; - private int offset; - - public LastValuesFunction() { - } + public static final String NAME = "LAST_VALUES"; + private int offset; - public LastValuesFunction(List childExpressions) { - this(childExpressions, null); - } + public LastValuesFunction() { + } - public LastValuesFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public LastValuesFunction(List childExpressions) { + this(childExpressions, null); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - FirstLastValueServerAggregator aggregator = 
new FirstLastValueServerAggregator(); + public LastValuesFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); - boolean order = !(Boolean) ((LiteralExpression) children.get(1)).getValue(); + @Override + public Aggregator newServerAggregator(Configuration conf) { + FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); - aggregator.init(children, order, offset); + offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); + boolean order = !(Boolean) ((LiteralExpression) children.get(1)).getValue(); - return aggregator; - } + aggregator.init(children, order, offset); - @Override - public Aggregator newClientAggregator() { - FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(getDataType()); + return aggregator; + } - if (children.size() < 3) { - aggregator.init(offset, true); - } else { - aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), true); - } + @Override + public Aggregator newClientAggregator() { + FirstLastValueBaseClientAggregator aggregator = + new FirstLastValueBaseClientAggregator(getDataType()); - return aggregator; + if (children.size() < 3) { + aggregator.init(offset, true); + } else { + aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), true); } - @Override - public String getName() { - return NAME; - } + return aggregator; + } + + @Override + public String getName() { + return NAME; + } - @Override - public PDataType getDataType() { - if (children.size() < 3) { - return null; - } - return PDataType.fromTypeId(children.get(2).getDataType().getSqlType() + PArrayDataType.ARRAY_TYPE_BASE); + @Override + public PDataType getDataType() { + if (children.size() < 3) { + return null; } + return PDataType + .fromTypeId(children.get(2).getDataType().getSqlType() + PArrayDataType.ARRAY_TYPE_BASE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java index e69b45a1ae9..6064a783260 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LengthFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,72 +25,70 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PChar; -import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; - /** - * - * Implementation of the {@code LENGTH() } build-in function. 
{@code } is the string - * of characters we want to find the length of. If {@code } is NULL or empty, null + * Implementation of the {@code LENGTH() } build-in function. {@code } is the + * string of characters we want to find the length of. If {@code } is NULL or empty, null * is returned. - * - * * @since 0.1 */ -@BuiltInFunction(name=LengthFunction.NAME, args={ - @Argument(allowedTypes={ PVarchar.class })} ) +@BuiltInFunction(name = LengthFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }) }) public class LengthFunction extends ScalarFunction { - public static final String NAME = "LENGTH"; + public static final String NAME = "LENGTH"; - public LengthFunction() { } + public LengthFunction() { + } - public LengthFunction(List children) throws SQLException { - super(children); - } + public LengthFunction(List children) throws SQLException { + super(children); + } - private Expression getStringExpression() { - return children.get(0); - } + private Expression getStringExpression() { + return children.get(0); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = getStringExpression(); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - int len; - if (child.getDataType() == PChar.INSTANCE) { - // Only single-byte characters allowed in CHAR - len = ptr.getLength(); - } else { - try { - len = StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), child.getSortOrder()); - } catch (UndecodableByteException e) { - return false; - } - } - ptr.set(PInteger.INSTANCE.toBytes(len)); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = getStringExpression(); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - - @Override - public String getName() { - return NAME; + int len; + if (child.getDataType() == PChar.INSTANCE) { + // Only single-byte characters allowed in CHAR + len = ptr.getLength(); + } else { + try { + len = StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), + child.getSortOrder()); + } catch (UndecodableByteException e) { + return false; + } } + ptr.set(PInteger.INSTANCE.toBytes(len)); + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LnFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LnFunction.java index 4275336573f..928aef745c7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LnFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LnFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,30 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = LnFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) +@BuiltInFunction(name = LnFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class LnFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "LN"; + public static final String NAME = "LN"; - public LnFunction() { - } + public LnFunction() { + } - public LnFunction(List children) throws SQLException { - super(children); - } + public LnFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.log(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.log(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LogFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LogFunction.java index 87b9a795aa1..3def57136e9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LogFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LogFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,31 +26,32 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = LogFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }), - @Argument(allowedTypes = { PDouble.class, PDecimal.class }, defaultValue = "1e1") }) +@BuiltInFunction(name = LogFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }), + @Argument(allowedTypes = { PDouble.class, PDecimal.class }, defaultValue = "1e1") }) public class LogFunction extends JavaMathTwoArgumentFunction { - public static final String NAME = "LOG"; + public static final String NAME = "LOG"; - public LogFunction() { - } + public LogFunction() { + } - public LogFunction(List children) throws SQLException { - super(children); - } + public LogFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg, double secondArg) { - return Math.log(firstArg) / Math.log(secondArg); - } + @Override + protected double compute(double firstArg, double secondArg) { + return Math.log(firstArg) / Math.log(secondArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LowerFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LowerFunction.java index 264ebfbb791..23624d055ba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LowerFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LowerFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.function; import java.io.DataInput; @@ -32,77 +31,78 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.i18n.LocaleUtils; -@FunctionParseNode.BuiltInFunction(name=LowerFunction.NAME, args={ - @FunctionParseNode.Argument(allowedTypes={PVarchar.class}), - @FunctionParseNode.Argument(allowedTypes={PVarchar.class}, defaultValue="null", isConstant=true)} ) +@FunctionParseNode.BuiltInFunction(name = LowerFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, defaultValue = "null", + isConstant = true) }) public class LowerFunction extends ScalarFunction { - public static final String NAME = "LOWER"; + public static final String NAME = "LOWER"; - private Locale locale = null; + private Locale locale = null; - public LowerFunction() { - } + public LowerFunction() { + } - public LowerFunction(List children) throws SQLException { - super(children); - initialize(); - } + public LowerFunction(List children) throws SQLException { + super(children); + initialize(); + } - private void initialize() { - if (children.size() > 1) { - String localeISOCode = getLiteralValue(1, String.class); - locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); - } + private void initialize() { + if (children.size() > 1) { + String localeISOCode = getLiteralValue(1, String.class); + locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - initialize(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + initialize(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getStrExpression().evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getStrExpression().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } + String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getStrExpression().getSortOrder()); - String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getStrExpression().getSortOrder()); + if (sourceStr == null) { + return true; + } - if (sourceStr == null) { - return true; - } + String resultStr = locale == null ? sourceStr.toLowerCase() : sourceStr.toLowerCase(locale); - String resultStr = locale == null ? 
sourceStr.toLowerCase() : sourceStr.toLowerCase(locale); + ptr.set(PVarchar.INSTANCE.toBytes(resultStr)); + return true; + } - ptr.set(PVarchar.INSTANCE.toBytes(resultStr)); - return true; - } + @Override + public PDataType getDataType() { + return getStrExpression().getDataType(); + } - @Override - public PDataType getDataType() { - return getStrExpression().getDataType(); - } - - @Override - public Integer getMaxLength() { - return getStrExpression().getMaxLength(); - } + @Override + public Integer getMaxLength() { + return getStrExpression().getMaxLength(); + } - @Override - public boolean isNullable() { - return getStrExpression().isNullable(); - } + @Override + public boolean isNullable() { + return getStrExpression().isNullable(); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - private Expression getStrExpression() { - return children.get(0); - } + private Expression getStrExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LpadFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LpadFunction.java index 6f0f2629703..f499e902457 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LpadFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/LpadFunction.java @@ -1,18 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.expression.function; @@ -32,168 +33,161 @@ import org.apache.phoenix.util.StringUtil; /** - * Implementation of LPAD(input string, length int [, fill string]) - * - * Fills up the input to length (number of characters) by prepending characters in fill (space by default). If the input - * is already longer than length then it is truncated on the right. 
+ * Implementation of LPAD(input string, length int [, fill string]) Fills up the input to length + * (number of characters) by prepending characters in fill (space by default). If the input is + * already longer than length then it is truncated on the right. */ -@BuiltInFunction(name = LpadFunction.NAME, args = { @Argument(allowedTypes = { PVarchar.class }), - @Argument(allowedTypes = { PInteger.class }), - @Argument(allowedTypes = { PVarchar.class }, defaultValue = "' '") }) +@BuiltInFunction(name = LpadFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PInteger.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "' '") }) public class LpadFunction extends ScalarFunction { - public static final String NAME = "LPAD"; - - public LpadFunction() { + public static final String NAME = "LPAD"; + + public LpadFunction() { + } + + public LpadFunction(List children) { + super(children); + } + + /** + * Helper function to get the utf8 length of CHAR or VARCHAR points to the string sortOrder of the + * string whether the string is of char type + * @return utf8 length of the string + */ + private int getUTF8Length(ImmutableBytesWritable ptr, SortOrder sortOrder, boolean isCharType) { + return isCharType + ? ptr.getLength() + : StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); + } + + /** + * Helper function to get the byte length of a utf8 encoded string points to the string sortOrder + * of the string whether the string is of char type + * @return byte length of the string + */ + private int getSubstringByteLength(ImmutableBytesWritable ptr, int length, SortOrder sortOrder, + boolean isCharType) { + return isCharType + ? length + : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset(), length, sortOrder); + } + + /** + * Left pads a string with with the given fill expression. + */ + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression outputStrLenExpr = getOutputStrLenExpr(); + if (!outputStrLenExpr.evaluate(tuple, ptr)) { + return false; } - - public LpadFunction(List children) { - super(children); + if (ptr.getLength() == 0) { + return true; } - - /** - * Helper function to get the utf8 length of CHAR or VARCHAR - * - * @param ptr - * points to the string - * @param sortOrder - * sortOrder of the string - * @param isCharType - * whether the string is of char type - * @return utf8 length of the string - */ - private int getUTF8Length(ImmutableBytesWritable ptr, SortOrder sortOrder, boolean isCharType) { - return isCharType ? ptr.getLength() : StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), - ptr.getLength(), sortOrder); + int outputStrLen = + outputStrLenExpr.getDataType().getCodec().decodeInt(ptr, outputStrLenExpr.getSortOrder()); + if (outputStrLen < 0) { + return false; } - /** - * Helper function to get the byte length of a utf8 encoded string - * - * @param ptr - * points to the string - * @param sortOrder - * sortOrder of the string - * @param isCharType - * whether the string is of char type - * @return byte length of the string - */ - private int getSubstringByteLength(ImmutableBytesWritable ptr, int length, SortOrder sortOrder, boolean isCharType) { - return isCharType ? length : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset(), length, - sortOrder); + Expression strExp = getStrExpr(); + if (!strExp.evaluate(tuple, ptr)) { + return false; } - /** - * Left pads a string with with the given fill expression. 
- */ - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression outputStrLenExpr = getOutputStrLenExpr(); - if (!outputStrLenExpr.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - int outputStrLen = outputStrLenExpr.getDataType().getCodec().decodeInt(ptr, outputStrLenExpr.getSortOrder()); - if (outputStrLen < 0) { - return false; - } - - Expression strExp = getStrExpr(); - if (!strExp.evaluate(tuple, ptr)) { - return false; - } - - boolean isStrCharType = getStrExpr().getDataType() == PChar.INSTANCE; - boolean isFillCharType = getFillExpr().getDataType() == PChar.INSTANCE; - SortOrder strSortOrder = getStrExpr().getSortOrder(); - SortOrder fillSortOrder = getFillExpr().getSortOrder(); - int inputStrLen = getUTF8Length(ptr, strSortOrder, isStrCharType); - - if (outputStrLen == inputStrLen) { - // nothing to do - return true; - } - if (outputStrLen < inputStrLen) { - // truncate the string from the right - int subStrByteLength = getSubstringByteLength(ptr, outputStrLen, strSortOrder, isStrCharType); - ptr.set(ptr.get(), ptr.getOffset(), subStrByteLength); - return true; - } - - // left pad the input string with the fill chars - Expression fillExpr = getFillExpr(); - ImmutableBytesWritable fillPtr = new ImmutableBytesWritable(); - if (!fillExpr.evaluate(tuple, fillPtr)) { - return false; - } - if (fillPtr.getLength()==0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - - // if the padding to be added is not a multiple of the length of the - // fill string then we need to use part of the fill string to pad - // LPAD(ab, 5, xy) = xyxab - // padLen = 3 - // numFillsPrepended = 1 - // numFillCharsPrepended = 1 - - // length of the fill string - int fillLen = getUTF8Length(fillPtr, fillSortOrder, isFillCharType); - // length of padding to be added - int padLen = outputStrLen - inputStrLen; - // number of fill strings to be prepended - int numFillsPrepended = padLen / fillLen; - // number of chars from fill string to be prepended - int numFillCharsPrepended = padLen % fillLen; - - // byte length of the input string - int strByteLength = ptr.getLength(); - // byte length of the fill string - int fillByteLength = getSubstringByteLength(fillPtr, fillPtr.getLength(), fillSortOrder, isFillCharType); - // byte length of the full fills to be prepended - int fullFillsByteLength = numFillsPrepended * fillByteLength; - // byte length of the chars of fill string to be prepended - int fillCharsByteLength = getSubstringByteLength(fillPtr, numFillCharsPrepended, fillSortOrder, isFillCharType); - // byte length of the padded string = - int strWithPaddingByteLength = fullFillsByteLength + fillCharsByteLength + strByteLength; - - // need to invert the fill string if the sort order of fill and - // input are different - boolean invertFill = fillSortOrder != strSortOrder; - byte[] paddedStr = - StringUtil.lpad(ptr.get(), ptr.getOffset(), ptr.getLength(), fillPtr.get(), fillPtr.getOffset(), - fillPtr.getLength(), invertFill, strWithPaddingByteLength); - ptr.set(paddedStr); - return true; - } + boolean isStrCharType = getStrExpr().getDataType() == PChar.INSTANCE; + boolean isFillCharType = getFillExpr().getDataType() == PChar.INSTANCE; + SortOrder strSortOrder = getStrExpr().getSortOrder(); + SortOrder fillSortOrder = getFillExpr().getSortOrder(); + int inputStrLen = getUTF8Length(ptr, strSortOrder, isStrCharType); - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + if (outputStrLen == inputStrLen) { + // 
nothing to do + return true; } - - @Override - public String getName() { - return NAME; + if (outputStrLen < inputStrLen) { + // truncate the string from the right + int subStrByteLength = getSubstringByteLength(ptr, outputStrLen, strSortOrder, isStrCharType); + ptr.set(ptr.get(), ptr.getOffset(), subStrByteLength); + return true; } - @Override - public SortOrder getSortOrder() { - return getStrExpr().getSortOrder(); + // left pad the input string with the fill chars + Expression fillExpr = getFillExpr(); + ImmutableBytesWritable fillPtr = new ImmutableBytesWritable(); + if (!fillExpr.evaluate(tuple, fillPtr)) { + return false; } - - private Expression getStrExpr() { - return children.get(0); - } - - private Expression getFillExpr() { - return children.get(2); - } - - private Expression getOutputStrLenExpr() { - return children.get(1); + if (fillPtr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } -} \ No newline at end of file + // if the padding to be added is not a multiple of the length of the + // fill string then we need to use part of the fill string to pad + // LPAD(ab, 5, xy) = xyxab + // padLen = 3 + // numFillsPrepended = 1 + // numFillCharsPrepended = 1 + + // length of the fill string + int fillLen = getUTF8Length(fillPtr, fillSortOrder, isFillCharType); + // length of padding to be added + int padLen = outputStrLen - inputStrLen; + // number of fill strings to be prepended + int numFillsPrepended = padLen / fillLen; + // number of chars from fill string to be prepended + int numFillCharsPrepended = padLen % fillLen; + + // byte length of the input string + int strByteLength = ptr.getLength(); + // byte length of the fill string + int fillByteLength = + getSubstringByteLength(fillPtr, fillPtr.getLength(), fillSortOrder, isFillCharType); + // byte length of the full fills to be prepended + int fullFillsByteLength = numFillsPrepended * fillByteLength; + // byte length of the chars of fill string to be prepended + int fillCharsByteLength = + getSubstringByteLength(fillPtr, numFillCharsPrepended, fillSortOrder, isFillCharType); + // byte length of the padded string = + int strWithPaddingByteLength = fullFillsByteLength + fillCharsByteLength + strByteLength; + + // need to invert the fill string if the sort order of fill and + // input are different + boolean invertFill = fillSortOrder != strSortOrder; + byte[] paddedStr = StringUtil.lpad(ptr.get(), ptr.getOffset(), ptr.getLength(), fillPtr.get(), + fillPtr.getOffset(), fillPtr.getLength(), invertFill, strWithPaddingByteLength); + ptr.set(paddedStr); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public SortOrder getSortOrder() { + return getStrExpr().getSortOrder(); + } + + private Expression getStrExpr() { + return children.get(0); + } + + private Expression getFillExpr() { + return children.get(2); + } + + private Expression getOutputStrLenExpr() { + return children.get(1); + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MD5Function.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MD5Function.java index 0ed301029ae..bf32b0167fb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MD5Function.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MD5Function.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance 
* with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,61 +32,65 @@ @BuiltInFunction(name = MD5Function.NAME, args = { @Argument() }) public class MD5Function extends ScalarFunction { - public static final String NAME = "MD5"; - public static final Integer LENGTH = 16; + public static final String NAME = "MD5"; + public static final Integer LENGTH = 16; - private final MessageDigest messageDigest; + private final MessageDigest messageDigest; - public MD5Function() throws SQLException { - try { - messageDigest = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new SQLException(e); - } + public MD5Function() throws SQLException { + try { + messageDigest = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new SQLException(e); } + } - public MD5Function(List children) throws SQLException { - super(children); - try { - messageDigest = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } + public MD5Function(List children) throws SQLException { + super(children); + try { + messageDigest = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getChildExpression().evaluate(tuple, ptr)) { return false; } - if (ptr.getLength()==0) { return true; } - - // Update the digest value - messageDigest.update(ptr.get(), ptr.getOffset(), ptr.getLength()); - // Get the digest bytes (note this resets the messageDigest as well) - ptr.set(messageDigest.digest()); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getChildExpression().evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PBinary.INSTANCE; + if (ptr.getLength() == 0) { + return true; } - @Override - public Integer getMaxLength() { - return LENGTH; - } + // Update the digest value + messageDigest.update(ptr.get(), ptr.getOffset(), ptr.getLength()); + // Get the digest bytes (note this resets the messageDigest as well) + ptr.set(messageDigest.digest()); + return true; + } - @Override - public boolean isNullable() { - return getChildExpression().isNullable(); - } + @Override + public PDataType getDataType() { + return PBinary.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public Integer getMaxLength() { + return LENGTH; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public boolean isNullable() { + return getChildExpression().isNullable(); + } + + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MathPIFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MathPIFunction.java index 1e276ed3ebe..6b81c2b4d0c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MathPIFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MathPIFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this 
file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,39 +27,36 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDouble; - /** - * - * Function used to represent PI() - * The function returns a {@link org.apache.phoenix.schema.types.PDouble} - * + * Function used to represent PI() The function returns a + * {@link org.apache.phoenix.schema.types.PDouble} */ -@BuiltInFunction(name = MathPIFunction.NAME, args= {} -) +@BuiltInFunction(name = MathPIFunction.NAME, args = {}) public class MathPIFunction extends ScalarFunction { - public static final String NAME = "PI"; + public static final String NAME = "PI"; - public MathPIFunction() {} + public MathPIFunction() { + } - public MathPIFunction(List children) throws SQLException { - super(children); - } + public MathPIFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - ptr.set(new byte[getDataType().getByteSize()]); - getDataType().getCodec().encodeDouble(Math.PI, ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + ptr.set(new byte[getDataType().getByteSize()]); + getDataType().getCodec().encodeDouble(Math.PI, ptr); + return true; + } - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MaxAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MaxAggregateFunction.java index 63252d7836e..4d1727e4264 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MaxAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MaxAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,54 +29,51 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; - - /** * Built-in function for finding MAX. 
- * - * * @since 0.1 */ -@BuiltInFunction(name=MaxAggregateFunction.NAME, nodeClass=MaxAggregateParseNode.class, args= {@Argument()} ) +@BuiltInFunction(name = MaxAggregateFunction.NAME, nodeClass = MaxAggregateParseNode.class, + args = { @Argument() }) public class MaxAggregateFunction extends MinAggregateFunction { - public static final String NAME = "MAX"; + public static final String NAME = "MAX"; + + public MaxAggregateFunction() { + } + + public MaxAggregateFunction(List childExpressions) { + this(childExpressions, null); + } + + public MaxAggregateFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - public MaxAggregateFunction() { - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + Expression child = getAggregatorExpression(); + final PDataType type = child.getDataType(); + final Integer maxLength = child.getMaxLength(); + return new MaxAggregator(child.getSortOrder()) { + @Override + public PDataType getDataType() { + return type; + } - public MaxAggregateFunction(List childExpressions) { - this(childExpressions, null); - } - - public MaxAggregateFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + @Override + public Integer getMaxLength() { + return maxLength; + } + }; + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - Expression child = getAggregatorExpression(); - final PDataType type = child.getDataType(); - final Integer maxLength = child.getMaxLength(); - return new MaxAggregator(child.getSortOrder()) { - @Override - public PDataType getDataType() { - return type; - } + @Override + public String getName() { + return NAME; + } - @Override - public Integer getMaxLength() { - return maxLength; - } - }; - } - - @Override - public String getName() { - return NAME; - } - - @Override - public SortOrder getSortOrder() { - return getAggregatorExpression().getSortOrder(); - } + @Override + public SortOrder getSortOrder() { + return getAggregatorExpression().getSortOrder(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinAggregateFunction.java index b26a8862b74..2c4138df8d3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,68 +28,66 @@ import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.MinAggregateParseNode; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; - - +import org.apache.phoenix.schema.types.PDataType; /** * Built-in function for finding MIN. 
- * - * * @since 0.1 */ -@BuiltInFunction(name=MinAggregateFunction.NAME, nodeClass=MinAggregateParseNode.class, args= {@Argument()} ) +@BuiltInFunction(name = MinAggregateFunction.NAME, nodeClass = MinAggregateParseNode.class, + args = { @Argument() }) public class MinAggregateFunction extends DelegateConstantToCountAggregateFunction { - public static final String NAME = "MIN"; + public static final String NAME = "MIN"; - public MinAggregateFunction() { - } + public MinAggregateFunction() { + } - public MinAggregateFunction(List childExpressions) { - super(childExpressions, null); - } - - public MinAggregateFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public MinAggregateFunction(List childExpressions) { + super(childExpressions, null); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - boolean wasEvaluated = super.evaluate(tuple, ptr); - if (!wasEvaluated) { - return false; - } - if (isConstantExpression()) { - getAggregatorExpression().evaluate(tuple, ptr); - } - return true; - } + public MinAggregateFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - Expression child = getAggregatorExpression(); - final PDataType type = child.getDataType(); - final Integer maxLength = child.getMaxLength(); - return new MinAggregator(child.getSortOrder()) { - @Override - public PDataType getDataType() { - return type; - } - @Override - public Integer getMaxLength() { - return maxLength; - } - }; - } - - @Override - public SortOrder getSortOrder() { - return getAggregatorExpression().getSortOrder(); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + boolean wasEvaluated = super.evaluate(tuple, ptr); + if (!wasEvaluated) { + return false; } - - @Override - public String getName() { - return NAME; + if (isConstantExpression()) { + getAggregatorExpression().evaluate(tuple, ptr); } + return true; + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + Expression child = getAggregatorExpression(); + final PDataType type = child.getDataType(); + final Integer maxLength = child.getMaxLength(); + return new MinAggregator(child.getSortOrder()) { + @Override + public PDataType getDataType() { + return type; + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + }; + } + + @Override + public SortOrder getSortOrder() { + return getAggregatorExpression().getSortOrder(); + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java index 1ba672984db..d7ccf184d47 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MinuteFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the Minute() buildin. Input Date/Timestamp/Time. - * Returns an integer from 0 to 59 representing the minute component of time - * + * Implementation of the Minute() buildin. Input Date/Timestamp/Time. Returns an integer from 0 to + * 59 representing the minute component of time */ -@BuiltInFunction(name=MinuteFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = MinuteFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class MinuteFunction extends DateScalarFunction { - public static final String NAME = "MINUTE"; + public static final String NAME = "MINUTE"; - public MinuteFunction() { - } + public MinuteFunction() { + } - public MinuteFunction(List children) throws SQLException { - super(children); - } + public MinuteFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int minute = dt.getMinuteOfHour(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(minute, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int minute = dt.getMinuteOfHour(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(minute, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java index cb6ce3b5dbe..1e0572efa15 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/MonthFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the Month() buildin. Input Date/Timestamp/Time. - * Returns an integer from 1 to 12 representing the month omponent of date - * + * Implementation of the Month() buildin. Input Date/Timestamp/Time. Returns an integer from 1 to 12 + * representing the month omponent of date */ -@BuiltInFunction(name=MonthFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = MonthFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class MonthFunction extends DateScalarFunction { - public static final String NAME = "MONTH"; + public static final String NAME = "MONTH"; - public MonthFunction() { - } + public MonthFunction() { + } - public MonthFunction(List children) throws SQLException { - super(children); - } + public MonthFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int month = dt.getMonthOfYear(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(month, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int month = dt.getMonthOfYear(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(month, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NowFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NowFunction.java index 3b7aebbc49d..889664ec1ce 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NowFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NowFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
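For reference, the field extraction performed by the MINUTE() and MONTH() implementations above boils down to decoding the stored value as epoch milliseconds and reading the field through a Joda-Time DateTime on the UTC GJChronology. The standalone sketch below only assumes joda-time on the classpath; the sample millisecond value is made up and the class is purely illustrative, not part of this patch.

    import org.joda.time.DateTime;
    import org.joda.time.chrono.GJChronology;

    public class DateFieldSketch {
      public static void main(String[] args) {
        // Hypothetical stored date value, interpreted as epoch milliseconds.
        long epochMillis = 1699276567000L;
        DateTime dt = new DateTime(epochMillis, GJChronology.getInstanceUTC());
        // Same fields the two functions encode back into the result pointer.
        System.out.println("minute = " + dt.getMinuteOfHour()); // 0..59, as returned by MINUTE()
        System.out.println("month  = " + dt.getMonthOfYear());  // 1..12, as returned by MONTH()
      }
    }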
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,34 +20,31 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.CurrentDateParseNode; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; - /** - * - * Function used to represent NOW() - * The function returns a {@link org.apache.phoenix.schema.types.PTimestamp} - * + * Function used to represent NOW() The function returns a + * {@link org.apache.phoenix.schema.types.PTimestamp} */ -@BuiltInFunction(name = NowFunction.NAME, - nodeClass=CurrentDateParseNode.class, args= {}, classType = FunctionClassType.ALIAS, derivedFunctions = {CurrentDateFunction.class}) +@BuiltInFunction(name = NowFunction.NAME, nodeClass = CurrentDateParseNode.class, args = {}, + classType = FunctionClassType.ALIAS, derivedFunctions = { CurrentDateFunction.class }) public abstract class NowFunction extends ScalarFunction { - - public static final String NAME = "NOW"; - - public NowFunction() {} - - public NowFunction(List children) throws SQLException { - super(children); - } - - @Override - public String getName() { - return NAME; - } - + + public static final String NAME = "NOW"; + + public NowFunction() { + } + + public NowFunction(List children) throws SQLException { + super(children); + } + + @Override + public String getName() { + return NAME; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java index 08f6b6e1594..6072a165836 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/NthValueFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,59 +31,61 @@ import org.apache.phoenix.schema.types.PInteger; /** - * Built-in function for {@code NTH_VALUE(, ) WITHIN GROUP (ORDER BY ASC/DESC) } + * Built-in function for + * {@code NTH_VALUE(, ) WITHIN GROUP (ORDER BY ASC/DESC) } * aggregate function - * */ -@FunctionParseNode.BuiltInFunction(name = NthValueFunction.NAME, nodeClass = NthValueAggregateParseNode.class, args = { - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @FunctionParseNode.Argument(), - @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true)}) +@FunctionParseNode.BuiltInFunction(name = NthValueFunction.NAME, + nodeClass = NthValueAggregateParseNode.class, + args = { @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @FunctionParseNode.Argument(), + @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, isConstant = true) }) public class NthValueFunction extends FirstLastValueBaseFunction { - public static final String NAME = "NTH_VALUE"; - private int offset; - - public NthValueFunction() { - } + public static final String NAME = "NTH_VALUE"; + private int offset; - public NthValueFunction(List childExpressions) { - this(childExpressions, null); - } + public NthValueFunction() { + } - public NthValueFunction(List childExpressions, CountAggregateFunction delegate) { - super(childExpressions, delegate); - } + public NthValueFunction(List childExpressions) { + this(childExpressions, null); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); + public NthValueFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } - offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); - boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); + @Override + public Aggregator newServerAggregator(Configuration conf) { + FirstLastValueServerAggregator aggregator = new FirstLastValueServerAggregator(); - aggregator.init(children, order, offset); + offset = ((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(); + boolean order = (Boolean) ((LiteralExpression) children.get(1)).getValue(); - return aggregator; - } + aggregator.init(children, order, offset); - @Override - public Aggregator newClientAggregator() { - FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); + return aggregator; + } - if (children.size() < 3) { - aggregator.init(offset, false); - } else { - aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), false); - } + @Override + public Aggregator newClientAggregator() { + FirstLastValueBaseClientAggregator aggregator = new FirstLastValueBaseClientAggregator(); - return aggregator; + if (children.size() < 3) { + aggregator.init(offset, false); + } else { + aggregator.init(((Number) ((LiteralExpression) children.get(3)).getValue()).intValue(), + false); } - @Override - public String getName() { - return NAME; - } + return aggregator; + } + + @Override + public String getName() { + return NAME; + 
} } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/OctetLengthFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/OctetLengthFunction.java index 7372c80cec9..39f5dcec727 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/OctetLengthFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/OctetLengthFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,37 +31,37 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; -@BuiltInFunction(name = OctetLengthFunction.NAME, args = { @Argument(allowedTypes = { - PBinary.class, PVarbinary.class }), }) +@BuiltInFunction(name = OctetLengthFunction.NAME, + args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), }) public class OctetLengthFunction extends ScalarFunction { - public static final String NAME = "OCTET_LENGTH"; + public static final String NAME = "OCTET_LENGTH"; - public OctetLengthFunction() { - } + public OctetLengthFunction() { + } - public OctetLengthFunction(List children) throws SQLException { - super(children); - } + public OctetLengthFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get binary data parameter - Expression dataExpr = children.get(0); - if (!dataExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - // set result - ((PBinaryBase) dataExpr.getDataType()).octetLength(ptr, dataExpr.getSortOrder(), ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get binary data parameter + Expression dataExpr = children.get(0); + if (!dataExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + // set result + ((PBinaryBase) dataExpr.getDataType()).octetLength(ptr, dataExpr.getSortOrder(), ptr); + return true; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java index d56129fac31..4e0d8d72713 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentRankAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
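As a usage-level illustration of the NTH_VALUE shape documented a few hunks above, the JDBC sketch below issues the aggregate through an ordinary Phoenix connection. The connection URL, table, and column names are hypothetical placeholders rather than anything taken from this patch, and the query simply follows the NTH_VALUE(expr, n) WITHIN GROUP (ORDER BY ...) form described in the javadoc.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class NthValueSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical Phoenix connection; adjust the ZooKeeper quorum for a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            Statement stmt = conn.createStatement();
            // Second-highest PRICE per ITEM, following the documented NTH_VALUE form.
            ResultSet rs = stmt.executeQuery(
              "SELECT ITEM, NTH_VALUE(PRICE, 2) WITHIN GROUP (ORDER BY PRICE DESC) "
                + "FROM SALES GROUP BY ITEM")) {
          while (rs.next()) {
            System.out.println(rs.getString(1) + " -> " + rs.getBigDecimal(2));
          }
        }
      }
    }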
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DistinctValueWithCountClientAggregator; @@ -28,47 +27,45 @@ import org.apache.phoenix.expression.aggregator.PercentRankClientAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** - * - * {@code PERCENT_RANK([,]) WITHIN GROUP (ORDER BY [,] ASC/DESC) } aggregate function - * - * + * {@code PERCENT_RANK([,]) WITHIN GROUP (ORDER BY [,] ASC/DESC) } + * aggregate function * @since 1.2.1 */ @BuiltInFunction(name = PercentRankAggregateFunction.NAME, args = { @Argument(), - @Argument(allowedTypes = { PBoolean.class }, isConstant = true), @Argument(isConstant = true) }) + @Argument(allowedTypes = { PBoolean.class }, isConstant = true), @Argument(isConstant = true) }) public class PercentRankAggregateFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "PERCENT_RANK"; + public static final String NAME = "PERCENT_RANK"; - public PercentRankAggregateFunction() { + public PercentRankAggregateFunction() { - } + } - public PercentRankAggregateFunction(List childern) { - super(childern); - } + public PercentRankAggregateFunction(List childern) { + super(childern); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - return new PercentRankClientAggregator(children, getAggregatorExpression().getSortOrder()); - } + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + return new PercentRankClientAggregator(children, getAggregatorExpression().getSortOrder()); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java index 5a466e18154..a4287c111c2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileContAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DistinctValueWithCountClientAggregator; @@ -28,48 +27,47 @@ import org.apache.phoenix.expression.aggregator.PercentileClientAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** - * - * Built-in function for {@code PERCENTILE_CONT() WITHIN GROUP (ORDER BY ASC/DESC) aggregate function } - * - * + * Built-in function for + * {@code PERCENTILE_CONT() WITHIN GROUP (ORDER BY ASC/DESC) aggregate function } * @since 1.2.1 */ -@BuiltInFunction(name = PercentileContAggregateFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }), - @Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @Argument(allowedTypes = { PDecimal.class }, isConstant = true, minValue = "0", maxValue = "1") }) +@BuiltInFunction(name = PercentileContAggregateFunction.NAME, args = { + @Argument(allowedTypes = { PDecimal.class }), + @Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @Argument(allowedTypes = { PDecimal.class }, isConstant = true, minValue = "0", maxValue = "1") }) public class PercentileContAggregateFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "PERCENTILE_CONT"; + public static final String NAME = "PERCENTILE_CONT"; + + public PercentileContAggregateFunction() { + + } + + public PercentileContAggregateFunction(List childern) { + super(childern); + } - public PercentileContAggregateFunction() { - - } - - public PercentileContAggregateFunction(List childern) { - super(childern); - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + return new PercentileClientAggregator(children, getAggregatorExpression().getSortOrder()); + } - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - return new PercentileClientAggregator(children, getAggregatorExpression().getSortOrder()); - } + @Override + public String getName() { + return NAME; + } - @Override - public String getName() { - return NAME; - } - - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java index b79cf8b18a5..70f36ccf385 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PercentileDiscAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DistinctValueWithCountClientAggregator; @@ -28,43 +27,42 @@ import org.apache.phoenix.expression.aggregator.PercentileDiscClientAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PBoolean; +import org.apache.phoenix.schema.types.PDecimal; /** - * - * Built-in function for {@code PERCENTILE_DISC() WITHIN GROUP (ORDER BY ASC/DESC) aggregate function } - * - * + * Built-in function for + * {@code PERCENTILE_DISC() WITHIN GROUP (ORDER BY ASC/DESC) aggregate function } * @since 1.2.1 */ -@BuiltInFunction(name = PercentileDiscAggregateFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }), - @Argument(allowedTypes = { PBoolean.class }, isConstant = true), - @Argument(allowedTypes = { PDecimal.class }, isConstant = true, minValue = "0", maxValue = "1") }) +@BuiltInFunction(name = PercentileDiscAggregateFunction.NAME, args = { + @Argument(allowedTypes = { PDecimal.class }), + @Argument(allowedTypes = { PBoolean.class }, isConstant = true), + @Argument(allowedTypes = { PDecimal.class }, isConstant = true, minValue = "0", maxValue = "1") }) public class PercentileDiscAggregateFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "PERCENTILE_DISC"; + public static final String NAME = "PERCENTILE_DISC"; + + public PercentileDiscAggregateFunction() { + } + + public PercentileDiscAggregateFunction(List childern) { + super(childern); + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } - public PercentileDiscAggregateFunction() { - } + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + return new PercentileDiscClientAggregator(children, getAggregatorExpression().getSortOrder()); + } - public PercentileDiscAggregateFunction(List childern) { - super(childern); - } - - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } - - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - return new PercentileDiscClientAggregator(children, getAggregatorExpression().getSortOrder()); - } + @Override + public String getName() { + return NAME; + } - @Override - public String getName() { - return NAME; - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PhoenixRowTimestampFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PhoenixRowTimestampFunction.java 
index 5d99b03bf15..f71b06a272f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PhoenixRowTimestampFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PhoenixRowTimestampFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,12 @@ */ package org.apache.phoenix.expression.function; +import java.util.Date; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.expression.Determinism; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.KeyValueColumnExpression; @@ -29,105 +31,94 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; -import org.apache.phoenix.schema.types.PTimestamp; - -import java.sql.Timestamp; -import java.util.Date; -import java.util.List; /** * Function to return the timestamp of the empty column which functions as the row timestamp. The - * result returned can be used for debugging(eg. using HBase shell), logging etc. - * Can also be used in sql predicates. + * result returned can be used for debugging(eg. using HBase shell), logging etc. Can also be used + * in sql predicates. */ @BuiltInFunction(name = PhoenixRowTimestampFunction.NAME, - nodeClass= PhoenixRowTimestampParseNode.class, - args = {}) + nodeClass = PhoenixRowTimestampParseNode.class, args = {}) public class PhoenixRowTimestampFunction extends ScalarFunction { - public static final String NAME = "PHOENIX_ROW_TIMESTAMP"; + public static final String NAME = "PHOENIX_ROW_TIMESTAMP"; - public PhoenixRowTimestampFunction() { - } + public PhoenixRowTimestampFunction() { + } - /** - * @param children An EMPTY_COLUMN key value expression injected thru - * {@link org.apache.phoenix.parse.PhoenixRowTimestampParseNode#create create} - * will cause the empty column key value to be evaluated during scan filter processing. - */ - public PhoenixRowTimestampFunction(List children) { - super(children); - if ((children.size() != 1) || !children.get(0).getClass().isAssignableFrom( - KeyValueColumnExpression.class)) { - throw new IllegalArgumentException( - "PhoenixRowTimestampFunction should only have an " - + "EMPTY_COLUMN key value expression." - ); - } - if (!(children.get(0).getDataType().equals(PDate.INSTANCE))) { - throw new IllegalArgumentException( - "PhoenixRowTimestampFunction should have an " - + "EMPTY_COLUMN key value expression of type PDate" - ); - } + /** + * @param children An EMPTY_COLUMN key value expression injected thru + * {@link org.apache.phoenix.parse.PhoenixRowTimestampParseNode#create create} + * will cause the empty column key value to be evaluated during scan filter + * processing. 
+ */ + public PhoenixRowTimestampFunction(List children) { + super(children); + if ( + (children.size() != 1) + || !children.get(0).getClass().isAssignableFrom(KeyValueColumnExpression.class) + ) { + throw new IllegalArgumentException( + "PhoenixRowTimestampFunction should only have an " + "EMPTY_COLUMN key value expression."); } - - @Override - public String getName() { - return NAME; + if (!(children.get(0).getDataType().equals(PDate.INSTANCE))) { + throw new IllegalArgumentException("PhoenixRowTimestampFunction should have an " + + "EMPTY_COLUMN key value expression of type PDate"); } + } - /** - * The evaluate method is called under the following conditions - - * 1. When PHOENIX_ROW_TIMESTAMP() is evaluated in the projection list. - * Since the EMPTY_COLUMN is not part of the table column list, - * emptyColumnKV will be null. - * PHOENIX-4179 ensures that the maxTS (which will be EMPTY_COLUMN ts) - * is returned for the tuple. - * - * 2. When PHOENIX_ROW_TIMESTAMP() is evaluated in the backend as part of the where clause. - * Here the emptyColumnKV will not be null, since we ensured that by adding it to - * scan column list in PhoenixRowTimestampParseNode. - * In this case the emptyColumnKV.getTimestamp() is used. - */ - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - - if (tuple == null) { - return false; - } + @Override + public String getName() { + return NAME; + } - byte[] emptyCF = ((KeyValueColumnExpression)children.get(0)).getColumnFamily(); - byte[] emptyCQ = ((KeyValueColumnExpression)children.get(0)).getColumnQualifier(); - long ts; - // Currently there is no good way to figure out if this function is being evaluated during - // result or filter processing. - // For now relying on whether empty column exists, - // if true indicates filter processing else result processing. - Cell emptyColumnKV = tuple.getValue(emptyCF, emptyCQ); - if ((emptyColumnKV != null) && CellUtil.matchingColumn(emptyColumnKV, emptyCF, emptyCQ)) { - ts = emptyColumnKV.getTimestamp(); - } else { - ts = tuple.getValue(0).getTimestamp(); - } + /** + * The evaluate method is called under the following conditions - 1. When PHOENIX_ROW_TIMESTAMP() + * is evaluated in the projection list. Since the EMPTY_COLUMN is not part of the table column + * list, emptyColumnKV will be null. PHOENIX-4179 ensures that the maxTS (which will be + * EMPTY_COLUMN ts) is returned for the tuple. 2. When PHOENIX_ROW_TIMESTAMP() is evaluated in the + * backend as part of the where clause. Here the emptyColumnKV will not be null, since we ensured + * that by adding it to scan column list in PhoenixRowTimestampParseNode. In this case the + * emptyColumnKV.getTimestamp() is used. + */ + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Date rowTimestamp = new Date(ts); - ptr.set(PDate.INSTANCE.toBytes(rowTimestamp)); - return true; + if (tuple == null) { + return false; } - @Override - public PDataType getDataType() { - return PDate.INSTANCE; + byte[] emptyCF = ((KeyValueColumnExpression) children.get(0)).getColumnFamily(); + byte[] emptyCQ = ((KeyValueColumnExpression) children.get(0)).getColumnQualifier(); + long ts; + // Currently there is no good way to figure out if this function is being evaluated during + // result or filter processing. + // For now relying on whether empty column exists, + // if true indicates filter processing else result processing. 
+ Cell emptyColumnKV = tuple.getValue(emptyCF, emptyCQ); + if ((emptyColumnKV != null) && CellUtil.matchingColumn(emptyColumnKV, emptyCF, emptyCQ)) { + ts = emptyColumnKV.getTimestamp(); + } else { + ts = tuple.getValue(0).getTimestamp(); } - @Override - public boolean isStateless() { - return false; - } + Date rowTimestamp = new Date(ts); + ptr.set(PDate.INSTANCE.toBytes(rowTimestamp)); + return true; + } - @Override - public Determinism getDeterminism() { - return Determinism.PER_ROW; - } + @Override + public PDataType getDataType() { + return PDate.INSTANCE; + } + + @Override + public boolean isStateless() { + return false; + } + + @Override + public Determinism getDeterminism() { + return Determinism.PER_ROW; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java index 1125ce1817a..a05af5cfa15 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,26 +26,27 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = PowerFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }), - @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) +@BuiltInFunction(name = PowerFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }), + @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class PowerFunction extends JavaMathTwoArgumentFunction { - public static final String NAME = "POWER"; + public static final String NAME = "POWER"; - public PowerFunction() { - } + public PowerFunction() { + } - public PowerFunction(List children) throws SQLException { - super(children); - } + public PowerFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg, double secondArg) { - return Math.pow(firstArg, secondArg); - } + @Override + protected double compute(double firstArg, double secondArg) { + return Math.pow(firstArg, secondArg); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java index 6aed48ba5f7..315adf55155 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
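Relating the two evaluation contexts described in the PhoenixRowTimestampFunction javadoc above to client-side usage, the hedged JDBC sketch below runs the function once in the projection list and once in a where clause. The connection URL and the EVENTS table are hypothetical, and the exact coercion of the bind parameter is assumed rather than taken from this patch.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Timestamp;

    public class RowTimestampSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
          // Case 1: projection list -- the value comes from the row's maximum cell timestamp.
          try (PreparedStatement ps =
              conn.prepareStatement("SELECT PHOENIX_ROW_TIMESTAMP() FROM EVENTS LIMIT 10");
              ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
              System.out.println(rs.getDate(1));
            }
          }
          // Case 2: where clause -- the empty column is added to the scan and its timestamp is used.
          try (PreparedStatement ps = conn.prepareStatement(
              "SELECT ID FROM EVENTS WHERE PHOENIX_ROW_TIMESTAMP() > ?")) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis() - 3_600_000L));
            try (ResultSet rs = ps.executeQuery()) {
              while (rs.next()) {
                System.out.println(rs.getString(1));
              }
            }
          }
        }
      }
    }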
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.function; import java.util.Arrays; import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; - -import org.apache.hadoop.hbase.CompareOperator; import java.util.Set; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.phoenix.compile.KeyPart; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.query.KeyRange; @@ -37,102 +35,103 @@ import org.apache.phoenix.util.ByteUtil; abstract public class PrefixFunction extends ScalarFunction { - public PrefixFunction() { } + public PrefixFunction() { + } + + public PrefixFunction(List children) { + super(children); + } - public PrefixFunction(List children) { - super(children); + @Override + public int getKeyFormationTraversalIndex() { + return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; + } + + protected boolean extractNode() { + return false; + } + + @Override + public KeyPart newKeyPart(final KeyPart childPart) { + return new PrefixKeyPart(childPart); + } + + private class PrefixKeyPart implements KeyPart { + private final Set extractNodes = extractNode() + ? new LinkedHashSet<>(Collections. singleton(PrefixFunction.this)) + : Collections.emptySet(); + private final KeyPart childPart; + + PrefixKeyPart(KeyPart childPart) { + this.childPart = childPart; } @Override - public int getKeyFormationTraversalIndex() { - return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; - } - - protected boolean extractNode() { - return false; + public PColumn getColumn() { + return childPart.getColumn(); } @Override - public KeyPart newKeyPart(final KeyPart childPart) { - return new PrefixKeyPart(childPart); + public Set getExtractNodes() { + return extractNodes; } - private class PrefixKeyPart implements KeyPart { - private final Set extractNodes = extractNode() ? 
- new LinkedHashSet<>(Collections.singleton(PrefixFunction.this)) - : Collections.emptySet(); - private final KeyPart childPart; - - PrefixKeyPart(KeyPart childPart) { - this.childPart = childPart; - } - - @Override - public PColumn getColumn() { - return childPart.getColumn(); - } - - @Override - public Set getExtractNodes() { - return extractNodes; + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + byte[] lowerRange = KeyRange.UNBOUND; + byte[] upperRange = KeyRange.UNBOUND; + boolean lowerInclusive = true; + PDataType type = getColumn().getDataType(); + switch (op) { + case EQUAL: + lowerRange = evaluateExpression(rhs); + upperRange = ByteUtil.nextKey(lowerRange); + break; + case GREATER: + lowerRange = ByteUtil.nextKey(evaluateExpression(rhs)); + break; + case LESS_OR_EQUAL: + upperRange = ByteUtil.nextKey(evaluateExpression(rhs)); + lowerInclusive = false; + break; + default: + return childPart.getKeyRange(op, rhs); + } + PColumn column = getColumn(); + Integer length = column.getMaxLength(); + if (type.isFixedWidth()) { + if (length != null) { // Sanity check - shouldn't be necessary + // Don't pad based on current sort order, but instead use our + // minimum byte as otherwise we'll end up skipping rows in + // the case of descending, since rows with more padding appear + // *after* rows with no padding. + if (lowerRange != KeyRange.UNBOUND) { + lowerRange = type.pad(lowerRange, length, SortOrder.ASC); + } + if (upperRange != KeyRange.UNBOUND) { + upperRange = type.pad(upperRange, length, SortOrder.ASC); + } } - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - byte[] lowerRange = KeyRange.UNBOUND; - byte[] upperRange = KeyRange.UNBOUND; - boolean lowerInclusive = true; - PDataType type = getColumn().getDataType(); - switch (op) { - case EQUAL: - lowerRange = evaluateExpression(rhs); - upperRange = ByteUtil.nextKey(lowerRange); - break; - case GREATER: - lowerRange = ByteUtil.nextKey(evaluateExpression(rhs)); - break; - case LESS_OR_EQUAL: - upperRange = ByteUtil.nextKey(evaluateExpression(rhs)); - lowerInclusive = false; - break; - default: - return childPart.getKeyRange(op, rhs); - } - PColumn column = getColumn(); - Integer length = column.getMaxLength(); - if (type.isFixedWidth()) { - if (length != null) { // Sanity check - shouldn't be necessary - // Don't pad based on current sort order, but instead use our - // minimum byte as otherwise we'll end up skipping rows in - // the case of descending, since rows with more padding appear - // *after* rows with no padding. - if (lowerRange != KeyRange.UNBOUND) { - lowerRange = type.pad(lowerRange, length, SortOrder.ASC); - } - if (upperRange != KeyRange.UNBOUND) { - upperRange = type.pad(upperRange, length, SortOrder.ASC); - } - } - } else if (column.getSortOrder() == SortOrder.DESC && getTable().rowKeyOrderOptimizable()) { - // Append a zero byte if descending since a \xFF byte will be appended to the lowerRange - // causing rows to be skipped that should be included. For example, with rows 'ab', 'a', - // a lowerRange of 'a\xFF' would skip 'ab', while 'a\x00\xFF' would not. 
- if (lowerRange != KeyRange.UNBOUND) { - lowerRange = Arrays.copyOf(lowerRange, lowerRange.length+1); - lowerRange[lowerRange.length-1] = QueryConstants.SEPARATOR_BYTE; - } - } - KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, false); - if (column.getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; + } else if (column.getSortOrder() == SortOrder.DESC && getTable().rowKeyOrderOptimizable()) { + // Append a zero byte if descending since a \xFF byte will be appended to the lowerRange + // causing rows to be skipped that should be included. For example, with rows 'ab', 'a', + // a lowerRange of 'a\xFF' would skip 'ab', while 'a\x00\xFF' would not. + if (lowerRange != KeyRange.UNBOUND) { + lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 1); + lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE; } + } + KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, false); + if (column.getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; + } - @Override - public PTable getTable() { - return childPart.getTable(); - } + @Override + public PTable getTable() { + return childPart.getTable(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java index c3767cddd3b..fea9e611bc3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RTrimFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.sql.SQLException; import java.util.Arrays; import java.util.Collections; -import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -41,173 +40,169 @@ import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; - /** - * - * Implementation of the {@code RTrim() } build-in function. It removes from the right end of - * {@code } space character and other function bytes in single byte utf8 characters set - * - * + * Implementation of the {@code RTrim() } build-in function. 
It removes from the right end + * of {@code } space character and other function bytes in single byte utf8 characters set * @since 0.1 */ -@BuiltInFunction(name=RTrimFunction.NAME, args={ - @Argument(allowedTypes={PVarchar.class})}) +@BuiltInFunction(name = RTrimFunction.NAME, args = { @Argument(allowedTypes = { PVarchar.class }) }) public class RTrimFunction extends ScalarFunction { - public static final String NAME = "RTRIM"; - - public RTrimFunction() { } - - public RTrimFunction(List children) throws SQLException { - super(children); - } - - private Expression getStringExpression() { - return children.get(0); - } - - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // Starting from the end of the byte, look for all single bytes at the end of the string - // that is below SPACE_UTF8 (space and control characters) or above (control chars). - if (!getStringExpression().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - byte[] string = ptr.get(); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - - SortOrder sortOrder = getStringExpression().getSortOrder(); - int i = StringUtil.getFirstNonBlankCharIdxFromEnd(string, offset, length, sortOrder); - if (i == offset - 1) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - ptr.set(string, offset, i - offset + 1); - return true; + public static final String NAME = "RTRIM"; + + public RTrimFunction() { + } + + public RTrimFunction(List children) throws SQLException { + super(children); + } + + private Expression getStringExpression() { + return children.get(0); + } + + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // Starting from the end of the byte, look for all single bytes at the end of the string + // that is below SPACE_UTF8 (space and control characters) or above (control chars). 
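    // Rough editorial sketch of the scan delegated to StringUtil.getFirstNonBlankCharIdxFromEnd
    // (approximation for ASC-sorted, single-byte ASCII content; the real helper also handles
    // DESC sort order):
    //   int i = offset + length - 1;
    //   while (i >= offset && string[i] <= StringUtil.SPACE_UTF8) { i--; }
    // so 'foo   ' trims to 'foo', and an all-blank value yields i == offset - 1 (empty result).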
+ if (!getStringExpression().evaluate(tuple, ptr)) { + return false; } - - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES_IF_LAST; + if (ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - - @Override - public int getKeyFormationTraversalIndex() { - return 0; + byte[] string = ptr.get(); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + + SortOrder sortOrder = getStringExpression().getSortOrder(); + int i = StringUtil.getFirstNonBlankCharIdxFromEnd(string, offset, length, sortOrder); + if (i == offset - 1) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } - - @Override - public KeyPart newKeyPart(final KeyPart childPart) { - return new KeyPart() { - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - byte[] lowerRange = KeyRange.UNBOUND; - byte[] upperRange = KeyRange.UNBOUND; - boolean lowerInclusive = true; - boolean upperInclusive = false; - - PDataType type = getColumn().getDataType(); - SortOrder sortOrder = getColumn().getSortOrder(); - switch (op) { - case LESS_OR_EQUAL: - lowerInclusive = false; - case EQUAL: - upperRange = evaluateExpression(rhs); - if (op == CompareOperator.EQUAL) { - lowerRange = upperRange; - } - if (sortOrder == SortOrder.ASC || !getTable().rowKeyOrderOptimizable()) { - upperRange = Arrays.copyOf(upperRange, upperRange.length + 1); - upperRange[upperRange.length-1] = StringUtil.SPACE_UTF8; - ByteUtil.nextKey(upperRange, upperRange.length); - } else { - upperInclusive = true; - if (op == CompareOperator.LESS_OR_EQUAL) { - // Nothing more to do here, as the biggest value for DESC - // will be the RHS value. - break; - } - /* - * Somewhat tricky to get the range correct for the DESC equality case. - * The lower range is the RHS value followed by any number of inverted spaces. - * We need to add a zero byte as the lower range will have an \xFF byte - * appended to it and otherwise we'd skip past any rows where there is more - * than one space following the RHS. - * The upper range should span up to and including the RHS value. We need - * to add our own \xFF as otherwise this will look like a degenerate query - * since the lower would be bigger than the upper range. - */ - lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 2); - lowerRange[lowerRange.length-2] = StringUtil.INVERTED_SPACE_UTF8; - lowerRange[lowerRange.length-1] = QueryConstants.SEPARATOR_BYTE; - upperRange = Arrays.copyOf(upperRange, upperRange.length + 1); - upperRange[upperRange.length-1] = QueryConstants.DESC_SEPARATOR_BYTE; - } - break; - default: - // TOOD: Is this ok for DESC? - return childPart.getKeyRange(op, rhs); - } - Integer length = getColumn().getMaxLength(); - if (type.isFixedWidth() && length != null) { - // Don't pad based on current sort order, but instead use our - // minimum byte as otherwise we'll end up skipping rows in - // the case of descending, since rows with more padding appear - // *after* rows with no padding. 
- if (lowerRange != KeyRange.UNBOUND) { - lowerRange = type.pad(lowerRange, length, SortOrder.ASC); - } - if (upperRange != KeyRange.UNBOUND) { - upperRange = type.pad(upperRange, length, SortOrder.ASC); - } - } - KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - if (getColumn().getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; - } - - @Override - public Set getExtractNodes() { - // We cannot extract the node, as we may have false positives with trailing - // non blank characters such as 'foo bar' where the RHS constant is 'foo'. - return Collections.emptySet(); - } - - @Override - public PColumn getColumn() { - return childPart.getColumn(); + ptr.set(string, offset, i - offset + 1); + return true; + } + + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES_IF_LAST; + } + + @Override + public int getKeyFormationTraversalIndex() { + return 0; + } + + @Override + public KeyPart newKeyPart(final KeyPart childPart) { + return new KeyPart() { + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + byte[] lowerRange = KeyRange.UNBOUND; + byte[] upperRange = KeyRange.UNBOUND; + boolean lowerInclusive = true; + boolean upperInclusive = false; + + PDataType type = getColumn().getDataType(); + SortOrder sortOrder = getColumn().getSortOrder(); + switch (op) { + case LESS_OR_EQUAL: + lowerInclusive = false; + case EQUAL: + upperRange = evaluateExpression(rhs); + if (op == CompareOperator.EQUAL) { + lowerRange = upperRange; } - - @Override - public PTable getTable() { - return childPart.getTable(); + if (sortOrder == SortOrder.ASC || !getTable().rowKeyOrderOptimizable()) { + upperRange = Arrays.copyOf(upperRange, upperRange.length + 1); + upperRange[upperRange.length - 1] = StringUtil.SPACE_UTF8; + ByteUtil.nextKey(upperRange, upperRange.length); + } else { + upperInclusive = true; + if (op == CompareOperator.LESS_OR_EQUAL) { + // Nothing more to do here, as the biggest value for DESC + // will be the RHS value. + break; + } + /* + * Somewhat tricky to get the range correct for the DESC equality case. The lower + * range is the RHS value followed by any number of inverted spaces. We need to add a + * zero byte as the lower range will have an \xFF byte appended to it and otherwise + * we'd skip past any rows where there is more than one space following the RHS. The + * upper range should span up to and including the RHS value. We need to add our own + * \xFF as otherwise this will look like a degenerate query since the lower would be + * bigger than the upper range. + */ + lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 2); + lowerRange[lowerRange.length - 2] = StringUtil.INVERTED_SPACE_UTF8; + lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE; + upperRange = Arrays.copyOf(upperRange, upperRange.length + 1); + upperRange[upperRange.length - 1] = QueryConstants.DESC_SEPARATOR_BYTE; } - }; - } - - @Override - public Integer getMaxLength() { - return getStringExpression().getMaxLength(); - } - - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override - public String getName() { - return NAME; - } + break; + default: + // TOOD: Is this ok for DESC? 
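          // Note: only EQUAL and LESS_OR_EQUAL are special-cased above; GREATER,
          // GREATER_OR_EQUAL, LESS and any other operator reach this default and simply
          // delegate to the child KeyPart's key range.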
+ return childPart.getKeyRange(op, rhs); + } + Integer length = getColumn().getMaxLength(); + if (type.isFixedWidth() && length != null) { + // Don't pad based on current sort order, but instead use our + // minimum byte as otherwise we'll end up skipping rows in + // the case of descending, since rows with more padding appear + // *after* rows with no padding. + if (lowerRange != KeyRange.UNBOUND) { + lowerRange = type.pad(lowerRange, length, SortOrder.ASC); + } + if (upperRange != KeyRange.UNBOUND) { + upperRange = type.pad(upperRange, length, SortOrder.ASC); + } + } + KeyRange range = + KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + if (getColumn().getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; + } + + @Override + public Set getExtractNodes() { + // We cannot extract the node, as we may have false positives with trailing + // non blank characters such as 'foo bar' where the RHS constant is 'foo'. + return Collections.emptySet(); + } + + @Override + public PColumn getColumn() { + return childPart.getColumn(); + } + + @Override + public PTable getTable() { + return childPart.getTable(); + } + }; + } + + @Override + public Integer getMaxLength() { + return getStringExpression().getMaxLength(); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java index 56e0642a7fb..203f2c3c8aa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RandomFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,11 +34,12 @@ import org.apache.phoenix.schema.types.PLong; /** - * Random function that produces a unique value upon each invocation unless a seed is provided. - * If a seed is provided the returned value is identical across each invocation for a single row, but different across multiple rows. - * The seed must be a constant. + * Random function that produces a unique value upon each invocation unless a seed is provided. If a + * seed is provided the returned value is identical across each invocation for a single row, but + * different across multiple rows. The seed must be a constant. *

* Example: + * *

  * {@code
  * 0: jdbc:phoenix:localhost> select rand(), rand(), rand(1), rand(2), rand(1) from t;
@@ -60,98 +61,99 @@
  * }
 * </pre>
*/ -@BuiltInFunction(name = RandomFunction.NAME, args = {@Argument(allowedTypes={PLong.class},defaultValue="null",isConstant=true)}) +@BuiltInFunction(name = RandomFunction.NAME, + args = { @Argument(allowedTypes = { PLong.class }, defaultValue = "null", isConstant = true) }) public class RandomFunction extends ScalarFunction { - public static final String NAME = "RAND"; - private Random random; - private boolean hasSeed; - private Double current; - - public RandomFunction() { - } - - public RandomFunction(List children) { - super(children); - init(); - } - - private void init() { - Number seed = (Number)((LiteralExpression)children.get(0)).getValue(); - random = seed == null ? new Random() : new Random(seed.longValue()); - hasSeed = seed != null; - current = null; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (current == null) { - current = random.nextDouble(); - } - ptr.set(PDouble.INSTANCE.toBytes(current)); - return true; - } - - // produce a new random value for each row - @Override - public void reset() { - super.reset(); - current = null; - } - - @Override - public PDataType getDataType() { - return PDouble.INSTANCE; + public static final String NAME = "RAND"; + private Random random; + private boolean hasSeed; + private Double current; + + public RandomFunction() { + } + + public RandomFunction(List children) { + super(children); + init(); + } + + private void init() { + Number seed = (Number) ((LiteralExpression) children.get(0)).getValue(); + random = seed == null ? new Random() : new Random(seed.longValue()); + hasSeed = seed != null; + current = null; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (current == null) { + current = random.nextDouble(); } - - @Override - public String getName() { - return NAME; - } - - @Override - public Determinism getDeterminism() { - return hasSeed ? Determinism.PER_ROW : Determinism.PER_INVOCATION; - } - - @Override - public boolean isCloneExpression() { - return isCloneExpressionByDeterminism(this); - } - - @Override - public boolean isStateless() { - return true; - } - - // take the random object onto account - @Override - public int hashCode() { - int hashCode = super.hashCode(); - return hasSeed ? hashCode : (hashCode + random.hashCode()); - } - - // take the random object onto account, as otherwise we'll potentially collapse two - // RAND() calls into a single one. 
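    // (i.e. two seedless RAND() expressions compare unequal and stay independent, matching the
    // javadoc example above where RAND(), RAND() produce different values per row, whereas two
    // RAND(1) calls with the same seed may be collapsed since they are deterministic per row.)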
- @Override - public boolean equals(Object obj) { - return super.equals(obj) && (hasSeed || random.equals(((RandomFunction)obj).random)); - } - - // make sure we do not show the default 'null' parameter - @Override - public final String toString() { - StringBuilder buf = new StringBuilder(getName() + "("); - if (!hasSeed) return buf.append(")").toString(); - for (int i = 0; i < children.size() - 1; i++) { - buf.append(children.get(i) + ", "); - } - buf.append(children.get(children.size()-1) + ")"); - return buf.toString(); + ptr.set(PDouble.INSTANCE.toBytes(current)); + return true; + } + + // produce a new random value for each row + @Override + public void reset() { + super.reset(); + current = null; + } + + @Override + public PDataType getDataType() { + return PDouble.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public Determinism getDeterminism() { + return hasSeed ? Determinism.PER_ROW : Determinism.PER_INVOCATION; + } + + @Override + public boolean isCloneExpression() { + return isCloneExpressionByDeterminism(this); + } + + @Override + public boolean isStateless() { + return true; + } + + // take the random object onto account + @Override + public int hashCode() { + int hashCode = super.hashCode(); + return hasSeed ? hashCode : (hashCode + random.hashCode()); + } + + // take the random object onto account, as otherwise we'll potentially collapse two + // RAND() calls into a single one. + @Override + public boolean equals(Object obj) { + return super.equals(obj) && (hasSeed || random.equals(((RandomFunction) obj).random)); + } + + // make sure we do not show the default 'null' parameter + @Override + public final String toString() { + StringBuilder buf = new StringBuilder(getName() + "("); + if (!hasSeed) return buf.append(")").toString(); + for (int i = 0; i < children.size() - 1; i++) { + buf.append(children.get(i) + ", "); } + buf.append(children.get(children.size() - 1) + ")"); + return buf.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java index 1a29bcaf9c3..12e5befe33b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpReplaceFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,138 +34,132 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; - /** - * - * Function similar to the regexp_replace function in Postgres, which is used to pattern - * match a segment of the string. Usage: - * {@code REGEXP_REPLACE(,,) } - * source_char is the string in which we want to perform string replacement. pattern is a - * Java compatible regular expression string, and we replace all the matching part with - * replace_string. The first 2 arguments are required and are {@link org.apache.phoenix.schema.types.PVarchar}, - * the replace_string is default to empty string. 
- * - * The function returns a {@link org.apache.phoenix.schema.types.PVarchar} - * - * + * Function similar to the regexp_replace function in Postgres, which is used to pattern match a + * segment of the string. Usage: {@code REGEXP_REPLACE(,,) } + * source_char is the string in which we want to perform string replacement. pattern is a Java + * compatible regular expression string, and we replace all the matching part with replace_string. + * The first 2 arguments are required and are {@link org.apache.phoenix.schema.types.PVarchar}, the + * replace_string is default to empty string. The function returns a + * {@link org.apache.phoenix.schema.types.PVarchar} * @since 0.1 */ -@BuiltInFunction(name=RegexpReplaceFunction.NAME, - nodeClass = RegexpReplaceParseNode.class, args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},defaultValue="null")}, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {ByteBasedRegexpReplaceFunction.class, StringBasedRegexpReplaceFunction.class}) +@BuiltInFunction(name = RegexpReplaceFunction.NAME, nodeClass = RegexpReplaceParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "null") }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, derivedFunctions = { + ByteBasedRegexpReplaceFunction.class, StringBasedRegexpReplaceFunction.class }) public abstract class RegexpReplaceFunction extends ScalarFunction { - public static final String NAME = "REGEXP_REPLACE"; - - private static final PVarchar TYPE = PVarchar.INSTANCE; - private byte [] rStrBytes; - private int rStrOffset, rStrLen; - private AbstractBasePattern pattern; - - public RegexpReplaceFunction() { } - - // Expect 1 arguments, the pattern. - public RegexpReplaceFunction(List children) { - super(children); - init(); + public static final String NAME = "REGEXP_REPLACE"; + + private static final PVarchar TYPE = PVarchar.INSTANCE; + private byte[] rStrBytes; + private int rStrOffset, rStrLen; + private AbstractBasePattern pattern; + + public RegexpReplaceFunction() { + } + + // Expect 1 arguments, the pattern. 
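  // (The children list declared above actually holds up to three expressions: source string,
  // pattern, and the optional replacement. Per the class javadoc, something like
  // REGEXP_REPLACE('abc123xyz', '[0-9]+', '#') would yield 'abc#xyz'.)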
+ public RegexpReplaceFunction(List children) { + super(children); + init(); + } + + protected abstract AbstractBasePattern compilePatternSpec(String value); + + private void init() { + ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(); + Expression e = getPatternStrExpression(); + if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, tmpPtr)) { + String patternStr = (String) TYPE.toObject(tmpPtr, e.getDataType(), e.getSortOrder()); + if (patternStr != null) pattern = compilePatternSpec(patternStr); } - - protected abstract AbstractBasePattern compilePatternSpec(String value); - - private void init() { - ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(); - Expression e = getPatternStrExpression(); - if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, tmpPtr)) { - String patternStr = (String) TYPE.toObject(tmpPtr, e.getDataType(), e.getSortOrder()); - if (patternStr != null) pattern = compilePatternSpec(patternStr); - } - e = getReplaceStrExpression(); - if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, tmpPtr)) { - TYPE.coerceBytes(tmpPtr, TYPE, e.getSortOrder(), SortOrder.ASC); - rStrBytes = tmpPtr.get(); - rStrOffset = tmpPtr.getOffset(); - rStrLen = tmpPtr.getLength(); - } else { - rStrBytes = null; - } + e = getReplaceStrExpression(); + if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, tmpPtr)) { + TYPE.coerceBytes(tmpPtr, TYPE, e.getSortOrder(), SortOrder.ASC); + rStrBytes = tmpPtr.get(); + rStrOffset = tmpPtr.getOffset(); + rStrLen = tmpPtr.getLength(); + } else { + rStrBytes = null; } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - AbstractBasePattern pattern = this.pattern; - if (pattern == null) { - Expression e = getPatternStrExpression(); - if (!e.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - String patternStr = (String) TYPE.toObject(ptr, e.getDataType(), e.getSortOrder()); - if (patternStr == null) { - return false; - } else { - pattern = compilePatternSpec(patternStr); - } - } - - byte[] rStrBytes = this.rStrBytes; - int rStrOffset = this.rStrOffset, rStrLen = this.rStrLen; - if (rStrBytes == null) { - Expression replaceStrExpression = getReplaceStrExpression(); - if (!replaceStrExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - TYPE.coerceBytes(ptr, TYPE, replaceStrExpression.getSortOrder(), SortOrder.ASC); - rStrBytes = ptr.get(); - rStrOffset = ptr.getOffset(); - rStrLen = ptr.getLength(); - } - - Expression sourceStrExpression = getSourceStrExpression(); - if (!sourceStrExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - TYPE.coerceBytes(ptr, TYPE, sourceStrExpression.getSortOrder(), SortOrder.ASC); - - pattern.replaceAll(ptr, rStrBytes, rStrOffset, rStrLen); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + AbstractBasePattern pattern = this.pattern; + if (pattern == null) { + Expression e = getPatternStrExpression(); + if (!e.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + String patternStr = (String) TYPE.toObject(ptr, e.getDataType(), e.getSortOrder()); + if (patternStr == null) { + return false; + } else { + pattern = compilePatternSpec(patternStr); + } } - private Expression getSourceStrExpression() { - return children.get(0); - } - - private 
Expression getPatternStrExpression() { - return children.get(1); - } - - private Expression getReplaceStrExpression() { - return children.get(2); - } - - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + byte[] rStrBytes = this.rStrBytes; + int rStrOffset = this.rStrOffset, rStrLen = this.rStrLen; + if (rStrBytes == null) { + Expression replaceStrExpression = getReplaceStrExpression(); + if (!replaceStrExpression.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; + } + TYPE.coerceBytes(ptr, TYPE, replaceStrExpression.getSortOrder(), SortOrder.ASC); + rStrBytes = ptr.get(); + rStrOffset = ptr.getOffset(); + rStrLen = ptr.getLength(); } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); + Expression sourceStrExpression = getSourceStrExpression(); + if (!sourceStrExpression.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + TYPE.coerceBytes(ptr, TYPE, sourceStrExpression.getSortOrder(), SortOrder.ASC); + + pattern.replaceAll(ptr, rStrBytes, rStrOffset, rStrLen); + return true; + } + + private Expression getSourceStrExpression() { + return children.get(0); + } + + private Expression getPatternStrExpression() { + return children.get(1); + } + + private Expression getReplaceStrExpression() { + return children.get(2); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java index e86fe3b56fd..35a32383f32 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSplitFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,96 +37,95 @@ /** * Function to split a string value into a {@code VARCHAR_ARRAY}. *

- * Usage: - * {@code REGEXP_SPLIT(<source_str>, <split_pattern>)} + * Usage: {@code REGEXP_SPLIT(<source_str>, <split_pattern>)} *

- * {@code source_str} is the string in which we want to split. {@code split_pattern} is a - * Java compatible regular expression string to split the source string. - * - * The function returns a {@link org.apache.phoenix.schema.types.PVarcharArray} + * {@code source_str} is the string in which we want to split. {@code split_pattern} is a Java + * compatible regular expression string to split the source string. The function returns a + * {@link org.apache.phoenix.schema.types.PVarcharArray} */ - @FunctionParseNode.BuiltInFunction(name=RegexpSplitFunction.NAME, - nodeClass = RegexpSplitParseNode.class, args= { - @FunctionParseNode.Argument(allowedTypes={PVarchar.class}), - @FunctionParseNode.Argument(allowedTypes={PVarchar.class})}, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {ByteBasedRegexpSplitFunction.class, StringBasedRegexpSplitFunction.class}) +@FunctionParseNode.BuiltInFunction(name = RegexpSplitFunction.NAME, + nodeClass = RegexpSplitParseNode.class, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }) }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, + derivedFunctions = { ByteBasedRegexpSplitFunction.class, StringBasedRegexpSplitFunction.class }) public abstract class RegexpSplitFunction extends ScalarFunction { - public static final String NAME = "REGEXP_SPLIT"; + public static final String NAME = "REGEXP_SPLIT"; - private static final PVarchar TYPE = PVarchar.INSTANCE; + private static final PVarchar TYPE = PVarchar.INSTANCE; - private AbstractBaseSplitter initializedSplitter = null; + private AbstractBaseSplitter initializedSplitter = null; - public RegexpSplitFunction() {} + public RegexpSplitFunction() { + } - public RegexpSplitFunction(List children) { - super(children); - init(); - } + public RegexpSplitFunction(List children) { + super(children); + init(); + } - private void init() { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Expression e = getPatternStrExpression(); - if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, ptr)) { - String pattern = (String) TYPE.toObject(ptr, TYPE, e.getSortOrder()); - if (pattern != null) { - initializedSplitter = compilePatternSpec(pattern); - } - } + private void init() { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Expression e = getPatternStrExpression(); + if (e.isStateless() && e.getDeterminism() == Determinism.ALWAYS && e.evaluate(null, ptr)) { + String pattern = (String) TYPE.toObject(ptr, TYPE, e.getSortOrder()); + if (pattern != null) { + initializedSplitter = compilePatternSpec(pattern); + } } - - protected abstract AbstractBaseSplitter compilePatternSpec(String value); - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); + } + + protected abstract AbstractBaseSplitter compilePatternSpec(String value); + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + AbstractBaseSplitter splitter = initializedSplitter; + if (splitter == null) { + Expression e = getPatternStrExpression(); + if (e.evaluate(tuple, ptr)) { + String pattern = (String) TYPE.toObject(ptr, TYPE, e.getSortOrder()); + if (pattern != null) { + splitter = compilePatternSpec(pattern); + } else { + 
ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); // set ptr to null + return true; + } + } else { + return false; + } } - @Override - public String getName() { - return NAME; + Expression e = getSourceStrExpression(); + if (!e.evaluate(tuple, ptr)) { + return false; } + TYPE.coerceBytes(ptr, TYPE, e.getSortOrder(), SortOrder.ASC); - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - AbstractBaseSplitter splitter = initializedSplitter; - if (splitter == null) { - Expression e = getPatternStrExpression(); - if (e.evaluate(tuple, ptr)) { - String pattern = (String) TYPE.toObject(ptr, TYPE, e.getSortOrder()); - if (pattern != null) { - splitter = compilePatternSpec(pattern); - } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); // set ptr to null - return true; - } - } else { - return false; - } - } - - Expression e = getSourceStrExpression(); - if (!e.evaluate(tuple, ptr)) { - return false; - } - TYPE.coerceBytes(ptr, TYPE, e.getSortOrder(), SortOrder.ASC); + return splitter.split(ptr); + } - return splitter.split(ptr); - } + private Expression getSourceStrExpression() { + return children.get(0); + } - private Expression getSourceStrExpression() { - return children.get(0); - } + private Expression getPatternStrExpression() { + return children.get(1); + } - private Expression getPatternStrExpression() { - return children.get(1); - } - - @Override - public PDataType getDataType() { - return PVarcharArray.INSTANCE; - } + @Override + public PDataType getDataType() { + return PVarcharArray.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java index a699a6be9aa..300bdfff85e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RegexpSubstrFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,165 +36,171 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; - /** - * - * Implementation of {@code REGEXP_SUBSTR(, , ) } built-in function, - * where {@code } is the offset from the start of {@code }. Positive offset is treated as 1-based, - * a zero offset is treated as 0-based, and a negative offset starts from the end of the string - * working backwards. The {@code } is the pattern we would like to search for in the {@code } string. - * The function returns the first occurrence of any substring in the {@code } string that matches - * the {@code } input as a VARCHAR. - * - * + * Implementation of {@code REGEXP_SUBSTR(, , ) } built-in function, where + * {@code } is the offset from the start of {@code }. Positive offset is treated + * as 1-based, a zero offset is treated as 0-based, and a negative offset starts from the end of the + * string working backwards. The {@code } is the pattern we would like to search for in + * the {@code } string. The function returns the first occurrence of any substring in the + * {@code } string that matches the {@code } input as a VARCHAR. 
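 * For example, {@code REGEXP_SUBSTR('abc123def', '[0-9]+', 1)} evaluates to {@code '123'}.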
* @since 0.1 */ -@BuiltInFunction(name=RegexpSubstrFunction.NAME, - nodeClass = RegexpSubstrParseNode.class, args={ - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PLong.class}, defaultValue="1")}, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {ByteBasedRegexpSubstrFunction.class, StringBasedRegexpSubstrFunction.class}) +@BuiltInFunction(name = RegexpSubstrFunction.NAME, nodeClass = RegexpSubstrParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PLong.class }, defaultValue = "1") }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, derivedFunctions = { + ByteBasedRegexpSubstrFunction.class, StringBasedRegexpSubstrFunction.class }) public abstract class RegexpSubstrFunction extends PrefixFunction { - public static final String NAME = "REGEXP_SUBSTR"; - - private AbstractBasePattern pattern; - private Integer offset; - private Integer maxLength; - - private static final PDataType TYPE = PVarchar.INSTANCE; - - public RegexpSubstrFunction() { } - - public RegexpSubstrFunction(List children) { - super(children); - init(); + public static final String NAME = "REGEXP_SUBSTR"; + + private AbstractBasePattern pattern; + private Integer offset; + private Integer maxLength; + + private static final PDataType TYPE = PVarchar.INSTANCE; + + public RegexpSubstrFunction() { + } + + public RegexpSubstrFunction(List children) { + super(children); + init(); + } + + protected abstract AbstractBasePattern compilePatternSpec(String value); + + private void init() { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Expression patternExpr = getPatternExpression(); + if ( + patternExpr.isStateless() && patternExpr.getDeterminism() == Determinism.ALWAYS + && patternExpr.evaluate(null, ptr) + ) { + String patternStr = + (String) patternExpr.getDataType().toObject(ptr, patternExpr.getSortOrder()); + if (patternStr != null) { + pattern = compilePatternSpec(patternStr); + } } - - protected abstract AbstractBasePattern compilePatternSpec(String value); - - private void init() { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Expression patternExpr = getPatternExpression(); - if (patternExpr.isStateless() && patternExpr.getDeterminism() == Determinism.ALWAYS && patternExpr.evaluate(null, ptr)) { - String patternStr = (String) patternExpr.getDataType().toObject(ptr, patternExpr.getSortOrder()); - if (patternStr != null) { - pattern = compilePatternSpec(patternStr); - } - } - // If the source string has a fixed width, then the max length would be the length - // of the source string minus the offset, or the absolute value of the offset if - // it's negative. Offset number is a required argument. However, if the source string - // is not fixed width, the maxLength would be null. - Expression offsetExpr = getOffsetExpression(); - if (offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS && offsetExpr.evaluate(null, ptr)) { - offset = (Integer)PInteger.INSTANCE.toObject(ptr, offsetExpr.getDataType(), offsetExpr.getSortOrder()); - if (offset != null) { - PDataType type = getSourceStrExpression().getDataType(); - if (type.isFixedWidth()) { - if (offset >= 0) { - Integer maxLength = getSourceStrExpression().getMaxLength(); - this.maxLength = maxLength - offset - (offset == 0 ? 
0 : 1); - } else { - this.maxLength = -offset; - } - } - } + // If the source string has a fixed width, then the max length would be the length + // of the source string minus the offset, or the absolute value of the offset if + // it's negative. Offset number is a required argument. However, if the source string + // is not fixed width, the maxLength would be null. + Expression offsetExpr = getOffsetExpression(); + if ( + offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS + && offsetExpr.evaluate(null, ptr) + ) { + offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getDataType(), + offsetExpr.getSortOrder()); + if (offset != null) { + PDataType type = getSourceStrExpression().getDataType(); + if (type.isFixedWidth()) { + if (offset >= 0) { + Integer maxLength = getSourceStrExpression().getMaxLength(); + this.maxLength = maxLength - offset - (offset == 0 ? 0 : 1); + } else { + this.maxLength = -offset; + } } + } } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - AbstractBasePattern pattern = this.pattern; - if (pattern == null) { - Expression patternExpr = getPatternExpression(); - if (!patternExpr.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - pattern = compilePatternSpec((String) patternExpr.getDataType().toObject(ptr, patternExpr.getSortOrder())); - } - int offset; - if (this.offset == null) { - Expression offsetExpression = getOffsetExpression(); - if (!offsetExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - offset = offsetExpression.getDataType().getCodec().decodeInt(ptr, offsetExpression.getSortOrder()); - } else { - offset = this.offset; - } - Expression strExpression = getSourceStrExpression(); - if (!strExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.get().length == 0) { - return true; - } - - TYPE.coerceBytes(ptr, strExpression.getDataType(), strExpression.getSortOrder(), SortOrder.ASC); - - // Account for 1 versus 0-based offset - offset = offset - (offset <= 0 ? 
0 : 1); - - pattern.substr(ptr, offset); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + AbstractBasePattern pattern = this.pattern; + if (pattern == null) { + Expression patternExpr = getPatternExpression(); + if (!patternExpr.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; + } + pattern = compilePatternSpec( + (String) patternExpr.getDataType().toObject(ptr, patternExpr.getSortOrder())); } - - @Override - public Integer getMaxLength() { - return maxLength; + int offset; + if (this.offset == null) { + Expression offsetExpression = getOffsetExpression(); + if (!offsetExpression.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; + } + offset = + offsetExpression.getDataType().getCodec().decodeInt(ptr, offsetExpression.getSortOrder()); + } else { + offset = this.offset; } - - @Override - public OrderPreserving preservesOrder() { - if (offset != null) { - if (offset == 0 || offset == 1) { - return OrderPreserving.YES_IF_LAST; - } - } - return OrderPreserving.NO; + Expression strExpression = getSourceStrExpression(); + if (!strExpression.evaluate(tuple, ptr)) { + return false; } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); + if (ptr.get().length == 0) { + return true; } - @Override - public int getKeyFormationTraversalIndex() { - return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; - } - - private Expression getOffsetExpression() { - return children.get(2); - } + TYPE.coerceBytes(ptr, strExpression.getDataType(), strExpression.getSortOrder(), SortOrder.ASC); - private Expression getPatternExpression() { - return children.get(1); - } + // Account for 1 versus 0-based offset + offset = offset - (offset <= 0 ? 0 : 1); - private Expression getSourceStrExpression() { - return children.get(0); - } + pattern.substr(ptr, offset); + return true; + } - @Override - public PDataType getDataType() { - // ALways VARCHAR since we do not know in advanced how long the - // matched string will be. - return TYPE; - } + @Override + public Integer getMaxLength() { + return maxLength; + } - @Override - public String getName() { - return NAME; + @Override + public OrderPreserving preservesOrder() { + if (offset != null) { + if (offset == 0 || offset == 1) { + return OrderPreserving.YES_IF_LAST; + } } + return OrderPreserving.NO; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public int getKeyFormationTraversalIndex() { + return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; + } + + private Expression getOffsetExpression() { + return children.get(2); + } + + private Expression getPatternExpression() { + return children.get(1); + } + + private Expression getSourceStrExpression() { + return children.get(0); + } + + @Override + public PDataType getDataType() { + // ALways VARCHAR since we do not know in advanced how long the + // matched string will be. 
+ return TYPE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ReverseFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ReverseFunction.java index dd1f3a8d65c..10cc83c2c3d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ReverseFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ReverseFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,75 +15,73 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.function; import java.sql.SQLException; import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.StringUtil; -@BuiltInFunction(name=ReverseFunction.NAME, args={ - @Argument(allowedTypes={PVarchar.class})} ) +@BuiltInFunction(name = ReverseFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }) }) public class ReverseFunction extends ScalarFunction { - public static final String NAME = "REVERSE"; - - public ReverseFunction() { - } - - public ReverseFunction(List children) throws SQLException { - super(children); - } + public static final String NAME = "REVERSE"; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression arg = getChildren().get(0); - if (!arg.evaluate(tuple, ptr)) { - return false; - } + public ReverseFunction() { + } - int targetOffset = ptr.getLength(); - if (targetOffset == 0) { - return true; - } + public ReverseFunction(List children) throws SQLException { + super(children); + } - byte[] source = ptr.get(); - byte[] target = new byte[targetOffset]; - int sourceOffset = ptr.getOffset(); - int endOffset = sourceOffset + ptr.getLength(); - SortOrder sortOrder = arg.getSortOrder(); - while (sourceOffset < endOffset) { - int nBytes = StringUtil.getBytesInChar(source[sourceOffset], sortOrder); - targetOffset -= nBytes; - System.arraycopy(source, sourceOffset, target, targetOffset, nBytes); - sourceOffset += nBytes; - } - ptr.set(target); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression arg = getChildren().get(0); + if (!arg.evaluate(tuple, ptr)) { + return false; } - @Override - public SortOrder getSortOrder() { - return getChildren().get(0).getSortOrder(); + int targetOffset = ptr.getLength(); + if (targetOffset == 0) { + return true; } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + byte[] source = ptr.get(); + byte[] target = new byte[targetOffset]; + int sourceOffset = 
ptr.getOffset(); + int endOffset = sourceOffset + ptr.getLength(); + SortOrder sortOrder = arg.getSortOrder(); + while (sourceOffset < endOffset) { + int nBytes = StringUtil.getBytesInChar(source[sourceOffset], sortOrder); + targetOffset -= nBytes; + System.arraycopy(source, sourceOffset, target, targetOffset, nBytes); + sourceOffset += nBytes; } + ptr.set(target); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public SortOrder getSortOrder() { + return getChildren().get(0).getSortOrder(); + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java index 6b8271a111f..cdc2c156978 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDateExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -52,323 +52,315 @@ import org.apache.phoenix.util.ByteUtil; /** - * Function used to bucketize date/time values by rounding them to - * an even increment. Usage: + * Function used to bucketize date/time values by rounding them to an even increment. Usage: * {@code ROUND(,<'day'|'hour'|'minute'|'second'|'millisecond'|'week'|'month'|'year'>,) } - * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 10 minute rollup) - * The function returns a {@link org.apache.phoenix.schema.types.PDate} - - * + * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 10 + * minute rollup) The function returns a {@link org.apache.phoenix.schema.types.PDate} * @since 0.1 */ @BuiltInFunction(name = RoundFunction.NAME, - args = { - @Argument(allowedTypes={PDate.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PDate.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class RoundDateExpression extends ScalarFunction { - - long divBy; - - public static final String NAME = "ROUND"; - - private static final long[] TIME_UNIT_MS = new long[] { - 24 * 60 * 60 * 1000, - 60 * 60 * 1000, - 60 * 1000, - 1000, - 1 - }; - - public RoundDateExpression() {} - - /** - * @param timeUnit - unit of time to round up to. - * Creates a {@link RoundDateExpression} with default multiplier of 1. - */ - public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { - return create(expr, timeUnit, 1); - } - - /** - * @param timeUnit - unit of time to round up to - * @param multiplier - determines the roll up window size. 
- * Create a {@link RoundDateExpression}. - */ - public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) throws SQLException { - Expression timeUnitExpr = getTimeUnitExpr(timeUnit); - Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); - List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); - return create(expressions); - } - - public static Expression create(List children) throws SQLException { - int numChildren = children.size(); - if (numChildren < 2 || numChildren > 3) { - throw new IllegalArgumentException("Wrong number of arguments : " + numChildren); - } - Object timeUnitValue = ((LiteralExpression)children.get(1)).getValue(); - TimeUnit timeUnit = TimeUnit.getTimeUnit(timeUnitValue != null ? timeUnitValue.toString() : null); - switch(timeUnit) { - case WEEK: - return new RoundWeekExpression(children); - case MONTH: - return new RoundMonthExpression(children); - case YEAR: - return new RoundYearExpression(children); - default: - return new RoundDateExpression(children); - } - - } - - static Expression getTimeUnitExpr(TimeUnit timeUnit) throws SQLException { - return LiteralExpression.newConstant(timeUnit.name(), PVarchar.INSTANCE, Determinism.ALWAYS); - } - - static Expression getMultiplierExpr(int multiplier) throws SQLException { - return LiteralExpression.newConstant(multiplier, PInteger.INSTANCE, Determinism.ALWAYS); - } - - public RoundDateExpression(List children) { - super(children.subList(0, 1)); - int numChildren = children.size(); - Object timeUnitValue = ((LiteralExpression)children.get(1)).getValue(); - Object multiplierValue = numChildren > 2 ? ((LiteralExpression)children.get(2)).getValue() : null; - int multiplier = multiplierValue == null ? 1 :((Number)multiplierValue).intValue(); - TimeUnit timeUnit = TimeUnit.getTimeUnit(timeUnitValue != null ? timeUnitValue.toString() : null); - if (timeUnit.ordinal() < TIME_UNIT_MS.length) { - divBy = multiplier * TIME_UNIT_MS[timeUnit.ordinal()]; - } - } - - - protected long getRoundUpAmount() { - return divBy/2; - } - - @VisibleForTesting - public long roundTime(long time) { - long value; - long roundUpAmount = getRoundUpAmount(); - if (time <= Long.MAX_VALUE - roundUpAmount) { // If no overflow, add - value = (time + roundUpAmount) / divBy; - } else { // Else subtract and add one - value = (time - roundUpAmount) / divBy + 1; - } - return value * divBy; - } - @VisibleForTesting - public long rangeLower(long time) { - // This is for the ms based intervals. This needs to be separately implemented for the - // joda based intervals - return roundTime(time) - getRoundUpAmount(); - } + long divBy; - @VisibleForTesting - public long rangeUpper(long time) { - // This is for the ms based intervals. 
This needs to be separately implemented for the - // joda based intervals - return roundTime(time) + (divBy - getRoundUpAmount()) - 1; - } + public static final String NAME = "ROUND"; - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength() == 0) { - return true; // child evaluated to null - } - PDataType dataType = getDataType(); - long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); - long value = roundTime(time); - Date d = new Date(value); - byte[] byteValue = dataType.toBytes(d); - ptr.set(byteValue); - return true; - } - return false; - } + private static final long[] TIME_UNIT_MS = + new long[] { 24 * 60 * 60 * 1000, 60 * 60 * 1000, 60 * 1000, 1000, 1 }; - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - long roundUpAmount = this.getRoundUpAmount(); - result = prime * result + (int)(divBy ^ (divBy >>> 32)); - result = prime * result + (int)(roundUpAmount ^ (roundUpAmount >>> 32)); - result = prime * result + children.get(0).hashCode(); - return result; - } + public RoundDateExpression() { + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - RoundDateExpression other = (RoundDateExpression)obj; - if (divBy != other.divBy) return false; - if (getRoundUpAmount() != other.getRoundUpAmount()) return false; - return children.get(0).equals(other.children.get(0)); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - divBy = WritableUtils.readVLong(input); - } + /** + * @param timeUnit - unit of time to round up to. Creates a {@link RoundDateExpression} with + * default multiplier of 1. + */ + public static Expression create(Expression expr, TimeUnit timeUnit) throws SQLException { + return create(expr, timeUnit, 1); + } + + /** + * @param timeUnit - unit of time to round up to + * @param multiplier - determines the roll up window size. Create a {@link RoundDateExpression}. + */ + public static Expression create(Expression expr, TimeUnit timeUnit, int multiplier) + throws SQLException { + Expression timeUnitExpr = getTimeUnitExpr(timeUnit); + Expression defaultMultiplierExpr = getMultiplierExpr(multiplier); + List expressions = Lists.newArrayList(expr, timeUnitExpr, defaultMultiplierExpr); + return create(expressions); + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeVLong(output, divBy); + public static Expression create(List children) throws SQLException { + int numChildren = children.size(); + if (numChildren < 2 || numChildren > 3) { + throw new IllegalArgumentException("Wrong number of arguments : " + numChildren); } - - @Override - public Integer getMaxLength() { - return children.get(0).getMaxLength(); + Object timeUnitValue = ((LiteralExpression) children.get(1)).getValue(); + TimeUnit timeUnit = + TimeUnit.getTimeUnit(timeUnitValue != null ? 
timeUnitValue.toString() : null); + switch (timeUnit) { + case WEEK: + return new RoundWeekExpression(children); + case MONTH: + return new RoundMonthExpression(children); + case YEAR: + return new RoundYearExpression(children); + default: + return new RoundDateExpression(children); } - - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); + + } + + static Expression getTimeUnitExpr(TimeUnit timeUnit) throws SQLException { + return LiteralExpression.newConstant(timeUnit.name(), PVarchar.INSTANCE, Determinism.ALWAYS); + } + + static Expression getMultiplierExpr(int multiplier) throws SQLException { + return LiteralExpression.newConstant(multiplier, PInteger.INSTANCE, Determinism.ALWAYS); + } + + public RoundDateExpression(List children) { + super(children.subList(0, 1)); + int numChildren = children.size(); + Object timeUnitValue = ((LiteralExpression) children.get(1)).getValue(); + Object multiplierValue = + numChildren > 2 ? ((LiteralExpression) children.get(2)).getValue() : null; + int multiplier = multiplierValue == null ? 1 : ((Number) multiplierValue).intValue(); + TimeUnit timeUnit = + TimeUnit.getTimeUnit(timeUnitValue != null ? timeUnitValue.toString() : null); + if (timeUnit.ordinal() < TIME_UNIT_MS.length) { + divBy = multiplier * TIME_UNIT_MS[timeUnit.ordinal()]; } - - @Override - public boolean isNullable() { - return children.get(0).isNullable() || divBy == 0; + } + + protected long getRoundUpAmount() { + return divBy / 2; + } + + @VisibleForTesting + public long roundTime(long time) { + long value; + long roundUpAmount = getRoundUpAmount(); + if (time <= Long.MAX_VALUE - roundUpAmount) { // If no overflow, add + value = (time + roundUpAmount) / divBy; + } else { // Else subtract and add one + value = (time - roundUpAmount) / divBy + 1; } - - protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { - return columnDataType.getCodec(); + return value * divBy; + } + + @VisibleForTesting + public long rangeLower(long time) { + // This is for the ms based intervals. This needs to be separately implemented for the + // joda based intervals + return roundTime(time) - getRoundUpAmount(); + } + + @VisibleForTesting + public long rangeUpper(long time) { + // This is for the ms based intervals. This needs to be separately implemented for the + // joda based intervals + return roundTime(time) + (divBy - getRoundUpAmount()) - 1; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; // child evaluated to null + } + PDataType dataType = getDataType(); + long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); + long value = roundTime(time); + Date d = new Date(value); + byte[] byteValue = dataType.toBytes(d); + ptr.set(byteValue); + return true; } - - /** - * Form the key range from the key to the key right before or at the - * next rounded value. 
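To make the arithmetic above concrete, here is a minimal, standalone sketch of the half-up rounding that roundTime(), rangeLower() and rangeUpper() implement, assuming the DAY unit with multiplier 1 (divBy = 86,400,000 ms). The class name and sample instant are hypothetical, for illustration only.

    public class RoundDateArithmeticSketch {
      public static void main(String[] args) {
        long divBy = 24L * 60 * 60 * 1000;    // one DAY in ms (multiplier = 1)
        long roundUpAmount = divBy / 2;       // half a day, as in getRoundUpAmount()
        long time = 1357081200000L;           // 2013-01-01 23:00:00 UTC, a sample input

        // roundTime(): add half a step, truncate to the step, scale back up
        // (the Long.MAX_VALUE overflow guard from the real method is omitted here)
        long rounded = (time + roundUpAmount) / divBy * divBy;    // 2013-01-02 00:00:00 UTC

        // every input in [rangeLower, rangeUpper] rounds to the same value
        long rangeLower = rounded - roundUpAmount;                // 2013-01-01 12:00:00.000 UTC
        long rangeUpper = rounded + (divBy - roundUpAmount) - 1;  // 2013-01-02 11:59:59.999 UTC
        System.out.println(rangeLower + " <= t <= " + rangeUpper);
      }
    }

The same window is what the key-range rewriting further down uses when comparisons against ROUND(dateCol, ...) are pushed into the row key.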
- */ - @Override - public KeyPart newKeyPart(final KeyPart childPart) { - return new KeyPart() { - private final Set extractNodes = new LinkedHashSet<>(Collections.singleton(RoundDateExpression.this)); - - @Override - public PColumn getColumn() { - return childPart.getColumn(); - } + return false; + } - @Override - public Set getExtractNodes() { - return extractNodes; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long roundUpAmount = this.getRoundUpAmount(); + result = prime * result + (int) (divBy ^ (divBy >>> 32)); + result = prime * result + (int) (roundUpAmount ^ (roundUpAmount >>> 32)); + result = prime * result + children.get(0).hashCode(); + return result; + } - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - PDataType type = getColumn().getDataType(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - rhs.evaluate(null, ptr); - byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); - // No need to take into account SortOrder, because ROUND - // always forces the value to be in ascending order - PDataCodec codec = getKeyRangeCodec(type); - int offset = ByteUtil.isInclusive(op) ? 1 : 0; - long value = codec.decodeLong(key, 0, SortOrder.getDefault()); - byte[] lowerKey = new byte[type.getByteSize()]; - byte[] upperKey = new byte[type.getByteSize()]; - SortOrder order = this.getColumn().getSortOrder(); - KeyRange range; - switch (op) { - case EQUAL: - // If the value isn't evenly divisible by the div amount, then it - // can't possibly be equal to any rounded value. For example, if you - // had ROUND(dateCol,'DAY') = TO_DATE('2013-01-01 23:00:00') - // it could never be equal, since date constant isn't at a day - // boundary. - if (value != roundTime(value)) { - return KeyRange.EMPTY_RANGE; - } - codec.encodeLong(rangeLower(value), lowerKey, 0); - codec.encodeLong(rangeUpper(value), upperKey, 0); - range = type.getKeyRange(lowerKey, true, upperKey, true, order); - break; - // a simple number example (with half up rounding): - // round(x) = 10 ==> [9.5, 10.5) - // round(x) <= 10 ==> [-inf, 10.5) - // round(x) <= 10.1 === round(x) <= 10 => [-inf, 10.5) - // round(x) <= 9.9 === round(x) <= 9 => [-inf, 9.5) - // round(x) < 10 ==> round(x) <= 9 ==> [-inf, 9.5) - case GREATER: - if (value == roundTime(value)) { - codec.encodeLong(rangeUpper(value), lowerKey, 0); - range = type.getKeyRange(lowerKey, false, KeyRange.UNBOUND, false, order); - break; - } - //fallthrough intended - case GREATER_OR_EQUAL: - codec.encodeLong(rangeLower(value), lowerKey, 0); - range = type.getKeyRange(lowerKey, true, KeyRange.UNBOUND, false, order); - if (value <= roundTime(value)) { - //always true for ceil - codec.encodeLong(rangeLower(value), lowerKey, 0); - range = type.getKeyRange(lowerKey, true, KeyRange.UNBOUND, false, order); - } else { - //always true for floor, except when exact - codec.encodeLong(rangeUpper(value), lowerKey, 0); - range = type.getKeyRange(lowerKey, false, KeyRange.UNBOUND, false, order); - } - break; - case LESS: - if (value == roundTime(value)) { - codec.encodeLong(rangeLower(value), upperKey, 0); - range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, false, order); - break; - } - //fallthrough intended - case LESS_OR_EQUAL: - codec.encodeLong(rangeUpper(value), upperKey, 0); - range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, true, order); - if (value >= roundTime(value)) { - //always true for floor - codec.encodeLong(rangeUpper(value), upperKey, 0); - range = 
type.getKeyRange(KeyRange.UNBOUND, false, upperKey, true, order); - } else { - //always true for ceil, except when exact - codec.encodeLong(rangeLower(value), upperKey, 0); - range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, false, order); - } - break; - default: - return childPart.getKeyRange(op, rhs); - } - if (getColumn().getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + RoundDateExpression other = (RoundDateExpression) obj; + if (divBy != other.divBy) return false; + if (getRoundUpAmount() != other.getRoundUpAmount()) return false; + return children.get(0).equals(other.children.get(0)); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + divBy = WritableUtils.readVLong(input); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeVLong(output, divBy); + } + + @Override + public Integer getMaxLength() { + return children.get(0).getMaxLength(); + } + + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } + + @Override + public boolean isNullable() { + return children.get(0).isNullable() || divBy == 0; + } + + protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { + return columnDataType.getCodec(); + } + + /** + * Form the key range from the key to the key right before or at the next rounded value. + */ + @Override + public KeyPart newKeyPart(final KeyPart childPart) { + return new KeyPart() { + private final Set extractNodes = + new LinkedHashSet<>(Collections. singleton(RoundDateExpression.this)); - @Override - public PTable getTable() { - return childPart.getTable(); + @Override + public PColumn getColumn() { + return childPart.getColumn(); + } + + @Override + public Set getExtractNodes() { + return extractNodes; + } + + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + PDataType type = getColumn().getDataType(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + rhs.evaluate(null, ptr); + byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); + // No need to take into account SortOrder, because ROUND + // always forces the value to be in ascending order + PDataCodec codec = getKeyRangeCodec(type); + int offset = ByteUtil.isInclusive(op) ? 1 : 0; + long value = codec.decodeLong(key, 0, SortOrder.getDefault()); + byte[] lowerKey = new byte[type.getByteSize()]; + byte[] upperKey = new byte[type.getByteSize()]; + SortOrder order = this.getColumn().getSortOrder(); + KeyRange range; + switch (op) { + case EQUAL: + // If the value isn't evenly divisible by the div amount, then it + // can't possibly be equal to any rounded value. For example, if you + // had ROUND(dateCol,'DAY') = TO_DATE('2013-01-01 23:00:00') + // it could never be equal, since date constant isn't at a day + // boundary. 
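          // (Illustration only, continuing the DAY example above: roundTime(2013-01-01 23:00)
          // is 2013-01-02 00:00, which differs from the constant itself, so the EQUAL branch
          // below returns KeyRange.EMPTY_RANGE; a constant that already sits on a day boundary
          // instead expands to the inclusive window [rangeLower(c), rangeUpper(c)].)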
+ if (value != roundTime(value)) { + return KeyRange.EMPTY_RANGE; } - }; - } + codec.encodeLong(rangeLower(value), lowerKey, 0); + codec.encodeLong(rangeUpper(value), upperKey, 0); + range = type.getKeyRange(lowerKey, true, upperKey, true, order); + break; + // a simple number example (with half up rounding): + // round(x) = 10 ==> [9.5, 10.5) + // round(x) <= 10 ==> [-inf, 10.5) + // round(x) <= 10.1 === round(x) <= 10 => [-inf, 10.5) + // round(x) <= 9.9 === round(x) <= 9 => [-inf, 9.5) + // round(x) < 10 ==> round(x) <= 9 ==> [-inf, 9.5) + case GREATER: + if (value == roundTime(value)) { + codec.encodeLong(rangeUpper(value), lowerKey, 0); + range = type.getKeyRange(lowerKey, false, KeyRange.UNBOUND, false, order); + break; + } + // fallthrough intended + case GREATER_OR_EQUAL: + codec.encodeLong(rangeLower(value), lowerKey, 0); + range = type.getKeyRange(lowerKey, true, KeyRange.UNBOUND, false, order); + if (value <= roundTime(value)) { + // always true for ceil + codec.encodeLong(rangeLower(value), lowerKey, 0); + range = type.getKeyRange(lowerKey, true, KeyRange.UNBOUND, false, order); + } else { + // always true for floor, except when exact + codec.encodeLong(rangeUpper(value), lowerKey, 0); + range = type.getKeyRange(lowerKey, false, KeyRange.UNBOUND, false, order); + } + break; + case LESS: + if (value == roundTime(value)) { + codec.encodeLong(rangeLower(value), upperKey, 0); + range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, false, order); + break; + } + // fallthrough intended + case LESS_OR_EQUAL: + codec.encodeLong(rangeUpper(value), upperKey, 0); + range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, true, order); + if (value >= roundTime(value)) { + // always true for floor + codec.encodeLong(rangeUpper(value), upperKey, 0); + range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, true, order); + } else { + // always true for ceil, except when exact + codec.encodeLong(rangeLower(value), upperKey, 0); + range = type.getKeyRange(KeyRange.UNBOUND, false, upperKey, false, order); + } + break; + default: + return childPart.getKeyRange(op, rhs); + } + if (getColumn().getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; + } + @Override + public PTable getTable() { + return childPart.getTable(); + } + }; + } - @Override - public String getName() { - return NAME; - } - - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public String getName() { + return NAME; + } - @Override - public int getKeyFormationTraversalIndex() { - return 0; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } + + @Override + public int getKeyFormationTraversalIndex() { + return 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDecimalExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDecimalExpression.java index 563ec542669..1e4f906223c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDecimalExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundDecimalExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,346 +49,346 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * * Class encapsulating the process for rounding off a column/literal of type * {@link org.apache.phoenix.schema.types.PDecimal} - * - * * @since 3.0.0 */ @BuiltInFunction(name = RoundFunction.NAME, - args = { - @Argument(allowedTypes={PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class RoundDecimalExpression extends ScalarFunction { - private int scale; + private int scale; - /** - * Creates a {@link RoundDecimalExpression} with rounding scale given by @param scale. - * - */ - public static Expression create(Expression expr, int scale) throws SQLException { - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - Expression scaleExpr = LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); - List expressions = Lists.newArrayList(expr, scaleExpr); - return new RoundDecimalExpression(expressions); + /** + * Creates a {@link RoundDecimalExpression} with rounding scale given by @param scale. + */ + public static Expression create(Expression expr, int scale) throws SQLException { + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } + Expression scaleExpr = + LiteralExpression.newConstant(scale, PInteger.INSTANCE, Determinism.ALWAYS); + List expressions = Lists.newArrayList(expr, scaleExpr); + return new RoundDecimalExpression(expressions); + } - /** - * Creates a {@link RoundDecimalExpression} with a default scale of 0 used for rounding. - * - */ - public static Expression create(Expression expr) throws SQLException { - return create(expr, 0); - } + /** + * Creates a {@link RoundDecimalExpression} with a default scale of 0 used for rounding. 
+ */ + public static Expression create(Expression expr) throws SQLException { + return create(expr, 0); + } - public static Expression create(List exprs) throws SQLException { - Expression expr = exprs.get(0); - if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { - return expr; - } - if (exprs.size() == 1) { - Expression scaleExpr = LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); - exprs = Lists.newArrayList(expr, scaleExpr); - } - return new RoundDecimalExpression(exprs); + public static Expression create(List exprs) throws SQLException { + Expression expr = exprs.get(0); + if (expr.getDataType().isCoercibleTo(PLong.INSTANCE)) { + return expr; } - - public RoundDecimalExpression() {} - - public RoundDecimalExpression(List children) { - super(children); - LiteralExpression scaleChild = (LiteralExpression)children.get(1); - PDataType scaleType = scaleChild.getDataType(); - Object scaleValue = scaleChild.getValue(); - if(scaleValue != null) { - if (scaleType.isCoercibleTo(PInteger.INSTANCE, scaleValue)) { - int scale = (Integer) PInteger.INSTANCE.toObject(scaleValue, scaleType); - if (scale <= PDataType.MAX_PRECISION) { - this.scale = scale; - return; - } - } - throw new IllegalDataException("Invalid second argument for scale: " + scaleValue + ". The scale must be between 0 and " + PDataType.MAX_PRECISION + " inclusive."); - } + if (exprs.size() == 1) { + Expression scaleExpr = + LiteralExpression.newConstant(0, PInteger.INSTANCE, Determinism.ALWAYS); + exprs = Lists.newArrayList(expr, scaleExpr); } + return new RoundDecimalExpression(exprs); + } + + public RoundDecimalExpression() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression childExpr = children.get(0); - if(childExpr.evaluate(tuple, ptr)) { - if (ptr.getLength()==0) { - return true; - } - BigDecimal value = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, childExpr.getDataType(), childExpr.getSortOrder()); - BigDecimal scaledValue = value.setScale(scale, getRoundingMode()); - ptr.set(PDecimal.INSTANCE.toBytes(scaledValue)); - return true; + public RoundDecimalExpression(List children) { + super(children); + LiteralExpression scaleChild = (LiteralExpression) children.get(1); + PDataType scaleType = scaleChild.getDataType(); + Object scaleValue = scaleChild.getValue(); + if (scaleValue != null) { + if (scaleType.isCoercibleTo(PInteger.INSTANCE, scaleValue)) { + int scale = (Integer) PInteger.INSTANCE.toObject(scaleValue, scaleType); + if (scale <= PDataType.MAX_PRECISION) { + this.scale = scale; + return; } - return false; + } + throw new IllegalDataException("Invalid second argument for scale: " + scaleValue + + ". 
The scale must be between 0 and " + PDataType.MAX_PRECISION + " inclusive."); } + } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression childExpr = children.get(0); + if (childExpr.evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; + } + BigDecimal value = (BigDecimal) PDecimal.INSTANCE.toObject(ptr, childExpr.getDataType(), + childExpr.getSortOrder()); + BigDecimal scaledValue = value.setScale(scale, getRoundingMode()); + ptr.set(PDecimal.INSTANCE.toBytes(scaledValue)); + return true; } + return false; + } - protected RoundingMode getRoundingMode() { - return RoundingMode.HALF_UP; - } + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } - protected final int getRoundingScale() { - return scale; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - scale = WritableUtils.readVInt(input); - } + protected RoundingMode getRoundingMode() { + return RoundingMode.HALF_UP; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeVInt(output, scale); - } + protected final int getRoundingScale() { + return scale; + } - @Override - public String getName() { - return RoundFunction.NAME; - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + scale = WritableUtils.readVInt(input); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeVInt(output, scale); + } - @Override - public int getKeyFormationTraversalIndex() { - return 0; - } + @Override + public String getName() { + return RoundFunction.NAME; + } - @Override - public KeyPart newKeyPart(final KeyPart childPart) { - return new KeyPart() { - private final Set extractNodes = new LinkedHashSet<>(Collections.singleton(RoundDecimalExpression.this)); - - @Override - public PColumn getColumn() { - return childPart.getColumn(); - } - - @Override - public Set getExtractNodes() { - return extractNodes; - } - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - final BigDecimal rhsDecimal = (BigDecimal) PDecimal.INSTANCE.toObject(evaluateExpression(rhs)); - - // equality requires an exact match. 
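As a quick aside on the new evaluate() above: it delegates the actual rounding to BigDecimal.setScale with the HALF_UP mode returned by getRoundingMode(). A minimal standalone illustration, with a hypothetical class name and sample values:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DecimalScaleSketch {
      public static void main(String[] args) {
        int scale = 2;  // as in ROUND(col, 2)
        System.out.println(new BigDecimal("2.453").setScale(scale, RoundingMode.HALF_UP)); // 2.45
        System.out.println(new BigDecimal("2.455").setScale(scale, RoundingMode.HALF_UP)); // 2.46
        System.out.println(new BigDecimal("2.38").setScale(scale, RoundingMode.HALF_UP));  // 2.38
      }
    }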
if rounding would cut off more precision - // than needed for a match, it's impossible for there to be any matches - if(op == CompareOperator.EQUAL && !hasEnoughPrecisionToProduce(rhsDecimal)) { - return KeyRange.EMPTY_RANGE; - } - - // if the decimal needs to be rounded, round it such that the given - // operator will still be valid - BigDecimal roundedDecimal = roundAndPreserveOperator(rhsDecimal, op); - - // the range of big decimals that could be rounded to produce the rounded result - // alternatively, the "rounding bucket" that this decimal falls into - final KeyRange equalityRange = getInputRangeProducing(roundedDecimal); - boolean lowerInclusive = equalityRange.isLowerInclusive(); - boolean upperInclusive = equalityRange.isUpperInclusive(); - byte[] lowerRange = KeyRange.UNBOUND; - byte[] upperRange = KeyRange.UNBOUND; - - switch(op) { - case EQUAL: - return equalityRange; - case GREATER: - // from the equality range and up, NOT including the equality range - lowerRange = equalityRange.getUpperRange(); - lowerInclusive = !equalityRange.isUpperInclusive(); - break; - case GREATER_OR_EQUAL: - // from the equality range and up, including the equality range - lowerRange = equalityRange.getLowerRange(); - break; - case LESS: - // from the equality range and down, NOT including the equality range - upperRange = equalityRange.getLowerRange(); - upperInclusive = !equalityRange.isLowerInclusive(); - break; - case LESS_OR_EQUAL: - // from the equality range and down, including the equality range - upperRange = equalityRange.getUpperRange(); - break; - default: - throw new AssertionError("Invalid CompareOp: " + op); - } - - KeyRange range = KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - if (getColumn().getSortOrder() == SortOrder.DESC) { - range = range.invert(); - } - return range; - } - - /** - * Produces a the given decimal rounded to this rounding expression's scale. If the - * decimal requires more scale precision to produce than this expression has, as in - * ROUND(?, 2) > 2.0098974, it ensures that the decimal is rounded such that the - * given operator will still produce correct results. - * @param decimal the decimal to round with this expression's scale - * @param op the operator to preserve comparison with in the event of lost precision - * @return the rounded decimal - */ - private BigDecimal roundAndPreserveOperator(BigDecimal decimal, CompareOperator op) { - final BigDecimal rounded = roundToScale(decimal); - - // if we lost information, make sure that the rounding didn't break the operator - if(!hasEnoughPrecisionToProduce(decimal)) { - switch(op) { - case GREATER_OR_EQUAL: - // e.g. 'ROUND(dec, 2) >= 2.013' would be converted to - // 'ROUND(dec, 2) >= 2.01' but should be 'ROUND(dec, 2) >= 2.02' - if(decimal.compareTo(rounded) > 0) { - return stepNextInScale(rounded); - } - break; - case GREATER: - // e.g. 'ROUND(dec, 2) > 2.017' would be converted to - // 'ROUND(dec, 2) > 2.02' but should be 'ROUND(dec, 2) > 2.01' - if(decimal.compareTo(rounded) < 0) { - return stepPrevInScale(rounded); - } - break; - case LESS_OR_EQUAL: - // e.g. 'ROUND(dec, 2) < 2.017' would be converted to - // 'ROUND(dec, 2) < 2.02' but should be 'ROUND(dec, 2) < 2.01' - if(decimal.compareTo(rounded) < 0) { - return stepPrevInScale(rounded); - } - break; - case LESS: - // e.g. 
'ROUND(dec, 2) <= 2.013' would be converted to - // 'ROUND(dec, 2) <= 2.01' but should be 'ROUND(dec, 2) <= 2.02' - if(decimal.compareTo(rounded) > 0) { - return stepNextInScale(rounded); - } - break; - } - } - - // otherwise, rounding has not affected the operator, so return normally - return rounded; - } - - @Override - public PTable getTable() { - return childPart.getTable(); - } - }; - } - - /** - * Finds the Decimal KeyRange that will produce the given result when fed into this - * rounding expression. For example, a ROUND expression with scale 2 will produce the - * result "2.05" with any decimal in the range [2.045, 2.0545). - * The result must be pre-rounded to within this rounding expression's scale. - * @param result the result to find an input range for. Must be producable. - * @return a KeyRange of DECIMAL keys that can be rounded by this expression to produce result - * @throws IllegalArgumentException if the result has more scale than this expression can produce - */ - protected KeyRange getInputRangeProducing(BigDecimal result) { - if(!hasEnoughPrecisionToProduce(result)) { - throw new IllegalArgumentException("Cannot produce input range for decimal " + result - + ", not enough precision with scale " + getRoundingScale()); + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } + + @Override + public int getKeyFormationTraversalIndex() { + return 0; + } + + @Override + public KeyPart newKeyPart(final KeyPart childPart) { + return new KeyPart() { + private final Set extractNodes = + new LinkedHashSet<>(Collections. singleton(RoundDecimalExpression.this)); + + @Override + public PColumn getColumn() { + return childPart.getColumn(); + } + + @Override + public Set getExtractNodes() { + return extractNodes; + } + + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + final BigDecimal rhsDecimal = + (BigDecimal) PDecimal.INSTANCE.toObject(evaluateExpression(rhs)); + + // equality requires an exact match. if rounding would cut off more precision + // than needed for a match, it's impossible for there to be any matches + if (op == CompareOperator.EQUAL && !hasEnoughPrecisionToProduce(rhsDecimal)) { + return KeyRange.EMPTY_RANGE; } - byte[] lowerRange = PDecimal.INSTANCE.toBytes(halfStepPrevInScale(result)); - byte[] upperRange = PDecimal.INSTANCE.toBytes(halfStepNextInScale(result)); - // inclusiveness changes depending on sign - // e.g. -0.5 rounds "up" to -1 even though it is the lower boundary - boolean lowerInclusive = result.signum() > 0; - boolean upperInclusive = result.signum() < 0; - return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - } - - /** - * Determines whether this rounding expression's scale has enough precision to produce the - * minimum precision for the input decimal. In other words, determines whether the given - * decimal can be rounded to this scale without losing ordering information. - * For example, an expression with a scale of 2 has enough precision to produce "2.3", "2.71" - * and "2.100000", but does not have enough precision to produce "2.001" - * @param result the decimal to round - * @return true if the given decimal can be precisely matched by this rounding expression - */ - protected final boolean hasEnoughPrecisionToProduce(BigDecimal result) { - // use compareTo so that 2.0 and 2.00 are treated as "equal" - return roundToScale(result).compareTo(result) == 0; - } - - /** - * Returns the given decimal rounded to this rounding expression's scale. 
- * For example, with scale 2 the decimal "2.453" would be rounded to either 2.45 or - * 2.46 depending on the rounding mode, while "2.38" and "2.7" would be unchanged. - * @param decimal the decimal to round - * @return the rounded result decimal - */ - protected final BigDecimal roundToScale(BigDecimal decimal) { - return decimal.setScale(getRoundingScale(), getRoundingMode()); - } - - /** - * Produces a value half of a "step" back in this expression's rounding scale. - * For example with a scale of 2, "2.5" would be stepped back to "2.495". - */ - protected final BigDecimal halfStepPrevInScale(BigDecimal decimal) { - BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); - BigDecimal halfStep = step.divide(BigDecimal.valueOf(2)); - return decimal.subtract(halfStep); - } - - /** - * Produces a value half of a "step" forward in this expression's rounding scale. - * For example with a scale of 2, "2.5" would be stepped forward to "2.505". - */ - protected final BigDecimal halfStepNextInScale(BigDecimal decimal) { - BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); - BigDecimal halfStep = step.divide(BigDecimal.valueOf(2)); - return decimal.add(halfStep); - } - /** - * Produces a value one "step" back in this expression's rounding scale. - * For example with a scale of 2, "2.5" would be stepped back to "2.49". - */ - protected final BigDecimal stepPrevInScale(BigDecimal decimal) { - BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); - return decimal.subtract(step); - } + // if the decimal needs to be rounded, round it such that the given + // operator will still be valid + BigDecimal roundedDecimal = roundAndPreserveOperator(rhsDecimal, op); + + // the range of big decimals that could be rounded to produce the rounded result + // alternatively, the "rounding bucket" that this decimal falls into + final KeyRange equalityRange = getInputRangeProducing(roundedDecimal); + boolean lowerInclusive = equalityRange.isLowerInclusive(); + boolean upperInclusive = equalityRange.isUpperInclusive(); + byte[] lowerRange = KeyRange.UNBOUND; + byte[] upperRange = KeyRange.UNBOUND; + + switch (op) { + case EQUAL: + return equalityRange; + case GREATER: + // from the equality range and up, NOT including the equality range + lowerRange = equalityRange.getUpperRange(); + lowerInclusive = !equalityRange.isUpperInclusive(); + break; + case GREATER_OR_EQUAL: + // from the equality range and up, including the equality range + lowerRange = equalityRange.getLowerRange(); + break; + case LESS: + // from the equality range and down, NOT including the equality range + upperRange = equalityRange.getLowerRange(); + upperInclusive = !equalityRange.isLowerInclusive(); + break; + case LESS_OR_EQUAL: + // from the equality range and down, including the equality range + upperRange = equalityRange.getUpperRange(); + break; + default: + throw new AssertionError("Invalid CompareOp: " + op); + } + + KeyRange range = + KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + if (getColumn().getSortOrder() == SortOrder.DESC) { + range = range.invert(); + } + return range; + } - /** - * Produces a value one "step" forward in this expression's rounding scale. - * For example with a scale of 2, "2.5" would be stepped forward to "2.51". 
- */ - protected final BigDecimal stepNextInScale(BigDecimal decimal) { - BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); - return decimal.add(step); + /** + * Produces a the given decimal rounded to this rounding expression's scale. If the decimal + * requires more scale precision to produce than this expression has, as in ROUND(?, 2) > + * 2.0098974, it ensures that the decimal is rounded such that the given operator will still + * produce correct results. + * @param decimal the decimal to round with this expression's scale + * @param op the operator to preserve comparison with in the event of lost precision + * @return the rounded decimal + */ + private BigDecimal roundAndPreserveOperator(BigDecimal decimal, CompareOperator op) { + final BigDecimal rounded = roundToScale(decimal); + + // if we lost information, make sure that the rounding didn't break the operator + if (!hasEnoughPrecisionToProduce(decimal)) { + switch (op) { + case GREATER_OR_EQUAL: + // e.g. 'ROUND(dec, 2) >= 2.013' would be converted to + // 'ROUND(dec, 2) >= 2.01' but should be 'ROUND(dec, 2) >= 2.02' + if (decimal.compareTo(rounded) > 0) { + return stepNextInScale(rounded); + } + break; + case GREATER: + // e.g. 'ROUND(dec, 2) > 2.017' would be converted to + // 'ROUND(dec, 2) > 2.02' but should be 'ROUND(dec, 2) > 2.01' + if (decimal.compareTo(rounded) < 0) { + return stepPrevInScale(rounded); + } + break; + case LESS_OR_EQUAL: + // e.g. 'ROUND(dec, 2) < 2.017' would be converted to + // 'ROUND(dec, 2) < 2.02' but should be 'ROUND(dec, 2) < 2.01' + if (decimal.compareTo(rounded) < 0) { + return stepPrevInScale(rounded); + } + break; + case LESS: + // e.g. 'ROUND(dec, 2) <= 2.013' would be converted to + // 'ROUND(dec, 2) <= 2.01' but should be 'ROUND(dec, 2) <= 2.02' + if (decimal.compareTo(rounded) > 0) { + return stepNextInScale(rounded); + } + break; + } + } + + // otherwise, rounding has not affected the operator, so return normally + return rounded; + } + + @Override + public PTable getTable() { + return childPart.getTable(); + } + }; + } + + /** + * Finds the Decimal KeyRange that will produce the given result when fed into this rounding + * expression. For example, a ROUND expression with scale 2 will produce the result "2.05" with + * any decimal in the range [2.045, 2.0545). The result must be pre-rounded to within this + * rounding expression's scale. + * @param result the result to find an input range for. Must be producable. + * @return a KeyRange of DECIMAL keys that can be rounded by this expression to produce result + * @throws IllegalArgumentException if the result has more scale than this expression can produce + */ + protected KeyRange getInputRangeProducing(BigDecimal result) { + if (!hasEnoughPrecisionToProduce(result)) { + throw new IllegalArgumentException("Cannot produce input range for decimal " + result + + ", not enough precision with scale " + getRoundingScale()); } + byte[] lowerRange = PDecimal.INSTANCE.toBytes(halfStepPrevInScale(result)); + byte[] upperRange = PDecimal.INSTANCE.toBytes(halfStepNextInScale(result)); + // inclusiveness changes depending on sign + // e.g. -0.5 rounds "up" to -1 even though it is the lower boundary + boolean lowerInclusive = result.signum() > 0; + boolean upperInclusive = result.signum() < 0; + return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + } + + /** + * Determines whether this rounding expression's scale has enough precision to produce the minimum + * precision for the input decimal. 
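getInputRangeProducing(...) above computes the "rounding bucket" for a given result. A worked sketch of that bucket, assuming scale 2, HALF_UP rounding and a positive result (so, per the signum checks, the lower bound is inclusive and the upper bound exclusive); the class name and values are hypothetical:

    import java.math.BigDecimal;

    public class InputRangeSketch {
      public static void main(String[] args) {
        BigDecimal result = new BigDecimal("2.05");                // already at scale 2
        BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-2);    // 0.01
        BigDecimal halfStep = step.divide(BigDecimal.valueOf(2));  // 0.005
        BigDecimal lower = result.subtract(halfStep);              // 2.045  (inclusive, result > 0)
        BigDecimal upper = result.add(halfStep);                   // 2.055  (exclusive)
        // 2.045 rounds HALF_UP to 2.05, while 2.055 rounds to 2.06, so the bucket is [2.045, 2.055)
        System.out.println("[" + lower + ", " + upper + ")");
      }
    }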
In other words, determines whether the given decimal can be + * rounded to this scale without losing ordering information. For example, an expression with a + * scale of 2 has enough precision to produce "2.3", "2.71" and "2.100000", but does not have + * enough precision to produce "2.001" + * @param result the decimal to round + * @return true if the given decimal can be precisely matched by this rounding expression + */ + protected final boolean hasEnoughPrecisionToProduce(BigDecimal result) { + // use compareTo so that 2.0 and 2.00 are treated as "equal" + return roundToScale(result).compareTo(result) == 0; + } + + /** + * Returns the given decimal rounded to this rounding expression's scale. For example, with scale + * 2 the decimal "2.453" would be rounded to either 2.45 or 2.46 depending on the rounding mode, + * while "2.38" and "2.7" would be unchanged. + * @param decimal the decimal to round + * @return the rounded result decimal + */ + protected final BigDecimal roundToScale(BigDecimal decimal) { + return decimal.setScale(getRoundingScale(), getRoundingMode()); + } + + /** + * Produces a value half of a "step" back in this expression's rounding scale. For example with a + * scale of 2, "2.5" would be stepped back to "2.495". + */ + protected final BigDecimal halfStepPrevInScale(BigDecimal decimal) { + BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); + BigDecimal halfStep = step.divide(BigDecimal.valueOf(2)); + return decimal.subtract(halfStep); + } + + /** + * Produces a value half of a "step" forward in this expression's rounding scale. For example with + * a scale of 2, "2.5" would be stepped forward to "2.505". + */ + protected final BigDecimal halfStepNextInScale(BigDecimal decimal) { + BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); + BigDecimal halfStep = step.divide(BigDecimal.valueOf(2)); + return decimal.add(halfStep); + } + + /** + * Produces a value one "step" back in this expression's rounding scale. For example with a scale + * of 2, "2.5" would be stepped back to "2.49". + */ + protected final BigDecimal stepPrevInScale(BigDecimal decimal) { + BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); + return decimal.subtract(step); + } + + /** + * Produces a value one "step" forward in this expression's rounding scale. For example with a + * scale of 2, "2.5" would be stepped forward to "2.51". + */ + protected final BigDecimal stepNextInScale(BigDecimal decimal) { + BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-getRoundingScale()); + return decimal.add(step); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundFunction.java index d95a396037a..34223730f66 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,30 +31,27 @@ /** * Base class for RoundFunction. 
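Before the RoundFunction changes below, one more worked example for roundAndPreserveOperator(...) above: when the RHS constant needs more scale than the expression can produce, the rounded constant is nudged one step so the comparison stays correct. A sketch of the GREATER_OR_EQUAL case under those assumptions (hypothetical class name and values):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class PreserveOperatorSketch {
      public static void main(String[] args) {
        // ROUND(dec, 2) >= 2.013 : 2.013 cannot be produced at scale 2
        BigDecimal rhs = new BigDecimal("2.013");
        BigDecimal rounded = rhs.setScale(2, RoundingMode.HALF_UP);  // 2.01
        BigDecimal step = BigDecimal.ONE.scaleByPowerOfTen(-2);      // 0.01
        if (rhs.compareTo(rounded) > 0) {
          rounded = rounded.add(step);                               // stepNextInScale -> 2.02
        }
        // ROUND(dec, 2) >= 2.013 is therefore rewritten as ROUND(dec, 2) >= 2.02
        System.out.println(rounded);
      }
    }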
- * */ -@BuiltInFunction(name = RoundFunction.NAME, - nodeClass = RoundParseNode.class, - args = { - @Argument(allowedTypes={PTimestamp.class, PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionParseNode.FunctionClassType.ABSTRACT, - derivedFunctions = {RoundDateExpression.class, RoundTimestampExpression.class, RoundDecimalExpression.class} - ) +@BuiltInFunction(name = RoundFunction.NAME, nodeClass = RoundParseNode.class, + args = { @Argument(allowedTypes = { PTimestamp.class, PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionParseNode.FunctionClassType.ABSTRACT, derivedFunctions = { + RoundDateExpression.class, RoundTimestampExpression.class, RoundDecimalExpression.class }) public abstract class RoundFunction extends ScalarFunction { - - public static final String NAME = "ROUND"; - - public RoundFunction() {} - - public RoundFunction(List children) { - super(children); - } - - @Override - public String getName() { - return NAME; - } + + public static final String NAME = "ROUND"; + + public RoundFunction() { + } + + public RoundFunction(List children) { + super(children); + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundJodaDateExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundJodaDateExpression.java index 0a8ee678cf5..def788cc55f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundJodaDateExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundJodaDateExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,45 +28,42 @@ import org.joda.time.chrono.GJChronology; /** - * - * Base class for functions that use joda time. - * Used primarily by FLOOR , ROUND and CEIL on the time units WEEK,MONTH and YEAR. + * Base class for functions that use joda time. Used primarily by FLOOR , ROUND and CEIL on the time + * units WEEK,MONTH and YEAR. 
*/ -public abstract class RoundJodaDateExpression extends RoundDateExpression{ +public abstract class RoundJodaDateExpression extends RoundDateExpression { - public RoundJodaDateExpression(){} - - public RoundJodaDateExpression(List children) { - super(children); - } + public RoundJodaDateExpression() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength() == 0) { - return true; // child evaluated to null - } - PDataType dataType = getDataType(); - long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); - DateTime dt = new DateTime(time, GJChronology.getInstanceUTC()); - long value = roundDateTime(dt); - Date d = new Date(value); - byte[] byteValue = dataType.toBytes(d); - ptr.set(byteValue); - return true; - } - return false; - } - - /** - * @param dateTime - * @return Time in millis. - */ - public abstract long roundDateTime(DateTime dateTime); + public RoundJodaDateExpression(List children) { + super(children); + } - @Override - // We need a working roundTime() for the RowKey pushdown logic. - public long roundTime(long time) { - return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; // child evaluated to null + } + PDataType dataType = getDataType(); + long time = dataType.getCodec().decodeLong(ptr, children.get(0).getSortOrder()); + DateTime dt = new DateTime(time, GJChronology.getInstanceUTC()); + long value = roundDateTime(dt); + Date d = new Date(value); + byte[] byteValue = dataType.toBytes(d); + ptr.set(byteValue); + return true; } + return false; + } + + /** Returns Time in millis. */ + public abstract long roundDateTime(DateTime dateTime); + + @Override + // We need a working roundTime() for the RowKey pushdown logic. + public long roundTime(long time) { + return roundDateTime(new DateTime(time, GJChronology.getInstanceUTC())); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundMonthExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundMonthExpression.java index e333dcdd9e5..5b073e35602 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundMonthExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundMonthExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,38 +26,38 @@ import org.joda.time.chrono.GJChronology; /** - * * Rounds off the given {@link DateTime} to month. 
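The Joda-based subclasses defer the rounding to roundHalfEvenCopy() on a date-time field, as RoundMonthExpression below does for months. A small standalone illustration, assuming the same GJChronology UTC chronology used by the code above; the class name and date are hypothetical:

    import org.joda.time.DateTime;
    import org.joda.time.chrono.GJChronology;

    public class JodaRoundSketch {
      public static void main(String[] args) {
        // 2013-01-20 is past the middle of January, so half-even rounding of the
        // monthOfYear field moves it forward to 2013-02-01T00:00:00Z
        DateTime dt = new DateTime(2013, 1, 20, 0, 0, GJChronology.getInstanceUTC());
        long millis = dt.monthOfYear().roundHalfEvenCopy().getMillis();
        System.out.println(new DateTime(millis, GJChronology.getInstanceUTC()));
      }
    }

The week and year variants follow the same pattern on weekOfWeekyear() and year().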
*/ public class RoundMonthExpression extends RoundJodaDateExpression { - - public RoundMonthExpression(){} - - public RoundMonthExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.monthOfYear().roundHalfEvenCopy().getMillis(); - } - - @Override - public long rangeLower(long epochMs) { - // We're doing unnecessary conversions here, but this is not perf sensitive - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime prev = rounded.minusMonths(1); - return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.monthOfYear()); - } - - @Override - public long rangeUpper(long epochMs) { - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime next = rounded.plusMonths(1); - return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.monthOfYear()); - } + + public RoundMonthExpression() { + } + + public RoundMonthExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.monthOfYear().roundHalfEvenCopy().getMillis(); + } + + @Override + public long rangeLower(long epochMs) { + // We're doing unnecessary conversions here, but this is not perf sensitive + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime prev = rounded.minusMonths(1); + return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.monthOfYear()); + } + + @Override + public long rangeUpper(long epochMs) { + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime next = rounded.plusMonths(1); + return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.monthOfYear()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundTimestampExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundTimestampExpression.java index 55982ff9ec1..ae416c858c5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundTimestampExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundTimestampExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,87 +38,83 @@ import org.apache.phoenix.schema.types.PUnsignedDate; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.schema.types.PVarchar; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Class encapsulating the process for rounding off a column/literal of - * type {@link org.apache.phoenix.schema.types.PTimestamp} - * This class only supports rounding off the milliseconds that is for - * {@link TimeUnit#MILLISECOND}. 
If you want more options of rounding like + * Class encapsulating the process for rounding off a column/literal of type + * {@link org.apache.phoenix.schema.types.PTimestamp} This class only supports rounding off the + * milliseconds that is for {@link TimeUnit#MILLISECOND}. If you want more options of rounding like * using {@link TimeUnit#HOUR} use {@link RoundDateExpression} - * - * * @since 3.0.0 */ @BuiltInFunction(name = RoundFunction.NAME, - args = { - @Argument(allowedTypes={PTimestamp.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, - classType = FunctionClassType.DERIVED -) + args = { @Argument(allowedTypes = { PTimestamp.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionClassType.DERIVED) public class RoundTimestampExpression extends RoundDateExpression { - - private static final long HALF_OF_NANOS_IN_MILLI = java.util.concurrent.TimeUnit.MILLISECONDS.toNanos(1)/2; - public RoundTimestampExpression() {} - - public RoundTimestampExpression(List children) { - super(children); - } - - public static Expression create (List children) throws SQLException { - Expression firstChild = children.get(0); - PDataType firstChildDataType = firstChild.getDataType(); - String timeUnit = (String)((LiteralExpression)children.get(1)).getValue(); - LiteralExpression multiplierExpr = (LiteralExpression)children.get(2); - - /* - * When rounding off timestamp to milliseconds, nanos play a part only when the multiplier value - * is equal to 1. This is because for cases when multiplier value is greater than 1, number of nanos/multiplier - * will always be less than half the nanos in a millisecond. - */ - if((timeUnit == null || TimeUnit.MILLISECOND.toString().equalsIgnoreCase(timeUnit)) && ((Number)multiplierExpr.getValue()).intValue() == 1) { - return new RoundTimestampExpression(children); - } - // Coerce TIMESTAMP to DATE, as the nanos has no affect - List newChildren = Lists.newArrayListWithExpectedSize(children.size()); - newChildren.add(CoerceExpression.create(firstChild, firstChildDataType == PTimestamp.INSTANCE ? - PDate.INSTANCE : PUnsignedDate.INSTANCE)); - newChildren.addAll(children.subList(1, children.size())); - return RoundDateExpression.create(newChildren); - } - - @Override - protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { - return columnDataType == PTimestamp.INSTANCE - ? PDate.INSTANCE.getCodec() - : columnDataType == PUnsignedTimestamp.INSTANCE - ? PUnsignedDate.INSTANCE.getCodec() - : super.getKeyRangeCodec(columnDataType); + private static final long HALF_OF_NANOS_IN_MILLI = + java.util.concurrent.TimeUnit.MILLISECONDS.toNanos(1) / 2; + + public RoundTimestampExpression() { + } + + public RoundTimestampExpression(List children) { + super(children); + } + + public static Expression create(List children) throws SQLException { + Expression firstChild = children.get(0); + PDataType firstChildDataType = firstChild.getDataType(); + String timeUnit = (String) ((LiteralExpression) children.get(1)).getValue(); + LiteralExpression multiplierExpr = (LiteralExpression) children.get(2); + + /* + * When rounding off timestamp to milliseconds, nanos play a part only when the multiplier value + * is equal to 1. 
This is because for cases when multiplier value is greater than 1, number of + * nanos/multiplier will always be less than half the nanos in a millisecond. + */ + if ( + (timeUnit == null || TimeUnit.MILLISECOND.toString().equalsIgnoreCase(timeUnit)) + && ((Number) multiplierExpr.getValue()).intValue() == 1 + ) { + return new RoundTimestampExpression(children); } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (children.get(0).evaluate(tuple, ptr)) { - if (ptr.getLength()==0) { - return true; - } - SortOrder sortOrder = children.get(0).getSortOrder(); - PDataType dataType = getDataType(); - int nanos = dataType.getNanos(ptr, sortOrder); - if(nanos >= HALF_OF_NANOS_IN_MILLI) { - long timeMillis = dataType.getMillis(ptr, sortOrder); - Timestamp roundedTs = new Timestamp(timeMillis + 1); - byte[] byteValue = dataType.toBytes(roundedTs); - ptr.set(byteValue); - } - return true; // for timestamp we only support rounding up the milliseconds. - } - return false; + // Coerce TIMESTAMP to DATE, as the nanos has no affect + List newChildren = Lists.newArrayListWithExpectedSize(children.size()); + newChildren.add(CoerceExpression.create(firstChild, + firstChildDataType == PTimestamp.INSTANCE ? PDate.INSTANCE : PUnsignedDate.INSTANCE)); + newChildren.addAll(children.subList(1, children.size())); + return RoundDateExpression.create(newChildren); + } + + @Override + protected PDataCodec getKeyRangeCodec(PDataType columnDataType) { + return columnDataType == PTimestamp.INSTANCE ? PDate.INSTANCE.getCodec() + : columnDataType == PUnsignedTimestamp.INSTANCE ? PUnsignedDate.INSTANCE.getCodec() + : super.getKeyRangeCodec(columnDataType); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (children.get(0).evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; + } + SortOrder sortOrder = children.get(0).getSortOrder(); + PDataType dataType = getDataType(); + int nanos = dataType.getNanos(ptr, sortOrder); + if (nanos >= HALF_OF_NANOS_IN_MILLI) { + long timeMillis = dataType.getMillis(ptr, sortOrder); + Timestamp roundedTs = new Timestamp(timeMillis + 1); + byte[] byteValue = dataType.toBytes(roundedTs); + ptr.set(byteValue); + } + return true; // for timestamp we only support rounding up the milliseconds. } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundWeekExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundWeekExpression.java index e66f580cf06..bd57cbeaa99 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundWeekExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundWeekExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,38 +26,38 @@ import org.joda.time.chrono.GJChronology; /** - * * Rounds off the given {@link DateTime} to the nearest Monday. 
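Returning briefly to RoundTimestampExpression.evaluate() above: it keeps only the millisecond part and bumps it by one when the sub-millisecond nanos reach half a millisecond. A simplified standalone sketch of that decision using java.sql.Timestamp directly rather than Phoenix's encoded bytes; the class name and values are hypothetical:

    import java.sql.Timestamp;
    import java.util.concurrent.TimeUnit;

    public class TimestampNanosSketch {
      public static void main(String[] args) {
        long halfMilliInNanos = TimeUnit.MILLISECONDS.toNanos(1) / 2;  // 500_000, as above
        Timestamp ts = new Timestamp(1000L);                           // sample epoch millis
        ts.setNanos(600_000);                                          // 0.6 ms below the millisecond
        long subMilliNanos = ts.getNanos() % 1_000_000;
        // round up by one millisecond when the sub-millisecond part is at least half a milli
        long roundedMillis = ts.getTime() + (subMilliNanos >= halfMilliInNanos ? 1 : 0);
        System.out.println(roundedMillis);                             // 1001
      }
    }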
*/ public class RoundWeekExpression extends RoundJodaDateExpression { - public RoundWeekExpression(){} - - public RoundWeekExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.weekOfWeekyear().roundHalfEvenCopy().getMillis(); - } - - @Override - public long rangeLower(long epochMs) { - // We're doing unnecessary conversions here, but this is NOT perf sensitive - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime prev = rounded.minusWeeks(1); - return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.weekOfWeekyear()); - } - - @Override - public long rangeUpper(long epochMs) { - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime next = rounded.plusWeeks(1); - return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.weekOfWeekyear()); - } + public RoundWeekExpression() { + } + + public RoundWeekExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.weekOfWeekyear().roundHalfEvenCopy().getMillis(); + } + + @Override + public long rangeLower(long epochMs) { + // We're doing unnecessary conversions here, but this is NOT perf sensitive + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime prev = rounded.minusWeeks(1); + return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.weekOfWeekyear()); + } + + @Override + public long rangeUpper(long epochMs) { + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime next = rounded.plusWeeks(1); + return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.weekOfWeekyear()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundYearExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundYearExpression.java index 8a99d7b20de..12d690cf40c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundYearExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RoundYearExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,38 +26,38 @@ import org.joda.time.chrono.GJChronology; /** - * * Rounds off the given {@link DateTime} to year. 
*/ public class RoundYearExpression extends RoundJodaDateExpression { - public RoundYearExpression(){} - - public RoundYearExpression(List children) { - super(children); - } - - @Override - public long roundDateTime(DateTime dateTime) { - return dateTime.year().roundHalfEvenCopy().getMillis(); - } - - @Override - public long rangeLower(long epochMs) { - // We're doing unnecessary conversions here, but this is NOT perf sensitive - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime prev = rounded.minusYears(1); - return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.year()); - } - - @Override - public long rangeUpper(long epochMs) { - DateTime rounded = - new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), - GJChronology.getInstanceUTC()); - DateTime next = rounded.plusYears(1); - return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.year()); - } + public RoundYearExpression() { + } + + public RoundYearExpression(List children) { + super(children); + } + + @Override + public long roundDateTime(DateTime dateTime) { + return dateTime.year().roundHalfEvenCopy().getMillis(); + } + + @Override + public long rangeLower(long epochMs) { + // We're doing unnecessary conversions here, but this is NOT perf sensitive + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime prev = rounded.minusYears(1); + return DateUtil.rangeJodaHalfEven(rounded, prev, DateTimeFieldType.year()); + } + + @Override + public long rangeUpper(long epochMs) { + DateTime rounded = + new DateTime(roundDateTime(new DateTime(epochMs, GJChronology.getInstanceUTC())), + GJChronology.getInstanceUTC()); + DateTime next = rounded.plusYears(1); + return DateUtil.rangeJodaHalfEven(rounded, next, DateTimeFieldType.year()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RowKeyBytesStringFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RowKeyBytesStringFunction.java index 5c851415f9b..0d219ef6222 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RowKeyBytesStringFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/RowKeyBytesStringFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,40 +34,40 @@ @BuiltInFunction(name = RowKeyBytesStringFunction.NAME, args = {}) public class RowKeyBytesStringFunction extends ScalarFunction { - public static final String NAME = "ROWKEY_BYTES_STRING"; + public static final String NAME = "ROWKEY_BYTES_STRING"; - public RowKeyBytesStringFunction() { - } + public RowKeyBytesStringFunction() { + } - public RowKeyBytesStringFunction(List children) { - super(children); - } + public RowKeyBytesStringFunction(List children) { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - tuple.getKey(ptr); - String rowkey = Bytes.toStringBinary(ptr.get(), ptr.getOffset(), ptr.getLength()); - ptr.set(PVarchar.INSTANCE.toBytes(rowkey)); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + tuple.getKey(ptr); + String rowkey = Bytes.toStringBinary(ptr.get(), ptr.getOffset(), ptr.getLength()); + ptr.set(PVarchar.INSTANCE.toBytes(rowkey)); + return true; + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } - @Override - public boolean isStateless() { - return false; - } + @Override + public boolean isStateless() { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLIndexTypeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLIndexTypeFunction.java index 14b7dea42fc..af4a2ab7f60 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLIndexTypeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLIndexTypeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,56 +24,50 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.PTable.IndexType; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PUnsignedTinyint; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Function used to get the SQL view type name from the serialized view type. - * Usage: - * SQLViewType('v') will return 'VIEW' based on - * {@link java.sql.DatabaseMetaData#getTableTypes()} - * - * + * Function used to get the SQL view type name from the serialized view type. 
Usage: + * SQLViewType('v') will return 'VIEW' based on {@link java.sql.DatabaseMetaData#getTableTypes()} * @since 2.2 */ -@BuiltInFunction(name=SQLIndexTypeFunction.NAME, args= { - @Argument(allowedTypes= PUnsignedTinyint.class)} ) +@BuiltInFunction(name = SQLIndexTypeFunction.NAME, + args = { @Argument(allowedTypes = PUnsignedTinyint.class) }) public class SQLIndexTypeFunction extends ScalarFunction { - public static final String NAME = "SQLIndexType"; + public static final String NAME = "SQLIndexType"; - public SQLIndexTypeFunction() { - } - - public SQLIndexTypeFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - IndexType viewType = IndexType.fromSerializedValue(ptr.get()[ptr.getOffset()]); - ptr.set(viewType.getBytes()); - return true; - } + public SQLIndexTypeFunction() { + } + + public SQLIndexTypeFunction(List children) throws SQLException { + super(children); + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + IndexType viewType = IndexType.fromSerializedValue(ptr.get()[ptr.getOffset()]); + ptr.set(viewType.getBytes()); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLTableTypeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLTableTypeFunction.java index 597051220e0..150c8a07949 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLTableTypeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLTableTypeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,60 +21,52 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Function used to get the SQL table type name from the serialized table type. - * Usage: - * SqlTableType('v') will return 'VIEW' based on - * {@link java.sql.DatabaseMetaData#getTableTypes()} - * - * + * Function used to get the SQL table type name from the serialized table type. 
Usage: + * SqlTableType('v') will return 'VIEW' based on {@link java.sql.DatabaseMetaData#getTableTypes()} * @since 2.2 */ -@BuiltInFunction(name=SQLTableTypeFunction.NAME, args= { - @Argument(allowedTypes= PChar.class)} ) +@BuiltInFunction(name = SQLTableTypeFunction.NAME, args = { @Argument(allowedTypes = PChar.class) }) public class SQLTableTypeFunction extends ScalarFunction { - public static final String NAME = "SQLTableType"; + public static final String NAME = "SQLTableType"; - public SQLTableTypeFunction() { - } - - public SQLTableTypeFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - PTableType tableType = PTableType.fromSerializedValue(ptr.get()[ptr.getOffset()]); - ptr.set(tableType.getValue().getBytes()); - return true; - } + public SQLTableTypeFunction() { + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + public SQLTableTypeFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + PTableType tableType = PTableType.fromSerializedValue(ptr.get()[ptr.getOffset()]); + ptr.set(tableType.getValue().getBytes()); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLViewTypeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLViewTypeFunction.java index d105d21d518..bf759e7d55e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLViewTypeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SQLViewTypeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,60 +21,53 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.PTable.ViewType; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PUnsignedTinyint; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Function used to get the SQL view type name from the serialized view type. - * Usage: - * SQLViewType('v') will return 'VIEW' based on - * {@link java.sql.DatabaseMetaData#getTableTypes()} - * - * + * Function used to get the SQL view type name from the serialized view type. 
Usage: + * SQLViewType('v') will return 'VIEW' based on {@link java.sql.DatabaseMetaData#getTableTypes()} * @since 2.2 */ -@BuiltInFunction(name=SQLViewTypeFunction.NAME, args= { - @Argument(allowedTypes= PUnsignedTinyint.class)} ) +@BuiltInFunction(name = SQLViewTypeFunction.NAME, + args = { @Argument(allowedTypes = PUnsignedTinyint.class) }) public class SQLViewTypeFunction extends ScalarFunction { - public static final String NAME = "SQLViewType"; + public static final String NAME = "SQLViewType"; - public SQLViewTypeFunction() { - } - - public SQLViewTypeFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - ViewType viewType = ViewType.fromSerializedValue(ptr.get()[ptr.getOffset()]); - ptr.set(viewType.getBytes()); - return true; - } + public SQLViewTypeFunction() { + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + public SQLViewTypeFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + ViewType viewType = ViewType.fromSerializedValue(ptr.get()[ptr.getOffset()]); + ptr.set(viewType.getBytes()); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java index 30adbd7df4f..eda5bf5d633 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,87 +26,82 @@ import org.apache.phoenix.expression.visitor.ExpressionVisitor; import org.apache.phoenix.util.ByteUtil; - public abstract class ScalarFunction extends FunctionExpression { - public static final int NO_TRAVERSAL = -1; - - public ScalarFunction() { - } - - public ScalarFunction(List children) { - super(children); - } - - public ScalarFunction clone(List children) { - try { - // FIXME: we could potentially implement this on each subclass and not use reflection - return getClass().getConstructor(List.class).newInstance(children); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } - } - - protected static byte[] evaluateExpression(Expression rhs) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - rhs.evaluate(null, ptr); - byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); - return key; - } + public static final int NO_TRAVERSAL = -1; - /** - * Retrieve the literal value at childIndex. The argument must be a constant - * (i.e. marked as isConstant=true) - */ - protected final T getLiteralValue(int childIndex, Class type) { - Expression expression = getChildren().get(childIndex); - // It's safe to assume expression is a LiteralExpression since - // only arguments marked as isConstant = true should be handled through - // this method. - return type.cast(((LiteralExpression) expression).getValue()); - } + public ScalarFunction() { + } - @Override - public T accept(ExpressionVisitor visitor) { - List l = acceptChildren(visitor, visitor.visitEnter(this)); - T t = visitor.visitLeave(this, l); - if (t == null) { - t = visitor.defaultReturn(this, l); - } - return t; - } - - /** - * Determines whether or not a function may be used to form - * the start/stop key of a scan - * When OrderPreserving is YES, in order to make order-by optimization - * valid, it should return 0. (refer to {@link RoundDateExpression}) - * @return the zero-based position of the argument to traverse - * into to look for a primary key column reference, or - * {@value #NO_TRAVERSAL} if the function cannot be used to - * form the scan key. - */ - public int getKeyFormationTraversalIndex() { - return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; - } + public ScalarFunction(List children) { + super(children); + } - /** - * Manufactures a KeyPart used to construct the KeyRange given - * a constant and a comparison operator. - * @param childPart the KeyPart formulated for the child expression - * at the {@link #getKeyFormationTraversalIndex()} position. - * @return the KeyPart for constructing the KeyRange for this - * function. - */ - public KeyPart newKeyPart(KeyPart childPart) { - return null; + public ScalarFunction clone(List children) { + try { + // FIXME: we could potentially implement this on each subclass and not use reflection + return getClass().getConstructor(List.class).newInstance(children); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } - - /** - * Used to determine if the same ScalarFunction instance may be - * used by multiple threads. - * @return true if function is thread safe and false otherwise. 
- */ - public boolean isThreadSafe() { - return true; + } + + protected static byte[] evaluateExpression(Expression rhs) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + rhs.evaluate(null, ptr); + byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr); + return key; + } + + /** + * Retrieve the literal value at childIndex. The argument must be a constant (i.e. marked as + * isConstant=true) + */ + protected final T getLiteralValue(int childIndex, Class type) { + Expression expression = getChildren().get(childIndex); + // It's safe to assume expression is a LiteralExpression since + // only arguments marked as isConstant = true should be handled through + // this method. + return type.cast(((LiteralExpression) expression).getValue()); + } + + @Override + public T accept(ExpressionVisitor visitor) { + List l = acceptChildren(visitor, visitor.visitEnter(this)); + T t = visitor.visitLeave(this, l); + if (t == null) { + t = visitor.defaultReturn(this, l); } + return t; + } + + /** + * Determines whether or not a function may be used to form the start/stop key of a scan When + * OrderPreserving is YES, in order to make order-by optimization valid, it should return 0. + * (refer to {@link RoundDateExpression}) + * @return the zero-based position of the argument to traverse into to look for a primary key + * column reference, or {@value #NO_TRAVERSAL} if the function cannot be used to form the + * scan key. + */ + public int getKeyFormationTraversalIndex() { + return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0; + } + + /** + * Manufactures a KeyPart used to construct the KeyRange given a constant and a comparison + * operator. + * @param childPart the KeyPart formulated for the child expression at the + * {@link #getKeyFormationTraversalIndex()} position. + * @return the KeyPart for constructing the KeyRange for this function. + */ + public KeyPart newKeyPart(KeyPart childPart) { + return null; + } + + /** + * Used to determine if the same ScalarFunction instance may be used by multiple threads. + * @return true if function is thread safe and false otherwise. + */ + public boolean isThreadSafe() { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java index 09e3058505c..240b2f71a4c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SecondFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the Second() buildin. Input Date/Timestamp/Time. - * Returns an integer from 0 to 59 representing the second component of time - * + * Implementation of the Second() buildin. Input Date/Timestamp/Time. 
Returns an integer from 0 to + * 59 representing the second component of time */ -@BuiltInFunction(name=SecondFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = SecondFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class SecondFunction extends DateScalarFunction { - public static final String NAME = "SECOND"; + public static final String NAME = "SECOND"; - public SecondFunction() { - } + public SecondFunction() { + } - public SecondFunction(List children) throws SQLException { - super(children); - } + public SecondFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int sec = dt.getSecondOfMinute(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(sec, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int sec = dt.getSecondOfMinute(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(sec, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetBitFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetBitFunction.java index 756fc9afd1c..0174686b887 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetBitFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetBitFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,52 @@ import org.apache.phoenix.schema.types.PVarbinary; @BuiltInFunction(name = SetBitFunction.NAME, - args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), - @Argument(allowedTypes = { PInteger.class }), - @Argument(allowedTypes = { PInteger.class }) }) + args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), + @Argument(allowedTypes = { PInteger.class }), @Argument(allowedTypes = { PInteger.class }) }) public class SetBitFunction extends ScalarFunction { - public static final String NAME = "SET_BIT"; + public static final String NAME = "SET_BIT"; - public SetBitFunction() { - } + public SetBitFunction() { + } - public SetBitFunction(List children) throws SQLException { - super(children); - } + public SetBitFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get offset parameter - Expression offsetExpr = children.get(1); - if (!offsetExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - int offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - // get newValue parameter - Expression newValueExpr = children.get(2); - if (!newValueExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - int newValue = (Integer) PInteger.INSTANCE.toObject(ptr, newValueExpr.getSortOrder()); - byte newByteValue = (byte) (newValue & 0x1); - // get binary data parameter - Expression dataExpr = children.get(0); - if (!dataExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - if (ptr.getLength() == 0) return true; - int len = ptr.getLength() * Byte.SIZE; - offset = (offset % len + len) % len; - // set result - ((PBinaryBase) dataExpr.getDataType()).setBit(ptr, dataExpr.getSortOrder(), offset, - newByteValue, ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get offset parameter + Expression offsetExpr = children.get(1); + if (!offsetExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + // get newValue parameter + Expression newValueExpr = children.get(2); + if (!newValueExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int newValue = (Integer) PInteger.INSTANCE.toObject(ptr, newValueExpr.getSortOrder()); + byte newByteValue = (byte) (newValue & 0x1); + // get binary data parameter + Expression dataExpr = children.get(0); + if (!dataExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + if (ptr.getLength() == 0) return true; + int len = ptr.getLength() * Byte.SIZE; + offset = (offset % len + len) % len; + // set result + ((PBinaryBase) dataExpr.getDataType()).setBit(ptr, dataExpr.getSortOrder(), offset, + newByteValue, ptr); + return true; + } - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); - } + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetByteFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetByteFunction.java index 0dc9ad5d786..8be372d3c57 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetByteFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SetByteFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,52 +32,51 @@ import org.apache.phoenix.schema.types.PVarbinary; @BuiltInFunction(name = SetByteFunction.NAME, - args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), - @Argument(allowedTypes = { PInteger.class }), - @Argument(allowedTypes = { PInteger.class }) }) + args = { @Argument(allowedTypes = { PBinary.class, PVarbinary.class }), + @Argument(allowedTypes = { PInteger.class }), @Argument(allowedTypes = { PInteger.class }) }) public class SetByteFunction extends ScalarFunction { - public static final String NAME = "SET_BYTE"; + public static final String NAME = "SET_BYTE"; - public SetByteFunction() { - } + public SetByteFunction() { + } - public SetByteFunction(List children) throws SQLException { - super(children); - } + public SetByteFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - // get offset parameter - Expression offsetExpr = children.get(1); - if (!offsetExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - int offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); - // get newValue parameter - Expression newValueExpr = children.get(2); - if (!newValueExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength()==0) return true; - int newValue = (Integer) PInteger.INSTANCE.toObject(ptr, newValueExpr.getSortOrder()); - byte newByteValue = (byte) (newValue & 0xff); - // get binary data parameter - Expression dataExpr = children.get(0); - if (!dataExpr.evaluate(tuple, ptr)) return false; - if (ptr.getLength() == 0) return true; - int len = ptr.getLength(); - offset = (offset % len + len) % len; - // set result - ((PBinaryBase) dataExpr.getDataType()).setByte(ptr, dataExpr.getSortOrder(), offset, - newByteValue, ptr); - return true; - } + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + // get offset parameter + Expression offsetExpr = children.get(1); + if (!offsetExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int offset = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder()); + // get newValue parameter + Expression newValueExpr = children.get(2); + if (!newValueExpr.evaluate(tuple, ptr)) return false; + if (ptr.getLength() == 0) return true; + int newValue = (Integer) PInteger.INSTANCE.toObject(ptr, newValueExpr.getSortOrder()); + byte newByteValue = (byte) (newValue & 0xff); + // get binary data parameter + Expression dataExpr = children.get(0); + if (!dataExpr.evaluate(tuple, ptr)) return false; + if 
(ptr.getLength() == 0) return true; + int len = ptr.getLength(); + offset = (offset % len + len) % len; + // set result + ((PBinaryBase) dataExpr.getDataType()).setByte(ptr, dataExpr.getSortOrder(), offset, + newByteValue, ptr); + return true; + } - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); - } + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SignFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SignFunction.java index 1fd108e09ff..db81a36e380 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SignFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SignFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,46 +37,45 @@ @BuiltInFunction(name = SignFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }) }) public class SignFunction extends ScalarFunction { - public static final String NAME = "SIGN"; + public static final String NAME = "SIGN"; - private static final byte[][] RESULT = { PInteger.INSTANCE.toBytes(Integer.valueOf(-1)), - PInteger.INSTANCE.toBytes(Integer.valueOf(0)), - PInteger.INSTANCE.toBytes(Integer.valueOf(1)), }; + private static final byte[][] RESULT = { PInteger.INSTANCE.toBytes(Integer.valueOf(-1)), + PInteger.INSTANCE.toBytes(Integer.valueOf(0)), PInteger.INSTANCE.toBytes(Integer.valueOf(1)), }; - public SignFunction() { - } + public SignFunction() { + } - public SignFunction(List children) throws SQLException { - super(children); - } + public SignFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression childExpr = children.get(0); - PDataType dataType = childExpr.getDataType(); - if (childExpr.evaluate(tuple, ptr)) { - if (ptr.getLength()==0) { - return true; - } - int ret = ((PNumericType) dataType).signum(ptr, childExpr.getSortOrder()); - ptr.set(RESULT[ret + 1]); - return true; - } - return false; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression childExpr = children.get(0); + PDataType dataType = childExpr.getDataType(); + if (childExpr.evaluate(tuple, ptr)) { + if (ptr.getLength() == 0) { + return true; + } + int ret = ((PNumericType) dataType).signum(ptr, childExpr.getSortOrder()); + ptr.set(RESULT[ret + 1]); + return true; } + return false; + } - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SinFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SinFunction.java 
index 3b29f7f5e37..bf99e794dd8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SinFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SinFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,31 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = SinFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, - PDecimal.class }) }) +@BuiltInFunction(name = SinFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class SinFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "SIN"; + public static final String NAME = "SIN"; - public SinFunction() { - } + public SinFunction() { + } - public SinFunction(List children) throws SQLException { - super(children); - } + public SinFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.sin(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.sin(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SingleAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SingleAggregateFunction.java index 458ef87affc..15e87ec571a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SingleAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SingleAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,139 +33,136 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; - /** - * - * Base class for aggregate functions that calculate an aggregation - * using a single {{@link Aggregator} - * - * + * Base class for aggregate functions that calculate an aggregation using a single + * {{@link Aggregator} * @since 0.1 */ abstract public class SingleAggregateFunction extends AggregateFunction { - private static final List DEFAULT_EXPRESSION_LIST = Arrays.asList(LiteralExpression.newConstant(1, Determinism.ALWAYS)); - protected boolean isConstant; - private Aggregator aggregator; - - /** - * Sort aggregate functions with nullable fields last. This allows us not to have to store trailing null values. - * Within non-nullable/nullable groups, put fixed width values first since we can access those more efficiently - * (i.e. 
we can skip over groups of them in-mass instead of reading the length of each one to skip over as - * required by a variable length value). - */ - public static final Comparator SCHEMA_COMPARATOR = new Comparator() { - - @Override - public int compare(SingleAggregateFunction o1, SingleAggregateFunction o2) { - boolean isNullable1 = o1.isNullable(); - boolean isNullable2 = o2.isNullable(); - if (isNullable1 != isNullable2) { - return isNullable1 ? 1 : -1; - } - isNullable1 = o1.getAggregatorExpression().isNullable(); - isNullable2 = o2.getAggregatorExpression().isNullable(); - if (isNullable1 != isNullable2) { - return isNullable1 ? 1 : -1; - } - // Ensures COUNT(1) sorts first TODO: unit test for this - boolean isConstant1 = o1.isConstantExpression(); - boolean isConstant2 = o2.isConstantExpression(); - if (isConstant1 != isConstant2) { - return isConstant1 ? 1 : -1; - } - PDataType r1 = o1.getAggregator().getDataType(); - PDataType r2 = o2.getAggregator().getDataType(); - if (r1.isFixedWidth() != r2.isFixedWidth()) { - return r1.isFixedWidth() ? -1 : 1; - } - return r1.compareTo(r2); + private static final List DEFAULT_EXPRESSION_LIST = + Arrays. asList(LiteralExpression.newConstant(1, Determinism.ALWAYS)); + protected boolean isConstant; + private Aggregator aggregator; + + /** + * Sort aggregate functions with nullable fields last. This allows us not to have to store + * trailing null values. Within non-nullable/nullable groups, put fixed width values first since + * we can access those more efficiently (i.e. we can skip over groups of them in-mass instead of + * reading the length of each one to skip over as required by a variable length value). + */ + public static final Comparator SCHEMA_COMPARATOR = + new Comparator() { + + @Override + public int compare(SingleAggregateFunction o1, SingleAggregateFunction o2) { + boolean isNullable1 = o1.isNullable(); + boolean isNullable2 = o2.isNullable(); + if (isNullable1 != isNullable2) { + return isNullable1 ? 1 : -1; } + isNullable1 = o1.getAggregatorExpression().isNullable(); + isNullable2 = o2.getAggregatorExpression().isNullable(); + if (isNullable1 != isNullable2) { + return isNullable1 ? 1 : -1; + } + // Ensures COUNT(1) sorts first TODO: unit test for this + boolean isConstant1 = o1.isConstantExpression(); + boolean isConstant2 = o2.isConstantExpression(); + if (isConstant1 != isConstant2) { + return isConstant1 ? 1 : -1; + } + PDataType r1 = o1.getAggregator().getDataType(); + PDataType r2 = o2.getAggregator().getDataType(); + if (r1.isFixedWidth() != r2.isFixedWidth()) { + return r1.isFixedWidth() ? 
-1 : 1; + } + return r1.compareTo(r2); + } }; - - protected SingleAggregateFunction() { - this(DEFAULT_EXPRESSION_LIST, true); - } - public SingleAggregateFunction(List children) { - this(children, children.get(0) instanceof LiteralExpression); - } - - private SingleAggregateFunction(List children, boolean isConstant) { - super(children); - this.isConstant = isConstant; - this.aggregator = newClientAggregator(); - } + protected SingleAggregateFunction() { + this(DEFAULT_EXPRESSION_LIST, true); + } - public boolean isConstantExpression() { - return isConstant; - } - - @Override - public PDataType getDataType() { - return children.get(0).getDataType(); - } - - public Expression getAggregatorExpression() { - return children.get(0); - } - - public Aggregator getAggregator() { - return aggregator; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return getAggregator().evaluate(tuple, ptr); - } + public SingleAggregateFunction(List children) { + this(children, children.get(0) instanceof LiteralExpression); + } - /** - * Create the aggregator to do server-side aggregation. - * The data type of the returned Aggregator must match - * the data type returned by {@link #newClientAggregator()} - * @param conf HBase configuration. - * @return the aggregator to use on the server-side - */ - abstract public Aggregator newServerAggregator(Configuration conf); - /** - * Create the aggregator to do client-side aggregation - * based on the results returned from the aggregating - * coprocessor. The data type of the returned Aggregator - * must match the data type returned by {@link #newServerAggregator(Configuration)} - * @return the aggregator to use on the client-side - */ - public Aggregator newClientAggregator() { - return newServerAggregator(null); - } + private SingleAggregateFunction(List children, boolean isConstant) { + super(children); + this.isConstant = isConstant; + this.aggregator = newClientAggregator(); + } - public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { - Aggregator agg = newServerAggregator(config); - agg.aggregate(null, ptr); - return agg; - } - - public final void readFields(DataInput input, Configuration conf) throws IOException { - super.readFields(input); - aggregator = newServerAggregator(conf); - } + public boolean isConstantExpression() { + return isConstant; + } - @Override - public boolean isNullable() { - return true; - } - - protected SingleAggregateFunction getDelegate() { - return this; - } + @Override + public PDataType getDataType() { + return children.get(0).getDataType(); + } - @Override - public final T accept(ExpressionVisitor visitor) { - SingleAggregateFunction function = getDelegate(); - List l = acceptChildren(visitor, visitor.visitEnter(function)); - T t = visitor.visitLeave(function, l); - if (t == null) { - t = visitor.defaultReturn(function, l); - } - return t; + public Expression getAggregatorExpression() { + return children.get(0); + } + + public Aggregator getAggregator() { + return aggregator; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return getAggregator().evaluate(tuple, ptr); + } + + /** + * Create the aggregator to do server-side aggregation. The data type of the returned Aggregator + * must match the data type returned by {@link #newClientAggregator()} + * @param conf HBase configuration. 
+ * @return the aggregator to use on the server-side + */ + abstract public Aggregator newServerAggregator(Configuration conf); + + /** + * Create the aggregator to do client-side aggregation based on the results returned from the + * aggregating coprocessor. The data type of the returned Aggregator must match the data type + * returned by {@link #newServerAggregator(Configuration)} + * @return the aggregator to use on the client-side + */ + public Aggregator newClientAggregator() { + return newServerAggregator(null); + } + + public Aggregator newServerAggregator(Configuration config, ImmutableBytesWritable ptr) { + Aggregator agg = newServerAggregator(config); + agg.aggregate(null, ptr); + return agg; + } + + public final void readFields(DataInput input, Configuration conf) throws IOException { + super.readFields(input); + aggregator = newServerAggregator(conf); + } + + @Override + public boolean isNullable() { + return true; + } + + protected SingleAggregateFunction getDelegate() { + return this; + } + + @Override + public final T accept(ExpressionVisitor visitor) { + SingleAggregateFunction function = getDelegate(); + List l = acceptChildren(visitor, visitor.visitEnter(function)); + T t = visitor.visitLeave(function, l); + if (t == null) { + t = visitor.defaultReturn(function, l); } + return t; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqlTypeNameFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqlTypeNameFunction.java index bbd7efaf6a2..537d9af7a12 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqlTypeNameFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqlTypeNameFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,66 +21,59 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.schema.IllegalDataException; -import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; - /** - * - * Function used to get the SQL type name from the SQL type integer. - * Usage: - * SqlTypeName(12) - * will return 'VARCHAR' based on {@link java.sql.Types#VARCHAR} being 12 - * - * + * Function used to get the SQL type name from the SQL type integer. 
Usage: SqlTypeName(12) will + * return 'VARCHAR' based on {@link java.sql.Types#VARCHAR} being 12 * @since 0.1 */ -@BuiltInFunction(name=SqlTypeNameFunction.NAME, args= { - @Argument(allowedTypes= PInteger.class)} ) +@BuiltInFunction(name = SqlTypeNameFunction.NAME, + args = { @Argument(allowedTypes = PInteger.class) }) public class SqlTypeNameFunction extends ScalarFunction { - public static final String NAME = "SqlTypeName"; + public static final String NAME = "SqlTypeName"; - public SqlTypeNameFunction() { + public SqlTypeNameFunction() { + } + + public SqlTypeNameFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - public SqlTypeNameFunction(List children) throws SQLException { - super(children); + if (ptr.getLength() == 0) { + return true; } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - int sqlType = child.getDataType().getCodec().decodeInt(ptr, child.getSortOrder()); - try { - byte[] sqlTypeNameBytes = PDataType.fromTypeId(sqlType).getSqlTypeNameBytes(); - ptr.set(sqlTypeNameBytes); - } catch (IllegalDataException e) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - return true; + int sqlType = child.getDataType().getCodec().decodeInt(ptr, child.getSortOrder()); + try { + byte[] sqlTypeNameBytes = PDataType.fromTypeId(sqlType).getSqlTypeNameBytes(); + ptr.set(sqlTypeNameBytes); + } catch (IllegalDataException e) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); } + return true; + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java index 260305abd7e..40111eab5c8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,30 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = SqrtFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) +@BuiltInFunction(name = SqrtFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class SqrtFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "SQRT"; + public static final String NAME = "SQRT"; - public SqrtFunction() { - } + public SqrtFunction() { + } - public SqrtFunction(List children) throws SQLException { - super(children); - } + public SqrtFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.sqrt(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.sqrt(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java index e6e79fc36de..4542eea0f2e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevPopFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DecimalStddevPopAggregator; @@ -29,49 +28,47 @@ import org.apache.phoenix.expression.aggregator.StddevPopAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** - * * Built-in function for {@code STDDEV_POP() } aggregate function - * - * * @since 1.2.1 */ -@BuiltInFunction(name = StddevPopFunction.NAME, args = { @Argument(allowedTypes={PDecimal.class})}) +@BuiltInFunction(name = StddevPopFunction.NAME, + args = { @Argument(allowedTypes = { PDecimal.class }) }) public class StddevPopFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "STDDEV_POP"; + public static final String NAME = "STDDEV_POP"; - public StddevPopFunction() { + public StddevPopFunction() { - } + } - public StddevPopFunction(List childern) { - super(childern); - } + public StddevPopFunction(List childern) { + super(childern); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - if (children.get(0).getDataType() == PDecimal.INSTANCE) { - // Special Aggregators for DECIMAL datatype for more precision than double - return new DecimalStddevPopAggregator(children, getAggregatorExpression().getSortOrder()); - } - return new StddevPopAggregator(children, getAggregatorExpression().getSortOrder()); - } - - @Override - public String getName() { - return NAME; + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + if (children.get(0).getDataType() == PDecimal.INSTANCE) { + // Special Aggregators for DECIMAL datatype for more precision than double + return new DecimalStddevPopAggregator(children, getAggregatorExpression().getSortOrder()); } + return new StddevPopAggregator(children, getAggregatorExpression().getSortOrder()); + } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } + @Override + public String getName() { + return NAME; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java index 16d27ad186d..85b62b5cc51 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StddevSampFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.DecimalStddevSampAggregator; @@ -29,49 +28,47 @@ import org.apache.phoenix.expression.aggregator.StddevSampAggregator; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; /** - * * Built-in function for {@code STDDEV_SAMP() } aggregate function - * - * * @since 1.2.1 */ -@BuiltInFunction(name = StddevSampFunction.NAME, args = { @Argument(allowedTypes={PDecimal.class})}) +@BuiltInFunction(name = StddevSampFunction.NAME, + args = { @Argument(allowedTypes = { PDecimal.class }) }) public class StddevSampFunction extends DistinctValueWithCountAggregateFunction { - public static final String NAME = "STDDEV_SAMP"; + public static final String NAME = "STDDEV_SAMP"; - public StddevSampFunction() { + public StddevSampFunction() { - } + } - public StddevSampFunction(List childern) { - super(childern); - } + public StddevSampFunction(List childern) { + super(childern); + } - @Override - public Aggregator newServerAggregator(Configuration conf) { - return new DistinctValueWithCountServerAggregator(conf); - } + @Override + public Aggregator newServerAggregator(Configuration conf) { + return new DistinctValueWithCountServerAggregator(conf); + } - @Override - public DistinctValueWithCountClientAggregator newClientAggregator() { - if (children.get(0).getDataType() == PDecimal.INSTANCE) { - // Special Aggregators for DECIMAL datatype for more precision than double - return new DecimalStddevSampAggregator(children, getAggregatorExpression().getSortOrder()); - } - return new StddevSampAggregator(children, getAggregatorExpression().getSortOrder()); - } - - @Override - public String getName() { - return NAME; + @Override + public DistinctValueWithCountClientAggregator newClientAggregator() { + if (children.get(0).getDataType() == PDecimal.INSTANCE) { + // Special Aggregators for DECIMAL datatype for more precision than double + return new DecimalStddevSampAggregator(children, getAggregatorExpression().getSortOrder()); } + return new StddevSampAggregator(children, getAggregatorExpression().getSortOrder()); + } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } + @Override + public String getName() { + return NAME; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java index 89f8beaea49..d7282b72a09 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpReplaceFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file 
except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,31 +23,28 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBasePattern; import org.apache.phoenix.expression.util.regex.JavaPattern; - -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PVarchar; -@BuiltInFunction(name=RegexpReplaceFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},defaultValue="null")}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpReplaceFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "null") }, + classType = FunctionClassType.DERIVED) public class StringBasedRegexpReplaceFunction extends RegexpReplaceFunction { - public StringBasedRegexpReplaceFunction() { - } + public StringBasedRegexpReplaceFunction() { + } - public StringBasedRegexpReplaceFunction(List children) { - super(children); - } + public StringBasedRegexpReplaceFunction(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JavaPattern(value, Pattern.DOTALL); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JavaPattern(value, Pattern.DOTALL); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java index 029a5d300d9..651ef98ab65 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSplitFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,27 +22,24 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBaseSplitter; import org.apache.phoenix.expression.util.regex.GuavaSplitter; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PVarchar; -@BuiltInFunction(name=RegexpSplitFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class})}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpSplitFunction.NAME, args = { + @Argument(allowedTypes = { PVarchar.class }), @Argument(allowedTypes = { PVarchar.class }) }, + classType = FunctionClassType.DERIVED) public class StringBasedRegexpSplitFunction extends RegexpSplitFunction { - public StringBasedRegexpSplitFunction() { - } + public StringBasedRegexpSplitFunction() { + } - public StringBasedRegexpSplitFunction(List children) { - super(children); - } + public StringBasedRegexpSplitFunction(List children) { + super(children); + } - @Override - protected AbstractBaseSplitter compilePatternSpec(String value) { - return new GuavaSplitter(value); - } + @Override + protected AbstractBaseSplitter compilePatternSpec(String value) { + return new GuavaSplitter(value); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java index de2e0d418af..c68e9da73b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringBasedRegexpSubstrFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,29 +23,27 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.util.regex.AbstractBasePattern; import org.apache.phoenix.expression.util.regex.JavaPattern; -import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.Argument; +import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.FunctionParseNode.FunctionClassType; +import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.schema.types.PVarchar; -@BuiltInFunction(name=RegexpSubstrFunction.NAME, - args= { - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PLong.class}, defaultValue="1")}, - classType = FunctionClassType.DERIVED -) +@BuiltInFunction(name = RegexpSubstrFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PLong.class }, defaultValue = "1") }, + classType = FunctionClassType.DERIVED) public class StringBasedRegexpSubstrFunction extends RegexpSubstrFunction { - public StringBasedRegexpSubstrFunction() { - } + public StringBasedRegexpSubstrFunction() { + } - public StringBasedRegexpSubstrFunction(List children) { - super(children); - } + public StringBasedRegexpSubstrFunction(List children) { + super(children); + } - @Override - protected AbstractBasePattern compilePatternSpec(String value) { - return new JavaPattern(value, Pattern.DOTALL); - } + @Override + protected AbstractBasePattern compilePatternSpec(String value) { + return new JavaPattern(value, Pattern.DOTALL); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringToArrayFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringToArrayFunction.java index ffbda013e05..7a1d83fa342 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringToArrayFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/StringToArrayFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,66 +26,70 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.*; -@FunctionParseNode.BuiltInFunction(name = StringToArrayFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class, PChar.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class, PChar.class}), - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class, PChar.class}, defaultValue = "null")}) +@FunctionParseNode.BuiltInFunction(name = StringToArrayFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class, PChar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class, PChar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class, PChar.class }, + defaultValue = "null") }) public class StringToArrayFunction extends ScalarFunction { - public static final String NAME = "STRING_TO_ARRAY"; + public static final String NAME = "STRING_TO_ARRAY"; - public StringToArrayFunction() { - } - - public StringToArrayFunction(List children) { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression delimiterExpr = children.get(1); - String delimiter; - if (!delimiterExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - delimiter = ""; - } else { - delimiter = (String) delimiterExpr.getDataType().toObject(ptr, delimiterExpr.getSortOrder(), delimiterExpr.getMaxLength(), delimiterExpr.getScale()); - } - - Expression stringExpr = children.get(0); - if (!stringExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - String string = (String) stringExpr.getDataType().toObject(ptr, stringExpr.getSortOrder(), stringExpr.getMaxLength(), stringExpr.getScale()); + public StringToArrayFunction() { + } - Expression nullExpr = children.get(2); - String nullString = null; - if (nullExpr.evaluate(tuple, ptr) && ptr.getLength() != 0) { - nullString = (String) nullExpr.getDataType().toObject(ptr, nullExpr.getSortOrder(), nullExpr.getMaxLength(), nullExpr.getScale()); - } + public StringToArrayFunction(List children) { + super(children); + } - return PArrayDataType.stringToArray(ptr, string, delimiter, nullString, getSortOrder()); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression delimiterExpr = children.get(1); + String delimiter; + if (!delimiterExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + delimiter = ""; + } else { + delimiter = (String) delimiterExpr.getDataType().toObject(ptr, delimiterExpr.getSortOrder(), + delimiterExpr.getMaxLength(), delimiterExpr.getScale()); } - @Override - public String getName() { - return NAME; + Expression stringExpr = children.get(0); + if (!stringExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } + String string = (String) stringExpr.getDataType().toObject(ptr, stringExpr.getSortOrder(), + stringExpr.getMaxLength(), stringExpr.getScale()); - @Override - public Integer getMaxLength() { - return null; + Expression nullExpr = children.get(2); + String nullString = null; + if (nullExpr.evaluate(tuple, ptr) && ptr.getLength() != 0) { + nullString = (String) 
nullExpr.getDataType().toObject(ptr, nullExpr.getSortOrder(), + nullExpr.getMaxLength(), nullExpr.getScale()); } - @Override - public PDataType getDataType() { - return PVarcharArray.INSTANCE; - } + return PArrayDataType.stringToArray(ptr, string, delimiter, nullString, getSortOrder()); + } - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } + @Override + public String getName() { + return NAME; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public PDataType getDataType() { + return PVarcharArray.INSTANCE; + } + + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java index 7cf32e0b71f..483517f2b1a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SubstrFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,202 +34,210 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.StringUtil; - /** - * - * Implementation of the {@code SUBSTR(,[,]) } built-in function - * where {@code } is the offset from the start of {@code }. A positive offset - * is treated as 1-based, a zero offset is treated as 0-based, and a negative - * offset starts from the end of the string working backwards. The optional - * {@code } argument is the number of characters to return. In the absence of the - * {@code } argument, the rest of the string starting from {@code } is returned. - * If {@code } is less than 1, null is returned. - * - * + * Implementation of the {@code SUBSTR(,[,]) } built-in function where + * {@code } is the offset from the start of {@code }. A positive offset is + * treated as 1-based, a zero offset is treated as 0-based, and a negative offset starts from the + * end of the string working backwards. The optional {@code } argument is the number of + * characters to return. In the absence of the {@code } argument, the rest of the string + * starting from {@code } is returned. If {@code } is less than 1, null is + * returned. 
* @since 0.1 */ -@BuiltInFunction(name=SubstrFunction.NAME, args={ - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PLong.class}), // These are LONG because negative numbers end up as longs - @Argument(allowedTypes={PLong.class},defaultValue="null")} ) +@BuiltInFunction(name = SubstrFunction.NAME, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PLong.class }), // These are LONG because negative numbers end up + // as longs + @Argument(allowedTypes = { PLong.class }, defaultValue = "null") }) public class SubstrFunction extends PrefixFunction { - public static final String NAME = "SUBSTR"; - private boolean hasLengthExpression; - private boolean isOffsetConstant; - private boolean isLengthConstant; - private boolean isFixedWidth; - private Integer maxLength; - - public SubstrFunction() { - } - - public SubstrFunction(List children) { - super(children); - init(); - } - - private void init() { - isOffsetConstant = getOffsetExpression() instanceof LiteralExpression; - isLengthConstant = getLengthExpression() instanceof LiteralExpression; - hasLengthExpression = !isLengthConstant || ((LiteralExpression)getLengthExpression()).getValue() != null; - isFixedWidth = getStrExpression().getDataType().isFixedWidth() && ((hasLengthExpression && isLengthConstant) || (!hasLengthExpression && isOffsetConstant)); - if (hasLengthExpression && isLengthConstant) { - Integer maxLength = ((Number)((LiteralExpression)getLengthExpression()).getValue()).intValue(); - this.maxLength = maxLength >= 0 ? maxLength : 0; - } else if (isOffsetConstant) { - Number offsetNumber = (Number)((LiteralExpression)getOffsetExpression()).getValue(); - if (offsetNumber != null) { - int offset = offsetNumber.intValue(); - PDataType type = getStrExpression().getDataType(); - if (type.isFixedWidth()) { - if (offset >= 0) { - Integer maxLength = getStrExpression().getMaxLength(); - this.maxLength = maxLength - offset + (offset == 0 ? 0 : 1); - } else { - this.maxLength = -offset; - } - } - } + public static final String NAME = "SUBSTR"; + private boolean hasLengthExpression; + private boolean isOffsetConstant; + private boolean isLengthConstant; + private boolean isFixedWidth; + private Integer maxLength; + + public SubstrFunction() { + } + + public SubstrFunction(List children) { + super(children); + init(); + } + + private void init() { + isOffsetConstant = getOffsetExpression() instanceof LiteralExpression; + isLengthConstant = getLengthExpression() instanceof LiteralExpression; + hasLengthExpression = + !isLengthConstant || ((LiteralExpression) getLengthExpression()).getValue() != null; + isFixedWidth = getStrExpression().getDataType().isFixedWidth() + && ((hasLengthExpression && isLengthConstant) || (!hasLengthExpression && isOffsetConstant)); + if (hasLengthExpression && isLengthConstant) { + Integer maxLength = + ((Number) ((LiteralExpression) getLengthExpression()).getValue()).intValue(); + this.maxLength = maxLength >= 0 ? maxLength : 0; + } else if (isOffsetConstant) { + Number offsetNumber = (Number) ((LiteralExpression) getOffsetExpression()).getValue(); + if (offsetNumber != null) { + int offset = offsetNumber.intValue(); + PDataType type = getStrExpression().getDataType(); + if (type.isFixedWidth()) { + if (offset >= 0) { + Integer maxLength = getStrExpression().getMaxLength(); + this.maxLength = maxLength - offset + (offset == 0 ? 
0 : 1); + } else { + this.maxLength = -offset; + } } + } } + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression offsetExpression = getOffsetExpression(); - if (!offsetExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - int offset = offsetExpression.getDataType().getCodec().decodeInt(ptr, offsetExpression.getSortOrder()); - - int length = -1; - if (hasLengthExpression) { - Expression lengthExpression = getLengthExpression(); - if (!lengthExpression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - length = lengthExpression.getDataType().getCodec().decodeInt(ptr, lengthExpression.getSortOrder()); - if (length <= 0) { - return false; - } - } - - if (!getStrExpression().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength()==0) { - return true; - } - - boolean isCharType = getStrExpression().getDataType() == PChar.INSTANCE; - SortOrder sortOrder = getStrExpression().getSortOrder(); - int strlen = isCharType ? ptr.getLength() : StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); - - // Account for 1 versus 0-based offset - offset = offset - (offset <= 0 ? 0 : 1); - if (offset < 0) { // Offset < 0 means get from end - offset = strlen + offset; - } - if (offset < 0 || offset >= strlen) { - return false; - } - int maxLength = strlen - offset; - length = length == -1 ? maxLength : Math.min(length,maxLength); - - int byteOffset = isCharType ? offset : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset(), offset, sortOrder); - int byteLength = isCharType ? length : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset() + byteOffset, length, sortOrder); - ptr.set(ptr.get(), ptr.getOffset() + byteOffset, byteLength); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression offsetExpression = getOffsetExpression(); + if (!offsetExpression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - // If fixed width, then return child expression type. - // If not fixed width, then we don't know how big this will be across the board - return isFixedWidth ? 
getStrExpression().getDataType() : PVarchar.INSTANCE; + if (ptr.getLength() == 0) { + return true; } + int offset = + offsetExpression.getDataType().getCodec().decodeInt(ptr, offsetExpression.getSortOrder()); - @Override - public boolean isNullable() { - return getStrExpression().isNullable() || !isFixedWidth || getOffsetExpression().isNullable(); - } - - @Override - public Integer getMaxLength() { - return maxLength; - } - - @Override - public SortOrder getSortOrder() { - return getStrExpression().getSortOrder(); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); - } - - private Expression getStrExpression() { - return children.get(0); - } - - private Expression getOffsetExpression() { - return children.get(1); - } - - private Expression getLengthExpression() { - return children.get(2); - } - - @Override - public OrderPreserving preservesOrder() { - if (isOffsetConstant) { - LiteralExpression literal = (LiteralExpression) getOffsetExpression(); - Number offsetNumber = (Number) literal.getValue(); - if (offsetNumber != null) { - int offset = offsetNumber.intValue(); - if ((offset == 0 || offset == 1) && (!hasLengthExpression || isLengthConstant)) { - return OrderPreserving.YES_IF_LAST; - } - } - } - return OrderPreserving.NO; - } - - @Override - protected boolean extractNode() { + int length = -1; + if (hasLengthExpression) { + Expression lengthExpression = getLengthExpression(); + if (!lengthExpression.evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { return true; - } - - @Override - public String getName() { - return NAME; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(getName() + "("); - if (children.size()==0) - return buf.append(")").toString(); - if (hasLengthExpression) { - buf.append(getStrExpression()); - buf.append(", "); - buf.append(getOffsetExpression()); - buf.append(", "); - buf.append(getLengthExpression()); - } else { - buf.append(getStrExpression()); - buf.append(", "); - buf.append(getOffsetExpression()); + } + length = + lengthExpression.getDataType().getCodec().decodeInt(ptr, lengthExpression.getSortOrder()); + if (length <= 0) { + return false; + } + } + + if (!getStrExpression().evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; + } + + boolean isCharType = getStrExpression().getDataType() == PChar.INSTANCE; + SortOrder sortOrder = getStrExpression().getSortOrder(); + int strlen = isCharType + ? ptr.getLength() + : StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); + + // Account for 1 versus 0-based offset + offset = offset - (offset <= 0 ? 0 : 1); + if (offset < 0) { // Offset < 0 means get from end + offset = strlen + offset; + } + if (offset < 0 || offset >= strlen) { + return false; + } + int maxLength = strlen - offset; + length = length == -1 ? maxLength : Math.min(length, maxLength); + + int byteOffset = isCharType + ? offset + : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset(), offset, sortOrder); + int byteLength = isCharType + ? length + : StringUtil.getByteLengthForUtf8SubStr(ptr.get(), ptr.getOffset() + byteOffset, length, + sortOrder); + ptr.set(ptr.get(), ptr.getOffset() + byteOffset, byteLength); + return true; + } + + @Override + public PDataType getDataType() { + // If fixed width, then return child expression type. + // If not fixed width, then we don't know how big this will be across the board + return isFixedWidth ? 
getStrExpression().getDataType() : PVarchar.INSTANCE; + } + + @Override + public boolean isNullable() { + return getStrExpression().isNullable() || !isFixedWidth || getOffsetExpression().isNullable(); + } + + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public SortOrder getSortOrder() { + return getStrExpression().getSortOrder(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + private Expression getStrExpression() { + return children.get(0); + } + + private Expression getOffsetExpression() { + return children.get(1); + } + + private Expression getLengthExpression() { + return children.get(2); + } + + @Override + public OrderPreserving preservesOrder() { + if (isOffsetConstant) { + LiteralExpression literal = (LiteralExpression) getOffsetExpression(); + Number offsetNumber = (Number) literal.getValue(); + if (offsetNumber != null) { + int offset = offsetNumber.intValue(); + if ((offset == 0 || offset == 1) && (!hasLengthExpression || isLengthConstant)) { + return OrderPreserving.YES_IF_LAST; } - buf.append(")"); - return buf.toString(); - } - + } + } + return OrderPreserving.NO; + } + + @Override + protected boolean extractNode() { + return true; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(getName() + "("); + if (children.size() == 0) return buf.append(")").toString(); + if (hasLengthExpression) { + buf.append(getStrExpression()); + buf.append(", "); + buf.append(getOffsetExpression()); + buf.append(", "); + buf.append(getLengthExpression()); + } else { + buf.append(getStrExpression()); + buf.append(", "); + buf.append(getOffsetExpression()); + } + buf.append(")"); + return buf.toString(); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SumAggregateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SumAggregateFunction.java index d761a788b73..a55a91c4428 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SumAggregateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/SumAggregateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.expression.aggregator.Aggregator; @@ -32,113 +31,117 @@ import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.SumAggregateParseNode; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PFloat; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PUnsignedDouble; import org.apache.phoenix.schema.types.PUnsignedFloat; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.tuple.Tuple; - /** - * * Built-in function for SUM aggregation function. - * - * * @since 0.1 */ -@BuiltInFunction(name=SumAggregateFunction.NAME, nodeClass=SumAggregateParseNode.class, args= {@Argument(allowedTypes={PDecimal.class})} ) +@BuiltInFunction(name = SumAggregateFunction.NAME, nodeClass = SumAggregateParseNode.class, + args = { @Argument(allowedTypes = { PDecimal.class }) }) public class SumAggregateFunction extends DelegateConstantToCountAggregateFunction { - public static final String NAME = "SUM"; - - public SumAggregateFunction() { - } - - // TODO: remove when not required at built-in func register time - public SumAggregateFunction(List childExpressions){ - super(childExpressions, null); - } - - public SumAggregateFunction(List childExpressions, CountAggregateFunction delegate){ - super(childExpressions, delegate); - } - - private Aggregator newAggregator(final PDataType type, SortOrder sortOrder, ImmutableBytesWritable ptr) { - if (type == PDecimal.INSTANCE) { - return new DecimalSumAggregator(sortOrder, ptr); - } else if (PDataType.equalsAny(type, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, PFloat.INSTANCE)) { - return new DoubleSumAggregator(sortOrder, ptr) { - @Override - protected PDataType getInputDataType() { - return type; - } - }; - } else { - return new NumberSumAggregator(sortOrder, ptr) { - @Override - protected PDataType getInputDataType() { - return type; - } - }; - } - } + public static final String NAME = "SUM"; - @Override - public Aggregator newClientAggregator() { - return newAggregator(getDataType(), SortOrder.getDefault(), null); - } - - @Override - public Aggregator newServerAggregator(Configuration conf) { - Expression child = getAggregatorExpression(); - return newAggregator(child.getDataType(), child.getSortOrder(), null); - } - - @Override - public Aggregator newServerAggregator(Configuration conf, ImmutableBytesWritable ptr) { - Expression child = getAggregatorExpression(); - return newAggregator(child.getDataType(), child.getSortOrder(), ptr); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!super.evaluate(tuple, ptr)) { - return false; + public 
SumAggregateFunction() { + } + + // TODO: remove when not required at built-in func register time + public SumAggregateFunction(List childExpressions) { + super(childExpressions, null); + } + + public SumAggregateFunction(List childExpressions, CountAggregateFunction delegate) { + super(childExpressions, delegate); + } + + private Aggregator newAggregator(final PDataType type, SortOrder sortOrder, + ImmutableBytesWritable ptr) { + if (type == PDecimal.INSTANCE) { + return new DecimalSumAggregator(sortOrder, ptr); + } else if ( + PDataType.equalsAny(type, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, + PFloat.INSTANCE) + ) { + return new DoubleSumAggregator(sortOrder, ptr) { + @Override + protected PDataType getInputDataType() { + return type; } - if (isConstantExpression()) { - PDataType type = getDataType(); - Object constantValue = ((LiteralExpression)children.get(0)).getValue(); - if (type == PDecimal.INSTANCE) { - BigDecimal value = ((BigDecimal)constantValue).multiply((BigDecimal) PDecimal.INSTANCE.toObject(ptr, PLong.INSTANCE)); - ptr.set(PDecimal.INSTANCE.toBytes(value)); - } else { - long constantLongValue = ((Number)constantValue).longValue(); - long value = constantLongValue * type.getCodec().decodeLong(ptr, SortOrder.getDefault()); - byte[] resultPtr = new byte[type.getByteSize()]; - type.getCodec().encodeLong(value, resultPtr, 0); - ptr.set(resultPtr); - } + }; + } else { + return new NumberSumAggregator(sortOrder, ptr) { + @Override + protected PDataType getInputDataType() { + return type; } - return true; + }; } + } - @Override - public PDataType getDataType() { - if (super.getDataType() == PDecimal.INSTANCE) { - return PDecimal.INSTANCE; - } else if (PDataType.equalsAny(super.getDataType(), PUnsignedFloat.INSTANCE, PUnsignedDouble.INSTANCE, - PFloat.INSTANCE, PDouble.INSTANCE)) { - return PDouble.INSTANCE; - } else { - return PLong.INSTANCE; - } + @Override + public Aggregator newClientAggregator() { + return newAggregator(getDataType(), SortOrder.getDefault(), null); + } + + @Override + public Aggregator newServerAggregator(Configuration conf) { + Expression child = getAggregatorExpression(); + return newAggregator(child.getDataType(), child.getSortOrder(), null); + } + + @Override + public Aggregator newServerAggregator(Configuration conf, ImmutableBytesWritable ptr) { + Expression child = getAggregatorExpression(); + return newAggregator(child.getDataType(), child.getSortOrder(), ptr); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!super.evaluate(tuple, ptr)) { + return false; + } + if (isConstantExpression()) { + PDataType type = getDataType(); + Object constantValue = ((LiteralExpression) children.get(0)).getValue(); + if (type == PDecimal.INSTANCE) { + BigDecimal value = ((BigDecimal) constantValue) + .multiply((BigDecimal) PDecimal.INSTANCE.toObject(ptr, PLong.INSTANCE)); + ptr.set(PDecimal.INSTANCE.toBytes(value)); + } else { + long constantLongValue = ((Number) constantValue).longValue(); + long value = constantLongValue * type.getCodec().decodeLong(ptr, SortOrder.getDefault()); + byte[] resultPtr = new byte[type.getByteSize()]; + type.getCodec().encodeLong(value, resultPtr, 0); + ptr.set(resultPtr); + } } + return true; + } - @Override - public String getName() { - return NAME; + @Override + public PDataType getDataType() { + if (super.getDataType() == PDecimal.INSTANCE) { + return PDecimal.INSTANCE; + } else if ( + PDataType.equalsAny(super.getDataType(), PUnsignedFloat.INSTANCE, 
PUnsignedDouble.INSTANCE, + PFloat.INSTANCE, PDouble.INSTANCE) + ) { + return PDouble.INSTANCE; + } else { + return PLong.INSTANCE; } + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TanFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TanFunction.java index 5951cabc1a6..77117d6df4d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TanFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TanFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,31 +26,31 @@ import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDouble; -@BuiltInFunction(name = TanFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, - PDecimal.class }) }) +@BuiltInFunction(name = TanFunction.NAME, + args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) }) public class TanFunction extends JavaMathOneArgumentFunction { - public static final String NAME = "TAN"; + public static final String NAME = "TAN"; - public TanFunction() { - } + public TanFunction() { + } - public TanFunction(List children) throws SQLException { - super(children); - } + public TanFunction(List children) throws SQLException { + super(children); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } - @Override - protected double compute(double firstArg) { - return Math.tan(firstArg); - } + @Override + protected double compute(double firstArg) { + return Math.tan(firstArg); + } - @Override - public OrderPreserving preservesOrder() { - return OrderPreserving.YES; - } + @Override + public OrderPreserving preservesOrder() { + return OrderPreserving.YES; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimeUnit.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimeUnit.java index 7e8ffb87a56..a86ab87bbcc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimeUnit.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimeUnit.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,32 +20,35 @@ import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; public enum TimeUnit { - DAY("day"), - HOUR("hour"), - MINUTE("minute"), - SECOND("second"), - MILLISECOND("millisecond"), - WEEK("week"), - MONTH("month"), - YEAR("year"); - - private String value; - - private TimeUnit(String value) { - this.value = value; + DAY("day"), + HOUR("hour"), + MINUTE("minute"), + SECOND("second"), + MILLISECOND("millisecond"), + WEEK("week"), + MONTH("month"), + YEAR("year"); + + private String value; + + private TimeUnit(String value) { + this.value = value; + } + + public static final String VALID_VALUES = Joiner.on(", ").join(TimeUnit.values()); + + public static TimeUnit getTimeUnit(String timeUnit) { + if (timeUnit == null) { + throw new IllegalArgumentException( + "No time unit value specified. Only a time unit value that belongs to one of these : " + + VALID_VALUES + " is allowed."); } - - public static final String VALID_VALUES = Joiner.on(", ").join(TimeUnit.values()); - - public static TimeUnit getTimeUnit(String timeUnit) { - if(timeUnit == null) { - throw new IllegalArgumentException("No time unit value specified. Only a time unit value that belongs to one of these : " + VALID_VALUES + " is allowed."); - } - for(TimeUnit tu : values()) { - if(timeUnit.equalsIgnoreCase(tu.value)) { - return tu; - } - } - throw new IllegalArgumentException("Invalid value of time unit " + timeUnit + ". Only a time unit value that belongs to one of these : " + VALID_VALUES + " is allowed."); + for (TimeUnit tu : values()) { + if (timeUnit.equalsIgnoreCase(tu.value)) { + return tu; + } } + throw new IllegalArgumentException("Invalid value of time unit " + timeUnit + + ". Only a time unit value that belongs to one of these : " + VALID_VALUES + " is allowed."); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java index c61f838b148..df4197d9a7e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TimezoneOffsetFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.function; import java.sql.SQLException; @@ -35,57 +34,57 @@ /** * Returns offset (shift in minutes) of timezone at particular datetime in minutes. 
*/ -@FunctionParseNode.BuiltInFunction(name = TimezoneOffsetFunction.NAME, args = { - @FunctionParseNode.Argument(allowedTypes = {PVarchar.class}), - @FunctionParseNode.Argument(allowedTypes = {PDate.class})}) +@FunctionParseNode.BuiltInFunction(name = TimezoneOffsetFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PDate.class }) }) public class TimezoneOffsetFunction extends ScalarFunction { - public static final String NAME = "TIMEZONE_OFFSET"; - private static final int MILLIS_TO_MINUTES = 60 * 1000; - - public TimezoneOffsetFunction() { - } - - public TimezoneOffsetFunction(List children) throws SQLException { - super(children); - } + public static final String NAME = "TIMEZONE_OFFSET"; + private static final int MILLIS_TO_MINUTES = 60 * 1000; - @Override - public String getName() { - return NAME; - } + public TimezoneOffsetFunction() { + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!children.get(0).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - DateTimeZone timezoneInstance = JodaTimezoneCache.getInstance(ptr); + public TimezoneOffsetFunction(List children) throws SQLException { + super(children); + } - if (!children.get(1).evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(1).getSortOrder()); + @Override + public String getName() { + return NAME; + } - int offset = timezoneInstance.getOffset(date); - ptr.set(PInteger.INSTANCE.toBytes(offset / MILLIS_TO_MINUTES)); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!children.get(0).evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; } + DateTimeZone timezoneInstance = JodaTimezoneCache.getInstance(ptr); - @Override - public boolean isNullable() { - return children.get(0).isNullable() || children.get(1).isNullable(); + if (!children.get(1).evaluate(tuple, ptr)) { + return false; + } + if (ptr.getLength() == 0) { + return true; } + long date = PDate.INSTANCE.getCodec().decodeLong(ptr, children.get(1).getSortOrder()); + + int offset = timezoneInstance.getOffset(date); + ptr.set(PInteger.INSTANCE.toBytes(offset / MILLIS_TO_MINUTES)); + return true; + } + + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } + + @Override + public boolean isNullable() { + return children.get(0).isNullable() || children.get(1).isNullable(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToCharFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToCharFunction.java index a4e48c2253a..6b6215e63b8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToCharFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToCharFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,159 +24,157 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; - -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.*; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.*; -import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; - +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * - * Implementation of the TO_CHAR(<date>/<number>,[<format-string>] built-in function. - * The first argument must be of type DATE or TIME or TIMESTAMP or DECIMAL or INTEGER, and the second argument must be a constant string. - * - * + * Implementation of the TO_CHAR(<date>/<number>,[<format-string>] built-in + * function. The first argument must be of type DATE or TIME or TIMESTAMP or DECIMAL or INTEGER, and + * the second argument must be a constant string. * @since 0.1 */ -@BuiltInFunction(name=ToCharFunction.NAME, nodeClass=ToCharParseNode.class, args={ - @Argument(allowedTypes={PTimestamp.class, PDecimal.class}), - @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null") } ) +@BuiltInFunction(name = ToCharFunction.NAME, nodeClass = ToCharParseNode.class, + args = { @Argument(allowedTypes = { PTimestamp.class, PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null") }) public class ToCharFunction extends ScalarFunction { - public static final String NAME = "TO_CHAR"; - private String formatString; - private Format formatter; - private FunctionArgumentType type; - - public ToCharFunction() { - } + public static final String NAME = "TO_CHAR"; + private String formatString; + private Format formatter; + private FunctionArgumentType type; - public ToCharFunction(List children, StatementContext context) throws SQLException { - super(children.subList(0, 1)); - PDataType dataType = children.get(0).getDataType(); - String formatString = (String)((LiteralExpression)children.get(1)).getValue(); // either date or number format string - Format formatter; - FunctionArgumentType type; - if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - if (formatString == null) { - formatString = context.getDateFormat(); - formatter = context.getDateFormatter(); - } else { - formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); - } - type = FunctionArgumentType.TEMPORAL; - } - else if (dataType.isCoercibleTo(PDecimal.INSTANCE)) { - if (formatString == null) - formatString = context.getNumberFormat(); - formatter = FunctionArgumentType.NUMERIC.getFormatter(formatString); - type = FunctionArgumentType.NUMERIC; - } - else { - throw new SQLException(dataType + " type is unsupported for 
TO_CHAR(). Numeric and temporal types are supported."); - } - Preconditions.checkNotNull(formatString); - Preconditions.checkNotNull(formatter); - Preconditions.checkNotNull(type); - this.type = type; - this.formatString = formatString; - this.formatter = formatter; - } + public ToCharFunction() { + } - public ToCharFunction(List children, FunctionArgumentType type, String formatString, Format formatter) throws SQLException { - super(children.subList(0, 1)); - Preconditions.checkNotNull(formatString); - Preconditions.checkNotNull(formatter); - Preconditions.checkNotNull(type); - this.type = type; - this.formatString = formatString; - this.formatter = formatter; - } - - @Override - public ToCharFunction clone(List children) { - try { - return new ToCharFunction(children, type, formatString, formatter); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + formatString.hashCode(); - result = prime * result + getExpression().hashCode(); - return result; + public ToCharFunction(List children, StatementContext context) throws SQLException { + super(children.subList(0, 1)); + PDataType dataType = children.get(0).getDataType(); + String formatString = (String) ((LiteralExpression) children.get(1)).getValue(); // either date + // or number + // format + // string + Format formatter; + FunctionArgumentType type; + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + if (formatString == null) { + formatString = context.getDateFormat(); + formatter = context.getDateFormatter(); + } else { + formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); + } + type = FunctionArgumentType.TEMPORAL; + } else if (dataType.isCoercibleTo(PDecimal.INSTANCE)) { + if (formatString == null) formatString = context.getNumberFormat(); + formatter = FunctionArgumentType.NUMERIC.getFormatter(formatString); + type = FunctionArgumentType.NUMERIC; + } else { + throw new SQLException(dataType + + " type is unsupported for TO_CHAR(). 
Numeric and temporal types are supported."); } + Preconditions.checkNotNull(formatString); + Preconditions.checkNotNull(formatter); + Preconditions.checkNotNull(type); + this.type = type; + this.formatString = formatString; + this.formatter = formatter; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ToCharFunction other = (ToCharFunction)obj; - if (!getExpression().equals(other.getExpression())) return false; - if (!formatString.equals(other.formatString)) return false; - return true; - } + public ToCharFunction(List children, FunctionArgumentType type, String formatString, + Format formatter) throws SQLException { + super(children.subList(0, 1)); + Preconditions.checkNotNull(formatString); + Preconditions.checkNotNull(formatter); + Preconditions.checkNotNull(type); + this.type = type; + this.formatString = formatString; + this.formatter = formatter; + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - PDataType type = expression.getDataType(); - Object value = formatter.format(type.toObject(ptr, expression.getSortOrder())); - byte[] b = getDataType().toBytes(value); - ptr.set(b); - return true; - } - - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + @Override + public ToCharFunction clone(List children) { + try { + return new ToCharFunction(children, type, formatString, formatter); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } + } - @Override - public boolean isNullable() { - return getExpression().isNullable(); - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + formatString.hashCode(); + result = prime * result + getExpression().hashCode(); + return result; + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - formatString = WritableUtils.readString(input); - type = WritableUtils.readEnum(input, FunctionArgumentType.class); - formatter = type.getFormatter(formatString); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ToCharFunction other = (ToCharFunction) obj; + if (!getExpression().equals(other.getExpression())) return false; + if (!formatString.equals(other.formatString)) return false; + return true; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeString(output, formatString); - WritableUtils.writeEnum(output, type); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - private Expression getExpression() { - return children.get(0); + if (ptr.getLength() == 0) { + return true; } + PDataType type = expression.getDataType(); + Object value = formatter.format(type.toObject(ptr, expression.getSortOrder())); + byte[] b = getDataType().toBytes(value); + ptr.set(b); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public boolean 
isNullable() { + return getExpression().isNullable(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + formatString = WritableUtils.readString(input); + type = WritableUtils.readEnum(input, FunctionArgumentType.class); + formatter = type.getFormatter(formatString); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeString(output, formatString); + WritableUtils.writeEnum(output, type); + } + + private Expression getExpression() { + return children.get(0); + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java index 31d6fd998c1..8f086b9b716 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToDateFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,173 +38,172 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.DateUtil; - /** - * - * Implementation of the {@code TO_DATE(,[,[]])} built-in function. - * The second argument is optional and defaults to the phoenix.query.dateFormat value - * from the HBase config. If present it must be a constant string. The third argument is either a - * valid (constant) timezone id, or the string "local". The third argument is also optional, and - * it defaults to GMT. - * + * Implementation of the {@code TO_DATE(,[,[]])} built-in + * function. The second argument is optional and defaults to the phoenix.query.dateFormat value from + * the HBase config. If present it must be a constant string. The third argument is either a valid + * (constant) timezone id, or the string "local". The third argument is also optional, and it + * defaults to GMT. 
*/ -@BuiltInFunction(name=ToDateFunction.NAME, nodeClass=ToDateParseNode.class, - args={@Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null"), - @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } ) +@BuiltInFunction(name = ToDateFunction.NAME, nodeClass = ToDateParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null"), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null") }) public class ToDateFunction extends ScalarFunction { - public static final String NAME = "TO_DATE"; - private DateUtil.DateTimeParser dateParser; - private PDataCodec codec; - protected String dateFormat; - protected String timeZoneId; - - public ToDateFunction() { - } - - public ToDateFunction(List children, StatementContext context) throws SQLException { - super(children); - String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); - String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); - if (dateFormat == null) { - dateFormat = context.getDateFormat(); - } - if (timeZoneId == null) { - timeZoneId = context.getDateFormatTimeZoneId(); - } - init(dateFormat, timeZoneId); - } - - public ToDateFunction(List children, String dateFormat, String timeZoneId) throws SQLException { - super(children); - init(dateFormat, timeZoneId); - } - - @Override - public ToDateFunction clone(List children) { - try { - return new ToDateFunction(children, dateFormat, timeZoneId); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } - } - - private void init(String dateFormat, String timeZoneId) { - this.dateFormat = dateFormat; - this.dateParser = DateUtil.getDateTimeParser(dateFormat, getDataType(), timeZoneId); - // Store resolved timeZoneId, as if it's LOCAL, we don't want the - // server to evaluate using the local time zone. Instead, we want - // to use the client local time zone. - this.timeZoneId = this.dateParser.getTimeZone().getID(); - this.codec = DateUtil.getCodecFor(getDataType()); + public static final String NAME = "TO_DATE"; + private DateUtil.DateTimeParser dateParser; + private PDataCodec codec; + protected String dateFormat; + protected String timeZoneId; + + public ToDateFunction() { + } + + public ToDateFunction(List children, StatementContext context) throws SQLException { + super(children); + String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); + String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); + if (dateFormat == null) { + dateFormat = context.getDateFormat(); } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((dateFormat == null) ? 0 : dateFormat.hashCode()); - result = prime * result + ((timeZoneId == null) ? 0 : timeZoneId.hashCode()); - return result; + if (timeZoneId == null) { + timeZoneId = context.getDateFormatTimeZoneId(); } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (getClass() != obj.getClass()) return false; - ToDateFunction other = (ToDateFunction)obj; - // Only compare first child, as the other two are potentially resolved on the fly. 
- if (!this.getChildren().get(0).equals(other.getChildren().get(0))) return false; - if (dateFormat == null) { - if (other.dateFormat != null) return false; - } else if (!dateFormat.equals(other.dateFormat)) return false; - if (timeZoneId == null) { - if (other.timeZoneId != null) return false; - } else if (!timeZoneId.equals(other.timeZoneId)) return false; - return true; + init(dateFormat, timeZoneId); + } + + public ToDateFunction(List children, String dateFormat, String timeZoneId) + throws SQLException { + super(children); + init(dateFormat, timeZoneId); + } + + @Override + public ToDateFunction clone(List children) { + try { + return new ToDateFunction(children, dateFormat, timeZoneId); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - PDataType type = expression.getDataType(); - String dateStr = (String)type.toObject(ptr, expression.getSortOrder()); - long epochTime = dateParser.parseDateTime(dateStr); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - codec.encodeLong(epochTime, byteValue, 0); - ptr.set(byteValue); - return true; - } - - @Override - public PDataType getDataType() { - return PDate.INSTANCE; + } + + private void init(String dateFormat, String timeZoneId) { + this.dateFormat = dateFormat; + this.dateParser = DateUtil.getDateTimeParser(dateFormat, getDataType(), timeZoneId); + // Store resolved timeZoneId, as if it's LOCAL, we don't want the + // server to evaluate using the local time zone. Instead, we want + // to use the client local time zone. + this.timeZoneId = this.dateParser.getTimeZone().getID(); + this.codec = DateUtil.getCodecFor(getDataType()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((dateFormat == null) ? 0 : dateFormat.hashCode()); + result = prime * result + ((timeZoneId == null) ? 0 : timeZoneId.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (getClass() != obj.getClass()) return false; + ToDateFunction other = (ToDateFunction) obj; + // Only compare first child, as the other two are potentially resolved on the fly. + if (!this.getChildren().get(0).equals(other.getChildren().get(0))) return false; + if (dateFormat == null) { + if (other.dateFormat != null) return false; + } else if (!dateFormat.equals(other.dateFormat)) return false; + if (timeZoneId == null) { + if (other.timeZoneId != null) return false; + } else if (!timeZoneId.equals(other.timeZoneId)) return false; + return true; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public boolean isNullable() { - return getExpression().isNullable(); + if (ptr.getLength() == 0) { + return true; } - - private String getTimeZoneIdArg() { - return children.size() < 3 ? null : (String) ((LiteralExpression) children.get(2)).getValue(); - } - - private String getDateFormatArg() { - return children.size() < 2 ? 
null : (String) ((LiteralExpression) children.get(1)).getValue(); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - String timeZoneId; - String dateFormat = WritableUtils.readString(input); - if (dateFormat.length() != 0) { // pre 4.3 - timeZoneId = DateUtil.DEFAULT_TIME_ZONE_ID; + PDataType type = expression.getDataType(); + String dateStr = (String) type.toObject(ptr, expression.getSortOrder()); + long epochTime = dateParser.parseDateTime(dateStr); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + codec.encodeLong(epochTime, byteValue, 0); + ptr.set(byteValue); + return true; + } + + @Override + public PDataType getDataType() { + return PDate.INSTANCE; + } + + @Override + public boolean isNullable() { + return getExpression().isNullable(); + } + + private String getTimeZoneIdArg() { + return children.size() < 3 ? null : (String) ((LiteralExpression) children.get(2)).getValue(); + } + + private String getDateFormatArg() { + return children.size() < 2 ? null : (String) ((LiteralExpression) children.get(1)).getValue(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + String timeZoneId; + String dateFormat = WritableUtils.readString(input); + if (dateFormat.length() != 0) { // pre 4.3 + timeZoneId = DateUtil.DEFAULT_TIME_ZONE_ID; + } else { + int nChildren = children.size(); + if (nChildren == 1) { + dateFormat = WritableUtils.readString(input); + timeZoneId = WritableUtils.readString(input); + } else + if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) { + dateFormat = getDateFormatArg(); + timeZoneId = WritableUtils.readString(input); } else { - int nChildren = children.size(); - if (nChildren == 1) { - dateFormat = WritableUtils.readString(input); - timeZoneId = WritableUtils.readString(input); - } else if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) { - dateFormat = getDateFormatArg(); - timeZoneId = WritableUtils.readString(input); - } else { - dateFormat = getDateFormatArg(); - timeZoneId = getTimeZoneIdArg(); - } + dateFormat = getDateFormatArg(); + timeZoneId = getTimeZoneIdArg(); } - init(dateFormat, timeZoneId); } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeString(output, ""); // For b/w compat - int nChildren = children.size(); - // If dateFormat and/or timeZoneId are supplied as children, don't write them again, - // except if using LOCAL, in which case we want to write the resolved/actual time zone. - if (nChildren == 1) { - WritableUtils.writeString(output, dateFormat); - WritableUtils.writeString(output, timeZoneId); - } else if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) { - WritableUtils.writeString(output, timeZoneId); - } + init(dateFormat, timeZoneId); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeString(output, ""); // For b/w compat + int nChildren = children.size(); + // If dateFormat and/or timeZoneId are supplied as children, don't write them again, + // except if using LOCAL, in which case we want to write the resolved/actual time zone. 
+ if (nChildren == 1) { + WritableUtils.writeString(output, dateFormat); + WritableUtils.writeString(output, timeZoneId); + } else if (nChildren == 2 || DateUtil.LOCAL_TIME_ZONE_ID.equalsIgnoreCase(getTimeZoneIdArg())) { + WritableUtils.writeString(output, timeZoneId); } + } - private Expression getExpression() { - return children.get(0); - } + private Expression getExpression() { + return children.get(0); + } - @Override - public String getName() { - return NAME; - } + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToNumberFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToNumberFunction.java index ceb0f891ec1..bd4ed8db026 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToNumberFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToNumberFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,194 +28,194 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; - -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; +import org.apache.phoenix.parse.*; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.parse.*; +import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PChar; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * - * Implementation of TO_NUMBER(<string>/<date>/<timestamp>, [<pattern-string>]) built-in function. The format for the optional + * Implementation of TO_NUMBER(<string>/<date>/<timestamp>, + * [<pattern-string>]) built-in function. The format for the optional * pattern_string param is specified in {@link DecimalFormat}. 
- * - * * @since 0.1 */ -@BuiltInFunction(name=ToNumberFunction.NAME, nodeClass=ToNumberParseNode.class, args= { - @Argument(allowedTypes={PVarchar.class, PTimestamp.class}), - @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue="null")} ) +@BuiltInFunction(name = ToNumberFunction.NAME, nodeClass = ToNumberParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class, PTimestamp.class }), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null") }) public class ToNumberFunction extends ScalarFunction { - public static final String NAME = "TO_NUMBER"; - - private String formatString = null; - private Format format = null; - private FunctionArgumentType type; - - public ToNumberFunction() {} - - public ToNumberFunction(List children, StatementContext context) throws SQLException { - super(children.subList(0, 1)); - PDataType dataType = children.get(0).getDataType(); - String formatString = (String)((LiteralExpression)children.get(1)).getValue(); // either date or number format string - Format formatter = null; - FunctionArgumentType type; - - if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - if (formatString == null) { - formatString = context.getDateFormat(); - formatter = context.getDateFormatter(); - } else { - formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); - } - type = FunctionArgumentType.TEMPORAL; - } - else if (dataType.isCoercibleTo(PChar.INSTANCE)) { - if (formatString != null) { - formatter = FunctionArgumentType.CHAR.getFormatter(formatString); - } - type = FunctionArgumentType.CHAR; - } - else { - throw new SQLException(dataType + " type is unsupported for TO_NUMBER(). Numeric and temporal types are supported."); - } - Preconditions.checkNotNull(type); - this.type = type; - this.formatString = formatString; - this.format = formatter; - } - - public ToNumberFunction(List children, FunctionArgumentType type, String formatString, Format formatter) throws SQLException { - super(children.subList(0, 1)); - Preconditions.checkNotNull(type); - this.type = type; - this.formatString = formatString; - this.format = formatter; + public static final String NAME = "TO_NUMBER"; + + private String formatString = null; + private Format format = null; + private FunctionArgumentType type; + + public ToNumberFunction() { + } + + public ToNumberFunction(List children, StatementContext context) throws SQLException { + super(children.subList(0, 1)); + PDataType dataType = children.get(0).getDataType(); + String formatString = (String) ((LiteralExpression) children.get(1)).getValue(); // either date + // or number + // format + // string + Format formatter = null; + FunctionArgumentType type; + + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + if (formatString == null) { + formatString = context.getDateFormat(); + formatter = context.getDateFormatter(); + } else { + formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); + } + type = FunctionArgumentType.TEMPORAL; + } else if (dataType.isCoercibleTo(PChar.INSTANCE)) { + if (formatString != null) { + formatter = FunctionArgumentType.CHAR.getFormatter(formatString); + } + type = FunctionArgumentType.CHAR; + } else { + throw new SQLException(dataType + + " type is unsupported for TO_NUMBER(). 
Numeric and temporal types are supported."); } - - @Override - public ToNumberFunction clone(List children) { - try { - return new ToNumberFunction(children, type, formatString, format); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } + Preconditions.checkNotNull(type); + this.type = type; + this.formatString = formatString; + this.format = formatter; + } + + public ToNumberFunction(List children, FunctionArgumentType type, String formatString, + Format formatter) throws SQLException { + super(children.subList(0, 1)); + Preconditions.checkNotNull(type); + this.type = type; + this.formatString = formatString; + this.format = formatter; + } + + @Override + public ToNumberFunction clone(List children) { + try { + return new ToNumberFunction(children, type, formatString, format); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { - return true; - } - - PDataType type = expression.getDataType(); - if (type.isCoercibleTo(PTimestamp.INSTANCE)) { - Date date = (Date) type.toObject(ptr, expression.getSortOrder()); - BigDecimal time = new BigDecimal(date.getTime()); - byte[] byteValue = getDataType().toBytes(time); - ptr.set(byteValue); - return true; - } - - String stringValue = (String)type.toObject(ptr, expression.getSortOrder()); - if (stringValue == null) { - ptr.set(EMPTY_BYTE_ARRAY); - return true; - } - stringValue = stringValue.trim(); - BigDecimal decimalValue; - if (format == null) { - decimalValue = (BigDecimal) getDataType().toObject(stringValue); - } else { - ParsePosition parsePosition = new ParsePosition(0); - Number number = ((DecimalFormat) format).parse(stringValue, parsePosition); - if (parsePosition.getErrorIndex() > -1) { - ptr.set(EMPTY_BYTE_ARRAY); - return true; - } - - if (number instanceof BigDecimal) { - // since we set DecimalFormat.setParseBigDecimal(true) we are guaranteeing result to be - // of type BigDecimal in most cases. see java.text.DecimalFormat.parse() JavaDoc. 
- decimalValue = (BigDecimal)number; - } else { - ptr.set(EMPTY_BYTE_ARRAY); - return true; - } - } - byte[] byteValue = getDataType().toBytes(decimalValue); - ptr.set(byteValue); - return true; + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } - @Override - public PDataType getDataType() { - return PDecimal.INSTANCE; - } - - @Override - public boolean isNullable() { - return getExpression().isNullable(); + PDataType type = expression.getDataType(); + if (type.isCoercibleTo(PTimestamp.INSTANCE)) { + Date date = (Date) type.toObject(ptr, expression.getSortOrder()); + BigDecimal time = new BigDecimal(date.getTime()); + byte[] byteValue = getDataType().toBytes(time); + ptr.set(byteValue); + return true; } - private Expression getExpression() { - return children.get(0); + String stringValue = (String) type.toObject(ptr, expression.getSortOrder()); + if (stringValue == null) { + ptr.set(EMPTY_BYTE_ARRAY); + return true; } - - @Override - public String getName() { - return NAME; - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - formatString = WritableUtils.readString(input); - type = WritableUtils.readEnum(input, FunctionArgumentType.class); - if (formatString != null) { - format = type.getFormatter(formatString); - } - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeString(output, formatString); - WritableUtils.writeEnum(output, type); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((formatString == null) ? 0 : formatString.hashCode()); - result = prime * result + getExpression().hashCode(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - ToNumberFunction other = (ToNumberFunction)obj; - if (formatString == null) { - if (other.formatString != null) return false; - } else if (!formatString.equals(other.formatString)) return false; - if (!getExpression().equals(other.getExpression())) return false; + stringValue = stringValue.trim(); + BigDecimal decimalValue; + if (format == null) { + decimalValue = (BigDecimal) getDataType().toObject(stringValue); + } else { + ParsePosition parsePosition = new ParsePosition(0); + Number number = ((DecimalFormat) format).parse(stringValue, parsePosition); + if (parsePosition.getErrorIndex() > -1) { + ptr.set(EMPTY_BYTE_ARRAY); + return true; + } + + if (number instanceof BigDecimal) { + // since we set DecimalFormat.setParseBigDecimal(true) we are guaranteeing result to be + // of type BigDecimal in most cases. see java.text.DecimalFormat.parse() JavaDoc. 
+ decimalValue = (BigDecimal) number; + } else { + ptr.set(EMPTY_BYTE_ARRAY); return true; + } + } + byte[] byteValue = getDataType().toBytes(decimalValue); + ptr.set(byteValue); + return true; + } + + @Override + public PDataType getDataType() { + return PDecimal.INSTANCE; + } + + @Override + public boolean isNullable() { + return getExpression().isNullable(); + } + + private Expression getExpression() { + return children.get(0); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + formatString = WritableUtils.readString(input); + type = WritableUtils.readEnum(input, FunctionArgumentType.class); + if (formatString != null) { + format = type.getFormatter(formatString); } + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeString(output, formatString); + WritableUtils.writeEnum(output, type); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((formatString == null) ? 0 : formatString.hashCode()); + result = prime * result + getExpression().hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + ToNumberFunction other = (ToNumberFunction) obj; + if (formatString == null) { + if (other.formatString != null) return false; + } else if (!formatString.equals(other.formatString)) return false; + if (!getExpression().equals(other.getExpression())) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java index 905d955e0c8..4b0a73d7c3c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimeFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; import org.apache.phoenix.parse.ToTimeParseNode; @@ -31,48 +30,47 @@ import org.apache.phoenix.schema.types.PVarchar; /** -* -* Implementation of the {@code TO_TIME(,[,[]])} built-in function. -* The second argument is optional and defaults to the phoenix.query.dateFormat value -* from the HBase config. If present it must be a constant string. The third argument is either a -* valid (constant) timezone id, or the string "LOCAL". The third argument is also optional, and -* it defaults to GMT. 
-* -*/ -@BuiltInFunction(name=ToTimeFunction.NAME, nodeClass=ToTimeParseNode.class, - args={@Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null"), - @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } ) + * Implementation of the {@code TO_TIME(,[,[]])} built-in + * function. The second argument is optional and defaults to the phoenix.query.dateFormat value from + * the HBase config. If present it must be a constant string. The third argument is either a valid + * (constant) timezone id, or the string "LOCAL". The third argument is also optional, and it + * defaults to GMT. + */ +@BuiltInFunction(name = ToTimeFunction.NAME, nodeClass = ToTimeParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null"), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null") }) public class ToTimeFunction extends ToDateFunction { - public static final String NAME = "TO_TIME"; + public static final String NAME = "TO_TIME"; - public ToTimeFunction() { - } + public ToTimeFunction() { + } - public ToTimeFunction(List children, StatementContext context) throws SQLException { - super(children, context); - } + public ToTimeFunction(List children, StatementContext context) throws SQLException { + super(children, context); + } - public ToTimeFunction(List children, String dateFormat, String timeZoneId) throws SQLException { - super(children, dateFormat, timeZoneId); - } - - @Override - public ToTimeFunction clone(List children) { - try { - return new ToTimeFunction(children, dateFormat, timeZoneId); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } - } + public ToTimeFunction(List children, String dateFormat, String timeZoneId) + throws SQLException { + super(children, dateFormat, timeZoneId); + } - @Override - public PDataType getDataType() { - return PTime.INSTANCE; + @Override + public ToTimeFunction clone(List children) { + try { + return new ToTimeFunction(children, dateFormat, timeZoneId); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PTime.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java index a727d0e301e..2ef224a2c3a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/ToTimestampFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,48 +30,48 @@ import org.apache.phoenix.schema.types.PVarchar; /** -* -* Implementation of the {@code TO_TIMESTAMP(,[,[]])} built-in function. 
-* The second argument is optional and defaults to the phoenix.query.timestampFormat value -* from the HBase config. If present it must be a constant string. The third argument is either a -* valid (constant) timezone id, or the string "local". The third argument is also optional, and -* it defaults to GMT. -* -*/ -@BuiltInFunction(name=ToTimestampFunction.NAME, nodeClass=ToTimestampParseNode.class, - args={@Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class},isConstant=true,defaultValue="null"), - @Argument(allowedTypes={PVarchar.class}, isConstant=true, defaultValue = "null") } ) + * Implementation of the {@code TO_TIMESTAMP(,[,[]])} + * built-in function. The second argument is optional and defaults to the + * phoenix.query.timestampFormat value from the HBase config. If present it must be a constant + * string. The third argument is either a valid (constant) timezone id, or the string "local". The + * third argument is also optional, and it defaults to GMT. + */ +@BuiltInFunction(name = ToTimestampFunction.NAME, nodeClass = ToTimestampParseNode.class, + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null"), + @Argument(allowedTypes = { PVarchar.class }, isConstant = true, defaultValue = "null") }) public class ToTimestampFunction extends ToDateFunction { - public static final String NAME = "TO_TIMESTAMP"; + public static final String NAME = "TO_TIMESTAMP"; - public ToTimestampFunction() { - } + public ToTimestampFunction() { + } - public ToTimestampFunction(List children, StatementContext context) throws SQLException { - super(children, context); - } + public ToTimestampFunction(List children, StatementContext context) + throws SQLException { + super(children, context); + } - public ToTimestampFunction(List children, String dateFormat, String timeZoneId) throws SQLException { - super(children, dateFormat, timeZoneId); - } - - @Override - public ToTimestampFunction clone(List children) { - try { - return new ToTimestampFunction(children, dateFormat, timeZoneId); - } catch (Exception e) { - throw new RuntimeException(e); // Impossible, since it was originally constructed this way - } - } + public ToTimestampFunction(List children, String dateFormat, String timeZoneId) + throws SQLException { + super(children, dateFormat, timeZoneId); + } - @Override - public PDataType getDataType() { - return PTimestamp.INSTANCE; + @Override + public ToTimestampFunction clone(List children) { + try { + return new ToTimestampFunction(children, dateFormat, timeZoneId); + } catch (Exception e) { + throw new RuntimeException(e); // Impossible, since it was originally constructed this way } + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PTimestamp.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TransactionProviderNameFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TransactionProviderNameFunction.java index 0117c1f9c6b..a620c14dbe1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TransactionProviderNameFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TransactionProviderNameFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,51 +31,45 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.transaction.TransactionFactory; - /** - * - * Function used to get the index state name from the serialized byte value - * Usage: - * IndexStateName('a') - * will return 'ACTIVE' - * - * + * Function used to get the index state name from the serialized byte value Usage: + * IndexStateName('a') will return 'ACTIVE' * @since 2.1 */ -@BuiltInFunction(name=TransactionProviderNameFunction.NAME, args= { - @Argument(allowedTypes= PInteger.class)} ) +@BuiltInFunction(name = TransactionProviderNameFunction.NAME, + args = { @Argument(allowedTypes = PInteger.class) }) public class TransactionProviderNameFunction extends ScalarFunction { - public static final String NAME = "TransactionProviderName"; + public static final String NAME = "TransactionProviderName"; - public TransactionProviderNameFunction() { - } - - public TransactionProviderNameFunction(List children) throws SQLException { - super(children); - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression child = children.get(0); - if (!child.evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - return true; - } - int code = PTinyint.INSTANCE.getCodec().decodeByte(ptr, child.getSortOrder()); - TransactionFactory.Provider provider = TransactionFactory.Provider.fromCode(code); - ptr.set(PVarchar.INSTANCE.toBytes(provider.name())); - return true; - } + public TransactionProviderNameFunction() { + } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; + public TransactionProviderNameFunction(List children) throws SQLException { + super(children); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression child = children.get(0); + if (!child.evaluate(tuple, ptr)) { + return false; } - - @Override - public String getName() { - return NAME; + if (ptr.getLength() == 0) { + return true; } + int code = PTinyint.INSTANCE.getCodec().decodeByte(ptr, child.getSortOrder()); + TransactionFactory.Provider provider = TransactionFactory.Provider.fromCode(code); + ptr.set(PVarchar.INSTANCE.toBytes(provider.name())); + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java index 25654455269..18e29fcb805 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TrimFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,83 +24,80 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.parse.FunctionParseNode.Argument; import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; - /** - * Implementation of the {@code Trim() } build-in function. It removes from both end of {@code } - * space character and other function bytes in single byte utf8 characters set. - * - * + * Implementation of the {@code Trim() } build-in function. It removes from both end of + * {@code } space character and other function bytes in single byte utf8 characters set. * @since 0.1 */ -@BuiltInFunction(name=TrimFunction.NAME, args={ - @Argument(allowedTypes={ PVarchar.class })} ) +@BuiltInFunction(name = TrimFunction.NAME, args = { @Argument(allowedTypes = { PVarchar.class }) }) public class TrimFunction extends ScalarFunction { - public static final String NAME = "TRIM"; + public static final String NAME = "TRIM"; - private Integer maxLength; + private Integer maxLength; - public TrimFunction() { } + public TrimFunction() { + } - public TrimFunction(List children) throws SQLException { - super(children); - if (getStringExpression().getDataType().isFixedWidth()) { - maxLength = getStringExpression().getMaxLength(); - } + public TrimFunction(List children) throws SQLException { + super(children); + if (getStringExpression().getDataType().isFixedWidth()) { + maxLength = getStringExpression().getMaxLength(); } + } - private Expression getStringExpression() { - return children.get(0); - } + private Expression getStringExpression() { + return children.get(0); + } - @Override - public SortOrder getSortOrder() { - return children.get(0).getSortOrder(); - } + @Override + public SortOrder getSortOrder() { + return children.get(0).getSortOrder(); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getStringExpression().evaluate(tuple, ptr)) { - return false; - } - if (ptr.getLength() == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - byte[] string = ptr.get(); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - - SortOrder sortOrder = getSortOrder(); - int end = StringUtil.getFirstNonBlankCharIdxFromEnd(string, offset, length, sortOrder); - if (end == offset - 1) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return true; - } - int head = StringUtil.getFirstNonBlankCharIdxFromStart(string, offset, length, sortOrder); - ptr.set(string, head, end - head + 1); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getStringExpression().evaluate(tuple, ptr)) { + return false; } - - @Override - public Integer getMaxLength() { - return maxLength; + if (ptr.getLength() == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } + byte[] string = ptr.get(); + int offset = ptr.getOffset(); + int length = ptr.getLength(); - @Override - public PDataType 
getDataType() { - return PVarchar.INSTANCE; + SortOrder sortOrder = getSortOrder(); + int end = StringUtil.getFirstNonBlankCharIdxFromEnd(string, offset, length, sortOrder); + if (end == offset - 1) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return true; } + int head = StringUtil.getFirstNonBlankCharIdxFromStart(string, offset, length, sortOrder); + ptr.set(string, head, end - head + 1); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public Integer getMaxLength() { + return maxLength; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public String getName() { + return NAME; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java index 6d223a78778..bbca49a7c39 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/TruncFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,40 +31,33 @@ import org.apache.phoenix.schema.types.PVarchar; /** - * - * Function used to bucketize date/time values by truncating them to - * an even increment. Usage: + * Function used to bucketize date/time values by truncating them to an even increment. Usage: * {@code TRUNC(,<'day'|'hour'|'minute'|'second'|'millisecond'>,[]) } - * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 10 minute rollup) - * The function returns a {@link org.apache.phoenix.schema.types.PDate} - * - * + * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 
10 + * minute rollup) The function returns a {@link org.apache.phoenix.schema.types.PDate} * @since 0.1 */ -@BuiltInFunction(name = TruncFunction.NAME, -nodeClass = FloorParseNode.class, -args = { - @Argument(allowedTypes={PTimestamp.class, PDecimal.class}), - @Argument(allowedTypes={PVarchar.class, PInteger.class}, defaultValue = "null", isConstant=true), - @Argument(allowedTypes={PInteger.class}, defaultValue="1", isConstant=true) - }, -classType = FunctionParseNode.FunctionClassType.ALIAS, -derivedFunctions = {FloorFunction.class} -) +@BuiltInFunction(name = TruncFunction.NAME, nodeClass = FloorParseNode.class, + args = { @Argument(allowedTypes = { PTimestamp.class, PDecimal.class }), + @Argument(allowedTypes = { PVarchar.class, PInteger.class }, defaultValue = "null", + isConstant = true), + @Argument(allowedTypes = { PInteger.class }, defaultValue = "1", isConstant = true) }, + classType = FunctionParseNode.FunctionClassType.ALIAS, + derivedFunctions = { FloorFunction.class }) public abstract class TruncFunction extends ScalarFunction { - - public static final String NAME = "TRUNC"; - public TruncFunction() {} - - public TruncFunction(List children) throws SQLException { - super(children); - } + public static final String NAME = "TRUNC"; + + public TruncFunction() { + } + + public TruncFunction(List children) throws SQLException { + super(children); + } + + @Override + public String getName() { + return NAME; + } - @Override - public String getName() { - return NAME; - } - - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UDFExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UDFExpression.java index dc75d3664b5..8bd90afc485 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UDFExpression.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UDFExpression.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,201 +41,206 @@ import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.collect.MapMaker; public class UDFExpression extends ScalarFunction { - - private static Configuration config = HBaseConfiguration.create(); - - private static final ConcurrentMap tenantIdSpecificCls = - new MapMaker().concurrencyLevel(3).weakValues().makeMap(); - - private static final ConcurrentMap pathSpecificCls = - new MapMaker().concurrencyLevel(3).weakValues().makeMap(); - - private PName tenantId; - private String functionClassName; - private String jarPath; - private ScalarFunction udfFunction; - - public UDFExpression() { - } - - public UDFExpression(List children,PFunction functionInfo) { - super(children); - this.tenantId = - functionInfo.getTenantId() == null ? 
PName.EMPTY_NAME : functionInfo.getTenantId(); - this.functionClassName = functionInfo.getClassName(); - this.jarPath = functionInfo.getJarPath(); - constructUDFFunction(); - } - - public UDFExpression(List children, PName tenantId, String functionClassName, - String jarPath, ScalarFunction udfFunction) { - super(children); - this.tenantId = tenantId; - this.functionClassName = functionClassName; - this.jarPath = jarPath; - if(udfFunction != null) { - this.udfFunction = udfFunction; - } else { - constructUDFFunction(); - } - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return udfFunction.evaluate(tuple, ptr); - } - - @Override - public T accept(ExpressionVisitor visitor) { - return udfFunction.accept(visitor); - } - - @Override - public PDataType getDataType() { - return udfFunction.getDataType(); - } - - @Override - public String getName() { - return udfFunction.getName(); - } - - @Override - public OrderPreserving preservesOrder() { - return udfFunction.preservesOrder(); - } - - @Override - public KeyPart newKeyPart(KeyPart childPart) { - return udfFunction.newKeyPart(childPart); - } - @Override - public int getKeyFormationTraversalIndex() { - return udfFunction.getKeyFormationTraversalIndex(); - } - - public PName getTenantId() { - return tenantId; - } - - public String getFunctionClassName() { - return functionClassName; - } - - public String getJarPath() { - return jarPath; - } - - public ScalarFunction getUdfFunction() { - return udfFunction; - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeString(output, tenantId.getString()); - WritableUtils.writeString(output, this.functionClassName); - if(this.jarPath == null) { - WritableUtils.writeString(output, ""); - } else { - WritableUtils.writeString(output, this.jarPath); - } - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.tenantId = PNameFactory.newName(WritableUtils.readString(input)); - this.functionClassName = WritableUtils.readString(input); - String str = WritableUtils.readString(input); - this.jarPath = str.length() == 0 ? null: str; - constructUDFFunction(); - } - - private void constructUDFFunction() { - try { - DynamicClassLoader classLoader = getClassLoader(this.tenantId, this.jarPath); - Class clazz = classLoader.loadClass(this.functionClassName); - Constructor constructor = clazz.getConstructor(List.class); - udfFunction = (ScalarFunction)constructor.newInstance(this.children); - } catch (ClassNotFoundException | NoSuchMethodException | SecurityException - | InstantiationException | IllegalAccessException | IllegalArgumentException - | InvocationTargetException e) { - throw new RuntimeException(e); - } - } - - public static DynamicClassLoader getClassLoader(final PName tenantId, final String jarPath) { - DynamicClassLoader cl = tenantIdSpecificCls.get(tenantId); - Path parent = null; - if (cl != null) return cl; - if(jarPath != null && !jarPath.isEmpty()) { - cl = pathSpecificCls.get(jarPath); - if (cl != null) return cl; - parent = getPathForParent(jarPath); - } - // Parse the DYNAMIC_JARS_DIR_KEY value as a Path if it's present in the configuration - Path allowedDynamicJarsPath = config.get(DYNAMIC_JARS_DIR_KEY) != null ? 
new Path(config.get(DYNAMIC_JARS_DIR_KEY)) : null; - // The case jarPath is not provided, or it is provided and the jar is inside hbase.dynamic.jars.dir - if (jarPath == null || jarPath.isEmpty() - || (allowedDynamicJarsPath != null && parent != null && parent.equals(allowedDynamicJarsPath))) { - cl = tenantIdSpecificCls.get(tenantId); - if (cl == null) { - cl = new DynamicClassLoader(config, UDFExpression.class.getClassLoader()); - } - // Cache class loader as a weak value, will be GC'ed when no reference left - DynamicClassLoader prev = tenantIdSpecificCls.putIfAbsent(tenantId, cl); - if (prev != null) { - cl = prev; - } - return cl; - } else { - //The case jarPath is provided as not part of DYNAMIC_JARS_DIR_KEY - //As per PHOENIX-4231, DYNAMIC_JARS_DIR_KEY is the only place where loading a udf jar is allowed - throw new SecurityException("Loading jars from " + jarPath + " is not allowed. The only location that is allowed is "+ config.get(DYNAMIC_JARS_DIR_KEY)); - } - } - - public static Path getPathForParent(String jarPath) { - Path path = new Path(jarPath); - if (jarPath.endsWith(".jar")) { - return path.getParent(); - } - return path; - } - - @VisibleForTesting - public static void setConfig(Configuration conf) { - config = conf; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!(obj instanceof UDFExpression)) { - return false; - } - UDFExpression that = (UDFExpression) obj; - if (!this.udfFunction.getName().equals(that.udfFunction.getName())) { - return false; - } - if (!this.udfFunction.getChildren().equals( - that.udfFunction.getChildren())) { - return false; - } - if (!functionClassName.equals(that.functionClassName)) { - return false; - } - if (!jarPath.equals(that.jarPath)) { - return false; - } - return true; - } + private static Configuration config = HBaseConfiguration.create(); + + private static final ConcurrentMap tenantIdSpecificCls = + new MapMaker().concurrencyLevel(3).weakValues().makeMap(); + + private static final ConcurrentMap pathSpecificCls = + new MapMaker().concurrencyLevel(3).weakValues().makeMap(); + + private PName tenantId; + private String functionClassName; + private String jarPath; + private ScalarFunction udfFunction; + + public UDFExpression() { + } + + public UDFExpression(List children, PFunction functionInfo) { + super(children); + this.tenantId = + functionInfo.getTenantId() == null ? 
PName.EMPTY_NAME : functionInfo.getTenantId(); + this.functionClassName = functionInfo.getClassName(); + this.jarPath = functionInfo.getJarPath(); + constructUDFFunction(); + } + + public UDFExpression(List children, PName tenantId, String functionClassName, + String jarPath, ScalarFunction udfFunction) { + super(children); + this.tenantId = tenantId; + this.functionClassName = functionClassName; + this.jarPath = jarPath; + if (udfFunction != null) { + this.udfFunction = udfFunction; + } else { + constructUDFFunction(); + } + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return udfFunction.evaluate(tuple, ptr); + } + + @Override + public T accept(ExpressionVisitor visitor) { + return udfFunction.accept(visitor); + } + + @Override + public PDataType getDataType() { + return udfFunction.getDataType(); + } + + @Override + public String getName() { + return udfFunction.getName(); + } + + @Override + public OrderPreserving preservesOrder() { + return udfFunction.preservesOrder(); + } + + @Override + public KeyPart newKeyPart(KeyPart childPart) { + return udfFunction.newKeyPart(childPart); + } + + @Override + public int getKeyFormationTraversalIndex() { + return udfFunction.getKeyFormationTraversalIndex(); + } + + public PName getTenantId() { + return tenantId; + } + + public String getFunctionClassName() { + return functionClassName; + } + + public String getJarPath() { + return jarPath; + } + + public ScalarFunction getUdfFunction() { + return udfFunction; + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeString(output, tenantId.getString()); + WritableUtils.writeString(output, this.functionClassName); + if (this.jarPath == null) { + WritableUtils.writeString(output, ""); + } else { + WritableUtils.writeString(output, this.jarPath); + } + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.tenantId = PNameFactory.newName(WritableUtils.readString(input)); + this.functionClassName = WritableUtils.readString(input); + String str = WritableUtils.readString(input); + this.jarPath = str.length() == 0 ? null : str; + constructUDFFunction(); + } + + private void constructUDFFunction() { + try { + DynamicClassLoader classLoader = getClassLoader(this.tenantId, this.jarPath); + Class clazz = classLoader.loadClass(this.functionClassName); + Constructor constructor = clazz.getConstructor(List.class); + udfFunction = (ScalarFunction) constructor.newInstance(this.children); + } catch (ClassNotFoundException | NoSuchMethodException | SecurityException + | InstantiationException | IllegalAccessException | IllegalArgumentException + | InvocationTargetException e) { + throw new RuntimeException(e); + } + } + + public static DynamicClassLoader getClassLoader(final PName tenantId, final String jarPath) { + DynamicClassLoader cl = tenantIdSpecificCls.get(tenantId); + Path parent = null; + if (cl != null) return cl; + if (jarPath != null && !jarPath.isEmpty()) { + cl = pathSpecificCls.get(jarPath); + if (cl != null) return cl; + parent = getPathForParent(jarPath); + } + // Parse the DYNAMIC_JARS_DIR_KEY value as a Path if it's present in the configuration + Path allowedDynamicJarsPath = + config.get(DYNAMIC_JARS_DIR_KEY) != null ? 
new Path(config.get(DYNAMIC_JARS_DIR_KEY)) : null; + // The case jarPath is not provided, or it is provided and the jar is inside + // hbase.dynamic.jars.dir + if ( + jarPath == null || jarPath.isEmpty() + || (allowedDynamicJarsPath != null && parent != null + && parent.equals(allowedDynamicJarsPath)) + ) { + cl = tenantIdSpecificCls.get(tenantId); + if (cl == null) { + cl = new DynamicClassLoader(config, UDFExpression.class.getClassLoader()); + } + // Cache class loader as a weak value, will be GC'ed when no reference left + DynamicClassLoader prev = tenantIdSpecificCls.putIfAbsent(tenantId, cl); + if (prev != null) { + cl = prev; + } + return cl; + } else { + // The case jarPath is provided as not part of DYNAMIC_JARS_DIR_KEY + // As per PHOENIX-4231, DYNAMIC_JARS_DIR_KEY is the only place where loading a udf jar is + // allowed + throw new SecurityException( + "Loading jars from " + jarPath + " is not allowed. The only location that is allowed is " + + config.get(DYNAMIC_JARS_DIR_KEY)); + } + } + + public static Path getPathForParent(String jarPath) { + Path path = new Path(jarPath); + if (jarPath.endsWith(".jar")) { + return path.getParent(); + } + return path; + } + + @VisibleForTesting + public static void setConfig(Configuration conf) { + config = conf; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof UDFExpression)) { + return false; + } + UDFExpression that = (UDFExpression) obj; + if (!this.udfFunction.getName().equals(that.udfFunction.getName())) { + return false; + } + if (!this.udfFunction.getChildren().equals(that.udfFunction.getChildren())) { + return false; + } + if (!functionClassName.equals(that.functionClassName)) { + return false; + } + if (!jarPath.equals(that.jarPath)) { + return false; + } + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UpperFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UpperFunction.java index 56a228c6dd8..0a6f2f7aab0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UpperFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/UpperFunction.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,73 +31,74 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.i18n.LocaleUtils; -@FunctionParseNode.BuiltInFunction(name=UpperFunction.NAME, args={ - @FunctionParseNode.Argument(allowedTypes={PVarchar.class}), - @FunctionParseNode.Argument(allowedTypes={PVarchar.class}, defaultValue="null", isConstant=true)} ) +@FunctionParseNode.BuiltInFunction(name = UpperFunction.NAME, + args = { @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }), + @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, defaultValue = "null", + isConstant = true) }) public class UpperFunction extends ScalarFunction { - public static final String NAME = "UPPER"; + public static final String NAME = "UPPER"; - private Locale locale; + private Locale locale; - public UpperFunction() { - } + public UpperFunction() { + } - public UpperFunction(List children) throws SQLException { - super(children); - initialize(); - } + public UpperFunction(List children) throws SQLException { + super(children); + initialize(); + } - private void initialize() { - if (children.size() > 1) { - String localeISOCode = getLiteralValue(1, String.class); - locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); - } + private void initialize() { + if (children.size() > 1) { + String localeISOCode = getLiteralValue(1, String.class); + locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode); + } + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + initialize(); + } + + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + if (!getStrExpression().evaluate(tuple, ptr)) { + return false; } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - initialize(); + String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getStrExpression().getSortOrder()); + if (sourceStr == null) { + return true; } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - if (!getStrExpression().evaluate(tuple, ptr)) { - return false; - } + String resultStr = locale == null ? sourceStr.toUpperCase() : sourceStr.toUpperCase(locale); - String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr, getStrExpression().getSortOrder()); - if (sourceStr == null) { - return true; - } + ptr.set(PVarchar.INSTANCE.toBytes(resultStr)); + return true; + } - String resultStr = locale == null ? 
sourceStr.toUpperCase() : sourceStr.toUpperCase(locale); + @Override + public PDataType getDataType() { + return getStrExpression().getDataType(); + } - ptr.set(PVarchar.INSTANCE.toBytes(resultStr)); - return true; - } + @Override + public Integer getMaxLength() { + return getStrExpression().getMaxLength(); + } - @Override - public PDataType getDataType() { - return getStrExpression().getDataType(); - } + @Override + public boolean isNullable() { + return getStrExpression().isNullable(); + } - @Override - public Integer getMaxLength() { - return getStrExpression().getMaxLength(); - } + @Override + public String getName() { + return NAME; + } - @Override - public boolean isNullable() { - return getStrExpression().isNullable(); - } - - @Override - public String getName() { - return NAME; - } - - private Expression getStrExpression() { - return children.get(0); - } + private Expression getStrExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java index f3032bfd297..3df0d193fc6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/WeekFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,53 +32,51 @@ import org.joda.time.chrono.GJChronology; /** - * - * Implementation of the WEEK() buildin. Input Date/Timestamp. - * Returns an integer from 1 to 53 representing the week of the year in date - * + * Implementation of the WEEK() buildin. Input Date/Timestamp. 
Returns an integer from 1 to 53 + * representing the week of the year in date */ -@BuiltInFunction(name=WeekFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = WeekFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class WeekFunction extends DateScalarFunction { - public static final String NAME = "WEEK"; + public static final String NAME = "WEEK"; - public WeekFunction() { - } + public WeekFunction() { + } - public WeekFunction(List children) throws SQLException { - super(children); - } + public WeekFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int week = dt.getWeekOfWeekyear(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(week, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int week = dt.getWeekOfWeekyear(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(week, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/YearFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/YearFunction.java index ee10eadb467..935c00cf1f0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/YearFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/function/YearFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,52 +32,50 @@ import org.joda.time.chrono.GJChronology; /** - * * Implementation of the Year() buildin. Input Date/Timestamp. 
- * */ -@BuiltInFunction(name=YearFunction.NAME, -args={@Argument(allowedTypes={PTimestamp.class})}) +@BuiltInFunction(name = YearFunction.NAME, + args = { @Argument(allowedTypes = { PTimestamp.class }) }) public class YearFunction extends DateScalarFunction { - public static final String NAME = "YEAR"; + public static final String NAME = "YEAR"; - public YearFunction() { - } + public YearFunction() { + } - public YearFunction(List children) throws SQLException { - super(children); - } + public YearFunction(List children) throws SQLException { + super(children); + } - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - Expression expression = getChildExpression(); - if (!expression.evaluate(tuple, ptr)) { - return false; - } - if ( ptr.getLength() == 0) { - return true; //means null - } - long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); - DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); - int year = dt.getYear(); - PDataType returnType = getDataType(); - byte[] byteValue = new byte[returnType.getByteSize()]; - returnType.getCodec().encodeInt(year, byteValue, 0); - ptr.set(byteValue); - return true; + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + Expression expression = getChildExpression(); + if (!expression.evaluate(tuple, ptr)) { + return false; } - - @Override - public PDataType getDataType() { - return PInteger.INSTANCE; + if (ptr.getLength() == 0) { + return true; // means null } + long dateTime = inputCodec.decodeLong(ptr, expression.getSortOrder()); + DateTime dt = new DateTime(dateTime, GJChronology.getInstanceUTC()); + int year = dt.getYear(); + PDataType returnType = getDataType(); + byte[] byteValue = new byte[returnType.getByteSize()]; + returnType.getCodec().encodeInt(year, byteValue, 0); + ptr.set(byteValue); + return true; + } - @Override - public String getName() { - return NAME; - } + @Override + public PDataType getDataType() { + return PInteger.INSTANCE; + } - private Expression getChildExpression() { - return children.get(0); - } + @Override + public String getName() { + return NAME; + } + + private Expression getChildExpression() { + return children.get(0); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriter.java index fc0cafde758..468788a57cb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,40 +15,39 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.rewrite; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + import org.apache.phoenix.expression.CoerceExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.RowValueConstructorExpression; import org.apache.phoenix.schema.SortOrder; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - public class RowValueConstructorExpressionRewriter { - static RowValueConstructorExpressionRewriter singleton = null; + static RowValueConstructorExpressionRewriter singleton = null; - public static RowValueConstructorExpressionRewriter getSingleton() { - if (singleton == null) { - singleton = new RowValueConstructorExpressionRewriter(); - } - return singleton; + public static RowValueConstructorExpressionRewriter getSingleton() { + if (singleton == null) { + singleton = new RowValueConstructorExpressionRewriter(); } + return singleton; + } - public RowValueConstructorExpression rewriteAllChildrenAsc( - RowValueConstructorExpression rvcExpression) throws SQLException { - List replacementChildren = new ArrayList<>(rvcExpression.getChildren().size()); - for (int i = 0; i < rvcExpression.getChildren().size(); i++) { - Expression child = rvcExpression.getChildren().get(i); - if (child.getSortOrder() == SortOrder.DESC) { - //As The KeySlot visitor has not been setup for InvertFunction need to Use Coerce - child = CoerceExpression.create(child, child.getDataType(), SortOrder.ASC, null); - } - replacementChildren.add(child); - } - return rvcExpression.clone(replacementChildren); + public RowValueConstructorExpression + rewriteAllChildrenAsc(RowValueConstructorExpression rvcExpression) throws SQLException { + List replacementChildren = new ArrayList<>(rvcExpression.getChildren().size()); + for (int i = 0; i < rvcExpression.getChildren().size(); i++) { + Expression child = rvcExpression.getChildren().get(i); + if (child.getSortOrder() == SortOrder.DESC) { + // As The KeySlot visitor has not been setup for InvertFunction need to Use Coerce + child = CoerceExpression.create(child, child.getDataType(), SortOrder.ASC, null); + } + replacementChildren.add(child); } + return rvcExpression.clone(replacementChildren); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/CommonComparisonExpressionUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/CommonComparisonExpressionUtils.java index af739b720dd..3d66cf5d245 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/CommonComparisonExpressionUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/CommonComparisonExpressionUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.util.bson; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.bson.BsonArray; import org.bson.BsonBinary; @@ -29,15 +29,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; - /** * Common Util functions to help retrieve BSON Document values based on the given field expressions. */ public class CommonComparisonExpressionUtils { private static final Logger LOGGER = - LoggerFactory.getLogger(CommonComparisonExpressionUtils.class); + LoggerFactory.getLogger(CommonComparisonExpressionUtils.class); /** * Comparison operators supported for the Document value comparisons. @@ -52,18 +50,17 @@ public enum CompareOp { } /** - * Retrieve the value associated with the document field key. The field key can represent - * any top level or nested fields within the document. The caller should use "." notation for - * accessing nested document elements and "[n]" notation for accessing nested array elements. Top - * level fields do not require any additional character. - * + * Retrieve the value associated with the document field key. The field key can represent any top + * level or nested fields within the document. The caller should use "." notation for accessing + * nested document elements and "[n]" notation for accessing nested array elements. Top level + * fields do not require any additional character. * @param documentFieldKey The document field key for which the value is returned. - * @param rawBsonDocument The document from which to find the value. - * @return If the field key exists in the document, return the corresponding value. Else - * return null. + * @param rawBsonDocument The document from which to find the value. + * @return If the field key exists in the document, return the corresponding value. Else return + * null. */ public static BsonValue getFieldFromDocument(final String documentFieldKey, - final BsonDocument rawBsonDocument) { + final BsonDocument rawBsonDocument) { if (documentFieldKey.contains(".") || documentFieldKey.contains("[")) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < documentFieldKey.length(); i++) { @@ -91,23 +88,23 @@ public static BsonValue getFieldFromDocument(final String documentFieldKey, /** * Retrieve the value associated with the nested field key within the document. - * - * @param value Value of the parent data structure (document or array) which is used to search - * nested elements from. - * @param idx Index used to track which part of the field key has been covered so far. + * @param value Value of the parent data structure (document or array) which is used to + * search nested elements from. + * @param idx Index used to track which part of the field key has been covered so + * far. * @param documentFieldKey The document field key for which the value is returned. - * @return If the field key exists in the document, return the corresponding value. Else - * return null. + * @return If the field key exists in the document, return the corresponding value. Else return + * null. */ public static BsonValue getNestedFieldVal(BsonValue value, int idx, - final String documentFieldKey) { + final String documentFieldKey) { if (idx == documentFieldKey.length()) { return value; } int curIdx = idx; if (documentFieldKey.charAt(curIdx) == '.') { BsonDocument nestedDocument = - value != null && value.isDocument() ? (BsonDocument) value : null; + value != null && value.isDocument() ? 
(BsonDocument) value : null; if (nestedDocument == null) { LOGGER.warn("Incorrect access. Should have found nested map for value: {}", value); return null; @@ -142,8 +139,8 @@ public static BsonValue getNestedFieldVal(BsonValue value, int idx, } if (arrayIdx >= nestedArray.size()) { LOGGER.warn( - "Incorrect access. Nested list size {} is less than attempted index access at {}", - nestedArray.size(), arrayIdx); + "Incorrect access. Nested list size {} is less than attempted index access at {}", + nestedArray.size(), arrayIdx); return null; } BsonValue valueAtIdx = nestedArray.get(arrayIdx); @@ -153,25 +150,24 @@ public static BsonValue getNestedFieldVal(BsonValue value, int idx, return getNestedFieldVal(valueAtIdx, curIdx, documentFieldKey); } LOGGER.warn("This is erroneous case. getNestedFieldVal should not be used for " - + "top level document fields"); + + "top level document fields"); return null; } /** - * Compare the given Bson values. All values of the CompareOp enum are supported as - * comparison operators. For the comparison to be successful, both the value and the - * data type of the LHS and RHS operands must be considered. - * + * Compare the given Bson values. All values of the CompareOp enum are supported as comparison + * operators. For the comparison to be successful, both the value and the data type of the LHS and + * RHS operands must be considered. * @param lhsOperand LHS operand to be compared with RHS operand. * @param rhsOperand RHS operand. - * @param operator Comparison operator used to compare LHS and RHS operands. + * @param operator Comparison operator used to compare LHS and RHS operands. * @return True if the comparison of LHS with RHS is successful. */ public static boolean compareValues(final BsonValue lhsOperand, final BsonValue rhsOperand, - final CompareOp operator) { + final CompareOp operator) { Preconditions.checkNotNull(operator, "Comparison operator should not be null"); Preconditions.checkNotNull(lhsOperand, - "LHS operand for the Comparison operation should not be null"); + "LHS operand for the Comparison operation should not be null"); if (operator == CompareOp.EQUALS) { return lhsOperand.equals(rhsOperand); @@ -180,11 +176,11 @@ public static boolean compareValues(final BsonValue lhsOperand, final BsonValue } Preconditions.checkNotNull(rhsOperand, - "RHS operand for the Comparison operation should not be null"); + "RHS operand for the Comparison operation should not be null"); if (lhsOperand.isString() && rhsOperand.isString()) { int compare = - ((BsonString) lhsOperand).getValue().compareTo(((BsonString) rhsOperand).getValue()); + ((BsonString) lhsOperand).getValue().compareTo(((BsonString) rhsOperand).getValue()); switch (operator) { case LESS: return compare < 0; @@ -196,25 +192,25 @@ public static boolean compareValues(final BsonValue lhsOperand, final BsonValue return compare >= 0; } } - if ((lhsOperand.isNumber() || lhsOperand.isDecimal128()) && (rhsOperand.isNumber() - || rhsOperand.isDecimal128())) { + if ( + (lhsOperand.isNumber() || lhsOperand.isDecimal128()) + && (rhsOperand.isNumber() || rhsOperand.isDecimal128()) + ) { switch (operator) { case LESS: - return ((BsonNumber) lhsOperand).doubleValue() < ((BsonNumber) rhsOperand) - .doubleValue(); + return ((BsonNumber) lhsOperand).doubleValue() < ((BsonNumber) rhsOperand).doubleValue(); case LESS_OR_EQUAL: - return ((BsonNumber) lhsOperand).doubleValue() <= ((BsonNumber) rhsOperand) - .doubleValue(); + return ((BsonNumber) lhsOperand).doubleValue() <= ((BsonNumber) 
rhsOperand).doubleValue(); case GREATER: - return ((BsonNumber) lhsOperand).doubleValue() > ((BsonNumber) rhsOperand) - .doubleValue(); + return ((BsonNumber) lhsOperand).doubleValue() > ((BsonNumber) rhsOperand).doubleValue(); case GREATER_OR_EQUAL: - return ((BsonNumber) lhsOperand).doubleValue() >= ((BsonNumber) rhsOperand) - .doubleValue(); + return ((BsonNumber) lhsOperand).doubleValue() >= ((BsonNumber) rhsOperand).doubleValue(); } } - if (lhsOperand.isBinary() && rhsOperand.isBinary() - && ((BsonBinary) lhsOperand).getType() == ((BsonBinary) rhsOperand).getType()) { + if ( + lhsOperand.isBinary() && rhsOperand.isBinary() + && ((BsonBinary) lhsOperand).getType() == ((BsonBinary) rhsOperand).getType() + ) { byte[] b1 = ((BsonBinary) lhsOperand).getData(); byte[] b2 = ((BsonBinary) rhsOperand).getData(); switch (operator) { @@ -231,22 +227,17 @@ public static boolean compareValues(final BsonValue lhsOperand, final BsonValue if (lhsOperand.isDateTime() && rhsOperand.isDateTime()) { switch (operator) { case LESS: - return ((BsonDateTime) lhsOperand).getValue() < ((BsonDateTime) rhsOperand) - .getValue(); + return ((BsonDateTime) lhsOperand).getValue() < ((BsonDateTime) rhsOperand).getValue(); case LESS_OR_EQUAL: - return ((BsonDateTime) lhsOperand).getValue() <= ((BsonDateTime) rhsOperand) - .getValue(); + return ((BsonDateTime) lhsOperand).getValue() <= ((BsonDateTime) rhsOperand).getValue(); case GREATER: - return ((BsonDateTime) lhsOperand).getValue() > ((BsonDateTime) rhsOperand) - .getValue(); + return ((BsonDateTime) lhsOperand).getValue() > ((BsonDateTime) rhsOperand).getValue(); case GREATER_OR_EQUAL: - return ((BsonDateTime) lhsOperand).getValue() >= ((BsonDateTime) rhsOperand) - .getValue(); + return ((BsonDateTime) lhsOperand).getValue() >= ((BsonDateTime) rhsOperand).getValue(); } } LOGGER.error("Expected comparison for {} is not of type String, Number, Binary" - + " or DateTime. LhsOperand: {} , RhsOperand: {}", operator, lhsOperand, - rhsOperand); + + " or DateTime. LhsOperand: {} , RhsOperand: {}", operator, lhsOperand, rhsOperand); return false; } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/DocumentComparisonExpressionUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/DocumentComparisonExpressionUtils.java index 08c5e9d66a0..2361c22429d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/DocumentComparisonExpressionUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/DocumentComparisonExpressionUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.util.bson; import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.bson.BsonArray; import org.bson.BsonBoolean; import org.bson.BsonDocument; @@ -29,15 +29,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** * Document style condition expression evaluation support. */ public class DocumentComparisonExpressionUtils { private static final Logger LOGGER = - LoggerFactory.getLogger(DocumentComparisonExpressionUtils.class); + LoggerFactory.getLogger(DocumentComparisonExpressionUtils.class); private static final String EXISTS_OP = "$exists"; private static final String EQUALS_OP = "$eq"; @@ -48,25 +46,24 @@ public class DocumentComparisonExpressionUtils { private static final String GREATER_THAN_OR_EQUALS_OP = "$gte"; public static boolean evaluateConditionExpression(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { if (document == null || conditionExpression == null) { - LOGGER.warn( - "Document and/or Condition Expression document are empty. Document: {}, " - + "conditionExpression: {}", document, conditionExpression); + LOGGER.warn("Document and/or Condition Expression document are empty. Document: {}, " + + "conditionExpression: {}", document, conditionExpression); return false; } return evaluateExpression(document, conditionExpression); } private static boolean evaluateExpression(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { final String firstFieldKey = conditionExpression.getFirstKey(); Preconditions.checkArgument(conditionExpression.size() == 1, - "Expected num of document entries is 1"); + "Expected num of document entries is 1"); if (!firstFieldKey.startsWith("$")) { BsonValue bsonValue = conditionExpression.get(firstFieldKey); Preconditions.checkArgument(bsonValue instanceof BsonDocument, - "Expected type for Bson value is Document for field based condition operation"); + "Expected type for Bson value is Document for field based condition operation"); BsonDocument bsonDocument = (BsonDocument) bsonValue; if (bsonDocument.containsKey(EXISTS_OP)) { return isExists(document, conditionExpression); @@ -90,7 +87,7 @@ private static boolean evaluateExpression(final BsonDocument document, case "$or": { BsonValue bsonValue = conditionExpression.get(firstFieldKey); Preconditions.checkArgument(bsonValue instanceof BsonArray, - "Expected type for Bson value is Array for $or operator"); + "Expected type for Bson value is Array for $or operator"); BsonArray bsonArray = (BsonArray) bsonValue; List bsonValues = bsonArray.getValues(); for (BsonValue value : bsonValues) { @@ -103,7 +100,7 @@ private static boolean evaluateExpression(final BsonDocument document, case "$and": { BsonValue bsonValue = conditionExpression.get(firstFieldKey); Preconditions.checkArgument(bsonValue instanceof BsonArray, - "Expected type for Bson value is Array for $and operator"); + "Expected type for Bson value is Array for $and operator"); BsonArray bsonArray = (BsonArray) bsonValue; List bsonValues = bsonArray.getValues(); for (BsonValue value : bsonValues) { @@ -121,19 +118,19 @@ private static boolean evaluateExpression(final BsonDocument document, } private static boolean isExists(final BsonDocument document, - final BsonDocument conditionExpression) { + 
final BsonDocument conditionExpression) { Set> entrySet = conditionExpression.entrySet(); - Preconditions.checkArgument(entrySet.size() == 1, "Expected entry for the exists operation" - + " is 1"); + Preconditions.checkArgument(entrySet.size() == 1, + "Expected entry for the exists operation" + " is 1"); for (Map.Entry bsonValueEntry : entrySet) { String fieldKey = bsonValueEntry.getKey(); BsonValue bsonValue = bsonValueEntry.getValue(); Preconditions.checkArgument(bsonValue instanceof BsonDocument, - "Expected type for Bson value is Document for exists operation"); + "Expected type for Bson value is Document for exists operation"); BsonDocument bsonDocument = (BsonDocument) bsonValue; BsonValue existsValue = bsonDocument.get(EXISTS_OP); Preconditions.checkArgument(existsValue instanceof BsonBoolean, - "Expected type for $exists value is boolean"); + "Expected type for $exists value is boolean"); BsonBoolean existsValBoolean = (BsonBoolean) existsValue; if (existsValBoolean.getValue()) { return exists(fieldKey, document); @@ -145,14 +142,13 @@ private static boolean isExists(final BsonDocument document, } /** - * Get the document style comparison operator based on the Comparison operator enum value - * used to differentiate all comparisons. - * + * Get the document style comparison operator based on the Comparison operator enum value used to + * differentiate all comparisons. * @param compareOp The comparison operator enum. * @return The document style comparison operator constant value. */ - private static String getCompareOperator( - final CommonComparisonExpressionUtils.CompareOp compareOp) { + private static String + getCompareOperator(final CommonComparisonExpressionUtils.CompareOp compareOp) { if (compareOp == null) { return null; } @@ -183,160 +179,121 @@ private static String getCompareOperator( /** * Common utility for performing comparison with document style comparison expression. - * - * @param document The document used for comparison. + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @param compareOp The comparison operator. - * @return True if the field provided in the condition expression has value comparable to - * the value provided in the condition expression as per the given comparison operator. + * @param compareOp The comparison operator. + * @return True if the field provided in the condition expression has value comparable to the + * value provided in the condition expression as per the given comparison operator. 
*/ private static boolean compare(final BsonDocument document, - final BsonDocument conditionExpression, - final CommonComparisonExpressionUtils.CompareOp compareOp) { + final BsonDocument conditionExpression, + final CommonComparisonExpressionUtils.CompareOp compareOp) { Set> entrySet = conditionExpression.entrySet(); Preconditions.checkArgument(entrySet.size() == 1, - "Expected entry for the " + compareOp + " operation is 1"); + "Expected entry for the " + compareOp + " operation is 1"); for (Map.Entry bsonValueEntry : entrySet) { String fieldKey = bsonValueEntry.getKey(); BsonValue bsonValue = bsonValueEntry.getValue(); Preconditions.checkArgument(bsonValue instanceof BsonDocument, - "Expected type for Bson value is Document for " + compareOp + " operation"); + "Expected type for Bson value is Document for " + compareOp + " operation"); BsonDocument bsonDocument = (BsonDocument) bsonValue; BsonValue compareValue = bsonDocument.get(getCompareOperator(compareOp)); BsonValue topLevelValue = document.get(fieldKey); - BsonValue actualValue = topLevelValue != null ? - topLevelValue : - CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, document); - return actualValue != null && CommonComparisonExpressionUtils.compareValues(actualValue, - compareValue, compareOp); + BsonValue actualValue = topLevelValue != null + ? topLevelValue + : CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, document); + return actualValue != null + && CommonComparisonExpressionUtils.compareValues(actualValue, compareValue, compareOp); } return false; } /** - * Returns true if the field provided in the condition expression has value greater than - * or equals to the value provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$gte": - * } - * } - * - * @param document The document used for comparison. + * Returns true if the field provided in the condition expression has value greater than or equals + * to the value provided in the condition expression. Condition Expression format: { : { + * "$gte": } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @return True if the field provided in the condition expression has value greater than - * or equals to the value provided in the condition expression. + * @return True if the field provided in the condition expression has value greater than or equals + * to the value provided in the condition expression. */ private static boolean greaterThanOrEquals(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.GREATER_OR_EQUAL); + CommonComparisonExpressionUtils.CompareOp.GREATER_OR_EQUAL); } /** - * Returns true if the field provided in the condition expression has value greater than - * the value provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$gt": - * } - * } - * - * @param document The document used for comparison. + * Returns true if the field provided in the condition expression has value greater than the value + * provided in the condition expression. Condition Expression format: { : { "$gt": + * } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @return True if the field provided in the condition expression has value greater than - * the value provided in the condition expression. 
+ * @return True if the field provided in the condition expression has value greater than the value + * provided in the condition expression. */ private static boolean greaterThan(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.GREATER); + CommonComparisonExpressionUtils.CompareOp.GREATER); } /** - * Returns true if the field provided in the condition expression has value less than - * or equals to the value provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$lte": - * } - * } - * - * @param document The document used for comparison. + * Returns true if the field provided in the condition expression has value less than or equals to + * the value provided in the condition expression. Condition Expression format: { : { + * "$lte": } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @return True if the field provided in the condition expression has value less than - * or equals to the value provided in the condition expression. + * @return True if the field provided in the condition expression has value less than or equals to + * the value provided in the condition expression. */ private static boolean lessThanOrEquals(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.LESS_OR_EQUAL); + CommonComparisonExpressionUtils.CompareOp.LESS_OR_EQUAL); } /** - * Returns true if the field provided in the condition expression has value less than - * the value provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$lt": - * } - * } - * - * @param document The document used for comparison. + * Returns true if the field provided in the condition expression has value less than the value + * provided in the condition expression. Condition Expression format: { : { "$lt": + * } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @return True if the field provided in the condition expression has value less than - * the value provided in the condition expression. + * @return True if the field provided in the condition expression has value less than the value + * provided in the condition expression. */ private static boolean lessThan(final BsonDocument document, - final BsonDocument conditionExpression) { - return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.LESS); + final BsonDocument conditionExpression) { + return compare(document, conditionExpression, CommonComparisonExpressionUtils.CompareOp.LESS); } /** * Returns true if the field provided in the condition expression has value equal to the value - * provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$eq": - * } - * } - * - * @param document The document used for comparison. + * provided in the condition expression. Condition Expression format: { : { "$eq": + * } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. - * @return if the field provided in the condition expression has value equal to the value - * provided in the condition expression. 
+ * @return if the field provided in the condition expression has value equal to the value provided + * in the condition expression. */ private static boolean equals(final BsonDocument document, - final BsonDocument conditionExpression) { - return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.EQUALS); + final BsonDocument conditionExpression) { + return compare(document, conditionExpression, CommonComparisonExpressionUtils.CompareOp.EQUALS); } /** - * Returns true if the field provided in the condition expression has value not equal to the - * value provided in the condition expression. - * Condition Expression format: - * { - * : { - * "$ne": - * } - * } - * - * @param document The document used for comparison. + * Returns true if the field provided in the condition expression has value not equal to the value + * provided in the condition expression. Condition Expression format: { : { "$ne": + * } } + * @param document The document used for comparison. * @param conditionExpression Condition Expression Document. * @return if the field provided in the condition expression has value not equal to the value - * provided in the condition expression. + * provided in the condition expression. */ private static boolean notEquals(final BsonDocument document, - final BsonDocument conditionExpression) { + final BsonDocument conditionExpression) { return compare(document, conditionExpression, - CommonComparisonExpressionUtils.CompareOp.NOT_EQUALS); + CommonComparisonExpressionUtils.CompareOp.NOT_EQUALS); } private static boolean exists(final String documentField, final BsonDocument bsonDocument) { diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/SQLComparisonExpressionUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/SQLComparisonExpressionUtils.java index 96d2bb534c4..138e3512876 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/SQLComparisonExpressionUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/SQLComparisonExpressionUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.util.bson; +import java.util.HashSet; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + import org.bson.BsonDocument; import org.bson.BsonValue; import org.bson.RawBsonDocument; @@ -25,11 +29,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.HashSet; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - /** * SQL style condition expression evaluation support. */ @@ -38,12 +37,12 @@ public class SQLComparisonExpressionUtils { private static final Logger LOGGER = LoggerFactory.getLogger(SQLComparisonExpressionUtils.class); /** - * All supported operators. Used to parse the input string and identify how many of the - * operators are in use and accordingly performs string conversions using individual + * All supported operators. 
Used to parse the input string and identify how many of the operators + * are in use and accordingly performs string conversions using individual * pattern-matcher-replace. */ private static final String ALL_SUPPORTED_OPS = - "\\b(field_not_exists|field_exists|BETWEEN|IN|AND|OR|NOT)\\b|<=|>=|!=|==|=|<>|<|>"; + "\\b(field_not_exists|field_exists|BETWEEN|IN|AND|OR|NOT)\\b|<=|>=|!=|==|=|<>|<|>"; private static final Pattern ALL_SUPPORTED_OPS_PATTERN = Pattern.compile(ALL_SUPPORTED_OPS); private static final String FIELD_NOT_EXISTS = "field_not_exists\\(([^)]+)\\)"; @@ -57,7 +56,7 @@ public class SQLComparisonExpressionUtils { private static final String GREATER_THAN = "\\b([\\w.\\[\\]]+)\\s*>\\s*([#:$]*\\w+)"; private static final String GREATER_THAN_OR_EQUALS = "\\b([\\w.\\[\\]]+)\\s*>=\\s*([#:$]*\\w+)"; private static final String BETWEEN = - "\\b([\\w.\\[\\]]+)\\s+BETWEEN\\s+([#:$]*\\w+)\\s+AND\\s+([#:$]*\\w+)"; + "\\b([\\w.\\[\\]]+)\\s+BETWEEN\\s+([#:$]*\\w+)\\s+AND\\s+([#:$]*\\w+)"; private static final String IN = "\\b([\\w.\\[\\]]+)\\s+IN\\s+\\(([^)]+)\\)"; private static final String AND = "\\bAND\\b"; private static final String OR = "\\bOR\\b"; @@ -73,7 +72,7 @@ public class SQLComparisonExpressionUtils { private static final Pattern LESS_THAN_OR_EQUALS_PATTERN = Pattern.compile(LESS_THAN_OR_EQUALS); private static final Pattern GREATER_THAN_PATTERN = Pattern.compile(GREATER_THAN); private static final Pattern GREATER_THAN_OR_EQUALS_PATTERN = - Pattern.compile(GREATER_THAN_OR_EQUALS); + Pattern.compile(GREATER_THAN_OR_EQUALS); private static final Pattern BETWEEN_PATTERN = Pattern.compile(BETWEEN); private static final Pattern IN_PATTERN = Pattern.compile(IN); private static final Pattern AND_PATTERN = Pattern.compile(AND); @@ -98,7 +97,7 @@ public class SQLComparisonExpressionUtils { private final BsonDocument comparisonValuesDocument; public SQLComparisonExpressionUtils(RawBsonDocument rawBsonDocument, - BsonDocument comparisonValuesDocument) { + BsonDocument comparisonValuesDocument) { this.rawBsonDocument = rawBsonDocument; this.comparisonValuesDocument = comparisonValuesDocument; } @@ -117,7 +116,6 @@ public boolean evaluateConditionExpression(final String conditionExpression) { /** * Converts the input string expression into Java executable statement. - * * @param expression Input string expression. * @return Executable string conversion statement. */ @@ -151,14 +149,14 @@ public String convertExpression(String expression) { } if (patternsMatched.contains("<=")) { expression = - LESS_THAN_OR_EQUALS_PATTERN.matcher(expression).replaceAll(FUNC_LESS_THAN_OR_EQUALS); + LESS_THAN_OR_EQUALS_PATTERN.matcher(expression).replaceAll(FUNC_LESS_THAN_OR_EQUALS); } if (patternsMatched.contains(">")) { expression = GREATER_THAN_PATTERN.matcher(expression).replaceAll(FUNC_GREATER_THAN); } if (patternsMatched.contains(">=")) { - expression = GREATER_THAN_OR_EQUALS_PATTERN.matcher(expression) - .replaceAll(FUNC_GREATER_THAN_OR_EQUALS); + expression = + GREATER_THAN_OR_EQUALS_PATTERN.matcher(expression).replaceAll(FUNC_GREATER_THAN_OR_EQUALS); } if (patternsMatched.contains("BETWEEN")) { expression = BETWEEN_PATTERN.matcher(expression).replaceAll(FUNC_BETWEEN); @@ -180,32 +178,28 @@ public String convertExpression(String expression) { /** * Returns true if the value of the field is comparable to the value represented by - * {@code expectedFieldValue} as per the comparison operator represented by {@code compareOp}. - * The comparison can happen only if the data type of both values match. 
- * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * {@code expectedFieldValue} as per the comparison operator represented by {@code compareOp}. The + * comparison can happen only if the data type of both values match. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. - * @param compareOp The comparison operator. + * @param compareOp The comparison operator. * @return True if the comparison is successful, False otherwise. */ - private boolean compare(final String fieldKey, - final String expectedFieldValue, - final CommonComparisonExpressionUtils.CompareOp compareOp) { + private boolean compare(final String fieldKey, final String expectedFieldValue, + final CommonComparisonExpressionUtils.CompareOp compareOp) { BsonValue topLevelValue = rawBsonDocument.get(fieldKey); - BsonValue value = topLevelValue != null ? - topLevelValue : - CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, rawBsonDocument); + BsonValue value = topLevelValue != null + ? topLevelValue + : CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, rawBsonDocument); if (value != null) { BsonValue compareValue = comparisonValuesDocument.get(expectedFieldValue); - return CommonComparisonExpressionUtils.compareValues( - value, compareValue, compareOp); + return CommonComparisonExpressionUtils.compareValues(value, compareValue, compareOp); } return false; } /** * Returns true if the given field exists in the document. - * * @param documentField The document field. * @return True if the given field exists in the document. */ @@ -221,56 +215,49 @@ public boolean exists(final String documentField) { /** * Returns true if the value of the field is less than the value represented by {@code * expectedFieldValue}. The comparison can happen only if the data type of both values match. - * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. * @return True if the value of the field is less than expectedFieldValue. */ public boolean lessThan(final String fieldKey, final String expectedFieldValue) { - return compare(fieldKey, expectedFieldValue, - CommonComparisonExpressionUtils.CompareOp.LESS); + return compare(fieldKey, expectedFieldValue, CommonComparisonExpressionUtils.CompareOp.LESS); } /** * Returns true if the value of the field is less than or equal to the value represented by * {@code expectedFieldValue}. The comparison can happen only if the data type of both values * match. - * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. * @return True if the value of the field is less than or equal to expectedFieldValue. */ public boolean lessThanOrEquals(final String fieldKey, final String expectedFieldValue) { return compare(fieldKey, expectedFieldValue, - CommonComparisonExpressionUtils.CompareOp.LESS_OR_EQUAL); + CommonComparisonExpressionUtils.CompareOp.LESS_OR_EQUAL); } /** * Returns true if the value of the field is greater than the value represented by {@code * expectedFieldValue}. 
The comparison can happen only if the data type of both values match. - * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. * @return True if the value of the field is greater than expectedFieldValue. */ public boolean greaterThan(final String fieldKey, final String expectedFieldValue) { - return compare(fieldKey, expectedFieldValue, - CommonComparisonExpressionUtils.CompareOp.GREATER); + return compare(fieldKey, expectedFieldValue, CommonComparisonExpressionUtils.CompareOp.GREATER); } /** * Returns true if the value of the field is greater than or equal to the value represented by * {@code expectedFieldValue}. The comparison can happen only if the data type of both values * match. - * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. * @return True if the value of the field is greater than or equal to expectedFieldValue. */ - public boolean greaterThanOrEquals(final String fieldKey, - final String expectedFieldValue) { + public boolean greaterThanOrEquals(final String fieldKey, final String expectedFieldValue) { return compare(fieldKey, expectedFieldValue, - CommonComparisonExpressionUtils.CompareOp.GREATER_OR_EQUAL); + CommonComparisonExpressionUtils.CompareOp.GREATER_OR_EQUAL); } /** @@ -278,36 +265,35 @@ public boolean greaterThanOrEquals(final String fieldKey, * {@code expectedFieldValue1} and less than or equal to the value represented by * {@code expectedFieldValue2}. The comparison can happen only if the data type of both values * match. - * - * @param fieldKey The field key for which value is compared against two values. + * @param fieldKey The field key for which value is compared against two values. * @param expectedFieldValue1 The first literal value to compare against the field value. * @param expectedFieldValue2 The second literal value to compare against the field value. * @return True if the value of the field is greater than or equal to the value represented by - * expectedFieldValue1 and less than or equal to the value represented by expectedFieldValue2. + * expectedFieldValue1 and less than or equal to the value represented by + * expectedFieldValue2. */ public boolean between(final String fieldKey, final String expectedFieldValue1, - final String expectedFieldValue2) { - return greaterThanOrEquals(fieldKey, expectedFieldValue1) && lessThanOrEquals( - fieldKey, expectedFieldValue2); + final String expectedFieldValue2) { + return greaterThanOrEquals(fieldKey, expectedFieldValue1) + && lessThanOrEquals(fieldKey, expectedFieldValue2); } /** - * Returns true if the value of the field equals to any of the comma separated values - * represented by {@code expectedInValues}. The equality check is successful only if the value - * and the data type both match. - * - * @param fieldKey The field key for which value is compared against expectedInValues. + * Returns true if the value of the field equals to any of the comma separated values represented + * by {@code expectedInValues}. The equality check is successful only if the value and the data + * type both match. + * @param fieldKey The field key for which value is compared against expectedInValues. 
* @param expectedInValues The array of values for comparison, separated by comma. - * @return True if the value of the field equals to any of the comma separated values - * represented by expectedInValues. The equality check is successful only if the value - * and the data type both match. + * @return True if the value of the field equals to any of the comma separated values represented + * by expectedInValues. The equality check is successful only if the value and the data + * type both match. */ public boolean in(final String fieldKey, final String expectedInValues) { String[] expectedInVals = expectedInValues.split("\\s*,\\s*"); BsonValue topLevelValue = rawBsonDocument.get(fieldKey); - BsonValue value = topLevelValue != null ? - topLevelValue : - CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, rawBsonDocument); + BsonValue value = topLevelValue != null + ? topLevelValue + : CommonComparisonExpressionUtils.getFieldFromDocument(fieldKey, rawBsonDocument); if (value != null) { for (String expectedInVal : expectedInVals) { if (isEquals(fieldKey, expectedInVal)) { @@ -320,16 +306,14 @@ public boolean in(final String fieldKey, final String expectedInValues) { /** * Returns true if the value of the field is equal to the value represented by {@code - * expectedFieldValue}. The equality check is successful only if the value - * and the data type both match. - * - * @param fieldKey The field key for which value is compared against expectedFieldValue. + * expectedFieldValue}. The equality check is successful only if the value and the data type both + * match. + * @param fieldKey The field key for which value is compared against expectedFieldValue. * @param expectedFieldValue The literal value to compare against the field value. * @return True if the value of the field is equal to expectedFieldValue. */ public boolean isEquals(final String fieldKey, final String expectedFieldValue) { - return compare(fieldKey, expectedFieldValue, - CommonComparisonExpressionUtils.CompareOp.EQUALS); + return compare(fieldKey, expectedFieldValue, CommonComparisonExpressionUtils.CompareOp.EQUALS); } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/UpdateExpressionUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/UpdateExpressionUtils.java index 46fb9bd2688..a80d7ab9239 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/UpdateExpressionUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/bson/UpdateExpressionUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression.util.bson; import java.math.BigDecimal; @@ -42,11 +41,11 @@ import org.slf4j.LoggerFactory; /** - * BSON Update Expression Utility to perform the Document updates. All update expressions - * provided by this utility supports operations on nested document fields. The field key can - * represent any top level or nested fields within the document. The caller should use "." 
- * notation for accessing nested document elements and "[n]" notation for accessing nested array - * elements. Top level fields do not require any additional character. + * BSON Update Expression Utility to perform the Document updates. All update expressions provided + * by this utility supports operations on nested document fields. The field key can represent any + * top level or nested fields within the document. The caller should use "." notation for accessing + * nested document elements and "[n]" notation for accessing nested array elements. Top level fields + * do not require any additional character. */ public class UpdateExpressionUtils { @@ -66,45 +65,40 @@ private enum UpdateOp { /** * Updates the given document based on the update expression. *
- * { - * "$SET": { <field1>: <value1>, <field2>: <value2>, .... }, - * "$UNSET": { <field1>: null, <field2>: null, ... }, - * "$ADD": { <field1>: <value1>, <field2>: <value2>, .... }, - * "$DELETE_FROM_SET": { <field1>: <value1>, <field2>: <value2>, .... } - * } + * { "$SET": { <field1>: <value1>, <field2>: <value2>, .... }, "$UNSET": { + * <field1>: null, <field2>: null, ... }, "$ADD": { <field1>: <value1>, + * <field2>: <value2>, .... }, "$DELETE_FROM_SET": { <field1>: <value1>, + * <field2>: <value2>, .... } } *
* "$SET": Use the SET action in an update expression to add one or more fields to a BSON - * Document. If any of these fields already exists, they are overwritten by the new values. - * To perform multiple SET actions, provide multiple fields key-value entries within the nested - * document under $SET field key. - * "$UNSET": Use the UNSET action in an update expression to unset or remove one or more fields - * from a BSON Document. To perform multiple UNSET actions, provide multiple field key-value - * entries within the nested document under $UNSET field key. - * "$ADD": Use the ADD action in an update expression to add a new field and its values to a - * BSON document. If the field already exists, the behavior of ADD depends on the field's - * data type: + * Document. If any of these fields already exists, they are overwritten by the new values. To + * perform multiple SET actions, provide multiple fields key-value entries within the nested + * document under $SET field key. "$UNSET": Use the UNSET action in an update expression to unset + * or remove one or more fields from a BSON Document. To perform multiple UNSET actions, provide + * multiple field key-value entries within the nested document under $UNSET field key. "$ADD": Use + * the ADD action in an update expression to add a new field and its values to a BSON document. If + * the field already exists, the behavior of ADD depends on the field's data type: *
* 1. If the field is a number, and the value you are adding is also a number, the value is * mathematically added to the existing field. *
- * 2. If the field is a set, and the value you are adding is also a set, the value is appended - * to the existing set. + * 2. If the field is a set, and the value you are adding is also a set, the value is appended to + * the existing set. *
* "$DELETE_FROM_SET": Use the DELETE action in an update expression to remove one or more * elements from a set. To perform multiple DELETE actions, provide multiple field key-value - * entries within the nested document under $DELETE_FROM_SET field key. - * Definition of path and subset in the context of the expression: + * entries within the nested document under $DELETE_FROM_SET field key. Definition of path and + * subset in the context of the expression: *
- * 1. The path element is the document path to a field. The field must be a set data type. - * 2. The subset is one or more elements that you want to delete from the given path. Subset - * must be of set type. + * 1. The path element is the document path to a field. The field must be a set data type. 2. The + * subset is one or more elements that you want to delete from the given path. Subset must be of + * set type. *
- * * @param updateExpression Update Expression as a document. - * @param bsonDocument Document contents to be updated. + * @param bsonDocument Document contents to be updated. */ public static void updateExpression(final BsonDocument updateExpression, - final BsonDocument bsonDocument) { + final BsonDocument bsonDocument) { LOGGER.info("Update Expression: {} , current bsonDocument: {}", updateExpression, bsonDocument); @@ -122,27 +116,25 @@ public static void updateExpression(final BsonDocument updateExpression, if (updateExpression.containsKey("$DELETE_FROM_SET")) { executeDeleteExpression((BsonDocument) updateExpression.get("$DELETE_FROM_SET"), - bsonDocument); + bsonDocument); } } /** - * Update the given document by performing DELETE operation. This operation is applicable - * only on Set data structure. The document is updated by removing the given set of elements from - * the given set of elements. - * Let's say if the document field is of string set data type, and the elements are: - * {"yellow", "green", "red", "blue"}. The elements to be removed from the set are provided - * as {"blue", "yellow"} with the delete expression. The operation is expected to update the - * existing field by removing "blue" and "yellow" from the given set and the resultant set is + * Update the given document by performing DELETE operation. This operation is applicable only on + * Set data structure. The document is updated by removing the given set of elements from the + * given set of elements. Let's say if the document field is of string set data type, and the + * elements are: {"yellow", "green", "red", "blue"}. The elements to be removed from the set are + * provided as {"blue", "yellow"} with the delete expression. The operation is expected to update + * the existing field by removing "blue" and "yellow" from the given set and the resultant set is * expected to contain: {"green", "red"}. - * - * @param deleteExpr Delete Expression Document with key-value pairs. Key represents field in the - * given document, on which operation is to be performed. Value represents set of elements to be - * removed from the existing set. + * @param deleteExpr Delete Expression Document with key-value pairs. Key represents field in + * the given document, on which operation is to be performed. Value represents + * set of elements to be removed from the existing set. * @param bsonDocument Document contents to be updated. */ private static void executeDeleteExpression(final BsonDocument deleteExpr, - final BsonDocument bsonDocument) { + final BsonDocument bsonDocument) { for (Map.Entry deleteEntry : deleteExpr.entrySet()) { String fieldKey = deleteEntry.getKey(); BsonValue newVal = deleteEntry.getValue(); @@ -172,20 +164,19 @@ private static void executeDeleteExpression(final BsonDocument deleteExpr, /** * Update the existing set by removing the set values that are present in * {@code setValuesToDelete}. For this operation to be successful, both {@code currentValue} and - * {@code setValuesToDelete} must be of same set data type. For instance, both must be either - * set of string, set of numbers or set of binary values. - * - * @param currentValue The value that needs to be updated by performing deletion operation. + * {@code setValuesToDelete} must be of same set data type. For instance, both must be either set + * of string, set of numbers or set of binary values. + * @param currentValue The value that needs to be updated by performing deletion operation. 
* @param setValuesToDelete The set values that need to be deleted from the currentValue set. * @return Updated set after performing the set difference operation. */ private static BsonValue modifyFieldValueByDeleteFromSet(final BsonValue currentValue, - final BsonValue setValuesToDelete) { + final BsonValue setValuesToDelete) { if (areBsonSetOfSameType(currentValue, setValuesToDelete)) { Set set1 = - new HashSet<>(((BsonArray) ((BsonDocument) currentValue).get("$set")).getValues()); + new HashSet<>(((BsonArray) ((BsonDocument) currentValue).get("$set")).getValues()); Set set2 = - new HashSet<>(((BsonArray) ((BsonDocument) setValuesToDelete).get("$set")).getValues()); + new HashSet<>(((BsonArray) ((BsonDocument) setValuesToDelete).get("$set")).getValues()); set1.removeAll(set2); if (set1.isEmpty()) { return null; @@ -194,40 +185,38 @@ private static BsonValue modifyFieldValueByDeleteFromSet(final BsonValue current bsonDocument.put("$set", new BsonArray(new ArrayList<>(set1))); return bsonDocument; } - throw new RuntimeException( - "Data type for current value " + currentValue + " is not matching with new value " - + setValuesToDelete); + throw new RuntimeException("Data type for current value " + currentValue + + " is not matching with new value " + setValuesToDelete); } /** - * Update the given document by performing ADD operation. This operation is applicable - * only on either Set data structure or Numerical value represented by Int32, Int64, Double or - * Decimal. If the field is of type set, the document is updated by adding the given set of - * elements to the given set of elements. If the field is of type number, the document is updated - * by adding the numerical value to the given number field value. + * Update the given document by performing ADD operation. This operation is applicable only on + * either Set data structure or Numerical value represented by Int32, Int64, Double or Decimal. If + * the field is of type set, the document is updated by adding the given set of elements to the + * given set of elements. If the field is of type number, the document is updated by adding the + * numerical value to the given number field value. *
- * Let's say if the document field is of numeric data type with value "234.5" and the value - * to be added is "10", the resultant value is expected to be "244.5". Adding negative value - * would result in subtract operation. For example, adding "-10" would result in "224.5". + * Let's say if the document field is of numeric data type with value "234.5" and the value to be + * added is "10", the resultant value is expected to be "244.5". Adding negative value would + * result in subtract operation. For example, adding "-10" would result in "224.5". *
* On the other hand, if the document field is of string set data type, and the elements are: - * {"yellow", "green", "red"}. The elements to be added to the set are provided - * as {"blue", "yellow"} with the add expression. The operation is expected to update the - * existing field by removing adding unique value "blue" and the resultant set is - * expected to contain: {"yellow", "green", "red", "blue"}. - * - * @param addExpr Add Expression Document + * {"yellow", "green", "red"}. The elements to be added to the set are provided as {"blue", + * "yellow"} with the add expression. The operation is expected to update the existing field by + * removing adding unique value "blue" and the resultant set is expected to contain: {"yellow", + * "green", "red", "blue"}. + * @param addExpr Add Expression Document * @param bsonDocument Document contents to be updated. */ private static void executeAddExpression(final BsonDocument addExpr, - final BsonDocument bsonDocument) { + final BsonDocument bsonDocument) { for (Map.Entry addEntry : addExpr.entrySet()) { String fieldKey = addEntry.getKey(); BsonValue newVal = addEntry.getValue(); BsonValue topLevelValue = bsonDocument.get(fieldKey); if (!newVal.isNumber() && !newVal.isDecimal128() && !isBsonSet(newVal)) { throw new RuntimeException( - "Type of new value to be updated should be either number or sets only"); + "Type of new value to be updated should be either number or sets only"); } // If the top level field exists, perform the operation here and return. if (topLevelValue != null) { @@ -245,49 +234,48 @@ private static void executeAddExpression(final BsonDocument addExpr, /** * Update the existing value {@code currentValue} depending on its data type. If the data type of - * {@code currentValue} is numeric, add numeric value represented by {@code newVal} - * to it. If the data type of {@code currentValue} is set, add set values represented by - * {@code newVal} to it. For this operation to be successful, both {@code currentValue} and - * {@code newVal} must be of same set data type. For instance, both must be either - * set of string, set of numbers or set of binary values, or both must be of number data type. - * + * {@code currentValue} is numeric, add numeric value represented by {@code newVal} to it. If the + * data type of {@code currentValue} is set, add set values represented by {@code newVal} to it. + * For this operation to be successful, both {@code currentValue} and {@code newVal} must be of + * same set data type. For instance, both must be either set of string, set of numbers or set of + * binary values, or both must be of number data type. * @param currentValue The value that needs to be updated by performing add operation. - * @param newVal The numeric or set values that need to be added to the currentValue. + * @param newVal The numeric or set values that need to be added to the currentValue. * @return Updated value after performing the add operation. 
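To make the ADD semantics above concrete, here is an illustrative sketch that is not part of this patch. It assumes the top-level operator key is "$ADD" (mirroring the $SET / $UNSET / $ADD / $DELETE_FROM_SET operators this javadoc names), that the enclosing utility class is UpdateExpressionUtils, and that "price" and "colors" are hypothetical field names.

import java.util.Arrays;

import org.bson.BsonArray;
import org.bson.BsonDocument;
import org.bson.BsonDouble;
import org.bson.BsonString;

public class AddExpressionSketch {
  public static void main(String[] args) {
    BsonDocument doc = new BsonDocument();
    doc.put("price", new BsonDouble(234.5));
    doc.put("colors", new BsonDocument("$set", new BsonArray(
      Arrays.asList(new BsonString("yellow"), new BsonString("green"), new BsonString("red")))));

    // ADD on a numeric field adds the operand (a negative operand effectively subtracts);
    // ADD on a {"$set": [...]} field unions the new elements into the existing set.
    BsonDocument add = new BsonDocument();
    add.put("price", new BsonDouble(10));
    add.put("colors", new BsonDocument("$set",
      new BsonArray(Arrays.asList(new BsonString("blue"), new BsonString("yellow")))));

    // Expected per the javadoc: price becomes 244.5 and colors becomes
    // {"yellow", "green", "red", "blue"}.
    UpdateExpressionUtils.updateExpression(new BsonDocument("$ADD", add), doc);
    System.out.println(doc.toJson());
  }
}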
*/ private static BsonValue modifyFieldValueByAdd(final BsonValue currentValue, - final BsonValue newVal) { - if ((currentValue.isNumber() || currentValue.isDecimal128()) && (newVal.isNumber() - || newVal.isDecimal128())) { + final BsonValue newVal) { + if ( + (currentValue.isNumber() || currentValue.isDecimal128()) + && (newVal.isNumber() || newVal.isDecimal128()) + ) { Number num1 = getNumberFromBsonNumber((BsonNumber) currentValue); Number num2 = getNumberFromBsonNumber((BsonNumber) newVal); Number newNum = addNum(num1, num2); return getBsonNumberFromNumber(newNum); } else if (areBsonSetOfSameType(currentValue, newVal)) { Set set1 = - new HashSet<>(((BsonArray) ((BsonDocument) currentValue).get("$set")).getValues()); + new HashSet<>(((BsonArray) ((BsonDocument) currentValue).get("$set")).getValues()); Set set2 = - new HashSet<>(((BsonArray) ((BsonDocument) newVal).get("$set")).getValues()); + new HashSet<>(((BsonArray) ((BsonDocument) newVal).get("$set")).getValues()); set1.addAll(set2); BsonDocument bsonDocument = new BsonDocument(); bsonDocument.put("$set", new BsonArray(new ArrayList<>(set1))); return bsonDocument; } throw new RuntimeException( - "Data type for current value " + currentValue + " is not matching with new value " - + newVal); + "Data type for current value " + currentValue + " is not matching with new value " + newVal); } /** * Update the given document by performing UNSET operation on one or more fields. This operation * is applicable to any field of the document at any level of the hierarchy. If the field exists, * it will be deleted. - * - * @param unsetExpr Unset Expression Document. + * @param unsetExpr Unset Expression Document. * @param bsonDocument Document contents to be updated. */ private static void executeRemoveExpression(final BsonDocument unsetExpr, - final BsonDocument bsonDocument) { + final BsonDocument bsonDocument) { for (Map.Entry removeField : unsetExpr.entrySet()) { String fieldKey = removeField.getKey(); BsonValue topLevelValue = bsonDocument.get(fieldKey); @@ -305,15 +293,14 @@ private static void executeRemoveExpression(final BsonDocument unsetExpr, /** * Update the given document by performing SET operation on a given field. This operation is - * applicable to any field of the document. The SET operation represents either adding a new - * field with the given value of updating the existing field with new value provided by the - * SET Expression Document. - * + * applicable to any field of the document. The SET operation represents either adding a new field + * with the given value of updating the existing field with new value provided by the SET + * Expression Document. * @param setExpression SET Expression Document. - * @param bsonDocument Document contents to be updated. + * @param bsonDocument Document contents to be updated. */ private static void executeSetExpression(final BsonDocument setExpression, - final BsonDocument bsonDocument) { + final BsonDocument bsonDocument) { for (Map.Entry setEntry : setExpression.entrySet()) { String fieldKey = setEntry.getKey(); BsonValue fieldVal = setEntry.getValue(); @@ -332,46 +319,38 @@ private static void executeSetExpression(final BsonDocument setExpression, } /** - * Update the nested field with the given update operation. The update operation is determined - * by the enum value {@code updateOp}. - * The field key is expected to contain "." and/or "[]" notations for nested documents and/or - * nested array elements. 
This function keeps recursively calling itself until it reaches the - * leaf node in the given tree. - * For instance, for field key "category.subcategories.brands[5]", first the function - * evaluates and retrieves the value for top-level field "category". The value of "category" - * is expected to be nested document. First function call has value as full document, it retries - * nested document under "category" field and calls the function recursively with index value - * same as index value of first "." (dot) in the field key. For field key - * "category.subcategories.brands[5]", the index value would be 8. The second function call - * retrieves value of field key "subcategories", which is expected to be nested document. - * The third function call gets this nested document as BsonValue and index value as 22 as the - * field key has second "." (dot) notation at index 22. The third function call searches for - * field key "brands" and expects its value as nested array. The forth function call gets - * this nested array as BsonValue and index 29 as the field key has "[]" array notation starting - * at index 29. The forth function call retrieves value of nested array element at index 5. - * As the function is at leaf node in the tree, now it performs the update operation as per the + * Update the nested field with the given update operation. The update operation is determined by + * the enum value {@code updateOp}. The field key is expected to contain "." and/or "[]" notations + * for nested documents and/or nested array elements. This function keeps recursively calling + * itself until it reaches the leaf node in the given tree. For instance, for field key + * "category.subcategories.brands[5]", first the function evaluates and retrieves the value for + * top-level field "category". The value of "category" is expected to be nested document. First + * function call has value as full document, it retries nested document under "category" field and + * calls the function recursively with index value same as index value of first "." (dot) in the + * field key. For field key "category.subcategories.brands[5]", the index value would be 8. The + * second function call retrieves value of field key "subcategories", which is expected to be + * nested document. The third function call gets this nested document as BsonValue and index value + * as 22 as the field key has second "." (dot) notation at index 22. The third function call + * searches for field key "brands" and expects its value as nested array. The forth function call + * gets this nested array as BsonValue and index 29 as the field key has "[]" array notation + * starting at index 29. The forth function call retrieves value of nested array element at index + * 5. As the function is at leaf node in the tree, now it performs the update operation as per the * given update operator ($SET / $UNSET / $ADD / $DELETE_FROM_SET) semantics. - * - * @param value Bson value at the given level of the document hierarchy. - * @param idx The index value for the given field key. The function is expected to retrieve - * the value of the nested document or array at the given level of the tree. + * @param value Bson value at the given level of the document hierarchy. + * @param idx The index value for the given field key. The function is expected to retrieve + * the value of the nested document or array at the given level of the tree. * @param fieldKey The full field key. 
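The path traversal described above can be sketched end to end for the same "category.subcategories.brands[5]" key. Illustrative only, not part of this patch: it assumes the enclosing utility class is UpdateExpressionUtils, that this "." / "[]" key syntax is accepted directly as the key of a "$SET" expression, and that the brand values are hypothetical placeholders.

import java.util.Arrays;

import org.bson.BsonArray;
import org.bson.BsonDocument;
import org.bson.BsonString;

public class NestedSetSketch {
  public static void main(String[] args) {
    // category -> subcategories -> brands, where brands is an array with six elements
    // so that index 5 exists.
    BsonArray brands = new BsonArray(Arrays.asList(
      new BsonString("brand0"), new BsonString("brand1"), new BsonString("brand2"),
      new BsonString("brand3"), new BsonString("brand4"), new BsonString("brand5")));
    BsonDocument doc = new BsonDocument("category",
      new BsonDocument("subcategories", new BsonDocument("brands", brands)));

    // The dotted/indexed key walks category (document), then subcategories (document),
    // then brands (array), and finally replaces the element at index 5.
    BsonDocument set = new BsonDocument("category.subcategories.brands[5]",
      new BsonString("newBrand"));
    UpdateExpressionUtils.updateExpression(new BsonDocument("$SET", set), doc);
    System.out.println(doc.toJson());
  }
}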
- * @param newVal The new Bson value to be added to the given nested document or a particular - * index of the given nested array. + * @param newVal The new Bson value to be added to the given nested document or a particular + * index of the given nested array. * @param updateOp The enum value representing the update operation to perform. */ - private static void updateNestedField(final BsonValue value, - final int idx, - final String fieldKey, - final BsonValue newVal, - final UpdateOp updateOp) { + private static void updateNestedField(final BsonValue value, final int idx, final String fieldKey, + final BsonValue newVal, final UpdateOp updateOp) { int curIdx = idx; if (fieldKey.charAt(curIdx) == '.') { if (value == null || !value.isDocument()) { - LOGGER.error( - "Value is null or not document. Value: {}, Idx: {}, fieldKey: {}, New val: {}," - + " Update op: {}", - value, idx, fieldKey, newVal, updateOp); + LOGGER.error("Value is null or not document. Value: {}, Idx: {}, fieldKey: {}, New val: {}," + + " Update op: {}", value, idx, fieldKey, newVal, updateOp); throw new RuntimeException("Value is null or it is not of type document."); } BsonDocument nestedDocument = (BsonDocument) value; @@ -404,7 +383,7 @@ private static void updateNestedField(final BsonValue value, int arrayIdx = Integer.parseInt(arrayIdxStr.toString()); if (value == null || !value.isArray()) { LOGGER.error("Value is null or not document. Value: {}, Idx: {}, fieldKey: {}, New val: {}", - value, idx, fieldKey, newVal); + value, idx, fieldKey, newVal); throw new RuntimeException("Value is null or not array."); } BsonArray nestedArray = (BsonArray) value; @@ -447,26 +426,21 @@ private static void updateNestedField(final BsonValue value, } /** - * Perform update operation at the leaf node. This method is called only when the leaf - * node or the target node for the given field key is encountered while iterating through - * the tree structure. This is specifically called when the closest ancestor of the - * given node is a nested array. The leaf node is for the field key. For example, for - * "category.subcategories", the leaf node is "subcategories" whereas for - * "category.subcategories.brands[2]", the leaf node is "brands". - * - * @param fieldKey The full field key. - * @param newVal New value to be used while performing update operation on the existing node. - * @param updateOp Type of the update operation to be performed. - * @param arrayIdx The index of the array at which the target node value is to be updated. - * @param nestedArray The parent or ancestor node, expected to be nested array only. - * For example, for "category.subcategories.brands[5]" field key, the nestedArray is expected to - * be "brands" array and arrayIdx is expected to be 5. + * Perform update operation at the leaf node. This method is called only when the leaf node or the + * target node for the given field key is encountered while iterating through the tree structure. + * This is specifically called when the closest ancestor of the given node is a nested array. The + * leaf node is for the field key. For example, for "category.subcategories", the leaf node is + * "subcategories" whereas for "category.subcategories.brands[2]", the leaf node is "brands". + * @param fieldKey The full field key. + * @param newVal New value to be used while performing update operation on the existing node. + * @param updateOp Type of the update operation to be performed. + * @param arrayIdx The index of the array at which the target node value is to be updated. 
+ * @param nestedArray The parent or ancestor node, expected to be nested array only. For example, + * for "category.subcategories.brands[5]" field key, the nestedArray is + * expected to be "brands" array and arrayIdx is expected to be 5. */ - private static void updateArrayAtLeafNode(final String fieldKey, - final BsonValue newVal, - final UpdateOp updateOp, - final int arrayIdx, - final BsonArray nestedArray) { + private static void updateArrayAtLeafNode(final String fieldKey, final BsonValue newVal, + final UpdateOp updateOp, final int arrayIdx, final BsonArray nestedArray) { switch (updateOp) { case SET: { if (arrayIdx < nestedArray.size()) { @@ -506,10 +480,8 @@ private static void updateArrayAtLeafNode(final String fieldKey, nestedArray.set(arrayIdx, modifiedVal); } } else { - LOGGER.info( - "Nothing to be removed as nested list does not have value for field {}. " - + "Update operator: {}", - fieldKey, updateOp); + LOGGER.info("Nothing to be removed as nested list does not have value for field {}. " + + "Update operator: {}", fieldKey, updateOp); } } break; @@ -518,22 +490,22 @@ private static void updateArrayAtLeafNode(final String fieldKey, } /** - * Perform update operation at the leaf node. This method is called only when the leaf - * node or the target node for the given field key is encountered while iterating through - * the tree structure. This is specifically called when the closest ancestor of the - * given node is a nested document. The leaf node is for the field key. For example, for - * "category.subcategories", the leaf node is "subcategories" whereas for - * "category.subcategories.brands[2]", the leaf node is "brands". - * - * @param newVal New value to be used while performing update operation on the existing node. - * @param updateOp Type of the update operation to be performed. - * @param nestedDocument The parent or ancestor node, expected to be nested document only. + * Perform update operation at the leaf node. This method is called only when the leaf node or the + * target node for the given field key is encountered while iterating through the tree structure. + * This is specifically called when the closest ancestor of the given node is a nested document. + * The leaf node is for the field key. For example, for "category.subcategories", the leaf node is + * "subcategories" whereas for "category.subcategories.brands[2]", the leaf node is "brands". + * @param newVal New value to be used while performing update operation on the + * existing node. + * @param updateOp Type of the update operation to be performed. + * @param nestedDocument The parent or ancestor node, expected to be nested document only. * @param targetNodeFieldKey The target fieldKey value. For example, for "category.subcategories" - * field key, the target node field key is expected to be "subcategories" and the nestedDocument - * is expected to be "category" document. + * field key, the target node field key is expected to be + * "subcategories" and the nestedDocument is expected to be "category" + * document. 
*/ private static void updateDocumentAtLeafNode(BsonValue newVal, UpdateOp updateOp, - BsonDocument nestedDocument, StringBuilder targetNodeFieldKey) { + BsonDocument nestedDocument, StringBuilder targetNodeFieldKey) { switch (updateOp) { case SET: { nestedDocument.put(targetNodeFieldKey.toString(), newVal); @@ -546,7 +518,8 @@ private static void updateDocumentAtLeafNode(BsonValue newVal, UpdateOp updateOp case ADD: { BsonValue currentValue = nestedDocument.get(targetNodeFieldKey.toString()); if (currentValue != null) { - nestedDocument.put(targetNodeFieldKey.toString(), modifyFieldValueByAdd(currentValue, newVal)); + nestedDocument.put(targetNodeFieldKey.toString(), + modifyFieldValueByAdd(currentValue, newVal)); } else { nestedDocument.put(targetNodeFieldKey.toString(), newVal); } @@ -563,8 +536,8 @@ private static void updateDocumentAtLeafNode(BsonValue newVal, UpdateOp updateOp } } else { LOGGER.info( - "Nothing to be removed as field with key {} does not exist. Update operator: {}", - targetNodeFieldKey, updateOp); + "Nothing to be removed as field with key {} does not exist. Update operator: {}", + targetNodeFieldKey, updateOp); } break; } @@ -573,23 +546,24 @@ private static void updateDocumentAtLeafNode(BsonValue newVal, UpdateOp updateOp /** * Retrieve the value to be updated for the given current value. If the current value does not - * contain any arithmetic operators, the current value is returned without any modifications. - * If the current value contains arithmetic expressions like "a + b" or "a - b", the values of + * contain any arithmetic operators, the current value is returned without any modifications. If + * the current value contains arithmetic expressions like "a + b" or "a - b", the values of * operands are retrieved from the given document and if the values are numeric, the given * arithmetic operation is performed. - * - * @param curValue The current value. + * @param curValue The current value. * @param bsonDocument The document with all field key-value pairs. * @return Updated values to be used by SET operation. */ private static BsonValue getNewFieldValue(final BsonValue curValue, - final BsonDocument bsonDocument) { - if (curValue != null && curValue.isString() && ( - ((BsonString) curValue).getValue().contains(" + ") || ((BsonString) curValue).getValue() - .contains(" - "))) { + final BsonDocument bsonDocument) { + if ( + curValue != null && curValue.isString() + && (((BsonString) curValue).getValue().contains(" + ") + || ((BsonString) curValue).getValue().contains(" - ")) + ) { String[] tokens = ((BsonString) curValue).getValue().split("\\s+"); boolean addNum = true; - // Pattern pattern = Pattern.compile(":?[a-zA-Z0-9]+"); + // Pattern pattern = Pattern.compile(":?[a-zA-Z0-9]+"); Pattern pattern = Pattern.compile("[#:$]?[^\\s\\n]+"); Number newNum = null; for (String token : tokens) { @@ -605,25 +579,25 @@ private static BsonValue getNewFieldValue(final BsonValue curValue, String operand = matcher.group(); Number literalNum; BsonValue topLevelValue = bsonDocument.get(operand); - BsonValue bsonValue = topLevelValue != null ? - topLevelValue : - CommonComparisonExpressionUtils.getFieldFromDocument(operand, bsonDocument); + BsonValue bsonValue = topLevelValue != null + ? topLevelValue + : CommonComparisonExpressionUtils.getFieldFromDocument(operand, bsonDocument); if (bsonValue == null && (literalNum = stringToNumber(operand)) != null) { Number val = literalNum; newNum = - newNum == null ? val : (addNum ? 
addNum(newNum, val) : subtractNum(newNum, val)); + newNum == null ? val : (addNum ? addNum(newNum, val) : subtractNum(newNum, val)); } else { if (bsonValue == null) { throw new IllegalArgumentException("Operand " + operand + " does not exist"); } if (!bsonValue.isNumber() && !bsonValue.isDecimal128()) { throw new IllegalArgumentException( - "Operand " + operand + " is not provided as number type"); + "Operand " + operand + " is not provided as number type"); } Number val = getNumberFromBsonNumber((BsonNumber) bsonValue); newNum = - newNum == null ? val : (addNum ? addNum(newNum, val) : subtractNum(newNum, val)); + newNum == null ? val : (addNum ? addNum(newNum, val) : subtractNum(newNum, val)); } } } @@ -634,7 +608,6 @@ private static BsonValue getNewFieldValue(final BsonValue curValue, /** * Performs arithmetic addition operation on the given numeric operands. - * * @param num1 First number. * @param num2 Second number. * @return The value as addition of both numbers. @@ -653,7 +626,6 @@ private static Number addNum(final Number num1, final Number num2) { /** * Performs arithmetic subtraction operation on the given numeric operands. - * * @param num1 First number. * @param num2 Second number. * @return The subtracted value as first number minus second number. @@ -672,7 +644,6 @@ private static Number subtractNum(final Number num1, final Number num2) { /** * Convert the given Number to String. - * * @param number The Number object. * @return String represented number value. */ @@ -691,7 +662,6 @@ private static String numberToString(Number number) { /** * Convert the given String to Number. - * * @param number The String represented numeric value. * @return The Number object. */ @@ -720,7 +690,6 @@ private static Number stringToNumber(String number) { /** * Convert Number to BsonNumber. - * * @param number The Number object. * @return The BsonNumber object. */ @@ -742,7 +711,6 @@ private static BsonNumber getBsonNumberFromNumber(Number number) { /** * Convert BsonNumber to Number. - * * @param bsonNumber The BsonNumber object. * @return The Number object. */ @@ -762,7 +730,6 @@ public static Number getNumberFromBsonNumber(BsonNumber bsonNumber) { /** * Returns true if the given BsonValue represents Set data structure. - * * @param bsonValue The value. * @return True if the given BsonValue represents Set data structure. */ @@ -779,16 +746,15 @@ private static boolean isBsonSet(final BsonValue bsonValue) { } /** - * Returns true if both values represent Set data structure and the contents of the Set are - * of same type. - * + * Returns true if both values represent Set data structure and the contents of the Set are of + * same type. * @param bsonValue1 First value. * @param bsonValue2 Second value. - * @return True if both values represent Set data structure and the contents of the Set are - * of same type. + * @return True if both values represent Set data structure and the contents of the Set are of + * same type. 
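The arithmetic form of SET values handled by getNewFieldValue can also be sketched. Illustrative only, not part of this patch: the class name UpdateExpressionUtils and the field names price, discount, net and gross are assumptions, and the values use the plain "a + b" / "a - b" operand form the javadoc describes (operands resolve to numeric document fields or numeric literals).

import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonString;

public class ArithmeticSetSketch {
  public static void main(String[] args) {
    BsonDocument doc = new BsonDocument();
    doc.put("price", new BsonInt32(100));
    doc.put("discount", new BsonInt32(15));

    // "price - discount" should evaluate to 85 and "price + 10" to 110 before being
    // stored by the SET operation.
    BsonDocument set = new BsonDocument();
    set.put("net", new BsonString("price - discount"));
    set.put("gross", new BsonString("price + 10"));
    UpdateExpressionUtils.updateExpression(new BsonDocument("$SET", set), doc);
    System.out.println(doc.toJson());
  }
}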
*/ private static boolean areBsonSetOfSameType(final BsonValue bsonValue1, - final BsonValue bsonValue2) { + final BsonValue bsonValue2) { if (!isBsonSet(bsonValue1) || !isBsonSet(bsonValue2)) { return false; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java index 922c7c94bcb..7fc01038f4a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBasePattern.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,12 +21,12 @@ public abstract class AbstractBasePattern { - public abstract void matches(ImmutableBytesWritable srcPtr); + public abstract void matches(ImmutableBytesWritable srcPtr); - public abstract void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, - int rStrOffset, int rStrLen); + public abstract void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, int rStrOffset, + int rStrLen); - public abstract void substr(ImmutableBytesWritable srcPtr, int offsetInStr); + public abstract void substr(ImmutableBytesWritable srcPtr, int offsetInStr); - public abstract String pattern(); + public abstract String pattern(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java index 756533895e2..dc8def2861d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/AbstractBaseSplitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,5 +20,5 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; public abstract interface AbstractBaseSplitter { - public abstract boolean split(ImmutableBytesWritable srcPtr); + public abstract boolean split(ImmutableBytesWritable srcPtr); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java index 76344f217fb..b30e06fa369 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/GuavaSplitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,32 +23,31 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PVarcharArray; import org.apache.phoenix.schema.types.PhoenixArray; -import org.apache.phoenix.util.ByteUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ByteUtil; public class GuavaSplitter implements AbstractBaseSplitter { - private final Splitter splitter; + private final Splitter splitter; - public GuavaSplitter(String patternString) { - if (patternString != null) { - splitter = Splitter.onPattern(patternString); - } else { - splitter = null; - } + public GuavaSplitter(String patternString) { + if (patternString != null) { + splitter = Splitter.onPattern(patternString); + } else { + splitter = null; } + } - @Override - public boolean split(ImmutableBytesWritable srcPtr) { - String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); - if (sourceStr == null) { // sourceStr evaluated to null - srcPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } else { - List splitStrings = Lists.newArrayList(splitter.split(sourceStr)); - PhoenixArray splitArray = new PhoenixArray(PVarchar.INSTANCE, splitStrings.toArray()); - srcPtr.set(PVarcharArray.INSTANCE.toBytes(splitArray)); - } - return true; + @Override + public boolean split(ImmutableBytesWritable srcPtr) { + String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); + if (sourceStr == null) { // sourceStr evaluated to null + srcPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + List splitStrings = Lists.newArrayList(splitter.split(sourceStr)); + PhoenixArray splitArray = new PhoenixArray(PVarchar.INSTANCE, splitStrings.toArray()); + srcPtr.set(PVarcharArray.INSTANCE.toBytes(splitArray)); } + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java index d9bb54deb87..f7b01d0efeb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JONIPattern.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,6 +25,7 @@ import org.apache.phoenix.schema.types.PArrayDataTypeEncoder; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; import org.jcodings.Encoding; @@ -34,165 +35,160 @@ import org.joni.Regex; import org.joni.Syntax; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class JONIPattern extends AbstractBasePattern implements AbstractBaseSplitter { - private final Regex pattern; - private final String patternString; + private final Regex pattern; + private final String patternString; - public JONIPattern(String patternString) { - this(patternString, 0); - } + public JONIPattern(String patternString) { + this(patternString, 0); + } - public JONIPattern(String patternString, int flags) { - this(patternString, flags, UTF8Encoding.INSTANCE); - } + public JONIPattern(String patternString, int flags) { + this(patternString, flags, UTF8Encoding.INSTANCE); + } - public JONIPattern(String patternString, int flags, Encoding coding) { - this.patternString = patternString; - if (patternString != null) { - byte[] bytes = patternString.getBytes(coding.getCharset()); - pattern = new Regex(bytes, 0, bytes.length, flags, coding, Syntax.Java); - } else { - pattern = null; - } + public JONIPattern(String patternString, int flags, Encoding coding) { + this.patternString = patternString; + if (patternString != null) { + byte[] bytes = patternString.getBytes(coding.getCharset()); + pattern = new Regex(bytes, 0, bytes.length, flags, coding, Syntax.Java); + } else { + pattern = null; } - - @Override - public void matches(ImmutableBytesWritable srcPtr) { - Preconditions.checkNotNull(srcPtr); - boolean ret = matches(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength()); - srcPtr.set(ret ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + } + + @Override + public void matches(ImmutableBytesWritable srcPtr) { + Preconditions.checkNotNull(srcPtr); + boolean ret = matches(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength()); + srcPtr.set(ret ? 
PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + } + + private boolean matches(byte[] bytes, int offset, int len) { + int range = offset + len; + Matcher matcher = pattern.matcher(bytes, offset, range); + int ret = matcher.match(offset, range, Option.DEFAULT); + return len == ret; + } + + @Override + public String pattern() { + return patternString; + } + + @Override + public void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, int rStrOffset, + int rStrLen) { + Preconditions.checkNotNull(srcPtr); + Preconditions.checkNotNull(rStrBytes); + byte[] replacedBytes = replaceAll(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), + rStrBytes, rStrOffset, rStrLen); + srcPtr.set(replacedBytes); + } + + private byte[] replaceAll(byte[] srcBytes, int srcOffset, int srcLen, byte[] replaceBytes, + int replaceOffset, int replaceLen) { + class PairInt { + public int begin, end; + + public PairInt(int begin, int end) { + this.begin = begin; + this.end = end; + } } - - private boolean matches(byte[] bytes, int offset, int len) { - int range = offset + len; - Matcher matcher = pattern.matcher(bytes, offset, range); - int ret = matcher.match(offset, range, Option.DEFAULT); - return len == ret; + int srcRange = srcOffset + srcLen; + Matcher matcher = pattern.matcher(srcBytes, 0, srcRange); + int cur = srcOffset; + List searchResults = new LinkedList(); + int totalBytesNeeded = 0; + while (true) { + int nextCur = matcher.search(cur, srcRange, Option.DEFAULT); + if (nextCur < 0) { + totalBytesNeeded += srcRange - cur; + break; + } + searchResults.add(new PairInt(matcher.getBegin(), matcher.getEnd())); + totalBytesNeeded += (nextCur - cur) + replaceLen; + cur = matcher.getEnd(); } - - @Override - public String pattern() { - return patternString; + byte[] ret = new byte[totalBytesNeeded]; + int curPosInSrc = srcOffset, curPosInRet = 0; + for (PairInt pair : searchResults) { + System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, pair.begin - curPosInSrc); + curPosInRet += pair.begin - curPosInSrc; + System.arraycopy(replaceBytes, replaceOffset, ret, curPosInRet, replaceLen); + curPosInRet += replaceLen; + curPosInSrc = pair.end; } - - @Override - public void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, int rStrOffset, - int rStrLen) { - Preconditions.checkNotNull(srcPtr); - Preconditions.checkNotNull(rStrBytes); - byte[] replacedBytes = - replaceAll(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), rStrBytes, - rStrOffset, rStrLen); - srcPtr.set(replacedBytes); + System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, srcRange - curPosInSrc); + return ret; + } + + @Override + public void substr(ImmutableBytesWritable ptr, int offsetInStr) { + Preconditions.checkNotNull(ptr); + int offsetInBytes = StringUtil.calculateUTF8Offset(ptr.get(), ptr.getOffset(), ptr.getLength(), + SortOrder.ASC, offsetInStr); + if (offsetInBytes < 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + substr(ptr.get(), offsetInBytes, ptr.getOffset() + ptr.getLength(), ptr); } - - private byte[] replaceAll(byte[] srcBytes, int srcOffset, int srcLen, byte[] replaceBytes, - int replaceOffset, int replaceLen) { - class PairInt { - public int begin, end; - - public PairInt(int begin, int end) { - this.begin = begin; - this.end = end; - } - } - int srcRange = srcOffset + srcLen; - Matcher matcher = pattern.matcher(srcBytes, 0, srcRange); - int cur = srcOffset; - List searchResults = new LinkedList(); - int totalBytesNeeded = 0; - while (true) { - int nextCur = matcher.search(cur, srcRange, 
Option.DEFAULT); - if (nextCur < 0) { - totalBytesNeeded += srcRange - cur; - break; - } - searchResults.add(new PairInt(matcher.getBegin(), matcher.getEnd())); - totalBytesNeeded += (nextCur - cur) + replaceLen; - cur = matcher.getEnd(); - } - byte[] ret = new byte[totalBytesNeeded]; - int curPosInSrc = srcOffset, curPosInRet = 0; - for (PairInt pair : searchResults) { - System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, pair.begin - curPosInSrc); - curPosInRet += pair.begin - curPosInSrc; - System.arraycopy(replaceBytes, replaceOffset, ret, curPosInRet, replaceLen); - curPosInRet += replaceLen; - curPosInSrc = pair.end; - } - System.arraycopy(srcBytes, curPosInSrc, ret, curPosInRet, srcRange - curPosInSrc); - return ret; - } - - @Override - public void substr(ImmutableBytesWritable ptr, int offsetInStr) { - Preconditions.checkNotNull(ptr); - int offsetInBytes = StringUtil.calculateUTF8Offset(ptr.get(), ptr.getOffset(), - ptr.getLength(), SortOrder.ASC, offsetInStr); - if (offsetInBytes < 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } else { - substr(ptr.get(), offsetInBytes, ptr.getOffset() + ptr.getLength(), ptr); - } - } - - private boolean substr(byte[] srcBytes, int offset, int range, ImmutableBytesWritable outPtr) { - Matcher matcher = pattern.matcher(srcBytes, 0, range); - boolean ret = matcher.search(offset, range, Option.DEFAULT) >= 0; - if (ret) { - int len = matcher.getEnd() - matcher.getBegin(); - outPtr.set(srcBytes, matcher.getBegin(), len); - } else { - outPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - return ret; - } - - @Override - public boolean split(ImmutableBytesWritable srcPtr) { - return split(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), srcPtr); + } + + private boolean substr(byte[] srcBytes, int offset, int range, ImmutableBytesWritable outPtr) { + Matcher matcher = pattern.matcher(srcBytes, 0, range); + boolean ret = matcher.search(offset, range, Option.DEFAULT) >= 0; + if (ret) { + int len = matcher.getEnd() - matcher.getBegin(); + outPtr.set(srcBytes, matcher.getBegin(), len); + } else { + outPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); } - - private boolean - split(byte[] srcBytes, int srcOffset, int srcLen, ImmutableBytesWritable outPtr) { - SortOrder sortOrder = SortOrder.ASC; - PArrayDataTypeEncoder builder = - new PArrayDataTypeEncoder(PVarchar.INSTANCE, sortOrder); - int srcRange = srcOffset + srcLen; - Matcher matcher = pattern.matcher(srcBytes, 0, srcRange); - int cur = srcOffset; - boolean append; - while (true) { - int nextCur = matcher.search(cur, srcRange, Option.DEFAULT); - if (nextCur < 0) { - builder.appendValue(srcBytes, cur, srcRange - cur); - break; - } - - // To handle the following case, which adds null at first. - // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null} - if (cur == matcher.getBegin()) { - builder.appendValue(srcBytes, cur, 0); - } - - if (cur < matcher.getBegin()) { - builder.appendValue(srcBytes, cur, matcher.getBegin() - cur); - } - cur = matcher.getEnd(); - - // To handle the following case, which adds null at last. 
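The leading/trailing null padding that the REGEXP_SPLIT comments in this split() method describe can be reproduced with plain java.util.regex. This is an illustrative sketch only, not part of this patch; it simply mirrors the cur == matcher.getBegin() and cur == srcRange checks with a standalone program.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexpSplitPaddingSketch {
  public static void main(String[] args) {
    String src = "12ONE34TWO56THREE78";
    Matcher m = Pattern.compile("[0-9]+").matcher(src);
    List<String> parts = new ArrayList<>();
    int cur = 0;
    while (m.find(cur)) {
      // A match that starts exactly at the current position contributes a null element.
      parts.add(m.start() == cur ? null : src.substring(cur, m.start()));
      cur = m.end();
      // A match that ends at the end of the input pads a trailing null element.
      if (cur == src.length()) {
        parts.add(null);
      }
    }
    if (cur < src.length()) {
      parts.add(src.substring(cur));
    }
    // Prints [null, ONE, TWO, THREE, null], matching the comment's expected result.
    System.out.println(parts);
  }
}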
- // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null} - if (cur == srcRange) { - builder.appendValue(srcBytes, cur, 0); - break; - } - } - byte[] bytes = builder.encode(); - if (bytes == null) return false; - outPtr.set(bytes); - return true; + return ret; + } + + @Override + public boolean split(ImmutableBytesWritable srcPtr) { + return split(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), srcPtr); + } + + private boolean split(byte[] srcBytes, int srcOffset, int srcLen, ImmutableBytesWritable outPtr) { + SortOrder sortOrder = SortOrder.ASC; + PArrayDataTypeEncoder builder = new PArrayDataTypeEncoder(PVarchar.INSTANCE, sortOrder); + int srcRange = srcOffset + srcLen; + Matcher matcher = pattern.matcher(srcBytes, 0, srcRange); + int cur = srcOffset; + boolean append; + while (true) { + int nextCur = matcher.search(cur, srcRange, Option.DEFAULT); + if (nextCur < 0) { + builder.appendValue(srcBytes, cur, srcRange - cur); + break; + } + + // To handle the following case, which adds null at first. + // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null} + if (cur == matcher.getBegin()) { + builder.appendValue(srcBytes, cur, 0); + } + + if (cur < matcher.getBegin()) { + builder.appendValue(srcBytes, cur, matcher.getBegin() - cur); + } + cur = matcher.getEnd(); + + // To handle the following case, which adds null at last. + // REGEXP_SPLIT("12ONE34TWO56THREE78","[0-9]+")={null, "ONE", "TWO", "THREE", null} + if (cur == srcRange) { + builder.appendValue(srcBytes, cur, 0); + break; + } } + byte[] bytes = builder.encode(); + if (bytes == null) return false; + outPtr.set(bytes); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java index e812803433d..ddd2935b808 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/util/regex/JavaPattern.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,72 +23,71 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.util.ByteUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.util.ByteUtil; public class JavaPattern extends AbstractBasePattern { - private final Pattern pattern; + private final Pattern pattern; - public JavaPattern(String patternString) { - this(patternString, 0); - } + public JavaPattern(String patternString) { + this(patternString, 0); + } - public JavaPattern(String patternString, int flags) { - if (patternString != null) { - pattern = Pattern.compile(patternString, flags); - } else { - pattern = null; - } + public JavaPattern(String patternString, int flags) { + if (patternString != null) { + pattern = Pattern.compile(patternString, flags); + } else { + pattern = null; } + } - @Override - public void matches(ImmutableBytesWritable srcPtr) { - Preconditions.checkNotNull(srcPtr); - String matcherSourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); - if (srcPtr.getLength() == 0 && matcherSourceStr == null) matcherSourceStr = ""; - boolean ret = pattern.matcher(matcherSourceStr).matches(); - srcPtr.set(ret ? PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); - } + @Override + public void matches(ImmutableBytesWritable srcPtr) { + Preconditions.checkNotNull(srcPtr); + String matcherSourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); + if (srcPtr.getLength() == 0 && matcherSourceStr == null) matcherSourceStr = ""; + boolean ret = pattern.matcher(matcherSourceStr).matches(); + srcPtr.set(ret ? 
PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + } - @Override - public String pattern() { - return pattern.pattern(); - } + @Override + public String pattern() { + return pattern.pattern(); + } - @Override - public void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, int rStrOffset, - int rStrLen) { - Preconditions.checkNotNull(srcPtr); - Preconditions.checkNotNull(rStrBytes); - String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); - String replaceStr = (String) PVarchar.INSTANCE.toObject(rStrBytes, rStrOffset, rStrLen); - if (srcPtr.getLength() == 0 && sourceStr == null) sourceStr = ""; - if (rStrLen == 0 && replaceStr == null) replaceStr = ""; - String replacedStr = pattern.matcher(sourceStr).replaceAll(replaceStr); - srcPtr.set(PVarchar.INSTANCE.toBytes(replacedStr)); - } + @Override + public void replaceAll(ImmutableBytesWritable srcPtr, byte[] rStrBytes, int rStrOffset, + int rStrLen) { + Preconditions.checkNotNull(srcPtr); + Preconditions.checkNotNull(rStrBytes); + String sourceStr = (String) PVarchar.INSTANCE.toObject(srcPtr); + String replaceStr = (String) PVarchar.INSTANCE.toObject(rStrBytes, rStrOffset, rStrLen); + if (srcPtr.getLength() == 0 && sourceStr == null) sourceStr = ""; + if (rStrLen == 0 && replaceStr == null) replaceStr = ""; + String replacedStr = pattern.matcher(sourceStr).replaceAll(replaceStr); + srcPtr.set(PVarchar.INSTANCE.toBytes(replacedStr)); + } - @Override - public void substr(ImmutableBytesWritable ptr, int offsetInStr) { - Preconditions.checkNotNull(ptr); - String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr); - if (sourceStr == null) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + @Override + public void substr(ImmutableBytesWritable ptr, int offsetInStr) { + Preconditions.checkNotNull(ptr); + String sourceStr = (String) PVarchar.INSTANCE.toObject(ptr); + if (sourceStr == null) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + if (offsetInStr < 0) offsetInStr += sourceStr.length(); + if (offsetInStr < 0 || offsetInStr >= sourceStr.length()) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + Matcher matcher = pattern.matcher(sourceStr); + boolean ret = matcher.find(offsetInStr); + if (ret) { + ptr.set(PVarchar.INSTANCE.toBytes(matcher.group())); } else { - if (offsetInStr < 0) offsetInStr += sourceStr.length(); - if (offsetInStr < 0 || offsetInStr >= sourceStr.length()) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } else { - Matcher matcher = pattern.matcher(sourceStr); - boolean ret = matcher.find(offsetInStr); - if (ret) { - ptr.set(PVarchar.INSTANCE.toBytes(matcher.group())); - } else { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - } + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); } + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/BaseExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/BaseExpressionVisitor.java index d79b54695f5..c58e29baf28 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/BaseExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/BaseExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,122 +44,122 @@ import org.apache.phoenix.expression.function.ScalarFunction; import org.apache.phoenix.expression.function.SingleAggregateFunction; - public abstract class BaseExpressionVisitor implements ExpressionVisitor { - @Override - public E defaultReturn(Expression node, List l) { - return null; - } - - @Override - public Iterator visitEnter(AndExpression node) { - return null; - } - - @Override - public Iterator visitEnter(OrExpression node) { - return null; - } - - @Override - public Iterator visitEnter(ScalarFunction node) { - return null; - } - - @Override - public Iterator visitEnter(ComparisonExpression node) { - return null; - } - - @Override - public Iterator visitEnter(LikeExpression node) { - return null; - } - - @Override - public Iterator visitEnter(SingleAggregateFunction node) { - return null; - } - - @Override - public Iterator visitEnter(CaseExpression node) { - return null; - } - - @Override - public Iterator visitEnter(NotExpression node) { - return null; - } - - @Override - public Iterator visitEnter(IsNullExpression node) { - return null; - } - - @Override - public Iterator visitEnter(InListExpression node) { - return null; - } - - @Override - public Iterator visitEnter(AddExpression node) { - return null; - } - - @Override - public Iterator visitEnter(SubtractExpression node) { - return null; - } - - @Override - public Iterator visitEnter(MultiplyExpression node) { - return null; - } - - @Override - public Iterator visitEnter(DivideExpression node) { - return null; - } - - @Override - public Iterator visitEnter(StringConcatExpression node) { - return null; - } - - @Override - public Iterator visitEnter(RowValueConstructorExpression node) { - return null; - } - - @Override - public Iterator visitEnter(CoerceExpression node) { - return null; - } - - @Override - public Iterator visitEnter(ArrayConstructorExpression node) { - return null; - } - - @Override - public Iterator visitEnter(SingleCellConstructorExpression node) { - return null; - } - - @Override - public Iterator visitEnter(ModulusExpression modulusExpression) { - return null; - } - - @Override - public Iterator visitEnter(ArrayAnyComparisonExpression arrayAnyComparisonExpression) { - return null; - } - - @Override - public Iterator visitEnter(ArrayElemRefExpression arrayElemRefExpression) { - return null; - } + @Override + public E defaultReturn(Expression node, List l) { + return null; + } + + @Override + public Iterator visitEnter(AndExpression node) { + return null; + } + + @Override + public Iterator visitEnter(OrExpression node) { + return null; + } + + @Override + public Iterator visitEnter(ScalarFunction node) { + return null; + } + + @Override + public Iterator visitEnter(ComparisonExpression node) { + return null; + } + + @Override + public Iterator visitEnter(LikeExpression node) { + return null; + } + + @Override + public Iterator visitEnter(SingleAggregateFunction node) { + return null; + } + + @Override + public Iterator visitEnter(CaseExpression node) { + return null; + } + + @Override + public Iterator visitEnter(NotExpression node) { + return null; + } + + @Override + public Iterator visitEnter(IsNullExpression node) { + return null; + } + + @Override + public Iterator visitEnter(InListExpression node) { + 
return null; + } + + @Override + public Iterator visitEnter(AddExpression node) { + return null; + } + + @Override + public Iterator visitEnter(SubtractExpression node) { + return null; + } + + @Override + public Iterator visitEnter(MultiplyExpression node) { + return null; + } + + @Override + public Iterator visitEnter(DivideExpression node) { + return null; + } + + @Override + public Iterator visitEnter(StringConcatExpression node) { + return null; + } + + @Override + public Iterator visitEnter(RowValueConstructorExpression node) { + return null; + } + + @Override + public Iterator visitEnter(CoerceExpression node) { + return null; + } + + @Override + public Iterator visitEnter(ArrayConstructorExpression node) { + return null; + } + + @Override + public Iterator visitEnter(SingleCellConstructorExpression node) { + return null; + } + + @Override + public Iterator visitEnter(ModulusExpression modulusExpression) { + return null; + } + + @Override + public Iterator + visitEnter(ArrayAnyComparisonExpression arrayAnyComparisonExpression) { + return null; + } + + @Override + public Iterator visitEnter(ArrayElemRefExpression arrayElemRefExpression) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java index b7ea4ab779a..1dc26f9342c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -53,169 +53,169 @@ public class CloneExpressionVisitor extends TraverseAllExpressionVisitor { - public CloneExpressionVisitor() { - } - - @Override - public Expression defaultReturn(Expression node, List l) { - // Needed for Expressions derived from BaseTerminalExpression which don't - // have accept methods. TODO: get rid of those - return node; - } - - @Override - public Expression visit(CorrelateVariableFieldAccessExpression node) { - return node; - } - - @Override - public Expression visit(LiteralExpression node) { - return node; - } - - @Override - public Expression visit(RowKeyColumnExpression node) { - return node; - } - - @Override - public Expression visit(KeyValueColumnExpression node) { - return node; - } - - @Override - public Expression visit(SingleCellColumnExpression node) { - return node; - } - - @Override - public Expression visit(ProjectedColumnExpression node) { - return node.clone(); - } - - @Override - public Expression visit(SequenceValueExpression node) { - return node; - } - - @Override - public Expression visitLeave(AndExpression node, List l) { - return isCloneNode(node, l) ? new AndExpression(l) : node; - } - - @Override - public Expression visitLeave(OrExpression node, List l) { - return isCloneNode(node, l) ? new OrExpression(l) : node; - } - - @Override - public Expression visitLeave(ScalarFunction node, List l) { - return isCloneNode(node, l) || !node.isThreadSafe() ? 
node.clone(l) : node; - } - - public Expression visitLeave(UDFExpression node, List l) { - return new UDFExpression(l, node.getTenantId(), node.getFunctionClassName(), - node.getJarPath(), node.getUdfFunction()); - } - - @Override - public Expression visitLeave(ComparisonExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(LikeExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l): node; - } - - @Override - public Expression visitLeave(SingleAggregateFunction node, List l) { - // Do not clone aggregate functions, as they're executed on the server side, - // so any state for evaluation will live there. - return isCloneNode(node, l) ? node : node; - } - - @Override - public Expression visitLeave(CaseExpression node, List l) { - return isCloneNode(node, l) ? new CaseExpression(l) : node; - } - - @Override - public Expression visitLeave(NotExpression node, List l) { - return isCloneNode(node, l) ? new NotExpression(l) : node; - } - - @Override - public Expression visitLeave(InListExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(IsNullExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(SubtractExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(MultiplyExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(AddExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(DivideExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(ModulusExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(CoerceExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(ArrayConstructorExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(SingleCellConstructorExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(StringConcatExpression node, List l) { - return isCloneNode(node, l) ? new StringConcatExpression(l) : node; - } - - @Override - public Expression visitLeave(RowValueConstructorExpression node, List l) { - return isCloneNode(node, l) ? node.clone(l) : node; - } - - @Override - public Expression visitLeave(ArrayAnyComparisonExpression node, List l) { - return isCloneNode(node, l) ? new ArrayAnyComparisonExpression(l) : node; - } - - @Override - public Expression visitLeave(ArrayElemRefExpression node, List l) { - return isCloneNode(node, l) ? new ArrayElemRefExpression(l) : node; - } - - public boolean isCloneNode(Expression node, List children) { - return node.isCloneExpression(); - } + public CloneExpressionVisitor() { + } + + @Override + public Expression defaultReturn(Expression node, List l) { + // Needed for Expressions derived from BaseTerminalExpression which don't + // have accept methods. 
TODO: get rid of those + return node; + } + + @Override + public Expression visit(CorrelateVariableFieldAccessExpression node) { + return node; + } + + @Override + public Expression visit(LiteralExpression node) { + return node; + } + + @Override + public Expression visit(RowKeyColumnExpression node) { + return node; + } + + @Override + public Expression visit(KeyValueColumnExpression node) { + return node; + } + + @Override + public Expression visit(SingleCellColumnExpression node) { + return node; + } + + @Override + public Expression visit(ProjectedColumnExpression node) { + return node.clone(); + } + + @Override + public Expression visit(SequenceValueExpression node) { + return node; + } + + @Override + public Expression visitLeave(AndExpression node, List l) { + return isCloneNode(node, l) ? new AndExpression(l) : node; + } + + @Override + public Expression visitLeave(OrExpression node, List l) { + return isCloneNode(node, l) ? new OrExpression(l) : node; + } + + @Override + public Expression visitLeave(ScalarFunction node, List l) { + return isCloneNode(node, l) || !node.isThreadSafe() ? node.clone(l) : node; + } + + public Expression visitLeave(UDFExpression node, List l) { + return new UDFExpression(l, node.getTenantId(), node.getFunctionClassName(), node.getJarPath(), + node.getUdfFunction()); + } + + @Override + public Expression visitLeave(ComparisonExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(LikeExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(SingleAggregateFunction node, List l) { + // Do not clone aggregate functions, as they're executed on the server side, + // so any state for evaluation will live there. + return isCloneNode(node, l) ? node : node; + } + + @Override + public Expression visitLeave(CaseExpression node, List l) { + return isCloneNode(node, l) ? new CaseExpression(l) : node; + } + + @Override + public Expression visitLeave(NotExpression node, List l) { + return isCloneNode(node, l) ? new NotExpression(l) : node; + } + + @Override + public Expression visitLeave(InListExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(IsNullExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(SubtractExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(MultiplyExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(AddExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(DivideExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(ModulusExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(CoerceExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(ArrayConstructorExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(SingleCellConstructorExpression node, List l) { + return isCloneNode(node, l) ? 
node.clone(l) : node; + } + + @Override + public Expression visitLeave(StringConcatExpression node, List l) { + return isCloneNode(node, l) ? new StringConcatExpression(l) : node; + } + + @Override + public Expression visitLeave(RowValueConstructorExpression node, List l) { + return isCloneNode(node, l) ? node.clone(l) : node; + } + + @Override + public Expression visitLeave(ArrayAnyComparisonExpression node, List l) { + return isCloneNode(node, l) ? new ArrayAnyComparisonExpression(l) : node; + } + + @Override + public Expression visitLeave(ArrayElemRefExpression node, List l) { + return isCloneNode(node, l) ? new ArrayElemRefExpression(l) : node; + } + + public boolean isCloneNode(Expression node, List children) { + return node.isCloneExpression(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ExpressionVisitor.java index 5936dc7c78f..2c066280553 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,6 @@ import org.apache.phoenix.compile.SequenceValueExpression; import org.apache.phoenix.expression.AddExpression; import org.apache.phoenix.expression.AndExpression; -import org.apache.phoenix.expression.SingleCellColumnExpression; import org.apache.phoenix.expression.ArrayConstructorExpression; import org.apache.phoenix.expression.CaseExpression; import org.apache.phoenix.expression.CoerceExpression; @@ -43,6 +42,7 @@ import org.apache.phoenix.expression.ProjectedColumnExpression; import org.apache.phoenix.expression.RowKeyColumnExpression; import org.apache.phoenix.expression.RowValueConstructorExpression; +import org.apache.phoenix.expression.SingleCellColumnExpression; import org.apache.phoenix.expression.SingleCellConstructorExpression; import org.apache.phoenix.expression.StringConcatExpression; import org.apache.phoenix.expression.SubtractExpression; @@ -51,91 +51,115 @@ import org.apache.phoenix.expression.function.ScalarFunction; import org.apache.phoenix.expression.function.SingleAggregateFunction; - /** - * * Visitor for an expression (which may contain other nested expressions) - * - * * @since 0.1 */ public interface ExpressionVisitor { - public E defaultReturn(Expression node, List l); - public Iterator defaultIterator(Expression node); - - public Iterator visitEnter(AndExpression node); - public E visitLeave(AndExpression node, List l); - - public Iterator visitEnter(OrExpression node); - public E visitLeave(OrExpression node, List l); - - public Iterator visitEnter(ScalarFunction node); - public E visitLeave(ScalarFunction node, List l); - - public Iterator visitEnter(ComparisonExpression node); - public E visitLeave(ComparisonExpression node, List l); - - public Iterator visitEnter(LikeExpression node); - public E visitLeave(LikeExpression node, List l); - - public Iterator visitEnter(SingleAggregateFunction node); - public E visitLeave(SingleAggregateFunction node, List l); - - public Iterator 
visitEnter(CaseExpression node); - public E visitLeave(CaseExpression node, List l); - - public Iterator visitEnter(NotExpression node); - public E visitLeave(NotExpression node, List l); - - public Iterator visitEnter(InListExpression node); - public E visitLeave(InListExpression node, List l); - - public Iterator visitEnter(IsNullExpression node); - public E visitLeave(IsNullExpression node, List l); - - public Iterator visitEnter(SubtractExpression node); - public E visitLeave(SubtractExpression node, List l); - - public Iterator visitEnter(MultiplyExpression node); - public E visitLeave(MultiplyExpression node, List l); - - public Iterator visitEnter(AddExpression node); - public E visitLeave(AddExpression node, List l); - - public Iterator visitEnter(DivideExpression node); - public E visitLeave(DivideExpression node, List l); - - public Iterator visitEnter(CoerceExpression node); - public E visitLeave(CoerceExpression node, List l); - - public Iterator visitEnter(ArrayConstructorExpression node); - public E visitLeave(ArrayConstructorExpression node, List l); - - public Iterator visitEnter(SingleCellConstructorExpression node); - public E visitLeave(SingleCellConstructorExpression node, List l); - - public E visit(CorrelateVariableFieldAccessExpression node); - public E visit(LiteralExpression node); - public E visit(RowKeyColumnExpression node); - public E visit(KeyValueColumnExpression node); - public E visit(SingleCellColumnExpression node); - public E visit(ProjectedColumnExpression node); - public E visit(SequenceValueExpression node); - - public Iterator visitEnter(StringConcatExpression node); - public E visitLeave(StringConcatExpression node, List l); - - public Iterator visitEnter(RowValueConstructorExpression node); - public E visitLeave(RowValueConstructorExpression node, List l); - - public Iterator visitEnter(ModulusExpression modulusExpression); - public E visitLeave(ModulusExpression node, List l); - - public Iterator visitEnter(ArrayAnyComparisonExpression arrayAnyComparisonExpression); - public E visitLeave(ArrayAnyComparisonExpression node, List l); - - public Iterator visitEnter(ArrayElemRefExpression arrayElemRefExpression); - public E visitLeave(ArrayElemRefExpression node, List l); - - + public E defaultReturn(Expression node, List l); + + public Iterator defaultIterator(Expression node); + + public Iterator visitEnter(AndExpression node); + + public E visitLeave(AndExpression node, List l); + + public Iterator visitEnter(OrExpression node); + + public E visitLeave(OrExpression node, List l); + + public Iterator visitEnter(ScalarFunction node); + + public E visitLeave(ScalarFunction node, List l); + + public Iterator visitEnter(ComparisonExpression node); + + public E visitLeave(ComparisonExpression node, List l); + + public Iterator visitEnter(LikeExpression node); + + public E visitLeave(LikeExpression node, List l); + + public Iterator visitEnter(SingleAggregateFunction node); + + public E visitLeave(SingleAggregateFunction node, List l); + + public Iterator visitEnter(CaseExpression node); + + public E visitLeave(CaseExpression node, List l); + + public Iterator visitEnter(NotExpression node); + + public E visitLeave(NotExpression node, List l); + + public Iterator visitEnter(InListExpression node); + + public E visitLeave(InListExpression node, List l); + + public Iterator visitEnter(IsNullExpression node); + + public E visitLeave(IsNullExpression node, List l); + + public Iterator visitEnter(SubtractExpression node); + + public E 
visitLeave(SubtractExpression node, List l); + + public Iterator visitEnter(MultiplyExpression node); + + public E visitLeave(MultiplyExpression node, List l); + + public Iterator visitEnter(AddExpression node); + + public E visitLeave(AddExpression node, List l); + + public Iterator visitEnter(DivideExpression node); + + public E visitLeave(DivideExpression node, List l); + + public Iterator visitEnter(CoerceExpression node); + + public E visitLeave(CoerceExpression node, List l); + + public Iterator visitEnter(ArrayConstructorExpression node); + + public E visitLeave(ArrayConstructorExpression node, List l); + + public Iterator visitEnter(SingleCellConstructorExpression node); + + public E visitLeave(SingleCellConstructorExpression node, List l); + + public E visit(CorrelateVariableFieldAccessExpression node); + + public E visit(LiteralExpression node); + + public E visit(RowKeyColumnExpression node); + + public E visit(KeyValueColumnExpression node); + + public E visit(SingleCellColumnExpression node); + + public E visit(ProjectedColumnExpression node); + + public E visit(SequenceValueExpression node); + + public Iterator visitEnter(StringConcatExpression node); + + public E visitLeave(StringConcatExpression node, List l); + + public Iterator visitEnter(RowValueConstructorExpression node); + + public E visitLeave(RowValueConstructorExpression node, List l); + + public Iterator visitEnter(ModulusExpression modulusExpression); + + public E visitLeave(ModulusExpression node, List l); + + public Iterator visitEnter(ArrayAnyComparisonExpression arrayAnyComparisonExpression); + + public E visitLeave(ArrayAnyComparisonExpression node, List l); + + public Iterator visitEnter(ArrayElemRefExpression arrayElemRefExpression); + + public E visitLeave(ArrayElemRefExpression node, List l); + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java index df6a30cc814..3622cab2533 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/KeyValueExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,18 +19,12 @@ import org.apache.phoenix.expression.KeyValueColumnExpression; - - - /** - * - * Implementation of ExpressionVisitor where only KeyValueDataAccessor - * is being visited - * - * + * Implementation of ExpressionVisitor where only KeyValueDataAccessor is being visited * @since 0.1 */ -public abstract class KeyValueExpressionVisitor extends StatelessTraverseAllExpressionVisitor { - @Override - abstract public Void visit(KeyValueColumnExpression node); +public abstract class KeyValueExpressionVisitor + extends StatelessTraverseAllExpressionVisitor { + @Override + abstract public Void visit(KeyValueColumnExpression node); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ProjectedColumnExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ProjectedColumnExpressionVisitor.java index 2380c6b3ce7..b2cb2b24115 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ProjectedColumnExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ProjectedColumnExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,8 @@ import org.apache.phoenix.expression.ProjectedColumnExpression; -public abstract class ProjectedColumnExpressionVisitor extends StatelessTraverseAllExpressionVisitor { - @Override - abstract public Void visit(ProjectedColumnExpression node); +public abstract class ProjectedColumnExpressionVisitor + extends StatelessTraverseAllExpressionVisitor { + @Override + abstract public Void visit(ProjectedColumnExpression node); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ReplaceArrayFunctionExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ReplaceArrayFunctionExpressionVisitor.java index 2a460a43049..3edbf3874ba 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ReplaceArrayFunctionExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/ReplaceArrayFunctionExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,23 +24,23 @@ import org.apache.phoenix.expression.function.ScalarFunction; public class ReplaceArrayFunctionExpressionVisitor extends CloneExpressionVisitor { - private Map replacementMap; + private Map replacementMap; - public ReplaceArrayFunctionExpressionVisitor(Map replacementMap) { - this.replacementMap = replacementMap; - } + public ReplaceArrayFunctionExpressionVisitor(Map replacementMap) { + this.replacementMap = replacementMap; + } - @Override - public boolean isCloneNode(Expression node, List children) { - return !children.equals(node.getChildren()); - } + @Override + public boolean isCloneNode(Expression node, List children) { + return !children.equals(node.getChildren()); + } - @Override - public Expression visitLeave(ScalarFunction node, List l) { - Expression replacement = replacementMap.get(node); - if (replacement != null) { - return replacement; - } - return super.visitLeave(node, l); + @Override + public Expression visitLeave(ScalarFunction node, List l) { + Expression replacement = replacementMap.get(node); + if (replacement != null) { + return replacement; } + return super.visitLeave(node, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/RowKeyExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/RowKeyExpressionVisitor.java index 4b78550789e..ee193f02f84 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/RowKeyExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/RowKeyExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,18 +19,12 @@ import org.apache.phoenix.expression.RowKeyColumnExpression; - - - /** - * - * Implementation of ExpressionVisitor where only a RowKeyColumnExpression (i.e. - * a reference to a column that makes up the row key) is being visited, - * - * + * Implementation of ExpressionVisitor where only a RowKeyColumnExpression (i.e. a reference to a + * column that makes up the row key) is being visited, * @since 0.1 */ public abstract class RowKeyExpressionVisitor extends StatelessTraverseAllExpressionVisitor { - @Override - abstract public Void visit(RowKeyColumnExpression node); + @Override + abstract public Void visit(RowKeyColumnExpression node); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java index 7981a9831e7..f9bf7896c41 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/SingleAggregateFunctionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,17 +22,12 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.SingleAggregateFunction; - - /** - * - * Implementation of ExpressionVisitor where only SingleAggregateFunction - * instances are visited - * - * + * Implementation of ExpressionVisitor where only SingleAggregateFunction instances are visited * @since 0.1 */ -public abstract class SingleAggregateFunctionVisitor extends StatelessTraverseAllExpressionVisitor { - @Override - abstract public Iterator visitEnter(SingleAggregateFunction node); +public abstract class SingleAggregateFunctionVisitor + extends StatelessTraverseAllExpressionVisitor { + @Override + abstract public Iterator visitEnter(SingleAggregateFunction node); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseAllExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseAllExpressionVisitor.java index f5615be9e02..f63dab6a254 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseAllExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseAllExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,149 +51,149 @@ public class StatelessTraverseAllExpressionVisitor extends TraverseAllExpressionVisitor { - @Override - public E visitLeave(AndExpression node, List l) { - return null; - } - - @Override - public E visitLeave(OrExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ScalarFunction node, List l) { - return null; - } - - @Override - public E visitLeave(ComparisonExpression node, List l) { - return null; - } - - @Override - public E visitLeave(LikeExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SingleAggregateFunction node, List l) { - return null; - } - - @Override - public E visitLeave(CaseExpression node, List l) { - return null; - } - - @Override - public E visitLeave(NotExpression node, List l) { - return null; - } - - @Override - public E visitLeave(IsNullExpression node, List l) { - return null; - } - - @Override - public E visitLeave(InListExpression node, List l) { - return null; - } - - @Override - public E visit(CorrelateVariableFieldAccessExpression node) { - return null; - } - - @Override - public E visit(LiteralExpression node) { - return null; - } - - @Override - public E visit(RowKeyColumnExpression node) { - return null; - } - - @Override - public E visit(KeyValueColumnExpression node) { - return null; - } - - @Override - public E visit(SingleCellColumnExpression node) { - return null; - } - - @Override - public E visit(ProjectedColumnExpression node) { - return null; - } - - @Override - public E visitLeave(AddExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SubtractExpression node, 
List l) { - return null; - } - - @Override - public E visitLeave(MultiplyExpression node, List l) { - return null; - } - - @Override - public E visitLeave(DivideExpression node, List l) { - return null; - } - - @Override - public E visitLeave(StringConcatExpression node, List l) { - return null; - } - - @Override - public E visitLeave(RowValueConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(CoerceExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SingleCellConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ModulusExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayAnyComparisonExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayElemRefExpression node, List l) { - return null; - } - - @Override - public E visit(SequenceValueExpression node) { - return null; - } + @Override + public E visitLeave(AndExpression node, List l) { + return null; + } + + @Override + public E visitLeave(OrExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ScalarFunction node, List l) { + return null; + } + + @Override + public E visitLeave(ComparisonExpression node, List l) { + return null; + } + + @Override + public E visitLeave(LikeExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SingleAggregateFunction node, List l) { + return null; + } + + @Override + public E visitLeave(CaseExpression node, List l) { + return null; + } + + @Override + public E visitLeave(NotExpression node, List l) { + return null; + } + + @Override + public E visitLeave(IsNullExpression node, List l) { + return null; + } + + @Override + public E visitLeave(InListExpression node, List l) { + return null; + } + + @Override + public E visit(CorrelateVariableFieldAccessExpression node) { + return null; + } + + @Override + public E visit(LiteralExpression node) { + return null; + } + + @Override + public E visit(RowKeyColumnExpression node) { + return null; + } + + @Override + public E visit(KeyValueColumnExpression node) { + return null; + } + + @Override + public E visit(SingleCellColumnExpression node) { + return null; + } + + @Override + public E visit(ProjectedColumnExpression node) { + return null; + } + + @Override + public E visitLeave(AddExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SubtractExpression node, List l) { + return null; + } + + @Override + public E visitLeave(MultiplyExpression node, List l) { + return null; + } + + @Override + public E visitLeave(DivideExpression node, List l) { + return null; + } + + @Override + public E visitLeave(StringConcatExpression node, List l) { + return null; + } + + @Override + public E visitLeave(RowValueConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(CoerceExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SingleCellConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ModulusExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayAnyComparisonExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayElemRefExpression node, List l) { + return null; + } 
+ + @Override + public E visit(SequenceValueExpression node) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseNoExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseNoExpressionVisitor.java index 7f447b3d2bd..a82b184653a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseNoExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/StatelessTraverseNoExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,149 +51,149 @@ public class StatelessTraverseNoExpressionVisitor extends TraverseNoExpressionVisitor { - @Override - public E visitLeave(AndExpression node, List l) { - return null; - } - - @Override - public E visitLeave(OrExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ScalarFunction node, List l) { - return null; - } - - @Override - public E visitLeave(ComparisonExpression node, List l) { - return null; - } - - @Override - public E visitLeave(LikeExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SingleAggregateFunction node, List l) { - return null; - } - - @Override - public E visitLeave(CaseExpression node, List l) { - return null; - } - - @Override - public E visitLeave(NotExpression node, List l) { - return null; - } - - @Override - public E visitLeave(IsNullExpression node, List l) { - return null; - } - - @Override - public E visitLeave(InListExpression node, List l) { - return null; - } - - @Override - public E visit(CorrelateVariableFieldAccessExpression node) { - return null; - } - - @Override - public E visit(LiteralExpression node) { - return null; - } - - @Override - public E visit(RowKeyColumnExpression node) { - return null; - } - - @Override - public E visit(SingleCellColumnExpression node) { - return null; - } - - @Override - public E visit(KeyValueColumnExpression node) { - return null; - } - - @Override - public E visit(ProjectedColumnExpression node) { - return null; - } - - @Override - public E visitLeave(AddExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SubtractExpression node, List l) { - return null; - } - - @Override - public E visitLeave(MultiplyExpression node, List l) { - return null; - } - - @Override - public E visitLeave(DivideExpression node, List l) { - return null; - } - - @Override - public E visitLeave(StringConcatExpression node, List l) { - return null; - } - - @Override - public E visitLeave(RowValueConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(CoerceExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(SingleCellConstructorExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ModulusExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayAnyComparisonExpression node, List l) { - return null; - } - - @Override - public E visitLeave(ArrayElemRefExpression 
node, List l) { - return null; - } - - @Override - public E visit(SequenceValueExpression node) { - return null; - } + @Override + public E visitLeave(AndExpression node, List l) { + return null; + } + + @Override + public E visitLeave(OrExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ScalarFunction node, List l) { + return null; + } + + @Override + public E visitLeave(ComparisonExpression node, List l) { + return null; + } + + @Override + public E visitLeave(LikeExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SingleAggregateFunction node, List l) { + return null; + } + + @Override + public E visitLeave(CaseExpression node, List l) { + return null; + } + + @Override + public E visitLeave(NotExpression node, List l) { + return null; + } + + @Override + public E visitLeave(IsNullExpression node, List l) { + return null; + } + + @Override + public E visitLeave(InListExpression node, List l) { + return null; + } + + @Override + public E visit(CorrelateVariableFieldAccessExpression node) { + return null; + } + + @Override + public E visit(LiteralExpression node) { + return null; + } + + @Override + public E visit(RowKeyColumnExpression node) { + return null; + } + + @Override + public E visit(SingleCellColumnExpression node) { + return null; + } + + @Override + public E visit(KeyValueColumnExpression node) { + return null; + } + + @Override + public E visit(ProjectedColumnExpression node) { + return null; + } + + @Override + public E visitLeave(AddExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SubtractExpression node, List l) { + return null; + } + + @Override + public E visitLeave(MultiplyExpression node, List l) { + return null; + } + + @Override + public E visitLeave(DivideExpression node, List l) { + return null; + } + + @Override + public E visitLeave(StringConcatExpression node, List l) { + return null; + } + + @Override + public E visitLeave(RowValueConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(CoerceExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(SingleCellConstructorExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ModulusExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayAnyComparisonExpression node, List l) { + return null; + } + + @Override + public E visitLeave(ArrayElemRefExpression node, List l) { + return null; + } + + @Override + public E visit(SequenceValueExpression node) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java index 126be8d733d..a975f355a7b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseAllExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,35 +23,32 @@ import org.apache.phoenix.expression.Expression; - - - public abstract class TraverseAllExpressionVisitor extends BaseExpressionVisitor { - @Override - public Iterator defaultIterator(Expression node) { - final List children = node.getChildren(); - return new Iterator() { - private int position; - - @Override - public final boolean hasNext() { - return position < children.size(); - } - - @Override - public final Expression next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return children.get(position++); - } - - @Override - public final void remove() { - throw new UnsupportedOperationException(); - } - }; - } + @Override + public Iterator defaultIterator(Expression node) { + final List children = node.getChildren(); + return new Iterator() { + private int position; + + @Override + public final boolean hasNext() { + return position < children.size(); + } + + @Override + public final Expression next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return children.get(position++); + } + + @Override + public final void remove() { + throw new UnsupportedOperationException(); + } + }; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java index 37a1255f5c6..a5912f31451 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/visitor/TraverseNoExpressionVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,9 +24,9 @@ public abstract class TraverseNoExpressionVisitor extends BaseExpressionVisitor { - @Override - public Iterator defaultIterator(Expression node) { - return Collections.emptyIterator(); - } + @Override + public Iterator defaultIterator(Expression node) { + return Collections.emptyIterator(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/AllVersionsIndexRebuildFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/AllVersionsIndexRebuildFilter.java index f52c9336793..d618de69f0a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/AllVersionsIndexRebuildFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/AllVersionsIndexRebuildFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,28 +23,27 @@ import org.apache.hadoop.hbase.filter.Filter; /** - * This filter overrides the behavior of delegate so that we do not jump to the next - * column as soon as we find a value for a column but rather include all versions which is - * needed for rebuilds. + * This filter overrides the behavior of delegate so that we do not jump to the next column as soon + * as we find a value for a column but rather include all versions which is needed for rebuilds. */ public class AllVersionsIndexRebuildFilter extends DelegateFilter { - public AllVersionsIndexRebuildFilter(Filter originalFilter) { - super(originalFilter); - } + public AllVersionsIndexRebuildFilter(Filter originalFilter) { + super(originalFilter); + } - @Override - public ReturnCode filterKeyValue(Cell v) throws IOException { - return filterCell(v); - } + @Override + public ReturnCode filterKeyValue(Cell v) throws IOException { + return filterCell(v); + } - @Override - public ReturnCode filterCell(Cell v) throws IOException { - ReturnCode delegateCode = super.filterCell(v); - if (delegateCode == ReturnCode.INCLUDE_AND_NEXT_COL) { - return ReturnCode.INCLUDE; - } else { - return delegateCode; - } + @Override + public ReturnCode filterCell(Cell v) throws IOException { + ReturnCode delegateCode = super.filterCell(v); + if (delegateCode == ReturnCode.INCLUDE_AND_NEXT_COL) { + return ReturnCode.INCLUDE; + } else { + return delegateCode; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java index 87e5c136d52..4e9db4c926a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,104 +32,99 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ClientUtil; - /** - * - * Base class for filter that evaluates a WHERE clause expression. - * - * Subclass is expected to implement filterRow() method - * + * Base class for filter that evaluates a WHERE clause expression. 
Subclass is expected to implement + * filterRow() method * @since 0.1 */ abstract public class BooleanExpressionFilter extends FilterBase implements Writable { - protected Expression expression; - private ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - - public BooleanExpressionFilter() { - } + protected Expression expression; + private ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - public BooleanExpressionFilter(Expression expression) { - this.expression = expression; - } + public BooleanExpressionFilter() { + } - public Expression getExpression() { - return expression; - } - - @Override - public boolean hasFilterRow() { - return true; - } + public BooleanExpressionFilter(Expression expression) { + this.expression = expression; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + expression.hashCode(); - return result; - } + public Expression getExpression() { + return expression; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - BooleanExpressionFilter other = (BooleanExpressionFilter)obj; - if (!expression.equals(other.expression)) return false; - return true; - } + @Override + public boolean hasFilterRow() { + return true; + } - @Override - public String toString() { - return expression.toString(); - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + expression.hashCode(); + return result; + } - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Returns null by design.") - protected Boolean evaluate(Tuple input) { - try { - if (!expression.evaluate(input, tempPtr)) { - return null; - } - } catch (IllegalDataException e) { - return Boolean.FALSE; - } - // If the entire Boolean expression evaluated to completion (evaluate returned true), - // but the result was SQL NULL, treat it as Java Boolean FALSE rather than returning null, - // which is used above to indicate incomplete evaluation. 
- return Boolean.TRUE.equals(expression.getDataType().toObject(tempPtr)); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + BooleanExpressionFilter other = (BooleanExpressionFilter) obj; + if (!expression.equals(other.expression)) return false; + return true; + } - @Override - public void readFields(DataInput input) throws IOException { - try { - expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); - expression.reset(); // Initializes expression tree for partial evaluation - } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry - ClientUtil.throwIOException("BooleanExpressionFilter failed during reading", t); - } - } + @Override + public String toString() { + return expression.toString(); + } - @Override - public void write(DataOutput output) throws IOException { - try { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry - ClientUtil.throwIOException("BooleanExpressionFilter failed during writing", t); - } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Returns null by design.") + protected Boolean evaluate(Tuple input) { + try { + if (!expression.evaluate(input, tempPtr)) { + return null; + } + } catch (IllegalDataException e) { + return Boolean.FALSE; } + // If the entire Boolean expression evaluated to completion (evaluate returned true), + // but the result was SQL NULL, treat it as Java Boolean FALSE rather than returning null, + // which is used above to indicate incomplete evaluation. 
+ return Boolean.TRUE.equals(expression.getDataType().toObject(tempPtr)); + } - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); + @Override + public void readFields(DataInput input) throws IOException { + try { + expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + expression.reset(); // Initializes expression tree for partial evaluation + } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry + ClientUtil.throwIOException("BooleanExpressionFilter failed during reading", t); } + } - @Override - public void reset() { - expression.reset(); + @Override + public void write(DataOutput output) throws IOException { + try { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry + ClientUtil.throwIOException("BooleanExpressionFilter failed during writing", t); } + } + + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } + + @Override + public void reset() { + expression.reset(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/ColumnProjectionFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/ColumnProjectionFilter.java index b58b9b8a9c7..69c2c076232 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/ColumnProjectionFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/ColumnProjectionFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,8 +28,6 @@ import java.util.TreeMap; import java.util.TreeSet; -import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -41,170 +39,176 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.util.EncodedColumnsUtil; /** - * When selecting specific columns in a SELECT query, this filter passes only selected columns - * back to client. - * + * When selecting specific columns in a SELECT query, this filter passes only selected columns back + * to client. 
* @since 3.0 */ public class ColumnProjectionFilter extends FilterBase implements Writable { - private byte[] emptyCFName; - private Map> columnsTracker; - private Set conditionOnlyCfs; - private boolean usesEncodedColumnNames; - private byte[] emptyKVQualifier; - - public ColumnProjectionFilter() { - - } - - public ColumnProjectionFilter(byte[] emptyCFName, - Map> columnsTracker, - Set conditionOnlyCfs, boolean usesEncodedColumnNames) { - this.emptyCFName = emptyCFName; - this.columnsTracker = columnsTracker; - this.conditionOnlyCfs = conditionOnlyCfs; - this.usesEncodedColumnNames = usesEncodedColumnNames; - this.emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst(); - } - - @Override - public void readFields(DataInput input) throws IOException { - this.emptyCFName = WritableUtils.readCompressedByteArray(input); - int familyMapSize = WritableUtils.readVInt(input); - assert familyMapSize > 0; - columnsTracker = new TreeMap>(); - while (familyMapSize > 0) { - byte[] cf = WritableUtils.readCompressedByteArray(input); - int qualifiersSize = WritableUtils.readVInt(input); - NavigableSet qualifiers = null; - if (qualifiersSize > 0) { - qualifiers = new TreeSet(); - while (qualifiersSize > 0) { - qualifiers.add(new ImmutableBytesPtr(WritableUtils.readCompressedByteArray(input))); - qualifiersSize--; - } - } - columnsTracker.put(new ImmutableBytesPtr(cf), qualifiers); - familyMapSize--; - } - int conditionOnlyCfsSize = WritableUtils.readVInt(input); - usesEncodedColumnNames = conditionOnlyCfsSize > 0; - emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst(); - conditionOnlyCfsSize = Math.abs(conditionOnlyCfsSize) - 1; // restore to the actual value. - this.conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); - while (conditionOnlyCfsSize > 0) { - this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input)); - conditionOnlyCfsSize--; - } - } - - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeCompressedByteArray(output, this.emptyCFName); - WritableUtils.writeVInt(output, this.columnsTracker.size()); - for (Entry> entry : this.columnsTracker.entrySet()) { - // write family name - WritableUtils.writeCompressedByteArray(output, entry.getKey().copyBytes()); - int qaulsSize = entry.getValue() == null ? 0 : entry.getValue().size(); - WritableUtils.writeVInt(output, qaulsSize); - if (qaulsSize > 0) { - for (ImmutableBytesPtr cq : entry.getValue()) { - // write qualifier name - WritableUtils.writeCompressedByteArray(output, cq.copyBytes()); - } - } - } - // Encode usesEncodedColumnNames in conditionOnlyCfs size. - WritableUtils.writeVInt(output, (this.conditionOnlyCfs.size() + 1) * (usesEncodedColumnNames ? 
1 : -1)); - for (byte[] f : this.conditionOnlyCfs) { - WritableUtils.writeCompressedByteArray(output, f); + private byte[] emptyCFName; + private Map> columnsTracker; + private Set conditionOnlyCfs; + private boolean usesEncodedColumnNames; + private byte[] emptyKVQualifier; + + public ColumnProjectionFilter() { + + } + + public ColumnProjectionFilter(byte[] emptyCFName, + Map> columnsTracker, + Set conditionOnlyCfs, boolean usesEncodedColumnNames) { + this.emptyCFName = emptyCFName; + this.columnsTracker = columnsTracker; + this.conditionOnlyCfs = conditionOnlyCfs; + this.usesEncodedColumnNames = usesEncodedColumnNames; + this.emptyKVQualifier = + EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst(); + } + + @Override + public void readFields(DataInput input) throws IOException { + this.emptyCFName = WritableUtils.readCompressedByteArray(input); + int familyMapSize = WritableUtils.readVInt(input); + assert familyMapSize > 0; + columnsTracker = new TreeMap>(); + while (familyMapSize > 0) { + byte[] cf = WritableUtils.readCompressedByteArray(input); + int qualifiersSize = WritableUtils.readVInt(input); + NavigableSet qualifiers = null; + if (qualifiersSize > 0) { + qualifiers = new TreeSet(); + while (qualifiersSize > 0) { + qualifiers.add(new ImmutableBytesPtr(WritableUtils.readCompressedByteArray(input))); + qualifiersSize--; } - -} - - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); + } + columnsTracker.put(new ImmutableBytesPtr(cf), qualifiers); + familyMapSize--; } - - public static ColumnProjectionFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (ColumnProjectionFilter)Writables.getWritable(pbBytes, new ColumnProjectionFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + int conditionOnlyCfsSize = WritableUtils.readVInt(input); + usesEncodedColumnNames = conditionOnlyCfsSize > 0; + emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst(); + conditionOnlyCfsSize = Math.abs(conditionOnlyCfsSize) - 1; // restore to the actual value. 
+ this.conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); + while (conditionOnlyCfsSize > 0) { + this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input)); + conditionOnlyCfsSize--; } - - // "ptr" to be used for one time comparisons in filterRowCells - private ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - @Override - public void filterRowCells(List kvs) throws IOException { - if (kvs.isEmpty()) return; - Cell firstKV = kvs.get(0); - Iterables.removeIf(kvs, new Predicate() { - @Override - public boolean apply(Cell kv) { - ptr.set(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()); - if (columnsTracker.containsKey(ptr)) { - Set cols = columnsTracker.get(ptr); - ptr.set(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); - if (cols != null && !(cols.contains(ptr))) { - return true; - } - } else { - return true; - } - return false; - } - }); - // make sure we're not holding to any of the byte[]'s - ptr.set(HConstants.EMPTY_BYTE_ARRAY); - if (kvs.isEmpty()) { - kvs.add(new KeyValue(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), - this.emptyCFName, 0, this.emptyCFName.length, emptyKVQualifier, 0, - emptyKVQualifier.length, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0)); + } + + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeCompressedByteArray(output, this.emptyCFName); + WritableUtils.writeVInt(output, this.columnsTracker.size()); + for (Entry> entry : this.columnsTracker + .entrySet()) { + // write family name + WritableUtils.writeCompressedByteArray(output, entry.getKey().copyBytes()); + int qaulsSize = entry.getValue() == null ? 0 : entry.getValue().size(); + WritableUtils.writeVInt(output, qaulsSize); + if (qaulsSize > 0) { + for (ImmutableBytesPtr cq : entry.getValue()) { + // write qualifier name + WritableUtils.writeCompressedByteArray(output, cq.copyBytes()); } + } } - - @Override - public boolean hasFilterRow() { - return true; - } - - @Override - public boolean isFamilyEssential(byte[] name) { - return conditionOnlyCfs.isEmpty() || this.conditionOnlyCfs.contains(name); + // Encode usesEncodedColumnNames in conditionOnlyCfs size. + WritableUtils.writeVInt(output, + (this.conditionOnlyCfs.size() + 1) * (usesEncodedColumnNames ? 
1 : -1)); + for (byte[] f : this.conditionOnlyCfs) { + WritableUtils.writeCompressedByteArray(output, f); } - @Override - public String toString() { - return ""; - } + } - @Override - public ReturnCode filterKeyValue(Cell ignored) throws IOException { - return filterCell(ignored); - } + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } - @Override - public ReturnCode filterCell(Cell ignored) throws IOException { - return ReturnCode.INCLUDE_AND_NEXT_COL; + public static ColumnProjectionFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (ColumnProjectionFilter) Writables.getWritable(pbBytes, new ColumnProjectionFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } - - public void addTrackedColumn(ImmutableBytesPtr cf, ImmutableBytesPtr cq) { - NavigableSet columns = columnsTracker.get(cf); - - if (columns == null) { - if (columnsTracker.containsKey(cf)) { - return; - } - columns = new TreeSet<>(); - columnsTracker.put(cf, columns); + } + + // "ptr" to be used for one time comparisons in filterRowCells + private ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + + @Override + public void filterRowCells(List kvs) throws IOException { + if (kvs.isEmpty()) return; + Cell firstKV = kvs.get(0); + Iterables.removeIf(kvs, new Predicate() { + @Override + public boolean apply(Cell kv) { + ptr.set(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()); + if (columnsTracker.containsKey(ptr)) { + Set cols = columnsTracker.get(ptr); + ptr.set(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + if (cols != null && !(cols.contains(ptr))) { + return true; + } + } else { + return true; } - columns.add(cq); + return false; + } + }); + // make sure we're not holding to any of the byte[]'s + ptr.set(HConstants.EMPTY_BYTE_ARRAY); + if (kvs.isEmpty()) { + kvs.add(new KeyValue(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), + this.emptyCFName, 0, this.emptyCFName.length, emptyKVQualifier, 0, emptyKVQualifier.length, + HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0)); + } + } + + @Override + public boolean hasFilterRow() { + return true; + } + + @Override + public boolean isFamilyEssential(byte[] name) { + return conditionOnlyCfs.isEmpty() || this.conditionOnlyCfs.contains(name); + } + + @Override + public String toString() { + return ""; + } + + @Override + public ReturnCode filterKeyValue(Cell ignored) throws IOException { + return filterCell(ignored); + } + + @Override + public ReturnCode filterCell(Cell ignored) throws IOException { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + + public void addTrackedColumn(ImmutableBytesPtr cf, ImmutableBytesPtr cq) { + NavigableSet columns = columnsTracker.get(cf); + + if (columns == null) { + if (columnsTracker.containsKey(cf)) { + return; + } + columns = new TreeSet<>(); + columnsTracker.put(cf, columns); } -} \ No newline at end of file + columns.add(cq); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DelegateFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DelegateFilter.java index 1f732419720..fcfb7bd96a8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DelegateFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DelegateFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,95 +21,94 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; public class DelegateFilter extends FilterBase { - protected Filter delegate = null; - - public DelegateFilter(Filter delegate) { - this.delegate = delegate; - } - - @Override - public void reset() throws IOException { - delegate.reset(); - } - - @Override - public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException { - return delegate.filterRowKey(buffer, offset, length); - } - - @Override - public boolean filterRowKey(Cell cell) throws IOException { - return delegate.filterRowKey(cell); - } - - @Override - public ReturnCode filterCell(Cell v) throws IOException { - return delegate.filterCell(v); - } - - @Override - public boolean filterAllRemaining() throws IOException { - return delegate.filterAllRemaining(); - } - - @Override - public ReturnCode filterKeyValue(Cell v) throws IOException { - return delegate.filterKeyValue(v); - } - - @Override - public Cell transformCell(Cell v) throws IOException { - return delegate.transformCell(v); - } - - @Override - public void filterRowCells(List kvs) throws IOException { - delegate.filterRowCells(kvs); - } - - @Override - public boolean hasFilterRow() { - return delegate.hasFilterRow(); - } - - @Override - public boolean filterRow() throws IOException { - return delegate.filterRow(); - } - - @Override - public Cell getNextCellHint(Cell currentKV) throws IOException { - return delegate.getNextCellHint(currentKV); - } - - @Override - public boolean isFamilyEssential(byte[] name) throws IOException { - return delegate.isFamilyEssential(name); - } - - @Override - public byte[] toByteArray() throws IOException { - return delegate.toByteArray(); - } - - @Override - public String toString() { - return delegate.toString(); - } - - @Override - public void setReversed(boolean reversed) { - delegate.setReversed(reversed); - } - - @Override - public boolean isReversed() { - return delegate.isReversed(); - } + protected Filter delegate = null; + + public DelegateFilter(Filter delegate) { + this.delegate = delegate; + } + + @Override + public void reset() throws IOException { + delegate.reset(); + } + + @Override + public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException { + return delegate.filterRowKey(buffer, offset, length); + } + + @Override + public boolean filterRowKey(Cell cell) throws IOException { + return delegate.filterRowKey(cell); + } + + @Override + public ReturnCode filterCell(Cell v) throws IOException { + return delegate.filterCell(v); + } + + @Override + public boolean filterAllRemaining() throws IOException { + return delegate.filterAllRemaining(); + } + + @Override + public ReturnCode filterKeyValue(Cell v) throws IOException { + return delegate.filterKeyValue(v); + } + + @Override + public Cell transformCell(Cell v) throws IOException { + return delegate.transformCell(v); + } + + @Override + public void filterRowCells(List kvs) throws IOException { + delegate.filterRowCells(kvs); + } + + @Override + public boolean hasFilterRow() { + return delegate.hasFilterRow(); + } + + @Override + public boolean 
filterRow() throws IOException { + return delegate.filterRow(); + } + + @Override + public Cell getNextCellHint(Cell currentKV) throws IOException { + return delegate.getNextCellHint(currentKV); + } + + @Override + public boolean isFamilyEssential(byte[] name) throws IOException { + return delegate.isFamilyEssential(name); + } + + @Override + public byte[] toByteArray() throws IOException { + return delegate.toByteArray(); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public void setReversed(boolean reversed) { + delegate.setReversed(reversed); + } + + @Override + public boolean isReversed() { + return delegate.isReversed(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java index e3e78d4c230..59b88c08153 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,139 +36,143 @@ import org.apache.phoenix.util.ByteUtil; public class DistinctPrefixFilter extends FilterBase implements Writable { - private static byte VERSION = 1; - - private int offset; - private RowKeySchema schema; - private int prefixLength; - private boolean filterAll = false; - private int lastPosition; - private final ImmutableBytesWritable lastKey = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY, -1, -1); - - public DistinctPrefixFilter() { - } - - public DistinctPrefixFilter(RowKeySchema schema, int prefixLength) { - this.schema = schema; - this.prefixLength = prefixLength; - } - - public void setOffset(int offset) { - this.offset = offset; + private static byte VERSION = 1; + + private int offset; + private RowKeySchema schema; + private int prefixLength; + private boolean filterAll = false; + private int lastPosition; + private final ImmutableBytesWritable lastKey = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY, -1, -1); + + public DistinctPrefixFilter() { + } + + public DistinctPrefixFilter(RowKeySchema schema, int prefixLength) { + this.schema = schema; + this.prefixLength = prefixLength; + } + + public void setOffset(int offset) { + this.offset = offset; + } + + @Override + public ReturnCode filterKeyValue(Cell v) throws IOException { + return filterCell(v); + } + + @Override + public ReturnCode filterCell(Cell v) throws IOException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + // First determine the prefix based on the schema + int maxOffset = + schema.iterator(v.getRowArray(), v.getRowOffset() + offset, v.getRowLength() - offset, ptr); + int position = schema.next(ptr, 0, maxOffset, prefixLength - 1); + + // now check whether we have seen this prefix before + if ( + lastKey.getLength() != ptr.getLength() || !Bytes.equals(ptr.get(), ptr.getOffset(), + ptr.getLength(), lastKey.get(), lastKey.getOffset(), ptr.getLength()) + ) { + // if we haven't seen this prefix, include the row and remember this prefix + lastKey.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + lastPosition = position 
- 1; + return ReturnCode.INCLUDE; } - - @Override - public ReturnCode filterKeyValue(Cell v) throws IOException { - return filterCell(v); - } - - @Override - public ReturnCode filterCell(Cell v) throws IOException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - // First determine the prefix based on the schema - int maxOffset = schema.iterator(v.getRowArray(), v.getRowOffset()+offset, v.getRowLength()-offset, ptr); - int position = schema.next(ptr, 0, maxOffset, prefixLength - 1); - - // now check whether we have seen this prefix before - if (lastKey.getLength() != ptr.getLength() || !Bytes.equals(ptr.get(), ptr.getOffset(), - ptr.getLength(), lastKey.get(), lastKey.getOffset(), ptr.getLength())) { - // if we haven't seen this prefix, include the row and remember this prefix - lastKey.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - lastPosition = position - 1; - return ReturnCode.INCLUDE; + // we've seen this prefix already, seek to the next + return ReturnCode.SEEK_NEXT_USING_HINT; + } + + @Override + public Cell getNextCellHint(Cell v) throws IOException { + Field field = schema.getField(prefixLength - 1); + PDataType type = field.getDataType(); + + ImmutableBytesWritable tmp; + // In the following we make sure we copy the key at most once + // Either because we have an offset, or when needed for nextKey + if (offset > 0) { + // make space to copy the missing offset, also 0-pad here if needed + // (since we're making a copy anyway) + // We need to pad all null columns, otherwise we'll potentially + // skip rows. + byte[] tmpKey = new byte[offset + lastKey.getLength() + + (reversed || type.isFixedWidth() || field.getSortOrder() == SortOrder.DESC ? 0 : 1) + + (prefixLength - 1 - lastPosition)]; + System.arraycopy(v.getRowArray(), v.getRowOffset(), tmpKey, 0, offset); + System.arraycopy(lastKey.get(), lastKey.getOffset(), tmpKey, offset, lastKey.getLength()); + tmp = new ImmutableBytesWritable(tmpKey); + if (!reversed) { + // calculate the next key, the above already 0-padded if needed + if (!ByteUtil.nextKey(tmp.get(), tmp.getOffset(), tmp.getLength())) { + filterAll = true; } - // we've seen this prefix already, seek to the next - return ReturnCode.SEEK_NEXT_USING_HINT; - } - - @Override - public Cell getNextCellHint(Cell v) throws IOException { - Field field = schema.getField(prefixLength - 1); - PDataType type = field.getDataType(); - - ImmutableBytesWritable tmp; - // In the following we make sure we copy the key at most once - // Either because we have an offset, or when needed for nextKey - if (offset > 0) { - // make space to copy the missing offset, also 0-pad here if needed - // (since we're making a copy anyway) - // We need to pad all null columns, otherwise we'll potentially - // skip rows. - byte[] tmpKey = new byte[offset + lastKey.getLength() + - (reversed || type.isFixedWidth() || field.getSortOrder() == SortOrder.DESC ? 
0 : 1) + (prefixLength - 1 - lastPosition)]; - System.arraycopy(v.getRowArray(), v.getRowOffset(), tmpKey, 0, offset); - System.arraycopy(lastKey.get(), lastKey.getOffset(), tmpKey, offset, lastKey.getLength()); - tmp = new ImmutableBytesWritable(tmpKey); - if (!reversed) { - // calculate the next key, the above already 0-padded if needed - if (!ByteUtil.nextKey(tmp.get(), tmp.getOffset(), tmp.getLength())) { - filterAll = true; - } - } + } + } else { + if (reversed) { + // simply seek right before the first occurrence of the row + tmp = lastKey; + } else { + if (type.isFixedWidth()) { + // copy the bytes, since nextKey will modify in place + tmp = new ImmutableBytesWritable(lastKey.copyBytes()); } else { - if (reversed) { - // simply seek right before the first occurrence of the row - tmp = lastKey; - } else { - if (type.isFixedWidth()) { - // copy the bytes, since nextKey will modify in place - tmp = new ImmutableBytesWritable(lastKey.copyBytes()); - } else { - // pad with a 0x00 byte (makes a copy) - tmp = new ImmutableBytesWritable(lastKey); - ByteUtil.nullPad(tmp, tmp.getLength() + prefixLength - lastPosition); - // Trim back length if: - // 1) field is descending since the separator byte if 0xFF - // 2) last key has trailing null - // Otherwise, in both cases we'd potentially be seeking to a row before - // our current key. - if (field.getSortOrder() == SortOrder.DESC || prefixLength - lastPosition > 1) { - tmp.set(tmp.get(),tmp.getOffset(),tmp.getLength()-1); - } - } - // calculate the next key - if (!ByteUtil.nextKey(tmp.get(), tmp.getOffset(), tmp.getLength())) { - filterAll = true; - } - } + // pad with a 0x00 byte (makes a copy) + tmp = new ImmutableBytesWritable(lastKey); + ByteUtil.nullPad(tmp, tmp.getLength() + prefixLength - lastPosition); + // Trim back length if: + // 1) field is descending since the separator byte if 0xFF + // 2) last key has trailing null + // Otherwise, in both cases we'd potentially be seeking to a row before + // our current key. 
+ if (field.getSortOrder() == SortOrder.DESC || prefixLength - lastPosition > 1) { + tmp.set(tmp.get(), tmp.getOffset(), tmp.getLength() - 1); + } } - return KeyValueUtil.createFirstOnRow(tmp.get(), tmp.getOffset(), tmp.getLength(), null, 0, 0, - null, 0, 0); - } - - @Override - public boolean filterAllRemaining() throws IOException { - return filterAll; - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeByte(VERSION); - schema.write(out); - out.writeInt(prefixLength); - } - - @Override - public void readFields(DataInput in) throws IOException { - in.readByte(); // ignore - schema = new RowKeySchema(); - schema.readFields(in); - prefixLength = in.readInt(); - } - - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); - } - - public static DistinctPrefixFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { - try { - return (DistinctPrefixFilter) Writables.getWritable(pbBytes, - new DistinctPrefixFilter()); - } catch (IOException e) { - throw new DeserializationException(e); + // calculate the next key + if (!ByteUtil.nextKey(tmp.get(), tmp.getOffset(), tmp.getLength())) { + filterAll = true; } + } + } + return KeyValueUtil.createFirstOnRow(tmp.get(), tmp.getOffset(), tmp.getLength(), null, 0, 0, + null, 0, 0); + } + + @Override + public boolean filterAllRemaining() throws IOException { + return filterAll; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeByte(VERSION); + schema.write(out); + out.writeInt(prefixLength); + } + + @Override + public void readFields(DataInput in) throws IOException { + in.readByte(); // ignore + schema = new RowKeySchema(); + schema.readFields(in); + prefixLength = in.readInt(); + } + + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } + + public static DistinctPrefixFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (DistinctPrefixFilter) Writables.getWritable(pbBytes, new DistinctPrefixFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EmptyColumnOnlyFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EmptyColumnOnlyFilter.java index b31ed263564..1a68e8db29e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EmptyColumnOnlyFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EmptyColumnOnlyFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.Iterator; import java.util.List; import org.apache.hadoop.hbase.Cell; @@ -32,87 +31,89 @@ import org.apache.phoenix.util.ScanUtil; /** - * This filter returns only the empty column cell if it exists. If an empty column cell - * does not exist, then it returns the first cell, that is, behaves like FirstKeyOnlyFilter + * This filter returns only the empty column cell if it exists. 
If an empty column cell does not + * exist, then it returns the first cell, that is, behaves like FirstKeyOnlyFilter */ public class EmptyColumnOnlyFilter extends FilterBase implements Writable { - private byte[] emptyCF; - private byte[] emptyCQ; - private boolean found = false; - private boolean first = true; + private byte[] emptyCF; + private byte[] emptyCQ; + private boolean found = false; + private boolean first = true; - public EmptyColumnOnlyFilter() {} - public EmptyColumnOnlyFilter(byte[] emptyCF, byte[] emptyCQ) { - Preconditions.checkArgument(emptyCF != null, - "Column family must not be null"); - Preconditions.checkArgument(emptyCQ != null, - "Column qualifier must not be null"); - this.emptyCF = emptyCF; - this.emptyCQ = emptyCQ; - } + public EmptyColumnOnlyFilter() { + } + + public EmptyColumnOnlyFilter(byte[] emptyCF, byte[] emptyCQ) { + Preconditions.checkArgument(emptyCF != null, "Column family must not be null"); + Preconditions.checkArgument(emptyCQ != null, "Column qualifier must not be null"); + this.emptyCF = emptyCF; + this.emptyCQ = emptyCQ; + } + + @Override + public void reset() throws IOException { + found = false; + first = true; + } + + @Deprecated + @Override + public ReturnCode filterKeyValue(final Cell c) throws IOException { + return filterCell(c); + } - @Override - public void reset() throws IOException { - found = false; - first = true; + @Override + public ReturnCode filterCell(final Cell cell) throws IOException { + if (found) { + return ReturnCode.NEXT_ROW; } - @Deprecated - @Override - public ReturnCode filterKeyValue(final Cell c) throws IOException { - return filterCell(c); + if (ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ)) { + found = true; + return ReturnCode.INCLUDE; } - - @Override - public ReturnCode filterCell(final Cell cell) throws IOException { - if (found) { - return ReturnCode.NEXT_ROW; - } - if (ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ)) { - found = true; - return ReturnCode.INCLUDE; - } - if (first) { - first = false; - return ReturnCode.INCLUDE; - } - return ReturnCode.NEXT_COL; + if (first) { + first = false; + return ReturnCode.INCLUDE; } + return ReturnCode.NEXT_COL; + } - @Override - public void filterRowCells(List kvs) throws IOException { - if (kvs.size() > 1) { - kvs.remove(0); - } + @Override + public void filterRowCells(List kvs) throws IOException { + if (kvs.size() > 1) { + kvs.remove(0); } + } - public static EmptyColumnOnlyFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (EmptyColumnOnlyFilter) Writables.getWritable(pbBytes, new EmptyColumnOnlyFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + public static EmptyColumnOnlyFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (EmptyColumnOnlyFilter) Writables.getWritable(pbBytes, new EmptyColumnOnlyFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } - @Override - public void write(DataOutput out) throws IOException { - out.writeInt(emptyCF.length); - out.write(emptyCF); - out.writeInt(emptyCQ.length); - out.write(emptyCQ); - } + @Override + public void write(DataOutput out) throws IOException { + out.writeInt(emptyCF.length); + out.write(emptyCF); + out.writeInt(emptyCQ.length); + out.write(emptyCQ); + } - @Override - public void readFields(DataInput in) throws IOException { - int length = in.readInt(); - emptyCF = new byte[length]; - in.readFully(emptyCF, 0, length); - length = in.readInt(); - emptyCQ = new 
byte[length]; - in.readFully(emptyCQ, 0, length); - } + @Override + public void readFields(DataInput in) throws IOException { + int length = in.readInt(); + emptyCF = new byte[length]; + in.readFully(emptyCF, 0, length); + length = in.readInt(); + emptyCQ = new byte[length]; + in.readFully(emptyCQ, 0, length); + } - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); - } -} \ No newline at end of file + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilter.java index ef1d161dea5..bc3e6fb3a47 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,9 @@ */ package org.apache.phoenix.filter; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_BYTES; import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import java.io.DataInput; import java.io.DataOutput; @@ -40,129 +40,134 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; - import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; public class EncodedQualifiersColumnProjectionFilter extends FilterBase implements Writable { - private byte[] emptyCFName; - private BitSet trackedColumns; - private QualifierEncodingScheme encodingScheme; - private Set conditionOnlyCfs; - - public EncodedQualifiersColumnProjectionFilter() {} - - public EncodedQualifiersColumnProjectionFilter(byte[] emptyCFName, BitSet trackedColumns, Set conditionCfs, QualifierEncodingScheme encodingScheme) { - checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS, "Filter can only be used for encoded qualifiers"); - this.emptyCFName = emptyCFName; - this.trackedColumns = trackedColumns; - this.encodingScheme = encodingScheme; - this.conditionOnlyCfs = conditionCfs; + private byte[] emptyCFName; + private BitSet trackedColumns; + private QualifierEncodingScheme encodingScheme; + private Set conditionOnlyCfs; + + public EncodedQualifiersColumnProjectionFilter() { + } + + public EncodedQualifiersColumnProjectionFilter(byte[] emptyCFName, BitSet trackedColumns, + Set conditionCfs, QualifierEncodingScheme encodingScheme) { + checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS, + "Filter can only be used for encoded qualifiers"); + this.emptyCFName = emptyCFName; + this.trackedColumns = trackedColumns; + this.encodingScheme = encodingScheme; + this.conditionOnlyCfs = conditionCfs; + } + + 
@Override + public void readFields(DataInput input) throws IOException { + this.emptyCFName = WritableUtils.readCompressedByteArray(input); + int bitsetLongArraySize = WritableUtils.readVInt(input); + long[] bitsetLongArray = new long[bitsetLongArraySize]; + for (int i = 0; i < bitsetLongArraySize; i++) { + bitsetLongArray[i] = WritableUtils.readVLong(input); } - - @Override - public void readFields(DataInput input) throws IOException { - this.emptyCFName = WritableUtils.readCompressedByteArray(input); - int bitsetLongArraySize = WritableUtils.readVInt(input); - long[] bitsetLongArray = new long[bitsetLongArraySize]; - for (int i = 0; i < bitsetLongArraySize; i++) { - bitsetLongArray[i] = WritableUtils.readVLong(input); - } - this.trackedColumns = BitSet.valueOf(bitsetLongArray); - this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)]; - int conditionOnlyCfsSize = WritableUtils.readVInt(input); - this.conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); - while (conditionOnlyCfsSize > 0) { - this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input)); - conditionOnlyCfsSize--; - } + this.trackedColumns = BitSet.valueOf(bitsetLongArray); + this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)]; + int conditionOnlyCfsSize = WritableUtils.readVInt(input); + this.conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); + while (conditionOnlyCfsSize > 0) { + this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input)); + conditionOnlyCfsSize--; } - - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeCompressedByteArray(output, this.emptyCFName); - long[] longArrayOfBitSet = trackedColumns.toLongArray(); - WritableUtils.writeVInt(output, longArrayOfBitSet.length); - for (Long l : longArrayOfBitSet) { - WritableUtils.writeVLong(output, l); - } - WritableUtils.writeVInt(output, encodingScheme.ordinal()); - WritableUtils.writeVInt(output, this.conditionOnlyCfs.size()); - for (byte[] f : this.conditionOnlyCfs) { - WritableUtils.writeCompressedByteArray(output, f); - } + } + + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeCompressedByteArray(output, this.emptyCFName); + long[] longArrayOfBitSet = trackedColumns.toLongArray(); + WritableUtils.writeVInt(output, longArrayOfBitSet.length); + for (Long l : longArrayOfBitSet) { + WritableUtils.writeVLong(output, l); } - - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); + WritableUtils.writeVInt(output, encodingScheme.ordinal()); + WritableUtils.writeVInt(output, this.conditionOnlyCfs.size()); + for (byte[] f : this.conditionOnlyCfs) { + WritableUtils.writeCompressedByteArray(output, f); } - - public static EncodedQualifiersColumnProjectionFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (EncodedQualifiersColumnProjectionFilter)Writables.getWritable(pbBytes, new EncodedQualifiersColumnProjectionFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + } + + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } + + public static EncodedQualifiersColumnProjectionFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (EncodedQualifiersColumnProjectionFilter) Writables.getWritable(pbBytes, + new EncodedQualifiersColumnProjectionFilter()); + } catch (IOException e) { + throw new 
DeserializationException(e); } - - @Override - public void filterRowCells(List kvs) throws IOException { - if (kvs.isEmpty()) return; - Cell firstKV = kvs.get(0); - Iterables.removeIf(kvs, new Predicate() { - @Override - public boolean apply(Cell kv) { - int qualifier = encodingScheme.decode(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); - return !trackedColumns.get(qualifier); - } - }); - if (kvs.isEmpty()) { - kvs.add(new KeyValue(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), - this.emptyCFName, 0, this.emptyCFName.length, ENCODED_EMPTY_COLUMN_BYTES, 0, - ENCODED_EMPTY_COLUMN_BYTES.length, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0)); - } + } + + @Override + public void filterRowCells(List kvs) throws IOException { + if (kvs.isEmpty()) return; + Cell firstKV = kvs.get(0); + Iterables.removeIf(kvs, new Predicate() { + @Override + public boolean apply(Cell kv) { + int qualifier = encodingScheme.decode(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength()); + return !trackedColumns.get(qualifier); + } + }); + if (kvs.isEmpty()) { + kvs.add(new KeyValue(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), + this.emptyCFName, 0, this.emptyCFName.length, ENCODED_EMPTY_COLUMN_BYTES, 0, + ENCODED_EMPTY_COLUMN_BYTES.length, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0)); } - - @Override - public boolean hasFilterRow() { - return true; + } + + @Override + public boolean hasFilterRow() { + return true; + } + + @Override + public boolean isFamilyEssential(byte[] name) { + return conditionOnlyCfs.isEmpty() || this.conditionOnlyCfs.contains(name); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(100); + sb.append(String.format("EmptyCFName: %s, ", Bytes.toStringBinary(this.emptyCFName))); + sb.append(String.format("EncodingScheme: %s, ", this.encodingScheme)); + sb.append(String.format("TrackedColumns: %s, ", this.trackedColumns)); + sb.append("ConditionOnlyCfs: "); + for (byte[] conditionOnlyCf : this.conditionOnlyCfs) { + sb.append(String.format("%s, ", Bytes.toStringBinary(conditionOnlyCf))); } + return sb.toString(); + } - @Override - public boolean isFamilyEssential(byte[] name) { - return conditionOnlyCfs.isEmpty() || this.conditionOnlyCfs.contains(name); - } + @Override + public ReturnCode filterKeyValue(Cell ignored) throws IOException { + return filterCell(ignored); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(100); - sb.append(String.format("EmptyCFName: %s, ", Bytes.toStringBinary(this.emptyCFName))); - sb.append(String.format("EncodingScheme: %s, ", this.encodingScheme)); - sb.append(String.format("TrackedColumns: %s, ", this.trackedColumns)); - sb.append("ConditionOnlyCfs: "); - for (byte[] conditionOnlyCf : this.conditionOnlyCfs) { - sb.append(String.format("%s, ", Bytes.toStringBinary(conditionOnlyCf))); - } - return sb.toString(); - } + @Override + public ReturnCode filterCell(Cell ignored) throws IOException { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } - @Override - public ReturnCode filterKeyValue(Cell ignored) throws IOException { - return filterCell(ignored); - } + public void addTrackedColumn(int qualifier) { + trackedColumns.set(qualifier); + } - @Override - public ReturnCode filterCell(Cell ignored) throws IOException { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } + interface ColumnTracker { - public void addTrackedColumn(int qualifier) { - trackedColumns.set(qualifier); - } - - interface 
ColumnTracker { - - } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java index 222e7cc159e..e01613851b2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCFCQKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,102 +24,110 @@ import org.apache.hadoop.hbase.util.Writables; import org.apache.phoenix.expression.Expression; - /** - * - * Filter that evaluates WHERE clause expression, used in the case where there - * are references to multiple column qualifiers over multiple column families. - * Also there same qualifier names in different families. - * + * Filter that evaluates WHERE clause expression, used in the case where there are references to + * multiple column qualifiers over multiple column families. Also there same qualifier names in + * different families. */ public class MultiCFCQKeyValueComparisonFilter extends MultiKeyValueComparisonFilter { - private final ImmutablePairBytesPtr ptr = new ImmutablePairBytesPtr(); - - public MultiCFCQKeyValueComparisonFilter() { + private final ImmutablePairBytesPtr ptr = new ImmutablePairBytesPtr(); + + public MultiCFCQKeyValueComparisonFilter() { + } + + public MultiCFCQKeyValueComparisonFilter(Expression expression, boolean allCFs, + byte[] essentialCF) { + super(expression, allCFs, essentialCF); + } + + @Override + protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, + int cqLength) { + ptr.set(cf, cfOffset, cfLength, cq, cqOffset, cqLength); + return ptr; + } + + @Override + protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, + int cqLength) { + + byte[] cfKey; + if (cfOffset == 0 && cf.length == cfLength) { + cfKey = cf; + } else { + // Copy bytes here, but figure cf names are typically a few bytes at most, + // so this will be better than creating an ImmutableBytesPtr + cfKey = new byte[cfLength]; + System.arraycopy(cf, cfOffset, cfKey, 0, cfLength); } - - public MultiCFCQKeyValueComparisonFilter(Expression expression, boolean allCFs, byte[] essentialCF) { - super(expression, allCFs, essentialCF); + cfSet.add(cfKey); + return new ImmutablePairBytesPtr(cf, cfOffset, cfLength, cq, cqOffset, cqLength); + } + + private static class ImmutablePairBytesPtr { + private byte[] bytes1; + private int offset1; + private int length1; + private byte[] bytes2; + private int offset2; + private int length2; + private int hashCode; + + private ImmutablePairBytesPtr() { } - @Override - protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength, - byte[] cq, int cqOffset, int cqLength) { - ptr.set(cf, cfOffset, cfLength, cq, cqOffset, cqLength); - return ptr; + private ImmutablePairBytesPtr(byte[] bytes1, int offset1, int length1, byte[] bytes2, + int offset2, int length2) { + set(bytes1, offset1, length1, bytes2, offset2, length2); } @Override - protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength, - byte[] 
cq, int cqOffset, int cqLength) { - - byte[] cfKey; - if (cfOffset == 0 && cf.length == cfLength) { - cfKey = cf; - } else { - // Copy bytes here, but figure cf names are typically a few bytes at most, - // so this will be better than creating an ImmutableBytesPtr - cfKey = new byte[cfLength]; - System.arraycopy(cf, cfOffset, cfKey, 0, cfLength); - } - cfSet.add(cfKey); - return new ImmutablePairBytesPtr(cf, cfOffset, cfLength, cq, cqOffset, cqLength); + public int hashCode() { + return hashCode; } - private static class ImmutablePairBytesPtr { - private byte[] bytes1; - private int offset1; - private int length1; - private byte[] bytes2; - private int offset2; - private int length2; - private int hashCode; - - private ImmutablePairBytesPtr() { - } - - private ImmutablePairBytesPtr(byte[] bytes1, int offset1, int length1, byte[] bytes2, int offset2, int length2) { - set(bytes1, offset1, length1, bytes2, offset2, length2); - } - - @Override - public int hashCode() { - return hashCode; - } - - public void set(byte[] bytes1, int offset1, int length1, byte[] bytes2, int offset2, int length2) { - this.bytes1 = bytes1; - this.offset1 = offset1; - this.length1 = length1; - this.bytes2 = bytes2; - this.offset2 = offset2; - this.length2 = length2; - int hash = 1; - for (int i = offset1; i < offset1 + length1; i++) - hash = (31 * hash) + bytes1[i]; - for (int i = offset2; i < offset2 + length2; i++) - hash = (31 * hash) + bytes2[i]; - hashCode = hash; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ImmutablePairBytesPtr that = (ImmutablePairBytesPtr)obj; - if (this.hashCode != that.hashCode) return false; - if (Bytes.compareTo(this.bytes2, this.offset2, this.length2, that.bytes2, that.offset2, that.length2) != 0) return false; - if (Bytes.compareTo(this.bytes1, this.offset1, this.length1, that.bytes1, that.offset1, that.length1) != 0) return false; - return true; - } + public void set(byte[] bytes1, int offset1, int length1, byte[] bytes2, int offset2, + int length2) { + this.bytes1 = bytes1; + this.offset1 = offset1; + this.length1 = length1; + this.bytes2 = bytes2; + this.offset2 = offset2; + this.length2 = length2; + int hash = 1; + for (int i = offset1; i < offset1 + length1; i++) + hash = (31 * hash) + bytes1[i]; + for (int i = offset2; i < offset2 + length2; i++) + hash = (31 * hash) + bytes2[i]; + hashCode = hash; } - public static MultiCFCQKeyValueComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (MultiCFCQKeyValueComparisonFilter)Writables.getWritable(pbBytes, new MultiCFCQKeyValueComparisonFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ImmutablePairBytesPtr that = (ImmutablePairBytesPtr) obj; + if (this.hashCode != that.hashCode) return false; + if ( + Bytes.compareTo(this.bytes2, this.offset2, this.length2, that.bytes2, that.offset2, + that.length2) != 0 + ) return false; + if ( + Bytes.compareTo(this.bytes1, this.offset1, this.length1, that.bytes1, that.offset1, + that.length1) != 0 + ) return false; + return true; + } + } + + public static MultiCFCQKeyValueComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (MultiCFCQKeyValueComparisonFilter) 
Writables.getWritable(pbBytes, + new MultiCFCQKeyValueComparisonFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java index 1a3a6e96692..a5611e16134 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiCQKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,47 +25,48 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; /** - * - * Filter that evaluates WHERE clause expression, used in the case where there - * are references to multiple unique column qualifiers over one or more column families. - * + * Filter that evaluates WHERE clause expression, used in the case where there are references to + * multiple unique column qualifiers over one or more column families. */ public class MultiCQKeyValueComparisonFilter extends MultiKeyValueComparisonFilter { - private ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + private ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - public MultiCQKeyValueComparisonFilter() { - } + public MultiCQKeyValueComparisonFilter() { + } - public MultiCQKeyValueComparisonFilter(Expression expression, boolean allCFs, byte[] essentialCF) { - super(expression, allCFs, essentialCF); - } + public MultiCQKeyValueComparisonFilter(Expression expression, boolean allCFs, + byte[] essentialCF) { + super(expression, allCFs, essentialCF); + } - @Override - protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, - int cqLength) { - ptr.set(cq, cqOffset, cqLength); - return ptr; - } + @Override + protected Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, + int cqLength) { + ptr.set(cq, cqOffset, cqLength); + return ptr; + } - @Override - protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, - int cqLength) { - byte[] cfKey; - if (cfOffset == 0 && cf.length == cfLength) { - cfKey = cf; - } else { - cfKey = new byte[cfLength]; - System.arraycopy(cf, cfOffset, cfKey, 0, cfLength); - } - cfSet.add(cfKey); - return new ImmutableBytesPtr(cq, cqOffset, cqLength); + @Override + protected Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, + int cqLength) { + byte[] cfKey; + if (cfOffset == 0 && cf.length == cfLength) { + cfKey = cf; + } else { + cfKey = new byte[cfLength]; + System.arraycopy(cf, cfOffset, cfKey, 0, cfLength); } + cfSet.add(cfKey); + return new ImmutableBytesPtr(cq, cqOffset, cqLength); + } - public static MultiCQKeyValueComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (MultiCQKeyValueComparisonFilter)Writables.getWritable(pbBytes, new MultiCQKeyValueComparisonFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + public static MultiCQKeyValueComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException 
{ + try { + return (MultiCQKeyValueComparisonFilter) Writables.getWritable(pbBytes, + new MultiCQKeyValueComparisonFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiEncodedCQKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiEncodedCQKeyValueComparisonFilter.java index 75773abd77e..287b7f1a09c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiEncodedCQKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiEncodedCQKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.filter; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import java.io.DataInput; import java.io.DataOutput; @@ -46,370 +46,390 @@ import org.apache.phoenix.util.ClientUtil; /** - * Filter used for tables that use number based column qualifiers generated by one of the encoding schemes in - * {@link QualifierEncodingScheme}. Because the qualifiers are number based, instead of using a map of cells to track - * the columns that have been found, we can use an array of cells where the index into the array would be derived by the - * number based column qualifier. See {@link EncodedCQIncrementalResultTuple}. Using this filter helps us to directly - * seek to the next row when the column qualifier that we have encountered is greater than the maxQualifier that we - * expect. This helps in speeding up the queries filtering on key value columns. - * - * TODO: derived this from MultiKeyValueComparisonFilter to reduce the copy/paste from that class. + * Filter used for tables that use number based column qualifiers generated by one of the encoding + * schemes in {@link QualifierEncodingScheme}. Because the qualifiers are number based, instead of + * using a map of cells to track the columns that have been found, we can use an array of cells + * where the index into the array would be derived by the number based column qualifier. See + * {@link EncodedCQIncrementalResultTuple}. Using this filter helps us to directly seek to the next + * row when the column qualifier that we have encountered is greater than the maxQualifier that we + * expect. This helps in speeding up the queries filtering on key value columns. TODO: derived this + * from MultiKeyValueComparisonFilter to reduce the copy/paste from that class. 
*/ public class MultiEncodedCQKeyValueComparisonFilter extends BooleanExpressionFilter { - // Smallest qualifier for the columns that are being projected and filtered on - private int minQualifier; - - // Largest qualifier for the columns that are being projected and filtered on - private int maxQualifier; - - private QualifierEncodingScheme encodingScheme; - - // Smallest qualifier for the columns in where expression - private int whereExpressionMinQualifier; - - // Largest qualifier for the columns in where expression - private int whereExpressionMaxQualifier; - - private FilteredKeyValueHolder filteredKeyValues; - - // BitSet to track the qualifiers in where expression that we expect to find while filtering a row - private BitSet whereExpressionQualifiers; - - // Set to track the column families of the columns in where expression - private TreeSet cfSet; - - // Boolean that tells us whether the result of expression evaluation as and when we filter key values in a row - private Boolean matchedColumn; - - // Tuple used to store the relevant key values found while filtering a row - private EncodedCQIncrementalResultTuple inputTuple = new EncodedCQIncrementalResultTuple(); - - // Member variable to cache the size of whereExpressionQualifiers - private int expectedCardinality; - - private byte[] essentialCF = ByteUtil.EMPTY_BYTE_ARRAY; - private boolean allCFs; - - private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; - - public MultiEncodedCQKeyValueComparisonFilter() {} - - public MultiEncodedCQKeyValueComparisonFilter(Expression expression, QualifierEncodingScheme scheme, boolean allCFs, byte[] essentialCF) { - super(expression); - checkArgument(scheme != NON_ENCODED_QUALIFIERS, "Filter can only be used for encoded qualifiers"); - this.encodingScheme = scheme; - this.allCFs = allCFs; - this.essentialCF = essentialCF == null ? ByteUtil.EMPTY_BYTE_ARRAY : essentialCF; - initFilter(expression); - } - - private final class FilteredKeyValueHolder { - // Cell values corresponding to columns in where expression that were found while filtering a row. - private Cell[] filteredCells; - - // BitSet to track whether qualifiers in where expression were found when filtering a row - private BitSet filteredQualifiers; - - // Using an explicit counter instead of relying on the cardinality of the bitset as computing the - // cardinality could be slightly more expensive than just incrementing an integer - private int numKeyValues; - - private FilteredKeyValueHolder(int size) { - filteredCells = new Cell[size]; - filteredQualifiers = new BitSet(size); - } + // Smallest qualifier for the columns that are being projected and filtered on + private int minQualifier; - private void setCell(int qualifier, Cell c) { - int index = qualifier - whereExpressionMinQualifier; - filteredCells[index] = c; - filteredQualifiers.set(index); - numKeyValues++; - } + // Largest qualifier for the columns that are being projected and filtered on + private int maxQualifier; - private Cell getCell(int qualifier) { - int index = qualifier - whereExpressionMinQualifier; - return filteredQualifiers.get(index) ? filteredCells[index] : null; - } + private QualifierEncodingScheme encodingScheme; - private void clear() { - // Note here that we are only clearing out the filteredQualifiers bitset. We are not setting all the - // entries in filteredKeyValues to null or allocating a new Cell array as that would be expensive. 
- filteredQualifiers.clear(); - numKeyValues = 0; - } - - /** - * This method really shouldn't be the way for getting hold of cells. It was - * just added to keep the tuple.get(index) method happy. - */ - public Cell getCellAtIndex(int index) { - int bitIndex; - for (bitIndex = filteredQualifiers.nextSetBit(0); bitIndex >= 0 && index >= 0; bitIndex = filteredQualifiers - .nextSetBit(bitIndex + 1)) { - index--; - } - if (bitIndex < 0) { throw new NoSuchElementException(); } - return filteredCells[bitIndex]; - } + // Smallest qualifier for the columns in where expression + private int whereExpressionMinQualifier; - @Override - public String toString() { - StringBuilder sb = new StringBuilder(100); - int length = filteredQualifiers.length(); - for (int i = 0; i < length; i++) { - sb.append(filteredCells[i].toString()); - } - return sb.toString(); - } - - private boolean allColumnsFound() { - return numKeyValues == expectedCardinality; - } - - private int numKeyValues() { - return numKeyValues; - } + // Largest qualifier for the columns in where expression + private int whereExpressionMaxQualifier; + + private FilteredKeyValueHolder filteredKeyValues; + + // BitSet to track the qualifiers in where expression that we expect to find while filtering a row + private BitSet whereExpressionQualifiers; + + // Set to track the column families of the columns in where expression + private TreeSet cfSet; + + // Boolean that tells us whether the result of expression evaluation as and when we filter key + // values in a row + private Boolean matchedColumn; + + // Tuple used to store the relevant key values found while filtering a row + private EncodedCQIncrementalResultTuple inputTuple = new EncodedCQIncrementalResultTuple(); + + // Member variable to cache the size of whereExpressionQualifiers + private int expectedCardinality; + + private byte[] essentialCF = ByteUtil.EMPTY_BYTE_ARRAY; + private boolean allCFs; + + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + + public MultiEncodedCQKeyValueComparisonFilter() { + } + + public MultiEncodedCQKeyValueComparisonFilter(Expression expression, + QualifierEncodingScheme scheme, boolean allCFs, byte[] essentialCF) { + super(expression); + checkArgument(scheme != NON_ENCODED_QUALIFIERS, + "Filter can only be used for encoded qualifiers"); + this.encodingScheme = scheme; + this.allCFs = allCFs; + this.essentialCF = essentialCF == null ? ByteUtil.EMPTY_BYTE_ARRAY : essentialCF; + initFilter(expression); + } + + private final class FilteredKeyValueHolder { + // Cell values corresponding to columns in where expression that were found while filtering a + // row. 
+ private Cell[] filteredCells; + // BitSet to track whether qualifiers in where expression were found when filtering a row + private BitSet filteredQualifiers; + + // Using an explicit counter instead of relying on the cardinality of the bitset as computing + // the + // cardinality could be slightly more expensive than just incrementing an integer + private int numKeyValues; + + private FilteredKeyValueHolder(int size) { + filteredCells = new Cell[size]; + filteredQualifiers = new BitSet(size); } - - private void initFilter(Expression expression) { - cfSet = new TreeSet(Bytes.BYTES_COMPARATOR); - final BitSet expressionQualifiers = new BitSet(20); - final Pair range = new Pair<>(); - ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - int qualifier = encodingScheme.decode(expression.getColumnQualifier()); - if (range.getFirst() == null) { - range.setFirst(qualifier); - range.setSecond(qualifier); - } else if (qualifier < range.getFirst()) { - range.setFirst(qualifier); - } else if (qualifier > range.getSecond()) { - range.setSecond(qualifier); - } - cfSet.add(expression.getColumnFamily()); - expressionQualifiers.set(qualifier); - return null; - } - }; - expression.accept(visitor); - // Set min and max qualifiers for columns in the where expression - whereExpressionMinQualifier = range.getFirst(); - whereExpressionMaxQualifier = range.getSecond(); - - int size = whereExpressionMaxQualifier - whereExpressionMinQualifier + 1; - filteredKeyValues = new FilteredKeyValueHolder(size); - - // Initialize the bitset and mark the qualifiers for columns in where expression - whereExpressionQualifiers = new BitSet(size); - for (int i = whereExpressionMinQualifier; i <= whereExpressionMaxQualifier; i++) { - if (expressionQualifiers.get(i)) { - whereExpressionQualifiers.set(i - whereExpressionMinQualifier); - } - } - expectedCardinality = whereExpressionQualifiers.cardinality(); + + private void setCell(int qualifier, Cell c) { + int index = qualifier - whereExpressionMinQualifier; + filteredCells[index] = c; + filteredQualifiers.set(index); + numKeyValues++; } - - private boolean isQualifierForColumnInWhereExpression(int qualifier) { - return qualifier >= whereExpressionMinQualifier ? whereExpressionQualifiers.get(qualifier - whereExpressionMinQualifier) : false; + + private Cell getCell(int qualifier) { + int index = qualifier - whereExpressionMinQualifier; + return filteredQualifiers.get(index) ? filteredCells[index] : null; } - @Override - public ReturnCode filterKeyValue(Cell cell) { - return filterCell(cell); + private void clear() { + // Note here that we are only clearing out the filteredQualifiers bitset. We are not setting + // all the + // entries in filteredKeyValues to null or allocating a new Cell array as that would be + // expensive. 
+ filteredQualifiers.clear(); + numKeyValues = 0; } - @Override - public ReturnCode filterCell(Cell cell) { - if (Boolean.TRUE.equals(this.matchedColumn)) { - // We already found and matched the single column, all keys now pass - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (Boolean.FALSE.equals(this.matchedColumn)) { - // We found all the columns, but did not match the expression, so skip to next row - return ReturnCode.NEXT_ROW; - } - inputTuple.setKey(cell); - int qualifier = encodingScheme.decode(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); - if (isQualifierForColumnInWhereExpression(qualifier)) { - filteredKeyValues.setCell(qualifier, cell); - // We found a new column, so we can re-evaluate - this.matchedColumn = this.evaluate(inputTuple); - if (this.matchedColumn == null) { - if (inputTuple.isImmutable()) { - this.matchedColumn = Boolean.FALSE; - } else { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - } - return this.matchedColumn ? ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; - } - // The qualifier is not one of the qualifiers in the expression. So decide whether - // we would need to include it in our result. - if (qualifier < minQualifier) { - // Qualifier is smaller than the minimum expected qualifier. Look at the next column. - return ReturnCode.NEXT_COL; - } - // TODO: I don't think we would ever hit this case of encountering a greater than what we expect. - // Leaving the code commented out here for future reference. - // if (qualifier > maxQualifier) { - // Qualifier is larger than the max expected qualifier. We are done looking at columns in this row. - // return ReturnCode.NEXT_ROW; - // } - return ReturnCode.INCLUDE_AND_NEXT_COL; + /** + * This method really shouldn't be the way for getting hold of cells. It was just added to keep + * the tuple.get(index) method happy. + */ + public Cell getCellAtIndex(int index) { + int bitIndex; + for (bitIndex = filteredQualifiers.nextSetBit(0); bitIndex >= 0 && index >= 0; bitIndex = + filteredQualifiers.nextSetBit(bitIndex + 1)) { + index--; + } + if (bitIndex < 0) { + throw new NoSuchElementException(); + } + return filteredCells[bitIndex]; } @Override - public boolean filterRow() { - if (this.matchedColumn == null && !inputTuple.isImmutable() && expression.requiresFinalEvaluation()) { - inputTuple.setImmutable(); - this.matchedColumn = this.evaluate(inputTuple); - } - return ! 
(Boolean.TRUE.equals(this.matchedColumn)); + public String toString() { + StringBuilder sb = new StringBuilder(100); + int length = filteredQualifiers.length(); + for (int i = 0; i < length; i++) { + sb.append(filteredCells[i].toString()); + } + return sb.toString(); } - final class EncodedCQIncrementalResultTuple extends BaseTuple { - private final ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - private boolean isImmutable; - - @Override - public boolean isImmutable() { - return isImmutable || filteredKeyValues.allColumnsFound(); - } - - public void setImmutable() { - this.isImmutable = true; - } - - private void setKey(Cell value) { - keyPtr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(keyPtr.get(),keyPtr.getOffset(),keyPtr.getLength()); - } - - @Override - public Cell getValue(byte[] cf, byte[] cq) { - int qualifier = encodingScheme.decode(cq); - return filteredKeyValues.getCell(qualifier); - } - - @Override - public String toString() { - return filteredKeyValues.toString(); - } + private boolean allColumnsFound() { + return numKeyValues == expectedCardinality; + } - @Override - public int size() { - return filteredKeyValues.numKeyValues(); - } + private int numKeyValues() { + return numKeyValues; + } - /** - * This method doesn't perform well and shouldn't be the way of - * getting hold of elements in the tuple. - */ - @Override - public Cell getValue(int index) { - return filteredKeyValues.getCellAtIndex(index); - } + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - Cell cell = getValue(family, qualifier); - if (cell == null) - return false; - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return true; + private void initFilter(Expression expression) { + cfSet = new TreeSet(Bytes.BYTES_COMPARATOR); + final BitSet expressionQualifiers = new BitSet(20); + final Pair range = new Pair<>(); + ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression expression) { + int qualifier = encodingScheme.decode(expression.getColumnQualifier()); + if (range.getFirst() == null) { + range.setFirst(qualifier); + range.setSecond(qualifier); + } else if (qualifier < range.getFirst()) { + range.setFirst(qualifier); + } else if (qualifier > range.getSecond()) { + range.setSecond(qualifier); } - - void reset() { - isImmutable = false; - keyPtr.set(UNITIALIZED_KEY_BUFFER); + cfSet.add(expression.getColumnFamily()); + expressionQualifiers.set(qualifier); + return null; + } + }; + expression.accept(visitor); + // Set min and max qualifiers for columns in the where expression + whereExpressionMinQualifier = range.getFirst(); + whereExpressionMaxQualifier = range.getSecond(); + + int size = whereExpressionMaxQualifier - whereExpressionMinQualifier + 1; + filteredKeyValues = new FilteredKeyValueHolder(size); + + // Initialize the bitset and mark the qualifiers for columns in where expression + whereExpressionQualifiers = new BitSet(size); + for (int i = whereExpressionMinQualifier; i <= whereExpressionMaxQualifier; i++) { + if (expressionQualifiers.get(i)) { + whereExpressionQualifiers.set(i - whereExpressionMinQualifier); + } + } + expectedCardinality = whereExpressionQualifiers.cardinality(); + } + + private boolean isQualifierForColumnInWhereExpression(int qualifier) { + return qualifier >= 
whereExpressionMinQualifier + ? whereExpressionQualifiers.get(qualifier - whereExpressionMinQualifier) + : false; + } + + @Override + public ReturnCode filterKeyValue(Cell cell) { + return filterCell(cell); + } + + @Override + public ReturnCode filterCell(Cell cell) { + if (Boolean.TRUE.equals(this.matchedColumn)) { + // We already found and matched the single column, all keys now pass + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (Boolean.FALSE.equals(this.matchedColumn)) { + // We found all the columns, but did not match the expression, so skip to next row + return ReturnCode.NEXT_ROW; + } + inputTuple.setKey(cell); + int qualifier = encodingScheme.decode(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); + if (isQualifierForColumnInWhereExpression(qualifier)) { + filteredKeyValues.setCell(qualifier, cell); + // We found a new column, so we can re-evaluate + this.matchedColumn = this.evaluate(inputTuple); + if (this.matchedColumn == null) { + if (inputTuple.isImmutable()) { + this.matchedColumn = Boolean.FALSE; + } else { + return ReturnCode.INCLUDE_AND_NEXT_COL; } + } + return this.matchedColumn ? ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; } - + // The qualifier is not one of the qualifiers in the expression. So decide whether + // we would need to include it in our result. + if (qualifier < minQualifier) { + // Qualifier is smaller than the minimum expected qualifier. Look at the next column. + return ReturnCode.NEXT_COL; + } + // TODO: I don't think we would ever hit this case of encountering a greater than what we + // expect. + // Leaving the code commented out here for future reference. + // if (qualifier > maxQualifier) { + // Qualifier is larger than the max expected qualifier. We are done looking at columns in this + // row. 
+ // return ReturnCode.NEXT_ROW; + // } + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + + @Override + public boolean filterRow() { + if ( + this.matchedColumn == null && !inputTuple.isImmutable() + && expression.requiresFinalEvaluation() + ) { + inputTuple.setImmutable(); + this.matchedColumn = this.evaluate(inputTuple); + } + return !(Boolean.TRUE.equals(this.matchedColumn)); + } + + final class EncodedCQIncrementalResultTuple extends BaseTuple { + private final ImmutableBytesWritable keyPtr = + new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + private boolean isImmutable; + @Override - public void readFields(DataInput input) throws IOException { - try { - this.minQualifier = WritableUtils.readVInt(input); - this.maxQualifier = WritableUtils.readVInt(input); - this.whereExpressionMinQualifier = WritableUtils.readVInt(input); - this.whereExpressionMaxQualifier = WritableUtils.readVInt(input); - this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)]; - super.readFields(input); - try { - allCFs = input.readBoolean(); - if (!allCFs) { - essentialCF = Bytes.readByteArray(input); - } - } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used - } - } catch (DoNotRetryIOException e) { - throw e; - } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry - ClientUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", t); - } - initFilter(expression); + public boolean isImmutable() { + return isImmutable || filteredKeyValues.allColumnsFound(); } - + + public void setImmutable() { + this.isImmutable = true; + } + + private void setKey(Cell value) { + keyPtr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } + @Override - public void write(DataOutput output) throws IOException { - try { - WritableUtils.writeVInt(output, minQualifier); - WritableUtils.writeVInt(output, maxQualifier); - WritableUtils.writeVInt(output, whereExpressionMinQualifier); - WritableUtils.writeVInt(output, whereExpressionMaxQualifier); - WritableUtils.writeVInt(output, encodingScheme.ordinal()); - super.write(output); - output.writeBoolean(allCFs); - if (!allCFs) { - Bytes.writeByteArray(output, essentialCF); - } - } catch (DoNotRetryIOException e) { - throw e; - } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry - ClientUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", t); - } + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()); } - - public void setMinMaxQualifierRange(Pair minMaxQualifiers) { - this.minQualifier = minMaxQualifiers.getFirst(); - this.maxQualifier = minMaxQualifiers.getSecond(); + + @Override + public Cell getValue(byte[] cf, byte[] cq) { + int qualifier = encodingScheme.decode(cq); + return filteredKeyValues.getCell(qualifier); } - public void setMinQualifier(int minQualifier) { - this.minQualifier = minQualifier; + @Override + public String toString() { + return filteredKeyValues.toString(); } - public static MultiEncodedCQKeyValueComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (MultiEncodedCQKeyValueComparisonFilter)Writables.getWritable(pbBytes, new MultiEncodedCQKeyValueComparisonFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + @Override + public int size() { + return filteredKeyValues.numKeyValues(); } - + + /** + * This method 
doesn't perform well and shouldn't be the way of getting hold of elements in the + * tuple. + */ @Override - public void reset() { - filteredKeyValues.clear(); - matchedColumn = null; - inputTuple.reset(); - super.reset(); + public Cell getValue(int index) { + return filteredKeyValues.getCellAtIndex(index); } @Override - public boolean isFamilyEssential(byte[] name) { - // Typically only the column families involved in the expression are essential. - // The others are for columns projected in the select expression. However, depending - // on the expression (i.e. IS NULL), we may need to include the column family - // containing the empty key value or all column families in the case of a mapped - // view (where we don't have an empty key value). - return allCFs || Bytes.compareTo(name, essentialCF) == 0 || cfSet.contains(name); + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell cell = getValue(family, qualifier); + if (cell == null) return false; + ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return true; + } + + void reset() { + isImmutable = false; + keyPtr.set(UNITIALIZED_KEY_BUFFER); } - - + } + + @Override + public void readFields(DataInput input) throws IOException { + try { + this.minQualifier = WritableUtils.readVInt(input); + this.maxQualifier = WritableUtils.readVInt(input); + this.whereExpressionMinQualifier = WritableUtils.readVInt(input); + this.whereExpressionMaxQualifier = WritableUtils.readVInt(input); + this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)]; + super.readFields(input); + try { + allCFs = input.readBoolean(); + if (!allCFs) { + essentialCF = Bytes.readByteArray(input); + } + } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used + } + } catch (DoNotRetryIOException e) { + throw e; + } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry + ClientUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", + t); + } + initFilter(expression); + } + + @Override + public void write(DataOutput output) throws IOException { + try { + WritableUtils.writeVInt(output, minQualifier); + WritableUtils.writeVInt(output, maxQualifier); + WritableUtils.writeVInt(output, whereExpressionMinQualifier); + WritableUtils.writeVInt(output, whereExpressionMaxQualifier); + WritableUtils.writeVInt(output, encodingScheme.ordinal()); + super.write(output); + output.writeBoolean(allCFs); + if (!allCFs) { + Bytes.writeByteArray(output, essentialCF); + } + } catch (DoNotRetryIOException e) { + throw e; + } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry + ClientUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", + t); + } + } + + public void setMinMaxQualifierRange(Pair minMaxQualifiers) { + this.minQualifier = minMaxQualifiers.getFirst(); + this.maxQualifier = minMaxQualifiers.getSecond(); + } + + public void setMinQualifier(int minQualifier) { + this.minQualifier = minQualifier; + } + + public static MultiEncodedCQKeyValueComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (MultiEncodedCQKeyValueComparisonFilter) Writables.getWritable(pbBytes, + new MultiEncodedCQKeyValueComparisonFilter()); + } catch (IOException e) { + throw new DeserializationException(e); + } + } + + @Override + public void reset() { + filteredKeyValues.clear(); + matchedColumn = null; + 
inputTuple.reset(); + super.reset(); + } + + @Override + public boolean isFamilyEssential(byte[] name) { + // Typically only the column families involved in the expression are essential. + // The others are for columns projected in the select expression. However, depending + // on the expression (i.e. IS NULL), we may need to include the column family + // containing the empty key value or all column families in the case of a mapped + // view (where we don't have an empty key value). + return allCFs || Bytes.compareTo(name, essentialCF) == 0 || cfSet.contains(name); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java index 6a6cf224194..bbc3c1dc0b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/MultiKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,256 +36,259 @@ import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; - - /** - * - * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter}, - * but for general expression evaluation in the case where multiple KeyValue - * columns are referenced in the expression. - * + * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter}, but for general + * expression evaluation in the case where multiple KeyValue columns are referenced in the + * expression. */ public abstract class MultiKeyValueComparisonFilter extends BooleanExpressionFilter { - private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; - private Boolean matchedColumn; - protected final IncrementalResultTuple inputTuple = new IncrementalResultTuple(); - protected TreeSet cfSet; - private byte[] essentialCF = ByteUtil.EMPTY_BYTE_ARRAY; - private boolean allCFs; + private Boolean matchedColumn; + protected final IncrementalResultTuple inputTuple = new IncrementalResultTuple(); + protected TreeSet cfSet; + private byte[] essentialCF = ByteUtil.EMPTY_BYTE_ARRAY; + private boolean allCFs; - public MultiKeyValueComparisonFilter() { - } + public MultiKeyValueComparisonFilter() { + } - public MultiKeyValueComparisonFilter(Expression expression, boolean allCFs, byte[] essentialCF) { - super(expression); - this.allCFs = allCFs; - this.essentialCF = essentialCF == null ? ByteUtil.EMPTY_BYTE_ARRAY : essentialCF; - init(); + public MultiKeyValueComparisonFilter(Expression expression, boolean allCFs, byte[] essentialCF) { + super(expression); + this.allCFs = allCFs; + this.essentialCF = essentialCF == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : essentialCF; + init(); + } + + private static final class CellRef { + public Cell cell; + + @Override + public String toString() { + if (cell != null) { + return cell.toString() + " value = " + Bytes.toStringBinary(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength()); + } else { + return super.toString(); + } } + } - private static final class CellRef { - public Cell cell; - - @Override - public String toString() { - if(cell != null) { - return cell.toString() + " value = " + Bytes.toStringBinary( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - } else { - return super.toString(); - } - } + protected abstract Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, + int cqOffset, int cqLength); + + protected abstract Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, + int cqOffset, int cqLength); + + private final class IncrementalResultTuple extends BaseTuple { + private int refCount; + private final ImmutableBytesWritable keyPtr = + new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + private final Map foundColumns = new HashMap(5); + + public void reset() { + refCount = 0; + keyPtr.set(UNITIALIZED_KEY_BUFFER); + for (CellRef ref : foundColumns.values()) { + ref.cell = null; + } } - - protected abstract Object setColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength); - protected abstract Object newColumnKey(byte[] cf, int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength); - - private final class IncrementalResultTuple extends BaseTuple { - private int refCount; - private final ImmutableBytesWritable keyPtr = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - private final Map foundColumns = new HashMap(5); - - public void reset() { - refCount = 0; - keyPtr.set(UNITIALIZED_KEY_BUFFER); - for (CellRef ref : foundColumns.values()) { - ref.cell = null; - } - } - - @Override - public boolean isImmutable() { - return refCount == foundColumns.size(); - } - - public void setImmutable() { - refCount = foundColumns.size(); - } - - private ReturnCode resolveColumn(Cell value) { - // Always set key, in case we never find a key value column of interest, - // and our expression uses row key columns. - setKey(value); - Object ptr = setColumnKey(value.getFamilyArray(), value.getFamilyOffset(), value.getFamilyLength(), - value.getQualifierArray(), value.getQualifierOffset(), value.getQualifierLength()); - CellRef ref = foundColumns.get(ptr); - if (ref == null) { - // Return INCLUDE_AND_NEXT_COL here. 
Although this filter doesn't need this KV - // it should still be projected into the Result - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - // Since we only look at the latest key value for a given column, - // we are not interested in older versions - // TODO: test with older versions to confirm this doesn't get tripped - // This shouldn't be necessary, because a scan only looks at the latest - // version - if (ref.cell != null) { - // Can't do NEXT_ROW, because then we don't match the other columns - // SKIP, INCLUDE, and NEXT_COL seem to all act the same - return ReturnCode.NEXT_COL; - } - ref.cell = value; - refCount++; - return null; - } - - public void addColumn(byte[] cf, byte[] cq) { - Object ptr = MultiKeyValueComparisonFilter.this.newColumnKey(cf, 0, cf.length, cq, 0, cq.length); - foundColumns.put(ptr, new CellRef()); - } - - public void setKey(Cell value) { - keyPtr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(keyPtr.get(),keyPtr.getOffset(),keyPtr.getLength()); - } - - @Override - public Cell getValue(byte[] cf, byte[] cq) { - Object ptr = setColumnKey(cf, 0, cf.length, cq, 0, cq.length); - CellRef ref = foundColumns.get(ptr); - return ref == null ? null : ref.cell; - } - - @Override - public String toString() { - return foundColumns.toString(); - } - @Override - public int size() { - return refCount; - } + @Override + public boolean isImmutable() { + return refCount == foundColumns.size(); + } - @Override - public Cell getValue(int index) { - // This won't perform very well, but it's not - // currently used anyway - for (CellRef ref : foundColumns.values()) { - if (ref.cell == null) { - continue; - } - if (index == 0) { - return ref.cell; - } - index--; - } - throw new IndexOutOfBoundsException(Integer.toString(index)); - } + public void setImmutable() { + refCount = foundColumns.size(); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - Cell cell = getValue(family, qualifier); - if (cell == null) - return false; - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return true; - } + private ReturnCode resolveColumn(Cell value) { + // Always set key, in case we never find a key value column of interest, + // and our expression uses row key columns. + setKey(value); + Object ptr = + setColumnKey(value.getFamilyArray(), value.getFamilyOffset(), value.getFamilyLength(), + value.getQualifierArray(), value.getQualifierOffset(), value.getQualifierLength()); + CellRef ref = foundColumns.get(ptr); + if (ref == null) { + // Return INCLUDE_AND_NEXT_COL here. 
Although this filter doesn't need this KV + // it should still be projected into the Result + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + // Since we only look at the latest key value for a given column, + // we are not interested in older versions + // TODO: test with older versions to confirm this doesn't get tripped + // This shouldn't be necessary, because a scan only looks at the latest + // version + if (ref.cell != null) { + // Can't do NEXT_ROW, because then we don't match the other columns + // SKIP, INCLUDE, and NEXT_COL seem to all act the same + return ReturnCode.NEXT_COL; + } + ref.cell = value; + refCount++; + return null; } - - protected void init() { - cfSet = new TreeSet(Bytes.BYTES_COMPARATOR); - ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - inputTuple.addColumn(expression.getColumnFamily(), expression.getColumnQualifier()); - return null; - } - }; - expression.accept(visitor); + + public void addColumn(byte[] cf, byte[] cq) { + Object ptr = + MultiKeyValueComparisonFilter.this.newColumnKey(cf, 0, cf.length, cq, 0, cq.length); + foundColumns.put(ptr, new CellRef()); } - @Override - public ReturnCode filterKeyValue(Cell cell) { - return filterCell(cell); + public void setKey(Cell value) { + keyPtr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); } @Override - public ReturnCode filterCell(Cell cell) { - if (Boolean.TRUE.equals(this.matchedColumn)) { - // We already found and matched the single column, all keys now pass - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (Boolean.FALSE.equals(this.matchedColumn)) { - // We found all the columns, but did not match the expression, so skip to next row - return ReturnCode.NEXT_ROW; - } - // This is a key value we're not interested in (TODO: why INCLUDE here instead of NEXT_COL?) - ReturnCode code = inputTuple.resolveColumn(cell); - if (code != null) { - return code; - } - - // We found a new column, so we can re-evaluate - // TODO: if we have row key columns in our expression, should - // we always evaluate or just wait until the end? - this.matchedColumn = this.evaluate(inputTuple); - if (this.matchedColumn == null) { - if (inputTuple.isImmutable()) { - this.matchedColumn = Boolean.FALSE; - } else { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - } - return this.matchedColumn ? ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()); } @Override - public boolean filterRow() { - if (this.matchedColumn == null && !inputTuple.isImmutable() && expression.requiresFinalEvaluation()) { - inputTuple.setImmutable(); - this.matchedColumn = this.evaluate(inputTuple); - } - - return ! (Boolean.TRUE.equals(this.matchedColumn)); + public Cell getValue(byte[] cf, byte[] cq) { + Object ptr = setColumnKey(cf, 0, cf.length, cq, 0, cq.length); + CellRef ref = foundColumns.get(ptr); + return ref == null ? null : ref.cell; } @Override - public void reset() { - matchedColumn = null; - inputTuple.reset(); - super.reset(); + public String toString() { + return foundColumns.toString(); } @Override - public boolean isFamilyEssential(byte[] name) { - // Typically only the column families involved in the expression are essential. - // The others are for columns projected in the select expression. However, depending - // on the expression (i.e. 
IS NULL), we may need to include the column family - // containing the empty key value or all column families in the case of a mapped - // view (where we don't have an empty key value). - return allCFs || Bytes.compareTo(name, essentialCF) == 0 || cfSet.contains(name); + public int size() { + return refCount; } @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - try { - allCFs = input.readBoolean(); - if (!allCFs) { - essentialCF = Bytes.readByteArray(input); - } - } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used + public Cell getValue(int index) { + // This won't perform very well, but it's not + // currently used anyway + for (CellRef ref : foundColumns.values()) { + if (ref.cell == null) { + continue; } - init(); + if (index == 0) { + return ref.cell; + } + index--; + } + throw new IndexOutOfBoundsException(Integer.toString(index)); } - + @Override - public void write(DataOutput output) throws IOException { - super.write(output); - try { - output.writeBoolean(allCFs); - if (!allCFs) { - Bytes.writeByteArray(output, essentialCF); - } - } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry - ClientUtil.throwIOException("MultiKeyValueComparisonFilter failed during writing", t); - } + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell cell = getValue(family, qualifier); + if (cell == null) return false; + ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return true; + } + } + + protected void init() { + cfSet = new TreeSet(Bytes.BYTES_COMPARATOR); + ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression expression) { + inputTuple.addColumn(expression.getColumnFamily(), expression.getColumnQualifier()); + return null; + } + }; + expression.accept(visitor); + } + + @Override + public ReturnCode filterKeyValue(Cell cell) { + return filterCell(cell); + } + + @Override + public ReturnCode filterCell(Cell cell) { + if (Boolean.TRUE.equals(this.matchedColumn)) { + // We already found and matched the single column, all keys now pass + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + if (Boolean.FALSE.equals(this.matchedColumn)) { + // We found all the columns, but did not match the expression, so skip to next row + return ReturnCode.NEXT_ROW; + } + // This is a key value we're not interested in (TODO: why INCLUDE here instead of NEXT_COL?) + ReturnCode code = inputTuple.resolveColumn(cell); + if (code != null) { + return code; + } + + // We found a new column, so we can re-evaluate + // TODO: if we have row key columns in our expression, should + // we always evaluate or just wait until the end? + this.matchedColumn = this.evaluate(inputTuple); + if (this.matchedColumn == null) { + if (inputTuple.isImmutable()) { + this.matchedColumn = Boolean.FALSE; + } else { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + } + return this.matchedColumn ? 
ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; + } + + @Override + public boolean filterRow() { + if ( + this.matchedColumn == null && !inputTuple.isImmutable() + && expression.requiresFinalEvaluation() + ) { + inputTuple.setImmutable(); + this.matchedColumn = this.evaluate(inputTuple); + } + + return !(Boolean.TRUE.equals(this.matchedColumn)); + } + + @Override + public void reset() { + matchedColumn = null; + inputTuple.reset(); + super.reset(); + } + + @Override + public boolean isFamilyEssential(byte[] name) { + // Typically only the column families involved in the expression are essential. + // The others are for columns projected in the select expression. However, depending + // on the expression (i.e. IS NULL), we may need to include the column family + // containing the empty key value or all column families in the case of a mapped + // view (where we don't have an empty key value). + return allCFs || Bytes.compareTo(name, essentialCF) == 0 || cfSet.contains(name); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + try { + allCFs = input.readBoolean(); + if (!allCFs) { + essentialCF = Bytes.readByteArray(input); + } + } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used + } + init(); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + try { + output.writeBoolean(allCFs); + if (!allCFs) { + Bytes.writeByteArray(output, essentialCF); + } + } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry + ClientUtil.throwIOException("MultiKeyValueComparisonFilter failed during writing", t); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/PagingFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/PagingFilter.java index eb9eb3b4261..4e7e20dcaa0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/PagingFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/PagingFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,252 +37,246 @@ /** * This is a top level Phoenix filter which is injected to a scan at the server side. If the scan * already has a filter then PagingFilter wraps it. This filter is for server pagination. It makes - * sure that the scan does not take more than pageSizeInMs. - * - * PagingRegionScanner initializes PagingFilter before retrieving a row. The state of PagingFilter - * consists of three variables startTime, isStopped, and currentCell. During this - * initialization, starTime is set to the current time, isStopped to false, and currentCell to null. - * - * PagingFilter implements the paging state machine in three filter methods that are - * hasFilterRow(), filterAllRemaining(), and filterRowKey(). These methods are called in the - * following order for each row: hasFilterRow(), filterAllRemaining(), filterRowKey(), and - * filterAllRemaining(). Please note that filterAllRemaining() is called twice (before and after - * filterRowKey()). Sometimes, filterAllRemaining() is called multiple times back to back. 
- * - * In hasFilterRow(), if currentCell is not null, meaning that at least one row has been - * scanned, and it is time to page out, then PagingFilter sets isStopped to true. - * - * In filterAllRemaining(), PagingFilter returns true if isStopped is true. Returning true from this - * method causes the HBase region scanner to signal the caller (that is PagingRegionScanner in this - * case) that there are no more rows to scan by returning false from the next() call. In that case, - * PagingRegionScanner checks if PagingFilter is stopped. If PagingFilter is stopped, then it means - * the last next() call paged out rather than the scan operation reached at its last row. - * Please note it is crucial that PagingFilter returns true in the first filterAllRemaining() call - * for a given row. This allows to the HBase region scanner to resume the scanning rows when the - * next() method is called even though the region scanner already signaled the caller that there - * were no more rows to scan. PagingRegionScanner leverages this behavior to resume the scan - * operation using the same scanner instead closing the current one and starting a new scanner. If - * this specific HBase region scanner behavior changes, it will cause server paging test failures. - * To fix them, the PagingRegionScanner code needs to change such that PagingRegionScanner needs to - * create a new scanner with adjusted start row to resume the scan operation after PagingFilter - * stops. - * - * If the scan operation has not been terminated by PageFilter, HBase subsequently calls - * filterRowKey(). In this method, PagingFilter records the last row that is scanned. - * + * sure that the scan does not take more than pageSizeInMs. PagingRegionScanner initializes + * PagingFilter before retrieving a row. The state of PagingFilter consists of three variables + * startTime, isStopped, and currentCell. During this initialization, starTime is set to the current + * time, isStopped to false, and currentCell to null. PagingFilter implements the paging state + * machine in three filter methods that are hasFilterRow(), filterAllRemaining(), and + * filterRowKey(). These methods are called in the following order for each row: hasFilterRow(), + * filterAllRemaining(), filterRowKey(), and filterAllRemaining(). Please note that + * filterAllRemaining() is called twice (before and after filterRowKey()). Sometimes, + * filterAllRemaining() is called multiple times back to back. In hasFilterRow(), if currentCell is + * not null, meaning that at least one row has been scanned, and it is time to page out, then + * PagingFilter sets isStopped to true. In filterAllRemaining(), PagingFilter returns true if + * isStopped is true. Returning true from this method causes the HBase region scanner to signal the + * caller (that is PagingRegionScanner in this case) that there are no more rows to scan by + * returning false from the next() call. In that case, PagingRegionScanner checks if PagingFilter is + * stopped. If PagingFilter is stopped, then it means the last next() call paged out rather than the + * scan operation reached at its last row. Please note it is crucial that PagingFilter returns true + * in the first filterAllRemaining() call for a given row. This allows to the HBase region scanner + * to resume the scanning rows when the next() method is called even though the region scanner + * already signaled the caller that there were no more rows to scan. 
PagingRegionScanner leverages + * this behavior to resume the scan operation using the same scanner instead closing the current one + * and starting a new scanner. If this specific HBase region scanner behavior changes, it will cause + * server paging test failures. To fix them, the PagingRegionScanner code needs to change such that + * PagingRegionScanner needs to create a new scanner with adjusted start row to resume the scan + * operation after PagingFilter stops. If the scan operation has not been terminated by PageFilter, + * HBase subsequently calls filterRowKey(). In this method, PagingFilter records the last row that + * is scanned. */ public class PagingFilter extends FilterBase implements Writable { - private long pageSizeMs; - private long startTime; - // tracks the row we last visited - private Cell currentCell; - private boolean isStopped; - private Filter delegate = null; + private long pageSizeMs; + private long startTime; + // tracks the row we last visited + private Cell currentCell; + private boolean isStopped; + private Filter delegate = null; - public PagingFilter() { - } + public PagingFilter() { + } - public PagingFilter(Filter delegate, long pageSizeMs) { - this.delegate = delegate; - this.pageSizeMs = pageSizeMs; - } + public PagingFilter(Filter delegate, long pageSizeMs) { + this.delegate = delegate; + this.pageSizeMs = pageSizeMs; + } - public Filter getDelegateFilter() { - return delegate; - } + public Filter getDelegateFilter() { + return delegate; + } - public void setDelegateFilter (Filter delegate) { - this.delegate = delegate; - } + public void setDelegateFilter(Filter delegate) { + this.delegate = delegate; + } - public byte[] getCurrentRowKeyToBeExcluded() { - byte[] rowKeyAtStop = null; - if (currentCell != null) { - rowKeyAtStop = CellUtil.cloneRow(currentCell); - } - return rowKeyAtStop; + public byte[] getCurrentRowKeyToBeExcluded() { + byte[] rowKeyAtStop = null; + if (currentCell != null) { + rowKeyAtStop = CellUtil.cloneRow(currentCell); } + return rowKeyAtStop; + } - public boolean isStopped() { - return isStopped; - } + public boolean isStopped() { + return isStopped; + } - public void init() { - isStopped = false; - currentCell = null; - startTime = EnvironmentEdgeManager.currentTimeMillis(); - } + public void init() { + isStopped = false; + currentCell = null; + startTime = EnvironmentEdgeManager.currentTimeMillis(); + } - @Override - public boolean hasFilterRow() { - if (currentCell != null - && EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { - isStopped = true; - } - return true; + @Override + public boolean hasFilterRow() { + if ( + currentCell != null && EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs + ) { + isStopped = true; } + return true; + } - @Override - public boolean filterAllRemaining() throws IOException { - if (isStopped) { - return true; - } - if (delegate != null) { - return delegate.filterAllRemaining(); - } - return super.filterAllRemaining(); + @Override + public boolean filterAllRemaining() throws IOException { + if (isStopped) { + return true; } - - @Override - public boolean filterRowKey(Cell cell) throws IOException { - currentCell = cell; - if (delegate != null) { - return delegate.filterRowKey(cell); - } - return super.filterRowKey(cell); + if (delegate != null) { + return delegate.filterAllRemaining(); } + return super.filterAllRemaining(); + } - @Override - public void reset() throws IOException { - if (delegate != null) { - delegate.reset(); - return; - } - super.reset(); 
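
The handshake described in the PagingFilter class javadoc above can be sketched from the
caller's side. This is only an illustration, assuming a server-side caller similar to
PagingRegionScanner (which is not shown here); it relies on nothing beyond the methods visible
on PagingFilter and the standard HBase RegionScanner API:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;
    import org.apache.phoenix.filter.PagingFilter;

    // Illustrative sketch of the paging handshake described in the PagingFilter javadoc.
    final class PagingScanSketch {
      static boolean nextRow(RegionScanner scanner, PagingFilter pagingFilter, List<Cell> results)
          throws IOException {
        pagingFilter.init(); // startTime = now, isStopped = false, currentCell = null
        boolean hasMore = scanner.next(results);
        if (!hasMore && pagingFilter.isStopped()) {
          // The scan paged out before reaching its last row: remember the last visited row key
          // so the scan can be resumed from the row after it, instead of treating it as done.
          byte[] lastVisitedRowKey = pagingFilter.getCurrentRowKeyToBeExcluded();
          return lastVisitedRowKey != null;
        }
        return hasMore;
      }
    }
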
+ @Override + public boolean filterRowKey(Cell cell) throws IOException { + currentCell = cell; + if (delegate != null) { + return delegate.filterRowKey(cell); } + return super.filterRowKey(cell); + } - @Override - public Cell getNextCellHint(Cell currentKV) throws IOException { - if (delegate != null) { - return delegate.getNextCellHint(currentKV); - } - return super.getNextCellHint(currentKV); + @Override + public void reset() throws IOException { + if (delegate != null) { + delegate.reset(); + return; } + super.reset(); + } - @Override - public boolean filterRow() throws IOException { - if (delegate != null) { - return delegate.filterRow(); - } - return super.filterRow(); + @Override + public Cell getNextCellHint(Cell currentKV) throws IOException { + if (delegate != null) { + return delegate.getNextCellHint(currentKV); } + return super.getNextCellHint(currentKV); + } - @Override - public Cell transformCell(Cell v) throws IOException { - if (delegate != null) { - return delegate.transformCell(v); - } - return super.transformCell(v); + @Override + public boolean filterRow() throws IOException { + if (delegate != null) { + return delegate.filterRow(); } + return super.filterRow(); + } - @Override - public void filterRowCells(List kvs) throws IOException { - if (delegate != null) { - delegate.filterRowCells(kvs); - return; - } - super.filterRowCells(kvs); + @Override + public Cell transformCell(Cell v) throws IOException { + if (delegate != null) { + return delegate.transformCell(v); } + return super.transformCell(v); + } - @Override - public void setReversed(boolean reversed) { - if (delegate != null) { - delegate.setReversed(reversed); - } - super.setReversed(reversed); + @Override + public void filterRowCells(List kvs) throws IOException { + if (delegate != null) { + delegate.filterRowCells(kvs); + return; } + super.filterRowCells(kvs); + } - @Override - public boolean isReversed() { - if (delegate != null) { - return delegate.isReversed(); - } - return super.isReversed(); + @Override + public void setReversed(boolean reversed) { + if (delegate != null) { + delegate.setReversed(reversed); } + super.setReversed(reversed); + } - @Override - public boolean isFamilyEssential(byte[] name) throws IOException { - if (delegate != null) { - return delegate.isFamilyEssential(name); - } - return super.isFamilyEssential(name); + @Override + public boolean isReversed() { + if (delegate != null) { + return delegate.isReversed(); } + return super.isReversed(); + } - @Override - public ReturnCode filterKeyValue(Cell v) throws IOException { - if (delegate != null) { - return delegate.filterKeyValue(v); - } - return super.filterKeyValue(v); + @Override + public boolean isFamilyEssential(byte[] name) throws IOException { + if (delegate != null) { + return delegate.isFamilyEssential(name); } + return super.isFamilyEssential(name); + } - @Override - public Filter.ReturnCode filterCell(Cell c) throws IOException { - if (delegate != null) { - return delegate.filterCell(c); - } - return super.filterCell(c); + @Override + public ReturnCode filterKeyValue(Cell v) throws IOException { + if (delegate != null) { + return delegate.filterKeyValue(v); } + return super.filterKeyValue(v); + } - public static PagingFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (PagingFilter) Writables.getWritable(pbBytes, new PagingFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + @Override + public Filter.ReturnCode filterCell(Cell c) throws IOException { + 
if (delegate != null) { + return delegate.filterCell(c); } + return super.filterCell(c); + } - @Override - public void write(DataOutput out) throws IOException { - out.writeLong(pageSizeMs); - if (delegate != null) { - out.writeUTF(delegate.getClass().getName()); - byte[] b = delegate.toByteArray(); - out.writeInt(b.length); - out.write(b); - } else { - out.writeUTF(""); - } + public static PagingFilter parseFrom(final byte[] pbBytes) throws DeserializationException { + try { + return (PagingFilter) Writables.getWritable(pbBytes, new PagingFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } - @Override - public void readFields(DataInput in) throws IOException { - pageSizeMs = in.readLong(); - String className = in.readUTF(); - if (className.length() == 0) { - return; - } - Class cls = null; - try { - cls = Class.forName(className); - } catch (ClassNotFoundException e) { - e.printStackTrace(); - throw new DoNotRetryIOException(e); - } + @Override + public void write(DataOutput out) throws IOException { + out.writeLong(pageSizeMs); + if (delegate != null) { + out.writeUTF(delegate.getClass().getName()); + byte[] b = delegate.toByteArray(); + out.writeInt(b.length); + out.write(b); + } else { + out.writeUTF(""); + } + } - Method m = null; - try { - m = cls.getDeclaredMethod("parseFrom", byte[].class); - } catch (NoSuchMethodException e) { - e.printStackTrace(); - throw new DoNotRetryIOException(e); - } - int length = in.readInt(); - byte[] b = new byte[length]; - in.readFully(b); - try { - delegate = (Filter) m.invoke(null, b); - } catch (IllegalAccessException e) { - e.printStackTrace(); - throw new DoNotRetryIOException(e); - } catch (InvocationTargetException e) { - e.printStackTrace(); - throw new DoNotRetryIOException(e); - } + @Override + public void readFields(DataInput in) throws IOException { + pageSizeMs = in.readLong(); + String className = in.readUTF(); + if (className.length() == 0) { + return; + } + Class cls = null; + try { + cls = Class.forName(className); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + throw new DoNotRetryIOException(e); } - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); + Method m = null; + try { + m = cls.getDeclaredMethod("parseFrom", byte[].class); + } catch (NoSuchMethodException e) { + e.printStackTrace(); + throw new DoNotRetryIOException(e); } + int length = in.readInt(); + byte[] b = new byte[length]; + in.readFully(b); + try { + delegate = (Filter) m.invoke(null, b); + } catch (IllegalAccessException e) { + e.printStackTrace(); + throw new DoNotRetryIOException(e); + } catch (InvocationTargetException e) { + e.printStackTrace(); + throw new DoNotRetryIOException(e); + } + } + + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java index 6982564c11f..b22cbdb1163 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,140 +32,137 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * * Filter for use when expressions only reference row key columns - * */ public class RowKeyComparisonFilter extends BooleanExpressionFilter { - private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyComparisonFilter.class); - - private boolean evaluate = true; - private boolean keepRow = false; - private final RowKeyTuple inputTuple = new RowKeyTuple(); - private byte[] essentialCF; - - public RowKeyComparisonFilter() { + private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyComparisonFilter.class); + + private boolean evaluate = true; + private boolean keepRow = false; + private final RowKeyTuple inputTuple = new RowKeyTuple(); + private byte[] essentialCF; + + public RowKeyComparisonFilter() { + } + + public RowKeyComparisonFilter(Expression expression, byte[] essentialCF) { + super(expression); + this.essentialCF = essentialCF; + } + + @Override + public void reset() { + this.keepRow = false; + this.evaluate = true; + super.reset(); + } + + /** + * Evaluate in filterKeyValue instead of filterRowKey, because HBASE-6562 causes filterRowKey to + * be called with deleted or partial row keys. + */ + @Override + public ReturnCode filterKeyValue(Cell v) { + return filterCell(v); + } + + /** + * Evaluate in filterKeyValue instead of filterRowKey, because HBASE-6562 causes filterRowKey to + * be called with deleted or partial row keys. + */ + @Override + public ReturnCode filterCell(Cell v) { + if (evaluate) { + inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength()); + this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + "RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER") + " row " + inputTuple); + } + evaluate = false; } - - public RowKeyComparisonFilter(Expression expression, byte[] essentialCF) { - super(expression); - this.essentialCF = essentialCF; + return keepRow ? ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; + } + + public static final class RowKeyTuple extends BaseTuple { + private byte[] buf; + private int offset; + private int length; + + public void setKey(byte[] buf, int offset, int length) { + this.buf = buf; + this.offset = offset; + this.length = length; } @Override - public void reset() { - this.keepRow = false; - this.evaluate = true; - super.reset(); + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(buf, offset, length); } - /** - * Evaluate in filterKeyValue instead of filterRowKey, because HBASE-6562 causes filterRowKey - * to be called with deleted or partial row keys. - */ @Override - public ReturnCode filterKeyValue(Cell v) { - return filterCell(v); + public Cell getValue(byte[] cf, byte[] cq) { + return null; } - /** - * Evaluate in filterKeyValue instead of filterRowKey, because HBASE-6562 causes filterRowKey - * to be called with deleted or partial row keys. - */ @Override - public ReturnCode filterCell(Cell v) { - if (evaluate) { - inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength()); - this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple)); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("RowKeyComparisonFilter: " + (this.keepRow ? 
"KEEP" : "FILTER") - + " row " + inputTuple); - } - evaluate = false; - } - return keepRow ? ReturnCode.INCLUDE_AND_NEXT_COL : ReturnCode.NEXT_ROW; - } - - public static final class RowKeyTuple extends BaseTuple { - private byte[] buf; - private int offset; - private int length; - - public void setKey(byte[] buf, int offset, int length) { - this.buf = buf; - this.offset = offset; - this.length = length; - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(buf, offset, length); - } - - @Override - public Cell getValue(byte[] cf, byte[] cq) { - return null; - } - - @Override - public boolean isImmutable() { - return true; - } - - @Override - public String toString() { - return Bytes.toStringBinary(buf, offset, length); - } - - @Override - public int size() { - return 0; - } - - @Override - public Cell getValue(int index) { - throw new IndexOutOfBoundsException(Integer.toString(index)); - } - - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - return false; - } + public boolean isImmutable() { + return true; } @Override - public boolean filterRow() { - return !this.keepRow; + public String toString() { + return Bytes.toStringBinary(buf, offset, length); } @Override - public boolean isFamilyEssential(byte[] name) { - // We only need our "guaranteed to have a key value" column family, - // which we pass in and serialize through. In the case of a VIEW where - // we don't have this, we have to say that all families are essential. - return this.essentialCF.length == 0 ? true : Bytes.compareTo(this.essentialCF, name) == 0; + public int size() { + return 0; } @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.essentialCF = WritableUtils.readCompressedByteArray(input); + public Cell getValue(int index) { + throw new IndexOutOfBoundsException(Integer.toString(index)); } @Override - public void write(DataOutput output) throws IOException { - super.write(output); - WritableUtils.writeCompressedByteArray(output, this.essentialCF); + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + return false; } - - public static RowKeyComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (RowKeyComparisonFilter)Writables.getWritable(pbBytes, new RowKeyComparisonFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + } + + @Override + public boolean filterRow() { + return !this.keepRow; + } + + @Override + public boolean isFamilyEssential(byte[] name) { + // We only need our "guaranteed to have a key value" column family, + // which we pass in and serialize through. In the case of a VIEW where + // we don't have this, we have to say that all families are essential. + return this.essentialCF.length == 0 ? 
true : Bytes.compareTo(this.essentialCF, name) == 0; + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.essentialCF = WritableUtils.readCompressedByteArray(input); + } + + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + WritableUtils.writeCompressedByteArray(output, this.essentialCF); + } + + public static RowKeyComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (RowKeyComparisonFilter) Writables.getWritable(pbBytes, new RowKeyComparisonFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java index c63673cb1ef..59a723b1658 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCFCQKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,34 @@ import org.apache.hadoop.hbase.util.Writables; import org.apache.phoenix.expression.Expression; - /** - * - * SingleKeyValueComparisonFilter that needs to compare both the column family and - * column qualifier parts of the key value to disambiguate with another similarly - * named column qualifier in a different column family. - * + * SingleKeyValueComparisonFilter that needs to compare both the column family and column qualifier + * parts of the key value to disambiguate with another similarly named column qualifier in a + * different column family. 
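 * <p>
 * For example, if column families {@code A} and {@code B} (illustrative names) both contain a
 * {@code NAME} qualifier, a comparison against {@code A.NAME} can only be resolved by checking
 * the cell's family bytes as well; comparing the qualifier alone would also match cells from
 * {@code B.NAME}.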
*/ public class SingleCFCQKeyValueComparisonFilter extends SingleKeyValueComparisonFilter { - public SingleCFCQKeyValueComparisonFilter() { - } + public SingleCFCQKeyValueComparisonFilter() { + } - public SingleCFCQKeyValueComparisonFilter(Expression expression) { - super(expression); - } + public SingleCFCQKeyValueComparisonFilter(Expression expression) { + super(expression); + } - @Override - protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength) { - int c = Bytes.compareTo(cf, 0, cf.length, cfBuf, cfOffset, cfLength); - if (c != 0) return c; - return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength); - } - - public static SingleCFCQKeyValueComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (SingleCFCQKeyValueComparisonFilter)Writables.getWritable(pbBytes, new SingleCFCQKeyValueComparisonFilter()); - } catch (IOException e) { - throw new DeserializationException(e); - } + @Override + protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, + int cqLength) { + int c = Bytes.compareTo(cf, 0, cf.length, cfBuf, cfOffset, cfLength); + if (c != 0) return c; + return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength); + } + + public static SingleCFCQKeyValueComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + return (SingleCFCQKeyValueComparisonFilter) Writables.getWritable(pbBytes, + new SingleCFCQKeyValueComparisonFilter()); + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java index 195c89c1823..0932675e56d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleCQKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,33 +24,32 @@ import org.apache.hadoop.hbase.util.Writables; import org.apache.phoenix.expression.Expression; - /** - * - * SingleKeyValueComparisonFilter that needs to only compare the column qualifier - * part of the key value since the column qualifier is unique across all column - * families. - * + * SingleKeyValueComparisonFilter that needs to only compare the column qualifier part of the key + * value since the column qualifier is unique across all column families. 
*/ public class SingleCQKeyValueComparisonFilter extends SingleKeyValueComparisonFilter { - public SingleCQKeyValueComparisonFilter() { - } + public SingleCQKeyValueComparisonFilter() { + } - public SingleCQKeyValueComparisonFilter(Expression expression) { - super(expression); - } + public SingleCQKeyValueComparisonFilter(Expression expression) { + super(expression); + } - @Override - protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength) { - return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength); - } + @Override + protected final int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, + int cqLength) { + return Bytes.compareTo(cq, 0, cq.length, cqBuf, cqOffset, cqLength); + } - public static SingleCQKeyValueComparisonFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - SingleCQKeyValueComparisonFilter writable = (SingleCQKeyValueComparisonFilter)Writables.getWritable(pbBytes, new SingleCQKeyValueComparisonFilter()); - return writable; - } catch (IOException e) { - throw new DeserializationException(e); - } + public static SingleCQKeyValueComparisonFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + SingleCQKeyValueComparisonFilter writable = (SingleCQKeyValueComparisonFilter) Writables + .getWritable(pbBytes, new SingleCQKeyValueComparisonFilter()); + return writable; + } catch (IOException e) { + throw new DeserializationException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java index a55ed3784dc..da7722b80ef 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SingleKeyValueComparisonFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,127 +22,125 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.expression.SingleCellColumnExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.KeyValueColumnExpression; import org.apache.phoenix.expression.visitor.StatelessTraverseAllExpressionVisitor; import org.apache.phoenix.expression.visitor.TraverseAllExpressionVisitor; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; -import org.apache.phoenix.schema.tuple.Tuple; - - /** - * - * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter}, - * but for general expression evaluation in the case where only a single KeyValue - * column is referenced in the expression. - * + * Modeled after {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter}, but for general + * expression evaluation in the case where only a single KeyValue column is referenced in the + * expression. 
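 * <p>
 * For example (names illustrative), a predicate that references exactly one non-row-key column,
 * such as {@code B.STATUS = 'active'}, can be evaluated as soon as that single cell is seen,
 * letting the filter answer with {@code INCLUDE_AND_NEXT_COL} or {@code NEXT_ROW} without
 * buffering the rest of the row.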
*/ public abstract class SingleKeyValueComparisonFilter extends BooleanExpressionFilter { - private final SingleKeyValueTuple inputTuple = new SingleKeyValueTuple(); - private boolean matchedColumn; - protected byte[] cf; - protected byte[] cq; - - public SingleKeyValueComparisonFilter() { + private final SingleKeyValueTuple inputTuple = new SingleKeyValueTuple(); + private boolean matchedColumn; + protected byte[] cf; + protected byte[] cq; + + public SingleKeyValueComparisonFilter() { + } + + public SingleKeyValueComparisonFilter(Expression expression) { + super(expression); + init(); + } + + protected abstract int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, + int cqOffset, int cqLength); + + private void init() { + TraverseAllExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression expression) { + cf = expression.getColumnFamily(); + cq = expression.getColumnQualifier(); + return null; + } + }; + expression.accept(visitor); + } + + private boolean foundColumn() { + return inputTuple.size() > 0; + } + + @Override + public ReturnCode filterKeyValue(Cell keyValue) { + return filterCell(keyValue); + } + + @Override + public ReturnCode filterCell(Cell keyValue) { + if (this.matchedColumn) { + // We already found and matched the single column, all keys now pass + return ReturnCode.INCLUDE_AND_NEXT_COL; } - - public SingleKeyValueComparisonFilter(Expression expression) { - super(expression); - init(); + if (this.foundColumn()) { + // We found all the columns, but did not match the expression, so skip to next row + return ReturnCode.NEXT_ROW; } - - protected abstract int compare(byte[] cfBuf, int cfOffset, int cfLength, byte[] cqBuf, int cqOffset, int cqLength); - - private void init() { - TraverseAllExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - cf = expression.getColumnFamily(); - cq = expression.getColumnQualifier(); - return null; - } - }; - expression.accept(visitor); + if ( + compare(keyValue.getFamilyArray(), keyValue.getFamilyOffset(), keyValue.getFamilyLength(), + keyValue.getQualifierArray(), keyValue.getQualifierOffset(), keyValue.getQualifierLength()) + != 0 + ) { + // Remember the key in case this is the only key value we see. + // We'll need it if we have row key columns too. + inputTuple.setKey(keyValue); + // This is a key value we're not interested in + return ReturnCode.INCLUDE_AND_NEXT_COL; } + inputTuple.setCell(keyValue); - private boolean foundColumn() { - return inputTuple.size() > 0; + // We have the columns, so evaluate here + if (!Boolean.TRUE.equals(evaluate(inputTuple))) { + return ReturnCode.NEXT_ROW; } - - @Override - public ReturnCode filterKeyValue(Cell keyValue) { - return filterCell(keyValue); + this.matchedColumn = true; + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + + @Override + public boolean filterRow() { + // If column was found, return false if it was matched, true if it was not. 
+ if (foundColumn()) { + return !this.matchedColumn; } - - @Override - public ReturnCode filterCell(Cell keyValue) { - if (this.matchedColumn) { - // We already found and matched the single column, all keys now pass - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - if (this.foundColumn()) { - // We found all the columns, but did not match the expression, so skip to next row - return ReturnCode.NEXT_ROW; - } - if (compare(keyValue.getFamilyArray(), keyValue.getFamilyOffset(), keyValue.getFamilyLength(), - keyValue.getQualifierArray(), keyValue.getQualifierOffset(), keyValue.getQualifierLength()) != 0) { - // Remember the key in case this is the only key value we see. - // We'll need it if we have row key columns too. - inputTuple.setKey(keyValue); - // This is a key value we're not interested in - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - inputTuple.setCell(keyValue); - - // We have the columns, so evaluate here - if (!Boolean.TRUE.equals(evaluate(inputTuple))) { - return ReturnCode.NEXT_ROW; - } - this.matchedColumn = true; - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - - @Override - public boolean filterRow() { - // If column was found, return false if it was matched, true if it was not. - if (foundColumn()) { - return !this.matchedColumn; - } - // If column was not found, evaluate the expression here upon completion. - // This is required with certain expressions, for example, with IS NULL - // expressions where they'll evaluate to TRUE when the column being - // tested wasn't found. - // Since the filter is called also to position the scan initially, we have - // to guard against this by checking whether or not we've filtered in - // the key value (i.e. filterKeyValue was called and we found the keyValue - // for which we're looking). - if (inputTuple.hasKey() && expression.requiresFinalEvaluation()) { - return !Boolean.TRUE.equals(evaluate(inputTuple)); - } - // Finally, if we have no values, and we're not required to re-evaluate it - // just filter the row - return true; - } - - @Override - public void reset() { - inputTuple.reset(); - matchedColumn = false; - super.reset(); - } - - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - init(); - } - - @Override - public boolean isFamilyEssential(byte[] name) { - // Only the column families involved in the expression are essential. - // The others are for columns projected in the select expression - return Bytes.compareTo(cf, name) == 0; + // If column was not found, evaluate the expression here upon completion. + // This is required with certain expressions, for example, with IS NULL + // expressions where they'll evaluate to TRUE when the column being + // tested wasn't found. + // Since the filter is called also to position the scan initially, we have + // to guard against this by checking whether or not we've filtered in + // the key value (i.e. filterKeyValue was called and we found the keyValue + // for which we're looking). 
+ if (inputTuple.hasKey() && expression.requiresFinalEvaluation()) { + return !Boolean.TRUE.equals(evaluate(inputTuple)); } + // Finally, if we have no values, and we're not required to re-evaluate it + // just filter the row + return true; + } + + @Override + public void reset() { + inputTuple.reset(); + matchedColumn = false; + super.reset(); + } + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + init(); + } + + @Override + public boolean isFamilyEssential(byte[] name) { + // Only the column families involved in the expression are essential. + // The others are for columns projected in the select expression + return Bytes.compareTo(cf, name) == 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java index b0d75c20eb5..cec2bab7cc9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,683 +40,722 @@ import org.apache.phoenix.query.KeyRange.Bound; import org.apache.phoenix.schema.RowKeySchema; import org.apache.phoenix.schema.ValueSchema.Field; -import org.apache.phoenix.schema.types.PVarbinaryEncoded; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.util.ScanUtil.BytesComparator; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Objects; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.hash.HashFunction; import org.apache.phoenix.thirdparty.com.google.common.hash.Hasher; import org.apache.phoenix.thirdparty.com.google.common.hash.Hashing; - +import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.ScanUtil; +import org.apache.phoenix.util.ScanUtil.BytesComparator; +import org.apache.phoenix.util.SchemaUtil; /** - * - * Filter that seeks based on CNF containing anded and ored key ranges - * - * TODO: figure out when to reset/not reset position array - * - * + * Filter that seeks based on CNF containing anded and ored key ranges TODO: figure out when to + * reset/not reset position array * @since 0.1 */ public class SkipScanFilter extends FilterBase implements Writable { - private enum Terminate {AT, AFTER}; - // Conjunctive normal form of or-ed ranges or point lookups - private List> slots; - // How far each slot spans minus one. 
We only handle a single column span currently - private int[] slotSpan; - // schema of the row key - private RowKeySchema schema; - private boolean includeMultipleVersions; - // current position for each slot - private int[] position; - // buffer used for skip hint - private int maxKeyLength; - private byte[] startKey; - private int startKeyLength; - private byte[] endKey; - private int endKeyLength; - private boolean isDone; - private int offset; - private boolean isMultiKeyPointLookup; - private Map nextCellHintMap = - new HashMap(); - - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - /** - * We know that initially the first row will be positioned at or - * after the first possible key. - */ - public SkipScanFilter() { - } - - public SkipScanFilter(SkipScanFilter filter, boolean includeMultipleVersions) { - this(filter.slots, filter.slotSpan, filter.schema, includeMultipleVersions, - filter.isMultiKeyPointLookup); - } - - public SkipScanFilter(SkipScanFilter filter, boolean includeMultipleVersions, - boolean isMultiKeyPointLookup) { - this(filter.slots, filter.slotSpan, filter.schema, includeMultipleVersions, - isMultiKeyPointLookup); - } - - public SkipScanFilter(List> slots, RowKeySchema schema) { - this(slots, ScanUtil.getDefaultSlotSpans(slots.size()), schema, false); - } - - public SkipScanFilter(List> slots, RowKeySchema schema, boolean isMultiKeyPointLookup) { - this(slots, ScanUtil.getDefaultSlotSpans(slots.size()), schema, isMultiKeyPointLookup); - } - - public SkipScanFilter(List> slots, int[] slotSpan, RowKeySchema schema, - boolean isMultiKeyPointLookup) { - this(slots, slotSpan, schema, false, isMultiKeyPointLookup); - } - - private SkipScanFilter(List> slots, int[] slotSpan, RowKeySchema schema, - boolean includeMultipleVersions, boolean isMultiKeyPointLookup) { - init(slots, slotSpan, schema, includeMultipleVersions, isMultiKeyPointLookup); - } - - public void setOffset(int offset) { - this.offset = offset; - } - public int getOffset() { - return offset; - } - public boolean isMultiKeyPointLookup() { - return isMultiKeyPointLookup; - } - - public List getPointLookupKeyRanges() { - return isMultiKeyPointLookup ? slots.get(0) : Collections.emptyList(); - } - - private void init(List> slots, int[] slotSpan, RowKeySchema schema, - boolean includeMultipleVersions, boolean isPointLookup) { - for (List ranges : slots) { - if (ranges.isEmpty()) { - throw new IllegalStateException(); - } + private enum Terminate { + AT, + AFTER + }; + + // Conjunctive normal form of or-ed ranges or point lookups + private List> slots; + // How far each slot spans minus one. We only handle a single column span currently + private int[] slotSpan; + // schema of the row key + private RowKeySchema schema; + private boolean includeMultipleVersions; + // current position for each slot + private int[] position; + // buffer used for skip hint + private int maxKeyLength; + private byte[] startKey; + private int startKeyLength; + private byte[] endKey; + private int endKeyLength; + private boolean isDone; + private int offset; + private boolean isMultiKeyPointLookup; + private Map nextCellHintMap = + new HashMap(); + + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + /** + * We know that initially the first row will be positioned at or after the first possible key. 
+ */ + public SkipScanFilter() { + } + + public SkipScanFilter(SkipScanFilter filter, boolean includeMultipleVersions) { + this(filter.slots, filter.slotSpan, filter.schema, includeMultipleVersions, + filter.isMultiKeyPointLookup); + } + + public SkipScanFilter(SkipScanFilter filter, boolean includeMultipleVersions, + boolean isMultiKeyPointLookup) { + this(filter.slots, filter.slotSpan, filter.schema, includeMultipleVersions, + isMultiKeyPointLookup); + } + + public SkipScanFilter(List> slots, RowKeySchema schema) { + this(slots, ScanUtil.getDefaultSlotSpans(slots.size()), schema, false); + } + + public SkipScanFilter(List> slots, RowKeySchema schema, + boolean isMultiKeyPointLookup) { + this(slots, ScanUtil.getDefaultSlotSpans(slots.size()), schema, isMultiKeyPointLookup); + } + + public SkipScanFilter(List> slots, int[] slotSpan, RowKeySchema schema, + boolean isMultiKeyPointLookup) { + this(slots, slotSpan, schema, false, isMultiKeyPointLookup); + } + + private SkipScanFilter(List> slots, int[] slotSpan, RowKeySchema schema, + boolean includeMultipleVersions, boolean isMultiKeyPointLookup) { + init(slots, slotSpan, schema, includeMultipleVersions, isMultiKeyPointLookup); + } + + public void setOffset(int offset) { + this.offset = offset; + } + + public int getOffset() { + return offset; + } + + public boolean isMultiKeyPointLookup() { + return isMultiKeyPointLookup; + } + + public List getPointLookupKeyRanges() { + return isMultiKeyPointLookup ? slots.get(0) : Collections.emptyList(); + } + + private void init(List> slots, int[] slotSpan, RowKeySchema schema, + boolean includeMultipleVersions, boolean isPointLookup) { + for (List ranges : slots) { + if (ranges.isEmpty()) { + throw new IllegalStateException(); + } + } + this.slots = slots; + this.slotSpan = slotSpan; + this.schema = schema; + this.maxKeyLength = SchemaUtil.getMaxKeyLength(schema, slots); + this.position = new int[slots.size()]; + this.startKey = new byte[maxKeyLength]; + this.endKey = new byte[maxKeyLength]; + this.endKeyLength = 0; + this.includeMultipleVersions = includeMultipleVersions; + this.isMultiKeyPointLookup = isPointLookup; + } + + // Exposed for testing. 
+ public List> getSlots() { + return slots; + } + + @Override + public boolean filterAllRemaining() { + return isDone; + } + + @Override + public ReturnCode filterKeyValue(Cell kv) { + return filterCell(kv); + } + + @Override + public ReturnCode filterCell(Cell kv) { + ReturnCode code = navigate(kv.getRowArray(), kv.getRowOffset() + offset, + kv.getRowLength() - offset, Terminate.AFTER); + if (code == ReturnCode.SEEK_NEXT_USING_HINT) { + setNextCellHint(kv); + } + return code; + } + + private void setNextCellHint(Cell kv) { + ImmutableBytesWritable family = + new ImmutableBytesWritable(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()); + Cell nextCellHint = null; + if (offset == 0) { + nextCellHint = new KeyValue(startKey, 0, startKeyLength, null, 0, 0, null, 0, 0, + HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0); + } else { // Prepend key of NextCellHint with bytes before offset + byte[] nextKey = new byte[offset + startKeyLength]; + System.arraycopy(kv.getRowArray(), kv.getRowOffset(), nextKey, 0, offset); + System.arraycopy(startKey, 0, nextKey, offset, startKeyLength); + nextCellHint = new KeyValue(nextKey, 0, nextKey.length, null, 0, 0, null, 0, 0, + HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0); + } + Cell previousCellHint = nextCellHintMap.put(family, nextCellHint); + // we should either have no previous hint, or the next hint should always come after the + // previous hint + boolean isHintAfterPrevious = + previousCellHint == null || Bytes.compareTo(nextCellHint.getRowArray(), + nextCellHint.getRowOffset(), nextCellHint.getRowLength(), previousCellHint.getRowArray(), + previousCellHint.getRowOffset(), previousCellHint.getRowLength()) > 0; + if (!isHintAfterPrevious) { + String msg = "The next hint must come after previous hint (prev=" + previousCellHint + + ", next=" + nextCellHint + ", kv=" + kv + ")"; + throw new IllegalStateException(msg); + } + } + + @Override + public Cell getNextCellHint(Cell kv) { + return isDone + ? null + : nextCellHintMap.get(new ImmutableBytesWritable(kv.getFamilyArray(), kv.getFamilyOffset(), + kv.getFamilyLength())); + } + + public boolean hasIntersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) { + return intersect(lowerInclusiveKey, upperExclusiveKey, null); + } + + /** + * Intersect the ranges of this filter with the ranges form by lowerInclusive and upperInclusive + * key and filter out the ones that are not included in the region. Return the new intersected + * SkipScanFilter or null if there is no intersection. 
+ */ + public SkipScanFilter intersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) { + List> newSlots = Lists.newArrayListWithCapacity(slots.size()); + if (intersect(lowerInclusiveKey, upperExclusiveKey, newSlots)) { + return new SkipScanFilter(newSlots, slotSpan, schema, isMultiKeyPointLookup); + } + return null; + } + + private boolean areSlotsSingleKey(int startPosInclusive, int endPosExclusive) { + for (int i = startPosInclusive; i < endPosExclusive; i++) { + if (!slots.get(i).get(position[i]).isSingleKey()) { + return false; + } + } + return true; + } + + private void resetState() { + isDone = false; + endKeyLength = 0; + Arrays.fill(position, 0); + } + + private boolean intersect(final byte[] lowerInclusiveKey, final byte[] upperExclusiveKey, + List> newSlots) { + resetState(); + boolean lowerUnbound = (lowerInclusiveKey.length == 0); + int startPos = 0; + int lastSlot = slots.size() - 1; + if (!lowerUnbound) { + // Find the position of the first slot of the lower range + schema.next(ptr, 0, schema.iterator(lowerInclusiveKey, ptr), slotSpan[0]); + startPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, 0, + schema.getField(0)); + // Lower range is past last upper range of first slot, so cannot possibly be in range + if (startPos >= slots.get(0).size()) { + return false; + } + } + boolean upperUnbound = (upperExclusiveKey.length == 0); + int endPos = slots.get(0).size() - 1; + if (!upperUnbound) { + // Find the position of the first slot of the upper range + schema.next(ptr, 0, schema.iterator(upperExclusiveKey, ptr), slotSpan[0]); + endPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, startPos, + schema.getField(0)); + // Upper range lower than first lower range of first slot, so cannot possibly be in range + // if (endPos == 0 && Bytes.compareTo(upperExclusiveKey, slots.get(0).get(0).getLowerRange()) + // <= 0) { + // return false; + // } + // Past last position, so we can include everything from the start position + if (endPos >= slots.get(0).size()) { + upperUnbound = true; + endPos = slots.get(0).size() - 1; + } else if ( + slots.get(0).get(endPos).compareLowerToUpperBound(upperExclusiveKey, + ScanUtil.getComparator(schema.getField(0))) >= 0 + ) { + // We know that the endPos range is higher than the previous range, but we need + // to test if it ends before the next range starts. + endPos--; + } + if (endPos < startPos) { + return false; + } + + } + // Short circuit out if we only have a single set of keys + if (slots.size() == 1) { + if (newSlots != null) { + List newRanges = slots.get(0).subList(startPos, endPos + 1); + newSlots.add(newRanges); + } + return true; + } + if (!lowerUnbound) { + position[0] = startPos; + navigate(lowerInclusiveKey, 0, lowerInclusiveKey.length, Terminate.AFTER); + if (filterAllRemaining()) { + return false; + } + } + if (upperUnbound) { + if (newSlots != null) { + newSlots.add(slots.get(0).subList(startPos, endPos + 1)); + newSlots.addAll(slots.subList(1, slots.size())); + } + return true; + } + int[] lowerPosition = Arrays.copyOf(position, position.length); + // Navigate to the upperExclusiveKey, but not past it + // TODO: We're including everything between the lowerPosition and end position, which is + // more than we need. We can optimize this by tracking whether each range in each slot position + // intersects. 
+ ReturnCode endCode = navigate(upperExclusiveKey, 0, upperExclusiveKey.length, Terminate.AT); + if (endCode == ReturnCode.INCLUDE || endCode == ReturnCode.INCLUDE_AND_NEXT_COL) { + setStartKey(); + // If the upperExclusiveKey is equal to the start key, we've gone one position too far, since + // our upper key is exclusive. In that case, go to the previous key + if ( + Bytes.compareTo(startKey, 0, startKeyLength, upperExclusiveKey, 0, upperExclusiveKey.length) + == 0 + && (previousPosition(lastSlot) < 0 || position[0] < lowerPosition[0]) + ) { + // If by backing up one position we have an empty range, then return + return false; + } + } else if (endCode == ReturnCode.SEEK_NEXT_USING_HINT) { + // The upperExclusive key is smaller than the slots stored in the position. Check if it's the + // same position + // as the slots for lowerInclusive. If so, there is no intersection. + if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length - 1)) { + return false; + } + } else if (filterAllRemaining()) { + // We wrapped around the position array. We know there's an intersection, but it can only at + // the last + // slot position. So reset the position array here to the last position index for each slot. + // This will + // be used below as the end bounds to formulate the list of intersecting slots. + for (int i = 0; i <= lastSlot; i++) { + position[i] = slots.get(i).size() - 1; + } + } + int prevRowKeyPos = -1; + ImmutableBytesWritable lowerPtr = new ImmutableBytesWritable(); + ImmutableBytesWritable upperPtr = new ImmutableBytesWritable(); + schema.iterator(lowerInclusiveKey, lowerPtr); + schema.iterator(upperExclusiveKey, upperPtr); + // Copy inclusive all positions + for (int i = 0; i <= lastSlot; i++) { + List newRanges = + slots.get(i).subList(lowerPosition[i], Math.min(position[i] + 1, slots.get(i).size())); + if (newRanges.isEmpty()) { + return false; + } + if (newSlots != null) { + newSlots.add(newRanges); + } + // Must include all "less-significant" slot values if: + // 1) a more-significant slot was incremented + if (position[i] > lowerPosition[i]) { + if (newSlots != null) { + newSlots.addAll(slots.subList(i + 1, slots.size())); } - this.slots = slots; - this.slotSpan = slotSpan; - this.schema = schema; - this.maxKeyLength = SchemaUtil.getMaxKeyLength(schema, slots); - this.position = new int[slots.size()]; - this.startKey = new byte[maxKeyLength]; - this.endKey = new byte[maxKeyLength]; - this.endKeyLength = 0; - this.includeMultipleVersions = includeMultipleVersions; - this.isMultiKeyPointLookup = isPointLookup; - } - - // Exposed for testing. - public List> getSlots() { - return slots; - } - - @Override - public boolean filterAllRemaining() { - return isDone; - } - - @Override - public ReturnCode filterKeyValue(Cell kv) { - return filterCell(kv); - } - - @Override - public ReturnCode filterCell(Cell kv) { - ReturnCode code = navigate(kv.getRowArray(), kv.getRowOffset() + offset,kv.getRowLength()- offset,Terminate.AFTER); - if (code == ReturnCode.SEEK_NEXT_USING_HINT) { - setNextCellHint(kv); + break; + } + // 2) we're at a slot containing a range and the values differ between the lower and upper + // range, + // since less-significant slots may be lower after traversal than where they started. 
+ if (!slots.get(i).get(position[i]).isSingleKey()) { + int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i); + // Position lowerPtr/upperPtr within lowerInclusiveKey/upperExclusiveKey at value for slot i + // The reposition method will do this incrementally, where we we're initially have + // prevRowKeyPos = -1. + schema.reposition(lowerPtr, prevRowKeyPos, rowKeyPos, 0, lowerInclusiveKey.length, + slotSpan[i]); + schema.reposition(upperPtr, prevRowKeyPos, rowKeyPos, 0, upperExclusiveKey.length, + slotSpan[i]); + // If we have a range and the values differ, we must include all slots that are less + // significant. + // For example: [A-D][1,23], the lower/upper keys could be B5/C2, where the C is in range + // and the + // next slot value of 2 is less than the next corresponding slot value of the 5. + if (lowerPtr.compareTo(upperPtr) != 0) { + if (newSlots != null) { + newSlots.addAll(slots.subList(i + 1, slots.size())); + } + break; } - return code; + prevRowKeyPos = rowKeyPos; + } } + return true; + } - private void setNextCellHint(Cell kv) { - ImmutableBytesWritable family = new ImmutableBytesWritable(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()); - Cell nextCellHint = null; - if (offset == 0) { - nextCellHint = new KeyValue(startKey, 0, startKeyLength, - null, 0, 0, null, 0, 0, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0); - } else { // Prepend key of NextCellHint with bytes before offset - byte[] nextKey = new byte[offset + startKeyLength]; - System.arraycopy(kv.getRowArray(), kv.getRowOffset(), nextKey, 0, offset); - System.arraycopy(startKey, 0, nextKey, offset, startKeyLength); - nextCellHint = new KeyValue(nextKey, 0, nextKey.length, - null, 0, 0, null, 0, 0, HConstants.LATEST_TIMESTAMP, Type.Maximum, null, 0, 0); - } - Cell previousCellHint = nextCellHintMap.put(family, nextCellHint); - // we should either have no previous hint, or the next hint should always come after the previous hint - boolean isHintAfterPrevious = previousCellHint == null - || Bytes.compareTo(nextCellHint.getRowArray(), nextCellHint.getRowOffset(), - nextCellHint.getRowLength(), previousCellHint.getRowArray(), previousCellHint - .getRowOffset(), previousCellHint.getRowLength()) > 0; - if (!isHintAfterPrevious) { - String msg = "The next hint must come after previous hint (prev=" + previousCellHint + ", next=" + nextCellHint + ", kv=" + kv + ")"; - throw new IllegalStateException(msg); - } - } - - @Override - public Cell getNextCellHint(Cell kv) { - return isDone ? null : nextCellHintMap.get(new ImmutableBytesWritable(kv.getFamilyArray(), - kv.getFamilyOffset(), kv.getFamilyLength())); + private int previousPosition(int i) { + while (i >= 0 && --position[i] < 0) { + position[i] = slots.get(i).size() - 1; + i--; } + return i; + } - public boolean hasIntersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) { - return intersect(lowerInclusiveKey, upperExclusiveKey, null); - } - /** - * Intersect the ranges of this filter with the ranges form by lowerInclusive and upperInclusive - * key and filter out the ones that are not included in the region. Return the new intersected - * SkipScanFilter or null if there is no intersection. 
- */ - public SkipScanFilter intersect(byte[] lowerInclusiveKey, byte[] upperExclusiveKey) { - List> newSlots = Lists.newArrayListWithCapacity(slots.size()); - if (intersect(lowerInclusiveKey, upperExclusiveKey, newSlots)) { - return new SkipScanFilter(newSlots, slotSpan, schema, isMultiKeyPointLookup); - } - return null; - } - - private boolean areSlotsSingleKey(int startPosInclusive, int endPosExclusive) { - for (int i = startPosInclusive; i < endPosExclusive; i++) { - if (!slots.get(i).get(position[i]).isSingleKey()) { - return false; - } - } - return true; - } - - private void resetState() { - isDone = false; - endKeyLength = 0; - Arrays.fill(position, 0); - } - - private boolean intersect(final byte[] lowerInclusiveKey, final byte[] upperExclusiveKey, List> newSlots) { - resetState(); - boolean lowerUnbound = (lowerInclusiveKey.length == 0); - int startPos = 0; - int lastSlot = slots.size()-1; - if (!lowerUnbound) { - // Find the position of the first slot of the lower range - schema.next(ptr, 0, schema.iterator(lowerInclusiveKey,ptr), slotSpan[0]); - startPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, 0, schema.getField(0)); - // Lower range is past last upper range of first slot, so cannot possibly be in range - if (startPos >= slots.get(0).size()) { - return false; - } - } - boolean upperUnbound = (upperExclusiveKey.length == 0); - int endPos = slots.get(0).size()-1; - if (!upperUnbound) { - // Find the position of the first slot of the upper range - schema.next(ptr, 0, schema.iterator(upperExclusiveKey,ptr), slotSpan[0]); - endPos = ScanUtil.searchClosestKeyRangeWithUpperHigherThanPtr(slots.get(0), ptr, startPos, schema.getField(0)); - // Upper range lower than first lower range of first slot, so cannot possibly be in range -// if (endPos == 0 && Bytes.compareTo(upperExclusiveKey, slots.get(0).get(0).getLowerRange()) <= 0) { -// return false; -// } - // Past last position, so we can include everything from the start position - if (endPos >= slots.get(0).size()) { - upperUnbound = true; - endPos = slots.get(0).size()-1; - } else if (slots.get(0).get(endPos).compareLowerToUpperBound(upperExclusiveKey, ScanUtil.getComparator(schema.getField(0))) >= 0) { - // We know that the endPos range is higher than the previous range, but we need - // to test if it ends before the next range starts. - endPos--; - } - if (endPos < startPos) { - return false; - } - - } - // Short circuit out if we only have a single set of keys - if (slots.size() == 1) { - if (newSlots != null) { - List newRanges = slots.get(0).subList(startPos, endPos+1); - newSlots.add(newRanges); - } - return true; - } - if (!lowerUnbound) { - position[0] = startPos; - navigate(lowerInclusiveKey, 0, lowerInclusiveKey.length, Terminate.AFTER); - if (filterAllRemaining()) { - return false; - } - } - if (upperUnbound) { - if (newSlots != null) { - newSlots.add(slots.get(0).subList(startPos, endPos+1)); - newSlots.addAll(slots.subList(1, slots.size())); - } - return true; - } - int[] lowerPosition = Arrays.copyOf(position, position.length); - // Navigate to the upperExclusiveKey, but not past it - // TODO: We're including everything between the lowerPosition and end position, which is - // more than we need. We can optimize this by tracking whether each range in each slot position - // intersects. 
- ReturnCode endCode = navigate(upperExclusiveKey, 0, upperExclusiveKey.length, Terminate.AT); - if (endCode == ReturnCode.INCLUDE || endCode == ReturnCode.INCLUDE_AND_NEXT_COL) { - setStartKey(); - // If the upperExclusiveKey is equal to the start key, we've gone one position too far, since - // our upper key is exclusive. In that case, go to the previous key - if (Bytes.compareTo(startKey, 0, startKeyLength, upperExclusiveKey, 0, upperExclusiveKey.length) == 0 && - (previousPosition(lastSlot) < 0 || position[0] < lowerPosition[0])) { - // If by backing up one position we have an empty range, then return - return false; - } - } else if (endCode == ReturnCode.SEEK_NEXT_USING_HINT) { - // The upperExclusive key is smaller than the slots stored in the position. Check if it's the same position - // as the slots for lowerInclusive. If so, there is no intersection. - if (Arrays.equals(lowerPosition, position) && areSlotsSingleKey(0, position.length-1)) { - return false; - } - } else if (filterAllRemaining()) { - // We wrapped around the position array. We know there's an intersection, but it can only at the last - // slot position. So reset the position array here to the last position index for each slot. This will - // be used below as the end bounds to formulate the list of intersecting slots. - for (int i = 0; i <= lastSlot; i++) { - position[i] = slots.get(i).size() - 1; - } - } - int prevRowKeyPos = -1; - ImmutableBytesWritable lowerPtr = new ImmutableBytesWritable(); - ImmutableBytesWritable upperPtr = new ImmutableBytesWritable(); - schema.iterator(lowerInclusiveKey, lowerPtr); - schema.iterator(upperExclusiveKey, upperPtr); - // Copy inclusive all positions - for (int i = 0; i <= lastSlot; i++) { - List newRanges = slots.get(i).subList(lowerPosition[i], Math.min(position[i] + 1, slots.get(i).size())); - if (newRanges.isEmpty()) { - return false; - } - if (newSlots != null) { - newSlots.add(newRanges); - } - // Must include all "less-significant" slot values if: - // 1) a more-significant slot was incremented - if (position[i] > lowerPosition[i]) { - if (newSlots != null) { - newSlots.addAll(slots.subList(i+1, slots.size())); - } - break; - } - // 2) we're at a slot containing a range and the values differ between the lower and upper range, - // since less-significant slots may be lower after traversal than where they started. - if (!slots.get(i).get(position[i]).isSingleKey()) { - int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i); - // Position lowerPtr/upperPtr within lowerInclusiveKey/upperExclusiveKey at value for slot i - // The reposition method will do this incrementally, where we we're initially have prevRowKeyPos = -1. - schema.reposition(lowerPtr, prevRowKeyPos, rowKeyPos, 0, lowerInclusiveKey.length, slotSpan[i]); - schema.reposition(upperPtr, prevRowKeyPos, rowKeyPos, 0, upperExclusiveKey.length, slotSpan[i]); - // If we have a range and the values differ, we must include all slots that are less significant. - // For example: [A-D][1,23], the lower/upper keys could be B5/C2, where the C is in range and the - // next slot value of 2 is less than the next corresponding slot value of the 5. - if (lowerPtr.compareTo(upperPtr) != 0) { - if (newSlots != null) { - newSlots.addAll(slots.subList(i+1, slots.size())); - } - break; - } - prevRowKeyPos = rowKeyPos; - } - } - return true; - } + private ReturnCode getIncludeReturnCode() { + return includeMultipleVersions ? 
ReturnCode.INCLUDE : ReturnCode.INCLUDE_AND_NEXT_COL; + } - private int previousPosition(int i) { - while (i >= 0 && --position[i] < 0) { - position[i] = slots.get(i).size()-1; - i--; - } - return i; - } - - private ReturnCode getIncludeReturnCode() { - return includeMultipleVersions ? ReturnCode.INCLUDE : ReturnCode.INCLUDE_AND_NEXT_COL; - } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="QBA_QUESTIONABLE_BOOLEAN_ASSIGNMENT", - justification="Assignment designed to work this way.") - private ReturnCode navigate(final byte[] currentKey, final int offset, final int length, Terminate terminate) { - int nSlots = slots.size(); - - // First check to see if we're in-range until we reach our end key - if (endKeyLength > 0) { - if (Bytes.compareTo(currentKey, offset, length, endKey, 0, endKeyLength) < 0) { - return getIncludeReturnCode(); - } - - // If key range of last slot is a single key, we can increment our position - // since we know we'll be past the current row after including it. - if (slots.get(nSlots-1).get(position[nSlots-1]).isSingleKey()) { - if (nextPosition(nSlots-1) < 0) { - // Current row will be included, but we have no more - isDone = true; - return ReturnCode.NEXT_ROW; - } - } - else { - // Reset the positions to zero from the next slot after the earliest ranged slot, since the - // next key could be bigger at this ranged slot, and smaller than the current position of - // less significant slots. - int earliestRangeIndex = nSlots-1; - for (int i = 0; i < nSlots; i++) { - if (!slots.get(i).get(position[i]).isSingleKey()) { - earliestRangeIndex = i; - break; - } - } - Arrays.fill(position, earliestRangeIndex+1, position.length, 0); - } - } - endKeyLength = 0; - - // We could have included the previous - if (isDone) { - return ReturnCode.NEXT_ROW; - } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "QBA_QUESTIONABLE_BOOLEAN_ASSIGNMENT", + justification = "Assignment designed to work this way.") + private ReturnCode navigate(final byte[] currentKey, final int offset, final int length, + Terminate terminate) { + int nSlots = slots.size(); - int i = 0; - boolean seek = false; - int earliestRangeIndex = nSlots-1; - int minOffset = offset; - int maxOffset = schema.iterator(currentKey, minOffset, length, ptr); - schema.next(ptr, ScanUtil.getRowKeyPosition(slotSpan, i), maxOffset, slotSpan[i]); - while (true) { - // Comparator depends on field in schema - BytesComparator comparator = ScanUtil.getComparator(schema.getField(ScanUtil.getRowKeyPosition(slotSpan, i))); - // Increment to the next range while the upper bound of our current slot is less than our current key - while (position[i] < slots.get(i).size() && slots.get(i).get(position[i]).compareUpperToLowerBound(ptr, comparator) < 0) { - position[i]++; - } - Arrays.fill(position, i+1, position.length, 0); - if (position[i] >= slots.get(i).size()) { - // Our current key is bigger than the last range of the current slot. - // If navigating after current key, backtrack and increment the key of the previous slot values. - // If navigating to current key, just return - if (terminate == Terminate.AT) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } - if (i == 0) { - isDone = true; - return ReturnCode.NEXT_ROW; - } - // Increment key and backtrack until in range. We know at this point that we'll be - // issuing a seek next hint. - seek = true; - Arrays.fill(position, i, position.length, 0); - int j = i - 1; - // If we're positioned at a single key, no need to copy the current key and get the next key . 
- // Instead, just increment to the next key and continue. - boolean incremented = false; - while (j >= 0 && slots.get(j).get(position[j]).isSingleKey() && (incremented=true) && (position[j] = (position[j] + 1) % slots.get(j).size()) == 0) { - j--; - incremented = false; - } - if (j < 0) { - isDone = true; - return ReturnCode.NEXT_ROW; - } - if (incremented) { - // Continue the loop after setting the start key, because our start key maybe smaller than - // the current key, so we'll end up incrementing the start key until it's bigger than the - // current key. - setStartKey(); - schema.reposition(ptr, ScanUtil.getRowKeyPosition(slotSpan, i), ScanUtil.getRowKeyPosition(slotSpan, j), minOffset, maxOffset, slotSpan[j]); - } else { - //for PHOENIX-3705, now ptr is still point to slot i, we must make ptr point to slot j+1, - //because following setStartKey method will copy rowKey columns before ptr to startKey and - //then copy the lower bound of slots from j+1, according to position array, so if we do not - //make ptr point to slot j+1 before setStartKey,the startKey would be erroneous. - schema.reposition( - ptr, - ScanUtil.getRowKeyPosition(slotSpan, i), - ScanUtil.getRowKeyPosition(slotSpan, j + 1), - minOffset, - maxOffset, - slotSpan[j + 1]); - int currentLength = setStartKey(ptr, minOffset, j+1, nSlots, false); - // From here on, we use startKey as our buffer (resetting minOffset and maxOffset) - // We've copied the part of the current key above that we need into startKey - // Reinitialize the iterator to be positioned at previous slot position - minOffset = 0; - maxOffset = startKeyLength; - //make ptr point to the first rowKey column of slot j,why we need slotSpan[j] because for Row Value Constructor(RVC), - //slot j may span multiple rowKey columns, so the length of ptr must consider the slotSpan[j]. - schema.iterator(startKey, minOffset, maxOffset, ptr, ScanUtil.getRowKeyPosition(slotSpan, j)+1,slotSpan[j]); - // Do nextKey after setting the accessor b/c otherwise the null byte may have - // been incremented causing us not to find it - ByteUtil.nextKey(startKey, currentLength); - } - i = j; - } else if (slots.get(i).get(position[i]).compareLowerToUpperBound(ptr, comparator) > 0) { - // Our current key is less than the lower range of the current position in the current slot. - // Seek to the lower range, since it's bigger than the current key - setStartKey(ptr, minOffset, i, nSlots, false); - return ReturnCode.SEEK_NEXT_USING_HINT; - } else { // We're in range, check the next slot - if (!slots.get(i).get(position[i]).isSingleKey() && i < earliestRangeIndex) { - earliestRangeIndex = i; - } - // If we're past the last slot or we know we're seeking to the next (in - // which case the previously updated slot was verified to be within the - // range, so we don't need to check the rest of the slots. If we were - // to check the rest of the slots, we'd get into trouble because we may - // have a null byte that was incremented which screws up our schema.next call) - if (i == nSlots-1 || seek) { - break; - } - i++; - // If we run out of slots in our key, it means we have a partial key. - int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i); - int slotSpans = slotSpan[i]; - if (schema.next(ptr, rowKeyPos, maxOffset, slotSpans) < rowKeyPos + slotSpans) { - // If the rest of the slots are checking for IS NULL, then break because - // that's the case (since we don't store trailing nulls). 
- if (allTrailingNulls(i)) { - break; - } - // Otherwise we seek to the next start key because we're before it now - setStartKey(ptr, minOffset, i, nSlots, true); - return ReturnCode.SEEK_NEXT_USING_HINT; - } - } - } - - if (seek) { - return ReturnCode.SEEK_NEXT_USING_HINT; - } - // Else, we're in range for all slots and can include this row plus all rows - // up to the upper range of our last slot. We do this for ranges and single keys - // since we potentially have multiple key values for the same row key. - setEndKey(ptr, minOffset, i); + // First check to see if we're in-range until we reach our end key + if (endKeyLength > 0) { + if (Bytes.compareTo(currentKey, offset, length, endKey, 0, endKeyLength) < 0) { return getIncludeReturnCode(); - } - - private boolean allTrailingNulls(int i) { - for (; i < slots.size(); i++) { - List keyRanges = slots.get(i); - if (keyRanges.size() != 1) { - return false; - } - KeyRange keyRange = keyRanges.get(0); - if (!keyRange.isSingleKey()) { - return false; - } - if (keyRange.getLowerRange().length != 0) { - return false; - } + } + + // If key range of last slot is a single key, we can increment our position + // since we know we'll be past the current row after including it. + if (slots.get(nSlots - 1).get(position[nSlots - 1]).isSingleKey()) { + if (nextPosition(nSlots - 1) < 0) { + // Current row will be included, but we have no more + isDone = true; + return ReturnCode.NEXT_ROW; } - return true; - } - - private int nextPosition(int i) { - while (i >= 0 && slots.get(i).get(position[i]).isSingleKey() && (position[i] = (position[i] + 1) % slots.get(i).size()) == 0) { - i--; + } else { + // Reset the positions to zero from the next slot after the earliest ranged slot, since the + // next key could be bigger at this ranged slot, and smaller than the current position of + // less significant slots. 
+ int earliestRangeIndex = nSlots - 1; + for (int i = 0; i < nSlots; i++) { + if (!slots.get(i).get(position[i]).isSingleKey()) { + earliestRangeIndex = i; + break; + } } - return i; - } - - private void setStartKey() { - startKeyLength = setKey(Bound.LOWER, startKey, 0, 0); - } - - private int setStartKey(ImmutableBytesWritable ptr, int offset, int i, int nSlots, boolean atEndOfKey) { - int length = ptr.getOffset() - offset; - startKey = copyKey(startKey, length + this.maxKeyLength + 1, ptr.get(), offset, length); - startKeyLength = length; - // Add separator byte if we're at end of the key, since trailing separator bytes are stripped - if (atEndOfKey && i > 0 && i - 1 < nSlots) { - Field field = schema.getField(i - 1); - if (!field.getDataType().isFixedWidth()) { - byte[] sepBytes = SchemaUtil.getSeparatorBytes(field.getDataType(), - schema.rowKeyOrderOptimizable(), - true, - field.getSortOrder()); - for (byte sepByte : sepBytes) { - startKey[startKeyLength++] = sepByte; - } - } + Arrays.fill(position, earliestRangeIndex + 1, position.length, 0); + } + } + endKeyLength = 0; + + // We could have included the previous + if (isDone) { + return ReturnCode.NEXT_ROW; + } + + int i = 0; + boolean seek = false; + int earliestRangeIndex = nSlots - 1; + int minOffset = offset; + int maxOffset = schema.iterator(currentKey, minOffset, length, ptr); + schema.next(ptr, ScanUtil.getRowKeyPosition(slotSpan, i), maxOffset, slotSpan[i]); + while (true) { + // Comparator depends on field in schema + BytesComparator comparator = + ScanUtil.getComparator(schema.getField(ScanUtil.getRowKeyPosition(slotSpan, i))); + // Increment to the next range while the upper bound of our current slot is less than our + // current key + while ( + position[i] < slots.get(i).size() + && slots.get(i).get(position[i]).compareUpperToLowerBound(ptr, comparator) < 0 + ) { + position[i]++; + } + Arrays.fill(position, i + 1, position.length, 0); + if (position[i] >= slots.get(i).size()) { + // Our current key is bigger than the last range of the current slot. + // If navigating after current key, backtrack and increment the key of the previous slot + // values. 
+ // If navigating to current key, just return + if (terminate == Terminate.AT) { + return ReturnCode.SEEK_NEXT_USING_HINT; } - startKeyLength += setKey(Bound.LOWER, startKey, startKeyLength, i); - return length; - } - - private int setEndKey(ImmutableBytesWritable ptr, int offset, int i) { - int length = ptr.getOffset() - offset; - endKey = copyKey(endKey, length + this.maxKeyLength, ptr.get(), offset, length); - endKeyLength = length; - endKeyLength += setKey(Bound.UPPER, endKey, length, i); - return length; - } - - private int setKey(Bound bound, byte[] key, int keyOffset, int slotStartIndex) { - return ScanUtil.setKey(schema, slots, slotSpan, position, bound, key, keyOffset, slotStartIndex, position.length); - } - - private static byte[] copyKey(byte[] targetKey, int targetLength, byte[] sourceKey, int offset, int length) { - if (targetLength > targetKey.length) { - targetKey = new byte[targetLength]; + if (i == 0) { + isDone = true; + return ReturnCode.NEXT_ROW; } - System.arraycopy(sourceKey, offset, targetKey, 0, length); - return targetKey; - } - - private static final int KEY_RANGE_LENGTH_BITS = 21; - private static final int SLOT_SPAN_BITS = 32 - KEY_RANGE_LENGTH_BITS; - - @Override - public void readFields(DataInput in) throws IOException { - RowKeySchema schema = new RowKeySchema(); - schema.readFields(in); - int andLen = in.readInt(); - boolean includeMultipleVersions = false; - if (andLen < 0) { - andLen = -andLen; - includeMultipleVersions = true; + // Increment key and backtrack until in range. We know at this point that we'll be + // issuing a seek next hint. + seek = true; + Arrays.fill(position, i, position.length, 0); + int j = i - 1; + // If we're positioned at a single key, no need to copy the current key and get the next key + // . + // Instead, just increment to the next key and continue. + boolean incremented = false; + while ( + j >= 0 && slots.get(j).get(position[j]).isSingleKey() && (incremented = true) + && (position[j] = (position[j] + 1) % slots.get(j).size()) == 0 + ) { + j--; + incremented = false; } - int[] slotSpan = new int[andLen]; - List> slots = Lists.newArrayListWithExpectedSize(andLen); - for (int i = 0; i < andLen; i++) { - int orLenWithSlotSpan = in.readInt(); - int orLen = orLenWithSlotSpan; - /* - * For 4.2+ clients, we serialize the slotSpan array. To maintain backward - * compatibility, we encode the slotSpan values with the size of the list - * of key ranges. We reserve 21 bits for the key range list and 10 bits - * for the slotSpan value (up to 1024 which should be plenty). - */ - if (orLenWithSlotSpan < 0) { - orLenWithSlotSpan = -orLenWithSlotSpan - 1; - slotSpan[i] = orLenWithSlotSpan >>> KEY_RANGE_LENGTH_BITS; - orLen = (orLenWithSlotSpan << SLOT_SPAN_BITS) >>> SLOT_SPAN_BITS; - } - List orClause = Lists.newArrayListWithExpectedSize(orLen); - slots.add(orClause); - for (int j=0; j orLen = slots.get(i); - int span = slotSpan[i]; - int orLenWithSlotSpan = -( ( (span << KEY_RANGE_LENGTH_BITS) | orLen.size() ) + 1); - out.writeInt(orLenWithSlotSpan); - for (KeyRange range : orLen) { - range.write(out); - } + i = j; + } else if (slots.get(i).get(position[i]).compareLowerToUpperBound(ptr, comparator) > 0) { + // Our current key is less than the lower range of the current position in the current slot. 
+ // Seek to the lower range, since it's bigger than the current key + setStartKey(ptr, minOffset, i, nSlots, false); + return ReturnCode.SEEK_NEXT_USING_HINT; + } else { // We're in range, check the next slot + if (!slots.get(i).get(position[i]).isSingleKey() && i < earliestRangeIndex) { + earliestRangeIndex = i; } - out.writeBoolean(isMultiKeyPointLookup); - } - - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); - } - - public static SkipScanFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - return (SkipScanFilter)Writables.getWritable(pbBytes, new SkipScanFilter()); - } catch (IOException e) { - throw new DeserializationException(e); + // If we're past the last slot or we know we're seeking to the next (in + // which case the previously updated slot was verified to be within the + // range, so we don't need to check the rest of the slots. If we were + // to check the rest of the slots, we'd get into trouble because we may + // have a null byte that was incremented which screws up our schema.next call) + if (i == nSlots - 1 || seek) { + break; } - } - - @Override - public int hashCode() { - HashFunction hf = Hashing.goodFastHash(32); - Hasher h = hf.newHasher(); - h.putInt(slots.size()); - for (int i=0; i keyRanges = slots.get(i); + if (keyRanges.size() != 1) { + return false; + } + KeyRange keyRange = keyRanges.get(0); + if (!keyRange.isSingleKey()) { + return false; + } + if (keyRange.getLowerRange().length != 0) { + return false; + } + } + return true; + } + + private int nextPosition(int i) { + while ( + i >= 0 && slots.get(i).get(position[i]).isSingleKey() + && (position[i] = (position[i] + 1) % slots.get(i).size()) == 0 + ) { + i--; + } + return i; + } + + private void setStartKey() { + startKeyLength = setKey(Bound.LOWER, startKey, 0, 0); + } + + private int setStartKey(ImmutableBytesWritable ptr, int offset, int i, int nSlots, + boolean atEndOfKey) { + int length = ptr.getOffset() - offset; + startKey = copyKey(startKey, length + this.maxKeyLength + 1, ptr.get(), offset, length); + startKeyLength = length; + // Add separator byte if we're at end of the key, since trailing separator bytes are stripped + if (atEndOfKey && i > 0 && i - 1 < nSlots) { + Field field = schema.getField(i - 1); + if (!field.getDataType().isFixedWidth()) { + byte[] sepBytes = SchemaUtil.getSeparatorBytes(field.getDataType(), + schema.rowKeyOrderOptimizable(), true, field.getSortOrder()); + for (byte sepByte : sepBytes) { + startKey[startKeyLength++] = sepByte; + } + } + } + startKeyLength += setKey(Bound.LOWER, startKey, startKeyLength, i); + return length; + } + + private int setEndKey(ImmutableBytesWritable ptr, int offset, int i) { + int length = ptr.getOffset() - offset; + endKey = copyKey(endKey, length + this.maxKeyLength, ptr.get(), offset, length); + endKeyLength = length; + endKeyLength += setKey(Bound.UPPER, endKey, length, i); + return length; + } + + private int setKey(Bound bound, byte[] key, int keyOffset, int slotStartIndex) { + return ScanUtil.setKey(schema, slots, slotSpan, position, bound, key, keyOffset, slotStartIndex, + position.length); + } + + private static byte[] copyKey(byte[] targetKey, int targetLength, byte[] sourceKey, int offset, + int length) { + if (targetLength > targetKey.length) { + targetKey = new byte[targetLength]; + } + System.arraycopy(sourceKey, offset, targetKey, 0, length); + return targetKey; + } + + private static final int KEY_RANGE_LENGTH_BITS = 21; + private static final int 
SLOT_SPAN_BITS = 32 - KEY_RANGE_LENGTH_BITS; + + @Override + public void readFields(DataInput in) throws IOException { + RowKeySchema schema = new RowKeySchema(); + schema.readFields(in); + int andLen = in.readInt(); + boolean includeMultipleVersions = false; + if (andLen < 0) { + andLen = -andLen; + includeMultipleVersions = true; + } + int[] slotSpan = new int[andLen]; + List> slots = Lists.newArrayListWithExpectedSize(andLen); + for (int i = 0; i < andLen; i++) { + int orLenWithSlotSpan = in.readInt(); + int orLen = orLenWithSlotSpan; + /* + * For 4.2+ clients, we serialize the slotSpan array. To maintain backward compatibility, we + * encode the slotSpan values with the size of the list of key ranges. We reserve 21 bits for + * the key range list and 10 bits for the slotSpan value (up to 1024 which should be plenty). + */ + if (orLenWithSlotSpan < 0) { + orLenWithSlotSpan = -orLenWithSlotSpan - 1; + slotSpan[i] = orLenWithSlotSpan >>> KEY_RANGE_LENGTH_BITS; + orLen = (orLenWithSlotSpan << SLOT_SPAN_BITS) >>> SLOT_SPAN_BITS; + } + List orClause = Lists.newArrayListWithExpectedSize(orLen); + slots.add(orClause); + for (int j = 0; j < orLen; j++) { + KeyRange range = KeyRange.read(in); + orClause.add(range); + } + } + try { + boolean isPointLookup = in.readBoolean(); + this.init(slots, slotSpan, schema, includeMultipleVersions, isPointLookup); + } catch (IOException e) { + // Reached the end of the stream before reading the boolean field. The client can be + // an older client + this.init(slots, slotSpan, schema, includeMultipleVersions, false); + } + } + + @Override + public void write(DataOutput out) throws IOException { + assert (slots.size() == slotSpan.length); + schema.write(out); + int nSlots = slots.size(); + out.writeInt(this.includeMultipleVersions ? 
-nSlots : nSlots); + for (int i = 0; i < nSlots; i++) { + List orLen = slots.get(i); + int span = slotSpan[i]; + int orLenWithSlotSpan = -(((span << KEY_RANGE_LENGTH_BITS) | orLen.size()) + 1); + out.writeInt(orLenWithSlotSpan); + for (KeyRange range : orLen) { + range.write(out); + } + } + out.writeBoolean(isMultiKeyPointLookup); + } + + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } + + public static SkipScanFilter parseFrom(final byte[] pbBytes) throws DeserializationException { + try { + return (SkipScanFilter) Writables.getWritable(pbBytes, new SkipScanFilter()); + } catch (IOException e) { + throw new DeserializationException(e); + } + } + + @Override + public int hashCode() { + HashFunction hf = Hashing.goodFastHash(32); + Hasher h = hf.newHasher(); + h.putInt(slots.size()); + for (int i = 0; i < slots.size(); i++) { + h.putInt(slots.get(i).size()); + for (int j = 0; j < slots.size(); j++) { + h.putBytes(slots.get(i).get(j).getLowerRange()); + h.putBytes(slots.get(i).get(j).getUpperRange()); + } + } + return h.hash().asInt(); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof SkipScanFilter)) return false; + SkipScanFilter other = (SkipScanFilter) obj; + return Objects.equal(slots, other.slots) && Objects.equal(schema, other.schema); + } + + @Override + public String toString() { + return "SkipScanFilter " + slots.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java index 6aa8c559a01..ec0b339ab0c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/SystemCatalogViewIndexIdFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,21 @@ */ package org.apache.phoenix.filter; +import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES; +import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; +import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.NULL_DATA_TYPE_VALUE; +import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN; +import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.Types; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.FilterBase; @@ -28,134 +43,114 @@ import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ViewIndexIdRetrieveUtil; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.sql.Types; -import java.util.Collections; -import java.util.List; - - -import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES; -import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; -import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.NULL_DATA_TYPE_VALUE; -import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN; -import static org.apache.phoenix.util.ViewIndexIdRetrieveUtil.VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN; - public class SystemCatalogViewIndexIdFilter extends FilterBase implements Writable { - private int clientVersion; - - public SystemCatalogViewIndexIdFilter() { - } - - public SystemCatalogViewIndexIdFilter(int clientVersion) { - this.clientVersion = clientVersion; - } - - @Override - public ReturnCode filterKeyValue(Cell keyValue) { - return filterCell(keyValue); - } - - @Override - public ReturnCode filterCell(Cell keyValue) { - return ReturnCode.INCLUDE_AND_NEXT_COL; - } - - @Override - public boolean hasFilterRow() { - return true; - } - - @Override - public void filterRowCells(List kvs) throws IOException { - Cell viewIndexIdCell = PhoenixKeyValueUtil.getColumnLatest( - GenericKeyValueBuilder.INSTANCE, kvs, - DEFAULT_COLUMN_FAMILY_BYTES, VIEW_INDEX_ID_BYTES); - + private int clientVersion; + + public SystemCatalogViewIndexIdFilter() { + } + + public SystemCatalogViewIndexIdFilter(int clientVersion) { + this.clientVersion = clientVersion; + } + + @Override + public ReturnCode filterKeyValue(Cell keyValue) { + return filterCell(keyValue); + } + + @Override + public ReturnCode filterCell(Cell keyValue) { + return ReturnCode.INCLUDE_AND_NEXT_COL; + } + + @Override + public boolean hasFilterRow() { + return true; + } + + @Override + public void filterRowCells(List kvs) throws 
IOException { + Cell viewIndexIdCell = PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, kvs, + DEFAULT_COLUMN_FAMILY_BYTES, VIEW_INDEX_ID_BYTES); + + /* + * We retrieve the VIEW_INDEX_ID cell from SMALLINT to BIGINT or BIGINT to SMALLINT if and only + * if VIEW_INDEX_ID is included as part of the projected column. This is combination of diff + * client created view index looks like: client VIEW_INDEX_ID(Cell number of bytes) + * VIEW_INDEX_ID_DATA_TYPE pre-4.15 2 bytes NULL post-4.15[config smallint] 2 bytes 5(smallint) + * post-4.15[config bigint] 8 bytes -5(bigint) + */ + if (viewIndexIdCell != null) { + int type = NULL_DATA_TYPE_VALUE; + Cell viewIndexIdDataTypeCell = + PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, kvs, + DEFAULT_COLUMN_FAMILY_BYTES, VIEW_INDEX_ID_DATA_TYPE_BYTES); + if (viewIndexIdDataTypeCell != null) { + type = (Integer) PInteger.INSTANCE.toObject(viewIndexIdDataTypeCell.getValueArray(), + viewIndexIdDataTypeCell.getValueOffset(), viewIndexIdDataTypeCell.getValueLength(), + PInteger.INSTANCE, SortOrder.ASC); + } + if (this.clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { /* - We retrieve the VIEW_INDEX_ID cell from SMALLINT to BIGINT or BIGINT to SMALLINT if and - only if VIEW_INDEX_ID is included as part of the projected column. - This is combination of diff client created view index looks like: - client VIEW_INDEX_ID(Cell number of bytes) VIEW_INDEX_ID_DATA_TYPE - pre-4.15 2 bytes NULL - post-4.15[config smallint] 2 bytes 5(smallint) - post-4.15[config bigint] 8 bytes -5(bigint) + * For pre-4.15 client select query cannot include VIEW_INDEX_ID_DATA_TYPE as part of the + * projected columns; for this reason, the TYPE will always be NULL. Since the pre-4.15 + * client always assume the VIEW_INDEX_ID column is type of SMALLINT, we need to retrieve + * the BIGINT cell to SMALLINT cell. VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID(Cell + * representation of the data) NULL, SMALLINT -> DO NOT CONVERT SMALLINT, SMALLINT -> DO NOT + * CONVERT BIGINT, BIGINT -> RETRIEVE AND SEND SMALLINT BACK */ - if (viewIndexIdCell != null) { - int type = NULL_DATA_TYPE_VALUE; - Cell viewIndexIdDataTypeCell = PhoenixKeyValueUtil.getColumnLatest( - GenericKeyValueBuilder.INSTANCE, kvs, - DEFAULT_COLUMN_FAMILY_BYTES, VIEW_INDEX_ID_DATA_TYPE_BYTES); - if (viewIndexIdDataTypeCell != null) { - type = (Integer) PInteger.INSTANCE.toObject( - viewIndexIdDataTypeCell.getValueArray(), - viewIndexIdDataTypeCell.getValueOffset(), - viewIndexIdDataTypeCell.getValueLength(), - PInteger.INSTANCE, - SortOrder.ASC); - } - if (this.clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { - /* - For pre-4.15 client select query cannot include VIEW_INDEX_ID_DATA_TYPE as part - of the projected columns; for this reason, the TYPE will always be NULL. Since - the pre-4.15 client always assume the VIEW_INDEX_ID column is type of SMALLINT, - we need to retrieve the BIGINT cell to SMALLINT cell. - VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID(Cell representation of the data) - NULL, SMALLINT -> DO NOT CONVERT - SMALLINT, SMALLINT -> DO NOT CONVERT - BIGINT, BIGINT -> RETRIEVE AND SEND SMALLINT BACK - */ - if (type == NULL_DATA_TYPE_VALUE && viewIndexIdCell.getValueLength() > - VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN) { - Cell keyValue = ViewIndexIdRetrieveUtil. 
- getRetrievedViewIndexIdCell(viewIndexIdCell, false); - Collections.replaceAll(kvs, viewIndexIdCell, keyValue); - } - } else { - /* - For post-4.15 client select query needs to include VIEW_INDEX_ID_DATA_TYPE as - part of the projected columns, and VIEW_INDEX_ID depends on it. - VIEW_INDEX_ID_DATA_TYPE, VIEW_INDEX_ID(Cell representation of the data) - NULL, SMALLINT -> RETRIEVE AND SEND BIGINT BACK - SMALLINT, SMALLINT -> RETRIEVE AND SEND BIGINT BACK - BIGINT, BIGINT -> DO NOT RETRIEVE - */ - if (type != Types.BIGINT && viewIndexIdCell.getValueLength() < - VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN) { - Cell keyValue = ViewIndexIdRetrieveUtil. - getRetrievedViewIndexIdCell(viewIndexIdCell, true); - Collections.replaceAll(kvs, viewIndexIdCell, keyValue); - } - } + if ( + type == NULL_DATA_TYPE_VALUE + && viewIndexIdCell.getValueLength() > VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN + ) { + Cell keyValue = + ViewIndexIdRetrieveUtil.getRetrievedViewIndexIdCell(viewIndexIdCell, false); + Collections.replaceAll(kvs, viewIndexIdCell, keyValue); } - } - - public static SystemCatalogViewIndexIdFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - try { - SystemCatalogViewIndexIdFilter writable = (SystemCatalogViewIndexIdFilter) - Writables.getWritable(pbBytes, new SystemCatalogViewIndexIdFilter()); - return writable; - } catch (IOException e) { - throw new DeserializationException(e); + } else { + /* + * For post-4.15 client select query needs to include VIEW_INDEX_ID_DATA_TYPE as part of the + * projected columns, and VIEW_INDEX_ID depends on it. VIEW_INDEX_ID_DATA_TYPE, + * VIEW_INDEX_ID(Cell representation of the data) NULL, SMALLINT -> RETRIEVE AND SEND BIGINT + * BACK SMALLINT, SMALLINT -> RETRIEVE AND SEND BIGINT BACK BIGINT, BIGINT -> DO NOT + * RETRIEVE + */ + if ( + type != Types.BIGINT + && viewIndexIdCell.getValueLength() < VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN + ) { + Cell keyValue = + ViewIndexIdRetrieveUtil.getRetrievedViewIndexIdCell(viewIndexIdCell, true); + Collections.replaceAll(kvs, viewIndexIdCell, keyValue); } + } } - - @Override - public byte[] toByteArray() throws IOException { - return Writables.getBytes(this); + } + + public static SystemCatalogViewIndexIdFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { + try { + SystemCatalogViewIndexIdFilter writable = (SystemCatalogViewIndexIdFilter) Writables + .getWritable(pbBytes, new SystemCatalogViewIndexIdFilter()); + return writable; + } catch (IOException e) { + throw new DeserializationException(e); } + } - @Override - public void readFields(DataInput input) throws IOException { - this.clientVersion = input.readInt(); - } + @Override + public byte[] toByteArray() throws IOException { + return Writables.getBytes(this); + } - @Override - public void write(DataOutput output) throws IOException { - output.writeInt(this.clientVersion); - } + @Override + public void readFields(DataInput input) throws IOException { + this.clientVersion = input.readInt(); + } + + @Override + public void write(DataOutput output) throws IOException { + output.writeInt(this.clientVersion); + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/UnverifiedRowFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/UnverifiedRowFilter.java index 400b48d477c..c23f76ecebe 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/filter/UnverifiedRowFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/filter/UnverifiedRowFilter.java 
@@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.filter; +import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; + import java.io.IOException; import java.util.List; @@ -26,105 +28,97 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ScanUtil; -import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; - /** - * This filter overrides the behavior of delegate so that we do not jump to - * the next row if the row is unverified and doesn't match the filter since - * it is possible that a previous verified version of the same row could match - * the filter and thus should be included in the results. - * For tables using encoded columns, the empty column is the first column the - * filter processes, so we can check whether it is verified or not. - * If no encoding is used, the empty column is the last column to be processed - * by the filter, so we have to wait to determine whether the row is verified - * or not. + * This filter overrides the behavior of delegate so that we do not jump to the next row if the row + * is unverified and doesn't match the filter since it is possible that a previous verified version + * of the same row could match the filter and thus should be included in the results. For tables + * using encoded columns, the empty column is the first column the filter processes, so we can check + * whether it is verified or not. If no encoding is used, the empty column is the last column to be + * processed by the filter, so we have to wait to determine whether the row is verified or not. 
*/ public class UnverifiedRowFilter extends DelegateFilter { - private final byte[] emptyCF; - private final byte[] emptyCQ; - private boolean verified = false; - // save the code from delegate filter while waiting for the empty column - private ReturnCode recordedRetCode = null; + private final byte[] emptyCF; + private final byte[] emptyCQ; + private boolean verified = false; + // save the code from delegate filter while waiting for the empty column + private ReturnCode recordedRetCode = null; - private void init() { - verified = false; - recordedRetCode = null; - } - public UnverifiedRowFilter(Filter delegate, byte[] emptyCF, byte[] emptyCQ) { - super(delegate); - Preconditions.checkArgument(emptyCF != null, - "Column family must not be null"); - Preconditions.checkArgument(emptyCQ != null, - "Column qualifier must not be null"); - this.emptyCF = emptyCF; - this.emptyCQ = emptyCQ; - init(); - } + private void init() { + verified = false; + recordedRetCode = null; + } - @Override - public void reset() throws IOException { - init(); - delegate.reset(); - } + public UnverifiedRowFilter(Filter delegate, byte[] emptyCF, byte[] emptyCQ) { + super(delegate); + Preconditions.checkArgument(emptyCF != null, "Column family must not be null"); + Preconditions.checkArgument(emptyCQ != null, "Column qualifier must not be null"); + this.emptyCF = emptyCF; + this.emptyCQ = emptyCQ; + init(); + } - @Override - public ReturnCode filterKeyValue(Cell v) throws IOException { - return filterCell(v); - } + @Override + public void reset() throws IOException { + init(); + delegate.reset(); + } + + @Override + public ReturnCode filterKeyValue(Cell v) throws IOException { + return filterCell(v); + } - @Override - public ReturnCode filterCell(final Cell cell) throws IOException { - if (verified) { - // we have processed the empty column and found that it is verified - return delegate.filterCell(cell); - } + @Override + public ReturnCode filterCell(final Cell cell) throws IOException { + if (verified) { + // we have processed the empty column and found that it is verified + return delegate.filterCell(cell); + } - if (ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ)) { - verified = Bytes.compareTo( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0; - if (verified) { - // if we saved the return code while waiting for the empty - // column, use that code else call the delegate - return recordedRetCode != null ? recordedRetCode : delegate.filterCell(cell); - } else { - // it is an unverified row, no need to look at more columns - // include it so that it can be repaired and evaluated again by the filter - return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; - } - } - // we haven't seen the empty column yet so don't know whether - // the row is verified or not + if (ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ)) { + verified = Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0; + if (verified) { + // if we saved the return code while waiting for the empty + // column, use that code else call the delegate + return recordedRetCode != null ? 
recordedRetCode : delegate.filterCell(cell); + } else { + // it is an unverified row, no need to look at more columns + // include it so that it can be repaired and evaluated again by the filter + return ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW; + } + } + // we haven't seen the empty column yet so don't know whether + // the row is verified or not - if (recordedRetCode != null) { - // we already have recorded the return code from the wrapped - // delegate filter so skip this column - return ReturnCode.NEXT_COL; - } - ReturnCode ret = delegate.filterCell(cell); - if (ret == ReturnCode.NEXT_ROW - || ret == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) { - // Save the return code but don't move to the next row. - // Continue processing the current row till we find the empty column - recordedRetCode = ret; - ret = ReturnCode.NEXT_COL; - } - return ret; + if (recordedRetCode != null) { + // we already have recorded the return code from the wrapped + // delegate filter so skip this column + return ReturnCode.NEXT_COL; + } + ReturnCode ret = delegate.filterCell(cell); + if (ret == ReturnCode.NEXT_ROW || ret == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) { + // Save the return code but don't move to the next row. + // Continue processing the current row till we find the empty column + recordedRetCode = ret; + ret = ReturnCode.NEXT_COL; } + return ret; + } - @Override - public void filterRowCells(List kvs) throws IOException { - if (verified) { - delegate.filterRowCells(kvs); - } + @Override + public void filterRowCells(List kvs) throws IOException { + if (verified) { + delegate.filterRowCells(kvs); } + } - @Override - public boolean filterRow() throws IOException { - if (verified) { - return delegate.filterRow(); - } - return false; + @Override + public boolean filterRow() throws IOException { + if (verified) { + return delegate.filterRow(); } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/AbstractValueGetter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/AbstractValueGetter.java index 90f90948dfb..715013cefe3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/AbstractValueGetter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/AbstractValueGetter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,29 +19,28 @@ import java.io.IOException; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; -public abstract class AbstractValueGetter implements ValueGetter{ - @Override - public KeyValue getLatestKeyValue(ColumnReference ref, long ts) throws IOException { - ImmutableBytesWritable value = getLatestValue(ref, ts); - byte[] rowKey = getRowKey(); - int valueOffset = 0; - int valueLength = 0; - byte[] valueBytes = HConstants.EMPTY_BYTE_ARRAY; - if (value == null) { - return null; - } else { - valueBytes = value.get(); - valueOffset = value.getOffset(); - valueLength = value.getLength(); - } - return new KeyValue(rowKey, 0, rowKey.length, ref.getFamily(), 0, ref.getFamily().length, - ref.getQualifier(), 0, ref.getQualifier().length, ts, KeyValue.Type.Put, - valueBytes, valueOffset, valueLength); +public abstract class AbstractValueGetter implements ValueGetter { + @Override + public KeyValue getLatestKeyValue(ColumnReference ref, long ts) throws IOException { + ImmutableBytesWritable value = getLatestValue(ref, ts); + byte[] rowKey = getRowKey(); + int valueOffset = 0; + int valueLength = 0; + byte[] valueBytes = HConstants.EMPTY_BYTE_ARRAY; + if (value == null) { + return null; + } else { + valueBytes = value.get(); + valueOffset = value.getOffset(); + valueLength = value.getLength(); } - } + return new KeyValue(rowKey, 0, rowKey.length, ref.getFamily(), 0, ref.getFamily().length, + ref.getQualifier(), 0, ref.getQualifier().length, ts, KeyValue.Type.Put, valueBytes, + valueOffset, valueLength); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/BaseIndexCodec.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/BaseIndexCodec.java index 7e6c528a344..7c2d5389d60 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/BaseIndexCodec.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/BaseIndexCodec.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,14 +21,14 @@ import org.apache.phoenix.hbase.index.covered.IndexCodec; public abstract class BaseIndexCodec implements IndexCodec { - /** - * {@inheritDoc} - *
<p>
- * By default, the codec is always enabled. Subclasses should override this method if they want do - * decide to index on a per-mutation basis. - */ - @Override - public boolean isEnabled(Mutation m) { - return true; - } -} \ No newline at end of file + /** + * {@inheritDoc} + *
<p>
+ * By default, the codec is always enabled. Subclasses should override this method if they want do + * decide to index on a per-mutation basis. + */ + @Override + public boolean isEnabled(Mutation m) { + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java index f6381c4af83..e0c4ff5720b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/MultiMutation.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,60 +27,59 @@ public class MultiMutation extends Mutation { - private ImmutableBytesPtr rowKey; + private ImmutableBytesPtr rowKey; - public MultiMutation(ImmutableBytesPtr rowkey) { - this.rowKey = rowkey; - } - - /** - * @param stored - */ - public void addAll(Mutation stored) { - // add all the kvs - for (Entry> kvs : stored.getFamilyCellMap().entrySet()) { - byte[] family = kvs.getKey(); - List list = getKeyValueList(family, kvs.getValue().size()); - list.addAll(kvs.getValue()); - familyMap.put(family, list); - } + public MultiMutation(ImmutableBytesPtr rowkey) { + this.rowKey = rowkey; + } - // add all the attributes, not overriding already stored ones - for (Entry attrib : stored.getAttributesMap().entrySet()) { - if (this.getAttribute(attrib.getKey()) == null) { - this.setAttribute(attrib.getKey(), attrib.getValue()); - } - } + /** + */ + public void addAll(Mutation stored) { + // add all the kvs + for (Entry> kvs : stored.getFamilyCellMap().entrySet()) { + byte[] family = kvs.getKey(); + List list = getKeyValueList(family, kvs.getValue().size()); + list.addAll(kvs.getValue()); + familyMap.put(family, list); } - private List getKeyValueList(byte[] family, int hint) { - List list = familyMap.get(family); - if (list == null) { - list = new ArrayList(hint); + // add all the attributes, not overriding already stored ones + for (Entry attrib : stored.getAttributesMap().entrySet()) { + if (this.getAttribute(attrib.getKey()) == null) { + this.setAttribute(attrib.getKey(), attrib.getValue()); } - return list; } + } - @Override - public byte[] getRow(){ - return this.rowKey.copyBytesIfNecessary(); + private List getKeyValueList(byte[] family, int hint) { + List list = familyMap.get(family); + if (list == null) { + list = new ArrayList(hint); } + return list; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((rowKey == null) ? 0 : rowKey.hashCode()); - return result; - } + @Override + public byte[] getRow() { + return this.rowKey.copyBytesIfNecessary(); + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - MultiMutation other = (MultiMutation)obj; - return rowKey.equals(other.rowKey); - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((rowKey == null) ? 
0 : rowKey.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + MultiMutation other = (MultiMutation) obj; + return rowKey.equals(other.rowKey); + } - } \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java index 4582f3b9d60..60a9ba6e30d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,116 +21,117 @@ public class OffsetCell implements Cell { - private Cell cell; - private int offset; - - public OffsetCell(Cell cell, int offset) { - this.cell = cell; - this.offset = offset; - } - - @Override - public byte[] getRowArray() { - return cell.getRowArray(); - } - - @Override - public int getRowOffset() { - return cell.getRowOffset() + offset; - } - - @Override - public short getRowLength() { - return (short) (cell.getRowLength() - offset); - } - - @Override - public byte[] getFamilyArray() { - return cell.getFamilyArray(); - } - - @Override - public int getFamilyOffset() { - return cell.getFamilyOffset(); - } - - @Override - public byte getFamilyLength() { - return cell.getFamilyLength(); - } - - @Override - public byte[] getQualifierArray() { - return cell.getQualifierArray(); - } - - @Override - public int getQualifierOffset() { - return cell.getQualifierOffset(); - } - - @Override - public int getQualifierLength() { - return cell.getQualifierLength(); - } - - @Override - public long getTimestamp() { - return cell.getTimestamp(); - } - - @Override - public byte getTypeByte() { - return cell.getTypeByte(); - } - - @Override public long getSequenceId() { - return cell.getSequenceId(); - } - - @Override - public byte[] getValueArray() { - return cell.getValueArray(); - } - - @Override - public int getValueOffset() { - return cell.getValueOffset(); - } - - @Override - public int getValueLength() { - return cell.getValueLength(); - } - - @Override - public byte[] getTagsArray() { - return cell.getTagsArray(); - } - - @Override - public int getTagsOffset() { - return cell.getTagsOffset(); - } - - @Override - public int getTagsLength() { - return cell.getTagsLength(); - } - - @Override - public Type getType() { - return cell.getType(); - } - - @Override - public long heapSize() { - return cell.heapSize(); - } - - @Override - public int getSerializedSize() { - return cell.getSerializedSize() - offset; - } + private Cell cell; + private int offset; + + public OffsetCell(Cell cell, int offset) { + this.cell = cell; + this.offset = offset; + } + + @Override + public byte[] getRowArray() { + return cell.getRowArray(); + } + + @Override + public int getRowOffset() { + return cell.getRowOffset() + offset; + } + + @Override + public short getRowLength() { + return (short) (cell.getRowLength() - offset); + } + + @Override + public byte[] getFamilyArray() { + return cell.getFamilyArray(); + } + + @Override + 
public int getFamilyOffset() { + return cell.getFamilyOffset(); + } + + @Override + public byte getFamilyLength() { + return cell.getFamilyLength(); + } + + @Override + public byte[] getQualifierArray() { + return cell.getQualifierArray(); + } + + @Override + public int getQualifierOffset() { + return cell.getQualifierOffset(); + } + + @Override + public int getQualifierLength() { + return cell.getQualifierLength(); + } + + @Override + public long getTimestamp() { + return cell.getTimestamp(); + } + + @Override + public byte getTypeByte() { + return cell.getTypeByte(); + } + + @Override + public long getSequenceId() { + return cell.getSequenceId(); + } + + @Override + public byte[] getValueArray() { + return cell.getValueArray(); + } + + @Override + public int getValueOffset() { + return cell.getValueOffset(); + } + + @Override + public int getValueLength() { + return cell.getValueLength(); + } + + @Override + public byte[] getTagsArray() { + return cell.getTagsArray(); + } + + @Override + public int getTagsOffset() { + return cell.getTagsOffset(); + } + + @Override + public int getTagsLength() { + return cell.getTagsLength(); + } + + @Override + public Type getType() { + return cell.getType(); + } + + @Override + public long heapSize() { + return cell.heapSize(); + } + + @Override + public int getSerializedSize() { + return cell.getSerializedSize() - offset; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java index 7b8763e3c80..c85b69d4c66 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/ValueGetter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,27 +19,28 @@ import java.io.IOException; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; public interface ValueGetter { - public static final ImmutableBytesWritable HIDDEN_BY_DELETE = new ImmutableBytesWritable(new byte[0]); + public static final ImmutableBytesWritable HIDDEN_BY_DELETE = + new ImmutableBytesWritable(new byte[0]); + /** * Get the most recent (largest timestamp) for the given column reference * @param ref to match against an underlying key value. Uses the passed object to match the - * keyValue via {@link ColumnReference#matches} - * @param ts time stamp at which mutations will be issued + * keyValue via {@link ColumnReference#matches} + * @param ts time stamp at which mutations will be issued * @return the stored value for the given {@link ColumnReference}, null if no value is * present, or {@link ValueGetter#HIDDEN_BY_DELETE} if no value is present and the ref * will be shadowed by a delete marker. 
* @throws IOException if there is an error accessing the underlying data storage */ public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException; + public KeyValue getLatestKeyValue(ColumnReference ref, long ts) throws IOException; - + public byte[] getRowKey(); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/FatalIndexBuildingFailureException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/FatalIndexBuildingFailureException.java index 249f6c4fc3a..161b2346bbc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/FatalIndexBuildingFailureException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/FatalIndexBuildingFailureException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,27 +18,23 @@ package org.apache.phoenix.hbase.index.builder; /** - * This exception should be thrown if we are unable to handle Index failure and want regionserver to go down to avoid - * inconsistency + * This exception should be thrown if we are unable to handle Index failure and want regionserver to + * go down to avoid inconsistency */ public class FatalIndexBuildingFailureException extends RuntimeException { - /** - * @param msg - * reason for the failure - */ - public FatalIndexBuildingFailureException(String msg) { - super(msg); - } + /** + * reason for the failure + */ + public FatalIndexBuildingFailureException(String msg) { + super(msg); + } - /** - * @param msg - * reason - * @param cause - * underlying cause for the failure - */ - public FatalIndexBuildingFailureException(String msg, Throwable cause) { - super(msg, cause); - } + /** + * reason underlying cause for the failure + */ + public FatalIndexBuildingFailureException(String msg, Throwable cause) { + super(msg, cause); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java index cc7cc355d92..eed14e9d1a5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,10 +39,10 @@ public IndexBuildingFailureException(String msg) { } /** - * @param msg reason + * @param msg reason * @param cause underlying cause for the failure */ public IndexBuildingFailureException(String msg, Throwable cause) { super(msg, cause); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java index a8c53cd3769..b55526318ab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/Batch.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,13 +33,12 @@ public class Batch { private boolean allPointDeletes = true; /** - * @param ts */ public Batch(long ts) { this.timestamp = ts; } - public void add(Cell kv){ + public void add(Cell kv) { if (Cell.Type.Delete != kv.getType()) { allPointDeletes = false; } @@ -57,4 +56,4 @@ public long getTimestamp() { public List getKvs() { return this.batch; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java index ecc2c24fcff..f15c5b0c1fc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexCodec.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.covered; @@ -19,70 +27,65 @@ /** * Codec for creating index updates from the current state of a table. *
<p>
- * Generally, you should extend {@link BaseIndexCodec} instead, so help maintain compatibility as features need to be - * added to the codec, as well as potentially not haivng to implement some methods. + * Generally, you should extend {@link BaseIndexCodec} instead, so help maintain compatibility as + * features need to be added to the codec, as well as potentially not haivng to implement some + * methods. */ public interface IndexCodec { - /** - * Get the index cleanup entries. Currently, this must return just single row deletes (where just the row-key is - * specified and no columns are returned) mapped to the table name. For instance, to you have an index 'myIndex' - * with row : - * - *
<pre>
-     * v1,v2,v3 | CF:CQ0  | rowkey
-     *          | CF:CQ1  | rowkey
-     * </pre>
- * - * To then cleanup this entry, you would just return 'v1,v2,v3', 'myIndex'. - * - * @param state - * the current state of the table that needs to be cleaned up. Generally, you only care about the latest - * column values, for each column you are indexing for each index table. - * @param context TODO - * @param regionStartKey TODO - * @param regionEndKey TODO - * @return the pairs of (deletes, index table name) that should be applied. - * @throws IOException - */ - public Iterable getIndexDeletes(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey) throws IOException; + /** + * Get the index cleanup entries. Currently, this must return just single row deletes (where just + * the row-key is specified and no columns are returned) mapped to the table name. For instance, + * to you have an index 'myIndex' with row : + * + *
+   * v1,v2,v3 | CF:CQ0  | rowkey
+   *          | CF:CQ1  | rowkey
+   * 
+   * </pre>

- * You must specify the same timestamps on the Put as {@link TableState#getCurrentTimestamp()} so the index entries - * match the primary table row. This could be managed at a higher level, but would require iterating all the kvs in - * the Put again - very inefficient when compared to the current interface where you must provide a timestamp - * anyways (so you might as well provide the right one). - * - * @param state - * the current state of the table that needs to an index update Generally, you only care about the latest - * column values, for each column you are indexing for each index table. - * @param context TODO - * @param regionStartKey TODO - * @param regionEndKey TODO - * @return the pairs of (updates,index table name) that should be applied. - * @throws IOException - */ - public Iterable getIndexUpserts(TableState state, IndexMetaData context, - byte[] regionStartKey, byte[] regionEndKey, boolean verified) throws IOException; + // table state has the pending update already applied, before calling + // get the new index entries + /** + * Get the index updates for the primary table state, for each index table. The returned + * {@link Put}s need to be fully specified (including timestamp) to minimize passes over the same + * key-values multiple times. + *

+ * You must specify the same timestamps on the Put as {@link TableState#getCurrentTimestamp()} so + * the index entries match the primary table row. This could be managed at a higher level, but + * would require iterating all the kvs in the Put again - very inefficient when compared to the + * current interface where you must provide a timestamp anyways (so you might as well provide the + * right one). the current state of the table that needs to an index update Generally, you only + * care about the latest column values, for each column you are indexing for each index table. + * @param context TODO + * @param regionStartKey TODO + * @param regionEndKey TODO + * @return the pairs of (updates,index table name) that should be applied. + */ + public Iterable getIndexUpserts(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey, boolean verified) throws IOException; - /** - * This allows the codec to dynamically change whether or not indexing should take place for a table. If it doesn't - * take place, we can save a lot of time on the regular Put patch. By making it dynamic, we can save offlining and - * then onlining a table just to turn indexing on. - *

- * We can also be smart about even indexing a given update here too - if the update doesn't contain any columns that - * we care about indexing, we can save the effort of analyzing the put and further. - * - * @param m - * mutation that should be indexed. - * @return true if indexing is enabled for the given table. This should be on a per-table basis, as each - * codec is instantiated per-region. - */ - public boolean isEnabled(Mutation m); + /** + * This allows the codec to dynamically change whether or not indexing should take place for a + * table. If it doesn't take place, we can save a lot of time on the regular Put patch. By making + * it dynamic, we can save offlining and then onlining a table just to turn indexing on. + *

+ * We can also be smart about even indexing a given update here too - if the update doesn't + * contain any columns that we care about indexing, we can save the effort of analyzing the put + * and further. mutation that should be indexed. + * @return true if indexing is enabled for the given table. This should be on a per-table + * basis, as each codec is instantiated per-region. + */ + public boolean isEnabled(Mutation m); - public void initialize(Configuration conf, byte[] tableName); -} \ No newline at end of file + public void initialize(Configuration conf, byte[] tableName); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java index 1093a78a053..dea87c50b2a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,33 +23,33 @@ public interface IndexMetaData { - public static final IndexMetaData NULL_INDEX_META_DATA = new IndexMetaData() { - - @Override - public boolean requiresPriorRowState(Mutation m) { - return true; - } - - @Override - public ReplayWrite getReplayWrite() { - return null; - } - - @Override - public int getClientVersion() { - return ScanUtil.UNKNOWN_CLIENT_VERSION; - } - }; - - - /** - * Determines whether or not we need to look up the old row to retrieve old row values for maintaining the index. - * @param m mutation being performed on the data table - * @return true if prior row state is required and false otherwise - */ - public boolean requiresPriorRowState(Mutation m); - - public ReplayWrite getReplayWrite(); - - public int getClientVersion(); + public static final IndexMetaData NULL_INDEX_META_DATA = new IndexMetaData() { + + @Override + public boolean requiresPriorRowState(Mutation m) { + return true; + } + + @Override + public ReplayWrite getReplayWrite() { + return null; + } + + @Override + public int getClientVersion() { + return ScanUtil.UNKNOWN_CLIENT_VERSION; + } + }; + + /** + * Determines whether or not we need to look up the old row to retrieve old row values for + * maintaining the index. + * @param m mutation being performed on the data table + * @return true if prior row state is required and false otherwise + */ + public boolean requiresPriorRowState(Mutation m); + + public ReplayWrite getReplayWrite(); + + public int getClientVersion(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexUpdate.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexUpdate.java index fd43d40c809..1336b684458 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexUpdate.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/IndexUpdate.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.hbase.index.covered.update.ColumnTracker; /** @@ -58,20 +57,19 @@ public ColumnTracker getIndexedColumns() { @Override public String toString() { return "IndexUpdate: \n\ttable - " + Bytes.toString(tableName) + "\n\tupdate: " + update - + "\n\tcolumns: " + columns; + + "\n\tcolumns: " + columns; } - public static IndexUpdate createIndexUpdateForTesting(ColumnTracker tracker, byte[] table, Put p) { + public static IndexUpdate createIndexUpdateForTesting(ColumnTracker tracker, byte[] table, + Put p) { IndexUpdate update = new IndexUpdate(tracker); update.setTable(table); update.setUpdate(p); return update; } - /** - * @return true if the necessary state for a valid index update has been set. - */ + /** Returns true if the necessary state for a valid index update has been set. */ public boolean isValid() { return this.tableName != null && this.update != null; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java index 0848e29249c..2c2eb1ea275 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/KeyValueStore.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,4 +30,4 @@ public interface KeyValueStore { public ReseekableScanner getScanner(); public void rollback(Cell kv); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java index a0bccaed3b3..d7287c69286 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.covered; import java.io.IOException; @@ -35,25 +34,23 @@ */ public interface TableState { - /** - * @return the current timestamp up-to-which we are releasing table state. - */ + /** Returns the current timestamp up-to-which we are releasing table state. 
*/ public long getCurrentTimestamp(); /** * Get a getter interface for the state of the index row - * @param indexedColumns list of indexed columns. - * @param ignoreNewerMutations ignore mutations newer than m when determining current state. Useful - * when replaying mutation state for partial index rebuild where writes succeeded to the data - * table, but not to the index table. - * @param indexMetaData TODO + * @param indexedColumns list of indexed columns. + * @param ignoreNewerMutations ignore mutations newer than m when determining current state. + * Useful when replaying mutation state for partial index rebuild + * where writes succeeded to the data table, but not to the index + * table. + * @param indexMetaData TODO */ Pair getIndexUpdateState( - Collection indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData) throws IOException; + Collection indexedColumns, boolean ignoreNewerMutations, + boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData) throws IOException; - /** - * @return the row key for the current row for which we are building an index update. - */ + /** Returns the row key for the current row for which we are building an index update. */ byte[] getCurrentRowKey(); /** diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java index 478d98bf2cc..a8de7d5651b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/DelegateComparator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,61 +23,61 @@ import org.apache.hadoop.hbase.CellComparator; public class DelegateComparator implements CellComparator { - - private CellComparator delegate; - - public DelegateComparator(CellComparator delegate) { - this.delegate=delegate; - } - - @Override - public int compare(Cell leftCell, Cell rightCell) { - return delegate.compare(leftCell, rightCell); - } - - @Override - public int compareRows(Cell leftCell, Cell rightCell) { - return delegate.compareRows(leftCell, rightCell); - } - - @Override - public int compareRows(Cell cell, byte[] bytes, int offset, int length) { - return delegate.compareRows(cell, bytes, offset, length); - } - - @Override - public int compareWithoutRow(Cell leftCell, Cell rightCell) { - return delegate.compareWithoutRow(leftCell, rightCell); - } - - @Override - public int compareFamilies(Cell leftCell, Cell rightCell) { - return delegate.compareFamilies(leftCell, rightCell); - } - - @Override - public int compareQualifiers(Cell leftCell, Cell rightCell) { - return delegate.compareQualifiers(leftCell, rightCell); - } - - @Override - public int compareTimestamps(Cell leftCell, Cell rightCell) { - return delegate.compareTimestamps(leftCell, rightCell); - } - - @Override - public int compareTimestamps(long leftCellts, long rightCellts) { - return delegate.compareTimestamps(leftCellts, rightCellts); - } - - @Override - public int compare(Cell leftCell, Cell rightCell, boolean ignoreSequenceid) { - return delegate.compare(leftCell, rightCell, ignoreSequenceid); - } - - @Override - public Comparator getSimpleComparator() { - return delegate.getSimpleComparator(); - } + + private CellComparator delegate; + + public DelegateComparator(CellComparator delegate) { + this.delegate = delegate; + } + + @Override + public int compare(Cell leftCell, Cell rightCell) { + return delegate.compare(leftCell, rightCell); + } + + @Override + public int compareRows(Cell leftCell, Cell rightCell) { + return delegate.compareRows(leftCell, rightCell); + } + + @Override + public int compareRows(Cell cell, byte[] bytes, int offset, int length) { + return delegate.compareRows(cell, bytes, offset, length); + } + + @Override + public int compareWithoutRow(Cell leftCell, Cell rightCell) { + return delegate.compareWithoutRow(leftCell, rightCell); + } + + @Override + public int compareFamilies(Cell leftCell, Cell rightCell) { + return delegate.compareFamilies(leftCell, rightCell); + } + + @Override + public int compareQualifiers(Cell leftCell, Cell rightCell) { + return delegate.compareQualifiers(leftCell, rightCell); + } + + @Override + public int compareTimestamps(Cell leftCell, Cell rightCell) { + return delegate.compareTimestamps(leftCell, rightCell); + } + + @Override + public int compareTimestamps(long leftCellts, long rightCellts) { + return delegate.compareTimestamps(leftCellts, rightCellts); + } + + @Override + public int compare(Cell leftCell, Cell rightCell, boolean ignoreSequenceid) { + return delegate.compare(leftCell, rightCell, ignoreSequenceid); + } + + @Override + public Comparator getSimpleComparator() { + return delegate.getSimpleComparator(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java index fc2cb53415a..acfe94ba2ec 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LazyValueGetter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,70 +39,69 @@ */ public class LazyValueGetter extends AbstractValueGetter { - private CoveredDeleteScanner scan; - private volatile Map values; - private byte[] row; + private CoveredDeleteScanner scan; + private volatile Map values; + private byte[] row; - /** - * Back the getter with a {@link Scanner} to actually access the local data. - * @param scan backing scanner - * @param currentRow row key for the row to seek in the scanner - */ - public LazyValueGetter(CoveredDeleteScanner scan, byte[] currentRow) { - this.scan = scan; - this.row = currentRow; - } + /** + * Back the getter with a {@link Scanner} to actually access the local data. + * @param scan backing scanner + * @param currentRow row key for the row to seek in the scanner + */ + public LazyValueGetter(CoveredDeleteScanner scan, byte[] currentRow) { + this.scan = scan; + this.row = currentRow; + } - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException { - Map v = values; - // ensure we have a backing map + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException { + Map v = values; + // ensure we have a backing map + if (v == null) { + synchronized (this) { + v = values; if (v == null) { - synchronized (this) { - v = values; - if (v == null) { - v = values = Collections.synchronizedMap(new HashMap()); - } - } - } - - // check the value in the map - ImmutableBytesWritable value = v.get(ref); - if (value == null) { - value = get(ref); - DeleteTracker deleteTracker = scan.getDeleteTracker(); - if (value == null) { - // Delete family is used for row deletion. Family won't necessarily match as we'll be at - // the delete family marker on the last column family if there is one. - if (deleteTracker.deleteFamily != null && deleteTracker.deleteFamily.getTimestamp() == ts) { - value = HIDDEN_BY_DELETE; - } - } - v.put(ref, value); + v = values = + Collections.synchronizedMap(new HashMap()); } - - return value; + } } - /** - * @param ref - * @return the first value on the scanner for the given column - */ - private ImmutableBytesPtr get(ColumnReference ref) throws IOException { - KeyValue first = ref.getFirstKeyValueForRow(row); - if (!scan.seek(first)) { - return null; + // check the value in the map + ImmutableBytesWritable value = v.get(ref); + if (value == null) { + value = get(ref); + DeleteTracker deleteTracker = scan.getDeleteTracker(); + if (value == null) { + // Delete family is used for row deletion. Family won't necessarily match as we'll be at + // the delete family marker on the last column family if there is one. 
+ if (deleteTracker.deleteFamily != null && deleteTracker.deleteFamily.getTimestamp() == ts) { + value = HIDDEN_BY_DELETE; } - // there is a next value - we only care about the current value, so we can just snag that - Cell next = scan.next(); - if (ref.matches(next)) { - return new ImmutableBytesPtr(next.getValueArray(), next.getValueOffset(), next.getValueLength()); - } - return null; + } + v.put(ref, value); } - @Override - public byte[] getRowKey() { - return this.row; + return value; + } + + /** Returns the first value on the scanner for the given column */ + private ImmutableBytesPtr get(ColumnReference ref) throws IOException { + KeyValue first = ref.getFirstKeyValueForRow(row); + if (!scan.seek(first)) { + return null; } -} \ No newline at end of file + // there is a next value - we only care about the current value, so we can just snag that + Cell next = scan.next(); + if (ref.matches(next)) { + return new ImmutableBytesPtr(next.getValueArray(), next.getValueOffset(), + next.getValueLength()); + } + return null; + } + + @Override + public byte[] getRowKey() { + return this.row; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java index 56e731aacbc..6440fa635b0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/data/LocalHBaseState.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,17 +30,18 @@ */ public interface LocalHBaseState { - /** - * @param m mutation for which we should get the current table state - * @param toCover all the columns the current row state needs to cover; hint the underlying lookup - * to save getting all the columns for the row - * @param ignoreNewerMutations ignore mutations newer than m when determining current state. Useful - * when replaying mutation state for partial index rebuild where writes succeeded to the data - * table, but not to the index table. - * @return the full state of the given row. Includes all current versions (even if they are not - * usually visible to the client (unless they are also doing a raw scan)),may return null. - * @throws IOException if there is an issue reading the row - */ - public List getCurrentRowState(Mutation m, Collection toCover, boolean ignoreNewerMutations) - throws IOException; -} \ No newline at end of file + /** + * @param m mutation for which we should get the current table state + * @param toCover all the columns the current row state needs to cover; hint the + * underlying lookup to save getting all the columns for the row + * @param ignoreNewerMutations ignore mutations newer than m when determining current state. + * Useful when replaying mutation state for partial index rebuild + * where writes succeeded to the data table, but not to the index + * table. + * @return the full state of the given row. Includes all current versions (even if they are not + * usually visible to the client (unless they are also doing a raw scan)),may return null. 
+ * @throws IOException if there is an issue reading the row + */ + public List getCurrentRowState(Mutation m, Collection toCover, + boolean ignoreNewerMutations) throws IOException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java index 51d82fd83c9..4b649c3e253 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -58,47 +58,45 @@ public class ApplyAndFilterDeletesFilter extends FilterBase { private Hinter currentHint; private DeleteColumnHinter columnHint = new DeleteColumnHinter(); private DeleteFamilyHinter familyHint = new DeleteFamilyHinter(); - + /** * Setup the filter to only include the given families. This allows us to seek intelligently pass * families we don't care about. - * @param families */ public ApplyAndFilterDeletesFilter(Set families) { this.families = new ArrayList(families); Collections.sort(this.families); } - + public DeleteTracker getDeleteTracker() { - return coveringDelete; + return coveringDelete; } - + private ImmutableBytesPtr getNextFamily(ImmutableBytesPtr family) { int index = Collections.binarySearch(families, family); - //doesn't match exactly, be we can find the right next match - //this is pretty unlikely, but just incase - if(index < 0){ - //the actual location of the next match - index = -index -1; - }else{ - //its an exact match for a family, so we get the next entry - index = index +1; + // doesn't match exactly, be we can find the right next match + // this is pretty unlikely, but just incase + if (index < 0) { + // the actual location of the next match + index = -index - 1; + } else { + // its an exact match for a family, so we get the next entry + index = index + 1; } - //now we have the location of the next entry - if(index >= families.size()){ + // now we have the location of the next entry + if (index >= families.size()) { return null; } - return families.get(index); + return families.get(index); } - + @Override - public void reset(){ + public void reset() { this.coveringDelete.reset(); } - - + @Override - public Cell getNextCellHint(Cell peeked){ + public Cell getNextCellHint(Cell peeked) { return currentHint.getHint(PhoenixKeyValueUtil.maybeCopyCell(peeked)); } @@ -111,57 +109,62 @@ public ReturnCode filterKeyValue(Cell next) { public ReturnCode filterCell(Cell next) { KeyValue nextKV = PhoenixKeyValueUtil.maybeCopyCell(next); switch (next.getType()) { - /* - * DeleteFamily will always sort first because those KVs (we assume) don't have qualifiers (or - * rather are null). Therefore, we have to keep a hold of all the delete families until we get - * to a Put entry that is covered by that delete (in which case, we are done with the family). - */ - case DeleteFamily: - // track the family to delete. 
If we are updating the delete, that means we have passed all - // kvs in the last column, so we can safely ignore the last deleteFamily, and just use this - // one. In fact, it means that all the previous deletes can be ignored because the family must - // not match anymore. - // We could potentially have multiple deleteFamily for the same row and family - // (e.g. upsert row+family, delete it, upsert again, delete again), - // in which case we keep the first one since its timestamp dominates - if (coveringDelete.deleteFamily == null || !CellUtil.matchingFamily(coveringDelete.deleteFamily, nextKV)) { + /* + * DeleteFamily will always sort first because those KVs (we assume) don't have qualifiers (or + * rather are null). Therefore, we have to keep a hold of all the delete families until we get + * to a Put entry that is covered by that delete (in which case, we are done with the family). + */ + case DeleteFamily: + // track the family to delete. If we are updating the delete, that means we have passed all + // kvs in the last column, so we can safely ignore the last deleteFamily, and just use this + // one. In fact, it means that all the previous deletes can be ignored because the family + // must + // not match anymore. + // We could potentially have multiple deleteFamily for the same row and family + // (e.g. upsert row+family, delete it, upsert again, delete again), + // in which case we keep the first one since its timestamp dominates + if ( + coveringDelete.deleteFamily == null + || !CellUtil.matchingFamily(coveringDelete.deleteFamily, nextKV) + ) { this.coveringDelete.reset(); this.coveringDelete.deleteFamily = nextKV; - } - return ReturnCode.SKIP; - case DeleteColumn: - // similar to deleteFamily, all the newer deletes/puts would have been seen at this point, so - // we can safely replace the more recent delete column with the more recent one - this.coveringDelete.pointDelete = null; - this.coveringDelete.deleteColumn = nextKV; - return ReturnCode.SKIP; - case Delete: - // we are just deleting the single column value at this point. - // therefore we just skip this entry and go onto the next one. The only caveat is that - // we should still cover the next entry if this delete applies to the next entry, so we - // have to keep around a reference to the KV to compare against the next valid entry - this.coveringDelete.pointDelete = nextKV; - return ReturnCode.SKIP; - default: - // no covering deletes - if (coveringDelete.empty()) { - return ReturnCode.INCLUDE; - } + } + return ReturnCode.SKIP; + case DeleteColumn: + // similar to deleteFamily, all the newer deletes/puts would have been seen at this point, + // so + // we can safely replace the more recent delete column with the more recent one + this.coveringDelete.pointDelete = null; + this.coveringDelete.deleteColumn = nextKV; + return ReturnCode.SKIP; + case Delete: + // we are just deleting the single column value at this point. + // therefore we just skip this entry and go onto the next one. 
The only caveat is that + // we should still cover the next entry if this delete applies to the next entry, so we + // have to keep around a reference to the KV to compare against the next valid entry + this.coveringDelete.pointDelete = nextKV; + return ReturnCode.SKIP; + default: + // no covering deletes + if (coveringDelete.empty()) { + return ReturnCode.INCLUDE; + } - if (coveringDelete.matchesFamily(nextKV)) { - this.currentHint = familyHint; - return ReturnCode.SEEK_NEXT_USING_HINT; - } + if (coveringDelete.matchesFamily(nextKV)) { + this.currentHint = familyHint; + return ReturnCode.SEEK_NEXT_USING_HINT; + } - if (coveringDelete.matchesColumn(nextKV)) { - // hint to the next column - this.currentHint = columnHint; - return ReturnCode.SEEK_NEXT_USING_HINT; - } + if (coveringDelete.matchesColumn(nextKV)) { + // hint to the next column + this.currentHint = columnHint; + return ReturnCode.SEEK_NEXT_USING_HINT; + } - if (coveringDelete.matchesPoint(nextKV)) { - return ReturnCode.SKIP; - } + if (coveringDelete.matchesPoint(nextKV)) { + return ReturnCode.SKIP; + } } @@ -186,16 +189,15 @@ class DeleteFamilyHinter implements Hinter { @Override public Cell getHint(Cell peeked) { // check to see if we have another column to seek - ImmutableBytesPtr nextFamily = - getNextFamily(new ImmutableBytesPtr(peeked.getFamilyArray(), peeked.getFamilyOffset(), - peeked.getFamilyLength())); + ImmutableBytesPtr nextFamily = getNextFamily(new ImmutableBytesPtr(peeked.getFamilyArray(), + peeked.getFamilyOffset(), peeked.getFamilyLength())); if (nextFamily == null) { return KeyValue.LOWESTKEY; } - // there is a valid family, so we should seek to that + // there is a valid family, so we should seek to that return org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(peeked.getRowArray(), - peeked.getRowOffset(), peeked.getRowLength(), nextFamily.get(), - nextFamily.getOffset(), nextFamily.getLength(), HConstants.EMPTY_BYTE_ARRAY, 0, 0); + peeked.getRowOffset(), peeked.getRowLength(), nextFamily.get(), nextFamily.getOffset(), + nextFamily.getLength(), HConstants.EMPTY_BYTE_ARRAY, 0, 0); } } @@ -208,9 +210,10 @@ private static class DeleteColumnHinter implements Hinter { @Override public Cell getHint(Cell kv) { - return org.apache.hadoop.hbase.KeyValueUtil.createLastOnRow(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength()); + return org.apache.hadoop.hbase.KeyValueUtil.createLastOnRow(kv.getRowArray(), + kv.getRowOffset(), kv.getRowLength(), kv.getFamilyArray(), kv.getFamilyOffset(), + kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength()); } } @@ -233,7 +236,6 @@ public void reset() { * Internally, also resets the currently tracked "Delete Family" marker we are tracking if the * keyvalue is into another family (since CFs sort lexicographically, we can discard the current * marker since it must not be applicable to any more kvs in a linear scan). - * @param next * @return true if this {@link KeyValue} matches a delete. 
*/ public boolean matchesFamily(KeyValue next) { @@ -253,16 +255,16 @@ public boolean matchesFamily(KeyValue next) { return false; } - /** - * @param next - * @return */ public boolean matchesColumn(KeyValue next) { if (deleteColumn == null) { return false; } - if (CellUtil.matchingFamily(deleteColumn, next) && CellUtil.matchingQualifier(deleteColumn, next)) { + if ( + CellUtil.matchingFamily(deleteColumn, next) + && CellUtil.matchingQualifier(deleteColumn, next) + ) { // falls within the timestamp range if (deleteColumn.getTimestamp() >= next.getTimestamp()) { return true; @@ -274,16 +276,16 @@ public boolean matchesColumn(KeyValue next) { } /** - * @param next - * @return */ public boolean matchesPoint(KeyValue next) { // point deletes only apply to the exact KV that they reference, so we only need to ensure // that the timestamp matches exactly. Because we sort by timestamp first, either the next // keyvalue has the exact timestamp or is an older (smaller) timestamp, and we can allow that // one. - if (pointDelete != null && CellUtil.matchingFamily(pointDelete, next) - && CellUtil.matchingQualifier(pointDelete, next)) { + if ( + pointDelete != null && CellUtil.matchingFamily(pointDelete, next) + && CellUtil.matchingQualifier(pointDelete, next) + ) { if (pointDelete.getTimestamp() == next.getTimestamp()) { return true; } @@ -293,11 +295,9 @@ public boolean matchesPoint(KeyValue next) { return false; } - /** - * @return true if no delete has been set - */ + /** Returns true if no delete has been set */ public boolean empty() { return deleteFamily == null && deleteColumn == null && pointDelete == null; } } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java index 21250af1869..328868a81e7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ColumnTrackingNextLargestTimestampFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.FilterBase; - import org.apache.phoenix.hbase.index.covered.update.ColumnTracker; /** @@ -62,4 +61,4 @@ public ReturnCode filterCell(Cell v) { return ReturnCode.INCLUDE; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java index b9640d7e669..e0a76b3584d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/MaxTimestampFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.FilterBase; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.util.PhoenixKeyValueUtil; /** @@ -42,10 +41,10 @@ public Cell getNextCellHint(Cell currentKV) { // with other filters too much. KeyValue kv = null; try { - kv = PhoenixKeyValueUtil.maybeCopyCell(currentKV).clone(); + kv = PhoenixKeyValueUtil.maybeCopyCell(currentKV).clone(); } catch (CloneNotSupportedException e) { - // the exception should not happen at all - throw new IllegalArgumentException(e); + // the exception should not happen at all + throw new IllegalArgumentException(e); } kv.setTimestamp(ts); return kv; @@ -64,4 +63,4 @@ public ReturnCode filterCell(Cell v) { } return ReturnCode.INCLUDE; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java index 85d16d90154..5b9da9d394a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/filter/NewerTimestampFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.covered.filter; import org.apache.hadoop.hbase.Cell; @@ -44,4 +43,4 @@ public ReturnCode filterCell(Cell ignored) { return ignored.getTimestamp() > timestamp ? ReturnCode.SKIP : ReturnCode.INCLUDE; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java index 5aa1037b7fe..5c774d06a6c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnReference.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,13 +25,14 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; /** - * + * */ public class ColumnReference implements Comparable { - + public static final byte[] ALL_QUALIFIERS = new byte[0]; - - private static int calcHashCode(ImmutableBytesWritable familyPtr, ImmutableBytesWritable qualifierPtr) { + + private static int calcHashCode(ImmutableBytesWritable familyPtr, + ImmutableBytesWritable qualifierPtr) { final int prime = 31; int result = 1; result = prime * result + familyPtr.hashCode(); @@ -39,58 +40,59 @@ private static int calcHashCode(ImmutableBytesWritable familyPtr, ImmutableBytes return result; } - private final int hashCode; - protected volatile byte[] family; - protected volatile byte[] qualifier; - private final ImmutableBytesPtr familyPtr; - private final ImmutableBytesPtr qualifierPtr; + private final int hashCode; + protected volatile byte[] family; + protected volatile byte[] qualifier; + private final ImmutableBytesPtr familyPtr; + private final ImmutableBytesPtr qualifierPtr; - public ColumnReference(byte[] family, byte[] qualifier) { - this.familyPtr = new ImmutableBytesPtr(family); - this.qualifierPtr = new ImmutableBytesPtr(qualifier); - this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr); - } + public ColumnReference(byte[] family, byte[] qualifier) { + this.familyPtr = new ImmutableBytesPtr(family); + this.qualifierPtr = new ImmutableBytesPtr(qualifier); + this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr); + } - public ColumnReference(byte[] family, int familyOffset, int familyLength, byte[] qualifier, - int qualifierOffset, int qualifierLength) { - this.familyPtr = new ImmutableBytesPtr(family, familyOffset, familyLength); - this.qualifierPtr = new ImmutableBytesPtr(qualifier, qualifierOffset, qualifierLength); - this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr); - } - - public byte[] getFamily() { + public ColumnReference(byte[] family, int familyOffset, int familyLength, byte[] qualifier, + int qualifierOffset, int qualifierLength) { + this.familyPtr = new ImmutableBytesPtr(family, familyOffset, familyLength); + this.qualifierPtr = new ImmutableBytesPtr(qualifier, qualifierOffset, qualifierLength); + this.hashCode = calcHashCode(this.familyPtr, this.qualifierPtr); + } + + public byte[] getFamily() { + if (this.family == null) { + synchronized (this.familyPtr) { if (this.family == null) { - synchronized (this.familyPtr) { - if (this.family == null) { - this.family = this.familyPtr.copyBytesIfNecessary(); - } - } + this.family = this.familyPtr.copyBytesIfNecessary(); } - return this.family; + } } + return this.family; + } - public byte[] getQualifier() { + public byte[] getQualifier() { + if (this.qualifier == null) { + synchronized (this.qualifierPtr) { if (this.qualifier == null) { - synchronized (this.qualifierPtr) { - if (this.qualifier == null) { - this.qualifier = this.qualifierPtr.copyBytesIfNecessary(); - } - } + this.qualifier = this.qualifierPtr.copyBytesIfNecessary(); } - return this.qualifier; + } } + return this.qualifier; + } - public ImmutableBytesPtr getFamilyWritable() { - return this.familyPtr; - } + public ImmutableBytesPtr getFamilyWritable() { + return this.familyPtr; + } - public ImmutableBytesPtr 
getQualifierWritable() { - return this.qualifierPtr; - } + public ImmutableBytesPtr getQualifierWritable() { + return this.qualifierPtr; + } public boolean matches(Cell kv) { if (matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength())) { - return matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + return matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength()); } return false; } @@ -103,10 +105,12 @@ public boolean matchesQualifier(byte[] qual) { return matchesQualifier(qual, 0, qual.length); } - public boolean matchesQualifier(byte[] bytes, int offset, int length) { - return allColumns() ? true : match(bytes, offset, length, qualifierPtr.get(), - qualifierPtr.getOffset(), qualifierPtr.getLength()); - } + public boolean matchesQualifier(byte[] bytes, int offset, int length) { + return allColumns() + ? true + : match(bytes, offset, length, qualifierPtr.get(), qualifierPtr.getOffset(), + qualifierPtr.getLength()); + } /** * @param family to check against @@ -117,32 +121,33 @@ public boolean matchesFamily(byte[] family) { } public boolean matchesFamily(byte[] bytes, int offset, int length) { - return match(bytes, offset, length, familyPtr.get(), familyPtr.getOffset(), familyPtr.getLength()); + return match(bytes, offset, length, familyPtr.get(), familyPtr.getOffset(), + familyPtr.getLength()); } /** - * @return true if this should include all column qualifiers, false otherwise + * Returns true if this should include all column qualifiers, false otherwise */ public boolean allColumns() { return getQualifier() == ALL_QUALIFIERS; } - /** - * Check to see if the passed bytes match the stored bytes - * @param first - * @param storedKey the stored byte[], should never be null - * @return true if they are byte-equal - */ - private boolean match(byte[] first, int offset1, int length1, byte[] storedKey, int offset2, - int length2) { - return first == null ? false : Bytes.equals(first, offset1, length1, storedKey, offset2, - length2); - } + /** + * Check to see if the passed bytes match the stored bytes + * @param storedKey the stored byte[], should never be null + * @return true if they are byte-equal + */ + private boolean match(byte[] first, int offset1, int length1, byte[] storedKey, int offset2, + int length2) { + return first == null + ? false + : Bytes.equals(first, offset1, length1, storedKey, offset2, length2); + } - public KeyValue getFirstKeyValueForRow(byte[] row) { - return KeyValueUtil.createFirstOnRow(row, getFamily(), getQualifier() == ALL_QUALIFIERS ? null - : getQualifier()); - } + public KeyValue getFirstKeyValueForRow(byte[] row) { + return KeyValueUtil.createFirstOnRow(row, getFamily(), + getQualifier() == ALL_QUALIFIERS ? 
null : getQualifier()); + } @Override public int compareTo(ColumnReference o) { @@ -172,6 +177,7 @@ public int hashCode() { @Override public String toString() { - return "ColumnReference - " + Bytes.toString(getFamily()) + ":" + Bytes.toString(getQualifier()); + return "ColumnReference - " + Bytes.toString(getFamily()) + ":" + + Bytes.toString(getQualifier()); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnTracker.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnTracker.java index 7c69493be12..5131bcd1f2a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnTracker.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/ColumnTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +21,6 @@ import java.util.Collection; import java.util.List; - /** * Simple POJO for tracking a bunch of column references and the next-newest timestamp for those * columns @@ -38,8 +37,8 @@ public class ColumnTracker implements IndexedColumnGroup { private final int hashCode; private static int calcHashCode(List columns) { - return columns.hashCode(); - } + return columns.hashCode(); + } public ColumnTracker(Collection columns) { this.columns = new ArrayList(columns); @@ -69,13 +68,13 @@ public int hashCode() { } @Override - public boolean equals(Object o){ - if(!(o instanceof ColumnTracker)){ + public boolean equals(Object o) { + if (!(o instanceof ColumnTracker)) { return false; } - ColumnTracker other = (ColumnTracker)o; + ColumnTracker other = (ColumnTracker) o; if (hashCode != other.hashCode) { - return false; + return false; } if (other.columns.size() != columns.size()) { return false; @@ -111,4 +110,4 @@ public boolean hasNewerTimestamps() { public static boolean isNewestTime(long ts) { return ts == NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java index 1c08f2cad4a..554a45f5dcc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexUpdateManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.hbase.index.covered.IndexMetaData; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; @@ -46,8 +45,7 @@ public class IndexUpdateManager { public Comparator COMPARATOR = new MutationComparator(); - private static class MutationComparator implements Comparator, - Serializable { + private static class MutationComparator implements Comparator, Serializable { @Override public int compare(Mutation o1, Mutation o2) { @@ -80,8 +78,7 @@ public int compare(Mutation o1, Mutation o2) { } throw new RuntimeException( - "Got unexpected mutation types! Can only be Put or Delete, but got: " + o1 + ", and " - + o2); + "Got unexpected mutation types! Can only be Put or Delete, but got: " + o1 + ", and " + o2); } private int comparePuts(Put p1, Put p2) { @@ -103,7 +100,7 @@ private int comparePuts(Put p1, Put p2) { private static final byte[] TRUE_MARKER = new byte[] { 1 }; protected final Map> map = - new HashMap>(); + new HashMap>(); private IndexMetaData indexMetaData; public IndexUpdateManager(IndexMetaData indexMetaData) { @@ -113,8 +110,6 @@ public IndexUpdateManager(IndexMetaData indexMetaData) { /** * Add an index update. Keeps the latest {@link Put} for a given timestamp - * @param tableName - * @param m */ public void addIndexUpdate(byte[] tableName, Mutation m) { // we only keep the most recent update @@ -135,7 +130,6 @@ public void addIndexUpdate(byte[] tableName, Mutation m) { /** * Fix up the current updates, given the pending mutation. * @param updates current updates - * @param pendingMutation */ protected void fixUpCurrentUpdates(Collection updates, Mutation pendingMutation) { // need to check for each entry to see if we have a duplicate @@ -185,10 +179,10 @@ protected void fixUpCurrentUpdates(Collection updates, Mutation pendin } } if (toRemove != null) { - updates.remove(toRemove); + updates.remove(toRemove); } if (pendingMutation != null) { - updates.add(pendingMutation); + updates.add(pendingMutation); } } @@ -213,7 +207,6 @@ public List> toMap() { } /** - * @param updates */ public void addAll(Collection> updates) { for (Pair update : updates) { @@ -236,8 +229,8 @@ public String toString() { if (shouldBeRemoved(m)) { sb.append("[REMOVED]"); } - sb.append(m.getClass().getSimpleName() + ":" - + ((m instanceof Put) ? m.getTimestamp() + " " : "")); + sb.append( + m.getClass().getSimpleName() + ":" + ((m instanceof Put) ? 
m.getTimestamp() + " " : "")); sb.append(" row=" + Bytes.toStringBinary(m.getRow())); sb.append("\n"); if (m.getFamilyCellMap().isEmpty()) { @@ -245,8 +238,8 @@ public String toString() { } for (List kvs : m.getFamilyCellMap().values()) { for (Cell kv : kvs) { - sb.append("\t\t" + kv.toString() + "/value=" + Bytes.toStringBinary(kv.getValueArray(), - kv.getValueOffset(), kv.getValueLength())); + sb.append("\t\t" + kv.toString() + "/value=" + + Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); sb.append("\n"); } } @@ -254,4 +247,4 @@ public String toString() { } return sb.toString(); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexedColumnGroup.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexedColumnGroup.java index 3c98f0f2ce0..b1d23b8a589 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexedColumnGroup.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/covered/update/IndexedColumnGroup.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,4 +25,4 @@ public interface IndexedColumnGroup { public List getColumns(); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java index 0b7c7e881dd..b56b7f41258 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/IndexWriteException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +19,9 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; + import org.apache.hadoop.hbase.HBaseIOException; import org.apache.phoenix.query.QueryServicesOptions; - import org.apache.phoenix.thirdparty.com.google.common.base.MoreObjects; /** @@ -30,25 +30,23 @@ @SuppressWarnings("serial") public class IndexWriteException extends HBaseIOException { - /* - * We pass this message back to the client so that the config only needs to be set on the - * server side. - */ - private static final String DISABLE_INDEX_ON_FAILURE_MSG = "disableIndexOnFailure="; - private boolean disableIndexOnFailure = QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX; + /* + * We pass this message back to the client so that the config only needs to be set on the server + * side. 
+ */ + private static final String DISABLE_INDEX_ON_FAILURE_MSG = "disableIndexOnFailure="; + private boolean disableIndexOnFailure = QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX; public IndexWriteException() { super(); } - /** - * Used for the case where we cannot reach the index, but not sure of the table or the mutations - * that caused the failure - * @param message - * @param cause - */ + /** + * Used for the case where we cannot reach the index, but not sure of the table or the mutations + * that caused the failure + */ public IndexWriteException(String message, Throwable cause) { - super(message, cause); + super(message, cause); } public IndexWriteException(Throwable cause, boolean disableIndexOnFailure) { @@ -60,29 +58,28 @@ public IndexWriteException(boolean disableIndexOnFailure) { this.disableIndexOnFailure = disableIndexOnFailure; } -public IndexWriteException(Throwable cause) { + public IndexWriteException(Throwable cause) { super(cause); } - public static boolean parseDisableIndexOnFailure(String message) { - Pattern p = - Pattern.compile(DISABLE_INDEX_ON_FAILURE_MSG + "(true|false)", - Pattern.CASE_INSENSITIVE); - Matcher m = p.matcher(message); - if (m.find()) { - boolean disableIndexOnFailure = Boolean.parseBoolean(m.group(1)); - return disableIndexOnFailure; - } - return QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX; + public static boolean parseDisableIndexOnFailure(String message) { + Pattern p = + Pattern.compile(DISABLE_INDEX_ON_FAILURE_MSG + "(true|false)", Pattern.CASE_INSENSITIVE); + Matcher m = p.matcher(message); + if (m.find()) { + boolean disableIndexOnFailure = Boolean.parseBoolean(m.group(1)); + return disableIndexOnFailure; } + return QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX; + } - public boolean isDisableIndexOnFailure() { - return disableIndexOnFailure; - } + public boolean isDisableIndexOnFailure() { + return disableIndexOnFailure; + } - @Override - public String getMessage() { - return MoreObjects.firstNonNull(super.getMessage(), "") + " " - + DISABLE_INDEX_ON_FAILURE_MSG + disableIndexOnFailure + ","; - } -} \ No newline at end of file + @Override + public String getMessage() { + return MoreObjects.firstNonNull(super.getMessage(), "") + " " + DISABLE_INDEX_ON_FAILURE_MSG + + disableIndexOnFailure + ","; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java index 97a7994a5f6..79cce3f1df6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/MultiIndexWriteFailureException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,15 +40,15 @@ public class MultiIndexWriteFailureException extends IndexWriteException { * @param failures the tables to which the index write did not succeed */ public MultiIndexWriteFailureException(List failures, - boolean disableIndexOnFailure) { - super(disableIndexOnFailure); - this.failures = failures; + boolean disableIndexOnFailure) { + super(disableIndexOnFailure); + this.failures = failures; } public MultiIndexWriteFailureException(List failures, - boolean disableIndexOnFailure, Throwable cause) { - super(cause, disableIndexOnFailure); - this.failures = failures; + boolean disableIndexOnFailure, Throwable cause) { + super(cause, disableIndexOnFailure); + this.failures = failures; } /** @@ -57,17 +57,18 @@ public MultiIndexWriteFailureException(List failures, * @param message detail message */ public MultiIndexWriteFailureException(String message) { - super(IndexWriteException.parseDisableIndexOnFailure(message)); - Pattern p = Pattern.compile(FAILURE_MSG + "\\[(.*)\\]"); - Matcher m = p.matcher(message); - if (m.find()) { - failures = Lists.newArrayList(); - String tablesStr = m.group(1); - for (String tableName : tablesStr.split(",\\s")) { - HTableInterfaceReference tableRef = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes(tableName))); - failures.add(tableRef); - } + super(IndexWriteException.parseDisableIndexOnFailure(message)); + Pattern p = Pattern.compile(FAILURE_MSG + "\\[(.*)\\]"); + Matcher m = p.matcher(message); + if (m.find()) { + failures = Lists.newArrayList(); + String tablesStr = m.group(1); + for (String tableName : tablesStr.split(",\\s")) { + HTableInterfaceReference tableRef = + new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes(tableName))); + failures.add(tableRef); } + } } public List getFailedTables() { @@ -75,11 +76,11 @@ public List getFailedTables() { } public void setFailedTables(List failedTables) { - this.failures = failedTables; + this.failures = failedTables; } @Override - public String getMessage() { - return MoreObjects.firstNonNull(super.getMessage(),"") + " " + FAILURE_MSG + failures; - } -} \ No newline at end of file + public String getMessage() { + return MoreObjects.firstNonNull(super.getMessage(), "") + " " + FAILURE_MSG + failures; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java index e58895c3f64..27443df7e9a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/exception/SingleIndexWriteFailureException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.phoenix.thirdparty.com.google.common.base.MoreObjects; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; /** * Exception thrown if we cannot successfully write to an index table. @@ -37,7 +36,7 @@ public class SingleIndexWriteFailureException extends IndexWriteException { /** * Cannot reach the index, but not sure of the table or the mutations that caused the failure - * @param msg more description of what happened + * @param msg more description of what happened * @param cause original cause */ public SingleIndexWriteFailureException(String msg, Throwable cause) { @@ -47,28 +46,28 @@ public SingleIndexWriteFailureException(String msg, Throwable cause) { /** * Failed to write the passed mutations to an index table for some reason. * @param targetTableName index table to which we attempted to write - * @param mutations mutations that were attempted - * @param cause underlying reason for the failure + * @param mutations mutations that were attempted + * @param cause underlying reason for the failure */ public SingleIndexWriteFailureException(String targetTableName, List mutations, - Exception cause, boolean disableIndexOnFailure) { + Exception cause, boolean disableIndexOnFailure) { super(cause, disableIndexOnFailure); this.table = targetTableName; this.mutationsMsg = mutations.toString(); } /** - * This constructor used to rematerialize this exception when receiving - * an rpc exception from the server + * This constructor used to rematerialize this exception when receiving an rpc exception from the + * server * @param msg detail message */ public SingleIndexWriteFailureException(String msg) { - super(IndexWriteException.parseDisableIndexOnFailure(msg)); - Pattern pattern = Pattern.compile(FAILED_MSG + ".* table: ([\\S]*)\\s.*", Pattern.DOTALL); - Matcher m = pattern.matcher(msg); - if (m.find()) { - this.table = m.group(1); - } + super(IndexWriteException.parseDisableIndexOnFailure(msg)); + Pattern pattern = Pattern.compile(FAILED_MSG + ".* table: ([\\S]*)\\s.*", Pattern.DOTALL); + Matcher m = pattern.matcher(msg); + if (m.find()) { + this.table = m.group(1); + } } /** @@ -80,8 +79,10 @@ public String getTableName() { } @Override - public String getMessage() { - return MoreObjects.firstNonNull(super.getMessage(), "") + " " + FAILED_MSG + "\n\t table: " + this.table + "\n\t edits: " + mutationsMsg - + "\n\tcause: " + getCause() == null ? "UNKNOWN" : getCause().getMessage(); - } -} \ No newline at end of file + public String getMessage() { + return MoreObjects.firstNonNull(super.getMessage(), "") + " " + FAILED_MSG + "\n\t table: " + + this.table + "\n\t edits: " + mutationsMsg + "\n\tcause: " + getCause() == null + ? 
"UNKNOWN" + : getCause().getMessage(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSource.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSource.java index 0ef19da6d08..5db1c2c99ab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSource.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.metrics; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -23,67 +23,68 @@ * Interface for metrics from GlobalIndexChecker */ public interface GlobalIndexCheckerSource extends BaseSource { - // Metrics2 and JMX constants - String METRICS_NAME = "GlobalIndexChecker"; - String METRICS_CONTEXT = "phoenix"; - String METRICS_DESCRIPTION = "Metrics about the Phoenix Global Index Checker"; - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; + // Metrics2 and JMX constants + String METRICS_NAME = "GlobalIndexChecker"; + String METRICS_CONTEXT = "phoenix"; + String METRICS_DESCRIPTION = "Metrics about the Phoenix Global Index Checker"; + String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String INDEX_INSPECTION = "indexInspections"; - String INDEX_INSPECTION_DESC = "The number of index rows inspected for verified status"; + String INDEX_INSPECTION = "indexInspections"; + String INDEX_INSPECTION_DESC = "The number of index rows inspected for verified status"; - String INDEX_REPAIR = "indexRepairs"; - String INDEX_REPAIR_DESC = "The number of index row repairs"; + String INDEX_REPAIR = "indexRepairs"; + String INDEX_REPAIR_DESC = "The number of index row repairs"; - String INDEX_REPAIR_FAILURE = "indexRepairFailures"; - String INDEX_REPAIR_FAILURE_DESC = "The number of index row repair failures"; + String INDEX_REPAIR_FAILURE = "indexRepairFailures"; + String INDEX_REPAIR_FAILURE_DESC = "The number of index row repair failures"; - String INDEX_REPAIR_TIME = "indexRepairTime"; - String INDEX_REPAIR_TIME_DESC = "Histogram for the time in milliseconds for index row repairs"; + String INDEX_REPAIR_TIME = "indexRepairTime"; + String INDEX_REPAIR_TIME_DESC = "Histogram for the time in milliseconds for index row 
repairs"; - String INDEX_REPAIR_FAILURE_TIME = "indexRepairFailureTime"; - String INDEX_REPAIR_FAILURE_TIME_DESC = "Histogram for the time in milliseconds for index row repair failures"; + String INDEX_REPAIR_FAILURE_TIME = "indexRepairFailureTime"; + String INDEX_REPAIR_FAILURE_TIME_DESC = + "Histogram for the time in milliseconds for index row repair failures"; - String UNVERIFIED_INDEX_ROW_AGE = "unverifiedIndexRowAge"; - String UNVERIFIED_INDEX_ROW_AGE_DESC = "Histogram for the age in " + - "milliseconds for unverified row soon after it is repaired"; + String UNVERIFIED_INDEX_ROW_AGE = "unverifiedIndexRowAge"; + String UNVERIFIED_INDEX_ROW_AGE_DESC = + "Histogram for the age in " + "milliseconds for unverified row soon after it is repaired"; - /** - * Increments the number of index rows inspected for verified status - * @param indexName Name of the index - */ - public void incrementIndexInspections(String indexName); + /** + * Increments the number of index rows inspected for verified status + * @param indexName Name of the index + */ + public void incrementIndexInspections(String indexName); - /** - * Increments the number of index repairs - * @param indexName Name of the index - */ - void incrementIndexRepairs(String indexName); + /** + * Increments the number of index repairs + * @param indexName Name of the index + */ + void incrementIndexRepairs(String indexName); - /** - * Updates the index age of unverified row histogram - * @param indexName name of the index - * @param time time taken in milliseconds - */ - void updateUnverifiedIndexRowAge(String indexName, long time); + /** + * Updates the index age of unverified row histogram + * @param indexName name of the index + * @param time time taken in milliseconds + */ + void updateUnverifiedIndexRowAge(String indexName, long time); - /** - * Increments the number of index repair failures - * @param indexName Name of the index - */ - void incrementIndexRepairFailures(String indexName); + /** + * Increments the number of index repair failures + * @param indexName Name of the index + */ + void incrementIndexRepairFailures(String indexName); - /** - * Updates the index repair time histogram - * @param indexName Name of the index - * @param t time taken in milliseconds - */ - void updateIndexRepairTime(String indexName, long t); + /** + * Updates the index repair time histogram + * @param indexName Name of the index + * @param t time taken in milliseconds + */ + void updateIndexRepairTime(String indexName, long t); - /** - * Updates the index repair failure time histogram - * @param indexName Name of the index - * @param t time taken in milliseconds - */ - void updateIndexRepairFailureTime(String indexName, long t); -} \ No newline at end of file + /** + * Updates the index repair failure time histogram + * @param indexName Name of the index + * @param t time taken in milliseconds + */ + void updateIndexRepairFailureTime(String indexName, long t); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSourceImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSourceImpl.java index f1724bd9f5d..d88e535acc4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSourceImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/GlobalIndexCheckerSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * 
contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,105 +24,103 @@ /** * Implementation for tracking Phoenix Index Checker metrics. */ -public class GlobalIndexCheckerSourceImpl extends BaseSourceImpl implements GlobalIndexCheckerSource { - - private final MutableFastCounter indexInspections; - private final MutableFastCounter indexRepairs; - private final MutableFastCounter indexRepairFailures; - - private final MetricHistogram indexRepairTimeHisto; - private final MetricHistogram indexRepairFailureTimeHisto; - private final MetricHistogram unverifiedIndexRowAge; - - public GlobalIndexCheckerSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public GlobalIndexCheckerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - - indexInspections = getMetricsRegistry().newCounter(INDEX_INSPECTION, INDEX_INSPECTION_DESC, 0L); - indexRepairs = getMetricsRegistry().newCounter(INDEX_REPAIR, INDEX_REPAIR_DESC, 0L); - indexRepairFailures = getMetricsRegistry().newCounter(INDEX_REPAIR_FAILURE, INDEX_REPAIR_FAILURE_DESC, 0L); - - indexRepairTimeHisto = getMetricsRegistry().newHistogram(INDEX_REPAIR_TIME, INDEX_REPAIR_TIME_DESC); - indexRepairFailureTimeHisto = getMetricsRegistry().newHistogram(INDEX_REPAIR_FAILURE_TIME, INDEX_REPAIR_FAILURE_TIME_DESC); - unverifiedIndexRowAge = getMetricsRegistry().newHistogram( - UNVERIFIED_INDEX_ROW_AGE, UNVERIFIED_INDEX_ROW_AGE_DESC); - } - - /** - * Increments the number of index rows inspected for verified status - */ - public void incrementIndexInspections(String indexName) { - incrementIndexSpecificCounter(INDEX_INSPECTION, indexName); - indexInspections.incr(); - } - - /** - * Increments the number of index repairs - */ - public void incrementIndexRepairs(String indexName) { - incrementIndexSpecificCounter(INDEX_REPAIR, indexName); - indexRepairs.incr(); - } - - /** - * Increments the number of index repair failures - */ - public void incrementIndexRepairFailures(String indexName) { - incrementIndexSpecificCounter(INDEX_REPAIR_FAILURE, indexName); - indexRepairFailures.incr(); - } - - /** - * Updates the index age of unverified row histogram - * @param indexName name of the index - * @param time time taken in milliseconds - */ - public void updateUnverifiedIndexRowAge(final String indexName, - final long time) { - incrementIndexSpecificHistogram(UNVERIFIED_INDEX_ROW_AGE, indexName, - time); - unverifiedIndexRowAge.add(time); - } 
- - /** - * Updates the index repair time histogram - * - * @param t time taken in milliseconds - */ - public void updateIndexRepairTime(String indexName, long t) { - incrementIndexSpecificHistogram(INDEX_REPAIR_TIME, indexName, t); - indexRepairTimeHisto.add(t); - } - - /** - * Updates the index repair failure time histogram - * - * @param t time taken in milliseconds - */ - public void updateIndexRepairFailureTime(String indexName, long t) { - incrementIndexSpecificHistogram(INDEX_REPAIR_FAILURE_TIME, indexName, t); - indexRepairFailureTimeHisto.add(t); - } - - private void incrementIndexSpecificCounter(String baseCounterName, String indexName) { - MutableFastCounter indexSpecificCounter = - getMetricsRegistry().getCounter(getCounterName(baseCounterName, indexName), 0); - indexSpecificCounter.incr(); - } - - private void incrementIndexSpecificHistogram(String baseCounterName, String indexName, long t) { - MetricHistogram indexSpecificHistogram = - getMetricsRegistry().getHistogram(getCounterName(baseCounterName, indexName)); - indexSpecificHistogram.add(t); - } - - private String getCounterName(String baseCounterName, String indexName) { - return baseCounterName + "." + indexName; - } -} \ No newline at end of file +public class GlobalIndexCheckerSourceImpl extends BaseSourceImpl + implements GlobalIndexCheckerSource { + + private final MutableFastCounter indexInspections; + private final MutableFastCounter indexRepairs; + private final MutableFastCounter indexRepairFailures; + + private final MetricHistogram indexRepairTimeHisto; + private final MetricHistogram indexRepairFailureTimeHisto; + private final MetricHistogram unverifiedIndexRowAge; + + public GlobalIndexCheckerSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } + + public GlobalIndexCheckerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + + indexInspections = getMetricsRegistry().newCounter(INDEX_INSPECTION, INDEX_INSPECTION_DESC, 0L); + indexRepairs = getMetricsRegistry().newCounter(INDEX_REPAIR, INDEX_REPAIR_DESC, 0L); + indexRepairFailures = + getMetricsRegistry().newCounter(INDEX_REPAIR_FAILURE, INDEX_REPAIR_FAILURE_DESC, 0L); + + indexRepairTimeHisto = + getMetricsRegistry().newHistogram(INDEX_REPAIR_TIME, INDEX_REPAIR_TIME_DESC); + indexRepairFailureTimeHisto = + getMetricsRegistry().newHistogram(INDEX_REPAIR_FAILURE_TIME, INDEX_REPAIR_FAILURE_TIME_DESC); + unverifiedIndexRowAge = + getMetricsRegistry().newHistogram(UNVERIFIED_INDEX_ROW_AGE, UNVERIFIED_INDEX_ROW_AGE_DESC); + } + + /** + * Increments the number of index rows inspected for verified status + */ + public void incrementIndexInspections(String indexName) { + incrementIndexSpecificCounter(INDEX_INSPECTION, indexName); + indexInspections.incr(); + } + + /** + * Increments the number of index repairs + */ + public void incrementIndexRepairs(String indexName) { + incrementIndexSpecificCounter(INDEX_REPAIR, indexName); + indexRepairs.incr(); + } + + /** + * Increments the number of index repair failures + */ + public void incrementIndexRepairFailures(String indexName) { + incrementIndexSpecificCounter(INDEX_REPAIR_FAILURE, indexName); + indexRepairFailures.incr(); + } + + /** + * Updates the index age of unverified row histogram + * @param indexName name of the index + * @param time time taken in milliseconds + */ + public void updateUnverifiedIndexRowAge(final String indexName, 
final long time) { + incrementIndexSpecificHistogram(UNVERIFIED_INDEX_ROW_AGE, indexName, time); + unverifiedIndexRowAge.add(time); + } + + /** + * Updates the index repair time histogram + * @param t time taken in milliseconds + */ + public void updateIndexRepairTime(String indexName, long t) { + incrementIndexSpecificHistogram(INDEX_REPAIR_TIME, indexName, t); + indexRepairTimeHisto.add(t); + } + + /** + * Updates the index repair failure time histogram + * @param t time taken in milliseconds + */ + public void updateIndexRepairFailureTime(String indexName, long t) { + incrementIndexSpecificHistogram(INDEX_REPAIR_FAILURE_TIME, indexName, t); + indexRepairFailureTimeHisto.add(t); + } + + private void incrementIndexSpecificCounter(String baseCounterName, String indexName) { + MutableFastCounter indexSpecificCounter = + getMetricsRegistry().getCounter(getCounterName(baseCounterName, indexName), 0); + indexSpecificCounter.incr(); + } + + private void incrementIndexSpecificHistogram(String baseCounterName, String indexName, long t) { + MetricHistogram indexSpecificHistogram = + getMetricsRegistry().getHistogram(getCounterName(baseCounterName, indexName)); + indexSpecificHistogram.add(t); + } + + private String getCounterName(String baseCounterName, String indexName) { + return baseCounterName + "." + indexName; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSource.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSource.java index 97072441487..b0d625e4178 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSource.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,24 +30,31 @@ public interface MetricsIndexerSource extends BaseSource { String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; String INDEX_PREPARE_TIME = "indexPrepareTime"; - String INDEX_PREPARE_TIME_DESC = "Histogram for the time in milliseconds for preparing an index write"; + String INDEX_PREPARE_TIME_DESC = + "Histogram for the time in milliseconds for preparing an index write"; String SLOW_INDEX_PREPARE = "slowIndexPrepareCalls"; - String SLOW_INDEX_PREPARE_DESC = "The number of index preparations slower than the configured threshold"; + String SLOW_INDEX_PREPARE_DESC = + "The number of index preparations slower than the configured threshold"; String INDEX_WRITE_TIME = "indexWriteTime"; - String INDEX_WRITE_TIME_DESC = "Histogram for the time in milliseconds for writing an index update"; + String INDEX_WRITE_TIME_DESC = + "Histogram for the time in milliseconds for writing an index update"; String SLOW_INDEX_WRITE = "slowIndexWriteCalls"; String SLOW_INDEX_WRITE_DESC = "The number of index writes slower than the configured threshold"; String DUPLICATE_KEY_TIME = "duplicateKeyCheckTime"; - String DUPLICATE_KEY_TIME_DESC = "Histogram for the time in milliseconds to handle ON DUPLICATE keywords"; + String DUPLICATE_KEY_TIME_DESC = + "Histogram for the time in milliseconds to handle ON DUPLICATE keywords"; String SLOW_DUPLICATE_KEY = "slowDuplicateKeyCheckCalls"; - String SLOW_DUPLICATE_KEY_DESC = "The number of on duplicate key checks slower than the configured threshold"; + String SLOW_DUPLICATE_KEY_DESC = + "The number of on duplicate key checks slower than the configured threshold"; String PRE_WAL_RESTORE_TIME = "preWALRestoreTime"; - String PRE_WAL_RESTORE_TIME_DESC = "Histogram for the time in milliseconds for Indexer's preWALRestore"; + String PRE_WAL_RESTORE_TIME_DESC = + "Histogram for the time in milliseconds for Indexer's preWALRestore"; String SLOW_PRE_WAL_RESTORE = "slowPreWALRestoreCalls"; - String SLOW_PRE_WAL_RESTORE_DESC = "The number of preWALRestore calls slower than the configured threshold"; + String SLOW_PRE_WAL_RESTORE_DESC = + "The number of preWALRestore calls slower than the configured threshold"; String POST_PUT_TIME = "postPutTime"; String POST_PUT_TIME_DESC = "Histogram for the time in milliseconds for Indexer's postPut"; @@ -56,7 +64,8 @@ public interface MetricsIndexerSource extends BaseSource { String POST_DELETE_TIME = "postDeleteTime"; String POST_DELETE_TIME_DESC = "Histogram for the time in milliseconds for Indexer's postDelete"; String SLOW_POST_DELETE = "slowPostDeleteCalls"; - String SLOW_POST_DELETE_DESC = "The number of postDelete calls slower than the configured threshold"; + String SLOW_POST_DELETE_DESC = + "The number of postDelete calls slower than the configured threshold"; String POST_OPEN_TIME = "postOpenTime"; String POST_OPEN_TIME_DESC = "Histogram for the time in milliseconds for Indexer's postOpen"; @@ -64,50 +73,55 @@ public interface MetricsIndexerSource extends BaseSource { String SLOW_POST_OPEN_DESC = "The number of postOpen calls slower than the configured threshold"; String PRE_INDEX_UPDATE_TIME = "preIndexUpdateTime"; - String PRE_INDEX_UPDATE_TIME_DESC = "Histogram for the time in milliseconds for index updates pre data updates"; + String 
PRE_INDEX_UPDATE_TIME_DESC = + "Histogram for the time in milliseconds for index updates pre data updates"; String POST_INDEX_UPDATE_TIME = "postIndexUpdateTime"; - String POST_INDEX_UPDATE_TIME_DESC = "Histogram for the time in milliseconds for index updates post data updates"; + String POST_INDEX_UPDATE_TIME_DESC = + "Histogram for the time in milliseconds for index updates post data updates"; String PRE_INDEX_UPDATE_FAILURE_TIME = "preIndexUpdateFailureTime"; - String PRE_INDEX_UPDATE_FAILURE_TIME_DESC = "Histogram for the time in milliseconds on failures of index updates pre data updates"; + String PRE_INDEX_UPDATE_FAILURE_TIME_DESC = + "Histogram for the time in milliseconds on failures of index updates pre data updates"; String POST_INDEX_UPDATE_FAILURE_TIME = "postIndexUpdateFailureTime"; - String POST_INDEX_UPDATE_FAILURE_TIME_DESC = "Histogram for the time in milliseconds on failures of index updates post data updates"; + String POST_INDEX_UPDATE_FAILURE_TIME_DESC = + "Histogram for the time in milliseconds on failures of index updates post data updates"; String PRE_INDEX_UPDATE_FAILURE = "preIndexUpdateFailure"; String PRE_INDEX_UPDATE_FAILURE_DESC = "The number of failures of index updates pre data updates"; String POST_INDEX_UPDATE_FAILURE = "postIndexUpdateFailure"; - String POST_INDEX_UPDATE_FAILURE_DESC = "The number of failures of index updates post data updates"; + String POST_INDEX_UPDATE_FAILURE_DESC = + "The number of failures of index updates post data updates"; /** * Updates the index preparation time histogram (preBatchMutate). - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updateIndexPrepareTime(String dataTableName, long t); /** - * @param dataTableName Physical data table name - * Increments the number of slow calls prepare an index write. + * @param dataTableName Physical data table name Increments the number of slow calls prepare an + * index write. */ void incrementNumSlowIndexPrepareCalls(String dataTableName); /** * Updates the index write time histogram (postBatchMutate). - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updateIndexWriteTime(String dataTableName, long t); /** * Increments the number of slow calls to write to the index. - * @param dataTableName Physical data table name + * @param dataTableName Physical data table name */ void incrementNumSlowIndexWriteCalls(String dataTableName); /** * Updates the preWALRestore time histogram. * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param t time taken in milliseconds */ void updatePreWALRestoreTime(String dataTableName, long t); @@ -119,34 +133,34 @@ public interface MetricsIndexerSource extends BaseSource { /** * Updates the postPut time histogram. - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePostPutTime(String dataTableName, long t); /** * Increments the number of slow postPut calls. - * @param dataTableName Physical data table name + * @param dataTableName Physical data table name */ void incrementNumSlowPostPutCalls(String dataTableName); /** * Updates the postDelete time histogram. 
- * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePostDeleteTime(String dataTableName, long t); /** * Increments the number of slow postDelete calls. - * @param dataTableName Physical data table name + * @param dataTableName Physical data table name */ void incrementNumSlowPostDeleteCalls(String dataTableName); /** * Updates the postOpen time histogram. * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param t time taken in milliseconds */ void updatePostOpenTime(String dataTableName, long t); @@ -159,7 +173,7 @@ public interface MetricsIndexerSource extends BaseSource { /** * Updates the preIncrementAfterRowLock time histogram. * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param t time taken in milliseconds */ void updateDuplicateKeyCheckTime(String dataTableName, long t); @@ -172,41 +186,41 @@ public interface MetricsIndexerSource extends BaseSource { // Below metrics are introduced by IndexRegionObserver coprocessor /** * Updates the pre index update time histogram. - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePreIndexUpdateTime(String dataTableName, long t); /** * Updates the post index update time histogram. - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePostIndexUpdateTime(String dataTableName, long t); /** * Updates the pre index update failure time histogram. - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePreIndexUpdateFailureTime(String dataTableName, long t); /** * Updates the post index update failure time histogram. - * @param dataTableName Physical data table name - * @param t time taken in milliseconds + * @param dataTableName Physical data table name + * @param t time taken in milliseconds */ void updatePostIndexUpdateFailureTime(String dataTableName, long t); /** * Increments the number of pre index update failures. - * @param dataTableName Physical data table name + * @param dataTableName Physical data table name */ void incrementPreIndexUpdateFailures(String dataTableName); /** * Increments the number of post index update failures. - * @param dataTableName Physical data table name + * @param dataTableName Physical data table name */ void incrementPostIndexUpdateFailures(String dataTableName); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceFactory.java index baf27f4cf82..e579b2f836e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +25,8 @@ public class MetricsIndexerSourceFactory { private volatile MetricsIndexerSource indexerSource; private GlobalIndexCheckerSource globalIndexCheckerSource; - private MetricsIndexerSourceFactory() {} + private MetricsIndexerSourceFactory() { + } public static MetricsIndexerSourceFactory getInstance() { return INSTANCE; @@ -43,4 +45,4 @@ public synchronized GlobalIndexCheckerSource getGlobalIndexCheckerSource() { } return INSTANCE.globalIndexCheckerSource; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceImpl.java index 79060fa9864..4968ea5a3ff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/metrics/MetricsIndexerSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,198 +26,208 @@ */ public class MetricsIndexerSourceImpl extends BaseSourceImpl implements MetricsIndexerSource { - private final MetricHistogram indexPrepareTimeHisto; - private final MutableFastCounter slowIndexPrepareCalls; - private final MetricHistogram indexWriteTimeHisto; - private final MutableFastCounter slowIndexWriteCalls; - private final MetricHistogram preWALRestoreTimeHisto; - private final MutableFastCounter slowPreWALRestoreCalls; - private final MetricHistogram postPutTimeHisto; - private final MutableFastCounter slowPostPutCalls; - private final MetricHistogram postDeleteTimeHisto; - private final MutableFastCounter slowPostDeleteCalls; - private final MetricHistogram postOpenTimeHisto; - private final MutableFastCounter slowPostOpenCalls; - private final MetricHistogram duplicateKeyTimeHisto; - private final MutableFastCounter slowDuplicateKeyCalls; - - private final MetricHistogram preIndexUpdateTimeHisto; - private final MetricHistogram postIndexUpdateTimeHisto; - private final MetricHistogram preIndexUpdateFailureTimeHisto; - private final MetricHistogram postIndexUpdateFailureTimeHisto; - private final MutableFastCounter preIndexUpdateFailures; - private final MutableFastCounter postIndexUpdateFailures; - - public MetricsIndexerSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsIndexerSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - - indexPrepareTimeHisto = getMetricsRegistry().newHistogram(INDEX_PREPARE_TIME, INDEX_PREPARE_TIME_DESC); - slowIndexPrepareCalls = getMetricsRegistry().newCounter(SLOW_INDEX_PREPARE, SLOW_INDEX_PREPARE_DESC, 0L); - indexWriteTimeHisto = getMetricsRegistry().newHistogram(INDEX_WRITE_TIME, INDEX_WRITE_TIME_DESC); - slowIndexWriteCalls = getMetricsRegistry().newCounter(SLOW_INDEX_WRITE, SLOW_INDEX_WRITE_DESC, 0L); - preWALRestoreTimeHisto = getMetricsRegistry().newHistogram(PRE_WAL_RESTORE_TIME, PRE_WAL_RESTORE_TIME_DESC); - slowPreWALRestoreCalls = getMetricsRegistry().newCounter(SLOW_PRE_WAL_RESTORE, SLOW_PRE_WAL_RESTORE_DESC, 0L); - postPutTimeHisto = getMetricsRegistry().newHistogram(POST_PUT_TIME, POST_PUT_TIME_DESC); - slowPostPutCalls = getMetricsRegistry().newCounter(SLOW_POST_PUT, SLOW_POST_PUT_DESC, 0L); - postDeleteTimeHisto = getMetricsRegistry().newHistogram(POST_DELETE_TIME, POST_DELETE_TIME_DESC); - slowPostDeleteCalls = getMetricsRegistry().newCounter(SLOW_POST_DELETE, SLOW_POST_DELETE_DESC, 0L); - postOpenTimeHisto = getMetricsRegistry().newHistogram(POST_OPEN_TIME, POST_OPEN_TIME_DESC); - slowPostOpenCalls = getMetricsRegistry().newCounter(SLOW_POST_OPEN, SLOW_POST_OPEN_DESC, 0L); - duplicateKeyTimeHisto = getMetricsRegistry().newHistogram(DUPLICATE_KEY_TIME, DUPLICATE_KEY_TIME_DESC); - slowDuplicateKeyCalls = getMetricsRegistry().newCounter(SLOW_DUPLICATE_KEY, SLOW_DUPLICATE_KEY_DESC, 0L); - - postIndexUpdateTimeHisto = getMetricsRegistry().newHistogram( - POST_INDEX_UPDATE_TIME, POST_INDEX_UPDATE_TIME_DESC); - preIndexUpdateTimeHisto = getMetricsRegistry().newHistogram( - PRE_INDEX_UPDATE_TIME, PRE_INDEX_UPDATE_TIME_DESC); - 
postIndexUpdateFailureTimeHisto = getMetricsRegistry().newHistogram( - POST_INDEX_UPDATE_FAILURE_TIME, POST_INDEX_UPDATE_FAILURE_TIME_DESC); - preIndexUpdateFailureTimeHisto = getMetricsRegistry().newHistogram( - PRE_INDEX_UPDATE_FAILURE_TIME, PRE_INDEX_UPDATE_FAILURE_TIME_DESC); - postIndexUpdateFailures = getMetricsRegistry().newCounter( - POST_INDEX_UPDATE_FAILURE, POST_INDEX_UPDATE_FAILURE_DESC, 0L); - preIndexUpdateFailures = getMetricsRegistry().newCounter( - PRE_INDEX_UPDATE_FAILURE, PRE_INDEX_UPDATE_FAILURE_DESC, 0L); - } - - @Override - public void updateIndexPrepareTime(String dataTableName, long t) { - incrementTableSpecificHistogram(INDEX_PREPARE_TIME, dataTableName, t); - indexPrepareTimeHisto.add(t); - } - - @Override - public void updateIndexWriteTime(String dataTableName, long t) { - incrementTableSpecificHistogram(INDEX_WRITE_TIME, dataTableName, t); - indexWriteTimeHisto.add(t); - } - - @Override - public void updatePreWALRestoreTime(String dataTableName, long t) { - incrementTableSpecificHistogram(PRE_WAL_RESTORE_TIME, dataTableName, t); - preWALRestoreTimeHisto.add(t); - } - - @Override - public void updatePostPutTime(String dataTableName, long t) { - incrementTableSpecificHistogram(POST_PUT_TIME, dataTableName, t); - postPutTimeHisto.add(t); - } - - @Override - public void updatePostDeleteTime(String dataTableName, long t) { - incrementTableSpecificHistogram(POST_DELETE_TIME, dataTableName, t); - postDeleteTimeHisto.add(t); - } - - @Override - public void updatePostOpenTime(String dataTableName, long t) { - incrementTableSpecificHistogram(POST_OPEN_TIME, dataTableName, t); - postOpenTimeHisto.add(t); - } - - @Override - public void incrementNumSlowIndexPrepareCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_INDEX_PREPARE, dataTableName); - slowIndexPrepareCalls.incr(); - } - - @Override - public void incrementNumSlowIndexWriteCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_INDEX_WRITE, dataTableName); - slowIndexWriteCalls.incr(); - } - - @Override - public void incrementNumSlowPreWALRestoreCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_PRE_WAL_RESTORE, dataTableName); - slowPreWALRestoreCalls.incr(); - } - - @Override - public void incrementNumSlowPostPutCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_POST_PUT, dataTableName); - slowPostPutCalls.incr(); - } - - @Override - public void incrementNumSlowPostDeleteCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_POST_DELETE, dataTableName); - slowPostDeleteCalls.incr(); - } - - @Override - public void incrementNumSlowPostOpenCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_POST_OPEN, dataTableName); - slowPostOpenCalls.incr(); - } - - @Override - public void updateDuplicateKeyCheckTime(String dataTableName, long t) { - incrementTableSpecificHistogram(DUPLICATE_KEY_TIME, dataTableName, t); - duplicateKeyTimeHisto.add(t); - } - - @Override - public void incrementSlowDuplicateKeyCheckCalls(String dataTableName) { - incrementTableSpecificCounter(SLOW_DUPLICATE_KEY, dataTableName); - slowDuplicateKeyCalls.incr(); - } - - @Override - public void updatePreIndexUpdateTime(String dataTableName, long t) { - incrementTableSpecificHistogram(PRE_INDEX_UPDATE_TIME, dataTableName, t); - preIndexUpdateTimeHisto.add(t); - } - - @Override - public void updatePostIndexUpdateTime(String dataTableName, long t) { - incrementTableSpecificHistogram(POST_INDEX_UPDATE_TIME, dataTableName, t); - postIndexUpdateTimeHisto.add(t); - } - - 
@Override - public void updatePreIndexUpdateFailureTime(String dataTableName, long t) { - incrementTableSpecificHistogram(PRE_INDEX_UPDATE_FAILURE_TIME, dataTableName, t); - preIndexUpdateFailureTimeHisto.add(t); - } - - @Override - public void updatePostIndexUpdateFailureTime(String dataTableName, long t) { - incrementTableSpecificHistogram(POST_INDEX_UPDATE_FAILURE_TIME, dataTableName, t); - postIndexUpdateFailureTimeHisto.add(t); - } - - @Override - public void incrementPreIndexUpdateFailures(String dataTableName) { - incrementTableSpecificCounter(PRE_INDEX_UPDATE_FAILURE, dataTableName); - preIndexUpdateFailures.incr(); - } - - @Override - public void incrementPostIndexUpdateFailures(String dataTableName) { - incrementTableSpecificCounter(POST_INDEX_UPDATE_FAILURE, dataTableName); - postIndexUpdateFailures.incr(); - } - - private void incrementTableSpecificCounter(String baseCounterName, String tableName) { - MutableFastCounter indexSpecificCounter = - getMetricsRegistry().getCounter(getCounterName(baseCounterName, tableName), 0); - indexSpecificCounter.incr(); - } - - private void incrementTableSpecificHistogram(String baseCounterName, String tableName, long t) { - MetricHistogram tableSpecificHistogram = - getMetricsRegistry().getHistogram(getCounterName(baseCounterName, tableName)); - tableSpecificHistogram.add(t); - } - - private String getCounterName(String baseCounterName, String tableName) { - return baseCounterName + "." + tableName; - } -} \ No newline at end of file + private final MetricHistogram indexPrepareTimeHisto; + private final MutableFastCounter slowIndexPrepareCalls; + private final MetricHistogram indexWriteTimeHisto; + private final MutableFastCounter slowIndexWriteCalls; + private final MetricHistogram preWALRestoreTimeHisto; + private final MutableFastCounter slowPreWALRestoreCalls; + private final MetricHistogram postPutTimeHisto; + private final MutableFastCounter slowPostPutCalls; + private final MetricHistogram postDeleteTimeHisto; + private final MutableFastCounter slowPostDeleteCalls; + private final MetricHistogram postOpenTimeHisto; + private final MutableFastCounter slowPostOpenCalls; + private final MetricHistogram duplicateKeyTimeHisto; + private final MutableFastCounter slowDuplicateKeyCalls; + + private final MetricHistogram preIndexUpdateTimeHisto; + private final MetricHistogram postIndexUpdateTimeHisto; + private final MetricHistogram preIndexUpdateFailureTimeHisto; + private final MetricHistogram postIndexUpdateFailureTimeHisto; + private final MutableFastCounter preIndexUpdateFailures; + private final MutableFastCounter postIndexUpdateFailures; + + public MetricsIndexerSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } + + public MetricsIndexerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + + indexPrepareTimeHisto = + getMetricsRegistry().newHistogram(INDEX_PREPARE_TIME, INDEX_PREPARE_TIME_DESC); + slowIndexPrepareCalls = + getMetricsRegistry().newCounter(SLOW_INDEX_PREPARE, SLOW_INDEX_PREPARE_DESC, 0L); + indexWriteTimeHisto = + getMetricsRegistry().newHistogram(INDEX_WRITE_TIME, INDEX_WRITE_TIME_DESC); + slowIndexWriteCalls = + getMetricsRegistry().newCounter(SLOW_INDEX_WRITE, SLOW_INDEX_WRITE_DESC, 0L); + preWALRestoreTimeHisto = + getMetricsRegistry().newHistogram(PRE_WAL_RESTORE_TIME, PRE_WAL_RESTORE_TIME_DESC); + slowPreWALRestoreCalls = + 
getMetricsRegistry().newCounter(SLOW_PRE_WAL_RESTORE, SLOW_PRE_WAL_RESTORE_DESC, 0L); + postPutTimeHisto = getMetricsRegistry().newHistogram(POST_PUT_TIME, POST_PUT_TIME_DESC); + slowPostPutCalls = getMetricsRegistry().newCounter(SLOW_POST_PUT, SLOW_POST_PUT_DESC, 0L); + postDeleteTimeHisto = + getMetricsRegistry().newHistogram(POST_DELETE_TIME, POST_DELETE_TIME_DESC); + slowPostDeleteCalls = + getMetricsRegistry().newCounter(SLOW_POST_DELETE, SLOW_POST_DELETE_DESC, 0L); + postOpenTimeHisto = getMetricsRegistry().newHistogram(POST_OPEN_TIME, POST_OPEN_TIME_DESC); + slowPostOpenCalls = getMetricsRegistry().newCounter(SLOW_POST_OPEN, SLOW_POST_OPEN_DESC, 0L); + duplicateKeyTimeHisto = + getMetricsRegistry().newHistogram(DUPLICATE_KEY_TIME, DUPLICATE_KEY_TIME_DESC); + slowDuplicateKeyCalls = + getMetricsRegistry().newCounter(SLOW_DUPLICATE_KEY, SLOW_DUPLICATE_KEY_DESC, 0L); + + postIndexUpdateTimeHisto = + getMetricsRegistry().newHistogram(POST_INDEX_UPDATE_TIME, POST_INDEX_UPDATE_TIME_DESC); + preIndexUpdateTimeHisto = + getMetricsRegistry().newHistogram(PRE_INDEX_UPDATE_TIME, PRE_INDEX_UPDATE_TIME_DESC); + postIndexUpdateFailureTimeHisto = getMetricsRegistry() + .newHistogram(POST_INDEX_UPDATE_FAILURE_TIME, POST_INDEX_UPDATE_FAILURE_TIME_DESC); + preIndexUpdateFailureTimeHisto = getMetricsRegistry() + .newHistogram(PRE_INDEX_UPDATE_FAILURE_TIME, PRE_INDEX_UPDATE_FAILURE_TIME_DESC); + postIndexUpdateFailures = getMetricsRegistry().newCounter(POST_INDEX_UPDATE_FAILURE, + POST_INDEX_UPDATE_FAILURE_DESC, 0L); + preIndexUpdateFailures = + getMetricsRegistry().newCounter(PRE_INDEX_UPDATE_FAILURE, PRE_INDEX_UPDATE_FAILURE_DESC, 0L); + } + + @Override + public void updateIndexPrepareTime(String dataTableName, long t) { + incrementTableSpecificHistogram(INDEX_PREPARE_TIME, dataTableName, t); + indexPrepareTimeHisto.add(t); + } + + @Override + public void updateIndexWriteTime(String dataTableName, long t) { + incrementTableSpecificHistogram(INDEX_WRITE_TIME, dataTableName, t); + indexWriteTimeHisto.add(t); + } + + @Override + public void updatePreWALRestoreTime(String dataTableName, long t) { + incrementTableSpecificHistogram(PRE_WAL_RESTORE_TIME, dataTableName, t); + preWALRestoreTimeHisto.add(t); + } + + @Override + public void updatePostPutTime(String dataTableName, long t) { + incrementTableSpecificHistogram(POST_PUT_TIME, dataTableName, t); + postPutTimeHisto.add(t); + } + + @Override + public void updatePostDeleteTime(String dataTableName, long t) { + incrementTableSpecificHistogram(POST_DELETE_TIME, dataTableName, t); + postDeleteTimeHisto.add(t); + } + + @Override + public void updatePostOpenTime(String dataTableName, long t) { + incrementTableSpecificHistogram(POST_OPEN_TIME, dataTableName, t); + postOpenTimeHisto.add(t); + } + + @Override + public void incrementNumSlowIndexPrepareCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_INDEX_PREPARE, dataTableName); + slowIndexPrepareCalls.incr(); + } + + @Override + public void incrementNumSlowIndexWriteCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_INDEX_WRITE, dataTableName); + slowIndexWriteCalls.incr(); + } + + @Override + public void incrementNumSlowPreWALRestoreCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_PRE_WAL_RESTORE, dataTableName); + slowPreWALRestoreCalls.incr(); + } + + @Override + public void incrementNumSlowPostPutCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_POST_PUT, dataTableName); + slowPostPutCalls.incr(); + } + + @Override + public void 
incrementNumSlowPostDeleteCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_POST_DELETE, dataTableName); + slowPostDeleteCalls.incr(); + } + + @Override + public void incrementNumSlowPostOpenCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_POST_OPEN, dataTableName); + slowPostOpenCalls.incr(); + } + + @Override + public void updateDuplicateKeyCheckTime(String dataTableName, long t) { + incrementTableSpecificHistogram(DUPLICATE_KEY_TIME, dataTableName, t); + duplicateKeyTimeHisto.add(t); + } + + @Override + public void incrementSlowDuplicateKeyCheckCalls(String dataTableName) { + incrementTableSpecificCounter(SLOW_DUPLICATE_KEY, dataTableName); + slowDuplicateKeyCalls.incr(); + } + + @Override + public void updatePreIndexUpdateTime(String dataTableName, long t) { + incrementTableSpecificHistogram(PRE_INDEX_UPDATE_TIME, dataTableName, t); + preIndexUpdateTimeHisto.add(t); + } + + @Override + public void updatePostIndexUpdateTime(String dataTableName, long t) { + incrementTableSpecificHistogram(POST_INDEX_UPDATE_TIME, dataTableName, t); + postIndexUpdateTimeHisto.add(t); + } + + @Override + public void updatePreIndexUpdateFailureTime(String dataTableName, long t) { + incrementTableSpecificHistogram(PRE_INDEX_UPDATE_FAILURE_TIME, dataTableName, t); + preIndexUpdateFailureTimeHisto.add(t); + } + + @Override + public void updatePostIndexUpdateFailureTime(String dataTableName, long t) { + incrementTableSpecificHistogram(POST_INDEX_UPDATE_FAILURE_TIME, dataTableName, t); + postIndexUpdateFailureTimeHisto.add(t); + } + + @Override + public void incrementPreIndexUpdateFailures(String dataTableName) { + incrementTableSpecificCounter(PRE_INDEX_UPDATE_FAILURE, dataTableName); + preIndexUpdateFailures.incr(); + } + + @Override + public void incrementPostIndexUpdateFailures(String dataTableName) { + incrementTableSpecificCounter(POST_INDEX_UPDATE_FAILURE, dataTableName); + postIndexUpdateFailures.incr(); + } + + private void incrementTableSpecificCounter(String baseCounterName, String tableName) { + MutableFastCounter indexSpecificCounter = + getMetricsRegistry().getCounter(getCounterName(baseCounterName, tableName), 0); + indexSpecificCounter.incr(); + } + + private void incrementTableSpecificHistogram(String baseCounterName, String tableName, long t) { + MetricHistogram tableSpecificHistogram = + getMetricsRegistry().getHistogram(getCounterName(baseCounterName, tableName)); + tableSpecificHistogram.add(t); + } + + private String getCounterName(String baseCounterName, String tableName) { + return baseCounterName + "." + tableName; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java index e32c34a8881..6d2373c233b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/BaseTaskRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,11 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.util.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * {@link TaskRunner} that just manages the underlying thread pool. On called to @@ -53,8 +52,8 @@ public BaseTaskRunner(ExecutorService service) { } @Override - public Pair, List>> submit(TaskBatch tasks) throws CancellationException, ExecutionException, - InterruptedException { + public Pair, List>> submit(TaskBatch tasks) + throws CancellationException, ExecutionException, InterruptedException { // submit each task to the pool and queue it up to be watched List> futures = new ArrayList>(tasks.size()); for (Task task : tasks.getTasks()) { @@ -66,7 +65,8 @@ public Pair, List>> submit(TaskBatch tasks) throws Canc // advantage of being (1) less code, and (2) supported as part of a library, it is just that // little bit slower. If push comes to shove, we can revert back to the previous // implementation, but for right now, this works just fine. - return Pair.newPair(submitTasks(futures).get(), Collections.unmodifiableList(((List>)(List)futures))); + return Pair.newPair(submitTasks(futures).get(), + Collections.unmodifiableList(((List>) (List) futures))); } catch (CancellationException e) { // propagate the failure back out logAndNotifyAbort(e, tasks); @@ -93,8 +93,8 @@ private void logAndNotifyAbort(Exception e, Abortable abort) { protected abstract ListenableFuture> submitTasks(List> futures); @Override - public Pair, List>> submitUninterruptible(TaskBatch tasks) throws EarlyExitFailure, - ExecutionException { + public Pair, List>> submitUninterruptible(TaskBatch tasks) + throws EarlyExitFailure, ExecutionException { boolean interrupted = false; try { while (!this.isStopped()) { @@ -130,4 +130,4 @@ public void stop(String why) { public boolean isStopped() { return this.stopped; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/EarlyExitFailure.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/EarlyExitFailure.java index 8a0dedc09a1..6dcd9f5c969 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/EarlyExitFailure.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/EarlyExitFailure.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,4 +31,4 @@ public class EarlyExitFailure extends IOException { public EarlyExitFailure(String msg) { super(msg); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java index f5366f40a7e..0e80e7b924e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/QuickFailingTaskRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,11 +20,10 @@ import java.util.List; import java.util.concurrent.ExecutorService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * {@link TaskRunner} that attempts to run all tasks passed, but quits early if any {@link Task} @@ -36,7 +35,7 @@ public class QuickFailingTaskRunner extends BaseTaskRunner { /** * @param service thread pool to which {@link Task}s are submitted. This service is then 'owned' - * by this and will be shutdown on calls to {@link #stop(String)}. + * by this and will be shutdown on calls to {@link #stop(String)}. */ public QuickFailingTaskRunner(ExecutorService service) { super(service); @@ -46,4 +45,4 @@ public QuickFailingTaskRunner(ExecutorService service) { protected ListenableFuture> submitTasks(List> futures) { return Futures.allAsList(futures); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/Task.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/Task.java index 4b32e712236..1159b01c4ed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/Task.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/Task.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,4 +37,4 @@ void setBatchMonitor(Abortable abort) { protected boolean isBatchFailed() { return this.batch.isAborted(); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java index 208464ef72a..b1a8de99a85 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskBatch.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -65,10 +65,8 @@ public boolean isAborted() { return this.aborted.get(); } - /** - * @return the number of tasks assigned to this batch - */ + /** Returns the number of tasks assigned to this batch */ public int size() { return this.tasks.size(); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskRunner.java index 2581d520ae1..6ca5634537b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskRunner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/TaskRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,13 +39,13 @@ public interface TaskRunner extends Stoppable { * @param tasks to run * @return Pair containing ordered List of results from each task and an ordered immutable list of * underlying futures which can be used for getting underlying exceptions - * @throws ExecutionException if any of the tasks fails. Wraps the underyling failure, which can - * be retrieved via {@link ExecutionException#getCause()}. + * @throws ExecutionException if any of the tasks fails. Wraps the underyling failure, which can + * be retrieved via {@link ExecutionException#getCause()}. * @throws InterruptedException if the current thread is interrupted while waiting for the batch - * to complete + * to complete */ - public Pair, List>> submit(TaskBatch tasks) throws - ExecutionException, InterruptedException; + public Pair, List>> submit(TaskBatch tasks) + throws ExecutionException, InterruptedException; /** * Similar to {@link #submit(TaskBatch)}, but is not interruptible. 
If an interrupt is found while @@ -54,11 +54,11 @@ public Pair, List>> submit(TaskBatch tasks) throws * @param tasks to run * @return Pair containing ordered List of results from each task and an ordered immutable list of * underlying futures which can be used for getting underlying exceptions - * @throws EarlyExitFailure if there are still tasks to submit to the pool, but there is a stop - * notification + * @throws EarlyExitFailure if there are still tasks to submit to the pool, but there is a stop + * notification * @throws ExecutionException if any of the tasks fails. Wraps the underyling failure, which can - * be retrieved via {@link ExecutionException#getCause()}. + * be retrieved via {@link ExecutionException#getCause()}. */ - public Pair, List>> submitUninterruptible(TaskBatch tasks) throws EarlyExitFailure, - ExecutionException; -} \ No newline at end of file + public Pair, List>> submitUninterruptible(TaskBatch tasks) + throws EarlyExitFailure, ExecutionException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java index bedd4959ff5..386d564a1e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -62,7 +62,7 @@ public ThreadPoolBuilder setMaxThread(String confkey, int defaultThreads) { } String getName() { - return this.name; + return this.name; } int getMaxThreads() { @@ -70,21 +70,21 @@ int getMaxThreads() { if (this.maxThreads != null) { String key = this.maxThreads.getFirst(); maxThreads = - key == null ? this.maxThreads.getSecond() : conf.getInt(key, this.maxThreads.getSecond()); + key == null ? this.maxThreads.getSecond() : conf.getInt(key, this.maxThreads.getSecond()); } LOGGER.trace("Creating pool builder with max " + maxThreads + " threads "); return maxThreads; } long getKeepAliveTime() { - long timeout =DEFAULT_TIMEOUT; + long timeout = DEFAULT_TIMEOUT; if (this.timeout != null) { String key = this.timeout.getFirst(); timeout = - key == null ? this.timeout.getSecond() : conf.getLong(key, this.timeout.getSecond()); + key == null ? this.timeout.getSecond() : conf.getLong(key, this.timeout.getSecond()); } LOGGER.trace("Creating pool builder with core thread timeout of " + timeout + " seconds "); return timeout; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/WaitForCompletionTaskRunner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/WaitForCompletionTaskRunner.java index 3504fd429e3..d337a0b965e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/WaitForCompletionTaskRunner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/parallel/WaitForCompletionTaskRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,10 +35,10 @@ * Does not throw an {@link ExecutionException} if any of the tasks fail. */ public class WaitForCompletionTaskRunner extends BaseTaskRunner { - + /** * @param service thread pool to which {@link Task}s are submitted. This service is then 'owned' - * by this and will be shutdown on calls to {@link #stop(String)}. + * by this and will be shutdown on calls to {@link #stop(String)}. */ public WaitForCompletionTaskRunner(ExecutorService service) { super(service); @@ -48,4 +48,4 @@ public WaitForCompletionTaskRunner(ExecutorService service) { public ListenableFuture> submitTasks(List> futures) { return Futures.successfulAsList(futures); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/EmptyScanner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/EmptyScanner.java index 1c36ebb0601..0a79c1c595e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/EmptyScanner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/EmptyScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.scanner; import java.io.IOException; @@ -24,17 +23,16 @@ import org.apache.phoenix.hbase.index.covered.filter.ApplyAndFilterDeletesFilter.DeleteTracker; import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner; - /** * {@link Scanner} that has no underlying data */ public class EmptyScanner implements CoveredDeleteScanner { private final DeleteTracker deleteTracker; - - public EmptyScanner (DeleteTracker deleteTracker) { - this.deleteTracker = deleteTracker; + + public EmptyScanner(DeleteTracker deleteTracker) { + this.deleteTracker = deleteTracker; } - + @Override public Cell next() throws IOException { return null; @@ -59,4 +57,4 @@ public void close() throws IOException { public DeleteTracker getDeleteTracker() { return deleteTracker; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java index 3453030a30a..878685e3fc6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/FilteredKeyValueScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.scanner; import java.io.IOException; @@ -28,91 +27,96 @@ import org.apache.phoenix.util.PhoenixKeyValueUtil; /** - * Combine a simplified version of the logic in the ScanQueryMatcher and the KeyValueScanner. We can get away with this - * here because we are only concerned with a single MemStore for the index; we don't need to worry about multiple column - * families or minimizing seeking through file - we just want to iterate the kvs quickly, in-memory. + * Combine a simplified version of the logic in the ScanQueryMatcher and the KeyValueScanner. We can + * get away with this here because we are only concerned with a single MemStore for the index; we + * don't need to worry about multiple column families or minimizing seeking through file - we just + * want to iterate the kvs quickly, in-memory. */ public class FilteredKeyValueScanner implements ReseekableScanner { - private ReseekableScanner delegate; - private Filter filter; - - public FilteredKeyValueScanner(Filter filter, KeyValueStore store) { - this(filter, store.getScanner()); - } - - private FilteredKeyValueScanner(Filter filter, ReseekableScanner delegate) { - this.delegate = delegate; - this.filter = filter; - } - - @Override - public Cell peek() throws IOException { - return delegate.peek(); - } - - /** - * Same a KeyValueScanner#next() except that we filter out the next {@link KeyValue} until we find one that - * passes the filter. - * - * @return the next {@link KeyValue} or null if no next {@link KeyValue} is present and passes all the - * filters. - */ - @Override - public Cell next() throws IOException { - seekToNextUnfilteredKeyValue(); - return delegate.next(); + private ReseekableScanner delegate; + private Filter filter; + + public FilteredKeyValueScanner(Filter filter, KeyValueStore store) { + this(filter, store.getScanner()); + } + + private FilteredKeyValueScanner(Filter filter, ReseekableScanner delegate) { + this.delegate = delegate; + this.filter = filter; + } + + @Override + public Cell peek() throws IOException { + return delegate.peek(); + } + + /** + * Same a KeyValueScanner#next() except that we filter out the next {@link KeyValue} until we find + * one that passes the filter. + * @return the next {@link KeyValue} or null if no next {@link KeyValue} is present and + * passes all the filters. 
+ */ + @Override + public Cell next() throws IOException { + seekToNextUnfilteredKeyValue(); + return delegate.next(); + } + + @Override + public boolean seek(Cell key) throws IOException { + if (filter.filterAllRemaining()) { + return false; } - - @Override - public boolean seek(Cell key) throws IOException { - if (filter.filterAllRemaining()) { return false; } - // see if we can seek to the next key - if (!delegate.seek(key)) { return false; } - - return seekToNextUnfilteredKeyValue(); + // see if we can seek to the next key + if (!delegate.seek(key)) { + return false; } - private boolean seekToNextUnfilteredKeyValue() throws IOException { - while (true) { - Cell peeked = delegate.peek(); - // no more key values, so we are done - if (peeked == null) { return false; } - - // filter the peeked value to see if it should be served - ReturnCode code = filter.filterCell(peeked); - switch (code) { - // included, so we are done - case INCLUDE: - case INCLUDE_AND_NEXT_COL: - return true; - // not included, so we need to go to the next row - case SKIP: - case NEXT_COL: - case NEXT_ROW: - delegate.next(); - break; - // use a seek hint to find out where we should go - case SEEK_NEXT_USING_HINT: - Cell nextCellHint = filter.getNextCellHint(peeked); - if(nextCellHint == KeyValue.LOWESTKEY) { - delegate.next(); - } else { - delegate.seek(PhoenixKeyValueUtil.maybeCopyCell(nextCellHint)); - } - } - } - } - - public boolean reseek(Cell key) throws IOException { - this.delegate.reseek(key); - return this.seekToNextUnfilteredKeyValue(); + return seekToNextUnfilteredKeyValue(); + } + + private boolean seekToNextUnfilteredKeyValue() throws IOException { + while (true) { + Cell peeked = delegate.peek(); + // no more key values, so we are done + if (peeked == null) { + return false; + } + + // filter the peeked value to see if it should be served + ReturnCode code = filter.filterCell(peeked); + switch (code) { + // included, so we are done + case INCLUDE: + case INCLUDE_AND_NEXT_COL: + return true; + // not included, so we need to go to the next row + case SKIP: + case NEXT_COL: + case NEXT_ROW: + delegate.next(); + break; + // use a seek hint to find out where we should go + case SEEK_NEXT_USING_HINT: + Cell nextCellHint = filter.getNextCellHint(peeked); + if (nextCellHint == KeyValue.LOWESTKEY) { + delegate.next(); + } else { + delegate.seek(PhoenixKeyValueUtil.maybeCopyCell(nextCellHint)); + } + } } + } + public boolean reseek(Cell key) throws IOException { + this.delegate.reseek(key); + return this.seekToNextUnfilteredKeyValue(); + } - @Override - public void close() throws IOException { - this.delegate.close(); - } + @Override + public void close() throws IOException { + this.delegate.close(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ReseekableScanner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ReseekableScanner.java index cc82fb6d2d9..3b4239e4828 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ReseekableScanner.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ReseekableScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
  * distributed with this work for additional information
@@ -18,10 +17,10 @@
  */
 package org.apache.phoenix.hbase.index.scanner;
 
-import org.apache.hadoop.hbase.Cell;
-
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.Cell;
+
 public interface ReseekableScanner extends Scanner {
 
   boolean reseek(Cell key) throws IOException;
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
index 9454de5cf3c..9f8c9cb7f0f 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/Scanner.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.phoenix.hbase.index.scanner;
 
 import java.io.Closeable;
@@ -53,4 +52,4 @@ public interface Scanner extends Closeable {
    * @throws IOException if there is an error reading the underlying data.
    */
   public Cell peek() throws IOException;
-}
\ No newline at end of file
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 7ec746e5e51..07ed42e0712 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ - package org.apache.phoenix.hbase.index.scanner; import java.io.IOException; @@ -45,7 +44,6 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.covered.update.ColumnTracker; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** @@ -56,13 +54,14 @@ public class ScannerBuilder { private KeyValueStore memstore; private Mutation update; - public ScannerBuilder(KeyValueStore memstore, Mutation update) { this.memstore = memstore; this.update = update; } - public CoveredDeleteScanner buildIndexedColumnScanner(Collection indexedColumns, ColumnTracker tracker, long ts, boolean returnNullIfRowNotFound) { + public CoveredDeleteScanner buildIndexedColumnScanner( + Collection indexedColumns, ColumnTracker tracker, long ts, + boolean returnNullIfRowNotFound) { Filter columnFilters = getColumnFilters(indexedColumns); FilterList filters = new FilterList(Lists.newArrayList(columnFilters)); @@ -72,9 +71,10 @@ public CoveredDeleteScanner buildIndexedColumnScanner(Collection columns) { + private Filter getColumnFilters(Collection columns) { // each column needs to be added as an OR, so we need to separate them out FilterList columnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE); // create a filter that matches each column reference for (ColumnReference ref : columns) { Filter columnFilter = - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(ref.getFamily())); + new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(ref.getFamily())); // combine with a match for the qualifier, if the qualifier is a specific qualifier // in that case we *must* let empty qualifiers through for family delete markers if (!Bytes.equals(ColumnReference.ALL_QUALIFIERS, ref.getQualifier())) { - columnFilter = - new FilterList(columnFilter, - new FilterList(Operator.MUST_PASS_ONE, - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(ref.getQualifier())), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(HConstants.EMPTY_BYTE_ARRAY)))); + columnFilter = new FilterList(columnFilter, + new FilterList(Operator.MUST_PASS_ONE, + new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(ref.getQualifier())), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(HConstants.EMPTY_BYTE_ARRAY)))); } columnFilters.addFilter(columnFilter); } - + if (columns.isEmpty()) { - columnFilters.addFilter(new FilterBase() { - @Override - public boolean filterAllRemaining() throws IOException { - return true; - } - }); + columnFilters.addFilter(new FilterBase() { + @Override + public boolean filterAllRemaining() throws IOException { + return true; + } + }); } return columnFilters; } - private Set - getAllFamilies(Collection columns) { + private Set getAllFamilies(Collection columns) { Set families = new HashSet(); for (ColumnReference ref : columns) { families.add(ref.getFamilyWritable()); @@ -126,10 +124,11 @@ public boolean filterAllRemaining() throws IOException { } public static interface CoveredDeleteScanner extends Scanner { - public DeleteTracker getDeleteTracker(); + public DeleteTracker getDeleteTracker(); } - - private CoveredDeleteScanner getFilteredScanner(Filter filters, boolean returnNullIfRowNotFound, final DeleteTracker deleteTracker) { + + private CoveredDeleteScanner getFilteredScanner(Filter filters, boolean returnNullIfRowNotFound, + final DeleteTracker deleteTracker) { // create a scanner and wrap it as an iterator, 
meaning you can only go forward final FilteredKeyValueScanner kvScanner = new FilteredKeyValueScanner(filters, memstore); // seek the scanner to initialize it @@ -140,8 +139,8 @@ private CoveredDeleteScanner getFilteredScanner(Filter filters, boolean returnNu } } catch (IOException e) { // This should never happen - everything should explode if so. - throw new RuntimeException( - "Failed to seek to first key from update on the memstore scanner!", e); + throw new RuntimeException("Failed to seek to first key from update on the memstore scanner!", + e); } // we have some info in the scanner, so wrap it in an iterator and return. @@ -189,4 +188,4 @@ public DeleteTracker getDeleteTracker() { } }; } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java index f1785a75d38..d4f40445d6a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.table; import java.io.IOException; @@ -34,4 +33,4 @@ public interface HTableFactory { public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException; public Connection getConnection() throws IOException; -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableInterfaceReference.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableInterfaceReference.java index b6d8d8e421a..e88e9ad598a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableInterfaceReference.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/table/HTableInterfaceReference.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.hbase.index.table; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; /** @@ -30,7 +28,6 @@ public class HTableInterfaceReference { private ImmutableBytesPtr tableName; - public HTableInterfaceReference(ImmutableBytesPtr tableName) { this.tableName = tableName; } @@ -40,25 +37,26 @@ public ImmutableBytesPtr get() { } public String getTableName() { - return Bytes.toString(this.tableName.get(),this.tableName.getOffset(), this.tableName.getLength()); + return Bytes.toString(this.tableName.get(), this.tableName.getOffset(), + this.tableName.getLength()); } @Override public int hashCode() { - return tableName.hashCode(); + return tableName.hashCode(); } @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - HTableInterfaceReference other = (HTableInterfaceReference)obj; - return tableName.equals(other.tableName); + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + HTableInterfaceReference other = (HTableInterfaceReference) obj; + return tableName.equals(other.tableName); } @Override public String toString() { return Bytes.toString(this.tableName.get()); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java index b2bfa0b2b35..02d37037c9c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/GenericKeyValueBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,74 +33,78 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * {@link KeyValueBuilder} that does simple byte[] copies to build the underlying key-value. This is exactly the same - * behavior as currently used in {@link Delete} and {@link Put}. + * {@link KeyValueBuilder} that does simple byte[] copies to build the underlying key-value. This is + * exactly the same behavior as currently used in {@link Delete} and {@link Put}. 
*/ public class GenericKeyValueBuilder extends KeyValueBuilder { - public static final KeyValueBuilder INSTANCE = new GenericKeyValueBuilder(); - - private GenericKeyValueBuilder() { - // private ctor for singleton - } - - @Override - public KeyValue buildPut(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, long ts, ImmutableBytesWritable value) { - return build(row, family, qualifier, ts, Type.Put, value); - } - - @Override - public KeyValue buildDeleteFamily(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, long ts) { - return build(row, family, qualifier, ts, Type.DeleteFamily, null); - } - - @Override - public KeyValue buildDeleteColumns(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, long ts) { - return build(row, family, qualifier, ts, Type.DeleteColumn, null); - } - - @Override - public KeyValue buildDeleteColumn(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, long ts) { - return build(row, family, qualifier, ts, Type.Delete, null); - } - - private KeyValue build(ImmutableBytesWritable row, ImmutableBytesWritable family, ImmutableBytesWritable qualifier, - long ts, KeyValue.Type type, ImmutableBytesWritable value) { - return new KeyValue(copyBytesIfNecessary(row), copyBytesIfNecessary(family), copyBytesIfNecessary(qualifier), - ts, type, value == null ? null : copyBytesIfNecessary(value)); - } - - @Override - public int compareQualifier(Cell kv, byte[] key, int offset, int length) { - return Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), key, offset, length); - } - - @Override - public int compareFamily(Cell kv, byte[] key, int offset, int length) { - return Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), key, offset, length); - } - - @Override - public void getValueAsPtr(Cell kv, ImmutableBytesWritable writable) { - writable.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - } - - @Override - public CellComparator getKeyValueComparator() { - return CellComparatorImpl.COMPARATOR; - } - - @Override - public int compareRow(Cell kv, byte[] rrow, int roffset, int rlength) { - return Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), rrow, roffset, rlength); - } - - @Override - public List cloneIfNecessary(List mutations) { - return mutations; - } -} \ No newline at end of file + public static final KeyValueBuilder INSTANCE = new GenericKeyValueBuilder(); + + private GenericKeyValueBuilder() { + // private ctor for singleton + } + + @Override + public KeyValue buildPut(ImmutableBytesWritable row, ImmutableBytesWritable family, + ImmutableBytesWritable qualifier, long ts, ImmutableBytesWritable value) { + return build(row, family, qualifier, ts, Type.Put, value); + } + + @Override + public KeyValue buildDeleteFamily(ImmutableBytesWritable row, ImmutableBytesWritable family, + ImmutableBytesWritable qualifier, long ts) { + return build(row, family, qualifier, ts, Type.DeleteFamily, null); + } + + @Override + public KeyValue buildDeleteColumns(ImmutableBytesWritable row, ImmutableBytesWritable family, + ImmutableBytesWritable qualifier, long ts) { + return build(row, family, qualifier, ts, Type.DeleteColumn, null); + } + + @Override + public KeyValue buildDeleteColumn(ImmutableBytesWritable row, ImmutableBytesWritable family, + ImmutableBytesWritable qualifier, long ts) { + return build(row, 
family, qualifier, ts, Type.Delete, null); + } + + private KeyValue build(ImmutableBytesWritable row, ImmutableBytesWritable family, + ImmutableBytesWritable qualifier, long ts, KeyValue.Type type, ImmutableBytesWritable value) { + return new KeyValue(copyBytesIfNecessary(row), copyBytesIfNecessary(family), + copyBytesIfNecessary(qualifier), ts, type, + value == null ? null : copyBytesIfNecessary(value)); + } + + @Override + public int compareQualifier(Cell kv, byte[] key, int offset, int length) { + return Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), + key, offset, length); + } + + @Override + public int compareFamily(Cell kv, byte[] key, int offset, int length) { + return Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), key, + offset, length); + } + + @Override + public void getValueAsPtr(Cell kv, ImmutableBytesWritable writable) { + writable.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + } + + @Override + public CellComparator getKeyValueComparator() { + return CellComparatorImpl.COMPARATOR; + } + + @Override + public int compareRow(Cell kv, byte[] rrow, int roffset, int rlength) { + return Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), rrow, roffset, + rlength); + } + + @Override + public List cloneIfNecessary(List mutations) { + return mutations; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/ImmutableBytesPtr.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/ImmutableBytesPtr.java index 946fa2e861b..423ec01520b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/ImmutableBytesPtr.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/ImmutableBytesPtr.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,113 +26,110 @@ import org.apache.hadoop.hbase.util.Bytes; public class ImmutableBytesPtr extends ImmutableBytesWritable { - private int hashCode; - - public ImmutableBytesPtr() { - } - - public ImmutableBytesPtr(byte[] bytes) { - super(bytes); - hashCode = super.hashCode(); - } - - public ImmutableBytesPtr(ImmutableBytesWritable ibw) { - super(ibw.get(), ibw.getOffset(), ibw.getLength()); - hashCode = super.hashCode(); - } - - public ImmutableBytesPtr(ImmutableBytesPtr ibp) { - super(ibp.get(), ibp.getOffset(), ibp.getLength()); - hashCode = ibp.hashCode; - } - - public ImmutableBytesPtr(byte[] bytes, int offset, int length) { - super(bytes, offset, length); - hashCode = super.hashCode(); - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ImmutableBytesPtr that = (ImmutableBytesPtr)obj; - if (this.hashCode != that.hashCode) return false; - if (Bytes.compareTo(this.get(), this.getOffset(), this.getLength(), that.get(), that.getOffset(), that.getLength()) != 0) return false; - return true; - } - - public void set(ImmutableBytesWritable ptr) { - set(ptr.get(),ptr.getOffset(),ptr.getLength()); - } - - /** - * @param b Use passed bytes as backing array for this instance. - */ - @Override - public void set(final byte [] b) { - super.set(b); - hashCode = super.hashCode(); - } - - /** - * @param b Use passed bytes as backing array for this instance. - * @param offset - * @param length - */ - @Override - public void set(final byte [] b, final int offset, final int length) { - super.set(b,offset,length); - hashCode = super.hashCode(); - } - - @Override - public void readFields(final DataInput in) throws IOException { - super.readFields(in); - hashCode = super.hashCode(); - } - - /** - * @return the backing byte array, copying only if necessary - */ - public byte[] copyBytesIfNecessary() { + private int hashCode; + + public ImmutableBytesPtr() { + } + + public ImmutableBytesPtr(byte[] bytes) { + super(bytes); + hashCode = super.hashCode(); + } + + public ImmutableBytesPtr(ImmutableBytesWritable ibw) { + super(ibw.get(), ibw.getOffset(), ibw.getLength()); + hashCode = super.hashCode(); + } + + public ImmutableBytesPtr(ImmutableBytesPtr ibp) { + super(ibp.get(), ibp.getOffset(), ibp.getLength()); + hashCode = ibp.hashCode; + } + + public ImmutableBytesPtr(byte[] bytes, int offset, int length) { + super(bytes, offset, length); + hashCode = super.hashCode(); + } + + @Override + public int hashCode() { + return hashCode; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ImmutableBytesPtr that = (ImmutableBytesPtr) obj; + if (this.hashCode != that.hashCode) return false; + if ( + Bytes.compareTo(this.get(), this.getOffset(), this.getLength(), that.get(), that.getOffset(), + that.getLength()) != 0 + ) return false; + return true; + } + + public void set(ImmutableBytesWritable ptr) { + set(ptr.get(), ptr.getOffset(), ptr.getLength()); + } + + /** + * @param b Use passed bytes as backing array for this instance. 
+ */ + @Override + public void set(final byte[] b) { + super.set(b); + hashCode = super.hashCode(); + } + + /** + * @param b Use passed bytes as backing array for this instance. + */ + @Override + public void set(final byte[] b, final int offset, final int length) { + super.set(b, offset, length); + hashCode = super.hashCode(); + } + + @Override + public void readFields(final DataInput in) throws IOException { + super.readFields(in); + hashCode = super.hashCode(); + } + + /** Returns the backing byte array, copying only if necessary */ + public byte[] copyBytesIfNecessary() { return copyBytesIfNecessary(this); - } + } - public static byte[] copyBytesIfNecessary(ImmutableBytesWritable ptr) { - return copyBytesIfNecessary(ptr.get(), ptr.getOffset(), ptr.getLength()); - } + public static byte[] copyBytesIfNecessary(ImmutableBytesWritable ptr) { + return copyBytesIfNecessary(ptr.get(), ptr.getOffset(), ptr.getLength()); + } - public static byte[] copyBytesIfNecessary(byte[] bytes, int offset, int length) { - if (offset == 0 && length == bytes.length) { - return bytes; - } - return Arrays.copyOfRange(bytes, offset, offset + length); + public static byte[] copyBytesIfNecessary(byte[] bytes, int offset, int length) { + if (offset == 0 && length == bytes.length) { + return bytes; } + return Arrays.copyOfRange(bytes, offset, offset + length); + } - public static byte[] cloneCellRowIfNecessary(Cell cell) { - return copyBytesIfNecessary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - } + public static byte[] cloneCellRowIfNecessary(Cell cell) { + return copyBytesIfNecessary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + } - public static byte[] cloneCellFamilyIfNecessary(Cell cell) { - return copyBytesIfNecessary(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); - } + public static byte[] cloneCellFamilyIfNecessary(Cell cell) { + return copyBytesIfNecessary(cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength()); + } - public static byte[] cloneCellQualifierIfNecessary(Cell cell) { - return ImmutableBytesPtr.copyBytesIfNecessary( - cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); - } + public static byte[] cloneCellQualifierIfNecessary(Cell cell) { + return ImmutableBytesPtr.copyBytesIfNecessary(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength()); + } - public static byte[] cloneCellValueIfNecessary(Cell cell) { - return ImmutableBytesPtr.copyBytesIfNecessary( - cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); - } + public static byte[] cloneCellValueIfNecessary(Cell cell) { + return ImmutableBytesPtr.copyBytesIfNecessary(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java index 3d8c111d8eb..369de362d24 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,252 +42,262 @@ import org.apache.phoenix.hbase.index.covered.data.LazyValueGetter; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; - /** * Utility class to help manage indexes */ public class IndexManagementUtil { - // Copied here to avoid depending on phoenix-server - /** Configuration key for the class to use when encoding cells in the WAL */ - public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; - private IndexManagementUtil() { - // private ctor for util classes - } + // Copied here to avoid depending on phoenix-server + /** Configuration key for the class to use when encoding cells in the WAL */ + public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; - // Don't rely on statically defined classes constants from classes that may not exist - // in earlier HBase versions - public static final String INDEX_WAL_EDIT_CODEC_CLASS_NAME = "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"; - public static final String HLOG_READER_IMPL_KEY = "hbase.regionserver.hlog.reader.impl"; - public static final String WAL_EDIT_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; + private IndexManagementUtil() { + // private ctor for util classes + } - private static final String INDEX_HLOG_READER_CLASS_NAME = "org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader"; - private static final Logger LOGGER = LoggerFactory.getLogger(IndexManagementUtil.class); + // Don't rely on statically defined classes constants from classes that may not exist + // in earlier HBase versions + public static final String INDEX_WAL_EDIT_CODEC_CLASS_NAME = + "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"; + public static final String HLOG_READER_IMPL_KEY = "hbase.regionserver.hlog.reader.impl"; + public static final String WAL_EDIT_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; - public static boolean isWALEditCodecSet(Configuration conf) { - // check to see if the WALEditCodec is installed - try { - // Use reflection to load the IndexedWALEditCodec, since it may not load with an older version - // of HBase - Class.forName(INDEX_WAL_EDIT_CODEC_CLASS_NAME); - } catch (Throwable t) { - return false; - } - if (INDEX_WAL_EDIT_CODEC_CLASS_NAME.equals(conf - .get(WAL_CELL_CODEC_CLASS_KEY, null))) { - // its installed, and it can handle compression and non-compression cases - return true; - } - return false; + private static final String INDEX_HLOG_READER_CLASS_NAME = + "org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader"; + private static final Logger LOGGER = LoggerFactory.getLogger(IndexManagementUtil.class); + + public static boolean isWALEditCodecSet(Configuration conf) { + // check to see if the WALEditCodec is installed + try { + // Use reflection to 
load the IndexedWALEditCodec, since it may not load with an older version + // of HBase + Class.forName(INDEX_WAL_EDIT_CODEC_CLASS_NAME); + } catch (Throwable t) { + return false; + } + if (INDEX_WAL_EDIT_CODEC_CLASS_NAME.equals(conf.get(WAL_CELL_CODEC_CLASS_KEY, null))) { + // its installed, and it can handle compression and non-compression cases + return true; } + return false; + } - public static void ensureMutableIndexingCorrectlyConfigured(Configuration conf) throws IllegalStateException { + public static void ensureMutableIndexingCorrectlyConfigured(Configuration conf) + throws IllegalStateException { - // check to see if the WALEditCodec is installed - if (isWALEditCodecSet(conf)) { return; } + // check to see if the WALEditCodec is installed + if (isWALEditCodecSet(conf)) { + return; + } - // otherwise, we have to install the indexedhlogreader, but it cannot have compression - String codecClass = INDEX_WAL_EDIT_CODEC_CLASS_NAME; - String indexLogReaderName = INDEX_HLOG_READER_CLASS_NAME; - try { - // Use reflection to load the IndexedHLogReader, since it may not load with an older version - // of HBase - Class.forName(indexLogReaderName); - } catch (ClassNotFoundException e) { - throw new IllegalStateException(codecClass + " is not installed, but " - + indexLogReaderName + " hasn't been installed in hbase-site.xml under " + HLOG_READER_IMPL_KEY); - } - if (indexLogReaderName.equals(conf.get(HLOG_READER_IMPL_KEY, indexLogReaderName))) { - if (conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false)) { throw new IllegalStateException( - "WAL Compression is only supported with " + codecClass - + ". You can install in hbase-site.xml, under " + WAL_CELL_CODEC_CLASS_KEY); + // otherwise, we have to install the indexedhlogreader, but it cannot have compression + String codecClass = INDEX_WAL_EDIT_CODEC_CLASS_NAME; + String indexLogReaderName = INDEX_HLOG_READER_CLASS_NAME; + try { + // Use reflection to load the IndexedHLogReader, since it may not load with an older version + // of HBase + Class.forName(indexLogReaderName); + } catch (ClassNotFoundException e) { + throw new IllegalStateException(codecClass + " is not installed, but " + indexLogReaderName + + " hasn't been installed in hbase-site.xml under " + HLOG_READER_IMPL_KEY); + } + if (indexLogReaderName.equals(conf.get(HLOG_READER_IMPL_KEY, indexLogReaderName))) { + if (conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false)) { + throw new IllegalStateException("WAL Compression is only supported with " + codecClass + + ". You can install in hbase-site.xml, under " + WAL_CELL_CODEC_CLASS_KEY); } - } else { - throw new IllegalStateException(codecClass + " is not installed, but " - + indexLogReaderName + " hasn't been installed in hbase-site.xml under " + HLOG_READER_IMPL_KEY); - } - + } else { + throw new IllegalStateException(codecClass + " is not installed, but " + indexLogReaderName + + " hasn't been installed in hbase-site.xml under " + HLOG_READER_IMPL_KEY); } - public static ValueGetter createGetterFromScanner(CoveredDeleteScanner scanner, byte[] currentRow) { - return scanner!=null ? new LazyValueGetter(scanner, currentRow) : null; - } + } - /** - * check to see if the kvs in the update match any of the passed columns. Generally, this is useful to for an index - * codec to determine if a given update should even be indexed. This assumes that for any index, there are going to - * small number of columns, versus the number of kvs in any one batch. 
- */ - public static boolean updateMatchesColumns(Collection update, List columns) { - // check to see if the kvs in the new update even match any of the columns requested - // assuming that for any index, there are going to small number of columns, versus the number of - // kvs in any one batch. - boolean matches = false; - outer: for (KeyValue kv : update) { - for (ColumnReference ref : columns) { - if (ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), - kv.getFamilyLength()) - && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength())) { - matches = true; - // if a single column matches a single kv, we need to build a whole scanner - break outer; - } - } + public static ValueGetter createGetterFromScanner(CoveredDeleteScanner scanner, + byte[] currentRow) { + return scanner != null ? new LazyValueGetter(scanner, currentRow) : null; + } + + /** + * check to see if the kvs in the update match any of the passed columns. Generally, this is + * useful to for an index codec to determine if a given update should even be indexed. This + * assumes that for any index, there are going to small number of columns, versus the number of + * kvs in any one batch. + */ + public static boolean updateMatchesColumns(Collection update, + List columns) { + // check to see if the kvs in the new update even match any of the columns requested + // assuming that for any index, there are going to small number of columns, versus the number of + // kvs in any one batch. + boolean matches = false; + outer: for (KeyValue kv : update) { + for (ColumnReference ref : columns) { + if ( + ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) + && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength()) + ) { + matches = true; + // if a single column matches a single kv, we need to build a whole scanner + break outer; } - return matches; + } } + return matches; + } - /** - * Check to see if the kvs in the update match any of the passed columns. Generally, this is useful to for an index - * codec to determine if a given update should even be indexed. This assumes that for any index, there are going to - * small number of kvs, versus the number of columns in any one batch. - *

- * This employs the same logic as {@link #updateMatchesColumns(Collection, List)}, but is flips the iteration logic - * to search columns before kvs. - */ - public static boolean columnMatchesUpdate(List columns, Collection update) { - boolean matches = false; - outer: for (ColumnReference ref : columns) { - for (KeyValue kv : update) { - if (ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), - kv.getFamilyLength()) - && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength())) { - matches = true; - // if a single column matches a single kv, we need to build a whole scanner - break outer; - } - } + /** + * Check to see if the kvs in the update match any of the passed columns. Generally, this is + * useful to for an index codec to determine if a given update should even be indexed. This + * assumes that for any index, there are going to small number of kvs, versus the number of + * columns in any one batch. + *

+ * This employs the same logic as {@link #updateMatchesColumns(Collection, List)}, but is flips + * the iteration logic to search columns before kvs. + */ + public static boolean columnMatchesUpdate(List columns, + Collection update) { + boolean matches = false; + outer: for (ColumnReference ref : columns) { + for (KeyValue kv : update) { + if ( + ref.matchesFamily(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) + && ref.matchesQualifier(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength()) + ) { + matches = true; + // if a single column matches a single kv, we need to build a whole scanner + break outer; } - return matches; + } } + return matches; + } - public static Scan newLocalStateScan(List> refsArray) { - return newLocalStateScan(null, refsArray); - } + public static Scan + newLocalStateScan(List> refsArray) { + return newLocalStateScan(null, refsArray); + } - public static Scan newLocalStateScan(Scan scan, List> refsArray) { - Scan s = scan; - if (scan == null) { - s = new Scan(); - } - s.setRaw(true); - // add the necessary columns to the scan - for (Iterable refs : refsArray) { - for (ColumnReference ref : refs) { - s.addFamily(ref.getFamily()); - } - } - s.readAllVersions(); - return s; + public static Scan newLocalStateScan(Scan scan, + List> refsArray) { + Scan s = scan; + if (scan == null) { + s = new Scan(); + } + s.setRaw(true); + // add the necessary columns to the scan + for (Iterable refs : refsArray) { + for (ColumnReference ref : refs) { + s.addFamily(ref.getFamily()); + } } + s.readAllVersions(); + return s; + } - /** - * Propagate the given failure as a generic {@link IOException}, if it isn't already - * - * @param e - * reason indexing failed. If ,null, throws a {@link NullPointerException}, which should unload - * the coprocessor. - */ - public static void rethrowIndexingException(Throwable e) throws IOException { - try { - throw e; - } catch (IOException | FatalIndexBuildingFailureException e1) { - LOGGER.info("Rethrowing " + e); - throw e1; - } - catch (Throwable e1) { - LOGGER.info("Rethrowing " + e1 + " as a " + - IndexBuildingFailureException.class.getSimpleName()); - throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1); - } + /** + * Propagate the given failure as a generic {@link IOException}, if it isn't already reason + * indexing failed. If ,null, throws a {@link NullPointerException}, which should unload + * the coprocessor. + */ + public static void rethrowIndexingException(Throwable e) throws IOException { + try { + throw e; + } catch (IOException | FatalIndexBuildingFailureException e1) { + LOGGER.info("Rethrowing " + e); + throw e1; + } catch (Throwable e1) { + LOGGER + .info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName()); + throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1); } + } - public static void setIfNotSet(Configuration conf, String key, int value) { - if (conf.get(key) == null) { - conf.setInt(key, value); - } + public static void setIfNotSet(Configuration conf, String key, int value) { + if (conf.get(key) == null) { + conf.setInt(key, value); } + } - /** - * Batch all the {@link KeyValue}s in a collection of kvs by timestamp. 
- * - * @param kvs {@link KeyValue}s to break into batches - * @param batches to update with the given kvs - */ - public static void createTimestampBatchesFromKeyValues(Collection kvs, Map batches) { - // batch kvs by timestamp - for (KeyValue kv : kvs) { - long ts = kv.getTimestamp(); - Batch batch = batches.get(ts); - if (batch == null) { - batch = new Batch(ts); - batches.put(ts, batch); - } - batch.add(kv); - } + /** + * Batch all the {@link KeyValue}s in a collection of kvs by timestamp. + * @param kvs {@link KeyValue}s to break into batches + * @param batches to update with the given kvs + */ + public static void createTimestampBatchesFromKeyValues(Collection kvs, + Map batches) { + // batch kvs by timestamp + for (KeyValue kv : kvs) { + long ts = kv.getTimestamp(); + Batch batch = batches.get(ts); + if (batch == null) { + batch = new Batch(ts); + batches.put(ts, batch); + } + batch.add(kv); } + } - /** - * Batch all the {@link KeyValue}s in a {@link Mutation} by timestamp. - * - * @param m {@link Mutation} from which to extract the {@link KeyValue}s - * @return the mutation, broken into batches and sorted in ascending order (smallest first) - */ - public static Collection createTimestampBatchesFromMutation(Mutation m) { - Map batches = new HashMap(); - for (List family : m.getFamilyCellMap().values()) { - // TODO do we really need this to be on-heap ? - List familyKVs = PhoenixKeyValueUtil.ensureKeyValues(family); - createTimestampBatchesFromKeyValues(familyKVs, batches); - } - // sort the batches - List sorted = new ArrayList(batches.values()); - Collections.sort(sorted, new Comparator() { - @Override - public int compare(Batch o1, Batch o2) { - return Longs.compare(o1.getTimestamp(), o2.getTimestamp()); - } - }); - return sorted; + /** + * Batch all the {@link KeyValue}s in a {@link Mutation} by timestamp. + * @param m {@link Mutation} from which to extract the {@link KeyValue}s + * @return the mutation, broken into batches and sorted in ascending order (smallest first) + */ + public static Collection createTimestampBatchesFromMutation(Mutation m) { + Map batches = new HashMap(); + for (List family : m.getFamilyCellMap().values()) { + // TODO do we really need this to be on-heap ? 
+ List familyKVs = PhoenixKeyValueUtil.ensureKeyValues(family); + createTimestampBatchesFromKeyValues(familyKVs, batches); } + // sort the batches + List sorted = new ArrayList(batches.values()); + Collections.sort(sorted, new Comparator() { + @Override + public int compare(Batch o1, Batch o2) { + return Longs.compare(o1.getTimestamp(), o2.getTimestamp()); + } + }); + return sorted; + } - public static Collection flattenMutationsByTimestamp(Collection mutations) { - List flattenedMutations = Lists.newArrayListWithExpectedSize(mutations.size() * 10); - for (Mutation m : mutations) { - byte[] row = m.getRow(); - Collection batches = createTimestampBatchesFromMutation(m); - for (Batch batch : batches) { - Mutation mWithSameTS; - Cell firstCell = batch.getKvs().get(0); - if (firstCell.getType() == Cell.Type.Put) { - mWithSameTS = new Put(row); - } else { - mWithSameTS = new Delete(row); - } - if (m.getAttributesMap() != null) { - for (Map.Entry entry : m.getAttributesMap().entrySet()) { - mWithSameTS.setAttribute(entry.getKey(), entry.getValue()); - } - } - for (Cell cell : batch.getKvs()) { - byte[] fam = CellUtil.cloneFamily(cell); - List famCells = mWithSameTS.getFamilyCellMap().get(fam); - if (famCells == null) { - famCells = Lists.newArrayList(); - mWithSameTS.getFamilyCellMap().put(fam, famCells); - } - famCells.add(cell); - } - flattenedMutations.add(mWithSameTS); - } + public static Collection + flattenMutationsByTimestamp(Collection mutations) { + List flattenedMutations = Lists.newArrayListWithExpectedSize(mutations.size() * 10); + for (Mutation m : mutations) { + byte[] row = m.getRow(); + Collection batches = createTimestampBatchesFromMutation(m); + for (Batch batch : batches) { + Mutation mWithSameTS; + Cell firstCell = batch.getKvs().get(0); + if (firstCell.getType() == Cell.Type.Put) { + mWithSameTS = new Put(row); + } else { + mWithSameTS = new Delete(row); + } + if (m.getAttributesMap() != null) { + for (Map.Entry entry : m.getAttributesMap().entrySet()) { + mWithSameTS.setAttribute(entry.getKey(), entry.getValue()); } - return flattenedMutations; + } + for (Cell cell : batch.getKvs()) { + byte[] fam = CellUtil.cloneFamily(cell); + List famCells = mWithSameTS.getFamilyCellMap().get(fam); + if (famCells == null) { + famCells = Lists.newArrayList(); + mWithSameTS.getFamilyCellMap().put(fam, famCells); + } + famCells.add(cell); + } + flattenedMutations.add(mWithSameTS); } -} \ No newline at end of file + } + return flattenedMutations; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java index 7957774543e..f49a3ea9927 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/KeyValueBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,79 +35,79 @@ * Build {@link KeyValue} in an efficient way */ public abstract class KeyValueBuilder { - - /** - * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Put} - * when adding a {@link KeyValue} generated by the KeyValueBuilder. - * @throws RuntimeException if there is an IOException thrown from the underlying {@link Put} - */ - @SuppressWarnings("javadoc") - public static void addQuietly(Mutation m, KeyValue kv) { - byte [] family = CellUtil.cloneFamily(kv); - List list = m.getFamilyCellMap().get(family); - if (list == null) { - list = new ArrayList<>(); - m.getFamilyCellMap().put(family, list); - } - list.add(kv); - } - /** - * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Put} - * when adding a {@link KeyValue} generated by the KeyValueBuilder. - * @throws RuntimeException if there is an IOException thrown from the underlying {@link Put} - */ - @SuppressWarnings("javadoc") - public static void deleteQuietly(Delete delete, KeyValueBuilder builder, KeyValue kv) { - try { - delete.add(kv); - } catch (IOException e) { - throw new RuntimeException("KeyValue Builder " + builder + " created an invalid kv: " - + kv + "!"); - } + /** + * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Put} when + * adding a {@link KeyValue} generated by the KeyValueBuilder. + * @throws RuntimeException if there is an IOException thrown from the underlying {@link Put} + */ + @SuppressWarnings("javadoc") + public static void addQuietly(Mutation m, KeyValue kv) { + byte[] family = CellUtil.cloneFamily(kv); + List list = m.getFamilyCellMap().get(family); + if (list == null) { + list = new ArrayList<>(); + m.getFamilyCellMap().put(family, list); } + list.add(kv); + } - public static KeyValueBuilder get(String hbaseVersion) { - return GenericKeyValueBuilder.INSTANCE; + /** + * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Put} when + * adding a {@link KeyValue} generated by the KeyValueBuilder. 
+ * @throws RuntimeException if there is an IOException thrown from the underlying {@link Put} + */ + @SuppressWarnings("javadoc") + public static void deleteQuietly(Delete delete, KeyValueBuilder builder, KeyValue kv) { + try { + delete.add(kv); + } catch (IOException e) { + throw new RuntimeException( + "KeyValue Builder " + builder + " created an invalid kv: " + kv + "!"); } + } + + public static KeyValueBuilder get(String hbaseVersion) { + return GenericKeyValueBuilder.INSTANCE; + } public KeyValue buildPut(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, ImmutableBytesWritable value) { + ImmutableBytesWritable qualifier, ImmutableBytesWritable value) { return buildPut(row, family, qualifier, HConstants.LATEST_TIMESTAMP, value); } public abstract KeyValue buildPut(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier, long ts, ImmutableBytesWritable value); + ImmutableBytesWritable qualifier, long ts, ImmutableBytesWritable value); public KeyValue buildDeleteFamily(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier) { - return buildDeleteFamily(row, family, qualifier, HConstants.LATEST_TIMESTAMP); + ImmutableBytesWritable qualifier) { + return buildDeleteFamily(row, family, qualifier, HConstants.LATEST_TIMESTAMP); } public abstract KeyValue buildDeleteFamily(ImmutableBytesWritable row, - ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); + ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); public KeyValue buildDeleteColumns(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier) { - return buildDeleteColumns(row, family, qualifier, HConstants.LATEST_TIMESTAMP); + ImmutableBytesWritable qualifier) { + return buildDeleteColumns(row, family, qualifier, HConstants.LATEST_TIMESTAMP); } public abstract KeyValue buildDeleteColumns(ImmutableBytesWritable row, - ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); + ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); public KeyValue buildDeleteColumn(ImmutableBytesWritable row, ImmutableBytesWritable family, - ImmutableBytesWritable qualifier) { - return buildDeleteColumn(row, family, qualifier, HConstants.LATEST_TIMESTAMP); + ImmutableBytesWritable qualifier) { + return buildDeleteColumn(row, family, qualifier, HConstants.LATEST_TIMESTAMP); } public abstract KeyValue buildDeleteColumn(ImmutableBytesWritable row, - ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); + ImmutableBytesWritable family, ImmutableBytesWritable qualifier, long ts); /** * Compare the qualifier based on the type of keyvalue. 
Assumes that the {@link KeyValue} passed * in was generated by the {@link KeyValueBuilder} - * @param kv to compare against - * @param key to compare + * @param kv to compare against + * @param key to compare * @param offset in the passed key * @param length length of the key from the offset to check * @return the byte difference between the passed keyvalue's qualifier and the passed key @@ -115,15 +115,17 @@ public abstract KeyValue buildDeleteColumn(ImmutableBytesWritable row, public abstract int compareQualifier(Cell kv, byte[] key, int offset, int length); public abstract int compareFamily(Cell kv, byte[] key, int offset, int length); + public abstract int compareRow(Cell kv, byte[] row, int offset, int length); + /** - * @param kv to read + * @param kv to read * @param ptr set with the value from the {@link KeyValue} */ public abstract void getValueAsPtr(Cell kv, ImmutableBytesWritable ptr); - + public abstract CellComparator getKeyValueComparator(); - + public abstract List cloneIfNecessary(List mutations); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/VersionUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/VersionUtil.java index fd02ab5ee41..637cbfa11e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/VersionUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/hbase/index/util/VersionUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,72 +17,73 @@ */ package org.apache.phoenix.hbase.index.util; - public class VersionUtil { - private VersionUtil() { - } + private VersionUtil() { + } - // Encode a version string in the format of "major.minor.patch" into an integer. - public static int encodeVersion(String version) { - String[] versionParts = VersionUtil.splitHBaseVersionString(version); - return VersionUtil.encodeVersion(versionParts[0], versionParts.length > 1 ? versionParts[1] : null, versionParts.length > 2 ? versionParts[2] : null); - } + // Encode a version string in the format of "major.minor.patch" into an integer. + public static int encodeVersion(String version) { + String[] versionParts = VersionUtil.splitHBaseVersionString(version); + return VersionUtil.encodeVersion(versionParts[0], + versionParts.length > 1 ? versionParts[1] : null, + versionParts.length > 2 ? versionParts[2] : null); + } - public static String[] splitHBaseVersionString(String version) { - return version.split("[-\\.]"); - } + public static String[] splitHBaseVersionString(String version) { + return version.split("[-\\.]"); + } - // Encode the major as 2nd byte in the int, minor as the first byte and patch as the last byte. - public static int encodeVersion(String major, String minor, String patch) { - return encodeVersion(major == null ? 0 : Integer.parseInt(major), minor == null ? 0 : Integer.parseInt(minor), - patch == null ? 0 : Integer.parseInt(patch)); - } + // Encode the major as 2nd byte in the int, minor as the first byte and patch as the last byte. + public static int encodeVersion(String major, String minor, String patch) { + return encodeVersion(major == null ? 0 : Integer.parseInt(major), + minor == null ? 
0 : Integer.parseInt(minor), patch == null ? 0 : Integer.parseInt(patch)); + } - public static int encodeVersion(int major, int minor, int patch) { - int version = 0; - version |= (major << Byte.SIZE * 2); - version |= (minor << Byte.SIZE); - version |= patch; - return version; - } + public static int encodeVersion(int major, int minor, int patch) { + int version = 0; + version |= (major << Byte.SIZE * 2); + version |= (minor << Byte.SIZE); + version |= patch; + return version; + } - public static int encodeMaxPatchVersion(int major, int minor) { - int version = 0; - version |= (major << Byte.SIZE * 2); - version |= (minor << Byte.SIZE); - version |= 0xFF; - return version; - } + public static int encodeMaxPatchVersion(int major, int minor) { + int version = 0; + version |= (major << Byte.SIZE * 2); + version |= (minor << Byte.SIZE); + version |= 0xFF; + return version; + } - public static int encodeMinPatchVersion(int major, int minor) { - int version = 0; - version |= (major << Byte.SIZE * 2); - version |= (minor << Byte.SIZE); - return version; - } + public static int encodeMinPatchVersion(int major, int minor) { + int version = 0; + version |= (major << Byte.SIZE * 2); + version |= (minor << Byte.SIZE); + return version; + } - public static int encodeMaxMinorVersion(int major) { - int version = 0; - version |= (major << Byte.SIZE * 2); - version |= 0xFFFF; - return version; - } + public static int encodeMaxMinorVersion(int major) { + int version = 0; + version |= (major << Byte.SIZE * 2); + version |= 0xFFFF; + return version; + } - public static int encodeMinMinorVersion(int major) { - int version = 0; - version |= (major << Byte.SIZE * 2); - return version; - } + public static int encodeMinMinorVersion(int major) { + int version = 0; + version |= (major << Byte.SIZE * 2); + return version; + } - public static int decodeMajorVersion(int encodedVersion) { - return (encodedVersion >> Byte.SIZE * 2); - } + public static int decodeMajorVersion(int encodedVersion) { + return (encodedVersion >> Byte.SIZE * 2); + } - public static int decodeMinorVersion(int encodedVersion) { - return (encodedVersion >> Byte.SIZE) & 0xFF; - } + public static int decodeMinorVersion(int encodedVersion) { + return (encodedVersion >> Byte.SIZE) & 0xFF; + } - public static int decodePatchVersion(int encodedVersion) { - return encodedVersion & 0xFF; - } + public static int decodePatchVersion(int encodedVersion) { + return encodedVersion & 0xFF; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/CDCTableInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/CDCTableInfo.java index 02fe008ab39..d52b2f9d4c5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/CDCTableInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/CDCTableInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.index; +import static org.apache.phoenix.query.QueryConstants.CDC_JSON_COL_NAME; +import static org.apache.phoenix.query.QueryConstants.NAME_SEPARATOR; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; + import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.compile.TupleProjectionCompiler; @@ -33,243 +44,224 @@ import org.apache.phoenix.util.CDCUtil; import org.apache.phoenix.util.SchemaUtil; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -import static org.apache.phoenix.query.QueryConstants.CDC_JSON_COL_NAME; -import static org.apache.phoenix.query.QueryConstants.NAME_SEPARATOR; - - /** * CDC Table Def Class */ public class CDCTableInfo { - private List columnInfoList; - private byte[] defaultColumnFamily; - private final Set includeScopes; - private PTable.QualifierEncodingScheme qualifierEncodingScheme; - private final byte[] cdcJsonColQualBytes; - private final TupleProjector dataTableProjector; + private List columnInfoList; + private byte[] defaultColumnFamily; + private final Set includeScopes; + private PTable.QualifierEncodingScheme qualifierEncodingScheme; + private final byte[] cdcJsonColQualBytes; + private final TupleProjector dataTableProjector; - private CDCTableInfo(List columnInfoList, - Set includeScopes, byte[] cdcJsonColQualBytes, - TupleProjector dataTableProjector) { - Collections.sort(columnInfoList); - this.columnInfoList = columnInfoList; - this.includeScopes = includeScopes; - this.cdcJsonColQualBytes = cdcJsonColQualBytes; - this.dataTableProjector = dataTableProjector; - } + private CDCTableInfo(List columnInfoList, Set includeScopes, + byte[] cdcJsonColQualBytes, TupleProjector dataTableProjector) { + Collections.sort(columnInfoList); + this.columnInfoList = columnInfoList; + this.includeScopes = includeScopes; + this.cdcJsonColQualBytes = cdcJsonColQualBytes; + this.dataTableProjector = dataTableProjector; + } - public CDCTableInfo(byte[] defaultColumnFamily, List columnInfoList, - Set includeScopes, - PTable.QualifierEncodingScheme qualifierEncodingScheme, - byte[] cdcJsonColQualBytes, TupleProjector dataTableProjector) { - this(columnInfoList, includeScopes, cdcJsonColQualBytes, dataTableProjector); - this.defaultColumnFamily = defaultColumnFamily; - this.qualifierEncodingScheme = qualifierEncodingScheme; - } + public CDCTableInfo(byte[] defaultColumnFamily, List columnInfoList, + Set includeScopes, + PTable.QualifierEncodingScheme qualifierEncodingScheme, byte[] cdcJsonColQualBytes, + TupleProjector dataTableProjector) { + this(columnInfoList, includeScopes, cdcJsonColQualBytes, dataTableProjector); + this.defaultColumnFamily = defaultColumnFamily; + this.qualifierEncodingScheme = qualifierEncodingScheme; + } - public List getColumnInfoList() { - return columnInfoList; - } + public List getColumnInfoList() { + return columnInfoList; + } - public byte[] getDefaultColumnFamily() { - return defaultColumnFamily; - } + public byte[] getDefaultColumnFamily() { + return defaultColumnFamily; + } - public PTable.QualifierEncodingScheme getQualifierEncodingScheme() { - return 
qualifierEncodingScheme; - } + public PTable.QualifierEncodingScheme getQualifierEncodingScheme() { + return qualifierEncodingScheme; + } - public Set getIncludeScopes() { - return includeScopes; - } + public Set getIncludeScopes() { + return includeScopes; + } - public byte[] getCdcJsonColQualBytes() { - return cdcJsonColQualBytes; - } + public byte[] getCdcJsonColQualBytes() { + return cdcJsonColQualBytes; + } - public TupleProjector getDataTableProjector() { - return dataTableProjector; - } + public TupleProjector getDataTableProjector() { + return dataTableProjector; + } - public static CDCTableInfo createFromProto(CDCInfoProtos.CDCTableDef table) { - byte[] defaultColumnFamily = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; - if (table.hasDefaultFamilyName()) { - defaultColumnFamily = table.getDefaultFamilyName().toByteArray(); - } - // For backward compatibility. Clients older than 4.10 will always have - // non-encoded qualifiers. - PTable.QualifierEncodingScheme qualifierEncodingScheme - = PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - if (table.hasQualifierEncodingScheme()) { - qualifierEncodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue( - table.getQualifierEncodingScheme().toByteArray()[0]); - } - List columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); - for (CDCInfoProtos.CDCColumnDef curColumnProto : table.getColumnsList()) { - columns.add(CDCColumnInfo.createFromProto(curColumnProto)); - } - String includeScopesStr = table.getCdcIncludeScopes(); - Set changeScopeSet; - try { - changeScopeSet = CDCUtil.makeChangeScopeEnumsFromString(includeScopesStr); - } catch (SQLException e) { - throw new RuntimeException(e); - } - TupleProjector dataTableProjector = null; - if (table.hasDataTableProjectorBytes()) { - dataTableProjector = TupleProjector.deserializeProjectorFromBytes( - table.getDataTableProjectorBytes().toByteArray()); - } - return new CDCTableInfo(defaultColumnFamily, columns, changeScopeSet, - qualifierEncodingScheme, table.getCdcJsonColQualBytes().toByteArray(), - dataTableProjector); + public static CDCTableInfo createFromProto(CDCInfoProtos.CDCTableDef table) { + byte[] defaultColumnFamily = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + if (table.hasDefaultFamilyName()) { + defaultColumnFamily = table.getDefaultFamilyName().toByteArray(); } + // For backward compatibility. Clients older than 4.10 will always have + // non-encoded qualifiers. 
+ PTable.QualifierEncodingScheme qualifierEncodingScheme = + PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + if (table.hasQualifierEncodingScheme()) { + qualifierEncodingScheme = PTable.QualifierEncodingScheme + .fromSerializedValue(table.getQualifierEncodingScheme().toByteArray()[0]); + } + List columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); + for (CDCInfoProtos.CDCColumnDef curColumnProto : table.getColumnsList()) { + columns.add(CDCColumnInfo.createFromProto(curColumnProto)); + } + String includeScopesStr = table.getCdcIncludeScopes(); + Set changeScopeSet; + try { + changeScopeSet = CDCUtil.makeChangeScopeEnumsFromString(includeScopesStr); + } catch (SQLException e) { + throw new RuntimeException(e); + } + TupleProjector dataTableProjector = null; + if (table.hasDataTableProjectorBytes()) { + dataTableProjector = TupleProjector + .deserializeProjectorFromBytes(table.getDataTableProjectorBytes().toByteArray()); + } + return new CDCTableInfo(defaultColumnFamily, columns, changeScopeSet, qualifierEncodingScheme, + table.getCdcJsonColQualBytes().toByteArray(), dataTableProjector); + } - public static CDCInfoProtos.CDCTableDef toProto(StatementContext context) - throws SQLException { - PTable cdcTable = context.getCDCTableRef().getTable(); - PTable dataTable = context.getCDCDataTableRef().getTable(); - CDCInfoProtos.CDCTableDef.Builder builder = CDCInfoProtos.CDCTableDef.newBuilder(); - if (dataTable.getDefaultFamilyName() != null) { - builder.setDefaultFamilyName( - ByteStringer.wrap(dataTable.getDefaultFamilyName().getBytes())); - } - String cdcIncludeScopes = context.getEncodedCdcIncludeScopes(); - if (cdcIncludeScopes != null) { - builder.setCdcIncludeScopes(cdcIncludeScopes); - } - if (dataTable.getEncodingScheme() != null) { - builder.setQualifierEncodingScheme(ByteStringer.wrap( - new byte[] { dataTable.getEncodingScheme().getSerializedMetadataValue() })); - } - for (PColumn column : dataTable.getColumns()) { - if (column.getFamilyName() == null) { - continue; - } - builder.addColumns(CDCColumnInfo.toProto(column)); - } - PColumn cdcJsonCol = cdcTable.getColumnForColumnName(CDC_JSON_COL_NAME); - builder.setCdcJsonColQualBytes(ByteStringer.wrap(cdcJsonCol.getColumnQualifierBytes())); - - TableRef cdcDataTableRef = context.getCDCDataTableRef(); - if (cdcDataTableRef.getTable().isImmutableRows() && - cdcDataTableRef.getTable().getImmutableStorageScheme() == - PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - List dataColumns = new ArrayList(); - PTable table = cdcDataTableRef.getTable(); - for (PColumn column : table.getColumns()) { - if (!SchemaUtil.isPKColumn(column)) { - dataColumns.add(new ColumnRef(cdcDataTableRef, column.getPosition())); - } - } + public static CDCInfoProtos.CDCTableDef toProto(StatementContext context) throws SQLException { + PTable cdcTable = context.getCDCTableRef().getTable(); + PTable dataTable = context.getCDCDataTableRef().getTable(); + CDCInfoProtos.CDCTableDef.Builder builder = CDCInfoProtos.CDCTableDef.newBuilder(); + if (dataTable.getDefaultFamilyName() != null) { + builder.setDefaultFamilyName(ByteStringer.wrap(dataTable.getDefaultFamilyName().getBytes())); + } + String cdcIncludeScopes = context.getEncodedCdcIncludeScopes(); + if (cdcIncludeScopes != null) { + builder.setCdcIncludeScopes(cdcIncludeScopes); + } + if (dataTable.getEncodingScheme() != null) { + builder.setQualifierEncodingScheme(ByteStringer + .wrap(new byte[] { dataTable.getEncodingScheme().getSerializedMetadataValue() })); + } + for 
(PColumn column : dataTable.getColumns()) { + if (column.getFamilyName() == null) { + continue; + } + builder.addColumns(CDCColumnInfo.toProto(column)); + } + PColumn cdcJsonCol = cdcTable.getColumnForColumnName(CDC_JSON_COL_NAME); + builder.setCdcJsonColQualBytes(ByteStringer.wrap(cdcJsonCol.getColumnQualifierBytes())); - PTable projectedDataTable = TupleProjectionCompiler.createProjectedTable( - cdcDataTableRef, dataColumns, false);; - TupleProjector dataTableProjector = new TupleProjector(projectedDataTable); - builder.setDataTableProjectorBytes(ByteStringer.wrap( - TupleProjector.serializeProjectorIntoBytes(dataTableProjector))); + TableRef cdcDataTableRef = context.getCDCDataTableRef(); + if ( + cdcDataTableRef.getTable().isImmutableRows() + && cdcDataTableRef.getTable().getImmutableStorageScheme() + == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ) { + List dataColumns = new ArrayList(); + PTable table = cdcDataTableRef.getTable(); + for (PColumn column : table.getColumns()) { + if (!SchemaUtil.isPKColumn(column)) { + dataColumns.add(new ColumnRef(cdcDataTableRef, column.getPosition())); } + } - return builder.build(); + PTable projectedDataTable = + TupleProjectionCompiler.createProjectedTable(cdcDataTableRef, dataColumns, false); + ; + TupleProjector dataTableProjector = new TupleProjector(projectedDataTable); + builder.setDataTableProjectorBytes( + ByteStringer.wrap(TupleProjector.serializeProjectorIntoBytes(dataTableProjector))); } - /** - * CDC Column Def Class - */ - public static class CDCColumnInfo implements Comparable { + return builder.build(); + } - private final byte[] columnFamily; - private final byte[] columnQualifier; - private final String columnName; - private final PDataType columnType; - private final String columnFamilyName; - private String columnDisplayName; + /** + * CDC Column Def Class + */ + public static class CDCColumnInfo implements Comparable { - public CDCColumnInfo(byte[] columnFamily, byte[] columnQualifier, - String columnName, PDataType columnType, - String columnFamilyName) { - this.columnFamily = columnFamily; - this.columnQualifier = columnQualifier; - this.columnName = columnName; - this.columnType = columnType; - this.columnFamilyName = columnFamilyName; - } + private final byte[] columnFamily; + private final byte[] columnQualifier; + private final String columnName; + private final PDataType columnType; + private final String columnFamilyName; + private String columnDisplayName; - public byte[] getColumnFamily() { - return columnFamily; - } + public CDCColumnInfo(byte[] columnFamily, byte[] columnQualifier, String columnName, + PDataType columnType, String columnFamilyName) { + this.columnFamily = columnFamily; + this.columnQualifier = columnQualifier; + this.columnName = columnName; + this.columnType = columnType; + this.columnFamilyName = columnFamilyName; + } - public byte[] getColumnQualifier() { - return columnQualifier; - } + public byte[] getColumnFamily() { + return columnFamily; + } - public String getColumnName() { - return columnName; - } + public byte[] getColumnQualifier() { + return columnQualifier; + } - public PDataType getColumnType() { - return columnType; - } + public String getColumnName() { + return columnName; + } - public String getColumnFamilyName() { - return columnFamilyName; - } + public PDataType getColumnType() { + return columnType; + } - @Override - public int compareTo(CDCColumnInfo columnInfo) { - return CDCUtil.compareCellFamilyAndQualifier(this.getColumnFamily(), - 
this.getColumnQualifier(), - columnInfo.getColumnFamily(), - columnInfo.getColumnQualifier()); - } + public String getColumnFamilyName() { + return columnFamilyName; + } - public static CDCColumnInfo createFromProto(CDCInfoProtos.CDCColumnDef column) { - String columnName = column.getColumnName(); - byte[] familyNameBytes = column.getFamilyNameBytes().toByteArray(); - PDataType dataType = PDataType.fromSqlTypeName(column.getDataType()); - byte[] columnQualifierBytes = column.getColumnQualifierBytes().toByteArray(); - String columnFamilyName = StandardCharsets.UTF_8 - .decode(ByteBuffer.wrap(familyNameBytes)).toString(); - return new CDCColumnInfo(familyNameBytes, - columnQualifierBytes, columnName, dataType, columnFamilyName); - } + @Override + public int compareTo(CDCColumnInfo columnInfo) { + return CDCUtil.compareCellFamilyAndQualifier(this.getColumnFamily(), + this.getColumnQualifier(), columnInfo.getColumnFamily(), columnInfo.getColumnQualifier()); + } - public static CDCInfoProtos.CDCColumnDef toProto(PColumn column) { - CDCInfoProtos.CDCColumnDef.Builder builder = CDCInfoProtos.CDCColumnDef.newBuilder(); - builder.setColumnName(column.getName().toString()); - if (column.getFamilyName() != null) { - builder.setFamilyNameBytes(ByteStringer.wrap(column.getFamilyName().getBytes())); - } - if (column.getDataType() != null) { - builder.setDataType(column.getDataType().getSqlTypeName()); - } - if (column.getColumnQualifierBytes() != null) { - builder.setColumnQualifierBytes( - ByteStringer.wrap(column.getColumnQualifierBytes())); - } - return builder.build(); - } + public static CDCColumnInfo createFromProto(CDCInfoProtos.CDCColumnDef column) { + String columnName = column.getColumnName(); + byte[] familyNameBytes = column.getFamilyNameBytes().toByteArray(); + PDataType dataType = PDataType.fromSqlTypeName(column.getDataType()); + byte[] columnQualifierBytes = column.getColumnQualifierBytes().toByteArray(); + String columnFamilyName = + StandardCharsets.UTF_8.decode(ByteBuffer.wrap(familyNameBytes)).toString(); + return new CDCColumnInfo(familyNameBytes, columnQualifierBytes, columnName, dataType, + columnFamilyName); + } + + public static CDCInfoProtos.CDCColumnDef toProto(PColumn column) { + CDCInfoProtos.CDCColumnDef.Builder builder = CDCInfoProtos.CDCColumnDef.newBuilder(); + builder.setColumnName(column.getName().toString()); + if (column.getFamilyName() != null) { + builder.setFamilyNameBytes(ByteStringer.wrap(column.getFamilyName().getBytes())); + } + if (column.getDataType() != null) { + builder.setDataType(column.getDataType().getSqlTypeName()); + } + if (column.getColumnQualifierBytes() != null) { + builder.setColumnQualifierBytes(ByteStringer.wrap(column.getColumnQualifierBytes())); + } + return builder.build(); + } - public String getColumnDisplayName(CDCTableInfo tableInfo) { - if (columnDisplayName == null) { - // Don't include Column Family if it is a default column Family - if (Arrays.equals(getColumnFamily(), tableInfo.getDefaultColumnFamily())) { - columnDisplayName = getColumnName(); - } else { - columnDisplayName = getColumnFamilyName() - + NAME_SEPARATOR + getColumnName(); - } - } - return columnDisplayName; + public String getColumnDisplayName(CDCTableInfo tableInfo) { + if (columnDisplayName == null) { + // Don't include Column Family if it is a default column Family + if (Arrays.equals(getColumnFamily(), tableInfo.getDefaultColumnFamily())) { + columnDisplayName = getColumnName(); + } else { + columnDisplayName = getColumnFamilyName() + NAME_SEPARATOR + 
getColumnName(); } + } + return columnDisplayName; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMaintainer.java index 97a0ca785c6..043a98157f6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMaintainer.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMaintainer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +39,6 @@ import java.util.Map.Entry; import java.util.Set; -import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -106,14 +105,13 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinaryEncoded; -import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterators; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature; import org.apache.phoenix.util.BitSet; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.CDCUtil; @@ -126,2258 +124,2425 @@ import org.apache.phoenix.util.TransactionUtil; import org.apache.phoenix.util.TrustedByteArrayOutputStream; +import com.google.protobuf.InvalidProtocolBufferException; + /** - * - * Class that builds index row key from data row key and current state of - * row and caches any covered columns. Client-side serializes into byte array using - * #serialize(PTable, ImmutableBytesWritable) - * and transmits to server-side through either the - * {@link org.apache.phoenix.index.PhoenixIndexCodec#INDEX_PROTO_MD} - * Mutation attribute or as a separate RPC call using - * {@link org.apache.phoenix.cache.ServerCacheClient}) - * - * + * Class that builds index row key from data row key and current state of row and caches any covered + * columns. 
Client-side serializes into byte array using #serialize(PTable, ImmutableBytesWritable) + * and transmits to server-side through either the + * {@link org.apache.phoenix.index.PhoenixIndexCodec#INDEX_PROTO_MD} Mutation attribute or as a + * separate RPC call using {@link org.apache.phoenix.cache.ServerCacheClient}) * @since 2.1.0 */ public class IndexMaintainer implements Writable, Iterable { - private static final int EXPRESSION_NOT_PRESENT = -1; - private static final int ESTIMATED_EXPRESSION_SIZE = 8; - - public static IndexMaintainer create(PTable dataTable, PTable index, - PhoenixConnection connection) throws SQLException { - return create(dataTable, null, index, connection); + private static final int EXPRESSION_NOT_PRESENT = -1; + private static final int ESTIMATED_EXPRESSION_SIZE = 8; + + public static IndexMaintainer create(PTable dataTable, PTable index, PhoenixConnection connection) + throws SQLException { + return create(dataTable, null, index, connection); + } + + public static IndexMaintainer create(PTable dataTable, PTable cdcTable, PTable index, + PhoenixConnection connection) throws SQLException { + if ( + dataTable.getType() == PTableType.INDEX || index.getType() != PTableType.INDEX + || !dataTable.getIndexes().contains(index) + ) { + throw new IllegalArgumentException(); + } + IndexMaintainer maintainer = new IndexMaintainer(dataTable, cdcTable, index, connection); + return maintainer; + } + + /** + * Determines whether the client should send IndexMaintainer for the given Index table. + * @param index PTable for the index table. + * @return True if the client needs to send IndexMaintainer for the given Index. + */ + public static boolean sendIndexMaintainer(PTable index) { + PIndexState indexState = index.getIndexState(); + return !(indexState.isDisabled() || PIndexState.PENDING_ACTIVE == indexState); + } + + public static Iterator maintainedIndexes(Iterator indexes) { + return Iterators.filter(indexes, new Predicate() { + @Override + public boolean apply(PTable index) { + return sendIndexMaintainer(index); + } + }); + } + + public static Iterator maintainedGlobalIndexesWithMatchingStorageScheme( + final PTable dataTable, Iterator indexes) { + return Iterators.filter(indexes, new Predicate() { + @Override + public boolean apply(PTable index) { + return sendIndexMaintainer(index) && IndexUtil.isGlobalIndex(index) + && dataTable.getImmutableStorageScheme() == index.getImmutableStorageScheme(); + } + }); + } + + public static Iterator maintainedLocalOrGlobalIndexesWithoutMatchingStorageScheme( + final PTable dataTable, Iterator indexes) { + return Iterators.filter(indexes, new Predicate() { + @Override + public boolean apply(PTable index) { + return sendIndexMaintainer(index) && ((index.getIndexType() == IndexType.GLOBAL + && dataTable.getImmutableStorageScheme() != index.getImmutableStorageScheme()) + || index.getIndexType() == IndexType.LOCAL); + } + }); + } + + public static Iterator maintainedLocalIndexes(Iterator indexes) { + return Iterators.filter(indexes, new Predicate() { + @Override + public boolean apply(PTable index) { + return sendIndexMaintainer(index) && index.getIndexType() == IndexType.LOCAL; + } + }); + } + + /** + * For client-side to serialize all IndexMaintainers for a given table + * @param dataTable data table + * @param ptr bytes pointer to hold returned serialized value + */ + public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, + PhoenixConnection connection) throws SQLException { + List indexes = dataTable.getIndexes(); + 
serializeServerMaintainedIndexes(dataTable, ptr, indexes, connection); + } + + public static void serializeServerMaintainedIndexes(PTable dataTable, ImmutableBytesWritable ptr, + List indexes, PhoenixConnection connection) throws SQLException { + Iterator indexesItr = Collections.emptyListIterator(); + boolean onlyLocalIndexes = dataTable.isImmutableRows() || dataTable.isTransactional(); + if (onlyLocalIndexes) { + if ( + !dataTable.isTransactional() || !dataTable.getTransactionProvider().getTransactionProvider() + .isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER) + ) { + indexesItr = + maintainedLocalOrGlobalIndexesWithoutMatchingStorageScheme(dataTable, indexes.iterator()); + } + } else { + indexesItr = maintainedIndexes(indexes.iterator()); + } + + serialize(dataTable, ptr, Lists.newArrayList(indexesItr), connection); + } + + /** + * For client-side to serialize all IndexMaintainers for a given table + * @param dataTable data table + * @param ptr bytes pointer to hold returned serialized value + * @param indexes indexes to serialize + */ + public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, List indexes, + PhoenixConnection connection) throws SQLException { + if (indexes.isEmpty() && dataTable.getTransformingNewTable() == null) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return; + } + int nIndexes = indexes.size(); + if (dataTable.getTransformingNewTable() != null) { + // If the transforming new table is in CREATE_DISABLE state, the mutations don't go into the + // table. + boolean disabled = dataTable.getTransformingNewTable().isIndexStateDisabled(); + if (disabled && nIndexes == 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return; + } + if (!disabled) { + nIndexes++; + } } - - public static IndexMaintainer create(PTable dataTable, PTable cdcTable, PTable index, - PhoenixConnection connection) throws SQLException { - if (dataTable.getType() == PTableType.INDEX || index.getType() != PTableType.INDEX || !dataTable.getIndexes().contains(index)) { - throw new IllegalArgumentException(); + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + DataOutputStream output = new DataOutputStream(stream); + try { + // Encode data table salting in sign of number of indexes + WritableUtils.writeVInt(output, nIndexes * (dataTable.getBucketNum() == null ? 
1 : -1)); + // Write out data row key schema once, since it's the same for all index maintainers + dataTable.getRowKeySchema().write(output); + for (PTable index : indexes) { + org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = + IndexMaintainer.toProto(index.getIndexMaintainer(dataTable, connection)); + byte[] protoBytes = proto.toByteArray(); + WritableUtils.writeVInt(output, protoBytes.length); + output.write(protoBytes); + } + if (dataTable.getTransformingNewTable() != null) { + // We're not serializing the TransformMaintainer if the new transformed table is disabled + boolean disabled = dataTable.getTransformingNewTable().isIndexStateDisabled(); + if (!disabled) { + ServerCachingProtos.TransformMaintainer proto = TransformMaintainer.toProto( + dataTable.getTransformingNewTable().getTransformMaintainer(dataTable, connection)); + byte[] protoBytes = proto.toByteArray(); + WritableUtils.writeVInt(output, protoBytes.length); + output.write(protoBytes); } - IndexMaintainer maintainer = new IndexMaintainer(dataTable, cdcTable, index, connection); - return maintainer; + } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + ptr.set(stream.toByteArray(), 0, stream.size()); + } + + /** + * For client-side to append serialized IndexMaintainers of keyValueIndexes + * @param table data table + * @param indexMetaDataPtr bytes pointer to hold returned serialized value + * @param keyValueIndexes indexes to serialize + */ + public static void serializeAdditional(PTable table, ImmutableBytesWritable indexMetaDataPtr, + List keyValueIndexes, PhoenixConnection connection) throws SQLException { + int nMutableIndexes = + indexMetaDataPtr.getLength() == 0 ? 0 : ByteUtil.vintFromBytes(indexMetaDataPtr); + int nIndexes = nMutableIndexes + keyValueIndexes.size(); + int estimatedSize = indexMetaDataPtr.getLength() + 1; // Just in case new size increases buffer + if (indexMetaDataPtr.getLength() == 0) { + estimatedSize += table.getRowKeySchema().getEstimatedByteSize(); + } + for (PTable index : keyValueIndexes) { + estimatedSize += index.getIndexMaintainer(table, connection).getEstimatedByteSize(); + } + TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1); + DataOutput output = new DataOutputStream(stream); + try { + // Encode data table salting in sign of number of indexes + WritableUtils.writeVInt(output, nIndexes * (table.getBucketNum() == null ? 
1 : -1)); + // Serialize current mutable indexes, subtracting the vint size from the length + // as its still included + if (indexMetaDataPtr.getLength() > 0) { + output.write(indexMetaDataPtr.get(), indexMetaDataPtr.getOffset(), + indexMetaDataPtr.getLength() - WritableUtils.getVIntSize(nMutableIndexes)); + } else { + table.getRowKeySchema().write(output); + } + // Serialize mutable indexes afterwards + for (PTable index : keyValueIndexes) { + IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); + byte[] protoBytes = IndexMaintainer.toProto(maintainer).toByteArray(); + WritableUtils.writeVInt(output, protoBytes.length); + output.write(protoBytes); + } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + indexMetaDataPtr.set(stream.getBuffer(), 0, stream.size()); + } + + public static List deserialize(ImmutableBytesWritable metaDataPtr, + KeyValueBuilder builder, boolean useProtoForIndexMaintainer) { + return deserialize(metaDataPtr.get(), metaDataPtr.getOffset(), metaDataPtr.getLength(), + useProtoForIndexMaintainer); + } + + public static List deserialize(byte[] buf, boolean useProtoForIndexMaintainer) { + return deserialize(buf, 0, buf.length, useProtoForIndexMaintainer); + } + + private static List deserialize(byte[] buf, int offset, int length, + boolean useProtoForIndexMaintainer) { + List maintainers = Collections.emptyList(); + if (length > 0) { + ByteArrayInputStream stream = new ByteArrayInputStream(buf, offset, length); + DataInput input = new DataInputStream(stream); + try { + int size = WritableUtils.readVInt(input); + boolean isDataTableSalted = size < 0; + size = Math.abs(size); + RowKeySchema rowKeySchema = new RowKeySchema(); + rowKeySchema.readFields(input); + maintainers = Lists.newArrayListWithExpectedSize(size); + for (int i = 0; i < size; i++) { + if (useProtoForIndexMaintainer) { + int protoSize = WritableUtils.readVInt(input); + byte[] b = new byte[protoSize]; + input.readFully(b); + try { + org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = + ServerCachingProtos.IndexMaintainer.parseFrom(b); + maintainers.add(IndexMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); + } catch (InvalidProtocolBufferException e) { + org.apache.phoenix.coprocessor.generated.ServerCachingProtos.TransformMaintainer proto = + ServerCachingProtos.TransformMaintainer.parseFrom(b); + maintainers + .add(TransformMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); + } + } else { + IndexMaintainer maintainer = new IndexMaintainer(rowKeySchema, isDataTableSalted); + maintainer.readFields(input); + maintainers.add(maintainer); + } + } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } } + return maintainers; + } - /** - * Determines whether the client should send IndexMaintainer for the given Index table. - * - * @param index PTable for the index table. - * @return True if the client needs to send IndexMaintainer for the given Index. - */ - public static boolean sendIndexMaintainer(PTable index) { - PIndexState indexState = index.getIndexState(); - return ! 
( indexState.isDisabled() || PIndexState.PENDING_ACTIVE == indexState ); + public static IndexMaintainer getIndexMaintainer(List maintainers, + byte[] indexTableName) { + Iterator maintainerIterator = maintainers.iterator(); + while (maintainerIterator.hasNext()) { + IndexMaintainer maintainer = maintainerIterator.next(); + if (Bytes.compareTo(indexTableName, maintainer.getIndexTableName()) == 0) { + return maintainer; + } } - - public static Iterator maintainedIndexes(Iterator indexes) { - return Iterators.filter(indexes, new Predicate() { - @Override - public boolean apply(PTable index) { - return sendIndexMaintainer(index); - } - }); - } - - public static Iterator maintainedGlobalIndexesWithMatchingStorageScheme(final PTable dataTable, Iterator indexes) { - return Iterators.filter(indexes, new Predicate() { - @Override - public boolean apply(PTable index) { - return sendIndexMaintainer(index) && IndexUtil.isGlobalIndex(index) - && dataTable.getImmutableStorageScheme() == index.getImmutableStorageScheme(); - } - }); + return null; + } + + private byte[] viewIndexId; + private PDataType viewIndexIdType; + private boolean isMultiTenant; + private PTableType parentTableType; + // indexed expressions that are not present in the row key of the data table, the expression can + // also refer to a regular column + private List indexedExpressions; + // columns required to evaluate all expressions in indexedExpressions (this does not include + // columns in the data row key) + private Set indexedColumns; + + // columns required to create index row i.e. indexedColumns + coveredColumns (this does not + // include columns in the data row key) + private Set allColumns; + // TODO remove this in the next major release + private List indexedColumnTypes; + private int indexDataColumnCount; + private RowKeyMetaData rowKeyMetaData; + private byte[] indexTableName; + private int nIndexSaltBuckets; + private int nDataTableSaltBuckets; + private byte[] dataEmptyKeyValueCF; + private ImmutableBytesPtr emptyKeyValueCFPtr; + private int nDataCFs; + private boolean indexWALDisabled; + private boolean isLocalIndex; + private boolean immutableRows; + // Transient state + private final boolean isDataTableSalted; + private final RowKeySchema dataRowKeySchema; + + private int estimatedIndexRowKeyBytes; + private int estimatedExpressionSize; + private int[] dataPkPosition; + private int maxTrailingNulls; + private ColumnReference indexEmptyKeyValueRef; + private ColumnReference dataEmptyKeyValueRef; + private boolean rowKeyOrderOptimizable; + + /**** START: New member variables added in 4.10 *****/ + private QualifierEncodingScheme encodingScheme; + private ImmutableStorageScheme immutableStorageScheme; + private QualifierEncodingScheme dataEncodingScheme; + private ImmutableStorageScheme dataImmutableStorageScheme; + /* + * Information for columns of data tables that are being indexed. The first part of the pair is + * column family name and second part is the column name. The reason we need to track this state + * is because for certain storage schemes like + * ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS, the column for which we need to generate + * an index table put/delete is different from the columns that are indexed in the phoenix schema. + * This information helps us determine whether or not certain operations like DROP COLUMN should + * impact the index. 
+ */ + private Set> indexedColumnsInfo; + /* + * Map of covered columns where a key is column reference for a column in the data table and value + * is column reference for corresponding column in the index table. + */ + private Map coveredColumnsMap; + /**** END: New member variables added in 4.10 *****/ + + // **** START: New member variables added in 4.16 ****/ + private String logicalIndexName; + + private boolean isUncovered; + private Expression indexWhere; + private Set indexWhereColumns; + private boolean isCDCIndex; + + protected IndexMaintainer(RowKeySchema dataRowKeySchema, boolean isDataTableSalted) { + this.dataRowKeySchema = dataRowKeySchema; + this.isDataTableSalted = isDataTableSalted; + } + + private IndexMaintainer(final PTable dataTable, final PTable index, PhoenixConnection connection) + throws SQLException { + this(dataTable, null, index, connection); + } + + private IndexMaintainer(final PTable dataTable, final PTable cdcTable, final PTable index, + PhoenixConnection connection) throws SQLException { + this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null); + this.rowKeyOrderOptimizable = index.rowKeyOrderOptimizable(); + this.isMultiTenant = dataTable.isMultiTenant(); + this.viewIndexId = index.getViewIndexId() == null + ? null + : index.getviewIndexIdType().toBytes(index.getViewIndexId()); + this.viewIndexIdType = index.getviewIndexIdType(); + this.isLocalIndex = index.getIndexType() == IndexType.LOCAL; + this.isUncovered = index.getIndexType() == IndexType.UNCOVERED_GLOBAL; + this.encodingScheme = index.getEncodingScheme(); + this.isCDCIndex = CDCUtil.isCDCIndex(index); + + // null check for b/w compatibility + this.encodingScheme = index.getEncodingScheme() == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : index.getEncodingScheme(); + this.immutableStorageScheme = index.getImmutableStorageScheme() == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : index.getImmutableStorageScheme(); + this.dataEncodingScheme = dataTable.getEncodingScheme() == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : dataTable.getEncodingScheme(); + this.dataImmutableStorageScheme = dataTable.getImmutableStorageScheme() == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : dataTable.getImmutableStorageScheme(); + this.nDataTableSaltBuckets = isDataTableSalted ? dataTable.getBucketNum() : PTable.NO_SALTING; + + byte[] indexTableName = index.getPhysicalName().getBytes(); + // Use this for the nDataSaltBuckets as we need this for local indexes + // TODO: persist nDataSaltBuckets separately, but maintain b/w compat. + Integer nIndexSaltBuckets = isLocalIndex ? dataTable.getBucketNum() : index.getBucketNum(); + boolean indexWALDisabled = index.isWALDisabled(); + int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (this.isMultiTenant ? 1 : 0) + + (this.viewIndexId == null ? 0 : 1); + int nIndexColumns = index.getColumns().size() - indexPosOffset; + int nIndexPKColumns = index.getPKColumns().size() - indexPosOffset; + // number of expressions that are indexed that are not present in the row key of the data table + int indexedExpressionCount = 0; + for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) { + PColumn indexColumn = index.getPKColumns().get(i); + String indexColumnName = indexColumn.getName().getString(); + String dataFamilyName = IndexUtil.getDataColumnFamilyName(indexColumnName); + String dataColumnName = IndexUtil.getDataColumnName(indexColumnName); + try { + PColumn dataColumn = dataFamilyName.equals("") + ? 
dataTable.getColumnForColumnName(dataColumnName) + : dataTable.getColumnFamily(dataFamilyName).getPColumnForColumnName(dataColumnName); + if (SchemaUtil.isPKColumn(dataColumn)) continue; + } catch (ColumnNotFoundException e) { + // This column must be an expression + } catch (Exception e) { + throw new IllegalArgumentException(e); + } + indexedExpressionCount++; + } + + int dataPosOffset = (isDataTableSalted ? 1 : 0) + (this.isMultiTenant ? 1 : 0); + + // For indexes on views, we need to remember which data columns are "constants" + // These are the values in a VIEW where clause. For these, we don't put them in the + // index, as they're the same for every row in the index. The data table can be + // either a VIEW or PROJECTED + List dataPKColumns = dataTable.getPKColumns(); + this.indexDataColumnCount = dataPKColumns.size(); + PTable parentTable = dataTable; + // We need to get the PK column for the table on which the index is created + if ( + !dataTable.getName() + .equals(cdcTable != null ? cdcTable.getParentName() : index.getParentName()) + ) { + try { + String tenantId = (index.getTenantId() != null) ? index.getTenantId().getString() : null; + parentTable = connection.getTable(tenantId, index.getParentName().getString()); + this.indexDataColumnCount = parentTable.getPKColumns().size(); + } catch (SQLException e) { + throw new RuntimeException(e); + } } + this.parentTableType = parentTable.getType(); - public static Iterator maintainedLocalOrGlobalIndexesWithoutMatchingStorageScheme(final PTable dataTable, Iterator indexes) { - return Iterators.filter(indexes, new Predicate() { - @Override - public boolean apply(PTable index) { - return sendIndexMaintainer(index) && ((index.getIndexType() == IndexType.GLOBAL - && dataTable.getImmutableStorageScheme() != index.getImmutableStorageScheme()) - || index.getIndexType() == IndexType.LOCAL); - } - }); - } + int indexPkColumnCount = this.indexDataColumnCount + indexedExpressionCount + - (this.isDataTableSalted ? 1 : 0) - (this.isMultiTenant ? 
1 : 0); + this.rowKeyMetaData = newRowKeyMetaData(indexPkColumnCount); + BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - public static Iterator maintainedLocalIndexes(Iterator indexes) { - return Iterators.filter(indexes, new Predicate() { - @Override - public boolean apply(PTable index) { - return sendIndexMaintainer(index) && index.getIndexType() == IndexType.LOCAL; - } - }); - } - - /** - * For client-side to serialize all IndexMaintainers for a given table - * @param dataTable data table - * @param ptr bytes pointer to hold returned serialized value - */ - public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, - PhoenixConnection connection) throws SQLException { - List indexes = dataTable.getIndexes(); - serializeServerMaintainedIndexes(dataTable, ptr, indexes, connection); - } - - public static void serializeServerMaintainedIndexes(PTable dataTable, ImmutableBytesWritable ptr, - List indexes, PhoenixConnection connection) throws SQLException { - Iterator indexesItr = Collections.emptyListIterator(); - boolean onlyLocalIndexes = dataTable.isImmutableRows() || dataTable.isTransactional(); - if (onlyLocalIndexes) { - if (!dataTable.isTransactional() - || !dataTable.getTransactionProvider().getTransactionProvider().isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER)) { - indexesItr = maintainedLocalOrGlobalIndexesWithoutMatchingStorageScheme(dataTable, indexes.iterator()); - } + int nDataPKColumns = this.indexDataColumnCount - dataPosOffset; + for (int i = dataPosOffset; i < dataPKColumns.size(); i++) { + PColumn dataPKColumn = dataPKColumns.get(i); + if (dataPKColumn.getViewConstant() != null) { + bitSet.set(i); + nDataPKColumns--; + } + } + this.indexTableName = indexTableName; + this.indexedColumnTypes = + Lists. newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns); + this.indexedExpressions = Lists.newArrayListWithExpectedSize(nIndexPKColumns - nDataPKColumns); + this.coveredColumnsMap = Maps.newHashMapWithExpectedSize(nIndexColumns - nIndexPKColumns); + this.nIndexSaltBuckets = nIndexSaltBuckets == null ? PTable.NO_SALTING : nIndexSaltBuckets; + this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(dataTable); + this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(index); + this.nDataCFs = dataTable.getColumnFamilies().size(); + this.indexWALDisabled = indexWALDisabled; + // TODO: check whether index is immutable or not. Currently it's always false so checking + // data table is with immutable rows or not. 
+ this.immutableRows = dataTable.isImmutableRows(); + int indexColByteSize = 0; + ColumnResolver resolver = null; + List parseNodes = new ArrayList(1); + UDFParseNodeVisitor visitor = new UDFParseNodeVisitor(); + for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) { + PColumn indexColumn = index.getPKColumns().get(i); + String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn); + try { + ParseNode parseNode = SQLParser.parseCondition(expressionStr); + parseNode.accept(visitor); + parseNodes.add(parseNode); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + try { + resolver = + FromCompiler.getResolver(connection, new TableRef(dataTable), visitor.getUdfParseNodes()); + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } + StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver); + this.indexedColumnsInfo = Sets.newHashSetWithExpectedSize(nIndexColumns - nIndexPKColumns); + + IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context); + for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) { + PColumn indexColumn = index.getPKColumns().get(i); + int indexPos = i - indexPosOffset; + Expression expression = null; + try { + expressionIndexCompiler.reset(); + expression = parseNodes.get(indexPos).accept(expressionIndexCompiler); + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } + if (expressionIndexCompiler.getColumnRef() != null) { + // get the column of the data column that corresponds to this index column + PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString()); + boolean isPKColumn = SchemaUtil.isPKColumn(column); + if (isPKColumn) { + int dataPkPos = dataTable.getPKColumns().indexOf(column) + - (dataTable.getBucketNum() == null ? 0 : 1) - (this.isMultiTenant ? 1 : 0); + this.rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos); + indexedColumnsInfo.add(new Pair<>((String) null, column.getName().getString())); } else { - indexesItr = maintainedIndexes(indexes.iterator()); - } - - serialize(dataTable, ptr, Lists.newArrayList(indexesItr), connection); - } - /** - * For client-side to serialize all IndexMaintainers for a given table - * @param dataTable data table - * @param ptr bytes pointer to hold returned serialized value - * @param indexes indexes to serialize - */ - public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, - List indexes, PhoenixConnection connection) throws SQLException { - if (indexes.isEmpty() && dataTable.getTransformingNewTable() == null) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return; - } - int nIndexes = indexes.size(); - if (dataTable.getTransformingNewTable() != null) { - // If the transforming new table is in CREATE_DISABLE state, the mutations don't go into the table. - boolean disabled = dataTable.getTransformingNewTable().isIndexStateDisabled(); - if (disabled && nIndexes == 0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return; - } - if (!disabled) { - nIndexes++; - } - } - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - DataOutputStream output = new DataOutputStream(stream); - try { - // Encode data table salting in sign of number of indexes - WritableUtils.writeVInt(output, nIndexes * (dataTable.getBucketNum() == null ? 
1 : -1)); - // Write out data row key schema once, since it's the same for all index maintainers - dataTable.getRowKeySchema().write(output); - for (PTable index : indexes) { - org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = IndexMaintainer.toProto(index.getIndexMaintainer(dataTable, connection)); - byte[] protoBytes = proto.toByteArray(); - WritableUtils.writeVInt(output, protoBytes.length); - output.write(protoBytes); - } - if (dataTable.getTransformingNewTable() != null) { - // We're not serializing the TransformMaintainer if the new transformed table is disabled - boolean disabled = dataTable.getTransformingNewTable().isIndexStateDisabled(); - if (!disabled) { - ServerCachingProtos.TransformMaintainer proto = TransformMaintainer.toProto( - dataTable.getTransformingNewTable().getTransformMaintainer(dataTable, connection)); - byte[] protoBytes = proto.toByteArray(); - WritableUtils.writeVInt(output, protoBytes.length); - output.write(protoBytes); - } - } - } catch (IOException e) { + indexColByteSize += column.getDataType().isFixedWidth() + ? SchemaUtil.getFixedByteSize(column) + : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; + try { + // Surround constant with cast so that we can still know the original type. Otherwise, + // if we lose the type, + // (for example when VARCHAR becomes CHAR), it can lead to problems in the type + // translation we do between data tables and indexes. + if (column.isNullable() && ExpressionUtil.isConstant(expression)) { + expression = CoerceExpression.create(expression, indexColumn.getDataType()); + } + this.indexedExpressions.add(expression); + indexedColumnsInfo + .add(new Pair<>(column.getFamilyName().getString(), column.getName().getString())); + } catch (SQLException e) { throw new RuntimeException(e); // Impossible - } - ptr.set(stream.toByteArray(), 0, stream.size()); - } - - /** - * For client-side to append serialized IndexMaintainers of keyValueIndexes - * @param table data table - * @param indexMetaDataPtr bytes pointer to hold returned serialized value - * @param keyValueIndexes indexes to serialize - */ - public static void serializeAdditional(PTable table, ImmutableBytesWritable indexMetaDataPtr, - List keyValueIndexes, PhoenixConnection connection) throws SQLException { - int nMutableIndexes = indexMetaDataPtr.getLength() == 0 ? 0 : ByteUtil.vintFromBytes(indexMetaDataPtr); - int nIndexes = nMutableIndexes + keyValueIndexes.size(); - int estimatedSize = indexMetaDataPtr.getLength() + 1; // Just in case new size increases buffer - if (indexMetaDataPtr.getLength() == 0) { - estimatedSize += table.getRowKeySchema().getEstimatedByteSize(); - } - for (PTable index : keyValueIndexes) { - estimatedSize += index.getIndexMaintainer(table, connection).getEstimatedByteSize(); - } - TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1); - DataOutput output = new DataOutputStream(stream); - try { - // Encode data table salting in sign of number of indexes - WritableUtils.writeVInt(output, nIndexes * (table.getBucketNum() == null ? 1 : -1)); - // Serialize current mutable indexes, subtracting the vint size from the length - // as its still included - if (indexMetaDataPtr.getLength() > 0) { - output.write(indexMetaDataPtr.get(), indexMetaDataPtr.getOffset(), indexMetaDataPtr.getLength()-WritableUtils.getVIntSize(nMutableIndexes)); + } + } + } else { + indexColByteSize += expression.getDataType().isFixedWidth() + ? 
SchemaUtil.getFixedByteSize(expression) + : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; + this.indexedExpressions.add(expression); + KeyValueExpressionVisitor kvVisitor = new KeyValueExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression colExpression) { + return addDataColInfo(dataTable, colExpression); + } + + @Override + public Void visit(SingleCellColumnExpression expression) { + return addDataColInfo(dataTable, expression); + } + + private Void addDataColInfo(final PTable dataTable, Expression expression) { + Preconditions.checkArgument(expression instanceof SingleCellColumnExpression + || expression instanceof KeyValueColumnExpression); + + KeyValueColumnExpression colExpression = null; + if (expression instanceof SingleCellColumnExpression) { + colExpression = ((SingleCellColumnExpression) expression).getKeyValueExpression(); } else { - table.getRowKeySchema().write(output); + colExpression = ((KeyValueColumnExpression) expression); } - // Serialize mutable indexes afterwards - for (PTable index : keyValueIndexes) { - IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); - byte[] protoBytes = IndexMaintainer.toProto(maintainer).toByteArray(); - WritableUtils.writeVInt(output, protoBytes.length); - output.write(protoBytes); - } - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - indexMetaDataPtr.set(stream.getBuffer(), 0, stream.size()); - } - - public static List deserialize(ImmutableBytesWritable metaDataPtr, - KeyValueBuilder builder, boolean useProtoForIndexMaintainer) { - return deserialize(metaDataPtr.get(), metaDataPtr.getOffset(), metaDataPtr.getLength(), useProtoForIndexMaintainer); - } - - public static List deserialize(byte[] buf, boolean useProtoForIndexMaintainer) { - return deserialize(buf, 0, buf.length, useProtoForIndexMaintainer); - } - - private static List deserialize(byte[] buf, int offset, int length, boolean useProtoForIndexMaintainer) { - List maintainers = Collections.emptyList(); - if (length > 0) { - ByteArrayInputStream stream = new ByteArrayInputStream(buf, offset, length); - DataInput input = new DataInputStream(stream); + byte[] cf = colExpression.getColumnFamily(); + byte[] cq = colExpression.getColumnQualifier(); try { - int size = WritableUtils.readVInt(input); - boolean isDataTableSalted = size < 0; - size = Math.abs(size); - RowKeySchema rowKeySchema = new RowKeySchema(); - rowKeySchema.readFields(input); - maintainers = Lists.newArrayListWithExpectedSize(size); - for (int i = 0; i < size; i++) { - if (useProtoForIndexMaintainer) { - int protoSize = WritableUtils.readVInt(input); - byte[] b = new byte[protoSize]; - input.readFully(b); - try { - org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = ServerCachingProtos.IndexMaintainer.parseFrom(b); - maintainers.add(IndexMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); - } catch (InvalidProtocolBufferException e) { - org.apache.phoenix.coprocessor.generated.ServerCachingProtos.TransformMaintainer proto = ServerCachingProtos.TransformMaintainer.parseFrom(b); - maintainers.add(TransformMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); - } - } else { - IndexMaintainer maintainer = new IndexMaintainer(rowKeySchema, isDataTableSalted); - maintainer.readFields(input); - maintainers.add(maintainer); - } + PColumn dataColumn = cf == null + ? 
dataTable.getColumnForColumnQualifier(null, cq) + : dataTable.getColumnFamily(cf).getPColumnForColumnQualifier(cq); + if (dataColumn == null) { + if ( + Bytes.compareTo(cf, dataEmptyKeyValueCF) == 0 && Bytes.compareTo(cq, + EncodedColumnsUtil.getEmptyKeyValueInfo(dataEncodingScheme).getFirst()) == 0 + ) { + return null; + } else { + throw new ColumnNotFoundException(dataTable.getSchemaName().getString(), + dataTable.getTableName().getString(), Bytes.toString(cf), Bytes.toString(cq)); } - } catch (IOException e) { - throw new RuntimeException(e); // Impossible + } else { + indexedColumnsInfo.add(new Pair<>(dataColumn.getFamilyName().getString(), + dataColumn.getName().getString())); + } + } catch (ColumnNotFoundException | ColumnFamilyNotFoundException + | AmbiguousColumnException e) { + if (dataTable.hasOnlyPkColumns()) { + return null; + } + throw new RuntimeException(e); } + return null; + } + + }; + expression.accept(kvVisitor); + } + // set the sort order of the expression correctly + if (indexColumn.getSortOrder() == SortOrder.DESC) { + this.rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos); + } + } + this.estimatedExpressionSize = + expressionIndexCompiler.getTotalNodeCount() * ESTIMATED_EXPRESSION_SIZE; + for (int i = 0; i < index.getColumnFamilies().size(); i++) { + PColumnFamily family = index.getColumnFamilies().get(i); + for (PColumn indexColumn : family.getColumns()) { + PColumn dataColumn = + IndexUtil.getDataColumnOrNull(dataTable, indexColumn.getName().getString()); + // This can happen during deletion where we don't need covered columns + if (dataColumn != null) { + byte[] dataColumnCq = dataColumn.getColumnQualifierBytes(); + byte[] indexColumnCq = indexColumn.getColumnQualifierBytes(); + this.coveredColumnsMap.put( + new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq), + new ColumnReference(indexColumn.getFamilyName().getBytes(), indexColumnCq)); } - return maintainers; + } } + this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize); + this.logicalIndexName = index.getName().getString(); + if (index.getIndexWhere() != null) { + this.indexWhere = index.getIndexWhereExpression(connection); + this.indexWhereColumns = index.getIndexWhereColumns(connection); + } + + initCachedState(); + } + + public void setDataImmutableStorageScheme(ImmutableStorageScheme sc) { + this.dataImmutableStorageScheme = sc; + } + + public void setDataEncodingScheme(QualifierEncodingScheme sc) { + this.dataEncodingScheme = sc; + } + + public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, + byte[] regionStartKey, byte[] regionEndKey, long ts) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean prependRegionStartKey = isLocalIndex && regionStartKey != null; + boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0; + int prefixKeyLength = prependRegionStartKey + ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) + : 0; + TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream( + estimatedIndexRowKeyBytes + (prependRegionStartKey ? 
prefixKeyLength : 0)); + DataOutput output = new DataOutputStream(stream); + + try { + // For local indexes, we must prepend the row key with the start region key + if (prependRegionStartKey) { + if (regionStartKey.length == 0) { + output.write(new byte[prefixKeyLength]); + } else { + output.write(regionStartKey); + } + } + if (isIndexSalted) { + output.write(0); // will be set at end to index salt byte + } + // The dataRowKeySchema includes the salt byte field, + // so we must adjust for that here. + int dataPosOffset = isDataTableSalted ? 1 : 0; + BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); + int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants(); + int[][] dataRowKeyLocator = new int[2][nIndexedColumns]; + // Skip data table salt byte + int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength(); + dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset); + + if (viewIndexId != null) { + output.write(viewIndexId); + } - public static IndexMaintainer getIndexMaintainer(List maintainers, byte[] indexTableName) { - Iterator maintainerIterator = maintainers.iterator(); - while (maintainerIterator.hasNext()) { - IndexMaintainer maintainer = maintainerIterator.next(); - if (Bytes.compareTo(indexTableName, maintainer.getIndexTableName()) == 0) { - return maintainer; - } + if (isMultiTenant) { + dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset); + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { + output.write(SchemaUtil.getSeparatorBytes( + dataRowKeySchema.getField(dataPosOffset).getDataType(), rowKeyOrderOptimizable, + ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset).getSortOrder())); } - return null; - } - - private byte[] viewIndexId; - private PDataType viewIndexIdType; - private boolean isMultiTenant; - private PTableType parentTableType; - // indexed expressions that are not present in the row key of the data table, the expression can also refer to a regular column - private List indexedExpressions; - // columns required to evaluate all expressions in indexedExpressions (this does not include columns in the data row key) - private Set indexedColumns; - - // columns required to create index row i.e. 
indexedColumns + coveredColumns (this does not include columns in the data row key) - private Set allColumns; - // TODO remove this in the next major release - private List indexedColumnTypes; - private int indexDataColumnCount; - private RowKeyMetaData rowKeyMetaData; - private byte[] indexTableName; - private int nIndexSaltBuckets; - private int nDataTableSaltBuckets; - private byte[] dataEmptyKeyValueCF; - private ImmutableBytesPtr emptyKeyValueCFPtr; - private int nDataCFs; - private boolean indexWALDisabled; - private boolean isLocalIndex; - private boolean immutableRows; - // Transient state - private final boolean isDataTableSalted; - private final RowKeySchema dataRowKeySchema; - - private int estimatedIndexRowKeyBytes; - private int estimatedExpressionSize; - private int[] dataPkPosition; - private int maxTrailingNulls; - private ColumnReference indexEmptyKeyValueRef; - private ColumnReference dataEmptyKeyValueRef; - private boolean rowKeyOrderOptimizable; - - /**** START: New member variables added in 4.10 *****/ - private QualifierEncodingScheme encodingScheme; - private ImmutableStorageScheme immutableStorageScheme; - private QualifierEncodingScheme dataEncodingScheme; - private ImmutableStorageScheme dataImmutableStorageScheme; - /* - * Information for columns of data tables that are being indexed. The first part of the pair is column family name - * and second part is the column name. The reason we need to track this state is because for certain storage schemes - * like ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS, the column for which we need to generate an index - * table put/delete is different from the columns that are indexed in the phoenix schema. This information helps us - * determine whether or not certain operations like DROP COLUMN should impact the index. - */ - private Set> indexedColumnsInfo; - /* - * Map of covered columns where a key is column reference for a column in the data table - * and value is column reference for corresponding column in the index table. - */ - private Map coveredColumnsMap; - /**** END: New member variables added in 4.10 *****/ - - //**** START: New member variables added in 4.16 ****/ - private String logicalIndexName; - - private boolean isUncovered; - private Expression indexWhere; - private Set indexWhereColumns; - private boolean isCDCIndex; - - protected IndexMaintainer(RowKeySchema dataRowKeySchema, boolean isDataTableSalted) { - this.dataRowKeySchema = dataRowKeySchema; - this.isDataTableSalted = isDataTableSalted; - } - - private IndexMaintainer(final PTable dataTable, final PTable index, - PhoenixConnection connection) throws SQLException { - this(dataTable, null, index, connection); - } - - private IndexMaintainer(final PTable dataTable, final PTable cdcTable, final PTable index, - PhoenixConnection connection) throws SQLException { - this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null); - this.rowKeyOrderOptimizable = index.rowKeyOrderOptimizable(); - this.isMultiTenant = dataTable.isMultiTenant(); - this.viewIndexId = index.getViewIndexId() == null ? null : index.getviewIndexIdType().toBytes(index.getViewIndexId()); - this.viewIndexIdType = index.getviewIndexIdType(); - this.isLocalIndex = index.getIndexType() == IndexType.LOCAL; - this.isUncovered = index.getIndexType() == IndexType.UNCOVERED_GLOBAL; - this.encodingScheme = index.getEncodingScheme(); - this.isCDCIndex = CDCUtil.isCDCIndex(index); - - // null check for b/w compatibility - this.encodingScheme = index.getEncodingScheme() == null ? 
QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : index.getEncodingScheme(); - this.immutableStorageScheme = index.getImmutableStorageScheme() == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : index.getImmutableStorageScheme(); - this.dataEncodingScheme = dataTable.getEncodingScheme() == null ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : dataTable.getEncodingScheme(); - this.dataImmutableStorageScheme = dataTable.getImmutableStorageScheme() == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : dataTable.getImmutableStorageScheme(); - this.nDataTableSaltBuckets = isDataTableSalted ? dataTable.getBucketNum() : PTable.NO_SALTING; - - byte[] indexTableName = index.getPhysicalName().getBytes(); - // Use this for the nDataSaltBuckets as we need this for local indexes - // TODO: persist nDataSaltBuckets separately, but maintain b/w compat. - Integer nIndexSaltBuckets = isLocalIndex ? dataTable.getBucketNum() : index.getBucketNum(); - boolean indexWALDisabled = index.isWALDisabled(); - int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (this.isMultiTenant ? 1 : 0) + (this.viewIndexId == null ? 0 : 1); - int nIndexColumns = index.getColumns().size() - indexPosOffset; - int nIndexPKColumns = index.getPKColumns().size() - indexPosOffset; - // number of expressions that are indexed that are not present in the row key of the data table - int indexedExpressionCount = 0; - for (int i = indexPosOffset; i expressionIterator = indexedExpressions.iterator(); + int trailingVariableWidthColumnNum = 0; + PDataType[] indexedColumnDataTypes = new PDataType[nIndexedColumns]; + for (int i = 0; i < nIndexedColumns; i++) { + PDataType dataColumnType; + boolean isNullable; + SortOrder dataSortOrder; + if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) { + Expression expression = expressionIterator.next(); + dataColumnType = expression.getDataType(); + dataSortOrder = expression.getSortOrder(); + isNullable = expression.isNullable(); + expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); + } else { + Field field = dataRowKeySchema.getField(dataPkPosition[i]); + dataColumnType = field.getDataType(); + ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]); + dataSortOrder = field.getSortOrder(); + isNullable = field.isNullable(); + } + boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC; + PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType); + indexedColumnDataTypes[i] = indexColumnType; + boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType); + boolean isIndexColumnDesc = descIndexColumnBitSet.get(i); + if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + } else { + if (!isBytesComparable) { + indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault()); + } + if (isDataColumnInverted != isIndexColumnDesc) { + writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output); + } else { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + } + } + + if (!indexColumnType.isFixedWidth()) { + output.write(SchemaUtil.getSeparatorBytes(indexColumnType, rowKeyOrderOptimizable, + ptr.getLength() == 0, isIndexColumnDesc ? 
SortOrder.DESC : SortOrder.ASC)); + trailingVariableWidthColumnNum++; + } else { + trailingVariableWidthColumnNum = 0; + } + } + byte[] indexRowKey = stream.getBuffer(); + // Remove trailing nulls + int length = stream.size(); + int minLength = length - maxTrailingNulls; + // The existing code does not eliminate the separator if the data type is not nullable. It not + // clear why. + // The actual bug is in the calculation of maxTrailingNulls with view indexes. So, in order + // not to impact some other cases, we should keep minLength check here. + int indexColumnIdx = nIndexedColumns - 1; + while (trailingVariableWidthColumnNum > 0 && length > minLength) { + if (indexColumnIdx < 0) { + break; + } + if (indexedColumnDataTypes[indexColumnIdx] != PVarbinaryEncoded.INSTANCE) { + if (indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) { + length--; + } else { + break; + } + } else { + byte[] sepBytes = QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES; + if ( + length >= 2 && indexRowKey[length - 1] == sepBytes[1] + && indexRowKey[length - 2] == sepBytes[0] + ) { + length -= 2; + } else { + break; + } + } + trailingVariableWidthColumnNum--; + indexColumnIdx--; + } - int dataPosOffset = (isDataTableSalted ? 1 : 0) + (this.isMultiTenant ? 1 : 0); - - // For indexes on views, we need to remember which data columns are "constants" - // These are the values in a VIEW where clause. For these, we don't put them in the - // index, as they're the same for every row in the index. The data table can be - // either a VIEW or PROJECTED - ListdataPKColumns = dataTable.getPKColumns(); - this.indexDataColumnCount = dataPKColumns.size(); - PTable parentTable = dataTable; - // We need to get the PK column for the table on which the index is created - if (!dataTable.getName().equals(cdcTable != null - ? cdcTable.getParentName() : index.getParentName())) { - try { - String tenantId = (index.getTenantId() != null) ? - index.getTenantId().getString() : null; - parentTable = connection.getTable(tenantId, index.getParentName().getString()); - this.indexDataColumnCount = parentTable.getPKColumns().size(); - } catch (SQLException e) { - throw new RuntimeException(e); + if (isIndexSalted) { + // Set salt byte + byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, + length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets); + indexRowKey[0] = saltByte; + } + return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + } + } + + /* + * Build the data row key from the index row key + */ + public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) { + RowKeySchema indexRowKeySchema = getIndexRowKeySchema(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + TrustedByteArrayOutputStream stream = + new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes); + DataOutput output = new DataOutputStream(stream); + // Increment dataPosOffset until all have been written + int dataPosOffset = 0; + int viewConstantsIndex = 0; + try { + int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 
1 : 0; + int maxRowKeyOffset = indexRowKeyPtr.getOffset() + indexRowKeyPtr.getLength(); + indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset); + if (isDataTableSalted) { + dataPosOffset++; + output.write(0); // will be set at end to salt byte + } + if (viewIndexId != null) { + indexRowKeySchema.next(ptr, indexPosOffset++, maxRowKeyOffset); + } + if (isMultiTenant) { + indexRowKeySchema.next(ptr, indexPosOffset, maxRowKeyOffset); + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { + output.write(SchemaUtil.getSeparatorBytes( + dataRowKeySchema.getField(dataPosOffset).getDataType(), rowKeyOrderOptimizable, + ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset).getSortOrder())); + } + indexPosOffset++; + dataPosOffset++; + } + indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + + (viewIndexId == null ? 0 : 1); + BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); + BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet(); + int trailingVariableWidthColumnNum = 0; + for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) { + // Write view constants from the data table, as these + // won't appear in the index (as they're the + // same for all rows in this index) + if (viewConstantColumnBitSet.get(i)) { + output.write(viewConstants[viewConstantsIndex++]); + } else { + int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset); + Boolean hasValue = + indexRowKeySchema.iterator(indexRowKeyPtr, ptr, pos + indexPosOffset + 1); + if (Boolean.TRUE.equals(hasValue)) { + // Write data row key value taking into account coercion and inversion + // if necessary + Field dataField = dataRowKeySchema.getField(i); + Field indexField = indexRowKeySchema.getField(pos + indexPosOffset); + PDataType indexColumnType = indexField.getDataType(); + PDataType dataColumnType = dataField.getDataType(); + SortOrder dataSortOrder = dataField.getSortOrder(); + SortOrder indexSortOrder = indexField.getSortOrder(); + boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC; + boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType); + if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(pos)) { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + } else { + if (!isBytesComparable) { + dataColumnType.coerceBytes(ptr, indexColumnType, indexSortOrder, + SortOrder.getDefault()); + } + if (descIndexColumnBitSet.get(pos) != isDataColumnInverted) { + writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output); + } else { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + } } + } } - this.parentTableType = parentTable.getType(); - - int indexPkColumnCount = this.indexDataColumnCount + - indexedExpressionCount - (this.isDataTableSalted ? 1 : 0) - (this.isMultiTenant ? 
1 : 0); - this.rowKeyMetaData = newRowKeyMetaData(indexPkColumnCount); - BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - - int nDataPKColumns = this.indexDataColumnCount - dataPosOffset; - for (int i = dataPosOffset; i < dataPKColumns.size(); i++) { - PColumn dataPKColumn = dataPKColumns.get(i); - if (dataPKColumn.getViewConstant() != null) { - bitSet.set(i); - nDataPKColumns--; - } + // Write separator byte(s) if variable length + if (!dataRowKeySchema.getField(i).getDataType().isFixedWidth()) { + output.write(SchemaUtil.getSeparatorBytes(dataRowKeySchema.getField(i).getDataType(), + rowKeyOrderOptimizable, ptr.getLength() == 0, + dataRowKeySchema.getField(i).getSortOrder())); + trailingVariableWidthColumnNum++; + } else { + trailingVariableWidthColumnNum = 0; } - this.indexTableName = indexTableName; - this.indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns); - this.indexedExpressions = Lists.newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns); - this.coveredColumnsMap = Maps.newHashMapWithExpectedSize(nIndexColumns - nIndexPKColumns); - this.nIndexSaltBuckets = nIndexSaltBuckets == null ? PTable.NO_SALTING : nIndexSaltBuckets; - this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(dataTable); - this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(index); - this.nDataCFs = dataTable.getColumnFamilies().size(); - this.indexWALDisabled = indexWALDisabled; - // TODO: check whether index is immutable or not. Currently it's always false so checking - // data table is with immutable rows or not. - this.immutableRows = dataTable.isImmutableRows(); - int indexColByteSize = 0; - ColumnResolver resolver = null; - List parseNodes = new ArrayList(1); - UDFParseNodeVisitor visitor = new UDFParseNodeVisitor(); - for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) { - PColumn indexColumn = index.getPKColumns().get(i); - String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn); - try { - ParseNode parseNode = SQLParser.parseCondition(expressionStr); - parseNode.accept(visitor); - parseNodes.add(parseNode); - } catch (SQLException e) { - throw new RuntimeException(e); - } + } + int length = stream.size(); + byte[] dataRowKey = stream.getBuffer(); + // Remove trailing nulls + int indexColumnIdx = dataRowKeySchema.getFieldCount() - 1; + while (trailingVariableWidthColumnNum > 0) { + PDataType dataType = dataRowKeySchema.getField(indexColumnIdx).getDataType(); + if (dataType != PVarbinaryEncoded.INSTANCE) { + if (dataRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) { + length--; + } else { + break; + } + } else { + byte[] sepBytes = QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES; + if ( + length >= 2 && dataRowKey[length - 1] == sepBytes[1] + && dataRowKey[length - 2] == sepBytes[0] + ) { + length -= 2; + } else { + break; + } + } + trailingVariableWidthColumnNum--; + indexColumnIdx--; + } + if (isDataTableSalted) { + // Set salt byte + byte saltByte = SaltingUtil.getSaltingByte(dataRowKey, SaltingUtil.NUM_SALTING_BYTES, + length - SaltingUtil.NUM_SALTING_BYTES, nDataTableSaltBuckets); + dataRowKey[0] = saltByte; + } + return dataRowKey.length == length ? 
dataRowKey : Arrays.copyOf(dataRowKey, length); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + } + } + + public byte[] getIndexRowKey(final Put dataRow) { + ValueGetter valueGetter = new IndexUtil.SimpleValueGetter(dataRow); + return buildRowKey(valueGetter, new ImmutableBytesWritable(dataRow.getRow()), null, null, + IndexUtil.getMaxTimestamp(dataRow)); + } + + public boolean checkIndexRow(final byte[] indexRowKey, final Put dataRow) { + if (!shouldPrepareIndexMutations(dataRow)) { + return false; + } + byte[] builtIndexRowKey = getIndexRowKey(dataRow); + if ( + Bytes.compareTo(builtIndexRowKey, 0, builtIndexRowKey.length, indexRowKey, 0, + indexRowKey.length) != 0 + ) { + return false; + } + return true; + } + + /** + * Determines if the index row for a given data row should be prepared. For full indexes, index + * rows should always be prepared. For the partial indexes, the index row should be prepared only + * if the index where clause is satisfied on the given data row. + * @param dataRowState data row represented as a put mutation, that is list of put cells + * @return always true for full indexes, and true for partial indexes if the index where + * expression evaluates to true on the given data row + */ + + public boolean shouldPrepareIndexMutations(Put dataRowState) { + if (getIndexWhere() == null) { + // It is a full index and the index row should be prepared. + return true; + } + List cols = IndexUtil.readColumnsFromRow(dataRowState, getIndexWhereColumns()); + // Cells should be sorted as they are searched using a binary search during expression + // evaluation + Collections.sort(cols, CellComparator.getInstance()); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(cols); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + if (!getIndexWhere().evaluate(tuple, ptr)) { + return false; + } + Object value = PBoolean.INSTANCE.toObject(ptr); + return value.equals(Boolean.TRUE); + } + + public Boolean isAgedEnough(long ts, long ageThreshold) { + return (EnvironmentEdgeManager.currentTimeMillis() - ts) > ageThreshold; + } + + public Delete createDelete(byte[] indexRowKey, long ts, boolean singleVersion) { + if (singleVersion) { + return buildRowDeleteMutation(indexRowKey, IndexMaintainer.DeleteType.SINGLE_VERSION, ts); + } else { + return buildRowDeleteMutation(indexRowKey, IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + } + } + + /* + * return the view index id from the index row key + */ + public byte[] getViewIndexIdFromIndexRowKey(ImmutableBytesWritable indexRowKeyPtr) { + assert (isLocalIndex); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(indexRowKeyPtr.get(), + (indexRowKeyPtr.getOffset() + (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0)), + viewIndexId.length); + return ptr.copyBytesIfNecessary(); + } + + private volatile RowKeySchema indexRowKeySchema; + + // We have enough information to generate the index row key schema + private RowKeySchema generateIndexRowKeySchema() { + int nIndexedColumns = getIndexPkColumnCount() + (isMultiTenant ? 1 : 0) + + (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (viewIndexId != null ? 
1 : 0) + - getNumViewConstants(); + RowKeySchema.RowKeySchemaBuilder builder = + new RowKeySchema.RowKeySchemaBuilder(nIndexedColumns); + builder.rowKeyOrderOptimizable(rowKeyOrderOptimizable); + if (!isLocalIndex && nIndexSaltBuckets > 0) { + builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.ASC); + nIndexedColumns--; + } + int dataPosOffset = isDataTableSalted ? 1 : 0; + if (viewIndexId != null) { + nIndexedColumns--; + builder.addField(new PDatum() { + + @Override + public boolean isNullable() { + return false; } - try { - resolver = FromCompiler.getResolver(connection, new TableRef(dataTable), visitor.getUdfParseNodes()); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible + + @Override + public PDataType getDataType() { + return viewIndexIdType; } - StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver); - this.indexedColumnsInfo = Sets.newHashSetWithExpectedSize(nIndexColumns - nIndexPKColumns); - - IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context); - for (int i = indexPosOffset; i < index.getPKColumns().size(); i++) { - PColumn indexColumn = index.getPKColumns().get(i); - int indexPos = i - indexPosOffset; - Expression expression = null; - try { - expressionIndexCompiler.reset(); - expression = parseNodes.get(indexPos).accept(expressionIndexCompiler); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - if ( expressionIndexCompiler.getColumnRef()!=null ) { - // get the column of the data column that corresponds to this index column - PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString()); - boolean isPKColumn = SchemaUtil.isPKColumn(column); - if (isPKColumn) { - int dataPkPos = dataTable.getPKColumns().indexOf(column) - (dataTable.getBucketNum() == null ? 0 : 1) - (this.isMultiTenant ? 1 : 0); - this.rowKeyMetaData.setIndexPkPosition(dataPkPos, indexPos); - indexedColumnsInfo.add(new Pair<>((String)null, column.getName().getString())); - } else { - indexColByteSize += column.getDataType().isFixedWidth() ? SchemaUtil.getFixedByteSize(column) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; - try { - // Surround constant with cast so that we can still know the original type. Otherwise, if we lose the type, - // (for example when VARCHAR becomes CHAR), it can lead to problems in the type translation we do between data tables and indexes. - if (column.isNullable() && ExpressionUtil.isConstant(expression)) { - expression = CoerceExpression.create(expression, indexColumn.getDataType()); - } - this.indexedExpressions.add(expression); - indexedColumnsInfo.add(new Pair<>(column.getFamilyName().getString(), column.getName().getString())); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - } - } - else { - indexColByteSize += expression.getDataType().isFixedWidth() ? 
SchemaUtil.getFixedByteSize(expression) : ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; - this.indexedExpressions.add(expression); - KeyValueExpressionVisitor kvVisitor = new KeyValueExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression colExpression) { - return addDataColInfo(dataTable, colExpression); - } - - @Override - public Void visit(SingleCellColumnExpression expression) { - return addDataColInfo(dataTable, expression); - } - - private Void addDataColInfo(final PTable dataTable, Expression expression) { - Preconditions.checkArgument(expression instanceof SingleCellColumnExpression - || expression instanceof KeyValueColumnExpression); - - KeyValueColumnExpression colExpression = null; - if (expression instanceof SingleCellColumnExpression) { - colExpression = - ((SingleCellColumnExpression) expression).getKeyValueExpression(); - } else { - colExpression = ((KeyValueColumnExpression) expression); - } - byte[] cf = colExpression.getColumnFamily(); - byte[] cq = colExpression.getColumnQualifier(); - try { - PColumn dataColumn = - cf == null ? dataTable.getColumnForColumnQualifier(null, cq) - : dataTable.getColumnFamily(cf) - .getPColumnForColumnQualifier(cq); - if (dataColumn == null) { - if (Bytes.compareTo(cf, dataEmptyKeyValueCF) == 0 - && Bytes.compareTo(cq, EncodedColumnsUtil.getEmptyKeyValueInfo(dataEncodingScheme).getFirst()) == 0) { - return null; - } else { - throw new ColumnNotFoundException(dataTable.getSchemaName().getString(), - dataTable.getTableName().getString(), Bytes.toString(cf), Bytes.toString(cq)); - } - } else { - indexedColumnsInfo.add(new Pair<>(dataColumn.getFamilyName() - .getString(), dataColumn.getName().getString())); - } - } catch (ColumnNotFoundException | ColumnFamilyNotFoundException - | AmbiguousColumnException e) { - if (dataTable.hasOnlyPkColumns()) { - return null; - } - throw new RuntimeException(e); - } - return null; - } - - }; - expression.accept(kvVisitor); - } - // set the sort order of the expression correctly - if (indexColumn.getSortOrder() == SortOrder.DESC) { - this.rowKeyMetaData.getDescIndexColumnBitSet().set(indexPos); - } + + @Override + public Integer getMaxLength() { + return null; } - this.estimatedExpressionSize = expressionIndexCompiler.getTotalNodeCount() * ESTIMATED_EXPRESSION_SIZE; - for (int i = 0; i < index.getColumnFamilies().size(); i++) { - PColumnFamily family = index.getColumnFamilies().get(i); - for (PColumn indexColumn : family.getColumns()) { - PColumn dataColumn = IndexUtil.getDataColumnOrNull(dataTable, indexColumn.getName().getString()); - // This can happen during deletion where we don't need covered columns - if (dataColumn != null) { - byte[] dataColumnCq = dataColumn.getColumnQualifierBytes(); - byte[] indexColumnCq = indexColumn.getColumnQualifierBytes(); - this.coveredColumnsMap.put(new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq), - new ColumnReference(indexColumn.getFamilyName().getBytes(), indexColumnCq)); - } - } + + @Override + public Integer getScale() { + return null; } - this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize); - this.logicalIndexName = index.getName().getString(); - if (index.getIndexWhere() != null) { - this.indexWhere = index.getIndexWhereExpression(connection); - this.indexWhereColumns = index.getIndexWhereColumns(connection); + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); } - initCachedState(); + }, false, SortOrder.getDefault()); } - - public void 
setDataImmutableStorageScheme(ImmutableStorageScheme sc) { - this.dataImmutableStorageScheme = sc; + if (isMultiTenant) { + Field field = dataRowKeySchema.getField(dataPosOffset++); + builder.addField(field, field.isNullable(), field.getSortOrder()); + nIndexedColumns--; } - public void setDataEncodingScheme(QualifierEncodingScheme sc) { - this.dataEncodingScheme = sc; + Field[] indexFields = new Field[nIndexedColumns]; + BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); + // Add Field for all data row pk columns + for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) { + // Ignore view constants from the data table, as these + // don't need to appear in the index (as they're the + // same for all rows in this index) + if (!viewConstantColumnBitSet.get(i)) { + int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset); + indexFields[pos] = dataRowKeySchema.getField(i); + } } + BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet(); + Iterator expressionItr = indexedExpressions.iterator(); + for (int i = 0; i < indexFields.length; i++) { + Field indexField = indexFields[i]; + PDataType dataTypeToBe; + SortOrder sortOrderToBe; + boolean isNullableToBe; + Integer maxLengthToBe; + Integer scaleToBe; + if (indexField == null) { + Expression e = expressionItr.next(); + isNullableToBe = e.isNullable(); + dataTypeToBe = IndexUtil.getIndexColumnDataType(isNullableToBe, e.getDataType()); + sortOrderToBe = descIndexColumnBitSet.get(i) ? SortOrder.DESC : SortOrder.ASC; + maxLengthToBe = e.getMaxLength(); + scaleToBe = e.getScale(); + } else { + isNullableToBe = indexField.isNullable(); + dataTypeToBe = IndexUtil.getIndexColumnDataType(isNullableToBe, indexField.getDataType()); + sortOrderToBe = descIndexColumnBitSet.get(i) ? SortOrder.DESC : SortOrder.ASC; + maxLengthToBe = indexField.getMaxLength(); + scaleToBe = indexField.getScale(); + } + final PDataType dataType = dataTypeToBe; + final SortOrder sortOrder = sortOrderToBe; + final boolean isNullable = isNullableToBe; + final Integer maxLength = maxLengthToBe; + final Integer scale = scaleToBe; + builder.addField(new PDatum() { - public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean prependRegionStartKey = isLocalIndex && regionStartKey != null; - boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0; - int prefixKeyLength = - prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length - : regionEndKey.length) : 0; - TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0)); - DataOutput output = new DataOutputStream(stream); + @Override + public boolean isNullable() { + return isNullable; + } - try { - // For local indexes, we must prepend the row key with the start region key - if (prependRegionStartKey) { - if (regionStartKey.length == 0) { - output.write(new byte[prefixKeyLength]); - } else { - output.write(regionStartKey); - } - } - if (isIndexSalted) { - output.write(0); // will be set at end to index salt byte - } - // The dataRowKeySchema includes the salt byte field, - // so we must adjust for that here. - int dataPosOffset = isDataTableSalted ? 
1 : 0 ; - BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants(); - int[][] dataRowKeyLocator = new int[2][nIndexedColumns]; - // Skip data table salt byte - int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength(); - dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset); - - if (viewIndexId != null) { - output.write(viewIndexId); - } - - if (isMultiTenant) { - dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset); - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { - output.write(SchemaUtil.getSeparatorBytes( - dataRowKeySchema.getField(dataPosOffset).getDataType(), - rowKeyOrderOptimizable, - ptr.getLength() == 0, - dataRowKeySchema.getField(dataPosOffset).getSortOrder())); - } - dataPosOffset++; - } - - // Write index row key - for (int i = dataPosOffset; i < indexDataColumnCount; i++) { - Boolean hasValue=dataRowKeySchema.next(ptr, i, maxRowKeyOffset); - // Ignore view constants from the data table, as these - // don't need to appear in the index (as they're the - // same for all rows in this index) - if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) { - int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset); - if (Boolean.TRUE.equals(hasValue)) { - dataRowKeyLocator[0][pos] = ptr.getOffset(); - dataRowKeyLocator[1][pos] = ptr.getLength(); - } else { - dataRowKeyLocator[0][pos] = 0; - dataRowKeyLocator[1][pos] = 0; - } - } - } - BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet(); - Iterator expressionIterator = indexedExpressions.iterator(); - int trailingVariableWidthColumnNum = 0; - PDataType[] indexedColumnDataTypes = new PDataType[nIndexedColumns]; - for (int i = 0; i < nIndexedColumns; i++) { - PDataType dataColumnType; - boolean isNullable; - SortOrder dataSortOrder; - if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) { - Expression expression = expressionIterator.next(); - dataColumnType = expression.getDataType(); - dataSortOrder = expression.getSortOrder(); - isNullable = expression.isNullable(); - expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); - } - else { - Field field = dataRowKeySchema.getField(dataPkPosition[i]); - dataColumnType = field.getDataType(); - ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]); - dataSortOrder = field.getSortOrder(); - isNullable = field.isNullable(); - } - boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC; - PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType); - indexedColumnDataTypes[i] = indexColumnType; - boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType); - boolean isIndexColumnDesc = descIndexColumnBitSet.get(i); - if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - } else { - if (!isBytesComparable) { - indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault()); - } - if (isDataColumnInverted != isIndexColumnDesc) { - writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output); - } else { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - } + @Override + public PDataType getDataType() { + return dataType; + } - if (!indexColumnType.isFixedWidth()) { - output.write( - SchemaUtil.getSeparatorBytes(indexColumnType, - rowKeyOrderOptimizable, - 
ptr.getLength() == 0, - isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC)); - trailingVariableWidthColumnNum++; - } else { - trailingVariableWidthColumnNum = 0; - } - } - byte[] indexRowKey = stream.getBuffer(); - // Remove trailing nulls - int length = stream.size(); - int minLength = length - maxTrailingNulls; - // The existing code does not eliminate the separator if the data type is not nullable. It not clear why. - // The actual bug is in the calculation of maxTrailingNulls with view indexes. So, in order not to impact some other cases, we should keep minLength check here. - int indexColumnIdx = nIndexedColumns - 1; - while (trailingVariableWidthColumnNum > 0 && length > minLength) { - if (indexColumnIdx < 0) { - break; - } - if (indexedColumnDataTypes[indexColumnIdx] != PVarbinaryEncoded.INSTANCE) { - if (indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) { - length--; - } else { - break; - } - } else { - byte[] sepBytes = QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES; - if (length >= 2 && indexRowKey[length - 1] == sepBytes[1] - && indexRowKey[length - 2] == sepBytes[0]) { - length -= 2; - } else { - break; - } - } - trailingVariableWidthColumnNum--; - indexColumnIdx--; - } + @Override + public Integer getMaxLength() { + return maxLength; + } - if (isIndexSalted) { - // Set salt byte - byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets); - indexRowKey[0] = saltByte; - } - return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + @Override + public Integer getScale() { + return scale; } - } - /* - * Build the data row key from the index row key - */ - public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) { - RowKeySchema indexRowKeySchema = getIndexRowKeySchema(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes); - DataOutput output = new DataOutputStream(stream); - // Increment dataPosOffset until all have been written - int dataPosOffset = 0; - int viewConstantsIndex = 0; - try { - int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0; - int maxRowKeyOffset = indexRowKeyPtr.getOffset() + indexRowKeyPtr.getLength(); - indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset); - if (isDataTableSalted) { - dataPosOffset++; - output.write(0); // will be set at end to salt byte - } - if (viewIndexId != null) { - indexRowKeySchema.next(ptr, indexPosOffset++, maxRowKeyOffset); - } - if (isMultiTenant) { - indexRowKeySchema.next(ptr, indexPosOffset, maxRowKeyOffset); - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { - output.write(SchemaUtil.getSeparatorBytes( - dataRowKeySchema.getField(dataPosOffset).getDataType(), - rowKeyOrderOptimizable, - ptr.getLength() == 0, - dataRowKeySchema.getField(dataPosOffset).getSortOrder())); - } - indexPosOffset++; - dataPosOffset++; - } - indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + (viewIndexId == null ? 
0 : 1); - BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet(); - int trailingVariableWidthColumnNum = 0; - for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) { - // Write view constants from the data table, as these - // won't appear in the index (as they're the - // same for all rows in this index) - if (viewConstantColumnBitSet.get(i)) { - output.write(viewConstants[viewConstantsIndex++]); - } else { - int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset); - Boolean hasValue=indexRowKeySchema.iterator(indexRowKeyPtr, ptr, pos + indexPosOffset+1); - if (Boolean.TRUE.equals(hasValue)) { - // Write data row key value taking into account coercion and inversion - // if necessary - Field dataField = dataRowKeySchema.getField(i); - Field indexField = indexRowKeySchema.getField(pos + indexPosOffset); - PDataType indexColumnType = indexField.getDataType(); - PDataType dataColumnType = dataField.getDataType(); - SortOrder dataSortOrder = dataField.getSortOrder(); - SortOrder indexSortOrder = indexField.getSortOrder(); - boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC; - boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType) ; - if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(pos)) { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - } else { - if (!isBytesComparable) { - dataColumnType.coerceBytes(ptr, indexColumnType, indexSortOrder, SortOrder.getDefault()); - } - if (descIndexColumnBitSet.get(pos) != isDataColumnInverted) { - writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output); - } else { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - } - } - } - // Write separator byte(s) if variable length - if (!dataRowKeySchema.getField(i).getDataType().isFixedWidth()) { - output.write( - SchemaUtil.getSeparatorBytes(dataRowKeySchema.getField(i).getDataType(), - rowKeyOrderOptimizable, - ptr.getLength() == 0, - dataRowKeySchema.getField(i).getSortOrder())); - trailingVariableWidthColumnNum++; - } else { - trailingVariableWidthColumnNum = 0; - } - } - int length = stream.size(); - byte[] dataRowKey = stream.getBuffer(); - // Remove trailing nulls - int indexColumnIdx = dataRowKeySchema.getFieldCount() - 1; - while (trailingVariableWidthColumnNum > 0) { - PDataType dataType = dataRowKeySchema.getField(indexColumnIdx).getDataType(); - if (dataType != PVarbinaryEncoded.INSTANCE) { - if (dataRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) { - length--; - } else { - break; - } - } else { - byte[] sepBytes = QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES; - if (length >= 2 && dataRowKey[length - 1] == sepBytes[1] - && dataRowKey[length - 2] == sepBytes[0]) { - length -= 2; - } else { - break; - } - } - trailingVariableWidthColumnNum--; - indexColumnIdx--; - } - if (isDataTableSalted) { - // Set salt byte - byte saltByte = SaltingUtil.getSaltingByte(dataRowKey, - SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, - nDataTableSaltBuckets); - dataRowKey[0] = saltByte; - } - return dataRowKey.length == length ? 
dataRowKey : Arrays.copyOf(dataRowKey, length); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + @Override + public SortOrder getSortOrder() { + return sortOrder; } + + }, true, sortOrder); } + return builder.build(); + } - public byte[] getIndexRowKey(final Put dataRow) { - ValueGetter valueGetter = new IndexUtil.SimpleValueGetter(dataRow); - return buildRowKey(valueGetter, new ImmutableBytesWritable(dataRow.getRow()), - null, null, IndexUtil.getMaxTimestamp(dataRow)); + private int getNumViewConstants() { + if (isIndexOnBaseTable()) { + return 0; } - public boolean checkIndexRow(final byte[] indexRowKey, - final Put dataRow) { - if (!shouldPrepareIndexMutations(dataRow)) { - return false; - } - byte[] builtIndexRowKey = getIndexRowKey(dataRow); - if (Bytes.compareTo(builtIndexRowKey, 0, builtIndexRowKey.length, - indexRowKey, 0, indexRowKey.length) != 0) { - return false; - } - return true; - } - - /** - * Determines if the index row for a given data row should be prepared. For full - * indexes, index rows should always be prepared. For the partial indexes, the index row should - * be prepared only if the index where clause is satisfied on the given data row. - * - * @param dataRowState data row represented as a put mutation, that is list of put cells - * @return always true for full indexes, and true for partial indexes if the index where - * expression evaluates to true on the given data row - */ - - public boolean shouldPrepareIndexMutations(Put dataRowState) { - if (getIndexWhere() == null) { - // It is a full index and the index row should be prepared. - return true; - } - List cols = IndexUtil.readColumnsFromRow(dataRowState, getIndexWhereColumns()); - // Cells should be sorted as they are searched using a binary search during expression - // evaluation - Collections.sort(cols, CellComparator.getInstance()); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(cols); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - if (!getIndexWhere().evaluate(tuple, ptr)) { - return false; - } - Object value = PBoolean.INSTANCE.toObject(ptr); - return value.equals(Boolean.TRUE); + BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); + int num = 0; + for (int i = 0; i < dataRowKeySchema.getFieldCount(); i++) { + if (bitSet.get(i)) num++; } + return num; + } - public Boolean isAgedEnough(long ts, long ageThreshold) { - return (EnvironmentEdgeManager.currentTimeMillis() - ts) > ageThreshold; + private RowKeySchema getIndexRowKeySchema() { + if (indexRowKeySchema != null) { + return indexRowKeySchema; } - - public Delete createDelete(byte[] indexRowKey, long ts, boolean singleVersion) { - if (singleVersion) { - return buildRowDeleteMutation(indexRowKey, - IndexMaintainer.DeleteType.SINGLE_VERSION, ts); - } else { - return buildRowDeleteMutation(indexRowKey, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - } + synchronized (this) { + if (indexRowKeySchema == null) { + indexRowKeySchema = generateIndexRowKeySchema(); + } } + return indexRowKeySchema; + } + + public Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, + ImmutableBytesWritable dataRowKeyPtr, long ts, byte[] regionStartKey, byte[] regionEndKey, + boolean verified) throws IOException { + byte[] indexRowKey = + this.buildRowKey(valueGetter, dataRowKeyPtr, regionStartKey, regionEndKey, ts); + return 
buildUpdateMutation(kvBuilder, valueGetter, dataRowKeyPtr, ts, regionStartKey, + regionEndKey, indexRowKey, this.getEmptyKeyValueFamily(), coveredColumnsMap, + indexEmptyKeyValueRef, indexWALDisabled, dataImmutableStorageScheme, immutableStorageScheme, + encodingScheme, dataEncodingScheme, verified); + } + + public static Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, + ImmutableBytesWritable dataRowKeyPtr, long ts, byte[] regionStartKey, byte[] regionEndKey, + byte[] destRowKey, ImmutableBytesPtr emptyKeyValueCFPtr, + Map coveredColumnsMap, ColumnReference destEmptyKeyValueRef, + boolean destWALDisabled, ImmutableStorageScheme srcImmutableStorageScheme, + ImmutableStorageScheme destImmutableStorageScheme, QualifierEncodingScheme destEncodingScheme, + QualifierEncodingScheme srcEncodingScheme, boolean verified) throws IOException { + Set coveredColumns = coveredColumnsMap.keySet(); + Put put = null; + // New row being inserted: add the empty key value + ImmutableBytesWritable latestValue = null; + if ( + valueGetter == null || coveredColumns.isEmpty() + || (latestValue = valueGetter.getLatestValue(destEmptyKeyValueRef, ts)) == null + || latestValue == ValueGetter.HIDDEN_BY_DELETE + ) { + // We need to track whether or not our empty key value is hidden by a Delete Family marker at + // the same timestamp. + // If it is, these Puts will be masked so should not be emitted. + if (latestValue == ValueGetter.HIDDEN_BY_DELETE) { + return null; + } + put = new Put(destRowKey); + // add the keyvalue for the empty row + put.add(kvBuilder.buildPut(new ImmutableBytesPtr(destRowKey), emptyKeyValueCFPtr, + destEmptyKeyValueRef.getQualifierWritable(), ts, + verified + ? QueryConstants.VERIFIED_BYTES_PTR + : QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR)); + put.setDurability(!destWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); + } + + ImmutableBytesPtr rowKey = new ImmutableBytesPtr(destRowKey); + if (destImmutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + // map from index column family to list of pair of index column and data column (for covered + // columns) + Map>> familyToColListMap = + Maps.newHashMap(); + for (ColumnReference ref : coveredColumns) { + ColumnReference indexColRef = coveredColumnsMap.get(ref); + ImmutableBytesPtr cf = new ImmutableBytesPtr(indexColRef.getFamily()); + if (!familyToColListMap.containsKey(cf)) { + familyToColListMap.put(cf, Lists.> newArrayList()); + } + familyToColListMap.get(cf).add(Pair.newPair(indexColRef, ref)); + } + // iterate over each column family and create a byte[] containing all the columns + for (Entry>> entry : familyToColListMap.entrySet()) { + byte[] columnFamily = entry.getKey().copyBytesIfNecessary(); + List> colRefPairs = entry.getValue(); + int maxEncodedColumnQualifier = Integer.MIN_VALUE; + // find the max col qualifier + for (Pair colRefPair : colRefPairs) { + maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, + destEncodingScheme.decode(colRefPair.getFirst().getQualifier())); + } + Expression[] colValues = + EncodedColumnsUtil.createColumnExpressionArray(maxEncodedColumnQualifier); + // set the values of the columns + for (Pair colRefPair : colRefPairs) { + ColumnReference indexColRef = colRefPair.getFirst(); + ColumnReference dataColRef = colRefPair.getSecond(); + byte[] value = null; + if (srcImmutableStorageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { + Expression expression = new SingleCellColumnExpression(new PDatum() { + @Override + public boolean isNullable() { + return false; + } - /* - * return the view index id from the index row key - */ - public byte[] getViewIndexIdFromIndexRowKey(ImmutableBytesWritable indexRowKeyPtr) { - assert (isLocalIndex); - ImmutableBytesPtr ptr = - new ImmutableBytesPtr(indexRowKeyPtr.get(),( indexRowKeyPtr.getOffset() - + (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0)), viewIndexId.length); - return ptr.copyBytesIfNecessary(); - } - - private volatile RowKeySchema indexRowKeySchema; - - // We have enough information to generate the index row key schema - private RowKeySchema generateIndexRowKeySchema() { - int nIndexedColumns = getIndexPkColumnCount() + (isMultiTenant ? 1 : 0) + (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (viewIndexId != null ? 1 : 0) - getNumViewConstants(); - RowKeySchema.RowKeySchemaBuilder builder = new RowKeySchema.RowKeySchemaBuilder(nIndexedColumns); - builder.rowKeyOrderOptimizable(rowKeyOrderOptimizable); - if (!isLocalIndex && nIndexSaltBuckets > 0) { - builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.ASC); - nIndexedColumns--; - } - int dataPosOffset = isDataTableSalted ? 
1 : 0 ; - if (viewIndexId != null) { - nIndexedColumns--; - builder.addField(new PDatum() { - - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return viewIndexIdType; - } + @Override + public SortOrder getSortOrder() { + return null; + } - @Override - public Integer getMaxLength() { - return null; - } + @Override + public Integer getScale() { + return null; + } - @Override - public Integer getScale() { - return null; - } + @Override + public Integer getMaxLength() { + return null; + } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - }, false, SortOrder.getDefault()); - } - if (isMultiTenant) { - Field field = dataRowKeySchema.getField(dataPosOffset++); - builder.addField(field, field.isNullable(), field.getSortOrder()); - nIndexedColumns--; - } - - Field[] indexFields = new Field[nIndexedColumns]; - BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - // Add Field for all data row pk columns - for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) { - // Ignore view constants from the data table, as these - // don't need to appear in the index (as they're the - // same for all rows in this index) - if (!viewConstantColumnBitSet.get(i)) { - int pos = rowKeyMetaData.getIndexPkPosition(i-dataPosOffset); - indexFields[pos] = - dataRowKeySchema.getField(i); - } - } - BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet(); - Iterator expressionItr = indexedExpressions.iterator(); - for (int i = 0; i < indexFields.length; i++) { - Field indexField = indexFields[i]; - PDataType dataTypeToBe; - SortOrder sortOrderToBe; - boolean isNullableToBe; - Integer maxLengthToBe; - Integer scaleToBe; - if (indexField == null) { - Expression e = expressionItr.next(); - isNullableToBe = e.isNullable(); - dataTypeToBe = IndexUtil.getIndexColumnDataType(isNullableToBe, e.getDataType()); - sortOrderToBe = descIndexColumnBitSet.get(i) ? SortOrder.DESC : SortOrder.ASC; - maxLengthToBe = e.getMaxLength(); - scaleToBe = e.getScale(); - } else { - isNullableToBe = indexField.isNullable(); - dataTypeToBe = IndexUtil.getIndexColumnDataType(isNullableToBe, indexField.getDataType()); - sortOrderToBe = descIndexColumnBitSet.get(i) ? SortOrder.DESC : SortOrder.ASC; - maxLengthToBe = indexField.getMaxLength(); - scaleToBe = indexField.getScale(); - } - final PDataType dataType = dataTypeToBe; - final SortOrder sortOrder = sortOrderToBe; - final boolean isNullable = isNullableToBe; - final Integer maxLength = maxLengthToBe; - final Integer scale = scaleToBe; - builder.addField(new PDatum() { - - @Override - public boolean isNullable() { - return isNullable; - } + @Override + public PDataType getDataType() { + return null; + } + }, dataColRef.getFamily(), dataColRef.getQualifier(), destEncodingScheme, + destImmutableStorageScheme); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); + value = ptr.copyBytesIfNecessary(); + } else { + // Data table is ONE_CELL_PER_COLUMN. Get the col value. 
+ ImmutableBytesWritable dataValue = valueGetter.getLatestValue(dataColRef, ts); + if (dataValue != null && dataValue != ValueGetter.HIDDEN_BY_DELETE) { + value = dataValue.copyBytes(); + } + } + if (value != null) { + int indexArrayPos = destEncodingScheme.decode(indexColRef.getQualifier()) + - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 1; + colValues[indexArrayPos] = new LiteralExpression(value); + } + } + + List children = Arrays.asList(colValues); + // we use SingleCellConstructorExpression to serialize multiple columns into a single byte[] + SingleCellConstructorExpression singleCellConstructorExpression = + new SingleCellConstructorExpression(destImmutableStorageScheme, children); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + singleCellConstructorExpression.evaluate(new BaseTuple() { + }, ptr); + if (put == null) { + put = new Put(destRowKey); + put.setDurability(!destWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); + } + ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily); + // this is a little bit of extra work for installations that are running <0.94.14, but that + // should be rare and is a short-term set of wrappers - it shouldn't kill GC + put.add(kvBuilder.buildPut(rowKey, colFamilyPtr, + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR, ts, ptr)); + } + } else { + if (srcImmutableStorageScheme == destImmutableStorageScheme) { // both ONE_CELL + for (ColumnReference ref : coveredColumns) { + ColumnReference indexColRef = coveredColumnsMap.get(ref); + ImmutableBytesPtr cq = indexColRef.getQualifierWritable(); + ImmutableBytesPtr cf = indexColRef.getFamilyWritable(); + ImmutableBytesWritable value = valueGetter.getLatestValue(ref, ts); + if (value != null && value != ValueGetter.HIDDEN_BY_DELETE) { + if (put == null) { + put = new Put(destRowKey); + put.setDurability(!destWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); + } + put.add(kvBuilder.buildPut(rowKey, cf, cq, ts, value)); + } + } + } else { + // Src is SINGLE_CELL, destination is ONE_CELL + Map>> familyToColListMap = + Maps.newHashMap(); + for (ColumnReference ref : coveredColumns) { + ColumnReference indexColRef = coveredColumnsMap.get(ref); + ImmutableBytesPtr cf = new ImmutableBytesPtr(indexColRef.getFamily()); + if (!familyToColListMap.containsKey(cf)) { + familyToColListMap.put(cf, + Lists.> newArrayList()); + } + familyToColListMap.get(cf).add(Pair.newPair(indexColRef, ref)); + } + // iterate over each column family and create a byte[] containing all the columns + for (Entry>> entry : familyToColListMap.entrySet()) { + byte[] columnFamily = entry.getKey().copyBytesIfNecessary(); + List> colRefPairs = entry.getValue(); + int maxEncodedColumnQualifier = Integer.MIN_VALUE; + // find the max col qualifier + for (Pair colRefPair : colRefPairs) { + maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, + srcEncodingScheme.decode(colRefPair.getSecond().getQualifier())); + } + // set the values of the columns + for (Pair colRefPair : colRefPairs) { + ColumnReference indexColRef = colRefPair.getFirst(); + ColumnReference dataColRef = colRefPair.getSecond(); + byte[] valueBytes = null; + Expression expression = new SingleCellColumnExpression(new PDatum() { + @Override + public boolean isNullable() { + return false; + } - @Override - public PDataType getDataType() { - return dataType; - } + @Override + public SortOrder getSortOrder() { + return null; + } - @Override - public Integer getMaxLength() { - return maxLength; - } + @Override + public Integer getScale() { + return null; + } - @Override - public Integer getScale() { - return scale; - } + @Override + public Integer getMaxLength() { + return null; + } - @Override - public SortOrder getSortOrder() { - return sortOrder; - } - - }, true, sortOrder); + @Override + public PDataType getDataType() { + return null; + } + }, dataColRef.getFamily(), dataColRef.getQualifier(), srcEncodingScheme, + srcImmutableStorageScheme); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); + valueBytes = ptr.copyBytesIfNecessary(); + + if (valueBytes != null) { + ImmutableBytesPtr cq = indexColRef.getQualifierWritable(); + ImmutableBytesPtr cf = indexColRef.getFamilyWritable(); + if (put == null) { + put = new Put(destRowKey); + put.setDurability(!destWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); + } + put.add( + kvBuilder.buildPut(rowKey, cf, cq, ts, new ImmutableBytesWritable(valueBytes))); + } + } } - return builder.build(); + } } - - private int getNumViewConstants() { - if (isIndexOnBaseTable()) { - return 0; - } - BitSet bitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - int num = 0; - for(int i = 0; i pendingUpdates) { + return getDeleteTypeOrNull(pendingUpdates, this.nDataCFs); + } + + private DeleteType getDeleteTypeOrNull(Collection pendingUpdates, int nCFs) { + int nDeleteCF = 0; + int nDeleteVersionCF = 0; + for (Cell kv : pendingUpdates) { + if (kv.getType() == Cell.Type.DeleteFamilyVersion) { + nDeleteVersionCF++; + } else if ( + kv.getType() == Cell.Type.DeleteFamily + // Since we don't include the index rows in the change set for txn tables, we need to + // detect row deletes that have transformed by TransactionProcessor + || TransactionUtil.isDeleteFamily(kv) + ) { + nDeleteCF++; + } + } + // This is what a delete looks like on the server side for mutable indexing... + // Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case + // not + DeleteType deleteType = null; + if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) { + deleteType = DeleteType.SINGLE_VERSION; + } else { + int nDelete = nDeleteCF + nDeleteVersionCF; + if (nDelete > 0 && nDelete >= nCFs) { + deleteType = DeleteType.ALL_VERSIONS; + } } + return deleteType; + } - private RowKeySchema getIndexRowKeySchema() { - if (indexRowKeySchema != null) { - return indexRowKeySchema; - } - synchronized (this) { - if (indexRowKeySchema == null) { - indexRowKeySchema = generateIndexRowKeySchema(); - } - } - return indexRowKeySchema; - } - - public Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, ImmutableBytesWritable dataRowKeyPtr, long ts, byte[] regionStartKey, byte[] regionEndKey, boolean verified) throws IOException { - byte[] indexRowKey = this.buildRowKey(valueGetter, dataRowKeyPtr, regionStartKey, regionEndKey, ts); - return buildUpdateMutation(kvBuilder, valueGetter, dataRowKeyPtr, ts, regionStartKey, regionEndKey, - indexRowKey, this.getEmptyKeyValueFamily(), coveredColumnsMap, - indexEmptyKeyValueRef, indexWALDisabled, dataImmutableStorageScheme, immutableStorageScheme, encodingScheme, dataEncodingScheme, verified); - } - - public static Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, ImmutableBytesWritable dataRowKeyPtr, long ts, - byte[] regionStartKey, byte[] regionEndKey, byte[] destRowKey, ImmutableBytesPtr emptyKeyValueCFPtr, - Map coveredColumnsMap, - ColumnReference destEmptyKeyValueRef, boolean destWALDisabled, - ImmutableStorageScheme srcImmutableStorageScheme, ImmutableStorageScheme destImmutableStorageScheme, - QualifierEncodingScheme destEncodingScheme, QualifierEncodingScheme srcEncodingScheme, boolean verified) throws IOException { - Set coveredColumns = coveredColumnsMap.keySet(); - Put put = null; - // New row being inserted: add the empty key value - ImmutableBytesWritable latestValue = null; - if (valueGetter==null || - coveredColumns.isEmpty() || - (latestValue = valueGetter.getLatestValue(destEmptyKeyValueRef, ts)) == null || - latestValue == ValueGetter.HIDDEN_BY_DELETE) { - // We need to track whether or not our empty key value is hidden by a Delete Family marker at the same timestamp. - // If it is, these Puts will be masked so should not be emitted. 
- if (latestValue == ValueGetter.HIDDEN_BY_DELETE) { - return null; - } - put = new Put(destRowKey); - // add the keyvalue for the empty row - put.add(kvBuilder.buildPut(new ImmutableBytesPtr(destRowKey), - emptyKeyValueCFPtr, destEmptyKeyValueRef.getQualifierWritable(), ts, - verified ? QueryConstants.VERIFIED_BYTES_PTR : QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR)); - put.setDurability(!destWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); - } + public boolean isRowDeleted(Collection pendingUpdates) { + return getDeleteTypeOrNull(pendingUpdates) != null; + } - ImmutableBytesPtr rowKey = new ImmutableBytesPtr(destRowKey); - if (destImmutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - // map from index column family to list of pair of index column and data column (for covered columns) - Map>> familyToColListMap = Maps.newHashMap(); - for (ColumnReference ref : coveredColumns) { - ColumnReference indexColRef = coveredColumnsMap.get(ref); - ImmutableBytesPtr cf = new ImmutableBytesPtr(indexColRef.getFamily()); - if (!familyToColListMap.containsKey(cf)) { - familyToColListMap.put(cf, Lists.>newArrayList()); - } - familyToColListMap.get(cf).add(Pair.newPair(indexColRef, ref)); - } - // iterate over each column family and create a byte[] containing all the columns - for (Entry>> entry : familyToColListMap.entrySet()) { - byte[] columnFamily = entry.getKey().copyBytesIfNecessary(); - List> colRefPairs = entry.getValue(); - int maxEncodedColumnQualifier = Integer.MIN_VALUE; - // find the max col qualifier - for (Pair colRefPair : colRefPairs) { - maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, destEncodingScheme.decode(colRefPair.getFirst().getQualifier())); - } - Expression[] colValues = EncodedColumnsUtil.createColumnExpressionArray(maxEncodedColumnQualifier); - // set the values of the columns - for (Pair colRefPair : colRefPairs) { - ColumnReference indexColRef = colRefPair.getFirst(); - ColumnReference dataColRef = colRefPair.getSecond(); - byte[] value = null; - if (srcImmutableStorageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - Expression expression = new SingleCellColumnExpression(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public SortOrder getSortOrder() { - return null; - } - - @Override public Integer getScale() { - return null; - } - - @Override public Integer getMaxLength() { - return null; - } - - @Override public PDataType getDataType() { - return null; - } - }, dataColRef.getFamily(), dataColRef.getQualifier(), destEncodingScheme, - destImmutableStorageScheme); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); - value = ptr.copyBytesIfNecessary(); - } else { - // Data table is ONE_CELL_PER_COLUMN. Get the col value. 
- ImmutableBytesWritable dataValue = valueGetter.getLatestValue(dataColRef, ts); - if (dataValue != null && dataValue != ValueGetter.HIDDEN_BY_DELETE) { - value = dataValue.copyBytes(); - } - } - if (value != null) { - int indexArrayPos = destEncodingScheme.decode(indexColRef.getQualifier())-QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE+1; - colValues[indexArrayPos] = new LiteralExpression(value); - } - } - - List children = Arrays.asList(colValues); - // we use SingleCellConstructorExpression to serialize multiple columns into a single byte[] - SingleCellConstructorExpression singleCellConstructorExpression = new SingleCellConstructorExpression(destImmutableStorageScheme, children); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - singleCellConstructorExpression.evaluate(new BaseTuple() {}, ptr); - if (put == null) { - put = new Put(destRowKey); - put.setDurability(!destWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); - } - ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily); - //this is a little bit of extra work for installations that are running <0.94.14, but that should be rare and is a short-term set of wrappers - it shouldn't kill GC - put.add(kvBuilder.buildPut(rowKey, colFamilyPtr, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR, ts, ptr)); - } - } else { - if (srcImmutableStorageScheme == destImmutableStorageScheme) { //both ONE_CELL - for (ColumnReference ref : coveredColumns) { - ColumnReference indexColRef = coveredColumnsMap.get(ref); - ImmutableBytesPtr cq = indexColRef.getQualifierWritable(); - ImmutableBytesPtr cf = indexColRef.getFamilyWritable(); - ImmutableBytesWritable value = valueGetter.getLatestValue(ref, ts); - if (value != null && value != ValueGetter.HIDDEN_BY_DELETE) { - if (put == null) { - put = new Put(destRowKey); - put.setDurability(!destWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); - } - put.add(kvBuilder.buildPut(rowKey, cf, cq, ts, value)); - } - } - } else { - // Src is SINGLE_CELL, destination is ONE_CELL - Map>> familyToColListMap = Maps.newHashMap(); - for (ColumnReference ref : coveredColumns) { - ColumnReference indexColRef = coveredColumnsMap.get(ref); - ImmutableBytesPtr cf = new ImmutableBytesPtr(indexColRef.getFamily()); - if (!familyToColListMap.containsKey(cf)) { - familyToColListMap.put(cf, Lists.>newArrayList()); - } - familyToColListMap.get(cf).add(Pair.newPair(indexColRef, ref)); - } - // iterate over each column family and create a byte[] containing all the columns - for (Entry>> entry : familyToColListMap.entrySet()) { - byte[] columnFamily = entry.getKey().copyBytesIfNecessary(); - List> colRefPairs = entry.getValue(); - int maxEncodedColumnQualifier = Integer.MIN_VALUE; - // find the max col qualifier - for (Pair colRefPair : colRefPairs) { - maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, srcEncodingScheme.decode(colRefPair.getSecond().getQualifier())); - } - // set the values of the columns - for (Pair colRefPair : colRefPairs) { - ColumnReference indexColRef = colRefPair.getFirst(); - ColumnReference dataColRef = colRefPair.getSecond(); - byte[] valueBytes = null; - Expression expression = new SingleCellColumnExpression(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public SortOrder getSortOrder() { - return null; - } - - @Override public Integer getScale() { - return null; - } - - @Override public Integer getMaxLength() { - return null; - } - - @Override public PDataType getDataType() { - return null; - } - }, dataColRef.getFamily(), dataColRef.getQualifier(), srcEncodingScheme, - srcImmutableStorageScheme); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr); - valueBytes = ptr.copyBytesIfNecessary(); - - if (valueBytes != null) { - ImmutableBytesPtr cq = indexColRef.getQualifierWritable(); - ImmutableBytesPtr cf = indexColRef.getFamilyWritable(); - if (put == null) { - put = new Put(destRowKey); - put.setDurability(!destWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); - } - put.add(kvBuilder.buildPut(rowKey, cf, cq, ts, new ImmutableBytesWritable(valueBytes))); - } - } - } - } - } - return put; + public boolean isRowDeleted(Mutation m) { + if (m.getFamilyCellMap().size() < this.nDataCFs) { + return false; } - - public enum DeleteType {SINGLE_VERSION, ALL_VERSIONS}; - private DeleteType getDeleteTypeOrNull(Collection pendingUpdates) { - return getDeleteTypeOrNull(pendingUpdates, this.nDataCFs); + for (List cells : m.getFamilyCellMap().values()) { + if (getDeleteTypeOrNull(cells, 1) == null) { // Checking CFs one by one + return false; + } } - - private DeleteType getDeleteTypeOrNull(Collection pendingUpdates, int nCFs) { - int nDeleteCF = 0; - int nDeleteVersionCF = 0; - for (Cell kv : pendingUpdates) { - if (kv.getType() == Cell.Type.DeleteFamilyVersion) { - nDeleteVersionCF++; - } - else if (kv.getType() == Cell.Type.DeleteFamily - // Since we don't include the index rows in the change set for txn tables, we need to detect row deletes that have transformed by TransactionProcessor - || TransactionUtil.isDeleteFamily(kv)) { - nDeleteCF++; - } - } - // This is what a delete looks like on the server side for mutable indexing... 
- // Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not - DeleteType deleteType = null; - if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) { - deleteType = DeleteType.SINGLE_VERSION; - } else { - int nDelete = nDeleteCF + nDeleteVersionCF; - if (nDelete>0 && nDelete >= nCFs) { - deleteType = DeleteType.ALL_VERSIONS; - } - } - return deleteType; - } - - public boolean isRowDeleted(Collection pendingUpdates) { - return getDeleteTypeOrNull(pendingUpdates) != null; - } - - public boolean isRowDeleted(Mutation m) { - if (m.getFamilyCellMap().size() < this.nDataCFs) { - return false; + return true; + } + + private boolean hasIndexedColumnChanged(ValueGetter oldState, + Collection pendingUpdates, long ts) throws IOException { + if (pendingUpdates.isEmpty()) { + return false; + } + Map newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size()); + for (Cell kv : pendingUpdates) { + newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv); + } + for (ColumnReference ref : indexedColumns) { + Cell newValue = newState.get(ref); + if (newValue != null) { // Indexed column has potentially changed + ImmutableBytesWritable oldValue = oldState.getLatestValue(ref, ts); + boolean newValueSetAsNull = + (newValue.getType() == Cell.Type.DeleteColumn || newValue.getType() == Cell.Type.Delete + || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY)); + boolean oldValueSetAsNull = oldValue == null || oldValue.getLength() == 0; + // If the new column value has to be set as null and the older value is null too, + // then just skip to the next indexed column. + if (newValueSetAsNull && oldValueSetAsNull) { + continue; + } + if (oldValueSetAsNull || newValueSetAsNull) { + return true; + } + // If the old value is different than the new value, the index row needs to be deleted + if ( + Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), + newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0 + ) { + return true; } - for (List cells : m.getFamilyCellMap().values()) { - if (getDeleteTypeOrNull(cells, 1) == null) { // Checking CFs one by one - return false; - } - } - return true; + } } + return false; + } - private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection pendingUpdates, long ts) throws IOException { - if (pendingUpdates.isEmpty()) { - return false; - } - Map newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size()); - for (Cell kv : pendingUpdates) { - newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv); - } - for (ColumnReference ref : indexedColumns) { - Cell newValue = newState.get(ref); - if (newValue != null) { // Indexed column has potentially changed - ImmutableBytesWritable oldValue = oldState.getLatestValue(ref, ts); - boolean newValueSetAsNull = (newValue.getType() == Cell.Type.DeleteColumn || - newValue.getType() == Cell.Type.Delete || - CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY)); - boolean oldValueSetAsNull = oldValue == null || oldValue.getLength() == 0; - //If the new column value has to be set as null and the older value is null too, - //then just skip to the next indexed column. 
- if (newValueSetAsNull && oldValueSetAsNull) { - continue; - } - if (oldValueSetAsNull || newValueSetAsNull) { - return true; - } - // If the old value is different than the new value, the index row needs to be deleted - if (Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), - newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0) { - return true; - } - } + public Delete buildRowDeleteMutation(byte[] indexRowKey, DeleteType deleteType, long ts) { + byte[] emptyCF = emptyKeyValueCFPtr.copyBytesIfNecessary(); + Delete delete = new Delete(indexRowKey); + + for (ColumnReference ref : getCoveredColumns()) { + ColumnReference indexColumn = coveredColumnsMap.get(ref); + // If table delete was single version, then index delete should be as well + if (deleteType == DeleteType.SINGLE_VERSION) { + delete.addFamilyVersion(indexColumn.getFamily(), ts); + } else { + delete.addFamily(indexColumn.getFamily(), ts); + } + } + if (deleteType == DeleteType.SINGLE_VERSION) { + delete.addFamilyVersion(emptyCF, ts); + } else { + delete.addFamily(emptyCF, ts); + } + delete.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); + return delete; + } + + /** + * Used for immutable indexes that only index PK column values. In that case, we can handle a data + * row deletion, since we can build the corresponding index row key. + */ + public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ImmutableBytesWritable dataRowKeyPtr, + long ts) throws IOException { + return buildDeleteMutation(kvBuilder, null, dataRowKeyPtr, Collections. emptyList(), ts, + null, null); + } + + public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ValueGetter oldState, + ImmutableBytesWritable dataRowKeyPtr, Collection pendingUpdates, long ts, + byte[] regionStartKey, byte[] regionEndKey) throws IOException { + byte[] indexRowKey = + this.buildRowKey(oldState, dataRowKeyPtr, regionStartKey, regionEndKey, ts); + // Delete the entire row if any of the indexed columns changed + DeleteType deleteType = null; + if ( + oldState == null || (deleteType = getDeleteTypeOrNull(pendingUpdates)) != null + || hasIndexedColumnChanged(oldState, pendingUpdates, ts) + ) { // Deleting the entire row + return buildRowDeleteMutation(indexRowKey, deleteType, ts); + } + Delete delete = null; + Set dataTableColRefs = coveredColumnsMap.keySet(); + // Delete columns for missing key values + for (Cell kv : pendingUpdates) { + if (kv.getType() != Cell.Type.Put) { + ColumnReference ref = + new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + if (dataTableColRefs.contains(ref)) { + if (delete == null) { + delete = new Delete(indexRowKey); + delete.setDurability(!indexWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); + } + ColumnReference indexColumn = coveredColumnsMap.get(ref); + // If point delete for data table, then use point delete for index as well + if (kv.getType() == Cell.Type.Delete) { + delete.addColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts); + } else { + delete.addColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts); + } } - return false; + } } + return delete; + } - public Delete buildRowDeleteMutation(byte[] indexRowKey, DeleteType deleteType, long ts) { - byte[] emptyCF = emptyKeyValueCFPtr.copyBytesIfNecessary(); - Delete delete = new Delete(indexRowKey); + public byte[] getIndexTableName() { + return indexTableName; + } - for (ColumnReference ref : getCoveredColumns()) { - ColumnReference indexColumn = coveredColumnsMap.get(ref); - // If table delete was single version, then index delete should be as well - if (deleteType == DeleteType.SINGLE_VERSION) { - delete.addFamilyVersion(indexColumn.getFamily(), ts); - } else { - delete.addFamily(indexColumn.getFamily(), ts); - } - } - if (deleteType == DeleteType.SINGLE_VERSION) { - delete.addFamilyVersion(emptyCF, ts); - } else { - delete.addFamily(emptyCF, ts); - } - delete.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL); - return delete; - } - - /** - * Used for immutable indexes that only index PK column values. In that case, we can handle a data row deletion, - * since we can build the corresponding index row key. - */ - public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ImmutableBytesWritable dataRowKeyPtr, long ts) throws IOException { - return buildDeleteMutation(kvBuilder, null, dataRowKeyPtr, Collections.emptyList(), ts, null, null); - } - - public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ValueGetter oldState, ImmutableBytesWritable dataRowKeyPtr, Collection pendingUpdates, long ts, byte[] regionStartKey, byte[] regionEndKey) throws IOException { - byte[] indexRowKey = this.buildRowKey(oldState, dataRowKeyPtr, regionStartKey, regionEndKey, ts); - // Delete the entire row if any of the indexed columns changed - DeleteType deleteType = null; - if (oldState == null || (deleteType=getDeleteTypeOrNull(pendingUpdates)) != null || hasIndexedColumnChanged(oldState, pendingUpdates, ts)) { // Deleting the entire row - return buildRowDeleteMutation(indexRowKey, deleteType, ts); + public Set getCoveredColumns() { + return coveredColumnsMap.keySet(); + } + + public Set getAllColumns() { + return allColumns; + } + + private void addColumnRefForScan(Set from, Set to) { + for (ColumnReference colRef : from) { + if (getDataImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + to.add(colRef); + } else { + to.add(new ColumnReference(colRef.getFamily(), + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); + } + } + } + + public Set getAllColumnsForDataTable() { + Set result = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + + coveredColumnsMap.size() + (indexWhereColumns == null ? 
0 : indexWhereColumns.size())); + addColumnRefForScan(indexedColumns, result); + addColumnRefForScan(coveredColumnsMap.keySet(), result); + if (indexWhereColumns != null) { + addColumnRefForScan(indexWhereColumns, result); + } + return result; + } + + public ImmutableBytesPtr getEmptyKeyValueFamily() { + // Since the metadata of an index table will never change, + // we can infer this based on the family of the first covered column + // If if there are no covered columns, we know it's our default name + return emptyKeyValueCFPtr; + } + + /** + * The logical index name. For global indexes on base tables this will be the same as the physical + * index table name (unless namespaces are enabled, then . gets replaced with : for the physical + * table name). For view indexes, the logical and physical names will be different because all + * view indexes of a base table are stored in the same physical table + * @return The logical index name + */ + public String getLogicalIndexName() { + return logicalIndexName; + } + + @Deprecated // Only called by code older than our 4.10 release + @Override + public void readFields(DataInput input) throws IOException { + int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input); + isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0; + nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1; + int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input); + boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0; + if (hasViewIndexId) { + // Fixed length + // Use legacy viewIndexIdType for clients older than 4.10 release + viewIndexId = new byte[MetaDataUtil.getLegacyViewIndexIdDataType().getByteSize()]; + viewIndexIdType = MetaDataUtil.getLegacyViewIndexIdDataType(); + input.readFully(viewIndexId); + } + int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1; + indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns); + for (int i = 0; i < nIndexedColumns; i++) { + byte[] cf = Bytes.readByteArray(input); + byte[] cq = Bytes.readByteArray(input); + indexedColumns.add(new ColumnReference(cf, cq)); + } + indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns); + for (int i = 0; i < nIndexedColumns; i++) { + PDataType type = PDataType.values()[WritableUtils.readVInt(input)]; + indexedColumnTypes.add(type); + } + int encodedCoveredolumnsAndLocalIndex = WritableUtils.readVInt(input); + isLocalIndex = encodedCoveredolumnsAndLocalIndex < 0; + int nCoveredColumns = Math.abs(encodedCoveredolumnsAndLocalIndex) - 1; + coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns); + for (int i = 0; i < nCoveredColumns; i++) { + byte[] dataTableCf = Bytes.readByteArray(input); + byte[] dataTableCq = Bytes.readByteArray(input); + ColumnReference dataTableRef = new ColumnReference(dataTableCf, dataTableCq); + byte[] indexTableCf = + isLocalIndex ? 
IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf; + byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq); + ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq); + coveredColumnsMap.put(dataTableRef, indexTableRef); + } + // Hack to serialize whether the index row key is optimizable + int len = WritableUtils.readVInt(input); + if (len < 0) { + rowKeyOrderOptimizable = false; + len *= -1; + } else { + rowKeyOrderOptimizable = true; + } + indexTableName = new byte[len]; + input.readFully(indexTableName, 0, len); + dataEmptyKeyValueCF = Bytes.readByteArray(input); + len = WritableUtils.readVInt(input); + // TODO remove this in the next major release + boolean isNewClient = false; + if (len < 0) { + isNewClient = true; + len = Math.abs(len); + } + byte[] emptyKeyValueCF = new byte[len]; + input.readFully(emptyKeyValueCF, 0, len); + emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF); + + if (isNewClient) { + int numIndexedExpressions = WritableUtils.readVInt(input); + indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions); + for (int i = 0; i < numIndexedExpressions; i++) { + Expression expression = + ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + indexedExpressions.add(expression); + } + } else { + indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size()); + Iterator colReferenceIter = indexedColumns.iterator(); + Iterator dataTypeIter = indexedColumnTypes.iterator(); + while (colReferenceIter.hasNext()) { + ColumnReference colRef = colReferenceIter.next(); + final PDataType dataType = dataTypeIter.next(); + indexedExpressions.add(new KeyValueColumnExpression(new PDatum() { + + @Override + public boolean isNullable() { + return true; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public PDataType getDataType() { + return dataType; + } + }, colRef.getFamily(), colRef.getQualifier())); + } + } + + rowKeyMetaData = newRowKeyMetaData(); + rowKeyMetaData.readFields(input); + int nDataCFs = WritableUtils.readVInt(input); + // Encode indexWALDisabled in nDataCFs + indexWALDisabled = nDataCFs < 0; + this.nDataCFs = Math.abs(nDataCFs) - 1; + int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input); + this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0; + this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows); + // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables. + this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; + this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + this.dataImmutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; + this.dataEncodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + initCachedState(); + } + + public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto, + RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException { + IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted); + maintainer.nIndexSaltBuckets = proto.getSaltBuckets(); + maintainer.isMultiTenant = proto.getIsMultiTenant(); + maintainer.viewIndexId = proto.hasViewIndexId() ? 
proto.getViewIndexId().toByteArray() : null; + maintainer.viewIndexIdType = proto.hasViewIndexIdType() + ? PDataType.fromTypeId(proto.getViewIndexIdType()) + : MetaDataUtil.getLegacyViewIndexIdDataType(); + List indexedColumnsList = proto.getIndexedColumnsList(); + maintainer.indexedColumns = new HashSet(indexedColumnsList.size()); + for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) { + maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), + colRefFromProto.getQualifier().toByteArray())); + } + List indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList(); + maintainer.indexedColumnTypes = new ArrayList(indexedColumnTypes.size()); + for (Integer typeOrdinal : indexedColumnTypes) { + maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]); + } + maintainer.indexTableName = proto.getIndexTableName().toByteArray(); + maintainer.indexDataColumnCount = dataTableRowKeySchema.getFieldCount(); + if (proto.getIndexDataColumnCount() != -1) { + maintainer.indexDataColumnCount = proto.getIndexDataColumnCount(); + } + maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable(); + maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray(); + ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = + proto.getEmptyKeyValueColFamily(); + maintainer.emptyKeyValueCFPtr = + new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), + emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength()); + maintainer.indexedExpressions = new ArrayList<>(); + try (ByteArrayInputStream stream = + new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) { + DataInput input = new DataInputStream(stream); + while (stream.available() > 0) { + int expressionOrdinal = WritableUtils.readVInt(input); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(input); + maintainer.indexedExpressions.add(expression); + } + } + maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema, + maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant); + try (ByteArrayInputStream stream = + new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) { + DataInput input = new DataInputStream(stream); + maintainer.rowKeyMetaData.readFields(input); + } + maintainer.nDataCFs = proto.getNumDataTableColFamilies(); + maintainer.indexWALDisabled = proto.getIndexWalDisabled(); + maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize(); + maintainer.immutableRows = proto.getImmutable(); + List indexedColumnInfoList = proto.getIndexedColumnInfoList(); + maintainer.indexedColumnsInfo = Sets.newHashSet(); + for (ColumnInfo info : indexedColumnInfoList) { + maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName())); + } + // proto doesn't support single byte so need an explicit cast here + maintainer.encodingScheme = + PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getEncodingScheme()); + maintainer.immutableStorageScheme = + PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getImmutableStorageScheme()); + maintainer.dataEncodingScheme = + PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getDataEncodingScheme()); + maintainer.dataImmutableStorageScheme = PTable.ImmutableStorageScheme + .fromSerializedValue((byte) proto.getDataImmutableStorageScheme()); + maintainer.isLocalIndex = 
proto.getIsLocalIndex(); + if (proto.hasParentTableType()) { + maintainer.parentTableType = PTableType.fromValue(proto.getParentTableType()); + } + + List dataTableColRefsForCoveredColumnsList = + proto.getDataTableColRefForCoveredColumnsList(); + List indexTableColRefsForCoveredColumnsList = + proto.getIndexTableColRefForCoveredColumnsList(); + maintainer.coveredColumnsMap = + Maps.newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size()); + boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS; + Iterator indexTableColRefItr = + indexTableColRefsForCoveredColumnsList.iterator(); + for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) { + ColumnReference dataTableColRef = new ColumnReference( + colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()); + ColumnReference indexTableColRef; + if (encodedColumnNames) { + ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next(); + indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), + fromProto.getQualifier().toByteArray()); + } else { + byte[] cq = + IndexUtil.getIndexColumnName(dataTableColRef.getFamily(), dataTableColRef.getQualifier()); + byte[] cf = maintainer.isLocalIndex + ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily()) + : dataTableColRef.getFamily(); + indexTableColRef = new ColumnReference(cf, cq); + } + maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef); + } + maintainer.logicalIndexName = proto.getLogicalIndexName(); + if (proto.hasIsUncovered()) { + maintainer.isUncovered = proto.getIsUncovered(); + } else { + maintainer.isUncovered = false; + } + if (proto.hasIndexWhere()) { + try (ByteArrayInputStream stream = + new ByteArrayInputStream(proto.getIndexWhere().toByteArray())) { + DataInput input = new DataInputStream(stream); + int expressionOrdinal = WritableUtils.readVInt(input); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(input); + maintainer.indexWhere = expression; + List indexWhereColumnsList = + proto.getIndexWhereColumnsList(); + maintainer.indexWhereColumns = new HashSet<>(indexWhereColumnsList.size()); + for (ServerCachingProtos.ColumnReference colRefFromProto : indexWhereColumnsList) { + maintainer.indexWhereColumns + .add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), + colRefFromProto.getQualifier().toByteArray())); } - Delete delete = null; - Set dataTableColRefs = coveredColumnsMap.keySet(); - // Delete columns for missing key values - for (Cell kv : pendingUpdates) { - if (kv.getType() != Cell.Type.Put) { - ColumnReference ref = - new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), - kv.getFamilyLength(), kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength()); - if (dataTableColRefs.contains(ref)) { - if (delete == null) { - delete = new Delete(indexRowKey); - delete.setDurability(!indexWALDisabled ? 
Durability.USE_DEFAULT : Durability.SKIP_WAL); - } - ColumnReference indexColumn = coveredColumnsMap.get(ref); - // If point delete for data table, then use point delete for index as well - if (kv.getType() == Cell.Type.Delete) { - delete.addColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts); - } else { - delete.addColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts); - } - } - } + } + } else { + maintainer.indexWhere = null; + maintainer.indexWhereColumns = null; + } + if (proto.hasIsCDCIndex()) { + maintainer.isCDCIndex = proto.getIsCDCIndex(); + } else { + maintainer.isCDCIndex = false; + } + maintainer.nDataTableSaltBuckets = + proto.hasDataTableSaltBuckets() ? proto.getDataTableSaltBuckets() : -1; + maintainer.initCachedState(); + return maintainer; + } + + @Deprecated // Only called by code older than our 4.10 release + @Override + public void write(DataOutput output) throws IOException { + // Encode nIndexSaltBuckets and isMultiTenant together + WritableUtils.writeVInt(output, (nIndexSaltBuckets + 1) * (isMultiTenant ? -1 : 1)); + // Encode indexedColumns.size() and whether or not there's a viewIndexId + WritableUtils.writeVInt(output, (indexedColumns.size() + 1) * (viewIndexId != null ? -1 : 1)); + if (viewIndexId != null) { + output.write(viewIndexId); + } + for (ColumnReference ref : indexedColumns) { + Bytes.writeByteArray(output, ref.getFamily()); + Bytes.writeByteArray(output, ref.getQualifier()); + } + // TODO remove indexedColumnTypes in the next major release + for (int i = 0; i < indexedColumnTypes.size(); i++) { + PDataType type = indexedColumnTypes.get(i); + WritableUtils.writeVInt(output, type.ordinal()); + } + // Encode coveredColumns.size() and whether or not this is a local index + WritableUtils.writeVInt(output, (coveredColumnsMap.size() + 1) * (isLocalIndex ? -1 : 1)); + for (ColumnReference ref : coveredColumnsMap.keySet()) { + Bytes.writeByteArray(output, ref.getFamily()); + Bytes.writeByteArray(output, ref.getQualifier()); + } + // TODO: remove when rowKeyOrderOptimizable hack no longer needed + WritableUtils.writeVInt(output, indexTableName.length * (rowKeyOrderOptimizable ? 1 : -1)); + output.write(indexTableName, 0, indexTableName.length); + Bytes.writeByteArray(output, dataEmptyKeyValueCF); + // TODO in order to maintain b/w compatibility encode emptyKeyValueCFPtr.getLength() as a + // negative value (so we can distinguish between new and old clients) + // when indexedColumnTypes is removed, remove this + WritableUtils.writeVInt(output, -emptyKeyValueCFPtr.getLength()); + output.write(emptyKeyValueCFPtr.get(), emptyKeyValueCFPtr.getOffset(), + emptyKeyValueCFPtr.getLength()); + + WritableUtils.writeVInt(output, indexedExpressions.size()); + for (Expression expression : indexedExpressions) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } + + rowKeyMetaData.write(output); + // Encode indexWALDisabled in nDataCFs + WritableUtils.writeVInt(output, (nDataCFs + 1) * (indexWALDisabled ? -1 : 1)); + // Encode estimatedIndexRowKeyBytes and immutableRows together. + WritableUtils.writeVInt(output, estimatedIndexRowKeyBytes * (immutableRows ? 
-1 : 1)); + } + + public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer maintainer) + throws IOException { + ServerCachingProtos.IndexMaintainer.Builder builder = + ServerCachingProtos.IndexMaintainer.newBuilder(); + builder.setSaltBuckets(maintainer.nIndexSaltBuckets); + builder.setIsMultiTenant(maintainer.isMultiTenant); + if (maintainer.viewIndexId != null) { + builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId)); + builder.setViewIndexIdType(maintainer.viewIndexIdType.getSqlType()); + } + for (ColumnReference colRef : maintainer.indexedColumns) { + ServerCachingProtos.ColumnReference.Builder cRefBuilder = + ServerCachingProtos.ColumnReference.newBuilder(); + cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); + builder.addIndexedColumns(cRefBuilder.build()); + } + for (PDataType dataType : maintainer.indexedColumnTypes) { + builder.addIndexedColumnTypeOrdinal(dataType.ordinal()); + } + for (Entry e : maintainer.coveredColumnsMap.entrySet()) { + ServerCachingProtos.ColumnReference.Builder cRefBuilder = + ServerCachingProtos.ColumnReference.newBuilder(); + ColumnReference dataTableColRef = e.getKey(); + cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier())); + builder.addDataTableColRefForCoveredColumns(cRefBuilder.build()); + if (maintainer.encodingScheme != NON_ENCODED_QUALIFIERS) { + // We need to serialize the colRefs of index tables only in case of encoded column names. + ColumnReference indexTableColRef = e.getValue(); + cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); + cRefBuilder.setFamily(ByteStringer.wrap(indexTableColRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(indexTableColRef.getQualifier())); + builder.addIndexTableColRefForCoveredColumns(cRefBuilder.build()); + } + } + builder.setIsLocalIndex(maintainer.isLocalIndex); + if (maintainer.parentTableType != null) { + builder.setParentTableType(maintainer.parentTableType.toString()); + } + builder.setIndexDataColumnCount(maintainer.indexDataColumnCount); + builder.setIndexTableName(ByteStringer.wrap(maintainer.indexTableName)); + builder.setRowKeyOrderOptimizable(maintainer.rowKeyOrderOptimizable); + builder.setDataTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.dataEmptyKeyValueCF)); + ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = + ServerCachingProtos.ImmutableBytesWritable.newBuilder(); + ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get())); + ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength()); + ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset()); + builder.setEmptyKeyValueColFamily(ibwBuilder.build()); + try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { + DataOutput output = new DataOutputStream(stream); + for (Expression expression : maintainer.indexedExpressions) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } + builder.setIndexedExpressions(ByteStringer.wrap(stream.toByteArray())); + } + try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { + DataOutput output = new DataOutputStream(stream); + maintainer.rowKeyMetaData.write(output); + builder.setRowKeyMetadata(ByteStringer.wrap(stream.toByteArray())); + } + builder.setNumDataTableColFamilies(maintainer.nDataCFs); + 
builder.setIndexWalDisabled(maintainer.indexWALDisabled); + builder.setIndexRowKeyByteSize(maintainer.estimatedIndexRowKeyBytes); + builder.setImmutable(maintainer.immutableRows); + for (Pair p : maintainer.indexedColumnsInfo) { + ServerCachingProtos.ColumnInfo.Builder ciBuilder = + ServerCachingProtos.ColumnInfo.newBuilder(); + if (p.getFirst() != null) { + ciBuilder.setFamilyName(p.getFirst()); + } + ciBuilder.setColumnName(p.getSecond()); + builder.addIndexedColumnInfo(ciBuilder.build()); + } + builder.setEncodingScheme(maintainer.encodingScheme.getSerializedMetadataValue()); + builder + .setImmutableStorageScheme(maintainer.immutableStorageScheme.getSerializedMetadataValue()); + builder.setLogicalIndexName(maintainer.logicalIndexName); + builder.setDataEncodingScheme(maintainer.dataEncodingScheme.getSerializedMetadataValue()); + builder.setDataImmutableStorageScheme( + maintainer.dataImmutableStorageScheme.getSerializedMetadataValue()); + builder.setIsUncovered(maintainer.isUncovered); + if (maintainer.indexWhere != null) { + try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { + DataOutput output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, ExpressionType.valueOf(maintainer.indexWhere).ordinal()); + maintainer.indexWhere.write(output); + builder.setIndexWhere(ByteStringer.wrap(stream.toByteArray())); + for (ColumnReference colRef : maintainer.indexWhereColumns) { + ServerCachingProtos.ColumnReference.Builder cRefBuilder = + ServerCachingProtos.ColumnReference.newBuilder(); + cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); + builder.addIndexWhereColumns(cRefBuilder.build()); } - return delete; + } } - - public byte[] getIndexTableName() { - return indexTableName; + builder.setIsCDCIndex(maintainer.isCDCIndex); + if (maintainer.isDataTableSalted) { + builder.setDataTableSaltBuckets(maintainer.nDataTableSaltBuckets); + } + return builder.build(); + } + + public int getEstimatedByteSize() { + int size = WritableUtils.getVIntSize(nIndexSaltBuckets); + size += WritableUtils.getVIntSize(estimatedIndexRowKeyBytes); + size += WritableUtils.getVIntSize(indexedColumns.size()); + size += viewIndexId == null ? 
0 : viewIndexId.length; + for (ColumnReference ref : indexedColumns) { + size += WritableUtils.getVIntSize(ref.getFamily().length); + size += ref.getFamily().length; + size += WritableUtils.getVIntSize(ref.getQualifier().length); + size += ref.getQualifier().length; + } + for (int i = 0; i < indexedColumnTypes.size(); i++) { + PDataType type = indexedColumnTypes.get(i); + size += WritableUtils.getVIntSize(type.ordinal()); + } + Set dataTableColRefs = coveredColumnsMap.keySet(); + size += WritableUtils.getVIntSize(dataTableColRefs.size()); + for (ColumnReference ref : dataTableColRefs) { + size += WritableUtils.getVIntSize(ref.getFamilyWritable().getLength()); + size += ref.getFamily().length; + size += WritableUtils.getVIntSize(ref.getQualifierWritable().getLength()); + size += ref.getQualifier().length; + } + size += indexTableName.length + WritableUtils.getVIntSize(indexTableName.length); + size += rowKeyMetaData.getByteSize(); + size += dataEmptyKeyValueCF.length + WritableUtils.getVIntSize(dataEmptyKeyValueCF.length); + size += + emptyKeyValueCFPtr.getLength() + WritableUtils.getVIntSize(emptyKeyValueCFPtr.getLength()); + size += WritableUtils.getVIntSize(nDataCFs + 1); + size += WritableUtils.getVIntSize(indexedExpressions.size()); + for (Expression expression : indexedExpressions) { + size += WritableUtils.getVIntSize(ExpressionType.valueOf(expression).ordinal()); + } + size += estimatedExpressionSize; + return size; + } + + public Expression getIndexWhere() { + return indexWhere; + } + + public Set getIndexWhereColumns() { + return indexWhereColumns; + } + + private int estimateIndexRowKeyByteSize(int indexColByteSize) { + int estimatedIndexRowKeyBytes = indexColByteSize + dataRowKeySchema.getEstimatedValueLength() + + (nIndexSaltBuckets == 0 || isLocalIndex || this.isDataTableSalted + ? 
0 + : SaltingUtil.NUM_SALTING_BYTES); + return estimatedIndexRowKeyBytes; + } + + /** + * Init calculated state reading/creating + */ + private void initCachedState() { + byte[] indexEmptyKvQualifier = + EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst(); + byte[] dataEmptyKvQualifier = + EncodedColumnsUtil.getEmptyKeyValueInfo(dataEncodingScheme).getFirst(); + indexEmptyKeyValueRef = new ColumnReference(dataEmptyKeyValueCF, indexEmptyKvQualifier); + dataEmptyKeyValueRef = new ColumnReference(dataEmptyKeyValueCF, dataEmptyKvQualifier); + this.allColumns = + Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumnsMap.size()); + // columns that are required to evaluate all expressions in indexedExpressions (not including + // columns in data row key) + this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size()); + for (Expression expression : indexedExpressions) { + KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression expression) { + if ( + indexedColumns.add( + new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier())) + ) { + indexedColumnTypes.add(expression.getDataType()); + } + return null; + } + }; + expression.accept(visitor); + } + allColumns.addAll(indexedColumns); + for (ColumnReference colRef : coveredColumnsMap.keySet()) { + if (immutableStorageScheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + allColumns.add(colRef); + } else { + allColumns.add(new ColumnReference(colRef.getFamily(), + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); + } } - - public Set getCoveredColumns() { - return coveredColumnsMap.keySet(); + + int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0); + int nIndexPkColumns = getIndexPkColumnCount(); + dataPkPosition = new int[nIndexPkColumns]; + Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT); + int numViewConstantColumns = 0; + BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet(); + for (int i = dataPkOffset; i < indexDataColumnCount; i++) { + if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) { + int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i - dataPkOffset); + this.dataPkPosition[indexPkPosition] = i; + } else { + numViewConstantColumns++; + } } - public Set getAllColumns() { - return allColumns; + // Calculate the max number of trailing nulls that we should get rid of after building the index + // row key. 
+ // We only get rid of nulls for variable length types, so we have to be careful to consider the + // type of the + // index table, not the data type of the data table + int expressionsPos = indexedExpressions.size(); + int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1; + while (indexPkPos >= 0) { + int dataPkPos = dataPkPosition[indexPkPos]; + boolean isDataNullable; + PDataType dataType; + if (dataPkPos == EXPRESSION_NOT_PRESENT) { + isDataNullable = true; + dataType = indexedExpressions.get(--expressionsPos).getDataType(); + } else { + Field dataField = dataRowKeySchema.getField(dataPkPos); + dataType = dataField.getDataType(); + isDataNullable = dataField.isNullable(); + } + PDataType indexDataType = IndexUtil.getIndexColumnDataType(isDataNullable, dataType); + if (indexDataType.isFixedWidth()) { + break; + } + indexPkPos--; } + maxTrailingNulls = nIndexPkColumns - indexPkPos - 1; + } + private int getIndexPkColumnCount() { + return getIndexPkColumnCount(indexDataColumnCount, indexedExpressions.size(), isDataTableSalted, + isMultiTenant); + } - private void addColumnRefForScan(Set from, Set to) { - for (ColumnReference colRef : from) { - if (getDataImmutableStorageScheme()==ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - to.add(colRef); - } else { - to.add(new ColumnReference(colRef.getFamily(), - QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); - } - } + private static int getIndexPkColumnCount(int indexDataColumnCount, int numIndexExpressions, + boolean isDataTableSalted, boolean isMultiTenant) { + return indexDataColumnCount + numIndexExpressions - (isDataTableSalted ? 1 : 0) + - (isMultiTenant ? 1 : 0); + } + + private RowKeyMetaData newRowKeyMetaData() { + return getIndexPkColumnCount() < 0xFF + ? new ByteSizeRowKeyMetaData() + : new IntSizedRowKeyMetaData(); + } + + private static RowKeyMetaData newRowKeyMetaData(IndexMaintainer i, RowKeySchema rowKeySchema, + int numIndexExpressions, boolean isDataTableSalted, boolean isMultiTenant) { + int indexPkColumnCount = getIndexPkColumnCount(i.indexDataColumnCount, numIndexExpressions, + isDataTableSalted, isMultiTenant); + return indexPkColumnCount < 0xFF + ? i.new ByteSizeRowKeyMetaData() + : i.new IntSizedRowKeyMetaData(); + } + + private RowKeyMetaData newRowKeyMetaData(int capacity) { + return capacity < 0xFF + ? new ByteSizeRowKeyMetaData(capacity) + : new IntSizedRowKeyMetaData(capacity); + } + + private static void writeInverted(byte[] buf, int offset, int length, DataOutput output) + throws IOException { + for (int i = offset; i < offset + length; i++) { + byte b = SortOrder.invert(buf[i]); + output.write(b); } - public Set getAllColumnsForDataTable() { - Set result = Sets.newLinkedHashSetWithExpectedSize( - indexedExpressions.size() + coveredColumnsMap.size() - + (indexWhereColumns == null ? 
0 : indexWhereColumns.size())); - addColumnRefForScan(indexedColumns, result); - addColumnRefForScan(coveredColumnsMap.keySet(), result); - if (indexWhereColumns != null) { - addColumnRefForScan(indexWhereColumns, result); - } - return result; + } + + private abstract class RowKeyMetaData implements Writable { + private BitSet descIndexColumnBitSet; + private BitSet viewConstantColumnBitSet; + + private RowKeyMetaData() { } - public ImmutableBytesPtr getEmptyKeyValueFamily() { - // Since the metadata of an index table will never change, - // we can infer this based on the family of the first covered column - // If if there are no covered columns, we know it's our default name - return emptyKeyValueCFPtr; + private RowKeyMetaData(int nIndexedColumns) { + descIndexColumnBitSet = BitSet.withCapacity(nIndexedColumns); + viewConstantColumnBitSet = BitSet.withCapacity(dataRowKeySchema.getMaxFields()); // Size based + // on number + // of data PK + // columns } - /** - * The logical index name. For global indexes on base tables this will be the same as the - * physical index table name (unless namespaces are enabled, then . gets replaced with : for - * the physical table name). For view indexes, the logical and physical names will be - * different because all view indexes of a base table are stored in the same physical table - * @return The logical index name - */ - public String getLogicalIndexName() { - return logicalIndexName; + protected int getByteSize() { + return BitSet.getByteSize(getIndexPkColumnCount()) * 3 + + BitSet.getByteSize(dataRowKeySchema.getMaxFields()); } - @Deprecated // Only called by code older than our 4.10 release + protected abstract int getIndexPkPosition(int dataPkPosition); + + protected abstract int setIndexPkPosition(int dataPkPosition, int indexPkPosition); + @Override public void readFields(DataInput input) throws IOException { - int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input); - isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0; - nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1; - int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input); - boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0; - if (hasViewIndexId) { - // Fixed length - //Use legacy viewIndexIdType for clients older than 4.10 release - viewIndexId = new byte[MetaDataUtil.getLegacyViewIndexIdDataType().getByteSize()]; - viewIndexIdType = MetaDataUtil.getLegacyViewIndexIdDataType(); - input.readFully(viewIndexId); - } - int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1; - indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns); - for (int i = 0; i < nIndexedColumns; i++) { - byte[] cf = Bytes.readByteArray(input); - byte[] cq = Bytes.readByteArray(input); - indexedColumns.add(new ColumnReference(cf,cq)); - } - indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns); - for (int i = 0; i < nIndexedColumns; i++) { - PDataType type = PDataType.values()[WritableUtils.readVInt(input)]; - indexedColumnTypes.add(type); - } - int encodedCoveredolumnsAndLocalIndex = WritableUtils.readVInt(input); - isLocalIndex = encodedCoveredolumnsAndLocalIndex < 0; - int nCoveredColumns = Math.abs(encodedCoveredolumnsAndLocalIndex) - 1; - coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns); - for (int i = 0; i < nCoveredColumns; i++) { - byte[] dataTableCf = Bytes.readByteArray(input); - byte[] dataTableCq = Bytes.readByteArray(input); - ColumnReference dataTableRef = new 
ColumnReference(dataTableCf, dataTableCq); - byte[] indexTableCf = isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf; - byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq); - ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq); - coveredColumnsMap.put(dataTableRef, indexTableRef); - } - // Hack to serialize whether the index row key is optimizable - int len = WritableUtils.readVInt(input); - if (len < 0) { - rowKeyOrderOptimizable = false; - len *= -1; - } else { - rowKeyOrderOptimizable = true; - } - indexTableName = new byte[len]; - input.readFully(indexTableName, 0, len); - dataEmptyKeyValueCF = Bytes.readByteArray(input); - len = WritableUtils.readVInt(input); - //TODO remove this in the next major release - boolean isNewClient = false; - if (len < 0) { - isNewClient = true; - len=Math.abs(len); - } - byte [] emptyKeyValueCF = new byte[len]; - input.readFully(emptyKeyValueCF, 0, len); - emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF); - - if (isNewClient) { - int numIndexedExpressions = WritableUtils.readVInt(input); - indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions); - for (int i = 0; i < numIndexedExpressions; i++) { - Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); - indexedExpressions.add(expression); - } - } - else { - indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size()); - Iterator colReferenceIter = indexedColumns.iterator(); - Iterator dataTypeIter = indexedColumnTypes.iterator(); - while (colReferenceIter.hasNext()) { - ColumnReference colRef = colReferenceIter.next(); - final PDataType dataType = dataTypeIter.next(); - indexedExpressions.add(new KeyValueColumnExpression(new PDatum() { - - @Override - public boolean isNullable() { - return true; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public PDataType getDataType() { - return dataType; - } - }, colRef.getFamily(), colRef.getQualifier())); - } - } - - rowKeyMetaData = newRowKeyMetaData(); - rowKeyMetaData.readFields(input); - int nDataCFs = WritableUtils.readVInt(input); - // Encode indexWALDisabled in nDataCFs - indexWALDisabled = nDataCFs < 0; - this.nDataCFs = Math.abs(nDataCFs) - 1; - int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input); - this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0; - this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows); - // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables. 
- this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; - this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - this.dataImmutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; - this.dataEncodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - initCachedState(); - } - - - public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto, RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException { - IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted); - maintainer.nIndexSaltBuckets = proto.getSaltBuckets(); - maintainer.isMultiTenant = proto.getIsMultiTenant(); - maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null; - maintainer.viewIndexIdType = proto.hasViewIndexIdType() - ? PDataType.fromTypeId(proto.getViewIndexIdType()) - : MetaDataUtil.getLegacyViewIndexIdDataType(); - List indexedColumnsList = proto.getIndexedColumnsList(); - maintainer.indexedColumns = new HashSet(indexedColumnsList.size()); - for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) { - maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray())); - } - List indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList(); - maintainer.indexedColumnTypes = new ArrayList(indexedColumnTypes.size()); - for (Integer typeOrdinal : indexedColumnTypes) { - maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]); - } - maintainer.indexTableName = proto.getIndexTableName().toByteArray(); - maintainer.indexDataColumnCount = dataTableRowKeySchema.getFieldCount(); - if (proto.getIndexDataColumnCount() != -1) { - maintainer.indexDataColumnCount = proto.getIndexDataColumnCount(); - } - maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable(); - maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray(); - ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily(); - maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength()); - maintainer.indexedExpressions = new ArrayList<>(); - try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) { - DataInput input = new DataInputStream(stream); - while (stream.available() > 0) { - int expressionOrdinal = WritableUtils.readVInt(input); - Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(input); - maintainer.indexedExpressions.add(expression); - } - } - maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema, maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant); - try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) { - DataInput input = new DataInputStream(stream); - maintainer.rowKeyMetaData.readFields(input); - } - maintainer.nDataCFs = proto.getNumDataTableColFamilies(); - maintainer.indexWALDisabled = proto.getIndexWalDisabled(); - maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize(); - maintainer.immutableRows = proto.getImmutable(); - List indexedColumnInfoList = proto.getIndexedColumnInfoList(); - maintainer.indexedColumnsInfo = Sets.newHashSet(); - for (ColumnInfo info : 
indexedColumnInfoList) { - maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName())); - } - // proto doesn't support single byte so need an explicit cast here - maintainer.encodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte)proto.getEncodingScheme()); - maintainer.immutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte)proto.getImmutableStorageScheme()); - maintainer.dataEncodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte)proto.getDataEncodingScheme()); - maintainer.dataImmutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte)proto.getDataImmutableStorageScheme()); - maintainer.isLocalIndex = proto.getIsLocalIndex(); - if (proto.hasParentTableType()) { - maintainer.parentTableType = PTableType.fromValue(proto.getParentTableType()); - } - - List dataTableColRefsForCoveredColumnsList = proto.getDataTableColRefForCoveredColumnsList(); - List indexTableColRefsForCoveredColumnsList = proto.getIndexTableColRefForCoveredColumnsList(); - maintainer.coveredColumnsMap = Maps.newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size()); - boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS; - Iterator indexTableColRefItr = indexTableColRefsForCoveredColumnsList.iterator(); - for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) { - ColumnReference dataTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier( ).toByteArray()); - ColumnReference indexTableColRef; - if (encodedColumnNames) { - ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next(); - indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), fromProto.getQualifier( ).toByteArray()); - } else { - byte[] cq = IndexUtil.getIndexColumnName(dataTableColRef.getFamily(), dataTableColRef.getQualifier()); - byte[] cf = maintainer.isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily()) : dataTableColRef.getFamily(); - indexTableColRef = new ColumnReference(cf, cq); - } - maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef); - } - maintainer.logicalIndexName = proto.getLogicalIndexName(); - if (proto.hasIsUncovered()) { - maintainer.isUncovered = proto.getIsUncovered(); - } else { - maintainer.isUncovered = false; - } - if (proto.hasIndexWhere()) { - try (ByteArrayInputStream stream = - new ByteArrayInputStream(proto.getIndexWhere().toByteArray())) { - DataInput input = new DataInputStream(stream); - int expressionOrdinal = WritableUtils.readVInt(input); - Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(input); - maintainer.indexWhere = expression; - List indexWhereColumnsList = - proto.getIndexWhereColumnsList(); - maintainer.indexWhereColumns = new HashSet<>(indexWhereColumnsList.size()); - for (ServerCachingProtos.ColumnReference colRefFromProto : indexWhereColumnsList) { - maintainer.indexWhereColumns.add(new ColumnReference( - colRefFromProto.getFamily().toByteArray(), - colRefFromProto.getQualifier().toByteArray())); - } - } - } else { - maintainer.indexWhere = null; - maintainer.indexWhereColumns = null; - } - if (proto.hasIsCDCIndex()) { - maintainer.isCDCIndex = proto.getIsCDCIndex(); - } else { - maintainer.isCDCIndex = false; - } - maintainer.nDataTableSaltBuckets = proto.hasDataTableSaltBuckets() ? 
- proto.getDataTableSaltBuckets() : -1; - maintainer.initCachedState(); - return maintainer; + int length = getIndexPkColumnCount(); + descIndexColumnBitSet = BitSet.read(input, length); + int vclength = dataRowKeySchema.getMaxFields(); + viewConstantColumnBitSet = BitSet.read(input, vclength); } - - @Deprecated // Only called by code older than our 4.10 release + @Override public void write(DataOutput output) throws IOException { - // Encode nIndexSaltBuckets and isMultiTenant together - WritableUtils.writeVInt(output, (nIndexSaltBuckets + 1) * (isMultiTenant ? -1 : 1)); - // Encode indexedColumns.size() and whether or not there's a viewIndexId - WritableUtils.writeVInt(output, (indexedColumns.size() + 1) * (viewIndexId != null ? -1 : 1)); - if (viewIndexId != null) { - output.write(viewIndexId); - } - for (ColumnReference ref : indexedColumns) { - Bytes.writeByteArray(output, ref.getFamily()); - Bytes.writeByteArray(output, ref.getQualifier()); - } - //TODO remove indexedColumnTypes in the next major release - for (int i = 0; i < indexedColumnTypes.size(); i++) { - PDataType type = indexedColumnTypes.get(i); - WritableUtils.writeVInt(output, type.ordinal()); - } - // Encode coveredColumns.size() and whether or not this is a local index - WritableUtils.writeVInt(output, (coveredColumnsMap.size() + 1) * (isLocalIndex ? -1 : 1)); - for (ColumnReference ref : coveredColumnsMap.keySet()) { - Bytes.writeByteArray(output, ref.getFamily()); - Bytes.writeByteArray(output, ref.getQualifier()); - } - // TODO: remove when rowKeyOrderOptimizable hack no longer needed - WritableUtils.writeVInt(output,indexTableName.length * (rowKeyOrderOptimizable ? 1 : -1)); - output.write(indexTableName, 0, indexTableName.length); - Bytes.writeByteArray(output, dataEmptyKeyValueCF); - // TODO in order to maintain b/w compatibility encode emptyKeyValueCFPtr.getLength() as a negative value (so we can distinguish between new and old clients) - // when indexedColumnTypes is removed, remove this - WritableUtils.writeVInt(output,-emptyKeyValueCFPtr.getLength()); - output.write(emptyKeyValueCFPtr.get(),emptyKeyValueCFPtr.getOffset(), emptyKeyValueCFPtr.getLength()); - - WritableUtils.writeVInt(output, indexedExpressions.size()); - for (Expression expression : indexedExpressions) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - - rowKeyMetaData.write(output); - // Encode indexWALDisabled in nDataCFs - WritableUtils.writeVInt(output, (nDataCFs + 1) * (indexWALDisabled ? -1 : 1)); - // Encode estimatedIndexRowKeyBytes and immutableRows together. - WritableUtils.writeVInt(output, estimatedIndexRowKeyBytes * (immutableRows ? 
-1 : 1)); - } - - public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer maintainer) throws IOException { - ServerCachingProtos.IndexMaintainer.Builder builder = ServerCachingProtos.IndexMaintainer.newBuilder(); - builder.setSaltBuckets(maintainer.nIndexSaltBuckets); - builder.setIsMultiTenant(maintainer.isMultiTenant); - if (maintainer.viewIndexId != null) { - builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId)); - builder.setViewIndexIdType(maintainer.viewIndexIdType.getSqlType()); - } - for (ColumnReference colRef : maintainer.indexedColumns) { - ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); - builder.addIndexedColumns(cRefBuilder.build()); - } - for (PDataType dataType : maintainer.indexedColumnTypes) { - builder.addIndexedColumnTypeOrdinal(dataType.ordinal()); - } - for (Entry e : maintainer.coveredColumnsMap.entrySet()) { - ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - ColumnReference dataTableColRef = e.getKey(); - cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier())); - builder.addDataTableColRefForCoveredColumns(cRefBuilder.build()); - if (maintainer.encodingScheme != NON_ENCODED_QUALIFIERS) { - // We need to serialize the colRefs of index tables only in case of encoded column names. - ColumnReference indexTableColRef = e.getValue(); - cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - cRefBuilder.setFamily(ByteStringer.wrap(indexTableColRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(indexTableColRef.getQualifier())); - builder.addIndexTableColRefForCoveredColumns(cRefBuilder.build()); - } - } - builder.setIsLocalIndex(maintainer.isLocalIndex); - if (maintainer.parentTableType != null) { - builder.setParentTableType(maintainer.parentTableType.toString()); - } - builder.setIndexDataColumnCount(maintainer.indexDataColumnCount); - builder.setIndexTableName(ByteStringer.wrap(maintainer.indexTableName)); - builder.setRowKeyOrderOptimizable(maintainer.rowKeyOrderOptimizable); - builder.setDataTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.dataEmptyKeyValueCF)); - ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = ServerCachingProtos.ImmutableBytesWritable.newBuilder(); - ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get())); - ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength()); - ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset()); - builder.setEmptyKeyValueColFamily(ibwBuilder.build()); - try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { - DataOutput output = new DataOutputStream(stream); - for (Expression expression : maintainer.indexedExpressions) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - builder.setIndexedExpressions(ByteStringer.wrap(stream.toByteArray())); - } - try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { - DataOutput output = new DataOutputStream(stream); - maintainer.rowKeyMetaData.write(output); - builder.setRowKeyMetadata(ByteStringer.wrap(stream.toByteArray())); - } - builder.setNumDataTableColFamilies(maintainer.nDataCFs); - 
builder.setIndexWalDisabled(maintainer.indexWALDisabled); - builder.setIndexRowKeyByteSize(maintainer.estimatedIndexRowKeyBytes); - builder.setImmutable(maintainer.immutableRows); - for (Pair p : maintainer.indexedColumnsInfo) { - ServerCachingProtos.ColumnInfo.Builder ciBuilder = ServerCachingProtos.ColumnInfo.newBuilder(); - if (p.getFirst() != null) { - ciBuilder.setFamilyName(p.getFirst()); - } - ciBuilder.setColumnName(p.getSecond()); - builder.addIndexedColumnInfo(ciBuilder.build()); - } - builder.setEncodingScheme(maintainer.encodingScheme.getSerializedMetadataValue()); - builder.setImmutableStorageScheme(maintainer.immutableStorageScheme.getSerializedMetadataValue()); - builder.setLogicalIndexName(maintainer.logicalIndexName); - builder.setDataEncodingScheme(maintainer.dataEncodingScheme.getSerializedMetadataValue()); - builder.setDataImmutableStorageScheme(maintainer.dataImmutableStorageScheme.getSerializedMetadataValue()); - builder.setIsUncovered(maintainer.isUncovered); - if (maintainer.indexWhere != null) { - try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { - DataOutput output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, - ExpressionType.valueOf(maintainer.indexWhere).ordinal()); - maintainer.indexWhere.write(output); - builder.setIndexWhere(ByteStringer.wrap(stream.toByteArray())); - for (ColumnReference colRef : maintainer.indexWhereColumns) { - ServerCachingProtos.ColumnReference.Builder cRefBuilder = - ServerCachingProtos.ColumnReference.newBuilder(); - cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); - builder.addIndexWhereColumns(cRefBuilder.build()); - } - } - } - builder.setIsCDCIndex(maintainer.isCDCIndex); - if (maintainer.isDataTableSalted) { - builder.setDataTableSaltBuckets(maintainer.nDataTableSaltBuckets); - } - return builder.build(); - } - - public int getEstimatedByteSize() { - int size = WritableUtils.getVIntSize(nIndexSaltBuckets); - size += WritableUtils.getVIntSize(estimatedIndexRowKeyBytes); - size += WritableUtils.getVIntSize(indexedColumns.size()); - size += viewIndexId == null ? 
0 : viewIndexId.length; - for (ColumnReference ref : indexedColumns) { - size += WritableUtils.getVIntSize(ref.getFamily().length); - size += ref.getFamily().length; - size += WritableUtils.getVIntSize(ref.getQualifier().length); - size += ref.getQualifier().length; - } - for (int i = 0; i < indexedColumnTypes.size(); i++) { - PDataType type = indexedColumnTypes.get(i); - size += WritableUtils.getVIntSize(type.ordinal()); - } - Set dataTableColRefs = coveredColumnsMap.keySet(); - size += WritableUtils.getVIntSize(dataTableColRefs.size()); - for (ColumnReference ref : dataTableColRefs) { - size += WritableUtils.getVIntSize(ref.getFamilyWritable().getLength()); - size += ref.getFamily().length; - size += WritableUtils.getVIntSize(ref.getQualifierWritable().getLength()); - size += ref.getQualifier().length; - } - size += indexTableName.length + WritableUtils.getVIntSize(indexTableName.length); - size += rowKeyMetaData.getByteSize(); - size += dataEmptyKeyValueCF.length + WritableUtils.getVIntSize(dataEmptyKeyValueCF.length); - size += emptyKeyValueCFPtr.getLength() + WritableUtils.getVIntSize(emptyKeyValueCFPtr.getLength()); - size += WritableUtils.getVIntSize(nDataCFs+1); - size += WritableUtils.getVIntSize(indexedExpressions.size()); - for (Expression expression : indexedExpressions) { - size += WritableUtils.getVIntSize(ExpressionType.valueOf(expression).ordinal()); - } - size += estimatedExpressionSize; - return size; - } - public Expression getIndexWhere() { - return indexWhere; - } - - public Set getIndexWhereColumns() { - return indexWhereColumns; - } - private int estimateIndexRowKeyByteSize(int indexColByteSize) { - int estimatedIndexRowKeyBytes = indexColByteSize + dataRowKeySchema.getEstimatedValueLength() + (nIndexSaltBuckets == 0 || isLocalIndex || this.isDataTableSalted ? 0 : SaltingUtil.NUM_SALTING_BYTES); - return estimatedIndexRowKeyBytes; - } - - /** - * Init calculated state reading/creating - */ - private void initCachedState() { - byte[] indexEmptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst(); - byte[] dataEmptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(dataEncodingScheme).getFirst(); - indexEmptyKeyValueRef = new ColumnReference(dataEmptyKeyValueCF, indexEmptyKvQualifier); - dataEmptyKeyValueRef = new ColumnReference(dataEmptyKeyValueCF, dataEmptyKvQualifier); - this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumnsMap.size()); - // columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key) - this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size()); - for (Expression expression : indexedExpressions) { - KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - if (indexedColumns.add(new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier()))) { - indexedColumnTypes.add(expression.getDataType()); - } - return null; - } - }; - expression.accept(visitor); - } - allColumns.addAll(indexedColumns); - for (ColumnReference colRef : coveredColumnsMap.keySet()) { - if (immutableStorageScheme==ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - allColumns.add(colRef); - } else { - allColumns.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); - } - } - - int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 
1 : 0); - int nIndexPkColumns = getIndexPkColumnCount(); - dataPkPosition = new int[nIndexPkColumns]; - Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT); - int numViewConstantColumns = 0; - BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet(); - for (int i = dataPkOffset; i < indexDataColumnCount; i++) { - if (!viewConstantColumnBitSet.get(i) || isIndexOnBaseTable()) { - int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i-dataPkOffset); - this.dataPkPosition[indexPkPosition] = i; - } else { - numViewConstantColumns++; - } - } - - // Calculate the max number of trailing nulls that we should get rid of after building the index row key. - // We only get rid of nulls for variable length types, so we have to be careful to consider the type of the - // index table, not the data type of the data table - int expressionsPos = indexedExpressions.size(); - int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1; - while (indexPkPos >= 0) { - int dataPkPos = dataPkPosition[indexPkPos]; - boolean isDataNullable; - PDataType dataType; - if (dataPkPos == EXPRESSION_NOT_PRESENT) { - isDataNullable = true; - dataType = indexedExpressions.get(--expressionsPos).getDataType(); - } else { - Field dataField = dataRowKeySchema.getField(dataPkPos); - dataType = dataField.getDataType(); - isDataNullable = dataField.isNullable(); - } - PDataType indexDataType = IndexUtil.getIndexColumnDataType(isDataNullable, dataType); - if (indexDataType.isFixedWidth()) { - break; - } - indexPkPos--; - } - maxTrailingNulls = nIndexPkColumns-indexPkPos-1; + int length = getIndexPkColumnCount(); + BitSet.write(output, descIndexColumnBitSet, length); + int vclength = dataRowKeySchema.getMaxFields(); + BitSet.write(output, viewConstantColumnBitSet, vclength); } - private int getIndexPkColumnCount() { - return getIndexPkColumnCount(indexDataColumnCount, indexedExpressions.size(), - isDataTableSalted, isMultiTenant); - } - - private static int getIndexPkColumnCount(int indexDataColumnCount, int numIndexExpressions, boolean isDataTableSalted, boolean isMultiTenant) { - return indexDataColumnCount + numIndexExpressions - (isDataTableSalted ? 1 : 0) - (isMultiTenant ? 1 : 0); - } - - private RowKeyMetaData newRowKeyMetaData() { - return getIndexPkColumnCount() < 0xFF ? new ByteSizeRowKeyMetaData() : new IntSizedRowKeyMetaData(); - } - - private static RowKeyMetaData newRowKeyMetaData(IndexMaintainer i, RowKeySchema rowKeySchema, int numIndexExpressions, boolean isDataTableSalted, boolean isMultiTenant) { - int indexPkColumnCount = getIndexPkColumnCount(i.indexDataColumnCount, numIndexExpressions, - isDataTableSalted, isMultiTenant); - return indexPkColumnCount < 0xFF ? i.new ByteSizeRowKeyMetaData() : i.new IntSizedRowKeyMetaData(); + private BitSet getDescIndexColumnBitSet() { + return descIndexColumnBitSet; } - private RowKeyMetaData newRowKeyMetaData(int capacity) { - return capacity < 0xFF ? 
new ByteSizeRowKeyMetaData(capacity) : new IntSizedRowKeyMetaData(capacity); + private BitSet getViewConstantColumnBitSet() { + return viewConstantColumnBitSet; } + } - private static void writeInverted(byte[] buf, int offset, int length, DataOutput output) throws IOException { - for (int i = offset; i < offset + length; i++) { - byte b = SortOrder.invert(buf[i]); - output.write(b); - } - } - - private abstract class RowKeyMetaData implements Writable { - private BitSet descIndexColumnBitSet; - private BitSet viewConstantColumnBitSet; - - private RowKeyMetaData() { - } - - private RowKeyMetaData(int nIndexedColumns) { - descIndexColumnBitSet = BitSet.withCapacity(nIndexedColumns); - viewConstantColumnBitSet = BitSet.withCapacity(dataRowKeySchema.getMaxFields()); // Size based on number of data PK columns - } - - protected int getByteSize() { - return BitSet.getByteSize(getIndexPkColumnCount()) * 3 + BitSet.getByteSize(dataRowKeySchema.getMaxFields()); - } - - protected abstract int getIndexPkPosition(int dataPkPosition); - protected abstract int setIndexPkPosition(int dataPkPosition, int indexPkPosition); - - @Override - public void readFields(DataInput input) throws IOException { - int length = getIndexPkColumnCount(); - descIndexColumnBitSet = BitSet.read(input, length); - int vclength = dataRowKeySchema.getMaxFields(); - viewConstantColumnBitSet = BitSet.read(input, vclength); - } - - @Override - public void write(DataOutput output) throws IOException { - int length = getIndexPkColumnCount(); - BitSet.write(output, descIndexColumnBitSet, length); - int vclength = dataRowKeySchema.getMaxFields(); - BitSet.write(output, viewConstantColumnBitSet, vclength); - } + private static int BYTE_OFFSET = 127; - private BitSet getDescIndexColumnBitSet() { - return descIndexColumnBitSet; - } + private class ByteSizeRowKeyMetaData extends RowKeyMetaData { + private byte[] indexPkPosition; - private BitSet getViewConstantColumnBitSet() { - return viewConstantColumnBitSet; - } + private ByteSizeRowKeyMetaData() { } - - private static int BYTE_OFFSET = 127; - - private class ByteSizeRowKeyMetaData extends RowKeyMetaData { - private byte[] indexPkPosition; - - private ByteSizeRowKeyMetaData() { - } - private ByteSizeRowKeyMetaData(int nIndexedColumns) { - super(nIndexedColumns); - this.indexPkPosition = new byte[nIndexedColumns]; - } - - @Override - protected int getIndexPkPosition(int dataPkPosition) { - // Use offset for byte so that we can get full range of 0 - 255 - // We use -128 as marker for a non row key index column, - // that's why our offset if 127 instead of 128 - return this.indexPkPosition[dataPkPosition] + BYTE_OFFSET; - } + private ByteSizeRowKeyMetaData(int nIndexedColumns) { + super(nIndexedColumns); + this.indexPkPosition = new byte[nIndexedColumns]; + } - @Override - protected int setIndexPkPosition(int dataPkPosition, int indexPkPosition) { - return this.indexPkPosition[dataPkPosition] = (byte)(indexPkPosition - BYTE_OFFSET); - } + @Override + protected int getIndexPkPosition(int dataPkPosition) { + // Use offset for byte so that we can get full range of 0 - 255 + // We use -128 as marker for a non row key index column, + // that's why our offset if 127 instead of 128 + return this.indexPkPosition[dataPkPosition] + BYTE_OFFSET; + } - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - output.write(indexPkPosition); - } + @Override + protected int setIndexPkPosition(int dataPkPosition, int indexPkPosition) { + return 
this.indexPkPosition[dataPkPosition] = (byte) (indexPkPosition - BYTE_OFFSET); + } - @Override - protected int getByteSize() { - return super.getByteSize() + indexPkPosition.length; - } + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + output.write(indexPkPosition); + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.indexPkPosition = new byte[getIndexPkColumnCount()]; - input.readFully(indexPkPosition); - } + @Override + protected int getByteSize() { + return super.getByteSize() + indexPkPosition.length; } - - private class IntSizedRowKeyMetaData extends RowKeyMetaData { - private int[] indexPkPosition; - - private IntSizedRowKeyMetaData() { - } - private IntSizedRowKeyMetaData(int nIndexedColumns) { - super(nIndexedColumns); - this.indexPkPosition = new int[nIndexedColumns]; - } - - @Override - protected int getIndexPkPosition(int dataPkPosition) { - return this.indexPkPosition[dataPkPosition]; - } + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.indexPkPosition = new byte[getIndexPkColumnCount()]; + input.readFully(indexPkPosition); + } + } - @Override - protected int setIndexPkPosition(int dataPkPosition, int indexPkPosition) { - return this.indexPkPosition[dataPkPosition] = indexPkPosition; - } - - @Override - public void write(DataOutput output) throws IOException { - super.write(output); - for (int i = 0; i < indexPkPosition.length; i++) { - output.writeInt(indexPkPosition[i]); - } - } + private class IntSizedRowKeyMetaData extends RowKeyMetaData { + private int[] indexPkPosition; - @Override - protected int getByteSize() { - return super.getByteSize() + indexPkPosition.length * Bytes.SIZEOF_INT; - } + private IntSizedRowKeyMetaData() { + } - @Override - public void readFields(DataInput input) throws IOException { - super.readFields(input); - this.indexPkPosition = new int[getIndexPkColumnCount()]; - for (int i = 0; i < indexPkPosition.length; i++) { - indexPkPosition[i] = input.readInt(); - } - } + private IntSizedRowKeyMetaData(int nIndexedColumns) { + super(nIndexedColumns); + this.indexPkPosition = new int[nIndexedColumns]; } @Override - public Iterator iterator() { - return allColumns.iterator(); + protected int getIndexPkPosition(int dataPkPosition) { + return this.indexPkPosition[dataPkPosition]; } - public ValueGetter createGetterFromKeyValues(final byte[] rowKey, Collection pendingUpdates) { - final Map valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates - .size()); - for (Cell kv : pendingUpdates) { - // create new pointers to each part of the kv - ImmutableBytesPtr value = new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), value); - } - return new AbstractValueGetter() { - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { - if(ref.equals(indexEmptyKeyValueRef)) return null; - return valueMap.get(ref); - } - @Override - public byte[] getRowKey() { - return rowKey; - } - }; + @Override + protected int setIndexPkPosition(int dataPkPosition, int indexPkPosition) { + return this.indexPkPosition[dataPkPosition] = indexPkPosition; } - public byte[] getDataEmptyKeyValueCF() { - return dataEmptyKeyValueCF; - } - - public boolean isLocalIndex() { - return 
isLocalIndex; + @Override + public void write(DataOutput output) throws IOException { + super.write(output); + for (int i = 0; i < indexPkPosition.length; i++) { + output.writeInt(indexPkPosition[i]); + } } - public boolean isUncovered() { - return isUncovered; - } - public boolean isCDCIndex() { - return isCDCIndex; - } - - public boolean isImmutableRows() { - return immutableRows; - } - - public boolean isIndexOnBaseTable() { - if (parentTableType == null) { - return false; - } - return parentTableType == PTableType.TABLE; + @Override + protected int getByteSize() { + return super.getByteSize() + indexPkPosition.length * Bytes.SIZEOF_INT; } - - public Set getIndexedColumns() { - return indexedColumns; + + @Override + public void readFields(DataInput input) throws IOException { + super.readFields(input); + this.indexPkPosition = new int[getIndexPkColumnCount()]; + for (int i = 0; i < indexPkPosition.length; i++) { + indexPkPosition[i] = input.readInt(); + } } + } + + @Override + public Iterator iterator() { + return allColumns.iterator(); + } + + public ValueGetter createGetterFromKeyValues(final byte[] rowKey, + Collection pendingUpdates) { + final Map valueMap = + Maps.newHashMapWithExpectedSize(pendingUpdates.size()); + for (Cell kv : pendingUpdates) { + // create new pointers to each part of the kv + ImmutableBytesPtr value = + new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + valueMap + .put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), value); + } + return new AbstractValueGetter() { + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { + if (ref.equals(indexEmptyKeyValueRef)) return null; + return valueMap.get(ref); + } - public static class UDFParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { + @Override + public byte[] getRowKey() { + return rowKey; + } + }; + } - private Map udfParseNodes; - public UDFParseNodeVisitor() { - udfParseNodes = new HashMap(1); - } + public byte[] getDataEmptyKeyValueCF() { + return dataEmptyKeyValueCF; + } - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - if(node instanceof UDFParseNode) { - udfParseNodes.put(node.getName(), (UDFParseNode)node); - } - return super.visitEnter(node); - } - - public Map getUdfParseNodes() { - return udfParseNodes; - } - } - - public byte[] getEmptyKeyValueQualifier() { - return indexEmptyKeyValueRef.getQualifier(); - } + public boolean isLocalIndex() { + return isLocalIndex; + } - public byte[] getEmptyKeyValueQualifierForDataTable() { - return dataEmptyKeyValueRef.getQualifier(); - } + public boolean isUncovered() { + return isUncovered; + } + + public boolean isCDCIndex() { + return isCDCIndex; + } - public Set> getIndexedColumnInfo() { - return indexedColumnsInfo; + public boolean isImmutableRows() { + return immutableRows; + } + + public boolean isIndexOnBaseTable() { + if (parentTableType == null) { + return false; } - - public ImmutableStorageScheme getIndexStorageScheme() { - return immutableStorageScheme; + return parentTableType == PTableType.TABLE; + } + + public Set getIndexedColumns() { + return indexedColumns; + } + + public static class UDFParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { + + private Map udfParseNodes; + + public UDFParseNodeVisitor() { + udfParseNodes = new HashMap(1); } - public ImmutableStorageScheme getDataImmutableStorageScheme() { - 
return dataImmutableStorageScheme; + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + if (node instanceof UDFParseNode) { + udfParseNodes.put(node.getName(), (UDFParseNode) node); + } + return super.visitEnter(node); } - public QualifierEncodingScheme getDataEncodingScheme() { - return dataEncodingScheme; + public Map getUdfParseNodes() { + return udfParseNodes; } + } + + public byte[] getEmptyKeyValueQualifier() { + return indexEmptyKeyValueRef.getQualifier(); + } + + public byte[] getEmptyKeyValueQualifierForDataTable() { + return dataEmptyKeyValueRef.getQualifier(); + } + + public Set> getIndexedColumnInfo() { + return indexedColumnsInfo; + } + + public ImmutableStorageScheme getIndexStorageScheme() { + return immutableStorageScheme; + } + + public ImmutableStorageScheme getDataImmutableStorageScheme() { + return dataImmutableStorageScheme; + } + + public QualifierEncodingScheme getDataEncodingScheme() { + return dataEncodingScheme; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java index 5eadc7d29d9..db08d9f3320 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,117 +42,124 @@ public class IndexMetaDataCacheClient { - private final ServerCacheClient serverCache; - private PTable cacheUsingTable; - + private final ServerCacheClient serverCache; + private PTable cacheUsingTable; + + /** + * Construct client used to send index metadata to each region server for caching during batched + * put for secondary index maintenance. + * @param connection the client connection + * @param cacheUsingTable table ref to table that will use the cache during its scan + */ + public IndexMetaDataCacheClient(PhoenixConnection connection, PTable cacheUsingTable) { + serverCache = new ServerCacheClient(connection); + this.cacheUsingTable = cacheUsingTable; + } + + /** + * Determines whether or not to use the IndexMetaDataCache to send the index metadata to the + * region servers. The alternative is to just set the index metadata as an attribute on the + * mutations. + * @param mutations the list of mutations that will be sent in a batch to server + * @param indexMetaDataByteLength length in bytes of the index metadata cache + */ + public static boolean useIndexMetadataCache(PhoenixConnection connection, + List mutations, int indexMetaDataByteLength) { + ReadOnlyProps props = connection.getQueryServices().getProps(); + int threshold = props.getInt(INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD); + return (indexMetaDataByteLength > ServerCacheClient.UUID_LENGTH + && mutations.size() > threshold); + } + + /** + * Send the index metadata cahce to all region servers for regions that will handle the mutations. 
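The useIndexMetadataCache check above is essentially a size/batch trade-off: pushing the serialized index metadata to the region-server cache only pays off when that metadata is larger than the UUID that would otherwise be attached to every mutation, and the batch is large enough to amortize the extra RPC. A minimal sketch of that decision rule, using plain parameters instead of the Phoenix types above (worthCaching and the sketch class are hypothetical names, not part of this patch):

class IndexMetadataCacheDecisionSketch {
  // Cache server-side only when the metadata outweighs the per-mutation UUID
  // and the batch is big enough to cross the configured threshold.
  static boolean worthCaching(int indexMetaDataByteLength, int batchSize, int uuidLength,
      int batchSizeThreshold) {
    return indexMetaDataByteLength > uuidLength && batchSize > batchSizeThreshold;
  }
}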
+ * @return client-side {@link ServerCache} representing the added index metadata cache + * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed size + */ + public ServerCache addIndexMetadataCache(List mutations, + ImmutableBytesWritable ptr, byte[] txState) throws SQLException { /** - * Construct client used to send index metadata to each region server - * for caching during batched put for secondary index maintenance. - * @param connection the client connection - * @param cacheUsingTable table ref to table that will use the cache during its scan + * Serialize and compress hashCacheTable */ - public IndexMetaDataCacheClient(PhoenixConnection connection, PTable cacheUsingTable) { - serverCache = new ServerCacheClient(connection); - this.cacheUsingTable = cacheUsingTable; - } + return serverCache.addServerCache(ScanUtil.newScanRanges(mutations), ptr, txState, + new IndexMetaDataCacheFactory(), cacheUsingTable); + } + /** + * Send the index metadata cahce to all region servers for regions that will handle the mutations. + * @param txState TODO + * @return client-side {@link ServerCache} representing the added index metadata cache + * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed size + */ + public ServerCache addIndexMetadataCache(ScanRanges ranges, ImmutableBytesWritable ptr, + byte[] txState) throws SQLException { /** - * Determines whether or not to use the IndexMetaDataCache to send the index metadata - * to the region servers. The alternative is to just set the index metadata as an attribute on - * the mutations. - * @param connection - * @param mutations the list of mutations that will be sent in a batch to server - * @param indexMetaDataByteLength length in bytes of the index metadata cache + * Serialize and compress hashCacheTable */ - public static boolean useIndexMetadataCache(PhoenixConnection connection, List mutations, int indexMetaDataByteLength) { - ReadOnlyProps props = connection.getQueryServices().getProps(); - int threshold = props.getInt(INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD); - return (indexMetaDataByteLength > ServerCacheClient.UUID_LENGTH && mutations.size() > threshold); + return serverCache.addServerCache(ranges, ptr, txState, new IndexMetaDataCacheFactory(), + cacheUsingTable); + } + + public static ServerCache setMetaDataOnMutations(PhoenixConnection connection, PTable table, + List mutations, ImmutableBytesWritable indexMetaDataPtr) + throws SQLException { + final byte[] tenantIdBytes; + if (table.isMultiTenant()) { + tenantIdBytes = connection.getTenantId() == null + ? null + : ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, + connection.getTenantId(), table.getViewIndexId() != null); + } else { + tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); } - - /** - * Send the index metadata cahce to all region servers for regions that will handle the mutations. 
- * @return client-side {@link ServerCache} representing the added index metadata cache - * @throws SQLException - * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed - * size - */ - public ServerCache addIndexMetadataCache(List mutations, ImmutableBytesWritable ptr, byte[] txState) throws SQLException { - /** - * Serialize and compress hashCacheTable - */ - return serverCache.addServerCache(ScanUtil.newScanRanges(mutations), ptr, txState, new IndexMetaDataCacheFactory(), cacheUsingTable); + ServerCache cache = null; + byte[] attribValue = null; + byte[] uuidValue = null; + byte[] txState = ByteUtil.EMPTY_BYTE_ARRAY; + if (table.isTransactional()) { + txState = connection.getMutationState().encodeTransaction(); } - - - /** - * Send the index metadata cahce to all region servers for regions that will handle the mutations. - * @param txState TODO - * @return client-side {@link ServerCache} representing the added index metadata cache - * @throws SQLException - * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed - * size - */ - public ServerCache addIndexMetadataCache(ScanRanges ranges, ImmutableBytesWritable ptr, byte[] txState) throws SQLException { - /** - * Serialize and compress hashCacheTable - */ - return serverCache.addServerCache(ranges, ptr, txState, new IndexMetaDataCacheFactory(), cacheUsingTable); + boolean hasIndexMetaData = indexMetaDataPtr.getLength() > 0; + if (hasIndexMetaData) { + if ( + useIndexMetadataCache(connection, mutations, indexMetaDataPtr.getLength() + txState.length) + ) { + IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, table); + cache = client.addIndexMetadataCache(mutations, indexMetaDataPtr, txState); + uuidValue = cache.getId(); + } else { + attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr); + uuidValue = ServerCacheClient.generateId(); + } + } else if (txState.length == 0) { + return null; } - - public static ServerCache setMetaDataOnMutations(PhoenixConnection connection, PTable table, List mutations, - ImmutableBytesWritable indexMetaDataPtr) throws SQLException { - final byte[] tenantIdBytes; - if (table.isMultiTenant()) { - tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes( - table.getRowKeySchema(), table.getBucketNum() != null, connection.getTenantId(), - table.getViewIndexId() != null); - } else { - tenantIdBytes = connection.getTenantId() == null ? 
null : connection.getTenantId().getBytes(); - } - ServerCache cache = null; - byte[] attribValue = null; - byte[] uuidValue = null; - byte[] txState = ByteUtil.EMPTY_BYTE_ARRAY; - if (table.isTransactional()) { - txState = connection.getMutationState().encodeTransaction(); + // Either set the UUID to be able to access the index metadata from the cache + // or set the index metadata directly on the Mutation + for (Mutation mutation : mutations) { + if (connection.getTenantId() != null) { + mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantIdBytes); + } + mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + if (table.getTransformingNewTable() != null) { + boolean disabled = table.getTransformingNewTable().isIndexStateDisabled(); + if (!disabled) { + mutation.setAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING, TRUE_BYTES); } - boolean hasIndexMetaData = indexMetaDataPtr.getLength() > 0; - if (hasIndexMetaData) { - if (useIndexMetadataCache(connection, mutations, indexMetaDataPtr.getLength() + txState.length)) { - IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, table); - cache = client.addIndexMetadataCache(mutations, indexMetaDataPtr, txState); - uuidValue = cache.getId(); - } else { - attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr); - uuidValue = ServerCacheClient.generateId(); - } - } else if (txState.length == 0) { return null; } - // Either set the UUID to be able to access the index metadata from the cache - // or set the index metadata directly on the Mutation - for (Mutation mutation : mutations) { - if (connection.getTenantId() != null) { - mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantIdBytes); - } - mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - if (table.getTransformingNewTable() != null) { - boolean disabled = table.getTransformingNewTable().isIndexStateDisabled(); - if (!disabled) { - mutation.setAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING, TRUE_BYTES); - } - } - ScanUtil.annotateMutationWithMetadataAttributes(table, mutation); - if (attribValue != null) { - mutation.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); - mutation.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, - Bytes.toBytes(MetaDataProtocol.PHOENIX_VERSION)); - if (txState.length > 0) { - mutation.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); - } - } else if (!hasIndexMetaData && txState.length > 0) { - mutation.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); - } + } + ScanUtil.annotateMutationWithMetadataAttributes(table, mutation); + if (attribValue != null) { + mutation.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); + mutation.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, + Bytes.toBytes(MetaDataProtocol.PHOENIX_VERSION)); + if (txState.length > 0) { + mutation.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); } - return cache; + } else if (!hasIndexMetaData && txState.length > 0) { + mutation.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); + } } + return cache; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java index facf3e63c72..e31cac4c0e9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/index/IndexMetaDataCacheFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,50 +33,52 @@ import org.apache.phoenix.transaction.TransactionFactory; public class IndexMetaDataCacheFactory implements ServerCacheFactory { - public IndexMetaDataCacheFactory() { - } + public IndexMetaDataCacheFactory() { + } - @Override - public void readFields(DataInput arg0) throws IOException { - } + @Override + public void readFields(DataInput arg0) throws IOException { + } - @Override - public void write(DataOutput arg0) throws IOException { - } + @Override + public void write(DataOutput arg0) throws IOException { + } - @Override - public Closeable newCache (ImmutableBytesWritable cachePtr, byte[] txState, final MemoryChunk chunk, boolean useProtoForIndexMaintainer, final int clientVersion) throws SQLException { - // just use the standard keyvalue builder - this doesn't really need to be fast - - final List maintainers = - IndexMaintainer.deserialize(cachePtr, GenericKeyValueBuilder.INSTANCE, useProtoForIndexMaintainer); - final PhoenixTransactionContext txnContext; - try { - txnContext = TransactionFactory.getTransactionContext(txState, clientVersion); - } catch (IOException e) { - throw new SQLException(e); - } - return new IndexMetaDataCache() { + @Override + public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, + final MemoryChunk chunk, boolean useProtoForIndexMaintainer, final int clientVersion) + throws SQLException { + // just use the standard keyvalue builder - this doesn't really need to be fast - @Override - public void close() throws IOException { - chunk.close(); - } + final List maintainers = IndexMaintainer.deserialize(cachePtr, + GenericKeyValueBuilder.INSTANCE, useProtoForIndexMaintainer); + final PhoenixTransactionContext txnContext; + try { + txnContext = TransactionFactory.getTransactionContext(txState, clientVersion); + } catch (IOException e) { + throw new SQLException(e); + } + return new IndexMetaDataCache() { - @Override - public List getIndexMaintainers() { - return maintainers; - } + @Override + public void close() throws IOException { + chunk.close(); + } - @Override - public PhoenixTransactionContext getTransactionContext() { - return txnContext; - } + @Override + public List getIndexMaintainers() { + return maintainers; + } - @Override - public int getClientVersion() { - return clientVersion; - } - }; - } + @Override + public PhoenixTransactionContext getTransactionContext() { + return txnContext; + } + + @Override + public int getClientVersion() { + return clientVersion; + } + }; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilderHelper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilderHelper.java index 6ef1d9e38a9..8311e693f70 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilderHelper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilderHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,116 +35,125 @@ import org.apache.phoenix.util.TrustedByteArrayOutputStream; public final class PhoenixIndexBuilderHelper { - private static final byte[] ON_DUP_KEY_IGNORE_BYTES = new byte[] {1}; // boolean true - private static final int ON_DUP_KEY_HEADER_BYTE_SIZE = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BOOLEAN; - public static final String ATOMIC_OP_ATTRIB = "_ATOMIC_OP_ATTRIB"; + private static final byte[] ON_DUP_KEY_IGNORE_BYTES = new byte[] { 1 }; // boolean true + private static final int ON_DUP_KEY_HEADER_BYTE_SIZE = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BOOLEAN; + public static final String ATOMIC_OP_ATTRIB = "_ATOMIC_OP_ATTRIB"; - public static final String RETURN_RESULT = "_RETURN_RESULT"; - public static final byte[] RETURN_RESULT_ROW = new byte[]{0}; + public static final String RETURN_RESULT = "_RETURN_RESULT"; + public static final byte[] RETURN_RESULT_ROW = new byte[] { 0 }; - public static byte[] serializeOnDupKeyIgnore() { - return ON_DUP_KEY_IGNORE_BYTES; - } + public static byte[] serializeOnDupKeyIgnore() { + return ON_DUP_KEY_IGNORE_BYTES; + } - /** - * Serialize ON DUPLICATE KEY UPDATE info with the following format: - * 1) Boolean value tracking whether or not to execute the first ON DUPLICATE KEY clause. - * We know the clause should be executed when there are other UPSERT VALUES clauses earlier in - * the same batch for this row key. We need this for two main cases: - * UPSERT VALUES followed by UPSERT VALUES ON DUPLICATE KEY UPDATE - * UPSERT VALUES ON DUPLICATE KEY IGNORE followed by UPSERT VALUES ON DUPLICATE KEY UPDATE - * 2) Short value tracking how many times the next first clause should be executed. This - * optimizes the same clause be executed many times by only serializing it once. - * 3) Repeating {@code List, PTable } pairs that encapsulate the ON DUPLICATE KEY clause. - * @param table table representing columns being updated - * @param expressions list of expressions to evaluate for updating columns - * @return serialized byte array representation of ON DUPLICATE KEY UPDATE info - */ - public static byte[] serializeOnDupKeyUpdate(PTable table, List expressions) { - PTableProtos.PTable ptableProto = PTableImpl.toProto(table); - int size = ptableProto.getSerializedSize(); - try (ByteArrayOutputStream stream = new ByteArrayOutputStream(size * 2)) { - DataOutputStream output = new DataOutputStream(stream); - output.writeBoolean(true); // Skip this ON DUPLICATE KEY clause if row already exists - output.writeShort(1); // Execute this ON DUPLICATE KEY once - WritableUtils.writeVInt(output, expressions.size()); - for (int i = 0; i < expressions.size(); i++) { - Expression expression = expressions.get(i); - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - ptableProto.writeDelimitedTo(output); - return stream.toByteArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } + /** + * Serialize ON DUPLICATE KEY UPDATE info with the following format: 1) Boolean value tracking + * whether or not to execute the first ON DUPLICATE KEY clause. We know the clause should be + * executed when there are other UPSERT VALUES clauses earlier in the same batch for this row key. 
+ * We need this for two main cases: UPSERT VALUES followed by UPSERT VALUES ON DUPLICATE KEY + * UPDATE UPSERT VALUES ON DUPLICATE KEY IGNORE followed by UPSERT VALUES ON DUPLICATE KEY UPDATE + * 2) Short value tracking how many times the next first clause should be executed. This optimizes + * the same clause be executed many times by only serializing it once. 3) Repeating + * {@code List, PTable } pairs that encapsulate the ON DUPLICATE KEY clause. + * @param table table representing columns being updated + * @param expressions list of expressions to evaluate for updating columns + * @return serialized byte array representation of ON DUPLICATE KEY UPDATE info + */ + public static byte[] serializeOnDupKeyUpdate(PTable table, List expressions) { + PTableProtos.PTable ptableProto = PTableImpl.toProto(table); + int size = ptableProto.getSerializedSize(); + try (ByteArrayOutputStream stream = new ByteArrayOutputStream(size * 2)) { + DataOutputStream output = new DataOutputStream(stream); + output.writeBoolean(true); // Skip this ON DUPLICATE KEY clause if row already exists + output.writeShort(1); // Execute this ON DUPLICATE KEY once + WritableUtils.writeVInt(output, expressions.size()); + for (int i = 0; i < expressions.size(); i++) { + Expression expression = expressions.get(i); + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } + ptableProto.writeDelimitedTo(output); + return stream.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); } + } - private static byte[] doNotSkipFirstOnDupKey(byte[] oldOnDupKeyBytes) { - byte[] newOnDupKeyBytes = Arrays.copyOf(oldOnDupKeyBytes, oldOnDupKeyBytes.length); - newOnDupKeyBytes[0] = 0; // false means do not skip first ON DUPLICATE KEY + private static byte[] doNotSkipFirstOnDupKey(byte[] oldOnDupKeyBytes) { + byte[] newOnDupKeyBytes = Arrays.copyOf(oldOnDupKeyBytes, oldOnDupKeyBytes.length); + newOnDupKeyBytes[0] = 0; // false means do not skip first ON DUPLICATE KEY + return newOnDupKeyBytes; + } + + public static byte[] combineOnDupKey(byte[] oldOnDupKeyBytes, byte[] newOnDupKeyBytes) { + // If old ON DUPLICATE KEY is null, then the new value always takes effect + // If new ON DUPLICATE KEY is null, then reset back to null + if (oldOnDupKeyBytes == null || newOnDupKeyBytes == null) { + if (newOnDupKeyBytes == null) { return newOnDupKeyBytes; + } + return doNotSkipFirstOnDupKey(newOnDupKeyBytes); } + // If the new UPSERT VALUES statement has an ON DUPLICATE KEY IGNORE, and there + // is an already existing UPSERT VALUES statement with an ON DUPLICATE KEY clause, + // then we can just keep that one as the new one has no impact. 
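As a rough illustration of the layout described in the javadoc above, the serialized ON DUPLICATE KEY payload begins with a fixed header of one boolean (whether to execute the first clause) followed by one short (how many times it repeats), which is the ON_DUP_KEY_HEADER_BYTE_SIZE that combineOnDupKey skips over. A self-contained sketch of just that header; the expression list and PTable proto that follow are omitted, and OnDupKeyHeaderSketch is a hypothetical class name:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class OnDupKeyHeaderSketch {
  // Header = SIZEOF_BOOLEAN + SIZEOF_SHORT = 3 bytes.
  static byte[] writeHeader(boolean executeFirstClause, short repeatCount) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      out.writeBoolean(executeFirstClause); // skip/execute flag for the first clause
      out.writeShort(repeatCount);          // how many times the first clause repeats
    }
    return bytes.toByteArray();
  }
}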
+ if (isDupKeyIgnore(newOnDupKeyBytes)) { + return oldOnDupKeyBytes; + } + boolean isOldDupKeyIgnore = isDupKeyIgnore(oldOnDupKeyBytes); + try (TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream( + Math.max(0, oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE) + newOnDupKeyBytes.length); + ByteArrayInputStream oldStream = new ByteArrayInputStream(oldOnDupKeyBytes); + ByteArrayInputStream newStream = new ByteArrayInputStream(newOnDupKeyBytes); + DataOutputStream output = new DataOutputStream(stream); + DataInputStream oldInput = new DataInputStream(oldStream); + DataInputStream newInput = new DataInputStream(newStream)) { - public static byte[] combineOnDupKey(byte[] oldOnDupKeyBytes, byte[] newOnDupKeyBytes) { - // If old ON DUPLICATE KEY is null, then the new value always takes effect - // If new ON DUPLICATE KEY is null, then reset back to null - if (oldOnDupKeyBytes == null || newOnDupKeyBytes == null) { - if (newOnDupKeyBytes == null) { - return newOnDupKeyBytes; - } - return doNotSkipFirstOnDupKey(newOnDupKeyBytes); - } - // If the new UPSERT VALUES statement has an ON DUPLICATE KEY IGNORE, and there - // is an already existing UPSERT VALUES statement with an ON DUPLICATE KEY clause, - // then we can just keep that one as the new one has no impact. - if (isDupKeyIgnore(newOnDupKeyBytes)) { - return oldOnDupKeyBytes; - } - boolean isOldDupKeyIgnore = isDupKeyIgnore(oldOnDupKeyBytes); - try (TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(Math.max(0, oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE) + newOnDupKeyBytes.length); - ByteArrayInputStream oldStream = new ByteArrayInputStream(oldOnDupKeyBytes); - ByteArrayInputStream newStream = new ByteArrayInputStream(newOnDupKeyBytes); - DataOutputStream output = new DataOutputStream(stream); - DataInputStream oldInput = new DataInputStream(oldStream); - DataInputStream newInput = new DataInputStream(newStream)) { - - boolean execute1 = oldInput.readBoolean(); - newInput.readBoolean(); // ignore - int repeating2 = newInput.readShort(); - if (isOldDupKeyIgnore) { - output.writeBoolean(false); // Will force subsequent ON DUPLICATE KEY UPDATE statement to execute - output.writeShort(repeating2); - output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); - } else { - int repeating1 = oldInput.readShort(); - if (Bytes.compareTo( - oldOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE, - newOnDupKeyBytes, Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BOOLEAN, newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE) == 0) { - // If both old and new ON DUPLICATE KEY UPDATE clauses match, - // reduce the size of data we're sending over the wire. - // TODO: optimization size of RPC more. 
- output.writeBoolean(execute1); - output.writeShort(repeating1 + repeating2); - output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); - } else { - output.writeBoolean(execute1); - output.writeShort(repeating1); // retain first ON DUPLICATE KEY UPDATE having repeated - output.write(oldOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); - // If the new ON DUPLICATE KEY UPDATE was repeating, we need to write it multiple times as only the first - // statement is effected by the repeating amount - for (int i = 0; i < repeating2; i++) { - output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); - } - } - } - return stream.toByteArray(); - } catch (IOException e) { // Shouldn't be possible with ByteInput/Output streams - throw new RuntimeException(e); + boolean execute1 = oldInput.readBoolean(); + newInput.readBoolean(); // ignore + int repeating2 = newInput.readShort(); + if (isOldDupKeyIgnore) { + output.writeBoolean(false); // Will force subsequent ON DUPLICATE KEY UPDATE statement to + // execute + output.writeShort(repeating2); + output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, + newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); + } else { + int repeating1 = oldInput.readShort(); + if ( + Bytes.compareTo(oldOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, + oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE, newOnDupKeyBytes, + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BOOLEAN, + newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE) == 0 + ) { + // If both old and new ON DUPLICATE KEY UPDATE clauses match, + // reduce the size of data we're sending over the wire. + // TODO: optimization size of RPC more. 
+ output.writeBoolean(execute1); + output.writeShort(repeating1 + repeating2); + output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, + newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); + } else { + output.writeBoolean(execute1); + output.writeShort(repeating1); // retain first ON DUPLICATE KEY UPDATE having repeated + output.write(oldOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, + oldOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); + // If the new ON DUPLICATE KEY UPDATE was repeating, we need to write it multiple times as + // only the first + // statement is effected by the repeating amount + for (int i = 0; i < repeating2; i++) { + output.write(newOnDupKeyBytes, ON_DUP_KEY_HEADER_BYTE_SIZE, + newOnDupKeyBytes.length - ON_DUP_KEY_HEADER_BYTE_SIZE); + } } + } + return stream.toByteArray(); + } catch (IOException e) { // Shouldn't be possible with ByteInput/Output streams + throw new RuntimeException(e); } + } - public static boolean isDupKeyIgnore(byte[] onDupKeyBytes) { - return onDupKeyBytes != null && Bytes.compareTo(ON_DUP_KEY_IGNORE_BYTES, onDupKeyBytes) == 0; - } + public static boolean isDupKeyIgnore(byte[] onDupKeyBytes) { + return onDupKeyBytes != null && Bytes.compareTo(ON_DUP_KEY_IGNORE_BYTES, onDupKeyBytes) == 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java index 6c9f4920e10..86180169b56 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java @@ -1,14 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.index; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.DO_TRANSFORMING; + import java.io.IOException; import java.util.Collections; import java.util.List; @@ -30,110 +40,118 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.DO_TRANSFORMING; - /** * Phoenix-based {@link IndexCodec}. Manages all the logic of how to cleanup an index ( - * {@link #getIndexDeletes(TableState, IndexMetaData, byte[], byte[])}) as well as what the new index state should be ( - * {@link #getIndexUpserts(TableState, IndexMetaData, byte[], byte[])}). + * {@link #getIndexDeletes(TableState, IndexMetaData, byte[], byte[])}) as well as what the new + * index state should be ( {@link #getIndexUpserts(TableState, IndexMetaData, byte[], byte[])}). */ public class PhoenixIndexCodec extends BaseIndexCodec { - public static final String INDEX_MD = "IdxMD"; - public static final String INDEX_PROTO_MD = "IdxProtoMD"; - public static final String INDEX_UUID = "IdxUUID"; - public static final String INDEX_MAINTAINERS = "IndexMaintainers"; - public static final String INDEX_NAME_FOR_IDX_MAINTAINER = "INDEX_IDX_MAINTAINER"; - public static final KeyValueBuilder KV_BUILDER = GenericKeyValueBuilder.INSTANCE; - - private byte[] tableName; - - public PhoenixIndexCodec() { - - } + public static final String INDEX_MD = "IdxMD"; + public static final String INDEX_PROTO_MD = "IdxProtoMD"; + public static final String INDEX_UUID = "IdxUUID"; + public static final String INDEX_MAINTAINERS = "IndexMaintainers"; + public static final String INDEX_NAME_FOR_IDX_MAINTAINER = "INDEX_IDX_MAINTAINER"; + public static final KeyValueBuilder KV_BUILDER = GenericKeyValueBuilder.INSTANCE; - public PhoenixIndexCodec(Configuration conf, byte[] tableName) { - initialize(conf, tableName); - } - + private byte[] tableName; - @Override - public void initialize(Configuration conf, byte[] tableName) { - this.tableName = tableName; - } + public PhoenixIndexCodec() { - boolean hasIndexMaintainers(Map attributes) { - if (attributes == null) { return false; } - byte[] uuid = attributes.get(INDEX_UUID); - if (uuid == null) { return false; } - return true; - } + } + + public PhoenixIndexCodec(Configuration conf, byte[] tableName) { + initialize(conf, tableName); + } - boolean isTransforming(Map attributes) { - if (attributes == null) { return false; } - byte[] transforming = attributes.get(DO_TRANSFORMING); - if (transforming == null) { return false; } - return true; + @Override + public void initialize(Configuration conf, byte[] tableName) { + this.tableName = tableName; + } + + boolean hasIndexMaintainers(Map attributes) { + if (attributes == null) { + return false; + } + byte[] uuid = attributes.get(INDEX_UUID); + if (uuid == null) { + return false; } + return true; + } - @Override - public Iterable getIndexUpserts( - TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey, - boolean verified) throws IOException { - PhoenixIndexMetaData metaData = (PhoenixIndexMetaData)context; - List indexMaintainers = metaData.getIndexMaintainers(); - if 
(indexMaintainers.get(0).isRowDeleted(state.getPendingUpdate())) { - return Collections.emptyList(); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(state.getCurrentRowKey()); - List indexUpdates = Lists.newArrayList(); - for (IndexMaintainer maintainer : indexMaintainers) { - Pair statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context); - ValueGetter valueGetter = statePair.getFirst(); - IndexUpdate indexUpdate = statePair.getSecond(); - indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName()); - Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, state.getCurrentTimestamp(), - regionStartKey, regionEndKey, verified); - indexUpdate.setUpdate(put); - indexUpdates.add(indexUpdate); - } - return indexUpdates; + boolean isTransforming(Map attributes) { + if (attributes == null) { + return false; } + byte[] transforming = attributes.get(DO_TRANSFORMING); + if (transforming == null) { + return false; + } + return true; + } - @Override - public Iterable getIndexDeletes(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey) throws IOException { - PhoenixIndexMetaData metaData = (PhoenixIndexMetaData)context; - List indexMaintainers = metaData.getIndexMaintainers(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(state.getCurrentRowKey()); - List indexUpdates = Lists.newArrayList(); - for (IndexMaintainer maintainer : indexMaintainers) { - // For transactional tables, we use an index maintainer - // to aid in rollback if there's a KeyValue column in the index. The alternative would be - // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the - // client side. - Set cols = Sets.newHashSet(maintainer.getAllColumns()); - cols.add(new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), indexMaintainers.get(0).getEmptyKeyValueQualifier())); - Pair statePair = state.getIndexUpdateState(cols, metaData.getReplayWrite() != null, true, context); - ValueGetter valueGetter = statePair.getFirst(); - if (valueGetter!=null) { - IndexUpdate indexUpdate = statePair.getSecond(); - indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName()); - Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(), - state.getCurrentTimestamp(), regionStartKey, regionEndKey); - indexUpdate.setUpdate(delete); - indexUpdates.add(indexUpdate); - } - } - return indexUpdates; + @Override + public Iterable getIndexUpserts(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey, boolean verified) throws IOException { + PhoenixIndexMetaData metaData = (PhoenixIndexMetaData) context; + List indexMaintainers = metaData.getIndexMaintainers(); + if (indexMaintainers.get(0).isRowDeleted(state.getPendingUpdate())) { + return Collections.emptyList(); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(state.getCurrentRowKey()); + List indexUpdates = Lists.newArrayList(); + for (IndexMaintainer maintainer : indexMaintainers) { + Pair statePair = state.getIndexUpdateState( + maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context); + ValueGetter valueGetter = statePair.getFirst(); + IndexUpdate indexUpdate = statePair.getSecond(); + indexUpdate.setTable(maintainer.isLocalIndex() ? 
tableName : maintainer.getIndexTableName()); + Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, + state.getCurrentTimestamp(), regionStartKey, regionEndKey, verified); + indexUpdate.setUpdate(put); + indexUpdates.add(indexUpdate); } + return indexUpdates; + } - @Override - public boolean isEnabled(Mutation m) { - return hasIndexMaintainers(m.getAttributesMap()) || isTransforming(m.getAttributesMap()); + @Override + public Iterable getIndexDeletes(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey) throws IOException { + PhoenixIndexMetaData metaData = (PhoenixIndexMetaData) context; + List indexMaintainers = metaData.getIndexMaintainers(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(state.getCurrentRowKey()); + List indexUpdates = Lists.newArrayList(); + for (IndexMaintainer maintainer : indexMaintainers) { + // For transactional tables, we use an index maintainer + // to aid in rollback if there's a KeyValue column in the index. The alternative would be + // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the + // client side. + Set cols = Sets.newHashSet(maintainer.getAllColumns()); + cols.add(new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), + indexMaintainers.get(0).getEmptyKeyValueQualifier())); + Pair statePair = + state.getIndexUpdateState(cols, metaData.getReplayWrite() != null, true, context); + ValueGetter valueGetter = statePair.getFirst(); + if (valueGetter != null) { + IndexUpdate indexUpdate = statePair.getSecond(); + indexUpdate + .setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName()); + Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, + state.getPendingUpdate(), state.getCurrentTimestamp(), regionStartKey, regionEndKey); + indexUpdate.setUpdate(delete); + indexUpdates.add(indexUpdate); + } } + return indexUpdates; + } + + @Override + public boolean isEnabled(Mutation m) { + return hasIndexMaintainers(m.getAttributesMap()) || isTransforming(m.getAttributesMap()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicyHelper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicyHelper.java index 7a099b366af..95befb90433 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicyHelper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicyHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,211 +35,227 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ReadOnlyProps; -import org.apache.phoenix.util.ClientUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class PhoenixIndexFailurePolicyHelper { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicyHelper.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixIndexFailurePolicyHelper.class); - public static interface MutateCommand { - void doMutation() throws IOException; + public static interface MutateCommand { + void doMutation() throws IOException; - List getMutationList(); - } + List getMutationList(); + } - /** - * Retries a mutationBatch where the index write failed. - * One attempt should have already been made before calling this. - * Max retries and exponential backoff logic mimics that of HBase's client - * If max retries are hit, the index is disabled. - * If the write is successful on a subsequent retry, the index is set back to ACTIVE - * @param mutateCommand mutation command to execute - * @param iwe original IndexWriteException - * @param connection connection to use - * @param config config used to get retry settings - * @throws IOException - */ - public static void doBatchWithRetries(MutateCommand mutateCommand, - IndexWriteException iwe, PhoenixConnection connection, ReadOnlyProps config) - throws IOException { - if (!PhoenixIndexMetaData.isIndexRebuild( - mutateCommand.getMutationList().get(0).getAttributesMap())) { - incrementPendingDisableCounter(iwe, connection); - } - int maxTries = config.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - long pause = config.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); - int numRetry = 1; // already tried once - // calculate max time to retry for - int timeout = 0; - for (int i = 0; i < maxTries; ++i) { - timeout = (int) (timeout + ConnectionUtils.getPauseTime(pause, i)); - } - long canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; - while (canRetryMore(numRetry++, maxTries, canRetryUntil)) { - try { - Thread.sleep(ConnectionUtils.getPauseTime(pause, numRetry)); // HBase's exponential backoff - mutateCommand.doMutation(); - // success - change the index state from PENDING_DISABLE back to ACTIVE - // If it's not Index Rebuild - if (!PhoenixIndexMetaData.isIndexRebuild( - mutateCommand.getMutationList().get(0).getAttributesMap())){ - handleIndexWriteSuccessFromClient(iwe, connection); - } - return; - } catch (IOException e) { - SQLException inferredE = ClientUtil.parseLocalOrRemoteServerException(e); - if (inferredE != null && inferredE.getErrorCode() != SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) { - // If this call is from phoenix client, we also need to check if SQLException - // error is INDEX_METADATA_NOT_FOUND or not - // if it's not an INDEX_METADATA_NOT_FOUND, throw exception, - // to be handled normally in caller's try-catch - if (inferredE.getErrorCode() != 
SQLExceptionCode.INDEX_METADATA_NOT_FOUND - .getErrorCode()) { - throw e; - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException(e); - } + /** + * Retries a mutationBatch where the index write failed. One attempt should have already been made + * before calling this. Max retries and exponential backoff logic mimics that of HBase's client If + * max retries are hit, the index is disabled. If the write is successful on a subsequent retry, + * the index is set back to ACTIVE + * @param mutateCommand mutation command to execute + * @param iwe original IndexWriteException + * @param connection connection to use + * @param config config used to get retry settings + */ + public static void doBatchWithRetries(MutateCommand mutateCommand, IndexWriteException iwe, + PhoenixConnection connection, ReadOnlyProps config) throws IOException { + if ( + !PhoenixIndexMetaData + .isIndexRebuild(mutateCommand.getMutationList().get(0).getAttributesMap()) + ) { + incrementPendingDisableCounter(iwe, connection); + } + int maxTries = config.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + long pause = + config.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + int numRetry = 1; // already tried once + // calculate max time to retry for + int timeout = 0; + for (int i = 0; i < maxTries; ++i) { + timeout = (int) (timeout + ConnectionUtils.getPauseTime(pause, i)); + } + long canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; + while (canRetryMore(numRetry++, maxTries, canRetryUntil)) { + try { + Thread.sleep(ConnectionUtils.getPauseTime(pause, numRetry)); // HBase's exponential backoff + mutateCommand.doMutation(); + // success - change the index state from PENDING_DISABLE back to ACTIVE + // If it's not Index Rebuild + if ( + !PhoenixIndexMetaData + .isIndexRebuild(mutateCommand.getMutationList().get(0).getAttributesMap()) + ) { + handleIndexWriteSuccessFromClient(iwe, connection); } - if (!PhoenixIndexMetaData.isIndexRebuild( - mutateCommand.getMutationList().get(0).getAttributesMap())) { - // max retries hit - disable the index - handleIndexWriteFailureFromClient(iwe, connection); + return; + } catch (IOException e) { + SQLException inferredE = ClientUtil.parseLocalOrRemoteServerException(e); + if ( + inferredE != null + && inferredE.getErrorCode() != SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode() + ) { + // If this call is from phoenix client, we also need to check if SQLException + // error is INDEX_METADATA_NOT_FOUND or not + // if it's not an INDEX_METADATA_NOT_FOUND, throw exception, + // to be handled normally in caller's try-catch + if ( + inferredE.getErrorCode() != SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode() + ) { + throw e; + } } - throw new DoNotRetryIOException(iwe); // send failure back to client + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } } - - /** - * If we're leaving the index active after index write failures on the server side, then we get - * the exception on the client side here after hitting the max # of hbase client retries. We - * disable the index as it may now be inconsistent. The indexDisableTimestamp was already set - * on the server side, so the rebuilder will be run. 
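The retry loop above budgets its total time up front by summing HBase-style backoff pauses for maxTries attempts, then retries until either the attempt count or that deadline is exhausted. A rough sketch of the budget calculation, assuming HBase's standard backoff multipliers (the real ConnectionUtils.getPauseTime also applies jitter; RetryBudgetSketch and retryBudgetMillis are hypothetical names, not part of this patch):

final class RetryBudgetSketch {
  // HBase-style RETRY_BACKOFF multipliers, repeated here only for illustration.
  private static final int[] BACKOFF = { 1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200 };

  static long retryBudgetMillis(long pauseMillis, int maxTries) {
    long total = 0;
    for (int i = 0; i < maxTries; i++) {
      total += pauseMillis * BACKOFF[Math.min(i, BACKOFF.length - 1)];
    }
    // doBatchWithRetries keeps retrying while attempts remain and now < start + total.
    return total;
  }
}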
- */ - private static void handleIndexWriteFailureFromClient(IndexWriteException indexWriteException, - PhoenixConnection conn) { - handleExceptionFromClient(indexWriteException, conn, PIndexState.DISABLE); + if ( + !PhoenixIndexMetaData + .isIndexRebuild(mutateCommand.getMutationList().get(0).getAttributesMap()) + ) { + // max retries hit - disable the index + handleIndexWriteFailureFromClient(iwe, connection); } + throw new DoNotRetryIOException(iwe); // send failure back to client + } - private static void handleIndexWriteSuccessFromClient(IndexWriteException indexWriteException, - PhoenixConnection conn) { - handleExceptionFromClient(indexWriteException, conn, PIndexState.ACTIVE); - } + /** + * If we're leaving the index active after index write failures on the server side, then we get + * the exception on the client side here after hitting the max # of hbase client retries. We + * disable the index as it may now be inconsistent. The indexDisableTimestamp was already set on + * the server side, so the rebuilder will be run. + */ + private static void handleIndexWriteFailureFromClient(IndexWriteException indexWriteException, + PhoenixConnection conn) { + handleExceptionFromClient(indexWriteException, conn, PIndexState.DISABLE); + } + + private static void handleIndexWriteSuccessFromClient(IndexWriteException indexWriteException, + PhoenixConnection conn) { + handleExceptionFromClient(indexWriteException, conn, PIndexState.ACTIVE); + } - private static void handleExceptionFromClient(IndexWriteException indexWriteException, - PhoenixConnection conn, PIndexState indexState) { - try { - Set indexesToUpdate = new HashSet<>(); - if (indexWriteException instanceof MultiIndexWriteFailureException) { - MultiIndexWriteFailureException indexException = - (MultiIndexWriteFailureException) indexWriteException; - List failedIndexes = indexException.getFailedTables(); - if (indexException.isDisableIndexOnFailure() && failedIndexes != null) { - for (HTableInterfaceReference failedIndex : failedIndexes) { - String failedIndexTable = failedIndex.getTableName(); - if (!indexesToUpdate.contains(failedIndexTable)) { - updateIndex(failedIndexTable, conn, indexState); - indexesToUpdate.add(failedIndexTable); - } - } - } - } else if (indexWriteException instanceof SingleIndexWriteFailureException) { - SingleIndexWriteFailureException indexException = - (SingleIndexWriteFailureException) indexWriteException; - String failedIndex = indexException.getTableName(); - if (indexException.isDisableIndexOnFailure() && failedIndex != null) { - updateIndex(failedIndex, conn, indexState); - } + private static void handleExceptionFromClient(IndexWriteException indexWriteException, + PhoenixConnection conn, PIndexState indexState) { + try { + Set indexesToUpdate = new HashSet<>(); + if (indexWriteException instanceof MultiIndexWriteFailureException) { + MultiIndexWriteFailureException indexException = + (MultiIndexWriteFailureException) indexWriteException; + List failedIndexes = indexException.getFailedTables(); + if (indexException.isDisableIndexOnFailure() && failedIndexes != null) { + for (HTableInterfaceReference failedIndex : failedIndexes) { + String failedIndexTable = failedIndex.getTableName(); + if (!indexesToUpdate.contains(failedIndexTable)) { + updateIndex(failedIndexTable, conn, indexState); + indexesToUpdate.add(failedIndexTable); } - } catch (Exception handleE) { - LOGGER.warn("Error while trying to handle index write exception", indexWriteException); + } + } + } else if (indexWriteException instanceof 
SingleIndexWriteFailureException) { + SingleIndexWriteFailureException indexException = + (SingleIndexWriteFailureException) indexWriteException; + String failedIndex = indexException.getTableName(); + if (indexException.isDisableIndexOnFailure() && failedIndex != null) { + updateIndex(failedIndex, conn, indexState); } + } + } catch (Exception handleE) { + LOGGER.warn("Error while trying to handle index write exception", indexWriteException); } + } - private static void incrementPendingDisableCounter(IndexWriteException indexWriteException,PhoenixConnection conn) { - try { - Set indexesToUpdate = new HashSet<>(); - if (indexWriteException instanceof MultiIndexWriteFailureException) { - MultiIndexWriteFailureException indexException = - (MultiIndexWriteFailureException) indexWriteException; - List failedIndexes = indexException.getFailedTables(); - if (indexException.isDisableIndexOnFailure() && failedIndexes != null) { - for (HTableInterfaceReference failedIndex : failedIndexes) { - String failedIndexTable = failedIndex.getTableName(); - if (!indexesToUpdate.contains(failedIndexTable)) { - incrementCounterForIndex(conn,failedIndexTable); - indexesToUpdate.add(failedIndexTable); - } - } - } - } else if (indexWriteException instanceof SingleIndexWriteFailureException) { - SingleIndexWriteFailureException indexException = - (SingleIndexWriteFailureException) indexWriteException; - String failedIndex = indexException.getTableName(); - if (indexException.isDisableIndexOnFailure() && failedIndex != null) { - incrementCounterForIndex(conn,failedIndex); - } + private static void incrementPendingDisableCounter(IndexWriteException indexWriteException, + PhoenixConnection conn) { + try { + Set indexesToUpdate = new HashSet<>(); + if (indexWriteException instanceof MultiIndexWriteFailureException) { + MultiIndexWriteFailureException indexException = + (MultiIndexWriteFailureException) indexWriteException; + List failedIndexes = indexException.getFailedTables(); + if (indexException.isDisableIndexOnFailure() && failedIndexes != null) { + for (HTableInterfaceReference failedIndex : failedIndexes) { + String failedIndexTable = failedIndex.getTableName(); + if (!indexesToUpdate.contains(failedIndexTable)) { + incrementCounterForIndex(conn, failedIndexTable); + indexesToUpdate.add(failedIndexTable); } - } catch (Exception handleE) { - LOGGER.warn("Error while trying to handle index write exception", indexWriteException); + } } + } else if (indexWriteException instanceof SingleIndexWriteFailureException) { + SingleIndexWriteFailureException indexException = + (SingleIndexWriteFailureException) indexWriteException; + String failedIndex = indexException.getTableName(); + if (indexException.isDisableIndexOnFailure() && failedIndex != null) { + incrementCounterForIndex(conn, failedIndex); + } + } + } catch (Exception handleE) { + LOGGER.warn("Error while trying to handle index write exception", indexWriteException); } + } - private static void incrementCounterForIndex(PhoenixConnection conn, String failedIndexTable) throws IOException { - IndexUtil.incrementCounterForIndex(conn, failedIndexTable, 1); - } + private static void incrementCounterForIndex(PhoenixConnection conn, String failedIndexTable) + throws IOException { + IndexUtil.incrementCounterForIndex(conn, failedIndexTable, 1); + } - private static void decrementCounterForIndex(PhoenixConnection conn, String failedIndexTable) throws IOException { - IndexUtil.incrementCounterForIndex(conn, failedIndexTable, -1); - } + private static void 
decrementCounterForIndex(PhoenixConnection conn, String failedIndexTable) + throws IOException { + IndexUtil.incrementCounterForIndex(conn, failedIndexTable, -1); + } - private static boolean canRetryMore(int numRetry, int maxRetries, long canRetryUntil) { - // If there is a single try we must not take into account the time. - return numRetry < maxRetries - || (maxRetries > 1 && EnvironmentEdgeManager.currentTime() < canRetryUntil); - } + private static boolean canRetryMore(int numRetry, int maxRetries, long canRetryUntil) { + // If there is a single try we must not take into account the time. + return numRetry < maxRetries + || (maxRetries > 1 && EnvironmentEdgeManager.currentTime() < canRetryUntil); + } - /** - * Converts from SQLException to IndexWriteException - * @param sqlE the SQLException - * @return the IndexWriteException - */ - public static IndexWriteException getIndexWriteException(SQLException sqlE) { - String sqlMsg = sqlE.getMessage(); - if (sqlMsg.contains(MultiIndexWriteFailureException.FAILURE_MSG)) { - return new MultiIndexWriteFailureException(sqlMsg); - } else if (sqlMsg.contains(SingleIndexWriteFailureException.FAILED_MSG)) { - return new SingleIndexWriteFailureException(sqlMsg); - } - return null; + /** + * Converts from SQLException to IndexWriteException + * @param sqlE the SQLException + * @return the IndexWriteException + */ + public static IndexWriteException getIndexWriteException(SQLException sqlE) { + String sqlMsg = sqlE.getMessage(); + if (sqlMsg.contains(MultiIndexWriteFailureException.FAILURE_MSG)) { + return new MultiIndexWriteFailureException(sqlMsg); + } else if (sqlMsg.contains(SingleIndexWriteFailureException.FAILED_MSG)) { + return new SingleIndexWriteFailureException(sqlMsg); } + return null; + } - private static void updateIndex(String indexFullName, PhoenixConnection conn, - PIndexState indexState) throws SQLException, IOException { - //Decrement the counter because we will be here when client give retry after getting failed or succeed - decrementCounterForIndex(conn,indexFullName); - Long indexDisableTimestamp = null; - if (PIndexState.DISABLE.equals(indexState)) { - LOGGER.info("Disabling index after hitting max number of index write retries: " - + indexFullName); - IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp); - } else if (PIndexState.ACTIVE.equals(indexState)) { - LOGGER.debug("Resetting index to active after subsequent success " + indexFullName); - //At server disabled timestamp will be reset only if there is no other client is in PENDING_DISABLE state - indexDisableTimestamp = 0L; - try { - IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp); - } catch (SQLException e) { - // It's possible that some other client had made the Index DISABLED already , so we can ignore unallowed - // transition(DISABLED->ACTIVE) - if (e.getErrorCode() != SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION.getErrorCode()) { throw e; } - } + private static void updateIndex(String indexFullName, PhoenixConnection conn, + PIndexState indexState) throws SQLException, IOException { + // Decrement the counter because we will be here when client give retry after getting failed or + // succeed + decrementCounterForIndex(conn, indexFullName); + Long indexDisableTimestamp = null; + if (PIndexState.DISABLE.equals(indexState)) { + LOGGER + .info("Disabling index after hitting max number of index write retries: " + indexFullName); + IndexUtil.updateIndexState(conn, indexFullName, indexState, 
indexDisableTimestamp); + } else if (PIndexState.ACTIVE.equals(indexState)) { + LOGGER.debug("Resetting index to active after subsequent success " + indexFullName); + // At server disabled timestamp will be reset only if there is no other client is in + // PENDING_DISABLE state + indexDisableTimestamp = 0L; + try { + IndexUtil.updateIndexState(conn, indexFullName, indexState, indexDisableTimestamp); + } catch (SQLException e) { + // It's possible that some other client had made the Index DISABLED already , so we can + // ignore unallowed + // transition(DISABLED->ACTIVE) + if (e.getErrorCode() != SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION.getErrorCode()) { + throw e; } + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java index a4f4effc230..2e836795d7a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,72 +30,74 @@ import org.apache.phoenix.transaction.PhoenixTransactionContext; public class PhoenixIndexMetaData implements IndexMetaData { - private final Map attributes; - private final IndexMetaDataCache indexMetaDataCache; - private final ReplayWrite replayWrite; - private final boolean isImmutable; - private final boolean hasNonPkColumns; - private final boolean hasLocalIndexes; - - public static boolean isIndexRebuild(Map attributes) { - return attributes.get(BaseScannerRegionObserverConstants.REPLAY_WRITES) - == BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES; - } - - public static ReplayWrite getReplayWrite(Map attributes) { - return ReplayWrite.fromBytes(attributes.get(BaseScannerRegionObserverConstants.REPLAY_WRITES)); - } - - public PhoenixIndexMetaData(IndexMetaDataCache indexMetaDataCache, Map attributes) throws IOException { - this.indexMetaDataCache = indexMetaDataCache; - boolean isImmutable = true; - boolean hasNonPkColumns = false; - boolean hasLocalIndexes = false; - for (IndexMaintainer maintainer : indexMetaDataCache.getIndexMaintainers()) { - isImmutable &= maintainer.isImmutableRows(); - if (!(maintainer instanceof TransformMaintainer)) { - hasNonPkColumns |= !maintainer.getIndexedColumns().isEmpty(); - } - hasLocalIndexes |= maintainer.isLocalIndex(); - } - this.isImmutable = isImmutable; - this.hasNonPkColumns = hasNonPkColumns; - this.attributes = attributes; - this.replayWrite = getReplayWrite(attributes); - this.hasLocalIndexes = hasLocalIndexes; - } - - public PhoenixTransactionContext getTransactionContext() { - return indexMetaDataCache.getTransactionContext(); - } - - public List getIndexMaintainers() { - return indexMetaDataCache.getIndexMaintainers(); - } + private final Map attributes; + private final IndexMetaDataCache indexMetaDataCache; + private final ReplayWrite replayWrite; + private final boolean isImmutable; + private final boolean hasNonPkColumns; + private final boolean hasLocalIndexes; - public Map getAttributes() { - return attributes; - } - - public int getClientVersion() { - 
return indexMetaDataCache.getClientVersion(); - } - - @Override - public ReplayWrite getReplayWrite() { - return replayWrite; - } - - public boolean isImmutableRows() { - return isImmutable; - } - - public boolean hasLocalIndexes() { - return hasLocalIndexes; - } + public static boolean isIndexRebuild(Map attributes) { + return attributes.get(BaseScannerRegionObserverConstants.REPLAY_WRITES) + == BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES; + } - @Override - public boolean requiresPriorRowState(Mutation m) { - return !isImmutable || (indexMetaDataCache.getIndexMaintainers().get(0).isRowDeleted(m) && hasNonPkColumns); + public static ReplayWrite getReplayWrite(Map attributes) { + return ReplayWrite.fromBytes(attributes.get(BaseScannerRegionObserverConstants.REPLAY_WRITES)); + } + + public PhoenixIndexMetaData(IndexMetaDataCache indexMetaDataCache, Map attributes) + throws IOException { + this.indexMetaDataCache = indexMetaDataCache; + boolean isImmutable = true; + boolean hasNonPkColumns = false; + boolean hasLocalIndexes = false; + for (IndexMaintainer maintainer : indexMetaDataCache.getIndexMaintainers()) { + isImmutable &= maintainer.isImmutableRows(); + if (!(maintainer instanceof TransformMaintainer)) { + hasNonPkColumns |= !maintainer.getIndexedColumns().isEmpty(); + } + hasLocalIndexes |= maintainer.isLocalIndex(); } + this.isImmutable = isImmutable; + this.hasNonPkColumns = hasNonPkColumns; + this.attributes = attributes; + this.replayWrite = getReplayWrite(attributes); + this.hasLocalIndexes = hasLocalIndexes; + } + + public PhoenixTransactionContext getTransactionContext() { + return indexMetaDataCache.getTransactionContext(); + } + + public List getIndexMaintainers() { + return indexMetaDataCache.getIndexMaintainers(); + } + + public Map getAttributes() { + return attributes; + } + + public int getClientVersion() { + return indexMetaDataCache.getClientVersion(); + } + + @Override + public ReplayWrite getReplayWrite() { + return replayWrite; + } + + public boolean isImmutableRows() { + return isImmutable; + } + + public boolean hasLocalIndexes() { + return hasLocalIndexes; + } + + @Override + public boolean requiresPriorRowState(Mutation m) { + return !isImmutable + || (indexMetaDataCache.getIndexMaintainers().get(0).isRowDeleted(m) && hasNonPkColumns); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/AggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/AggregatingResultIterator.java index 59a89adfcca..685fff2c22e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/AggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/AggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,21 +20,16 @@ import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Interface for scanners that either do aggregation - * or delegate to scanners that do aggregation. - * - * + * Interface for scanners that either do aggregation or delegate to scanners that do aggregation. 
* @since 0.1 */ public interface AggregatingResultIterator extends ResultIterator { - /** - * Provides a means of re-aggregating a result row. For - * scanners that need to look ahead (i.e. {@link org.apache.phoenix.iterate.OrderedAggregatingResultIterator} - * @param result the row to re-aggregate - * @return Aggregator[] results - */ - Aggregator[] aggregate(Tuple result); + /** + * Provides a means of re-aggregating a result row. For scanners that need to look ahead (i.e. + * {@link org.apache.phoenix.iterate.OrderedAggregatingResultIterator} + * @param result the row to re-aggregate + * @return Aggregator[] results + */ + Aggregator[] aggregate(Tuple result); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java index 2e3ee10dfdd..bc9137d64b5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,88 +26,87 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.Aggregators; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.PhoenixKeyValueUtil; /** - * - * Base class for result scanners that aggregate the row count value for rows with - * duplicate keys. This result scanner assumes that the results of the inner result - * scanner are returned in order of grouping keys. - * + * Base class for result scanners that aggregate the row count value for rows with duplicate keys. + * This result scanner assumes that the results of the inner result scanner are returned in order of + * grouping keys. 
*/ -public abstract class BaseGroupedAggregatingResultIterator implements - AggregatingResultIterator { - private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; - protected final PeekingResultIterator resultIterator; - protected final Aggregators aggregators; - private ImmutableBytesWritable currentKey; - private ImmutableBytesWritable nextKey; +public abstract class BaseGroupedAggregatingResultIterator implements AggregatingResultIterator { + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + protected final PeekingResultIterator resultIterator; + protected final Aggregators aggregators; + private ImmutableBytesWritable currentKey; + private ImmutableBytesWritable nextKey; - public BaseGroupedAggregatingResultIterator( - PeekingResultIterator resultIterator, Aggregators aggregators) { - if (resultIterator == null) throw new NullPointerException(); - if (aggregators == null) throw new NullPointerException(); - this.resultIterator = resultIterator; - this.aggregators = aggregators; - this.currentKey = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - this.nextKey = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - } - - protected abstract ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException; - protected abstract Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException; + public BaseGroupedAggregatingResultIterator(PeekingResultIterator resultIterator, + Aggregators aggregators) { + if (resultIterator == null) throw new NullPointerException(); + if (aggregators == null) throw new NullPointerException(); + this.resultIterator = resultIterator; + this.aggregators = aggregators; + this.currentKey = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + this.nextKey = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + } + + protected abstract ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) + throws SQLException; - @Override - public Tuple next() throws SQLException { - Tuple result = resultIterator.next(); - if (result == null) { - return null; - } - if (currentKey.get() == UNITIALIZED_KEY_BUFFER) { - getGroupingKey(result, currentKey); - } - Aggregator[] rowAggregators = aggregators.getAggregators(); - aggregators.reset(rowAggregators); - while (true) { - aggregators.aggregate(rowAggregators, result); - Tuple nextResult = resultIterator.peek(); - if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) { - break; - } - result = resultIterator.next(); - } - - byte[] value = aggregators.toBytes(rowAggregators); - Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); - currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength()); - return tuple; + protected abstract Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException; + + @Override + public Tuple next() throws SQLException { + Tuple result = resultIterator.next(); + if (result == null) { + return null; } - - @Override - public void close() throws SQLException { - resultIterator.close(); + if (currentKey.get() == UNITIALIZED_KEY_BUFFER) { + getGroupingKey(result, currentKey); } - - @Override - public Aggregator[] aggregate(Tuple result) { - Aggregator[] rowAggregators = aggregators.getAggregators(); - aggregators.reset(rowAggregators); - aggregators.aggregate(rowAggregators, result); - return rowAggregators; + Aggregator[] rowAggregators = aggregators.getAggregators(); + 
aggregators.reset(rowAggregators); + while (true) { + aggregators.aggregate(rowAggregators, result); + Tuple nextResult = resultIterator.peek(); + if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) { + break; + } + result = resultIterator.next(); } - @Override - public void explain(List planSteps) { - resultIterator.explain(planSteps); - } + byte[] value = aggregators.toBytes(rowAggregators); + Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(currentKey, + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); + currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength()); + return tuple; + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - resultIterator.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void close() throws SQLException { + resultIterator.close(); + } + + @Override + public Aggregator[] aggregate(Tuple result) { + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + aggregators.aggregate(rowAggregators, result); + return rowAggregators; + } + + @Override + public void explain(List planSteps) { + resultIterator.explain(planSteps); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + resultIterator.explain(planSteps, explainPlanAttributesBuilder); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterator.java index 17b6372a7ae..d2bdf2d3461 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,32 +17,28 @@ */ package org.apache.phoenix.iterate; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; - import java.sql.SQLException; import java.util.List; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; + /** - * - * Abstract base class for ResultIterator implementations that - * do nothing on close and have no explain plan steps - * - * + * Abstract base class for ResultIterator implementations that do nothing on close and have no + * explain plan steps * @since 1.2 */ public abstract class BaseResultIterator implements ResultIterator { - - @Override - public void close() throws SQLException { - } - @Override - public void explain(List planSteps) { - } + @Override + public void close() throws SQLException { + } + + @Override + public void explain(List planSteps) { + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java index 30bc118da91..feefd802f15 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,8 +63,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.RowProjector; @@ -111,8 +110,11 @@ import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; import org.apache.phoenix.schema.stats.StatisticsUtil; -import org.apache.phoenix.schema.types.PVarbinaryEncoded; import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.Closeables; @@ -129,1714 +131,1758 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** - * - * Class that parallelizes the scan over a table using the ExecutorService provided. Each region of the table will be scanned in parallel with - * the results accessible through {@link #getIterators()} - * - * + * Class that parallelizes the scan over a table using the ExecutorService provided. Each region of + * the table will be scanned in parallel with the results accessible through {@link #getIterators()} * @since 0.1 */ public abstract class BaseResultIterators extends ExplainTable implements ResultIterators { - public static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class); - private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20; - private static final int MIN_SEEK_TO_COLUMN_VERSION = VersionUtil.encodeVersion("0", "98", "12"); - private final List> scans; - private final List regionLocations; - private final List splits; - private final byte[] physicalTableName; - protected final QueryPlan plan; - protected final String scanId; - protected final MutationState mutationState; - protected final ParallelScanGrouper scanGrouper; - // TODO: too much nesting here - breakup into new classes. 
- private final List>>>> allFutures; - private Long estimatedRows; - private Long estimatedSize; - private Long estimateInfoTimestamp; - private boolean hasGuidePosts; - private Scan scan; - private final boolean useStatsForParallelization; - protected Map caches; - private final QueryPlan dataPlan; - private static boolean forTestingSetTimeoutToMaxToLetQueryPassHere = false; - private int numRegionLocationLookups = 0; - - static final Function TO_KEY_RANGE = new Function() { - @Override - public KeyRange apply(HRegionLocation region) { - return KeyRange.getKeyRange(region.getRegion().getStartKey(), region.getRegion().getEndKey()); - } + public static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class); + private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20; + private static final int MIN_SEEK_TO_COLUMN_VERSION = VersionUtil.encodeVersion("0", "98", "12"); + private final List> scans; + private final List regionLocations; + private final List splits; + private final byte[] physicalTableName; + protected final QueryPlan plan; + protected final String scanId; + protected final MutationState mutationState; + protected final ParallelScanGrouper scanGrouper; + // TODO: too much nesting here - breakup into new classes. + private final List>>>> allFutures; + private Long estimatedRows; + private Long estimatedSize; + private Long estimateInfoTimestamp; + private boolean hasGuidePosts; + private Scan scan; + private final boolean useStatsForParallelization; + protected Map caches; + private final QueryPlan dataPlan; + private static boolean forTestingSetTimeoutToMaxToLetQueryPassHere = false; + private int numRegionLocationLookups = 0; + + static final Function TO_KEY_RANGE = + new Function() { + @Override + public KeyRange apply(HRegionLocation region) { + return KeyRange.getKeyRange(region.getRegion().getStartKey(), + region.getRegion().getEndKey()); + } }; - private PTable getTable() { - return plan.getTableRef().getTable(); - } - - abstract protected boolean isSerial(); - - protected boolean useStats() { - /* - * Don't use guide posts: - * 1) If we're collecting stats, as in this case we need to scan entire - * regions worth of data to track where to put the guide posts. - * 2) If the query is going to be executed serially. - */ - if (ScanUtil.isAnalyzeTable(scan)) { - return false; - } - return !isSerial(); + private PTable getTable() { + return plan.getTableRef().getTable(); + } + + abstract protected boolean isSerial(); + + protected boolean useStats() { + /* + * Don't use guide posts: 1) If we're collecting stats, as in this case we need to scan entire + * regions worth of data to track where to put the guide posts. 2) If the query is going to be + * executed serially. 
+ */ + if (ScanUtil.isAnalyzeTable(scan)) { + return false; } - - private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException { - StatementContext context = plan.getContext(); - TableRef tableRef = plan.getTableRef(); - boolean wildcardIncludesDynamicCols = context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - PTable table = tableRef.getTable(); - - Map> familyMap = scan.getFamilyMap(); - // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys - if (context.getConnection().isDescVarLengthRowKeyUpgrade()) { - // We project *all* KeyValues across all column families as we make a pass over - // a physical table and we want to make sure we catch all KeyValues that may be - // dynamic or part of an updatable view. - familyMap.clear(); - scan.readAllVersions(); - scan.setFilter(null); // Remove any filter - scan.setRaw(true); // Traverse (and subsequently clone) all KeyValues - // Pass over PTable so we can re-write rows according to the row key schema - scan.setAttribute(BaseScannerRegionObserverConstants.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserverHelper.serialize(table)); + return !isSerial(); + } + + private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, + Scan scan) throws SQLException { + StatementContext context = plan.getContext(); + TableRef tableRef = plan.getTableRef(); + boolean wildcardIncludesDynamicCols = + context.getConnection().getQueryServices().getConfiguration() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + PTable table = tableRef.getTable(); + + Map> familyMap = scan.getFamilyMap(); + // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys + if (context.getConnection().isDescVarLengthRowKeyUpgrade()) { + // We project *all* KeyValues across all column families as we make a pass over + // a physical table and we want to make sure we catch all KeyValues that may be + // dynamic or part of an updatable view. + familyMap.clear(); + scan.readAllVersions(); + scan.setFilter(null); // Remove any filter + scan.setRaw(true); // Traverse (and subsequently clone) all KeyValues + // Pass over PTable so we can re-write rows according to the row key schema + scan.setAttribute(BaseScannerRegionObserverConstants.UPGRADE_DESC_ROW_KEY, + UngroupedAggregateRegionObserverHelper.serialize(table)); + } else { + FilterableStatement statement = plan.getStatement(); + RowProjector projector = plan.getProjector(); + boolean optimizeProjection = false; + boolean keyOnlyFilter = familyMap.isEmpty() && !wildcardIncludesDynamicCols + && context.getWhereConditionColumns().isEmpty(); + if (!projector.projectEverything()) { + // If nothing projected into scan and we only have one column family, just allow everything + // to be projected and use a FirstKeyOnlyFilter to skip from row to row. This turns out to + // be quite a bit faster. + // Where condition columns also will get added into familyMap + // When where conditions are present, we cannot add FirstKeyOnlyFilter at beginning. + // FIXME: we only enter this if the number of column families is 1 because otherwise + // local indexes break because it appears that the column families in the PTable do + // not match the actual column families of the table (which is bad). 
+ if (keyOnlyFilter && table.getColumnFamilies().size() == 1) { + // Project the one column family. We must project a column family since it's possible + // that there are other non declared column families that we need to ignore. + scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes()); } else { - FilterableStatement statement = plan.getStatement(); - RowProjector projector = plan.getProjector(); - boolean optimizeProjection = false; - boolean keyOnlyFilter = familyMap.isEmpty() && !wildcardIncludesDynamicCols && - context.getWhereConditionColumns().isEmpty(); - if (!projector.projectEverything()) { - // If nothing projected into scan and we only have one column family, just allow everything - // to be projected and use a FirstKeyOnlyFilter to skip from row to row. This turns out to - // be quite a bit faster. - // Where condition columns also will get added into familyMap - // When where conditions are present, we cannot add FirstKeyOnlyFilter at beginning. - // FIXME: we only enter this if the number of column families is 1 because otherwise - // local indexes break because it appears that the column families in the PTable do - // not match the actual column families of the table (which is bad). - if (keyOnlyFilter && table.getColumnFamilies().size() == 1) { - // Project the one column family. We must project a column family since it's possible - // that there are other non declared column families that we need to ignore. - scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes()); - } else { - optimizeProjection = true; - if (projector.projectEveryRow()) { - if (table.getViewType() == ViewType.MAPPED) { - // Since we don't have the empty key value in MAPPED tables, - // we must project all CFs in HRS. However, only the - // selected column values are returned back to client. - context.getWhereConditionColumns().clear(); - for (PColumnFamily family : table.getColumnFamilies()) { - context.addWhereConditionColumn(family.getName().getBytes(), null); - } - } else { - byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); - // Project empty key value unless the column family containing it has - // been projected in its entirety. - if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) { - scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst()); - } - } - } - } + optimizeProjection = true; + if (projector.projectEveryRow()) { + if (table.getViewType() == ViewType.MAPPED) { + // Since we don't have the empty key value in MAPPED tables, + // we must project all CFs in HRS. However, only the + // selected column values are returned back to client. + context.getWhereConditionColumns().clear(); + for (PColumnFamily family : table.getColumnFamilies()) { + context.addWhereConditionColumn(family.getName().getBytes(), null); + } } else { - boolean containsNullableGroubBy = false; - if (!plan.getOrderBy().isEmpty()) { - for (OrderByExpression orderByExpression : plan.getOrderBy() - .getOrderByExpressions()) { - if (orderByExpression.getExpression().isNullable()) { - containsNullableGroubBy = true; - break; - } - } - } - if (containsNullableGroubBy) { - byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); - if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) { - scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table) - .getFirst()); - } - } + byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); + // Project empty key value unless the column family containing it has + // been projected in its entirety. 
+ if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) { + scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst()); + } } - // Add FirstKeyOnlyFilter or EmptyColumnOnlyFilter if there are no references - // to key value columns. We use FirstKeyOnlyFilter when possible - if (keyOnlyFilter) { - byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); - byte[] ecq = table.getEncodingScheme() == NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - if (table.getEncodingScheme() == NON_ENCODED_QUALIFIERS) { - ScanUtil.andFilterAtBeginning(scan, new EmptyColumnOnlyFilter(ecf, ecq)); - } else if (table.getColumnFamilies().size() == 0) { - ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter()); - } else { - // There are more than column families. If the empty column family is the - // first column family lexicographically then FirstKeyOnlyFilter would return - // the empty column - List families = new ArrayList<>(table.getColumnFamilies().size()); - for (PColumnFamily family : table.getColumnFamilies()) { - families.add(family.getName().getBytes()); - } - Collections.sort(families, Bytes.BYTES_COMPARATOR); - byte[] firstFamily = families.get(0); - if (Bytes.compareTo(ecf, 0, ecf.length, - firstFamily, 0, firstFamily.length) == 0) { - ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter()); - } else { - ScanUtil.andFilterAtBeginning(scan, new EmptyColumnOnlyFilter(ecf, ecq)); - } - } + } + } + } else { + boolean containsNullableGroubBy = false; + if (!plan.getOrderBy().isEmpty()) { + for (OrderByExpression orderByExpression : plan.getOrderBy().getOrderByExpressions()) { + if (orderByExpression.getExpression().isNullable()) { + containsNullableGroubBy = true; + break; } + } + } + if (containsNullableGroubBy) { + byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); + if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) { + scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst()); + } + } + } + // Add FirstKeyOnlyFilter or EmptyColumnOnlyFilter if there are no references + // to key value columns. We use FirstKeyOnlyFilter when possible + if (keyOnlyFilter) { + byte[] ecf = SchemaUtil.getEmptyColumnFamily(table); + byte[] ecq = table.getEncodingScheme() == NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + if (table.getEncodingScheme() == NON_ENCODED_QUALIFIERS) { + ScanUtil.andFilterAtBeginning(scan, new EmptyColumnOnlyFilter(ecf, ecq)); + } else if (table.getColumnFamilies().size() == 0) { + ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter()); + } else { + // There are more than column families. 
If the empty column family is the + // first column family lexicographically then FirstKeyOnlyFilter would return + // the empty column + List families = new ArrayList<>(table.getColumnFamilies().size()); + for (PColumnFamily family : table.getColumnFamilies()) { + families.add(family.getName().getBytes()); + } + Collections.sort(families, Bytes.BYTES_COMPARATOR); + byte[] firstFamily = families.get(0); + if (Bytes.compareTo(ecf, 0, ecf.length, firstFamily, 0, firstFamily.length) == 0) { + ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter()); + } else { + ScanUtil.andFilterAtBeginning(scan, new EmptyColumnOnlyFilter(ecf, ecq)); + } + } + } - if (perScanLimit != null) { - if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER) == null) { - ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit)); - } else { - // if we have an index filter and a limit, handle the limit after the filter - // we cast the limit to a long even though it passed as an Integer so that - // if we need extend this in the future the serialization is unchanged - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT, - Bytes.toBytes((long) perScanLimit)); - } - } + if (perScanLimit != null) { + if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER) == null) { + ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit)); + } else { + // if we have an index filter and a limit, handle the limit after the filter + // we cast the limit to a long even though it passed as an Integer so that + // if we need extend this in the future the serialization is unchanged + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT, + Bytes.toBytes((long) perScanLimit)); + } + } - if (offset != null) { - ScanUtil.addOffsetAttribute(scan, offset); - } - GroupBy groupBy = plan.getGroupBy(); - int cols = groupBy.getOrderPreservingColumnCount(); - if (cols > 0 && keyOnlyFilter && - !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && - cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && - groupBy.isOrderPreserving() && - (context.getAggregationManager().isEmpty() || groupBy.isUngroupedAggregate())) { - - ScanUtil.andFilterAtEnd(scan, - new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(),cols)); - if (!groupBy.isUngroupedAggregate() && plan.getLimit() != null) { - // We can push the limit to the server,but for UngroupedAggregate - // we can not push the limit. - ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit())); - } - } - scan.setAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME, new byte[]{table.getEncodingScheme().getSerializedMetadataValue()}); - scan.setAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[]{table.getImmutableStorageScheme().getSerializedMetadataValue()}); - // we use this flag on the server side to determine which value column qualifier to use in the key value we return from server. - scan.setAttribute(BaseScannerRegionObserverConstants.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true)); - // When analyzing the table, there is no look up for key values being done. - // So there is no point setting the range. 
- if (!ScanUtil.isAnalyzeTable(scan)) { - setQualifierRanges(keyOnlyFilter, table, scan, context); - } - if (optimizeProjection) { - optimizeProjection(context, scan, table, statement); - } + if (offset != null) { + ScanUtil.addOffsetAttribute(scan, offset); + } + GroupBy groupBy = plan.getGroupBy(); + int cols = groupBy.getOrderPreservingColumnCount(); + if ( + cols > 0 && keyOnlyFilter + && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) + && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() + && groupBy.isOrderPreserving() + && (context.getAggregationManager().isEmpty() || groupBy.isUngroupedAggregate()) + ) { + + ScanUtil.andFilterAtEnd(scan, + new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols)); + if (!groupBy.isUngroupedAggregate() && plan.getLimit() != null) { + // We can push the limit to the server,but for UngroupedAggregate + // we can not push the limit. + ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit())); } + } + scan.setAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME, + new byte[] { table.getEncodingScheme().getSerializedMetadataValue() }); + scan.setAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME, + new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() }); + // we use this flag on the server side to determine which value column qualifier to use in the + // key value we return from server. + scan.setAttribute(BaseScannerRegionObserverConstants.USE_NEW_VALUE_COLUMN_QUALIFIER, + Bytes.toBytes(true)); + // When analyzing the table, there is no look up for key values being done. + // So there is no point setting the range. + if (!ScanUtil.isAnalyzeTable(scan)) { + setQualifierRanges(keyOnlyFilter, table, scan, context); + } + if (optimizeProjection) { + optimizeProjection(context, scan, table, statement); + } } - - private static void setQualifierRanges(boolean keyOnlyFilter, PTable table, Scan scan, - StatementContext context) throws SQLException { - if (EncodedColumnsUtil.useEncodedQualifierListOptimization(table, scan)) { - Pair minMaxQualifiers = new Pair<>(); - for (Pair whereCol : context.getWhereConditionColumns()) { - byte[] cq = whereCol.getSecond(); - if (cq != null) { - int qualifier = table.getEncodingScheme().decode(cq); - adjustQualifierRange(qualifier, minMaxQualifiers); - } - } - Map> familyMap = scan.getFamilyMap(); - for (Entry> entry : familyMap.entrySet()) { - if (entry.getValue() != null) { - for (byte[] cq : entry.getValue()) { - if (cq != null) { - int qualifier = table.getEncodingScheme().decode(cq); - adjustQualifierRange(qualifier, minMaxQualifiers); - } - } - } else { - byte[] cf = entry.getKey(); - String family = Bytes.toString(cf); - if (table.getType() == INDEX && table.getIndexType() == LOCAL - && !IndexUtil.isLocalIndexFamily(family)) { - // TODO: samarth confirm with James why do we need this hack here :( - family = IndexUtil.getLocalIndexColumnFamily(family); - } - byte[] familyBytes = Bytes.toBytes(family); - NavigableSet qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); - if (Bytes.equals(familyBytes, SchemaUtil.getEmptyColumnFamily(table))) { - // If the column family is also the empty column family, project the - // empty key value column - Pair emptyKeyValueInfo = - EncodedColumnsUtil.getEmptyKeyValueInfo(table); - qualifierSet.add(emptyKeyValueInfo.getFirst()); - } - // In case of a keyOnlyFilter, we only need to project the - // empty key value column - if (!keyOnlyFilter) { - Pair 
qualifierRangeForFamily = - EncodedColumnsUtil.setQualifiersForColumnsInFamily(table, family, - qualifierSet); - familyMap.put(familyBytes, qualifierSet); - if (qualifierRangeForFamily != null) { - adjustQualifierRange(qualifierRangeForFamily.getFirst(), - minMaxQualifiers); - adjustQualifierRange(qualifierRangeForFamily.getSecond(), - minMaxQualifiers); - } - } - } + } + + private static void setQualifierRanges(boolean keyOnlyFilter, PTable table, Scan scan, + StatementContext context) throws SQLException { + if (EncodedColumnsUtil.useEncodedQualifierListOptimization(table, scan)) { + Pair minMaxQualifiers = new Pair<>(); + for (Pair whereCol : context.getWhereConditionColumns()) { + byte[] cq = whereCol.getSecond(); + if (cq != null) { + int qualifier = table.getEncodingScheme().decode(cq); + adjustQualifierRange(qualifier, minMaxQualifiers); + } + } + Map> familyMap = scan.getFamilyMap(); + for (Entry> entry : familyMap.entrySet()) { + if (entry.getValue() != null) { + for (byte[] cq : entry.getValue()) { + if (cq != null) { + int qualifier = table.getEncodingScheme().decode(cq); + adjustQualifierRange(qualifier, minMaxQualifiers); } - if (minMaxQualifiers.getFirst() != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.MIN_QUALIFIER, - Bytes.toBytes(minMaxQualifiers.getFirst())); - scan.setAttribute(BaseScannerRegionObserverConstants.MAX_QUALIFIER, - Bytes.toBytes(minMaxQualifiers.getSecond())); - ScanUtil.setQualifierRangesOnFilter(scan, minMaxQualifiers); + } + } else { + byte[] cf = entry.getKey(); + String family = Bytes.toString(cf); + if ( + table.getType() == INDEX && table.getIndexType() == LOCAL + && !IndexUtil.isLocalIndexFamily(family) + ) { + // TODO: samarth confirm with James why do we need this hack here :( + family = IndexUtil.getLocalIndexColumnFamily(family); + } + byte[] familyBytes = Bytes.toBytes(family); + NavigableSet qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); + if (Bytes.equals(familyBytes, SchemaUtil.getEmptyColumnFamily(table))) { + // If the column family is also the empty column family, project the + // empty key value column + Pair emptyKeyValueInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(table); + qualifierSet.add(emptyKeyValueInfo.getFirst()); + } + // In case of a keyOnlyFilter, we only need to project the + // empty key value column + if (!keyOnlyFilter) { + Pair qualifierRangeForFamily = + EncodedColumnsUtil.setQualifiersForColumnsInFamily(table, family, qualifierSet); + familyMap.put(familyBytes, qualifierSet); + if (qualifierRangeForFamily != null) { + adjustQualifierRange(qualifierRangeForFamily.getFirst(), minMaxQualifiers); + adjustQualifierRange(qualifierRangeForFamily.getSecond(), minMaxQualifiers); } + } } + } + if (minMaxQualifiers.getFirst() != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.MIN_QUALIFIER, + Bytes.toBytes(minMaxQualifiers.getFirst())); + scan.setAttribute(BaseScannerRegionObserverConstants.MAX_QUALIFIER, + Bytes.toBytes(minMaxQualifiers.getSecond())); + ScanUtil.setQualifierRangesOnFilter(scan, minMaxQualifiers); + } } + } - private static void adjustQualifierRange(Integer qualifier, Pair minMaxQualifiers) { - if (minMaxQualifiers.getFirst() == null) { - minMaxQualifiers.setFirst(qualifier); - minMaxQualifiers.setSecond(qualifier); - } else { - if (minMaxQualifiers.getFirst() > qualifier) { - minMaxQualifiers.setFirst(qualifier); - } else if (minMaxQualifiers.getSecond() < qualifier) { - minMaxQualifiers.setSecond(qualifier); - } - } + private static void adjustQualifierRange(Integer 
qualifier, + Pair minMaxQualifiers) { + if (minMaxQualifiers.getFirst() == null) { + minMaxQualifiers.setFirst(qualifier); + minMaxQualifiers.setSecond(qualifier); + } else { + if (minMaxQualifiers.getFirst() > qualifier) { + minMaxQualifiers.setFirst(qualifier); + } else if (minMaxQualifiers.getSecond() < qualifier) { + minMaxQualifiers.setSecond(qualifier); + } } - - private static void optimizeProjection(StatementContext context, Scan scan, PTable table, FilterableStatement statement) { - Map> familyMap = scan.getFamilyMap(); - // columnsTracker contain cf -> qualifiers which should get returned. - Map> columnsTracker = - new TreeMap>(); - Set conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); - int referencedCfCount = familyMap.size(); - QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); - ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); - BitSet trackedColumnsBitset = isPossibleToUseEncodedCQFilter(encodingScheme, storageScheme) && !hasDynamicColumns(table) ? new BitSet(10) : null; - boolean filteredColumnNotInProjection = false; - - for (Pair whereCol : context.getWhereConditionColumns()) { - byte[] filteredFamily = whereCol.getFirst(); - if (!(familyMap.containsKey(filteredFamily))) { - referencedCfCount++; - filteredColumnNotInProjection = true; - } else if (!filteredColumnNotInProjection) { - NavigableSet projectedColumns = familyMap.get(filteredFamily); - if (projectedColumns != null) { - byte[] filteredColumn = whereCol.getSecond(); - if (filteredColumn == null) { - filteredColumnNotInProjection = true; - } else { - filteredColumnNotInProjection = !projectedColumns.contains(filteredColumn); - } - } - } + } + + private static void optimizeProjection(StatementContext context, Scan scan, PTable table, + FilterableStatement statement) { + Map> familyMap = scan.getFamilyMap(); + // columnsTracker contain cf -> qualifiers which should get returned. + Map> columnsTracker = + new TreeMap>(); + Set conditionOnlyCfs = new TreeSet(Bytes.BYTES_COMPARATOR); + int referencedCfCount = familyMap.size(); + QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); + ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); + BitSet trackedColumnsBitset = + isPossibleToUseEncodedCQFilter(encodingScheme, storageScheme) && !hasDynamicColumns(table) + ? new BitSet(10) + : null; + boolean filteredColumnNotInProjection = false; + + for (Pair whereCol : context.getWhereConditionColumns()) { + byte[] filteredFamily = whereCol.getFirst(); + if (!(familyMap.containsKey(filteredFamily))) { + referencedCfCount++; + filteredColumnNotInProjection = true; + } else if (!filteredColumnNotInProjection) { + NavigableSet projectedColumns = familyMap.get(filteredFamily); + if (projectedColumns != null) { + byte[] filteredColumn = whereCol.getSecond(); + if (filteredColumn == null) { + filteredColumnNotInProjection = true; + } else { + filteredColumnNotInProjection = !projectedColumns.contains(filteredColumn); + } } - boolean preventSeekToColumn = false; - if (statement.getHint().hasHint(Hint.SEEK_TO_COLUMN)) { - // Allow seeking to column during filtering - preventSeekToColumn = false; - } else if (!EncodedColumnsUtil.useEncodedQualifierListOptimization(table, scan)) { - /* - * preventSeekToColumn cannot be true, even if hinted, when encoded qualifier list - * optimization is being used. 
When using the optimization, it is necessary that we - * explicitly set the column qualifiers of the column family in the scan and not just - * project the entire column family. - */ - if (statement.getHint().hasHint(Hint.NO_SEEK_TO_COLUMN)) { - // Prevent seeking to column during filtering - preventSeekToColumn = true; - } else { - int hbaseServerVersion = context.getConnection().getQueryServices().getLowestClusterHBaseVersion(); - // When only a single column family is referenced, there are no hints, and HBase server version - // is less than when the fix for HBASE-13109 went in (0.98.12), then we prevent seeking to a - // column. - preventSeekToColumn = referencedCfCount == 1 && hbaseServerVersion < MIN_SEEK_TO_COLUMN_VERSION; - } + } + } + boolean preventSeekToColumn = false; + if (statement.getHint().hasHint(Hint.SEEK_TO_COLUMN)) { + // Allow seeking to column during filtering + preventSeekToColumn = false; + } else if (!EncodedColumnsUtil.useEncodedQualifierListOptimization(table, scan)) { + /* + * preventSeekToColumn cannot be true, even if hinted, when encoded qualifier list + * optimization is being used. When using the optimization, it is necessary that we explicitly + * set the column qualifiers of the column family in the scan and not just project the entire + * column family. + */ + if (statement.getHint().hasHint(Hint.NO_SEEK_TO_COLUMN)) { + // Prevent seeking to column during filtering + preventSeekToColumn = true; + } else { + int hbaseServerVersion = + context.getConnection().getQueryServices().getLowestClusterHBaseVersion(); + // When only a single column family is referenced, there are no hints, and HBase server + // version + // is less than when the fix for HBASE-13109 went in (0.98.12), then we prevent seeking to a + // column. + preventSeekToColumn = + referencedCfCount == 1 && hbaseServerVersion < MIN_SEEK_TO_COLUMN_VERSION; + } + } + // Making sure that where condition CFs are getting scanned at HRS. + for (Pair whereCol : context.getWhereConditionColumns()) { + byte[] family = whereCol.getFirst(); + if (preventSeekToColumn) { + if (!(familyMap.containsKey(family))) { + conditionOnlyCfs.add(family); } - // Making sure that where condition CFs are getting scanned at HRS. - for (Pair whereCol : context.getWhereConditionColumns()) { - byte[] family = whereCol.getFirst(); - if (preventSeekToColumn) { - if (!(familyMap.containsKey(family))) { - conditionOnlyCfs.add(family); - } - scan.addFamily(family); + scan.addFamily(family); + } else { + if (familyMap.containsKey(family)) { + // where column's CF is present. If there are some specific columns added against this CF, + // we + // need to ensure this where column also getting added in it. + // If the select was like select cf1.*, then that itself will select the whole CF. So no + // need to + // specifically add the where column. Adding that will remove the cf1.* stuff and only + // this + // where condition column will get returned! + NavigableSet cols = familyMap.get(family); + // cols is null means the whole CF will get scanned. + if (cols != null) { + if (whereCol.getSecond() == null) { + scan.addFamily(family); } else { - if (familyMap.containsKey(family)) { - // where column's CF is present. If there are some specific columns added against this CF, we - // need to ensure this where column also getting added in it. - // If the select was like select cf1.*, then that itself will select the whole CF. So no need to - // specifically add the where column. 
Adding that will remove the cf1.* stuff and only this - // where condition column will get returned! - NavigableSet cols = familyMap.get(family); - // cols is null means the whole CF will get scanned. - if (cols != null) { - if (whereCol.getSecond() == null) { - scan.addFamily(family); - } else { - scan.addColumn(family, whereCol.getSecond()); - } - } - } else if (whereCol.getSecond() == null) { - scan.addFamily(family); - } else { - // where column's CF itself is not present in family map. We need to add the column - scan.addColumn(family, whereCol.getSecond()); - } - } - } - for (Entry> entry : familyMap.entrySet()) { - ImmutableBytesPtr cf = new ImmutableBytesPtr(entry.getKey()); - NavigableSet qs = entry.getValue(); - NavigableSet cols = null; - if (qs != null) { - cols = new TreeSet(); - for (byte[] q : qs) { - cols.add(new ImmutableBytesPtr(q)); - if (trackedColumnsBitset != null) { - int qualifier = encodingScheme.decode(q); - trackedColumnsBitset.set(qualifier); - } - } - } else { - // cannot use EncodedQualifiersColumnProjectionFilter in this case - // since there's an unknown set of qualifiers (cf.*) - trackedColumnsBitset = null; - } - columnsTracker.put(cf, cols); - } - if (!columnsTracker.isEmpty()) { - if (preventSeekToColumn) { - for (ImmutableBytesPtr f : columnsTracker.keySet()) { - // This addFamily will remove explicit cols in scan familyMap and make it as entire row. - // We don't want the ExplicitColumnTracker to be used. Instead we have the ColumnProjectionFilter - scan.addFamily(f.get()); - } - } - // We don't need this filter for aggregates, as we're not returning back what's - // in the scan in this case. We still want the other optimization that causes - // the ExplicitColumnTracker not to be used, though. - if (!statement.isAggregate() && filteredColumnNotInProjection) { - ScanUtil.andFilterAtEnd(scan, - trackedColumnsBitset != null ? new EncodedQualifiersColumnProjectionFilter(SchemaUtil.getEmptyColumnFamily(table), trackedColumnsBitset, conditionOnlyCfs, table.getEncodingScheme()) : new ColumnProjectionFilter(SchemaUtil.getEmptyColumnFamily(table), - columnsTracker, conditionOnlyCfs, EncodedColumnsUtil.usesEncodedColumnNames(table.getEncodingScheme()))); + scan.addColumn(family, whereCol.getSecond()); } + } + } else if (whereCol.getSecond() == null) { + scan.addFamily(family); + } else { + // where column's CF itself is not present in family map. We need to add the column + scan.addColumn(family, whereCol.getSecond()); } + } } - - public BaseResultIterators(QueryPlan plan, Integer perScanLimit, Integer offset, ParallelScanGrouper scanGrouper, Scan scan, Map caches, QueryPlan dataPlan) throws SQLException { - super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), - plan.getStatement().getHint(), QueryUtil.getOffsetLimit(plan.getLimit(), plan.getOffset()), offset); - this.plan = plan; - this.scan = scan; - this.caches = caches; - this.scanGrouper = scanGrouper; - this.dataPlan = dataPlan; - StatementContext context = plan.getContext(); - // Clone MutationState as the one on the connection will change if auto commit is on - // yet we need the original one with the original transaction from TableResultIterator. 
- this.mutationState = new MutationState(context.getConnection().getMutationState()); - TableRef tableRef = plan.getTableRef(); - PTable table = tableRef.getTable(); - physicalTableName = table.getPhysicalName().getBytes(); - Long currentSCN = context.getConnection().getSCN(); - if (null == currentSCN) { - currentSCN = HConstants.LATEST_TIMESTAMP; + for (Entry> entry : familyMap.entrySet()) { + ImmutableBytesPtr cf = new ImmutableBytesPtr(entry.getKey()); + NavigableSet qs = entry.getValue(); + NavigableSet cols = null; + if (qs != null) { + cols = new TreeSet(); + for (byte[] q : qs) { + cols.add(new ImmutableBytesPtr(q)); + if (trackedColumnsBitset != null) { + int qualifier = encodingScheme.decode(q); + trackedColumnsBitset.set(qualifier); + } } - // Used to tie all the scans together during logging - scanId = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()).toString(); - - initializeScan(plan, perScanLimit, offset, scan); - this.useStatsForParallelization = ScanUtil.getStatsForParallelizationProp(context.getConnection(), table); - ScansWithRegionLocations scansWithRegionLocations = getParallelScans(); - this.scans = scansWithRegionLocations.getScans(); - this.regionLocations = scansWithRegionLocations.getRegionLocations(); - List splitRanges = Lists.newArrayListWithExpectedSize(scans.size() * ESTIMATED_GUIDEPOSTS_PER_REGION); - for (List scanList : scans) { - for (Scan aScan : scanList) { - splitRanges.add(KeyRange.getKeyRange(aScan.getStartRow(), aScan.getStopRow())); - } + } else { + // cannot use EncodedQualifiersColumnProjectionFilter in this case + // since there's an unknown set of qualifiers (cf.*) + trackedColumnsBitset = null; + } + columnsTracker.put(cf, cols); + } + if (!columnsTracker.isEmpty()) { + if (preventSeekToColumn) { + for (ImmutableBytesPtr f : columnsTracker.keySet()) { + // This addFamily will remove explicit cols in scan familyMap and make it as entire row. + // We don't want the ExplicitColumnTracker to be used. Instead we have the + // ColumnProjectionFilter + scan.addFamily(f.get()); } - this.splits = ImmutableList.copyOf(splitRanges); - // If split detected, this will be more than one, but that's unlikely - this.allFutures = Lists.newArrayListWithExpectedSize(1); + } + // We don't need this filter for aggregates, as we're not returning back what's + // in the scan in this case. We still want the other optimization that causes + // the ExplicitColumnTracker not to be used, though. + if (!statement.isAggregate() && filteredColumnNotInProjection) { + ScanUtil.andFilterAtEnd(scan, + trackedColumnsBitset != null + ? 
new EncodedQualifiersColumnProjectionFilter(SchemaUtil.getEmptyColumnFamily(table), + trackedColumnsBitset, conditionOnlyCfs, table.getEncodingScheme()) + : new ColumnProjectionFilter(SchemaUtil.getEmptyColumnFamily(table), columnsTracker, + conditionOnlyCfs, + EncodedColumnsUtil.usesEncodedColumnNames(table.getEncodingScheme()))); + } } + } - @Override - public List getSplits() { - if (splits == null) - return Collections.emptyList(); - else - return splits; + public BaseResultIterators(QueryPlan plan, Integer perScanLimit, Integer offset, + ParallelScanGrouper scanGrouper, Scan scan, Map caches, + QueryPlan dataPlan) throws SQLException { + super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), + plan.getStatement().getHint(), QueryUtil.getOffsetLimit(plan.getLimit(), plan.getOffset()), + offset); + this.plan = plan; + this.scan = scan; + this.caches = caches; + this.scanGrouper = scanGrouper; + this.dataPlan = dataPlan; + StatementContext context = plan.getContext(); + // Clone MutationState as the one on the connection will change if auto commit is on + // yet we need the original one with the original transaction from TableResultIterator. + this.mutationState = new MutationState(context.getConnection().getMutationState()); + TableRef tableRef = plan.getTableRef(); + PTable table = tableRef.getTable(); + physicalTableName = table.getPhysicalName().getBytes(); + Long currentSCN = context.getConnection().getSCN(); + if (null == currentSCN) { + currentSCN = HConstants.LATEST_TIMESTAMP; } + // Used to tie all the scans together during logging + scanId = + new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()) + .toString(); - @Override - public List> getScans() { - if (scans == null) - return Collections.emptyList(); - else - return scans; + initializeScan(plan, perScanLimit, offset, scan); + this.useStatsForParallelization = + ScanUtil.getStatsForParallelizationProp(context.getConnection(), table); + ScansWithRegionLocations scansWithRegionLocations = getParallelScans(); + this.scans = scansWithRegionLocations.getScans(); + this.regionLocations = scansWithRegionLocations.getRegionLocations(); + List splitRanges = + Lists.newArrayListWithExpectedSize(scans.size() * ESTIMATED_GUIDEPOSTS_PER_REGION); + for (List scanList : scans) { + for (Scan aScan : scanList) { + splitRanges.add(KeyRange.getKeyRange(aScan.getStartRow(), aScan.getStopRow())); + } } + this.splits = ImmutableList.copyOf(splitRanges); + // If split detected, this will be more than one, but that's unlikely + this.allFutures = Lists.newArrayListWithExpectedSize(1); + } + + @Override + public List getSplits() { + if (splits == null) return Collections.emptyList(); + else return splits; + } - private List getRegionBoundaries(ParallelScanGrouper scanGrouper, - byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { - return scanGrouper.getRegionBoundaries(context, physicalTableName, startRegionBoundaryKey, - stopRegionBoundaryKey); + @Override + public List> getScans() { + if (scans == null) return Collections.emptyList(); + else return scans; + } + + private List getRegionBoundaries(ParallelScanGrouper scanGrouper, + byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { + return scanGrouper.getRegionBoundaries(context, physicalTableName, startRegionBoundaryKey, + stopRegionBoundaryKey); + } + + private static List toBoundaries(List regionLocations) { + int nBoundaries = regionLocations.size() - 1; + List ranges = 
Lists.newArrayListWithExpectedSize(nBoundaries); + for (int i = 0; i < nBoundaries; i++) { + RegionInfo regionInfo = regionLocations.get(i).getRegion(); + ranges.add(regionInfo.getEndKey()); } + return ranges; + } - private static List toBoundaries(List regionLocations) { - int nBoundaries = regionLocations.size() - 1; - List ranges = Lists.newArrayListWithExpectedSize(nBoundaries); - for (int i = 0; i < nBoundaries; i++) { - RegionInfo regionInfo = regionLocations.get(i).getRegion(); - ranges.add(regionInfo.getEndKey()); - } - return ranges; + private static int getIndexContainingInclusive(List boundaries, byte[] inclusiveKey) { + int guideIndex = Collections.binarySearch(boundaries, inclusiveKey, Bytes.BYTES_COMPARATOR); + // If we found an exact match, return the index+1, as the inclusiveKey will be contained + // in the next region (since we're matching on the end boundary). + guideIndex = (guideIndex < 0 ? -(guideIndex + 1) : (guideIndex + 1)); + return guideIndex; + } + + private static int getIndexContainingExclusive(List boundaries, byte[] exclusiveKey) { + int guideIndex = Collections.binarySearch(boundaries, exclusiveKey, Bytes.BYTES_COMPARATOR); + // If we found an exact match, return the index we found as the exclusiveKey won't be + // contained in the next region as with getIndexContainingInclusive. + guideIndex = (guideIndex < 0 ? -(guideIndex + 1) : guideIndex); + return guideIndex; + } + + private GuidePostsInfo getGuidePosts() throws SQLException { + if (!useStats() || !StatisticsUtil.isStatsEnabled(TableName.valueOf(physicalTableName))) { + return GuidePostsInfo.NO_GUIDEPOST; + } + + TreeSet whereConditions = new TreeSet(Bytes.BYTES_COMPARATOR); + for (Pair where : context.getWhereConditionColumns()) { + byte[] cf = where.getFirst(); + if (cf != null) { + whereConditions.add(cf); + } } - - private static int getIndexContainingInclusive(List boundaries, byte[] inclusiveKey) { - int guideIndex = Collections.binarySearch(boundaries, inclusiveKey, Bytes.BYTES_COMPARATOR); - // If we found an exact match, return the index+1, as the inclusiveKey will be contained - // in the next region (since we're matching on the end boundary). - guideIndex = (guideIndex < 0 ? -(guideIndex + 1) : (guideIndex + 1)); - return guideIndex; + PTable table = getTable(); + byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(getTable()); + byte[] cf = null; + if (!table.getColumnFamilies().isEmpty() && !whereConditions.isEmpty()) { + for (Pair where : context.getWhereConditionColumns()) { + byte[] whereCF = where.getFirst(); + if (Bytes.compareTo(defaultCF, whereCF) == 0) { + cf = defaultCF; + break; + } + } + if (cf == null) { + cf = context.getWhereConditionColumns().get(0).getFirst(); + } } - - private static int getIndexContainingExclusive(List boundaries, byte[] exclusiveKey) { - int guideIndex = Collections.binarySearch(boundaries, exclusiveKey, Bytes.BYTES_COMPARATOR); - // If we found an exact match, return the index we found as the exclusiveKey won't be - // contained in the next region as with getIndexContainingInclusive. - guideIndex = (guideIndex < 0 ? 
-(guideIndex + 1) : guideIndex); - return guideIndex; + if (cf == null) { + cf = defaultCF; } + GuidePostsKey key = new GuidePostsKey(physicalTableName, cf); + return context.getConnection().getQueryServices().getTableStats(key); + } - private GuidePostsInfo getGuidePosts() throws SQLException { - if (!useStats() || !StatisticsUtil.isStatsEnabled(TableName.valueOf(physicalTableName))) { - return GuidePostsInfo.NO_GUIDEPOST; - } + private static void updateEstimates(GuidePostsInfo gps, int guideIndex, + GuidePostEstimate estimate) { + estimate.rowsEstimate += gps.getRowCounts()[guideIndex]; + estimate.bytesEstimate += gps.getByteCounts()[guideIndex]; + /* + * It is possible that the timestamp of guideposts could be different. So we report the time at + * which stats information was collected as the minimum of timestamp of the guideposts that we + * will be going over. + */ + estimate.lastUpdated = Math.min(estimate.lastUpdated, gps.getGuidePostTimestamps()[guideIndex]); + } - TreeSet whereConditions = new TreeSet(Bytes.BYTES_COMPARATOR); - for (Pair where : context.getWhereConditionColumns()) { - byte[] cf = where.getFirst(); - if (cf != null) { - whereConditions.add(cf); - } - } - PTable table = getTable(); - byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(getTable()); - byte[] cf = null; - if ( !table.getColumnFamilies().isEmpty() && !whereConditions.isEmpty() ) { - for (Pair where : context.getWhereConditionColumns()) { - byte[] whereCF = where.getFirst(); - if (Bytes.compareTo(defaultCF, whereCF) == 0) { - cf = defaultCF; - break; - } - } - if (cf == null) { - cf = context.getWhereConditionColumns().get(0).getFirst(); - } + private ScansWithRegionLocations getParallelScans() throws SQLException { + // If the scan boundaries are not matching with scan in context that means we need to get + // parallel scans for the chunk after split/merge. + if (!ScanUtil.isContextScan(scan, context)) { + return getParallelScans(scan); + } + return getParallelScans(EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY); + } + + /** + * Get parallel scans of the specified scan boundaries. This can be used for getting parallel + * scans when there is split/merges while scanning a chunk. In this case we need not go by all the + * regions or guideposts. 
+ */ + private ScansWithRegionLocations getParallelScans(Scan scan) throws SQLException { + List regionLocations = + getRegionBoundaries(scanGrouper, scan.getStartRow(), scan.getStopRow()); + numRegionLocationLookups = regionLocations.size(); + List regionBoundaries = toBoundaries(regionLocations); + int regionIndex = 0; + int stopIndex = regionBoundaries.size(); + if (scan.getStartRow().length > 0) { + regionIndex = getIndexContainingInclusive(regionBoundaries, scan.getStartRow()); + } + if (scan.getStopRow().length > 0) { + stopIndex = Math.min(stopIndex, + regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), + scan.getStopRow())); + } + ParallelScansCollector parallelScans = new ParallelScansCollector(scanGrouper); + while (regionIndex <= stopIndex) { + HRegionLocation regionLocation = regionLocations.get(regionIndex); + RegionInfo regionInfo = regionLocation.getRegion(); + Scan newScan = ScanUtil.newScan(scan); + if (ScanUtil.isLocalIndex(scan)) { + ScanUtil.setLocalIndexAttributes(newScan, 0, regionInfo.getStartKey(), + regionInfo.getEndKey(), newScan.getAttribute(SCAN_START_ROW_SUFFIX), + newScan.getAttribute(SCAN_STOP_ROW_SUFFIX)); + } else { + if (Bytes.compareTo(scan.getStartRow(), regionInfo.getStartKey()) <= 0) { + newScan.setAttribute(SCAN_ACTUAL_START_ROW, regionInfo.getStartKey()); + newScan.withStartRow(regionInfo.getStartKey()); } - if (cf == null) { - cf = defaultCF; + if ( + scan.getStopRow().length == 0 || (regionInfo.getEndKey().length != 0 + && Bytes.compareTo(scan.getStopRow(), regionInfo.getEndKey()) > 0) + ) { + newScan.withStopRow(regionInfo.getEndKey()); } - GuidePostsKey key = new GuidePostsKey(physicalTableName, cf); - return context.getConnection().getQueryServices().getTableStats(key); + } + if (regionLocation.getServerName() != null) { + newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, + regionLocation.getServerName().getVersionedBytes()); + } + parallelScans.addNewScan(plan, newScan, true, regionLocation); + regionIndex++; } + return new ScansWithRegionLocations(parallelScans.getParallelScans(), + parallelScans.getRegionLocations()); + } - private static void updateEstimates(GuidePostsInfo gps, int guideIndex, GuidePostEstimate estimate) { - estimate.rowsEstimate += gps.getRowCounts()[guideIndex]; - estimate.bytesEstimate += gps.getByteCounts()[guideIndex]; - /* - * It is possible that the timestamp of guideposts could be different. - * So we report the time at which stats information was collected as the - * minimum of timestamp of the guideposts that we will be going over. - */ - estimate.lastUpdated = - Math.min(estimate.lastUpdated, - gps.getGuidePostTimestamps()[guideIndex]); + private static class GuidePostEstimate { + private long bytesEstimate; + private long rowsEstimate; + private long lastUpdated = Long.MAX_VALUE; + } + + private int computeColumnsInCommon() { + PTable dataTable; + if ((dataTable = dataPlan.getTableRef().getTable()).getBucketNum() != null) { // unable to + // compute prefix + // range for + // salted data + // table + return 0; } - private ScansWithRegionLocations getParallelScans() throws SQLException { - // If the scan boundaries are not matching with scan in context that means we need to get - // parallel scans for the chunk after split/merge. 
- if (!ScanUtil.isContextScan(scan, context)) { - return getParallelScans(scan); - } - return getParallelScans(EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY); + PTable table = getTable(); + int nColumnsOffset = dataTable.isMultiTenant() ? 1 : 0; + int nColumnsInCommon = nColumnsOffset; + List dataPKColumns = dataTable.getPKColumns(); + List indexPKColumns = table.getPKColumns(); + int nIndexPKColumns = indexPKColumns.size(); + int nDataPKColumns = dataPKColumns.size(); + // Skip INDEX_ID and tenant ID columns + for (int i = 1 + nColumnsInCommon; i < nIndexPKColumns; i++) { + PColumn indexColumn = indexPKColumns.get(i); + String indexColumnName = indexColumn.getName().getString(); + String cf = IndexUtil.getDataColumnFamilyName(indexColumnName); + if (cf.length() != 0) { + break; + } + if (i > nDataPKColumns) { + break; + } + PColumn dataColumn = dataPKColumns.get(i - 1); + String dataColumnName = dataColumn.getName().getString(); + // Ensure both name and type are the same. Because of the restrictions we have + // on PK column types (namely that you can only have a fixed width nullable + // column as your last column), the type check is more of a sanity check + // since it wouldn't make sense to have an index with every column in common. + if ( + indexColumn.getDataType() == dataColumn.getDataType() + && dataColumnName.equals(IndexUtil.getDataColumnName(indexColumnName)) + ) { + nColumnsInCommon++; + continue; + } + break; } + return nColumnsInCommon; + } - /** - * Get parallel scans of the specified scan boundaries. This can be used for getting parallel - * scans when there is split/merges while scanning a chunk. In this case we need not go by all - * the regions or guideposts. - * @param scan - * @return - * @throws SQLException - */ - private ScansWithRegionLocations getParallelScans(Scan scan) throws SQLException { - List regionLocations = - getRegionBoundaries(scanGrouper, scan.getStartRow(), scan.getStopRow()); - numRegionLocationLookups = regionLocations.size(); - List regionBoundaries = toBoundaries(regionLocations); - int regionIndex = 0; - int stopIndex = regionBoundaries.size(); - if (scan.getStartRow().length > 0) { - regionIndex = getIndexContainingInclusive(regionBoundaries, scan.getStartRow()); - } - if (scan.getStopRow().length > 0) { - stopIndex = Math.min(stopIndex, regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), scan.getStopRow())); + // public for testing + public static ScanRanges computePrefixScanRanges(ScanRanges dataScanRanges, + int nColumnsInCommon) { + if (nColumnsInCommon == 0) { + return ScanRanges.EVERYTHING; + } + + int offset = 0; + List> cnf = Lists.newArrayListWithExpectedSize(nColumnsInCommon); + int[] slotSpan = new int[nColumnsInCommon]; + boolean useSkipScan = false; + boolean hasRange = false; + List> rangesList = dataScanRanges.getRanges(); + int rangesListSize = rangesList.size(); + while (offset < nColumnsInCommon && offset < rangesListSize) { + List ranges = rangesList.get(offset); + // We use a skip scan if we have multiple ranges or if + // we have a non single key range before the last range. 
+ useSkipScan |= ranges.size() > 1 || hasRange; + cnf.add(ranges); + int rangeSpan = 1 + dataScanRanges.getSlotSpans()[offset]; + if (offset + rangeSpan > nColumnsInCommon) { + rangeSpan = nColumnsInCommon - offset; + // trim range to only be rangeSpan in length + ranges = Lists.newArrayListWithExpectedSize(cnf.get(cnf.size() - 1).size()); + for (KeyRange range : cnf.get(cnf.size() - 1)) { + range = clipRange(dataScanRanges.getSchema(), offset, rangeSpan, range); + // trim range to be only rangeSpan in length + ranges.add(range); } - ParallelScansCollector parallelScans = new ParallelScansCollector(scanGrouper); - while (regionIndex <= stopIndex) { - HRegionLocation regionLocation = regionLocations.get(regionIndex); - RegionInfo regionInfo = regionLocation.getRegion(); - Scan newScan = ScanUtil.newScan(scan); - if (ScanUtil.isLocalIndex(scan)) { - ScanUtil.setLocalIndexAttributes(newScan, 0, regionInfo.getStartKey(), - regionInfo.getEndKey(), newScan.getAttribute(SCAN_START_ROW_SUFFIX), - newScan.getAttribute(SCAN_STOP_ROW_SUFFIX)); - } else { - if (Bytes.compareTo(scan.getStartRow(), regionInfo.getStartKey()) <= 0) { - newScan.setAttribute(SCAN_ACTUAL_START_ROW, regionInfo.getStartKey()); - newScan.withStartRow(regionInfo.getStartKey()); - } - if (scan.getStopRow().length == 0 || (regionInfo.getEndKey().length != 0 - && Bytes.compareTo(scan.getStopRow(), regionInfo.getEndKey()) > 0)) { - newScan.withStopRow(regionInfo.getEndKey()); - } - } - if (regionLocation.getServerName() != null) { - newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, - regionLocation.getServerName().getVersionedBytes()); - } - parallelScans.addNewScan(plan, newScan, true, regionLocation); - regionIndex++; + cnf.set(cnf.size() - 1, ranges); + } + for (KeyRange range : ranges) { + if (!range.isSingleKey()) { + hasRange = true; + break; } - return new ScansWithRegionLocations(parallelScans.getParallelScans(), - parallelScans.getRegionLocations()); + } + slotSpan[offset] = rangeSpan - 1; + offset = offset + rangeSpan; } + useSkipScan &= dataScanRanges.useSkipScanFilter(); + slotSpan = slotSpan.length == cnf.size() ? 
slotSpan : Arrays.copyOf(slotSpan, cnf.size()); + ScanRanges commonScanRanges = + ScanRanges.create(dataScanRanges.getSchema(), cnf, slotSpan, null, useSkipScan, -1); + return commonScanRanges; + } - private static class GuidePostEstimate { - private long bytesEstimate; - private long rowsEstimate; - private long lastUpdated = Long.MAX_VALUE; + /** + * Truncates range to be a max of rangeSpan fields + * @param schema row key schema + * @param fieldIndex starting index of field with in the row key schema + * @param rangeSpan maximum field length + * @return the same range if unchanged and otherwise a new range + */ + public static KeyRange clipRange(RowKeySchema schema, int fieldIndex, int rangeSpan, + KeyRange range) { + if (range == KeyRange.EVERYTHING_RANGE) { + return range; + } + if (range == KeyRange.EMPTY_RANGE) { + return range; + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean newRange = false; + boolean lowerUnbound = range.lowerUnbound(); + boolean lowerInclusive = range.isLowerInclusive(); + byte[] lowerRange = range.getLowerRange(); + if (!lowerUnbound && lowerRange.length > 0) { + if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, lowerRange, ptr, true)) { + // Make lower range inclusive since we're decreasing the range by chopping the last part off + lowerInclusive = true; + lowerRange = ptr.copyBytes(); + newRange = true; + } + } + boolean upperUnbound = range.upperUnbound(); + boolean upperInclusive = range.isUpperInclusive(); + byte[] upperRange = range.getUpperRange(); + if (!upperUnbound && upperRange.length > 0) { + if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, upperRange, ptr, false)) { + // Make lower range inclusive since we're decreasing the range by chopping the last part off + upperInclusive = true; + upperRange = ptr.copyBytes(); + newRange = true; + } } - private int computeColumnsInCommon() { - PTable dataTable; - if ((dataTable=dataPlan.getTableRef().getTable()).getBucketNum() != null) { // unable to compute prefix range for salted data table - return 0; - } + return newRange + ? KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive) + : range; + } - PTable table = getTable(); - int nColumnsOffset = dataTable.isMultiTenant() ? 1 :0; - int nColumnsInCommon = nColumnsOffset; - List dataPKColumns = dataTable.getPKColumns(); - List indexPKColumns = table.getPKColumns(); - int nIndexPKColumns = indexPKColumns.size(); - int nDataPKColumns = dataPKColumns.size(); - // Skip INDEX_ID and tenant ID columns - for (int i = 1 + nColumnsInCommon; i < nIndexPKColumns; i++) { - PColumn indexColumn = indexPKColumns.get(i); - String indexColumnName = indexColumn.getName().getString(); - String cf = IndexUtil.getDataColumnFamilyName(indexColumnName); - if (cf.length() != 0) { - break; - } - if (i > nDataPKColumns) { - break; - } - PColumn dataColumn = dataPKColumns.get(i-1); - String dataColumnName = dataColumn.getName().getString(); - // Ensure both name and type are the same. Because of the restrictions we have - // on PK column types (namely that you can only have a fixed width nullable - // column as your last column), the type check is more of a sanity check - // since it wouldn't make sense to have an index with every column in common. 
- if (indexColumn.getDataType() == dataColumn.getDataType() - && dataColumnName.equals(IndexUtil.getDataColumnName(indexColumnName))) { - nColumnsInCommon++; - continue; - } - break; + private static boolean clipKeyRangeBytes(RowKeySchema schema, int fieldIndex, int rangeSpan, + byte[] rowKey, ImmutableBytesWritable ptr, boolean trimTrailingNulls) { + int position = 0; + int maxOffset = schema.iterator(rowKey, ptr); + byte[] newRowKey = new byte[rowKey.length]; + int offset = 0; + int trailingNullsToTrim = 0; + do { + if (schema.next(ptr, fieldIndex, maxOffset) == null) { + break; + } + System.arraycopy(ptr.get(), ptr.getOffset(), newRowKey, offset, ptr.getLength()); + offset += ptr.getLength(); + Field field = schema.getField(fieldIndex); + if (field.getDataType().isFixedWidth()) { + trailingNullsToTrim = 0; + } else { + boolean isNull = ptr.getLength() == 0; + byte[] sepBytes = + SchemaUtil.getSeparatorBytes(field.getDataType(), true, isNull, field.getSortOrder()); + for (byte sepByte : sepBytes) { + newRowKey[offset++] = sepByte; } - return nColumnsInCommon; - } - - // public for testing - public static ScanRanges computePrefixScanRanges(ScanRanges dataScanRanges, int nColumnsInCommon) { - if (nColumnsInCommon == 0) { - return ScanRanges.EVERYTHING; + if (isNull) { + if (trimTrailingNulls) { + trailingNullsToTrim++; + } else { + trailingNullsToTrim = 0; + } + } else { + // So that last zero separator byte is always trimmed + trailingNullsToTrim = 1; } - - int offset = 0; - List> cnf = Lists.newArrayListWithExpectedSize(nColumnsInCommon); - int[] slotSpan = new int[nColumnsInCommon]; - boolean useSkipScan = false; - boolean hasRange = false; - List> rangesList = dataScanRanges.getRanges(); - int rangesListSize = rangesList.size(); - while (offset < nColumnsInCommon && offset < rangesListSize) { - List ranges = rangesList.get(offset); - // We use a skip scan if we have multiple ranges or if - // we have a non single key range before the last range. - useSkipScan |= ranges.size() > 1 || hasRange; - cnf.add(ranges); - int rangeSpan = 1 + dataScanRanges.getSlotSpans()[offset]; - if (offset + rangeSpan > nColumnsInCommon) { - rangeSpan = nColumnsInCommon - offset; - // trim range to only be rangeSpan in length - ranges = Lists.newArrayListWithExpectedSize(cnf.get(cnf.size()-1).size()); - for (KeyRange range : cnf.get(cnf.size()-1)) { - range = clipRange(dataScanRanges.getSchema(), offset, rangeSpan, range); - // trim range to be only rangeSpan in length - ranges.add(range); - } - cnf.set(cnf.size()-1, ranges); - } - for (KeyRange range : ranges) { - if (!range.isSingleKey()) { - hasRange = true; - break; - } - } - slotSpan[offset] = rangeSpan - 1; - offset = offset + rangeSpan; + } + fieldIndex++; + } while (++position < rangeSpan); + // remove trailing nulls + ptr.set(newRowKey, 0, offset - trailingNullsToTrim); + // return true if we've clipped the rowKey + return maxOffset != offset; + } + + /** + * Compute the list of parallel scans to run for a given query. The inner scans may be + * concatenated together directly, while the other ones may need to be merge sorted, depending on + * the query. Also computes an estimated bytes scanned, rows scanned, and last update time of + * statistics. To compute correctly, we need to handle a couple of edge cases: 1) if a guidepost + * is equal to the start key of the scan. 2) If a guidepost is equal to the end region key. In + * both cases, we set a flag (delayAddingEst) which indicates that the previous gp should be use + * in our stats calculation. 
The normal case is that a gp is encountered which is in the scan + * range in which case it is simply added to our calculation. For the last update time, we use the + * min timestamp of the gp that are in range of the scans that will be issued. If we find no gp in + * the range, we use the gp in the first or last region of the scan. If we encounter a region with + * no gp, then we return a null value as an indication that we don't know with certainty when the + * stats were updated last. This handles the case of a split occurring for a large ingest with + * stats never having been calculated for the new region. + * @return list of parallel scans to run for a given query. + */ + private ScansWithRegionLocations getParallelScans(byte[] startKey, byte[] stopKey) + throws SQLException { + ScanRanges scanRanges = context.getScanRanges(); + PTable table = getTable(); + boolean isLocalIndex = table.getIndexType() == IndexType.LOCAL; + GuidePostEstimate estimates = new GuidePostEstimate(); + if (!isLocalIndex && scanRanges.isPointLookup() && !scanRanges.useSkipScanFilter()) { + List> parallelScans = Lists.newArrayListWithExpectedSize(1); + List scans = Lists.newArrayListWithExpectedSize(1); + Scan scanFromContext = context.getScan(); + Integer limit = plan.getLimit(); + boolean isAggregate = plan.getStatement().isAggregate(); + if (scanRanges.getPointLookupCount() == 1 && limit == null && !isAggregate) { + // leverage bloom filter for single key point lookup by turning scan to + // Get Scan#isGetScan(). There should also be no limit on the point lookup query. + // The limit and the aggregate check is needed to handle cases where a child view + // extends the parent's PK and you insert data through the child but do a point + // lookup using the parent's PK. Since the parent's PK is only a prefix of the + // actual PK we can't do a Get but need to do a regular scan with the stop key + // set to the next key after the start key. + try { + scanFromContext = new Scan(context.getScan()); + } catch (IOException e) { + LOGGER.error("Failure to construct point lookup scan", e); + throw new PhoenixIOException(e); } - useSkipScan &= dataScanRanges.useSkipScanFilter(); - slotSpan = slotSpan.length == cnf.size() ? 
slotSpan : Arrays.copyOf(slotSpan, cnf.size()); - ScanRanges commonScanRanges = ScanRanges.create(dataScanRanges.getSchema(), cnf, slotSpan, null, useSkipScan, -1); - return commonScanRanges; + scanFromContext.withStopRow(scanFromContext.getStartRow(), + scanFromContext.includeStartRow()); + } + scans.add(scanFromContext); + parallelScans.add(scans); + generateEstimates(scanRanges, table, GuidePostsInfo.NO_GUIDEPOST, + GuidePostsInfo.NO_GUIDEPOST.isEmptyGuidePost(), parallelScans, estimates, Long.MAX_VALUE, + false); + // we don't retrieve region location for the given scan range + return new ScansWithRegionLocations(parallelScans, null); } - - /** - * Truncates range to be a max of rangeSpan fields - * @param schema row key schema - * @param fieldIndex starting index of field with in the row key schema - * @param rangeSpan maximum field length - * @return the same range if unchanged and otherwise a new range - */ - public static KeyRange clipRange(RowKeySchema schema, int fieldIndex, int rangeSpan, KeyRange range) { - if (range == KeyRange.EVERYTHING_RANGE) { - return range; - } - if (range == KeyRange.EMPTY_RANGE) { - return range; + byte[] sampleProcessedSaltByte = + SchemaUtil.processSplit(new byte[] { 0 }, table.getPKColumns()); + byte[] splitPostfix = + Arrays.copyOfRange(sampleProcessedSaltByte, 1, sampleProcessedSaltByte.length); + boolean isSalted = table.getBucketNum() != null; + GuidePostsInfo gps = getGuidePosts(); + // case when stats wasn't collected + hasGuidePosts = gps != GuidePostsInfo.NO_GUIDEPOST; + // Case when stats collection did run but there possibly wasn't enough data. In such a + // case we generate an empty guide post with the byte estimate being set as guide post + // width. + boolean emptyGuidePost = gps.isEmptyGuidePost(); + byte[] startRegionBoundaryKey = startKey; + byte[] stopRegionBoundaryKey = stopKey; + int columnsInCommon = 0; + ScanRanges prefixScanRanges = ScanRanges.EVERYTHING; + boolean traverseAllRegions = isSalted || isLocalIndex; + if (isLocalIndex) { + // TODO: when implementing PHOENIX-4585, we should change this to an assert + // as we should always have a data plan when a local index is being used. 
+ if (dataPlan != null && dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity + // check + prefixScanRanges = computePrefixScanRanges(dataPlan.getContext().getScanRanges(), + columnsInCommon = computeColumnsInCommon()); + KeyRange prefixRange = prefixScanRanges.getScanRange(); + if (!prefixRange.lowerUnbound()) { + startRegionBoundaryKey = prefixRange.getLowerRange(); } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean newRange = false; - boolean lowerUnbound = range.lowerUnbound(); - boolean lowerInclusive = range.isLowerInclusive(); - byte[] lowerRange = range.getLowerRange(); - if (!lowerUnbound && lowerRange.length > 0) { - if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, lowerRange, ptr, true)) { - // Make lower range inclusive since we're decreasing the range by chopping the last part off - lowerInclusive = true; - lowerRange = ptr.copyBytes(); - newRange = true; - } + if (!prefixRange.upperUnbound()) { + stopRegionBoundaryKey = prefixRange.getUpperRange(); } - boolean upperUnbound = range.upperUnbound(); - boolean upperInclusive = range.isUpperInclusive(); - byte[] upperRange = range.getUpperRange(); - if (!upperUnbound && upperRange.length > 0) { - if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, upperRange, ptr, false)) { - // Make lower range inclusive since we're decreasing the range by chopping the last part off - upperInclusive = true; - upperRange = ptr.copyBytes(); - newRange = true; - } + } + } else if (!traverseAllRegions) { + byte[] scanStartRow = scan.getStartRow(); + if (scanStartRow.length != 0 && Bytes.compareTo(scanStartRow, startKey) > 0) { + startRegionBoundaryKey = startKey = scanStartRow; + } + byte[] scanStopRow = scan.getStopRow(); + if ( + stopKey.length == 0 + || (scanStopRow.length != 0 && Bytes.compareTo(scanStopRow, stopKey) < 0) + ) { + stopRegionBoundaryKey = stopKey = scanStopRow; + } + } + + int regionIndex = 0; + int startRegionIndex = 0; + + List regionLocations; + if (isSalted && !isLocalIndex) { + // key prefix = salt num + view index id + tenant id + // If salting is used with tenant or view index id, scan start and end + // rowkeys will not be empty. We need to generate region locations for + // all the scan range such that we cover (each salt bucket num) + (prefix starting from + // index position 1 to cover view index and/or tenant id and/or remaining prefix). + if (scan.getStartRow().length > 0 && scan.getStopRow().length > 0) { + regionLocations = new ArrayList<>(); + for (int i = 0; i < getTable().getBucketNum(); i++) { + byte[] saltStartRegionKey = new byte[scan.getStartRow().length]; + saltStartRegionKey[0] = (byte) i; + System.arraycopy(scan.getStartRow(), 1, saltStartRegionKey, 1, + scan.getStartRow().length - 1); + + byte[] saltStopRegionKey = new byte[scan.getStopRow().length]; + saltStopRegionKey[0] = (byte) i; + System.arraycopy(scan.getStopRow(), 1, saltStopRegionKey, 1, + scan.getStopRow().length - 1); + + regionLocations + .addAll(getRegionBoundaries(scanGrouper, saltStartRegionKey, saltStopRegionKey)); } - - return newRange ? KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive) : range; + } else { + // If scan start and end rowkeys are empty, we end up fetching all region locations. + regionLocations = + getRegionBoundaries(scanGrouper, startRegionBoundaryKey, stopRegionBoundaryKey); + } + } else { + // For range scans, startRegionBoundaryKey and stopRegionBoundaryKey should refer + // to the boundary specified by the scan context. 
+ regionLocations = + getRegionBoundaries(scanGrouper, startRegionBoundaryKey, stopRegionBoundaryKey); } - private static boolean clipKeyRangeBytes(RowKeySchema schema, int fieldIndex, int rangeSpan, byte[] rowKey, ImmutableBytesWritable ptr, boolean trimTrailingNulls) { - int position = 0; - int maxOffset = schema.iterator(rowKey, ptr); - byte[] newRowKey = new byte[rowKey.length]; - int offset = 0; - int trailingNullsToTrim = 0; - do { - if (schema.next(ptr, fieldIndex, maxOffset) == null) { - break; - } - System.arraycopy(ptr.get(), ptr.getOffset(), newRowKey, offset, ptr.getLength()); - offset += ptr.getLength(); - Field field = schema.getField(fieldIndex); - if (field.getDataType().isFixedWidth()) { - trailingNullsToTrim = 0; - } else { - boolean isNull = ptr.getLength() == 0; - byte[] sepBytes = SchemaUtil.getSeparatorBytes(field.getDataType(), - true, - isNull, - field.getSortOrder()); - for (byte sepByte : sepBytes) { - newRowKey[offset++] = sepByte; - } - if (isNull) { - if (trimTrailingNulls) { - trailingNullsToTrim++; - } else { - trailingNullsToTrim = 0; - } - } else { - // So that last zero separator byte is always trimmed - trailingNullsToTrim = 1; - } - } - fieldIndex++; - } while (++position < rangeSpan); - // remove trailing nulls - ptr.set(newRowKey, 0, offset - trailingNullsToTrim); - // return true if we've clipped the rowKey - return maxOffset != offset; + numRegionLocationLookups = regionLocations.size(); + List regionBoundaries = toBoundaries(regionLocations); + int stopIndex = regionBoundaries.size(); + if (startRegionBoundaryKey.length > 0) { + startRegionIndex = + regionIndex = getIndexContainingInclusive(regionBoundaries, startRegionBoundaryKey); } + if (stopRegionBoundaryKey.length > 0) { + stopIndex = Math.min(stopIndex, + regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), + stopRegionBoundaryKey)); + if (isLocalIndex) { + stopKey = regionLocations.get(stopIndex).getRegion().getEndKey(); + } + } + ParallelScansCollector parallelScanCollector = new ParallelScansCollector(scanGrouper); - /** - * Compute the list of parallel scans to run for a given query. The inner scans - * may be concatenated together directly, while the other ones may need to be - * merge sorted, depending on the query. - * Also computes an estimated bytes scanned, rows scanned, and last update time - * of statistics. To compute correctly, we need to handle a couple of edge cases: - * 1) if a guidepost is equal to the start key of the scan. - * 2) If a guidepost is equal to the end region key. - * In both cases, we set a flag (delayAddingEst) which indicates that the previous - * gp should be use in our stats calculation. The normal case is that a gp is - * encountered which is in the scan range in which case it is simply added to - * our calculation. - * For the last update time, we use the min timestamp of the gp that are in - * range of the scans that will be issued. If we find no gp in the range, we use - * the gp in the first or last region of the scan. If we encounter a region with - * no gp, then we return a null value as an indication that we don't know with - * certainty when the stats were updated last. This handles the case of a split - * occurring for a large ingest with stats never having been calculated for the - * new region. - * @return list of parallel scans to run for a given query. 
- * @throws SQLException - */ - private ScansWithRegionLocations getParallelScans(byte[] startKey, byte[] stopKey) - throws SQLException { - ScanRanges scanRanges = context.getScanRanges(); - PTable table = getTable(); - boolean isLocalIndex = table.getIndexType() == IndexType.LOCAL; - GuidePostEstimate estimates = new GuidePostEstimate(); - if (!isLocalIndex && scanRanges.isPointLookup() && !scanRanges.useSkipScanFilter()) { - List> parallelScans = Lists.newArrayListWithExpectedSize(1); - List scans = Lists.newArrayListWithExpectedSize(1); - Scan scanFromContext = context.getScan(); - Integer limit = plan.getLimit(); - boolean isAggregate = plan.getStatement().isAggregate(); - if (scanRanges.getPointLookupCount() == 1 && limit == null && !isAggregate) { - // leverage bloom filter for single key point lookup by turning scan to - // Get Scan#isGetScan(). There should also be no limit on the point lookup query. - // The limit and the aggregate check is needed to handle cases where a child view - // extends the parent's PK and you insert data through the child but do a point - // lookup using the parent's PK. Since the parent's PK is only a prefix of the - // actual PK we can't do a Get but need to do a regular scan with the stop key - // set to the next key after the start key. - try { - scanFromContext = new Scan(context.getScan()); - } catch (IOException e) { - LOGGER.error("Failure to construct point lookup scan", e); - throw new PhoenixIOException(e); - } - scanFromContext.withStopRow(scanFromContext.getStartRow(), - scanFromContext.includeStartRow()); + ImmutableBytesWritable currentKey = new ImmutableBytesWritable(startKey); + + int gpsSize = gps.getGuidePostsCount(); + int keyOffset = 0; + ImmutableBytesWritable currentGuidePost = ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY; + ImmutableBytesWritable guidePosts = gps.getGuidePosts(); + ByteArrayInputStream stream = null; + DataInput input = null; + PrefixByteDecoder decoder = null; + int guideIndex = 0; + boolean gpsForFirstRegion = false; + boolean intersectWithGuidePosts = true; + // Maintain min ts for gps in first or last region outside of + // gps that are in the scan range. We'll use this if we find + // no gps in range. + long fallbackTs = Long.MAX_VALUE; + // Determination of whether of not we found a guidepost in + // every region between the start and stop key. If not, then + // we cannot definitively say at what time the guideposts + // were collected. + boolean gpsAvailableForAllRegions = true; + try { + boolean delayAddingEst = false; + ImmutableBytesWritable firstRegionStartKey = null; + if (gpsSize > 0) { + stream = new ByteArrayInputStream(guidePosts.get(), guidePosts.getOffset(), + guidePosts.getLength()); + input = new DataInputStream(stream); + decoder = new PrefixByteDecoder(gps.getMaxLength()); + firstRegionStartKey = + new ImmutableBytesWritable(regionLocations.get(regionIndex).getRegion().getStartKey()); + try { + int c; + // Continue walking guideposts until we get past the currentKey + while ( + (c = currentKey.compareTo(currentGuidePost = PrefixByteCodec.decode(decoder, input))) + >= 0 + ) { + // Detect if we found a guidepost that might be in the first region. This + // is for the case where the start key may be past the only guidepost in + // the first region. 
+ if (!gpsForFirstRegion && firstRegionStartKey.compareTo(currentGuidePost) <= 0) { + gpsForFirstRegion = true; + } + // While we have gps in the region (but outside of start/stop key), track + // the min ts as a fallback for the time at which stas were calculated. + if (gpsForFirstRegion) { + fallbackTs = Math.min(fallbackTs, gps.getGuidePostTimestamps()[guideIndex]); } - scans.add(scanFromContext); - parallelScans.add(scans); - generateEstimates(scanRanges, table, GuidePostsInfo.NO_GUIDEPOST, - GuidePostsInfo.NO_GUIDEPOST.isEmptyGuidePost(), parallelScans, estimates, - Long.MAX_VALUE, false); - // we don't retrieve region location for the given scan range - return new ScansWithRegionLocations(parallelScans, null); + // Special case for gp == startKey in which case we want to + // count this gp (if it's in range) though we go past it. + delayAddingEst = (c == 0); + guideIndex++; + } + } catch (EOFException e) { + // expected. Thrown when we have decoded all guide posts. + intersectWithGuidePosts = false; + } + } + byte[] endRegionKey = regionLocations.get(stopIndex).getRegion().getEndKey(); + byte[] currentKeyBytes = currentKey.copyBytes(); + intersectWithGuidePosts &= guideIndex < gpsSize; + // Merge bisect with guideposts for all but the last region + while (regionIndex <= stopIndex) { + HRegionLocation regionLocation = regionLocations.get(regionIndex); + RegionInfo regionInfo = regionLocation.getRegion(); + byte[] currentGuidePostBytes = currentGuidePost.copyBytes(); + byte[] endKey; + if (regionIndex == stopIndex) { + endKey = stopKey; + } else { + endKey = regionBoundaries.get(regionIndex); } - byte[] sampleProcessedSaltByte = - SchemaUtil.processSplit(new byte[] { 0 }, table.getPKColumns()); - byte[] splitPostfix = - Arrays.copyOfRange(sampleProcessedSaltByte, 1, sampleProcessedSaltByte.length); - boolean isSalted = table.getBucketNum() != null; - GuidePostsInfo gps = getGuidePosts(); - // case when stats wasn't collected - hasGuidePosts = gps != GuidePostsInfo.NO_GUIDEPOST; - // Case when stats collection did run but there possibly wasn't enough data. In such a - // case we generate an empty guide post with the byte estimate being set as guide post - // width. - boolean emptyGuidePost = gps.isEmptyGuidePost(); - byte[] startRegionBoundaryKey = startKey; - byte[] stopRegionBoundaryKey = stopKey; - int columnsInCommon = 0; - ScanRanges prefixScanRanges = ScanRanges.EVERYTHING; - boolean traverseAllRegions = isSalted || isLocalIndex; if (isLocalIndex) { - // TODO: when implementing PHOENIX-4585, we should change this to an assert - // as we should always have a data plan when a local index is being used. 
- if (dataPlan != null && dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity check - prefixScanRanges = computePrefixScanRanges(dataPlan.getContext().getScanRanges(), columnsInCommon=computeColumnsInCommon()); - KeyRange prefixRange = prefixScanRanges.getScanRange(); - if (!prefixRange.lowerUnbound()) { - startRegionBoundaryKey = prefixRange.getLowerRange(); - } - if (!prefixRange.upperUnbound()) { - stopRegionBoundaryKey = prefixRange.getUpperRange(); - } + if (dataPlan != null && dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity + // check + ScanRanges dataScanRanges = dataPlan.getContext().getScanRanges(); + // we can skip a region completely for local indexes if the data plan does not intersect + if ( + !dataScanRanges.intersectRegion(regionInfo.getStartKey(), regionInfo.getEndKey(), + false) + ) { + currentKeyBytes = endKey; + regionIndex++; + continue; } - } else if (!traverseAllRegions) { - byte[] scanStartRow = scan.getStartRow(); - if (scanStartRow.length != 0 && Bytes.compareTo(scanStartRow, startKey) > 0) { - startRegionBoundaryKey = startKey = scanStartRow; - } - byte[] scanStopRow = scan.getStopRow(); - if (stopKey.length == 0 - || (scanStopRow.length != 0 && Bytes.compareTo(scanStopRow, stopKey) < 0)) { - stopRegionBoundaryKey = stopKey = scanStopRow; + } + // Only attempt further pruning if the prefix range is using + // a skip scan since we've already pruned the range of regions + // based on the start/stop key. + if (columnsInCommon > 0 && prefixScanRanges.useSkipScanFilter()) { + byte[] regionStartKey = regionInfo.getStartKey(); + ImmutableBytesWritable ptr = context.getTempPtr(); + clipKeyRangeBytes(prefixScanRanges.getSchema(), 0, columnsInCommon, regionStartKey, ptr, + false); + regionStartKey = ByteUtil.copyKeyBytesIfNecessary(ptr); + // Prune this region if there's no intersection + if (!prefixScanRanges.intersectRegion(regionStartKey, regionInfo.getEndKey(), false)) { + currentKeyBytes = endKey; + regionIndex++; + continue; } + } + keyOffset = ScanUtil.getRowKeyOffset(regionInfo.getStartKey(), regionInfo.getEndKey()); } - - int regionIndex = 0; - int startRegionIndex = 0; - - List regionLocations; - if (isSalted && !isLocalIndex) { - // key prefix = salt num + view index id + tenant id - // If salting is used with tenant or view index id, scan start and end - // rowkeys will not be empty. We need to generate region locations for - // all the scan range such that we cover (each salt bucket num) + (prefix starting from - // index position 1 to cover view index and/or tenant id and/or remaining prefix). - if (scan.getStartRow().length > 0 && scan.getStopRow().length > 0) { - regionLocations = new ArrayList<>(); - for (int i = 0; i < getTable().getBucketNum(); i++) { - byte[] saltStartRegionKey = new byte[scan.getStartRow().length]; - saltStartRegionKey[0] = (byte) i; - System.arraycopy(scan.getStartRow(), 1, saltStartRegionKey, 1, - scan.getStartRow().length - 1); - - byte[] saltStopRegionKey = new byte[scan.getStopRow().length]; - saltStopRegionKey[0] = (byte) i; - System.arraycopy(scan.getStopRow(), 1, saltStopRegionKey, 1, - scan.getStopRow().length - 1); - - regionLocations.addAll( - getRegionBoundaries(scanGrouper, saltStartRegionKey, saltStopRegionKey)); - } - } else { - // If scan start and end rowkeys are empty, we end up fetching all region locations. 
- regionLocations = - getRegionBoundaries(scanGrouper, startRegionBoundaryKey, stopRegionBoundaryKey); + byte[] initialKeyBytes = currentKeyBytes; + int gpsComparedToEndKey = -1; + boolean everNotDelayed = false; + while ( + intersectWithGuidePosts && (endKey.length == 0 + || (gpsComparedToEndKey = currentGuidePost.compareTo(endKey)) <= 0) + ) { + List newScans = + scanRanges.intersectScan(scan, currentKeyBytes, currentGuidePostBytes, keyOffset, + splitPostfix, getTable().getBucketNum(), gpsComparedToEndKey == 0); + if (useStatsForParallelization) { + for (int newScanIdx = 0; newScanIdx < newScans.size(); newScanIdx++) { + Scan newScan = newScans.get(newScanIdx); + ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(), + regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow()); + if (regionLocation.getServerName() != null) { + newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, + regionLocation.getServerName().getVersionedBytes()); + } + boolean lastOfNew = newScanIdx == newScans.size() - 1; + parallelScanCollector.addNewScan(plan, newScan, gpsComparedToEndKey == 0 && lastOfNew, + regionLocation); } - } else { - // For range scans, startRegionBoundaryKey and stopRegionBoundaryKey should refer - // to the boundary specified by the scan context. - regionLocations = - getRegionBoundaries(scanGrouper, startRegionBoundaryKey, stopRegionBoundaryKey); + } + if (newScans.size() > 0) { + // If we've delaying adding estimates, add the previous + // gp estimates now that we know they are in range. + if (delayAddingEst) { + updateEstimates(gps, guideIndex - 1, estimates); + } + // If we're not delaying adding estimates, add the + // current gp estimates. + if (!(delayAddingEst = gpsComparedToEndKey == 0)) { + updateEstimates(gps, guideIndex, estimates); + } + } else { + delayAddingEst = false; + } + everNotDelayed |= !delayAddingEst; + currentKeyBytes = currentGuidePostBytes; + try { + currentGuidePost = PrefixByteCodec.decode(decoder, input); + currentGuidePostBytes = currentGuidePost.copyBytes(); + guideIndex++; + } catch (EOFException e) { + // We have read all guide posts + intersectWithGuidePosts = false; + } } - - numRegionLocationLookups = regionLocations.size(); - List regionBoundaries = toBoundaries(regionLocations); - int stopIndex = regionBoundaries.size(); - if (startRegionBoundaryKey.length > 0) { - startRegionIndex = regionIndex = getIndexContainingInclusive(regionBoundaries, startRegionBoundaryKey); + boolean gpsInThisRegion = initialKeyBytes != currentKeyBytes; + if (!useStatsForParallelization) { + /* + * If we are not using stats for generating parallel scans, we need to reset the + * currentKey back to what it was at the beginning of the loop. 
+ */ + currentKeyBytes = initialKeyBytes; } - if (stopRegionBoundaryKey.length > 0) { - stopIndex = Math.min(stopIndex, regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), stopRegionBoundaryKey)); - if (isLocalIndex) { - stopKey = regionLocations.get(stopIndex).getRegion().getEndKey(); - } + List newScans = scanRanges.intersectScan(scan, currentKeyBytes, endKey, keyOffset, + splitPostfix, getTable().getBucketNum(), true); + for (int newScanIdx = 0; newScanIdx < newScans.size(); newScanIdx++) { + Scan newScan = newScans.get(newScanIdx); + ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(), + regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow()); + if (regionLocation.getServerName() != null) { + newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, + regionLocation.getServerName().getVersionedBytes()); + } + boolean lastOfNew = newScanIdx == newScans.size() - 1; + parallelScanCollector.addNewScan(plan, newScan, lastOfNew, regionLocation); } - ParallelScansCollector parallelScanCollector = new ParallelScansCollector(scanGrouper); - - ImmutableBytesWritable currentKey = new ImmutableBytesWritable(startKey); - - int gpsSize = gps.getGuidePostsCount(); - int keyOffset = 0; - ImmutableBytesWritable currentGuidePost = ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY; - ImmutableBytesWritable guidePosts = gps.getGuidePosts(); - ByteArrayInputStream stream = null; - DataInput input = null; - PrefixByteDecoder decoder = null; - int guideIndex = 0; - boolean gpsForFirstRegion = false; - boolean intersectWithGuidePosts = true; - // Maintain min ts for gps in first or last region outside of - // gps that are in the scan range. We'll use this if we find - // no gps in range. - long fallbackTs = Long.MAX_VALUE; - // Determination of whether of not we found a guidepost in - // every region between the start and stop key. If not, then - // we cannot definitively say at what time the guideposts - // were collected. - boolean gpsAvailableForAllRegions = true; - try { - boolean delayAddingEst = false; - ImmutableBytesWritable firstRegionStartKey = null; - if (gpsSize > 0) { - stream = new ByteArrayInputStream(guidePosts.get(), guidePosts.getOffset(), guidePosts.getLength()); - input = new DataInputStream(stream); - decoder = new PrefixByteDecoder(gps.getMaxLength()); - firstRegionStartKey = new ImmutableBytesWritable(regionLocations.get(regionIndex).getRegion().getStartKey()); - try { - int c; - // Continue walking guideposts until we get past the currentKey - while ((c=currentKey.compareTo(currentGuidePost = PrefixByteCodec.decode(decoder, input))) >= 0) { - // Detect if we found a guidepost that might be in the first region. This - // is for the case where the start key may be past the only guidepost in - // the first region. - if (!gpsForFirstRegion && firstRegionStartKey.compareTo(currentGuidePost) <= 0) { - gpsForFirstRegion = true; - } - // While we have gps in the region (but outside of start/stop key), track - // the min ts as a fallback for the time at which stas were calculated. - if (gpsForFirstRegion) { - fallbackTs = - Math.min(fallbackTs, - gps.getGuidePostTimestamps()[guideIndex]); - } - // Special case for gp == startKey in which case we want to - // count this gp (if it's in range) though we go past it. - delayAddingEst = (c == 0); - guideIndex++; - } - } catch (EOFException e) { - // expected. Thrown when we have decoded all guide posts. 
- intersectWithGuidePosts = false; - } - } - byte[] endRegionKey = regionLocations.get(stopIndex).getRegion().getEndKey(); - byte[] currentKeyBytes = currentKey.copyBytes(); - intersectWithGuidePosts &= guideIndex < gpsSize; - // Merge bisect with guideposts for all but the last region - while (regionIndex <= stopIndex) { - HRegionLocation regionLocation = regionLocations.get(regionIndex); - RegionInfo regionInfo = regionLocation.getRegion(); - byte[] currentGuidePostBytes = currentGuidePost.copyBytes(); - byte[] endKey; - if (regionIndex == stopIndex) { - endKey = stopKey; - } else { - endKey = regionBoundaries.get(regionIndex); - } - if (isLocalIndex) { - if (dataPlan != null && dataPlan.getTableRef().getTable().getType() != PTableType.INDEX) { // Sanity check - ScanRanges dataScanRanges = dataPlan.getContext().getScanRanges(); - // we can skip a region completely for local indexes if the data plan does not intersect - if (!dataScanRanges.intersectRegion(regionInfo.getStartKey(), regionInfo.getEndKey(), false)) { - currentKeyBytes = endKey; - regionIndex++; - continue; - } - } - // Only attempt further pruning if the prefix range is using - // a skip scan since we've already pruned the range of regions - // based on the start/stop key. - if (columnsInCommon > 0 && prefixScanRanges.useSkipScanFilter()) { - byte[] regionStartKey = regionInfo.getStartKey(); - ImmutableBytesWritable ptr = context.getTempPtr(); - clipKeyRangeBytes(prefixScanRanges.getSchema(), 0, columnsInCommon, regionStartKey, ptr, false); - regionStartKey = ByteUtil.copyKeyBytesIfNecessary(ptr); - // Prune this region if there's no intersection - if (!prefixScanRanges.intersectRegion(regionStartKey, regionInfo.getEndKey(), false)) { - currentKeyBytes = endKey; - regionIndex++; - continue; - } - } - keyOffset = ScanUtil.getRowKeyOffset(regionInfo.getStartKey(), regionInfo.getEndKey()); - } - byte[] initialKeyBytes = currentKeyBytes; - int gpsComparedToEndKey = -1; - boolean everNotDelayed = false; - while (intersectWithGuidePosts && (endKey.length == 0 - || (gpsComparedToEndKey = currentGuidePost.compareTo(endKey)) <= 0)) { - List newScans = - scanRanges.intersectScan(scan, currentKeyBytes, currentGuidePostBytes, - keyOffset, splitPostfix, getTable().getBucketNum(), - gpsComparedToEndKey == 0); - if (useStatsForParallelization) { - for (int newScanIdx = 0; newScanIdx < newScans.size(); newScanIdx++) { - Scan newScan = newScans.get(newScanIdx); - ScanUtil.setLocalIndexAttributes(newScan, keyOffset, - regionInfo.getStartKey(), regionInfo.getEndKey(), - newScan.getStartRow(), newScan.getStopRow()); - if (regionLocation.getServerName() != null) { - newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, - regionLocation.getServerName().getVersionedBytes()); - } - boolean lastOfNew = newScanIdx == newScans.size() - 1; - parallelScanCollector.addNewScan(plan, newScan, - gpsComparedToEndKey == 0 && lastOfNew, regionLocation); - } - } - if (newScans.size() > 0) { - // If we've delaying adding estimates, add the previous - // gp estimates now that we know they are in range. - if (delayAddingEst) { - updateEstimates(gps, guideIndex-1, estimates); - } - // If we're not delaying adding estimates, add the - // current gp estimates. - if (! 
(delayAddingEst = gpsComparedToEndKey == 0) ) { - updateEstimates(gps, guideIndex, estimates); - } - } else { - delayAddingEst = false; - } - everNotDelayed |= !delayAddingEst; - currentKeyBytes = currentGuidePostBytes; - try { - currentGuidePost = PrefixByteCodec.decode(decoder, input); - currentGuidePostBytes = currentGuidePost.copyBytes(); - guideIndex++; - } catch (EOFException e) { - // We have read all guide posts - intersectWithGuidePosts = false; - } - } - boolean gpsInThisRegion = initialKeyBytes != currentKeyBytes; - if (!useStatsForParallelization) { - /* - * If we are not using stats for generating parallel scans, we need to reset the - * currentKey back to what it was at the beginning of the loop. - */ - currentKeyBytes = initialKeyBytes; - } - List newScans = - scanRanges.intersectScan(scan, currentKeyBytes, endKey, keyOffset, - splitPostfix, getTable().getBucketNum(), true); - for (int newScanIdx = 0; newScanIdx < newScans.size(); newScanIdx++) { - Scan newScan = newScans.get(newScanIdx); - ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(), - regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow()); - if (regionLocation.getServerName() != null) { - newScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_REGION_SERVER, - regionLocation.getServerName().getVersionedBytes()); - } - boolean lastOfNew = newScanIdx == newScans.size() - 1; - parallelScanCollector.addNewScan(plan, newScan, lastOfNew, regionLocation); - } - if (newScans.size() > 0) { - // Boundary case of no GP in region after delaying adding of estimates - if (!gpsInThisRegion && delayAddingEst) { - updateEstimates(gps, guideIndex-1, estimates); - gpsInThisRegion = true; - delayAddingEst = false; - } - } else if (!gpsInThisRegion) { - delayAddingEst = false; - } - currentKeyBytes = endKey; - // We have a guide post in the region if the above loop was entered - // or if the current key is less than the region end key (since the loop - // may not have been entered if our scan end key is smaller than the - // first guide post in that region). 
- boolean gpsAfterStopKey = false; - gpsAvailableForAllRegions &= - ( gpsInThisRegion && everNotDelayed) || // GP in this region - ( regionIndex == startRegionIndex && gpsForFirstRegion ) || // GP in first region (before start key) - ( gpsAfterStopKey = ( regionIndex == stopIndex && intersectWithGuidePosts && // GP in last region (after stop key) - ( endRegionKey.length == 0 || // then check if gp is in the region - currentGuidePost.compareTo(endRegionKey) < 0))); - if (gpsAfterStopKey) { - // If gp after stop key, but still in last region, track min ts as fallback - fallbackTs = - Math.min(fallbackTs, - gps.getGuidePostTimestamps()[guideIndex]); - } - regionIndex++; - } - generateEstimates(scanRanges, table, gps, emptyGuidePost, parallelScanCollector.getParallelScans(), estimates, - fallbackTs, gpsAvailableForAllRegions); - } finally { - if (stream != null) Closeables.closeQuietly(stream); + if (newScans.size() > 0) { + // Boundary case of no GP in region after delaying adding of estimates + if (!gpsInThisRegion && delayAddingEst) { + updateEstimates(gps, guideIndex - 1, estimates); + gpsInThisRegion = true; + delayAddingEst = false; + } + } else if (!gpsInThisRegion) { + delayAddingEst = false; } - sampleScans(parallelScanCollector.getParallelScans(),this.plan.getStatement().getTableSamplingRate()); - return new ScansWithRegionLocations(parallelScanCollector.getParallelScans(), - parallelScanCollector.getRegionLocations()); - } - - private void generateEstimates(ScanRanges scanRanges, PTable table, GuidePostsInfo gps, - boolean emptyGuidePost, List> parallelScans, GuidePostEstimate estimates, - long fallbackTs, boolean gpsAvailableForAllRegions) { - Long pageLimit = getUnfilteredPageLimit(scan); - if (scanRanges.isPointLookup() || pageLimit != null) { - // If run in parallel, the limit is pushed to each parallel scan so must be accounted - // for in all of them - int parallelFactor = this.isSerial() ? 1 : parallelScans.size(); - if (scanRanges.isPointLookup() && pageLimit != null) { - this.estimatedRows = - Long.valueOf(Math.min(scanRanges.getPointLookupCount(), - pageLimit * parallelFactor)); - } else if (scanRanges.isPointLookup()) { - this.estimatedRows = Long.valueOf(scanRanges.getPointLookupCount()); - } else { - this.estimatedRows = pageLimit * parallelFactor; - } - this.estimatedSize = this.estimatedRows * SchemaUtil.estimateRowSize(table); - // Indication to client that the statistics estimates were not - // calculated based on statistics but instead are based on row - // limits from the query. 
- this.estimateInfoTimestamp = StatisticsUtil.NOT_STATS_BASED_TS; - } else if (emptyGuidePost) { - // In case of an empty guide post, we estimate the number of rows scanned by - // using the estimated row size - this.estimatedRows = gps.getByteCounts()[0] / SchemaUtil.estimateRowSize(table); - this.estimatedSize = gps.getByteCounts()[0]; - this.estimateInfoTimestamp = gps.getGuidePostTimestamps()[0]; - } else if (hasGuidePosts) { - this.estimatedRows = estimates.rowsEstimate; - this.estimatedSize = estimates.bytesEstimate; - this.estimateInfoTimestamp = computeMinTimestamp(gpsAvailableForAllRegions, estimates, - fallbackTs); - } else { - this.estimatedRows = null; - this.estimatedSize = null; - this.estimateInfoTimestamp = null; + currentKeyBytes = endKey; + // We have a guide post in the region if the above loop was entered + // or if the current key is less than the region end key (since the loop + // may not have been entered if our scan end key is smaller than the + // first guide post in that region). + boolean gpsAfterStopKey = false; + gpsAvailableForAllRegions &= (gpsInThisRegion && everNotDelayed) || // GP in this region + (regionIndex == startRegionIndex && gpsForFirstRegion) || // GP in first region (before + // start key) + (gpsAfterStopKey = (regionIndex == stopIndex && intersectWithGuidePosts && // GP in last + // region + // (after stop + // key) + (endRegionKey.length == 0 || // then check if gp is in the region + currentGuidePost.compareTo(endRegionKey) < 0))); + if (gpsAfterStopKey) { + // If gp after stop key, but still in last region, track min ts as fallback + fallbackTs = Math.min(fallbackTs, gps.getGuidePostTimestamps()[guideIndex]); } + regionIndex++; + } + generateEstimates(scanRanges, table, gps, emptyGuidePost, + parallelScanCollector.getParallelScans(), estimates, fallbackTs, gpsAvailableForAllRegions); + } finally { + if (stream != null) Closeables.closeQuietly(stream); } + sampleScans(parallelScanCollector.getParallelScans(), + this.plan.getStatement().getTableSamplingRate()); + return new ScansWithRegionLocations(parallelScanCollector.getParallelScans(), + parallelScanCollector.getRegionLocations()); + } - /** - * Return row count limit of PageFilter if exists and there is no where - * clause filter. - * @return - */ - private static Long getUnfilteredPageLimit(Scan scan) { - Long pageLimit = null; - Iterator filters = ScanUtil.getFilterIterator(scan); - while (filters.hasNext()) { - Filter filter = filters.next(); - if (filter instanceof BooleanExpressionFilter) { - return null; - } - if (filter instanceof PageFilter) { - pageLimit = ((PageFilter)filter).getPageSize(); - } - } - return pageLimit; + private void generateEstimates(ScanRanges scanRanges, PTable table, GuidePostsInfo gps, + boolean emptyGuidePost, List> parallelScans, GuidePostEstimate estimates, + long fallbackTs, boolean gpsAvailableForAllRegions) { + Long pageLimit = getUnfilteredPageLimit(scan); + if (scanRanges.isPointLookup() || pageLimit != null) { + // If run in parallel, the limit is pushed to each parallel scan so must be accounted + // for in all of them + int parallelFactor = this.isSerial() ? 
1 : parallelScans.size(); + if (scanRanges.isPointLookup() && pageLimit != null) { + this.estimatedRows = + Long.valueOf(Math.min(scanRanges.getPointLookupCount(), pageLimit * parallelFactor)); + } else if (scanRanges.isPointLookup()) { + this.estimatedRows = Long.valueOf(scanRanges.getPointLookupCount()); + } else { + this.estimatedRows = pageLimit * parallelFactor; + } + this.estimatedSize = this.estimatedRows * SchemaUtil.estimateRowSize(table); + // Indication to client that the statistics estimates were not + // calculated based on statistics but instead are based on row + // limits from the query. + this.estimateInfoTimestamp = StatisticsUtil.NOT_STATS_BASED_TS; + } else if (emptyGuidePost) { + // In case of an empty guide post, we estimate the number of rows scanned by + // using the estimated row size + this.estimatedRows = gps.getByteCounts()[0] / SchemaUtil.estimateRowSize(table); + this.estimatedSize = gps.getByteCounts()[0]; + this.estimateInfoTimestamp = gps.getGuidePostTimestamps()[0]; + } else if (hasGuidePosts) { + this.estimatedRows = estimates.rowsEstimate; + this.estimatedSize = estimates.bytesEstimate; + this.estimateInfoTimestamp = + computeMinTimestamp(gpsAvailableForAllRegions, estimates, fallbackTs); + } else { + this.estimatedRows = null; + this.estimatedSize = null; + this.estimateInfoTimestamp = null; } + } - private static Long computeMinTimestamp(boolean gpsAvailableForAllRegions, - GuidePostEstimate estimates, - long fallbackTs) { - if (gpsAvailableForAllRegions) { - if (estimates.lastUpdated < Long.MAX_VALUE) { - return estimates.lastUpdated; - } - if (fallbackTs < Long.MAX_VALUE) { - return fallbackTs; - } - } + /** + * Return row count limit of PageFilter if exists and there is no where clause filter. + */ + private static Long getUnfilteredPageLimit(Scan scan) { + Long pageLimit = null; + Iterator filters = ScanUtil.getFilterIterator(scan); + while (filters.hasNext()) { + Filter filter = filters.next(); + if (filter instanceof BooleanExpressionFilter) { return null; + } + if (filter instanceof PageFilter) { + pageLimit = ((PageFilter) filter).getPageSize(); + } } + return pageLimit; + } - /** - * Loop through List> parallelScans object, - * rolling dice on each scan based on startRowKey. - * - * All FilterableStatement should have tableSamplingRate. - * In case it is delete statement, an unsupported message is raised. - * In case it is null tableSamplingRate, 100% sampling rate will be applied by default. - * - * @param parallelScans - */ - private void sampleScans(final List> parallelScans, final Double tableSamplingRate){ - if (tableSamplingRate == null || tableSamplingRate == 100d) { - return; - } - final Predicate tableSamplerPredicate = TableSamplerPredicate.of(tableSamplingRate); + private static Long computeMinTimestamp(boolean gpsAvailableForAllRegions, + GuidePostEstimate estimates, long fallbackTs) { + if (gpsAvailableForAllRegions) { + if (estimates.lastUpdated < Long.MAX_VALUE) { + return estimates.lastUpdated; + } + if (fallbackTs < Long.MAX_VALUE) { + return fallbackTs; + } + } + return null; + } - for (Iterator> is = parallelScans.iterator(); is.hasNext();) { - for (Iterator i = is.next().iterator(); i.hasNext();) { - final Scan scan=i.next(); - if (!tableSamplerPredicate.apply(scan.getStartRow())) { - i.remove(); - } - } - } + /** + * Loop through List> parallelScans object, rolling dice on each scan based on + * startRowKey. All FilterableStatement should have tableSamplingRate. 
In case it is delete + * statement, an unsupported message is raised. In case it is null tableSamplingRate, 100% + * sampling rate will be applied by default. + */ + private void sampleScans(final List> parallelScans, final Double tableSamplingRate) { + if (tableSamplingRate == null || tableSamplingRate == 100d) { + return; } - - public static List reverseIfNecessary(List list, boolean reverse) { - if (!reverse) { - return list; + final Predicate tableSamplerPredicate = TableSamplerPredicate.of(tableSamplingRate); + + for (Iterator> is = parallelScans.iterator(); is.hasNext();) { + for (Iterator i = is.next().iterator(); i.hasNext();) { + final Scan scan = i.next(); + if (!tableSamplerPredicate.apply(scan.getStartRow())) { + i.remove(); } - return Lists.reverse(list); + } } - - /** - * Executes the scan in parallel across all regions, blocking until all scans are complete. - * @return the result iterators for the scan of each region - */ - @Override - public List getIterators() throws SQLException { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this, - ScanUtil.getCustomAnnotations(scan)) + "on table " + context.getCurrentTable().getTable().getName()); - } - boolean isReverse = ScanUtil.isReversed(scan); - boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL; - final ConnectionQueryServices services = context.getConnection().getQueryServices(); - // Get query time out from Statement - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); - final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis(); - int numScans = size(); - // Capture all iterators so that if something goes wrong, we close them all - // The iterators list is based on the submission of work, so it may not - // contain them all (for example if work was rejected from the queue) - Queue allIterators = new ConcurrentLinkedQueue<>(); - List iterators = new ArrayList(numScans); - ScanWrapper previousScan = new ScanWrapper(null); - return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, - splits.size(), previousScan, context.getConnection().getQueryServices().getConfiguration() - .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES, QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES)); + } + + public static List reverseIfNecessary(List list, boolean reverse) { + if (!reverse) { + return list; } + return Lists.reverse(list); + } - private static class ScanWrapper { - Scan scan; + /** + * Executes the scan in parallel across all regions, blocking until all scans are complete. 
+ * @return the result iterators for the scan of each region + */ + @Override + public List getIterators() throws SQLException { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this, + ScanUtil.getCustomAnnotations(scan)) + "on table " + + context.getCurrentTable().getTable().getName()); + } + boolean isReverse = ScanUtil.isReversed(scan); + boolean isLocalIndex = getTable().getIndexType() == IndexType.LOCAL; + final ConnectionQueryServices services = context.getConnection().getQueryServices(); + // Get query time out from Statement + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long maxQueryEndTime = startTime + context.getStatement().getQueryTimeoutInMillis(); + int numScans = size(); + // Capture all iterators so that if something goes wrong, we close them all + // The iterators list is based on the submission of work, so it may not + // contain them all (for example if work was rejected from the queue) + Queue allIterators = new ConcurrentLinkedQueue<>(); + List iterators = new ArrayList(numScans); + ScanWrapper previousScan = new ScanWrapper(null); + return getIterators(scans, services, isLocalIndex, allIterators, iterators, isReverse, + maxQueryEndTime, splits.size(), previousScan, + context.getConnection().getQueryServices().getConfiguration().getInt( + QueryConstants.HASH_JOIN_CACHE_RETRIES, QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES)); + } - public Scan getScan() { - return scan; - } + private static class ScanWrapper { + Scan scan; - public void setScan(Scan scan) { - this.scan = scan; - } + public Scan getScan() { + return scan; + } - public ScanWrapper(Scan scan) { - this.scan = scan; - } + public void setScan(Scan scan) { + this.scan = scan; + } + public ScanWrapper(Scan scan) { + this.scan = scan; } - private List getIterators(List> scan, ConnectionQueryServices services, - boolean isLocalIndex, Queue allIterators, List iterators, - boolean isReverse, long maxQueryEndTime, int splitSize, ScanWrapper previousScan, int retryCount) throws SQLException { - boolean success = false; - final List>>> futures = Lists.newArrayListWithExpectedSize(splitSize); - allFutures.add(futures); - SQLException toThrow = null; - final HashCacheClient hashCacheClient = new HashCacheClient(context.getConnection()); - int queryTimeOut = context.getStatement().getQueryTimeoutInMillis(); - try { - submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper, maxQueryEndTime); - boolean clearedCache = false; - for (List>> future : reverseIfNecessary(futures,isReverse)) { - List concatIterators = Lists.newArrayListWithExpectedSize(future.size()); - Iterator>> scanPairItr = reverseIfNecessary(future,isReverse).iterator(); - while (scanPairItr.hasNext()) { - Pair> scanPair = scanPairItr.next(); - try { - long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis(); - if (forTestingSetTimeoutToMaxToLetQueryPassHere) { - timeOutForScan = Long.MAX_VALUE; - } - if (timeOutForScan < 0) { - throw new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT).setMessage( - ". 
Query couldn't be completed in the allotted time: " - + queryTimeOut + " ms").build().buildException(); - } - // make sure we apply the iterators in order - if (isLocalIndex && previousScan != null && previousScan.getScan() != null - && (((!isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), - previousScan.getScan().getStopRow()) < 0) - || (isReverse && previousScan.getScan().getStopRow().length > 0 && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), - previousScan.getScan().getStopRow()) > 0) - || (Bytes.compareTo(scanPair.getFirst().getStopRow(), previousScan.getScan().getStopRow()) == 0)) - && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX), previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX))==0)) { - continue; - } - PeekingResultIterator iterator = scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS); - concatIterators.add(iterator); - previousScan.setScan(scanPair.getFirst()); - } catch (ExecutionException e) { - LOGGER.warn("Getting iterators at BaseResultIterators encountered error " - + "for table {}", TableName.valueOf(physicalTableName), e); - try { // Rethrow as SQLException - throw ClientUtil.parseServerException(e); - } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2){ - // Catch only to try to recover from region boundary cache being out of date - if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries - services.clearTableRegionCache(TableName.valueOf(physicalTableName)); - context.getOverallQueryMetrics().cacheRefreshedDueToSplits(); - } - // Resubmit just this portion of work again - Scan oldScan = scanPair.getFirst(); - byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW); - if (e2 instanceof HashJoinCacheNotFoundException) { - LOGGER.debug( - "Retrying when Hash Join cache is not found on the server ,by sending the cache again"); - if (retryCount <= 0) { - throw e2; - } - Long cacheId = ((HashJoinCacheNotFoundException)e2).getCacheId(); - ServerCache cache = caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))); - if (cache .getCachePtr() != null) { - if (!hashCacheClient.addHashCacheToServer(startKey, cache, plan.getTableRef().getTable())) { - throw e2; - } - } - } - concatIterators = - recreateIterators(services, isLocalIndex, allIterators, - iterators, isReverse, maxQueryEndTime, previousScan, - clearedCache, concatIterators, scanPairItr, scanPair, retryCount-1); - } catch(ColumnFamilyNotFoundException cfnfe) { - if (scanPair.getFirst().getAttribute(LOCAL_INDEX_BUILD) != null) { - Thread.sleep(1000); - concatIterators = - recreateIterators(services, isLocalIndex, allIterators, - iterators, isReverse, maxQueryEndTime, previousScan, - clearedCache, concatIterators, scanPairItr, scanPair, retryCount); - } - - } - } catch (CancellationException ce) { - LOGGER.warn("Iterator scheduled to be executed in Future was being cancelled", ce); - } - } - addIterator(iterators, concatIterators); + } + + private List getIterators(List> scan, + ConnectionQueryServices services, boolean isLocalIndex, + Queue allIterators, List iterators, + boolean isReverse, long maxQueryEndTime, int splitSize, ScanWrapper previousScan, + int retryCount) throws SQLException { + boolean success = false; + final List>>> futures = + Lists.newArrayListWithExpectedSize(splitSize); + allFutures.add(futures); + SQLException toThrow = null; + final HashCacheClient hashCacheClient = new HashCacheClient(context.getConnection()); + int 
queryTimeOut = context.getStatement().getQueryTimeoutInMillis(); + try { + submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper, maxQueryEndTime); + boolean clearedCache = false; + for (List>> future : reverseIfNecessary(futures, + isReverse)) { + List concatIterators = + Lists.newArrayListWithExpectedSize(future.size()); + Iterator>> scanPairItr = + reverseIfNecessary(future, isReverse).iterator(); + while (scanPairItr.hasNext()) { + Pair> scanPair = scanPairItr.next(); + try { + long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis(); + if (forTestingSetTimeoutToMaxToLetQueryPassHere) { + timeOutForScan = Long.MAX_VALUE; } - success = true; - return iterators; - } catch (TimeoutException e) { - OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); - overAllQueryMetrics.queryTimedOut(); - if (context.getScanRanges().isPointLookup()) { - overAllQueryMetrics.queryPointLookupTimedOut(); - } else { - overAllQueryMetrics.queryScanTimedOut(); + if (timeOutForScan < 0) { + throw new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT) + .setMessage( + ". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms") + .build().buildException(); } - GLOBAL_QUERY_TIMEOUT_COUNTER.increment(); - // thrown when a thread times out waiting for the future.get() call to return - toThrow = new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT).setMessage( - ". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms") - .setRootCause(e).build().buildException(); - } catch (SQLException e) { - if (e.getErrorCode() == OPERATION_TIMED_OUT.getErrorCode()) { - OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); - overAllQueryMetrics.queryTimedOut(); - if (context.getScanRanges().isPointLookup()) { - overAllQueryMetrics.queryPointLookupTimedOut(); - } else { - overAllQueryMetrics.queryScanTimedOut(); - } - GLOBAL_QUERY_TIMEOUT_COUNTER.increment(); + // make sure we apply the iterators in order + if ( + isLocalIndex && previousScan != null && previousScan.getScan() != null + && (((!isReverse + && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), + previousScan.getScan().getStopRow()) < 0) + || (isReverse && previousScan.getScan().getStopRow().length > 0 + && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), + previousScan.getScan().getStopRow()) > 0) + || (Bytes.compareTo(scanPair.getFirst().getStopRow(), + previousScan.getScan().getStopRow()) == 0)) + && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX), + previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX)) == 0) + ) { + continue; } - toThrow = e; - } catch (Exception e) { - toThrow = ClientUtil.parseServerException(e); - } finally { - try { - if (!success) { - try { - close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } finally { - try { - SQLCloseables.closeAll(allIterators); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } + PeekingResultIterator iterator = + scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS); + concatIterators.add(iterator); + previousScan.setScan(scanPair.getFirst()); + } catch (ExecutionException e) { + LOGGER.warn( + "Getting iterators at BaseResultIterators encountered 
error " + "for table {}", + TableName.valueOf(physicalTableName), e); + try { // Rethrow as SQLException + throw ClientUtil.parseServerException(e); + } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2) { + // Catch only to try to recover from region boundary cache being out of date + if (!clearedCache) { // Clear cache once so that we rejigger job based on new + // boundaries + services.clearTableRegionCache(TableName.valueOf(physicalTableName)); + context.getOverallQueryMetrics().cacheRefreshedDueToSplits(); + } + // Resubmit just this portion of work again + Scan oldScan = scanPair.getFirst(); + byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW); + if (e2 instanceof HashJoinCacheNotFoundException) { + LOGGER.debug( + "Retrying when Hash Join cache is not found on the server ,by sending the cache again"); + if (retryCount <= 0) { + throw e2; } - } finally { - if (toThrow != null) { - GLOBAL_FAILED_QUERY_COUNTER.increment(); - OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); - overAllQueryMetrics.queryFailed(); - if (context.getScanRanges().isPointLookup()) { - overAllQueryMetrics.queryPointLookupFailed(); - } else { - overAllQueryMetrics.queryScanFailed(); - } - throw toThrow; + Long cacheId = ((HashJoinCacheNotFoundException) e2).getCacheId(); + ServerCache cache = caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))); + if (cache.getCachePtr() != null) { + if ( + !hashCacheClient.addHashCacheToServer(startKey, cache, + plan.getTableRef().getTable()) + ) { + throw e2; + } } + } + concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, + isReverse, maxQueryEndTime, previousScan, clearedCache, concatIterators, + scanPairItr, scanPair, retryCount - 1); + } catch (ColumnFamilyNotFoundException cfnfe) { + if (scanPair.getFirst().getAttribute(LOCAL_INDEX_BUILD) != null) { + Thread.sleep(1000); + concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, + isReverse, maxQueryEndTime, previousScan, clearedCache, concatIterators, + scanPairItr, scanPair, retryCount); + } + } + } catch (CancellationException ce) { + LOGGER.warn("Iterator scheduled to be executed in Future was being cancelled", ce); + } } - return null; // Not reachable - } - - private List recreateIterators(ConnectionQueryServices services, - boolean isLocalIndex, Queue allIterators, - List iterators, boolean isReverse, long maxQueryEndTime, - ScanWrapper previousScan, boolean clearedCache, - List concatIterators, - Iterator>> scanPairItr, - Pair> scanPair, int retryCount) throws SQLException { - scanPairItr.remove(); - // Resubmit just this portion of work again - Scan oldScan = scanPair.getFirst(); - byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW); - byte[] endKey = oldScan.getStopRow(); - - List> newNestedScans = this.getParallelScans(startKey, endKey).getScans(); - // Add any concatIterators that were successful so far - // as we need these to be in order addIterator(iterators, concatIterators); - concatIterators = Lists.newArrayList(); - getIterators(newNestedScans, services, isLocalIndex, allIterators, iterators, isReverse, - maxQueryEndTime, newNestedScans.size(), previousScan, retryCount); - return concatIterators; - } - - - @Override - public void close() throws SQLException { - // Don't call cancel on already started work, as it causes the HConnection - // to get into a funk. Instead, just cancel queued work. 
- boolean cancelledWork = false; - try { - if (allFutures.isEmpty()) { - return; - } - List> futuresToClose = Lists.newArrayListWithExpectedSize(getSplits().size()); - for (List>>> futures : allFutures) { - for (List>> futureScans : futures) { - for (Pair> futurePair : futureScans) { - // When work is rejected, we may have null futurePair entries, because - // we randomize these and set them as they're submitted. - if (futurePair != null) { - Future future = futurePair.getSecond(); - if (future != null) { - if (future.cancel(false)) { - cancelledWork = true; - } else { - futuresToClose.add(future); - } - } - } - } - } - } - // Wait for already started tasks to complete as we can't interrupt them without - // leaving our HConnection in a funky state. - for (Future future : futuresToClose) { - try { - PeekingResultIterator iterator = future.get(); - iterator.close(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException(e); - } catch (ExecutionException e) { - LOGGER.info("Failed to execute task during cancel", e); - } + } + success = true; + return iterators; + } catch (TimeoutException e) { + OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); + overAllQueryMetrics.queryTimedOut(); + if (context.getScanRanges().isPointLookup()) { + overAllQueryMetrics.queryPointLookupTimedOut(); + } else { + overAllQueryMetrics.queryScanTimedOut(); + } + GLOBAL_QUERY_TIMEOUT_COUNTER.increment(); + // thrown when a thread times out waiting for the future.get() call to return + toThrow = new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT) + .setMessage(". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms") + .setRootCause(e).build().buildException(); + } catch (SQLException e) { + if (e.getErrorCode() == OPERATION_TIMED_OUT.getErrorCode()) { + OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); + overAllQueryMetrics.queryTimedOut(); + if (context.getScanRanges().isPointLookup()) { + overAllQueryMetrics.queryPointLookupTimedOut(); + } else { + overAllQueryMetrics.queryScanTimedOut(); + } + GLOBAL_QUERY_TIMEOUT_COUNTER.increment(); + } + toThrow = e; + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + try { + if (!success) { + try { + close(); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); } - } finally { - SQLCloseables.closeAllQuietly(caches.values()); - caches.clear(); - if (cancelledWork) { - context.getConnection().getQueryServices().getExecutor().purge(); + } finally { + try { + SQLCloseables.closeAll(allIterators); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } } - allFutures.clear(); + } } + } finally { + if (toThrow != null) { + GLOBAL_FAILED_QUERY_COUNTER.increment(); + OverAllQueryMetrics overAllQueryMetrics = context.getOverallQueryMetrics(); + overAllQueryMetrics.queryFailed(); + if (context.getScanRanges().isPointLookup()) { + overAllQueryMetrics.queryPointLookupFailed(); + } else { + overAllQueryMetrics.queryScanFailed(); + } + throw toThrow; + } + } } + return null; // Not reachable + } - private void addIterator(List parentIterators, List childIterators) throws SQLException { - if (!childIterators.isEmpty()) { - if (plan.useRoundRobinIterator()) { - /* - * When using a round robin 
iterator we shouldn't concatenate the iterators together. This is because a - * round robin iterator should be calling next() on these iterators directly after selecting them in a - * round robin fashion. This helps take advantage of loading the underlying scanners' caches in parallel - * as well as preventing errors arising out of scanner lease expirations. - */ - parentIterators.addAll(childIterators); - } else { - parentIterators.add(ConcatResultIterator.newIterator(childIterators)); + private List recreateIterators(ConnectionQueryServices services, + boolean isLocalIndex, Queue allIterators, + List iterators, boolean isReverse, long maxQueryEndTime, + ScanWrapper previousScan, boolean clearedCache, List concatIterators, + Iterator>> scanPairItr, + Pair> scanPair, int retryCount) throws SQLException { + scanPairItr.remove(); + // Resubmit just this portion of work again + Scan oldScan = scanPair.getFirst(); + byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW); + byte[] endKey = oldScan.getStopRow(); + + List> newNestedScans = this.getParallelScans(startKey, endKey).getScans(); + // Add any concatIterators that were successful so far + // as we need these to be in order + addIterator(iterators, concatIterators); + concatIterators = Lists.newArrayList(); + getIterators(newNestedScans, services, isLocalIndex, allIterators, iterators, isReverse, + maxQueryEndTime, newNestedScans.size(), previousScan, retryCount); + return concatIterators; + } + + @Override + public void close() throws SQLException { + // Don't call cancel on already started work, as it causes the HConnection + // to get into a funk. Instead, just cancel queued work. + boolean cancelledWork = false; + try { + if (allFutures.isEmpty()) { + return; + } + List> futuresToClose = + Lists.newArrayListWithExpectedSize(getSplits().size()); + for (List>>> futures : allFutures) { + for (List>> futureScans : futures) { + for (Pair> futurePair : futureScans) { + // When work is rejected, we may have null futurePair entries, because + // we randomize these and set them as they're submitted. + if (futurePair != null) { + Future future = futurePair.getSecond(); + if (future != null) { + if (future.cancel(false)) { + cancelledWork = true; + } else { + futuresToClose.add(future); + } + } } + } } + } + // Wait for already started tasks to complete as we can't interrupt them without + // leaving our HConnection in a funky state. 
+ for (Future future : futuresToClose) { + try { + PeekingResultIterator iterator = future.get(); + iterator.close(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } catch (ExecutionException e) { + LOGGER.info("Failed to execute task during cancel", e); + } + } + } finally { + SQLCloseables.closeAllQuietly(caches.values()); + caches.clear(); + if (cancelledWork) { + context.getConnection().getQueryServices().getExecutor().purge(); + } + allFutures.clear(); } + } - protected static final class ScanLocator { - private final int outerListIndex; - private final int innerListIndex; - private final Scan scan; - private final boolean isFirstScan; - private final boolean isLastScan; - - public ScanLocator(Scan scan, int outerListIndex, int innerListIndex, boolean isFirstScan, boolean isLastScan) { - this.outerListIndex = outerListIndex; - this.innerListIndex = innerListIndex; - this.scan = scan; - this.isFirstScan = isFirstScan; - this.isLastScan = isLastScan; - } - public int getOuterListIndex() { - return outerListIndex; - } - public int getInnerListIndex() { - return innerListIndex; - } - public Scan getScan() { - return scan; - } - public boolean isFirstScan() { - return isFirstScan; - } - public boolean isLastScan() { - return isLastScan; - } - } - - - abstract protected String getName(); - abstract protected void submitWork(List> nestedScans, List>>> nestedFutures, - Queue allIterators, int estFlattenedSize, boolean isReverse, ParallelScanGrouper scanGrouper, - long maxQueryEndTime) throws SQLException; - - @Override - public int size() { - return this.scans.size(); + private void addIterator(List parentIterators, + List childIterators) throws SQLException { + if (!childIterators.isEmpty()) { + if (plan.useRoundRobinIterator()) { + /* + * When using a round robin iterator we shouldn't concatenate the iterators together. This + * is because a round robin iterator should be calling next() on these iterators directly + * after selecting them in a round robin fashion. This helps take advantage of loading the + * underlying scanners' caches in parallel as well as preventing errors arising out of + * scanner lease expirations. + */ + parentIterators.addAll(childIterators); + } else { + parentIterators.add(ConcatResultIterator.newIterator(childIterators)); + } } + } - public int getNumRegionLocationLookups() { - return this.numRegionLocationLookups; - } + protected static final class ScanLocator { + private final int outerListIndex; + private final int innerListIndex; + private final Scan scan; + private final boolean isFirstScan; + private final boolean isLastScan; - @Override - public void explain(List planSteps) { - explainUtil(planSteps, null); + public ScanLocator(Scan scan, int outerListIndex, int innerListIndex, boolean isFirstScan, + boolean isLastScan) { + this.outerListIndex = outerListIndex; + this.innerListIndex = innerListIndex; + this.scan = scan; + this.isFirstScan = isFirstScan; + this.isLastScan = isLastScan; } - /** - * Utility to generate ExplainPlan steps. - * - * @param planSteps Add generated plan in list of planSteps. This argument - * is used to provide planSteps as whole statement consisting of - * list of Strings. - * @param explainPlanAttributesBuilder Add generated plan in attributes - * object. Having an API to provide planSteps as an object is easier - * while comparing individual attributes of ExplainPlan. 
- */ - private void explainUtil(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - boolean displayChunkCount = context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, - QueryServicesOptions.DEFAULT_EXPLAIN_CHUNK_COUNT); - StringBuilder buf = new StringBuilder(); - buf.append("CLIENT "); - if (displayChunkCount) { - boolean displayRowCount = context.getConnection().getQueryServices().getProps().getBoolean( - QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, - QueryServicesOptions.DEFAULT_EXPLAIN_ROW_COUNT); - buf.append(this.splits.size()).append("-CHUNK "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setSplitsChunk(this.splits.size()); - } - if (displayRowCount && estimatedRows != null) { - buf.append(estimatedRows).append(" ROWS "); - buf.append(estimatedSize).append(" BYTES "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setEstimatedRows(estimatedRows); - explainPlanAttributesBuilder.setEstimatedSizeInBytes(estimatedSize); - } - } - } - String iteratorTypeAndScanSize = getName() + " " + size() + "-WAY"; - buf.append(iteratorTypeAndScanSize).append(" "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setIteratorTypeAndScanSize( - iteratorTypeAndScanSize); - explainPlanAttributesBuilder.setNumRegionLocationLookups(getNumRegionLocationLookups()); - } - - if (this.plan.getStatement().getTableSamplingRate() != null) { - Double samplingRate = plan.getStatement().getTableSamplingRate() / 100D; - buf.append(samplingRate).append("-").append("SAMPLED "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setSamplingRate(samplingRate); - } - } - try { - if (plan.useRoundRobinIterator()) { - buf.append("ROUND ROBIN "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setUseRoundRobinIterator(true); - } - } - } catch (SQLException e) { - throw new RuntimeException(e); - } + public int getOuterListIndex() { + return outerListIndex; + } - if (this.plan instanceof ScanPlan) { - ScanPlan scanPlan = (ScanPlan) this.plan; - if (scanPlan.getRowOffset().isPresent()) { - String rowOffset = - Hex.encodeHexString(scanPlan.getRowOffset().get()); - buf.append("With RVC Offset " + "0x") - .append(rowOffset) - .append(" "); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setHexStringRVCOffset( - "0x" + rowOffset); - } - } - } + public int getInnerListIndex() { + return innerListIndex; + } - explain(buf.toString(), planSteps, explainPlanAttributesBuilder, regionLocations); + public Scan getScan() { + return scan; } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - explainUtil(planSteps, explainPlanAttributesBuilder); + public boolean isFirstScan() { + return isFirstScan; } - public Long getEstimatedRowCount() { - return this.estimatedRows; + public boolean isLastScan() { + return isLastScan; } - - public Long getEstimatedByteCount() { - return this.estimatedSize; + } + + abstract protected String getName(); + + abstract protected void submitWork(List> nestedScans, + List>>> nestedFutures, + Queue allIterators, int estFlattenedSize, boolean isReverse, + ParallelScanGrouper scanGrouper, long maxQueryEndTime) throws SQLException; + + @Override + public int size() { + return this.scans.size(); + } + + public int getNumRegionLocationLookups() { + return this.numRegionLocationLookups; + } + + @Override + 
public void explain(List planSteps) { + explainUtil(planSteps, null); + } + + /** + * Utility to generate ExplainPlan steps. + * @param planSteps Add generated plan in list of planSteps. This argument is + * used to provide planSteps as whole statement consisting of + * list of Strings. + * @param explainPlanAttributesBuilder Add generated plan in attributes object. Having an API to + * provide planSteps as an object is easier while comparing + * individual attributes of ExplainPlan. + */ + private void explainUtil(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + boolean displayChunkCount = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_EXPLAIN_CHUNK_COUNT); + StringBuilder buf = new StringBuilder(); + buf.append("CLIENT "); + if (displayChunkCount) { + boolean displayRowCount = context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_EXPLAIN_ROW_COUNT); + buf.append(this.splits.size()).append("-CHUNK "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setSplitsChunk(this.splits.size()); + } + if (displayRowCount && estimatedRows != null) { + buf.append(estimatedRows).append(" ROWS "); + buf.append(estimatedSize).append(" BYTES "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setEstimatedRows(estimatedRows); + explainPlanAttributesBuilder.setEstimatedSizeInBytes(estimatedSize); + } + } } - - @Override - public String toString() { - return "ResultIterators [name=" + getName() + ",id=" + scanId + ",scans=" + scans + "]"; + String iteratorTypeAndScanSize = getName() + " " + size() + "-WAY"; + buf.append(iteratorTypeAndScanSize).append(" "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setIteratorTypeAndScanSize(iteratorTypeAndScanSize); + explainPlanAttributesBuilder.setNumRegionLocationLookups(getNumRegionLocationLookups()); } - public Long getEstimateInfoTimestamp() { - return this.estimateInfoTimestamp; + if (this.plan.getStatement().getTableSamplingRate() != null) { + Double samplingRate = plan.getStatement().getTableSamplingRate() / 100D; + buf.append(samplingRate).append("-").append("SAMPLED "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setSamplingRate(samplingRate); + } + } + try { + if (plan.useRoundRobinIterator()) { + buf.append("ROUND ROBIN "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setUseRoundRobinIterator(true); + } + } + } catch (SQLException e) { + throw new RuntimeException(e); } - /** - * Used for specific test case to check if timeouts are working in ScanningResultIterator. 
- * @param setTimeoutToMax - */ - @VisibleForTesting - public static void setForTestingSetTimeoutToMaxToLetQueryPassHere(boolean setTimeoutToMax) { - forTestingSetTimeoutToMaxToLetQueryPassHere = setTimeoutToMax; + if (this.plan instanceof ScanPlan) { + ScanPlan scanPlan = (ScanPlan) this.plan; + if (scanPlan.getRowOffset().isPresent()) { + String rowOffset = Hex.encodeHexString(scanPlan.getRowOffset().get()); + buf.append("With RVC Offset " + "0x").append(rowOffset).append(" "); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setHexStringRVCOffset("0x" + rowOffset); + } + } } + explain(buf.toString(), planSteps, explainPlanAttributesBuilder, regionLocations); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + explainUtil(planSteps, explainPlanAttributesBuilder); + } + + public Long getEstimatedRowCount() { + return this.estimatedRows; + } + + public Long getEstimatedByteCount() { + return this.estimatedSize; + } + + @Override + public String toString() { + return "ResultIterators [name=" + getName() + ",id=" + scanId + ",scans=" + scans + "]"; + } + + public Long getEstimateInfoTimestamp() { + return this.estimateInfoTimestamp; + } + + /** + * Used for specific test case to check if timeouts are working in ScanningResultIterator. + */ + @VisibleForTesting + public static void setForTestingSetTimeoutToMaxToLetQueryPassHere(boolean setTimeoutToMax) { + forTestingSetTimeoutToMaxToLetQueryPassHere = setTimeoutToMax; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java index 3dd2056476e..a8ef303ea3c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,367 +36,364 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.MinMaxPriorityQueue; public abstract class BufferedQueue extends AbstractQueue implements SizeAwareQueue { + private final long thresholdBytes; + private List> queues; + private int currentIndex; + private BufferedSegmentQueue currentQueue; + private MinMaxPriorityQueue> mergedQueue; + + public BufferedQueue(long thresholdBytes) { + this.thresholdBytes = thresholdBytes; + this.queues = Lists.> newArrayList(); + this.currentIndex = -1; + this.currentQueue = null; + this.mergedQueue = null; + } + + abstract protected BufferedSegmentQueue createSegmentQueue(int index, long thresholdBytes); + + abstract protected Comparator> getSegmentQueueComparator(); + + protected final List> getSegmentQueues() { + return queues.subList(0, currentIndex + 1); + } + + @Override + public boolean offer(T e) { + boolean startNewQueue = this.currentQueue == null || this.currentQueue.isFlushed(); + if (startNewQueue) { + currentIndex++; + if (currentIndex < queues.size()) { + currentQueue = queues.get(currentIndex); + } else { + currentQueue = createSegmentQueue(currentIndex, thresholdBytes); + queues.add(currentQueue); + } + } + + return this.currentQueue.offer(e); + } + + @Override + public T poll() { + initMergedQueue(); + if (mergedQueue != null && !mergedQueue.isEmpty()) { + BufferedSegmentQueue queue = mergedQueue.poll(); + T re = queue.poll(); + if (queue.peek() != null) { + mergedQueue.add(queue); + } + return re; + } + return null; + } + + @Override + public T peek() { + initMergedQueue(); + if (mergedQueue != null && !mergedQueue.isEmpty()) { + return mergedQueue.peek().peek(); + } + return null; + } + + @Override + public void clear() { + for (BufferedSegmentQueue queue : getSegmentQueues()) { + queue.clear(); + } + currentIndex = -1; + currentQueue = null; + mergedQueue = null; + } + + @Override + public Iterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + int size = 0; + for (BufferedSegmentQueue queue : getSegmentQueues()) { + size += queue.size(); + } + return size; + } + + @Override + public long getByteSize() { + return currentQueue == null ? 
0 : currentQueue.getInMemByteSize(); + } + + @Override + public void close() { + for (BufferedSegmentQueue queue : queues) { + queue.close(); + } + queues.clear(); + } + + private void initMergedQueue() { + if (mergedQueue == null && currentIndex >= 0) { + mergedQueue = + MinMaxPriorityQueue.> orderedBy(getSegmentQueueComparator()) + .maximumSize(currentIndex + 1).create(); + for (BufferedSegmentQueue queue : getSegmentQueues()) { + T re = queue.peek(); + if (re != null) { + mergedQueue.add(queue); + } + } + } + } + + public abstract static class BufferedSegmentQueue extends AbstractQueue { + protected static final int EOF = -1; + + private final int index; private final long thresholdBytes; - private List> queues; - private int currentIndex; - private BufferedSegmentQueue currentQueue; - private MinMaxPriorityQueue> mergedQueue; - - public BufferedQueue(long thresholdBytes) { - this.thresholdBytes = thresholdBytes; - this.queues = Lists.> newArrayList(); - this.currentIndex = -1; - this.currentQueue = null; - this.mergedQueue = null; + private final boolean hasMaxQueueSize; + private long totalResultSize = 0; + private long maxResultSize = 0; + private File file; + private boolean isClosed = false; + private boolean flushBuffer = false; + private int flushedCount = 0; + private T current = null; + private SegmentQueueFileIterator thisIterator; + // iterators to close on close() + private List iterators; + + public BufferedSegmentQueue(int index, long thresholdBytes, boolean hasMaxQueueSize) { + this.index = index; + this.thresholdBytes = thresholdBytes; + this.hasMaxQueueSize = hasMaxQueueSize; + this.iterators = Lists. newArrayList(); } - - abstract protected BufferedSegmentQueue createSegmentQueue(int index, long thresholdBytes); - - abstract protected Comparator> getSegmentQueueComparator(); - - protected final List> getSegmentQueues() { - return queues.subList(0, currentIndex + 1); + + abstract protected Queue getInMemoryQueue(); + + abstract protected long sizeOf(T e); + + abstract protected void writeToStream(DataOutputStream out, T e) throws IOException; + + abstract protected T readFromStream(DataInputStream in) throws IOException; + + public int index() { + return this.index; } @Override - public boolean offer(T e) { - boolean startNewQueue = this.currentQueue == null || this.currentQueue.isFlushed(); - if (startNewQueue) { - currentIndex++; - if (currentIndex < queues.size()) { - currentQueue = queues.get(currentIndex); - } else { - currentQueue = createSegmentQueue(currentIndex, thresholdBytes); - queues.add(currentQueue); - } - } + public int size() { + if (flushBuffer) return flushedCount; + return getInMemoryQueue().size(); + } + + public long getInMemByteSize() { + if (flushBuffer) return 0; + return totalResultSize; + } - return this.currentQueue.offer(e); + public boolean isFlushed() { + return flushBuffer; } @Override - public T poll() { - initMergedQueue(); - if (mergedQueue != null && !mergedQueue.isEmpty()) { - BufferedSegmentQueue queue = mergedQueue.poll(); - T re = queue.poll(); - if (queue.peek() != null) { - mergedQueue.add(queue); - } - return re; + public boolean offer(T e) { + if (isClosed || flushBuffer) return false; + + boolean added = getInMemoryQueue().add(e); + if (added) { + try { + flush(e); + } catch (IOException ex) { + throw new RuntimeException(ex); } - return null; + } + + return added; } @Override public T peek() { - initMergedQueue(); - if (mergedQueue != null && !mergedQueue.isEmpty()) { - return mergedQueue.peek().peek(); - } - return null; + 
if (current == null && !isClosed) { + current = next(); + } + + return current; } - + @Override - public void clear() { - for (BufferedSegmentQueue queue : getSegmentQueues()) { - queue.clear(); - } - currentIndex = -1; - currentQueue = null; - mergedQueue = null; + public T poll() { + T ret = peek(); + if (!isClosed) { + current = next(); + } else { + current = null; + } + + return ret; } @Override public Iterator iterator() { - throw new UnsupportedOperationException(); - } + if (isClosed) return null; - @Override - public int size() { - int size = 0; - for (BufferedSegmentQueue queue : getSegmentQueues()) { - size += queue.size(); - } - return size; + if (!flushBuffer) return getInMemoryQueue().iterator(); + + SegmentQueueFileIterator iterator = new SegmentQueueFileIterator(thisIterator); + iterators.add(iterator); + return iterator; } - + @Override - public long getByteSize() { - return currentQueue == null ? 0 : currentQueue.getInMemByteSize(); + public void clear() { + getInMemoryQueue().clear(); + this.totalResultSize = 0; + this.maxResultSize = 0; + this.flushBuffer = false; + this.flushedCount = 0; + this.current = null; + if (thisIterator != null) { + thisIterator.close(); + thisIterator = null; + } + for (SegmentQueueFileIterator iter : iterators) { + iter.close(); + } + iterators.clear(); + if (this.file != null) { + file.delete(); + file = null; + } } - @Override public void close() { - for (BufferedSegmentQueue queue : queues) { - queue.close(); - } - queues.clear(); - } - - private void initMergedQueue() { - if (mergedQueue == null && currentIndex >= 0) { - mergedQueue = MinMaxPriorityQueue.> orderedBy( - getSegmentQueueComparator()).maximumSize(currentIndex + 1).create(); - for (BufferedSegmentQueue queue : getSegmentQueues()) { - T re = queue.peek(); - if (re != null) { - mergedQueue.add(queue); - } - } - } + if (!isClosed) { + clear(); + this.isClosed = true; + } } - public abstract static class BufferedSegmentQueue extends AbstractQueue { - protected static final int EOF = -1; - - private final int index; - private final long thresholdBytes; - private final boolean hasMaxQueueSize; - private long totalResultSize = 0; - private long maxResultSize = 0; - private File file; - private boolean isClosed = false; - private boolean flushBuffer = false; - private int flushedCount = 0; - private T current = null; - private SegmentQueueFileIterator thisIterator; - // iterators to close on close() - private List iterators; - - public BufferedSegmentQueue(int index, long thresholdBytes, boolean hasMaxQueueSize) { - this.index = index; - this.thresholdBytes = thresholdBytes; - this.hasMaxQueueSize = hasMaxQueueSize; - this.iterators = Lists. 
newArrayList(); - } - - abstract protected Queue getInMemoryQueue(); - abstract protected long sizeOf(T e); - abstract protected void writeToStream(DataOutputStream out, T e) throws IOException; - abstract protected T readFromStream(DataInputStream in) throws IOException; - - public int index() { - return this.index; - } - - @Override - public int size() { - if (flushBuffer) - return flushedCount; - return getInMemoryQueue().size(); - } - - public long getInMemByteSize() { - if (flushBuffer) - return 0; - return totalResultSize; - } - - public boolean isFlushed() { - return flushBuffer; + private T next() { + T ret = null; + if (!flushBuffer) { + ret = getInMemoryQueue().poll(); + } else { + if (thisIterator == null) { + thisIterator = new SegmentQueueFileIterator(); } + ret = thisIterator.next(); + } - @Override - public boolean offer(T e) { - if (isClosed || flushBuffer) - return false; - - boolean added = getInMemoryQueue().add(e); - if (added) { - try { - flush(e); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - } - - return added; - } - - @Override - public T peek() { - if (current == null && !isClosed) { - current = next(); - } - - return current; + if (ret == null) { + close(); + } + + return ret; + } + + private void flush(T entry) throws IOException { + Queue inMemQueue = getInMemoryQueue(); + long resultSize = sizeOf(entry); + maxResultSize = Math.max(maxResultSize, resultSize); + totalResultSize = + hasMaxQueueSize ? maxResultSize * inMemQueue.size() : (totalResultSize + resultSize); + if (totalResultSize >= thresholdBytes) { + this.file = File.createTempFile(UUID.randomUUID().toString(), null); + try (DataOutputStream out = + new DataOutputStream(new BufferedOutputStream(Files.newOutputStream(file.toPath())))) { + int resSize = inMemQueue.size(); + for (int i = 0; i < resSize; i++) { + T e = inMemQueue.poll(); + writeToStream(out, e); + } + out.writeInt(EOF); // end + flushedCount = resSize; + inMemQueue.clear(); + flushBuffer = true; } - - @Override - public T poll() { - T ret = peek(); - if (!isClosed) { - current = next(); - } else { - current = null; - } - - return ret; + } + } + + private class SegmentQueueFileIterator implements Iterator, Closeable { + private boolean isEnd; + private long readIndex; + private DataInputStream in; + private T next; + + public SegmentQueueFileIterator() { + init(0); + } + + public SegmentQueueFileIterator(SegmentQueueFileIterator iterator) { + if (iterator != null && iterator.isEnd) { + this.isEnd = true; + } else { + init(iterator == null ? 
0 : iterator.readIndex); } + } - @Override - public Iterator iterator() { - if (isClosed) - return null; - - if (!flushBuffer) - return getInMemoryQueue().iterator(); - - SegmentQueueFileIterator iterator = new SegmentQueueFileIterator(thisIterator); - iterators.add(iterator); - return iterator; + private void init(long readIndex) { + this.isEnd = false; + this.readIndex = readIndex; + this.next = null; + try { + this.in = + new DataInputStream(new BufferedInputStream(Files.newInputStream(file.toPath()))); + } catch (IOException e) { + throw new RuntimeException(e); } + } - @Override - public void clear() { - getInMemoryQueue().clear(); - this.totalResultSize = 0; - this.maxResultSize = 0; - this.flushBuffer = false; - this.flushedCount = 0; - this.current = null; - if (thisIterator != null) { - thisIterator.close(); - thisIterator = null; - } - for (SegmentQueueFileIterator iter : iterators) { - iter.close(); - } - iterators.clear(); - if (this.file != null) { - file.delete(); - file = null; - } + @Override + public boolean hasNext() { + if (!isEnd && next == null) { + next = readNext(); } - - public void close() { - if (!isClosed) { - clear(); - this.isClosed = true; - } + + return next != null; + } + + @Override + public T next() { + if (!hasNext()) return null; + + T ret = next; + next = readNext(); + return ret; + } + + private T readNext() { + if (isEnd) return null; + + T e = null; + try { + e = readFromStream(in); + } catch (IOException ex) { + throw new RuntimeException(ex); } - - private T next() { - T ret = null; - if (!flushBuffer) { - ret = getInMemoryQueue().poll(); - } else { - if (thisIterator == null) { - thisIterator = new SegmentQueueFileIterator(); - } - ret = thisIterator.next(); - } - - if (ret == null) { - close(); - } - - return ret; + if (e == null) { + close(); + return null; } + return e; + } - private void flush(T entry) throws IOException { - Queue inMemQueue = getInMemoryQueue(); - long resultSize = sizeOf(entry); - maxResultSize = Math.max(maxResultSize, resultSize); - totalResultSize = hasMaxQueueSize ? maxResultSize * inMemQueue.size() : (totalResultSize + resultSize); - if (totalResultSize >= thresholdBytes) { - this.file = File.createTempFile(UUID.randomUUID().toString(), null); - try (DataOutputStream out = new DataOutputStream( - new BufferedOutputStream(Files.newOutputStream(file.toPath())))) { - int resSize = inMemQueue.size(); - for (int i = 0; i < resSize; i++) { - T e = inMemQueue.poll(); - writeToStream(out, e); - } - out.writeInt(EOF); // end - flushedCount = resSize; - inMemQueue.clear(); - flushBuffer = true; - } - } - } - - private class SegmentQueueFileIterator implements Iterator, Closeable { - private boolean isEnd; - private long readIndex; - private DataInputStream in; - private T next; - - public SegmentQueueFileIterator() { - init(0); - } - - public SegmentQueueFileIterator(SegmentQueueFileIterator iterator) { - if (iterator != null && iterator.isEnd) { - this.isEnd = true; - } else { - init(iterator == null ? 
0 : iterator.readIndex); - } - } - - private void init(long readIndex) { - this.isEnd = false; - this.readIndex = readIndex; - this.next = null; - try { - this.in = new DataInputStream( - new BufferedInputStream(Files.newInputStream(file.toPath()))); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public boolean hasNext() { - if (!isEnd && next == null) { - next = readNext(); - } - - return next != null; - } - - @Override - public T next() { - if (!hasNext()) - return null; - - T ret = next; - next = readNext(); - return ret; - } - - private T readNext() { - if (isEnd) - return null; - - T e = null; - try { - e = readFromStream(in); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - if (e == null) { - close(); - return null; - } - return e; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - @Override - public void close() { - this.isEnd = true; - try { - this.in.close(); - } catch (IOException ignored) { - } - } + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + this.isEnd = true; + try { + this.in.close(); + } catch (IOException ignored) { } + } } + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedSortedQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedSortedQueue.java index c794caad60b..38970074e90 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedSortedQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedSortedQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,120 +32,116 @@ import org.apache.phoenix.iterate.OrderedResultIterator.ResultEntry; import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.collect.MinMaxPriorityQueue; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ResultUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.MinMaxPriorityQueue; - public class BufferedSortedQueue extends BufferedQueue { - private Comparator comparator; - private final int limit; - - public BufferedSortedQueue(Comparator comparator, - Integer limit, long thresholdBytes) throws IOException { - super(thresholdBytes); - this.comparator = comparator; - this.limit = limit == null ? -1 : limit; + private Comparator comparator; + private final int limit; + + public BufferedSortedQueue(Comparator comparator, Integer limit, long thresholdBytes) + throws IOException { + super(thresholdBytes); + this.comparator = comparator; + this.limit = limit == null ? 
-1 : limit; + } + + @Override + protected BufferedSegmentQueue createSegmentQueue(int index, long thresholdBytes) { + return new BufferedResultEntryPriorityQueue(index, thresholdBytes, limit, comparator); + } + + @Override + protected Comparator> getSegmentQueueComparator() { + return new Comparator>() { + @Override + public int compare(BufferedSegmentQueue q1, + BufferedSegmentQueue q2) { + return comparator.compare(q1.peek(), q2.peek()); + } + }; + } + + private static class BufferedResultEntryPriorityQueue extends BufferedSegmentQueue { + private MinMaxPriorityQueue results = null; + + public BufferedResultEntryPriorityQueue(int index, long thresholdBytes, int limit, + Comparator comparator) { + super(index, thresholdBytes, limit >= 0); + this.results = limit < 0 + ? MinMaxPriorityQueue. orderedBy(comparator).create() + : MinMaxPriorityQueue. orderedBy(comparator).maximumSize(limit).create(); } @Override - protected BufferedSegmentQueue createSegmentQueue( - int index, long thresholdBytes) { - return new BufferedResultEntryPriorityQueue(index, thresholdBytes, limit, comparator); + protected Queue getInMemoryQueue() { + return results; } @Override - protected Comparator> getSegmentQueueComparator() { - return new Comparator>() { - @Override - public int compare(BufferedSegmentQueue q1, - BufferedSegmentQueue q2) { - return comparator.compare(q1.peek(), q2.peek()); - }}; + protected long sizeOf(ResultEntry e) { + return ResultEntry.sizeOf(e); } - private static class BufferedResultEntryPriorityQueue extends BufferedSegmentQueue { - private MinMaxPriorityQueue results = null; - - public BufferedResultEntryPriorityQueue(int index, - long thresholdBytes, int limit, Comparator comparator) { - super(index, thresholdBytes, limit >= 0); - this.results = limit < 0 ? - MinMaxPriorityQueue. orderedBy(comparator).create() - : MinMaxPriorityQueue. 
orderedBy(comparator).maximumSize(limit).create(); - } - - @Override - protected Queue getInMemoryQueue() { - return results; - } - - @Override - protected long sizeOf(ResultEntry e) { - return ResultEntry.sizeOf(e); - } - - @Override - protected void writeToStream(DataOutputStream os, ResultEntry e) throws IOException { - int totalLen = 0; - List keyValues = toKeyValues(e); - for (KeyValue kv : keyValues) { - totalLen += (kv.getLength() + Bytes.SIZEOF_INT); - } - os.writeInt(totalLen); - for (KeyValue kv : keyValues) { - os.writeInt(kv.getLength()); - os.write(kv.getBuffer(), kv.getOffset(), kv - .getLength()); - } - ImmutableBytesWritable[] sortKeys = e.sortKeys; - os.writeInt(sortKeys.length); - for (ImmutableBytesWritable sortKey : sortKeys) { - if (sortKey != null) { - os.writeInt(sortKey.getLength()); - os.write(sortKey.get(), sortKey.getOffset(), - sortKey.getLength()); - } else { - os.writeInt(0); - } - } + @Override + protected void writeToStream(DataOutputStream os, ResultEntry e) throws IOException { + int totalLen = 0; + List keyValues = toKeyValues(e); + for (KeyValue kv : keyValues) { + totalLen += (kv.getLength() + Bytes.SIZEOF_INT); + } + os.writeInt(totalLen); + for (KeyValue kv : keyValues) { + os.writeInt(kv.getLength()); + os.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); + } + ImmutableBytesWritable[] sortKeys = e.sortKeys; + os.writeInt(sortKeys.length); + for (ImmutableBytesWritable sortKey : sortKeys) { + if (sortKey != null) { + os.writeInt(sortKey.getLength()); + os.write(sortKey.get(), sortKey.getOffset(), sortKey.getLength()); + } else { + os.writeInt(0); } + } + } - @Override - protected ResultEntry readFromStream(DataInputStream is) throws IOException { - int length = is.readInt(); - if (length < 0) - return null; - - byte[] rb = new byte[length]; - is.readFully(rb); - Result result = ResultUtil.toResult(new ImmutableBytesWritable(rb)); - ResultTuple rt = new ResultTuple(result); - int sortKeySize = is.readInt(); - ImmutableBytesWritable[] sortKeys = new ImmutableBytesWritable[sortKeySize]; - for (int i = 0; i < sortKeySize; i++) { - int contentLength = is.readInt(); - if (contentLength > 0) { - byte[] sortKeyContent = new byte[contentLength]; - is.readFully(sortKeyContent); - sortKeys[i] = new ImmutableBytesWritable(sortKeyContent); - } else { - sortKeys[i] = null; - } - } - - return new ResultEntry(sortKeys, rt); + @Override + protected ResultEntry readFromStream(DataInputStream is) throws IOException { + int length = is.readInt(); + if (length < 0) return null; + + byte[] rb = new byte[length]; + is.readFully(rb); + Result result = ResultUtil.toResult(new ImmutableBytesWritable(rb)); + ResultTuple rt = new ResultTuple(result); + int sortKeySize = is.readInt(); + ImmutableBytesWritable[] sortKeys = new ImmutableBytesWritable[sortKeySize]; + for (int i = 0; i < sortKeySize; i++) { + int contentLength = is.readInt(); + if (contentLength > 0) { + byte[] sortKeyContent = new byte[contentLength]; + is.readFully(sortKeyContent); + sortKeys[i] = new ImmutableBytesWritable(sortKeyContent); + } else { + sortKeys[i] = null; } + } - private List toKeyValues(ResultEntry entry) { - Tuple result = entry.getResult(); - int size = result.size(); - List kvs = new ArrayList(size); - for (int i = 0; i < size; i++) { - kvs.add(PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i))); - } - return kvs; - } + return new ResultEntry(sortKeys, rt); + } + private List toKeyValues(ResultEntry entry) { + Tuple result = entry.getResult(); + int size = result.size(); + List kvs = 
new ArrayList(size); + for (int i = 0; i < size; i++) { + kvs.add(PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i))); + } + return kvs; } + + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedTupleQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedTupleQueue.java index 3da244cbe12..05f45d12f3c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedTupleQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/BufferedTupleQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,104 +31,103 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ResultUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class BufferedTupleQueue extends BufferedQueue { - public BufferedTupleQueue(long thresholdBytes) { - super(thresholdBytes); + public BufferedTupleQueue(long thresholdBytes) { + super(thresholdBytes); + } + + @Override + protected BufferedSegmentQueue createSegmentQueue(int index, long thresholdBytes) { + return new BufferedTupleSegmentQueue(index, thresholdBytes, false); + } + + @Override + protected Comparator> getSegmentQueueComparator() { + return new Comparator>() { + @Override + public int compare(BufferedSegmentQueue q1, BufferedSegmentQueue q2) { + return q1.index() - q2.index(); + } + }; + } + + @Override + public Iterator iterator() { + return new Iterator() { + private Iterator> queueIter; + private Iterator currentIter; + { + this.queueIter = getSegmentQueues().iterator(); + this.currentIter = queueIter.hasNext() ? queueIter.next().iterator() : null; + } + + @Override + public boolean hasNext() { + return currentIter != null && currentIter.hasNext(); + } + + @Override + public Tuple next() { + if (!hasNext()) return null; + + Tuple ret = currentIter.next(); + if (!currentIter.hasNext()) { + this.currentIter = queueIter.hasNext() ? 
queueIter.next().iterator() : null; + } + + return ret; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + private static class BufferedTupleSegmentQueue extends BufferedSegmentQueue { + private LinkedList results; + + public BufferedTupleSegmentQueue(int index, long thresholdBytes, boolean hasMaxQueueSize) { + super(index, thresholdBytes, hasMaxQueueSize); + this.results = Lists.newLinkedList(); } @Override - protected BufferedSegmentQueue createSegmentQueue(int index, long thresholdBytes) { - return new BufferedTupleSegmentQueue(index, thresholdBytes, false); + protected Queue getInMemoryQueue() { + return results; } @Override - protected Comparator> getSegmentQueueComparator() { - return new Comparator>() { - @Override - public int compare(BufferedSegmentQueue q1, BufferedSegmentQueue q2) { - return q1.index() - q2.index(); - } - }; + protected long sizeOf(Tuple e) { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); + return Bytes.SIZEOF_INT * 2 + kv.getLength(); } @Override - public Iterator iterator() { - return new Iterator() { - private Iterator> queueIter; - private Iterator currentIter; - { - this.queueIter = getSegmentQueues().iterator(); - this.currentIter = queueIter.hasNext() ? queueIter.next().iterator() : null; - } - - @Override - public boolean hasNext() { - return currentIter != null && currentIter.hasNext(); - } - - @Override - public Tuple next() { - if (!hasNext()) return null; - - Tuple ret = currentIter.next(); - if (!currentIter.hasNext()) { - this.currentIter = queueIter.hasNext() ? queueIter.next().iterator() : null; - } - - return ret; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - }; + protected void writeToStream(DataOutputStream out, Tuple e) throws IOException { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); + out.writeInt(kv.getLength() + Bytes.SIZEOF_INT); + out.writeInt(kv.getLength()); + out.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); } - private static class BufferedTupleSegmentQueue extends BufferedSegmentQueue { - private LinkedList results; - - public BufferedTupleSegmentQueue(int index, long thresholdBytes, boolean hasMaxQueueSize) { - super(index, thresholdBytes, hasMaxQueueSize); - this.results = Lists.newLinkedList(); - } - - @Override - protected Queue getInMemoryQueue() { - return results; - } - - @Override - protected long sizeOf(Tuple e) { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); - return Bytes.SIZEOF_INT * 2 + kv.getLength(); - } - - @Override - protected void writeToStream(DataOutputStream out, Tuple e) throws IOException { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); - out.writeInt(kv.getLength() + Bytes.SIZEOF_INT); - out.writeInt(kv.getLength()); - out.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); - } - - @Override - protected Tuple readFromStream(DataInputStream in) throws IOException { - int length = in.readInt(); - if (length < 0) return null; - - byte[] b = new byte[length]; - in.readFully(b); - Result result = ResultUtil.toResult(new ImmutableBytesWritable(b)); - return new ResultTuple(result); - } - + @Override + protected Tuple readFromStream(DataInputStream in) throws IOException { + int length = in.readInt(); + if (length < 0) return null; + + byte[] b = new byte[length]; + in.readFully(b); + Result result = ResultUtil.toResult(new ImmutableBytesWritable(b)); + return new ResultTuple(result); } -} \ No newline at end of file + + 
} +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java index de384c94791..655305c24c8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.iterate; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SCAN_START_ROW_SUFFIX; @@ -26,8 +25,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.execute.MutationState; @@ -37,213 +35,231 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.ScanUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** * {@code PeekingResultIterator} implementation that loads data in chunks. This is intended for * basic scan plans, to avoid loading large quantities of data from HBase in one go. - * *

- * Chunking is deprecated and shouldn't be used while implementing new features. As of HBase 0.98.17,
- * we rely on pacing the server side scanners instead of pulling rows from the server in chunks.
+ * Chunking is deprecated and shouldn't be used while implementing new features. As of HBase
+ * 0.98.17, we rely on pacing the server side scanners instead of pulling rows from the server in
+ * chunks.
 *

*/ @Deprecated public class ChunkedResultIterator implements PeekingResultIterator { - private static final Logger LOGGER = LoggerFactory.getLogger(ChunkedResultIterator.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ChunkedResultIterator.class); + + private final ParallelIteratorFactory delegateIteratorFactory; + private ImmutableBytesWritable lastKey = new ImmutableBytesWritable(); + private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable(); + private final StatementContext context; + private final TableRef tableRef; + private final long chunkSize; + private final MutationState mutationState; + private Scan scan; + private PeekingResultIterator resultIterator; + private QueryPlan plan; + + /** + * Chunking is deprecated and shouldn't be used while implementing new features. As of HBase + * 0.98.17, we rely on pacing the server side scanners instead of pulling rows from the server in + * chunks. + */ + @Deprecated + public static class ChunkedResultIteratorFactory implements ParallelIteratorFactory { - private final ParallelIteratorFactory delegateIteratorFactory; - private ImmutableBytesWritable lastKey = new ImmutableBytesWritable(); - private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable(); - private final StatementContext context; + private final ParallelIteratorFactory delegateFactory; private final TableRef tableRef; - private final long chunkSize; private final MutationState mutationState; - private Scan scan; - private PeekingResultIterator resultIterator; - private QueryPlan plan; - - /** - * Chunking is deprecated and shouldn't be used while implementing new features. As of HBase 0.98.17, - * we rely on pacing the server side scanners instead of pulling rows from the server in chunks. - */ - @Deprecated - public static class ChunkedResultIteratorFactory implements ParallelIteratorFactory { - - private final ParallelIteratorFactory delegateFactory; - private final TableRef tableRef; - private final MutationState mutationState; - - public ChunkedResultIteratorFactory(ParallelIteratorFactory - delegateFactory, MutationState mutationState, TableRef tableRef) { - this.delegateFactory = delegateFactory; - this.tableRef = tableRef; - // Clone MutationState, as the one on the connection may change if auto commit is on - // while we need a handle to the original one (for it's transaction state). - this.mutationState = new MutationState(mutationState); - } - @Override - public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException { - if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan))); - return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan, - mutationState.getConnection().getQueryServices().getProps().getLong( - QueryServices.SCAN_RESULT_CHUNK_SIZE, - QueryServicesOptions.DEFAULT_SCAN_RESULT_CHUNK_SIZE), scanner, plan); - } + public ChunkedResultIteratorFactory(ParallelIteratorFactory delegateFactory, + MutationState mutationState, TableRef tableRef) { + this.delegateFactory = delegateFactory; + this.tableRef = tableRef; + // Clone MutationState, as the one on the connection may change if auto commit is on + // while we need a handle to the original one (for it's transaction state). 
+ this.mutationState = new MutationState(mutationState); } - private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory, - MutationState mutationState, StatementContext context, TableRef tableRef, Scan scan, - long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException { - this.delegateIteratorFactory = delegateIteratorFactory; - this.context = context; - this.tableRef = tableRef; - this.scan = scan; - this.chunkSize = chunkSize; - this.mutationState = mutationState; - this.plan = plan; - // Instantiate single chunk iterator and the delegate iterator in constructor - // to get parallel scans kicked off in separate threads. If we delay this, - // we'll get serialized behavior (see PHOENIX- - if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan))); - ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize); - String tableName = tableRef.getTable().getPhysicalName().getString(); - resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan); + @Override + public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, + Scan scan, String tableName, QueryPlan plan) throws SQLException { + if ( + LOGGER.isDebugEnabled() + ) LOGGER.debug(LogUtil.addCustomAnnotations( + "ChunkedResultIteratorFactory.newIterator over " + + tableRef.getTable().getPhysicalName().getString() + " with " + scan, + ScanUtil.getCustomAnnotations(scan))); + return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan, + mutationState.getConnection().getQueryServices().getProps().getLong( + QueryServices.SCAN_RESULT_CHUNK_SIZE, + QueryServicesOptions.DEFAULT_SCAN_RESULT_CHUNK_SIZE), + scanner, plan); } + } - @Override - public Tuple peek() throws SQLException { - return getResultIterator().peek(); + private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory, + MutationState mutationState, StatementContext context, TableRef tableRef, Scan scan, + long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException { + this.delegateIteratorFactory = delegateIteratorFactory; + this.context = context; + this.tableRef = tableRef; + this.scan = scan; + this.chunkSize = chunkSize; + this.mutationState = mutationState; + this.plan = plan; + // Instantiate single chunk iterator and the delegate iterator in constructor + // to get parallel scans kicked off in separate threads. 
If we delay this, + // we'll get serialized behavior (see PHOENIX- + if (LOGGER.isDebugEnabled()) + LOGGER.debug(LogUtil.addCustomAnnotations( + "Get first chunked result iterator over " + + tableRef.getTable().getPhysicalName().getString() + " with " + scan, + ScanUtil.getCustomAnnotations(scan))); + ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize); + String tableName = tableRef.getTable().getPhysicalName().getString(); + resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, + tableName, plan); + } + + @Override + public Tuple peek() throws SQLException { + return getResultIterator().peek(); + } + + @Override + public Tuple next() throws SQLException { + return getResultIterator().next(); + } + + @Override + public void explain(List planSteps) { + resultIterator.explain(planSteps); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + resultIterator.explain(planSteps, explainPlanAttributesBuilder); + } + + @Override + public void close() throws SQLException { + resultIterator.close(); + } + + private PeekingResultIterator getResultIterator() throws SQLException { + if (resultIterator.peek() == null && lastKey != null) { + resultIterator.close(); + scan = ScanUtil.newScan(scan); + if (ScanUtil.isLocalIndex(scan)) { + scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey)); + } else if (ScanUtil.isReversed(scan)) { + // lastKey is the last row the previous iterator meet but not returned. + // for reverse scan, use prevLastKey as the new stopRow. + scan.withStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey)); + } else { + scan.withStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey)); + } + if (LOGGER.isDebugEnabled()) + LOGGER.debug(LogUtil.addCustomAnnotations( + "Get next chunked result iterator over " + + tableRef.getTable().getPhysicalName().getString() + " with " + scan, + ScanUtil.getCustomAnnotations(scan))); + String tableName = tableRef.getTable().getPhysicalName().getString(); + ReadMetricQueue readMetrics = context.getReadMetricsQueue(); + ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, + scan, context.getConnection().getLogLevel()); + long renewLeaseThreshold = + context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds(); + // Chunking is deprecated, putting max value for timeout here. + ResultIterator singleChunkResultIterator = new SingleChunkResultIterator( + new TableResultIterator(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, + DefaultParallelScanGrouper.getInstance(), Long.MAX_VALUE), + chunkSize); + resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, + tableName, plan); + } + return resultIterator; + } + + /** + * ResultIterator that runs over a single chunk of results (i.e. a portion of a scan). 
+ */ + private class SingleChunkResultIterator implements ResultIterator { + + private int rowCount = 0; + private boolean chunkComplete; + private final ResultIterator delegate; + private final long chunkSize; + + private SingleChunkResultIterator(ResultIterator delegate, long chunkSize) { + Preconditions.checkArgument(chunkSize > 0); + this.delegate = delegate; + this.chunkSize = chunkSize; } @Override public Tuple next() throws SQLException { - return getResultIterator().next(); + if (chunkComplete || lastKey == null) { + return null; + } + Tuple next = delegate.next(); + if (next != null) { + // We actually keep going past the chunk size until the row key changes. This is + // necessary for (at least) hash joins, as they can return multiple rows with the + // same row key. Stopping a chunk at a row key boundary is necessary in order to + // be able to start the next chunk on the next row key + if (rowCount == chunkSize) { + next.getKey(lastKey); + } else if (rowCount > chunkSize && rowKeyChanged(next)) { + chunkComplete = true; + return null; + } + rowCount++; + } else { + lastKey = null; + } + return next; } @Override public void explain(List planSteps) { - resultIterator.explain(planSteps); + delegate.explain(planSteps); } @Override public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - resultIterator.explain(planSteps, explainPlanAttributesBuilder); + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); } @Override public void close() throws SQLException { - resultIterator.close(); - } - - private PeekingResultIterator getResultIterator() throws SQLException { - if (resultIterator.peek() == null && lastKey != null) { - resultIterator.close(); - scan = ScanUtil.newScan(scan); - if (ScanUtil.isLocalIndex(scan)) { - scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey)); - } else if (ScanUtil.isReversed(scan)) { - // lastKey is the last row the previous iterator meet but not returned. - // for reverse scan, use prevLastKey as the new stopRow. - scan.withStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey)); - } else { - scan.withStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey)); - } - if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan))); - String tableName = tableRef.getTable().getPhysicalName().getString(); - ReadMetricQueue readMetrics = context.getReadMetricsQueue(); - ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan, - context.getConnection().getLogLevel()); - long renewLeaseThreshold = context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds(); - //Chunking is deprecated, putting max value for timeout here. - ResultIterator singleChunkResultIterator = - new SingleChunkResultIterator(new TableResultIterator(mutationState, scan, - scanMetricsHolder, renewLeaseThreshold, plan, - DefaultParallelScanGrouper.getInstance(), Long.MAX_VALUE), chunkSize); - resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan); - } - return resultIterator; + delegate.close(); } - /** - * ResultIterator that runs over a single chunk of results (i.e. a portion of a scan). 
- */ - private class SingleChunkResultIterator implements ResultIterator { - - private int rowCount = 0; - private boolean chunkComplete; - private final ResultIterator delegate; - private final long chunkSize; - - private SingleChunkResultIterator(ResultIterator delegate, long chunkSize) { - Preconditions.checkArgument(chunkSize > 0); - this.delegate = delegate; - this.chunkSize = chunkSize; - } - - @Override - public Tuple next() throws SQLException { - if (chunkComplete || lastKey == null) { - return null; - } - Tuple next = delegate.next(); - if (next != null) { - // We actually keep going past the chunk size until the row key changes. This is - // necessary for (at least) hash joins, as they can return multiple rows with the - // same row key. Stopping a chunk at a row key boundary is necessary in order to - // be able to start the next chunk on the next row key - if (rowCount == chunkSize) { - next.getKey(lastKey); - } else if (rowCount > chunkSize && rowKeyChanged(next)) { - chunkComplete = true; - return null; - } - rowCount++; - } else { - lastKey = null; - } - return next; - } - - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - } - - @Override - public void close() throws SQLException { - delegate.close(); - } - - private boolean rowKeyChanged(Tuple newTuple) { - byte[] currentKey = lastKey.get(); - int offset = lastKey.getOffset(); - int length = lastKey.getLength(); - prevLastKey.set(lastKey.copyBytes()); - newTuple.getKey(lastKey); + private boolean rowKeyChanged(Tuple newTuple) { + byte[] currentKey = lastKey.get(); + int offset = lastKey.getOffset(); + int length = lastKey.getLength(); + prevLastKey.set(lastKey.copyBytes()); + newTuple.getKey(lastKey); - return Bytes.compareTo(currentKey, offset, length, lastKey.get(), lastKey.getOffset(), lastKey.getLength()) != 0; - } + return Bytes.compareTo(currentKey, offset, length, lastKey.get(), lastKey.getOffset(), + lastKey.getLength()) != 0; + } - @Override - public String toString() { - return "SingleChunkResultIterator [rowCount=" + rowCount - + ", chunkComplete=" + chunkComplete + ", delegate=" - + delegate + ", chunkSize=" + chunkSize + "]"; - } + @Override + public String toString() { + return "SingleChunkResultIterator [rowCount=" + rowCount + ", chunkComplete=" + chunkComplete + + ", delegate=" + delegate + ", chunkSize=" + chunkSize + "]"; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java index d8ff9eae9ec..6d1ccdc2294 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ClientHashAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,16 +29,13 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Objects; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; -import org.apache.phoenix.compile.StatementContext; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.Aggregators; @@ -50,169 +47,170 @@ import org.apache.phoenix.util.TupleUtil; /** - * - * This class implements client-side hash aggregation in memory. - * Issue https://issues.apache.org/jira/browse/PHOENIX-4751. - * + * This class implements client-side hash aggregation in memory. Issue + * https://issues.apache.org/jira/browse/PHOENIX-4751. */ -public class ClientHashAggregatingResultIterator - implements AggregatingResultIterator { - - private static final int HASH_AGG_INIT_SIZE = 64*1024; - private static final int CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE = 64*1024; - private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; - private final ResultIterator resultIterator; - private final Aggregators aggregators; - private final List groupByExpressions; - private final OrderBy orderBy; - private final MemoryChunk memoryChunk; - private HashMap hash; - private List keyList; - private Iterator keyIterator; - - public ClientHashAggregatingResultIterator(StatementContext context, ResultIterator resultIterator, - Aggregators aggregators, List groupByExpressions, OrderBy orderBy) { - - Objects.requireNonNull(resultIterator); - Objects.requireNonNull(aggregators); - Objects.requireNonNull(groupByExpressions); - this.resultIterator = resultIterator; - this.aggregators = aggregators; - this.groupByExpressions = groupByExpressions; - this.orderBy = orderBy; - memoryChunk = context.getConnection().getQueryServices().getMemoryManager().allocate(CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); +public class ClientHashAggregatingResultIterator implements AggregatingResultIterator { + + private static final int HASH_AGG_INIT_SIZE = 64 * 1024; + private static final int CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE = 64 * 1024; + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + private final ResultIterator resultIterator; + private final Aggregators aggregators; + private final List groupByExpressions; + private final OrderBy orderBy; + private final MemoryChunk memoryChunk; + private HashMap hash; + private List keyList; + private Iterator keyIterator; + + public ClientHashAggregatingResultIterator(StatementContext context, + ResultIterator resultIterator, Aggregators aggregators, List groupByExpressions, + OrderBy orderBy) { + + Objects.requireNonNull(resultIterator); + Objects.requireNonNull(aggregators); + Objects.requireNonNull(groupByExpressions); + this.resultIterator = resultIterator; + this.aggregators = aggregators; + this.groupByExpressions = 
groupByExpressions; + this.orderBy = orderBy; + memoryChunk = context.getConnection().getQueryServices().getMemoryManager() + .allocate(CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); + } + + @Override + public Tuple next() throws SQLException { + if (keyIterator == null) { + hash = populateHash(); + /******** + * Perform a post-aggregation sort only when required. There are 3 possible scenarios: (1) The + * query DOES NOT have an ORDER BY -- in this case, we DO NOT perform a sort, and the results + * will be in random order. (2) The query DOES have an ORDER BY, the ORDER BY keys match the + * GROUP BY keys, and all the ORDER BY keys are ASCENDING -- in this case, we DO perform a + * sort. THE ORDER BY has been optimized away, because the non-hash client aggregation + * generates results in ascending order of the GROUP BY keys. (3) The query DOES have an ORDER + * BY, but the ORDER BY keys do not match the GROUP BY keys, or at least one ORDER BY key is + * DESCENDING -- in this case, we DO NOT perform a sort, because the ORDER BY has not been + * optimized away and will be performed later by the client aggregation code. Finally, we also + * handle optimization of reverse sort here. This is currently defensive, because reverse sort + * is not optimized away. + ********/ + if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { + keyList = sortKeys(); + keyIterator = keyList.iterator(); + } else { + keyIterator = hash.keySet().iterator(); + } } - @Override - public Tuple next() throws SQLException { - if (keyIterator == null) { - hash = populateHash(); - /******** - * - * Perform a post-aggregation sort only when required. There are 3 possible scenarios: - * (1) The query DOES NOT have an ORDER BY -- in this case, we DO NOT perform a sort, and the results will be in random order. - * (2) The query DOES have an ORDER BY, the ORDER BY keys match the GROUP BY keys, and all the ORDER BY keys are ASCENDING - * -- in this case, we DO perform a sort. THE ORDER BY has been optimized away, because the non-hash client aggregation - * generates results in ascending order of the GROUP BY keys. - * (3) The query DOES have an ORDER BY, but the ORDER BY keys do not match the GROUP BY keys, or at least one ORDER BY key is DESCENDING - * -- in this case, we DO NOT perform a sort, because the ORDER BY has not been optimized away and will be performed later by the - * client aggregation code. - * - * Finally, we also handle optimization of reverse sort here. This is currently defensive, because reverse sort is not optimized away. 
- * - ********/ - if (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { - keyList = sortKeys(); - keyIterator = keyList.iterator(); - } else { - keyIterator = hash.keySet().iterator(); - } - } - - if (!keyIterator.hasNext()) { - return null; - } - - ImmutableBytesWritable key = keyIterator.next(); - Aggregator[] rowAggregators = hash.get(key); - byte[] value = aggregators.toBytes(rowAggregators); - Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(key, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); - return tuple; + if (!keyIterator.hasNext()) { + return null; } - @Override - public void close() throws SQLException { - keyIterator = null; - keyList = null; - hash = null; - try { - memoryChunk.close(); - } finally { - resultIterator.close(); - } + ImmutableBytesWritable key = keyIterator.next(); + Aggregator[] rowAggregators = hash.get(key); + byte[] value = aggregators.toBytes(rowAggregators); + Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(key, SINGLE_COLUMN_FAMILY, + SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); + return tuple; + } + + @Override + public void close() throws SQLException { + keyIterator = null; + keyList = null; + hash = null; + try { + memoryChunk.close(); + } finally { + resultIterator.close(); } - - @Override - public Aggregator[] aggregate(Tuple result) { - Aggregator[] rowAggregators = aggregators.getAggregators(); - aggregators.reset(rowAggregators); - aggregators.aggregate(rowAggregators, result); - return rowAggregators; + } + + @Override + public Aggregator[] aggregate(Tuple result) { + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + aggregators.aggregate(rowAggregators, result); + return rowAggregators; + } + + @Override + public void explain(List planSteps) { + resultIterator.explain(planSteps); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + resultIterator.explain(planSteps, explainPlanAttributesBuilder); + } + + @Override + public String toString() { + return "ClientHashAggregatingResultIterator [resultIterator=" + resultIterator + + ", aggregators=" + aggregators + ", groupByExpressions=" + groupByExpressions + "]"; + } + + // Copied from ClientGroupedAggregatingResultIterator + protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) + throws SQLException { + try { + ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(tuple, groupByExpressions); + ptr.set(key.get(), key.getOffset(), key.getLength()); + return ptr; + } catch (IOException e) { + throw new SQLException(e); } - - @Override - public void explain(List planSteps) { - resultIterator.explain(planSteps); - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - resultIterator.explain(planSteps, explainPlanAttributesBuilder); - } - - @Override - public String toString() { - return "ClientHashAggregatingResultIterator [resultIterator=" - + resultIterator + ", aggregators=" + aggregators + ", groupByExpressions=" - + groupByExpressions + "]"; - } - - // Copied from ClientGroupedAggregatingResultIterator - protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException { - try { - ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(tuple, groupByExpressions); - ptr.set(key.get(), key.getOffset(), key.getLength()); - 
return ptr; - } catch (IOException e) { - throw new SQLException(e); + } + + // Copied from ClientGroupedAggregatingResultIterator + protected Tuple wrapKeyValueAsResult(Cell keyValue) { + return new MultiKeyValueTuple(Collections. singletonList(keyValue)); + } + + private HashMap populateHash() throws SQLException { + + hash = new HashMap(HASH_AGG_INIT_SIZE, 0.75f); + final int aggSize = aggregators.getEstimatedByteSize(); + long keySize = 0; + + for (Tuple result = resultIterator.next(); result != null; result = resultIterator.next()) { + ImmutableBytesWritable key = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); + key = getGroupingKey(result, key); + Aggregator[] rowAggregators = hash.get(key); + if (rowAggregators == null) { + keySize += key.getLength(); + long hashSize = + SizedUtil.sizeOfMap(hash.size() + 1, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, aggSize) + + keySize; + if (hashSize > memoryChunk.getSize() + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE) { + // This will throw InsufficientMemoryException if necessary + memoryChunk.resize(hashSize + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); } - } - // Copied from ClientGroupedAggregatingResultIterator - protected Tuple wrapKeyValueAsResult(Cell keyValue) { - return new MultiKeyValueTuple(Collections. singletonList(keyValue)); - } - - private HashMap populateHash() throws SQLException { - - hash = new HashMap(HASH_AGG_INIT_SIZE, 0.75f); - final int aggSize = aggregators.getEstimatedByteSize(); - long keySize = 0; - - for (Tuple result = resultIterator.next(); result != null; result = resultIterator.next()) { - ImmutableBytesWritable key = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - key = getGroupingKey(result, key); - Aggregator[] rowAggregators = hash.get(key); - if (rowAggregators == null) { - keySize += key.getLength(); - long hashSize = SizedUtil.sizeOfMap(hash.size() + 1, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, aggSize) + keySize; - if (hashSize > memoryChunk.getSize() + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE) { - // This will throw InsufficientMemoryException if necessary - memoryChunk.resize(hashSize + CLIENT_HASH_AGG_MEMORY_CHUNK_SIZE); - } - - rowAggregators = aggregators.newAggregators(); - hash.put(key, rowAggregators); - } - - aggregators.aggregate(rowAggregators, result); - } + rowAggregators = aggregators.newAggregators(); + hash.put(key, rowAggregators); + } - return hash; + aggregators.aggregate(rowAggregators, result); } - private List sortKeys() { - // This will throw InsufficientMemoryException if necessary - memoryChunk.resize(memoryChunk.getSize() + SizedUtil.sizeOfArrayList(hash.size())); + return hash; + } - keyList = new ArrayList(hash.size()); - keyList.addAll(hash.keySet()); - Comparator comp = new ImmutableBytesWritable.Comparator(); - if (orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { - comp = Collections.reverseOrder(comp); - } - Collections.sort(keyList, comp); - return keyList; + private List sortKeys() { + // This will throw InsufficientMemoryException if necessary + memoryChunk.resize(memoryChunk.getSize() + SizedUtil.sizeOfArrayList(hash.size())); + + keyList = new ArrayList(hash.size()); + keyList.addAll(hash.keySet()); + Comparator comp = new ImmutableBytesWritable.Comparator(); + if (orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) { + comp = Collections.reverseOrder(comp); } + Collections.sort(keyList, comp); + return keyList; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ConcatResultIterator.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ConcatResultIterator.java index e76c08da4f5..a0fd735ba56 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ConcatResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ConcatResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,131 +20,127 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ClientUtil; - /** - * * Result iterator that concatenates a list of other iterators. - * - * * @since 0.1 */ public class ConcatResultIterator implements PeekingResultIterator { - private final ResultIterators resultIterators; - private List iterators; - private int index; - - public ConcatResultIterator(ResultIterators iterators) { - this.resultIterators = iterators; - } - - private ConcatResultIterator(List iterators) { - this.resultIterators = null; - this.iterators = iterators; - } - - private List getIterators() throws SQLException { - if (iterators == null && resultIterators != null) { - iterators = resultIterators.getIterators(); - } - return iterators; + private final ResultIterators resultIterators; + private List iterators; + private int index; + + public ConcatResultIterator(ResultIterators iterators) { + this.resultIterators = iterators; + } + + private ConcatResultIterator(List iterators) { + this.resultIterators = null; + this.iterators = iterators; + } + + private List getIterators() throws SQLException { + if (iterators == null && resultIterators != null) { + iterators = resultIterators.getIterators(); } - - @Override - public void close() throws SQLException { - SQLException toThrow = null; - try { - if (resultIterators != null) { - resultIterators.close(); - } - } catch (Exception e) { - toThrow = ClientUtil.parseServerException(e); - } finally { + return iterators; + } + + @Override + public void close() throws SQLException { + SQLException toThrow = null; + try { + if (resultIterators != null) { + resultIterators.close(); + } + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + try { + if (iterators != null) { + for (; index < iterators.size(); index++) { + PeekingResultIterator iterator = iterators.get(index); try { - if (iterators != null) { - for (;index < iterators.size(); index++) { - PeekingResultIterator iterator = iterators.get(index); - try { - iterator.close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } - } - } finally { - if (toThrow != null) { - throw toThrow; - } + iterator.close(); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } } + } } - } - - - @Override - public void explain(List planSteps) { - if (resultIterators != null) { - 
resultIterators.explain(planSteps); + } finally { + if (toThrow != null) { + throw toThrow; } + } } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - if (resultIterators != null) { - resultIterators.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void explain(List planSteps) { + if (resultIterators != null) { + resultIterators.explain(planSteps); } + } - private PeekingResultIterator currentIterator() throws SQLException { - List iterators = getIterators(); - while (index < iterators.size()) { - PeekingResultIterator iterator = iterators.get(index); - Tuple r = iterator.peek(); - if (r != null) { - return iterator; - } - iterator.close(); - index++; - } - return EMPTY_ITERATOR; + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + if (resultIterators != null) { + resultIterators.explain(planSteps, explainPlanAttributesBuilder); } - - @Override - public Tuple peek() throws SQLException { - return currentIterator().peek(); + } + + private PeekingResultIterator currentIterator() throws SQLException { + List iterators = getIterators(); + while (index < iterators.size()) { + PeekingResultIterator iterator = iterators.get(index); + Tuple r = iterator.peek(); + if (r != null) { + return iterator; + } + iterator.close(); + index++; } + return EMPTY_ITERATOR; + } - @Override - public Tuple next() throws SQLException { - Tuple next = currentIterator().next(); - if (next == null) { - close(); // Close underlying ResultIterators to free resources sooner rather than later - } - return next; + @Override + public Tuple peek() throws SQLException { + return currentIterator().peek(); + } + + @Override + public Tuple next() throws SQLException { + Tuple next = currentIterator().next(); + if (next == null) { + close(); // Close underlying ResultIterators to free resources sooner rather than later } + return next; + } - @Override - public String toString() { - return "ConcatResultIterator [" + resultIterators == null ? ("iterators=" + iterators) : ("resultIterators=" + resultIterators) - + ", index=" + index + "]"; - } + @Override + public String toString() { + return "ConcatResultIterator [" + resultIterators == null + ? ("iterators=" + iterators) + : ("resultIterators=" + resultIterators) + ", index=" + index + "]"; + } - public static PeekingResultIterator newIterator(final List concatIterators) { - if (concatIterators.isEmpty()) { - return PeekingResultIterator.EMPTY_ITERATOR; - } - - if (concatIterators.size() == 1) { - return concatIterators.get(0); - } - return new ConcatResultIterator(concatIterators); + public static PeekingResultIterator + newIterator(final List concatIterators) { + if (concatIterators.isEmpty()) { + return PeekingResultIterator.EMPTY_ITERATOR; + } + + if (concatIterators.size() == 1) { + return concatIterators.get(0); } + return new ConcatResultIterator(concatIterators); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/CursorResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/CursorResultIterator.java index c09f2e1010c..6671908bd8a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/CursorResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/CursorResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,69 +17,69 @@ */ package org.apache.phoenix.iterate; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; -import org.apache.phoenix.schema.tuple.Tuple; -import org.apache.phoenix.util.CursorUtil; - import java.sql.SQLException; import java.util.List; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; +import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.util.CursorUtil; + public class CursorResultIterator implements ResultIterator { - private String cursorName; - private PeekingResultIterator delegate; - //TODO Configure fetch size from FETCH call - private int fetchSize = 0; - private int rowsRead = 0; - public CursorResultIterator(PeekingResultIterator delegate, String cursorName) { - this.delegate = delegate; - this.cursorName = cursorName; - } + private String cursorName; + private PeekingResultIterator delegate; + // TODO Configure fetch size from FETCH call + private int fetchSize = 0; + private int rowsRead = 0; - @Override - public Tuple next() throws SQLException { - if(!CursorUtil.moreValues(cursorName)){ - return null; - } else if (fetchSize == rowsRead) { - return null; - } + public CursorResultIterator(PeekingResultIterator delegate, String cursorName) { + this.delegate = delegate; + this.cursorName = cursorName; + } - Tuple next = delegate.next(); - CursorUtil.updateCursor(cursorName,next, delegate.peek()); - rowsRead++; - return next; - } - - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - planSteps.add("CLIENT CURSOR " + cursorName); + @Override + public Tuple next() throws SQLException { + if (!CursorUtil.moreValues(cursorName)) { + return null; + } else if (fetchSize == rowsRead) { + return null; } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientCursorName(cursorName); - planSteps.add("CLIENT CURSOR " + cursorName); - } + Tuple next = delegate.next(); + CursorUtil.updateCursor(cursorName, next, delegate.peek()); + rowsRead++; + return next; + } - @Override - public String toString() { - return "CursorResultIterator [cursor=" + cursorName + "]"; - } + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + planSteps.add("CLIENT CURSOR " + cursorName); + } - @Override - public void close() throws SQLException { - //NOP - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientCursorName(cursorName); + planSteps.add("CLIENT CURSOR " + cursorName); + } - public void closeCursor() throws SQLException { - delegate.close(); - } + @Override + public String toString() { + return "CursorResultIterator [cursor=" + cursorName + "]"; + } - public void setFetchSize(int fetchSize){ - this.fetchSize = fetchSize; - this.rowsRead = 0; - } + @Override + public void close() throws SQLException { + // NOP + } + + public void closeCursor() throws SQLException { + delegate.close(); + } + + public void setFetchSize(int fetchSize) { + 
this.fetchSize = fetchSize; + this.rowsRead = 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java index 23ea7974862..d7a63d1c477 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,60 +30,57 @@ import org.apache.phoenix.util.ScanUtil; /** - * Default implementation that creates a scan group if a plan is row key ordered (which requires a merge sort), - * or if a scan crosses a region boundary and the table is salted or a local index. + * Default implementation that creates a scan group if a plan is row key ordered (which requires a + * merge sort), or if a scan crosses a region boundary and the table is salted or a local index. */ public class DefaultParallelScanGrouper implements ParallelScanGrouper { - private static DefaultParallelScanGrouper INSTANCE = new DefaultParallelScanGrouper(); + private static DefaultParallelScanGrouper INSTANCE = new DefaultParallelScanGrouper(); - public DefaultParallelScanGrouper() { - } + public DefaultParallelScanGrouper() { + } - public static DefaultParallelScanGrouper getInstance() { - return INSTANCE; - } + public static DefaultParallelScanGrouper getInstance() { + return INSTANCE; + } - /** - * Returns true if the scan with the startKey is to be the first of a new batch - */ - @Override - public boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, byte[] startKey, - boolean crossesRegionBoundary) { - PTable table = plan.getTableRef().getTable(); - if (lastScan == null) { - return false; - } else if (!plan.isRowKeyOrdered()) { - return true; - } else if (crossesRegionBoundary && table.getIndexType() == IndexType.LOCAL) { - return true; - } else if (table.getBucketNum() != null ) { - return crossesRegionBoundary - || ScanUtil.crossesPrefixBoundary(startKey, - ScanUtil.getPrefix(lastScan.getStartRow(), - SaltingUtil.NUM_SALTING_BYTES), - SaltingUtil.NUM_SALTING_BYTES); - } else { - return false; - } + /** + * Returns true if the scan with the startKey is to be the first of a new batch + */ + @Override + public boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, byte[] startKey, + boolean crossesRegionBoundary) { + PTable table = plan.getTableRef().getTable(); + if (lastScan == null) { + return false; + } else if (!plan.isRowKeyOrdered()) { + return true; + } else if (crossesRegionBoundary && table.getIndexType() == IndexType.LOCAL) { + return true; + } else if (table.getBucketNum() != null) { + return crossesRegionBoundary || ScanUtil.crossesPrefixBoundary(startKey, + ScanUtil.getPrefix(lastScan.getStartRow(), SaltingUtil.NUM_SALTING_BYTES), + SaltingUtil.NUM_SALTING_BYTES); + } else { + return false; } + } - @Override - public List getRegionBoundaries(StatementContext context, byte[] tableName) - throws SQLException { - return context.getConnection().getQueryServices().getAllTableRegions(tableName, - context.getStatement().getQueryTimeoutInMillis()); - } + 
@Override + public List getRegionBoundaries(StatementContext context, byte[] tableName) + throws SQLException { + return context.getConnection().getQueryServices().getAllTableRegions(tableName, + context.getStatement().getQueryTimeoutInMillis()); + } - /** - * {@inheritDoc}. - */ - @Override - public List getRegionBoundaries(StatementContext context, - byte[] tableName, byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) - throws SQLException { - return context.getConnection().getQueryServices() - .getTableRegions(tableName, startRegionBoundaryKey, stopRegionBoundaryKey, - context.getStatement().getQueryTimeoutInMillis()); - } + /** + * {@inheritDoc}. + */ + @Override + public List getRegionBoundaries(StatementContext context, byte[] tableName, + byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { + return context.getConnection().getQueryServices().getTableRegions(tableName, + startRegionBoundaryKey, stopRegionBoundaryKey, + context.getStatement().getQueryTimeoutInMillis()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultTableResultIteratorFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultTableResultIteratorFactory.java index 1008b1b9397..3aba4753b31 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultTableResultIteratorFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DefaultTableResultIteratorFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,13 +30,13 @@ public class DefaultTableResultIteratorFactory implements TableResultIteratorFactory { - @Override - public TableResultIterator newIterator(MutationState mutationState, TableRef tableRef, - Scan scan, ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, - QueryPlan plan, ParallelScanGrouper scanGrouper, Map caches, - long maxQueryEndTime) throws SQLException { - return new TableResultIterator(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, - plan, scanGrouper, caches, maxQueryEndTime); - } + @Override + public TableResultIterator newIterator(MutationState mutationState, TableRef tableRef, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches, + long maxQueryEndTime) throws SQLException { + return new TableResultIterator(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, + plan, scanGrouper, caches, maxQueryEndTime); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DelegateResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DelegateResultIterator.java index 375e0c38d19..26b63956877 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DelegateResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DelegateResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,41 +20,39 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; - public class DelegateResultIterator implements ResultIterator { - private final ResultIterator delegate; - - public DelegateResultIterator(ResultIterator delegate) { - this.delegate = delegate; - } - - protected ResultIterator getDelegate() { - return delegate; - } - - @Override - public void close() throws SQLException { - delegate.close(); - } - - @Override - public Tuple next() throws SQLException { - return delegate.next(); - } - - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - } + private final ResultIterator delegate; + + public DelegateResultIterator(ResultIterator delegate) { + this.delegate = delegate; + } + + protected ResultIterator getDelegate() { + return delegate; + } + + @Override + public void close() throws SQLException { + delegate.close(); + } + + @Override + public Tuple next() throws SQLException { + return delegate.next(); + } + + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DistinctAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DistinctAggregatingResultIterator.java index 59cd333291f..d33cb393044 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DistinctAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/DistinctAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,137 +23,128 @@ import java.util.Set; import org.apache.phoenix.compile.ColumnProjector; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.RowProjector; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.schema.tuple.Tuple; - import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; /** * Result scanner that dedups the incoming tuples to make them distinct. *

* Note that the results are held in memory - * - * * @since 1.2 */ public class DistinctAggregatingResultIterator implements AggregatingResultIterator { + /** + * Original AggregatingResultIterator + */ + private final AggregatingResultIterator targetAggregatingResultIterator; + private final RowProjector rowProjector; + /** + * Cached tuples already seen. + */ + private final Set resultEntries = Sets. newHashSet(); + + private class ResultEntry { /** - * Original AggregatingResultIterator + * cached hashCode. */ - private final AggregatingResultIterator targetAggregatingResultIterator; - private final RowProjector rowProjector; + private final int hashCode; + private final Tuple result; /** - * Cached tuples already seen. + * cached column values. */ - private final Set resultEntries = - Sets.newHashSet(); - - private class ResultEntry { - /** - * cached hashCode. - */ - private final int hashCode; - private final Tuple result; - /** - * cached column values. - */ - private final ImmutableBytesPtr[] columnValues; - - ResultEntry(Tuple result) { - this.result = result; - this.columnValues = - new ImmutableBytesPtr[rowProjector.getColumnCount()]; - int columnIndex = 0; - for (ColumnProjector columnProjector : rowProjector.getColumnProjectors()) { - Expression expression = columnProjector.getExpression(); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - if (!expression.evaluate(this.result, ptr)) { - columnValues[columnIndex] = null; - } else { - columnValues[columnIndex] = ptr; - } - columnIndex++; - } - this.hashCode = Arrays.hashCode(columnValues); - } + private final ImmutableBytesPtr[] columnValues; - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } - if (o == null) { - return false; - } - if (o.getClass() != this.getClass()) { - return false; - } - ResultEntry that = (ResultEntry) o; - return Arrays.equals(this.columnValues, that.columnValues); - } - - @Override - public int hashCode() { - return hashCode; + ResultEntry(Tuple result) { + this.result = result; + this.columnValues = new ImmutableBytesPtr[rowProjector.getColumnCount()]; + int columnIndex = 0; + for (ColumnProjector columnProjector : rowProjector.getColumnProjectors()) { + Expression expression = columnProjector.getExpression(); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + if (!expression.evaluate(this.result, ptr)) { + columnValues[columnIndex] = null; + } else { + columnValues[columnIndex] = ptr; } + columnIndex++; + } + this.hashCode = Arrays.hashCode(columnValues); } - public DistinctAggregatingResultIterator(AggregatingResultIterator delegate, - RowProjector rowProjector) { - this.targetAggregatingResultIterator = delegate; - this.rowProjector = rowProjector; + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o == null) { + return false; + } + if (o.getClass() != this.getClass()) { + return false; + } + ResultEntry that = (ResultEntry) o; + return Arrays.equals(this.columnValues, that.columnValues); } @Override - public Tuple next() throws SQLException { - while (true) { - Tuple nextTuple = this.targetAggregatingResultIterator.next(); - if (nextTuple == null) { - return null; - } - ResultEntry resultEntry = new ResultEntry(nextTuple); - if (!this.resultEntries.contains(resultEntry)) { - this.resultEntries.add(resultEntry); - return nextTuple; - } - } + public int hashCode() { + return hashCode; } + } - @Override - public void close() throws SQLException { - this.targetAggregatingResultIterator.close(); - } + public 
DistinctAggregatingResultIterator(AggregatingResultIterator delegate, + RowProjector rowProjector) { + this.targetAggregatingResultIterator = delegate; + this.rowProjector = rowProjector; + } - @Override - public void explain(List planSteps) { - targetAggregatingResultIterator.explain(planSteps); - planSteps.add("CLIENT DISTINCT ON " + rowProjector.toString()); + @Override + public Tuple next() throws SQLException { + while (true) { + Tuple nextTuple = this.targetAggregatingResultIterator.next(); + if (nextTuple == null) { + return null; + } + ResultEntry resultEntry = new ResultEntry(nextTuple); + if (!this.resultEntries.contains(resultEntry)) { + this.resultEntries.add(resultEntry); + return nextTuple; + } } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - targetAggregatingResultIterator.explain( - planSteps, - explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientDistinctFilter( - rowProjector.toString()); - planSteps.add("CLIENT DISTINCT ON " + rowProjector.toString()); - } + @Override + public void close() throws SQLException { + this.targetAggregatingResultIterator.close(); + } - @Override - public Aggregator[] aggregate(Tuple result) { - return targetAggregatingResultIterator.aggregate(result); - } + @Override + public void explain(List planSteps) { + targetAggregatingResultIterator.explain(planSteps); + planSteps.add("CLIENT DISTINCT ON " + rowProjector.toString()); + } - @Override - public String toString() { - return "DistinctAggregatingResultIterator [targetAggregatingResultIterator=" + targetAggregatingResultIterator - + ", rowProjector=" + rowProjector; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + targetAggregatingResultIterator.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientDistinctFilter(rowProjector.toString()); + planSteps.add("CLIENT DISTINCT ON " + rowProjector.toString()); + } + + @Override + public Aggregator[] aggregate(Tuple result) { + return targetAggregatingResultIterator.aggregate(result); + } + + @Override + public String toString() { + return "DistinctAggregatingResultIterator [targetAggregatingResultIterator=" + + targetAggregatingResultIterator + ", rowProjector=" + rowProjector; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ExplainTable.java index 2cb6cd4b025..3925c222beb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ExplainTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ExplainTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,8 +36,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; import org.apache.phoenix.compile.ScanRanges; @@ -66,489 +65,488 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public abstract class ExplainTable { - private static final Logger LOGGER = LoggerFactory.getLogger(ExplainTable.class); - private static final List EVERYTHING = Collections.singletonList(KeyRange.EVERYTHING_RANGE); - public static final String POINT_LOOKUP_ON_STRING = "POINT LOOKUP ON "; - public static final String REGION_LOCATIONS = " (region locations = "; + private static final Logger LOGGER = LoggerFactory.getLogger(ExplainTable.class); + private static final List EVERYTHING = + Collections.singletonList(KeyRange.EVERYTHING_RANGE); + public static final String POINT_LOOKUP_ON_STRING = "POINT LOOKUP ON "; + public static final String REGION_LOCATIONS = " (region locations = "; + + protected final StatementContext context; + protected final TableRef tableRef; + protected final GroupBy groupBy; + protected final OrderBy orderBy; + protected final HintNode hint; + protected final Integer limit; + protected final Integer offset; + + public ExplainTable(StatementContext context, TableRef table) { + this(context, table, GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE, + null, null); + } - protected final StatementContext context; - protected final TableRef tableRef; - protected final GroupBy groupBy; - protected final OrderBy orderBy; - protected final HintNode hint; - protected final Integer limit; - protected final Integer offset; + public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, + HintNode hintNode, Integer limit, Integer offset) { + this.context = context; + this.tableRef = table; + this.groupBy = groupBy; + this.orderBy = orderBy; + this.hint = hintNode; + this.limit = limit; + this.offset = offset; + } - public ExplainTable(StatementContext context, TableRef table) { - this(context, table, GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE, null, null); + private String explainSkipScan() { + StringBuilder buf = new StringBuilder(); + ScanRanges scanRanges = context.getScanRanges(); + if (scanRanges.isPointLookup()) { + int keyCount = scanRanges.getPointLookupCount(); + buf.append(POINT_LOOKUP_ON_STRING + keyCount + " KEY" + (keyCount > 1 ? "S " : " ")); + } else if (scanRanges.useSkipScanFilter()) { + buf.append("SKIP SCAN "); + int count = 1; + boolean hasRanges = false; + int nSlots = scanRanges.getBoundSlotCount(); + for (int i = 0; i < nSlots; i++) { + List ranges = scanRanges.getRanges().get(i); + count *= ranges.size(); + for (KeyRange range : ranges) { + hasRanges |= !range.isSingleKey(); + } + } + buf.append("ON "); + buf.append(count); + buf.append(hasRanges ? " RANGE" : " KEY"); + buf.append(count > 1 ? 
"S " : " "); + } else { + buf.append("RANGE SCAN "); } + return buf.toString(); + } - public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, HintNode hintNode, - Integer limit, Integer offset) { - this.context = context; - this.tableRef = table; - this.groupBy = groupBy; - this.orderBy = orderBy; - this.hint = hintNode; - this.limit = limit; - this.offset = offset; + protected void explain(String prefix, List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder, + List regionLocations) { + StringBuilder buf = new StringBuilder(prefix); + ScanRanges scanRanges = context.getScanRanges(); + Scan scan = context.getScan(); + + if (scan.getConsistency() != Consistency.STRONG) { + buf.append("TIMELINE-CONSISTENCY "); + } + if (hint.hasHint(Hint.SMALL)) { + buf.append(Hint.SMALL).append(" "); } + if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { + buf.append("REVERSE "); + } + String scanTypeDetails; + if (scanRanges.isEverything()) { + scanTypeDetails = "FULL SCAN "; + } else { + scanTypeDetails = explainSkipScan(); + } + buf.append(scanTypeDetails); - private String explainSkipScan() { - StringBuilder buf = new StringBuilder(); - ScanRanges scanRanges = context.getScanRanges(); - if (scanRanges.isPointLookup()) { - int keyCount = scanRanges.getPointLookupCount(); - buf.append(POINT_LOOKUP_ON_STRING + keyCount + " KEY" + (keyCount > 1 ? "S " : " ")); - } else if (scanRanges.useSkipScanFilter()) { - buf.append("SKIP SCAN "); - int count = 1; - boolean hasRanges = false; - int nSlots = scanRanges.getBoundSlotCount(); - for (int i = 0; i < nSlots; i++) { - List ranges = scanRanges.getRanges().get(i); - count *= ranges.size(); - for (KeyRange range : ranges) { - hasRanges |= !range.isSingleKey(); - } - } - buf.append("ON "); - buf.append(count); - buf.append(hasRanges ? " RANGE" : " KEY"); - buf.append(count > 1 ? 
"S " : " "); - } else { - buf.append("RANGE SCAN "); - } - return buf.toString(); + String tableName = tableRef.getTable().getPhysicalName().getString(); + if (tableRef.getTable().getIndexType() == PTable.IndexType.LOCAL) { + String indexName = tableRef.getTable().getName().getString(); + if ( + tableRef.getTable().getViewIndexId() != null + && indexName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR) + ) { + int lastIndexOf = indexName.lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + indexName = indexName.substring(lastIndexOf + 1); + } + tableName = indexName + "(" + tableName + ")"; } + buf.append("OVER ").append(tableName); - protected void explain(String prefix, - List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder, - List regionLocations) { - StringBuilder buf = new StringBuilder(prefix); - ScanRanges scanRanges = context.getScanRanges(); - Scan scan = context.getScan(); + if (!scanRanges.isPointLookup()) { + buf.append(appendKeyRanges()); + } + planSteps.add(buf.toString()); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setConsistency(scan.getConsistency()); + if (hint.hasHint(Hint.SMALL)) { + explainPlanAttributesBuilder.setHint(Hint.SMALL); + } + if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { + explainPlanAttributesBuilder.setClientSortedBy("REVERSE"); + } + explainPlanAttributesBuilder.setExplainScanType(scanTypeDetails); + explainPlanAttributesBuilder.setTableName(tableName); + if (!scanRanges.isPointLookup()) { + explainPlanAttributesBuilder.setKeyRanges(appendKeyRanges()); + } + } + if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) { + TimeRange range = context.getScan().getTimeRange(); + planSteps.add(" ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")"); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setScanTimeRangeMin(range.getMin()); + explainPlanAttributesBuilder.setScanTimeRangeMax(range.getMax()); + } + } - if (scan.getConsistency() != Consistency.STRONG){ - buf.append("TIMELINE-CONSISTENCY "); - } - if (hint.hasHint(Hint.SMALL)) { - buf.append(Hint.SMALL).append(" "); - } - if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { - buf.append("REVERSE "); - } - String scanTypeDetails; - if (scanRanges.isEverything()) { - scanTypeDetails = "FULL SCAN "; - } else { - scanTypeDetails = explainSkipScan(); - } - buf.append(scanTypeDetails); + PageFilter pageFilter = null; + FirstKeyOnlyFilter firstKeyOnlyFilter = null; + EmptyColumnOnlyFilter emptyColumnOnlyFilter = null; + BooleanExpressionFilter whereFilter = null; + DistinctPrefixFilter distinctFilter = null; + Iterator filterIterator = ScanUtil.getFilterIterator(scan); + if (filterIterator.hasNext()) { + do { + Filter filter = filterIterator.next(); + if (filter instanceof FirstKeyOnlyFilter) { + firstKeyOnlyFilter = (FirstKeyOnlyFilter) filter; + } else if (filter instanceof EmptyColumnOnlyFilter) { + emptyColumnOnlyFilter = (EmptyColumnOnlyFilter) filter; + } else if (filter instanceof PageFilter) { + pageFilter = (PageFilter) filter; + } else if (filter instanceof BooleanExpressionFilter) { + whereFilter = (BooleanExpressionFilter) filter; + } else if (filter instanceof DistinctPrefixFilter) { + distinctFilter = (DistinctPrefixFilter) filter; + } + } while (filterIterator.hasNext()); + } + Set dataColumns = context.getDataColumns(); + if (dataColumns != null && !dataColumns.isEmpty()) { + planSteps.add(" SERVER MERGE " + dataColumns.toString()); + if 
(explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerMergeColumns(dataColumns); + } + } + String whereFilterStr = null; + if (whereFilter != null) { + whereFilterStr = whereFilter.toString(); + } else { + byte[] expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER_STR); + if (expBytes == null) { + // For older clients + expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_FILTER_STR); + } + if (expBytes != null) { + whereFilterStr = Bytes.toString(expBytes); + } + } + if (whereFilterStr != null) { + String serverWhereFilter = + "SERVER FILTER BY " + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") + + (emptyColumnOnlyFilter == null ? "" : "EMPTY COLUMN ONLY AND ") + whereFilterStr; + planSteps.add(" " + serverWhereFilter); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerWhereFilter(serverWhereFilter); + } + } else if (firstKeyOnlyFilter != null) { + planSteps.add(" SERVER FILTER BY FIRST KEY ONLY"); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerWhereFilter("SERVER FILTER BY FIRST KEY ONLY"); + } + } else if (emptyColumnOnlyFilter != null) { + planSteps.add(" SERVER FILTER BY EMPTY COLUMN ONLY"); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerWhereFilter("SERVER FILTER BY EMPTY COLUMN ONLY"); + } + } + if (distinctFilter != null) { + String serverDistinctFilter = + "SERVER DISTINCT PREFIX FILTER OVER " + groupBy.getExpressions().toString(); + planSteps.add(" " + serverDistinctFilter); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerDistinctFilter(serverDistinctFilter); + } + } + if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) { // with GROUP BY, sort + // happens client-side + String orderByExpressions = + "SERVER" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? 
"" : "S")) + + " SORTED BY " + orderBy.getOrderByExpressions().toString(); + planSteps.add(" " + orderByExpressions); + if (explainPlanAttributesBuilder != null) { + if (limit != null) { + explainPlanAttributesBuilder.setServerRowLimit(limit.longValue()); + } + explainPlanAttributesBuilder.setServerSortedBy(orderBy.getOrderByExpressions().toString()); + } + } else { + if (offset != null) { + planSteps.add(" SERVER OFFSET " + offset); + } + Long limit = null; + if (pageFilter != null) { + limit = pageFilter.getPageSize(); + } else { + byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT); + if (limitBytes != null) { + limit = Bytes.toLong(limitBytes); + } + } + if (limit != null) { + planSteps.add(" SERVER " + limit + " ROW LIMIT"); + } + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerOffset(offset); + if (pageFilter != null) { + explainPlanAttributesBuilder.setServerRowLimit(pageFilter.getPageSize()); + } + } + } + Integer groupByLimit = null; + byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT); + if (groupByLimitBytes != null) { + groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes); + } + getRegionLocations(planSteps, explainPlanAttributesBuilder, regionLocations); + groupBy.explain(planSteps, groupByLimit, explainPlanAttributesBuilder); + if (scan.getAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX) != null) { + planSteps.add(" SERVER ARRAY ELEMENT PROJECTION"); + if (explainPlanAttributesBuilder != null) { + explainPlanAttributesBuilder.setServerArrayElementProjection(true); + } + } + if ( + scan.getAttribute(BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION) != null + || scan.getAttribute(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION) != null + ) { + planSteps.add(" SERVER JSON FUNCTION PROJECTION"); + } + } - String tableName = tableRef.getTable().getPhysicalName().getString(); - if (tableRef.getTable().getIndexType() == PTable.IndexType.LOCAL) { - String indexName = tableRef.getTable().getName().getString(); - if (tableRef.getTable().getViewIndexId() != null - && indexName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - int lastIndexOf = indexName.lastIndexOf( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - indexName = indexName.substring(lastIndexOf + 1); - } - tableName = indexName + "(" + tableName + ")"; - } - buf.append("OVER ").append(tableName); + /** + * Retrieve region locations and set the values in the explain plan output. + * @param planSteps list of plan steps to add explain plan output to. + * @param explainPlanAttributesBuilder explain plan v2 attributes builder instance. + * @param regionLocations region locations. + */ + private void getRegionLocations(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder, + List regionLocations) { + String regionLocationPlan = + getRegionLocationsForExplainPlan(explainPlanAttributesBuilder, regionLocations); + if (regionLocationPlan.length() > 0) { + planSteps.add(regionLocationPlan); + } + } - if (!scanRanges.isPointLookup()) { - buf.append(appendKeyRanges()); - } - planSteps.add(buf.toString()); + /** + * Retrieve region locations from hbase client and set the values for the explain plan output. If + * the list of region locations exceed max limit, print only list with the max limit and print num + * of total list size. + * @param explainPlanAttributesBuilder explain plan v2 attributes builder instance. 
+ * @param regionLocationsFromResultIterator region locations. + * @return region locations to be added to the explain plan output. + */ + private String getRegionLocationsForExplainPlan( + ExplainPlanAttributesBuilder explainPlanAttributesBuilder, + List regionLocationsFromResultIterator) { + if (regionLocationsFromResultIterator == null) { + return ""; + } + try { + StringBuilder buf = new StringBuilder().append(REGION_LOCATIONS); + Set regionBoundaries = new HashSet<>(); + List regionLocations = new ArrayList<>(); + for (HRegionLocation regionLocation : regionLocationsFromResultIterator) { + RegionBoundary regionBoundary = new RegionBoundary(regionLocation.getRegion().getStartKey(), + regionLocation.getRegion().getEndKey()); + if (!regionBoundaries.contains(regionBoundary)) { + regionLocations.add(regionLocation); + regionBoundaries.add(regionBoundary); + } + } + int maxLimitRegionLoc = context.getConnection().getQueryServices().getConfiguration().getInt( + QueryServices.MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN, + QueryServicesOptions.DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN); + if (regionLocations.size() > maxLimitRegionLoc) { + int originalSize = regionLocations.size(); + List trimmedRegionLocations = + regionLocations.subList(0, maxLimitRegionLoc); if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setConsistency(scan.getConsistency()); - if (hint.hasHint(Hint.SMALL)) { - explainPlanAttributesBuilder.setHint(Hint.SMALL); - } - if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) { - explainPlanAttributesBuilder.setClientSortedBy("REVERSE"); - } - explainPlanAttributesBuilder.setExplainScanType(scanTypeDetails); - explainPlanAttributesBuilder.setTableName(tableName); - if (!scanRanges.isPointLookup()) { - explainPlanAttributesBuilder.setKeyRanges(appendKeyRanges()); - } - } - if (context.getScan() != null && tableRef.getTable().getRowTimestampColPos() != -1) { - TimeRange range = context.getScan().getTimeRange(); - planSteps.add(" ROW TIMESTAMP FILTER [" + range.getMin() + ", " + range.getMax() + ")"); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setScanTimeRangeMin(range.getMin()); - explainPlanAttributesBuilder.setScanTimeRangeMax(range.getMax()); - } - } - - PageFilter pageFilter = null; - FirstKeyOnlyFilter firstKeyOnlyFilter = null; - EmptyColumnOnlyFilter emptyColumnOnlyFilter = null; - BooleanExpressionFilter whereFilter = null; - DistinctPrefixFilter distinctFilter = null; - Iterator filterIterator = ScanUtil.getFilterIterator(scan); - if (filterIterator.hasNext()) { - do { - Filter filter = filterIterator.next(); - if (filter instanceof FirstKeyOnlyFilter) { - firstKeyOnlyFilter = (FirstKeyOnlyFilter)filter; - } else if (filter instanceof EmptyColumnOnlyFilter) { - emptyColumnOnlyFilter = (EmptyColumnOnlyFilter)filter; - } else if (filter instanceof PageFilter) { - pageFilter = (PageFilter)filter; - } else if (filter instanceof BooleanExpressionFilter) { - whereFilter = (BooleanExpressionFilter)filter; - } else if (filter instanceof DistinctPrefixFilter) { - distinctFilter = (DistinctPrefixFilter)filter; - } - } while (filterIterator.hasNext()); - } - Set dataColumns = context.getDataColumns(); - if (dataColumns != null && !dataColumns.isEmpty()) { - planSteps.add(" SERVER MERGE " + dataColumns.toString()); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerMergeColumns(dataColumns); - } - } - String whereFilterStr = null; - if (whereFilter != null) { - whereFilterStr = 
whereFilter.toString(); - } else { - byte[] expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER_STR); - if (expBytes == null) { - // For older clients - expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_FILTER_STR); - } - if (expBytes != null) { - whereFilterStr = Bytes.toString(expBytes); - } - } - if (whereFilterStr != null) { - String serverWhereFilter = "SERVER FILTER BY " - + (firstKeyOnlyFilter == null ? "" : "FIRST KEY ONLY AND ") - + (emptyColumnOnlyFilter == null ? "" : "EMPTY COLUMN ONLY AND ") - + whereFilterStr; - planSteps.add(" " + serverWhereFilter); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerWhereFilter(serverWhereFilter); - } - } else if (firstKeyOnlyFilter != null) { - planSteps.add(" SERVER FILTER BY FIRST KEY ONLY"); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerWhereFilter( - "SERVER FILTER BY FIRST KEY ONLY"); - } - } else if (emptyColumnOnlyFilter != null) { - planSteps.add(" SERVER FILTER BY EMPTY COLUMN ONLY"); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerWhereFilter( - "SERVER FILTER BY EMPTY COLUMN ONLY"); - } - } - if (distinctFilter != null) { - String serverDistinctFilter = "SERVER DISTINCT PREFIX FILTER OVER " - + groupBy.getExpressions().toString(); - planSteps.add(" " + serverDistinctFilter); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerDistinctFilter(serverDistinctFilter); - } - } - if (!orderBy.getOrderByExpressions().isEmpty() && groupBy.isEmpty()) { // with GROUP BY, sort happens client-side - String orderByExpressions = "SERVER" - + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) - + " SORTED BY " + orderBy.getOrderByExpressions().toString(); - planSteps.add(" " + orderByExpressions); - if (explainPlanAttributesBuilder != null) { - if (limit != null) { - explainPlanAttributesBuilder.setServerRowLimit(limit.longValue()); - } - explainPlanAttributesBuilder.setServerSortedBy( - orderBy.getOrderByExpressions().toString()); - } - } else { - if (offset != null) { - planSteps.add(" SERVER OFFSET " + offset); - } - Long limit = null; - if (pageFilter != null) { - limit = pageFilter.getPageSize(); - } else { - byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT); - if (limitBytes != null) { - limit = Bytes.toLong(limitBytes); - } - } - if (limit != null) { - planSteps.add(" SERVER " + limit + " ROW LIMIT"); - } - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerOffset(offset); - if (pageFilter != null) { - explainPlanAttributesBuilder.setServerRowLimit( - pageFilter.getPageSize()); - } - } - } - Integer groupByLimit = null; - byte[] groupByLimitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT); - if (groupByLimitBytes != null) { - groupByLimit = (Integer) PInteger.INSTANCE.toObject(groupByLimitBytes); - } - getRegionLocations(planSteps, explainPlanAttributesBuilder, regionLocations); - groupBy.explain(planSteps, groupByLimit, explainPlanAttributesBuilder); - if (scan.getAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX) != null) { - planSteps.add(" SERVER ARRAY ELEMENT PROJECTION"); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setServerArrayElementProjection(true); - } - } - if (scan.getAttribute(BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION) != null - || 
scan.getAttribute(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION) != null) { - planSteps.add(" SERVER JSON FUNCTION PROJECTION"); - } + explainPlanAttributesBuilder + .setRegionLocations(Collections.unmodifiableList(trimmedRegionLocations)); + } + buf.append(trimmedRegionLocations); + buf.append("...total size = "); + buf.append(originalSize); + } else { + buf.append(regionLocations); + } + buf.append(") "); + return buf.toString(); + } catch (Exception e) { + LOGGER.error("Explain table unable to add region locations.", e); + return ""; } + } - /** - * Retrieve region locations and set the values in the explain plan output. - * - * @param planSteps list of plan steps to add explain plan output to. - * @param explainPlanAttributesBuilder explain plan v2 attributes builder instance. - * @param regionLocations region locations. - */ - private void getRegionLocations(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder, - List regionLocations) { - String regionLocationPlan = getRegionLocationsForExplainPlan(explainPlanAttributesBuilder, - regionLocations); - if (regionLocationPlan.length() > 0) { - planSteps.add(regionLocationPlan); - } + /** + * Region boundary class with start and end key of the region. + */ + private static class RegionBoundary { + private final byte[] startKey; + private final byte[] endKey; + + RegionBoundary(byte[] startKey, byte[] endKey) { + this.startKey = startKey; + this.endKey = endKey; } - /** - * Retrieve region locations from hbase client and set the values for the explain plan output. - * If the list of region locations exceed max limit, print only list with the max limit and - * print num of total list size. - * - * @param explainPlanAttributesBuilder explain plan v2 attributes builder instance. - * @param regionLocationsFromResultIterator region locations. - * @return region locations to be added to the explain plan output. 
- */ - private String getRegionLocationsForExplainPlan( - ExplainPlanAttributesBuilder explainPlanAttributesBuilder, - List regionLocationsFromResultIterator) { - if (regionLocationsFromResultIterator == null) { - return ""; - } - try { - StringBuilder buf = new StringBuilder().append(REGION_LOCATIONS); - Set regionBoundaries = new HashSet<>(); - List regionLocations = new ArrayList<>(); - for (HRegionLocation regionLocation : regionLocationsFromResultIterator) { - RegionBoundary regionBoundary = - new RegionBoundary(regionLocation.getRegion().getStartKey(), - regionLocation.getRegion().getEndKey()); - if (!regionBoundaries.contains(regionBoundary)) { - regionLocations.add(regionLocation); - regionBoundaries.add(regionBoundary); - } - } - int maxLimitRegionLoc = context.getConnection().getQueryServices().getConfiguration() - .getInt(QueryServices.MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN, - QueryServicesOptions.DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN); - if (regionLocations.size() > maxLimitRegionLoc) { - int originalSize = regionLocations.size(); - List trimmedRegionLocations = - regionLocations.subList(0, maxLimitRegionLoc); - if (explainPlanAttributesBuilder != null) { - explainPlanAttributesBuilder.setRegionLocations( - Collections.unmodifiableList(trimmedRegionLocations)); - } - buf.append(trimmedRegionLocations); - buf.append("...total size = "); - buf.append(originalSize); - } else { - buf.append(regionLocations); - } - buf.append(") "); - return buf.toString(); - } catch (Exception e) { - LOGGER.error("Explain table unable to add region locations.", e); - return ""; - } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RegionBoundary that = (RegionBoundary) o; + return Bytes.compareTo(startKey, that.startKey) == 0 + && Bytes.compareTo(endKey, that.endKey) == 0; + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(startKey); + result = 31 * result + Arrays.hashCode(endKey); + return result; } + } - /** - * Region boundary class with start and end key of the region. 
- */ - private static class RegionBoundary { - private final byte[] startKey; - private final byte[] endKey; + private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull, int slotIndex, + boolean changeViewIndexId) { + if (Boolean.TRUE.equals(isNull)) { + buf.append("null"); + return; + } + if (Boolean.FALSE.equals(isNull)) { + buf.append("not null"); + return; + } + if (range.length == 0) { + buf.append('*'); + return; + } + ScanRanges scanRanges = context.getScanRanges(); + PDataType type = scanRanges.getSchema().getField(slotIndex).getDataType(); + SortOrder sortOrder = tableRef.getTable().getPKColumns().get(slotIndex).getSortOrder(); + if (sortOrder == SortOrder.DESC) { + buf.append('~'); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(range); + type.coerceBytes(ptr, type, sortOrder, SortOrder.getDefault()); + range = ptr.get(); + } + if (changeViewIndexId) { + buf.append(getViewIndexValue(type, range).toString()); + } else { + Format formatter = context.getConnection().getFormatter(type); + buf.append(type.toStringLiteral(range, formatter)); + } + } - RegionBoundary(byte[] startKey, byte[] endKey) { - this.startKey = startKey; - this.endKey = endKey; - } + private Long getViewIndexValue(PDataType type, byte[] range) { + boolean useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(type); + Object s = type.toObject(range); + return (useLongViewIndex ? (Long) s : (Short) s) + Short.MAX_VALUE + 2; + } - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - RegionBoundary that = (RegionBoundary) o; - return Bytes.compareTo(startKey, that.startKey) == 0 - && Bytes.compareTo(endKey, that.endKey) == 0; - } + private static class RowKeyValueIterator implements Iterator { + private final RowKeySchema schema; + private ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + private int position = 0; + private final int maxOffset; + private byte[] nextValue; - @Override - public int hashCode() { - int result = Arrays.hashCode(startKey); - result = 31 * result + Arrays.hashCode(endKey); - return result; - } + public RowKeyValueIterator(RowKeySchema schema, byte[] rowKey) { + this.schema = schema; + this.maxOffset = schema.iterator(rowKey, ptr); + iterate(); } - private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull, int slotIndex, boolean changeViewIndexId) { - if (Boolean.TRUE.equals(isNull)) { - buf.append("null"); - return; - } - if (Boolean.FALSE.equals(isNull)) { - buf.append("not null"); - return; - } - if (range.length == 0) { - buf.append('*'); - return; - } - ScanRanges scanRanges = context.getScanRanges(); - PDataType type = scanRanges.getSchema().getField(slotIndex).getDataType(); - SortOrder sortOrder = tableRef.getTable().getPKColumns().get(slotIndex).getSortOrder(); - if (sortOrder == SortOrder.DESC) { - buf.append('~'); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(range); - type.coerceBytes(ptr, type, sortOrder, SortOrder.getDefault()); - range = ptr.get(); - } - if (changeViewIndexId) { - buf.append(getViewIndexValue(type, range).toString()); - } else { - Format formatter = context.getConnection().getFormatter(type); - buf.append(type.toStringLiteral(range, formatter)); - } + private void iterate() { + if (schema.next(ptr, position++, maxOffset) == null) { + nextValue = null; + } else { + nextValue = ptr.copyBytes(); + } } - private Long getViewIndexValue(PDataType type, byte[] range) { - boolean 
useLongViewIndex = MetaDataUtil.getViewIndexIdDataType().equals(type); - Object s = type.toObject(range); - return (useLongViewIndex ? (Long) s : (Short) s) + Short.MAX_VALUE + 2; + @Override + public boolean hasNext() { + return nextValue != null; } - private static class RowKeyValueIterator implements Iterator { - private final RowKeySchema schema; - private ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - private int position = 0; - private final int maxOffset; - private byte[] nextValue; - - public RowKeyValueIterator(RowKeySchema schema, byte[] rowKey) { - this.schema = schema; - this.maxOffset = schema.iterator(rowKey, ptr); - iterate(); - } - - private void iterate() { - if (schema.next(ptr, position++, maxOffset) == null) { - nextValue = null; - } else { - nextValue = ptr.copyBytes(); - } - } - - @Override - public boolean hasNext() { - return nextValue != null; - } + @Override + public byte[] next() { + if (nextValue == null) { + throw new NoSuchElementException(); + } + byte[] value = nextValue; + iterate(); + return value; + } - @Override - public byte[] next() { - if (nextValue == null) { - throw new NoSuchElementException(); - } - byte[] value = nextValue; - iterate(); - return value; - } + @Override + public void remove() { + throw new UnsupportedOperationException(); + } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - } - - private void appendScanRow(StringBuilder buf, Bound bound) { - ScanRanges scanRanges = context.getScanRanges(); - Iterator minMaxIterator = Collections.emptyIterator(); - boolean isLocalIndex = ScanUtil.isLocalIndex(context.getScan()); - boolean forceSkipScan = this.hint.hasHint(Hint.SKIP_SCAN); - int nRanges = forceSkipScan ? scanRanges.getRanges().size() : scanRanges.getBoundSlotCount(); - for (int i = 0, minPos = 0; minPos < nRanges || minMaxIterator.hasNext(); i++) { - List ranges = minPos >= nRanges ? EVERYTHING : scanRanges.getRanges().get(minPos++); - KeyRange range = bound == Bound.LOWER ? ranges.get(0) : ranges.get(ranges.size()-1); - byte[] b = range.getRange(bound); - Boolean isNull = KeyRange.IS_NULL_RANGE == range ? Boolean.TRUE : KeyRange.IS_NOT_NULL_RANGE == range ? Boolean.FALSE : null; - if (minMaxIterator.hasNext()) { - byte[] bMinMax = minMaxIterator.next(); - int cmp = Bytes.compareTo(bMinMax, b) * (bound == Bound.LOWER ? 1 : -1); - if (cmp > 0) { - minPos = nRanges; - b = bMinMax; - isNull = null; - } else if (cmp < 0) { - minMaxIterator = Collections.emptyIterator(); - } - } - if (isLocalIndex && i == 0) { - appendPKColumnValue(buf, b, isNull, i, true); - } else { - appendPKColumnValue(buf, b, isNull, i, false); - } - buf.append(','); - } + } + + private void appendScanRow(StringBuilder buf, Bound bound) { + ScanRanges scanRanges = context.getScanRanges(); + Iterator minMaxIterator = Collections.emptyIterator(); + boolean isLocalIndex = ScanUtil.isLocalIndex(context.getScan()); + boolean forceSkipScan = this.hint.hasHint(Hint.SKIP_SCAN); + int nRanges = forceSkipScan ? scanRanges.getRanges().size() : scanRanges.getBoundSlotCount(); + for (int i = 0, minPos = 0; minPos < nRanges || minMaxIterator.hasNext(); i++) { + List ranges = minPos >= nRanges ? EVERYTHING : scanRanges.getRanges().get(minPos++); + KeyRange range = bound == Bound.LOWER ? ranges.get(0) : ranges.get(ranges.size() - 1); + byte[] b = range.getRange(bound); + Boolean isNull = KeyRange.IS_NULL_RANGE == range ? Boolean.TRUE + : KeyRange.IS_NOT_NULL_RANGE == range ? 
Boolean.FALSE + : null; + if (minMaxIterator.hasNext()) { + byte[] bMinMax = minMaxIterator.next(); + int cmp = Bytes.compareTo(bMinMax, b) * (bound == Bound.LOWER ? 1 : -1); + if (cmp > 0) { + minPos = nRanges; + b = bMinMax; + isNull = null; + } else if (cmp < 0) { + minMaxIterator = Collections.emptyIterator(); + } + } + if (isLocalIndex && i == 0) { + appendPKColumnValue(buf, b, isNull, i, true); + } else { + appendPKColumnValue(buf, b, isNull, i, false); + } + buf.append(','); } + } - private String appendKeyRanges() { - final StringBuilder buf = new StringBuilder(); - ScanRanges scanRanges = context.getScanRanges(); - if (scanRanges.isDegenerate() || scanRanges.isEverything()) { - return ""; - } - buf.append(" ["); - StringBuilder buf1 = new StringBuilder(); - appendScanRow(buf1, Bound.LOWER); - buf.append(buf1); - buf.setCharAt(buf.length()-1, ']'); - StringBuilder buf2 = new StringBuilder(); - appendScanRow(buf2, Bound.UPPER); - if (!StringUtil.equals(buf1, buf2)) { - buf.append( " - ["); - buf.append(buf2); - } - buf.setCharAt(buf.length()-1, ']'); - return buf.toString(); + private String appendKeyRanges() { + final StringBuilder buf = new StringBuilder(); + ScanRanges scanRanges = context.getScanRanges(); + if (scanRanges.isDegenerate() || scanRanges.isEverything()) { + return ""; + } + buf.append(" ["); + StringBuilder buf1 = new StringBuilder(); + appendScanRow(buf1, Bound.LOWER); + buf.append(buf1); + buf.setCharAt(buf.length() - 1, ']'); + StringBuilder buf2 = new StringBuilder(); + appendScanRow(buf2, Bound.UPPER); + if (!StringUtil.equals(buf1, buf2)) { + buf.append(" - ["); + buf.append(buf2); } + buf.setCharAt(buf.length() - 1, ']'); + return buf.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterAggregatingResultIterator.java index bd47a78bbce..12220157c93 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,74 +21,74 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PBoolean; - /** - * - * Post aggregation filter for HAVING clause. Due to the way we cache aggregation values - * we cannot have a look ahead for this Iterator, because the expressions in the SELECT - * clause would return values for the peeked row instead of the current row. If we only - * use the Result argument in {@link org.apache.phoenix.expression.Expression} - * instead of our cached value in Aggregators, we could have a look ahead. - * - * + * Post aggregation filter for HAVING clause. 
Due to the way we cache aggregation values we cannot + * have a look ahead for this Iterator, because the expressions in the SELECT clause would return + * values for the peeked row instead of the current row. If we only use the Result argument in + * {@link org.apache.phoenix.expression.Expression} instead of our cached value in Aggregators, we + * could have a look ahead. * @since 0.1 */ -public class FilterAggregatingResultIterator implements AggregatingResultIterator { - private final AggregatingResultIterator delegate; - private final Expression expression; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - public FilterAggregatingResultIterator(AggregatingResultIterator delegate, Expression expression) { - this.delegate = delegate; - this.expression = expression; - if (expression.getDataType() != PBoolean.INSTANCE) { - throw new IllegalArgumentException("FilterResultIterator requires a boolean expression, but got " + expression); - } - } +public class FilterAggregatingResultIterator implements AggregatingResultIterator { + private final AggregatingResultIterator delegate; + private final Expression expression; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - @Override - public Tuple next() throws SQLException { - Tuple next; - do { - next = delegate.next(); - } while (next != null && expression.evaluate(next, ptr) && Boolean.FALSE.equals(expression.getDataType().toObject(ptr))); - return next; + public FilterAggregatingResultIterator(AggregatingResultIterator delegate, + Expression expression) { + this.delegate = delegate; + this.expression = expression; + if (expression.getDataType() != PBoolean.INSTANCE) { + throw new IllegalArgumentException( + "FilterResultIterator requires a boolean expression, but got " + expression); } + } - @Override - public void close() throws SQLException { - delegate.close(); - } + @Override + public Tuple next() throws SQLException { + Tuple next; + do { + next = delegate.next(); + } while ( + next != null && expression.evaluate(next, ptr) + && Boolean.FALSE.equals(expression.getDataType().toObject(ptr)) + ); + return next; + } - @Override - public Aggregator[] aggregate(Tuple result) { - return delegate.aggregate(result); - } + @Override + public void close() throws SQLException { + delegate.close(); + } - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - planSteps.add("CLIENT FILTER BY " + expression.toString()); - } + @Override + public Aggregator[] aggregate(Tuple result) { + return delegate.aggregate(result); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientFilterBy(expression.toString()); - planSteps.add("CLIENT FILTER BY " + expression.toString()); - } + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + planSteps.add("CLIENT FILTER BY " + expression.toString()); + } - @Override - public String toString() { - return "FilterAggregatingResultIterator [delegate=" + delegate - + ", expression=" + expression + ", ptr=" + ptr + "]"; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientFilterBy(expression.toString()); + planSteps.add("CLIENT FILTER BY " + expression.toString()); + } + + @Override + public 
String toString() { + return "FilterAggregatingResultIterator [delegate=" + delegate + ", expression=" + expression + + ", ptr=" + ptr + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterResultIterator.java index bf97782b4eb..a6cd614bec4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/FilterResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,72 +21,72 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PBoolean; - /** - * - * Result scanner that filters out rows based on the results of a boolean - * expression (i.e. filters out if {@link org.apache.phoenix.expression.Expression#evaluate(Tuple, ImmutableBytesWritable)} - * returns false or the ptr contains a FALSE value}). May not be used where - * the delegate provided is an {@link org.apache.phoenix.iterate.AggregatingResultIterator}. - * For these, the {@link org.apache.phoenix.iterate.FilterAggregatingResultIterator} should be used. - * - * + * Result scanner that filters out rows based on the results of a boolean expression (i.e. filters + * out if {@link org.apache.phoenix.expression.Expression#evaluate(Tuple, ImmutableBytesWritable)} + * returns false or the ptr contains a FALSE value}). May not be used where the delegate provided is + * an {@link org.apache.phoenix.iterate.AggregatingResultIterator}. For these, the + * {@link org.apache.phoenix.iterate.FilterAggregatingResultIterator} should be used. * @since 0.1 */ -public class FilterResultIterator extends LookAheadResultIterator { - private final ResultIterator delegate; - private final Expression expression; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - public FilterResultIterator(ResultIterator delegate, Expression expression) { - if (delegate instanceof AggregatingResultIterator) { - throw new IllegalArgumentException("FilterResultScanner may not be used with an aggregate delegate. 
Use phoenix.iterate.FilterAggregateResultScanner instead"); - } - this.delegate = delegate; - this.expression = expression; - if (expression.getDataType() != PBoolean.INSTANCE) { - throw new IllegalArgumentException("FilterResultIterator requires a boolean expression, but got " + expression); - } - } +public class FilterResultIterator extends LookAheadResultIterator { + private final ResultIterator delegate; + private final Expression expression; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - @Override - protected Tuple advance() throws SQLException { - Tuple next; - do { - next = delegate.next(); - expression.reset(); - } while (next != null && (!expression.evaluate(next, ptr) || ptr.getLength() == 0 || !Boolean.TRUE.equals(expression.getDataType().toObject(ptr)))); - return next; + public FilterResultIterator(ResultIterator delegate, Expression expression) { + if (delegate instanceof AggregatingResultIterator) { + throw new IllegalArgumentException( + "FilterResultScanner may not be used with an aggregate delegate. Use phoenix.iterate.FilterAggregateResultScanner instead"); } - - @Override - public void close() throws SQLException { - delegate.close(); + this.delegate = delegate; + this.expression = expression; + if (expression.getDataType() != PBoolean.INSTANCE) { + throw new IllegalArgumentException( + "FilterResultIterator requires a boolean expression, but got " + expression); } + } - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - planSteps.add("CLIENT FILTER BY " + expression.toString()); - } + @Override + protected Tuple advance() throws SQLException { + Tuple next; + do { + next = delegate.next(); + expression.reset(); + } while ( + next != null && (!expression.evaluate(next, ptr) || ptr.getLength() == 0 + || !Boolean.TRUE.equals(expression.getDataType().toObject(ptr))) + ); + return next; + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientFilterBy(expression.toString()); - planSteps.add("CLIENT FILTER BY " + expression.toString()); - } + @Override + public void close() throws SQLException { + delegate.close(); + } - @Override - public String toString() { - return "FilterResultIterator [delegate=" + delegate + ", expression=" - + expression + ", ptr=" + ptr + "]"; - } + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + planSteps.add("CLIENT FILTER BY " + expression.toString()); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientFilterBy(expression.toString()); + planSteps.add("CLIENT FILTER BY " + expression.toString()); + } + + @Override + public String toString() { + return "FilterResultIterator [delegate=" + delegate + ", expression=" + expression + ", ptr=" + + ptr + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java index c0553fac824..c0f9395ca33 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java @@ -7,7 +7,7 @@ * 
"License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,46 +25,34 @@ import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; - - /** - * - * Result scanner that aggregates the row count value for rows with duplicate keys. - * The rows from the backing result iterator must be in key sorted order. For example, - * given the following input: - * a 1 - * a 2 - * b 1 - * b 3 - * c 1 - * the following will be output: - * a 3 - * b 4 - * c 1 - * - * + * Result scanner that aggregates the row count value for rows with duplicate keys. The rows from + * the backing result iterator must be in key sorted order. For example, given the following input: + * a 1 a 2 b 1 b 3 c 1 the following will be output: a 3 b 4 c 1 * @since 0.1 */ public class GroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator { - public GroupedAggregatingResultIterator(PeekingResultIterator resultIterator, Aggregators aggregators) { - super(resultIterator, aggregators); - } - - @Override - protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException { - tuple.getKey(ptr); - return ptr; - } - - @Override - protected Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException { - return new SingleKeyValueTuple(keyValue); - } - - @Override - public String toString() { - return "GroupedAggregatingResultIterator [resultIterator=" - + resultIterator + ", aggregators=" + aggregators + "]"; - } + public GroupedAggregatingResultIterator(PeekingResultIterator resultIterator, + Aggregators aggregators) { + super(resultIterator, aggregators); + } + + @Override + protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) + throws SQLException { + tuple.getKey(ptr); + return ptr; + } + + @Override + protected Tuple wrapKeyValueAsResult(Cell keyValue) throws SQLException { + return new SingleKeyValueTuple(keyValue); + } + + @Override + public String toString() { + return "GroupedAggregatingResultIterator [resultIterator=" + resultIterator + ", aggregators=" + + aggregators + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingPeekingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingPeekingResultIterator.java index a80693d9bd0..a6ebe9553a6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingPeekingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingPeekingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,26 +22,23 @@ import org.apache.phoenix.schema.tuple.Tuple; /** - * * Iterates through tuples up to a limit - * - * * @since 1.2 */ -public class LimitingPeekingResultIterator extends LimitingResultIterator implements PeekingResultIterator { - - public LimitingPeekingResultIterator(PeekingResultIterator delegate, int limit) { - super(delegate, limit); - } +public class LimitingPeekingResultIterator extends LimitingResultIterator + implements PeekingResultIterator { + + public LimitingPeekingResultIterator(PeekingResultIterator delegate, int limit) { + super(delegate, limit); + } + + @Override + protected PeekingResultIterator getDelegate() { + return (PeekingResultIterator) super.getDelegate(); + } - - @Override - protected PeekingResultIterator getDelegate() { - return (PeekingResultIterator) super.getDelegate(); - } - - @Override - public Tuple peek() throws SQLException { - return getDelegate().peek(); - } + @Override + public Tuple peek() throws SQLException { + return getDelegate().peek(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingResultIterator.java index 6e1c52de5d1..a242478efd5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LimitingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,52 +20,47 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; /** - * * Iterates through tuples up to a limit - * - * * @since 1.2 */ public class LimitingResultIterator extends DelegateResultIterator { - private int rowCount; - private final int limit; - - public LimitingResultIterator(ResultIterator delegate, int limit) { - super(delegate); - this.limit = limit; - } + private int rowCount; + private final int limit; - @Override - public Tuple next() throws SQLException { - if (rowCount++ >= limit) { - close(); // Free resources early - return null; - } - return super.next(); - } + public LimitingResultIterator(ResultIterator delegate, int limit) { + super(delegate); + this.limit = limit; + } - @Override - public void explain(List planSteps) { - super.explain(planSteps); - planSteps.add("CLIENT " + limit + " ROW LIMIT"); + @Override + public Tuple next() throws SQLException { + if (rowCount++ >= limit) { + close(); // Free resources early + return null; } + return super.next(); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - super.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientRowLimit(limit); - planSteps.add("CLIENT " + limit + " ROW LIMIT"); - } + @Override + public void explain(List planSteps) { + super.explain(planSteps); + planSteps.add("CLIENT " + limit + " ROW LIMIT"); + } - @Override - public String toString() { - return "LimitingResultIterator [rowCount=" + rowCount + ", limit=" - + limit + "]"; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + super.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientRowLimit(limit); + planSteps.add("CLIENT " + limit + " ROW LIMIT"); + } + + @Override + public String toString() { + return "LimitingResultIterator [rowCount=" + rowCount + ", limit=" + limit + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java index f7c46d4430b..e377c4254ed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,65 +20,63 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; - abstract public class LookAheadResultIterator implements PeekingResultIterator { - public static PeekingResultIterator wrap(final ResultIterator iterator) { - if (iterator instanceof PeekingResultIterator) { - return (PeekingResultIterator) iterator; - } - - return new LookAheadResultIterator() { + public static PeekingResultIterator wrap(final ResultIterator iterator) { + if (iterator instanceof PeekingResultIterator) { + return (PeekingResultIterator) iterator; + } - @Override - public void explain(List planSteps) { - iterator.explain(planSteps); - } + return new LookAheadResultIterator() { - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - iterator.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void explain(List planSteps) { + iterator.explain(planSteps); + } - @Override - public void close() throws SQLException { - iterator.close(); - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + iterator.explain(planSteps, explainPlanAttributesBuilder); + } - @Override - protected Tuple advance() throws SQLException { - return iterator.next(); - } - }; - } - - private final static Tuple UNINITIALIZED = ResultTuple.EMPTY_TUPLE; - private Tuple next = UNINITIALIZED; - - abstract protected Tuple advance() throws SQLException; - - private void init() throws SQLException { - if (next == UNINITIALIZED) { - next = advance(); - } - } - - @Override - public Tuple next() throws SQLException { - init(); - Tuple next = this.next; - this.next = advance(); - return next; - } - - @Override - public Tuple peek() throws SQLException { - init(); - return next; + @Override + public void close() throws SQLException { + iterator.close(); + } + + @Override + protected Tuple advance() throws SQLException { + return iterator.next(); + } + }; + } + + private final static Tuple UNINITIALIZED = ResultTuple.EMPTY_TUPLE; + private Tuple next = UNINITIALIZED; + + abstract protected Tuple advance() throws SQLException; + + private void init() throws SQLException { + if (next == UNINITIALIZED) { + next = advance(); } + } + + @Override + public Tuple next() throws SQLException { + init(); + Tuple next = this.next; + this.next = advance(); + return next; + } + + @Override + public Tuple peek() throws SQLException { + init(); + return next; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedComparableResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedComparableResultIterator.java index 5808a0e7aea..2e87cca2d68 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedComparableResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedComparableResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use 
this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,8 +21,7 @@ import java.util.Comparator; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; /** @@ -30,54 +29,54 @@ * comparator. No copy is made of the backing results iterator. */ public class MaterializedComparableResultIterator - implements PeekingResultIterator, Comparable { + implements PeekingResultIterator, Comparable { - private PeekingResultIterator delegate; - private Comparator comparator; - private Tuple current; + private PeekingResultIterator delegate; + private Comparator comparator; + private Tuple current; - public Tuple getCurrent() { - return current; - } + public Tuple getCurrent() { + return current; + } - public MaterializedComparableResultIterator(PeekingResultIterator delegate, - Comparator c) throws SQLException { - this.delegate = delegate; - this.comparator = c; - this.current = delegate.peek(); - } + public MaterializedComparableResultIterator(PeekingResultIterator delegate, + Comparator c) throws SQLException { + this.delegate = delegate; + this.comparator = c; + this.current = delegate.peek(); + } - @Override - public Tuple next() throws SQLException { - Tuple next = delegate.next(); - this.current = delegate.peek(); - return next; - } + @Override + public Tuple next() throws SQLException { + Tuple next = delegate.next(); + this.current = delegate.peek(); + return next; + } - @Override - public Tuple peek() throws SQLException { - return delegate.peek(); - } + @Override + public Tuple peek() throws SQLException { + return delegate.peek(); + } - @Override - public void close() throws SQLException { - delegate.close(); - } + @Override + public void close() throws SQLException { + delegate.close(); + } - @Override - public int compareTo(MaterializedComparableResultIterator o) { - return comparator.compare(this.getCurrent(), o.getCurrent()); + @Override + public int compareTo(MaterializedComparableResultIterator o) { + return comparator.compare(this.getCurrent(), o.getCurrent()); - } + } - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - } + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedResultIterator.java index a8c75af6631..6511111c104 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MaterializedResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,98 +20,93 @@ import java.sql.SQLException; import java.util.*; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Fully materialized result iterator backed by the result list provided. - * No copy is made of the backing results collection. - * - * + * Fully materialized result iterator backed by the result list provided. No copy is made of the + * backing results collection. * @since 0.1 */ public class MaterializedResultIterator implements PeekingResultIterator { - private final PeekingCollectionIterator iterator; - - public MaterializedResultIterator(Collection results) { - iterator = new PeekingCollectionIterator(results); + private final PeekingCollectionIterator iterator; + + public MaterializedResultIterator(Collection results) { + iterator = new PeekingCollectionIterator(results); + } + + @Override + public void close() { + } + + @Override + public Tuple next() throws SQLException { + return iterator.nextOrNull(); + } + + @Override + public Tuple peek() throws SQLException { + return iterator.peek(); + } + + private static class PeekingCollectionIterator implements Iterator { + private final Iterator iterator; + private Tuple current; + + private PeekingCollectionIterator(Collection results) { + iterator = results.iterator(); + advance(); } - - @Override - public void close() { + + private Tuple advance() { + if (iterator.hasNext()) { + current = iterator.next(); + } else { + current = null; + } + return current; } @Override - public Tuple next() throws SQLException { - return iterator.nextOrNull(); + public boolean hasNext() { + return current != null; } @Override - public Tuple peek() throws SQLException { - return iterator.peek(); + public Tuple next() { + Tuple next = nextOrNull(); + if (next == null) { + throw new NoSuchElementException(); + } + return next; } - private static class PeekingCollectionIterator implements Iterator { - private final Iterator iterator; - private Tuple current; - - private PeekingCollectionIterator(Collection results) { - iterator = results.iterator(); - advance(); - } - - private Tuple advance() { - if (iterator.hasNext()) { - current = iterator.next(); - } else { - current = null; - } - return current; - } - - @Override - public boolean hasNext() { - return current != null; - } - - @Override - public Tuple next() { - Tuple next = nextOrNull(); - if (next == null) { - throw new NoSuchElementException(); - } - return next; - } - - public Tuple nextOrNull() { - if (current == null) { - return null; - } - Tuple next = current; - advance(); - return next; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - public Tuple peek() { - return current; - } - + public Tuple nextOrNull() { + if (current == null) { + return null; + } + Tuple next = current; + advance(); + return next; } @Override - public void explain(List planSteps) { + public void remove() { + throw new UnsupportedOperationException(); } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + public Tuple peek() { + return current; } 
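The peek-ahead pattern used by PeekingCollectionIterator above (buffer the next element so callers can inspect it without consuming it) can be sketched with plain java.util types. This is only an illustration with invented names, not Phoenix code:

import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;

// Minimal peeking wrapper: caches the next element so peek() can return it
// without consuming it, the same advance/peek cycle as PeekingCollectionIterator.
final class PeekingIterator<T> implements Iterator<T> {
  private final Iterator<T> delegate;
  private T current; // null means the underlying iterator is exhausted

  PeekingIterator(Collection<T> source) {
    this.delegate = source.iterator();
    advance();
  }

  private void advance() {
    current = delegate.hasNext() ? delegate.next() : null;
  }

  // Next element without consuming it, or null when exhausted.
  T peek() {
    return current;
  }

  @Override
  public boolean hasNext() {
    return current != null;
  }

  @Override
  public T next() {
    if (current == null) {
      throw new NoSuchElementException();
    }
    T result = current;
    advance();
    return result;
  }
}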
+ + } + + @Override + public void explain(List planSteps) { + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortResultIterator.java index 8392ab6f482..4b41139e468 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,72 +30,75 @@ * @since 1.2 */ public abstract class MergeSortResultIterator implements PeekingResultIterator { - protected final ResultIterators resultIterators; - protected final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - private PriorityQueue minHeap; - private final IteratorComparator itrComparator = new IteratorComparator(); + protected final ResultIterators resultIterators; + protected final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); + private PriorityQueue minHeap; + private final IteratorComparator itrComparator = new IteratorComparator(); - public MergeSortResultIterator(ResultIterators iterators) { - this.resultIterators = iterators; - } + public MergeSortResultIterator(ResultIterators iterators) { + this.resultIterators = iterators; + } - @Override - public void close() throws SQLException { - resultIterators.close(); - } + @Override + public void close() throws SQLException { + resultIterators.close(); + } - abstract protected int compare(Tuple t1, Tuple t2); + abstract protected int compare(Tuple t1, Tuple t2); - @Override - public Tuple peek() throws SQLException { - MaterializedComparableResultIterator iterator = minIterator(); - if (iterator == null) { return null; } - return iterator.peek(); + @Override + public Tuple peek() throws SQLException { + MaterializedComparableResultIterator iterator = minIterator(); + if (iterator == null) { + return null; } + return iterator.peek(); + } - @Override - public Tuple next() throws SQLException { - MaterializedComparableResultIterator iterator = minIterator(); - if (iterator == null) { - close(); - return null; - } - Tuple next = iterator.next(); - minHeap.poll(); - if (iterator.peek() != null) { - minHeap.add(iterator); - } else { - iterator.close(); - } - return next; + @Override + public Tuple next() throws SQLException { + MaterializedComparableResultIterator iterator = minIterator(); + if (iterator == null) { + close(); + return null; } - - private PriorityQueue getMinHeap() throws SQLException { - if (minHeap == null) { - List iterators = resultIterators.getIterators(); - minHeap = new PriorityQueue(Math.max(1, iterators.size())); - for (PeekingResultIterator itr : iterators) { - if (itr.peek() == null) { - itr.close(); - continue; - } - minHeap.add(new MaterializedComparableResultIterator(itr, itrComparator)); - } - } - return minHeap; + Tuple next = iterator.next(); + minHeap.poll(); + if (iterator.peek() != null) { + minHeap.add(iterator); + } else { + iterator.close(); } + return next; + } - private class 
IteratorComparator implements Comparator { - @Override - public int compare(Tuple c1, Tuple c2) { - return MergeSortResultIterator.this.compare(c1, c2); + private PriorityQueue getMinHeap() throws SQLException { + if (minHeap == null) { + List iterators = resultIterators.getIterators(); + minHeap = + new PriorityQueue(Math.max(1, iterators.size())); + for (PeekingResultIterator itr : iterators) { + if (itr.peek() == null) { + itr.close(); + continue; } + minHeap.add(new MaterializedComparableResultIterator(itr, itrComparator)); + } } + return minHeap; + } - private MaterializedComparableResultIterator minIterator() throws SQLException { - PriorityQueue minHeap = getMinHeap(); - MaterializedComparableResultIterator minIterator = minHeap.peek(); - return minIterator; + private class IteratorComparator implements Comparator { + @Override + public int compare(Tuple c1, Tuple c2) { + return MergeSortResultIterator.this.compare(c1, c2); } + } + + private MaterializedComparableResultIterator minIterator() throws SQLException { + PriorityQueue minHeap = getMinHeap(); + MaterializedComparableResultIterator minIterator = minHeap.peek(); + return minIterator; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortRowKeyResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortRowKeyResultIterator.java index 16551b52a21..22757c09666 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortRowKeyResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortRowKeyResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,57 +19,51 @@ import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.TupleUtil; - /** - * - * ResultIterator that does a merge sort on the list of iterators provided, - * returning the rows in row key ascending order. The iterators provided - * must be in row key ascending order. - * - * + * ResultIterator that does a merge sort on the list of iterators provided, returning the rows in + * row key ascending order. The iterators provided must be in row key ascending order. * @since 0.1 */ public class MergeSortRowKeyResultIterator extends MergeSortResultIterator { - private final int keyOffset; - private final int factor; - - public MergeSortRowKeyResultIterator(ResultIterators iterators) { - this(iterators, 0, false); - } - - public MergeSortRowKeyResultIterator(ResultIterators iterators, int keyOffset, boolean isReverse) { - super(iterators); - this.keyOffset = keyOffset; - this.factor = isReverse ? 
-1 : 1; - } - - @Override - protected int compare(Tuple t1, Tuple t2) { - return factor * TupleUtil.compare(t1, t2, tempPtr, keyOffset); - } + private final int keyOffset; + private final int factor; + + public MergeSortRowKeyResultIterator(ResultIterators iterators) { + this(iterators, 0, false); + } + + public MergeSortRowKeyResultIterator(ResultIterators iterators, int keyOffset, + boolean isReverse) { + super(iterators); + this.keyOffset = keyOffset; + this.factor = isReverse ? -1 : 1; + } + + @Override + protected int compare(Tuple t1, Tuple t2) { + return factor * TupleUtil.compare(t1, t2, tempPtr, keyOffset); + } - @Override - public void explain(List planSteps) { - resultIterators.explain(planSteps); - planSteps.add("CLIENT MERGE SORT"); - } + @Override + public void explain(List planSteps) { + resultIterators.explain(planSteps); + planSteps.add("CLIENT MERGE SORT"); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - resultIterators.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientSortAlgo("CLIENT MERGE SORT"); - planSteps.add("CLIENT MERGE SORT"); - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + resultIterators.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientSortAlgo("CLIENT MERGE SORT"); + planSteps.add("CLIENT MERGE SORT"); + } - @Override - public String toString() { - return "MergeSortRowKeyResultIterator [keyOffset=" + keyOffset - + ", factor=" + factor + "]"; - } -} \ No newline at end of file + @Override + public String toString() { + return "MergeSortRowKeyResultIterator [keyOffset=" + keyOffset + ", factor=" + factor + "]"; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java index eace2445e83..7c18aac93c8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/MergeSortTopNResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,115 +21,115 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.OrderByExpression; import org.apache.phoenix.schema.tuple.Tuple; /** - * - * ResultIterator that does a merge sort on the list of iterators provided, - * returning the rows ordered by the OrderByExpression. The input - * iterators must be ordered by the OrderByExpression. - * + * ResultIterator that does a merge sort on the list of iterators provided, returning the rows + * ordered by the OrderByExpression. The input iterators must be ordered by the OrderByExpression. 
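The merge strategy shared by MergeSortResultIterator and its subclasses (keep each input's current head in a min-heap, emit the smallest, re-add the input if it still has rows) can be illustrated with a self-contained sketch over plain, already-sorted java.util iterators. Class and method names here are invented for the example; this is not the Phoenix implementation:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

final class KWayMerge {
  // Merges inputs that are each already sorted by the given comparator.
  static <T> List<T> merge(List<Iterator<T>> sortedInputs, Comparator<T> comparator) {
    // One heap entry per input iterator, keyed by that iterator's buffered head element.
    final class Head {
      T value;
      final Iterator<T> source;
      Head(Iterator<T> source) { this.source = source; this.value = source.next(); }
    }
    PriorityQueue<Head> heap = new PriorityQueue<Head>(Math.max(1, sortedInputs.size()),
      (a, b) -> comparator.compare(a.value, b.value));
    for (Iterator<T> input : sortedInputs) {
      if (input.hasNext()) {
        heap.add(new Head(input)); // skip empty inputs, mirroring the itr.peek() == null check above
      }
    }
    List<T> merged = new ArrayList<T>();
    while (!heap.isEmpty()) {
      Head min = heap.poll();
      merged.add(min.value);
      if (min.source.hasNext()) {
        min.value = min.source.next(); // advance and re-insert with the new head
        heap.add(min);
      }
    }
    return merged;
  }
}

Merging, for example, iterators over [1, 4, 7], [2, 5] and [3, 6] with Comparator.naturalOrder() yields 1 through 7 in order.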
*/ public class MergeSortTopNResultIterator extends MergeSortResultIterator { - private final int limit; - private int count = 0; - private int offsetCount = 0; - private final List orderByColumns; - private final ImmutableBytesWritable ptr1 = new ImmutableBytesWritable(); - private final ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(); - private final int offset; - - public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, Integer offset, - List orderByColumns) { - super(iterators); - this.limit = limit == null ? -1 : limit; - this.offset = offset == null ? -1 : offset; - this.orderByColumns = orderByColumns; - } + private final int limit; + private int count = 0; + private int offsetCount = 0; + private final List orderByColumns; + private final ImmutableBytesWritable ptr1 = new ImmutableBytesWritable(); + private final ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(); + private final int offset; - @Override - protected int compare(Tuple t1, Tuple t2) { - for (int i = 0; i < orderByColumns.size(); i++) { - OrderByExpression order = orderByColumns.get(i); - Expression orderExpr = order.getExpression(); - boolean isNull1 = !orderExpr.evaluate(t1, ptr1) || ptr1.getLength() == 0; - boolean isNull2 = !orderExpr.evaluate(t2, ptr2) || ptr2.getLength() == 0; - if (isNull1 && isNull2) { - continue; - } else if (isNull1) { - return order.isNullsLast() ? 1 : -1; - } else if (isNull2) { - return order.isNullsLast() ? -1 : 1; - } - int cmp = ptr1.compareTo(ptr2); - if (cmp == 0) { - continue; - } - return order.isAscending() ? cmp : -cmp; - } - return 0; - } + public MergeSortTopNResultIterator(ResultIterators iterators, Integer limit, Integer offset, + List orderByColumns) { + super(iterators); + this.limit = limit == null ? -1 : limit; + this.offset = offset == null ? -1 : offset; + this.orderByColumns = orderByColumns; + } - @Override - public Tuple peek() throws SQLException { - while (offsetCount < offset) { - if (super.next() == null) { return null; } - offsetCount++; - } - if (limit >= 0 && count >= limit) { - return null; - } - return super.peek(); + @Override + protected int compare(Tuple t1, Tuple t2) { + for (int i = 0; i < orderByColumns.size(); i++) { + OrderByExpression order = orderByColumns.get(i); + Expression orderExpr = order.getExpression(); + boolean isNull1 = !orderExpr.evaluate(t1, ptr1) || ptr1.getLength() == 0; + boolean isNull2 = !orderExpr.evaluate(t2, ptr2) || ptr2.getLength() == 0; + if (isNull1 && isNull2) { + continue; + } else if (isNull1) { + return order.isNullsLast() ? 1 : -1; + } else if (isNull2) { + return order.isNullsLast() ? -1 : 1; + } + int cmp = ptr1.compareTo(ptr2); + if (cmp == 0) { + continue; + } + return order.isAscending() ? 
cmp : -cmp; } + return 0; + } - @Override - public Tuple next() throws SQLException { - while (offsetCount < offset) { - if (super.next() == null) { return null; } - offsetCount++; - } - if (limit >= 0 && count++ >= limit) { return null; } - return super.next(); + @Override + public Tuple peek() throws SQLException { + while (offsetCount < offset) { + if (super.next() == null) { + return null; + } + offsetCount++; } + if (limit >= 0 && count >= limit) { + return null; + } + return super.peek(); + } - - @Override - public void explain(List planSteps) { - resultIterators.explain(planSteps); - planSteps.add("CLIENT MERGE SORT"); - if (offset > 0) { - planSteps.add("CLIENT OFFSET " + offset); - } - if (limit > 0) { - planSteps.add("CLIENT LIMIT " + limit); - } + @Override + public Tuple next() throws SQLException { + while (offsetCount < offset) { + if (super.next() == null) { + return null; + } + offsetCount++; } + if (limit >= 0 && count++ >= limit) { + return null; + } + return super.next(); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - resultIterators.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientSortAlgo("CLIENT MERGE SORT"); - planSteps.add("CLIENT MERGE SORT"); - if (offset > 0) { - explainPlanAttributesBuilder.setClientOffset(offset); - planSteps.add("CLIENT OFFSET " + offset); - } - if (limit > 0) { - explainPlanAttributesBuilder.setClientRowLimit(limit); - planSteps.add("CLIENT LIMIT " + limit); - } + @Override + public void explain(List planSteps) { + resultIterators.explain(planSteps); + planSteps.add("CLIENT MERGE SORT"); + if (offset > 0) { + planSteps.add("CLIENT OFFSET " + offset); + } + if (limit > 0) { + planSteps.add("CLIENT LIMIT " + limit); } + } - @Override - public String toString() { - return "MergeSortTopNResultIterator [limit=" + limit + ", count=" - + count + ", orderByColumns=" + orderByColumns + ", ptr1=" - + ptr1 + ", ptr2=" + ptr2 + ",offset=" + offset + "]"; + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + resultIterators.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientSortAlgo("CLIENT MERGE SORT"); + planSteps.add("CLIENT MERGE SORT"); + if (offset > 0) { + explainPlanAttributesBuilder.setClientOffset(offset); + planSteps.add("CLIENT OFFSET " + offset); } + if (limit > 0) { + explainPlanAttributesBuilder.setClientRowLimit(limit); + planSteps.add("CLIENT LIMIT " + limit); + } + } + + @Override + public String toString() { + return "MergeSortTopNResultIterator [limit=" + limit + ", count=" + count + ", orderByColumns=" + + orderByColumns + ", ptr1=" + ptr1 + ", ptr2=" + ptr2 + ",offset=" + offset + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OffsetResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OffsetResultIterator.java index f16bb84948c..76d5c2036b7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OffsetResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OffsetResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,99 +17,99 @@ */ package org.apache.phoenix.iterate; +import static org.apache.phoenix.util.ScanUtil.getDummyTuple; +import static org.apache.phoenix.util.ScanUtil.isDummy; + import java.sql.SQLException; import java.util.List; -import static org.apache.phoenix.util.ScanUtil.getDummyTuple; -import static org.apache.phoenix.util.ScanUtil.isDummy; import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.EnvironmentEdgeManager; /** * Iterates through tuples up to a limit - * * @since 1.2 */ public class OffsetResultIterator extends DelegateResultIterator { - private int rowCount; - private final int offset; - private Tuple lastScannedTuple; - private long pageSizeMs = Long.MAX_VALUE; - private boolean isIncompatibleClient = false; + private int rowCount; + private final int offset; + private Tuple lastScannedTuple; + private long pageSizeMs = Long.MAX_VALUE; + private boolean isIncompatibleClient = false; - public OffsetResultIterator(ResultIterator delegate, Integer offset) { - super(delegate); - this.offset = offset == null ? -1 : offset; - this.lastScannedTuple = null; - } + public OffsetResultIterator(ResultIterator delegate, Integer offset) { + super(delegate); + this.offset = offset == null ? -1 : offset; + this.lastScannedTuple = null; + } - public OffsetResultIterator(ResultIterator delegate, Integer offset, long pageSizeMs, - boolean isIncompatibleClient) { - this(delegate, offset); - this.pageSizeMs = pageSizeMs; - this.isIncompatibleClient = isIncompatibleClient; - } + public OffsetResultIterator(ResultIterator delegate, Integer offset, long pageSizeMs, + boolean isIncompatibleClient) { + this(delegate, offset); + this.pageSizeMs = pageSizeMs; + this.isIncompatibleClient = isIncompatibleClient; + } - @Override - public Tuple next() throws SQLException { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (rowCount < offset) { - Tuple tuple = super.next(); - if (tuple == null) { - return null; - } - if (tuple.size() == 0 || isDummy(tuple)) { - if (!isIncompatibleClient) { - return tuple; - } - // While rowCount < offset absorb the dummy and call next on the underlying scanner. - // This is applicable to old client. - continue; - } - rowCount++; - lastScannedTuple = tuple; - if (!isIncompatibleClient) { - if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { - return getDummyTuple(lastScannedTuple); - } - } + @Override + public Tuple next() throws SQLException { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + while (rowCount < offset) { + Tuple tuple = super.next(); + if (tuple == null) { + return null; + } + if (tuple.size() == 0 || isDummy(tuple)) { + if (!isIncompatibleClient) { + return tuple; } - Tuple result = super.next(); - if (result != null) { - lastScannedTuple = result; + // While rowCount < offset absorb the dummy and call next on the underlying scanner. + // This is applicable to old client. 
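Setting aside the server-side paging (dummy tuples) and the incompatible-client branch handled above, the core offset behaviour is simply "discard the first N rows, then pass the rest through". A minimal stand-alone sketch with invented names and plain java.util types:

import java.util.Iterator;

final class OffsetIterator<T> implements Iterator<T> {
  private final Iterator<T> delegate;
  private int remainingToSkip;

  OffsetIterator(Iterator<T> delegate, int offset) {
    this.delegate = delegate;
    this.remainingToSkip = Math.max(0, offset);
  }

  @Override
  public boolean hasNext() {
    // Discard rows that fall before the offset the first time the iterator is touched.
    while (remainingToSkip > 0 && delegate.hasNext()) {
      delegate.next();
      remainingToSkip--;
    }
    return delegate.hasNext();
  }

  @Override
  public T next() {
    hasNext(); // ensure the skip has been applied before delegating
    return delegate.next();
  }
}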
+ continue; + } + rowCount++; + lastScannedTuple = tuple; + if (!isIncompatibleClient) { + if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { + return getDummyTuple(lastScannedTuple); } - return result; + } } - - @Override - public void explain(List planSteps) { - super.explain(planSteps); - planSteps.add("CLIENT OFFSET " + offset); + Tuple result = super.next(); + if (result != null) { + lastScannedTuple = result; } + return result; + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - super.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientOffset(offset); - planSteps.add("CLIENT OFFSET " + offset); - } + @Override + public void explain(List planSteps) { + super.explain(planSteps); + planSteps.add("CLIENT OFFSET " + offset); + } - @Override - public String toString() { - return "OffsetResultIterator [rowCount=" + rowCount + ", offset=" + offset + "]"; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + super.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientOffset(offset); + planSteps.add("CLIENT OFFSET " + offset); + } - public Integer getRemainingOffset() { - return Math.max(offset - rowCount, 0); - } + @Override + public String toString() { + return "OffsetResultIterator [rowCount=" + rowCount + ", offset=" + offset + "]"; + } - public Tuple getLastScannedTuple() { - return lastScannedTuple; - } + public Integer getRemainingOffset() { + return Math.max(offset - rowCount, 0); + } - public void setRowCountToOffset() { - this.rowCount = this.offset; - } + public Tuple getLastScannedTuple() { + return lastScannedTuple; + } + + public void setRowCountToOffset() { + this.rowCount = this.offset; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedAggregatingResultIterator.java index ef4b60745a9..68e515db10a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,40 +24,37 @@ import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.schema.tuple.Tuple; - /** * Result scanner that sorts aggregated rows by columns specified in the ORDER BY clause. *

- * Note that currently the sort is entirely done in memory. - * - * + * Note that currently the sort is entirely done in memory. * @since 0.1 */ -public class OrderedAggregatingResultIterator extends OrderedResultIterator implements AggregatingResultIterator { +public class OrderedAggregatingResultIterator extends OrderedResultIterator + implements AggregatingResultIterator { - public OrderedAggregatingResultIterator(AggregatingResultIterator delegate, - List orderByExpressions, boolean spoolingEnabled, long thresholdBytes, - Integer limit, Integer offset) - throws SQLException { - super(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset); - } + public OrderedAggregatingResultIterator(AggregatingResultIterator delegate, + List orderByExpressions, boolean spoolingEnabled, long thresholdBytes, + Integer limit, Integer offset) throws SQLException { + super(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset); + } - @Override - protected AggregatingResultIterator getDelegate() { - return (AggregatingResultIterator)super.getDelegate(); - } - - @Override - public Tuple next() throws SQLException { - Tuple tuple = super.next(); - if (tuple != null) { - aggregate(tuple); - } - return tuple; - } - - @Override - public Aggregator[] aggregate(Tuple result) { - return getDelegate().aggregate(result); + @Override + protected AggregatingResultIterator getDelegate() { + return (AggregatingResultIterator) super.getDelegate(); + } + + @Override + public Tuple next() throws SQLException { + Tuple tuple = super.next(); + if (tuple != null) { + aggregate(tuple); } + return tuple; + } + + @Override + public Aggregator[] aggregate(Tuple result) { + return getDelegate().aggregate(result); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java index 3d6713b3b82..f75c40f8061 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,16 @@ */ package org.apache.phoenix.iterate; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkPositionIndex; +import static org.apache.phoenix.util.ScanUtil.isDummy; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; + import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -45,549 +55,545 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; - -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkPositionIndex; -import static org.apache.phoenix.util.ScanUtil.isDummy; - /** * Result scanner that sorts aggregated rows by columns specified in the ORDER BY clause. *

- * Note that currently the sort is entirely done in memory. - * - * + * Note that currently the sort is entirely done in memory. * @since 0.1 */ public class OrderedResultIterator implements PeekingResultIterator { - private static final Logger LOGGER = LoggerFactory.getLogger(OrderedResultIterator.class); - - /** A container that holds pointers to a {@link Result} and its sort keys. */ - protected static class ResultEntry { - protected final ImmutableBytesWritable[] sortKeys; - protected final Tuple result; - - ResultEntry(ImmutableBytesWritable[] sortKeys, Tuple result) { - this.sortKeys = sortKeys; - this.result = result; - } - - ImmutableBytesWritable getSortKey(int index) { - checkPositionIndex(index, sortKeys.length); - return sortKeys[index]; - } - - Tuple getResult() { - return result; - } - - static long sizeOf(ResultEntry e) { - return sizeof(e.sortKeys) + sizeof(toKeyValues(e)); - } - - private static long sizeof(List kvs) { - long size = Bytes.SIZEOF_INT; // totalLen + private static final Logger LOGGER = LoggerFactory.getLogger(OrderedResultIterator.class); - for (KeyValue kv : kvs) { - size += kv.getLength(); - size += Bytes.SIZEOF_INT; // kv.getLength - } + /** A container that holds pointers to a {@link Result} and its sort keys. */ + protected static class ResultEntry { + protected final ImmutableBytesWritable[] sortKeys; + protected final Tuple result; - return size; - } + ResultEntry(ImmutableBytesWritable[] sortKeys, Tuple result) { + this.sortKeys = sortKeys; + this.result = result; + } - private static long sizeof(ImmutableBytesWritable[] sortKeys) { - long size = Bytes.SIZEOF_INT; - if (sortKeys != null) { - for (ImmutableBytesWritable sortKey : sortKeys) { - if (sortKey != null) { - size += sortKey.getLength(); - } - size += Bytes.SIZEOF_INT; - } - } - return size; - } + ImmutableBytesWritable getSortKey(int index) { + checkPositionIndex(index, sortKeys.length); + return sortKeys[index]; + } - private static List toKeyValues(ResultEntry entry) { - Tuple result = entry.getResult(); - int size = result.size(); - List kvs = new ArrayList(size); - for (int i = 0; i < size; i++) { - kvs.add(PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i))); - } - return kvs; - } + Tuple getResult() { + return result; } - - /** A function that returns Nth key for a given {@link ResultEntry}. */ - private static class NthKey implements Function { - private final int index; - NthKey(int index) { - this.index = index; - } - @Override - public ImmutableBytesWritable apply(ResultEntry entry) { - return entry.getSortKey(index); - } + static long sizeOf(ResultEntry e) { + return sizeof(e.sortKeys) + sizeof(toKeyValues(e)); } - /** Returns the expression of a given {@link OrderByExpression}. 
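The per-key ordering rules these iterators honour, sort direction plus null placement (see the compare() in MergeSortTopNResultIterator above, where nulls-last sends nulls after non-nulls regardless of direction), can be expressed with standard java.util.Comparator combinators. A hedged illustration with an invented helper, not the Phoenix implementation:

import java.util.Comparator;
import java.util.function.Function;

final class OrderKey {
  // One ORDER BY key: direction plus null placement. Several of these can be chained
  // with Comparator.thenComparing to get the full multi-key ordering.
  static <T, K extends Comparable<K>> Comparator<T> comparing(Function<T, K> keyExtractor,
      boolean ascending, boolean nullsLast) {
    Comparator<K> direction = ascending ? Comparator.<K>naturalOrder() : Comparator.<K>reverseOrder();
    Comparator<K> nullAware = nullsLast ? Comparator.nullsLast(direction) : Comparator.nullsFirst(direction);
    return Comparator.comparing(keyExtractor, nullAware);
  }
}

For instance, OrderKey.comparing(Row::getName, true, true).thenComparing(OrderKey.comparing(Row::getAge, false, false)) would sort a hypothetical Row type by name ascending with nulls last, then age descending with nulls first.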
*/ - private static final Function TO_EXPRESSION = new Function() { - @Override - public Expression apply(OrderByExpression column) { - return column.getExpression(); - } - }; + private static long sizeof(List kvs) { + long size = Bytes.SIZEOF_INT; // totalLen - private final boolean spoolingEnabled; - private final long thresholdBytes; - private final Integer limit; - private final Integer offset; - private final ResultIterator delegate; - private final List orderByExpressions; - private final long estimatedByteSize; - - private PeekingResultIterator resultIterator; - private boolean resultIteratorReady = false; - private Tuple dummyTuple = null; - private long byteSize; - private long pageSizeMs; - private Scan scan; - private byte[] scanStartRowKey; - private byte[] actualScanStartRowKey; - private Boolean actualScanIncludeStartRowKey; - private RegionInfo regionInfo = null; - private boolean includeStartRowKey; - private boolean serverSideIterator = false; - private boolean firstScan = true; - private boolean skipValidRowsSent = false; - - protected ResultIterator getDelegate() { - return delegate; - } - - public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, - boolean spoolingEnabled, long thresholdBytes, Integer limit, Integer offset) { - this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, 0, Long.MAX_VALUE); - } + for (KeyValue kv : kvs) { + size += kv.getLength(); + size += Bytes.SIZEOF_INT; // kv.getLength + } - public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, - boolean spoolingEnabled, long thresholdBytes) throws SQLException { - this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, null, null); + return size; } - public OrderedResultIterator(ResultIterator delegate, - List orderByExpressions, boolean spoolingEnabled, - long thresholdBytes, Integer limit, Integer offset, int estimatedRowSize) { - this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, estimatedRowSize, Long.MAX_VALUE); + private static long sizeof(ImmutableBytesWritable[] sortKeys) { + long size = Bytes.SIZEOF_INT; + if (sortKeys != null) { + for (ImmutableBytesWritable sortKey : sortKeys) { + if (sortKey != null) { + size += sortKey.getLength(); + } + size += Bytes.SIZEOF_INT; + } + } + return size; } - public OrderedResultIterator(ResultIterator delegate, - List orderByExpressions, - boolean spoolingEnabled, - long thresholdBytes, Integer limit, Integer offset, - int estimatedRowSize, long pageSizeMs, Scan scan, - RegionInfo regionInfo) { - this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, - estimatedRowSize, pageSizeMs); - this.scan = scan; - // If scan start rowkey is empty, use region boundaries. Reverse region boundaries - // for reverse scan. - // Keep this same as ServerUtil#getScanStartRowKeyFromScanOrRegionBoundaries. - this.scanStartRowKey = - scan.getStartRow().length > 0 ? scan.getStartRow() - : (scan.isReversed() ? regionInfo.getEndKey() - : regionInfo.getStartKey()); - // Retrieve start rowkey of the previous scan. This would be different than - // current scan start rowkey if the region has recently moved or split or merged. 
- this.actualScanStartRowKey = - scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); - this.actualScanIncludeStartRowKey = true; - this.includeStartRowKey = scan.includeStartRow(); - this.serverSideIterator = true; - this.regionInfo = regionInfo; + private static List toKeyValues(ResultEntry entry) { + Tuple result = entry.getResult(); + int size = result.size(); + List kvs = new ArrayList(size); + for (int i = 0; i < size; i++) { + kvs.add(PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i))); + } + return kvs; } + } - public OrderedResultIterator(ResultIterator delegate, - List orderByExpressions, boolean spoolingEnabled, - long thresholdBytes, Integer limit, Integer offset, - int estimatedRowSize, long pageSizeMs) { - checkArgument(!orderByExpressions.isEmpty()); - this.delegate = delegate; - this.orderByExpressions = orderByExpressions; - this.spoolingEnabled = spoolingEnabled; - this.thresholdBytes = thresholdBytes; - this.offset = offset == null ? 0 : offset; - if (limit != null) { - this.limit = limit + this.offset; - } else { - this.limit = null; - } - long estimatedEntrySize = - // ResultEntry - SizedUtil.OBJECT_SIZE + - // ImmutableBytesWritable[] - SizedUtil.ARRAY_SIZE + orderByExpressions.size() * SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE + - // Tuple - SizedUtil.OBJECT_SIZE + estimatedRowSize; - - // Make sure we don't overflow Long, though this is really unlikely to happen. - assert(limit == null || Long.MAX_VALUE / estimatedEntrySize >= limit + this.offset); - - // Both BufferedSortedQueue and SizeBoundQueue won't allocate more than thresholdBytes. - this.estimatedByteSize = limit == null ? 0 : Math.min((limit + this.offset) * estimatedEntrySize, thresholdBytes); - this.pageSizeMs = pageSizeMs; - } + /** A function that returns Nth key for a given {@link ResultEntry}. */ + private static class NthKey implements Function { + private final int index; - public Integer getLimit() { - return limit; + NthKey(int index) { + this.index = index; } - public long getEstimatedByteSize() { - return estimatedByteSize; + @Override + public ImmutableBytesWritable apply(ResultEntry entry) { + return entry.getSortKey(index); } + } + + /** Returns the expression of a given {@link OrderByExpression}. 
*/ + private static final Function TO_EXPRESSION = + new Function() { + @Override + public Expression apply(OrderByExpression column) { + return column.getExpression(); + } + }; - public long getByteSize() { - return byteSize; + private final boolean spoolingEnabled; + private final long thresholdBytes; + private final Integer limit; + private final Integer offset; + private final ResultIterator delegate; + private final List orderByExpressions; + private final long estimatedByteSize; + + private PeekingResultIterator resultIterator; + private boolean resultIteratorReady = false; + private Tuple dummyTuple = null; + private long byteSize; + private long pageSizeMs; + private Scan scan; + private byte[] scanStartRowKey; + private byte[] actualScanStartRowKey; + private Boolean actualScanIncludeStartRowKey; + private RegionInfo regionInfo = null; + private boolean includeStartRowKey; + private boolean serverSideIterator = false; + private boolean firstScan = true; + private boolean skipValidRowsSent = false; + + protected ResultIterator getDelegate() { + return delegate; + } + + public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, + boolean spoolingEnabled, long thresholdBytes, Integer limit, Integer offset) { + this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, 0, + Long.MAX_VALUE); + } + + public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, + boolean spoolingEnabled, long thresholdBytes) throws SQLException { + this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, null, null); + } + + public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, + boolean spoolingEnabled, long thresholdBytes, Integer limit, Integer offset, + int estimatedRowSize) { + this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, + estimatedRowSize, Long.MAX_VALUE); + } + + public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, + boolean spoolingEnabled, long thresholdBytes, Integer limit, Integer offset, + int estimatedRowSize, long pageSizeMs, Scan scan, RegionInfo regionInfo) { + this(delegate, orderByExpressions, spoolingEnabled, thresholdBytes, limit, offset, + estimatedRowSize, pageSizeMs); + this.scan = scan; + // If scan start rowkey is empty, use region boundaries. Reverse region boundaries + // for reverse scan. + // Keep this same as ServerUtil#getScanStartRowKeyFromScanOrRegionBoundaries. + this.scanStartRowKey = scan.getStartRow().length > 0 + ? scan.getStartRow() + : (scan.isReversed() ? regionInfo.getEndKey() : regionInfo.getStartKey()); + // Retrieve start rowkey of the previous scan. This would be different than + // current scan start rowkey if the region has recently moved or split or merged. + this.actualScanStartRowKey = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); + this.actualScanIncludeStartRowKey = true; + this.includeStartRowKey = scan.includeStartRow(); + this.serverSideIterator = true; + this.regionInfo = regionInfo; + } + + public OrderedResultIterator(ResultIterator delegate, List orderByExpressions, + boolean spoolingEnabled, long thresholdBytes, Integer limit, Integer offset, + int estimatedRowSize, long pageSizeMs) { + checkArgument(!orderByExpressions.isEmpty()); + this.delegate = delegate; + this.orderByExpressions = orderByExpressions; + this.spoolingEnabled = spoolingEnabled; + this.thresholdBytes = thresholdBytes; + this.offset = offset == null ? 
0 : offset; + if (limit != null) { + this.limit = limit + this.offset; + } else { + this.limit = null; } - /** - * Builds a comparator from the list of columns in ORDER BY clause. - * @param orderByExpressions the columns in ORDER BY clause. - * @return the comparator built from the list of columns in ORDER BY clause. - */ - // ImmutableBytesWritable.Comparator doesn't implement generics - @SuppressWarnings("unchecked") - private static Comparator buildComparator(List orderByExpressions) { - Ordering ordering = null; - int pos = 0; - for (OrderByExpression col : orderByExpressions) { - Expression e = col.getExpression(); - Comparator comparator = - e.getSortOrder() == SortOrder.DESC && !e.getDataType().isFixedWidth() - ? buildDescVarLengthComparator() - : new ImmutableBytesWritable.Comparator(); - Ordering o = Ordering.from(comparator); - if(!col.isAscending()) o = o.reverse(); - o = col.isNullsLast() ? o.nullsLast() : o.nullsFirst(); - Ordering entryOrdering = o.onResultOf(new NthKey(pos++)); - ordering = ordering == null ? entryOrdering : ordering.compound(entryOrdering); - } - return ordering; + long estimatedEntrySize = + // ResultEntry + SizedUtil.OBJECT_SIZE + + // ImmutableBytesWritable[] + SizedUtil.ARRAY_SIZE + orderByExpressions.size() * SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE + + // Tuple + SizedUtil.OBJECT_SIZE + estimatedRowSize; + + // Make sure we don't overflow Long, though this is really unlikely to happen. + assert (limit == null || Long.MAX_VALUE / estimatedEntrySize >= limit + this.offset); + + // Both BufferedSortedQueue and SizeBoundQueue won't allocate more than thresholdBytes. + this.estimatedByteSize = + limit == null ? 0 : Math.min((limit + this.offset) * estimatedEntrySize, thresholdBytes); + this.pageSizeMs = pageSizeMs; + } + + public Integer getLimit() { + return limit; + } + + public long getEstimatedByteSize() { + return estimatedByteSize; + } + + public long getByteSize() { + return byteSize; + } + + /** + * Builds a comparator from the list of columns in ORDER BY clause. + * @param orderByExpressions the columns in ORDER BY clause. + * @return the comparator built from the list of columns in ORDER BY clause. + */ + // ImmutableBytesWritable.Comparator doesn't implement generics + @SuppressWarnings("unchecked") + private static Comparator + buildComparator(List orderByExpressions) { + Ordering ordering = null; + int pos = 0; + for (OrderByExpression col : orderByExpressions) { + Expression e = col.getExpression(); + Comparator comparator = + e.getSortOrder() == SortOrder.DESC && !e.getDataType().isFixedWidth() + ? buildDescVarLengthComparator() + : new ImmutableBytesWritable.Comparator(); + Ordering o = Ordering.from(comparator); + if (!col.isAscending()) o = o.reverse(); + o = col.isNullsLast() ? o.nullsLast() : o.nullsFirst(); + Ordering entryOrdering = o.onResultOf(new NthKey(pos++)); + ordering = ordering == null ? entryOrdering : ordering.compound(entryOrdering); } + return ordering; + } + + /* + * Same as regular comparator, but if all the bytes match and the length is different, returns the + * longer length as bigger. 
+ */ + private static Comparator buildDescVarLengthComparator() { + return new Comparator() { + + @Override + public int compare(ImmutableBytesWritable o1, ImmutableBytesWritable o2) { + return DescVarLengthFastByteComparisons.compareTo(o1.get(), o1.getOffset(), o1.getLength(), + o2.get(), o2.getOffset(), o2.getLength()); + } - /* - * Same as regular comparator, but if all the bytes match and the length is - * different, returns the longer length as bigger. - */ - private static Comparator buildDescVarLengthComparator() { - return new Comparator() { - - @Override - public int compare(ImmutableBytesWritable o1, ImmutableBytesWritable o2) { - return DescVarLengthFastByteComparisons.compareTo( - o1.get(), o1.getOffset(), o1.getLength(), - o2.get(), o2.getOffset(), o2.getLength()); - } - - }; - } - - @Override - public Tuple next() throws SQLException { - try { - if (firstScan && serverSideIterator && actualScanStartRowKey != null - && actualScanIncludeStartRowKey != null) { - if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { - if (Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 - || actualScanIncludeStartRowKey != includeStartRowKey) { - LOGGER.info("Region has moved. Actual scan start rowkey {} is not same as" - + " current scan start rowkey {}", - Bytes.toStringBinary(actualScanStartRowKey), - Bytes.toStringBinary(scanStartRowKey)); - // If region has moved in the middle of the scan operation, after resetting - // the scanner, hbase client uses (latest received rowkey + \x00) as new - // start rowkey for resuming the scan operation on the new scanner. - if (Bytes.compareTo( - ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE), - scanStartRowKey) == 0) { - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, - actualScanStartRowKey); - scan.setAttribute( - QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, - Bytes.toBytes(actualScanIncludeStartRowKey)); - } else { - // This happens when the server side scanner has already sent some - // rows back to the client and region has moved, so now we need to - // use skipValidRowsSent flag and also reset the scanner - // at paging region scanner level to re-read the previously sent - // values in order to re-compute the aggregation and then return - // only the next rowkey that was not yet sent back to the client. - skipValidRowsSent = true; - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, - actualScanStartRowKey); - scan.setAttribute( - QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, - Bytes.toBytes(actualScanIncludeStartRowKey)); - } - } - } - } - if (firstScan) { - firstScan = false; - } - getResultIterator(); - if (!resultIteratorReady) { - return dummyTuple; + }; + } + + @Override + public Tuple next() throws SQLException { + try { + if ( + firstScan && serverSideIterator && actualScanStartRowKey != null + && actualScanIncludeStartRowKey != null + ) { + if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { + if ( + Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 + || actualScanIncludeStartRowKey != includeStartRowKey + ) { + LOGGER.info( + "Region has moved. 
Actual scan start rowkey {} is not same as" + + " current scan start rowkey {}", + Bytes.toStringBinary(actualScanStartRowKey), Bytes.toStringBinary(scanStartRowKey)); + // If region has moved in the middle of the scan operation, after resetting + // the scanner, hbase client uses (latest received rowkey + \x00) as new + // start rowkey for resuming the scan operation on the new scanner. + if ( + Bytes.compareTo(ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE), + scanStartRowKey) == 0 + ) { + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, + actualScanStartRowKey); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, + Bytes.toBytes(actualScanIncludeStartRowKey)); + } else { + // This happens when the server side scanner has already sent some + // rows back to the client and region has moved, so now we need to + // use skipValidRowsSent flag and also reset the scanner + // at paging region scanner level to re-read the previously sent + // values in order to re-compute the aggregation and then return + // only the next rowkey that was not yet sent back to the client. + skipValidRowsSent = true; + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, + actualScanStartRowKey); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, + Bytes.toBytes(actualScanIncludeStartRowKey)); } - Tuple result = resultIterator.next(); - if (skipValidRowsSent) { - while (true) { - if (result == null) { - skipValidRowsSent = false; - return null; - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - result.getKey(ptr); - byte[] resultRowKey = new byte[ptr.getLength()]; - System.arraycopy(ptr.get(), ptr.getOffset(), resultRowKey, 0, - resultRowKey.length); - // In case of regular scans, if the region moves and scanner is reset, - // hbase client checks the last returned row by the server, gets the - // rowkey and appends "\x00" byte, before resuming the scan. With this, - // scan includeStartRowKey is set to true. - // However, same is not the case with reverse scans. For the reverse scan, - // hbase client checks the last returned row by the server, gets the - // rowkey and treats it as startRowKey for resuming the scan. With this, - // scan includeStartRowKey is set to false. - // Hence, we need to cover both cases here. - if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) { - // This can be true for reverse scan case. - skipValidRowsSent = false; - if (includeStartRowKey) { - return result; - } - // If includeStartRowKey is false and the current rowkey is matching - // with scanStartRowKey, return the next row result. - return resultIterator.next(); - } else if ( - Bytes.compareTo( - ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), - scanStartRowKey) == 0) { - // This can be true for regular scan case. - skipValidRowsSent = false; - if (includeStartRowKey) { - // If includeStartRowKey is true and the (current rowkey + "\0xx") is - // matching with scanStartRowKey, return the next row result. 
- return resultIterator.next(); - } - } - result = resultIterator.next(); - } + } + } + } + if (firstScan) { + firstScan = false; + } + getResultIterator(); + if (!resultIteratorReady) { + return dummyTuple; + } + Tuple result = resultIterator.next(); + if (skipValidRowsSent) { + while (true) { + if (result == null) { + skipValidRowsSent = false; + return null; + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + result.getKey(ptr); + byte[] resultRowKey = new byte[ptr.getLength()]; + System.arraycopy(ptr.get(), ptr.getOffset(), resultRowKey, 0, resultRowKey.length); + // In case of regular scans, if the region moves and scanner is reset, + // hbase client checks the last returned row by the server, gets the + // rowkey and appends "\x00" byte, before resuming the scan. With this, + // scan includeStartRowKey is set to true. + // However, same is not the case with reverse scans. For the reverse scan, + // hbase client checks the last returned row by the server, gets the + // rowkey and treats it as startRowKey for resuming the scan. With this, + // scan includeStartRowKey is set to false. + // Hence, we need to cover both cases here. + if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) { + // This can be true for reverse scan case. + skipValidRowsSent = false; + if (includeStartRowKey) { + return result; } - return result; - } catch (Exception e) { - LOGGER.error("Ordered result iterator next encountered error " + (regionInfo != null - ? " for region: " + regionInfo.getRegionNameAsString() : "."), e); - if (e instanceof SQLException) { - throw e; - } else { - throw new PhoenixIOException(e); + // If includeStartRowKey is false and the current rowkey is matching + // with scanStartRowKey, return the next row result. + return resultIterator.next(); + } else if ( + Bytes.compareTo(ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), scanStartRowKey) == 0 + ) { + // This can be true for regular scan case. + skipValidRowsSent = false; + if (includeStartRowKey) { + // If includeStartRowKey is true and the (current rowkey + "\0xx") is + // matching with scanStartRowKey, return the next row result. + return resultIterator.next(); } + } + result = resultIterator.next(); } + } + return result; + } catch (Exception e) { + LOGGER.error("Ordered result iterator next encountered error " + + (regionInfo != null ? " for region: " + regionInfo.getRegionNameAsString() : "."), e); + if (e instanceof SQLException) { + throw e; + } else { + throw new PhoenixIOException(e); + } } - - private PeekingResultIterator getResultIterator() throws SQLException { - if (resultIteratorReady) { - // The results have not been ordered yet. When the results are ordered then the result iterator - // will be ready to iterate over them - return resultIterator; + } + + private PeekingResultIterator getResultIterator() throws SQLException { + if (resultIteratorReady) { + // The results have not been ordered yet. 
When the results are ordered then the result + // iterator + // will be ready to iterate over them + return resultIterator; + } + + final int numSortKeys = orderByExpressions.size(); + List expressions = + Lists.newArrayList(Collections2.transform(orderByExpressions, TO_EXPRESSION)); + final Comparator comparator = buildComparator(orderByExpressions); + try { + if (resultIterator == null) { + resultIterator = new RecordPeekingResultIterator(PhoenixQueues + .newResultEntrySortedQueue(comparator, limit, spoolingEnabled, thresholdBytes)); + } + final SizeAwareQueue queueEntries = + ((RecordPeekingResultIterator) resultIterator).getQueueEntries(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + for (Tuple result = delegate.next(); result != null; result = delegate.next()) { + // result might be empty if it was filtered by a local index + if (result.size() == 0) { + continue; } - - final int numSortKeys = orderByExpressions.size(); - List expressions = Lists.newArrayList(Collections2.transform(orderByExpressions, TO_EXPRESSION)); - final Comparator comparator = buildComparator(orderByExpressions); - try{ - if (resultIterator == null) { - resultIterator = new RecordPeekingResultIterator(PhoenixQueues.newResultEntrySortedQueue(comparator, - limit, spoolingEnabled, thresholdBytes)); - } - final SizeAwareQueue queueEntries = ((RecordPeekingResultIterator)resultIterator).getQueueEntries(); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - for (Tuple result = delegate.next(); result != null; result = delegate.next()) { - // result might be empty if it was filtered by a local index - if (result.size() == 0) { - continue; - } - - if (isDummy(result)) { - getDummyResult(); - return resultIterator; - } - int pos = 0; - ImmutableBytesWritable[] sortKeys = new ImmutableBytesWritable[numSortKeys]; - for (Expression expression : expressions) { - final ImmutableBytesWritable sortKey = new ImmutableBytesWritable(); - boolean evaluated = expression.evaluate(result, sortKey); - // set the sort key that failed to get evaluated with null - sortKeys[pos++] = evaluated && sortKey.getLength() > 0 ? sortKey : null; - } - queueEntries.add(new ResultEntry(sortKeys, result)); - if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { - getDummyResult(); - return resultIterator; - } - } - resultIteratorReady = true; - this.byteSize = queueEntries.getByteSize(); - } catch (IOException e) { - LOGGER.error("Error while getting result iterator from OrderedResultIterator.", e); - ClientUtil.createIOException(e.getMessage(), e); - throw new SQLException(e); - } finally { - delegate.close(); + + if (isDummy(result)) { + getDummyResult(); + return resultIterator; + } + int pos = 0; + ImmutableBytesWritable[] sortKeys = new ImmutableBytesWritable[numSortKeys]; + for (Expression expression : expressions) { + final ImmutableBytesWritable sortKey = new ImmutableBytesWritable(); + boolean evaluated = expression.evaluate(result, sortKey); + // set the sort key that failed to get evaluated with null + sortKeys[pos++] = evaluated && sortKey.getLength() > 0 ? 
sortKey : null; } - - return resultIterator; + queueEntries.add(new ResultEntry(sortKeys, result)); + if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { + getDummyResult(); + return resultIterator; + } + } + resultIteratorReady = true; + this.byteSize = queueEntries.getByteSize(); + } catch (IOException e) { + LOGGER.error("Error while getting result iterator from OrderedResultIterator.", e); + ClientUtil.createIOException(e.getMessage(), e); + throw new SQLException(e); + } finally { + delegate.close(); } - /** - * Retrieve dummy rowkey. - */ - private void getDummyResult() { - if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { - if (Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 - || actualScanIncludeStartRowKey != includeStartRowKey) { - byte[] lastByte = - new byte[]{scanStartRowKey[scanStartRowKey.length - 1]}; - if (scanStartRowKey.length > 1 && Bytes.compareTo(lastByte, - ByteUtil.ZERO_BYTE) == 0) { - byte[] prevKey = new byte[scanStartRowKey.length - 1]; - System.arraycopy(scanStartRowKey, 0, prevKey, 0, - prevKey.length); - dummyTuple = ScanUtil.getDummyTuple(prevKey); - } else { - dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); - } - } else { - dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); - } + return resultIterator; + } + + /** + * Retrieve dummy rowkey. + */ + private void getDummyResult() { + if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { + if ( + Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 + || actualScanIncludeStartRowKey != includeStartRowKey + ) { + byte[] lastByte = new byte[] { scanStartRowKey[scanStartRowKey.length - 1] }; + if (scanStartRowKey.length > 1 && Bytes.compareTo(lastByte, ByteUtil.ZERO_BYTE) == 0) { + byte[] prevKey = new byte[scanStartRowKey.length - 1]; + System.arraycopy(scanStartRowKey, 0, prevKey, 0, prevKey.length); + dummyTuple = ScanUtil.getDummyTuple(prevKey); } else { - dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); + dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); } + } else { + dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); + } + } else { + dummyTuple = ScanUtil.getDummyTuple(scanStartRowKey); + } + } + + @Override + public Tuple peek() throws SQLException { + return getResultIterator().peek(); + } + + @Override + public void close() throws SQLException { + // Guard against resultIterator being null + if (null != resultIterator) { + resultIterator.close(); + } + resultIterator = PeekingResultIterator.EMPTY_ITERATOR; + } + + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + planSteps.add("CLIENT" + (offset == null || offset == 0 ? "" : " OFFSET " + offset) + + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + + orderByExpressions.toString()); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + explainPlanAttributesBuilder.setClientOffset(offset); + explainPlanAttributesBuilder.setClientRowLimit(limit); + explainPlanAttributesBuilder.setClientSortedBy(orderByExpressions.toString()); + planSteps.add("CLIENT" + (offset == null || offset == 0 ? "" : " OFFSET " + offset) + + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? 
"" : "S")) + " SORTED BY " + + orderByExpressions.toString()); + } + + @Override + public String toString() { + return "OrderedResultIterator [thresholdBytes=" + thresholdBytes + ", limit=" + limit + + ", offset=" + offset + ", delegate=" + delegate + ", orderByExpressions=" + + orderByExpressions + ", estimatedByteSize=" + estimatedByteSize + ", resultIterator=" + + resultIterator + ", byteSize=" + byteSize + "]"; + } + + private class RecordPeekingResultIterator implements PeekingResultIterator { + int count = 0; + + private SizeAwareQueue queueEntries; + + RecordPeekingResultIterator(SizeAwareQueue queueEntries) { + this.queueEntries = queueEntries; } - @Override - public Tuple peek() throws SQLException { - return getResultIterator().peek(); + public SizeAwareQueue getQueueEntries() { + return queueEntries; } @Override - public void close() throws SQLException { - // Guard against resultIterator being null - if (null != resultIterator) { - resultIterator.close(); + public Tuple next() throws SQLException { + ResultEntry entry = queueEntries.poll(); + while (entry != null && offset != null && count < offset) { + count++; + if (entry.getResult() == null) { + return null; } + entry = queueEntries.poll(); + } + if (entry == null || (limit != null && count++ > limit)) { + resultIterator.close(); resultIterator = PeekingResultIterator.EMPTY_ITERATOR; + return null; + } + return entry.getResult(); } + @Override + public Tuple peek() throws SQLException { + ResultEntry entry = queueEntries.peek(); + while (entry != null && offset != null && count < offset) { + entry = queueEntries.poll(); + count++; + if (entry == null) { + return null; + } + } + if (limit != null && count > limit) { + return null; + } + entry = queueEntries.peek(); + if (entry == null) { + return null; + } + return entry.getResult(); + } @Override public void explain(List planSteps) { - delegate.explain(planSteps); - planSteps.add("CLIENT" + (offset == null || offset == 0 ? "" : " OFFSET " + offset) - + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " - + orderByExpressions.toString()); } @Override public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - explainPlanAttributesBuilder.setClientOffset(offset); - explainPlanAttributesBuilder.setClientRowLimit(limit); - explainPlanAttributesBuilder.setClientSortedBy( - orderByExpressions.toString()); - planSteps.add("CLIENT" + (offset == null || offset == 0 ? "" : " OFFSET " + offset) - + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? 
"" : "S")) - + " SORTED BY " + orderByExpressions.toString()); + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { } @Override - public String toString() { - return "OrderedResultIterator [thresholdBytes=" + thresholdBytes - + ", limit=" + limit + ", offset=" + offset + ", delegate=" + delegate - + ", orderByExpressions=" + orderByExpressions - + ", estimatedByteSize=" + estimatedByteSize - + ", resultIterator=" + resultIterator + ", byteSize=" - + byteSize + "]"; - } - - private class RecordPeekingResultIterator implements PeekingResultIterator { - int count = 0; - - private SizeAwareQueue queueEntries; - - RecordPeekingResultIterator(SizeAwareQueue queueEntries){ - this.queueEntries = queueEntries; - } - - public SizeAwareQueue getQueueEntries() { - return queueEntries; - } - - @Override - public Tuple next() throws SQLException { - ResultEntry entry = queueEntries.poll(); - while (entry != null && offset != null && count < offset) { - count++; - if (entry.getResult() == null) { return null; } - entry = queueEntries.poll(); - } - if (entry == null || (limit != null && count++ > limit)) { - resultIterator.close(); - resultIterator = PeekingResultIterator.EMPTY_ITERATOR; - return null; - } - return entry.getResult(); - } - - @Override - public Tuple peek() throws SQLException { - ResultEntry entry = queueEntries.peek(); - while (entry != null && offset != null && count < offset) { - entry = queueEntries.poll(); - count++; - if (entry == null) { return null; } - } - if (limit != null && count > limit) { return null; } - entry = queueEntries.peek(); - if (entry == null) { return null; } - return entry.getResult(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - @Override - public void close() throws SQLException { - try { - queueEntries.close(); - } catch (IOException e) { - throw new SQLException(e); - } - } + public void close() throws SQLException { + try { + queueEntries.close(); + } catch (IOException e) { + throw new SQLException(e); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java index dbe9910722d..0baa9ecae4e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,12 +24,14 @@ import org.apache.phoenix.compile.StatementContext; public interface ParallelIteratorFactory { - public static ParallelIteratorFactory NOOP_FACTORY = new ParallelIteratorFactory() { - @Override - public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName, QueryPlan plan) - throws SQLException { - return LookAheadResultIterator.wrap(scanner); - } - }; - PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName, QueryPlan plan) throws SQLException; -} \ No newline at end of file + public static ParallelIteratorFactory NOOP_FACTORY = new ParallelIteratorFactory() { + @Override + public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, + Scan scan, String physicalTableName, QueryPlan plan) throws SQLException { + return LookAheadResultIterator.wrap(scanner); + } + }; + + PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, + String physicalTableName, QueryPlan plan) throws SQLException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java index 74029fdd41b..d407caeb902 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,14 +22,11 @@ import org.apache.phoenix.query.KeyRange; - /** * Interface for strategies determining how to split regions in ParallelIterators. - * - * */ public interface ParallelIteratorRegionSplitter { - public List getSplits() throws SQLException; + public List getSplits() throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java index 9f26faab8aa..8ed4416570b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,6 +36,7 @@ import org.apache.phoenix.monitoring.ReadMetricQueue; import org.apache.phoenix.monitoring.ScanMetricsHolder; import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.LogUtil; @@ -43,136 +44,140 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * - * Class that parallelizes the scan over a table using the ExecutorService provided. Each region of the table will be scanned in parallel with - * the results accessible through {@link #getIterators()} - * - * + * Class that parallelizes the scan over a table using the ExecutorService provided. Each region of + * the table will be scanned in parallel with the results accessible through {@link #getIterators()} * @since 0.1 */ public class ParallelIterators extends BaseResultIterators { - private static final Logger LOGGER = LoggerFactory.getLogger(ParallelIterators.class); - private static final String NAME = "PARALLEL"; - private final ParallelIteratorFactory iteratorFactory; - private final boolean initFirstScanOnly; - - public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan, boolean initFirstScanOnly, Map caches, QueryPlan dataPlan) - throws SQLException { - super(plan, perScanLimit, null, scanGrouper, scan,caches, dataPlan); - this.iteratorFactory = iteratorFactory; - this.initFirstScanOnly = initFirstScanOnly; - } - - public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory, Scan scan, boolean initOneScanPerRegion, Map caches, QueryPlan dataPlan) - throws SQLException { - this(plan, perScanLimit, iteratorFactory, DefaultParallelScanGrouper.getInstance(), scan, initOneScanPerRegion, caches, dataPlan); - } + private static final Logger LOGGER = LoggerFactory.getLogger(ParallelIterators.class); + private static final String NAME = "PARALLEL"; + private final ParallelIteratorFactory iteratorFactory; + private final boolean initFirstScanOnly; + + public ParallelIterators(QueryPlan plan, Integer perScanLimit, + ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan, + boolean initFirstScanOnly, Map caches, QueryPlan dataPlan) + throws SQLException { + super(plan, perScanLimit, null, scanGrouper, scan, caches, dataPlan); + this.iteratorFactory = iteratorFactory; + this.initFirstScanOnly = initFirstScanOnly; + } - /** - * No need to use stats when executing serially - */ - @Override - protected boolean isSerial() { - return false; + public ParallelIterators(QueryPlan plan, Integer perScanLimit, + ParallelIteratorFactory iteratorFactory, Scan scan, boolean initOneScanPerRegion, + Map caches, QueryPlan dataPlan) throws SQLException { + this(plan, perScanLimit, iteratorFactory, DefaultParallelScanGrouper.getInstance(), scan, + initOneScanPerRegion, caches, dataPlan); + } + + /** + * No need to use stats when executing serially + */ + @Override + protected boolean isSerial() { + return false; + } + + @Override 
+ protected void submitWork(final List> nestedScans, + List>>> nestedFutures, + final Queue allIterators, int estFlattenedSize, final boolean isReverse, + ParallelScanGrouper scanGrouper, long maxQueryEndTime) throws SQLException { + // Pre-populate nestedFutures lists so that we can shuffle the scans + // and add the future to the right nested list. By shuffling the scans + // we get better utilization of the cluster since our thread executor + // will spray the scans across machines as opposed to targeting a + // single one since the scans are in row key order. + ExecutorService executor = context.getConnection().getQueryServices().getExecutor(); + List scanLocations = Lists.newArrayListWithExpectedSize(estFlattenedSize); + for (int i = 0; i < nestedScans.size(); i++) { + List scans = nestedScans.get(i); + int numScans = scans.size(); + List>> futures = + Lists.newArrayListWithExpectedSize(numScans); + nestedFutures.add(futures); + for (int j = 0; j < numScans; j++) { + Scan scan = nestedScans.get(i).get(j); + scanLocations.add(new ScanLocator(scan, i, j, j == 0, (j == numScans - 1))); + futures.add(null); // placeholder + } } - - @Override - protected void submitWork(final List> nestedScans, List>>> nestedFutures, - final Queue allIterators, int estFlattenedSize, final boolean isReverse, ParallelScanGrouper scanGrouper, - long maxQueryEndTime) throws SQLException { - // Pre-populate nestedFutures lists so that we can shuffle the scans - // and add the future to the right nested list. By shuffling the scans - // we get better utilization of the cluster since our thread executor - // will spray the scans across machines as opposed to targeting a - // single one since the scans are in row key order. - ExecutorService executor = context.getConnection().getQueryServices().getExecutor(); - List scanLocations = Lists.newArrayListWithExpectedSize(estFlattenedSize); - for (int i = 0; i < nestedScans.size(); i++) { - List scans = nestedScans.get(i); - int numScans = scans.size(); - List>> futures = Lists.newArrayListWithExpectedSize(numScans); - nestedFutures.add(futures); - for (int j = 0; j < numScans; j++) { - Scan scan = nestedScans.get(i).get(j); - scanLocations.add(new ScanLocator(scan, i, j, j == 0, (j == numScans - 1))); - futures.add(null); // placeholder + // Shuffle so that we start execution across many machines + // before we fill up the thread pool + Collections.shuffle(scanLocations); + ReadMetricQueue readMetrics = context.getReadMetricsQueue(); + final String physicalTableName = tableRef.getTable().getPhysicalName().getString(); + int numScans = scanLocations.size(); + context.getOverallQueryMetrics().updateNumParallelScans(numScans); + GLOBAL_NUM_PARALLEL_SCANS.update(numScans); + final long renewLeaseThreshold = + context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds(); + for (final ScanLocator scanLocation : scanLocations) { + final Scan scan = scanLocation.getScan(); + final ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, + physicalTableName, scan, context.getConnection().getLogLevel()); + final TaskExecutionMetricsHolder taskMetrics = + new TaskExecutionMetricsHolder(readMetrics, physicalTableName); + final TableResultIterator tableResultItr = + context.getConnection().getTableResultIteratorFactory().newIterator(mutationState, tableRef, + scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, maxQueryEndTime); + context.getConnection().addIteratorForLeaseRenewal(tableResultItr); + Future future = + 
executor.submit(Tracing.wrap(new JobCallable() { + + @Override + public PeekingResultIterator call() throws Exception { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, + scan, physicalTableName, ParallelIterators.this.plan); + if (initFirstScanOnly) { + if ( + (!isReverse && scanLocation.isFirstScan()) + || (isReverse && scanLocation.isLastScan()) + ) { + // Fill the scanner's cache. This helps reduce latency since we are parallelizing + // the I/O needed. + iterator.peek(); + } + } else { + iterator.peek(); } - } - // Shuffle so that we start execution across many machines - // before we fill up the thread pool - Collections.shuffle(scanLocations); - ReadMetricQueue readMetrics = context.getReadMetricsQueue(); - final String physicalTableName = tableRef.getTable().getPhysicalName().getString(); - int numScans = scanLocations.size(); - context.getOverallQueryMetrics().updateNumParallelScans(numScans); - GLOBAL_NUM_PARALLEL_SCANS.update(numScans); - final long renewLeaseThreshold = context.getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds(); - for (final ScanLocator scanLocation : scanLocations) { - final Scan scan = scanLocation.getScan(); - final ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, physicalTableName, - scan, context.getConnection().getLogLevel()); - final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics, physicalTableName); - final TableResultIterator tableResultItr = - context.getConnection().getTableResultIteratorFactory().newIterator( - mutationState, tableRef, scan, scanMetricsHolder, renewLeaseThreshold, plan, - scanGrouper, caches, maxQueryEndTime); - context.getConnection().addIteratorForLeaseRenewal(tableResultItr); - Future future = executor.submit(Tracing.wrap(new JobCallable() { - - @Override - public PeekingResultIterator call() throws Exception { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - PeekingResultIterator iterator = iteratorFactory.newIterator( - context, - tableResultItr, - scan, - physicalTableName, - ParallelIterators.this.plan); - if (initFirstScanOnly) { - if ((!isReverse && scanLocation.isFirstScan()) || (isReverse && scanLocation.isLastScan())) { - // Fill the scanner's cache. This helps reduce latency since we are parallelizing the I/O needed. - iterator.peek(); - } - } else { - iterator.peek(); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + - (EnvironmentEdgeManager.currentTimeMillis() - startTime) + - "ms, Table: " + physicalTableName + ", Scan: " + scan, - ScanUtil.getCustomAnnotations(scan))); - } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms, Table: " + + physicalTableName + ", Scan: " + scan, ScanUtil.getCustomAnnotations(scan))); + } - allIterators.add(iterator); - return iterator; - } + allIterators.add(iterator); + return iterator; + } - /** - * Defines the grouping for round robin behavior. All threads spawned to process - * this scan will be grouped together and time sliced with other simultaneously - * executing parallel scans. - */ - @Override - public Object getJobId() { - return ParallelIterators.this; - } + /** + * Defines the grouping for round robin behavior. 
All threads spawned to process this scan + * will be grouped together and time sliced with other simultaneously executing parallel + * scans. + */ + @Override + public Object getJobId() { + return ParallelIterators.this; + } - @Override - public TaskExecutionMetricsHolder getTaskExecutionMetric() { - return taskMetrics; - } - }, "Parallel scanner for table: " + tableRef.getTable().getPhysicalName().getString())); - // Add our future in the right place so that we can concatenate the - // results of the inner futures versus merge sorting across all of them. - nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(), new Pair>(scan,future)); - } + @Override + public TaskExecutionMetricsHolder getTaskExecutionMetric() { + return taskMetrics; + } + }, "Parallel scanner for table: " + tableRef.getTable().getPhysicalName().getString())); + // Add our future in the right place so that we can concatenate the + // results of the inner futures versus merge sorting across all of them. + nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(), + new Pair>(scan, future)); } + } - @Override - protected String getName() { - return NAME; - } -} \ No newline at end of file + @Override + protected String getName() { + return NAME; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java index 2b9d81e095b..5e999215fe7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,33 +30,34 @@ */ public interface ParallelScanGrouper { - /** - * Determines whether to create a new group of parallel scans. - * @param plan current query plan - * @param lastScan the last scan processed - * @param startKey of the new scan - * @param crossesRegionBoundary whether startKey in a different region than lastScan - * @return true if we should create a new group of scans, starting with the scan whose start - * key we passed as startKey - */ - boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, byte[] startKey, boolean crossesRegionBoundary); + /** + * Determines whether to create a new group of parallel scans. + * @param plan current query plan + * @param lastScan the last scan processed + * @param startKey of the new scan + * @param crossesRegionBoundary whether startKey in a different region than lastScan + * @return true if we should create a new group of scans, starting with the scan whose start key + * we passed as startKey + */ + boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, byte[] startKey, + boolean crossesRegionBoundary); - List getRegionBoundaries(StatementContext context, byte[] tableName) throws SQLException; + List getRegionBoundaries(StatementContext context, byte[] tableName) + throws SQLException; - /** - * Retrieve table region locations that cover the startRegionBoundaryKey and - * stopRegionBoundaryKey. 
The start key of the first region of the returned list must be less - * than or equal to startRegionBoundaryKey. The end key of the last region of the returned list - * must be greater than or equal to stopRegionBoundaryKey. - * - * @param context Statement Context. - * @param tableName Table name. - * @param startRegionBoundaryKey Start region boundary key. - * @param stopRegionBoundaryKey Stop region boundary key. - * @return The list of region locations that cover the startRegionBoundaryKey and - * stopRegionBoundaryKey key boundary. - * @throws SQLException If fails to retrieve region locations. - */ - List getRegionBoundaries(StatementContext context, byte[] tableName, - byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException; + /** + * Retrieve table region locations that cover the startRegionBoundaryKey and + * stopRegionBoundaryKey. The start key of the first region of the returned list must be less than + * or equal to startRegionBoundaryKey. The end key of the last region of the returned list must be + * greater than or equal to stopRegionBoundaryKey. + * @param context Statement Context. + * @param tableName Table name. + * @param startRegionBoundaryKey Start region boundary key. + * @param stopRegionBoundaryKey Stop region boundary key. + * @return The list of region locations that cover the startRegionBoundaryKey and + * stopRegionBoundaryKey key boundary. + * @throws SQLException If fails to retrieve region locations. + */ + List getRegionBoundaries(StatementContext context, byte[] tableName, + byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScansCollector.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScansCollector.java index c9d7147efc5..c2b210aab7c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScansCollector.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelScansCollector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,37 +29,39 @@ */ public class ParallelScansCollector { - private final ParallelScanGrouper grouper; - private boolean lastScanCrossedRegionBoundary = false; - private final List> parallelScans = new ArrayList<>(); - private List lastBatch = new ArrayList<>(); - private Scan lastScan = null; - private final List regionLocations = new ArrayList<>(); + private final ParallelScanGrouper grouper; + private boolean lastScanCrossedRegionBoundary = false; + private final List> parallelScans = new ArrayList<>(); + private List lastBatch = new ArrayList<>(); + private Scan lastScan = null; + private final List regionLocations = new ArrayList<>(); - public ParallelScansCollector(ParallelScanGrouper grouper) { - this.grouper = grouper; - parallelScans.add(lastBatch); - } - - public void addNewScan(QueryPlan plan, Scan newScan, boolean crossesRegionBoundary, - HRegionLocation regionLocation) { - if (grouper.shouldStartNewScan(plan, lastScan, newScan.getStartRow(), - lastScanCrossedRegionBoundary)) { - lastBatch = new ArrayList<>(); - parallelScans.add(lastBatch); - } - lastBatch.add(newScan); - regionLocations.add(regionLocation); + public ParallelScansCollector(ParallelScanGrouper grouper) { + this.grouper = grouper; + parallelScans.add(lastBatch); + } - lastScanCrossedRegionBoundary = crossesRegionBoundary; - lastScan = newScan; + public void addNewScan(QueryPlan plan, Scan newScan, boolean crossesRegionBoundary, + HRegionLocation regionLocation) { + if ( + grouper.shouldStartNewScan(plan, lastScan, newScan.getStartRow(), + lastScanCrossedRegionBoundary) + ) { + lastBatch = new ArrayList<>(); + parallelScans.add(lastBatch); } + lastBatch.add(newScan); + regionLocations.add(regionLocation); - public List> getParallelScans() { - return parallelScans; - } + lastScanCrossedRegionBoundary = crossesRegionBoundary; + lastScan = newScan; + } - public List getRegionLocations() { - return regionLocations; - } + public List> getParallelScans() { + return parallelScans; + } + + public List getRegionLocations() { + return regionLocations; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PeekingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PeekingResultIterator.java index f4a193e7f9e..6b22f20aa76 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PeekingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PeekingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,49 +20,43 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; - /** - * - * Interface for iterating through results returned from a scan, adding the - * ability to peek at the next result. 
- * - * + * Interface for iterating through results returned from a scan, adding the ability to peek at the + * next result. * @since 0.1 */ public interface PeekingResultIterator extends ResultIterator { - public static final PeekingResultIterator EMPTY_ITERATOR = new PeekingResultIterator() { - - @Override - public Tuple next() throws SQLException { - return null; - } - - @Override - public Tuple peek() { - return null; - } - - @Override - public void close() throws SQLException { - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - }; - - /** - * Returns the next result without advancing the iterator - * @throws SQLException - */ - public Tuple peek() throws SQLException; + public static final PeekingResultIterator EMPTY_ITERATOR = new PeekingResultIterator() { + + @Override + public Tuple next() throws SQLException { + return null; + } + + @Override + public Tuple peek() { + return null; + } + + @Override + public void close() throws SQLException { + } + + @Override + public void explain(List planSteps) { + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + }; + + /** + * Returns the next result without advancing the iterator + */ + public Tuple peek() throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PhoenixQueues.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PhoenixQueues.java index 568f7dfeff4..387513c92d4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PhoenixQueues.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/PhoenixQueues.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,72 +25,68 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.iterate.OrderedResultIterator.ResultEntry; import org.apache.phoenix.schema.tuple.Tuple; -import org.apache.phoenix.util.PhoenixKeyValueUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.MinMaxPriorityQueue; +import org.apache.phoenix.util.PhoenixKeyValueUtil; public class PhoenixQueues { - private PhoenixQueues() { - } + private PhoenixQueues() { + } - public static SizeAwareQueue newBufferedResultEntrySortedQueue( - Comparator comparator, Integer limit, long thresholdBytes) - throws IOException { - return new BufferedSortedQueue(comparator, limit, thresholdBytes); - } + public static SizeAwareQueue newBufferedResultEntrySortedQueue( + Comparator comparator, Integer limit, long thresholdBytes) throws IOException { + return new BufferedSortedQueue(comparator, limit, thresholdBytes); + } - public static SizeAwareQueue newBufferedTupleQueue(long thresholdBytes) { - return new BufferedTupleQueue(thresholdBytes); - } + public static SizeAwareQueue newBufferedTupleQueue(long thresholdBytes) { + return new BufferedTupleQueue(thresholdBytes); + } - public static SizeAwareQueue newSizeBoundResultEntrySortedQueue( - Comparator comparator, Integer limit, long maxSizeBytes) { - limit = limit == null ? 
-1 : limit; - MinMaxPriorityQueue queue = - limit < 0 ? MinMaxPriorityQueue. orderedBy(comparator).create() - : MinMaxPriorityQueue. orderedBy(comparator).maximumSize(limit) - .create(); - return new SizeBoundQueue(maxSizeBytes, queue) { - @Override - public long sizeOf(org.apache.phoenix.iterate.OrderedResultIterator.ResultEntry e) { - return ResultEntry.sizeOf(e); - } + public static SizeAwareQueue newSizeBoundResultEntrySortedQueue( + Comparator comparator, Integer limit, long maxSizeBytes) { + limit = limit == null ? -1 : limit; + MinMaxPriorityQueue queue = limit < 0 + ? MinMaxPriorityQueue. orderedBy(comparator).create() + : MinMaxPriorityQueue. orderedBy(comparator).maximumSize(limit).create(); + return new SizeBoundQueue(maxSizeBytes, queue) { + @Override + public long sizeOf(org.apache.phoenix.iterate.OrderedResultIterator.ResultEntry e) { + return ResultEntry.sizeOf(e); + } - }; - } + }; + } - public static SizeAwareQueue newSizeBoundTupleQueue(long maxSizeBytes) { - LinkedList results = Lists.newLinkedList(); - return new SizeBoundQueue(maxSizeBytes, results) { + public static SizeAwareQueue newSizeBoundTupleQueue(long maxSizeBytes) { + LinkedList results = Lists.newLinkedList(); + return new SizeBoundQueue(maxSizeBytes, results) { - @Override - public long sizeOf(Tuple e) { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); - return Bytes.SIZEOF_INT * 2 + kv.getLength(); - } + @Override + public long sizeOf(Tuple e) { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(e.getValue(0)); + return Bytes.SIZEOF_INT * 2 + kv.getLength(); + } - }; - } + }; + } - public static SizeAwareQueue newResultEntrySortedQueue( - Comparator comparator, Integer limit, boolean spoolingEnabled, - long thresholdBytes) throws IOException { - if (spoolingEnabled) { - return newBufferedResultEntrySortedQueue(comparator, limit, thresholdBytes); - } else { - return newSizeBoundResultEntrySortedQueue(comparator, limit, thresholdBytes); - } + public static SizeAwareQueue newResultEntrySortedQueue( + Comparator comparator, Integer limit, boolean spoolingEnabled, long thresholdBytes) + throws IOException { + if (spoolingEnabled) { + return newBufferedResultEntrySortedQueue(comparator, limit, thresholdBytes); + } else { + return newSizeBoundResultEntrySortedQueue(comparator, limit, thresholdBytes); } + } - public static SizeAwareQueue newTupleQueue(boolean spoolingEnabled, - long thresholdBytes) { - if (spoolingEnabled) { - return newBufferedTupleQueue(thresholdBytes); - } else { - return newSizeBoundTupleQueue(thresholdBytes); - } + public static SizeAwareQueue newTupleQueue(boolean spoolingEnabled, long thresholdBytes) { + if (spoolingEnabled) { + return newBufferedTupleQueue(thresholdBytes); + } else { + return newSizeBoundTupleQueue(thresholdBytes); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterator.java index f7214563ee6..00d7ed7bbd6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,57 +20,51 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.SQLCloseable; - public interface ResultIterator extends SQLCloseable { - public static final ResultIterator EMPTY_ITERATOR = new ResultIterator() { - @Override - public void close() throws SQLException { - } + public static final ResultIterator EMPTY_ITERATOR = new ResultIterator() { + @Override + public void close() throws SQLException { + } + + @Override + public Tuple next() throws SQLException { + return null; + } - @Override - public Tuple next() throws SQLException { - return null; - } + @Override + public void explain(List planSteps) { + } - @Override - public void explain(List planSteps) { - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + }; - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - }; + /** + * Grab the next row's worth of values. The iterator will return a Tuple. + * @return Tuple object if there is another row, null if the scanner is exhausted. + * @throws SQLException e + */ + public Tuple next() throws SQLException; - /** - * Grab the next row's worth of values. The iterator will return a Tuple. - * @return Tuple object if there is another row, null if the scanner is - * exhausted. - * @throws SQLException e - */ - public Tuple next() throws SQLException; - - public void explain(List planSteps); + public void explain(List planSteps); - /** - * Generate ExplainPlan steps and add steps as list of Strings in - * planSteps argument as readable statement as well as add same generated - * steps in explainPlanAttributesBuilder so that we prepare ExplainPlan - * result as an attribute object useful to retrieve individual plan step - * attributes. - * - * @param planSteps Add generated plan in list of planSteps. This argument - * is used to provide planSteps as whole statement consisting of - * list of Strings. - * @param explainPlanAttributesBuilder Add generated plan in attributes - * object. Having an API to provide planSteps as an object is easier - * while comparing individual attributes of ExplainPlan. - */ - void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder); + /** + * Generate ExplainPlan steps and add steps as list of Strings in planSteps argument as readable + * statement as well as add same generated steps in explainPlanAttributesBuilder so that we + * prepare ExplainPlan result as an attribute object useful to retrieve individual plan step + * attributes. + * @param planSteps Add generated plan in list of planSteps. This argument is + * used to provide planSteps as whole statement consisting of + * list of Strings. + * @param explainPlanAttributesBuilder Add generated plan in attributes object. Having an API to + * provide planSteps as an object is easier while comparing + * individual attributes of ExplainPlan. 
+ */ + void explain(List planSteps, ExplainPlanAttributesBuilder explainPlanAttributesBuilder); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterators.java index 8bc47cc1141..92fe17a1b4d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ResultIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,33 +21,33 @@ import java.util.List; import org.apache.hadoop.hbase.client.Scan; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.util.SQLCloseable; public interface ResultIterators extends SQLCloseable { - public int size(); - public List getSplits(); - public List> getScans(); - public void explain(List planSteps); - public List getIterators() throws SQLException; - - /** - * Generate ExplainPlan steps and add steps as list of Strings in - * planSteps argument as readable statement as well as add same generated - * steps in explainPlanAttributesBuilder so that we prepare ExplainPlan - * result as an attribute object useful to retrieve individual plan step - * attributes. - * - * @param planSteps Add generated plan in list of planSteps. This argument - * is used to provide planSteps as whole statement consisting of - * list of Strings. - * @param explainPlanAttributesBuilder Add generated plan in attributes - * object. Having an API to provide planSteps as an object is easier - * while comparing individual attributes of ExplainPlan. - */ - void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder); + public int size(); + + public List getSplits(); + + public List> getScans(); + + public void explain(List planSteps); + + public List getIterators() throws SQLException; + + /** + * Generate ExplainPlan steps and add steps as list of Strings in planSteps argument as readable + * statement as well as add same generated steps in explainPlanAttributesBuilder so that we + * prepare ExplainPlan result as an attribute object useful to retrieve individual plan step + * attributes. + * @param planSteps Add generated plan in list of planSteps. This argument is + * used to provide planSteps as whole statement consisting of + * list of Strings. + * @param explainPlanAttributesBuilder Add generated plan in attributes object. Having an API to + * provide planSteps as an object is easier while comparing + * individual attributes of ExplainPlan. 
+ */ + void explain(List planSteps, ExplainPlanAttributesBuilder explainPlanAttributesBuilder); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java index 2daee7983dd..b901a51e596 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.iterate; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import java.sql.SQLException; import java.util.ArrayList; @@ -28,327 +28,342 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.monitoring.OverAllQueryMetrics; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; import org.apache.phoenix.util.ClientUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; - /** - * ResultIterator that keeps track of the number of records fetched by each {@link PeekingResultIterator} making sure it - * asks for records from each iterator in a round-robin fashion. When the iterators have fetched the scan cache size of - * records, it submits the iterators to the thread pool to help parallelize the I/O needed to fetch the next batch of - * records. This iterator assumes that the PeekingResultIterators that it manages are not nested i.e. they directly - * manage the underlying scanners. This kind of ResultIterator should only be used when one doesn't care about the order - * in which records are returned. + * ResultIterator that keeps track of the number of records fetched by each + * {@link PeekingResultIterator} making sure it asks for records from each iterator in a round-robin + * fashion. When the iterators have fetched the scan cache size of records, it submits the iterators + * to the thread pool to help parallelize the I/O needed to fetch the next batch of records. This + * iterator assumes that the PeekingResultIterators that it manages are not nested i.e. they + * directly manage the underlying scanners. This kind of ResultIterator should only be used when one + * doesn't care about the order in which records are returned. 
*/ public class RoundRobinResultIterator implements ResultIterator { - private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinResultIterator.class); + private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinResultIterator.class); - private final int threshold; + private final int threshold; - private int numScannersCacheExhausted = 0; - private ResultIterators resultIterators; + private int numScannersCacheExhausted = 0; + private ResultIterators resultIterators; - private List openIterators = new ArrayList<>(); + private List openIterators = new ArrayList<>(); - private int index; - private boolean closed; - private final QueryPlan plan; + private int index; + private boolean closed; + private final QueryPlan plan; - // For testing purposes - private int numParallelFetches; + // For testing purposes + private int numParallelFetches; - public RoundRobinResultIterator(ResultIterators iterators, QueryPlan plan) { - this.resultIterators = iterators; - this.plan = plan; - this.threshold = getThreshold(); - } + public RoundRobinResultIterator(ResultIterators iterators, QueryPlan plan) { + this.resultIterators = iterators; + this.plan = plan; + this.threshold = getThreshold(); + } - public RoundRobinResultIterator(List iterators, QueryPlan plan) { - this.resultIterators = null; - this.plan = plan; - this.threshold = getThreshold(); - initOpenIterators(wrapToRoundRobinIterators(iterators)); - } + public RoundRobinResultIterator(List iterators, QueryPlan plan) { + this.resultIterators = null; + this.plan = plan; + this.threshold = getThreshold(); + initOpenIterators(wrapToRoundRobinIterators(iterators)); + } - public static ResultIterator newIterator(final List iterators, QueryPlan plan) { - if (iterators.isEmpty()) { return EMPTY_ITERATOR; } - if (iterators.size() == 1) { return iterators.get(0); } - return new RoundRobinResultIterator(iterators, plan); + public static ResultIterator newIterator(final List iterators, + QueryPlan plan) { + if (iterators.isEmpty()) { + return EMPTY_ITERATOR; } - - @Override - public Tuple next() throws SQLException { - List iterators; - int size; - while ((size = (iterators = getIterators()).size()) > 0) { - index = index % size; - RoundRobinIterator itr = iterators.get(index); - if (itr.getNumRecordsRead() < threshold) { - Tuple tuple; - if ((tuple = itr.peek()) != null) { - tuple = itr.next(); - if (itr.getNumRecordsRead() == threshold) { - numScannersCacheExhausted++; - } - index = (index + 1) % size; - return tuple; - } else { - // The underlying scanner is exhausted. Close the iterator and un-track it. - itr.close(); - iterators.remove(index); - if (iterators.size() == 0) { - close(); - } - } - } else { - index = (index + 1) % size; - } + if (iterators.size() == 1) { + return iterators.get(0); + } + return new RoundRobinResultIterator(iterators, plan); + } + + @Override + public Tuple next() throws SQLException { + List iterators; + int size; + while ((size = (iterators = getIterators()).size()) > 0) { + index = index % size; + RoundRobinIterator itr = iterators.get(index); + if (itr.getNumRecordsRead() < threshold) { + Tuple tuple; + if ((tuple = itr.peek()) != null) { + tuple = itr.next(); + if (itr.getNumRecordsRead() == threshold) { + numScannersCacheExhausted++; + } + index = (index + 1) % size; + return tuple; + } else { + // The underlying scanner is exhausted. Close the iterator and un-track it. 
+ itr.close(); + iterators.remove(index); + if (iterators.size() == 0) { + close(); + } } - close(); - return null; + } else { + index = (index + 1) % size; + } } - - @Override - public void close() throws SQLException { - if (closed) { return; } - closed = true; - SQLException toThrow = null; - try { - if (resultIterators != null) { - resultIterators.close(); - } - } catch (Exception e) { - toThrow = ClientUtil.parseServerException(e); - } finally { + close(); + return null; + } + + @Override + public void close() throws SQLException { + if (closed) { + return; + } + closed = true; + SQLException toThrow = null; + try { + if (resultIterators != null) { + resultIterators.close(); + } + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + try { + if (openIterators.size() > 0) { + for (RoundRobinIterator itr : openIterators) { try { - if (openIterators.size() > 0) { - for (RoundRobinIterator itr : openIterators) { - try { - itr.close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } - } - } finally { - if (toThrow != null) { throw toThrow; } + itr.close(); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } } + } } - } - - @Override - public void explain(List planSteps) { - if (resultIterators != null) { - resultIterators.explain(planSteps); + } finally { + if (toThrow != null) { + throw toThrow; } + } } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - if (resultIterators != null) { - resultIterators.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void explain(List planSteps) { + if (resultIterators != null) { + resultIterators.explain(planSteps); } + } - @VisibleForTesting - int getNumberOfParallelFetches() { - return numParallelFetches; + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + if (resultIterators != null) { + resultIterators.explain(planSteps, explainPlanAttributesBuilder); } + } - @VisibleForTesting - QueryPlan getQueryPlan() { - return plan; - } + @VisibleForTesting + int getNumberOfParallelFetches() { + return numParallelFetches; + } - private List getIterators() throws SQLException { - if (closed) { return Collections.emptyList(); } - if (openIterators.size() > 0 && openIterators.size() == numScannersCacheExhausted) { - /* - * All the scanners have exhausted their cache. Submit the scanners back to the pool so that they can fetch - * the next batch of records in parallel. 
- */ - initOpenIterators(fetchNextBatch()); - } else if (openIterators.size() == 0 && resultIterators != null) { - List iterators = resultIterators.getIterators(); - initOpenIterators(wrapToRoundRobinIterators(iterators)); - } - return openIterators; - } + @VisibleForTesting + QueryPlan getQueryPlan() { + return plan; + } - private List wrapToRoundRobinIterators(List iterators) { - List roundRobinItrs = new ArrayList<>(iterators.size()); - for (PeekingResultIterator itr : iterators) { - roundRobinItrs.add(new RoundRobinIterator(itr, null)); - } - return roundRobinItrs; + private List getIterators() throws SQLException { + if (closed) { + return Collections.emptyList(); } - - private void initOpenIterators(List iterators) { - openIterators.clear(); - openIterators.addAll(iterators); - index = 0; - numScannersCacheExhausted = 0; + if (openIterators.size() > 0 && openIterators.size() == numScannersCacheExhausted) { + /* + * All the scanners have exhausted their cache. Submit the scanners back to the pool so that + * they can fetch the next batch of records in parallel. + */ + initOpenIterators(fetchNextBatch()); + } else if (openIterators.size() == 0 && resultIterators != null) { + List iterators = resultIterators.getIterators(); + initOpenIterators(wrapToRoundRobinIterators(iterators)); } - - private int getThreshold() { - int cacheSize = getScannerCacheSize(); - checkArgument(cacheSize > 1, "RoundRobinResultIterator doesn't work when cache size is less than or equal to 1"); - return cacheSize - 1; + return openIterators; + } + + private List + wrapToRoundRobinIterators(List iterators) { + List roundRobinItrs = new ArrayList<>(iterators.size()); + for (PeekingResultIterator itr : iterators) { + roundRobinItrs.add(new RoundRobinIterator(itr, null)); } - - private int getScannerCacheSize() { - try { - return plan.getContext().getStatement().getFetchSize(); - } catch (Throwable e) { - Throwables.propagate(e); - } - return -1; // unreachable + return roundRobinItrs; + } + + private void initOpenIterators(List iterators) { + openIterators.clear(); + openIterators.addAll(iterators); + index = 0; + numScannersCacheExhausted = 0; + } + + private int getThreshold() { + int cacheSize = getScannerCacheSize(); + checkArgument(cacheSize > 1, + "RoundRobinResultIterator doesn't work when cache size is less than or equal to 1"); + return cacheSize - 1; + } + + private int getScannerCacheSize() { + try { + return plan.getContext().getStatement().getFetchSize(); + } catch (Throwable e) { + Throwables.propagate(e); } - - private List fetchNextBatch() throws SQLException { - int numExpectedIterators = openIterators.size(); - List> futures = new ArrayList<>(numExpectedIterators); - List results = new ArrayList<>(); - - // Randomize the order in which we will be hitting region servers to try not overload particular region servers. - Collections.shuffle(openIterators); - boolean success = false; - SQLException toThrow = null; - try { - StatementContext context = plan.getContext(); - final ConnectionQueryServices services = context.getConnection().getQueryServices(); - ExecutorService executor = services.getExecutor(); - numParallelFetches++; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Performing parallel fetch for " + openIterators.size() + " iterators. "); - } - for (final RoundRobinIterator itr : openIterators) { - Future future = executor.submit(new Callable() { - @Override - public Tuple call() throws Exception { - // Read the next record to refill the scanner's cache. 
- return itr.next(); - } - }); - futures.add(future); - } - int i = 0; - for (Future future : futures) { - Tuple tuple = future.get(); - if (tuple != null) { - results.add(new RoundRobinIterator(openIterators.get(i).delegate, tuple)); - } else { - // Underlying scanner is exhausted. So close it. - openIterators.get(i).close(); - } - i++; - } - success = true; - return results; - } catch (SQLException e) { - toThrow = e; - } catch (Exception e) { - toThrow = ClientUtil.parseServerException(e); - } finally { - try { - if (!success) { - try { - close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } - } finally { - if (toThrow != null) { - GLOBAL_FAILED_QUERY_COUNTER.increment(); - OverAllQueryMetrics overAllQueryMetrics = plan.getContext().getOverallQueryMetrics(); - overAllQueryMetrics.queryFailed(); - if (plan.getContext().getScanRanges().isPointLookup()) { - overAllQueryMetrics.queryPointLookupFailed(); - } else { - overAllQueryMetrics.queryScanFailed(); - } - throw toThrow; - } - } + return -1; // unreachable + } + + private List fetchNextBatch() throws SQLException { + int numExpectedIterators = openIterators.size(); + List> futures = new ArrayList<>(numExpectedIterators); + List results = new ArrayList<>(); + + // Randomize the order in which we will be hitting region servers to try not overload particular + // region servers. + Collections.shuffle(openIterators); + boolean success = false; + SQLException toThrow = null; + try { + StatementContext context = plan.getContext(); + final ConnectionQueryServices services = context.getConnection().getQueryServices(); + ExecutorService executor = services.getExecutor(); + numParallelFetches++; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Performing parallel fetch for " + openIterators.size() + " iterators. "); + } + for (final RoundRobinIterator itr : openIterators) { + Future future = executor.submit(new Callable() { + @Override + public Tuple call() throws Exception { + // Read the next record to refill the scanner's cache. + return itr.next(); + } + }); + futures.add(future); + } + int i = 0; + for (Future future : futures) { + Tuple tuple = future.get(); + if (tuple != null) { + results.add(new RoundRobinIterator(openIterators.get(i).delegate, tuple)); + } else { + // Underlying scanner is exhausted. So close it. + openIterators.get(i).close(); } - return null; // Not reachable - } - - /** - * Inner class that delegates to {@link PeekingResultIterator} keeping track the number of records it has read. Also - * keeps track of the tuple the {@link PeekingResultIterator} read in the previous next() call before it ran out of - * underlying scanner cache. 
- */ - private static class RoundRobinIterator implements PeekingResultIterator { - - private PeekingResultIterator delegate; - private Tuple tuple; - private int numRecordsRead; - - private RoundRobinIterator(PeekingResultIterator itr, Tuple tuple) { - this.delegate = itr; - this.tuple = tuple; - this.numRecordsRead = 0; + i++; + } + success = true; + return results; + } catch (SQLException e) { + toThrow = e; + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + try { + if (!success) { + try { + close(); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } + } } - - @Override - public void close() throws SQLException { - delegate.close(); + } finally { + if (toThrow != null) { + GLOBAL_FAILED_QUERY_COUNTER.increment(); + OverAllQueryMetrics overAllQueryMetrics = plan.getContext().getOverallQueryMetrics(); + overAllQueryMetrics.queryFailed(); + if (plan.getContext().getScanRanges().isPointLookup()) { + overAllQueryMetrics.queryPointLookupFailed(); + } else { + overAllQueryMetrics.queryScanFailed(); + } + throw toThrow; } + } + } + return null; // Not reachable + } + + /** + * Inner class that delegates to {@link PeekingResultIterator} keeping track the number of records + * it has read. Also keeps track of the tuple the {@link PeekingResultIterator} read in the + * previous next() call before it ran out of underlying scanner cache. + */ + private static class RoundRobinIterator implements PeekingResultIterator { + + private PeekingResultIterator delegate; + private Tuple tuple; + private int numRecordsRead; + + private RoundRobinIterator(PeekingResultIterator itr, Tuple tuple) { + this.delegate = itr; + this.tuple = tuple; + this.numRecordsRead = 0; + } - @Override - public Tuple next() throws SQLException { - if (tuple != null) { - Tuple t = tuple; - tuple = null; - return t; - } - numRecordsRead++; - return delegate.next(); - } + @Override + public void close() throws SQLException { + delegate.close(); + } - @Override - public void explain(List planSteps) { - delegate.explain(planSteps); - } + @Override + public Tuple next() throws SQLException { + if (tuple != null) { + Tuple t = tuple; + tuple = null; + return t; + } + numRecordsRead++; + return delegate.next(); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - delegate.explain(planSteps, explainPlanAttributesBuilder); - } + @Override + public void explain(List planSteps) { + delegate.explain(planSteps); + } - @Override - public Tuple peek() throws SQLException { - if (tuple != null) { return tuple; } - return delegate.peek(); - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + delegate.explain(planSteps, explainPlanAttributesBuilder); + } - public int getNumRecordsRead() { - return numRecordsRead; - } + @Override + public Tuple peek() throws SQLException { + if (tuple != null) { + return tuple; + } + return delegate.peek(); + } + public int getNumRecordsRead() { + return numRecordsRead; } + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java index 4306480fc83..450d8e3a20f 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,8 +25,7 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.expression.aggregator.Aggregator; import org.apache.phoenix.expression.aggregator.Aggregators; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; @@ -34,167 +33,166 @@ import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; - /** - * - * Client-side aggregate for key ordered aggregation. Prevents the comparison of - * row keys for rows returned unless we cross a scan boundary. - * + * Client-side aggregate for key ordered aggregation. Prevents the comparison of row keys for rows + * returned unless we cross a scan boundary. */ -public class RowKeyOrderedAggregateResultIterator extends LookAheadResultIterator implements AggregatingResultIterator { - private final ResultIterators resultIterators; - private List iterators; - private final Aggregators aggregators; - private final ImmutableBytesWritable currentKey = new ImmutableBytesWritable(); - private final ImmutableBytesWritable previousKey = new ImmutableBytesWritable(); - private boolean traversedIterator = true; - private boolean nextTraversedIterators; - private Tuple next; - - private int index; - - public RowKeyOrderedAggregateResultIterator(ResultIterators iterators, Aggregators aggregators) { - this.resultIterators = iterators; - this.aggregators = aggregators; - } - - private List getIterators() throws SQLException { - if (iterators == null && resultIterators != null) { - iterators = resultIterators.getIterators(); - } - return iterators; +public class RowKeyOrderedAggregateResultIterator extends LookAheadResultIterator + implements AggregatingResultIterator { + private final ResultIterators resultIterators; + private List iterators; + private final Aggregators aggregators; + private final ImmutableBytesWritable currentKey = new ImmutableBytesWritable(); + private final ImmutableBytesWritable previousKey = new ImmutableBytesWritable(); + private boolean traversedIterator = true; + private boolean nextTraversedIterators; + private Tuple next; + + private int index; + + public RowKeyOrderedAggregateResultIterator(ResultIterators iterators, Aggregators aggregators) { + this.resultIterators = iterators; + this.aggregators = aggregators; + } + + private List getIterators() throws SQLException { + if (iterators == null && resultIterators != null) { + iterators = resultIterators.getIterators(); } - - @Override - public void close() throws SQLException { - SQLException toThrow = null; - try { - if (resultIterators != null) { - resultIterators.close(); - } - } catch (Exception e) { - toThrow = ClientUtil.parseServerException(e); - } finally { + return iterators; + } + + @Override + public void close() throws SQLException 
{ + SQLException toThrow = null; + try { + if (resultIterators != null) { + resultIterators.close(); + } + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + try { + if (iterators != null) { + for (; index < iterators.size(); index++) { + PeekingResultIterator iterator = iterators.get(index); try { - if (iterators != null) { - for (;index < iterators.size(); index++) { - PeekingResultIterator iterator = iterators.get(index); - try { - iterator.close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } - } - } finally { - if (toThrow != null) { - throw toThrow; - } + iterator.close(); + } catch (Exception e) { + if (toThrow == null) { + toThrow = ClientUtil.parseServerException(e); + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } } + } + } + } finally { + if (toThrow != null) { + throw toThrow; } + } } + } + @Override + public void explain(List planSteps) { + if (resultIterators != null) { + resultIterators.explain(planSteps); + } + } - @Override - public void explain(List planSteps) { - if (resultIterators != null) { - resultIterators.explain(planSteps); - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + if (resultIterators != null) { + resultIterators.explain(planSteps, explainPlanAttributesBuilder); } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - if (resultIterators != null) { - resultIterators.explain(planSteps, explainPlanAttributesBuilder); - } + private Tuple nextTuple() throws SQLException { + List iterators = getIterators(); + while (index < iterators.size()) { + PeekingResultIterator iterator = iterators.get(index); + Tuple r = iterator.peek(); + if (r != null) { + return iterator.next(); + } + traversedIterator = true; + iterator.close(); + index++; } + return null; + } - private Tuple nextTuple() throws SQLException { - List iterators = getIterators(); - while (index < iterators.size()) { - PeekingResultIterator iterator = iterators.get(index); - Tuple r = iterator.peek(); - if (r != null) { - return iterator.next(); - } - traversedIterator = true; - iterator.close(); - index++; - } - return null; + private boolean continueAggregating(Tuple previous, Tuple next) { + if (next == null) { + return false; } - - private boolean continueAggregating(Tuple previous, Tuple next) { - if (next == null) { - return false; - } - next.getKey(currentKey); - previous.getKey(previousKey); - return (currentKey.compareTo(previousKey) == 0); + next.getKey(currentKey); + previous.getKey(previousKey); + return (currentKey.compareTo(previousKey) == 0); + } + + @Override + public Tuple next() throws SQLException { + Tuple t = super.next(); + if (t == null) { + return null; } - - @Override - public Tuple next() throws SQLException { - Tuple t = super.next(); - if (t == null) { - return null; - } - aggregate(t); - return t; + aggregate(t); + return t; + } + + @Override + protected Tuple advance() throws SQLException { + Tuple current = this.next; + boolean traversedIterators = nextTraversedIterators; + if (current == null) { + current = nextTuple(); + traversedIterators = this.traversedIterator; } - - @Override - protected Tuple advance() throws SQLException { - Tuple current = this.next; - boolean traversedIterators = nextTraversedIterators; - if (current == null) { 
- current = nextTuple(); - traversedIterators = this.traversedIterator; + if (current != null) { + Tuple previous = current; + Aggregator[] rowAggregators = null; + while (true) { + current = nextTuple(); + if (!traversedIterators || !continueAggregating(previous, current)) { + break; } - if (current != null) { - Tuple previous = current; - Aggregator[] rowAggregators = null; - while (true) { - current = nextTuple(); - if (!traversedIterators || !continueAggregating(previous, current)) { - break; - } - if (rowAggregators == null) { - rowAggregators = aggregate(previous); - } - aggregators.aggregate(rowAggregators, current); - traversedIterators = this.traversedIterator; - } - this.next = current; - this.nextTraversedIterators = this.traversedIterator; - if (rowAggregators == null) { - current = previous; - } else { - byte[] value = aggregators.toBytes(rowAggregators); - current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); - } - } - if (current == null) { - close(); // Close underlying ResultIterators to free resources sooner rather than later + if (rowAggregators == null) { + rowAggregators = aggregate(previous); } - return current; + aggregators.aggregate(rowAggregators, current); + traversedIterators = this.traversedIterator; + } + this.next = current; + this.nextTraversedIterators = this.traversedIterator; + if (rowAggregators == null) { + current = previous; + } else { + byte[] value = aggregators.toBytes(rowAggregators); + current = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(previousKey, + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); + } } - - @Override - public String toString() { - return "RowKeyOrderedAggregateResultIterator [resultIterators=" + resultIterators + ", index=" + index + "]"; - } - - @Override - public Aggregator[] aggregate(Tuple result) { - Aggregator[] rowAggregators = aggregators.getAggregators(); - aggregators.reset(rowAggregators); - aggregators.aggregate(rowAggregators, result); - return rowAggregators; + if (current == null) { + close(); // Close underlying ResultIterators to free resources sooner rather than later } + return current; + } + + @Override + public String toString() { + return "RowKeyOrderedAggregateResultIterator [resultIterators=" + resultIterators + ", index=" + + index + "]"; + } + + @Override + public Aggregator[] aggregate(Tuple result) { + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + aggregators.aggregate(rowAggregators, result); + return rowAggregators; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java index 0b60b6d3e68..ce115871cfc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -72,197 +72,197 @@ public class ScanningResultIterator implements ResultIterator { - private static final Logger LOG = LoggerFactory.getLogger(ScanningResultIterator.class); - private final ResultScanner scanner; - private final ScanMetricsHolder scanMetricsHolder; - boolean scanMetricsUpdated; - boolean scanMetricsEnabled; - private StatementContext context; - private static boolean throwExceptionIfScannerClosedForceFully = false; + private static final Logger LOG = LoggerFactory.getLogger(ScanningResultIterator.class); + private final ResultScanner scanner; + private final ScanMetricsHolder scanMetricsHolder; + boolean scanMetricsUpdated; + boolean scanMetricsEnabled; + private StatementContext context; + private static boolean throwExceptionIfScannerClosedForceFully = false; - private final boolean isMapReduceContext; - private final long maxQueryEndTime; + private final boolean isMapReduceContext; + private final long maxQueryEndTime; - private long dummyRowCounter = 0; + private long dummyRowCounter = 0; - private final ScanningResultPostDummyResultCaller scanningResultPostDummyResultCaller; - private final ScanningResultPostValidResultCaller scanningResultPostValidResultCaller; + private final ScanningResultPostDummyResultCaller scanningResultPostDummyResultCaller; + private final ScanningResultPostValidResultCaller scanningResultPostValidResultCaller; - public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder, StatementContext context, boolean isMapReduceContext, long maxQueryEndTime) { - this.scanner = scanner; - this.scanMetricsHolder = scanMetricsHolder; - this.context = context; - scanMetricsUpdated = false; - scanMetricsEnabled = scan.isScanMetricsEnabled(); - this.isMapReduceContext = isMapReduceContext; - this.maxQueryEndTime = maxQueryEndTime; - Class dummyResultCallerClazz = - context.getConnection().getQueryServices().getConfiguration().getClass( - QueryServices.PHOENIX_POST_DUMMY_PROCESS, - ScanningResultPostDummyResultCaller.class, - ScanningResultPostDummyResultCaller.class); - this.scanningResultPostDummyResultCaller = - ReflectionUtils.newInstance(dummyResultCallerClazz, - context.getConnection().getQueryServices().getConfiguration()); - Class validResultCallerClazz = - context.getConnection().getQueryServices().getConfiguration().getClass( - QueryServices.PHOENIX_POST_VALID_PROCESS, - ScanningResultPostValidResultCaller.class, - ScanningResultPostValidResultCaller.class); - this.scanningResultPostValidResultCaller = - ReflectionUtils.newInstance(validResultCallerClazz, - context.getConnection().getQueryServices().getConfiguration()); - } + public ScanningResultIterator(ResultScanner scanner, Scan scan, + ScanMetricsHolder scanMetricsHolder, StatementContext context, boolean isMapReduceContext, + long maxQueryEndTime) { + this.scanner = scanner; + this.scanMetricsHolder = scanMetricsHolder; + this.context = context; + scanMetricsUpdated = false; + scanMetricsEnabled = scan.isScanMetricsEnabled(); + this.isMapReduceContext = isMapReduceContext; + this.maxQueryEndTime = maxQueryEndTime; + Class dummyResultCallerClazz = + context.getConnection().getQueryServices().getConfiguration().getClass( + QueryServices.PHOENIX_POST_DUMMY_PROCESS, 
ScanningResultPostDummyResultCaller.class, + ScanningResultPostDummyResultCaller.class); + this.scanningResultPostDummyResultCaller = ReflectionUtils.newInstance(dummyResultCallerClazz, + context.getConnection().getQueryServices().getConfiguration()); + Class validResultCallerClazz = + context.getConnection().getQueryServices().getConfiguration().getClass( + QueryServices.PHOENIX_POST_VALID_PROCESS, ScanningResultPostValidResultCaller.class, + ScanningResultPostValidResultCaller.class); + this.scanningResultPostValidResultCaller = ReflectionUtils.newInstance(validResultCallerClazz, + context.getConnection().getQueryServices().getConfiguration()); + } - @Override - public void close() throws SQLException { - // close the scanner so that metrics are available - scanner.close(); - updateMetrics(); - } + @Override + public void close() throws SQLException { + // close the scanner so that metrics are available + scanner.close(); + updateMetrics(); + } - private void changeMetric(CombinableMetric metric, Long value) { - if (value != null) { - metric.change(value); - } + private void changeMetric(CombinableMetric metric, Long value) { + if (value != null) { + metric.change(value); } + } - private void changeMetric(GlobalClientMetrics metric, Long value) { - if (value != null) { - metric.update(value); - } + private void changeMetric(GlobalClientMetrics metric, Long value) { + if (value != null) { + metric.update(value); } + } - private void updateMetrics() { + private void updateMetrics() { - if (scanMetricsEnabled && !scanMetricsUpdated) { - ScanMetrics scanMetrics = scanner.getScanMetrics(); - Map scanMetricsMap = scanMetrics.getMetricsMap(); - scanMetricsHolder.setScanMetricMap(scanMetricsMap); + if (scanMetricsEnabled && !scanMetricsUpdated) { + ScanMetrics scanMetrics = scanner.getScanMetrics(); + Map scanMetricsMap = scanMetrics.getMetricsMap(); + scanMetricsHolder.setScanMetricMap(scanMetricsMap); - changeMetric(scanMetricsHolder.getCountOfRPCcalls(), - scanMetricsMap.get(RPC_CALLS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRemoteRPCcalls(), - scanMetricsMap.get(REMOTE_RPC_CALLS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getSumOfMillisSecBetweenNexts(), - scanMetricsMap.get(MILLIS_BETWEEN_NEXTS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfNSRE(), - scanMetricsMap.get(NOT_SERVING_REGION_EXCEPTION_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfBytesInResults(), - scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfBytesInRemoteResults(), - scanMetricsMap.get(BYTES_IN_REMOTE_RESULTS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRegions(), - scanMetricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRPCRetries(), - scanMetricsMap.get(RPC_RETRIES_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(), - scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRowsScanned(), - scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRowsFiltered(), - scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfBytesScanned(), - scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); - changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter); + changeMetric(scanMetricsHolder.getCountOfRPCcalls(), + scanMetricsMap.get(RPC_CALLS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRemoteRPCcalls(), + 
scanMetricsMap.get(REMOTE_RPC_CALLS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getSumOfMillisSecBetweenNexts(), + scanMetricsMap.get(MILLIS_BETWEEN_NEXTS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfNSRE(), + scanMetricsMap.get(NOT_SERVING_REGION_EXCEPTION_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfBytesInResults(), + scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfBytesInRemoteResults(), + scanMetricsMap.get(BYTES_IN_REMOTE_RESULTS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRegions(), + scanMetricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRPCRetries(), + scanMetricsMap.get(RPC_RETRIES_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(), + scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRowsScanned(), + scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRowsFiltered(), + scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfBytesScanned(), + scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); + changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter); - changeMetric(GLOBAL_SCAN_BYTES, - scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_RPC_CALLS, - scanMetricsMap.get(RPC_CALLS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_REMOTE_RPC_CALLS - , scanMetricsMap.get(REMOTE_RPC_CALLS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS, - scanMetricsMap.get(MILLIS_BETWEEN_NEXTS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_NOT_SERVING_REGION_EXCEPTION, - scanMetricsMap.get(NOT_SERVING_REGION_EXCEPTION_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS, - scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS, - scanMetricsMap.get(BYTES_IN_REMOTE_RESULTS_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_SCANNED_REGIONS, - scanMetricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_RPC_RETRIES, - scanMetricsMap.get(RPC_RETRIES_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_REMOTE_RPC_RETRIES, - scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_ROWS_SCANNED, - scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); - changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED, - scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME)); + changeMetric(GLOBAL_SCAN_BYTES, scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_RPC_CALLS, scanMetricsMap.get(RPC_CALLS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_REMOTE_RPC_CALLS, + scanMetricsMap.get(REMOTE_RPC_CALLS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS, + scanMetricsMap.get(MILLIS_BETWEEN_NEXTS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_NOT_SERVING_REGION_EXCEPTION, + scanMetricsMap.get(NOT_SERVING_REGION_EXCEPTION_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS, + scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS, + scanMetricsMap.get(BYTES_IN_REMOTE_RESULTS_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_SCANNED_REGIONS, + scanMetricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_RPC_RETRIES, scanMetricsMap.get(RPC_RETRIES_METRIC_NAME)); + 
changeMetric(GLOBAL_HBASE_COUNT_REMOTE_RPC_RETRIES, + scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_ROWS_SCANNED, + scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED, + scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME)); - changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter); - - scanMetricsUpdated = true; - } + changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter); + scanMetricsUpdated = true; } - @Override - public Tuple next() throws SQLException { - try { - Result result = scanner.next(); - while (result != null && (result.isEmpty() || isDummy(result))) { - dummyRowCounter += 1; - long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis(); - if (!context.getHasFirstValidResult() && timeOutForScan < 0) { - throw new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT).setMessage( - ". Query couldn't be completed in the allotted time : " - + context.getStatement().getQueryTimeoutInMillis() + " ms").build().buildException(); - } - if (!isMapReduceContext && (context.getConnection().isClosing() || context.getConnection().isClosed())) { - LOG.warn("Closing ResultScanner as Connection is already closed or in middle of closing"); - if (throwExceptionIfScannerClosedForceFully) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.FAILED_KNOWINGLY_FOR_TEST).build().buildException(); - } - close(); - return null; - } - processAfterRetrievingDummyResult(); - result = scanner.next(); - } - if (result == null) { - close(); // Free up resources early - return null; - } - context.setHasFirstValidResult(true); - // TODO: use ResultTuple.setResult(result)? - // Need to create a new one if holding on to it (i.e. OrderedResultIterator) - processAfterRetrievingValidResult(); - return new ResultTuple(result); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); + } + + @Override + public Tuple next() throws SQLException { + try { + Result result = scanner.next(); + while (result != null && (result.isEmpty() || isDummy(result))) { + dummyRowCounter += 1; + long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis(); + if (!context.getHasFirstValidResult() && timeOutForScan < 0) { + throw new SQLExceptionInfo.Builder(OPERATION_TIMED_OUT) + .setMessage(". Query couldn't be completed in the allotted time : " + + context.getStatement().getQueryTimeoutInMillis() + " ms") + .build().buildException(); + } + if ( + !isMapReduceContext + && (context.getConnection().isClosing() || context.getConnection().isClosed()) + ) { + LOG.warn("Closing ResultScanner as Connection is already closed or in middle of closing"); + if (throwExceptionIfScannerClosedForceFully) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.FAILED_KNOWINGLY_FOR_TEST).build() + .buildException(); + } + close(); + return null; } + processAfterRetrievingDummyResult(); + result = scanner.next(); + } + if (result == null) { + close(); // Free up resources early + return null; + } + context.setHasFirstValidResult(true); + // TODO: use ResultTuple.setResult(result)? + // Need to create a new one if holding on to it (i.e. 
OrderedResultIterator) + processAfterRetrievingValidResult(); + return new ResultTuple(result); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); } + } - private void processAfterRetrievingDummyResult() { - scanningResultPostDummyResultCaller.postDummyProcess(); - } + private void processAfterRetrievingDummyResult() { + scanningResultPostDummyResultCaller.postDummyProcess(); + } - private void processAfterRetrievingValidResult() { - scanningResultPostValidResultCaller.postValidRowProcess(); - } + private void processAfterRetrievingValidResult() { + scanningResultPostValidResultCaller.postValidRowProcess(); + } - @Override - public void explain(List planSteps) { - } + @Override + public void explain(List planSteps) { + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } - @Override - public String toString() { - return "ScanningResultIterator [scanner=" + scanner + "]"; - } + @Override + public String toString() { + return "ScanningResultIterator [scanner=" + scanner + "]"; + } - public ResultScanner getScanner() { - return scanner; - } + public ResultScanner getScanner() { + return scanner; + } - @VisibleForTesting - public static void setIsScannerClosedForcefully(boolean throwException) { - throwExceptionIfScannerClosedForceFully = throwException; - } + @VisibleForTesting + public static void setIsScannerClosedForcefully(boolean throwException) { + throwExceptionIfScannerClosedForceFully = throwException; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostDummyResultCaller.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostDummyResultCaller.java index 6367a28e0c2..f92b7193382 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostDummyResultCaller.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostDummyResultCaller.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.iterate; /** @@ -23,8 +23,8 @@ */ public class ScanningResultPostDummyResultCaller { - public void postDummyProcess() { - // no-op - } + public void postDummyProcess() { + // no-op + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostValidResultCaller.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostValidResultCaller.java index 4a38f512184..d7fade0db21 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostValidResultCaller.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultPostValidResultCaller.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.iterate; /** @@ -23,8 +23,8 @@ */ public class ScanningResultPostValidResultCaller { - public void postValidRowProcess() { - // no-op - } + public void postValidRowProcess() { + // no-op + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScansWithRegionLocations.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScansWithRegionLocations.java index 1bd3f45c560..80d1e171ca8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScansWithRegionLocations.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScansWithRegionLocations.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,34 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.iterate; +import java.util.List; + import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.Scan; -import java.util.List; - /** * Scan list to be retrieved for the BaseResultIterator with the list of region locations the scans * would be served from. */ public class ScansWithRegionLocations { - private final List> scans; - private final List regionLocations; + private final List> scans; + private final List regionLocations; - public ScansWithRegionLocations(List> scans, - List regionLocations) { - this.scans = scans; - this.regionLocations = regionLocations; - } + public ScansWithRegionLocations(List> scans, List regionLocations) { + this.scans = scans; + this.regionLocations = regionLocations; + } - public List> getScans() { - return scans; - } + public List> getScans() { + return scans; + } - public List getRegionLocations() { - return regionLocations; - } + public List getRegionLocations() { + return regionLocations; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SequenceResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SequenceResultIterator.java index 5674a6a6ce8..0f93c14853c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SequenceResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SequenceResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,56 +20,53 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.SequenceManager; import org.apache.phoenix.schema.tuple.Tuple; /** - * * Iterates through tuples retrieving sequences from the server as needed - * - * * @since 3.0 */ public class SequenceResultIterator extends DelegateResultIterator { - private final SequenceManager sequenceManager; - - public SequenceResultIterator(ResultIterator delegate, SequenceManager sequenceManager) throws SQLException { - super(delegate); - this.sequenceManager = sequenceManager; - } - - @Override - public Tuple next() throws SQLException { - Tuple next = super.next(); - if (next == null) { - return null; - } - next = sequenceManager.newSequenceTuple(next); - return next; - } + private final SequenceManager sequenceManager; - @Override - public void explain(List planSteps) { - super.explain(planSteps); - int nSequences = sequenceManager.getSequenceCount(); - planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? 
"" : "S")); - } + public SequenceResultIterator(ResultIterator delegate, SequenceManager sequenceManager) + throws SQLException { + super(delegate); + this.sequenceManager = sequenceManager; + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - super.explain(planSteps, explainPlanAttributesBuilder); - int nSequences = sequenceManager.getSequenceCount(); - explainPlanAttributesBuilder.setClientSequenceCount(nSequences); - planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences - + " SEQUENCE" + (nSequences == 1 ? "" : "S")); + @Override + public Tuple next() throws SQLException { + Tuple next = super.next(); + if (next == null) { + return null; } + next = sequenceManager.newSequenceTuple(next); + return next; + } - @Override - public String toString() { - return "SequenceResultIterator [sequenceManager=" + sequenceManager - + "]"; - } + @Override + public void explain(List planSteps) { + super.explain(planSteps); + int nSequences = sequenceManager.getSequenceCount(); + planSteps + .add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S")); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + super.explain(planSteps, explainPlanAttributesBuilder); + int nSequences = sequenceManager.getSequenceCount(); + explainPlanAttributesBuilder.setClientSequenceCount(nSequences); + planSteps + .add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S")); + } + + @Override + public String toString() { + return "SequenceResultIterator [sequenceManager=" + sequenceManager + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java index a783c3558a1..ef332b49f2f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,8 +29,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; @@ -43,200 +42,203 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.QueryUtil; - -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ScanUtil; - /** - * - * Class that parallelizes the scan over a table using the ExecutorService provided. Each region of the table will be scanned in parallel with - * the results accessible through {@link #getIterators()} - * - * + * Class that parallelizes the scan over a table using the ExecutorService provided. 
Each region of + * the table will be scanned in parallel with the results accessible through {@link #getIterators()} * @since 0.1 */ public class SerialIterators extends BaseResultIterators { - private static final String NAME = "SERIAL"; - private final ParallelIteratorFactory iteratorFactory; - private final Integer offset; - - public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset, - ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan, Map caches, QueryPlan dataPlan) - throws SQLException { - super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan); - this.offset = offset; - // must be a offset or a limit specified or a SERIAL hint - Preconditions.checkArgument( - offset != null || perScanLimit != null || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL)); - this.iteratorFactory = iteratorFactory; + private static final String NAME = "SERIAL"; + private final ParallelIteratorFactory iteratorFactory; + private final Integer offset; + + public SerialIterators(QueryPlan plan, Integer perScanLimit, Integer offset, + ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper, Scan scan, + Map caches, QueryPlan dataPlan) throws SQLException { + super(plan, perScanLimit, offset, scanGrouper, scan, caches, dataPlan); + this.offset = offset; + // must be a offset or a limit specified or a SERIAL hint + Preconditions.checkArgument(offset != null || perScanLimit != null + || plan.getStatement().getHint().hasHint(HintNode.Hint.SERIAL)); + this.iteratorFactory = iteratorFactory; + } + + @Override + protected boolean isSerial() { + return true; + } + + @Override + protected void submitWork(final List> nestedScans, + List>>> nestedFutures, + final Queue allIterators, int estFlattenedSize, boolean isReverse, + final ParallelScanGrouper scanGrouper, long maxQueryEndTime) { + ExecutorService executor = context.getConnection().getQueryServices().getExecutor(); + final String tableName = tableRef.getTable().getPhysicalName().getString(); + final TaskExecutionMetricsHolder taskMetrics = + new TaskExecutionMetricsHolder(context.getReadMetricsQueue(), tableName); + final PhoenixConnection conn = context.getConnection(); + final long renewLeaseThreshold = conn.getQueryServices().getRenewLeaseThresholdMilliSeconds(); + int expectedListSize = nestedScans.size() * 10; + List flattenedScans = Lists.newArrayListWithExpectedSize(expectedListSize); + for (List list : nestedScans) { + flattenedScans.addAll(list); } + if (!flattenedScans.isEmpty()) { + if (isReverse) { + flattenedScans = Lists.reverse(flattenedScans); + } + final List finalScans = flattenedScans; + Future future = + executor.submit(Tracing.wrap(new JobCallable() { + @Override + public PeekingResultIterator call() throws Exception { + PeekingResultIterator itr = new SerialIterator(finalScans, tableName, + renewLeaseThreshold, offset, caches, maxQueryEndTime); + return itr; + } - @Override - protected boolean isSerial() { - return true; + /** + * Defines the grouping for round robin behavior. All threads spawned to process this scan + * will be grouped together and time sliced with other simultaneously executing parallel + * scans. 
+ */ + @Override + public Object getJobId() { + return SerialIterators.this; + } + + @Override + public TaskExecutionMetricsHolder getTaskExecutionMetric() { + return taskMetrics; + } + }, "Serial scanner for table: " + tableRef.getTable().getPhysicalName().getString())); + // Add our singleton Future which will execute serially + nestedFutures.add(Collections.singletonList( + new Pair>(flattenedScans.get(0), future))); } + } - @Override - protected void submitWork(final List> nestedScans, List>>> nestedFutures, - final Queue allIterators, int estFlattenedSize, boolean isReverse, final ParallelScanGrouper scanGrouper, long maxQueryEndTime) { - ExecutorService executor = context.getConnection().getQueryServices().getExecutor(); - final String tableName = tableRef.getTable().getPhysicalName().getString(); - final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(context.getReadMetricsQueue(), tableName); - final PhoenixConnection conn = context.getConnection(); - final long renewLeaseThreshold = conn.getQueryServices().getRenewLeaseThresholdMilliSeconds(); - int expectedListSize = nestedScans.size() * 10; - List flattenedScans = Lists.newArrayListWithExpectedSize(expectedListSize); - for (List list : nestedScans) { - flattenedScans.addAll(list); - } - if (!flattenedScans.isEmpty()) { - if (isReverse) { - flattenedScans = Lists.reverse(flattenedScans); - } - final List finalScans = flattenedScans; - Future future = executor.submit(Tracing.wrap(new JobCallable() { - @Override - public PeekingResultIterator call() throws Exception { - PeekingResultIterator itr = new SerialIterator(finalScans, tableName, renewLeaseThreshold, offset, caches, maxQueryEndTime); - return itr; - } - - /** - * Defines the grouping for round robin behavior. All threads spawned to process - * this scan will be grouped together and time sliced with other simultaneously - * executing parallel scans. - */ - @Override - public Object getJobId() { - return SerialIterators.this; - } - - @Override - public TaskExecutionMetricsHolder getTaskExecutionMetric() { - return taskMetrics; - } - }, "Serial scanner for table: " + tableRef.getTable().getPhysicalName().getString())); - // Add our singleton Future which will execute serially - nestedFutures.add(Collections.singletonList(new Pair>(flattenedScans.get(0), future))); - } + @Override + protected String getName() { + return NAME; + } + + /** + * Iterator that creates iterators for scans only when needed. This helps reduce the cost of + * pre-constructing all the iterators which we may not even use. 
+ */ + private class SerialIterator implements PeekingResultIterator { + private final List scans; + private final String tableName; + private final long renewLeaseThreshold; + private int index; + private PeekingResultIterator currentIterator; + private Integer remainingOffset; + private Map caches; + private final long maxQueryEndTime; + + private SerialIterator(List flattenedScans, String tableName, long renewLeaseThreshold, + Integer offset, Map caches, long maxQueryEndTime) + throws SQLException { + this.scans = Lists.newArrayListWithExpectedSize(flattenedScans.size()); + this.tableName = tableName; + this.renewLeaseThreshold = renewLeaseThreshold; + this.scans.addAll(flattenedScans); + this.remainingOffset = offset; + this.caches = caches; + this.maxQueryEndTime = maxQueryEndTime; + if (this.remainingOffset != null) { + // mark the last scan for offset purposes + this.scans.get(this.scans.size() - 1).setAttribute(QueryConstants.LAST_SCAN, + Bytes.toBytes(Boolean.TRUE)); + } } - @Override - protected String getName() { - return NAME; + private PeekingResultIterator currentIterator() throws SQLException { + if (currentIterator == null) { + return currentIterator = nextIterator(); + } + if (currentIterator.peek() == null) { + currentIterator.close(); + currentIterator = nextIterator(); + } + return currentIterator; } - - /** - * - * Iterator that creates iterators for scans only when needed. - * This helps reduce the cost of pre-constructing all the iterators - * which we may not even use. - */ - private class SerialIterator implements PeekingResultIterator { - private final List scans; - private final String tableName; - private final long renewLeaseThreshold; - private int index; - private PeekingResultIterator currentIterator; - private Integer remainingOffset; - private Map caches; - private final long maxQueryEndTime; - - private SerialIterator(List flattenedScans, String tableName, long renewLeaseThreshold, Integer offset, Map caches, long maxQueryEndTime) throws SQLException { - this.scans = Lists.newArrayListWithExpectedSize(flattenedScans.size()); - this.tableName = tableName; - this.renewLeaseThreshold = renewLeaseThreshold; - this.scans.addAll(flattenedScans); - this.remainingOffset = offset; - this.caches = caches; - this.maxQueryEndTime = maxQueryEndTime; - if (this.remainingOffset != null) { - // mark the last scan for offset purposes - this.scans.get(this.scans.size() - 1).setAttribute(QueryConstants.LAST_SCAN, Bytes.toBytes(Boolean.TRUE)); - } - } - - private PeekingResultIterator currentIterator() throws SQLException { - if (currentIterator == null) { - return currentIterator = nextIterator(); - } - if (currentIterator.peek() == null) { - currentIterator.close(); - currentIterator = nextIterator(); - } - return currentIterator; + + private PeekingResultIterator nextIterator() throws SQLException { + if (index >= scans.size()) { + return EMPTY_ITERATOR; + } + ReadMetricQueue readMetrics = context.getReadMetricsQueue(); + while (index < scans.size()) { + Scan currentScan = scans.get(index++); + if (remainingOffset != null) { + currentScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET, + PInteger.INSTANCE.toBytes(remainingOffset)); } - - private PeekingResultIterator nextIterator() throws SQLException { - if (index >= scans.size()) { - return EMPTY_ITERATOR; - } - ReadMetricQueue readMetrics = context.getReadMetricsQueue(); - while (index < scans.size()) { - Scan currentScan = scans.get(index++); - if (remainingOffset != null) { - 
currentScan.setAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET, PInteger.INSTANCE.toBytes(remainingOffset)); - } - ScanMetricsHolder scanMetricsHolder = - ScanMetricsHolder.getInstance(readMetrics, tableName, currentScan, - context.getConnection().getLogLevel()); - TableResultIterator itr = - new TableResultIterator(mutationState, currentScan, scanMetricsHolder, - renewLeaseThreshold, plan, scanGrouper, caches, maxQueryEndTime); - PeekingResultIterator peekingItr = iteratorFactory.newIterator(context, itr, currentScan, tableName, plan); - Tuple tuple; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - tuple = peekingItr.peek(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + - (EnvironmentEdgeManager.currentTimeMillis() - startTime) + - "ms, Table: " + tableName + ", Scan: " + currentScan, - ScanUtil.getCustomAnnotations(currentScan))); - } - if (tuple == null) { - peekingItr.close(); - continue; - } else if ((remainingOffset = QueryUtil.getRemainingOffset(tuple)) != null) { - peekingItr.next(); - peekingItr.close(); - continue; - } - context.getConnection().addIteratorForLeaseRenewal(itr); - return peekingItr; - } - return EMPTY_ITERATOR; + ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, + currentScan, context.getConnection().getLogLevel()); + TableResultIterator itr = new TableResultIterator(mutationState, currentScan, + scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, maxQueryEndTime); + PeekingResultIterator peekingItr = + iteratorFactory.newIterator(context, itr, currentScan, tableName, plan); + Tuple tuple; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + tuple = peekingItr.peek(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Id: " + scanId + ", Time: " + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + + "ms, Table: " + tableName + ", Scan: " + currentScan, + ScanUtil.getCustomAnnotations(currentScan))); } - - @Override - public Tuple next() throws SQLException { - return currentIterator().next(); + if (tuple == null) { + peekingItr.close(); + continue; + } else if ((remainingOffset = QueryUtil.getRemainingOffset(tuple)) != null) { + peekingItr.next(); + peekingItr.close(); + continue; } + context.getConnection().addIteratorForLeaseRenewal(itr); + return peekingItr; + } + return EMPTY_ITERATOR; + } - @Override - public void explain(List planSteps) {} + @Override + public Tuple next() throws SQLException { + return currentIterator().next(); + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps) { + } - @Override - public void close() throws SQLException { - if (currentIterator != null) { - currentIterator.close(); - } - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } - @Override - public Tuple peek() throws SQLException { - return currentIterator().peek(); - } - + @Override + public void close() throws SQLException { + if (currentIterator != null) { + currentIterator.close(); + } } -} \ No newline at end of file + + @Override + public Tuple peek() throws SQLException { + return currentIterator().peek(); + } + + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeAwareQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeAwareQueue.java index 
73b3554451d..d953ab62df0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeAwareQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeAwareQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +22,6 @@ public interface SizeAwareQueue extends Queue, Closeable { - public long getByteSize(); + public long getByteSize(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeBoundQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeBoundQueue.java index 34c6fec8168..e956c5754e0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeBoundQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SizeBoundQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,73 +24,75 @@ public abstract class SizeBoundQueue extends AbstractQueue implements SizeAwareQueue { - private long maxSizeBytes; - private Queue delegate; - private long currentSize; + private long maxSizeBytes; + private Queue delegate; + private long currentSize; - public SizeBoundQueue(long maxSizeBytes, Queue delegate) { - assert maxSizeBytes > 0; - this.maxSizeBytes = maxSizeBytes; - this.delegate = delegate; - } + public SizeBoundQueue(long maxSizeBytes, Queue delegate) { + assert maxSizeBytes > 0; + this.maxSizeBytes = maxSizeBytes; + this.delegate = delegate; + } - abstract public long sizeOf(T e); + abstract public long sizeOf(T e); - @Override - public boolean offer(T e) { - boolean success = false; - long elementSize = sizeOf(e); - if ((currentSize + elementSize) < maxSizeBytes) { - success = delegate.offer(e); - if (success) { - currentSize += elementSize; - } - } - return success; + @Override + public boolean offer(T e) { + boolean success = false; + long elementSize = sizeOf(e); + if ((currentSize + elementSize) < maxSizeBytes) { + success = delegate.offer(e); + if (success) { + currentSize += elementSize; + } } + return success; + } - @Override - public boolean add(T e) { - try { - return super.add(e); - } catch (IllegalStateException ex) { - throw new IllegalStateException( - "Queue full. Consider increasing memory threshold or spooling to disk. Max size: " + maxSizeBytes + ", Current size: " + currentSize + ", Number of elements:" + size(), ex); - } + @Override + public boolean add(T e) { + try { + return super.add(e); + } catch (IllegalStateException ex) { + throw new IllegalStateException( + "Queue full. Consider increasing memory threshold or spooling to disk. 
Max size: " + + maxSizeBytes + ", Current size: " + currentSize + ", Number of elements:" + size(), + ex); } + } - @Override - public T poll() { - T e = delegate.poll(); - if (e != null) { - currentSize -= sizeOf(e); - } - return e; + @Override + public T poll() { + T e = delegate.poll(); + if (e != null) { + currentSize -= sizeOf(e); } + return e; + } - @Override - public T peek() { - return delegate.peek(); - } + @Override + public T peek() { + return delegate.peek(); + } - @Override - public void close() throws IOException { - delegate.clear(); - } + @Override + public void close() throws IOException { + delegate.clear(); + } - @Override - public long getByteSize() { - return currentSize; - } + @Override + public long getByteSize() { + return currentSize; + } - @Override - public Iterator iterator() { - return delegate.iterator(); - } + @Override + public Iterator iterator() { + return delegate.iterator(); + } - @Override - public int size() { - return delegate.size(); - } + @Override + public int size() { + return delegate.size(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolTooBigToDiskException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolTooBigToDiskException.java index fceb7fd3bb5..40727b42a25 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolTooBigToDiskException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolTooBigToDiskException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.iterate; /** - * Thrown by {@link org.apache.phoenix.iterate.SpoolingResultIterator } when - * result is too big to fit into memory and too big to spool to disk. - * - * - * + * Thrown by {@link org.apache.phoenix.iterate.SpoolingResultIterator } when result is too big to + * fit into memory and too big to spool to disk. */ public class SpoolTooBigToDiskException extends RuntimeException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public SpoolTooBigToDiskException(String msg) { - super(msg); - } + public SpoolTooBigToDiskException(String msg) { + super(msg); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java index 21f60553e1a..e51dbe330ae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,8 +36,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.memory.MemoryManager; @@ -50,324 +49,316 @@ import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.ResultUtil; -import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.TupleUtil; /** - * - * Result iterator that spools the results of a scan to disk once an in-memory threshold has been reached. - * If the in-memory threshold is not reached, the results are held in memory with no disk writing perfomed. - * + * Result iterator that spools the results of a scan to disk once an in-memory threshold has been + * reached. If the in-memory threshold is not reached, the results are held in memory with no disk + * writing perfomed. *
- * Spooling is deprecated and shouldn't be used while implementing new features. As of HBase 0.98.17, - * we rely on pacing the server side scanners instead of pulling rows from the server and potentially - * spooling to a temporary file created on clients. + * Spooling is deprecated and shouldn't be used while implementing new features. As of HBase + * 0.98.17, we rely on pacing the server side scanners instead of pulling rows from the server and + * potentially spooling to a temporary file created on clients. *
- * * @since 0.1 */ @Deprecated public class SpoolingResultIterator implements PeekingResultIterator { - - private final PeekingResultIterator spoolFrom; - private final SpoolingMetricsHolder spoolMetrics; - private final MemoryMetricsHolder memoryMetrics; - - /** - * Spooling is deprecated and shouldn't be used while implementing new features. As of HBase - * 0.98.17, we rely on pacing the server side scanners instead of pulling rows from the server - * and potentially spooling to a temporary file created on clients. - */ - @Deprecated - public static class SpoolingResultIteratorFactory implements ParallelIteratorFactory { - private final QueryServices services; - - public SpoolingResultIteratorFactory(QueryServices services) { - this.services = services; - } - @Override - public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName, QueryPlan plan) throws SQLException { - ReadMetricQueue readRequestMetric = context.getReadMetricsQueue(); - SpoolingMetricsHolder spoolMetrics = new SpoolingMetricsHolder(readRequestMetric, physicalTableName); - MemoryMetricsHolder memoryMetrics = new MemoryMetricsHolder(readRequestMetric, physicalTableName); - return new SpoolingResultIterator(spoolMetrics, memoryMetrics, scanner, services); - } + + private final PeekingResultIterator spoolFrom; + private final SpoolingMetricsHolder spoolMetrics; + private final MemoryMetricsHolder memoryMetrics; + + /** + * Spooling is deprecated and shouldn't be used while implementing new features. As of HBase + * 0.98.17, we rely on pacing the server side scanners instead of pulling rows from the server and + * potentially spooling to a temporary file created on clients. + */ + @Deprecated + public static class SpoolingResultIteratorFactory implements ParallelIteratorFactory { + private final QueryServices services; + + public SpoolingResultIteratorFactory(QueryServices services) { + this.services = services; } - private SpoolingResultIterator(SpoolingMetricsHolder spoolMetrics, MemoryMetricsHolder memoryMetrics, ResultIterator scanner, QueryServices services) throws SQLException { - this (spoolMetrics, memoryMetrics, scanner, services.getMemoryManager(), - services.getProps().getLongBytes(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES), - services.getProps() - .getLongBytes(QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES), - services.getProps().get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)); + @Override + public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, + Scan scan, String physicalTableName, QueryPlan plan) throws SQLException { + ReadMetricQueue readRequestMetric = context.getReadMetricsQueue(); + SpoolingMetricsHolder spoolMetrics = + new SpoolingMetricsHolder(readRequestMetric, physicalTableName); + MemoryMetricsHolder memoryMetrics = + new MemoryMetricsHolder(readRequestMetric, physicalTableName); + return new SpoolingResultIterator(spoolMetrics, memoryMetrics, scanner, services); } + } - /** - * Create a result iterator by iterating through the results of a scan, spooling them to disk once - * a threshold has been reached. The scanner passed in is closed prior to returning. - * @param scanner the results of a table scan - * @param mm memory manager tracking memory usage across threads. - * @param thresholdBytes the requested threshold. 
Will be dialed down if memory usage (as determined by - * the memory manager) is exceeded. - * @throws SQLException - */ - SpoolingResultIterator(SpoolingMetricsHolder sMetrics, MemoryMetricsHolder mMetrics, ResultIterator scanner, MemoryManager mm, final long thresholdBytes, final long maxSpoolToDisk, final String spoolDirectory) throws SQLException { - this.spoolMetrics = sMetrics; - this.memoryMetrics = mMetrics; - boolean success = false; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - final MemoryChunk chunk = mm.allocate(0, thresholdBytes); - long waitTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - GLOBAL_MEMORY_WAIT_TIME.update(waitTime); - memoryMetrics.getMemoryWaitTimeMetric().change(waitTime); - DeferredFileOutputStream spoolTo = null; - try { - // Can't be bigger than int, since it's the max of the above allocation - int size = (int)chunk.getSize(); - spoolTo = new DeferredFileOutputStream(size, "ResultSpooler",".bin", new File(spoolDirectory)) { - @Override - protected void thresholdReached() throws IOException { - try { - super.thresholdReached(); - } finally { - chunk.close(); - } - } - }; - DataOutputStream out = new DataOutputStream(spoolTo); - final long maxBytesAllowed = maxSpoolToDisk == -1 ? - Long.MAX_VALUE : thresholdBytes + maxSpoolToDisk; - long bytesWritten = 0L; - for (Tuple result = scanner.next(); result != null; result = scanner.next()) { - int length = TupleUtil.write(result, out); - bytesWritten += length; - if(bytesWritten > maxBytesAllowed){ - throw new SpoolTooBigToDiskException("result too big, max allowed(bytes): " + maxBytesAllowed); - } - } - if (spoolTo.isInMemory()) { - byte[] data = spoolTo.getData(); - chunk.resize(data.length); - spoolFrom = new InMemoryResultIterator(data, chunk); - GLOBAL_MEMORY_CHUNK_BYTES.update(data.length); - memoryMetrics.getMemoryChunkSizeMetric().change(data.length); - } else { - long sizeOfSpoolFile = spoolTo.getFile().length(); - GLOBAL_SPOOL_FILE_SIZE.update(sizeOfSpoolFile); - GLOBAL_SPOOL_FILE_COUNTER.increment(); - spoolMetrics.getNumSpoolFileMetric().increment(); - spoolMetrics.getSpoolFileSizeMetric().change(sizeOfSpoolFile); - spoolFrom = new OnDiskResultIterator(spoolTo.getFile()); - if (spoolTo.getFile() != null) { - spoolTo.getFile().deleteOnExit(); - } - } - success = true; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } finally { + private SpoolingResultIterator(SpoolingMetricsHolder spoolMetrics, + MemoryMetricsHolder memoryMetrics, ResultIterator scanner, QueryServices services) + throws SQLException { + this(spoolMetrics, memoryMetrics, scanner, services.getMemoryManager(), + services.getProps().getLongBytes(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES), + services.getProps().getLongBytes(QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES), + services.getProps().get(QueryServices.SPOOL_DIRECTORY, + QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)); + } + + /** + * Create a result iterator by iterating through the results of a scan, spooling them to disk once + * a threshold has been reached. The scanner passed in is closed prior to returning. + * @param scanner the results of a table scan + * @param mm memory manager tracking memory usage across threads. + * @param thresholdBytes the requested threshold. Will be dialed down if memory usage (as + * determined by the memory manager) is exceeded. 
+ */ + SpoolingResultIterator(SpoolingMetricsHolder sMetrics, MemoryMetricsHolder mMetrics, + ResultIterator scanner, MemoryManager mm, final long thresholdBytes, final long maxSpoolToDisk, + final String spoolDirectory) throws SQLException { + this.spoolMetrics = sMetrics; + this.memoryMetrics = mMetrics; + boolean success = false; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final MemoryChunk chunk = mm.allocate(0, thresholdBytes); + long waitTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + GLOBAL_MEMORY_WAIT_TIME.update(waitTime); + memoryMetrics.getMemoryWaitTimeMetric().change(waitTime); + DeferredFileOutputStream spoolTo = null; + try { + // Can't be bigger than int, since it's the max of the above allocation + int size = (int) chunk.getSize(); + spoolTo = + new DeferredFileOutputStream(size, "ResultSpooler", ".bin", new File(spoolDirectory)) { + @Override + protected void thresholdReached() throws IOException { try { - scanner.close(); + super.thresholdReached(); } finally { - try { - if (spoolTo != null) { - if(!success && spoolTo.getFile() != null){ - spoolTo.getFile().delete(); - } - spoolTo.close(); - } - } catch (IOException ignored) { - // ignore close error - } finally { - if (!success) { - chunk.close(); - } - } + chunk.close(); + } + } + }; + DataOutputStream out = new DataOutputStream(spoolTo); + final long maxBytesAllowed = + maxSpoolToDisk == -1 ? Long.MAX_VALUE : thresholdBytes + maxSpoolToDisk; + long bytesWritten = 0L; + for (Tuple result = scanner.next(); result != null; result = scanner.next()) { + int length = TupleUtil.write(result, out); + bytesWritten += length; + if (bytesWritten > maxBytesAllowed) { + throw new SpoolTooBigToDiskException( + "result too big, max allowed(bytes): " + maxBytesAllowed); + } + } + if (spoolTo.isInMemory()) { + byte[] data = spoolTo.getData(); + chunk.resize(data.length); + spoolFrom = new InMemoryResultIterator(data, chunk); + GLOBAL_MEMORY_CHUNK_BYTES.update(data.length); + memoryMetrics.getMemoryChunkSizeMetric().change(data.length); + } else { + long sizeOfSpoolFile = spoolTo.getFile().length(); + GLOBAL_SPOOL_FILE_SIZE.update(sizeOfSpoolFile); + GLOBAL_SPOOL_FILE_COUNTER.increment(); + spoolMetrics.getNumSpoolFileMetric().increment(); + spoolMetrics.getSpoolFileSizeMetric().change(sizeOfSpoolFile); + spoolFrom = new OnDiskResultIterator(spoolTo.getFile()); + if (spoolTo.getFile() != null) { + spoolTo.getFile().deleteOnExit(); + } + } + success = true; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } finally { + try { + scanner.close(); + } finally { + try { + if (spoolTo != null) { + if (!success && spoolTo.getFile() != null) { + spoolTo.getFile().delete(); } + spoolTo.close(); + } + } catch (IOException ignored) { + // ignore close error + } finally { + if (!success) { + chunk.close(); + } } + } + } + } + + @Override + public Tuple peek() throws SQLException { + return spoolFrom.peek(); + } + + @Override + public Tuple next() throws SQLException { + return spoolFrom.next(); + } + + @Override + public void close() throws SQLException { + spoolFrom.close(); + } + + /** + * Backing result iterator if it was not necessary to spool results to disk. 
+ * @since 0.1 + */ + private static class InMemoryResultIterator implements PeekingResultIterator { + private final MemoryChunk memoryChunk; + private final byte[] bytes; + private Tuple next; + private int offset; + + private InMemoryResultIterator(byte[] bytes, MemoryChunk memoryChunk) throws SQLException { + this.bytes = bytes; + this.memoryChunk = memoryChunk; + advance(); + } + + private Tuple advance() throws SQLException { + if (offset >= bytes.length) { + return next = null; + } + int resultSize = ByteUtil.vintFromBytes(bytes, offset); + offset += WritableUtils.getVIntSize(resultSize); + ImmutableBytesWritable value = new ImmutableBytesWritable(bytes, offset, resultSize); + offset += resultSize; + Tuple result = new ResultTuple(ResultUtil.toResult(value)); + return next = result; } @Override public Tuple peek() throws SQLException { - return spoolFrom.peek(); + return next; } @Override public Tuple next() throws SQLException { - return spoolFrom.next(); + Tuple current = next; + advance(); + return current; } @Override - public void close() throws SQLException { - spoolFrom.close(); + public void close() { + memoryChunk.close(); } - /** - * - * Backing result iterator if it was not necessary to spool results to disk. - * - * - * @since 0.1 - */ - private static class InMemoryResultIterator implements PeekingResultIterator { - private final MemoryChunk memoryChunk; - private final byte[] bytes; - private Tuple next; - private int offset; - - private InMemoryResultIterator(byte[] bytes, MemoryChunk memoryChunk) throws SQLException { - this.bytes = bytes; - this.memoryChunk = memoryChunk; - advance(); - } - - private Tuple advance() throws SQLException { - if (offset >= bytes.length) { - return next = null; - } - int resultSize = ByteUtil.vintFromBytes(bytes, offset); - offset += WritableUtils.getVIntSize(resultSize); - ImmutableBytesWritable value = new ImmutableBytesWritable(bytes,offset,resultSize); - offset += resultSize; - Tuple result = new ResultTuple(ResultUtil.toResult(value)); - return next = result; - } - - @Override - public Tuple peek() throws SQLException { - return next; - } - - @Override - public Tuple next() throws SQLException { - Tuple current = next; - advance(); - return current; - } - - @Override - public void close() { - memoryChunk.close(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps) { } - /** - * - * Backing result iterator if results were spooled to disk - * - * - * @since 0.1 - */ - private static class OnDiskResultIterator implements PeekingResultIterator { - private final File file; - private DataInputStream spoolFrom; - private Tuple next; - private boolean isClosed; - - private OnDiskResultIterator (File file) { - this.file = file; - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + } - private synchronized void init() throws IOException { - if (spoolFrom == null) { - spoolFrom = new DataInputStream(new BufferedInputStream(Files.newInputStream(file.toPath()))); - advance(); - } - } + /** + * Backing result iterator if results were spooled to disk + * @since 0.1 + */ + private static class OnDiskResultIterator implements PeekingResultIterator { + private final File file; + private DataInputStream spoolFrom; + private Tuple next; + private boolean isClosed; - private synchronized void reachedEnd() 
throws IOException { - next = null; - isClosed = true; - try { - if (spoolFrom != null) { - spoolFrom.close(); - } - } finally { - file.delete(); - } - } + private OnDiskResultIterator(File file) { + this.file = file; + } - private synchronized Tuple advance() throws IOException { - if (isClosed) { - return next; - } - int length; - try { - length = WritableUtils.readVInt(spoolFrom); - } catch (EOFException e) { - reachedEnd(); - return next; - } - int totalBytesRead = 0; - int offset = 0; - byte[] buffer = new byte[length]; - while(totalBytesRead < length) { - int bytesRead = spoolFrom.read(buffer, offset, length); - if (bytesRead == -1) { - reachedEnd(); - return next; - } - offset += bytesRead; - totalBytesRead += bytesRead; - } - next = new ResultTuple(ResultUtil.toResult(new ImmutableBytesWritable(buffer,0,length))); - return next; - } + private synchronized void init() throws IOException { + if (spoolFrom == null) { + spoolFrom = + new DataInputStream(new BufferedInputStream(Files.newInputStream(file.toPath()))); + advance(); + } + } - @Override - public synchronized Tuple peek() throws SQLException { - try { - init(); - return next; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } + private synchronized void reachedEnd() throws IOException { + next = null; + isClosed = true; + try { + if (spoolFrom != null) { + spoolFrom.close(); } + } finally { + file.delete(); + } + } - @Override - public synchronized Tuple next() throws SQLException { - try { - init(); - Tuple current = next; - advance(); - return current; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } + private synchronized Tuple advance() throws IOException { + if (isClosed) { + return next; + } + int length; + try { + length = WritableUtils.readVInt(spoolFrom); + } catch (EOFException e) { + reachedEnd(); + return next; + } + int totalBytesRead = 0; + int offset = 0; + byte[] buffer = new byte[length]; + while (totalBytesRead < length) { + int bytesRead = spoolFrom.read(buffer, offset, length); + if (bytesRead == -1) { + reachedEnd(); + return next; } + offset += bytesRead; + totalBytesRead += bytesRead; + } + next = new ResultTuple(ResultUtil.toResult(new ImmutableBytesWritable(buffer, 0, length))); + return next; + } - @Override - public synchronized void close() throws SQLException { - try { - if (!isClosed) { - reachedEnd(); - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } + @Override + public synchronized Tuple peek() throws SQLException { + try { + init(); + return next; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } - @Override - public void explain(List planSteps) { - } + @Override + public synchronized Tuple next() throws SQLException { + try { + init(); + Tuple current = next; + advance(); + return current; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + @Override + public synchronized void close() throws SQLException { + try { + if (!isClosed) { + reachedEnd(); } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } } @Override @@ -376,6 +367,16 @@ public void explain(List planSteps) { @Override public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { } + } + + @Override + public void explain(List planSteps) { + } + + 
@Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java index 98b2c8d8379..55b85b6bc94 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,8 +43,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.coprocessorclient.HashJoinCacheNotFoundException; import org.apache.phoenix.exception.ResultSetOutOfScanRangeException; @@ -56,6 +55,7 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.Closeables; @@ -65,312 +65,332 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - - /** - * - * Wrapper for ResultScanner creation that closes HTableInterface - * when ResultScanner is closed. - * - * + * Wrapper for ResultScanner creation that closes HTableInterface when ResultScanner is closed. * @since 0.1 */ public class TableResultIterator implements ResultIterator { - private final Scan scan; - private final Table htable; - private final ScanMetricsHolder scanMetricsHolder; - private static final ResultIterator UNINITIALIZED_SCANNER = ResultIterator.EMPTY_ITERATOR; - private final long renewLeaseThreshold; - private final QueryPlan plan; - private final ParallelScanGrouper scanGrouper; - private static final Logger LOGGER = LoggerFactory.getLogger(TableResultIterator.class); - private Tuple lastTuple = null; - private ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - @GuardedBy("renewLeaseLock") - private ResultIterator scanIterator; - - @GuardedBy("renewLeaseLock") - private boolean closed = false; - - @GuardedBy("renewLeaseLock") - private long renewLeaseTime = 0; - - private final Lock renewLeaseLock = new ReentrantLock(); - - private int retry; - private Map caches; - private HashCacheClient hashCacheClient; - - private final boolean isMapReduceContext; - private final long maxQueryEndTime; - - @VisibleForTesting // Exposed for testing. DON'T USE ANYWHERE ELSE! 
- TableResultIterator() { - this.scanMetricsHolder = null; - this.renewLeaseThreshold = 0; - this.htable = null; - this.scan = null; - this.plan = null; - this.scanGrouper = null; - this.caches = null; - this.retry = 0; - this.isMapReduceContext = false; - this.maxQueryEndTime = Long.MAX_VALUE; - } - - public static enum RenewLeaseStatus { - RENEWED, NOT_RENEWED, CLOSED, UNINITIALIZED, THRESHOLD_NOT_REACHED, LOCK_NOT_ACQUIRED, NOT_SUPPORTED - }; - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper, long maxQueryEndTime) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, false, maxQueryEndTime); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper,Map caches, long maxQueryEndTime) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, false, maxQueryEndTime); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper, boolean isMapReduceContext, long maxQueryEndTime) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, isMapReduceContext, maxQueryEndTime); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper,Map caches, - boolean isMapReduceContext, long maxQueryEndTime) throws SQLException { - this.scan = scan; - this.scanMetricsHolder = scanMetricsHolder; - this.plan = plan; - PTable table = plan.getTableRef().getTable(); - htable = mutationState.getHTable(table); - this.scanIterator = UNINITIALIZED_SCANNER; - this.renewLeaseThreshold = renewLeaseThreshold; - this.scanGrouper = scanGrouper; - this.hashCacheClient = new HashCacheClient(plan.getContext().getConnection()); - this.caches = caches; - this.retry=plan.getContext().getConnection().getQueryServices().getProps() - .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES, QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES); - this.isMapReduceContext = isMapReduceContext; - this.maxQueryEndTime = maxQueryEndTime; - ScanUtil.setScanAttributesForClient(scan, table, plan.getContext()); - } - - // Constructors without maxQueryEndTime to maintain API compatibility for phoenix-connectors - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, false, Long.MAX_VALUE); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper,Map caches) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, false, Long.MAX_VALUE); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper, boolean 
isMapReduceContext) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, isMapReduceContext, Long.MAX_VALUE); - } - - public TableResultIterator(MutationState mutationState, Scan scan, ScanMetricsHolder scanMetricsHolder, - long renewLeaseThreshold, QueryPlan plan, ParallelScanGrouper scanGrouper,Map caches, - boolean isMapReduceContext) throws SQLException { - this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, isMapReduceContext, Long.MAX_VALUE); - } - // End Constructors without maxQueryEndTime to maintain API compatibility for phoenix-connectors - - @Override - public void close() throws SQLException { + private final Scan scan; + private final Table htable; + private final ScanMetricsHolder scanMetricsHolder; + private static final ResultIterator UNINITIALIZED_SCANNER = ResultIterator.EMPTY_ITERATOR; + private final long renewLeaseThreshold; + private final QueryPlan plan; + private final ParallelScanGrouper scanGrouper; + private static final Logger LOGGER = LoggerFactory.getLogger(TableResultIterator.class); + private Tuple lastTuple = null; + private ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + @GuardedBy("renewLeaseLock") + private ResultIterator scanIterator; + + @GuardedBy("renewLeaseLock") + private boolean closed = false; + + @GuardedBy("renewLeaseLock") + private long renewLeaseTime = 0; + + private final Lock renewLeaseLock = new ReentrantLock(); + + private int retry; + private Map caches; + private HashCacheClient hashCacheClient; + + private final boolean isMapReduceContext; + private final long maxQueryEndTime; + + @VisibleForTesting // Exposed for testing. DON'T USE ANYWHERE ELSE! + TableResultIterator() { + this.scanMetricsHolder = null; + this.renewLeaseThreshold = 0; + this.htable = null; + this.scan = null; + this.plan = null; + this.scanGrouper = null; + this.caches = null; + this.retry = 0; + this.isMapReduceContext = false; + this.maxQueryEndTime = Long.MAX_VALUE; + } + + public static enum RenewLeaseStatus { + RENEWED, + NOT_RENEWED, + CLOSED, + UNINITIALIZED, + THRESHOLD_NOT_REACHED, + LOCK_NOT_ACQUIRED, + NOT_SUPPORTED + }; + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, long maxQueryEndTime) throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, + false, maxQueryEndTime); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches, + long maxQueryEndTime) throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, + false, maxQueryEndTime); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, boolean isMapReduceContext, long maxQueryEndTime) + throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, + isMapReduceContext, maxQueryEndTime); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches, + boolean 
isMapReduceContext, long maxQueryEndTime) throws SQLException { + this.scan = scan; + this.scanMetricsHolder = scanMetricsHolder; + this.plan = plan; + PTable table = plan.getTableRef().getTable(); + htable = mutationState.getHTable(table); + this.scanIterator = UNINITIALIZED_SCANNER; + this.renewLeaseThreshold = renewLeaseThreshold; + this.scanGrouper = scanGrouper; + this.hashCacheClient = new HashCacheClient(plan.getContext().getConnection()); + this.caches = caches; + this.retry = plan.getContext().getConnection().getQueryServices().getProps().getInt( + QueryConstants.HASH_JOIN_CACHE_RETRIES, QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES); + this.isMapReduceContext = isMapReduceContext; + this.maxQueryEndTime = maxQueryEndTime; + ScanUtil.setScanAttributesForClient(scan, table, plan.getContext()); + } + + // Constructors without maxQueryEndTime to maintain API compatibility for phoenix-connectors + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper) throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, + false, Long.MAX_VALUE); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches) + throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, + false, Long.MAX_VALUE); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, boolean isMapReduceContext) throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, null, + isMapReduceContext, Long.MAX_VALUE); + } + + public TableResultIterator(MutationState mutationState, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches, + boolean isMapReduceContext) throws SQLException { + this(mutationState, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, + isMapReduceContext, Long.MAX_VALUE); + } + // End Constructors without maxQueryEndTime to maintain API compatibility for phoenix-connectors + + @Override + public void close() throws SQLException { + try { + renewLeaseLock.lock(); + closed = true; // ok to say closed even if the below code throws an exception + try { + scanIterator.close(); + } finally { try { - renewLeaseLock.lock(); - closed = true; // ok to say closed even if the below code throws an exception - try { - scanIterator.close(); - } finally { - try { - scanIterator = UNINITIALIZED_SCANNER; - htable.close(); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - } finally { - renewLeaseLock.unlock(); + scanIterator = UNINITIALIZED_SCANNER; + htable.close(); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); } - + } + } finally { + renewLeaseLock.unlock(); } - @Override - public Tuple next() throws SQLException { - try { - renewLeaseLock.lock(); - initScanner(); - try { - lastTuple = scanIterator.next(); - if (lastTuple != null) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - lastTuple.getKey(ptr); - try { - ScanUtil.verifyKeyInScanRange(ptr, scan); - } catch (ResultSetOutOfScanRangeException 
e) { - LOGGER.error("Row key {} of table {} is out of scan range. Scan start " - + "key: {} , end key: {} , _ScanActualStartRow: {} , " - + "_ScanStartRowSuffix: {} , _ScanStopRowSuffix: {} , " - + "scan attributes: {}", - Bytes.toStringBinary(ptr.get()), - htable.getName(), - Bytes.toStringBinary(scan.getStartRow()), - Bytes.toStringBinary(scan.getStopRow()), - Bytes.toStringBinary(scan.getAttribute(SCAN_ACTUAL_START_ROW)), - Bytes.toStringBinary(scan.getAttribute(SCAN_START_ROW_SUFFIX)), - Bytes.toStringBinary(scan.getAttribute(SCAN_STOP_ROW_SUFFIX)), - scan.getAttributesMap(), - e); - throw e; - } - } - } catch (SQLException e) { - LOGGER.error("Error while scanning table {} , scan {}", htable, scan); - try { - throw ClientUtil.parseServerException(e); - } catch(HashJoinCacheNotFoundException e1) { - if(ScanUtil.isNonAggregateScan(scan) && plan.getContext().getAggregationManager().isEmpty()) { - // For non aggregate queries if we get stale region boundary exception we can - // continue scanning from the next value of lasted fetched result. - Scan newScan = ScanUtil.newScan(scan); - newScan.withStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW)); - if(lastTuple != null) { - lastTuple.getKey(ptr); - byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr); - if(ScanUtil.isLocalIndex(newScan)) { - // If we just set scan start row suffix then server side we prepare - // actual scan boundaries by prefixing the region start key. - newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix)); - } else { - newScan.withStartRow(ByteUtil.nextKey(startRowSuffix)); - } - } - plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getName()); - LOGGER.debug( - "Retrying when Hash Join cache is not found on the server ,by sending the cache again"); - if (retry <= 0) { - throw e1; - } - Long cacheId = e1.getCacheId(); - retry--; - try { - ServerCache cache = caches == null ? null : - caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))); - if (!hashCacheClient.addHashCacheToServer(newScan.getStartRow(), - cache, plan.getTableRef().getTable())) { - throw e1; - } - this.scanIterator = ((BaseQueryPlan) plan).iterator(caches, scanGrouper, newScan); - - } catch (Exception ex) { - throw ClientUtil.parseServerException(ex); - } - lastTuple = scanIterator.next(); - } else { - throw e; - } - } - } - return TupleUtil.getAggregateGroupTuple(lastTuple); - } finally { - renewLeaseLock.unlock(); + } + + @Override + public Tuple next() throws SQLException { + try { + renewLeaseLock.lock(); + initScanner(); + try { + lastTuple = scanIterator.next(); + if (lastTuple != null) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + lastTuple.getKey(ptr); + try { + ScanUtil.verifyKeyInScanRange(ptr, scan); + } catch (ResultSetOutOfScanRangeException e) { + LOGGER.error( + "Row key {} of table {} is out of scan range. 
Scan start " + + "key: {} , end key: {} , _ScanActualStartRow: {} , " + + "_ScanStartRowSuffix: {} , _ScanStopRowSuffix: {} , " + "scan attributes: {}", + Bytes.toStringBinary(ptr.get()), htable.getName(), + Bytes.toStringBinary(scan.getStartRow()), Bytes.toStringBinary(scan.getStopRow()), + Bytes.toStringBinary(scan.getAttribute(SCAN_ACTUAL_START_ROW)), + Bytes.toStringBinary(scan.getAttribute(SCAN_START_ROW_SUFFIX)), + Bytes.toStringBinary(scan.getAttribute(SCAN_STOP_ROW_SUFFIX)), + scan.getAttributesMap(), e); + throw e; + } } - } - - public void initScanner() throws SQLException { + } catch (SQLException e) { + LOGGER.error("Error while scanning table {} , scan {}", htable, scan); try { - renewLeaseLock.lock(); - if (closed) { - return; + throw ClientUtil.parseServerException(e); + } catch (HashJoinCacheNotFoundException e1) { + if ( + ScanUtil.isNonAggregateScan(scan) && plan.getContext().getAggregationManager().isEmpty() + ) { + // For non aggregate queries if we get stale region boundary exception we can + // continue scanning from the next value of lasted fetched result. + Scan newScan = ScanUtil.newScan(scan); + newScan.withStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW)); + if (lastTuple != null) { + lastTuple.getKey(ptr); + byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr); + if (ScanUtil.isLocalIndex(newScan)) { + // If we just set scan start row suffix then server side we prepare + // actual scan boundaries by prefixing the region start key. + newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix)); + } else { + newScan.withStartRow(ByteUtil.nextKey(startRowSuffix)); + } + } + plan.getContext().getConnection().getQueryServices() + .clearTableRegionCache(htable.getName()); + LOGGER.debug( + "Retrying when Hash Join cache is not found on the server ,by sending the cache again"); + if (retry <= 0) { + throw e1; } - ResultIterator delegate = this.scanIterator; - if (delegate == UNINITIALIZED_SCANNER) { - try { - // It is important to update the scan boundaries for the reverse scan - // and set the scan as reverse at the client side rather than update it - // at the server side. Updating reverse scan boundaries at the server side - // can lead to incorrect results if the region moves in the middle of the - // ongoing scans. - if (ScanUtil.isReversed(scan)) { - ScanUtil.setupReverseScan(scan); - } - this.scanIterator = - new ScanningResultIterator(htable.getScanner(scan), scan, - scanMetricsHolder, plan.getContext(), isMapReduceContext, - maxQueryEndTime); - } catch (IOException e) { - Closeables.closeQuietly(htable); - throw ClientUtil.parseServerException(e); - } + Long cacheId = e1.getCacheId(); + retry--; + try { + ServerCache cache = + caches == null ? 
null : caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))); + if ( + !hashCacheClient.addHashCacheToServer(newScan.getStartRow(), cache, + plan.getTableRef().getTable()) + ) { + throw e1; + } + this.scanIterator = ((BaseQueryPlan) plan).iterator(caches, scanGrouper, newScan); + + } catch (Exception ex) { + throw ClientUtil.parseServerException(ex); } - } finally { - renewLeaseLock.unlock(); + lastTuple = scanIterator.next(); + } else { + throw e; + } } + } + return TupleUtil.getAggregateGroupTuple(lastTuple); + } finally { + renewLeaseLock.unlock(); } - - @Override - public String toString() { - return "TableResultIterator [htable=" + htable + ", scan=" + scan + "]"; - } - - public RenewLeaseStatus renewLease() { - boolean lockAcquired = false; + } + + public void initScanner() throws SQLException { + try { + renewLeaseLock.lock(); + if (closed) { + return; + } + ResultIterator delegate = this.scanIterator; + if (delegate == UNINITIALIZED_SCANNER) { try { - lockAcquired = renewLeaseLock.tryLock(); - if (lockAcquired) { - if (closed) { - return CLOSED; - } - if (scanIterator == UNINITIALIZED_SCANNER) { - return UNINITIALIZED; - } - long delay = now() - renewLeaseTime; - if (delay < renewLeaseThreshold) { - return THRESHOLD_NOT_REACHED; - } - if (scanIterator instanceof ScanningResultIterator - && ((ScanningResultIterator)scanIterator).getScanner() instanceof AbstractClientScanner) { - // Need this explicit cast because HBase's ResultScanner doesn't have this method exposed. - boolean leaseRenewed = ((AbstractClientScanner)((ScanningResultIterator)scanIterator).getScanner()).renewLease(); - if (leaseRenewed) { - renewLeaseTime = now(); - return RENEWED; - } else { - return NOT_RENEWED; - } - } else { - return NOT_SUPPORTED; - } - } - return LOCK_NOT_ACQUIRED; - } - finally { - if (lockAcquired) { - renewLeaseLock.unlock(); - } + // It is important to update the scan boundaries for the reverse scan + // and set the scan as reverse at the client side rather than update it + // at the server side. Updating reverse scan boundaries at the server side + // can lead to incorrect results if the region moves in the middle of the + // ongoing scans. 
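A rough sketch of the client-side reversal the comment above refers to; the class and parameter names are illustrative, and the precise inclusive/exclusive boundary handling is exactly what ScanUtil.setupReverseScan(scan) takes care of inside Phoenix:

import org.apache.hadoop.hbase.client.Scan;

final class ReverseScanSketch {
  // A reversed HBase scan starts at its larger boundary and walks back toward the
  // smaller one, so the forward-scan boundaries swap places when the flag is set.
  static Scan reverse(byte[] lowerBound, byte[] upperBound) {
    Scan scan = new Scan();
    scan.setReversed(true);
    scan.withStartRow(upperBound); // entry point of the reversed scan
    scan.withStopRow(lowerBound);  // the reversed scan finishes near the lower boundary
    return scan;
  }
}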
+ if (ScanUtil.isReversed(scan)) { + ScanUtil.setupReverseScan(scan); + } + this.scanIterator = new ScanningResultIterator(htable.getScanner(scan), scan, + scanMetricsHolder, plan.getContext(), isMapReduceContext, maxQueryEndTime); + } catch (IOException e) { + Closeables.closeQuietly(htable); + throw ClientUtil.parseServerException(e); } + } + } finally { + renewLeaseLock.unlock(); } - - private static long now() { - return EnvironmentEdgeManager.currentTimeMillis(); - } - - @Override - public void explain(List planSteps) { - scanIterator.explain(planSteps); - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - scanIterator.explain(planSteps, explainPlanAttributesBuilder); + } + + @Override + public String toString() { + return "TableResultIterator [htable=" + htable + ", scan=" + scan + "]"; + } + + public RenewLeaseStatus renewLease() { + boolean lockAcquired = false; + try { + lockAcquired = renewLeaseLock.tryLock(); + if (lockAcquired) { + if (closed) { + return CLOSED; + } + if (scanIterator == UNINITIALIZED_SCANNER) { + return UNINITIALIZED; + } + long delay = now() - renewLeaseTime; + if (delay < renewLeaseThreshold) { + return THRESHOLD_NOT_REACHED; + } + if ( + scanIterator instanceof ScanningResultIterator + && ((ScanningResultIterator) scanIterator).getScanner() instanceof AbstractClientScanner + ) { + // Need this explicit cast because HBase's ResultScanner doesn't have this method exposed. + boolean leaseRenewed = + ((AbstractClientScanner) ((ScanningResultIterator) scanIterator).getScanner()) + .renewLease(); + if (leaseRenewed) { + renewLeaseTime = now(); + return RENEWED; + } else { + return NOT_RENEWED; + } + } else { + return NOT_SUPPORTED; + } + } + return LOCK_NOT_ACQUIRED; + } finally { + if (lockAcquired) { + renewLeaseLock.unlock(); + } } + } + + private static long now() { + return EnvironmentEdgeManager.currentTimeMillis(); + } + + @Override + public void explain(List planSteps) { + scanIterator.explain(planSteps); + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + scanIterator.explain(planSteps, explainPlanAttributesBuilder); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIteratorFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIteratorFactory.java index fb573bfb236..94392be06da 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIteratorFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableResultIteratorFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,9 +29,9 @@ import org.apache.phoenix.schema.TableRef; public interface TableResultIteratorFactory { - public TableResultIterator newIterator(MutationState mutationState, TableRef tableRef, - Scan scan, ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, - QueryPlan plan, ParallelScanGrouper scanGrouper, Map caches, - long maxQueryEndTime) throws SQLException; + public TableResultIterator newIterator(MutationState mutationState, TableRef tableRef, Scan scan, + ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan, + ParallelScanGrouper scanGrouper, Map caches, + long maxQueryEndTime) throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableSamplerPredicate.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableSamplerPredicate.java index 19b6b5f8238..6d8c96a9d61 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableSamplerPredicate.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/TableSamplerPredicate.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,95 +20,70 @@ import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; /** - * TableSampler. - * - * A dice rolling on every targeted row to decide if this row is going - * to be picked or not. - * An application is table Sampler, based on boolean result, this row is - * then picked (or rejected) to be part of sample set. - * - * Currently implemented using FNV1a with Lazy mod mapping method to ensure - * the even distribution of hashed result, so that the final sampled result - * will be close to the size of expected - * + * TableSampler. A dice rolling on every targeted row to decide if this row is going to be picked or + * not. An application is table Sampler, based on boolean result, this row is then picked (or + * rejected) to be part of sample set. 
Currently implemented using FNV1a with Lazy mod mapping + * method to ensure the even distribution of hashed result, so that the final sampled result will be + * close to the size of expected */ -public class TableSamplerPredicate implements Predicate{ - private final double tableSamplingRate; - - private TableSamplerPredicate(double tableSamplingRate){ - this.tableSamplingRate=tableSamplingRate; - } - - public static TableSamplerPredicate of(final Double tableSamplingRateRaw){ - assert(tableSamplingRateRaw!=null):"tableSamplingRate can not be null"; - assert(tableSamplingRateRaw>=0d&&tableSamplingRateRaw<=100d):"tableSamplingRate input has to be a rational number between 0 and 100"; - TableSamplerPredicate self=new TableSamplerPredicate(tableSamplingRateRaw); - return self; - } - - @Override - public boolean apply(byte[] bytes) { - final int hashcode_FNV1Lazy=FNV1LazyImpl(bytes); - final boolean result=evaluateWithChance(hashcode_FNV1Lazy); - return result; - } - - /** - * Take build in FNV1a Hash function then apply lazy mod mapping method so that the - * hash is evenly distributed between 0 and 100. - * - * Quoted from http://isthe.com/chongo/tech/comp/fnv/, - * The FNV hash is designed for hash sizes that are a power of 2. - * If you need a hash size that is not a power of two, then you have two choices. - * One method is called the lazy mod mapping method and the other is called the retry method. - * Both involve mapping a range that is a power of 2 onto an arbitrary range. - * - * Lazy mod mapping method: The lazy mod mapping method uses a simple mod on an n-bit hash - * to yield an arbitrary range. - * To produce a hash range between 0 and X use a n-bit FNV hash where n is smallest FNV hash - * that will produce values larger than X without the need for xor-folding. - * - * For example, to produce a value between 0 and 2142779559 using the lazy mod mapping method, - * we select a 32-bit FNV hash because: 2 power 32 > 49999 - * Before the final mod 50000 is performed, - * we check to see if the 32-bit FNV hash value is one of the upper biased values. - * If it is, we perform additional loop cycles until is below the bias level. - * - * An advantage of the lazy mod mapping method is that it requires only 1 more operation: - * only an additional mod is performed at the end. - * The disadvantage of the lazy mod mapping method is that there is a bias against - * the larger values. - * - * @param bytes - * @return - */ - final static private int FNV1LazyImpl(final byte[] bytes){ - final int contentBasedHashCode = java.util.Arrays.hashCode(bytes); - return lazyRedistribute(contentBasedHashCode); - } - - - /** - * Lazy mod mapping method Implementation - * - * Output result should be following the same distribution as input hashcode, - * however re-mapped between 0 and 100. 
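Put differently, the predicate hashes the row key, folds the hash into the range 0..99 with a single modulo, and keeps the row only when that bucket falls below the configured sampling rate. A self-contained sketch of that decision (class and method names are made up; the hashing and folding mirror FNV1LazyImpl and lazyRedistribute in this class):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class LazyModSamplingSketch {
  // Deterministically keeps roughly samplingRate percent of rows: the same key
  // always lands in the same bucket, so re-running the sample is repeatable.
  static boolean keep(byte[] rowKey, double samplingRate) {
    int hash = Arrays.hashCode(rowKey);   // content-based hash of the key
    int bucket = Math.abs(hash % 100);    // lazy mod mapping into 0..99
    return bucket < samplingRate;         // e.g. a rate of 25.0 keeps buckets 0..24
  }

  public static void main(String[] args) {
    byte[] key = "row-0001".getBytes(StandardCharsets.UTF_8);
    System.out.println(keep(key, 25.0));
  }
}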
- * - * @param hashcode - * @return - */ - final static private int lazyRedistribute(final int hashcode){ - return java.lang.Math.abs(hashcode%100); - } - - /** - * - * @param hashcode - * @return - */ - final private boolean evaluateWithChance(final int hashcode){ - assert((hashcode>=0)&&(hashcode<=100)):"hashcode should be re-distribute into 0 to 100"; - return (hashcode { + private final double tableSamplingRate; + + private TableSamplerPredicate(double tableSamplingRate) { + this.tableSamplingRate = tableSamplingRate; + } + + public static TableSamplerPredicate of(final Double tableSamplingRateRaw) { + assert (tableSamplingRateRaw != null) : "tableSamplingRate can not be null"; + assert (tableSamplingRateRaw >= 0d && tableSamplingRateRaw <= 100d) + : "tableSamplingRate input has to be a rational number between 0 and 100"; + TableSamplerPredicate self = new TableSamplerPredicate(tableSamplingRateRaw); + return self; + } + + @Override + public boolean apply(byte[] bytes) { + final int hashcode_FNV1Lazy = FNV1LazyImpl(bytes); + final boolean result = evaluateWithChance(hashcode_FNV1Lazy); + return result; + } + + /** + * Take build in FNV1a Hash function then apply lazy mod mapping method so that the hash is evenly + * distributed between 0 and 100. Quoted from http://isthe.com/chongo/tech/comp/fnv/, The FNV hash + * is designed for hash sizes that are a power of 2. If you need a hash size that is not a power + * of two, then you have two choices. One method is called the lazy mod mapping method and the + * other is called the retry method. Both involve mapping a range that is a power of 2 onto an + * arbitrary range. Lazy mod mapping method: The lazy mod mapping method uses a simple mod on an + * n-bit hash to yield an arbitrary range. To produce a hash range between 0 and X use a n-bit FNV + * hash where n is smallest FNV hash that will produce values larger than X without the need for + * xor-folding. For example, to produce a value between 0 and 2142779559 using the lazy mod + * mapping method, we select a 32-bit FNV hash because: 2 power 32 > 49999 Before the final mod + * 50000 is performed, we check to see if the 32-bit FNV hash value is one of the upper biased + * values. If it is, we perform additional loop cycles until is below the bias level. An advantage + * of the lazy mod mapping method is that it requires only 1 more operation: only an additional + * mod is performed at the end. The disadvantage of the lazy mod mapping method is that there is a + * bias against the larger values. + */ + final static private int FNV1LazyImpl(final byte[] bytes) { + final int contentBasedHashCode = java.util.Arrays.hashCode(bytes); + return lazyRedistribute(contentBasedHashCode); + } + + /** + * Lazy mod mapping method Implementation Output result should be following the same distribution + * as input hashcode, however re-mapped between 0 and 100. + */ + final static private int lazyRedistribute(final int hashcode) { + return java.lang.Math.abs(hashcode % 100); + } + + /** + */ + final private boolean evaluateWithChance(final int hashcode) { + assert ((hashcode >= 0) && (hashcode <= 100)) + : "hashcode should be re-distribute into 0 to 100"; + return (hashcode < tableSamplingRate) ? 
true : false; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java index d19c5b22ca7..780069a4a52 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UngroupedAggregatingResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,52 +27,52 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.PhoenixKeyValueUtil; - public class UngroupedAggregatingResultIterator extends GroupedAggregatingResultIterator { - private boolean hasRows = false; + private boolean hasRows = false; - public UngroupedAggregatingResultIterator( PeekingResultIterator resultIterator, Aggregators aggregators) { - super(resultIterator, aggregators); - } - @Override - public Tuple next() throws SQLException { - Tuple result = resultIterator.next(); - if (result == null) { - // Ensure ungrouped aggregregation always returns a row, even if the underlying iterator doesn't. - if (!hasRows) { - // We should reset ClientAggregators here in case they are being reused in a new ResultIterator. - aggregators.reset(aggregators.getAggregators()); - byte[] value = aggregators.toBytes(aggregators.getAggregators()); - result = new SingleKeyValueTuple( - PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - AGG_TIMESTAMP, - value)); - } - } else { - Aggregator[] rowAggregators = aggregators.getAggregators(); - aggregators.reset(rowAggregators); - while (true) { - aggregators.aggregate(rowAggregators, result); - Tuple nextResult = resultIterator.peek(); - if (nextResult == null) { - break; - } - result = resultIterator.next(); - } + public UngroupedAggregatingResultIterator(PeekingResultIterator resultIterator, + Aggregators aggregators) { + super(resultIterator, aggregators); + } - byte[] value = aggregators.toBytes(rowAggregators); - Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil .newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); - result = tuple; + @Override + public Tuple next() throws SQLException { + Tuple result = resultIterator.next(); + if (result == null) { + // Ensure ungrouped aggregregation always returns a row, even if the underlying iterator + // doesn't. + if (!hasRows) { + // We should reset ClientAggregators here in case they are being reused in a new + // ResultIterator. 
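The row-is-always-returned contract mentioned above is observable from plain JDBC: an ungrouped aggregate query yields exactly one row even when nothing matches. A minimal sketch, where the connection URL, table name and predicate are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

final class UngroupedAggregateSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM MY_TABLE WHERE 1 = 0")) {
      System.out.println(rs.next());     // true: a single aggregate row is still produced
      System.out.println(rs.getLong(1)); // 0
    }
  }
}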
+ aggregators.reset(aggregators.getAggregators()); + byte[] value = aggregators.toBytes(aggregators.getAggregators()); + result = new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value)); + } + } else { + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + while (true) { + aggregators.aggregate(rowAggregators, result); + Tuple nextResult = resultIterator.peek(); + if (nextResult == null) { + break; } - hasRows = true; - return result; + result = resultIterator.next(); + } + + byte[] value = aggregators.toBytes(rowAggregators); + Tuple tuple = wrapKeyValueAsResult(PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length)); + result = tuple; } + hasRows = true; + return result; + } - @Override - public String toString() { - return "UngroupedAggregatingResultIterator [hasRows=" + hasRows - + ", aggregators=" + aggregators + "]"; - } + @Override + public String toString() { + return "UngroupedAggregatingResultIterator [hasRows=" + hasRows + ", aggregators=" + aggregators + + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java index 6152ee3956f..937ec2c5081 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,144 +23,135 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.phoenix.compile.ExplainPlanAttributes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.monitoring.OverAllQueryMetrics; import org.apache.phoenix.monitoring.ReadMetricQueue; import org.apache.phoenix.query.KeyRange; -import org.apache.phoenix.util.ClientUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - +import org.apache.phoenix.util.ClientUtil; /** - * * Create a union ResultIterators - * - * */ public class UnionResultIterators implements ResultIterators { - private final List splits; - private final List> scans; - private final List iterators; - private final List readMetricsList; - private final List overAllQueryMetricsList; - private boolean closed; - private final StatementContext parentStmtCtx; - public UnionResultIterators(List plans, StatementContext parentStmtCtx) throws SQLException { - this.parentStmtCtx = parentStmtCtx; - int nPlans = plans.size(); - iterators = Lists.newArrayListWithExpectedSize(nPlans); - splits = Lists.newArrayListWithExpectedSize(nPlans * 30); - scans = Lists.newArrayListWithExpectedSize(nPlans * 10); - readMetricsList = Lists.newArrayListWithCapacity(nPlans); - overAllQueryMetricsList = Lists.newArrayListWithCapacity(nPlans); - for (QueryPlan plan 
: plans) { - readMetricsList.add(plan.getContext().getReadMetricsQueue()); - overAllQueryMetricsList.add(plan.getContext().getOverallQueryMetrics()); - iterators.add(LookAheadResultIterator.wrap(plan.iterator())); - splits.addAll(plan.getSplits()); - scans.addAll(plan.getScans()); - } - } + private final List splits; + private final List> scans; + private final List iterators; + private final List readMetricsList; + private final List overAllQueryMetricsList; + private boolean closed; + private final StatementContext parentStmtCtx; - @Override - public List getSplits() { - if (splits == null) - return Collections.emptyList(); - else - return splits; + public UnionResultIterators(List plans, StatementContext parentStmtCtx) + throws SQLException { + this.parentStmtCtx = parentStmtCtx; + int nPlans = plans.size(); + iterators = Lists.newArrayListWithExpectedSize(nPlans); + splits = Lists.newArrayListWithExpectedSize(nPlans * 30); + scans = Lists.newArrayListWithExpectedSize(nPlans * 10); + readMetricsList = Lists.newArrayListWithCapacity(nPlans); + overAllQueryMetricsList = Lists.newArrayListWithCapacity(nPlans); + for (QueryPlan plan : plans) { + readMetricsList.add(plan.getContext().getReadMetricsQueue()); + overAllQueryMetricsList.add(plan.getContext().getOverallQueryMetrics()); + iterators.add(LookAheadResultIterator.wrap(plan.iterator())); + splits.addAll(plan.getSplits()); + scans.addAll(plan.getScans()); } + } - @Override - public void close() throws SQLException { - if (!closed) { - closed = true; - SQLException toThrow = null; + @Override + public List getSplits() { + if (splits == null) return Collections.emptyList(); + else return splits; + } + + @Override + public void close() throws SQLException { + if (!closed) { + closed = true; + SQLException toThrow = null; + try { + if (iterators != null) { + for (int index = 0; index < iterators.size(); index++) { + PeekingResultIterator iterator = iterators.get(index); try { - if (iterators != null) { - for (int index=0; index < iterators.size(); index++) { - PeekingResultIterator iterator = iterators.get(index); - try { - iterator.close(); - } catch (Exception e) { - if (toThrow == null) { - toThrow = ClientUtil.parseServerException(e); - } else { - toThrow.setNextException(ClientUtil.parseServerException(e)); - } - } - } - } + iterator.close(); } catch (Exception e) { + if (toThrow == null) { toThrow = ClientUtil.parseServerException(e); - } finally { - setMetricsInParentContext(); - if (toThrow != null) { - throw toThrow; - } + } else { + toThrow.setNextException(ClientUtil.parseServerException(e)); + } } + } } - } - - private void setMetricsInParentContext() { - ReadMetricQueue parentCtxReadMetrics = parentStmtCtx.getReadMetricsQueue(); - for (ReadMetricQueue readMetrics : readMetricsList) { - parentCtxReadMetrics.combineReadMetrics(readMetrics); + } catch (Exception e) { + toThrow = ClientUtil.parseServerException(e); + } finally { + setMetricsInParentContext(); + if (toThrow != null) { + throw toThrow; } - OverAllQueryMetrics parentCtxQueryMetrics = parentStmtCtx.getOverallQueryMetrics(); - for (OverAllQueryMetrics metric : overAllQueryMetricsList) { - parentCtxQueryMetrics.combine(metric); - } - } - - @Override - public List> getScans() { - if (scans == null) - return Collections.emptyList(); - else - return scans; + } } + } - @Override - public int size() { - return scans.size(); + private void setMetricsInParentContext() { + ReadMetricQueue parentCtxReadMetrics = parentStmtCtx.getReadMetricsQueue(); + for (ReadMetricQueue 
readMetrics : readMetricsList) { + parentCtxReadMetrics.combineReadMetrics(readMetrics); } - - @Override - public void explain(List planSteps) { - for (PeekingResultIterator iterator : iterators) { - iterator.explain(planSteps); - } + OverAllQueryMetrics parentCtxQueryMetrics = parentStmtCtx.getOverallQueryMetrics(); + for (OverAllQueryMetrics metric : overAllQueryMetricsList) { + parentCtxQueryMetrics.combine(metric); } + } + + @Override + public List> getScans() { + if (scans == null) return Collections.emptyList(); + else return scans; + } - @Override - public List getIterators() throws SQLException { - return iterators; + @Override + public int size() { + return scans.size(); + } + + @Override + public void explain(List planSteps) { + for (PeekingResultIterator iterator : iterators) { + iterator.explain(planSteps); } + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - boolean moreThanOneIters = false; - ExplainPlanAttributesBuilder lhsPointer = null; - // For more than one iterators, explainPlanAttributes will create - // chain of objects as lhs and rhs query plans. - for (PeekingResultIterator iterator : iterators) { - if (moreThanOneIters) { - ExplainPlanAttributesBuilder rhsBuilder = - new ExplainPlanAttributesBuilder(); - iterator.explain(planSteps, rhsBuilder); - ExplainPlanAttributes rhsPlans = rhsBuilder.build(); - lhsPointer.setRhsJoinQueryExplainPlan(rhsPlans); - lhsPointer = rhsBuilder; - } else { - iterator.explain(planSteps, explainPlanAttributesBuilder); - lhsPointer = explainPlanAttributesBuilder; - } - moreThanOneIters = true; - } + @Override + public List getIterators() throws SQLException { + return iterators; + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + boolean moreThanOneIters = false; + ExplainPlanAttributesBuilder lhsPointer = null; + // For more than one iterators, explainPlanAttributes will create + // chain of objects as lhs and rhs query plans. + for (PeekingResultIterator iterator : iterators) { + if (moreThanOneIters) { + ExplainPlanAttributesBuilder rhsBuilder = new ExplainPlanAttributesBuilder(); + iterator.explain(planSteps, rhsBuilder); + ExplainPlanAttributes rhsPlans = rhsBuilder.build(); + lhsPointer.setRhsJoinQueryExplainPlan(rhsPlans); + lhsPointer = rhsBuilder; + } else { + iterator.explain(planSteps, explainPlanAttributesBuilder); + lhsPointer = explainPlanAttributesBuilder; + } + moreThanOneIters = true; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/AbstractRPCConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/AbstractRPCConnectionInfo.java index 1a62c69fbb9..6707a9a80c2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/AbstractRPCConnectionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/AbstractRPCConnectionInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,235 +31,232 @@ /** * Encapsulates the common logic for HRPC based ConnectionInfo classes. 
- * */ public abstract class AbstractRPCConnectionInfo extends ConnectionInfo { - private static final String MASTER_ADDRS_KEY = "hbase.masters"; - private static final String MASTER_HOSTNAME_KEY = "hbase.master.hostname"; + private static final String MASTER_ADDRS_KEY = "hbase.masters"; + private static final String MASTER_HOSTNAME_KEY = "hbase.master.hostname"; - protected String bootstrapServers; + protected String bootstrapServers; - public String getBoostrapServers() { - return bootstrapServers; - } + public String getBoostrapServers() { + return bootstrapServers; + } - protected AbstractRPCConnectionInfo(boolean isConnectionless, String principal, String keytab, - User user, String haGroup, ConnectionType connectionType) { - super(isConnectionless, principal, keytab, user, haGroup, connectionType); - } + protected AbstractRPCConnectionInfo(boolean isConnectionless, String principal, String keytab, + User user, String haGroup, ConnectionType connectionType) { + super(isConnectionless, principal, keytab, user, haGroup, connectionType); + } - @Override - public String getZookeeperConnectionString() { - throw new UnsupportedOperationException("MasterRegistry is used"); + @Override + public String getZookeeperConnectionString() { + throw new UnsupportedOperationException("MasterRegistry is used"); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((bootstrapServers == null) ? 0 : bootstrapServers.hashCode()); + // Port is already provided in or normalized into bootstrapServers + return result; + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + AbstractRPCConnectionInfo other = (AbstractRPCConnectionInfo) obj; + if (bootstrapServers == null) { + if (other.bootstrapServers != null) { + return false; + } + } else if (!bootstrapServers.equals(other.bootstrapServers)) { + return false; } + // Port is already provided in or normalized into bootstrapServers + return true; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bootstrapServers == null) ? 0 : bootstrapServers.hashCode()); - // Port is already provided in or normalized into bootstrapServers - return result; + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(bootstrapServers.replaceAll(":", "\\\\:")); + if (anyNotNull(principal, keytab)) { + sb.append(principal == null ? ":::" : ":::" + principal); + } + if (anyNotNull(keytab)) { + sb.append(keytab == null ? ":" : ":" + keytab); } + return sb.toString(); + } - @Override - public boolean equals(Object obj) { - if (!super.equals(obj)) { - return false; - } - AbstractRPCConnectionInfo other = (AbstractRPCConnectionInfo) obj; - if (bootstrapServers == null) { - if (other.bootstrapServers != null) { - return false; - } - } else if (!bootstrapServers.equals(other.bootstrapServers)) { - return false; - } - // Port is already provided in or normalized into bootstrapServers - return true; + /** + * Abstract Builder parent for HRPC based ConnectionInfo classes. 
+ * @since 138 + */ + protected abstract static class Builder extends ConnectionInfo.Builder { + String hostsList; + String portString; + Integer port; + + public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { + super(url, config, props, info); } @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(bootstrapServers.replaceAll(":", "\\\\:")); - if (anyNotNull(principal, keytab)) { - sb.append(principal == null ? ":::" : ":::" + principal); - } - if (anyNotNull(keytab)) { - sb.append(keytab == null ? ":" : ":" + keytab); - } - return sb.toString(); + protected ConnectionInfo create() throws SQLException { + parse(); + normalize(); + handleKerberosAndLogin(); + setHaGroup(); + return build(); } - /** - * Abstract Builder parent for HRPC based ConnectionInfo classes. - * - * @since 138 - */ - protected abstract static class Builder extends ConnectionInfo.Builder { - String hostsList; - String portString; - Integer port; + private void parse() throws SQLException { + StringTokenizer tokenizer = getTokenizerWithoutProtocol(); - public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { - super(url, config, props, info); + // Unlike for the ZK URL, there is no heuristics to figure out missing parts. + // Unspecified parts inside the URL must be indicated by ::. + boolean wasSeparator = true; + boolean first = true; + ArrayList parts = new ArrayList<>(7); + String token = null; + while (tokenizer.hasMoreTokens() && !(token = tokenizer.nextToken()).equals(TERMINATOR)) { + if (DELIMITERS.contains(token)) { + if (wasSeparator && !first) { + parts.add(null); + } + wasSeparator = true; + } else { + parts.add(token); + wasSeparator = false; } - - @Override - protected ConnectionInfo create() throws SQLException { - parse(); - normalize(); - handleKerberosAndLogin(); - setHaGroup(); - return build(); + first = false; + if (parts.size() > 6) { + throw getMalFormedUrlException(url); } + } - private void parse() throws SQLException { - StringTokenizer tokenizer = getTokenizerWithoutProtocol(); - - // Unlike for the ZK URL, there is no heuristics to figure out missing parts. - // Unspecified parts inside the URL must be indicated by ::. - boolean wasSeparator = true; - boolean first = true; - ArrayList parts = new ArrayList<>(7); - String token = null; - while (tokenizer.hasMoreTokens() - && !(token = tokenizer.nextToken()).equals(TERMINATOR)) { - if (DELIMITERS.contains(token)) { - if (wasSeparator && !first) { - parts.add(null); - } - wasSeparator = true; - } else { - parts.add(token); - wasSeparator = false; - } - first = false; - if (parts.size() > 6) { - throw getMalFormedUrlException(url); - } - } - - if (parts.size() == 6) { - // We could check for FileSystems.getDefault().getSeparator()), but then - // we wouldn't be able to test on Unix. 
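For orientation, parse() accepts at most six colon-separated positions after the JDBC protocol prefix, with an unspecified position written out as an empty slot. A hypothetical example of how one such URL body decomposes (host, principal and keytab values are made up):

final class RpcUrlLayoutSketch {
  // master1.example.com:16000::app/host@EXAMPLE.COM:/etc/security/app.keytab
  //
  //   part 0 -> hostsList  = "master1.example.com"
  //   part 1 -> portString = "16000"
  //   part 2 -> must stay empty (no znode-path position for RPC based registries)
  //   part 3 -> principal  = "app/host@EXAMPLE.COM"
  //   part 4 -> keytab     = "/etc/security/app.keytab"
  //   part 5 -> only appears when a Windows keytab path carries a drive colon and
  //             is folded back into part 4
  static final String EXAMPLE =
    "master1.example.com:16000::app/host@EXAMPLE.COM:/etc/security/app.keytab";
}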
- if (parts.get(5).startsWith("\\")) { - // Reconstruct windows path - parts.set(4, parts.get(4) + ":" + parts.get(5)); - parts.remove(5); - } else { - throw getMalFormedUrlException(url); - } - } - - while (parts.size() < 7) { - parts.add(null); - } - hostsList = parts.get(0); - portString = parts.get(1); - if (portString != null) { - try { - port = Integer.parseInt(parts.get(1)); - if (port < 0) { - throw new Exception(); - } - } catch (Exception e) { - throw getMalFormedUrlException(url); - } - } - if (parts.get(2) != null && !parts.get(2).isEmpty()) { - // This MUST be empty - throw getMalFormedUrlException(url); - } - principal = parts.get(3); - keytab = parts.get(4); + if (parts.size() == 6) { + // We could check for FileSystems.getDefault().getSeparator()), but then + // we wouldn't be able to test on Unix. + if (parts.get(5).startsWith("\\")) { + // Reconstruct windows path + parts.set(4, parts.get(4) + ":" + parts.get(5)); + parts.remove(5); + } else { + throw getMalFormedUrlException(url); } + } - protected void normalizeMaster() throws SQLException { - if (hostsList != null && hostsList.isEmpty()) { - hostsList = null; - } - if (portString != null && portString.isEmpty()) { - portString = null; - } - if (portString != null) { - try { - port = Integer.parseInt(portString); - if (port < 0) { - throw new Exception(); - } - } catch (Exception e) { - throw getMalFormedUrlException(url); - } - } - - if (port == null) { - port = getDefaultMasterPort(); - } - // At this point, masterPort is guaranteed not to be 0 + while (parts.size() < 7) { + parts.add(null); + } + hostsList = parts.get(0); + portString = parts.get(1); + if (portString != null) { + try { + port = Integer.parseInt(parts.get(1)); + if (port < 0) { + throw new Exception(); + } + } catch (Exception e) { + throw getMalFormedUrlException(url); + } + } + if (parts.get(2) != null && !parts.get(2).isEmpty()) { + // This MUST be empty + throw getMalFormedUrlException(url); + } + principal = parts.get(3); + keytab = parts.get(4); + } - isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(hostsList); + protected void normalizeMaster() throws SQLException { + if (hostsList != null && hostsList.isEmpty()) { + hostsList = null; + } + if (portString != null && portString.isEmpty()) { + portString = null; + } + if (portString != null) { + try { + port = Integer.parseInt(portString); + if (port < 0) { + throw new Exception(); + } + } catch (Exception e) { + throw getMalFormedUrlException(url); + } + } - if (isConnectionless) { - if (port != null) { - throw getMalFormedUrlException(url); - } else { - return; - } - } + if (port == null) { + port = getDefaultMasterPort(); + } + // At this point, masterPort is guaranteed not to be 0 - if (hostsList == null) { - hostsList = getMasterAddr(port); - if (hostsList == null) { - throw getMalFormedUrlException( - "Hbase masters are not specified and in URL, and are not set in the configuration files: " - + url); - } - } else { - hostsList = hostsList.replaceAll("=", ":"); - } + isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(hostsList); - hostsList = normalizeHostsList(hostsList, port); + if (isConnectionless) { + if (port != null) { + throw getMalFormedUrlException(url); + } else { + return; } + } - /** - * Copied from org.apache.hadoop.hbase.client.MasterRegistry (which is private) Supplies the - * default master port we should use given the provided configuration. - * @param conf Configuration to parse from. 
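When the URL itself names no hosts, the two helpers here fall back to the HBase configuration. A small sketch of the two configuration shapes they accept (host names are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class MasterAddressFallbackSketch {
  static Configuration explicitAddressList() {
    Configuration conf = HBaseConfiguration.create();
    // Preferred form: a complete address list, returned verbatim by getMasterAddr().
    conf.set("hbase.masters", "master1.example.com:16000,master2.example.com:16000");
    return conf;
  }

  static Configuration hostnameWithDefaultPort() {
    Configuration conf = HBaseConfiguration.create();
    // Alternative form: a bare hostname; the port then comes from hbase.master.port,
    // or falls back to HConstants.DEFAULT_MASTER_PORT when that is unset or 0.
    conf.set("hbase.master.hostname", "master1.example.com");
    return conf;
  }
}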
- */ - private int getDefaultMasterPort() { - String portString = get(HConstants.MASTER_PORT); - if (portString == null) { - port = HConstants.DEFAULT_MASTER_PORT; - } else { - port = Integer.parseInt(portString); - } - if (port == 0) { - // Master port may be set to 0. We should substitute the default port in that case. - return HConstants.DEFAULT_MASTER_PORT; - } - return port; + if (hostsList == null) { + hostsList = getMasterAddr(port); + if (hostsList == null) { + throw getMalFormedUrlException( + "Hbase masters are not specified and in URL, and are not set in the configuration files: " + + url); } + } else { + hostsList = hostsList.replaceAll("=", ":"); + } - /** - * Adopted from org.apache.hadoop.hbase.client.MasterRegistry Builds the default master - * address end point if it is not specified in the configuration. - */ - private String getMasterAddr(int port) { - String masterAddrFromConf = get(MASTER_ADDRS_KEY); - if (!Strings.isNullOrEmpty(masterAddrFromConf)) { - return masterAddrFromConf; - } - String hostname = get(MASTER_HOSTNAME_KEY); - if (hostname != null) { - return String.format("%s:%d", hostname, port); - } else { - return null; - } - } + hostsList = normalizeHostsList(hostsList, port); + } + + /** + * Copied from org.apache.hadoop.hbase.client.MasterRegistry (which is private) Supplies the + * default master port we should use given the provided configuration. + * @param conf Configuration to parse from. + */ + private int getDefaultMasterPort() { + String portString = get(HConstants.MASTER_PORT); + if (portString == null) { + port = HConstants.DEFAULT_MASTER_PORT; + } else { + port = Integer.parseInt(portString); + } + if (port == 0) { + // Master port may be set to 0. We should substitute the default port in that case. + return HConstants.DEFAULT_MASTER_PORT; + } + return port; + } - protected abstract ConnectionInfo build(); + /** + * Adopted from org.apache.hadoop.hbase.client.MasterRegistry Builds the default master address + * end point if it is not specified in the configuration. + */ + private String getMasterAddr(int port) { + String masterAddrFromConf = get(MASTER_ADDRS_KEY); + if (!Strings.isNullOrEmpty(masterAddrFromConf)) { + return masterAddrFromConf; + } + String hostname = get(MASTER_HOSTNAME_KEY); + if (hostname != null) { + return String.format("%s:%d", hostname, port); + } else { + return null; + } } + + protected abstract ConnectionInfo build(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecord.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecord.java index aa28c6e85f4..722cd75af6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecord.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecord.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Optional; + import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.phoenix.util.JDBCUtil; @@ -28,230 +29,198 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Optional; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; /** - * Immutable class of a cluster role record for a pair of HBase clusters. - * - * This is the data model used by: - * - Admin uses command line tool to write records of this class to ZK nodes - * - Clients reads and registers watcher to get data of this class from ZK nodes - * - * The cluster roles can be updated for a given HA group, in which case a new cluster role record - * will be saved in either configuration file for Admin tool or the znode data for clients. For - * any updates like that, the new cluster role record for that HA group should bump the version. - * This is to ensure data integrity across updates. Meanwhile, other fields are not allowed to - * change for an existing HA group. If the HA group needs to change its behavior, it will affect - * all clients, which are not controlled or tracked by Phoenix HA framework. To assist that - * scenario like switching HA polices, it is advised to create a new HA group and delete the old HA - * group after all clients have migrated. - * - * This class is immutable. + * Immutable class of a cluster role record for a pair of HBase clusters. This is the data model + * used by: - Admin uses command line tool to write records of this class to ZK nodes - Clients + * reads and registers watcher to get data of this class from ZK nodes The cluster roles can be + * updated for a given HA group, in which case a new cluster role record will be saved in either + * configuration file for Admin tool or the znode data for clients. For any updates like that, the + * new cluster role record for that HA group should bump the version. This is to ensure data + * integrity across updates. Meanwhile, other fields are not allowed to change for an existing HA + * group. If the HA group needs to change its behavior, it will affect all clients, which are not + * controlled or tracked by Phoenix HA framework. To assist that scenario like switching HA polices, + * it is advised to create a new HA group and delete the old HA group after all clients have + * migrated. This class is immutable. */ public class ClusterRoleRecord { - private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecord.class); - - /** - * Enum for the current state of the cluster. Exact meaning depends on the Policy but in general Active clusters - * take traffic, standby and offline do not, and unknown is used if the state cannot be determined. 
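The version rule described in the class comment (same HA group information, strictly larger version wins) can be exercised directly with the accessors this class keeps. A sketch of the record an admin would publish on a role flip; the policy is carried over unchanged, and the roles shown assume zk1 was previously ACTIVE:

import org.apache.phoenix.jdbc.ClusterRoleRecord;
import org.apache.phoenix.jdbc.ClusterRoleRecord.ClusterRole;

final class RoleRecordVersionSketch {
  // Identical group, urls and policy, swapped roles, bumped version: every client
  // watching the record will see nextVersion(current).isNewerThan(current) == true.
  static ClusterRoleRecord nextVersion(ClusterRoleRecord current) {
    return new ClusterRoleRecord(
      current.getHaGroupName(), current.getPolicy(),
      current.getZk1(), ClusterRole.STANDBY, // assumed previously ACTIVE
      current.getZk2(), ClusterRole.ACTIVE,  // assumed previously STANDBY
      current.getVersion() + 1);
  }
}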
- */ - public enum ClusterRole { - ACTIVE, STANDBY, OFFLINE, UNKNOWN; - - /** - * @return true if a cluster with this role can be connected, otherwise false - */ - public boolean canConnect() { - return this == ACTIVE || this == STANDBY; - } - - public static ClusterRole from(byte[] bytes) { - String value = new String(bytes, StandardCharsets.UTF_8); - return Arrays.stream(ClusterRole.values()) - .filter(r -> r.name().equalsIgnoreCase(value)) - .findFirst() - .orElse(UNKNOWN); - } - } - - private final String haGroupName; - private final HighAvailabilityPolicy policy; - private final String zk1; - private final ClusterRole role1; - private final String zk2; - private final ClusterRole role2; - private final long version; - - @JsonCreator - public ClusterRoleRecord(@JsonProperty("haGroupName") String haGroupName, - @JsonProperty("policy") HighAvailabilityPolicy policy, - @JsonProperty("zk1") String zk1, @JsonProperty("role1") ClusterRole role1, - @JsonProperty("zk2") String zk2, @JsonProperty("role2") ClusterRole role2, - @JsonProperty("version") long version) { - this.haGroupName = haGroupName; - this.policy = policy; - //Do we really need to normalize here ? - zk1 = JDBCUtil.formatZookeeperUrl(zk1); - zk2 = JDBCUtil.formatZookeeperUrl(zk2); - // Ignore the given order of url1 and url2 - if (zk1.compareTo(zk2) < 0) { - this.zk1 = zk1; - this.role1 = role1; - this.zk2 = zk2; - this.role2 = role2; - } else { - this.zk1 = zk2; - this.role1 = role2; - this.zk2 = zk1; - this.role2 = role1; - } - this.version = version; - } - - public static Optional fromJson(byte[] bytes) { - if (bytes == null) { - return Optional.empty(); - } - try { - return Optional.of(JacksonUtil.getObjectReader(ClusterRoleRecord.class).readValue(bytes)); - } catch (Exception e) { - LOG.error("Fail to deserialize data to a cluster role store", e); - return Optional.empty(); - } - } - - public static byte[] toJson(ClusterRoleRecord record) throws IOException { - return JacksonUtil.getObjectWriter().writeValueAsBytes(record); - } - - @JsonIgnore - public Optional getActiveUrl() { - if (role1 == ClusterRole.ACTIVE) { - return Optional.of(zk1); - } - if (role2 == ClusterRole.ACTIVE) { - return Optional.of(zk2); - } - return Optional.empty(); - } - - /** - * @return true if this is newer than the given cluster role record. 
- */ - public boolean isNewerThan(ClusterRoleRecord other) { - if (other == null) { - return true; - } - return this.hasSameInfo(other) && this.version > other.version; - } - - public boolean hasSameInfo(ClusterRoleRecord other) { - return haGroupName.equals(other.haGroupName) && - policy.equals(other.policy) && - zk1.equalsIgnoreCase(other.zk1) && - zk2.equalsIgnoreCase(other.zk2); - } - - /** - * @return role by ZK url or UNKNOWN if the zkUrl does not belong to this HA group - */ - public ClusterRole getRole(String zkUrl) { - if (zk1.equals(zkUrl)) { - return role1; - } else if (zk2.equals(zkUrl)) { - return role2; - } else { - return ClusterRole.UNKNOWN; - } - } - - public String getHaGroupName() { - return haGroupName; - } - - public HighAvailabilityPolicy getPolicy() { - return policy; - } - - public String getZk1() { - return zk1; - } - - public ClusterRole getRole1() { - return role1; - } - - public String getZk2() { - return zk2; - } - - public ClusterRole getRole2() { - return role2; - } - - public long getVersion() { - return version; - } - - @Override - public int hashCode() { - return new HashCodeBuilder() - .append(haGroupName) - .append(policy) - .append(zk1) - .append(role1) - .append(zk2) - .append(role2) - .append(version) - .hashCode(); - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } else if (other == null) { - return false; - } else if (!(other instanceof ClusterRoleRecord)) { - return false; - } else { - ClusterRoleRecord otherRecord = (ClusterRoleRecord) other; - return new EqualsBuilder() - .append(haGroupName, otherRecord.haGroupName) - .append(policy, otherRecord.policy) - .append(zk1, otherRecord.zk1) - .append(role1, otherRecord.role1) - .append(zk2, otherRecord.zk2) - .append(role2, otherRecord.role2) - .append(version, otherRecord.version) - .isEquals(); - } - } - - @Override - public String toString() { - return "ClusterRoleRecord{" - + "haGroupName='" + haGroupName + '\'' - + ", policy=" + policy - + ", zk1='" + zk1 + '\'' - + ", role1=" + role1 - + ", zk2='" + zk2 + '\'' - + ", role2=" + role2 - + ", version=" + version - + '}'; - } - - public String toPrettyString() { - try { - return JacksonUtil.getObjectWriterPretty().writeValueAsString(this); - } catch (Exception e) { - LOG.error("Fail to wrap this object as JSON, retuning the oneliner using toString", e); - return toString(); - } - } - + private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecord.class); + + /** + * Enum for the current state of the cluster. Exact meaning depends on the Policy but in general + * Active clusters take traffic, standby and offline do not, and unknown is used if the state + * cannot be determined. 
+ */ + public enum ClusterRole { + ACTIVE, + STANDBY, + OFFLINE, + UNKNOWN; + + /** Returns true if a cluster with this role can be connected, otherwise false */ + public boolean canConnect() { + return this == ACTIVE || this == STANDBY; + } + + public static ClusterRole from(byte[] bytes) { + String value = new String(bytes, StandardCharsets.UTF_8); + return Arrays.stream(ClusterRole.values()).filter(r -> r.name().equalsIgnoreCase(value)) + .findFirst().orElse(UNKNOWN); + } + } + + private final String haGroupName; + private final HighAvailabilityPolicy policy; + private final String zk1; + private final ClusterRole role1; + private final String zk2; + private final ClusterRole role2; + private final long version; + + @JsonCreator + public ClusterRoleRecord(@JsonProperty("haGroupName") String haGroupName, + @JsonProperty("policy") HighAvailabilityPolicy policy, @JsonProperty("zk1") String zk1, + @JsonProperty("role1") ClusterRole role1, @JsonProperty("zk2") String zk2, + @JsonProperty("role2") ClusterRole role2, @JsonProperty("version") long version) { + this.haGroupName = haGroupName; + this.policy = policy; + // Do we really need to normalize here ? + zk1 = JDBCUtil.formatZookeeperUrl(zk1); + zk2 = JDBCUtil.formatZookeeperUrl(zk2); + // Ignore the given order of url1 and url2 + if (zk1.compareTo(zk2) < 0) { + this.zk1 = zk1; + this.role1 = role1; + this.zk2 = zk2; + this.role2 = role2; + } else { + this.zk1 = zk2; + this.role1 = role2; + this.zk2 = zk1; + this.role2 = role1; + } + this.version = version; + } + + public static Optional fromJson(byte[] bytes) { + if (bytes == null) { + return Optional.empty(); + } + try { + return Optional.of(JacksonUtil.getObjectReader(ClusterRoleRecord.class).readValue(bytes)); + } catch (Exception e) { + LOG.error("Fail to deserialize data to a cluster role store", e); + return Optional.empty(); + } + } + + public static byte[] toJson(ClusterRoleRecord record) throws IOException { + return JacksonUtil.getObjectWriter().writeValueAsBytes(record); + } + + @JsonIgnore + public Optional getActiveUrl() { + if (role1 == ClusterRole.ACTIVE) { + return Optional.of(zk1); + } + if (role2 == ClusterRole.ACTIVE) { + return Optional.of(zk2); + } + return Optional.empty(); + } + + /** Returns true if this is newer than the given cluster role record. 
*/ + public boolean isNewerThan(ClusterRoleRecord other) { + if (other == null) { + return true; + } + return this.hasSameInfo(other) && this.version > other.version; + } + + public boolean hasSameInfo(ClusterRoleRecord other) { + return haGroupName.equals(other.haGroupName) && policy.equals(other.policy) + && zk1.equalsIgnoreCase(other.zk1) && zk2.equalsIgnoreCase(other.zk2); + } + + /** Returns role by ZK url or UNKNOWN if the zkUrl does not belong to this HA group */ + public ClusterRole getRole(String zkUrl) { + if (zk1.equals(zkUrl)) { + return role1; + } else if (zk2.equals(zkUrl)) { + return role2; + } else { + return ClusterRole.UNKNOWN; + } + } + + public String getHaGroupName() { + return haGroupName; + } + + public HighAvailabilityPolicy getPolicy() { + return policy; + } + + public String getZk1() { + return zk1; + } + + public ClusterRole getRole1() { + return role1; + } + + public String getZk2() { + return zk2; + } + + public ClusterRole getRole2() { + return role2; + } + + public long getVersion() { + return version; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(haGroupName).append(policy).append(zk1).append(role1) + .append(zk2).append(role2).append(version).hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } else if (other == null) { + return false; + } else if (!(other instanceof ClusterRoleRecord)) { + return false; + } else { + ClusterRoleRecord otherRecord = (ClusterRoleRecord) other; + return new EqualsBuilder().append(haGroupName, otherRecord.haGroupName) + .append(policy, otherRecord.policy).append(zk1, otherRecord.zk1) + .append(role1, otherRecord.role1).append(zk2, otherRecord.zk2) + .append(role2, otherRecord.role2).append(version, otherRecord.version).isEquals(); + } + } + + @Override + public String toString() { + return "ClusterRoleRecord{" + "haGroupName='" + haGroupName + '\'' + ", policy=" + policy + + ", zk1='" + zk1 + '\'' + ", role1=" + role1 + ", zk2='" + zk2 + '\'' + ", role2=" + role2 + + ", version=" + version + '}'; + } + + public String toPrettyString() { + try { + return JacksonUtil.getObjectWriterPretty().writeValueAsString(this); + } catch (Exception e) { + LOG.error("Fail to wrap this object as JSON, retuning the oneliner using toString", e); + return toString(); + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorTool.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorTool.java index 93899f87a29..5d21090931e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorTool.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; +import static org.apache.phoenix.jdbc.HighAvailabilityGroup.PHOENIX_HA_ATTR_PREFIX; +import static org.apache.phoenix.jdbc.PhoenixHAAdminTool.getLocalZkUrl; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -34,128 +41,115 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.apache.phoenix.jdbc.HighAvailabilityGroup.PHOENIX_HA_ATTR_PREFIX; -import static org.apache.phoenix.jdbc.PhoenixHAAdminTool.getLocalZkUrl; - - /** * A tool which generates cluster role records into JSON file assuming this cluster is ACTIVE and * peer cluster with (default id=1) is STANDBY. */ public class ClusterRoleRecordGeneratorTool extends Configured implements Tool { - private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecordGeneratorTool.class); + private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecordGeneratorTool.class); - /** Key/attribute prefix for this static store. */ - private static final String GENERATOR_ATTR_PREFIX = PHOENIX_HA_ATTR_PREFIX + "role.generator."; - /** The output JSON file name to write; if not configured, a temp file will be created. */ - public static final String PHOENIX_HA_GENERATOR_FILE_ATTR = GENERATOR_ATTR_PREFIX + "file"; - /** The key of all HA group names this static store will return, separated by comma. */ - public static final String PHOENIX_HA_GROUPS_ATTR = GENERATOR_ATTR_PREFIX + "groups"; - /** Config key format for the HA policy; should be formatted with a specific group name. */ - public static final String PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT = - GENERATOR_ATTR_PREFIX + "policy.%s"; - /** The replication peer cluster id for one HA group. */ - public static final String PHOENIX_HA_GROUP_STORE_PEER_ID_ATTR_FORMAT = - GENERATOR_ATTR_PREFIX + "store.peer.id.%s"; - public static final String PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT = "1"; + /** Key/attribute prefix for this static store. */ + private static final String GENERATOR_ATTR_PREFIX = PHOENIX_HA_ATTR_PREFIX + "role.generator."; + /** The output JSON file name to write; if not configured, a temp file will be created. */ + public static final String PHOENIX_HA_GENERATOR_FILE_ATTR = GENERATOR_ATTR_PREFIX + "file"; + /** The key of all HA group names this static store will return, separated by comma. */ + public static final String PHOENIX_HA_GROUPS_ATTR = GENERATOR_ATTR_PREFIX + "groups"; + /** Config key format for the HA policy; should be formatted with a specific group name. */ + public static final String PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT = + GENERATOR_ATTR_PREFIX + "policy.%s"; + /** The replication peer cluster id for one HA group. */ + public static final String PHOENIX_HA_GROUP_STORE_PEER_ID_ATTR_FORMAT = + GENERATOR_ATTR_PREFIX + "store.peer.id.%s"; + public static final String PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT = "1"; - @Override - public int run(String[] args) throws Exception { - try { - String fileName = getConf().get(PHOENIX_HA_GENERATOR_FILE_ATTR); - File file = StringUtils.isEmpty(fileName) - ? 
File.createTempFile("phoenix.ha.cluster.role.records", ".json") - : new File(fileName); - JacksonUtil.getObjectWriterPretty().writeValue(file, listAllRecordsByZk()); - System.out.println("Created JSON file '" + file + "'"); - return 0; - } catch (Exception e) { - e.printStackTrace(); - return -1; - } + @Override + public int run(String[] args) throws Exception { + try { + String fileName = getConf().get(PHOENIX_HA_GENERATOR_FILE_ATTR); + File file = StringUtils.isEmpty(fileName) + ? File.createTempFile("phoenix.ha.cluster.role.records", ".json") + : new File(fileName); + JacksonUtil.getObjectWriterPretty().writeValue(file, listAllRecordsByZk()); + System.out.println("Created JSON file '" + file + "'"); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return -1; } + } - List listAllRecordsByZk() throws IOException { - /* This current cluster's full ZK url for HBase, in host:port:/hbase format. */ - String localZkUrl = getLocalZkUrl(getConf()); - final String[] haGroupNames = getConf().getStrings(PHOENIX_HA_GROUPS_ATTR); - if (haGroupNames == null || haGroupNames.length == 0) { - String msg = "No HA groups configured for this cluster via " + PHOENIX_HA_GROUPS_ATTR; - LOG.error(msg); - throw new IOException(msg); - } - - List records = new ArrayList<>(); - for (String haGroupName : haGroupNames) { - String peerZkUrl = getPeerZkUrl(getConf(), haGroupName); - records.add(new ClusterRoleRecord(haGroupName, getHaPolicy(haGroupName), - localZkUrl, ClusterRole.ACTIVE, - peerZkUrl, ClusterRole.STANDBY, - 1)); - } - LOG.debug("Returning all cluster role records discovered: {}", records); - return records; + List listAllRecordsByZk() throws IOException { + /* This current cluster's full ZK url for HBase, in host:port:/hbase format. */ + String localZkUrl = getLocalZkUrl(getConf()); + final String[] haGroupNames = getConf().getStrings(PHOENIX_HA_GROUPS_ATTR); + if (haGroupNames == null || haGroupNames.length == 0) { + String msg = "No HA groups configured for this cluster via " + PHOENIX_HA_GROUPS_ATTR; + LOG.error(msg); + throw new IOException(msg); } - /** - * Helper method to get the replication peer's ZK URL (host:port) from Configuration. - */ - private static String getPeerZkUrl(Configuration conf, String haGroupName) throws IOException { - String key = String.format(PHOENIX_HA_GROUP_STORE_PEER_ID_ATTR_FORMAT, haGroupName); - String peerId = conf.get(key, PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT); - try (Connection connection = ConnectionFactory.createConnection(conf)) { - return getPeerClusterKey(connection.getAdmin(), peerId); - } + List records = new ArrayList<>(); + for (String haGroupName : haGroupNames) { + String peerZkUrl = getPeerZkUrl(getConf(), haGroupName); + records.add(new ClusterRoleRecord(haGroupName, getHaPolicy(haGroupName), localZkUrl, + ClusterRole.ACTIVE, peerZkUrl, ClusterRole.STANDBY, 1)); } + LOG.debug("Returning all cluster role records discovered: {}", records); + return records; + } - /** - * Helper method to get the replication peer's cluster key from replication config. - * - * This assumes the peer has the static fixed given id. 
- */ - @VisibleForTesting - static String getPeerClusterKey(Admin admin, String id) - throws IOException { - ReplicationPeerConfig replicationConfig; - try { - replicationConfig = admin.getReplicationPeerConfig(id); - } catch (IOException io) { - String msg = "Can not get replication peer (id=" + id + ") config"; - LOG.error(msg, io); - throw io; - } - String peerZk = replicationConfig.getClusterKey(); - if (StringUtils.isEmpty(peerZk)) { - String msg = "Peer (id=" + id + ") ZK quorum is not set!"; - LOG.error(msg); - throw new IOException(msg); - } - - return peerZk; + /** + * Helper method to get the replication peer's ZK URL (host:port) from Configuration. + */ + private static String getPeerZkUrl(Configuration conf, String haGroupName) throws IOException { + String key = String.format(PHOENIX_HA_GROUP_STORE_PEER_ID_ATTR_FORMAT, haGroupName); + String peerId = conf.get(key, PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + return getPeerClusterKey(connection.getAdmin(), peerId); } + } - /** Helper method to get the HA policy from configuration for the given HA group. */ - @VisibleForTesting - HighAvailabilityPolicy getHaPolicy(String haGroupName) throws IOException { - String key = String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName); - String value = getConf().get(key, HighAvailabilityPolicy.PARALLEL.name()); - try { - return HighAvailabilityPolicy.valueOf(value); - } catch (IllegalArgumentException e) { - String msg = "Invalid HA policy name '" + value + "' for HA group " + haGroupName; - LOG.error(msg, e); - throw new IOException(msg, e); - } + /** + * Helper method to get the replication peer's cluster key from replication config. This assumes + * the peer has the static fixed given id. + */ + @VisibleForTesting + static String getPeerClusterKey(Admin admin, String id) throws IOException { + ReplicationPeerConfig replicationConfig; + try { + replicationConfig = admin.getReplicationPeerConfig(id); + } catch (IOException io) { + String msg = "Can not get replication peer (id=" + id + ") config"; + LOG.error(msg, io); + throw io; + } + String peerZk = replicationConfig.getClusterKey(); + if (StringUtils.isEmpty(peerZk)) { + String msg = "Peer (id=" + id + ") ZK quorum is not set!"; + LOG.error(msg); + throw new IOException(msg); } - public static void main(String[] args) throws Exception { - Configuration conf = HBaseConfiguration.create(); - int retCode = ToolRunner.run(conf, new ClusterRoleRecordGeneratorTool(), args); - System.exit(retCode); + return peerZk; + } + + /** Helper method to get the HA policy from configuration for the given HA group. 
*/ + @VisibleForTesting + HighAvailabilityPolicy getHaPolicy(String haGroupName) throws IOException { + String key = String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName); + String value = getConf().get(key, HighAvailabilityPolicy.PARALLEL.name()); + try { + return HighAvailabilityPolicy.valueOf(value); + } catch (IllegalArgumentException e) { + String msg = "Invalid HA policy name '" + value + "' for HA group " + haGroupName; + LOG.error(msg, e); + throw new IOException(msg, e); } + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int retCode = ToolRunner.run(conf, new ClusterRoleRecordGeneratorTool(), args); + System.exit(retCode); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ConnectionInfo.java index 945060c153e..975e4dddee1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ConnectionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ConnectionInfo.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,537 +46,536 @@ * @since 0.1.1 */ public abstract class ConnectionInfo { - private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ConnectionInfo.class); - protected static final Object KERBEROS_LOGIN_LOCK = new Object(); - protected static final char WINDOWS_SEPARATOR_CHAR = '\\'; - protected static final String REALM_EQUIVALENCY_WARNING_MSG = - "Provided principal does not contain a realm and the default realm cannot be" - + " determined. 
Ignoring realm equivalency check."; - protected static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; - protected static final String DELIMITERS = TERMINATOR + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - protected static final String CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY = - "hbase.client.registry.impl"; - - protected static final boolean HAS_MASTER_REGISTRY; - protected static final boolean HAS_RPC_REGISTRY; - - static { - String version = VersionInfo.getVersion(); - if (VersionInfo.getMajorVersion(version) >= 3) { - HAS_MASTER_REGISTRY = true; - HAS_RPC_REGISTRY = true; - } else { - if (VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0") < 0) { - HAS_MASTER_REGISTRY = false; - HAS_RPC_REGISTRY = false; - } else if (VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") < 0) { - HAS_MASTER_REGISTRY = true; - HAS_RPC_REGISTRY = false; - } else { - HAS_MASTER_REGISTRY = true; - HAS_RPC_REGISTRY = true; - } - } - } - - protected static SQLException getMalFormedUrlException(String url) { - return new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_CONNECTION_URL) - .setMessage(url).build().buildException(); - } - - protected final boolean isConnectionless; - protected final String principal; - protected final String keytab; - protected final User user; - protected final String haGroup; - protected final ConnectionType connectionType; - - protected ConnectionInfo(boolean isConnectionless, String principal, String keytab, User user, - String haGroup, ConnectionType connectionType) { - super(); - this.isConnectionless = isConnectionless; - this.principal = principal; - this.keytab = keytab; - this.user = user; - this.haGroup = haGroup; - this.connectionType = connectionType; - } - - protected static String unescape(String escaped) { - return escaped.replaceAll("\\\\:", "="); + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ConnectionInfo.class); + protected static final Object KERBEROS_LOGIN_LOCK = new Object(); + protected static final char WINDOWS_SEPARATOR_CHAR = '\\'; + protected static final String REALM_EQUIVALENCY_WARNING_MSG = + "Provided principal does not contain a realm and the default realm cannot be" + + " determined. 
Ignoring realm equivalency check."; + protected static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; + protected static final String DELIMITERS = TERMINATOR + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + protected static final String CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY = + "hbase.client.registry.impl"; + + protected static final boolean HAS_MASTER_REGISTRY; + protected static final boolean HAS_RPC_REGISTRY; + + static { + String version = VersionInfo.getVersion(); + if (VersionInfo.getMajorVersion(version) >= 3) { + HAS_MASTER_REGISTRY = true; + HAS_RPC_REGISTRY = true; + } else { + if (VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0") < 0) { + HAS_MASTER_REGISTRY = false; + HAS_RPC_REGISTRY = false; + } else if (VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") < 0) { + HAS_MASTER_REGISTRY = true; + HAS_RPC_REGISTRY = false; + } else { + HAS_MASTER_REGISTRY = true; + HAS_RPC_REGISTRY = true; + } } - - public static ConnectionInfo createNoLogin(String url, ReadOnlyProps props, Properties info) - throws SQLException { - Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - return create(url, conf, props, info, true); - } - - public static ConnectionInfo create(String url, ReadOnlyProps props, Properties info) - throws SQLException { - Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - return create(url, conf, props, info); + } + + protected static SQLException getMalFormedUrlException(String url) { + return new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_CONNECTION_URL).setMessage(url) + .build().buildException(); + } + + protected final boolean isConnectionless; + protected final String principal; + protected final String keytab; + protected final User user; + protected final String haGroup; + protected final ConnectionType connectionType; + + protected ConnectionInfo(boolean isConnectionless, String principal, String keytab, User user, + String haGroup, ConnectionType connectionType) { + super(); + this.isConnectionless = isConnectionless; + this.principal = principal; + this.keytab = keytab; + this.user = user; + this.haGroup = haGroup; + this.connectionType = connectionType; + } + + protected static String unescape(String escaped) { + return escaped.replaceAll("\\\\:", "="); + } + + public static ConnectionInfo createNoLogin(String url, ReadOnlyProps props, Properties info) + throws SQLException { + Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + return create(url, conf, props, info, true); + } + + public static ConnectionInfo create(String url, ReadOnlyProps props, Properties info) + throws SQLException { + Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + return create(url, conf, props, info); + } + + public static ConnectionInfo createNoLogin(String url, Configuration configuration, + ReadOnlyProps props, Properties info) throws SQLException { + return create(url, configuration, props, info, true); + } + + public static ConnectionInfo create(String url, Configuration configuration, ReadOnlyProps props, + Properties info) throws SQLException { + return create(url, configuration, props, info, false); + } + + public static ConnectionInfo create(String url, Configuration configuration, ReadOnlyProps props, + Properties info, boolean doNotLogin) throws SQLException { + // registry-independent URL preprocessing + url = url == null ? 
"" : url; + url = unescape(url); + + // Assume missing prefix + if (url.isEmpty()) { + url = PhoenixRuntime.JDBC_PROTOCOL; } - - public static ConnectionInfo createNoLogin(String url, Configuration configuration, - ReadOnlyProps props, Properties info) throws SQLException { - return create(url, configuration, props, info, true); + if (!url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + url; } - public static ConnectionInfo create(String url, Configuration configuration, - ReadOnlyProps props, Properties info) throws SQLException { - return create(url, configuration, props, info, false); + if (configuration == null) { + configuration = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); } - public static ConnectionInfo create(String url, Configuration configuration, - ReadOnlyProps props, Properties info, boolean doNotLogin) throws SQLException { - // registry-independent URL preprocessing - url = url == null ? "" : url; - url = unescape(url); - - // Assume missing prefix - if (url.isEmpty()) { - url = PhoenixRuntime.JDBC_PROTOCOL; - } - if (!url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + url; - } - - if (configuration == null) { - configuration = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - } - - Builder builder; - - if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_ZK)) { - builder = new ZKConnectionInfo.Builder(url, configuration, props, info); - } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_MASTER)) { - builder = new MasterConnectionInfo.Builder(url, configuration, props, info); - } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_RPC)) { - builder = new RPCConnectionInfo.Builder(url, configuration, props, info); - } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - // The generic protocol was specified. Try to Determine the protocol from the config - if (MasterConnectionInfo.Builder.isMaster(configuration, props, info)) { - builder = new MasterConnectionInfo.Builder(url, configuration, props, info); - } else if (RPCConnectionInfo.Builder.isRPC(configuration, props, info)) { - builder = new RPCConnectionInfo.Builder(url, configuration, props, info); - } else if (ZKConnectionInfo.Builder.isZK(configuration, props, info)) { - builder = new ZKConnectionInfo.Builder(url, configuration, props, info); - } else { - // No registry class set in config. Use version-dependent default - if (VersionInfo.getMajorVersion(VersionInfo.getVersion()) >= 3) { - builder = new RPCConnectionInfo.Builder(url, configuration, props, info); - } else { - builder = new ZKConnectionInfo.Builder(url, configuration, props, info); - } - } + Builder builder; + + if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_ZK)) { + builder = new ZKConnectionInfo.Builder(url, configuration, props, info); + } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_MASTER)) { + builder = new MasterConnectionInfo.Builder(url, configuration, props, info); + } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL_RPC)) { + builder = new RPCConnectionInfo.Builder(url, configuration, props, info); + } else if (url.toLowerCase().startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + // The generic protocol was specified. 
Try to Determine the protocol from the config + if (MasterConnectionInfo.Builder.isMaster(configuration, props, info)) { + builder = new MasterConnectionInfo.Builder(url, configuration, props, info); + } else if (RPCConnectionInfo.Builder.isRPC(configuration, props, info)) { + builder = new RPCConnectionInfo.Builder(url, configuration, props, info); + } else if (ZKConnectionInfo.Builder.isZK(configuration, props, info)) { + builder = new ZKConnectionInfo.Builder(url, configuration, props, info); + } else { + // No registry class set in config. Use version-dependent default + if (VersionInfo.getMajorVersion(VersionInfo.getVersion()) >= 3) { + builder = new RPCConnectionInfo.Builder(url, configuration, props, info); } else { - throw getMalFormedUrlException(url); + builder = new ZKConnectionInfo.Builder(url, configuration, props, info); } - - builder.setDoNotLogin(doNotLogin); - return builder.create(); + } + } else { + throw getMalFormedUrlException(url); } - protected static List handleWindowsKeytab(String url, List parts) - throws SQLException { - - if (parts.size() == 7) { - // We could check for FileSystems.getDefault().getSeparator()), but then - // we wouldn't be able to test on Unix. - if (parts.get(6) != null && parts.get(6).startsWith("\\")) { - // Reconstruct windows path - parts.set(5, parts.get(5) + ":" + parts.get(6)); - parts.remove(6); - } else { - throw getMalFormedUrlException(url); - } - } - - return parts; + builder.setDoNotLogin(doNotLogin); + return builder.create(); + } + + protected static List handleWindowsKeytab(String url, List parts) + throws SQLException { + + if (parts.size() == 7) { + // We could check for FileSystems.getDefault().getSeparator()), but then + // we wouldn't be able to test on Unix. + if (parts.get(6) != null && parts.get(6).startsWith("\\")) { + // Reconstruct windows path + parts.set(5, parts.get(5) + ":" + parts.get(6)); + parts.remove(6); + } else { + throw getMalFormedUrlException(url); + } } - // Visible for testing - static boolean isSameName(String currentName, String newName) throws IOException { - return isSameName(currentName, newName, null, getDefaultKerberosRealm()); + return parts; + } + + // Visible for testing + static boolean isSameName(String currentName, String newName) throws IOException { + return isSameName(currentName, newName, null, getDefaultKerberosRealm()); + } + + /** + * Computes the default kerberos realm if one is available. If one cannot be computed, null is + * returned. + * @return The default kerberos realm, or null. + */ + static String getDefaultKerberosRealm() { + try { + return KerberosUtil.getDefaultRealm(); + } catch (Exception e) { + if (LOGGER.isDebugEnabled()) { + // Include the stacktrace at DEBUG + LOGGER.debug(REALM_EQUIVALENCY_WARNING_MSG, e); + } else { + // Limit the content at WARN + LOGGER.warn(REALM_EQUIVALENCY_WARNING_MSG); + } } - - /** - * Computes the default kerberos realm if one is available. If one cannot be computed, null is - * returned. - * @return The default kerberos realm, or null. 
- */ - static String getDefaultKerberosRealm() { - try { - return KerberosUtil.getDefaultRealm(); - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - // Include the stacktrace at DEBUG - LOGGER.debug(REALM_EQUIVALENCY_WARNING_MSG, e); - } else { - // Limit the content at WARN - LOGGER.warn(REALM_EQUIVALENCY_WARNING_MSG); - } + return null; + } + + static boolean isSameName(String currentName, String newName, String hostname) + throws IOException { + return isSameName(currentName, newName, hostname, getDefaultKerberosRealm()); + } + + static boolean isSameName(String currentName, String newName, String hostname, + String defaultRealm) throws IOException { + final boolean newNameContainsRealm = newName.indexOf('@') != -1; + // Make sure to replace "_HOST" if it exists before comparing the principals. + if (newName.contains(org.apache.hadoop.security.SecurityUtil.HOSTNAME_PATTERN)) { + if (newNameContainsRealm) { + newName = org.apache.hadoop.security.SecurityUtil.getServerPrincipal(newName, hostname); + } else { + // If the principal ends with "/_HOST", replace "_HOST" with the hostname. + if (newName.endsWith("/_HOST")) { + newName = newName.substring(0, newName.length() - 5) + hostname; } - return null; + } } - - static boolean isSameName(String currentName, String newName, String hostname) - throws IOException { - return isSameName(currentName, newName, hostname, getDefaultKerberosRealm()); + // The new name doesn't contain a realm and we could compute a default realm + if (!newNameContainsRealm && defaultRealm != null) { + return currentName.equals(newName + "@" + defaultRealm); } - - static boolean isSameName(String currentName, String newName, String hostname, - String defaultRealm) throws IOException { - final boolean newNameContainsRealm = newName.indexOf('@') != -1; - // Make sure to replace "_HOST" if it exists before comparing the principals. - if (newName.contains(org.apache.hadoop.security.SecurityUtil.HOSTNAME_PATTERN)) { - if (newNameContainsRealm) { - newName = - org.apache.hadoop.security.SecurityUtil.getServerPrincipal(newName, - hostname); - } else { - // If the principal ends with "/_HOST", replace "_HOST" with the hostname. - if (newName.endsWith("/_HOST")) { - newName = newName.substring(0, newName.length() - 5) + hostname; - } - } - } - // The new name doesn't contain a realm and we could compute a default realm - if (!newNameContainsRealm && defaultRealm != null) { - return currentName.equals(newName + "@" + defaultRealm); - } - // We expect both names to contain a realm, so we can do a simple equality check - return currentName.equals(newName); + // We expect both names to contain a realm, so we can do a simple equality check + return currentName.equals(newName); + } + + /** + * Create a new Configuration object that merges the CQS properties and the Connection properties + * into the HBase configuration object + * @param props CQS properties + * @param info JDBC connection properties + * @return merged configuration + */ + protected static Configuration mergeConfiguration(Configuration configIn, ReadOnlyProps props, + Properties info) { + // TODO is cloning the configuration a performance problem ? 
+ Configuration config; + if (configIn != null) { + config = new Configuration(configIn); + } else { + // props/info contains everything + config = new Configuration(false); } - - /** - * Create a new Configuration object that merges the CQS properties and the Connection - * properties into the HBase configuration object - * @param props CQS properties - * @param info JDBC connection properties - * @return merged configuration - */ - protected static Configuration mergeConfiguration(Configuration configIn, ReadOnlyProps props, - Properties info) { - // TODO is cloning the configuration a performance problem ? - Configuration config; - if (configIn != null) { - config = new Configuration(configIn); - } else { - // props/info contains everything - config = new Configuration(false); - } - // Add QueryServices properties - if (props != null) { - for (Entry entry : props) { - config.set(entry.getKey(), entry.getValue()); - } - } - // Add any user-provided properties (via DriverManager) - if (info != null) { - for (Object key : info.keySet()) { - config.set((String) key, info.getProperty((String) key)); - } - } - return config; + // Add QueryServices properties + if (props != null) { + for (Entry entry : props) { + config.set(entry.getKey(), entry.getValue()); + } } - - protected Map getCommonProps() { - Map connectionProps = new HashMap<>(); - if (getPrincipal() != null && getKeytab() != null) { - connectionProps.put(QueryServices.HBASE_CLIENT_PRINCIPAL, getPrincipal()); - connectionProps.put(QueryServices.HBASE_CLIENT_KEYTAB, getKeytab()); - } - return connectionProps; + // Add any user-provided properties (via DriverManager) + if (info != null) { + for (Object key : info.keySet()) { + config.set((String) key, info.getProperty((String) key)); + } } - - public abstract ReadOnlyProps asProps(); - - public boolean isConnectionless() { - return isConnectionless; + return config; + } + + protected Map getCommonProps() { + Map connectionProps = new HashMap<>(); + if (getPrincipal() != null && getKeytab() != null) { + connectionProps.put(QueryServices.HBASE_CLIENT_PRINCIPAL, getPrincipal()); + connectionProps.put(QueryServices.HBASE_CLIENT_KEYTAB, getKeytab()); } - - public String getKeytab() { - return keytab; + return connectionProps; + } + + public abstract ReadOnlyProps asProps(); + + public boolean isConnectionless() { + return isConnectionless; + } + + public String getKeytab() { + return keytab; + } + + public String getPrincipal() { + return principal; + } + + public User getUser() { + return user; + } + + public String getHaGroup() { + return haGroup; + } + + public abstract String toUrl(); + + public abstract String getZookeeperConnectionString(); + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ConnectionInfo other = (ConnectionInfo) obj; + // `user` is guaranteed to be non-null + if (!other.user.equals(user)) return false; + if (principal == null) { + if (other.principal != null) return false; + } else if (!principal.equals(other.principal)) return false; + if (keytab == null) { + if (other.keytab != null) return false; + } else if (!keytab.equals(other.keytab)) return false; + if (haGroup == null) { + if (other.haGroup != null) return false; + } else if (!haGroup.equals(other.haGroup)) return false; + if (!connectionType.equals(other.connectionType)) { + return false; } - - public String getPrincipal() { - return principal; + return true; + } + + @Override + public int 
hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((principal == null) ? 0 : principal.hashCode()); + result = prime * result + ((keytab == null) ? 0 : keytab.hashCode()); + result = prime * result + ((haGroup == null) ? 0 : haGroup.hashCode()); + // `user` is guaranteed to be non-null + result = prime * result + user.hashCode(); + result = prime * result + connectionType.hashCode(); + return result; + } + + protected boolean anyNotNull(Object... params) { + for (Object param : params) { + if (param != null) { + return true; + } } - - public User getUser() { - return user; + return false; + } + + public abstract ConnectionInfo withPrincipal(String principal); + + /** + * Parent of the Builder classes for the immutable ConnectionInfo classes + * @since + */ + protected abstract static class Builder { + + protected boolean isConnectionless; + protected String principal; + protected String keytab; + protected User user; + protected String haGroup; + protected boolean doNotLogin = false; + protected ConnectionType connectionType; + + // Only used for building, not part of ConnectionInfo + protected final String url; + protected final Configuration config; + protected final ReadOnlyProps props; + protected final Properties info; + + public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { + this.config = config; + this.url = url; + this.props = props; + this.info = info; + this.connectionType = ConnectionType.CLIENT; + if (info != null && Boolean.valueOf(info.getProperty(QueryUtil.IS_SERVER_CONNECTION))) { + this.connectionType = ConnectionType.SERVER; + } } - public String getHaGroup() { - return haGroup; - } + protected abstract ConnectionInfo create() throws SQLException; - public abstract String toUrl(); - - public abstract String getZookeeperConnectionString(); - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ConnectionInfo other = (ConnectionInfo) obj; - // `user` is guaranteed to be non-null - if (!other.user.equals(user)) return false; - if (principal == null) { - if (other.principal != null) return false; - } else if (!principal.equals(other.principal)) return false; - if (keytab == null) { - if (other.keytab != null) return false; - } else if (!keytab.equals(other.keytab)) return false; - if (haGroup == null) { - if (other.haGroup != null) return false; - } else if (!haGroup.equals(other.haGroup)) return false; - if (!connectionType.equals(other.connectionType)) { - return false; + protected abstract void normalize() throws SQLException; + + protected String get(String key, String defValue) { + String result = null; + if (info != null) { + result = info.getProperty(key); + } + if (result == null) { + if (props != null) { + result = props.get(key); } - return true; + if (result == null) { + result = config.get(key, defValue); + } + } + return result; } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((principal == null) ? 0 : principal.hashCode()); - result = prime * result + ((keytab == null) ? 0 : keytab.hashCode()); - result = prime * result + ((haGroup == null) ? 
0 : haGroup.hashCode()); - // `user` is guaranteed to be non-null - result = prime * result + user.hashCode(); - result = prime * result + connectionType.hashCode(); - return result; + protected String get(String key) { + return get(key, null); } - protected boolean anyNotNull(Object... params) { - for (Object param : params) { - if (param != null) { - return true; - } - } - return false; + protected void setHaGroup() { + if (info != null) { + haGroup = info.getProperty(HighAvailabilityGroup.PHOENIX_HA_GROUP_ATTR); + } } - public abstract ConnectionInfo withPrincipal(String principal); - - /** - * Parent of the Builder classes for the immutable ConnectionInfo classes - * - * @since - */ - protected abstract static class Builder { - - protected boolean isConnectionless; - protected String principal; - protected String keytab; - protected User user; - protected String haGroup; - protected boolean doNotLogin = false; - protected ConnectionType connectionType; - - // Only used for building, not part of ConnectionInfo - protected final String url; - protected final Configuration config; - protected final ReadOnlyProps props; - protected final Properties info; - - public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { - this.config = config; - this.url = url; - this.props = props; - this.info = info; - this.connectionType = ConnectionType.CLIENT; - if (info != null && Boolean.valueOf(info.getProperty(QueryUtil.IS_SERVER_CONNECTION))) { - this.connectionType = ConnectionType.SERVER; - } - } - - protected abstract ConnectionInfo create() throws SQLException; - - protected abstract void normalize() throws SQLException; + protected void setDoNotLogin(boolean doNotLogin) { + this.doNotLogin = doNotLogin; + } - protected String get(String key, String defValue) { - String result = null; - if (info != null) { - result = info.getProperty(key); - } - if (result == null) { - if (props != null) { - result = props.get(key); + protected void handleKerberosAndLogin() throws SQLException { + // Previously we have ignored the kerberos properties defined in hbase-site.xml, + // but now we use them + try { + this.user = User.getCurrent(); + } catch (IOException e) { + throw new RuntimeException("Couldn't get the current user!!", e); + } + if (null == this.user) { + throw new RuntimeException("Acquired null user which should never happen"); + } + + if (isConnectionless) { + return; + } + + if (principal == null) { + principal = get(QueryServices.HBASE_CLIENT_PRINCIPAL); + } + if (keytab == null) { + keytab = get(QueryServices.HBASE_CLIENT_KEYTAB); + } + if ((principal == null) && (keytab != null)) { + throw getMalFormedUrlException(url); + } + // We allow specifying a principal without a keytab, in which case + // the principal is not used for kerberos, but is set as the connection user + if (principal != null && keytab != null && !doNotLogin) { + // PHOENIX-3189 Because ConnectionInfo is immutable, we must make sure all parts of + // it are correct before + // construction; this also requires the Kerberos user credentials object (since they + // are compared by reference + // and not by value. If the user provided a principal and keytab via the JDBC url, + // we must make sure that the + // Kerberos login happens *before* we construct the ConnectionInfo object. + // Otherwise, the use of ConnectionInfo + // to determine when ConnectionQueryServices impl's should be reused will be broken. 
+ try { + // Check if we need to authenticate with kerberos so that we cache the correct + // ConnectionInfo + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + if ( + !currentUser.hasKerberosCredentials() + || !isSameName(currentUser.getUserName(), principal) + ) { + synchronized (KERBEROS_LOGIN_LOCK) { + // Double check the current user, might have changed since we checked + // last. Don't want + // to re-login if it's the same user. + currentUser = UserGroupInformation.getCurrentUser(); + if ( + !currentUser.hasKerberosCredentials() + || !isSameName(currentUser.getUserName(), principal) + ) { + LOGGER.info("Trying to connect to a secure cluster as {} " + "with keytab {}", + principal, keytab); + // We are intentionally changing the passed in Configuration object + if (null != principal) { + config.set(QueryServices.HBASE_CLIENT_PRINCIPAL, principal); } - if (result == null) { - result = config.get(key, defValue); + if (null != keytab) { + config.set(QueryServices.HBASE_CLIENT_KEYTAB, keytab); } + UserGroupInformation.setConfiguration(config); + User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, + QueryServices.HBASE_CLIENT_PRINCIPAL, null); + user = User.getCurrent(); + LOGGER.info("Successful login to secure cluster"); + } } - return result; - } - - protected String get(String key) { - return get(key, null); + } else { + // The user already has Kerberos creds, so there isn't anything to change in + // the ConnectionInfo. + LOGGER.debug("Already logged in as {}", currentUser); + } + } catch (IOException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setRootCause(e).build().buildException(); } + } else { + LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login"); + } + } - protected void setHaGroup() { - if (info != null) { - haGroup = info.getProperty(HighAvailabilityGroup.PHOENIX_HA_GROUP_ATTR); - } + protected String normalizeHostsList(String quorum, Integer defaultPort) throws SQLException { + // The input host:port separator char is "=" (after unescaping) + String[] quorumParts = quorum.split(","); + String[] normalizedParts = new String[quorumParts.length]; + for (int i = 0; i < quorumParts.length; i++) { + String[] hostAndPort = quorumParts[i].trim().split(":"); + if (hostAndPort.length == 1) { + normalizedParts[i] = hostAndPort[0].trim().toLowerCase() + ":" + defaultPort; + } else if (hostAndPort.length == 2) { + normalizedParts[i] = quorumParts[i].trim().toLowerCase(); + } else { + throw getMalFormedUrlException(url); } + } + // We are sorting the host:port strings, so the sorting result may be unexpected, but + // as long as it's consistent, it doesn't matter. + Arrays.sort(normalizedParts); + return String.join(",", normalizedParts); + // TODO + // HBase will perform a further reverse lookup based normalization on the hosts, + // but we skip that. 
+ // In the unlikely worst case, we generate separate CQSI objects instead of sharing them + } - protected void setDoNotLogin(boolean doNotLogin) { - this.doNotLogin = doNotLogin; + protected StringTokenizer getTokenizerWithoutProtocol() throws SQLException { + StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, true); + try { + // Walk the first three tokens "jdbc", ":", "phoenix"/"phoenix+master"/"phoenix-zk" + // This should succeed, as we check for the "jdbc:phoenix" prefix when accepting the + // URL + if (!tokenizer.nextToken().toLowerCase().equals("jdbc")) { + throw new Exception(); } - - protected void handleKerberosAndLogin() throws SQLException { - // Previously we have ignored the kerberos properties defined in hbase-site.xml, - // but now we use them - try { - this.user = User.getCurrent(); - } catch (IOException e) { - throw new RuntimeException("Couldn't get the current user!!", e); - } - if (null == this.user) { - throw new RuntimeException("Acquired null user which should never happen"); - } - - if (isConnectionless) { - return; - } - - if (principal == null) { - principal = get(QueryServices.HBASE_CLIENT_PRINCIPAL); - } - if (keytab == null) { - keytab = get(QueryServices.HBASE_CLIENT_KEYTAB); - } - if ((principal == null) && (keytab != null)) { - throw getMalFormedUrlException(url); - } - // We allow specifying a principal without a keytab, in which case - // the principal is not used for kerberos, but is set as the connection user - if (principal != null && keytab != null && !doNotLogin) { - // PHOENIX-3189 Because ConnectionInfo is immutable, we must make sure all parts of - // it are correct before - // construction; this also requires the Kerberos user credentials object (since they - // are compared by reference - // and not by value. If the user provided a principal and keytab via the JDBC url, - // we must make sure that the - // Kerberos login happens *before* we construct the ConnectionInfo object. - // Otherwise, the use of ConnectionInfo - // to determine when ConnectionQueryServices impl's should be reused will be broken. - try { - // Check if we need to authenticate with kerberos so that we cache the correct - // ConnectionInfo - UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); - if (!currentUser.hasKerberosCredentials() - || !isSameName(currentUser.getUserName(), principal)) { - synchronized (KERBEROS_LOGIN_LOCK) { - // Double check the current user, might have changed since we checked - // last. Don't want - // to re-login if it's the same user. - currentUser = UserGroupInformation.getCurrentUser(); - if (!currentUser.hasKerberosCredentials() - || !isSameName(currentUser.getUserName(), principal)) { - LOGGER.info("Trying to connect to a secure cluster as {} " - + "with keytab {}", - principal, keytab); - // We are intentionally changing the passed in Configuration object - if (null != principal) { - config.set(QueryServices.HBASE_CLIENT_PRINCIPAL, principal); - } - if (null != keytab) { - config.set(QueryServices.HBASE_CLIENT_KEYTAB, keytab); - } - UserGroupInformation.setConfiguration(config); - User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, - QueryServices.HBASE_CLIENT_PRINCIPAL, null); - user = User.getCurrent(); - LOGGER.info("Successful login to secure cluster"); - } - } - } else { - // The user already has Kerberos creds, so there isn't anything to change in - // the ConnectionInfo. 
- LOGGER.debug("Already logged in as {}", currentUser); - } - } catch (IOException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setRootCause(e).build().buildException(); - } - } else { - LOGGER.debug("Principal and keytab not provided, not attempting Kerberos login"); - } + if (!tokenizer.nextToken().toLowerCase().equals(":")) { + throw new Exception(); } - - protected String normalizeHostsList(String quorum, Integer defaultPort) - throws SQLException { - // The input host:port separator char is "=" (after unescaping) - String[] quorumParts = quorum.split(","); - String[] normalizedParts = new String[quorumParts.length]; - for (int i = 0; i < quorumParts.length; i++) { - String[] hostAndPort = quorumParts[i].trim().split(":"); - if (hostAndPort.length == 1) { - normalizedParts[i] = hostAndPort[0].trim().toLowerCase() + ":" + defaultPort; - } else if (hostAndPort.length == 2) { - normalizedParts[i] = quorumParts[i].trim().toLowerCase(); - } else { - throw getMalFormedUrlException(url); - } - } - // We are sorting the host:port strings, so the sorting result may be unexpected, but - // as long as it's consistent, it doesn't matter. - Arrays.sort(normalizedParts); - return String.join(",", normalizedParts); - // TODO - // HBase will perform a further reverse lookup based normalization on the hosts, - // but we skip that. - // In the unlikely worst case, we generate separate CQSI objects instead of sharing them + if (!tokenizer.nextToken().toLowerCase().startsWith("phoenix")) { + throw new Exception(); } + } catch (Exception e) { + throw getMalFormedUrlException(url); + } + return tokenizer; + } - protected StringTokenizer getTokenizerWithoutProtocol() throws SQLException { - StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, true); - try { - // Walk the first three tokens "jdbc", ":", "phoenix"/"phoenix+master"/"phoenix-zk" - // This should succeed, as we check for the "jdbc:phoenix" prefix when accepting the - // URL - if (!tokenizer.nextToken().toLowerCase().equals("jdbc")) { - throw new Exception(); - } - if (!tokenizer.nextToken().toLowerCase().equals(":")) { - throw new Exception(); - } - if (!tokenizer.nextToken().toLowerCase().startsWith("phoenix")) { - throw new Exception(); - } - } catch (Exception e) { - throw getMalFormedUrlException(url); - } - return tokenizer; + protected static String get(String key, Configuration config, ReadOnlyProps props, + Properties info) { + String result = null; + if (info != null) { + result = info.getProperty(key); + } + if (result == null) { + if (props != null) { + result = props.get(key); } - - protected static String get(String key, Configuration config, ReadOnlyProps props, - Properties info) { - String result = null; - if (info != null) { - result = info.getProperty(key); - } - if (result == null) { - if (props != null) { - result = props.get(key); - } - if (result == null) { - result = config.get(key, null); - } - } - return result; + if (result == null) { + result = config.get(key, null); } + } + return result; } + } - public enum ConnectionType { - CLIENT, - SERVER - } + public enum ConnectionType { + CLIENT, + SERVER + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateConnection.java index 40c02d2af91..08f321eed1a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateConnection.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateConnection.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.jdbc; @@ -33,289 +40,286 @@ * Simple {@link Connection} that just delegates to an underlying {@link Connection}. 
*/ public class DelegateConnection implements Connection { - - protected Connection conn; - - public DelegateConnection(Connection conn) { - this.conn = conn; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return conn.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return conn.isWrapperFor(iface); - } - - @Override - public Statement createStatement() throws SQLException { - return conn.createStatement(); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - return conn.prepareStatement(sql); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - return conn.prepareCall(sql); - } - - @Override - public String nativeSQL(String sql) throws SQLException { - return conn.nativeSQL(sql); - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - conn.setAutoCommit(autoCommit); - } - - @Override - public boolean getAutoCommit() throws SQLException { - return conn.getAutoCommit(); - } - - @Override - public void commit() throws SQLException { - conn.commit(); - } - - @Override - public void rollback() throws SQLException { - conn.rollback(); - } - - @Override - public void close() throws SQLException { - conn.close(); - } - - @Override - public boolean isClosed() throws SQLException { - return conn.isClosed(); - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - return conn.getMetaData(); - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - conn.setReadOnly(readOnly); - } - - @Override - public boolean isReadOnly() throws SQLException { - return conn.isReadOnly(); - } - - @Override - public void setCatalog(String catalog) throws SQLException { - conn.setCatalog(catalog); - } - - @Override - public String getCatalog() throws SQLException { - return conn.getCatalog(); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - conn.setTransactionIsolation(level); - } - - @Override - public int getTransactionIsolation() throws SQLException { - return conn.getTransactionIsolation(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return conn.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - conn.clearWarnings(); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - return conn.createStatement(resultSetType, resultSetConcurrency); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - return conn.prepareStatement(sql, resultSetType, resultSetConcurrency); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - return conn.prepareCall(sql, resultSetType, resultSetConcurrency); - } - - @Override - public Map> getTypeMap() throws SQLException { - return conn.getTypeMap(); - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - conn.setTypeMap(map); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - conn.setHoldability(holdability); - } - - @Override - public int getHoldability() throws SQLException { - return conn.getHoldability(); - } - - @Override - public Savepoint setSavepoint() throws SQLException { - return conn.setSavepoint(); - } - - @Override - public 
Savepoint setSavepoint(String name) throws SQLException { - return conn.setSavepoint(name); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - conn.rollback(savepoint); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - conn.releaseSavepoint(savepoint); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return conn.prepareStatement(sql, resultSetType, resultSetConcurrency, - resultSetHoldability); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - return conn.prepareStatement(sql, autoGeneratedKeys); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return conn.prepareStatement(sql, columnIndexes); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { - return conn.prepareStatement(sql, columnNames); - } - - @Override - public Clob createClob() throws SQLException { - return conn.createClob(); - } - - @Override - public Blob createBlob() throws SQLException { - return conn.createBlob(); - } - - @Override - public NClob createNClob() throws SQLException { - return conn.createNClob(); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - return conn.createSQLXML(); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - return conn.isValid(timeout); - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - conn.setClientInfo(name, value); - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - conn.setClientInfo(properties); - } - - @Override - public String getClientInfo(String name) throws SQLException { - return conn.getClientInfo(name); - } - - @Override - public Properties getClientInfo() throws SQLException { - return conn.getClientInfo(); - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - return conn.createArrayOf(typeName, elements); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - return conn.createStruct(typeName, attributes); - } - - @Override - public void setSchema(String schema) throws SQLException { - conn.setSchema(schema); - } - - @Override - public String getSchema() throws SQLException { - return conn.getSchema(); - } - - @Override - public void abort(Executor executor) throws SQLException { - conn.abort(executor); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - conn.setNetworkTimeout(executor, milliseconds); - } - - @Override - public int getNetworkTimeout() throws SQLException { - return conn.getNetworkTimeout(); - } -} \ No newline at end of file + + protected Connection conn; 
+ + public DelegateConnection(Connection conn) { + this.conn = conn; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return conn.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return conn.isWrapperFor(iface); + } + + @Override + public Statement createStatement() throws SQLException { + return conn.createStatement(); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + return conn.prepareStatement(sql); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return conn.prepareCall(sql); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + return conn.nativeSQL(sql); + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + conn.setAutoCommit(autoCommit); + } + + @Override + public boolean getAutoCommit() throws SQLException { + return conn.getAutoCommit(); + } + + @Override + public void commit() throws SQLException { + conn.commit(); + } + + @Override + public void rollback() throws SQLException { + conn.rollback(); + } + + @Override + public void close() throws SQLException { + conn.close(); + } + + @Override + public boolean isClosed() throws SQLException { + return conn.isClosed(); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + return conn.getMetaData(); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + conn.setReadOnly(readOnly); + } + + @Override + public boolean isReadOnly() throws SQLException { + return conn.isReadOnly(); + } + + @Override + public void setCatalog(String catalog) throws SQLException { + conn.setCatalog(catalog); + } + + @Override + public String getCatalog() throws SQLException { + return conn.getCatalog(); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + conn.setTransactionIsolation(level); + } + + @Override + public int getTransactionIsolation() throws SQLException { + return conn.getTransactionIsolation(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return conn.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + conn.clearWarnings(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + return conn.createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return conn.prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return conn.prepareCall(sql, resultSetType, resultSetConcurrency); + } + + @Override + public Map> getTypeMap() throws SQLException { + return conn.getTypeMap(); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + conn.setTypeMap(map); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + conn.setHoldability(holdability); + } + + @Override + public int getHoldability() throws SQLException { + return conn.getHoldability(); + } + + @Override + public Savepoint setSavepoint() throws SQLException { + return conn.setSavepoint(); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + return conn.setSavepoint(name); + } 
+
+  @Override
+  public void rollback(Savepoint savepoint) throws SQLException {
+    conn.rollback(savepoint);
+  }
+
+  @Override
+  public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+    conn.releaseSavepoint(savepoint);
+  }
+
+  @Override
+  public Statement createStatement(int resultSetType, int resultSetConcurrency,
+    int resultSetHoldability) throws SQLException {
+    return conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
+  }
+
+  @Override
+  public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
+    int resultSetHoldability) throws SQLException {
+    return conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+  }
+
+  @Override
+  public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
+    int resultSetHoldability) throws SQLException {
+    return conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+  }
+
+  @Override
+  public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+    return conn.prepareStatement(sql, autoGeneratedKeys);
+  }
+
+  @Override
+  public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+    return conn.prepareStatement(sql, columnIndexes);
+  }
+
+  @Override
+  public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+    return conn.prepareStatement(sql, columnNames);
+  }
+
+  @Override
+  public Clob createClob() throws SQLException {
+    return conn.createClob();
+  }
+
+  @Override
+  public Blob createBlob() throws SQLException {
+    return conn.createBlob();
+  }
+
+  @Override
+  public NClob createNClob() throws SQLException {
+    return conn.createNClob();
+  }
+
+  @Override
+  public SQLXML createSQLXML() throws SQLException {
+    return conn.createSQLXML();
+  }
+
+  @Override
+  public boolean isValid(int timeout) throws SQLException {
+    return conn.isValid(timeout);
+  }
+
+  @Override
+  public void setClientInfo(String name, String value) throws SQLClientInfoException {
+    conn.setClientInfo(name, value);
+  }
+
+  @Override
+  public void setClientInfo(Properties properties) throws SQLClientInfoException {
+    conn.setClientInfo(properties);
+  }
+
+  @Override
+  public String getClientInfo(String name) throws SQLException {
+    return conn.getClientInfo(name);
+  }
+
+  @Override
+  public Properties getClientInfo() throws SQLException {
+    return conn.getClientInfo();
+  }
+
+  @Override
+  public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+    return conn.createArrayOf(typeName, elements);
+  }
+
+  @Override
+  public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
+    return conn.createStruct(typeName, attributes);
+  }
+
+  @Override
+  public void setSchema(String schema) throws SQLException {
+    conn.setSchema(schema);
+  }
+
+  @Override
+  public String getSchema() throws SQLException {
+    return conn.getSchema();
+  }
+
+  @Override
+  public void abort(Executor executor) throws SQLException {
+    conn.abort(executor);
+  }
+
+  @Override
+  public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
+    conn.setNetworkTimeout(executor, milliseconds);
+  }
+
+  @Override
+  public int getNetworkTimeout() throws SQLException {
+    return conn.getNetworkTimeout();
+  }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegatePreparedStatement.java
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegatePreparedStatement.java index 85301c9ac11..7bfe31cbae1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegatePreparedStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegatePreparedStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,512 +42,510 @@ public class DelegatePreparedStatement implements PreparedStatement { - protected PreparedStatement ps; - - public DelegatePreparedStatement(PreparedStatement ps) { - this.ps = ps; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return ps.unwrap(iface); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - return ps.executeQuery(sql); - } - - @Override - public ResultSet executeQuery() throws SQLException { - return ps.executeQuery(); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return ps.isWrapperFor(iface); - } - - @Override - public int executeUpdate(String sql) throws SQLException { - return ps.executeUpdate(sql); - } - - @Override - public int executeUpdate() throws SQLException { - return ps.executeUpdate(); - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - ps.setNull(parameterIndex, sqlType); - } - - @Override - public void close() throws SQLException { - ps.close(); - } - - @Override - public int getMaxFieldSize() throws SQLException { - return ps.getMaxFieldSize(); - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - ps.setBoolean(parameterIndex, x); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - ps.setByte(parameterIndex, x); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - ps.setMaxFieldSize(max); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - ps.setShort(parameterIndex, x); - } - - @Override - public int getMaxRows() throws SQLException { - return ps.getMaxRows(); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - ps.setInt(parameterIndex, x); - } - - @Override - public void setMaxRows(int max) throws SQLException { - ps.setMaxRows(max); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - ps.setLong(parameterIndex, x); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - ps.setFloat(parameterIndex, x); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - ps.setEscapeProcessing(enable); - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - ps.setDouble(parameterIndex, x); - } - - @Override - public int getQueryTimeout() throws SQLException { - return ps.getQueryTimeout(); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - ps.setBigDecimal(parameterIndex, x); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - ps.setQueryTimeout(seconds); - } - - @Override - public void setString(int 
parameterIndex, String x) throws SQLException { - ps.setString(parameterIndex, x); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - ps.setBytes(parameterIndex, x); - } - - @Override - public void cancel() throws SQLException { - ps.cancel(); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - ps.setDate(parameterIndex, x); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return ps.getWarnings(); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - ps.setTime(parameterIndex, x); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - ps.setTimestamp(parameterIndex, x); - } - - @Override - public void clearWarnings() throws SQLException { - ps.clearWarnings(); - } - - @Override - public void setCursorName(String name) throws SQLException { - ps.setCursorName(name); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - ps.setAsciiStream(parameterIndex, x, length); - } - - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) - throws SQLException { - ps.setUnicodeStream(parameterIndex, x, length); - } - - @Override - public boolean execute(String sql) throws SQLException { - return ps.execute(sql); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - ps.setBinaryStream(parameterIndex, x, length); - } - - @Override - public ResultSet getResultSet() throws SQLException { - return ps.getResultSet(); - } - - @Override - public int getUpdateCount() throws SQLException { - return ps.getUpdateCount(); - } - - @Override - public void clearParameters() throws SQLException { - ps.clearParameters(); - } - - @Override - public boolean getMoreResults() throws SQLException { - return ps.getMoreResults(); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - ps.setObject(parameterIndex, x, targetSqlType); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - ps.setFetchDirection(direction); - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - ps.setObject(parameterIndex, x); - } - - @Override - public int getFetchDirection() throws SQLException { - return ps.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - ps.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return ps.getFetchSize(); - } - - @Override - public boolean execute() throws SQLException { - return ps.execute(); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - return ps.getResultSetConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - return ps.getResultSetType(); - } - - @Override - public void addBatch(String sql) throws SQLException { - ps.addBatch(sql); - } - - @Override - public void addBatch() throws SQLException { - ps.addBatch(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) - throws SQLException { - ps.setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void clearBatch() throws SQLException { - ps.clearBatch(); - } - - @Override - public int[] executeBatch() throws SQLException { - return ps.executeBatch(); - } - - 
@Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - ps.setRef(parameterIndex, x); - } - - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { - ps.setBlob(parameterIndex, x); - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - ps.setClob(parameterIndex, x); - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - ps.setArray(parameterIndex, x); - } - - @Override - public Connection getConnection() throws SQLException { - return ps.getConnection(); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return ps.getMetaData(); - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - ps.setDate(parameterIndex, x, cal); - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - return ps.getMoreResults(current); - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - ps.setTime(parameterIndex, x, cal); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return ps.getGeneratedKeys(); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - ps.setTimestamp(parameterIndex, x, cal); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - return ps.executeUpdate(sql, autoGeneratedKeys); - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - ps.setNull(parameterIndex, sqlType, typeName); - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - return ps.executeUpdate(sql, columnIndexes); - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - ps.setURL(parameterIndex, x); - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - return ps.getParameterMetaData(); - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - ps.setRowId(parameterIndex, x); - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return ps.executeUpdate(sql, columnNames); - } - - @Override - public void setNString(int parameterIndex, String value) throws SQLException { - ps.setNString(parameterIndex, value); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, long length) - throws SQLException { - ps.setNCharacterStream(parameterIndex, value, length); - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - return ps.execute(sql, autoGeneratedKeys); - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - ps.setNClob(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - ps.setClob(parameterIndex, reader, length); - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return ps.execute(sql, columnIndexes); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) - throws SQLException { - ps.setBlob(parameterIndex, inputStream, length); - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - 
ps.setNClob(parameterIndex, reader, length); - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - return ps.execute(sql, columnNames); - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - ps.setSQLXML(parameterIndex, xmlObject); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) - throws SQLException { - ps.setObject(parameterIndex, x, targetSqlType, scaleOrLength); - } - - @Override - public int getResultSetHoldability() throws SQLException { - return ps.getResultSetHoldability(); - } - - @Override - public boolean isClosed() throws SQLException { - return ps.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - ps.setPoolable(poolable); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - ps.setAsciiStream(parameterIndex, x, length); - } - - @Override - public boolean isPoolable() throws SQLException { - return ps.isPoolable(); - } - - @Override - public void closeOnCompletion() throws SQLException { - ps.closeOnCompletion(); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) - throws SQLException { - ps.setBinaryStream(parameterIndex, x, length); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - return ps.isCloseOnCompletion(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) - throws SQLException { - ps.setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - ps.setAsciiStream(parameterIndex, x); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - ps.setBinaryStream(parameterIndex, x); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - ps.setCharacterStream(parameterIndex, reader); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - ps.setNCharacterStream(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - ps.setClob(parameterIndex, reader); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - ps.setBlob(parameterIndex, inputStream); - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - ps.setNClob(parameterIndex, reader); - } + protected PreparedStatement ps; + + public DelegatePreparedStatement(PreparedStatement ps) { + this.ps = ps; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return ps.unwrap(iface); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + return ps.executeQuery(sql); + } + + @Override + public ResultSet executeQuery() throws SQLException { + return ps.executeQuery(); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return ps.isWrapperFor(iface); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + return ps.executeUpdate(sql); + } + + @Override + public int executeUpdate() throws SQLException { + return ps.executeUpdate(); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + 
ps.setNull(parameterIndex, sqlType); + } + + @Override + public void close() throws SQLException { + ps.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + return ps.getMaxFieldSize(); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + ps.setBoolean(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + ps.setByte(parameterIndex, x); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + ps.setMaxFieldSize(max); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + ps.setShort(parameterIndex, x); + } + + @Override + public int getMaxRows() throws SQLException { + return ps.getMaxRows(); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + ps.setInt(parameterIndex, x); + } + + @Override + public void setMaxRows(int max) throws SQLException { + ps.setMaxRows(max); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + ps.setLong(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + ps.setFloat(parameterIndex, x); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + ps.setEscapeProcessing(enable); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + ps.setDouble(parameterIndex, x); + } + + @Override + public int getQueryTimeout() throws SQLException { + return ps.getQueryTimeout(); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + ps.setBigDecimal(parameterIndex, x); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + ps.setQueryTimeout(seconds); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + ps.setString(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + ps.setBytes(parameterIndex, x); + } + + @Override + public void cancel() throws SQLException { + ps.cancel(); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + ps.setDate(parameterIndex, x); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return ps.getWarnings(); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + ps.setTime(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + ps.setTimestamp(parameterIndex, x); + } + + @Override + public void clearWarnings() throws SQLException { + ps.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + ps.setCursorName(name); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + ps.setAsciiStream(parameterIndex, x, length); + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + ps.setUnicodeStream(parameterIndex, x, length); + } + + @Override + public boolean execute(String sql) throws SQLException { + return ps.execute(sql); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + ps.setBinaryStream(parameterIndex, x, length); + } + + @Override + public ResultSet getResultSet() throws 
SQLException { + return ps.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return ps.getUpdateCount(); + } + + @Override + public void clearParameters() throws SQLException { + ps.clearParameters(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return ps.getMoreResults(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + ps.setObject(parameterIndex, x, targetSqlType); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + ps.setFetchDirection(direction); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + ps.setObject(parameterIndex, x); + } + + @Override + public int getFetchDirection() throws SQLException { + return ps.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + ps.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return ps.getFetchSize(); + } + + @Override + public boolean execute() throws SQLException { + return ps.execute(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return ps.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return ps.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + ps.addBatch(sql); + } + + @Override + public void addBatch() throws SQLException { + ps.addBatch(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) + throws SQLException { + ps.setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void clearBatch() throws SQLException { + ps.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + return ps.executeBatch(); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + ps.setRef(parameterIndex, x); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + ps.setBlob(parameterIndex, x); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + ps.setClob(parameterIndex, x); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + ps.setArray(parameterIndex, x); + } + + @Override + public Connection getConnection() throws SQLException { + return ps.getConnection(); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return ps.getMetaData(); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + ps.setDate(parameterIndex, x, cal); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return ps.getMoreResults(current); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + ps.setTime(parameterIndex, x, cal); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return ps.getGeneratedKeys(); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + ps.setTimestamp(parameterIndex, x, cal); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return ps.executeUpdate(sql, autoGeneratedKeys); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException 
{ + ps.setNull(parameterIndex, sqlType, typeName); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return ps.executeUpdate(sql, columnIndexes); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + ps.setURL(parameterIndex, x); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return ps.getParameterMetaData(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + ps.setRowId(parameterIndex, x); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return ps.executeUpdate(sql, columnNames); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + ps.setNString(parameterIndex, value); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) + throws SQLException { + ps.setNCharacterStream(parameterIndex, value, length); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return ps.execute(sql, autoGeneratedKeys); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + ps.setNClob(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + ps.setClob(parameterIndex, reader, length); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return ps.execute(sql, columnIndexes); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) + throws SQLException { + ps.setBlob(parameterIndex, inputStream, length); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + ps.setNClob(parameterIndex, reader, length); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return ps.execute(sql, columnNames); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + ps.setSQLXML(parameterIndex, xmlObject); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) + throws SQLException { + ps.setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ps.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return ps.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + ps.setPoolable(poolable); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + ps.setAsciiStream(parameterIndex, x, length); + } + + @Override + public boolean isPoolable() throws SQLException { + return ps.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + ps.closeOnCompletion(); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + ps.setBinaryStream(parameterIndex, x, length); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return ps.isCloseOnCompletion(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) + throws SQLException { + ps.setCharacterStream(parameterIndex, 
reader, length); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + ps.setAsciiStream(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + ps.setBinaryStream(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + ps.setCharacterStream(parameterIndex, reader); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + ps.setNCharacterStream(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + ps.setClob(parameterIndex, reader); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + ps.setBlob(parameterIndex, inputStream); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + ps.setNClob(parameterIndex, reader); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateResultSet.java index 617943cc62c..89f8e37a25f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateResultSet.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.jdbc; @@ -34,975 +41,973 @@ public class DelegateResultSet implements ResultSet { - protected ResultSet rs; - - public DelegateResultSet(ResultSet rs) { - this.rs = rs; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return rs.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return rs.isWrapperFor(iface); - } - - @Override - public boolean next() throws SQLException { - return rs.next(); - } - - @Override - public void close() throws SQLException { - rs.close(); - } - - @Override - public boolean wasNull() throws SQLException { - return rs.wasNull(); - } - - @Override - public String getString(int columnIndex) throws SQLException { - return rs.getString(columnIndex); - } - - @Override - public boolean getBoolean(int columnIndex) throws SQLException { - return rs.getBoolean(columnIndex); - } - - @Override - public byte getByte(int columnIndex) throws SQLException { - return rs.getByte(columnIndex); - } - - @Override - public short getShort(int columnIndex) throws SQLException { - return rs.getShort(columnIndex); - } - - @Override - public int getInt(int columnIndex) throws SQLException { - return rs.getInt(columnIndex); - } - - @Override - public long getLong(int columnIndex) throws SQLException { - return rs.getLong(columnIndex); - } - - @Override - public float getFloat(int columnIndex) throws SQLException { - return rs.getFloat(columnIndex); - } - - @Override - public double getDouble(int columnIndex) throws SQLException { - return rs.getDouble(columnIndex); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - return rs.getBigDecimal(columnIndex, scale); - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - return rs.getBytes(columnIndex); - } - - @Override - public Date getDate(int columnIndex) throws SQLException { - return rs.getDate(columnIndex); - } - - @Override - public Time getTime(int columnIndex) throws SQLException { - return rs.getTime(columnIndex); - } - - @Override - public Timestamp getTimestamp(int columnIndex) throws SQLException { - return rs.getTimestamp(columnIndex); - } - - @Override - public InputStream getAsciiStream(int columnIndex) throws SQLException { - return rs.getAsciiStream(columnIndex); - } - - @Override - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - return rs.getUnicodeStream(columnIndex); - } - - @Override - public InputStream getBinaryStream(int columnIndex) throws SQLException { - return rs.getBinaryStream(columnIndex); - } - - @Override - public String getString(String columnLabel) throws SQLException { - return rs.getString(columnLabel); - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - return rs.getBoolean(columnLabel); - } - - @Override - public byte getByte(String columnLabel) throws SQLException { - return rs.getByte(columnLabel); - } - - @Override - public short getShort(String columnLabel) throws SQLException { - return rs.getShort(columnLabel); - } - - @Override - public int getInt(String columnLabel) throws SQLException { - return rs.getInt(columnLabel); - } - - @Override - public long getLong(String columnLabel) throws SQLException { - return rs.getLong(columnLabel); - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - return rs.getFloat(columnLabel); - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - return 
rs.getDouble(columnLabel); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return rs.getBigDecimal(columnLabel, scale); - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - return rs.getBytes(columnLabel); - } - - @Override - public Date getDate(String columnLabel) throws SQLException { - return rs.getDate(columnLabel); - } - - @Override - public Time getTime(String columnLabel) throws SQLException { - return rs.getTime(columnLabel); - } - - @Override - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return rs.getTimestamp(columnLabel); - } - - @Override - public InputStream getAsciiStream(String columnLabel) throws SQLException { - return rs.getAsciiStream(columnLabel); - } - - @Override - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - return rs.getUnicodeStream(columnLabel); - } - - @Override - public InputStream getBinaryStream(String columnLabel) throws SQLException { - return rs.getBinaryStream(columnLabel); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return rs.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - rs.clearWarnings(); - } - - @Override - public String getCursorName() throws SQLException { - return rs.getCursorName(); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return rs.getMetaData(); - } - - @Override - public Object getObject(int columnIndex) throws SQLException { - return rs.getObject(columnIndex); - } - - @Override - public Object getObject(String columnLabel) throws SQLException { - return rs.getObject(columnLabel); - } - - @Override - public int findColumn(String columnLabel) throws SQLException { - return rs.findColumn(columnLabel); - } - - @Override - public Reader getCharacterStream(int columnIndex) throws SQLException { - return rs.getCharacterStream(columnIndex); - } - - @Override - public Reader getCharacterStream(String columnLabel) throws SQLException { - return rs.getCharacterStream(columnLabel); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - return rs.getBigDecimal(columnIndex); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - return rs.getBigDecimal(columnLabel); - } - - @Override - public boolean isBeforeFirst() throws SQLException { - return rs.isBeforeFirst(); - } - - @Override - public boolean isAfterLast() throws SQLException { - return rs.isAfterLast(); - } - - @Override - public boolean isFirst() throws SQLException { - return rs.isFirst(); - } - - @Override - public boolean isLast() throws SQLException { - return rs.isLast(); - } - - @Override - public void beforeFirst() throws SQLException { - rs.beforeFirst(); - } - - @Override - public void afterLast() throws SQLException { - rs.afterLast(); - } - - @Override - public boolean first() throws SQLException { - return rs.first(); - } - - @Override - public boolean last() throws SQLException { - return rs.last(); - } - - @Override - public int getRow() throws SQLException { - return rs.getRow(); - } - - @Override - public boolean absolute(int row) throws SQLException { - return rs.absolute(row); - } - - @Override - public boolean relative(int rows) throws SQLException { - return rs.relative(rows); - } - - @Override - public boolean previous() throws SQLException { - return rs.previous(); - } - - @Override - public void setFetchDirection(int direction) 
throws SQLException { - rs.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return rs.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - rs.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return rs.getFetchSize(); - } - - @Override - public int getType() throws SQLException { - return rs.getType(); - } - - @Override - public int getConcurrency() throws SQLException { - return rs.getConcurrency(); - } - - @Override - public boolean rowUpdated() throws SQLException { - return rs.rowUpdated(); - } - - @Override - public boolean rowInserted() throws SQLException { - return rs.rowInserted(); - } - - @Override - public boolean rowDeleted() throws SQLException { - return rs.rowDeleted(); - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - rs.updateNull(columnIndex); - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - rs.updateBoolean(columnIndex, x); - } - - @Override - public void updateByte(int columnIndex, byte x) throws SQLException { - rs.updateByte(columnIndex, x); - } - - @Override - public void updateShort(int columnIndex, short x) throws SQLException { - rs.updateShort(columnIndex, x); - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - rs.updateInt(columnIndex, x); - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - rs.updateLong(columnIndex, x); - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - rs.updateFloat(columnIndex, x); - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - rs.updateDouble(columnIndex, x); - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - rs.updateBigDecimal(columnIndex, x); - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - rs.updateString(columnIndex, x); - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - rs.updateBytes(columnIndex, x); - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - rs.updateDate(columnIndex, x); - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - rs.updateTime(columnIndex, x); - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - rs.updateTimestamp(columnIndex, x); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - rs.updateAsciiStream(columnIndex, x, length); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - rs.updateBinaryStream(columnIndex, x, length); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - rs.updateCharacterStream(columnIndex, x, length); - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - rs.updateObject(columnIndex, x, scaleOrLength); - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - rs.updateObject(columnIndex, x); - } - - @Override - public void updateNull(String columnLabel) throws SQLException { - rs.updateNull(columnLabel); - } - - @Override - public void 
updateBoolean(String columnLabel, boolean x) throws SQLException { - rs.updateBoolean(columnLabel, x); - } - - @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - rs.updateByte(columnLabel, x); - } - - @Override - public void updateShort(String columnLabel, short x) throws SQLException { - rs.updateShort(columnLabel, x); - } - - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - rs.updateInt(columnLabel, x); - } - - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - rs.updateLong(columnLabel, x); - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - rs.updateFloat(columnLabel, x); - } - - @Override - public void updateDouble(String columnLabel, double x) throws SQLException { - rs.updateDouble(columnLabel, x); - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - rs.updateBigDecimal(columnLabel, x); - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - rs.updateString(columnLabel, x); - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - rs.updateBytes(columnLabel, x); - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - rs.updateDate(columnLabel, x); - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - rs.updateTime(columnLabel, x); - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - rs.updateTimestamp(columnLabel, x); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) - throws SQLException { - rs.updateAsciiStream(columnLabel, x, length); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) - throws SQLException { - rs.updateBinaryStream(columnLabel, x, length); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) - throws SQLException { - rs.updateCharacterStream(columnLabel, reader, length); - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - rs.updateObject(columnLabel, x, scaleOrLength); - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - rs.updateObject(columnLabel, x); - } - - @Override - public void insertRow() throws SQLException { - rs.insertRow(); - } - - @Override - public void updateRow() throws SQLException { - rs.updateRow(); - } - - @Override - public void deleteRow() throws SQLException { - rs.deleteRow(); - } - - @Override - public void refreshRow() throws SQLException { - rs.refreshRow(); - } - - @Override - public void cancelRowUpdates() throws SQLException { - rs.cancelRowUpdates(); - } - - @Override - public void moveToInsertRow() throws SQLException { - rs.moveToInsertRow(); - } - - @Override - public void moveToCurrentRow() throws SQLException { - rs.moveToCurrentRow(); - } - - @Override - public Statement getStatement() throws SQLException { - return rs.getStatement(); - } - - @Override - public Object getObject(int columnIndex, Map> map) throws SQLException { - return rs.getObject(columnIndex, map); - } - - @Override - public Ref getRef(int columnIndex) throws SQLException { - return rs.getRef(columnIndex); - } - - @Override - public Blob getBlob(int columnIndex) throws SQLException { - return 
rs.getBlob(columnIndex); - } - - @Override - public Clob getClob(int columnIndex) throws SQLException { - return rs.getClob(columnIndex); - } - - @Override - public Array getArray(int columnIndex) throws SQLException { - return rs.getArray(columnIndex); - } - - @Override - public Object getObject(String columnLabel, Map> map) throws SQLException { - return rs.getObject(columnLabel, map); - } - - @Override - public Ref getRef(String columnLabel) throws SQLException { - return rs.getRef(columnLabel); - } - - @Override - public Blob getBlob(String columnLabel) throws SQLException { - return rs.getBlob(columnLabel); - } - - @Override - public Clob getClob(String columnLabel) throws SQLException { - return rs.getClob(columnLabel); - } - - @Override - public Array getArray(String columnLabel) throws SQLException { - return rs.getArray(columnLabel); - } - - @Override - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - return rs.getDate(columnIndex, cal); - } - - @Override - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - return rs.getDate(columnLabel, cal); - } - - @Override - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - return rs.getTime(columnIndex, cal); - } - - @Override - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - return rs.getTime(columnLabel, cal); - } - - @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - return rs.getTimestamp(columnIndex, cal); - } - - @Override - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - return rs.getTimestamp(columnLabel, cal); - } - - @Override - public URL getURL(int columnIndex) throws SQLException { - return rs.getURL(columnIndex); - } - - @Override - public URL getURL(String columnLabel) throws SQLException { - return rs.getURL(columnLabel); - } - - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - rs.updateRef(columnIndex, x); - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - rs.updateRef(columnLabel, x); - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - rs.updateBlob(columnIndex, x); - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - rs.updateBlob(columnLabel, x); - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - rs.updateClob(columnIndex, x); - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - rs.updateClob(columnLabel, x); - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - rs.updateArray(columnIndex, x); - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - rs.updateArray(columnLabel, x); - } - - @Override - public RowId getRowId(int columnIndex) throws SQLException { - return rs.getRowId(columnIndex); - } - - @Override - public RowId getRowId(String columnLabel) throws SQLException { - return rs.getRowId(columnLabel); - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - rs.updateRowId(columnIndex, x); - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - rs.updateRowId(columnLabel, x); - } - - @Override - public int getHoldability() throws SQLException { - return rs.getHoldability(); - } - - @Override - public boolean isClosed() throws 
SQLException { - return rs.isClosed(); - } - - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - rs.updateNString(columnIndex, nString); - } - - @Override - public void updateNString(String columnLabel, String nString) throws SQLException { - rs.updateNString(columnLabel, nString); - } - - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - rs.updateNClob(columnIndex, nClob); - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - rs.updateNClob(columnLabel, nClob); - } - - @Override - public NClob getNClob(int columnIndex) throws SQLException { - return rs.getNClob(columnIndex); - } - - @Override - public NClob getNClob(String columnLabel) throws SQLException { - return rs.getNClob(columnLabel); - } - - @Override - public SQLXML getSQLXML(int columnIndex) throws SQLException { - return rs.getSQLXML(columnIndex); - } - - @Override - public SQLXML getSQLXML(String columnLabel) throws SQLException { - return rs.getSQLXML(columnLabel); - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - rs.updateSQLXML(columnIndex, xmlObject); - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - rs.updateSQLXML(columnLabel, xmlObject); - } - - @Override - public String getNString(int columnIndex) throws SQLException { - return rs.getNString(columnIndex); - } - - @Override - public String getNString(String columnLabel) throws SQLException { - return rs.getNString(columnLabel); - } - - @Override - public Reader getNCharacterStream(int columnIndex) throws SQLException { - return rs.getNCharacterStream(columnIndex); - } - - @Override - public Reader getNCharacterStream(String columnLabel) throws SQLException { - return rs.getNCharacterStream(columnLabel); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - rs.updateNCharacterStream(columnIndex, x, length); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) - throws SQLException { - rs.updateNCharacterStream(columnLabel, reader, length); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - rs.updateAsciiStream(columnIndex, x, length); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) - throws SQLException { - rs.updateBinaryStream(columnIndex, x, length); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - rs.updateCharacterStream(columnIndex, x, length); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) - throws SQLException { - rs.updateAsciiStream(columnLabel, x, length); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, long length) - throws SQLException { - rs.updateBinaryStream(columnLabel, x, length); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) - throws SQLException { - rs.updateCharacterStream(columnLabel, reader, length); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) - throws SQLException { - rs.updateBlob(columnIndex, inputStream, length); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) - 
throws SQLException { - rs.updateBlob(columnLabel, inputStream, length); - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - rs.updateClob(columnIndex, reader, length); - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - rs.updateClob(columnLabel, reader, length); - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - rs.updateNClob(columnIndex, reader, length); - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - rs.updateNClob(columnLabel, reader, length); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - rs.updateNCharacterStream(columnIndex, x); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - rs.updateNCharacterStream(columnLabel, reader); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - rs.updateAsciiStream(columnIndex, x); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - rs.updateBinaryStream(columnIndex, x); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - rs.updateCharacterStream(columnIndex, x); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - rs.updateAsciiStream(columnLabel, x); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - rs.updateBinaryStream(columnLabel, x); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - rs.updateCharacterStream(columnLabel, reader); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - rs.updateBlob(columnIndex, inputStream); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - rs.updateBlob(columnLabel, inputStream); - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - rs.updateClob(columnIndex, reader); - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - rs.updateClob(columnLabel, reader); - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - rs.updateNClob(columnIndex, reader); - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - rs.updateNClob(columnLabel, reader); - } - - @Override - public T getObject(int columnIndex, Class type) throws SQLException { - return rs.getObject(columnIndex, type); - } - - @Override - public T getObject(String columnLabel, Class type) throws SQLException { - return rs.getObject(columnLabel, type); - } + protected ResultSet rs; + + public DelegateResultSet(ResultSet rs) { + this.rs = rs; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return rs.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return rs.isWrapperFor(iface); + } + + @Override + public boolean next() throws SQLException { + return rs.next(); + } + + @Override + public void close() throws SQLException { + rs.close(); + } + + @Override + public boolean wasNull() throws 
SQLException { + return rs.wasNull(); + } + + @Override + public String getString(int columnIndex) throws SQLException { + return rs.getString(columnIndex); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + return rs.getBoolean(columnIndex); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + return rs.getByte(columnIndex); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + return rs.getShort(columnIndex); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + return rs.getInt(columnIndex); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + return rs.getLong(columnIndex); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + return rs.getFloat(columnIndex); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + return rs.getDouble(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + return rs.getBigDecimal(columnIndex, scale); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + return rs.getBytes(columnIndex); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + return rs.getDate(columnIndex); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + return rs.getTime(columnIndex); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + return rs.getTimestamp(columnIndex); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + return rs.getAsciiStream(columnIndex); + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + return rs.getUnicodeStream(columnIndex); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + return rs.getBinaryStream(columnIndex); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return rs.getString(columnLabel); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return rs.getBoolean(columnLabel); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return rs.getByte(columnLabel); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return rs.getShort(columnLabel); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return rs.getInt(columnLabel); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return rs.getLong(columnLabel); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return rs.getFloat(columnLabel); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return rs.getDouble(columnLabel); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return rs.getBigDecimal(columnLabel, scale); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return rs.getBytes(columnLabel); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return rs.getDate(columnLabel); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return rs.getTime(columnLabel); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return 
rs.getTimestamp(columnLabel); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return rs.getAsciiStream(columnLabel); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return rs.getUnicodeStream(columnLabel); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return rs.getBinaryStream(columnLabel); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return rs.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + rs.clearWarnings(); + } + + @Override + public String getCursorName() throws SQLException { + return rs.getCursorName(); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return rs.getMetaData(); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + return rs.getObject(columnIndex); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return rs.getObject(columnLabel); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + return rs.findColumn(columnLabel); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + return rs.getCharacterStream(columnIndex); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return rs.getCharacterStream(columnLabel); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + return rs.getBigDecimal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return rs.getBigDecimal(columnLabel); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return rs.isBeforeFirst(); + } + + @Override + public boolean isAfterLast() throws SQLException { + return rs.isAfterLast(); + } + + @Override + public boolean isFirst() throws SQLException { + return rs.isFirst(); + } + + @Override + public boolean isLast() throws SQLException { + return rs.isLast(); + } + + @Override + public void beforeFirst() throws SQLException { + rs.beforeFirst(); + } + + @Override + public void afterLast() throws SQLException { + rs.afterLast(); + } + + @Override + public boolean first() throws SQLException { + return rs.first(); + } + + @Override + public boolean last() throws SQLException { + return rs.last(); + } + + @Override + public int getRow() throws SQLException { + return rs.getRow(); + } + + @Override + public boolean absolute(int row) throws SQLException { + return rs.absolute(row); + } + + @Override + public boolean relative(int rows) throws SQLException { + return rs.relative(rows); + } + + @Override + public boolean previous() throws SQLException { + return rs.previous(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + rs.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return rs.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + rs.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return rs.getFetchSize(); + } + + @Override + public int getType() throws SQLException { + return rs.getType(); + } + + @Override + public int getConcurrency() throws SQLException { + return rs.getConcurrency(); + } + + @Override + public boolean rowUpdated() throws SQLException { + return 
rs.rowUpdated(); + } + + @Override + public boolean rowInserted() throws SQLException { + return rs.rowInserted(); + } + + @Override + public boolean rowDeleted() throws SQLException { + return rs.rowDeleted(); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + rs.updateNull(columnIndex); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + rs.updateBoolean(columnIndex, x); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + rs.updateByte(columnIndex, x); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + rs.updateShort(columnIndex, x); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + rs.updateInt(columnIndex, x); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + rs.updateLong(columnIndex, x); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + rs.updateFloat(columnIndex, x); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + rs.updateDouble(columnIndex, x); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + rs.updateBigDecimal(columnIndex, x); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + rs.updateString(columnIndex, x); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + rs.updateBytes(columnIndex, x); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + rs.updateDate(columnIndex, x); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + rs.updateTime(columnIndex, x); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + rs.updateTimestamp(columnIndex, x); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + rs.updateAsciiStream(columnIndex, x, length); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + rs.updateBinaryStream(columnIndex, x, length); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + rs.updateCharacterStream(columnIndex, x, length); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + rs.updateObject(columnIndex, x, scaleOrLength); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + rs.updateObject(columnIndex, x); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + rs.updateNull(columnLabel); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + rs.updateBoolean(columnLabel, x); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + rs.updateByte(columnLabel, x); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + rs.updateShort(columnLabel, x); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + rs.updateInt(columnLabel, x); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + rs.updateLong(columnLabel, x); + } + + @Override + public 
void updateFloat(String columnLabel, float x) throws SQLException { + rs.updateFloat(columnLabel, x); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + rs.updateDouble(columnLabel, x); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + rs.updateBigDecimal(columnLabel, x); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + rs.updateString(columnLabel, x); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + rs.updateBytes(columnLabel, x); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + rs.updateDate(columnLabel, x); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + rs.updateTime(columnLabel, x); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + rs.updateTimestamp(columnLabel, x); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + rs.updateAsciiStream(columnLabel, x, length); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) + throws SQLException { + rs.updateBinaryStream(columnLabel, x, length); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) + throws SQLException { + rs.updateCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + rs.updateObject(columnLabel, x, scaleOrLength); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + rs.updateObject(columnLabel, x); + } + + @Override + public void insertRow() throws SQLException { + rs.insertRow(); + } + + @Override + public void updateRow() throws SQLException { + rs.updateRow(); + } + + @Override + public void deleteRow() throws SQLException { + rs.deleteRow(); + } + + @Override + public void refreshRow() throws SQLException { + rs.refreshRow(); + } + + @Override + public void cancelRowUpdates() throws SQLException { + rs.cancelRowUpdates(); + } + + @Override + public void moveToInsertRow() throws SQLException { + rs.moveToInsertRow(); + } + + @Override + public void moveToCurrentRow() throws SQLException { + rs.moveToCurrentRow(); + } + + @Override + public Statement getStatement() throws SQLException { + return rs.getStatement(); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + return rs.getObject(columnIndex, map); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + return rs.getRef(columnIndex); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + return rs.getBlob(columnIndex); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + return rs.getClob(columnIndex); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + return rs.getArray(columnIndex); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return rs.getObject(columnLabel, map); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + return rs.getRef(columnLabel); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + return rs.getBlob(columnLabel); + } + + @Override + 
public Clob getClob(String columnLabel) throws SQLException { + return rs.getClob(columnLabel); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + return rs.getArray(columnLabel); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return rs.getDate(columnIndex, cal); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return rs.getDate(columnLabel, cal); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return rs.getTime(columnIndex, cal); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return rs.getTime(columnLabel, cal); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return rs.getTimestamp(columnIndex, cal); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return rs.getTimestamp(columnLabel, cal); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + return rs.getURL(columnIndex); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + return rs.getURL(columnLabel); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + rs.updateRef(columnIndex, x); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + rs.updateRef(columnLabel, x); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + rs.updateBlob(columnIndex, x); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + rs.updateBlob(columnLabel, x); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + rs.updateClob(columnIndex, x); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + rs.updateClob(columnLabel, x); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + rs.updateArray(columnIndex, x); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + rs.updateArray(columnLabel, x); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + return rs.getRowId(columnIndex); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + return rs.getRowId(columnLabel); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + rs.updateRowId(columnIndex, x); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + rs.updateRowId(columnLabel, x); + } + + @Override + public int getHoldability() throws SQLException { + return rs.getHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return rs.isClosed(); + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + rs.updateNString(columnIndex, nString); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + rs.updateNString(columnLabel, nString); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + rs.updateNClob(columnIndex, nClob); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + rs.updateNClob(columnLabel, nClob); + } + + @Override + public NClob getNClob(int 
columnIndex) throws SQLException { + return rs.getNClob(columnIndex); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + return rs.getNClob(columnLabel); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + return rs.getSQLXML(columnIndex); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + return rs.getSQLXML(columnLabel); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + rs.updateSQLXML(columnIndex, xmlObject); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + rs.updateSQLXML(columnLabel, xmlObject); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + return rs.getNString(columnIndex); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + return rs.getNString(columnLabel); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + return rs.getNCharacterStream(columnIndex); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + return rs.getNCharacterStream(columnLabel); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + rs.updateNCharacterStream(columnIndex, x, length); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + rs.updateNCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + rs.updateAsciiStream(columnIndex, x, length); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + rs.updateBinaryStream(columnIndex, x, length); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + rs.updateCharacterStream(columnIndex, x, length); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) + throws SQLException { + rs.updateAsciiStream(columnLabel, x, length); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) + throws SQLException { + rs.updateBinaryStream(columnLabel, x, length); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + rs.updateCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) + throws SQLException { + rs.updateBlob(columnIndex, inputStream, length); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) + throws SQLException { + rs.updateBlob(columnLabel, inputStream, length); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + rs.updateClob(columnIndex, reader, length); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + rs.updateClob(columnLabel, reader, length); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + rs.updateNClob(columnIndex, reader, length); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws 
SQLException { + rs.updateNClob(columnLabel, reader, length); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + rs.updateNCharacterStream(columnIndex, x); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + rs.updateNCharacterStream(columnLabel, reader); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + rs.updateAsciiStream(columnIndex, x); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + rs.updateBinaryStream(columnIndex, x); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + rs.updateCharacterStream(columnIndex, x); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + rs.updateAsciiStream(columnLabel, x); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + rs.updateBinaryStream(columnLabel, x); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + rs.updateCharacterStream(columnLabel, reader); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + rs.updateBlob(columnIndex, inputStream); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + rs.updateBlob(columnLabel, inputStream); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + rs.updateClob(columnIndex, reader); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + rs.updateClob(columnLabel, reader); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + rs.updateNClob(columnIndex, reader); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + rs.updateNClob(columnLabel, reader); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + return rs.getObject(columnIndex, type); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return rs.getObject(columnLabel, type); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateStatement.java index dfe2ba2dce2..9ccb40c7ed7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/DelegateStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,230 +25,230 @@ public class DelegateStatement implements Statement { - protected Statement stmt; - - public DelegateStatement(Statement stmt) { - this.stmt = stmt; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return stmt.unwrap(iface); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - return stmt.executeQuery(sql); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return stmt.isWrapperFor(iface); - } - - @Override - public int executeUpdate(String sql) throws SQLException { - return stmt.executeUpdate(sql); - } - - @Override - public void close() throws SQLException { - stmt.close(); - } - - @Override - public int getMaxFieldSize() throws SQLException { - return stmt.getMaxFieldSize(); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - stmt.setMaxFieldSize(max); - } - - @Override - public int getMaxRows() throws SQLException { - return stmt.getMaxRows(); - } - - @Override - public void setMaxRows(int max) throws SQLException { - stmt.setMaxRows(max); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - stmt.setEscapeProcessing(enable); - } - - @Override - public int getQueryTimeout() throws SQLException { - return stmt.getQueryTimeout(); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - stmt.setQueryTimeout(seconds); - } - - @Override - public void cancel() throws SQLException { - stmt.cancel(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return stmt.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - stmt.clearWarnings(); - } - - @Override - public void setCursorName(String name) throws SQLException { - stmt.setCursorName(name); - } - - @Override - public boolean execute(String sql) throws SQLException { - return stmt.execute(sql); - } - - @Override - public ResultSet getResultSet() throws SQLException { - return stmt.getResultSet(); - } - - @Override - public int getUpdateCount() throws SQLException { - return stmt.getUpdateCount(); - } - - @Override - public boolean getMoreResults() throws SQLException { - return stmt.getMoreResults(); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - stmt.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return stmt.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - stmt.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return stmt.getFetchSize(); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - return stmt.getResultSetConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - return stmt.getResultSetType(); - } - - @Override - public void addBatch(String sql) throws SQLException { - stmt.addBatch(sql); - } - - @Override - public void clearBatch() throws SQLException { - stmt.clearBatch(); - } - - @Override - public int[] executeBatch() throws SQLException { - return stmt.executeBatch(); - } - - @Override - public Connection getConnection() throws SQLException { - 
return stmt.getConnection(); - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - return stmt.getMoreResults(current); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return stmt.getGeneratedKeys(); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - return stmt.executeUpdate(sql, autoGeneratedKeys); - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - return stmt.executeUpdate(sql, columnIndexes); - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return stmt.executeUpdate(sql, columnNames); - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - return stmt.execute(sql, autoGeneratedKeys); - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return stmt.execute(sql, columnIndexes); - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - return stmt.execute(sql, columnNames); - } - - @Override - public int getResultSetHoldability() throws SQLException { - return stmt.getResultSetHoldability(); - } - - @Override - public boolean isClosed() throws SQLException { - return stmt.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - stmt.setPoolable(poolable); - } - - @Override - public boolean isPoolable() throws SQLException { - return stmt.isPoolable(); - } - - @Override - public void closeOnCompletion() throws SQLException { - stmt.closeOnCompletion(); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - return stmt.isCloseOnCompletion(); - } + protected Statement stmt; + + public DelegateStatement(Statement stmt) { + this.stmt = stmt; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return stmt.unwrap(iface); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + return stmt.executeQuery(sql); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return stmt.isWrapperFor(iface); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + return stmt.executeUpdate(sql); + } + + @Override + public void close() throws SQLException { + stmt.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + return stmt.getMaxFieldSize(); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + stmt.setMaxFieldSize(max); + } + + @Override + public int getMaxRows() throws SQLException { + return stmt.getMaxRows(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + stmt.setMaxRows(max); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + stmt.setEscapeProcessing(enable); + } + + @Override + public int getQueryTimeout() throws SQLException { + return stmt.getQueryTimeout(); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + stmt.setQueryTimeout(seconds); + } + + @Override + public void cancel() throws SQLException { + stmt.cancel(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return stmt.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + stmt.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + stmt.setCursorName(name); + } 
+ + @Override + public boolean execute(String sql) throws SQLException { + return stmt.execute(sql); + } + + @Override + public ResultSet getResultSet() throws SQLException { + return stmt.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return stmt.getUpdateCount(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return stmt.getMoreResults(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + stmt.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return stmt.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + stmt.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return stmt.getFetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return stmt.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return stmt.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + stmt.addBatch(sql); + } + + @Override + public void clearBatch() throws SQLException { + stmt.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + return stmt.executeBatch(); + } + + @Override + public Connection getConnection() throws SQLException { + return stmt.getConnection(); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return stmt.getMoreResults(current); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return stmt.getGeneratedKeys(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return stmt.executeUpdate(sql, autoGeneratedKeys); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return stmt.executeUpdate(sql, columnIndexes); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return stmt.executeUpdate(sql, columnNames); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return stmt.execute(sql, autoGeneratedKeys); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return stmt.execute(sql, columnIndexes); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return stmt.execute(sql, columnNames); + } + + @Override + public int getResultSetHoldability() throws SQLException { + return stmt.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return stmt.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + stmt.setPoolable(poolable); + } + + @Override + public boolean isPoolable() throws SQLException { + return stmt.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + stmt.closeOnCompletion(); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return stmt.isCloseOnCompletion(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPhoenixConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPhoenixConnection.java index 2ade5ef4de7..b6968b2b359 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPhoenixConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPhoenixConnection.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,16 +17,6 @@ */ package org.apache.phoenix.jdbc; -import org.apache.phoenix.exception.FailoverSQLException; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.monitoring.MetricType; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.sql.Array; import java.sql.Blob; import java.sql.CallableStatement; @@ -46,13 +36,23 @@ import java.util.Properties; import java.util.concurrent.Executor; +import org.apache.phoenix.exception.FailoverSQLException; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * An implementation of JDBC connection which supports failover between two cluster in an HA group. *
* During its lifetime, a failover Phoenix connection could possibly connect to two HBase clusters - * in an HA group mutually exclusively. It wraps and delegates the logic to a PhoenixConnection - * object. At any given time, the wrapped connection should only talk to the ACTIVE HBase cluster - * in the HA group. + * in an HA group mutually exclusively. It wraps and delegates the logic to a PhoenixConnection + * object. At any given time, the wrapped connection should only talk to the ACTIVE HBase cluster in + * the HA group. *
* A failover connection will behave according to the given failover policy upon cluster role * failover, especially when the current connected HBase cluster becomes STANDBY role from ACTIVE. @@ -61,585 +61,565 @@ * still use this connection after closing. *
* This class is not thread safe. - * * @see HighAvailabilityGroup * @see FailoverPolicy */ public class FailoverPhoenixConnection implements PhoenixMonitoredConnection { - /** - * Failover timeout interval after which failover operation will fail and clients can retry. - */ - public static final String FAILOVER_TIMEOUT_MS_ATTR = "phoenix.ha.failover.timeout.ms"; - public static final long FAILOVER_TIMEOUT_MS_DEFAULT = 10_000; - private static final Logger LOG = LoggerFactory.getLogger(FailoverPhoenixConnection.class); - /** - * Connection properties. - */ - private final Properties properties; - /** - * High availability group. - */ - private final HighAvailabilityGroup haGroup; - /** - * Failover policy, per connection. - */ - private final FailoverPolicy policy; - - /** - * True iff this connection has been closed by the client. - */ - private boolean isClosed; - /** - * The wrapped PhoenixConnection object which could be re-assigned upon failover operation. - */ - private PhoenixConnection connection; - - /** - * Mutation metrics before failover to current connection. - */ - private Map> previousMutationMetrics = new HashMap<>(); - /** - * Read metrics before failover to current connection. - */ - private Map> previousReadMetrics = new HashMap<>(); - - public FailoverPhoenixConnection(HighAvailabilityGroup haGroup, Properties properties) - throws SQLException { - this.properties = properties; - this.haGroup = haGroup; - this.policy = FailoverPolicy.get(properties); - this.isClosed = false; - this.connection = haGroup.connectActive(properties); - } - - /** - * This is used for explicit failover request made by client. - *
- * It fails over to the current ACTIVE HBase cluster; if failover happens in between, this could - * possibly target this same cluster again. - *
- * - * @param conn if not of FailoverPhoenixConnection type, throw illegal argument exception - * @param timeoutMs timeout in milliseconds to failover to current active cluster - * @throws SQLException if fails to failover - */ - public static void failover(Connection conn, long timeoutMs) throws SQLException { - Preconditions.checkNotNull(conn, "Connection to failover must not be null!"); - FailoverPhoenixConnection failoverConnection = conn.unwrap(FailoverPhoenixConnection.class); - if (failoverConnection == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage("Connection is not a valid FailoverPhoenixConnection object") - .build() - .buildException(); - } - failoverConnection.failover(timeoutMs); - } - - /** - * Helper method to merge two metrics map into one. - *
- * Shallow copy the first one, and deep copy the second one. - * An optimization is that, it will return the shallow directly if the deep is empty. - */ - private static Map> mergeMetricMaps( - Map> shallow, Map> deep) { - if (deep.isEmpty()) { - return shallow; - } - - Map> metrics = new HashMap<>(shallow); - deep.forEach((k, v) -> { - metrics.putIfAbsent(k, new HashMap<>()); - Map map = metrics.get(k); - v.forEach((kk, vv) -> { - Long value = map.getOrDefault(kk, 0L); - map.put(kk, value + vv); - }); - }); - return metrics; - } - - /** - * Failover this connection by switching underlying phoenix connection to the ACTIVE one. - *
- * If the current phoenix connection is already connecting to ACTIVE cluster, this is a no-op. - * - * @param timeoutMs timeout in ms waiting for a new connection to be established. - * @throws SQLException if fails to failover - */ - @VisibleForTesting - void failover(long timeoutMs) throws SQLException { - checkConnection(); - - if (haGroup.isActive(connection)) { - LOG.info("Connection {} is against ACTIVE cluster in HA group {}; skip failing over.", - connection.getURL(), haGroup.getGroupInfo().getName()); - return; - } - - PhoenixConnection newConn = null; - SQLException cause = null; - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (newConn == null && - EnvironmentEdgeManager.currentTimeMillis() < startTime + timeoutMs) { - try { - newConn = haGroup.connectActive(properties); - } catch (SQLException e) { - cause = e; - LOG.info("Got exception when trying to connect to active cluster.", e); - try { - Thread.sleep(100); // TODO: be smart than this - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - throw new SQLException("Got interrupted waiting for connection failover", e); - } - } - } - if (newConn == null) { - throw new FailoverSQLException("Can not failover connection", - haGroup.getGroupInfo().toString(), cause); - } - - final PhoenixConnection oldConn = connection; - connection = newConn; - if (oldConn != null) { - // aggregate metrics - previousMutationMetrics = oldConn.getMutationMetrics(); - previousReadMetrics = oldConn.getReadMetrics(); - oldConn.clearMetrics(); - - // close old connection - if (!oldConn.isClosed()) { - // TODO: what happens to in-flight edits/mutations? - // Can we copy into the new connection we do not allow this failover? - // MutationState state = oldConn.getMutationState(); - try { - oldConn.close(new SQLExceptionInfo - .Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) - .setMessage("Phoenix connection got closed due to failover") - .setHaGroupInfo(haGroup.getGroupInfo().toString()) - .build() - .buildException()); - } catch (SQLException e) { - LOG.error("Failed to close old connection after failover: {}", e.getMessage()); - LOG.info("Full stack when closing old connection after failover", e); - } - } - } - LOG.info("Connection {} failed over to {}", haGroup.getGroupInfo(), connection.getURL()); - } - - /** - * Connection can not be null before any operation. - *
- * Here when connection is non-null, we do not need to check if the wrapped connection is open. - * The reason is that each individual delegated call on the wrapped connection will internally - * check open itself, see {@link PhoenixConnection#checkOpen()}. - * - * @throws SQLException if current wrapped phoenix connection is not valid state - */ - private void checkConnection() throws SQLException { - if (isClosed) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED) - .setHaGroupInfo(haGroup.getGroupInfo().toString()) - .build() - .buildException(); - } - if (connection == null) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setMessage("Connection has not been established to ACTIVE HBase cluster") - .setHaGroupInfo(haGroup.getGroupInfo().toString()) - .build() - .buildException(); - } - } - - @Override - public void close() throws SQLException { - if (isClosed()) { - return; - } - + /** + * Failover timeout interval after which failover operation will fail and clients can retry. + */ + public static final String FAILOVER_TIMEOUT_MS_ATTR = "phoenix.ha.failover.timeout.ms"; + public static final long FAILOVER_TIMEOUT_MS_DEFAULT = 10_000; + private static final Logger LOG = LoggerFactory.getLogger(FailoverPhoenixConnection.class); + /** + * Connection properties. + */ + private final Properties properties; + /** + * High availability group. + */ + private final HighAvailabilityGroup haGroup; + /** + * Failover policy, per connection. + */ + private final FailoverPolicy policy; + + /** + * True iff this connection has been closed by the client. + */ + private boolean isClosed; + /** + * The wrapped PhoenixConnection object which could be re-assigned upon failover operation. + */ + private PhoenixConnection connection; + + /** + * Mutation metrics before failover to current connection. + */ + private Map> previousMutationMetrics = new HashMap<>(); + /** + * Read metrics before failover to current connection. + */ + private Map> previousReadMetrics = new HashMap<>(); + + public FailoverPhoenixConnection(HighAvailabilityGroup haGroup, Properties properties) + throws SQLException { + this.properties = properties; + this.haGroup = haGroup; + this.policy = FailoverPolicy.get(properties); + this.isClosed = false; + this.connection = haGroup.connectActive(properties); + } + + /** + * This is used for explicit failover request made by client. + *
+ * It fails over to the current ACTIVE HBase cluster; if failover happens in between, this could + * possibly target this same cluster again. + *
+ * @param conn if not of FailoverPhoenixConnection type, throw illegal argument exception + * @param timeoutMs timeout in milliseconds to failover to current active cluster + * @throws SQLException if fails to failover + */ + public static void failover(Connection conn, long timeoutMs) throws SQLException { + Preconditions.checkNotNull(conn, "Connection to failover must not be null!"); + FailoverPhoenixConnection failoverConnection = conn.unwrap(FailoverPhoenixConnection.class); + if (failoverConnection == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage("Connection is not a valid FailoverPhoenixConnection object").build() + .buildException(); + } + failoverConnection.failover(timeoutMs); + } + + /** + * Helper method to merge two metrics map into one. + *
+ * Shallow copy the first one, and deep copy the second one. An optimization is that, it will + * return the shallow directly if the deep is empty. + */ + private static Map> mergeMetricMaps( + Map> shallow, Map> deep) { + if (deep.isEmpty()) { + return shallow; + } + + Map> metrics = new HashMap<>(shallow); + deep.forEach((k, v) -> { + metrics.putIfAbsent(k, new HashMap<>()); + Map map = metrics.get(k); + v.forEach((kk, vv) -> { + Long value = map.getOrDefault(kk, 0L); + map.put(kk, value + vv); + }); + }); + return metrics; + } + + /** + * Failover this connection by switching underlying phoenix connection to the ACTIVE one. + *
+ * If the current phoenix connection is already connecting to ACTIVE cluster, this is a no-op. + * @param timeoutMs timeout in ms waiting for a new connection to be established. + * @throws SQLException if fails to failover + */ + @VisibleForTesting + void failover(long timeoutMs) throws SQLException { + checkConnection(); + + if (haGroup.isActive(connection)) { + LOG.info("Connection {} is against ACTIVE cluster in HA group {}; skip failing over.", + connection.getURL(), haGroup.getGroupInfo().getName()); + return; + } + + PhoenixConnection newConn = null; + SQLException cause = null; + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + while (newConn == null && EnvironmentEdgeManager.currentTimeMillis() < startTime + timeoutMs) { + try { + newConn = haGroup.connectActive(properties); + } catch (SQLException e) { + cause = e; + LOG.info("Got exception when trying to connect to active cluster.", e); try { - connection.close(); - connection.clearMetrics(); - } finally { - previousMutationMetrics.clear(); - previousReadMetrics.clear(); - isClosed = true; + Thread.sleep(100); // TODO: be smart than this + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new SQLException("Got interrupted waiting for connection failover", e); } - } - - @Override - public boolean isClosed() { - return isClosed; - } - - //// metrics for monitoring methods - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(getClass().getName() + " not unwrappable from " + iface.getName()) - .build() - .buildException(); - } - return (T) this; - } - - @Override - public Map> getMutationMetrics() { - return mergeMetricMaps(connection.getMutationMetrics(), previousMutationMetrics); - } - - @Override - public Map> getReadMetrics() { - return mergeMetricMaps(connection.getReadMetrics(), previousReadMetrics); - } - - @Override - public boolean isRequestLevelMetricsEnabled() { - return connection != null && connection.isRequestLevelMetricsEnabled(); - } - - @Override - public void clearMetrics() { - previousMutationMetrics.clear(); - previousReadMetrics.clear(); - if (connection != null) { - connection.clearMetrics(); + } + } + if (newConn == null) { + throw new FailoverSQLException("Can not failover connection", + haGroup.getGroupInfo().toString(), cause); + } + + final PhoenixConnection oldConn = connection; + connection = newConn; + if (oldConn != null) { + // aggregate metrics + previousMutationMetrics = oldConn.getMutationMetrics(); + previousReadMetrics = oldConn.getReadMetrics(); + oldConn.clearMetrics(); + + // close old connection + if (!oldConn.isClosed()) { + // TODO: what happens to in-flight edits/mutations? + // Can we copy into the new connection we do not allow this failover? + // MutationState state = oldConn.getMutationState(); + try { + oldConn.close(new SQLExceptionInfo.Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) + .setMessage("Phoenix connection got closed due to failover") + .setHaGroupInfo(haGroup.getGroupInfo().toString()).build().buildException()); + } catch (SQLException e) { + LOG.error("Failed to close old connection after failover: {}", e.getMessage()); + LOG.info("Full stack when closing old connection after failover", e); } - } - - //// Wrapping phoenix connection operations - - /** - * This is the utility method to help wrapping a method call to phoenix connection. 
- * - * @param s the supplier which returns a value and may throw SQLException - * @param type of the returned object by the supplier - * @return the object returned by the supplier if any - * @throws SQLException exception when getting object from the supplier - */ - @VisibleForTesting - T wrapActionDuringFailover(SupplierWithSQLException s) throws SQLException { - checkConnection(); - final long timeoutMs = Long.parseLong(properties.getProperty(FAILOVER_TIMEOUT_MS_ATTR, - String.valueOf(FAILOVER_TIMEOUT_MS_DEFAULT))); - int failoverCount = 0; - while (true) { - try { - return s.get(); - } catch (SQLException e) { - if (policy.shouldFailover(e, ++failoverCount)) { - failover(timeoutMs); - } else { - throw new SQLException( - String.format("Error on operation with failover policy %s", policy), e); - } - } + } + } + LOG.info("Connection {} failed over to {}", haGroup.getGroupInfo(), connection.getURL()); + } + + /** + * Connection can not be null before any operation. + *
+ * Here when connection is non-null, we do not need to check if the wrapped connection is open. + * The reason is that each individual delegated call on the wrapped connection will internally + * check open itself, see {@link PhoenixConnection#checkOpen()}. + * @throws SQLException if current wrapped phoenix connection is not valid state + */ + private void checkConnection() throws SQLException { + if (isClosed) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED) + .setHaGroupInfo(haGroup.getGroupInfo().toString()).build().buildException(); + } + if (connection == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setMessage("Connection has not been established to ACTIVE HBase cluster") + .setHaGroupInfo(haGroup.getGroupInfo().toString()).build().buildException(); + } + } + + @Override + public void close() throws SQLException { + if (isClosed()) { + return; + } + + try { + connection.close(); + connection.clearMetrics(); + } finally { + previousMutationMetrics.clear(); + previousReadMetrics.clear(); + isClosed = true; + } + } + + @Override + public boolean isClosed() { + return isClosed; + } + + //// metrics for monitoring methods + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } + + @Override + public Map> getMutationMetrics() { + return mergeMetricMaps(connection.getMutationMetrics(), previousMutationMetrics); + } + + @Override + public Map> getReadMetrics() { + return mergeMetricMaps(connection.getReadMetrics(), previousReadMetrics); + } + + @Override + public boolean isRequestLevelMetricsEnabled() { + return connection != null && connection.isRequestLevelMetricsEnabled(); + } + + @Override + public void clearMetrics() { + previousMutationMetrics.clear(); + previousReadMetrics.clear(); + if (connection != null) { + connection.clearMetrics(); + } + } + + //// Wrapping phoenix connection operations + + /** + * This is the utility method to help wrapping a method call to phoenix connection. 
+ * @param s the supplier which returns a value and may throw SQLException + * @param type of the returned object by the supplier + * @return the object returned by the supplier if any + * @throws SQLException exception when getting object from the supplier + */ + @VisibleForTesting + T wrapActionDuringFailover(SupplierWithSQLException s) throws SQLException { + checkConnection(); + final long timeoutMs = Long.parseLong(properties.getProperty(FAILOVER_TIMEOUT_MS_ATTR, + String.valueOf(FAILOVER_TIMEOUT_MS_DEFAULT))); + int failoverCount = 0; + while (true) { + try { + return s.get(); + } catch (SQLException e) { + if (policy.shouldFailover(e, ++failoverCount)) { + failover(timeoutMs); + } else { + throw new SQLException( + String.format("Error on operation with failover policy %s", policy), e); } - } - - @VisibleForTesting - void wrapActionDuringFailover(RunWithSQLException runnable) throws SQLException { - wrapActionDuringFailover(() -> { - runnable.run(); - return null; - }); - } - - @Override - public void commit() throws SQLException { - wrapActionDuringFailover(() -> connection.commit()); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return wrapActionDuringFailover(() -> connection.isWrapperFor(iface)); - } - - @Override - public Statement createStatement() throws SQLException { - return wrapActionDuringFailover(() -> connection.createStatement()); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - return wrapActionDuringFailover(() -> connection.prepareStatement(sql)); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - return wrapActionDuringFailover(() -> connection.prepareCall(sql)); - } - - @Override - public String nativeSQL(String sql) throws SQLException { - return wrapActionDuringFailover(() -> connection.nativeSQL(sql)); - } - - @Override - public boolean getAutoCommit() throws SQLException { - return wrapActionDuringFailover(() -> connection.getAutoCommit()); - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - wrapActionDuringFailover(() -> connection.setAutoCommit(autoCommit)); - } - - @Override - public void rollback() throws SQLException { - wrapActionDuringFailover(() -> connection.rollback()); - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - return wrapActionDuringFailover(() -> connection.getMetaData()); - } - - @Override - public boolean isReadOnly() throws SQLException { - return wrapActionDuringFailover(() -> connection.isReadOnly()); - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - wrapActionDuringFailover(() -> connection.setReadOnly(readOnly)); - } - - @Override - public String getCatalog() throws SQLException { - return wrapActionDuringFailover(() -> connection.getCatalog()); - } - - @Override - public void setCatalog(String catalog) throws SQLException { - wrapActionDuringFailover(() -> connection.setCatalog(catalog)); - } - - @Override - public int getTransactionIsolation() throws SQLException { - //noinspection MagicConstant - return wrapActionDuringFailover(() -> connection.getTransactionIsolation()); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - wrapActionDuringFailover(() -> connection.setTransactionIsolation(level)); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return wrapActionDuringFailover(() -> connection.getWarnings()); - } - - @Override - 
public void clearWarnings() throws SQLException { - wrapActionDuringFailover(() -> connection.clearWarnings()); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - return wrapActionDuringFailover(() -> connection - .createStatement(resultSetType, resultSetConcurrency)); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - return wrapActionDuringFailover(() -> connection - .prepareStatement(sql, resultSetType, resultSetConcurrency)); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - return wrapActionDuringFailover(() -> connection - .prepareCall(sql, resultSetType, resultSetConcurrency)); - } - - @Override - public Map> getTypeMap() throws SQLException { - return wrapActionDuringFailover(() -> connection.getTypeMap()); - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - wrapActionDuringFailover(() -> connection.setTypeMap(map)); - } - - @Override - public int getHoldability() throws SQLException { - return wrapActionDuringFailover(() -> connection.getHoldability()); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - wrapActionDuringFailover(() -> connection.setHoldability(holdability)); - } - - @Override - public Savepoint setSavepoint() throws SQLException { - return wrapActionDuringFailover(() -> connection.setSavepoint()); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - return wrapActionDuringFailover(() -> connection.setSavepoint(name)); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - wrapActionDuringFailover(() -> connection.rollback(savepoint)); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - wrapActionDuringFailover(() -> connection.releaseSavepoint(savepoint)); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return wrapActionDuringFailover(() -> connection - .createStatement(resultSetType, resultSetConcurrency, resultSetHoldability)); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return wrapActionDuringFailover(() -> connection - .prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability)); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return wrapActionDuringFailover(() -> connection - .prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability)); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - return wrapActionDuringFailover(() -> connection.prepareStatement(sql, autoGeneratedKeys)); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return wrapActionDuringFailover(() -> connection.prepareStatement(sql, columnIndexes)); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { - return wrapActionDuringFailover(() -> connection.prepareStatement(sql, columnNames)); - } - - 
@Override - public Clob createClob() throws SQLException { - return wrapActionDuringFailover(() -> connection.createClob()); - } - - @Override - public Blob createBlob() throws SQLException { - return wrapActionDuringFailover(() -> connection.createBlob()); - } - - @Override - public NClob createNClob() throws SQLException { - return wrapActionDuringFailover(() -> connection.createNClob()); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - checkConnection(); - return wrapActionDuringFailover(() -> connection.createSQLXML()); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - return wrapActionDuringFailover(() -> connection.isValid(timeout)); - } - - @Override - public void setClientInfo(String name, String value) { - throw new UnsupportedOperationException(); - } - - @Override - public String getClientInfo(String name) throws SQLException { - return wrapActionDuringFailover(() -> connection.getClientInfo(name)); - } - - @Override - public Properties getClientInfo() throws SQLException { - return wrapActionDuringFailover(() -> connection.getClientInfo()); - } - - @Override - public void setClientInfo(Properties properties) { - throw new UnsupportedOperationException(); - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - return wrapActionDuringFailover(() -> connection.createArrayOf(typeName, elements)); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - return wrapActionDuringFailover(() -> connection.createStruct(typeName, attributes)); - } - - @Override - public String getSchema() throws SQLException { - return wrapActionDuringFailover(() -> connection.getSchema()); - } - - @Override - public void setSchema(String schema) throws SQLException { - wrapActionDuringFailover(() -> connection.setSchema(schema)); - } - - @Override - public void abort(Executor executor) throws SQLException { - wrapActionDuringFailover(() -> connection.abort(executor)); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - wrapActionDuringFailover(() -> connection.setNetworkTimeout(executor, milliseconds)); - } - - @Override - public int getNetworkTimeout() throws SQLException { - return wrapActionDuringFailover(() -> connection.getNetworkTimeout()); - } - - /** - * @return the currently wrapped connection. 
- */ - @VisibleForTesting - PhoenixConnection getWrappedConnection() { - return connection; - } - - @VisibleForTesting - @FunctionalInterface - interface SupplierWithSQLException { - T get() throws SQLException; - } - - @VisibleForTesting - @FunctionalInterface - interface RunWithSQLException { - void run() throws SQLException; - } + } + } + } + + @VisibleForTesting + void wrapActionDuringFailover(RunWithSQLException runnable) throws SQLException { + wrapActionDuringFailover(() -> { + runnable.run(); + return null; + }); + } + + @Override + public void commit() throws SQLException { + wrapActionDuringFailover(() -> connection.commit()); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return wrapActionDuringFailover(() -> connection.isWrapperFor(iface)); + } + + @Override + public Statement createStatement() throws SQLException { + return wrapActionDuringFailover(() -> connection.createStatement()); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareStatement(sql)); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareCall(sql)); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + return wrapActionDuringFailover(() -> connection.nativeSQL(sql)); + } + + @Override + public boolean getAutoCommit() throws SQLException { + return wrapActionDuringFailover(() -> connection.getAutoCommit()); + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + wrapActionDuringFailover(() -> connection.setAutoCommit(autoCommit)); + } + + @Override + public void rollback() throws SQLException { + wrapActionDuringFailover(() -> connection.rollback()); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + return wrapActionDuringFailover(() -> connection.getMetaData()); + } + + @Override + public boolean isReadOnly() throws SQLException { + return wrapActionDuringFailover(() -> connection.isReadOnly()); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + wrapActionDuringFailover(() -> connection.setReadOnly(readOnly)); + } + + @Override + public String getCatalog() throws SQLException { + return wrapActionDuringFailover(() -> connection.getCatalog()); + } + + @Override + public void setCatalog(String catalog) throws SQLException { + wrapActionDuringFailover(() -> connection.setCatalog(catalog)); + } + + @Override + public int getTransactionIsolation() throws SQLException { + // noinspection MagicConstant + return wrapActionDuringFailover(() -> connection.getTransactionIsolation()); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + wrapActionDuringFailover(() -> connection.setTransactionIsolation(level)); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return wrapActionDuringFailover(() -> connection.getWarnings()); + } + + @Override + public void clearWarnings() throws SQLException { + wrapActionDuringFailover(() -> connection.clearWarnings()); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + return wrapActionDuringFailover( + () -> connection.createStatement(resultSetType, resultSetConcurrency)); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + 
throws SQLException { + return wrapActionDuringFailover( + () -> connection.prepareStatement(sql, resultSetType, resultSetConcurrency)); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return wrapActionDuringFailover( + () -> connection.prepareCall(sql, resultSetType, resultSetConcurrency)); + } + + @Override + public Map> getTypeMap() throws SQLException { + return wrapActionDuringFailover(() -> connection.getTypeMap()); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + wrapActionDuringFailover(() -> connection.setTypeMap(map)); + } + + @Override + public int getHoldability() throws SQLException { + return wrapActionDuringFailover(() -> connection.getHoldability()); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + wrapActionDuringFailover(() -> connection.setHoldability(holdability)); + } + + @Override + public Savepoint setSavepoint() throws SQLException { + return wrapActionDuringFailover(() -> connection.setSavepoint()); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + return wrapActionDuringFailover(() -> connection.setSavepoint(name)); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + wrapActionDuringFailover(() -> connection.rollback(savepoint)); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + wrapActionDuringFailover(() -> connection.releaseSavepoint(savepoint)); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return wrapActionDuringFailover( + () -> connection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability)); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareStatement(sql, resultSetType, + resultSetConcurrency, resultSetHoldability)); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return wrapActionDuringFailover( + () -> connection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability)); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareStatement(sql, autoGeneratedKeys)); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareStatement(sql, columnIndexes)); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return wrapActionDuringFailover(() -> connection.prepareStatement(sql, columnNames)); + } + + @Override + public Clob createClob() throws SQLException { + return wrapActionDuringFailover(() -> connection.createClob()); + } + + @Override + public Blob createBlob() throws SQLException { + return wrapActionDuringFailover(() -> connection.createBlob()); + } + + @Override + public NClob createNClob() throws SQLException { + return wrapActionDuringFailover(() -> connection.createNClob()); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + 
checkConnection(); + return wrapActionDuringFailover(() -> connection.createSQLXML()); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + return wrapActionDuringFailover(() -> connection.isValid(timeout)); + } + + @Override + public void setClientInfo(String name, String value) { + throw new UnsupportedOperationException(); + } + + @Override + public String getClientInfo(String name) throws SQLException { + return wrapActionDuringFailover(() -> connection.getClientInfo(name)); + } + + @Override + public Properties getClientInfo() throws SQLException { + return wrapActionDuringFailover(() -> connection.getClientInfo()); + } + + @Override + public void setClientInfo(Properties properties) { + throw new UnsupportedOperationException(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + return wrapActionDuringFailover(() -> connection.createArrayOf(typeName, elements)); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + return wrapActionDuringFailover(() -> connection.createStruct(typeName, attributes)); + } + + @Override + public String getSchema() throws SQLException { + return wrapActionDuringFailover(() -> connection.getSchema()); + } + + @Override + public void setSchema(String schema) throws SQLException { + wrapActionDuringFailover(() -> connection.setSchema(schema)); + } + + @Override + public void abort(Executor executor) throws SQLException { + wrapActionDuringFailover(() -> connection.abort(executor)); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + wrapActionDuringFailover(() -> connection.setNetworkTimeout(executor, milliseconds)); + } + + @Override + public int getNetworkTimeout() throws SQLException { + return wrapActionDuringFailover(() -> connection.getNetworkTimeout()); + } + + /** Returns the currently wrapped connection. */ + @VisibleForTesting + PhoenixConnection getWrappedConnection() { + return connection; + } + + @VisibleForTesting + @FunctionalInterface + interface SupplierWithSQLException { + T get() throws SQLException; + } + + @VisibleForTesting + @FunctionalInterface + interface RunWithSQLException { + void run() throws SQLException; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPolicy.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPolicy.java index 4d81bfd9a2f..390c651d223 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPolicy.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/FailoverPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,118 +27,112 @@ /** * A failover policy defines how failover connection deals with existing connections in case of - * cluster role transition is detected. - * - * When an HBase cluster is not in ACTIVE role any more, all connections against it will get closed. - * To handle a failover event, the failover connection will use the failover policy for taking - * further actions. 
Those supported failover policies are defined here, but in future we can load - * the policy implemented and configured by user at runtime for each connection. The default policy - * requires that clients have to deal with the failover exception explicitly. + * cluster role transition is detected. When an HBase cluster is not in ACTIVE role any more, all + * connections against it will get closed. To handle a failover event, the failover connection will + * use the failover policy for taking further actions. Those supported failover policies are defined + * here, but in future we can load the policy implemented and configured by user at runtime for each + * connection. The default policy requires that clients have to deal with the failover exception + * explicitly. */ @Immutable @FunctionalInterface public interface FailoverPolicy { - String PHOENIX_HA_FAILOVER_POLICY_ATTR = "phoenix.ha.failover.policy"; - String PHOENIX_HA_FAILOVER_COUNT_ATTR = "phoenix.ha.failover.count"; - - /** - * Should try to failover by connecting to current ACTIVE HBase cluster (if any). - * - * @param exception the exception caught upon which this method is possible called - * @param failoverCount how many time so far this failover has been attempted - * @return true if caller should get a new phoenix connection against the ACTIVE HBase cluster - */ - boolean shouldFailover(Exception exception, int failoverCount); - - /** - * With this policy, clients have to deal with the failover exception explicitly if any. - * - * A {@link FailoverSQLException} exception will be thrown to the client when they try to use - * the closed connections. Specially, the high availability (HA) framework will not connect to - * the new ACTIVE cluster automatically for clients, but instead a client should: - * - re-connect to this HA group, in which case it will get a new connection wrapping a Phoenix - * connection to the newly ACTIVE cluster; OR - * - call static method {@link FailoverPhoenixConnection#failover(Connection,long)} explicitly. - * After that, it can create new Statement/ResultSet and retry the business logic. - * If neither cluster is ACTIVE, connect requests to this HA group will keep getting exception. - */ - class ExplicitFailoverPolicy implements FailoverPolicy { - public static final String NAME = "explicit"; - private static final ExplicitFailoverPolicy INSTANCE = new ExplicitFailoverPolicy(); - - @Override - public boolean shouldFailover(Exception e, int failoverCount) { - return false; - } - - @Override - public String toString() { - return NAME; - } + String PHOENIX_HA_FAILOVER_POLICY_ATTR = "phoenix.ha.failover.policy"; + String PHOENIX_HA_FAILOVER_COUNT_ATTR = "phoenix.ha.failover.count"; + + /** + * Should try to failover by connecting to current ACTIVE HBase cluster (if any). + * @param exception the exception caught upon which this method is possible called + * @param failoverCount how many time so far this failover has been attempted + * @return true if caller should get a new phoenix connection against the ACTIVE HBase cluster + */ + boolean shouldFailover(Exception exception, int failoverCount); + + /** + * With this policy, clients have to deal with the failover exception explicitly if any. A + * {@link FailoverSQLException} exception will be thrown to the client when they try to use the + * closed connections. 
Specially, the high availability (HA) framework will not connect to the new + * ACTIVE cluster automatically for clients, but instead a client should: - re-connect to this HA + * group, in which case it will get a new connection wrapping a Phoenix connection to the newly + * ACTIVE cluster; OR - call static method + * {@link FailoverPhoenixConnection#failover(Connection,long)} explicitly. After that, it can + * create new Statement/ResultSet and retry the business logic. If neither cluster is ACTIVE, + * connect requests to this HA group will keep getting exception. + */ + class ExplicitFailoverPolicy implements FailoverPolicy { + public static final String NAME = "explicit"; + private static final ExplicitFailoverPolicy INSTANCE = new ExplicitFailoverPolicy(); + + @Override + public boolean shouldFailover(Exception e, int failoverCount) { + return false; } - /** - * With this, failover connection will wrap a new Phoenix connection to the new ACTIVE cluster. - * - * If the current operation (e.g. commit or create Statement) fails, the failover connection - * will try to wrap a new Phoenix connection according to this policy. After that, the client - * will be able to create new Statement/ResultSet created against this failover connection. - * While the HA group is failing over, the failover connection may not be able to failover by - * wrapping a new phoenix connection. In that case, clients trying to use this the failover - * connection will get {@link FailoverSQLException} exception. - * - * The failover to ACTIVE cluster is best-effort; if it succeeds, clients do not notice target - * cluster changed. Some cases are not yet well supported with this failover policy, for e.g. - * after failover, the uncommitted mutations are not populated into the new connection. - * - * In case of {@link FailoverSQLException} exception, clients can still re-connect to this HA - * group by creating a new failover connection, OR call static method failover() explicitly. - */ - class FailoverToActivePolicy implements FailoverPolicy { - public static final String NAME = "active"; - private static final int MAX_FAILOVER_COUNT_DEFAULT = 3; - - private final int maxFailoverCount; - - private FailoverToActivePolicy() { - this.maxFailoverCount = MAX_FAILOVER_COUNT_DEFAULT; - } - - private FailoverToActivePolicy(int maxFailoverCount) { - this.maxFailoverCount = maxFailoverCount; - } - - @Override - public boolean shouldFailover(Exception e, int failoverCount) { - return failoverCount < maxFailoverCount && e instanceof FailoverSQLException; - } - - @Override - public String toString() { - return NAME + "(maxFailoverCount=" + maxFailoverCount + ")"; - } + @Override + public String toString() { + return NAME; + } + } + + /** + * With this, failover connection will wrap a new Phoenix connection to the new ACTIVE cluster. If + * the current operation (e.g. commit or create Statement) fails, the failover connection will try + * to wrap a new Phoenix connection according to this policy. After that, the client will be able + * to create new Statement/ResultSet created against this failover connection. While the HA group + * is failing over, the failover connection may not be able to failover by wrapping a new phoenix + * connection. In that case, clients trying to use this the failover connection will get + * {@link FailoverSQLException} exception. The failover to ACTIVE cluster is best-effort; if it + * succeeds, clients do not notice target cluster changed. 
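A failover policy is chosen purely from connection properties, as read by the FailoverPolicy.get(Properties) factory further down in this file. A small sketch with example values; the property keys and the recognised policy names appear in this patch, while the failover count of 5 is arbitrary.

import java.util.Properties;

import org.apache.phoenix.jdbc.FailoverPolicy;

public class FailoverPolicySelectionSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Select the failover-to-active policy and cap the number of failover attempts.
    props.setProperty("phoenix.ha.failover.policy", "active");
    props.setProperty("phoenix.ha.failover.count", "5");

    FailoverPolicy policy = FailoverPolicy.get(props);
    System.out.println(policy); // prints "active(maxFailoverCount=5)"
  }
}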
Some cases are not yet well supported + * with this failover policy, for e.g. after failover, the uncommitted mutations are not populated + * into the new connection. In case of {@link FailoverSQLException} exception, clients can still + * re-connect to this HA group by creating a new failover connection, OR call static method + * failover() explicitly. + */ + class FailoverToActivePolicy implements FailoverPolicy { + public static final String NAME = "active"; + private static final int MAX_FAILOVER_COUNT_DEFAULT = 3; + + private final int maxFailoverCount; + + private FailoverToActivePolicy() { + this.maxFailoverCount = MAX_FAILOVER_COUNT_DEFAULT; + } + + private FailoverToActivePolicy(int maxFailoverCount) { + this.maxFailoverCount = maxFailoverCount; + } + + @Override + public boolean shouldFailover(Exception e, int failoverCount) { + return failoverCount < maxFailoverCount && e instanceof FailoverSQLException; + } + + @Override + public String toString() { + return NAME + "(maxFailoverCount=" + maxFailoverCount + ")"; + } + } + + /** + * Get the failover policy from client properties. + */ + static FailoverPolicy get(Properties properties) { + String name = properties.getProperty(PHOENIX_HA_FAILOVER_POLICY_ATTR); + if (StringUtils.isEmpty(name)) { + return ExplicitFailoverPolicy.INSTANCE; } - /** - * Get the failover policy from client properties. - */ - static FailoverPolicy get(Properties properties) { - String name = properties.getProperty(PHOENIX_HA_FAILOVER_POLICY_ATTR); - if (StringUtils.isEmpty(name)) { - return ExplicitFailoverPolicy.INSTANCE; - } - - switch (name.toLowerCase()) { - case ExplicitFailoverPolicy.NAME: - return ExplicitFailoverPolicy.INSTANCE; - case FailoverToActivePolicy.NAME: - String maxFailoverCount = properties.getProperty(PHOENIX_HA_FAILOVER_COUNT_ATTR); - return StringUtils.isEmpty(maxFailoverCount) - ? new FailoverToActivePolicy() - : new FailoverToActivePolicy(Integer.parseInt(maxFailoverCount)); - default: - throw new IllegalArgumentException( - String.format("Unsupported %s '%s'", PHOENIX_HA_FAILOVER_POLICY_ATTR, name)); - } + switch (name.toLowerCase()) { + case ExplicitFailoverPolicy.NAME: + return ExplicitFailoverPolicy.INSTANCE; + case FailoverToActivePolicy.NAME: + String maxFailoverCount = properties.getProperty(PHOENIX_HA_FAILOVER_COUNT_ATTR); + return StringUtils.isEmpty(maxFailoverCount) + ? new FailoverToActivePolicy() + : new FailoverToActivePolicy(Integer.parseInt(maxFailoverCount)); + default: + throw new IllegalArgumentException( + String.format("Unsupported %s '%s'", PHOENIX_HA_FAILOVER_POLICY_ATTR, name)); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityGroup.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityGroup.java index fb2137d1066..ecdc7c74227 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityGroup.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityGroup.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,28 @@ */ package org.apache.phoenix.jdbc; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -44,28 +66,6 @@ import edu.umd.cs.findbugs.annotations.NonNull; -import java.io.IOException; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Properties; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION; - /** * An high availability (HA) group is an association between a pair of HBase clusters, a group of * clients, and an HA policy. @@ -75,843 +75,809 @@ * watches node changes in ZooKeeper. *

* The lifecycle of an HA group is confined in the global cache, meaning clients can get an instance - * from the cache but cannot construct or close an HA group instance. The reason is that HA group - * is a shared resource by many clients. Closing it intentionally or accidentally by a client will + * from the cache but cannot construct or close an HA group instance. The reason is that HA group is + * a shared resource by many clients. Closing it intentionally or accidentally by a client will * impact other connections in this group with unexpected behavior. */ @SuppressWarnings("UnstableApiUsage") public class HighAvailabilityGroup { - public static final String PHOENIX_HA_ATTR_PREFIX = "phoenix.ha."; - public static final String PHOENIX_HA_GROUP_ATTR = PHOENIX_HA_ATTR_PREFIX + "group.name"; - /** - * Should we fall back to single cluster when cluster role record is missing? - */ - public static final String PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_KEY = - PHOENIX_HA_ATTR_PREFIX + "fallback.enabled"; - public static final String PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_DEFAULT = - String.valueOf(Boolean.TRUE); - /** - * The single-cluster connection URL when it needs to fall back. - */ - public static final String PHOENIX_HA_FALLBACK_CLUSTER_KEY = - PHOENIX_HA_ATTR_PREFIX + "fallback.cluster"; - public static final String PHOENIX_HA_ZOOKEEPER_ZNODE_NAMESPACE = - "phoenix" + ZKPaths.PATH_SEPARATOR + "ha"; - - public static final String PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY = - PHOENIX_HA_ATTR_PREFIX + "zk.connection.timeout.ms"; - public static final int PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT = 4_000; - public static final String PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_KEY = - PHOENIX_HA_ATTR_PREFIX + "zk.session.timeout.ms"; - public static final int PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT = 4_000; - public static final String PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_KEY = - PHOENIX_HA_ATTR_PREFIX + "zk.retry.base.sleep.ms"; - - public static final int PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT = 1000; - public static final String PHOENIX_HA_ZK_RETRY_MAX_KEY = - PHOENIX_HA_ATTR_PREFIX + "zk.retry.max"; - public static final int PHOENIX_HA_ZK_RETRY_MAX_DEFAULT = 5; - public static final String PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_KEY = - PHOENIX_HA_ATTR_PREFIX + "zk.retry.max.sleep.ms"; - public static final int PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT = 10_000; - public static final RetryPolicy RETRY_POLICY = new ExponentialBackoffRetry( - PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT, - PHOENIX_HA_ZK_RETRY_MAX_DEFAULT, - PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT); - - public static final String PHOENIX_HA_TRANSITION_TIMEOUT_MS_KEY = - PHOENIX_HA_ATTR_PREFIX + "transition.timeout.ms"; - public static final long PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT = 5 * 60 * 1000; // 5 mins - - static final Logger LOG = LoggerFactory.getLogger(HighAvailabilityGroup.class); - @VisibleForTesting - static final Map GROUPS = new ConcurrentHashMap<>(); - @VisibleForTesting - static final Cache MISSING_CRR_GROUPS_CACHE = CacheBuilder.newBuilder() - .expireAfterWrite(PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT, TimeUnit.MILLISECONDS) - .build(); - /** - * The Curator client cache, one client instance per cluster. 
- */ - @VisibleForTesting - static final Cache CURATOR_CACHE = CacheBuilder.newBuilder() - .expireAfterAccess(DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION, TimeUnit.MILLISECONDS) - .removalListener((notification) -> - ((CuratorFramework) Objects.requireNonNull(notification.getValue())).close()) - .build(); - /** - * High availability group info. - */ - private final HAGroupInfo info; - /** - * Client properties used to initialize this HA group. - */ - private final Properties properties; - /** - * Executor service for the two role managers. - */ - private final ExecutorService roleManagerExecutor = Executors.newFixedThreadPool(2, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("phoenixHAGroup-%d").build()); - /** - * The count down latch to make sure at least one role manager has pulled data from ZK. - */ - private final CountDownLatch roleManagerLatch = new CountDownLatch(1); - /** - * Pair of role managers for watching cluster role records from the two ZK clusters. - */ - private final AtomicReference> roleManagers - = new AtomicReference<>(); - /** - * Executor for applying the cluster role to this HA group. - */ - private final ExecutorService nodeChangedExecutor = Executors.newFixedThreadPool(1); - /** - * Current cluster role record for this HA group. - */ - private volatile ClusterRoleRecord roleRecord; - /** - * State of this HA group. - */ - private volatile State state = State.UNINITIALIZED; - - /** - * Private constructor. - *

- * To get an instance, please call {@link HighAvailabilityGroup#get(String, Properties)}. - */ - private HighAvailabilityGroup(HAGroupInfo info, Properties properties) { - this.info = info; - this.properties = properties; + public static final String PHOENIX_HA_ATTR_PREFIX = "phoenix.ha."; + public static final String PHOENIX_HA_GROUP_ATTR = PHOENIX_HA_ATTR_PREFIX + "group.name"; + /** + * Should we fall back to single cluster when cluster role record is missing? + */ + public static final String PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_KEY = + PHOENIX_HA_ATTR_PREFIX + "fallback.enabled"; + public static final String PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_DEFAULT = + String.valueOf(Boolean.TRUE); + /** + * The single-cluster connection URL when it needs to fall back. + */ + public static final String PHOENIX_HA_FALLBACK_CLUSTER_KEY = + PHOENIX_HA_ATTR_PREFIX + "fallback.cluster"; + public static final String PHOENIX_HA_ZOOKEEPER_ZNODE_NAMESPACE = + "phoenix" + ZKPaths.PATH_SEPARATOR + "ha"; + + public static final String PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY = + PHOENIX_HA_ATTR_PREFIX + "zk.connection.timeout.ms"; + public static final int PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT = 4_000; + public static final String PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_KEY = + PHOENIX_HA_ATTR_PREFIX + "zk.session.timeout.ms"; + public static final int PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT = 4_000; + public static final String PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_KEY = + PHOENIX_HA_ATTR_PREFIX + "zk.retry.base.sleep.ms"; + + public static final int PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT = 1000; + public static final String PHOENIX_HA_ZK_RETRY_MAX_KEY = PHOENIX_HA_ATTR_PREFIX + "zk.retry.max"; + public static final int PHOENIX_HA_ZK_RETRY_MAX_DEFAULT = 5; + public static final String PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_KEY = + PHOENIX_HA_ATTR_PREFIX + "zk.retry.max.sleep.ms"; + public static final int PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT = 10_000; + public static final RetryPolicy RETRY_POLICY = + new ExponentialBackoffRetry(PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT, + PHOENIX_HA_ZK_RETRY_MAX_DEFAULT, PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT); + + public static final String PHOENIX_HA_TRANSITION_TIMEOUT_MS_KEY = + PHOENIX_HA_ATTR_PREFIX + "transition.timeout.ms"; + public static final long PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT = 5 * 60 * 1000; // 5 mins + + static final Logger LOG = LoggerFactory.getLogger(HighAvailabilityGroup.class); + @VisibleForTesting + static final Map GROUPS = new ConcurrentHashMap<>(); + @VisibleForTesting + static final Cache MISSING_CRR_GROUPS_CACHE = CacheBuilder.newBuilder() + .expireAfterWrite(PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT, TimeUnit.MILLISECONDS).build(); + /** + * The Curator client cache, one client instance per cluster. + */ + @VisibleForTesting + static final Cache CURATOR_CACHE = CacheBuilder.newBuilder() + .expireAfterAccess(DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION, TimeUnit.MILLISECONDS) + .removalListener( + (notification) -> ((CuratorFramework) Objects.requireNonNull(notification.getValue())) + .close()) + .build(); + /** + * High availability group info. + */ + private final HAGroupInfo info; + /** + * Client properties used to initialize this HA group. + */ + private final Properties properties; + /** + * Executor service for the two role managers. 
+ */ + private final ExecutorService roleManagerExecutor = Executors.newFixedThreadPool(2, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("phoenixHAGroup-%d").build()); + /** + * The count down latch to make sure at least one role manager has pulled data from ZK. + */ + private final CountDownLatch roleManagerLatch = new CountDownLatch(1); + /** + * Pair of role managers for watching cluster role records from the two ZK clusters. + */ + private final AtomicReference> roleManagers = + new AtomicReference<>(); + /** + * Executor for applying the cluster role to this HA group. + */ + private final ExecutorService nodeChangedExecutor = Executors.newFixedThreadPool(1); + /** + * Current cluster role record for this HA group. + */ + private volatile ClusterRoleRecord roleRecord; + /** + * State of this HA group. + */ + private volatile State state = State.UNINITIALIZED; + + /** + * Private constructor. + *

+ * To get an instance, please call {@link HighAvailabilityGroup#get(String, Properties)}. + */ + private HighAvailabilityGroup(HAGroupInfo info, Properties properties) { + this.info = info; + this.properties = properties; + } + + /** + * This is for test usage only. In production, the record should be retrieved from ZooKeeper. + */ + @VisibleForTesting + HighAvailabilityGroup(HAGroupInfo info, Properties properties, ClusterRoleRecord record, + State state) { + this.info = info; + this.properties = properties; + this.roleRecord = record; + this.state = state; + } + + public static HAGroupInfo getHAGroupInfo(String url, Properties properties) throws SQLException { + if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + url = url.substring(PhoenixRuntime.JDBC_PROTOCOL.length() + 1); + } + if (!(url.contains("[") && url.contains("|") && url.contains("]"))) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_CONNECTION_URL) + .setMessage(String.format("URL %s is not a valid HA connection string", url)).build() + .buildException(); + } + String additionalJDBCParams = null; + int idx = url.indexOf("]"); + int extraIdx = url.indexOf(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR, idx + 1); + if (extraIdx != -1) { + // skip the JDBC_PROTOCOL_SEPARATOR + additionalJDBCParams = url.substring(extraIdx + 1); } - /** - * This is for test usage only. In production, the record should be retrieved from ZooKeeper. - */ - @VisibleForTesting - HighAvailabilityGroup(HAGroupInfo info, Properties properties, ClusterRoleRecord record, - State state) { - this.info = info; - this.properties = properties; - this.roleRecord = record; - this.state = state; - } - - public static HAGroupInfo getHAGroupInfo(String url, Properties properties) - throws SQLException { - if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - url = url.substring(PhoenixRuntime.JDBC_PROTOCOL.length() + 1); - } - if (!(url.contains("[") && url.contains("|") && url.contains("]"))) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_CONNECTION_URL) - .setMessage(String.format("URL %s is not a valid HA connection string", url)) - .build() - .buildException(); - } - String additionalJDBCParams = null; - int idx = url.indexOf("]"); - int extraIdx = url.indexOf(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR, idx + 1); - if (extraIdx != -1) { - // skip the JDBC_PROTOCOL_SEPARATOR - additionalJDBCParams = url.substring(extraIdx + 1); - } - url = url.substring(url.indexOf("[") + 1, url.indexOf("]")); - String[] urls = url.split("\\|"); + url = url.substring(url.indexOf("[") + 1, url.indexOf("]")); + String[] urls = url.split("\\|"); - String name = properties.getProperty(PHOENIX_HA_GROUP_ATTR); - if (StringUtils.isEmpty(name)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_INVALID_PROPERTIES) - .setMessage(String.format("HA group name can not be empty for HA URL %s", url)) - .build() - .buildException(); - } - return new HAGroupInfo(name, urls[0], urls[1], additionalJDBCParams); + String name = properties.getProperty(PHOENIX_HA_GROUP_ATTR); + if (StringUtils.isEmpty(name)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_INVALID_PROPERTIES) + .setMessage(String.format("HA group name can not be empty for HA URL %s", url)).build() + .buildException(); + } + return new HAGroupInfo(name, urls[0], urls[1], additionalJDBCParams); + } + + /** + * Get an instance of HA group given the HA connecting URL (with "|") and client properties. + *

+ * The HA group does not have a public constructor. This method is the only public one for getting + * an HA group instance. The reason is that, HA group is considered expensive to create and + * maintain. Caching it will make it reusable for all connection requests to this group. + *

+ * It will return the cached instance, if any, for the target HA group. The HA group creation and + * initialization are blocking operations. Upon initialization failure, the HA group information + * may be saved in a negative cache iff the cause is due to missing cluster role records. In + * presence of empty (not null or exception) return value, client may choose to fall back to a + * single cluster connection to compensate missing cluster role records. + * @return Optional of target HA group (initialized), or empty if missing cluster role records + * @throws SQLException fails to get or initialize an HA group + */ + public static Optional get(String url, Properties properties) + throws SQLException { + HAGroupInfo info = getHAGroupInfo(url, properties); + if (MISSING_CRR_GROUPS_CACHE.getIfPresent(info) != null) { + return Optional.empty(); } - /** - * Get an instance of HA group given the HA connecting URL (with "|") and client properties. - *

- * The HA group does not have a public constructor. This method is the only public one for - * getting an HA group instance. The reason is that, HA group is considered expensive to create - * and maintain. Caching it will make it reusable for all connection requests to this group. - *

- * It will return the cached instance, if any, for the target HA group. The HA group creation - * and initialization are blocking operations. Upon initialization failure, the HA group - * information may be saved in a negative cache iff the cause is due to missing cluster role - * records. In presence of empty (not null or exception) return value, client may choose to fall - * back to a single cluster connection to compensate missing cluster role records. - * - * @return Optional of target HA group (initialized), or empty if missing cluster role records - * @throws SQLException fails to get or initialize an HA group - */ - public static Optional get(String url, Properties properties) - throws SQLException { - HAGroupInfo info = getHAGroupInfo(url, properties); - if (MISSING_CRR_GROUPS_CACHE.getIfPresent(info) != null) { + HighAvailabilityGroup haGroup = GROUPS.computeIfAbsent(info, + haGroupInfo -> new HighAvailabilityGroup(haGroupInfo, properties)); + try { + haGroup.init(); + } catch (Exception e) { + GROUPS.remove(info); + haGroup.close(); + try { + CuratorFramework curator1 = CURATOR_CACHE.getIfPresent(info.getUrl1()); + CuratorFramework curator2 = CURATOR_CACHE.getIfPresent(info.getUrl2()); + if (curator1 != null && curator2 != null) { + Stat node1 = curator1.checkExists().forPath(info.getZkPath()); + Stat node2 = curator2.checkExists().forPath(info.getZkPath()); + if (node1 == null && node2 == null) { + // The HA group fails to initialize due to missing cluster role records on + // both ZK clusters. We will put this HA group into negative cache. + MISSING_CRR_GROUPS_CACHE.put(info, true); return Optional.empty(); - } + } + } + } catch (Exception e2) { + LOG.error("HA group {} failed to initialized. Got exception when checking if znode" + + " exists on the two ZK clusters.", info, e2); + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setMessage(String.format("Cannot start HA group %s for URL %s", haGroup, url)) + .setRootCause(e).build().buildException(); + } + return Optional.of(haGroup); + } + + /** + * This method helps client to get the single cluster to fallback. + *

+ * When getting HA group using {@link #get(String, Properties)}, it may return empty (not null or + * exception) value. In that case client may choose to fall back to a single cluster connection to + * compensate missing cluster role records instead of throw errors. + * @param url The HA connection url optionally; empty optional if properties disables + * fallback + * @param properties The client connection properties + * @return The connection url of the single cluster to fall back + * @throws SQLException if fails to get HA information and/or invalid properties are seen + */ + static Optional getFallbackCluster(String url, Properties properties) + throws SQLException { + HAGroupInfo haGroupInfo = getHAGroupInfo(url, properties); + + String fallback = properties.getProperty(PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_KEY, + PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_DEFAULT); + if (!Boolean.parseBoolean(fallback)) { + LOG.info("Fallback to single cluster not enabled for the HA group {} per configuration." + + " HA url: '{}'.", haGroupInfo.getName(), url); + return Optional.empty(); + } + String fallbackCluster = properties.getProperty(PHOENIX_HA_FALLBACK_CLUSTER_KEY); + if (StringUtils.isEmpty(fallbackCluster)) { + fallbackCluster = haGroupInfo.getUrl1(); + } + LOG.info("Falling back to single cluster '{}' for the HA group {} to serve HA connection " + + "request against url '{}'.", fallbackCluster, haGroupInfo.getName(), url); + return Optional.of(fallbackCluster); + } + + /** + * Get an active curator ZK client for the given properties and ZK endpoint. + *
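The fallback path above is controlled entirely by connection properties. A brief sketch, assuming a group named "group1" and an example ZK endpoint; the two fallback keys and their default come from HighAvailabilityGroup in this patch.

import java.util.Properties;

public class FallbackConfigSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("phoenix.ha.group.name", "group1"); // example group name

    // Falling back to a single cluster when cluster role records are missing is enabled
    // by default; disable it, or pin the fallback cluster explicitly (otherwise the
    // first URL of the HA pair is used).
    props.setProperty("phoenix.ha.fallback.enabled", "false");
    // props.setProperty("phoenix.ha.fallback.cluster", "zk1:2181");
  }
}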

+ * This can be from cached object since Curator should be shared per cluster. + * @param jdbcUrl the ZK endpoint host:port or the JDBC connection String host:port:/hbase + * @param properties the properties defining time out values and retry count + * @return a new Curator framework client + */ + @SuppressWarnings("UnstableApiUsage") + public static CuratorFramework getCurator(String jdbcUrl, Properties properties) + throws IOException { + try { + return CURATOR_CACHE.get(jdbcUrl, () -> { + CuratorFramework curator = createCurator(jdbcUrl, properties); + if ( + !curator.blockUntilConnected(PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT, + TimeUnit.MILLISECONDS) + ) throw new RuntimeException("Failed to connect to the CuratorFramework in " + "timeout " + + PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT + " ms"); + return curator; + }); + } catch (Exception e) { + LOG.error("Fail to get an active curator for url {}", jdbcUrl, e); + // invalidate the cache when getting/creating throws exception + CURATOR_CACHE.invalidate(jdbcUrl); + throw new IOException(e); + } + } + + /** + * Create a curator ZK client for the given properties and ZK endpoint. + *

+ * Unless caller needs a new curator, it should use {@link #getCurator(String, Properties)}. + */ + private static CuratorFramework createCurator(String jdbcUrl, Properties properties) { + // Get the ZK endpoint in host:port format by removing JDBC protocol and HBase root node + final String zkUrl; + if (jdbcUrl.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + jdbcUrl = jdbcUrl.substring(PhoenixRuntime.JDBC_PROTOCOL.length() + 1); + } + Preconditions.checkArgument(!StringUtils.isEmpty(jdbcUrl), "JDBC url is empty!"); + jdbcUrl = jdbcUrl.replaceAll("\\\\:", "="); + String[] parts = jdbcUrl.split(":"); + if (parts.length == 0 || parts.length > 3) { + throw new IllegalArgumentException("Invalid JDBC url!" + jdbcUrl); + } + // The URL is already normalised + zkUrl = parts[0].replaceAll("=", ":"); + + // Get timeout and retry counts + String connectionTimeoutMsProp = + properties.getProperty(PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY); + final int connectionTimeoutMs = !StringUtils.isEmpty(connectionTimeoutMsProp) + ? Integer.parseInt(connectionTimeoutMsProp) + : PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT; + String sessionTimeoutMsProps = properties.getProperty(PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_KEY); + final int sessionTimeoutMs = !StringUtils.isEmpty(sessionTimeoutMsProps) + ? Integer.parseInt(sessionTimeoutMsProps) + : PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT; + final RetryPolicy retryPolicy = createRetryPolicy(properties); + + CuratorFramework curator = CuratorFrameworkFactory.builder().connectString(zkUrl) + .namespace(PHOENIX_HA_ZOOKEEPER_ZNODE_NAMESPACE).connectionTimeoutMs(connectionTimeoutMs) + .sessionTimeoutMs(sessionTimeoutMs).retryPolicy(retryPolicy).canBeReadOnly(true).build(); + curator.start(); + return curator; + } + + /** + * Create a Curator retry policy from properties. + *

+ * If properties is null, return a default retry policy. + * @param properties properties defining timeout and max retries + * @return a retry policy which can be used for Curator operations + */ + public static RetryPolicy createRetryPolicy(Properties properties) { + if (properties == null) { + return RETRY_POLICY; + } + String baseSleepTimeMsProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_KEY); + int baseSleepTimeMs = StringUtils.isNotEmpty(baseSleepTimeMsProp) + ? Integer.parseInt(baseSleepTimeMsProp) + : PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT; + String maxRetriesProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_MAX_KEY); + int maxRetries = StringUtils.isNotEmpty(maxRetriesProp) + ? Integer.parseInt(maxRetriesProp) + : PHOENIX_HA_ZK_RETRY_MAX_DEFAULT; + String maxSleepTimeMsProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_KEY); + int maxSleepTimeMs = StringUtils.isNotEmpty(maxSleepTimeMsProp) + ? Integer.parseInt(maxSleepTimeMsProp) + : PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT; + return new ExponentialBackoffRetry(baseSleepTimeMs, maxRetries, maxSleepTimeMs); + } + + /** + * Initialize this HA group by registering ZK watchers and getting initial cluster role record. + *
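createRetryPolicy above turns three optional properties into a Curator ExponentialBackoffRetry and falls back to the built-in defaults when they are absent. A short sketch with example values; the property keys are the PHOENIX_HA_ZK_RETRY_* keys from HighAvailabilityGroup, the numbers are illustrative.

import java.util.Properties;

import org.apache.curator.RetryPolicy;
import org.apache.phoenix.jdbc.HighAvailabilityGroup;

public class RetryPolicySketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Base sleep, max retries and max sleep for the exponential backoff.
    props.setProperty("phoenix.ha.zk.retry.base.sleep.ms", "500");
    props.setProperty("phoenix.ha.zk.retry.max", "3");
    props.setProperty("phoenix.ha.zk.retry.max.sleep.ms", "5000");

    // Passing null (or omitting the keys) yields the default retry policy instead.
    RetryPolicy retryPolicy = HighAvailabilityGroup.createRetryPolicy(props);
    System.out.println(retryPolicy.getClass().getSimpleName()); // ExponentialBackoffRetry
  }
}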

+ * If this is already initialized, calling this method is a no-op. This method is lock free as + * current thread will either return fast or wait for the in-progress initialization or timeout. + */ + public void init() throws IOException { + if (state != State.UNINITIALIZED) { + return; + } - HighAvailabilityGroup haGroup = GROUPS.computeIfAbsent(info, - haGroupInfo -> new HighAvailabilityGroup(haGroupInfo, properties)); - try { - haGroup.init(); - } catch (Exception e) { - GROUPS.remove(info); - haGroup.close(); - try { - CuratorFramework curator1 = CURATOR_CACHE.getIfPresent(info.getUrl1()); - CuratorFramework curator2 = CURATOR_CACHE.getIfPresent(info.getUrl2()); - if (curator1 != null && curator2 != null) { - Stat node1 = curator1.checkExists().forPath(info.getZkPath()); - Stat node2 = curator2.checkExists().forPath(info.getZkPath()); - if (node1 == null && node2 == null) { - // The HA group fails to initialize due to missing cluster role records on - // both ZK clusters. We will put this HA group into negative cache. - MISSING_CRR_GROUPS_CACHE.put(info, true); - return Optional.empty(); - } - } - } catch (Exception e2) { - LOG.error("HA group {} failed to initialized. Got exception when checking if znode" - + " exists on the two ZK clusters.", info, e2); - } - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setMessage(String.format("Cannot start HA group %s for URL %s", haGroup, url)) - .setRootCause(e) - .build() - .buildException(); - } - return Optional.of(haGroup); + PairOfSameType newRoleManagers = + new PairOfSameType<>(new HAClusterRoleManager(info.urls.getFirst(), properties), + new HAClusterRoleManager(info.urls.getSecond(), properties)); + if (!roleManagers.compareAndSet(null, newRoleManagers)) { + LOG.info("Someone already started role managers; waiting for that one..."); + waitForInitialization(properties); + return; } - /** - * This method helps client to get the single cluster to fallback. - *

- * When getting HA group using {@link #get(String, Properties)}, it may return empty (not null - * or exception) value. In that case client may choose to fall back to a single cluster - * connection to compensate missing cluster role records instead of throw errors. - * - * @param url The HA connection url optionally; empty optional if properties disables fallback - * @param properties The client connection properties - * @return The connection url of the single cluster to fall back - * @throws SQLException if fails to get HA information and/or invalid properties are seen - */ - static Optional getFallbackCluster(String url, Properties properties) throws SQLException { - HAGroupInfo haGroupInfo = getHAGroupInfo(url, properties); - - String fallback = properties.getProperty(PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_KEY, - PHOENIX_HA_SHOULD_FALLBACK_WHEN_MISSING_CRR_DEFAULT); - if (!Boolean.parseBoolean(fallback)) { - LOG.info("Fallback to single cluster not enabled for the HA group {} per configuration." - + " HA url: '{}'.", haGroupInfo.getName(), url); - return Optional.empty(); - } - String fallbackCluster = properties.getProperty(PHOENIX_HA_FALLBACK_CLUSTER_KEY); - if (StringUtils.isEmpty(fallbackCluster)) { - fallbackCluster = haGroupInfo.getUrl1(); - } - LOG.info("Falling back to single cluster '{}' for the HA group {} to serve HA connection " - + "request against url '{}'.", - fallbackCluster, haGroupInfo.getName(), url); - return Optional.of(fallbackCluster); + Future f1 = roleManagerExecutor.submit(newRoleManagers.getFirst()); + Future f2 = roleManagerExecutor.submit(newRoleManagers.getSecond()); + try { + waitForInitialization(properties); + } catch (IOException e) { + // HA group that fails to initialize will not be kept in the global cache. + // Next connection request will create and initialize a new HA group. + // Before returning in case of exception, following code will cancel the futures. + f1.cancel(true); + f2.cancel(true); + throw e; } - /** - * Get an active curator ZK client for the given properties and ZK endpoint. - *

- * This can be from cached object since Curator should be shared per cluster. - * - * @param jdbcUrl the ZK endpoint host:port or the JDBC connection String host:port:/hbase - * @param properties the properties defining time out values and retry count - * @return a new Curator framework client - */ - @SuppressWarnings("UnstableApiUsage") - public static CuratorFramework getCurator(String jdbcUrl, Properties properties) - throws IOException { + assert roleRecord != null; + LOG.info("Initial cluster role for HA group {} is {}", info, roleRecord); + } + + /** + * Helper method that will block current thread until the HA group is initialized. + *

+ * After returning, the HA state might not be in READY state. That is possible when a new ZK node + * change is detected triggering HA group to become IN_TRANSIT state. + * @param properties the connection properties + * @throws IOException when current HA group is not initialized before timeout + */ + private void waitForInitialization(Properties properties) throws IOException { + String connectionTimeoutMsProp = + properties.getProperty(PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY); + int timeout = !StringUtils.isEmpty(connectionTimeoutMsProp) + ? Integer.parseInt(connectionTimeoutMsProp) + : PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT; + boolean started = false; + try { + started = roleManagerLatch.await(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + LOG.warn("Got interrupted when waiting for cluster role managers to start", e); + Thread.currentThread().interrupt(); + } + if (!started) { + LOG.warn("Timed out {}ms waiting for HA group '{}' to be initialized.", timeout, info); + throw new IOException("Fail to initialize HA group " + info); + } + } + + /** + * Create a JDBC connection in this high availability group. + * @param properties connection properties + * @return a JDBC connection implementation + * @throws SQLException if fails to connect a JDBC connection + */ + public Connection connect(Properties properties) throws SQLException { + if (state != State.READY) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setMessage("HA group is not ready!").setHaGroupInfo(info.toString()).build() + .buildException(); + } + return roleRecord.getPolicy().provide(this, properties); + } + + /** + * Get a Phoenix connection against the current active HBase cluster. + *

+ * If there is no active cluster, it will throw exception instead of blocking or retrying. + * @param properties connection properties + * @return a Phoenix connection to current active HBase cluster + * @throws SQLException if fails to get a connection + */ + PhoenixConnection connectActive(final Properties properties) throws SQLException { + try { + Optional url = roleRecord.getActiveUrl(); + if (state == State.READY && url.isPresent()) { + PhoenixConnection conn = connectToOneCluster(url.get(), properties); + // After connection is created, double check if the cluster is still ACTIVE + // This is to make sure the newly created connection will not be returned to client + // if the target cluster is not active any more. This can happen during failover. + boolean isActive; try { - return CURATOR_CACHE.get(jdbcUrl, () -> { - CuratorFramework curator = createCurator(jdbcUrl, properties); - if (!curator.blockUntilConnected(PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT, - TimeUnit.MILLISECONDS)) - throw new RuntimeException("Failed to connect to the CuratorFramework in " - + "timeout " + PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT + " ms"); - return curator; - }); + isActive = isActive(conn); } catch (Exception e) { - LOG.error("Fail to get an active curator for url {}", jdbcUrl, e); - // invalidate the cache when getting/creating throws exception - CURATOR_CACHE.invalidate(jdbcUrl); - throw new IOException(e); - } + conn.close(); + throw e; + } + + if (state == State.READY && isActive) { + return conn; + } else { + conn.close(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) + .setMessage("Cluster is not active any more in HA group. Please retry.") + .setHaGroupInfo(info.toString()).build().buildException(); + } + } else { + LOG.error("Not able to connect to active cluster, state: {}, active exist: {}", state, + url.isPresent()); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_NO_ACTIVE_CLUSTER) + .setMessage("Cannot connect to HA group because it has no active cluster") + .setHaGroupInfo(info.toString()).build().buildException(); + } + } catch (SQLException e) { + LOG.error("Failed to connect to active cluster in HA group {}, record: {}", info, roleRecord, + e); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setMessage("Failed to connect to active cluster in HA group") + .setHaGroupInfo(info.toString()).setRootCause(e).build().buildException(); } + } - /** - * Create a curator ZK client for the given properties and ZK endpoint. - *

- * Unless caller needs a new curator, it should use {@link #getCurator(String, Properties)}. - */ - private static CuratorFramework createCurator(String jdbcUrl, Properties properties) { - // Get the ZK endpoint in host:port format by removing JDBC protocol and HBase root node - final String zkUrl; - if (jdbcUrl.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - jdbcUrl = jdbcUrl.substring(PhoenixRuntime.JDBC_PROTOCOL.length() + 1); - } - Preconditions.checkArgument(!StringUtils.isEmpty(jdbcUrl), "JDBC url is empty!"); - jdbcUrl = jdbcUrl.replaceAll("\\\\:", "="); - String[] parts = jdbcUrl.split(":"); - if (parts.length == 0 || parts.length > 3) { - throw new IllegalArgumentException("Invalid JDBC url!" + jdbcUrl); - } - // The URL is already normalised - zkUrl = parts[0].replaceAll("=", ":"); - - // Get timeout and retry counts - String connectionTimeoutMsProp = properties.getProperty( - PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY); - final int connectionTimeoutMs = !StringUtils.isEmpty(connectionTimeoutMsProp) - ? Integer.parseInt(connectionTimeoutMsProp) - : PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT; - String sessionTimeoutMsProps = properties.getProperty(PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_KEY); - final int sessionTimeoutMs = !StringUtils.isEmpty(sessionTimeoutMsProps) - ? Integer.parseInt(sessionTimeoutMsProps) - : PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT; - final RetryPolicy retryPolicy = createRetryPolicy(properties); - - CuratorFramework curator = CuratorFrameworkFactory - .builder() - .connectString(zkUrl) - .namespace(PHOENIX_HA_ZOOKEEPER_ZNODE_NAMESPACE) - .connectionTimeoutMs(connectionTimeoutMs) - .sessionTimeoutMs(sessionTimeoutMs) - .retryPolicy(retryPolicy) - .canBeReadOnly(true) - .build(); - curator.start(); - return curator; + /** Returns true if the given phoenix connection points to ACTIVE cluster, else false */ + boolean isActive(PhoenixConnection connection) { + if (state != State.READY || connection == null) { + return false; } - - /** - * Create a Curator retry policy from properties. - *
- * If properties is null, return a default retry policy. - * - * @param properties properties defining timeout and max retries - * @return a retry policy which can be used for Curator operations - */ - public static RetryPolicy createRetryPolicy(Properties properties) { - if (properties == null) { - return RETRY_POLICY; - } - String baseSleepTimeMsProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_KEY); - int baseSleepTimeMs = StringUtils.isNotEmpty(baseSleepTimeMsProp) - ? Integer.parseInt(baseSleepTimeMsProp) - : PHOENIX_HA_ZK_RETRY_BASE_SLEEP_MS_DEFAULT; - String maxRetriesProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_MAX_KEY); - int maxRetries = StringUtils.isNotEmpty(maxRetriesProp) - ? Integer.parseInt(maxRetriesProp) - : PHOENIX_HA_ZK_RETRY_MAX_DEFAULT; - String maxSleepTimeMsProp = properties.getProperty(PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_KEY); - int maxSleepTimeMs = StringUtils.isNotEmpty(maxSleepTimeMsProp) - ? Integer.parseInt(maxSleepTimeMsProp) - : PHOENIX_HA_ZK_RETRY_MAX_SLEEP_MS_DEFAULT; - return new ExponentialBackoffRetry(baseSleepTimeMs, maxRetries, maxSleepTimeMs); + return roleRecord.getActiveUrl() + .equals(Optional.of(JDBCUtil.formatZookeeperUrl(connection.getURL()))); + } + + /** + * Connect to an HBase cluster in this HA group with given url and client properties. + *
+ * The URL should belong to one of the two ZK clusters in this HA group. It returns the Phoenix + * connection to the given cluster without checking the context of the cluster's role. Please use + * {@link #connectActive(Properties)} to connect to the ACTIVE cluster. + */ + PhoenixConnection connectToOneCluster(String url, Properties properties) throws SQLException { + Preconditions.checkNotNull(url); + if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + Preconditions.checkArgument(url.length() > PhoenixRuntime.JDBC_PROTOCOL.length(), + "The URL '" + url + "' is not a valid Phoenix connection string"); } + url = JDBCUtil.formatZookeeperUrl(url); + Preconditions.checkArgument(url.equals(info.getUrl1()) || url.equals(info.getUrl2()), + "The URL '" + url + "' does not belong to this HA group " + info); - /** - * Initialize this HA group by registering ZK watchers and getting initial cluster role record. - *
- * If this is already initialized, calling this method is a no-op. This method is lock free as - * current thread will either return fast or wait for the in-progress initialization or timeout. - */ - public void init() throws IOException { - if (state != State.UNINITIALIZED) { - return; - } + String jdbcString = info.getJDBCUrl(url); - PairOfSameType newRoleManagers = new PairOfSameType<>( - new HAClusterRoleManager(info.urls.getFirst(), properties), - new HAClusterRoleManager(info.urls.getSecond(), properties)); - if (!roleManagers.compareAndSet(null, newRoleManagers)) { - LOG.info("Someone already started role managers; waiting for that one..."); - waitForInitialization(properties); - return; - } + ClusterRole role = roleRecord.getRole(url); + if (!role.canConnect()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_CLUSTER_CAN_NOT_CONNECT) + .setMessage("Can not connect to cluster '" + url + "' in '" + role + "' role").build() + .buildException(); + } - Future f1 = roleManagerExecutor.submit(newRoleManagers.getFirst()); - Future f2 = roleManagerExecutor.submit(newRoleManagers.getSecond()); - try { - waitForInitialization(properties); - } catch (IOException e) { - // HA group that fails to initialize will not be kept in the global cache. - // Next connection request will create and initialize a new HA group. - // Before returning in case of exception, following code will cancel the futures. - f1.cancel(true); - f2.cancel(true); - throw e; - } + // Get driver instead of using PhoenixDriver.INSTANCE since it can be test or mocked driver + Driver driver = DriverManager.getDriver(jdbcString); + Preconditions.checkArgument(driver instanceof PhoenixEmbeddedDriver, + "No JDBC driver is registered for Phoenix high availability (HA) framework"); + return ((PhoenixEmbeddedDriver) driver).getConnectionQueryServices(jdbcString, properties) + .connect(jdbcString, properties); + } + + @VisibleForTesting + HAGroupInfo getGroupInfo() { + return info; + } + + Properties getProperties() { + return properties; + } + + public ClusterRoleRecord getRoleRecord() { + return roleRecord; + } + + /** + * Package private close method. + *
+ * Once this HA group is closed, it can not be re-opened again. Use a new object if necessary. + * This method is package private because we do not want to expose the lifecycle management + * methods to public. Constructor is also private (or package-private visible for testing). The + * lifecycle management is confined to this class because an HA group is a shared resource. + * Someone calling close on this would make it unusable, since the state would become closed. + */ + void close() { + roleManagerExecutor.shutdownNow(); + try { + // TODO: Parameterize and set in future work item for pluggable + if ( + !roleManagerExecutor.awaitTermination(PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT, + TimeUnit.MILLISECONDS) + ) { + LOG.error("Fail to shut down role managers service for HA group: {}", info); + } + } catch (InterruptedException e) { + LOG.warn("HA group {} close() got interrupted when closing role managers", info, e); + // (Re-)Cancel if current thread also interrupted + roleManagerExecutor.shutdownNow(); + // Preserve interrupt status + Thread.currentThread().interrupt(); + } + state = State.CLOSED; + } + + @Override + public String toString() { + return roleRecord == null + ? "HighAvailabilityGroup{roleRecord=null, info=" + info + ", state=" + state + "}" + : "HighAvailabilityGroup{roleRecord=" + roleRecord + ", state=" + state + "}"; + } + + /** + * Set the new cluster role record for this HA group. + *
+ * Calling this method will make HA group be in transition state where no request can be served. + * The data source may come from either of the two clusters as seen by the ZK watcher. + * @param newRoleRecord the new cluster role record to set + * @return true if the new record is set as current one; false otherwise + */ + private synchronized boolean applyClusterRoleRecord(@NonNull ClusterRoleRecord newRoleRecord) { + if (roleRecord == null) { + roleRecord = newRoleRecord; + state = State.READY; + LOG.info("HA group {} is now in {} state after getting initial V{} role record: {}", info, + state, roleRecord.getVersion(), roleRecord); + LOG.debug("HA group {} is ready", this); + return true; + } - assert roleRecord != null; - LOG.info("Initial cluster role for HA group {} is {}", info, roleRecord); + if (!newRoleRecord.isNewerThan(roleRecord)) { + LOG.warn("Does not apply new cluster role record as it does not have higher version. " + + "Existing record: {}, new record: {}", roleRecord, newRoleRecord); + return false; } - /** - * Helper method that will block current thread until the HA group is initialized. - *
- * After returning, the HA state might not be in READY state. That is possible when a new ZK - * node change is detected triggering HA group to become IN_TRANSIT state. - * - * @param properties the connection properties - * @throws IOException when current HA group is not initialized before timeout - */ - private void waitForInitialization(Properties properties) throws IOException { - String connectionTimeoutMsProp = properties.getProperty( - PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_KEY); - int timeout = !StringUtils.isEmpty(connectionTimeoutMsProp) - ? Integer.parseInt(connectionTimeoutMsProp) - : PHOENIX_HA_ZK_CONNECTION_TIMEOUT_MS_DEFAULT; - boolean started = false; - try { - started = roleManagerLatch.await(timeout, TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - LOG.warn("Got interrupted when waiting for cluster role managers to start", e); - Thread.currentThread().interrupt(); - } - if (!started) { - LOG.warn("Timed out {}ms waiting for HA group '{}' to be initialized.", timeout, info); - throw new IOException("Fail to initialize HA group " + info); - } + if (!roleRecord.hasSameInfo(newRoleRecord)) { + LOG.error("New record {} has different HA group information from old record {}", + newRoleRecord, roleRecord); + return false; } - /** - * Create a JDBC connection in this high availability group. - * - * @param properties connection properties - * @return a JDBC connection implementation - * @throws SQLException if fails to connect a JDBC connection - */ - public Connection connect(Properties properties) throws SQLException { - if (state != State.READY) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setMessage("HA group is not ready!") - .setHaGroupInfo(info.toString()) - .build() - .buildException(); - } - return roleRecord.getPolicy().provide(this, properties); + final ClusterRoleRecord oldRecord = roleRecord; + state = State.IN_TRANSITION; + LOG.info("HA group {} is in {} to set V{} record", info, state, newRoleRecord.getVersion()); + Future future = nodeChangedExecutor.submit(() -> { + try { + roleRecord.getPolicy().transitClusterRole(this, roleRecord, newRoleRecord); + } catch (SQLException e) { + throw new CompletionException(e); + } + }); + + // TODO: save timeout in the HA group info (aka cluster role record) instead in properties + String transitionTimeoutProp = properties.getProperty(PHOENIX_HA_TRANSITION_TIMEOUT_MS_KEY); + long maxTransitionTimeMs = StringUtils.isNotEmpty(transitionTimeoutProp) + ? Long.parseLong(transitionTimeoutProp) + : PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT; + try { + future.get(maxTransitionTimeMs, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + LOG.error("Got interrupted when transiting cluster roles for HA group {}", info, ie); + future.cancel(true); + Thread.currentThread().interrupt(); + return false; + } catch (ExecutionException | TimeoutException e) { + LOG.error("HA group {} failed to transit cluster roles per policy {} to new record {}", info, + roleRecord.getPolicy(), newRoleRecord, e); + // Calling back HA policy function for cluster switch is conducted with best effort. + // HA group continues transition when its HA policy fails to deal with context switch + // (e.g. to close existing connections) + // The goal here is to gain higher availability even though existing resources against + // previous ACTIVE cluster may have not been closed cleanly. 
+ } + roleRecord = newRoleRecord; + state = State.READY; + LOG.info("HA group {} is in {} state after applying V{} role record. Old: {}, new: {}", info, + state, roleRecord.getVersion(), oldRecord, roleRecord); + LOG.debug("HA group is ready: {}", this); + return true; + } + + /** + * Local state of this HA group object, which transits upon explicit call (e.g. init) or when the + * cluster role change is detected. + *
+ * - UNINITIALIZED is the state when this HA group has not been initialized. Once the HA group is + * initialized, it will never go to this state again. - READY is the state when this HA group can + * serve client request. There is not necessarily an active HBase cluster since a standby cluster + * may be sufficient per HA policy. - IN_TRANSITION is the state where HA group is dealing with + * cluster role changes and all client connection requests are rejected. - CLOSED is the state + * where the HA group is closed. Once the HA group is closed, it will never leave this state. + */ + enum State { + UNINITIALIZED, + READY, + IN_TRANSITION, + CLOSED + } + + /** + * An HAGroupInfo contains information of an HA group. + *
+ * It is constructed based on client input, including the JDBC connection string and properties. + * Objects of this class are used as the keys of HA group cache {@link #GROUPS}. + *
+ * This class is immutable. + */ + @VisibleForTesting + static final class HAGroupInfo { + private final String name; + private final PairOfSameType urls; + private final String additionalJDBCParams; + + HAGroupInfo(String name, String url1, String url2, String additionalJDBCParams) { + Preconditions.checkNotNull(name); + Preconditions.checkNotNull(url1); + Preconditions.checkNotNull(url2); + this.name = name; + url1 = JDBCUtil.formatZookeeperUrl(url1); + url2 = JDBCUtil.formatZookeeperUrl(url2); + Preconditions.checkArgument(!url1.equals(url2), "Two clusters have the same ZK!"); + // Ignore the given order of url1 and url2, and reorder for equals comparison. + if (url1.compareTo(url2) > 0) { + this.urls = new PairOfSameType<>(url2, url1); + } else { + this.urls = new PairOfSameType<>(url1, url2); + } + this.additionalJDBCParams = additionalJDBCParams; } - /** - * Get a Phoenix connection against the current active HBase cluster. - *
- * If there is no active cluster, it will throw exception instead of blocking or retrying. - * - * @param properties connection properties - * @return a Phoenix connection to current active HBase cluster - * @throws SQLException if fails to get a connection - */ - PhoenixConnection connectActive(final Properties properties) throws SQLException { - try { - Optional url = roleRecord.getActiveUrl(); - if (state == State.READY && url.isPresent()) { - PhoenixConnection conn = connectToOneCluster(url.get(), properties); - // After connection is created, double check if the cluster is still ACTIVE - // This is to make sure the newly created connection will not be returned to client - // if the target cluster is not active any more. This can happen during failover. - boolean isActive; - try { - isActive = isActive(conn); - } catch (Exception e) { - conn.close(); - throw e; - } - - if (state == State.READY && isActive) { - return conn; - } else { - conn.close(); - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) - .setMessage("Cluster is not active any more in HA group. Please retry.") - .setHaGroupInfo(info.toString()) - .build() - .buildException(); - } - } else { - LOG.error("Not able to connect to active cluster, state: {}, active exist: {}", - state, url.isPresent()); - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.HA_NO_ACTIVE_CLUSTER) - .setMessage("Cannot connect to HA group because it has no active cluster") - .setHaGroupInfo(info.toString()) - .build() - .buildException(); - } - } catch (SQLException e) { - LOG.error("Failed to connect to active cluster in HA group {}, record: {}", info, - roleRecord, e); - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setMessage("Failed to connect to active cluster in HA group") - .setHaGroupInfo(info.toString()) - .setRootCause(e) - .build() - .buildException(); - } + HAGroupInfo(String name, String url1, String url2) { + this(name, url1, url2, null); } - /** - * @return true if the given phoenix connection points to ACTIVE cluster, else false - */ - boolean isActive(PhoenixConnection connection) { - if (state != State.READY || connection == null) { - return false; - } - return roleRecord.getActiveUrl() - .equals(Optional.of(JDBCUtil.formatZookeeperUrl(connection.getURL()))); + public String getName() { + return name; } - /** - * Connect to an HBase cluster in this HA group with given url and client properties. - *
- * The URL should belong to one of the two ZK clusters in this HA group. It returns the Phoenix - * connection to the given cluster without checking the context of the cluster's role. Please - * use {@link #connectActive(Properties)} to connect to the ACTIVE cluster. - */ - PhoenixConnection connectToOneCluster(String url, Properties properties) throws SQLException { - Preconditions.checkNotNull(url); - if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - Preconditions.checkArgument(url.length() > PhoenixRuntime.JDBC_PROTOCOL.length(), - "The URL '" + url + "' is not a valid Phoenix connection string"); - } - url = JDBCUtil.formatZookeeperUrl(url); - Preconditions.checkArgument(url.equals(info.getUrl1()) || url.equals(info.getUrl2()), - "The URL '" + url + "' does not belong to this HA group " + info); - - String jdbcString = info.getJDBCUrl(url); - - ClusterRole role = roleRecord.getRole(url); - if (!role.canConnect()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.HA_CLUSTER_CAN_NOT_CONNECT) - .setMessage("Can not connect to cluster '" + url + "' in '" + role + "' role") - .build() - .buildException(); - } + public String getUrl1() { + return urls.getFirst(); + } - // Get driver instead of using PhoenixDriver.INSTANCE since it can be test or mocked driver - Driver driver = DriverManager.getDriver(jdbcString); - Preconditions.checkArgument(driver instanceof PhoenixEmbeddedDriver, - "No JDBC driver is registered for Phoenix high availability (HA) framework"); - return ((PhoenixEmbeddedDriver) driver).getConnectionQueryServices(jdbcString, properties) - .connect(jdbcString, properties); + public String getUrl2() { + return urls.getSecond(); } - @VisibleForTesting - HAGroupInfo getGroupInfo() { - return info; + public String getJDBCUrl(String zkUrl) { + Preconditions.checkArgument(zkUrl.equals(getUrl1()) || zkUrl.equals(getUrl2()), + "The URL '" + zkUrl + "' does not belong to this HA group " + this); + StringBuilder sb = new StringBuilder(); + sb.append(PhoenixRuntime.JDBC_PROTOCOL_ZK); + sb.append(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR); + sb.append(zkUrl); + if (!Strings.isNullOrEmpty(additionalJDBCParams)) { + sb.append(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR); + sb.append(additionalJDBCParams); + } + return sb.toString(); } - Properties getProperties() { - return properties; + public String getJDBCUrl1() { + return getJDBCUrl(getUrl1()); } - public ClusterRoleRecord getRoleRecord() { - return roleRecord; + public String getJDBCUrl2() { + return getJDBCUrl(getUrl2()); } /** - * Package private close method. - *
- * Once this HA group is closed, it can not be re-opened again. Use a new object if necessary. - * This method is package private because we do not want to expose the lifecycle management - * methods to public. Constructor is also private (or package-private visible for testing). - * The lifecycle management is confined to this class because an HA group is a shared resource. - * Someone calling close on this would make it unusable, since the state would become closed. + * Helper method to return the znode path in the Phoenix HA namespace. */ - void close() { - roleManagerExecutor.shutdownNow(); - try { - // TODO: Parameterize and set in future work item for pluggable - if (!roleManagerExecutor.awaitTermination(PHOENIX_HA_ZK_SESSION_TIMEOUT_MS_DEFAULT, - TimeUnit.MILLISECONDS)) { - LOG.error("Fail to shut down role managers service for HA group: {}", info); - } - } catch (InterruptedException e) { - LOG.warn("HA group {} close() got interrupted when closing role managers", info, e); - // (Re-)Cancel if current thread also interrupted - roleManagerExecutor.shutdownNow(); - // Preserve interrupt status - Thread.currentThread().interrupt(); - } - state = State.CLOSED; + String getZkPath() { + return ZKPaths.PATH_SEPARATOR + name; } @Override public String toString() { - return roleRecord == null - ? "HighAvailabilityGroup{roleRecord=null, info=" + info + ", state=" + state + "}" - : "HighAvailabilityGroup{roleRecord=" + roleRecord + ", state=" + state + "}"; + return String.format("%s[%s|%s]", name, urls.getFirst(), urls.getSecond()); } - /** - * Set the new cluster role record for this HA group. - *
- * Calling this method will make HA group be in transition state where no request can be served. - * The data source may come from either of the two clusters as seen by the ZK watcher. - * - * @param newRoleRecord the new cluster role record to set - * @return true if the new record is set as current one; false otherwise - */ - private synchronized boolean applyClusterRoleRecord(@NonNull ClusterRoleRecord newRoleRecord) { - if (roleRecord == null) { - roleRecord = newRoleRecord; - state = State.READY; - LOG.info("HA group {} is now in {} state after getting initial V{} role record: {}", - info, state, roleRecord.getVersion(), roleRecord); - LOG.debug("HA group {} is ready", this); - return true; - } - - if (!newRoleRecord.isNewerThan(roleRecord)) { - LOG.warn("Does not apply new cluster role record as it does not have higher version. " - + "Existing record: {}, new record: {}", roleRecord, newRoleRecord); - return false; - } - - if (!roleRecord.hasSameInfo(newRoleRecord)) { - LOG.error("New record {} has different HA group information from old record {}", - newRoleRecord, roleRecord); - return false; - } - - final ClusterRoleRecord oldRecord = roleRecord; - state = State.IN_TRANSITION; - LOG.info("HA group {} is in {} to set V{} record", info, state, newRoleRecord.getVersion()); - Future future = nodeChangedExecutor.submit(() -> { - try { - roleRecord.getPolicy().transitClusterRole(this, roleRecord, newRoleRecord); - } catch (SQLException e) { - throw new CompletionException(e); - } - }); - - // TODO: save timeout in the HA group info (aka cluster role record) instead in properties - String transitionTimeoutProp = properties.getProperty(PHOENIX_HA_TRANSITION_TIMEOUT_MS_KEY); - long maxTransitionTimeMs = StringUtils.isNotEmpty(transitionTimeoutProp) - ? Long.parseLong(transitionTimeoutProp) - : PHOENIX_HA_TRANSITION_TIMEOUT_MS_DEFAULT; - try { - future.get(maxTransitionTimeMs, TimeUnit.MILLISECONDS); - } catch (InterruptedException ie) { - LOG.error("Got interrupted when transiting cluster roles for HA group {}", info, ie); - future.cancel(true); - Thread.currentThread().interrupt(); - return false; - } catch (ExecutionException | TimeoutException e) { - LOG.error("HA group {} failed to transit cluster roles per policy {} to new record {}", - info, roleRecord.getPolicy(), newRoleRecord, e); - // Calling back HA policy function for cluster switch is conducted with best effort. - // HA group continues transition when its HA policy fails to deal with context switch - // (e.g. to close existing connections) - // The goal here is to gain higher availability even though existing resources against - // previous ACTIVE cluster may have not been closed cleanly. - } - roleRecord = newRoleRecord; - state = State.READY; - LOG.info("HA group {} is in {} state after applying V{} role record. Old: {}, new: {}", - info, state, roleRecord.getVersion(), oldRecord, roleRecord); - LOG.debug("HA group is ready: {}", this); + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other == this) { return true; + } + if (other.getClass() != getClass()) { + return false; + } + HAGroupInfo otherInfo = (HAGroupInfo) other; + return new EqualsBuilder().append(name, otherInfo.name).append(urls, otherInfo.urls) + .isEquals(); } - /** - * Local state of this HA group object, which transits upon explicit call (e.g. init) or when - * the cluster role change is detected. - *
- * - UNINITIALIZED is the state when this HA group has not been initialized. Once the HA group - * is initialized, it will never go to this state again. - * - READY is the state when this HA group can serve client request. There is not necessarily - * an active HBase cluster since a standby cluster may be sufficient per HA policy. - * - IN_TRANSITION is the state where HA group is dealing with cluster role changes and all - * client connection requests are rejected. - * - CLOSED is the state where the HA group is closed. Once the HA group is closed, it will - * never leave this state. - */ - enum State {UNINITIALIZED, READY, IN_TRANSITION, CLOSED} + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37).append(name).append(urls).hashCode(); + } + } + + /** + * Maintains the client view of cluster roles for the HA group using data retrieved from one ZK. + *
+ * It is a runnable to keep setting up the curator and the node cache. It will also register the + * node watcher so any znode data change will trigger a callback function updating HA group. + */ + private final class HAClusterRoleManager implements Runnable { + private final String jdbcUrl; + private final Properties properties; + private NodeCache cache; /** - * An HAGroupInfo contains information of an HA group. - *
- * It is constructed based on client input, including the JDBC connection string and properties. - * Objects of this class are used as the keys of HA group cache {@link #GROUPS}. - *
- * This class is immutable. + * Constructor which creates and starts the ZK watcher. + * @param jdbcUrl JDBC url without jdbc:phoenix prefix which may be host:port:/hbase format + * @param properties The properties defining ZK client timeouts and retries */ - @VisibleForTesting - static final class HAGroupInfo { - private final String name; - private final PairOfSameType urls; - private final String additionalJDBCParams; - - HAGroupInfo(String name, String url1, String url2, String additionalJDBCParams) { - Preconditions.checkNotNull(name); - Preconditions.checkNotNull(url1); - Preconditions.checkNotNull(url2); - this.name = name; - url1 = JDBCUtil.formatZookeeperUrl(url1); - url2 = JDBCUtil.formatZookeeperUrl(url2); - Preconditions.checkArgument(!url1.equals(url2), "Two clusters have the same ZK!"); - // Ignore the given order of url1 and url2, and reorder for equals comparison. - if (url1.compareTo(url2) > 0) { - this.urls = new PairOfSameType<>(url2, url1); - } else { - this.urls = new PairOfSameType<>(url1, url2); - } - this.additionalJDBCParams = additionalJDBCParams; - } - - HAGroupInfo(String name, String url1, String url2) { - this(name, url1, url2, null); - } - - public String getName() { - return name; - } - - public String getUrl1() { - return urls.getFirst(); - } - - public String getUrl2() { - return urls.getSecond(); - } - - public String getJDBCUrl(String zkUrl) { - Preconditions.checkArgument(zkUrl.equals(getUrl1()) || zkUrl.equals(getUrl2()), - "The URL '" + zkUrl + "' does not belong to this HA group " + this); - StringBuilder sb = new StringBuilder(); - sb.append(PhoenixRuntime.JDBC_PROTOCOL_ZK); - sb.append(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR); - sb.append(zkUrl); - if (!Strings.isNullOrEmpty(additionalJDBCParams)) { - sb.append(PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR); - sb.append(additionalJDBCParams); - } - return sb.toString(); - } - - public String getJDBCUrl1() { - return getJDBCUrl(getUrl1()); - } - - public String getJDBCUrl2() { - return getJDBCUrl(getUrl2()); - } - - /** - * Helper method to return the znode path in the Phoenix HA namespace. - */ - String getZkPath() { - return ZKPaths.PATH_SEPARATOR + name; - } - - @Override - public String toString() { - return String.format("%s[%s|%s]", name, urls.getFirst(), urls.getSecond()); - } - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (other == this) { - return true; - } - if (other.getClass() != getClass()) { - return false; - } - HAGroupInfo otherInfo = (HAGroupInfo) other; - return new EqualsBuilder() - .append(name, otherInfo.name) - .append(urls, otherInfo.urls) - .isEquals(); - } + HAClusterRoleManager(String jdbcUrl, Properties properties) { + this.jdbcUrl = jdbcUrl; + this.properties = properties; + } - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(name) - .append(urls).hashCode(); - } + @Override + public void run() { + final String zpath = info.getZkPath(); + while (!Thread.currentThread().isInterrupted()) { + try { + cache = new NodeCache(getCurator(jdbcUrl, properties), zpath); + cache.getListenable().addListener(this::nodeChanged); + cache.start(); + return; // return after building the initial node cache + } catch (InterruptedException e) { + LOG.warn("HA cluster role manager thread for '{}' is interrupted, exiting", jdbcUrl, e); + break; + } catch (Throwable t) { + LOG.warn("Fail to start node cache on '{}' for '{}'. 
Retry", jdbcUrl, zpath, t); + try { + // TODO: do better than fixed time sleep + Thread.sleep(1_000); + } catch (InterruptedException e) { + LOG.warn("HA cluster role manager thread for '{}' is interrupted, exiting", jdbcUrl, e); + break; + } + } + } } /** - * Maintains the client view of cluster roles for the HA group using data retrieved from one ZK. - *
- * It is a runnable to keep setting up the curator and the node cache. It will also register - * the node watcher so any znode data change will trigger a callback function updating HA group. + * Call back functions when a cluster role change is notified by this ZK cluster. */ - private final class HAClusterRoleManager implements Runnable { - private final String jdbcUrl; - private final Properties properties; - private NodeCache cache; - - /** - * Constructor which creates and starts the ZK watcher. - * - * @param jdbcUrl JDBC url without jdbc:phoenix prefix which may be host:port:/hbase format - * @param properties The properties defining ZK client timeouts and retries - */ - HAClusterRoleManager(String jdbcUrl, Properties properties) { - this.jdbcUrl = jdbcUrl; - this.properties = properties; - } - - @Override - public void run() { - final String zpath = info.getZkPath(); - while (!Thread.currentThread().isInterrupted()) { - try { - cache = new NodeCache(getCurator(jdbcUrl, properties), zpath); - cache.getListenable().addListener(this::nodeChanged); - cache.start(); - return; // return after building the initial node cache - } catch (InterruptedException e) { - LOG.warn("HA cluster role manager thread for '{}' is interrupted, exiting", - jdbcUrl, e); - break; - } catch (Throwable t) { - LOG.warn("Fail to start node cache on '{}' for '{}'. Retry", jdbcUrl, zpath, t); - try { - // TODO: do better than fixed time sleep - Thread.sleep(1_000); - } catch (InterruptedException e) { - LOG.warn("HA cluster role manager thread for '{}' is interrupted, exiting", - jdbcUrl, e); - break; - } - } - } - } - - /** - * Call back functions when a cluster role change is notified by this ZK cluster. - */ - private void nodeChanged() { - byte[] data = cache.getCurrentData().getData(); - Optional newRecordOptional = ClusterRoleRecord.fromJson(data); - if (!newRecordOptional.isPresent()) { - LOG.error("Fail to deserialize new record; keep current record {}", roleRecord); - return; - } - ClusterRoleRecord newRecord = newRecordOptional.get(); - LOG.info("HA group {} got a record from cluster {}: {}", info.name, jdbcUrl, newRecord); - - if (applyClusterRoleRecord(newRecord)) { - LOG.info("Successfully apply new cluster role record from cluster '{}', " - + "new record: {}", jdbcUrl, newRecord); - roleManagerLatch.countDown(); - } - } + private void nodeChanged() { + byte[] data = cache.getCurrentData().getData(); + Optional newRecordOptional = ClusterRoleRecord.fromJson(data); + if (!newRecordOptional.isPresent()) { + LOG.error("Fail to deserialize new record; keep current record {}", roleRecord); + return; + } + ClusterRoleRecord newRecord = newRecordOptional.get(); + LOG.info("HA group {} got a record from cluster {}: {}", info.name, jdbcUrl, newRecord); + + if (applyClusterRoleRecord(newRecord)) { + LOG.info( + "Successfully apply new cluster role record from cluster '{}', " + "new record: {}", + jdbcUrl, newRecord); + roleManagerLatch.countDown(); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityPolicy.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityPolicy.java index 93d26618644..d3b5a06fba7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityPolicy.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/HighAvailabilityPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; import static org.apache.phoenix.jdbc.ClusterRoleRecord.ClusterRole.ACTIVE; @@ -37,107 +36,104 @@ * An HighAvailabilityGroup provides a JDBC connection from given connection string and properties. */ enum HighAvailabilityPolicy { - FAILOVER { - @Override - public Connection provide(HighAvailabilityGroup haGroup, Properties info) - throws SQLException { - return new FailoverPhoenixConnection(haGroup, info); - } - @Override - void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, - ClusterRoleRecord newRecord) throws SQLException { - if (oldRecord.getRole1() == ACTIVE && newRecord.getRole1() == STANDBY) { - transitStandby(haGroup, oldRecord.getZk1()); - } - if (oldRecord.getRole2() == ACTIVE && newRecord.getRole2() == STANDBY) { - transitStandby(haGroup, oldRecord.getZk2()); - } - if (oldRecord.getRole1() != ACTIVE && newRecord.getRole1() == ACTIVE) { - transitActive(haGroup, oldRecord.getZk1()); - } - if (oldRecord.getRole2() != ACTIVE && newRecord.getRole2() == ACTIVE) { - transitActive(haGroup, oldRecord.getZk2()); - } - } - private void transitStandby(HighAvailabilityGroup haGroup, String zkUrl) - throws SQLException { - // Close connections when a previously ACTIVE HBase cluster becomes STANDBY. - LOG.info("Cluster {} becomes STANDBY in HA group {}, now close all its connections", - zkUrl, haGroup.getGroupInfo()); - ConnectionQueryServices cqs = null; - try { - cqs = PhoenixDriver.INSTANCE.getConnectionQueryServices( - haGroup.getGroupInfo().getJDBCUrl(zkUrl), haGroup.getProperties()); - cqs.closeAllConnections(new SQLExceptionInfo - .Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) - .setMessage("Phoenix connection got closed due to failover") - .setHaGroupInfo(haGroup.getGroupInfo().toString())); - LOG.info("Closed all connections to cluster {} for HA group {}", zkUrl, - haGroup.getGroupInfo()); - } finally { - if (cqs != null) { - // CQS is closed but it is not invalidated from global cache in PhoenixDriver - // so that any new connection will get error instead of creating a new CQS - LOG.info("Closing CQS after cluster '{}' becomes STANDBY", zkUrl); - cqs.close(); - LOG.info("Successfully closed CQS after cluster '{}' becomes STANDBY", zkUrl); - } - } - } - private void transitActive(HighAvailabilityGroup haGroup, String zkUrl) - throws SQLException { - // Invalidate CQS cache if any that has been closed but has not been cleared - LOG.info("invalidating cqs cache for zkUrl: " + zkUrl); - PhoenixDriver.INSTANCE.invalidateCache(haGroup.getGroupInfo().getJDBCUrl(zkUrl), - haGroup.getProperties()); - } - }, + FAILOVER { + @Override + public Connection provide(HighAvailabilityGroup haGroup, Properties info) throws SQLException { + return new FailoverPhoenixConnection(haGroup, info); + } - PARALLEL { - @Override - public Connection provide(HighAvailabilityGroup haGroup, Properties info) - throws SQLException { - List executorCapacities = PhoenixHAExecutorServiceProvider.hasCapacity(info); - if (executorCapacities.contains(Boolean.TRUE)) { - ParallelPhoenixContext context = - new ParallelPhoenixContext(info, haGroup, - 
PhoenixHAExecutorServiceProvider.get(info), executorCapacities); - return new ParallelPhoenixConnection(context); - } else { - // TODO: Once we have operation/primary wait timeout use the same - // Give regular connection or a failover connection? - LOG.warn("Falling back to single phoenix connection due to resource constraints"); - GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_FALLBACK_COUNTER.increment(); - return haGroup.connectActive(info); - } - } - @Override - void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, - ClusterRoleRecord newRecord) { - LOG.info("Cluster role changed for parallel HA policy."); + @Override + void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, + ClusterRoleRecord newRecord) throws SQLException { + if (oldRecord.getRole1() == ACTIVE && newRecord.getRole1() == STANDBY) { + transitStandby(haGroup, oldRecord.getZk1()); + } + if (oldRecord.getRole2() == ACTIVE && newRecord.getRole2() == STANDBY) { + transitStandby(haGroup, oldRecord.getZk2()); + } + if (oldRecord.getRole1() != ACTIVE && newRecord.getRole1() == ACTIVE) { + transitActive(haGroup, oldRecord.getZk1()); + } + if (oldRecord.getRole2() != ACTIVE && newRecord.getRole2() == ACTIVE) { + transitActive(haGroup, oldRecord.getZk2()); + } + } + + private void transitStandby(HighAvailabilityGroup haGroup, String zkUrl) throws SQLException { + // Close connections when a previously ACTIVE HBase cluster becomes STANDBY. + LOG.info("Cluster {} becomes STANDBY in HA group {}, now close all its connections", zkUrl, + haGroup.getGroupInfo()); + ConnectionQueryServices cqs = null; + try { + cqs = PhoenixDriver.INSTANCE.getConnectionQueryServices( + haGroup.getGroupInfo().getJDBCUrl(zkUrl), haGroup.getProperties()); + cqs.closeAllConnections( + new SQLExceptionInfo.Builder(SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER) + .setMessage("Phoenix connection got closed due to failover") + .setHaGroupInfo(haGroup.getGroupInfo().toString())); + LOG.info("Closed all connections to cluster {} for HA group {}", zkUrl, + haGroup.getGroupInfo()); + } finally { + if (cqs != null) { + // CQS is closed but it is not invalidated from global cache in PhoenixDriver + // so that any new connection will get error instead of creating a new CQS + LOG.info("Closing CQS after cluster '{}' becomes STANDBY", zkUrl); + cqs.close(); + LOG.info("Successfully closed CQS after cluster '{}' becomes STANDBY", zkUrl); } - }; + } + } + + private void transitActive(HighAvailabilityGroup haGroup, String zkUrl) throws SQLException { + // Invalidate CQS cache if any that has been closed but has not been cleared + LOG.info("invalidating cqs cache for zkUrl: " + zkUrl); + PhoenixDriver.INSTANCE.invalidateCache(haGroup.getGroupInfo().getJDBCUrl(zkUrl), + haGroup.getProperties()); + } + }, + + PARALLEL { + @Override + public Connection provide(HighAvailabilityGroup haGroup, Properties info) throws SQLException { + List executorCapacities = PhoenixHAExecutorServiceProvider.hasCapacity(info); + if (executorCapacities.contains(Boolean.TRUE)) { + ParallelPhoenixContext context = new ParallelPhoenixContext(info, haGroup, + PhoenixHAExecutorServiceProvider.get(info), executorCapacities); + return new ParallelPhoenixConnection(context); + } else { + // TODO: Once we have operation/primary wait timeout use the same + // Give regular connection or a failover connection? 
+ LOG.warn("Falling back to single phoenix connection due to resource constraints"); + GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_FALLBACK_COUNTER.increment(); + return haGroup.connectActive(info); + } + } + + @Override + void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, + ClusterRoleRecord newRecord) { + LOG.info("Cluster role changed for parallel HA policy."); + } + }; - private static final Logger LOG = LoggerFactory.getLogger(HighAvailabilityGroup.class); + private static final Logger LOG = LoggerFactory.getLogger(HighAvailabilityGroup.class); - /** - * Provides a JDBC connection from given connection string and properties. - * - * @param haGroup The high availability (HA) group - * @param info Connection properties - * @return a JDBC connection - * @throws SQLException if fails to provide a connection - */ - abstract Connection provide(HighAvailabilityGroup haGroup, Properties info) throws SQLException; + /** + * Provides a JDBC connection from given connection string and properties. + * @param haGroup The high availability (HA) group + * @param info Connection properties + * @return a JDBC connection + * @throws SQLException if fails to provide a connection + */ + abstract Connection provide(HighAvailabilityGroup haGroup, Properties info) throws SQLException; - /** - * Call-back function when a cluster role transition is detected in the high availability group. - * - * @param haGroup The high availability (HA) group - * @param oldRecord The older cluster role record cached in this client for the given HA group - * @param newRecord New cluster role record read from one ZooKeeper cluster znode - * @throws SQLException if fails to handle the cluster role transition - */ - abstract void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, - ClusterRoleRecord newRecord) throws SQLException; + /** + * Call-back function when a cluster role transition is detected in the high availability group. + * @param haGroup The high availability (HA) group + * @param oldRecord The older cluster role record cached in this client for the given HA group + * @param newRecord New cluster role record read from one ZooKeeper cluster znode + * @throws SQLException if fails to handle the cluster role transition + */ + abstract void transitClusterRole(HighAvailabilityGroup haGroup, ClusterRoleRecord oldRecord, + ClusterRoleRecord newRecord) throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java index af0f803e501..a0082ded3e8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,102 +26,98 @@ public class LoggingPhoenixConnection extends DelegateConnection { - private PhoenixMetricsLog phoenixMetricsLog; - - public LoggingPhoenixConnection(Connection conn, - PhoenixMetricsLog phoenixMetricsLog) { - super(conn); - this.phoenixMetricsLog = phoenixMetricsLog; - } - - public PhoenixMetricsLog getPhoenixMetricsLog() { - return phoenixMetricsLog; - } - - @Override - public Statement createStatement() throws SQLException { - return new LoggingPhoenixStatement(super.createStatement(), phoenixMetricsLog, - this); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - return new LoggingPhoenixStatement( - super.createStatement(resultSetType, resultSetConcurrency), phoenixMetricsLog, - this); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return new LoggingPhoenixStatement( - super.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), - phoenixMetricsLog, this); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql), - phoenixMetricsLog, sql, this); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - return new LoggingPhoenixPreparedStatement( - super.prepareStatement(sql, resultSetType, resultSetConcurrency), - phoenixMetricsLog, sql, this); + private PhoenixMetricsLog phoenixMetricsLog; + + public LoggingPhoenixConnection(Connection conn, PhoenixMetricsLog phoenixMetricsLog) { + super(conn); + this.phoenixMetricsLog = phoenixMetricsLog; + } + + public PhoenixMetricsLog getPhoenixMetricsLog() { + return phoenixMetricsLog; + } + + @Override + public Statement createStatement() throws SQLException { + return new LoggingPhoenixStatement(super.createStatement(), phoenixMetricsLog, this); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + return new LoggingPhoenixStatement(super.createStatement(resultSetType, resultSetConcurrency), + phoenixMetricsLog, this); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return new LoggingPhoenixStatement( + super.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), + phoenixMetricsLog, this); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql), phoenixMetricsLog, sql, + this); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return new LoggingPhoenixPreparedStatement( + super.prepareStatement(sql, resultSetType, resultSetConcurrency), phoenixMetricsLog, sql, + this); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return new 
LoggingPhoenixPreparedStatement( + super.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), + phoenixMetricsLog, sql, this); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, autoGeneratedKeys), + phoenixMetricsLog, sql, this); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, columnIndexes), + phoenixMetricsLog, sql, this); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, columnNames), + phoenixMetricsLog, sql, this); + } + + @Override + public void commit() throws SQLException { + super.commit(); + loggingMetricsHelper(); + } + + @Override + public void close() throws SQLException { + try { + loggingMetricsHelper(); + } finally { + super.close(); } + } - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { - return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, resultSetType, - resultSetConcurrency, resultSetHoldability), phoenixMetricsLog, sql, this); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, autoGeneratedKeys), - phoenixMetricsLog, sql, this); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, columnIndexes), - phoenixMetricsLog, sql, this); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { - return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql, columnNames), - phoenixMetricsLog, sql, this); - } + public void loggingMetricsHelper() throws SQLException { - @Override - public void commit() throws SQLException { - super.commit(); - loggingMetricsHelper(); - } - - @Override - public void close() throws SQLException { - try { - loggingMetricsHelper(); - } finally { - super.close(); - } - } - - public void loggingMetricsHelper() throws SQLException { - - phoenixMetricsLog.logWriteMetricsfoForMutationsSinceLastReset( - PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn)); - phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset( - PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn)); - PhoenixRuntime.resetMetrics(conn); - } + phoenixMetricsLog.logWriteMetricsfoForMutationsSinceLastReset( + PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn)); + phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset( + PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn)); + PhoenixRuntime.resetMetrics(conn); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java index 12edde920ca..daa5e5100aa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,62 +17,62 @@ */ package org.apache.phoenix.jdbc; +import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; -import java.sql.Connection; - public class LoggingPhoenixPreparedStatement extends DelegatePreparedStatement { - - private PhoenixMetricsLog phoenixMetricsLog; - private String sql; - private Connection conn; - - public LoggingPhoenixPreparedStatement(PreparedStatement stmt, - PhoenixMetricsLog phoenixMetricsLog, String sql, Connection conn) { - super(stmt); - this.phoenixMetricsLog = phoenixMetricsLog; - this.sql = sql; - this.conn = conn; - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - @Override - public ResultSet executeQuery() throws SQLException { - ResultSet rs = new LoggingPhoenixResultSet(super.executeQuery(), phoenixMetricsLog, sql); - this.loggingAutoCommitHelper(); - return rs; - } + private PhoenixMetricsLog phoenixMetricsLog; + private String sql; + private Connection conn; - @Override - public ResultSet getResultSet() throws SQLException { - // Re-use the cached ResultSet value since call to getResultSet() is not idempotent - ResultSet resultSet = super.getResultSet(); - return (resultSet == null) ? null : new LoggingPhoenixResultSet(resultSet, - phoenixMetricsLog, sql); - } + public LoggingPhoenixPreparedStatement(PreparedStatement stmt, + PhoenixMetricsLog phoenixMetricsLog, String sql, Connection conn) { + super(stmt); + this.phoenixMetricsLog = phoenixMetricsLog; + this.sql = sql; + this.conn = conn; + } - @Override - public int executeUpdate() throws SQLException { - int res = super.executeUpdate(); - this.loggingAutoCommitHelper(); - return res; - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return new LoggingPhoenixResultSet(super.getGeneratedKeys(), phoenixMetricsLog, sql); - } + @Override + public ResultSet executeQuery(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public ResultSet executeQuery() throws SQLException { + ResultSet rs = new LoggingPhoenixResultSet(super.executeQuery(), phoenixMetricsLog, sql); + this.loggingAutoCommitHelper(); + return rs; + } + + @Override + public ResultSet getResultSet() throws SQLException { + // Re-use the cached ResultSet value since call to getResultSet() is not idempotent + ResultSet resultSet = super.getResultSet(); + return (resultSet == null) + ? 
null + : new LoggingPhoenixResultSet(resultSet, phoenixMetricsLog, sql); + } + + @Override + public int executeUpdate() throws SQLException { + int res = super.executeUpdate(); + this.loggingAutoCommitHelper(); + return res; + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return new LoggingPhoenixResultSet(super.getGeneratedKeys(), phoenixMetricsLog, sql); + } - private void loggingAutoCommitHelper() throws SQLException { - if(conn.getAutoCommit() && (conn instanceof LoggingPhoenixConnection)) { - ((LoggingPhoenixConnection)conn).loggingMetricsHelper(); - } + private void loggingAutoCommitHelper() throws SQLException { + if (conn.getAutoCommit() && (conn instanceof LoggingPhoenixConnection)) { + ((LoggingPhoenixConnection) conn).loggingMetricsHelper(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java index 4ecde32acca..9ae14618f14 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,29 +23,30 @@ import org.apache.phoenix.util.PhoenixRuntime; public class LoggingPhoenixResultSet extends DelegateResultSet { - - private PhoenixMetricsLog phoenixMetricsLog; - private String sql; - private boolean areMetricsLogged; - public LoggingPhoenixResultSet(ResultSet rs, PhoenixMetricsLog phoenixMetricsLog, String sql) { - super(rs); - this.phoenixMetricsLog = phoenixMetricsLog; - this.sql = sql; - this.areMetricsLogged = false; + private PhoenixMetricsLog phoenixMetricsLog; + private String sql; + private boolean areMetricsLogged; + + public LoggingPhoenixResultSet(ResultSet rs, PhoenixMetricsLog phoenixMetricsLog, String sql) { + super(rs); + this.phoenixMetricsLog = phoenixMetricsLog; + this.sql = sql; + this.areMetricsLogged = false; + } + + @Override + public void close() throws SQLException { + if (!rs.isClosed()) { + super.close(); } - - @Override - public void close() throws SQLException { - if (!rs.isClosed()) { - super.close(); - } - if (!this.areMetricsLogged) { - phoenixMetricsLog.logOverAllReadRequestMetrics(PhoenixRuntime.getOverAllReadRequestMetricInfo(rs), sql); - phoenixMetricsLog.logRequestReadMetrics(PhoenixRuntime.getRequestReadMetricInfo(rs), sql); - PhoenixRuntime.resetMetrics(rs); - this.areMetricsLogged = true; - } + if (!this.areMetricsLogged) { + phoenixMetricsLog + .logOverAllReadRequestMetrics(PhoenixRuntime.getOverAllReadRequestMetricInfo(rs), sql); + phoenixMetricsLog.logRequestReadMetrics(PhoenixRuntime.getRequestReadMetricInfo(rs), sql); + PhoenixRuntime.resetMetrics(rs); + this.areMetricsLogged = true; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixStatement.java index d31f5215608..50f13f6e52e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixStatement.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.phoenix.jdbc; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -25,59 +24,62 @@ public class LoggingPhoenixStatement extends DelegateStatement { - private PhoenixMetricsLog phoenixMetricsLog; - private String sql; - private Connection conn; + private PhoenixMetricsLog phoenixMetricsLog; + private String sql; + private Connection conn; - public LoggingPhoenixStatement(Statement stmt, PhoenixMetricsLog phoenixMetricsLog, Connection conn) { - super(stmt); - this.phoenixMetricsLog = phoenixMetricsLog; - this.conn = conn; - } + public LoggingPhoenixStatement(Statement stmt, PhoenixMetricsLog phoenixMetricsLog, + Connection conn) { + super(stmt); + this.phoenixMetricsLog = phoenixMetricsLog; + this.conn = conn; + } - @Override - public boolean execute(String sql) throws SQLException { - boolean result; - this.sql = sql; - result = super.execute(sql); - this.loggingAutoCommitHelper(); - return result; - } + @Override + public boolean execute(String sql) throws SQLException { + boolean result; + this.sql = sql; + result = super.execute(sql); + this.loggingAutoCommitHelper(); + return result; + } - @Override - public ResultSet executeQuery(String sql) throws SQLException { - this.sql = sql; - ResultSet rs = new LoggingPhoenixResultSet(super.executeQuery(sql), phoenixMetricsLog, this.sql); - this.loggingAutoCommitHelper(); - return rs; - } + @Override + public ResultSet executeQuery(String sql) throws SQLException { + this.sql = sql; + ResultSet rs = + new LoggingPhoenixResultSet(super.executeQuery(sql), phoenixMetricsLog, this.sql); + this.loggingAutoCommitHelper(); + return rs; + } - @Override - public int executeUpdate(String sql) throws SQLException { - int result; - this.sql = sql; - result = super.executeUpdate(sql); - this.loggingAutoCommitHelper(); - return result; - } + @Override + public int executeUpdate(String sql) throws SQLException { + int result; + this.sql = sql; + result = super.executeUpdate(sql); + this.loggingAutoCommitHelper(); + return result; + } - @Override - public ResultSet getResultSet() throws SQLException { - // Re-use the cached ResultSet value since call to getResultSet() is not idempotent - ResultSet resultSet = super.getResultSet(); - return (resultSet == null) ? null : new LoggingPhoenixResultSet(resultSet, - phoenixMetricsLog, sql); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return new LoggingPhoenixResultSet(super.getGeneratedKeys(), phoenixMetricsLog, this.sql); - } + @Override + public ResultSet getResultSet() throws SQLException { + // Re-use the cached ResultSet value since call to getResultSet() is not idempotent + ResultSet resultSet = super.getResultSet(); + return (resultSet == null) + ? 
null + : new LoggingPhoenixResultSet(resultSet, phoenixMetricsLog, sql); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return new LoggingPhoenixResultSet(super.getGeneratedKeys(), phoenixMetricsLog, this.sql); + } - private void loggingAutoCommitHelper() throws SQLException { - if(conn.getAutoCommit() && (conn instanceof LoggingPhoenixConnection)) { - ((LoggingPhoenixConnection)conn).loggingMetricsHelper(); - } + private void loggingAutoCommitHelper() throws SQLException { + if (conn.getAutoCommit() && (conn instanceof LoggingPhoenixConnection)) { + ((LoggingPhoenixConnection) conn).loggingMetricsHelper(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/MasterConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/MasterConnectionInfo.java index aaf409819d5..c12ee5eb7bb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/MasterConnectionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/MasterConnectionInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,82 +29,79 @@ /** * ConnectionInfo used for org.apache.hadoop.hbase.client.MasterRegistry - * */ public class MasterConnectionInfo extends AbstractRPCConnectionInfo { - private static final String MASTER_REGISTRY_CLASS_NAME = - "org.apache.hadoop.hbase.client.MasterRegistry"; + private static final String MASTER_REGISTRY_CLASS_NAME = + "org.apache.hadoop.hbase.client.MasterRegistry"; + + protected MasterConnectionInfo(boolean isConnectionless, String principal, String keytab, + User user, String haGroup, String bootstrapServers, ConnectionType connectionType) { + super(isConnectionless, principal, keytab, user, haGroup, connectionType); + this.bootstrapServers = bootstrapServers; + } - protected MasterConnectionInfo(boolean isConnectionless, String principal, String keytab, - User user, String haGroup, String bootstrapServers, ConnectionType connectionType) { - super(isConnectionless, principal, keytab, user, haGroup, connectionType); - this.bootstrapServers = bootstrapServers; + @Override + public ReadOnlyProps asProps() { + if (isConnectionless) { + return ReadOnlyProps.EMPTY_PROPS; } - @Override - public ReadOnlyProps asProps() { - if (isConnectionless) { - return ReadOnlyProps.EMPTY_PROPS; - } - - Map connectionProps = getCommonProps(); - connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - MASTER_REGISTRY_CLASS_NAME); - - if (bootstrapServers != null) { - // This is already normalized to include ports - connectionProps.put(HConstants.MASTER_ADDRS_KEY, bootstrapServers); - } - - return connectionProps.isEmpty() ? ReadOnlyProps.EMPTY_PROPS - : new ReadOnlyProps(connectionProps.entrySet().iterator()); + Map connectionProps = getCommonProps(); + connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, MASTER_REGISTRY_CLASS_NAME); + + if (bootstrapServers != null) { + // This is already normalized to include ports + connectionProps.put(HConstants.MASTER_ADDRS_KEY, bootstrapServers); } - @Override - public String toUrl() { - return PhoenixRuntime.JDBC_PROTOCOL_MASTER + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + toString(); + return connectionProps.isEmpty() + ? 
ReadOnlyProps.EMPTY_PROPS + : new ReadOnlyProps(connectionProps.entrySet().iterator()); + } + + @Override + public String toUrl() { + return PhoenixRuntime.JDBC_PROTOCOL_MASTER + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + toString(); + } + + @Override + public ConnectionInfo withPrincipal(String principal) { + return new MasterConnectionInfo(isConnectionless, principal, keytab, user, haGroup, + bootstrapServers, connectionType); + } + + /** + * Builder class for MasterConnectionInfo + * @since 138 + */ + protected static class Builder extends AbstractRPCConnectionInfo.Builder { + + public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) + throws SQLException { + super(url, config, props, info); + if (!HAS_MASTER_REGISTRY) { + throw getMalFormedUrlException( + "HBase version does not support Master registry for: " + url); + } } + @Override + protected void normalize() throws SQLException { + normalizeMaster(); + } @Override - public ConnectionInfo withPrincipal(String principal) { - return new MasterConnectionInfo(isConnectionless, principal, keytab, user, - haGroup, bootstrapServers, connectionType); + protected ConnectionInfo build() { + return new MasterConnectionInfo(isConnectionless, principal, keytab, user, haGroup, hostsList, + connectionType); } - /** - * Builder class for MasterConnectionInfo - * - * @since 138 - */ - protected static class Builder extends AbstractRPCConnectionInfo.Builder { - - public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) - throws SQLException { - super(url, config, props, info); - if (!HAS_MASTER_REGISTRY) { - throw getMalFormedUrlException( - "HBase version does not support Master registry for: " + url); - } - } - - @Override - protected void normalize() throws SQLException { - normalizeMaster(); - } - - @Override - protected ConnectionInfo build() { - return new MasterConnectionInfo(isConnectionless, principal, keytab, user, haGroup, - hostsList, connectionType); - } - - public static boolean isMaster(Configuration config, ReadOnlyProps props, Properties info) { - // Default is handled by the caller - return config != null && MASTER_REGISTRY_CLASS_NAME - .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); - } + public static boolean isMaster(Configuration config, ReadOnlyProps props, Properties info) { + // Default is handled by the caller + return config != null && MASTER_REGISTRY_CLASS_NAME + .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixConnection.java index 3184af7adf2..eabf09d675f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixConnection.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,14 +17,7 @@ */ package org.apache.phoenix.jdbc; -import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.jdbc.ParallelPhoenixUtil.FutureResult; -import org.apache.phoenix.monitoring.MetricType; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.apache.phoenix.exception.SQLExceptionCode.CLASS_NOT_UNWRAPPABLE; import java.sql.Array; import java.sql.Blob; @@ -53,629 +46,644 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Stream; -import static org.apache.phoenix.exception.SQLExceptionCode.CLASS_NOT_UNWRAPPABLE; +import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.jdbc.ParallelPhoenixUtil.FutureResult; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ParallelPhoenixConnection implements PhoenixMonitoredConnection { - private static final Logger LOG = LoggerFactory.getLogger(ParallelPhoenixConnection.class); - - private final ParallelPhoenixContext context; - CompletableFuture futureConnection1; - CompletableFuture futureConnection2; - public ParallelPhoenixConnection(ParallelPhoenixContext context) throws SQLException { - this.context = context; - LOG.trace("First Url: {} Second Url: {}", context.getHaGroup().getGroupInfo().getJDBCUrl1(), - context.getHaGroup().getGroupInfo().getJDBCUrl2()); - futureConnection1 = context.chainOnConn1(() -> getConnection(context.getHaGroup(), - context.getHaGroup().getGroupInfo().getJDBCUrl1(), - context.getProperties())); - futureConnection2 = context.chainOnConn2(() -> getConnection(context.getHaGroup(), - context.getHaGroup().getGroupInfo().getJDBCUrl2(), - context.getProperties())); - - // Ensure one connection is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(Arrays.asList(futureConnection1, futureConnection2), context, false); - } - - @VisibleForTesting - ParallelPhoenixConnection(ParallelPhoenixContext context, CompletableFuture futureConnection1, CompletableFuture futureConnection2) throws SQLException { - this.context = context; - this.futureConnection1 = futureConnection1; - this.futureConnection2 = futureConnection2; - // Ensure one connection is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(Arrays.asList(futureConnection1, futureConnection2), context, false); - } - - private static PhoenixConnection getConnection(HighAvailabilityGroup haGroup, String url, Properties properties) { - try { - return haGroup.connectToOneCluster(url, properties); - } catch (SQLException exception) { - if (LOG.isTraceEnabled()) { - 
LOG.trace(String.format("Failed to get a connection for haGroup %s to %s", haGroup.toString(), url), exception); - } - throw new CompletionException(exception); - } - } - - public CompletableFuture getFutureConnection1() { - return futureConnection1; - } - - public CompletableFuture getFutureConnection2() { - return futureConnection2; - } - - @VisibleForTesting - ParallelPhoenixContext getContext() { - return this.context; - } - - Object runOnConnections(Function function, boolean useMetrics) throws SQLException { - return ParallelPhoenixUtil.INSTANCE.runFutures(function, futureConnection1, futureConnection2, context, useMetrics); - } - - PairOfSameType runOnConnectionsGetAll(Function function, boolean useMetrics) throws SQLException { - return ParallelPhoenixUtil.INSTANCE.runOnFuturesGetAll(function, futureConnection1, futureConnection2, context, useMetrics); - } - - @Override - public ParallelPhoenixStatement createStatement() throws SQLException { - context.checkOpen(); - - Function function = (T) -> { - try { - return (PhoenixStatement) T.createStatement(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - List> futures = ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, futureConnection1, - futureConnection2, context, true); - - Preconditions.checkState(futures.size() == 2); - CompletableFuture statement1 = futures.get(0); - CompletableFuture statement2 = futures.get(1); - - //Ensure one statement is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - - return new ParallelPhoenixStatement(context, statement1, statement2); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - context.checkOpen(); - - Function function = (T) -> { - try { - return (PhoenixMonitoredPreparedStatement) T.prepareStatement(sql); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - List> futures = ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, futureConnection1, - futureConnection2, context, true); - - Preconditions.checkState(futures.size() == 2); - CompletableFuture statement1 = futures.get(0); - CompletableFuture statement2 = futures.get(1); - - //Ensure one statement is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - - return new ParallelPhoenixPreparedStatement(this.context, statement1, statement2); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { + private static final Logger LOG = LoggerFactory.getLogger(ParallelPhoenixConnection.class); + + private final ParallelPhoenixContext context; + CompletableFuture futureConnection1; + CompletableFuture futureConnection2; + + public ParallelPhoenixConnection(ParallelPhoenixContext context) throws SQLException { + this.context = context; + LOG.trace("First Url: {} Second Url: {}", context.getHaGroup().getGroupInfo().getJDBCUrl1(), + context.getHaGroup().getGroupInfo().getJDBCUrl2()); + futureConnection1 = context.chainOnConn1(() -> getConnection(context.getHaGroup(), + context.getHaGroup().getGroupInfo().getJDBCUrl1(), context.getProperties())); + futureConnection2 = context.chainOnConn2(() -> getConnection(context.getHaGroup(), + context.getHaGroup().getGroupInfo().getJDBCUrl2(), context.getProperties())); + + // Ensure one connection is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(Arrays.asList(futureConnection1, futureConnection2), + context, 
false); + } + + @VisibleForTesting + ParallelPhoenixConnection(ParallelPhoenixContext context, + CompletableFuture futureConnection1, + CompletableFuture futureConnection2) throws SQLException { + this.context = context; + this.futureConnection1 = futureConnection1; + this.futureConnection2 = futureConnection2; + // Ensure one connection is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(Arrays.asList(futureConnection1, futureConnection2), + context, false); + } + + private static PhoenixConnection getConnection(HighAvailabilityGroup haGroup, String url, + Properties properties) { + try { + return haGroup.connectToOneCluster(url, properties); + } catch (SQLException exception) { + if (LOG.isTraceEnabled()) { + LOG.trace( + String.format("Failed to get a connection for haGroup %s to %s", haGroup.toString(), url), + exception); + } + throw new CompletionException(exception); + } + } + + public CompletableFuture getFutureConnection1() { + return futureConnection1; + } + + public CompletableFuture getFutureConnection2() { + return futureConnection2; + } + + @VisibleForTesting + ParallelPhoenixContext getContext() { + return this.context; + } + + Object runOnConnections(Function function, boolean useMetrics) + throws SQLException { + return ParallelPhoenixUtil.INSTANCE.runFutures(function, futureConnection1, futureConnection2, + context, useMetrics); + } + + PairOfSameType runOnConnectionsGetAll(Function function, + boolean useMetrics) throws SQLException { + return ParallelPhoenixUtil.INSTANCE.runOnFuturesGetAll(function, futureConnection1, + futureConnection2, context, useMetrics); + } + + @Override + public ParallelPhoenixStatement createStatement() throws SQLException { + context.checkOpen(); + + Function function = (T) -> { + try { + return (PhoenixStatement) T.createStatement(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + List> futures = ParallelPhoenixUtil.INSTANCE + .applyFunctionToFutures(function, futureConnection1, futureConnection2, context, true); + + Preconditions.checkState(futures.size() == 2); + CompletableFuture statement1 = futures.get(0); + CompletableFuture statement2 = futures.get(1); + + // Ensure one statement is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); + + return new ParallelPhoenixStatement(context, statement1, statement2); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + context.checkOpen(); + + Function function = (T) -> { + try { + return (PhoenixMonitoredPreparedStatement) T.prepareStatement(sql); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + List> futures = + ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, futureConnection1, + futureConnection2, context, true); + + Preconditions.checkState(futures.size() == 2); + CompletableFuture statement1 = futures.get(0); + CompletableFuture statement2 = futures.get(1); + + // Ensure one statement is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); + + return new ParallelPhoenixPreparedStatement(this.context, statement1, statement2); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return null; + } + + @Override + public String nativeSQL(String sql) throws SQLException { + return null; + } + + @Override + public boolean getAutoCommit() throws SQLException { + Function function = (T) -> { + try { + return 
T.getAutoCommit(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (boolean) runOnConnections(function, true); + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + Function function = (T) -> { + try { + T.setAutoCommit(autoCommit); return null; - } - - @Override - public String nativeSQL(String sql) throws SQLException { + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnConnections(function, true); + } + + @Override + public void commit() throws SQLException { + Function function = (T) -> { + try { + T.commit(); return null; - } - - @Override - public boolean getAutoCommit() throws SQLException { - Function function = (T) -> { - try { - return T.getAutoCommit(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (boolean) runOnConnections(function, true); - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - Function function = (T) -> { - try { - T.setAutoCommit(autoCommit); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnConnections(function, true); - } - - @Override - public void commit() throws SQLException { - Function function = (T) -> { - try { - T.commit(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnConnections(function, true); - } - - @Override - public void rollback() throws SQLException { - - } - - /** - * Close the underlying connections. Returns after any one of the underlying connections have - * closed successfully - * - * @throws SQLException if trying to close both the underlying connections encounters errors - */ - @Override - public void close() throws SQLException { - context.close(); - SQLException closeExp = null; - - // We can have errors on the chain, we still need to close the underlying connections - // irrespective. Do a close when we're at the end of the chain and wait for any 1 to be - // successful. We need to track which future completed hence use FutureResult - // Enqueue a close operation at the end of the chain in the common ForkJoin Pool - java.util.List> futures = new ArrayList<>(); - CompletableFuture>> closeFuture1=null,closeFuture2=null; - - // For connection close, we use the separate close executor pool to perform - // close on both the connections. We ensure that a success is returned when - // any one of the connection is closed successfully. 
- - - try { - Supplier closeSupplier1 = getCloseSupplier(futureConnection1); - closeFuture1 = - futureConnection1.handle((obj, e) -> { - return CompletableFuture.supplyAsync(closeSupplier1, context.getCloseConnection1ExecutorService()); - }).thenApply(t -> new FutureResult<>(t, 0)); - futures.add(closeFuture1); - } catch (Exception e) { - //Swallow close exceptions - LOG.error("Unknow error happened preparing to close connection 1.",e); - } - try { - Supplier closeSupplier2 = getCloseSupplier(futureConnection2); - closeFuture2 = - futureConnection2.handle((obj, e) -> { - return CompletableFuture.supplyAsync(closeSupplier2, context.getCloseConnection2ExecutorService()); - }).thenApply(t -> new FutureResult<>(t, 1)); - futures.add(closeFuture2); - } catch (Exception e) { - //Swallow close exceptions - LOG.error("Unknow error happened preparing to close connection 2.",e); - } - - - - FutureResult> result = - (FutureResult>) ParallelPhoenixUtil.INSTANCE - .getAnyOfNonExceptionally(futures, context); - - try { - ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(result.getResult(), context); - return; - } catch (Exception e) { - closeExp = new SQLException(e); - } - // The previous close encountered an exception try the other one - CompletableFuture>> otherFuture = - (result.getIndex() == 0) ? closeFuture2 : closeFuture1; - if(otherFuture != null) { - try { - FutureResult> otherResult = - ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(otherFuture, context); - ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(otherResult.getResult(), context); - } catch (Exception e) { - closeExp.addSuppressed(e); - LOG.error("Failed closing both underlying connections within time limits", closeExp); - throw closeExp; - } - } - } - - private Supplier getCloseSupplier(CompletableFuture conn) { - return () -> { - try { - getConnectionAndTryClose(conn); - } catch (Exception exp) { - throw new CompletionException(exp); - } - return true; - }; - } - - private void getConnectionAndTryClose(CompletableFuture futureConn) - throws SQLException { - try { - futureConn.get().close(); - } catch (InterruptedException | ExecutionException e) { - throw new SQLException(e); - } - } - - @Override - public boolean isClosed() throws SQLException { - return context.isClosed(); - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - Function function = (T) -> { - try { - return T.getMetaData(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - return (DatabaseMetaData) runOnConnections(function, true); - } - - @Override - public boolean isReadOnly() throws SQLException { - return false; - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public String getCatalog() throws SQLException { - Function function = (T) -> { - try { - return T.getCatalog(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - return (String) runOnConnections(function, true); - } - - @Override - public void setCatalog(String catalog) throws SQLException { - Function function = (T) -> { - try { - T.getCatalog(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - runOnConnections(function, true); - } - - @Override - public int getTransactionIsolation() throws SQLException { - Function function = (T) -> { - try { - return T.getTransactionIsolation(); - } catch (SQLException exception) { - throw new 
CompletionException(exception); - } - }; - return (int) runOnConnections(function, true); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - Function function = (T) -> { - try { - T.setTransactionIsolation(level); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - runOnConnections(function, true); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - - Function function = (T) -> { - try { - return T.getWarnings(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - try { - PairOfSameType pair = runOnConnectionsGetAll(function, true); - SQLWarning warning1 = (SQLWarning) pair.getFirst(); - SQLWarning warning2 = (SQLWarning) pair.getSecond(); - if (warning1 != null && warning2 != null) { - SQLWarning warning = new SQLWarning("Warnings on multiple connections."); - warning.setNextWarning(warning1); - warning.setNextWarning(warning2); - return warning; - } else { - return Stream.of(warning1, warning2).filter(Objects::nonNull).findFirst().orElse(null); - } - } catch (Exception e) { - throw new SQLException(e); - } - - } - - @Override - public void clearWarnings() throws SQLException { - Function function = (T) -> { - try { - T.clearWarnings(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - runOnConnectionsGetAll(function, true); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - return null; - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Map> getTypeMap() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int getHoldability() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Savepoint setSavepoint() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnConnections(function, true); + } + + @Override + public void rollback() throws SQLException { + + } + + /** + * Close the underlying connections. 
Returns after any one of the underlying connections have + closed successfully + * @throws SQLException if trying to close both the underlying connections encounters errors + */ + @Override + public void close() throws SQLException { + context.close(); + SQLException closeExp = null; + + // We can have errors on the chain, we still need to close the underlying connections + // irrespective. Do a close when we're at the end of the chain and wait for any 1 to be + // successful. We need to track which future completed hence use FutureResult + // Enqueue a close operation at the end of the chain in the common ForkJoin Pool + java.util.List> futures = + new ArrayList<>(); + CompletableFuture>> closeFuture1 = null, + closeFuture2 = null; + + // For connection close, we use the separate close executor pool to perform + // close on both the connections. We ensure that a success is returned when + // any one of the connections is closed successfully. + + try { + Supplier closeSupplier1 = getCloseSupplier(futureConnection1); + closeFuture1 = futureConnection1.handle((obj, e) -> { + return CompletableFuture.supplyAsync(closeSupplier1, + context.getCloseConnection1ExecutorService()); + }).thenApply(t -> new FutureResult<>(t, 0)); + futures.add(closeFuture1); + } catch (Exception e) { + // Swallow close exceptions + LOG.error("Unknown error happened preparing to close connection 1.", e); + } + try { + Supplier closeSupplier2 = getCloseSupplier(futureConnection2); + closeFuture2 = futureConnection2.handle((obj, e) -> { + return CompletableFuture.supplyAsync(closeSupplier2, + context.getCloseConnection2ExecutorService()); + }).thenApply(t -> new FutureResult<>(t, 1)); + futures.add(closeFuture2); + } catch (Exception e) { + // Swallow close exceptions + LOG.error("Unknown error happened preparing to close connection 2.", e); + } + + FutureResult> result = + (FutureResult>) ParallelPhoenixUtil.INSTANCE + .getAnyOfNonExceptionally(futures, context); + + try { + ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(result.getResult(), context); + return; + } catch (Exception e) { + closeExp = new SQLException(e); + } + // The previous close encountered an exception try the other one + CompletableFuture>> otherFuture = + (result.getIndex() == 0) ?
closeFuture2 : closeFuture1; + if (otherFuture != null) { + try { + FutureResult> otherResult = + ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(otherFuture, context); + ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(otherResult.getResult(), context); + } catch (Exception e) { + closeExp.addSuppressed(e); + LOG.error("Failed closing both underlying connections within time limits", closeExp); + throw closeExp; + } + } + } + + private Supplier getCloseSupplier(CompletableFuture conn) { + return () -> { + try { + getConnectionAndTryClose(conn); + } catch (Exception exp) { + throw new CompletionException(exp); + } + return true; + }; + } + + private void getConnectionAndTryClose(CompletableFuture futureConn) + throws SQLException { + try { + futureConn.get().close(); + } catch (InterruptedException | ExecutionException e) { + throw new SQLException(e); + } + } + + @Override + public boolean isClosed() throws SQLException { + return context.isClosed(); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + Function function = (T) -> { + try { + return T.getMetaData(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + return (DatabaseMetaData) runOnConnections(function, true); + } + + @Override + public boolean isReadOnly() throws SQLException { + return false; + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getCatalog() throws SQLException { + Function function = (T) -> { + try { + return T.getCatalog(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + return (String) runOnConnections(function, true); + } + + @Override + public void setCatalog(String catalog) throws SQLException { + Function function = (T) -> { + try { + T.getCatalog(); return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + runOnConnections(function, true); + } + + @Override + public int getTransactionIsolation() throws SQLException { + Function function = (T) -> { + try { + return T.getTransactionIsolation(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + return (int) runOnConnections(function, true); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + Function function = (T) -> { + try { + T.setTransactionIsolation(level); return null; - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + runOnConnections(function, true); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + + Function function = (T) -> { + try { + return T.getWarnings(); + } catch 
(SQLException exception) { + throw new CompletionException(exception); + } + }; + + try { + PairOfSameType pair = runOnConnectionsGetAll(function, true); + SQLWarning warning1 = (SQLWarning) pair.getFirst(); + SQLWarning warning2 = (SQLWarning) pair.getSecond(); + if (warning1 != null && warning2 != null) { + SQLWarning warning = new SQLWarning("Warnings on multiple connections."); + warning.setNextWarning(warning1); + warning.setNextWarning(warning2); + return warning; + } else { + return Stream.of(warning1, warning2).filter(Objects::nonNull).findFirst().orElse(null); + } + } catch (Exception e) { + throw new SQLException(e); + } + + } + + @Override + public void clearWarnings() throws SQLException { + Function function = (T) -> { + try { + T.clearWarnings(); return null; - } - - @Override - public Clob createClob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Blob createBlob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public NClob createNClob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - throw new SQLClientInfoException(); - } - - @Override - public String getClientInfo(String name) throws SQLException { - return context.getProperties() != null ? context.getProperties().getProperty(name) : null; - } - - @Override - public Properties getClientInfo() throws SQLException { - return context.getProperties(); - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - throw new SQLClientInfoException(); - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public String getSchema() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setSchema(String schema) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void abort(Executor executor) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int getNetworkTimeout() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (iface.isInstance(this)) { - return (T) this; - } - throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - - @Override - public Map> getMutationMetrics() { - Map> metrics = new HashMap<>(); - try { - //This can return an unmodifiable map so we create our own - Map> winningMetrics = (Map>) runOnConnections(PhoenixConnection::getMutationMetrics, false); - metrics.putAll(winningMetrics); - 
context.decorateMetrics(metrics); - return metrics; - } catch (SQLException e) { - LOG.error("Unexpected error while getting mutation metrics.", e); - return Collections.emptyMap(); - } - } - - @Override - public Map> getReadMetrics() { - Map> metrics = new HashMap<>(); - try { - //This can return an unmodifiable map so we create our own - Map> winningMetrics = (Map>) runOnConnections(PhoenixConnection::getReadMetrics, false); - metrics.putAll(winningMetrics); - context.decorateMetrics(metrics); - return metrics; - } catch (SQLException e) { - LOG.error("Unexpected error while getting read metrics.", e); - return Collections.emptyMap(); - } - } - - @Override - public boolean isRequestLevelMetricsEnabled() { - //For initial offering assume this is true, may want to OR the 2 connections - return true; - } - - @Override - public void clearMetrics() { - Function function = (T) -> { - T.clearMetrics(); - return null; - }; - try { - runOnConnections(function, false); - } catch (SQLException exception) { - LOG.error("Unexpected exception while clearning metrics.", exception); - } - context.resetMetrics(); - } + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + runOnConnectionsGetAll(function, true); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return null; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Map> getTypeMap() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getHoldability() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Savepoint setSavepoint() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return null; + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + 
return null; + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return null; + } + + @Override + public Clob createClob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Blob createBlob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob createNClob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + throw new SQLClientInfoException(); + } + + @Override + public String getClientInfo(String name) throws SQLException { + return context.getProperties() != null ? context.getProperties().getProperty(name) : null; + } + + @Override + public Properties getClientInfo() throws SQLException { + return context.getProperties(); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + throw new SQLClientInfoException(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getSchema() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setSchema(String schema) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void abort(Executor executor) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getNetworkTimeout() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isInstance(this)) { + return (T) this; + } + throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @Override + public Map> getMutationMetrics() { + Map> metrics = new HashMap<>(); + try { + // This can return an unmodifiable map so we create our own + Map> winningMetrics = (Map>) runOnConnections(PhoenixConnection::getMutationMetrics, false); + metrics.putAll(winningMetrics); + context.decorateMetrics(metrics); + return metrics; + } catch (SQLException e) { + LOG.error("Unexpected error while getting mutation metrics.", e); + return Collections.emptyMap(); + } + } + + @Override + public Map> getReadMetrics() { + Map> metrics = new HashMap<>(); + try { + // This can return an unmodifiable map so we create our own + Map> winningMetrics = (Map>) runOnConnections(PhoenixConnection::getReadMetrics, false); + metrics.putAll(winningMetrics); + context.decorateMetrics(metrics); + return metrics; + } catch (SQLException e) { + LOG.error("Unexpected error while getting read metrics.", e); + return Collections.emptyMap(); + } + } 
+ + @Override + public boolean isRequestLevelMetricsEnabled() { + // For initial offering assume this is true, may want to OR the 2 connections + return true; + } + + @Override + public void clearMetrics() { + Function function = (T) -> { + T.clearMetrics(); + return null; + }; + try { + runOnConnections(function, false); + } catch (SQLException exception) { + LOG.error("Unexpected exception while clearing metrics.", exception); + } + context.resetMetrics(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixContext.java index 567abad2dd3..ef8229228bb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,14 +17,10 @@ */ package org.apache.phoenix.jdbc; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.monitoring.MetricType; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.apache.phoenix.jdbc.ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER; +import static org.apache.phoenix.query.QueryServices.AUTO_COMMIT_ATTRIB; import java.sql.SQLException; import java.util.List; @@ -35,231 +31,246 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.apache.phoenix.jdbc.ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER; -import static org.apache.phoenix.query.QueryServices.AUTO_COMMIT_ATTRIB; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * ParallelPhoenixContext holds the state of the execution of a parallel phoenix operation as well as metrics. + * ParallelPhoenixContext holds the state of the execution of a parallel phoenix operation as well + * as metrics.
*/ public class ParallelPhoenixContext { - private static final Logger LOG = LoggerFactory.getLogger(ParallelPhoenixContext.class); - - public static String PARALLEL_PHOENIX_METRICS = "parallel_phoenix_metrics"; - - private final ParallelPhoenixClusterContext cluster1Context; - private final ParallelPhoenixClusterContext cluster2Context; + private static final Logger LOG = LoggerFactory.getLogger(ParallelPhoenixContext.class); - //May need multiple properties in the future... - //Depends on if we have phoenix.querytimeout and phoenix.second.querytimeout - private final Properties properties; + public static String PARALLEL_PHOENIX_METRICS = "parallel_phoenix_metrics"; - private final HighAvailabilityGroup haGroup; - private final long operationTimeoutMs; + private final ParallelPhoenixClusterContext cluster1Context; + private final ParallelPhoenixClusterContext cluster2Context; - private volatile boolean isClosed = false; - private volatile boolean isErrored = false; + // May need multiple properties in the future... + // Depends on if we have phoenix.querytimeout and phoenix.second.querytimeout + private final Properties properties; - private ParallelPhoenixMetrics parallelPhoenixMetrics; - - /** - * @param properties - * @param haGroup - * @param executors Executors to use for operations on connections. We use first executor in the - * list for connection1 and second for connection2 - * @param executorCapacities Ordered list of executorCapacities corresponding to executors. Null is interpreted as - * executors having capacity - */ - ParallelPhoenixContext(Properties properties, HighAvailabilityGroup haGroup, List executors, List executorCapacities) { - Preconditions.checkNotNull(executors); - Preconditions.checkArgument(executors.size() >= 2, "Expected 2 executor pairs, one for each connection with a normal/close executor"); - GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER.increment(); - this.properties = properties; - this.haGroup = haGroup; - - this.parallelPhoenixMetrics = new ParallelPhoenixMetrics(); - this.operationTimeoutMs = getOperationTimeoutMs(properties); - - cluster1Context = new ParallelPhoenixClusterContext(1, executors.get(0).getExecutorService(), executors.get(0).getCloseExecutorService()); - cluster2Context = new ParallelPhoenixClusterContext(2, executors.get(1).getExecutorService(), executors.get(1).getCloseExecutorService()); - - /** - * Initializes ClusterContext's chainOnConnections according to capacity available in the threadpools. 
- * If there is no capacity available we initialize the chain for that connection exceptionally - * so any further operations on the chain also complete exceptionally - */ - if (executorCapacities == null) { - return; - } - Preconditions.checkArgument(executorCapacities.size() >= 2, - "Expected 2 executorCapacities values for each threadpool"); - if (!executorCapacities.get(0)) { - disableChainOnConn(cluster1Context, this.haGroup.getGroupInfo().getUrl1()); - } - if (!executorCapacities.get(1)) { - disableChainOnConn(cluster2Context, this.haGroup.getGroupInfo().getUrl2()); - } - } + private final HighAvailabilityGroup haGroup; + private final long operationTimeoutMs; - public ParallelPhoenixMetrics getParallelPhoenixMetrics() { - return parallelPhoenixMetrics; - } + private volatile boolean isClosed = false; + private volatile boolean isErrored = false; - private void disableChainOnConn(ParallelPhoenixClusterContext context, String url) { - CompletableFuture chainOnConn = new CompletableFuture<>(); - chainOnConn.completeExceptionally( - new SQLException("No capacity available for connection " + context.clusterIndex + " for cluster " + url)); - LOG.debug("No capacity available for connection " + context.clusterIndex + " for cluster {}", url); - context.setChainOnConn(chainOnConn); - } + private ParallelPhoenixMetrics parallelPhoenixMetrics; - public Properties getProperties() { - //FIXME should return immutable - return properties; - } + /** + * @param executors Executors to use for operations on connections. We use first executor + * in the list for connection1 and second for connection2 + * @param executorCapacities Ordered list of executorCapacities corresponding to executors. Null + * is interpreted as executors having capacity + */ + ParallelPhoenixContext(Properties properties, HighAvailabilityGroup haGroup, + List executors, + List executorCapacities) { + Preconditions.checkNotNull(executors); + Preconditions.checkArgument(executors.size() >= 2, + "Expected 2 executor pairs, one for each connection with a normal/close executor"); + GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER.increment(); + this.properties = properties; + this.haGroup = haGroup; - public HighAvailabilityGroup getHaGroup() { - return haGroup; - } + this.parallelPhoenixMetrics = new ParallelPhoenixMetrics(); + this.operationTimeoutMs = getOperationTimeoutMs(properties); - public boolean isAutoCommit() { - return Boolean.valueOf((String) properties.getOrDefault(AUTO_COMMIT_ATTRIB, "false")); - } + cluster1Context = new ParallelPhoenixClusterContext(1, executors.get(0).getExecutorService(), + executors.get(0).getCloseExecutorService()); + cluster2Context = new ParallelPhoenixClusterContext(2, executors.get(1).getExecutorService(), + executors.get(1).getCloseExecutorService()); /** - * Chains an operation on the connection from the last chained operation. This is to ensure that - * we operate on the underlying phoenix connection (and related objects) using a single thread - * at any given time. Operations are supposed to be expressed in the form of Supplier. All async - * operations on the underlying connection should be chained using this method - * - * @param Supplier - * @return CompletableFuture + * Initializes ClusterContext's chainOnConnections according to capacity available in the + * threadpools. 
If there is no capacity available we initialize the chain for that connection + * exceptionally so any further operations on the chain also complete exceptionally */ - public CompletableFuture chainOnConn1(Supplier s) { - return chainOnConnClusterContext(s, cluster1Context); + if (executorCapacities == null) { + return; } - - public void setConnection1Tail(CompletableFuture future) { - cluster1Context.setChainOnConn(future); + Preconditions.checkArgument(executorCapacities.size() >= 2, + "Expected 2 executorCapacities values for each threadpool"); + if (!executorCapacities.get(0)) { + disableChainOnConn(cluster1Context, this.haGroup.getGroupInfo().getUrl1()); } - - public CompletableFuture chainOnConn2(Supplier s) { - return chainOnConnClusterContext(s, cluster2Context); + if (!executorCapacities.get(1)) { + disableChainOnConn(cluster2Context, this.haGroup.getGroupInfo().getUrl2()); } - - public void setConnection2Tail(CompletableFuture future) { - cluster2Context.setChainOnConn(future); + } + + public ParallelPhoenixMetrics getParallelPhoenixMetrics() { + return parallelPhoenixMetrics; + } + + private void disableChainOnConn(ParallelPhoenixClusterContext context, String url) { + CompletableFuture chainOnConn = new CompletableFuture<>(); + chainOnConn.completeExceptionally(new SQLException( + "No capacity available for connection " + context.clusterIndex + " for cluster " + url)); + LOG.debug("No capacity available for connection " + context.clusterIndex + " for cluster {}", + url); + context.setChainOnConn(chainOnConn); + } + + public Properties getProperties() { + // FIXME should return immutable + return properties; + } + + public HighAvailabilityGroup getHaGroup() { + return haGroup; + } + + public boolean isAutoCommit() { + return Boolean.valueOf((String) properties.getOrDefault(AUTO_COMMIT_ATTRIB, "false")); + } + + /** + * Chains an operation on the connection from the last chained operation. This is to ensure that + * we operate on the underlying phoenix connection (and related objects) using a single thread at + * any given time. Operations are supposed to be expressed in the form of Supplier. 
All async + * operations on the underlying connection should be chained using this method + * @param Supplier + * @return CompletableFuture + */ + public CompletableFuture chainOnConn1(Supplier s) { + return chainOnConnClusterContext(s, cluster1Context); + } + + public void setConnection1Tail(CompletableFuture future) { + cluster1Context.setChainOnConn(future); + } + + public CompletableFuture chainOnConn2(Supplier s) { + return chainOnConnClusterContext(s, cluster2Context); + } + + public void setConnection2Tail(CompletableFuture future) { + cluster2Context.setChainOnConn(future); + } + + private CompletableFuture chainOnConnClusterContext(Supplier s, + ParallelPhoenixClusterContext context) { + CompletableFuture chainedFuture = + context.getChainOnConn().thenApplyAsync((f) -> s.get(), context.getExecutorForCluster()); + context.setChainOnConn(chainedFuture); + return chainedFuture; + } + + public void close() { + isClosed = true; + if (isErrored) { + GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER.increment(); } + } - private CompletableFuture chainOnConnClusterContext(Supplier s, ParallelPhoenixClusterContext context) { - CompletableFuture chainedFuture = - context.getChainOnConn().thenApplyAsync((f) -> s.get(), context.getExecutorForCluster()); - context.setChainOnConn(chainedFuture); - return chainedFuture; - } + public boolean isClosed() { + return isClosed; + } - public void close() { - isClosed = true; - if (isErrored) { - GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER.increment(); - } - } + public void setError() { + isErrored = true; + } - public boolean isClosed() { - return isClosed; + public void checkOpen() throws SQLException { + if (isClosed) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED).build() + .buildException(); } - - public void setError() { - isErrored = true; + } + + public Map getContextMetrics() { + return this.parallelPhoenixMetrics.getAllMetrics().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, t -> t.getValue().getValue())); + } + + public void resetMetrics() { + // We don't use ParallelPhoenixMetrics::reset() here as that will race with any remaining + // operations + // Instead we generate new metrics, any updates won't be reflected in future reads + parallelPhoenixMetrics = new ParallelPhoenixMetrics(); + } + + /** + * Decorates metrics from PhoenixConnections table metrics with a virtual table of + * PARALLEL_PHOENIX_METRICS name as well as all the context's metrics. 
+ * @param initialMetrics Table Specific Metrics class to populate + */ + public void decorateMetrics(Map> initialMetrics) { + // decorate + initialMetrics.put(PARALLEL_PHOENIX_METRICS, getContextMetrics()); + } + + public long getOperationTimeout() { + return this.operationTimeoutMs; + } + + CompletableFuture getChainOnConn1() { + return this.cluster1Context.getChainOnConn(); + } + + CompletableFuture getChainOnConn2() { + return this.cluster2Context.getChainOnConn(); + } + + ExecutorService getCloseConnection1ExecutorService() { + return this.cluster1Context.getConnectionCloseExecutor(); + } + + ExecutorService getCloseConnection2ExecutorService() { + return this.cluster2Context.getConnectionCloseExecutor(); + } + + private long getOperationTimeoutMs(Properties properties) { + long operationTimeoutMs; + if (properties.getProperty(PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB) != null) { + operationTimeoutMs = + Long.parseLong(properties.getProperty(PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB)); + } else { + operationTimeoutMs = + Long.parseLong(properties.getProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + Long.toString(QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS))); } - - public void checkOpen() throws SQLException { - if (isClosed) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED) - .build() - .buildException(); - } + Preconditions.checkArgument(operationTimeoutMs >= 0); + return operationTimeoutMs; + } + + private static class ParallelPhoenixClusterContext { + private final int clusterIndex; + private final ExecutorService executorForCluster; + private final ExecutorService connectionCloseExecutor; + + private CompletableFuture chainOnConn = CompletableFuture.completedFuture(new Object()); + + public ParallelPhoenixClusterContext(int clusterIndex, ExecutorService executorForCluster, + ExecutorService connectionCloseExecutor) { + this.clusterIndex = clusterIndex; + this.executorForCluster = executorForCluster; + this.connectionCloseExecutor = connectionCloseExecutor; } - public Map getContextMetrics() { - return this.parallelPhoenixMetrics.getAllMetrics().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, t -> t.getValue().getValue())); + public ExecutorService getExecutorForCluster() { + return executorForCluster; } - public void resetMetrics() { - //We don't use ParallelPhoenixMetrics::reset() here as that will race with any remaining operations - //Instead we generate new metrics, any updates won't be reflected in future reads - parallelPhoenixMetrics = new ParallelPhoenixMetrics(); + public ExecutorService getConnectionCloseExecutor() { + return connectionCloseExecutor; } - /** - * Decorates metrics from PhoenixConnections table metrics with a virtual table of - * PARALLEL_PHOENIX_METRICS name as well as all the context's metrics. 
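
resetMetrics() and decorateMetrics() above rely on two small ideas: reset by swapping in a fresh metrics object so in-flight updates cannot race a zeroing loop, and expose the context's counters under a virtual PARALLEL_PHOENIX_METRICS table name in the read-metrics map. A rough standalone sketch of both, with a stand-in Metrics class and metric-name strings used purely for illustration:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class MetricsResetSketch {
  // Stand-in for ParallelPhoenixMetrics: a bag of counters (illustrative only).
  static class Metrics {
    final AtomicLong activeOps = new AtomicLong();
  }

  private volatile Metrics metrics = new Metrics();

  // Reset by swapping in a fresh object: late updates land on the old instance
  // and are simply never read again, so there is no race with a zeroing loop.
  public void resetMetrics() {
    metrics = new Metrics();
  }

  // Mirrors decorateMetrics(): publish the context metrics under a virtual table name.
  public void decorate(Map<String, Map<String, Long>> tableMetrics) {
    Map<String, Long> snapshot = new HashMap<>();
    snapshot.put("HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER", metrics.activeOps.get());
    tableMetrics.put("PARALLEL_PHOENIX_METRICS", snapshot);
  }

  public static void main(String[] args) {
    MetricsResetSketch ctx = new MetricsResetSketch();
    ctx.metrics.activeOps.incrementAndGet();
    ctx.resetMetrics();
    Map<String, Map<String, Long>> out = new HashMap<>();
    ctx.decorate(out);
    System.out.println(out); // counter reads 0 after the swap
  }
}
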
- * - * @param initialMetrics Table Specific Metrics class to populate - */ - public void decorateMetrics(Map> initialMetrics) { - //decorate - initialMetrics.put(PARALLEL_PHOENIX_METRICS, getContextMetrics()); + public CompletableFuture getChainOnConn() { + return chainOnConn; } - public long getOperationTimeout() { - return this.operationTimeoutMs; - } - - CompletableFuture getChainOnConn1() { - return this.cluster1Context.getChainOnConn(); - } - - CompletableFuture getChainOnConn2() { - return this.cluster2Context.getChainOnConn(); - } - - ExecutorService getCloseConnection1ExecutorService() {return this.cluster1Context.getConnectionCloseExecutor(); } - - ExecutorService getCloseConnection2ExecutorService() {return this.cluster2Context.getConnectionCloseExecutor(); } - - private long getOperationTimeoutMs(Properties properties) { - long operationTimeoutMs; - if (properties.getProperty(PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB) != null) { - operationTimeoutMs = Long.parseLong( - properties.getProperty(PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB)); - } else { - operationTimeoutMs = - Long.parseLong(properties.getProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - Long.toString(QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS))); - } - Preconditions.checkArgument(operationTimeoutMs >= 0); - return operationTimeoutMs; - } - - private static class ParallelPhoenixClusterContext { - private final int clusterIndex; - private final ExecutorService executorForCluster; - private final ExecutorService connectionCloseExecutor; - - private CompletableFuture chainOnConn = CompletableFuture.completedFuture(new Object()); - - public ParallelPhoenixClusterContext(int clusterIndex, ExecutorService executorForCluster, ExecutorService connectionCloseExecutor) { - this.clusterIndex = clusterIndex; - this.executorForCluster = executorForCluster; - this.connectionCloseExecutor = connectionCloseExecutor; - } - - public ExecutorService getExecutorForCluster() { - return executorForCluster; - } - - public ExecutorService getConnectionCloseExecutor() { return connectionCloseExecutor; } - - public CompletableFuture getChainOnConn() { - return chainOnConn; - } - - public void setChainOnConn(CompletableFuture chainOnConn) { - this.chainOnConn = chainOnConn; - } + public void setChainOnConn(CompletableFuture chainOnConn) { + this.chainOnConn = chainOnConn; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixMetrics.java index cf223437a92..f3d9edb929c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixMetrics.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
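
getOperationTimeoutMs() resolves the timeout with a simple precedence: the HA-specific property wins, otherwise the generic thread-timeout property, otherwise its compiled-in default, and negative values are rejected. A sketch of that resolution order; the property keys and default value below are placeholders, not the actual Phoenix constants:

import java.util.Properties;

public class OperationTimeoutSketch {
  // Placeholder keys standing in for PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB
  // and QueryServices.THREAD_TIMEOUT_MS_ATTRIB.
  static final String HA_TIMEOUT_KEY = "ha.parallel.operation.timeout.ms";
  static final String THREAD_TIMEOUT_KEY = "thread.timeout.ms";
  static final long DEFAULT_THREAD_TIMEOUT_MS = 600_000L; // illustrative default

  // Same precedence as getOperationTimeoutMs(): HA-specific key wins, then the
  // generic thread timeout, then its default; negatives are rejected.
  static long resolveOperationTimeoutMs(Properties props) {
    String ha = props.getProperty(HA_TIMEOUT_KEY);
    long timeout = (ha != null)
        ? Long.parseLong(ha)
        : Long.parseLong(
            props.getProperty(THREAD_TIMEOUT_KEY, Long.toString(DEFAULT_THREAD_TIMEOUT_MS)));
    if (timeout < 0) {
      throw new IllegalArgumentException("Operation timeout must be >= 0, got " + timeout);
    }
    return timeout;
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(HA_TIMEOUT_KEY, "30000");
    System.out.println(resolveOperationTimeoutMs(props)); // 30000
  }
}
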
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,52 +17,44 @@ */ package org.apache.phoenix.jdbc; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.monitoring.AtomicMetric; -import org.apache.phoenix.monitoring.Metric; -import org.apache.phoenix.monitoring.MetricType; - -import java.util.EnumMap; -import java.util.List; - import static org.apache.phoenix.jdbc.PhoenixHAGroupMetrics.HAMetricType.HA_PARALLEL_COUNT_FAILED_OPERATIONS; import static org.apache.phoenix.jdbc.PhoenixHAGroupMetrics.HAMetricType.HA_PARALLEL_COUNT_OPERATIONS; import static org.apache.phoenix.jdbc.PhoenixHAGroupMetrics.HAMetricType.HA_PARALLEL_USED_OPERATIONS; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER; + +import java.util.List; + +import org.apache.phoenix.monitoring.Metric; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; public class ParallelPhoenixMetrics extends PhoenixHAGroupMetrics { - private static List TYPES = ImmutableList.of(HA_PARALLEL_COUNT_FAILED_OPERATIONS,HA_PARALLEL_USED_OPERATIONS,HA_PARALLEL_COUNT_OPERATIONS); + private static List TYPES = ImmutableList.of(HA_PARALLEL_COUNT_FAILED_OPERATIONS, + HA_PARALLEL_USED_OPERATIONS, HA_PARALLEL_COUNT_OPERATIONS); - public ParallelPhoenixMetrics() { - super(TYPES); - } + public ParallelPhoenixMetrics() { + super(TYPES); + } - public Metric getActiveClusterOperationCount() { - return this.get(HA_PARALLEL_COUNT_OPERATIONS,0); - } + public Metric getActiveClusterOperationCount() { + return this.get(HA_PARALLEL_COUNT_OPERATIONS, 0); + } - public Metric getStandbyClusterOperationCount() { - return this.get(HA_PARALLEL_COUNT_OPERATIONS,1); - } + public Metric getStandbyClusterOperationCount() { + return this.get(HA_PARALLEL_COUNT_OPERATIONS, 1); + } - public Metric getActiveClusterFailedOperationCount() { - return this.get(HA_PARALLEL_COUNT_FAILED_OPERATIONS,0); - } + public Metric getActiveClusterFailedOperationCount() { + return this.get(HA_PARALLEL_COUNT_FAILED_OPERATIONS, 0); + } - public Metric getStandbyClusterFailedOperationCount() { - return this.get(HA_PARALLEL_COUNT_FAILED_OPERATIONS,1); - } + public Metric getStandbyClusterFailedOperationCount() { + return this.get(HA_PARALLEL_COUNT_FAILED_OPERATIONS, 1); + } - public Metric getActiveClusterUsedCount() { - return this.get(HA_PARALLEL_USED_OPERATIONS,0); - } + public Metric getActiveClusterUsedCount() { + return this.get(HA_PARALLEL_USED_OPERATIONS, 0); + } - public Metric getStandbyClusterUsedCount() { - return this.get(HA_PARALLEL_USED_OPERATIONS,1); - } + public Metric getStandbyClusterUsedCount() { + return this.get(HA_PARALLEL_USED_OPERATIONS, 1); + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java index ae8ebf3dd7b..76b5ca1af11 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; @@ -35,299 +34,283 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - /** - * ResultSet suitable for truly immutable use cases that do not delete data and do not query data the does not exist. - * Returns a non-nullvalue when possible. Checks result from both the underlying clusters for nulls(empty result). If - * we get an empty result from one cluster but are unable get a result from the other cluster we - * either return a null that we got or throw an error based on {@link #ERROR_ON_SINGLE_NULL_ATTRIB} - * This gives some additional consistency for this specific case at the cost of some latency and behavior + * ResultSet suitable for truly immutable use cases that do not delete data and do not query data + * the does not exist. Returns a non-nullvalue when possible. Checks result from both the underlying + * clusters for nulls(empty result). 
If we get an empty result from one cluster but are unable get a + * result from the other cluster we either return a null that we got or throw an error based on + * {@link #ERROR_ON_SINGLE_NULL_ATTRIB} This gives some additional consistency for this specific + * case at the cost of some latency and behavior */ -public class ParallelPhoenixNullComparingResultSet extends DelegateResultSet implements PhoenixMonitoredResultSet { - // Keeping this separate from ParallelPhoenixResultSet to allow separate evolution of the - // two classes' behavior, keeping PPRS as the default implementation +public class ParallelPhoenixNullComparingResultSet extends DelegateResultSet + implements PhoenixMonitoredResultSet { + // Keeping this separate from ParallelPhoenixResultSet to allow separate evolution of the + // two classes' behavior, keeping PPRS as the default implementation - public static final String ERROR_ON_SINGLE_NULL_ATTRIB = - "phoenix.parallel.nullComparingRs.errorOnSingleNull"; - public static final String DEFAULT_ERROR_ON_SINGLE_NULL = "false"; + public static final String ERROR_ON_SINGLE_NULL_ATTRIB = + "phoenix.parallel.nullComparingRs.errorOnSingleNull"; + public static final String DEFAULT_ERROR_ON_SINGLE_NULL = "false"; - private static final Logger LOG = - LoggerFactory.getLogger(ParallelPhoenixNullComparingResultSet.class); + private static final Logger LOG = + LoggerFactory.getLogger(ParallelPhoenixNullComparingResultSet.class); - private final CompletableFuture rs1, rs2; - private final ParallelPhoenixContext context; - private boolean errorOnSingleNull = true; + private final CompletableFuture rs1, rs2; + private final ParallelPhoenixContext context; + private boolean errorOnSingleNull = true; - /** - * @param context - * @param rs1 CompletableFuture from the Active cluster - * @param rs2 CompletableFuture from the Standby cluster - */ - public ParallelPhoenixNullComparingResultSet(ParallelPhoenixContext context, - CompletableFuture rs1, CompletableFuture rs2) { - super(null); - this.rs1 = rs1; - this.rs2 = rs2; - this.context = context; - this.errorOnSingleNull = - Boolean.valueOf(context.getProperties().getProperty(ERROR_ON_SINGLE_NULL_ATTRIB, - DEFAULT_ERROR_ON_SINGLE_NULL)); - } + /** + * @param rs1 CompletableFuture from the Active cluster + * @param rs2 CompletableFuture from the Standby cluster + */ + public ParallelPhoenixNullComparingResultSet(ParallelPhoenixContext context, + CompletableFuture rs1, CompletableFuture rs2) { + super(null); + this.rs1 = rs1; + this.rs2 = rs2; + this.context = context; + this.errorOnSingleNull = Boolean.valueOf(context.getProperties() + .getProperty(ERROR_ON_SINGLE_NULL_ATTRIB, DEFAULT_ERROR_ON_SINGLE_NULL)); + } - @Override - public boolean next() throws SQLException { - context.checkOpen(); - // First call to next - if (this.rs == null) { - Function function = (T) -> { - try { - return T.next(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - CompletableFuture candidate1 = - ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(function, rs1, - context::chainOnConn1, - context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), - context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); - CompletableFuture candidate2 = - ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(function, rs2, - context::chainOnConn2, context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), - context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); - 
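
The errorOnSingleNull flag mentioned in the class comment is read from the connection Properties like any other boolean attribute, defaulting to false. A small sketch of setting and parsing it; only the attribute name and default are taken from the code above, the rest is illustrative:

import java.util.Properties;

public class ErrorOnSingleNullSketch {
  // Same attribute name and default as ParallelPhoenixNullComparingResultSet.
  static final String ERROR_ON_SINGLE_NULL_ATTRIB =
      "phoenix.parallel.nullComparingRs.errorOnSingleNull";
  static final String DEFAULT_ERROR_ON_SINGLE_NULL = "false";

  public static void main(String[] args) {
    Properties props = new Properties();
    // Opt in: an empty result from one cluster plus a failure on the other becomes
    // an error instead of silently returning the empty result.
    props.setProperty(ERROR_ON_SINGLE_NULL_ATTRIB, "true");

    boolean errorOnSingleNull = Boolean.valueOf(
        props.getProperty(ERROR_ON_SINGLE_NULL_ATTRIB, DEFAULT_ERROR_ON_SINGLE_NULL));
    System.out.println("errorOnSingleNull = " + errorOnSingleNull);
  }
}
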
List> candidates = new ArrayList<>(); - candidates.add(candidate1); - candidates.add(candidate2); - boolean notEmpty = - (boolean) ParallelPhoenixUtil.INSTANCE.getAnyOfNonExceptionally(candidates, context); - CandidateResult candidateResult1 = new CandidateResult<>(candidate1, rs1, true); - CandidateResult candidateResult2 = new CandidateResult<>(candidate2, rs2, false); - try { - if (notEmpty) { - // Non empty result. Bind to resultset that gave us non empty result - bindToNonEmptyCompletedResultSet(candidateResult1, candidateResult2); - return true; - } else { - // We got an empty result. Wait for both the responses - Pair, CandidateResult> candidateResultPair = - findFirstNonExceptionallyCompletedCandidateResult(candidateResult1, - candidateResult2); - boolean firstResult = candidateResultPair.getFirst().getCandidate().get(); - // If first result is not empty - if (firstResult) { - this.rs = candidateResultPair.getFirst().getRs().get(); - logIfTraceEnabled(candidateResultPair.getFirst()); - incrementClusterUsedCount(candidateResultPair.getFirst()); - return true; - } - // First result is empty, check the second - boolean secondResult; - try { - secondResult = - ParallelPhoenixUtil.INSTANCE.getFutureNoRetry( - candidateResultPair.getSecond().getCandidate(), context); - } catch (Exception e) { - LOG.warn( - "Exception while trying to read from other cluster after getting empty result from " - + "one cluster, errorOnSingleNull: " + errorOnSingleNull, - e); - // We can't get the secondResult, check property and error if set - if (errorOnSingleNull) { - context.setError(); - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL) - .setRootCause(e) - .setHaGroupInfo( - context.getHaGroup().getGroupInfo().toString()) - .build().buildException(); - } - this.rs = candidateResultPair.getFirst().getRs().get(); - logIfTraceEnabled(candidateResultPair.getFirst()); - incrementClusterUsedCount(candidateResultPair.getFirst()); - return false; - } - // TODO: track which rs came back first and is potentially faster. 
Bind accordingly - this.rs = candidateResultPair.getSecond().getRs().get(); - logIfTraceEnabled(candidateResultPair.getSecond()); - incrementClusterUsedCount(candidateResultPair.getSecond()); - return secondResult; - } - } catch (InterruptedException | ExecutionException e) { - // This should never happen - LOG.error("Unexpected exception:", e); - context.setError(); - throw new SQLException(e); + @Override + public boolean next() throws SQLException { + context.checkOpen(); + // First call to next + if (this.rs == null) { + Function function = (T) -> { + try { + return T.next(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + CompletableFuture candidate1 = ParallelPhoenixUtil.INSTANCE + .getFutureAndChainOnContext(function, rs1, context::chainOnConn1, + context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), + context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); + CompletableFuture candidate2 = ParallelPhoenixUtil.INSTANCE + .getFutureAndChainOnContext(function, rs2, context::chainOnConn2, + context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), + context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); + List> candidates = new ArrayList<>(); + candidates.add(candidate1); + candidates.add(candidate2); + boolean notEmpty = + (boolean) ParallelPhoenixUtil.INSTANCE.getAnyOfNonExceptionally(candidates, context); + CandidateResult candidateResult1 = new CandidateResult<>(candidate1, rs1, true); + CandidateResult candidateResult2 = new CandidateResult<>(candidate2, rs2, false); + try { + if (notEmpty) { + // Non empty result. Bind to resultset that gave us non empty result + bindToNonEmptyCompletedResultSet(candidateResult1, candidateResult2); + return true; + } else { + // We got an empty result. Wait for both the responses + Pair, CandidateResult> candidateResultPair = + findFirstNonExceptionallyCompletedCandidateResult(candidateResult1, candidateResult2); + boolean firstResult = candidateResultPair.getFirst().getCandidate().get(); + // If first result is not empty + if (firstResult) { + this.rs = candidateResultPair.getFirst().getRs().get(); + logIfTraceEnabled(candidateResultPair.getFirst()); + incrementClusterUsedCount(candidateResultPair.getFirst()); + return true; + } + // First result is empty, check the second + boolean secondResult; + try { + secondResult = ParallelPhoenixUtil.INSTANCE + .getFutureNoRetry(candidateResultPair.getSecond().getCandidate(), context); + } catch (Exception e) { + LOG.warn( + "Exception while trying to read from other cluster after getting empty result from " + + "one cluster, errorOnSingleNull: " + errorOnSingleNull, + e); + // We can't get the secondResult, check property and error if set + if (errorOnSingleNull) { + context.setError(); + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL).setRootCause(e) + .setHaGroupInfo(context.getHaGroup().getGroupInfo().toString()).build() + .buildException(); } + this.rs = candidateResultPair.getFirst().getRs().get(); + logIfTraceEnabled(candidateResultPair.getFirst()); + incrementClusterUsedCount(candidateResultPair.getFirst()); + return false; + } + // TODO: track which rs came back first and is potentially faster. 
Bind accordingly + this.rs = candidateResultPair.getSecond().getRs().get(); + logIfTraceEnabled(candidateResultPair.getSecond()); + incrementClusterUsedCount(candidateResultPair.getSecond()); + return secondResult; } - return rs.next(); + } catch (InterruptedException | ExecutionException e) { + // This should never happen + LOG.error("Unexpected exception:", e); + context.setError(); + throw new SQLException(e); + } } + return rs.next(); + } - private Object runOnResultSets(Function function) throws SQLException { - return ParallelPhoenixUtil.INSTANCE.runFutures(function, rs1, rs2, context, true); - } + private Object runOnResultSets(Function function) throws SQLException { + return ParallelPhoenixUtil.INSTANCE.runFutures(function, rs1, rs2, context, true); + } - /** - * binds the delegate resultSet to the ResultSet from the candidate that completed without - * exception and is not empty(returned true on the next() call) - * @param candidateResult1 - * @param candidateResult2 - * @throws InterruptedException - * @throws ExecutionException - * @throws SQLException - */ - private void bindToNonEmptyCompletedResultSet(CandidateResult candidateResult1, - CandidateResult candidateResult2) - throws InterruptedException, ExecutionException, SQLException { - CompletableFuture candidate1 = candidateResult1.getCandidate(); - CompletableFuture candidate2 = candidateResult2.getCandidate(); - if (candidate1.isDone() && !candidate1.isCompletedExceptionally() - && candidate1.get()) { - this.rs = candidateResult1.getRs().get(); - logIfTraceEnabled(candidateResult1); - incrementClusterUsedCount(candidateResult1); - } else if (candidate2.isDone() && !candidate2.isCompletedExceptionally() - && candidate2.get()) { - this.rs = candidateResult2.getRs().get(); - logIfTraceEnabled(candidateResult2); - incrementClusterUsedCount(candidateResult2); - } else { - throw new SQLException( - "Unexpected exception, one of the RS should've completed successfully"); - } + /** + * binds the delegate resultSet to the ResultSet from the candidate that completed without + * exception and is not empty(returned true on the next() call) + */ + private void bindToNonEmptyCompletedResultSet(CandidateResult candidateResult1, + CandidateResult candidateResult2) + throws InterruptedException, ExecutionException, SQLException { + CompletableFuture candidate1 = candidateResult1.getCandidate(); + CompletableFuture candidate2 = candidateResult2.getCandidate(); + if (candidate1.isDone() && !candidate1.isCompletedExceptionally() && candidate1.get()) { + this.rs = candidateResult1.getRs().get(); + logIfTraceEnabled(candidateResult1); + incrementClusterUsedCount(candidateResult1); + } else if (candidate2.isDone() && !candidate2.isCompletedExceptionally() && candidate2.get()) { + this.rs = candidateResult2.getRs().get(); + logIfTraceEnabled(candidateResult2); + incrementClusterUsedCount(candidateResult2); + } else { + throw new SQLException( + "Unexpected exception, one of the RS should've completed successfully"); } + } - /** - * @param - * @param candidateResult1 - * @param candidateResult2 - * @return Pair of CandidateResult ordered by completion. 
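
Stripped of timeouts, metrics, and the errorOnSingleNull handling, next() above races the two clusters' next() calls and prefers any non-empty answer, consulting the slower cluster only when the faster one returned empty or failed. A heavily simplified standalone sketch of that decision flow, where CompletableFuture.anyOf stands in for ParallelPhoenixUtil's timeout-aware helper:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class NullComparingNextSketch {
  // Illustrative reduction of next(): race two boolean futures ("did this cluster
  // return a row?") and fall back to the slower cluster only on an empty answer.
  static boolean next(CompletableFuture<Boolean> active, CompletableFuture<Boolean> standby)
      throws ExecutionException, InterruptedException {
    // Wait for either future to finish; ignore here whether the winner succeeded.
    CompletableFuture.anyOf(active, standby).exceptionally(t -> null).join();

    CompletableFuture<Boolean> first = active.isDone() ? active : standby;
    CompletableFuture<Boolean> second = (first == active) ? standby : active;

    if (!first.isCompletedExceptionally() && first.get()) {
      return true;               // fast cluster had a row: use it
    }
    // Fast cluster was empty (or failed): consult the other one before answering.
    return second.get();
  }

  public static void main(String[] args) throws Exception {
    CompletableFuture<Boolean> active = CompletableFuture.completedFuture(false);
    CompletableFuture<Boolean> standby = CompletableFuture.completedFuture(true);
    System.out.println(next(active, standby)); // true: the standby cluster still had the row
  }
}
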
First of the pair is guaranteed to be - * completed non-exceptionally - * @throws SQLException - */ - private Pair, CandidateResult> - findFirstNonExceptionallyCompletedCandidateResult(CandidateResult candidateResult1, - CandidateResult candidateResult2) throws SQLException { - Pair, CandidateResult> pair = new Pair<>(); - CompletableFuture candidate1 = candidateResult1.getCandidate(); - CompletableFuture candidate2 = candidateResult2.getCandidate(); - if (candidate1.isDone() && !candidate1.isCompletedExceptionally()) { - pair.setFirst(candidateResult1); - pair.setSecond(candidateResult2); - } else if (candidate2.isDone() && !candidate2.isCompletedExceptionally()) { - pair.setFirst(candidateResult2); - pair.setSecond(candidateResult1); - } else { - throw new SQLException( - "Unexpected exception, one of the RS should've completed successfully"); - } - return pair; + /** + * @param + * @return Pair of CandidateResult ordered by completion. First of the pair is guaranteed to be + * completed non-exceptionally + */ + private Pair, CandidateResult> + findFirstNonExceptionallyCompletedCandidateResult(CandidateResult candidateResult1, + CandidateResult candidateResult2) throws SQLException { + Pair, CandidateResult> pair = new Pair<>(); + CompletableFuture candidate1 = candidateResult1.getCandidate(); + CompletableFuture candidate2 = candidateResult2.getCandidate(); + if (candidate1.isDone() && !candidate1.isCompletedExceptionally()) { + pair.setFirst(candidateResult1); + pair.setSecond(candidateResult2); + } else if (candidate2.isDone() && !candidate2.isCompletedExceptionally()) { + pair.setFirst(candidateResult2); + pair.setSecond(candidateResult1); + } else { + throw new SQLException( + "Unexpected exception, one of the RS should've completed successfully"); } + return pair; + } - @Override - public void close() throws SQLException { - Function function = (T) -> { - try { - T.close(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - runOnResultSets(function); - } + @Override + public void close() throws SQLException { + Function function = (T) -> { + try { + T.close(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + runOnResultSets(function); + } - @VisibleForTesting - ResultSet getResultSet() { - return this.rs; - } + @VisibleForTesting + ResultSet getResultSet() { + return this.rs; + } - @Override - public Map> getReadMetrics() { - Map> metrics; - if (rs != null) { - metrics = ((PhoenixMonitoredResultSet) rs).getReadMetrics(); - } else { - metrics = new HashMap<>(); - } - context.decorateMetrics(metrics); - return metrics; + @Override + public Map> getReadMetrics() { + Map> metrics; + if (rs != null) { + metrics = ((PhoenixMonitoredResultSet) rs).getReadMetrics(); + } else { + metrics = new HashMap<>(); } + context.decorateMetrics(metrics); + return metrics; + } - @Override - public Map getOverAllRequestReadMetrics() { - Map metrics; - if (rs != null) { - metrics = ((PhoenixMonitoredResultSet) rs).getOverAllRequestReadMetrics(); - } else { - metrics = context.getContextMetrics(); - } - return metrics; + @Override + public Map getOverAllRequestReadMetrics() { + Map metrics; + if (rs != null) { + metrics = ((PhoenixMonitoredResultSet) rs).getOverAllRequestReadMetrics(); + } else { + metrics = context.getContextMetrics(); } + return metrics; + } - @Override - public void resetMetrics() { - if (rs != null) { - ((PhoenixResultSet) rs).resetMetrics(); - } - // reset our metrics - 
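
findFirstNonExceptionallyCompletedCandidateResult() reduces to ordering two futures so the first element of the returned pair is guaranteed to have completed without an exception. A generic sketch of that helper, with a minimal stand-in for Phoenix's Pair class:

import java.sql.SQLException;
import java.util.concurrent.CompletableFuture;

public class OrderByCompletionSketch {
  // Minimal stand-in for Phoenix's Pair.
  static final class Pair<A, B> {
    final A first; final B second;
    Pair(A first, B second) { this.first = first; this.second = second; }
  }

  // The returned pair's first element is guaranteed to be done and to have
  // completed without an exception; otherwise we fail, like the method above.
  static <T> Pair<CompletableFuture<T>, CompletableFuture<T>> orderByCompletion(
      CompletableFuture<T> a, CompletableFuture<T> b) throws SQLException {
    if (a.isDone() && !a.isCompletedExceptionally()) {
      return new Pair<>(a, b);
    } else if (b.isDone() && !b.isCompletedExceptionally()) {
      return new Pair<>(b, a);
    }
    throw new SQLException("Expected at least one future to have completed successfully");
  }

  public static void main(String[] args) throws SQLException {
    CompletableFuture<Integer> failed = new CompletableFuture<>();
    failed.completeExceptionally(new RuntimeException("cluster down"));
    CompletableFuture<Integer> ok = CompletableFuture.completedFuture(42);
    System.out.println(orderByCompletion(failed, ok).first.join()); // 42
  }
}
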
context.resetMetrics(); + @Override + public void resetMetrics() { + if (rs != null) { + ((PhoenixResultSet) rs).resetMetrics(); } + // reset our metrics + context.resetMetrics(); + } - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (iface.isInstance(this)) { - return (T) this; - } - throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isInstance(this)) { + return (T) this; } + throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); + } - private void logIfTraceEnabled(CandidateResult bindingCandidateResult) - throws InterruptedException, ExecutionException { - if (LOG.isTraceEnabled()) { - boolean isNull = bindingCandidateResult.getCandidate().get(); - boolean belongsToActiveCluster = bindingCandidateResult.belongsToActiveCluster(); - LOG.trace(String.format( - "ParallelPhoenixNullComparingResultSet binding to ResultSet" - + " with attributes: isEmpty:%s belongsToActiveCluster:%s", - isNull, belongsToActiveCluster)); - } + private void logIfTraceEnabled(CandidateResult bindingCandidateResult) + throws InterruptedException, ExecutionException { + if (LOG.isTraceEnabled()) { + boolean isNull = bindingCandidateResult.getCandidate().get(); + boolean belongsToActiveCluster = bindingCandidateResult.belongsToActiveCluster(); + LOG.trace(String.format( + "ParallelPhoenixNullComparingResultSet binding to ResultSet" + + " with attributes: isEmpty:%s belongsToActiveCluster:%s", + isNull, belongsToActiveCluster)); } + } - private void incrementClusterUsedCount(CandidateResult candidateResult) { - if (candidateResult.belongsToActiveCluster()) { - context.getParallelPhoenixMetrics().getActiveClusterUsedCount().increment(); - } else { - context.getParallelPhoenixMetrics().getStandbyClusterUsedCount().increment(); - } + private void incrementClusterUsedCount(CandidateResult candidateResult) { + if (candidateResult.belongsToActiveCluster()) { + context.getParallelPhoenixMetrics().getActiveClusterUsedCount().increment(); + } else { + context.getParallelPhoenixMetrics().getStandbyClusterUsedCount().increment(); } + } - private static class CandidateResult { - private final CompletableFuture candidate; - private final CompletableFuture rs; - private final boolean belongsToActiveCluster; + private static class CandidateResult { + private final CompletableFuture candidate; + private final CompletableFuture rs; + private final boolean belongsToActiveCluster; - CandidateResult(CompletableFuture candidate, CompletableFuture rs, - boolean belongsToActiveCluster) { - this.candidate = candidate; - this.rs = rs; - this.belongsToActiveCluster = belongsToActiveCluster; - } + CandidateResult(CompletableFuture candidate, CompletableFuture rs, + boolean belongsToActiveCluster) { + this.candidate = candidate; + this.rs = rs; + this.belongsToActiveCluster = belongsToActiveCluster; + } - public CompletableFuture getCandidate() { - return candidate; - } + public CompletableFuture getCandidate() { + return candidate; + } - public CompletableFuture getRs() { - return rs; - } + public CompletableFuture getRs() { + return rs; + } - public boolean belongsToActiveCluster() { - return belongsToActiveCluster; - } + public boolean belongsToActiveCluster() { + return belongsToActiveCluster; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatement.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatement.java index 6e04cd8ede6..1c84ba498f4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; -import org.apache.phoenix.jdbc.PhoenixStatement.Operation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; @@ -47,793 +42,805 @@ import java.util.concurrent.CompletionException; import java.util.function.Function; -public class ParallelPhoenixPreparedStatement implements PhoenixMonitoredPreparedStatement { - - private static final Logger LOGGER = - LoggerFactory.getLogger(ParallelPhoenixPreparedStatement.class); - - private final ParallelPhoenixContext context; - - private final CompletableFuture statement1; - - private final CompletableFuture statement2; - - public ParallelPhoenixPreparedStatement(ParallelPhoenixContext context, CompletableFuture statement1, CompletableFuture statement2) throws SQLException { - this.context = context; - this.statement1 = statement1; - this.statement2 = statement2; - - //todo: make sure 1 statement is completed - } - - public CompletableFuture getStatement1() { - return statement1; - } - - public CompletableFuture getStatement2() { - return statement2; - } - - @Override - public ResultSet executeQuery() throws SQLException { - CompletableFuture result1 = - ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(statement -> { - try { - return statement.executeQuery(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }, statement1, context::chainOnConn1, - context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), - context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); - CompletableFuture result2 = - ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(statement -> { - try { - return statement.executeQuery(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }, statement2, context::chainOnConn2, - context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), - context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); - - return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, result1, result2); - } - - Object runOnPreparedStatements(Function function) throws SQLException { - return ParallelPhoenixUtil.INSTANCE.runFutures(function, statement1, statement2, context, true); - } - - @Override - public int executeUpdate() throws SQLException { - //TODO handle disabling connetions in a client to have all or nothing commits - Function function = (T) -> { - try { - return T.executeUpdate(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnPreparedStatements(function); - } - - @Override - public 
void setNull(int parameterIndex, int sqlType) throws SQLException { - Function function = (T) -> { - try { - T.setNull(parameterIndex, sqlType); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - Function function = (T) -> { - try { - T.setBoolean(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - Function function = (T) -> { - try { - T.setByte(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - Function function = (T) -> { - try { - T.setShort(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - Function function = (T) -> { - try { - T.setInt(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - Function function = (T) -> { - try { - T.setLong(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - Function function = (T) -> { - try { - T.setFloat(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - Function function = (T) -> { - try { - T.setDouble(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - Function function = (T) -> { - try { - T.setBigDecimal(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - Function function = (T) -> { - try { - T.setString(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - Function function = (T) -> { - try { - T.setBytes(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - Function function = (T) -> { - try { - T.setDate(parameterIndex, x); - return 
null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - Function function = (T) -> { - try { - T.setTime(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - Function function = (T) -> { - try { - T.setTimestamp(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - Function function = (T) -> { - try { - T.setAsciiStream(parameterIndex, x, length); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - Function function = (T) -> { - try { - T.setUnicodeStream(parameterIndex, x, length); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - Function function = (T) -> { - try { - T.setBinaryStream(parameterIndex, x, length); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void clearParameters() throws SQLException { - Function function = (T) -> { - try { - T.clearParameters(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - Function function = (T) -> { - try { - T.setObject(parameterIndex, x, targetSqlType); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - Function function = (T) -> { - try { - T.setObject(parameterIndex, x); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } - - @Override - public boolean execute() throws SQLException { - Function function = (T) -> { - try { - return T.execute(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (boolean) runOnPreparedStatements(function); - } - - @Override - public Operation getUpdateOperation() throws SQLException { - Function function = (T) -> { - try { - return T.getUpdateOperation(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (Operation) runOnPreparedStatements(function); - } - - @Override - public void addBatch() throws SQLException { +import org.apache.phoenix.jdbc.PhoenixStatement.Operation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; - } +public class ParallelPhoenixPreparedStatement 
implements PhoenixMonitoredPreparedStatement { - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + private static final Logger LOGGER = + LoggerFactory.getLogger(ParallelPhoenixPreparedStatement.class); - } + private final ParallelPhoenixContext context; - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { + private final CompletableFuture statement1; - } + private final CompletableFuture statement2; - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { + public ParallelPhoenixPreparedStatement(ParallelPhoenixContext context, + CompletableFuture statement1, + CompletableFuture statement2) throws SQLException { + this.context = context; + this.statement1 = statement1; + this.statement2 = statement2; - } + // todo: make sure 1 statement is completed + } - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { + public CompletableFuture getStatement1() { + return statement1; + } - } + public CompletableFuture getStatement2() { + return statement2; + } - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { + @Override + public ResultSet executeQuery() throws SQLException { + CompletableFuture result1 = + ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(statement -> { + try { + return statement.executeQuery(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }, statement1, context::chainOnConn1, + context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), + context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); + CompletableFuture result2 = + ParallelPhoenixUtil.INSTANCE.getFutureAndChainOnContext(statement -> { + try { + return statement.executeQuery(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }, statement2, context::chainOnConn2, + context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), + context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); + + return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, result1, result2); + } + + Object runOnPreparedStatements(Function function) + throws SQLException { + return ParallelPhoenixUtil.INSTANCE.runFutures(function, statement1, statement2, context, true); + } + + @Override + public int executeUpdate() throws SQLException { + // TODO handle disabling connetions in a client to have all or nothing commits + Function function = (T) -> { + try { + return T.executeUpdate(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnPreparedStatements(function); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + Function function = (T) -> { + try { + T.setNull(parameterIndex, sqlType); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + Function function = (T) -> { + try { + T.setBoolean(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + Function function = (T) -> { + try { + T.setByte(parameterIndex, x); + return 
null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + Function function = (T) -> { + try { + T.setShort(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + Function function = (T) -> { + try { + T.setInt(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + Function function = (T) -> { + try { + T.setLong(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + Function function = (T) -> { + try { + T.setFloat(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + Function function = (T) -> { + try { + T.setDouble(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + Function function = (T) -> { + try { + T.setBigDecimal(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + Function function = (T) -> { + try { + T.setString(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + Function function = (T) -> { + try { + T.setBytes(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + Function function = (T) -> { + try { + T.setDate(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + Function function = (T) -> { + try { + T.setTime(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + Function function = (T) -> { + try { + T.setTimestamp(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + 
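
Every delegated call in ParallelPhoenixPreparedStatement has the same shape: wrap the checked SQLException into a CompletionException inside a Function, then hand that function to runOnPreparedStatements so it executes against both underlying statements. A standalone sketch of just the wrapping step; the SqlCall interface and wrap() helper are illustrative, not Phoenix APIs:

import java.sql.SQLException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Function;

public class CheckedWrapSketch {
  interface SqlCall<T, R> { R apply(T target) throws SQLException; }

  // Adapt a SQLException-throwing call into a plain Function usable inside
  // CompletableFuture pipelines, the same way each setter/execute method above does.
  static <T, R> Function<T, R> wrap(SqlCall<T, R> call) {
    return target -> {
      try {
        return call.apply(target);
      } catch (SQLException e) {
        throw new CompletionException(e);
      }
    };
  }

  public static void main(String[] args) {
    Function<String, Integer> f = wrap(s -> {
      if (s.isEmpty()) throw new SQLException("empty");
      return s.length();
    });
    // Inside a future, the wrapped SQLException surfaces as the CompletionException cause.
    System.out.println(CompletableFuture.completedFuture("hello").thenApply(f).join());
  }
}
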
@Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + Function function = (T) -> { + try { + T.setAsciiStream(parameterIndex, x, length); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + Function function = (T) -> { + try { + T.setUnicodeStream(parameterIndex, x, length); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + Function function = (T) -> { + try { + T.setBinaryStream(parameterIndex, x, length); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void clearParameters() throws SQLException { + Function function = (T) -> { + try { + T.clearParameters(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + Function function = (T) -> { + try { + T.setObject(parameterIndex, x, targetSqlType); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnPreparedStatements(function); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + Function function = (T) -> { + try { + T.setObject(parameterIndex, x); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; - } + runOnPreparedStatements(function); + } - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return null; - } + @Override + public boolean execute() throws SQLException { + Function function = (T) -> { + try { + return T.execute(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + return (boolean) runOnPreparedStatements(function); + } - } + @Override + public Operation getUpdateOperation() throws SQLException { + Function function = (T) -> { + try { + return T.getUpdateOperation(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + return (Operation) runOnPreparedStatements(function); + } - } + @Override + public void addBatch() throws SQLException { - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + } - } + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) + throws SQLException { - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + } - } + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { + } - } + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { - @Override - public ParameterMetaData 
getParameterMetaData() throws SQLException { - return null; - } + } - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { - } + } - @Override - public void setNString(int parameterIndex, String value) throws SQLException { + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { - } + } - @Override - public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return null; + } - } + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { + } - } + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + } - } + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + } - } + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - @Override - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + } - } + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + } - } + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return null; + } - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { - } + } - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + @Override + public void setNString(int parameterIndex, String value) throws SQLException { - } + } - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) + throws SQLException { - } + } - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { - } + } - @Override - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - } + } - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) + throws SQLException { - } + } - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - } + } - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - } + } - @Override - public void 
setClob(int parameterIndex, Reader reader) throws SQLException { + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) + throws SQLException { - } + } - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - } + } - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - } + } - @Override - public ResultSet executeQuery(String sql) throws SQLException { - Function function = (T) -> { - try { - T.executeQuery(sql); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (ResultSet) runOnPreparedStatements(function); - } + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) + throws SQLException { - @Override - public int executeUpdate(String sql) throws SQLException { - Function function = (T) -> { - try { - T.executeUpdate(sql); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnPreparedStatements(function); - } + } - @Override - public void close() throws SQLException { - Function function = (T) -> { - try { - T.close(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnPreparedStatements(function); - } + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - @Override - public int getMaxFieldSize() throws SQLException { - return 0; - } + } - @Override - public void setMaxFieldSize(int max) throws SQLException { + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - } + } - @Override - public int getMaxRows() throws SQLException { - return 0; - } + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - @Override - public void setMaxRows(int max) throws SQLException { + } - } + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { + } - } + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { - @Override - public int getQueryTimeout() throws SQLException { - return 0; - } + } - @Override - public void setQueryTimeout(int seconds) throws SQLException { + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - } + } - @Override - public void cancel() throws SQLException { + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { - } + } - @Override - public SQLWarning getWarnings() throws SQLException { + @Override + public ResultSet executeQuery(String sql) throws SQLException { + Function function = (T) -> { + try { + T.executeQuery(sql); return null; - } + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (ResultSet) runOnPreparedStatements(function); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + Function function = (T) -> { + try { + T.executeUpdate(sql); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); 
+ } + }; + + return (int) runOnPreparedStatements(function); + } + + @Override + public void close() throws SQLException { + Function function = (T) -> { + try { + T.close(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; - @Override - public void clearWarnings() throws SQLException { + runOnPreparedStatements(function); + } - } + @Override + public int getMaxFieldSize() throws SQLException { + return 0; + } - @Override - public void setCursorName(String name) throws SQLException { + @Override + public void setMaxFieldSize(int max) throws SQLException { - } + } - @Override - public boolean execute(String sql) throws SQLException { - return false; - } + @Override + public int getMaxRows() throws SQLException { + return 0; + } - @Override - public ResultSet getResultSet() throws SQLException { - // TODO: implement using the ParallelPhoenixResultSetFactory - return null; - } + @Override + public void setMaxRows(int max) throws SQLException { - @Override - public int getUpdateCount() throws SQLException { - return 0; - } + } - @Override - public boolean getMoreResults() throws SQLException { - return false; - } + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { - @Override - public int getFetchDirection() throws SQLException { - return 0; - } + } - @Override - public void setFetchDirection(int direction) throws SQLException { + @Override + public int getQueryTimeout() throws SQLException { + return 0; + } - } + @Override + public void setQueryTimeout(int seconds) throws SQLException { - @Override - public int getFetchSize() throws SQLException { - return 0; - } + } - @Override - public void setFetchSize(int rows) throws SQLException { + @Override + public void cancel() throws SQLException { - } + } - @Override - public int getResultSetConcurrency() throws SQLException { - return 0; - } + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } - @Override - public int getResultSetType() throws SQLException { - return 0; - } + @Override + public void clearWarnings() throws SQLException { - @Override - public void addBatch(String sql) throws SQLException { + } - } + @Override + public void setCursorName(String name) throws SQLException { - @Override - public void clearBatch() throws SQLException { + } - } + @Override + public boolean execute(String sql) throws SQLException { + return false; + } - @Override - public int[] executeBatch() throws SQLException { - return new int[0]; - } + @Override + public ResultSet getResultSet() throws SQLException { + // TODO: implement using the ParallelPhoenixResultSetFactory + return null; + } - @Override - public Connection getConnection() throws SQLException { - //TODO Inject ParallelPhoenixConnection - return null; - } + @Override + public int getUpdateCount() throws SQLException { + return 0; + } - @Override - public boolean getMoreResults(int current) throws SQLException { - return false; - } + @Override + public boolean getMoreResults() throws SQLException { + return false; + } - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return null; - } + @Override + public int getFetchDirection() throws SQLException { + return 0; + } - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - return 0; - } + @Override + public void setFetchDirection(int direction) throws SQLException { - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - 
return 0; - } + } - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return 0; - } + @Override + public int getFetchSize() throws SQLException { + return 0; + } - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - return false; - } + @Override + public void setFetchSize(int rows) throws SQLException { - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return false; - } + } - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - return false; - } + @Override + public int getResultSetConcurrency() throws SQLException { + return 0; + } - @Override - public int getResultSetHoldability() throws SQLException { - return 0; - } + @Override + public int getResultSetType() throws SQLException { + return 0; + } - @Override - public boolean isClosed() throws SQLException { - return false; - } + @Override + public void addBatch(String sql) throws SQLException { - @Override - public boolean isPoolable() throws SQLException { - return false; - } + } - @Override - public void setPoolable(boolean poolable) throws SQLException { + @Override + public void clearBatch() throws SQLException { - } + } - @Override - public void closeOnCompletion() throws SQLException { + @Override + public int[] executeBatch() throws SQLException { + return new int[0]; + } - } + @Override + public Connection getConnection() throws SQLException { + // TODO Inject ParallelPhoenixConnection + return null; + } - @Override - public boolean isCloseOnCompletion() throws SQLException { - return false; - } + @Override + public boolean getMoreResults(int current) throws SQLException { + return false; + } - @Override - public T unwrap(Class iface) throws SQLException { - if (iface.isInstance(this)) { - return (T) this; - } - return null; - } + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return null; + } - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return false; - } + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return 0; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return 0; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return 0; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return false; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return false; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return false; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return 0; + } + + @Override + public boolean isClosed() throws SQLException { + return false; + } + + @Override + public boolean isPoolable() throws SQLException { + return false; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + + } + + @Override + public void closeOnCompletion() throws SQLException { + + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return false; + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isInstance(this)) { + return (T) this; + } + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSet.java index a0818143fe1..305f7d503d6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.monitoring.MetricType; +import static org.apache.phoenix.exception.SQLExceptionCode.CLASS_NOT_UNWRAPPABLE; import java.sql.ResultSet; import java.sql.SQLException; @@ -32,124 +28,131 @@ import java.util.concurrent.CompletionException; import java.util.function.Function; -import static org.apache.phoenix.exception.SQLExceptionCode.CLASS_NOT_UNWRAPPABLE; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * ParallelPhoenixResultSet that provides the standard wait until at least one cluster completes approach + * ParallelPhoenixResultSet that provides the standard wait until at least one cluster completes + * approach */ -public class ParallelPhoenixResultSet extends DelegateResultSet implements PhoenixMonitoredResultSet { - - private final ParallelPhoenixContext context; - private final CompletableFuture rs1, rs2; - - public ParallelPhoenixResultSet(ParallelPhoenixContext context, CompletableFuture rs1, CompletableFuture rs2) { - super(null); - this.context = context; - this.rs = null; - this.rs1 = rs1; - this.rs2 = rs2; - } - - @Override - public boolean next() throws SQLException { - context.checkOpen(); - //As this starts iterating through a result set after we have a winner we bind to a single thread - if(rs == null) { - - Function function = (T) -> { - try { - return T.next(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - List> futures = ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, rs1, - rs2, context, false); - - Preconditions.checkState(futures.size() == 2); - CompletableFuture next1 = futures.get(0); - CompletableFuture next2 = futures.get(1); - - //Ensure one statement is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - - try { - if(next1.isDone() && !next1.isCompletedExceptionally()) { - rs = rs1.get(); - return next1.get(); - } else { //(next2.isDone() && !next2.isCompletedExceptionally()) - rs = rs2.get(); - return next2.get(); - } - } catch (Exception e) { - //should never happen - throw new SQLException("Unknown Error happened while processing initial next.",e); - } - - } else { - return 
rs.next(); +public class ParallelPhoenixResultSet extends DelegateResultSet + implements PhoenixMonitoredResultSet { + + private final ParallelPhoenixContext context; + private final CompletableFuture rs1, rs2; + + public ParallelPhoenixResultSet(ParallelPhoenixContext context, CompletableFuture rs1, + CompletableFuture rs2) { + super(null); + this.context = context; + this.rs = null; + this.rs1 = rs1; + this.rs2 = rs2; + } + + @Override + public boolean next() throws SQLException { + context.checkOpen(); + // As this starts iterating through a result set after we have a winner we bind to a single + // thread + if (rs == null) { + + Function function = (T) -> { + try { + return T.next(); + } catch (SQLException exception) { + throw new CompletionException(exception); } - } + }; - @VisibleForTesting - CompletableFuture getResultSetFuture1() { - return rs1; - } + List> futures = + ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, rs1, rs2, context, false); - @VisibleForTesting - CompletableFuture getResultSetFuture2() { - return rs2; - } + Preconditions.checkState(futures.size() == 2); + CompletableFuture next1 = futures.get(0); + CompletableFuture next2 = futures.get(1); - @VisibleForTesting - void setResultSet(ResultSet rs) { - this.rs = rs; - } - - @VisibleForTesting - ResultSet getResultSet() { - return rs; - } + // Ensure one statement is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - @Override - public Map> getReadMetrics() { - Map> metrics; - if(rs != null) { - metrics = ((PhoenixMonitoredResultSet) rs).getReadMetrics(); - } else { - metrics = new HashMap<>(); + try { + if (next1.isDone() && !next1.isCompletedExceptionally()) { + rs = rs1.get(); + return next1.get(); + } else { // (next2.isDone() && !next2.isCompletedExceptionally()) + rs = rs2.get(); + return next2.get(); } - context.decorateMetrics(metrics); - return metrics; - } + } catch (Exception e) { + // should never happen + throw new SQLException("Unknown Error happened while processing initial next.", e); + } - @Override - public Map getOverAllRequestReadMetrics() { - Map metrics; - if(rs != null) { - metrics = ((PhoenixResultSet) rs).getOverAllRequestReadMetrics(); - } else { - metrics = context.getContextMetrics(); - } - return metrics; + } else { + return rs.next(); } - - @Override - public void resetMetrics() { - if(rs != null) { - ((PhoenixResultSet)rs).resetMetrics(); - } - //reset our metrics - context.resetMetrics(); + } + + @VisibleForTesting + CompletableFuture getResultSetFuture1() { + return rs1; + } + + @VisibleForTesting + CompletableFuture getResultSetFuture2() { + return rs2; + } + + @VisibleForTesting + void setResultSet(ResultSet rs) { + this.rs = rs; + } + + @VisibleForTesting + ResultSet getResultSet() { + return rs; + } + + @Override + public Map> getReadMetrics() { + Map> metrics; + if (rs != null) { + metrics = ((PhoenixMonitoredResultSet) rs).getReadMetrics(); + } else { + metrics = new HashMap<>(); } + context.decorateMetrics(metrics); + return metrics; + } + + @Override + public Map getOverAllRequestReadMetrics() { + Map metrics; + if (rs != null) { + metrics = ((PhoenixResultSet) rs).getOverAllRequestReadMetrics(); + } else { + metrics = context.getContextMetrics(); + } + return metrics; + } - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (iface.isInstance(this)) { - return (T) this; - } - throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); + 
@Override + public void resetMetrics() { + if (rs != null) { + ((PhoenixResultSet) rs).resetMetrics(); + } + // reset our metrics + context.resetMetrics(); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isInstance(this)) { + return (T) this; } + throw new SQLExceptionInfo.Builder(CLASS_NOT_UNWRAPPABLE).build().buildException(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetFactory.java index 9247ef9fb4c..50ced73c7d9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,59 +22,57 @@ public class ParallelPhoenixResultSetFactory { - public static final ParallelPhoenixResultSetFactory INSTANCE = - new ParallelPhoenixResultSetFactory(); - public static final String PHOENIX_PARALLEL_RESULTSET_TYPE = "phoenix.parallel.resultSet.type"; + public static final ParallelPhoenixResultSetFactory INSTANCE = + new ParallelPhoenixResultSetFactory(); + public static final String PHOENIX_PARALLEL_RESULTSET_TYPE = "phoenix.parallel.resultSet.type"; - public enum ParallelPhoenixResultSetType { - PARALLEL_PHOENIX_RESULT_SET("ParallelPhoenixResultSet"), - PARALLEL_PHOENIX_NULL_COMPARING_RESULT_SET("ParallelPhoenixNullComparingResultSet"); + public enum ParallelPhoenixResultSetType { + PARALLEL_PHOENIX_RESULT_SET("ParallelPhoenixResultSet"), + PARALLEL_PHOENIX_NULL_COMPARING_RESULT_SET("ParallelPhoenixNullComparingResultSet"); - private String name; + private String name; - ParallelPhoenixResultSetType(String name) { - this.name = name; - } - - static ParallelPhoenixResultSetType fromName(String name) { - if (name == null) { - return PARALLEL_PHOENIX_RESULT_SET; - } - for (ParallelPhoenixResultSetType type : ParallelPhoenixResultSetType.values()) { - if (name.equals(type.getName())) { - return type; - } - } - throw new IllegalArgumentException("Unknown ParallelPhoenixResultSetType: " + name); - } + ParallelPhoenixResultSetType(String name) { + this.name = name; + } - public String getName() { - return this.name; + static ParallelPhoenixResultSetType fromName(String name) { + if (name == null) { + return PARALLEL_PHOENIX_RESULT_SET; + } + for (ParallelPhoenixResultSetType type : ParallelPhoenixResultSetType.values()) { + if (name.equals(type.getName())) { + return type; } + } + throw new IllegalArgumentException("Unknown ParallelPhoenixResultSetType: " + name); } - private ParallelPhoenixResultSetFactory() { + public String getName() { + return this.name; } + } - public ResultSet getParallelResultSet(ParallelPhoenixContext context, - CompletableFuture resultSet1, CompletableFuture resultSet2) { - // We only have 2 types now, hence use simple comparison rather than reflection for - // performance - String resultSetProperty = - context.getProperties().getProperty(PHOENIX_PARALLEL_RESULTSET_TYPE); - ParallelPhoenixResultSetType type = - 
ParallelPhoenixResultSetType.fromName(resultSetProperty); - ResultSet rs; - switch (type) { - case PARALLEL_PHOENIX_RESULT_SET: - rs = new ParallelPhoenixResultSet(context, resultSet1, resultSet2); - break; - case PARALLEL_PHOENIX_NULL_COMPARING_RESULT_SET: - rs = new ParallelPhoenixNullComparingResultSet(context, resultSet1, resultSet2); - break; - default: - throw new IllegalArgumentException("Unknown ParallelPhoenixResultSetType: " + type); - } - return rs; + private ParallelPhoenixResultSetFactory() { + } + + public ResultSet getParallelResultSet(ParallelPhoenixContext context, + CompletableFuture resultSet1, CompletableFuture resultSet2) { + // We only have 2 types now, hence use simple comparison rather than reflection for + // performance + String resultSetProperty = context.getProperties().getProperty(PHOENIX_PARALLEL_RESULTSET_TYPE); + ParallelPhoenixResultSetType type = ParallelPhoenixResultSetType.fromName(resultSetProperty); + ResultSet rs; + switch (type) { + case PARALLEL_PHOENIX_RESULT_SET: + rs = new ParallelPhoenixResultSet(context, resultSet1, resultSet2); + break; + case PARALLEL_PHOENIX_NULL_COMPARING_RESULT_SET: + rs = new ParallelPhoenixNullComparingResultSet(context, resultSet1, resultSet2); + break; + default: + throw new IllegalArgumentException("Unknown ParallelPhoenixResultSetType: " + type); } + return rs; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixStatement.java index a1ed744e329..428f5ce12b0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.commons.lang3.ArrayUtils; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -31,562 +27,566 @@ import java.util.concurrent.CompletionException; import java.util.function.Function; +import org.apache.commons.lang3.ArrayUtils; import org.apache.phoenix.jdbc.PhoenixStatement.Operation; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class ParallelPhoenixStatement implements PhoenixMonitoredStatement { - private final ParallelPhoenixContext context; - private final CompletableFuture statement1; - - CompletableFuture getStatement1() { - return statement1; - } - - CompletableFuture getStatement2() { - return statement2; - } - - private final CompletableFuture statement2; - - public ParallelPhoenixStatement(ParallelPhoenixContext context, CompletableFuture statement1, CompletableFuture statement2) throws SQLException { - this.context = context; - this.statement1 = statement1; - this.statement2 = statement2; - } - - Object runOnStatements(Function function) throws SQLException { - return ParallelPhoenixUtil.INSTANCE.runFutures(function,statement1,statement2,context, true); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - - Function function = (T) -> { - try { - return T.executeQuery(sql); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - List> futures = ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, statement1, - statement2, context, true); - - Preconditions.checkState(futures.size() == 2); - CompletableFuture rs1 = futures.get(0); - CompletableFuture rs2 = futures.get(1); - - //Ensure one statement is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - - return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, rs1, rs2); - } - - @Override - public int executeUpdate(String sql) throws SQLException { - Function function = (T) -> { - try { - return T.executeUpdate(sql); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public void close() throws SQLException { - Function function = (T) -> { - try { - T.close(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int getMaxFieldSize() throws SQLException { - Function function = (T) -> { - try { - return T.getMaxFieldSize(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - Function function = (T) -> { - try { - T.setMaxFieldSize(max); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int getMaxRows() throws SQLException { - Function function = (T) -> { - try { - return T.getMaxRows(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public void setMaxRows(int max) throws SQLException { - Function function = (T) -> { - try { - T.setMaxRows(max); - return null; - } catch (SQLException exception) { - 
throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - Function function = (T) -> { - try { - T.setEscapeProcessing(enable); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int getQueryTimeout() throws SQLException { - Function function = (T) -> { - try { - return T.getQueryTimeout(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - Function function = (T) -> { - try { - T.setQueryTimeout(seconds); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public void cancel() throws SQLException { - Function function = (T) -> { - try { - T.cancel(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - Function function = (T) -> { - try { - return T.getWarnings(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (SQLWarning) runOnStatements(function); - } - - @Override - public void clearWarnings() throws SQLException { - Function function = (T) -> { - try { - T.clearWarnings(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public void setCursorName(String name) throws SQLException { - Function function = (T) -> { - try { - T.setCursorName(name); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public boolean execute(String sql) throws SQLException { - Function function = (T) -> { - try { - return T.execute(sql); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (boolean) runOnStatements(function); - } - - @Override - public ResultSet getResultSet() throws SQLException { - Function function = (T) -> { - try { - return T.getResultSet(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - List> futures = ParallelPhoenixUtil.INSTANCE.applyFunctionToFutures(function, statement1, - statement2, context, true); - - Preconditions.checkState(futures.size() == 2); - CompletableFuture rs1 = futures.get(0); - CompletableFuture rs2 = futures.get(1); - - //Ensure one statement is successful before returning - ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); - - return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, rs1, rs2); - } - - @Override - public int getUpdateCount() throws SQLException { - Function function = (T) -> { - try { - return T.getUpdateCount(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public boolean getMoreResults() throws SQLException { - Function function = (T) -> { - try { - return T.getMoreResults(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (boolean) 
runOnStatements(function); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - Function function = (T) -> { - try { - T.setFetchDirection(direction); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int getFetchDirection() throws SQLException { - Function function = (T) -> { - try { - return T.getUpdateCount(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - Function function = (T) -> { - try { - T.setFetchSize(rows); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int getFetchSize() throws SQLException { - Function function = (T) -> { - try { - return T.getFetchSize(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - Function function = (T) -> { - try { - return T.getResultSetConcurrency(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public int getResultSetType() throws SQLException { - Function function = (T) -> { - try { - return T.getResultSetType(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (int) runOnStatements(function); - } - - @Override - public Operation getUpdateOperation() throws SQLException { - Function function = (T) -> { - try { - return T.getUpdateOperation(); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (Operation) runOnStatements(function); - } - - @Override - public void addBatch(String sql) throws SQLException { - Function function = (T) -> { - try { - T.addBatch(sql); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public void clearBatch() throws SQLException { - Function function = (T) -> { - try { - T.clearBatch(); - return null; - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - runOnStatements(function); - } - - @Override - public int[] executeBatch() throws SQLException { - Function function = (T) -> { - try { - return ArrayUtils.toObject(T.executeBatch()); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return ArrayUtils.toPrimitive((Integer[])runOnStatements(function)); - } - - @Override - public Connection getConnection() throws SQLException { - return null; //TODO: Push parallel context to this layer - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - Function function = (T) -> { - try { - return T.getMoreResults(current); - } catch (SQLException exception) { - throw new CompletionException(exception); - } - }; - - return (boolean)runOnStatements(function); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(); 
- } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - return 0; - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return 0; - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return false; - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - return false; - } - - @Override - public int getResultSetHoldability() throws SQLException { - return 0; - } - - @Override - public boolean isClosed() throws SQLException { - return context.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isPoolable() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void closeOnCompletion() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long getLargeUpdateCount() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setLargeMaxRows(long max) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long getLargeMaxRows() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long[] executeLargeBatch() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long executeLargeUpdate(String sql) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public T unwrap(Class iface) throws SQLException { - if (iface.isInstance(this)) { - return (T) this; - } - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return false; - } + private final ParallelPhoenixContext context; + private final CompletableFuture statement1; + + CompletableFuture getStatement1() { + return statement1; + } + + CompletableFuture getStatement2() { + return statement2; + } + + private final CompletableFuture statement2; + + public ParallelPhoenixStatement(ParallelPhoenixContext context, + CompletableFuture statement1, + CompletableFuture statement2) throws SQLException { + this.context = context; + this.statement1 = statement1; + this.statement2 = statement2; + } + + Object runOnStatements(Function function) throws SQLException { + return ParallelPhoenixUtil.INSTANCE.runFutures(function, statement1, statement2, context, true); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + + Function function = (T) -> { + try { + return T.executeQuery(sql); + } catch (SQLException exception) { + throw new 
CompletionException(exception); + } + }; + + List> futures = ParallelPhoenixUtil.INSTANCE + .applyFunctionToFutures(function, statement1, statement2, context, true); + + Preconditions.checkState(futures.size() == 2); + CompletableFuture rs1 = futures.get(0); + CompletableFuture rs2 = futures.get(1); + + // Ensure one statement is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); + + return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, rs1, rs2); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + Function function = (T) -> { + try { + return T.executeUpdate(sql); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public void close() throws SQLException { + Function function = (T) -> { + try { + T.close(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int getMaxFieldSize() throws SQLException { + Function function = (T) -> { + try { + return T.getMaxFieldSize(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + Function function = (T) -> { + try { + T.setMaxFieldSize(max); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int getMaxRows() throws SQLException { + Function function = (T) -> { + try { + return T.getMaxRows(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public void setMaxRows(int max) throws SQLException { + Function function = (T) -> { + try { + T.setMaxRows(max); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + Function function = (T) -> { + try { + T.setEscapeProcessing(enable); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int getQueryTimeout() throws SQLException { + Function function = (T) -> { + try { + return T.getQueryTimeout(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + Function function = (T) -> { + try { + T.setQueryTimeout(seconds); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public void cancel() throws SQLException { + Function function = (T) -> { + try { + T.cancel(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + Function function = (T) -> { + try { + return T.getWarnings(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (SQLWarning) runOnStatements(function); + } + + @Override + 
public void clearWarnings() throws SQLException { + Function function = (T) -> { + try { + T.clearWarnings(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public void setCursorName(String name) throws SQLException { + Function function = (T) -> { + try { + T.setCursorName(name); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public boolean execute(String sql) throws SQLException { + Function function = (T) -> { + try { + return T.execute(sql); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (boolean) runOnStatements(function); + } + + @Override + public ResultSet getResultSet() throws SQLException { + Function function = (T) -> { + try { + return T.getResultSet(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + List> futures = ParallelPhoenixUtil.INSTANCE + .applyFunctionToFutures(function, statement1, statement2, context, true); + + Preconditions.checkState(futures.size() == 2); + CompletableFuture rs1 = futures.get(0); + CompletableFuture rs2 = futures.get(1); + + // Ensure one statement is successful before returning + ParallelPhoenixUtil.INSTANCE.runFutures(futures, context, true); + + return ParallelPhoenixResultSetFactory.INSTANCE.getParallelResultSet(context, rs1, rs2); + } + + @Override + public int getUpdateCount() throws SQLException { + Function function = (T) -> { + try { + return T.getUpdateCount(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public boolean getMoreResults() throws SQLException { + Function function = (T) -> { + try { + return T.getMoreResults(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (boolean) runOnStatements(function); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + Function function = (T) -> { + try { + T.setFetchDirection(direction); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int getFetchDirection() throws SQLException { + Function function = (T) -> { + try { + return T.getUpdateCount(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + Function function = (T) -> { + try { + T.setFetchSize(rows); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int getFetchSize() throws SQLException { + Function function = (T) -> { + try { + return T.getFetchSize(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + Function function = (T) -> { + try { + return T.getResultSetConcurrency(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public int getResultSetType() throws SQLException { + Function 
function = (T) -> { + try { + return T.getResultSetType(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (int) runOnStatements(function); + } + + @Override + public Operation getUpdateOperation() throws SQLException { + Function function = (T) -> { + try { + return T.getUpdateOperation(); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (Operation) runOnStatements(function); + } + + @Override + public void addBatch(String sql) throws SQLException { + Function function = (T) -> { + try { + T.addBatch(sql); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public void clearBatch() throws SQLException { + Function function = (T) -> { + try { + T.clearBatch(); + return null; + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + runOnStatements(function); + } + + @Override + public int[] executeBatch() throws SQLException { + Function function = (T) -> { + try { + return ArrayUtils.toObject(T.executeBatch()); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return ArrayUtils.toPrimitive((Integer[]) runOnStatements(function)); + } + + @Override + public Connection getConnection() throws SQLException { + return null; // TODO: Push parallel context to this layer + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + Function function = (T) -> { + try { + return T.getMoreResults(current); + } catch (SQLException exception) { + throw new CompletionException(exception); + } + }; + + return (boolean) runOnStatements(function); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return 0; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return 0; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return false; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return false; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return 0; + } + + @Override + public boolean isClosed() throws SQLException { + return context.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isPoolable() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void closeOnCompletion() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long getLargeUpdateCount() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setLargeMaxRows(long max) throws SQLException { + throw new 
SQLFeatureNotSupportedException(); + } + + @Override + public long getLargeMaxRows() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long[] executeLargeBatch() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long executeLargeUpdate(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isInstance(this)) { + return (T) this; + } + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixUtil.java index a0225f6d33b..a2a86d2e25e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,21 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.monitoring.Metric; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.sql.SQLException; import java.util.ArrayList; import java.util.List; @@ -42,281 +31,288 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -public class ParallelPhoenixUtil { - - // Timeout used for every operation on a ParallelPhoenixConnection. Defaults to - // phoenix.query.timeoutMs. 
0 means wait indefinitely - public static final String PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB = "phoenix.ha.parallel.operation.timeout.ms"; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.monitoring.Metric; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; - public static ParallelPhoenixUtil INSTANCE = new ParallelPhoenixUtil(); +public class ParallelPhoenixUtil { - private static final Logger LOGGER = - LoggerFactory.getLogger(ParallelPhoenixUtil.class); - private static final long DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS = 1000; + // Timeout used for every operation on a ParallelPhoenixConnection. Defaults to + // phoenix.query.timeoutMs. 0 means wait indefinitely + public static final String PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB = + "phoenix.ha.parallel.operation.timeout.ms"; + + public static ParallelPhoenixUtil INSTANCE = new ParallelPhoenixUtil(); + + private static final Logger LOGGER = LoggerFactory.getLogger(ParallelPhoenixUtil.class); + private static final long DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS = 1000; + + private ParallelPhoenixUtil() { + } + + /** + * @param futures position in list indicates the cluster + */ + public Object getAnyOfNonExceptionally(List> futures, + ParallelPhoenixContext context) throws SQLException { + long timeoutMs = context.getOperationTimeout(); + long endTime = + (timeoutMs == 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTime() + timeoutMs; + long internalTimeoutMs = (timeoutMs == 0) + ? DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS + : Math.min(DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS, timeoutMs); + Object result = null; + boolean changed = true; + boolean timedout = false; + List> originalFutures = futures; + CompletableFuture resultFuture = null; + while (!futures.isEmpty() && result == null) { + if (EnvironmentEdgeManager.currentTime() > endTime) { + timedout = true; + break; + } + /** + * CompletableFuture.anyOf adds a completion handler to every future its passed so if we call + * it too often we can quickly wind up with thousands of completion handlers which take a long + * time to iterate through and notify. So only create it when the futures it covers have + * actually changed. + */ + if (changed) { + resultFuture = CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0])); + } + try { + result = resultFuture.get(internalTimeoutMs, TimeUnit.MILLISECONDS); + break; + } catch (Exception e) { + // remove the exceptionally completed results + List> filteredResults = + futures.stream().filter(f -> !f.isCompletedExceptionally()).collect(Collectors.toList()); + if (filteredResults.equals(futures)) { + changed = false; + } else { + futures = filteredResults; + changed = true; + } + } + } - private ParallelPhoenixUtil(){ + // All of our futures failed + if (futures.isEmpty()) { + LOGGER.error("All Futures failed."); + SQLException futuresException = null; + int i = 0; + for (CompletableFuture failedFuture : originalFutures) { + try { + failedFuture.get(); + } catch (Exception e) { + LOGGER.error("Future Exception. Cluster " + i + " HAGroup:" + context.getHaGroup(), e); + if (futuresException == null) { + futuresException = + new SQLException("All futures failed. 
HAGroup:" + context.getHaGroup(), e); + } else { + futuresException.addSuppressed(e); + } + } + } + context.setError(); + throw futuresException; } - /** - * - * @param futures position in list indicates the cluster - * @param context - * @return - * @throws SQLException - */ - public Object getAnyOfNonExceptionally(List> futures, ParallelPhoenixContext context) throws SQLException { - long timeoutMs = context.getOperationTimeout(); - long endTime = - (timeoutMs == 0) ? Long.MAX_VALUE - : EnvironmentEdgeManager.currentTime() + timeoutMs; - long internalTimeoutMs = - (timeoutMs == 0) ? DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS - : Math.min(DEFAULT_INTERNAL_OPERATION_TIMEOUT_MS, timeoutMs); - Object result = null; - boolean changed = true; - boolean timedout = false; - List> originalFutures = futures; - CompletableFuture resultFuture = null; - while (!futures.isEmpty() && result == null) { - if (EnvironmentEdgeManager.currentTime() > endTime) { - timedout = true; - break; - } - /** - * CompletableFuture.anyOf adds a completion handler to every future its passed so if we call it - * too often we can quickly wind up with thousands of completion handlers which take a long time - * to iterate through and notify. So only create it when the futures it covers have actually - * changed. - */ - if(changed) { - resultFuture = CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0])); - } + if (timedout) { + GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.increment(); + + if (futures.isEmpty()) { + LOGGER.warn("Unexpected race between timeout and failure occurred."); + } else { + int i = 0; + LOGGER.error("Parallel Phoenix Timeout occurred"); + for (CompletableFuture future : originalFutures) { + if (future.isCompletedExceptionally()) { try { - result = resultFuture.get(internalTimeoutMs, TimeUnit.MILLISECONDS); - break; + future.get(); } catch (Exception e) { - //remove the exceptionally completed results - List> filteredResults = futures.stream().filter(f -> !f.isCompletedExceptionally()).collect(Collectors.toList()); - if(filteredResults.equals(futures)) { - changed = false; - } else { - futures = filteredResults; - changed = true; - } + LOGGER.info("For timeout cluster " + i + " completed exceptionally. HAGroup:" + + context.getHaGroup(), e); } - } - - //All of our futures failed - if (futures.isEmpty()) { - LOGGER.error("All Futures failed."); - SQLException futuresException = null; - int i = 0; - for(CompletableFuture failedFuture : originalFutures) { - try { - failedFuture.get(); - } catch (Exception e) { - LOGGER.error("Future Exception. Cluster " + i + " HAGroup:" + context.getHaGroup(), e); - if (futuresException == null) { - futuresException = new SQLException("All futures failed. HAGroup:" + context.getHaGroup(), e); - } else { - futuresException.addSuppressed(e); - } - } - } - context.setError(); - throw futuresException; - } - - if (timedout) { - GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.increment(); - - if(futures.isEmpty()) { - LOGGER.warn("Unexpected race between timeout and failure occurred."); + } else { + if (future.isDone()) { + LOGGER.info("For timeout cluster " + i + + " finished post timeout prior to recording. HAGroup:" + context.getHaGroup()); } else { - int i = 0; - LOGGER.error("Parallel Phoenix Timeout occurred"); - for(CompletableFuture future : originalFutures) { - if(future.isCompletedExceptionally()) { - try { - future.get(); - } catch (Exception e) { - LOGGER.info("For timeout cluster " + i + " completed exceptionally. 
HAGroup:" + context.getHaGroup(), e); - } - } else { - if(future.isDone()) { - LOGGER.info("For timeout cluster " + i + " finished post timeout prior to recording. HAGroup:" + context.getHaGroup()); - } else { - LOGGER.info("For timeout cluster " + i + " still running. HAGroup:" + context.getHaGroup()); - } - } - i++; - } + LOGGER.info( + "For timeout cluster " + i + " still running. HAGroup:" + context.getHaGroup()); } - context.setError(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) - .setMessage("Operation timedout. Operation timeout ms = " + timeoutMs).build() - .buildException(); - } else { - //Debug - int i = 0; - for(CompletableFuture failedFuture : originalFutures) { - if(failedFuture.isCompletedExceptionally()) { - try { - failedFuture.get(); - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Future Exception. Cluster " + i + "HAGroup:" + context.getHaGroup(), e); - } - } - } - i++; - } - } - - return result; - } - - static class FutureResult { - FutureResult(T t, int index) { - this.t = t; - this.index = index; - } - - private final T t; - private final int index; - - T getResult() { - return t; + } + i++; } - - int getIndex() { - return index; + } + context.setError(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) + .setMessage("Operation timedout. Operation timeout ms = " + timeoutMs).build() + .buildException(); + } else { + // Debug + int i = 0; + for (CompletableFuture failedFuture : originalFutures) { + if (failedFuture.isCompletedExceptionally()) { + try { + failedFuture.get(); + } catch (Exception e) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Future Exception. Cluster " + i + "HAGroup:" + context.getHaGroup(), e); + } + } } + i++; + } } + return result; + } - public T getFutureNoRetry(CompletableFuture future, ParallelPhoenixContext context) - throws InterruptedException, ExecutionException, TimeoutException { - long operationTimeoutMs = context.getOperationTimeout(); - long timeout = (operationTimeoutMs > 0) ? 
operationTimeoutMs : Long.MAX_VALUE; - return future.get(timeout, TimeUnit.MILLISECONDS); + static class FutureResult { + FutureResult(T t, int index) { + this.t = t; + this.index = index; } + private final T t; + private final int index; - CompletableFuture getFutureAndChainOnContextNoMetrics(Function functionToApply, - CompletableFuture future1, - Function, - CompletableFuture> chainOnConn) { - return getFutureAndChainOnContext(functionToApply, future1, chainOnConn, null, null, false); - } - - CompletableFuture getFutureAndChainOnContext(Function functionToApply, - CompletableFuture future1, - Function, - CompletableFuture> chainOnConn, - Metric operationCount, - Metric failureCount) { - return getFutureAndChainOnContext(functionToApply, future1, chainOnConn, operationCount, failureCount, true); - } - - private CompletableFuture getFutureAndChainOnContext(Function functionToApply, - CompletableFuture future1, - Function,CompletableFuture> chainOnConn, - Metric operationCount, - Metric failureCount, boolean useMetrics) { - - return chainOnConn.apply(() -> { - try { - if(useMetrics) { - operationCount.increment(); - } - return functionToApply.apply(future1.get()); - } catch (Exception e) { - if(useMetrics) { - failureCount.increment(); - } - throw new CompletionException(e); - } - }); + T getResult() { + return t; } - List> applyFunctionToFutures(Function function, CompletableFuture future1, - CompletableFuture future2, ParallelPhoenixContext context, boolean useMetrics) { - - CompletableFuture result1 = - getFutureAndChainOnContext(function, future1, context::chainOnConn1, - context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), - context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount(), useMetrics); - CompletableFuture result2 = - getFutureAndChainOnContext(function, future2, context::chainOnConn2, - context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), - context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount(), useMetrics); - - return ImmutableList.of(result1,result2); + int getIndex() { + return index; } - - /** - * - * @param Type of the future - * @param futures list of futures to run, for 2 clusters 0 will be the active 1 will be the standby - * @param context this parallel connections context - * @param useMetrics - * @return The first non-expectional result of the futures - * @throws SQLException no non-exceptional future available - */ - Object runFutures(List> futures, ParallelPhoenixContext context, boolean useMetrics) throws SQLException { - - List> ranFutures = new ArrayList<>(); - for(int i = 0; i < futures.size(); i++) { - CompletableFuture future = futures.get(i); - int finalI = i; - CompletableFuture> decoratedFuture = future.thenApply(t -> new FutureResult<>(t, finalI)); - ranFutures.add(decoratedFuture); + } + + public T getFutureNoRetry(CompletableFuture future, ParallelPhoenixContext context) + throws InterruptedException, ExecutionException, TimeoutException { + long operationTimeoutMs = context.getOperationTimeout(); + long timeout = (operationTimeoutMs > 0) ? 
operationTimeoutMs : Long.MAX_VALUE; + return future.get(timeout, TimeUnit.MILLISECONDS); + } + + CompletableFuture getFutureAndChainOnContextNoMetrics(Function functionToApply, + CompletableFuture future1, Function, CompletableFuture> chainOnConn) { + return getFutureAndChainOnContext(functionToApply, future1, chainOnConn, null, null, false); + } + + CompletableFuture getFutureAndChainOnContext(Function functionToApply, + CompletableFuture future1, Function, CompletableFuture> chainOnConn, + Metric operationCount, Metric failureCount) { + return getFutureAndChainOnContext(functionToApply, future1, chainOnConn, operationCount, + failureCount, true); + } + + private CompletableFuture getFutureAndChainOnContext(Function functionToApply, + CompletableFuture future1, Function, CompletableFuture> chainOnConn, + Metric operationCount, Metric failureCount, boolean useMetrics) { + + return chainOnConn.apply(() -> { + try { + if (useMetrics) { + operationCount.increment(); } - FutureResult result = (FutureResult) getAnyOfNonExceptionally(ranFutures, context); - if(useMetrics) { - context.getParallelPhoenixMetrics().get(PhoenixHAGroupMetrics.HAMetricType.HA_PARALLEL_USED_OPERATIONS, result.index).increment(); + return functionToApply.apply(future1.get()); + } catch (Exception e) { + if (useMetrics) { + failureCount.increment(); } - return result.t; + throw new CompletionException(e); + } + }); + } + + List> applyFunctionToFutures(Function function, + CompletableFuture future1, CompletableFuture future2, ParallelPhoenixContext context, + boolean useMetrics) { + + CompletableFuture result1 = getFutureAndChainOnContext(function, future1, + context::chainOnConn1, context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), + context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount(), useMetrics); + CompletableFuture result2 = getFutureAndChainOnContext(function, future2, + context::chainOnConn2, context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), + context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount(), useMetrics); + + return ImmutableList.of(result1, result2); + } + + /** + * @param Type of the future + * @param futures list of futures to run, for 2 clusters 0 will be the active 1 will be the + * standby + * @param context this parallel connections context + * @return The first non-expectional result of the futures + * @throws SQLException no non-exceptional future available + */ + Object runFutures(List> futures, ParallelPhoenixContext context, + boolean useMetrics) throws SQLException { + + List> ranFutures = new ArrayList<>(); + for (int i = 0; i < futures.size(); i++) { + CompletableFuture future = futures.get(i); + int finalI = i; + CompletableFuture> decoratedFuture = + future.thenApply(t -> new FutureResult<>(t, finalI)); + ranFutures.add(decoratedFuture); } - - Object runFutures(Function function, CompletableFuture future1, - CompletableFuture future2, ParallelPhoenixContext context, boolean useMetrics) throws SQLException { - List> list = applyFunctionToFutures(function,future1,future2,context, useMetrics); - return runFutures(list,context, useMetrics); + FutureResult result = (FutureResult) getAnyOfNonExceptionally(ranFutures, context); + if (useMetrics) { + context.getParallelPhoenixMetrics() + .get(PhoenixHAGroupMetrics.HAMetricType.HA_PARALLEL_USED_OPERATIONS, result.index) + .increment(); + } + return result.t; + } + + Object runFutures(Function function, CompletableFuture future1, + CompletableFuture future2, 
ParallelPhoenixContext context, boolean useMetrics) + throws SQLException { + List> list = + applyFunctionToFutures(function, future1, future2, context, useMetrics); + return runFutures(list, context, useMetrics); + } + + /** + * Blocks + * @throws SQLException if any of the futures fail with any exception + */ + PairOfSameType runOnFuturesGetAll(Function function, + CompletableFuture future1, CompletableFuture future2, ParallelPhoenixContext context, + boolean useMetrics) throws SQLException { + + CompletableFuture result1, result2; + if (useMetrics) { + result1 = getFutureAndChainOnContext(function, future1, context::chainOnConn1, + context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), + context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); + result2 = getFutureAndChainOnContext(function, future2, context::chainOnConn2, + context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), + context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); + } else { + result1 = getFutureAndChainOnContextNoMetrics(function, future1, context::chainOnConn1); + result2 = getFutureAndChainOnContextNoMetrics(function, future2, context::chainOnConn2); } - /** - * Blocks - * @throws SQLException if any of the futures fail with any exception - */ - PairOfSameType runOnFuturesGetAll(Function function, CompletableFuture future1, - CompletableFuture future2, ParallelPhoenixContext context, boolean useMetrics) throws SQLException { - - CompletableFuture result1,result2; - if(useMetrics) { - result1 = getFutureAndChainOnContext(function, future1, context::chainOnConn1, - context.getParallelPhoenixMetrics().getActiveClusterOperationCount(), - context.getParallelPhoenixMetrics().getActiveClusterFailedOperationCount()); - result2 = getFutureAndChainOnContext(function, future2, context::chainOnConn2, - context.getParallelPhoenixMetrics().getStandbyClusterOperationCount(), - context.getParallelPhoenixMetrics().getStandbyClusterFailedOperationCount()); - } else { - result1 = getFutureAndChainOnContextNoMetrics(function, future1, context::chainOnConn1); - result2 = getFutureAndChainOnContextNoMetrics(function, future2, context::chainOnConn2); - } - - Object value1, value2; - try { - value1 = result1.get(); - } catch (Exception e) { - throw new SQLException(e); - } - try { - value2 = result2.get(); - } catch (Exception e) { - throw new SQLException(e); - } - return new PairOfSameType<>(value1, value2); + Object value1, value2; + try { + value1 = result1.get(); + } catch (Exception e) { + throw new SQLException(e); + } + try { + value2 = result2.get(); + } catch (Exception e) { + throw new SQLException(e); } + return new PairOfSameType<>(value1, value2); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java index e2830f19a8a..e1e45f279fa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,14 +17,14 @@ */ package org.apache.phoenix.jdbc; +import static java.util.Collections.emptyMap; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS; import static org.apache.phoenix.monitoring.MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.query.QueryServices.QUERY_SERVICES_NAME; import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; -import static java.util.Collections.emptyMap; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS; import java.io.EOFException; import java.io.IOException; @@ -143,1454 +143,1351 @@ import org.apache.phoenix.util.VarBinaryFormatter; /** - * - * JDBC Connection implementation of Phoenix. Currently the following are - * supported: - Statement - PreparedStatement The connection may only be used - * with the following options: - ResultSet.TYPE_FORWARD_ONLY - - * Connection.TRANSACTION_READ_COMMITTED - * - * + * JDBC Connection implementation of Phoenix. Currently the following are supported: - Statement - + * PreparedStatement The connection may only be used with the following options: - + * ResultSet.TYPE_FORWARD_ONLY - Connection.TRANSACTION_READ_COMMITTED * @since 0.1 */ -public class PhoenixConnection implements MetaDataMutated, SQLCloseable, PhoenixMonitoredConnection { - private final String url; - private String schema; - private final ConnectionQueryServices services; - private final Properties info; - private final Map, Format> formatters = new HashMap<>(); - private final int mutateBatchSize; - private final long mutateBatchSizeBytes; - private final Long scn; - private final boolean buildingIndex; - private MutationState mutationState; - private HashSet statements = new HashSet<>(); - private boolean isAutoFlush = false; - private boolean isAutoCommit = false; - private final PName tenantId; - private final String dateFormatTimeZoneId; - private final String datePattern; - private final String timePattern; - private final String timestampPattern; - private int statementExecutionCounter; - private TraceScope traceScope = null; - private volatile boolean isClosed = false; - private volatile boolean isClosing = false; - private Sampler sampler; - private boolean readOnly = false; - private Consistency consistency = Consistency.STRONG; - private Map customTracingAnnotations = emptyMap(); - private final boolean isRequestLevelMetricsEnabled; - private final boolean isDescVarLengthRowKeyUpgrade; - private ParallelIteratorFactory parallelIteratorFactory; - private final LinkedBlockingQueue> scannerQueue; - private TableResultIteratorFactory tableResultIteratorFactory; - private boolean isRunningUpgrade; - private LogLevel logLevel; - private LogLevel auditLogLevel; - private Double logSamplingRate; - private String 
sourceOfOperation; - private volatile SQLException reasonForClose; - private static final String[] CONNECTION_PROPERTIES; - - private final ConcurrentLinkedQueue childConnections = - new ConcurrentLinkedQueue<>(); - - //For now just the copy constructor paths will have this as true as I don't want to change the - //public interfaces. - private final boolean isInternalConnection; - private boolean isApplyTimeZoneDisplacement; - private final UUID uniqueID; - private ConnectionActivityLogger connectionActivityLogger = ConnectionActivityLogger.NO_OP_LOGGER; - - static { - Tracing.addTraceMetricsSource(); - CONNECTION_PROPERTIES = PhoenixRuntime.getConnectionProperties(); - } - - private static Properties newPropsWithSCN(long scn, Properties props) { - props = new Properties(props); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); - return props; - } - - public PhoenixConnection(PhoenixConnection connection, - boolean isDescRowKeyOrderUpgrade, boolean isRunningUpgrade) - throws SQLException { - this(connection.getQueryServices(), connection.getURL(), connection - .getClientInfo(), connection - .getMutationState(), isDescRowKeyOrderUpgrade, - isRunningUpgrade, connection.buildingIndex, true); - this.isAutoCommit = connection.isAutoCommit; - this.isAutoFlush = connection.isAutoFlush; - this.sampler = connection.sampler; - this.statementExecutionCounter = connection.statementExecutionCounter; - } - - public PhoenixConnection(PhoenixConnection connection) throws SQLException { - this(connection, connection.isDescVarLengthRowKeyUpgrade(), connection - .isRunningUpgrade()); - } - - public PhoenixConnection(PhoenixConnection connection, - MutationState mutationState) throws SQLException { - this(connection.getQueryServices(), connection.getURL(), connection - .getClientInfo(), mutationState, - connection.isDescVarLengthRowKeyUpgrade(), connection - .isRunningUpgrade(), connection.buildingIndex, true); - } - - public PhoenixConnection(PhoenixConnection connection, long scn) - throws SQLException { - this(connection, newPropsWithSCN(scn, connection.getClientInfo())); - } - - public PhoenixConnection(PhoenixConnection connection, Properties props) throws SQLException { - this(connection.getQueryServices(), connection.getURL(), props, connection - .getMutationState(), connection.isDescVarLengthRowKeyUpgrade(), - connection.isRunningUpgrade(), connection.buildingIndex, true); - this.isAutoCommit = connection.isAutoCommit; - this.isAutoFlush = connection.isAutoFlush; - this.sampler = connection.sampler; - this.statementExecutionCounter = connection.statementExecutionCounter; - } - - public PhoenixConnection(ConnectionQueryServices services, String url, - Properties info) throws SQLException { - this(services, url, info, null, false, false, false, false); - } - - public PhoenixConnection(PhoenixConnection connection, - ConnectionQueryServices services, Properties info) - throws SQLException { - this(services, connection.url, info, null, - connection.isDescVarLengthRowKeyUpgrade(), connection - .isRunningUpgrade(), connection.buildingIndex, true); - } - - private PhoenixConnection(ConnectionQueryServices services, String url, - Properties info, MutationState mutationState, - boolean isDescVarLengthRowKeyUpgrade, boolean isRunningUpgrade, - boolean buildingIndex, boolean isInternalConnection) throws SQLException { - this.url = url; - this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade; - this.isInternalConnection = isInternalConnection; - - // Filter user provided 
properties based on property policy, if - // provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true - if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, - String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) { - PropertyPolicyProvider.getPropertyPolicy().evaluate(info); - } - - // Copy so client cannot change - this.info = PropertiesUtil.deepCopy(info); - final PName tenantId = JDBCUtil.getTenantId(url, info); - if (this.info.isEmpty() && tenantId == null) { - this.services = services; +public class PhoenixConnection + implements MetaDataMutated, SQLCloseable, PhoenixMonitoredConnection { + private final String url; + private String schema; + private final ConnectionQueryServices services; + private final Properties info; + private final Map, Format> formatters = new HashMap<>(); + private final int mutateBatchSize; + private final long mutateBatchSizeBytes; + private final Long scn; + private final boolean buildingIndex; + private MutationState mutationState; + private HashSet statements = new HashSet<>(); + private boolean isAutoFlush = false; + private boolean isAutoCommit = false; + private final PName tenantId; + private final String dateFormatTimeZoneId; + private final String datePattern; + private final String timePattern; + private final String timestampPattern; + private int statementExecutionCounter; + private TraceScope traceScope = null; + private volatile boolean isClosed = false; + private volatile boolean isClosing = false; + private Sampler sampler; + private boolean readOnly = false; + private Consistency consistency = Consistency.STRONG; + private Map customTracingAnnotations = emptyMap(); + private final boolean isRequestLevelMetricsEnabled; + private final boolean isDescVarLengthRowKeyUpgrade; + private ParallelIteratorFactory parallelIteratorFactory; + private final LinkedBlockingQueue> scannerQueue; + private TableResultIteratorFactory tableResultIteratorFactory; + private boolean isRunningUpgrade; + private LogLevel logLevel; + private LogLevel auditLogLevel; + private Double logSamplingRate; + private String sourceOfOperation; + private volatile SQLException reasonForClose; + private static final String[] CONNECTION_PROPERTIES; + + private final ConcurrentLinkedQueue childConnections = + new ConcurrentLinkedQueue<>(); + + // For now just the copy constructor paths will have this as true as I don't want to change the + // public interfaces. 
+ private final boolean isInternalConnection; + private boolean isApplyTimeZoneDisplacement; + private final UUID uniqueID; + private ConnectionActivityLogger connectionActivityLogger = ConnectionActivityLogger.NO_OP_LOGGER; + + static { + Tracing.addTraceMetricsSource(); + CONNECTION_PROPERTIES = PhoenixRuntime.getConnectionProperties(); + } + + private static Properties newPropsWithSCN(long scn, Properties props) { + props = new Properties(props); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); + return props; + } + + public PhoenixConnection(PhoenixConnection connection, boolean isDescRowKeyOrderUpgrade, + boolean isRunningUpgrade) throws SQLException { + this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), + connection.getMutationState(), isDescRowKeyOrderUpgrade, isRunningUpgrade, + connection.buildingIndex, true); + this.isAutoCommit = connection.isAutoCommit; + this.isAutoFlush = connection.isAutoFlush; + this.sampler = connection.sampler; + this.statementExecutionCounter = connection.statementExecutionCounter; + } + + public PhoenixConnection(PhoenixConnection connection) throws SQLException { + this(connection, connection.isDescVarLengthRowKeyUpgrade(), connection.isRunningUpgrade()); + } + + public PhoenixConnection(PhoenixConnection connection, MutationState mutationState) + throws SQLException { + this(connection.getQueryServices(), connection.getURL(), connection.getClientInfo(), + mutationState, connection.isDescVarLengthRowKeyUpgrade(), connection.isRunningUpgrade(), + connection.buildingIndex, true); + } + + public PhoenixConnection(PhoenixConnection connection, long scn) throws SQLException { + this(connection, newPropsWithSCN(scn, connection.getClientInfo())); + } + + public PhoenixConnection(PhoenixConnection connection, Properties props) throws SQLException { + this(connection.getQueryServices(), connection.getURL(), props, connection.getMutationState(), + connection.isDescVarLengthRowKeyUpgrade(), connection.isRunningUpgrade(), + connection.buildingIndex, true); + this.isAutoCommit = connection.isAutoCommit; + this.isAutoFlush = connection.isAutoFlush; + this.sampler = connection.sampler; + this.statementExecutionCounter = connection.statementExecutionCounter; + } + + public PhoenixConnection(ConnectionQueryServices services, String url, Properties info) + throws SQLException { + this(services, url, info, null, false, false, false, false); + } + + public PhoenixConnection(PhoenixConnection connection, ConnectionQueryServices services, + Properties info) throws SQLException { + this(services, connection.url, info, null, connection.isDescVarLengthRowKeyUpgrade(), + connection.isRunningUpgrade(), connection.buildingIndex, true); + } + + private PhoenixConnection(ConnectionQueryServices services, String url, Properties info, + MutationState mutationState, boolean isDescVarLengthRowKeyUpgrade, boolean isRunningUpgrade, + boolean buildingIndex, boolean isInternalConnection) throws SQLException { + this.url = url; + this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade; + this.isInternalConnection = isInternalConnection; + + // Filter user provided properties based on property policy, if + // provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true + if ( + Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, + String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED))) + ) { + PropertyPolicyProvider.getPropertyPolicy().evaluate(info); + } + + // 
Copy so client cannot change + this.info = PropertiesUtil.deepCopy(info); + final PName tenantId = JDBCUtil.getTenantId(url, info); + if (this.info.isEmpty() && tenantId == null) { + this.services = services; + } else { + // Create child services keyed by tenantId to track resource usage + // for + // a tenantId for all connections on this JVM. + if (tenantId != null) { + services = services.getChildQueryServices(tenantId.getBytesPtr()); + } + ReadOnlyProps currentProps = services.getProps(); + final ReadOnlyProps augmentedProps = currentProps.addAll(filterKnownNonProperties(this.info)); + this.services = + augmentedProps == currentProps ? services : new DelegateConnectionQueryServices(services) { + @Override + public ReadOnlyProps getProps() { + return augmentedProps; + } + }; + } + + Long scnParam = JDBCUtil.getCurrentSCN(url, this.info); + checkScn(scnParam); + Long buildIndexAtParam = JDBCUtil.getBuildIndexSCN(url, this.info); + checkBuildIndexAt(buildIndexAtParam); + checkScnAndBuildIndexAtEquality(scnParam, buildIndexAtParam); + + this.scn = scnParam != null ? scnParam : buildIndexAtParam; + this.buildingIndex = buildingIndex || buildIndexAtParam != null; + this.isAutoFlush = this.services.getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, + QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED) + && this.services.getProps().getBoolean(QueryServices.AUTO_FLUSH_ATTRIB, + QueryServicesOptions.DEFAULT_AUTO_FLUSH); + this.isAutoCommit = JDBCUtil.getAutoCommit(url, this.info, this.services.getProps() + .getBoolean(QueryServices.AUTO_COMMIT_ATTRIB, QueryServicesOptions.DEFAULT_AUTO_COMMIT)); + this.consistency = JDBCUtil.getConsistencyLevel(url, this.info, this.services.getProps() + .get(QueryServices.CONSISTENCY_ATTRIB, QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL)); + // currently we are not resolving schema set through property, so if + // schema doesn't exists ,connection will not fail + // but queries may fail + this.schema = JDBCUtil.getSchema(url, this.info, this.services.getProps() + .get(QueryServices.SCHEMA_ATTRIB, QueryServicesOptions.DEFAULT_SCHEMA)); + this.tenantId = tenantId; + this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps()); + this.mutateBatchSizeBytes = + JDBCUtil.getMutateBatchSizeBytes(url, this.info, this.services.getProps()); + datePattern = + this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT); + timePattern = + this.services.getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT); + timestampPattern = this.services.getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, + DateUtil.DEFAULT_TIMESTAMP_FORMAT); + String numberPattern = this.services.getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, + NumberUtil.DEFAULT_NUMBER_FORMAT); + int maxSize = this.services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + long maxSizeBytes = + this.services.getProps().getLongBytes(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); + this.isApplyTimeZoneDisplacement = + this.services.getProps().getBoolean(QueryServices.APPLY_TIME_ZONE_DISPLACMENT_ATTRIB, + QueryServicesOptions.DEFAULT_APPLY_TIME_ZONE_DISPLACMENT); + this.dateFormatTimeZoneId = this.services.getProps() + .get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID); + Format dateFormat = DateUtil.getDateFormatter(datePattern, dateFormatTimeZoneId); + Format timeFormat = 
DateUtil.getDateFormatter(timePattern, dateFormatTimeZoneId); + Format timestampFormat = DateUtil.getDateFormatter(timestampPattern, dateFormatTimeZoneId); + formatters.put(PDate.INSTANCE, dateFormat); + formatters.put(PTime.INSTANCE, timeFormat); + formatters.put(PTimestamp.INSTANCE, timestampFormat); + formatters.put(PUnsignedDate.INSTANCE, dateFormat); + formatters.put(PUnsignedTime.INSTANCE, timeFormat); + formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat); + formatters.put(PDecimal.INSTANCE, FunctionArgumentType.NUMERIC.getFormatter(numberPattern)); + formatters.put(PVarbinary.INSTANCE, VarBinaryFormatter.INSTANCE); + formatters.put(PBinary.INSTANCE, VarBinaryFormatter.INSTANCE); + + this.logLevel = LogLevel.valueOf(this.services.getProps().get(QueryServices.LOG_LEVEL, + QueryServicesOptions.DEFAULT_LOGGING_LEVEL)); + this.auditLogLevel = LogLevel.valueOf(this.services.getProps() + .get(QueryServices.AUDIT_LOG_LEVEL, QueryServicesOptions.DEFAULT_AUDIT_LOGGING_LEVEL)); + this.isRequestLevelMetricsEnabled = + JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info, this.services.getProps()); + this.mutationState = mutationState == null + ? newMutationState(maxSize, maxSizeBytes) + : new MutationState(mutationState, this); + this.uniqueID = UUID.randomUUID(); + this.services.addConnection(this); + + // setup tracing, if its enabled + this.sampler = Tracing.getConfiguredSampler(this); + this.customTracingAnnotations = getImmutableCustomTracingAnnotations(); + this.scannerQueue = new LinkedBlockingQueue<>(); + this.tableResultIteratorFactory = new DefaultTableResultIteratorFactory(); + this.isRunningUpgrade = isRunningUpgrade; + + this.logSamplingRate = Double.parseDouble(this.services.getProps() + .get(QueryServices.LOG_SAMPLE_RATE, QueryServicesOptions.DEFAULT_LOG_SAMPLE_RATE)); + String connectionQueryServiceName = this.services.getConfiguration().get(QUERY_SERVICES_NAME); + if (isInternalConnection) { + GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.increment(); + long currentInternalConnectionCount = + this.getQueryServices().getConnectionCount(isInternalConnection); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, currentInternalConnectionCount); + ConnectionQueryServicesMetricsManager + .updateConnectionQueryServiceOpenInternalConnectionHistogram(currentInternalConnectionCount, + connectionQueryServiceName); + } else { + GLOBAL_OPEN_PHOENIX_CONNECTIONS.increment(); + long currentConnectionCount = + this.getQueryServices().getConnectionCount(isInternalConnection); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + OPEN_PHOENIX_CONNECTIONS_COUNTER, currentConnectionCount); + ConnectionQueryServicesMetricsManager.updateConnectionQueryServiceOpenConnectionHistogram( + currentConnectionCount, connectionQueryServiceName); + } + this.sourceOfOperation = + this.services.getProps().get(QueryServices.SOURCE_OPERATION_ATTRIB, null); + } + + private static void checkScn(Long scnParam) throws SQLException { + if (scnParam != null && scnParam < 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_SCN).build().buildException(); + } + } + + private static void checkBuildIndexAt(Long replayAtParam) throws SQLException { + if (replayAtParam != null && replayAtParam < 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_REPLAY_AT).build() + .buildException(); + } + } + + private static void checkScnAndBuildIndexAtEquality(Long scnParam, Long replayAt) + 
throws SQLException { + if (scnParam != null && replayAt != null && !scnParam.equals(replayAt)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEQUAL_SCN_AND_BUILD_INDEX_AT).build() + .buildException(); + } + } + + private static Properties filterKnownNonProperties(Properties info) { + Properties prunedProperties = info; + for (String property : CONNECTION_PROPERTIES) { + if (info.containsKey(property)) { + if (prunedProperties == info) { + prunedProperties = PropertiesUtil.deepCopy(info); + } + prunedProperties.remove(property); + } + } + return prunedProperties; + } + + private ImmutableMap getImmutableCustomTracingAnnotations() { + Builder result = ImmutableMap.builder(); + result.putAll(JDBCUtil.getAnnotations(url, info)); + if (getSCN() != null) { + result.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, getSCN().toString()); + } + if (getTenantId() != null) { + result.put(PhoenixRuntime.TENANT_ID_ATTRIB, getTenantId().getString()); + } + return result.build(); + } + + public boolean isInternalConnection() { + return isInternalConnection; + } + + /** + * Add connection to the internal childConnections queue This method is thread safe + */ + public void addChildConnection(PhoenixConnection connection) { + childConnections.add(connection); + } + + /** + * Method to remove child connection from childConnections Queue + */ + public void removeChildConnection(PhoenixConnection connection) { + childConnections.remove(connection); + } + + /** + * Method to fetch child connections count from childConnections Queue + * @return int count + */ + @VisibleForTesting + public int getChildConnectionsCount() { + return childConnections.size(); + } + + public Sampler getSampler() { + return this.sampler; + } + + public void setSampler(Sampler sampler) throws SQLException { + this.sampler = sampler; + } + + public Map getCustomTracingAnnotations() { + return customTracingAnnotations; + } + + public int executeStatements(Reader reader, List binds, PrintStream out) + throws IOException, SQLException { + int bindsOffset = 0; + int nStatements = 0; + PhoenixStatementParser parser = new PhoenixStatementParser(reader); + try { + while (true) { + PhoenixPreparedStatement stmt = null; + try { + stmt = new PhoenixPreparedStatement(this, parser); + ParameterMetaData paramMetaData = stmt.getParameterMetaData(); + for (int i = 0; i < paramMetaData.getParameterCount(); i++) { + stmt.setObject(i + 1, binds.get(bindsOffset + i)); + } + long start = EnvironmentEdgeManager.currentTimeMillis(); + boolean isQuery = stmt.execute(); + if (isQuery) { + ResultSet rs = stmt.getResultSet(); + if (!rs.next()) { + if (out != null) { + out.println("no rows selected"); + } } else { - // Create child services keyed by tenantId to track resource usage - // for - // a tenantId for all connections on this JVM. - if (tenantId != null) { - services = services.getChildQueryServices(tenantId - .getBytesPtr()); + int columnCount = 0; + if (out != null) { + ResultSetMetaData md = rs.getMetaData(); + columnCount = md.getColumnCount(); + for (int i = 1; i <= columnCount; i++) { + int displayWidth = md.getColumnDisplaySize(i); + String label = md.getColumnLabel(i); + if (md.isSigned(i)) { + out.print(displayWidth < label.length() + ? label.substring(0, displayWidth) + : Strings.padStart(label, displayWidth, ' ')); + out.print(' '); + } else { + out.print(displayWidth < label.length() + ? 
label.substring(0, displayWidth) + : Strings.padEnd(md.getColumnLabel(i), displayWidth, ' ')); + out.print(' '); + } } - ReadOnlyProps currentProps = services.getProps(); - final ReadOnlyProps augmentedProps = currentProps - .addAll(filterKnownNonProperties(this.info)); - this.services = augmentedProps == currentProps ? services - : new DelegateConnectionQueryServices(services) { - @Override - public ReadOnlyProps getProps() { - return augmentedProps; - } - }; - } - - Long scnParam = JDBCUtil.getCurrentSCN(url, this.info); - checkScn(scnParam); - Long buildIndexAtParam = JDBCUtil.getBuildIndexSCN(url, this.info); - checkBuildIndexAt(buildIndexAtParam); - checkScnAndBuildIndexAtEquality(scnParam, buildIndexAtParam); - - this.scn = scnParam != null ? scnParam : buildIndexAtParam; - this.buildingIndex = buildingIndex || buildIndexAtParam != null; - this.isAutoFlush = this.services.getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED) - && this.services.getProps().getBoolean( - QueryServices.AUTO_FLUSH_ATTRIB, - QueryServicesOptions.DEFAULT_AUTO_FLUSH); - this.isAutoCommit = JDBCUtil.getAutoCommit( - url, - this.info, - this.services.getProps().getBoolean( - QueryServices.AUTO_COMMIT_ATTRIB, - QueryServicesOptions.DEFAULT_AUTO_COMMIT)); - this.consistency = JDBCUtil.getConsistencyLevel( - url, - this.info, - this.services.getProps().get(QueryServices.CONSISTENCY_ATTRIB, - QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL)); - // currently we are not resolving schema set through property, so if - // schema doesn't exists ,connection will not fail - // but queries may fail - this.schema = JDBCUtil.getSchema( - url, - this.info, - this.services.getProps().get(QueryServices.SCHEMA_ATTRIB, - QueryServicesOptions.DEFAULT_SCHEMA)); - this.tenantId = tenantId; - this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, - this.services.getProps()); - this.mutateBatchSizeBytes = JDBCUtil.getMutateBatchSizeBytes(url, - this.info, this.services.getProps()); - datePattern = this.services.getProps().get( - QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT); - timePattern = this.services.getProps().get( - QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT); - timestampPattern = this.services.getProps().get( - QueryServices.TIMESTAMP_FORMAT_ATTRIB, - DateUtil.DEFAULT_TIMESTAMP_FORMAT); - String numberPattern = this.services.getProps().get( - QueryServices.NUMBER_FORMAT_ATTRIB, - NumberUtil.DEFAULT_NUMBER_FORMAT); - int maxSize = this.services.getProps().getInt( - QueryServices.MAX_MUTATION_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - long maxSizeBytes = this.services.getProps().getLongBytes( - QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES); - this.isApplyTimeZoneDisplacement = this.services.getProps().getBoolean( - QueryServices.APPLY_TIME_ZONE_DISPLACMENT_ATTRIB, - QueryServicesOptions.DEFAULT_APPLY_TIME_ZONE_DISPLACMENT); - this.dateFormatTimeZoneId = this.services.getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, - DateUtil.DEFAULT_TIME_ZONE_ID); - Format dateFormat = DateUtil.getDateFormatter(datePattern, dateFormatTimeZoneId); - Format timeFormat = DateUtil.getDateFormatter(timePattern, dateFormatTimeZoneId); - Format timestampFormat = DateUtil.getDateFormatter(timestampPattern, dateFormatTimeZoneId); - formatters.put(PDate.INSTANCE, dateFormat); - formatters.put(PTime.INSTANCE, timeFormat); - formatters.put(PTimestamp.INSTANCE, 
timestampFormat); - formatters.put(PUnsignedDate.INSTANCE, dateFormat); - formatters.put(PUnsignedTime.INSTANCE, timeFormat); - formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat); - formatters.put(PDecimal.INSTANCE, - FunctionArgumentType.NUMERIC.getFormatter(numberPattern)); - formatters.put(PVarbinary.INSTANCE, VarBinaryFormatter.INSTANCE); - formatters.put(PBinary.INSTANCE, VarBinaryFormatter.INSTANCE); - - this.logLevel = LogLevel.valueOf(this.services.getProps().get(QueryServices.LOG_LEVEL, - QueryServicesOptions.DEFAULT_LOGGING_LEVEL)); - this.auditLogLevel = LogLevel.valueOf(this.services.getProps().get(QueryServices.AUDIT_LOG_LEVEL, - QueryServicesOptions.DEFAULT_AUDIT_LOGGING_LEVEL)); - this.isRequestLevelMetricsEnabled = JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info, - this.services.getProps()); - this.mutationState = mutationState == null ? newMutationState(maxSize, - maxSizeBytes) : new MutationState(mutationState, this); - this.uniqueID = UUID.randomUUID(); - this.services.addConnection(this); - - // setup tracing, if its enabled - this.sampler = Tracing.getConfiguredSampler(this); - this.customTracingAnnotations = getImmutableCustomTracingAnnotations(); - this.scannerQueue = new LinkedBlockingQueue<>(); - this.tableResultIteratorFactory = new DefaultTableResultIteratorFactory(); - this.isRunningUpgrade = isRunningUpgrade; - - this.logSamplingRate = Double.parseDouble(this.services.getProps().get(QueryServices.LOG_SAMPLE_RATE, - QueryServicesOptions.DEFAULT_LOG_SAMPLE_RATE)); - String connectionQueryServiceName = - this.services.getConfiguration().get(QUERY_SERVICES_NAME); - if (isInternalConnection) { - GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.increment(); - long currentInternalConnectionCount = - this.getQueryServices().getConnectionCount(isInternalConnection); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, currentInternalConnectionCount); - ConnectionQueryServicesMetricsManager - .updateConnectionQueryServiceOpenInternalConnectionHistogram( - currentInternalConnectionCount, connectionQueryServiceName); - } else { - GLOBAL_OPEN_PHOENIX_CONNECTIONS.increment(); - long currentConnectionCount = - this.getQueryServices().getConnectionCount(isInternalConnection); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, - OPEN_PHOENIX_CONNECTIONS_COUNTER, currentConnectionCount); - ConnectionQueryServicesMetricsManager - .updateConnectionQueryServiceOpenConnectionHistogram(currentConnectionCount, - connectionQueryServiceName); - } - this.sourceOfOperation = this.services.getProps() - .get(QueryServices.SOURCE_OPERATION_ATTRIB, null); - } - - private static void checkScn(Long scnParam) throws SQLException { - if (scnParam != null && scnParam < 0) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_SCN) - .build().buildException(); - } - } - - private static void checkBuildIndexAt(Long replayAtParam) throws SQLException { - if (replayAtParam != null && replayAtParam < 0) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INVALID_REPLAY_AT).build() - .buildException(); - } - } - - private static void checkScnAndBuildIndexAtEquality(Long scnParam, Long replayAt) - throws SQLException { - if (scnParam != null && replayAt != null && !scnParam.equals(replayAt)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.UNEQUAL_SCN_AND_BUILD_INDEX_AT).build() - .buildException(); - } - } - - private static Properties 
filterKnownNonProperties(Properties info) { - Properties prunedProperties = info; - for (String property : CONNECTION_PROPERTIES) { - if (info.containsKey(property)) { - if (prunedProperties == info) { - prunedProperties = PropertiesUtil.deepCopy(info); + out.println(); + for (int i = 1; i <= columnCount; i++) { + int displayWidth = md.getColumnDisplaySize(i); + out.print(Strings.padStart("", displayWidth, '-')); + out.print(' '); } - prunedProperties.remove(property); - } - } - return prunedProperties; - } - - private ImmutableMap getImmutableCustomTracingAnnotations() { - Builder result = ImmutableMap.builder(); - result.putAll(JDBCUtil.getAnnotations(url, info)); - if (getSCN() != null) { - result.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, getSCN().toString()); - } - if (getTenantId() != null) { - result.put(PhoenixRuntime.TENANT_ID_ATTRIB, getTenantId() - .getString()); - } - return result.build(); - } - - public boolean isInternalConnection() { - return isInternalConnection; - } - - /** - * Add connection to the internal childConnections queue - * This method is thread safe - * @param connection - */ - public void addChildConnection(PhoenixConnection connection) { - childConnections.add(connection); - } - - /** - * Method to remove child connection from childConnections Queue - * - * @param connection - */ - public void removeChildConnection(PhoenixConnection connection) { - childConnections.remove(connection); - } - - /** - * Method to fetch child connections count from childConnections Queue - * - * @return int count - */ - @VisibleForTesting - public int getChildConnectionsCount() { - return childConnections.size(); - } - - public Sampler getSampler() { - return this.sampler; - } - - public void setSampler(Sampler sampler) throws SQLException { - this.sampler = sampler; - } - - public Map getCustomTracingAnnotations() { - return customTracingAnnotations; - } - - public int executeStatements(Reader reader, List binds, - PrintStream out) throws IOException, SQLException { - int bindsOffset = 0; - int nStatements = 0; - PhoenixStatementParser parser = new PhoenixStatementParser(reader); - try { - while (true) { - PhoenixPreparedStatement stmt = null; - try { - stmt = new PhoenixPreparedStatement(this, parser); - ParameterMetaData paramMetaData = stmt - .getParameterMetaData(); - for (int i = 0; i < paramMetaData.getParameterCount(); i++) { - stmt.setObject(i + 1, binds.get(bindsOffset + i)); - } - long start = EnvironmentEdgeManager.currentTimeMillis(); - boolean isQuery = stmt.execute(); - if (isQuery) { - ResultSet rs = stmt.getResultSet(); - if (!rs.next()) { - if (out != null) { - out.println("no rows selected"); - } - } else { - int columnCount = 0; - if (out != null) { - ResultSetMetaData md = rs.getMetaData(); - columnCount = md.getColumnCount(); - for (int i = 1; i <= columnCount; i++) { - int displayWidth = md - .getColumnDisplaySize(i); - String label = md.getColumnLabel(i); - if (md.isSigned(i)) { - out.print(displayWidth < label.length() ? label - .substring(0, displayWidth) - : Strings.padStart(label, - displayWidth, ' ')); - out.print(' '); - } else { - out.print(displayWidth < label.length() ? 
label - .substring(0, displayWidth) - : Strings.padEnd( - md.getColumnLabel(i), - displayWidth, ' ')); - out.print(' '); - } - } - out.println(); - for (int i = 1; i <= columnCount; i++) { - int displayWidth = md - .getColumnDisplaySize(i); - out.print(Strings.padStart("", - displayWidth, '-')); - out.print(' '); - } - out.println(); - } - do { - if (out != null) { - ResultSetMetaData md = rs.getMetaData(); - for (int i = 1; i <= columnCount; i++) { - int displayWidth = md - .getColumnDisplaySize(i); - String value = rs.getString(i); - String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT - : value; - if (md.isSigned(i)) { - out.print(Strings.padStart( - valueString, displayWidth, - ' ')); - } else { - out.print(Strings.padEnd( - valueString, displayWidth, - ' ')); - } - out.print(' '); - } - out.println(); - } - } while (rs.next()); - } - } else if (out != null) { - int updateCount = stmt.getUpdateCount(); - if (updateCount >= 0) { - out.println((updateCount == 0 ? "no" : updateCount) - + (updateCount == 1 ? " row " : " rows ") - + stmt.getUpdateOperation().toString()); - } - } - bindsOffset += paramMetaData.getParameterCount(); - double elapsedDuration = ((EnvironmentEdgeManager.currentTimeMillis() - start) / 1000.0); - out.println("Time: " + elapsedDuration + " sec(s)\n"); - nStatements++; - } finally { - if (stmt != null) { - stmt.close(); + out.println(); + } + do { + if (out != null) { + ResultSetMetaData md = rs.getMetaData(); + for (int i = 1; i <= columnCount; i++) { + int displayWidth = md.getColumnDisplaySize(i); + String value = rs.getString(i); + String valueString = value == null ? QueryConstants.NULL_DISPLAY_TEXT : value; + if (md.isSigned(i)) { + out.print(Strings.padStart(valueString, displayWidth, ' ')); + } else { + out.print(Strings.padEnd(valueString, displayWidth, ' ')); } + out.print(' '); + } + out.println(); } + } while (rs.next()); } - } catch (EOFException e) { - } - return nStatements; - } - - public @Nullable PName getTenantId() { - return tenantId; - } - - public Long getSCN() { - return scn; - } - - public boolean isBuildingIndex() { - return buildingIndex; - } - - public int getMutateBatchSize() { - return mutateBatchSize; - } - - public long getMutateBatchSizeBytes() { - return mutateBatchSizeBytes; - } - - public PMetaData getMetaDataCache() { - return getQueryServices().getMetaDataCache(); - } - - private boolean prune(PTable table) { - long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP - : scn; - return (table.getType() != PTableType.SYSTEM && (table - .getTimeStamp() >= maxTimestamp || (table.getTenantId() != null && !Objects - .equal(tenantId, table.getTenantId())))); - } - - /** - * Similar to {@link #getTable(String, String, Long)} but returns the most recent - * PTable - */ - public PTable getTable(String tenantId, String fullTableName) - throws SQLException { - return getTable(tenantId, fullTableName, HConstants.LATEST_TIMESTAMP); - } - - /** - * Returns the PTable as of the timestamp provided. This method can be used to fetch tenant - * specific PTable through a global connection. A null timestamp would result in the client side - * metadata cache being used (ie. in case table metadata is already present it'll be returned). 
- * To get the latest metadata use {@link #getTable(String, String)} - * @param tenantId - * @param fullTableName - * @param timestamp - * @return PTable - * @throws SQLException - * @throws NullPointerException if conn or fullTableName is null - * @throws IllegalArgumentException if timestamp is negative - */ - public PTable getTable(@Nullable String tenantId, String fullTableName, - @Nullable Long timestamp) throws SQLException { - checkNotNull(fullTableName); - if (timestamp != null) { - checkArgument(timestamp >= 0); - } - PTable table; - PName pTenantId = (tenantId == null) ? null : PNameFactory.newName(tenantId); - try { - PTableRef tableref = getTableRef(new PTableKey(pTenantId, fullTableName)); - if (timestamp == null - || (tableref != null && tableref.getResolvedTimeStamp() == timestamp)) { - table = tableref.getTable(); - } else { - throw new TableNotFoundException(fullTableName); + } else if (out != null) { + int updateCount = stmt.getUpdateCount(); + if (updateCount >= 0) { + out.println((updateCount == 0 ? "no" : updateCount) + + (updateCount == 1 ? " row " : " rows ") + stmt.getUpdateOperation().toString()); } - } catch (TableNotFoundException e) { - table = getTableNoCache(pTenantId, fullTableName, timestamp); - } - return table; - } - - public PTable getTableNoCache(PName tenantId, String name, long timestamp) throws SQLException { - String schemaName = SchemaUtil.getSchemaNameFromFullName(name); - String tableName = SchemaUtil.getTableNameFromFullName(name); - MetaDataProtocol.MetaDataMutationResult result = - new MetaDataClient(this).updateCache(tenantId, schemaName, tableName, false, - timestamp); - if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { - throw new TableNotFoundException(schemaName, tableName); - } - return result.getTable(); - } - - public PTable getTableNoCache(PName tenantId, String name) throws SQLException { - String schemaName = SchemaUtil.getSchemaNameFromFullName(name); - String tableName = SchemaUtil.getTableNameFromFullName(name); - MetaDataProtocol.MetaDataMutationResult result = - new MetaDataClient(this).updateCache(tenantId, schemaName, - tableName, true); - if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { - throw new TableNotFoundException(schemaName, tableName); - } - return result.getTable(); - } - @VisibleForTesting - public PTable getTableNoCache(String name) throws SQLException { - return getTableNoCache(getTenantId(), name); - } - - /** - * Returns the most recent PTable fetched from the server without updating the CQSI cache. - */ - public PTable getTableFromServerNoCache(byte[] schemaName, byte[] tableName) - throws SQLException { - if (schemaName == null) { - schemaName = ByteUtil.EMPTY_BYTE_ARRAY; - } - MetaDataProtocol.MetaDataMutationResult result = - getQueryServices().getTable(getTenantId(), schemaName, - tableName, HConstants.LATEST_TIMESTAMP, HConstants.LATEST_TIMESTAMP); - if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { - throw new TableNotFoundException(new String(schemaName), new String(tableName)); - } - return result.getTable(); - } - - /** - * Returns the table if it is found in the client metadata cache. If the metadata of this - * table has changed since it was put in the cache these changes will not necessarily be - * reflected in the returned table. If the table is not found, makes a call to the server to - * fetch the latest metadata of the table. 
This is different than how a table is resolved when - * it is referenced from a query (a call is made to the server to fetch the latest metadata of - * the table depending on the UPDATE_CACHE_FREQUENCY property) - * See https://issues.apache.org/jira/browse/PHOENIX-4475 - * @param name requires a pre-normalized table name or a pre-normalized schema and table name - * @return - * @throws SQLException - */ - public PTable getTable(String name) throws SQLException { - return getTable(new PTableKey(getTenantId(), name)); - } - - public PTable getTable(PTableKey key) throws SQLException { - PTable table; - try { - table = getTableRef(key).getTable(); - } catch (TableNotFoundException e) { - table = getTableNoCache(key.getName()); - } - return table; - } - - public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { - PTableRef tableRef = getQueryServices().getMetaDataCache().getTableRef(key); - if (prune(tableRef.getTable())) { - throw new TableNotFoundException(key.getName()); - } - return tableRef; - } - - public boolean prune(PFunction function) { - long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP - : scn; - return (function.getTimeStamp() >= maxTimestamp || (function - .getTenantId() != null && !Objects.equal(tenantId, - function.getTenantId()))); - } - - public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { - PFunction function = getQueryServices().getMetaDataCache().getFunction(key); - return prune(function) ? null : function; - } - protected MutationState newMutationState(int maxSize, long maxSizeBytes) { - return new MutationState(maxSize, maxSizeBytes, this); - } - - public MutationState getMutationState() { - return mutationState; - } - - public String getDatePattern() { - return datePattern; - } - - public String getTimePattern() { - return timePattern; - } - - public String getTimestampPattern() { - return timestampPattern; - } - - public Format getFormatter(PDataType type) { - return formatters.get(type); - } - - public String getURL() { - return url; - } - - public ConnectionQueryServices getQueryServices() { - return services; - } - - @Override - public void clearWarnings() throws SQLException { - } - - private void closeStatements() throws SQLException { - try { - mutationState.rollback(); - } catch (SQLException e) { - // ignore any exceptions while rolling back + } + bindsOffset += paramMetaData.getParameterCount(); + double elapsedDuration = ((EnvironmentEdgeManager.currentTimeMillis() - start) / 1000.0); + out.println("Time: " + elapsedDuration + " sec(s)\n"); + nStatements++; } finally { - try { - // create new set to prevent close of statements from modifying this collection. - // TODO This could be optimized out by decoupling closing the stmt and removing it - // from the connection. - HashSet statementsCopy = new HashSet<>(this.statements); - SQLCloseables.closeAll(statementsCopy); - } finally { - statements.clear(); - } - } - } - - void checkOpen() throws SQLException { - if (isClosed || isClosing) { - throw reasonForClose != null - ? reasonForClose - : new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED) - .build() - - .buildException(); + if (stmt != null) { + stmt.close(); + } } - } - - /** - * Close the Phoenix connection and also store the reason for it getting closed. - * - * @param reasonForClose The reason for closing the phoenix connection to be set as state - * in phoenix connection. - * @throws SQLException if error happens when closing. 
- * @see #close() - */ - public void close(SQLException reasonForClose) throws SQLException { - if (isClosed || isClosing) { - return; + } + } catch (EOFException e) { + } + return nStatements; + } + + public @Nullable PName getTenantId() { + return tenantId; + } + + public Long getSCN() { + return scn; + } + + public boolean isBuildingIndex() { + return buildingIndex; + } + + public int getMutateBatchSize() { + return mutateBatchSize; + } + + public long getMutateBatchSizeBytes() { + return mutateBatchSizeBytes; + } + + public PMetaData getMetaDataCache() { + return getQueryServices().getMetaDataCache(); + } + + private boolean prune(PTable table) { + long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + return (table.getType() != PTableType.SYSTEM && (table.getTimeStamp() >= maxTimestamp + || (table.getTenantId() != null && !Objects.equal(tenantId, table.getTenantId())))); + } + + /** + * Similar to {@link #getTable(String, String, Long)} but returns the most recent PTable + */ + public PTable getTable(String tenantId, String fullTableName) throws SQLException { + return getTable(tenantId, fullTableName, HConstants.LATEST_TIMESTAMP); + } + + /** + * Returns the PTable as of the timestamp provided. This method can be used to fetch tenant + * specific PTable through a global connection. A null timestamp would result in the client side + * metadata cache being used (ie. in case table metadata is already present it'll be returned). To + * get the latest metadata use {@link #getTable(String, String)} + * @throws NullPointerException if conn or fullTableName is null + * @throws IllegalArgumentException if timestamp is negative + */ + public PTable getTable(@Nullable String tenantId, String fullTableName, @Nullable Long timestamp) + throws SQLException { + checkNotNull(fullTableName); + if (timestamp != null) { + checkArgument(timestamp >= 0); + } + PTable table; + PName pTenantId = (tenantId == null) ? 
null : PNameFactory.newName(tenantId); + try { + PTableRef tableref = getTableRef(new PTableKey(pTenantId, fullTableName)); + if (timestamp == null || (tableref != null && tableref.getResolvedTimeStamp() == timestamp)) { + table = tableref.getTable(); + } else { + throw new TableNotFoundException(fullTableName); + } + } catch (TableNotFoundException e) { + table = getTableNoCache(pTenantId, fullTableName, timestamp); + } + return table; + } + + public PTable getTableNoCache(PName tenantId, String name, long timestamp) throws SQLException { + String schemaName = SchemaUtil.getSchemaNameFromFullName(name); + String tableName = SchemaUtil.getTableNameFromFullName(name); + MetaDataProtocol.MetaDataMutationResult result = + new MetaDataClient(this).updateCache(tenantId, schemaName, tableName, false, timestamp); + if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { + throw new TableNotFoundException(schemaName, tableName); + } + return result.getTable(); + } + + public PTable getTableNoCache(PName tenantId, String name) throws SQLException { + String schemaName = SchemaUtil.getSchemaNameFromFullName(name); + String tableName = SchemaUtil.getTableNameFromFullName(name); + MetaDataProtocol.MetaDataMutationResult result = + new MetaDataClient(this).updateCache(tenantId, schemaName, tableName, true); + if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { + throw new TableNotFoundException(schemaName, tableName); + } + return result.getTable(); + } + + @VisibleForTesting + public PTable getTableNoCache(String name) throws SQLException { + return getTableNoCache(getTenantId(), name); + } + + /** + * Returns the most recent PTable fetched from the server without updating the CQSI cache. + */ + public PTable getTableFromServerNoCache(byte[] schemaName, byte[] tableName) throws SQLException { + if (schemaName == null) { + schemaName = ByteUtil.EMPTY_BYTE_ARRAY; + } + MetaDataProtocol.MetaDataMutationResult result = getQueryServices().getTable(getTenantId(), + schemaName, tableName, HConstants.LATEST_TIMESTAMP, HConstants.LATEST_TIMESTAMP); + if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { + throw new TableNotFoundException(new String(schemaName), new String(tableName)); + } + return result.getTable(); + } + + /** + * Returns the table if it is found in the client metadata cache. If the metadata of this table + * has changed since it was put in the cache these changes will not necessarily be reflected in + * the returned table. If the table is not found, makes a call to the server to fetch the latest + * metadata of the table. 
This is different than how a table is resolved when it is referenced + * from a query (a call is made to the server to fetch the latest metadata of the table depending + * on the UPDATE_CACHE_FREQUENCY property) See https://issues.apache.org/jira/browse/PHOENIX-4475 + * @param name requires a pre-normalized table name or a pre-normalized schema and table name + */ + public PTable getTable(String name) throws SQLException { + return getTable(new PTableKey(getTenantId(), name)); + } + + public PTable getTable(PTableKey key) throws SQLException { + PTable table; + try { + table = getTableRef(key).getTable(); + } catch (TableNotFoundException e) { + table = getTableNoCache(key.getName()); + } + return table; + } + + public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { + PTableRef tableRef = getQueryServices().getMetaDataCache().getTableRef(key); + if (prune(tableRef.getTable())) { + throw new TableNotFoundException(key.getName()); + } + return tableRef; + } + + public boolean prune(PFunction function) { + long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + return (function.getTimeStamp() >= maxTimestamp + || (function.getTenantId() != null && !Objects.equal(tenantId, function.getTenantId()))); + } + + public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { + PFunction function = getQueryServices().getMetaDataCache().getFunction(key); + return prune(function) ? null : function; + } + + protected MutationState newMutationState(int maxSize, long maxSizeBytes) { + return new MutationState(maxSize, maxSizeBytes, this); + } + + public MutationState getMutationState() { + return mutationState; + } + + public String getDatePattern() { + return datePattern; + } + + public String getTimePattern() { + return timePattern; + } + + public String getTimestampPattern() { + return timestampPattern; + } + + public Format getFormatter(PDataType type) { + return formatters.get(type); + } + + public String getURL() { + return url; + } + + public ConnectionQueryServices getQueryServices() { + return services; + } + + @Override + public void clearWarnings() throws SQLException { + } + + private void closeStatements() throws SQLException { + try { + mutationState.rollback(); + } catch (SQLException e) { + // ignore any exceptions while rolling back + } finally { + try { + // create new set to prevent close of statements from modifying this collection. + // TODO This could be optimized out by decoupling closing the stmt and removing it + // from the connection. + HashSet statementsCopy = new HashSet<>(this.statements); + SQLCloseables.closeAll(statementsCopy); + } finally { + statements.clear(); + } + } + } + + void checkOpen() throws SQLException { + if (isClosed || isClosing) { + throw reasonForClose != null + ? reasonForClose + : new SQLExceptionInfo.Builder(SQLExceptionCode.CONNECTION_CLOSED).build() + + .buildException(); + } + } + + /** + * Close the Phoenix connection and also store the reason for it getting closed. + * @param reasonForClose The reason for closing the phoenix connection to be set as state in + * phoenix connection. + * @throws SQLException if error happens when closing. + * @see #close() + */ + public void close(SQLException reasonForClose) throws SQLException { + if (isClosed || isClosing) { + return; + } + this.reasonForClose = reasonForClose; + close(); + } + + // A connection can be closed by calling thread, or by the high availability (HA) framework. 
+ // Making this logic synchronized will enforce a connection is closed only once. + // Does this need to be synchronized? + @Override + synchronized public void close() throws SQLException { + if (isClosed || isClosing) { + return; + } + + String connectionQueryServiceName = this.services.getConfiguration().get(QUERY_SERVICES_NAME); + try { + isClosing = true; + TableMetricsManager.pushMetricsFromConnInstanceMethod(getMutationMetrics()); + if (!(reasonForClose instanceof FailoverSQLException)) { + // If the reason for close is because of failover, the metrics will be kept for + // consolidation by the wrapper PhoenixFailoverConnection object. + clearMetrics(); + } + try { + closeStatements(); + if (childConnections != null) { + SQLCloseables.closeAllQuietly(childConnections); } - this.reasonForClose = reasonForClose; - close(); - } - - // A connection can be closed by calling thread, or by the high availability (HA) framework. - // Making this logic synchronized will enforce a connection is closed only once. - //Does this need to be synchronized? - @Override - synchronized public void close() throws SQLException { - if (isClosed || isClosing) { - return; + if (traceScope != null) { + traceScope.close(); } - - String connectionQueryServiceName = - this.services.getConfiguration().get(QUERY_SERVICES_NAME); + } finally { + services.removeConnection(this); + } + + } finally { + isClosing = false; + isClosed = true; + if (isInternalConnection()) { + GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.decrement(); + long currentInternalConnectionCount = + this.getQueryServices().getConnectionCount(isInternalConnection()); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, currentInternalConnectionCount); + ConnectionQueryServicesMetricsManager + .updateConnectionQueryServiceOpenInternalConnectionHistogram( + currentInternalConnectionCount, connectionQueryServiceName); + } else { + GLOBAL_OPEN_PHOENIX_CONNECTIONS.decrement(); + long currentConnectionCount = + this.getQueryServices().getConnectionCount(isInternalConnection()); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + OPEN_PHOENIX_CONNECTIONS_COUNTER, currentConnectionCount); + ConnectionQueryServicesMetricsManager.updateConnectionQueryServiceOpenConnectionHistogram( + currentConnectionCount, connectionQueryServiceName); + } + } + } + + @Override + public void commit() throws SQLException { + CallRunner.run(new CallRunner.CallableThrowable() { + @Override + public Void call() throws SQLException { + checkOpen(); try { - isClosing = true; - TableMetricsManager.pushMetricsFromConnInstanceMethod(getMutationMetrics()); - if(!(reasonForClose instanceof FailoverSQLException)) { - // If the reason for close is because of failover, the metrics will be kept for - // consolidation by the wrapper PhoenixFailoverConnection object. 
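Because close() returns early once isClosed/isClosing is set, a redundant close() from either the calling thread or the HA framework is harmless. A minimal sketch, assuming a placeholder JDBC URL:

import java.sql.Connection;
import java.sql.DriverManager;

public class CloseOnceSketch {
  public static void main(String[] args) throws Exception {
    Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
    try {
      // ... create statements and run queries here ...
    } finally {
      conn.close();
      // A second close() is a no-op: the synchronized close() returns early
      // once isClosed/isClosing has been set.
      conn.close();
    }
  }
}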
- clearMetrics(); - } - try { - closeStatements(); - if (childConnections != null) { - SQLCloseables.closeAllQuietly(childConnections); - } - if (traceScope != null) { - traceScope.close(); - } - } finally { - services.removeConnection(this); - } - + mutationState.commit(); } finally { - isClosing = false; - isClosed = true; - if (isInternalConnection()){ - GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.decrement(); - long currentInternalConnectionCount = - this.getQueryServices().getConnectionCount(isInternalConnection()); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, currentInternalConnectionCount); - ConnectionQueryServicesMetricsManager - .updateConnectionQueryServiceOpenInternalConnectionHistogram( - currentInternalConnectionCount, connectionQueryServiceName); - } else { - GLOBAL_OPEN_PHOENIX_CONNECTIONS.decrement(); - long currentConnectionCount = - this.getQueryServices().getConnectionCount(isInternalConnection()); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, - OPEN_PHOENIX_CONNECTIONS_COUNTER, currentConnectionCount); - ConnectionQueryServicesMetricsManager - .updateConnectionQueryServiceOpenConnectionHistogram( - currentConnectionCount, connectionQueryServiceName); - } - } - } - - @Override - public void commit() throws SQLException { - CallRunner.run(new CallRunner.CallableThrowable() { - @Override - public Void call() throws SQLException { - checkOpen(); - try { - mutationState.commit(); - } finally { - mutationState.resetExecuteMutationTimeMap(); - } - return null; - } - }, Tracing.withTracing(this, "committing mutations")); - statementExecutionCounter = 0; - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) - throws SQLException { - checkOpen(); - PDataType arrayPrimitiveType = PDataType.fromSqlTypeName(typeName); - return PArrayDataType.instantiatePhoenixArray(arrayPrimitiveType, - elements); - } - - @Override - public Blob createBlob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Clob createClob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public NClob createNClob() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Statement createStatement() throws SQLException { - checkOpen(); - PhoenixStatement statement = new PhoenixStatement(this); - statements.add(statement); - return statement; - } - - /** - * Back-door way to inject processing into walking through a result set - * - * @param statementFactory - * @return PhoenixStatement - * @throws SQLException - */ - public PhoenixStatement createStatement( - PhoenixStatementFactory statementFactory) throws SQLException { - PhoenixStatement statement = statementFactory.newStatement(this); - statements.add(statement); - return statement; - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - checkOpen(); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY - || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { - throw new SQLFeatureNotSupportedException(); - } - return createStatement(); - } - - @Override - public Statement createStatement(int resultSetType, - int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - checkOpen(); - if 
(resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { - throw new SQLFeatureNotSupportedException(); - } - return createStatement(resultSetType, resultSetConcurrency); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) - throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean getAutoCommit() throws SQLException { - return isAutoCommit; - } - - public boolean getAutoFlush() { - return isAutoFlush; - } - - public void setAutoFlush(boolean autoFlush) throws SQLException { - if (autoFlush - && !this.services.getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH) - .build().buildException(); - } - this.isAutoFlush = autoFlush; - } - - public void flush() throws SQLException { - mutationState.sendUncommitted(); - } - - public void setTransactionContext(PhoenixTransactionContext txContext) - throws SQLException { - if (!this.services.getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT) - .build().buildException(); + mutationState.resetExecuteMutationTimeMap(); } - this.mutationState.rollback(); - this.mutationState = new MutationState(this.mutationState.getMaxSize(), - this.mutationState.getMaxSizeBytes(), this, txContext); - - // Write data to HBase after each statement execution as the commit may - // not - // come through Phoenix APIs. - setAutoFlush(true); - } - - public Consistency getConsistency() { - return this.consistency; - } - - @Override - public String getCatalog() throws SQLException { - return tenantId == null ? "" : tenantId.getString(); - } - - @Override - public Properties getClientInfo() throws SQLException { - // Defensive copy so client cannot change - return new Properties(info); - } - - @Override - public String getClientInfo(String name) { - return info.getProperty(name); - } - - @Override - public int getHoldability() throws SQLException { - return ResultSet.CLOSE_CURSORS_AT_COMMIT; - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - checkOpen(); - return new PhoenixDatabaseMetaData(this); - } - - public UUID getUniqueID() { - return this.uniqueID; - } - - @Override - public int getTransactionIsolation() throws SQLException { - boolean transactionsEnabled = getQueryServices().getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); - return transactionsEnabled ? 
Connection.TRANSACTION_REPEATABLE_READ - : Connection.TRANSACTION_READ_COMMITTED; - } - - @Override - public Map> getTypeMap() throws SQLException { - return Collections.emptyMap(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { return null; - } - - @Override - public boolean isClosed() throws SQLException { - return isClosed; - } - - public boolean isClosing() throws SQLException { - return isClosing; - } - - @Override - public boolean isReadOnly() throws SQLException { - return readOnly; - } - - @Override - public boolean isValid(int timeout) throws SQLException { - // TODO: run query here or ping - return !isClosed && !isClosing; - } - - @Override - public String nativeSQL(String sql) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - checkOpen(); - PhoenixPreparedStatement statement = new PhoenixPreparedStatement(this, - sql); - statements.add(statement); - return statement; - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - checkOpen(); - // Ignore autoGeneratedKeys, and just execute the statement. - return prepareStatement(sql); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) - throws SQLException { - checkOpen(); - // Ignore columnIndexes, and just execute the statement. - return prepareStatement(sql); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { - checkOpen(); - // Ignore columnNames, and just execute the statement. 
- return prepareStatement(sql); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { + } + }, Tracing.withTracing(this, "committing mutations")); + statementExecutionCounter = 0; + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + checkOpen(); + PDataType arrayPrimitiveType = PDataType.fromSqlTypeName(typeName); + return PArrayDataType.instantiatePhoenixArray(arrayPrimitiveType, elements); + } + + @Override + public Blob createBlob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Clob createClob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob createNClob() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Statement createStatement() throws SQLException { + checkOpen(); + PhoenixStatement statement = new PhoenixStatement(this); + statements.add(statement); + return statement; + } + + /** + * Back-door way to inject processing into walking through a result set + */ + public PhoenixStatement createStatement(PhoenixStatementFactory statementFactory) + throws SQLException { + PhoenixStatement statement = statementFactory.newStatement(this); + statements.add(statement); + return statement; + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + checkOpen(); + if ( + resultSetType != ResultSet.TYPE_FORWARD_ONLY + || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY + ) { + throw new SQLFeatureNotSupportedException(); + } + return createStatement(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + checkOpen(); + if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { + throw new SQLFeatureNotSupportedException(); + } + return createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean getAutoCommit() throws SQLException { + return isAutoCommit; + } + + public boolean getAutoFlush() { + return isAutoFlush; + } + + public void setAutoFlush(boolean autoFlush) throws SQLException { + if ( + autoFlush && !this.services.getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, + QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_AUTO_FLUSH) + .build().buildException(); + } + this.isAutoFlush = autoFlush; + } + + public void flush() throws SQLException { + mutationState.sendUncommitted(); + } + + public void setTransactionContext(PhoenixTransactionContext txContext) throws SQLException { + if ( + !this.services.getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, + QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT) + .build().buildException(); + } + this.mutationState.rollback(); + this.mutationState = new MutationState(this.mutationState.getMaxSize(), + this.mutationState.getMaxSizeBytes(), this, txContext); + + // Write data to HBase after each statement 
execution as the commit may + // not + // come through Phoenix APIs. + setAutoFlush(true); + } + + public Consistency getConsistency() { + return this.consistency; + } + + @Override + public String getCatalog() throws SQLException { + return tenantId == null ? "" : tenantId.getString(); + } + + @Override + public Properties getClientInfo() throws SQLException { + // Defensive copy so client cannot change + return new Properties(info); + } + + @Override + public String getClientInfo(String name) { + return info.getProperty(name); + } + + @Override + public int getHoldability() throws SQLException { + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + checkOpen(); + return new PhoenixDatabaseMetaData(this); + } + + public UUID getUniqueID() { + return this.uniqueID; + } + + @Override + public int getTransactionIsolation() throws SQLException { + boolean transactionsEnabled = getQueryServices().getProps().getBoolean( + QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); + return transactionsEnabled + ? Connection.TRANSACTION_REPEATABLE_READ + : Connection.TRANSACTION_READ_COMMITTED; + } + + @Override + public Map> getTypeMap() throws SQLException { + return Collections.emptyMap(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public boolean isClosed() throws SQLException { + return isClosed; + } + + public boolean isClosing() throws SQLException { + return isClosing; + } + + @Override + public boolean isReadOnly() throws SQLException { + return readOnly; + } + + @Override + public boolean isValid(int timeout) throws SQLException { + // TODO: run query here or ping + return !isClosed && !isClosing; + } + + @Override + public String nativeSQL(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + checkOpen(); + PhoenixPreparedStatement statement = new PhoenixPreparedStatement(this, sql); + statements.add(statement); + return statement; + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + // Ignore autoGeneratedKeys, and just execute the statement. + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + // Ignore columnIndexes, and just execute the statement. + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + checkOpen(); + // Ignore columnNames, and just execute the statement. 
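The prepareStatement overloads above accept but ignore generated-key and column hints, and reject every result-set combination other than TYPE_FORWARD_ONLY, CONCUR_READ_ONLY and CLOSE_CURSORS_AT_COMMIT. A minimal sketch, assuming a placeholder URL, table and column names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class StatementOptionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // The only combination Phoenix accepts; other values throw SQLFeatureNotSupportedException.
      try (PreparedStatement ps = conn.prepareStatement(
          "SELECT NAME FROM MY_TABLE WHERE ID = ?",
          ResultSet.TYPE_FORWARD_ONLY,
          ResultSet.CONCUR_READ_ONLY,
          ResultSet.CLOSE_CURSORS_AT_COMMIT)) {
        ps.setLong(1, 42L);
        try (ResultSet rs = ps.executeQuery()) {
          while (rs.next()) {
            System.out.println(rs.getString(1));
          }
        }
      }
    }
  }
}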
+ return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + checkOpen(); + if ( + resultSetType != ResultSet.TYPE_FORWARD_ONLY + || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY + ) { + throw new SQLFeatureNotSupportedException(); + } + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + checkOpen(); + if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { + throw new SQLFeatureNotSupportedException(); + } + return prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void rollback() throws SQLException { + CallRunner.run(new CallRunner.CallableThrowable() { + @Override + public Void call() throws SQLException { checkOpen(); - if (resultSetType != ResultSet.TYPE_FORWARD_ONLY - || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { - throw new SQLFeatureNotSupportedException(); - } - return prepareStatement(sql); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - checkOpen(); - if (resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { - throw new SQLFeatureNotSupportedException(); - } - return prepareStatement(sql, resultSetType, resultSetConcurrency); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void rollback() throws SQLException { - CallRunner.run(new CallRunner.CallableThrowable() { - @Override - public Void call() throws SQLException { - checkOpen(); - mutationState.rollback(); - return null; - } - }, Tracing.withTracing(this, "rolling back")); - statementExecutionCounter = 0; - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setAutoCommit(boolean isAutoCommit) throws SQLException { - checkOpen(); - this.isAutoCommit = isAutoCommit; - } - - public void setConsistency(Consistency val) { - this.consistency = val; - } - - @Override - public void setCatalog(String catalog) throws SQLException { - checkOpen(); - if (!this.getCatalog().equalsIgnoreCase(catalog)) { - // allow noop calls to pass through. 
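With auto-commit disabled, UPSERTs are buffered in the connection's MutationState and only sent to HBase by commit(); rollback() discards them instead. A sketch, assuming a placeholder URL and table:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class CommitRollbackSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      conn.setAutoCommit(false);
      try (PreparedStatement ps =
          conn.prepareStatement("UPSERT INTO MY_TABLE (ID, NAME) VALUES (?, ?)")) {
        ps.setLong(1, 1L);
        ps.setString(2, "first");
        ps.executeUpdate(); // buffered in MutationState, not yet sent
      }
      conn.commit();        // flushes the buffered mutations to HBase
      // conn.rollback();   // would discard the buffered mutations instead
    }
  }
}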
- throw new SQLFeatureNotSupportedException(); - } - // TODO: - // if (catalog == null) { - // tenantId = null; - // } else { - // tenantId = PNameFactory.newName(catalog); - // } - } - - @Override - public void setClientInfo(Properties properties) - throws SQLClientInfoException { - throw new UnsupportedOperationException(); - } - - @Override - public void setClientInfo(String name, String value) - throws SQLClientInfoException { - throw new UnsupportedOperationException(); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - checkOpen(); - this.readOnly = readOnly; - } - - @Override - public Savepoint setSavepoint() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - checkOpen(); - boolean transactionsEnabled = getQueryServices().getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); - if (level == Connection.TRANSACTION_SERIALIZABLE) { - throw new SQLFeatureNotSupportedException(); - } - if (!transactionsEnabled - && level == Connection.TRANSACTION_REPEATABLE_READ) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL) - .build().buildException(); - } - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage( - this.getClass().getName() - + " not unwrappable from " - + iface.getName()).build().buildException(); - } - return (T) this; - } - - @Override - public void setSchema(String schema) throws SQLException { - checkOpen(); - this.schema = schema; - } - - @Override - public String getSchema() throws SQLException { - return SchemaUtil.normalizeIdentifier(this.schema); - } - - public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { - return getQueryServices().getMetaDataCache().getSchema(key); - } - - @Override - public void abort(Executor executor) throws SQLException { - checkOpen(); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) - throws SQLException { - checkOpen(); - } - - @Override - public int getNetworkTimeout() throws SQLException { - // TODO Auto-generated method stub - return 0; - } - - private boolean useMetaDataCache(PTable table) { - return table.getType() == PTableType.SYSTEM - || table.getUpdateCacheFrequency() != 0 - || (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - getQueryServices().getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)) != 0; - - } - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - getQueryServices().addTable(table, resolvedTime); - } - - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTime) - throws SQLException { - - getQueryServices().updateResolvedTimestamp(table, resolvedTime); - } - - 
@Override - public void addFunction(PFunction function) throws SQLException { - getQueryServices().addFunction(function); - } - - @Override - public void addSchema(PSchema schema) throws SQLException { - getQueryServices().addSchema(schema); - } - - @Override - public void removeTable(PName tenantId, String tableName, - String parentTableName, long tableTimeStamp) throws SQLException { - getQueryServices().removeTable(tenantId, tableName, parentTableName, - tableTimeStamp); - } - - @Override - public void removeFunction(PName tenantId, String functionName, - long tableTimeStamp) throws SQLException { - getQueryServices().removeFunction(tenantId, functionName, - tableTimeStamp); - } - - @Override - public void removeColumn(PName tenantId, String tableName, - List columnsToRemove, long tableTimeStamp, - long tableSeqNum, long resolvedTime) throws SQLException { - getQueryServices().removeColumn(tenantId, tableName, columnsToRemove, - tableTimeStamp, tableSeqNum, resolvedTime); - } - - protected boolean removeStatement(PhoenixStatement statement) - throws SQLException { - return statements.remove(statement); - } - - public KeyValueBuilder getKeyValueBuilder() { - return this.services.getKeyValueBuilder(); - } - - /** - * Used to track executions of {@link Statement}s and - * {@link PreparedStatement}s that were created from this connection before - * commit or rollback. 0-based. Used to associate partial save errors with - * SQL statements invoked by users. - * - * @see CommitException - * @see #incrementStatementExecutionCounter() - */ - public int getStatementExecutionCounter() { - return statementExecutionCounter; - } - - public void incrementStatementExecutionCounter() { - statementExecutionCounter++; - if (connectionActivityLogger.isLevelEnabled(ActivityLogInfo.OP_STMTS.getLogLevel())) { - connectionActivityLogger.log(ActivityLogInfo.OP_STMTS, String.valueOf(statementExecutionCounter)); - } - } - - public TraceScope getTraceScope() { - return traceScope; - } - - public void setTraceScope(TraceScope traceScope) { - this.traceScope = traceScope; - } - - @Override - public Map> getMutationMetrics() { - return mutationState.getMutationMetricQueue().aggregate(); - } - - @Override - public Map> getReadMetrics() { - return mutationState.getReadMetricQueue() != null ? mutationState - .getReadMetricQueue().aggregate() : Collections - .> emptyMap(); - } - - @Override - public boolean isRequestLevelMetricsEnabled() { - return isRequestLevelMetricsEnabled; - } - - @Override - public void clearMetrics() { - mutationState.getMutationMetricQueue().clearMetrics(); - if (mutationState.getReadMetricQueue() != null) { - mutationState.getReadMetricQueue().clearMetrics(); - } - } - - /** - * Returns true if this connection is being used to upgrade the data due to - * PHOENIX-2067 and false otherwise. - */ - public boolean isDescVarLengthRowKeyUpgrade() { - return isDescVarLengthRowKeyUpgrade; - } - - /** - * Added for tests only. Do not use this elsewhere. - */ - public ParallelIteratorFactory getIteratorFactory() { - return parallelIteratorFactory; - } - - /** - * Added for testing purposes. Do not use this elsewhere. 
- */ - public void setIteratorFactory( - ParallelIteratorFactory parallelIteratorFactory) { - this.parallelIteratorFactory = parallelIteratorFactory; - } - - public void addIteratorForLeaseRenewal(@Nonnull TableResultIterator itr) { - if (services.isRenewingLeasesEnabled()) { - checkNotNull(itr); - scannerQueue.add(new WeakReference(itr)); - } - } - - public LinkedBlockingQueue> getScanners() { - return scannerQueue; - } - - @VisibleForTesting - @Nonnull - public TableResultIteratorFactory getTableResultIteratorFactory() { - return tableResultIteratorFactory; - } - - @VisibleForTesting - public void setTableResultIteratorFactory(TableResultIteratorFactory factory) { - checkNotNull(factory); - this.tableResultIteratorFactory = factory; - } - - /** - * Added for testing purposes. Do not use this elsewhere. - */ - @VisibleForTesting - public void setIsClosing(boolean imitateIsClosing) { - isClosing = imitateIsClosing; - } - - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - getQueryServices().removeSchema(schema, schemaTimeStamp); - - } - - public boolean isRunningUpgrade() { - return isRunningUpgrade; - } - - public void setRunningUpgrade(boolean isRunningUpgrade) { - this.isRunningUpgrade = isRunningUpgrade; - } - - public LogLevel getLogLevel(){ - return this.logLevel; - } - - public LogLevel getAuditLogLevel(){ - return this.auditLogLevel; - } - - public Double getLogSamplingRate(){ - return this.logSamplingRate; - } - - /** - * - * @return source of operation - */ - public String getSourceOfOperation() { - return sourceOfOperation; - } - - public String getDateFormatTimeZoneId() { - return dateFormatTimeZoneId; - } - - public boolean isApplyTimeZoneDisplacement() { - return isApplyTimeZoneDisplacement; - } - - public String getActivityLog() { - return getActivityLogger().getActivityLog(); - } - - public ConnectionActivityLogger getActivityLogger() { - return this.connectionActivityLogger; - } - - public void setActivityLogger(ConnectionActivityLogger connectionActivityLogger) { - this.connectionActivityLogger = connectionActivityLogger; - } + mutationState.rollback(); + return null; + } + }, Tracing.withTracing(this, "rolling back")); + statementExecutionCounter = 0; + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setAutoCommit(boolean isAutoCommit) throws SQLException { + checkOpen(); + this.isAutoCommit = isAutoCommit; + } + + public void setConsistency(Consistency val) { + this.consistency = val; + } + + @Override + public void setCatalog(String catalog) throws SQLException { + checkOpen(); + if (!this.getCatalog().equalsIgnoreCase(catalog)) { + // allow noop calls to pass through. 
+ throw new SQLFeatureNotSupportedException(); + } + // TODO: + // if (catalog == null) { + // tenantId = null; + // } else { + // tenantId = PNameFactory.newName(catalog); + // } + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + throw new UnsupportedOperationException(); + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + throw new UnsupportedOperationException(); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + checkOpen(); + this.readOnly = readOnly; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + checkOpen(); + boolean transactionsEnabled = getQueryServices().getProps().getBoolean( + QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); + if (level == Connection.TRANSACTION_SERIALIZABLE) { + throw new SQLFeatureNotSupportedException(); + } + if (!transactionsEnabled && level == Connection.TRANSACTION_REPEATABLE_READ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL) + .build().buildException(); + } + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } + + @Override + public void setSchema(String schema) throws SQLException { + checkOpen(); + this.schema = schema; + } + + @Override + public String getSchema() throws SQLException { + return SchemaUtil.normalizeIdentifier(this.schema); + } + + public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { + return getQueryServices().getMetaDataCache().getSchema(key); + } + + @Override + public void abort(Executor executor) throws SQLException { + checkOpen(); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + checkOpen(); + } + + @Override + public int getNetworkTimeout() throws SQLException { + // TODO Auto-generated method stub + return 0; + } + + private boolean useMetaDataCache(PTable table) { + return table.getType() == PTableType.SYSTEM || table.getUpdateCacheFrequency() != 0 + || (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( + getQueryServices().getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)) + != 0; + + } + + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + getQueryServices().addTable(table, resolvedTime); + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException { + + getQueryServices().updateResolvedTimestamp(table, resolvedTime); + } + + @Override + public void 
addFunction(PFunction function) throws SQLException { + getQueryServices().addFunction(function); + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + getQueryServices().addSchema(schema); + } + + @Override + public void removeTable(PName tenantId, String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + getQueryServices().removeTable(tenantId, tableName, parentTableName, tableTimeStamp); + } + + @Override + public void removeFunction(PName tenantId, String functionName, long tableTimeStamp) + throws SQLException { + getQueryServices().removeFunction(tenantId, functionName, tableTimeStamp); + } + + @Override + public void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { + getQueryServices().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, + tableSeqNum, resolvedTime); + } + + protected boolean removeStatement(PhoenixStatement statement) throws SQLException { + return statements.remove(statement); + } + + public KeyValueBuilder getKeyValueBuilder() { + return this.services.getKeyValueBuilder(); + } + + /** + * Used to track executions of {@link Statement}s and {@link PreparedStatement}s that were created + * from this connection before commit or rollback. 0-based. Used to associate partial save errors + * with SQL statements invoked by users. + * @see CommitException + * @see #incrementStatementExecutionCounter() + */ + public int getStatementExecutionCounter() { + return statementExecutionCounter; + } + + public void incrementStatementExecutionCounter() { + statementExecutionCounter++; + if (connectionActivityLogger.isLevelEnabled(ActivityLogInfo.OP_STMTS.getLogLevel())) { + connectionActivityLogger.log(ActivityLogInfo.OP_STMTS, + String.valueOf(statementExecutionCounter)); + } + } + + public TraceScope getTraceScope() { + return traceScope; + } + + public void setTraceScope(TraceScope traceScope) { + this.traceScope = traceScope; + } + + @Override + public Map> getMutationMetrics() { + return mutationState.getMutationMetricQueue().aggregate(); + } + + @Override + public Map> getReadMetrics() { + return mutationState.getReadMetricQueue() != null + ? mutationState.getReadMetricQueue().aggregate() + : Collections.> emptyMap(); + } + + @Override + public boolean isRequestLevelMetricsEnabled() { + return isRequestLevelMetricsEnabled; + } + + @Override + public void clearMetrics() { + mutationState.getMutationMetricQueue().clearMetrics(); + if (mutationState.getReadMetricQueue() != null) { + mutationState.getReadMetricQueue().clearMetrics(); + } + } + + /** + * Returns true if this connection is being used to upgrade the data due to PHOENIX-2067 and false + * otherwise. + */ + public boolean isDescVarLengthRowKeyUpgrade() { + return isDescVarLengthRowKeyUpgrade; + } + + /** + * Added for tests only. Do not use this elsewhere. + */ + public ParallelIteratorFactory getIteratorFactory() { + return parallelIteratorFactory; + } + + /** + * Added for testing purposes. Do not use this elsewhere. 
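getMutationMetrics(), getReadMetrics() and clearMetrics() expose and reset the per-table request-level metrics accumulated by this connection. A sketch, assuming a placeholder table and that request-level metrics are enabled in the client configuration:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class ConnectionMetricsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      conn.setAutoCommit(true);
      try (Statement stmt = conn.createStatement()) {
        stmt.executeUpdate("UPSERT INTO MY_TABLE (ID, NAME) VALUES (1, 'first')");
      }
      PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
      // Per-table mutation and read metrics aggregated on this connection.
      System.out.println(pconn.getMutationMetrics());
      System.out.println(pconn.getReadMetrics());
      pconn.clearMetrics(); // reset before the next unit of work
    }
  }
}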
+ */ + public void setIteratorFactory(ParallelIteratorFactory parallelIteratorFactory) { + this.parallelIteratorFactory = parallelIteratorFactory; + } + + public void addIteratorForLeaseRenewal(@Nonnull TableResultIterator itr) { + if (services.isRenewingLeasesEnabled()) { + checkNotNull(itr); + scannerQueue.add(new WeakReference(itr)); + } + } + + public LinkedBlockingQueue> getScanners() { + return scannerQueue; + } + + @VisibleForTesting + @Nonnull + public TableResultIteratorFactory getTableResultIteratorFactory() { + return tableResultIteratorFactory; + } + + @VisibleForTesting + public void setTableResultIteratorFactory(TableResultIteratorFactory factory) { + checkNotNull(factory); + this.tableResultIteratorFactory = factory; + } + + /** + * Added for testing purposes. Do not use this elsewhere. + */ + @VisibleForTesting + public void setIsClosing(boolean imitateIsClosing) { + isClosing = imitateIsClosing; + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + getQueryServices().removeSchema(schema, schemaTimeStamp); + + } + + public boolean isRunningUpgrade() { + return isRunningUpgrade; + } + + public void setRunningUpgrade(boolean isRunningUpgrade) { + this.isRunningUpgrade = isRunningUpgrade; + } + + public LogLevel getLogLevel() { + return this.logLevel; + } + + public LogLevel getAuditLogLevel() { + return this.auditLogLevel; + } + + public Double getLogSamplingRate() { + return this.logSamplingRate; + } + + /** Returns source of operation */ + public String getSourceOfOperation() { + return sourceOfOperation; + } + + public String getDateFormatTimeZoneId() { + return dateFormatTimeZoneId; + } + + public boolean isApplyTimeZoneDisplacement() { + return isApplyTimeZoneDisplacement; + } + + public String getActivityLog() { + return getActivityLogger().getActivityLog(); + } + + public ConnectionActivityLogger getActivityLogger() { + return this.connectionActivityLogger; + } + + public void setActivityLogger(ConnectionActivityLogger connectionActivityLogger) { + this.connectionActivityLogger = connectionActivityLogger; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java index e46eb927d50..0717e6366b5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,6 +39,7 @@ import org.apache.phoenix.compile.ExpressionProjector; import org.apache.phoenix.compile.RowProjector; import org.apache.phoenix.compile.StatementContext; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; @@ -70,1895 +71,1964 @@ import org.apache.phoenix.schema.types.PSmallint; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * * JDBC DatabaseMetaData implementation of Phoenix. - * */ public class PhoenixDatabaseMetaData implements DatabaseMetaData { - public static final int FAMILY_NAME_INDEX = 4; - public static final int COLUMN_NAME_INDEX = 3; - public static final int TABLE_NAME_INDEX = 2; - public static final int SCHEMA_NAME_INDEX = 1; - public static final int TENANT_ID_INDEX = 0; - - public static final int TYPE_INDEX = 2; - public static final int FUNTION_NAME_INDEX = 1; - - public static final String SYSTEM_CATALOG_SCHEMA = QueryConstants.SYSTEM_SCHEMA_NAME; - public static final byte[] SYSTEM_CATALOG_SCHEMA_BYTES = QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; - public static final String SYSTEM_SCHEMA_NAME = QueryConstants.SYSTEM_SCHEMA_NAME; - public static final byte[] SYSTEM_SCHEMA_NAME_BYTES = QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; - public static final TableName SYSTEM_SCHEMA_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_SCHEMA_NAME); - - public static final String SYSTEM_CATALOG_TABLE = "CATALOG"; - public static final byte[] SYSTEM_CATALOG_TABLE_BYTES = Bytes.toBytes(SYSTEM_CATALOG_TABLE); - public static final String SYSTEM_CATALOG = SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\""; - public static final String SYSTEM_CATALOG_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, - SYSTEM_CATALOG_TABLE); - public static final String MAPPED_SYSTEM_CATALOG_NAME = - SYSTEM_CATALOG_NAME.replace(QueryConstants.NAME_SEPARATOR, - QueryConstants.NAMESPACE_SEPARATOR); - public static final TableName SYSTEM_CATALOG_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_CATALOG_NAME); - public static final byte[] SYSTEM_CATALOG_NAME_BYTES = Bytes.toBytes(SYSTEM_CATALOG_NAME); - public static final String SYSTEM_STATS_TABLE = "STATS"; - public static final String SYSTEM_STATS_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_STATS_TABLE); - public static final String IS_NAMESPACE_MAPPED = "IS_NAMESPACE_MAPPED"; - public static final byte[] IS_NAMESPACE_MAPPED_BYTES = Bytes.toBytes(IS_NAMESPACE_MAPPED); - public static final byte[] SYSTEM_STATS_NAME_BYTES = Bytes.toBytes(SYSTEM_STATS_NAME); - public static final byte[] SYSTEM_STATS_TABLE_BYTES = Bytes.toBytes(SYSTEM_STATS_TABLE); - public static 
final TableName SYSTEM_STATS_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_STATS_NAME); - public static final String SYSTEM_CATALOG_ALIAS = "\"SYSTEM.TABLE\""; - - public static final byte[] SYSTEM_SEQUENCE_FAMILY_BYTES = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; - public static final String SYSTEM_SEQUENCE_SCHEMA = SYSTEM_CATALOG_SCHEMA; - public static final byte[] SYSTEM_SEQUENCE_SCHEMA_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_SCHEMA); - public static final String SYSTEM_SEQUENCE_TABLE = "SEQUENCE"; - public static final byte[] SYSTEM_SEQUENCE_TABLE_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_TABLE); - public static final String SYSTEM_SEQUENCE = SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_SEQUENCE_TABLE + "\""; - public static final String SYSTEM_SEQUENCE_NAME = SchemaUtil.getTableName(SYSTEM_SEQUENCE_SCHEMA, SYSTEM_SEQUENCE_TABLE); - public static final byte[] SYSTEM_SEQUENCE_NAME_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_NAME); - public static final TableName SYSTEM_SEQUENCE_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_SEQUENCE_NAME); - - public static final String TABLE_NAME = "TABLE_NAME"; - public static final byte[] TABLE_NAME_BYTES = Bytes.toBytes(TABLE_NAME); - public static final String TABLE_TYPE = "TABLE_TYPE"; - public static final byte[] TABLE_TYPE_BYTES = Bytes.toBytes(TABLE_TYPE); - public static final String PHYSICAL_NAME = "PHYSICAL_NAME"; - public static final byte[] PHYSICAL_NAME_BYTES = Bytes.toBytes(PHYSICAL_NAME); - - public static final String COLUMN_FAMILY = "COLUMN_FAMILY"; - public static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); - public static final String TABLE_CAT = "TABLE_CAT"; - public static final String TABLE_CATALOG = "TABLE_CATALOG"; - public static final String TABLE_SCHEM = "TABLE_SCHEM"; - public static final byte[] TABLE_SCHEM_BYTES = Bytes.toBytes(TABLE_SCHEM); - public static final String LOGICAL_TABLE_NAME = "LOGICAL_TABLE_NAME"; - public static final String LOGICAL_PARENT_NAME = "LOGICAL_PARENT_NAME"; - public static final String REMARKS = "REMARKS"; - public static final String TYPE_SCHEM = "TYPE_SCHEM"; - public static final String SELF_REFERENCING_COL_NAME = "SELF_REFERENCING_COL_NAME"; - public static final String REF_GENERATION = "REF_GENERATION"; - public static final String PK_NAME = "PK_NAME"; - public static final byte[] PK_NAME_BYTES = Bytes.toBytes(PK_NAME); - public static final String TABLE_SEQ_NUM = "TABLE_SEQ_NUM"; - public static final byte[] TABLE_SEQ_NUM_BYTES = Bytes.toBytes(TABLE_SEQ_NUM); - public static final String COLUMN_COUNT = "COLUMN_COUNT"; - public static final byte[] COLUMN_COUNT_BYTES = Bytes.toBytes(COLUMN_COUNT); - public static final String SALT_BUCKETS = "SALT_BUCKETS"; - public static final byte[] SALT_BUCKETS_BYTES = Bytes.toBytes(SALT_BUCKETS); - public static final String STORE_NULLS = "STORE_NULLS"; - public static final byte[] STORE_NULLS_BYTES = Bytes.toBytes(STORE_NULLS); - - public static final String DATA_TABLE_NAME = "DATA_TABLE_NAME"; - public static final byte[] DATA_TABLE_NAME_BYTES = Bytes.toBytes(DATA_TABLE_NAME); - public static final String INDEX_STATE = "INDEX_STATE"; - public static final byte[] INDEX_STATE_BYTES = Bytes.toBytes(INDEX_STATE); - - public static final String TENANT_ID = "TENANT_ID"; - public static final byte[] TENANT_ID_BYTES = Bytes.toBytes(TENANT_ID); - - public static final String COLUMN_NAME = "COLUMN_NAME"; - public static final String DATA_TYPE = "DATA_TYPE"; - public static final byte[] DATA_TYPE_BYTES = Bytes.toBytes(DATA_TYPE); - public static final String 
TYPE_NAME = "TYPE_NAME"; - public static final String COLUMN_SIZE = "COLUMN_SIZE"; - public static final byte[] COLUMN_SIZE_BYTES = Bytes.toBytes(COLUMN_SIZE); - public static final String BUFFER_LENGTH = "BUFFER_LENGTH"; - public static final String DECIMAL_DIGITS = "DECIMAL_DIGITS"; - public static final byte[] DECIMAL_DIGITS_BYTES = Bytes.toBytes(DECIMAL_DIGITS); - public static final String NUM_PREC_RADIX = "NUM_PREC_RADIX"; - public static final String NULLABLE = "NULLABLE"; - public static final byte[] NULLABLE_BYTES = Bytes.toBytes(NULLABLE); - public static final String COLUMN_DEF = "COLUMN_DEF"; - public static final byte[] COLUMN_DEF_BYTES = Bytes.toBytes(COLUMN_DEF); - public static final String SQL_DATA_TYPE = "SQL_DATA_TYPE"; - public static final String SQL_DATETIME_SUB = "SQL_DATETIME_SUB"; - public static final String CHAR_OCTET_LENGTH = "CHAR_OCTET_LENGTH"; - public static final String ORDINAL_POSITION = "ORDINAL_POSITION"; - public static final byte[] ORDINAL_POSITION_BYTES = Bytes.toBytes(ORDINAL_POSITION); - public static final String IS_NULLABLE = "IS_NULLABLE"; - public static final String SCOPE_CATALOG = "SCOPE_CATALOG"; - public static final String SCOPE_SCHEMA = "SCOPE_SCHEMA"; - public static final String SCOPE_TABLE = "SCOPE_TABLE"; - public static final String SOURCE_DATA_TYPE = "SOURCE_DATA_TYPE"; - public static final String IS_AUTOINCREMENT = "IS_AUTOINCREMENT"; - public static final String SORT_ORDER = "SORT_ORDER"; - public static final byte[] SORT_ORDER_BYTES = Bytes.toBytes(SORT_ORDER); - public static final String IMMUTABLE_ROWS = "IMMUTABLE_ROWS"; - public static final byte[] IMMUTABLE_ROWS_BYTES = Bytes.toBytes(IMMUTABLE_ROWS); - public static final String DEFAULT_COLUMN_FAMILY_NAME = "DEFAULT_COLUMN_FAMILY"; - public static final byte[] DEFAULT_COLUMN_FAMILY_NAME_BYTES = Bytes.toBytes(DEFAULT_COLUMN_FAMILY_NAME); - public static final String VIEW_STATEMENT = "VIEW_STATEMENT"; - public static final byte[] VIEW_STATEMENT_BYTES = Bytes.toBytes(VIEW_STATEMENT); - public static final String DISABLE_WAL = "DISABLE_WAL"; - public static final byte[] DISABLE_WAL_BYTES = Bytes.toBytes(DISABLE_WAL); - public static final String MULTI_TENANT = "MULTI_TENANT"; - public static final byte[] MULTI_TENANT_BYTES = Bytes.toBytes(MULTI_TENANT); - public static final String VIEW_TYPE = "VIEW_TYPE"; - public static final byte[] VIEW_TYPE_BYTES = Bytes.toBytes(VIEW_TYPE); - public static final String INDEX_TYPE = "INDEX_TYPE"; - public static final byte[] INDEX_TYPE_BYTES = Bytes.toBytes(INDEX_TYPE); - public static final String LINK_TYPE = "LINK_TYPE"; - public static final byte[] LINK_TYPE_BYTES = Bytes.toBytes(LINK_TYPE); - public static final String TASK_TYPE = "TASK_TYPE"; - public static final byte[] TASK_TYPE_BYTES = Bytes.toBytes(TASK_TYPE); - public static final String TASK_TS = "TASK_TS"; - public static final byte[] TASK_TS_BYTES = Bytes.toBytes(TASK_TS); - public static final String TASK_STATUS = "TASK_STATUS"; - public static final String TASK_END_TS = "TASK_END_TS"; - public static final String TASK_PRIORITY = "TASK_PRIORITY"; - public static final String TASK_DATA = "TASK_DATA"; - public static final String TASK_TABLE_TTL = "864000"; - public static final String NEW_PHYS_TABLE_NAME = "NEW_PHYS_TABLE_NAME"; - public static final String TRANSFORM_TYPE = "TRANSFORM_TYPE"; - public static final String TRANSFORM_STATUS = "STATUS"; - public static final String TRANSFORM_JOB_ID = "JOB_ID"; - public static final String TRANSFORM_RETRY_COUNT = "RETRY_COUNT"; - public 
static final String TRANSFORM_START_TS = "START_TS"; - public static final String TRANSFORM_LAST_STATE_TS = "END_TS"; - public static final String OLD_METADATA = "OLD_METADATA"; - public static final String NEW_METADATA = "NEW_METADATA"; - public static final String TRANSFORM_FUNCTION = "TRANSFORM_FUNCTION"; - public static final String TRANSFORM_TABLE_TTL = "7776000"; // 90 days - - public static final int TTL_FOR_MUTEX = 15 * 60; // 15min - public static final String ARRAY_SIZE = "ARRAY_SIZE"; - public static final byte[] ARRAY_SIZE_BYTES = Bytes.toBytes(ARRAY_SIZE); - public static final String VIEW_CONSTANT = "VIEW_CONSTANT"; - public static final byte[] VIEW_CONSTANT_BYTES = Bytes.toBytes(VIEW_CONSTANT); - public static final String IS_VIEW_REFERENCED = "IS_VIEW_REFERENCED"; - public static final byte[] IS_VIEW_REFERENCED_BYTES = Bytes.toBytes(IS_VIEW_REFERENCED); - public static final String VIEW_INDEX_ID = "VIEW_INDEX_ID"; - public static final byte[] VIEW_INDEX_ID_BYTES = Bytes.toBytes(VIEW_INDEX_ID); - public static final String VIEW_INDEX_ID_DATA_TYPE = "VIEW_INDEX_ID_DATA_TYPE"; - public static final byte[] VIEW_INDEX_ID_DATA_TYPE_BYTES = Bytes.toBytes(VIEW_INDEX_ID_DATA_TYPE); - public static final String BASE_COLUMN_COUNT = "BASE_COLUMN_COUNT"; - public static final byte[] BASE_COLUMN_COUNT_BYTES = Bytes.toBytes(BASE_COLUMN_COUNT); - public static final String IS_ROW_TIMESTAMP = "IS_ROW_TIMESTAMP"; - public static final byte[] IS_ROW_TIMESTAMP_BYTES = Bytes.toBytes(IS_ROW_TIMESTAMP); - - public static final String TABLE_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY; - public static final byte[] TABLE_FAMILY_BYTES = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; - public static final byte[] PENDING_DISABLE_COUNT_BYTES = Bytes.toBytes("PENDING_DISABLE_COUNT"); - - public static final String TYPE_SEQUENCE = "SEQUENCE"; - public static final String SYSTEM_FUNCTION_TABLE = "FUNCTION"; - public static final String SYSTEM_FUNCTION = SYSTEM_CATALOG_SCHEMA + QueryConstants.NAME_SEPARATOR + "\"FUNCTION\""; - public static final String SYSTEM_FUNCTION_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_FUNCTION_TABLE); - public static final byte[] SYSTEM_FUNCTION_NAME_BYTES = Bytes.toBytes(SYSTEM_FUNCTION_NAME); - public static final TableName SYSTEM_FUNCTION_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_FUNCTION_NAME); - - public static final String FUNCTION_NAME = "FUNCTION_NAME"; - public static final byte[] FUNCTION_NAME_BYTES = Bytes.toBytes(FUNCTION_NAME); - public static final String CLASS_NAME = "CLASS_NAME"; - public static final byte[] CLASS_NAME_BYTES = Bytes.toBytes(CLASS_NAME); - public static final String JAR_PATH = "JAR_PATH"; - public static final byte[] JAR_PATH_BYTES = Bytes.toBytes(JAR_PATH); - public static final String TYPE = "TYPE"; - public static final byte[] TYPE_BYTES = Bytes.toBytes(TYPE); - public static final String ARG_POSITION = "ARG_POSITION"; - public static final byte[] ARG_POSITION_TYPE = Bytes.toBytes(ARG_POSITION); - public static final String RETURN_TYPE = "RETURN_TYPE"; - public static final byte[] RETURN_TYPE_BYTES = Bytes.toBytes(RETURN_TYPE); - public static final String IS_ARRAY = "IS_ARRAY"; - public static final byte[] IS_ARRAY_BYTES = Bytes.toBytes(IS_ARRAY); - public static final String IS_CONSTANT = "IS_CONSTANT"; - public static final byte[] IS_CONSTANT_BYTES = Bytes.toBytes(IS_CONSTANT); - public static final String DEFAULT_VALUE = "DEFAULT_VALUE"; - public static final byte[] DEFAULT_VALUE_BYTES = Bytes.toBytes(DEFAULT_VALUE); - 
public static final String NUM_ARGS = "NUM_ARGS"; - public static final byte[] NUM_ARGS_BYTES = Bytes.toBytes(NUM_ARGS); - - public static final String SEQUENCE_SCHEMA = "SEQUENCE_SCHEMA"; - public static final String SEQUENCE_NAME = "SEQUENCE_NAME"; - public static final String CURRENT_VALUE = "CURRENT_VALUE"; - public static final byte[] CURRENT_VALUE_BYTES = Bytes.toBytes(CURRENT_VALUE); - public static final String START_WITH = "START_WITH"; - public static final byte[] START_WITH_BYTES = Bytes.toBytes(START_WITH); - // MIN_VALUE, MAX_VALUE, CYCLE_FLAG and LIMIT_FLAG were added in 3.1/4.1 - public static final String MIN_VALUE = "MIN_VALUE"; - public static final byte[] MIN_VALUE_BYTES = Bytes.toBytes(MIN_VALUE); - public static final String MAX_VALUE = "MAX_VALUE"; - public static final byte[] MAX_VALUE_BYTES = Bytes.toBytes(MAX_VALUE); - public static final String INCREMENT_BY = "INCREMENT_BY"; - public static final byte[] INCREMENT_BY_BYTES = Bytes.toBytes(INCREMENT_BY); - public static final String CACHE_SIZE = "CACHE_SIZE"; - public static final byte[] CACHE_SIZE_BYTES = Bytes.toBytes(CACHE_SIZE); - public static final String CYCLE_FLAG = "CYCLE_FLAG"; - public static final byte[] CYCLE_FLAG_BYTES = Bytes.toBytes(CYCLE_FLAG); - public static final String LIMIT_REACHED_FLAG = "LIMIT_REACHED_FLAG"; - public static final byte[] LIMIT_REACHED_FLAG_BYTES = Bytes.toBytes(LIMIT_REACHED_FLAG); - public static final String KEY_SEQ = "KEY_SEQ"; - public static final byte[] KEY_SEQ_BYTES = Bytes.toBytes(KEY_SEQ); - public static final String SUPERTABLE_NAME = "SUPERTABLE_NAME"; - - public static final String TYPE_ID = "TYPE_ID"; - public static final String INDEX_DISABLE_TIMESTAMP = "INDEX_DISABLE_TIMESTAMP"; - public static final byte[] INDEX_DISABLE_TIMESTAMP_BYTES = Bytes.toBytes(INDEX_DISABLE_TIMESTAMP); - - public static final String REGION_NAME = "REGION_NAME"; - public static final byte[] REGION_NAME_BYTES = Bytes.toBytes(REGION_NAME); - public static final String GUIDE_POSTS = "GUIDE_POSTS"; - public static final byte[] GUIDE_POSTS_BYTES = Bytes.toBytes(GUIDE_POSTS); - public static final String GUIDE_POSTS_COUNT = "GUIDE_POSTS_COUNT"; - public static final byte[] GUIDE_POSTS_COUNT_BYTES = Bytes.toBytes(GUIDE_POSTS_COUNT); - public static final String GUIDE_POSTS_WIDTH = "GUIDE_POSTS_WIDTH"; - public static final byte[] GUIDE_POSTS_WIDTH_BYTES = Bytes.toBytes(GUIDE_POSTS_WIDTH); - public static final String GUIDE_POSTS_ROW_COUNT = "GUIDE_POSTS_ROW_COUNT"; - public static final byte[] GUIDE_POSTS_ROW_COUNT_BYTES = Bytes.toBytes(GUIDE_POSTS_ROW_COUNT); - public static final String MIN_KEY = "MIN_KEY"; - public static final byte[] MIN_KEY_BYTES = Bytes.toBytes(MIN_KEY); - public static final String MAX_KEY = "MAX_KEY"; - public static final byte[] MAX_KEY_BYTES = Bytes.toBytes(MAX_KEY); - public static final String LAST_STATS_UPDATE_TIME = "LAST_STATS_UPDATE_TIME"; - public static final byte[] LAST_STATS_UPDATE_TIME_BYTES = Bytes.toBytes(LAST_STATS_UPDATE_TIME); - public static final String GUIDE_POST_KEY = "GUIDE_POST_KEY"; - public static final String ASYNC_REBUILD_TIMESTAMP = "ASYNC_REBUILD_TIMESTAMP"; - public static final byte[] ASYNC_REBUILD_TIMESTAMP_BYTES = Bytes.toBytes(ASYNC_REBUILD_TIMESTAMP); - - public static final String COLUMN_ENCODED_BYTES = "COLUMN_ENCODED_BYTES"; - - public static final String PARENT_TENANT_ID = "PARENT_TENANT_ID"; - public static final byte[] PARENT_TENANT_ID_BYTES = Bytes.toBytes(PARENT_TENANT_ID); - - private static final String TENANT_POS_SHIFT = 
"TENANT_POS_SHIFT"; - private static final byte[] TENANT_POS_SHIFT_BYTES = Bytes.toBytes(TENANT_POS_SHIFT); - - public static final String TRANSACTIONAL = "TRANSACTIONAL"; - public static final byte[] TRANSACTIONAL_BYTES = Bytes.toBytes(TRANSACTIONAL); - - public static final String TRANSACTION_PROVIDER = "TRANSACTION_PROVIDER"; - public static final byte[] TRANSACTION_PROVIDER_BYTES = Bytes.toBytes(TRANSACTION_PROVIDER); - - public static final String PHYSICAL_TABLE_NAME = "PHYSICAL_TABLE_NAME"; - public static final byte[] PHYSICAL_TABLE_NAME_BYTES = Bytes.toBytes(PHYSICAL_TABLE_NAME); - - public static final String UPDATE_CACHE_FREQUENCY = "UPDATE_CACHE_FREQUENCY"; - public static final byte[] UPDATE_CACHE_FREQUENCY_BYTES = Bytes.toBytes(UPDATE_CACHE_FREQUENCY); - - public static final String AUTO_PARTITION_SEQ = "AUTO_PARTITION_SEQ"; - public static final byte[] AUTO_PARTITION_SEQ_BYTES = Bytes.toBytes(AUTO_PARTITION_SEQ); - - public static final String APPEND_ONLY_SCHEMA = "APPEND_ONLY_SCHEMA"; - public static final byte[] APPEND_ONLY_SCHEMA_BYTES = Bytes.toBytes(APPEND_ONLY_SCHEMA); - - public static final String ASYNC_CREATED_DATE = "ASYNC_CREATED_DATE"; - public static final String SEQUENCE_TABLE_TYPE = SYSTEM_SEQUENCE_TABLE; - - public static final String SYNC_INDEX_CREATED_DATE = "SYNC_INDEX_CREATED_DATE"; - public static final String SYSTEM_MUTEX_COLUMN_NAME = "MUTEX_VALUE"; - public static final byte[] SYSTEM_MUTEX_COLUMN_NAME_BYTES = Bytes.toBytes(SYSTEM_MUTEX_COLUMN_NAME); - public static final String SYSTEM_MUTEX_TABLE_NAME = "MUTEX"; - public static final String SYSTEM_MUTEX_NAME = SchemaUtil.getTableName(QueryConstants.SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME); - public static final TableName SYSTEM_MUTEX_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_MUTEX_NAME); - public static final byte[] SYSTEM_MUTEX_NAME_BYTES = Bytes.toBytes(SYSTEM_MUTEX_NAME); - public static final byte[] SYSTEM_MUTEX_FAMILY_NAME_BYTES = TABLE_FAMILY_BYTES; - - private final PhoenixConnection connection; - - public static final int MAX_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "8"); - public static final int MIN_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "6"); - public static final int MIN_RENEW_LEASE_VERSION = VersionUtil.encodeVersion("1", "1", "3"); - public static final int MIN_NAMESPACE_MAPPED_PHOENIX_VERSION = VersionUtil.encodeVersion("4", "8", "0"); - public static final int MIN_PENDING_ACTIVE_INDEX = VersionUtil.encodeVersion("4", "12", "0"); - public static final int MIN_CLIENT_RETRY_INDEX_WRITES = VersionUtil.encodeVersion("4", "14", "0"); - public static final int MIN_TX_CLIENT_SIDE_MAINTENANCE = VersionUtil.encodeVersion("4", "14", "0"); - public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0"); - - // Version below which we should turn off essential column family. - public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "7"); - // Version below which we should disallow usage of mutable secondary indexing. 
- public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10"); - /** Version below which we fall back on the generic KeyValueBuilder */ - public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14"); - - public static final String IMMUTABLE_STORAGE_SCHEME = "IMMUTABLE_STORAGE_SCHEME"; - public static final byte[] STORAGE_SCHEME_BYTES = Bytes.toBytes(IMMUTABLE_STORAGE_SCHEME); - public static final String ENCODING_SCHEME = "ENCODING_SCHEME"; - public static final byte[] ENCODING_SCHEME_BYTES = Bytes.toBytes(ENCODING_SCHEME); - public static final String COLUMN_QUALIFIER = "COLUMN_QUALIFIER"; - public static final byte[] COLUMN_QUALIFIER_BYTES = Bytes.toBytes(COLUMN_QUALIFIER); - public static final String COLUMN_QUALIFIER_COUNTER = "QUALIFIER_COUNTER"; - public static final byte[] COLUMN_QUALIFIER_COUNTER_BYTES = Bytes.toBytes(COLUMN_QUALIFIER_COUNTER); - public static final String USE_STATS_FOR_PARALLELIZATION = "USE_STATS_FOR_PARALLELIZATION"; - public static final byte[] USE_STATS_FOR_PARALLELIZATION_BYTES = Bytes.toBytes(USE_STATS_FOR_PARALLELIZATION); - - // The TTL property will hold the duration after which rows will be marked as expired and - // is stored in column TTL in SYSCAT - public static final String TTL = "TTL"; - public static final byte[] TTL_BYTES = Bytes.toBytes(TTL); - public static final int TTL_NOT_DEFINED = 0; - public static final int DEFAULT_TTL = HConstants.FOREVER; - public static final String PHOENIX_TTL = "PHOENIX_TTL"; - public static final byte[] PHOENIX_TTL_BYTES = Bytes.toBytes(PHOENIX_TTL); - public static final String PHOENIX_TTL_HWM = "PHOENIX_TTL_HWM"; - public static final byte[] PHOENIX_TTL_HWM_BYTES = Bytes.toBytes(PHOENIX_TTL_HWM); - - public static final String LAST_DDL_TIMESTAMP = "LAST_DDL_TIMESTAMP"; - public static final byte[] LAST_DDL_TIMESTAMP_BYTES = Bytes.toBytes(LAST_DDL_TIMESTAMP); - - public static final String CHANGE_DETECTION_ENABLED = "CHANGE_DETECTION_ENABLED"; - public static final byte[] CHANGE_DETECTION_ENABLED_BYTES = - Bytes.toBytes(CHANGE_DETECTION_ENABLED); - - public static final String SCHEMA_VERSION = "SCHEMA_VERSION"; - public static final byte[] SCHEMA_VERSION_BYTES = Bytes.toBytes(SCHEMA_VERSION); - - public static final String EXTERNAL_SCHEMA_ID = "EXTERNAL_SCHEMA_ID"; - public static final byte[] EXTERNAL_SCHEMA_ID_BYTES = Bytes.toBytes(EXTERNAL_SCHEMA_ID); - - public static final String STREAMING_TOPIC_NAME = "STREAMING_TOPIC_NAME"; - public static final byte[] STREAMING_TOPIC_NAME_BYTES = Bytes.toBytes(STREAMING_TOPIC_NAME); - - public static final String ROW_KEY_MATCHER = "ROW_KEY_MATCHER"; - public static final byte[] ROW_KEY_MATCHER_BYTES = Bytes.toBytes(ROW_KEY_MATCHER); - - public static final String INDEX_WHERE = "INDEX_WHERE"; - public static final byte[] INDEX_WHERE_BYTES = Bytes.toBytes(INDEX_WHERE); - - public static final String SYSTEM_CHILD_LINK_TABLE = "CHILD_LINK"; - public static final String SYSTEM_CHILD_LINK_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_CHILD_LINK_TABLE); - public static final byte[] SYSTEM_CHILD_LINK_NAME_BYTES = Bytes.toBytes(SYSTEM_CHILD_LINK_NAME); - public static final byte[] SYSTEM_CHILD_LINK_NAMESPACE_BYTES = - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, true).getName(); - public static final TableName SYSTEM_LINK_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_CHILD_LINK_NAME); - - public static final String SYSTEM_TASK_TABLE = "TASK"; - public static final String 
SYSTEM_TASK_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_TASK_TABLE); - public static final byte[] SYSTEM_TASK_NAME_BYTES = Bytes.toBytes(SYSTEM_TASK_NAME); - public static final TableName SYSTEM_TASK_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_TASK_NAME); - - public static final String SYSTEM_TRANSFORM_TABLE = "TRANSFORM"; - public static final String SYSTEM_TRANSFORM_NAME = SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_TRANSFORM_TABLE); - public static final String MAX_LOOKBACK_AGE = BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE; - public static final byte[] MAX_LOOKBACK_AGE_BYTES = Bytes.toBytes(MAX_LOOKBACK_AGE); - - public static final String CDC_INCLUDE_NAME = "INCLUDE"; - public static final String CDC_INCLUDE_TABLE = "CDC_INCLUDE"; - public static final byte[] CDC_INCLUDE_BYTES = Bytes.toBytes(CDC_INCLUDE_TABLE); - - //SYSTEM:LOG - public static final String SYSTEM_LOG_TABLE = "LOG"; - public static final String SYSTEM_LOG_NAME = - SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_LOG_TABLE); - public static final String QUERY_ID = "QUERY_ID"; - public static final String USER = "USER"; - public static final String CLIENT_IP = "CLIENT_IP"; - public static final String QUERY = "QUERY"; - public static final String EXPLAIN_PLAN = "EXPLAIN_PLAN"; - public static final String TOTAL_EXECUTION_TIME = "TOTAL_EXECUTION_TIME"; - public static final String NO_OF_RESULTS_ITERATED = "NO_OF_RESULTS_ITERATED"; - public static final String QUERY_STATUS = "QUERY_STATUS"; - public static final String EXCEPTION_TRACE = "EXCEPTION_TRACE"; - public static final String GLOBAL_SCAN_DETAILS = "GLOBAL_SCAN_DETAILS"; - public static final String SCAN_METRICS_JSON = "SCAN_METRICS_JSON"; - public static final String START_TIME = "START_TIME"; - public static final String BIND_PARAMETERS = "BIND_PARAMETERS"; - - - PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException { - this.connection = connection; - } - - private PhoenixResultSet getEmptyResultSet() throws SQLException { - PhoenixStatement stmt = new PhoenixStatement(connection); - stmt.closeOnCompletion(); - return new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new StatementContext(stmt, false)); - } - - @Override - public boolean allProceduresAreCallable() throws SQLException { - return false; - } - - @Override - public boolean allTablesAreSelectable() throws SQLException { - return true; - } - - @Override - public boolean autoCommitFailureClosesAllResultSets() throws SQLException { - return false; - } - - @Override - public boolean dataDefinitionCausesTransactionCommit() throws SQLException { - return false; - } - - @Override - public boolean dataDefinitionIgnoredInTransactions() throws SQLException { - return false; - } - - @Override - public boolean deletesAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { - return false; - } - - @Override - public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, - String attributeNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) - throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getCatalogSeparator() throws SQLException { - return "."; - } - - @Override - public String getCatalogTerm() throws SQLException { - return 
"Tenant"; - } - - @Override - public ResultSet getCatalogs() throws SQLException { - PreparedStatement stmt = QueryUtil.getCatalogsStmt(connection); - stmt.closeOnCompletion(); - return stmt.executeQuery(); - } - - @Override - public ResultSet getClientInfoProperties() throws SQLException { - return getEmptyResultSet(); - } - - @Override - public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) - throws SQLException { - return getEmptyResultSet(); - } - - public static final String GLOBAL_TENANANTS_ONLY = "null"; - - - private static void appendConjunction(StringBuilder buf) { - buf.append(buf.length() == 0 ? "" : " and "); - } - - // While creating the PColumns we don't care about the ordinal positiion so set it to 1 - private static final PColumnImpl TENANT_ID_COLUMN = new PColumnImpl(PNameFactory.newName(TENANT_ID), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl TABLE_SCHEM_COLUMN = new PColumnImpl(PNameFactory.newName(TABLE_SCHEM), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl TABLE_NAME_COLUMN = new PColumnImpl(PNameFactory.newName(TABLE_NAME), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl COLUMN_NAME_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_NAME), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl DATA_TYPE_COLUMN = new PColumnImpl(PNameFactory.newName(DATA_TYPE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl TYPE_NAME_COLUMN = new PColumnImpl(PNameFactory.newName(TYPE_NAME), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(TYPE_NAME), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl COLUMN_SIZE_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, COLUMN_SIZE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl BUFFER_LENGTH_COLUMN = new PColumnImpl(PNameFactory.newName(BUFFER_LENGTH), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(BUFFER_LENGTH), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl DECIMAL_DIGITS_COLUMN = new PColumnImpl(PNameFactory.newName(DECIMAL_DIGITS), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, DECIMAL_DIGITS_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl 
NUM_PREC_RADIX_COLUMN = new PColumnImpl(PNameFactory.newName(NUM_PREC_RADIX), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(NUM_PREC_RADIX), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl NULLABLE_COLUMN = new PColumnImpl(PNameFactory.newName(NULLABLE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, NULLABLE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl REMARKS_COLUMN = new PColumnImpl(PNameFactory.newName(REMARKS), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(REMARKS), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl COLUMN_DEF_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_DEF), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(COLUMN_DEF), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SQL_DATA_TYPE_COLUMN = new PColumnImpl(PNameFactory.newName(SQL_DATA_TYPE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SQL_DATA_TYPE), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SQL_DATETIME_SUB_COLUMN = new PColumnImpl(PNameFactory.newName(SQL_DATETIME_SUB), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SQL_DATETIME_SUB), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl CHAR_OCTET_LENGTH_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(CHAR_OCTET_LENGTH), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl ORDINAL_POSITION_COLUMN = new PColumnImpl(PNameFactory.newName(ORDINAL_POSITION), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, ORDINAL_POSITION_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl IS_NULLABLE_COLUMN = new PColumnImpl(PNameFactory.newName(IS_NULLABLE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(IS_NULLABLE), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SCOPE_CATALOG_COLUMN = new PColumnImpl(PNameFactory.newName(SCOPE_CATALOG), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SCOPE_CATALOG), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SCOPE_SCHEMA_COLUMN = new PColumnImpl(PNameFactory.newName(SCOPE_SCHEMA), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SCOPE_SCHEMA), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SCOPE_TABLE_COLUMN = new PColumnImpl(PNameFactory.newName(SCOPE_TABLE), - 
PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SCOPE_TABLE), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl SOURCE_DATA_TYPE_COLUMN = new PColumnImpl(PNameFactory.newName(SOURCE_DATA_TYPE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SOURCE_DATA_TYPE), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl IS_AUTOINCREMENT_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PSmallint.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(SCOPE_CATALOG), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl ARRAY_SIZE_COLUMN = new PColumnImpl(PNameFactory.newName(ARRAY_SIZE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, ARRAY_SIZE_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl COLUMN_FAMILY_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_FAMILY), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, COLUMN_FAMILY_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl TYPE_ID_COLUMN = new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), - PNameFactory.newName(TABLE_FAMILY_BYTES), PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, Bytes.toBytes(TYPE_ID), HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl VIEW_CONSTANT_COLUMN = new PColumnImpl(PNameFactory.newName(VIEW_CONSTANT), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarbinary.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, VIEW_CONSTANT_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl MULTI_TENANT_COLUMN = new PColumnImpl(PNameFactory.newName(MULTI_TENANT), - PNameFactory.newName(TABLE_FAMILY_BYTES), PBoolean.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, MULTI_TENANT_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl KEY_SEQ_COLUMN = new PColumnImpl(PNameFactory.newName(KEY_SEQ), - PNameFactory.newName(TABLE_FAMILY_BYTES), PSmallint.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, KEY_SEQ_BYTES, HConstants.LATEST_TIMESTAMP); - private static final PColumnImpl PK_NAME_COLUMN = new PColumnImpl(PNameFactory.newName(PK_NAME), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, PK_NAME_BYTES, HConstants.LATEST_TIMESTAMP); - public static final String ASC_OR_DESC = "ASC_OR_DESC"; - public static final byte[] ASC_OR_DESC_BYTES = Bytes.toBytes(ASC_OR_DESC); - private static final PColumnImpl ASC_OR_DESC_COLUMN = new PColumnImpl(PNameFactory.newName(ASC_OR_DESC), - PNameFactory.newName(TABLE_FAMILY_BYTES), PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), - 0, null, false, null, false, false, ASC_OR_DESC_BYTES, HConstants.LATEST_TIMESTAMP); - - private static final List PK_DATUM_LIST = Lists.newArrayList(TENANT_ID_COLUMN, TABLE_SCHEM_COLUMN, TABLE_NAME_COLUMN, COLUMN_NAME_COLUMN); - - 
private static final RowProjector GET_COLUMNS_ROW_PROJECTOR = new RowProjector( - Arrays. asList( - new ExpressionProjector(TABLE_CAT, TABLE_CAT, SYSTEM_CATALOG, - new RowKeyColumnExpression(TENANT_ID_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 0)), false), - new ExpressionProjector(TABLE_SCHEM, TABLE_SCHEM, SYSTEM_CATALOG, - new RowKeyColumnExpression(TABLE_SCHEM_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 1)), false), - new ExpressionProjector(TABLE_NAME, TABLE_NAME, SYSTEM_CATALOG, - new RowKeyColumnExpression(TABLE_NAME_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 2)), false), - new ExpressionProjector(COLUMN_NAME, COLUMN_NAME, SYSTEM_CATALOG, - new RowKeyColumnExpression(COLUMN_NAME_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 3)), false), - new ExpressionProjector(DATA_TYPE, DATA_TYPE, SYSTEM_CATALOG, - new KeyValueColumnExpression(DATA_TYPE_COLUMN), false), - new ExpressionProjector(TYPE_NAME, TYPE_NAME, SYSTEM_CATALOG, - new KeyValueColumnExpression(TYPE_NAME_COLUMN), false), - new ExpressionProjector(COLUMN_SIZE, COLUMN_SIZE, SYSTEM_CATALOG, - new KeyValueColumnExpression(COLUMN_SIZE_COLUMN), false), - new ExpressionProjector(BUFFER_LENGTH, BUFFER_LENGTH, SYSTEM_CATALOG, - new KeyValueColumnExpression(BUFFER_LENGTH_COLUMN), false), - new ExpressionProjector(DECIMAL_DIGITS, DECIMAL_DIGITS, SYSTEM_CATALOG, - new KeyValueColumnExpression(DECIMAL_DIGITS_COLUMN), false), - new ExpressionProjector(NUM_PREC_RADIX, NUM_PREC_RADIX, SYSTEM_CATALOG, - new KeyValueColumnExpression(NUM_PREC_RADIX_COLUMN), false), - new ExpressionProjector(NULLABLE, NULLABLE, SYSTEM_CATALOG, - new KeyValueColumnExpression(NULLABLE_COLUMN), false), - new ExpressionProjector(REMARKS, REMARKS, SYSTEM_CATALOG, - new KeyValueColumnExpression(REMARKS_COLUMN), false), - new ExpressionProjector(COLUMN_DEF, COLUMN_DEF, SYSTEM_CATALOG, - new KeyValueColumnExpression(COLUMN_DEF_COLUMN), false), - new ExpressionProjector(SQL_DATA_TYPE, SQL_DATA_TYPE, SYSTEM_CATALOG, - new KeyValueColumnExpression(SQL_DATA_TYPE_COLUMN), false), - new ExpressionProjector(SQL_DATETIME_SUB, SQL_DATETIME_SUB, SYSTEM_CATALOG, - new KeyValueColumnExpression(SQL_DATETIME_SUB_COLUMN), false), - new ExpressionProjector(CHAR_OCTET_LENGTH, CHAR_OCTET_LENGTH, SYSTEM_CATALOG, - new KeyValueColumnExpression(CHAR_OCTET_LENGTH_COLUMN), false), - new ExpressionProjector(ORDINAL_POSITION, ORDINAL_POSITION, SYSTEM_CATALOG, - new KeyValueColumnExpression(ORDINAL_POSITION_COLUMN), false), - new ExpressionProjector(IS_NULLABLE, IS_NULLABLE, SYSTEM_CATALOG, - new KeyValueColumnExpression(IS_NULLABLE_COLUMN), false), - new ExpressionProjector(SCOPE_CATALOG, SCOPE_CATALOG, SYSTEM_CATALOG, - new KeyValueColumnExpression(SCOPE_CATALOG_COLUMN), false), - new ExpressionProjector(SCOPE_SCHEMA, SCOPE_SCHEMA, SYSTEM_CATALOG, - new KeyValueColumnExpression(SCOPE_SCHEMA_COLUMN), false), - new ExpressionProjector(SCOPE_TABLE, SCOPE_TABLE, SYSTEM_CATALOG, - new KeyValueColumnExpression(SCOPE_TABLE_COLUMN), false), - new ExpressionProjector(SOURCE_DATA_TYPE, SOURCE_DATA_TYPE, SYSTEM_CATALOG, - new KeyValueColumnExpression(SOURCE_DATA_TYPE_COLUMN), false), - new ExpressionProjector(IS_AUTOINCREMENT, IS_AUTOINCREMENT, SYSTEM_CATALOG, - new KeyValueColumnExpression(IS_AUTOINCREMENT_COLUMN), false), - new ExpressionProjector(ARRAY_SIZE, ARRAY_SIZE, SYSTEM_CATALOG, - new KeyValueColumnExpression(ARRAY_SIZE_COLUMN), false), - new ExpressionProjector(COLUMN_FAMILY, COLUMN_FAMILY, SYSTEM_CATALOG, - new KeyValueColumnExpression(COLUMN_FAMILY_COLUMN), false), - new 
ExpressionProjector(TYPE_ID, TYPE_ID, SYSTEM_CATALOG, - new KeyValueColumnExpression(TYPE_ID_COLUMN), false), - new ExpressionProjector(VIEW_CONSTANT, VIEW_CONSTANT, SYSTEM_CATALOG, - new KeyValueColumnExpression(VIEW_CONSTANT_COLUMN), false), - new ExpressionProjector(MULTI_TENANT, MULTI_TENANT, SYSTEM_CATALOG, - new KeyValueColumnExpression(MULTI_TENANT_COLUMN), false), - new ExpressionProjector(KEY_SEQ, KEY_SEQ, SYSTEM_CATALOG, - new KeyValueColumnExpression(KEY_SEQ_COLUMN), false) - ), 0, true); - - private static final RowProjector GET_PRIMARY_KEYS_ROW_PROJECTOR = - new RowProjector( - Arrays. asList( - new ExpressionProjector(TABLE_CAT, TABLE_CAT, SYSTEM_CATALOG, - new RowKeyColumnExpression(TENANT_ID_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 0)), - false), - new ExpressionProjector(TABLE_SCHEM, TABLE_SCHEM, SYSTEM_CATALOG, - new RowKeyColumnExpression(TABLE_SCHEM_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 1)), - false), - new ExpressionProjector(TABLE_NAME, TABLE_NAME, SYSTEM_CATALOG, - new RowKeyColumnExpression(TABLE_NAME_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 2)), - false), - new ExpressionProjector(COLUMN_NAME, COLUMN_NAME, SYSTEM_CATALOG, - new RowKeyColumnExpression(COLUMN_NAME_COLUMN, - new RowKeyValueAccessor(PK_DATUM_LIST, 3)), - false), - new ExpressionProjector(KEY_SEQ, KEY_SEQ, SYSTEM_CATALOG, - new KeyValueColumnExpression(KEY_SEQ_COLUMN), false), - new ExpressionProjector(PK_NAME, PK_NAME, SYSTEM_CATALOG, - new KeyValueColumnExpression(PK_NAME_COLUMN), false), - new ExpressionProjector(ASC_OR_DESC, ASC_OR_DESC, SYSTEM_CATALOG, - new KeyValueColumnExpression(ASC_OR_DESC_COLUMN), false), - new ExpressionProjector(DATA_TYPE, DATA_TYPE, SYSTEM_CATALOG, - new KeyValueColumnExpression(DATA_TYPE_COLUMN), false), - new ExpressionProjector(TYPE_NAME, TYPE_NAME, SYSTEM_CATALOG, - new KeyValueColumnExpression(TYPE_NAME_COLUMN), false), - new ExpressionProjector(COLUMN_SIZE, COLUMN_SIZE, SYSTEM_CATALOG, - new KeyValueColumnExpression(COLUMN_SIZE_COLUMN), false), - new ExpressionProjector(TYPE_ID, TYPE_ID, SYSTEM_CATALOG, - new KeyValueColumnExpression(TYPE_ID_COLUMN), false), - new ExpressionProjector(VIEW_CONSTANT, VIEW_CONSTANT, SYSTEM_CATALOG, - new KeyValueColumnExpression(VIEW_CONSTANT_COLUMN), false)), - 0, true); - - private boolean match(String str, String pattern) throws SQLException { - LiteralExpression strExpr = LiteralExpression.newConstant(str, PVarchar.INSTANCE, SortOrder.ASC); - LiteralExpression patternExpr = LiteralExpression.newConstant(pattern, PVarchar.INSTANCE, SortOrder.ASC); - List children = Arrays.asList(strExpr, patternExpr); - LikeExpression likeExpr = StringBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean evaluated = likeExpr.evaluate(null, ptr); - Boolean result = (Boolean)likeExpr.getDataType().toObject(ptr); - if (evaluated) { - return result; - } - return false; - } - - @Override - public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) - throws SQLException { - try { - boolean isTenantSpecificConnection = connection.getTenantId() != null; - List tuples = Lists.newArrayListWithExpectedSize(10); - // Allow a "." 
in columnNamePattern for column family match - String colPattern = null; - String cfPattern = null; - if (columnNamePattern != null && columnNamePattern.length() > 0) { - int index = columnNamePattern.indexOf('.'); - if (index <= 0) { - colPattern = columnNamePattern; - } else { - cfPattern = columnNamePattern.substring(0, index); - if (columnNamePattern.length() > index+1) { - colPattern = columnNamePattern.substring(index+1); - } - } + public static final int FAMILY_NAME_INDEX = 4; + public static final int COLUMN_NAME_INDEX = 3; + public static final int TABLE_NAME_INDEX = 2; + public static final int SCHEMA_NAME_INDEX = 1; + public static final int TENANT_ID_INDEX = 0; + + public static final int TYPE_INDEX = 2; + public static final int FUNTION_NAME_INDEX = 1; + + public static final String SYSTEM_CATALOG_SCHEMA = QueryConstants.SYSTEM_SCHEMA_NAME; + public static final byte[] SYSTEM_CATALOG_SCHEMA_BYTES = QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; + public static final String SYSTEM_SCHEMA_NAME = QueryConstants.SYSTEM_SCHEMA_NAME; + public static final byte[] SYSTEM_SCHEMA_NAME_BYTES = QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; + public static final TableName SYSTEM_SCHEMA_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_SCHEMA_NAME); + + public static final String SYSTEM_CATALOG_TABLE = "CATALOG"; + public static final byte[] SYSTEM_CATALOG_TABLE_BYTES = Bytes.toBytes(SYSTEM_CATALOG_TABLE); + public static final String SYSTEM_CATALOG = + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\""; + public static final String SYSTEM_CATALOG_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_CATALOG_TABLE); + public static final String MAPPED_SYSTEM_CATALOG_NAME = + SYSTEM_CATALOG_NAME.replace(QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR); + public static final TableName SYSTEM_CATALOG_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_CATALOG_NAME); + public static final byte[] SYSTEM_CATALOG_NAME_BYTES = Bytes.toBytes(SYSTEM_CATALOG_NAME); + public static final String SYSTEM_STATS_TABLE = "STATS"; + public static final String SYSTEM_STATS_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_STATS_TABLE); + public static final String IS_NAMESPACE_MAPPED = "IS_NAMESPACE_MAPPED"; + public static final byte[] IS_NAMESPACE_MAPPED_BYTES = Bytes.toBytes(IS_NAMESPACE_MAPPED); + public static final byte[] SYSTEM_STATS_NAME_BYTES = Bytes.toBytes(SYSTEM_STATS_NAME); + public static final byte[] SYSTEM_STATS_TABLE_BYTES = Bytes.toBytes(SYSTEM_STATS_TABLE); + public static final TableName SYSTEM_STATS_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_STATS_NAME); + public static final String SYSTEM_CATALOG_ALIAS = "\"SYSTEM.TABLE\""; + + public static final byte[] SYSTEM_SEQUENCE_FAMILY_BYTES = + QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + public static final String SYSTEM_SEQUENCE_SCHEMA = SYSTEM_CATALOG_SCHEMA; + public static final byte[] SYSTEM_SEQUENCE_SCHEMA_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_SCHEMA); + public static final String SYSTEM_SEQUENCE_TABLE = "SEQUENCE"; + public static final byte[] SYSTEM_SEQUENCE_TABLE_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_TABLE); + public static final String SYSTEM_SEQUENCE = + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_SEQUENCE_TABLE + "\""; + public static final String SYSTEM_SEQUENCE_NAME = + SchemaUtil.getTableName(SYSTEM_SEQUENCE_SCHEMA, SYSTEM_SEQUENCE_TABLE); + public static final byte[] SYSTEM_SEQUENCE_NAME_BYTES = Bytes.toBytes(SYSTEM_SEQUENCE_NAME); + public static final TableName SYSTEM_SEQUENCE_HBASE_TABLE_NAME = + 
TableName.valueOf(SYSTEM_SEQUENCE_NAME); + + public static final String TABLE_NAME = "TABLE_NAME"; + public static final byte[] TABLE_NAME_BYTES = Bytes.toBytes(TABLE_NAME); + public static final String TABLE_TYPE = "TABLE_TYPE"; + public static final byte[] TABLE_TYPE_BYTES = Bytes.toBytes(TABLE_TYPE); + public static final String PHYSICAL_NAME = "PHYSICAL_NAME"; + public static final byte[] PHYSICAL_NAME_BYTES = Bytes.toBytes(PHYSICAL_NAME); + + public static final String COLUMN_FAMILY = "COLUMN_FAMILY"; + public static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); + public static final String TABLE_CAT = "TABLE_CAT"; + public static final String TABLE_CATALOG = "TABLE_CATALOG"; + public static final String TABLE_SCHEM = "TABLE_SCHEM"; + public static final byte[] TABLE_SCHEM_BYTES = Bytes.toBytes(TABLE_SCHEM); + public static final String LOGICAL_TABLE_NAME = "LOGICAL_TABLE_NAME"; + public static final String LOGICAL_PARENT_NAME = "LOGICAL_PARENT_NAME"; + public static final String REMARKS = "REMARKS"; + public static final String TYPE_SCHEM = "TYPE_SCHEM"; + public static final String SELF_REFERENCING_COL_NAME = "SELF_REFERENCING_COL_NAME"; + public static final String REF_GENERATION = "REF_GENERATION"; + public static final String PK_NAME = "PK_NAME"; + public static final byte[] PK_NAME_BYTES = Bytes.toBytes(PK_NAME); + public static final String TABLE_SEQ_NUM = "TABLE_SEQ_NUM"; + public static final byte[] TABLE_SEQ_NUM_BYTES = Bytes.toBytes(TABLE_SEQ_NUM); + public static final String COLUMN_COUNT = "COLUMN_COUNT"; + public static final byte[] COLUMN_COUNT_BYTES = Bytes.toBytes(COLUMN_COUNT); + public static final String SALT_BUCKETS = "SALT_BUCKETS"; + public static final byte[] SALT_BUCKETS_BYTES = Bytes.toBytes(SALT_BUCKETS); + public static final String STORE_NULLS = "STORE_NULLS"; + public static final byte[] STORE_NULLS_BYTES = Bytes.toBytes(STORE_NULLS); + + public static final String DATA_TABLE_NAME = "DATA_TABLE_NAME"; + public static final byte[] DATA_TABLE_NAME_BYTES = Bytes.toBytes(DATA_TABLE_NAME); + public static final String INDEX_STATE = "INDEX_STATE"; + public static final byte[] INDEX_STATE_BYTES = Bytes.toBytes(INDEX_STATE); + + public static final String TENANT_ID = "TENANT_ID"; + public static final byte[] TENANT_ID_BYTES = Bytes.toBytes(TENANT_ID); + + public static final String COLUMN_NAME = "COLUMN_NAME"; + public static final String DATA_TYPE = "DATA_TYPE"; + public static final byte[] DATA_TYPE_BYTES = Bytes.toBytes(DATA_TYPE); + public static final String TYPE_NAME = "TYPE_NAME"; + public static final String COLUMN_SIZE = "COLUMN_SIZE"; + public static final byte[] COLUMN_SIZE_BYTES = Bytes.toBytes(COLUMN_SIZE); + public static final String BUFFER_LENGTH = "BUFFER_LENGTH"; + public static final String DECIMAL_DIGITS = "DECIMAL_DIGITS"; + public static final byte[] DECIMAL_DIGITS_BYTES = Bytes.toBytes(DECIMAL_DIGITS); + public static final String NUM_PREC_RADIX = "NUM_PREC_RADIX"; + public static final String NULLABLE = "NULLABLE"; + public static final byte[] NULLABLE_BYTES = Bytes.toBytes(NULLABLE); + public static final String COLUMN_DEF = "COLUMN_DEF"; + public static final byte[] COLUMN_DEF_BYTES = Bytes.toBytes(COLUMN_DEF); + public static final String SQL_DATA_TYPE = "SQL_DATA_TYPE"; + public static final String SQL_DATETIME_SUB = "SQL_DATETIME_SUB"; + public static final String CHAR_OCTET_LENGTH = "CHAR_OCTET_LENGTH"; + public static final String ORDINAL_POSITION = "ORDINAL_POSITION"; + public static final byte[] 
ORDINAL_POSITION_BYTES = Bytes.toBytes(ORDINAL_POSITION); + public static final String IS_NULLABLE = "IS_NULLABLE"; + public static final String SCOPE_CATALOG = "SCOPE_CATALOG"; + public static final String SCOPE_SCHEMA = "SCOPE_SCHEMA"; + public static final String SCOPE_TABLE = "SCOPE_TABLE"; + public static final String SOURCE_DATA_TYPE = "SOURCE_DATA_TYPE"; + public static final String IS_AUTOINCREMENT = "IS_AUTOINCREMENT"; + public static final String SORT_ORDER = "SORT_ORDER"; + public static final byte[] SORT_ORDER_BYTES = Bytes.toBytes(SORT_ORDER); + public static final String IMMUTABLE_ROWS = "IMMUTABLE_ROWS"; + public static final byte[] IMMUTABLE_ROWS_BYTES = Bytes.toBytes(IMMUTABLE_ROWS); + public static final String DEFAULT_COLUMN_FAMILY_NAME = "DEFAULT_COLUMN_FAMILY"; + public static final byte[] DEFAULT_COLUMN_FAMILY_NAME_BYTES = + Bytes.toBytes(DEFAULT_COLUMN_FAMILY_NAME); + public static final String VIEW_STATEMENT = "VIEW_STATEMENT"; + public static final byte[] VIEW_STATEMENT_BYTES = Bytes.toBytes(VIEW_STATEMENT); + public static final String DISABLE_WAL = "DISABLE_WAL"; + public static final byte[] DISABLE_WAL_BYTES = Bytes.toBytes(DISABLE_WAL); + public static final String MULTI_TENANT = "MULTI_TENANT"; + public static final byte[] MULTI_TENANT_BYTES = Bytes.toBytes(MULTI_TENANT); + public static final String VIEW_TYPE = "VIEW_TYPE"; + public static final byte[] VIEW_TYPE_BYTES = Bytes.toBytes(VIEW_TYPE); + public static final String INDEX_TYPE = "INDEX_TYPE"; + public static final byte[] INDEX_TYPE_BYTES = Bytes.toBytes(INDEX_TYPE); + public static final String LINK_TYPE = "LINK_TYPE"; + public static final byte[] LINK_TYPE_BYTES = Bytes.toBytes(LINK_TYPE); + public static final String TASK_TYPE = "TASK_TYPE"; + public static final byte[] TASK_TYPE_BYTES = Bytes.toBytes(TASK_TYPE); + public static final String TASK_TS = "TASK_TS"; + public static final byte[] TASK_TS_BYTES = Bytes.toBytes(TASK_TS); + public static final String TASK_STATUS = "TASK_STATUS"; + public static final String TASK_END_TS = "TASK_END_TS"; + public static final String TASK_PRIORITY = "TASK_PRIORITY"; + public static final String TASK_DATA = "TASK_DATA"; + public static final String TASK_TABLE_TTL = "864000"; + public static final String NEW_PHYS_TABLE_NAME = "NEW_PHYS_TABLE_NAME"; + public static final String TRANSFORM_TYPE = "TRANSFORM_TYPE"; + public static final String TRANSFORM_STATUS = "STATUS"; + public static final String TRANSFORM_JOB_ID = "JOB_ID"; + public static final String TRANSFORM_RETRY_COUNT = "RETRY_COUNT"; + public static final String TRANSFORM_START_TS = "START_TS"; + public static final String TRANSFORM_LAST_STATE_TS = "END_TS"; + public static final String OLD_METADATA = "OLD_METADATA"; + public static final String NEW_METADATA = "NEW_METADATA"; + public static final String TRANSFORM_FUNCTION = "TRANSFORM_FUNCTION"; + public static final String TRANSFORM_TABLE_TTL = "7776000"; // 90 days + + public static final int TTL_FOR_MUTEX = 15 * 60; // 15min + public static final String ARRAY_SIZE = "ARRAY_SIZE"; + public static final byte[] ARRAY_SIZE_BYTES = Bytes.toBytes(ARRAY_SIZE); + public static final String VIEW_CONSTANT = "VIEW_CONSTANT"; + public static final byte[] VIEW_CONSTANT_BYTES = Bytes.toBytes(VIEW_CONSTANT); + public static final String IS_VIEW_REFERENCED = "IS_VIEW_REFERENCED"; + public static final byte[] IS_VIEW_REFERENCED_BYTES = Bytes.toBytes(IS_VIEW_REFERENCED); + public static final String VIEW_INDEX_ID = "VIEW_INDEX_ID"; + public static final byte[] 
VIEW_INDEX_ID_BYTES = Bytes.toBytes(VIEW_INDEX_ID); + public static final String VIEW_INDEX_ID_DATA_TYPE = "VIEW_INDEX_ID_DATA_TYPE"; + public static final byte[] VIEW_INDEX_ID_DATA_TYPE_BYTES = Bytes.toBytes(VIEW_INDEX_ID_DATA_TYPE); + public static final String BASE_COLUMN_COUNT = "BASE_COLUMN_COUNT"; + public static final byte[] BASE_COLUMN_COUNT_BYTES = Bytes.toBytes(BASE_COLUMN_COUNT); + public static final String IS_ROW_TIMESTAMP = "IS_ROW_TIMESTAMP"; + public static final byte[] IS_ROW_TIMESTAMP_BYTES = Bytes.toBytes(IS_ROW_TIMESTAMP); + + public static final String TABLE_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY; + public static final byte[] TABLE_FAMILY_BYTES = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + public static final byte[] PENDING_DISABLE_COUNT_BYTES = Bytes.toBytes("PENDING_DISABLE_COUNT"); + + public static final String TYPE_SEQUENCE = "SEQUENCE"; + public static final String SYSTEM_FUNCTION_TABLE = "FUNCTION"; + public static final String SYSTEM_FUNCTION = + SYSTEM_CATALOG_SCHEMA + QueryConstants.NAME_SEPARATOR + "\"FUNCTION\""; + public static final String SYSTEM_FUNCTION_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_FUNCTION_TABLE); + public static final byte[] SYSTEM_FUNCTION_NAME_BYTES = Bytes.toBytes(SYSTEM_FUNCTION_NAME); + public static final TableName SYSTEM_FUNCTION_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_FUNCTION_NAME); + + public static final String FUNCTION_NAME = "FUNCTION_NAME"; + public static final byte[] FUNCTION_NAME_BYTES = Bytes.toBytes(FUNCTION_NAME); + public static final String CLASS_NAME = "CLASS_NAME"; + public static final byte[] CLASS_NAME_BYTES = Bytes.toBytes(CLASS_NAME); + public static final String JAR_PATH = "JAR_PATH"; + public static final byte[] JAR_PATH_BYTES = Bytes.toBytes(JAR_PATH); + public static final String TYPE = "TYPE"; + public static final byte[] TYPE_BYTES = Bytes.toBytes(TYPE); + public static final String ARG_POSITION = "ARG_POSITION"; + public static final byte[] ARG_POSITION_TYPE = Bytes.toBytes(ARG_POSITION); + public static final String RETURN_TYPE = "RETURN_TYPE"; + public static final byte[] RETURN_TYPE_BYTES = Bytes.toBytes(RETURN_TYPE); + public static final String IS_ARRAY = "IS_ARRAY"; + public static final byte[] IS_ARRAY_BYTES = Bytes.toBytes(IS_ARRAY); + public static final String IS_CONSTANT = "IS_CONSTANT"; + public static final byte[] IS_CONSTANT_BYTES = Bytes.toBytes(IS_CONSTANT); + public static final String DEFAULT_VALUE = "DEFAULT_VALUE"; + public static final byte[] DEFAULT_VALUE_BYTES = Bytes.toBytes(DEFAULT_VALUE); + public static final String NUM_ARGS = "NUM_ARGS"; + public static final byte[] NUM_ARGS_BYTES = Bytes.toBytes(NUM_ARGS); + + public static final String SEQUENCE_SCHEMA = "SEQUENCE_SCHEMA"; + public static final String SEQUENCE_NAME = "SEQUENCE_NAME"; + public static final String CURRENT_VALUE = "CURRENT_VALUE"; + public static final byte[] CURRENT_VALUE_BYTES = Bytes.toBytes(CURRENT_VALUE); + public static final String START_WITH = "START_WITH"; + public static final byte[] START_WITH_BYTES = Bytes.toBytes(START_WITH); + // MIN_VALUE, MAX_VALUE, CYCLE_FLAG and LIMIT_FLAG were added in 3.1/4.1 + public static final String MIN_VALUE = "MIN_VALUE"; + public static final byte[] MIN_VALUE_BYTES = Bytes.toBytes(MIN_VALUE); + public static final String MAX_VALUE = "MAX_VALUE"; + public static final byte[] MAX_VALUE_BYTES = Bytes.toBytes(MAX_VALUE); + public static final String INCREMENT_BY = "INCREMENT_BY"; + public static final byte[] INCREMENT_BY_BYTES = 
Bytes.toBytes(INCREMENT_BY); + public static final String CACHE_SIZE = "CACHE_SIZE"; + public static final byte[] CACHE_SIZE_BYTES = Bytes.toBytes(CACHE_SIZE); + public static final String CYCLE_FLAG = "CYCLE_FLAG"; + public static final byte[] CYCLE_FLAG_BYTES = Bytes.toBytes(CYCLE_FLAG); + public static final String LIMIT_REACHED_FLAG = "LIMIT_REACHED_FLAG"; + public static final byte[] LIMIT_REACHED_FLAG_BYTES = Bytes.toBytes(LIMIT_REACHED_FLAG); + public static final String KEY_SEQ = "KEY_SEQ"; + public static final byte[] KEY_SEQ_BYTES = Bytes.toBytes(KEY_SEQ); + public static final String SUPERTABLE_NAME = "SUPERTABLE_NAME"; + + public static final String TYPE_ID = "TYPE_ID"; + public static final String INDEX_DISABLE_TIMESTAMP = "INDEX_DISABLE_TIMESTAMP"; + public static final byte[] INDEX_DISABLE_TIMESTAMP_BYTES = Bytes.toBytes(INDEX_DISABLE_TIMESTAMP); + + public static final String REGION_NAME = "REGION_NAME"; + public static final byte[] REGION_NAME_BYTES = Bytes.toBytes(REGION_NAME); + public static final String GUIDE_POSTS = "GUIDE_POSTS"; + public static final byte[] GUIDE_POSTS_BYTES = Bytes.toBytes(GUIDE_POSTS); + public static final String GUIDE_POSTS_COUNT = "GUIDE_POSTS_COUNT"; + public static final byte[] GUIDE_POSTS_COUNT_BYTES = Bytes.toBytes(GUIDE_POSTS_COUNT); + public static final String GUIDE_POSTS_WIDTH = "GUIDE_POSTS_WIDTH"; + public static final byte[] GUIDE_POSTS_WIDTH_BYTES = Bytes.toBytes(GUIDE_POSTS_WIDTH); + public static final String GUIDE_POSTS_ROW_COUNT = "GUIDE_POSTS_ROW_COUNT"; + public static final byte[] GUIDE_POSTS_ROW_COUNT_BYTES = Bytes.toBytes(GUIDE_POSTS_ROW_COUNT); + public static final String MIN_KEY = "MIN_KEY"; + public static final byte[] MIN_KEY_BYTES = Bytes.toBytes(MIN_KEY); + public static final String MAX_KEY = "MAX_KEY"; + public static final byte[] MAX_KEY_BYTES = Bytes.toBytes(MAX_KEY); + public static final String LAST_STATS_UPDATE_TIME = "LAST_STATS_UPDATE_TIME"; + public static final byte[] LAST_STATS_UPDATE_TIME_BYTES = Bytes.toBytes(LAST_STATS_UPDATE_TIME); + public static final String GUIDE_POST_KEY = "GUIDE_POST_KEY"; + public static final String ASYNC_REBUILD_TIMESTAMP = "ASYNC_REBUILD_TIMESTAMP"; + public static final byte[] ASYNC_REBUILD_TIMESTAMP_BYTES = Bytes.toBytes(ASYNC_REBUILD_TIMESTAMP); + + public static final String COLUMN_ENCODED_BYTES = "COLUMN_ENCODED_BYTES"; + + public static final String PARENT_TENANT_ID = "PARENT_TENANT_ID"; + public static final byte[] PARENT_TENANT_ID_BYTES = Bytes.toBytes(PARENT_TENANT_ID); + + private static final String TENANT_POS_SHIFT = "TENANT_POS_SHIFT"; + private static final byte[] TENANT_POS_SHIFT_BYTES = Bytes.toBytes(TENANT_POS_SHIFT); + + public static final String TRANSACTIONAL = "TRANSACTIONAL"; + public static final byte[] TRANSACTIONAL_BYTES = Bytes.toBytes(TRANSACTIONAL); + + public static final String TRANSACTION_PROVIDER = "TRANSACTION_PROVIDER"; + public static final byte[] TRANSACTION_PROVIDER_BYTES = Bytes.toBytes(TRANSACTION_PROVIDER); + + public static final String PHYSICAL_TABLE_NAME = "PHYSICAL_TABLE_NAME"; + public static final byte[] PHYSICAL_TABLE_NAME_BYTES = Bytes.toBytes(PHYSICAL_TABLE_NAME); + + public static final String UPDATE_CACHE_FREQUENCY = "UPDATE_CACHE_FREQUENCY"; + public static final byte[] UPDATE_CACHE_FREQUENCY_BYTES = Bytes.toBytes(UPDATE_CACHE_FREQUENCY); + + public static final String AUTO_PARTITION_SEQ = "AUTO_PARTITION_SEQ"; + public static final byte[] AUTO_PARTITION_SEQ_BYTES = Bytes.toBytes(AUTO_PARTITION_SEQ); + + public static final 
String APPEND_ONLY_SCHEMA = "APPEND_ONLY_SCHEMA"; + public static final byte[] APPEND_ONLY_SCHEMA_BYTES = Bytes.toBytes(APPEND_ONLY_SCHEMA); + + public static final String ASYNC_CREATED_DATE = "ASYNC_CREATED_DATE"; + public static final String SEQUENCE_TABLE_TYPE = SYSTEM_SEQUENCE_TABLE; + + public static final String SYNC_INDEX_CREATED_DATE = "SYNC_INDEX_CREATED_DATE"; + public static final String SYSTEM_MUTEX_COLUMN_NAME = "MUTEX_VALUE"; + public static final byte[] SYSTEM_MUTEX_COLUMN_NAME_BYTES = + Bytes.toBytes(SYSTEM_MUTEX_COLUMN_NAME); + public static final String SYSTEM_MUTEX_TABLE_NAME = "MUTEX"; + public static final String SYSTEM_MUTEX_NAME = + SchemaUtil.getTableName(QueryConstants.SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME); + public static final TableName SYSTEM_MUTEX_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_MUTEX_NAME); + public static final byte[] SYSTEM_MUTEX_NAME_BYTES = Bytes.toBytes(SYSTEM_MUTEX_NAME); + public static final byte[] SYSTEM_MUTEX_FAMILY_NAME_BYTES = TABLE_FAMILY_BYTES; + + private final PhoenixConnection connection; + + public static final int MAX_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "8"); + public static final int MIN_LOCAL_SI_VERSION_DISALLOW = VersionUtil.encodeVersion("0", "98", "6"); + public static final int MIN_RENEW_LEASE_VERSION = VersionUtil.encodeVersion("1", "1", "3"); + public static final int MIN_NAMESPACE_MAPPED_PHOENIX_VERSION = + VersionUtil.encodeVersion("4", "8", "0"); + public static final int MIN_PENDING_ACTIVE_INDEX = VersionUtil.encodeVersion("4", "12", "0"); + public static final int MIN_CLIENT_RETRY_INDEX_WRITES = VersionUtil.encodeVersion("4", "14", "0"); + public static final int MIN_TX_CLIENT_SIDE_MAINTENANCE = + VersionUtil.encodeVersion("4", "14", "0"); + public static final int MIN_PENDING_DISABLE_INDEX = VersionUtil.encodeVersion("4", "14", "0"); + + // Version below which we should turn off essential column family. + public static final int ESSENTIAL_FAMILY_VERSION_THRESHOLD = + VersionUtil.encodeVersion("0", "94", "7"); + // Version below which we should disallow usage of mutable secondary indexing. 
+ public static final int MUTABLE_SI_VERSION_THRESHOLD = VersionUtil.encodeVersion("0", "94", "10"); + /** Version below which we fall back on the generic KeyValueBuilder */ + public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = + VersionUtil.encodeVersion("0", "94", "14"); + + public static final String IMMUTABLE_STORAGE_SCHEME = "IMMUTABLE_STORAGE_SCHEME"; + public static final byte[] STORAGE_SCHEME_BYTES = Bytes.toBytes(IMMUTABLE_STORAGE_SCHEME); + public static final String ENCODING_SCHEME = "ENCODING_SCHEME"; + public static final byte[] ENCODING_SCHEME_BYTES = Bytes.toBytes(ENCODING_SCHEME); + public static final String COLUMN_QUALIFIER = "COLUMN_QUALIFIER"; + public static final byte[] COLUMN_QUALIFIER_BYTES = Bytes.toBytes(COLUMN_QUALIFIER); + public static final String COLUMN_QUALIFIER_COUNTER = "QUALIFIER_COUNTER"; + public static final byte[] COLUMN_QUALIFIER_COUNTER_BYTES = + Bytes.toBytes(COLUMN_QUALIFIER_COUNTER); + public static final String USE_STATS_FOR_PARALLELIZATION = "USE_STATS_FOR_PARALLELIZATION"; + public static final byte[] USE_STATS_FOR_PARALLELIZATION_BYTES = + Bytes.toBytes(USE_STATS_FOR_PARALLELIZATION); + + // The TTL property will hold the duration after which rows will be marked as expired and + // is stored in column TTL in SYSCAT + public static final String TTL = "TTL"; + public static final byte[] TTL_BYTES = Bytes.toBytes(TTL); + public static final int TTL_NOT_DEFINED = 0; + public static final int DEFAULT_TTL = HConstants.FOREVER; + public static final String PHOENIX_TTL = "PHOENIX_TTL"; + public static final byte[] PHOENIX_TTL_BYTES = Bytes.toBytes(PHOENIX_TTL); + public static final String PHOENIX_TTL_HWM = "PHOENIX_TTL_HWM"; + public static final byte[] PHOENIX_TTL_HWM_BYTES = Bytes.toBytes(PHOENIX_TTL_HWM); + + public static final String LAST_DDL_TIMESTAMP = "LAST_DDL_TIMESTAMP"; + public static final byte[] LAST_DDL_TIMESTAMP_BYTES = Bytes.toBytes(LAST_DDL_TIMESTAMP); + + public static final String CHANGE_DETECTION_ENABLED = "CHANGE_DETECTION_ENABLED"; + public static final byte[] CHANGE_DETECTION_ENABLED_BYTES = + Bytes.toBytes(CHANGE_DETECTION_ENABLED); + + public static final String SCHEMA_VERSION = "SCHEMA_VERSION"; + public static final byte[] SCHEMA_VERSION_BYTES = Bytes.toBytes(SCHEMA_VERSION); + + public static final String EXTERNAL_SCHEMA_ID = "EXTERNAL_SCHEMA_ID"; + public static final byte[] EXTERNAL_SCHEMA_ID_BYTES = Bytes.toBytes(EXTERNAL_SCHEMA_ID); + + public static final String STREAMING_TOPIC_NAME = "STREAMING_TOPIC_NAME"; + public static final byte[] STREAMING_TOPIC_NAME_BYTES = Bytes.toBytes(STREAMING_TOPIC_NAME); + + public static final String ROW_KEY_MATCHER = "ROW_KEY_MATCHER"; + public static final byte[] ROW_KEY_MATCHER_BYTES = Bytes.toBytes(ROW_KEY_MATCHER); + + public static final String INDEX_WHERE = "INDEX_WHERE"; + public static final byte[] INDEX_WHERE_BYTES = Bytes.toBytes(INDEX_WHERE); + + public static final String SYSTEM_CHILD_LINK_TABLE = "CHILD_LINK"; + public static final String SYSTEM_CHILD_LINK_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_CHILD_LINK_TABLE); + public static final byte[] SYSTEM_CHILD_LINK_NAME_BYTES = Bytes.toBytes(SYSTEM_CHILD_LINK_NAME); + public static final byte[] SYSTEM_CHILD_LINK_NAMESPACE_BYTES = + SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, true).getName(); + public static final TableName SYSTEM_LINK_HBASE_TABLE_NAME = + TableName.valueOf(SYSTEM_CHILD_LINK_NAME); + + public static final String SYSTEM_TASK_TABLE = "TASK"; + public static final 
String SYSTEM_TASK_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_TASK_TABLE); + public static final byte[] SYSTEM_TASK_NAME_BYTES = Bytes.toBytes(SYSTEM_TASK_NAME); + public static final TableName SYSTEM_TASK_HBASE_TABLE_NAME = TableName.valueOf(SYSTEM_TASK_NAME); + + public static final String SYSTEM_TRANSFORM_TABLE = "TRANSFORM"; + public static final String SYSTEM_TRANSFORM_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_TRANSFORM_TABLE); + public static final String MAX_LOOKBACK_AGE = BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE; + public static final byte[] MAX_LOOKBACK_AGE_BYTES = Bytes.toBytes(MAX_LOOKBACK_AGE); + + public static final String CDC_INCLUDE_NAME = "INCLUDE"; + public static final String CDC_INCLUDE_TABLE = "CDC_INCLUDE"; + public static final byte[] CDC_INCLUDE_BYTES = Bytes.toBytes(CDC_INCLUDE_TABLE); + + // SYSTEM:LOG + public static final String SYSTEM_LOG_TABLE = "LOG"; + public static final String SYSTEM_LOG_NAME = + SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_LOG_TABLE); + public static final String QUERY_ID = "QUERY_ID"; + public static final String USER = "USER"; + public static final String CLIENT_IP = "CLIENT_IP"; + public static final String QUERY = "QUERY"; + public static final String EXPLAIN_PLAN = "EXPLAIN_PLAN"; + public static final String TOTAL_EXECUTION_TIME = "TOTAL_EXECUTION_TIME"; + public static final String NO_OF_RESULTS_ITERATED = "NO_OF_RESULTS_ITERATED"; + public static final String QUERY_STATUS = "QUERY_STATUS"; + public static final String EXCEPTION_TRACE = "EXCEPTION_TRACE"; + public static final String GLOBAL_SCAN_DETAILS = "GLOBAL_SCAN_DETAILS"; + public static final String SCAN_METRICS_JSON = "SCAN_METRICS_JSON"; + public static final String START_TIME = "START_TIME"; + public static final String BIND_PARAMETERS = "BIND_PARAMETERS"; + + PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException { + this.connection = connection; + } + + private PhoenixResultSet getEmptyResultSet() throws SQLException { + PhoenixStatement stmt = new PhoenixStatement(connection); + stmt.closeOnCompletion(); + return new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, + new StatementContext(stmt, false)); + } + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return false; + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return true; + } + + @Override + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return false; + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, + boolean nullable) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getCatalogSeparator() throws SQLException { + return "."; + } + + @Override + public String getCatalogTerm() throws SQLException { 
+ return "Tenant"; + } + + @Override + public ResultSet getCatalogs() throws SQLException { + PreparedStatement stmt = QueryUtil.getCatalogsStmt(connection); + stmt.closeOnCompletion(); + return stmt.executeQuery(); + } + + @Override + public ResultSet getClientInfoProperties() throws SQLException { + return getEmptyResultSet(); + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, String table, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public static final String GLOBAL_TENANANTS_ONLY = "null"; + + private static void appendConjunction(StringBuilder buf) { + buf.append(buf.length() == 0 ? "" : " and "); + } + + // While creating the PColumns we don't care about the ordinal positiion so set it to 1 + private static final PColumnImpl TENANT_ID_COLUMN = + new PColumnImpl(PNameFactory.newName(TENANT_ID), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl TABLE_SCHEM_COLUMN = + new PColumnImpl(PNameFactory.newName(TABLE_SCHEM), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl TABLE_NAME_COLUMN = + new PColumnImpl(PNameFactory.newName(TABLE_NAME), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl COLUMN_NAME_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_NAME), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl DATA_TYPE_COLUMN = + new PColumnImpl(PNameFactory.newName(DATA_TYPE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DATA_TYPE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl TYPE_NAME_COLUMN = + new PColumnImpl(PNameFactory.newName(TYPE_NAME), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(TYPE_NAME), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl COLUMN_SIZE_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, COLUMN_SIZE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl BUFFER_LENGTH_COLUMN = + new PColumnImpl(PNameFactory.newName(BUFFER_LENGTH), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(BUFFER_LENGTH), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl DECIMAL_DIGITS_COLUMN = + new PColumnImpl(PNameFactory.newName(DECIMAL_DIGITS), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, DECIMAL_DIGITS_BYTES, HConstants.LATEST_TIMESTAMP); + private 
static final PColumnImpl NUM_PREC_RADIX_COLUMN = + new PColumnImpl(PNameFactory.newName(NUM_PREC_RADIX), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(NUM_PREC_RADIX), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl NULLABLE_COLUMN = + new PColumnImpl(PNameFactory.newName(NULLABLE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, NULLABLE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl REMARKS_COLUMN = + new PColumnImpl(PNameFactory.newName(REMARKS), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(REMARKS), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl COLUMN_DEF_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_DEF), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(COLUMN_DEF), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SQL_DATA_TYPE_COLUMN = + new PColumnImpl(PNameFactory.newName(SQL_DATA_TYPE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SQL_DATA_TYPE), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SQL_DATETIME_SUB_COLUMN = new PColumnImpl( + PNameFactory.newName(SQL_DATETIME_SUB), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SQL_DATETIME_SUB), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl CHAR_OCTET_LENGTH_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(CHAR_OCTET_LENGTH), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl ORDINAL_POSITION_COLUMN = new PColumnImpl( + PNameFactory.newName(ORDINAL_POSITION), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, ORDINAL_POSITION_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl IS_NULLABLE_COLUMN = + new PColumnImpl(PNameFactory.newName(IS_NULLABLE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(IS_NULLABLE), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SCOPE_CATALOG_COLUMN = + new PColumnImpl(PNameFactory.newName(SCOPE_CATALOG), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SCOPE_CATALOG), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SCOPE_SCHEMA_COLUMN = + new PColumnImpl(PNameFactory.newName(SCOPE_SCHEMA), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SCOPE_SCHEMA), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SCOPE_TABLE_COLUMN = + new 
PColumnImpl(PNameFactory.newName(SCOPE_TABLE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SCOPE_TABLE), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl SOURCE_DATA_TYPE_COLUMN = new PColumnImpl( + PNameFactory.newName(SOURCE_DATA_TYPE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SOURCE_DATA_TYPE), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl IS_AUTOINCREMENT_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PSmallint.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(SCOPE_CATALOG), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl ARRAY_SIZE_COLUMN = + new PColumnImpl(PNameFactory.newName(ARRAY_SIZE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, ARRAY_SIZE_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl COLUMN_FAMILY_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_FAMILY), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, COLUMN_FAMILY_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl TYPE_ID_COLUMN = + new PColumnImpl(PNameFactory.newName(COLUMN_SIZE), PNameFactory.newName(TABLE_FAMILY_BYTES), + PInteger.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, Bytes.toBytes(TYPE_ID), HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl VIEW_CONSTANT_COLUMN = + new PColumnImpl(PNameFactory.newName(VIEW_CONSTANT), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarbinary.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, + false, false, VIEW_CONSTANT_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl MULTI_TENANT_COLUMN = + new PColumnImpl(PNameFactory.newName(MULTI_TENANT), PNameFactory.newName(TABLE_FAMILY_BYTES), + PBoolean.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, MULTI_TENANT_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl KEY_SEQ_COLUMN = + new PColumnImpl(PNameFactory.newName(KEY_SEQ), PNameFactory.newName(TABLE_FAMILY_BYTES), + PSmallint.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, KEY_SEQ_BYTES, HConstants.LATEST_TIMESTAMP); + private static final PColumnImpl PK_NAME_COLUMN = + new PColumnImpl(PNameFactory.newName(PK_NAME), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, PK_NAME_BYTES, HConstants.LATEST_TIMESTAMP); + public static final String ASC_OR_DESC = "ASC_OR_DESC"; + public static final byte[] ASC_OR_DESC_BYTES = Bytes.toBytes(ASC_OR_DESC); + private static final PColumnImpl ASC_OR_DESC_COLUMN = + new PColumnImpl(PNameFactory.newName(ASC_OR_DESC), PNameFactory.newName(TABLE_FAMILY_BYTES), + PVarchar.INSTANCE, null, null, false, 1, SortOrder.getDefault(), 0, null, false, null, false, + false, ASC_OR_DESC_BYTES, HConstants.LATEST_TIMESTAMP); + + private static final List PK_DATUM_LIST = + 
Lists.newArrayList(TENANT_ID_COLUMN, TABLE_SCHEM_COLUMN, TABLE_NAME_COLUMN, COLUMN_NAME_COLUMN); + + private static final RowProjector GET_COLUMNS_ROW_PROJECTOR = + new RowProjector( + Arrays.< + ColumnProjector> asList( + new ExpressionProjector(TABLE_CAT, TABLE_CAT, SYSTEM_CATALOG, + new RowKeyColumnExpression(TENANT_ID_COLUMN, new RowKeyValueAccessor(PK_DATUM_LIST, 0)), + false), + new ExpressionProjector(TABLE_SCHEM, TABLE_SCHEM, SYSTEM_CATALOG, + new RowKeyColumnExpression(TABLE_SCHEM_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 1)), + false), + new ExpressionProjector(TABLE_NAME, TABLE_NAME, SYSTEM_CATALOG, + new RowKeyColumnExpression(TABLE_NAME_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 2)), + false), + new ExpressionProjector(COLUMN_NAME, COLUMN_NAME, SYSTEM_CATALOG, + new RowKeyColumnExpression(COLUMN_NAME_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 3)), + false), + new ExpressionProjector(DATA_TYPE, DATA_TYPE, SYSTEM_CATALOG, + new KeyValueColumnExpression(DATA_TYPE_COLUMN), false), + new ExpressionProjector(TYPE_NAME, TYPE_NAME, SYSTEM_CATALOG, + new KeyValueColumnExpression(TYPE_NAME_COLUMN), false), + new ExpressionProjector(COLUMN_SIZE, COLUMN_SIZE, SYSTEM_CATALOG, + new KeyValueColumnExpression(COLUMN_SIZE_COLUMN), false), + new ExpressionProjector(BUFFER_LENGTH, BUFFER_LENGTH, SYSTEM_CATALOG, + new KeyValueColumnExpression(BUFFER_LENGTH_COLUMN), false), + new ExpressionProjector(DECIMAL_DIGITS, DECIMAL_DIGITS, SYSTEM_CATALOG, + new KeyValueColumnExpression(DECIMAL_DIGITS_COLUMN), false), + new ExpressionProjector(NUM_PREC_RADIX, NUM_PREC_RADIX, SYSTEM_CATALOG, + new KeyValueColumnExpression(NUM_PREC_RADIX_COLUMN), false), + new ExpressionProjector(NULLABLE, NULLABLE, SYSTEM_CATALOG, + new KeyValueColumnExpression(NULLABLE_COLUMN), false), + new ExpressionProjector(REMARKS, REMARKS, SYSTEM_CATALOG, + new KeyValueColumnExpression(REMARKS_COLUMN), false), + new ExpressionProjector(COLUMN_DEF, COLUMN_DEF, SYSTEM_CATALOG, + new KeyValueColumnExpression(COLUMN_DEF_COLUMN), false), + new ExpressionProjector(SQL_DATA_TYPE, SQL_DATA_TYPE, SYSTEM_CATALOG, + new KeyValueColumnExpression(SQL_DATA_TYPE_COLUMN), false), + new ExpressionProjector(SQL_DATETIME_SUB, SQL_DATETIME_SUB, SYSTEM_CATALOG, + new KeyValueColumnExpression(SQL_DATETIME_SUB_COLUMN), false), + new ExpressionProjector(CHAR_OCTET_LENGTH, CHAR_OCTET_LENGTH, SYSTEM_CATALOG, + new KeyValueColumnExpression(CHAR_OCTET_LENGTH_COLUMN), false), + new ExpressionProjector(ORDINAL_POSITION, ORDINAL_POSITION, SYSTEM_CATALOG, + new KeyValueColumnExpression(ORDINAL_POSITION_COLUMN), false), + new ExpressionProjector(IS_NULLABLE, IS_NULLABLE, SYSTEM_CATALOG, + new KeyValueColumnExpression(IS_NULLABLE_COLUMN), false), + new ExpressionProjector(SCOPE_CATALOG, SCOPE_CATALOG, SYSTEM_CATALOG, + new KeyValueColumnExpression(SCOPE_CATALOG_COLUMN), false), + new ExpressionProjector(SCOPE_SCHEMA, SCOPE_SCHEMA, SYSTEM_CATALOG, + new KeyValueColumnExpression(SCOPE_SCHEMA_COLUMN), false), + new ExpressionProjector(SCOPE_TABLE, SCOPE_TABLE, SYSTEM_CATALOG, + new KeyValueColumnExpression(SCOPE_TABLE_COLUMN), false), + new ExpressionProjector(SOURCE_DATA_TYPE, SOURCE_DATA_TYPE, SYSTEM_CATALOG, + new KeyValueColumnExpression(SOURCE_DATA_TYPE_COLUMN), false), + new ExpressionProjector(IS_AUTOINCREMENT, IS_AUTOINCREMENT, SYSTEM_CATALOG, + new KeyValueColumnExpression(IS_AUTOINCREMENT_COLUMN), false), + new ExpressionProjector(ARRAY_SIZE, ARRAY_SIZE, SYSTEM_CATALOG, + new KeyValueColumnExpression(ARRAY_SIZE_COLUMN), false), + new 
ExpressionProjector(COLUMN_FAMILY, COLUMN_FAMILY, SYSTEM_CATALOG, + new KeyValueColumnExpression(COLUMN_FAMILY_COLUMN), false), + new ExpressionProjector(TYPE_ID, TYPE_ID, SYSTEM_CATALOG, + new KeyValueColumnExpression(TYPE_ID_COLUMN), false), + new ExpressionProjector(VIEW_CONSTANT, VIEW_CONSTANT, SYSTEM_CATALOG, + new KeyValueColumnExpression(VIEW_CONSTANT_COLUMN), false), + new ExpressionProjector(MULTI_TENANT, MULTI_TENANT, SYSTEM_CATALOG, + new KeyValueColumnExpression(MULTI_TENANT_COLUMN), false), + new ExpressionProjector(KEY_SEQ, KEY_SEQ, SYSTEM_CATALOG, + new KeyValueColumnExpression(KEY_SEQ_COLUMN), false)), + 0, true); + + private static final RowProjector GET_PRIMARY_KEYS_ROW_PROJECTOR = + new RowProjector( + Arrays.< + ColumnProjector> asList( + new ExpressionProjector(TABLE_CAT, TABLE_CAT, SYSTEM_CATALOG, + new RowKeyColumnExpression(TENANT_ID_COLUMN, new RowKeyValueAccessor(PK_DATUM_LIST, 0)), + false), + new ExpressionProjector(TABLE_SCHEM, TABLE_SCHEM, SYSTEM_CATALOG, + new RowKeyColumnExpression(TABLE_SCHEM_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 1)), + false), + new ExpressionProjector(TABLE_NAME, TABLE_NAME, SYSTEM_CATALOG, + new RowKeyColumnExpression(TABLE_NAME_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 2)), + false), + new ExpressionProjector(COLUMN_NAME, COLUMN_NAME, SYSTEM_CATALOG, + new RowKeyColumnExpression(COLUMN_NAME_COLUMN, + new RowKeyValueAccessor(PK_DATUM_LIST, 3)), + false), + new ExpressionProjector(KEY_SEQ, KEY_SEQ, SYSTEM_CATALOG, + new KeyValueColumnExpression(KEY_SEQ_COLUMN), false), + new ExpressionProjector(PK_NAME, PK_NAME, SYSTEM_CATALOG, + new KeyValueColumnExpression(PK_NAME_COLUMN), false), + new ExpressionProjector(ASC_OR_DESC, ASC_OR_DESC, SYSTEM_CATALOG, + new KeyValueColumnExpression(ASC_OR_DESC_COLUMN), false), + new ExpressionProjector(DATA_TYPE, DATA_TYPE, SYSTEM_CATALOG, + new KeyValueColumnExpression(DATA_TYPE_COLUMN), false), + new ExpressionProjector(TYPE_NAME, TYPE_NAME, SYSTEM_CATALOG, + new KeyValueColumnExpression(TYPE_NAME_COLUMN), false), + new ExpressionProjector(COLUMN_SIZE, COLUMN_SIZE, SYSTEM_CATALOG, + new KeyValueColumnExpression(COLUMN_SIZE_COLUMN), false), + new ExpressionProjector(TYPE_ID, TYPE_ID, SYSTEM_CATALOG, + new KeyValueColumnExpression(TYPE_ID_COLUMN), false), + new ExpressionProjector(VIEW_CONSTANT, VIEW_CONSTANT, SYSTEM_CATALOG, + new KeyValueColumnExpression(VIEW_CONSTANT_COLUMN), false)), + 0, true); + + private boolean match(String str, String pattern) throws SQLException { + LiteralExpression strExpr = + LiteralExpression.newConstant(str, PVarchar.INSTANCE, SortOrder.ASC); + LiteralExpression patternExpr = + LiteralExpression.newConstant(pattern, PVarchar.INSTANCE, SortOrder.ASC); + List children = Arrays. asList(strExpr, patternExpr); + LikeExpression likeExpr = StringBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean evaluated = likeExpr.evaluate(null, ptr); + Boolean result = (Boolean) likeExpr.getDataType().toObject(ptr); + if (evaluated) { + return result; + } + return false; + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + try { + boolean isTenantSpecificConnection = connection.getTenantId() != null; + List tuples = Lists.newArrayListWithExpectedSize(10); + // Allow a "." 
in columnNamePattern for column family match + String colPattern = null; + String cfPattern = null; + if (columnNamePattern != null && columnNamePattern.length() > 0) { + int index = columnNamePattern.indexOf('.'); + if (index <= 0) { + colPattern = columnNamePattern; + } else { + cfPattern = columnNamePattern.substring(0, index); + if (columnNamePattern.length() > index + 1) { + colPattern = columnNamePattern.substring(index + 1); + } } - try (ResultSet rs = getTables(catalog, schemaPattern, tableNamePattern, null)) { - while (rs.next()) { - String schemaName = rs.getString(TABLE_SCHEM); - String tableName = rs.getString(TABLE_NAME); - String tenantId = rs.getString(TABLE_CAT); - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - PTable table = connection.getTableNoCache(fullTableName); - boolean isSalted = table.getBucketNum()!=null; - boolean tenantColSkipped = false; - List columns = table.getColumns(); - int startOffset = isSalted ? 1 : 0; - columns = Lists.newArrayList(columns.subList(startOffset, columns.size())); - for (PColumn column : columns) { - if (isTenantSpecificConnection && column.equals(table.getPKColumns().get(startOffset))) { - // skip the tenant column - tenantColSkipped = true; - continue; - } - String columnFamily = column.getFamilyName()!=null ? column.getFamilyName().getString() : null; - String columnName = column.getName().getString(); - if (cfPattern != null && cfPattern.length() > 0) { // if null or empty, will pick up all columns - if (columnFamily==null || !match(columnFamily, cfPattern)) { - continue; - } - } - if (colPattern != null && colPattern.length() > 0) { - if (!match(columnName, colPattern)) { - continue; - } - } - // generate row key - // TENANT_ID, TABLE_SCHEM, TABLE_NAME , COLUMN_NAME are row key columns - byte[] rowKey = - SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, null); - - // add one cell for each column info - List cells = Lists.newArrayListWithCapacity(25); - // DATA_TYPE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - DATA_TYPE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(column.getDataType().getResultSetSqlType()))); - // TYPE_NAME - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(TYPE_NAME), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getDataType().getSqlTypeNameBytes())); - // COLUMN_SIZE - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getMaxLength() != null - ? PInteger.INSTANCE.toBytes(column.getMaxLength()) - : ByteUtil.EMPTY_BYTE_ARRAY)); - // BUFFER_LENGTH - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(BUFFER_LENGTH), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // DECIMAL_DIGITS - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - DECIMAL_DIGITS_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getScale() != null ? 
PInteger.INSTANCE.toBytes(column.getScale()) - : ByteUtil.EMPTY_BYTE_ARRAY)); - // NUM_PREC_RADIX - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(NUM_PREC_RADIX), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // NULLABLE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - NULLABLE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(SchemaUtil.getIsNullableInt(column.isNullable())))); - // REMARKS - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(REMARKS), - MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)); - // COLUMN_DEF - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(COLUMN_DEF), - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PVarchar.INSTANCE.toBytes(column.getExpressionStr()))); - // SQL_DATA_TYPE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SQL_DATA_TYPE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // SQL_DATETIME_SUB - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SQL_DATETIME_SUB), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // CHAR_OCTET_LENGTH - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(CHAR_OCTET_LENGTH), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // ORDINAL_POSITION - int ordinal = - column.getPosition() + (isSalted ? 0 : 1) - (tenantColSkipped ? 1 : 0); - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - ORDINAL_POSITION_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(ordinal))); - String isNullable = - column.isNullable() ? Boolean.TRUE.toString() : Boolean.FALSE.toString(); - // IS_NULLABLE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(IS_NULLABLE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PVarchar.INSTANCE.toBytes(isNullable))); - // SCOPE_CATALOG - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SCOPE_CATALOG), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // SCOPE_SCHEMA - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SCOPE_SCHEMA), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // SCOPE_TABLE - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SCOPE_TABLE), - MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)); - // SOURCE_DATA_TYPE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(SOURCE_DATA_TYPE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // IS_AUTOINCREMENT - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(IS_AUTOINCREMENT), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - ByteUtil.EMPTY_BYTE_ARRAY)); - // ARRAY_SIZE - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getArraySize() != null - ? PInteger.INSTANCE.toBytes(column.getArraySize()) - : ByteUtil.EMPTY_BYTE_ARRAY)); - // COLUMN_FAMILY - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - COLUMN_FAMILY_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getFamilyName() != null - ? 
column.getFamilyName().getBytes() : ByteUtil.EMPTY_BYTE_ARRAY)); - // TYPE_ID - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(TYPE_ID), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(column.getDataType().getSqlType()))); - // VIEW_CONSTANT - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - VIEW_CONSTANT_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getViewConstant() != null - ? column.getViewConstant() : ByteUtil.EMPTY_BYTE_ARRAY)); - // MULTI_TENANT - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - MULTI_TENANT_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PBoolean.INSTANCE.toBytes(table.isMultiTenant()))); - // KEY_SEQ_COLUMN - byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY; - int pkPos = table.getPKColumns().indexOf(column); - if (pkPos!=-1) { - short keySeq = (short) (pkPos + 1 - startOffset - (tenantColSkipped ? 1 : 0)); - keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq); - } - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, KEY_SEQ_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, keySeqBytes)); - Collections.sort(cells, new CellComparatorImpl()); - Tuple tuple = new MultiKeyValueTuple(cells); - tuples.add(tuple); - } + } + try (ResultSet rs = getTables(catalog, schemaPattern, tableNamePattern, null)) { + while (rs.next()) { + String schemaName = rs.getString(TABLE_SCHEM); + String tableName = rs.getString(TABLE_NAME); + String tenantId = rs.getString(TABLE_CAT); + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + PTable table = connection.getTableNoCache(fullTableName); + boolean isSalted = table.getBucketNum() != null; + boolean tenantColSkipped = false; + List columns = table.getColumns(); + int startOffset = isSalted ? 1 : 0; + columns = Lists.newArrayList(columns.subList(startOffset, columns.size())); + for (PColumn column : columns) { + if ( + isTenantSpecificConnection && column.equals(table.getPKColumns().get(startOffset)) + ) { + // skip the tenant column + tenantColSkipped = true; + continue; } - } - - PhoenixStatement stmt = new PhoenixStatement(connection); - stmt.closeOnCompletion(); - return new PhoenixResultSet(new MaterializedResultIterator(tuples), GET_COLUMNS_ROW_PROJECTOR, new StatementContext(stmt, false)); - } finally { - if (connection.getAutoCommit()) { - connection.commit(); + String columnFamily = + column.getFamilyName() != null ? 
column.getFamilyName().getString() : null; + String columnName = column.getName().getString(); + if (cfPattern != null && cfPattern.length() > 0) { // if null or empty, will pick up all + // columns + if (columnFamily == null || !match(columnFamily, cfPattern)) { + continue; + } } - } - } - - @Override - public Connection getConnection() throws SQLException { - return connection; - } - - @Override - public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, - String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public int getDatabaseMajorVersion() throws SQLException { - return MetaDataProtocol.PHOENIX_MAJOR_VERSION; - } - - @Override - public int getDatabaseMinorVersion() throws SQLException { - return MetaDataProtocol.PHOENIX_MINOR_VERSION; - } - - @Override - public String getDatabaseProductName() throws SQLException { - return "Phoenix"; - } - - @Override - public String getDatabaseProductVersion() throws SQLException { - return Integer.toString(getDatabaseMajorVersion()) + "." + Integer.toString(getDatabaseMinorVersion()); - } - - @Override - public int getDefaultTransactionIsolation() throws SQLException { - return connection.getTransactionIsolation(); - } - - @Override - public int getDriverMajorVersion() { - return Integer.parseInt(connection.getClientInfo(PhoenixEmbeddedDriver.MAJOR_VERSION_PROP)); - } - - @Override - public int getDriverMinorVersion() { - return Integer.parseInt(connection.getClientInfo(PhoenixEmbeddedDriver.MINOR_VERSION_PROP)); - } - - @Override - public String getDriverName() throws SQLException { - return connection.getClientInfo(PhoenixEmbeddedDriver.DRIVER_NAME_PROP); - } - - @Override - public String getDriverVersion() throws SQLException { - return connection.getClientInfo(PhoenixEmbeddedDriver.MAJOR_VERSION_PROP) + "." 
+ connection.getClientInfo(PhoenixEmbeddedDriver.MINOR_VERSION_PROP); - } - - @Override - public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getExtraNameCharacters() throws SQLException { - return ""; - } - - @Override - public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, - String columnNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getIdentifierQuoteString() throws SQLException { - return SchemaUtil.ESCAPE_CHARACTER; - } - - @Override - public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, - boolean approximate) throws SQLException { - PreparedStatement stmt = QueryUtil.getIndexInfoStmt(connection, catalog, schema, table, - unique, approximate); - if (stmt == null) { - return getEmptyResultSet(); - } - stmt.closeOnCompletion(); - return stmt.executeQuery(); - } - - @Override - public int getJDBCMajorVersion() throws SQLException { - return 1; - } - - @Override - public int getJDBCMinorVersion() throws SQLException { - return 0; - } - - @Override - public int getMaxBinaryLiteralLength() throws SQLException { - return 0; - } - - @Override - public int getMaxCatalogNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxCharLiteralLength() throws SQLException { - return 4000; - } - - @Override - public int getMaxColumnNameLength() throws SQLException { - return 200; - } - - @Override - public int getMaxColumnsInGroupBy() throws SQLException { - return 1; - } - - @Override - public int getMaxColumnsInIndex() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInOrderBy() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInSelect() throws SQLException { - return 0; - } - - @Override - public int getMaxColumnsInTable() throws SQLException { - return 0; - } - - @Override - public int getMaxConnections() throws SQLException { - return 0; - } - - @Override - public int getMaxCursorNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxIndexLength() throws SQLException { - return 0; - } - - @Override - public int getMaxProcedureNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxRowSize() throws SQLException { - return 0; - } - - @Override - public int getMaxSchemaNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxStatementLength() throws SQLException { - return 0; - } - - @Override - public int getMaxStatements() throws SQLException { - return 0; - } - - @Override - public int getMaxTableNameLength() throws SQLException { - return 0; - } - - @Override - public int getMaxTablesInSelect() throws SQLException { - return 1; - } - - @Override - public int getMaxUserNameLength() throws SQLException { - return 0; - } - - @Override - public String getNumericFunctions() throws SQLException { - return ""; - } - - @Override - public ResultSet getPrimaryKeys(String catalog, String schemaName, String tableName) - throws SQLException { - if (tableName == null || 
tableName.length() == 0) { - return getEmptyResultSet(); - } - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - PTable table = connection.getTableNoCache(fullTableName); - boolean isSalted = table.getBucketNum() != null; - boolean tenantColSkipped = false; - List pkColumns = table.getPKColumns(); - List sorderPkColumns = - Lists.newArrayList(pkColumns.subList(isSalted ? 1 : 0, pkColumns.size())); - // sort the columns by name - Collections.sort(sorderPkColumns, new Comparator(){ - @Override public int compare(PColumn c1, PColumn c2) { - return c1.getName().getString().compareTo(c2.getName().getString()); + if (colPattern != null && colPattern.length() > 0) { + if (!match(columnName, colPattern)) { + continue; + } } - }); - - try { - List tuples = Lists.newArrayListWithExpectedSize(10); - try (ResultSet rs = getTables(catalog, schemaName, tableName, null)) { - while (rs.next()) { - String tenantId = rs.getString(TABLE_CAT); - for (PColumn column : sorderPkColumns) { - String columnName = column.getName().getString(); - // generate row key - // TENANT_ID, TABLE_SCHEM, TABLE_NAME , COLUMN_NAME are row key columns - byte[] rowKey = - SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, null); - - // add one cell for each column info - List cells = Lists.newArrayListWithCapacity(8); - // KEY_SEQ_COLUMN - byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY; - int pkPos = pkColumns.indexOf(column); - if (pkPos != -1) { - short keySeq = - (short) (pkPos + 1 - (isSalted ? 1 : 0) - (tenantColSkipped ? 1 : 0)); - keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq); - } - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, KEY_SEQ_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, keySeqBytes)); - // PK_NAME - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, PK_NAME_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, table.getPKName() != null - ? table.getPKName().getBytes() : ByteUtil.EMPTY_BYTE_ARRAY)); - // ASC_OR_DESC - char sortOrder = column.getSortOrder() == SortOrder.ASC ? 'A' : 'D'; - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - ASC_OR_DESC_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, - Bytes.toBytes(sortOrder))); - // DATA_TYPE - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(column.getDataType().getResultSetSqlType()))); - // TYPE_NAME - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(TYPE_NAME), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getDataType().getSqlTypeNameBytes())); - // COLUMN_SIZE - cells.add( - PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - column.getMaxLength() != null - ? PInteger.INSTANCE.toBytes(column.getMaxLength()) - : ByteUtil.EMPTY_BYTE_ARRAY)); - // TYPE_ID - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, - Bytes.toBytes(TYPE_ID), MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(column.getDataType().getSqlType()))); - // VIEW_CONSTANT - cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getViewConstant() != null - ? 
column.getViewConstant() : ByteUtil.EMPTY_BYTE_ARRAY)); - Collections.sort(cells, new CellComparatorImpl()); - Tuple tuple = new MultiKeyValueTuple(cells); - tuples.add(tuple); - } + // generate row key + // TENANT_ID, TABLE_SCHEM, TABLE_NAME , COLUMN_NAME are row key columns + byte[] rowKey = + SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, null); + + // add one cell for each column info + List cells = Lists.newArrayListWithCapacity(25); + // DATA_TYPE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(column.getDataType().getResultSetSqlType()))); + // TYPE_NAME + cells.add( + PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, Bytes.toBytes(TYPE_NAME), + MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getDataType().getSqlTypeNameBytes())); + // COLUMN_SIZE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getMaxLength() != null + ? PInteger.INSTANCE.toBytes(column.getMaxLength()) + : ByteUtil.EMPTY_BYTE_ARRAY)); + // BUFFER_LENGTH + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(BUFFER_LENGTH), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // DECIMAL_DIGITS + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + DECIMAL_DIGITS_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getScale() != null + ? PInteger.INSTANCE.toBytes(column.getScale()) + : ByteUtil.EMPTY_BYTE_ARRAY)); + // NUM_PREC_RADIX + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(NUM_PREC_RADIX), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // NULLABLE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, NULLABLE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(SchemaUtil.getIsNullableInt(column.isNullable())))); + // REMARKS + cells.add( + PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, Bytes.toBytes(REMARKS), + MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)); + // COLUMN_DEF + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(COLUMN_DEF), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PVarchar.INSTANCE.toBytes(column.getExpressionStr()))); + // SQL_DATA_TYPE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SQL_DATA_TYPE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // SQL_DATETIME_SUB + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SQL_DATETIME_SUB), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // CHAR_OCTET_LENGTH + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(CHAR_OCTET_LENGTH), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // ORDINAL_POSITION + int ordinal = column.getPosition() + (isSalted ? 0 : 1) - (tenantColSkipped ? 1 : 0); + cells.add( + PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(ordinal))); + String isNullable = + column.isNullable() ? 
Boolean.TRUE.toString() : Boolean.FALSE.toString(); + // IS_NULLABLE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(IS_NULLABLE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PVarchar.INSTANCE.toBytes(isNullable))); + // SCOPE_CATALOG + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SCOPE_CATALOG), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // SCOPE_SCHEMA + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SCOPE_SCHEMA), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // SCOPE_TABLE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SCOPE_TABLE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // SOURCE_DATA_TYPE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(SOURCE_DATA_TYPE), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // IS_AUTOINCREMENT + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(IS_AUTOINCREMENT), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + ByteUtil.EMPTY_BYTE_ARRAY)); + // ARRAY_SIZE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getArraySize() != null + ? PInteger.INSTANCE.toBytes(column.getArraySize()) + : ByteUtil.EMPTY_BYTE_ARRAY)); + // COLUMN_FAMILY + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + COLUMN_FAMILY_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getFamilyName() != null + ? column.getFamilyName().getBytes() + : ByteUtil.EMPTY_BYTE_ARRAY)); + // TYPE_ID + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(TYPE_ID), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(column.getDataType().getSqlType()))); + // VIEW_CONSTANT + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + VIEW_CONSTANT_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getViewConstant() != null + ? column.getViewConstant() + : ByteUtil.EMPTY_BYTE_ARRAY)); + // MULTI_TENANT + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + MULTI_TENANT_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PBoolean.INSTANCE.toBytes(table.isMultiTenant()))); + // KEY_SEQ_COLUMN + byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY; + int pkPos = table.getPKColumns().indexOf(column); + if (pkPos != -1) { + short keySeq = (short) (pkPos + 1 - startOffset - (tenantColSkipped ? 
1 : 0)); + keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq); } + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, KEY_SEQ_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, keySeqBytes)); + Collections.sort(cells, new CellComparatorImpl()); + Tuple tuple = new MultiKeyValueTuple(cells); + tuples.add(tuple); + } } - - PhoenixStatement stmt = new PhoenixStatement(connection); - stmt.closeOnCompletion(); - return new PhoenixResultSet(new MaterializedResultIterator(tuples), - GET_PRIMARY_KEYS_ROW_PROJECTOR, - new StatementContext(stmt, false)); - } finally { - if (connection.getAutoCommit()) { - connection.commit(); + } + + PhoenixStatement stmt = new PhoenixStatement(connection); + stmt.closeOnCompletion(); + return new PhoenixResultSet(new MaterializedResultIterator(tuples), GET_COLUMNS_ROW_PROJECTOR, + new StatementContext(stmt, false)); + } finally { + if (connection.getAutoCommit()) { + connection.commit(); + } + } + } + + @Override + public Connection getConnection() throws SQLException { + return connection; + } + + @Override + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return MetaDataProtocol.PHOENIX_MAJOR_VERSION; + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return MetaDataProtocol.PHOENIX_MINOR_VERSION; + } + + @Override + public String getDatabaseProductName() throws SQLException { + return "Phoenix"; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return Integer.toString(getDatabaseMajorVersion()) + "." + + Integer.toString(getDatabaseMinorVersion()); + } + + @Override + public int getDefaultTransactionIsolation() throws SQLException { + return connection.getTransactionIsolation(); + } + + @Override + public int getDriverMajorVersion() { + return Integer.parseInt(connection.getClientInfo(PhoenixEmbeddedDriver.MAJOR_VERSION_PROP)); + } + + @Override + public int getDriverMinorVersion() { + return Integer.parseInt(connection.getClientInfo(PhoenixEmbeddedDriver.MINOR_VERSION_PROP)); + } + + @Override + public String getDriverName() throws SQLException { + return connection.getClientInfo(PhoenixEmbeddedDriver.DRIVER_NAME_PROP); + } + + @Override + public String getDriverVersion() throws SQLException { + return connection.getClientInfo(PhoenixEmbeddedDriver.MAJOR_VERSION_PROP) + "." 
+ + connection.getClientInfo(PhoenixEmbeddedDriver.MINOR_VERSION_PROP); + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) + throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getExtraNameCharacters() throws SQLException { + return ""; + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, + String functionNamePattern, String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getIdentifierQuoteString() throws SQLException { + return SchemaUtil.ESCAPE_CHARACTER; + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) + throws SQLException { + return getEmptyResultSet(); + } + + @Override + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, + boolean approximate) throws SQLException { + PreparedStatement stmt = + QueryUtil.getIndexInfoStmt(connection, catalog, schema, table, unique, approximate); + if (stmt == null) { + return getEmptyResultSet(); + } + stmt.closeOnCompletion(); + return stmt.executeQuery(); + } + + @Override + public int getJDBCMajorVersion() throws SQLException { + return 1; + } + + @Override + public int getJDBCMinorVersion() throws SQLException { + return 0; + } + + @Override + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCharLiteralLength() throws SQLException { + return 4000; + } + + @Override + public int getMaxColumnNameLength() throws SQLException { + return 200; + } + + @Override + public int getMaxColumnsInGroupBy() throws SQLException { + return 1; + } + + @Override + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + @Override + public int getMaxConnections() throws SQLException { + return 0; + } + + @Override + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxIndexLength() throws SQLException { + return 0; + } + + @Override + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxRowSize() throws SQLException { + return 0; + } + + @Override + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatementLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatements() throws SQLException { + return 0; + } + + @Override + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxTablesInSelect() throws SQLException { + return 1; + } + + @Override + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + @Override + public String getNumericFunctions() throws SQLException { + return ""; + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schemaName, String tableName) + throws SQLException { + if (tableName == null || 
tableName.length() == 0) { + return getEmptyResultSet(); + } + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + PTable table = connection.getTableNoCache(fullTableName); + boolean isSalted = table.getBucketNum() != null; + boolean tenantColSkipped = false; + List pkColumns = table.getPKColumns(); + List sorderPkColumns = + Lists.newArrayList(pkColumns.subList(isSalted ? 1 : 0, pkColumns.size())); + // sort the columns by name + Collections.sort(sorderPkColumns, new Comparator() { + @Override + public int compare(PColumn c1, PColumn c2) { + return c1.getName().getString().compareTo(c2.getName().getString()); + } + }); + + try { + List tuples = Lists.newArrayListWithExpectedSize(10); + try (ResultSet rs = getTables(catalog, schemaName, tableName, null)) { + while (rs.next()) { + String tenantId = rs.getString(TABLE_CAT); + for (PColumn column : sorderPkColumns) { + String columnName = column.getName().getString(); + // generate row key + // TENANT_ID, TABLE_SCHEM, TABLE_NAME , COLUMN_NAME are row key columns + byte[] rowKey = + SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, null); + + // add one cell for each column info + List cells = Lists.newArrayListWithCapacity(8); + // KEY_SEQ_COLUMN + byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY; + int pkPos = pkColumns.indexOf(column); + if (pkPos != -1) { + short keySeq = (short) (pkPos + 1 - (isSalted ? 1 : 0) - (tenantColSkipped ? 1 : 0)); + keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq); } + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, KEY_SEQ_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, keySeqBytes)); + // PK_NAME + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, PK_NAME_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + table.getPKName() != null + ? table.getPKName().getBytes() + : ByteUtil.EMPTY_BYTE_ARRAY)); + // ASC_OR_DESC + char sortOrder = column.getSortOrder() == SortOrder.ASC ? 'A' : 'D'; + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, ASC_OR_DESC_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, Bytes.toBytes(sortOrder))); + // DATA_TYPE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(column.getDataType().getResultSetSqlType()))); + // TYPE_NAME + cells.add( + PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, Bytes.toBytes(TYPE_NAME), + MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getDataType().getSqlTypeNameBytes())); + // COLUMN_SIZE + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getMaxLength() != null + ? PInteger.INSTANCE.toBytes(column.getMaxLength()) + : ByteUtil.EMPTY_BYTE_ARRAY)); + // TYPE_ID + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + Bytes.toBytes(TYPE_ID), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(column.getDataType().getSqlType()))); + // VIEW_CONSTANT + cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, + VIEW_CONSTANT_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + column.getViewConstant() != null + ? 
column.getViewConstant() + : ByteUtil.EMPTY_BYTE_ARRAY)); + Collections.sort(cells, new CellComparatorImpl()); + Tuple tuple = new MultiKeyValueTuple(cells); + tuples.add(tuple); + } } - } - - @Override - public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, - String columnNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getProcedureTerm() throws SQLException { - return "procedure"; - } - - @Override - public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) - throws SQLException { - return getEmptyResultSet(); - } - - @Override - public int getResultSetHoldability() throws SQLException { - return connection.getHoldability(); - } - - @Override - public RowIdLifetime getRowIdLifetime() throws SQLException { - return RowIdLifetime.ROWID_UNSUPPORTED; - } - - @Override - public String getSQLKeywords() throws SQLException { - return ""; - } - - @Override - public int getSQLStateType() throws SQLException { - return DatabaseMetaData.sqlStateSQL99; - } - - @Override - public String getSchemaTerm() throws SQLException { - return "schema"; - } - - @Override - public ResultSet getSchemas() throws SQLException { - return getSchemas("", null); - } - - @Override - public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { - PreparedStatement stmt = QueryUtil.getSchemasStmt(connection, catalog, schemaPattern); - stmt.closeOnCompletion(); - return stmt.executeQuery(); - } - - @Override - public String getSearchStringEscape() throws SQLException { - return "\\"; - } - - @Override - public String getStringFunctions() throws SQLException { - return ""; - } - - @Override - // TODO does this need to change to use the PARENT_TABLE link - public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) - throws SQLException { - PreparedStatement stmt = QueryUtil.getSuperTablesStmt(connection, catalog, schemaPattern, - tableNamePattern); - stmt.closeOnCompletion(); - return stmt.executeQuery(); - } - - @Override - public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getSystemFunctions() throws SQLException { - return ""; - } - - @Override - public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) - throws SQLException { - return getEmptyResultSet(); - } - - private static final PDatum TABLE_TYPE_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return true; - } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - - private static final RowProjector TABLE_TYPE_ROW_PROJECTOR = new RowProjector(Arrays.asList( - new ExpressionProjector(TABLE_TYPE, TABLE_TYPE, SYSTEM_CATALOG, - new RowKeyColumnExpression(TABLE_TYPE_DATUM, - new RowKeyValueAccessor(Collections.singletonList(TABLE_TYPE_DATUM), 0)), false) - ), 0, true); - private static final Collection TABLE_TYPE_TUPLES = Lists.newArrayListWithExpectedSize(PTableType.values().length); - static { - List tableTypes = Lists.newArrayList( - PTableType.INDEX.getValue().getBytes(), - Bytes.toBytes(SEQUENCE_TABLE_TYPE), - 
PTableType.SYSTEM.getValue().getBytes(), - PTableType.TABLE.getValue().getBytes(), - PTableType.VIEW.getValue().getBytes()); - for (byte[] tableType : tableTypes) { - TABLE_TYPE_TUPLES.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(tableType, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY))); - } - } - - /** - * Supported table types include: INDEX, SEQUENCE, SYSTEM TABLE, TABLE, VIEW - */ - @Override - public ResultSet getTableTypes() throws SQLException { - PhoenixStatement stmt = new PhoenixStatement(connection); - stmt.closeOnCompletion(); - return new PhoenixResultSet(new MaterializedResultIterator(TABLE_TYPE_TUPLES), TABLE_TYPE_ROW_PROJECTOR, new StatementContext(stmt, false)); - } - - @Override - public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, - String[] types) throws SQLException { - PreparedStatement stmt = QueryUtil.getTablesStmt(connection, catalog, schemaPattern, - tableNamePattern, types); - if (stmt == null) { - return getEmptyResultSet(); - } - stmt.closeOnCompletion(); - return stmt.executeQuery(); - } + } + + PhoenixStatement stmt = new PhoenixStatement(connection); + stmt.closeOnCompletion(); + return new PhoenixResultSet(new MaterializedResultIterator(tuples), + GET_PRIMARY_KEYS_ROW_PROJECTOR, new StatementContext(stmt, false)); + } finally { + if (connection.getAutoCommit()) { + connection.commit(); + } + } + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, + String procedureNamePattern, String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getProcedureTerm() throws SQLException { + return "procedure"; + } + + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) + throws SQLException { + return getEmptyResultSet(); + } - @Override - public String getTimeDateFunctions() throws SQLException { - return ""; - } + @Override + public int getResultSetHoldability() throws SQLException { + return connection.getHoldability(); + } - @Override - public ResultSet getTypeInfo() throws SQLException { - return getEmptyResultSet(); - } - - @Override - public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) - throws SQLException { - return getEmptyResultSet(); - } - - @Override - public String getURL() throws SQLException { - return connection.getURL(); - } - - @Override - public String getUserName() throws SQLException { - String userName = connection.getQueryServices().getUserName(); - return userName == null ? 
StringUtil.EMPTY_STRING : userName; - } - - @Override - public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public boolean insertsAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean isCatalogAtStart() throws SQLException { - return false; - } - - @Override - public boolean isReadOnly() throws SQLException { - return false; - } - - @Override - public boolean locatorsUpdateCopy() throws SQLException { - return false; - } - - @Override - public boolean nullPlusNonNullIsNull() throws SQLException { - return true; - } - - @Override - public boolean nullsAreSortedAtEnd() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedAtStart() throws SQLException { - return true; - } - - @Override - public boolean nullsAreSortedHigh() throws SQLException { - return false; - } - - @Override - public boolean nullsAreSortedLow() throws SQLException { - return true; - } - - @Override - public boolean othersDeletesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean othersInsertsAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean othersUpdatesAreVisible(int type) throws SQLException { - return false; - } - - @Override - public boolean ownDeletesAreVisible(int type) throws SQLException { - return true; - } - - @Override - public boolean ownInsertsAreVisible(int type) throws SQLException { - return true; - } - - @Override - public boolean ownUpdatesAreVisible(int type) throws SQLException { - return true; - } - - @Override - public boolean storesLowerCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesMixedCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { - return true; - } - - @Override - public boolean storesUpperCaseIdentifiers() throws SQLException { - return true; - } - - @Override - public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { - return true; - } - - @Override - public boolean supportsANSI92EntryLevelSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsANSI92FullSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsANSI92IntermediateSQL() throws SQLException { - return false; - } - - @Override - public boolean supportsAlterTableWithAddColumn() throws SQLException { - return true; - } - - @Override - public boolean supportsAlterTableWithDropColumn() throws SQLException { - return true; - } - - @Override - public boolean supportsBatchUpdates() throws SQLException { - return true; - } - - @Override - public boolean supportsCatalogsInDataManipulation() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInIndexDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInProcedureCalls() throws SQLException { - return false; - } - - @Override - public boolean supportsCatalogsInTableDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsColumnAliasing() throws SQLException { - return true; 
- } - - @Override - public boolean supportsConvert() throws SQLException { - return true; - } - - @Override - public boolean supportsConvert(int fromType, int toType) throws SQLException { - // TODO - return false; - } - - @Override - public boolean supportsCoreSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsCorrelatedSubqueries() throws SQLException { - return false; - } - - @Override - public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { - return true; - } - - @Override - public boolean supportsDataManipulationTransactionsOnly() throws SQLException { - return false; - } - - @Override - public boolean supportsDifferentTableCorrelationNames() throws SQLException { - return false; - } - - @Override - public boolean supportsExpressionsInOrderBy() throws SQLException { - return true; - } - - @Override - public boolean supportsExtendedSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsFullOuterJoins() throws SQLException { - return false; - } - - @Override - public boolean supportsGetGeneratedKeys() throws SQLException { - return false; - } - - @Override - public boolean supportsGroupBy() throws SQLException { - return true; - } - - @Override - public boolean supportsGroupByBeyondSelect() throws SQLException { - return false; - } - - @Override - public boolean supportsGroupByUnrelated() throws SQLException { - return false; - } - - @Override - public boolean supportsIntegrityEnhancementFacility() throws SQLException { - return false; - } - - @Override - public boolean supportsLikeEscapeClause() throws SQLException { - return true; - } - - @Override - public boolean supportsLimitedOuterJoins() throws SQLException { - return false; - } - - @Override - public boolean supportsMinimumSQLGrammar() throws SQLException { - return false; - } - - @Override - public boolean supportsMixedCaseIdentifiers() throws SQLException { - return false; - } - - @Override - public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { - return true; - } - - @Override - public boolean supportsMultipleOpenResults() throws SQLException { - return true; - } - - @Override - public boolean supportsMultipleResultSets() throws SQLException { - return true; - } - - @Override - public boolean supportsMultipleTransactions() throws SQLException { - return true; - } - - @Override - public boolean supportsNamedParameters() throws SQLException { - return false; - } - - @Override - public boolean supportsNonNullableColumns() throws SQLException { - return true; - } - - @Override - public boolean supportsOpenCursorsAcrossCommit() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenCursorsAcrossRollback() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenStatementsAcrossCommit() throws SQLException { - return false; - } - - @Override - public boolean supportsOpenStatementsAcrossRollback() throws SQLException { - return false; - } - - @Override - public boolean supportsOrderByUnrelated() throws SQLException { - return false; - } - - @Override - public boolean supportsOuterJoins() throws SQLException { - return true; - } - - @Override - public boolean supportsPositionedDelete() throws SQLException { - return false; - } - - @Override - public boolean supportsPositionedUpdate() throws SQLException { - return false; - } - - @Override - public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { - // 
TODO: review - return type == ResultSet.TYPE_FORWARD_ONLY && concurrency == Connection.TRANSACTION_READ_COMMITTED; - } - - @Override - public boolean supportsResultSetHoldability(int holdability) throws SQLException { - // TODO - return holdability == connection.getHoldability(); - } - - @Override - public boolean supportsResultSetType(int type) throws SQLException { - return type == ResultSet.TYPE_FORWARD_ONLY; - } - - @Override - public boolean supportsSavepoints() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInDataManipulation() throws SQLException { - return true; - } - - @Override - public boolean supportsSchemasInIndexDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInProcedureCalls() throws SQLException { - return false; - } - - @Override - public boolean supportsSchemasInTableDefinitions() throws SQLException { - return false; - } - - @Override - public boolean supportsSelectForUpdate() throws SQLException { - return false; - } - - @Override - public boolean supportsStatementPooling() throws SQLException { - return false; - } - - @Override - public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { - return false; - } - - @Override - public boolean supportsStoredProcedures() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInComparisons() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInExists() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInIns() throws SQLException { - return false; - } - - @Override - public boolean supportsSubqueriesInQuantifieds() throws SQLException { - return false; - } - - @Override - public boolean supportsTableCorrelationNames() throws SQLException { - return false; - } - - @Override - public boolean supportsTransactionIsolationLevel(int level) throws SQLException { - return true; - } - - @Override - public boolean supportsTransactions() throws SQLException { - return true; - } - - @Override - public boolean supportsUnion() throws SQLException { - return false; - } - - @Override - public boolean supportsUnionAll() throws SQLException { - return false; - } - - @Override - public boolean updatesAreDetected(int type) throws SQLException { - return false; - } - - @Override - public boolean usesLocalFilePerTable() throws SQLException { - return false; - } - - @Override - public boolean usesLocalFiles() throws SQLException { - return false; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()) - .build().buildException(); - } - return (T)this; - } - - @Override - public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, - String columnNamePattern) throws SQLException { - return getEmptyResultSet(); - } - - @Override - public boolean generatedKeyAlwaysReturned() throws SQLException { - return false; - } - - - private void setParameters(PreparedStatement stmt, List parameterValues) - throws SQLException { - for (int 
i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - } + @Override + public RowIdLifetime getRowIdLifetime() throws SQLException { + return RowIdLifetime.ROWID_UNSUPPORTED; + } + + @Override + public String getSQLKeywords() throws SQLException { + return ""; + } + + @Override + public int getSQLStateType() throws SQLException { + return DatabaseMetaData.sqlStateSQL99; + } + + @Override + public String getSchemaTerm() throws SQLException { + return "schema"; + } + + @Override + public ResultSet getSchemas() throws SQLException { + return getSchemas("", null); + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + PreparedStatement stmt = QueryUtil.getSchemasStmt(connection, catalog, schemaPattern); + stmt.closeOnCompletion(); + return stmt.executeQuery(); + } + + @Override + public String getSearchStringEscape() throws SQLException { + return "\\"; + } + + @Override + public String getStringFunctions() throws SQLException { + return ""; + } + + @Override + // TODO does this need to change to use the PARENT_TABLE link + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { + PreparedStatement stmt = + QueryUtil.getSuperTablesStmt(connection, catalog, schemaPattern, tableNamePattern); + stmt.closeOnCompletion(); + return stmt.executeQuery(); + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getSystemFunctions() throws SQLException { + return ""; + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + private static final PDatum TABLE_TYPE_DATUM = new PDatum() { + @Override + public boolean isNullable() { + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; + + private static final RowProjector TABLE_TYPE_ROW_PROJECTOR = new RowProjector( + Arrays. asList(new ExpressionProjector(TABLE_TYPE, TABLE_TYPE, SYSTEM_CATALOG, + new RowKeyColumnExpression(TABLE_TYPE_DATUM, + new RowKeyValueAccessor(Collections. singletonList(TABLE_TYPE_DATUM), 0)), + false)), + 0, true); + private static final Collection TABLE_TYPE_TUPLES = + Lists.newArrayListWithExpectedSize(PTableType.values().length); + static { + List tableTypes = Lists. 
newArrayList(PTableType.INDEX.getValue().getBytes(), + Bytes.toBytes(SEQUENCE_TABLE_TYPE), PTableType.SYSTEM.getValue().getBytes(), + PTableType.TABLE.getValue().getBytes(), PTableType.VIEW.getValue().getBytes()); + for (byte[] tableType : tableTypes) { + TABLE_TYPE_TUPLES + .add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(tableType, TABLE_FAMILY_BYTES, + TABLE_TYPE_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY))); + } + } + + /** + * Supported table types include: INDEX, SEQUENCE, SYSTEM TABLE, TABLE, VIEW + */ + @Override + public ResultSet getTableTypes() throws SQLException { + PhoenixStatement stmt = new PhoenixStatement(connection); + stmt.closeOnCompletion(); + return new PhoenixResultSet(new MaterializedResultIterator(TABLE_TYPE_TUPLES), + TABLE_TYPE_ROW_PROJECTOR, new StatementContext(stmt, false)); + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, + String[] types) throws SQLException { + PreparedStatement stmt = + QueryUtil.getTablesStmt(connection, catalog, schemaPattern, tableNamePattern, types); + if (stmt == null) { + return getEmptyResultSet(); + } + stmt.closeOnCompletion(); + return stmt.executeQuery(); + } + + @Override + public String getTimeDateFunctions() throws SQLException { + return ""; + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + return getEmptyResultSet(); + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, + int[] types) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public String getURL() throws SQLException { + return connection.getURL(); + } + + @Override + public String getUserName() throws SQLException { + String userName = connection.getQueryServices().getUserName(); + return userName == null ? 
StringUtil.EMPTY_STRING : userName; + } + + @Override + public ResultSet getVersionColumns(String catalog, String schema, String table) + throws SQLException { + return getEmptyResultSet(); + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean isCatalogAtStart() throws SQLException { + return false; + } + + @Override + public boolean isReadOnly() throws SQLException { + return false; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + return false; + } + + @Override + public boolean nullPlusNonNullIsNull() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedAtEnd() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return true; + } + + @Override + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return true; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + return true; + } + + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return true; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return true; + } + + @Override + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return true; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return false; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsColumnAliasing() throws SQLException { + return 
true; + } + + @Override + public boolean supportsConvert() throws SQLException { + return true; + } + + @Override + public boolean supportsConvert(int fromType, int toType) throws SQLException { + // TODO + return false; + } + + @Override + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return true; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() throws SQLException { + return true; + } + + @Override + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + @Override + public boolean supportsGroupBy() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByBeyondSelect() throws SQLException { + return false; + } + + @Override + public boolean supportsGroupByUnrelated() throws SQLException { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + @Override + public boolean supportsLikeEscapeClause() throws SQLException { + return true; + } + + @Override + public boolean supportsLimitedOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsMinimumSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleResultSets() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleTransactions() throws SQLException { + return true; + } + + @Override + public boolean supportsNamedParameters() throws SQLException { + return false; + } + + @Override + public boolean supportsNonNullableColumns() throws SQLException { + return true; + } + + @Override + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOrderByUnrelated() throws SQLException { + return false; + } + + @Override + public boolean supportsOuterJoins() throws SQLException { + return true; + } + + @Override + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + 
// TODO: review + return type == ResultSet.TYPE_FORWARD_ONLY + && concurrency == Connection.TRANSACTION_READ_COMMITTED; + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + // TODO + return holdability == connection.getHoldability(); + } + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + return type == ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return false; + } + + @Override + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return false; + } + + @Override + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return true; + } + + @Override + public boolean supportsTransactions() throws SQLException { + return true; + } + + @Override + public boolean supportsUnion() throws SQLException { + return false; + } + + @Override + public boolean supportsUnionAll() throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return false; + } + + @Override + public boolean usesLocalFiles() throws SQLException { + return false; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private void setParameters(PreparedStatement stmt, List parameterValues) + throws SQLException { + for 
(int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java index 8bdc6ea182a..ef7f7d00ecf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +33,6 @@ import javax.annotation.concurrent.GuardedBy; -import org.apache.phoenix.thirdparty.com.google.common.cache.*; import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; @@ -44,352 +43,340 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesImpl; import org.apache.phoenix.query.QueryServicesOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import org.apache.phoenix.thirdparty.com.google.common.cache.*; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.phoenix.util.PropertiesUtil; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * - * JDBC Driver implementation of Phoenix for production. - * To use this driver, specify the following URL: - * {@code jdbc:phoenix:; } - * Only an embedded driver is currently supported (Phoenix client - * runs in the same JVM as the driver). Connections are lightweight - * and are not pooled. The last part of the URL, the hbase zookeeper - * quorum server name, determines the hbase cluster to which queries - * will be routed. - * - * + * JDBC Driver implementation of Phoenix for production. To use this driver, specify the following + * URL: {@code jdbc:phoenix:; } Only an embedded driver is currently + * supported (Phoenix client runs in the same JVM as the driver). Connections are lightweight and + * are not pooled. The last part of the URL, the hbase zookeeper quorum server name, determines the + * hbase cluster to which queries will be routed. 
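
The class comment reflowed above documents the embedded connection URL form {@code jdbc:phoenix:<quorum>}. Purely as an illustration of that usage, and of a few of the DatabaseMetaData capabilities reformatted earlier in this patch, a minimal sketch might look like the following; the quorum "localhost:2181" and the class name are assumptions for the example, not part of the change:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class PhoenixConnectSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical ZooKeeper quorum; the last part of the URL selects the HBase cluster.
    String url = "jdbc:phoenix:localhost:2181";
    try (Connection conn = DriverManager.getConnection(url)) {
      DatabaseMetaData md = conn.getMetaData();
      // Capability flags implemented by PhoenixDatabaseMetaData above.
      System.out.println("batch updates supported: " + md.supportsBatchUpdates());
      System.out.println("forward-only result sets: "
          + md.supportsResultSetType(ResultSet.TYPE_FORWARD_ONLY));
      // Table types are served from the in-memory TABLE_TYPE_TUPLES built above.
      try (ResultSet rs = md.getTableTypes()) {
        while (rs.next()) {
          System.out.println("table type: " + rs.getString(1));
        }
      }
    }
  }
}
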
* @since 0.1 */ public final class PhoenixDriver extends PhoenixEmbeddedDriver { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixDriver.class); - public static final PhoenixDriver INSTANCE; - private static volatile String driverShutdownMsg; - static { - try { - INSTANCE = new PhoenixDriver(); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixDriver.class); + public static final PhoenixDriver INSTANCE; + private static volatile String driverShutdownMsg; + static { + try { + INSTANCE = new PhoenixDriver(); + try { + // Add shutdown hook to release any resources that were never closed + // In theory not necessary, but it won't hurt anything + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + final Configuration config = + HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + final ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("PHOENIX-DRIVER-SHUTDOWNHOOK" + "-thread-%s").build(); + final ExecutorService svc = Executors.newSingleThreadExecutor(threadFactory); try { - // Add shutdown hook to release any resources that were never closed - // In theory not necessary, but it won't hurt anything - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - final Configuration config = - HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - final ThreadFactory threadFactory = - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("PHOENIX-DRIVER-SHUTDOWNHOOK" + "-thread-%s") - .build(); - final ExecutorService svc = - Executors.newSingleThreadExecutor(threadFactory); - try { - Future future = svc.submit(new Runnable() { - @Override - public void run() { - closeInstance(INSTANCE); - } - }); - // Pull the timeout value (default 5s). - long millisBeforeShutdown = - config.getLong(QueryServices.DRIVER_SHUTDOWN_TIMEOUT_MS, - QueryServicesOptions.DEFAULT_DRIVER_SHUTDOWN_TIMEOUT_MS); + Future future = svc.submit(new Runnable() { + @Override + public void run() { + closeInstance(INSTANCE); + } + }); + // Pull the timeout value (default 5s). + long millisBeforeShutdown = config.getLong(QueryServices.DRIVER_SHUTDOWN_TIMEOUT_MS, + QueryServicesOptions.DEFAULT_DRIVER_SHUTDOWN_TIMEOUT_MS); - // Close with a timeout. If this is running, we know the JVM wants to - // go down. There may be other threads running that are holding the - // lock. We don't want to be blocked on them (for the normal HBase retry - // policy). We don't care about any exceptions, we're going down anyways. - future.get(millisBeforeShutdown, TimeUnit.MILLISECONDS); - } catch (ExecutionException e) { - LOGGER.warn("Failed to close instance", e); - } catch (InterruptedException e) { - LOGGER.warn("Interrupted waiting to close instance", e); - } catch (TimeoutException e) { - LOGGER.warn("Timed out waiting to close instance", e); - } finally { - // We're going down, but try to clean up. - svc.shutdownNow(); - } - } - }); + // Close with a timeout. If this is running, we know the JVM wants to + // go down. There may be other threads running that are holding the + // lock. We don't want to be blocked on them (for the normal HBase retry + // policy). We don't care about any exceptions, we're going down anyways. 
+ future.get(millisBeforeShutdown, TimeUnit.MILLISECONDS); + } catch (ExecutionException e) { + LOGGER.warn("Failed to close instance", e); + } catch (InterruptedException e) { + LOGGER.warn("Interrupted waiting to close instance", e); + } catch (TimeoutException e) { + LOGGER.warn("Timed out waiting to close instance", e); + } finally { + // We're going down, but try to clean up. + svc.shutdownNow(); + } + } + }); - // Only register the driver when we successfully register the shutdown hook - // Don't want to register it if we're already in the process of going down. - DriverManager.registerDriver(INSTANCE); - } catch (IllegalStateException e) { - LOGGER.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down"); + // Only register the driver when we successfully register the shutdown hook + // Don't want to register it if we're already in the process of going down. + DriverManager.registerDriver(INSTANCE); + } catch (IllegalStateException e) { + LOGGER.warn( + "Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down"); - // Close the instance now because we don't have the shutdown hook - closeInstance(INSTANCE); + // Close the instance now because we don't have the shutdown hook + closeInstance(INSTANCE); - throw e; - } - } catch (SQLException e) { - throw new IllegalStateException("Unable to register " + PhoenixDriver.class.getName() + ": "+ e.getMessage()); - } + throw e; + } + } catch (SQLException e) { + throw new IllegalStateException( + "Unable to register " + PhoenixDriver.class.getName() + ": " + e.getMessage()); } + } - private static void closeInstance(PhoenixDriver instance) { - try { - instance.close(); - } catch (SQLException e) { - LOGGER.warn("Unable to close PhoenixDriver on shutdown", e); - } finally { - driverShutdownMsg = "Phoenix driver closed because server is shutting down"; - } + private static void closeInstance(PhoenixDriver instance) { + try { + instance.close(); + } catch (SQLException e) { + LOGGER.warn("Unable to close PhoenixDriver on shutdown", e); + } finally { + driverShutdownMsg = "Phoenix driver closed because server is shutting down"; } + } - // One entry per cluster here - // TODO that's not true, we can have multiple connections with different configs / principals - private final Cache connectionQueryServicesCache = - initializeConnectionCache(); + // One entry per cluster here + // TODO that's not true, we can have multiple connections with different configs / principals + private final Cache connectionQueryServicesCache = + initializeConnectionCache(); - public PhoenixDriver() { // for Squirrel - // Use production services implementation - super(); - } + public PhoenixDriver() { // for Squirrel + // Use production services implementation + super(); + } - private Cache initializeConnectionCache() { - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - int maxCacheDuration = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS, - QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION); - RemovalListener cacheRemovalListener = - new RemovalListener() { - @Override - public void onRemoval(RemovalNotification notification) { - String connInfoIdentifier = notification.getKey().toString(); - LOGGER.debug("Expiring " + connInfoIdentifier + " because of " - + notification.getCause().name()); + private Cache initializeConnectionCache() { + Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + int maxCacheDuration = + config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS, + QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION); + RemovalListener cacheRemovalListener = + new RemovalListener() { + @Override + public void + onRemoval(RemovalNotification notification) { + String connInfoIdentifier = notification.getKey().toString(); + LOGGER.debug( + "Expiring " + connInfoIdentifier + " because of " + notification.getCause().name()); - try { - notification.getValue().close(); - } - catch (SQLException se) { - LOGGER.error("Error while closing expired cache connection " + connInfoIdentifier, se); - } - } - }; - return CacheBuilder.newBuilder() - .expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS) - .removalListener(cacheRemovalListener) - .build(); - } + try { + notification.getValue().close(); + } catch (SQLException se) { + LOGGER.error("Error while closing expired cache connection " + connInfoIdentifier, se); + } + } + }; + return CacheBuilder.newBuilder().expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS) + .removalListener(cacheRemovalListener).build(); + } - // writes guarded by "this" - private volatile QueryServices services; - - @GuardedBy("closeLock") - private volatile boolean closed = false; - private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); - + // writes guarded by "this" + private volatile QueryServices services; - @Override - public QueryServices getQueryServices() throws SQLException { - lockInterruptibly(LockMode.READ); - try { - checkClosed(); - // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration - // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be - // made at driver initialization time which is too early for some systems. - QueryServices result = services; - if (result == null) { - synchronized(this) { - result = services; - if (result == null) { - services = result = new QueryServicesImpl(getDefaultProps()); - } - } - } - return result; - } finally { - unlock(LockMode.READ); + @GuardedBy("closeLock") + private volatile boolean closed = false; + private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); + + @Override + public QueryServices getQueryServices() throws SQLException { + lockInterruptibly(LockMode.READ); + try { + checkClosed(); + // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration + // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be + // made at driver initialization time which is too early for some systems. 
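
The initializeConnectionCache logic reformatted above builds a cache keyed by ConnectionInfo that expires entries after a configurable idle period and closes the evicted ConnectionQueryServices from a removal listener. A standalone sketch of that expire-and-close pattern, using plain Guava instead of Phoenix's relocated org.apache.phoenix.thirdparty packages and made-up key/value types, could look like this:

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

public class ExpiringCacheSketch {
  public static void main(String[] args) {
    RemovalListener<String, AutoCloseable> onRemoval =
        new RemovalListener<String, AutoCloseable>() {
          @Override
          public void onRemoval(RemovalNotification<String, AutoCloseable> n) {
            // Close the evicted resource, mirroring how the driver closes an
            // expired ConnectionQueryServices instance.
            try {
              n.getValue().close();
            } catch (Exception e) {
              System.err.println("Error closing " + n.getKey() + ": " + e);
            }
          }
        };
    Cache<String, AutoCloseable> cache = CacheBuilder.newBuilder()
        .expireAfterAccess(10, TimeUnit.MINUTES) // the driver reads this duration from config
        .removalListener(onRemoval)
        .build();
    cache.put("cluster-A", () -> System.out.println("closed cluster-A"));
    cache.invalidate("cluster-A"); // triggers the removal listener
  }
}
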
+ QueryServices result = services; + if (result == null) { + synchronized (this) { + result = services; + if (result == null) { + services = result = new QueryServicesImpl(getDefaultProps()); + } } + } + return result; + } finally { + unlock(LockMode.READ); } + } - @Override - public boolean acceptsURL(String url) throws SQLException { - // Accept the url only if test=true attribute not set - return super.acceptsURL(url) && !isTestUrl(url); + @Override + public boolean acceptsURL(String url) throws SQLException { + // Accept the url only if test=true attribute not set + return super.acceptsURL(url) && !isTestUrl(url); + } + + @Override + public Connection connect(String url, Properties info) throws SQLException { + GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER.increment(); + if (!acceptsURL(url)) { + GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); + return null; } - - @Override - public Connection connect(String url, Properties info) throws SQLException { - GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER.increment(); - if (!acceptsURL(url)) { - GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); - return null; - } - lockInterruptibly(LockMode.READ); - try { - checkClosed(); - return createConnection(url, info); - } catch (SQLException sqlException) { - if (sqlException.getErrorCode() != SQLExceptionCode.NEW_CONNECTION_THROTTLED.getErrorCode()) { - GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); - } - throw sqlException; - } catch(Exception e) { - GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); - throw e; - } finally { - unlock(LockMode.READ); - } + lockInterruptibly(LockMode.READ); + try { + checkClosed(); + return createConnection(url, info); + } catch (SQLException sqlException) { + if (sqlException.getErrorCode() != SQLExceptionCode.NEW_CONNECTION_THROTTLED.getErrorCode()) { + GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); + } + throw sqlException; + } catch (Exception e) { + GLOBAL_FAILED_PHOENIX_CONNECTIONS.increment(); + throw e; + } finally { + unlock(LockMode.READ); } - - @Override - protected ConnectionQueryServices getConnectionQueryServices(String url, final Properties infoIn) throws SQLException { - lockInterruptibly(LockMode.READ); - try { - checkClosed(); - SQLException sqlE = null; - boolean success = false; - final QueryServices services = getQueryServices(); - ConnectionQueryServices connectionQueryServices = null; - // Also performs the Kerberos login if the URL/properties request this - final Properties info = PropertiesUtil.deepCopy(infoIn); - final ConnectionInfo connInfo = ConnectionInfo.create(url, services.getProps(), info); - //Set connection parameters to normalized value from URL - info.putAll(connInfo.asProps().asMap()); - try { + } + + @Override + protected ConnectionQueryServices getConnectionQueryServices(String url, final Properties infoIn) + throws SQLException { + lockInterruptibly(LockMode.READ); + try { + checkClosed(); + SQLException sqlE = null; + boolean success = false; + final QueryServices services = getQueryServices(); + ConnectionQueryServices connectionQueryServices = null; + // Also performs the Kerberos login if the URL/properties request this + final Properties info = PropertiesUtil.deepCopy(infoIn); + final ConnectionInfo connInfo = ConnectionInfo.create(url, services.getProps(), info); + // Set connection parameters to normalized value from URL + info.putAll(connInfo.asProps().asMap()); + try { + connectionQueryServices = + connectionQueryServicesCache.get(connInfo, new Callable() { + @Override + public ConnectionQueryServices call() throws Exception { 
+ ConnectionQueryServices connectionQueryServices; + if (connInfo.isConnectionless()) { connectionQueryServices = - connectionQueryServicesCache.get(connInfo, new Callable() { - @Override - public ConnectionQueryServices call() throws Exception { - ConnectionQueryServices connectionQueryServices; - if (connInfo.isConnectionless()) { - connectionQueryServices = new ConnectionlessQueryServicesImpl(services, connInfo, info); - } else { - connectionQueryServices = new ConnectionQueryServicesImpl(services, connInfo, info); - } + new ConnectionlessQueryServicesImpl(services, connInfo, info); + } else { + connectionQueryServices = new ConnectionQueryServicesImpl(services, connInfo, info); + } - return connectionQueryServices; - } - }); - connectionQueryServices.init(url, info); - success = true; - } catch (ExecutionException ee){ - if (ee.getCause() instanceof SQLException) { - sqlE = (SQLException) ee.getCause(); - } else { - throw new SQLException(ee); - } - } - catch (SQLException e) { - sqlE = e; + return connectionQueryServices; } - finally { - if (!success) { - // Remove from map, as initialization failed - connectionQueryServicesCache.invalidate(connInfo); - if (sqlE != null) { - throw sqlE; - } - } - } - return connectionQueryServices; - } finally { - unlock(LockMode.READ); + }); + connectionQueryServices.init(url, info); + success = true; + } catch (ExecutionException ee) { + if (ee.getCause() instanceof SQLException) { + sqlE = (SQLException) ee.getCause(); + } else { + throw new SQLException(ee); } - } - - @GuardedBy("closeLock") - private void checkClosed() { - if (closed) { - throwDriverClosedException(); + } catch (SQLException e) { + sqlE = e; + } finally { + if (!success) { + // Remove from map, as initialization failed + connectionQueryServicesCache.invalidate(connInfo); + if (sqlE != null) { + throw sqlE; + } } + } + return connectionQueryServices; + } finally { + unlock(LockMode.READ); } - - private void throwDriverClosedException() { - throw new IllegalStateException(driverShutdownMsg != null ? driverShutdownMsg : "The Phoenix jdbc driver has been closed."); - } + } - - /** - * Invalidate the CQS in global connection query services cache. - * - * @param url The JDBC connection string - * @param properties properties containing the fields of connection info (key of cache) - * @throws SQLException if fails to generate key for CQS to invalidate - */ - void invalidateCache(String url, Properties properties) throws SQLException { - ConnectionInfo connInfo = ConnectionInfo.create(url, getQueryServices().getProps(), properties); - LOGGER.info("Invalidating the CQS from cache for connInfo={}", connInfo); - connectionQueryServicesCache.invalidate(connInfo); - LOGGER.debug(connectionQueryServicesCache.asMap().keySet().stream().map(Objects::toString).collect(Collectors.joining(","))); + @GuardedBy("closeLock") + private void checkClosed() { + if (closed) { + throwDriverClosedException(); } + } + private void throwDriverClosedException() { + throw new IllegalStateException( + driverShutdownMsg != null ? driverShutdownMsg : "The Phoenix jdbc driver has been closed."); + } - @Override - public synchronized void close() throws SQLException { - lockInterruptibly(LockMode.WRITE); - try { - if (closed) { - return; - } - closed = true; - } finally { - unlock(LockMode.WRITE); - } + /** + * Invalidate the CQS in global connection query services cache. 
+ * @param url The JDBC connection string + * @param properties properties containing the fields of connection info (key of cache) + * @throws SQLException if fails to generate key for CQS to invalidate + */ + void invalidateCache(String url, Properties properties) throws SQLException { + ConnectionInfo connInfo = ConnectionInfo.create(url, getQueryServices().getProps(), properties); + LOGGER.info("Invalidating the CQS from cache for connInfo={}", connInfo); + connectionQueryServicesCache.invalidate(connInfo); + LOGGER.debug(connectionQueryServicesCache.asMap().keySet().stream().map(Objects::toString) + .collect(Collectors.joining(","))); + } - if (services != null) { - try { - services.close(); - } finally { - services = null; - } - } + @Override + public synchronized void close() throws SQLException { + lockInterruptibly(LockMode.WRITE); + try { + if (closed) { + return; + } + closed = true; + } finally { + unlock(LockMode.WRITE); + } - if (connectionQueryServicesCache != null) { - try { - for (ConnectionQueryServices cqsi : connectionQueryServicesCache.asMap().values()) { - cqsi.close(); - } - } catch (Exception e) { - LOGGER.warn("Failed to close ConnectionQueryServices instance", e); - } + if (services != null) { + try { + services.close(); + } finally { + services = null; + } + } + + if (connectionQueryServicesCache != null) { + try { + for (ConnectionQueryServices cqsi : connectionQueryServicesCache.asMap().values()) { + cqsi.close(); } + } catch (Exception e) { + LOGGER.warn("Failed to close ConnectionQueryServices instance", e); + } } + } - private enum LockMode { - READ, WRITE - }; + private enum LockMode { + READ, + WRITE + }; - private void lockInterruptibly(LockMode mode) throws SQLException { - checkNotNull(mode); - switch (mode) { - case READ: - try { - closeLock.readLock().lockInterruptibly(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); - } - break; - case WRITE: - try { - closeLock.writeLock().lockInterruptibly(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); - } + private void lockInterruptibly(LockMode mode) throws SQLException { + checkNotNull(mode); + switch (mode) { + case READ: + try { + closeLock.readLock().lockInterruptibly(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); + } + break; + case WRITE: + try { + closeLock.writeLock().lockInterruptibly(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); } } + } - private void unlock(LockMode mode) { - checkNotNull(mode); - switch (mode) { - case READ: - closeLock.readLock().unlock(); - break; - case WRITE: - closeLock.writeLock().unlock(); - } + private void unlock(LockMode mode) { + checkNotNull(mode); + switch (mode) { + case READ: + closeLock.readLock().unlock(); + break; + case WRITE: + closeLock.writeLock().unlock(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java index 
f4de1ae7793..b67a1e85b8d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,160 +39,153 @@ import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.SQLCloseable; - - /** - * * Abstract base class for JDBC Driver implementation of Phoenix - * - * * @since 0.1 */ @Immutable public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable { - /** - * The protocol for Phoenix Network Client - */ - private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//"; - private final static String DRIVER_NAME = "PhoenixEmbeddedDriver"; - private static final String TEST_URL_AT_END = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; - private static final String TEST_URL_IN_MIDDLE = TEST_URL_AT_END + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; - - private static final String[] SUPPORTED_PROTOCOLS = - new String[] { PhoenixRuntime.JDBC_PROTOCOL, PhoenixRuntime.JDBC_PROTOCOL_ZK, - PhoenixRuntime.JDBC_PROTOCOL_MASTER, PhoenixRuntime.JDBC_PROTOCOL_RPC }; - - private final static DriverPropertyInfo[] EMPTY_INFO = new DriverPropertyInfo[0]; - public final static String MAJOR_VERSION_PROP = "DriverMajorVersion"; - public final static String MINOR_VERSION_PROP = "DriverMinorVersion"; - public final static String DRIVER_NAME_PROP = "DriverName"; - - public static final ReadOnlyProps DEFAULT_PROPS = new ReadOnlyProps( - ImmutableMap.of( - MAJOR_VERSION_PROP, Integer.toString(MetaDataProtocol.PHOENIX_MAJOR_VERSION), - MINOR_VERSION_PROP, Integer.toString(MetaDataProtocol.PHOENIX_MINOR_VERSION), - DRIVER_NAME_PROP, DRIVER_NAME)); - - PhoenixEmbeddedDriver() { - } - - protected ReadOnlyProps getDefaultProps() { - return DEFAULT_PROPS; - } - - abstract public QueryServices getQueryServices() throws SQLException; - - @Override - public boolean acceptsURL(String url) throws SQLException { - if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { - for (String protocol : SUPPORTED_PROTOCOLS) { - // A connection string of "jdbc:phoenix" is supported, since - // all the connection information can potentially be gotten - // out of the HBase config file - if (!url.startsWith(protocol)) { - continue; - } - if (url.length() == protocol.length()) { - return true; - } - // Same as above, except for "jdbc:phoenix;prop=..." - if (PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR == url.charAt(protocol.length())) { - return true; - } - if (PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR == url.charAt(protocol.length())) { - int protoLength = protocol.length() + 1; - // A connection string of "jdbc:phoenix:" matches this driver, - // but will end up as a MALFORMED_CONNECTION_URL exception later. - if (url.length() == protoLength) { - return true; - } - // Explicitly ignore connections of "jdbc:phoenix:thin"; leave them for - // the thin client - if (url.startsWith(PhoenixRuntime.JDBC_THIN_PROTOCOL)) { - return false; - } - // A connection string of the form "jdbc:phoenix://" means that - // the driver is remote which isn't supported, so return false. 
- if (!url.startsWith(DNC_JDBC_PROTOCOL_SUFFIX, protoLength)) { - return true; - } - } - } + /** + * The protocol for Phoenix Network Client + */ + private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//"; + private final static String DRIVER_NAME = "PhoenixEmbeddedDriver"; + private static final String TEST_URL_AT_END = + "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; + private static final String TEST_URL_IN_MIDDLE = + TEST_URL_AT_END + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; + + private static final String[] SUPPORTED_PROTOCOLS = + new String[] { PhoenixRuntime.JDBC_PROTOCOL, PhoenixRuntime.JDBC_PROTOCOL_ZK, + PhoenixRuntime.JDBC_PROTOCOL_MASTER, PhoenixRuntime.JDBC_PROTOCOL_RPC }; + + private final static DriverPropertyInfo[] EMPTY_INFO = new DriverPropertyInfo[0]; + public final static String MAJOR_VERSION_PROP = "DriverMajorVersion"; + public final static String MINOR_VERSION_PROP = "DriverMinorVersion"; + public final static String DRIVER_NAME_PROP = "DriverName"; + + public static final ReadOnlyProps DEFAULT_PROPS = + new ReadOnlyProps(ImmutableMap.of(MAJOR_VERSION_PROP, + Integer.toString(MetaDataProtocol.PHOENIX_MAJOR_VERSION), MINOR_VERSION_PROP, + Integer.toString(MetaDataProtocol.PHOENIX_MINOR_VERSION), DRIVER_NAME_PROP, DRIVER_NAME)); + + PhoenixEmbeddedDriver() { + } + + protected ReadOnlyProps getDefaultProps() { + return DEFAULT_PROPS; + } + + abstract public QueryServices getQueryServices() throws SQLException; + + @Override + public boolean acceptsURL(String url) throws SQLException { + if (url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)) { + for (String protocol : SUPPORTED_PROTOCOLS) { + // A connection string of "jdbc:phoenix" is supported, since + // all the connection information can potentially be gotten + // out of the HBase config file + if (!url.startsWith(protocol)) { + continue; } - return false; - } - - @Override - public Connection connect(String url, Properties info) throws SQLException { - if (!acceptsURL(url)) { - return null; + if (url.length() == protocol.length()) { + return true; } - - return createConnection(url, info); - } - - protected final Connection createConnection(String url, Properties info) throws SQLException { - Properties augmentedInfo = PropertiesUtil.deepCopy(info); - augmentedInfo.putAll(getDefaultProps().asMap()); - if (url.contains("|")) { - // High availability connection using two clusters - Optional haGroup = HighAvailabilityGroup.get(url, augmentedInfo); - if (haGroup.isPresent()) { - return haGroup.get().connect(augmentedInfo); - } else { - // If empty HA group is returned, fall back to single cluster. - url = - HighAvailabilityGroup.getFallbackCluster(url, info).orElseThrow( - () -> new SQLException( - "HA group can not be initialized, fallback to single cluster")); - } + // Same as above, except for "jdbc:phoenix;prop=..." + if (PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR == url.charAt(protocol.length())) { + return true; } - ConnectionQueryServices cqs = getConnectionQueryServices(url, augmentedInfo); - return cqs.connect(url, augmentedInfo); - } - - /** - * Get or create if necessary a QueryServices that is associated with the HBase zookeeper quorum - * name (part of the connection URL). This will cause the underlying Configuration held by the - * QueryServices to be shared for all connections to the same HBase cluster. - * @param url connection URL - * @param info connection properties - * @return new or cached QuerySerices used to establish a new Connection. 
- * @throws SQLException - */ - protected abstract ConnectionQueryServices getConnectionQueryServices(String url, Properties info) throws SQLException; - - @Override - public int getMajorVersion() { - return MetaDataProtocol.PHOENIX_MAJOR_VERSION; - } - - @Override - public int getMinorVersion() { - return MetaDataProtocol.PHOENIX_MINOR_VERSION; - } - - @Override - public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { - return EMPTY_INFO; - } - - @Override - public boolean jdbcCompliant() { - return false; - } - - @Override - public Logger getParentLogger() throws SQLFeatureNotSupportedException { - return null; + if (PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR == url.charAt(protocol.length())) { + int protoLength = protocol.length() + 1; + // A connection string of "jdbc:phoenix:" matches this driver, + // but will end up as a MALFORMED_CONNECTION_URL exception later. + if (url.length() == protoLength) { + return true; + } + // Explicitly ignore connections of "jdbc:phoenix:thin"; leave them for + // the thin client + if (url.startsWith(PhoenixRuntime.JDBC_THIN_PROTOCOL)) { + return false; + } + // A connection string of the form "jdbc:phoenix://" means that + // the driver is remote which isn't supported, so return false. + if (!url.startsWith(DNC_JDBC_PROTOCOL_SUFFIX, protoLength)) { + return true; + } + } + } } + return false; + } - @Override - public void close() throws SQLException { + @Override + public Connection connect(String url, Properties info) throws SQLException { + if (!acceptsURL(url)) { + return null; } - - public static boolean isTestUrl(String url) { - return url.endsWith(TEST_URL_AT_END) || url.contains(TEST_URL_IN_MIDDLE); + return createConnection(url, info); + } + + protected final Connection createConnection(String url, Properties info) throws SQLException { + Properties augmentedInfo = PropertiesUtil.deepCopy(info); + augmentedInfo.putAll(getDefaultProps().asMap()); + if (url.contains("|")) { + // High availability connection using two clusters + Optional haGroup = HighAvailabilityGroup.get(url, augmentedInfo); + if (haGroup.isPresent()) { + return haGroup.get().connect(augmentedInfo); + } else { + // If empty HA group is returned, fall back to single cluster. + url = HighAvailabilityGroup.getFallbackCluster(url, info).orElseThrow( + () -> new SQLException("HA group can not be initialized, fallback to single cluster")); + } } + ConnectionQueryServices cqs = getConnectionQueryServices(url, augmentedInfo); + return cqs.connect(url, augmentedInfo); + } + + /** + * Get or create if necessary a QueryServices that is associated with the HBase zookeeper quorum + * name (part of the connection URL). This will cause the underlying Configuration held by the + * QueryServices to be shared for all connections to the same HBase cluster. + * @param url connection URL + * @param info connection properties + * @return new or cached QuerySerices used to establish a new Connection. 
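
The acceptsURL logic reformatted above recognizes the embedded protocol variants and, per its comments, leaves thin-client and remote ("//") forms to other drivers. As a reading aid only, a small sketch that prints the driver's decision for a few representative URLs (this assumes the Phoenix client jar is on the classpath, which also triggers the driver's static registration):

import org.apache.phoenix.jdbc.PhoenixDriver;

public class AcceptsUrlSketch {
  public static void main(String[] args) throws Exception {
    PhoenixDriver driver = PhoenixDriver.INSTANCE;
    String[] urls = {
        "jdbc:phoenix",                    // config taken from hbase-site.xml
        "jdbc:phoenix:zk1,zk2,zk3",        // explicit ZooKeeper quorum
        "jdbc:phoenix:thin:url=http://x",  // documented above as left for the thin client
        "jdbc:phoenix://remote"            // remote form, documented above as unsupported
    };
    for (String url : urls) {
      System.out.println(url + " -> " + driver.acceptsURL(url));
    }
  }
}
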
+ */ + protected abstract ConnectionQueryServices getConnectionQueryServices(String url, Properties info) + throws SQLException; + + @Override + public int getMajorVersion() { + return MetaDataProtocol.PHOENIX_MAJOR_VERSION; + } + + @Override + public int getMinorVersion() { + return MetaDataProtocol.PHOENIX_MINOR_VERSION; + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + return EMPTY_INFO; + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + return null; + } + + @Override + public void close() throws SQLException { + } + + public static boolean isTestUrl(String url) { + return url.endsWith(TEST_URL_AT_END) || url.contains(TEST_URL_IN_MIDDLE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdminTool.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdminTool.java index e7a9cd7a22f..1b589e1f7e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdminTool.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAAdminTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,12 +30,6 @@ import java.util.Optional; import java.util.Properties; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.PosixParser; import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.StringUtils; import org.apache.curator.RetryPolicy; @@ -45,20 +39,26 @@ import org.apache.curator.utils.ZKPaths; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.phoenix.jdbc.ClusterRoleRecord.ClusterRole; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.PosixParser; import org.apache.phoenix.util.JDBCUtil; import org.apache.phoenix.util.JacksonUtil; import org.apache.zookeeper.CreateMode; -import 
org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.KeeperException.NoNodeException; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.zookeeper.KeeperException.NodeExistsException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,575 +66,565 @@ * The command line tool to manage high availability (HA) groups and their cluster roles. */ public class PhoenixHAAdminTool extends Configured implements Tool { - // Following are return value of this tool. We need this to be very explicit because external - // system calling this tool may need to retry, alert or audit the operations of cluster roles. - public static final int RET_SUCCESS = 0; // Saul Goodman - public static final int RET_ARGUMENT_ERROR = 1; // arguments are invalid - public static final int RET_SYNC_ERROR = 2; // error to sync from manifest to ZK - public static final int RET_REPAIR_FOUND_INCONSISTENCIES = 3; // error to repair current ZK - - private static final Logger LOG = LoggerFactory.getLogger(PhoenixHAAdminTool.class); - - private static final Option HELP_OPT = new Option("h", "help", false, "Show this help"); - private static final Option FORCEFUL_OPT = - new Option("F", "forceful", false, - "Forceful writing cluster role records ignoring errors on other clusters"); - private static final Option MANIFEST_OPT = - new Option("m", "manifest", true, "Manifest file containing cluster role records"); - private static final Option LIST_OPT = - new Option("l", "list", false, "List all HA groups stored on this ZK cluster"); - private static final Option REPAIR_OPT = new Option("r", "repair", false, - "Verify all HA groups stored on this ZK cluster and repair if inconsistency found"); - @VisibleForTesting - static final Options OPTIONS = new Options() - .addOption(HELP_OPT) - .addOption(FORCEFUL_OPT) - .addOption(MANIFEST_OPT) - .addOption(LIST_OPT) - .addOption(REPAIR_OPT); + // Following are return value of this tool. We need this to be very explicit because external + // system calling this tool may need to retry, alert or audit the operations of cluster roles. 
+ public static final int RET_SUCCESS = 0; // Saul Goodman + public static final int RET_ARGUMENT_ERROR = 1; // arguments are invalid + public static final int RET_SYNC_ERROR = 2; // error to sync from manifest to ZK + public static final int RET_REPAIR_FOUND_INCONSISTENCIES = 3; // error to repair current ZK + + private static final Logger LOG = LoggerFactory.getLogger(PhoenixHAAdminTool.class); + + private static final Option HELP_OPT = new Option("h", "help", false, "Show this help"); + private static final Option FORCEFUL_OPT = new Option("F", "forceful", false, + "Forceful writing cluster role records ignoring errors on other clusters"); + private static final Option MANIFEST_OPT = + new Option("m", "manifest", true, "Manifest file containing cluster role records"); + private static final Option LIST_OPT = + new Option("l", "list", false, "List all HA groups stored on this ZK cluster"); + private static final Option REPAIR_OPT = new Option("r", "repair", false, + "Verify all HA groups stored on this ZK cluster and repair if inconsistency found"); + @VisibleForTesting + static final Options OPTIONS = new Options().addOption(HELP_OPT).addOption(FORCEFUL_OPT) + .addOption(MANIFEST_OPT).addOption(LIST_OPT).addOption(REPAIR_OPT); + + @Override + public int run(String[] args) throws Exception { + CommandLine commandLine; + try { + commandLine = parseOptions(args); + } catch (Exception e) { + System.err.println( + "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " + e); + printUsageMessage(); + return RET_ARGUMENT_ERROR; + } - @Override - public int run(String[] args) throws Exception { - CommandLine commandLine; + try { + if (commandLine.hasOption(HELP_OPT.getOpt())) { + printUsageMessage(); + return RET_SUCCESS; + } else if (commandLine.hasOption(LIST_OPT.getOpt())) { // list + String zkUrl = getLocalZkUrl(getConf()); // Admin is created against local ZK cluster + try (PhoenixHAAdminHelper admin = + new PhoenixHAAdminHelper(zkUrl, getConf(), HighAvailibilityCuratorProvider.INSTANCE)) { + List records = admin.listAllClusterRoleRecordsOnZookeeper(); + JacksonUtil.getObjectWriterPretty().writeValue(System.out, records); + } + } else if (commandLine.hasOption(MANIFEST_OPT.getOpt())) { // create or update + String fileName = commandLine.getOptionValue(MANIFEST_OPT.getOpt()); + List records = readRecordsFromFile(fileName); + boolean forceful = commandLine.hasOption(FORCEFUL_OPT.getOpt()); + Map> failedHaGroups = syncClusterRoleRecords(records, forceful); + if (!failedHaGroups.isEmpty()) { + System.out.println("Found following HA groups are failing to write the clusters:"); + failedHaGroups + .forEach((k, v) -> System.out.printf("%s -> [%s]\n", k, String.join(",", v))); + return RET_SYNC_ERROR; + } + } else if (commandLine.hasOption(REPAIR_OPT.getOpt())) { // verify and repair + String zkUrl = getLocalZkUrl(getConf()); // Admin is created against local ZK cluster + try (PhoenixHAAdminHelper admin = + new PhoenixHAAdminHelper(zkUrl, getConf(), HighAvailibilityCuratorProvider.INSTANCE)) { + List inconsistentRecord = admin.verifyAndRepairWithRemoteZnode(); + if (!inconsistentRecord.isEmpty()) { + System.out.println("Found following inconsistent cluster role records: "); + System.out.print(String.join(",", inconsistentRecord)); + return RET_REPAIR_FOUND_INCONSISTENCIES; + } + } + } + return RET_SUCCESS; + } catch (Exception e) { + e.printStackTrace(); + return -1; + } + } + + /** + * Read cluster role records defined in the file, given file name. 
+ * @param file The local manifest file name to read from + * @return list of cluster role records defined in the manifest file + * @throws Exception when parsing or reading from the input file + */ + @VisibleForTesting + List readRecordsFromFile(String file) throws Exception { + Preconditions.checkArgument(!StringUtils.isEmpty(file)); + String fileType = FilenameUtils.getExtension(file); + switch (fileType) { + case "json": + // TODO: use jackson or standard JSON library according to PHOENIX-5789 + try (Reader reader = new FileReader(file)) { + ClusterRoleRecord[] records = + JacksonUtil.getObjectReader(ClusterRoleRecord[].class).readValue(reader); + return Arrays.asList(records); + } + case "yaml": + LOG.error("YAML file is not yet supported. See W-8274533"); + default: + throw new Exception("Can not read cluster role records from file '" + file + "' " + + "reason: unsupported file type"); + } + } + + /** + * Helper method to write the given cluster role records into the ZK clusters respectively. // + * TODO: add retry logics + * @param records The cluster role record list to save on ZK + * @param forceful if true, this method will ignore errors on other clusters; otherwise it will + * not update next cluster (in order) if there is any failure on current cluster + * @return a map of HA group name to list cluster's url for cluster role record failing to write + */ + private Map> syncClusterRoleRecords(List records, + boolean forceful) throws IOException { + Map> failedHaGroups = new HashMap<>(); + for (ClusterRoleRecord record : records) { + String haGroupName = record.getHaGroupName(); + try ( + PhoenixHAAdminHelper admin1 = new PhoenixHAAdminHelper(record.getZk1(), getConf(), + HighAvailibilityCuratorProvider.INSTANCE); + PhoenixHAAdminHelper admin2 = new PhoenixHAAdminHelper(record.getZk2(), getConf(), + HighAvailibilityCuratorProvider.INSTANCE)) { + // Update the cluster previously ACTIVE cluster first. + // It reduces the chances of split-brain between clients and clusters. + // If can not determine previous ACTIVE cluster, update new STANDBY cluster first. 
+ final PairOfSameType pair; + if (admin1.isCurrentActiveCluster(haGroupName)) { + pair = new PairOfSameType<>(admin1, admin2); + } else if (admin2.isCurrentActiveCluster(haGroupName)) { + pair = new PairOfSameType<>(admin2, admin1); + } else if (record.getRole(admin1.getZkUrl()) == ClusterRole.STANDBY) { + pair = new PairOfSameType<>(admin1, admin2); + } else { + pair = new PairOfSameType<>(admin2, admin1); + } try { - commandLine = parseOptions(args); - } catch (Exception e) { - System.err.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " + e); - printUsageMessage(); - return RET_ARGUMENT_ERROR; + pair.getFirst().createOrUpdateDataOnZookeeper(record); + } catch (IOException e) { + LOG.error("Error to create or update data on Zookeeper, cluster={}, record={}", + pair.getFirst(), record); + failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) + .add(pair.getFirst().zkUrl); + if (!forceful) { + LOG.error("-forceful option is not enabled by command line options, " + + "skip writing record {} to ZK clusters", record); + // skip writing this record to second ZK cluster, so we should report that + failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) + .add(pair.getSecond().zkUrl); + continue; // do not update this record on second cluster + } } - try { - if (commandLine.hasOption(HELP_OPT.getOpt())) { - printUsageMessage(); - return RET_SUCCESS; - } else if (commandLine.hasOption(LIST_OPT.getOpt())) { // list - String zkUrl = getLocalZkUrl(getConf()); // Admin is created against local ZK cluster - try (PhoenixHAAdminHelper admin = new PhoenixHAAdminHelper(zkUrl, getConf(), HighAvailibilityCuratorProvider.INSTANCE)) { - List records = admin.listAllClusterRoleRecordsOnZookeeper(); - JacksonUtil.getObjectWriterPretty().writeValue(System.out, records); - } - } else if (commandLine.hasOption(MANIFEST_OPT.getOpt())) { // create or update - String fileName = commandLine.getOptionValue(MANIFEST_OPT.getOpt()); - List records = readRecordsFromFile(fileName); - boolean forceful = commandLine.hasOption(FORCEFUL_OPT.getOpt()); - Map> failedHaGroups = syncClusterRoleRecords(records, forceful); - if (!failedHaGroups.isEmpty()) { - System.out.println("Found following HA groups are failing to write the clusters:"); - failedHaGroups.forEach((k, v) -> - System.out.printf("%s -> [%s]\n", k, String.join(",", v))); - return RET_SYNC_ERROR; - } - } else if (commandLine.hasOption(REPAIR_OPT.getOpt())) { // verify and repair - String zkUrl = getLocalZkUrl(getConf()); // Admin is created against local ZK cluster - try (PhoenixHAAdminHelper admin = new PhoenixHAAdminHelper(zkUrl, getConf(), HighAvailibilityCuratorProvider.INSTANCE)) { - List inconsistentRecord = admin.verifyAndRepairWithRemoteZnode(); - if (!inconsistentRecord.isEmpty()) { - System.out.println("Found following inconsistent cluster role records: "); - System.out.print(String.join(",", inconsistentRecord)); - return RET_REPAIR_FOUND_INCONSISTENCIES; - } - } - } - return RET_SUCCESS; - } catch(Exception e ) { - e.printStackTrace(); - return -1; + pair.getSecond().createOrUpdateDataOnZookeeper(record); + } catch (IOException e) { + LOG.error("Error to create or update data on Zookeeper, cluster={}, record={}", + pair.getFirst(), record); + failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) + .add(pair.getSecond().zkUrl); } + } + } + return failedHaGroups; + } + + /** + * Parses the commandline arguments, throw exception if validation fails. 
+ * @param args supplied command line arguments + * @return the parsed command line + */ + @VisibleForTesting + CommandLine parseOptions(String[] args) throws Exception { + CommandLineParser parser = new PosixParser(); + CommandLine cmdLine = parser.parse(OPTIONS, args); + assert cmdLine != null; + + if ( + (cmdLine.hasOption(REPAIR_OPT.getOpt()) && cmdLine.hasOption(MANIFEST_OPT.getOpt())) + || (cmdLine.hasOption(LIST_OPT.getOpt()) && cmdLine.hasOption(REPAIR_OPT.getOpt())) + || (cmdLine.hasOption(LIST_OPT.getOpt()) && cmdLine.hasOption(MANIFEST_OPT.getOpt())) + ) { + String msg = "--list, --manifest and --repair options are mutually exclusive"; + LOG.error(msg + " User provided args: {}", (Object[]) args); + throw new IllegalArgumentException(msg); } - /** - * Read cluster role records defined in the file, given file name. - * - * @param file The local manifest file name to read from - * @return list of cluster role records defined in the manifest file - * @throws Exception when parsing or reading from the input file - */ - @VisibleForTesting - List readRecordsFromFile(String file) throws Exception { - Preconditions.checkArgument(!StringUtils.isEmpty(file)); - String fileType = FilenameUtils.getExtension(file); - switch (fileType) { - case "json": - // TODO: use jackson or standard JSON library according to PHOENIX-5789 - try (Reader reader = new FileReader(file)) { - ClusterRoleRecord[] records = - JacksonUtil.getObjectReader(ClusterRoleRecord[].class).readValue(reader); - return Arrays.asList(records); - } - case "yaml": - LOG.error("YAML file is not yet supported. See W-8274533"); - default: - throw new Exception("Can not read cluster role records from file '" + file + "' " + - "reason: unsupported file type"); - } + if (cmdLine.hasOption(FORCEFUL_OPT.getOpt()) && !cmdLine.hasOption(MANIFEST_OPT.getOpt())) { + String msg = "--forceful option only works with --manifest option"; + LOG.error(msg + " User provided args: {}", (Object[]) args); + throw new IllegalArgumentException(msg); } - /** - * Helper method to write the given cluster role records into the ZK clusters respectively. - * - * // TODO: add retry logics - * - * @param records The cluster role record list to save on ZK - * @param forceful if true, this method will ignore errors on other clusters; otherwise it will - * not update next cluster (in order) if there is any failure on current cluster - * @return a map of HA group name to list cluster's url for cluster role record failing to write - */ - private Map> syncClusterRoleRecords(List records, - boolean forceful) throws IOException { - Map> failedHaGroups = new HashMap<>(); - for (ClusterRoleRecord record : records) { - String haGroupName = record.getHaGroupName(); - try (PhoenixHAAdminHelper admin1 = new PhoenixHAAdminHelper(record.getZk1(), getConf(), HighAvailibilityCuratorProvider.INSTANCE); - PhoenixHAAdminHelper admin2 = new PhoenixHAAdminHelper(record.getZk2(), getConf(), HighAvailibilityCuratorProvider.INSTANCE)) { - // Update the cluster previously ACTIVE cluster first. - // It reduces the chances of split-brain between clients and clusters. - // If can not determine previous ACTIVE cluster, update new STANDBY cluster first. 
- final PairOfSameType pair; - if (admin1.isCurrentActiveCluster(haGroupName)) { - pair = new PairOfSameType<>(admin1, admin2); - } else if (admin2.isCurrentActiveCluster(haGroupName)) { - pair = new PairOfSameType<>(admin2, admin1); - } else if (record.getRole(admin1.getZkUrl()) == ClusterRole.STANDBY) { - pair = new PairOfSameType<>(admin1, admin2); - } else { - pair = new PairOfSameType<>(admin2, admin1); - } - try { - pair.getFirst().createOrUpdateDataOnZookeeper(record); - } catch (IOException e) { - LOG.error("Error to create or update data on Zookeeper, cluster={}, record={}", - pair.getFirst(), record); - failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) - .add(pair.getFirst().zkUrl); - if (!forceful) { - LOG.error("-forceful option is not enabled by command line options, " - + "skip writing record {} to ZK clusters", record); - // skip writing this record to second ZK cluster, so we should report that - failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) - .add(pair.getSecond().zkUrl); - continue; // do not update this record on second cluster - } - } - try { - pair.getSecond().createOrUpdateDataOnZookeeper(record); - } catch (IOException e) { - LOG.error("Error to create or update data on Zookeeper, cluster={}, record={}", - pair.getFirst(), record); - failedHaGroups.computeIfAbsent(haGroupName, (k) -> new ArrayList<>()) - .add(pair.getSecond().zkUrl); - } - } - } - return failedHaGroups; + return cmdLine; + } + + /** + * Print the usage message. + */ + private void printUsageMessage() { + GenericOptionsParser.printGenericCommandUsage(System.out); + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", OPTIONS); + } + + /** + * Helper method to get local ZK fully qualified URL (host:port:/hbase) from configuration. + */ + public static String getLocalZkUrl(Configuration conf) { + String localZkQuorum = conf.get(HConstants.ZOOKEEPER_QUORUM); + if (StringUtils.isEmpty(localZkQuorum)) { + String msg = "ZK quorum not found by looking up key " + HConstants.ZOOKEEPER_QUORUM; + LOG.error(msg); + throw new IllegalArgumentException(msg); } - /** - * Parses the commandline arguments, throw exception if validation fails. 
- * - * @param args supplied command line arguments - * @return the parsed command line - */ - @VisibleForTesting - CommandLine parseOptions(String[] args) throws Exception { - CommandLineParser parser = new PosixParser(); - CommandLine cmdLine = parser.parse(OPTIONS, args); - assert cmdLine != null; - - if ((cmdLine.hasOption(REPAIR_OPT.getOpt()) && cmdLine.hasOption(MANIFEST_OPT.getOpt())) - || (cmdLine.hasOption(LIST_OPT.getOpt()) && cmdLine.hasOption(REPAIR_OPT.getOpt())) - || (cmdLine.hasOption(LIST_OPT.getOpt()) && cmdLine.hasOption(MANIFEST_OPT.getOpt()))) { - String msg = "--list, --manifest and --repair options are mutually exclusive"; - LOG.error(msg + " User provided args: {}", (Object[]) args); - throw new IllegalArgumentException(msg); - } + String portStr = conf.get(HConstants.ZOOKEEPER_CLIENT_PORT); + int port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; + if (portStr != null) { + try { + port = Integer.parseInt(portStr); + } catch (NumberFormatException e) { + String msg = + String.format("Unrecognized ZK port '%s' in ZK quorum '%s'", portStr, localZkQuorum); + LOG.error(msg, e); + throw new IllegalArgumentException(msg, e); + } + } - if (cmdLine.hasOption(FORCEFUL_OPT.getOpt()) && !cmdLine.hasOption(MANIFEST_OPT.getOpt())) { - String msg = "--forceful option only works with --manifest option"; - LOG.error(msg + " User provided args: {}", (Object[]) args); - throw new IllegalArgumentException(msg); - } + String localZkRoot = + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - return cmdLine; - } + return String.format("%s:%d:%s", localZkQuorum, port, localZkRoot); + } + + /** + * Wrapper class for static accessor + */ + @VisibleForTesting + static class HighAvailibilityCuratorProvider { + + public static final HighAvailibilityCuratorProvider INSTANCE = + new HighAvailibilityCuratorProvider(); /** - * Print the usage message. + * Gets curator blocking if necessary to create it */ - private void printUsageMessage() { - GenericOptionsParser.printGenericCommandUsage(System.out); - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", OPTIONS); + public CuratorFramework getCurator(String zkUrl, Properties properties) throws IOException { + return HighAvailabilityGroup.getCurator(zkUrl, properties); + } + } + + /** + * Helper class to update cluster role record for a ZK cluster. The ZK client this accessor has is + * confined to a single ZK cluster, but it can be used to operate multiple HA groups that are + * associated with this cluster. + */ + @VisibleForTesting + static class PhoenixHAAdminHelper implements Closeable { + /** The fully qualified ZK URL for an HBase cluster in format host:port:/hbase */ + private final String zkUrl; + /** Configuration of this command line tool. */ + private final Configuration conf; + /** Client properties which has copies of configuration defining ZK timeouts / retries. 
*/ + private final Properties properties = new Properties(); + /** Curator Provider **/ + private final HighAvailibilityCuratorProvider highAvailibilityCuratorProvider; + + PhoenixHAAdminHelper(String zkUrl, Configuration conf, + HighAvailibilityCuratorProvider highAvailibilityCuratorProvider) { + Preconditions.checkNotNull(zkUrl); + Preconditions.checkNotNull(conf); + Preconditions.checkNotNull(highAvailibilityCuratorProvider); + this.zkUrl = JDBCUtil.formatZookeeperUrl(zkUrl); + this.conf = conf; + conf.iterator().forEachRemaining(k -> properties.setProperty(k.getKey(), k.getValue())); + this.highAvailibilityCuratorProvider = highAvailibilityCuratorProvider; } /** - * Helper method to get local ZK fully qualified URL (host:port:/hbase) from configuration. + * Gets curator from the cache if available otherwise calls into getCurator to make it. */ - public static String getLocalZkUrl(Configuration conf) { - String localZkQuorum = conf.get(HConstants.ZOOKEEPER_QUORUM); - if (StringUtils.isEmpty(localZkQuorum)) { - String msg = "ZK quorum not found by looking up key " + HConstants.ZOOKEEPER_QUORUM; - LOG.error(msg); - throw new IllegalArgumentException(msg); - } - - String portStr = conf.get(HConstants.ZOOKEEPER_CLIENT_PORT); - int port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; - if (portStr != null) { - try { - port = Integer.parseInt(portStr); - } catch (NumberFormatException e) { - String msg = String.format("Unrecognized ZK port '%s' in ZK quorum '%s'", - portStr, localZkQuorum); - LOG.error(msg, e); - throw new IllegalArgumentException(msg, e); - } - } - - String localZkRoot = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - - return String.format("%s:%d:%s", localZkQuorum, port, localZkRoot); + private CuratorFramework getCurator() throws IOException { + return highAvailibilityCuratorProvider.getCurator(zkUrl, properties); } /** - * Wrapper class for static accessor + * Check if current cluster is ACTIVE role for the given HA group. In case of Exception when it + * fails to read cluster role data from the current cluster, it will assume current cluster is + * not ACTIVE. Callers should be aware of "false positive" possibility especially due to + * connectivity issue between this tool and remote ZK cluster. + * @param haGroupName the HA group name; a cluster can be associated with multiple HA groups + * @return true if current cluster is ACTIVE role, otherwise false */ - @VisibleForTesting - static class HighAvailibilityCuratorProvider { - - public static final HighAvailibilityCuratorProvider INSTANCE = new HighAvailibilityCuratorProvider(); - - /** - * Gets curator blocking if necessary to create it - */ - public CuratorFramework getCurator(String zkUrl, Properties properties) throws IOException { - return HighAvailabilityGroup.getCurator(zkUrl, properties); - } + private boolean isCurrentActiveCluster(String haGroupName) { + try { + byte[] data = getCurator().getData().forPath(toPath(haGroupName)); + + Optional record = ClusterRoleRecord.fromJson(data); + return record.isPresent() && record.get().getRole(zkUrl) == ClusterRole.ACTIVE; + } catch (NoNodeException ne) { + LOG.info("No role record found for HA group {} on '{}', assuming it is not active", + haGroupName, zkUrl); + return false; + } catch (Exception e) { + LOG.warn("Got exception when reading record for {} on cluster {}", haGroupName, zkUrl, e); + return false; + } } /** - * Helper class to update cluster role record for a ZK cluster. 
- * - * The ZK client this accessor has is confined to a single ZK cluster, but it can be used to - * operate multiple HA groups that are associated with this cluster. + * This lists all cluster role records stored in the zookeeper nodes. This read-only operation + * and hence no side effect on the ZK cluster. */ - @VisibleForTesting - static class PhoenixHAAdminHelper implements Closeable { - /** The fully qualified ZK URL for an HBase cluster in format host:port:/hbase */ - private final String zkUrl; - /** Configuration of this command line tool. */ - private final Configuration conf; - /** Client properties which has copies of configuration defining ZK timeouts / retries. */ - private final Properties properties = new Properties(); - /** Curator Provider **/ - private final HighAvailibilityCuratorProvider highAvailibilityCuratorProvider; - - PhoenixHAAdminHelper(String zkUrl, Configuration conf, HighAvailibilityCuratorProvider highAvailibilityCuratorProvider) { - Preconditions.checkNotNull(zkUrl); - Preconditions.checkNotNull(conf); - Preconditions.checkNotNull(highAvailibilityCuratorProvider); - this.zkUrl = JDBCUtil.formatZookeeperUrl(zkUrl); - this.conf = conf; - conf.iterator().forEachRemaining(k -> properties.setProperty(k.getKey(), k.getValue())); - this.highAvailibilityCuratorProvider = highAvailibilityCuratorProvider; - } - - /** - * Gets curator from the cache if available otherwise calls into getCurator to make it. - */ - private CuratorFramework getCurator() throws IOException { - return highAvailibilityCuratorProvider.getCurator(zkUrl, properties); - } - - /** - * Check if current cluster is ACTIVE role for the given HA group. - * - * In case of Exception when it fails to read cluster role data from the current cluster, it - * will assume current cluster is not ACTIVE. Callers should be aware of "false positive" - * possibility especially due to connectivity issue between this tool and remote ZK cluster. 
- * - * @param haGroupName the HA group name; a cluster can be associated with multiple HA groups - * @return true if current cluster is ACTIVE role, otherwise false - */ - private boolean isCurrentActiveCluster(String haGroupName) { - try { - byte[] data = getCurator().getData().forPath(toPath(haGroupName)); - - Optional record = ClusterRoleRecord.fromJson(data); - return record.isPresent() && record.get().getRole(zkUrl) == ClusterRole.ACTIVE; - } catch (NoNodeException ne) { - LOG.info("No role record found for HA group {} on '{}', assuming it is not active", - haGroupName, zkUrl); - return false; - } catch (Exception e) { - LOG.warn("Got exception when reading record for {} on cluster {}", - haGroupName, zkUrl, e); - return false; - } + List listAllClusterRoleRecordsOnZookeeper() throws IOException { + List haGroupNames; + try { + haGroupNames = getCurator().getChildren().forPath(ZKPaths.PATH_SEPARATOR); + } catch (Exception e) { + String msg = String.format("Got exception when listing all HA groups in %s", zkUrl); + LOG.error(msg); + throw new IOException(msg, e); + } + + List records = new ArrayList<>(); + List failedHaGroups = new ArrayList<>(); + for (String haGroupName : haGroupNames) { + try { + byte[] data = getCurator().getData().forPath(ZKPaths.PATH_SEPARATOR + haGroupName); + Optional record = ClusterRoleRecord.fromJson(data); + if (record.isPresent()) { + records.add(record.get()); + } else { // fail to deserialize data from JSON + failedHaGroups.add(haGroupName); + } + } catch (Exception e) { + LOG.warn("Got exception when reading data for HA group {}", haGroupName, e); + failedHaGroups.add(haGroupName); } + } + + if (!failedHaGroups.isEmpty()) { + String msg = String.format( + "Found following HA groups: %s. Fail to read cluster " + + "role records for following HA groups: %s", + String.join(",", haGroupNames), String.join(",", failedHaGroups)); + LOG.error(msg); + throw new IOException(msg); + } + return records; + } - /** - * This lists all cluster role records stored in the zookeeper nodes. - * - * This read-only operation and hence no side effect on the ZK cluster. - */ - List listAllClusterRoleRecordsOnZookeeper() throws IOException { - List haGroupNames; - try { - haGroupNames = getCurator().getChildren().forPath(ZKPaths.PATH_SEPARATOR); - } catch (Exception e) { - String msg = String.format("Got exception when listing all HA groups in %s", zkUrl); - LOG.error(msg); - throw new IOException(msg, e); - } - - List records = new ArrayList<>(); - List failedHaGroups = new ArrayList<>(); - for (String haGroupName : haGroupNames) { - try { - byte[] data = getCurator().getData().forPath(ZKPaths.PATH_SEPARATOR + haGroupName); - Optional record = ClusterRoleRecord.fromJson(data); - if (record.isPresent()) { - records.add(record.get()); - } else { // fail to deserialize data from JSON - failedHaGroups.add(haGroupName); - } - } catch (Exception e) { - LOG.warn("Got exception when reading data for HA group {}", haGroupName, e); - failedHaGroups.add(haGroupName); - } - } - - if (!failedHaGroups.isEmpty()) { - String msg = String.format("Found following HA groups: %s. Fail to read cluster " - + "role records for following HA groups: %s", - String.join(",", haGroupNames), String.join(",", failedHaGroups)); - LOG.error(msg); - throw new IOException(msg); - } - return records; + /** + * Verify cluster role records stored in local ZK nodes, and repair with remote znodes for any + * inconsistency. 
+ * @return a list of HA group names with inconsistent cluster role records, or empty list + */ + List verifyAndRepairWithRemoteZnode() throws Exception { + List inconsistentHaGroups = new ArrayList<>(); + for (ClusterRoleRecord record : listAllClusterRoleRecordsOnZookeeper()) { + // the remote znodes may be on different ZK clusters. + if (record.getRole(zkUrl) == ClusterRole.UNKNOWN) { + LOG.warn("Unknown cluster role for cluster '{}' in record {}", zkUrl, record); + continue; } - - /** - * Verify cluster role records stored in local ZK nodes, and repair with remote znodes for - * any inconsistency. - * - * @return a list of HA group names with inconsistent cluster role records, or empty list - */ - List verifyAndRepairWithRemoteZnode() throws Exception { - List inconsistentHaGroups = new ArrayList<>(); - for (ClusterRoleRecord record : listAllClusterRoleRecordsOnZookeeper()) { - // the remote znodes may be on different ZK clusters. - if (record.getRole(zkUrl) == ClusterRole.UNKNOWN) { - LOG.warn("Unknown cluster role for cluster '{}' in record {}", zkUrl, record); - continue; - } - String remoteZkUrl = record.getZk1().equals(zkUrl) - ? record.getZk2() - : record.getZk1(); - try (PhoenixHAAdminHelper remoteAdmin = new PhoenixHAAdminHelper(remoteZkUrl, conf, HighAvailibilityCuratorProvider.INSTANCE)) { - ClusterRoleRecord remoteRecord; - try { - String zPath = toPath(record.getHaGroupName()); - byte[] data = remoteAdmin.getCurator().getData().forPath(zPath); - Optional recordOptional = ClusterRoleRecord.fromJson(data); - if (!recordOptional.isPresent()) { - remoteAdmin.createOrUpdateDataOnZookeeper(record); - continue; - } - remoteRecord = recordOptional.get(); - } catch (NoNodeException ne) { - LOG.warn("No record znode yet, creating for HA group {} on {}", - record.getHaGroupName(), remoteAdmin); - remoteAdmin.createDataOnZookeeper(record); - LOG.info("Created znode on cluster {} with record {}", remoteAdmin, record); - continue; - } catch (Exception e) { - LOG.error("Error to get data on remote cluster {} for HA group {}", - remoteAdmin, record.getHaGroupName(), e); - continue; - } - - if (!record.getHaGroupName().equals(remoteRecord.getHaGroupName())) { - inconsistentHaGroups.add(record.getHaGroupName()); - LOG.error("INTERNAL ERROR: got cluster role record for different HA groups." - + " Local record: {}, remote record: {}", record, remoteRecord); - } else if (remoteRecord.isNewerThan(record)) { - createOrUpdateDataOnZookeeper(remoteRecord); - } else if (record.isNewerThan(remoteRecord)) { - remoteAdmin.createOrUpdateDataOnZookeeper(record); - } else if (record.equals(remoteRecord)) { - LOG.info("Cluster role record {} is consistent", record); - } else { - inconsistentHaGroups.add(record.getHaGroupName()); - LOG.error("Cluster role record for HA group {} is inconsistent. On cluster " - + "{} the record is {}; on cluster {} the record is {}", - record.getHaGroupName(), this, record, remoteAdmin, remoteRecord); - } - } + String remoteZkUrl = record.getZk1().equals(zkUrl) ? 
record.getZk2() : record.getZk1(); + try (PhoenixHAAdminHelper remoteAdmin = + new PhoenixHAAdminHelper(remoteZkUrl, conf, HighAvailibilityCuratorProvider.INSTANCE)) { + ClusterRoleRecord remoteRecord; + try { + String zPath = toPath(record.getHaGroupName()); + byte[] data = remoteAdmin.getCurator().getData().forPath(zPath); + Optional recordOptional = ClusterRoleRecord.fromJson(data); + if (!recordOptional.isPresent()) { + remoteAdmin.createOrUpdateDataOnZookeeper(record); + continue; } - return inconsistentHaGroups; + remoteRecord = recordOptional.get(); + } catch (NoNodeException ne) { + LOG.warn("No record znode yet, creating for HA group {} on {}", record.getHaGroupName(), + remoteAdmin); + remoteAdmin.createDataOnZookeeper(record); + LOG.info("Created znode on cluster {} with record {}", remoteAdmin, record); + continue; + } catch (Exception e) { + LOG.error("Error to get data on remote cluster {} for HA group {}", remoteAdmin, + record.getHaGroupName(), e); + continue; + } + + if (!record.getHaGroupName().equals(remoteRecord.getHaGroupName())) { + inconsistentHaGroups.add(record.getHaGroupName()); + LOG.error("INTERNAL ERROR: got cluster role record for different HA groups." + + " Local record: {}, remote record: {}", record, remoteRecord); + } else if (remoteRecord.isNewerThan(record)) { + createOrUpdateDataOnZookeeper(remoteRecord); + } else if (record.isNewerThan(remoteRecord)) { + remoteAdmin.createOrUpdateDataOnZookeeper(record); + } else if (record.equals(remoteRecord)) { + LOG.info("Cluster role record {} is consistent", record); + } else { + inconsistentHaGroups.add(record.getHaGroupName()); + LOG.error( + "Cluster role record for HA group {} is inconsistent. On cluster " + + "{} the record is {}; on cluster {} the record is {}", + record.getHaGroupName(), this, record, remoteAdmin, remoteRecord); + } } + } + return inconsistentHaGroups; + } - /** - * This updates the cluster role data on the zookeeper it connects to. - * - * To avoid conflicts, it does CAS (compare-and-set) when updating. The constraint is that - * the given record's version should be larger the existing record's version. This is a way - * to help avoiding manual update conflicts. If the given record can not meet version - * check, it will reject the update request and client (human operator or external system) - * should retry. - * - * @param record the new cluster role record to be saved on ZK - * @throws IOException if it fails to update the cluster role data on ZK - * @return true if the data on ZK is updated otherwise false - */ - boolean createOrUpdateDataOnZookeeper(ClusterRoleRecord record) throws IOException { - if (!zkUrl.equals(record.getZk1()) && !zkUrl.equals(record.getZk2())) { - String msg = String.format("INTERNAL ERROR: " - + "ZK cluster is not associated with cluster role record! " - + "ZK cluster URL: '%s'. 
Cluster role record: %s", - zkUrl, record); - LOG.error(msg); - throw new IOException(msg); - } - - String haGroupName = record.getHaGroupName(); - byte[] data; - try { - data = getCurator().getData().forPath(toPath(haGroupName)); // Get initial data - } catch (NoNodeException ne) { - LOG.info("No record znode yet, creating for HA group {} on {}", haGroupName, zkUrl); - createDataOnZookeeper(record); - LOG.info("Created znode for HA group {} with record data {} on {}", haGroupName, - record, zkUrl); - return true; - } catch (Exception e) { - String msg = String.format("Fail to read cluster role record data for HA group %s " - + "on cluster '%s'", haGroupName, zkUrl); - LOG.error(msg, e); - throw new IOException(msg, e); - } - - Optional existingRecordOptional = ClusterRoleRecord.fromJson(data); - if (!existingRecordOptional.isPresent()) { - String msg = String.format("Fail to parse existing cluster role record data for HA " - + "group %s", haGroupName); - LOG.error(msg); - throw new IOException(msg); - } - - ClusterRoleRecord existingRecord = existingRecordOptional.get(); - if (record.getVersion() < existingRecord.getVersion()) { - String msg = String.format("Invalid new cluster role record for HA group '%s' " - + "because new record's version V%d is smaller than existing V%d. " - + "Existing role record: %s. New role record fail to save: %s", - haGroupName, record.getVersion(), existingRecord.getVersion(), - existingRecord, record); - LOG.warn(msg); - return false; // return instead of error out to tolerate - } - - if (record.getVersion() == existingRecord.getVersion()) { - if (record.equals(existingRecord)) { - LOG.debug("Cluster role does not change since last update on ZK."); - return false; // no need to update iff they are the same. - } else { - String msg = String.format("Invalid new cluster role record for HA group '%s' " - + "because it has the same version V%d but inconsistent data. " - + "Existing role record: %s. New role record fail to save: %s", - haGroupName, record.getVersion(), existingRecord, record); - LOG.error(msg); - throw new IOException(msg); - } - } - - return updateDataOnZookeeper(existingRecord, record); + /** + * This updates the cluster role data on the zookeeper it connects to. To avoid conflicts, it + * does CAS (compare-and-set) when updating. The constraint is that the given record's version + * should be larger the existing record's version. This is a way to help avoiding manual update + * conflicts. If the given record can not meet version check, it will reject the update request + * and client (human operator or external system) should retry. + * @param record the new cluster role record to be saved on ZK + * @throws IOException if it fails to update the cluster role data on ZK + * @return true if the data on ZK is updated otherwise false + */ + boolean createOrUpdateDataOnZookeeper(ClusterRoleRecord record) throws IOException { + if (!zkUrl.equals(record.getZk1()) && !zkUrl.equals(record.getZk2())) { + String msg = String + .format("INTERNAL ERROR: " + "ZK cluster is not associated with cluster role record! " + + "ZK cluster URL: '%s'. 
Cluster role record: %s", zkUrl, record); + LOG.error(msg); + throw new IOException(msg); + } + + String haGroupName = record.getHaGroupName(); + byte[] data; + try { + data = getCurator().getData().forPath(toPath(haGroupName)); // Get initial data + } catch (NoNodeException ne) { + LOG.info("No record znode yet, creating for HA group {} on {}", haGroupName, zkUrl); + createDataOnZookeeper(record); + LOG.info("Created znode for HA group {} with record data {} on {}", haGroupName, record, + zkUrl); + return true; + } catch (Exception e) { + String msg = String.format( + "Fail to read cluster role record data for HA group %s " + "on cluster '%s'", haGroupName, + zkUrl); + LOG.error(msg, e); + throw new IOException(msg, e); + } + + Optional existingRecordOptional = ClusterRoleRecord.fromJson(data); + if (!existingRecordOptional.isPresent()) { + String msg = String.format( + "Fail to parse existing cluster role record data for HA " + "group %s", haGroupName); + LOG.error(msg); + throw new IOException(msg); + } + + ClusterRoleRecord existingRecord = existingRecordOptional.get(); + if (record.getVersion() < existingRecord.getVersion()) { + String msg = String.format( + "Invalid new cluster role record for HA group '%s' " + + "because new record's version V%d is smaller than existing V%d. " + + "Existing role record: %s. New role record fail to save: %s", + haGroupName, record.getVersion(), existingRecord.getVersion(), existingRecord, record); + LOG.warn(msg); + return false; // return instead of error out to tolerate + } + + if (record.getVersion() == existingRecord.getVersion()) { + if (record.equals(existingRecord)) { + LOG.debug("Cluster role does not change since last update on ZK."); + return false; // no need to update iff they are the same. + } else { + String msg = String.format( + "Invalid new cluster role record for HA group '%s' " + + "because it has the same version V%d but inconsistent data. " + + "Existing role record: %s. New role record fail to save: %s", + haGroupName, record.getVersion(), existingRecord, record); + LOG.error(msg); + throw new IOException(msg); } + } - /** - * Helper to create the znode on the ZK cluster. - */ - private void createDataOnZookeeper(ClusterRoleRecord record) throws IOException { - String haGroupName = record.getHaGroupName(); - // znode path for given haGroup name assuming namespace (prefix) has been set. - String haGroupPath = toPath(haGroupName); - try { - getCurator().create() - .creatingParentsIfNeeded() - .withMode(CreateMode.PERSISTENT) - .forPath(haGroupPath, ClusterRoleRecord.toJson(record)); - } catch (NodeExistsException nee) { - //this method assumes that the znode doesn't exist yet, but it could have been - //created between now and the last time we checked. We swallow the exception and - //rely on our caller to check to make sure the znode that's saved is correct - LOG.warn("Znode for HA group {} already exists. ", - haGroupPath, nee); - } catch (Exception e) { - LOG.error("Fail to initialize the znode for HA group {} with record data {}", - haGroupPath, record, e); - throw new IOException("Fail to initialize znode for HA group " + haGroupPath, e); - } - } + return updateDataOnZookeeper(existingRecord, record); + } - /** - * Helper to update the znode on ZK cluster assuming current data is the given old record. - */ - private boolean updateDataOnZookeeper(ClusterRoleRecord oldRecord, - ClusterRoleRecord newRecord) throws IOException { - // znode path for given haGroup name assuming namespace (prefix) has been set. 
- String haGroupPath = toPath(newRecord.getHaGroupName()); - RetryPolicy retryPolicy = HighAvailabilityGroup.createRetryPolicy(properties); - try { - DistributedAtomicValue v = new DistributedAtomicValue(getCurator(), haGroupPath, retryPolicy); - AtomicValue result = v.compareAndSet( - ClusterRoleRecord.toJson(oldRecord), ClusterRoleRecord.toJson(newRecord)); - LOG.info("Updated cluster role record ({}->{}) for HA group {} on cluster '{}': {}", - oldRecord.getVersion(), newRecord.getVersion(), newRecord.getHaGroupName(), - zkUrl, result.succeeded() ? "succeeded" : "failed"); - LOG.debug("Old DistributedAtomicValue: {}, New DistributedAtomicValue: {},", - new String(result.preValue(), StandardCharsets.UTF_8), - new String(result.postValue(), StandardCharsets.UTF_8)); - return result.succeeded(); - } catch (Exception e) { - String msg = String.format("Fail to update cluster role record to ZK for the HA " - + "group %s due to '%s'." - + "Existing role record: %s. New role record fail to save: %s", - haGroupPath, e.getMessage(), oldRecord, newRecord); - LOG.error(msg, e); - throw new IOException(msg, e); - } - } + /** + * Helper to create the znode on the ZK cluster. + */ + private void createDataOnZookeeper(ClusterRoleRecord record) throws IOException { + String haGroupName = record.getHaGroupName(); + // znode path for given haGroup name assuming namespace (prefix) has been set. + String haGroupPath = toPath(haGroupName); + try { + getCurator().create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT) + .forPath(haGroupPath, ClusterRoleRecord.toJson(record)); + } catch (NodeExistsException nee) { + // this method assumes that the znode doesn't exist yet, but it could have been + // created between now and the last time we checked. We swallow the exception and + // rely on our caller to check to make sure the znode that's saved is correct + LOG.warn("Znode for HA group {} already exists. ", haGroupPath, nee); + } catch (Exception e) { + LOG.error("Fail to initialize the znode for HA group {} with record data {}", haGroupPath, + record, e); + throw new IOException("Fail to initialize znode for HA group " + haGroupPath, e); + } + } - /** - * Helper method to get ZK path for an HA group given the HA group name. - * - * It assumes the ZK namespace (prefix) has been set. - */ - private static String toPath(String haGroupName) { - return ZKPaths.PATH_SEPARATOR + haGroupName; - } + /** + * Helper to update the znode on ZK cluster assuming current data is the given old record. + */ + private boolean updateDataOnZookeeper(ClusterRoleRecord oldRecord, ClusterRoleRecord newRecord) + throws IOException { + // znode path for given haGroup name assuming namespace (prefix) has been set. + String haGroupPath = toPath(newRecord.getHaGroupName()); + RetryPolicy retryPolicy = HighAvailabilityGroup.createRetryPolicy(properties); + try { + DistributedAtomicValue v = + new DistributedAtomicValue(getCurator(), haGroupPath, retryPolicy); + AtomicValue result = + v.compareAndSet(ClusterRoleRecord.toJson(oldRecord), ClusterRoleRecord.toJson(newRecord)); + LOG.info("Updated cluster role record ({}->{}) for HA group {} on cluster '{}': {}", + oldRecord.getVersion(), newRecord.getVersion(), newRecord.getHaGroupName(), zkUrl, + result.succeeded() ? 
"succeeded" : "failed"); + LOG.debug("Old DistributedAtomicValue: {}, New DistributedAtomicValue: {},", + new String(result.preValue(), StandardCharsets.UTF_8), + new String(result.postValue(), StandardCharsets.UTF_8)); + return result.succeeded(); + } catch (Exception e) { + String msg = String.format( + "Fail to update cluster role record to ZK for the HA " + "group %s due to '%s'." + + "Existing role record: %s. New role record fail to save: %s", + haGroupPath, e.getMessage(), oldRecord, newRecord); + LOG.error(msg, e); + throw new IOException(msg, e); + } + } - String getZkUrl() { - return zkUrl; - } + /** + * Helper method to get ZK path for an HA group given the HA group name. It assumes the ZK + * namespace (prefix) has been set. + */ + private static String toPath(String haGroupName) { + return ZKPaths.PATH_SEPARATOR + haGroupName; + } - @Override - public void close() { - LOG.debug("PhoenixHAAdmin for {} is now closed.", zkUrl); - } + String getZkUrl() { + return zkUrl; + } - @Override - public String toString() { - return zkUrl; - } + @Override + public void close() { + LOG.debug("PhoenixHAAdmin for {} is now closed.", zkUrl); } - public static void main(String[] args) throws Exception { - Configuration conf = HBaseConfiguration.create(); - int retCode = ToolRunner.run(conf, new PhoenixHAAdminTool(), args); - System.exit(retCode); + @Override + public String toString() { + return zkUrl; } -} \ No newline at end of file + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int retCode = ToolRunner.run(conf, new PhoenixHAAdminTool(), args); + System.exit(retCode); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProvider.java index 0e45db0d55e..e121b69f02e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,17 +40,16 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; + import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.phoenix.monitoring.GlobalClientMetrics; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - -import javax.annotation.Nullable; - /** * Provides a bounded and configurable executor service for {@link ParallelPhoenixConnection} and * related infra. 
Provides a lazily initialized singleton executor service to be used for all @@ -59,303 +58,288 @@ */ public class PhoenixHAExecutorServiceProvider { - public static final String HA_MAX_POOL_SIZE = "phoenix.ha.max.pool.size"; - public static final String DEFAULT_HA_MAX_POOL_SIZE = "30"; - public static final String HA_MAX_QUEUE_SIZE = "phoenix.ha.max.queue.size"; - public static final String DEFAULT_HA_MAX_QUEUE_SIZE = "300"; - public static final String HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD = - "phoenix.ha.threadpool.queue.backoff.threshold"; - public static final String DEFAULT_HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD = "0.9"; - - public static final String HA_CLOSE_MAX_POOL_SIZE = "phoenix.ha.close.max.pool.size"; - public static final String DEFAULT_HA_CLOSE_MAX_POOL_SIZE = "15"; - public static final String HA_CLOSE_MAX_QUEUE_SIZE = "phoenix.ha.close.max.queue.size"; - public static final String DEFAULT_HA_CLOSE_MAX_QUEUE_SIZE = "150"; - - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixHAExecutorServiceProvider.class); - - // Can make configurable if needed - private static final int KEEP_ALIVE_TIME_SECONDS = 120; - - private static volatile List INSTANCE = null; - - /** - * pojo for holding an execution and a close executorService - */ - public static class PhoenixHAClusterExecutorServices { - private final ExecutorService executorService; - private final ExecutorService closeExecutorService; - - PhoenixHAClusterExecutorServices(ExecutorService executorService, ExecutorService closeExecutorService) { - this.executorService = executorService; - this.closeExecutorService = closeExecutorService; - } - - public ExecutorService getExecutorService() { - return executorService; - } - - public ExecutorService getCloseExecutorService() { - return closeExecutorService; - } + public static final String HA_MAX_POOL_SIZE = "phoenix.ha.max.pool.size"; + public static final String DEFAULT_HA_MAX_POOL_SIZE = "30"; + public static final String HA_MAX_QUEUE_SIZE = "phoenix.ha.max.queue.size"; + public static final String DEFAULT_HA_MAX_QUEUE_SIZE = "300"; + public static final String HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD = + "phoenix.ha.threadpool.queue.backoff.threshold"; + public static final String DEFAULT_HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD = "0.9"; + + public static final String HA_CLOSE_MAX_POOL_SIZE = "phoenix.ha.close.max.pool.size"; + public static final String DEFAULT_HA_CLOSE_MAX_POOL_SIZE = "15"; + public static final String HA_CLOSE_MAX_QUEUE_SIZE = "phoenix.ha.close.max.queue.size"; + public static final String DEFAULT_HA_CLOSE_MAX_QUEUE_SIZE = "150"; + + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixHAExecutorServiceProvider.class); + + // Can make configurable if needed + private static final int KEEP_ALIVE_TIME_SECONDS = 120; + + private static volatile List INSTANCE = null; + + /** + * pojo for holding an execution and a close executorService + */ + public static class PhoenixHAClusterExecutorServices { + private final ExecutorService executorService; + private final ExecutorService closeExecutorService; + + PhoenixHAClusterExecutorServices(ExecutorService executorService, + ExecutorService closeExecutorService) { + this.executorService = executorService; + this.closeExecutorService = closeExecutorService; } - private PhoenixHAExecutorServiceProvider() { + public ExecutorService getExecutorService() { + return executorService; } - public static List get(Properties properties) { - if (INSTANCE == null) { - synchronized 
(PhoenixHAExecutorServiceProvider.class) { - if (INSTANCE == null) { - INSTANCE = initThreadPool(properties); - } - } - } - return INSTANCE; + public ExecutorService getCloseExecutorService() { + return closeExecutorService; } + } - @VisibleForTesting - static synchronized void resetExecutor() { - INSTANCE = null; - } + private PhoenixHAExecutorServiceProvider() { + } - /** - * Checks if the underlying executorServices have sufficient available capacity based on - * {@link #HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD}. Monitors the capacity of the blockingqueues - * linked with the executor services. - * - * @param properties phoenix properties - * @return true if queue is less than {@link #HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD} full - */ - public static List hasCapacity(Properties properties) { + public static List get(Properties properties) { + if (INSTANCE == null) { + synchronized (PhoenixHAExecutorServiceProvider.class) { if (INSTANCE == null) { - return ImmutableList.of(Boolean.TRUE, Boolean.TRUE); - } - double backoffThreshold = - Double.parseDouble(properties.getProperty(HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD, - DEFAULT_HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD)); - int i = 0; - List executorCapacities = new ArrayList<>(); - List executorServicesList = ImmutableList.of(INSTANCE.get(0).getExecutorService(), INSTANCE.get(1).getExecutorService()); - for (ExecutorService executor : executorServicesList) { - double queueSize = ((ThreadPoolExecutor) executor).getQueue().size(); - double queueRemainingCapacity = - ((ThreadPoolExecutor) executor).getQueue().remainingCapacity(); - double queueCapacity = queueSize + queueRemainingCapacity; - boolean hasCapacity = ((queueSize / queueCapacity) < backoffThreshold); - if (!hasCapacity) { - LOGGER.warn( - "PhoenixHAExecutorServiceProvider ThreadPoolExecutor[" + i + "] hasCapacity: false queueSize:" + queueSize + " queueCapacity:" - + queueCapacity + " backoffThreshold:" + backoffThreshold); - } - i++; - executorCapacities.add(hasCapacity); + INSTANCE = initThreadPool(properties); } - return executorCapacities; + } } - - // We need a threadPool that increases the number of threads for incoming tasks first rather - // than the default - // behavior of filling the queue first, hence we have the corePoolSize and maxPoolSize as same - private static List initThreadPool(Properties properties) { - int maxPoolSize = - Integer.parseInt( - properties.getProperty(HA_MAX_POOL_SIZE, DEFAULT_HA_MAX_POOL_SIZE)); - int maxQueueSize = - Integer.parseInt( - properties.getProperty(HA_MAX_QUEUE_SIZE, DEFAULT_HA_MAX_QUEUE_SIZE)); - ThreadPoolExecutor pool1 = - createThreadPool(maxPoolSize, maxQueueSize, "phoenixha1", getGlobalExecutorMetricsForPool1() - ); - ThreadPoolExecutor pool2 = - createThreadPool(maxPoolSize, maxQueueSize, "phoenixha2", getGlobalExecutorMetricsForPool2() - ); - - //Make the close executor services - maxPoolSize = - Integer.parseInt( - properties.getProperty(HA_CLOSE_MAX_POOL_SIZE, DEFAULT_HA_CLOSE_MAX_POOL_SIZE)); - maxQueueSize = - Integer.parseInt( - properties.getProperty(HA_CLOSE_MAX_QUEUE_SIZE, DEFAULT_HA_CLOSE_MAX_QUEUE_SIZE)); - - ThreadPoolExecutor closePool1 = - createThreadPool(maxPoolSize, maxQueueSize, "phoenixha1close"); - ThreadPoolExecutor closePool2 = - createThreadPool(maxPoolSize, maxQueueSize, "phoenixha2close"); - closePool1.allowCoreThreadTimeOut(true); - closePool2.allowCoreThreadTimeOut(true); - - return ImmutableList.of(new PhoenixHAClusterExecutorServices(pool1, closePool1), new PhoenixHAClusterExecutorServices(pool2, 
closePool2)); + return INSTANCE; + } + + @VisibleForTesting + static synchronized void resetExecutor() { + INSTANCE = null; + } + + /** + * Checks if the underlying executorServices have sufficient available capacity based on + * {@link #HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD}. Monitors the capacity of the blockingqueues + * linked with the executor services. + * @param properties phoenix properties + * @return true if queue is less than {@link #HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD} full + */ + public static List hasCapacity(Properties properties) { + if (INSTANCE == null) { + return ImmutableList.of(Boolean.TRUE, Boolean.TRUE); } - - private static ThreadPoolExecutor createThreadPool(int maxPoolSize, int maxQueueSize, String threadPoolNamePrefix) { - return createThreadPool(maxPoolSize, maxQueueSize, threadPoolNamePrefix, null); + double backoffThreshold = + Double.parseDouble(properties.getProperty(HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD, + DEFAULT_HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD)); + int i = 0; + List executorCapacities = new ArrayList<>(); + List executorServicesList = + ImmutableList.of(INSTANCE.get(0).getExecutorService(), INSTANCE.get(1).getExecutorService()); + for (ExecutorService executor : executorServicesList) { + double queueSize = ((ThreadPoolExecutor) executor).getQueue().size(); + double queueRemainingCapacity = + ((ThreadPoolExecutor) executor).getQueue().remainingCapacity(); + double queueCapacity = queueSize + queueRemainingCapacity; + boolean hasCapacity = ((queueSize / queueCapacity) < backoffThreshold); + if (!hasCapacity) { + LOGGER.warn("PhoenixHAExecutorServiceProvider ThreadPoolExecutor[" + i + + "] hasCapacity: false queueSize:" + queueSize + " queueCapacity:" + queueCapacity + + " backoffThreshold:" + backoffThreshold); + } + i++; + executorCapacities.add(hasCapacity); } - - private static ThreadPoolExecutor createThreadPool(int maxPoolSize, int maxQueueSize, String threadPoolNamePrefix, - @Nullable GlobalExecutorMetrics metrics) { - BlockingQueue queue = new LinkedBlockingQueue<>(maxQueueSize); - RejectedExecutionHandler handler; - if (metrics != null) { - handler = new MonitoredCallerRunsPolicy(threadPoolNamePrefix, metrics); - } else { - handler = new ThreadPoolExecutor.CallerRunsPolicy(); - } - ThreadPoolExecutor pool = - new PhoenixHAThreadPoolExecutor(maxPoolSize, maxPoolSize, KEEP_ALIVE_TIME_SECONDS, - TimeUnit.SECONDS, queue, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadPoolNamePrefix + "-%d").build(), - handler, metrics); - pool.allowCoreThreadTimeOut(true); - return pool; + return executorCapacities; + } + + // We need a threadPool that increases the number of threads for incoming tasks first rather + // than the default + // behavior of filling the queue first, hence we have the corePoolSize and maxPoolSize as same + private static List initThreadPool(Properties properties) { + int maxPoolSize = + Integer.parseInt(properties.getProperty(HA_MAX_POOL_SIZE, DEFAULT_HA_MAX_POOL_SIZE)); + int maxQueueSize = + Integer.parseInt(properties.getProperty(HA_MAX_QUEUE_SIZE, DEFAULT_HA_MAX_QUEUE_SIZE)); + ThreadPoolExecutor pool1 = + createThreadPool(maxPoolSize, maxQueueSize, "phoenixha1", getGlobalExecutorMetricsForPool1()); + ThreadPoolExecutor pool2 = + createThreadPool(maxPoolSize, maxQueueSize, "phoenixha2", getGlobalExecutorMetricsForPool2()); + + // Make the close executor services + maxPoolSize = Integer + .parseInt(properties.getProperty(HA_CLOSE_MAX_POOL_SIZE, DEFAULT_HA_CLOSE_MAX_POOL_SIZE)); + maxQueueSize = Integer + 
.parseInt(properties.getProperty(HA_CLOSE_MAX_QUEUE_SIZE, DEFAULT_HA_CLOSE_MAX_QUEUE_SIZE)); + + ThreadPoolExecutor closePool1 = createThreadPool(maxPoolSize, maxQueueSize, "phoenixha1close"); + ThreadPoolExecutor closePool2 = createThreadPool(maxPoolSize, maxQueueSize, "phoenixha2close"); + closePool1.allowCoreThreadTimeOut(true); + closePool2.allowCoreThreadTimeOut(true); + + return ImmutableList.of(new PhoenixHAClusterExecutorServices(pool1, closePool1), + new PhoenixHAClusterExecutorServices(pool2, closePool2)); + } + + private static ThreadPoolExecutor createThreadPool(int maxPoolSize, int maxQueueSize, + String threadPoolNamePrefix) { + return createThreadPool(maxPoolSize, maxQueueSize, threadPoolNamePrefix, null); + } + + private static ThreadPoolExecutor createThreadPool(int maxPoolSize, int maxQueueSize, + String threadPoolNamePrefix, @Nullable GlobalExecutorMetrics metrics) { + BlockingQueue queue = new LinkedBlockingQueue<>(maxQueueSize); + RejectedExecutionHandler handler; + if (metrics != null) { + handler = new MonitoredCallerRunsPolicy(threadPoolNamePrefix, metrics); + } else { + handler = new ThreadPoolExecutor.CallerRunsPolicy(); } - - private static GlobalExecutorMetrics getGlobalExecutorMetricsForPool1() { - return new GlobalExecutorMetrics(GLOBAL_HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER, - GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER, - GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME, - GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME, - GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME); + ThreadPoolExecutor pool = new PhoenixHAThreadPoolExecutor(maxPoolSize, maxPoolSize, + KEEP_ALIVE_TIME_SECONDS, TimeUnit.SECONDS, queue, new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat(threadPoolNamePrefix + "-%d").build(), + handler, metrics); + pool.allowCoreThreadTimeOut(true); + return pool; + } + + private static GlobalExecutorMetrics getGlobalExecutorMetricsForPool1() { + return new GlobalExecutorMetrics(GLOBAL_HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER, + GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER, GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME, + GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME, GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME); + } + + private static GlobalExecutorMetrics getGlobalExecutorMetricsForPool2() { + return new GlobalExecutorMetrics(GLOBAL_HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER, + GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER, GLOBAL_HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME, + GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTION_TIME, GLOBAL_HA_PARALLEL_POOL2_TASK_END_TO_END_TIME); + } + + private static class MonitoredCallerRunsPolicy extends ThreadPoolExecutor.CallerRunsPolicy { + private final String threadPoolName; + private final GlobalExecutorMetrics metrics; + + public MonitoredCallerRunsPolicy(String threadPoolName, GlobalExecutorMetrics metrics) { + this.threadPoolName = threadPoolName; + this.metrics = metrics; } - private static GlobalExecutorMetrics getGlobalExecutorMetricsForPool2() { - return new GlobalExecutorMetrics(GLOBAL_HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER, - GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER, - GLOBAL_HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME, - GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTION_TIME, - GLOBAL_HA_PARALLEL_POOL2_TASK_END_TO_END_TIME); + @Override + public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { + super.rejectedExecution(r, e); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Task was rejected by " + threadPoolName + " and executed in caller's thread"); + } + metrics.getTaskRejectedCounter().increment(); } + } 
- private static class MonitoredCallerRunsPolicy extends ThreadPoolExecutor.CallerRunsPolicy { - private final String threadPoolName; - private final GlobalExecutorMetrics metrics; + // Executor with monitoring + private static class PhoenixHAThreadPoolExecutor extends ThreadPoolExecutor { - public MonitoredCallerRunsPolicy(String threadPoolName, GlobalExecutorMetrics metrics) { - this.threadPoolName = threadPoolName; - this.metrics = metrics; - } + private final GlobalExecutorMetrics metrics; - @Override - public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { - super.rejectedExecution(r, e); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace( - "Task was rejected by " + threadPoolName + " and executed in caller's thread"); - } - metrics.getTaskRejectedCounter().increment(); - } + public PhoenixHAThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, + TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory, + RejectedExecutionHandler handler, GlobalExecutorMetrics metrics) { + super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler); + this.metrics = metrics; } - // Executor with monitoring - private static class PhoenixHAThreadPoolExecutor extends ThreadPoolExecutor { - - private final GlobalExecutorMetrics metrics; - - public PhoenixHAThreadPoolExecutor(int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, - ThreadFactory threadFactory, RejectedExecutionHandler handler, - GlobalExecutorMetrics metrics) { - super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, - handler); - this.metrics = metrics; - } - - @Override - public void execute(Runnable r) { - if (metrics != null) { - super.execute(new MonitoredRunnable<>(r, null)); - metrics.getTaskExecutedCounter().increment(); - } else { - super.execute(r); - } - } + @Override + public void execute(Runnable r) { + if (metrics != null) { + super.execute(new MonitoredRunnable<>(r, null)); + metrics.getTaskExecutedCounter().increment(); + } else { + super.execute(r); + } + } - @Override - protected void beforeExecute(Thread t, Runnable r) { - if (r instanceof MonitoredRunnable && metrics != null) { - MonitoredRunnable mr = (MonitoredRunnable) r; - mr.taskBeginTime = EnvironmentEdgeManager.currentTime(); - long taskQueueWaitTime = mr.taskBeginTime - mr.taskSubmitTime; - metrics.getTaskQueueWaitTime().update(taskQueueWaitTime); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace( - String.format("%s waited %d ms", mr.toString(), taskQueueWaitTime)); - } - } - super.beforeExecute(t, r); + @Override + protected void beforeExecute(Thread t, Runnable r) { + if (r instanceof MonitoredRunnable && metrics != null) { + MonitoredRunnable mr = (MonitoredRunnable) r; + mr.taskBeginTime = EnvironmentEdgeManager.currentTime(); + long taskQueueWaitTime = mr.taskBeginTime - mr.taskSubmitTime; + metrics.getTaskQueueWaitTime().update(taskQueueWaitTime); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("%s waited %d ms", mr.toString(), taskQueueWaitTime)); } + } + super.beforeExecute(t, r); + } - @Override - protected void afterExecute(Runnable r, Throwable t) { - try { - super.afterExecute(r, t); - } finally { - if (r instanceof MonitoredRunnable && metrics != null) { - MonitoredRunnable mr = (MonitoredRunnable) r; - mr.taskEndTime = EnvironmentEdgeManager.currentTime(); - long taskExecutionTime = mr.taskEndTime - mr.taskBeginTime; - long taskEndToEndTime = mr.taskEndTime - mr.taskSubmitTime; - 
metrics.getTaskExecutionTime().update(taskExecutionTime); - metrics.getTaskEndToEndCounter().update(taskEndToEndTime); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace( - String.format("%s executed in %d ms with end to end time of %d", - mr.toString(), taskExecutionTime, taskEndToEndTime)); - } - } - } + @Override + protected void afterExecute(Runnable r, Throwable t) { + try { + super.afterExecute(r, t); + } finally { + if (r instanceof MonitoredRunnable && metrics != null) { + MonitoredRunnable mr = (MonitoredRunnable) r; + mr.taskEndTime = EnvironmentEdgeManager.currentTime(); + long taskExecutionTime = mr.taskEndTime - mr.taskBeginTime; + long taskEndToEndTime = mr.taskEndTime - mr.taskSubmitTime; + metrics.getTaskExecutionTime().update(taskExecutionTime); + metrics.getTaskEndToEndCounter().update(taskEndToEndTime); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("%s executed in %d ms with end to end time of %d", + mr.toString(), taskExecutionTime, taskEndToEndTime)); + } } + } } + } - private static class MonitoredRunnable extends FutureTask { - private final long taskSubmitTime; - private long taskBeginTime; - private long taskEndTime; + private static class MonitoredRunnable extends FutureTask { + private final long taskSubmitTime; + private long taskBeginTime; + private long taskEndTime; - public MonitoredRunnable(Runnable runnable, V result) { - super(runnable, result); - this.taskSubmitTime = EnvironmentEdgeManager.currentTime(); - } + public MonitoredRunnable(Runnable runnable, V result) { + super(runnable, result); + this.taskSubmitTime = EnvironmentEdgeManager.currentTime(); + } + } + + private static class GlobalExecutorMetrics { + + private final GlobalClientMetrics taskRejectedCounter; + private final GlobalClientMetrics taskExecutedCounter; + private final GlobalClientMetrics taskQueueWaitTime; + private final GlobalClientMetrics taskExecutionTime; + private final GlobalClientMetrics taskEndToEndCounter; + + public GlobalExecutorMetrics(GlobalClientMetrics taskRejectedCounter, + GlobalClientMetrics taskExecutedCounter, GlobalClientMetrics taskQueueWaitTime, + GlobalClientMetrics taskExecutionTime, GlobalClientMetrics taskEndToEndCounter) { + this.taskRejectedCounter = taskRejectedCounter; + this.taskExecutedCounter = taskExecutedCounter; + this.taskQueueWaitTime = taskQueueWaitTime; + this.taskExecutionTime = taskExecutionTime; + this.taskEndToEndCounter = taskEndToEndCounter; } - private static class GlobalExecutorMetrics { - - private final GlobalClientMetrics taskRejectedCounter; - private final GlobalClientMetrics taskExecutedCounter; - private final GlobalClientMetrics taskQueueWaitTime; - private final GlobalClientMetrics taskExecutionTime; - private final GlobalClientMetrics taskEndToEndCounter; - - public GlobalExecutorMetrics(GlobalClientMetrics taskRejectedCounter, - GlobalClientMetrics taskExecutedCounter, GlobalClientMetrics taskQueueWaitTime, - GlobalClientMetrics taskExecutionTime, GlobalClientMetrics taskEndToEndCounter) { - this.taskRejectedCounter = taskRejectedCounter; - this.taskExecutedCounter = taskExecutedCounter; - this.taskQueueWaitTime = taskQueueWaitTime; - this.taskExecutionTime = taskExecutionTime; - this.taskEndToEndCounter = taskEndToEndCounter; - } - - GlobalClientMetrics getTaskRejectedCounter() { - return taskRejectedCounter; - } + GlobalClientMetrics getTaskRejectedCounter() { + return taskRejectedCounter; + } - GlobalClientMetrics getTaskExecutedCounter() { - return taskExecutedCounter; - } + GlobalClientMetrics 
getTaskExecutedCounter() { + return taskExecutedCounter; + } - GlobalClientMetrics getTaskQueueWaitTime() { - return taskQueueWaitTime; - } + GlobalClientMetrics getTaskQueueWaitTime() { + return taskQueueWaitTime; + } - GlobalClientMetrics getTaskExecutionTime() { - return taskExecutionTime; - } + GlobalClientMetrics getTaskExecutionTime() { + return taskExecutionTime; + } - GlobalClientMetrics getTaskEndToEndCounter() { - return taskEndToEndCounter; - } + GlobalClientMetrics getTaskEndToEndCounter() { + return taskEndToEndCounter; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAGroupMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAGroupMetrics.java index 44854129d93..33a4e7f7dfa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAGroupMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixHAGroupMetrics.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +17,6 @@ */ package org.apache.phoenix.jdbc; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.monitoring.AtomicMetric; -import org.apache.phoenix.monitoring.Metric; -import org.apache.phoenix.monitoring.MetricType; - -import java.util.EnumMap; -import java.util.List; -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER; import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER; import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER; @@ -33,52 +24,59 @@ import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER; import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; + +import org.apache.phoenix.monitoring.AtomicMetric; +import org.apache.phoenix.monitoring.Metric; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; + public class PhoenixHAGroupMetrics implements PhoenixMetricsHolder { - public enum HAMetricType { - HA_PARALLEL_COUNT_FAILED_OPERATIONS(ImmutableList.of( - HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER, - HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER)), - HA_PARALLEL_USED_OPERATIONS(ImmutableList.of( - HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER, - HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER)), - HA_PARALLEL_COUNT_OPERATIONS(ImmutableList.of( - HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER, - HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER)); + public enum HAMetricType { + HA_PARALLEL_COUNT_FAILED_OPERATIONS( + ImmutableList.of(HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER, + HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER)), + HA_PARALLEL_USED_OPERATIONS(ImmutableList.of(HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER, + HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER)), + HA_PARALLEL_COUNT_OPERATIONS(ImmutableList.of(HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER, + 
HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER)); - private final List metrics; + private final List metrics; - HAMetricType(List metrics) { - this.metrics = metrics; - } + HAMetricType(List metrics) { + this.metrics = metrics; } + } - protected EnumMap map = new EnumMap<>(MetricType.class); + protected EnumMap map = new EnumMap<>(MetricType.class); - protected PhoenixHAGroupMetrics(List types) { - for (HAMetricType type : types) { - for (MetricType metricType : type.metrics) { - map.put(metricType, new AtomicMetric(metricType)); - } - } + protected PhoenixHAGroupMetrics(List types) { + for (HAMetricType type : types) { + for (MetricType metricType : type.metrics) { + map.put(metricType, new AtomicMetric(metricType)); + } } + } - @Override - public Metric get(MetricType type) { - return map.get(type); - } + @Override + public Metric get(MetricType type) { + return map.get(type); + } - @Override - public void reset() { - map.values().forEach(Metric::reset); - } + @Override + public void reset() { + map.values().forEach(Metric::reset); + } - @Override - public Map getAllMetrics() { - return map; - } + @Override + public Map getAllMetrics() { + return map; + } - public Metric get(HAMetricType type, int clusterIndex) { - return map.get(type.metrics.get(clusterIndex)); - } + public Metric get(HAMetricType type, int clusterIndex) { + return map.get(type.metrics.get(clusterIndex)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsHolder.java index 7cb4e4c8210..1c16fee38dc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsHolder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsHolder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,15 @@ */ package org.apache.phoenix.jdbc; +import java.util.Map; + import org.apache.phoenix.monitoring.Metric; import org.apache.phoenix.monitoring.MetricType; -import java.util.Map; - public interface PhoenixMetricsHolder { - Metric get(MetricType type); - void reset(); - Map getAllMetrics(); + Metric get(MetricType type); + + void reset(); + + Map getAllMetrics(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java index 96556ad3771..ee224a0878e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,12 +23,14 @@ public interface PhoenixMetricsLog { - void logOverAllReadRequestMetrics(Map overAllQueryMetrics, String sql); + void logOverAllReadRequestMetrics(Map overAllQueryMetrics, String sql); - void logRequestReadMetrics(Map> requestReadMetrics, String sql); + void logRequestReadMetrics(Map> requestReadMetrics, String sql); - void logWriteMetricsfoForMutationsSinceLastReset(Map> mutationWriteMetrics); + void logWriteMetricsfoForMutationsSinceLastReset( + Map> mutationWriteMetrics); - void logReadMetricInfoForMutationsSinceLastReset(Map> mutationReadMetrics); + void logReadMetricInfoForMutationsSinceLastReset( + Map> mutationReadMetrics); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredConnection.java index d017f7bbcc8..8719af5529c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredConnection.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,37 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; -import org.apache.phoenix.monitoring.MetricType; - import java.sql.Connection; import java.util.Map; +import org.apache.phoenix.monitoring.MetricType; + /** * This interface is for phoenix connections that provide metrics to PhoenixRuntime */ public interface PhoenixMonitoredConnection extends Connection { - /** - * @return map of Table Name String to a Map of Metric Type to current value for mutations - */ - Map> getMutationMetrics(); + /** Returns map of Table Name String to a Map of Metric Type to current value for mutations */ + Map> getMutationMetrics(); - /** - * @return map of Table Name String to a Map of Metric Type to current value for reads - */ - Map> getReadMetrics(); + /** Returns map of Table Name String to a Map of Metric Type to current value for reads */ + Map> getReadMetrics(); - /** - * @return true if request metrics are enabled false otherwise - */ - boolean isRequestLevelMetricsEnabled(); + /** Returns true if request metrics are enabled false otherwise */ + boolean isRequestLevelMetricsEnabled(); - /** - * Clears the local metrics values by setting them back to 0. Useful for multistatement connections and extracting - * metrics for individual DML. - */ - void clearMetrics(); + /** + * Clears the local metrics values by setting them back to 0. Useful for multistatement + * connections and extracting metrics for individual DML. 
+ */ + void clearMetrics(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredPreparedStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredPreparedStatement.java index d794fd77327..071b5a22c61 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredPreparedStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredPreparedStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; import java.sql.PreparedStatement; @@ -23,6 +22,7 @@ /** * This interface is for phoenix statement that provide operation Type */ -public interface PhoenixMonitoredPreparedStatement extends PreparedStatement, PhoenixMonitoredStatement { +public interface PhoenixMonitoredPreparedStatement + extends PreparedStatement, PhoenixMonitoredStatement { } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredResultSet.java index 7c99588332e..341e9766ec4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredResultSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,16 +17,16 @@ */ package org.apache.phoenix.jdbc; -import org.apache.phoenix.monitoring.MetricType; - import java.sql.ResultSet; import java.util.Map; +import org.apache.phoenix.monitoring.MetricType; + public interface PhoenixMonitoredResultSet extends ResultSet { - Map> getReadMetrics(); + Map> getReadMetrics(); - Map getOverAllRequestReadMetrics(); + Map getOverAllRequestReadMetrics(); - void resetMetrics(); + void resetMetrics(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredStatement.java index 89d98e5de6e..79d0d3058ff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixMonitoredStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; import java.sql.SQLException; import java.sql.Statement; + import org.apache.phoenix.jdbc.PhoenixStatement.Operation; /** @@ -27,9 +27,7 @@ */ public interface PhoenixMonitoredStatement extends Statement { - /** - * @return Operation Type of statement that has been executed or prepared for execution - */ - Operation getUpdateOperation() throws SQLException; + /** Returns Operation Type of statement that has been executed or prepared for execution */ + Operation getUpdateOperation() throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixParameterMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixParameterMetaData.java index 53ca8e1f550..3f9f3626308 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixParameterMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixParameterMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,142 +24,145 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.parse.BindParseNode; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.PDatum; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TypeMismatchException; - - +import org.apache.phoenix.schema.types.PDataType; /** - * * Implementation of ParameterMetaData for Phoenix - * - * * @since 0.1 */ public class PhoenixParameterMetaData implements ParameterMetaData { - private final PDatum[] params; - private static final PDatum EMPTY_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return false; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public PDataType getDataType() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - public static final PhoenixParameterMetaData EMPTY_PARAMETER_META_DATA = new PhoenixParameterMetaData(0); - public PhoenixParameterMetaData(int paramCount) { - params = new PDatum[paramCount]; - //initialize the params array with the empty_datum marker value. - for(int i = 0; i < paramCount; i++) { - params[i] = EMPTY_DATUM; - } - } - - private PDatum getParam(int index) throws SQLException { - if (index <= 0 || index > params.length) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) - .setMessage("The index is " + index + ". Must be between 1 and " + params.length) - .build().buildException(); - } - PDatum param = params[index-1]; - - if (param == EMPTY_DATUM) { - //value at params[index-1] was never set. - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) - .setMessage("Parameter at index " + index + " is unbound").build().buildException(); - } - return param; - } - @Override - public String getParameterClassName(int index) throws SQLException { - PDatum datum = getParam(index); - PDataType type = datum == null ? null : datum.getDataType(); - return type == null ? 
null : type.getJavaClassName(); - } - + private final PDatum[] params; + private static final PDatum EMPTY_DATUM = new PDatum() { @Override - public int getParameterCount() throws SQLException { - return params.length; + public boolean isNullable() { + return false; } @Override - public int getParameterMode(int index) throws SQLException { - return ParameterMetaData.parameterModeIn; + public Integer getScale() { + return null; } @Override - public int getParameterType(int index) throws SQLException { - return getParam(index).getDataType().getSqlType(); + public Integer getMaxLength() { + return null; } @Override - public String getParameterTypeName(int index) throws SQLException { - return getParam(index).getDataType().getSqlTypeName(); + public PDataType getDataType() { + return null; } @Override - public int getPrecision(int index) throws SQLException { - return 0; + public SortOrder getSortOrder() { + return SortOrder.getDefault(); } - - @Override - public int getScale(int index) throws SQLException { - return 0; - } - - @Override - public int isNullable(int index) throws SQLException { - return getParam(index).isNullable() ? ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls; + }; + public static final PhoenixParameterMetaData EMPTY_PARAMETER_META_DATA = + new PhoenixParameterMetaData(0); + + public PhoenixParameterMetaData(int paramCount) { + params = new PDatum[paramCount]; + // initialize the params array with the empty_datum marker value. + for (int i = 0; i < paramCount; i++) { + params[i] = EMPTY_DATUM; } + } - @Override - public boolean isSigned(int index) throws SQLException { - @SuppressWarnings("rawtypes") - Class clazz = getParam(index).getDataType().getJavaClass(); - return Number.class.isInstance(clazz); + private PDatum getParam(int index) throws SQLException { + if (index <= 0 || index > params.length) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) + .setMessage("The index is " + index + ". Must be between 1 and " + params.length).build() + .buildException(); } + PDatum param = params[index - 1]; - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); + if (param == EMPTY_DATUM) { + // value at params[index-1] was never set. + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) + .setMessage("Parameter at index " + index + " is unbound").build().buildException(); } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()) - .build().buildException(); - } - return (T)this; + return param; + } + + @Override + public String getParameterClassName(int index) throws SQLException { + PDatum datum = getParam(index); + PDataType type = datum == null ? null : datum.getDataType(); + return type == null ? 
null : type.getJavaClassName(); + } + + @Override + public int getParameterCount() throws SQLException { + return params.length; + } + + @Override + public int getParameterMode(int index) throws SQLException { + return ParameterMetaData.parameterModeIn; + } + + @Override + public int getParameterType(int index) throws SQLException { + return getParam(index).getDataType().getSqlType(); + } + + @Override + public String getParameterTypeName(int index) throws SQLException { + return getParam(index).getDataType().getSqlTypeName(); + } + + @Override + public int getPrecision(int index) throws SQLException { + return 0; + } + + @Override + public int getScale(int index) throws SQLException { + return 0; + } + + @Override + public int isNullable(int index) throws SQLException { + return getParam(index).isNullable() + ? ResultSetMetaData.columnNullable + : ResultSetMetaData.columnNoNulls; + } + + @Override + public boolean isSigned(int index) throws SQLException { + @SuppressWarnings("rawtypes") + Class clazz = getParam(index).getDataType().getJavaClass(); + return Number.class.isInstance(clazz); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); } - - public void addParam(BindParseNode bind, PDatum datum) throws SQLException { - PDatum bindDatum = params[bind.getIndex()]; - if (bindDatum != null && bindDatum.getDataType() != null && !datum.getDataType().isCoercibleTo(bindDatum.getDataType())) { - throw TypeMismatchException.newException(datum.getDataType(), bindDatum.getDataType()); - } - params[bind.getIndex()] = datum; + return (T) this; + } + + public void addParam(BindParseNode bind, PDatum datum) throws SQLException { + PDatum bindDatum = params[bind.getIndex()]; + if ( + bindDatum != null && bindDatum.getDataType() != null + && !datum.getDataType().isCoercibleTo(bindDatum.getDataType()) + ) { + throw TypeMismatchException.newException(datum.getDataType(), bindDatum.getDataType()); } + params[bind.getIndex()] = datum; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java index 036020df39e..bb7a20919ae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixPreparedStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,551 +63,554 @@ import org.apache.phoenix.util.SQLCloseable; /** - * JDBC PreparedStatement implementation of Phoenix. 
Currently only the following methods (in addition to the ones - * supported on {@link PhoenixStatement} are supported: - {@link #executeQuery()} - {@link #setInt(int, int)} - - * {@link #setShort(int, short)} - {@link #setLong(int, long)} - {@link #setFloat(int, float)} - - * {@link #setDouble(int, double)} - {@link #setBigDecimal(int, BigDecimal)} - {@link #setString(int, String)} - - * {@link #setDate(int, Date)} - {@link #setDate(int, Date, Calendar)} - {@link #setTime(int, Time)} - - * {@link #setTime(int, Time, Calendar)} - {@link #setTimestamp(int, Timestamp)} - - * {@link #setTimestamp(int, Timestamp, Calendar)} - {@link #setNull(int, int)} - {@link #setNull(int, int, String)} - - * {@link #setBytes(int, byte[])} - {@link #clearParameters()} - {@link #getMetaData()} - * - * + * JDBC PreparedStatement implementation of Phoenix. Currently only the following methods (in + * addition to the ones supported on {@link PhoenixStatement} are supported: - + * {@link #executeQuery()} - {@link #setInt(int, int)} - {@link #setShort(int, short)} - + * {@link #setLong(int, long)} - {@link #setFloat(int, float)} - {@link #setDouble(int, double)} - + * {@link #setBigDecimal(int, BigDecimal)} - {@link #setString(int, String)} - + * {@link #setDate(int, Date)} - {@link #setDate(int, Date, Calendar)} - {@link #setTime(int, Time)} + * - {@link #setTime(int, Time, Calendar)} - {@link #setTimestamp(int, Timestamp)} - + * {@link #setTimestamp(int, Timestamp, Calendar)} - {@link #setNull(int, int)} - + * {@link #setNull(int, int, String)} - {@link #setBytes(int, byte[])} - {@link #clearParameters()} + * - {@link #getMetaData()} * @since 0.1 */ -public class PhoenixPreparedStatement extends PhoenixStatement implements PhoenixMonitoredPreparedStatement, SQLCloseable { - private final int parameterCount; - private final List parameters; - private final CompilableStatement statement; - - private final String query; - - public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) throws SQLException, IOException { - super(connection); - this.statement = parser.nextStatement(new ExecutableNodeFactory()); - if (this.statement == null) { throw new EOFException(); } - this.query = null; // TODO: add toString on SQLStatement - this.parameterCount = statement.getBindCount(); - this.parameters = Arrays.asList(new Object[statement.getBindCount()]); - Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); - } - - public PhoenixPreparedStatement(PhoenixConnection connection, String query) throws SQLException { - super(connection); - this.query = query; - this.statement = parseStatement(query); - this.parameterCount = statement.getBindCount(); - this.parameters = Arrays.asList(new Object[statement.getBindCount()]); - Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); - } - - public PhoenixPreparedStatement(PhoenixPreparedStatement statement) { - super(statement.connection); - this.query = statement.query; - this.statement = statement.statement; - this.parameterCount = statement.parameters.size(); - this.parameters = new ArrayList(statement.parameters); - } - - @Override - public void addBatch() throws SQLException { - throwIfUnboundParameters(); - batch.add(new PhoenixPreparedStatement(this)); - } - - /** - * Set a bind parameter's value. 
- * @param parameterIndex 1-based index of the bind parameter to be set - * @param value value to be set - * @throws SQLException if the bind parameter index is invalid - */ - private void setParameter(int parameterIndex, Object value) throws SQLException { - if (parameterIndex < 1 || parameterIndex > parameterCount) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) - .setMessage("Can't set parameter at index " + parameterIndex + ", " + - parameterCount + " bind parameters are defined") - .build().buildException(); - } - this.parameters.set(parameterIndex - 1, value); - } - - - @Override - public void clearParameters() throws SQLException { - Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); - } - - @Override - public List getParameters() { - return parameters; - } - - private void throwIfUnboundParameters() throws SQLException { - int i = 0; - for (Object param : getParameters()) { - if (param == BindManager.UNBOUND_PARAMETER) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) - .setMessage("Parameter " + (i + 1) + " is unbound").build().buildException(); - } - i++; - } - } - - - public QueryPlan compileQuery() throws SQLException { - return compileQuery(statement, query); - } - - public MutationPlan compileMutation() throws SQLException { - return compileMutation(statement, query); - } - - void executeForBatch() throws SQLException { - throwIfUnboundParameters(); - if (!statement.getOperation().isMutation()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.EXECUTE_BATCH_FOR_STMT_WITH_RESULT_SET) - .build().buildException(); - } - executeMutation(statement, createAuditQueryLogger(statement, query)); - } - - @Override - public boolean execute() throws SQLException { - throwIfUnboundParameters(); - if (statement.getOperation().isMutation() && !batch.isEmpty()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) - .build().buildException(); - } - if (statement.getOperation().isMutation()) { - executeMutation(statement, createAuditQueryLogger(statement,query)); - return false; - } - executeQuery(statement, createQueryLogger(statement,query)); - return true; - } - - @Override - public ResultSet executeQuery() throws SQLException { - throwIfUnboundParameters(); - if (statement.getOperation().isMutation()) { - throw new ExecuteQueryNotApplicableException(statement.getOperation()); - } - - return executeQuery(statement,createQueryLogger(statement,query)); - } - - @Override - public int executeUpdate() throws SQLException { - preExecuteUpdate(); - return executeMutation(statement, createAuditQueryLogger(statement,query)); - } - - private void preExecuteUpdate() throws SQLException { - throwIfUnboundParameters(); - if (!statement.getOperation().isMutation()) { - throw new ExecuteUpdateNotApplicableException(statement.getOperation()); - } - if (!batch.isEmpty()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) - .build().buildException(); - } - } - - /** - * Executes the given SQL statement similar to JDBC API executeUpdate() but also returns the - * updated or non-updated row as Result object back to the client. This must be used with - * auto-commit Connection. This makes the operation atomic. - * If the row is successfully updated, return the updated row, otherwise if the row - * cannot be updated, return non-updated row. 
- * - * @return The pair of int and Tuple, where int represents value 1 for successful row update - * and 0 for non-successful row update, and Tuple represents the state of the row. - * @throws SQLException If the statement cannot be executed. - */ - public Pair executeUpdateReturnRow() throws SQLException { - if (!connection.getAutoCommit()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_COMMIT_NOT_ENABLED).build() - .buildException(); - } - preExecuteUpdate(); - return executeMutation(statement, createAuditQueryLogger(statement, query), - MutationState.ReturnResult.ROW); - } - - public QueryPlan optimizeQuery() throws SQLException { - throwIfUnboundParameters(); - return optimizeQuery(statement); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - if (statement.getOperation().isMutation()) { - return null; - } - int paramCount = statement.getBindCount(); - List params = this.getParameters(); - BitSet unsetParams = new BitSet(statement.getBindCount()); - for (int i = 0; i < paramCount; i++) { - if ( params.get(i) == BindManager.UNBOUND_PARAMETER) { - unsetParams.set(i); - params.set(i, null); - } - } - try { - // Just compile top level query without optimizing to get ResultSetMetaData - QueryPlan plan = statement.compilePlan(this, Sequence.ValueOp.NOOP); - return new PhoenixResultSetMetaData(this.getConnection(), plan.getProjector()); - } finally { - int lastSetBit = 0; - while ((lastSetBit = unsetParams.nextSetBit(lastSetBit)) != -1) { - params.set(lastSetBit, BindManager.UNBOUND_PARAMETER); - lastSetBit++; - } - } - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - int paramCount = statement.getBindCount(); - List params = this.getParameters(); - BitSet unsetParams = new BitSet(statement.getBindCount()); - for (int i = 0; i < paramCount; i++) { - if ( params.get(i) == BindManager.UNBOUND_PARAMETER) { - unsetParams.set(i); - params.set(i, null); - } - } - try { - StatementPlan plan = statement.compilePlan(this, Sequence.ValueOp.NOOP); - return plan.getParameterMetaData(); - } finally { - int lastSetBit = 0; - while ((lastSetBit = unsetParams.nextSetBit(lastSetBit)) != -1) { - params.set(lastSetBit, BindManager.UNBOUND_PARAMETER); - lastSetBit++; - } - } - } - - @Override - public String toString() { - return query; - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public 
void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - setDate(parameterIndex, x, localCalendar); - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - setParameter(parameterIndex, processDate(x, cal)); - } - - private java.sql.Date processDate(Date x, Calendar cal) { - if (x != null) { // Since Date is mutable, make a copy - if (connection.isApplyTimeZoneDisplacement()) { - return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); - } else { - // Since Date is mutable, make a copy - return new Date(x.getTime()); - } - } - return x; - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public 
void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNString(int parameterIndex, String value) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - setParameter(parameterIndex, null); - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - setParameter(parameterIndex, null); - } - - @Override - public void setObject(int parameterIndex, Object o) throws SQLException { - setParameter(parameterIndex, processObject(o)); - } - - @Override - public void setObject(int parameterIndex, Object o, int targetSqlType) throws SQLException { - o = processObject(o); - PDataType targetType = PDataType.fromTypeId(targetSqlType); - if (o != null) { - PDataType sourceType = PDataType.fromLiteral(o); - o = targetType.toObject(o, sourceType); - } - setParameter(parameterIndex, o); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - setObject(parameterIndex, x, targetSqlType); - } - - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - setParameter(parameterIndex, x); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - setTime(parameterIndex, x, localCalendar); - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - setParameter(parameterIndex, processTime(x, cal)); - } - - private java.sql.Time processTime(Time x, Calendar cal) { - if (x != null) { - if (connection.isApplyTimeZoneDisplacement()) { - return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); - } else { - // Since Time is mutable, make a copy - return new Time(x.getTime()); - } - } - return x; - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - setTimestamp(parameterIndex, x, localCalendar); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - setParameter(parameterIndex, processTimestamp(x, cal)); - } - - private java.sql.Timestamp processTimestamp(Timestamp x, Calendar cal) { - if (x != null) { - if (connection.isApplyTimeZoneDisplacement()) { - return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); - } else { - int nanos = x.getNanos(); - x = new Timestamp(x.getTime()); - x.setNanos(nanos); - } - } - return x; - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - setParameter(parameterIndex, x.toExternalForm()); // Just treat as String - } - - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - // Convert objects to their canonical forms as expected by 
the Phoenix type system and apply - // TZ displacement - private Object processObject(Object o) { - // We cannot use the direct conversions, as those work in the local TZ. - if (o instanceof java.time.temporal.Temporal) { - if (o instanceof java.time.LocalDateTime) { - return java.sql.Timestamp.from(((java.time.LocalDateTime) o).toInstant(ZoneOffset.UTC)); - } else if (o instanceof java.time.LocalDate) { - // java.sql.Date.from() is inherited from j.u.Date.from() and returns j.u.Date - return new java.sql.Date(((java.time.LocalDate) o).atStartOfDay() - .toInstant(ZoneOffset.UTC).toEpochMilli()); - } else if (o instanceof java.time.LocalTime) { - // preserve nanos if writing to timestamp - return java.sql.Timestamp.from( - ((java.time.LocalTime) o).atDate(DateUtil.LD_EPOCH).toInstant(ZoneOffset.UTC)); - } - } else if (o instanceof java.util.Date) { - if (o instanceof java.sql.Date) { - return processDate((java.sql.Date) o, localCalendar); - } else if (o instanceof java.sql.Timestamp) { - return processTimestamp((java.sql.Timestamp) o, localCalendar); - } else if (o instanceof java.sql.Time) { - return processTime((java.sql.Time) o, localCalendar); - } else { - // We could use Timestamp, we don't have millis, and don't differentiate anyway - return processDate(new java.sql.Date(((java.util.Date) o).getTime()), localCalendar) - .getTime(); - } - } - return o; - } +public class PhoenixPreparedStatement extends PhoenixStatement + implements PhoenixMonitoredPreparedStatement, SQLCloseable { + private final int parameterCount; + private final List parameters; + private final CompilableStatement statement; + + private final String query; + + public PhoenixPreparedStatement(PhoenixConnection connection, PhoenixStatementParser parser) + throws SQLException, IOException { + super(connection); + this.statement = parser.nextStatement(new ExecutableNodeFactory()); + if (this.statement == null) { + throw new EOFException(); + } + this.query = null; // TODO: add toString on SQLStatement + this.parameterCount = statement.getBindCount(); + this.parameters = Arrays.asList(new Object[statement.getBindCount()]); + Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); + } + + public PhoenixPreparedStatement(PhoenixConnection connection, String query) throws SQLException { + super(connection); + this.query = query; + this.statement = parseStatement(query); + this.parameterCount = statement.getBindCount(); + this.parameters = Arrays.asList(new Object[statement.getBindCount()]); + Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); + } + + public PhoenixPreparedStatement(PhoenixPreparedStatement statement) { + super(statement.connection); + this.query = statement.query; + this.statement = statement.statement; + this.parameterCount = statement.parameters.size(); + this.parameters = new ArrayList(statement.parameters); + } + + @Override + public void addBatch() throws SQLException { + throwIfUnboundParameters(); + batch.add(new PhoenixPreparedStatement(this)); + } + + /** + * Set a bind parameter's value. 
+ * @param parameterIndex 1-based index of the bind parameter to be set + * @param value value to be set + * @throws SQLException if the bind parameter index is invalid + */ + private void setParameter(int parameterIndex, Object value) throws SQLException { + if (parameterIndex < 1 || parameterIndex > parameterCount) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND) + .setMessage("Can't set parameter at index " + parameterIndex + ", " + parameterCount + + " bind parameters are defined") + .build().buildException(); + } + this.parameters.set(parameterIndex - 1, value); + } + + @Override + public void clearParameters() throws SQLException { + Collections.fill(parameters, BindManager.UNBOUND_PARAMETER); + } + + @Override + public List getParameters() { + return parameters; + } + + private void throwIfUnboundParameters() throws SQLException { + int i = 0; + for (Object param : getParameters()) { + if (param == BindManager.UNBOUND_PARAMETER) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_VALUE_UNBOUND) + .setMessage("Parameter " + (i + 1) + " is unbound").build().buildException(); + } + i++; + } + } + + public QueryPlan compileQuery() throws SQLException { + return compileQuery(statement, query); + } + + public MutationPlan compileMutation() throws SQLException { + return compileMutation(statement, query); + } + + void executeForBatch() throws SQLException { + throwIfUnboundParameters(); + if (!statement.getOperation().isMutation()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_BATCH_FOR_STMT_WITH_RESULT_SET) + .build().buildException(); + } + executeMutation(statement, createAuditQueryLogger(statement, query)); + } + + @Override + public boolean execute() throws SQLException { + throwIfUnboundParameters(); + if (statement.getOperation().isMutation() && !batch.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) + .build().buildException(); + } + if (statement.getOperation().isMutation()) { + executeMutation(statement, createAuditQueryLogger(statement, query)); + return false; + } + executeQuery(statement, createQueryLogger(statement, query)); + return true; + } + + @Override + public ResultSet executeQuery() throws SQLException { + throwIfUnboundParameters(); + if (statement.getOperation().isMutation()) { + throw new ExecuteQueryNotApplicableException(statement.getOperation()); + } + + return executeQuery(statement, createQueryLogger(statement, query)); + } + + @Override + public int executeUpdate() throws SQLException { + preExecuteUpdate(); + return executeMutation(statement, createAuditQueryLogger(statement, query)); + } + + private void preExecuteUpdate() throws SQLException { + throwIfUnboundParameters(); + if (!statement.getOperation().isMutation()) { + throw new ExecuteUpdateNotApplicableException(statement.getOperation()); + } + if (!batch.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) + .build().buildException(); + } + } + + /** + * Executes the given SQL statement similar to JDBC API executeUpdate() but also returns the + * updated or non-updated row as Result object back to the client. This must be used with + * auto-commit Connection. This makes the operation atomic. If the row is successfully updated, + * return the updated row, otherwise if the row cannot be updated, return non-updated row. 
+ * @return The pair of int and Tuple, where int represents value 1 for successful row update and 0 + * for non-successful row update, and Tuple represents the state of the row. + * @throws SQLException If the statement cannot be executed. + */ + public Pair executeUpdateReturnRow() throws SQLException { + if (!connection.getAutoCommit()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_COMMIT_NOT_ENABLED).build() + .buildException(); + } + preExecuteUpdate(); + return executeMutation(statement, createAuditQueryLogger(statement, query), + MutationState.ReturnResult.ROW); + } + + public QueryPlan optimizeQuery() throws SQLException { + throwIfUnboundParameters(); + return optimizeQuery(statement); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + if (statement.getOperation().isMutation()) { + return null; + } + int paramCount = statement.getBindCount(); + List params = this.getParameters(); + BitSet unsetParams = new BitSet(statement.getBindCount()); + for (int i = 0; i < paramCount; i++) { + if (params.get(i) == BindManager.UNBOUND_PARAMETER) { + unsetParams.set(i); + params.set(i, null); + } + } + try { + // Just compile top level query without optimizing to get ResultSetMetaData + QueryPlan plan = statement.compilePlan(this, Sequence.ValueOp.NOOP); + return new PhoenixResultSetMetaData(this.getConnection(), plan.getProjector()); + } finally { + int lastSetBit = 0; + while ((lastSetBit = unsetParams.nextSetBit(lastSetBit)) != -1) { + params.set(lastSetBit, BindManager.UNBOUND_PARAMETER); + lastSetBit++; + } + } + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + int paramCount = statement.getBindCount(); + List params = this.getParameters(); + BitSet unsetParams = new BitSet(statement.getBindCount()); + for (int i = 0; i < paramCount; i++) { + if (params.get(i) == BindManager.UNBOUND_PARAMETER) { + unsetParams.set(i); + params.set(i, null); + } + } + try { + StatementPlan plan = statement.compilePlan(this, Sequence.ValueOp.NOOP); + return plan.getParameterMetaData(); + } finally { + int lastSetBit = 0; + while ((lastSetBit = unsetParams.nextSetBit(lastSetBit)) != -1) { + params.set(lastSetBit, BindManager.UNBOUND_PARAMETER); + lastSetBit++; + } + } + } + + @Override + public String toString() { + return query; + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void 
setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + setDate(parameterIndex, x, localCalendar); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + setParameter(parameterIndex, processDate(x, cal)); + } + + private java.sql.Date processDate(Date x, Calendar cal) { + if (x != null) { // Since Date is mutable, make a copy + if (connection.isApplyTimeZoneDisplacement()) { + return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); + } else { + // Since Date is mutable, make a copy + return new Date(x.getTime()); + } + } + return x; + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + 
public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + setParameter(parameterIndex, null); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + setParameter(parameterIndex, null); + } + + @Override + public void setObject(int parameterIndex, Object o) throws SQLException { + setParameter(parameterIndex, processObject(o)); + } + + @Override + public void setObject(int parameterIndex, Object o, int targetSqlType) throws SQLException { + o = processObject(o); + PDataType targetType = PDataType.fromTypeId(targetSqlType); + if (o != null) { + PDataType sourceType = PDataType.fromLiteral(o); + o = targetType.toObject(o, sourceType); + } + setParameter(parameterIndex, o); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) + throws SQLException { + setObject(parameterIndex, x, targetSqlType); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + setParameter(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + setTime(parameterIndex, x, localCalendar); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + setParameter(parameterIndex, processTime(x, cal)); + } + + private java.sql.Time processTime(Time x, Calendar cal) { + if (x != null) { + if (connection.isApplyTimeZoneDisplacement()) { + return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); + } else { + // Since Time is mutable, make a copy + return new Time(x.getTime()); + } + } + return x; + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + setTimestamp(parameterIndex, x, localCalendar); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + setParameter(parameterIndex, processTimestamp(x, cal)); + } + + private java.sql.Timestamp processTimestamp(Timestamp x, Calendar cal) { + if (x != null) { + if (connection.isApplyTimeZoneDisplacement()) { + return DateUtil.applyInputDisplacement(x, cal.getTimeZone()); + } else { + int nanos = x.getNanos(); + x = new Timestamp(x.getTime()); + x.setNanos(nanos); + } + } + return x; + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + setParameter(parameterIndex, x.toExternalForm()); // Just treat as String + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + // Convert objects to their canonical forms as 
expected by the Phoenix type system and apply + // TZ displacement + private Object processObject(Object o) { + // We cannot use the direct conversions, as those work in the local TZ. + if (o instanceof java.time.temporal.Temporal) { + if (o instanceof java.time.LocalDateTime) { + return java.sql.Timestamp.from(((java.time.LocalDateTime) o).toInstant(ZoneOffset.UTC)); + } else if (o instanceof java.time.LocalDate) { + // java.sql.Date.from() is inherited from j.u.Date.from() and returns j.u.Date + return new java.sql.Date( + ((java.time.LocalDate) o).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli()); + } else if (o instanceof java.time.LocalTime) { + // preserve nanos if writing to timestamp + return java.sql.Timestamp + .from(((java.time.LocalTime) o).atDate(DateUtil.LD_EPOCH).toInstant(ZoneOffset.UTC)); + } + } else if (o instanceof java.util.Date) { + if (o instanceof java.sql.Date) { + return processDate((java.sql.Date) o, localCalendar); + } else if (o instanceof java.sql.Timestamp) { + return processTimestamp((java.sql.Timestamp) o, localCalendar); + } else if (o instanceof java.sql.Time) { + return processTime((java.sql.Time) o, localCalendar); + } else { + // We could use Timestamp, we don't have millis, and don't differentiate anyway + return processDate(new java.sql.Date(((java.util.Date) o).getTime()), localCalendar) + .getTime(); + } + } + return o; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java index 97f54b84854..1b8924e81e0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,11 +49,6 @@ import java.util.List; import java.util.Map; -import org.apache.phoenix.monitoring.TableMetricsManager; -import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.schema.types.PVarbinaryEncoded; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes; -import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -77,6 +72,7 @@ import org.apache.phoenix.monitoring.MetricType; import org.apache.phoenix.monitoring.OverAllQueryMetrics; import org.apache.phoenix.monitoring.ReadMetricQueue; +import org.apache.phoenix.monitoring.TableMetricsManager; import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PColumnImpl; import org.apache.phoenix.schema.tuple.ResultTuple; @@ -96,1566 +92,1566 @@ import org.apache.phoenix.schema.types.PUnsignedDate; import org.apache.phoenix.schema.types.PUnsignedTime; import org.apache.phoenix.schema.types.PUnsignedTimestamp; +import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.schema.types.PVarbinaryEncoded; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.SQLCloseable; +import org.apache.phoenix.util.SchemaUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.util.SchemaUtil; +import com.google.protobuf.InvalidProtocolBufferException; /** - * - * JDBC ResultSet implementation of Phoenix. - * Currently only the following data types are supported: - * - String - * - Date - * - Time - * - Timestamp - * - BigDecimal - * - Double - * - Float - * - Int - * - Short - * - Long - * - Binary - * - Array - 1D - * None of the update or delete methods are supported. - * The ResultSet only supports the following options: - * - ResultSet.FETCH_FORWARD - * - ResultSet.CONCUR_READ_ONLY - * - ResultSet.TYPE_FORWARD_ONLY - * - ResultSet.CLOSE_CURSORS_AT_COMMIT - * - * + * JDBC ResultSet implementation of Phoenix. Currently only the following data types are supported: + * - String - Date - Time - Timestamp - BigDecimal - Double - Float - Int - Short - Long - Binary - + * Array - 1D None of the update or delete methods are supported. 
The ResultSet only supports the + * following options: - ResultSet.FETCH_FORWARD - ResultSet.CONCUR_READ_ONLY - + * ResultSet.TYPE_FORWARD_ONLY - ResultSet.CLOSE_CURSORS_AT_COMMIT * @since 0.1 */ public class PhoenixResultSet implements PhoenixMonitoredResultSet, SQLCloseable { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixResultSet.class); - - private final static String STRING_FALSE = "0"; - private final static String LITERAL_STRING_FALSE = "false"; - private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0); - private final static Integer INTEGER_FALSE = Integer.valueOf(0); - private final static Tuple BEFORE_FIRST = ResultTuple.EMPTY_TUPLE; - - private final ResultIterator scanner; - private final RowProjector rowProjector; - private final PhoenixStatement statement; - private final StatementContext context; - private final ReadMetricQueue readMetricsQueue; - private final OverAllQueryMetrics overAllQueryMetrics; - private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - private final boolean wildcardIncludesDynamicCols; - private final List staticColumns; - private final int startPositionForDynamicCols; - private final boolean isApplyTimeZoneDisplacement; - - private RowProjector rowProjectorWithDynamicCols; - private Tuple currentRow = BEFORE_FIRST; - private boolean isClosed = false; - private boolean wasNull = false; - private boolean firstRecordRead = false; - - private QueryLogger queryLogger; - - private Long count = 0L; - - private Object exception; - private long queryTime; - private final Calendar localCalendar; - - public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, - StatementContext ctx) throws SQLException { - this.rowProjector = rowProjector; - this.scanner = resultIterator; - this.context = ctx; - this.statement = context.getStatement(); - statement.setLastResultSet(this); - this.readMetricsQueue = context.getReadMetricsQueue(); - this.overAllQueryMetrics = context.getOverallQueryMetrics(); - this.queryLogger = context.getQueryLogger() != null ? 
context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE; - this.wildcardIncludesDynamicCols = this.context.getConnection().getQueryServices() - .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, - DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); - if (this.wildcardIncludesDynamicCols) { - Pair, Integer> res = getStaticColsAndStartingPosForDynCols(); - this.staticColumns = res.getFirst(); - this.startPositionForDynamicCols = res.getSecond(); - } else { - this.staticColumns = null; - this.startPositionForDynamicCols = 0; - } - this.isApplyTimeZoneDisplacement = statement.getConnection().isApplyTimeZoneDisplacement(); - this.localCalendar = statement.getLocalCalendar(); - } - - @Override - public boolean absolute(int row) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void afterLast() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void beforeFirst() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void cancelRowUpdates() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void clearWarnings() throws SQLException { - } - - @Override - public void close() throws SQLException { - if (isClosed) { - return; - } - try { - scanner.close(); - } finally { - isClosed = true; - statement.removeResultSet(this); - overAllQueryMetrics.endQuery(); - overAllQueryMetrics.stopResultSetWatch(); - if (context.getCurrentTable() != null && context.getCurrentTable().getTable() != null - && !Strings.isNullOrEmpty( - context.getCurrentTable().getTable().getPhysicalName().getString())) { - boolean isPointLookup = context.getScanRanges().isPointLookup(); - String tableName = - context.getCurrentTable().getTable().getPhysicalName().toString(); - updateTableLevelReadMetrics(tableName, isPointLookup); - } - if (!queryLogger.isSynced()) { - if(this.exception==null){ - queryLogger.log(QueryLogInfo.QUERY_STATUS_I,QueryStatus.COMPLETED.toString()); - } - queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, count); - if (queryLogger.isDebugEnabled()) { - queryLogger.log(QueryLogInfo.SCAN_METRICS_JSON_I, - readMetricsQueue.getScanMetricsHolderList().toString()); - readMetricsQueue.getScanMetricsHolderList().clear(); - } - // if not already synced , like closing before result set exhausted - queryLogger.sync(getReadMetrics(), getOverAllRequestReadMetrics()); - } - } - } - - @Override - public void deleteRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int findColumn(String columnLabel) throws SQLException { - Integer index = getRowProjector().getColumnIndex(columnLabel); - return index + 1; - } - - @Override - public boolean first() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Array getArray(int columnIndex) throws SQLException { - checkCursorState(); - // Get the value using the expected type instead of trying to coerce to VARCHAR. - // We can't coerce using our formatter because we don't have enough context in PDataType. 
- ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex-1); - Array value = (Array)projector.getValue(currentRow, projector.getExpression().getDataType(), ptr); - wasNull = (value == null); - return value; - } - - @Override - public Array getArray(String columnLabel) throws SQLException { - return getArray(findColumn(columnLabel)); - } - - @Override - public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public InputStream getAsciiStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - private void checkOpen() throws SQLException { - if (isClosed) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.RESULTSET_CLOSED).build().buildException(); - } - } - - private void checkCursorState() throws SQLException { - checkOpen(); - if (currentRow == BEFORE_FIRST) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CURSOR_BEFORE_FIRST_ROW).build().buildException(); - }else if (currentRow == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CURSOR_PAST_LAST_ROW).build().buildException(); - } - } - - @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - checkCursorState(); - BigDecimal value = (BigDecimal)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PDecimal.INSTANCE, ptr); - wasNull = (value == null); - return value; - } - - @Override - public BigDecimal getBigDecimal(String columnLabel) throws SQLException { - return getBigDecimal(findColumn(columnLabel)); - } - - @Override - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - BigDecimal value = getBigDecimal(columnIndex); - if (wasNull) { - return null; - } - return value.setScale(scale); - } - - @Override - public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { - return getBigDecimal(findColumn(columnLabel), scale); - } - - @Override - public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public InputStream getBinaryStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Blob getBlob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Blob getBlob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean getBoolean(int columnIndex) throws SQLException { - checkCursorState(); - ColumnProjector colProjector = getRowProjector().getColumnProjector(columnIndex-1); - PDataType type = colProjector.getExpression().getDataType(); - Object value = colProjector.getValue(currentRow, type, ptr); - wasNull = (value == null); - if (wasNull) { - return false; - } - if (type == PBoolean.INSTANCE) { - return Boolean.TRUE.equals(value); - } else if (type == PVarchar.INSTANCE) { - return !STRING_FALSE.equals(value) && !LITERAL_STRING_FALSE.equals(value); - } else if (type == PInteger.INSTANCE) { - return !INTEGER_FALSE.equals(value); - } else if (type == PDecimal.INSTANCE) { - return !BIG_DECIMAL_FALSE.equals(value); - } else { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_METHOD_ON_TYPE) - .setMessage("Method: getBoolean; Type:" + type).build().buildException(); - } - } - - @Override - public boolean getBoolean(String columnLabel) throws SQLException { - return 
getBoolean(findColumn(columnLabel)); - } - - @Override - public byte[] getBytes(int columnIndex) throws SQLException { - checkCursorState(); - ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); - PDataType dataType = projector.getExpression().getDataType(); - byte[] value = (byte[]) projector.getValue(currentRow, - dataType == PVarbinaryEncoded.INSTANCE ? PVarbinaryEncoded.INSTANCE - : PVarbinary.INSTANCE, ptr); - wasNull = (value == null); - return value; - } - - @Override - public byte[] getBytes(String columnLabel) throws SQLException { - return getBytes(findColumn(columnLabel)); - } - - @Override - public byte getByte(int columnIndex) throws SQLException { -// throw new SQLFeatureNotSupportedException(); - checkCursorState(); - Byte value = (Byte)getRowProjector().getColumnProjector(columnIndex-1).getValue(currentRow, - PTinyint.INSTANCE, ptr); - wasNull = (value == null); - if (value == null) { - return 0; - } - return value; - } - - @Override - public byte getByte(String columnLabel) throws SQLException { - return getByte(findColumn(columnLabel)); - } - - @Override - public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Reader getCharacterStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Clob getClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Clob getClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int getConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; - } - - @Override - public String getCursorName() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Date getDate(int columnIndex) throws SQLException { - return getDate(columnIndex, localCalendar); - } - - @Override - public Date getDate(String columnLabel) throws SQLException { - return getDate(findColumn(columnLabel)); - } - - @Override - public Date getDate(int columnIndex, Calendar cal) throws SQLException { - checkCursorState(); - Date value = - (Date) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, - PDate.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return null; - } - if (isApplyTimeZoneDisplacement) { - return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); - } else { - return value; - } - } - - @Override - public Date getDate(String columnLabel, Calendar cal) throws SQLException { - return getDate(findColumn(columnLabel), cal); - } - - @Override - public double getDouble(int columnIndex) throws SQLException { - checkCursorState(); - Double value = (Double)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PDouble.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return 0; - } - return value; - } - - @Override - public double getDouble(String columnLabel) throws SQLException { - return getDouble(findColumn(columnLabel)); - } - - @Override - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; - } - - @Override - public int getFetchSize() throws SQLException { - return statement.getFetchSize(); - } - - @Override - public float getFloat(int columnIndex) throws SQLException { - checkCursorState(); - Float value = (Float)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, 
PFloat.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return 0; - } - return value; - } - - @Override - public float getFloat(String columnLabel) throws SQLException { - return getFloat(findColumn(columnLabel)); - } - - @Override - public int getHoldability() throws SQLException { - return ResultSet.CLOSE_CURSORS_AT_COMMIT; - } - - @Override - public int getInt(int columnIndex) throws SQLException { - checkCursorState(); - Integer value = (Integer)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PInteger.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return 0; - } - return value; - } - - @Override - public int getInt(String columnLabel) throws SQLException { - return getInt(findColumn(columnLabel)); - } - - @Override - public long getLong(int columnIndex) throws SQLException { - checkCursorState(); - Long value = (Long)getRowProjector().getColumnProjector(columnIndex-1).getValue(currentRow, - PLong.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return 0; - } - return value; - } - - @Override - public long getLong(String columnLabel) throws SQLException { - return getLong(findColumn(columnLabel)); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return new PhoenixResultSetMetaData(statement.getConnection(), getRowProjector()); - } - - @Override - public Reader getNCharacterStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Reader getNCharacterStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public NClob getNClob(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public NClob getNClob(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public String getNString(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public String getNString(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Object getObject(int columnIndex) throws SQLException { - checkCursorState(); - ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); - Object value = projector.getValue(currentRow, projector.getExpression().getDataType(), ptr); - wasNull = (value == null); - if (isApplyTimeZoneDisplacement) { - PDataType type = projector.getExpression().getDataType(); - if (type == PDate.INSTANCE || type == PUnsignedDate.INSTANCE) { - value = - DateUtil.applyOutputDisplacement((java.sql.Date) value, - localCalendar.getTimeZone()); - } else if (type == PTime.INSTANCE || type == PUnsignedTime.INSTANCE) { - value = - DateUtil.applyOutputDisplacement((java.sql.Time) value, - localCalendar.getTimeZone()); - } else if (type == PTimestamp.INSTANCE || type == PUnsignedTimestamp.INSTANCE) { - value = - DateUtil.applyOutputDisplacement((java.sql.Timestamp) value, - localCalendar.getTimeZone()); - } - } - return value; - } - - @Override - public Object getObject(String columnLabel) throws SQLException { - return getObject(findColumn(columnLabel)); - } - - @Override - public Object getObject(int columnIndex, Map> map) throws SQLException { - return getObject(columnIndex); // Just ignore map since we only support built-in types - } - - @Override - public Object getObject(String columnLabel, Map> map) throws SQLException { - return 
getObject(findColumn(columnLabel), map); - } - - @Override - public Ref getRef(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public Ref getRef(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public int getRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public RowId getRowId(int columnIndex) throws SQLException { - // TODO: support? - throw new SQLFeatureNotSupportedException(); - } - - @Override - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public short getShort(int columnIndex) throws SQLException { - checkCursorState(); - Short value = (Short)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PSmallint.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return 0; - } - return value; - } - - @Override - public short getShort(String columnLabel) throws SQLException { - return getShort(findColumn(columnLabel)); - } - - @Override - public PhoenixStatement getStatement() throws SQLException { - return statement; - } - - @Override - public String getString(int columnIndex) throws SQLException { - checkCursorState(); - // Get the value using the expected type instead of trying to coerce to VARCHAR. - // We can't coerce using our formatter because we don't have enough context in PDataType. - ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex-1); - PDataType type = projector.getExpression().getDataType(); - Object value = projector.getValue(currentRow,type, ptr); - wasNull = (value == null); - if (wasNull) { - return null; - } - // Run Object through formatter to get String. - // This provides a simple way of getting a reasonable string representation - // for types like DATE and TIME - Format formatter = statement.getFormatter(type); - return formatter == null ? 
value.toString() : formatter.format(value); - } - - @Override - public String getString(String columnLabel) throws SQLException { - return getString(findColumn(columnLabel)); - } - - @Override - public Time getTime(int columnIndex) throws SQLException { - return getTime(columnIndex, localCalendar); - } - - @Override - public Time getTime(String columnLabel) throws SQLException { - return getTime(findColumn(columnLabel)); - } - - @Override - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - checkCursorState(); - Time value = (Time)getRowProjector().getColumnProjector(columnIndex-1).getValue(currentRow, - PTime.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return null; - } - if (isApplyTimeZoneDisplacement) { - return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); - } else { - return value; - } - } - - @Override - public Time getTime(String columnLabel, Calendar cal) throws SQLException { - return getTime(findColumn(columnLabel),cal); - } - - @Override - public Timestamp getTimestamp(int columnIndex) throws SQLException { - return getTimestamp(columnIndex, localCalendar); - } - - @Override - public Timestamp getTimestamp(String columnLabel) throws SQLException { - return getTimestamp(findColumn(columnLabel)); - } - - @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - checkCursorState(); - Timestamp value = (Timestamp)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PTimestamp.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return null; - } - if (isApplyTimeZoneDisplacement) { - return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); - } else { - return value; - } - } - - @Override - public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - return getTimestamp(findColumn(columnLabel),cal); - } - - @Override - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - @Override - public URL getURL(int columnIndex) throws SQLException { - checkCursorState(); - String value = (String)getRowProjector().getColumnProjector(columnIndex-1) - .getValue(currentRow, PVarchar.INSTANCE, ptr); - wasNull = (value == null); - if (wasNull) { - return null; - } - try { - return new URL(value); - } catch (MalformedURLException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_URL).setRootCause(e).build().buildException(); - } - } - - @Override - public URL getURL(String columnLabel) throws SQLException { - return getURL(findColumn(columnLabel)); - } - - @Override - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public InputStream getUnicodeStream(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return null; - } - - @Override - public void insertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isAfterLast() throws SQLException { - return currentRow == null; - } - - @Override - public boolean isBeforeFirst() throws SQLException { - return currentRow == BEFORE_FIRST; - } - - @Override - public boolean isClosed() throws SQLException { - return isClosed; - } - - @Override - public boolean isFirst() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean isLast() throws 
SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean last() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void moveToCurrentRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void moveToInsertRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - public Tuple getCurrentRow() { - return currentRow; - } - - @Override - public boolean next() throws SQLException { - checkOpen(); - try { - if (!firstRecordRead) { - firstRecordRead = true; - overAllQueryMetrics.startResultSetWatch(); - } - currentRow = scanner.next(); - if (currentRow != null) { - count++; - // Reset this projector with each row - if (this.rowProjectorWithDynamicCols != null) { - this.rowProjectorWithDynamicCols = null; - } - processDynamicColumnsIfRequired(); - } - rowProjector.reset(); - if (rowProjectorWithDynamicCols != null) { - rowProjectorWithDynamicCols.reset(); - } - } catch (RuntimeException | SQLException e) { - // FIXME: Expression.evaluate does not throw SQLException - // so this will unwrap throws from that. - queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); - if (queryLogger.isDebugEnabled()) { - queryLogger.log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); - } - this.exception = e; - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw e; - } finally { - // If an exception occurs during rs.next(), or if we're on the last row, update metrics - if (this.exception != null || currentRow == null) { - overAllQueryMetrics.endQuery(); - overAllQueryMetrics.stopResultSetWatch(); - } - - if (this.exception!=null) { - queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, count); - if (queryLogger.isDebugEnabled()) { - queryLogger.log(QueryLogInfo.SCAN_METRICS_JSON_I, - readMetricsQueue.getScanMetricsHolderList().toString()); - readMetricsQueue.getScanMetricsHolderList().clear(); - } - if (queryLogger != null) { - queryLogger.sync(getReadMetrics(), getOverAllRequestReadMetrics()); - } - } - if (currentRow == null) { - overAllQueryMetrics.endQuery(); - overAllQueryMetrics.stopResultSetWatch(); - } - } - return currentRow != null; - } - - private void updateTableLevelReadMetrics(String tableName, boolean isPointLookup) { - Map> readMetrics = getReadMetrics(); - TableMetricsManager.pushMetricsFromConnInstanceMethod(readMetrics); - Map> metricsFromOverallQuery = new HashMap<>(); - Map overAllReadMetrics = getOverAllRequestReadMetrics(); - metricsFromOverallQuery.put(tableName, overAllReadMetrics); - TableMetricsManager.pushMetricsFromConnInstanceMethod(metricsFromOverallQuery); - if (readMetrics.get(tableName) != null) { - Long scanBytes = readMetrics.get(tableName).get(MetricType.SCAN_BYTES); - if (scanBytes == null) { - scanBytes = 0L; - } - TableMetricsManager.updateHistogramMetricsForQueryScanBytes( - scanBytes, tableName, isPointLookup); - Long timeSpentInRSNext = overAllReadMetrics.get(MetricType.RESULT_SET_TIME_MS); - - if (timeSpentInRSNext == null) { - timeSpentInRSNext = 0l; - } - timeSpentInRSNext += queryTime; - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, timeSpentInRSNext, isPointLookup); - - TableMetricsManager.updateMetricsMethod(tableName, this.exception == null ? 
- MetricType.SELECT_AGGREGATE_SUCCESS_SQL_COUNTER : - MetricType.SELECT_AGGREGATE_FAILURE_SQL_COUNTER, 1); - } - } - - @Override - public boolean previous() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void refreshRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean relative(int rows) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean rowDeleted() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean rowInserted() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean rowUpdated() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - if (direction != ResultSet.FETCH_FORWARD) { - throw new SQLFeatureNotSupportedException(); - } - } - - @Override - public void setFetchSize(int rows) throws SQLException { - LOGGER.warn("Ignoring setFetchSize(" + rows + ")"); - } - - @Override - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateArray(String columnLabel, Array x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBinaryStream(String columnLabel, 
InputStream x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(String columnLabel, Blob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBoolean(String columnLabel, boolean x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateByte(String columnLabel, byte x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateBytes(String columnLabel, byte[] x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(String columnLabel, Clob x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new 
SQLFeatureNotSupportedException(); - } - - @Override - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateDate(String columnLabel, Date x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateDouble(String columnLabel, double x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateFloat(String columnLabel, float x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateInt(String columnLabel, int x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateLong(String columnLabel, long x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(int columnIndex, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(String columnLabel, NClob nClob) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNString(int columnIndex, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNString(String columnLabel, String nString) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateNull(int columnIndex) throws SQLException { - throw new 
SQLFeatureNotSupportedException(); - } - - @Override - public void updateNull(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateObject(String columnLabel, Object x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateRef(String columnLabel, Ref x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateRow() throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateShort(String columnLabel, short x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateString(String columnLabel, String x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateTime(String columnLabel, Time x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } - - @Override - public boolean wasNull() throws SQLException { - return wasNull; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()) - .build().buildException(); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixResultSet.class); + + private final static String STRING_FALSE = "0"; + 
private final static String LITERAL_STRING_FALSE = "false"; + private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0); + private final static Integer INTEGER_FALSE = Integer.valueOf(0); + private final static Tuple BEFORE_FIRST = ResultTuple.EMPTY_TUPLE; + + private final ResultIterator scanner; + private final RowProjector rowProjector; + private final PhoenixStatement statement; + private final StatementContext context; + private final ReadMetricQueue readMetricsQueue; + private final OverAllQueryMetrics overAllQueryMetrics; + private final ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + private final boolean wildcardIncludesDynamicCols; + private final List staticColumns; + private final int startPositionForDynamicCols; + private final boolean isApplyTimeZoneDisplacement; + + private RowProjector rowProjectorWithDynamicCols; + private Tuple currentRow = BEFORE_FIRST; + private boolean isClosed = false; + private boolean wasNull = false; + private boolean firstRecordRead = false; + + private QueryLogger queryLogger; + + private Long count = 0L; + + private Object exception; + private long queryTime; + private final Calendar localCalendar; + + public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, + StatementContext ctx) throws SQLException { + this.rowProjector = rowProjector; + this.scanner = resultIterator; + this.context = ctx; + this.statement = context.getStatement(); + statement.setLastResultSet(this); + this.readMetricsQueue = context.getReadMetricsQueue(); + this.overAllQueryMetrics = context.getOverallQueryMetrics(); + this.queryLogger = + context.getQueryLogger() != null ? context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE; + this.wildcardIncludesDynamicCols = + this.context.getConnection().getQueryServices().getConfiguration() + .getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB, DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB); + if (this.wildcardIncludesDynamicCols) { + Pair, Integer> res = getStaticColsAndStartingPosForDynCols(); + this.staticColumns = res.getFirst(); + this.startPositionForDynamicCols = res.getSecond(); + } else { + this.staticColumns = null; + this.startPositionForDynamicCols = 0; + } + this.isApplyTimeZoneDisplacement = statement.getConnection().isApplyTimeZoneDisplacement(); + this.localCalendar = statement.getLocalCalendar(); + } + + @Override + public boolean absolute(int row) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void afterLast() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void beforeFirst() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void cancelRowUpdates() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void clearWarnings() throws SQLException { + } + + @Override + public void close() throws SQLException { + if (isClosed) { + return; + } + try { + scanner.close(); + } finally { + isClosed = true; + statement.removeResultSet(this); + overAllQueryMetrics.endQuery(); + overAllQueryMetrics.stopResultSetWatch(); + if ( + context.getCurrentTable() != null && context.getCurrentTable().getTable() != null + && !Strings + .isNullOrEmpty(context.getCurrentTable().getTable().getPhysicalName().getString()) + ) { + boolean isPointLookup = context.getScanRanges().isPointLookup(); + String tableName = context.getCurrentTable().getTable().getPhysicalName().toString(); + updateTableLevelReadMetrics(tableName, 
isPointLookup); + } + if (!queryLogger.isSynced()) { + if (this.exception == null) { + queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.COMPLETED.toString()); } - return (T)this; - } - - @SuppressWarnings("unchecked") - @Override - public T getObject(int columnIndex, Class type) throws SQLException { - if (type.equals(String.class)) { - // Special case, the connection specific formatter is not available in the Type system - return (T) getString(columnIndex); - } else if (java.util.Date.class.isAssignableFrom(type)) { - // The displacement handling code is in the specific getters - if (java.sql.Timestamp.class.isAssignableFrom(type)) { - return (T) getTimestamp(columnIndex); - } else if (java.sql.Date.class.isAssignableFrom(type)) { - return (T) getDate(columnIndex); - } else if (java.sql.Time.class.isAssignableFrom(type)) { - return (T) getTime(columnIndex); - } else if (java.util.Date.class.equals(type)) { - return (T) new java.util.Date(getDate(columnIndex).getTime()); - } + queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, count); + if (queryLogger.isDebugEnabled()) { + queryLogger.log(QueryLogInfo.SCAN_METRICS_JSON_I, + readMetricsQueue.getScanMetricsHolderList().toString()); + readMetricsQueue.getScanMetricsHolderList().clear(); } - checkCursorState(); - ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); - - Object value = - projector.getValue(currentRow, projector.getExpression().getDataType(), ptr, type); - - wasNull = (value == null); - return (T) value; - } - - @Override - public T getObject(String columnLabel, Class type) throws SQLException { - return getObject(findColumn(columnLabel), type); - } - - @VisibleForTesting - public ResultIterator getUnderlyingIterator() { - return scanner; - } - - @Override - public Map> getReadMetrics() { - ReadMetricQueue one = readMetricsQueue; - if (context != null) { - for (StatementContext sub : context.getSubStatementContexts()) { - ReadMetricQueue subMetric = sub.getReadMetricsQueue(); - one.combineReadMetrics(subMetric); - } - } - return one.aggregate(); - } - - @Override - public Map getOverAllRequestReadMetrics() { - return overAllQueryMetrics.publish(); - } - - @Override - public void resetMetrics() { - readMetricsQueue.clearMetrics(); - readMetricsQueue.getScanMetricsHolderList().clear(); - overAllQueryMetrics.reset(); - } - - public StatementContext getContext() { - return context; - } - - public void setQueryTime(long queryTime) { - this.queryTime = queryTime; - } - - /** - * Return the row projector to use - * @return the row projector including dynamic column projectors in case we are including - * dynamic columns, otherwise the regular row projector containing static column projectors - */ - private RowProjector getRowProjector() { + // if not already synced , like closing before result set exhausted + queryLogger.sync(getReadMetrics(), getOverAllRequestReadMetrics()); + } + } + } + + @Override + public void deleteRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + Integer index = getRowProjector().getColumnIndex(columnLabel); + return index + 1; + } + + @Override + public boolean first() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + checkCursorState(); + // Get the value using the expected type instead of trying to coerce to VARCHAR. 
+ // We can't coerce using our formatter because we don't have enough context in PDataType. + ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); + Array value = + (Array) projector.getValue(currentRow, projector.getExpression().getDataType(), ptr); + wasNull = (value == null); + return value; + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + return getArray(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + private void checkOpen() throws SQLException { + if (isClosed) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.RESULTSET_CLOSED).build() + .buildException(); + } + } + + private void checkCursorState() throws SQLException { + checkOpen(); + if (currentRow == BEFORE_FIRST) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CURSOR_BEFORE_FIRST_ROW).build() + .buildException(); + } else if (currentRow == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CURSOR_PAST_LAST_ROW).build() + .buildException(); + } + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + checkCursorState(); + BigDecimal value = (BigDecimal) getRowProjector().getColumnProjector(columnIndex - 1) + .getValue(currentRow, PDecimal.INSTANCE, ptr); + wasNull = (value == null); + return value; + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + BigDecimal value = getBigDecimal(columnIndex); + if (wasNull) { + return null; + } + return value.setScale(scale); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return getBigDecimal(findColumn(columnLabel), scale); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + checkCursorState(); + ColumnProjector colProjector = getRowProjector().getColumnProjector(columnIndex - 1); + PDataType type = colProjector.getExpression().getDataType(); + Object value = colProjector.getValue(currentRow, type, ptr); + wasNull = (value == null); + if (wasNull) { + return false; + } + if (type == PBoolean.INSTANCE) { + return Boolean.TRUE.equals(value); + } else if (type == PVarchar.INSTANCE) { + return !STRING_FALSE.equals(value) && !LITERAL_STRING_FALSE.equals(value); + } else if (type == PInteger.INSTANCE) { + return !INTEGER_FALSE.equals(value); + } else if (type == PDecimal.INSTANCE) { + return !BIG_DECIMAL_FALSE.equals(value); + } else { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_METHOD_ON_TYPE) + .setMessage("Method: getBoolean; Type:" + type).build().buildException(); + } 
+ } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + checkCursorState(); + ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); + PDataType dataType = projector.getExpression().getDataType(); + byte[] value = (byte[]) projector.getValue(currentRow, + dataType == PVarbinaryEncoded.INSTANCE ? PVarbinaryEncoded.INSTANCE : PVarbinary.INSTANCE, + ptr); + wasNull = (value == null); + return value; + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(findColumn(columnLabel)); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + // throw new SQLFeatureNotSupportedException(); + checkCursorState(); + Byte value = (Byte) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PTinyint.INSTANCE, ptr); + wasNull = (value == null); + if (value == null) { + return 0; + } + return value; + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(findColumn(columnLabel)); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getConcurrency() throws SQLException { + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public String getCursorName() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + return getDate(columnIndex, localCalendar); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(findColumn(columnLabel)); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + checkCursorState(); + Date value = (Date) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PDate.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return null; + } + if (isApplyTimeZoneDisplacement) { + return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); + } else { + return value; + } + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return getDate(findColumn(columnLabel), cal); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + checkCursorState(); + Double value = (Double) getRowProjector().getColumnProjector(columnIndex - 1) + .getValue(currentRow, PDouble.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return 0; + } + return value; + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(findColumn(columnLabel)); + } + + @Override + public int getFetchDirection() throws SQLException { + return ResultSet.FETCH_FORWARD; + } + + @Override + public int getFetchSize() throws SQLException { + return statement.getFetchSize(); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + checkCursorState(); + 
Float value = (Float) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PFloat.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return 0; + } + return value; + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(findColumn(columnLabel)); + } + + @Override + public int getHoldability() throws SQLException { + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public int getInt(int columnIndex) throws SQLException { + checkCursorState(); + Integer value = (Integer) getRowProjector().getColumnProjector(columnIndex - 1) + .getValue(currentRow, PInteger.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return 0; + } + return value; + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + checkCursorState(); + Long value = (Long) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PLong.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return 0; + } + return value; + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(findColumn(columnLabel)); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return new PhoenixResultSetMetaData(statement.getConnection(), getRowProjector()); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + checkCursorState(); + ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); + Object value = projector.getValue(currentRow, projector.getExpression().getDataType(), ptr); + wasNull = (value == null); + if (isApplyTimeZoneDisplacement) { + PDataType type = projector.getExpression().getDataType(); + if (type == PDate.INSTANCE || type == PUnsignedDate.INSTANCE) { + value = + DateUtil.applyOutputDisplacement((java.sql.Date) value, localCalendar.getTimeZone()); + } else if (type == PTime.INSTANCE || type == PUnsignedTime.INSTANCE) { + value = + DateUtil.applyOutputDisplacement((java.sql.Time) value, localCalendar.getTimeZone()); + } else if (type == PTimestamp.INSTANCE || type == PUnsignedTimestamp.INSTANCE) { + value = + DateUtil.applyOutputDisplacement((java.sql.Timestamp) value, localCalendar.getTimeZone()); + } + } + return value; + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(findColumn(columnLabel)); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + return getObject(columnIndex); // Just ignore map since we only support built-in types + } + + 
@Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return getObject(findColumn(columnLabel), map); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + // TODO: support? + throw new SQLFeatureNotSupportedException(); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + checkCursorState(); + Short value = (Short) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PSmallint.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return 0; + } + return value; + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(findColumn(columnLabel)); + } + + @Override + public PhoenixStatement getStatement() throws SQLException { + return statement; + } + + @Override + public String getString(int columnIndex) throws SQLException { + checkCursorState(); + // Get the value using the expected type instead of trying to coerce to VARCHAR. + // We can't coerce using our formatter because we don't have enough context in PDataType. + ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); + PDataType type = projector.getExpression().getDataType(); + Object value = projector.getValue(currentRow, type, ptr); + wasNull = (value == null); + if (wasNull) { + return null; + } + // Run Object through formatter to get String. + // This provides a simple way of getting a reasonable string representation + // for types like DATE and TIME + Format formatter = statement.getFormatter(type); + return formatter == null ? 
value.toString() : formatter.format(value); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + return getTime(columnIndex, localCalendar); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(findColumn(columnLabel)); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + checkCursorState(); + Time value = (Time) getRowProjector().getColumnProjector(columnIndex - 1).getValue(currentRow, + PTime.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return null; + } + if (isApplyTimeZoneDisplacement) { + return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); + } else { + return value; + } + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return getTime(findColumn(columnLabel), cal); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + return getTimestamp(columnIndex, localCalendar); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + checkCursorState(); + Timestamp value = (Timestamp) getRowProjector().getColumnProjector(columnIndex - 1) + .getValue(currentRow, PTimestamp.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return null; + } + if (isApplyTimeZoneDisplacement) { + return DateUtil.applyOutputDisplacement(value, cal.getTimeZone()); + } else { + return value; + } + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return getTimestamp(findColumn(columnLabel), cal); + } + + @Override + public int getType() throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + checkCursorState(); + String value = (String) getRowProjector().getColumnProjector(columnIndex - 1) + .getValue(currentRow, PVarchar.INSTANCE, ptr); + wasNull = (value == null); + if (wasNull) { + return null; + } + try { + return new URL(value); + } catch (MalformedURLException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_URL).setRootCause(e).build() + .buildException(); + } + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + return getURL(findColumn(columnLabel)); + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public void insertRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isAfterLast() throws SQLException { + return currentRow == null; + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return currentRow == BEFORE_FIRST; + } + + @Override + public boolean isClosed() throws SQLException { + return isClosed; + } + + @Override + public boolean isFirst() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean 
isLast() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean last() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void moveToCurrentRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void moveToInsertRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + public Tuple getCurrentRow() { + return currentRow; + } + + @Override + public boolean next() throws SQLException { + checkOpen(); + try { + if (!firstRecordRead) { + firstRecordRead = true; + overAllQueryMetrics.startResultSetWatch(); + } + currentRow = scanner.next(); + if (currentRow != null) { + count++; + // Reset this projector with each row if (this.rowProjectorWithDynamicCols != null) { - return this.rowProjectorWithDynamicCols; + this.rowProjectorWithDynamicCols = null; } - return this.rowProjector; - } - - /** - * Populate the static columns and the starting position for dynamic columns which we use when - * merging column projectors of static and dynamic columns - * @return Pair whose first part is the list of static column PColumns and the second part is - * the starting position for dynamic columns - */ - private Pair, Integer> getStaticColsAndStartingPosForDynCols(){ - List staticCols = new ArrayList<>(); - for (ColumnProjector cp : this.rowProjector.getColumnProjectors()) { - Expression exp = cp.getExpression(); - if (exp instanceof ProjectedColumnExpression) { - staticCols.addAll(((ProjectedColumnExpression) exp).getColumns()); - break; - } + processDynamicColumnsIfRequired(); + } + rowProjector.reset(); + if (rowProjectorWithDynamicCols != null) { + rowProjectorWithDynamicCols.reset(); + } + } catch (RuntimeException | SQLException e) { + // FIXME: Expression.evaluate does not throw SQLException + // so this will unwrap throws from that. 
+ queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); + if (queryLogger.isDebugEnabled()) { + queryLogger.log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); + } + this.exception = e; + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw e; + } finally { + // If an exception occurs during rs.next(), or if we're on the last row, update metrics + if (this.exception != null || currentRow == null) { + overAllQueryMetrics.endQuery(); + overAllQueryMetrics.stopResultSetWatch(); + } + + if (this.exception != null) { + queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, count); + if (queryLogger.isDebugEnabled()) { + queryLogger.log(QueryLogInfo.SCAN_METRICS_JSON_I, + readMetricsQueue.getScanMetricsHolderList().toString()); + readMetricsQueue.getScanMetricsHolderList().clear(); } - int startingPosForDynCols = 0; - for (PColumn col : staticCols) { - if (!SchemaUtil.isPKColumn(col)) { - startingPosForDynCols++; - } - } - return new Pair<>(staticCols, startingPosForDynCols); - } - - /** - * Process the dynamic column metadata for the current row and store the complete projector for - * all static and dynamic columns for this row - */ - private void processDynamicColumnsIfRequired() { - if (!this.wildcardIncludesDynamicCols || this.currentRow == null || - !this.rowProjector.projectDynColsInWildcardQueries()) { - return; + if (queryLogger != null) { + queryLogger.sync(getReadMetrics(), getOverAllRequestReadMetrics()); } - List dynCols = getDynColsListAndSeparateFromActualData(); - if (dynCols == null) { - return; - } - - RowProjector rowProjectorWithDynamicColumns = null; - if (this.rowProjector.getColumnCount() > 0 && - dynCols.size() > 0) { - rowProjectorWithDynamicColumns = mergeRowProjectorWithDynColProjectors(dynCols, - this.rowProjector.getColumnProjector(0).getTableName()); - } - // Set the combined row projector - if (rowProjectorWithDynamicColumns != null) { - this.rowProjectorWithDynamicCols = rowProjectorWithDynamicColumns; - } - } - - /** - * Separate the actual cell data from the serialized list of dynamic column PColumns and - * return the deserialized list of dynamic column PColumns for the current row - * @return Deserialized list of dynamic column PColumns or null if there are no dynamic columns - */ - private List getDynColsListAndSeparateFromActualData() { - Cell base = this.currentRow.getValue(0); - final byte[] valueArray = CellUtil.cloneValue(base); - // We inserted the known byte array before appending the serialized list of dynamic columns - final byte[] anchor = Arrays.copyOf(DYN_COLS_METADATA_CELL_QUALIFIER, - DYN_COLS_METADATA_CELL_QUALIFIER.length); - // Reverse the arrays to find the last occurrence of the sub-array in the value array - ArrayUtils.reverse(valueArray); - ArrayUtils.reverse(anchor); - final int pos = valueArray.length - Bytes.indexOf(valueArray, anchor); - // There are no dynamic columns to process so return immediately - if (pos >= valueArray.length) { - return null; - } - ArrayUtils.reverse(valueArray); - - // Separate the serialized list of dynamic column PColumns from the actual cell data - byte[] actualCellDataBytes = Arrays.copyOfRange(valueArray, 0, - pos - DYN_COLS_METADATA_CELL_QUALIFIER.length); - ImmutableBytesWritable actualCellData = new ImmutableBytesWritable(actualCellDataBytes); - ImmutableBytesWritable key = new ImmutableBytesWritable(); - currentRow.getKey(key); - // Store only the actual cell data as part of the current row - this.currentRow = new 
TupleProjector.ProjectedValueTuple(key.get(), key.getOffset(), - key.getLength(), base.getTimestamp(), - actualCellData.get(), actualCellData.getOffset(), actualCellData.getLength(), 0); - - byte[] dynColsListBytes = Arrays.copyOfRange(valueArray, pos, valueArray.length); - List dynCols = new ArrayList<>(); - try { - List dynColsProtos = DynamicColumnMetaDataProtos - .DynamicColumnMetaData.parseFrom(dynColsListBytes).getDynamicColumnsList(); - for (PTableProtos.PColumn colProto : dynColsProtos) { - dynCols.add(PColumnImpl.createFromProto(colProto)); - } - } catch (InvalidProtocolBufferException e) { - return null; - } - return dynCols; - } - - /** - * Add the dynamic column projectors at the end of the current row's row projector - * @param dynCols list of dynamic column PColumns for the current row - * @param tableName table name - * @return The combined row projector containing column projectors for both static and dynamic - * columns - */ - private RowProjector mergeRowProjectorWithDynColProjectors(List dynCols, - String tableName) { - List allColumnProjectors = - new ArrayList<>(this.rowProjector.getColumnProjectors()); - List allCols = new ArrayList<>(); - if (this.staticColumns != null) { - allCols.addAll(this.staticColumns); - } - // Add dynamic columns to the end - allCols.addAll(dynCols); - - int startingPos = this.startPositionForDynamicCols; - // Get the ProjectedColumnExpressions for dynamic columns - for (PColumn currentDynCol : dynCols) { - // Note that we refer to all the existing static columns along with all dynamic columns - // in each of the newly added dynamic column projectors. - // This is required for correctly building the schema for each of the dynamic columns - Expression exp = new ProjectedColumnExpression(currentDynCol, allCols, - startingPos++, currentDynCol.getName().getString()); - - ColumnProjector dynColProj = new ExpressionProjector( - currentDynCol.getName().getString(), currentDynCol.getName().getString(), tableName, exp, false); - allColumnProjectors.add(dynColProj); - } - - return new RowProjector(allColumnProjectors, this.rowProjector.getEstimatedRowByteSize(), - this.rowProjector.projectEveryRow(), this.rowProjector.hasUDFs(), - this.rowProjector.projectEverything(), - this.rowProjector.projectDynColsInWildcardQueries()); - } + } + if (currentRow == null) { + overAllQueryMetrics.endQuery(); + overAllQueryMetrics.stopResultSetWatch(); + } + } + return currentRow != null; + } + + private void updateTableLevelReadMetrics(String tableName, boolean isPointLookup) { + Map> readMetrics = getReadMetrics(); + TableMetricsManager.pushMetricsFromConnInstanceMethod(readMetrics); + Map> metricsFromOverallQuery = new HashMap<>(); + Map overAllReadMetrics = getOverAllRequestReadMetrics(); + metricsFromOverallQuery.put(tableName, overAllReadMetrics); + TableMetricsManager.pushMetricsFromConnInstanceMethod(metricsFromOverallQuery); + if (readMetrics.get(tableName) != null) { + Long scanBytes = readMetrics.get(tableName).get(MetricType.SCAN_BYTES); + if (scanBytes == null) { + scanBytes = 0L; + } + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(scanBytes, tableName, + isPointLookup); + Long timeSpentInRSNext = overAllReadMetrics.get(MetricType.RESULT_SET_TIME_MS); + + if (timeSpentInRSNext == null) { + timeSpentInRSNext = 0l; + } + timeSpentInRSNext += queryTime; + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, timeSpentInRSNext, + isPointLookup); + + TableMetricsManager.updateMetricsMethod(tableName, + this.exception == null + ? 
MetricType.SELECT_AGGREGATE_SUCCESS_SQL_COUNTER + : MetricType.SELECT_AGGREGATE_FAILURE_SQL_COUNTER, + 1); + } + } + + @Override + public boolean previous() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void refreshRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean relative(int rows) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowDeleted() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowInserted() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean rowUpdated() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (direction != ResultSet.FETCH_FORWARD) { + throw new SQLFeatureNotSupportedException(); + } + } + + @Override + public void setFetchSize(int rows) throws SQLException { + LOGGER.warn("Ignoring setFetchSize(" + rows + ")"); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(String columnLabel, 
InputStream x, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new 
SQLFeatureNotSupportedException(); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + throw new 
SQLFeatureNotSupportedException(); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRow() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean wasNull() throws SQLException { + return wasNull; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } + + @SuppressWarnings("unchecked") + @Override + public T getObject(int columnIndex, Class type) throws 
SQLException { + if (type.equals(String.class)) { + // Special case, the connection specific formatter is not available in the Type system + return (T) getString(columnIndex); + } else if (java.util.Date.class.isAssignableFrom(type)) { + // The displacement handling code is in the specific getters + if (java.sql.Timestamp.class.isAssignableFrom(type)) { + return (T) getTimestamp(columnIndex); + } else if (java.sql.Date.class.isAssignableFrom(type)) { + return (T) getDate(columnIndex); + } else if (java.sql.Time.class.isAssignableFrom(type)) { + return (T) getTime(columnIndex); + } else if (java.util.Date.class.equals(type)) { + return (T) new java.util.Date(getDate(columnIndex).getTime()); + } + } + checkCursorState(); + ColumnProjector projector = getRowProjector().getColumnProjector(columnIndex - 1); + + Object value = + projector.getValue(currentRow, projector.getExpression().getDataType(), ptr, type); + + wasNull = (value == null); + return (T) value; + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return getObject(findColumn(columnLabel), type); + } + + @VisibleForTesting + public ResultIterator getUnderlyingIterator() { + return scanner; + } + + @Override + public Map> getReadMetrics() { + ReadMetricQueue one = readMetricsQueue; + if (context != null) { + for (StatementContext sub : context.getSubStatementContexts()) { + ReadMetricQueue subMetric = sub.getReadMetricsQueue(); + one.combineReadMetrics(subMetric); + } + } + return one.aggregate(); + } + + @Override + public Map getOverAllRequestReadMetrics() { + return overAllQueryMetrics.publish(); + } + + @Override + public void resetMetrics() { + readMetricsQueue.clearMetrics(); + readMetricsQueue.getScanMetricsHolderList().clear(); + overAllQueryMetrics.reset(); + } + + public StatementContext getContext() { + return context; + } + + public void setQueryTime(long queryTime) { + this.queryTime = queryTime; + } + + /** + * Return the row projector to use + * @return the row projector including dynamic column projectors in case we are including dynamic + * columns, otherwise the regular row projector containing static column projectors + */ + private RowProjector getRowProjector() { + if (this.rowProjectorWithDynamicCols != null) { + return this.rowProjectorWithDynamicCols; + } + return this.rowProjector; + } + + /** + * Populate the static columns and the starting position for dynamic columns which we use when + * merging column projectors of static and dynamic columns + * @return Pair whose first part is the list of static column PColumns and the second part is the + * starting position for dynamic columns + */ + private Pair, Integer> getStaticColsAndStartingPosForDynCols() { + List staticCols = new ArrayList<>(); + for (ColumnProjector cp : this.rowProjector.getColumnProjectors()) { + Expression exp = cp.getExpression(); + if (exp instanceof ProjectedColumnExpression) { + staticCols.addAll(((ProjectedColumnExpression) exp).getColumns()); + break; + } + } + int startingPosForDynCols = 0; + for (PColumn col : staticCols) { + if (!SchemaUtil.isPKColumn(col)) { + startingPosForDynCols++; + } + } + return new Pair<>(staticCols, startingPosForDynCols); + } + + /** + * Process the dynamic column metadata for the current row and store the complete projector for + * all static and dynamic columns for this row + */ + private void processDynamicColumnsIfRequired() { + if ( + !this.wildcardIncludesDynamicCols || this.currentRow == null + || !this.rowProjector.projectDynColsInWildcardQueries() + ) 
{ + return; + } + List dynCols = getDynColsListAndSeparateFromActualData(); + if (dynCols == null) { + return; + } + + RowProjector rowProjectorWithDynamicColumns = null; + if (this.rowProjector.getColumnCount() > 0 && dynCols.size() > 0) { + rowProjectorWithDynamicColumns = mergeRowProjectorWithDynColProjectors(dynCols, + this.rowProjector.getColumnProjector(0).getTableName()); + } + // Set the combined row projector + if (rowProjectorWithDynamicColumns != null) { + this.rowProjectorWithDynamicCols = rowProjectorWithDynamicColumns; + } + } + + /** + * Separate the actual cell data from the serialized list of dynamic column PColumns and return + * the deserialized list of dynamic column PColumns for the current row + * @return Deserialized list of dynamic column PColumns or null if there are no dynamic columns + */ + private List getDynColsListAndSeparateFromActualData() { + Cell base = this.currentRow.getValue(0); + final byte[] valueArray = CellUtil.cloneValue(base); + // We inserted the known byte array before appending the serialized list of dynamic columns + final byte[] anchor = + Arrays.copyOf(DYN_COLS_METADATA_CELL_QUALIFIER, DYN_COLS_METADATA_CELL_QUALIFIER.length); + // Reverse the arrays to find the last occurrence of the sub-array in the value array + ArrayUtils.reverse(valueArray); + ArrayUtils.reverse(anchor); + final int pos = valueArray.length - Bytes.indexOf(valueArray, anchor); + // There are no dynamic columns to process so return immediately + if (pos >= valueArray.length) { + return null; + } + ArrayUtils.reverse(valueArray); + + // Separate the serialized list of dynamic column PColumns from the actual cell data + byte[] actualCellDataBytes = + Arrays.copyOfRange(valueArray, 0, pos - DYN_COLS_METADATA_CELL_QUALIFIER.length); + ImmutableBytesWritable actualCellData = new ImmutableBytesWritable(actualCellDataBytes); + ImmutableBytesWritable key = new ImmutableBytesWritable(); + currentRow.getKey(key); + // Store only the actual cell data as part of the current row + this.currentRow = new TupleProjector.ProjectedValueTuple(key.get(), key.getOffset(), + key.getLength(), base.getTimestamp(), actualCellData.get(), actualCellData.getOffset(), + actualCellData.getLength(), 0); + + byte[] dynColsListBytes = Arrays.copyOfRange(valueArray, pos, valueArray.length); + List dynCols = new ArrayList<>(); + try { + List dynColsProtos = DynamicColumnMetaDataProtos.DynamicColumnMetaData + .parseFrom(dynColsListBytes).getDynamicColumnsList(); + for (PTableProtos.PColumn colProto : dynColsProtos) { + dynCols.add(PColumnImpl.createFromProto(colProto)); + } + } catch (InvalidProtocolBufferException e) { + return null; + } + return dynCols; + } + + /** + * Add the dynamic column projectors at the end of the current row's row projector + * @param dynCols list of dynamic column PColumns for the current row + * @param tableName table name + * @return The combined row projector containing column projectors for both static and dynamic + * columns + */ + private RowProjector mergeRowProjectorWithDynColProjectors(List dynCols, + String tableName) { + List allColumnProjectors = + new ArrayList<>(this.rowProjector.getColumnProjectors()); + List allCols = new ArrayList<>(); + if (this.staticColumns != null) { + allCols.addAll(this.staticColumns); + } + // Add dynamic columns to the end + allCols.addAll(dynCols); + + int startingPos = this.startPositionForDynamicCols; + // Get the ProjectedColumnExpressions for dynamic columns + for (PColumn currentDynCol : dynCols) { + // Note that we refer to all the 
existing static columns along with all dynamic columns + // in each of the newly added dynamic column projectors. + // This is required for correctly building the schema for each of the dynamic columns + Expression exp = new ProjectedColumnExpression(currentDynCol, allCols, startingPos++, + currentDynCol.getName().getString()); + + ColumnProjector dynColProj = new ExpressionProjector(currentDynCol.getName().getString(), + currentDynCol.getName().getString(), tableName, exp, false); + allColumnProjectors.add(dynColProj); + } + + return new RowProjector(allColumnProjectors, this.rowProjector.getEstimatedRowByteSize(), + this.rowProjector.projectEveryRow(), this.rowProjector.hasUDFs(), + this.rowProjector.projectEverything(), this.rowProjector.projectDynColsInWildcardQueries()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java index bfe952602b4..b9fa93d7005 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,184 +26,174 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDataType; /** - * - * JDBC ResultSetMetaData implementation of Phoenix. - * Currently only the following methods are supported: - * - {@link #getColumnCount()} - * - {@link #getColumnDisplaySize(int)} - * - {@link #getColumnLabel(int)} displays alias name if present and column name otherwise - * - {@link #getColumnName(int)} same as {@link #getColumnLabel(int)} - * - {@link #isCaseSensitive(int)} - * - {@link #getColumnType(int)} - * - {@link #getColumnTypeName(int)} - * - {@link #getTableName(int)} - * - {@link #getSchemaName(int)} always returns empty string - * - {@link #getCatalogName(int)} always returns empty string - * - {@link #isNullable(int)} - * - {@link #isSigned(int)} - * - {@link #isAutoIncrement(int)} always false - * - {@link #isCurrency(int)} always false - * - {@link #isDefinitelyWritable(int)} always false - * - {@link #isReadOnly(int)} always true - * - {@link #isSearchable(int)} always true - * - * + * JDBC ResultSetMetaData implementation of Phoenix. 
Currently only the following methods are + * supported: - {@link #getColumnCount()} - {@link #getColumnDisplaySize(int)} - + * {@link #getColumnLabel(int)} displays alias name if present and column name otherwise - + * {@link #getColumnName(int)} same as {@link #getColumnLabel(int)} - {@link #isCaseSensitive(int)} + * - {@link #getColumnType(int)} - {@link #getColumnTypeName(int)} - {@link #getTableName(int)} - + * {@link #getSchemaName(int)} always returns empty string - {@link #getCatalogName(int)} always + * returns empty string - {@link #isNullable(int)} - {@link #isSigned(int)} - + * {@link #isAutoIncrement(int)} always false - {@link #isCurrency(int)} always false - + * {@link #isDefinitelyWritable(int)} always false - {@link #isReadOnly(int)} always true - + * {@link #isSearchable(int)} always true * @since 0.1 */ public class PhoenixResultSetMetaData implements ResultSetMetaData { - static final int DEFAULT_DISPLAY_WIDTH = 40; - private final RowProjector rowProjector; - private final PhoenixConnection connection; - - public PhoenixResultSetMetaData(PhoenixConnection connection, RowProjector projector) { - this.connection = connection; - this.rowProjector = projector; - } - - @Override - public String getCatalogName(int column) throws SQLException { - return ""; - } - - @Override - public String getColumnClassName(int column) throws SQLException { - PDataType type = rowProjector.getColumnProjector(column-1).getExpression().getDataType(); - return type == null ? null : type.getJavaClassName(); - } - - @Override - public int getColumnCount() throws SQLException { - return rowProjector.getColumnCount(); - } - - @Override - public int getColumnDisplaySize(int column) throws SQLException { - ColumnProjector projector = rowProjector.getColumnProjector(column-1); - PDataType type = projector.getExpression().getDataType(); - if (type == null) { - return QueryConstants.NULL_DISPLAY_TEXT.length(); - } - if (type.isCoercibleTo(PDate.INSTANCE)) { - return connection.getDatePattern().length(); - } - if (projector.getExpression().getMaxLength() != null) { - return projector.getExpression().getMaxLength(); - } - return DEFAULT_DISPLAY_WIDTH; - } - - @Override - public String getColumnLabel(int column) throws SQLException { - return rowProjector.getColumnProjector(column-1).getLabel(); - } - - @Override - public String getColumnName(int column) throws SQLException { - return rowProjector.getColumnProjector(column-1).getName(); - } - - @Override - public int getColumnType(int column) throws SQLException { - PDataType type = rowProjector.getColumnProjector(column-1).getExpression().getDataType(); - return type == null ? Types.NULL : type.getResultSetSqlType(); - } - - @Override - public String getColumnTypeName(int column) throws SQLException { - PDataType type = rowProjector.getColumnProjector(column-1).getExpression().getDataType(); - return type == null ? "NULL" : type.getSqlTypeName(); - } - - @Override - public int getPrecision(int column) throws SQLException { - Integer precision = rowProjector.getColumnProjector(column-1).getExpression().getMaxLength(); - return precision == null ? 0 : precision; - } - - @Override - public int getScale(int column) throws SQLException { - Integer scale = rowProjector.getColumnProjector(column-1).getExpression().getScale(); - return scale == null ? 
0 : scale; - } - - @Override - public String getSchemaName(int column) throws SQLException { - return ""; // TODO - } + static final int DEFAULT_DISPLAY_WIDTH = 40; + private final RowProjector rowProjector; + private final PhoenixConnection connection; + + public PhoenixResultSetMetaData(PhoenixConnection connection, RowProjector projector) { + this.connection = connection; + this.rowProjector = projector; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return ""; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + PDataType type = rowProjector.getColumnProjector(column - 1).getExpression().getDataType(); + return type == null ? null : type.getJavaClassName(); + } + + @Override + public int getColumnCount() throws SQLException { + return rowProjector.getColumnCount(); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + ColumnProjector projector = rowProjector.getColumnProjector(column - 1); + PDataType type = projector.getExpression().getDataType(); + if (type == null) { + return QueryConstants.NULL_DISPLAY_TEXT.length(); + } + if (type.isCoercibleTo(PDate.INSTANCE)) { + return connection.getDatePattern().length(); + } + if (projector.getExpression().getMaxLength() != null) { + return projector.getExpression().getMaxLength(); + } + return DEFAULT_DISPLAY_WIDTH; + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return rowProjector.getColumnProjector(column - 1).getLabel(); + } + + @Override + public String getColumnName(int column) throws SQLException { + return rowProjector.getColumnProjector(column - 1).getName(); + } + + @Override + public int getColumnType(int column) throws SQLException { + PDataType type = rowProjector.getColumnProjector(column - 1).getExpression().getDataType(); + return type == null ? Types.NULL : type.getResultSetSqlType(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + PDataType type = rowProjector.getColumnProjector(column - 1).getExpression().getDataType(); + return type == null ? "NULL" : type.getSqlTypeName(); + } + + @Override + public int getPrecision(int column) throws SQLException { + Integer precision = rowProjector.getColumnProjector(column - 1).getExpression().getMaxLength(); + return precision == null ? 0 : precision; + } + + @Override + public int getScale(int column) throws SQLException { + Integer scale = rowProjector.getColumnProjector(column - 1).getExpression().getScale(); + return scale == null ? 0 : scale; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return ""; // TODO + } + + @Override + public String getTableName(int column) throws SQLException { + return rowProjector.getColumnProjector(column - 1).getTableName(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + return rowProjector.getColumnProjector(column - 1).isCaseSensitive(); + } + + @Override + public boolean isCurrency(int column) throws SQLException { + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + return rowProjector.getColumnProjector(column - 1).getExpression().isNullable() + ? 
ResultSetMetaData.columnNullable + : ResultSetMetaData.columnNoNulls; + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + return true; + } + + @Override + public boolean isSigned(int column) throws SQLException { + PDataType type = rowProjector.getColumnProjector(column - 1).getExpression().getDataType(); + if (type == null) { + return false; + } + return type.isCoercibleTo(PDecimal.INSTANCE); + } + + @Override + public boolean isWritable(int column) throws SQLException { + return false; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } - @Override - public String getTableName(int column) throws SQLException { - return rowProjector.getColumnProjector(column-1).getTableName(); - } - - @Override - public boolean isAutoIncrement(int column) throws SQLException { - return false; - } - - @Override - public boolean isCaseSensitive(int column) throws SQLException { - return rowProjector.getColumnProjector(column-1).isCaseSensitive(); - } - - @Override - public boolean isCurrency(int column) throws SQLException { - return false; - } - - @Override - public boolean isDefinitelyWritable(int column) throws SQLException { - return false; - } - - @Override - public int isNullable(int column) throws SQLException { - return rowProjector.getColumnProjector(column-1).getExpression().isNullable() ? ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls; - } - - @Override - public boolean isReadOnly(int column) throws SQLException { - return true; - } - - @Override - public boolean isSearchable(int column) throws SQLException { - return true; - } - - @Override - public boolean isSigned(int column) throws SQLException { - PDataType type = rowProjector.getColumnProjector(column-1).getExpression().getDataType(); - if (type == null) { - return false; - } - return type.isCoercibleTo(PDecimal.INSTANCE); - } - - @Override - public boolean isWritable(int column) throws SQLException { - return false; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); - } - - @SuppressWarnings("unchecked") - @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()) - .build().buildException(); - } - return (T)this; - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java index c361868405d..5e10eb11784 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -147,9 +147,7 @@ import org.apache.phoenix.parse.DeclareCursorStatement; import org.apache.phoenix.parse.DeleteJarStatement; import org.apache.phoenix.parse.DeleteStatement; -import org.apache.phoenix.parse.ExplainType; -import org.apache.phoenix.parse.ShowCreateTableStatement; -import org.apache.phoenix.parse.ShowCreateTable; +import org.apache.phoenix.parse.DropCDCStatement; import org.apache.phoenix.parse.DropColumnStatement; import org.apache.phoenix.parse.DropFunctionStatement; import org.apache.phoenix.parse.DropIndexStatement; @@ -158,6 +156,7 @@ import org.apache.phoenix.parse.DropTableStatement; import org.apache.phoenix.parse.ExecuteUpgradeStatement; import org.apache.phoenix.parse.ExplainStatement; +import org.apache.phoenix.parse.ExplainType; import org.apache.phoenix.parse.FetchStatement; import org.apache.phoenix.parse.FilterableStatement; import org.apache.phoenix.parse.HintNode; @@ -177,6 +176,8 @@ import org.apache.phoenix.parse.PrimaryKeyConstraint; import org.apache.phoenix.parse.SQLParser; import org.apache.phoenix.parse.SelectStatement; +import org.apache.phoenix.parse.ShowCreateTable; +import org.apache.phoenix.parse.ShowCreateTableStatement; import org.apache.phoenix.parse.ShowSchemasStatement; import org.apache.phoenix.parse.ShowTablesStatement; import org.apache.phoenix.parse.TableName; @@ -186,7 +187,6 @@ import org.apache.phoenix.parse.UpdateStatisticsStatement; import org.apache.phoenix.parse.UpsertStatement; import org.apache.phoenix.parse.UseSchemaStatement; -import org.apache.phoenix.parse.DropCDCStatement; import org.apache.phoenix.query.HBaseFactoryProvider; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryConstants; @@ -216,2597 +216,2667 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.math.IntMath; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CDCUtil; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CursorUtil; -import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.ParseNodeUtil; +import org.apache.phoenix.util.ParseNodeUtil.RewriteResult; import org.apache.phoenix.util.PhoenixContextExecutor; +import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SQLCloseable; -import org.apache.phoenix.util.ParseNodeUtil.RewriteResult; import org.apache.phoenix.util.ValidateLastDDLTimestampUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import 
org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.math.IntMath; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; /** - * - * JDBC Statement implementation of Phoenix. - * Currently only the following methods are supported: - * - {@link #executeQuery(String)} - * - {@link #executeUpdate(String)} - * - {@link #execute(String)} - * - {@link #getResultSet()} - * - {@link #getUpdateCount()} - * - {@link #close()} - * The Statement only supports the following options: - * - ResultSet.FETCH_FORWARD - * - ResultSet.TYPE_FORWARD_ONLY - * - ResultSet.CLOSE_CURSORS_AT_COMMIT - * - * + * JDBC Statement implementation of Phoenix. Currently only the following methods are supported: - + * {@link #executeQuery(String)} - {@link #executeUpdate(String)} - {@link #execute(String)} - + * {@link #getResultSet()} - {@link #getUpdateCount()} - {@link #close()} The Statement only + * supports the following options: - ResultSet.FETCH_FORWARD - ResultSet.TYPE_FORWARD_ONLY - + * ResultSet.CLOSE_CURSORS_AT_COMMIT * @since 0.1 */ public class PhoenixStatement implements PhoenixMonitoredStatement, SQLCloseable { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatement.class); - - public enum Operation { - QUERY("queried", false), - DELETE("deleted", true), - UPSERT("upserted", true), - UPGRADE("upgrade", true), - ADMIN("admin", true); - - private final String toString; - private final boolean isMutation; - Operation(String toString, boolean isMutation) { - this.toString = toString; - this.isMutation = isMutation; - } - - public boolean isMutation() { - return isMutation; - } - - @Override - public String toString() { - return toString; - } - } - - protected final PhoenixConnection connection; - private static final int NO_UPDATE = -1; - private static final String TABLE_UNKNOWN = ""; - private QueryPlan lastQueryPlan; - private PhoenixResultSet lastResultSet; - private int lastUpdateCount = NO_UPDATE; - - private String lastUpdateTable = TABLE_UNKNOWN; - private Operation lastUpdateOperation; - private boolean isClosed = false; - private boolean closeOnCompletion = false; - private int maxRows; - private int fetchSize = -1; - private int queryTimeoutMillis; - // Caching per Statement - protected final Calendar localCalendar = Calendar.getInstance(); - private boolean validateLastDdlTimestamp; - - public PhoenixStatement(PhoenixConnection connection) { - this.connection = connection; - this.queryTimeoutMillis = getDefaultQueryTimeoutMillis(); - this.validateLastDdlTimestamp = ValidateLastDDLTimestampUtil - .getValidateLastDdlTimestampEnabled(this.connection); - } - - /** - * Internally to Phoenix we allow callers to set the query timeout in millis - * via the phoenix.query.timeoutMs. Therefore we store the time in millis. 
- */ - private int getDefaultQueryTimeoutMillis() { - return connection.getQueryServices().getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - } - - protected List getResultSets() { - if (lastResultSet != null) { - return Collections.singletonList(lastResultSet); - } else { - return Collections.emptyList(); - } - } - - public PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector, StatementContext context) throws SQLException { - return new PhoenixResultSet(iterator, projector, context); - } - - protected QueryPlan optimizeQuery(CompilableStatement stmt) throws SQLException { - QueryPlan plan = stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); - return connection.getQueryServices().getOptimizer().optimize(this, plan); - } - - protected PhoenixResultSet executeQuery(final CompilableStatement stmt, final QueryLogger queryLogger) - throws SQLException { - return executeQuery(stmt, true, queryLogger, false, this.validateLastDdlTimestamp); - } - - protected PhoenixResultSet executeQuery(final CompilableStatement stmt, final QueryLogger queryLogger, boolean noCommit) - throws SQLException { - return executeQuery(stmt, true, queryLogger, noCommit, this.validateLastDdlTimestamp); - } - - - private PhoenixResultSet executeQuery(final CompilableStatement stmt, - final boolean doRetryOnMetaNotFoundError, - final QueryLogger queryLogger, final boolean noCommit, - boolean shouldValidateLastDdlTimestamp) - throws SQLException { - GLOBAL_SELECT_SQL_COUNTER.increment(); - try { - return CallRunner - .run(new CallRunner.CallableThrowable() { - @Override public PhoenixResultSet call() throws SQLException { - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); - boolean success = false; - boolean updateMetrics = true; - boolean pointLookup = false; - String tableName = null; - clearResultSet(); - PhoenixResultSet rs = null; - QueryPlan plan = null; - try { - PhoenixConnection conn = getConnection(); - conn.checkOpen(); - - if (conn.getQueryServices().isUpgradeRequired() && !conn - .isRunningUpgrade() - && stmt.getOperation() != Operation.UPGRADE) { - throw new UpgradeRequiredException(); - } - plan = stmt.compilePlan(PhoenixStatement.this, - Sequence.ValueOp.VALIDATE_SEQUENCE); - // Send mutations to hbase, so they are visible to subsequent reads. - // Use original plan for data table so that data and immutable indexes will be sent - // TODO: for joins, we need to iterate through all tables, but we need the original table, - // not the projected table, so plan.getContext().getResolver().getTables() won't work. 
- if (plan.getContext().getScanRanges().isPointLookup()) { - pointLookup = true; - } - Iterator tableRefs = plan.getSourceRefs().iterator(); - connection.getMutationState().sendUncommitted(tableRefs); - plan = - connection.getQueryServices().getOptimizer() - .optimize(PhoenixStatement.this, plan); - setLastQueryPlan(plan); - - //verify metadata for the table/view/index in the query plan - //plan.getTableRef can be null in some cases like EXPLAIN - if (shouldValidateLastDdlTimestamp && plan.getTableRef() != null) { - ValidateLastDDLTimestampUtil.validateLastDDLTimestamp( - connection, Arrays.asList(plan.getTableRef()), true); - } - - if (plan.getTableRef() != null - && plan.getTableRef().getTable() != null && !Strings - .isNullOrEmpty( - plan.getTableRef().getTable().getPhysicalName() - .toString())) { - tableName = plan.getTableRef().getTable().getPhysicalName() - .toString(); - } - // this will create its own trace internally, so we don't wrap this - // whole thing in tracing - ResultIterator resultIterator = plan.iterator(); - if (LOGGER.isDebugEnabled()) { - String explainPlan = QueryUtil.getExplainPlan(resultIterator); - LOGGER.debug(LogUtil.addCustomAnnotations( - "Explain plan: " + explainPlan, connection)); - } - StatementContext context = plan.getContext(); - context.setQueryLogger(queryLogger); - if (queryLogger.isDebugEnabled()) { - queryLogger.log(QueryLogInfo.EXPLAIN_PLAN_I, - QueryUtil.getExplainPlan(resultIterator)); - queryLogger.log(QueryLogInfo.GLOBAL_SCAN_DETAILS_I, - context.getScan() != null ? - context.getScan().toString() : - null); - } - context.getOverallQueryMetrics().startQuery(); - rs = - newResultSet(resultIterator, plan.getProjector(), - plan.getContext()); - // newResultset sets lastResultset - setLastQueryPlan(plan); - setLastUpdateCount(NO_UPDATE); - setLastUpdateTable(tableName == null ? 
TABLE_UNKNOWN : tableName); - setLastUpdateOperation(stmt.getOperation()); - // If transactional, this will move the read pointer forward - if (connection.getAutoCommit() && !noCommit) { - connection.commit(); - } - connection.incrementStatementExecutionCounter(); - success = true; - } - //Force update cache and retry if meta not found error occurs - catch (MetaDataEntityNotFoundException e) { - if (doRetryOnMetaNotFoundError && e.getTableName() != null) { - String sName = e.getSchemaName(); - String tName = e.getTableName(); - // when the query plan uses the local index PTable, - // the TNFE can still be for the base table - if (plan != null && plan.getTableRef() != null) { - PTable queryPlanTable = plan.getTableRef().getTable(); - if (queryPlanTable != null - && queryPlanTable.getIndexType() - == IndexType.LOCAL) { - sName = queryPlanTable.getSchemaName().getString(); - tName = queryPlanTable.getTableName().getString(); - } - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Reloading table {} data from server", - tName); - } - if (new MetaDataClient(connection) - .updateCache(connection.getTenantId(), - sName, tName, true) - .wasUpdated()) { - updateMetrics = false; - //TODO we can log retry count and error for debugging in LOG table - return executeQuery(stmt, false, queryLogger, noCommit, - shouldValidateLastDdlTimestamp); - } - } - throw e; - } catch (StaleMetadataCacheException e) { - GlobalClientMetrics - .GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER - .increment(); - updateMetrics = false; - PTable pTable = lastQueryPlan.getTableRef().getTable(); - String schemaN = pTable.getSchemaName().toString(); - String tableN = pTable.getTableName().toString(); - PName tenantId = connection.getTenantId(); - LOGGER.debug("Force updating client metadata cache for {}", - ValidateLastDDLTimestampUtil.getInfoString(tenantId, - Arrays.asList(getLastQueryPlan().getTableRef()))); - // force update client metadata cache for the table/view - // this also updates the cache for all ancestors in case of a view - new MetaDataClient(connection) - .updateCache(tenantId, schemaN, tableN, true); - // skip last ddl timestamp validation in the retry - return executeQuery(stmt, doRetryOnMetaNotFoundError, queryLogger, - noCommit, false); - } - catch (RuntimeException e) { - // FIXME: Expression.evaluate does not throw SQLException - // so this will unwrap throws from that. - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw e; - } finally { - if (updateMetrics) { - // Regardless of whether the query was successfully handled or not, - // update the time spent so far. If needed, we can separate out the - // success times and failure times. - GLOBAL_QUERY_TIME.update(EnvironmentEdgeManager.currentTimeMillis() - - startTime); - long - executeQueryTimeSpent = - EnvironmentEdgeManager.currentTimeMillis() - startTime; - if (tableName != null) { - - TableMetricsManager - .updateMetricsMethod(tableName, SELECT_SQL_COUNTER, 1); - TableMetricsManager - .updateMetricsMethod(tableName, SELECT_SQL_QUERY_TIME, - executeQueryTimeSpent); - if (success) { - TableMetricsManager.updateMetricsMethod(tableName, - SELECT_SUCCESS_SQL_COUNTER, 1); - TableMetricsManager.updateMetricsMethod(tableName, - pointLookup ? 
- SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER : - SELECT_SCAN_SUCCESS_SQL_COUNTER, 1); - } else { - TableMetricsManager.updateMetricsMethod(tableName, - SELECT_FAILED_SQL_COUNTER, 1); - TableMetricsManager.updateMetricsMethod(tableName, - SELECT_AGGREGATE_FAILURE_SQL_COUNTER, 1); - TableMetricsManager.updateMetricsMethod(tableName, - pointLookup ? - SELECT_POINTLOOKUP_FAILED_SQL_COUNTER : - SELECT_SCAN_FAILED_SQL_COUNTER, 1); - } - } - if (rs != null) { - rs.setQueryTime(executeQueryTimeSpent); - } - } - } - return rs; - } - }, PhoenixContextExecutor.inContext()); - } catch (Exception e) { - if (queryLogger.isDebugEnabled()) { - queryLogger - .log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); - queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); - queryLogger.sync(null, null); - } - Throwables.propagateIfInstanceOf(e, SQLException.class); - Throwables.propagate(e); - throw new IllegalStateException(); // Can't happen as Throwables.propagate() always throws - } - } + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatement.class); - public String getTargetForAudit(CompilableStatement stmt) { - String target = null; - try { - if (stmt instanceof ExecutableUpsertStatement) { - return ((ExecutableUpsertStatement) stmt).getTable().getName().toString(); - } else if (stmt instanceof ExecutableDeleteStatement) { - return ((ExecutableDeleteStatement) stmt).getTable().getName().toString(); - } else if (stmt instanceof ExecutableCreateTableStatement) { - target = ((ExecutableCreateTableStatement)stmt).getTableName().toString(); - } else if (stmt instanceof ExecutableDropTableStatement) { - target = ((ExecutableDropTableStatement)stmt).getTableName().toString(); - } else if (stmt instanceof ExecutableAddColumnStatement) { - target = ((ExecutableAddColumnStatement)stmt).getTable().getName().toString(); - } else if (stmt instanceof ExecutableCreateSchemaStatement) { - return ((ExecutableCreateSchemaStatement) stmt).getSchemaName(); - } else if (stmt instanceof ExecutableDropSchemaStatement) { - target = ((ExecutableDropSchemaStatement)stmt).getSchemaName(); - } - } catch (Exception e) { - target = stmt.getClass().getName(); - } - return target; - } + public enum Operation { + QUERY("queried", false), + DELETE("deleted", true), + UPSERT("upserted", true), + UPGRADE("upgrade", true), + ADMIN("admin", true); + private final String toString; + private final boolean isMutation; - protected int executeMutation(final CompilableStatement stmt, - final AuditQueryLogger queryLogger) throws SQLException { - return executeMutation(stmt, true, queryLogger, null).getFirst(); + Operation(String toString, boolean isMutation) { + this.toString = toString; + this.isMutation = isMutation; } - Pair executeMutation(final CompilableStatement stmt, - final AuditQueryLogger queryLogger, - final ReturnResult returnResult) throws SQLException { - return executeMutation(stmt, true, queryLogger, returnResult); + public boolean isMutation() { + return isMutation; } - private Pair executeMutation(final CompilableStatement stmt, - final boolean doRetryOnMetaNotFoundError, - final AuditQueryLogger queryLogger, - final ReturnResult returnResult) - throws SQLException { - if (connection.isReadOnly()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.READ_ONLY_CONNECTION). 
- build().buildException(); - } - GLOBAL_MUTATION_SQL_COUNTER.increment(); - try { - return CallRunner - .run( - new CallRunner.CallableThrowable, SQLException>() { - @Override - public Pair call() throws SQLException { - boolean success = false; - String tableName = null; - boolean isUpsert = false; - boolean isAtomicUpsert = false; - boolean isDelete = false; - MutationState state = null; - MutationPlan plan = null; - final long startExecuteMutationTime = EnvironmentEdgeManager.currentTimeMillis(); - clearResultSet(); - try { - PhoenixConnection conn = getConnection(); - if (conn.getQueryServices().isUpgradeRequired() && !conn.isRunningUpgrade() - && stmt.getOperation() != Operation.UPGRADE) { - throw new UpgradeRequiredException(); - } - state = connection.getMutationState(); - plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE); - isUpsert = stmt instanceof ExecutableUpsertStatement; - isDelete = stmt instanceof ExecutableDeleteStatement; - isAtomicUpsert = isUpsert && ((ExecutableUpsertStatement)stmt).getOnDupKeyPairs() != null; - if (plan.getTargetRef() != null && plan.getTargetRef().getTable() != null) { - if (!Strings.isNullOrEmpty(plan.getTargetRef().getTable().getPhysicalName().toString())) { - tableName = plan.getTargetRef().getTable().getPhysicalName().toString(); - } - if (plan.getTargetRef().getTable().isTransactional()) { - state.startTransaction(plan.getTargetRef().getTable().getTransactionProvider()); - } - } - Iterator tableRefs = plan.getSourceRefs().iterator(); - state.sendUncommitted(tableRefs); - state.checkpointIfNeccessary(plan); - checkIfDDLStatementandMutationState(stmt, state); - MutationState lastState = plan.execute(); - state.join(lastState); - // Unfortunately, JDBC uses an int for update count, so we - // just max out at Integer.MAX_VALUE - int lastUpdateCount = (int) Math.min(Integer.MAX_VALUE, - lastState.getUpdateCount()); - Result result = null; - if (connection.getAutoCommit()) { - if (isSingleRowUpdatePlan(isUpsert, isDelete, plan)) { - state.setReturnResult(returnResult); - } - connection.commit(); - if (isAtomicUpsert) { - lastUpdateCount = connection.getMutationState() - .getNumUpdatedRowsForAutoCommit(); - } - result = connection.getMutationState().getResult(); - connection.getMutationState().clearResult(); - } - setLastQueryPlan(null); - setLastUpdateCount(lastUpdateCount); - setLastUpdateOperation(stmt.getOperation()); - setLastUpdateTable(tableName == null ? 
TABLE_UNKNOWN : tableName); - connection.incrementStatementExecutionCounter(); - if (queryLogger.isAuditLoggingEnabled()) { - queryLogger.log(QueryLogInfo.TABLE_NAME_I, getTargetForAudit(stmt)); - queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.COMPLETED.toString()); - queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, lastUpdateCount); - queryLogger.syncAudit(); - } - - success = true; - return new Pair<>(lastUpdateCount, new ResultTuple(result)); - } - //Force update cache and retry if meta not found error occurs - catch (MetaDataEntityNotFoundException e) { - if (doRetryOnMetaNotFoundError && e.getTableName() != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Reloading table {} data from server", e.getTableName()); - } - if (new MetaDataClient(connection).updateCache(connection.getTenantId(), - e.getSchemaName(), e.getTableName(), true).wasUpdated()) { - return executeMutation(stmt, false, queryLogger, - returnResult); - } - } - throw e; - }catch (RuntimeException e) { - // FIXME: Expression.evaluate does not throw SQLException - // so this will unwrap throws from that. - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw e; - } finally { - // Regardless of whether the mutation was successfully handled or not, - // update the time spent so far. If needed, we can separate out the - // success times and failure times. - if (tableName != null) { - // Counts for both ddl and dml - TableMetricsManager.updateMetricsMethod(tableName, - MUTATION_SQL_COUNTER, 1); - // Only count dml operations - if (isUpsert || isDelete) { - long executeMutationTimeSpent = - EnvironmentEdgeManager.currentTimeMillis() - startExecuteMutationTime; - - TableMetricsManager.updateMetricsMethod(tableName, isUpsert ? - UPSERT_SQL_COUNTER : DELETE_SQL_COUNTER, 1); - TableMetricsManager.updateMetricsMethod(tableName, isUpsert ? - UPSERT_SQL_QUERY_TIME : DELETE_SQL_QUERY_TIME, executeMutationTimeSpent); - if (isAtomicUpsert) { - TableMetricsManager.updateMetricsMethod(tableName, - ATOMIC_UPSERT_SQL_COUNTER, 1); - TableMetricsManager.updateMetricsMethod(tableName, - ATOMIC_UPSERT_SQL_QUERY_TIME, executeMutationTimeSpent); - } - - if (success) { - TableMetricsManager.updateMetricsMethod(tableName, isUpsert ? - UPSERT_SUCCESS_SQL_COUNTER : DELETE_SUCCESS_SQL_COUNTER, 1); - } else { - TableMetricsManager.updateMetricsMethod(tableName, isUpsert ? - UPSERT_FAILED_SQL_COUNTER : DELETE_FAILED_SQL_COUNTER, 1); - //Failures are updated for executeMutation phase and for autocommit=true case here. - TableMetricsManager.updateMetricsMethod(tableName, isUpsert ? UPSERT_AGGREGATE_FAILURE_SQL_COUNTER: - DELETE_AGGREGATE_FAILURE_SQL_COUNTER, 1); - } - if (plan instanceof DeleteCompiler.ServerSelectDeleteMutationPlan - || plan instanceof UpsertCompiler.ServerUpsertSelectMutationPlan) { - TableMetricsManager.updateLatencyHistogramForMutations( - tableName, executeMutationTimeSpent, false); - // We won't have size histograms for delete mutations when auto commit is set to true and - // if plan is of ServerSelectDeleteMutationPlan or ServerUpsertSelectMutationPlan - // since the update happens on server. 
- } else { - state.addExecuteMutationTime( - executeMutationTimeSpent, tableName); - } - } - } - - } - } - }, PhoenixContextExecutor.inContext(), - Tracing.withTracing(connection, this.toString())); - } catch (Exception e) { - if (queryLogger.isAuditLoggingEnabled()) { - queryLogger.log(QueryLogInfo.TABLE_NAME_I, getTargetForAudit(stmt)); - queryLogger.log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); - queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); - queryLogger.syncAudit(); + @Override + public String toString() { + return toString; + } + } + + protected final PhoenixConnection connection; + private static final int NO_UPDATE = -1; + private static final String TABLE_UNKNOWN = ""; + private QueryPlan lastQueryPlan; + private PhoenixResultSet lastResultSet; + private int lastUpdateCount = NO_UPDATE; + + private String lastUpdateTable = TABLE_UNKNOWN; + private Operation lastUpdateOperation; + private boolean isClosed = false; + private boolean closeOnCompletion = false; + private int maxRows; + private int fetchSize = -1; + private int queryTimeoutMillis; + // Caching per Statement + protected final Calendar localCalendar = Calendar.getInstance(); + private boolean validateLastDdlTimestamp; + + public PhoenixStatement(PhoenixConnection connection) { + this.connection = connection; + this.queryTimeoutMillis = getDefaultQueryTimeoutMillis(); + this.validateLastDdlTimestamp = + ValidateLastDDLTimestampUtil.getValidateLastDdlTimestampEnabled(this.connection); + } + + /** + * Internally to Phoenix we allow callers to set the query timeout in millis via the + * phoenix.query.timeoutMs. Therefore we store the time in millis. + */ + private int getDefaultQueryTimeoutMillis() { + return connection.getQueryServices().getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + } + + protected List getResultSets() { + if (lastResultSet != null) { + return Collections.singletonList(lastResultSet); + } else { + return Collections.emptyList(); + } + } + + public PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector, + StatementContext context) throws SQLException { + return new PhoenixResultSet(iterator, projector, context); + } + + protected QueryPlan optimizeQuery(CompilableStatement stmt) throws SQLException { + QueryPlan plan = stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); + return connection.getQueryServices().getOptimizer().optimize(this, plan); + } + + protected PhoenixResultSet executeQuery(final CompilableStatement stmt, + final QueryLogger queryLogger) throws SQLException { + return executeQuery(stmt, true, queryLogger, false, this.validateLastDdlTimestamp); + } + + protected PhoenixResultSet executeQuery(final CompilableStatement stmt, + final QueryLogger queryLogger, boolean noCommit) throws SQLException { + return executeQuery(stmt, true, queryLogger, noCommit, this.validateLastDdlTimestamp); + } + + private PhoenixResultSet executeQuery(final CompilableStatement stmt, + final boolean doRetryOnMetaNotFoundError, final QueryLogger queryLogger, final boolean noCommit, + boolean shouldValidateLastDdlTimestamp) throws SQLException { + GLOBAL_SELECT_SQL_COUNTER.increment(); + + try { + return CallRunner.run(new CallRunner.CallableThrowable() { + @Override + public PhoenixResultSet call() throws SQLException { + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + boolean success = false; + boolean updateMetrics = true; + boolean 
pointLookup = false; + String tableName = null; + clearResultSet(); + PhoenixResultSet rs = null; + QueryPlan plan = null; + try { + PhoenixConnection conn = getConnection(); + conn.checkOpen(); + + if ( + conn.getQueryServices().isUpgradeRequired() && !conn.isRunningUpgrade() + && stmt.getOperation() != Operation.UPGRADE + ) { + throw new UpgradeRequiredException(); } - Throwables.propagateIfInstanceOf(e, SQLException.class); - Throwables.propagate(e); - throw new IllegalStateException(); // Can't happen as Throwables.propagate() always throws - } - } - - private static boolean isSingleRowUpdatePlan(boolean isUpsert, boolean isDelete, - MutationPlan plan) { - boolean isSingleRowUpdate = false; - if (isUpsert) { - isSingleRowUpdate = true; - } else if (isDelete) { - isSingleRowUpdate = plan.getContext().getScanRanges().getPointLookupCount() == 1; - } - return isSingleRowUpdate; - } - - protected static interface CompilableStatement extends BindableStatement { - public T compilePlan (PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException; - } - - private static class ExecutableSelectStatement extends SelectStatement implements CompilableStatement { - private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, List select, ParseNode where, - List groupBy, ParseNode having, List orderBy, LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate, boolean hasSequence, Map udfParseNodes) { - this(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit,offset, bindCount, isAggregate, hasSequence, Collections.emptyList(), udfParseNodes); - } - - private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, List select, ParseNode where, - List groupBy, ParseNode having, List orderBy, LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate, - boolean hasSequence, List selects, Map udfParseNodes) { - super(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, offset, bindCount, isAggregate, hasSequence, selects, udfParseNodes); - } - - private ExecutableSelectStatement(ExecutableSelectStatement select) { - this(select.getFrom(), select.getHint(), select.isDistinct(), select.getSelect(), select.getWhere(), - select.getGroupBy(), select.getHaving(), select.getOrderBy(), select.getLimit(), select.getOffset(), select.getBindCount(), - select.isAggregate(), select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); - } - - - @SuppressWarnings("unchecked") - @Override - public QueryPlan compilePlan(PhoenixStatement phoenixStatement, Sequence.ValueOp seqAction) throws SQLException { - if (!getUdfParseNodes().isEmpty()) { - phoenixStatement.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); + plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE); + // Send mutations to hbase, so they are visible to subsequent reads. + // Use original plan for data table so that data and immutable indexes will be sent + // TODO: for joins, we need to iterate through all tables, but we need the original + // table, + // not the projected table, so plan.getContext().getResolver().getTables() won't work. 
+ if (plan.getContext().getScanRanges().isPointLookup()) { + pointLookup = true; + } + Iterator tableRefs = plan.getSourceRefs().iterator(); + connection.getMutationState().sendUncommitted(tableRefs); + plan = + connection.getQueryServices().getOptimizer().optimize(PhoenixStatement.this, plan); + setLastQueryPlan(plan); + + // verify metadata for the table/view/index in the query plan + // plan.getTableRef can be null in some cases like EXPLAIN + if (shouldValidateLastDdlTimestamp && plan.getTableRef() != null) { + ValidateLastDDLTimestampUtil.validateLastDDLTimestamp(connection, + Arrays.asList(plan.getTableRef()), true); } - RewriteResult rewriteResult = - ParseNodeUtil.rewrite(this, phoenixStatement.getConnection()); - QueryPlan queryPlan = new QueryCompiler( - phoenixStatement, - rewriteResult.getRewrittenSelectStatement(), - rewriteResult.getColumnResolver(), - Collections.emptyList(), - phoenixStatement.getConnection().getIteratorFactory(), - new SequenceManager(phoenixStatement), - true, - false, - null).compile(); - queryPlan.getContext().getSequenceManager().validateSequences(seqAction); - return queryPlan; - } - - } - - private static final byte[] EXPLAIN_PLAN_FAMILY = QueryConstants.SINGLE_COLUMN_FAMILY; - private static final byte[] EXPLAIN_PLAN_COLUMN = PVarchar.INSTANCE.toBytes("Plan"); - private static final String EXPLAIN_PLAN_ALIAS = "PLAN"; - private static final String EXPLAIN_PLAN_TABLE_NAME = "PLAN_TABLE"; - private static final PDatum EXPLAIN_PLAN_DATUM = new PDatum() { - @Override - public boolean isNullable() { - return true; - } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - private static final String EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_NAME = "BytesEstimate"; - private static final byte[] EXPLAIN_PLAN_BYTES_ESTIMATE = - PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_NAME); - public static final String EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS = "EST_BYTES_READ"; - private static final PColumnImpl EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN = - new PColumnImpl(PNameFactory.newName(EXPLAIN_PLAN_BYTES_ESTIMATE), - PNameFactory.newName(EXPLAIN_PLAN_FAMILY), PLong.INSTANCE, null, null, true, 1, - SortOrder.getDefault(), 0, null, false, null, false, false, - EXPLAIN_PLAN_BYTES_ESTIMATE, 0, false); - - private static final String EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN_NAME = "RowsEstimate"; - private static final byte[] EXPLAIN_PLAN_ROWS_ESTIMATE = - PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN_NAME); - public static final String EXPLAIN_PLAN_ROWS_COLUMN_ALIAS = "EST_ROWS_READ"; - private static final PColumnImpl EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN = - new PColumnImpl(PNameFactory.newName(EXPLAIN_PLAN_ROWS_ESTIMATE), - PNameFactory.newName(EXPLAIN_PLAN_FAMILY), PLong.INSTANCE, null, null, true, 2, - SortOrder.getDefault(), 0, null, false, null, false, false, - EXPLAIN_PLAN_ROWS_ESTIMATE, 0, false); - - private static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_NAME = "EstimateInfoTS"; - private static final byte[] EXPLAIN_PLAN_ESTIMATE_INFO_TS = - PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_NAME); - public static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS = "EST_INFO_TS"; - private static final PColumnImpl EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN = - new 
PColumnImpl(PNameFactory.newName(EXPLAIN_PLAN_ESTIMATE_INFO_TS), - PNameFactory.newName(EXPLAIN_PLAN_FAMILY), PLong.INSTANCE, null, null, true, 3, - SortOrder.getDefault(), 0, null, false, null, false, false, - EXPLAIN_PLAN_ESTIMATE_INFO_TS, 0, false); - - private static final RowProjector EXPLAIN_PLAN_ROW_PROJECTOR_WITH_BYTE_ROW_ESTIMATES = - new RowProjector(Arrays - . asList( - new ExpressionProjector(EXPLAIN_PLAN_ALIAS, EXPLAIN_PLAN_ALIAS, - EXPLAIN_PLAN_TABLE_NAME, - new RowKeyColumnExpression(EXPLAIN_PLAN_DATUM, - new RowKeyValueAccessor(Collections - . singletonList(EXPLAIN_PLAN_DATUM), 0)), - false), - new ExpressionProjector(EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS, - EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS, - EXPLAIN_PLAN_TABLE_NAME, new KeyValueColumnExpression( - EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN), - false), - new ExpressionProjector(EXPLAIN_PLAN_ROWS_COLUMN_ALIAS, - EXPLAIN_PLAN_ROWS_COLUMN_ALIAS, - EXPLAIN_PLAN_TABLE_NAME, - new KeyValueColumnExpression(EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN), - false), - new ExpressionProjector(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS, - EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS, - EXPLAIN_PLAN_TABLE_NAME, - new KeyValueColumnExpression(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN), - false)), - 0, true); - - private static class ExecutableExplainStatement extends ExplainStatement implements CompilableStatement { - - ExecutableExplainStatement(BindableStatement statement, ExplainType explainType) { - super(statement, explainType); - } - - @Override - public CompilableStatement getStatement() { - return (CompilableStatement) super.getStatement(); - } - - @Override - public int getBindCount() { - return getStatement().getBindCount(); - } - - @SuppressWarnings("unchecked") - @Override - public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - CompilableStatement compilableStmt = getStatement(); - StatementPlan compilePlan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE); - // if client is validating timestamps, ensure its metadata cache is up to date. - if (ValidateLastDDLTimestampUtil - .getValidateLastDdlTimestampEnabled(stmt.getConnection())) { - Set tableRefs = compilePlan.getSourceRefs(); - for (TableRef tableRef : tableRefs) { - new MetaDataClient(stmt.getConnection()).updateCache( - stmt.getConnection().getTenantId(), - tableRef.getTable().getSchemaName().getString(), - tableRef.getTable().getTableName().getString(), - true); - } - compilePlan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE); + if ( + plan.getTableRef() != null && plan.getTableRef().getTable() != null + && !Strings + .isNullOrEmpty(plan.getTableRef().getTable().getPhysicalName().toString()) + ) { + tableName = plan.getTableRef().getTable().getPhysicalName().toString(); } - // For a QueryPlan, we need to get its optimized plan; for a MutationPlan, its enclosed QueryPlan - // has already been optimized during compilation. 
- if (compilePlan instanceof QueryPlan) { - QueryPlan dataPlan = (QueryPlan) compilePlan; - compilePlan = stmt.getConnection().getQueryServices().getOptimizer().optimize(stmt, dataPlan); + // this will create its own trace internally, so we don't wrap this + // whole thing in tracing + ResultIterator resultIterator = plan.iterator(); + if (LOGGER.isDebugEnabled()) { + String explainPlan = QueryUtil.getExplainPlan(resultIterator); + LOGGER + .debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection)); } - final StatementPlan plan = compilePlan; - List planSteps = plan.getExplainPlan().getPlanSteps(); - ExplainType explainType = getExplainType(); - if (explainType == ExplainType.DEFAULT) { - List updatedExplainPlanSteps = new ArrayList<>(planSteps); - updatedExplainPlanSteps.removeIf(planStep -> planStep != null - && planStep.contains(ExplainTable.REGION_LOCATIONS)); - planSteps = Collections.unmodifiableList(updatedExplainPlanSteps); + StatementContext context = plan.getContext(); + context.setQueryLogger(queryLogger); + if (queryLogger.isDebugEnabled()) { + queryLogger.log(QueryLogInfo.EXPLAIN_PLAN_I, + QueryUtil.getExplainPlan(resultIterator)); + queryLogger.log(QueryLogInfo.GLOBAL_SCAN_DETAILS_I, + context.getScan() != null ? context.getScan().toString() : null); } - List tuples = Lists.newArrayListWithExpectedSize(planSteps.size()); - Long estimatedBytesToScan = plan.getEstimatedBytesToScan(); - Long estimatedRowsToScan = plan.getEstimatedRowsToScan(); - Long estimateInfoTimestamp = plan.getEstimateInfoTimestamp(); - for (String planStep : planSteps) { - byte[] row = PVarchar.INSTANCE.toBytes(planStep); - List cells = Lists.newArrayListWithCapacity(3); - cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)); - if (estimatedBytesToScan != null) { - cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_BYTES_ESTIMATE, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PLong.INSTANCE.toBytes(estimatedBytesToScan))); - } - if (estimatedRowsToScan != null) { - cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ROWS_ESTIMATE, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PLong.INSTANCE.toBytes(estimatedRowsToScan))); - } - if (estimateInfoTimestamp != null) { - cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ESTIMATE_INFO_TS, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, - PLong.INSTANCE.toBytes(estimateInfoTimestamp))); - } - Collections.sort(cells, CellComparator.getInstance()); - Tuple tuple = new MultiKeyValueTuple(cells); - tuples.add(tuple); + context.getOverallQueryMetrics().startQuery(); + rs = newResultSet(resultIterator, plan.getProjector(), plan.getContext()); + // newResultset sets lastResultset + setLastQueryPlan(plan); + setLastUpdateCount(NO_UPDATE); + setLastUpdateTable(tableName == null ? 
TABLE_UNKNOWN : tableName); + setLastUpdateOperation(stmt.getOperation()); + // If transactional, this will move the read pointer forward + if (connection.getAutoCommit() && !noCommit) { + connection.commit(); + } + connection.incrementStatementExecutionCounter(); + success = true; + } + // Force update cache and retry if meta not found error occurs + catch (MetaDataEntityNotFoundException e) { + if (doRetryOnMetaNotFoundError && e.getTableName() != null) { + String sName = e.getSchemaName(); + String tName = e.getTableName(); + // when the query plan uses the local index PTable, + // the TNFE can still be for the base table + if (plan != null && plan.getTableRef() != null) { + PTable queryPlanTable = plan.getTableRef().getTable(); + if (queryPlanTable != null && queryPlanTable.getIndexType() == IndexType.LOCAL) { + sName = queryPlanTable.getSchemaName().getString(); + tName = queryPlanTable.getTableName().getString(); + } + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Reloading table {} data from server", tName); + } + if ( + new MetaDataClient(connection) + .updateCache(connection.getTenantId(), sName, tName, true).wasUpdated() + ) { + updateMetrics = false; + // TODO we can log retry count and error for debugging in LOG table + return executeQuery(stmt, false, queryLogger, noCommit, + shouldValidateLastDdlTimestamp); + } + } + throw e; + } catch (StaleMetadataCacheException e) { + GlobalClientMetrics.GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER.increment(); + updateMetrics = false; + PTable pTable = lastQueryPlan.getTableRef().getTable(); + String schemaN = pTable.getSchemaName().toString(); + String tableN = pTable.getTableName().toString(); + PName tenantId = connection.getTenantId(); + LOGGER.debug("Force updating client metadata cache for {}", ValidateLastDDLTimestampUtil + .getInfoString(tenantId, Arrays.asList(getLastQueryPlan().getTableRef()))); + // force update client metadata cache for the table/view + // this also updates the cache for all ancestors in case of a view + new MetaDataClient(connection).updateCache(tenantId, schemaN, tableN, true); + // skip last ddl timestamp validation in the retry + return executeQuery(stmt, doRetryOnMetaNotFoundError, queryLogger, noCommit, false); + } catch (RuntimeException e) { + // FIXME: Expression.evaluate does not throw SQLException + // so this will unwrap throws from that. + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw e; + } finally { + if (updateMetrics) { + // Regardless of whether the query was successfully handled or not, + // update the time spent so far. If needed, we can separate out the + // success times and failure times. + GLOBAL_QUERY_TIME.update(EnvironmentEdgeManager.currentTimeMillis() - startTime); + long executeQueryTimeSpent = EnvironmentEdgeManager.currentTimeMillis() - startTime; + if (tableName != null) { + + TableMetricsManager.updateMetricsMethod(tableName, SELECT_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, SELECT_SQL_QUERY_TIME, + executeQueryTimeSpent); + if (success) { + TableMetricsManager.updateMetricsMethod(tableName, SELECT_SUCCESS_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, + pointLookup + ? 
SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER + : SELECT_SCAN_SUCCESS_SQL_COUNTER, + 1); + } else { + TableMetricsManager.updateMetricsMethod(tableName, SELECT_FAILED_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, + SELECT_AGGREGATE_FAILURE_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, + pointLookup + ? SELECT_POINTLOOKUP_FAILED_SQL_COUNTER + : SELECT_SCAN_FAILED_SQL_COUNTER, + 1); + } + } + if (rs != null) { + rs.setQueryTime(executeQueryTimeSpent); + } + } + } + return rs; + } + }, PhoenixContextExecutor.inContext()); + } catch (Exception e) { + if (queryLogger.isDebugEnabled()) { + queryLogger.log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); + queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); + queryLogger.sync(null, null); + } + Throwables.propagateIfInstanceOf(e, SQLException.class); + Throwables.propagate(e); + throw new IllegalStateException(); // Can't happen as Throwables.propagate() always throws + } + } + + public String getTargetForAudit(CompilableStatement stmt) { + String target = null; + try { + if (stmt instanceof ExecutableUpsertStatement) { + return ((ExecutableUpsertStatement) stmt).getTable().getName().toString(); + } else if (stmt instanceof ExecutableDeleteStatement) { + return ((ExecutableDeleteStatement) stmt).getTable().getName().toString(); + } else if (stmt instanceof ExecutableCreateTableStatement) { + target = ((ExecutableCreateTableStatement) stmt).getTableName().toString(); + } else if (stmt instanceof ExecutableDropTableStatement) { + target = ((ExecutableDropTableStatement) stmt).getTableName().toString(); + } else if (stmt instanceof ExecutableAddColumnStatement) { + target = ((ExecutableAddColumnStatement) stmt).getTable().getName().toString(); + } else if (stmt instanceof ExecutableCreateSchemaStatement) { + return ((ExecutableCreateSchemaStatement) stmt).getSchemaName(); + } else if (stmt instanceof ExecutableDropSchemaStatement) { + target = ((ExecutableDropSchemaStatement) stmt).getSchemaName(); + } + } catch (Exception e) { + target = stmt.getClass().getName(); + } + return target; + } + + protected int executeMutation(final CompilableStatement stmt, final AuditQueryLogger queryLogger) + throws SQLException { + return executeMutation(stmt, true, queryLogger, null).getFirst(); + } + + Pair executeMutation(final CompilableStatement stmt, + final AuditQueryLogger queryLogger, final ReturnResult returnResult) throws SQLException { + return executeMutation(stmt, true, queryLogger, returnResult); + } + + private Pair executeMutation(final CompilableStatement stmt, + final boolean doRetryOnMetaNotFoundError, final AuditQueryLogger queryLogger, + final ReturnResult returnResult) throws SQLException { + if (connection.isReadOnly()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.READ_ONLY_CONNECTION).build() + .buildException(); + } + GLOBAL_MUTATION_SQL_COUNTER.increment(); + try { + return CallRunner.run(new CallRunner.CallableThrowable, SQLException>() { + @Override + public Pair call() throws SQLException { + boolean success = false; + String tableName = null; + boolean isUpsert = false; + boolean isAtomicUpsert = false; + boolean isDelete = false; + MutationState state = null; + MutationPlan plan = null; + final long startExecuteMutationTime = EnvironmentEdgeManager.currentTimeMillis(); + clearResultSet(); + try { + PhoenixConnection conn = getConnection(); + if ( + conn.getQueryServices().isUpgradeRequired() && !conn.isRunningUpgrade() + && 
stmt.getOperation() != Operation.UPGRADE + ) { + throw new UpgradeRequiredException(); + } + state = connection.getMutationState(); + plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE); + isUpsert = stmt instanceof ExecutableUpsertStatement; + isDelete = stmt instanceof ExecutableDeleteStatement; + isAtomicUpsert = + isUpsert && ((ExecutableUpsertStatement) stmt).getOnDupKeyPairs() != null; + if (plan.getTargetRef() != null && plan.getTargetRef().getTable() != null) { + if ( + !Strings.isNullOrEmpty(plan.getTargetRef().getTable().getPhysicalName().toString()) + ) { + tableName = plan.getTargetRef().getTable().getPhysicalName().toString(); + } + if (plan.getTargetRef().getTable().isTransactional()) { + state.startTransaction(plan.getTargetRef().getTable().getTransactionProvider()); + } + } + Iterator tableRefs = plan.getSourceRefs().iterator(); + state.sendUncommitted(tableRefs); + state.checkpointIfNeccessary(plan); + checkIfDDLStatementandMutationState(stmt, state); + MutationState lastState = plan.execute(); + state.join(lastState); + // Unfortunately, JDBC uses an int for update count, so we + // just max out at Integer.MAX_VALUE + int lastUpdateCount = (int) Math.min(Integer.MAX_VALUE, lastState.getUpdateCount()); + Result result = null; + if (connection.getAutoCommit()) { + if (isSingleRowUpdatePlan(isUpsert, isDelete, plan)) { + state.setReturnResult(returnResult); + } + connection.commit(); + if (isAtomicUpsert) { + lastUpdateCount = connection.getMutationState().getNumUpdatedRowsForAutoCommit(); + } + result = connection.getMutationState().getResult(); + connection.getMutationState().clearResult(); + } + setLastQueryPlan(null); + setLastUpdateCount(lastUpdateCount); + setLastUpdateOperation(stmt.getOperation()); + setLastUpdateTable(tableName == null ? 
TABLE_UNKNOWN : tableName); + connection.incrementStatementExecutionCounter(); + if (queryLogger.isAuditLoggingEnabled()) { + queryLogger.log(QueryLogInfo.TABLE_NAME_I, getTargetForAudit(stmt)); + queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.COMPLETED.toString()); + queryLogger.log(QueryLogInfo.NO_OF_RESULTS_ITERATED_I, lastUpdateCount); + queryLogger.syncAudit(); } - final Long estimatedBytes = estimatedBytesToScan; - final Long estimatedRows = estimatedRowsToScan; - final Long estimateTs = estimateInfoTimestamp; - final ResultIterator iterator = new MaterializedResultIterator(tuples); - return new QueryPlan() { - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("EXPLAIN PLAN")); - } - - @Override - public ResultIterator iterator() throws SQLException { - return iterator; - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return iterator; - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return iterator; - } - - @Override - public long getEstimatedSize() { - return 0; - } - - @Override - public Cost getCost() { - return Cost.ZERO; - } - - @Override - public TableRef getTableRef() { - return null; - } - - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } - - @Override - public RowProjector getProjector() { - return EXPLAIN_PLAN_ROW_PROJECTOR_WITH_BYTE_ROW_ESTIMATES; - } - - @Override - public Integer getLimit() { - return null; - } - - @Override - public Integer getOffset() { - return null; - } - - @Override - public OrderBy getOrderBy() { - return OrderBy.EMPTY_ORDER_BY; - } - - @Override - public GroupBy getGroupBy() { - return GroupBy.EMPTY_GROUP_BY; - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public StatementContext getContext() { - return plan.getContext(); - } - - @Override - public FilterableStatement getStatement() { - return null; - } - @Override - public boolean isDegenerate() { - return false; - } + success = true; + return new Pair<>(lastUpdateCount, new ResultTuple(result)); + } + // Force update cache and retry if meta not found error occurs + catch (MetaDataEntityNotFoundException e) { + if (doRetryOnMetaNotFoundError && e.getTableName() != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Reloading table {} data from server", e.getTableName()); + } + if ( + new MetaDataClient(connection) + .updateCache(connection.getTenantId(), e.getSchemaName(), e.getTableName(), true) + .wasUpdated() + ) { + return executeMutation(stmt, false, queryLogger, returnResult); + } + } + throw e; + } catch (RuntimeException e) { + // FIXME: Expression.evaluate does not throw SQLException + // so this will unwrap throws from that. + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw e; + } finally { + // Regardless of whether the mutation was successfully handled or not, + // update the time spent so far. If needed, we can separate out the + // success times and failure times. 
+ if (tableName != null) { + // Counts for both ddl and dml + TableMetricsManager.updateMetricsMethod(tableName, MUTATION_SQL_COUNTER, 1); + // Only count dml operations + if (isUpsert || isDelete) { + long executeMutationTimeSpent = + EnvironmentEdgeManager.currentTimeMillis() - startExecuteMutationTime; + + TableMetricsManager.updateMetricsMethod(tableName, + isUpsert ? UPSERT_SQL_COUNTER : DELETE_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, + isUpsert ? UPSERT_SQL_QUERY_TIME : DELETE_SQL_QUERY_TIME, + executeMutationTimeSpent); + if (isAtomicUpsert) { + TableMetricsManager.updateMetricsMethod(tableName, ATOMIC_UPSERT_SQL_COUNTER, 1); + TableMetricsManager.updateMetricsMethod(tableName, ATOMIC_UPSERT_SQL_QUERY_TIME, + executeMutationTimeSpent); + } + + if (success) { + TableMetricsManager.updateMetricsMethod(tableName, + isUpsert ? UPSERT_SUCCESS_SQL_COUNTER : DELETE_SUCCESS_SQL_COUNTER, 1); + } else { + TableMetricsManager.updateMetricsMethod(tableName, + isUpsert ? UPSERT_FAILED_SQL_COUNTER : DELETE_FAILED_SQL_COUNTER, 1); + // Failures are updated for executeMutation phase and for autocommit=true case + // here. + TableMetricsManager.updateMetricsMethod(tableName, + isUpsert + ? UPSERT_AGGREGATE_FAILURE_SQL_COUNTER + : DELETE_AGGREGATE_FAILURE_SQL_COUNTER, + 1); + } + if ( + plan instanceof DeleteCompiler.ServerSelectDeleteMutationPlan + || plan instanceof UpsertCompiler.ServerUpsertSelectMutationPlan + ) { + TableMetricsManager.updateLatencyHistogramForMutations(tableName, + executeMutationTimeSpent, false); + // We won't have size histograms for delete mutations when auto commit is set to + // true and + // if plan is of ServerSelectDeleteMutationPlan or ServerUpsertSelectMutationPlan + // since the update happens on server. 
+ } else { + state.addExecuteMutationTime(executeMutationTimeSpent, tableName); + } + } + } - @Override - public boolean isRowKeyOrdered() { - return true; - } + } + } + }, PhoenixContextExecutor.inContext(), Tracing.withTracing(connection, this.toString())); + } catch (Exception e) { + if (queryLogger.isAuditLoggingEnabled()) { + queryLogger.log(QueryLogInfo.TABLE_NAME_I, getTargetForAudit(stmt)); + queryLogger.log(QueryLogInfo.EXCEPTION_TRACE_I, Throwables.getStackTraceAsString(e)); + queryLogger.log(QueryLogInfo.QUERY_STATUS_I, QueryStatus.FAILED.toString()); + queryLogger.syncAudit(); + } + Throwables.propagateIfInstanceOf(e, SQLException.class); + Throwables.propagate(e); + throw new IllegalStateException(); // Can't happen as Throwables.propagate() always throws + } + } + + private static boolean isSingleRowUpdatePlan(boolean isUpsert, boolean isDelete, + MutationPlan plan) { + boolean isSingleRowUpdate = false; + if (isUpsert) { + isSingleRowUpdate = true; + } else if (isDelete) { + isSingleRowUpdate = plan.getContext().getScanRanges().getPointLookupCount() == 1; + } + return isSingleRowUpdate; + } + + protected static interface CompilableStatement extends BindableStatement { + public T compilePlan(PhoenixStatement stmt, + Sequence.ValueOp seqAction) throws SQLException; + } + + private static class ExecutableSelectStatement extends SelectStatement + implements CompilableStatement { + private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, + List select, ParseNode where, List groupBy, ParseNode having, + List orderBy, LimitNode limit, OffsetNode offset, int bindCount, + boolean isAggregate, boolean hasSequence, Map udfParseNodes) { + this(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, offset, + bindCount, isAggregate, hasSequence, Collections. emptyList(), + udfParseNodes); + } + + private ExecutableSelectStatement(TableNode from, HintNode hint, boolean isDistinct, + List select, ParseNode where, List groupBy, ParseNode having, + List orderBy, LimitNode limit, OffsetNode offset, int bindCount, + boolean isAggregate, boolean hasSequence, List selects, + Map udfParseNodes) { + super(from, hint, isDistinct, select, where, groupBy, having, orderBy, limit, offset, + bindCount, isAggregate, hasSequence, selects, udfParseNodes); + } + + private ExecutableSelectStatement(ExecutableSelectStatement select) { + this(select.getFrom(), select.getHint(), select.isDistinct(), select.getSelect(), + select.getWhere(), select.getGroupBy(), select.getHaving(), select.getOrderBy(), + select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), + select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); + } - @Override - public Operation getOperation() { - return ExecutableExplainStatement.this.getOperation(); - } + @SuppressWarnings("unchecked") + @Override + public QueryPlan compilePlan(PhoenixStatement phoenixStatement, Sequence.ValueOp seqAction) + throws SQLException { + if (!getUdfParseNodes().isEmpty()) { + phoenixStatement.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); + } + + RewriteResult rewriteResult = ParseNodeUtil.rewrite(this, phoenixStatement.getConnection()); + QueryPlan queryPlan = new QueryCompiler(phoenixStatement, + rewriteResult.getRewrittenSelectStatement(), rewriteResult.getColumnResolver(), + Collections. 
emptyList(), phoenixStatement.getConnection().getIteratorFactory(), + new SequenceManager(phoenixStatement), true, false, null).compile(); + queryPlan.getContext().getSequenceManager().validateSequences(seqAction); + return queryPlan; + } + + } + + private static final byte[] EXPLAIN_PLAN_FAMILY = QueryConstants.SINGLE_COLUMN_FAMILY; + private static final byte[] EXPLAIN_PLAN_COLUMN = PVarchar.INSTANCE.toBytes("Plan"); + private static final String EXPLAIN_PLAN_ALIAS = "PLAN"; + private static final String EXPLAIN_PLAN_TABLE_NAME = "PLAN_TABLE"; + private static final PDatum EXPLAIN_PLAN_DATUM = new PDatum() { + @Override + public boolean isNullable() { + return true; + } - @Override - public boolean useRoundRobinIterator() throws SQLException { - return false; - } + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.defaultReturn(this); - } + @Override + public Integer getMaxLength() { + return null; + } - @Override - public Long getEstimatedRowsToScan() { - return estimatedRows; - } + @Override + public Integer getScale() { + return null; + } - @Override - public Long getEstimatedBytesToScan() { - return estimatedBytes; - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return estimateTs; - } + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; + private static final String EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_NAME = "BytesEstimate"; + private static final byte[] EXPLAIN_PLAN_BYTES_ESTIMATE = + PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_NAME); + public static final String EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS = "EST_BYTES_READ"; + private static final PColumnImpl EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN = new PColumnImpl( + PNameFactory.newName(EXPLAIN_PLAN_BYTES_ESTIMATE), PNameFactory.newName(EXPLAIN_PLAN_FAMILY), + PLong.INSTANCE, null, null, true, 1, SortOrder.getDefault(), 0, null, false, null, false, false, + EXPLAIN_PLAN_BYTES_ESTIMATE, 0, false); + + private static final String EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN_NAME = "RowsEstimate"; + private static final byte[] EXPLAIN_PLAN_ROWS_ESTIMATE = + PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN_NAME); + public static final String EXPLAIN_PLAN_ROWS_COLUMN_ALIAS = "EST_ROWS_READ"; + private static final PColumnImpl EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN = new PColumnImpl( + PNameFactory.newName(EXPLAIN_PLAN_ROWS_ESTIMATE), PNameFactory.newName(EXPLAIN_PLAN_FAMILY), + PLong.INSTANCE, null, null, true, 2, SortOrder.getDefault(), 0, null, false, null, false, false, + EXPLAIN_PLAN_ROWS_ESTIMATE, 0, false); + + private static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_NAME = "EstimateInfoTS"; + private static final byte[] EXPLAIN_PLAN_ESTIMATE_INFO_TS = + PVarchar.INSTANCE.toBytes(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_NAME); + public static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS = "EST_INFO_TS"; + private static final PColumnImpl EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN = new PColumnImpl( + PNameFactory.newName(EXPLAIN_PLAN_ESTIMATE_INFO_TS), PNameFactory.newName(EXPLAIN_PLAN_FAMILY), + PLong.INSTANCE, null, null, true, 3, SortOrder.getDefault(), 0, null, false, null, false, false, + EXPLAIN_PLAN_ESTIMATE_INFO_TS, 0, false); + + private static final RowProjector EXPLAIN_PLAN_ROW_PROJECTOR_WITH_BYTE_ROW_ESTIMATES = + new RowProjector(Arrays. 
asList( + new ExpressionProjector(EXPLAIN_PLAN_ALIAS, EXPLAIN_PLAN_ALIAS, EXPLAIN_PLAN_TABLE_NAME, + new RowKeyColumnExpression(EXPLAIN_PLAN_DATUM, + new RowKeyValueAccessor(Collections. singletonList(EXPLAIN_PLAN_DATUM), 0)), + false), + new ExpressionProjector(EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS, + EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS, EXPLAIN_PLAN_TABLE_NAME, + new KeyValueColumnExpression(EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN), false), + new ExpressionProjector(EXPLAIN_PLAN_ROWS_COLUMN_ALIAS, EXPLAIN_PLAN_ROWS_COLUMN_ALIAS, + EXPLAIN_PLAN_TABLE_NAME, new KeyValueColumnExpression(EXPLAIN_PLAN_ROWS_ESTIMATE_COLUMN), + false), + new ExpressionProjector(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS, + EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS, EXPLAIN_PLAN_TABLE_NAME, + new KeyValueColumnExpression(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN), false)), + 0, true); + + private static class ExecutableExplainStatement extends ExplainStatement + implements CompilableStatement { + + ExecutableExplainStatement(BindableStatement statement, ExplainType explainType) { + super(statement, explainType); + } - @Override - public List getOutputOrderBys() { - return Collections. emptyList(); - } + @Override + public CompilableStatement getStatement() { + return (CompilableStatement) super.getStatement(); + } - @Override - public boolean isApplicable() { - return true; - } - }; - } + @Override + public int getBindCount() { + return getStatement().getBindCount(); } - private static class ExecutableUpsertStatement extends UpsertStatement implements CompilableStatement { - private ExecutableUpsertStatement(NamedTableNode table, HintNode hintNode, List columns, - List values, SelectStatement select, int bindCount, Map udfParseNodes, - List> onDupKeyPairs) { - super(table, hintNode, columns, values, select, bindCount, udfParseNodes, onDupKeyPairs); + @SuppressWarnings("unchecked") + @Override + public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + CompilableStatement compilableStmt = getStatement(); + StatementPlan compilePlan = + compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE); + // if client is validating timestamps, ensure its metadata cache is up to date. + if (ValidateLastDDLTimestampUtil.getValidateLastDdlTimestampEnabled(stmt.getConnection())) { + Set tableRefs = compilePlan.getSourceRefs(); + for (TableRef tableRef : tableRefs) { + new MetaDataClient(stmt.getConnection()).updateCache(stmt.getConnection().getTenantId(), + tableRef.getTable().getSchemaName().getString(), + tableRef.getTable().getTableName().getString(), true); } - - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - if (!getUdfParseNodes().isEmpty()) { - stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); - } - UpsertCompiler compiler = new UpsertCompiler(stmt, this.getOperation()); - MutationPlan plan = compiler.compile(this); - plan.getContext().getSequenceManager().validateSequences(seqAction); - return plan; + compilePlan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE); + } + // For a QueryPlan, we need to get its optimized plan; for a MutationPlan, its enclosed + // QueryPlan + // has already been optimized during compilation. 
+ if (compilePlan instanceof QueryPlan) { + QueryPlan dataPlan = (QueryPlan) compilePlan; + compilePlan = + stmt.getConnection().getQueryServices().getOptimizer().optimize(stmt, dataPlan); + } + final StatementPlan plan = compilePlan; + List planSteps = plan.getExplainPlan().getPlanSteps(); + ExplainType explainType = getExplainType(); + if (explainType == ExplainType.DEFAULT) { + List updatedExplainPlanSteps = new ArrayList<>(planSteps); + updatedExplainPlanSteps.removeIf( + planStep -> planStep != null && planStep.contains(ExplainTable.REGION_LOCATIONS)); + planSteps = Collections.unmodifiableList(updatedExplainPlanSteps); + } + List tuples = Lists.newArrayListWithExpectedSize(planSteps.size()); + Long estimatedBytesToScan = plan.getEstimatedBytesToScan(); + Long estimatedRowsToScan = plan.getEstimatedRowsToScan(); + Long estimateInfoTimestamp = plan.getEstimateInfoTimestamp(); + for (String planStep : planSteps) { + byte[] row = PVarchar.INSTANCE.toBytes(planStep); + List cells = Lists.newArrayListWithCapacity(3); + cells.add(PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY)); + if (estimatedBytesToScan != null) { + cells.add( + PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_BYTES_ESTIMATE, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, PLong.INSTANCE.toBytes(estimatedBytesToScan))); } - } - - private static class ExecutableDeleteStatement extends DeleteStatement implements CompilableStatement { - private ExecutableDeleteStatement(NamedTableNode table, HintNode hint, ParseNode whereNode, List orderBy, LimitNode limit, int bindCount, Map udfParseNodes) { - super(table, hint, whereNode, orderBy, limit, bindCount, udfParseNodes); + if (estimatedRowsToScan != null) { + cells.add( + PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ROWS_ESTIMATE, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, PLong.INSTANCE.toBytes(estimatedRowsToScan))); } + if (estimateInfoTimestamp != null) { + cells.add( + PhoenixKeyValueUtil.newKeyValue(row, EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_ESTIMATE_INFO_TS, + MetaDataProtocol.MIN_TABLE_TIMESTAMP, PLong.INSTANCE.toBytes(estimateInfoTimestamp))); + } + Collections.sort(cells, CellComparator.getInstance()); + Tuple tuple = new MultiKeyValueTuple(cells); + tuples.add(tuple); + } + final Long estimatedBytes = estimatedBytesToScan; + final Long estimatedRows = estimatedRowsToScan; + final Long estimateTs = estimateInfoTimestamp; + final ResultIterator iterator = new MaterializedResultIterator(tuples); + return new QueryPlan() { - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - if (!getUdfParseNodes().isEmpty()) { - stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); - } - DeleteCompiler compiler = new DeleteCompiler(stmt, this.getOperation()); - MutationPlan plan = compiler.compile(this); - plan.getContext().getSequenceManager().validateSequences(seqAction); - return plan; - } - } - - private static class ExecutableCreateTableStatement extends CreateTableStatement implements CompilableStatement { - ExecutableCreateTableStatement(TableName tableName, ListMultimap> props, List columnDefs, - PrimaryKeyConstraint pkConstraint, List splitNodes, PTableType tableType, boolean ifNotExists, - TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows, - Map familyCounters, boolean noVerify) { - super(tableName, props, 
columnDefs, pkConstraint, splitNodes, tableType, ifNotExists, - baseTableName, tableTypeIdNode, bindCount, immutableRows, familyCounters, - noVerify); + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; } - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - CreateTableCompiler compiler = new CreateTableCompiler(stmt, this.getOperation()); - return compiler.compile(this); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("EXPLAIN PLAN")); } - } - private static class ExecutableCreateCDCStatement extends CreateCDCStatement - implements CompilableStatement { - public ExecutableCreateCDCStatement(NamedNode cdcObjName, TableName dataTable, - Set includeScopes, - ListMultimap> props, - boolean ifNotExists, int bindCount) { - super(cdcObjName, dataTable, includeScopes, props, ifNotExists, bindCount); + @Override + public ResultIterator iterator() throws SQLException { + return iterator; } @Override - public MutationPlan compilePlan(PhoenixStatement stmt, - Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("CREATE CDC")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.createCDC(ExecutableCreateCDCStatement.this); - } - }; + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return iterator; } - } - private static class ExecutableCreateSchemaStatement extends CreateSchemaStatement implements CompilableStatement { - ExecutableCreateSchemaStatement(String schemaName, boolean ifNotExists) { - super(schemaName, ifNotExists); + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) + throws SQLException { + return iterator; } - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - CreateSchemaCompiler compiler = new CreateSchemaCompiler(stmt); - return compiler.compile(this); + public long getEstimatedSize() { + return 0; } - } - private static class ExecutableCreateFunctionStatement extends CreateFunctionStatement implements CompilableStatement { - - public ExecutableCreateFunctionStatement(PFunction functionInfo, boolean temporary, boolean isReplace) { - super(functionInfo, temporary, isReplace); + @Override + public Cost getCost() { + return Cost.ZERO; } - - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - stmt.throwIfUnallowedUserDefinedFunctions(Collections.EMPTY_MAP); - CreateFunctionCompiler compiler = new CreateFunctionCompiler(stmt); - return compiler.compile(this); + public TableRef getTableRef() { + return null; } - } - - private static class ExecutableDropFunctionStatement extends DropFunctionStatement implements CompilableStatement { - public ExecutableDropFunctionStatement(String functionName, boolean ifNotExists) { - super(functionName, ifNotExists); + @Override + public Set getSourceRefs() { + return Collections.emptySet(); } - 
@SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP FUNCTION")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropFunction(ExecutableDropFunctionStatement.this); - } - }; + public RowProjector getProjector() { + return EXPLAIN_PLAN_ROW_PROJECTOR_WITH_BYTE_ROW_ESTIMATES; } - } - - private static class ExecutableAddJarsStatement extends AddJarsStatement implements CompilableStatement { - public ExecutableAddJarsStatement(List jarPaths) { - super(jarPaths); + @Override + public Integer getLimit() { + return null; } - - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new CustomMutationPlan(context, stmt); + public Integer getOffset() { + return null; } - private class CustomMutationPlan extends BaseMutationPlan { - - private final StatementContext context; - private final PhoenixStatement stmt; - - private CustomMutationPlan(StatementContext context, PhoenixStatement stmt) { - super(context, ExecutableAddJarsStatement.this.getOperation()); - this.context = context; - this.stmt = stmt; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("ADD JARS")); - } - - @Override - public MutationState execute() throws SQLException { - String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps() - .get(QueryServices.DYNAMIC_JARS_DIR_KEY); - if (dynamicJarsDir == null) { - throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY - + " is not configured for placing the jars."); - } - dynamicJarsDir = - dynamicJarsDir.endsWith("/") ? 
dynamicJarsDir : dynamicJarsDir + '/'; - Configuration conf = HBaseFactoryProvider.getConfigurationFactory() - .getConfiguration(); - Path dynamicJarsDirPath = new Path(dynamicJarsDir); - for (LiteralParseNode jarPath : getJarPaths()) { - String jarPathStr = (String) jarPath.getValue(); - if (!jarPathStr.endsWith(".jar")) { - throw new SQLException(jarPathStr + " is not a valid jar file path."); - } - } - try { - FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); - List jarPaths = getJarPaths(); - for (LiteralParseNode jarPath : jarPaths) { - File f = new File((String) jarPath.getValue()); - fs.copyFromLocalFile(new Path(f.getAbsolutePath()), new Path( - dynamicJarsDir + f.getName())); - } - } catch (IOException e) { - throw new SQLException(e); - } - return new MutationState(0, 0, context.getConnection()); - } - } - } - - private static class ExecutableDeclareCursorStatement extends DeclareCursorStatement implements CompilableStatement { - public ExecutableDeclareCursorStatement(CursorName cursor, SelectStatement select){ - super(cursor, select); + @Override + public OrderBy getOrderBy() { + return OrderBy.EMPTY_ORDER_BY; } @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - ExecutableSelectStatement wrappedSelect = new ExecutableSelectStatement( - (ExecutableSelectStatement) stmt.parseStatement(this.getQuerySQL())); - DeclareCursorCompiler compiler = new DeclareCursorCompiler(stmt, this.getOperation(),wrappedSelect.compilePlan(stmt, seqAction)); - return compiler.compile(this); + public GroupBy getGroupBy() { + return GroupBy.EMPTY_GROUP_BY; } - } - private static class ExecutableOpenStatement extends OpenStatement implements CompilableStatement { - public ExecutableOpenStatement(CursorName cursor){ - super(cursor); + @Override + public List getSplits() { + return Collections.emptyList(); } @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - OpenStatementCompiler compiler = new OpenStatementCompiler(stmt, this.getOperation()); - return compiler.compile(this); + public List> getScans() { + return Collections.emptyList(); } - } - private static class ExecutableCloseStatement extends CloseStatement implements CompilableStatement { - public ExecutableCloseStatement(CursorName cursor){ - super(cursor); + @Override + public StatementContext getContext() { + return plan.getContext(); } @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - CloseStatementCompiler compiler = new CloseStatementCompiler(stmt, this.getOperation()); - return compiler.compile(this); + public FilterableStatement getStatement() { + return null; } - } - private static class ExecutableFetchStatement extends FetchStatement implements CompilableStatement { - public ExecutableFetchStatement(CursorName cursor, boolean isNext, int fetchLimit) { - super(cursor, isNext, fetchLimit); + @Override + public boolean isDegenerate() { + return false; } @Override - public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - return CursorUtil.getFetchPlan(this.getCursorName().getName(), this.isNext(), this.getFetchSize()); + public boolean isRowKeyOrdered() { + return true; } - } - - private static class ExecutableDeleteJarStatement extends DeleteJarStatement implements CompilableStatement { - - public ExecutableDeleteJarStatement(LiteralParseNode jarPath) { - super(jarPath); + @Override + 
public Operation getOperation() { + return ExecutableExplainStatement.this.getOperation(); } - - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new CustomMutationPlan(context, stmt); + public boolean useRoundRobinIterator() throws SQLException { + return false; } - private class CustomMutationPlan extends BaseMutationPlan { - - private final StatementContext context; - private final PhoenixStatement stmt; - - private CustomMutationPlan(StatementContext context, PhoenixStatement stmt) { - super(context, ExecutableDeleteJarStatement.this.getOperation()); - this.context = context; - this.stmt = stmt; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DELETE JAR")); - } - - @Override - public MutationState execute() throws SQLException { - String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps() - .get(QueryServices.DYNAMIC_JARS_DIR_KEY); - if (dynamicJarsDir == null) { - throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY - + " is not configured."); - } - dynamicJarsDir = - dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/'; - Configuration conf = HBaseFactoryProvider.getConfigurationFactory() - .getConfiguration(); - Path dynamicJarsDirPath = new Path(dynamicJarsDir); - try { - FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); - String jarPathStr = (String)getJarPath().getValue(); - if (!jarPathStr.endsWith(".jar")) { - throw new SQLException(jarPathStr + " is not a valid jar file path."); - } - Path p = new Path(jarPathStr); - if (fs.exists(p)) { - fs.delete(p, false); - } - } catch(IOException e) { - throw new SQLException(e); - } - return new MutationState(0, 0, context.getConnection()); - } + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.defaultReturn(this); } - } - - private static class ExecutableListJarsStatement extends ListJarsStatement implements CompilableStatement { - public ExecutableListJarsStatement() { - super(); + @Override + public Long getEstimatedRowsToScan() { + return estimatedRows; } - - @SuppressWarnings("unchecked") @Override - public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - return new ListJarsQueryPlan(stmt); + public Long getEstimatedBytesToScan() { + return estimatedBytes; } - } - private static class ExecutableShowTablesStatement extends ShowTablesStatement - implements CompilableStatement { - - public ExecutableShowTablesStatement(String schema, String pattern) { - super(schema, pattern); + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return estimateTs; } @Override - public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) - throws SQLException { - PreparedStatement delegateStmt = QueryUtil.getTablesStmt(stmt.getConnection(), null, - getTargetSchema(), getDbPattern(), null); - return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); + public List getOutputOrderBys() { + return Collections. emptyList(); } - } - - // Delegates to a SELECT query against SYSCAT. 
- private static class ExecutableShowSchemasStatement extends ShowSchemasStatement implements CompilableStatement { - - public ExecutableShowSchemasStatement(String pattern) { super(pattern); } @Override - public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - PreparedStatement delegateStmt = - QueryUtil.getSchemasStmt(stmt.getConnection(), null, getSchemaPattern()); - return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); + public boolean isApplicable() { + return true; } + }; } + } - private static class ExecutableShowCreateTable extends ShowCreateTableStatement - implements CompilableStatement { - - public ExecutableShowCreateTable(TableName tableName) { - super(tableName); - } + private static class ExecutableUpsertStatement extends UpsertStatement + implements CompilableStatement { + private ExecutableUpsertStatement(NamedTableNode table, HintNode hintNode, + List columns, List values, SelectStatement select, int bindCount, + Map udfParseNodes, List> onDupKeyPairs) { + super(table, hintNode, columns, values, select, bindCount, udfParseNodes, onDupKeyPairs); + } - @Override - public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) - throws SQLException { - PreparedStatement delegateStmt = QueryUtil.getShowCreateTableStmt(stmt.getConnection(), null, - getTableName()); - return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + if (!getUdfParseNodes().isEmpty()) { + stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); + } + UpsertCompiler compiler = new UpsertCompiler(stmt, this.getOperation()); + MutationPlan plan = compiler.compile(this); + plan.getContext().getSequenceManager().validateSequences(seqAction); + return plan; + } + } + + private static class ExecutableDeleteStatement extends DeleteStatement + implements CompilableStatement { + private ExecutableDeleteStatement(NamedTableNode table, HintNode hint, ParseNode whereNode, + List orderBy, LimitNode limit, int bindCount, + Map udfParseNodes) { + super(table, hint, whereNode, orderBy, limit, bindCount, udfParseNodes); } - private static class ExecutableCreateIndexStatement extends CreateIndexStatement implements CompilableStatement { + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + if (!getUdfParseNodes().isEmpty()) { + stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); + } + DeleteCompiler compiler = new DeleteCompiler(stmt, this.getOperation()); + MutationPlan plan = compiler.compile(this); + plan.getContext().getSequenceManager().validateSequences(seqAction); + return plan; + } + } + + private static class ExecutableCreateTableStatement extends CreateTableStatement + implements CompilableStatement { + ExecutableCreateTableStatement(TableName tableName, + ListMultimap> props, List columnDefs, + PrimaryKeyConstraint pkConstraint, List splitNodes, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows, Map familyCounters, boolean noVerify) { + super(tableName, props, columnDefs, pkConstraint, splitNodes, tableType, ifNotExists, + baseTableName, tableTypeIdNode, bindCount, immutableRows, familyCounters, noVerify); + } - public ExecutableCreateIndexStatement(NamedNode 
indexName, NamedTableNode dataTable, - IndexKeyConstraint ikConstraint, List includeColumns, - List splits, ListMultimap> props, - boolean ifNotExists, IndexType indexType, boolean async, int bindCount, Map udfParseNodes, ParseNode where) { - super(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, - indexType, async , bindCount, udfParseNodes, where); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + CreateTableCompiler compiler = new CreateTableCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - if (!getUdfParseNodes().isEmpty()) { - stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); - } - CreateIndexCompiler compiler = new CreateIndexCompiler(stmt, this.getOperation()); - return compiler.compile(this); - } + private static class ExecutableCreateCDCStatement extends CreateCDCStatement + implements CompilableStatement { + public ExecutableCreateCDCStatement(NamedNode cdcObjName, TableName dataTable, + Set includeScopes, ListMultimap> props, + boolean ifNotExists, int bindCount) { + super(cdcObjName, dataTable, includeScopes, props, ifNotExists, bindCount); } - - private static class ExecutableCreateSequenceStatement extends CreateSequenceStatement implements CompilableStatement { - public ExecutableCreateSequenceStatement(TableName sequenceName, ParseNode startWith, - ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, - boolean cycle, boolean ifNotExists, int bindCount) { - super(sequenceName, startWith, incrementBy, cacheSize, minValue, maxValue, cycle, - ifNotExists, bindCount); - } + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - CreateSequenceCompiler compiler = new CreateSequenceCompiler(stmt, this.getOperation()); - return compiler.compile(this); - } - } - - private static class ExecutableDropSequenceStatement extends DropSequenceStatement implements CompilableStatement { - - - public ExecutableDropSequenceStatement(TableName sequenceName, boolean ifExists, int bindCount) { - super(sequenceName, ifExists, bindCount); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("CREATE CDC")); } - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - DropSequenceCompiler compiler = new DropSequenceCompiler(stmt, this.getOperation()); - return compiler.compile(this); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.createCDC(ExecutableCreateCDCStatement.this); } + }; } + } - private static class ExecutableDropTableStatement extends DropTableStatement implements CompilableStatement { - - ExecutableDropTableStatement(TableName tableName, PTableType tableType, boolean ifExists, boolean cascade) { - super(tableName, tableType, ifExists, cascade, 
false); - } - - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP TABLE")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropTable(ExecutableDropTableStatement.this); - } - }; - } + private static class ExecutableCreateSchemaStatement extends CreateSchemaStatement + implements CompilableStatement { + ExecutableCreateSchemaStatement(String schemaName, boolean ifNotExists) { + super(schemaName, ifNotExists); } - private static class ExecutableDropSchemaStatement extends DropSchemaStatement implements CompilableStatement { - - ExecutableDropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) { - super(schemaName, ifExists, cascade); - } - - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP SCHEMA")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropSchema(ExecutableDropSchemaStatement.this); - } - }; - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + CreateSchemaCompiler compiler = new CreateSchemaCompiler(stmt); + return compiler.compile(this); } + } - private static class ExecutableUseSchemaStatement extends UseSchemaStatement implements CompilableStatement { + private static class ExecutableCreateFunctionStatement extends CreateFunctionStatement + implements CompilableStatement { - ExecutableUseSchemaStatement(String schemaName) { - super(schemaName); - } + public ExecutableCreateFunctionStatement(PFunction functionInfo, boolean temporary, + boolean isReplace) { + super(functionInfo, temporary, isReplace); + } - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + stmt.throwIfUnallowedUserDefinedFunctions(Collections.EMPTY_MAP); + CreateFunctionCompiler compiler = new CreateFunctionCompiler(stmt); + return compiler.compile(this); + } + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("USE SCHEMA")); - } + private static class ExecutableDropFunctionStatement extends DropFunctionStatement + implements CompilableStatement { - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - 
return client.useSchema(ExecutableUseSchemaStatement.this); - } - }; - } + public ExecutableDropFunctionStatement(String functionName, boolean ifNotExists) { + super(functionName, ifNotExists); } - private static class ExecutableChangePermsStatement extends ChangePermsStatement implements CompilableStatement { - - public ExecutableChangePermsStatement (String permsString, boolean isSchemaName, TableName tableName, - String schemaName, boolean isGroupName, LiteralParseNode userOrGroup, boolean isGrantStatement) { - super(permsString, isSchemaName, tableName, schemaName, isGroupName, userOrGroup, isGrantStatement); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("GRANT PERMISSION")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.changePermissions(ExecutableChangePermsStatement.this); - } - }; + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; } - } - - private static class ExecutableDropIndexStatement extends DropIndexStatement implements CompilableStatement { - public ExecutableDropIndexStatement(NamedNode indexName, TableName tableName, boolean ifExists) { - super(indexName, tableName, ifExists); + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP FUNCTION")); } - @SuppressWarnings("unchecked") @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP INDEX")); - } - - @Override - public MutationState execute() throws SQLException { - String indexName = ExecutableDropIndexStatement.this.getIndexName().getName(); - if (CDCUtil.isCDCIndex(indexName)) { - throw new SQLExceptionInfo.Builder(CANNOT_DROP_CDC_INDEX) - .setTableName(indexName) - .build().buildException(); - } - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropIndex(ExecutableDropIndexStatement.this); - } - }; + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.dropFunction(ExecutableDropFunctionStatement.this); } + }; } + } - private static class ExecutableDropCDCStatement extends DropCDCStatement implements CompilableStatement { - - public ExecutableDropCDCStatement(NamedNode cdcObjName, TableName tableName, boolean ifExists) { - super(cdcObjName, tableName, ifExists); - } - - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws 
SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("DROP CDC")); - } + private static class ExecutableAddJarsStatement extends AddJarsStatement + implements CompilableStatement { - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropCDC(ExecutableDropCDCStatement.this); - } - }; - } + public ExecutableAddJarsStatement(List jarPaths) { + super(jarPaths); } - private static class ExecutableAlterIndexStatement extends AlterIndexStatement implements CompilableStatement { - - public ExecutableAlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, ListMultimap> props) { - super(indexTableNode, dataTableName, ifExists, state, isRebuildAll, async, props); + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new CustomMutationPlan(context, stmt); + } + + private class CustomMutationPlan extends BaseMutationPlan { + + private final StatementContext context; + private final PhoenixStatement stmt; + + private CustomMutationPlan(StatementContext context, PhoenixStatement stmt) { + super(context, ExecutableAddJarsStatement.this.getOperation()); + this.context = context; + this.stmt = stmt; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("ADD JARS")); + } + + @Override + public MutationState execute() throws SQLException { + String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps() + .get(QueryServices.DYNAMIC_JARS_DIR_KEY); + if (dynamicJarsDir == null) { + throw new SQLException( + QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured for placing the jars."); + } + dynamicJarsDir = dynamicJarsDir.endsWith("/") ? 
dynamicJarsDir : dynamicJarsDir + '/'; + Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + Path dynamicJarsDirPath = new Path(dynamicJarsDir); + for (LiteralParseNode jarPath : getJarPaths()) { + String jarPathStr = (String) jarPath.getValue(); + if (!jarPathStr.endsWith(".jar")) { + throw new SQLException(jarPathStr + " is not a valid jar file path."); + } } - - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("ALTER INDEX")); - } - - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.alterIndex(ExecutableAlterIndexStatement.this); - } - }; + try { + FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); + List jarPaths = getJarPaths(); + for (LiteralParseNode jarPath : jarPaths) { + File f = new File((String) jarPath.getValue()); + fs.copyFromLocalFile(new Path(f.getAbsolutePath()), + new Path(dynamicJarsDir + f.getName())); + } + } catch (IOException e) { + throw new SQLException(e); } + return new MutationState(0, 0, context.getConnection()); + } } + } - private static class ExecutableTraceStatement extends TraceStatement implements CompilableStatement { - - public ExecutableTraceStatement(boolean isTraceOn, double samplingRate) { - super(isTraceOn, samplingRate); - } - - @SuppressWarnings("unchecked") - @Override - public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - return new TraceQueryPlan(this, stmt); - } + private static class ExecutableDeclareCursorStatement extends DeclareCursorStatement + implements CompilableStatement { + public ExecutableDeclareCursorStatement(CursorName cursor, SelectStatement select) { + super(cursor, select); } - private static class ExecutableAlterSessionStatement extends AlterSessionStatement implements CompilableStatement { + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + ExecutableSelectStatement wrappedSelect = new ExecutableSelectStatement( + (ExecutableSelectStatement) stmt.parseStatement(this.getQuerySQL())); + DeclareCursorCompiler compiler = new DeclareCursorCompiler(stmt, this.getOperation(), + wrappedSelect.compilePlan(stmt, seqAction)); + return compiler.compile(this); + } + } - public ExecutableAlterSessionStatement(Map props) { - super(props); - } + private static class ExecutableOpenStatement extends OpenStatement + implements CompilableStatement { + public ExecutableOpenStatement(CursorName cursor) { + super(cursor); + } - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new CustomMutationPlan(context); - } + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + OpenStatementCompiler compiler = new OpenStatementCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - private class CustomMutationPlan extends BaseMutationPlan { + private static class 
ExecutableCloseStatement extends CloseStatement + implements CompilableStatement { + public ExecutableCloseStatement(CursorName cursor) { + super(cursor); + } - private final StatementContext context; + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + CloseStatementCompiler compiler = new CloseStatementCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - private CustomMutationPlan(StatementContext context) { - super(context, ExecutableAlterSessionStatement.this.getOperation()); - this.context = context; - } + private static class ExecutableFetchStatement extends FetchStatement + implements CompilableStatement { + public ExecutableFetchStatement(CursorName cursor, boolean isNext, int fetchLimit) { + super(cursor, isNext, fetchLimit); + } - @Override - public StatementContext getContext() { - return context; - } + @Override + public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + return CursorUtil.getFetchPlan(this.getCursorName().getName(), this.isNext(), + this.getFetchSize()); + } - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("ALTER SESSION")); - } + private static class ExecutableDeleteJarStatement extends DeleteJarStatement + implements CompilableStatement { + public ExecutableDeleteJarStatement(LiteralParseNode jarPath) { + super(jarPath); + } - @Override - public MutationState execute() throws SQLException { - Object consistency = getProps() - .get(PhoenixRuntime.CONSISTENCY_ATTRIB.toUpperCase()); - if (consistency != null) { - if (((String)consistency).equalsIgnoreCase(Consistency.TIMELINE.toString())) { - getContext().getConnection().setConsistency(Consistency.TIMELINE); - } else { - getContext().getConnection().setConsistency(Consistency.STRONG); - } - } - return new MutationState(0, 0, context.getConnection()); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new CustomMutationPlan(context, stmt); + } + + private class CustomMutationPlan extends BaseMutationPlan { + + private final StatementContext context; + private final PhoenixStatement stmt; + + private CustomMutationPlan(StatementContext context, PhoenixStatement stmt) { + super(context, ExecutableDeleteJarStatement.this.getOperation()); + this.context = context; + this.stmt = stmt; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DELETE JAR")); + } + + @Override + public MutationState execute() throws SQLException { + String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps() + .get(QueryServices.DYNAMIC_JARS_DIR_KEY); + if (dynamicJarsDir == null) { + throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured."); + } + dynamicJarsDir = dynamicJarsDir.endsWith("/") ? 
dynamicJarsDir : dynamicJarsDir + '/'; + Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + Path dynamicJarsDirPath = new Path(dynamicJarsDir); + try { + FileSystem fs = dynamicJarsDirPath.getFileSystem(conf); + String jarPathStr = (String) getJarPath().getValue(); + if (!jarPathStr.endsWith(".jar")) { + throw new SQLException(jarPathStr + " is not a valid jar file path."); + } + Path p = new Path(jarPathStr); + if (fs.exists(p)) { + fs.delete(p, false); + } + } catch (IOException e) { + throw new SQLException(e); } + return new MutationState(0, 0, context.getConnection()); + } } + } - private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement implements - CompilableStatement { - public ExecutableUpdateStatisticsStatement(NamedTableNode table, StatisticsCollectionScope scope, Map props) { - super(table, scope, props); - } + private static class ExecutableListJarsStatement extends ListJarsStatement + implements CompilableStatement { - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { + public ExecutableListJarsStatement() { + super(); + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("UPDATE STATISTICS")); - } + @SuppressWarnings("unchecked") + @Override + public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + return new ListJarsQueryPlan(stmt); + } + } - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.updateStatistics(ExecutableUpdateStatisticsStatement.this); - } - }; - } + private static class ExecutableShowTablesStatement extends ShowTablesStatement + implements CompilableStatement { + public ExecutableShowTablesStatement(String schema, String pattern) { + super(schema, pattern); } - - private static class ExecutableExecuteUpgradeStatement extends ExecuteUpgradeStatement implements CompilableStatement { - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - return new MutationPlan() { - - @Override - public Set getSourceRefs() { - return Collections.emptySet(); - } - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public Operation getOperation() { - return Operation.UPGRADE; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("EXECUTE UPGRADE")); - } - @Override - public QueryPlan getQueryPlan() { return null; } - - @Override - public StatementContext getContext() { return new StatementContext(stmt); } - - @Override - public TableRef getTargetRef() { - return TableRef.EMPTY_TABLE_REF; - } - - @Override - public MutationState execute() throws SQLException { - PhoenixConnection phxConn = stmt.getConnection(); - Properties props = new Properties(); - phxConn.getQueryServices().upgradeSystemTables(phxConn.getURL(), props); - return MutationState.emptyMutationState(-1, -1, phxConn); - } + @Override + public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp 
seqAction) + throws SQLException { + PreparedStatement delegateStmt = QueryUtil.getTablesStmt(stmt.getConnection(), null, + getTargetSchema(), getDbPattern(), null); + return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); + } + } - @Override - public Long getEstimatedRowsToScan() throws SQLException { - return 0l; - } + // Delegates to a SELECT query against SYSCAT. + private static class ExecutableShowSchemasStatement extends ShowSchemasStatement + implements CompilableStatement { - @Override - public Long getEstimatedBytesToScan() throws SQLException { - return 0l; - } + public ExecutableShowSchemasStatement(String pattern) { + super(pattern); + } - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return 0l; - } - }; - } + @Override + public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + PreparedStatement delegateStmt = + QueryUtil.getSchemasStmt(stmt.getConnection(), null, getSchemaPattern()); + return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); } + } - private static class ExecutableAddColumnStatement extends AddColumnStatement implements CompilableStatement { + private static class ExecutableShowCreateTable extends ShowCreateTableStatement + implements CompilableStatement { - ExecutableAddColumnStatement(NamedTableNode table, PTableType tableType, List columnDefs, boolean ifNotExists, ListMultimap> props, boolean cascade, List indexes) { - super(table, tableType, columnDefs, ifNotExists, props, cascade, indexes); - } + public ExecutableShowCreateTable(TableName tableName) { + super(tableName); + } - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { + @Override + public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + PreparedStatement delegateStmt = + QueryUtil.getShowCreateTableStmt(stmt.getConnection(), null, getTableName()); + return ((PhoenixPreparedStatement) delegateStmt).compileQuery(); + } + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("ALTER " + getTableType() + " ADD COLUMN")); - } + private static class ExecutableCreateIndexStatement extends CreateIndexStatement + implements CompilableStatement { - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.addColumn(ExecutableAddColumnStatement.this); - } - }; - } + public ExecutableCreateIndexStatement(NamedNode indexName, NamedTableNode dataTable, + IndexKeyConstraint ikConstraint, List includeColumns, List splits, + ListMultimap> props, boolean ifNotExists, IndexType indexType, + boolean async, int bindCount, Map udfParseNodes, ParseNode where) { + super(indexName, dataTable, ikConstraint, includeColumns, splits, props, ifNotExists, + indexType, async, bindCount, udfParseNodes, where); } - private static class ExecutableDropColumnStatement extends DropColumnStatement implements CompilableStatement { + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + if (!getUdfParseNodes().isEmpty()) { + stmt.throwIfUnallowedUserDefinedFunctions(getUdfParseNodes()); + } + 
CreateIndexCompiler compiler = new CreateIndexCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - ExecutableDropColumnStatement(NamedTableNode table, PTableType tableType, List columnRefs, boolean ifExists) { - super(table, tableType, columnRefs, ifExists); - } + private static class ExecutableCreateSequenceStatement extends CreateSequenceStatement + implements CompilableStatement { - @SuppressWarnings("unchecked") - @Override - public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException { - final StatementContext context = new StatementContext(stmt); - return new BaseMutationPlan(context, this.getOperation()) { + public ExecutableCreateSequenceStatement(TableName sequenceName, ParseNode startWith, + ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, + boolean cycle, boolean ifNotExists, int bindCount) { + super(sequenceName, startWith, incrementBy, cacheSize, minValue, maxValue, cycle, ifNotExists, + bindCount); + } - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return new ExplainPlan(Collections.singletonList("ALTER " + getTableType() + " DROP COLUMN")); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + CreateSequenceCompiler compiler = new CreateSequenceCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - @Override - public MutationState execute() throws SQLException { - MetaDataClient client = new MetaDataClient(getContext().getConnection()); - return client.dropColumn(ExecutableDropColumnStatement.this); - } - }; - } + private static class ExecutableDropSequenceStatement extends DropSequenceStatement + implements CompilableStatement { + + public ExecutableDropSequenceStatement(TableName sequenceName, boolean ifExists, + int bindCount) { + super(sequenceName, ifExists, bindCount); } - protected static class ExecutableNodeFactory extends ParseNodeFactory { - @Override - public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List select, ParseNode where, - List groupBy, ParseNode having, List orderBy, LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate, - boolean hasSequence, List selects, Map udfParseNodes) { - return new ExecutableSelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.emptyList() : groupBy, - having, orderBy == null ? Collections.emptyList() : orderBy, limit, offset, bindCount, isAggregate, hasSequence, selects == null ? 
Collections.emptyList() : selects, udfParseNodes); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + DropSequenceCompiler compiler = new DropSequenceCompiler(stmt, this.getOperation()); + return compiler.compile(this); + } + } - @Override - public ExecutableUpsertStatement upsert(NamedTableNode table, HintNode hintNode, List columns, List values, SelectStatement select, int bindCount, - Map udfParseNodes, List> onDupKeyPairs) { - return new ExecutableUpsertStatement(table, hintNode, columns, values, select, bindCount, udfParseNodes, onDupKeyPairs); - } + private static class ExecutableDropTableStatement extends DropTableStatement + implements CompilableStatement { - @Override - public ExecutableDeclareCursorStatement declareCursor(CursorName cursor, SelectStatement select) { - return new ExecutableDeclareCursorStatement(cursor, select); - } + ExecutableDropTableStatement(TableName tableName, PTableType tableType, boolean ifExists, + boolean cascade) { + super(tableName, tableType, ifExists, cascade, false); + } - @Override - public ExecutableFetchStatement fetch(CursorName cursor, boolean isNext, int fetchLimit) { - return new ExecutableFetchStatement(cursor, isNext, fetchLimit); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public ExecutableOpenStatement open(CursorName cursor) { - return new ExecutableOpenStatement(cursor); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP TABLE")); } @Override - public ExecutableCloseStatement close(CursorName cursor) { - return new ExecutableCloseStatement(cursor); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.dropTable(ExecutableDropTableStatement.this); } + }; + } + } - @Override - public ExecutableDeleteStatement delete(NamedTableNode table, HintNode hint, ParseNode whereNode, List orderBy, LimitNode limit, int bindCount, Map udfParseNodes) { - return new ExecutableDeleteStatement(table, hint, whereNode, orderBy, limit, bindCount, udfParseNodes); - } + private static class ExecutableDropSchemaStatement extends DropSchemaStatement + implements CompilableStatement { - @Override - public CreateTableStatement createTable(TableName tableName, ListMultimap> props, List columns, PrimaryKeyConstraint pkConstraint, List splits, - PTableType tableType, boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows, - Map cqCounters, boolean noVerify) { - return new ExecutableCreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, noVerify); - } + ExecutableDropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) { + super(schemaName, ifExists, cascade); + } - @Override - public CreateTableStatement createTable(TableName tableName, ListMultimap> props, List columns, PrimaryKeyConstraint pkConstraint, List splits, - PTableType tableType, boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows, Map cqCounters) { - 
return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, false); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public CreateTableStatement createTable(TableName tableName, ListMultimap> props, List columns, PrimaryKeyConstraint pkConstraint, List splits, - PTableType tableType, boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows) { - return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, null); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP SCHEMA")); } @Override - public CreateCDCStatement createCDC(NamedNode cdcObj, TableName dataTable, - Set includeScopes, - ListMultimap> props, - boolean ifNotExists, int bindCount) { - return new ExecutableCreateCDCStatement(cdcObj, dataTable, - includeScopes, props, ifNotExists, bindCount); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.dropSchema(ExecutableDropSchemaStatement.this); } + }; + } + } - @Override - public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) { - return new ExecutableCreateSchemaStatement(schemaName, ifNotExists); - } + private static class ExecutableUseSchemaStatement extends UseSchemaStatement + implements CompilableStatement { - @Override - public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith, - ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, - boolean cycle, boolean ifNotExists, int bindCount) { - return new ExecutableCreateSequenceStatement(tableName, startsWith, incrementBy, - cacheSize, minValue, maxValue, cycle, ifNotExists, bindCount); - } - - @Override - public CreateFunctionStatement createFunction(PFunction functionInfo, boolean temporary, boolean isReplace) { - return new ExecutableCreateFunctionStatement(functionInfo, temporary, isReplace); - } + ExecutableUseSchemaStatement(String schemaName) { + super(schemaName); + } - @Override - public AddJarsStatement addJars(List jarPaths) { - return new ExecutableAddJarsStatement(jarPaths); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public DeleteJarStatement deleteJar(LiteralParseNode jarPath) { - return new ExecutableDeleteJarStatement(jarPath); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("USE SCHEMA")); } @Override - public ListJarsStatement listJars() { - return new ExecutableListJarsStatement(); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.useSchema(ExecutableUseSchemaStatement.this); } + }; + } + } + private static class ExecutableChangePermsStatement extends ChangePermsStatement + 
implements CompilableStatement { - @Override - public DropSequenceStatement dropSequence(TableName tableName, boolean ifExists, int bindCount) { - return new ExecutableDropSequenceStatement(tableName, ifExists, bindCount); - } - - @Override - public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, - IndexKeyConstraint ikConstraint, List includeColumns, - List splits, ListMultimap> props, - boolean ifNotExists, IndexType indexType, boolean async, int bindCount, Map udfParseNodes, ParseNode where) { - return new ExecutableCreateIndexStatement(indexName, dataTable, ikConstraint, - includeColumns, splits, props, ifNotExists, indexType, async, bindCount, - udfParseNodes, where); - } - - @Override - public AddColumnStatement addColumn(NamedTableNode table, PTableType tableType, List columnDefs, boolean ifNotExists, ListMultimap> props, boolean cascade, List indexes) { - return new ExecutableAddColumnStatement(table, tableType, columnDefs, ifNotExists, props, cascade, indexes); - } - - @Override - public DropColumnStatement dropColumn(NamedTableNode table, PTableType tableType, List columnNodes, boolean ifExists) { - return new ExecutableDropColumnStatement(table, tableType, columnNodes, ifExists); - } - - @Override - public DropTableStatement dropTable(TableName tableName, PTableType tableType, boolean ifExists, boolean cascade) { - return new ExecutableDropTableStatement(tableName, tableType, ifExists, cascade); - } + public ExecutableChangePermsStatement(String permsString, boolean isSchemaName, + TableName tableName, String schemaName, boolean isGroupName, LiteralParseNode userOrGroup, + boolean isGrantStatement) { + super(permsString, isSchemaName, tableName, schemaName, isGroupName, userOrGroup, + isGrantStatement); + } - @Override - public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) { - return new ExecutableDropSchemaStatement(schemaName, ifExists, cascade); - } + @Override + public MutationPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); - @Override - public UseSchemaStatement useSchema(String schemaName) { - return new ExecutableUseSchemaStatement(schemaName); - } + return new BaseMutationPlan(context, this.getOperation()) { @Override - public DropFunctionStatement dropFunction(String functionName, boolean ifExists) { - return new ExecutableDropFunctionStatement(functionName, ifExists); - } - - @Override - public DropIndexStatement dropIndex(NamedNode indexName, TableName tableName, boolean ifExists) { - return new ExecutableDropIndexStatement(indexName, tableName, ifExists); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("GRANT PERMISSION")); } @Override - public DropCDCStatement dropCDC(NamedNode cdcObjName, TableName tableName, boolean ifExists) { - return new ExecutableDropCDCStatement(cdcObjName, tableName, ifExists); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.changePermissions(ExecutableChangePermsStatement.this); } + }; + } + } - @Override - public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, ListMultimap> props) { - return new ExecutableAlterIndexStatement(indexTableNode, dataTableName, ifExists, state, isRebuildAll, async, 
props); - } + private static class ExecutableDropIndexStatement extends DropIndexStatement + implements CompilableStatement { - @Override - public TraceStatement trace(boolean isTraceOn, double samplingRate) { - return new ExecutableTraceStatement(isTraceOn, samplingRate); - } + public ExecutableDropIndexStatement(NamedNode indexName, TableName tableName, + boolean ifExists) { + super(indexName, tableName, ifExists); + } - @Override - public AlterSessionStatement alterSession(Map props) { - return new ExecutableAlterSessionStatement(props); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public ExplainStatement explain(BindableStatement statement, ExplainType explainType) { - return new ExecutableExplainStatement(statement, explainType); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP INDEX")); } @Override - public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope, Map props) { - return new ExecutableUpdateStatisticsStatement(table, scope, props); - } - - @Override - public ExecuteUpgradeStatement executeUpgrade() { - return new ExecutableExecuteUpgradeStatement(); + public MutationState execute() throws SQLException { + String indexName = ExecutableDropIndexStatement.this.getIndexName().getName(); + if (CDCUtil.isCDCIndex(indexName)) { + throw new SQLExceptionInfo.Builder(CANNOT_DROP_CDC_INDEX).setTableName(indexName) + .build().buildException(); + } + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.dropIndex(ExecutableDropIndexStatement.this); } + }; + } + } - @Override - public ExecutableChangePermsStatement changePermsStatement(String permsString, boolean isSchemaName, TableName tableName, - String schemaName, boolean isGroupName, LiteralParseNode userOrGroup, boolean isGrantStatement) { - return new ExecutableChangePermsStatement(permsString, isSchemaName, tableName, schemaName, isGroupName, userOrGroup,isGrantStatement); - } + private static class ExecutableDropCDCStatement extends DropCDCStatement + implements CompilableStatement { - @Override - public ShowTablesStatement showTablesStatement(String schema, String pattern) { - return new ExecutableShowTablesStatement(schema, pattern); - } + public ExecutableDropCDCStatement(NamedNode cdcObjName, TableName tableName, boolean ifExists) { + super(cdcObjName, tableName, ifExists); + } + + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public ShowSchemasStatement showSchemasStatement(String pattern) { - return new ExecutableShowSchemasStatement(pattern); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("DROP CDC")); } @Override - public ShowCreateTable showCreateTable(TableName tableName) { - return new ExecutableShowCreateTable(tableName); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.dropCDC(ExecutableDropCDCStatement.this); } + 
}; + } + } + + private static class ExecutableAlterIndexStatement extends AlterIndexStatement + implements CompilableStatement { + public ExecutableAlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, + boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, + ListMultimap> props) { + super(indexTableNode, dataTableName, ifExists, state, isRebuildAll, async, props); } - - static class PhoenixStatementParser extends SQLParser { - PhoenixStatementParser(String query, ParseNodeFactory nodeFactory) throws IOException { - super(query, nodeFactory); - } - PhoenixStatementParser(Reader reader) throws IOException { - super(reader); - } - + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { @Override - public CompilableStatement nextStatement(ParseNodeFactory nodeFactory) throws SQLException { - return (CompilableStatement) super.nextStatement(nodeFactory); + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("ALTER INDEX")); } @Override - public CompilableStatement parseStatement() throws SQLException { - return (CompilableStatement) super.parseStatement(); + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.alterIndex(ExecutableAlterIndexStatement.this); } + }; } - - public Format getFormatter(PDataType type) { - return connection.getFormatter(type); - } - - protected final List batch = Lists.newArrayList(); - - @Override - public void addBatch(String sql) throws SQLException { - batch.add(new PhoenixPreparedStatement(connection, sql)); - } + } - @Override - public void clearBatch() throws SQLException { - batch.clear(); + private static class ExecutableTraceStatement extends TraceStatement + implements CompilableStatement { + + public ExecutableTraceStatement(boolean isTraceOn, double samplingRate) { + super(isTraceOn, samplingRate); } - /** - * Execute the current batch of statements. If any exception occurs - * during execution, a {@link java.sql.BatchUpdateException} - * is thrown which compposes the update counts for statements executed so - * far. - */ + @SuppressWarnings("unchecked") @Override - public int[] executeBatch() throws SQLException { - int i = 0; - int[] returnCodes = new int [batch.size()]; - Arrays.fill(returnCodes, -1); - boolean autoCommit = connection.getAutoCommit(); - connection.setAutoCommit(false); - try { - for (i = 0; i < returnCodes.length; i++) { - PhoenixPreparedStatement statement = batch.get(i); - statement.executeForBatch(); - returnCodes[i] = statement.getUpdateCount(); - } - // Flush all changes in batch if auto flush is true - flushIfNecessary(); - // If we make it all the way through, clear the batch - clearBatch(); - if (autoCommit) { - connection.commit(); - } - return returnCodes; - } catch (SQLException t) { - if (i == returnCodes.length) { - // Exception after for loop, perhaps in commit(), discard returnCodes. 
- throw new BatchUpdateException(t); - } else { - returnCodes[i] = Statement.EXECUTE_FAILED; - throw new BatchUpdateException(returnCodes, t); - } - } finally { - connection.setAutoCommit(autoCommit); - } + public QueryPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + return new TraceQueryPlan(this, stmt); } + } - @Override - public void cancel() throws SQLException { - throw new SQLFeatureNotSupportedException(); + private static class ExecutableAlterSessionStatement extends AlterSessionStatement + implements CompilableStatement { + + public ExecutableAlterSessionStatement(Map props) { + super(props); } + @SuppressWarnings("unchecked") @Override - public void clearWarnings() throws SQLException { + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new CustomMutationPlan(context); } - @Override - public void close() throws SQLException { - try { - clearResultSet(); - } finally { - try { - connection.removeStatement(this); - } finally { - isClosed = true; - } + private class CustomMutationPlan extends BaseMutationPlan { + + private final StatementContext context; + + private CustomMutationPlan(StatementContext context) { + super(context, ExecutableAlterSessionStatement.this.getOperation()); + this.context = context; + } + + @Override + public StatementContext getContext() { + return context; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("ALTER SESSION")); + } + + @Override + public MutationState execute() throws SQLException { + Object consistency = getProps().get(PhoenixRuntime.CONSISTENCY_ATTRIB.toUpperCase()); + if (consistency != null) { + if (((String) consistency).equalsIgnoreCase(Consistency.TIMELINE.toString())) { + getContext().getConnection().setConsistency(Consistency.TIMELINE); + } else { + getContext().getConnection().setConsistency(Consistency.STRONG); + } } + return new MutationState(0, 0, context.getConnection()); + } } + } - // From the ResultSet javadoc: - // A ResultSet object is automatically closed when the Statement object that generated it is - // closed, re-executed, or used to retrieve the next result from a sequence of multiple results. - private void clearResultSet() throws SQLException { - if (lastResultSet != null) { - try { - lastResultSet.close(); - } finally { - lastResultSet = null; - } - } + private static class ExecutableUpdateStatisticsStatement extends UpdateStatisticsStatement + implements CompilableStatement { + public ExecutableUpdateStatisticsStatement(NamedTableNode table, + StatisticsCollectionScope scope, Map props) { + super(table, scope, props); } - // Called from ResultSet.close(). rs is already closed. 
- // We use a separate function to avoid calling close() again - void removeResultSet(ResultSet rs) throws SQLException { - if (rs == lastResultSet) { - lastResultSet = null; - if (closeOnCompletion) { - this.close(); - } + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("UPDATE STATISTICS")); } - } - public List getParameters() { - return Collections.emptyList(); - } - - protected CompilableStatement parseStatement(String sql) throws SQLException { - PhoenixStatementParser parser = null; - try { - parser = new PhoenixStatementParser(sql, new ExecutableNodeFactory()); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); + @Override + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.updateStatistics(ExecutableUpdateStatisticsStatement.this); } - CompilableStatement statement = parser.parseStatement(); - return statement; - } - - public QueryPlan optimizeQuery(String sql) throws SQLException { - QueryPlan plan = compileQuery(sql); - return connection.getQueryServices().getOptimizer().optimize(this, plan); + }; } - public QueryPlan compileQuery(String sql) throws SQLException { - CompilableStatement stmt = parseStatement(sql); - return compileQuery(stmt, sql); - } + } + + private static class ExecutableExecuteUpgradeStatement extends ExecuteUpgradeStatement + implements CompilableStatement { + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + return new MutationPlan() { - public QueryPlan compileQuery(CompilableStatement stmt, String query) throws SQLException { - if (stmt.getOperation().isMutation()) { - throw new ExecuteQueryNotApplicableException(query); + @Override + public Set getSourceRefs() { + return Collections.emptySet(); } - return stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); - } - public MutationPlan compileMutation(CompilableStatement stmt, String query) throws SQLException { - if (!stmt.getOperation().isMutation()) { - throw new ExecuteUpdateNotApplicableException(query); + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; } - return stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); - } - public MutationPlan compileMutation(String sql) throws SQLException { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection)); + @Override + public Operation getOperation() { + return Operation.UPGRADE; } - CompilableStatement stmt = parseStatement(sql); - return compileMutation(stmt, sql); - } - public boolean isSystemTable(CompilableStatement stmt) { - boolean systemTable = false; - TableName tableName = null; - if (stmt instanceof ExecutableSelectStatement) { - TableNode from = ((ExecutableSelectStatement)stmt).getFrom(); - if (from instanceof NamedTableNode) { - tableName = ((NamedTableNode)from).getName(); - } - } else if (stmt instanceof ExecutableUpsertStatement) { - tableName = ((ExecutableUpsertStatement)stmt).getTable().getName(); - } else if (stmt 
instanceof ExecutableDeleteStatement) { - tableName = ((ExecutableDeleteStatement)stmt).getTable().getName(); - } else if (stmt instanceof ExecutableAddColumnStatement) { - tableName = ((ExecutableAddColumnStatement)stmt).getTable().getName(); + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan(Collections.singletonList("EXECUTE UPGRADE")); } - if (tableName != null && PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA - .equals(tableName.getSchemaName())) { - systemTable = true; + @Override + public QueryPlan getQueryPlan() { + return null; } - return systemTable; - } + @Override + public StatementContext getContext() { + return new StatementContext(stmt); + } - public QueryLogger createQueryLogger(CompilableStatement stmt, String sql) throws SQLException { - if (connection.getLogLevel() == LogLevel.OFF) { - return QueryLogger.NO_OP_INSTANCE; + @Override + public TableRef getTargetRef() { + return TableRef.EMPTY_TABLE_REF; } - QueryLogger queryLogger = QueryLogger.getInstance(connection, isSystemTable(stmt)); - QueryLoggerUtil.logInitialDetails(queryLogger, connection.getTenantId(), - connection.getQueryServices(), sql, getParameters()); - return queryLogger; - } + @Override + public MutationState execute() throws SQLException { + PhoenixConnection phxConn = stmt.getConnection(); + Properties props = new Properties(); + phxConn.getQueryServices().upgradeSystemTables(phxConn.getURL(), props); + return MutationState.emptyMutationState(-1, -1, phxConn); + } - public AuditQueryLogger createAuditQueryLogger(CompilableStatement stmt, String sql) throws SQLException { - if (connection.getAuditLogLevel() == LogLevel.OFF) { - return AuditQueryLogger.NO_OP_INSTANCE; + @Override + public Long getEstimatedRowsToScan() throws SQLException { + return 0l; } - AuditQueryLogger queryLogger = AuditQueryLogger.getInstance(connection, isSystemTable(stmt)); - QueryLoggerUtil.logInitialDetails(queryLogger, connection.getTenantId(), - connection.getQueryServices(), sql, getParameters()); - return queryLogger; - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Execute query: " + sql, connection)); + @Override + public Long getEstimatedBytesToScan() throws SQLException { + return 0l; } - - CompilableStatement stmt = parseStatement(sql); - if (stmt.getOperation().isMutation()) { - throw new ExecuteQueryNotApplicableException(sql); + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return 0l; } - return executeQuery(stmt, createQueryLogger(stmt, sql)); + }; } + } - @Override - public int executeUpdate(String sql) throws SQLException { - CompilableStatement stmt = preExecuteUpdate(sql); - int updateCount = executeMutation(stmt, createAuditQueryLogger(stmt, sql)); - flushIfNecessary(); - return updateCount; - } + private static class ExecutableAddColumnStatement extends AddColumnStatement + implements CompilableStatement { - /** - * Executes the given SQL statement similar to JDBC API executeUpdate() but also returns the - * updated or non-updated row as Result object back to the client. This must be used with - * auto-commit Connection. This makes the operation atomic. - * If the row is successfully updated, return the updated row, otherwise if the row - * cannot be updated, return non-updated row. - * - * @param sql The SQL DML statement, UPSERT or DELETE for Phoenix. 
- * @return The pair of int and Tuple, where int represents value 1 for successful row - * update and 0 for non-successful row update, and Tuple represents the state of the row. - * @throws SQLException If the statement cannot be executed. - */ - public Pair executeUpdateReturnRow(String sql) throws SQLException { - if (!connection.getAutoCommit()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_COMMIT_NOT_ENABLED).build() - .buildException(); - } - CompilableStatement stmt = preExecuteUpdate(sql); - Pair result = - executeMutation(stmt, createAuditQueryLogger(stmt, sql), ReturnResult.ROW); - flushIfNecessary(); - return result; + ExecutableAddColumnStatement(NamedTableNode table, PTableType tableType, + List columnDefs, boolean ifNotExists, + ListMultimap> props, boolean cascade, List indexes) { + super(table, tableType, columnDefs, ifNotExists, props, cascade, indexes); } - private CompilableStatement preExecuteUpdate(String sql) throws SQLException { - CompilableStatement stmt = parseStatement(sql); - if (!stmt.getOperation().isMutation) { - throw new ExecuteUpdateNotApplicableException(sql); + @SuppressWarnings("unchecked") + @Override + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan( + Collections.singletonList("ALTER " + getTableType() + " ADD COLUMN")); } - if (!batch.isEmpty()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) - .build().buildException(); + + @Override + public MutationState execute() throws SQLException { + MetaDataClient client = new MetaDataClient(getContext().getConnection()); + return client.addColumn(ExecutableAddColumnStatement.this); } - return stmt; + }; } + } - private void flushIfNecessary() throws SQLException { - if (connection.getAutoFlush()) { - connection.flush(); - } + private static class ExecutableDropColumnStatement extends DropColumnStatement + implements CompilableStatement { + + ExecutableDropColumnStatement(NamedTableNode table, PTableType tableType, + List columnRefs, boolean ifExists) { + super(table, tableType, columnRefs, ifExists); } - + + @SuppressWarnings("unchecked") @Override - public boolean execute(String sql) throws SQLException { - CompilableStatement stmt = parseStatement(sql); - if (stmt.getOperation().isMutation()) { - if (!batch.isEmpty()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) - .build().buildException(); - } - executeMutation(stmt, createAuditQueryLogger(stmt, sql)); - flushIfNecessary(); - return false; + public MutationPlan compilePlan(final PhoenixStatement stmt, Sequence.ValueOp seqAction) + throws SQLException { + final StatementContext context = new StatementContext(stmt); + return new BaseMutationPlan(context, this.getOperation()) { + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return new ExplainPlan( + Collections.singletonList("ALTER " + getTableType() + " DROP COLUMN")); } - - executeQuery(stmt, createQueryLogger(stmt, sql)); - return true; - } - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - return execute(sql); + @Override + public MutationState execute() throws SQLException { + MetaDataClient client = new 
MetaDataClient(getContext().getConnection()); + return client.dropColumn(ExecutableDropColumnStatement.this); + } + }; } + } + protected static class ExecutableNodeFactory extends ParseNodeFactory { @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - return execute(sql); + public ExecutableSelectStatement select(TableNode from, HintNode hint, boolean isDistinct, + List select, ParseNode where, List groupBy, ParseNode having, + List orderBy, LimitNode limit, OffsetNode offset, int bindCount, + boolean isAggregate, boolean hasSequence, List selects, + Map udfParseNodes) { + return new ExecutableSelectStatement(from, hint, isDistinct, select, where, + groupBy == null ? Collections. emptyList() : groupBy, having, + orderBy == null ? Collections. emptyList() : orderBy, limit, offset, bindCount, + isAggregate, hasSequence, + selects == null ? Collections. emptyList() : selects, udfParseNodes); } @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - return execute(sql); + public ExecutableUpsertStatement upsert(NamedTableNode table, HintNode hintNode, + List columns, List values, SelectStatement select, int bindCount, + Map udfParseNodes, List> onDupKeyPairs) { + return new ExecutableUpsertStatement(table, hintNode, columns, values, select, bindCount, + udfParseNodes, onDupKeyPairs); } @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - return executeUpdate(sql); + public ExecutableDeclareCursorStatement declareCursor(CursorName cursor, + SelectStatement select) { + return new ExecutableDeclareCursorStatement(cursor, select); } @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - return executeUpdate(sql); + public ExecutableFetchStatement fetch(CursorName cursor, boolean isNext, int fetchLimit) { + return new ExecutableFetchStatement(cursor, isNext, fetchLimit); } @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - return executeUpdate(sql); + public ExecutableOpenStatement open(CursorName cursor) { + return new ExecutableOpenStatement(cursor); } @Override - public PhoenixConnection getConnection() { - return connection; + public ExecutableCloseStatement close(CursorName cursor) { + return new ExecutableCloseStatement(cursor); } @Override - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; + public ExecutableDeleteStatement delete(NamedTableNode table, HintNode hint, + ParseNode whereNode, List orderBy, LimitNode limit, int bindCount, + Map udfParseNodes) { + return new ExecutableDeleteStatement(table, hint, whereNode, orderBy, limit, bindCount, + udfParseNodes); } @Override - public int getFetchSize() throws SQLException { - if (fetchSize > 0) { - return fetchSize; - } else { - return connection.getQueryServices().getProps() - .getInt(QueryServices.SCAN_CACHE_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE); - } + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows, Map cqCounters, boolean noVerify) { + return new ExecutableCreateTableStatement(tableName, props, columns, pkConstraint, splits, + tableType, ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, + cqCounters, noVerify); } @Override - 
public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLFeatureNotSupportedException(); + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows, Map cqCounters) { + return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, + baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, false); } @Override - public int getMaxFieldSize() throws SQLException { - return 0; // TODO: 4000? + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows) { + return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, + baseTableName, tableTypeIdNode, bindCount, immutableRows, null); } @Override - public int getMaxRows() throws SQLException { - return maxRows; + public CreateCDCStatement createCDC(NamedNode cdcObj, TableName dataTable, + Set includeScopes, ListMultimap> props, + boolean ifNotExists, int bindCount) { + return new ExecutableCreateCDCStatement(cdcObj, dataTable, includeScopes, props, ifNotExists, + bindCount); } @Override - public boolean getMoreResults() throws SQLException { - return false; + public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) { + return new ExecutableCreateSchemaStatement(schemaName, ifNotExists); } @Override - public boolean getMoreResults(int current) throws SQLException { - return false; + public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith, + ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, + boolean cycle, boolean ifNotExists, int bindCount) { + return new ExecutableCreateSequenceStatement(tableName, startsWith, incrementBy, cacheSize, + minValue, maxValue, cycle, ifNotExists, bindCount); } - // For testing - public QueryPlan getQueryPlan() { - return getLastQueryPlan(); - } - @Override - public ResultSet getResultSet() throws SQLException { - ResultSet rs = getLastResultSet(); - return rs; + public CreateFunctionStatement createFunction(PFunction functionInfo, boolean temporary, + boolean isReplace) { + return new ExecutableCreateFunctionStatement(functionInfo, temporary, isReplace); } @Override - public int getResultSetConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; + public AddJarsStatement addJars(List jarPaths) { + return new ExecutableAddJarsStatement(jarPaths); } @Override - public int getResultSetHoldability() throws SQLException { - // TODO: not sure this matters - return ResultSet.CLOSE_CURSORS_AT_COMMIT; + public DeleteJarStatement deleteJar(LiteralParseNode jarPath) { + return new ExecutableDeleteJarStatement(jarPath); } @Override - public int getResultSetType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; + public ListJarsStatement listJars() { + return new ExecutableListJarsStatement(); } @Override - public Operation getUpdateOperation() { - return getLastUpdateOperation(); - } - - @Override - public int getUpdateCount() throws SQLException { - int updateCount = getLastUpdateCount(); - // Only first call can get the update count, otherwise - // some SQL clients get into an 
infinite loop when an - // update occurs. - setLastUpdateCount(NO_UPDATE); - return updateCount; + public DropSequenceStatement dropSequence(TableName tableName, boolean ifExists, + int bindCount) { + return new ExecutableDropSequenceStatement(tableName, ifExists, bindCount); } @Override - public SQLWarning getWarnings() throws SQLException { - return null; + public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, + IndexKeyConstraint ikConstraint, List includeColumns, List splits, + ListMultimap> props, boolean ifNotExists, IndexType indexType, + boolean async, int bindCount, Map udfParseNodes, ParseNode where) { + return new ExecutableCreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, + splits, props, ifNotExists, indexType, async, bindCount, udfParseNodes, where); } @Override - public boolean isClosed() throws SQLException { - return isClosed; + public AddColumnStatement addColumn(NamedTableNode table, PTableType tableType, + List columnDefs, boolean ifNotExists, + ListMultimap> props, boolean cascade, List indexes) { + return new ExecutableAddColumnStatement(table, tableType, columnDefs, ifNotExists, props, + cascade, indexes); } @Override - public boolean isPoolable() throws SQLException { - return false; + public DropColumnStatement dropColumn(NamedTableNode table, PTableType tableType, + List columnNodes, boolean ifExists) { + return new ExecutableDropColumnStatement(table, tableType, columnNodes, ifExists); } @Override - public void setCursorName(String name) throws SQLException { - throw new SQLFeatureNotSupportedException(); + public DropTableStatement dropTable(TableName tableName, PTableType tableType, boolean ifExists, + boolean cascade) { + return new ExecutableDropTableStatement(tableName, tableType, ifExists, cascade); } @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - // TODO: any escaping we need to do? + public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) { + return new ExecutableDropSchemaStatement(schemaName, ifExists, cascade); } @Override - public void setFetchDirection(int direction) throws SQLException { - if (direction != ResultSet.FETCH_FORWARD) { - throw new SQLFeatureNotSupportedException(); - } + public UseSchemaStatement useSchema(String schemaName) { + return new ExecutableUseSchemaStatement(schemaName); } @Override - public void setFetchSize(int fetchSize) throws SQLException { - // TODO: map to Scan.setBatch() ? 
- this.fetchSize = fetchSize; + public DropFunctionStatement dropFunction(String functionName, boolean ifExists) { + return new ExecutableDropFunctionStatement(functionName, ifExists); } @Override - public void setMaxFieldSize(int max) throws SQLException { - throw new SQLFeatureNotSupportedException(); + public DropIndexStatement dropIndex(NamedNode indexName, TableName tableName, + boolean ifExists) { + return new ExecutableDropIndexStatement(indexName, tableName, ifExists); } @Override - public void setMaxRows(int max) throws SQLException { - this.maxRows = max; + public DropCDCStatement dropCDC(NamedNode cdcObjName, TableName tableName, boolean ifExists) { + return new ExecutableDropCDCStatement(cdcObjName, tableName, ifExists); } @Override - public void setPoolable(boolean poolable) throws SQLException { - if (poolable) { - throw new SQLFeatureNotSupportedException(); - } + public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, + boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, + ListMultimap> props) { + return new ExecutableAlterIndexStatement(indexTableNode, dataTableName, ifExists, state, + isRebuildAll, async, props); } @Override - /** - * When setting the query timeout via JDBC timeouts must be expressed in seconds. Therefore - * we need to convert the default timeout to milliseconds for internal use. - */ - public void setQueryTimeout(int seconds) throws SQLException { - if (seconds < 0) { - this.queryTimeoutMillis = getDefaultQueryTimeoutMillis(); - } else if (seconds == 0) { - this.queryTimeoutMillis = Integer.MAX_VALUE; - } else { - this.queryTimeoutMillis = seconds * 1000; - } + public TraceStatement trace(boolean isTraceOn, double samplingRate) { + return new ExecutableTraceStatement(isTraceOn, samplingRate); } @Override - /** - * When getting the query timeout via JDBC timeouts must be expressed in seconds. Therefore - * we need to convert the default millisecond timeout to seconds. - */ - public int getQueryTimeout() throws SQLException { - // Convert milliseconds to seconds by taking the CEIL up to the next second - int scaledValue; - try { - scaledValue = IntMath.checkedAdd(queryTimeoutMillis, 999); - } catch (ArithmeticException e) { - scaledValue = Integer.MAX_VALUE; - } - return scaledValue / 1000; - } - - /** - * Returns the configured timeout in milliseconds. This - * internally enables the of use millisecond timeout granularity - * and honors the exact value configured by phoenix.query.timeoutMs. 
- */ - public int getQueryTimeoutInMillis() { - return queryTimeoutMillis; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return iface.isInstance(this); + public AlterSessionStatement alterSession(Map props) { + return new ExecutableAlterSessionStatement(props); } - @SuppressWarnings("unchecked") @Override - public T unwrap(Class iface) throws SQLException { - if (!iface.isInstance(this)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) - .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()) - .build().buildException(); - } - return (T)this; + public ExplainStatement explain(BindableStatement statement, ExplainType explainType) { + return new ExecutableExplainStatement(statement, explainType); } @Override - public void closeOnCompletion() throws SQLException { - closeOnCompletion = true; + public UpdateStatisticsStatement updateStatistics(NamedTableNode table, + StatisticsCollectionScope scope, Map props) { + return new ExecutableUpdateStatisticsStatement(table, scope, props); } @Override - public boolean isCloseOnCompletion() throws SQLException { - return closeOnCompletion; - } - - private PhoenixResultSet getLastResultSet() { - return lastResultSet; - } - - void setLastResultSet(PhoenixResultSet lastResultSet) { - this.lastResultSet = lastResultSet; - } - - private int getLastUpdateCount() { - return lastUpdateCount; - } - - private void setLastUpdateCount(int lastUpdateCount) { - this.lastUpdateCount = lastUpdateCount; + public ExecuteUpgradeStatement executeUpgrade() { + return new ExecutableExecuteUpgradeStatement(); } - private String getLastUpdateTable() { - return lastUpdateTable; - } - - private void setLastUpdateTable(String lastUpdateTable) { - if (!Strings.isNullOrEmpty(lastUpdateTable)) { - this.lastUpdateTable = lastUpdateTable; - } - if (getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.TABLE_NAME.getLogLevel())) { - updateActivityOnConnection(ActivityLogInfo.TABLE_NAME, this.lastUpdateTable); - } + @Override + public ExecutableChangePermsStatement changePermsStatement(String permsString, + boolean isSchemaName, TableName tableName, String schemaName, boolean isGroupName, + LiteralParseNode userOrGroup, boolean isGrantStatement) { + return new ExecutableChangePermsStatement(permsString, isSchemaName, tableName, schemaName, + isGroupName, userOrGroup, isGrantStatement); } - private Operation getLastUpdateOperation() { - return lastUpdateOperation; + @Override + public ShowTablesStatement showTablesStatement(String schema, String pattern) { + return new ExecutableShowTablesStatement(schema, pattern); } - private void setLastUpdateOperation(Operation lastUpdateOperation) { - this.lastUpdateOperation = lastUpdateOperation; - if (getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.OP_NAME.getLogLevel())) { - updateActivityOnConnection(ActivityLogInfo.OP_NAME, this.lastUpdateOperation.toString()); - } - if (getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.OP_TIME.getLogLevel())) { - updateActivityOnConnection(ActivityLogInfo.OP_TIME, String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); - } + @Override + public ShowSchemasStatement showSchemasStatement(String pattern) { + return new ExecutableShowSchemasStatement(pattern); } - private QueryPlan getLastQueryPlan() { - return lastQueryPlan; + @Override + public ShowCreateTable showCreateTable(TableName tableName) { + return new ExecutableShowCreateTable(tableName); } - private 
void setLastQueryPlan(QueryPlan lastQueryPlan) { - this.lastQueryPlan = lastQueryPlan; + } + static class PhoenixStatementParser extends SQLParser { + PhoenixStatementParser(String query, ParseNodeFactory nodeFactory) throws IOException { + super(query, nodeFactory); } - private void updateActivityOnConnection(ActivityLogInfo item, String value) { - getConnection().getActivityLogger().log(item, value); + PhoenixStatementParser(Reader reader) throws IOException { + super(reader); } - private void throwIfUnallowedUserDefinedFunctions(Map udfParseNodes) throws SQLException { - if (!connection - .getQueryServices() - .getProps() - .getBoolean(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, - QueryServicesOptions.DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS)) { - if (udfParseNodes.isEmpty()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_USER_DEFINED_FUNCTIONS) - .build().buildException(); - } - throw new FunctionNotFoundException(udfParseNodes.keySet().toString()); - } - } - - /** - * Check if the statement is a DDL and if there are any uncommitted mutations Throw or log the - * message - */ - private void checkIfDDLStatementandMutationState(final CompilableStatement stmt, - MutationState state) throws SQLException { - boolean throwUncommittedMutation = - connection.getQueryServices().getProps().getBoolean( - QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, - QueryServicesOptions.DEFAULT_PENDING_MUTATIONS_DDL_THROW); - if (stmt instanceof MutableStatement && !(stmt instanceof DMLStatement) - && state.getNumRows() > 0) { - if (throwUncommittedMutation) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS).build() - .buildException(); - } else { - LOGGER.warn( - "There are Uncommitted mutations, which will be dropped on the execution of this DDL statement."); - } - } + @Override + public CompilableStatement nextStatement(ParseNodeFactory nodeFactory) throws SQLException { + return (CompilableStatement) super.nextStatement(nodeFactory); } - public Calendar getLocalCalendar() { - return localCalendar; - } + @Override + public CompilableStatement parseStatement() throws SQLException { + return (CompilableStatement) super.parseStatement(); + } + } + + public Format getFormatter(PDataType type) { + return connection.getFormatter(type); + } + + protected final List batch = Lists.newArrayList(); + + @Override + public void addBatch(String sql) throws SQLException { + batch.add(new PhoenixPreparedStatement(connection, sql)); + } + + @Override + public void clearBatch() throws SQLException { + batch.clear(); + } + + /** + * Execute the current batch of statements. If any exception occurs during execution, a + * {@link java.sql.BatchUpdateException} is thrown which compposes the update counts for + * statements executed so far. 
+ */ + @Override + public int[] executeBatch() throws SQLException { + int i = 0; + int[] returnCodes = new int[batch.size()]; + Arrays.fill(returnCodes, -1); + boolean autoCommit = connection.getAutoCommit(); + connection.setAutoCommit(false); + try { + for (i = 0; i < returnCodes.length; i++) { + PhoenixPreparedStatement statement = batch.get(i); + statement.executeForBatch(); + returnCodes[i] = statement.getUpdateCount(); + } + // Flush all changes in batch if auto flush is true + flushIfNecessary(); + // If we make it all the way through, clear the batch + clearBatch(); + if (autoCommit) { + connection.commit(); + } + return returnCodes; + } catch (SQLException t) { + if (i == returnCodes.length) { + // Exception after for loop, perhaps in commit(), discard returnCodes. + throw new BatchUpdateException(t); + } else { + returnCodes[i] = Statement.EXECUTE_FAILED; + throw new BatchUpdateException(returnCodes, t); + } + } finally { + connection.setAutoCommit(autoCommit); + } + } + + @Override + public void cancel() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void clearWarnings() throws SQLException { + } + + @Override + public void close() throws SQLException { + try { + clearResultSet(); + } finally { + try { + connection.removeStatement(this); + } finally { + isClosed = true; + } + } + } + + // From the ResultSet javadoc: + // A ResultSet object is automatically closed when the Statement object that generated it is + // closed, re-executed, or used to retrieve the next result from a sequence of multiple results. + private void clearResultSet() throws SQLException { + if (lastResultSet != null) { + try { + lastResultSet.close(); + } finally { + lastResultSet = null; + } + } + } + + // Called from ResultSet.close(). rs is already closed. + // We use a separate function to avoid calling close() again + void removeResultSet(ResultSet rs) throws SQLException { + if (rs == lastResultSet) { + lastResultSet = null; + if (closeOnCompletion) { + this.close(); + } + } + } + + public List getParameters() { + return Collections. 
emptyList(); + } + + protected CompilableStatement parseStatement(String sql) throws SQLException { + PhoenixStatementParser parser = null; + try { + parser = new PhoenixStatementParser(sql, new ExecutableNodeFactory()); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + CompilableStatement statement = parser.parseStatement(); + return statement; + } + + public QueryPlan optimizeQuery(String sql) throws SQLException { + QueryPlan plan = compileQuery(sql); + return connection.getQueryServices().getOptimizer().optimize(this, plan); + } + + public QueryPlan compileQuery(String sql) throws SQLException { + CompilableStatement stmt = parseStatement(sql); + return compileQuery(stmt, sql); + } + + public QueryPlan compileQuery(CompilableStatement stmt, String query) throws SQLException { + if (stmt.getOperation().isMutation()) { + throw new ExecuteQueryNotApplicableException(query); + } + return stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); + } + + public MutationPlan compileMutation(CompilableStatement stmt, String query) throws SQLException { + if (!stmt.getOperation().isMutation()) { + throw new ExecuteUpdateNotApplicableException(query); + } + return stmt.compilePlan(this, Sequence.ValueOp.VALIDATE_SEQUENCE); + } + + public MutationPlan compileMutation(String sql) throws SQLException { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection)); + } + CompilableStatement stmt = parseStatement(sql); + return compileMutation(stmt, sql); + } + + public boolean isSystemTable(CompilableStatement stmt) { + boolean systemTable = false; + TableName tableName = null; + if (stmt instanceof ExecutableSelectStatement) { + TableNode from = ((ExecutableSelectStatement) stmt).getFrom(); + if (from instanceof NamedTableNode) { + tableName = ((NamedTableNode) from).getName(); + } + } else if (stmt instanceof ExecutableUpsertStatement) { + tableName = ((ExecutableUpsertStatement) stmt).getTable().getName(); + } else if (stmt instanceof ExecutableDeleteStatement) { + tableName = ((ExecutableDeleteStatement) stmt).getTable().getName(); + } else if (stmt instanceof ExecutableAddColumnStatement) { + tableName = ((ExecutableAddColumnStatement) stmt).getTable().getName(); + } + + if ( + tableName != null + && PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(tableName.getSchemaName()) + ) { + systemTable = true; + } + + return systemTable; + } + + public QueryLogger createQueryLogger(CompilableStatement stmt, String sql) throws SQLException { + if (connection.getLogLevel() == LogLevel.OFF) { + return QueryLogger.NO_OP_INSTANCE; + } + + QueryLogger queryLogger = QueryLogger.getInstance(connection, isSystemTable(stmt)); + QueryLoggerUtil.logInitialDetails(queryLogger, connection.getTenantId(), + connection.getQueryServices(), sql, getParameters()); + return queryLogger; + } + + public AuditQueryLogger createAuditQueryLogger(CompilableStatement stmt, String sql) + throws SQLException { + if (connection.getAuditLogLevel() == LogLevel.OFF) { + return AuditQueryLogger.NO_OP_INSTANCE; + } + + AuditQueryLogger queryLogger = AuditQueryLogger.getInstance(connection, isSystemTable(stmt)); + QueryLoggerUtil.logInitialDetails(queryLogger, connection.getTenantId(), + connection.getQueryServices(), sql, getParameters()); + return queryLogger; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, 
connection)); + } + + CompilableStatement stmt = parseStatement(sql); + if (stmt.getOperation().isMutation()) { + throw new ExecuteQueryNotApplicableException(sql); + } + return executeQuery(stmt, createQueryLogger(stmt, sql)); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + CompilableStatement stmt = preExecuteUpdate(sql); + int updateCount = executeMutation(stmt, createAuditQueryLogger(stmt, sql)); + flushIfNecessary(); + return updateCount; + } + + /** + * Executes the given SQL statement similar to JDBC API executeUpdate() but also returns the + * updated or non-updated row as Result object back to the client. This must be used with + * auto-commit Connection. This makes the operation atomic. If the row is successfully updated, + * return the updated row, otherwise if the row cannot be updated, return non-updated row. + * @param sql The SQL DML statement, UPSERT or DELETE for Phoenix. + * @return The pair of int and Tuple, where int represents value 1 for successful row update and 0 + * for non-successful row update, and Tuple represents the state of the row. + * @throws SQLException If the statement cannot be executed. + */ + public Pair executeUpdateReturnRow(String sql) throws SQLException { + if (!connection.getAutoCommit()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_COMMIT_NOT_ENABLED).build() + .buildException(); + } + CompilableStatement stmt = preExecuteUpdate(sql); + Pair result = + executeMutation(stmt, createAuditQueryLogger(stmt, sql), ReturnResult.ROW); + flushIfNecessary(); + return result; + } + + private CompilableStatement preExecuteUpdate(String sql) throws SQLException { + CompilableStatement stmt = parseStatement(sql); + if (!stmt.getOperation().isMutation) { + throw new ExecuteUpdateNotApplicableException(sql); + } + if (!batch.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) + .build().buildException(); + } + return stmt; + } + + private void flushIfNecessary() throws SQLException { + if (connection.getAutoFlush()) { + connection.flush(); + } + } + + @Override + public boolean execute(String sql) throws SQLException { + CompilableStatement stmt = parseStatement(sql); + if (stmt.getOperation().isMutation()) { + if (!batch.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH) + .build().buildException(); + } + executeMutation(stmt, createAuditQueryLogger(stmt, sql)); + flushIfNecessary(); + return false; + } + + executeQuery(stmt, createQueryLogger(stmt, sql)); + return true; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return execute(sql); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return executeUpdate(sql); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return executeUpdate(sql); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return executeUpdate(sql); + } + + @Override + public PhoenixConnection getConnection() { + return connection; + } + + @Override + public int getFetchDirection() throws SQLException { + return ResultSet.FETCH_FORWARD; + } + + 
@Override + public int getFetchSize() throws SQLException { + if (fetchSize > 0) { + return fetchSize; + } else { + return connection.getQueryServices().getProps().getInt(QueryServices.SCAN_CACHE_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE); + } + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + return 0; // TODO: 4000? + } + + @Override + public int getMaxRows() throws SQLException { + return maxRows; + } + + @Override + public boolean getMoreResults() throws SQLException { + return false; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return false; + } + + // For testing + public QueryPlan getQueryPlan() { + return getLastQueryPlan(); + } + + @Override + public ResultSet getResultSet() throws SQLException { + ResultSet rs = getLastResultSet(); + return rs; + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetHoldability() throws SQLException { + // TODO: not sure this matters + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public int getResultSetType() throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public Operation getUpdateOperation() { + return getLastUpdateOperation(); + } + + @Override + public int getUpdateCount() throws SQLException { + int updateCount = getLastUpdateCount(); + // Only first call can get the update count, otherwise + // some SQL clients get into an infinite loop when an + // update occurs. + setLastUpdateCount(NO_UPDATE); + return updateCount; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public boolean isClosed() throws SQLException { + return isClosed; + } + + @Override + public boolean isPoolable() throws SQLException { + return false; + } + + @Override + public void setCursorName(String name) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + // TODO: any escaping we need to do? + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + if (direction != ResultSet.FETCH_FORWARD) { + throw new SQLFeatureNotSupportedException(); + } + } + + @Override + public void setFetchSize(int fetchSize) throws SQLException { + // TODO: map to Scan.setBatch() ? + this.fetchSize = fetchSize; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + this.maxRows = max; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + if (poolable) { + throw new SQLFeatureNotSupportedException(); + } + } + + @Override + /** + * When setting the query timeout via JDBC timeouts must be expressed in seconds. Therefore we + * need to convert the default timeout to milliseconds for internal use. 
+ */ + public void setQueryTimeout(int seconds) throws SQLException { + if (seconds < 0) { + this.queryTimeoutMillis = getDefaultQueryTimeoutMillis(); + } else if (seconds == 0) { + this.queryTimeoutMillis = Integer.MAX_VALUE; + } else { + this.queryTimeoutMillis = seconds * 1000; + } + } + + @Override + /** + * When getting the query timeout via JDBC timeouts must be expressed in seconds. Therefore we + * need to convert the default millisecond timeout to seconds. + */ + public int getQueryTimeout() throws SQLException { + // Convert milliseconds to seconds by taking the CEIL up to the next second + int scaledValue; + try { + scaledValue = IntMath.checkedAdd(queryTimeoutMillis, 999); + } catch (ArithmeticException e) { + scaledValue = Integer.MAX_VALUE; + } + return scaledValue / 1000; + } + + /** + * Returns the configured timeout in milliseconds. This internally enables the of use millisecond + * timeout granularity and honors the exact value configured by phoenix.query.timeoutMs. + */ + public int getQueryTimeoutInMillis() { + return queryTimeoutMillis; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isInstance(this); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (!iface.isInstance(this)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CLASS_NOT_UNWRAPPABLE) + .setMessage(this.getClass().getName() + " not unwrappable from " + iface.getName()).build() + .buildException(); + } + return (T) this; + } + + @Override + public void closeOnCompletion() throws SQLException { + closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return closeOnCompletion; + } + + private PhoenixResultSet getLastResultSet() { + return lastResultSet; + } + + void setLastResultSet(PhoenixResultSet lastResultSet) { + this.lastResultSet = lastResultSet; + } + + private int getLastUpdateCount() { + return lastUpdateCount; + } + + private void setLastUpdateCount(int lastUpdateCount) { + this.lastUpdateCount = lastUpdateCount; + } + + private String getLastUpdateTable() { + return lastUpdateTable; + } + + private void setLastUpdateTable(String lastUpdateTable) { + if (!Strings.isNullOrEmpty(lastUpdateTable)) { + this.lastUpdateTable = lastUpdateTable; + } + if ( + getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.TABLE_NAME.getLogLevel()) + ) { + updateActivityOnConnection(ActivityLogInfo.TABLE_NAME, this.lastUpdateTable); + } + } + + private Operation getLastUpdateOperation() { + return lastUpdateOperation; + } + + private void setLastUpdateOperation(Operation lastUpdateOperation) { + this.lastUpdateOperation = lastUpdateOperation; + if (getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.OP_NAME.getLogLevel())) { + updateActivityOnConnection(ActivityLogInfo.OP_NAME, this.lastUpdateOperation.toString()); + } + if (getConnection().getActivityLogger().isLevelEnabled(ActivityLogInfo.OP_TIME.getLogLevel())) { + updateActivityOnConnection(ActivityLogInfo.OP_TIME, + String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); + } + } + + private QueryPlan getLastQueryPlan() { + return lastQueryPlan; + } + + private void setLastQueryPlan(QueryPlan lastQueryPlan) { + this.lastQueryPlan = lastQueryPlan; + + } + + private void updateActivityOnConnection(ActivityLogInfo item, String value) { + getConnection().getActivityLogger().log(item, value); + } + + private void throwIfUnallowedUserDefinedFunctions(Map udfParseNodes) + 
throws SQLException { + if ( + !connection.getQueryServices().getProps().getBoolean( + QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, + QueryServicesOptions.DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS) + ) { + if (udfParseNodes.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_USER_DEFINED_FUNCTIONS) + .build().buildException(); + } + throw new FunctionNotFoundException(udfParseNodes.keySet().toString()); + } + } + + /** + * Check if the statement is a DDL and if there are any uncommitted mutations Throw or log the + * message + */ + private void checkIfDDLStatementandMutationState(final CompilableStatement stmt, + MutationState state) throws SQLException { + boolean throwUncommittedMutation = connection.getQueryServices().getProps().getBoolean( + QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, + QueryServicesOptions.DEFAULT_PENDING_MUTATIONS_DDL_THROW); + if ( + stmt instanceof MutableStatement && !(stmt instanceof DMLStatement) && state.getNumRows() > 0 + ) { + if (throwUncommittedMutation) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS).build().buildException(); + } else { + LOGGER.warn( + "There are Uncommitted mutations, which will be dropped on the execution of this DDL statement."); + } + } + } + + public Calendar getLocalCalendar() { + return localCalendar; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatementFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatementFactory.java index b1047d8e766..c444e962f97 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatementFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatementFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +18,5 @@ package org.apache.phoenix.jdbc; public interface PhoenixStatementFactory { - public PhoenixStatement newStatement(PhoenixConnection connection); + public PhoenixStatement newStatement(PhoenixConnection connection); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/RPCConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/RPCConnectionInfo.java index 1744fbfd929..abad4f41749 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/RPCConnectionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/RPCConnectionInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
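The two checks above (user defined functions, DDL with pending mutations) read their flags from the connection's QueryServices properties. A hypothetical client-side sketch of opting in to both behaviours, assuming the Properties handed to DriverManager flow through to getQueryServices().getProps() as other client settings do:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import org.apache.phoenix.query.QueryServices;

public class StatementFlagsExample {
  public static void main(String[] args) throws Exception {
    // Illustration only: allow UDFs and fail fast when a DDL statement is
    // issued while uncommitted mutations are pending (the default is to warn).
    Properties props = new Properties();
    props.setProperty(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB, "true");
    props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, "true");
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
      // statements created from this connection honor the flags above
    }
  }
}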
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,166 +29,164 @@ /** * ConnectionInfo class for org.apache.hadoop.hbase.client.RpcConnectionRegistry - * * @since 138 */ public class RPCConnectionInfo extends AbstractRPCConnectionInfo { - // We may be on an older HBase version, which does not even have RpcConnectionRegistry - private static final String BOOTSTRAP_NODES = "hbase.client.bootstrap.servers"; - private static final String RPC_REGISTRY_CLASS_NAME = - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"; - - protected RPCConnectionInfo(boolean isConnectionless, String principal, String keytab, - User user, String haGroup, String bootstrapServers, ConnectionType connectionType) { - super(isConnectionless, principal, keytab, user, haGroup, connectionType); - this.bootstrapServers = bootstrapServers; + // We may be on an older HBase version, which does not even have RpcConnectionRegistry + private static final String BOOTSTRAP_NODES = "hbase.client.bootstrap.servers"; + private static final String RPC_REGISTRY_CLASS_NAME = + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"; + + protected RPCConnectionInfo(boolean isConnectionless, String principal, String keytab, User user, + String haGroup, String bootstrapServers, ConnectionType connectionType) { + super(isConnectionless, principal, keytab, user, haGroup, connectionType); + this.bootstrapServers = bootstrapServers; + } + + @Override + public ReadOnlyProps asProps() { + if (isConnectionless) { + return ReadOnlyProps.EMPTY_PROPS; } - @Override - public ReadOnlyProps asProps() { - if (isConnectionless) { - return ReadOnlyProps.EMPTY_PROPS; - } - - Map connectionProps = getCommonProps(); + Map connectionProps = getCommonProps(); - connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - RPC_REGISTRY_CLASS_NAME); + connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RPC_REGISTRY_CLASS_NAME); - if (getBoostrapServers() != null) { - // This is already normalized to include ports - connectionProps.put(BOOTSTRAP_NODES, bootstrapServers); - } - - return connectionProps.isEmpty() ? ReadOnlyProps.EMPTY_PROPS - : new ReadOnlyProps(connectionProps.entrySet().iterator()); + if (getBoostrapServers() != null) { + // This is already normalized to include ports + connectionProps.put(BOOTSTRAP_NODES, bootstrapServers); } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bootstrapServers == null) ? 0 : bootstrapServers.hashCode()); - // Port is already provided in or normalized into bootstrapServers - return result; + return connectionProps.isEmpty() + ? ReadOnlyProps.EMPTY_PROPS + : new ReadOnlyProps(connectionProps.entrySet().iterator()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((bootstrapServers == null) ? 
0 : bootstrapServers.hashCode()); + // Port is already provided in or normalized into bootstrapServers + return result; + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + RPCConnectionInfo other = (RPCConnectionInfo) obj; + if (bootstrapServers == null) { + if (other.bootstrapServers != null) { + return false; + } + } else if (!bootstrapServers.equals(other.bootstrapServers)) { + return false; + } + // Port is already provided in or normalized into bootstrapServers + return true; + } + + @Override + public String toUrl() { + return PhoenixRuntime.JDBC_PROTOCOL_RPC + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + toString(); + } + + @Override + public ConnectionInfo withPrincipal(String principal) { + return new RPCConnectionInfo(isConnectionless, principal, keytab, user, haGroup, + bootstrapServers, connectionType); + } + + /** + * Builder parent for RPCConnectionInfo. + */ + protected static class Builder extends AbstractRPCConnectionInfo.Builder { + + public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) + throws SQLException { + super(url, config, props, info); + if (!HAS_RPC_REGISTRY) { + throw getMalFormedUrlException( + "Hbase version does not support Master registry for: " + url); + } } @Override - public boolean equals(Object obj) { - if (!super.equals(obj)) { - return false; + protected void normalize() throws SQLException { + if (hostsList != null && hostsList.isEmpty()) { + hostsList = null; + } + if (portString != null && portString.isEmpty()) { + portString = null; + } + + // We don't have a default port for RPC Connections + // Well, we do if we fall back to Master + boolean noServerListinURL = false; + if (hostsList == null) { + hostsList = getBootstrapServerAddr(); + noServerListinURL = true; + if (hostsList == null) { + // Fall back to MasterRegistry behaviour + normalizeMaster(); + return; + } + } else { + hostsList = hostsList.replaceAll("=", ":"); + } + + isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(hostsList); + + if (portString != null) { + try { + port = Integer.parseInt(portString); + if (port < 0) { + throw new Exception(); + } + } catch (Exception e) { + throw getMalFormedUrlException(url); } - RPCConnectionInfo other = (RPCConnectionInfo) obj; - if (bootstrapServers == null) { - if (other.bootstrapServers != null) { - return false; - } - } else if (!bootstrapServers.equals(other.bootstrapServers)) { - return false; + } + + if (isConnectionless) { + if (port != null) { + throw getMalFormedUrlException(url); + } else { + return; } - // Port is already provided in or normalized into bootstrapServers - return true; + } + + // RpcConnectionRegistry doesn't have a default port property, be we accept the legacy + // format + // from the URL if both host list and port is provided + if (port != null && !noServerListinURL) { + hostsList = normalizeHostsList(hostsList, port); + } } - @Override - public String toUrl() { - return PhoenixRuntime.JDBC_PROTOCOL_RPC + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + toString(); + public String getBootstrapServerAddr() { + String configuredBootstrapNodes = get(BOOTSTRAP_NODES); + if (!Strings.isNullOrEmpty(configuredBootstrapNodes)) { + return configuredBootstrapNodes; + } else { + return null; + } } @Override - public ConnectionInfo withPrincipal(String principal) { - return new RPCConnectionInfo(isConnectionless, principal, keytab, user, - haGroup, bootstrapServers, connectionType); + protected ConnectionInfo build() { + return new 
RPCConnectionInfo(isConnectionless, principal, keytab, user, haGroup, hostsList, + connectionType); } - /** - * Builder parent for RPCConnectionInfo. - */ - protected static class Builder extends AbstractRPCConnectionInfo.Builder { - - public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) - throws SQLException { - super(url, config, props, info); - if (!HAS_RPC_REGISTRY) { - throw getMalFormedUrlException( - "Hbase version does not support Master registry for: " + url); - } - } - - @Override - protected void normalize() throws SQLException { - if (hostsList != null && hostsList.isEmpty()) { - hostsList = null; - } - if (portString != null && portString.isEmpty()) { - portString = null; - } - - // We don't have a default port for RPC Connections - // Well, we do if we fall back to Master - boolean noServerListinURL = false; - if (hostsList == null) { - hostsList = getBootstrapServerAddr(); - noServerListinURL = true; - if (hostsList == null) { - // Fall back to MasterRegistry behaviour - normalizeMaster(); - return; - } - } else { - hostsList = hostsList.replaceAll("=", ":"); - } - - isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(hostsList); - - if (portString != null) { - try { - port = Integer.parseInt(portString); - if (port < 0) { - throw new Exception(); - } - } catch (Exception e) { - throw getMalFormedUrlException(url); - } - } - - if (isConnectionless) { - if (port != null) { - throw getMalFormedUrlException(url); - } else { - return; - } - } - - // RpcConnectionRegistry doesn't have a default port property, be we accept the legacy - // format - // from the URL if both host list and port is provided - if (port != null && !noServerListinURL) { - hostsList = normalizeHostsList(hostsList, port); - } - } - - public String getBootstrapServerAddr() { - String configuredBootstrapNodes = get(BOOTSTRAP_NODES); - if (!Strings.isNullOrEmpty(configuredBootstrapNodes)) { - return configuredBootstrapNodes; - } else { - return null; - } - } - - @Override - protected ConnectionInfo build() { - return new RPCConnectionInfo(isConnectionless, principal, keytab, user, haGroup, - hostsList, connectionType); - } - - public static boolean isRPC(Configuration config, ReadOnlyProps props, Properties info) { - // Default is handled by the caller - return config != null && RPC_REGISTRY_CLASS_NAME - .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); - } + public static boolean isRPC(Configuration config, ReadOnlyProps props, Properties info) { + // Default is handled by the caller + return config != null && RPC_REGISTRY_CLASS_NAME + .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java index 2ebdee7185f..31f865798bf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ZKConnectionInfo.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
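Since toUrl() above assembles RPC-registry URLs from PhoenixRuntime constants, here is a hypothetical connection sketch built the same way; the host:port list is a placeholder and nothing here is part of the patch:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.PhoenixRuntime;

public class RpcRegistryConnectExample {
  public static void main(String[] args) throws Exception {
    // Placeholder bootstrap servers; ports are spelled out because there is no
    // Phoenix-side default port for RpcConnectionRegistry (see normalize() above).
    String url = PhoenixRuntime.JDBC_PROTOCOL_RPC + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
        + "server1:16020,server2:16020";
    try (Connection conn = DriverManager.getConnection(url)) {
      // Alternatively, omit the host list from the URL and configure
      // hbase.client.bootstrap.servers (BOOTSTRAP_NODES above) in hbase-site.xml.
    }
  }
}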
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,322 +29,315 @@ import org.apache.phoenix.util.ReadOnlyProps; /** - * ConnectionInfo class for org.apache.hadoop.hbase.client.ZKConnectionRegistry - * - * This used to be the only supported Registry in Phoenix (and the only one implemented by HBase) - * + * ConnectionInfo class for org.apache.hadoop.hbase.client.ZKConnectionRegistry This used to be the + * only supported Registry in Phoenix (and the only one implemented by HBase) */ public class ZKConnectionInfo extends ConnectionInfo { - public static final String ZK_REGISTRY_NAME = - "org.apache.hadoop.hbase.client.ZKConnectionRegistry"; + public static final String ZK_REGISTRY_NAME = + "org.apache.hadoop.hbase.client.ZKConnectionRegistry"; + + private final Integer zkPort; + private final String zkRootNode; + private final String zkHosts; + + private ZKConnectionInfo(boolean isConnectionless, String principal, String keytab, User user, + String haGroup, String zkHosts, Integer zkPort, String zkRootNode, + ConnectionType connectionType) { + super(isConnectionless, principal, keytab, user, haGroup, connectionType); + this.zkPort = zkPort; + this.zkRootNode = zkRootNode; + this.zkHosts = zkHosts; + } + + public String getZkHosts() { + return zkHosts; + } + + public Integer getZkPort() { + return zkPort; + } + + public String getZkRootNode() { + return zkRootNode; + } + + @Override + public String getZookeeperConnectionString() { + // Normalized form includes ports + return getZkHosts(); + } + + @Override + public ReadOnlyProps asProps() { + if (isConnectionless) { + return ReadOnlyProps.EMPTY_PROPS; + } - private final Integer zkPort; - private final String zkRootNode; - private final String zkHosts; + Map connectionProps = getCommonProps(); + connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZK_REGISTRY_NAME); - private ZKConnectionInfo(boolean isConnectionless, String principal, String keytab, User user, - String haGroup, String zkHosts, Integer zkPort, String zkRootNode, - ConnectionType connectionType) { - super(isConnectionless, principal, keytab, user, haGroup, connectionType); - this.zkPort = zkPort; - this.zkRootNode = zkRootNode; - this.zkHosts = zkHosts; + if (getZkHosts() != null) { + // This has the highest priority + connectionProps.put(HConstants.CLIENT_ZOOKEEPER_QUORUM, getZkHosts()); } - - public String getZkHosts() { - return zkHosts; + // Port is already normalized into zkHosts + if (getZkRootNode() != null) { + connectionProps.put(HConstants.ZOOKEEPER_ZNODE_PARENT, getZkRootNode()); } - - public Integer getZkPort() { - return zkPort; + return connectionProps.isEmpty() + ? ReadOnlyProps.EMPTY_PROPS + : new ReadOnlyProps(connectionProps.entrySet().iterator()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((zkHosts == null) ? 
0 : zkHosts.hashCode()); + // Port is already included in zkHosts + result = prime * result + ((zkRootNode == null) ? 0 : zkRootNode.hashCode()); + result = prime * result + super.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; } - - public String getZkRootNode() { - return zkRootNode; + ZKConnectionInfo other = (ZKConnectionInfo) obj; + if (zkHosts == null) { + if (other.zkHosts != null) { + return false; + } + } else if (!zkHosts.equals(other.zkHosts)) { + return false; } - - @Override - public String getZookeeperConnectionString() { - // Normalized form includes ports - return getZkHosts(); + // Port is already normalized into zkHosts + if (zkRootNode == null) { + if (other.zkRootNode != null) { + return false; + } + } else if (!zkRootNode.equals(other.zkRootNode)) { + return false; } - - @Override - public ReadOnlyProps asProps() { - if (isConnectionless) { - return ReadOnlyProps.EMPTY_PROPS; - } - - Map connectionProps = getCommonProps(); - connectionProps.put(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ZK_REGISTRY_NAME); - - if (getZkHosts() != null) { - //This has the highest priority - connectionProps.put(HConstants.CLIENT_ZOOKEEPER_QUORUM, getZkHosts()); - } - //Port is already normalized into zkHosts - if (getZkRootNode() != null) { - connectionProps.put(HConstants.ZOOKEEPER_ZNODE_PARENT, getZkRootNode()); - } - return connectionProps.isEmpty() ? ReadOnlyProps.EMPTY_PROPS - : new ReadOnlyProps(connectionProps.entrySet().iterator()); + return true; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(zkHosts.replaceAll(":", "\\\\:")); + if (anyNotNull(zkPort, zkRootNode, principal, keytab)) { + sb.append(zkPort == null ? ":" : ":" + zkPort); } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((zkHosts == null) ? 0 : zkHosts.hashCode()); - //Port is already included in zkHosts - result = prime * result + ((zkRootNode == null) ? 0 : zkRootNode.hashCode()); - result = prime * result + super.hashCode(); - return result; + if (anyNotNull(zkRootNode, principal, keytab)) { + sb.append(zkRootNode == null ? ":" : ":" + zkRootNode); } - - @Override - public boolean equals(Object obj) { - if (!super.equals(obj)) { - return false; - } - ZKConnectionInfo other = (ZKConnectionInfo) obj; - if (zkHosts == null) { - if (other.zkHosts != null) { - return false; - } - } else if (!zkHosts.equals(other.zkHosts)) { - return false; - } - //Port is already normalized into zkHosts - if (zkRootNode == null) { - if (other.zkRootNode != null) { - return false; - } - } else if (!zkRootNode.equals(other.zkRootNode)) { - return false; - } - return true; + if (anyNotNull(principal, keytab)) { + sb.append(principal == null ? ":" : ":" + principal); } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(zkHosts.replaceAll(":", "\\\\:")); - if (anyNotNull(zkPort, zkRootNode, principal, keytab)) { - sb.append(zkPort == null ? ":" : ":" + zkPort); - } - if (anyNotNull(zkRootNode, principal, keytab)) { - sb.append(zkRootNode == null ? ":" : ":" + zkRootNode); - } - if (anyNotNull(principal, keytab)) { - sb.append(principal == null ? ":" : ":" + principal); - } - if (anyNotNull(keytab)) { - sb.append(keytab == null ? ":" : ":" + keytab); - } - return sb.toString(); + if (anyNotNull(keytab)) { + sb.append(keytab == null ? 
":" : ":" + keytab); } - - @Override - public String toUrl() { - return PhoenixRuntime.JDBC_PROTOCOL_ZK + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + toString(); + return sb.toString(); + } + + @Override + public String toUrl() { + return PhoenixRuntime.JDBC_PROTOCOL_ZK + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + toString(); + } + + @Override + public ConnectionInfo withPrincipal(String principal) { + return new ZKConnectionInfo(isConnectionless, principal, keytab, user, haGroup, zkHosts, zkPort, + zkRootNode, connectionType); + } + + /** + * Builder helper class for ZKConnectionInfo + */ + protected static class Builder extends ConnectionInfo.Builder { + private Integer zkPort; + private String zkRootNode; + private String zkHosts; + + public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { + super(url, config, props, info); } @Override - public ConnectionInfo withPrincipal(String principal) { - return new ZKConnectionInfo(isConnectionless, principal, keytab, user, - haGroup, zkHosts, zkPort, zkRootNode, connectionType); + protected ConnectionInfo create() throws SQLException { + parse(); + normalize(); + handleKerberosAndLogin(); + setHaGroup(); + return build(); } /** - * Builder helper class for ZKConnectionInfo - * + * Detect url with quorum:1,quorum:2 as HBase does not handle different port numbers for + * different quorum hostnames. */ - protected static class Builder extends ConnectionInfo.Builder { - private Integer zkPort; - private String zkRootNode; - private String zkHosts; - - public Builder(String url, Configuration config, ReadOnlyProps props, Properties info) { - super(url, config, props, info); - } - - @Override - protected ConnectionInfo create() throws SQLException { - parse(); - normalize(); - handleKerberosAndLogin(); - setHaGroup(); - return build(); - } - - /** - * Detect url with quorum:1,quorum:2 as HBase does not handle different port numbers - * for different quorum hostnames. 
- * @param portStr - * @return - */ - private boolean isMultiPortUrl(String portStr) { - int commaIndex = portStr.indexOf(','); - if (commaIndex > 0) { - try { - Integer.parseInt(portStr.substring(0, commaIndex)); - return true; - } catch (NumberFormatException otherE) { - } - } - return false; + private boolean isMultiPortUrl(String portStr) { + int commaIndex = portStr.indexOf(','); + if (commaIndex > 0) { + try { + Integer.parseInt(portStr.substring(0, commaIndex)); + return true; + } catch (NumberFormatException otherE) { } + } + return false; + } - private void parse() throws SQLException { - StringTokenizer tokenizer = getTokenizerWithoutProtocol(); - int nTokens = 0; - String[] tokens = new String[5]; - String token = null; - boolean wasDelimiter = false; - boolean first = true; - while (tokenizer.hasMoreTokens() && !(token = tokenizer.nextToken()).equals(TERMINATOR) - && nTokens < tokens.length) { - // This would mean we have an empty string for a token which is illegal - if (DELIMITERS.contains(token)) { - if (wasDelimiter && !first) { - tokens[nTokens++] = ""; - } - wasDelimiter = true; - } else { - tokens[nTokens++] = token; - wasDelimiter = false; - } - first = false; - } - // Look-forward to see if the last token is actually the C:\\ path - if (tokenizer.hasMoreTokens() && !TERMINATOR.equals(token)) { - String extraToken = tokenizer.nextToken(); - if (WINDOWS_SEPARATOR_CHAR == extraToken.charAt(0)) { - String prevToken = tokens[nTokens - 1]; - tokens[nTokens - 1] = prevToken + ":" + extraToken; - if (tokenizer.hasMoreTokens() - && !(token = tokenizer.nextToken()).equals(TERMINATOR)) { - throw getMalFormedUrlException(url); - } - } else { - throw getMalFormedUrlException(url); - } - } - int tokenIndex = 0; - if (nTokens > tokenIndex) { - zkHosts = tokens[tokenIndex++]; // Found quorum - if (nTokens > tokenIndex) { - try { - zkPort = Integer.parseInt(tokens[tokenIndex]); - if (zkPort < 0) { - throw getMalFormedUrlException(url); - } - tokenIndex++; // Found port - } catch (NumberFormatException e) { // No port information - if (tokens[tokenIndex].isEmpty()) { - tokenIndex++; // Found empty port - } - if (isMultiPortUrl(tokens[tokenIndex])) { - throw getMalFormedUrlException(url); - } - // Otherwise assume port is simply omitted - } - if (nTokens > tokenIndex) { - if (tokens[tokenIndex].startsWith("/") || tokens[tokenIndex].isEmpty()) { - zkRootNode = tokens[tokenIndex++]; // Found rootNode - } - if (nTokens > tokenIndex) { - principal = tokens[tokenIndex++]; // Found principal - if (nTokens > tokenIndex) { - keytab = tokens[tokenIndex++]; // Found keytabFile - // There's still more after, try to see if it's a windows file path - if (tokenIndex < tokens.length) { - String nextToken = tokens[tokenIndex++]; - // The next token starts with the directory separator, assume - // it's still the keytab path. 
- if (null != nextToken - && WINDOWS_SEPARATOR_CHAR == nextToken.charAt(0)) { - keytab = keytab + ":" + nextToken; - } - } - } - } - } - } - } + private void parse() throws SQLException { + StringTokenizer tokenizer = getTokenizerWithoutProtocol(); + int nTokens = 0; + String[] tokens = new String[5]; + String token = null; + boolean wasDelimiter = false; + boolean first = true; + while ( + tokenizer.hasMoreTokens() && !(token = tokenizer.nextToken()).equals(TERMINATOR) + && nTokens < tokens.length + ) { + // This would mean we have an empty string for a token which is illegal + if (DELIMITERS.contains(token)) { + if (wasDelimiter && !first) { + tokens[nTokens++] = ""; + } + wasDelimiter = true; + } else { + tokens[nTokens++] = token; + wasDelimiter = false; } - - protected ConnectionInfo build() { - return new ZKConnectionInfo(isConnectionless, principal, keytab, user, haGroup, zkHosts, - zkPort, zkRootNode, connectionType); + first = false; + } + // Look-forward to see if the last token is actually the C:\\ path + if (tokenizer.hasMoreTokens() && !TERMINATOR.equals(token)) { + String extraToken = tokenizer.nextToken(); + if (WINDOWS_SEPARATOR_CHAR == extraToken.charAt(0)) { + String prevToken = tokens[nTokens - 1]; + tokens[nTokens - 1] = prevToken + ":" + extraToken; + if (tokenizer.hasMoreTokens() && !(token = tokenizer.nextToken()).equals(TERMINATOR)) { + throw getMalFormedUrlException(url); + } + } else { + throw getMalFormedUrlException(url); } - - @Override - protected void normalize() throws SQLException { - // Treat empty as null - if (zkHosts != null && zkHosts.isEmpty()) { - zkHosts = null; + } + int tokenIndex = 0; + if (nTokens > tokenIndex) { + zkHosts = tokens[tokenIndex++]; // Found quorum + if (nTokens > tokenIndex) { + try { + zkPort = Integer.parseInt(tokens[tokenIndex]); + if (zkPort < 0) { + throw getMalFormedUrlException(url); } - if (zkRootNode != null && zkRootNode.isEmpty()) { - zkRootNode = null; + tokenIndex++; // Found port + } catch (NumberFormatException e) { // No port information + if (tokens[tokenIndex].isEmpty()) { + tokenIndex++; // Found empty port } - isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(zkHosts); - - if (isConnectionless) { - if (zkPort != null || zkRootNode != null) { - throw getMalFormedUrlException(url); - } else { - return; - } + if (isMultiPortUrl(tokens[tokenIndex])) { + throw getMalFormedUrlException(url); } - - // Normalize connInfo so that a url explicitly specifying versus implicitly inheriting - // the default values will both share the same ConnectionQueryServices. 
- if (zkPort == null) { - String zkPortString = get(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT); - if (zkPortString == null) { - zkPortString = get(HConstants.ZOOKEEPER_CLIENT_PORT); - } - if (zkPortString == null) { - zkPort = HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT; - } else { - zkPort = Integer.parseInt(zkPortString); - } + // Otherwise assume port is simply omitted + } + if (nTokens > tokenIndex) { + if (tokens[tokenIndex].startsWith("/") || tokens[tokenIndex].isEmpty()) { + zkRootNode = tokens[tokenIndex++]; // Found rootNode } - - if (zkHosts == null) { - zkHosts = get(HConstants.CLIENT_ZOOKEEPER_QUORUM); - if (zkHosts == null) { - zkHosts = get(HConstants.ZOOKEEPER_QUORUM); - } - if (zkHosts == null) { - throw getMalFormedUrlException( - "Quorum not specified and hbase.client.zookeeper.quorum is not set" - + " in configuration : " + url); + if (nTokens > tokenIndex) { + principal = tokens[tokenIndex++]; // Found principal + if (nTokens > tokenIndex) { + keytab = tokens[tokenIndex++]; // Found keytabFile + // There's still more after, try to see if it's a windows file path + if (tokenIndex < tokens.length) { + String nextToken = tokens[tokenIndex++]; + // The next token starts with the directory separator, assume + // it's still the keytab path. + if (null != nextToken && WINDOWS_SEPARATOR_CHAR == nextToken.charAt(0)) { + keytab = keytab + ":" + nextToken; + } } - } else { - zkHosts = zkHosts.replaceAll("=", ":"); + } } + } + } + } + } - zkHosts = normalizeHostsList(zkHosts, zkPort); - // normalize out zkPort - zkPort = null; + protected ConnectionInfo build() { + return new ZKConnectionInfo(isConnectionless, principal, keytab, user, haGroup, zkHosts, + zkPort, zkRootNode, connectionType); + } - if (zkRootNode == null) { - zkRootNode = - get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - } + @Override + protected void normalize() throws SQLException { + // Treat empty as null + if (zkHosts != null && zkHosts.isEmpty()) { + zkHosts = null; + } + if (zkRootNode != null && zkRootNode.isEmpty()) { + zkRootNode = null; + } + isConnectionless = PhoenixRuntime.CONNECTIONLESS.equals(zkHosts); + + if (isConnectionless) { + if (zkPort != null || zkRootNode != null) { + throw getMalFormedUrlException(url); + } else { + return; + } + } + + // Normalize connInfo so that a url explicitly specifying versus implicitly inheriting + // the default values will both share the same ConnectionQueryServices. 
+ if (zkPort == null) { + String zkPortString = get(HConstants.CLIENT_ZOOKEEPER_CLIENT_PORT); + if (zkPortString == null) { + zkPortString = get(HConstants.ZOOKEEPER_CLIENT_PORT); } + if (zkPortString == null) { + zkPort = HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT; + } else { + zkPort = Integer.parseInt(zkPortString); + } + } - public static boolean isZK(Configuration config, ReadOnlyProps props, Properties info) { - // Default is handled by the caller - return config != null && ZK_REGISTRY_NAME - .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); + if (zkHosts == null) { + zkHosts = get(HConstants.CLIENT_ZOOKEEPER_QUORUM); + if (zkHosts == null) { + zkHosts = get(HConstants.ZOOKEEPER_QUORUM); } + if (zkHosts == null) { + throw getMalFormedUrlException( + "Quorum not specified and hbase.client.zookeeper.quorum is not set" + + " in configuration : " + url); + } + } else { + zkHosts = zkHosts.replaceAll("=", ":"); + } + + zkHosts = normalizeHostsList(zkHosts, zkPort); + // normalize out zkPort + zkPort = null; + + if (zkRootNode == null) { + zkRootNode = + get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + } + } + + public static boolean isZK(Configuration config, ReadOnlyProps props, Properties info) { + // Default is handled by the caller + return config != null && ZK_REGISTRY_NAME + .equals(get(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, config, props, info)); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/job/AbstractRoundRobinQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/job/AbstractRoundRobinQueue.java index 001453923e1..03df3618a67 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/job/AbstractRoundRobinQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/job/AbstractRoundRobinQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,7 @@ */ package org.apache.phoenix.job; +import static org.apache.phoenix.query.QueryServicesOptions.UNLIMITED_QUEUE_SIZE; import java.util.AbstractQueue; import java.util.ArrayList; @@ -31,294 +32,288 @@ import org.apache.phoenix.util.EnvironmentEdgeManager; -import static org.apache.phoenix.query.QueryServicesOptions.UNLIMITED_QUEUE_SIZE; - /** - * * An bounded blocking queue implementation that keeps a virtual queue of elements on per-producer * basis and iterates through each producer queue in round robin fashion. - * */ public abstract class AbstractRoundRobinQueue extends AbstractQueue - implements BlockingQueue{ + implements BlockingQueue { - /** - * Construct an AbstractBlockingRoundRobinQueue that limits the size of the queued elements - * to at most maxSize. Attempts to insert new elements after that point will cause the - * caller to block. - * @param maxSize - */ - public AbstractRoundRobinQueue(int maxSize) { - this(maxSize, false); - } - /** - * @param newProducerToFront If true, new producers go to the front of the round-robin list, if false, they go to the end. 
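To illustrate what the normalize() method in the ZKConnectionInfo hunk buys, a hypothetical sketch of two URLs that should resolve to the same ConnectionInfo; the jdbc:phoenix+zk prefix is assumed to be what JDBC_PROTOCOL_ZK expands to, and 2181 and /hbase are only the usual defaults picked up from the HBase configuration at runtime:

import java.sql.Connection;
import java.sql.DriverManager;

public class ZkUrlNormalizationExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical quorum; assuming the cluster uses the default client port
    // and root znode, both URLs normalize to the same ConnectionInfo and so
    // share one ConnectionQueryServices.
    try (Connection explicit =
            DriverManager.getConnection("jdbc:phoenix+zk:zk1,zk2,zk3:2181:/hbase");
        Connection implicit = DriverManager.getConnection("jdbc:phoenix+zk:zk1,zk2,zk3")) {
      // Per-host ports such as zk1:2181,zk2:2182 are rejected by isMultiPortUrl()
      // above, because HBase cannot handle different ports per quorum host.
    }
  }
}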
- */ - public AbstractRoundRobinQueue(int maxSize, boolean newProducerToFront) { - this.producerMap = new HashMap>(); - this.producerLists = new LinkedList>(); - this.lock = new Object(); - this.newProducerToFront = newProducerToFront; - this.maxSize = maxSize; - } + /** + * Construct an AbstractBlockingRoundRobinQueue that limits the size of the queued elements to at + * most maxSize. Attempts to insert new elements after that point will cause the caller to block. + */ + public AbstractRoundRobinQueue(int maxSize) { + this(maxSize, false); + } - @Override - public Iterator iterator() { - synchronized(lock) { - ArrayList allElements = new ArrayList(this.size); - ListIterator> iter = this.producerLists.listIterator(this.currentProducer); - while(iter.hasNext()) { - ProducerList tList = iter.next(); - allElements.addAll(tList.list); - } - return allElements.iterator(); - } + /** + * @param newProducerToFront If true, new producers go to the front of the round-robin list, if + * false, they go to the end. + */ + public AbstractRoundRobinQueue(int maxSize, boolean newProducerToFront) { + this.producerMap = new HashMap>(); + this.producerLists = new LinkedList>(); + this.lock = new Object(); + this.newProducerToFront = newProducerToFront; + this.maxSize = maxSize; + } + + @Override + public Iterator iterator() { + synchronized (lock) { + ArrayList allElements = new ArrayList(this.size); + ListIterator> iter = this.producerLists.listIterator(this.currentProducer); + while (iter.hasNext()) { + ProducerList tList = iter.next(); + allElements.addAll(tList.list); + } + return allElements.iterator(); } + } - @Override - public boolean offer(E o, long timeout, TimeUnit unit) throws InterruptedException { - boolean taken = false; - long endAt = EnvironmentEdgeManager.currentTimeMillis() + unit.toMillis(timeout); - synchronized(lock) { - long waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); - while (!(taken = offer(o)) && waitTime > 0) { - this.lock.wait(waitTime); - waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); - } - } - return taken; + @Override + public boolean offer(E o, long timeout, TimeUnit unit) throws InterruptedException { + boolean taken = false; + long endAt = EnvironmentEdgeManager.currentTimeMillis() + unit.toMillis(timeout); + synchronized (lock) { + long waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); + while (!(taken = offer(o)) && waitTime > 0) { + this.lock.wait(waitTime); + waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); + } } + return taken; + } - @Override - public boolean offer(E o) { - if (o == null) - throw new NullPointerException(); + @Override + public boolean offer(E o) { + if (o == null) throw new NullPointerException(); - final Object producerKey = extractProducer(o); + final Object producerKey = extractProducer(o); - ProducerList producerList = null; - synchronized(lock) { - if (this.maxSize != UNLIMITED_QUEUE_SIZE && this.size == this.maxSize) { - return false; - } - producerList = this.producerMap.get(producerKey); - if (producerList == null) { - producerList = new ProducerList(producerKey); - this.producerMap.put(producerKey, producerList); - this.producerLists.add(this.currentProducer, producerList); - if (!this.newProducerToFront) { - incrementCurrentProducerPointer(); - } - } - producerList.list.add(o); - this.size++; - lock.notifyAll(); + ProducerList producerList = null; + synchronized (lock) { + if (this.maxSize != UNLIMITED_QUEUE_SIZE && this.size == this.maxSize) { + return false; + } + producerList = 
this.producerMap.get(producerKey); + if (producerList == null) { + producerList = new ProducerList(producerKey); + this.producerMap.put(producerKey, producerList); + this.producerLists.add(this.currentProducer, producerList); + if (!this.newProducerToFront) { + incrementCurrentProducerPointer(); } - return true; + } + producerList.list.add(o); + this.size++; + lock.notifyAll(); } - - /** - * Implementations must extracts the producer object which is used as the key to identify a unique producer. - */ - protected abstract Object extractProducer(E o); + return true; + } - @Override - public void put(E o) { - offer(o); - } + /** + * Implementations must extracts the producer object which is used as the key to identify a unique + * producer. + */ + protected abstract Object extractProducer(E o); - @Override - public E take() throws InterruptedException { - synchronized(lock) { - while (this.size == 0) { - this.lock.wait(); - } - E element = poll(); - assert element != null; - return element; - } + @Override + public void put(E o) { + offer(o); + } + + @Override + public E take() throws InterruptedException { + synchronized (lock) { + while (this.size == 0) { + this.lock.wait(); + } + E element = poll(); + assert element != null; + return element; } + } - @Override - public E poll(long timeout, TimeUnit unit) throws InterruptedException { - long endAt = EnvironmentEdgeManager.currentTimeMillis() + unit.toMillis(timeout); - synchronized(lock) { - long waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); - while (this.size == 0 && waitTime > 0) { - this.lock.wait(waitTime); - waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); - } - return poll(); - } + @Override + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + long endAt = EnvironmentEdgeManager.currentTimeMillis() + unit.toMillis(timeout); + synchronized (lock) { + long waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); + while (this.size == 0 && waitTime > 0) { + this.lock.wait(waitTime); + waitTime = endAt - EnvironmentEdgeManager.currentTimeMillis(); + } + return poll(); } + } - @Override - public E poll() { - synchronized(lock) { - ListIterator> iter = this.producerLists.listIterator(this.currentProducer); - while (iter.hasNext()) { - ProducerList tList = iter.next(); - if (tList.list.isEmpty()) { - iter.remove(); - this.producerMap.remove(tList.producer); - adjustCurrentProducerPointer(); - } else { - E element = tList.list.removeFirst(); - this.size--; - assert element != null; - // This is the round robin part. When we take an element from the current thread's queue - // we move on to the next thread. - if (tList.list.isEmpty()) { - iter.remove(); - this.producerMap.remove(tList.producer); - adjustCurrentProducerPointer(); - } else { - incrementCurrentProducerPointer(); - } - lock.notifyAll(); - return element; - } - } - assert this.size == 0; + @Override + public E poll() { + synchronized (lock) { + ListIterator> iter = this.producerLists.listIterator(this.currentProducer); + while (iter.hasNext()) { + ProducerList tList = iter.next(); + if (tList.list.isEmpty()) { + iter.remove(); + this.producerMap.remove(tList.producer); + adjustCurrentProducerPointer(); + } else { + E element = tList.list.removeFirst(); + this.size--; + assert element != null; + // This is the round robin part. When we take an element from the current thread's queue + // we move on to the next thread. 
+ if (tList.list.isEmpty()) { + iter.remove(); + this.producerMap.remove(tList.producer); + adjustCurrentProducerPointer(); + } else { + incrementCurrentProducerPointer(); + } + lock.notifyAll(); + return element; } - return null; + } + assert this.size == 0; } + return null; + } - /** - * Polls using the given producer key. - */ - protected E pollProducer(Object producer) { - synchronized(lock) { - ProducerList tList = this.producerMap.get(producer); - if (tList != null && !tList.list.isEmpty()) { - E element = tList.list.removeFirst(); - this.size--; - if (tList.list.isEmpty()) { - this.producerLists.remove(tList); - this.producerMap.remove(tList.producer); - // we need to adjust the current thread pointer in case it pointed to this thread list, which is now removed - adjustCurrentProducerPointer(); - } - lock.notifyAll(); - assert element != null; - // Since this is only processing the current thread's work, we'll leave the - // round-robin part alone and just return the work - return element; - } + /** + * Polls using the given producer key. + */ + protected E pollProducer(Object producer) { + synchronized (lock) { + ProducerList tList = this.producerMap.get(producer); + if (tList != null && !tList.list.isEmpty()) { + E element = tList.list.removeFirst(); + this.size--; + if (tList.list.isEmpty()) { + this.producerLists.remove(tList); + this.producerMap.remove(tList.producer); + // we need to adjust the current thread pointer in case it pointed to this thread list, + // which is now removed + adjustCurrentProducerPointer(); } - return null; + lock.notifyAll(); + assert element != null; + // Since this is only processing the current thread's work, we'll leave the + // round-robin part alone and just return the work + return element; + } } + return null; + } - @Override - public E peek() { - synchronized(lock) { - ListIterator> iter = this.producerLists.listIterator(this.currentProducer); - while (iter.hasNext()) { - ProducerList tList = iter.next(); - if (tList.list.isEmpty()) { - iter.remove(); - this.producerMap.remove(tList.producer); - adjustCurrentProducerPointer(); - } else { - E element = tList.list.getFirst(); - assert element != null; - return element; - } - } - assert this.size == 0; + @Override + public E peek() { + synchronized (lock) { + ListIterator> iter = this.producerLists.listIterator(this.currentProducer); + while (iter.hasNext()) { + ProducerList tList = iter.next(); + if (tList.list.isEmpty()) { + iter.remove(); + this.producerMap.remove(tList.producer); + adjustCurrentProducerPointer(); + } else { + E element = tList.list.getFirst(); + assert element != null; + return element; } - return null; + } + assert this.size == 0; } + return null; + } - @Override - public int drainTo(Collection c) { - if (c == null) - throw new NullPointerException(); - if (c == this) - throw new IllegalArgumentException(); + @Override + public int drainTo(Collection c) { + if (c == null) throw new NullPointerException(); + if (c == this) throw new IllegalArgumentException(); - synchronized(this.lock) { - int originalSize = this.size; - int drained = drainTo(c, this.size); - assert drained == originalSize; - assert this.size == 0; - assert this.producerLists.isEmpty(); - assert this.producerMap.isEmpty(); - return drained; - } + synchronized (this.lock) { + int originalSize = this.size; + int drained = drainTo(c, this.size); + assert drained == originalSize; + assert this.size == 0; + assert this.producerLists.isEmpty(); + assert this.producerMap.isEmpty(); + return drained; } + } - 
@Override - public int drainTo(Collection c, int maxElements) { - if (c == null) - throw new NullPointerException(); - if (c == this) - throw new IllegalArgumentException(); + @Override + public int drainTo(Collection c, int maxElements) { + if (c == null) throw new NullPointerException(); + if (c == this) throw new IllegalArgumentException(); - synchronized(this.lock) { - int i = 0; - while(i < maxElements) { - E element = poll(); - if (element != null) { - c.add(element); - i++; - } else { - break; - } - } - return i; + synchronized (this.lock) { + int i = 0; + while (i < maxElements) { + E element = poll(); + if (element != null) { + c.add(element); + i++; + } else { + break; } + } + return i; } + } - @Override - public int remainingCapacity() { - return Integer.MAX_VALUE; - } + @Override + public int remainingCapacity() { + return Integer.MAX_VALUE; + } - @Override - public int size() { - synchronized(this.lock) { - return this.size; - } + @Override + public int size() { + synchronized (this.lock) { + return this.size; } - - private void incrementCurrentProducerPointer() { - synchronized(lock) { - if (this.producerLists.size() == 0) { - this.currentProducer = 0; - } else { - this.currentProducer = (this.currentProducer+1)%this.producerLists.size(); - } - } + } + + private void incrementCurrentProducerPointer() { + synchronized (lock) { + if (this.producerLists.size() == 0) { + this.currentProducer = 0; + } else { + this.currentProducer = (this.currentProducer + 1) % this.producerLists.size(); + } } - - /** - * Adjusts the current pointer to a decrease in size. - */ - private void adjustCurrentProducerPointer() { - synchronized(lock) { - if (this.producerLists.size() == 0) { - this.currentProducer = 0; - } else { - this.currentProducer = (this.currentProducer)%this.producerLists.size(); - } - } + } + + /** + * Adjusts the current pointer to a decrease in size. + */ + private void adjustCurrentProducerPointer() { + synchronized (lock) { + if (this.producerLists.size() == 0) { + this.currentProducer = 0; + } else { + this.currentProducer = (this.currentProducer) % this.producerLists.size(); + } } + } - private static class ProducerList { - public ProducerList(Object producer) { - this.producer = producer; - this.list = new LinkedList(); - } - private final Object producer; - private final LinkedList list; + private static class ProducerList { + public ProducerList(Object producer) { + this.producer = producer; + this.list = new LinkedList(); } - private final Map> producerMap; - private final LinkedList> producerLists; - private final Object lock; - private final boolean newProducerToFront; - private int currentProducer; - private int size; - private int maxSize; + private final Object producer; + private final LinkedList list; + } + + private final Map> producerMap; + private final LinkedList> producerLists; + private final Object lock; + private final boolean newProducerToFront; + private int currentProducer; + private int size; + private int maxSize; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/job/JobManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/job/JobManager.java index 8801f0f66b6..4e22a2fc090 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/job/JobManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/job/JobManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
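AbstractRoundRobinQueue is abstract, so a tiny hypothetical subclass may help when reviewing the reformatted fairness logic above; the element type and the per-thread producer key are invented for illustration:

import org.apache.phoenix.job.AbstractRoundRobinQueue;

// Illustration only: elements are fanned out into per-producer virtual queues
// keyed by the submitting thread, and poll()/take() serve those queues round
// robin, so one busy producer cannot starve the others.
public class PerThreadRoundRobinQueue extends AbstractRoundRobinQueue<Runnable> {

  public PerThreadRoundRobinQueue(int maxSize) {
    super(maxSize);
  }

  @Override
  protected Object extractProducer(Runnable task) {
    // The returned key selects which virtual sub-queue the element joins.
    return Thread.currentThread().getId();
  }
}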
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,266 +39,273 @@ import javax.annotation.Nullable; import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder; - import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.phoenix.util.EnvironmentEdgeManager; /** - * * Thread pool executor that executes scans in parallel - * - * * @since 0.1 */ @SuppressWarnings("rawtypes") public class JobManager extends AbstractRoundRobinQueue { - - private static final AtomicLong PHOENIX_POOL_INDEX = new AtomicLong(1); - - public JobManager(int maxSize) { - super(maxSize, true); // true -> new producers move to front of queue; this reduces latency. - } - @Override - protected Object extractProducer(T o) { - if( o instanceof JobFutureTask){ - return ((JobFutureTask)o).getJobId(); - } - return o; - } + private static final AtomicLong PHOENIX_POOL_INDEX = new AtomicLong(1); - public static interface JobRunnable extends Runnable { - public Object getJobId(); - public TaskExecutionMetricsHolder getTaskExecutionMetric(); + public JobManager(int maxSize) { + super(maxSize, true); // true -> new producers move to front of queue; this reduces latency. + } + + @Override + protected Object extractProducer(T o) { + if (o instanceof JobFutureTask) { + return ((JobFutureTask) o).getJobId(); } + return o; + } - public static ThreadPoolExecutor createThreadPoolExec(int keepAliveMs, int size, int queueSize, boolean useInstrumentedThreadPool) { - BlockingQueue queue; - if (queueSize == 0) { - queue = new SynchronousQueue(); // Specialized for 0 length. - } else { - queue = new JobManager(queueSize); - } - String name = "phoenix-" + PHOENIX_POOL_INDEX.getAndIncrement(); - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat(name + "-thread-%s") - .setDaemon(true) - .setThreadFactory( - new ContextClassLoaderThreadFactory(JobManager.class.getClassLoader())) - .build(); - ThreadPoolExecutor exec; - if (useInstrumentedThreadPool) { - // For thread pool, set core threads = max threads -- we don't ever want to exceed core threads, but want to go up to core threads *before* using the queue. - exec = new InstrumentedThreadPoolExecutor(name, size, size, keepAliveMs, TimeUnit.MILLISECONDS, queue, threadFactory) { - @Override - protected RunnableFuture newTaskFor(Callable call) { - return new InstrumentedJobFutureTask(call); - } - - @Override - protected RunnableFuture newTaskFor(Runnable runnable, T value) { - return new InstrumentedJobFutureTask(runnable, value); - } - }; - } else { - // For thread pool, set core threads = max threads -- we don't ever want to exceed core threads, but want to go up to core threads *before* using the queue. - exec = new ThreadPoolExecutor(size, size, keepAliveMs, TimeUnit.MILLISECONDS, queue, threadFactory) { - @Override - protected RunnableFuture newTaskFor(Callable call) { - // Override this so we can create a JobFutureTask so we can extract out the parentJobId (otherwise, in the default FutureTask, it is private). - return new JobFutureTask(call); - } - - @Override - protected RunnableFuture newTaskFor(Runnable runnable, T value) { - return new JobFutureTask(runnable, value); - } - }; - } - exec.allowCoreThreadTimeOut(true); // ... and allow core threads to time out. 
This just keeps things clean when idle, and is nice for ftests modes, etc., where we'd especially like these not to linger. - return exec; + public static interface JobRunnable extends Runnable { + public Object getJobId(); + + public TaskExecutionMetricsHolder getTaskExecutionMetric(); + } + + public static ThreadPoolExecutor createThreadPoolExec(int keepAliveMs, int size, int queueSize, + boolean useInstrumentedThreadPool) { + BlockingQueue queue; + if (queueSize == 0) { + queue = new SynchronousQueue(); // Specialized for 0 length. + } else { + queue = new JobManager(queueSize); } + String name = "phoenix-" + PHOENIX_POOL_INDEX.getAndIncrement(); + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setNameFormat(name + "-thread-%s").setDaemon(true) + .setThreadFactory(new ContextClassLoaderThreadFactory(JobManager.class.getClassLoader())) + .build(); + ThreadPoolExecutor exec; + if (useInstrumentedThreadPool) { + // For thread pool, set core threads = max threads -- we don't ever want to exceed core + // threads, but want to go up to core threads *before* using the queue. + exec = new InstrumentedThreadPoolExecutor(name, size, size, keepAliveMs, + TimeUnit.MILLISECONDS, queue, threadFactory) { + @Override + protected RunnableFuture newTaskFor(Callable call) { + return new InstrumentedJobFutureTask(call); + } - /** - * Subclasses FutureTask for the sole purpose of providing {@link #getCallable()}, which is used to extract the producer in the {@link JobBasedRoundRobinQueue} - */ - static class JobFutureTask extends FutureTask { - private final Object jobId; - @Nullable - private final TaskExecutionMetricsHolder taskMetric; - - public JobFutureTask(Runnable r, T t) { - super(r, t); - if(r instanceof JobRunnable){ - this.jobId = ((JobRunnable)r).getJobId(); - this.taskMetric = ((JobRunnable)r).getTaskExecutionMetric(); - } else { - this.jobId = this; - this.taskMetric = null; - } + @Override + protected RunnableFuture newTaskFor(Runnable runnable, T value) { + return new InstrumentedJobFutureTask(runnable, value); } - - public JobFutureTask(Callable c) { - super(c); - // FIXME: this fails when executor used by hbase - if (c instanceof JobCallable) { - this.jobId = ((JobCallable) c).getJobId(); - this.taskMetric = ((JobCallable) c).getTaskExecutionMetric(); - } else { - this.jobId = this; - this.taskMetric = null; - } + }; + } else { + // For thread pool, set core threads = max threads -- we don't ever want to exceed core + // threads, but want to go up to core threads *before* using the queue. + exec = new ThreadPoolExecutor(size, size, keepAliveMs, TimeUnit.MILLISECONDS, queue, + threadFactory) { + @Override + protected RunnableFuture newTaskFor(Callable call) { + // Override this so we can create a JobFutureTask so we can extract out the parentJobId + // (otherwise, in the default FutureTask, it is private). + return new JobFutureTask(call); } - - public Object getJobId() { - return jobId; + + @Override + protected RunnableFuture newTaskFor(Runnable runnable, T value) { + return new JobFutureTask(runnable, value); } + }; + } + exec.allowCoreThreadTimeOut(true); // ... and allow core threads to time out. This just keeps + // things clean when idle, and is nice for ftests modes, + // etc., where we'd especially like these not to linger. 
+ return exec; + } + + /** + * Subclasses FutureTask for the sole purpose of providing {@link #getCallable()}, which is used + * to extract the producer in the {@link JobBasedRoundRobinQueue} + */ + static class JobFutureTask extends FutureTask { + private final Object jobId; + @Nullable + private final TaskExecutionMetricsHolder taskMetric; + + public JobFutureTask(Runnable r, T t) { + super(r, t); + if (r instanceof JobRunnable) { + this.jobId = ((JobRunnable) r).getJobId(); + this.taskMetric = ((JobRunnable) r).getTaskExecutionMetric(); + } else { + this.jobId = this; + this.taskMetric = null; + } + } + + public JobFutureTask(Callable c) { + super(c); + // FIXME: this fails when executor used by hbase + if (c instanceof JobCallable) { + this.jobId = ((JobCallable) c).getJobId(); + this.taskMetric = ((JobCallable) c).getTaskExecutionMetric(); + } else { + this.jobId = this; + this.taskMetric = null; + } + } + + public Object getJobId() { + return jobId; } - - /** - * Instrumented version of {@link JobFutureTask} that measures time spent by a task at various stages in the queue - * and when executed. + } + + /** + * Instrumented version of {@link JobFutureTask} that measures time spent by a task at various + * stages in the queue and when executed. + */ + private static class InstrumentedJobFutureTask extends JobFutureTask { + + /* + * Time at which the task was submitted to the executor. */ - private static class InstrumentedJobFutureTask extends JobFutureTask { + private final long taskSubmissionTime; - /* - * Time at which the task was submitted to the executor. - */ - private final long taskSubmissionTime; + // Time at which the task is about to be executed. + private long taskExecutionStartTime; - // Time at which the task is about to be executed. - private long taskExecutionStartTime; + public InstrumentedJobFutureTask(Runnable r, T t) { + super(r, t); + this.taskSubmissionTime = EnvironmentEdgeManager.currentTimeMillis(); + } - public InstrumentedJobFutureTask(Runnable r, T t) { - super(r, t); - this.taskSubmissionTime = EnvironmentEdgeManager.currentTimeMillis(); - } + public InstrumentedJobFutureTask(Callable c) { + super(c); + this.taskSubmissionTime = EnvironmentEdgeManager.currentTimeMillis(); + } - public InstrumentedJobFutureTask(Callable c) { - super(c); - this.taskSubmissionTime = EnvironmentEdgeManager.currentTimeMillis(); - } - - @Override - public void run() { - this.taskExecutionStartTime = EnvironmentEdgeManager.currentTimeMillis(); - super.run(); - } - - public long getTaskSubmissionTime() { - return taskSubmissionTime; - } - - public long getTaskExecutionStartTime() { - return taskExecutionStartTime; - } + @Override + public void run() { + this.taskExecutionStartTime = EnvironmentEdgeManager.currentTimeMillis(); + super.run(); + } + public long getTaskSubmissionTime() { + return taskSubmissionTime; } - - /** - * Delegating callable implementation that preserves the parentJobId and sets up thread tracker stuff before delegating to the actual command. - */ - public static interface JobCallable extends Callable { - public Object getJobId(); - public TaskExecutionMetricsHolder getTaskExecutionMetric(); + + public long getTaskExecutionStartTime() { + return taskExecutionStartTime; } + } - /** - * Extension of the default thread factory returned by {@code Executors.defaultThreadFactory} - * that sets the context classloader on newly-created threads to be a specific classloader (and - * not the context classloader of the calling thread). - *
<p>
- * See {@link org.apache.phoenix.util.PhoenixContextExecutor} for the rationale on changing - * the context classloader. - */ - static class ContextClassLoaderThreadFactory implements ThreadFactory { - private final ThreadFactory baseFactory; - private final ClassLoader contextClassLoader; + /** + * Delegating callable implementation that preserves the parentJobId and sets up thread tracker + * stuff before delegating to the actual command. + */ + public static interface JobCallable extends Callable { + public Object getJobId(); - public ContextClassLoaderThreadFactory(ClassLoader contextClassLoader) { - baseFactory = Executors.defaultThreadFactory(); - this.contextClassLoader = contextClassLoader; - } + public TaskExecutionMetricsHolder getTaskExecutionMetric(); + } - @Override - public Thread newThread(Runnable r) { - Thread t = baseFactory.newThread(r); - t.setContextClassLoader(contextClassLoader); - return t; - } + /** + * Extension of the default thread factory returned by {@code Executors.defaultThreadFactory} that + * sets the context classloader on newly-created threads to be a specific classloader (and not the + * context classloader of the calling thread). + *
<p>
+ * See {@link org.apache.phoenix.util.PhoenixContextExecutor} for the rationale on changing the + * context classloader. + */ + static class ContextClassLoaderThreadFactory implements ThreadFactory { + private final ThreadFactory baseFactory; + private final ClassLoader contextClassLoader; + + public ContextClassLoaderThreadFactory(ClassLoader contextClassLoader) { + baseFactory = Executors.defaultThreadFactory(); + this.contextClassLoader = contextClassLoader; } - - /** - * Thread pool executor that instruments the various characteristics of the backing pool of threads and queue. This - * executor assumes that all the tasks handled are of type {@link JobManager.InstrumentedJobFutureTask} - */ - private static class InstrumentedThreadPoolExecutor extends ThreadPoolExecutor { - - private final RejectedExecutionHandler rejectedExecHandler = new RejectedExecutionHandler() { - @Override - public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { - TaskExecutionMetricsHolder metrics = getRequestMetric(r); - if (metrics != null) { - metrics.getNumRejectedTasks().increment(); - } - GLOBAL_REJECTED_TASK_COUNTER.increment(); - throw new RejectedExecutionException("Task " + r.toString() + " rejected from " + executor.toString()); - } - }; - - public InstrumentedThreadPoolExecutor(String threadPoolName, int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory) { - super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory); - setRejectedExecutionHandler(rejectedExecHandler); - } - @Override - public void execute(Runnable task) { - TaskExecutionMetricsHolder metrics = getRequestMetric(task); - if (metrics != null) { - metrics.getNumTasks().increment(); - } - GLOBAL_TASK_EXECUTED_COUNTER.increment(); - super.execute(task); - } + @Override + public Thread newThread(Runnable r) { + Thread t = baseFactory.newThread(r); + t.setContextClassLoader(contextClassLoader); + return t; + } + } - @Override - protected void beforeExecute(Thread worker, Runnable task) { - InstrumentedJobFutureTask instrumentedTask = (InstrumentedJobFutureTask)task; - long queueWaitTime = EnvironmentEdgeManager.currentTimeMillis() - - instrumentedTask.getTaskSubmissionTime(); - GLOBAL_TASK_QUEUE_WAIT_TIME.update(queueWaitTime); - TaskExecutionMetricsHolder metrics = getRequestMetric(task); - if (metrics != null) { - metrics.getTaskQueueWaitTime().change(queueWaitTime); - } - super.beforeExecute(worker, instrumentedTask); - } + /** + * Thread pool executor that instruments the various characteristics of the backing pool of + * threads and queue. 
This executor assumes that all the tasks handled are of type + * {@link JobManager.InstrumentedJobFutureTask} + */ + private static class InstrumentedThreadPoolExecutor extends ThreadPoolExecutor { - @Override - protected void afterExecute(Runnable task, Throwable t) { - InstrumentedJobFutureTask instrumentedTask = (InstrumentedJobFutureTask)task; - try { - super.afterExecute(instrumentedTask, t); - } finally { - long taskExecutionTime = EnvironmentEdgeManager.currentTimeMillis() - - instrumentedTask.getTaskExecutionStartTime(); - long endToEndTaskTime = EnvironmentEdgeManager.currentTimeMillis() - - instrumentedTask.getTaskSubmissionTime(); - TaskExecutionMetricsHolder metrics = getRequestMetric(task); - if (metrics != null) { - metrics.getTaskExecutionTime().change(taskExecutionTime); - metrics.getTaskEndToEndTime().change(endToEndTaskTime); - } - GLOBAL_TASK_EXECUTION_TIME.update(taskExecutionTime); - GLOBAL_TASK_END_TO_END_TIME.update(endToEndTaskTime); - } + private final RejectedExecutionHandler rejectedExecHandler = new RejectedExecutionHandler() { + @Override + public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { + TaskExecutionMetricsHolder metrics = getRequestMetric(r); + if (metrics != null) { + metrics.getNumRejectedTasks().increment(); } + GLOBAL_REJECTED_TASK_COUNTER.increment(); + throw new RejectedExecutionException( + "Task " + r.toString() + " rejected from " + executor.toString()); + } + }; + + public InstrumentedThreadPoolExecutor(String threadPoolName, int corePoolSize, + int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, + ThreadFactory threadFactory) { + super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory); + setRejectedExecutionHandler(rejectedExecHandler); + } - private static TaskExecutionMetricsHolder getRequestMetric(Runnable task) { - return ((JobFutureTask)task).taskMetric; + @Override + public void execute(Runnable task) { + TaskExecutionMetricsHolder metrics = getRequestMetric(task); + if (metrics != null) { + metrics.getNumTasks().increment(); + } + GLOBAL_TASK_EXECUTED_COUNTER.increment(); + super.execute(task); + } + + @Override + protected void beforeExecute(Thread worker, Runnable task) { + InstrumentedJobFutureTask instrumentedTask = (InstrumentedJobFutureTask) task; + long queueWaitTime = + EnvironmentEdgeManager.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime(); + GLOBAL_TASK_QUEUE_WAIT_TIME.update(queueWaitTime); + TaskExecutionMetricsHolder metrics = getRequestMetric(task); + if (metrics != null) { + metrics.getTaskQueueWaitTime().change(queueWaitTime); + } + super.beforeExecute(worker, instrumentedTask); + } + + @Override + protected void afterExecute(Runnable task, Throwable t) { + InstrumentedJobFutureTask instrumentedTask = (InstrumentedJobFutureTask) task; + try { + super.afterExecute(instrumentedTask, t); + } finally { + long taskExecutionTime = + EnvironmentEdgeManager.currentTimeMillis() - instrumentedTask.getTaskExecutionStartTime(); + long endToEndTaskTime = + EnvironmentEdgeManager.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime(); + TaskExecutionMetricsHolder metrics = getRequestMetric(task); + if (metrics != null) { + metrics.getTaskExecutionTime().change(taskExecutionTime); + metrics.getTaskEndToEndTime().change(endToEndTaskTime); } + GLOBAL_TASK_EXECUTION_TIME.update(taskExecutionTime); + GLOBAL_TASK_END_TO_END_TIME.update(endToEndTaskTime); + } } -} + private static TaskExecutionMetricsHolder getRequestMetric(Runnable task) { + 
return ((JobFutureTask) task).taskMetric; + } + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java index 9a5fb6eb798..39ed76bc527 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,189 +40,176 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.TrustedByteArrayOutputStream; import org.apache.phoenix.util.TupleUtil; import org.iq80.snappy.Snappy; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** - * * Client for adding cache of one side of a join to region servers - * - * * @since 0.1 */ -public class HashCacheClient { - private final ServerCacheClient serverCache; +public class HashCacheClient { + private final ServerCacheClient serverCache; - /** - * Construct client used to create a serialized cached snapshot of a table and send it to each region server - * for caching during hash join processing. - * @param connection the client connection - */ - public HashCacheClient(PhoenixConnection connection) { - serverCache = new ServerCacheClient(connection); - } + /** + * Construct client used to create a serialized cached snapshot of a table and send it to each + * region server for caching during hash join processing. + * @param connection the client connection + */ + public HashCacheClient(PhoenixConnection connection) { + serverCache = new ServerCacheClient(connection); + } - /** - * Creates a ServerCache object for cacheId. This is used for persistent cache, and there may or may not - * be corresponding data on each region server. - * @param cacheId ID for the cache entry - * @param delegate the query plan this will be used for - * @return client-side {@link ServerCache} representing the hash cache that may or may not be present on region servers. - * @throws SQLException - * size - */ - public ServerCache createServerCache(final byte[] cacheId, QueryPlan delegate) - throws SQLException, IOException { - return serverCache.createServerCache(cacheId, delegate); - } + /** + * Creates a ServerCache object for cacheId. This is used for persistent cache, and there may or + * may not be corresponding data on each region server. + * @param cacheId ID for the cache entry + * @param delegate the query plan this will be used for + * @return client-side {@link ServerCache} representing the hash cache that may or may not be + * present on region servers. 
size + */ + public ServerCache createServerCache(final byte[] cacheId, QueryPlan delegate) + throws SQLException, IOException { + return serverCache.createServerCache(cacheId, delegate); + } + /** + * Send the results of scanning through the scanner to all region servers for regions of the table + * that will use the cache that intersect with the minMaxKeyRange. + * @return client-side {@link ServerCache} representing the added hash cache + * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed size + */ + public ServerCache addHashCache(ScanRanges keyRanges, byte[] cacheId, ResultIterator iterator, + long estimatedSize, List onExpressions, boolean singleValueOnly, + boolean usePersistentCache, PTable cacheUsingTable, Expression keyRangeRhsExpression, + List keyRangeRhsValues) throws SQLException { /** - * Send the results of scanning through the scanner to all - * region servers for regions of the table that will use the cache - * that intersect with the minMaxKeyRange. - * @param keyRanges - * @param cacheId - * @param iterator - * @param estimatedSize - * @param onExpressions - * @param singleValueOnly - * @param usePersistentCache - * @param cacheUsingTable - * @param keyRangeRhsExpression - * @param keyRangeRhsValues - * @return client-side {@link ServerCache} representing the added hash cache - * @throws SQLException - * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed - * size + * Serialize and compress hashCacheTable */ - public ServerCache addHashCache( - ScanRanges keyRanges, byte[] cacheId, ResultIterator iterator, long estimatedSize, List onExpressions, - boolean singleValueOnly, boolean usePersistentCache, PTable cacheUsingTable, Expression keyRangeRhsExpression, - List keyRangeRhsValues) throws SQLException { - /** - * Serialize and compress hashCacheTable - */ - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - serialize(ptr, iterator, estimatedSize, onExpressions, singleValueOnly, keyRangeRhsExpression, keyRangeRhsValues); - ServerCache cache = serverCache.addServerCache(keyRanges, cacheId, ptr, ByteUtil.EMPTY_BYTE_ARRAY, new HashCacheFactory(), cacheUsingTable, usePersistentCache, true); - return cache; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + serialize(ptr, iterator, estimatedSize, onExpressions, singleValueOnly, keyRangeRhsExpression, + keyRangeRhsValues); + ServerCache cache = serverCache.addServerCache(keyRanges, cacheId, ptr, + ByteUtil.EMPTY_BYTE_ARRAY, new HashCacheFactory(), cacheUsingTable, usePersistentCache, true); + return cache; + } + + /** + * Should only be used to resend the hash table cache to the regionserver. + * @param startkeyOfRegion start key of any region hosted on a regionserver which needs hash cache + * @param cache The cache which needs to be sent + */ + public boolean addHashCacheToServer(byte[] startkeyOfRegion, ServerCache cache, PTable pTable) + throws Exception { + if (cache == null) { + return false; } - - /** - * Should only be used to resend the hash table cache to the regionserver. 
- * - * @param startkeyOfRegion start key of any region hosted on a regionserver which needs hash cache - * @param cache The cache which needs to be sent - * @param pTable - * @return - * @throws Exception - */ - public boolean addHashCacheToServer(byte[] startkeyOfRegion, ServerCache cache, PTable pTable) throws Exception{ - if (cache == null) { return false; } - return serverCache.addServerCache(startkeyOfRegion, cache, new HashCacheFactory(), ByteUtil.EMPTY_BYTE_ARRAY, pTable); + return serverCache.addServerCache(startkeyOfRegion, cache, new HashCacheFactory(), + ByteUtil.EMPTY_BYTE_ARRAY, pTable); + } + + private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, + List onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, + List keyRangeRhsValues) throws SQLException { + long maxSize = serverCache.getConnection().getQueryServices().getProps().getLongBytes( + QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE); + estimatedSize = Math.min(estimatedSize, maxSize); + if (estimatedSize > Integer.MAX_VALUE) { + throw new IllegalStateException("Estimated size(" + estimatedSize + + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")"); } - - private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List keyRangeRhsValues) throws SQLException { - long maxSize = serverCache.getConnection().getQueryServices().getProps() - .getLongBytes(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE); - estimatedSize = Math.min(estimatedSize, maxSize); - if (estimatedSize > Integer.MAX_VALUE) { - throw new IllegalStateException("Estimated size(" + estimatedSize + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")"); + try { + TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int) estimatedSize); + DataOutputStream out = new DataOutputStream(baOut); + // Write onExpressions first, for hash key evaluation along with deserialization + out.writeInt(onExpressions.size()); + for (Expression expression : onExpressions) { + WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal()); + expression.write(out); + } + int exprSize = baOut.size() + Bytes.SIZEOF_INT; + out.writeInt(exprSize * (singleValueOnly ? -1 : 1)); + int nRows = 0; + out.writeInt(nRows); // In the end will be replaced with total number of rows + ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); + for (Tuple result = iterator.next(); result != null; result = iterator.next()) { + TupleUtil.write(result, out); + if (baOut.size() > maxSize) { + throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)"); } - try { - TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int)estimatedSize); - DataOutputStream out = new DataOutputStream(baOut); - // Write onExpressions first, for hash key evaluation along with deserialization - out.writeInt(onExpressions.size()); - for (Expression expression : onExpressions) { - WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal()); - expression.write(out); - } - int exprSize = baOut.size() + Bytes.SIZEOF_INT; - out.writeInt(exprSize * (singleValueOnly ? 
-1 : 1)); - int nRows = 0; - out.writeInt(nRows); // In the end will be replaced with total number of rows - ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - for (Tuple result = iterator.next(); result != null; result = iterator.next()) { - TupleUtil.write(result, out); - if (baOut.size() > maxSize) { - throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)"); - } - // Evaluate key expressions for hash join key range optimization. - if (keyRangeRhsExpression != null) { - keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr)); - } - nRows++; - } - TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT); - DataOutputStream dataOut = new DataOutputStream(sizeOut); - try { - dataOut.writeInt(nRows); - dataOut.flush(); - byte[] cache = baOut.getBuffer(); - // Replace number of rows written above with the correct value. - System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size()); - // Reallocate to actual size plus compressed buffer size (which is allocated below) - int maxCompressedSize = Snappy.maxCompressedLength(baOut.size()); - byte[] compressed = new byte[maxCompressedSize]; // size for worst case - int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0); - // Last realloc to size of compressed buffer. - ptr.set(compressed,0,compressedSize); - } finally { - dataOut.close(); - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } finally { - iterator.close(); + // Evaluate key expressions for hash join key range optimization. + if (keyRangeRhsExpression != null) { + keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr)); } + nRows++; + } + TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT); + DataOutputStream dataOut = new DataOutputStream(sizeOut); + try { + dataOut.writeInt(nRows); + dataOut.flush(); + byte[] cache = baOut.getBuffer(); + // Replace number of rows written above with the correct value. + System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size()); + // Reallocate to actual size plus compressed buffer size (which is allocated below) + int maxCompressedSize = Snappy.maxCompressedLength(baOut.size()); + byte[] compressed = new byte[maxCompressedSize]; // size for worst case + int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0); + // Last realloc to size of compressed buffer. + ptr.set(compressed, 0, compressedSize); + } finally { + dataOut.close(); + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } finally { + iterator.close(); } - - /** - * Evaluate the RHS key expression and wrap the result as a new Expression. - * Unlike other types of Expression which will be evaluated and wrapped as a - * single LiteralExpression, RowValueConstructorExpression should be handled - * differently. We should evaluate each child of RVC and wrap them into a new - * RVC Expression, in order to make sure that the later coercion between the - * LHS key expression and this RHS key expression will be successful. 
- * - * @param keyExpression the RHS key expression - * @param tuple the input tuple - * @param ptr the temporary pointer - * @return the Expression containing the evaluated result - * @throws SQLException - */ - public static Expression evaluateKeyExpression(Expression keyExpression, Tuple tuple, ImmutableBytesWritable ptr) throws SQLException { - if (!(keyExpression instanceof RowValueConstructorExpression)) { - PDataType type = keyExpression.getDataType(); - keyExpression.reset(); - if (keyExpression.evaluate(tuple, ptr)) { - return LiteralExpression.newConstant(type.toObject(ptr, keyExpression.getSortOrder()), type); - } - - return LiteralExpression.newConstant(null, type); - } - - List children = keyExpression.getChildren(); - List values = Lists.newArrayListWithExpectedSize(children.size()); - for (Expression child : children) { - PDataType type = child.getDataType(); - child.reset(); - if (child.evaluate(tuple, ptr)) { - values.add(LiteralExpression.newConstant(type.toObject(ptr, child.getSortOrder()), type)); - } else { - values.add(LiteralExpression.newConstant(null, type)); - } - } - // The early evaluation of this constant expression is not necessary, for it - // might be coerced later. - return new RowValueConstructorExpression(values, false); + } + + /** + * Evaluate the RHS key expression and wrap the result as a new Expression. Unlike other types of + * Expression which will be evaluated and wrapped as a single LiteralExpression, + * RowValueConstructorExpression should be handled differently. We should evaluate each child of + * RVC and wrap them into a new RVC Expression, in order to make sure that the later coercion + * between the LHS key expression and this RHS key expression will be successful. + * @param keyExpression the RHS key expression + * @param tuple the input tuple + * @param ptr the temporary pointer + * @return the Expression containing the evaluated result + */ + public static Expression evaluateKeyExpression(Expression keyExpression, Tuple tuple, + ImmutableBytesWritable ptr) throws SQLException { + if (!(keyExpression instanceof RowValueConstructorExpression)) { + PDataType type = keyExpression.getDataType(); + keyExpression.reset(); + if (keyExpression.evaluate(tuple, ptr)) { + return LiteralExpression.newConstant(type.toObject(ptr, keyExpression.getSortOrder()), + type); + } + + return LiteralExpression.newConstant(null, type); + } + + List children = keyExpression.getChildren(); + List values = Lists.newArrayListWithExpectedSize(children.size()); + for (Expression child : children) { + PDataType type = child.getDataType(); + child.reset(); + if (child.evaluate(tuple, ptr)) { + values.add(LiteralExpression.newConstant(type.toObject(ptr, child.getSortOrder()), type)); + } else { + values.add(LiteralExpression.newConstant(null, type)); + } } + // The early evaluation of this constant expression is not necessary, for it + // might be coerced later. + return new RowValueConstructorExpression(values, false); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java index c36c6ecd869..c5d217c201d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashCacheFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,8 +32,6 @@ import java.util.Map; import java.util.Set; -import net.jcip.annotations.Immutable; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; @@ -54,121 +52,130 @@ import org.iq80.snappy.CorruptionException; import org.iq80.snappy.Snappy; +import net.jcip.annotations.Immutable; + public class HashCacheFactory implements ServerCacheFactory { - public HashCacheFactory() { + public HashCacheFactory() { + } + + @Override + public void readFields(DataInput input) throws IOException { + } + + @Override + public void write(DataOutput output) throws IOException { + } + + @Override + public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, + boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException { + try { + // This reads the uncompressed length from the front of the compressed input + int uncompressedLen = Snappy.getUncompressedLength(cachePtr.get(), cachePtr.getOffset()); + byte[] uncompressed = new byte[uncompressedLen]; + Snappy.uncompress(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength(), uncompressed, + 0); + return new HashCacheImpl(uncompressed, chunk, clientVersion); + } catch (CorruptionException e) { + throw ClientUtil.parseServerException(e); } + } - @Override - public void readFields(DataInput input) throws IOException { + @Immutable + private static class HashCacheImpl implements HashCache { + private final Map> hashCache; + private final MemoryChunk memoryChunk; + private final boolean singleValueOnly; + private final int clientVersion; + + private HashCacheImpl(byte[] hashCacheBytes, MemoryChunk memoryChunk, int clientVersion) { + try { + this.memoryChunk = memoryChunk; + this.clientVersion = clientVersion; + byte[] hashCacheByteArray = hashCacheBytes; + int offset = 0; + ByteArrayInputStream input = + new ByteArrayInputStream(hashCacheByteArray, offset, hashCacheBytes.length); + DataInputStream dataInput = new DataInputStream(input); + int nExprs = dataInput.readInt(); + List onExpressions = new ArrayList(nExprs); + for (int i = 0; i < nExprs; i++) { + int expressionOrdinal = WritableUtils.readVInt(dataInput); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(dataInput); + onExpressions.add(expression); + } + boolean singleValueOnly = false; + int exprSizeAndSingleValueOnly = dataInput.readInt(); + int exprSize = exprSizeAndSingleValueOnly; + if (exprSize < 0) { + exprSize *= -1; + singleValueOnly = true; + } + this.singleValueOnly = singleValueOnly; + offset += exprSize; + int nRows = dataInput.readInt(); + long estimatedSize = + SizedUtil.sizeOfMap(nRows, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, SizedUtil.RESULT_SIZE) + + hashCacheBytes.length; + this.memoryChunk.resize(estimatedSize); + HashMap> hashCacheMap = + new HashMap>(nRows * 5 / 4); + offset += Bytes.SIZEOF_INT; + // Build Map with evaluated hash key as key and row as value + for (int i = 0; i < nRows; i++) { + int resultSize = (int) Bytes.readAsVLong(hashCacheByteArray, offset); + offset += WritableUtils.decodeVIntSize(hashCacheByteArray[offset]); + ImmutableBytesWritable value = + new 
ImmutableBytesWritable(hashCacheByteArray, offset, resultSize); + Tuple result = new ResultTuple(ResultUtil.toResult(value)); + ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, onExpressions); + List tuples = hashCacheMap.get(key); + if (tuples == null) { + tuples = new LinkedList(); + hashCacheMap.put(key, tuples); + } + tuples.add(result); + offset += resultSize; + } + this.hashCache = Collections.unmodifiableMap(hashCacheMap); + } catch (IOException e) { // Not possible with ByteArrayInputStream + throw new RuntimeException(e); + } } @Override - public void write(DataOutput output) throws IOException { + public String toString() { + StringBuilder sb = new StringBuilder(); + Set keySet = hashCache.keySet(); + for (ImmutableBytesPtr key : keySet) { + sb.append("key: " + key + " value: " + hashCache.get(key)); + } + return sb.toString(); } @Override - public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException { - try { - // This reads the uncompressed length from the front of the compressed input - int uncompressedLen = Snappy.getUncompressedLength(cachePtr.get(), cachePtr.getOffset()); - byte[] uncompressed = new byte[uncompressedLen]; - Snappy.uncompress(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength(), - uncompressed, 0); - return new HashCacheImpl(uncompressed, chunk, clientVersion); - } catch (CorruptionException e) { - throw ClientUtil.parseServerException(e); - } + public void close() { + memoryChunk.close(); } - @Immutable - private static class HashCacheImpl implements HashCache { - private final Map> hashCache; - private final MemoryChunk memoryChunk; - private final boolean singleValueOnly; - private final int clientVersion; - - private HashCacheImpl(byte[] hashCacheBytes, MemoryChunk memoryChunk, int clientVersion) { - try { - this.memoryChunk = memoryChunk; - this.clientVersion = clientVersion; - byte[] hashCacheByteArray = hashCacheBytes; - int offset = 0; - ByteArrayInputStream input = new ByteArrayInputStream(hashCacheByteArray, offset, hashCacheBytes.length); - DataInputStream dataInput = new DataInputStream(input); - int nExprs = dataInput.readInt(); - List onExpressions = new ArrayList(nExprs); - for (int i = 0; i < nExprs; i++) { - int expressionOrdinal = WritableUtils.readVInt(dataInput); - Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(dataInput); - onExpressions.add(expression); - } - boolean singleValueOnly = false; - int exprSizeAndSingleValueOnly = dataInput.readInt(); - int exprSize = exprSizeAndSingleValueOnly; - if (exprSize < 0) { - exprSize *= -1; - singleValueOnly = true; - } - this.singleValueOnly = singleValueOnly; - offset += exprSize; - int nRows = dataInput.readInt(); - long estimatedSize = SizedUtil.sizeOfMap(nRows, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, SizedUtil.RESULT_SIZE) + hashCacheBytes.length; - this.memoryChunk.resize(estimatedSize); - HashMap> hashCacheMap = new HashMap>(nRows * 5 / 4); - offset += Bytes.SIZEOF_INT; - // Build Map with evaluated hash key as key and row as value - for (int i = 0; i < nRows; i++) { - int resultSize = (int)Bytes.readAsVLong(hashCacheByteArray, offset); - offset += WritableUtils.decodeVIntSize(hashCacheByteArray[offset]); - ImmutableBytesWritable value = new ImmutableBytesWritable(hashCacheByteArray,offset,resultSize); - Tuple result = new ResultTuple(ResultUtil.toResult(value)); - ImmutableBytesPtr key = 
TupleUtil.getConcatenatedValue(result, onExpressions); - List tuples = hashCacheMap.get(key); - if (tuples == null) { - tuples = new LinkedList(); - hashCacheMap.put(key, tuples); - } - tuples.add(result); - offset += resultSize; - } - this.hashCache = Collections.unmodifiableMap(hashCacheMap); - } catch (IOException e) { // Not possible with ByteArrayInputStream - throw new RuntimeException(e); - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - Set keySet = hashCache.keySet(); - for (ImmutableBytesPtr key : keySet) { - sb.append("key: " + key + " value: " + hashCache.get(key)); - } - return sb.toString(); - } + @Override + public List get(ImmutableBytesPtr hashKey) throws IOException { + List ret = hashCache.get(hashKey); + if (singleValueOnly && ret != null && ret.size() > 1) { + SQLException ex = + new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS) + .build().buildException(); + ClientUtil.throwIOException(ex.getMessage(), ex); + } - @Override - public void close() { - memoryChunk.close(); - } - - @Override - public List get(ImmutableBytesPtr hashKey) throws IOException { - List ret = hashCache.get(hashKey); - if (singleValueOnly && ret != null && ret.size() > 1) { - SQLException ex = new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_ROW_SUBQUERY_RETURNS_MULTIPLE_ROWS).build().buildException(); - ClientUtil.throwIOException(ex.getMessage(), ex); - } - - return ret; - } + return ret; + } - @Override - public int getClientVersion() { - return clientVersion; - } + @Override + public int getClientVersion() { + return clientVersion; } + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashJoinInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashJoinInfo.java index 3161eb492c0..254b823399a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashJoinInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/HashJoinInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,198 +39,207 @@ import org.apache.phoenix.util.SchemaUtil; public class HashJoinInfo { - private static final String HASH_JOIN = "HashJoin"; - - private KeyValueSchema joinedSchema; - private ImmutableBytesPtr[] joinIds; - private List[] joinExpressions; - private JoinType[] joinTypes; - private boolean[] earlyEvaluation; - private KeyValueSchema[] schemas; - private int[] fieldPositions; - private Expression postJoinFilterExpression; - private Integer limit; - private boolean forceProjection; // always true now, but for backward compatibility. 
- - public HashJoinInfo(PTable joinedTable, ImmutableBytesPtr[] joinIds, List[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, PTable[] tables, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit) { - this(buildSchema(joinedTable), joinIds, joinExpressions, joinTypes, earlyEvaluation, buildSchemas(tables), fieldPositions, postJoinFilterExpression, limit, true); - } - - private static KeyValueSchema[] buildSchemas(PTable[] tables) { - KeyValueSchema[] schemas = new KeyValueSchema[tables.length]; - for (int i = 0; i < tables.length; i++) { - schemas[i] = buildSchema(tables[i]); - } - return schemas; - } - - private static KeyValueSchema buildSchema(PTable table) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); - if (table != null) { - for (PColumn column : table.getColumns()) { - if (!SchemaUtil.isPKColumn(column)) { - builder.addField(column); - } - } - } - return builder.build(); - } - - private HashJoinInfo(KeyValueSchema joinedSchema, ImmutableBytesPtr[] joinIds, List[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, KeyValueSchema[] schemas, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit, boolean forceProjection) { - this.joinedSchema = joinedSchema; - this.joinIds = joinIds; - this.joinExpressions = joinExpressions; - this.joinTypes = joinTypes; - this.earlyEvaluation = earlyEvaluation; - this.schemas = schemas; - this.fieldPositions = fieldPositions; - this.postJoinFilterExpression = postJoinFilterExpression; - this.limit = limit; - this.forceProjection = forceProjection; - } - - public KeyValueSchema getJoinedSchema() { - return joinedSchema; - } - - public ImmutableBytesPtr[] getJoinIds() { - return joinIds; - } - - public List[] getJoinExpressions() { - return joinExpressions; - } - - public JoinType[] getJoinTypes() { - return joinTypes; - } - - public boolean[] earlyEvaluation() { - return earlyEvaluation; - } - - public KeyValueSchema[] getSchemas() { - return schemas; - } - - public int[] getFieldPositions() { - return fieldPositions; - } - - public Expression getPostJoinFilterExpression() { - return postJoinFilterExpression; - } - - public Integer getLimit() { - return limit; - } - - public boolean forceProjection() { - return forceProjection; - } - - public static void serializeHashJoinIntoScan(Scan scan, HashJoinInfo joinInfo) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - try { - DataOutputStream output = new DataOutputStream(stream); - joinInfo.joinedSchema.write(output); - int count = joinInfo.joinIds.length; - WritableUtils.writeVInt(output, count); - for (int i = 0; i < count; i++) { - joinInfo.joinIds[i].write(output); - WritableUtils.writeVInt(output, joinInfo.joinExpressions[i].size()); - for (Expression expr : joinInfo.joinExpressions[i]) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expr).ordinal()); - expr.write(output); - } - WritableUtils.writeVInt(output, joinInfo.joinTypes[i].ordinal()); - output.writeBoolean(joinInfo.earlyEvaluation[i]); - joinInfo.schemas[i].write(output); - WritableUtils.writeVInt(output, joinInfo.fieldPositions[i]); - } - if (joinInfo.postJoinFilterExpression != null) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(joinInfo.postJoinFilterExpression).ordinal()); - joinInfo.postJoinFilterExpression.write(output); - } else { - WritableUtils.writeVInt(output, -1); - } - WritableUtils.writeVInt(output, joinInfo.limit == null ? 
-1 : joinInfo.limit); - output.writeBoolean(joinInfo.forceProjection); - scan.setAttribute(HASH_JOIN, stream.toByteArray()); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } + private static final String HASH_JOIN = "HashJoin"; + + private KeyValueSchema joinedSchema; + private ImmutableBytesPtr[] joinIds; + private List[] joinExpressions; + private JoinType[] joinTypes; + private boolean[] earlyEvaluation; + private KeyValueSchema[] schemas; + private int[] fieldPositions; + private Expression postJoinFilterExpression; + private Integer limit; + private boolean forceProjection; // always true now, but for backward compatibility. + + public HashJoinInfo(PTable joinedTable, ImmutableBytesPtr[] joinIds, + List[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, + PTable[] tables, int[] fieldPositions, Expression postJoinFilterExpression, Integer limit) { + this(buildSchema(joinedTable), joinIds, joinExpressions, joinTypes, earlyEvaluation, + buildSchemas(tables), fieldPositions, postJoinFilterExpression, limit, true); + } + + private static KeyValueSchema[] buildSchemas(PTable[] tables) { + KeyValueSchema[] schemas = new KeyValueSchema[tables.length]; + for (int i = 0; i < tables.length; i++) { + schemas[i] = buildSchema(tables[i]); + } + return schemas; + } + + private static KeyValueSchema buildSchema(PTable table) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0); + if (table != null) { + for (PColumn column : table.getColumns()) { + if (!SchemaUtil.isPKColumn(column)) { + builder.addField(column); } - - } - - @SuppressWarnings("unchecked") - public static HashJoinInfo deserializeHashJoinFromScan(Scan scan) { - byte[] join = scan.getAttribute(HASH_JOIN); - if (join == null) { - return null; + } + } + return builder.build(); + } + + private HashJoinInfo(KeyValueSchema joinedSchema, ImmutableBytesPtr[] joinIds, + List[] joinExpressions, JoinType[] joinTypes, boolean[] earlyEvaluation, + KeyValueSchema[] schemas, int[] fieldPositions, Expression postJoinFilterExpression, + Integer limit, boolean forceProjection) { + this.joinedSchema = joinedSchema; + this.joinIds = joinIds; + this.joinExpressions = joinExpressions; + this.joinTypes = joinTypes; + this.earlyEvaluation = earlyEvaluation; + this.schemas = schemas; + this.fieldPositions = fieldPositions; + this.postJoinFilterExpression = postJoinFilterExpression; + this.limit = limit; + this.forceProjection = forceProjection; + } + + public KeyValueSchema getJoinedSchema() { + return joinedSchema; + } + + public ImmutableBytesPtr[] getJoinIds() { + return joinIds; + } + + public List[] getJoinExpressions() { + return joinExpressions; + } + + public JoinType[] getJoinTypes() { + return joinTypes; + } + + public boolean[] earlyEvaluation() { + return earlyEvaluation; + } + + public KeyValueSchema[] getSchemas() { + return schemas; + } + + public int[] getFieldPositions() { + return fieldPositions; + } + + public Expression getPostJoinFilterExpression() { + return postJoinFilterExpression; + } + + public Integer getLimit() { + return limit; + } + + public boolean forceProjection() { + return forceProjection; + } + + public static void serializeHashJoinIntoScan(Scan scan, HashJoinInfo joinInfo) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try { + DataOutputStream output = new DataOutputStream(stream); + joinInfo.joinedSchema.write(output); + int count = joinInfo.joinIds.length; + 
WritableUtils.writeVInt(output, count); + for (int i = 0; i < count; i++) { + joinInfo.joinIds[i].write(output); + WritableUtils.writeVInt(output, joinInfo.joinExpressions[i].size()); + for (Expression expr : joinInfo.joinExpressions[i]) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expr).ordinal()); + expr.write(output); } - ByteArrayInputStream stream = new ByteArrayInputStream(join); - try { - DataInputStream input = new DataInputStream(stream); - KeyValueSchema joinedSchema = new KeyValueSchema(); - joinedSchema.readFields(input); - int count = WritableUtils.readVInt(input); - ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count]; - List[] joinExpressions = new List[count]; - JoinType[] joinTypes = new JoinType[count]; - boolean[] earlyEvaluation = new boolean[count]; - KeyValueSchema[] schemas = new KeyValueSchema[count]; - int[] fieldPositions = new int[count]; - for (int i = 0; i < count; i++) { - joinIds[i] = new ImmutableBytesPtr(); - joinIds[i].readFields(input); - int nExprs = WritableUtils.readVInt(input); - joinExpressions[i] = new ArrayList(nExprs); - for (int j = 0; j < nExprs; j++) { - int expressionOrdinal = WritableUtils.readVInt(input); - Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(input); - joinExpressions[i].add(expression); - } - int type = WritableUtils.readVInt(input); - joinTypes[i] = JoinType.values()[type]; - earlyEvaluation[i] = input.readBoolean(); - schemas[i] = new KeyValueSchema(); - schemas[i].readFields(input); - fieldPositions[i] = WritableUtils.readVInt(input); - } - Expression postJoinFilterExpression = null; - int expressionOrdinal = WritableUtils.readVInt(input); - if (expressionOrdinal != -1) { - postJoinFilterExpression = ExpressionType.values()[expressionOrdinal].newInstance(); - postJoinFilterExpression.readFields(input); - } - int limit = -1; - boolean forceProjection = false; - // Read these and ignore if we don't find them as they were not - // present in Apache Phoenix 3.0.0 release. This allows a newer - // 3.1 server to work with an older 3.0 client without force - // both to be upgraded in lock step. - try { - limit = WritableUtils.readVInt(input); - forceProjection = input.readBoolean(); - } catch (EOFException ignore) { - } - return new HashJoinInfo(joinedSchema, joinIds, joinExpressions, joinTypes, earlyEvaluation, schemas, fieldPositions, postJoinFilterExpression, limit >= 0 ? limit : null, forceProjection); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } + WritableUtils.writeVInt(output, joinInfo.joinTypes[i].ordinal()); + output.writeBoolean(joinInfo.earlyEvaluation[i]); + joinInfo.schemas[i].write(output); + WritableUtils.writeVInt(output, joinInfo.fieldPositions[i]); + } + if (joinInfo.postJoinFilterExpression != null) { + WritableUtils.writeVInt(output, + ExpressionType.valueOf(joinInfo.postJoinFilterExpression).ordinal()); + joinInfo.postJoinFilterExpression.write(output); + } else { + WritableUtils.writeVInt(output, -1); + } + WritableUtils.writeVInt(output, joinInfo.limit == null ? 
-1 : joinInfo.limit); + output.writeBoolean(joinInfo.forceProjection); + scan.setAttribute(HASH_JOIN, stream.toByteArray()); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + } + + @SuppressWarnings("unchecked") + public static HashJoinInfo deserializeHashJoinFromScan(Scan scan) { + byte[] join = scan.getAttribute(HASH_JOIN); + if (join == null) { + return null; + } + ByteArrayInputStream stream = new ByteArrayInputStream(join); + try { + DataInputStream input = new DataInputStream(stream); + KeyValueSchema joinedSchema = new KeyValueSchema(); + joinedSchema.readFields(input); + int count = WritableUtils.readVInt(input); + ImmutableBytesPtr[] joinIds = new ImmutableBytesPtr[count]; + List[] joinExpressions = new List[count]; + JoinType[] joinTypes = new JoinType[count]; + boolean[] earlyEvaluation = new boolean[count]; + KeyValueSchema[] schemas = new KeyValueSchema[count]; + int[] fieldPositions = new int[count]; + for (int i = 0; i < count; i++) { + joinIds[i] = new ImmutableBytesPtr(); + joinIds[i].readFields(input); + int nExprs = WritableUtils.readVInt(input); + joinExpressions[i] = new ArrayList(nExprs); + for (int j = 0; j < nExprs; j++) { + int expressionOrdinal = WritableUtils.readVInt(input); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(input); + joinExpressions[i].add(expression); } - } + int type = WritableUtils.readVInt(input); + joinTypes[i] = JoinType.values()[type]; + earlyEvaluation[i] = input.readBoolean(); + schemas[i] = new KeyValueSchema(); + schemas[i].readFields(input); + fieldPositions[i] = WritableUtils.readVInt(input); + } + Expression postJoinFilterExpression = null; + int expressionOrdinal = WritableUtils.readVInt(input); + if (expressionOrdinal != -1) { + postJoinFilterExpression = ExpressionType.values()[expressionOrdinal].newInstance(); + postJoinFilterExpression.readFields(input); + } + int limit = -1; + boolean forceProjection = false; + // Read these and ignore if we don't find them as they were not + // present in Apache Phoenix 3.0.0 release. This allows a newer + // 3.1 server to work with an older 3.0 client without force + // both to be upgraded in lock step. + try { + limit = WritableUtils.readVInt(input); + forceProjection = input.readBoolean(); + } catch (EOFException ignore) { + } + return new HashJoinInfo(joinedSchema, joinIds, joinExpressions, joinTypes, earlyEvaluation, + schemas, fieldPositions, postJoinFilterExpression, limit >= 0 ? limit : null, + forceProjection); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/join/MaxServerCacheSizeExceededException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/join/MaxServerCacheSizeExceededException.java index 0b26fd6d060..c53a84d6a41 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/join/MaxServerCacheSizeExceededException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/join/MaxServerCacheSizeExceededException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,30 +18,26 @@ package org.apache.phoenix.join; /** - * - * Exception thrown when the size of the hash cache exceeds the - * maximum size as specified by the phoenix.query.maxHashCacheBytes - * parameter in the {@link org.apache.hadoop.conf.Configuration} - * - * + * Exception thrown when the size of the hash cache exceeds the maximum size as specified by the + * phoenix.query.maxHashCacheBytes parameter in the {@link org.apache.hadoop.conf.Configuration} * @since 0.1 */ public class MaxServerCacheSizeExceededException extends RuntimeException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public MaxServerCacheSizeExceededException() { - } + public MaxServerCacheSizeExceededException() { + } - public MaxServerCacheSizeExceededException(String message) { - super(message); - } + public MaxServerCacheSizeExceededException(String message) { + super(message); + } - public MaxServerCacheSizeExceededException(Throwable cause) { - super(cause); - } + public MaxServerCacheSizeExceededException(Throwable cause) { + super(cause); + } - public MaxServerCacheSizeExceededException(String message, Throwable cause) { - super(message, cause); - } + public MaxServerCacheSizeExceededException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ActivityLogInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ActivityLogInfo.java index 3baf44754dd..9ad6f9e80a8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ActivityLogInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ActivityLogInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,41 +22,37 @@ import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarchar; -import java.util.EnumSet; - - public enum ActivityLogInfo { - START_TIME("s", LogLevel.INFO,PTimestamp.INSTANCE), - OP_TIME("u", LogLevel.INFO,PTimestamp.INSTANCE), - TENANT_ID("t", LogLevel.INFO,PVarchar.INSTANCE), - CQS_NAME("p", LogLevel.INFO,PVarchar.INSTANCE), - REQUEST_ID("r", LogLevel.INFO,PVarchar.INSTANCE), - TABLE_NAME("n", LogLevel.INFO,PVarchar.INSTANCE), - OP_NAME("o", LogLevel.INFO,PVarchar.INSTANCE), - OP_STMTS("#", LogLevel.INFO, PInteger.INSTANCE); - - public final String shortName; - public final LogLevel logLevel; - public final PDataType dataType; - - private ActivityLogInfo(String shortName, LogLevel logLevel, PDataType dataType) { - this.shortName = shortName; - this.logLevel=logLevel; - this.dataType=dataType; - } - - public String getShortName() { - return shortName; - } - - public LogLevel getLogLevel() { - return logLevel; - } - - public PDataType getDataType() { - return dataType; - } - + START_TIME("s", LogLevel.INFO, PTimestamp.INSTANCE), + OP_TIME("u", LogLevel.INFO, PTimestamp.INSTANCE), + TENANT_ID("t", LogLevel.INFO, PVarchar.INSTANCE), + CQS_NAME("p", LogLevel.INFO, PVarchar.INSTANCE), + REQUEST_ID("r", LogLevel.INFO, PVarchar.INSTANCE), + TABLE_NAME("n", LogLevel.INFO, PVarchar.INSTANCE), + OP_NAME("o", LogLevel.INFO, PVarchar.INSTANCE), + OP_STMTS("#", LogLevel.INFO, PInteger.INSTANCE); + + public final String shortName; + public final LogLevel logLevel; + public final PDataType dataType; + + private ActivityLogInfo(String shortName, LogLevel logLevel, PDataType dataType) { + this.shortName = shortName; + this.logLevel = logLevel; + this.dataType = dataType; + } + + public String getShortName() { + return shortName; + } + + public LogLevel getLogLevel() { + return logLevel; + } + + public PDataType getDataType() { + return dataType; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/AuditQueryLogger.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/AuditQueryLogger.java index 8e4fc51e881..3ec1f1ba855 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/AuditQueryLogger.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/AuditQueryLogger.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,103 +17,100 @@ */ package org.apache.phoenix.log; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.monitoring.MetricType; - import java.util.Map; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.monitoring.MetricType; /* * Wrapper for query translator */ public class AuditQueryLogger extends QueryLogger { - private LogLevel auditLogLevel; + private LogLevel auditLogLevel; + private AuditQueryLogger(PhoenixConnection connection) { + super(connection); + auditLogLevel = connection.getAuditLogLevel(); - private AuditQueryLogger(PhoenixConnection connection) { - super(connection); - auditLogLevel = connection.getAuditLogLevel(); - - } - - private AuditQueryLogger() { - super(); - auditLogLevel = LogLevel.OFF; - } - - public static final AuditQueryLogger NO_OP_INSTANCE = new AuditQueryLogger() { - @Override - public void log(QueryLogInfo queryLogInfo, Object info) { + } - } + private AuditQueryLogger() { + super(); + auditLogLevel = LogLevel.OFF; + } - @Override - public boolean isDebugEnabled() { - return false; - } + public static final AuditQueryLogger NO_OP_INSTANCE = new AuditQueryLogger() { + @Override + public void log(QueryLogInfo queryLogInfo, Object info) { - @Override - public boolean isInfoEnabled() { - return false; - } - - @Override - public void sync( - Map> readMetrics, Map overAllMetrics) { - - } - - @Override - public void syncAudit( - Map> readMetrics, Map overAllMetrics) { - - } - - @Override - public boolean isSynced(){ - return true; - } - }; - - public static AuditQueryLogger getInstance(PhoenixConnection connection, boolean isSystemTable) { - if (connection.getAuditLogLevel() == LogLevel.OFF || isSystemTable) { - return NO_OP_INSTANCE; - } - return new AuditQueryLogger(connection); } - - /** - * Is audit logging currently enabled? - * Call this method to prevent having to perform expensive operations (for example, - * String concatenation) when the audit log level is more than info. - */ - public boolean isAuditLoggingEnabled(){ - return isAuditLevelEnabled(LogLevel.INFO); + @Override + public boolean isDebugEnabled() { + return false; } - private boolean isAuditLevelEnabled(LogLevel logLevel){ - return this.auditLogLevel != null && logLevel != LogLevel.OFF ? logLevel.ordinal() <= this.auditLogLevel.ordinal() - : false; + @Override + public boolean isInfoEnabled() { + return false; } + @Override + public void sync(Map> readMetrics, + Map overAllMetrics) { + + } + @Override + public void syncAudit(Map> readMetrics, + Map overAllMetrics) { - public void sync(Map> readMetrics, Map overAllMetrics) { - syncBase(readMetrics, overAllMetrics, auditLogLevel); } - public void syncAudit() { - syncAudit(null, null); + @Override + public boolean isSynced() { + return true; } + }; - /** - * We force LogLevel.TRACE here because in QueryLogInfo the minimum LogLevel for - * TABLE_NAME_I is Debug and for BIND_PARAMETERS_I is TRACE and we would like to see - * these parameters even in INFO level when using DDL and DML operations. 
- */ - public void syncAudit(Map> readMetrics, Map overAllMetrics) { - syncBase(readMetrics, overAllMetrics, LogLevel.TRACE); + public static AuditQueryLogger getInstance(PhoenixConnection connection, boolean isSystemTable) { + if (connection.getAuditLogLevel() == LogLevel.OFF || isSystemTable) { + return NO_OP_INSTANCE; } + return new AuditQueryLogger(connection); + } + + /** + * Is audit logging currently enabled? Call this method to prevent having to perform expensive + * operations (for example, String concatenation) when the audit log level is more than info. + */ + public boolean isAuditLoggingEnabled() { + return isAuditLevelEnabled(LogLevel.INFO); + } + + private boolean isAuditLevelEnabled(LogLevel logLevel) { + return this.auditLogLevel != null && logLevel != LogLevel.OFF + ? logLevel.ordinal() <= this.auditLogLevel.ordinal() + : false; + } + + public void sync(Map> readMetrics, + Map overAllMetrics) { + syncBase(readMetrics, overAllMetrics, auditLogLevel); + } + + public void syncAudit() { + syncAudit(null, null); + } + + /** + * We force LogLevel.TRACE here because in QueryLogInfo the minimum LogLevel for TABLE_NAME_I is + * Debug and for BIND_PARAMETERS_I is TRACE and we would like to see these parameters even in INFO + * level when using DDL and DML operations. + */ + public void syncAudit(Map> readMetrics, + Map overAllMetrics) { + syncBase(readMetrics, overAllMetrics, LogLevel.TRACE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/BaseConnectionLimiter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/BaseConnectionLimiter.java index ec07deb4653..4af7e903599 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/BaseConnectionLimiter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/BaseConnectionLimiter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,131 +17,136 @@ */ package org.apache.phoenix.log; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; +import static org.apache.phoenix.query.QueryServices.QUERY_SERVICES_NAME; + +import java.sql.SQLException; + +import javax.annotation.concurrent.GuardedBy; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.monitoring.MetricType; import org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesMetricsManager; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - -import javax.annotation.concurrent.GuardedBy; -import java.sql.SQLException; - -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; -import static org.apache.phoenix.query.QueryServices.QUERY_SERVICES_NAME; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * A base class for concrete implementation of ConnectionLimiter. - * 1. Should be called only when holding the ConnectionQueryServicesImpl.connectionCountLock - * 2. Does the basic accounting on open and close connection calls. - * 3. If connection throttling is enabled checks and calls onLimit if the threshold is breached. + * A base class for concrete implementation of ConnectionLimiter. 1. Should be called only when + * holding the ConnectionQueryServicesImpl.connectionCountLock 2. Does the basic accounting on open + * and close connection calls. 3. If connection throttling is enabled checks and calls onLimit if + * the threshold is breached. */ public abstract class BaseConnectionLimiter implements ConnectionLimiter { - protected int connectionCount = 0; - protected int internalConnectionCount = 0; - protected int connectionThrottledCounter = 0; - protected String profileName; - protected int maxConnectionsAllowed; - protected int maxInternalConnectionsAllowed; - protected boolean shouldThrottleNumConnections; - - protected BaseConnectionLimiter(String profileName, boolean shouldThrottleNumConnections, int maxConnectionsAllowed, int maxInternalConnectionsAllowed) { - this.profileName = profileName; - this.shouldThrottleNumConnections = shouldThrottleNumConnections; - this.maxConnectionsAllowed = maxConnectionsAllowed; - this.maxInternalConnectionsAllowed = maxInternalConnectionsAllowed; - } - @Override - @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") - public void acquireConnection(PhoenixConnection connection) throws SQLException { - Preconditions.checkNotNull(connection.getUniqueID(), "Got null UUID for Phoenix Connection!"); - - /* - * If we are throttling connections internal connections and client created connections - * are counted separately against each respective quota. 
- */ - if (shouldThrottleNumConnections) { - int futureConnections = 1 + ( connection.isInternalConnection() ? internalConnectionCount : connectionCount); - int allowedConnections = connection.isInternalConnection() ? maxInternalConnectionsAllowed : maxConnectionsAllowed; - // if throttling threshold is reached, try reclaiming garbage collected phoenix connections. - if ((allowedConnections != 0) && (futureConnections > allowedConnections) && (onSweep(connection.isInternalConnection()) == 0)) { - GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER.increment(); - connectionThrottledCounter++; - String connectionQueryServiceName = connection.getQueryServices() - .getConfiguration().get(QUERY_SERVICES_NAME); - // Since this is ever-increasing counter and only gets reset at JVM restart - // Both global and connection query service level, - // we won't create histogram for this metric. - ConnectionQueryServicesMetricsManager.updateMetrics( - connectionQueryServiceName, - PHOENIX_CONNECTIONS_THROTTLED_COUNTER, connectionThrottledCounter); - - // Let the concrete classes handle the onLimit. - // They can either throw the exception back or handle it. - SQLException connectionThrottledException = connection.isInternalConnection() ? - new SQLExceptionInfo.Builder(SQLExceptionCode.NEW_INTERNAL_CONNECTION_THROTTLED). - build().buildException() : - new SQLExceptionInfo.Builder(SQLExceptionCode.NEW_CONNECTION_THROTTLED). - build().buildException(); - throw connectionThrottledException; - } - } - - if (connection.isInternalConnection()) { - internalConnectionCount++; - } else { - connectionCount++; - } - + protected int connectionCount = 0; + protected int internalConnectionCount = 0; + protected int connectionThrottledCounter = 0; + protected String profileName; + protected int maxConnectionsAllowed; + protected int maxInternalConnectionsAllowed; + protected boolean shouldThrottleNumConnections; + + protected BaseConnectionLimiter(String profileName, boolean shouldThrottleNumConnections, + int maxConnectionsAllowed, int maxInternalConnectionsAllowed) { + this.profileName = profileName; + this.shouldThrottleNumConnections = shouldThrottleNumConnections; + this.maxConnectionsAllowed = maxConnectionsAllowed; + this.maxInternalConnectionsAllowed = maxInternalConnectionsAllowed; + } + + @Override + @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") + public void acquireConnection(PhoenixConnection connection) throws SQLException { + Preconditions.checkNotNull(connection.getUniqueID(), "Got null UUID for Phoenix Connection!"); + + /* + * If we are throttling connections internal connections and client created connections are + * counted separately against each respective quota. + */ + if (shouldThrottleNumConnections) { + int futureConnections = + 1 + (connection.isInternalConnection() ? internalConnectionCount : connectionCount); + int allowedConnections = + connection.isInternalConnection() ? maxInternalConnectionsAllowed : maxConnectionsAllowed; + // if throttling threshold is reached, try reclaiming garbage collected phoenix connections. 
+ if ( + (allowedConnections != 0) && (futureConnections > allowedConnections) + && (onSweep(connection.isInternalConnection()) == 0) + ) { + GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER.increment(); + connectionThrottledCounter++; + String connectionQueryServiceName = + connection.getQueryServices().getConfiguration().get(QUERY_SERVICES_NAME); + // Since this is ever-increasing counter and only gets reset at JVM restart + // Both global and connection query service level, + // we won't create histogram for this metric. + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + PHOENIX_CONNECTIONS_THROTTLED_COUNTER, connectionThrottledCounter); + + // Let the concrete classes handle the onLimit. + // They can either throw the exception back or handle it. + SQLException connectionThrottledException = connection.isInternalConnection() + ? new SQLExceptionInfo.Builder(SQLExceptionCode.NEW_INTERNAL_CONNECTION_THROTTLED).build() + .buildException() + : new SQLExceptionInfo.Builder(SQLExceptionCode.NEW_CONNECTION_THROTTLED).build() + .buildException(); + throw connectionThrottledException; + } } - @Override - @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") - public void returnConnection(PhoenixConnection connection) { - if (connection.isInternalConnection() && internalConnectionCount > 0) { - --internalConnectionCount; - } else if (!connection.isInternalConnection() && connectionCount > 0) { - --connectionCount; - } + if (connection.isInternalConnection()) { + internalConnectionCount++; + } else { + connectionCount++; } - @Override - @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") - public boolean isLastConnection() { - return connectionCount + internalConnectionCount - 1 <= 0; - } - @Override - public boolean isShouldThrottleNumConnections() { - return shouldThrottleNumConnections; - } - - @VisibleForTesting - @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") - public int getConnectionCount() { - return connectionCount; - } + } - @Override - public int onSweep(boolean internal) { - return 0; - } - - @VisibleForTesting - @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") - public int getInternalConnectionCount() { - return internalConnectionCount; - } - - public int getMaxConnectionsAllowed() { - return maxConnectionsAllowed; - } - - public int getMaxInternalConnectionsAllowed() { - return maxInternalConnectionsAllowed; + @Override + @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") + public void returnConnection(PhoenixConnection connection) { + if (connection.isInternalConnection() && internalConnectionCount > 0) { + --internalConnectionCount; + } else if (!connection.isInternalConnection() && connectionCount > 0) { + --connectionCount; } + } + + @Override + @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") + public boolean isLastConnection() { + return connectionCount + internalConnectionCount - 1 <= 0; + } + + @Override + public boolean isShouldThrottleNumConnections() { + return shouldThrottleNumConnections; + } + + @VisibleForTesting + @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") + public int getConnectionCount() { + return connectionCount; + } + + @Override + public int onSweep(boolean internal) { + return 0; + } + + @VisibleForTesting + @GuardedBy("ConnectionQueryServicesImpl.connectionCountLock") + public int getInternalConnectionCount() { + return internalConnectionCount; + } + + public int getMaxConnectionsAllowed() { + return maxConnectionsAllowed; + } + + public int 
getMaxInternalConnectionsAllowed() { + return maxInternalConnectionsAllowed; + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionActivityLogger.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionActivityLogger.java index ee2a2a8af45..5837c2b285b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionActivityLogger.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionActivityLogger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,6 @@ */ package org.apache.phoenix.log; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.schema.PName; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.util.EnvironmentEdgeManager; - import java.lang.ref.WeakReference; import java.util.List; import java.util.UUID; @@ -29,122 +24,134 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.schema.PName; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.util.EnvironmentEdgeManager; /** - * Logger for connection related activities. - * See also {@link ActivityLogInfo} + * Logger for connection related activities. 
See also {@link ActivityLogInfo} */ public class ConnectionActivityLogger { - private LogLevel logLevel; - private boolean isInternalConnection; - private UUID connectionID; - private WeakReference connectionReference; - List activityList = Stream.of(ActivityLogInfo.values()).map(f -> "").collect(Collectors.toList()); - - public ConnectionActivityLogger(PhoenixConnection connection, LogLevel level) { - logLevel = level; - this.isInternalConnection = connection.isInternalConnection(); - this.connectionID = connection.getUniqueID(); - this.connectionReference = new WeakReference(connection); - connection.setActivityLogger(this); - log(ActivityLogInfo.START_TIME, String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); - PName tenantName = connection.getTenantId(); - if (tenantName != null) { - log(ActivityLogInfo.TENANT_ID, tenantName.getString()); - } - // TODO: CQS_NAME (Connection Profile Name) - + private LogLevel logLevel; + private boolean isInternalConnection; + private UUID connectionID; + private WeakReference connectionReference; + List activityList = + Stream.of(ActivityLogInfo.values()).map(f -> "").collect(Collectors.toList()); + + public ConnectionActivityLogger(PhoenixConnection connection, LogLevel level) { + logLevel = level; + this.isInternalConnection = connection.isInternalConnection(); + this.connectionID = connection.getUniqueID(); + this.connectionReference = new WeakReference(connection); + connection.setActivityLogger(this); + log(ActivityLogInfo.START_TIME, String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); + PName tenantName = connection.getTenantId(); + if (tenantName != null) { + log(ActivityLogInfo.TENANT_ID, tenantName.getString()); } + // TODO: CQS_NAME (Connection Profile Name) - public ConnectionActivityLogger() { - logLevel = LogLevel.OFF; - } - - public static final ConnectionActivityLogger NO_OP_LOGGER = new ConnectionActivityLogger() { - - @Override - public void log(ActivityLogInfo ActivityLogInfo, String info) {} + } - @Override - public boolean isDebugEnabled() { - return false; - } + public ConnectionActivityLogger() { + logLevel = LogLevel.OFF; + } - @Override - public boolean isInfoEnabled() { - return false; - } + public static final ConnectionActivityLogger NO_OP_LOGGER = new ConnectionActivityLogger() { - @Override - public boolean isLevelEnabled(LogLevel logLevel) { - return false; - } - - @Override - public boolean isInternalConnection() { - return false; - } - - @Override - public PhoenixConnection getConnection() { return null; } - - @Override - public String getActivityLog() {return "";} + @Override + public void log(ActivityLogInfo ActivityLogInfo, String info) { + } - @Override - public String getConnectionID() {return "";} + @Override + public boolean isDebugEnabled() { + return false; + } - }; + @Override + public boolean isInfoEnabled() { + return false; + } - public String getConnectionID() { - return connectionID.toString(); + @Override + public boolean isLevelEnabled(LogLevel logLevel) { + return false; } + @Override public boolean isInternalConnection() { - return isInternalConnection; + return false; } + @Override public PhoenixConnection getConnection() { - return connectionReference.get(); - } - - /** - * Set logging info for a given activity - */ - public void log(ActivityLogInfo activity, String info) { - if (logLevel == LogLevel.OFF) return; - activityList.set(activity.ordinal(), info); + return null; } - /** - * Get the formatted log for external logging. 
- */ + @Override public String getActivityLog() { - return IntStream - .range(0, ActivityLogInfo.values().length) - .filter((i) -> {return !Strings.isNullOrEmpty(activityList.get(i)) && isLevelEnabled(ActivityLogInfo.values()[i].getLogLevel());}) - .mapToObj(i -> new StringBuilder().append(ActivityLogInfo.values()[i].shortName).append("=").append(activityList.get(i)).toString()) - .collect(Collectors.joining(", ")); - } - /** - * Is Info logging currently enabled? - * Call this method to prevent having to perform expensive operations (for example, String concatenation) when the log level is more than info. - * @return - */ - public boolean isInfoEnabled(){ - return isLevelEnabled(LogLevel.INFO); + return ""; } - /** - * Is debug logging currently enabled? - * Call this method to prevent having to perform expensive operations (for example, String concatenation) when the log level is more than debug. - */ - public boolean isDebugEnabled(){ - return isLevelEnabled(LogLevel.DEBUG); + @Override + public String getConnectionID() { + return ""; } - public boolean isLevelEnabled(LogLevel logLevel) { - return this.logLevel != null && logLevel != LogLevel.OFF ? logLevel.ordinal() <= this.logLevel.ordinal() - : false; - } + }; + + public String getConnectionID() { + return connectionID.toString(); + } + + public boolean isInternalConnection() { + return isInternalConnection; + } + + public PhoenixConnection getConnection() { + return connectionReference.get(); + } + + /** + * Set logging info for a given activity + */ + public void log(ActivityLogInfo activity, String info) { + if (logLevel == LogLevel.OFF) return; + activityList.set(activity.ordinal(), info); + } + + /** + * Get the formatted log for external logging. + */ + public String getActivityLog() { + return IntStream.range(0, ActivityLogInfo.values().length).filter((i) -> { + return !Strings.isNullOrEmpty(activityList.get(i)) + && isLevelEnabled(ActivityLogInfo.values()[i].getLogLevel()); + }).mapToObj(i -> new StringBuilder().append(ActivityLogInfo.values()[i].shortName).append("=") + .append(activityList.get(i)).toString()).collect(Collectors.joining(", ")); + } + + /** + * Is Info logging currently enabled? Call this method to prevent having to perform expensive + * operations (for example, String concatenation) when the log level is more than info. + */ + public boolean isInfoEnabled() { + return isLevelEnabled(LogLevel.INFO); + } + + /** + * Is debug logging currently enabled? Call this method to prevent having to perform expensive + * operations (for example, String concatenation) when the log level is more than debug. + */ + public boolean isDebugEnabled() { + return isLevelEnabled(LogLevel.DEBUG); + } + + public boolean isLevelEnabled(LogLevel logLevel) { + return this.logLevel != null && logLevel != LogLevel.OFF + ? logLevel.ordinal() <= this.logLevel.ordinal() + : false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionLimiter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionLimiter.java index 6e9f98b9b24..ca2a98225a9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionLimiter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/ConnectionLimiter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,28 +16,29 @@ * limitations under the License. */ package org.apache.phoenix.log; -import org.apache.phoenix.jdbc.PhoenixConnection; import java.sql.SQLException; +import org.apache.phoenix.jdbc.PhoenixConnection; + /** - * This interface defines the contract for storing information about Phoenix connections - * for debugging client-side issues like + * This interface defines the contract for storing information about Phoenix connections for + * debugging client-side issues like * {@link org.apache.phoenix.exception.SQLExceptionCode#NEW_CONNECTION_THROTTLED} */ public interface ConnectionLimiter { - void acquireConnection(PhoenixConnection connection) throws SQLException; + void acquireConnection(PhoenixConnection connection) throws SQLException; - void returnConnection(PhoenixConnection connection); + void returnConnection(PhoenixConnection connection); - int onSweep(boolean internal) ; + int onSweep(boolean internal); - boolean isLastConnection(); + boolean isLastConnection(); - boolean isShouldThrottleNumConnections(); + boolean isShouldThrottleNumConnections(); - int getConnectionCount(); + int getConnectionCount(); - int getInternalConnectionCount(); + int getInternalConnectionCount(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/DefaultConnectionLimiter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/DefaultConnectionLimiter.java index 2045ef510ee..7fbf952fa5f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/DefaultConnectionLimiter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/DefaultConnectionLimiter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,48 +17,44 @@ */ package org.apache.phoenix.log; -import org.apache.phoenix.jdbc.PhoenixConnection; - -import java.sql.SQLException; - /** * Default implementation of a ConnectionLimiter. 
*/ public class DefaultConnectionLimiter extends BaseConnectionLimiter { - private DefaultConnectionLimiter(Builder builder) { - super(builder.profileName, builder.shouldThrottleNumConnections, builder.maxConnectionsAllowed, builder.maxInternalConnectionsAllowed); + private DefaultConnectionLimiter(Builder builder) { + super(builder.profileName, builder.shouldThrottleNumConnections, builder.maxConnectionsAllowed, + builder.maxInternalConnectionsAllowed); + } + + public static class Builder { + protected String profileName; + protected int maxConnectionsAllowed; + protected int maxInternalConnectionsAllowed; + protected boolean shouldThrottleNumConnections; + + public Builder(boolean shouldThrottleNumConnections) { + this.shouldThrottleNumConnections = shouldThrottleNumConnections; } - public static class Builder { - protected String profileName; - protected int maxConnectionsAllowed; - protected int maxInternalConnectionsAllowed; - protected boolean shouldThrottleNumConnections; - - public Builder(boolean shouldThrottleNumConnections) { - this.shouldThrottleNumConnections = shouldThrottleNumConnections; - } - - public DefaultConnectionLimiter.Builder withConnectionProfile(String profileName) { - this.profileName = profileName; - return this; - } - - public DefaultConnectionLimiter.Builder withMaxAllowed(int maxAllowed) { - this.maxConnectionsAllowed = maxAllowed; - return this; - } - - public DefaultConnectionLimiter.Builder withMaxInternalAllowed(int maxInternalAllowed) { - this.maxInternalConnectionsAllowed = maxInternalAllowed; - return this; - } + public DefaultConnectionLimiter.Builder withConnectionProfile(String profileName) { + this.profileName = profileName; + return this; + } + public DefaultConnectionLimiter.Builder withMaxAllowed(int maxAllowed) { + this.maxConnectionsAllowed = maxAllowed; + return this; + } - public ConnectionLimiter build() { - return new DefaultConnectionLimiter(this); - } + public DefaultConnectionLimiter.Builder withMaxInternalAllowed(int maxInternalAllowed) { + this.maxInternalConnectionsAllowed = maxInternalAllowed; + return this; + } + public ConnectionLimiter build() { + return new DefaultConnectionLimiter(this); } + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogLevel.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogLevel.java index 269b4f479d6..bdf16e5398a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogLevel.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogLevel.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +18,8 @@ package org.apache.phoenix.log; public enum LogLevel { - OFF,INFO, DEBUG, TRACE + OFF, + INFO, + DEBUG, + TRACE } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogWriter.java index ff58d22619c..66bf5a4154a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogWriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LogWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,32 +21,23 @@
 import java.sql.SQLException;
 /**
- * Used by the event handler to write RingBufferEvent, this is done in a separate thread from the application configured
- * during disruptor
+ * Used by the event handler to write RingBufferEvent, this is done in a separate thread from the
+ * application configured during disruptor
 */
 public interface LogWriter {
-    /**
-     * Called by ring buffer event handler to write RingBufferEvent
-     *
-     * @param event
-     * @throws SQLException
-     * @throws IOException
-     */
-    void write(RingBufferEvent event) throws SQLException, IOException;
+  /**
+   * Called by ring buffer event handler to write RingBufferEvent
+   */
+  void write(RingBufferEvent event) throws SQLException, IOException;

-    /**
-     * will be called when disruptor is getting shutdown
-     *
-     * @throws IOException
-     * @throws SQLException
-     */
+  /**
+   * will be called when disruptor is getting shutdown
+   */

-    void close() throws IOException, SQLException;
+  void close() throws IOException, SQLException;

-    /**
-     * if writer is closed and cannot write further event
-     *
-     * @return
-     */
-    boolean isClosed();
+  /**
+   * if writer is closed and cannot write further event
+   */
+  boolean isClosed();
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LoggingConnectionLimiter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LoggingConnectionLimiter.java
index e93a8a7f885..a3458d2f02e 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/LoggingConnectionLimiter.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/LoggingConnectionLimiter.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,165 +17,174 @@ */ package org.apache.phoenix.log; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER; import java.sql.SQLException; import java.util.Iterator; import java.util.Map; import java.util.UUID; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * An implementation of a ConnectionLimiter which logs activity info at configured intervals - * for the active connections in the map when throttling threshold is reached. + * An implementation of a ConnectionLimiter which logs activity info at configured intervals for the + * active connections in the map when throttling threshold is reached. 
*/ public class LoggingConnectionLimiter extends BaseConnectionLimiter { - private static final Logger LOGGER = LoggerFactory.getLogger(LoggingConnectionLimiter.class); - private static long MIN_IN_MILLIS = 60 * 1000; - protected final boolean enableActivityLogging; - protected final long loggingIntervalInMillis; - protected long lastLoggedTimeInMillis; - protected long lastCollectedTimeInMillis; - // Map Phoenix connection UUID to its connection activity logger object - protected final Map openConnectionActivityLoggers; - - private LoggingConnectionLimiter(Builder builder) { - super(builder.profileName, builder.shouldThrottleNumConnections, builder.maxConnectionsAllowed, builder.maxInternalConnectionsAllowed); - this.enableActivityLogging = builder.enableActivityLogging; - this.loggingIntervalInMillis = builder.loggingIntervalInMins * MIN_IN_MILLIS; - this.lastCollectedTimeInMillis = this.lastLoggedTimeInMillis = System.currentTimeMillis(); - this.openConnectionActivityLoggers = Maps.newHashMap(); + private static final Logger LOGGER = LoggerFactory.getLogger(LoggingConnectionLimiter.class); + private static long MIN_IN_MILLIS = 60 * 1000; + protected final boolean enableActivityLogging; + protected final long loggingIntervalInMillis; + protected long lastLoggedTimeInMillis; + protected long lastCollectedTimeInMillis; + // Map Phoenix connection UUID to its connection activity logger object + protected final Map openConnectionActivityLoggers; + + private LoggingConnectionLimiter(Builder builder) { + super(builder.profileName, builder.shouldThrottleNumConnections, builder.maxConnectionsAllowed, + builder.maxInternalConnectionsAllowed); + this.enableActivityLogging = builder.enableActivityLogging; + this.loggingIntervalInMillis = builder.loggingIntervalInMins * MIN_IN_MILLIS; + this.lastCollectedTimeInMillis = this.lastLoggedTimeInMillis = System.currentTimeMillis(); + this.openConnectionActivityLoggers = Maps.newHashMap(); + } + + @Override + public void acquireConnection(PhoenixConnection connection) throws SQLException { + super.acquireConnection(connection); + if ( + (this.enableActivityLogging) && (this.openConnectionActivityLoggers.size() + < this.maxConnectionsAllowed + this.maxInternalConnectionsAllowed) + ) { + ConnectionActivityLogger logger = new ConnectionActivityLogger(connection, LogLevel.INFO); + this.openConnectionActivityLoggers.put(connection.getUniqueID(), logger); } - - @Override - public void acquireConnection(PhoenixConnection connection) throws SQLException { - super.acquireConnection(connection); - if ((this.enableActivityLogging) && (this.openConnectionActivityLoggers.size() < this.maxConnectionsAllowed + this.maxInternalConnectionsAllowed)) { - ConnectionActivityLogger logger = new ConnectionActivityLogger(connection, LogLevel.INFO); - this.openConnectionActivityLoggers.put(connection.getUniqueID(), logger); - } + } + + @Override + public void returnConnection(PhoenixConnection connection) { + super.returnConnection(connection); + UUID phxConnUniqueID = connection.getUniqueID(); + Preconditions.checkNotNull(phxConnUniqueID, "Got null UUID for Phoenix Connection!"); + if (this.enableActivityLogging) { + this.openConnectionActivityLoggers.remove(phxConnUniqueID); } - - @Override - public void returnConnection(PhoenixConnection connection) { - super.returnConnection(connection); - UUID phxConnUniqueID = connection.getUniqueID(); - Preconditions.checkNotNull(phxConnUniqueID, "Got null UUID for Phoenix Connection!"); - if (this.enableActivityLogging) { - 
this.openConnectionActivityLoggers.remove(phxConnUniqueID); + } + + @Override + public int onSweep(boolean internal) { + long currentTimeInMillis = System.currentTimeMillis(); + boolean shouldCollectNow = + (currentTimeInMillis - lastCollectedTimeInMillis) >= loggingIntervalInMillis; + int garbageCollectedConnections = 0; + if (this.enableActivityLogging && shouldCollectNow) { + Iterator> iterator = + openConnectionActivityLoggers.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + ConnectionActivityLogger logger = entry.getValue(); + // check for reclaim only for connection/logger that match the sweep type. for e.g internal + // or external client connections. + boolean checkReclaimable = ((logger.isInternalConnection() && internal) + || (!logger.isInternalConnection() && !internal)); + if (checkReclaimable) { + PhoenixConnection monitoredConnection = logger.getConnection(); + LOGGER.info(String.format("connection-sweep-activity-log for %s: %s", + logger.getConnectionID(), logger.getActivityLog())); + if (monitoredConnection == null) { + garbageCollectedConnections += collectConnection(internal); + iterator.remove(); + } } - } - - @Override - public int onSweep(boolean internal) { - long currentTimeInMillis = System.currentTimeMillis(); - boolean shouldCollectNow = (currentTimeInMillis - lastCollectedTimeInMillis) >= loggingIntervalInMillis; - int garbageCollectedConnections = 0; - if (this.enableActivityLogging && shouldCollectNow) { - Iterator> iterator = openConnectionActivityLoggers.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - ConnectionActivityLogger logger = entry.getValue(); - // check for reclaim only for connection/logger that match the sweep type. for e.g internal or external client connections. - boolean checkReclaimable = ((logger.isInternalConnection() && internal) || (!logger.isInternalConnection() && !internal)); - if (checkReclaimable) { - PhoenixConnection monitoredConnection = logger.getConnection(); - LOGGER.info(String.format("connection-sweep-activity-log for %s: %s", logger.getConnectionID(), logger.getActivityLog())); - if (monitoredConnection == null) { - garbageCollectedConnections += collectConnection(internal); - iterator.remove(); - } - } - } - LOGGER.info(String.format("connection-profile-metrics-log for %s: internal=%s, freed=%d, current=%d, open=%d, throttled=%d", - this.profileName, - internal, - garbageCollectedConnections, - internal ? getInternalConnectionCount(): getConnectionCount(), - internal ? - GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.getMetric().getValue() : - GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue(), - GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER.getMetric().getValue())); - - // Register the last logged time - lastCollectedTimeInMillis = currentTimeInMillis; + } + LOGGER.info(String.format( + "connection-profile-metrics-log for %s: internal=%s, freed=%d, current=%d, open=%d, throttled=%d", + this.profileName, internal, garbageCollectedConnections, + internal ? getInternalConnectionCount() : getConnectionCount(), + internal + ? 
GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.getMetric().getValue() + : GLOBAL_OPEN_PHOENIX_CONNECTIONS.getMetric().getValue(), + GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER.getMetric().getValue())); + + // Register the last logged time + lastCollectedTimeInMillis = currentTimeInMillis; - } - return garbageCollectedConnections; } - - private int collectConnection(boolean internal) { - if (internal && internalConnectionCount > 0) { - --internalConnectionCount; - GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.decrement(); - return 1; - } else if (!internal && connectionCount > 0) { - --connectionCount; - GLOBAL_OPEN_PHOENIX_CONNECTIONS.decrement(); - return 1; - } - return 0; + return garbageCollectedConnections; + } + + private int collectConnection(boolean internal) { + if (internal && internalConnectionCount > 0) { + --internalConnectionCount; + GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.decrement(); + return 1; + } else if (!internal && connectionCount > 0) { + --connectionCount; + GLOBAL_OPEN_PHOENIX_CONNECTIONS.decrement(); + return 1; } - - @VisibleForTesting - public Map getActivityLog() throws SQLException { - Map activityLog = Maps.newHashMap(); - if (this.enableActivityLogging) { - for (ConnectionActivityLogger connectionLogger : openConnectionActivityLoggers.values()) { - activityLog.put(connectionLogger.getConnectionID(), connectionLogger.getActivityLog()); - } - } - return activityLog; + return 0; + } + + @VisibleForTesting + public Map getActivityLog() throws SQLException { + Map activityLog = Maps.newHashMap(); + if (this.enableActivityLogging) { + for (ConnectionActivityLogger connectionLogger : openConnectionActivityLoggers.values()) { + activityLog.put(connectionLogger.getConnectionID(), connectionLogger.getActivityLog()); + } } + return activityLog; + } - public static class Builder { + public static class Builder { - protected String profileName; - protected boolean enableActivityLogging; - protected int loggingIntervalInMins; - protected int maxConnectionsAllowed; - protected int maxInternalConnectionsAllowed; - protected boolean shouldThrottleNumConnections; + protected String profileName; + protected boolean enableActivityLogging; + protected int loggingIntervalInMins; + protected int maxConnectionsAllowed; + protected int maxInternalConnectionsAllowed; + protected boolean shouldThrottleNumConnections; - public Builder(boolean shouldThrottleNumConnections) { - this.shouldThrottleNumConnections = shouldThrottleNumConnections; - } + public Builder(boolean shouldThrottleNumConnections) { + this.shouldThrottleNumConnections = shouldThrottleNumConnections; + } - public Builder withConnectionProfile(String profileName) { - this.profileName = profileName; - return this; - } + public Builder withConnectionProfile(String profileName) { + this.profileName = profileName; + return this; + } - public Builder withMaxAllowed(int maxAllowed) { - this.maxConnectionsAllowed = maxAllowed; - return this; - } + public Builder withMaxAllowed(int maxAllowed) { + this.maxConnectionsAllowed = maxAllowed; + return this; + } - public Builder withMaxInternalAllowed(int maxInternalAllowed) { - this.maxInternalConnectionsAllowed = maxInternalAllowed; - return this; - } + public Builder withMaxInternalAllowed(int maxInternalAllowed) { + this.maxInternalConnectionsAllowed = maxInternalAllowed; + return this; + } - public Builder withLogging(boolean enabled) { - this.enableActivityLogging = enabled; - return this; - } - public Builder withLoggingIntervalInMins(int loggingIntervalInMins) { - 
this.loggingIntervalInMins = loggingIntervalInMins;
-            return this;
-        }
-
-        public ConnectionLimiter build() {
-            return new LoggingConnectionLimiter(this);
-        }
+    public Builder withLoggingIntervalInMins(int loggingIntervalInMins) {
+      this.loggingIntervalInMins = loggingIntervalInMins;
+      return this;
+    }
+    public ConnectionLimiter build() {
+      return new LoggingConnectionLimiter(this);
    }
+
+  }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogDetailsWorkHandler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogDetailsWorkHandler.java
index 82d30a25e9b..77c58bc939a 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogDetailsWorkHandler.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogDetailsWorkHandler.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,39 +17,38 @@
 */
 package org.apache.phoenix.log;
-import com.lmax.disruptor.LifecycleAware;
-import com.lmax.disruptor.WorkHandler;
 import org.apache.hadoop.conf.Configuration;
+import com.lmax.disruptor.LifecycleAware;
+import com.lmax.disruptor.WorkHandler;
 public class QueryLogDetailsWorkHandler implements WorkHandler, LifecycleAware {
+  private LogWriter logWriter;

-    private LogWriter logWriter;
+  public QueryLogDetailsWorkHandler(Configuration configuration) {
+    this.logWriter = new TableLogWriter(configuration);
+  }

-    public QueryLogDetailsWorkHandler(Configuration configuration) {
-        this.logWriter = new TableLogWriter(configuration);
-    }
-
-    @Override
-    public void onEvent(RingBufferEvent ringBufferEvent) throws Exception {
-        logWriter.write(ringBufferEvent);
-        ringBufferEvent.clear();
-    }
+  @Override
+  public void onEvent(RingBufferEvent ringBufferEvent) throws Exception {
+    logWriter.write(ringBufferEvent);
+    ringBufferEvent.clear();
+  }

-    @Override
-    public void onStart() {
+  @Override
+  public void onStart() {

-    }
+  }

-    @Override
-    public void onShutdown() {
-        try {
-            if (logWriter != null) {
-                logWriter.close();
-            }
-        } catch (Exception e) {
-            //Ignore
-        }
+  @Override
+  public void onShutdown() {
+    try {
+      if (logWriter != null) {
+        logWriter.close();
+      }
+    } catch (Exception e) {
+      // Ignore
    }
-}
\ No newline at end of file
+  }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogInfo.java
index fb38ba25e5e..2cf27395c7c 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogInfo.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogInfo.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -37,45 +37,43 @@ import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarchar; - public enum QueryLogInfo { - - CLIENT_IP_I(CLIENT_IP, LogLevel.INFO, PVarchar.INSTANCE), - QUERY_I(QUERY, LogLevel.INFO,PVarchar.INSTANCE), - BIND_PARAMETERS_I(BIND_PARAMETERS, LogLevel.TRACE,PVarchar.INSTANCE), - QUERY_ID_I(QUERY_ID, LogLevel.INFO,PVarchar.INSTANCE), - TENANT_ID_I(TENANT_ID, LogLevel.INFO,PVarchar.INSTANCE), - START_TIME_I(START_TIME, LogLevel.INFO,PTimestamp.INSTANCE), - USER_I(USER, LogLevel.INFO,PVarchar.INSTANCE), - EXPLAIN_PLAN_I(EXPLAIN_PLAN,LogLevel.DEBUG,PVarchar.INSTANCE), - GLOBAL_SCAN_DETAILS_I(GLOBAL_SCAN_DETAILS, LogLevel.DEBUG,PVarchar.INSTANCE), - NO_OF_RESULTS_ITERATED_I(NO_OF_RESULTS_ITERATED, LogLevel.INFO,PLong.INSTANCE), - EXCEPTION_TRACE_I(EXCEPTION_TRACE, LogLevel.DEBUG,PVarchar.INSTANCE), - QUERY_STATUS_I(QUERY_STATUS, LogLevel.INFO,PVarchar.INSTANCE), - SCAN_METRICS_JSON_I(SCAN_METRICS_JSON, LogLevel.TRACE,PVarchar.INSTANCE), - TABLE_NAME_I(TABLE_NAME, LogLevel.DEBUG,PVarchar.INSTANCE); - - public final String columnName; - public final LogLevel logLevel; - public final PDataType dataType; - private QueryLogInfo(String columnName, LogLevel logLevel, PDataType dataType) { - this.columnName = columnName; - this.logLevel=logLevel; - this.dataType=dataType; - } + CLIENT_IP_I(CLIENT_IP, LogLevel.INFO, PVarchar.INSTANCE), + QUERY_I(QUERY, LogLevel.INFO, PVarchar.INSTANCE), + BIND_PARAMETERS_I(BIND_PARAMETERS, LogLevel.TRACE, PVarchar.INSTANCE), + QUERY_ID_I(QUERY_ID, LogLevel.INFO, PVarchar.INSTANCE), + TENANT_ID_I(TENANT_ID, LogLevel.INFO, PVarchar.INSTANCE), + START_TIME_I(START_TIME, LogLevel.INFO, PTimestamp.INSTANCE), + USER_I(USER, LogLevel.INFO, PVarchar.INSTANCE), + EXPLAIN_PLAN_I(EXPLAIN_PLAN, LogLevel.DEBUG, PVarchar.INSTANCE), + GLOBAL_SCAN_DETAILS_I(GLOBAL_SCAN_DETAILS, LogLevel.DEBUG, PVarchar.INSTANCE), + NO_OF_RESULTS_ITERATED_I(NO_OF_RESULTS_ITERATED, LogLevel.INFO, PLong.INSTANCE), + EXCEPTION_TRACE_I(EXCEPTION_TRACE, LogLevel.DEBUG, PVarchar.INSTANCE), + QUERY_STATUS_I(QUERY_STATUS, LogLevel.INFO, PVarchar.INSTANCE), + SCAN_METRICS_JSON_I(SCAN_METRICS_JSON, LogLevel.TRACE, PVarchar.INSTANCE), + TABLE_NAME_I(TABLE_NAME, LogLevel.DEBUG, PVarchar.INSTANCE); + + public final String columnName; + public final LogLevel logLevel; + public final PDataType dataType; + + private QueryLogInfo(String columnName, LogLevel logLevel, PDataType dataType) { + this.columnName = columnName; + this.logLevel = logLevel; + this.dataType = dataType; + } + + public String getColumnName() { + return columnName; + } - public String getColumnName() { - return columnName; - } + public LogLevel getLogLevel() { + return logLevel; + } - public LogLevel getLogLevel() { - return logLevel; - } + public PDataType getDataType() { + return dataType; + } - public PDataType getDataType() { - return dataType; - } - - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogger.java index b132bbd7e96..a7925aa4713 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogger.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLogger.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,155 +23,156 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap.Builder; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap.Builder; - - /* * Wrapper for query translator */ public class QueryLogger { - private final ThreadLocal threadLocalTranslator = new ThreadLocal<>(); - private QueryLoggerDisruptor queryDisruptor; - private String queryId; - private LogLevel logLevel; - private Builder queryLogBuilder = ImmutableMap.builder(); - private boolean isSynced; - private static final Logger LOGGER = LoggerFactory.getLogger(QueryLogger.class); - - protected QueryLogger(PhoenixConnection connection) { - this.queryId = UUID.randomUUID().toString(); - this.queryDisruptor = connection.getQueryServices().getQueryDisruptor(); - logLevel = connection.getLogLevel(); - log(QueryLogInfo.QUERY_ID_I, queryId); - log(QueryLogInfo.START_TIME_I, EnvironmentEdgeManager.currentTimeMillis()); + private final ThreadLocal threadLocalTranslator = new ThreadLocal<>(); + private QueryLoggerDisruptor queryDisruptor; + private String queryId; + private LogLevel logLevel; + private Builder queryLogBuilder = ImmutableMap.builder(); + private boolean isSynced; + private static final Logger LOGGER = LoggerFactory.getLogger(QueryLogger.class); + + protected QueryLogger(PhoenixConnection connection) { + this.queryId = UUID.randomUUID().toString(); + this.queryDisruptor = connection.getQueryServices().getQueryDisruptor(); + logLevel = connection.getLogLevel(); + log(QueryLogInfo.QUERY_ID_I, queryId); + log(QueryLogInfo.START_TIME_I, EnvironmentEdgeManager.currentTimeMillis()); + } + + protected QueryLogger() { + logLevel = LogLevel.OFF; + } + + private RingBufferEventTranslator getCachedTranslator() { + RingBufferEventTranslator result = threadLocalTranslator.get(); + if (result == null) { + result = new RingBufferEventTranslator(queryId); + threadLocalTranslator.set(result); } + return result; + } + + public static final QueryLogger NO_OP_INSTANCE = new QueryLogger() { + @Override + public void log(QueryLogInfo queryLogInfo, Object info) { - protected QueryLogger() { - logLevel = LogLevel.OFF; - } - - private RingBufferEventTranslator getCachedTranslator() { - RingBufferEventTranslator result = threadLocalTranslator.get(); - if (result == null) { - result = new RingBufferEventTranslator(queryId); - threadLocalTranslator.set(result); - } - return result; - } - - public static final QueryLogger NO_OP_INSTANCE = new QueryLogger() { - @Override - public void log(QueryLogInfo queryLogInfo, Object info) { - - } - - @Override - public boolean isDebugEnabled() { - return false; - } - - @Override - public boolean isInfoEnabled() { - return false; - } - - @Override - public void sync( - Map> readMetrics, Map overAllMetrics) { - - } - - @Override - public boolean isSynced(){ - return true; - } - }; - - public static QueryLogger 
getInstance(PhoenixConnection connection, boolean isSystemTable) { - if (connection.getLogLevel() == LogLevel.OFF || isSystemTable || ThreadLocalRandom.current() - .nextDouble() > connection.getLogSamplingRate()) { return NO_OP_INSTANCE; } - return new QueryLogger(connection); } - /** - * Add query log in the table, columns will be logged depending upon the connection logLevel - */ - public void log(QueryLogInfo queryLogInfo, Object info) { - try { - queryLogBuilder.put(queryLogInfo, info); - } catch (Exception e) { - LOGGER.warn("Unable to add log info because of " + e.getMessage()); - } + @Override + public boolean isDebugEnabled() { + return false; } - - private boolean publishLogs(RingBufferEventTranslator translator) { - if (queryDisruptor == null) { return false; } - boolean isLogged = queryDisruptor.tryPublish(translator); - if (!isLogged && LOGGER.isDebugEnabled()) { - LOGGER.debug("Unable to write query log in table as ring buffer queue is full!!"); - } - return isLogged; + + @Override + public boolean isInfoEnabled() { + return false; } - /** - * Is debug logging currently enabled? - * Call this method to prevent having to perform expensive operations (for example, String concatenation) when the log level is more than debug. - */ - public boolean isDebugEnabled(){ - return isLevelEnabled(LogLevel.DEBUG); + @Override + public void sync(Map> readMetrics, + Map overAllMetrics) { + } - - private boolean isLevelEnabled(LogLevel logLevel){ - return this.logLevel != null && logLevel != LogLevel.OFF ? logLevel.ordinal() <= this.logLevel.ordinal() - : false; + + @Override + public boolean isSynced() { + return true; } - - /** - * Is Info logging currently enabled? - * Call this method to prevent having to perform expensive operations (for example, String concatenation) when the log level is more than info. - * @return - */ - public boolean isInfoEnabled(){ - return isLevelEnabled(LogLevel.INFO); + }; + + public static QueryLogger getInstance(PhoenixConnection connection, boolean isSystemTable) { + if ( + connection.getLogLevel() == LogLevel.OFF || isSystemTable + || ThreadLocalRandom.current().nextDouble() > connection.getLogSamplingRate() + ) { + return NO_OP_INSTANCE; } - - /** - * Return queryId of the current query logger , needed by the application - * to correlate with the logging table. 
- * Eg(usage):- - * StatementContext context = ((PhoenixResultSet)rs).getContext(); - * String queryId = context.getQueryLogger().getQueryId(); - * - * @return - */ - public String getQueryId() { - return this.queryId; + return new QueryLogger(connection); + } + + /** + * Add query log in the table, columns will be logged depending upon the connection logLevel + */ + public void log(QueryLogInfo queryLogInfo, Object info) { + try { + queryLogBuilder.put(queryLogInfo, info); + } catch (Exception e) { + LOGGER.warn("Unable to add log info because of " + e.getMessage()); } - + } - public void sync(Map> readMetrics, Map overAllMetrics) { - syncBase(readMetrics, overAllMetrics, logLevel); + private boolean publishLogs(RingBufferEventTranslator translator) { + if (queryDisruptor == null) { + return false; } - - public void syncBase(Map> readMetrics, Map overAllMetrics, LogLevel logLevel) { - if (!isSynced) { - isSynced = true; - final RingBufferEventTranslator translator = getCachedTranslator(); - translator.setQueryInfo(logLevel, queryLogBuilder.build(), readMetrics, overAllMetrics); - publishLogs(translator); - } + boolean isLogged = queryDisruptor.tryPublish(translator); + if (!isLogged && LOGGER.isDebugEnabled()) { + LOGGER.debug("Unable to write query log in table as ring buffer queue is full!!"); } - - /** - * Is Synced already - */ - public boolean isSynced(){ - return this.isSynced; + return isLogged; + } + + /** + * Is debug logging currently enabled? Call this method to prevent having to perform expensive + * operations (for example, String concatenation) when the log level is more than debug. + */ + public boolean isDebugEnabled() { + return isLevelEnabled(LogLevel.DEBUG); + } + + private boolean isLevelEnabled(LogLevel logLevel) { + return this.logLevel != null && logLevel != LogLevel.OFF + ? logLevel.ordinal() <= this.logLevel.ordinal() + : false; + } + + /** + * Is Info logging currently enabled? Call this method to prevent having to perform expensive + * operations (for example, String concatenation) when the log level is more than info. + */ + public boolean isInfoEnabled() { + return isLevelEnabled(LogLevel.INFO); + } + + /** + * Return queryId of the current query logger , needed by the application to correlate with the + * logging table. 
Eg(usage):- StatementContext context = ((PhoenixResultSet)rs).getContext(); + * String queryId = context.getQueryLogger().getQueryId(); + */ + public String getQueryId() { + return this.queryId; + } + + public void sync(Map> readMetrics, + Map overAllMetrics) { + syncBase(readMetrics, overAllMetrics, logLevel); + } + + public void syncBase(Map> readMetrics, + Map overAllMetrics, LogLevel logLevel) { + if (!isSynced) { + isSynced = true; + final RingBufferEventTranslator translator = getCachedTranslator(); + translator.setQueryInfo(logLevel, queryLogBuilder.build(), readMetrics, overAllMetrics); + publishLogs(translator); } - + } + + /** + * Is Synced already + */ + public boolean isSynced() { + return this.isSynced; + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDefaultExceptionHandler.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDefaultExceptionHandler.java index e9ae6bd9016..52d92b93f49 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDefaultExceptionHandler.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDefaultExceptionHandler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,31 +21,31 @@ class QueryLoggerDefaultExceptionHandler implements ExceptionHandler { - @Override - public void handleEventException(Throwable ex, long sequence, RingBufferEvent event) { - final StringBuilder sb = new StringBuilder(512); - sb.append("Query Logger error handling event seq=").append(sequence).append(", value='"); - try { - sb.append(event); - } catch (final Exception ignored) { - sb.append("[ERROR calling ").append(event.getClass()).append(".toString(): "); - sb.append(ignored).append("]"); - } - sb.append("':"); - System.err.println(sb); - ex.printStackTrace(); + @Override + public void handleEventException(Throwable ex, long sequence, RingBufferEvent event) { + final StringBuilder sb = new StringBuilder(512); + sb.append("Query Logger error handling event seq=").append(sequence).append(", value='"); + try { + sb.append(event); + } catch (final Exception ignored) { + sb.append("[ERROR calling ").append(event.getClass()).append(".toString(): "); + sb.append(ignored).append("]"); } + sb.append("':"); + System.err.println(sb); + ex.printStackTrace(); + } - @Override - public void handleOnStartException(final Throwable throwable) { - System.err.println("QueryLogger error starting:"); - throwable.printStackTrace(); - } + @Override + public void handleOnStartException(final Throwable throwable) { + System.err.println("QueryLogger error starting:"); + throwable.printStackTrace(); + } - @Override - public void handleOnShutdownException(final Throwable throwable) { - System.err.println("QueryLogger error shutting down:"); - throwable.printStackTrace(); - } + @Override + public void handleOnShutdownException(final Throwable throwable) { + System.err.println("QueryLogger error shutting down:"); + throwable.printStackTrace(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java index 935b0583ccc..0622ace1108 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,12 +23,13 @@ import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import com.lmax.disruptor.BlockingWaitStrategy; import com.lmax.disruptor.EventTranslator; import com.lmax.disruptor.ExceptionHandler; @@ -37,99 +38,95 @@ import com.lmax.disruptor.dsl.Disruptor; import com.lmax.disruptor.dsl.ProducerType; -public class QueryLoggerDisruptor implements Closeable{ - - private volatile Disruptor disruptor; - private boolean isClosed = false; - //number of elements to create within the ring buffer. - private static final int RING_BUFFER_SIZE = 8 * 1024; - private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class); - private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName(); - private static final int DEFAULT_AUDIT_LOGGER_PROCESS_COUNT = 1; - - public QueryLoggerDisruptor(Configuration configuration) throws SQLException{ - WaitStrategy waitStrategy; - try { - waitStrategy = (WaitStrategy)Class - .forName(configuration.get(QueryServices.LOG_BUFFER_WAIT_STRATEGY, DEFAULT_WAIT_STRATEGY)).newInstance(); - } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) { - throw new SQLException(e); - } - - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat("QueryLogger" + "-thread-%s") - .setDaemon(true) - .setThreadFactory(new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - final Thread result = Executors.defaultThreadFactory().newThread(r); - result.setContextClassLoader(QueryLoggerDisruptor.class.getClass().getClassLoader()); - return result; - } - }) - .build(); - disruptor = new Disruptor(RingBufferEvent.FACTORY, - configuration.getInt(QueryServices.LOG_BUFFER_SIZE, RING_BUFFER_SIZE), threadFactory, ProducerType.MULTI, - waitStrategy); - final ExceptionHandler errorHandler = new QueryLoggerDefaultExceptionHandler(); - disruptor.setDefaultExceptionHandler(errorHandler); - - /** - * if LOG_HANDLER_COUNT is 1 it will work as the previous implementation - * if LOG_HANDLER_COUNT is 2 or more then Multi Thread - */ - int handlerCount = configuration.getInt( - QueryServices.LOG_HANDLER_COUNT, DEFAULT_AUDIT_LOGGER_PROCESS_COUNT); - - if (handlerCount <= 0){ - LOGGER.error("Audit Log Handler Count must be greater than 0." 
+ - "change to default value, input : " + handlerCount); - handlerCount = DEFAULT_AUDIT_LOGGER_PROCESS_COUNT; - } - - QueryLogDetailsWorkHandler[] workHandlers = new QueryLogDetailsWorkHandler[handlerCount]; - for (int i = 0; i < handlerCount; i++){ - workHandlers[i] = new QueryLogDetailsWorkHandler(configuration); - } - disruptor.handleEventsWithWorkerPool(workHandlers); - - LOGGER.info("Starting QueryLoggerDisruptor for with ringbufferSize=" + - disruptor.getRingBuffer().getBufferSize() + ", waitStrategy=" + - waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler=" - + errorHandler + ", handlerCount=" + handlerCount); - disruptor.start(); - +public class QueryLoggerDisruptor implements Closeable { + + private volatile Disruptor disruptor; + private boolean isClosed = false; + // number of elements to create within the ring buffer. + private static final int RING_BUFFER_SIZE = 8 * 1024; + private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class); + private static final String DEFAULT_WAIT_STRATEGY = BlockingWaitStrategy.class.getName(); + private static final int DEFAULT_AUDIT_LOGGER_PROCESS_COUNT = 1; + + public QueryLoggerDisruptor(Configuration configuration) throws SQLException { + WaitStrategy waitStrategy; + try { + waitStrategy = (WaitStrategy) Class + .forName(configuration.get(QueryServices.LOG_BUFFER_WAIT_STRATEGY, DEFAULT_WAIT_STRATEGY)) + .newInstance(); + } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) { + throw new SQLException(e); } - + + ThreadFactory threadFactory = + new ThreadFactoryBuilder().setNameFormat("QueryLogger" + "-thread-%s").setDaemon(true) + .setThreadFactory(new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + final Thread result = Executors.defaultThreadFactory().newThread(r); + result.setContextClassLoader(QueryLoggerDisruptor.class.getClass().getClassLoader()); + return result; + } + }).build(); + disruptor = new Disruptor(RingBufferEvent.FACTORY, + configuration.getInt(QueryServices.LOG_BUFFER_SIZE, RING_BUFFER_SIZE), threadFactory, + ProducerType.MULTI, waitStrategy); + final ExceptionHandler errorHandler = new QueryLoggerDefaultExceptionHandler(); + disruptor.setDefaultExceptionHandler(errorHandler); + /** - * Attempts to publish an event by translating (write) data representations into events claimed from the RingBuffer. - * @param translator - * @return + * if LOG_HANDLER_COUNT is 1 it will work as the previous implementation if LOG_HANDLER_COUNT is + * 2 or more then Multi Thread */ - public boolean tryPublish(final EventTranslator translator) { - if(isClosed()){ - return false; - } - return disruptor.getRingBuffer().tryPublishEvent(translator); + int handlerCount = + configuration.getInt(QueryServices.LOG_HANDLER_COUNT, DEFAULT_AUDIT_LOGGER_PROCESS_COUNT); + + if (handlerCount <= 0) { + LOGGER.error("Audit Log Handler Count must be greater than 0." 
+ + "change to default value, input : " + handlerCount); + handlerCount = DEFAULT_AUDIT_LOGGER_PROCESS_COUNT; } - - public boolean isClosed() { - return isClosed ; + QueryLogDetailsWorkHandler[] workHandlers = new QueryLogDetailsWorkHandler[handlerCount]; + for (int i = 0; i < handlerCount; i++) { + workHandlers[i] = new QueryLogDetailsWorkHandler(configuration); } + disruptor.handleEventsWithWorkerPool(workHandlers); + + LOGGER.info("Starting QueryLoggerDisruptor for with ringbufferSize=" + + disruptor.getRingBuffer().getBufferSize() + ", waitStrategy=" + + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler=" + errorHandler + + ", handlerCount=" + handlerCount); + disruptor.start(); - @Override - public void close() throws IOException { - isClosed = true; - LOGGER.info("Shutting down QueryLoggerDisruptor.."); - try { - //we can wait for 2 seconds, so that backlog can be committed - disruptor.shutdown(2, TimeUnit.SECONDS); - } catch (TimeoutException e) { - throw new IOException(e); - } + } + /** + * Attempts to publish an event by translating (write) data representations into events claimed + * from the RingBuffer. + */ + public boolean tryPublish(final EventTranslator translator) { + if (isClosed()) { + return false; } - - + return disruptor.getRingBuffer().tryPublishEvent(translator); + } + + public boolean isClosed() { + return isClosed; + } + + @Override + public void close() throws IOException { + isClosed = true; + LOGGER.info("Shutting down QueryLoggerDisruptor.."); + try { + // we can wait for 2 seconds, so that backlog can be committed + disruptor.shutdown(2, TimeUnit.SECONDS); + } catch (TimeoutException e) { + throw new IOException(e); + } + + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerUtil.java index 2645e5c433b..00799cbc7dd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryLoggerUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,34 +27,35 @@ public class QueryLoggerUtil { + public static void logInitialDetails(QueryLogger queryLogger, PName tenantId, + ConnectionQueryServices queryServices, String query, List bindParameters) { + try { + String clientIP; + try { + clientIP = InetAddress.getLocalHost().getHostAddress(); + } catch (UnknownHostException e) { + clientIP = "UnknownHost"; + } - public static void logInitialDetails(QueryLogger queryLogger, PName tenantId, ConnectionQueryServices queryServices, - String query, List bindParameters) { - try { - String clientIP; - try { - clientIP = InetAddress.getLocalHost().getHostAddress(); - } catch (UnknownHostException e) { - clientIP = "UnknownHost"; - } + if (clientIP != null) { + queryLogger.log(QueryLogInfo.CLIENT_IP_I, clientIP); + } + if (query != null) { + queryLogger.log(QueryLogInfo.QUERY_I, query); + } + if (bindParameters != null) { + queryLogger.log(QueryLogInfo.BIND_PARAMETERS_I, StringUtils.join(bindParameters, ",")); + } + if (tenantId != null) { + queryLogger.log(QueryLogInfo.TENANT_ID_I, tenantId.getString()); + } - if (clientIP != null) { - queryLogger.log(QueryLogInfo.CLIENT_IP_I, clientIP); - } - if (query != null) { - queryLogger.log(QueryLogInfo.QUERY_I, query); - } - if (bindParameters != null) { - queryLogger.log(QueryLogInfo.BIND_PARAMETERS_I, StringUtils.join(bindParameters, ",")); - } - if (tenantId != null) { - queryLogger.log(QueryLogInfo.TENANT_ID_I, tenantId.getString()); - } - - queryLogger.log(QueryLogInfo.USER_I, queryServices.getUserName() != null ? queryServices.getUserName() - : queryServices.getUser().getShortName()); - } catch (Exception e) { - // Ignore - } + queryLogger.log(QueryLogInfo.USER_I, + queryServices.getUserName() != null + ? queryServices.getUserName() + : queryServices.getUser().getShortName()); + } catch (Exception e) { + // Ignore } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryStatus.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryStatus.java index 0e634c1fb61..d2dc68abbcf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryStatus.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/QueryStatus.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +18,7 @@ package org.apache.phoenix.log; public enum QueryStatus { - COMPILED, COMPLETED,FAILED + COMPILED, + COMPLETED, + FAILED } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEvent.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEvent.java index b5a4d4c9e7c..cf85a5ed052 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEvent.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEvent.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,88 +20,80 @@ import java.util.Map; import org.apache.phoenix.monitoring.MetricType; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; + import com.lmax.disruptor.EventFactory; - class RingBufferEvent { - private String queryId; - private LogLevel connectionLogLevel; - private ImmutableMap queryInfo; - private Map> readMetrics; - private Map overAllMetrics; - - public static final Factory FACTORY = new Factory(); - - /** - * Creates the events that will be put in the RingBuffer. - */ - private static class Factory implements EventFactory { - @Override - public RingBufferEvent newInstance() { - final RingBufferEvent result = new RingBufferEvent(); - return result; - } +class RingBufferEvent { + private String queryId; + private LogLevel connectionLogLevel; + private ImmutableMap queryInfo; + private Map> readMetrics; + private Map overAllMetrics; + + public static final Factory FACTORY = new Factory(); + + /** + * Creates the events that will be put in the RingBuffer. + */ + private static class Factory implements EventFactory { + @Override + public RingBufferEvent newInstance() { + final RingBufferEvent result = new RingBufferEvent(); + return result; } + } - public void clear() { - this.queryInfo=null; - this.queryId=null; - } + public void clear() { + this.queryInfo = null; + this.queryId = null; + } - - public String getQueryId() { - return queryId; - } + public String getQueryId() { + return queryId; + } - public static Factory getFactory() { - return FACTORY; - } + public static Factory getFactory() { + return FACTORY; + } - public void setQueryInfo(ImmutableMap queryInfo) { - this.queryInfo=queryInfo; - - } + public void setQueryInfo(ImmutableMap queryInfo) { + this.queryInfo = queryInfo; - public void setQueryId(String queryId) { - this.queryId=queryId; - - } + } - public ImmutableMap getQueryInfo() { - return queryInfo; - - } + public void setQueryId(String queryId) { + this.queryId = queryId; - public LogLevel getConnectionLogLevel() { - return connectionLogLevel; - } + } + public ImmutableMap getQueryInfo() { + return queryInfo; - public void setConnectionLogLevel(LogLevel connectionLogLevel) { - this.connectionLogLevel = connectionLogLevel; - } - - - public Map> getReadMetrics() { - return readMetrics; - } + } + public LogLevel getConnectionLogLevel() { + return connectionLogLevel; + } - public void setReadMetrics(Map> readMetrics) { - this.readMetrics = readMetrics; - } - + public void setConnectionLogLevel(LogLevel connectionLogLevel) { + this.connectionLogLevel = connectionLogLevel; + } - public Map getOverAllMetrics() { - return overAllMetrics; - } + public Map> getReadMetrics() { + return readMetrics; + } + public void setReadMetrics(Map> readMetrics) { + this.readMetrics = readMetrics; + } - public void setOverAllMetrics(Map overAllMetrics) { - this.overAllMetrics = overAllMetrics; - } + public Map getOverAllMetrics() { + return overAllMetrics; + } - + public void setOverAllMetrics(Map overAllMetrics) { + this.overAllMetrics = overAllMetrics; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEventTranslator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEventTranslator.java index 57e9d2b7212..f6fa0340589 
100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEventTranslator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/RingBufferEventTranslator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,41 +20,41 @@ import java.util.Map; import org.apache.phoenix.monitoring.MetricType; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; + import com.lmax.disruptor.EventTranslator; class RingBufferEventTranslator implements EventTranslator { - private String queryId; - private ImmutableMap queryInfo; - private LogLevel connectionLogLevel; - private Map> readMetrics; - private Map overAllMetrics; - - public RingBufferEventTranslator(String queryId) { - this.queryId=queryId; - } - - @Override - public void translateTo(RingBufferEvent event, long sequence) { - event.setQueryId(queryId); - event.setQueryInfo(queryInfo); - event.setReadMetrics(readMetrics); - event.setOverAllMetrics(overAllMetrics); - event.setConnectionLogLevel(connectionLogLevel); - clear(); - } - - private void clear() { - setQueryInfo(null,null,null,null); - } - - public void setQueryInfo(LogLevel logLevel, ImmutableMap queryInfo, Map> readMetrics, - Map overAllMetrics) { - this.queryInfo = queryInfo; - this.connectionLogLevel = logLevel; - this.readMetrics = readMetrics; - this.overAllMetrics=overAllMetrics; - } + private String queryId; + private ImmutableMap queryInfo; + private LogLevel connectionLogLevel; + private Map> readMetrics; + private Map overAllMetrics; + + public RingBufferEventTranslator(String queryId) { + this.queryId = queryId; + } + + @Override + public void translateTo(RingBufferEvent event, long sequence) { + event.setQueryId(queryId); + event.setQueryInfo(queryInfo); + event.setReadMetrics(readMetrics); + event.setOverAllMetrics(overAllMetrics); + event.setConnectionLogLevel(connectionLogLevel); + clear(); + } + + private void clear() { + setQueryInfo(null, null, null, null); + } + + public void setQueryInfo(LogLevel logLevel, ImmutableMap queryInfo, + Map> readMetrics, Map overAllMetrics) { + this.queryInfo = queryInfo; + this.connectionLogLevel = logLevel; + this.readMetrics = readMetrics; + this.overAllMetrics = overAllMetrics; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/log/TableLogWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/log/TableLogWriter.java index dc610b556bc..cd44c4843c1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/log/TableLogWriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/log/TableLogWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,131 +29,137 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; - /** - * Writes RingBuffer log event into table - * + * Writes RingBuffer log event into table */ public class TableLogWriter implements LogWriter { - private static final Logger LOGGER = LoggerFactory.getLogger(LogWriter.class); - private volatile Connection connection; - private boolean isClosed; - private PreparedStatement upsertStatement; - private Configuration config; - private Map metricOrdinals=new HashMap(); + private static final Logger LOGGER = LoggerFactory.getLogger(LogWriter.class); + private volatile Connection connection; + private boolean isClosed; + private PreparedStatement upsertStatement; + private Configuration config; + private Map metricOrdinals = new HashMap(); - public TableLogWriter(Configuration configuration) { - this.config=configuration; + public TableLogWriter(Configuration configuration) { + this.config = configuration; + } + + private PreparedStatement buildUpsertStatement(Connection conn) throws SQLException { + StringBuilder buf = + new StringBuilder("UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"("); + int queryLogEntries = 0; + for (QueryLogInfo info : QueryLogInfo.values()) { + buf.append(info.columnName); + buf.append(','); + queryLogEntries++; } - - private PreparedStatement buildUpsertStatement(Connection conn) throws SQLException { - StringBuilder buf = new StringBuilder("UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"("); - int queryLogEntries=0; - for (QueryLogInfo info : QueryLogInfo.values()) { - buf.append(info.columnName); - buf.append(','); - queryLogEntries++; - } - for (MetricType metric : MetricType.values()) { - if (metric.logLevel() != LogLevel.OFF) { - metricOrdinals.put(metric, ++queryLogEntries); - buf.append(metric.columnName()); - buf.append(','); - } - } - buf.setLength(buf.length()-1); - buf.append(") VALUES ("); - for (int i = 0; i < QueryLogInfo.values().length; i++) { - buf.append("?,"); - } - for (MetricType metric : MetricType.values()) { - if (metric.logLevel() != LogLevel.OFF) { - buf.append("?,"); - } - } - buf.setLength(buf.length()-1); - buf.append(")"); - return conn.prepareStatement(buf.toString()); + for (MetricType metric : MetricType.values()) { + if (metric.logLevel() != LogLevel.OFF) { + metricOrdinals.put(metric, ++queryLogEntries); + buf.append(metric.columnName()); + buf.append(','); + } + } + buf.setLength(buf.length() - 1); + buf.append(") VALUES ("); + for (int i = 0; i < QueryLogInfo.values().length; i++) { + buf.append("?,"); } + for (MetricType metric : MetricType.values()) { + if (metric.logLevel() != LogLevel.OFF) { + buf.append("?,"); + } + } + buf.setLength(buf.length() - 1); + buf.append(")"); + return conn.prepareStatement(buf.toString()); + } - @Override - public void write(RingBufferEvent event) throws SQLException, IOException { - if (isClosed()) { - LOGGER.warn("Unable to commit query log as Log committer is 
already closed"); - return; - } + @Override + public void write(RingBufferEvent event) throws SQLException, IOException { + if (isClosed()) { + LOGGER.warn("Unable to commit query log as Log committer is already closed"); + return; + } + if (connection == null) { + synchronized (this) { if (connection == null) { - synchronized (this) { - if (connection == null) { - connection = QueryUtil.getConnectionForQueryLog(this.config); - this.upsertStatement = buildUpsertStatement(connection); - } - } - } - - if (connection.isReadOnly()) { - return; + connection = QueryUtil.getConnectionForQueryLog(this.config); + this.upsertStatement = buildUpsertStatement(connection); } + } + } - ImmutableMap queryInfoMap = event.getQueryInfo(); - for (QueryLogInfo info : QueryLogInfo.values()) { - if (queryInfoMap.containsKey(info) && info.logLevel.ordinal() <= event.getConnectionLogLevel().ordinal()) { - upsertStatement.setObject(info.ordinal() + 1, queryInfoMap.get(info)); - } else { - upsertStatement.setObject(info.ordinal() + 1, null); - } - } - Map overAllMetrics = event.getOverAllMetrics(); - Map> readMetrics = event.getReadMetrics(); + if (connection.isReadOnly()) { + return; + } - for (MetricType metric : MetricType.values()) { - if (overAllMetrics != null && overAllMetrics.containsKey(metric) - && metric.isLoggingEnabled(event.getConnectionLogLevel())) { - upsertStatement.setObject(metricOrdinals.get(metric), overAllMetrics.get(metric)); - } else { - if (metric.logLevel() != LogLevel.OFF) { - upsertStatement.setObject(metricOrdinals.get(metric), null); - } - } - } + ImmutableMap queryInfoMap = event.getQueryInfo(); + for (QueryLogInfo info : QueryLogInfo.values()) { + if ( + queryInfoMap.containsKey(info) + && info.logLevel.ordinal() <= event.getConnectionLogLevel().ordinal() + ) { + upsertStatement.setObject(info.ordinal() + 1, queryInfoMap.get(info)); + } else { + upsertStatement.setObject(info.ordinal() + 1, null); + } + } + Map overAllMetrics = event.getOverAllMetrics(); + Map> readMetrics = event.getReadMetrics(); - if (readMetrics != null && !readMetrics.isEmpty()) { - for (Map.Entry> entry : readMetrics.entrySet()) { - upsertStatement.setObject(QueryLogInfo.TABLE_NAME_I.ordinal() + 1, entry.getKey()); - for (MetricType metric : entry.getValue().keySet()) { - if (metric.isLoggingEnabled(event.getConnectionLogLevel())) { - upsertStatement.setObject(metricOrdinals.get(metric), entry.getValue().get(metric)); - } - } - upsertStatement.executeUpdate(); - } - } else { - upsertStatement.executeUpdate(); + for (MetricType metric : MetricType.values()) { + if ( + overAllMetrics != null && overAllMetrics.containsKey(metric) + && metric.isLoggingEnabled(event.getConnectionLogLevel()) + ) { + upsertStatement.setObject(metricOrdinals.get(metric), overAllMetrics.get(metric)); + } else { + if (metric.logLevel() != LogLevel.OFF) { + upsertStatement.setObject(metricOrdinals.get(metric), null); } - connection.commit(); + } } - - @Override - public void close() throws IOException { - if (isClosed()) { return; } - isClosed = true; - try { - if (connection != null) { - // It should internally close all the statements - connection.close(); - } - } catch (SQLException e) { - // TODO Ignore? 
+ + if (readMetrics != null && !readMetrics.isEmpty()) { + for (Map.Entry> entry : readMetrics.entrySet()) { + upsertStatement.setObject(QueryLogInfo.TABLE_NAME_I.ordinal() + 1, entry.getKey()); + for (MetricType metric : entry.getValue().keySet()) { + if (metric.isLoggingEnabled(event.getConnectionLogLevel())) { + upsertStatement.setObject(metricOrdinals.get(metric), entry.getValue().get(metric)); + } } + upsertStatement.executeUpdate(); + } + } else { + upsertStatement.executeUpdate(); } + connection.commit(); + } - public boolean isClosed(){ - return isClosed; + @Override + public void close() throws IOException { + if (isClosed()) { + return; } + isClosed = true; + try { + if (connection != null) { + // It should internally close all the statements + connection.close(); + } + } catch (SQLException e) { + // TODO Ignore? + } + } + + public boolean isClosed() { + return isClosed; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java index 3df02e48dc6..966680d9155 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/ConnectionUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,65 +32,63 @@ */ public class ConnectionUtil { - /** - * Retrieve the configured input Connection. - * @param conf configuration containing connection information - * @return the configured input connection - */ - public static Connection getInputConnection(final Configuration conf) throws SQLException { - Preconditions.checkNotNull(conf); - return getInputConnection(conf, new Properties()); - } + /** + * Retrieve the configured input Connection. + * @param conf configuration containing connection information + * @return the configured input connection + */ + public static Connection getInputConnection(final Configuration conf) throws SQLException { + Preconditions.checkNotNull(conf); + return getInputConnection(conf, new Properties()); + } - /** - * Retrieve the configured input Connection. - * @param conf configuration containing connection information - * @param props custom connection properties - * @return the configured input connection - */ - public static Connection getInputConnection(final Configuration conf, final Properties props) - throws SQLException { - String inputQuorum = PhoenixConfigurationUtilHelper.getInputCluster(conf); - if (inputQuorum != null) { - // This will not override the quorum set with setInputClusterUrl - Properties copyProps = PropertiesUtil.deepCopy(props); - copyProps.setProperty(HConstants.CLIENT_ZOOKEEPER_QUORUM, inputQuorum); - return DriverManager.getConnection( - PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), - PropertiesUtil.combineProperties(copyProps, conf)); - } - return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), - PropertiesUtil.combineProperties(props, conf)); + /** + * Retrieve the configured input Connection. 
+ * @param conf configuration containing connection information + * @param props custom connection properties + * @return the configured input connection + */ + public static Connection getInputConnection(final Configuration conf, final Properties props) + throws SQLException { + String inputQuorum = PhoenixConfigurationUtilHelper.getInputCluster(conf); + if (inputQuorum != null) { + // This will not override the quorum set with setInputClusterUrl + Properties copyProps = PropertiesUtil.deepCopy(props); + copyProps.setProperty(HConstants.CLIENT_ZOOKEEPER_QUORUM, inputQuorum); + return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), + PropertiesUtil.combineProperties(copyProps, conf)); } + return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), + PropertiesUtil.combineProperties(props, conf)); + } - /** - * Create the configured output Connection. - * @param conf configuration containing the connection information - * @return the configured output connection - */ - public static Connection getOutputConnection(final Configuration conf) throws SQLException { - return getOutputConnection(conf, new Properties()); - } + /** + * Create the configured output Connection. + * @param conf configuration containing the connection information + * @return the configured output connection + */ + public static Connection getOutputConnection(final Configuration conf) throws SQLException { + return getOutputConnection(conf, new Properties()); + } - /** - * Create the configured output Connection. - * @param conf configuration containing the connection information - * @param props custom connection properties - * @return the configured output connection - */ - public static Connection getOutputConnection(final Configuration conf, Properties props) - throws SQLException { - Preconditions.checkNotNull(conf); - String outputQuorum = PhoenixConfigurationUtilHelper.getOutputCluster(conf); - if (outputQuorum != null) { - // This will not override the quorum set with setInputClusterUrl - Properties copyProps = PropertiesUtil.deepCopy(props); - copyProps.setProperty(HConstants.CLIENT_ZOOKEEPER_QUORUM, outputQuorum); - return DriverManager.getConnection( - PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), - PropertiesUtil.combineProperties(copyProps, conf)); - } - return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getOutputClusterUrl(conf), - PropertiesUtil.combineProperties(props, conf)); + /** + * Create the configured output Connection. 
+ * @param conf configuration containing the connection information + * @param props custom connection properties + * @return the configured output connection + */ + public static Connection getOutputConnection(final Configuration conf, Properties props) + throws SQLException { + Preconditions.checkNotNull(conf); + String outputQuorum = PhoenixConfigurationUtilHelper.getOutputCluster(conf); + if (outputQuorum != null) { + // This will not override the quorum set with setInputClusterUrl + Properties copyProps = PropertiesUtil.deepCopy(props); + copyProps.setProperty(HConstants.CLIENT_ZOOKEEPER_QUORUM, outputQuorum); + return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getInputClusterUrl(conf), + PropertiesUtil.combineProperties(copyProps, conf)); } + return DriverManager.getConnection(PhoenixConfigurationUtilHelper.getOutputClusterUrl(conf), + PropertiesUtil.combineProperties(props, conf)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilHelper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilHelper.java index 1fbf3f444f1..f1c3acb216d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilHelper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilHelper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.mapreduce.util; import org.apache.hadoop.conf.Configuration; @@ -24,173 +23,159 @@ import org.apache.phoenix.util.PhoenixRuntime; public final class PhoenixConfigurationUtilHelper { - // This relies on Hadoop Configuration to handle warning about deprecated configs and - // to set the correct non-deprecated configs when an old one shows up. - static { - Configuration.addDeprecation("phoneix.mapreduce.output.cluster.quorum", PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM); - } + // This relies on Hadoop Configuration to handle warning about deprecated configs and + // to set the correct non-deprecated configs when an old one shows up. + static { + Configuration.addDeprecation("phoneix.mapreduce.output.cluster.quorum", + PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM); + } - @Deprecated - public static final String MAPREDUCE_INPUT_CLUSTER_QUORUM = "phoenix.mapreduce.input.cluster.quorum"; - @Deprecated - public static final String MAPREDUCE_OUTPUT_CLUSTER_QUORUM = "phoenix.mapreduce.output.cluster.quorum"; - public static final String MAPREDUCE_INPUT_CLUSTER_URL = "phoenix.mapreduce.input.cluster.url"; - public static final String MAPREDUCE_OUTPUT_CLUSTER_URL = "phoenix.mapreduce.output.cluster.url"; - public static final String TRANSFORM_MONITOR_ENABLED = "phoenix.transform.monitor.enabled"; - public static final boolean DEFAULT_TRANSFORM_MONITOR_ENABLED = true; - /** - * Get the value of the name property as a set of comma-delimited - * long values. - * If no such property exists, null is returned. 
- * Hadoop Configuration object has support for getting ints delimited by comma - * but doesn't support for long. - * @param name property name - * @return property value interpreted as an array of comma-delimited - * long values - */ - public static long[] getLongs(Configuration conf, String name) { - String[] strings = conf.getTrimmedStrings(name); - // Configuration#getTrimmedStrings will never return null. - // If key is not found, it will return empty array. - if (strings.length == 0) { - return null; - } - long[] longs = new long[strings.length]; - for (int i = 0; i < strings.length; i++) { - longs[i] = Long.parseLong(strings[i]); - } - return longs; - } + @Deprecated + public static final String MAPREDUCE_INPUT_CLUSTER_QUORUM = + "phoenix.mapreduce.input.cluster.quorum"; + @Deprecated + public static final String MAPREDUCE_OUTPUT_CLUSTER_QUORUM = + "phoenix.mapreduce.output.cluster.quorum"; + public static final String MAPREDUCE_INPUT_CLUSTER_URL = "phoenix.mapreduce.input.cluster.url"; + public static final String MAPREDUCE_OUTPUT_CLUSTER_URL = "phoenix.mapreduce.output.cluster.url"; + public static final String TRANSFORM_MONITOR_ENABLED = "phoenix.transform.monitor.enabled"; + public static final boolean DEFAULT_TRANSFORM_MONITOR_ENABLED = true; - /** - * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will read - * from. If MAPREDUCE_OUTPUT_CLUSTER_QUORUM is not set, then it returns the value of - * HConstants.ZOOKEEPER_QUORUM - * @param configuration - * @return ZooKeeper quorum string - */ - @Deprecated - public static String getInputCluster(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - String quorum = configuration.get(MAPREDUCE_INPUT_CLUSTER_QUORUM); - if (quorum == null) { - quorum = configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); - } - if (quorum == null) { - quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM); - } - return quorum; + /** + * Get the value of the name property as a set of comma-delimited long + * values. If no such property exists, null is returned. Hadoop Configuration object has support + * for getting ints delimited by comma but doesn't support for long. + * @param name property name + * @return property value interpreted as an array of comma-delimited long values + */ + public static long[] getLongs(Configuration conf, String name) { + String[] strings = conf.getTrimmedStrings(name); + // Configuration#getTrimmedStrings will never return null. + // If key is not found, it will return empty array. + if (strings.length == 0) { + return null; } - - /** - * Returns the Phoenix JDBC URL a Phoenix MapReduce job will read - * from. 
If MAPREDUCE_INPUT_CLUSTER_URL is not set, then it returns the value of - * "jdbc:phoenix" - * @param configuration - * @return URL string - */ - public static String getInputClusterUrl(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - String url = configuration.get(MAPREDUCE_INPUT_CLUSTER_URL); - if (url == null) { - url = PhoenixRuntime.JDBC_PROTOCOL; - } - return url; + long[] longs = new long[strings.length]; + for (int i = 0; i < strings.length; i++) { + longs[i] = Long.parseLong(strings[i]); } + return longs; + } - /** - * Returns the HBase Client Port - * @param configuration - * @return - */ - @Deprecated - public static Integer getClientPort(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - String clientPortString = configuration.get(HConstants.ZOOKEEPER_CLIENT_PORT); - return clientPortString==null ? null : Integer.parseInt(clientPortString); + /** + * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will read + * from. If MAPREDUCE_OUTPUT_CLUSTER_QUORUM is not set, then it returns the value of + * HConstants.ZOOKEEPER_QUORUM + * @return ZooKeeper quorum string + */ + @Deprecated + public static String getInputCluster(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + String quorum = configuration.get(MAPREDUCE_INPUT_CLUSTER_QUORUM); + if (quorum == null) { + quorum = configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); } - - /** - * Returns the HBase zookeeper znode parent - * @param configuration - * @return - */ - @Deprecated - public static String getZNodeParent(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(HConstants.ZOOKEEPER_ZNODE_PARENT); + if (quorum == null) { + quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM); } + return quorum; + } - /** - * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will write - * to. If MAPREDUCE_OUTPUT_CLUSTER_QUORUM is not set, then it returns the value of - * HConstants.ZOOKEEPER_QUORUM - * @param configuration - * @return ZooKeeper quorum string - */ - @Deprecated - public static String getOutputCluster(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - String quorum = configuration.get(MAPREDUCE_OUTPUT_CLUSTER_QUORUM); - if (quorum == null) { - quorum = configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); - } - if (quorum == null) { - quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM); - } - return quorum; + /** + * Returns the Phoenix JDBC URL a Phoenix MapReduce job will read from. 
If + * MAPREDUCE_INPUT_CLUSTER_URL is not set, then it returns the value of "jdbc:phoenix" + * @return URL string + */ + public static String getInputClusterUrl(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + String url = configuration.get(MAPREDUCE_INPUT_CLUSTER_URL); + if (url == null) { + url = PhoenixRuntime.JDBC_PROTOCOL; } + return url; + } - /** - * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will - * read from - * @param configuration - * @return ZooKeeper quorum string if defined, null otherwise - */ - @Deprecated - public static String getInputClusterZkQuorum(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(MAPREDUCE_INPUT_CLUSTER_QUORUM); - } + /** + * Returns the HBase Client Port + */ + @Deprecated + public static Integer getClientPort(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + String clientPortString = configuration.get(HConstants.ZOOKEEPER_CLIENT_PORT); + return clientPortString == null ? null : Integer.parseInt(clientPortString); + } + /** + * Returns the HBase zookeeper znode parent + */ + @Deprecated + public static String getZNodeParent(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(HConstants.ZOOKEEPER_ZNODE_PARENT); + } - /** - * Returns the Phoenix JDBC URL a Phoenix MapReduce job will write to. - * If MAPREDUCE_OUTPUT_CLUSTER_URL is not set, then it returns the value of - * "jdbc:phoenix" - * @param configuration - * @return URL string - */ - public static String getOutputClusterUrl(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - String quorum = configuration.get(MAPREDUCE_OUTPUT_CLUSTER_URL); - if (quorum == null) { - quorum = PhoenixRuntime.JDBC_PROTOCOL; - } - return quorum; + /** + * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will write + * to. If MAPREDUCE_OUTPUT_CLUSTER_QUORUM is not set, then it returns the value of + * HConstants.ZOOKEEPER_QUORUM + * @return ZooKeeper quorum string + */ + @Deprecated + public static String getOutputCluster(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + String quorum = configuration.get(MAPREDUCE_OUTPUT_CLUSTER_QUORUM); + if (quorum == null) { + quorum = configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM); } - - /** - * Returns the value of HConstants.ZOOKEEPER_QUORUM. 
- * For tests only - * @param configuration - * @return ZooKeeper quorum string if defined, null otherwise - */ - @Deprecated - public static String getZKQuorum(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM, - configuration.get(HConstants.ZOOKEEPER_QUORUM)); + if (quorum == null) { + quorum = configuration.get(HConstants.ZOOKEEPER_QUORUM); } + return quorum; + } + + /** + * Returns the ZooKeeper quorum string for the HBase cluster a Phoenix MapReduce job will read + * from + * @return ZooKeeper quorum string if defined, null otherwise + */ + @Deprecated + public static String getInputClusterZkQuorum(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(MAPREDUCE_INPUT_CLUSTER_QUORUM); + } - /** - * Returns the ZooKeeper quorum override MAPREDUCE_OUTPUT_CLUSTER_QUORUM for mapreduce jobs - * @param configuration - * @return ZooKeeper quorum string if defined, null otherwise - */ - @Deprecated - public static String getOutputClusterZkQuorum(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(MAPREDUCE_OUTPUT_CLUSTER_QUORUM); + /** + * Returns the Phoenix JDBC URL a Phoenix MapReduce job will write to. If + * MAPREDUCE_OUTPUT_CLUSTER_URL is not set, then it returns the value of "jdbc:phoenix" + * @return URL string + */ + public static String getOutputClusterUrl(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + String quorum = configuration.get(MAPREDUCE_OUTPUT_CLUSTER_URL); + if (quorum == null) { + quorum = PhoenixRuntime.JDBC_PROTOCOL; } + return quorum; + } + + /** + * Returns the value of HConstants.ZOOKEEPER_QUORUM. For tests only + * @return ZooKeeper quorum string if defined, null otherwise + */ + @Deprecated + public static String getZKQuorum(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(HConstants.CLIENT_ZOOKEEPER_QUORUM, + configuration.get(HConstants.ZOOKEEPER_QUORUM)); + } + + /** + * Returns the ZooKeeper quorum override MAPREDUCE_OUTPUT_CLUSTER_QUORUM for mapreduce jobs + * @return ZooKeeper quorum string if defined, null otherwise + */ + @Deprecated + public static String getOutputClusterZkQuorum(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(MAPREDUCE_OUTPUT_CLUSTER_QUORUM); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java index f5ad5ddb0a2..c918ecc49f5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,106 +17,105 @@ */ package org.apache.phoenix.memory; -import net.jcip.annotations.GuardedBy; -import net.jcip.annotations.ThreadSafe; - import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; +import net.jcip.annotations.GuardedBy; +import net.jcip.annotations.ThreadSafe; + /** - * - * Child memory manager that delegates through to global memory manager, - * but enforces that at most a threshold percentage is used by this - * memory manager. No blocking is done if the threshold is exceeded, - * but the standard blocking will be done by the global memory manager. - * - * + * Child memory manager that delegates through to global memory manager, but enforces that at most a + * threshold percentage is used by this memory manager. No blocking is done if the threshold is + * exceeded, but the standard blocking will be done by the global memory manager. * @since 0.1 */ @ThreadSafe public class ChildMemoryManager extends DelegatingMemoryManager { - private final Object sync = new Object(); - private final int maxPercOfTotal; - @GuardedBy("sync") - private long allocatedBytes; - - public ChildMemoryManager(MemoryManager mm, int maxPercOfTotal) { - super(mm); - if (mm instanceof ChildMemoryManager) { - throw new IllegalStateException("ChildMemoryManager cannot delegate to another ChildMemoryManager"); - } - this.maxPercOfTotal = maxPercOfTotal; - if (maxPercOfTotal <= 0 || maxPercOfTotal > 100) { - throw new IllegalArgumentException("Max percentage of total memory (" + maxPercOfTotal + "%) must be greater than zero and less than or equal to 100"); - } + private final Object sync = new Object(); + private final int maxPercOfTotal; + @GuardedBy("sync") + private long allocatedBytes; + + public ChildMemoryManager(MemoryManager mm, int maxPercOfTotal) { + super(mm); + if (mm instanceof ChildMemoryManager) { + throw new IllegalStateException( + "ChildMemoryManager cannot delegate to another ChildMemoryManager"); + } + this.maxPercOfTotal = maxPercOfTotal; + if (maxPercOfTotal <= 0 || maxPercOfTotal > 100) { + throw new IllegalArgumentException("Max percentage of total memory (" + maxPercOfTotal + + "%) must be greater than zero and less than or equal to 100"); } + } + private long adjustAllocation(long minBytes, long reqBytes) { + assert (reqBytes >= minBytes); + long availBytes = getAvailableMemory(); + // Check if this memory managers percentage of allocated bytes exceeds its allowed maximum + if (minBytes > availBytes) { + throw new InsufficientMemoryException( + new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) + .setMessage( + "Attempt to allocate more memory than the max allowed of " + maxPercOfTotal + "%") + .build().buildException()); + } + // Revise reqBytes down to available memory if necessary + return Math.min(reqBytes, availBytes); + } - private long adjustAllocation(long minBytes, long reqBytes) { - assert(reqBytes >= minBytes); - long availBytes = getAvailableMemory(); - // Check if this memory managers percentage of allocated bytes exceeds its allowed maximum - if (minBytes > availBytes) { - throw new InsufficientMemoryException( - new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) - .setMessage("Attempt to allocate more memory 
than the max allowed of " + maxPercOfTotal + "%") - .build().buildException()); + @Override + public MemoryChunk allocate(long minBytes, long nBytes) { + synchronized (sync) { + nBytes = adjustAllocation(minBytes, nBytes); + final MemoryChunk chunk = super.allocate(minBytes, nBytes); + allocatedBytes += chunk.getSize(); + // Instantiate delegate chunk to track allocatedBytes correctly + return new MemoryChunk() { + @Override + public void close() { + synchronized (sync) { + allocatedBytes -= chunk.getSize(); + chunk.close(); + } } - // Revise reqBytes down to available memory if necessary - return Math.min(reqBytes,availBytes); - } - - @Override - public MemoryChunk allocate(long minBytes, long nBytes) { - synchronized (sync) { - nBytes = adjustAllocation(minBytes, nBytes); - final MemoryChunk chunk = super.allocate(minBytes, nBytes); - allocatedBytes += chunk.getSize(); - // Instantiate delegate chunk to track allocatedBytes correctly - return new MemoryChunk() { - @Override - public void close() { - synchronized (sync) { - allocatedBytes -= chunk.getSize(); - chunk.close(); - } - } - - @Override - public long getSize() { - return chunk.getSize(); - } - - @Override - public void resize(long nBytes) { - synchronized (sync) { - long size = getSize(); - long deltaBytes = nBytes - size; - if (deltaBytes > 0) { - adjustAllocation(deltaBytes,deltaBytes); // Throw if too much memory - } - chunk.resize(nBytes); - allocatedBytes += deltaBytes; - } - } - }; + + @Override + public long getSize() { + return chunk.getSize(); } - } - @Override - public long getAvailableMemory() { - synchronized (sync) { - long availBytes = getMaxMemory() - allocatedBytes; - // Sanity check (should never happen) - if (availBytes < 0) { - throw new IllegalStateException("Available memory has become negative: " + availBytes + " bytes. Allocated memory: " + allocatedBytes + " bytes."); + @Override + public void resize(long nBytes) { + synchronized (sync) { + long size = getSize(); + long deltaBytes = nBytes - size; + if (deltaBytes > 0) { + adjustAllocation(deltaBytes, deltaBytes); // Throw if too much memory } - return availBytes; + chunk.resize(nBytes); + allocatedBytes += deltaBytes; + } } + }; } - - @Override - public long getMaxMemory() { - return maxPercOfTotal * super.getMaxMemory() / 100; + } + + @Override + public long getAvailableMemory() { + synchronized (sync) { + long availBytes = getMaxMemory() - allocatedBytes; + // Sanity check (should never happen) + if (availBytes < 0) { + throw new IllegalStateException("Available memory has become negative: " + availBytes + + " bytes. Allocated memory: " + allocatedBytes + " bytes."); + } + return availBytes; } + } + + @Override + public long getMaxMemory() { + return maxPercOfTotal * super.getMaxMemory() / 100; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/DelegatingMemoryManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/DelegatingMemoryManager.java index 418f1287006..d0ea9500095 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/DelegatingMemoryManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/DelegatingMemoryManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,42 +18,38 @@ package org.apache.phoenix.memory; /** - * * Memory manager that delegates through to another memory manager. - * - * * @since 0.1 */ public class DelegatingMemoryManager implements MemoryManager { - private final MemoryManager parent; - - public DelegatingMemoryManager(MemoryManager globalMemoryManager){ - this.parent = globalMemoryManager; - } - - @Override - public long getAvailableMemory() { - return parent.getAvailableMemory(); - } - - @Override - public long getMaxMemory() { - return parent.getMaxMemory(); - } - - @Override - public MemoryChunk allocate(long minBytes, long reqBytes) { - return parent.allocate(minBytes, reqBytes); - } - - - @Override - public MemoryChunk allocate(long nBytes) { - return allocate(nBytes, nBytes); - } - - public MemoryManager getParent() { - return parent; - } + private final MemoryManager parent; + + public DelegatingMemoryManager(MemoryManager globalMemoryManager) { + this.parent = globalMemoryManager; + } + + @Override + public long getAvailableMemory() { + return parent.getAvailableMemory(); + } + + @Override + public long getMaxMemory() { + return parent.getMaxMemory(); + } + + @Override + public MemoryChunk allocate(long minBytes, long reqBytes) { + return parent.allocate(minBytes, reqBytes); + } + + @Override + public MemoryChunk allocate(long nBytes) { + return allocate(nBytes, nBytes); + } + + public MemoryManager getParent() { + return parent; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java index 48fb37407a1..411b4237484 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,160 +17,160 @@ */ package org.apache.phoenix.memory; -import net.jcip.annotations.GuardedBy; - import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import net.jcip.annotations.GuardedBy; + /** - * * Global memory manager to track course grained memory usage across all requests. 
- * - * * @since 0.1 */ public class GlobalMemoryManager implements MemoryManager { - private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMemoryManager.class); - - private final Object sync = new Object(); - private final long maxMemoryBytes; - @GuardedBy("sync") - private volatile long usedMemoryBytes; - public GlobalMemoryManager(long maxBytes) { - if (maxBytes <= 0) { - throw new IllegalStateException( - "Total number of available bytes (" + maxBytes + ") must be greater than zero"); - } - this.maxMemoryBytes = maxBytes; - this.usedMemoryBytes = 0; - } + private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMemoryManager.class); - @Override - public long getAvailableMemory() { - synchronized(sync) { - return maxMemoryBytes - usedMemoryBytes; - } - } + private final Object sync = new Object(); + private final long maxMemoryBytes; + @GuardedBy("sync") + private volatile long usedMemoryBytes; - @Override - public long getMaxMemory() { - return maxMemoryBytes; + public GlobalMemoryManager(long maxBytes) { + if (maxBytes <= 0) { + throw new IllegalStateException( + "Total number of available bytes (" + maxBytes + ") must be greater than zero"); } - - // TODO: Work on fairness: One big memory request can cause all others to fail here. - private long allocateBytes(long minBytes, long reqBytes) { - if (minBytes < 0 || reqBytes < 0) { - throw new IllegalStateException("Minimum requested bytes (" + minBytes - + ") and requested bytes (" + reqBytes + ") must be greater than zero"); - } - if (minBytes > maxMemoryBytes) { - throw new InsufficientMemoryException( - new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) - .setMessage("Requested memory of " + minBytes - + " bytes is larger than global pool of " + maxMemoryBytes + " bytes.") - .build().buildException()); - } - long nBytes; - synchronized(sync) { - if (usedMemoryBytes + minBytes > maxMemoryBytes) { - throw new InsufficientMemoryException( - new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) - .setMessage("Requested memory of " + minBytes - + " bytes could not be allocated. Using memory of " + usedMemoryBytes - + " bytes from global pool of " + maxMemoryBytes) - .build().buildException()); - } - // Allocate at most reqBytes, but at least minBytes - nBytes = Math.min(reqBytes, maxMemoryBytes - usedMemoryBytes); - if (nBytes < minBytes) { - throw new IllegalStateException("Allocated bytes (" + nBytes - + ") should be at least the minimum requested bytes (" + minBytes + ")"); - } - usedMemoryBytes += nBytes; - } - return nBytes; + this.maxMemoryBytes = maxBytes; + this.usedMemoryBytes = 0; + } + + @Override + public long getAvailableMemory() { + synchronized (sync) { + return maxMemoryBytes - usedMemoryBytes; } - - @Override - public MemoryChunk allocate(long minBytes, long reqBytes) { - long nBytes = allocateBytes(minBytes, reqBytes); - return newMemoryChunk(nBytes); + } + + @Override + public long getMaxMemory() { + return maxMemoryBytes; + } + + // TODO: Work on fairness: One big memory request can cause all others to fail here. 
+ private long allocateBytes(long minBytes, long reqBytes) { + if (minBytes < 0 || reqBytes < 0) { + throw new IllegalStateException("Minimum requested bytes (" + minBytes + + ") and requested bytes (" + reqBytes + ") must be greater than zero"); } - - @Override - public MemoryChunk allocate(long nBytes) { - return allocate(nBytes,nBytes); + if (minBytes > maxMemoryBytes) { + throw new InsufficientMemoryException( + new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) + .setMessage("Requested memory of " + minBytes + " bytes is larger than global pool of " + + maxMemoryBytes + " bytes.") + .build().buildException()); } - - private MemoryChunk newMemoryChunk(long sizeBytes) { - return new GlobalMemoryChunk(sizeBytes); + long nBytes; + synchronized (sync) { + if (usedMemoryBytes + minBytes > maxMemoryBytes) { + throw new InsufficientMemoryException( + new SQLExceptionInfo.Builder(SQLExceptionCode.INSUFFICIENT_MEMORY) + .setMessage( + "Requested memory of " + minBytes + " bytes could not be allocated. Using memory of " + + usedMemoryBytes + " bytes from global pool of " + maxMemoryBytes) + .build().buildException()); + } + // Allocate at most reqBytes, but at least minBytes + nBytes = Math.min(reqBytes, maxMemoryBytes - usedMemoryBytes); + if (nBytes < minBytes) { + throw new IllegalStateException("Allocated bytes (" + nBytes + + ") should be at least the minimum requested bytes (" + minBytes + ")"); + } + usedMemoryBytes += nBytes; + } + return nBytes; + } + + @Override + public MemoryChunk allocate(long minBytes, long reqBytes) { + long nBytes = allocateBytes(minBytes, reqBytes); + return newMemoryChunk(nBytes); + } + + @Override + public MemoryChunk allocate(long nBytes) { + return allocate(nBytes, nBytes); + } + + private MemoryChunk newMemoryChunk(long sizeBytes) { + return new GlobalMemoryChunk(sizeBytes); + } + + private class GlobalMemoryChunk implements MemoryChunk { + private volatile long size; + // private volatile String stack; + + private GlobalMemoryChunk(long size) { + if (size < 0) { + throw new IllegalStateException( + "Size of memory chunk must be greater than zero, but instead is " + size); + } + this.size = size; + // Useful for debugging where a piece of memory was allocated + // this.stack = ExceptionUtils.getStackTrace(new Throwable()); } - private class GlobalMemoryChunk implements MemoryChunk { - private volatile long size; - //private volatile String stack; - - private GlobalMemoryChunk(long size) { - if (size < 0) { - throw new IllegalStateException("Size of memory chunk must be greater than zero, but instead is " + size); - } - this.size = size; - // Useful for debugging where a piece of memory was allocated - // this.stack = ExceptionUtils.getStackTrace(new Throwable()); - } + @Override + public long getSize() { + return size; + } - @Override - public long getSize() { - return size; + @Override + public void resize(long nBytes) { + if (nBytes < 0) { + throw new IllegalStateException( + "Number of bytes to resize to must be greater than zero, but instead is " + nBytes); + } + synchronized (sync) { + long nAdditionalBytes = (nBytes - size); + if (nAdditionalBytes < 0) { + usedMemoryBytes += nAdditionalBytes; + size = nBytes; + } else { + allocateBytes(nAdditionalBytes, nAdditionalBytes); + size = nBytes; + // this.stack = ExceptionUtils.getStackTrace(new Throwable()); } + } + } - @Override - public void resize(long nBytes) { - if (nBytes < 0) { - throw new IllegalStateException("Number of bytes to resize to must be greater than zero, but instead is " 
+ nBytes); - } - synchronized(sync) { - long nAdditionalBytes = (nBytes - size); - if (nAdditionalBytes < 0) { - usedMemoryBytes += nAdditionalBytes; - size = nBytes; - } else { - allocateBytes(nAdditionalBytes, nAdditionalBytes); - size = nBytes; - //this.stack = ExceptionUtils.getStackTrace(new Throwable()); - } - } + /** + * Check that MemoryChunk has previously been closed. + */ + @Override + protected void finalize() throws Throwable { + try { + if (size > 0) { + LOGGER.warn("Orphaned chunk of " + size + " bytes found during finalize"); + // logger.warn("Orphaned chunk of " + size + " bytes found during finalize allocated + // here:\n" + stack); } + freeMemory(); + } finally { + super.finalize(); + } + } - /** - * Check that MemoryChunk has previously been closed. - */ - @Override - protected void finalize() throws Throwable { - try { - if (size > 0) { - LOGGER.warn("Orphaned chunk of " + size + " bytes found during finalize"); - //logger.warn("Orphaned chunk of " + size + " bytes found during finalize allocated here:\n" + stack); - } - freeMemory(); - } finally { - super.finalize(); - } - } + private void freeMemory() { + synchronized (sync) { + usedMemoryBytes -= size; + size = 0; + } + } - private void freeMemory() { - synchronized(sync) { - usedMemoryBytes -= size; - size = 0; - } - } - - @Override - public void close() { - freeMemory(); - } + @Override + public void close() { + freeMemory(); } + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/InsufficientMemoryException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/InsufficientMemoryException.java index 0810aa410d9..5db0b942a72 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/InsufficientMemoryException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/InsufficientMemoryException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
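A minimal sketch of the allocation contract enforced by the GlobalMemoryManager and DelegatingMemoryManager code reformatted above (pool sizes and variable names are illustrative, not taken from the patch; the types live in org.apache.phoenix.memory):

  MemoryManager pool = new GlobalMemoryManager(10 * 1024);        // 10 KB global pool; must be > 0
  MemoryManager perRequest = new DelegatingMemoryManager(pool);   // simply forwards to the parent pool
  MemoryChunk big = pool.allocate(8 * 1024);                      // minBytes == reqBytes == 8 KB
  try {
    pool.allocate(4 * 1024);   // only ~2 KB remain, so the minimum cannot be met
  } catch (InsufficientMemoryException e) {
    // expected: allocateBytes throws once usedMemoryBytes + minBytes exceeds maxMemoryBytes
  }
  big.close();                 // returns the 8 KB to the pool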
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,28 +18,25 @@ package org.apache.phoenix.memory; /** - * * Exception thrown by MemoryManager when insufficient memory is available - * - * * @since 0.1 */ public class InsufficientMemoryException extends RuntimeException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public InsufficientMemoryException() { - } + public InsufficientMemoryException() { + } - public InsufficientMemoryException(String message) { - super(message); - } + public InsufficientMemoryException(String message) { + super(message); + } - public InsufficientMemoryException(Throwable cause) { - super(cause); - } + public InsufficientMemoryException(Throwable cause) { + super(cause); + } - public InsufficientMemoryException(String message, Throwable cause) { - super(message, cause); - } + public InsufficientMemoryException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/MemoryManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/MemoryManager.java index 00b10caa721..59180edf5a3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/memory/MemoryManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/memory/MemoryManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,76 +20,64 @@ import java.io.Closeable; /** - * - * Memory manager used to track memory usage. Either throttles - * memory usage by blocking when the max memory is reached or - * allocates up to a maximum without blocking. - * - * + * Memory manager used to track memory usage. Either throttles memory usage by blocking when the max + * memory is reached or allocates up to a maximum without blocking. * @since 0.1 */ public interface MemoryManager { + /** + * Get the total amount of memory (in bytes) that may be allocated. + */ + long getMaxMemory(); + + /** + * Get the amount of available memory (in bytes) not yet allocated. + */ + long getAvailableMemory(); + + /** + * Allocate up to reqBytes of memory, dialing the amount down to minBytes if full amount is not + * available. If minBytes is not available, then this call will block for a configurable amount of + * time and throw if minBytes does not become available. + * @param minBytes minimum number of bytes required + * @param reqBytes requested number of bytes. Must be greater than or equal to minBytes + * @return MemoryChunk that was allocated + * @throws InsufficientMemoryException if unable to allocate minBytes during configured amount of + * time + */ + MemoryChunk allocate(long minBytes, long reqBytes); + + /** + * Equivalent to calling {@link #allocate(long, long)} where minBytes and reqBytes being the same. + */ + MemoryChunk allocate(long nBytes); + + /** + * Chunk of allocated memory. 
To reclaim the memory, call {@link #close()} + * @since 0.1 + */ + public static interface MemoryChunk extends Closeable { /** - * Get the total amount of memory (in bytes) that may be allocated. - */ - long getMaxMemory(); - - /** - * Get the amount of available memory (in bytes) not yet allocated. - */ - long getAvailableMemory(); - - /** - * Allocate up to reqBytes of memory, dialing the amount down to - * minBytes if full amount is not available. If minBytes is not - * available, then this call will block for a configurable amount - * of time and throw if minBytes does not become available. - * @param minBytes minimum number of bytes required - * @param reqBytes requested number of bytes. Must be greater - * than or equal to minBytes - * @return MemoryChunk that was allocated - * @throws InsufficientMemoryException if unable to allocate minBytes - * during configured amount of time + * Get the size in bytes of the allocated chunk. */ - MemoryChunk allocate(long minBytes, long reqBytes); + long getSize(); /** - * Equivalent to calling {@link #allocate(long, long)} where - * minBytes and reqBytes being the same. + * Free up the memory associated with this chunk */ - MemoryChunk allocate(long nBytes); - + @Override + void close(); + /** - * - * Chunk of allocated memory. To reclaim the memory, call {@link #close()} - * - * - * @since 0.1 + * Resize an already allocated memory chunk up or down to a new amount. If decreasing + * allocation, this call will not block. If increasing allocation, and nBytes is not available, + * then this call will block for a configurable amount of time and throw if nBytes does not + * become available. Most commonly used to adjust the allocation of a memory buffer that was + * originally sized for the worst case scenario. + * @param nBytes new number of bytes required for this chunk + * @throws InsufficientMemoryException if unable to allocate minBytes during configured amount + * of time */ - public static interface MemoryChunk extends Closeable { - /** - * Get the size in bytes of the allocated chunk. - */ - long getSize(); - - /** - * Free up the memory associated with this chunk - */ - @Override - void close(); - - /** - * Resize an already allocated memory chunk up or down to a - * new amount. If decreasing allocation, this call will not block. - * If increasing allocation, and nBytes is not available, then - * this call will block for a configurable amount of time and - * throw if nBytes does not become available. Most commonly - * used to adjust the allocation of a memory buffer that was - * originally sized for the worst case scenario. - * @param nBytes new number of bytes required for this chunk - * @throws InsufficientMemoryException if unable to allocate minBytes - * during configured amount of time - */ - void resize(long nBytes); - } + void resize(long nBytes); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/MetricInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/MetricInfo.java index e6ad976dc0f..473e96c75a8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/MetricInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/MetricInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
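The MemoryManager/MemoryChunk javadoc reformatted above spells out the two-argument allocate and resize semantics; a small usage sketch under those rules (the sizes are made up for illustration, and MemoryChunk is the nested MemoryManager.MemoryChunk interface):

  MemoryManager mm = new GlobalMemoryManager(1024 * 1024);        // 1 MB pool
  try (MemoryChunk chunk = mm.allocate(512, 64 * 1024)) {         // at least 512 B, at most 64 KB
    long got = chunk.getSize();                                   // somewhere in [512, 65536], depending on what was free
    chunk.resize(1024);                                           // shrinking an allocation never blocks
  }                                                               // close() frees the chunk back to the pool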
See the NOTICE file * distributed with this work for additional information @@ -22,30 +22,30 @@ */ public enum MetricInfo { - TRACE("", "trace_id"), - SPAN("span_id", "span_id"), - PARENT("parent_id", "parent_id"), - START("start_time", "start_time"), - END("end_time", "end_time"), - TAG("phoenix.tag", "t"), - ANNOTATION("phoenix.annotation", "a"), - HOSTNAME("Hostname", "hostname"), - DESCRIPTION("", "description"); + TRACE("", "trace_id"), + SPAN("span_id", "span_id"), + PARENT("parent_id", "parent_id"), + START("start_time", "start_time"), + END("end_time", "end_time"), + TAG("phoenix.tag", "t"), + ANNOTATION("phoenix.annotation", "a"), + HOSTNAME("Hostname", "hostname"), + DESCRIPTION("", "description"); - public final String traceName; - public final String columnName; + public final String traceName; + public final String columnName; - private MetricInfo(String traceName, String columnName) { - this.traceName = traceName; - this.columnName = columnName; - } + private MetricInfo(String traceName, String columnName) { + this.traceName = traceName; + this.columnName = columnName; + } - public static String getColumnName(String traceName) { - for (MetricInfo info : MetricInfo.values()) { - if (info.traceName.equals(traceName)) { - return info.columnName; - } - } - throw new IllegalArgumentException("Unknown tracename: " + traceName); + public static String getColumnName(String traceName) { + for (MetricInfo info : MetricInfo.values()) { + if (info.traceName.equals(traceName)) { + return info.columnName; + } } + throw new IllegalArgumentException("Unknown tracename: " + traceName); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/Metrics.java index 86b54dfd7ad..dda712a6dcd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/Metrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/metrics/Metrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
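For context on the MetricInfo hunk above: the enum maps trace names onto Phoenix trace table column names, so lookups behave roughly as follows (hypothetical calls):

  String col  = MetricInfo.getColumnName(MetricInfo.SPAN.traceName);  // "span_id"
  String host = MetricInfo.getColumnName("Hostname");                 // "hostname"
  // an unknown name throws IllegalArgumentException("Unknown tracename: ...")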
See the NOTICE file * distributed with this work for additional information @@ -24,43 +24,44 @@ public class Metrics { - private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class); + private static final Logger LOGGER = LoggerFactory.getLogger(Metrics.class); private static volatile MetricsSystem manager = DefaultMetricsSystem.instance(); - private static boolean initialized; + private static boolean initialized; - /** This must match the prefix that we are using in the hadoop-metrics2 config on the client */ - public static final String METRICS_SYSTEM_NAME = "phoenix"; - public static MetricsSystem initialize() { - // if the jars aren't on the classpath, then we don't start the metrics system - if (manager == null) { - LOGGER.warn("Phoenix metrics could not be initialized - no MetricsManager found!"); - return null; - } - // only initialize the metrics system once - synchronized (Metrics.class) { - if (!initialized) { - LOGGER.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME); - manager.init(Metrics.METRICS_SYSTEM_NAME); - initialized = true; - } - } - return manager; + /** This must match the prefix that we are using in the hadoop-metrics2 config on the client */ + public static final String METRICS_SYSTEM_NAME = "phoenix"; + + public static MetricsSystem initialize() { + // if the jars aren't on the classpath, then we don't start the metrics system + if (manager == null) { + LOGGER.warn("Phoenix metrics could not be initialized - no MetricsManager found!"); + return null; + } + // only initialize the metrics system once + synchronized (Metrics.class) { + if (!initialized) { + LOGGER.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME); + manager.init(Metrics.METRICS_SYSTEM_NAME); + initialized = true; + } } + return manager; + } - private static volatile boolean sinkInitialized = false; + private static volatile boolean sinkInitialized = false; - /** - * Mark that the metrics/tracing sink has been initialized - */ - public static void markSinkInitialized() { - sinkInitialized = true; - } + /** + * Mark that the metrics/tracing sink has been initialized + */ + public static void markSinkInitialized() { + sinkInitialized = true; + } - public static void ensureConfigured() { - if (!sinkInitialized) { - LOGGER.warn("Phoenix metrics2/tracing sink was not started. Should be it be?"); - } + public static void ensureConfigured() { + if (!sinkInitialized) { + LOGGER.warn("Phoenix metrics2/tracing sink was not started. Should be it be?"); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java index 3368bceae92..a1c0d9ad85b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,61 +20,59 @@ import java.util.concurrent.atomic.AtomicLong; /** - * Version of {@link Metric} that can be used when the metric is being concurrently accessed or modified by multiple - * threads. 
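A short sketch of how the Metrics helper reformatted above is typically driven (MetricsSystem is the hadoop-metrics2 type it returns; the call order is illustrative only):

  MetricsSystem system = Metrics.initialize();  // starts the "phoenix" metrics system exactly once
  Metrics.markSinkInitialized();                // record that a metrics/tracing sink is attached
  Metrics.ensureConfigured();                   // logs a warning only if no sink was marked as started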
+ * Version of {@link Metric} that can be used when the metric is being concurrently accessed or + * modified by multiple threads. */ public class AtomicMetric implements Metric { - private final MetricType type; - private final AtomicLong value = new AtomicLong(); + private final MetricType type; + private final AtomicLong value = new AtomicLong(); - public AtomicMetric(MetricType type) { - this.type = type; - } + public AtomicMetric(MetricType type) { + this.type = type; + } - @Override - public MetricType getMetricType() { - return type; - } + @Override + public MetricType getMetricType() { + return type; + } - @Override - public long getValue() { - return value.get(); - } + @Override + public long getValue() { + return value.get(); + } - @Override - public void change(long delta) { - value.addAndGet(delta); - } + @Override + public void change(long delta) { + value.addAndGet(delta); + } - @Override - public void increment() { - value.incrementAndGet(); - } + @Override + public void increment() { + value.incrementAndGet(); + } - @Override - public String getCurrentMetricState() { - return getMetricType().shortName() + ": " + value.get(); - } + @Override + public String getCurrentMetricState() { + return getMetricType().shortName() + ": " + value.get(); + } - @Override - public void reset() { - value.set(0); - } + @Override + public void reset() { + value.set(0); + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - this.value.set(value); - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + this.value.set(value); + } - @Override - public void decrement() { - value.decrementAndGet(); - } + @Override + public void decrement() { + value.decrementAndGet(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java index d1ed8ba4fd3..ceb4b002ea2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,74 +18,75 @@ package org.apache.phoenix.monitoring; /** - * Interface for representing a metric that could be published and possibly combined with a metric of the same - * type. + * Interface for representing a metric that could be published and possibly combined with a metric + * of the same type. 
*/ public interface CombinableMetric extends Metric { - String getPublishString(); + String getPublishString(); - CombinableMetric combine(CombinableMetric metric); - - CombinableMetric clone(); + CombinableMetric combine(CombinableMetric metric); - public class NoOpRequestMetric implements CombinableMetric, Cloneable { + CombinableMetric clone(); - public static final NoOpRequestMetric INSTANCE = new NoOpRequestMetric(); - private static final String EMPTY_STRING = ""; + public class NoOpRequestMetric implements CombinableMetric, Cloneable { - @Override - public MetricType getMetricType() { - return MetricType.NO_OP_METRIC; - } + public static final NoOpRequestMetric INSTANCE = new NoOpRequestMetric(); + private static final String EMPTY_STRING = ""; - @Override - public long getValue() { - return 0; - } + @Override + public MetricType getMetricType() { + return MetricType.NO_OP_METRIC; + } - @Override - public void change(long delta) {} + @Override + public long getValue() { + return 0; + } - @Override - public void increment() {} + @Override + public void change(long delta) { + } - @Override - public String getCurrentMetricState() { - return EMPTY_STRING; - } + @Override + public void increment() { + } - @Override - public void reset() {} + @Override + public String getCurrentMetricState() { + return EMPTY_STRING; + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) {} + @Override + public void reset() { + } - @Override - public String getPublishString() { - return EMPTY_STRING; - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + } - @Override - public CombinableMetric combine(CombinableMetric metric) { - return INSTANCE; - } + @Override + public String getPublishString() { + return EMPTY_STRING; + } - @Override - public void decrement() {} - - @Override - public CombinableMetric clone(){ - return INSTANCE; - } + @Override + public CombinableMetric combine(CombinableMetric metric) { + return INSTANCE; + } + + @Override + public void decrement() { + } + @Override + public CombinableMetric clone() { + return INSTANCE; } - + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java index c2a6e7ba8db..8f551fbcbf6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,77 +19,75 @@ public class CombinableMetricImpl implements CombinableMetric, Cloneable { - private final Metric metric; - - public CombinableMetricImpl(MetricType type) { - metric = new NonAtomicMetric(type); - } - - private CombinableMetricImpl(Metric metric) { - this.metric = metric; - } - - @Override - public MetricType getMetricType() { - return metric.getMetricType(); - } - - @Override - public long getValue() { - return metric.getValue(); - } - - @Override - public void change(long delta) { - metric.change(delta); - } - - @Override - public void increment() { - metric.increment(); - } - - @Override - public String getCurrentMetricState() { - return metric.getCurrentMetricState(); - } - - @Override - public void reset() { - metric.reset(); - } - - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - metric.set(value); - } - - @Override - public String getPublishString() { - return getCurrentMetricState(); - } - - @Override - public CombinableMetric combine(CombinableMetric metric) { - this.metric.change(metric.getValue()); - return this; - } - - @Override - public void decrement() { - metric.decrement(); - } - - @Override - public CombinableMetric clone(){ - NonAtomicMetric metric = new NonAtomicMetric(this.metric.getMetricType()); - metric.change(this.metric.getValue()); - return new CombinableMetricImpl(metric); - } + private final Metric metric; + + public CombinableMetricImpl(MetricType type) { + metric = new NonAtomicMetric(type); + } + + private CombinableMetricImpl(Metric metric) { + this.metric = metric; + } + + @Override + public MetricType getMetricType() { + return metric.getMetricType(); + } + + @Override + public long getValue() { + return metric.getValue(); + } + + @Override + public void change(long delta) { + metric.change(delta); + } + + @Override + public void increment() { + metric.increment(); + } + + @Override + public String getCurrentMetricState() { + return metric.getCurrentMetricState(); + } + + @Override + public void reset() { + metric.reset(); + } + + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + metric.set(value); + } + + @Override + public String getPublishString() { + return getCurrentMetricState(); + } + + @Override + public CombinableMetric combine(CombinableMetric metric) { + this.metric.change(metric.getValue()); + return this; + } + + @Override + public void decrement() { + metric.decrement(); + } + + @Override + public CombinableMetric clone() { + NonAtomicMetric metric = new NonAtomicMetric(this.metric.getMetricType()); + metric.change(this.metric.getValue()); + return new CombinableMetricImpl(metric); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetric.java index 75ba7e01312..ee9cec0bfdf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,20 +19,16 @@ /** * Class that exposes the various phoenix metrics collected at the Phoenix Query Service level. - * Because metrics are dynamic in nature, it is not guaranteed that the state exposed will always - * be in sync with each other. One should use these metrics primarily for monitoring and debugging + * Because metrics are dynamic in nature, it is not guaranteed that the state exposed will always be + * in sync with each other. One should use these metrics primarily for monitoring and debugging * purposes. */ public interface ConnectionQueryServicesMetric extends Metric { - /** - * @return Number of samples collected since the last {@link #reset()} call. - */ - long getNumberOfSamples(); + /** Returns Number of samples collected since the last {@link #reset()} call. */ + long getNumberOfSamples(); - /** - * @return Sum of the values of the metric sampled since the last {@link #reset()} call. - */ - long getTotalSum(); + /** Returns Sum of the values of the metric sampled since the last {@link #reset()} call. */ + long getTotalSum(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetricImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetricImpl.java index bbefbf34d84..2950f510dcf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetricImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ConnectionQueryServicesMetricImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,68 +24,75 @@ */ public class ConnectionQueryServicesMetricImpl implements ConnectionQueryServicesMetric { - private AtomicLong numberOfSamples = new AtomicLong(0); - private Metric metric; + private AtomicLong numberOfSamples = new AtomicLong(0); + private Metric metric; - /** - * Default implementation used when Phoenix Connection Query Service Metrics are enabled - */ - public ConnectionQueryServicesMetricImpl(MetricType type) { - this.metric = new AtomicMetric(type); - } + /** + * Default implementation used when Phoenix Connection Query Service Metrics are enabled + */ + public ConnectionQueryServicesMetricImpl(MetricType type) { + this.metric = new AtomicMetric(type); + } - /** - * Reset the internal state. Typically called after metric information has been - * collected and a new phase of collection is being requested for the next interval. 
- */ - @Override public void reset() { - metric.reset(); - numberOfSamples.set(0); - } + /** + * Reset the internal state. Typically called after metric information has been collected and a + * new phase of collection is being requested for the next interval. + */ + @Override + public void reset() { + metric.reset(); + numberOfSamples.set(0); + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - metric.set(value); - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + metric.set(value); + } - @Override public long getNumberOfSamples() { - return numberOfSamples.get(); - } + @Override + public long getNumberOfSamples() { + return numberOfSamples.get(); + } - @Override public long getTotalSum() { - return metric.getValue(); - } + @Override + public long getTotalSum() { + return metric.getValue(); + } - @Override public void change(long delta) { - metric.change(delta); - numberOfSamples.incrementAndGet(); - } + @Override + public void change(long delta) { + metric.change(delta); + numberOfSamples.incrementAndGet(); + } - @Override public void increment() { - metric.increment(); - numberOfSamples.incrementAndGet(); - } + @Override + public void increment() { + metric.increment(); + numberOfSamples.incrementAndGet(); + } - @Override public MetricType getMetricType() { - return metric.getMetricType(); - } + @Override + public MetricType getMetricType() { + return metric.getMetricType(); + } - @Override public long getValue() { - return metric.getValue(); - } + @Override + public long getValue() { + return metric.getValue(); + } - @Override public String getCurrentMetricState() { - return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); - } + @Override + public String getCurrentMetricState() { + return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); + } - @Override public void decrement() { - metric.decrement(); - numberOfSamples.incrementAndGet(); - } + @Override + public void decrement() { + metric.decrement(); + numberOfSamples.incrementAndGet(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java index f0f071cbcfe..2c2a3686274 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
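For reference, the ConnectionQueryServicesMetricImpl reformatted above tracks a sample count alongside the value; a hypothetical sequence:

  ConnectionQueryServicesMetric qt = new ConnectionQueryServicesMetricImpl(MetricType.QUERY_TIME);
  qt.change(250);   // total sum 250, one sample
  qt.increment();   // total sum 251, two samples
  // qt.getCurrentMetricState() reports the value plus ", Number of samples: 2"
  qt.reset();       // clears both the value and the sample counter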
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,8 +20,37 @@ import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_ADD_COUNTER; import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE; import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_EVICTION_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_HIT_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_MISS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_REMOVAL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.COUNTER_METADATA_INCONSISTENCY; +import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_IN_REMOTE_RESULTS; +import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_REGION_SERVER_RESULTS; +import static org.apache.phoenix.monitoring.MetricType.COUNT_MILLS_BETWEEN_NEXTS; +import static org.apache.phoenix.monitoring.MetricType.COUNT_NOT_SERVING_REGION_EXCEPTION; +import static org.apache.phoenix.monitoring.MetricType.COUNT_REMOTE_RPC_CALLS; +import static org.apache.phoenix.monitoring.MetricType.COUNT_REMOTE_RPC_RETRIES; +import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_FILTERED; +import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; +import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_CALLS; +import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_RETRIES; +import static org.apache.phoenix.monitoring.MetricType.COUNT_SCANNED_REGIONS; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_CREATED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_ERROR_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_FALLBACK_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_END_TO_END_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_EXECUTION_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_END_TO_END_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_EXECUTION_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_TASK_TIMEOUT_COUNTER; import static org.apache.phoenix.monitoring.MetricType.HCONNECTIONS_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.INDEX_COMMIT_FAILURE_SIZE; import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES; import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME; import static 
org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE; @@ -33,8 +62,10 @@ import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS; import static org.apache.phoenix.monitoring.MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.INDEX_COMMIT_FAILURE_SIZE; import static org.apache.phoenix.monitoring.MetricType.PAGED_ROWS_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_FAILED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_SERVICES_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_TIME; @@ -48,39 +79,7 @@ import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME; import static org.apache.phoenix.monitoring.MetricType.TASK_QUEUE_WAIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.TASK_REJECTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_END_TO_END_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_EXECUTION_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_END_TO_END_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_EXECUTION_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_TASK_TIMEOUT_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_FALLBACK_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_CREATED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.HA_PARALLEL_CONNECTION_ERROR_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_HIT_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_MISS_COUNTER; - -import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_CALLS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_REMOTE_RPC_CALLS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_MILLS_BETWEEN_NEXTS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_NOT_SERVING_REGION_EXCEPTION; -import static 
org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_REGION_SERVER_RESULTS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_IN_REMOTE_RESULTS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_SCANNED_REGIONS; -import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_RETRIES; -import static org.apache.phoenix.monitoring.MetricType.COUNT_REMOTE_RPC_RETRIES; -import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; -import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_FILTERED; -import static org.apache.phoenix.monitoring.MetricType.COUNTER_METADATA_INCONSISTENCY; import java.util.ArrayList; import java.util.Collection; @@ -91,180 +90,180 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; import org.apache.phoenix.query.QueryServicesOptions; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Central place where we keep track of all the global client phoenix metrics. These metrics are different from - * {@link ReadMetricQueue} or {@link MutationMetricQueue} as they are collected at the client JVM level as opposed - * to the above two which are collected for every phoenix request. + * Central place where we keep track of all the global client phoenix metrics. These metrics are + * different from {@link ReadMetricQueue} or {@link MutationMetricQueue} as they are collected at + * the client JVM level as opposed to the above two which are collected for every phoenix request. */ public enum GlobalClientMetrics { - GLOBAL_MUTATION_BATCH_SIZE(MUTATION_BATCH_SIZE), - GLOBAL_MUTATION_BYTES(MUTATION_BYTES), - GLOBAL_MUTATION_COMMIT_TIME(MUTATION_COMMIT_TIME), - GLOBAL_MUTATION_BATCH_FAILED_COUNT(MUTATION_BATCH_FAILED_SIZE), - GLOBAL_MUTATION_INDEX_COMMIT_FAILURE_COUNT(INDEX_COMMIT_FAILURE_SIZE), - GLOBAL_MUTATION_SYSCAT_TIME(MUTATION_SYSCAT_TIME), - GLOBAL_QUERY_TIME(QUERY_TIME), - GLOBAL_NUM_PARALLEL_SCANS(NUM_PARALLEL_SCANS), - GLOBAL_SCAN_BYTES(SCAN_BYTES), - GLOBAL_SPOOL_FILE_SIZE(SPOOL_FILE_SIZE), - GLOBAL_MEMORY_CHUNK_BYTES(MEMORY_CHUNK_BYTES), - GLOBAL_MEMORY_WAIT_TIME(MEMORY_WAIT_TIME), - GLOBAL_TASK_QUEUE_WAIT_TIME(TASK_QUEUE_WAIT_TIME), - GLOBAL_TASK_END_TO_END_TIME(TASK_END_TO_END_TIME), - GLOBAL_TASK_EXECUTION_TIME(TASK_EXECUTION_TIME), - GLOBAL_MUTATION_SQL_COUNTER(MUTATION_SQL_COUNTER), - GLOBAL_SELECT_SQL_COUNTER(SELECT_SQL_COUNTER), - GLOBAL_TASK_EXECUTED_COUNTER(TASK_EXECUTED_COUNTER), - GLOBAL_REJECTED_TASK_COUNTER(TASK_REJECTED_COUNTER), - GLOBAL_QUERY_TIMEOUT_COUNTER(QUERY_TIMEOUT_COUNTER), - GLOBAL_FAILED_QUERY_COUNTER(QUERY_FAILED_COUNTER), - GLOBAL_SPOOL_FILE_COUNTER(SPOOL_FILE_COUNTER), - GLOBAL_OPEN_PHOENIX_CONNECTIONS(OPEN_PHOENIX_CONNECTIONS_COUNTER), - GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS(OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER), - GLOBAL_FAILED_PHOENIX_CONNECTIONS(PHOENIX_CONNECTIONS_FAILED_COUNTER), - GLOBAL_QUERY_SERVICES_COUNTER(QUERY_SERVICES_COUNTER), - GLOBAL_HCONNECTIONS_COUNTER(HCONNECTIONS_COUNTER), - GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER(PHOENIX_CONNECTIONS_THROTTLED_COUNTER), - GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER(PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER), - GLOBAL_PAGED_ROWS_COUNTER(PAGED_ROWS_COUNTER), - GLOBAL_HBASE_COUNT_RPC_CALLS(COUNT_RPC_CALLS), - GLOBAL_HBASE_COUNT_REMOTE_RPC_CALLS(COUNT_REMOTE_RPC_CALLS), - GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS(COUNT_MILLS_BETWEEN_NEXTS), - 
GLOBAL_HBASE_COUNT_NOT_SERVING_REGION_EXCEPTION(COUNT_NOT_SERVING_REGION_EXCEPTION), - GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS(COUNT_BYTES_REGION_SERVER_RESULTS), - GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS(COUNT_BYTES_IN_REMOTE_RESULTS), - GLOBAL_HBASE_COUNT_SCANNED_REGIONS(COUNT_SCANNED_REGIONS), - GLOBAL_HBASE_COUNT_RPC_RETRIES(COUNT_RPC_RETRIES), - GLOBAL_HBASE_COUNT_REMOTE_RPC_RETRIES(COUNT_REMOTE_RPC_RETRIES), - GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED), - GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED), - GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY(COUNTER_METADATA_INCONSISTENCY), + GLOBAL_MUTATION_BATCH_SIZE(MUTATION_BATCH_SIZE), + GLOBAL_MUTATION_BYTES(MUTATION_BYTES), + GLOBAL_MUTATION_COMMIT_TIME(MUTATION_COMMIT_TIME), + GLOBAL_MUTATION_BATCH_FAILED_COUNT(MUTATION_BATCH_FAILED_SIZE), + GLOBAL_MUTATION_INDEX_COMMIT_FAILURE_COUNT(INDEX_COMMIT_FAILURE_SIZE), + GLOBAL_MUTATION_SYSCAT_TIME(MUTATION_SYSCAT_TIME), + GLOBAL_QUERY_TIME(QUERY_TIME), + GLOBAL_NUM_PARALLEL_SCANS(NUM_PARALLEL_SCANS), + GLOBAL_SCAN_BYTES(SCAN_BYTES), + GLOBAL_SPOOL_FILE_SIZE(SPOOL_FILE_SIZE), + GLOBAL_MEMORY_CHUNK_BYTES(MEMORY_CHUNK_BYTES), + GLOBAL_MEMORY_WAIT_TIME(MEMORY_WAIT_TIME), + GLOBAL_TASK_QUEUE_WAIT_TIME(TASK_QUEUE_WAIT_TIME), + GLOBAL_TASK_END_TO_END_TIME(TASK_END_TO_END_TIME), + GLOBAL_TASK_EXECUTION_TIME(TASK_EXECUTION_TIME), + GLOBAL_MUTATION_SQL_COUNTER(MUTATION_SQL_COUNTER), + GLOBAL_SELECT_SQL_COUNTER(SELECT_SQL_COUNTER), + GLOBAL_TASK_EXECUTED_COUNTER(TASK_EXECUTED_COUNTER), + GLOBAL_REJECTED_TASK_COUNTER(TASK_REJECTED_COUNTER), + GLOBAL_QUERY_TIMEOUT_COUNTER(QUERY_TIMEOUT_COUNTER), + GLOBAL_FAILED_QUERY_COUNTER(QUERY_FAILED_COUNTER), + GLOBAL_SPOOL_FILE_COUNTER(SPOOL_FILE_COUNTER), + GLOBAL_OPEN_PHOENIX_CONNECTIONS(OPEN_PHOENIX_CONNECTIONS_COUNTER), + GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS(OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER), + GLOBAL_FAILED_PHOENIX_CONNECTIONS(PHOENIX_CONNECTIONS_FAILED_COUNTER), + GLOBAL_QUERY_SERVICES_COUNTER(QUERY_SERVICES_COUNTER), + GLOBAL_HCONNECTIONS_COUNTER(HCONNECTIONS_COUNTER), + GLOBAL_PHOENIX_CONNECTIONS_THROTTLED_COUNTER(PHOENIX_CONNECTIONS_THROTTLED_COUNTER), + GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER(PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER), + GLOBAL_PAGED_ROWS_COUNTER(PAGED_ROWS_COUNTER), + GLOBAL_HBASE_COUNT_RPC_CALLS(COUNT_RPC_CALLS), + GLOBAL_HBASE_COUNT_REMOTE_RPC_CALLS(COUNT_REMOTE_RPC_CALLS), + GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS(COUNT_MILLS_BETWEEN_NEXTS), + GLOBAL_HBASE_COUNT_NOT_SERVING_REGION_EXCEPTION(COUNT_NOT_SERVING_REGION_EXCEPTION), + GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS(COUNT_BYTES_REGION_SERVER_RESULTS), + GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS(COUNT_BYTES_IN_REMOTE_RESULTS), + GLOBAL_HBASE_COUNT_SCANNED_REGIONS(COUNT_SCANNED_REGIONS), + GLOBAL_HBASE_COUNT_RPC_RETRIES(COUNT_RPC_RETRIES), + GLOBAL_HBASE_COUNT_REMOTE_RPC_RETRIES(COUNT_REMOTE_RPC_RETRIES), + GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED), + GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED), + GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY(COUNTER_METADATA_INCONSISTENCY), - GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME), - GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME(HA_PARALLEL_POOL1_TASK_END_TO_END_TIME), - GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME(HA_PARALLEL_POOL1_TASK_EXECUTION_TIME), - GLOBAL_HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER(HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER), - GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER(HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER), + 
GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME), + GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME(HA_PARALLEL_POOL1_TASK_END_TO_END_TIME), + GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME(HA_PARALLEL_POOL1_TASK_EXECUTION_TIME), + GLOBAL_HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER(HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER), + GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER(HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER), - GLOBAL_HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME), - GLOBAL_HA_PARALLEL_POOL2_TASK_END_TO_END_TIME(HA_PARALLEL_POOL2_TASK_END_TO_END_TIME), - GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTION_TIME(HA_PARALLEL_POOL2_TASK_EXECUTION_TIME), - GLOBAL_HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER(HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER), - GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER(HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER), + GLOBAL_HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME), + GLOBAL_HA_PARALLEL_POOL2_TASK_END_TO_END_TIME(HA_PARALLEL_POOL2_TASK_END_TO_END_TIME), + GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTION_TIME(HA_PARALLEL_POOL2_TASK_EXECUTION_TIME), + GLOBAL_HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER(HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER), + GLOBAL_HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER(HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER), - GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER(HA_PARALLEL_TASK_TIMEOUT_COUNTER), - GLOBAL_HA_PARALLEL_CONNECTION_FALLBACK_COUNTER(HA_PARALLEL_CONNECTION_FALLBACK_COUNTER), - GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER( HA_PARALLEL_CONNECTION_ERROR_COUNTER), - GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER(HA_PARALLEL_CONNECTION_CREATED_COUNTER), + GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER(HA_PARALLEL_TASK_TIMEOUT_COUNTER), + GLOBAL_HA_PARALLEL_CONNECTION_FALLBACK_COUNTER(HA_PARALLEL_CONNECTION_FALLBACK_COUNTER), + GLOBAL_HA_PARALLEL_CONNECTION_ERROR_COUNTER(HA_PARALLEL_CONNECTION_ERROR_COUNTER), + GLOBAL_HA_PARALLEL_CONNECTION_CREATED_COUNTER(HA_PARALLEL_CONNECTION_CREATED_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_MISS_COUNTER(CLIENT_METADATA_CACHE_MISS_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_HIT_COUNTER(CLIENT_METADATA_CACHE_HIT_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_EVICTION_COUNTER(CLIENT_METADATA_CACHE_EVICTION_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_REMOVAL_COUNTER(CLIENT_METADATA_CACHE_REMOVAL_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER(CLIENT_METADATA_CACHE_ADD_COUNTER), - GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE(CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE), - GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER(STALE_METADATA_CACHE_EXCEPTION_COUNTER); + GLOBAL_CLIENT_METADATA_CACHE_MISS_COUNTER(CLIENT_METADATA_CACHE_MISS_COUNTER), + GLOBAL_CLIENT_METADATA_CACHE_HIT_COUNTER(CLIENT_METADATA_CACHE_HIT_COUNTER), + GLOBAL_CLIENT_METADATA_CACHE_EVICTION_COUNTER(CLIENT_METADATA_CACHE_EVICTION_COUNTER), + GLOBAL_CLIENT_METADATA_CACHE_REMOVAL_COUNTER(CLIENT_METADATA_CACHE_REMOVAL_COUNTER), + GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER(CLIENT_METADATA_CACHE_ADD_COUNTER), + GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE(CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE), + GLOBAL_CLIENT_STALE_METADATA_CACHE_EXCEPTION_COUNTER(STALE_METADATA_CACHE_EXCEPTION_COUNTER); - private static final Logger LOGGER = LoggerFactory.getLogger(GlobalClientMetrics.class); - private static final boolean isGlobalMetricsEnabled = QueryServicesOptions.withDefaults().isGlobalMetricsEnabled(); - private MetricType metricType; - private GlobalMetric metric; + private static final Logger LOGGER = 
LoggerFactory.getLogger(GlobalClientMetrics.class); + private static final boolean isGlobalMetricsEnabled = + QueryServicesOptions.withDefaults().isGlobalMetricsEnabled(); + private MetricType metricType; + private GlobalMetric metric; - static { - initPhoenixGlobalClientMetrics(); - if (isGlobalMetricsEnabled) { - MetricRegistry metricRegistry = createMetricRegistry(); - registerPhoenixMetricsToRegistry(metricRegistry); - GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry); - } + static { + initPhoenixGlobalClientMetrics(); + if (isGlobalMetricsEnabled) { + MetricRegistry metricRegistry = createMetricRegistry(); + registerPhoenixMetricsToRegistry(metricRegistry); + GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry); } + } - private static void initPhoenixGlobalClientMetrics() { - for (GlobalClientMetrics globalMetric : GlobalClientMetrics.values()) { - globalMetric.metric = isGlobalMetricsEnabled ? - new GlobalMetricImpl(globalMetric.metricType) : new NoOpGlobalMetricImpl(); - } + private static void initPhoenixGlobalClientMetrics() { + for (GlobalClientMetrics globalMetric : GlobalClientMetrics.values()) { + globalMetric.metric = isGlobalMetricsEnabled + ? new GlobalMetricImpl(globalMetric.metricType) + : new NoOpGlobalMetricImpl(); } + } - private static void registerPhoenixMetricsToRegistry(MetricRegistry metricRegistry) { - for (GlobalClientMetrics globalMetric : GlobalClientMetrics.values()) { - metricRegistry.register(globalMetric.metricType.columnName(), - new PhoenixGlobalMetricGauge(globalMetric.metric)); - } + private static void registerPhoenixMetricsToRegistry(MetricRegistry metricRegistry) { + for (GlobalClientMetrics globalMetric : GlobalClientMetrics.values()) { + metricRegistry.register(globalMetric.metricType.columnName(), + new PhoenixGlobalMetricGauge(globalMetric.metric)); } + } - private static MetricRegistry createMetricRegistry() { - LOGGER.info("Creating Metric Registry for Phoenix Global Metrics"); - MetricRegistryInfo registryInfo = new MetricRegistryInfo("PHOENIX", "Phoenix Client Metrics", - "phoenix", "Phoenix,sub=CLIENT", true); - return MetricRegistries.global().create(registryInfo); - } + private static MetricRegistry createMetricRegistry() { + LOGGER.info("Creating Metric Registry for Phoenix Global Metrics"); + MetricRegistryInfo registryInfo = new MetricRegistryInfo("PHOENIX", "Phoenix Client Metrics", + "phoenix", "Phoenix,sub=CLIENT", true); + return MetricRegistries.global().create(registryInfo); + } - /** - * Class to convert Phoenix Metric objects into HBase Metric objects (Gauge) - */ - private static class PhoenixGlobalMetricGauge implements Gauge { + /** + * Class to convert Phoenix Metric objects into HBase Metric objects (Gauge) + */ + private static class PhoenixGlobalMetricGauge implements Gauge { - private final GlobalMetric metric; + private final GlobalMetric metric; - public PhoenixGlobalMetricGauge(GlobalMetric metric) { - this.metric = metric; - } - - @Override - public Long getValue() { - return metric.getValue(); - } + public PhoenixGlobalMetricGauge(GlobalMetric metric) { + this.metric = metric; } - public void update(long value) { - metric.change(value); + @Override + public Long getValue() { + return metric.getValue(); } + } - @VisibleForTesting - public GlobalMetric getMetric() { - return metric; - } + public void update(long value) { + metric.change(value); + } - @VisibleForTesting - public MetricType getMetricType() { - return metricType; - } + @VisibleForTesting + 
public GlobalMetric getMetric() { + return metric; + } + @VisibleForTesting + public MetricType getMetricType() { + return metricType; + } - @Override - public String toString() { - return metric.toString(); - } + @Override + public String toString() { + return metric.toString(); + } - private GlobalClientMetrics(MetricType metricType) { - this.metricType = metricType; - } + private GlobalClientMetrics(MetricType metricType) { + this.metricType = metricType; + } - public void increment() { - metric.increment(); - } - - public void decrement() { - metric.decrement(); - } + public void increment() { + metric.increment(); + } - public static Collection getMetrics() { - List metrics = new ArrayList<>(); - for (GlobalClientMetrics m : GlobalClientMetrics.values()) { - metrics.add(m.metric); - } - return metrics; - } + public void decrement() { + metric.decrement(); + } - public static boolean isMetricsEnabled() { - return isGlobalMetricsEnabled; + public static Collection getMetrics() { + List metrics = new ArrayList<>(); + for (GlobalClientMetrics m : GlobalClientMetrics.values()) { + metrics.add(m.metric); } + return metrics; + } + + public static boolean isMetricsEnabled() { + return isGlobalMetricsEnabled; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java index 1538065bbef..1cd1facbef8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +18,12 @@ package org.apache.phoenix.monitoring; /** - * Class that exposes the various internal phoenix metrics collected - * at the JVM level. Because metrics are dynamic in nature, it is not guaranteed that the - * state exposed will always be in sync with each other. One should use - * these metrics primarily for monitoring and debugging purposes. + * Class that exposes the various internal phoenix metrics collected at the JVM level. Because + * metrics are dynamic in nature, it is not guaranteed that the state exposed will always be in sync + * with each other. One should use these metrics primarily for monitoring and debugging purposes. */ public interface GlobalMetric extends Metric { - - /** - * @return Number of samples collected since the last {@link #reset()} call. - */ - public long getNumberOfSamples(); + + /** Returns Number of samples collected since the last {@link #reset()} call. */ + public long getNumberOfSamples(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java index ca19c580a29..9c170a578cf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.monitoring; @@ -16,68 +24,66 @@ */ public class GlobalMetricImpl implements GlobalMetric { - private AtomicLong numberOfSamples = new AtomicLong(0); - private Metric metric; + private AtomicLong numberOfSamples = new AtomicLong(0); + private Metric metric; - public GlobalMetricImpl(MetricType type) { - this.metric = new AtomicMetric(type); - } + public GlobalMetricImpl(MetricType type) { + this.metric = new AtomicMetric(type); + } - /** - * Reset the internal state. Typically called after metric information has been collected and a new phase of - * collection is being requested for the next interval. - */ - @Override - public void reset() { - metric.reset(); - numberOfSamples.set(0); - } + /** + * Reset the internal state. Typically called after metric information has been collected and a + * new phase of collection is being requested for the next interval. 
+ */ + @Override + public void reset() { + metric.reset(); + numberOfSamples.set(0); + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - metric.set(value); - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + metric.set(value); + } - @Override - public long getNumberOfSamples() { - return numberOfSamples.get(); - } + @Override + public long getNumberOfSamples() { + return numberOfSamples.get(); + } - @Override - public void change(long delta) { - metric.change(delta); - numberOfSamples.incrementAndGet(); - } + @Override + public void change(long delta) { + metric.change(delta); + numberOfSamples.incrementAndGet(); + } - @Override - public void increment() { - metric.increment(); - numberOfSamples.incrementAndGet(); - } + @Override + public void increment() { + metric.increment(); + numberOfSamples.incrementAndGet(); + } - @Override - public MetricType getMetricType() { - return metric.getMetricType(); - } + @Override + public MetricType getMetricType() { + return metric.getMetricType(); + } - @Override - public long getValue() { - return metric.getValue(); - } + @Override + public long getValue() { + return metric.getValue(); + } - @Override - public String getCurrentMetricState() { - return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); - } + @Override + public String getCurrentMetricState() { + return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); + } - @Override - public void decrement() { - metric.decrement(); - numberOfSamples.incrementAndGet(); - } + @Override + public void decrement() { + metric.decrement(); + numberOfSamples.incrementAndGet(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java index 4a9093511f4..22b67e05d63 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +20,7 @@ import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; + import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; @@ -42,139 +43,140 @@ import org.slf4j.LoggerFactory; /** - * Contents mostly copied from GlobalMetricRegistriesAdapter class from hbase-hadoop2-compat - * The adapter attaches HBase's MetricRegistry to Hadoop's DefaultMetricsSystem - * Note: This DOES NOT handle dynamic attach/detach of registries + * Contents mostly copied from GlobalMetricRegistriesAdapter class from hbase-hadoop2-compat The + * adapter attaches HBase's MetricRegistry to Hadoop's DefaultMetricsSystem Note: This DOES NOT + * handle dynamic attach/detach of registries */ public class GlobalMetricRegistriesAdapter { - private static final Logger LOGGER = - LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class); - private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter(); - - private GlobalMetricRegistriesAdapter() { - if (MetricUtil.isDefaultMetricsInitialized()) { - // Prevent clobbering the default metrics HBase has set up in - // RS or Master while JmxCacheBuster shuts the Metrics down - LOGGER.info("HBase metrics is already initialized. " - + "Skipping Phoenix metrics initialization."); - return; - } - DefaultMetricsSystem.initialize("Phoenix"); - JvmMetrics.initSingleton("Phoenix", ""); - } + private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class); + private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter(); - public static GlobalMetricRegistriesAdapter getInstance() { - return INSTANCE; + private GlobalMetricRegistriesAdapter() { + if (MetricUtil.isDefaultMetricsInitialized()) { + // Prevent clobbering the default metrics HBase has set up in + // RS or Master while JmxCacheBuster shuts the Metrics down + LOGGER.info( + "HBase metrics is already initialized. 
" + "Skipping Phoenix metrics initialization."); + return; } - - public void registerMetricRegistry(MetricRegistry registry) { - if (registry == null) { - LOGGER.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null."); - return; - } - - HBaseMetrics2HadoopMetricsAdapter adapter = new HBaseMetrics2HadoopMetricsAdapter(registry); - adapter.registerToDefaultMetricsSystem(); + DefaultMetricsSystem.initialize("Phoenix"); + JvmMetrics.initSingleton("Phoenix", ""); + } + + public static GlobalMetricRegistriesAdapter getInstance() { + return INSTANCE; + } + + public void registerMetricRegistry(MetricRegistry registry) { + if (registry == null) { + LOGGER.warn("Registry cannot be registered with Hadoop Metrics 2 since it is null."); + return; } - /** - * Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects - */ - private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource { - private static final Logger LOGGER = - LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); - private final MetricRegistry registry; - private final String metricTag; - - private HBaseMetrics2HadoopMetricsAdapter(MetricRegistry registry) { - this.registry = registry; - metricTag = QueryServicesOptions.withDefaults().getClientMetricTag(); - } + HBaseMetrics2HadoopMetricsAdapter adapter = new HBaseMetrics2HadoopMetricsAdapter(registry); + adapter.registerToDefaultMetricsSystem(); + } - private void registerToDefaultMetricsSystem() { - MetricRegistryInfo info = registry.getMetricRegistryInfo(); - LOGGER.info("Registering " + info.getMetricsJmxContext() + - " " + info.getMetricsDescription() + " into DefaultMetricsSystem"); - DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), this); - } + /** + * Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects + */ + private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource { + private static final Logger LOGGER = + LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); + private final MetricRegistry registry; + private final String metricTag; - private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { - MetricRegistryInfo hbaseMetricRegistryInfo = metricRegistry.getMetricRegistryInfo(); - MetricsInfo hadoopMetricsInfo = Interns.info(hbaseMetricRegistryInfo.getMetricsName(), hbaseMetricRegistryInfo.getMetricsDescription()); - MetricsRecordBuilder builder = collector.addRecord(hadoopMetricsInfo); - builder.setContext(hbaseMetricRegistryInfo.getMetricsContext()); - builder.tag(hadoopMetricsInfo, metricTag); - this.snapshotAllMetrics(metricRegistry, builder); - } + private HBaseMetrics2HadoopMetricsAdapter(MetricRegistry registry) { + this.registry = registry; + metricTag = QueryServicesOptions.withDefaults().getClientMetricTag(); + } - private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { - Map metrics = metricRegistry.getMetrics(); - Iterator iterator = metrics.entrySet().iterator(); - - while(iterator.hasNext()) { - Entry e = (Entry)iterator.next(); - String name = StringUtils.capitalize(e.getKey()); - Metric metric = e.getValue(); - if (metric instanceof Gauge) { - this.addGauge(name, (Gauge)metric, builder); - } else if (metric instanceof Counter) { - this.addCounter(name, (Counter)metric, builder); - } else if (metric instanceof Histogram) { - this.addHistogram(name, (Histogram)metric, builder); - } else if (metric instanceof Meter) { - 
this.addMeter(name, (Meter)metric, builder); - } else if (metric instanceof Timer) { - this.addTimer(name, (Timer)metric, builder); - } else { - LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName()); - } - } - } + private void registerToDefaultMetricsSystem() { + MetricRegistryInfo info = registry.getMetricRegistryInfo(); + LOGGER.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + + " into DefaultMetricsSystem"); + DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), + info.getMetricsDescription(), this); + } - private void addGauge(String name, Gauge gauge, MetricsRecordBuilder builder) { - MetricsInfo info = Interns.info(name, ""); - Object o = gauge.getValue(); - if (o instanceof Integer) { - builder.addGauge(info, (Integer)o); - } else if (o instanceof Long) { - builder.addGauge(info, (Long)o); - } else if (o instanceof Float) { - builder.addGauge(info, (Float)o); - } else if (o instanceof Double) { - builder.addGauge(info, (Double)o); - } else { - LOGGER.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass()); - } + private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { + MetricRegistryInfo hbaseMetricRegistryInfo = metricRegistry.getMetricRegistryInfo(); + MetricsInfo hadoopMetricsInfo = Interns.info(hbaseMetricRegistryInfo.getMetricsName(), + hbaseMetricRegistryInfo.getMetricsDescription()); + MetricsRecordBuilder builder = collector.addRecord(hadoopMetricsInfo); + builder.setContext(hbaseMetricRegistryInfo.getMetricsContext()); + builder.tag(hadoopMetricsInfo, metricTag); + this.snapshotAllMetrics(metricRegistry, builder); + } + private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { + Map metrics = metricRegistry.getMetrics(); + Iterator iterator = metrics.entrySet().iterator(); + + while (iterator.hasNext()) { + Entry e = (Entry) iterator.next(); + String name = StringUtils.capitalize(e.getKey()); + Metric metric = e.getValue(); + if (metric instanceof Gauge) { + this.addGauge(name, (Gauge) metric, builder); + } else if (metric instanceof Counter) { + this.addCounter(name, (Counter) metric, builder); + } else if (metric instanceof Histogram) { + this.addHistogram(name, (Histogram) metric, builder); + } else if (metric instanceof Meter) { + this.addMeter(name, (Meter) metric, builder); + } else if (metric instanceof Timer) { + this.addTimer(name, (Timer) metric, builder); + } else { + LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName()); } + } + } - private void addCounter(String name, Counter counter, MetricsRecordBuilder builder) { - MetricsInfo info = Interns.info(name, ""); - builder.addCounter(info, counter.getCount()); - } + private void addGauge(String name, Gauge gauge, MetricsRecordBuilder builder) { + MetricsInfo info = Interns.info(name, ""); + Object o = gauge.getValue(); + if (o instanceof Integer) { + builder.addGauge(info, (Integer) o); + } else if (o instanceof Long) { + builder.addGauge(info, (Long) o); + } else if (o instanceof Float) { + builder.addGauge(info, (Float) o); + } else if (o instanceof Double) { + builder.addGauge(info, (Double) o); + } else { + LOGGER.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass()); + } - private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) { - MutableHistogram.snapshot(name, "", histogram, builder, true); - } + } - private void addMeter(String name, Meter meter, 
MetricsRecordBuilder builder) { - builder.addGauge(Interns.info(name + "_count", ""), meter.getCount()); - builder.addGauge(Interns.info(name + "_mean_rate", ""), meter.getMeanRate()); - builder.addGauge(Interns.info(name + "_1min_rate", ""), meter.getOneMinuteRate()); - builder.addGauge(Interns.info(name + "_5min_rate", ""), meter.getFiveMinuteRate()); - builder.addGauge(Interns.info(name + "_15min_rate", ""), meter.getFifteenMinuteRate()); - } + private void addCounter(String name, Counter counter, MetricsRecordBuilder builder) { + MetricsInfo info = Interns.info(name, ""); + builder.addCounter(info, counter.getCount()); + } - private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { - this.addMeter(name, timer.getMeter(), builder); - this.addHistogram(name, timer.getHistogram(), builder); - } + private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) { + MutableHistogram.snapshot(name, "", histogram, builder, true); + } - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean b) { - this.snapshotAllMetrics(this.registry, metricsCollector); - } + private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { + builder.addGauge(Interns.info(name + "_count", ""), meter.getCount()); + builder.addGauge(Interns.info(name + "_mean_rate", ""), meter.getMeanRate()); + builder.addGauge(Interns.info(name + "_1min_rate", ""), meter.getOneMinuteRate()); + builder.addGauge(Interns.info(name + "_5min_rate", ""), meter.getFiveMinuteRate()); + builder.addGauge(Interns.info(name + "_15min_rate", ""), meter.getFifteenMinuteRate()); + } + private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { + this.addMeter(name, timer.getMeter(), builder); + this.addHistogram(name, timer.getHistogram(), builder); } + + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean b) { + this.snapshotAllMetrics(this.registry, metricsCollector); + } + + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistribution.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistribution.java index 4e8039c27f5..b8ac31a682c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistribution.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistribution.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,14 +20,14 @@ import java.util.Map; public interface HistogramDistribution { - public long getMin(); + public long getMin(); - public long getMax(); + public long getMax(); - public long getCount(); + public long getCount(); - public String getHistoName(); + public String getHistoName(); - public Map getRangeDistributionMap(); + public Map getRangeDistributionMap(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistributionImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistributionImpl.java index 90711561a42..9123a81f311 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistributionImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/HistogramDistributionImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,44 +20,45 @@ import java.util.Map; public class HistogramDistributionImpl implements HistogramDistribution { - private final String histoName; - private final long min; - private final long max; - private final long count; - private final Map rangeDistribution; - - public HistogramDistributionImpl(String histoName, long min, long max, long count, Map distributionMap ) { - this.histoName = histoName; - this.min = min; - this.max = max; - this.count = count; - this.rangeDistribution = distributionMap; - } - - @Override - public long getMin() { - return min; - } - - @Override - public long getMax() { - return max; - } - - @Override - public long getCount() { - return count; - } - - @Override - public String getHistoName() { - return histoName; - } - - @Override - //The caller making the list immutable - public Map getRangeDistributionMap() { - return rangeDistribution; - } + private final String histoName; + private final long min; + private final long max; + private final long count; + private final Map rangeDistribution; + + public HistogramDistributionImpl(String histoName, long min, long max, long count, + Map distributionMap) { + this.histoName = histoName; + this.min = min; + this.max = max; + this.count = count; + this.rangeDistribution = distributionMap; + } + + @Override + public long getMin() { + return min; + } + + @Override + public long getMax() { + return max; + } + + @Override + public long getCount() { + return count; + } + + @Override + public String getHistoName() { + return histoName; + } + + @Override + // The caller making the list immutable + public Map getRangeDistributionMap() { + return rangeDistribution; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/JmxMetricProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/JmxMetricProvider.java index d2e4a356ea9..36ef4410bdf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/JmxMetricProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/JmxMetricProvider.java @@ 
-7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,78 +17,80 @@ */ package org.apache.phoenix.monitoring; +import java.util.Map; + import org.apache.hadoop.hbase.metrics.Gauge; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; - -import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This class implements the JMX based default Metric publishing - * of Metrics to JMX end point. - * This class is defined in phoenix/phoenix-core/src/main/resources/META-INF/services/org.apache.phoenix.monitoring.MetricPublisherSupplierFactory + * This class implements the JMX based default Metric publishing of Metrics to JMX end point. This + * class is defined in + * phoenix/phoenix-core/src/main/resources/META-INF/services/org.apache.phoenix.monitoring.MetricPublisherSupplierFactory */ public class JmxMetricProvider implements MetricPublisherSupplierFactory { - private static final Logger LOGGER = LoggerFactory.getLogger(JmxMetricProvider.class); - private static final String metricsName = "PHOENIX-TableLevel"; - private static final String metricsDesc = "Phoenix Client Metrics"; - private static final String metricsjmxContext = "phoenixTableLevel"; - private static final String metricsContext = "Phoenix,sub=CLIENT"; - - private MetricRegistry metricRegistry; + private static final Logger LOGGER = LoggerFactory.getLogger(JmxMetricProvider.class); + private static final String metricsName = "PHOENIX-TableLevel"; + private static final String metricsDesc = "Phoenix Client Metrics"; + private static final String metricsjmxContext = "phoenixTableLevel"; + private static final String metricsContext = "Phoenix,sub=CLIENT"; - @Override public void registerMetricProvider() { - metricRegistry = createMetricRegistry(); - GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry); - } + private MetricRegistry metricRegistry; - @Override public void unregisterMetricProvider() { + @Override + public void registerMetricProvider() { + metricRegistry = createMetricRegistry(); + GlobalMetricRegistriesAdapter.getInstance().registerMetricRegistry(metricRegistry); + } - } + @Override + public void unregisterMetricProvider() { - private MetricRegistry createMetricRegistry() { - LOGGER.info("Creating Metric Registry for Phoenix Table Level Metrics"); - MetricRegistryInfo registryInfo = - new MetricRegistryInfo(metricsName, metricsDesc, - metricsjmxContext, metricsContext, true); - return MetricRegistries.global().create(registryInfo); - } + } - private static class PhoenixMetricGauge implements Gauge { - private final PhoenixTableMetric metric; + private MetricRegistry createMetricRegistry() { + LOGGER.info("Creating Metric Registry for Phoenix Table Level Metrics"); + MetricRegistryInfo registryInfo = + new MetricRegistryInfo(metricsName, metricsDesc, metricsjmxContext, metricsContext, true); + return MetricRegistries.global().create(registryInfo); + } - public PhoenixMetricGauge(PhoenixTableMetric metric) { - this.metric = metric; - } + private static class 
PhoenixMetricGauge implements Gauge { + private final PhoenixTableMetric metric; - @Override public Long getValue() { - return metric.getValue(); - } + public PhoenixMetricGauge(PhoenixTableMetric metric) { + this.metric = metric; } - private String getMetricNameFromMetricType(MetricType type, String tableName) { - return tableName + "_table_" + type; + @Override + public Long getValue() { + return metric.getValue(); } - - @Override public void registerMetrics(TableClientMetrics tInstance) { - for (Map.Entry entry : tInstance.getMetricRegistry() - .entrySet()) { - metricRegistry - .register(getMetricNameFromMetricType(entry.getKey(), tInstance.getTableName()), - new PhoenixMetricGauge(entry.getValue())); - } + } + + private String getMetricNameFromMetricType(MetricType type, String tableName) { + return tableName + "_table_" + type; + } + + @Override + public void registerMetrics(TableClientMetrics tInstance) { + for (Map.Entry entry : tInstance.getMetricRegistry() + .entrySet()) { + metricRegistry.register(getMetricNameFromMetricType(entry.getKey(), tInstance.getTableName()), + new PhoenixMetricGauge(entry.getValue())); } + } - @Override public void unRegisterMetrics(TableClientMetrics tInstance) { - for (Map.Entry entry : tInstance.getMetricRegistry() - .entrySet()) { - metricRegistry - .remove(getMetricNameFromMetricType(entry.getKey(), tInstance.getTableName())); - } + @Override + public void unRegisterMetrics(TableClientMetrics tInstance) { + for (Map.Entry entry : tInstance.getMetricRegistry() + .entrySet()) { + metricRegistry.remove(getMetricNameFromMetricType(entry.getKey(), tInstance.getTableName())); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/LatencyHistogram.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/LatencyHistogram.java index 83adb719675..9d2f9cb4f27 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/LatencyHistogram.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/LatencyHistogram.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -22,25 +22,24 @@ import org.apache.phoenix.query.QueryServices; /** - * Histogram for calculating latencies. We read ranges using - * config property {@link QueryServices#PHOENIX_HISTOGRAM_LATENCY_RANGES}. - * If this property is not set then it will default to - * {@link org.apache.hadoop.metrics2.lib.MutableTimeHistogram#RANGES} values. + * Histogram for calculating latencies. We read ranges using config property + * {@link QueryServices#PHOENIX_HISTOGRAM_LATENCY_RANGES}. If this property is not set then it will + * default to {@link org.apache.hadoop.metrics2.lib.MutableTimeHistogram#RANGES} values. 
*/ public class LatencyHistogram extends RangeHistogram { - //default range of time buckets in milli seconds. - protected final static long[] DEFAULT_RANGE = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000}; + // default range of time buckets in milli seconds. + protected final static long[] DEFAULT_RANGE = + { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; - public LatencyHistogram(String name, String description, Configuration conf) { - super(initializeRanges(conf), name, description); - } + public LatencyHistogram(String name, String description, Configuration conf) { + super(initializeRanges(conf), name, description); + } - private static long[] initializeRanges(Configuration conf) { - long[] ranges = PhoenixConfigurationUtilHelper.getLongs(conf, - QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES); - return ranges != null ? ranges : DEFAULT_RANGE; - } + private static long[] initializeRanges(Configuration conf) { + long[] ranges = + PhoenixConfigurationUtilHelper.getLongs(conf, QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES); + return ranges != null ? ranges : DEFAULT_RANGE; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java index daa0bba8bee..dd77c482e27 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,19 +24,19 @@ * Class that encapsulates the metrics regarding memory resources needed for servicing a request. 
*/ public class MemoryMetricsHolder { - private final CombinableMetric memoryChunkSizeMetric; - private final CombinableMetric memoryWaitTimeMetric; - - public MemoryMetricsHolder(ReadMetricQueue readMetrics, String tableName) { - this.memoryChunkSizeMetric = readMetrics.allotMetric(MEMORY_CHUNK_BYTES, tableName); - this.memoryWaitTimeMetric = readMetrics.allotMetric(MEMORY_WAIT_TIME, tableName); - } + private final CombinableMetric memoryChunkSizeMetric; + private final CombinableMetric memoryWaitTimeMetric; - public CombinableMetric getMemoryChunkSizeMetric() { - return memoryChunkSizeMetric; - } + public MemoryMetricsHolder(ReadMetricQueue readMetrics, String tableName) { + this.memoryChunkSizeMetric = readMetrics.allotMetric(MEMORY_CHUNK_BYTES, tableName); + this.memoryWaitTimeMetric = readMetrics.allotMetric(MEMORY_WAIT_TIME, tableName); + } - public CombinableMetric getMemoryWaitTimeMetric() { - return memoryWaitTimeMetric; - } + public CombinableMetric getMemoryChunkSizeMetric() { + return memoryChunkSizeMetric; + } + + public CombinableMetric getMemoryWaitTimeMetric() { + return memoryWaitTimeMetric; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/Metric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/Metric.java index 1f3bdb858d0..da13435fad3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/Metric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/Metric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,48 +21,41 @@ * Interface that represents phoenix-internal metric. */ public interface Metric { - - /** - * @return type of the metric - */ - public MetricType getMetricType(); - /** - * @return Current value of the metric - */ - public long getValue(); + /** Returns type of the metric */ + public MetricType getMetricType(); - /** - * Change the metric by the specified amount - * - * @param delta - * amount by which the metric value should be changed - */ - public void change(long delta); + /** Returns Current value of the metric */ + public long getValue(); - /** - * Increase the value of metric by 1 - */ - public void increment(); - - /** - * Decrease the value of metric by 1 - */ - public void decrement(); - - /** - * @return String that represents the current state of the metric. Typically used for logging or reporting purposes. - */ - public String getCurrentMetricState(); - - /** - * Reset the metric - */ - public void reset(); + /** + * Change the metric by the specified amount amount by which the metric value should be changed + */ + public void change(long delta); - /** - * Set the Metric value as current value - */ - void set(long value); -} + /** + * Increase the value of metric by 1 + */ + public void increment(); + + /** + * Decrease the value of metric by 1 + */ + public void decrement(); + + /** + * Returns String that represents the current state of the metric. Typically used for logging or + * reporting purposes. 
+ */ + public String getCurrentMetricState(); + /** + * Reset the metric + */ + public void reset(); + + /** + * Set the Metric value as current value + */ + void set(long value); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricPublisherSupplierFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricPublisherSupplierFactory.java index dee2345a330..1e00b8f9b50 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricPublisherSupplierFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricPublisherSupplierFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,30 +6,30 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.monitoring; /** * Interface for configurable MetricPublisher interface construction - * */ public interface MetricPublisherSupplierFactory extends MetricsRegistry { - /** - * Interface for Registering Publisher Method - */ - void registerMetricProvider(); + /** + * Interface for Registering Publisher Method + */ + void registerMetricProvider(); - /** - * Interface for UnRegistering Publisher Method - */ - void unregisterMetricProvider(); + /** + * Interface for UnRegistering Publisher Method + */ + void unregisterMetricProvider(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricServiceResolver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricServiceResolver.java index 920f24b5ef8..3ccd479f184 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricServiceResolver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricServiceResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,51 +15,48 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.monitoring; +import java.util.List; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.InstanceResolver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; - /** - * This class helps resolve the metricpublisher supplier class at the run time. 
- * Based on the classString name passed, it will return the appropriate class Instance. + * This class helps resolve the metricpublisher supplier class at the run time. Based on the + * classString name passed, it will return the appropriate class Instance. */ public class MetricServiceResolver { - private static final Logger LOGGER = LoggerFactory.getLogger(MetricServiceResolver.class); + private static final Logger LOGGER = LoggerFactory.getLogger(MetricServiceResolver.class); - private MetricPublisherSupplierFactory metricSupplier = null; + private MetricPublisherSupplierFactory metricSupplier = null; - public MetricPublisherSupplierFactory instantiate(String classString) { - Preconditions.checkNotNull(classString); + public MetricPublisherSupplierFactory instantiate(String classString) { + Preconditions.checkNotNull(classString); + if (metricSupplier == null) { + try { + Class clazz = Class.forName(classString); + List factoryList = + InstanceResolver.get(MetricPublisherSupplierFactory.class, null); + for (MetricPublisherSupplierFactory factory : factoryList) { + if (clazz.isInstance(factory)) { + metricSupplier = factory; + LOGGER.info(String.format( + "Sucessfully loaded class for MetricPublishFactory of type: %s", classString)); + break; + } + } if (metricSupplier == null) { - try { - Class clazz = Class.forName(classString); - List - factoryList = - InstanceResolver.get(MetricPublisherSupplierFactory.class, null); - for (MetricPublisherSupplierFactory factory : factoryList) { - if (clazz.isInstance(factory)) { - metricSupplier = factory; - LOGGER.info(String.format( - "Sucessfully loaded class for MetricPublishFactory of type: %s", - classString)); - break; - } - } - if (metricSupplier == null) { - String msg = String.format("Could not load/instantiate class %s", classString); - LOGGER.error(msg); - } - } catch (ClassNotFoundException e) { - LOGGER.error(String.format("Could not load/instantiate class %s", classString), e); - } + String msg = String.format("Could not load/instantiate class %s", classString); + LOGGER.error(msg); } - return metricSupplier; + } catch (ClassNotFoundException e) { + LOGGER.error(String.format("Could not load/instantiate class %s", classString), e); + } } + return metricSupplier; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java index d66fb0e19df..75208ec9302 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,240 +21,349 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; - /** - * Keeping {@link LogLevel#OFF} for metrics which are calculated globally only and doesn't need to be logged in SYSTEM.LOG + * Keeping {@link LogLevel#OFF} for metrics which are calculated globally only and doesn't need to + * be logged in SYSTEM.LOG */ public enum MetricType { - NO_OP_METRIC("no", "No op metric",LogLevel.OFF, PLong.INSTANCE), - // mutation (write) related metrics - MUTATION_BATCH_SIZE("ms", "Number of mutations in the batch",LogLevel.OFF, PLong.INSTANCE), - MUTATION_BYTES("mb", "Size of mutations in bytes",LogLevel.OFF, PLong.INSTANCE), - MUTATION_COMMIT_TIME("mt", "Time it took to commit a batch of mutations",LogLevel.OFF, PLong.INSTANCE), - MUTATION_SYSCAT_TIME("msyst", "Time it spent in syscat before mutation", LogLevel.OFF, PLong.INSTANCE), - MUTATION_BATCH_FAILED_SIZE("mfs", "Number of mutations that failed to be committed",LogLevel.OFF, PLong.INSTANCE), - MUTATION_SQL_COUNTER("msc", "Counter for number of mutation sql statements",LogLevel.OFF, PLong.INSTANCE), - UPSERT_SQL_COUNTER("uc", "Counter for number of upsert sql queries", LogLevel.OFF, PLong.INSTANCE), - UPSERT_COMMIT_TIME("ut", "Time it took to commit a batch of upserts", LogLevel.OFF, PLong.INSTANCE), - UPSERT_MUTATION_BYTES("umb", "Size of mutations in upsert statement in bytes",LogLevel.OFF, PLong.INSTANCE), - UPSERT_MUTATION_SQL_COUNTER("umsc", "Counter for number of upsert mutations committed",LogLevel.OFF, PLong.INSTANCE), - UPSERT_BATCH_FAILED_SIZE("ubfs", "Number of upsert mutations in a batch that failed to be committed", - LogLevel.OFF, PLong.INSTANCE), - UPSERT_BATCH_FAILED_COUNTER("ubfc", "Number of upsert mutation batches that failed to be committed", - LogLevel.OFF, PLong.INSTANCE), + NO_OP_METRIC("no", "No op metric", LogLevel.OFF, PLong.INSTANCE), + // mutation (write) related metrics + MUTATION_BATCH_SIZE("ms", "Number of mutations in the batch", LogLevel.OFF, PLong.INSTANCE), + MUTATION_BYTES("mb", "Size of mutations in bytes", LogLevel.OFF, PLong.INSTANCE), + MUTATION_COMMIT_TIME("mt", "Time it took to commit a batch of mutations", LogLevel.OFF, + PLong.INSTANCE), + MUTATION_SYSCAT_TIME("msyst", "Time it spent in syscat before mutation", LogLevel.OFF, + PLong.INSTANCE), + MUTATION_BATCH_FAILED_SIZE("mfs", "Number of mutations that failed to be committed", LogLevel.OFF, + PLong.INSTANCE), + MUTATION_SQL_COUNTER("msc", "Counter for number of mutation sql statements", LogLevel.OFF, + PLong.INSTANCE), + UPSERT_SQL_COUNTER("uc", "Counter for number of upsert sql queries", LogLevel.OFF, + PLong.INSTANCE), + UPSERT_COMMIT_TIME("ut", "Time it took to commit a batch of upserts", LogLevel.OFF, + PLong.INSTANCE), + UPSERT_MUTATION_BYTES("umb", "Size of mutations in upsert statement in bytes", LogLevel.OFF, + PLong.INSTANCE), + UPSERT_MUTATION_SQL_COUNTER("umsc", "Counter for number of upsert mutations committed", + LogLevel.OFF, PLong.INSTANCE), + UPSERT_BATCH_FAILED_SIZE("ubfs", + "Number of upsert mutations in a batch that failed to be committed", LogLevel.OFF, + PLong.INSTANCE), + UPSERT_BATCH_FAILED_COUNTER("ubfc", + "Number of upsert mutation batches that failed to be committed", LogLevel.OFF, 
PLong.INSTANCE), - UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER("uassc", "Counter which indicates the total number of upsert Mutations which passed executeUpdate phase " - + "(since last commit called) and subsequent conn.commit() are successful.", LogLevel.OFF, PLong.INSTANCE), - UPSERT_AGGREGATE_FAILURE_SQL_COUNTER("uafsc", "Counter which indicates the total number of upsert Mutations for all statements which failed either in executeUpdate phase " - + "(since last commit called) or subsequent conn.commit() fails", LogLevel.OFF, PLong.INSTANCE), - UPSERT_SUCCESS_SQL_COUNTER("ussc", "Counter for number of upsert sql queries that successfully" - + " passed the executeMutation phase, or if autoCommit is true, the total" - + " number of successful upserts", LogLevel.OFF, PLong.INSTANCE), - UPSERT_FAILED_SQL_COUNTER("ufsc", "Counter for number of upsert sql queries that" - + " failed the executeMutation phase, or if autoCommit is true, the total" - + " number of upsert failures", LogLevel.OFF, PLong.INSTANCE), - UPSERT_SQL_QUERY_TIME("uqt", "Time taken by upsert sql queries inside executeMutation or if" - + " autoCommit is true, the total time taken for executeMutation + conn.commit", - LogLevel.OFF, PLong.INSTANCE), + UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER("uassc", + "Counter which indicates the total number of upsert Mutations which passed executeUpdate phase " + + "(since last commit called) and subsequent conn.commit() are successful.", + LogLevel.OFF, PLong.INSTANCE), + UPSERT_AGGREGATE_FAILURE_SQL_COUNTER("uafsc", + "Counter which indicates the total number of upsert Mutations for all statements which failed either in executeUpdate phase " + + "(since last commit called) or subsequent conn.commit() fails", + LogLevel.OFF, PLong.INSTANCE), + UPSERT_SUCCESS_SQL_COUNTER("ussc", + "Counter for number of upsert sql queries that successfully" + + " passed the executeMutation phase, or if autoCommit is true, the total" + + " number of successful upserts", + LogLevel.OFF, PLong.INSTANCE), + UPSERT_FAILED_SQL_COUNTER("ufsc", + "Counter for number of upsert sql queries that" + + " failed the executeMutation phase, or if autoCommit is true, the total" + + " number of upsert failures", + LogLevel.OFF, PLong.INSTANCE), + UPSERT_SQL_QUERY_TIME("uqt", + "Time taken by upsert sql queries inside executeMutation or if" + + " autoCommit is true, the total time taken for executeMutation + conn.commit", + LogLevel.OFF, PLong.INSTANCE), - ATOMIC_UPSERT_SQL_COUNTER("auc", "Counter for number of atomic upsert sql queries", LogLevel.OFF, PLong.INSTANCE), - ATOMIC_UPSERT_COMMIT_TIME("aut", "Time it took to commit a batch of atomic upserts", LogLevel.OFF, PLong.INSTANCE), - ATOMIC_UPSERT_SQL_QUERY_TIME("auqt", "Time taken by atomic upsert sql queries inside executeMutation or if" - + " autoCommit is true, the total time taken for executeMutation + conn.commit", - LogLevel.OFF, PLong.INSTANCE), + ATOMIC_UPSERT_SQL_COUNTER("auc", "Counter for number of atomic upsert sql queries", LogLevel.OFF, + PLong.INSTANCE), + ATOMIC_UPSERT_COMMIT_TIME("aut", "Time it took to commit a batch of atomic upserts", LogLevel.OFF, + PLong.INSTANCE), + ATOMIC_UPSERT_SQL_QUERY_TIME("auqt", + "Time taken by atomic upsert sql queries inside executeMutation or if" + + " autoCommit is true, the total time taken for executeMutation + conn.commit", + LogLevel.OFF, PLong.INSTANCE), - // delete-specific metrics updated during executeMutation - DELETE_SQL_COUNTER("dc", "Counter for number of delete sql queries", LogLevel.OFF, PLong.INSTANCE), - 
DELETE_SUCCESS_SQL_COUNTER("dssc", "Counter for number of delete sql queries that successfully" - + " passed the executeMutation phase, or if autoCommit is true, the total" - + " number of successful deletes", LogLevel.OFF, PLong.INSTANCE), - DELETE_AGGREGATE_SUCCESS_SQL_COUNTER("dassc", "Counter which indicates if everything in the executeUpdate phase for all " - + "statements (since last commit called) and subsequent conn.commit() is successful.", LogLevel.OFF, PLong.INSTANCE), - DELETE_AGGREGATE_FAILURE_SQL_COUNTER("dafsc", "Counter which indicates if anything in the executeUpdate phase for any " - + "statements (since last commit called) or subsequent conn.commit() fails.", LogLevel.OFF, PLong.INSTANCE), - DELETE_FAILED_SQL_COUNTER("dfsc", "Counter for number of delete sql queries that" - + " failed the executeMutation phase, or if autoCommit is true, the total" - + " number of delete failures", LogLevel.OFF, PLong.INSTANCE), - DELETE_SQL_QUERY_TIME("dqt", "Time taken by delete sql queries inside executeMutation or if" - + " autoCommit is true, the total time taken for executeMutation + conn.commit", - LogLevel.OFF, PLong.INSTANCE), + // delete-specific metrics updated during executeMutation + DELETE_SQL_COUNTER("dc", "Counter for number of delete sql queries", LogLevel.OFF, + PLong.INSTANCE), + DELETE_SUCCESS_SQL_COUNTER("dssc", + "Counter for number of delete sql queries that successfully" + + " passed the executeMutation phase, or if autoCommit is true, the total" + + " number of successful deletes", + LogLevel.OFF, PLong.INSTANCE), + DELETE_AGGREGATE_SUCCESS_SQL_COUNTER("dassc", + "Counter which indicates if everything in the executeUpdate phase for all " + + "statements (since last commit called) and subsequent conn.commit() is successful.", + LogLevel.OFF, PLong.INSTANCE), + DELETE_AGGREGATE_FAILURE_SQL_COUNTER("dafsc", + "Counter which indicates if anything in the executeUpdate phase for any " + + "statements (since last commit called) or subsequent conn.commit() fails.", + LogLevel.OFF, PLong.INSTANCE), + DELETE_FAILED_SQL_COUNTER("dfsc", + "Counter for number of delete sql queries that" + + " failed the executeMutation phase, or if autoCommit is true, the total" + + " number of delete failures", + LogLevel.OFF, PLong.INSTANCE), + DELETE_SQL_QUERY_TIME("dqt", + "Time taken by delete sql queries inside executeMutation or if" + + " autoCommit is true, the total time taken for executeMutation + conn.commit", + LogLevel.OFF, PLong.INSTANCE), - DELETE_COMMIT_TIME("dt", "Time it took to commit a batch of deletes", LogLevel.OFF, PLong.INSTANCE), - DELETE_MUTATION_BYTES("dmb", "Size of mutations in delete statement in bytes",LogLevel.OFF, PLong.INSTANCE), - DELETE_MUTATION_SQL_COUNTER("dmsc", "Counter for number of delete mutations committed",LogLevel.OFF, PLong.INSTANCE), - DELETE_BATCH_FAILED_SIZE("dbfs", "Number of delete mutations in a batch that failed to be committed", - LogLevel.OFF, PLong.INSTANCE), - DELETE_BATCH_FAILED_COUNTER("dbfc", "Number of delete mutation batches that failed to be committed", - LogLevel.OFF, PLong.INSTANCE), + DELETE_COMMIT_TIME("dt", "Time it took to commit a batch of deletes", LogLevel.OFF, + PLong.INSTANCE), + DELETE_MUTATION_BYTES("dmb", "Size of mutations in delete statement in bytes", LogLevel.OFF, + PLong.INSTANCE), + DELETE_MUTATION_SQL_COUNTER("dmsc", "Counter for number of delete mutations committed", + LogLevel.OFF, PLong.INSTANCE), + DELETE_BATCH_FAILED_SIZE("dbfs", + "Number of delete mutations in a batch that failed to be committed", 
LogLevel.OFF, + PLong.INSTANCE), + DELETE_BATCH_FAILED_COUNTER("dbfc", + "Number of delete mutation batches that failed to be committed", LogLevel.OFF, PLong.INSTANCE), - // select-specific query (read) metrics updated during executeQuery - SELECT_SUCCESS_SQL_COUNTER("sss", "Counter for number of select sql queries that successfully" - + " passed the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_AGGREGATE_SUCCESS_SQL_COUNTER("sassc","Counter which indicates if everything in executeQuery" - + " phase and all rs.next() are successful",LogLevel.OFF, PLong.INSTANCE), - SELECT_AGGREGATE_FAILURE_SQL_COUNTER("safsc","Counter which indicates if anything in " - + "executeQuery phase or any of the rs.next() fail",LogLevel.OFF, PLong.INSTANCE), - SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER("spls", "Counter for number of point lookup select sql " - + "queries that succeeded the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_SCAN_SUCCESS_SQL_COUNTER("sscs", "Counter for number of scan select sql queries " - + "that succeed the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_FAILED_SQL_COUNTER("sfsc", "Counter for number of select sql queries that" - + " failed the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_POINTLOOKUP_FAILED_SQL_COUNTER("splf", "Counter for number of point lookup select sql " - + "queries that failed the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_SCAN_FAILED_SQL_COUNTER("sscf", "Counter for number of scan select sql queries " - + "that failed the executeQuery phase", LogLevel.OFF, PLong.INSTANCE), - SELECT_SQL_QUERY_TIME("sqt", "Time taken by select sql queries inside executeQuery", - LogLevel.OFF, PLong.INSTANCE), - INDEX_COMMIT_FAILURE_SIZE("p3s", "Number of mutations that failed in phase 3", LogLevel.OFF, PLong.INSTANCE), - QUERY_POINTLOOKUP_TIMEOUT_COUNTER("qplo", "Number of times the point lookup select query timed out" - + " when fetching results", LogLevel.DEBUG, PLong.INSTANCE), - QUERY_SCAN_TIMEOUT_COUNTER("qso", "Number of times the scan select query timed out" - + " when fetching results", LogLevel.DEBUG, PLong.INSTANCE), - QUERY_POINTLOOKUP_FAILED_COUNTER("qplf", "Number of times the point lookup select query failed" - + " when fetching results", LogLevel.DEBUG, PLong.INSTANCE), - QUERY_SCAN_FAILED_COUNTER("qsf", "Number of times the scan select query failed when fetching" - + " results", LogLevel.DEBUG, PLong.INSTANCE), - // query (read) related metrics - QUERY_TIME("qt", "Query times",LogLevel.OFF, PLong.INSTANCE), - QUERY_TIMEOUT_COUNTER("qo", "Number of times query timed out",LogLevel.DEBUG, PLong.INSTANCE), - QUERY_FAILED_COUNTER("qf", "Number of times query failed",LogLevel.DEBUG, PLong.INSTANCE), - NUM_PARALLEL_SCANS("ps", "Number of scans that were executed in parallel",LogLevel.DEBUG, PLong.INSTANCE), - SCAN_BYTES("sb", "Number of bytes read by scans",LogLevel.OFF, PLong.INSTANCE), - SELECT_SQL_COUNTER("sc", "Counter for number of sql queries",LogLevel.OFF, PLong.INSTANCE), - // task metrics - TASK_QUEUE_WAIT_TIME("tw", "Time in milliseconds tasks had to wait in the queue of the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - TASK_END_TO_END_TIME("tee", "Time in milliseconds spent by tasks from creation to completion",LogLevel.DEBUG, PLong.INSTANCE), - TASK_EXECUTION_TIME("tx", "Time in milliseconds tasks took to execute",LogLevel.DEBUG, PLong.INSTANCE), - TASK_EXECUTED_COUNTER("te", "Counter for number of tasks submitted to the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - 
TASK_REJECTED_COUNTER("tr", "Counter for number of tasks that were rejected by the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - // spool metrics - SPOOL_FILE_SIZE("ss", "Size of spool files created in bytes",LogLevel.DEBUG, PLong.INSTANCE), - SPOOL_FILE_COUNTER("sn", "Number of spool files created",LogLevel.DEBUG, PLong.INSTANCE), - // misc metrics - MEMORY_CHUNK_BYTES("mc", "Number of bytes allocated by the memory manager",LogLevel.DEBUG, PLong.INSTANCE), - MEMORY_WAIT_TIME("mw", "Number of milliseconds threads needed to wait for memory to be allocated through memory manager",LogLevel.DEBUG, PLong.INSTANCE), - CACHE_REFRESH_SPLITS_COUNTER("cr", "Number of times cache was refreshed because of splits",LogLevel.DEBUG, PLong.INSTANCE), - WALL_CLOCK_TIME_MS("tq", "Wall clock time elapsed for the overall query execution",LogLevel.INFO, PLong.INSTANCE), - RESULT_SET_TIME_MS("tn", "Wall clock time elapsed for reading all records using resultSet.next()",LogLevel.INFO, PLong.INSTANCE), - OPEN_PHOENIX_CONNECTIONS_COUNTER("o", "Number of open phoenix connections",LogLevel.OFF, PLong.INSTANCE), - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER("io", "Number of open internal phoenix connections",LogLevel.OFF, PLong.INSTANCE), - QUERY_SERVICES_COUNTER("cqs", "Number of ConnectionQueryServicesImpl instantiated",LogLevel.OFF, PLong.INSTANCE), - HCONNECTIONS_COUNTER("h", "Number of HConnections created by phoenix driver",LogLevel.OFF, PLong.INSTANCE), - PHOENIX_CONNECTIONS_THROTTLED_COUNTER("ct", "Number of client Phoenix connections prevented from opening " + - "because there are already too many to that target cluster.",LogLevel.OFF, PLong.INSTANCE), - PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER("ca","Number of requests for Phoenix connections, whether successful or not.",LogLevel.OFF, PLong.INSTANCE), - PHOENIX_CONNECTIONS_FAILED_COUNTER("cf", "Number of client Phoenix Connections Failed to open" + - ", not including throttled connections", LogLevel.OFF, PLong.INSTANCE), - CLIENT_METADATA_CACHE_MISS_COUNTER("cmcm", "Number of cache misses for the CQSI cache.", LogLevel.DEBUG, PLong.INSTANCE), - CLIENT_METADATA_CACHE_HIT_COUNTER("cmch", "Number of cache hits for the CQSI cache.", LogLevel.DEBUG, PLong.INSTANCE), - CLIENT_METADATA_CACHE_EVICTION_COUNTER("cmce", "Number of cache evictions for the CQSI cache" + - ".", LogLevel.DEBUG, PLong.INSTANCE), - CLIENT_METADATA_CACHE_REMOVAL_COUNTER("cmcr", "Number of cache removals for the CQSI cache.", - LogLevel.DEBUG, PLong.INSTANCE), - CLIENT_METADATA_CACHE_ADD_COUNTER("cmca", "Number of cache adds for the CQSI cache.", - LogLevel.DEBUG, PLong.INSTANCE), - CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE("cmcu", "Estimated used size of the CQSI cache.", - LogLevel.DEBUG, PLong.INSTANCE), - PAGED_ROWS_COUNTER("prc", "Number of dummy rows returned to client due to paging.", - LogLevel.DEBUG, PLong.INSTANCE), - STALE_METADATA_CACHE_EXCEPTION_COUNTER("smce", - "Number of StaleMetadataCacheException encountered.", - LogLevel.DEBUG, PLong.INSTANCE), + // select-specific query (read) metrics updated during executeQuery + SELECT_SUCCESS_SQL_COUNTER("sss", + "Counter for number of select sql queries that successfully" + " passed the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_AGGREGATE_SUCCESS_SQL_COUNTER("sassc", + "Counter which indicates if everything in executeQuery" + + " phase and all rs.next() are successful", + LogLevel.OFF, PLong.INSTANCE), + SELECT_AGGREGATE_FAILURE_SQL_COUNTER("safsc", + "Counter which indicates if anything in " + "executeQuery 
phase or any of the rs.next() fail", + LogLevel.OFF, PLong.INSTANCE), + SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER("spls", + "Counter for number of point lookup select sql " + + "queries that succeeded the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_SCAN_SUCCESS_SQL_COUNTER("sscs", + "Counter for number of scan select sql queries " + "that succeed the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_FAILED_SQL_COUNTER("sfsc", + "Counter for number of select sql queries that" + " failed the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_POINTLOOKUP_FAILED_SQL_COUNTER("splf", + "Counter for number of point lookup select sql " + "queries that failed the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_SCAN_FAILED_SQL_COUNTER("sscf", + "Counter for number of scan select sql queries " + "that failed the executeQuery phase", + LogLevel.OFF, PLong.INSTANCE), + SELECT_SQL_QUERY_TIME("sqt", "Time taken by select sql queries inside executeQuery", LogLevel.OFF, + PLong.INSTANCE), + INDEX_COMMIT_FAILURE_SIZE("p3s", "Number of mutations that failed in phase 3", LogLevel.OFF, + PLong.INSTANCE), + QUERY_POINTLOOKUP_TIMEOUT_COUNTER("qplo", + "Number of times the point lookup select query timed out" + " when fetching results", + LogLevel.DEBUG, PLong.INSTANCE), + QUERY_SCAN_TIMEOUT_COUNTER("qso", + "Number of times the scan select query timed out" + " when fetching results", LogLevel.DEBUG, + PLong.INSTANCE), + QUERY_POINTLOOKUP_FAILED_COUNTER("qplf", + "Number of times the point lookup select query failed" + " when fetching results", + LogLevel.DEBUG, PLong.INSTANCE), + QUERY_SCAN_FAILED_COUNTER("qsf", + "Number of times the scan select query failed when fetching" + " results", LogLevel.DEBUG, + PLong.INSTANCE), + // query (read) related metrics + QUERY_TIME("qt", "Query times", LogLevel.OFF, PLong.INSTANCE), + QUERY_TIMEOUT_COUNTER("qo", "Number of times query timed out", LogLevel.DEBUG, PLong.INSTANCE), + QUERY_FAILED_COUNTER("qf", "Number of times query failed", LogLevel.DEBUG, PLong.INSTANCE), + NUM_PARALLEL_SCANS("ps", "Number of scans that were executed in parallel", LogLevel.DEBUG, + PLong.INSTANCE), + SCAN_BYTES("sb", "Number of bytes read by scans", LogLevel.OFF, PLong.INSTANCE), + SELECT_SQL_COUNTER("sc", "Counter for number of sql queries", LogLevel.OFF, PLong.INSTANCE), + // task metrics + TASK_QUEUE_WAIT_TIME("tw", + "Time in milliseconds tasks had to wait in the queue of the thread pool executor", + LogLevel.DEBUG, PLong.INSTANCE), + TASK_END_TO_END_TIME("tee", "Time in milliseconds spent by tasks from creation to completion", + LogLevel.DEBUG, PLong.INSTANCE), + TASK_EXECUTION_TIME("tx", "Time in milliseconds tasks took to execute", LogLevel.DEBUG, + PLong.INSTANCE), + TASK_EXECUTED_COUNTER("te", "Counter for number of tasks submitted to the thread pool executor", + LogLevel.DEBUG, PLong.INSTANCE), + TASK_REJECTED_COUNTER("tr", + "Counter for number of tasks that were rejected by the thread pool executor", LogLevel.DEBUG, + PLong.INSTANCE), + // spool metrics + SPOOL_FILE_SIZE("ss", "Size of spool files created in bytes", LogLevel.DEBUG, PLong.INSTANCE), + SPOOL_FILE_COUNTER("sn", "Number of spool files created", LogLevel.DEBUG, PLong.INSTANCE), + // misc metrics + MEMORY_CHUNK_BYTES("mc", "Number of bytes allocated by the memory manager", LogLevel.DEBUG, + PLong.INSTANCE), + MEMORY_WAIT_TIME("mw", + "Number of milliseconds threads needed to wait for memory to be allocated through memory manager", + LogLevel.DEBUG, PLong.INSTANCE), 
+ CACHE_REFRESH_SPLITS_COUNTER("cr", "Number of times cache was refreshed because of splits", + LogLevel.DEBUG, PLong.INSTANCE), + WALL_CLOCK_TIME_MS("tq", "Wall clock time elapsed for the overall query execution", LogLevel.INFO, + PLong.INSTANCE), + RESULT_SET_TIME_MS("tn", "Wall clock time elapsed for reading all records using resultSet.next()", + LogLevel.INFO, PLong.INSTANCE), + OPEN_PHOENIX_CONNECTIONS_COUNTER("o", "Number of open phoenix connections", LogLevel.OFF, + PLong.INSTANCE), + OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER("io", "Number of open internal phoenix connections", + LogLevel.OFF, PLong.INSTANCE), + QUERY_SERVICES_COUNTER("cqs", "Number of ConnectionQueryServicesImpl instantiated", LogLevel.OFF, + PLong.INSTANCE), + HCONNECTIONS_COUNTER("h", "Number of HConnections created by phoenix driver", LogLevel.OFF, + PLong.INSTANCE), + PHOENIX_CONNECTIONS_THROTTLED_COUNTER("ct", + "Number of client Phoenix connections prevented from opening " + + "because there are already too many to that target cluster.", + LogLevel.OFF, PLong.INSTANCE), + PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER("ca", + "Number of requests for Phoenix connections, whether successful or not.", LogLevel.OFF, + PLong.INSTANCE), + PHOENIX_CONNECTIONS_FAILED_COUNTER("cf", + "Number of client Phoenix Connections Failed to open" + ", not including throttled connections", + LogLevel.OFF, PLong.INSTANCE), + CLIENT_METADATA_CACHE_MISS_COUNTER("cmcm", "Number of cache misses for the CQSI cache.", + LogLevel.DEBUG, PLong.INSTANCE), + CLIENT_METADATA_CACHE_HIT_COUNTER("cmch", "Number of cache hits for the CQSI cache.", + LogLevel.DEBUG, PLong.INSTANCE), + CLIENT_METADATA_CACHE_EVICTION_COUNTER("cmce", + "Number of cache evictions for the CQSI cache" + ".", LogLevel.DEBUG, PLong.INSTANCE), + CLIENT_METADATA_CACHE_REMOVAL_COUNTER("cmcr", "Number of cache removals for the CQSI cache.", + LogLevel.DEBUG, PLong.INSTANCE), + CLIENT_METADATA_CACHE_ADD_COUNTER("cmca", "Number of cache adds for the CQSI cache.", + LogLevel.DEBUG, PLong.INSTANCE), + CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE("cmcu", "Estimated used size of the CQSI cache.", + LogLevel.DEBUG, PLong.INSTANCE), + PAGED_ROWS_COUNTER("prc", "Number of dummy rows returned to client due to paging.", + LogLevel.DEBUG, PLong.INSTANCE), + STALE_METADATA_CACHE_EXCEPTION_COUNTER("smce", + "Number of StaleMetadataCacheException encountered.", LogLevel.DEBUG, PLong.INSTANCE), - // hbase metrics - COUNT_RPC_CALLS("rp", "Number of RPC calls",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_REMOTE_RPC_CALLS("rr", "Number of remote RPC calls",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_MILLS_BETWEEN_NEXTS("n", "Sum of milliseconds between sequential next calls",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_NOT_SERVING_REGION_EXCEPTION("nsr", "Number of NotServingRegionException caught",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_BYTES_REGION_SERVER_RESULTS("rs", "Number of bytes in Result objects from region servers",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_BYTES_IN_REMOTE_RESULTS("rrs", "Number of bytes in Result objects from remote region servers",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_SCANNED_REGIONS("rg", "Number of regions scanned",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_RPC_RETRIES("rpr", "Number of RPC retries",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_REMOTE_RPC_RETRIES("rrr", "Number of remote RPC retries",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_ROWS_SCANNED("ws", "Number of rows scanned",LogLevel.DEBUG, PLong.INSTANCE), - COUNT_ROWS_FILTERED("wf", "Number of rows 
filtered",LogLevel.DEBUG,PLong.INSTANCE), - COUNTER_METADATA_INCONSISTENCY("mi", "Number of times the metadata inconsistencies ", - LogLevel.DEBUG, PLong.INSTANCE), - NUM_SYSTEM_TABLE_RPC_SUCCESS("nstrs", "Number of successful system table RPC calls", - LogLevel.DEBUG,PLong.INSTANCE), - NUM_SYSTEM_TABLE_RPC_FAILURES("nstcf", "Number of Failed system table RPC calls ", - LogLevel.DEBUG,PLong.INSTANCE), - NUM_METADATA_LOOKUP_FAILURES("nmlf", "Number of Failed metadata lookup calls", - LogLevel.DEBUG,PLong.INSTANCE), - TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS("tsistrc", "Time spent in RPC calls for systemTable lookup", - LogLevel.DEBUG,PLong.INSTANCE), + // hbase metrics + COUNT_RPC_CALLS("rp", "Number of RPC calls", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_REMOTE_RPC_CALLS("rr", "Number of remote RPC calls", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_MILLS_BETWEEN_NEXTS("n", "Sum of milliseconds between sequential next calls", + LogLevel.DEBUG, PLong.INSTANCE), + COUNT_NOT_SERVING_REGION_EXCEPTION("nsr", "Number of NotServingRegionException caught", + LogLevel.DEBUG, PLong.INSTANCE), + COUNT_BYTES_REGION_SERVER_RESULTS("rs", "Number of bytes in Result objects from region servers", + LogLevel.DEBUG, PLong.INSTANCE), + COUNT_BYTES_IN_REMOTE_RESULTS("rrs", + "Number of bytes in Result objects from remote region servers", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_SCANNED_REGIONS("rg", "Number of regions scanned", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_RPC_RETRIES("rpr", "Number of RPC retries", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_REMOTE_RPC_RETRIES("rrr", "Number of remote RPC retries", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_ROWS_SCANNED("ws", "Number of rows scanned", LogLevel.DEBUG, PLong.INSTANCE), + COUNT_ROWS_FILTERED("wf", "Number of rows filtered", LogLevel.DEBUG, PLong.INSTANCE), + COUNTER_METADATA_INCONSISTENCY("mi", "Number of times the metadata inconsistencies ", + LogLevel.DEBUG, PLong.INSTANCE), + NUM_SYSTEM_TABLE_RPC_SUCCESS("nstrs", "Number of successful system table RPC calls", + LogLevel.DEBUG, PLong.INSTANCE), + NUM_SYSTEM_TABLE_RPC_FAILURES("nstcf", "Number of Failed system table RPC calls ", LogLevel.DEBUG, + PLong.INSTANCE), + NUM_METADATA_LOOKUP_FAILURES("nmlf", "Number of Failed metadata lookup calls", LogLevel.DEBUG, + PLong.INSTANCE), + TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS("tsistrc", "Time spent in RPC calls for systemTable lookup", + LogLevel.DEBUG, PLong.INSTANCE), - //HA Related Metrics - HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER("hpoac","Number of Operations to the active cluster",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER("hposc","Number of Operations to the standby cluster",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER("hpfac","Number of Operations to the active cluster",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER("hpfsc","Number of Operations to the standby cluster",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER("hpuac","Number of times active cluster was returned to the caller",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER("hpusc","Number of times standby cluster was returned to the caller",LogLevel.DEBUG,PLong.INSTANCE), - HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME("hpp1tw", "Time in milliseconds tasks had to wait in the queue of the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL1_TASK_END_TO_END_TIME("hpp1tee", "Time in milliseconds spent by tasks 
from creation to completion",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL1_TASK_EXECUTION_TIME("hpp1tx", "Time in milliseconds tasks took to execute",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER("hpp1te", "Counter for number of tasks submitted to the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER("hpp1tr", "Counter for number of tasks that were rejected by the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME("hpp2tw", "Time in milliseconds tasks had to wait in the queue of the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL2_TASK_END_TO_END_TIME("hpp2tee", "Time in milliseconds spent by tasks from creation to completion",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL2_TASK_EXECUTION_TIME("hpp2tx", "Time in milliseconds tasks took to execute",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER("hpp2te", "Counter for number of tasks submitted to the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER("hpp2tr", "Counter for number of tasks that were rejected by the thread pool executor",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_TASK_TIMEOUT_COUNTER("hptto", "Counter for number of tasks that timedout",LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_CONNECTION_FALLBACK_COUNTER("hpcfc", "Counter for the number of connections that fellback to single cluster connection", LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_CONNECTION_ERROR_COUNTER("hpcec","Counter for the number of parallel phoenix connections that return a failure to the user", LogLevel.DEBUG, PLong.INSTANCE), - HA_PARALLEL_CONNECTION_CREATED_COUNTER("hpccc","Counter for the number of parallel phoenix connections that were created", LogLevel.DEBUG, PLong.INSTANCE); + // HA Related Metrics + HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER("hpoac", "Number of Operations to the active cluster", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER("hposc", + "Number of Operations to the standby cluster", LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_COUNT_FAILED_OPERATIONS_ACTIVE_CLUSTER("hpfac", + "Number of Operations to the active cluster", LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_COUNT_FAILED_OPERATIONS_STANDBY_CLUSTER("hpfsc", + "Number of Operations to the standby cluster", LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_COUNT_USED_OPERATIONS_ACTIVE_CLUSTER("hpuac", + "Number of times active cluster was returned to the caller", LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_COUNT_USED_OPERATIONS_STANDBY_CLUSTER("hpusc", + "Number of times standby cluster was returned to the caller", LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME("hpp1tw", + "Time in milliseconds tasks had to wait in the queue of the thread pool executor", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_POOL1_TASK_END_TO_END_TIME("hpp1tee", + "Time in milliseconds spent by tasks from creation to completion", LogLevel.DEBUG, + PLong.INSTANCE), + HA_PARALLEL_POOL1_TASK_EXECUTION_TIME("hpp1tx", "Time in milliseconds tasks took to execute", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_POOL1_TASK_EXECUTED_COUNTER("hpp1te", + "Counter for number of tasks submitted to the thread pool executor", LogLevel.DEBUG, + PLong.INSTANCE), + HA_PARALLEL_POOL1_TASK_REJECTED_COUNTER("hpp1tr", + "Counter for number of tasks that were rejected by the thread pool executor", LogLevel.DEBUG, + 
PLong.INSTANCE), + HA_PARALLEL_POOL2_TASK_QUEUE_WAIT_TIME("hpp2tw", + "Time in milliseconds tasks had to wait in the queue of the thread pool executor", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_POOL2_TASK_END_TO_END_TIME("hpp2tee", + "Time in milliseconds spent by tasks from creation to completion", LogLevel.DEBUG, + PLong.INSTANCE), + HA_PARALLEL_POOL2_TASK_EXECUTION_TIME("hpp2tx", "Time in milliseconds tasks took to execute", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_POOL2_TASK_EXECUTED_COUNTER("hpp2te", + "Counter for number of tasks submitted to the thread pool executor", LogLevel.DEBUG, + PLong.INSTANCE), + HA_PARALLEL_POOL2_TASK_REJECTED_COUNTER("hpp2tr", + "Counter for number of tasks that were rejected by the thread pool executor", LogLevel.DEBUG, + PLong.INSTANCE), + HA_PARALLEL_TASK_TIMEOUT_COUNTER("hptto", "Counter for number of tasks that timedout", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_CONNECTION_FALLBACK_COUNTER("hpcfc", + "Counter for the number of connections that fellback to single cluster connection", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_CONNECTION_ERROR_COUNTER("hpcec", + "Counter for the number of parallel phoenix connections that return a failure to the user", + LogLevel.DEBUG, PLong.INSTANCE), + HA_PARALLEL_CONNECTION_CREATED_COUNTER("hpccc", + "Counter for the number of parallel phoenix connections that were created", LogLevel.DEBUG, + PLong.INSTANCE); - private final String description; - private final String shortName; - private LogLevel logLevel; - private PDataType dataType; + private final String description; + private final String shortName; + private LogLevel logLevel; + private PDataType dataType; - private MetricType(String shortName, String description, LogLevel logLevel, PDataType dataType) { - this.shortName = shortName; - this.description = description; - this.logLevel=logLevel; - this.dataType=dataType; - } + private MetricType(String shortName, String description, LogLevel logLevel, PDataType dataType) { + this.shortName = shortName; + this.description = description; + this.logLevel = logLevel; + this.dataType = dataType; + } - public String description() { - return description; - } - - public String shortName() { - return shortName; - } - - public LogLevel logLevel() { - return logLevel; - } - - public PDataType dataType() { - return dataType; - } - - public String columnName() { - return name(); - } - - public boolean isLoggingEnabled(LogLevel connectionLogLevel){ - return logLevel() != LogLevel.OFF && (logLevel().ordinal() <= connectionLogLevel.ordinal()); - } + public String description() { + return description; + } + + public String shortName() { + return shortName; + } + + public LogLevel logLevel() { + return logLevel; + } + + public PDataType dataType() { + return dataType; + } - public static String getMetricColumnsDetails() { - StringBuilder buffer=new StringBuilder(); - for (MetricType metric:MetricType.values()) { - if (metric.logLevel() != LogLevel.OFF) { - buffer.append(metric.columnName()); - buffer.append(" "); - buffer.append(metric.dataType.getSqlTypeName()); - buffer.append(","); - } - } - return buffer.toString(); + public String columnName() { + return name(); + } + + public boolean isLoggingEnabled(LogLevel connectionLogLevel) { + return logLevel() != LogLevel.OFF && (logLevel().ordinal() <= connectionLogLevel.ordinal()); + } + + public static String getMetricColumnsDetails() { + StringBuilder buffer = new StringBuilder(); + for (MetricType metric : MetricType.values()) { + if 
(metric.logLevel() != LogLevel.OFF) { + buffer.append(metric.columnName()); + buffer.append(" "); + buffer.append(metric.dataType.getSqlTypeName()); + buffer.append(","); + } } - + return buffer.toString(); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java index bbe5f2a46ea..c090b408961 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,40 +28,40 @@ public class MetricUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(MetricUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(MetricUtil.class); - public static CombinableMetric getCombinableMetric(boolean isRequestMetricsEnabled, - LogLevel connectionLogLevel, - MetricType type) { - if (!type.isLoggingEnabled(connectionLogLevel) && !isRequestMetricsEnabled) { - return NoOpRequestMetric.INSTANCE; } - return new CombinableMetricImpl(type); + public static CombinableMetric getCombinableMetric(boolean isRequestMetricsEnabled, + LogLevel connectionLogLevel, MetricType type) { + if (!type.isLoggingEnabled(connectionLogLevel) && !isRequestMetricsEnabled) { + return NoOpRequestMetric.INSTANCE; } + return new CombinableMetricImpl(type); + } - public static MetricsStopWatch getMetricsStopWatch(boolean isRequestMetricsEnabled, - LogLevel connectionLogLevel, - MetricType type) { - if(!type.isLoggingEnabled(connectionLogLevel) && !isRequestMetricsEnabled) { - return new MetricsStopWatch(false); } - return new MetricsStopWatch(true); + public static MetricsStopWatch getMetricsStopWatch(boolean isRequestMetricsEnabled, + LogLevel connectionLogLevel, MetricType type) { + if (!type.isLoggingEnabled(connectionLogLevel) && !isRequestMetricsEnabled) { + return new MetricsStopWatch(false); } + return new MetricsStopWatch(true); + } - // We need to cover the case when JmxCacheBuster has just stopped the HBase metrics - // system, and not accidentally overwrite the DefaultMetricsSystem singleton. - // See PHOENIX-6699 - public static boolean isDefaultMetricsInitialized() { - try { - MetricsSystemImpl metrics = (MetricsSystemImpl) DefaultMetricsSystem.instance(); - Field prefixField = MetricsSystemImpl.class.getDeclaredField("prefix"); - prefixField.setAccessible(true); - String prefix = (String) prefixField.get(metrics); - prefixField.setAccessible(false); - if (prefix != null) { - return true; - } - } catch (Exception e) { - LOGGER.error("Exception trying to determine if HBase metrics is initialized", e); - } - return false; + // We need to cover the case when JmxCacheBuster has just stopped the HBase metrics + // system, and not accidentally overwrite the DefaultMetricsSystem singleton. 
+ // See PHOENIX-6699 + public static boolean isDefaultMetricsInitialized() { + try { + MetricsSystemImpl metrics = (MetricsSystemImpl) DefaultMetricsSystem.instance(); + Field prefixField = MetricsSystemImpl.class.getDeclaredField("prefix"); + prefixField.setAccessible(true); + String prefix = (String) prefixField.get(metrics); + prefixField.setAccessible(false); + if (prefix != null) { + return true; + } + } catch (Exception e) { + LOGGER.error("Exception trying to determine if HBase metrics is initialized", e); } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsRegistry.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsRegistry.java index 05a53eecf17..16cf4990888 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsRegistry.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,18 +19,17 @@ /** * Interface for configurable registering Metrics interface construction - * */ public interface MetricsRegistry { - /** - * Interface for Registering Metrics - */ - void registerMetrics(TableClientMetrics tInstance); + /** + * Interface for Registering Metrics + */ + void registerMetrics(TableClientMetrics tInstance); - /** - * Interface for unRegistering Metrics - */ - void unRegisterMetrics(TableClientMetrics tInstance); + /** + * Interface for unRegistering Metrics + */ + void unRegisterMetrics(TableClientMetrics tInstance); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java index 32ca9d09036..08147c45c95 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,49 +20,46 @@ import org.apache.phoenix.util.PhoenixStopWatch; /** - * - * Stop watch that is cognizant of the fact whether or not metrics is enabled. - * If metrics isn't enabled it doesn't do anything. Otherwise, it delegates - * calls to a {@code PhoenixStopWatch}. - * + * Stop watch that is cognizant of the fact whether or not metrics is enabled. If metrics isn't + * enabled it doesn't do anything. Otherwise, it delegates calls to a {@code PhoenixStopWatch}. 
*/ final class MetricsStopWatch { - - private final boolean isMetricsEnabled; - private final PhoenixStopWatch stopwatch; - - MetricsStopWatch(boolean isMetricsEnabled) { - this.isMetricsEnabled = isMetricsEnabled; - this.stopwatch = new PhoenixStopWatch(); - } - - void start() { - if (isMetricsEnabled) { - stopwatch.start(); - } - } - - void stop() { - if (isMetricsEnabled) { - if (stopwatch.isRunning()) { - stopwatch.stop(); - } - } - } - boolean isRunning() { - return isMetricsEnabled && stopwatch.isRunning(); + private final boolean isMetricsEnabled; + private final PhoenixStopWatch stopwatch; + + MetricsStopWatch(boolean isMetricsEnabled) { + this.isMetricsEnabled = isMetricsEnabled; + this.stopwatch = new PhoenixStopWatch(); + } + + void start() { + if (isMetricsEnabled) { + stopwatch.start(); } - - long getElapsedTimeInMs() { - if (isMetricsEnabled) { - return stopwatch.elapsedMillis(); - } - return 0; + } + + void stop() { + if (isMetricsEnabled) { + if (stopwatch.isRunning()) { + stopwatch.stop(); + } } + } - @org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting - final boolean getMetricsEnabled(){ - return isMetricsEnabled; + boolean isRunning() { + return isMetricsEnabled && stopwatch.isRunning(); + } + + long getElapsedTimeInMs() { + if (isMetricsEnabled) { + return stopwatch.elapsedMillis(); } + return 0; + } + + @org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting + final boolean getMetricsEnabled() { + return isMetricsEnabled; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java index 5a129c09145..943a2e4a5c9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,11 +23,11 @@ import static org.apache.phoenix.monitoring.MetricType.DELETE_COMMIT_TIME; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_BYTES; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.INDEX_COMMIT_FAILURE_SIZE; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES; import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.INDEX_COMMIT_FAILURE_SIZE; import static org.apache.phoenix.monitoring.MetricType.UPSERT_BATCH_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.UPSERT_BATCH_FAILED_SIZE; import static org.apache.phoenix.monitoring.MetricType.UPSERT_COMMIT_TIME; @@ -43,216 +43,252 @@ * Queue that tracks various writes/mutations related phoenix request metrics. 
*/ public class MutationMetricQueue { - - // Map of table name -> mutation metric - private Map tableMutationMetric = new HashMap<>(); - - public void addMetricsForTable(String tableName, MutationMetric metric) { - MutationMetric tableMetric = tableMutationMetric.get(tableName); - if (tableMetric == null) { - tableMutationMetric.put(tableName, metric); - } else { - tableMetric.combineMetric(metric); - } + + // Map of table name -> mutation metric + private Map tableMutationMetric = new HashMap<>(); + + public void addMetricsForTable(String tableName, MutationMetric metric) { + MutationMetric tableMetric = tableMutationMetric.get(tableName); + if (tableMetric == null) { + tableMutationMetric.put(tableName, metric); + } else { + tableMetric.combineMetric(metric); } + } - public void combineMetricQueues(MutationMetricQueue other) { - Map tableMetricMap = other.tableMutationMetric; - for (Entry entry : tableMetricMap.entrySet()) { - addMetricsForTable(entry.getKey(), entry.getValue()); - } + public void combineMetricQueues(MutationMetricQueue other) { + Map tableMetricMap = other.tableMutationMetric; + for (Entry entry : tableMetricMap.entrySet()) { + addMetricsForTable(entry.getKey(), entry.getValue()); } - - /** - * Publish the metrics to wherever you want them published. The internal state is cleared out after every publish. - * @return map of table {@code name -> list } of pair of (metric name, metric value) - */ - public Map> aggregate() { - Map> publishedMetrics = new HashMap<>(); - for (Entry entry : tableMutationMetric.entrySet()) { - String tableName = entry.getKey(); - MutationMetric metric = entry.getValue(); - Map publishedMetricsForTable = publishedMetrics.get(tableName); - if (publishedMetricsForTable == null) { - publishedMetricsForTable = new HashMap<>(); - publishedMetrics.put(tableName, publishedMetricsForTable); - } - publishedMetricsForTable.put(metric.getNumMutations().getMetricType(), metric.getNumMutations().getValue()); - publishedMetricsForTable.put(metric.getUpsertMutationsSizeBytes().getMetricType(), metric.getUpsertMutationsSizeBytes().getValue()); - publishedMetricsForTable.put(metric.getDeleteMutationsSizeBytes().getMetricType(), metric.getDeleteMutationsSizeBytes().getValue()); - publishedMetricsForTable.put(metric.getCommitTimeForMutations().getMetricType(), metric.getCommitTimeForMutations().getValue()); - publishedMetricsForTable.put(metric.getTotalCommitTimeForUpserts().getMetricType(), metric.getTotalCommitTimeForUpserts().getValue()); - publishedMetricsForTable.put(metric.getTotalCommitTimeForAtomicUpserts().getMetricType(), metric.getTotalCommitTimeForAtomicUpserts().getValue()); - publishedMetricsForTable.put(metric.getTotalCommitTimeForDeletes().getMetricType(), metric.getTotalCommitTimeForDeletes().getValue()); - publishedMetricsForTable.put(metric.getNumFailedMutations().getMetricType(), metric.getNumFailedMutations().getValue()); - publishedMetricsForTable.put(metric.getNumOfIndexCommitFailedMutations().getMetricType(), metric.getNumOfIndexCommitFailedMutations().getValue()); - publishedMetricsForTable.put(metric.getUpsertMutationSqlCounterSuccess().getMetricType(), metric.getUpsertMutationSqlCounterSuccess().getValue()); - publishedMetricsForTable.put(metric.getDeleteMutationSqlCounterSuccess().getMetricType(), metric.getDeleteMutationSqlCounterSuccess().getValue()); - publishedMetricsForTable.put(metric.getTotalMutationsSizeBytes().getMetricType(), metric.getTotalMutationsSizeBytes().getValue()); - 
publishedMetricsForTable.put(metric.getUpsertBatchFailedSize().getMetricType(), metric.getUpsertBatchFailedSize().getValue()); - publishedMetricsForTable.put(metric.getUpsertBatchFailedCounter().getMetricType(), metric.getUpsertBatchFailedCounter().getValue()); - publishedMetricsForTable.put(metric.getDeleteBatchFailedSize().getMetricType(), metric.getDeleteBatchFailedSize().getValue()); - publishedMetricsForTable.put(metric.getDeleteBatchFailedCounter().getMetricType(), metric.getDeleteBatchFailedCounter().getValue()); - - } - return publishedMetrics; + } + + /** + * Publish the metrics to wherever you want them published. The internal state is cleared out + * after every publish. + * @return map of table {@code name -> list } of pair of (metric name, metric value) + */ + public Map> aggregate() { + Map> publishedMetrics = new HashMap<>(); + for (Entry entry : tableMutationMetric.entrySet()) { + String tableName = entry.getKey(); + MutationMetric metric = entry.getValue(); + Map publishedMetricsForTable = publishedMetrics.get(tableName); + if (publishedMetricsForTable == null) { + publishedMetricsForTable = new HashMap<>(); + publishedMetrics.put(tableName, publishedMetricsForTable); + } + publishedMetricsForTable.put(metric.getNumMutations().getMetricType(), + metric.getNumMutations().getValue()); + publishedMetricsForTable.put(metric.getUpsertMutationsSizeBytes().getMetricType(), + metric.getUpsertMutationsSizeBytes().getValue()); + publishedMetricsForTable.put(metric.getDeleteMutationsSizeBytes().getMetricType(), + metric.getDeleteMutationsSizeBytes().getValue()); + publishedMetricsForTable.put(metric.getCommitTimeForMutations().getMetricType(), + metric.getCommitTimeForMutations().getValue()); + publishedMetricsForTable.put(metric.getTotalCommitTimeForUpserts().getMetricType(), + metric.getTotalCommitTimeForUpserts().getValue()); + publishedMetricsForTable.put(metric.getTotalCommitTimeForAtomicUpserts().getMetricType(), + metric.getTotalCommitTimeForAtomicUpserts().getValue()); + publishedMetricsForTable.put(metric.getTotalCommitTimeForDeletes().getMetricType(), + metric.getTotalCommitTimeForDeletes().getValue()); + publishedMetricsForTable.put(metric.getNumFailedMutations().getMetricType(), + metric.getNumFailedMutations().getValue()); + publishedMetricsForTable.put(metric.getNumOfIndexCommitFailedMutations().getMetricType(), + metric.getNumOfIndexCommitFailedMutations().getValue()); + publishedMetricsForTable.put(metric.getUpsertMutationSqlCounterSuccess().getMetricType(), + metric.getUpsertMutationSqlCounterSuccess().getValue()); + publishedMetricsForTable.put(metric.getDeleteMutationSqlCounterSuccess().getMetricType(), + metric.getDeleteMutationSqlCounterSuccess().getValue()); + publishedMetricsForTable.put(metric.getTotalMutationsSizeBytes().getMetricType(), + metric.getTotalMutationsSizeBytes().getValue()); + publishedMetricsForTable.put(metric.getUpsertBatchFailedSize().getMetricType(), + metric.getUpsertBatchFailedSize().getValue()); + publishedMetricsForTable.put(metric.getUpsertBatchFailedCounter().getMetricType(), + metric.getUpsertBatchFailedCounter().getValue()); + publishedMetricsForTable.put(metric.getDeleteBatchFailedSize().getMetricType(), + metric.getDeleteBatchFailedSize().getValue()); + publishedMetricsForTable.put(metric.getDeleteBatchFailedCounter().getMetricType(), + metric.getDeleteBatchFailedCounter().getValue()); + + } + return publishedMetrics; + } + + public void clearMetrics() { + tableMutationMetric.clear(); // help gc + } + + /** + * Class that holds 
together the various metrics associated with mutations. + */ + public static class MutationMetric { + private final CombinableMetric numMutations = new CombinableMetricImpl(MUTATION_BATCH_SIZE); + private final CombinableMetric totalMutationsSizeBytes = + new CombinableMetricImpl(MUTATION_BYTES); + private final CombinableMetric totalCommitTimeForMutations = + new CombinableMetricImpl(MUTATION_COMMIT_TIME); + private final CombinableMetric numFailedMutations = + new CombinableMetricImpl(MUTATION_BATCH_FAILED_SIZE); + private final CombinableMetric totalCommitTimeForUpserts = + new CombinableMetricImpl(UPSERT_COMMIT_TIME); + private final CombinableMetric totalCommitTimeForAtomicUpserts = + new CombinableMetricImpl(ATOMIC_UPSERT_COMMIT_TIME); + private final CombinableMetric totalCommitTimeForDeletes = + new CombinableMetricImpl(DELETE_COMMIT_TIME); + private final CombinableMetric upsertMutationsSizeBytes = + new CombinableMetricImpl(UPSERT_MUTATION_BYTES); + private final CombinableMetric deleteMutationsSizeBytes = + new CombinableMetricImpl(DELETE_MUTATION_BYTES); + private final CombinableMetric upsertMutationSqlCounterSuccess = + new CombinableMetricImpl(UPSERT_MUTATION_SQL_COUNTER); + private final CombinableMetric deleteMutationSqlCounterSuccess = + new CombinableMetricImpl(DELETE_MUTATION_SQL_COUNTER); + private final CombinableMetric upsertBatchFailedSize = + new CombinableMetricImpl(UPSERT_BATCH_FAILED_SIZE); + private final CombinableMetric upsertBatchFailedCounter = + new CombinableMetricImpl(UPSERT_BATCH_FAILED_COUNTER); + private final CombinableMetric deleteBatchFailedSize = + new CombinableMetricImpl(DELETE_BATCH_FAILED_SIZE); + private final CombinableMetric deleteBatchFailedCounter = + new CombinableMetricImpl(DELETE_BATCH_FAILED_COUNTER); + + private final CombinableMetric numOfIndexCommitFailMutations = + new CombinableMetricImpl(INDEX_COMMIT_FAILURE_SIZE); + + public static final MutationMetric EMPTY_METRIC = + new MutationMetric(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + + public MutationMetric(long numMutations, long upsertMutationsSizeBytes, + long deleteMutationsSizeBytes, long commitTimeForUpserts, long commitTimeForAtomicUpserts, + long commitTimeForDeletes, long numFailedMutations, long upsertMutationSqlCounterSuccess, + long deleteMutationSqlCounterSuccess, long totalMutationBytes, long numOfPhase3Failed, + long upsertBatchFailedSize, long upsertBatchFailedCounter, long deleteBatchFailedSize, + long deleteBatchFailedCounter) { + this.numMutations.change(numMutations); + this.totalCommitTimeForUpserts.change(commitTimeForUpserts); + this.totalCommitTimeForAtomicUpserts.change(commitTimeForAtomicUpserts); + this.totalCommitTimeForDeletes.change(commitTimeForDeletes); + this.totalCommitTimeForMutations.change(commitTimeForUpserts + commitTimeForDeletes); + this.numFailedMutations.change(numFailedMutations); + this.numOfIndexCommitFailMutations.change(numOfPhase3Failed); + this.upsertMutationsSizeBytes.change(upsertMutationsSizeBytes); + this.deleteMutationsSizeBytes.change(deleteMutationsSizeBytes); + this.totalMutationsSizeBytes.change(totalMutationBytes); + this.upsertMutationSqlCounterSuccess.change(upsertMutationSqlCounterSuccess); + this.deleteMutationSqlCounterSuccess.change(deleteMutationSqlCounterSuccess); + this.upsertBatchFailedSize.change(upsertBatchFailedSize); + this.upsertBatchFailedCounter.change(upsertBatchFailedCounter); + this.deleteBatchFailedSize.change(deleteBatchFailedSize); + 
this.deleteBatchFailedCounter.change(deleteBatchFailedCounter); } - - public void clearMetrics() { - tableMutationMetric.clear(); // help gc + + public CombinableMetric getTotalCommitTimeForUpserts() { + return totalCommitTimeForUpserts; } - - /** - * Class that holds together the various metrics associated with mutations. - */ - public static class MutationMetric { - private final CombinableMetric numMutations = new CombinableMetricImpl(MUTATION_BATCH_SIZE); - private final CombinableMetric totalMutationsSizeBytes = new CombinableMetricImpl(MUTATION_BYTES); - private final CombinableMetric totalCommitTimeForMutations = new CombinableMetricImpl(MUTATION_COMMIT_TIME); - private final CombinableMetric numFailedMutations = new CombinableMetricImpl(MUTATION_BATCH_FAILED_SIZE); - private final CombinableMetric totalCommitTimeForUpserts = new CombinableMetricImpl(UPSERT_COMMIT_TIME); - private final CombinableMetric totalCommitTimeForAtomicUpserts = new CombinableMetricImpl(ATOMIC_UPSERT_COMMIT_TIME); - private final CombinableMetric totalCommitTimeForDeletes = new CombinableMetricImpl(DELETE_COMMIT_TIME); - private final CombinableMetric upsertMutationsSizeBytes = new CombinableMetricImpl(UPSERT_MUTATION_BYTES); - private final CombinableMetric deleteMutationsSizeBytes = new CombinableMetricImpl(DELETE_MUTATION_BYTES); - private final CombinableMetric upsertMutationSqlCounterSuccess = new CombinableMetricImpl(UPSERT_MUTATION_SQL_COUNTER); - private final CombinableMetric deleteMutationSqlCounterSuccess = new CombinableMetricImpl(DELETE_MUTATION_SQL_COUNTER); - private final CombinableMetric upsertBatchFailedSize = new CombinableMetricImpl(UPSERT_BATCH_FAILED_SIZE); - private final CombinableMetric upsertBatchFailedCounter = new CombinableMetricImpl(UPSERT_BATCH_FAILED_COUNTER); - private final CombinableMetric deleteBatchFailedSize = new CombinableMetricImpl(DELETE_BATCH_FAILED_SIZE); - private final CombinableMetric deleteBatchFailedCounter = new CombinableMetricImpl(DELETE_BATCH_FAILED_COUNTER); - - private final CombinableMetric numOfIndexCommitFailMutations = new CombinableMetricImpl( - INDEX_COMMIT_FAILURE_SIZE); - - public static final MutationMetric EMPTY_METRIC = - new MutationMetric(0,0,0,0, 0, 0,0,0,0,0,0,0,0,0,0); - - public MutationMetric(long numMutations, long upsertMutationsSizeBytes, - long deleteMutationsSizeBytes, long commitTimeForUpserts, long commitTimeForAtomicUpserts, - long commitTimeForDeletes, long numFailedMutations, long upsertMutationSqlCounterSuccess, - long deleteMutationSqlCounterSuccess, long totalMutationBytes, - long numOfPhase3Failed, long upsertBatchFailedSize, - long upsertBatchFailedCounter, long deleteBatchFailedSize, - long deleteBatchFailedCounter) { - this.numMutations.change(numMutations); - this.totalCommitTimeForUpserts.change(commitTimeForUpserts); - this.totalCommitTimeForAtomicUpserts.change(commitTimeForAtomicUpserts); - this.totalCommitTimeForDeletes.change(commitTimeForDeletes); - this.totalCommitTimeForMutations.change(commitTimeForUpserts + commitTimeForDeletes); - this.numFailedMutations.change(numFailedMutations); - this.numOfIndexCommitFailMutations.change(numOfPhase3Failed); - this.upsertMutationsSizeBytes.change(upsertMutationsSizeBytes); - this.deleteMutationsSizeBytes.change(deleteMutationsSizeBytes); - this.totalMutationsSizeBytes.change(totalMutationBytes); - this.upsertMutationSqlCounterSuccess.change(upsertMutationSqlCounterSuccess); - this.deleteMutationSqlCounterSuccess.change(deleteMutationSqlCounterSuccess); - 
this.upsertBatchFailedSize.change(upsertBatchFailedSize); - this.upsertBatchFailedCounter.change(upsertBatchFailedCounter); - this.deleteBatchFailedSize.change(deleteBatchFailedSize); - this.deleteBatchFailedCounter.change(deleteBatchFailedCounter); - } - - public CombinableMetric getTotalCommitTimeForUpserts() { - return totalCommitTimeForUpserts; - } - - public CombinableMetric getTotalCommitTimeForAtomicUpserts() { return totalCommitTimeForAtomicUpserts; } - - public CombinableMetric getTotalCommitTimeForDeletes() { - return totalCommitTimeForDeletes; - } - - public CombinableMetric getCommitTimeForMutations() { - return totalCommitTimeForMutations; - } - - public CombinableMetric getNumMutations() { - return numMutations; - } - - public CombinableMetric getTotalMutationsSizeBytes() { - return totalMutationsSizeBytes; - } - - public CombinableMetric getNumFailedMutations() { - return numFailedMutations; - } - - public CombinableMetric getNumOfIndexCommitFailedMutations() { - return numOfIndexCommitFailMutations; - } - - public CombinableMetric getUpsertMutationsSizeBytes() { - return upsertMutationsSizeBytes; - } - - public CombinableMetric getDeleteMutationsSizeBytes() { - return deleteMutationsSizeBytes; - } - - public CombinableMetric getUpsertMutationSqlCounterSuccess() { - return upsertMutationSqlCounterSuccess; - } - - public CombinableMetric getDeleteMutationSqlCounterSuccess() { - return deleteMutationSqlCounterSuccess; - } - - public CombinableMetric getUpsertBatchFailedSize() { - return upsertBatchFailedSize; - } - - public CombinableMetric getUpsertBatchFailedCounter() { - return upsertBatchFailedCounter; - } - - public CombinableMetric getDeleteBatchFailedSize() { - return deleteBatchFailedSize; - } - - public CombinableMetric getDeleteBatchFailedCounter() { - return deleteBatchFailedCounter; - } - - public void combineMetric(MutationMetric other) { - this.numMutations.combine(other.numMutations); - this.totalCommitTimeForUpserts.combine(other.totalCommitTimeForUpserts); - this.totalCommitTimeForAtomicUpserts.combine(other.totalCommitTimeForAtomicUpserts); - this.totalCommitTimeForDeletes.combine(other.totalCommitTimeForDeletes); - this.totalCommitTimeForMutations.combine(other.totalCommitTimeForMutations); - this.numFailedMutations.combine(other.numFailedMutations); - this.numOfIndexCommitFailMutations.combine(other.numOfIndexCommitFailMutations); - this.upsertMutationsSizeBytes.combine(other.upsertMutationsSizeBytes); - this.deleteMutationsSizeBytes.combine(other.deleteMutationsSizeBytes); - this.totalMutationsSizeBytes.combine(other.totalMutationsSizeBytes); - this.upsertMutationSqlCounterSuccess.combine(other.upsertMutationSqlCounterSuccess); - this.deleteMutationSqlCounterSuccess.combine(other.deleteMutationSqlCounterSuccess); - this.upsertBatchFailedSize.combine(other.upsertBatchFailedSize); - this.upsertBatchFailedCounter.combine(other.upsertBatchFailedCounter); - this.deleteBatchFailedSize.combine(other.deleteBatchFailedSize); - this.deleteBatchFailedCounter.combine(other.deleteBatchFailedCounter); - } + public CombinableMetric getTotalCommitTimeForAtomicUpserts() { + return totalCommitTimeForAtomicUpserts; } - /** - * Class to represent a no-op mutation metric. Used in places where request level metric tracking for mutations is not - * needed or desired. 
- */ - public static class NoOpMutationMetricsQueue extends MutationMetricQueue { + public CombinableMetric getTotalCommitTimeForDeletes() { + return totalCommitTimeForDeletes; + } - public static final NoOpMutationMetricsQueue NO_OP_MUTATION_METRICS_QUEUE = new NoOpMutationMetricsQueue(); + public CombinableMetric getCommitTimeForMutations() { + return totalCommitTimeForMutations; + } - private NoOpMutationMetricsQueue() {} + public CombinableMetric getNumMutations() { + return numMutations; + } - @Override - public void addMetricsForTable(String tableName, MutationMetric metric) {} + public CombinableMetric getTotalMutationsSizeBytes() { + return totalMutationsSizeBytes; + } + + public CombinableMetric getNumFailedMutations() { + return numFailedMutations; + } + + public CombinableMetric getNumOfIndexCommitFailedMutations() { + return numOfIndexCommitFailMutations; + } + + public CombinableMetric getUpsertMutationsSizeBytes() { + return upsertMutationsSizeBytes; + } + + public CombinableMetric getDeleteMutationsSizeBytes() { + return deleteMutationsSizeBytes; + } + + public CombinableMetric getUpsertMutationSqlCounterSuccess() { + return upsertMutationSqlCounterSuccess; + } - @Override - public Map> aggregate() { return Collections.emptyMap(); } - - + public CombinableMetric getDeleteMutationSqlCounterSuccess() { + return deleteMutationSqlCounterSuccess; } + public CombinableMetric getUpsertBatchFailedSize() { + return upsertBatchFailedSize; + } + + public CombinableMetric getUpsertBatchFailedCounter() { + return upsertBatchFailedCounter; + } + + public CombinableMetric getDeleteBatchFailedSize() { + return deleteBatchFailedSize; + } + + public CombinableMetric getDeleteBatchFailedCounter() { + return deleteBatchFailedCounter; + } + + public void combineMetric(MutationMetric other) { + this.numMutations.combine(other.numMutations); + this.totalCommitTimeForUpserts.combine(other.totalCommitTimeForUpserts); + this.totalCommitTimeForAtomicUpserts.combine(other.totalCommitTimeForAtomicUpserts); + this.totalCommitTimeForDeletes.combine(other.totalCommitTimeForDeletes); + this.totalCommitTimeForMutations.combine(other.totalCommitTimeForMutations); + this.numFailedMutations.combine(other.numFailedMutations); + this.numOfIndexCommitFailMutations.combine(other.numOfIndexCommitFailMutations); + this.upsertMutationsSizeBytes.combine(other.upsertMutationsSizeBytes); + this.deleteMutationsSizeBytes.combine(other.deleteMutationsSizeBytes); + this.totalMutationsSizeBytes.combine(other.totalMutationsSizeBytes); + this.upsertMutationSqlCounterSuccess.combine(other.upsertMutationSqlCounterSuccess); + this.deleteMutationSqlCounterSuccess.combine(other.deleteMutationSqlCounterSuccess); + this.upsertBatchFailedSize.combine(other.upsertBatchFailedSize); + this.upsertBatchFailedCounter.combine(other.upsertBatchFailedCounter); + this.deleteBatchFailedSize.combine(other.deleteBatchFailedSize); + this.deleteBatchFailedCounter.combine(other.deleteBatchFailedCounter); + } + + } + + /** + * Class to represent a no-op mutation metric. Used in places where request level metric tracking + * for mutations is not needed or desired. 
+ */ + public static class NoOpMutationMetricsQueue extends MutationMetricQueue { + + public static final NoOpMutationMetricsQueue NO_OP_MUTATION_METRICS_QUEUE = + new NoOpMutationMetricsQueue(); + + private NoOpMutationMetricsQueue() { + } + + @Override + public void addMetricsForTable(String tableName, MutationMetric metric) { + } + + @Override + public Map> aggregate() { + return Collections.emptyMap(); + } + + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpGlobalMetricImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpGlobalMetricImpl.java index d03b27e405b..74be86886ac 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpGlobalMetricImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpGlobalMetricImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,56 +22,54 @@ */ public class NoOpGlobalMetricImpl implements GlobalMetric { - static long NO_SAMPLES = -1; - static long NO_VALUE = -1; + static long NO_SAMPLES = -1; + static long NO_VALUE = -1; - @Override - public long getNumberOfSamples() { - return NO_SAMPLES; - } + @Override + public long getNumberOfSamples() { + return NO_SAMPLES; + } - @Override - public MetricType getMetricType() { - return null; - } + @Override + public MetricType getMetricType() { + return null; + } - @Override - public long getValue() { - return NO_VALUE; - } + @Override + public long getValue() { + return NO_VALUE; + } - @Override - public void change(long delta) { + @Override + public void change(long delta) { - } + } - @Override - public void increment() { + @Override + public void increment() { - } + } - @Override - public void decrement() { + @Override + public void decrement() { - } + } - @Override - public String getCurrentMetricState() { - return null; - } + @Override + public String getCurrentMetricState() { + return null; + } - @Override - public void reset() { + @Override + public void reset() { - } + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { - } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpTableMetricsManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpTableMetricsManager.java index 5194518f8b5..f4df4dd8a08 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpTableMetricsManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NoOpTableMetricsManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,37 +22,41 @@ import java.util.Map; /** - * TableMetricsManager will be replaced by this case - * incase of tableMetrics flag is set to false. 
+ * TableMetricsManager will be replaced by this case incase of tableMetrics flag is set to false. */ - public class NoOpTableMetricsManager extends TableMetricsManager { - public static final NoOpTableMetricsManager noOpsTableMetricManager = new NoOpTableMetricsManager(); + public static final NoOpTableMetricsManager noOpsTableMetricManager = + new NoOpTableMetricsManager(); - private NoOpTableMetricsManager() { - super(); - } + private NoOpTableMetricsManager() { + super(); + } - @Override public void updateMetrics(String tableName, MetricType type, long value) { + @Override + public void updateMetrics(String tableName, MetricType type, long value) { - } + } - @Override public void pushMetricsFromConnInstance(Map> map) { + @Override + public void pushMetricsFromConnInstance(Map> map) { - } + } - @Override public void clearTableLevelMetrics() { + @Override + public void clearTableLevelMetrics() { - } + } - @Override public Map> getTableLevelMetrics() { - return Collections.emptyMap(); - } + @Override + public Map> getTableLevelMetrics() { + return Collections.emptyMap(); + } - @Override public TableClientMetrics getTableClientMetrics(String tableName) { - return null; - } + @Override + public TableClientMetrics getTableClientMetrics(String tableName) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java index 77fe093a403..5df08b54102 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,64 +18,63 @@ package org.apache.phoenix.monitoring; /** - * Version of {@link Metric} that can be used when the metric isn't getting concurrently modified/accessed by multiple - * threads and the memory consistency effects of happen-before can be established. For example - phoenix client side - * metrics are modified/accessed by only one thread at a time. Further, the actions of threads in the phoenix client - * thread pool happen-before the actions of the thread that performs the aggregation of metrics. This makes + * Version of {@link Metric} that can be used when the metric isn't getting concurrently + * modified/accessed by multiple threads and the memory consistency effects of happen-before can be + * established. For example - phoenix client side metrics are modified/accessed by only one thread + * at a time. Further, the actions of threads in the phoenix client thread pool happen-before the + * actions of the thread that performs the aggregation of metrics. This makes * {@link NonAtomicMetric} a good fit for storing Phoenix's client side request level metrics. 
*/ class NonAtomicMetric implements Metric { - private final MetricType type; - private long value; + private final MetricType type; + private long value; - public NonAtomicMetric(MetricType type) { - this.type = type; - } + public NonAtomicMetric(MetricType type) { + this.type = type; + } - @Override - public MetricType getMetricType() { - return type; - } + @Override + public MetricType getMetricType() { + return type; + } - @Override - public long getValue() { - return value; - } + @Override + public long getValue() { + return value; + } - @Override - public void change(long delta) { - value += delta; - } + @Override + public void change(long delta) { + value += delta; + } - @Override - public void increment() { - value++; - } + @Override + public void increment() { + value++; + } - @Override - public String getCurrentMetricState() { - return type.shortName() + ": " + value; - } + @Override + public String getCurrentMetricState() { + return type.shortName() + ": " + value; + } - @Override - public void reset() { - value = 0; - } + @Override + public void reset() { + value = 0; + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - this.value = value; - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + this.value = value; + } - @Override - public void decrement() { - value--; - } + @Override + public void decrement() { + value--; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java index 5038cb32387..92042fb0058 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,152 +38,159 @@ * Class that represents the overall metrics associated with a query being executed by the phoenix. 
*/ public class OverAllQueryMetrics { - private final MetricsStopWatch queryWatch; - private final MetricsStopWatch resultSetWatch; - private final CombinableMetric numParallelScans; - private final CombinableMetric wallClockTimeMS; - private final CombinableMetric resultSetTimeMS; - private final CombinableMetric queryTimedOut; - private final CombinableMetric queryPointLookupTimedOut; - private final CombinableMetric queryScanTimedOut; - private final CombinableMetric queryFailed; - private final CombinableMetric queryPointLookupFailed; - private final CombinableMetric queryScanFailed; - private final CombinableMetric cacheRefreshedDueToSplits; - - public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel connectionLogLevel) { - queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel, - WALL_CLOCK_TIME_MS); - resultSetWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel, - RESULT_SET_TIME_MS); - numParallelScans = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, NUM_PARALLEL_SCANS); - wallClockTimeMS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, WALL_CLOCK_TIME_MS); - resultSetTimeMS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, RESULT_SET_TIME_MS); - queryTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, QUERY_TIMEOUT_COUNTER); - queryPointLookupTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, QUERY_POINTLOOKUP_TIMEOUT_COUNTER); - queryScanTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, QUERY_SCAN_TIMEOUT_COUNTER); - queryFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, QUERY_FAILED_COUNTER); - queryPointLookupFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, QUERY_POINTLOOKUP_FAILED_COUNTER); - queryScanFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, QUERY_SCAN_FAILED_COUNTER); - cacheRefreshedDueToSplits = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, - connectionLogLevel, CACHE_REFRESH_SPLITS_COUNTER); - } - - public void updateNumParallelScans(long numParallelScans) { - this.numParallelScans.change(numParallelScans); - } - - public void queryTimedOut() { - queryTimedOut.increment(); - } - - public void queryPointLookupTimedOut() { - queryPointLookupTimedOut.increment(); - } - - public void queryScanTimedOut() { - queryScanTimedOut.increment(); - } - - public void queryFailed() { - queryFailed.increment(); - } - - public void queryPointLookupFailed() { - queryPointLookupFailed.increment(); - } - - public void queryScanFailed() { - queryScanFailed.increment(); - } - - public void cacheRefreshedDueToSplits() { - cacheRefreshedDueToSplits.increment(); - } - - public void startQuery() { - if (!queryWatch.isRunning()) { - queryWatch.start(); - } - } - - public void endQuery() { - boolean wasRunning = queryWatch.isRunning(); - queryWatch.stop(); - if (wasRunning) { - wallClockTimeMS.change(queryWatch.getElapsedTimeInMs()); - } - } - - public void startResultSetWatch() { - resultSetWatch.start(); - } - - public void stopResultSetWatch() { - boolean wasRunning = resultSetWatch.isRunning(); - resultSetWatch.stop(); - if (wasRunning) { - resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs()); - } - } - - @VisibleForTesting - long getWallClockTimeMs() { - return wallClockTimeMS.getValue(); - } - 
- @VisibleForTesting - long getResultSetTimeMs() { - return resultSetTimeMS.getValue(); - } - - public Map publish() { - Map metricsForPublish = new HashMap<>(); - metricsForPublish.put(numParallelScans.getMetricType(), numParallelScans.getValue()); - metricsForPublish.put(wallClockTimeMS.getMetricType(), wallClockTimeMS.getValue()); - metricsForPublish.put(resultSetTimeMS.getMetricType(), resultSetTimeMS.getValue()); - metricsForPublish.put(queryTimedOut.getMetricType(), queryTimedOut.getValue()); - metricsForPublish.put(queryPointLookupTimedOut.getMetricType(), queryPointLookupTimedOut.getValue()); - metricsForPublish.put(queryScanTimedOut.getMetricType(), queryScanTimedOut.getValue()); - metricsForPublish.put(queryFailed.getMetricType(), queryFailed.getValue()); - metricsForPublish.put(queryPointLookupFailed.getMetricType(), queryPointLookupFailed.getValue()); - metricsForPublish.put(queryScanFailed.getMetricType(), queryScanFailed.getValue()); - metricsForPublish.put(cacheRefreshedDueToSplits.getMetricType(), cacheRefreshedDueToSplits.getValue()); - return metricsForPublish; - } - - public void reset() { - numParallelScans.reset(); - wallClockTimeMS.reset(); - resultSetTimeMS.reset(); - queryTimedOut.reset(); - queryPointLookupTimedOut.reset(); - queryScanTimedOut.reset(); - queryFailed.reset(); - queryPointLookupFailed.reset(); - queryScanFailed.reset(); - cacheRefreshedDueToSplits.reset(); - queryWatch.stop(); - resultSetWatch.stop(); - } - - public OverAllQueryMetrics combine(OverAllQueryMetrics metric) { - cacheRefreshedDueToSplits.combine(metric.cacheRefreshedDueToSplits); - queryFailed.combine(metric.queryFailed); - queryPointLookupFailed.combine(metric.queryPointLookupFailed); - queryScanFailed.combine(metric.queryScanFailed); - queryTimedOut.combine(metric.queryTimedOut); - queryPointLookupTimedOut.combine(metric.queryPointLookupTimedOut); - queryScanTimedOut.combine(metric.queryScanTimedOut); - numParallelScans.combine(metric.numParallelScans); - wallClockTimeMS.combine(metric.wallClockTimeMS); - resultSetTimeMS.combine(metric.resultSetTimeMS); - return this; - } + private final MetricsStopWatch queryWatch; + private final MetricsStopWatch resultSetWatch; + private final CombinableMetric numParallelScans; + private final CombinableMetric wallClockTimeMS; + private final CombinableMetric resultSetTimeMS; + private final CombinableMetric queryTimedOut; + private final CombinableMetric queryPointLookupTimedOut; + private final CombinableMetric queryScanTimedOut; + private final CombinableMetric queryFailed; + private final CombinableMetric queryPointLookupFailed; + private final CombinableMetric queryScanFailed; + private final CombinableMetric cacheRefreshedDueToSplits; + + public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel connectionLogLevel) { + queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel, + WALL_CLOCK_TIME_MS); + resultSetWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel, + RESULT_SET_TIME_MS); + numParallelScans = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + NUM_PARALLEL_SCANS); + wallClockTimeMS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + WALL_CLOCK_TIME_MS); + resultSetTimeMS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + RESULT_SET_TIME_MS); + queryTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + QUERY_TIMEOUT_COUNTER); + 
queryPointLookupTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, + connectionLogLevel, QUERY_POINTLOOKUP_TIMEOUT_COUNTER); + queryScanTimedOut = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + QUERY_SCAN_TIMEOUT_COUNTER); + queryFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + QUERY_FAILED_COUNTER); + queryPointLookupFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, + connectionLogLevel, QUERY_POINTLOOKUP_FAILED_COUNTER); + queryScanFailed = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, connectionLogLevel, + QUERY_SCAN_FAILED_COUNTER); + cacheRefreshedDueToSplits = MetricUtil.getCombinableMetric(isRequestMetricsEnabled, + connectionLogLevel, CACHE_REFRESH_SPLITS_COUNTER); + } + + public void updateNumParallelScans(long numParallelScans) { + this.numParallelScans.change(numParallelScans); + } + + public void queryTimedOut() { + queryTimedOut.increment(); + } + + public void queryPointLookupTimedOut() { + queryPointLookupTimedOut.increment(); + } + + public void queryScanTimedOut() { + queryScanTimedOut.increment(); + } + + public void queryFailed() { + queryFailed.increment(); + } + + public void queryPointLookupFailed() { + queryPointLookupFailed.increment(); + } + + public void queryScanFailed() { + queryScanFailed.increment(); + } + + public void cacheRefreshedDueToSplits() { + cacheRefreshedDueToSplits.increment(); + } + + public void startQuery() { + if (!queryWatch.isRunning()) { + queryWatch.start(); + } + } + + public void endQuery() { + boolean wasRunning = queryWatch.isRunning(); + queryWatch.stop(); + if (wasRunning) { + wallClockTimeMS.change(queryWatch.getElapsedTimeInMs()); + } + } + + public void startResultSetWatch() { + resultSetWatch.start(); + } + + public void stopResultSetWatch() { + boolean wasRunning = resultSetWatch.isRunning(); + resultSetWatch.stop(); + if (wasRunning) { + resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs()); + } + } + + @VisibleForTesting + long getWallClockTimeMs() { + return wallClockTimeMS.getValue(); + } + + @VisibleForTesting + long getResultSetTimeMs() { + return resultSetTimeMS.getValue(); + } + + public Map publish() { + Map metricsForPublish = new HashMap<>(); + metricsForPublish.put(numParallelScans.getMetricType(), numParallelScans.getValue()); + metricsForPublish.put(wallClockTimeMS.getMetricType(), wallClockTimeMS.getValue()); + metricsForPublish.put(resultSetTimeMS.getMetricType(), resultSetTimeMS.getValue()); + metricsForPublish.put(queryTimedOut.getMetricType(), queryTimedOut.getValue()); + metricsForPublish.put(queryPointLookupTimedOut.getMetricType(), + queryPointLookupTimedOut.getValue()); + metricsForPublish.put(queryScanTimedOut.getMetricType(), queryScanTimedOut.getValue()); + metricsForPublish.put(queryFailed.getMetricType(), queryFailed.getValue()); + metricsForPublish.put(queryPointLookupFailed.getMetricType(), + queryPointLookupFailed.getValue()); + metricsForPublish.put(queryScanFailed.getMetricType(), queryScanFailed.getValue()); + metricsForPublish.put(cacheRefreshedDueToSplits.getMetricType(), + cacheRefreshedDueToSplits.getValue()); + return metricsForPublish; + } + + public void reset() { + numParallelScans.reset(); + wallClockTimeMS.reset(); + resultSetTimeMS.reset(); + queryTimedOut.reset(); + queryPointLookupTimedOut.reset(); + queryScanTimedOut.reset(); + queryFailed.reset(); + queryPointLookupFailed.reset(); + queryScanFailed.reset(); + cacheRefreshedDueToSplits.reset(); + queryWatch.stop(); + 
resultSetWatch.stop(); + } + + public OverAllQueryMetrics combine(OverAllQueryMetrics metric) { + cacheRefreshedDueToSplits.combine(metric.cacheRefreshedDueToSplits); + queryFailed.combine(metric.queryFailed); + queryPointLookupFailed.combine(metric.queryPointLookupFailed); + queryScanFailed.combine(metric.queryScanFailed); + queryTimedOut.combine(metric.queryTimedOut); + queryPointLookupTimedOut.combine(metric.queryPointLookupTimedOut); + queryScanTimedOut.combine(metric.queryScanTimedOut); + numParallelScans.combine(metric.numParallelScans); + wallClockTimeMS.combine(metric.wallClockTimeMS); + resultSetTimeMS.combine(metric.resultSetTimeMS); + return this; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetric.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetric.java index ddf880fd066..b8a64f082fe 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetric.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetric.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,21 +18,16 @@ package org.apache.phoenix.monitoring; /** - * Class that exposes the various phoenix metrics collected - * at the Table level. Because metrics are dynamic in nature, it is not guaranteed that the - * state exposed will always be in sync with each other. One should use - * these metrics primarily for monitoring and debugging purposes. + * Class that exposes the various phoenix metrics collected at the Table level. Because metrics are + * dynamic in nature, it is not guaranteed that the state exposed will always be in sync with each + * other. One should use these metrics primarily for monitoring and debugging purposes. */ public interface PhoenixTableMetric extends Metric { - /** - * @return Number of samples collected since the last {@link #reset()} call. - */ - public long getNumberOfSamples(); + /** Returns Number of samples collected since the last {@link #reset()} call. */ + public long getNumberOfSamples(); - /** - * @return Sum of the values of the metric sampled since the last {@link #reset()} call. - */ - public long getTotalSum(); + /** Returns Sum of the values of the metric sampled since the last {@link #reset()} call. */ + public long getTotalSum(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetricImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetricImpl.java index 0d6ef7868bc..5fc034bdf96 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetricImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/PhoenixTableMetricImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,68 +21,75 @@ public class PhoenixTableMetricImpl implements PhoenixTableMetric { - private AtomicLong numberOfSamples = new AtomicLong(0); - private Metric metric; + private AtomicLong numberOfSamples = new AtomicLong(0); + private Metric metric; - /** - * Default implementation used when TableLevel Metrics are enabled - */ - public PhoenixTableMetricImpl(MetricType type) { - this.metric = new AtomicMetric(type); - } + /** + * Default implementation used when TableLevel Metrics are enabled + */ + public PhoenixTableMetricImpl(MetricType type) { + this.metric = new AtomicMetric(type); + } - /** - * Reset the internal state. Typically called after metric information has been collected and a new phase of - * collection is being requested for the next interval. - */ - @Override public void reset() { - metric.reset(); - numberOfSamples.set(0); - } + /** + * Reset the internal state. Typically called after metric information has been collected and a + * new phase of collection is being requested for the next interval. + */ + @Override + public void reset() { + metric.reset(); + numberOfSamples.set(0); + } - /** - * Set the Metric value as current value - * - * @param value - */ - @Override - public void set(long value) { - metric.set(value); - } + /** + * Set the Metric value as current value + */ + @Override + public void set(long value) { + metric.set(value); + } - @Override public long getNumberOfSamples() { - return numberOfSamples.get(); - } + @Override + public long getNumberOfSamples() { + return numberOfSamples.get(); + } - @Override public long getTotalSum() { - return metric.getValue(); - } + @Override + public long getTotalSum() { + return metric.getValue(); + } - @Override public void change(long delta) { - metric.change(delta); - numberOfSamples.incrementAndGet(); - } + @Override + public void change(long delta) { + metric.change(delta); + numberOfSamples.incrementAndGet(); + } - @Override public void increment() { - metric.increment(); - numberOfSamples.incrementAndGet(); - } + @Override + public void increment() { + metric.increment(); + numberOfSamples.incrementAndGet(); + } - @Override public MetricType getMetricType() { - return metric.getMetricType(); - } + @Override + public MetricType getMetricType() { + return metric.getMetricType(); + } - @Override public long getValue() { - return metric.getValue(); - } + @Override + public long getValue() { + return metric.getValue(); + } - @Override public String getCurrentMetricState() { - return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); - } + @Override + public String getCurrentMetricState() { + return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get(); + } - @Override public void decrement() { - metric.decrement(); - numberOfSamples.incrementAndGet(); - } + @Override + public void decrement() { + metric.decrement(); + numberOfSamples.incrementAndGet(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/RangeHistogram.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/RangeHistogram.java index 29110b90993..ab526807c9b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/RangeHistogram.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/RangeHistogram.java @@ -4,10 +4,10 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,9 @@ */ package org.apache.phoenix.monitoring; - import java.util.HashMap; import java.util.Map; + import org.HdrHistogram.ConcurrentHistogram; import org.HdrHistogram.Histogram; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -30,84 +30,78 @@ Creates a histogram with the specified range. */ public class RangeHistogram { - private Histogram histogram; - private long[] ranges; - private String name; - private String desc; - private static final Logger LOGGER = LoggerFactory.getLogger(RangeHistogram.class); + private Histogram histogram; + private long[] ranges; + private String name; + private String desc; + private static final Logger LOGGER = LoggerFactory.getLogger(RangeHistogram.class); - public RangeHistogram(long[] ranges, String name, String description) { - Preconditions.checkNotNull(ranges); - Preconditions.checkArgument(ranges.length != 0); - this.ranges = ranges; // the ranges are static or either provided by user - this.name = name; - this.desc = description; - /* - Below is the memory footprint per precision as of hdrhistogram version 2.1.12 - Histogram#getEstimatedFootprintInBytes provide a (conservatively high) estimate - of the Histogram's total footprint in bytes. - |-----------------------------------------| - |PRECISION | ERROR RATE | SIZE IN BYTES | - | 1 | 10% | 3,584 | - | 2 | 1% | 22,016 | - | 3 | 0.1% | 147,968 | - | 4 | 0.01% | 1,835,520 | - | 5 | 0.001% | 11,534,848 | - |-----------------------------------------| - */ - // highestTrackable value is the last value in the provided range. - this.histogram = new ConcurrentHistogram(this.ranges[this.ranges.length-1], 2); - } + public RangeHistogram(long[] ranges, String name, String description) { + Preconditions.checkNotNull(ranges); + Preconditions.checkArgument(ranges.length != 0); + this.ranges = ranges; // the ranges are static or either provided by user + this.name = name; + this.desc = description; + /* + * Below is the memory footprint per precision as of hdrhistogram version 2.1.12 + * Histogram#getEstimatedFootprintInBytes provide a (conservatively high) estimate of the + * Histogram's total footprint in bytes. |-----------------------------------------| |PRECISION + * | ERROR RATE | SIZE IN BYTES | | 1 | 10% | 3,584 | | 2 | 1% | 22,016 | | 3 | 0.1% | 147,968 | + * | 4 | 0.01% | 1,835,520 | | 5 | 0.001% | 11,534,848 | + * |-----------------------------------------| + */ + // highestTrackable value is the last value in the provided range. + this.histogram = new ConcurrentHistogram(this.ranges[this.ranges.length - 1], 2); + } - public void add(long value) { - if (value > histogram.getHighestTrackableValue()) { - // Ignoring recording value more than maximum trackable value. - LOGGER.warn("Histogram recording higher value than maximum. 
Ignoring it."); - return; - } - histogram.recordValue(value); + public void add(long value) { + if (value > histogram.getHighestTrackableValue()) { + // Ignoring recording value more than maximum trackable value. + LOGGER.warn("Histogram recording higher value than maximum. Ignoring it."); + return; } + histogram.recordValue(value); + } - public Histogram getHistogram() { - return histogram; - } + public Histogram getHistogram() { + return histogram; + } - public long[] getRanges() { - return ranges; - } + public long[] getRanges() { + return ranges; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public String getDesc() { - return desc; - } + public String getDesc() { + return desc; + } - public HistogramDistribution getRangeHistogramDistribution() { - // Generate distribution from the snapshot. - Histogram snapshot = histogram.copy(); - HistogramDistributionImpl - distribution = - new HistogramDistributionImpl(name, snapshot.getMinValue(), snapshot.getMaxValue(), - snapshot.getTotalCount(), generateDistributionMap(snapshot)); - return distribution; - } + public HistogramDistribution getRangeHistogramDistribution() { + // Generate distribution from the snapshot. + Histogram snapshot = histogram.copy(); + HistogramDistributionImpl distribution = + new HistogramDistributionImpl(name, snapshot.getMinValue(), snapshot.getMaxValue(), + snapshot.getTotalCount(), generateDistributionMap(snapshot)); + return distribution; + } - private Map generateDistributionMap(Histogram snapshot) { - long priorRange = 0; - Map map = new HashMap<>(); - for (int i = 0; i < ranges.length; i++) { - // We get the next non equivalent range to avoid double counting. - // getCountBetweenValues is inclusive of both values but since we are getting - // next non equivalent value from the lower bound it will be more than priorRange. - long nextNonEquivalentRange = histogram.nextNonEquivalentValue(priorRange); - // lower exclusive upper inclusive - long val = snapshot.getCountBetweenValues(nextNonEquivalentRange, ranges[i]); - map.put(priorRange + "," + ranges[i], val); - priorRange = ranges[i]; - } - return map; + private Map generateDistributionMap(Histogram snapshot) { + long priorRange = 0; + Map map = new HashMap<>(); + for (int i = 0; i < ranges.length; i++) { + // We get the next non equivalent range to avoid double counting. + // getCountBetweenValues is inclusive of both values but since we are getting + // next non equivalent value from the lower bound it will be more than priorRange. + long nextNonEquivalentRange = histogram.nextNonEquivalentValue(priorRange); + // lower exclusive upper inclusive + long val = snapshot.getCountBetweenValues(nextNonEquivalentRange, ranges[i]); + map.put(priorRange + "," + ranges[i], val); + priorRange = ranges[i]; } + return map; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java index 45851187a17..dc12424a910 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.monitoring; @@ -27,7 +35,6 @@ import org.apache.phoenix.log.LogLevel; import org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -35,163 +42,163 @@ */ public class ReadMetricQueue { - private static final int MAX_QUEUE_SIZE = 20000; // TODO: should this be configurable? + private static final int MAX_QUEUE_SIZE = 20000; // TODO: should this be configurable? 
- private final ConcurrentMap> metricsMap = new ConcurrentHashMap<>(); + private final ConcurrentMap> metricsMap = + new ConcurrentHashMap<>(); - private final List scanMetricsHolderList = new ArrayList(); + private final List scanMetricsHolderList = new ArrayList(); + private LogLevel connectionLogLevel; - private LogLevel connectionLogLevel; + private boolean isRequestMetricsEnabled; - private boolean isRequestMetricsEnabled; + public ReadMetricQueue(boolean isRequestMetricsEnabled, LogLevel connectionLogLevel) { + this.isRequestMetricsEnabled = isRequestMetricsEnabled; + this.connectionLogLevel = connectionLogLevel; + } - public ReadMetricQueue(boolean isRequestMetricsEnabled, LogLevel connectionLogLevel) { - this.isRequestMetricsEnabled = isRequestMetricsEnabled; - this.connectionLogLevel = connectionLogLevel; + public CombinableMetric allotMetric(MetricType type, String tableName) { + if (type.isLoggingEnabled(connectionLogLevel) || isRequestMetricsEnabled) { + MetricKey key = new MetricKey(type, tableName); + Queue q = getMetricQueue(key); + CombinableMetric metric = getMetric(type); + q.offer(metric); + return metric; + } else { + return NoOpRequestMetric.INSTANCE; } - - public CombinableMetric allotMetric(MetricType type, String tableName) { - if (type.isLoggingEnabled(connectionLogLevel) || isRequestMetricsEnabled) { - MetricKey key = new MetricKey(type, tableName); - Queue q = getMetricQueue(key); - CombinableMetric metric = getMetric(type); - q.offer(metric); - return metric; - } else { - return NoOpRequestMetric.INSTANCE; + } + + @VisibleForTesting + public CombinableMetric getMetric(MetricType type) { + CombinableMetric metric = new CombinableMetricImpl(type); + return metric; + } + + /** Returns map of table {@code name -> list } of pair of (metric name, metric value) */ + public Map> aggregate() { + Map> publishedMetrics = new HashMap<>(); + for (Entry> entry : metricsMap.entrySet()) { + String tableNameToPublish = entry.getKey().tableName; + Collection metrics = entry.getValue(); + if (metrics.size() > 0) { + CombinableMetric m = combine(metrics); + Map map = publishedMetrics.get(tableNameToPublish); + if (map == null) { + map = new HashMap<>(); + publishedMetrics.put(tableNameToPublish, map); } + map.put(m.getMetricType(), m.getValue()); + } } + return publishedMetrics; + } - @VisibleForTesting - public CombinableMetric getMetric(MetricType type) { - CombinableMetric metric = new CombinableMetricImpl(type); - return metric; - } + public void clearMetrics() { + metricsMap.clear(); // help gc + } - /** - * @return map of table {@code name -> list } of pair of (metric name, metric value) - */ - public Map> aggregate() { - Map> publishedMetrics = new HashMap<>(); - for (Entry> entry : metricsMap.entrySet()) { - String tableNameToPublish = entry.getKey().tableName; - Collection metrics = entry.getValue(); - if (metrics.size() > 0) { - CombinableMetric m = combine(metrics); - Map map = publishedMetrics.get(tableNameToPublish); - if (map == null) { - map = new HashMap<>(); - publishedMetrics.put(tableNameToPublish, map); - } - map.put(m.getMetricType(), m.getValue()); - } - } - return publishedMetrics; + private static CombinableMetric combine(Collection metrics) { + int size = metrics.size(); + if (size == 0) { + throw new IllegalArgumentException("Metrics collection needs to have at least one element"); } - - public void clearMetrics() { - metricsMap.clear(); // help gc + Iterator itr = metrics.iterator(); + // Clone first metric for combining so that aggregate always give 
consistent result + CombinableMetric combinedMetric = itr.next().clone(); + while (itr.hasNext()) { + combinedMetric = combinedMetric.combine(itr.next()); } - - private static CombinableMetric combine(Collection metrics) { - int size = metrics.size(); - if (size == 0) { throw new IllegalArgumentException("Metrics collection needs to have at least one element"); } - Iterator itr = metrics.iterator(); - //Clone first metric for combining so that aggregate always give consistent result - CombinableMetric combinedMetric = itr.next().clone(); - while (itr.hasNext()) { - combinedMetric = combinedMetric.combine(itr.next()); + return combinedMetric; + } + + /** + * Combine the metrics. This method should only be called in a single threaded manner when the two + * metric holders are not getting modified. + */ + public ReadMetricQueue combineReadMetrics(ReadMetricQueue other) { + ConcurrentMap> otherMetricsMap = other.metricsMap; + for (Entry> entry : otherMetricsMap.entrySet()) { + MetricKey key = entry.getKey(); + Queue otherQueue = entry.getValue(); + CombinableMetric combinedMetric = null; + // combine the metrics corresponding to this metric key before putting it in the queue. + for (CombinableMetric m : otherQueue) { + if (combinedMetric == null) { + combinedMetric = m; + } else { + combinedMetric.combine(m); } - return combinedMetric; + } + if (combinedMetric != null) { + Queue thisQueue = getMetricQueue(key); + thisQueue.offer(combinedMetric); + } } - - /** - * Combine the metrics. This method should only be called in a single threaded manner when the two metric holders - * are not getting modified. - */ - public ReadMetricQueue combineReadMetrics(ReadMetricQueue other) { - ConcurrentMap> otherMetricsMap = other.metricsMap; - for (Entry> entry : otherMetricsMap.entrySet()) { - MetricKey key = entry.getKey(); - Queue otherQueue = entry.getValue(); - CombinableMetric combinedMetric = null; - // combine the metrics corresponding to this metric key before putting it in the queue. - for (CombinableMetric m : otherQueue) { - if (combinedMetric == null) { - combinedMetric = m; - } else { - combinedMetric.combine(m); - } - } - if (combinedMetric != null) { - Queue thisQueue = getMetricQueue(key); - thisQueue.offer(combinedMetric); - } - } - return this; + return this; + } + + /** + * Inner class whose instances are used as keys in the metrics map. + */ + private static class MetricKey { + @Nonnull + private final MetricType type; + + @Nonnull + private final String tableName; + + MetricKey(MetricType type, String tableName) { + checkNotNull(type); + checkNotNull(tableName); + this.type = type; + this.tableName = tableName; } - /** - * Inner class whose instances are used as keys in the metrics map. 
- */ - private static class MetricKey { - @Nonnull - private final MetricType type; - - @Nonnull - private final String tableName; - - MetricKey(MetricType type, String tableName) { - checkNotNull(type); - checkNotNull(tableName); - this.type = type; - this.tableName = tableName; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + tableName.hashCode(); + result = prime * result + type.hashCode(); + return result; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + tableName.hashCode(); - result = prime * result + type.hashCode(); - return result; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + MetricKey other = (MetricKey) obj; + if (tableName.equals(other.tableName) && type == other.type) return true; + return false; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - MetricKey other = (MetricKey)obj; - if (tableName.equals(other.tableName) && type == other.type) return true; - return false; - } + } + private Queue getMetricQueue(MetricKey key) { + Queue q = metricsMap.get(key); + if (q == null) { + q = new LinkedBlockingQueue(MAX_QUEUE_SIZE); + Queue curQ = metricsMap.putIfAbsent(key, q); + if (curQ != null) { + q = curQ; + } } + return q; + } - private Queue getMetricQueue(MetricKey key) { - Queue q = metricsMap.get(key); - if (q == null) { - q = new LinkedBlockingQueue(MAX_QUEUE_SIZE); - Queue curQ = metricsMap.putIfAbsent(key, q); - if (curQ != null) { - q = curQ; - } - } - return q; - } + public void addScanHolder(ScanMetricsHolder holder) { + scanMetricsHolderList.add(holder); + } - public void addScanHolder(ScanMetricsHolder holder){ - scanMetricsHolderList.add(holder); - } + public List getScanMetricsHolderList() { + return scanMetricsHolderList; + } - public List getScanMetricsHolderList() { - return scanMetricsHolderList; - } - - public boolean isRequestMetricsEnabled() { - return isRequestMetricsEnabled; - } + public boolean isRequestMetricsEnabled() { + return isRequestMetricsEnabled; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java index dd0aca0659b..40d3193e7f8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,119 +40,120 @@ public class ScanMetricsHolder { - private final CombinableMetric countOfRPCcalls; - private final CombinableMetric countOfRemoteRPCcalls; - private final CombinableMetric sumOfMillisSecBetweenNexts; - private final CombinableMetric countOfNSRE; - private final CombinableMetric countOfBytesInResults; - private final CombinableMetric countOfBytesInRemoteResults; - private final CombinableMetric countOfRegions; - private final CombinableMetric countOfRPCRetries; - private final CombinableMetric countOfRemoteRPCRetries; - private final CombinableMetric countOfRowsScanned; - private final CombinableMetric countOfRowsFiltered; - private final CombinableMetric countOfBytesScanned; - private final CombinableMetric countOfRowsPaged; - private Map scanMetricMap; - private Object scan; - - private static final ScanMetricsHolder NO_OP_INSTANCE = - new ScanMetricsHolder(new ReadMetricQueue(false,LogLevel.OFF), "",null); - - public static ScanMetricsHolder getInstance(ReadMetricQueue readMetrics, String tableName, - Scan scan, LogLevel connectionLogLevel) { - if (connectionLogLevel == LogLevel.OFF && !readMetrics.isRequestMetricsEnabled()) { return NO_OP_INSTANCE; } - scan.setScanMetricsEnabled(true); - return new ScanMetricsHolder(readMetrics, tableName, scan); - } - - private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName,Scan scan) { - readMetrics.addScanHolder(this); - this.scan=scan; - countOfRPCcalls = readMetrics.allotMetric(COUNT_RPC_CALLS, tableName); - countOfRemoteRPCcalls = readMetrics.allotMetric(COUNT_REMOTE_RPC_CALLS, tableName); - sumOfMillisSecBetweenNexts = readMetrics.allotMetric(COUNT_MILLS_BETWEEN_NEXTS, tableName); - countOfNSRE = readMetrics.allotMetric(COUNT_NOT_SERVING_REGION_EXCEPTION, tableName); - countOfBytesInResults = - readMetrics.allotMetric(COUNT_BYTES_REGION_SERVER_RESULTS, tableName); - countOfBytesInRemoteResults = - readMetrics.allotMetric(COUNT_BYTES_IN_REMOTE_RESULTS, tableName); - countOfRegions = readMetrics.allotMetric(COUNT_SCANNED_REGIONS, tableName); - countOfRPCRetries = readMetrics.allotMetric(COUNT_RPC_RETRIES, tableName); - countOfRemoteRPCRetries = readMetrics.allotMetric(COUNT_REMOTE_RPC_RETRIES, tableName); - countOfRowsScanned = readMetrics.allotMetric(COUNT_ROWS_SCANNED, tableName); - countOfRowsFiltered = readMetrics.allotMetric(COUNT_ROWS_FILTERED, tableName); - countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES,tableName); - countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName); - } - - public CombinableMetric getCountOfRemoteRPCcalls() { - return countOfRemoteRPCcalls; - } - - public CombinableMetric getSumOfMillisSecBetweenNexts() { - return sumOfMillisSecBetweenNexts; - } - - public CombinableMetric getCountOfNSRE() { - return countOfNSRE; - } - - public CombinableMetric getCountOfBytesInRemoteResults() { - return countOfBytesInRemoteResults; - } - - public CombinableMetric getCountOfRegions() { - return countOfRegions; - } - - public CombinableMetric getCountOfRPCRetries() { - return countOfRPCRetries; - } - - public CombinableMetric getCountOfRemoteRPCRetries() { - return countOfRemoteRPCRetries; - } - - public CombinableMetric getCountOfRowsFiltered() { - return countOfRowsFiltered; - } - - public 
CombinableMetric getCountOfRPCcalls() { - return countOfRPCcalls; - } - - public CombinableMetric getCountOfBytesInResults() { - return countOfBytesInResults; - } - - public CombinableMetric getCountOfRowsScanned() { - return countOfRowsScanned; - } - - public Map getScanMetricMap() { - return scanMetricMap; - } - - public CombinableMetric getCountOfBytesScanned() { - return countOfBytesScanned; - } - - public CombinableMetric getCountOfRowsPaged() { - return countOfRowsPaged; - } - - public void setScanMetricMap(Map scanMetricMap) { - this.scanMetricMap = scanMetricMap; - } - - @Override - public String toString() { - try { - return "{\"scan\":" + scan + ", \"scanMetrics\":" + JsonMapper.writeObjectAsString(scanMetricMap) + "}"; - } catch (IOException e) { - return "{\"Exception while converting scan metrics to Json\":\"" + e.getMessage() + "\"}"; - } - } + private final CombinableMetric countOfRPCcalls; + private final CombinableMetric countOfRemoteRPCcalls; + private final CombinableMetric sumOfMillisSecBetweenNexts; + private final CombinableMetric countOfNSRE; + private final CombinableMetric countOfBytesInResults; + private final CombinableMetric countOfBytesInRemoteResults; + private final CombinableMetric countOfRegions; + private final CombinableMetric countOfRPCRetries; + private final CombinableMetric countOfRemoteRPCRetries; + private final CombinableMetric countOfRowsScanned; + private final CombinableMetric countOfRowsFiltered; + private final CombinableMetric countOfBytesScanned; + private final CombinableMetric countOfRowsPaged; + private Map scanMetricMap; + private Object scan; + + private static final ScanMetricsHolder NO_OP_INSTANCE = + new ScanMetricsHolder(new ReadMetricQueue(false, LogLevel.OFF), "", null); + + public static ScanMetricsHolder getInstance(ReadMetricQueue readMetrics, String tableName, + Scan scan, LogLevel connectionLogLevel) { + if (connectionLogLevel == LogLevel.OFF && !readMetrics.isRequestMetricsEnabled()) { + return NO_OP_INSTANCE; + } + scan.setScanMetricsEnabled(true); + return new ScanMetricsHolder(readMetrics, tableName, scan); + } + + private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName, Scan scan) { + readMetrics.addScanHolder(this); + this.scan = scan; + countOfRPCcalls = readMetrics.allotMetric(COUNT_RPC_CALLS, tableName); + countOfRemoteRPCcalls = readMetrics.allotMetric(COUNT_REMOTE_RPC_CALLS, tableName); + sumOfMillisSecBetweenNexts = readMetrics.allotMetric(COUNT_MILLS_BETWEEN_NEXTS, tableName); + countOfNSRE = readMetrics.allotMetric(COUNT_NOT_SERVING_REGION_EXCEPTION, tableName); + countOfBytesInResults = readMetrics.allotMetric(COUNT_BYTES_REGION_SERVER_RESULTS, tableName); + countOfBytesInRemoteResults = readMetrics.allotMetric(COUNT_BYTES_IN_REMOTE_RESULTS, tableName); + countOfRegions = readMetrics.allotMetric(COUNT_SCANNED_REGIONS, tableName); + countOfRPCRetries = readMetrics.allotMetric(COUNT_RPC_RETRIES, tableName); + countOfRemoteRPCRetries = readMetrics.allotMetric(COUNT_REMOTE_RPC_RETRIES, tableName); + countOfRowsScanned = readMetrics.allotMetric(COUNT_ROWS_SCANNED, tableName); + countOfRowsFiltered = readMetrics.allotMetric(COUNT_ROWS_FILTERED, tableName); + countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES, tableName); + countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName); + } + + public CombinableMetric getCountOfRemoteRPCcalls() { + return countOfRemoteRPCcalls; + } + + public CombinableMetric getSumOfMillisSecBetweenNexts() { + return sumOfMillisSecBetweenNexts; + } + 
+ public CombinableMetric getCountOfNSRE() { + return countOfNSRE; + } + + public CombinableMetric getCountOfBytesInRemoteResults() { + return countOfBytesInRemoteResults; + } + + public CombinableMetric getCountOfRegions() { + return countOfRegions; + } + + public CombinableMetric getCountOfRPCRetries() { + return countOfRPCRetries; + } + + public CombinableMetric getCountOfRemoteRPCRetries() { + return countOfRemoteRPCRetries; + } + + public CombinableMetric getCountOfRowsFiltered() { + return countOfRowsFiltered; + } + + public CombinableMetric getCountOfRPCcalls() { + return countOfRPCcalls; + } + + public CombinableMetric getCountOfBytesInResults() { + return countOfBytesInResults; + } + + public CombinableMetric getCountOfRowsScanned() { + return countOfRowsScanned; + } + + public Map getScanMetricMap() { + return scanMetricMap; + } + + public CombinableMetric getCountOfBytesScanned() { + return countOfBytesScanned; + } + + public CombinableMetric getCountOfRowsPaged() { + return countOfRowsPaged; + } + + public void setScanMetricMap(Map scanMetricMap) { + this.scanMetricMap = scanMetricMap; + } + + @Override + public String toString() { + try { + return "{\"scan\":" + scan + ", \"scanMetrics\":" + + JsonMapper.writeObjectAsString(scanMetricMap) + "}"; + } catch (IOException e) { + return "{\"Exception while converting scan metrics to Json\":\"" + e.getMessage() + "\"}"; + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SizeHistogram.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SizeHistogram.java index 0e99b1056d7..f48c1a78151 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SizeHistogram.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SizeHistogram.java @@ -4,10 +4,10 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,26 +22,25 @@ import org.apache.phoenix.query.QueryServices; /** - * Histogram for calculating sizes (for eg: bytes read, bytes scanned). We read ranges using - * config property {@link QueryServices#PHOENIX_HISTOGRAM_SIZE_RANGES}. If this property is not set - * then it will default to {@link org.apache.hadoop.metrics2.lib.MutableSizeHistogram#RANGES} - * values. + * Histogram for calculating sizes (for eg: bytes read, bytes scanned). We read ranges using config + * property {@link QueryServices#PHOENIX_HISTOGRAM_SIZE_RANGES}. If this property is not set then it + * will default to {@link org.apache.hadoop.metrics2.lib.MutableSizeHistogram#RANGES} values. 
*/ public class SizeHistogram extends RangeHistogram { - //default range of bins for size Histograms - protected static long[] - DEFAULT_RANGE = - { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; - public SizeHistogram(String name, String description, Configuration conf) { - super(initializeRanges(conf), name, description); - initializeRanges(conf); - } + // default range of bins for size Histograms + protected static long[] DEFAULT_RANGE = + { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; - private static long[] initializeRanges(Configuration conf) { - long[] ranges = PhoenixConfigurationUtilHelper.getLongs(conf, - QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES); - return ranges != null ? ranges : DEFAULT_RANGE; - } + public SizeHistogram(String name, String description, Configuration conf) { + super(initializeRanges(conf), name, description); + initializeRanges(conf); + } -} \ No newline at end of file + private static long[] initializeRanges(Configuration conf) { + long[] ranges = + PhoenixConfigurationUtilHelper.getLongs(conf, QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES); + return ranges != null ? ranges : DEFAULT_RANGE; + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java index 8662f1a9c46..cb06b10dcf1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,25 +20,26 @@ import org.apache.phoenix.log.LogLevel; /** - * Class that encapsulates the various metrics associated with the spooling done by phoenix as part of servicing a - * request. + * Class that encapsulates the various metrics associated with the spooling done by phoenix as part + * of servicing a request. 
*/ public class SpoolingMetricsHolder { - private final CombinableMetric spoolFileSizeMetric; - private final CombinableMetric numSpoolFileMetric; - public static final SpoolingMetricsHolder NO_OP_INSTANCE = new SpoolingMetricsHolder(new ReadMetricQueue(false,LogLevel.OFF), ""); + private final CombinableMetric spoolFileSizeMetric; + private final CombinableMetric numSpoolFileMetric; + public static final SpoolingMetricsHolder NO_OP_INSTANCE = + new SpoolingMetricsHolder(new ReadMetricQueue(false, LogLevel.OFF), ""); - public SpoolingMetricsHolder(ReadMetricQueue readMetrics, String tableName) { - this.spoolFileSizeMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_SIZE, tableName); - this.numSpoolFileMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_COUNTER, tableName); - } + public SpoolingMetricsHolder(ReadMetricQueue readMetrics, String tableName) { + this.spoolFileSizeMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_SIZE, tableName); + this.numSpoolFileMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_COUNTER, tableName); + } - public CombinableMetric getSpoolFileSizeMetric() { - return spoolFileSizeMetric; - } + public CombinableMetric getSpoolFileSizeMetric() { + return spoolFileSizeMetric; + } - public CombinableMetric getNumSpoolFileMetric() { - return numSpoolFileMetric; - } + public CombinableMetric getNumSpoolFileMetric() { + return numSpoolFileMetric; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java index 5a1aa3dbc7e..7e18ab89072 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,202 +6,198 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.monitoring; -import org.apache.hadoop.conf.Configuration; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE; +import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_COMMIT_TIME; +import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_SQL_QUERY_TIME; +import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; +import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_FAILURE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_SUCCESS_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_BATCH_FAILED_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_BATCH_FAILED_SIZE; +import static org.apache.phoenix.monitoring.MetricType.DELETE_COMMIT_TIME; +import static org.apache.phoenix.monitoring.MetricType.DELETE_FAILED_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_BYTES; +import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.DELETE_SQL_QUERY_TIME; +import static org.apache.phoenix.monitoring.MetricType.DELETE_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE; +import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES; +import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME; +import static org.apache.phoenix.monitoring.MetricType.MUTATION_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES; import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS; +import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_POINTLOOKUP_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_POINTLOOKUP_TIMEOUT_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_SCAN_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_SCAN_TIMEOUT_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER; import static org.apache.phoenix.monitoring.MetricType.RESULT_SET_TIME_MS; +import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES; +import static org.apache.phoenix.monitoring.MetricType.SELECT_AGGREGATE_FAILURE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.SELECT_AGGREGATE_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_FAILED_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_FAILED_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_FAILED_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_SQL_COUNTER; +import static 
org.apache.phoenix.monitoring.MetricType.SELECT_SQL_QUERY_TIME; import static org.apache.phoenix.monitoring.MetricType.SELECT_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_BYTES; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_BATCH_FAILED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_BATCH_FAILED_SIZE; -import static org.apache.phoenix.monitoring.MetricType.DELETE_COMMIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_BYTES; -import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.MUTATION_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME; import static org.apache.phoenix.monitoring.MetricType.TASK_END_TO_END_TIME; -import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; -import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES; -import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_FAILED_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.SELECT_SQL_QUERY_TIME; -import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_FAILED_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS; -import static org.apache.phoenix.monitoring.MetricType.DELETE_FAILED_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_SQL_QUERY_TIME; -import static org.apache.phoenix.monitoring.MetricType.DELETE_SUCCESS_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_FAILURE_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.UPSERT_BATCH_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.UPSERT_BATCH_FAILED_SIZE; import static org.apache.phoenix.monitoring.MetricType.UPSERT_COMMIT_TIME; import static org.apache.phoenix.monitoring.MetricType.UPSERT_FAILED_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_BYTES; +import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.UPSERT_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.UPSERT_SQL_QUERY_TIME; import static org.apache.phoenix.monitoring.MetricType.UPSERT_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.UPSERT_AGGREGATE_FAILURE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_SUCCESS_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.DELETE_AGGREGATE_FAILURE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.SELECT_AGGREGATE_SUCCESS_SQL_COUNTER; -import static 
org.apache.phoenix.monitoring.MetricType.SELECT_AGGREGATE_FAILURE_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS; -import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES; -import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS; -import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_COMMIT_TIME; -import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.ATOMIC_UPSERT_SQL_QUERY_TIME; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; /** - * This is used by TableMetricsManager class to store instance of - * object associated with a tableName. + * This is used by TableMetricsManager class to store instance of object associated with a + * tableName. */ public class TableClientMetrics { - public enum TableMetrics { - TABLE_MUTATION_BATCH_FAILED_SIZE(MUTATION_BATCH_FAILED_SIZE), TABLE_MUTATION_BATCH_SIZE( - MUTATION_BATCH_SIZE), TABLE_MUTATION_BYTES( - MUTATION_BYTES), TABLE_UPSERT_MUTATION_BYTES( - UPSERT_MUTATION_BYTES), TABLE_UPSERT_MUTATION_SQL_COUNTER( - UPSERT_MUTATION_SQL_COUNTER), TABLE_DELETE_MUTATION_BYTES( - DELETE_MUTATION_BYTES), TABLE_DELETE_MUTATION_SQL_COUNTER( - DELETE_MUTATION_SQL_COUNTER), TABLE_MUTATION_SQL_COUNTER( - MUTATION_SQL_COUNTER), TABLE_MUTATION_COMMIT_TIME( - MUTATION_COMMIT_TIME), TABLE_UPSERT_SQL_COUNTER( - UPSERT_SQL_COUNTER), TABLE_UPSERT_SQL_QUERY_TIME( - UPSERT_SQL_QUERY_TIME), TABLE_SUCCESS_UPSERT_SQL_COUNTER( - UPSERT_SUCCESS_SQL_COUNTER), TABLE_FAILED_UPSERT_SQL_COUNTER( - UPSERT_FAILED_SQL_COUNTER), TABLE_UPSERT_BATCH_FAILED_SIZE( - UPSERT_BATCH_FAILED_SIZE), TABLE_UPSERT_BATCH_FAILED_COUNTER( - UPSERT_BATCH_FAILED_COUNTER), TABLE_DELETE_SQL_COUNTER( - DELETE_SQL_COUNTER), TABLE_DELETE_SQL_QUERY_TIME( - DELETE_SQL_QUERY_TIME), TABLE_SUCCESS_DELETE_SQL_COUNTER( - DELETE_SUCCESS_SQL_COUNTER), TABLE_FAILED_DELETE_SQL_COUNTER( - DELETE_FAILED_SQL_COUNTER), TABLE_DELETE_BATCH_FAILED_SIZE( - DELETE_BATCH_FAILED_SIZE), TABLE_DELETE_BATCH_FAILED_COUNTER( - DELETE_BATCH_FAILED_COUNTER), TABLE_UPSERT_COMMIT_TIME( - UPSERT_COMMIT_TIME), TABLE_DELETE_COMMIT_TIME( - DELETE_COMMIT_TIME), TABLE_TASK_END_TO_END_TIME( - TASK_END_TO_END_TIME), TABLE_COUNT_ROWS_SCANNED( - COUNT_ROWS_SCANNED), TABLE_QUERY_FAILED_COUNTER( - QUERY_FAILED_COUNTER), TABLE_QUERY_POINTLOOKUP_FAILED_COUNTER( - QUERY_POINTLOOKUP_FAILED_COUNTER), TABLE_QUERY_SCAN_FAILED_COUNTER( - QUERY_SCAN_FAILED_COUNTER), TABLE_QUERY_TIMEOUT_COUNTER( - QUERY_TIMEOUT_COUNTER), TABLE_QUERY_POINTLOOKUP_TIMEOUT_COUNTER( - QUERY_POINTLOOKUP_TIMEOUT_COUNTER), TABLE_QUERY_SCAN_TIMEOUT_COUNTER( - QUERY_SCAN_TIMEOUT_COUNTER), TABLE_SELECT_QUERY_RESULT_SET_MS( - RESULT_SET_TIME_MS), TABLE_SCANBYTES(SCAN_BYTES), TABLE_SELECT_SQL_COUNTER( - SELECT_SQL_COUNTER), TABLE_SELECT_SQL_QUERY_TIME( - SELECT_SQL_QUERY_TIME), TABLE_SUCCESS_SELECT_SQL_COUNTER( - SELECT_SUCCESS_SQL_COUNTER), TABLE_FAILED_SELECT_SQL_COUNTER( - SELECT_FAILED_SQL_COUNTER), TABLE_SELECT_POINTLOOKUP_COUNTER_SUCCESS( - SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER), TABLE_SELECT_POINTLOOKUP_COUNTER_FAILED( - SELECT_POINTLOOKUP_FAILED_SQL_COUNTER), TABLE_SELECT_SCAN_COUNTER_SUCCESS( - SELECT_SCAN_SUCCESS_SQL_COUNTER), TABLE_SELECT_SCAN_COUNTER_FAILED( - SELECT_SCAN_FAILED_SQL_COUNTER), TABLE_UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER( - 
UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER), TABLE_UPSERT_AGGREGATE_FAILURE_SQL_COUNTER( - UPSERT_AGGREGATE_FAILURE_SQL_COUNTER), TABLE_DELETE_AGGREGATE_SUCCESS_SQL_COUNTER( - DELETE_AGGREGATE_SUCCESS_SQL_COUNTER), TABLE_DELETE_AGGREGATE_FAILURE_SQL_COUNTER( - DELETE_AGGREGATE_FAILURE_SQL_COUNTER), TABLE_SELECT_AGGREGATE_SUCCESS_SQL_COUNTER( - SELECT_AGGREGATE_SUCCESS_SQL_COUNTER), TABLE_SELECT_AGGREGATE_FAILURE_SQL_COUNTER( - SELECT_AGGREGATE_FAILURE_SQL_COUNTER), - TABLE_ATOMIC_UPSERT_SQL_COUNTER(ATOMIC_UPSERT_SQL_COUNTER), - TABLE_ATOMIC_UPSERT_COMMIT_TIME(ATOMIC_UPSERT_COMMIT_TIME), - TABLE_ATOMIC_UPSERT_SQL_QUERY_TIME(ATOMIC_UPSERT_SQL_QUERY_TIME), - TABLE_NUM_SYSTEM_TABLE_RPC_SUCCESS(NUM_SYSTEM_TABLE_RPC_SUCCESS), - TABLE_NUM_SYSTEM_TABLE_RPC_FAILURES(NUM_SYSTEM_TABLE_RPC_FAILURES), - TABLE_NUM_METADATA_LOOKUP_FAILURES(NUM_METADATA_LOOKUP_FAILURES), - TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS); - - private MetricType metricType; - private PhoenixTableMetric metric; - - TableMetrics(MetricType metricType) { - this.metricType = metricType; - } + public enum TableMetrics { + TABLE_MUTATION_BATCH_FAILED_SIZE(MUTATION_BATCH_FAILED_SIZE), + TABLE_MUTATION_BATCH_SIZE(MUTATION_BATCH_SIZE), + TABLE_MUTATION_BYTES(MUTATION_BYTES), + TABLE_UPSERT_MUTATION_BYTES(UPSERT_MUTATION_BYTES), + TABLE_UPSERT_MUTATION_SQL_COUNTER(UPSERT_MUTATION_SQL_COUNTER), + TABLE_DELETE_MUTATION_BYTES(DELETE_MUTATION_BYTES), + TABLE_DELETE_MUTATION_SQL_COUNTER(DELETE_MUTATION_SQL_COUNTER), + TABLE_MUTATION_SQL_COUNTER(MUTATION_SQL_COUNTER), + TABLE_MUTATION_COMMIT_TIME(MUTATION_COMMIT_TIME), + TABLE_UPSERT_SQL_COUNTER(UPSERT_SQL_COUNTER), + TABLE_UPSERT_SQL_QUERY_TIME(UPSERT_SQL_QUERY_TIME), + TABLE_SUCCESS_UPSERT_SQL_COUNTER(UPSERT_SUCCESS_SQL_COUNTER), + TABLE_FAILED_UPSERT_SQL_COUNTER(UPSERT_FAILED_SQL_COUNTER), + TABLE_UPSERT_BATCH_FAILED_SIZE(UPSERT_BATCH_FAILED_SIZE), + TABLE_UPSERT_BATCH_FAILED_COUNTER(UPSERT_BATCH_FAILED_COUNTER), + TABLE_DELETE_SQL_COUNTER(DELETE_SQL_COUNTER), + TABLE_DELETE_SQL_QUERY_TIME(DELETE_SQL_QUERY_TIME), + TABLE_SUCCESS_DELETE_SQL_COUNTER(DELETE_SUCCESS_SQL_COUNTER), + TABLE_FAILED_DELETE_SQL_COUNTER(DELETE_FAILED_SQL_COUNTER), + TABLE_DELETE_BATCH_FAILED_SIZE(DELETE_BATCH_FAILED_SIZE), + TABLE_DELETE_BATCH_FAILED_COUNTER(DELETE_BATCH_FAILED_COUNTER), + TABLE_UPSERT_COMMIT_TIME(UPSERT_COMMIT_TIME), + TABLE_DELETE_COMMIT_TIME(DELETE_COMMIT_TIME), + TABLE_TASK_END_TO_END_TIME(TASK_END_TO_END_TIME), + TABLE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED), + TABLE_QUERY_FAILED_COUNTER(QUERY_FAILED_COUNTER), + TABLE_QUERY_POINTLOOKUP_FAILED_COUNTER(QUERY_POINTLOOKUP_FAILED_COUNTER), + TABLE_QUERY_SCAN_FAILED_COUNTER(QUERY_SCAN_FAILED_COUNTER), + TABLE_QUERY_TIMEOUT_COUNTER(QUERY_TIMEOUT_COUNTER), + TABLE_QUERY_POINTLOOKUP_TIMEOUT_COUNTER(QUERY_POINTLOOKUP_TIMEOUT_COUNTER), + TABLE_QUERY_SCAN_TIMEOUT_COUNTER(QUERY_SCAN_TIMEOUT_COUNTER), + TABLE_SELECT_QUERY_RESULT_SET_MS(RESULT_SET_TIME_MS), + TABLE_SCANBYTES(SCAN_BYTES), + TABLE_SELECT_SQL_COUNTER(SELECT_SQL_COUNTER), + TABLE_SELECT_SQL_QUERY_TIME(SELECT_SQL_QUERY_TIME), + TABLE_SUCCESS_SELECT_SQL_COUNTER(SELECT_SUCCESS_SQL_COUNTER), + TABLE_FAILED_SELECT_SQL_COUNTER(SELECT_FAILED_SQL_COUNTER), + TABLE_SELECT_POINTLOOKUP_COUNTER_SUCCESS(SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER), + TABLE_SELECT_POINTLOOKUP_COUNTER_FAILED(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER), + TABLE_SELECT_SCAN_COUNTER_SUCCESS(SELECT_SCAN_SUCCESS_SQL_COUNTER), + TABLE_SELECT_SCAN_COUNTER_FAILED(SELECT_SCAN_FAILED_SQL_COUNTER), + 
TABLE_UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER(UPSERT_AGGREGATE_SUCCESS_SQL_COUNTER), + TABLE_UPSERT_AGGREGATE_FAILURE_SQL_COUNTER(UPSERT_AGGREGATE_FAILURE_SQL_COUNTER), + TABLE_DELETE_AGGREGATE_SUCCESS_SQL_COUNTER(DELETE_AGGREGATE_SUCCESS_SQL_COUNTER), + TABLE_DELETE_AGGREGATE_FAILURE_SQL_COUNTER(DELETE_AGGREGATE_FAILURE_SQL_COUNTER), + TABLE_SELECT_AGGREGATE_SUCCESS_SQL_COUNTER(SELECT_AGGREGATE_SUCCESS_SQL_COUNTER), + TABLE_SELECT_AGGREGATE_FAILURE_SQL_COUNTER(SELECT_AGGREGATE_FAILURE_SQL_COUNTER), + TABLE_ATOMIC_UPSERT_SQL_COUNTER(ATOMIC_UPSERT_SQL_COUNTER), + TABLE_ATOMIC_UPSERT_COMMIT_TIME(ATOMIC_UPSERT_COMMIT_TIME), + TABLE_ATOMIC_UPSERT_SQL_QUERY_TIME(ATOMIC_UPSERT_SQL_QUERY_TIME), + TABLE_NUM_SYSTEM_TABLE_RPC_SUCCESS(NUM_SYSTEM_TABLE_RPC_SUCCESS), + TABLE_NUM_SYSTEM_TABLE_RPC_FAILURES(NUM_SYSTEM_TABLE_RPC_FAILURES), + TABLE_NUM_METADATA_LOOKUP_FAILURES(NUM_METADATA_LOOKUP_FAILURES), + TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS); + + private MetricType metricType; + private PhoenixTableMetric metric; + + TableMetrics(MetricType metricType) { + this.metricType = metricType; } - - private final String tableName; - private Map metricRegister; - private TableHistograms tableHistograms; - - public TableClientMetrics(final String tableName, Configuration conf) { - this.tableName = tableName; - metricRegister = new HashMap<>(); - for (TableMetrics tableMetric : TableMetrics.values()) { - tableMetric.metric = new PhoenixTableMetricImpl(tableMetric.metricType); - metricRegister.put(tableMetric.metricType, tableMetric.metric); - } - tableHistograms = new TableHistograms(tableName, conf); + } + + private final String tableName; + private Map metricRegister; + private TableHistograms tableHistograms; + + public TableClientMetrics(final String tableName, Configuration conf) { + this.tableName = tableName; + metricRegister = new HashMap<>(); + for (TableMetrics tableMetric : TableMetrics.values()) { + tableMetric.metric = new PhoenixTableMetricImpl(tableMetric.metricType); + metricRegister.put(tableMetric.metricType, tableMetric.metric); } - - /** - * This function is used to update the value of Metric - * Incase of counter val will passed as 1. - * - * @param type metric type - * @param val update value. In case of counters, this will be 1 - */ - public void changeMetricValue(MetricType type, long val) { - if (!metricRegister.containsKey(type)) { - return; - } - PhoenixTableMetric metric = metricRegister.get(type); - metric.change(val); + tableHistograms = new TableHistograms(tableName, conf); + } + + /** + * This function is used to update the value of a metric. In case of counters, val will be passed as 1. + * @param type metric type + * @param val update value. In case of counters, this will be 1 + */ + public void changeMetricValue(MetricType type, long val) { + if (!metricRegister.containsKey(type)) { + return; } - - public String getTableName() { - return tableName; + PhoenixTableMetric metric = metricRegister.get(type); + metric.change(val); + } + + public String getTableName() { + return tableName; + } + + /** + * This method is called to aggregate all the Metrics across all Tables in Phoenix. + * @return list of PhoenixTableMetric objects tracked for this table. + */ + public List getMetricMap() { + List metricsList = new ArrayList<>(); + for (PhoenixTableMetric value : metricRegister.values()) { + metricsList.add(value); } + return metricsList; + } - /** - * This method is called to aggregate all the Metrics across all Tables in Phoenix. 
- * - * @return map of table name -> list of TableMetric. - */ - public List getMetricMap() { - List metricsList = new ArrayList<>(); - for (PhoenixTableMetric value : metricRegister.values()) { - metricsList.add(value); - } - return metricsList; - } + public Map getMetricRegistry() { + return metricRegister; + } - public Map getMetricRegistry() { - return metricRegister; - } - - public TableHistograms getTableHistograms() { - return tableHistograms; - } + public TableHistograms getTableHistograms() { + return tableHistograms; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableHistograms.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableHistograms.java index 1ef29f5da6b..03de883334a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableHistograms.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableHistograms.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -18,104 +18,101 @@ package org.apache.phoenix.monitoring; import java.util.List; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; public class TableHistograms { - private String tableName; - private LatencyHistogram queryLatencyHisto; - private SizeHistogram querySizeHisto; - private LatencyHistogram upsertLatencyHisto; - private SizeHistogram upsertSizeHisto; - private LatencyHistogram deleteLatencyHisto; - private SizeHistogram deleteSizeHisto; - private LatencyHistogram pointLookupLatencyHisto; - private SizeHistogram pointLookupSizeHisto; - private LatencyHistogram rangeScanLatencyHisto; - private SizeHistogram rangeScanSizeHisto; - - public TableHistograms(String tableName, Configuration conf) { - this.tableName = tableName; - queryLatencyHisto = new LatencyHistogram("QueryTime", "Query time latency", conf); - querySizeHisto = new SizeHistogram("QuerySize", "Query size", conf); - - upsertLatencyHisto = new LatencyHistogram("UpsertTime", "Upsert time latency", conf); - upsertSizeHisto = new SizeHistogram("UpsertSize", "Upsert size", conf); - - deleteLatencyHisto = new LatencyHistogram("DeleteTime", "Delete time latency", conf); - deleteSizeHisto = new SizeHistogram("DeleteSize", "Delete size", conf); - - pointLookupLatencyHisto = new LatencyHistogram("PointLookupTime", - "Point Lookup Query time latency", conf); - pointLookupSizeHisto = new SizeHistogram("PointLookupSize", - "Point Lookup Query Size", conf); - - rangeScanLatencyHisto = new LatencyHistogram("RangeScanTime", - "Range Scan Query time latency", conf); - rangeScanSizeHisto = new SizeHistogram("RangeScanSize", - "Range Scan Query size", 
conf); - } - - public String getTableName() { - return tableName; - } - - public LatencyHistogram getQueryLatencyHisto() { - return queryLatencyHisto; - } - - public SizeHistogram getQuerySizeHisto() { - return querySizeHisto; - } - - - public LatencyHistogram getPointLookupLatencyHisto() { - return pointLookupLatencyHisto; - } - - public SizeHistogram getPointLookupSizeHisto() { - return pointLookupSizeHisto; - } - - public LatencyHistogram getRangeScanLatencyHisto() { - return rangeScanLatencyHisto; - } - - public SizeHistogram getRangeScanSizeHisto() { - return rangeScanSizeHisto; - } - - public LatencyHistogram getUpsertLatencyHisto() { - return upsertLatencyHisto; - } - - public SizeHistogram getUpsertSizeHisto() { - return upsertSizeHisto; - } - - public LatencyHistogram getDeleteLatencyHisto() { - return deleteLatencyHisto; - } - - public SizeHistogram getDeleteSizeHisto() { - return deleteSizeHisto; - } - - public List getTableLatencyHistogramsDistribution() { - return ImmutableList.of(queryLatencyHisto.getRangeHistogramDistribution(), - upsertLatencyHisto.getRangeHistogramDistribution(), - deleteLatencyHisto.getRangeHistogramDistribution(), - pointLookupLatencyHisto.getRangeHistogramDistribution(), - rangeScanLatencyHisto.getRangeHistogramDistribution()); - } - - public List getTableSizeHistogramsDistribution() { - return ImmutableList.of(querySizeHisto.getRangeHistogramDistribution(), - upsertSizeHisto.getRangeHistogramDistribution(), - deleteSizeHisto.getRangeHistogramDistribution(), - pointLookupSizeHisto.getRangeHistogramDistribution(), - rangeScanSizeHisto.getRangeHistogramDistribution()); - } - -} \ No newline at end of file + private String tableName; + private LatencyHistogram queryLatencyHisto; + private SizeHistogram querySizeHisto; + private LatencyHistogram upsertLatencyHisto; + private SizeHistogram upsertSizeHisto; + private LatencyHistogram deleteLatencyHisto; + private SizeHistogram deleteSizeHisto; + private LatencyHistogram pointLookupLatencyHisto; + private SizeHistogram pointLookupSizeHisto; + private LatencyHistogram rangeScanLatencyHisto; + private SizeHistogram rangeScanSizeHisto; + + public TableHistograms(String tableName, Configuration conf) { + this.tableName = tableName; + queryLatencyHisto = new LatencyHistogram("QueryTime", "Query time latency", conf); + querySizeHisto = new SizeHistogram("QuerySize", "Query size", conf); + + upsertLatencyHisto = new LatencyHistogram("UpsertTime", "Upsert time latency", conf); + upsertSizeHisto = new SizeHistogram("UpsertSize", "Upsert size", conf); + + deleteLatencyHisto = new LatencyHistogram("DeleteTime", "Delete time latency", conf); + deleteSizeHisto = new SizeHistogram("DeleteSize", "Delete size", conf); + + pointLookupLatencyHisto = + new LatencyHistogram("PointLookupTime", "Point Lookup Query time latency", conf); + pointLookupSizeHisto = new SizeHistogram("PointLookupSize", "Point Lookup Query Size", conf); + + rangeScanLatencyHisto = + new LatencyHistogram("RangeScanTime", "Range Scan Query time latency", conf); + rangeScanSizeHisto = new SizeHistogram("RangeScanSize", "Range Scan Query size", conf); + } + + public String getTableName() { + return tableName; + } + + public LatencyHistogram getQueryLatencyHisto() { + return queryLatencyHisto; + } + + public SizeHistogram getQuerySizeHisto() { + return querySizeHisto; + } + + public LatencyHistogram getPointLookupLatencyHisto() { + return pointLookupLatencyHisto; + } + + public SizeHistogram getPointLookupSizeHisto() { + return pointLookupSizeHisto; + } + + 
public LatencyHistogram getRangeScanLatencyHisto() { + return rangeScanLatencyHisto; + } + + public SizeHistogram getRangeScanSizeHisto() { + return rangeScanSizeHisto; + } + + public LatencyHistogram getUpsertLatencyHisto() { + return upsertLatencyHisto; + } + + public SizeHistogram getUpsertSizeHisto() { + return upsertSizeHisto; + } + + public LatencyHistogram getDeleteLatencyHisto() { + return deleteLatencyHisto; + } + + public SizeHistogram getDeleteSizeHisto() { + return deleteSizeHisto; + } + + public List getTableLatencyHistogramsDistribution() { + return ImmutableList.of(queryLatencyHisto.getRangeHistogramDistribution(), + upsertLatencyHisto.getRangeHistogramDistribution(), + deleteLatencyHisto.getRangeHistogramDistribution(), + pointLookupLatencyHisto.getRangeHistogramDistribution(), + rangeScanLatencyHisto.getRangeHistogramDistribution()); + } + + public List getTableSizeHistogramsDistribution() { + return ImmutableList.of(querySizeHisto.getRangeHistogramDistribution(), + upsertSizeHisto.getRangeHistogramDistribution(), + deleteSizeHisto.getRangeHistogramDistribution(), + pointLookupSizeHisto.getRangeHistogramDistribution(), + rangeScanSizeHisto.getRangeHistogramDistribution()); + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableMetricsManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableMetricsManager.java index 24aaaea79b2..4990bcc4faf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableMetricsManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableMetricsManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,16 +6,19 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.monitoring; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; + import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -27,461 +30,444 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressWarnings; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; /** * Central place where we keep track of all the Table Level metrics. 
Register each tableMetrics and - * store the instance of it associated with TableName in a map - * This class exposes following functions as static methods to help catch all execptions - * 1.clearTableLevelMetricsMethod - * 2.getTableMetricsMethod - * 3.pushMetricsFromConnInstanceMethod - * 4.updateMetricsMethod + * store the instance of it associated with TableName in a map This class exposes following + * functions as static methods to help catch all execptions 1.clearTableLevelMetricsMethod + * 2.getTableMetricsMethod 3.pushMetricsFromConnInstanceMethod 4.updateMetricsMethod */ public class TableMetricsManager { - private static final Logger LOGGER = LoggerFactory.getLogger(TableMetricsManager.class); - private static final Set allowedListOfTableNames = new HashSet<>(); - private static volatile boolean isTableLevelMetricsEnabled; - private static volatile boolean isMetricPublisherEnabled; - private static volatile ConcurrentMap - tableClientMetricsMapping = - null; - // Singleton object - private static volatile TableMetricsManager tableMetricsManager = null; - private static volatile MetricPublisherSupplierFactory mPublisher = null; - private static volatile QueryServicesOptions options = null; - - @SuppressWarnings(value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", justification = "This is how we implement the singleton pattern") - public TableMetricsManager(QueryServicesOptions ops) { - options = ops; - isTableLevelMetricsEnabled = options.isTableLevelMetricsEnabled(); - LOGGER.info(String.format("Phoenix Table metrics enabled status: %s", - isTableLevelMetricsEnabled)); - tableClientMetricsMapping = new ConcurrentHashMap<>(); - - String tableNamesList = options.getAllowedListTableNames(); - if (tableNamesList != null && !tableNamesList.isEmpty()) { - for (String tableName : tableNamesList.split(",")) { - allowedListOfTableNames.add(tableName); - } - } - isMetricPublisherEnabled = options.isMetricPublisherEnabled(); - LOGGER.info(String.format("Phoenix table level metrics publisher enabled status %s", - isMetricPublisherEnabled)); - } - - public TableMetricsManager() { - - } - - /** - * Method to provide instance of TableMetricsManager(Create if needed in thread safe manner) - * - * @return - */ - private static TableMetricsManager getInstance() { - - TableMetricsManager localRef = tableMetricsManager; + private static final Logger LOGGER = LoggerFactory.getLogger(TableMetricsManager.class); + private static final Set allowedListOfTableNames = new HashSet<>(); + private static volatile boolean isTableLevelMetricsEnabled; + private static volatile boolean isMetricPublisherEnabled; + private static volatile ConcurrentMap tableClientMetricsMapping = + null; + // Singleton object + private static volatile TableMetricsManager tableMetricsManager = null; + private static volatile MetricPublisherSupplierFactory mPublisher = null; + private static volatile QueryServicesOptions options = null; + + @SuppressWarnings(value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", + justification = "This is how we implement the singleton pattern") + public TableMetricsManager(QueryServicesOptions ops) { + options = ops; + isTableLevelMetricsEnabled = options.isTableLevelMetricsEnabled(); + LOGGER + .info(String.format("Phoenix Table metrics enabled status: %s", isTableLevelMetricsEnabled)); + tableClientMetricsMapping = new ConcurrentHashMap<>(); + + String tableNamesList = options.getAllowedListTableNames(); + if (tableNamesList != null && !tableNamesList.isEmpty()) { + for (String tableName : 
tableNamesList.split(",")) { + allowedListOfTableNames.add(tableName); + } + } + isMetricPublisherEnabled = options.isMetricPublisherEnabled(); + LOGGER.info(String.format("Phoenix table level metrics publisher enabled status %s", + isMetricPublisherEnabled)); + } + + public TableMetricsManager() { + + } + + /** + * Method to provide instance of TableMetricsManager(Create if needed in thread safe manner) + */ + private static TableMetricsManager getInstance() { + + TableMetricsManager localRef = tableMetricsManager; + if (localRef == null) { + synchronized (TableMetricsManager.class) { if (localRef == null) { - synchronized (TableMetricsManager.class) { - if (localRef == null) { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - if (!options.isTableLevelMetricsEnabled()) { - localRef = - tableMetricsManager = - NoOpTableMetricsManager.noOpsTableMetricManager; - return localRef; - } - localRef = tableMetricsManager = new TableMetricsManager(options); - LOGGER.info("Phoenix Table metrics created object for metrics manager"); - if (isMetricPublisherEnabled) { - String className = options.getMetricPublisherClass(); - if (className != null) { - MetricServiceResolver mResolver = new MetricServiceResolver(); - LOGGER.info(String.format( - "Phoenix table level metrics publisher className %s", - className)); - try { - mPublisher = mResolver.instantiate(className); - mPublisher.registerMetricProvider(); - } catch (Throwable e) { - LOGGER.error("The exception from metric publish Function", e); - } - - } else { - LOGGER.error( - "Phoenix table level metrics publisher className cannot be null"); - } - - } - } - } - } - return localRef; - } - - @VisibleForTesting public static void setInstance(TableMetricsManager metricsManager) { - tableMetricsManager = metricsManager; - } - - public static void updateMetricsMethod(String tableName, MetricType type, long value) { - try { - TableMetricsManager.getInstance().updateMetrics(tableName, type, value); - } catch (Exception e) { - LOGGER.error("Failed updating Phoenix table level metrics", e); - } - } - - public static void pushMetricsFromConnInstanceMethod(Map> map) { - try { - TableMetricsManager.getInstance().pushMetricsFromConnInstance(map); - } catch (Exception e) { - LOGGER.error("Failed pushing Phoenix table level metrics", e); - } - } - - public static Map> getTableMetricsMethod() { - try { - return TableMetricsManager.getInstance().getTableLevelMetrics(); - } catch (Exception e) { - LOGGER.error("Failed retrieving table level Metrics", e); - } - return null; - } - - public static void clearTableLevelMetricsMethod() { - try { - TableMetricsManager.getInstance().clearTableLevelMetrics(); - } catch (Exception e) { - LOGGER.error("Failed resetting table level Metrics", e); - } - } - - @VisibleForTesting public static Long getMetricValue(String tableName, MetricType type) { - TableClientMetrics tableMetrics = getInstance().getTableClientMetrics(tableName); - if (tableMetrics == null) { - return null; - } - for (PhoenixTableMetric metric : tableMetrics.getMetricMap()) { - if (metric.getMetricType() == type) { - return metric.getValue(); - } - } - return null; - } - - // static methods to push, update or retrieve TableLevel Metrics. - - /** - * This function is provided as hook to publish the tableLevel Metrics to - * LocalStore(tablePhoenixMapping). 
- * - * @param map of tableName to pair of (MetricType, Metric Value) - */ - public void pushMetricsFromConnInstance(Map> map) { - - if (map == null) { - LOGGER.debug("Phoenix table level metrics input map cannot be null"); - return; - } - - long startTime = EnvironmentEdgeManager.currentTime(); - for (Map.Entry> tableEntry : map.entrySet()) { - for (Map.Entry metricEntry : tableEntry.getValue().entrySet()) { - updateMetrics(tableEntry.getKey(), metricEntry.getKey(), metricEntry.getValue()); - } - } - - LOGGER.debug(String.format( - "Phoenix table level metrics completed updating metrics from conn instance, timetaken:\t%d", - +EnvironmentEdgeManager.currentTime() - startTime)); - } - - /** - * This function will be used to add individual MetricType to LocalStore. - * - * @param tableName - * @param type - * @param value - */ - public void updateMetrics(String tableName, MetricType type, long value) { - - long startTime = EnvironmentEdgeManager.currentTime(); - - TableClientMetrics tInstance = getTableClientMetrics(tableName); - if (tInstance == null) { - LOGGER.debug("Table level client metrics are disabled for table: " + tableName); - return; - } - tInstance.changeMetricValue(type, value); - - LOGGER.debug(String.format("Phoenix table level metrics completed updating metric" - + " %s to value %s, timetaken = %s", type, value, - EnvironmentEdgeManager.currentTime() - startTime)); - } - - /** - * Get Table specific metrics object and create if not initialized(thread safe) - * - * @param tableName - * @return TableClientMetrics object - */ - public TableClientMetrics getTableClientMetrics(String tableName) { + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + if (!options.isTableLevelMetricsEnabled()) { + localRef = tableMetricsManager = NoOpTableMetricsManager.noOpsTableMetricManager; + return localRef; + } + localRef = tableMetricsManager = new TableMetricsManager(options); + LOGGER.info("Phoenix Table metrics created object for metrics manager"); + if (isMetricPublisherEnabled) { + String className = options.getMetricPublisherClass(); + if (className != null) { + MetricServiceResolver mResolver = new MetricServiceResolver(); + LOGGER.info( + String.format("Phoenix table level metrics publisher className %s", className)); + try { + mPublisher = mResolver.instantiate(className); + mPublisher.registerMetricProvider(); + } catch (Throwable e) { + LOGGER.error("The exception from metric publish Function", e); + } - if (Strings.isNullOrEmpty(tableName)) { - LOGGER.debug("Phoenix Table metrics TableName cannot be null or empty"); - return null; - } - - if (!allowedListOfTableNames.isEmpty() && !allowedListOfTableNames.contains(tableName)) { - return null; - } - - TableClientMetrics tInstance; - tInstance = tableClientMetricsMapping.get(tableName); - if (tInstance == null) { - synchronized (TableMetricsManager.class) { - tInstance = tableClientMetricsMapping.get(tableName); - if (tInstance == null) { - LOGGER.info(String.format("Phoenix Table metrics creating object for table: %s", - tableName)); - tInstance = new TableClientMetrics(tableName, options.getConfiguration()); - if (isMetricPublisherEnabled && mPublisher != null) { - mPublisher.registerMetrics(tInstance); - } - tableClientMetricsMapping.put(tableName, tInstance); - } + } else { + LOGGER.error("Phoenix table level metrics publisher className cannot be null"); } - } - return tInstance; - } - - /** - * Publish the metrics to wherever you want them published. 
- * - * @return map of table name ->TableMetric - */ - public Map> getTableLevelMetrics() { - long startTime = EnvironmentEdgeManager.currentTime(); - Map> map = new HashMap<>(); - for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { - map.put(entry.getKey(), entry.getValue().getMetricMap()); + } } - long timeTakenForMetricConversion = EnvironmentEdgeManager.currentTime() - startTime; - LOGGER.info(String.format("Phoenix Table metrics fetching complete, timeTaken: \t%d", - +timeTakenForMetricConversion)); - return map; + } } + return localRef; + } - private void updateMetricsForSystemCatalogTable(String userTable, MetricType mType, long value) { - - if (userTable != null && !userTable.equals(SYSTEM_CATALOG_NAME)) { - updateMetricsMethod(userTable, mType, value); - } - - updateMetricsMethod(SYSTEM_CATALOG_NAME, mType, value); - } + @VisibleForTesting + public static void setInstance(TableMetricsManager metricsManager) { + tableMetricsManager = metricsManager; + } - /** - * Helps reset the localstore(tableClientMetricsMapping) - */ - public void clearTableLevelMetrics() { - if (tableClientMetricsMapping != null) { - tableClientMetricsMapping.clear(); - } - LOGGER.info("Phoenix Table metrics clearing complete"); - } - - /** - * Update the Metrics for systemCatalog Table. - * For every userTable which is non empty we update success/Failure RPC call metric - * and for systemCatalog table we update the total no. of success/failure rpc calls made. - */ - public static void updateMetricsForSystemCatalogTableMethod(String userTable, MetricType mType, long value) { - try { - TableMetricsManager.getInstance() - .updateMetricsForSystemCatalogTable(userTable, mType, value); - } catch (Exception e) { - LOGGER.error("Failed updating Metrics for System catalog Table", e); - } + public static void updateMetricsMethod(String tableName, MetricType type, long value) { + try { + TableMetricsManager.getInstance().updateMetrics(tableName, type, value); + } catch (Exception e) { + LOGGER.error("Failed updating Phoenix table level metrics", e); } + } - public void clear() { - TableMetricsManager.clearTableLevelMetricsMethod(); + public static void pushMetricsFromConnInstanceMethod(Map> map) { + try { + TableMetricsManager.getInstance().pushMetricsFromConnInstance(map); + } catch (Exception e) { + LOGGER.error("Failed pushing Phoenix table level metrics", e); } + } - public static Map> getSizeHistogramsForAllTables() { - Map> map = new HashMap<>(); - for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { - TableHistograms tableHistograms = entry.getValue().getTableHistograms(); - map.put(entry.getKey(), tableHistograms.getTableSizeHistogramsDistribution()); - } - return map; + public static Map> getTableMetricsMethod() { + try { + return TableMetricsManager.getInstance().getTableLevelMetrics(); + } catch (Exception e) { + LOGGER.error("Failed retrieving table level Metrics", e); } + return null; + } - public static Map> getLatencyHistogramsForAllTables() { - - Map> map = new HashMap<>(); - for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { - TableHistograms tableHistograms = entry.getValue().getTableHistograms(); - map.put(entry.getKey(), tableHistograms.getTableLatencyHistogramsDistribution()); - } - return map; + public static void clearTableLevelMetricsMethod() { + try { + TableMetricsManager.getInstance().clearTableLevelMetrics(); + } catch (Exception e) { + LOGGER.error("Failed resetting table level Metrics", e); } + } - public static LatencyHistogram 
getUpsertLatencyHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getUpsertLatencyHisto(); - } - return null; + @VisibleForTesting + public static Long getMetricValue(String tableName, MetricType type) { + TableClientMetrics tableMetrics = getInstance().getTableClientMetrics(tableName); + if (tableMetrics == null) { + return null; } - - public static SizeHistogram getUpsertSizeHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getUpsertSizeHisto(); - } - return null; + for (PhoenixTableMetric metric : tableMetrics.getMetricMap()) { + if (metric.getMetricType() == type) { + return metric.getValue(); + } } + return null; + } - public static LatencyHistogram getDeleteLatencyHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getDeleteLatencyHisto(); - } - return null; - } + // static methods to push, update or retrieve TableLevel Metrics. - public static SizeHistogram getDeleteSizeHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getDeleteSizeHisto(); - } - return null; - } + /** + * This function is provided as hook to publish the tableLevel Metrics to + * LocalStore(tablePhoenixMapping). + * @param map of tableName to pair of (MetricType, Metric Value) + */ + public void pushMetricsFromConnInstance(Map> map) { - public static LatencyHistogram getQueryLatencyHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getQueryLatencyHisto(); - } - return null; + if (map == null) { + LOGGER.debug("Phoenix table level metrics input map cannot be null"); + return; } - public static SizeHistogram getQuerySizeHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getQuerySizeHisto(); - } - return null; + long startTime = EnvironmentEdgeManager.currentTime(); + for (Map.Entry> tableEntry : map.entrySet()) { + for (Map.Entry metricEntry : tableEntry.getValue().entrySet()) { + updateMetrics(tableEntry.getKey(), metricEntry.getKey(), metricEntry.getValue()); + } } - public static LatencyHistogram getPointLookupLatencyHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getPointLookupLatencyHisto(); - } - return null; - } + LOGGER.debug(String.format( + "Phoenix table level metrics completed updating metrics from conn instance, timetaken:\t%d", + +EnvironmentEdgeManager.currentTime() - startTime)); + } - public static SizeHistogram getPointLookupSizeHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getPointLookupSizeHisto(); - } - return null; - } + /** + * This function will be used to add individual MetricType to 
LocalStore. + */ + public void updateMetrics(String tableName, MetricType type, long value) { - public static LatencyHistogram getRangeScanLatencyHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getRangeScanLatencyHisto(); - } - return null; - } + long startTime = EnvironmentEdgeManager.currentTime(); - public static SizeHistogram getRangeScanSizeHistogramForTable(String tableName) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - return tableMetrics.getTableHistograms().getRangeScanSizeHisto(); - } - return null; - } - - public static void updateHistogramMetricsForQueryLatency(String tableName, long elapsedTime, - boolean isPointLookup) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - LOGGER.trace("Updating latency histograms for select query: tableName: " + tableName - + " isPointLookup: " + isPointLookup + " elapsedTime: " + elapsedTime); - tableMetrics.getTableHistograms().getQueryLatencyHisto().add(elapsedTime); - if (isPointLookup) { - tableMetrics.getTableHistograms().getPointLookupLatencyHisto().add(elapsedTime); - } else { - tableMetrics.getTableHistograms().getRangeScanLatencyHisto().add(elapsedTime); - } - } + TableClientMetrics tInstance = getTableClientMetrics(tableName); + if (tInstance == null) { + LOGGER.debug("Table level client metrics are disabled for table: " + tableName); + return; } + tInstance.changeMetricValue(type, value); - public static void updateHistogramMetricsForQueryScanBytes(long scanBytes, String tableName, - boolean isPointLookup) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - tableMetrics.getTableHistograms().getQuerySizeHisto().add(scanBytes); - if (isPointLookup) { - tableMetrics.getTableHistograms().getPointLookupSizeHisto().add(scanBytes); - } else { - tableMetrics.getTableHistograms().getRangeScanSizeHisto().add(scanBytes); - } - } - } + LOGGER.debug(String.format( + "Phoenix table level metrics completed updating metric" + " %s to value %s, timetaken = %s", + type, value, EnvironmentEdgeManager.currentTime() - startTime)); + } - public static void updateSizeHistogramMetricsForMutations(String tableName, long mutationBytes, - boolean isUpsert) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - LOGGER.trace("Updating size histograms for mutations: tableName: " + tableName - + " isUpsert: " + isUpsert + " mutation bytes: " + mutationBytes); + /** + * Get Table specific metrics object and create if not initialized(thread safe) + * @return TableClientMetrics object + */ + public TableClientMetrics getTableClientMetrics(String tableName) { - if (isUpsert) { - tableMetrics.getTableHistograms().getUpsertSizeHisto().add(mutationBytes); - } else { - tableMetrics.getTableHistograms().getDeleteSizeHisto().add(mutationBytes); - } - } + if (Strings.isNullOrEmpty(tableName)) { + LOGGER.debug("Phoenix Table metrics TableName cannot be null or empty"); + return null; } - private static TableClientMetrics getTableClientMetricsInstance(String tableName) { - TableClientMetrics tableMetrics = getInstance().getTableClientMetrics(tableName); - if (tableMetrics == null) { - LOGGER.trace("Table level client metrics are disabled for table: " + tableName); - return null; - } - 
return tableMetrics; + if (!allowedListOfTableNames.isEmpty() && !allowedListOfTableNames.contains(tableName)) { + return null; } - public static void updateLatencyHistogramForMutations(String tableName, long elapsedTime, - boolean isUpsert) { - TableClientMetrics tableMetrics; - if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { - LOGGER.trace("Updating latency histograms for mutations: tableName: " + tableName - + " isUpsert: " + isUpsert + " elapsedTime: " + elapsedTime); - if (isUpsert) { - tableMetrics.getTableHistograms().getUpsertLatencyHisto().add(elapsedTime); - } else { - tableMetrics.getTableHistograms().getDeleteLatencyHisto().add(elapsedTime); - } - } - } + TableClientMetrics tInstance; + tInstance = tableClientMetricsMapping.get(tableName); + if (tInstance == null) { + synchronized (TableMetricsManager.class) { + tInstance = tableClientMetricsMapping.get(tableName); + if (tInstance == null) { + LOGGER + .info(String.format("Phoenix Table metrics creating object for table: %s", tableName)); + tInstance = new TableClientMetrics(tableName, options.getConfiguration()); + if (isMetricPublisherEnabled && mPublisher != null) { + mPublisher.registerMetrics(tInstance); + } + tableClientMetricsMapping.put(tableName, tInstance); + } + } + } + return tInstance; + } + + /** + * Publish the metrics to wherever you want them published. + * @return map of table name ->TableMetric + */ + public Map> getTableLevelMetrics() { + + long startTime = EnvironmentEdgeManager.currentTime(); + Map> map = new HashMap<>(); + for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { + map.put(entry.getKey(), entry.getValue().getMetricMap()); + } + long timeTakenForMetricConversion = EnvironmentEdgeManager.currentTime() - startTime; + LOGGER.info(String.format("Phoenix Table metrics fetching complete, timeTaken: \t%d", + +timeTakenForMetricConversion)); + return map; + } + + private void updateMetricsForSystemCatalogTable(String userTable, MetricType mType, long value) { + + if (userTable != null && !userTable.equals(SYSTEM_CATALOG_NAME)) { + updateMetricsMethod(userTable, mType, value); + } + + updateMetricsMethod(SYSTEM_CATALOG_NAME, mType, value); + } + + /** + * Helps reset the localstore(tableClientMetricsMapping) + */ + public void clearTableLevelMetrics() { + if (tableClientMetricsMapping != null) { + tableClientMetricsMapping.clear(); + } + LOGGER.info("Phoenix Table metrics clearing complete"); + } + + /** + * Update the Metrics for systemCatalog Table. For every userTable which is non empty we update + * success/Failure RPC call metric and for systemCatalog table we update the total no. of + * success/failure rpc calls made. 
+ */ + public static void updateMetricsForSystemCatalogTableMethod(String userTable, MetricType mType, + long value) { + try { + TableMetricsManager.getInstance().updateMetricsForSystemCatalogTable(userTable, mType, value); + } catch (Exception e) { + LOGGER.error("Failed updating Metrics for System catalog Table", e); + } + } + + public void clear() { + TableMetricsManager.clearTableLevelMetricsMethod(); + } + + public static Map> getSizeHistogramsForAllTables() { + Map> map = new HashMap<>(); + for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { + TableHistograms tableHistograms = entry.getValue().getTableHistograms(); + map.put(entry.getKey(), tableHistograms.getTableSizeHistogramsDistribution()); + } + return map; + } + + public static Map> getLatencyHistogramsForAllTables() { + + Map> map = new HashMap<>(); + for (Map.Entry entry : tableClientMetricsMapping.entrySet()) { + TableHistograms tableHistograms = entry.getValue().getTableHistograms(); + map.put(entry.getKey(), tableHistograms.getTableLatencyHistogramsDistribution()); + } + return map; + } + + public static LatencyHistogram getUpsertLatencyHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getUpsertLatencyHisto(); + } + return null; + } + + public static SizeHistogram getUpsertSizeHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getUpsertSizeHisto(); + } + return null; + } + + public static LatencyHistogram getDeleteLatencyHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getDeleteLatencyHisto(); + } + return null; + } + + public static SizeHistogram getDeleteSizeHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getDeleteSizeHisto(); + } + return null; + } + + public static LatencyHistogram getQueryLatencyHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getQueryLatencyHisto(); + } + return null; + } + + public static SizeHistogram getQuerySizeHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getQuerySizeHisto(); + } + return null; + } + + public static LatencyHistogram getPointLookupLatencyHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getPointLookupLatencyHisto(); + } + return null; + } + + public static SizeHistogram getPointLookupSizeHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getPointLookupSizeHisto(); + } + return null; + } + + public static LatencyHistogram getRangeScanLatencyHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = 
getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getRangeScanLatencyHisto(); + } + return null; + } + + public static SizeHistogram getRangeScanSizeHistogramForTable(String tableName) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + return tableMetrics.getTableHistograms().getRangeScanSizeHisto(); + } + return null; + } + + public static void updateHistogramMetricsForQueryLatency(String tableName, long elapsedTime, + boolean isPointLookup) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + LOGGER.trace("Updating latency histograms for select query: tableName: " + tableName + + " isPointLookup: " + isPointLookup + " elapsedTime: " + elapsedTime); + tableMetrics.getTableHistograms().getQueryLatencyHisto().add(elapsedTime); + if (isPointLookup) { + tableMetrics.getTableHistograms().getPointLookupLatencyHisto().add(elapsedTime); + } else { + tableMetrics.getTableHistograms().getRangeScanLatencyHisto().add(elapsedTime); + } + } + } + + public static void updateHistogramMetricsForQueryScanBytes(long scanBytes, String tableName, + boolean isPointLookup) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + tableMetrics.getTableHistograms().getQuerySizeHisto().add(scanBytes); + if (isPointLookup) { + tableMetrics.getTableHistograms().getPointLookupSizeHisto().add(scanBytes); + } else { + tableMetrics.getTableHistograms().getRangeScanSizeHisto().add(scanBytes); + } + } + } + + public static void updateSizeHistogramMetricsForMutations(String tableName, long mutationBytes, + boolean isUpsert) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + LOGGER.trace("Updating size histograms for mutations: tableName: " + tableName + " isUpsert: " + + isUpsert + " mutation bytes: " + mutationBytes); + + if (isUpsert) { + tableMetrics.getTableHistograms().getUpsertSizeHisto().add(mutationBytes); + } else { + tableMetrics.getTableHistograms().getDeleteSizeHisto().add(mutationBytes); + } + } + } + + private static TableClientMetrics getTableClientMetricsInstance(String tableName) { + TableClientMetrics tableMetrics = getInstance().getTableClientMetrics(tableName); + if (tableMetrics == null) { + LOGGER.trace("Table level client metrics are disabled for table: " + tableName); + return null; + } + return tableMetrics; + } + + public static void updateLatencyHistogramForMutations(String tableName, long elapsedTime, + boolean isUpsert) { + TableClientMetrics tableMetrics; + if ((tableMetrics = getTableClientMetricsInstance(tableName)) != null) { + LOGGER.trace("Updating latency histograms for mutations: tableName: " + tableName + + " isUpsert: " + isUpsert + " elapsedTime: " + elapsedTime); + if (isUpsert) { + tableMetrics.getTableHistograms().getUpsertLatencyHisto().add(elapsedTime); + } else { + tableMetrics.getTableHistograms().getDeleteLatencyHisto().add(elapsedTime); + } + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java index 531a6b36768..301e2dfb1c7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java 
@@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,46 +25,46 @@ import org.apache.phoenix.log.LogLevel; - /** - * Class to encapsulate the various metrics associated with submitting and executing a task to the phoenix client - * thread pool. + * Class to encapsulate the various metrics associated with submitting and executing a task to the + * phoenix client thread pool. */ public class TaskExecutionMetricsHolder { - private final CombinableMetric taskQueueWaitTime; - private final CombinableMetric taskEndToEndTime; - private final CombinableMetric taskExecutionTime; - private final CombinableMetric numTasks; - private final CombinableMetric numRejectedTasks; - public static final TaskExecutionMetricsHolder NO_OP_INSTANCE = new TaskExecutionMetricsHolder(new ReadMetricQueue(false,LogLevel.OFF), ""); - - public TaskExecutionMetricsHolder(ReadMetricQueue readMetrics, String tableName) { - taskQueueWaitTime = readMetrics.allotMetric(TASK_QUEUE_WAIT_TIME, tableName); - taskEndToEndTime = readMetrics.allotMetric(TASK_END_TO_END_TIME, tableName); - taskExecutionTime = readMetrics.allotMetric(TASK_EXECUTION_TIME, tableName); - numTasks = readMetrics.allotMetric(TASK_EXECUTED_COUNTER, tableName); - numRejectedTasks = readMetrics.allotMetric(TASK_REJECTED_COUNTER, tableName); - } + private final CombinableMetric taskQueueWaitTime; + private final CombinableMetric taskEndToEndTime; + private final CombinableMetric taskExecutionTime; + private final CombinableMetric numTasks; + private final CombinableMetric numRejectedTasks; + public static final TaskExecutionMetricsHolder NO_OP_INSTANCE = + new TaskExecutionMetricsHolder(new ReadMetricQueue(false, LogLevel.OFF), ""); + + public TaskExecutionMetricsHolder(ReadMetricQueue readMetrics, String tableName) { + taskQueueWaitTime = readMetrics.allotMetric(TASK_QUEUE_WAIT_TIME, tableName); + taskEndToEndTime = readMetrics.allotMetric(TASK_END_TO_END_TIME, tableName); + taskExecutionTime = readMetrics.allotMetric(TASK_EXECUTION_TIME, tableName); + numTasks = readMetrics.allotMetric(TASK_EXECUTED_COUNTER, tableName); + numRejectedTasks = readMetrics.allotMetric(TASK_REJECTED_COUNTER, tableName); + } - public CombinableMetric getTaskQueueWaitTime() { - return taskQueueWaitTime; - } + public CombinableMetric getTaskQueueWaitTime() { + return taskQueueWaitTime; + } - public CombinableMetric getTaskEndToEndTime() { - return taskEndToEndTime; - } + public CombinableMetric getTaskEndToEndTime() { + return taskEndToEndTime; + } - public CombinableMetric getTaskExecutionTime() { - return taskExecutionTime; - } + public CombinableMetric getTaskExecutionTime() { + return taskExecutionTime; + } - public CombinableMetric getNumTasks() { - return numTasks; - } + public CombinableMetric getNumTasks() { + return numTasks; + } - public CombinableMetric getNumRejectedTasks() { - return numRejectedTasks; - } + public CombinableMetric getNumRejectedTasks() { + return numRejectedTasks; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogram.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogram.java index 3e07ff9503b..6171140a427 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogram.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,19 +25,20 @@ import org.apache.phoenix.query.QueryServices; /** - * Histogram for calculating phoenix connection. We read ranges using - * config property {@link QueryServices#PHOENIX_HISTOGRAM_SIZE_RANGES}. - * If this property is not set then it will default to DEFAULT_RANGE values. + * Histogram for calculating phoenix connection. We read ranges using config property + * {@link QueryServices#PHOENIX_HISTOGRAM_SIZE_RANGES}. If this property is not set then it will + * default to DEFAULT_RANGE values. */ public class ConnectionQueryServicesHistogram extends RangeHistogram { - static final long[] DEFAULT_RANGE = {1, 10, 100, 500, 1000}; - public ConnectionQueryServicesHistogram(String name, String description, Configuration conf) { - super(initializeRanges(conf), name, description); - } + static final long[] DEFAULT_RANGE = { 1, 10, 100, 500, 1000 }; + + public ConnectionQueryServicesHistogram(String name, String description, Configuration conf) { + super(initializeRanges(conf), name, description); + } - private static long[] initializeRanges(Configuration conf) { - long[] ranges = PhoenixConfigurationUtilHelper.getLongs( - conf, CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES); - return ranges != null ? ranges : DEFAULT_RANGE; - } + private static long[] initializeRanges(Configuration conf) { + long[] ranges = + PhoenixConfigurationUtilHelper.getLongs(conf, CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES); + return ranges != null ? ranges : DEFAULT_RANGE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetrics.java index b3a8a1c3c1d..a3c6e37cb05 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetrics.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,86 +35,81 @@ * Class for Connection Query Service Metrics. */ public class ConnectionQueryServicesMetrics { - /** - * List Metrics tracked in Connection Query Service Metrics - */ - public enum QueryServiceMetrics { - CONNECTION_QUERY_SERVICE_OPEN_PHOENIX_CONNECTIONS_COUNTER(OPEN_PHOENIX_CONNECTIONS_COUNTER), - CONNECTION_QUERY_SERVICE_OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER( - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER), - CONNECTION_QUERY_SERVICE_PHOENIX_CONNECTIONS_THROTTLED_COUNTER( - PHOENIX_CONNECTIONS_THROTTLED_COUNTER); + /** + * List Metrics tracked in Connection Query Service Metrics + */ + public enum QueryServiceMetrics { + CONNECTION_QUERY_SERVICE_OPEN_PHOENIX_CONNECTIONS_COUNTER(OPEN_PHOENIX_CONNECTIONS_COUNTER), + CONNECTION_QUERY_SERVICE_OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER( + OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER), + CONNECTION_QUERY_SERVICE_PHOENIX_CONNECTIONS_THROTTLED_COUNTER( + PHOENIX_CONNECTIONS_THROTTLED_COUNTER); - private MetricType metricType; - private ConnectionQueryServicesMetric metric; + private MetricType metricType; + private ConnectionQueryServicesMetric metric; - QueryServiceMetrics(MetricType metricType) { - this.metricType = metricType; - } + QueryServiceMetrics(MetricType metricType) { + this.metricType = metricType; } + } - private final String connectionQueryServiceName; - private Map metricRegister; - private ConnectionQueryServicesMetricsHistograms connectionQueryServiceMetricsHistograms; + private final String connectionQueryServiceName; + private Map metricRegister; + private ConnectionQueryServicesMetricsHistograms connectionQueryServiceMetricsHistograms; - public ConnectionQueryServicesMetrics(final String connectionQueryServiceName, - Configuration conf) { - this.connectionQueryServiceName = connectionQueryServiceName; - metricRegister = new HashMap<>(); - for (QueryServiceMetrics connectionQueryServiceMetric - : QueryServiceMetrics.values()) { - connectionQueryServiceMetric.metric = - new ConnectionQueryServicesMetricImpl(connectionQueryServiceMetric.metricType); - metricRegister.put(connectionQueryServiceMetric.metricType, - connectionQueryServiceMetric.metric); - } - connectionQueryServiceMetricsHistograms = - new ConnectionQueryServicesMetricsHistograms(connectionQueryServiceName, conf); + public ConnectionQueryServicesMetrics(final String connectionQueryServiceName, + Configuration conf) { + this.connectionQueryServiceName = connectionQueryServiceName; + metricRegister = new HashMap<>(); + for (QueryServiceMetrics connectionQueryServiceMetric : QueryServiceMetrics.values()) { + connectionQueryServiceMetric.metric = + new ConnectionQueryServicesMetricImpl(connectionQueryServiceMetric.metricType); + metricRegister.put(connectionQueryServiceMetric.metricType, + connectionQueryServiceMetric.metric); } + connectionQueryServiceMetricsHistograms = + new ConnectionQueryServicesMetricsHistograms(connectionQueryServiceName, conf); + } - /** - * This function is used to update the value of Metric - * In case of counter val will be passed as 1. - * - * @param type metric type - * @param val update value. 
In case of counters, this will be 1 - */ - public void setMetricValue(MetricType type, long val) { - if (!metricRegister.containsKey(type)) { - return; - } - ConnectionQueryServicesMetric metric = metricRegister.get(type); - metric.set(val); + /** + * This function is used to update the value of Metric In case of counter val will be passed as 1. + * @param type metric type + * @param val update value. In case of counters, this will be 1 + */ + public void setMetricValue(MetricType type, long val) { + if (!metricRegister.containsKey(type)) { + return; } + ConnectionQueryServicesMetric metric = metricRegister.get(type); + metric.set(val); + } - /** - * This function is used to get the value of Metric. - * - * @param type metric type - * @return val current value of metric. - */ - public long getMetricValue(MetricType type) { - if (!metricRegister.containsKey(type)) { - return 0; - } - ConnectionQueryServicesMetric metric = metricRegister.get(type); - return metric.getValue(); + /** + * This function is used to get the value of Metric. + * @param type metric type + * @return val current value of metric. + */ + public long getMetricValue(MetricType type) { + if (!metricRegister.containsKey(type)) { + return 0; } + ConnectionQueryServicesMetric metric = metricRegister.get(type); + return metric.getValue(); + } - public String getConnectionQueryServiceName() { - return connectionQueryServiceName; - } + public String getConnectionQueryServiceName() { + return connectionQueryServiceName; + } - /** - * This method is called to aggregate all the Metrics across all Connection Query Service. - * - * @return map of Connection Query Service name -> list of ConnectionQueryServicesMetric. - */ - public List getAllMetrics() { - return new ArrayList<>(metricRegister.values()); - } + /** + * This method is called to aggregate all the Metrics across all Connection Query Service. + * @return map of Connection Query Service name -> list of ConnectionQueryServicesMetric. + */ + public List getAllMetrics() { + return new ArrayList<>(metricRegister.values()); + } - public ConnectionQueryServicesMetricsHistograms getConnectionQueryServiceHistograms() { - return connectionQueryServiceMetricsHistograms; - } + public ConnectionQueryServicesMetricsHistograms getConnectionQueryServiceHistograms() { + return connectionQueryServiceMetricsHistograms; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistograms.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistograms.java index 29e8a4b4f44..c91c62b205d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistograms.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistograms.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,50 +22,49 @@ import java.util.Collections; import java.util.List; -import edu.umd.cs.findbugs.annotations.SuppressWarnings; import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.monitoring.HistogramDistribution; +import edu.umd.cs.findbugs.annotations.SuppressWarnings; + /** - * Histogram Metrics for Connection Query Service Metrics. - * 1. Connection count - * 2. Internal Connection Count. + * Histogram Metrics for Connection Query Service Metrics. 1. Connection count 2. Internal + * Connection Count. */ public class ConnectionQueryServicesMetricsHistograms { - private String connectionQueryServicesName; - private ConnectionQueryServicesHistogram connectionQueryServiceOpenInternalSizeHistogram; - private ConnectionQueryServicesHistogram connectionQueryServicesOpenConnSizeHistogram; + private String connectionQueryServicesName; + private ConnectionQueryServicesHistogram connectionQueryServiceOpenInternalSizeHistogram; + private ConnectionQueryServicesHistogram connectionQueryServicesOpenConnSizeHistogram; - public ConnectionQueryServicesMetricsHistograms(String connectionQueryServiceName, - Configuration conf) { - connectionQueryServicesName = connectionQueryServiceName; - connectionQueryServiceOpenInternalSizeHistogram = new ConnectionQueryServicesHistogram( - "PhoenixInternalOpenConn", - "histogram for number of open internal phoenix connections", conf); - connectionQueryServicesOpenConnSizeHistogram = new ConnectionQueryServicesHistogram( - "PhoenixOpenConn", "histogram for number of open phoenix connections", conf); - } + public ConnectionQueryServicesMetricsHistograms(String connectionQueryServiceName, + Configuration conf) { + connectionQueryServicesName = connectionQueryServiceName; + connectionQueryServiceOpenInternalSizeHistogram = new ConnectionQueryServicesHistogram( + "PhoenixInternalOpenConn", "histogram for number of open internal phoenix connections", conf); + connectionQueryServicesOpenConnSizeHistogram = new ConnectionQueryServicesHistogram( + "PhoenixOpenConn", "histogram for number of open phoenix connections", conf); + } - public String getConnectionQueryServicesName() { - return this.connectionQueryServicesName; - } + public String getConnectionQueryServicesName() { + return this.connectionQueryServicesName; + } - @SuppressWarnings(value = "EI_EXPOSE_REP", - justification = "It's only used in internally for metrics storage") - public ConnectionQueryServicesHistogram getConnectionQueryServicesInternalOpenConnHisto() { - return connectionQueryServiceOpenInternalSizeHistogram; - } + @SuppressWarnings(value = "EI_EXPOSE_REP", + justification = "It's only used in internally for metrics storage") + public ConnectionQueryServicesHistogram getConnectionQueryServicesInternalOpenConnHisto() { + return connectionQueryServiceOpenInternalSizeHistogram; + } - @SuppressWarnings(value = "EI_EXPOSE_REP", - justification = "It's only used in internally for metrics storage") - public ConnectionQueryServicesHistogram getConnectionQueryServicesOpenConnHisto() { - return connectionQueryServicesOpenConnSizeHistogram; - } + @SuppressWarnings(value = "EI_EXPOSE_REP", + justification = "It's only used in internally for metrics storage") + public ConnectionQueryServicesHistogram 
getConnectionQueryServicesOpenConnHisto() { + return connectionQueryServicesOpenConnSizeHistogram; + } - public List getConnectionQueryServicesHistogramsDistribution() { - List list = new ArrayList(Arrays.asList( - this.connectionQueryServiceOpenInternalSizeHistogram.getRangeHistogramDistribution(), - this.connectionQueryServicesOpenConnSizeHistogram.getRangeHistogramDistribution())); - return Collections.unmodifiableList(list); - } + public List getConnectionQueryServicesHistogramsDistribution() { + List list = new ArrayList(Arrays.asList( + this.connectionQueryServiceOpenInternalSizeHistogram.getRangeHistogramDistribution(), + this.connectionQueryServicesOpenConnSizeHistogram.getRangeHistogramDistribution())); + return Collections.unmodifiableList(list); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManager.java index 01759246539..25d65cf6ca4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import edu.umd.cs.findbugs.annotations.SuppressWarnings; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; import org.apache.phoenix.monitoring.HistogramDistribution; @@ -35,309 +34,296 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import edu.umd.cs.findbugs.annotations.SuppressWarnings; + /** * Central place where we keep track of all the Connection Query Service metrics. 
Register each * Connection Query Service and store the instance of it associated with ConnectionServiceName in a * map This class exposes following functions as static functions to help catch all exception - * 1.clearAllConnectionQueryServiceMetrics - * 2.getConnectionQueryServicesMetrics - * 3.updateMetrics + * 1.clearAllConnectionQueryServiceMetrics 2.getConnectionQueryServicesMetrics 3.updateMetrics */ public class ConnectionQueryServicesMetricsManager { - private static final Logger LOGGER = - LoggerFactory.getLogger(ConnectionQueryServicesMetricsManager.class); - private static volatile boolean isConnectionQueryServiceMetricsEnabled; - private static volatile boolean isConnectionQueryServiceMetricPublisherEnabled; - private static ConcurrentMap - connectionQueryServiceMetricsMapping; - // Singleton object - private static volatile ConnectionQueryServicesMetricsManager - connectionQueryServicesMetricsManager = null; - private static volatile MetricPublisherSupplierFactory mPublisher = null; - private static volatile QueryServicesOptions options; + private static final Logger LOGGER = + LoggerFactory.getLogger(ConnectionQueryServicesMetricsManager.class); + private static volatile boolean isConnectionQueryServiceMetricsEnabled; + private static volatile boolean isConnectionQueryServiceMetricPublisherEnabled; + private static ConcurrentMap connectionQueryServiceMetricsMapping; + // Singleton object + private static volatile ConnectionQueryServicesMetricsManager connectionQueryServicesMetricsManager = + null; + private static volatile MetricPublisherSupplierFactory mPublisher = null; + private static volatile QueryServicesOptions options; - @SuppressWarnings(value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", justification = "This " + - "Object is only created once for the JVM") - public ConnectionQueryServicesMetricsManager(QueryServicesOptions opts) { - options = opts; - connectionQueryServiceMetricsMapping = new ConcurrentHashMap<>(); - isConnectionQueryServiceMetricsEnabled = options.isConnectionQueryServiceMetricsEnabled(); - isConnectionQueryServiceMetricPublisherEnabled = - options.isConnectionQueryServiceMetricsPublisherEnabled(); - LOGGER.info("Connection query service metrics enabled : " - + isConnectionQueryServiceMetricsEnabled + " publisher enabled : " - + isConnectionQueryServiceMetricPublisherEnabled); - } + @SuppressWarnings(value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", + justification = "This " + "Object is only created once for the JVM") + public ConnectionQueryServicesMetricsManager(QueryServicesOptions opts) { + options = opts; + connectionQueryServiceMetricsMapping = new ConcurrentHashMap<>(); + isConnectionQueryServiceMetricsEnabled = options.isConnectionQueryServiceMetricsEnabled(); + isConnectionQueryServiceMetricPublisherEnabled = + options.isConnectionQueryServiceMetricsPublisherEnabled(); + LOGGER + .info("Connection query service metrics enabled : " + isConnectionQueryServiceMetricsEnabled + + " publisher enabled : " + isConnectionQueryServiceMetricPublisherEnabled); + } - @SuppressWarnings(value = "EI_EXPOSE_STATIC_REP2", justification = "Only used for testing") - public static void setInstance(ConnectionQueryServicesMetricsManager metricsManager) { - connectionQueryServicesMetricsManager = metricsManager; - } + @SuppressWarnings(value = "EI_EXPOSE_STATIC_REP2", justification = "Only used for testing") + public static void setInstance(ConnectionQueryServicesMetricsManager metricsManager) { + connectionQueryServicesMetricsManager = metricsManager; + } - /** 
- * Function to provide instance of ConnectionQueryServiceMetricsManager(Create if needed in - * thread safe manner) - * @return returns instance of ConnectionQueryServicesMetricsManager - */ - @SuppressWarnings(value = "MS_EXPOSE_REP", justification = "Only used internally, not exposed" + - " to external client") - public static ConnectionQueryServicesMetricsManager getInstance() { + /** + * Function to provide instance of ConnectionQueryServiceMetricsManager(Create if needed in thread + * safe manner) + * @return returns instance of ConnectionQueryServicesMetricsManager + */ + @SuppressWarnings(value = "MS_EXPOSE_REP", + justification = "Only used internally, not exposed" + " to external client") + public static ConnectionQueryServicesMetricsManager getInstance() { + if (connectionQueryServicesMetricsManager == null) { + synchronized (ConnectionQueryServicesMetricsManager.class) { if (connectionQueryServicesMetricsManager == null) { - synchronized (ConnectionQueryServicesMetricsManager.class) { - if (connectionQueryServicesMetricsManager == null) { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - if (options.isConnectionQueryServiceMetricsEnabled()) { - connectionQueryServicesMetricsManager = - new ConnectionQueryServicesMetricsManager(options); - LOGGER.info("Created object for Connection query service metrics manager"); - } else { - connectionQueryServicesMetricsManager = - NoOpConnectionQueryServicesMetricsManager.NO_OP_CONN_QUERY_SERVICES_METRICS_MANAGER; - LOGGER.info("Created object for NoOp Connection query service metrics manager"); - return connectionQueryServicesMetricsManager; - } - registerMetricsPublisher(); - } - } + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + if (options.isConnectionQueryServiceMetricsEnabled()) { + connectionQueryServicesMetricsManager = + new ConnectionQueryServicesMetricsManager(options); + LOGGER.info("Created object for Connection query service metrics manager"); + } else { + connectionQueryServicesMetricsManager = + NoOpConnectionQueryServicesMetricsManager.NO_OP_CONN_QUERY_SERVICES_METRICS_MANAGER; + LOGGER.info("Created object for NoOp Connection query service metrics manager"); + return connectionQueryServicesMetricsManager; + } + registerMetricsPublisher(); } - return connectionQueryServicesMetricsManager; + } } + return connectionQueryServicesMetricsManager; + } - ConnectionQueryServicesMetricsManager() { - - } + ConnectionQueryServicesMetricsManager() { - public static void registerMetricsPublisher() { - if (isConnectionQueryServiceMetricPublisherEnabled) { - String className = options.getConnectionQueryServiceMetricsPublisherClass(); - if (className != null) { - MetricServiceResolver mResolver = new MetricServiceResolver(); - LOGGER.info("Connection query service metrics publisher className " - + className); - try { - mPublisher = mResolver.instantiate(className); - mPublisher.registerMetricProvider(); - } catch (Throwable e) { - LOGGER.error("The exception from metric publish Function", e); - } + } - } else { - LOGGER.warn("Connection query service metrics publisher className" - + " can't be null"); - } + public static void registerMetricsPublisher() { + if (isConnectionQueryServiceMetricPublisherEnabled) { + String className = options.getConnectionQueryServiceMetricsPublisherClass(); + if (className != null) { + MetricServiceResolver mResolver = new MetricServiceResolver(); + LOGGER.info("Connection query service metrics publisher className " + className); + try { + mPublisher = 
mResolver.instantiate(className); + mPublisher.registerMetricProvider(); + } catch (Throwable e) { + LOGGER.error("The exception from metric publish Function", e); } + + } else { + LOGGER.warn("Connection query service metrics publisher className" + " can't be null"); + } } + } - /** - * Function to provide Object of ConnectionQueryServicesMetrics (Create if needed in - * thread safe manner) for connectionQueryServiceName - * @param connectionQueryServiceName Connection Query Service Name - * @return returns instance of ConnectionQueryServicesMetrics for connectionQueryServiceName - */ - ConnectionQueryServicesMetrics getConnectionQueryServiceMetricsInstance( - String connectionQueryServiceName) { - if (Strings.isNullOrEmpty(connectionQueryServiceName)) { - LOGGER.warn("Connection query service Name can't be null or empty"); - return null; - } + /** + * Function to provide Object of ConnectionQueryServicesMetrics (Create if needed in thread safe + * manner) for connectionQueryServiceName + * @param connectionQueryServiceName Connection Query Service Name + * @return returns instance of ConnectionQueryServicesMetrics for connectionQueryServiceName + */ + ConnectionQueryServicesMetrics + getConnectionQueryServiceMetricsInstance(String connectionQueryServiceName) { + if (Strings.isNullOrEmpty(connectionQueryServiceName)) { + LOGGER.warn("Connection query service Name can't be null or empty"); + return null; + } - ConnectionQueryServicesMetrics cqsInstance = - connectionQueryServiceMetricsMapping.get(connectionQueryServiceName); + ConnectionQueryServicesMetrics cqsInstance = + connectionQueryServiceMetricsMapping.get(connectionQueryServiceName); + if (cqsInstance == null) { + synchronized (ConnectionQueryServicesMetricsManager.class) { + cqsInstance = connectionQueryServiceMetricsMapping.get(connectionQueryServiceName); if (cqsInstance == null) { - synchronized (ConnectionQueryServicesMetricsManager.class) { - cqsInstance = connectionQueryServiceMetricsMapping.get(connectionQueryServiceName); - if (cqsInstance == null) { - LOGGER.info("Creating connection query service metrics object for : " - + connectionQueryServiceName); - cqsInstance = new ConnectionQueryServicesMetrics(connectionQueryServiceName, - options.getConfiguration()); - connectionQueryServiceMetricsMapping - .put(connectionQueryServiceName, cqsInstance); - } - } + LOGGER.info( + "Creating connection query service metrics object for : " + connectionQueryServiceName); + cqsInstance = new ConnectionQueryServicesMetrics(connectionQueryServiceName, + options.getConfiguration()); + connectionQueryServiceMetricsMapping.put(connectionQueryServiceName, cqsInstance); } - return cqsInstance; + } } + return cqsInstance; + } - /** - * This function will be used to add individual MetricType to LocalStore. Also this will serve - * as LocalStore to store connection query service metrics before their current value is added - * to histogram. - * This func is only used for metrics which are counter based, where values increases or - * decreases frequently. Like Open Conn Counter. This function will first retrieve it's current - * value and increment or decrement (by +/-1) it as required then update the new values. - *
- * Example :- OPEN_PHOENIX_CONNECTIONS_COUNTER, OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER - *
- *
- * histogram will update with each increment/decrement. - * @param connectionQueryServiceName - * @param type - * @param value - */ - void updateMetricsValue(String connectionQueryServiceName, MetricType type, - long value) { + /** + * This function will be used to add individual MetricType to LocalStore. Also this will serve as + * LocalStore to store connection query service metrics before their current value is added to + * histogram. This func is only used for metrics which are counter based, where values increases + * or decreases frequently. Like Open Conn Counter. This function will first retrieve it's current + * value and increment or decrement (by +/-1) it as required then update the new values.
+ * Example :- OPEN_PHOENIX_CONNECTIONS_COUNTER, OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER
+ *
+ * histogram will update with each increment/decrement. + */ + void updateMetricsValue(String connectionQueryServiceName, MetricType type, long value) { - long startTime = EnvironmentEdgeManager.currentTime(); - - ConnectionQueryServicesMetrics cqsInstance = - getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); - if (cqsInstance == null) { - return; - } - cqsInstance.setMetricValue(type, value); + long startTime = EnvironmentEdgeManager.currentTime(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Connection query service metrics completed updating metric " - + type + " to value " + value + ", timetaken = " - + (EnvironmentEdgeManager.currentTime() - startTime)); - } + ConnectionQueryServicesMetrics cqsInstance = + getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); + if (cqsInstance == null) { + return; } + cqsInstance.setMetricValue(type, value); - /** - * static functions to push, update or retrieve ConnectionQueryService Metrics. - * @param connectionQueryServiceName name of the connection query service - * @param type type of metric - * @param value metric value - */ - public static void updateMetrics(String connectionQueryServiceName, MetricType type, - long value) { - try { - ConnectionQueryServicesMetricsManager.getInstance() - .updateMetricsValue(connectionQueryServiceName, type, value); - } catch (Exception e) { - LOGGER.error("Failed updating connection query service metrics", e); - } + if (LOGGER.isTraceEnabled()) { + LOGGER + .trace("Connection query service metrics completed updating metric " + type + " to value " + + value + ", timetaken = " + (EnvironmentEdgeManager.currentTime() - startTime)); } + } - public static Map> getAllConnectionQueryServicesMetrics() { - return ConnectionQueryServicesMetricsManager.getInstance() - .getConnectionQueryServicesMetrics(); + /** + * static functions to push, update or retrieve ConnectionQueryService Metrics. + * @param connectionQueryServiceName name of the connection query service + * @param type type of metric + * @param value metric value + */ + public static void updateMetrics(String connectionQueryServiceName, MetricType type, long value) { + try { + ConnectionQueryServicesMetricsManager.getInstance() + .updateMetricsValue(connectionQueryServiceName, type, value); + } catch (Exception e) { + LOGGER.error("Failed updating connection query service metrics", e); } + } - /** - * This function will return all the counters for Phoenix connection query service. - * @return Map of all ConnectionQueryService Metrics. 
- */ - Map> getConnectionQueryServicesMetrics() { - try { - long startTime = EnvironmentEdgeManager.currentTime(); - Map> map = new HashMap<>(); - for (Map.Entry entry - : connectionQueryServiceMetricsMapping.entrySet()) { - map.put(entry.getKey(), entry.getValue().getAllMetrics()); - } - long timeTakenForMetricConversion = EnvironmentEdgeManager.currentTime() - startTime; - LOGGER.info("Connection query service metrics fetching complete, timeTaken: " - + timeTakenForMetricConversion); - return map; - } catch (Exception e) { - LOGGER.error("Failed retrieving connection query service Metrics", e); - } - return null; - } + public static Map> + getAllConnectionQueryServicesMetrics() { + return ConnectionQueryServicesMetricsManager.getInstance().getConnectionQueryServicesMetrics(); + } - public static Map> getHistogramsForAllConnectionQueryServices() { - return ConnectionQueryServicesMetricsManager.getInstance() - .getHistogramsForConnectionQueryServices(); + /** + * This function will return all the counters for Phoenix connection query service. + * @return Map of all ConnectionQueryService Metrics. + */ + Map> getConnectionQueryServicesMetrics() { + try { + long startTime = EnvironmentEdgeManager.currentTime(); + Map> map = new HashMap<>(); + for (Map.Entry entry : connectionQueryServiceMetricsMapping.entrySet()) { + map.put(entry.getKey(), entry.getValue().getAllMetrics()); + } + long timeTakenForMetricConversion = EnvironmentEdgeManager.currentTime() - startTime; + LOGGER.info("Connection query service metrics fetching complete, timeTaken: " + + timeTakenForMetricConversion); + return map; + } catch (Exception e) { + LOGGER.error("Failed retrieving connection query service Metrics", e); } + return null; + } - /** - * This function will return histogram for all the Phoenix connection query service metrics. - * @return Map of all ConnectionServiceMetrics Histogram - */ - Map> getHistogramsForConnectionQueryServices() { - Map> map = new HashMap<>(); - for (Map.Entry entry - : connectionQueryServiceMetricsMapping.entrySet()) { - ConnectionQueryServicesMetricsHistograms connectionQueryServiceHistogramsHistograms = - entry.getValue().getConnectionQueryServiceHistograms(); - map.put(entry.getKey(), connectionQueryServiceHistogramsHistograms - .getConnectionQueryServicesHistogramsDistribution()); - } - return map; + public static Map> + getHistogramsForAllConnectionQueryServices() { + return ConnectionQueryServicesMetricsManager.getInstance() + .getHistogramsForConnectionQueryServices(); + } + + /** + * This function will return histogram for all the Phoenix connection query service metrics. 
+ * @return Map of all ConnectionServiceMetrics Histogram + */ + Map> getHistogramsForConnectionQueryServices() { + Map> map = new HashMap<>(); + for (Map.Entry entry : connectionQueryServiceMetricsMapping.entrySet()) { + ConnectionQueryServicesMetricsHistograms connectionQueryServiceHistogramsHistograms = + entry.getValue().getConnectionQueryServiceHistograms(); + map.put(entry.getKey(), connectionQueryServiceHistogramsHistograms + .getConnectionQueryServicesHistogramsDistribution()); } + return map; + } - /** - * Function to update {@link MetricType#OPEN_PHOENIX_CONNECTIONS_COUNTER} counter value in - * Histogram - * @param connCount current count of - * {@link MetricType#OPEN_PHOENIX_CONNECTIONS_COUNTER} - * @param connectionQueryServiceName ConnectionQueryService name - */ - public static void updateConnectionQueryServiceOpenConnectionHistogram(long connCount, - String connectionQueryServiceName) { - ConnectionQueryServicesMetrics metrics = - getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); - if (metrics == null) { - return; - } - metrics.getConnectionQueryServiceHistograms().getConnectionQueryServicesOpenConnHisto() - .add(connCount); + /** + * Function to update {@link MetricType#OPEN_PHOENIX_CONNECTIONS_COUNTER} counter value in + * Histogram + * @param connCount current count of + * {@link MetricType#OPEN_PHOENIX_CONNECTIONS_COUNTER} + * @param connectionQueryServiceName ConnectionQueryService name + */ + public static void updateConnectionQueryServiceOpenConnectionHistogram(long connCount, + String connectionQueryServiceName) { + ConnectionQueryServicesMetrics metrics = + getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); + if (metrics == null) { + return; } + metrics.getConnectionQueryServiceHistograms().getConnectionQueryServicesOpenConnHisto() + .add(connCount); + } - /** - * Function to update {@link MetricType#OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER} counter value - * in Histogram - * @param connCount current count of - * {@link - * MetricType#OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER} - * @param connectionQueryServiceName ConnectionQueryService name - */ - public static void updateConnectionQueryServiceOpenInternalConnectionHistogram(long connCount, - String connectionQueryServiceName) { - ConnectionQueryServicesMetrics metrics = - getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); - if (metrics == null) { - return; - } - metrics.getConnectionQueryServiceHistograms() - .getConnectionQueryServicesInternalOpenConnHisto().add(connCount); + /** + * Function to update {@link MetricType#OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER} counter value + * in Histogram + * @param connCount current count of + * {@link MetricType#OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER} + * @param connectionQueryServiceName ConnectionQueryService name + */ + public static void updateConnectionQueryServiceOpenInternalConnectionHistogram(long connCount, + String connectionQueryServiceName) { + ConnectionQueryServicesMetrics metrics = + getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); + if (metrics == null) { + return; } + metrics.getConnectionQueryServiceHistograms().getConnectionQueryServicesInternalOpenConnHisto() + .add(connCount); + } - ///////////////////////////////////////////////////////// - ////// Below Functions are majorly used in testing ////// - ///////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////// 
+ ////// Below Functions are majorly used in testing ////// + ///////////////////////////////////////////////////////// - public static ConnectionQueryServicesHistogram getConnectionQueryServiceOpenInternalConnectionHistogram( - String connectionQueryServiceName) { - ConnectionQueryServicesMetrics metrics = - getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); - if (metrics == null) { - return null; - } - return metrics.getConnectionQueryServiceHistograms() - .getConnectionQueryServicesInternalOpenConnHisto(); + public static ConnectionQueryServicesHistogram + getConnectionQueryServiceOpenInternalConnectionHistogram(String connectionQueryServiceName) { + ConnectionQueryServicesMetrics metrics = + getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); + if (metrics == null) { + return null; } + return metrics.getConnectionQueryServiceHistograms() + .getConnectionQueryServicesInternalOpenConnHisto(); + } - public static ConnectionQueryServicesHistogram - getConnectionQueryServiceOpenConnectionHistogram(String connectionQueryServiceName) { - ConnectionQueryServicesMetrics metrics = - getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); - if (metrics == null) { - return null; - } - return metrics.getConnectionQueryServiceHistograms() - .getConnectionQueryServicesOpenConnHisto(); + public static ConnectionQueryServicesHistogram + getConnectionQueryServiceOpenConnectionHistogram(String connectionQueryServiceName) { + ConnectionQueryServicesMetrics metrics = + getInstance().getConnectionQueryServiceMetricsInstance(connectionQueryServiceName); + if (metrics == null) { + return null; } + return metrics.getConnectionQueryServiceHistograms().getConnectionQueryServicesOpenConnHisto(); + } - /** - * Helps reset the localstore(connectionQueryServiceMetricsMapping) - */ - void clearConnectionQueryServiceMetrics() { - if (connectionQueryServiceMetricsMapping != null) { - connectionQueryServiceMetricsMapping.clear(); - } - LOGGER.info("Connection query service metrics clearing complete"); + /** + * Helps reset the localstore(connectionQueryServiceMetricsMapping) + */ + void clearConnectionQueryServiceMetrics() { + if (connectionQueryServiceMetricsMapping != null) { + connectionQueryServiceMetricsMapping.clear(); } + LOGGER.info("Connection query service metrics clearing complete"); + } - public static void clearAllConnectionQueryServiceMetrics() { - try { - ConnectionQueryServicesMetricsManager.getInstance() - .clearConnectionQueryServiceMetrics(); - } catch (Exception e) { - LOGGER.error("Failed resetting connection query service Metrics", e); - } + public static void clearAllConnectionQueryServiceMetrics() { + try { + ConnectionQueryServicesMetricsManager.getInstance().clearConnectionQueryServiceMetrics(); + } catch (Exception e) { + LOGGER.error("Failed resetting connection query service Metrics", e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/NoOpConnectionQueryServicesMetricsManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/NoOpConnectionQueryServicesMetricsManager.java index cf3ec00de3e..60edbc030d1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/NoOpConnectionQueryServicesMetricsManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/connectionqueryservice/NoOpConnectionQueryServicesMetricsManager.java @@ -1,4 +1,4 
@@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,35 +28,35 @@ /** * ConnectionQueryServicesMetricsManager will be replaced by this class when * {@link org.apache.phoenix.query.QueryServices#CONNECTION_QUERY_SERVICE_METRICS_ENABLED} flag is - * set to false. + * set to false. */ -public class NoOpConnectionQueryServicesMetricsManager extends ConnectionQueryServicesMetricsManager { +public class NoOpConnectionQueryServicesMetricsManager + extends ConnectionQueryServicesMetricsManager { - public static final NoOpConnectionQueryServicesMetricsManager NO_OP_CONN_QUERY_SERVICES_METRICS_MANAGER = - new NoOpConnectionQueryServicesMetricsManager(); + public static final NoOpConnectionQueryServicesMetricsManager NO_OP_CONN_QUERY_SERVICES_METRICS_MANAGER = + new NoOpConnectionQueryServicesMetricsManager(); - private NoOpConnectionQueryServicesMetricsManager() { - super(); - } + private NoOpConnectionQueryServicesMetricsManager() { + super(); + } - void updateMetricsValue(String connectionQueryServiceName, MetricType type, - long value) { - } + void updateMetricsValue(String connectionQueryServiceName, MetricType type, long value) { + } - Map> getConnectionQueryServicesMetrics() { - return Collections.emptyMap(); - } + Map> getConnectionQueryServicesMetrics() { + return Collections.emptyMap(); + } - Map> getHistogramsForConnectionQueryServices() { - return Collections.emptyMap(); - } + Map> getHistogramsForConnectionQueryServices() { + return Collections.emptyMap(); + } - void clearConnectionQueryServiceMetrics() { + void clearConnectionQueryServiceMetrics() { - } + } - ConnectionQueryServicesMetrics getConnectionQueryServiceMetricsInstance( - String connectionQueryServiceName) { - return null; - } + ConnectionQueryServicesMetrics + getConnectionQueryServiceMetricsInstance(String connectionQueryServiceName) { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/Cost.java b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/Cost.java index 788e4b9aa36..20aec4e9e13 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/Cost.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/Cost.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,104 +20,96 @@ import java.util.Objects; /** - * Optimizer cost in terms of CPU, memory, and I/O usage, the unit of which is now the - * number of bytes processed. - * + * Optimizer cost in terms of CPU, memory, and I/O usage, the unit of which is now the number of + * bytes processed. */ public class Cost implements Comparable { - /** The unknown cost. 
*/ - public static final Cost UNKNOWN = new Cost(Double.NaN, Double.NaN, Double.NaN) { - @Override - public String toString() { - return "{unknown}"; - } - }; - - /** The zero cost. */ - public static final Cost ZERO = new Cost(0, 0, 0) { - @Override - public String toString() { - return "{zero}"; - } - }; - - private final double cpu; - private final double memory; - private final double io; - - public Cost(double cpu, double memory, double io) { - this.cpu = cpu; - this.memory = memory; - this.io = io; + /** The unknown cost. */ + public static final Cost UNKNOWN = new Cost(Double.NaN, Double.NaN, Double.NaN) { + @Override + public String toString() { + return "{unknown}"; } + }; - public double getCpu() { - return cpu; + /** The zero cost. */ + public static final Cost ZERO = new Cost(0, 0, 0) { + @Override + public String toString() { + return "{zero}"; } + }; - public double getMemory() { - return memory; - } + private final double cpu; + private final double memory; + private final double io; - public double getIo() { - return io; - } + public Cost(double cpu, double memory, double io) { + this.cpu = cpu; + this.memory = memory; + this.io = io; + } - public boolean isUnknown() { - return this == UNKNOWN; - } + public double getCpu() { + return cpu; + } - public Cost plus(Cost other) { - if (isUnknown() || other.isUnknown()) { - return UNKNOWN; - } + public double getMemory() { + return memory; + } - return new Cost( - this.cpu + other.cpu, - this.memory + other.memory, - this.io + other.io); - } + public double getIo() { + return io; + } - public Cost multiplyBy(double factor) { - if (isUnknown()) { - return UNKNOWN; - } + public boolean isUnknown() { + return this == UNKNOWN; + } - return new Cost( - this.cpu * factor, - this.memory * factor, - this.io * factor); + public Cost plus(Cost other) { + if (isUnknown() || other.isUnknown()) { + return UNKNOWN; } - // TODO right now for simplicity, we choose to ignore CPU and memory costs. We may - // add those into account as our cost model mature. - @Override - public int compareTo(Cost other) { - if (isUnknown() && other.isUnknown()) { - return 0; - } else if (isUnknown() && !other.isUnknown()) { - return 1; - } else if (!isUnknown() && other.isUnknown()) { - return -1; - } - - double d = this.io - other.io; - return d == 0 ? 0 : (d > 0 ? 1 : -1); - } + return new Cost(this.cpu + other.cpu, this.memory + other.memory, this.io + other.io); + } - @Override - public boolean equals(Object obj) { - return this == obj - || (obj instanceof Cost && this.compareTo((Cost) obj) == 0); + public Cost multiplyBy(double factor) { + if (isUnknown()) { + return UNKNOWN; } - @Override - public int hashCode() { - return Objects.hash(cpu, memory, io); + return new Cost(this.cpu * factor, this.memory * factor, this.io * factor); + } + + // TODO right now for simplicity, we choose to ignore CPU and memory costs. We may + // add those into account as our cost model mature. + @Override + public int compareTo(Cost other) { + if (isUnknown() && other.isUnknown()) { + return 0; + } else if (isUnknown() && !other.isUnknown()) { + return 1; + } else if (!isUnknown() && other.isUnknown()) { + return -1; } - @Override - public String toString() { - return "{cpu: " + cpu + ", memory: " + memory + ", io: " + io + "}"; - } + double d = this.io - other.io; + return d == 0 ? 0 : (d > 0 ? 
1 : -1); + } + + @Override + public boolean equals(Object obj) { + return this == obj || (obj instanceof Cost && this.compareTo((Cost) obj) == 0); + } + + @Override + public int hashCode() { + return Objects.hash(cpu, memory, io); + } + + @Override + public String toString() { + return "{cpu: " + cpu + ", memory: " + memory + ", io: " + io + "}"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/GenSubqueryParamValuesRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/GenSubqueryParamValuesRewriter.java index 567e92e0ca2..f3ae04de6a1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/GenSubqueryParamValuesRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/GenSubqueryParamValuesRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.optimize; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + import org.apache.phoenix.compile.ExpressionCompiler; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; @@ -33,121 +38,114 @@ import org.apache.phoenix.parse.SubqueryParseNode; import org.apache.phoenix.schema.types.PDataType; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - /** - * Creates a new WHERE clause by replaces non-correlated sub-queries with dummy values. - * - * Note that this class does not check the presence of correlation, thus it should only - * be used after de-correlation has been performed. + * Creates a new WHERE clause by replaces non-correlated sub-queries with dummy values. Note that + * this class does not check the presence of correlation, thus it should only be used after + * de-correlation has been performed. */ public class GenSubqueryParamValuesRewriter extends ParseNodeRewriter { - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - private final ExpressionCompiler expressionCompiler; - - public static ParseNode replaceWithDummyValues( - ParseNode where, StatementContext context) throws SQLException { - return rewrite(where, new GenSubqueryParamValuesRewriter(context)); - } - - private GenSubqueryParamValuesRewriter(StatementContext context) { - this.expressionCompiler = new ExpressionCompiler(context); - } + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - protected List generateDummyValues( - ParseNode lhs, boolean multipleValues) throws SQLException { - Expression expr = lhs.accept(expressionCompiler); - PDataType type = expr.getDataType(); - if (!multipleValues) { - return Arrays. asList(NODE_FACTORY.literal(type.getSampleValue(), type)); - } - - return Arrays. 
asList( - NODE_FACTORY.literal(type.getSampleValue(), type), - NODE_FACTORY.literal(type.getSampleValue(), type), - NODE_FACTORY.literal(type.getSampleValue(), type)); - } - - @Override - public ParseNode visitLeave(AndParseNode node, List l) throws SQLException { - return leaveCompoundNode(node, l, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - if (children.isEmpty()) { - return null; - } - if (children.size() == 1) { - return children.get(0); - } - return NODE_FACTORY.and(children); - } - }); - } + private final ExpressionCompiler expressionCompiler; - @Override - public ParseNode visitLeave(OrParseNode node, List l) throws SQLException { - return leaveCompoundNode(node, l, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - if (children.isEmpty()) { - return null; - } - if (children.size() == 1) { - return children.get(0); - } - return NODE_FACTORY.or(children); - } - }); - } + public static ParseNode replaceWithDummyValues(ParseNode where, StatementContext context) + throws SQLException { + return rewrite(where, new GenSubqueryParamValuesRewriter(context)); + } - @Override - public ParseNode visitLeave(InParseNode node, List l) throws SQLException { - ParseNode lhs = l.get(0); - List inList = generateDummyValues(lhs, true); - List children = new ArrayList(); - children.add(lhs); - children.addAll(inList); - return NODE_FACTORY.inList(children, node.isNegate()); - } + private GenSubqueryParamValuesRewriter(StatementContext context) { + this.expressionCompiler = new ExpressionCompiler(context); + } - @Override - public ParseNode visitLeave(ExistsParseNode node, List l) throws SQLException { - return null; + protected List generateDummyValues(ParseNode lhs, boolean multipleValues) + throws SQLException { + Expression expr = lhs.accept(expressionCompiler); + PDataType type = expr.getDataType(); + if (!multipleValues) { + return Arrays. asList(NODE_FACTORY.literal(type.getSampleValue(), type)); } - @Override - public ParseNode visitLeave(ComparisonParseNode node, List l) throws SQLException { - if (!(l.get(1) instanceof SubqueryParseNode)) { - super.visitLeave(node, l); + return Arrays. 
asList(NODE_FACTORY.literal(type.getSampleValue(), type), + NODE_FACTORY.literal(type.getSampleValue(), type), + NODE_FACTORY.literal(type.getSampleValue(), type)); + } + + @Override + public ParseNode visitLeave(AndParseNode node, List l) throws SQLException { + return leaveCompoundNode(node, l, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + if (children.isEmpty()) { + return null; } - - ParseNode lhs = l.get(0); - List rhs = generateDummyValues(lhs, false); - List children = new ArrayList(); - children.add(lhs); - children.add(rhs.get(0)); - return super.visitLeave(node, children); - } - - @Override - public ParseNode visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { - ComparisonParseNode compare = (ComparisonParseNode) l.get(1); - ParseNode lhs = compare.getLHS(); - List rhs = generateDummyValues(lhs, false); - - return NODE_FACTORY.comparison(compare.getFilterOp(), lhs, rhs.get(0)); + if (children.size() == 1) { + return children.get(0); + } + return NODE_FACTORY.and(children); + } + }); + } + + @Override + public ParseNode visitLeave(OrParseNode node, List l) throws SQLException { + return leaveCompoundNode(node, l, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + if (children.isEmpty()) { + return null; + } + if (children.size() == 1) { + return children.get(0); + } + return NODE_FACTORY.or(children); + } + }); + } + + @Override + public ParseNode visitLeave(InParseNode node, List l) throws SQLException { + ParseNode lhs = l.get(0); + List inList = generateDummyValues(lhs, true); + List children = new ArrayList(); + children.add(lhs); + children.addAll(inList); + return NODE_FACTORY.inList(children, node.isNegate()); + } + + @Override + public ParseNode visitLeave(ExistsParseNode node, List l) throws SQLException { + return null; + } + + @Override + public ParseNode visitLeave(ComparisonParseNode node, List l) throws SQLException { + if (!(l.get(1) instanceof SubqueryParseNode)) { + super.visitLeave(node, l); } - @Override - public ParseNode visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { - ComparisonParseNode compare = (ComparisonParseNode) l.get(1); - ParseNode lhs = compare.getLHS(); - List rhs = generateDummyValues(lhs, false); - - return NODE_FACTORY.comparison(compare.getFilterOp(), lhs, rhs.get(0)); - } + ParseNode lhs = l.get(0); + List rhs = generateDummyValues(lhs, false); + List children = new ArrayList(); + children.add(lhs); + children.add(rhs.get(0)); + return super.visitLeave(node, children); + } + + @Override + public ParseNode visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + ComparisonParseNode compare = (ComparisonParseNode) l.get(1); + ParseNode lhs = compare.getLHS(); + List rhs = generateDummyValues(lhs, false); + + return NODE_FACTORY.comparison(compare.getFilterOp(), lhs, rhs.get(0)); + } + + @Override + public ParseNode visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + ComparisonParseNode compare = (ComparisonParseNode) l.get(1); + ParseNode lhs = compare.getLHS(); + List rhs = generateDummyValues(lhs, false); + + return NODE_FACTORY.comparison(compare.getFilterOp(), lhs, rhs.get(0)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java index 6433f9ab319..76c25f46aff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.optimize; +import static org.apache.phoenix.query.QueryConstants.CDC_JSON_COL_NAME; + import java.sql.SQLException; import java.util.Collections; import java.util.Comparator; @@ -73,797 +74,836 @@ import org.apache.phoenix.schema.RowValueConstructorOffsetNotCoercibleException; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.CDCUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ParseNodeUtil; import org.apache.phoenix.util.ParseNodeUtil.RewriteResult; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.SchemaUtil; -import static org.apache.phoenix.query.QueryConstants.CDC_JSON_COL_NAME; - public class QueryOptimizer { - private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); - - private final QueryServices services; - private final boolean useIndexes; - private final boolean costBased; - private long indexPendingDisabledThreshold; - - public QueryOptimizer(QueryServices services) { - this.services = services; - this.useIndexes = this.services.getProps().getBoolean(QueryServices.USE_INDEXES_ATTRIB, QueryServicesOptions.DEFAULT_USE_INDEXES); - this.costBased = this.services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED); - this.indexPendingDisabledThreshold = this.services.getProps().getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD, - QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD); + private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); + + private final QueryServices services; + private final boolean useIndexes; + private final boolean costBased; + private long indexPendingDisabledThreshold; + + public QueryOptimizer(QueryServices services) { + this.services = services; + this.useIndexes = this.services.getProps().getBoolean(QueryServices.USE_INDEXES_ATTRIB, + QueryServicesOptions.DEFAULT_USE_INDEXES); + this.costBased = this.services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, + QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED); + this.indexPendingDisabledThreshold = + this.services.getProps().getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD, + QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD); + } + + public QueryPlan optimize(PhoenixStatement statement, QueryPlan dataPlan) throws SQLException { + if (dataPlan.getTableRef() == null) { + return dataPlan; + } + return optimize(dataPlan, statement, Collections. emptyList(), null); + } + + public QueryPlan optimize(PhoenixStatement statement, SelectStatement select) + throws SQLException { + return optimize(statement, select, + FromCompiler.getResolverForQuery(select, statement.getConnection()), + Collections. 
emptyList(), null); + } + + public QueryPlan optimize(PhoenixStatement statement, SelectStatement select, + ColumnResolver resolver, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory) throws SQLException { + QueryCompiler compiler = new QueryCompiler(statement, select, resolver, targetColumns, + parallelIteratorFactory, new SequenceManager(statement)); + QueryPlan dataPlan = compiler.compile(); + return optimize(dataPlan, statement, targetColumns, parallelIteratorFactory); + } + + public QueryPlan optimize(QueryPlan dataPlan, PhoenixStatement statement, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory) + throws SQLException { + List plans = + getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, true); + return plans.get(0); + } + + public List getBestPlan(QueryPlan dataPlan, PhoenixStatement statement, + SelectStatement select, ColumnResolver resolver, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory) throws SQLException { + return getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, true); + } + + public List getApplicablePlans(QueryPlan dataPlan, PhoenixStatement statement, + SelectStatement select, ColumnResolver resolver, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory) throws SQLException { + return getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, false); + } + + private List getApplicablePlans(QueryPlan dataPlan, PhoenixStatement statement, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + boolean stopAtBestPlan) throws SQLException { + if (!useIndexes) { + return Collections.singletonList(dataPlan); } - public QueryPlan optimize(PhoenixStatement statement, QueryPlan dataPlan) throws SQLException { - if (dataPlan.getTableRef() == null) { - return dataPlan; - } - return optimize(dataPlan, statement, Collections.emptyList(), null); + SelectStatement select = (SelectStatement) dataPlan.getStatement(); + if ( + !select.isUnion() && !select.isJoin() && select.getInnerSelectStatement() == null + && (select.getWhere() == null || !select.getWhere().hasSubquery()) + ) { + return getApplicablePlansForSingleFlatQuery(dataPlan, statement, targetColumns, + parallelIteratorFactory, stopAtBestPlan); } - public QueryPlan optimize(PhoenixStatement statement, SelectStatement select) throws SQLException { - return optimize(statement, select, FromCompiler.getResolverForQuery(select, statement.getConnection()), Collections.emptyList(), null); + Map dataPlans = null; + // Find the optimal index plan for each join tables in a join query or a + // non-correlated sub-query, then rewrite the query with found index tables. + if (select.isJoin() || (select.getWhere() != null && select.getWhere().hasSubquery())) { + ColumnResolver resolver = FromCompiler.getResolverForQuery(select, statement.getConnection()); + JoinCompiler.JoinTable join = JoinCompiler.compile(statement, select, resolver); + Map replacement = null; + for (JoinCompiler.Table table : join.getAllTables()) { + if (table.isSubselect()) continue; + TableRef tableRef = table.getTableRef(); + SelectStatement stmt = + table.getAsSubqueryForOptimization(tableRef.equals(dataPlan.getTableRef())); + // Replace non-correlated sub-queries in WHERE clause with dummy values + // so the filter conditions can be taken into account in optimization. 
+ if (stmt.getWhere() != null && stmt.getWhere().hasSubquery()) { + StatementContext context = + new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement)); + ; + ParseNode dummyWhere = + GenSubqueryParamValuesRewriter.replaceWithDummyValues(stmt.getWhere(), context); + stmt = FACTORY.select(stmt, dummyWhere); + } + // TODO: It seems inefficient to be recompiling the statement again inside of this optimize + // call + QueryPlan subDataPlan = new QueryCompiler(statement, stmt, + FromCompiler.getResolverForQuery(stmt, statement.getConnection()), false, false, null) + .compile(); + QueryPlan subPlan = optimize(statement, subDataPlan); + TableRef newTableRef = subPlan.getTableRef(); + if (!newTableRef.equals(tableRef)) { + if (replacement == null) { + replacement = new HashMap(); + dataPlans = new HashMap(); + } + replacement.put(tableRef, newTableRef); + dataPlans.put(newTableRef, subDataPlan); + } + } + + if (replacement != null) { + select = rewriteQueryWithIndexReplacement(statement.getConnection(), resolver, select, + replacement); + } } - public QueryPlan optimize(PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - QueryCompiler compiler = new QueryCompiler(statement, select, resolver, targetColumns, parallelIteratorFactory, new SequenceManager(statement)); - QueryPlan dataPlan = compiler.compile(); - return optimize(dataPlan, statement, targetColumns, parallelIteratorFactory); + // Re-compile the plan with option "optimizeSubquery" turned on, so that enclosed + // sub-queries can be optimized recursively. + QueryCompiler compiler = new QueryCompiler(statement, select, + FromCompiler.getResolverForQuery(select, statement.getConnection()), targetColumns, + parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), true, true, dataPlans, + dataPlan.getContext()); + return Collections.singletonList(compiler.compile()); + } + + private static boolean isPartialIndexUsable(SelectStatement select, QueryPlan dataPlan, + PTable index) throws SQLException { + StatementContext context = new StatementContext(dataPlan.getContext()); + context.setResolver(FromCompiler.getResolver(dataPlan.getTableRef())); + return WhereCompiler.contains( + index.getIndexWhereExpression(dataPlan.getContext().getConnection()), + WhereCompiler.transformDNF(select.getWhere(), context)); + } + + private List getApplicablePlansForSingleFlatQuery(QueryPlan dataPlan, + PhoenixStatement statement, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws SQLException { + SelectStatement select = (SelectStatement) dataPlan.getStatement(); + String indexHint = select.getHint().getHint(Hint.INDEX); + // Exit early if we have a point lookup w/o index hint as we can't get better than that + if ( + indexHint == null && dataPlan.getContext().getScanRanges().isPointLookup() && stopAtBestPlan + && dataPlan.isApplicable() + ) { + return Collections. 
singletonList(dataPlan); } - - public QueryPlan optimize(QueryPlan dataPlan, PhoenixStatement statement, List targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - List plans = getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, true); - return plans.get(0); + + ColumnResolver indexResolver = null; + boolean forCDC = false; + PTable table = dataPlan.getTableRef().getTable(); + if (table.getType() == PTableType.CDC) { + NamedTableNode indexTableNode = FACTORY.namedTable(null, + FACTORY.table(table.getSchemaName().getString(), + CDCUtil.getCDCIndexName(table.getTableName().getString())), + select.getTableSamplingRate()); + indexResolver = FromCompiler.getResolver(indexTableNode, statement.getConnection()); + TableRef indexTableRef = indexResolver.getTables().get(0); + PTable cdcIndex = indexTableRef.getTable(); + PTableImpl.Builder indexBuilder = PTableImpl.builderFromExisting(cdcIndex); + List idxColumns = cdcIndex.getColumns(); + if (cdcIndex.getBucketNum() != null) { + // If salted, it will get added by the builder, so avoid duplication. + idxColumns = idxColumns.subList(1, idxColumns.size()); + } + indexBuilder.setColumns(idxColumns); + indexBuilder.setParentName(table.getName()); + indexBuilder.setParentTableName(table.getTableName()); + cdcIndex = indexBuilder.build(); + indexTableRef.setTable(cdcIndex); + + PTableImpl.Builder cdcBuilder = PTableImpl.builderFromExisting(table); + cdcBuilder.setColumns(table.getColumns()); + cdcBuilder.setIndexes(Collections.singletonList(cdcIndex)); + table = cdcBuilder.build(); + dataPlan.getTableRef().setTable(table); + forCDC = true; } - - public List getBestPlan(QueryPlan dataPlan, PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - return getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, true); + + List indexes = Lists.newArrayList(dataPlan.getTableRef().getTable().getIndexes()); + if ( + dataPlan.isApplicable() && (indexes.isEmpty() || dataPlan.isDegenerate() + || dataPlan.getTableRef().hasDynamicCols() || select.getHint().hasHint(Hint.NO_INDEX)) + ) { + return Collections. singletonList(dataPlan); } - - public List getApplicablePlans(QueryPlan dataPlan, PhoenixStatement statement, SelectStatement select, ColumnResolver resolver, List targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException { - return getApplicablePlans(dataPlan, statement, targetColumns, parallelIteratorFactory, false); + // The targetColumns is set for UPSERT SELECT to ensure that the proper type conversion takes + // place. + // For a SELECT, it is empty. In this case, we want to set the targetColumns to match the + // projection + // from the dataPlan to ensure that the metadata for when an index is used matches the metadata + // for + // when the data table is used. 
+ if (targetColumns.isEmpty()) { + List projectors = dataPlan.getProjector().getColumnProjectors(); + List targetDatums = Lists.newArrayListWithExpectedSize(projectors.size()); + for (ColumnProjector projector : projectors) { + targetDatums.add(projector.getExpression()); + } + targetColumns = targetDatums; } - - private List getApplicablePlans(QueryPlan dataPlan, PhoenixStatement statement, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws SQLException { - if (!useIndexes) { - return Collections.singletonList(dataPlan); - } - - SelectStatement select = (SelectStatement) dataPlan.getStatement(); - if (!select.isUnion() - && !select.isJoin() - && select.getInnerSelectStatement() == null - && (select.getWhere() == null || !select.getWhere().hasSubquery())) { - return getApplicablePlansForSingleFlatQuery(dataPlan, statement, targetColumns, parallelIteratorFactory, stopAtBestPlan); - } - Map dataPlans = null; - // Find the optimal index plan for each join tables in a join query or a - // non-correlated sub-query, then rewrite the query with found index tables. - if (select.isJoin() - || (select.getWhere() != null && select.getWhere().hasSubquery())) { - ColumnResolver resolver = FromCompiler.getResolverForQuery(select, statement.getConnection()); - JoinCompiler.JoinTable join = JoinCompiler.compile(statement, select, resolver); - Map replacement = null; - for (JoinCompiler.Table table : join.getAllTables()) { - if (table.isSubselect()) - continue; - TableRef tableRef = table.getTableRef(); - SelectStatement stmt = table.getAsSubqueryForOptimization(tableRef.equals(dataPlan.getTableRef())); - // Replace non-correlated sub-queries in WHERE clause with dummy values - // so the filter conditions can be taken into account in optimization. - if (stmt.getWhere() != null && stmt.getWhere().hasSubquery()) { - StatementContext context = - new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));; - ParseNode dummyWhere = GenSubqueryParamValuesRewriter.replaceWithDummyValues(stmt.getWhere(), context); - stmt = FACTORY.select(stmt, dummyWhere); - } - // TODO: It seems inefficient to be recompiling the statement again inside of this optimize call - QueryPlan subDataPlan = - new QueryCompiler( - statement, stmt, - FromCompiler.getResolverForQuery(stmt, statement.getConnection()), - false, false, null) - .compile(); - QueryPlan subPlan = optimize(statement, subDataPlan); - TableRef newTableRef = subPlan.getTableRef(); - if (!newTableRef.equals(tableRef)) { - if (replacement == null) { - replacement = new HashMap(); - dataPlans = new HashMap(); - } - replacement.put(tableRef, newTableRef); - dataPlans.put(newTableRef, subDataPlan); - } - } - - if (replacement != null) { - select = rewriteQueryWithIndexReplacement( - statement.getConnection(), resolver, select, replacement); - } - } - - // Re-compile the plan with option "optimizeSubquery" turned on, so that enclosed - // sub-queries can be optimized recursively. 
- QueryCompiler compiler = new QueryCompiler( - statement, - select, - FromCompiler.getResolverForQuery(select, statement.getConnection()), - targetColumns, - parallelIteratorFactory, - dataPlan.getContext().getSequenceManager(), - true, - true, - dataPlans, - dataPlan.getContext()); - return Collections.singletonList(compiler.compile()); - } - - private static boolean isPartialIndexUsable(SelectStatement select, QueryPlan dataPlan, - PTable index) throws SQLException { - StatementContext context = new StatementContext(dataPlan.getContext()); - context.setResolver(FromCompiler.getResolver(dataPlan.getTableRef())); - return WhereCompiler.contains( - index.getIndexWhereExpression(dataPlan.getContext().getConnection()), - WhereCompiler.transformDNF(select.getWhere(), context)); - } - - private List getApplicablePlansForSingleFlatQuery(QueryPlan dataPlan, PhoenixStatement statement, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, boolean stopAtBestPlan) throws SQLException { - SelectStatement select = (SelectStatement)dataPlan.getStatement(); - String indexHint = select.getHint().getHint(Hint.INDEX); - // Exit early if we have a point lookup w/o index hint as we can't get better than that - if (indexHint == null && dataPlan.getContext().getScanRanges().isPointLookup() - && stopAtBestPlan && dataPlan.isApplicable()) { - return Collections. singletonList(dataPlan); - } - - ColumnResolver indexResolver = null; - boolean forCDC = false; - PTable table = dataPlan.getTableRef().getTable(); - if (table.getType() == PTableType.CDC) { - NamedTableNode indexTableNode = FACTORY.namedTable(null, - FACTORY.table(table.getSchemaName().getString(), - CDCUtil.getCDCIndexName(table.getTableName().getString())), - select.getTableSamplingRate()); - indexResolver = FromCompiler.getResolver(indexTableNode, - statement.getConnection()); - TableRef indexTableRef = indexResolver.getTables().get(0); - PTable cdcIndex = indexTableRef.getTable(); - PTableImpl.Builder indexBuilder = PTableImpl.builderFromExisting(cdcIndex); - List idxColumns = cdcIndex.getColumns(); - if (cdcIndex.getBucketNum() != null) { - // If salted, it will get added by the builder, so avoid duplication. - idxColumns = idxColumns.subList(1, idxColumns.size()); - } - indexBuilder.setColumns(idxColumns); - indexBuilder.setParentName(table.getName()); - indexBuilder.setParentTableName(table.getTableName()); - cdcIndex = indexBuilder.build(); - indexTableRef.setTable(cdcIndex); - - PTableImpl.Builder cdcBuilder = PTableImpl.builderFromExisting(table); - cdcBuilder.setColumns(table.getColumns()); - cdcBuilder.setIndexes(Collections.singletonList(cdcIndex)); - table = cdcBuilder.build(); - dataPlan.getTableRef().setTable(table); - forCDC = true; - } + List plans = Lists.newArrayListWithExpectedSize((forCDC ? 0 : 1) + indexes.size()); + SelectStatement translatedIndexSelect = + IndexStatementRewriter.translate(select, FromCompiler.getResolver(dataPlan.getTableRef())); + QueryPlan hintedPlan = null; + // We can't have hints work with CDC queries so skip looking for hinted plans. 
+ if (!forCDC) { + plans.add(dataPlan); + hintedPlan = getHintedQueryPlan(statement, translatedIndexSelect, indexes, targetColumns, + parallelIteratorFactory, plans); + if (hintedPlan != null) { + PTable index = hintedPlan.getTableRef().getTable(); + if ( + stopAtBestPlan && hintedPlan.isApplicable() + && (index.getIndexWhere() == null || isPartialIndexUsable(select, dataPlan, index)) + ) { + return Collections.singletonList(hintedPlan); + } + plans.add(0, hintedPlan); + } + } - Listindexes = Lists.newArrayList(dataPlan.getTableRef().getTable().getIndexes()); - if (dataPlan.isApplicable() && (indexes.isEmpty() - || dataPlan.isDegenerate() - || dataPlan.getTableRef().hasDynamicCols() - || select.getHint().hasHint(Hint.NO_INDEX))) { - return Collections. singletonList(dataPlan); - } - // The targetColumns is set for UPSERT SELECT to ensure that the proper type conversion takes place. - // For a SELECT, it is empty. In this case, we want to set the targetColumns to match the projection - // from the dataPlan to ensure that the metadata for when an index is used matches the metadata for - // when the data table is used. - if (targetColumns.isEmpty()) { - List projectors = dataPlan.getProjector().getColumnProjectors(); - List targetDatums = Lists.newArrayListWithExpectedSize(projectors.size()); - for (ColumnProjector projector : projectors) { - targetDatums.add(projector.getExpression()); - } - targetColumns = targetDatums; - } - - List plans = Lists.newArrayListWithExpectedSize((forCDC ? 0 : 1) - + indexes.size()); - SelectStatement translatedIndexSelect = IndexStatementRewriter.translate( - select, FromCompiler.getResolver(dataPlan.getTableRef())); - QueryPlan hintedPlan = null; - // We can't have hints work with CDC queries so skip looking for hinted plans. - if (! forCDC) { - plans.add(dataPlan); - hintedPlan = getHintedQueryPlan(statement, translatedIndexSelect, indexes, - targetColumns, parallelIteratorFactory, plans); - if (hintedPlan != null) { - PTable index = hintedPlan.getTableRef().getTable(); - if (stopAtBestPlan && hintedPlan.isApplicable() && (index.getIndexWhere() == null - || isPartialIndexUsable(select, dataPlan, index))) { - return Collections.singletonList(hintedPlan); - } - plans.add(0, hintedPlan); - } - } - - for (PTable index : indexes) { - if (CDCUtil.isCDCIndex(index) && !forCDC) { - // A CDC index is allowed only for the queries on its CDC table because a CDC index - // may not be built completely and may not include the index row updates for - // the data table mutations outside the max lookback window - continue; - } - QueryPlan plan = addPlan(statement, translatedIndexSelect, index, targetColumns, - parallelIteratorFactory, dataPlan, false, indexResolver); - if (plan != null && - (index.getIndexWhere() == null - || isPartialIndexUsable(select, dataPlan, index))) { - // Query can't possibly return anything so just return this plan. 
- if (plan.isDegenerate()) { - return Collections.singletonList(plan); - } - plans.add(plan); - } - } + for (PTable index : indexes) { + if (CDCUtil.isCDCIndex(index) && !forCDC) { + // A CDC index is allowed only for the queries on its CDC table because a CDC index + // may not be built completely and may not include the index row updates for + // the data table mutations outside the max lookback window + continue; + } + QueryPlan plan = addPlan(statement, translatedIndexSelect, index, targetColumns, + parallelIteratorFactory, dataPlan, false, indexResolver); + if ( + plan != null + && (index.getIndexWhere() == null || isPartialIndexUsable(select, dataPlan, index)) + ) { + // Query can't possibly return anything so just return this plan. + if (plan.isDegenerate()) { + return Collections.singletonList(plan); + } + plans.add(plan); + } + } - //Only pull out applicable plans, late filtering since dataplan is used to construct the plans - List applicablePlans = Lists.newArrayListWithExpectedSize(plans.size()); - for(QueryPlan plan : plans) { - if(plan.isApplicable()) { - applicablePlans.add(plan); - } - } - if(applicablePlans.isEmpty()) { - //Currently this is the only case for non-applicable plans - throw new RowValueConstructorOffsetNotCoercibleException("No table or index could be coerced to the PK as the offset. Or an uncovered index was attempted"); - } + // Only pull out applicable plans, late filtering since dataplan is used to construct the plans + List applicablePlans = Lists.newArrayListWithExpectedSize(plans.size()); + for (QueryPlan plan : plans) { + if (plan.isApplicable()) { + applicablePlans.add(plan); + } + } + if (applicablePlans.isEmpty()) { + // Currently this is the only case for non-applicable plans + throw new RowValueConstructorOffsetNotCoercibleException( + "No table or index could be coerced to the PK as the offset. Or an uncovered index was attempted"); + } - //OrderPlans - return hintedPlan == null ? orderPlansBestToWorst(select, applicablePlans, stopAtBestPlan) : applicablePlans; + // OrderPlans + return hintedPlan == null + ? orderPlansBestToWorst(select, applicablePlans, stopAtBestPlan) + : applicablePlans; + } + + private QueryPlan getHintedQueryPlan(PhoenixStatement statement, SelectStatement select, + List indexes, List targetColumns, + ParallelIteratorFactory parallelIteratorFactory, List plans) throws SQLException { + QueryPlan dataPlan = plans.get(0); + String indexHint = select.getHint().getHint(Hint.INDEX); + if (indexHint == null) { + return null; } - - private QueryPlan getHintedQueryPlan(PhoenixStatement statement, SelectStatement select, List indexes, List targetColumns, ParallelIteratorFactory parallelIteratorFactory, List plans) throws SQLException { - QueryPlan dataPlan = plans.get(0); - String indexHint = select.getHint().getHint(Hint.INDEX); - if (indexHint == null) { - return null; - } - int startIndex = 0; - String alias = dataPlan.getTableRef().getTableAlias(); - String prefix = HintNode.PREFIX + (alias == null ? 
dataPlan.getTableRef().getTable().getName().getString() : alias) + HintNode.SEPARATOR; - while (startIndex < indexHint.length()) { - startIndex = indexHint.indexOf(prefix, startIndex); - if (startIndex < 0) { - return null; - } - startIndex += prefix.length(); - boolean done = false; // true when SUFFIX found - while (startIndex < indexHint.length() && !done) { - int endIndex; - int endIndex1 = indexHint.indexOf(HintNode.SEPARATOR, startIndex); - int endIndex2 = indexHint.indexOf(HintNode.SUFFIX, startIndex); - if (endIndex1 < 0 && endIndex2 < 0) { // Missing SUFFIX shouldn't happen - endIndex = indexHint.length(); - } else if (endIndex1 < 0) { - done = true; - endIndex = endIndex2; - } else if (endIndex2 < 0) { - endIndex = endIndex1; - } else { - endIndex = Math.min(endIndex1, endIndex2); - done = endIndex2 == endIndex; - } - String indexName = indexHint.substring(startIndex, endIndex); - int indexPos = getIndexPosition(indexes, indexName); - if (indexPos >= 0) { - // Hinted index is applicable, so return it's index - PTable index = indexes.get(indexPos); - indexes.remove(indexPos); - QueryPlan plan = addPlan(statement, select, index, targetColumns, - parallelIteratorFactory, dataPlan, true, null); - if (plan != null) { - return plan; - } - } - startIndex = endIndex + 1; - } - } + int startIndex = 0; + String alias = dataPlan.getTableRef().getTableAlias(); + String prefix = HintNode.PREFIX + + (alias == null ? dataPlan.getTableRef().getTable().getName().getString() : alias) + + HintNode.SEPARATOR; + while (startIndex < indexHint.length()) { + startIndex = indexHint.indexOf(prefix, startIndex); + if (startIndex < 0) { return null; + } + startIndex += prefix.length(); + boolean done = false; // true when SUFFIX found + while (startIndex < indexHint.length() && !done) { + int endIndex; + int endIndex1 = indexHint.indexOf(HintNode.SEPARATOR, startIndex); + int endIndex2 = indexHint.indexOf(HintNode.SUFFIX, startIndex); + if (endIndex1 < 0 && endIndex2 < 0) { // Missing SUFFIX shouldn't happen + endIndex = indexHint.length(); + } else if (endIndex1 < 0) { + done = true; + endIndex = endIndex2; + } else if (endIndex2 < 0) { + endIndex = endIndex1; + } else { + endIndex = Math.min(endIndex1, endIndex2); + done = endIndex2 == endIndex; + } + String indexName = indexHint.substring(startIndex, endIndex); + int indexPos = getIndexPosition(indexes, indexName); + if (indexPos >= 0) { + // Hinted index is applicable, so return it's index + PTable index = indexes.get(indexPos); + indexes.remove(indexPos); + QueryPlan plan = addPlan(statement, select, index, targetColumns, parallelIteratorFactory, + dataPlan, true, null); + if (plan != null) { + return plan; + } + } + startIndex = endIndex + 1; + } + } + return null; + } + + private static int getIndexPosition(List indexes, String indexName) { + for (int i = 0; i < indexes.size(); i++) { + if (indexName.equals(indexes.get(i).getTableName().getString())) { + return i; + } } - - private static int getIndexPosition(List indexes, String indexName) { - for (int i = 0; i < indexes.size(); i++) { - if (indexName.equals(indexes.get(i).getTableName().getString())) { - return i; + return -1; + } + + private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + QueryPlan dataPlan, boolean isHinted, ColumnResolver indexResolver) throws SQLException { + String tableAlias = dataPlan.getTableRef().getTableAlias(); + String alias = tableAlias == null ? 
null : '"' + tableAlias + '"'; // double quote in case it's + // case sensitive + String schemaName = index.getParentSchemaName().getString(); + schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"'; + + String tableName = '"' + index.getTableName().getString() + '"'; + TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName), + select.getTableSamplingRate()); + SelectStatement indexSelect = FACTORY.select(select, table); + ColumnResolver resolver = indexResolver != null + ? indexResolver + : FromCompiler.getResolverForQuery(indexSelect, statement.getConnection()); + return addPlan(statement, select, index, targetColumns, parallelIteratorFactory, dataPlan, + isHinted, indexSelect, resolver); + } + + private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, + List targetColumns, ParallelIteratorFactory parallelIteratorFactory, + QueryPlan dataPlan, boolean isHinted, SelectStatement indexSelect, ColumnResolver resolver) + throws SQLException { + int nColumns = dataPlan.getProjector().getColumnCount(); + // We will or will not do tuple projection according to the data plan. + boolean isProjected = + dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() + == PTableType.PROJECTED; + // Check index state of now potentially updated index table to make sure it's active + TableRef indexTableRef = resolver.getTables().get(0); + indexTableRef.setHinted(isHinted); + Map dataPlans = Collections.singletonMap(indexTableRef, dataPlan); + PTable indexTable = indexTableRef.getTable(); + PIndexState indexState = indexTable.getIndexState(); + boolean isServerMergeForUncoveredIndexEnabled = statement.getConnection().getQueryServices() + .getProps().getBoolean(QueryServices.SERVER_MERGE_FOR_UNCOVERED_INDEX, + QueryServicesOptions.DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX); + if ( + indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE + || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold( + indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp())) + ) { + try { + if ( + !isServerMergeForUncoveredIndexEnabled + || select.getHint().hasHint(HintNode.Hint.NO_INDEX_SERVER_MERGE) + ) { + String schemaNameStr = + index.getSchemaName() == null ? null : index.getSchemaName().getString(); + String tableNameStr = + index.getTableName() == null ? null : index.getTableName().getString(); + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*"); + } + // translate nodes that match expressions that are indexed to the + // associated column parse node + SelectStatement rewrittenIndexSelect = + ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, + statement.getConnection(), indexSelect.getUdfParseNodes())); + QueryCompiler compiler = new QueryCompiler(statement, rewrittenIndexSelect, resolver, + targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), + isProjected, true, dataPlans); + + QueryPlan plan = compiler.compile(); + if (indexTable.getIndexType() == IndexType.UNCOVERED_GLOBAL) { + // Indexed columns should also be added to the data columns to join for + // uncovered global indexes. 
This is required to verify index rows against + // data table rows + plan.getContext().setUncoveredIndex(true); + PhoenixConnection connection = statement.getConnection(); + IndexMaintainer maintainer; + PTable newIndexTable; + String dataTableName; + if ( + indexTable.getViewIndexId() != null && indexTable.getName().getString() + .contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR) + ) { + // MetaDataClient modifies the index table name for view indexes if the + // parent view of an index has a child view. We need to recreate a PTable + // object with the correct table name to get the index maintainer + int lastIndexOf = indexTable.getName().getString() + .lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + String indexName = indexTable.getName().getString().substring(lastIndexOf + 1); + newIndexTable = connection.getTable(indexName); + dataTableName = SchemaUtil.getTableName(newIndexTable.getParentSchemaName().getString(), + indexTable.getParentTableName().getString()); + } else { + newIndexTable = indexTable; + dataTableName = SchemaUtil.getTableName(indexTable.getParentSchemaName().getString(), + indexTable.getParentTableName().getString()); + } + PTable dataTableFromDataPlan = dataPlan.getTableRef().getTable(); + PTable cdcTable = null; + if (dataTableFromDataPlan.getType() == PTableType.CDC) { + cdcTable = dataTableFromDataPlan; + dataTableName = SchemaUtil.getTableName(indexTable.getParentSchemaName().getString(), + dataTableFromDataPlan.getParentTableName().getString()); + } + PTable dataTable = connection.getTable(dataTableName); + maintainer = newIndexTable.getIndexMaintainer(dataTable, cdcTable, connection); + Set> indexedColumns = + maintainer.getIndexedColumnInfo(); + for (org.apache.hadoop.hbase.util.Pair pair : indexedColumns) { + // The first member of the pair is the column family. For the data table PK columns, the + // column + // family is set to null. The data PK columns should not be added to the set of data + // columns + // to join back to index rows + if (pair.getFirst() != null) { + PColumn pColumn = dataTable.getColumnForColumnName(pair.getSecond()); + // The following adds the column to the set + plan.getContext().getDataColumnPosition(pColumn); } - } - return -1; - } - - private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, - List targetColumns, - ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, - boolean isHinted, ColumnResolver indexResolver) - throws SQLException { - String tableAlias = dataPlan.getTableRef().getTableAlias(); - String alias = tableAlias == null ? null - : '"' + tableAlias + '"'; // double quote in case it's case sensitive - String schemaName = index.getParentSchemaName().getString(); - schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"'; - - String tableName = '"' + index.getTableName().getString() + '"'; - TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName), - select.getTableSamplingRate()); - SelectStatement indexSelect = FACTORY.select(select, table); - ColumnResolver resolver = indexResolver != null ? 
indexResolver - : FromCompiler.getResolverForQuery(indexSelect, statement.getConnection()); - return addPlan(statement, select, index, targetColumns, parallelIteratorFactory, dataPlan, - isHinted, indexSelect, resolver); - } - - private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index, - List targetColumns, - ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan, - boolean isHinted, SelectStatement indexSelect, - ColumnResolver resolver) throws SQLException { - int nColumns = dataPlan.getProjector().getColumnCount(); - // We will or will not do tuple projection according to the data plan. - boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED; - // Check index state of now potentially updated index table to make sure it's active - TableRef indexTableRef = resolver.getTables().get(0); - indexTableRef.setHinted(isHinted); - Map dataPlans = Collections.singletonMap(indexTableRef, dataPlan); - PTable indexTable = indexTableRef.getTable(); - PIndexState indexState = indexTable.getIndexState(); - boolean isServerMergeForUncoveredIndexEnabled = statement.getConnection() - .getQueryServices().getProps().getBoolean( - QueryServices.SERVER_MERGE_FOR_UNCOVERED_INDEX, - QueryServicesOptions.DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX); - if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE - || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) { - try { - if (!isServerMergeForUncoveredIndexEnabled - || select.getHint().hasHint(HintNode.Hint.NO_INDEX_SERVER_MERGE)) { - String schemaNameStr = index.getSchemaName() == null ? null - : index.getSchemaName().getString(); - String tableNameStr = index.getTableName() == null ? null - : index.getTableName().getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*"); - } - // translate nodes that match expressions that are indexed to the - // associated column parse node - SelectStatement rewrittenIndexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes())); - QueryCompiler compiler = new QueryCompiler(statement, rewrittenIndexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans); - - QueryPlan plan = compiler.compile(); - if (indexTable.getIndexType() == IndexType.UNCOVERED_GLOBAL) { - // Indexed columns should also be added to the data columns to join for - // uncovered global indexes. This is required to verify index rows against - // data table rows - plan.getContext().setUncoveredIndex(true); - PhoenixConnection connection = statement.getConnection(); - IndexMaintainer maintainer; - PTable newIndexTable; - String dataTableName; - if (indexTable.getViewIndexId() != null - && indexTable.getName().getString().contains( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - // MetaDataClient modifies the index table name for view indexes if the - // parent view of an index has a child view. 
We need to recreate a PTable - // object with the correct table name to get the index maintainer - int lastIndexOf = indexTable.getName().getString().lastIndexOf( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - String indexName = indexTable.getName().getString().substring(lastIndexOf + 1); - newIndexTable = connection.getTable(indexName); - dataTableName = SchemaUtil.getTableName( - newIndexTable.getParentSchemaName().getString(), - indexTable.getParentTableName().getString()); - } else { - newIndexTable = indexTable; - dataTableName = SchemaUtil.getTableName( - indexTable.getParentSchemaName().getString(), - indexTable.getParentTableName().getString()); - } - PTable dataTableFromDataPlan = dataPlan.getTableRef().getTable(); - PTable cdcTable = null; - if (dataTableFromDataPlan.getType() == PTableType.CDC) { - cdcTable = dataTableFromDataPlan; - dataTableName = SchemaUtil.getTableName( - indexTable.getParentSchemaName().getString(), - dataTableFromDataPlan.getParentTableName().getString()); - } - PTable dataTable = connection.getTable(dataTableName); - maintainer = newIndexTable.getIndexMaintainer(dataTable, cdcTable, connection); - Set> indexedColumns = - maintainer.getIndexedColumnInfo(); - for (org.apache.hadoop.hbase.util.Pair pair : indexedColumns) { - // The first member of the pair is the column family. For the data table PK columns, the column - // family is set to null. The data PK columns should not be added to the set of data columns - // to join back to index rows - if (pair.getFirst() != null) { - PColumn pColumn = dataTable.getColumnForColumnName(pair.getSecond()); - // The following adds the column to the set - plan.getContext().getDataColumnPosition(pColumn); - } - } - if (dataTableFromDataPlan.getType() == PTableType.CDC) { - PColumn cdcJsonCol = dataTableFromDataPlan.getColumnForColumnName( - CDC_JSON_COL_NAME); - plan.getContext().getDataColumnPosition(cdcJsonCol); - } - } - indexTableRef = plan.getTableRef(); - indexTable = indexTableRef.getTable(); - indexState = indexTable.getIndexState(); - // Checking number of columns handles the wildcard cases correctly, as in that case the index - // must contain all columns from the data table to be able to be used. - if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE - || (indexState == PIndexState.PENDING_DISABLE - && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), - indexTable.getIndexDisableTimestamp()))) { - if (plan.getProjector().getColumnCount() == nColumns) { - return plan; - } else { - String schemaNameStr = index.getSchemaName() == null ? null - : index.getSchemaName().getString(); - String tableNameStr = index.getTableName() == null ? null - : index.getTableName().getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*"); - } - } - } catch (ColumnNotFoundException e) { - /* Means that a column is being used that's not in our index. - * Since we currently don't keep stats, we don't know the selectivity of the index. - * For now, if this is a hinted plan, we will try rewriting the query as a subquery; - * otherwise we just don't use this index (as opposed to trying to join back from - * the index table to the data table. 
- */ - // Reset the state changes from the attempt above - indexTableRef.setHinted(false); - dataPlan.getContext().setUncoveredIndex(false); - - SelectStatement dataSelect = (SelectStatement)dataPlan.getStatement(); - ParseNode where = dataSelect.getWhere(); - if (isHinted && where != null) { - StatementContext context = new StatementContext(statement, resolver); - WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context); - where = where.accept(whereRewriter); - if (where != null) { - PTable dataTable = dataPlan.getTableRef().getTable(); - List pkColumns = dataTable.getPKColumns(); - List aliasedNodes = Lists.newArrayListWithExpectedSize(pkColumns.size()); - List nodes = Lists.newArrayListWithExpectedSize(pkColumns.size()); - boolean isSalted = dataTable.getBucketNum() != null; - boolean isTenantSpecific = dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null; - int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0); - for (int i = posOffset; i < pkColumns.size(); i++) { - PColumn column = pkColumns.get(i); - String indexColName = IndexUtil.getIndexColumnName(column); - ParseNode indexColNode = new ColumnParseNode(null, '"' + indexColName + '"', indexColName); - PDataType indexColType = IndexUtil.getIndexColumnDataType(column); - PDataType dataColType = column.getDataType(); - if (indexColType != dataColType) { - indexColNode = FACTORY.cast(indexColNode, dataColType, null, null); - } - aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode)); - nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"')); - } - SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.emptyList(), indexSelect.getUdfParseNodes()); - ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true); - ParseNode extractedCondition = whereRewriter.getExtractedCondition(); - if (extractedCondition != null) { - outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition)); - } - HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] {Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION}), FACTORY.hint("NO_INDEX")); - SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere); - RewriteResult rewriteResult = - ParseNodeUtil.rewrite(query, statement.getConnection()); - QueryPlan plan = new QueryCompiler( - statement, - rewriteResult.getRewrittenSelectStatement(), - rewriteResult.getColumnResolver(), - targetColumns, - parallelIteratorFactory, - dataPlan.getContext().getSequenceManager(), - isProjected, - true, - dataPlans).compile(); - return plan; - } - } + } + if (dataTableFromDataPlan.getType() == PTableType.CDC) { + PColumn cdcJsonCol = dataTableFromDataPlan.getColumnForColumnName(CDC_JSON_COL_NAME); + plan.getContext().getDataColumnPosition(cdcJsonCol); + } + } + indexTableRef = plan.getTableRef(); + indexTable = indexTableRef.getTable(); + indexState = indexTable.getIndexState(); + // Checking number of columns handles the wildcard cases correctly, as in that case the + // index + // must contain all columns from the data table to be able to be used. 
+ if ( + indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE + || (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold( + indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp())) + ) { + if (plan.getProjector().getColumnCount() == nColumns) { + return plan; + } else { + String schemaNameStr = + index.getSchemaName() == null ? null : index.getSchemaName().getString(); + String tableNameStr = + index.getTableName() == null ? null : index.getTableName().getString(); + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*"); + } + } + } catch (ColumnNotFoundException e) { + /* + * Means that a column is being used that's not in our index. Since we currently don't keep + * stats, we don't know the selectivity of the index. For now, if this is a hinted plan, we + * will try rewriting the query as a subquery; otherwise we just don't use this index (as + * opposed to trying to join back from the index table to the data table. + */ + // Reset the state changes from the attempt above + indexTableRef.setHinted(false); + dataPlan.getContext().setUncoveredIndex(false); + + SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement(); + ParseNode where = dataSelect.getWhere(); + if (isHinted && where != null) { + StatementContext context = new StatementContext(statement, resolver); + WhereConditionRewriter whereRewriter = + new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context); + where = where.accept(whereRewriter); + if (where != null) { + PTable dataTable = dataPlan.getTableRef().getTable(); + List pkColumns = dataTable.getPKColumns(); + List aliasedNodes = + Lists. newArrayListWithExpectedSize(pkColumns.size()); + List nodes = + Lists. newArrayListWithExpectedSize(pkColumns.size()); + boolean isSalted = dataTable.getBucketNum() != null; + boolean isTenantSpecific = + dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null; + int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0); + for (int i = posOffset; i < pkColumns.size(); i++) { + PColumn column = pkColumns.get(i); + String indexColName = IndexUtil.getIndexColumnName(column); + ParseNode indexColNode = + new ColumnParseNode(null, '"' + indexColName + '"', indexColName); + PDataType indexColType = IndexUtil.getIndexColumnDataType(column); + PDataType dataColType = column.getDataType(); + if (indexColType != dataColType) { + indexColNode = FACTORY.cast(indexColNode, dataColType, null, null); + } + aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode)); + nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"')); } - catch (RowValueConstructorOffsetNotCoercibleException e) { - // Could not coerce the user provided RVC Offset so we do not have a plan to add. - return null; + SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), + indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, + indexSelect.getBindCount(), false, indexSelect.hasSequence(), + Collections. emptyList(), indexSelect.getUdfParseNodes()); + ParseNode outerWhere = + FACTORY.in(nodes.size() == 1 ? 
nodes.get(0) : FACTORY.rowValueConstructor(nodes), + FACTORY.subquery(innerSelect, false), false, true); + ParseNode extractedCondition = whereRewriter.getExtractedCondition(); + if (extractedCondition != null) { + outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition)); } - } + HintNode hint = HintNode.combine( + HintNode.subtract(indexSelect.getHint(), + new Hint[] { Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION }), + FACTORY.hint("NO_INDEX")); + SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere); + RewriteResult rewriteResult = ParseNodeUtil.rewrite(query, statement.getConnection()); + QueryPlan plan = + new QueryCompiler(statement, rewriteResult.getRewrittenSelectStatement(), + rewriteResult.getColumnResolver(), targetColumns, parallelIteratorFactory, + dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans).compile(); + return plan; + } + } + } catch (RowValueConstructorOffsetNotCoercibleException e) { + // Could not coerce the user provided RVC Offset so we do not have a plan to add. return null; + } + } + return null; + } + + // returns true if we can still use the index + // retuns false if we've been in PENDING_DISABLE too long - index should be considered disabled + private boolean isUnderPendingDisableThreshold(long currentTimestamp, + long indexDisableTimestamp) { + return currentTimestamp - indexDisableTimestamp <= indexPendingDisabledThreshold; + } + + /** + * Order the plans among all the possible ones from best to worst. If option + * COST_BASED_OPTIMIZER_ENABLED is on and stats are available, we order the plans based on their + * costs, otherwise we use the following simple algorithm: 1) If the query is a point lookup (i.e. + * we have a set of exact row keys), choose that one immediately. 2) If the query has an ORDER BY + * and a LIMIT, choose the plan that has all the ORDER BY expression in the same order as the row + * key columns. 3) If there are more than one plan that meets (1&2), choose the plan with: a) the + * most row key columns that may be used to form the start/stop scan key (i.e. bound slots). b) + * the plan that preserves ordering for a group by. c) the non local index table plan + * @param plans the list of candidate plans + * @return list of plans ordered from best to worst. + */ + private List orderPlansBestToWorst(SelectStatement select, List plans, + boolean stopAtBestPlan) { + final QueryPlan dataPlan = plans.get(0); + if (plans.size() == 1) { + return plans; } - // returns true if we can still use the index - // retuns false if we've been in PENDING_DISABLE too long - index should be considered disabled - private boolean isUnderPendingDisableThreshold(long currentTimestamp, long indexDisableTimestamp) { - return currentTimestamp - indexDisableTimestamp <= indexPendingDisabledThreshold; + if (this.costBased) { + Collections.sort(plans, new Comparator() { + @Override + public int compare(QueryPlan plan1, QueryPlan plan2) { + return plan1.getCost().compareTo(plan2.getCost()); + } + }); + // Return ordered list based on cost if stats are available; otherwise fall + // back to static ordering. + if (!plans.get(0).getCost().isUnknown()) { + return stopAtBestPlan ? plans.subList(0, 1) : plans; + } } /** - * Order the plans among all the possible ones from best to worst. - * If option COST_BASED_OPTIMIZER_ENABLED is on and stats are available, we order the plans based on - * their costs, otherwise we use the following simple algorithm: - * 1) If the query is a point lookup (i.e. 
we have a set of exact row keys), choose that one immediately. - * 2) If the query has an ORDER BY and a LIMIT, choose the plan that has all the ORDER BY expression - * in the same order as the row key columns. - * 3) If there are more than one plan that meets (1&2), choose the plan with: - * a) the most row key columns that may be used to form the start/stop scan key (i.e. bound slots). - * b) the plan that preserves ordering for a group by. - * c) the non local index table plan - * @param plans the list of candidate plans - * @return list of plans ordered from best to worst. + * If we have a plan(s) that are just point lookups (i.e. fully qualified row keys), then favor + * those first. */ - private List orderPlansBestToWorst(SelectStatement select, List plans, boolean stopAtBestPlan) { - final QueryPlan dataPlan = plans.get(0); - if (plans.size() == 1) { - return plans; - } + List candidates = Lists.newArrayListWithExpectedSize(plans.size()); + if (stopAtBestPlan) { // If we're stopping at the best plan, only consider point lookups if + // there are any + for (QueryPlan plan : plans) { + if (plan.getContext().getScanRanges().isPointLookup()) { + candidates.add(plan); + } + } + } else { + candidates.addAll(plans); + } + /** + * If we have a plan(s) that removes the order by, choose from among these, as this is typically + * the most expensive operation. Once we have stats, if there's a limit on the query, we might + * choose a different plan. For example if the limit was a very large number and the combination + * of applying other filters on the row key are estimated to choose fewer rows, we'd choose that + * one. + */ + List stillCandidates = plans; + List bestCandidates = candidates; + if (!candidates.isEmpty()) { + stillCandidates = candidates; + bestCandidates = Lists. newArrayListWithExpectedSize(candidates.size()); + } + for (QueryPlan plan : stillCandidates) { + // If ORDER BY optimized out (or not present at all) + if (plan.getOrderBy().getOrderByExpressions().isEmpty()) { + bestCandidates.add(plan); + } + } + if (bestCandidates.isEmpty()) { + bestCandidates.addAll(stillCandidates); + } - if (this.costBased) { - Collections.sort(plans, new Comparator() { - @Override - public int compare(QueryPlan plan1, QueryPlan plan2) { - return plan1.getCost().compareTo(plan2.getCost()); - } - }); - // Return ordered list based on cost if stats are available; otherwise fall - // back to static ordering. - if (!plans.get(0).getCost().isUnknown()) { - return stopAtBestPlan ? plans.subList(0, 1) : plans; - } + int nViewConstants = 0; + PTable dataTable = dataPlan.getTableRef().getTable(); + if (dataTable.getType() == PTableType.VIEW) { + for (PColumn column : dataTable.getColumns()) { + if (column.getViewConstant() != null) { + nViewConstants++; } + } + } + final int boundRanges = nViewConstants; + final boolean useDataOverIndexHint = select.getHint().hasHint(Hint.USE_DATA_OVER_INDEX_TABLE); + final int comparisonOfDataVersusIndexTable = useDataOverIndexHint ? -1 : 1; + Collections.sort(bestCandidates, new Comparator() { + + @Override + public int compare(QueryPlan plan1, QueryPlan plan2) { + PTable table1 = plan1.getTableRef().getTable(); + PTable table2 = plan2.getTableRef().getTable(); + int boundCount1 = plan1.getContext().getScanRanges().getBoundPkColumnCount(); + int boundCount2 = plan2.getContext().getScanRanges().getBoundPkColumnCount(); + // For shared indexes (i.e. 
indexes on views and local indexes), + // a) add back any view constants as these won't be in the index, and + // b) ignore the viewIndexId which will be part of the row key columns. + boundCount1 += table1.getViewIndexId() == null ? 0 : (boundRanges - 1); + boundCount2 += table2.getViewIndexId() == null ? 0 : (boundRanges - 1); + // Adjust for salting. Salting adds a bound range for each salt bucket. + // (but the sum of buckets cover the entire table) + boundCount1 -= plan1.getContext().getScanRanges().isSalted() ? 1 : 0; + boundCount2 -= plan2.getContext().getScanRanges().isSalted() ? 1 : 0; + int c = boundCount2 - boundCount1; + if (c != 0) return c; + if (plan1.getGroupBy() != null && plan2.getGroupBy() != null) { + if (plan1.getGroupBy().isOrderPreserving() != plan2.getGroupBy().isOrderPreserving()) { + return plan1.getGroupBy().isOrderPreserving() ? -1 : 1; + } + } + + // Partial secondary index is preferred + if (table1.getIndexWhere() != null && table2.getIndexWhere() == null) { + return -1; + } + if (table1.getIndexWhere() == null && table2.getIndexWhere() != null) { + return 1; + } + // Use the plan that has fewer "dataColumns" (columns that need to be merged in) + c = plan1.getContext().getDataColumns().size() - plan2.getContext().getDataColumns().size(); + if (c != 0) return c; + + // Use smaller table (table with fewest kv columns) + if ( + !useDataOverIndexHint + || (table1.getType() == PTableType.INDEX && table2.getType() == PTableType.INDEX) + ) { + c = (table1.getColumns().size() - table1.getPKColumns().size()) + - (table2.getColumns().size() - table2.getPKColumns().size()); + if (c != 0) return c; + } + + // If all things are equal, don't choose local index as it forces scan + // on every region (unless there's no start/stop key) + + if (table1.getIndexType() == IndexType.LOCAL && table2.getIndexType() != IndexType.LOCAL) { + return plan1.getContext().getScanRanges().getRanges().isEmpty() ? -1 : 1; + } + if (table2.getIndexType() == IndexType.LOCAL && table1.getIndexType() != IndexType.LOCAL) { + return plan2.getContext().getScanRanges().getRanges().isEmpty() ? 1 : -1; + } + + // All things being equal, just use the table based on the Hint.USE_DATA_OVER_INDEX_TABLE + + if (table1.getType() == PTableType.INDEX && table2.getType() != PTableType.INDEX) { + return -comparisonOfDataVersusIndexTable; + } + if (table2.getType() == PTableType.INDEX && table1.getType() != PTableType.INDEX) { + return comparisonOfDataVersusIndexTable; + } + return 0; + } + + }); + + return stopAtBestPlan ? bestCandidates.subList(0, 1) : bestCandidates; + } + + private static class WhereConditionRewriter extends AndRewriterBooleanParseNodeVisitor { + private final ColumnResolver dataResolver; + private final ExpressionCompiler expressionCompiler; + private List extractedConditions; + + public WhereConditionRewriter(ColumnResolver dataResolver, StatementContext context) + throws SQLException { + super(FACTORY); + this.dataResolver = dataResolver; + this.expressionCompiler = new ExpressionCompiler(context); + this.extractedConditions = Lists. newArrayList(); + } - /** - * If we have a plan(s) that are just point lookups (i.e. fully qualified row - * keys), then favor those first. 
- */ - List candidates = Lists.newArrayListWithExpectedSize(plans.size()); - if (stopAtBestPlan) { // If we're stopping at the best plan, only consider point lookups if there are any - for (QueryPlan plan : plans) { - if (plan.getContext().getScanRanges().isPointLookup()) { - candidates.add(plan); - } - } - } else { - candidates.addAll(plans); - } - /** - * If we have a plan(s) that removes the order by, choose from among these, - * as this is typically the most expensive operation. Once we have stats, if - * there's a limit on the query, we might choose a different plan. For example - * if the limit was a very large number and the combination of applying other - * filters on the row key are estimated to choose fewer rows, we'd choose that - * one. - */ - List stillCandidates = plans; - List bestCandidates = candidates; - if (!candidates.isEmpty()) { - stillCandidates = candidates; - bestCandidates = Lists.newArrayListWithExpectedSize(candidates.size()); - } - for (QueryPlan plan : stillCandidates) { - // If ORDER BY optimized out (or not present at all) - if (plan.getOrderBy().getOrderByExpressions().isEmpty()) { - bestCandidates.add(plan); - } - } - if (bestCandidates.isEmpty()) { - bestCandidates.addAll(stillCandidates); - } - - int nViewConstants = 0; - PTable dataTable = dataPlan.getTableRef().getTable(); - if (dataTable.getType() == PTableType.VIEW) { - for (PColumn column : dataTable.getColumns()) { - if (column.getViewConstant() != null) { - nViewConstants++; - } - } - } - final int boundRanges = nViewConstants; - final boolean useDataOverIndexHint = select.getHint().hasHint(Hint.USE_DATA_OVER_INDEX_TABLE); - final int comparisonOfDataVersusIndexTable = useDataOverIndexHint ? -1 : 1; - Collections.sort(bestCandidates, new Comparator() { - - @Override - public int compare(QueryPlan plan1, QueryPlan plan2) { - PTable table1 = plan1.getTableRef().getTable(); - PTable table2 = plan2.getTableRef().getTable(); - int boundCount1 = plan1.getContext().getScanRanges().getBoundPkColumnCount(); - int boundCount2 = plan2.getContext().getScanRanges().getBoundPkColumnCount(); - // For shared indexes (i.e. indexes on views and local indexes), - // a) add back any view constants as these won't be in the index, and - // b) ignore the viewIndexId which will be part of the row key columns. - boundCount1 += table1.getViewIndexId() == null ? 0 : (boundRanges - 1); - boundCount2 += table2.getViewIndexId() == null ? 0 : (boundRanges - 1); - // Adjust for salting. Salting adds a bound range for each salt bucket. - // (but the sum of buckets cover the entire table) - boundCount1 -= plan1.getContext().getScanRanges().isSalted() ? 1 : 0; - boundCount2 -= plan2.getContext().getScanRanges().isSalted() ? 1 : 0; - int c = boundCount2 - boundCount1; - if (c != 0) return c; - if (plan1.getGroupBy() != null && plan2.getGroupBy() != null) { - if (plan1.getGroupBy().isOrderPreserving() != plan2.getGroupBy().isOrderPreserving()) { - return plan1.getGroupBy().isOrderPreserving() ? 
-1 : 1; - } - } - - // Partial secondary index is preferred - if (table1.getIndexWhere() != null && table2.getIndexWhere() == null) { - return -1; - } - if (table1.getIndexWhere() == null && table2.getIndexWhere() != null) { - return 1; - } - // Use the plan that has fewer "dataColumns" (columns that need to be merged in) - c = plan1.getContext().getDataColumns().size() - plan2.getContext().getDataColumns().size(); - if (c != 0) return c; - - // Use smaller table (table with fewest kv columns) - if (!useDataOverIndexHint || (table1.getType() == PTableType.INDEX && table2.getType() == PTableType.INDEX)) { - c = (table1.getColumns().size() - table1.getPKColumns().size()) - (table2.getColumns().size() - table2.getPKColumns().size()); - if (c != 0) return c; - } - - // If all things are equal, don't choose local index as it forces scan - // on every region (unless there's no start/stop key) - - if (table1.getIndexType() == IndexType.LOCAL && table2.getIndexType() != - IndexType.LOCAL) { - return plan1.getContext().getScanRanges().getRanges().isEmpty() ? -1 : 1; - } - if (table2.getIndexType() == IndexType.LOCAL && table1.getIndexType() != - IndexType.LOCAL) { - return plan2.getContext().getScanRanges().getRanges().isEmpty() ? 1 : -1; - } - - // All things being equal, just use the table based on the Hint.USE_DATA_OVER_INDEX_TABLE - - if (table1.getType() == PTableType.INDEX && table2.getType() != PTableType.INDEX) { - return -comparisonOfDataVersusIndexTable; - } - if (table2.getType() == PTableType.INDEX && table1.getType() != PTableType.INDEX) { - return comparisonOfDataVersusIndexTable; - } - return 0; - } - - }); - - return stopAtBestPlan ? bestCandidates.subList(0, 1) : bestCandidates; - } - - - private static class WhereConditionRewriter extends AndRewriterBooleanParseNodeVisitor { - private final ColumnResolver dataResolver; - private final ExpressionCompiler expressionCompiler; - private List extractedConditions; - - public WhereConditionRewriter(ColumnResolver dataResolver, StatementContext context) throws SQLException { - super(FACTORY); - this.dataResolver = dataResolver; - this.expressionCompiler = new ExpressionCompiler(context); - this.extractedConditions = Lists. 
newArrayList(); - } - - public ParseNode getExtractedCondition() { - if (this.extractedConditions.isEmpty()) - return null; - - if (this.extractedConditions.size() == 1) - return this.extractedConditions.get(0); - - return FACTORY.and(this.extractedConditions); - } + public ParseNode getExtractedCondition() { + if (this.extractedConditions.isEmpty()) return null; - @Override - protected ParseNode leaveBooleanNode(ParseNode node, List l) - throws SQLException { - ParseNode translatedNode = IndexStatementRewriter.translate(node, dataResolver); - expressionCompiler.reset(); - try { - translatedNode.accept(expressionCompiler); - } catch (ColumnNotFoundException e) { - extractedConditions.add(node); - return null; - } - - return translatedNode; - } + if (this.extractedConditions.size() == 1) return this.extractedConditions.get(0); + + return FACTORY.and(this.extractedConditions); } - private static SelectStatement rewriteQueryWithIndexReplacement( - final PhoenixConnection connection, final ColumnResolver resolver, - final SelectStatement select, final Map replacement) throws SQLException { - TableNode from = select.getFrom(); - TableNode newFrom = from.accept(new QueryOptimizerTableNode(resolver, replacement)); - if (from == newFrom) { - return select; - } + @Override + protected ParseNode leaveBooleanNode(ParseNode node, List l) throws SQLException { + ParseNode translatedNode = IndexStatementRewriter.translate(node, dataResolver); + expressionCompiler.reset(); + try { + translatedNode.accept(expressionCompiler); + } catch (ColumnNotFoundException e) { + extractedConditions.add(node); + return null; + } - SelectStatement indexSelect = IndexStatementRewriter.translate(FACTORY.select(select, newFrom), resolver, replacement); - for (TableRef indexTableRef : replacement.values()) { - // replace expressions with corresponding matching columns for functional indexes - indexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(indexTableRef.getTable(), indexTableRef.getTableAlias(), connection, indexSelect.getUdfParseNodes())); - } + return translatedNode; + } + } + + private static SelectStatement rewriteQueryWithIndexReplacement( + final PhoenixConnection connection, final ColumnResolver resolver, final SelectStatement select, + final Map replacement) throws SQLException { + TableNode from = select.getFrom(); + TableNode newFrom = from.accept(new QueryOptimizerTableNode(resolver, replacement)); + if (from == newFrom) { + return select; + } - return indexSelect; + SelectStatement indexSelect = + IndexStatementRewriter.translate(FACTORY.select(select, newFrom), resolver, replacement); + for (TableRef indexTableRef : replacement.values()) { + // replace expressions with corresponding matching columns for functional indexes + indexSelect = ParseNodeRewriter.rewrite(indexSelect, + new IndexExpressionParseNodeRewriter(indexTableRef.getTable(), + indexTableRef.getTableAlias(), connection, indexSelect.getUdfParseNodes())); } - private static class QueryOptimizerTableNode implements TableNodeVisitor { - private final ColumnResolver resolver; - private final Map replacement; - QueryOptimizerTableNode (ColumnResolver resolver, final Map replacement){ - this.resolver = resolver; - this.replacement = replacement; - } + return indexSelect; + } - private TableRef resolveTable(String alias, TableName name) throws SQLException { - if (alias != null) - return resolver.resolveTable(null, alias); + private static class QueryOptimizerTableNode implements TableNodeVisitor { + private 
final ColumnResolver resolver; + private final Map replacement; - return resolver.resolveTable(name.getSchemaName(), name.getTableName()); - } + QueryOptimizerTableNode(ColumnResolver resolver, final Map replacement) { + this.resolver = resolver; + this.replacement = replacement; + } - private TableName getReplacedTableName(TableRef tableRef) { - String schemaName = tableRef.getTable().getSchemaName().getString(); - return TableName.create(schemaName.length() == 0 ? null : schemaName, tableRef.getTable().getTableName().getString()); - } + private TableRef resolveTable(String alias, TableName name) throws SQLException { + if (alias != null) return resolver.resolveTable(null, alias); - @Override - public TableNode visit(BindTableNode boundTableNode) throws SQLException { - TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName()); - TableRef replaceRef = replacement.get(tableRef); - if (replaceRef == null) - return boundTableNode; - - String alias = boundTableNode.getAlias(); - return FACTORY.bindTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef)); - } + return resolver.resolveTable(name.getSchemaName(), name.getTableName()); + } - @Override - public TableNode visit(JoinTableNode joinNode) throws SQLException { - TableNode lhs = joinNode.getLHS(); - TableNode rhs = joinNode.getRHS(); - TableNode lhsReplace = lhs.accept(this); - TableNode rhsReplace = rhs.accept(this); - if (lhs == lhsReplace && rhs == rhsReplace) - return joinNode; - - return FACTORY.join(joinNode.getType(), lhsReplace, rhsReplace, joinNode.getOnNode(), joinNode.isSingleValueOnly()); - } + private TableName getReplacedTableName(TableRef tableRef) { + String schemaName = tableRef.getTable().getSchemaName().getString(); + return TableName.create(schemaName.length() == 0 ? null : schemaName, + tableRef.getTable().getTableName().getString()); + } - @Override - public TableNode visit(NamedTableNode namedTableNode) - throws SQLException { - TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName()); - TableRef replaceRef = replacement.get(tableRef); - if (replaceRef == null) - return namedTableNode; - - String alias = namedTableNode.getAlias(); - return FACTORY.namedTable(alias == null ? null : '"' + alias + '"', getReplacedTableName(replaceRef), namedTableNode.getDynamicColumns(), namedTableNode.getTableSamplingRate()); - } + @Override + public TableNode visit(BindTableNode boundTableNode) throws SQLException { + TableRef tableRef = resolveTable(boundTableNode.getAlias(), boundTableNode.getName()); + TableRef replaceRef = replacement.get(tableRef); + if (replaceRef == null) return boundTableNode; - @Override - public TableNode visit(DerivedTableNode subselectNode) - throws SQLException { - return subselectNode; - } + String alias = boundTableNode.getAlias(); + return FACTORY.bindTable(alias == null ? 
null : '"' + alias + '"', + getReplacedTableName(replaceRef)); + } + + @Override + public TableNode visit(JoinTableNode joinNode) throws SQLException { + TableNode lhs = joinNode.getLHS(); + TableNode rhs = joinNode.getRHS(); + TableNode lhsReplace = lhs.accept(this); + TableNode rhsReplace = rhs.accept(this); + if (lhs == lhsReplace && rhs == rhsReplace) return joinNode; + + return FACTORY.join(joinNode.getType(), lhsReplace, rhsReplace, joinNode.getOnNode(), + joinNode.isSingleValueOnly()); + } + + @Override + public TableNode visit(NamedTableNode namedTableNode) throws SQLException { + TableRef tableRef = resolveTable(namedTableNode.getAlias(), namedTableNode.getName()); + TableRef replaceRef = replacement.get(tableRef); + if (replaceRef == null) return namedTableNode; + + String alias = namedTableNode.getAlias(); + return FACTORY.namedTable(alias == null ? null : '"' + alias + '"', + getReplacedTableName(replaceRef), namedTableNode.getDynamicColumns(), + namedTableNode.getTableSamplingRate()); + } + + @Override + public TableNode visit(DerivedTableNode subselectNode) throws SQLException { + return subselectNode; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java index 32b4ebe0d3f..71ea790f14a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddColumnStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,39 +21,44 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.schema.PTableType; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; public class AddColumnStatement extends AlterTableStatement { - private final List columnDefs; - private final boolean ifNotExists; - private final ListMultimap> props; - private final boolean cascade; - private final List indexes; - - protected AddColumnStatement(NamedTableNode table, PTableType tableType, List columnDefs, boolean ifNotExists, ListMultimap> props, boolean cascade, List indexes) { - super(table, tableType); - this.columnDefs = columnDefs; - this.props = props == null ? ImmutableListMultimap.>of() : props; - this.ifNotExists = ifNotExists; - this.cascade = cascade; - this.indexes = indexes; - } - - public List getColumnDefs() { - return columnDefs; - } - - public boolean ifNotExists() { - return ifNotExists; - } - - public ListMultimap> getProps() { - return props; - } - - public boolean isCascade() { return cascade; } - - public List getIndexes() { return indexes; } -} \ No newline at end of file + private final List columnDefs; + private final boolean ifNotExists; + private final ListMultimap> props; + private final boolean cascade; + private final List indexes; + + protected AddColumnStatement(NamedTableNode table, PTableType tableType, + List columnDefs, boolean ifNotExists, + ListMultimap> props, boolean cascade, List indexes) { + super(table, tableType); + this.columnDefs = columnDefs; + this.props = props == null ? 
ImmutableListMultimap.> of() : props; + this.ifNotExists = ifNotExists; + this.cascade = cascade; + this.indexes = indexes; + } + + public List getColumnDefs() { + return columnDefs; + } + + public boolean ifNotExists() { + return ifNotExists; + } + + public ListMultimap> getProps() { + return props; + } + + public boolean isCascade() { + return cascade; + } + + public List getIndexes() { + return indexes; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddJarsStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddJarsStatement.java index b1eeea6e54c..77b5ba0f61b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddJarsStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddJarsStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,19 +20,19 @@ import java.util.List; public class AddJarsStatement extends MutableStatement { - - List jarPaths; - public AddJarsStatement(List jarPaths) { - this.jarPaths = jarPaths; - } + List jarPaths; - @Override - public int getBindCount() { - return 0; - } + public AddJarsStatement(List jarPaths) { + this.jarPaths = jarPaths; + } - public List getJarPaths() { - return jarPaths; - } + @Override + public int getBindCount() { + return 0; + } + + public List getJarPaths() { + return jarPaths; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddParseNode.java index fa04a55b4c8..c496a88df70 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AddParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,33 +21,28 @@ import java.util.Collections; import java.util.List; - - /** - * * Node representing addition in a SQL expression - * - * * @since 0.1 */ public class AddParseNode extends ArithmeticParseNode { - public static final String OPERATOR = "+"; + public static final String OPERATOR = "+"; - @Override - public String getOperator() { - return OPERATOR; - } - - AddParseNode(List children) { - super(children); - } + @Override + public String getOperator() { + return OPERATOR; + } + + AddParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionParseNode.java index 586a1a0bb61..853f809f300 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,19 +19,19 @@ import java.util.List; - public class AggregateFunctionParseNode extends FunctionParseNode { - public AggregateFunctionParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - /** - * Aggregate function are not stateless, even though all the args may be stateless, - * for example, COUNT(1) - */ - @Override - public boolean isStateless() { - return false; - } + public AggregateFunctionParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } + + /** + * Aggregate function are not stateless, even though all the args may be stateless, for example, + * COUNT(1) + */ + @Override + public boolean isStateless() { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java index 5c329085dc9..be7b6a6be80 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AggregateFunctionWithinGroupParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,30 +23,30 @@ public class AggregateFunctionWithinGroupParseNode extends AggregateFunctionParseNode { - public AggregateFunctionWithinGroupParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - + public AggregateFunctionWithinGroupParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append(getName()); - buf.append('('); - List children = getChildren(); - List args = children.subList(2, children.size()); - if (!args.isEmpty()) { - for (ParseNode child : args) { - child.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - buf.append(')'); - - buf.append(" WITHIN GROUP (ORDER BY "); - children.get(0).toSQL(resolver, buf); - buf.append(" " + (LiteralParseNode.TRUE.equals(children.get(1)) ? "ASC" : "DESC")); - buf.append(')'); + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append(getName()); + buf.append('('); + List children = getChildren(); + List args = children.subList(2, children.size()); + if (!args.isEmpty()) { + for (ParseNode child : args) { + child.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(')'); + + buf.append(" WITHIN GROUP (ORDER BY "); + children.get(0).toSQL(resolver, buf); + buf.append(" " + (LiteralParseNode.TRUE.equals(children.get(1)) ? "ASC" : "DESC")); + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AliasedNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AliasedNode.java index 807a01f53d1..42ebc256d4c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AliasedNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AliasedNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,66 +21,63 @@ import org.apache.phoenix.util.SchemaUtil; /** - * * Node representing an aliased parse node in a SQL select clause - * - * * @since 0.1 */ public class AliasedNode { - private final String alias; - private final ParseNode node; - private final boolean isCaseSensitve; + private final String alias; + private final ParseNode node; + private final boolean isCaseSensitve; - public AliasedNode(String alias, ParseNode node) { - this.isCaseSensitve = alias != null && SchemaUtil.isCaseSensitive(alias); - this.alias = alias == null ? null : SchemaUtil.normalizeIdentifier(alias); - this.node = node; - } + public AliasedNode(String alias, ParseNode node) { + this.isCaseSensitve = alias != null && SchemaUtil.isCaseSensitive(alias); + this.alias = alias == null ? 
null : SchemaUtil.normalizeIdentifier(alias); + this.node = node; + } - public String getAlias() { - return alias; - } + public String getAlias() { + return alias; + } - public ParseNode getNode() { - return node; - } + public ParseNode getNode() { + return node; + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - node.toSQL(resolver, buf); - if (alias != null) { - buf.append(' '); - if (isCaseSensitve) buf.append('"'); - buf.append(alias); - if (isCaseSensitve) buf.append('"'); - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((alias == null) ? 0 : alias.hashCode()); - result = prime * result + ((node == null) ? 0 : node.hashCode()); - return result; + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + node.toSQL(resolver, buf); + if (alias != null) { + buf.append(' '); + if (isCaseSensitve) buf.append('"'); + buf.append(alias); + if (isCaseSensitve) buf.append('"'); } + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - AliasedNode other = (AliasedNode)obj; - if (alias == null) { - if (other.alias != null) return false; - } else if (!alias.equals(other.alias)) return false; - if (node == null) { - if (other.node != null) return false; - } else if (!node.equals(other.node)) return false; - return true; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((alias == null) ? 0 : alias.hashCode()); + result = prime * result + ((node == null) ? 0 : node.hashCode()); + return result; + } - public boolean isCaseSensitve() { - return isCaseSensitve; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + AliasedNode other = (AliasedNode) obj; + if (alias == null) { + if (other.alias != null) return false; + } else if (!alias.equals(other.alias)) return false; + if (node == null) { + if (other.node != null) return false; + } else if (!node.equals(other.node)) return false; + return true; + } + + public boolean isCaseSensitve() { + return isCaseSensitve; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java index 32a3c042c5a..4c9c79bdba7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterIndexStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,61 +17,68 @@ */ package org.apache.phoenix.parse; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; public class AlterIndexStatement extends SingleTableStatement { - private final String dataTableName; - private final boolean ifExists; - private final PIndexState indexState; - private boolean async; - private boolean isRebuildAll; - private ListMultimap> props; - private static final PTableType tableType=PTableType.INDEX; + private final String dataTableName; + private final boolean ifExists; + private final PIndexState indexState; + private boolean async; + private boolean isRebuildAll; + private ListMultimap> props; + private static final PTableType tableType = PTableType.INDEX; - public AlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState indexState, boolean isRebuildAll, boolean async) { - this(indexTableNode,dataTableName,ifExists,indexState, isRebuildAll, async,null); - } + public AlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, + PIndexState indexState, boolean isRebuildAll, boolean async) { + this(indexTableNode, dataTableName, ifExists, indexState, isRebuildAll, async, null); + } - public AlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState indexState, boolean isRebuildAll, boolean async, ListMultimap> props) { - super(indexTableNode,0); - this.dataTableName = dataTableName; - this.ifExists = ifExists; - this.indexState = indexState; - this.async = async; - this.isRebuildAll = isRebuildAll; - this.props= props==null ? ImmutableListMultimap.>of() : props; - } + public AlterIndexStatement(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, + PIndexState indexState, boolean isRebuildAll, boolean async, + ListMultimap> props) { + super(indexTableNode, 0); + this.dataTableName = dataTableName; + this.ifExists = ifExists; + this.indexState = indexState; + this.async = async; + this.isRebuildAll = isRebuildAll; + this.props = props == null ? 
ImmutableListMultimap.> of() : props; + } - public String getTableName() { - return dataTableName; - } + public String getTableName() { + return dataTableName; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - public boolean ifExists() { - return ifExists; - } + public boolean ifExists() { + return ifExists; + } - public PIndexState getIndexState() { - return indexState; - } + public PIndexState getIndexState() { + return indexState; + } - public boolean isAsync() { - return async; - } + public boolean isAsync() { + return async; + } - public boolean isRebuildAll() { - return isRebuildAll; - } + public boolean isRebuildAll() { + return isRebuildAll; + } - public ListMultimap> getProps() { return props; } + public ListMultimap> getProps() { + return props; + } - public PTableType getTableType(){ return tableType; } + public PTableType getTableType() { + return tableType; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java index 5d944dfe8a5..c78c9575904 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterSessionStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,18 +21,18 @@ public class AlterSessionStatement extends MutableStatement { - private final Map props; + private final Map props; - public AlterSessionStatement(Map props) { - this.props = props; - } + public AlterSessionStatement(Map props) { + this.props = props; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - public Map getProps(){ - return props; - } + public Map getProps() { + return props; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java index a3340119439..b3e08457757 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AlterTableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,14 +20,14 @@ import org.apache.phoenix.schema.PTableType; public abstract class AlterTableStatement extends SingleTableStatement { - private final PTableType tableType; + private final PTableType tableType; - AlterTableStatement(NamedTableNode table, PTableType tableType) { - super(table, 0); - this.tableType = tableType; - } + AlterTableStatement(NamedTableNode table, PTableType tableType) { + super(table, 0); + this.tableType = tableType; + } - public PTableType getTableType() { - return tableType; - } + public PTableType getTableType() { + return tableType; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndBooleanParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndBooleanParseNodeVisitor.java index 2b9b81603bc..80e89547b22 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndBooleanParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndBooleanParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,18 @@ */ public abstract class AndBooleanParseNodeVisitor extends BooleanParseNodeVisitor { - @Override - protected boolean enterBooleanNode(ParseNode parseNode) throws SQLException { - return false; - } + @Override + protected boolean enterBooleanNode(ParseNode parseNode) throws SQLException { + return false; + } - @Override - protected boolean enterNonBooleanNode(ParseNode parseNode) throws SQLException { - return false; - } + @Override + protected boolean enterNonBooleanNode(ParseNode parseNode) throws SQLException { + return false; + } - @Override - public boolean visitEnter(AndParseNode andParseNode) throws SQLException { - return true; - } + @Override + public boolean visitEnter(AndParseNode andParseNode) throws SQLException { + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndParseNode.java index 3c333c478f8..9f20685ee89 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,41 +23,35 @@ import org.apache.phoenix.compile.ColumnResolver; - - - /** - * * Node representing AND in a SQL expression - * - * * @since 0.1 */ public class AndParseNode extends CompoundParseNode { - public static final String NAME = "AND"; + public static final String NAME = "AND"; - AndParseNode(List children) { - super(children); - } + AndParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - for (int i = 1 ; i < children.size(); i++) { - buf.append(" " + NAME + " "); - children.get(i).toSQL(resolver, buf); - } - buf.append(')'); + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + for (int i = 1; i < children.size(); i++) { + buf.append(" " + NAME + " "); + children.get(i).toSQL(resolver, buf); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java index 762181e0c24..d7b371e8cb5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AndRewriterBooleanParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,50 +23,53 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * Base visitor for rewrite {@link ParseNode},only further visit down for {@link AndParseNode}. - * A example is org.apache.phoenix.optimize.QueryOptimizer.WhereConditionRewriter, which - * rewrites columns in dataTable to columns in indexTable, and removes parseNodes which have - * columns not in indexTable. + * Base visitor for rewrite {@link ParseNode},only further visit down for {@link AndParseNode}. A + * example is org.apache.phoenix.optimize.QueryOptimizer.WhereConditionRewriter, which rewrites + * columns in dataTable to columns in indexTable, and removes parseNodes which have columns not in + * indexTable. 
*/ -public abstract class AndRewriterBooleanParseNodeVisitor extends AndBooleanParseNodeVisitor{ +public abstract class AndRewriterBooleanParseNodeVisitor + extends AndBooleanParseNodeVisitor { - private final ParseNodeFactory parseNodeFactory ; + private final ParseNodeFactory parseNodeFactory; - public AndRewriterBooleanParseNodeVisitor(ParseNodeFactory parseNodeFactory) { - this.parseNodeFactory = parseNodeFactory; - } - - @Override - public List newElementList(int size) { - return Lists. newArrayListWithExpectedSize(size); - } + public AndRewriterBooleanParseNodeVisitor(ParseNodeFactory parseNodeFactory) { + this.parseNodeFactory = parseNodeFactory; + } - @Override - public void addElement(List childParseNodeResults, ParseNode newChildParseNode) { - if (newChildParseNode != null) { - childParseNodeResults.add(newChildParseNode); - } - } + @Override + public List newElementList(int size) { + return Lists. newArrayListWithExpectedSize(size); + } - @Override - protected ParseNode leaveNonBooleanNode(ParseNode parentParseNode, List childParseNodes) throws SQLException { - return parentParseNode; + @Override + public void addElement(List childParseNodeResults, ParseNode newChildParseNode) { + if (newChildParseNode != null) { + childParseNodeResults.add(newChildParseNode); } + } - @Override - public ParseNode visitLeave(AndParseNode andParseNode, List childParseNodes) throws SQLException { - if (childParseNodes.equals(andParseNode.getChildren())) { - return andParseNode; - } + @Override + protected ParseNode leaveNonBooleanNode(ParseNode parentParseNode, + List childParseNodes) throws SQLException { + return parentParseNode; + } - if (childParseNodes.isEmpty()) { - return null; - } + @Override + public ParseNode visitLeave(AndParseNode andParseNode, List childParseNodes) + throws SQLException { + if (childParseNodes.equals(andParseNode.getChildren())) { + return andParseNode; + } - if (childParseNodes.size() == 1) { - return childParseNodes.get(0); - } + if (childParseNodes.isEmpty()) { + return null; + } - return this.parseNodeFactory.and(childParseNodes); + if (childParseNodes.size() == 1) { + return childParseNodes.get(0); } + + return this.parseNodeFactory.and(childParseNodes); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java index 1a2f1708587..4bb33548c0c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArithmeticParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,21 +23,21 @@ public abstract class ArithmeticParseNode extends CompoundParseNode { - public ArithmeticParseNode(List children) { - super(children); - } + public ArithmeticParseNode(List children) { + super(children); + } + + public abstract String getOperator(); - public abstract String getOperator(); - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - for (int i = 1 ; i < children.size(); i++) { - buf.append(" " + getOperator() + " "); - children.get(i).toSQL(resolver, buf); - } - buf.append(')'); + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + for (int i = 1; i < children.size(); i++) { + buf.append(" " + getOperator() + " "); + children.get(i).toSQL(resolver, buf); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java index 18e5535829e..0e12781b397 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllAnyComparisonNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,25 +25,25 @@ public abstract class ArrayAllAnyComparisonNode extends CompoundParseNode { - public ArrayAllAnyComparisonNode(List children) { - super(children); - } + public ArrayAllAnyComparisonNode(List children) { + super(children); + } - public abstract String getType(); + public abstract String getType(); - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - ParseNode rhs = children.get(0); - ComparisonParseNode comp = (ComparisonParseNode)children.get(1); - ParseNode lhs = comp.getLHS(); - CompareOperator op = comp.getFilterOp(); - buf.append(' '); - lhs.toSQL(resolver, buf); - buf.append(" " + QueryUtil.toSQL(op) + " "); - buf.append(getType()); - buf.append('('); - rhs.toSQL(resolver, buf); - buf.append(')'); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + ParseNode rhs = children.get(0); + ComparisonParseNode comp = (ComparisonParseNode) children.get(1); + ParseNode lhs = comp.getLHS(); + CompareOperator op = comp.getFilterOp(); + buf.append(' '); + lhs.toSQL(resolver, buf); + buf.append(" " + QueryUtil.toSQL(op) + " "); + buf.append(getType()); + buf.append('('); + rhs.toSQL(resolver, buf); + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java index 98371a59d8a..b6963ca43f5 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAllComparisonNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,27 +21,27 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -/** - * The Expression a = ALL(b) where b is of type array is rewritten in this - * node as ALL(a = b(n)) + +/** + * The Expression a = ALL(b) where b is of type array is rewritten in this node as ALL(a = b(n)) */ public class ArrayAllComparisonNode extends ArrayAllAnyComparisonNode { - ArrayAllComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) { - super(Arrays.asList(rhs, compareNode)); - } - - @Override - public String getType() { - return "ALL"; - } + ArrayAllComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) { + super(Arrays. asList(rhs, compareNode)); + } + + @Override + public String getType() { + return "ALL"; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java index a4662b50d93..724f76dd59d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayAnyComparisonNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,27 +21,27 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -/** - * The Expression a = ANY(b) where b is of type array is rewritten in this - * node as ANY(a = b(n)) + +/** + * The Expression a = ANY(b) where b is of type array is rewritten in this node as ANY(a = b(n)) */ public class ArrayAnyComparisonNode extends ArrayAllAnyComparisonNode { - ArrayAnyComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) { - super(Arrays.asList(rhs, compareNode)); - } - - @Override - public String getType() { - return "ANY"; - } + ArrayAnyComparisonNode(ParseNode rhs, ComparisonParseNode compareNode) { + super(Arrays. 
asList(rhs, compareNode)); + } + + @Override + public String getType() { + return "ANY"; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java index 9b6a6be75b1..48e5afdf1a0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayConstructorNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,35 +25,34 @@ import org.apache.phoenix.schema.types.PArrayDataType; /** - * Holds the list of array elements that will be used by the upsert stmt with ARRAY column - * + * Holds the list of array elements that will be used by the upsert stmt with ARRAY column */ public class ArrayConstructorNode extends CompoundParseNode { - public ArrayConstructorNode(List children) { - super(children); - } + public ArrayConstructorNode(List children) { + super(children); + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); + } + return visitor.visitLeave(this, l); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append(PArrayDataType.ARRAY_TYPE_SUFFIX); - buf.append('['); - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - for (int i = 1 ; i < children.size(); i++) { - buf.append(','); - children.get(i).toSQL(resolver, buf); - } - buf.append(']'); + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append(PArrayDataType.ARRAY_TYPE_SUFFIX); + buf.append('['); + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + for (int i = 1; i < children.size(); i++) { + buf.append(','); + children.get(i).toSQL(resolver, buf); } + buf.append(']'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java index b3c4ad9fd47..0a75975504b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayElemRefNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,25 +25,25 @@ public class ArrayElemRefNode extends CompoundParseNode { - public ArrayElemRefNode(List parseNode) { - super(parseNode); - } + public ArrayElemRefNode(List parseNode) { + super(parseNode); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - buf.append('['); - children.get(1).toSQL(resolver, buf); - buf.append(']'); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + buf.append('['); + children.get(1).toSQL(resolver, buf); + buf.append(']'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayModifierParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayModifierParseNode.java index 2ec1e2714f9..82ac2047cee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayModifierParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ArrayModifierParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,15 +22,14 @@ import org.apache.phoenix.compile.StatementContext; - public class ArrayModifierParseNode extends FunctionParseNode { - public ArrayModifierParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public ArrayModifierParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { - return false; - } + @Override + public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AvgAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AvgAggregateParseNode.java index 776aa7642b1..6e30eb846d2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AvgAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/AvgAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,23 +24,25 @@ import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.*; - public class AvgAggregateParseNode extends AggregateFunctionParseNode { - public AvgAggregateParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - SumAggregateFunction sumFunc; - CountAggregateFunction countFunc = (CountAggregateFunction)context.getExpressionManager().addIfAbsent(new CountAggregateFunction(children)); - if (!countFunc.isConstantExpression()) { - sumFunc = (SumAggregateFunction)context.getExpressionManager().addIfAbsent(new SumAggregateFunction(countFunc.getChildren(),null)); - } else { - sumFunc = null; - } + public AvgAggregateParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - return new AvgAggregateFunction(children, countFunc, sumFunc); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + SumAggregateFunction sumFunc; + CountAggregateFunction countFunc = (CountAggregateFunction) context.getExpressionManager() + .addIfAbsent(new CountAggregateFunction(children)); + if (!countFunc.isConstantExpression()) { + sumFunc = (SumAggregateFunction) context.getExpressionManager() + .addIfAbsent(new SumAggregateFunction(countFunc.getChildren(), null)); + } else { + sumFunc = null; } + + return new AvgAggregateFunction(children, countFunc, sumFunc); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BaseParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BaseParseNodeVisitor.java index e111d609fd9..905af0a1248 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BaseParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BaseParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,48 +21,41 @@ import java.sql.SQLFeatureNotSupportedException; import java.util.List; - - /** - * * Base class for parse node visitors. - * - * * @since 0.1 */ public abstract class BaseParseNodeVisitor implements ParseNodeVisitor { - /** - * Fall through visitEnter method. Anything coming through - * here means that a more specific method wasn't found - * and thus this CompoundNode is not yet supported. - */ - @Override - public boolean visitEnter(CompoundParseNode expressionNode) throws SQLException { - throw new SQLFeatureNotSupportedException(expressionNode.toString()); - } + /** + * Fall through visitEnter method. Anything coming through here means that a more specific method + * wasn't found and thus this CompoundNode is not yet supported. 
+ */ + @Override + public boolean visitEnter(CompoundParseNode expressionNode) throws SQLException { + throw new SQLFeatureNotSupportedException(expressionNode.toString()); + } + + @Override + public E visitLeave(CompoundParseNode expressionNode, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(expressionNode.toString()); + } + + /** + * Fall through visit method. Anything coming through here means that a more specific method + * wasn't found and thus this Node is not yet supported. + */ + @Override + public E visit(ParseNode expressionNode) throws SQLException { + throw new SQLFeatureNotSupportedException(expressionNode.toString()); + } - @Override - public E visitLeave(CompoundParseNode expressionNode, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(expressionNode.toString()); - } + @Override + public List newElementList(int size) { + return null; + } - /** - * Fall through visit method. Anything coming through - * here means that a more specific method wasn't found - * and thus this Node is not yet supported. - */ - @Override - public E visit(ParseNode expressionNode) throws SQLException { - throw new SQLFeatureNotSupportedException(expressionNode.toString()); - } - - @Override - public List newElementList(int size) { - return null; - } - - @Override - public void addElement(List l, E element) { - } + @Override + public void addElement(List l, E element) { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java index 6d8211722db..42b6aac2c9b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BetweenParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,67 +24,57 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing BETWEEN in SQL - * - * * @since 0.1 */ public class BetweenParseNode extends CompoundParseNode { - private final boolean negate; + private final boolean negate; - BetweenParseNode(ParseNode l, ParseNode r1, ParseNode r2, boolean negate) { - super(Arrays.asList(l, r1, r2)); - this.negate = negate; - } - - public boolean isNegate() { - return negate; - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + BetweenParseNode(ParseNode l, ParseNode r1, ParseNode r2, boolean negate) { + super(Arrays.asList(l, r1, r2)); + this.negate = negate; + } + + public boolean isNegate() { + return negate; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (negate ? 
1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (negate ? 1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - BetweenParseNode other = (BetweenParseNode) obj; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + BetweenParseNode other = (BetweenParseNode) obj; + if (negate != other.negate) return false; + return true; + } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - if (negate) buf.append(" NOT"); - buf.append(" BETWEEN "); - children.get(1).toSQL(resolver, buf); - buf.append(" AND "); - children.get(2).toSQL(resolver, buf); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + if (negate) buf.append(" NOT"); + buf.append(" BETWEEN "); + children.get(1).toSQL(resolver, buf); + buf.append(" AND "); + children.get(2).toSQL(resolver, buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BinaryParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BinaryParseNode.java index 3ffd776bad0..ce05472107f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BinaryParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BinaryParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,23 +20,20 @@ import java.util.Arrays; /** - * * Abstract class for operators that operate on exactly two nodes - * - * * @since 0.1 */ public abstract class BinaryParseNode extends CompoundParseNode { - BinaryParseNode(ParseNode lhs, ParseNode rhs) { - super(Arrays.asList(lhs, rhs)); - } + BinaryParseNode(ParseNode lhs, ParseNode rhs) { + super(Arrays.asList(lhs, rhs)); + } + + public ParseNode getLHS() { + return getChildren().get(0); + } - public ParseNode getLHS() { - return getChildren().get(0); - } - - public ParseNode getRHS() { - return getChildren().get(1); - } + public ParseNode getRHS() { + return getChildren().get(1); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindParseNode.java index 42e42bf40fe..a786a52cba7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,63 +21,53 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing a bind variable in a SQL expression - * - * * @since 0.1 */ public class BindParseNode extends NamedParseNode { - private final int index; - - BindParseNode(String name) { - super(name); - index = Integer.parseInt(name); - } - - public int getIndex() { - return index-1; - } + private final int index; + + BindParseNode(String name) { + super(name); + index = Integer.parseInt(name); + } + + public int getIndex() { + return index - 1; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + @Override + public boolean isStateless() { + return true; + } - - @Override - public boolean isStateless() { - return true; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + index; - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + index; + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - BindParseNode other = (BindParseNode) obj; - if (index != other.index) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + BindParseNode other = (BindParseNode) obj; + if (index != other.index) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(':'); - buf.append(index); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(':'); + buf.append(index); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindTableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindTableNode.java index 3895dd1f598..02c9dd17bf5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindTableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindTableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,31 +21,25 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * - * Node representing a TABLE bound using an ARRAY variable - * TODO: modify grammar to support this - * + * Node representing a TABLE bound using an ARRAY variable TODO: modify grammar to support this * @since 0.1 */ public class BindTableNode extends ConcreteTableNode { - BindTableNode(String alias, TableName name) { - super(alias, name); - } - - @Override - public T accept(TableNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(this.getName().toString()); - if (this.getAlias() != null) buf.append(" " + this.getAlias()); - buf.append(' '); - } + BindTableNode(String alias, TableName name) { + super(alias, name); + } + + @Override + public T accept(TableNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(this.getName().toString()); + if (this.getAlias() != null) buf.append(" " + this.getAlias()); + buf.append(' '); + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindableStatement.java index 6594f49bb3c..64f0241d4e0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BindableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; - public interface BindableStatement { - public int getBindCount(); - public Operation getOperation(); + public int getBindCount(); + + public Operation getOperation(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BooleanParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BooleanParseNodeVisitor.java index 0d6feda8392..f42e5eedb1c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BooleanParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BooleanParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,287 +22,282 @@ public abstract class BooleanParseNodeVisitor extends BaseParseNodeVisitor { - protected abstract boolean enterBooleanNode(ParseNode node) throws SQLException; - protected abstract T leaveBooleanNode(ParseNode node, List l) throws SQLException; - protected abstract boolean enterNonBooleanNode(ParseNode node) throws SQLException; - protected abstract T leaveNonBooleanNode(ParseNode node, List l) throws SQLException; - - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(LikeParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(OrParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(FunctionParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(ComparisonParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(ComparisonParseNode node, List l) - throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(CaseParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(CaseParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(AddParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(AddParseNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(MultiplyParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(MultiplyParseNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(ModulusParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(ModulusParseNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(DivideParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(DivideParseNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(SubtractParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(SubtractParseNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(NotParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(NotParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean 
visitEnter(ExistsParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(ExistsParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(InListParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(InParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(InParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(IsNullParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(IsNullParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public T visit(ColumnParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(LiteralParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(BindParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(WildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(TableWildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(FamilyWildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(SubqueryParseNode node) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(StringConcatParseNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(StringConcatParseNode node, List l) - throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(BetweenParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(BetweenParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(CastParseNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(CastParseNode node, List l) throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(RowValueConstructorParseNode node) - throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(RowValueConstructorParseNode node, List l) - throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public boolean visitEnter(ArrayConstructorNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(ArrayConstructorNode node, List l) - throws SQLException { - return leaveNonBooleanNode(node, l); - } - - @Override - public T visit(SequenceValueParseNode node) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(ArrayAllComparisonNode node, List l) - throws SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { - return enterBooleanNode(node); - } - - @Override - public T visitLeave(ArrayAnyComparisonNode node, List l) - throws 
SQLException { - return leaveBooleanNode(node, l); - } - - @Override - public boolean visitEnter(ArrayElemRefNode node) throws SQLException { - return enterNonBooleanNode(node); - } - - @Override - public T visitLeave(ArrayElemRefNode node, List l) throws SQLException { - return leaveNonBooleanNode(node, l); - } + protected abstract boolean enterBooleanNode(ParseNode node) throws SQLException; + + protected abstract T leaveBooleanNode(ParseNode node, List l) throws SQLException; + + protected abstract boolean enterNonBooleanNode(ParseNode node) throws SQLException; + + protected abstract T leaveNonBooleanNode(ParseNode node, List l) throws SQLException; + + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(LikeParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(OrParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(FunctionParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(ComparisonParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(ComparisonParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(CaseParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(CaseParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(AddParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(AddParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(MultiplyParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(MultiplyParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(ModulusParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(ModulusParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(DivideParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(DivideParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(SubtractParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(SubtractParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(NotParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(NotParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(ExistsParseNode node) throws SQLException { + return 
enterBooleanNode(node); + } + + @Override + public T visitLeave(ExistsParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(InListParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(InParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(InParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(IsNullParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(IsNullParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public T visit(ColumnParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(LiteralParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(BindParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(WildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(TableWildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(FamilyWildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(SubqueryParseNode node) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(StringConcatParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(StringConcatParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(BetweenParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(BetweenParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(CastParseNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(CastParseNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public boolean visitEnter(ArrayConstructorNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(ArrayConstructorNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } + + @Override + public T visit(SequenceValueParseNode node) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { + return enterBooleanNode(node); + } + + @Override + public T visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + return leaveBooleanNode(node, l); + } + + @Override + public 
boolean visitEnter(ArrayElemRefNode node) throws SQLException { + return enterNonBooleanNode(node); + } + + @Override + public T visitLeave(ArrayElemRefNode node, List l) throws SQLException { + return leaveNonBooleanNode(node, l); + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonConditionExpressionParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonConditionExpressionParseNode.java index 7fd55b8077e..548156019df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonConditionExpressionParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonConditionExpressionParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,26 +22,25 @@ import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.BsonConditionExpressionFunction; -import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.schema.types.PBson; +import org.apache.phoenix.schema.types.PDataType; public class BsonConditionExpressionParseNode extends FunctionParseNode { - public BsonConditionExpressionParseNode(String name, List children, - BuiltInFunctionInfo info) { - super(name, children, info); - } + public BsonConditionExpressionParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PBson.INSTANCE)) { - throw new SQLException( - dataType + " type is unsupported for BSON_CONDITION_EXPRESSION()."); - } - return new BsonConditionExpressionFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PBson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for BSON_CONDITION_EXPRESSION()."); } + return new BsonConditionExpressionFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonUpdateExpressionParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonUpdateExpressionParseNode.java index 7bb06cd4b73..afc3d4b74ca 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonUpdateExpressionParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonUpdateExpressionParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,19 +29,18 @@ public class BsonUpdateExpressionParseNode extends FunctionParseNode { - public BsonUpdateExpressionParseNode(String name, List children, - BuiltInFunctionInfo info) { - super(name, children, info); - } + public BsonUpdateExpressionParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PBson.INSTANCE)) { - throw new SQLException( - dataType + " type is unsupported for BSON_CONDITION_EXPRESSION()."); - } - return new BsonUpdateExpressionFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PBson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for BSON_CONDITION_EXPRESSION()."); } + return new BsonUpdateExpressionFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonValueParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonValueParseNode.java index 9be1e51fa0a..9b226ca7f40 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonValueParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/BsonValueParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,17 +30,17 @@ public class BsonValueParseNode extends FunctionParseNode { - public BsonValueParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public BsonValueParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PJson.INSTANCE) && !dataType.isCoercibleTo(PBson.INSTANCE)) { - throw new SQLException(dataType + " type is unsupported for BSON_VALUE()."); - } - return new BsonValueFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PJson.INSTANCE) && !dataType.isCoercibleTo(PBson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for BSON_VALUE()."); } + return new BsonValueFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CaseParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CaseParseNode.java index 9467e687adf..6bc9b762c60 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CaseParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CaseParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,45 +23,39 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing a CASE in SQL - * - * * @since 0.1 */ public class CaseParseNode extends CompoundParseNode { - CaseParseNode(List children) { - super(children); - } + CaseParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } - - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append("CASE "); - List children = getChildren(); - for (int i = 0; i < children.size() - 1; i+=2) { - buf.append("WHEN "); - children.get(i+1).toSQL(resolver, buf); - buf.append(" THEN "); - children.get(i).toSQL(resolver, buf); - } - if (children.size() % 2 != 0) { // has ELSE - buf.append(" ELSE "); - children.get(children.size()-1).toSQL(resolver, buf); - } - buf.append(" END "); + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append("CASE "); + List children = getChildren(); + for (int i = 0; i < children.size() - 1; i += 2) { + buf.append("WHEN "); + children.get(i + 1).toSQL(resolver, buf); + buf.append(" THEN "); + children.get(i).toSQL(resolver, buf); + } + if (children.size() % 2 != 0) { // has ELSE + buf.append(" ELSE "); + children.get(children.size() - 1).toSQL(resolver, buf); } + buf.append(" END "); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CastParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CastParseNode.java index 3e03613eb88..d6160a9d608 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CastParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CastParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,114 +26,101 @@ import org.apache.phoenix.util.SchemaUtil; /** - * * Node representing the CAST operator in SQL. 
- * - * * @since 0.1 - * */ public class CastParseNode extends UnaryParseNode { - private final PDataType dt; - private final Integer maxLength; - private final Integer scale; - - CastParseNode(ParseNode expr, String dataType, Integer maxLength, Integer scale, boolean arr) { - this(expr, PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(dataType)), maxLength, scale, arr); - } + private final PDataType dt; + private final Integer maxLength; + private final Integer scale; - CastParseNode(ParseNode expr, PDataType dataType, Integer maxLength, Integer scale, boolean arr) { - super(expr); - if (arr == true) { - dt = PDataType.fromTypeId(dataType.getSqlType() + PDataType.ARRAY_TYPE_BASE); - } else { - dt = dataType; - } - this.maxLength = maxLength; - this.scale = scale; - } + CastParseNode(ParseNode expr, String dataType, Integer maxLength, Integer scale, boolean arr) { + this(expr, PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(dataType)), maxLength, + scale, arr); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + CastParseNode(ParseNode expr, PDataType dataType, Integer maxLength, Integer scale, boolean arr) { + super(expr); + if (arr == true) { + dt = PDataType.fromTypeId(dataType.getSqlType() + PDataType.ARRAY_TYPE_BASE); + } else { + dt = dataType; } + this.maxLength = maxLength; + this.scale = scale; + } - public PDataType getDataType() { - return dt; - } - - public Integer getMaxLength() { - return maxLength; + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - public Integer getScale() { - return scale; - } + public PDataType getDataType() { + return dt; + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((dt == null) ? 0 : dt.hashCode()); - result = prime * result - + ((maxLength == null) ? 0 : maxLength.hashCode()); - result = prime * result + ((scale == null) ? 0 : scale.hashCode()); - return result; - } + public Integer getMaxLength() { + return maxLength; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - CastParseNode other = (CastParseNode) obj; - if (dt == null) { - if (other.dt != null) - return false; - } else if (!dt.equals(other.dt)) - return false; - if (maxLength == null) { - if (other.maxLength != null) - return false; - } else if (!maxLength.equals(other.maxLength)) - return false; - if (scale == null) { - if (other.scale != null) - return false; - } else if (!scale.equals(other.scale)) - return false; - return true; - } + public Integer getScale() { + return scale; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - buf.append(" CAST("); - children.get(0).toSQL(resolver, buf); - buf.append(" AS "); - boolean isArray = dt.isArrayType(); - PDataType type = isArray ? PDataType.arrayBaseType(dt) : dt; - buf.append(type.getSqlTypeName()); - if (maxLength != null) { - buf.append('('); - buf.append(maxLength); - if (scale != null) { - buf.append(','); - buf.append(scale); // has both max length and scale. 
For ex- decimal(10,2) - } - buf.append(')'); - } - if (isArray) { - buf.append(' '); - buf.append(PDataType.ARRAY_TYPE_SUFFIX); - } - buf.append(")"); + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((dt == null) ? 0 : dt.hashCode()); + result = prime * result + ((maxLength == null) ? 0 : maxLength.hashCode()); + result = prime * result + ((scale == null) ? 0 : scale.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + CastParseNode other = (CastParseNode) obj; + if (dt == null) { + if (other.dt != null) return false; + } else if (!dt.equals(other.dt)) return false; + if (maxLength == null) { + if (other.maxLength != null) return false; + } else if (!maxLength.equals(other.maxLength)) return false; + if (scale == null) { + if (other.scale != null) return false; + } else if (!scale.equals(other.scale)) return false; + return true; + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + buf.append(" CAST("); + children.get(0).toSQL(resolver, buf); + buf.append(" AS "); + boolean isArray = dt.isArrayType(); + PDataType type = isArray ? PDataType.arrayBaseType(dt) : dt; + buf.append(type.getSqlTypeName()); + if (maxLength != null) { + buf.append('('); + buf.append(maxLength); + if (scale != null) { + buf.append(','); + buf.append(scale); // has both max length and scale. For ex- decimal(10,2) + } + buf.append(')'); + } + if (isArray) { + buf.append(' '); + buf.append(PDataType.ARRAY_TYPE_SUFFIX); } + buf.append(")"); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CeilParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CeilParseNode.java index b5f666936f4..f0d573b34df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CeilParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CeilParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,59 +26,57 @@ import org.apache.phoenix.expression.function.CeilDecimalExpression; import org.apache.phoenix.expression.function.CeilFunction; import org.apache.phoenix.expression.function.CeilTimestampExpression; +import org.apache.phoenix.schema.TypeMismatchException; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PUnsignedTimestamp; -import org.apache.phoenix.schema.TypeMismatchException; /** - * Parse node corresponding to {@link CeilFunction}. - * It also acts as a factory for creating the right kind of - * ceil expression according to the data type of the - * first child. - * - * + * Parse node corresponding to {@link CeilFunction}. It also acts as a factory for creating the + * right kind of ceil expression according to the data type of the first child. 
* @since 3.0.0 */ public class CeilParseNode extends FunctionParseNode { - - CeilParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - @Override - public Expression create(List children, StatementContext context) throws SQLException { - return getCeilExpression(children); - } - - public static Expression getCeilExpression(List children) throws SQLException { - final Expression firstChild = children.get(0); - final PDataType firstChildDataType = firstChild.getDataType(); - if(firstChildDataType.isCoercibleTo(PDate.INSTANCE)) { - return CeilDateExpression.create(children); - } else if (firstChildDataType == PTimestamp.INSTANCE || firstChildDataType == PUnsignedTimestamp.INSTANCE) { - return CeilTimestampExpression.create(children); - } else if(firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { - return CeilDecimalExpression.create(children); - } else { - throw TypeMismatchException.newException(firstChildDataType, "1"); - } - } - - /** - * When ceiling off decimals, user need not specify the scale. In such cases, - * we need to prevent the function from getting evaluated as null. This is really - * a hack. A better way would have been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a - * way of associating default values for each permissible data type. - * Something like: @ Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} isConstant=true) - * Till then, this will have to do. - */ - @Override - public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { - return index == 0; + + CeilParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + return getCeilExpression(children); + } + + public static Expression getCeilExpression(List children) throws SQLException { + final Expression firstChild = children.get(0); + final PDataType firstChildDataType = firstChild.getDataType(); + if (firstChildDataType.isCoercibleTo(PDate.INSTANCE)) { + return CeilDateExpression.create(children); + } else if ( + firstChildDataType == PTimestamp.INSTANCE || firstChildDataType == PUnsignedTimestamp.INSTANCE + ) { + return CeilTimestampExpression.create(children); + } else if (firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { + return CeilDecimalExpression.create(children); + } else { + throw TypeMismatchException.newException(firstChildDataType, "1"); } - - -} + } + + /** + * When ceiling off decimals, user need not specify the scale. In such cases, we need to prevent + * the function from getting evaluated as null. This is really a hack. A better way would have + * been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a way + * of associating default values for each permissible data type. Something like: @ + * Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} + * isConstant=true) Till then, this will have to do. 
+ */ + @Override + public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { + return index == 0; + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ChangePermsStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ChangePermsStatement.java index b49183d3a3b..47263ec79de 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ChangePermsStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ChangePermsStatement.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,88 +17,90 @@ */ package org.apache.phoenix.parse; -import org.antlr.runtime.RecognitionException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.security.access.Permission; -import org.apache.phoenix.exception.PhoenixParserException; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.util.SchemaUtil; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; - /** - * See PHOENIX-672, Use GRANT/REVOKE statements to assign or remove permissions for a user OR group on a table OR namespace - * Permissions are managed by HBase using hbase:acl table, Allowed permissions are RWXCA + * See PHOENIX-672, Use GRANT/REVOKE statements to assign or remove permissions for a user OR group + * on a table OR namespace Permissions are managed by HBase using hbase:acl table, Allowed + * permissions are RWXCA */ public class ChangePermsStatement implements BindableStatement { - private Permission.Action[] permsList; - private TableName tableName; - private String schemaName; - private String name; - // Grant/Revoke statements are differentiated based on this boolean - private boolean isGrantStatement; + private Permission.Action[] permsList; + private TableName tableName; + private String schemaName; + private String name; + // Grant/Revoke statements are differentiated based on this boolean + private boolean isGrantStatement; - public ChangePermsStatement(String permsString, boolean isSchemaName, - TableName tableName, String schemaName, boolean isGroupName, LiteralParseNode ugNode, boolean isGrantStatement) { - // PHOENIX-672 HBase API doesn't allow to revoke specific permissions, hence this parameter will be ignored here. - // To comply with SQL standards, we may support the user given permissions to revoke specific permissions in future. 
- // GRANT permissions statement requires this parameter and the parsing will fail if it is not specified in SQL - if(permsString != null) { - Permission permission = new Permission(permsString.getBytes(StandardCharsets.UTF_8)); - permsList = permission.getActions(); - } - if(isSchemaName) { - this.schemaName = SchemaUtil.normalizeIdentifier(schemaName); - } else { - this.tableName = tableName; - } - name = SchemaUtil.normalizeLiteral(ugNode); - name = isGroupName ? AuthUtil.toGroupEntry(name) : name; - this.isGrantStatement = isGrantStatement; + public ChangePermsStatement(String permsString, boolean isSchemaName, TableName tableName, + String schemaName, boolean isGroupName, LiteralParseNode ugNode, boolean isGrantStatement) { + // PHOENIX-672 HBase API doesn't allow to revoke specific permissions, hence this parameter will + // be ignored here. + // To comply with SQL standards, we may support the user given permissions to revoke specific + // permissions in future. + // GRANT permissions statement requires this parameter and the parsing will fail if it is not + // specified in SQL + if (permsString != null) { + Permission permission = new Permission(permsString.getBytes(StandardCharsets.UTF_8)); + permsList = permission.getActions(); } - - public Permission.Action[] getPermsList() { - return permsList; + if (isSchemaName) { + this.schemaName = SchemaUtil.normalizeIdentifier(schemaName); + } else { + this.tableName = tableName; } + name = SchemaUtil.normalizeLiteral(ugNode); + name = isGroupName ? AuthUtil.toGroupEntry(name) : name; + this.isGrantStatement = isGrantStatement; + } - public String getName() { - return name; - } + public Permission.Action[] getPermsList() { + return permsList; + } - public TableName getTableName() { - return tableName; - } + public String getName() { + return name; + } - public String getSchemaName() { - return schemaName; - } + public TableName getTableName() { + return tableName; + } - public boolean isGrantStatement() { - return isGrantStatement; - } + public String getSchemaName() { + return schemaName; + } - public String toString() { - StringBuffer buffer = new StringBuffer(); - buffer = this.isGrantStatement() ? buffer.append("GRANT ") : buffer.append("REVOKE "); - buffer.append("permissions requested for user/group: " + this.getName()); - if (this.getSchemaName() != null) { - buffer.append(" for Schema: " + this.getSchemaName()); - } else if (this.getTableName() != null) { - buffer.append(" for Table: " + this.getTableName()); - } - buffer.append(" Permissions: " + Arrays.toString(this.getPermsList())); - return buffer.toString(); - } + public boolean isGrantStatement() { + return isGrantStatement; + } - @Override - public int getBindCount() { - return 0; + public String toString() { + StringBuffer buffer = new StringBuffer(); + buffer = this.isGrantStatement() ? 
buffer.append("GRANT ") : buffer.append("REVOKE "); + buffer.append("permissions requested for user/group: " + this.getName()); + if (this.getSchemaName() != null) { + buffer.append(" for Schema: " + this.getSchemaName()); + } else if (this.getTableName() != null) { + buffer.append(" for Table: " + this.getTableName()); } + buffer.append(" Permissions: " + Arrays.toString(this.getPermsList())); + return buffer.toString(); + } - @Override - public PhoenixStatement.Operation getOperation() { - return PhoenixStatement.Operation.ADMIN; - } + @Override + public int getBindCount() { + return 0; + } + + @Override + public PhoenixStatement.Operation getOperation() { + return PhoenixStatement.Operation.ADMIN; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CloseStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CloseStatement.java index 5d7af346472..ee780919ef2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CloseStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CloseStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,21 +20,21 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class CloseStatement implements BindableStatement { - private final CursorName cursorName; + private final CursorName cursorName; - public CloseStatement(CursorName cursorName){ - this.cursorName = cursorName; - } + public CloseStatement(CursorName cursorName) { + this.cursorName = cursorName; + } - public String getCursorName(){ - return cursorName.getName(); - } + public String getCursorName() { + return cursorName.getName(); + } - public int getBindCount(){ - return 0; - } + public int getBindCount() { + return 0; + } - public Operation getOperation(){ - return Operation.UPSERT; - } + public Operation getOperation() { + return Operation.UPSERT; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDef.java index 062040dc295..f76e9b6e357 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,6 @@ import java.sql.SQLException; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.compile.ExpressionCompiler; import org.apache.phoenix.compile.StatementContext; @@ -35,282 +34,286 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.ExpressionUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - - /** - * * Represents a column definition during DDL - * - * * @since 0.1 */ public class ColumnDef { - private final ColumnName columnDefName; - private final PDataType dataType; - private final Boolean isNull; - private final Integer maxLength; - private final Integer scale; - private boolean isPK; - private final SortOrder sortOrder; - private final boolean isArray; - private final Integer arrSize; - private final String expressionStr; - private final Integer encodedQualifier; - private final boolean isRowTimestamp; + private final ColumnName columnDefName; + private final PDataType dataType; + private final Boolean isNull; + private final Integer maxLength; + private final Integer scale; + private boolean isPK; + private final SortOrder sortOrder; + private final boolean isArray; + private final Integer arrSize; + private final String expressionStr; + private final Integer encodedQualifier; + private final boolean isRowTimestamp; - public ColumnDef(ColumnDef def, String expressionStr) { - this.columnDefName = def.columnDefName; - this.dataType = def.dataType; - this.isNull = def.isNull; - this.maxLength = def.maxLength; - this.scale = def.scale; - this.isPK = def.isPK; - this.sortOrder = def.sortOrder; - this.isArray = def.isArray; - this.arrSize = def.arrSize; - this.encodedQualifier = def.encodedQualifier; - this.isRowTimestamp = def.isRowTimestamp; - this.expressionStr = expressionStr; - } + public ColumnDef(ColumnDef def, String expressionStr) { + this.columnDefName = def.columnDefName; + this.dataType = def.dataType; + this.isNull = def.isNull; + this.maxLength = def.maxLength; + this.scale = def.scale; + this.isPK = def.isPK; + this.sortOrder = def.sortOrder; + this.isArray = def.isArray; + this.arrSize = def.arrSize; + this.encodedQualifier = def.encodedQualifier; + this.isRowTimestamp = def.isRowTimestamp; + this.expressionStr = expressionStr; + } - ColumnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, Integer arrSize, Boolean isNull, Integer maxLength, - Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr, Integer encodedQualifier, boolean isRowTimestamp) { - try { - Preconditions.checkNotNull(sortOrder); - PDataType baseType; - PDataType dataType; - this.columnDefName = columnDefName; - // TODO : Add correctness check for arrSize. Should this be ignored as in postgres - // Also add what is the limit that we would support. Are we going to support a - // fixed size or like postgres allow infinite. 
May be the data types max limit can - // be used for the array size (May be too big) - if (isArray) { - this.isArray = true; - dataType = sqlTypeName == null ? null : PDataType.fromTypeId(PDataType.sqlArrayType(SchemaUtil.normalizeIdentifier(sqlTypeName))); - baseType = sqlTypeName == null ? null : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); - this.arrSize = arrSize; // Can only be non negative based on parsing - if (baseType == PVarbinary.INSTANCE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_ARRAY_NOT_SUPPORTED) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - } else { - baseType = dataType = sqlTypeName == null ? null : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); - if (this.isArray = dataType != null && dataType.isArrayType()) { - baseType = PDataType.arrayBaseType(dataType); - } - this.arrSize = null; - } + ColumnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, Integer arrSize, + Boolean isNull, Integer maxLength, Integer scale, boolean isPK, SortOrder sortOrder, + String expressionStr, Integer encodedQualifier, boolean isRowTimestamp) { + try { + Preconditions.checkNotNull(sortOrder); + PDataType baseType; + PDataType dataType; + this.columnDefName = columnDefName; + // TODO : Add correctness check for arrSize. Should this be ignored as in postgres + // Also add what is the limit that we would support. Are we going to support a + // fixed size or like postgres allow infinite. May be the data types max limit can + // be used for the array size (May be too big) + if (isArray) { + this.isArray = true; + dataType = sqlTypeName == null + ? null + : PDataType + .fromTypeId(PDataType.sqlArrayType(SchemaUtil.normalizeIdentifier(sqlTypeName))); + baseType = sqlTypeName == null + ? null + : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); + this.arrSize = arrSize; // Can only be non negative based on parsing + if (baseType == PVarbinary.INSTANCE) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_ARRAY_NOT_SUPPORTED) + .setColumnName(columnDefName.getColumnName()).build().buildException(); + } + } else { + baseType = dataType = sqlTypeName == null + ? null + : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); + if (this.isArray = dataType != null && dataType.isArrayType()) { + baseType = PDataType.arrayBaseType(dataType); + } + this.arrSize = null; + } - this.isNull = isNull; - if (baseType == PDecimal.INSTANCE) { - // for deciaml, 1 <= maxLength <= PDataType.MAX_PRECISION; - if (maxLength == null) { - scale = null; - } else { - if (maxLength < 1 || maxLength > PDataType.MAX_PRECISION) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DECIMAL_PRECISION_OUT_OF_RANGE) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - // When a precision is specified and a scale is not specified, it is set to 0. - // - // This is the standard as specified in - // http://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1832 - // and - // http://docs.oracle.com/javadb/10.6.2.1/ref/rrefsqlj15260.html. - // Otherwise, if scale is bigger than maxLength, just set it to the maxLength; - // - // When neither a precision nor a scale is specified, the precision and scale is - // ignored. All decimal are stored with as much decimal points as possible. - scale = scale == null ? PDataType.DEFAULT_SCALE : scale > maxLength ? 
maxLength : scale; - } - } else { - if (maxLength != null && maxLength < 1) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - scale = null; - if (baseType == null) { - maxLength = null; - } else if (baseType.isFixedWidth()) { - if (baseType.getByteSize() == null) { - if (maxLength == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_MAX_LENGTH) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - } else { - maxLength = null; - } - } - } - if (dataType != null && !dataType.canBePrimaryKey() && isPK) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT) - .setColumnName(columnDefName.getColumnName()) - .setMessage( - "," + dataType.toString() + " is not supported as primary key,") - .build().buildException(); + this.isNull = isNull; + if (baseType == PDecimal.INSTANCE) { + // for deciaml, 1 <= maxLength <= PDataType.MAX_PRECISION; + if (maxLength == null) { + scale = null; + } else { + if (maxLength < 1 || maxLength > PDataType.MAX_PRECISION) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DECIMAL_PRECISION_OUT_OF_RANGE) + .setColumnName(columnDefName.getColumnName()).build().buildException(); + } + // When a precision is specified and a scale is not specified, it is set to 0. + // + // This is the standard as specified in + // http://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1832 + // and + // http://docs.oracle.com/javadb/10.6.2.1/ref/rrefsqlj15260.html. + // Otherwise, if scale is bigger than maxLength, just set it to the maxLength; + // + // When neither a precision nor a scale is specified, the precision and scale is + // ignored. All decimal are stored with as much decimal points as possible. + scale = scale == null ? PDataType.DEFAULT_SCALE : scale > maxLength ? 
maxLength : scale; + } + } else { + if (maxLength != null && maxLength < 1) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH) + .setColumnName(columnDefName.getColumnName()).build().buildException(); + } + scale = null; + if (baseType == null) { + maxLength = null; + } else if (baseType.isFixedWidth()) { + if (baseType.getByteSize() == null) { + if (maxLength == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_MAX_LENGTH) + .setColumnName(columnDefName.getColumnName()).build().buildException(); } - this.maxLength = maxLength; - this.scale = scale; - this.isPK = isPK; - this.sortOrder = sortOrder; - this.dataType = dataType; - this.expressionStr = expressionStr; - this.encodedQualifier = encodedQualifier; - this.isRowTimestamp = isRowTimestamp; - } catch (SQLException e) { - throw new ParseException(e); + } else { + maxLength = null; + } } + } + if (dataType != null && !dataType.canBePrimaryKey() && isPK) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT) + .setColumnName(columnDefName.getColumnName()) + .setMessage("," + dataType.toString() + " is not supported as primary key,").build() + .buildException(); + } + this.maxLength = maxLength; + this.scale = scale; + this.isPK = isPK; + this.sortOrder = sortOrder; + this.dataType = dataType; + this.expressionStr = expressionStr; + this.encodedQualifier = encodedQualifier; + this.isRowTimestamp = isRowTimestamp; + } catch (SQLException e) { + throw new ParseException(e); } + } - ColumnDef(ColumnName columnDefName, String sqlTypeName, Boolean isNull, Integer maxLength, - Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr, - Integer columnQualifier, boolean isRowTimestamp) { - this(columnDefName, sqlTypeName, false, 0, isNull, maxLength, - scale, isPK, sortOrder, expressionStr, columnQualifier, isRowTimestamp); - } + ColumnDef(ColumnName columnDefName, String sqlTypeName, Boolean isNull, Integer maxLength, + Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr, Integer columnQualifier, + boolean isRowTimestamp) { + this(columnDefName, sqlTypeName, false, 0, isNull, maxLength, scale, isPK, sortOrder, + expressionStr, columnQualifier, isRowTimestamp); + } - public ColumnName getColumnDefName() { - return columnDefName; - } + public ColumnName getColumnDefName() { + return columnDefName; + } - public PDataType getDataType() { - return dataType; - } + public PDataType getDataType() { + return dataType; + } - public boolean isNull() { - // null or Boolean.TRUE means NULL - // Boolean.FALSE means NOT NULL - return !Boolean.FALSE.equals(isNull); - } + public boolean isNull() { + // null or Boolean.TRUE means NULL + // Boolean.FALSE means NOT NULL + return !Boolean.FALSE.equals(isNull); + } - public boolean isNullSet() { - return isNull != null; - } + public boolean isNullSet() { + return isNull != null; + } - public Integer getMaxLength() { - return maxLength; - } + public Integer getMaxLength() { + return maxLength; + } - public Integer getScale() { - return scale; - } + public Integer getScale() { + return scale; + } - public boolean isPK() { - return isPK; - } + public boolean isPK() { + return isPK; + } - public SortOrder getSortOrder() { - return sortOrder; - } + public SortOrder getSortOrder() { + return sortOrder; + } - public boolean isArray() { - return isArray; - } + public boolean isArray() { + return isArray; + } - public Integer getArraySize() { - return arrSize; - } + public Integer getArraySize() { + return 
arrSize; + } - public String getExpression() { - return expressionStr; - } + public String getExpression() { + return expressionStr; + } + + public Integer getEncodedQualifier() { + return encodedQualifier; + } + + public boolean isRowTimestamp() { + return isRowTimestamp; + } - public Integer getEncodedQualifier() { - return encodedQualifier; + public void setIsPK(boolean isPK) { + this.isPK = isPK; + } + + public String toFullString() { + if (!Strings.isNullOrEmpty(columnDefName.getFamilyName())) { + return columnDefName.getFamilyName() + "." + toString(); } + return toString(); + } - public boolean isRowTimestamp() { - return isRowTimestamp; + @Override + public String toString() { + StringBuilder buf = new StringBuilder(columnDefName.getColumnNode().toString()); + buf.append(' '); + buf.append(dataType.getSqlTypeName()); + if (maxLength != null) { + buf.append('('); + buf.append(maxLength); + if (scale != null) { + buf.append(','); + buf.append(scale); // has both max length and scale. For ex- decimal(10,2) + } + buf.append(')'); } - - public void setIsPK(boolean isPK) { - this.isPK = isPK; + if (isArray) { + buf.append(' '); + buf.append(PDataType.ARRAY_TYPE_SUFFIX); + buf.append(' '); } + return buf.toString(); + } - public String toFullString() { - if (!Strings.isNullOrEmpty(columnDefName.getFamilyName())) { - return columnDefName.getFamilyName() + "." + toString(); - } - return toString(); + public boolean validateDefault(StatementContext context, PrimaryKeyConstraint pkConstraint) + throws SQLException { + String defaultStr = this.getExpression(); + if (defaultStr == null) { + return true; } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(columnDefName.getColumnNode().toString()); - buf.append(' '); - buf.append(dataType.getSqlTypeName()); - if (maxLength != null) { - buf.append('('); - buf.append(maxLength); - if (scale != null) { - buf.append(','); - buf.append(scale); // has both max length and scale. 
For ex- decimal(10,2) - } - buf.append(')'); - } - if (isArray) { - buf.append(' '); - buf.append(PDataType.ARRAY_TYPE_SUFFIX); - buf.append(' '); - } - return buf.toString(); + ExpressionCompiler compiler = new ExpressionCompiler(context); + ParseNode defaultParseNode = new SQLParser(this.getExpression()).parseExpression(); + Expression defaultExpression = defaultParseNode.accept(compiler); + if ( + !defaultParseNode.isStateless() || defaultExpression.getDeterminism() != Determinism.ALWAYS + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_DEFAULT) + .setColumnName(this.getColumnDefName().getColumnName()).build().buildException(); } - - public boolean validateDefault(StatementContext context, PrimaryKeyConstraint pkConstraint) throws SQLException { - String defaultStr = this.getExpression(); - if (defaultStr == null) { - return true; - } - ExpressionCompiler compiler = new ExpressionCompiler(context); - ParseNode defaultParseNode = - new SQLParser(this.getExpression()).parseExpression(); - Expression defaultExpression = defaultParseNode.accept(compiler); - if (!defaultParseNode.isStateless() - || defaultExpression.getDeterminism() != Determinism.ALWAYS) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_DEFAULT) - .setColumnName(this.getColumnDefName().getColumnName()).build() - .buildException(); - } - if (this.isRowTimestamp() || ( pkConstraint != null && pkConstraint.isColumnRowTimestamp(this.getColumnDefName()))) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP) - .setColumnName(this.getColumnDefName().getColumnName()) - .build().buildException(); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - // Evaluate the expression to confirm it's validity - LiteralExpression defaultValue = ExpressionUtil.getConstantExpression(defaultExpression, ptr); - // A DEFAULT that evaluates to null should be ignored as it adds nothing - if (defaultValue.getValue() == null) { - return false; - } - PDataType sourceType = defaultExpression.getDataType(); - PDataType targetType = this.getDataType(); - // Ensure that coercion works (will throw if not) - context.getTempPtr().set(ptr.get(), ptr.getOffset(), ptr.getLength()); - try { - targetType.coerceBytes(context.getTempPtr(), defaultValue.getValue(), sourceType, - defaultValue.getMaxLength(), defaultValue.getScale(), - defaultValue.getSortOrder(), - this.getMaxLength(), this.getScale(), - this.getSortOrder()); - } catch (ConstraintViolationException e) { - if (e.getCause() instanceof SQLException) { - SQLException sqlE = (SQLException) e.getCause(); - throw new DelegateSQLException(sqlE, ". 
DEFAULT " + SQLExceptionInfo.COLUMN_NAME + "=" + this.getColumnDefName().getColumnName()); - } - throw e; - } - if (!targetType.isSizeCompatible(ptr, defaultValue.getValue(), sourceType, - sortOrder, defaultValue.getMaxLength(), - defaultValue.getScale(), this.getMaxLength(), this.getScale())) { - throw new DataExceedsCapacityException(this.getDataType(), this.getMaxLength(), - this.getScale(), this.getColumnDefName().getColumnName()); - } - return true; + if ( + this.isRowTimestamp() + || (pkConstraint != null && pkConstraint.isColumnRowTimestamp(this.getColumnDefName())) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP) + .setColumnName(this.getColumnDefName().getColumnName()).build().buildException(); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + // Evaluate the expression to confirm it's validity + LiteralExpression defaultValue = ExpressionUtil.getConstantExpression(defaultExpression, ptr); + // A DEFAULT that evaluates to null should be ignored as it adds nothing + if (defaultValue.getValue() == null) { + return false; + } + PDataType sourceType = defaultExpression.getDataType(); + PDataType targetType = this.getDataType(); + // Ensure that coercion works (will throw if not) + context.getTempPtr().set(ptr.get(), ptr.getOffset(), ptr.getLength()); + try { + targetType.coerceBytes(context.getTempPtr(), defaultValue.getValue(), sourceType, + defaultValue.getMaxLength(), defaultValue.getScale(), defaultValue.getSortOrder(), + this.getMaxLength(), this.getScale(), this.getSortOrder()); + } catch (ConstraintViolationException e) { + if (e.getCause() instanceof SQLException) { + SQLException sqlE = (SQLException) e.getCause(); + throw new DelegateSQLException(sqlE, ". DEFAULT " + SQLExceptionInfo.COLUMN_NAME + "=" + + this.getColumnDefName().getColumnName()); + } + throw e; + } + if ( + !targetType.isSizeCompatible(ptr, defaultValue.getValue(), sourceType, sortOrder, + defaultValue.getMaxLength(), defaultValue.getScale(), this.getMaxLength(), this.getScale()) + ) { + throw new DataExceedsCapacityException(this.getDataType(), this.getMaxLength(), + this.getScale(), this.getColumnDefName().getColumnName()); } -} \ No newline at end of file + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDefInPkConstraint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDefInPkConstraint.java index 41d8868ee48..09928a8da33 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDefInPkConstraint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnDefInPkConstraint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,25 +20,26 @@ import org.apache.phoenix.schema.SortOrder; public class ColumnDefInPkConstraint { - private final ColumnName columnDefName; - private final SortOrder sortOrder; - private final boolean isRowTimestamp; - - public ColumnDefInPkConstraint(ColumnName columnDefName, SortOrder sortOrder, boolean isRowTimestamp) { - this.columnDefName = columnDefName; - this.sortOrder = sortOrder; - this.isRowTimestamp = isRowTimestamp; - } + private final ColumnName columnDefName; + private final SortOrder sortOrder; + private final boolean isRowTimestamp; - public ColumnName getColumnName() { - return columnDefName; - } + public ColumnDefInPkConstraint(ColumnName columnDefName, SortOrder sortOrder, + boolean isRowTimestamp) { + this.columnDefName = columnDefName; + this.sortOrder = sortOrder; + this.isRowTimestamp = isRowTimestamp; + } - public SortOrder getSortOrder() { - return sortOrder; - } + public ColumnName getColumnName() { + return columnDefName; + } - public boolean isRowTimestamp() { - return isRowTimestamp; - } + public SortOrder getSortOrder() { + return sortOrder; + } + + public boolean isRowTimestamp() { + return isRowTimestamp; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnFamilyDef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnFamilyDef.java index b4578d06aec..c4e6cab8f39 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnFamilyDef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnFamilyDef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,37 +19,33 @@ import java.util.*; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.util.SchemaUtil; /** - * * Definition of a Column Family at DDL time - * - * * @since 0.1 */ public class ColumnFamilyDef { - private final String name; - private final List columnDefs; - private final Map props; - - ColumnFamilyDef(String name, List columnDefs, Map props) { - this.name = SchemaUtil.normalizeIdentifier(name); - this.columnDefs = ImmutableList.copyOf(columnDefs); - this.props = props == null ? Collections.emptyMap() : props; - } + private final String name; + private final List columnDefs; + private final Map props; + + ColumnFamilyDef(String name, List columnDefs, Map props) { + this.name = SchemaUtil.normalizeIdentifier(name); + this.columnDefs = ImmutableList.copyOf(columnDefs); + this.props = props == null ? Collections. 
emptyMap() : props; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public List getColumnDefs() { - return columnDefs; - } + public List getColumnDefs() { + return columnDefs; + } - public Map getProps() { - return props; - } + public Map getProps() { + return props; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnName.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnName.java index 5d62017fd21..84cb86c5d0a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnName.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,86 +19,85 @@ import org.apache.phoenix.util.SchemaUtil; - public class ColumnName { - private final NamedNode familyNode; - private final NamedNode columnNode; - - public static ColumnName caseSensitiveColumnName(String familyName, String columnName) { - return new ColumnName(NamedNode.caseSensitiveNamedNode(familyName), NamedNode.caseSensitiveNamedNode(columnName)); - } - - public static ColumnName caseSensitiveColumnName(String columnName) { - return new ColumnName(null, NamedNode.caseSensitiveNamedNode(columnName)); - } - - public static ColumnName newColumnName(NamedNode columnName) { - return new ColumnName(null, columnName); - } - - public static ColumnName newColumnName(NamedNode familyName, NamedNode columnName) { - return new ColumnName(familyName, columnName); - } - - public static ColumnName newColumnName(String familyName, String columnName) { - return new ColumnName(familyName, columnName); - } - - private ColumnName(NamedNode familyNode, NamedNode columnNode) { - this.familyNode = familyNode; - this.columnNode = columnNode; - } - - - ColumnName(String familyName, String columnName) { - this.familyNode = familyName == null ? null : new NamedNode(familyName); - this.columnNode = new NamedNode(columnName); - } - - ColumnName(String columnName) { - this(null, columnName); - } - - public String getFamilyName() { - return familyNode == null ? null : familyNode.getName(); - } - - public String getColumnName() { - return columnNode.getName(); - } - - public NamedNode getFamilyNode() { - return familyNode; - } - - public NamedNode getColumnNode() { - return columnNode; - } - - @Override - public String toString() { - return SchemaUtil.getColumnName(getFamilyName(),getColumnName()); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + columnNode.hashCode(); - result = prime * result + ((familyNode == null) ? 
0 : familyNode.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ColumnName other = (ColumnName)obj; - if (!columnNode.equals(other.columnNode)) return false; - if (familyNode == null) { - if (other.familyNode != null) return false; - } else if (!familyNode.equals(other.familyNode)) return false; - return true; - } + private final NamedNode familyNode; + private final NamedNode columnNode; + + public static ColumnName caseSensitiveColumnName(String familyName, String columnName) { + return new ColumnName(NamedNode.caseSensitiveNamedNode(familyName), + NamedNode.caseSensitiveNamedNode(columnName)); + } + + public static ColumnName caseSensitiveColumnName(String columnName) { + return new ColumnName(null, NamedNode.caseSensitiveNamedNode(columnName)); + } + + public static ColumnName newColumnName(NamedNode columnName) { + return new ColumnName(null, columnName); + } + + public static ColumnName newColumnName(NamedNode familyName, NamedNode columnName) { + return new ColumnName(familyName, columnName); + } + + public static ColumnName newColumnName(String familyName, String columnName) { + return new ColumnName(familyName, columnName); + } + + private ColumnName(NamedNode familyNode, NamedNode columnNode) { + this.familyNode = familyNode; + this.columnNode = columnNode; + } + + ColumnName(String familyName, String columnName) { + this.familyNode = familyName == null ? null : new NamedNode(familyName); + this.columnNode = new NamedNode(columnName); + } + + ColumnName(String columnName) { + this(null, columnName); + } + + public String getFamilyName() { + return familyNode == null ? null : familyNode.getName(); + } + + public String getColumnName() { + return columnNode.getName(); + } + + public NamedNode getFamilyNode() { + return familyNode; + } + + public NamedNode getColumnNode() { + return columnNode; + } + + @Override + public String toString() { + return SchemaUtil.getColumnName(getFamilyName(), getColumnName()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + columnNode.hashCode(); + result = prime * result + ((familyNode == null) ? 0 : familyNode.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ColumnName other = (ColumnName) obj; + if (!columnNode.equals(other.columnNode)) return false; + if (familyNode == null) { + if (other.familyNode != null) return false; + } else if (!familyNode.equals(other.familyNode)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java index 80c5d0f8818..16eb73c2f8f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ColumnParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,103 +28,107 @@ /** * Node representing a reference to a column in a SQL expression - * - * * @since 0.1 */ public class ColumnParseNode extends NamedParseNode { - // table name can also represent a column family - private final TableName tableName; - private final String fullName; - private final String alias; + // table name can also represent a column family + private final TableName tableName; + private final String fullName; + private final String alias; - public ColumnParseNode(TableName tableName, String name, String alias) { - // Upper case here so our Maps can depend on this (and we don't have to upper case and create a string on every - // lookup - super(name); - this.alias = alias; - this.tableName = tableName; - fullName = tableName == null ? getName() : tableName.toString() + QueryConstants.NAME_SEPARATOR + getName(); - } + public ColumnParseNode(TableName tableName, String name, String alias) { + // Upper case here so our Maps can depend on this (and we don't have to upper case and create a + // string on every + // lookup + super(name); + this.alias = alias; + this.tableName = tableName; + fullName = tableName == null + ? getName() + : tableName.toString() + QueryConstants.NAME_SEPARATOR + getName(); + } - public ColumnParseNode(TableName tableName, String name) { - this(tableName, name, null); - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + public ColumnParseNode(TableName tableName, String name) { + this(tableName, name, null); + } - public String getTableName() { - return tableName == null ? null : tableName.getTableName(); - } + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - public String getSchemaName() { - return tableName == null ? null : tableName.getSchemaName(); - } + public String getTableName() { + return tableName == null ? null : tableName.getTableName(); + } - public String getFullName() { - return fullName; - } + public String getSchemaName() { + return tableName == null ? null : tableName.getSchemaName(); + } - @Override - public String getAlias() { - return alias; - } + public String getFullName() { + return fullName; + } - @Override - public int hashCode() { - return fullName.hashCode(); - } + @Override + public String getAlias() { + return alias; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ColumnParseNode other = (ColumnParseNode)obj; - return fullName.equals(other.fullName); - } - - public boolean isTableNameCaseSensitive() { - return tableName == null ? 
false : tableName.isTableNameCaseSensitive(); - } + @Override + public int hashCode() { + return fullName.hashCode(); + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - // If resolver is not null, then resolve to get fully qualified name - String tableName = null; - if (resolver == null) { - if (this.tableName != null) { - tableName = this.tableName.getTableName(); - } - } else { - try { - ColumnRef ref = resolver.resolveColumn(this.getSchemaName(), this.getTableName(), this.getName()); - PColumn column = ref.getColumn(); - if (!SchemaUtil.isPKColumn(column)) { - PTable table = ref.getTable(); - String defaultFamilyName = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : table.getDefaultFamilyName().getString(); - // Translate to the data table column name - String dataFamilyName = column.getFamilyName().getString() ; - tableName = defaultFamilyName.equals(dataFamilyName) ? null : dataFamilyName; - } - - } catch (SQLException e) { - throw new RuntimeException(e); // Already resolved, so not possible - } - } - if (tableName != null) { - if (isTableNameCaseSensitive()) { - buf.append('"'); - buf.append(tableName); - buf.append('"'); - } else { - buf.append(tableName); - } - buf.append('.'); + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ColumnParseNode other = (ColumnParseNode) obj; + return fullName.equals(other.fullName); + } + + public boolean isTableNameCaseSensitive() { + return tableName == null ? false : tableName.isTableNameCaseSensitive(); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + // If resolver is not null, then resolve to get fully qualified name + String tableName = null; + if (resolver == null) { + if (this.tableName != null) { + tableName = this.tableName.getTableName(); + } + } else { + try { + ColumnRef ref = + resolver.resolveColumn(this.getSchemaName(), this.getTableName(), this.getName()); + PColumn column = ref.getColumn(); + if (!SchemaUtil.isPKColumn(column)) { + PTable table = ref.getTable(); + String defaultFamilyName = table.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY + : table.getDefaultFamilyName().getString(); + // Translate to the data table column name + String dataFamilyName = column.getFamilyName().getString(); + tableName = defaultFamilyName.equals(dataFamilyName) ? null : dataFamilyName; } - toSQL(buf); + + } catch (SQLException e) { + throw new RuntimeException(e); // Already resolved, so not possible + } + } + if (tableName != null) { + if (isTableNameCaseSensitive()) { + buf.append('"'); + buf.append(tableName); + buf.append('"'); + } else { + buf.append(tableName); + } + buf.append('.'); } + toSQL(buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java index ba2b20a01da..272e0c39204 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ComparisonParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,42 +26,39 @@ import org.apache.phoenix.util.QueryUtil; /** - * * Common base class {@code =, >, >=, <, <=, != } - * - * * @since 0.1 */ public abstract class ComparisonParseNode extends BinaryParseNode { - public ComparisonParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } + public ComparisonParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } - @Override - public final T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public final T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - /** - * Return the comparison operator associated with the given comparison expression node - */ - public abstract CompareOperator getFilterOp(); - - /** - * Return the inverted operator for the CompareOp - */ - public abstract CompareOperator getInvertFilterOp(); - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - buf.append(" " + QueryUtil.toSQL(getFilterOp()) + " "); - children.get(1).toSQL(resolver, buf); - } + /** + * Return the comparison operator associated with the given comparison expression node + */ + public abstract CompareOperator getFilterOp(); + + /** + * Return the inverted operator for the CompareOp + */ + public abstract CompareOperator getInvertFilterOp(); + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + buf.append(" " + QueryUtil.toSQL(getFilterOp()) + " "); + children.get(1).toSQL(resolver, buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java index fd5d73ebf9d..8ad62c04dcf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CompoundParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,78 +21,65 @@ import java.util.Collections; import java.util.List; - - /** - * * Abstract node representing an expression node that has children - * - * * @since 0.1 */ public abstract class CompoundParseNode extends ParseNode { - private final List children; - private final boolean isStateless; - - CompoundParseNode(List children) { - this.children = Collections.unmodifiableList(children); - boolean isStateless = true; - for (ParseNode child : children) { - isStateless &= child.isStateless(); - if (!isStateless) { - break; - } - } - this.isStateless = isStateless; - } - - @Override - public boolean isStateless() { - return isStateless; - } - - @Override - public final List getChildren() { - return children; + private final List children; + private final boolean isStateless; + + CompoundParseNode(List children) { + this.children = Collections.unmodifiableList(children); + boolean isStateless = true; + for (ParseNode child : children) { + isStateless &= child.isStateless(); + if (!isStateless) { + break; + } } + this.isStateless = isStateless; + } + + @Override + public boolean isStateless() { + return isStateless; + } + @Override + public final List getChildren() { + return children; + } - final List acceptChildren(ParseNodeVisitor visitor) throws SQLException { - List l = visitor.newElementList(children.size()); - for (int i = 0; i < children.size(); i++) { - T e = children.get(i).accept(visitor); - visitor.addElement(l, e); - } - return l; + final List acceptChildren(ParseNodeVisitor visitor) throws SQLException { + List l = visitor.newElementList(children.size()); + for (int i = 0; i < children.size(); i++) { + T e = children.get(i).accept(visitor); + visitor.addElement(l, e); } + return l; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result - + ((children == null) ? 0 : children.hashCode()); - result = prime * result + (isStateless ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((children == null) ? 0 : children.hashCode()); + result = prime * result + (isStateless ? 
1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - CompoundParseNode other = (CompoundParseNode) obj; - if (children == null) { - if (other.children != null) - return false; - } else if (!children.equals(other.children)) - return false; - if (isStateless != other.isStateless) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + CompoundParseNode other = (CompoundParseNode) obj; + if (children == null) { + if (other.children != null) return false; + } else if (!children.equals(other.children)) return false; + if (isStateless != other.isStateless) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java index c9fd51b16f5..92776ffacb2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ConcreteTableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,64 +20,60 @@ import org.apache.phoenix.util.SchemaUtil; /** - * * Abstract node representing a table reference in the FROM clause in SQL - * - * * @since 0.1 */ public abstract class ConcreteTableNode extends TableNode { - //DEFAULT_TABLE_SAMPLING_RATE alternative is to set as 100d - public static final Double DEFAULT_TABLE_SAMPLING_RATE=null; - private final TableName name; - private final Double tableSamplingRate; - - ConcreteTableNode(String alias, TableName name) { - this(alias,name,DEFAULT_TABLE_SAMPLING_RATE); - } - - ConcreteTableNode(String alias, TableName name, Double tableSamplingRate) { - super(SchemaUtil.normalizeIdentifier(alias)); - this.name = name; - if(tableSamplingRate==null){ - this.tableSamplingRate=DEFAULT_TABLE_SAMPLING_RATE; - }else if(tableSamplingRate<0d||tableSamplingRate>100d){ - throw new IllegalArgumentException("TableSamplingRate is out of bound of 0 and 100"); - }else{ - this.tableSamplingRate=tableSamplingRate; - } - } + // DEFAULT_TABLE_SAMPLING_RATE alternative is to set as 100d + public static final Double DEFAULT_TABLE_SAMPLING_RATE = null; + private final TableName name; + private final Double tableSamplingRate; - public TableName getName() { - return name; - } - - public Double getTableSamplingRate(){ - return tableSamplingRate; - } + ConcreteTableNode(String alias, TableName name) { + this(alias, name, DEFAULT_TABLE_SAMPLING_RATE); + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((name == null) ? 0 : name.hashCode()); - result = prime * result + ((tableSamplingRate == null) ? 
0 : tableSamplingRate.hashCode()); - return result; + ConcreteTableNode(String alias, TableName name, Double tableSamplingRate) { + super(SchemaUtil.normalizeIdentifier(alias)); + this.name = name; + if (tableSamplingRate == null) { + this.tableSamplingRate = DEFAULT_TABLE_SAMPLING_RATE; + } else if (tableSamplingRate < 0d || tableSamplingRate > 100d) { + throw new IllegalArgumentException("TableSamplingRate is out of bound of 0 and 100"); + } else { + this.tableSamplingRate = tableSamplingRate; } + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ConcreteTableNode other = (ConcreteTableNode)obj; - if (name == null) { - if (other.name != null) return false; - } else if (!name.equals(other.name)) return false; - if (tableSamplingRate == null) { - if (other.tableSamplingRate != null) return false; - } else if (!tableSamplingRate.equals(other.tableSamplingRate)) return false; - return true; - } -} + public TableName getName() { + return name; + } + + public Double getTableSamplingRate() { + return tableSamplingRate; + } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((name == null) ? 0 : name.hashCode()); + result = prime * result + ((tableSamplingRate == null) ? 0 : tableSamplingRate.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ConcreteTableNode other = (ConcreteTableNode) obj; + if (name == null) { + if (other.name != null) return false; + } else if (!name.equals(other.name)) return false; + if (tableSamplingRate == null) { + if (other.tableSamplingRate != null) return false; + } else if (!tableSamplingRate.equals(other.tableSamplingRate)) return false; + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateCDCStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateCDCStatement.java index 5722ab2a200..3ac0783ab5a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateCDCStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateCDCStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,52 +20,51 @@ import java.util.Set; import org.apache.hadoop.hbase.util.Pair; - import org.apache.phoenix.schema.PTable; import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; public class CreateCDCStatement extends MutableStatement { - private final NamedNode cdcObjName; - private final TableName dataTable; - private final Set includeScopes; - private final ListMultimap> props; - private final boolean ifNotExists; - private final int bindCount; + private final NamedNode cdcObjName; + private final TableName dataTable; + private final Set includeScopes; + private final ListMultimap> props; + private final boolean ifNotExists; + private final int bindCount; - public CreateCDCStatement(NamedNode cdcObjName, TableName dataTable, - Set includeScopes, ListMultimap> props, boolean ifNotExists, int bindCount) { - this.cdcObjName = cdcObjName; - this.dataTable = dataTable; - this.includeScopes = includeScopes; - this.props = props == null ? ArrayListMultimap.>create() : props; - this.ifNotExists = ifNotExists; - this.bindCount = bindCount; - } + public CreateCDCStatement(NamedNode cdcObjName, TableName dataTable, + Set includeScopes, ListMultimap> props, + boolean ifNotExists, int bindCount) { + this.cdcObjName = cdcObjName; + this.dataTable = dataTable; + this.includeScopes = includeScopes; + this.props = props == null ? ArrayListMultimap.> create() : props; + this.ifNotExists = ifNotExists; + this.bindCount = bindCount; + } - public NamedNode getCdcObjName() { - return cdcObjName; - } + public NamedNode getCdcObjName() { + return cdcObjName; + } - public TableName getDataTable() { - return dataTable; - } + public TableName getDataTable() { + return dataTable; + } - public Set getIncludeScopes() { - return includeScopes; - } + public Set getIncludeScopes() { + return includeScopes; + } - public ListMultimap> getProps() { - return props; - } + public ListMultimap> getProps() { + return props; + } - public boolean isIfNotExists() { - return ifNotExists; - } + public boolean isIfNotExists() { + return ifNotExists; + } - @Override - public int getBindCount() { - return bindCount; - } + @Override + public int getBindCount() { + return bindCount; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateFunctionStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateFunctionStatement.java index 863783bd67f..f9204b57eb4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateFunctionStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateFunctionStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,30 +18,30 @@ package org.apache.phoenix.parse; public class CreateFunctionStatement extends MutableStatement { - private final PFunction functionInfo; - private final boolean temporary; - private final boolean isReplace; + private final PFunction functionInfo; + private final boolean temporary; + private final boolean isReplace; - public CreateFunctionStatement(PFunction functionInfo, boolean temporary, boolean isReplace) { - this.functionInfo = functionInfo; - this.temporary = temporary; - this.isReplace = isReplace; - } + public CreateFunctionStatement(PFunction functionInfo, boolean temporary, boolean isReplace) { + this.functionInfo = functionInfo; + this.temporary = temporary; + this.isReplace = isReplace; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - public PFunction getFunctionInfo() { - return functionInfo; - } - - public boolean isTemporary() { - return temporary; - } + public PFunction getFunctionInfo() { + return functionInfo; + } - public boolean isReplace() { - return isReplace; - } + public boolean isTemporary() { + return temporary; + } + + public boolean isReplace() { + return isReplace; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java index de15ac88ea6..1a4afaf6bff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateIndexStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,92 +23,93 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.schema.PTable.IndexType; - import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; - public class CreateIndexStatement extends SingleTableStatement { - private final TableName indexTableName; - private final IndexKeyConstraint indexKeyConstraint; - private final List includeColumns; - private final List splitNodes; - private final ListMultimap> props; - private final boolean ifNotExists; - private final IndexType indexType; - private final boolean async; - private final Map udfParseNodes; - private final ParseNode where; - - public CreateIndexStatement(NamedNode indexTableName, NamedTableNode dataTable, - IndexKeyConstraint indexKeyConstraint, List includeColumns, - List splits, ListMultimap> props, - boolean ifNotExists, IndexType indexType, boolean async, int bindCount, - Map udfParseNodes, ParseNode where) { - super(dataTable, bindCount); - this.indexTableName =TableName.create(dataTable.getName().getSchemaName(),indexTableName.getName()); - this.indexKeyConstraint = indexKeyConstraint == null ? 
IndexKeyConstraint.EMPTY : indexKeyConstraint; - this.includeColumns = includeColumns == null ? Collections.emptyList() : includeColumns; - this.splitNodes = splits == null ? Collections.emptyList() : splits; - this.props = props == null ? ArrayListMultimap.>create() : props; - this.ifNotExists = ifNotExists; - this.indexType = indexType; - this.async = async; - this.udfParseNodes = udfParseNodes; - this.where = where; - } - - public CreateIndexStatement(CreateIndexStatement createStmt, ListMultimap> finalProps) { - super(createStmt.getTable(), createStmt.getBindCount()); - this.indexTableName = createStmt.getIndexTableName(); - this.indexKeyConstraint = createStmt.getIndexConstraint(); - this.includeColumns = createStmt.getIncludeColumns(); - this.splitNodes = createStmt.getSplitNodes(); - this.props = finalProps; - this.ifNotExists = createStmt.ifNotExists(); - this.indexType = createStmt.getIndexType(); - this.async = createStmt.isAsync(); - this.udfParseNodes = createStmt.getUdfParseNodes(); - this.where = createStmt.where; - } - - public IndexKeyConstraint getIndexConstraint() { - return indexKeyConstraint; - } - - public List getIncludeColumns() { - return includeColumns; - } - - public TableName getIndexTableName() { - return indexTableName; - } - - public List getSplitNodes() { - return splitNodes; - } - - public ListMultimap> getProps() { - return props; - } - - public boolean ifNotExists() { - return ifNotExists; - } - - - public IndexType getIndexType() { - return indexType; - } - - public boolean isAsync() { - return async; - } - - public Map getUdfParseNodes() { - return udfParseNodes; - } - public ParseNode getWhere() { - return where; - } + private final TableName indexTableName; + private final IndexKeyConstraint indexKeyConstraint; + private final List includeColumns; + private final List splitNodes; + private final ListMultimap> props; + private final boolean ifNotExists; + private final IndexType indexType; + private final boolean async; + private final Map udfParseNodes; + private final ParseNode where; + + public CreateIndexStatement(NamedNode indexTableName, NamedTableNode dataTable, + IndexKeyConstraint indexKeyConstraint, List includeColumns, List splits, + ListMultimap> props, boolean ifNotExists, IndexType indexType, + boolean async, int bindCount, Map udfParseNodes, ParseNode where) { + super(dataTable, bindCount); + this.indexTableName = + TableName.create(dataTable.getName().getSchemaName(), indexTableName.getName()); + this.indexKeyConstraint = + indexKeyConstraint == null ? IndexKeyConstraint.EMPTY : indexKeyConstraint; + this.includeColumns = + includeColumns == null ? Collections. emptyList() : includeColumns; + this.splitNodes = splits == null ? Collections. emptyList() : splits; + this.props = props == null ? 
ArrayListMultimap.> create() : props; + this.ifNotExists = ifNotExists; + this.indexType = indexType; + this.async = async; + this.udfParseNodes = udfParseNodes; + this.where = where; + } + + public CreateIndexStatement(CreateIndexStatement createStmt, + ListMultimap> finalProps) { + super(createStmt.getTable(), createStmt.getBindCount()); + this.indexTableName = createStmt.getIndexTableName(); + this.indexKeyConstraint = createStmt.getIndexConstraint(); + this.includeColumns = createStmt.getIncludeColumns(); + this.splitNodes = createStmt.getSplitNodes(); + this.props = finalProps; + this.ifNotExists = createStmt.ifNotExists(); + this.indexType = createStmt.getIndexType(); + this.async = createStmt.isAsync(); + this.udfParseNodes = createStmt.getUdfParseNodes(); + this.where = createStmt.where; + } + + public IndexKeyConstraint getIndexConstraint() { + return indexKeyConstraint; + } + + public List getIncludeColumns() { + return includeColumns; + } + + public TableName getIndexTableName() { + return indexTableName; + } + + public List getSplitNodes() { + return splitNodes; + } + + public ListMultimap> getProps() { + return props; + } + + public boolean ifNotExists() { + return ifNotExists; + } + + public IndexType getIndexType() { + return indexType; + } + + public boolean isAsync() { + return async; + } + + public Map getUdfParseNodes() { + return udfParseNodes; + } + + public ParseNode getWhere() { + return where; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java index f5ab3f6bc3a..3f096ffe735 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSchemaStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +17,26 @@ */ package org.apache.phoenix.parse; -import org.apache.phoenix.util.SchemaUtil; - public class CreateSchemaStatement extends MutableStatement { - private final String schemaName; - private final boolean ifNotExists; - - public CreateSchemaStatement(String schemaName,boolean ifNotExists) { - this.schemaName = schemaName; - this.ifNotExists = ifNotExists; - } - - @Override - public int getBindCount() { - return 0; - } + private final String schemaName; + private final boolean ifNotExists; + + public CreateSchemaStatement(String schemaName, boolean ifNotExists) { + this.schemaName = schemaName; + this.ifNotExists = ifNotExists; + } + + @Override + public int getBindCount() { + return 0; + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } - public boolean isIfNotExists() { - return ifNotExists; - } + public boolean isIfNotExists() { + return ifNotExists; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSequenceStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSequenceStatement.java index 2e0c943629b..8853de10fce 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSequenceStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateSequenceStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,73 +19,72 @@ public class CreateSequenceStatement extends MutableStatement { - public static CreateSequenceStatement create(TableName sequenceName) { - return new CreateSequenceStatement(sequenceName, null, null, null, null, null, false, true, - 0); - } - - private final TableName sequenceName; - private final ParseNode startWith; - private final ParseNode incrementBy; - private final ParseNode cacheSize; - private final ParseNode minValue; - private final ParseNode maxValue; - private final boolean cycle; - private final boolean ifNotExists; - private final int bindCount; + public static CreateSequenceStatement create(TableName sequenceName) { + return new CreateSequenceStatement(sequenceName, null, null, null, null, null, false, true, 0); + } - protected CreateSequenceStatement(TableName sequenceName, ParseNode startWith, - ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, - boolean cycle, boolean ifNotExists, int bindCount) { - this.sequenceName = sequenceName; - // if MINVALUE, MAXVALUE and START WITH are not specified, set START WITH to 1 in order to - // maintain backward compatibility - this.startWith = - (minValue == null && maxValue == null && startWith == null) ? LiteralParseNode.ONE - : startWith; - this.minValue = minValue == null ? new LiteralParseNode(Long.MIN_VALUE) : minValue; - this.maxValue = maxValue == null ? new LiteralParseNode(Long.MAX_VALUE) : maxValue; - this.incrementBy = incrementBy == null ? 
LiteralParseNode.ONE : incrementBy; - this.cacheSize = cacheSize; - this.cycle = cycle; - this.ifNotExists = ifNotExists; - this.bindCount = bindCount; - } + private final TableName sequenceName; + private final ParseNode startWith; + private final ParseNode incrementBy; + private final ParseNode cacheSize; + private final ParseNode minValue; + private final ParseNode maxValue; + private final boolean cycle; + private final boolean ifNotExists; + private final int bindCount; - @Override - public int getBindCount() { - return this.bindCount; - } - - public ParseNode getIncrementBy() { - return incrementBy; - } + protected CreateSequenceStatement(TableName sequenceName, ParseNode startWith, + ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, + boolean cycle, boolean ifNotExists, int bindCount) { + this.sequenceName = sequenceName; + // if MINVALUE, MAXVALUE and START WITH are not specified, set START WITH to 1 in order to + // maintain backward compatibility + this.startWith = (minValue == null && maxValue == null && startWith == null) + ? LiteralParseNode.ONE + : startWith; + this.minValue = minValue == null ? new LiteralParseNode(Long.MIN_VALUE) : minValue; + this.maxValue = maxValue == null ? new LiteralParseNode(Long.MAX_VALUE) : maxValue; + this.incrementBy = incrementBy == null ? LiteralParseNode.ONE : incrementBy; + this.cacheSize = cacheSize; + this.cycle = cycle; + this.ifNotExists = ifNotExists; + this.bindCount = bindCount; + } - public TableName getSequenceName() { - return sequenceName; - } + @Override + public int getBindCount() { + return this.bindCount; + } - public ParseNode getCacheSize() { - return cacheSize; - } + public ParseNode getIncrementBy() { + return incrementBy; + } - public ParseNode getMinValue() { - return minValue; - } + public TableName getSequenceName() { + return sequenceName; + } - public ParseNode getMaxValue() { - return maxValue; - } + public ParseNode getCacheSize() { + return cacheSize; + } - public boolean getCycle() { - return cycle; - } + public ParseNode getMinValue() { + return minValue; + } - public ParseNode getStartWith() { - return startWith; - } + public ParseNode getMaxValue() { + return maxValue; + } - public boolean ifNotExists() { - return ifNotExists; - } -} \ No newline at end of file + public boolean getCycle() { + return cycle; + } + + public ParseNode getStartWith() { + return startWith; + } + + public boolean ifNotExists() { + return ifNotExists; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java index 37376c985eb..7f2649c1e36 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,145 +24,151 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.schema.PTableType; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; public class CreateTableStatement extends MutableStatement { - private final TableName tableName; - private final PTableType tableType; - private final List columns; - private final PrimaryKeyConstraint pkConstraint; - private final List splitNodes; - private final int bindCount; - private final ListMultimap> props; - private final boolean ifNotExists; - private final TableName baseTableName; - private final ParseNode whereClause; - // TODO change this to boolean at the next major release and remove TableProperty.IMMUTABLE_ROWS and QueryServiceOptions.IMMUTABLE_ROWS_ATTRIB - private final Boolean immutableRows; - private final Map familyCQCounters; - private final boolean noVerify; - - public CreateTableStatement(CreateTableStatement createTable, List columns) { - this.tableName = createTable.tableName; - this.tableType = createTable.tableType; - this.columns = ImmutableList.copyOf(columns); - this.pkConstraint = createTable.pkConstraint; - this.splitNodes = createTable.splitNodes; - this.bindCount = createTable.bindCount; - this.props = createTable.props; - this.ifNotExists = createTable.ifNotExists; - this.baseTableName = createTable.baseTableName; - this.whereClause = createTable.whereClause; - this.immutableRows = createTable.immutableRows; - this.familyCQCounters = createTable.familyCQCounters; - this.noVerify = createTable.noVerify; - } - - public CreateTableStatement(CreateTableStatement createTable, PrimaryKeyConstraint pkConstraint, - List columns) { - this.tableName = createTable.tableName; - this.tableType = createTable.tableType; - this.columns = ImmutableList.copyOf(columns); - this.pkConstraint = pkConstraint; - this.splitNodes = createTable.splitNodes; - this.bindCount = createTable.bindCount; - this.props = createTable.props; - this.ifNotExists = createTable.ifNotExists; - this.baseTableName = createTable.baseTableName; - this.whereClause = createTable.whereClause; - this.immutableRows = createTable.immutableRows; - this.familyCQCounters = createTable.familyCQCounters; - this.noVerify = createTable.noVerify; - } - - public CreateTableStatement(CreateTableStatement createTable, ListMultimap> props, List columns) { - this.tableName = createTable.tableName; - this.tableType = createTable.tableType; - this.columns = ImmutableList.copyOf(columns); - this.pkConstraint = createTable.pkConstraint; - this.splitNodes = createTable.splitNodes; - this.bindCount = createTable.bindCount; - this.props = props; - this.ifNotExists = createTable.ifNotExists; - this.baseTableName = createTable.baseTableName; - this.whereClause = createTable.whereClause; - this.immutableRows = createTable.immutableRows; - this.familyCQCounters = createTable.familyCQCounters; - this.noVerify = createTable.noVerify; - } - - protected CreateTableStatement(TableName tableName, ListMultimap> props, List columns, PrimaryKeyConstraint 
pkConstraint, - List splitNodes, PTableType tableType, boolean ifNotExists, - TableName baseTableName, ParseNode whereClause, int bindCount, Boolean immutableRows, - Map familyCounters, boolean noVerify) { - this.tableName = tableName; - this.props = props == null ? ImmutableListMultimap.>of() : props; - this.tableType = PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(tableName.getSchemaName()) ? PTableType.SYSTEM : tableType; - this.columns = columns == null ? ImmutableList.of() : ImmutableList.copyOf(columns); - this.pkConstraint = pkConstraint == null ? PrimaryKeyConstraint.EMPTY : pkConstraint; - this.splitNodes = splitNodes == null ? Collections.emptyList() : ImmutableList.copyOf(splitNodes); - this.bindCount = bindCount; - this.ifNotExists = ifNotExists; - this.baseTableName = baseTableName; - this.whereClause = whereClause; - this.immutableRows = immutableRows; - this.familyCQCounters = familyCounters; - this.noVerify = noVerify; - } - - public ParseNode getWhereClause() { - return whereClause; - } - - @Override - public int getBindCount() { - return bindCount; - } - - public TableName getTableName() { - return tableName; - } - - public TableName getBaseTableName() { - return baseTableName; - } - - public List getColumnDefs() { - return columns; - } - - public List getSplitNodes() { - return splitNodes; - } - - public PTableType getTableType() { - return tableType; - } - - public ListMultimap> getProps() { - return props; - } - - public boolean ifNotExists() { - return ifNotExists; - } - - public PrimaryKeyConstraint getPrimaryKeyConstraint() { - return pkConstraint; - } - - public Boolean immutableRows() { - return immutableRows; - } - - public Map getFamilyCQCounters() { - return familyCQCounters; - } - - public boolean isNoVerify() { - return noVerify; - } + private final TableName tableName; + private final PTableType tableType; + private final List columns; + private final PrimaryKeyConstraint pkConstraint; + private final List splitNodes; + private final int bindCount; + private final ListMultimap> props; + private final boolean ifNotExists; + private final TableName baseTableName; + private final ParseNode whereClause; + // TODO change this to boolean at the next major release and remove TableProperty.IMMUTABLE_ROWS + // and QueryServiceOptions.IMMUTABLE_ROWS_ATTRIB + private final Boolean immutableRows; + private final Map familyCQCounters; + private final boolean noVerify; + + public CreateTableStatement(CreateTableStatement createTable, List columns) { + this.tableName = createTable.tableName; + this.tableType = createTable.tableType; + this.columns = ImmutableList.copyOf(columns); + this.pkConstraint = createTable.pkConstraint; + this.splitNodes = createTable.splitNodes; + this.bindCount = createTable.bindCount; + this.props = createTable.props; + this.ifNotExists = createTable.ifNotExists; + this.baseTableName = createTable.baseTableName; + this.whereClause = createTable.whereClause; + this.immutableRows = createTable.immutableRows; + this.familyCQCounters = createTable.familyCQCounters; + this.noVerify = createTable.noVerify; + } + + public CreateTableStatement(CreateTableStatement createTable, PrimaryKeyConstraint pkConstraint, + List columns) { + this.tableName = createTable.tableName; + this.tableType = createTable.tableType; + this.columns = ImmutableList.copyOf(columns); + this.pkConstraint = pkConstraint; + this.splitNodes = createTable.splitNodes; + this.bindCount = createTable.bindCount; + this.props = createTable.props; + this.ifNotExists = 
createTable.ifNotExists; + this.baseTableName = createTable.baseTableName; + this.whereClause = createTable.whereClause; + this.immutableRows = createTable.immutableRows; + this.familyCQCounters = createTable.familyCQCounters; + this.noVerify = createTable.noVerify; + } + + public CreateTableStatement(CreateTableStatement createTable, + ListMultimap> props, List columns) { + this.tableName = createTable.tableName; + this.tableType = createTable.tableType; + this.columns = ImmutableList.copyOf(columns); + this.pkConstraint = createTable.pkConstraint; + this.splitNodes = createTable.splitNodes; + this.bindCount = createTable.bindCount; + this.props = props; + this.ifNotExists = createTable.ifNotExists; + this.baseTableName = createTable.baseTableName; + this.whereClause = createTable.whereClause; + this.immutableRows = createTable.immutableRows; + this.familyCQCounters = createTable.familyCQCounters; + this.noVerify = createTable.noVerify; + } + + protected CreateTableStatement(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splitNodes, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode whereClause, int bindCount, + Boolean immutableRows, Map familyCounters, boolean noVerify) { + this.tableName = tableName; + this.props = props == null ? ImmutableListMultimap.> of() : props; + this.tableType = PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(tableName.getSchemaName()) + ? PTableType.SYSTEM + : tableType; + this.columns = + columns == null ? ImmutableList. of() : ImmutableList. copyOf(columns); + this.pkConstraint = pkConstraint == null ? PrimaryKeyConstraint.EMPTY : pkConstraint; + this.splitNodes = + splitNodes == null ? Collections. emptyList() : ImmutableList.copyOf(splitNodes); + this.bindCount = bindCount; + this.ifNotExists = ifNotExists; + this.baseTableName = baseTableName; + this.whereClause = whereClause; + this.immutableRows = immutableRows; + this.familyCQCounters = familyCounters; + this.noVerify = noVerify; + } + + public ParseNode getWhereClause() { + return whereClause; + } + + @Override + public int getBindCount() { + return bindCount; + } + + public TableName getTableName() { + return tableName; + } + + public TableName getBaseTableName() { + return baseTableName; + } + + public List getColumnDefs() { + return columns; + } + + public List getSplitNodes() { + return splitNodes; + } + + public PTableType getTableType() { + return tableType; + } + + public ListMultimap> getProps() { + return props; + } + + public boolean ifNotExists() { + return ifNotExists; + } + + public PrimaryKeyConstraint getPrimaryKeyConstraint() { + return pkConstraint; + } + + public Boolean immutableRows() { + return immutableRows; + } + + public Map getFamilyCQCounters() { + return familyCQCounters; + } + + public boolean isNoVerify() { + return noVerify; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentDateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentDateParseNode.java index 05e66b66beb..bea6c339ea2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentDateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentDateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,15 +25,15 @@ import org.apache.phoenix.expression.function.CurrentDateFunction; import org.apache.phoenix.expression.function.FunctionExpression; - public class CurrentDateParseNode extends FunctionParseNode { - public CurrentDateParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public CurrentDateParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new CurrentDateFunction(context.getCurrentTimeWithDisplacement()); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new CurrentDateFunction(context.getCurrentTimeWithDisplacement()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentTimeParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentTimeParseNode.java index 680b7efebbc..16d3446557e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentTimeParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CurrentTimeParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,15 +25,15 @@ import org.apache.phoenix.expression.function.CurrentTimeFunction; import org.apache.phoenix.expression.function.FunctionExpression; - public class CurrentTimeParseNode extends FunctionParseNode { - public CurrentTimeParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public CurrentTimeParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new CurrentTimeFunction(context.getCurrentTimeWithDisplacement()); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new CurrentTimeFunction(context.getCurrentTimeWithDisplacement()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CursorName.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CursorName.java index 737573cfaee..755be1d6fce 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CursorName.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/CursorName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,30 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import org.apache.phoenix.util.SchemaUtil; public class CursorName { - private final String name; - private final boolean isCaseSensitive; + private final String name; + private final boolean isCaseSensitive; - public CursorName(String name, boolean isCaseSensitive){ - this.name = name; - this.isCaseSensitive = isCaseSensitive; - } + public CursorName(String name, boolean isCaseSensitive) { + this.name = name; + this.isCaseSensitive = isCaseSensitive; + } - public CursorName(String name){ - this.name = name; - this.isCaseSensitive = name == null ? false: SchemaUtil.isCaseSensitive(name); - } + public CursorName(String name) { + this.name = name; + this.isCaseSensitive = name == null ? false : SchemaUtil.isCaseSensitive(name); + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public boolean isCaseSensitive() { - return isCaseSensitive; - } + public boolean isCaseSensitive() { + return isCaseSensitive; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DMLStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DMLStatement.java index 3b9bd97e1a9..6717bd20aea 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DMLStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DMLStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,14 +21,15 @@ public class DMLStatement extends SingleTableStatement { - private final Map udfParseNodes; - - public DMLStatement(NamedTableNode table, int bindCount, Map udfParseNodes) { - super(table, bindCount); - this.udfParseNodes = udfParseNodes; - } - - public Map getUdfParseNodes() { - return udfParseNodes; - } + private final Map udfParseNodes; + + public DMLStatement(NamedTableNode table, int bindCount, + Map udfParseNodes) { + super(table, bindCount); + this.udfParseNodes = udfParseNodes; + } + + public Map getUdfParseNodes() { + return udfParseNodes; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeclareCursorStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeclareCursorStatement.java index 68129ecac05..822a9d5e61d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeclareCursorStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeclareCursorStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,44 +17,45 @@ */ package org.apache.phoenix.parse; -import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import java.util.*; +import org.apache.phoenix.jdbc.PhoenixStatement.Operation; + public class DeclareCursorStatement implements BindableStatement { - private final CursorName cursorName; - private final SelectStatement select; + private final CursorName cursorName; + private final SelectStatement select; - public DeclareCursorStatement(CursorName cursorName, SelectStatement select){ - this.cursorName = cursorName; - this.select = select; - } + public DeclareCursorStatement(CursorName cursorName, SelectStatement select) { + this.cursorName = cursorName; + this.select = select; + } - public String getCursorName(){ - return cursorName.getName(); - } + public String getCursorName() { + return cursorName.getName(); + } - public String getQuerySQL(){ - //Check if there are parameters to bind. - if(select.getBindCount() > 0){ + public String getQuerySQL() { + // Check if there are parameters to bind. + if (select.getBindCount() > 0) { - } - //TODO: Test if this works - return select.toString(); } + // TODO: Test if this works + return select.toString(); + } - public SelectStatement getSelect(){ - return select; - } + public SelectStatement getSelect() { + return select; + } - public List getSelectOrderBy() { - return select.getOrderBy(); - } + public List getSelectOrderBy() { + return select.getOrderBy(); + } - public int getBindCount(){ - return select.getBindCount(); - } + public int getBindCount() { + return select.getBindCount(); + } - public Operation getOperation(){ - return Operation.UPSERT; - } + public Operation getOperation() { + return Operation.UPSERT; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DelegateConstantToCountParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DelegateConstantToCountParseNode.java index 64c81249c9b..95df9eeb06d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DelegateConstantToCountParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DelegateConstantToCountParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,22 +25,25 @@ import org.apache.phoenix.expression.function.CountAggregateFunction; import org.apache.phoenix.expression.function.FunctionExpression; - public abstract class DelegateConstantToCountParseNode extends AggregateFunctionParseNode { - public DelegateConstantToCountParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - protected CountAggregateFunction getDelegateFunction(List children, StatementContext context) { - CountAggregateFunction countFunc = null; - if (getChildren().get(0).isStateless()) { - countFunc = (CountAggregateFunction)context.getExpressionManager().addIfAbsent(new CountAggregateFunction(children)); - } - return countFunc; + public DelegateConstantToCountParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } + + protected CountAggregateFunction getDelegateFunction(List children, + StatementContext context) { + CountAggregateFunction countFunc = null; + if (getChildren().get(0).isStateless()) { + countFunc = (CountAggregateFunction) context.getExpressionManager() + .addIfAbsent(new CountAggregateFunction(children)); } - - @Override - public abstract FunctionExpression create(List children, StatementContext context) throws SQLException; + return countFunc; + } + + @Override + public abstract FunctionExpression create(List children, StatementContext context) + throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteJarStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteJarStatement.java index a7438ef56aa..73a4049aab3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteJarStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteJarStatement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,18 +19,18 @@ public class DeleteJarStatement extends MutableStatement { - private LiteralParseNode jarPath; + private LiteralParseNode jarPath; - public DeleteJarStatement(LiteralParseNode jarPath) { - this.jarPath = jarPath; - } + public DeleteJarStatement(LiteralParseNode jarPath) { + this.jarPath = jarPath; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - public LiteralParseNode getJarPath() { - return jarPath; - } + public LiteralParseNode getJarPath() { + return jarPath; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteStatement.java index 331bee4133e..8ae6baddaff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DeleteStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,62 +24,64 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class DeleteStatement extends DMLStatement implements FilterableStatement { - private final ParseNode whereNode; - private final List orderBy; - private final LimitNode limit; - private final HintNode hint; - - public DeleteStatement(NamedTableNode table, HintNode hint, ParseNode whereNode, List orderBy, LimitNode limit, int bindCount, Map udfParseNodes) { - super(table, bindCount, udfParseNodes); - this.whereNode = whereNode; - this.orderBy = orderBy == null ? Collections.emptyList() : orderBy; - this.limit = limit; - this.hint = hint == null ? HintNode.EMPTY_HINT_NODE : hint; - } + private final ParseNode whereNode; + private final List orderBy; + private final LimitNode limit; + private final HintNode hint; - @Override - public ParseNode getWhere() { - return whereNode; - } + public DeleteStatement(NamedTableNode table, HintNode hint, ParseNode whereNode, + List orderBy, LimitNode limit, int bindCount, + Map udfParseNodes) { + super(table, bindCount, udfParseNodes); + this.whereNode = whereNode; + this.orderBy = orderBy == null ? Collections. emptyList() : orderBy; + this.limit = limit; + this.hint = hint == null ? HintNode.EMPTY_HINT_NODE : hint; + } - @Override - public List getOrderBy() { - return orderBy; - } + @Override + public ParseNode getWhere() { + return whereNode; + } - @Override - public LimitNode getLimit() { - return limit; - } + @Override + public List getOrderBy() { + return orderBy; + } - @Override - public HintNode getHint() { - return hint; - } + @Override + public LimitNode getLimit() { + return limit; + } - @Override - public boolean isDistinct() { - return false; - } + @Override + public HintNode getHint() { + return hint; + } - @Override - public boolean isAggregate() { - return false; - } + @Override + public boolean isDistinct() { + return false; + } - @Override - public Operation getOperation() { - return Operation.DELETE; - } + @Override + public boolean isAggregate() { + return false; + } - @Override - public OffsetNode getOffset() { - return null; - } - - @Override - public Double getTableSamplingRate(){ - throw new UnsupportedOperationException("Table sampling is not allowd for Deletion"); - } + @Override + public Operation getOperation() { + return Operation.DELETE; + } + + @Override + public OffsetNode getOffset() { + return null; + } + + @Override + public Double getTableSamplingRate() { + throw new UnsupportedOperationException("Table sampling is not allowd for Deletion"); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java index 33b57bc81c8..bbaf5e99c42 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DerivedTableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,58 +22,53 @@ import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.util.SchemaUtil; - - /** - * * Node representing a subselect in the FROM clause of SQL - * - * * @since 0.1 */ public class DerivedTableNode extends TableNode { - private final SelectStatement select; + private final SelectStatement select; - DerivedTableNode(String alias, SelectStatement select) { - super(SchemaUtil.normalizeIdentifier(alias)); - this.select = select; - } + DerivedTableNode(String alias, SelectStatement select) { + super(SchemaUtil.normalizeIdentifier(alias)); + this.select = select; + } - public SelectStatement getSelect() { - return select; - } + public SelectStatement getSelect() { + return select; + } - @Override - public T accept(TableNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + @Override + public T accept(TableNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - select.toSQL(resolver, buf); - buf.append(')'); - buf.append(" " + (this.getAlias() == null ? "" : this.getAlias())); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + select.toSQL(resolver, buf); + buf.append(')'); + buf.append(" " + (this.getAlias() == null ? "" : this.getAlias())); + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((select == null) ? 0 : select.hashCode()); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((select == null) ? 0 : select.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - DerivedTableNode other = (DerivedTableNode)obj; - if (select == null) { - if (other.select != null) return false; - } else if (!select.equals(other.select)) return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + DerivedTableNode other = (DerivedTableNode) obj; + if (select == null) { + if (other.select != null) return false; + } else if (!select.equals(other.select)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountHyperLogLogAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountHyperLogLogAggregateParseNode.java index 2fa6e10195c..2f342e04945 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountHyperLogLogAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountHyperLogLogAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,18 +22,19 @@ import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.DistinctCountHyperLogLogAggregateFunction; -import org.apache.phoenix.expression.function.SumAggregateFunction; - +import org.apache.phoenix.expression.function.FunctionExpression; public class DistinctCountHyperLogLogAggregateParseNode extends DelegateConstantToCountParseNode { - public DistinctCountHyperLogLogAggregateParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new DistinctCountHyperLogLogAggregateFunction(children, getDelegateFunction(children,context)); - } + public DistinctCountHyperLogLogAggregateParseNode(String name, List children, + BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new DistinctCountHyperLogLogAggregateFunction(children, + getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java index 8dc596c890c..d37cc902f44 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DistinctCountParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,34 +27,32 @@ import org.apache.phoenix.expression.function.FunctionExpression; /** - * - * * @since 1.2.1 */ public class DistinctCountParseNode extends DelegateConstantToCountParseNode { - - public DistinctCountParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - return new DistinctCountAggregateFunction(children, getDelegateFunction(children, context)); - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append("COUNT(DISTINCT "); - List children = getChildren(); - if (!children.isEmpty()) { - for (ParseNode child : children) { - child.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - buf.append(')'); + public DistinctCountParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new DistinctCountAggregateFunction(children, getDelegateFunction(children, context)); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append("COUNT(DISTINCT "); + List children = getChildren(); + if (!children.isEmpty()) { + for (ParseNode child : children) { + child.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DivideParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DivideParseNode.java index 1a2e1f9d48c..1436ae509ed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DivideParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DivideParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,34 +21,28 @@ import java.util.Collections; import java.util.List; - - /** - * * Node representing division in a SQL expression - * - * * @since 0.1 */ public class DivideParseNode extends ArithmeticParseNode { - public static final String OPERATOR = "/"; + public static final String OPERATOR = "/"; - @Override - public String getOperator() { - return OPERATOR; - } - + @Override + public String getOperator() { + return OPERATOR; + } - DivideParseNode(List children) { - super(children); - } + DivideParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropCDCStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropCDCStatement.java index a02d0e25d31..416ab14a5eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropCDCStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropCDCStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,35 +20,35 @@ import org.apache.phoenix.jdbc.PhoenixStatement; public class DropCDCStatement extends MutableStatement { - private final TableName tableName; - private final NamedNode cdcObjName; - private final boolean ifExists; - - public DropCDCStatement(NamedNode cdcObjName, TableName tableName, boolean ifExists) { - this.cdcObjName = cdcObjName; - this.tableName = tableName; - this.ifExists = ifExists; - } - - public TableName getTableName() { - return tableName; - } - - public NamedNode getCdcObjName() { - return cdcObjName; - } - - @Override - public int getBindCount() { - return 0; - } - - public boolean ifExists() { - return ifExists; - } - - @Override - public PhoenixStatement.Operation getOperation() { - return PhoenixStatement.Operation.DELETE; - } -} \ No newline at end of file + private final TableName tableName; + private final NamedNode cdcObjName; + private final boolean ifExists; + + public DropCDCStatement(NamedNode cdcObjName, TableName tableName, boolean ifExists) { + this.cdcObjName = cdcObjName; + this.tableName = tableName; + this.ifExists = ifExists; + } + + public TableName getTableName() { + return tableName; + } + + public NamedNode getCdcObjName() { + return cdcObjName; + } + + @Override + public int getBindCount() { + return 0; + } + + public boolean ifExists() { + return ifExists; + } + + @Override + public PhoenixStatement.Operation getOperation() { + return PhoenixStatement.Operation.DELETE; + } +} diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropColumnStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropColumnStatement.java index a0aaa6a5a55..1c539d6d16e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropColumnStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropColumnStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,20 +22,21 @@ import org.apache.phoenix.schema.PTableType; public class DropColumnStatement extends AlterTableStatement { - private final List columnRefs; - private final boolean ifExists; - - protected DropColumnStatement(NamedTableNode table, PTableType tableType, List columnRefs, boolean ifExists) { - super(table, tableType); - this.columnRefs = columnRefs; - this.ifExists = ifExists; - } + private final List columnRefs; + private final boolean ifExists; - public List getColumnRefs() { - return columnRefs; - } + protected DropColumnStatement(NamedTableNode table, PTableType tableType, + List columnRefs, boolean ifExists) { + super(table, tableType); + this.columnRefs = columnRefs; + this.ifExists = ifExists; + } - public boolean ifExists() { - return ifExists; - } + public List getColumnRefs() { + return columnRefs; + } + + public boolean ifExists() { + return ifExists; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropFunctionStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropFunctionStatement.java index a959eb7da18..c5ccbff6f4a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropFunctionStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropFunctionStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,23 +19,24 @@ public class DropFunctionStatement extends MutableStatement { - private final String functionName; - private final boolean ifExists; - public DropFunctionStatement(String functionName, boolean ifExists) { - this.functionName = functionName; - this.ifExists = ifExists; - } + private final String functionName; + private final boolean ifExists; - @Override - public int getBindCount() { - return 0; - } + public DropFunctionStatement(String functionName, boolean ifExists) { + this.functionName = functionName; + this.ifExists = ifExists; + } - public String getFunctionName() { - return functionName; - } + @Override + public int getBindCount() { + return 0; + } - public boolean ifExists() { - return ifExists; - } + public String getFunctionName() { + return functionName; + } + + public boolean ifExists() { + return ifExists; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropIndexStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropIndexStatement.java index 288d081c03c..8e954791437 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropIndexStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropIndexStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,35 +20,35 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class DropIndexStatement extends MutableStatement { - private final TableName tableName; - private final NamedNode indexName; - private final boolean ifExists; - - public DropIndexStatement(NamedNode indexName, TableName tableName, boolean ifExists) { - this.indexName = indexName; - this.tableName = tableName; - this.ifExists = ifExists; - } - - public TableName getTableName() { - return tableName; - } - - public NamedNode getIndexName() { - return indexName; - } - - @Override - public int getBindCount() { - return 0; - } - - public boolean ifExists() { - return ifExists; - } - - @Override - public Operation getOperation() { - return Operation.DELETE; - } + private final TableName tableName; + private final NamedNode indexName; + private final boolean ifExists; + + public DropIndexStatement(NamedNode indexName, TableName tableName, boolean ifExists) { + this.indexName = indexName; + this.tableName = tableName; + this.ifExists = ifExists; + } + + public TableName getTableName() { + return tableName; + } + + public NamedNode getIndexName() { + return indexName; + } + + @Override + public int getBindCount() { + return 0; + } + + public boolean ifExists() { + return ifExists; + } + + @Override + public Operation getOperation() { + return Operation.DELETE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java index 5d03a787352..d12a4ffe87a 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSchemaStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,36 +20,36 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class DropSchemaStatement extends MutableStatement { - private final String schemaName; - private final boolean ifExists; - private final boolean cascade; - - public DropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) { - this.schemaName = schemaName; - this.ifExists = ifExists; - this.cascade = cascade; - } - - @Override - public int getBindCount() { - return 0; - } - - public String getSchemaName() { - return schemaName; - } - - public boolean ifExists() { - return ifExists; - } - - public boolean cascade() { - return cascade; - } - - @Override - public Operation getOperation() { - return Operation.DELETE; - } + private final String schemaName; + private final boolean ifExists; + private final boolean cascade; + + public DropSchemaStatement(String schemaName, boolean ifExists, boolean cascade) { + this.schemaName = schemaName; + this.ifExists = ifExists; + this.cascade = cascade; + } + + @Override + public int getBindCount() { + return 0; + } + + public String getSchemaName() { + return schemaName; + } + + public boolean ifExists() { + return ifExists; + } + + public boolean cascade() { + return cascade; + } + + @Override + public Operation getOperation() { + return Operation.DELETE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSequenceStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSequenceStatement.java index c4093a1a0e4..16a93dea575 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSequenceStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropSequenceStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,31 +21,31 @@ public class DropSequenceStatement extends MutableStatement { - private final TableName sequenceName; - private final boolean ifExists; - private final int bindCount; - - protected DropSequenceStatement(TableName sequenceName, boolean ifExists, int bindCount) { - this.sequenceName = sequenceName; - this.ifExists = ifExists; - this.bindCount = bindCount; - } - - @Override - public int getBindCount() { - return bindCount; - } - - public TableName getSequenceName() { - return sequenceName; - } - - public boolean ifExists() { - return ifExists; - } - - @Override - public Operation getOperation() { - return Operation.DELETE; - } -} \ No newline at end of file + private final TableName sequenceName; + private final boolean ifExists; + private final int bindCount; + + protected DropSequenceStatement(TableName sequenceName, boolean ifExists, int bindCount) { + this.sequenceName = sequenceName; + this.ifExists = ifExists; + this.bindCount = bindCount; + } + + @Override + public int getBindCount() { + return bindCount; + } + + public TableName getSequenceName() { + return sequenceName; + } + + public boolean ifExists() { + return ifExists; + } + + @Override + public Operation getOperation() { + return Operation.DELETE; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropTableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropTableStatement.java index c334a819b9e..feef9b17996 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropTableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/DropTableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,48 +21,48 @@ import org.apache.phoenix.schema.PTableType; public class DropTableStatement extends MutableStatement { - private final TableName tableName; - private final boolean ifExists; - private final PTableType tableType; - private final boolean cascade; - private final boolean skipAddingParentColumns; - + private final TableName tableName; + private final boolean ifExists; + private final PTableType tableType; + private final boolean cascade; + private final boolean skipAddingParentColumns; - public DropTableStatement(TableName tableName, PTableType tableType, boolean ifExists, boolean cascade, boolean skipAddingParentColumns) { - this.tableName = tableName; - this.tableType = tableType; - this.ifExists = ifExists; - this.cascade = cascade; - this.skipAddingParentColumns = skipAddingParentColumns; - } - - @Override - public int getBindCount() { - return 0; // No binds for DROP - } + public DropTableStatement(TableName tableName, PTableType tableType, boolean ifExists, + boolean cascade, boolean skipAddingParentColumns) { + this.tableName = tableName; + this.tableType = tableType; + this.ifExists = ifExists; + this.cascade = cascade; + this.skipAddingParentColumns = skipAddingParentColumns; + } - public TableName getTableName() { - return tableName; - } + @Override + public int getBindCount() { + return 0; // No binds for DROP + } - public PTableType getTableType() { - return tableType; - } + public TableName getTableName() { + return tableName; + } - public boolean ifExists() { - return ifExists; - } - - public boolean cascade() { - return cascade; - } - - @Override - public Operation getOperation() { - return Operation.DELETE; - } + public PTableType getTableType() { + return tableType; + } - public boolean getSkipAddingParentColumns() { - return skipAddingParentColumns; - } + public boolean ifExists() { + return ifExists; + } + + public boolean cascade() { + return cascade; + } + + @Override + public Operation getOperation() { + return Operation.DELETE; + } + + public boolean getSkipAddingParentColumns() { + return skipAddingParentColumns; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/EqualParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/EqualParseNode.java index 5d69deba9bc..4d72333b6f0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/EqualParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/EqualParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,25 +20,22 @@ import org.apache.hadoop.hbase.CompareOperator; /** - * * Node representing the equal operator in SQL - * - * * @since 0.1 */ public class EqualParseNode extends ComparisonParseNode { - EqualParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } + EqualParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } - @Override - public CompareOperator getFilterOp() { - return CompareOperator.EQUAL; - } + @Override + public CompareOperator getFilterOp() { + return CompareOperator.EQUAL; + } - @Override - public CompareOperator getInvertFilterOp() { - return CompareOperator.EQUAL; - } + @Override + public CompareOperator getInvertFilterOp() { + return CompareOperator.EQUAL; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExecuteUpgradeStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExecuteUpgradeStatement.java index 29edf8f329b..abb41b01345 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExecuteUpgradeStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExecuteUpgradeStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,14 +21,14 @@ public class ExecuteUpgradeStatement implements BindableStatement { - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - @Override - public Operation getOperation() { - return Operation.UPGRADE; - } + @Override + public Operation getOperation() { + return Operation.UPGRADE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java index 95d5e20c9f2..56f7aa268ea 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExistsParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,62 +23,53 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing EXISTS and NOT EXISTS expressions in SQL - * - * * @since 0.1 */ public class ExistsParseNode extends UnaryParseNode { - private final boolean negate; + private final boolean negate; - ExistsParseNode(ParseNode child, boolean negate) { - super(child); - this.negate = negate; - } - - public boolean isNegate() { - return negate; - } + ExistsParseNode(ParseNode child, boolean negate) { + super(child); + this.negate = negate; + } + + public boolean isNegate() { + return negate; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (negate ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (negate ? 1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - ExistsParseNode other = (ExistsParseNode) obj; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + ExistsParseNode other = (ExistsParseNode) obj; + if (negate != other.negate) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - if (negate) buf.append(" NOT"); - buf.append(" EXISTS "); - getChildren().get(0).toSQL(resolver, buf); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + if (negate) buf.append(" NOT"); + buf.append(" EXISTS "); + getChildren().get(0).toSQL(resolver, buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainStatement.java index 3b28ca5c0d5..a92696f67bf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,29 +20,29 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class ExplainStatement implements BindableStatement { - private final BindableStatement statement; - private final ExplainType explainType; + private final BindableStatement statement; + private final ExplainType explainType; - public ExplainStatement(BindableStatement statement, ExplainType explainType) { - this.statement = statement; - this.explainType = explainType; - } + public ExplainStatement(BindableStatement statement, ExplainType explainType) { + this.statement = statement; + this.explainType = explainType; + } - public BindableStatement getStatement() { - return statement; - } + public BindableStatement getStatement() { + return statement; + } - @Override - public int getBindCount() { - return statement.getBindCount(); - } + @Override + public int getBindCount() { + return statement.getBindCount(); + } - @Override - public Operation getOperation() { - return Operation.QUERY; - } + @Override + public Operation getOperation() { + return Operation.QUERY; + } - public ExplainType getExplainType() { - return explainType; - } + public ExplainType getExplainType() { + return explainType; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainType.java index fc35939d35f..883e3c6a90c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ExplainType.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; /** * Explain type attributes used to differentiate output of the explain plan. 
*/ public enum ExplainType { - WITH_REGIONS, - DEFAULT + WITH_REGIONS, + DEFAULT } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java index 3c824529da6..8d3bfe20a4d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FamilyWildcardParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import java.sql.SQLException; @@ -23,70 +22,63 @@ import org.apache.phoenix.compile.ColumnResolver; /** - * * Node representing the selection of all columns of a family (cf.*) in the SELECT clause of SQL - * - * * @since 1.2 */ public class FamilyWildcardParseNode extends NamedParseNode { - private final boolean isRewrite; - - public FamilyWildcardParseNode(String familyName, boolean isRewrite){ - super(familyName); - this.isRewrite = isRewrite; - } - - public FamilyWildcardParseNode(FamilyWildcardParseNode familyName, boolean isRewrite){ - super(familyName); - this.isRewrite = isRewrite; - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + private final boolean isRewrite; + + public FamilyWildcardParseNode(String familyName, boolean isRewrite) { + super(familyName); + this.isRewrite = isRewrite; + } + + public FamilyWildcardParseNode(FamilyWildcardParseNode familyName, boolean isRewrite) { + super(familyName); + this.isRewrite = isRewrite; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + public boolean isRewrite() { + return isRewrite; + } - public boolean isRewrite() { - return isRewrite; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (isRewrite ? 1231 : 1237); + return result; + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (isRewrite ? 
1231 : 1237); - return result; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + FamilyWildcardParseNode other = (FamilyWildcardParseNode) obj; + if (isRewrite != other.isRewrite) return false; + return true; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - FamilyWildcardParseNode other = (FamilyWildcardParseNode) obj; - if (isRewrite != other.isRewrite) - return false; - return true; - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - toSQL(buf); - buf.append(".*"); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + toSQL(buf); + buf.append(".*"); + } - @Override - public boolean isWildcardNode() { - return true; - } + @Override + public boolean isWildcardNode() { + return true; + } - @Override - public FamilyWildcardParseNode getRewritten() { - return new FamilyWildcardParseNode(getName(), true); - } + @Override + public FamilyWildcardParseNode getRewritten() { + return new FamilyWildcardParseNode(getName(), true); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FetchStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FetchStatement.java index 08e97249633..3d0d2d404db 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FetchStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FetchStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,33 +20,33 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class FetchStatement implements BindableStatement { - private final CursorName cursorName; - private final boolean isNext; - private final int fetchSize; - - public FetchStatement(CursorName cursorName, boolean isNext, int fetchSize){ - this.cursorName = cursorName; - this.isNext = isNext; - this.fetchSize = fetchSize; - } - - public CursorName getCursorName(){ - return cursorName; - } - - public boolean isNext(){ - return isNext; - } - - public int getBindCount(){ - return 0; - } - - public Operation getOperation(){ - return Operation.QUERY; - } - - public int getFetchSize(){ - return fetchSize; - } + private final CursorName cursorName; + private final boolean isNext; + private final int fetchSize; + + public FetchStatement(CursorName cursorName, boolean isNext, int fetchSize) { + this.cursorName = cursorName; + this.isNext = isNext; + this.fetchSize = fetchSize; + } + + public CursorName getCursorName() { + return cursorName; + } + + public boolean isNext() { + return isNext; + } + + public int getBindCount() { + return 0; + } + + public Operation getOperation() { + return Operation.QUERY; + } + + public int getFetchSize() { + return fetchSize; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FilterableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FilterableStatement.java index 62a4aa252bd..ebbf973da9d 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FilterableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FilterableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,19 @@ import java.util.List; public interface FilterableStatement extends BindableStatement { - public HintNode getHint(); - public ParseNode getWhere(); - public boolean isDistinct(); - public boolean isAggregate(); - public List getOrderBy(); - public Double getTableSamplingRate(); - public LimitNode getLimit(); - public OffsetNode getOffset(); + public HintNode getHint(); + + public ParseNode getWhere(); + + public boolean isDistinct(); + + public boolean isAggregate(); + + public List getOrderBy(); + + public Double getTableSamplingRate(); + + public LimitNode getLimit(); + + public OffsetNode getOffset(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValueAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValueAggregateParseNode.java index 6eeaf3e39fd..f28fa39a387 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValueAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValueAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,14 @@ public class FirstValueAggregateParseNode extends DelegateConstantToCountParseNode { - public FirstValueAggregateParseNode(String name, List children, FunctionParseNode.BuiltInFunctionInfo info) { - super(name, children, info); - } + public FirstValueAggregateParseNode(String name, List children, + FunctionParseNode.BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new FirstValueFunction(children, getDelegateFunction(children, context)); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new FirstValueFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValuesAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValuesAggregateParseNode.java index 50b137ae265..b4e6bad49af 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValuesAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FirstValuesAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,14 @@ public class FirstValuesAggregateParseNode extends DelegateConstantToCountParseNode { - public FirstValuesAggregateParseNode(String name, List children, FunctionParseNode.BuiltInFunctionInfo info) { - super(name, children, info); - } + public FirstValuesAggregateParseNode(String name, List children, + FunctionParseNode.BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new FirstValuesFunction(children, getDelegateFunction(children, context)); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new FirstValuesFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FloorParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FloorParseNode.java index 136d2a7589a..88ccceaedd0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FloorParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FloorParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,57 +25,54 @@ import org.apache.phoenix.expression.function.FloorDateExpression; import org.apache.phoenix.expression.function.FloorDecimalExpression; import org.apache.phoenix.expression.function.FloorFunction; -import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.schema.TypeMismatchException; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.schema.TypeMismatchException; /** - * Parse node corresponding to {@link FloorFunction}. - * It also acts as a factory for creating the right kind of - * floor expression according to the data type of the - * first child. - * - * + * Parse node corresponding to {@link FloorFunction}. It also acts as a factory for creating the + * right kind of floor expression according to the data type of the first child. 
* @since 3.0.0 */ public class FloorParseNode extends FunctionParseNode { - FloorParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + FloorParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public Expression create(List children, StatementContext context) throws SQLException { - return getFloorExpression(children); - } + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + return getFloorExpression(children); + } - public static Expression getFloorExpression(List children) throws SQLException { - final Expression firstChild = children.get(0); - final PDataType firstChildDataType = firstChild.getDataType(); - - //FLOOR on timestamp doesn't really care about the nanos part i.e. it just sets it to zero. - //Which is exactly what FloorDateExpression does too. - if(firstChildDataType.isCoercibleTo(PTimestamp.INSTANCE)) { - return FloorDateExpression.create(children); - } else if(firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { - return FloorDecimalExpression.create(children); - } else { - throw TypeMismatchException.newException(firstChildDataType, "1"); - } - } - - /** - * When rounding off decimals, user need not specify the scale. In such cases, - * we need to prevent the function from getting evaluated as null. This is really - * a hack. A better way would have been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a - * way of associating default values for each permissible data type. - * Something like: @ Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} isConstant=true) - * Till then, this will have to do. - */ - @Override - public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { - return index == 0; + public static Expression getFloorExpression(List children) throws SQLException { + final Expression firstChild = children.get(0); + final PDataType firstChildDataType = firstChild.getDataType(); + + // FLOOR on timestamp doesn't really care about the nanos part i.e. it just sets it to zero. + // Which is exactly what FloorDateExpression does too. + if (firstChildDataType.isCoercibleTo(PTimestamp.INSTANCE)) { + return FloorDateExpression.create(children); + } else if (firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { + return FloorDecimalExpression.create(children); + } else { + throw TypeMismatchException.newException(firstChildDataType, "1"); } + } + + /** + * When rounding off decimals, user need not specify the scale. In such cases, we need to prevent + * the function from getting evaluated as null. This is really a hack. A better way would have + * been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a way + * of associating default values for each permissible data type. Something like: @ + * Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} + * isConstant=true) Till then, this will have to do. 
+ */ + @Override + public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { + return index == 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java index 284e54d729c..1276df87b5b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,8 +30,6 @@ import java.util.List; import java.util.Set; -import net.jcip.annotations.Immutable; - import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Determinism; @@ -47,528 +45,544 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDataTypeFactory; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.phoenix.util.SchemaUtil; - +import net.jcip.annotations.Immutable; /** - * * Node representing a function expression in SQL - * - * * @since 0.1 */ public class FunctionParseNode extends CompoundParseNode { - private final String name; - private BuiltInFunctionInfo info; - - FunctionParseNode(String name, List children, BuiltInFunctionInfo info) { - super(children); - this.name = SchemaUtil.normalizeIdentifier(name); - this.info = info; - } - - public BuiltInFunctionInfo getInfo() { - return info; + private final String name; + private BuiltInFunctionInfo info; + + FunctionParseNode(String name, List children, BuiltInFunctionInfo info) { + super(children); + this.name = SchemaUtil.normalizeIdentifier(name); + this.info = info; + } + + public BuiltInFunctionInfo getInfo() { + return info; + } + + public String getName() { + return name; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } - - public String getName() { - return name; + return visitor.visitLeave(this, l); + } + + public boolean isAggregate() { + if (getInfo() == null) return false; + return getInfo().isAggregate(); + } + + /** + * Determines whether or not we can collapse a function expression to null if a required parameter + * is null. + * @param index index of parameter + * @return true if when the parameter at index is null, the function always evaluates to null and + * false otherwise. 
+ */ + public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { + return true; + } + + private static Constructor + getParseNodeCtor(Class clazz) { + Constructor ctor; + try { + ctor = clazz.getDeclaredConstructor(String.class, List.class, BuiltInFunctionInfo.class); + } catch (Exception e) { + throw new RuntimeException(e); } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + ctor.setAccessible(true); + return ctor; + } + + private static Constructor + getExpressionCtor(Class clazz, PFunction function) { + Constructor ctor; + try { + if (function == null) { + ctor = clazz.getDeclaredConstructor(List.class); + } else { + ctor = clazz.getDeclaredConstructor(List.class, PFunction.class); + } + } catch (Exception e) { + throw new RuntimeException(e); } - - public boolean isAggregate() { - if(getInfo()==null) return false; - return getInfo().isAggregate(); + ctor.setAccessible(true); + return ctor; + } + + public List validate(List children, StatementContext context) + throws SQLException { + BuiltInFunctionInfo info = this.getInfo(); + BuiltInFunctionArgInfo[] args = info.getArgs(); + if (args.length < children.size() || info.getRequiredArgCount() > children.size()) { + throw new FunctionNotFoundException(this.name); } - - /** - * Determines whether or not we can collapse a function expression to null if a required - * parameter is null. - * @param context - * @param index index of parameter - * @return true if when the parameter at index is null, the function always evaluates to null - * and false otherwise. - * @throws SQLException - */ - public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { - return true; - } - - - private static Constructor getParseNodeCtor(Class clazz) { - Constructor ctor; - try { - ctor = clazz.getDeclaredConstructor(String.class, List.class, BuiltInFunctionInfo.class); - } catch (Exception e) { - throw new RuntimeException(e); - } - ctor.setAccessible(true); - return ctor; + if (args.length > children.size()) { + List moreChildren = new ArrayList(children); + for (int i = children.size(); i < info.getArgs().length; i++) { + moreChildren.add(LiteralExpression.newConstant(null, + args[i].allowedTypes.length == 0 + ? null + : PDataTypeFactory.getInstance().instanceFromClass(args[i].allowedTypes[0]), + Determinism.ALWAYS)); + } + children = moreChildren; } - - private static Constructor getExpressionCtor(Class clazz, PFunction function) { - Constructor ctor; - try { - if(function == null) { - ctor = clazz.getDeclaredConstructor(List.class); - } else { - ctor = clazz.getDeclaredConstructor(List.class, PFunction.class); + List nodeChildren = this.getChildren(); + for (int i = 0; i < children.size(); i++) { + BindParseNode bindNode = null; + Class[] allowedTypes = args[i].getAllowedTypes(); + // Check if the node is a bind parameter, and set the parameter + // metadata based on the function argument annotation. Check to + // make sure we're not looking past the end of the list of + // child expression nodes, which can happen if the function + // invocation hasn't specified all arguments and is using default + // values. + if (i < nodeChildren.size() && nodeChildren.get(i) instanceof BindParseNode) { + bindNode = (BindParseNode) nodeChildren.get(i); + } + // If the child type is null, then the expression is unbound. 
+ // Skip any validation, since we either 1) have a default value + // which has already been validated, 2) attempting to get the + // parameter metadata, or 3) have an unbound parameter which + // will be detected futher downstream. + Expression child = children.get(i); + if ( + child.getDataType() == null + /* null used explicitly in query */ || i >= nodeChildren.size() /* argument left off */ + ) { + // Replace the unbound expression with the default value expression if specified + if (args[i].getDefaultValue() != null) { + Expression defaultValue = args[i].getDefaultValue(); + children.set(i, defaultValue); + // Set the parameter metadata if this is a bind parameter + if (bindNode != null) { + context.getBindManager().addParamMetaData(bindNode, defaultValue); + } + } else if (bindNode != null) { + // Otherwise if the node is a bind parameter and we have type information + // based on the function argument annonation set the parameter meta data. + if (child.getDataType() == null) { + if (allowedTypes.length > 0) { + context.getBindManager().addParamMetaData(bindNode, + LiteralExpression.newConstant(null, + PDataTypeFactory.getInstance().instanceFromClass(allowedTypes[0]), + Determinism.ALWAYS)); } - } catch (Exception e) { - throw new RuntimeException(e); + } else { // Use expression as is, since we already have the data type set + context.getBindManager().addParamMetaData(bindNode, child); + } + } else if (allowedTypes.length > 0) { + // Switch null type with typed null + children.set(i, LiteralExpression.newConstant(null, + PDataTypeFactory.getInstance().instanceFromClass(allowedTypes[0]), Determinism.ALWAYS)); } - ctor.setAccessible(true); - return ctor; + } else { + validateFunctionArguement(info, i, child); + } } - - public List validate(List children, StatementContext context) throws SQLException { - BuiltInFunctionInfo info = this.getInfo(); - BuiltInFunctionArgInfo[] args = info.getArgs(); - if (args.length < children.size() || info.getRequiredArgCount() > children.size()) { - throw new FunctionNotFoundException(this.name); + return children; + } + + public static void validateFunctionArguement(BuiltInFunctionInfo info, int childIndex, + Expression child) throws ArgumentTypeMismatchException, ValueRangeExcpetion { + BuiltInFunctionArgInfo arg = info.getArgs()[childIndex]; + if (arg.getAllowedTypes().length > 0) { + boolean isCoercible = false; + for (Class type : arg.getAllowedTypes()) { + if ( + child.getDataType().isCoercibleTo(PDataTypeFactory.getInstance().instanceFromClass(type)) + ) { + isCoercible = true; + break; } - if (args.length > children.size()) { - List moreChildren = new ArrayList(children); - for (int i = children.size(); i < info.getArgs().length; i++) { - moreChildren.add(LiteralExpression.newConstant(null, args[i].allowedTypes.length == 0 ? null : - PDataTypeFactory.getInstance().instanceFromClass(args[i].allowedTypes[0]), Determinism.ALWAYS)); - } - children = moreChildren; + } + if (!isCoercible) { + throw new ArgumentTypeMismatchException(arg.getAllowedTypes(), child.getDataType(), + info.getName() + " argument " + (childIndex + 1)); + } + if (child instanceof LiteralExpression) { + LiteralExpression valueExp = (LiteralExpression) child; + LiteralExpression minValue = arg.getMinValue(); + LiteralExpression maxValue = arg.getMaxValue(); + if ( + minValue != null && minValue.getDataType().compareTo(minValue.getValue(), + valueExp.getValue(), valueExp.getDataType()) > 0 + ) { + throw new ValueRangeExcpetion(minValue, maxValue == null ? 
"" : maxValue, + valueExp.getValue(), info.getName() + " argument " + (childIndex + 1)); } - List nodeChildren = this.getChildren(); - for (int i = 0; i < children.size(); i++) { - BindParseNode bindNode = null; - Class[] allowedTypes = args[i].getAllowedTypes(); - // Check if the node is a bind parameter, and set the parameter - // metadata based on the function argument annotation. Check to - // make sure we're not looking past the end of the list of - // child expression nodes, which can happen if the function - // invocation hasn't specified all arguments and is using default - // values. - if (i < nodeChildren.size() && nodeChildren.get(i) instanceof BindParseNode) { - bindNode = (BindParseNode)nodeChildren.get(i); - } - // If the child type is null, then the expression is unbound. - // Skip any validation, since we either 1) have a default value - // which has already been validated, 2) attempting to get the - // parameter metadata, or 3) have an unbound parameter which - // will be detected futher downstream. - Expression child = children.get(i); - if (child.getDataType() == null /* null used explicitly in query */ || i >= nodeChildren.size() /* argument left off */) { - // Replace the unbound expression with the default value expression if specified - if (args[i].getDefaultValue() != null) { - Expression defaultValue = args[i].getDefaultValue(); - children.set(i, defaultValue); - // Set the parameter metadata if this is a bind parameter - if (bindNode != null) { - context.getBindManager().addParamMetaData(bindNode, defaultValue); - } - } else if (bindNode != null) { - // Otherwise if the node is a bind parameter and we have type information - // based on the function argument annonation set the parameter meta data. - if (child.getDataType() == null) { - if (allowedTypes.length > 0) { - context.getBindManager().addParamMetaData(bindNode, LiteralExpression.newConstant(null, PDataTypeFactory.getInstance().instanceFromClass( - allowedTypes[0]), Determinism.ALWAYS)); - } - } else { // Use expression as is, since we already have the data type set - context.getBindManager().addParamMetaData(bindNode, child); - } - } else if (allowedTypes.length > 0) { - // Switch null type with typed null - children.set(i, LiteralExpression.newConstant(null, PDataTypeFactory.getInstance().instanceFromClass( - allowedTypes[0]), Determinism.ALWAYS)); - } - } else { - validateFunctionArguement(info, i, child); - } + if ( + maxValue != null && maxValue.getDataType().compareTo(maxValue.getValue(), + valueExp.getValue(), valueExp.getDataType()) < 0 + ) { + throw new ValueRangeExcpetion(minValue == null ? 
"" : minValue, maxValue, + valueExp.getValue(), info.getName() + " argument " + (childIndex + 1)); } - return children; + } } - - public static void validateFunctionArguement(BuiltInFunctionInfo info, - int childIndex, Expression child) - throws ArgumentTypeMismatchException, ValueRangeExcpetion { - BuiltInFunctionArgInfo arg = info.getArgs()[childIndex]; - if (arg.getAllowedTypes().length > 0) { - boolean isCoercible = false; - for (Class type :arg.getAllowedTypes()) { - if (child.getDataType().isCoercibleTo( - PDataTypeFactory.getInstance().instanceFromClass(type))) { - isCoercible = true; - break; - } - } - if (!isCoercible) { - throw new ArgumentTypeMismatchException(arg.getAllowedTypes(), - child.getDataType(), info.getName() + " argument " + (childIndex + 1)); - } - if (child instanceof LiteralExpression) { - LiteralExpression valueExp = (LiteralExpression) child; - LiteralExpression minValue = arg.getMinValue(); - LiteralExpression maxValue = arg.getMaxValue(); - if (minValue != null && minValue.getDataType().compareTo(minValue.getValue(), valueExp.getValue(), valueExp.getDataType()) > 0) { - throw new ValueRangeExcpetion(minValue, maxValue == null ? "" : maxValue, valueExp.getValue(), info.getName() + " argument " + (childIndex + 1)); - } - if (maxValue != null && maxValue.getDataType().compareTo(maxValue.getValue(), valueExp.getValue(), valueExp.getDataType()) < 0) { - throw new ValueRangeExcpetion(minValue == null ? "" : minValue, maxValue, valueExp.getValue(), info.getName() + " argument " + (childIndex + 1)); - } - } - } - if (arg.isConstant() && ! (child instanceof LiteralExpression) ) { - throw new ArgumentTypeMismatchException("constant", child.toString(), info.getName() + " argument " + (childIndex + 1)); - } - if (!arg.getAllowedValues().isEmpty()) { - Object value = ((LiteralExpression)child).getValue(); - if (!arg.getAllowedValues().contains(value.toString().toUpperCase())) { - throw new ArgumentTypeMismatchException(Arrays.toString(arg.getAllowedValues().toArray(new String[0])), - value.toString(), info.getName() + " argument " + (childIndex + 1)); - } - } + if (arg.isConstant() && !(child instanceof LiteralExpression)) { + throw new ArgumentTypeMismatchException("constant", child.toString(), + info.getName() + " argument " + (childIndex + 1)); } - - /** - * Entry point for parser to instantiate compiled representation of built-in function - * @param children Compiled expressions for child nodes - * @param context Query context for accessing state shared across the processing of multiple clauses - * @return compiled representation of built-in function - * @throws SQLException - */ - public Expression create(List children, StatementContext context) throws SQLException { - return create(children, null, context); + if (!arg.getAllowedValues().isEmpty()) { + Object value = ((LiteralExpression) child).getValue(); + if (!arg.getAllowedValues().contains(value.toString().toUpperCase())) { + throw new ArgumentTypeMismatchException( + Arrays.toString(arg.getAllowedValues().toArray(new String[0])), value.toString(), + info.getName() + " argument " + (childIndex + 1)); + } } - - /** - * Entry point for parser to instantiate compiled representation of built-in function - * @param children Compiled expressions for child nodes - * @param function - * @param context Query context for accessing state shared across the processing of multiple clauses - * @return compiled representation of built-in function - * @throws SQLException - */ - public Expression create(List children, PFunction 
function, StatementContext context) throws SQLException { - try { - Constructor fCtor = info.getFuncCtor(); - if (fCtor == null) { - fCtor = getExpressionCtor(info.func, null); - } - if(function == null) { - return fCtor.newInstance(children); - } else { - return fCtor.newInstance(children, function); - } - } catch (InstantiationException e) { - throw new SQLException(e); - } catch (IllegalAccessException e) { - throw new SQLException(e); - } catch (IllegalArgumentException e) { - throw new SQLException(e); - } catch (InvocationTargetException e) { - if (e.getTargetException() instanceof SQLException) { - throw (SQLException) e.getTargetException(); - } - throw new SQLException(e); - } + } + + /** + * Entry point for parser to instantiate compiled representation of built-in function + * @param children Compiled expressions for child nodes + * @param context Query context for accessing state shared across the processing of multiple + * clauses + * @return compiled representation of built-in function + */ + public Expression create(List children, StatementContext context) + throws SQLException { + return create(children, null, context); + } + + /** + * Entry point for parser to instantiate compiled representation of built-in function + * @param children Compiled expressions for child nodes + * @param context Query context for accessing state shared across the processing of multiple + * clauses + * @return compiled representation of built-in function + */ + public Expression create(List children, PFunction function, StatementContext context) + throws SQLException { + try { + Constructor fCtor = info.getFuncCtor(); + if (fCtor == null) { + fCtor = getExpressionCtor(info.func, null); + } + if (function == null) { + return fCtor.newInstance(children); + } else { + return fCtor.newInstance(children, function); + } + } catch (InstantiationException e) { + throw new SQLException(e); + } catch (IllegalAccessException e) { + throw new SQLException(e); + } catch (IllegalArgumentException e) { + throw new SQLException(e); + } catch (InvocationTargetException e) { + if (e.getTargetException() instanceof SQLException) { + throw (SQLException) e.getTargetException(); + } + throw new SQLException(e); } + } - /** - * Marker used to indicate that a class should be used by DirectFunctionExpressionExec below - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public - @interface BuiltInFunction { - String name(); - Argument[] args() default {}; - Class nodeClass() default FunctionParseNode.class; - Class[] derivedFunctions() default {}; - FunctionClassType classType() default FunctionClassType.NONE; - } + /** + * Marker used to indicate that a class should be used by DirectFunctionExpressionExec below + */ + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.TYPE) + public @interface BuiltInFunction { + String name(); - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.TYPE) - public - @interface Argument { - Class[] allowedTypes() default {}; - boolean isConstant() default false; - String defaultValue() default ""; - String enumeration() default ""; - String minValue() default ""; - String maxValue() default ""; - } + Argument[] args() default {}; - public enum FunctionClassType { - NONE, - ABSTRACT, - DERIVED, - ALIAS, - UDF - } + Class nodeClass() default FunctionParseNode.class; - /** - * Structure used to hold parse-time information about Function implementation classes - */ - @Immutable - public static final class BuiltInFunctionInfo { - private final String name; - 
private final Class func; - private final Constructor funcCtor; - private final Constructor nodeCtor; - private final BuiltInFunctionArgInfo[] args; - private final boolean isAggregate; - private final int requiredArgCount; - private final FunctionClassType classType; - private final Class[] derivedFunctions; - - public BuiltInFunctionInfo(Class f, BuiltInFunction d) { - this.name = SchemaUtil.normalizeIdentifier(d.name()); - this.func = f; - this.funcCtor = d.nodeClass() == FunctionParseNode.class ? getExpressionCtor(f, null) : null; - this.nodeCtor = d.nodeClass() == FunctionParseNode.class ? null : getParseNodeCtor(d.nodeClass()); - this.args = new BuiltInFunctionArgInfo[d.args().length]; - int requiredArgCount = 0; - for (int i = 0; i < args.length; i++) { - this.args[i] = new BuiltInFunctionArgInfo(d.args()[i]); - if (this.args[i].getDefaultValue() == null) { - requiredArgCount = i + 1; - } - } - this.requiredArgCount = requiredArgCount; - this.isAggregate = AggregateFunction.class.isAssignableFrom(f); - this.classType = d.classType(); - this.derivedFunctions = d.derivedFunctions(); - } + Class[] derivedFunctions() default {}; - public BuiltInFunctionInfo(PFunction function) { - this.name = SchemaUtil.normalizeIdentifier(function.getFunctionName()); - this.func = null; - this.funcCtor = getExpressionCtor(UDFExpression.class, function); - this.nodeCtor = getParseNodeCtor(UDFParseNode.class); - this.args = new BuiltInFunctionArgInfo[function.getFunctionArguments().size()]; - int requiredArgCount = 0; - for (int i = 0; i < args.length; i++) { - this.args[i] = new BuiltInFunctionArgInfo(function.getFunctionArguments().get(i)); - if (this.args[i].getDefaultValue() == null) { - requiredArgCount = i + 1; - } - } - this.requiredArgCount = requiredArgCount; - this.isAggregate = AggregateFunction.class.isAssignableFrom(UDFExpression.class); - this.classType = FunctionClassType.UDF; - this.derivedFunctions = null; - } + FunctionClassType classType() default FunctionClassType.NONE; + } - public int getRequiredArgCount() { - return requiredArgCount; - } + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.TYPE) + public @interface Argument { + Class[] allowedTypes() default {}; - public String getName() { - return name; - } + boolean isConstant() default false; - public Class getFunc() { - return func; - } + String defaultValue() default ""; - public Constructor getFuncCtor() { - return funcCtor; - } + String enumeration() default ""; - public Constructor getNodeCtor() { - return nodeCtor; - } + String minValue() default ""; - public boolean isAggregate() { - return isAggregate; - } + String maxValue() default ""; + } - public BuiltInFunctionArgInfo[] getArgs() { - return args; - } + public enum FunctionClassType { + NONE, + ABSTRACT, + DERIVED, + ALIAS, + UDF + } - public FunctionClassType getClassType() { - return classType; + /** + * Structure used to hold parse-time information about Function implementation classes + */ + @Immutable + public static final class BuiltInFunctionInfo { + private final String name; + private final Class func; + private final Constructor funcCtor; + private final Constructor nodeCtor; + private final BuiltInFunctionArgInfo[] args; + private final boolean isAggregate; + private final int requiredArgCount; + private final FunctionClassType classType; + private final Class[] derivedFunctions; + + public BuiltInFunctionInfo(Class f, BuiltInFunction d) { + this.name = SchemaUtil.normalizeIdentifier(d.name()); + this.func = f; + this.funcCtor = d.nodeClass() == 
FunctionParseNode.class ? getExpressionCtor(f, null) : null; + this.nodeCtor = + d.nodeClass() == FunctionParseNode.class ? null : getParseNodeCtor(d.nodeClass()); + this.args = new BuiltInFunctionArgInfo[d.args().length]; + int requiredArgCount = 0; + for (int i = 0; i < args.length; i++) { + this.args[i] = new BuiltInFunctionArgInfo(d.args()[i]); + if (this.args[i].getDefaultValue() == null) { + requiredArgCount = i + 1; } + } + this.requiredArgCount = requiredArgCount; + this.isAggregate = AggregateFunction.class.isAssignableFrom(f); + this.classType = d.classType(); + this.derivedFunctions = d.derivedFunctions(); + } - public Class[] getDerivedFunctions() { - return derivedFunctions; + public BuiltInFunctionInfo(PFunction function) { + this.name = SchemaUtil.normalizeIdentifier(function.getFunctionName()); + this.func = null; + this.funcCtor = getExpressionCtor(UDFExpression.class, function); + this.nodeCtor = getParseNodeCtor(UDFParseNode.class); + this.args = new BuiltInFunctionArgInfo[function.getFunctionArguments().size()]; + int requiredArgCount = 0; + for (int i = 0; i < args.length; i++) { + this.args[i] = new BuiltInFunctionArgInfo(function.getFunctionArguments().get(i)); + if (this.args[i].getDefaultValue() == null) { + requiredArgCount = i + 1; } + } + this.requiredArgCount = requiredArgCount; + this.isAggregate = AggregateFunction.class.isAssignableFrom(UDFExpression.class); + this.classType = FunctionClassType.UDF; + this.derivedFunctions = null; } - @Immutable - public static class BuiltInFunctionArgInfo { - private static final Class[] ENUMERATION_TYPES = new Class[] { PVarchar.class }; - private final Class[] allowedTypes; - private final boolean isConstant; - private final Set allowedValues; // Enumeration of possible values - private final LiteralExpression defaultValue; - private final LiteralExpression minValue; - private final LiteralExpression maxValue; - - @SuppressWarnings({ "unchecked", "rawtypes" }) - BuiltInFunctionArgInfo(Argument argument) { - - if (argument.enumeration().length() > 0) { - this.isConstant = true; - this.defaultValue = null; - this.minValue = null; - this.maxValue = null; - this.allowedTypes = ENUMERATION_TYPES; - Class clazz = null; - String packageName = FunctionExpression.class.getPackage().getName(); - try { - clazz = Class.forName(packageName + "." + argument.enumeration()); - } catch (ClassNotFoundException e) { - try { - clazz = Class.forName(argument.enumeration()); - } catch (ClassNotFoundException e1) { - } - } - if (clazz == null || !clazz.isEnum()) { - throw new IllegalStateException("The enumeration annotation '" + argument.enumeration() + "' does not resolve to a enumeration class"); - } - Class enumClass = (Class)clazz; - Enum[] enums = enumClass.getEnumConstants(); - ImmutableSet.Builder builder = ImmutableSet.builder(); - for (Enum en : enums) { - builder.add(en.name()); - } - allowedValues = builder.build(); - } else { - this.allowedValues = Collections.emptySet(); - this.isConstant = argument.isConstant(); - this.allowedTypes = argument.allowedTypes(); - this.defaultValue = getExpFromConstant(argument.defaultValue()); - this.minValue = getExpFromConstant(argument.minValue()); - this.maxValue = getExpFromConstant(argument.maxValue()); - } - } + public int getRequiredArgCount() { + return requiredArgCount; + } - @SuppressWarnings({ "unchecked", "rawtypes" }) - BuiltInFunctionArgInfo(FunctionArgument arg) { - PDataType dataType = - arg.isArrayType() ? 
PDataType.fromTypeId(PDataType.sqlArrayType(SchemaUtil - .normalizeIdentifier(SchemaUtil.normalizeIdentifier(arg - .getArgumentType())))) : PDataType.fromSqlTypeName(SchemaUtil - .normalizeIdentifier(arg.getArgumentType())); - this.allowedValues = Collections.emptySet(); - this.allowedTypes = new Class[] { dataType.getClass() }; - this.isConstant = arg.isConstant(); - this.defaultValue = - arg.getDefaultValue() == null ? null : arg.getDefaultValue(); - this.minValue = - arg.getMinValue() == null ? null : arg.getMinValue(); - this.maxValue = - arg.getMaxValue() == null ? null : arg.getMaxValue(); - } - - private LiteralExpression getExpFromConstant(String strValue) { - LiteralExpression exp = null; - if (strValue.length() > 0) { - SQLParser parser = new SQLParser(strValue); - try { - LiteralParseNode node = parser.parseLiteral(); - LiteralExpression defaultValue = LiteralExpression.newConstant(node.getValue(), PDataTypeFactory.getInstance().instanceFromClass( - allowedTypes[0]), Determinism.ALWAYS); - if (this.getAllowedTypes().length > 0) { - for (Class type : this.getAllowedTypes()) { - if (defaultValue.getDataType() == null || defaultValue.getDataType().isCoercibleTo( - PDataTypeFactory.getInstance().instanceFromClass(type), - node.getValue())) { - return LiteralExpression.newConstant(node.getValue(), - PDataTypeFactory.getInstance().instanceFromClass(type), - Determinism.ALWAYS); - } - } - throw new IllegalStateException("Unable to coerce default value " + strValue + " to any of the allowed types of " + Arrays.toString(this.getAllowedTypes())); - } - exp = defaultValue; - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - return exp; - } + public String getName() { + return name; + } - public boolean isConstant() { - return isConstant; - } + public Class getFunc() { + return func; + } - public LiteralExpression getDefaultValue() { - return defaultValue; - } + public Constructor getFuncCtor() { + return funcCtor; + } - public LiteralExpression getMinValue() { - return minValue; - } + public Constructor getNodeCtor() { + return nodeCtor; + } - public LiteralExpression getMaxValue() { - return maxValue; + public boolean isAggregate() { + return isAggregate; + } + + public BuiltInFunctionArgInfo[] getArgs() { + return args; + } + + public FunctionClassType getClassType() { + return classType; + } + + public Class[] getDerivedFunctions() { + return derivedFunctions; + } + } + + @Immutable + public static class BuiltInFunctionArgInfo { + private static final Class[] ENUMERATION_TYPES = + new Class[] { PVarchar.class }; + private final Class[] allowedTypes; + private final boolean isConstant; + private final Set allowedValues; // Enumeration of possible values + private final LiteralExpression defaultValue; + private final LiteralExpression minValue; + private final LiteralExpression maxValue; + + @SuppressWarnings({ "unchecked", "rawtypes" }) + BuiltInFunctionArgInfo(Argument argument) { + + if (argument.enumeration().length() > 0) { + this.isConstant = true; + this.defaultValue = null; + this.minValue = null; + this.maxValue = null; + this.allowedTypes = ENUMERATION_TYPES; + Class clazz = null; + String packageName = FunctionExpression.class.getPackage().getName(); + try { + clazz = Class.forName(packageName + "." 
+ argument.enumeration()); + } catch (ClassNotFoundException e) { + try { + clazz = Class.forName(argument.enumeration()); + } catch (ClassNotFoundException e1) { + } } - public Class[] getAllowedTypes() { - return allowedTypes; + if (clazz == null || !clazz.isEnum()) { + throw new IllegalStateException("The enumeration annotation '" + argument.enumeration() + + "' does not resolve to a enumeration class"); } - - public Set getAllowedValues() { - return allowedValues; + Class enumClass = (Class) clazz; + Enum[] enums = enumClass.getEnumConstants(); + ImmutableSet.Builder builder = ImmutableSet.builder(); + for (Enum en : enums) { + builder.add(en.name()); } + allowedValues = builder.build(); + } else { + this.allowedValues = Collections.emptySet(); + this.isConstant = argument.isConstant(); + this.allowedTypes = argument.allowedTypes(); + this.defaultValue = getExpFromConstant(argument.defaultValue()); + this.minValue = getExpFromConstant(argument.minValue()); + this.maxValue = getExpFromConstant(argument.maxValue()); + } + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + BuiltInFunctionArgInfo(FunctionArgument arg) { + PDataType dataType = arg.isArrayType() + ? PDataType.fromTypeId(PDataType.sqlArrayType( + SchemaUtil.normalizeIdentifier(SchemaUtil.normalizeIdentifier(arg.getArgumentType())))) + : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(arg.getArgumentType())); + this.allowedValues = Collections.emptySet(); + this.allowedTypes = new Class[] { dataType.getClass() }; + this.isConstant = arg.isConstant(); + this.defaultValue = arg.getDefaultValue() == null ? null : arg.getDefaultValue(); + this.minValue = arg.getMinValue() == null ? null : arg.getMinValue(); + this.maxValue = arg.getMaxValue() == null ? null : arg.getMaxValue(); } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((info == null) ? 0 : info.hashCode()); - result = prime * result + ((name == null) ? 
0 : name.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - FunctionParseNode other = (FunctionParseNode) obj; - if (info == null) { - if (other.info != null) - return false; - } else if (!info.equals(other.info)) - return false; - if (name == null) { - if (other.name != null) - return false; - } else if (!name.equals(other.name)) - return false; - return true; - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append(name); - buf.append('('); - List children = getChildren(); - if (!children.isEmpty()) { - for (ParseNode child : children) { - child.toSQL(resolver, buf); - buf.append(','); + private LiteralExpression getExpFromConstant(String strValue) { + LiteralExpression exp = null; + if (strValue.length() > 0) { + SQLParser parser = new SQLParser(strValue); + try { + LiteralParseNode node = parser.parseLiteral(); + LiteralExpression defaultValue = LiteralExpression.newConstant(node.getValue(), + PDataTypeFactory.getInstance().instanceFromClass(allowedTypes[0]), Determinism.ALWAYS); + if (this.getAllowedTypes().length > 0) { + for (Class type : this.getAllowedTypes()) { + if ( + defaultValue.getDataType() == null || defaultValue.getDataType().isCoercibleTo( + PDataTypeFactory.getInstance().instanceFromClass(type), node.getValue()) + ) { + return LiteralExpression.newConstant(node.getValue(), + PDataTypeFactory.getInstance().instanceFromClass(type), Determinism.ALWAYS); + } } - buf.setLength(buf.length()-1); + throw new IllegalStateException("Unable to coerce default value " + strValue + + " to any of the allowed types of " + Arrays.toString(this.getAllowedTypes())); + } + exp = defaultValue; + } catch (SQLException e) { + throw new RuntimeException(e); } - buf.append(')'); + } + return exp; + } + + public boolean isConstant() { + return isConstant; + } + + public LiteralExpression getDefaultValue() { + return defaultValue; + } + + public LiteralExpression getMinValue() { + return minValue; + } + + public LiteralExpression getMaxValue() { + return maxValue; + } + + public Class[] getAllowedTypes() { + return allowedTypes; + } + + public Set getAllowedValues() { + return allowedValues; + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((info == null) ? 0 : info.hashCode()); + result = prime * result + ((name == null) ? 
0 : name.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + FunctionParseNode other = (FunctionParseNode) obj; + if (info == null) { + if (other.info != null) return false; + } else if (!info.equals(other.info)) return false; + if (name == null) { + if (other.name != null) return false; + } else if (!name.equals(other.name)) return false; + return true; + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append(name); + buf.append('('); + List children = getChildren(); + if (!children.isEmpty()) { + for (ParseNode child : children) { + child.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java index 2cf3ba285e7..6ad26ca80a5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanOrEqualParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,26 +20,22 @@ import org.apache.hadoop.hbase.CompareOperator; /** - * * Node representing the greater than or equal to operator {@code (>=) } in SQL - * - * * @since 0.1 */ public class GreaterThanOrEqualParseNode extends ComparisonParseNode { - GreaterThanOrEqualParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } - - @Override - public CompareOperator getFilterOp() { - return CompareOperator.GREATER_OR_EQUAL; - } + GreaterThanOrEqualParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } + @Override + public CompareOperator getFilterOp() { + return CompareOperator.GREATER_OR_EQUAL; + } - @Override - public CompareOperator getInvertFilterOp() { - return CompareOperator.LESS_OR_EQUAL; - } + @Override + public CompareOperator getInvertFilterOp() { + return CompareOperator.LESS_OR_EQUAL; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java index 2d163ada86e..1ae60b2d40b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/GreaterThanParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,27 +19,23 @@ import org.apache.hadoop.hbase.CompareOperator; - /** - * * Node representing the greater than operator {@code (>) } in SQL - * - * * @since 0.1 */ public class GreaterThanParseNode extends ComparisonParseNode { - GreaterThanParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } + GreaterThanParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } - @Override - public CompareOperator getFilterOp() { - return CompareOperator.GREATER; - } + @Override + public CompareOperator getFilterOp() { + return CompareOperator.GREATER; + } - @Override - public CompareOperator getInvertFilterOp() { - return CompareOperator.LESS; - } + @Override + public CompareOperator getInvertFilterOp() { + return CompareOperator.LESS; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/HintNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/HintNode.java index 6c1f97b7fab..3d25200f90d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/HintNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/HintNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,246 +20,235 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; - - /** * Node representing optimizer hints in SQL */ public class HintNode { - public static final HintNode EMPTY_HINT_NODE = new HintNode(); - - public static final char SEPARATOR = ' '; - public static final String PREFIX = "("; - public static final String SUFFIX = ")"; - // Each hint is of the generic syntax hintWord(hintArgs) where hintArgs in parent are optional. - private static final Pattern HINT_PATTERN = Pattern.compile( - "(?\\w+)\\s*(?:\\s*\\(\\s*(?[^)]+)\\s*\\))?"); - private static final Pattern HINT_ARG_PATTERN = Pattern.compile("(?\"[^\"]+\"|\\S+)"); - - public enum Hint { - /** - * Forces a range scan to be used to process the query. - */ - RANGE_SCAN, - /** - * Forces a skip scan to be used to process the query. - */ - SKIP_SCAN, - /** - * Prevents the usage of child-parent-join optimization. - */ - NO_CHILD_PARENT_JOIN_OPTIMIZATION, - /** - * Prevents the usage of indexes, forcing usage - * of the data table for a query. - */ - NO_INDEX, - /** - * Hint of the form {@code INDEX( ...) } - * to suggest usage of the index if possible. The first - * usable index in the list of indexes will be choosen. - * Table and index names may be surrounded by double quotes - * if they are case sensitive. 
- */ - INDEX, - /** - * All things being equal, use the data table instead of - * the index table when optimizing. - */ - USE_DATA_OVER_INDEX_TABLE, - /** - * All things being equal, use the index table instead of - * the data table when optimizing. - */ - USE_INDEX_OVER_DATA_TABLE, - /** - * Avoid caching any HBase blocks loaded by this query. - */ - NO_CACHE, - /** - * Use sort-merge join algorithm instead of broadcast join (hash join) algorithm. - */ - USE_SORT_MERGE_JOIN, - /** - * Persist the RHS results of a hash join. - */ - USE_PERSISTENT_CACHE, - /** - * Avoid using star-join optimization. Used for broadcast join (hash join) only. - */ - NO_STAR_JOIN, - /** - * Avoid using the no seek optimization. When there are many columns which are not selected coming in between 2 - * selected columns and/or versions of columns, this should be used. - */ - SEEK_TO_COLUMN, - /** - * Avoid seeks to select specified columns. When there are very less number of columns which are not selected in - * between 2 selected columns this will be give better performance. - */ - NO_SEEK_TO_COLUMN, - /** - * Saves an RPC call on the scan. See Scan.setSmall(true) in HBase documentation. - */ - SMALL, - /** - * Enforces a serial scan. - */ - SERIAL, - /** - * Enforces a forward scan. - */ - FORWARD_SCAN, - /** - * Prefer a hash aggregate over a sort plus streaming aggregate. - * Issue https://issues.apache.org/jira/browse/PHOENIX-4751. - */ - HASH_AGGREGATE, - /** - * Do not use server merge for hinted uncovered indexes - */ - NO_INDEX_SERVER_MERGE, - - /** - * Override the default CDC include scopes. - */ - CDC_INCLUDE, - ; - }; - - private final Map hints; - - public static HintNode create(HintNode hintNode, Hint hint) { - return create(hintNode, hint, ""); - } - - public static HintNode create(HintNode hintNode, Hint hint, String value) { - Map hints = new HashMap(hintNode.hints); - hints.put(hint, value); - return new HintNode(hints); - } - - public static HintNode combine(HintNode hintNode, HintNode override) { - Map hints = new HashMap(hintNode.hints); - hints.putAll(override.hints); - return new HintNode(hints); - } - - public static HintNode subtract(HintNode hintNode, Hint[] remove) { - Map hints = new HashMap(hintNode.hints); - for (Hint hint : remove) { - hints.remove(hint); - } - return new HintNode(hints); - } - - private HintNode() { - hints = new HashMap(); - } - - private HintNode(Map hints) { - this.hints = ImmutableMap.copyOf(hints); - } - - public HintNode(String hint) { - Map hints = new HashMap(); - Matcher hintMatcher = HINT_PATTERN.matcher(hint); - while (hintMatcher.find()) { - try { - Hint hintWord = Hint.valueOf(hintMatcher.group("hintWord").toUpperCase()); - String hintArgsStr = hintMatcher.group("hintArgs"); - List hintArgs = new ArrayList<>(); - if (hintArgsStr != null) { - Matcher hintArgMatcher = HINT_ARG_PATTERN.matcher(hintArgsStr); - while (hintArgMatcher.find()) { - hintArgs.add(SchemaUtil.normalizeIdentifier(hintArgMatcher.group())); - } - } - hintArgsStr = String.join(" ", hintArgs); - hintArgsStr = hintArgsStr.equals("") ? 
"" : "(" + hintArgsStr + ")"; - if (hints.containsKey(hintWord)) { - // Concatenate together any old value with the new value - hints.put(hintWord, hints.get(hintWord) + hintArgsStr); - } - else { - hints.put(hintWord, hintArgsStr); - } - } catch (IllegalArgumentException e) { // Ignore unknown/invalid hints - } - } - this.hints = ImmutableMap.copyOf(hints); - } + public static final HintNode EMPTY_HINT_NODE = new HintNode(); - public boolean isEmpty() { - return hints.isEmpty(); - } + public static final char SEPARATOR = ' '; + public static final String PREFIX = "("; + public static final String SUFFIX = ")"; + // Each hint is of the generic syntax hintWord(hintArgs) where hintArgs in parent are optional. + private static final Pattern HINT_PATTERN = + Pattern.compile("(?\\w+)\\s*(?:\\s*\\(\\s*(?[^)]+)\\s*\\))?"); + private static final Pattern HINT_ARG_PATTERN = Pattern.compile("(?\"[^\"]+\"|\\S+)"); + public enum Hint { /** - * Gets the value of the hint or null if the hint is not present. - * @param hint the hint - * @return the value specified in parenthesis following the hint or null - * if the hint is not present. - * + * Forces a range scan to be used to process the query. */ - public String getHint(Hint hint) { - return hints.get(hint); - } - + RANGE_SCAN, /** - * Tests for the presence of a hint in a query - * @param hint the hint - * @return true if the hint is present and false otherwise + * Forces a skip scan to be used to process the query. */ - public boolean hasHint(Hint hint) { - return hints.containsKey(hint); - } + SKIP_SCAN, + /** + * Prevents the usage of child-parent-join optimization. + */ + NO_CHILD_PARENT_JOIN_OPTIMIZATION, + /** + * Prevents the usage of indexes, forcing usage of the data table for a query. + */ + NO_INDEX, + /** + * Hint of the form {@code INDEX( ...) } to suggest usage of the index + * if possible. The first usable index in the list of indexes will be choosen. Table and index + * names may be surrounded by double quotes if they are case sensitive. + */ + INDEX, + /** + * All things being equal, use the data table instead of the index table when optimizing. + */ + USE_DATA_OVER_INDEX_TABLE, + /** + * All things being equal, use the index table instead of the data table when optimizing. + */ + USE_INDEX_OVER_DATA_TABLE, + /** + * Avoid caching any HBase blocks loaded by this query. + */ + NO_CACHE, + /** + * Use sort-merge join algorithm instead of broadcast join (hash join) algorithm. + */ + USE_SORT_MERGE_JOIN, + /** + * Persist the RHS results of a hash join. + */ + USE_PERSISTENT_CACHE, + /** + * Avoid using star-join optimization. Used for broadcast join (hash join) only. + */ + NO_STAR_JOIN, + /** + * Avoid using the no seek optimization. When there are many columns which are not selected + * coming in between 2 selected columns and/or versions of columns, this should be used. + */ + SEEK_TO_COLUMN, + /** + * Avoid seeks to select specified columns. When there are very less number of columns which are + * not selected in between 2 selected columns this will be give better performance. + */ + NO_SEEK_TO_COLUMN, + /** + * Saves an RPC call on the scan. See Scan.setSmall(true) in HBase documentation. + */ + SMALL, + /** + * Enforces a serial scan. + */ + SERIAL, + /** + * Enforces a forward scan. + */ + FORWARD_SCAN, + /** + * Prefer a hash aggregate over a sort plus streaming aggregate. Issue + * https://issues.apache.org/jira/browse/PHOENIX-4751. 
+ */ + HASH_AGGREGATE, + /** + * Do not use server merge for hinted uncovered indexes + */ + NO_INDEX_SERVER_MERGE, - public Set getHints(){ - return hints.keySet(); + /** + * Override the default CDC include scopes. + */ + CDC_INCLUDE,; + }; + + private final Map hints; + + public static HintNode create(HintNode hintNode, Hint hint) { + return create(hintNode, hint, ""); + } + + public static HintNode create(HintNode hintNode, Hint hint, String value) { + Map hints = new HashMap(hintNode.hints); + hints.put(hint, value); + return new HintNode(hints); + } + + public static HintNode combine(HintNode hintNode, HintNode override) { + Map hints = new HashMap(hintNode.hints); + hints.putAll(override.hints); + return new HintNode(hints); + } + + public static HintNode subtract(HintNode hintNode, Hint[] remove) { + Map hints = new HashMap(hintNode.hints); + for (Hint hint : remove) { + hints.remove(hint); } - - @Override - public String toString() { - if (hints.isEmpty()) { - return StringUtil.EMPTY_STRING; + return new HintNode(hints); + } + + private HintNode() { + hints = new HashMap(); + } + + private HintNode(Map hints) { + this.hints = ImmutableMap.copyOf(hints); + } + + public HintNode(String hint) { + Map hints = new HashMap(); + Matcher hintMatcher = HINT_PATTERN.matcher(hint); + while (hintMatcher.find()) { + try { + Hint hintWord = Hint.valueOf(hintMatcher.group("hintWord").toUpperCase()); + String hintArgsStr = hintMatcher.group("hintArgs"); + List hintArgs = new ArrayList<>(); + if (hintArgsStr != null) { + Matcher hintArgMatcher = HINT_ARG_PATTERN.matcher(hintArgsStr); + while (hintArgMatcher.find()) { + hintArgs.add(SchemaUtil.normalizeIdentifier(hintArgMatcher.group())); + } } - StringBuilder buf = new StringBuilder("/*+ "); - for (Map.Entry entry : hints.entrySet()) { - buf.append(entry.getKey()); - buf.append(entry.getValue()); - buf.append(' '); + hintArgsStr = String.join(" ", hintArgs); + hintArgsStr = hintArgsStr.equals("") ? "" : "(" + hintArgsStr + ")"; + if (hints.containsKey(hintWord)) { + // Concatenate together any old value with the new value + hints.put(hintWord, hints.get(hintWord) + hintArgsStr); + } else { + hints.put(hintWord, hintArgsStr); } - buf.append("*/ "); - return buf.toString(); + } catch (IllegalArgumentException e) { // Ignore unknown/invalid hints + } } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((hints == null) ? 0 : hints.hashCode()); - return result; + this.hints = ImmutableMap.copyOf(hints); + } + + public boolean isEmpty() { + return hints.isEmpty(); + } + + /** + * Gets the value of the hint or null if the hint is not present. + * @param hint the hint + * @return the value specified in parenthesis following the hint or null if the hint is not + * present. 
+ */ + public String getHint(Hint hint) { + return hints.get(hint); + } + + /** + * Tests for the presence of a hint in a query + * @param hint the hint + * @return true if the hint is present and false otherwise + */ + public boolean hasHint(Hint hint) { + return hints.containsKey(hint); + } + + public Set getHints() { + return hints.keySet(); + } + + @Override + public String toString() { + if (hints.isEmpty()) { + return StringUtil.EMPTY_STRING; } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - HintNode other = (HintNode)obj; - if (hints == null) { - if (other.hints != null) return false; - } else if (!hints.equals(other.hints)) return false; - return true; + StringBuilder buf = new StringBuilder("/*+ "); + for (Map.Entry entry : hints.entrySet()) { + buf.append(entry.getKey()); + buf.append(entry.getValue()); + buf.append(' '); } + buf.append("*/ "); + return buf.toString(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((hints == null) ? 0 : hints.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + HintNode other = (HintNode) obj; + if (hints == null) { + if (other.hints != null) return false; + } else if (!hints.equals(other.hints)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InListParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InListParseNode.java index b0290762fc1..34cff1e4196 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InListParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InListParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,81 +25,73 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - - /** - * * Node representing the IN literal list expression in SQL - * - * * @since 0.1 */ public class InListParseNode extends CompoundParseNode { - private final boolean negate; + private final boolean negate; - InListParseNode(List children, boolean negate) { - super(children); - // All values in the IN must be constant. First child is the LHS - for (int i = 1; i < children.size(); i++) { - ParseNode child = children.get(i); - if (!child.isStateless()) { - throw new ParseException(new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_LIST_NOT_CONSTANT) - .build().buildException()); - } - } - this.negate = negate; - } - - public boolean isNegate() { - return negate; + InListParseNode(List children, boolean negate) { + super(children); + // All values in the IN must be constant. 
First child is the LHS + for (int i = 1; i < children.size(); i++) { + ParseNode child = children.get(i); + if (!child.isStateless()) { + throw new ParseException( + new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_LIST_NOT_CONSTANT).build() + .buildException()); + } } + this.negate = negate; + } + + public boolean isNegate() { + return negate; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (negate ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (negate ? 1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - InListParseNode other = (InListParseNode) obj; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + InListParseNode other = (InListParseNode) obj; + if (negate != other.negate) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - buf.append(' '); - if (negate) buf.append("NOT "); - buf.append("IN"); - buf.append('('); - if (children.size() > 1) { - for (int i = 1; i < children.size(); i++) { - children.get(i).toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - buf.append(')'); + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + buf.append(' '); + if (negate) buf.append("NOT "); + buf.append("IN"); + buf.append('('); + if (children.size() > 1) { + for (int i = 1; i < children.size(); i++) { + children.get(i).toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InParseNode.java index 9003fc80c8c..2a067618d61 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/InParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,73 +23,63 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing IN subquery expression in SQL - * - * * @since 0.1 */ public class InParseNode extends BinaryParseNode { - private final boolean negate; - private final boolean isSubqueryDistinct; + private final boolean negate; + private final boolean isSubqueryDistinct; - InParseNode(ParseNode l, ParseNode r, boolean negate, boolean isSubqueryDistinct) { - super(l, r); - this.negate = negate; - this.isSubqueryDistinct = isSubqueryDistinct; - } - - public boolean isNegate() { - return negate; - } - - public boolean isSubqueryDistinct() { - return isSubqueryDistinct; - } + InParseNode(ParseNode l, ParseNode r, boolean negate, boolean isSubqueryDistinct) { + super(l, r); + this.negate = negate; + this.isSubqueryDistinct = isSubqueryDistinct; + } + + public boolean isNegate() { + return negate; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + public boolean isSubqueryDistinct() { + return isSubqueryDistinct; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (isSubqueryDistinct ? 1231 : 1237); - result = prime * result + (negate ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (isSubqueryDistinct ? 1231 : 1237); + result = prime * result + (negate ? 
1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - InParseNode other = (InParseNode) obj; - if (isSubqueryDistinct != other.isSubqueryDistinct) - return false; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + InParseNode other = (InParseNode) obj; + if (isSubqueryDistinct != other.isSubqueryDistinct) return false; + if (negate != other.negate) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - getChildren().get(0).toSQL(resolver, buf); - if (negate) buf.append(" NOT"); - buf.append(" IN ("); - getChildren().get(1).toSQL(resolver, buf); - buf.append(')'); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + getChildren().get(0).toSQL(resolver, buf); + if (negate) buf.append(" NOT"); + buf.append(" IN ("); + getChildren().get(1).toSQL(resolver, buf); + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java index f7481b94462..4f197480756 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexExpressionParseNodeRewriter.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.parse; @@ -25,50 +33,56 @@ import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.IndexUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.IndexUtil; /** - * Used to replace parse nodes in a SelectStatement that match expressions that are present in an indexed with the - * corresponding {@link ColumnParseNode} + * Used to replace parse nodes in a SelectStatement that match expressions that are present in an + * indexed with the corresponding {@link ColumnParseNode} */ public class IndexExpressionParseNodeRewriter extends ParseNodeRewriter { - private final Map indexedParseNodeToColumnParseNodeMap; - - public IndexExpressionParseNodeRewriter(PTable index, String alias, PhoenixConnection connection, Map udfParseNodes) throws SQLException { - indexedParseNodeToColumnParseNodeMap = Maps.newHashMapWithExpectedSize(index.getColumns().size()); - NamedTableNode tableNode = NamedTableNode.create(alias, - TableName.create(index.getParentSchemaName().getString(), index.getParentTableName().getString()), - Collections. emptyList()); - ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection, udfParseNodes); - StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver); - IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null, true); - ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); - int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + (index.getViewIndexId() == null ? 0 : 1); - List pkColumns = index.getPKColumns(); - for (int i=indexPosOffset; i indexedParseNodeToColumnParseNodeMap; - @Override - protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, CompoundNodeFactory factory) { - return indexedParseNodeToColumnParseNodeMap.containsKey(node) ? indexedParseNodeToColumnParseNodeMap.get(node) - : super.leaveCompoundNode(node, children, factory); + public IndexExpressionParseNodeRewriter(PTable index, String alias, PhoenixConnection connection, + Map udfParseNodes) throws SQLException { + indexedParseNodeToColumnParseNodeMap = + Maps.newHashMapWithExpectedSize(index.getColumns().size()); + NamedTableNode tableNode = + NamedTableNode.create(alias, TableName.create(index.getParentSchemaName().getString(), + index.getParentTableName().getString()), Collections. emptyList()); + ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection, udfParseNodes); + StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver); + IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null, true); + ExpressionCompiler expressionCompiler = new ExpressionCompiler(context); + int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + + (index.getViewIndexId() == null ? 
0 : 1); + List pkColumns = index.getPKColumns(); + for (int i = indexPosOffset; i < pkColumns.size(); ++i) { + PColumn column = pkColumns.get(i); + String expressionStr = IndexUtil.getIndexColumnExpressionStr(column); + ParseNode expressionParseNode = SQLParser.parseCondition(expressionStr); + String colName = "\"" + column.getName().getString() + "\""; + Expression dataExpression = expressionParseNode.accept(expressionCompiler); + PDataType expressionDataType = dataExpression.getDataType(); + ParseNode indexedParseNode = expressionParseNode.accept(rewriter); + PDataType indexColType = + IndexUtil.getIndexColumnDataType(dataExpression.isNullable(), expressionDataType); + ParseNode columnParseNode = + new ColumnParseNode(alias != null ? TableName.create(null, alias) : null, colName, null); + if (indexColType != expressionDataType) { + columnParseNode = NODE_FACTORY.cast(columnParseNode, expressionDataType, null, null); + } + indexedParseNodeToColumnParseNodeMap.put(indexedParseNode, columnParseNode); } + } + + @Override + protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, + CompoundNodeFactory factory) { + return indexedParseNodeToColumnParseNodeMap.containsKey(node) + ? indexedParseNodeToColumnParseNodeMap.get(node) + : super.leaveCompoundNode(node, children, factory); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java index ed9e04cce39..ffc814e88ed 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IndexKeyConstraint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,39 +19,37 @@ import java.util.Collections; import java.util.List; -import java.util.Map; import org.apache.hadoop.hbase.util.Pair; - -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; public class IndexKeyConstraint { - public static final IndexKeyConstraint EMPTY = new IndexKeyConstraint(Collections.>emptyList()); - - private final List> columnNameToSortOrder; - - IndexKeyConstraint(List> parseNodeAndSortOrder) { - this.columnNameToSortOrder = ImmutableList.copyOf(parseNodeAndSortOrder); - } - - public List> getParseNodeAndSortOrderList() { - return columnNameToSortOrder; - } - - @Override - public String toString() { - StringBuffer sb = new StringBuffer(); - for(Pair entry : columnNameToSortOrder) { - if(sb.length()!=0) { - sb.append(", "); - } - sb.append(entry.getFirst().toString()); - if(entry.getSecond() != SortOrder.getDefault()) { - sb.append(" "+entry.getSecond()); - } - } - return sb.toString(); + public static final IndexKeyConstraint EMPTY = + new IndexKeyConstraint(Collections.> emptyList()); + + private final List> columnNameToSortOrder; + + IndexKeyConstraint(List> parseNodeAndSortOrder) { + this.columnNameToSortOrder = ImmutableList.copyOf(parseNodeAndSortOrder); + } + + public List> getParseNodeAndSortOrderList() { + return columnNameToSortOrder; + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + for (Pair entry : columnNameToSortOrder) { + if (sb.length() != 0) { + sb.append(", "); + } + sb.append(entry.getFirst().toString()); + if (entry.getSecond() != SortOrder.getDefault()) { + sb.append(" " + entry.getSecond()); + } } -} \ No newline at end of file + return sb.toString(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java index fafa9d1329e..ce2b1847f6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/IsNullParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,63 +23,54 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing the IS NULL and IS NOT NULL expressions in SQL - * - * * @since 0.1 */ public class IsNullParseNode extends UnaryParseNode { - private final boolean negate; - - IsNullParseNode(ParseNode expr, boolean negate) { - super(expr); - this.negate = negate; - } - - public boolean isNegate() { - return negate; - } + private final boolean negate; + + IsNullParseNode(ParseNode expr, boolean negate) { + super(expr); + this.negate = negate; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + public boolean isNegate() { + return negate; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (negate ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (negate ? 1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - IsNullParseNode other = (IsNullParseNode) obj; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + IsNullParseNode other = (IsNullParseNode) obj; + if (negate != other.negate) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - getChildren().get(0).toSQL(resolver, buf); - buf.append(" IS"); - if (negate) buf.append(" NOT"); - buf.append(" NULL "); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + getChildren().get(0).toSQL(resolver, buf); + buf.append(" IS"); + if (negate) buf.append(" NOT"); + buf.append(" NULL "); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JoinTableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JoinTableNode.java index d30e4ba013e..28549b295ee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JoinTableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JoinTableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,113 +21,108 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing the join specified in the FROM clause of SQL - * - * * @since 0.1 */ public class JoinTableNode extends TableNode { - public enum JoinType { - Inner, - Left, - Right, - Full, - // the following two types derive from sub-query rewriting - Semi, - Anti, - }; - - private final JoinType type; - private final TableNode lhs; - private final TableNode rhs; - private final ParseNode onNode; - private final boolean singleValueOnly; - - JoinTableNode(JoinType type, TableNode lhs, TableNode rhs, ParseNode onNode, boolean singleValueOnly) { - super(null); - this.type = type; - this.lhs = lhs; - this.rhs = rhs; - this.onNode = onNode; - this.singleValueOnly = singleValueOnly; - } - - public JoinType getType() { - return type; - } - - public TableNode getLHS() { - return lhs; - } - - public TableNode getRHS() { - return rhs; - } - - public ParseNode getOnNode() { - return onNode; - } - - public boolean isSingleValueOnly() { - return singleValueOnly; - } + public enum JoinType { + Inner, + Left, + Right, + Full, + // the following two types derive from sub-query rewriting + Semi, + Anti, + }; - @Override - public T accept(TableNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + private final JoinType type; + private final TableNode lhs; + private final TableNode rhs; + private final ParseNode onNode; + private final boolean singleValueOnly; - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(lhs); - buf.append(' '); - if (onNode == null) { - buf.append(','); - buf.append(rhs); - } else { - buf.append(type); - buf.append(" JOIN "); - buf.append(rhs); - buf.append(" ON ("); - onNode.toSQL(resolver, buf); - buf.append(')'); - } - } + JoinTableNode(JoinType type, TableNode lhs, TableNode rhs, ParseNode onNode, + boolean singleValueOnly) { + super(null); + this.type = type; + this.lhs = lhs; + this.rhs = rhs; + this.onNode = onNode; + this.singleValueOnly = singleValueOnly; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((lhs == null) ? 0 : lhs.hashCode()); - result = prime * result + ((onNode == null) ? 0 : onNode.hashCode()); - result = prime * result + ((rhs == null) ? 0 : rhs.hashCode()); - result = prime * result + (singleValueOnly ? 1231 : 1237); - result = prime * result + ((type == null) ? 
0 : type.hashCode()); - return result; - } + public JoinType getType() { + return type; + } + + public TableNode getLHS() { + return lhs; + } + + public TableNode getRHS() { + return rhs; + } + + public ParseNode getOnNode() { + return onNode; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - JoinTableNode other = (JoinTableNode)obj; - if (lhs == null) { - if (other.lhs != null) return false; - } else if (!lhs.equals(other.lhs)) return false; - if (onNode == null) { - if (other.onNode != null) return false; - } else if (!onNode.equals(other.onNode)) return false; - if (rhs == null) { - if (other.rhs != null) return false; - } else if (!rhs.equals(other.rhs)) return false; - if (singleValueOnly != other.singleValueOnly) return false; - if (type != other.type) return false; - return true; + public boolean isSingleValueOnly() { + return singleValueOnly; + } + + @Override + public T accept(TableNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(lhs); + buf.append(' '); + if (onNode == null) { + buf.append(','); + buf.append(rhs); + } else { + buf.append(type); + buf.append(" JOIN "); + buf.append(rhs); + buf.append(" ON ("); + onNode.toSQL(resolver, buf); + buf.append(')'); } -} + } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((lhs == null) ? 0 : lhs.hashCode()); + result = prime * result + ((onNode == null) ? 0 : onNode.hashCode()); + result = prime * result + ((rhs == null) ? 0 : rhs.hashCode()); + result = prime * result + (singleValueOnly ? 1231 : 1237); + result = prime * result + ((type == null) ? 0 : type.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + JoinTableNode other = (JoinTableNode) obj; + if (lhs == null) { + if (other.lhs != null) return false; + } else if (!lhs.equals(other.lhs)) return false; + if (onNode == null) { + if (other.onNode != null) return false; + } else if (!onNode.equals(other.onNode)) return false; + if (rhs == null) { + if (other.rhs != null) return false; + } else if (!rhs.equals(other.rhs)) return false; + if (singleValueOnly != other.singleValueOnly) return false; + if (type != other.type) return false; + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonExistsParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonExistsParseNode.java index ee10f975425..86af0434a67 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonExistsParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonExistsParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.parse; +import java.sql.SQLException; +import java.util.List; + import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.FunctionExpression; @@ -24,22 +27,19 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PJson; -import java.sql.SQLException; -import java.util.List; - public class JsonExistsParseNode extends FunctionParseNode { - public JsonExistsParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public JsonExistsParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PJson.INSTANCE)) { - throw new SQLException(dataType + " type is unsupported for JSON_EXISTS()."); - } - return new JsonExistsFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PJson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for JSON_EXISTS()."); } + return new JsonExistsFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonModifyParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonModifyParseNode.java index 059da3209ed..0be2a0cf1d6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonModifyParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonModifyParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.parse; +import java.sql.SQLException; +import java.util.List; + import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.FunctionExpression; @@ -24,25 +27,22 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PJson; -import java.sql.SQLException; -import java.util.List; - /** * ParseNode for JSON_MODIFY function. 
*/ public class JsonModifyParseNode extends FunctionParseNode { - public JsonModifyParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public JsonModifyParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PJson.INSTANCE)) { - throw new SQLException(dataType + " type is unsupported for JSON_MODIFY()."); - } - return new JsonModifyFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PJson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for JSON_MODIFY()."); } -} \ No newline at end of file + return new JsonModifyFunction(children); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonQueryParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonQueryParseNode.java index 81093fbb5b1..7deb0fad965 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonQueryParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonQueryParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.parse; +import java.sql.SQLException; +import java.util.List; + import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.FunctionExpression; @@ -24,25 +27,22 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PJson; -import java.sql.SQLException; -import java.util.List; - /** * ParseNode for JSON_QUERY function. 
*/ public class JsonQueryParseNode extends FunctionParseNode { - public JsonQueryParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public JsonQueryParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PJson.INSTANCE)) { - throw new SQLException(dataType + " type is unsupported for JSON_QUERY()."); - } - return new JsonQueryFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PJson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for JSON_QUERY()."); } + return new JsonQueryFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonValueParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonValueParseNode.java index eb1e413b3fc..a62ec4820a9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonValueParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/JsonValueParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.parse; +import java.sql.SQLException; +import java.util.List; + import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.FunctionExpression; @@ -25,22 +28,19 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PJson; -import java.sql.SQLException; -import java.util.List; - public class JsonValueParseNode extends FunctionParseNode { - public JsonValueParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public JsonValueParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) - throws SQLException { - PDataType dataType = children.get(0).getDataType(); - if (!dataType.isCoercibleTo(PJson.INSTANCE) && !dataType.isCoercibleTo(PBson.INSTANCE)) { - throw new SQLException(dataType + " type is unsupported for JSON_VALUE()."); - } - return new JsonValueFunction(children); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + if (!dataType.isCoercibleTo(PJson.INSTANCE) && !dataType.isCoercibleTo(PBson.INSTANCE)) { + throw new SQLException(dataType + " type is unsupported for JSON_VALUE()."); } + return new JsonValueFunction(children); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValueAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValueAggregateParseNode.java index 333bb132206..32da90c1ac2 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValueAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValueAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,14 @@ public class LastValueAggregateParseNode extends DelegateConstantToCountParseNode { - public LastValueAggregateParseNode(String name, List children, FunctionParseNode.BuiltInFunctionInfo info) { - super(name, children, info); - } + public LastValueAggregateParseNode(String name, List children, + FunctionParseNode.BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new LastValueFunction(children, getDelegateFunction(children, context)); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new LastValueFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValuesAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValuesAggregateParseNode.java index 43fa82dcf39..6437129c85f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValuesAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LastValuesAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,14 @@ public class LastValuesAggregateParseNode extends DelegateConstantToCountParseNode { - public LastValuesAggregateParseNode(String name, List children, FunctionParseNode.BuiltInFunctionInfo info) { - super(name, children, info); - } + public LastValuesAggregateParseNode(String name, List children, + FunctionParseNode.BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new LastValuesFunction(children, getDelegateFunction(children, context)); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new LastValuesFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java index 8736d8aaa97..8549785ccee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanOrEqualParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,27 +19,23 @@ import org.apache.hadoop.hbase.CompareOperator; - /** - * * Node representing the less than or equal to operator {@code (<=) } in SQL - * - * * @since 0.1 */ public class LessThanOrEqualParseNode extends ComparisonParseNode { - LessThanOrEqualParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } + LessThanOrEqualParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } - @Override - public CompareOperator getFilterOp() { - return CompareOperator.LESS_OR_EQUAL; - } + @Override + public CompareOperator getFilterOp() { + return CompareOperator.LESS_OR_EQUAL; + } - @Override - public CompareOperator getInvertFilterOp() { - return CompareOperator.GREATER_OR_EQUAL; - } + @Override + public CompareOperator getInvertFilterOp() { + return CompareOperator.GREATER_OR_EQUAL; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java index 158ae3158d8..a9189913162 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LessThanParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,27 +19,23 @@ import org.apache.hadoop.hbase.CompareOperator; - /** - * * Node representing the less than operator {@code (<) } in SQL - * - * * @since 0.1 */ public class LessThanParseNode extends ComparisonParseNode { - LessThanParseNode(ParseNode lhs, ParseNode rhs) { - super(lhs, rhs); - } + LessThanParseNode(ParseNode lhs, ParseNode rhs) { + super(lhs, rhs); + } - @Override - public CompareOperator getFilterOp() { - return CompareOperator.LESS; - } + @Override + public CompareOperator getFilterOp() { + return CompareOperator.LESS; + } - @Override - public CompareOperator getInvertFilterOp() { - return CompareOperator.GREATER; - } + @Override + public CompareOperator getInvertFilterOp() { + return CompareOperator.GREATER; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LikeParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LikeParseNode.java index 8a510d4ca9f..c537ad18025 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LikeParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LikeParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,78 +23,69 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing LIKE and NOT LIKE in SQL - * - * * @since 0.1 */ public class LikeParseNode extends BinaryParseNode { - public enum LikeType {CASE_SENSITIVE, CASE_INSENSITIVE} + public enum LikeType { + CASE_SENSITIVE, + CASE_INSENSITIVE + } - private final boolean negate; - private final LikeType likeType; + private final boolean negate; + private final LikeType likeType; - LikeParseNode(ParseNode lhs, ParseNode rhs, boolean negate, LikeType likeType) { - super(lhs, rhs); - this.negate = negate; - this.likeType = likeType; - } + LikeParseNode(ParseNode lhs, ParseNode rhs, boolean negate, LikeType likeType) { + super(lhs, rhs); + this.negate = negate; + this.likeType = likeType; + } - public boolean isNegate() { - return negate; - } + public boolean isNegate() { + return negate; + } - public LikeType getLikeType() { - return likeType; - } + public LikeType getLikeType() { + return likeType; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result - + ((likeType == null) ? 0 : likeType.hashCode()); - result = prime * result + (negate ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((likeType == null) ? 0 : likeType.hashCode()); + result = prime * result + (negate ? 1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - LikeParseNode other = (LikeParseNode) obj; - if (likeType != other.likeType) - return false; - if (negate != other.negate) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + LikeParseNode other = (LikeParseNode) obj; + if (likeType != other.likeType) return false; + if (negate != other.negate) return false; + return true; + } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - if (negate) buf.append(" NOT"); - buf.append(" " + (likeType == LikeType.CASE_SENSITIVE ? "LIKE" : "ILIKE") + " "); - children.get(1).toSQL(resolver, buf); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + if (negate) buf.append(" NOT"); + buf.append(" " + (likeType == LikeType.CASE_SENSITIVE ? 
"LIKE" : "ILIKE") + " "); + children.get(1).toSQL(resolver, buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LimitNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LimitNode.java index 135cf544ca4..71f54797b3f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LimitNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LimitNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,51 +17,50 @@ */ package org.apache.phoenix.parse; - public class LimitNode { - private final BindParseNode bindNode; - private final LiteralParseNode limitNode; - - LimitNode(BindParseNode bindNode) { - this.bindNode = bindNode; - limitNode = null; - } - - LimitNode(LiteralParseNode limitNode) { - this.limitNode = limitNode; - this.bindNode = null; - } - - public ParseNode getLimitParseNode() { - return bindNode == null ? limitNode : bindNode; - } - - @Override - public String toString() { - return bindNode == null ? limitNode.toString() : bindNode.toString(); - } + private final BindParseNode bindNode; + private final LiteralParseNode limitNode; + + LimitNode(BindParseNode bindNode) { + this.bindNode = bindNode; + limitNode = null; + } + + LimitNode(LiteralParseNode limitNode) { + this.limitNode = limitNode; + this.bindNode = null; + } + + public ParseNode getLimitParseNode() { + return bindNode == null ? limitNode : bindNode; + } + + @Override + public String toString() { + return bindNode == null ? limitNode.toString() : bindNode.toString(); + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((bindNode == null) ? 0 : bindNode.hashCode()); - result = prime * result + ((limitNode == null) ? 0 : limitNode.hashCode()); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((bindNode == null) ? 0 : bindNode.hashCode()); + result = prime * result + ((limitNode == null) ? 
0 : limitNode.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - LimitNode other = (LimitNode)obj; - if (bindNode == null) { - if (other.bindNode != null) return false; - } else if (!bindNode.equals(other.bindNode)) return false; - if (limitNode == null) { - if (other.limitNode != null) return false; - } else if (!limitNode.equals(other.limitNode)) return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + LimitNode other = (LimitNode) obj; + if (bindNode == null) { + if (other.bindNode != null) return false; + } else if (!bindNode.equals(other.bindNode)) return false; + if (limitNode == null) { + if (other.limitNode != null) return false; + } else if (!limitNode.equals(other.limitNode)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ListJarsStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ListJarsStatement.java index e9821fbed83..55111a9cdec 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ListJarsStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ListJarsStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,14 +21,14 @@ public class ListJarsStatement implements BindableStatement { - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - @Override - public Operation getOperation() { - return Operation.QUERY; - } + @Override + public Operation getOperation() { + return Operation.QUERY; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java index 85f4ee5bc64..83ef89df9a8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/LiteralParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,99 +27,96 @@ import org.apache.phoenix.schema.types.PTimestamp; /** - * * Node representing literal expressions such as 1,2.5,'foo', and NULL in SQL - * - * * @since 0.1 */ public class LiteralParseNode extends TerminalParseNode { - public static final List STAR = Collections.singletonList(new LiteralParseNode(1)); - public static final ParseNode NULL = new LiteralParseNode(null); - public static final ParseNode ZERO = new LiteralParseNode(0); - public static final ParseNode ONE = new LiteralParseNode(1); - public static final ParseNode MINUS_ONE = new LiteralParseNode(-1L); - public static final ParseNode TRUE = new LiteralParseNode(true); - // Parser representation of Long.MIN_VALUE, as ABS(Long.MIN_VALUE) is too big to fit into a Long - public static final ParseNode MIN_LONG_AS_BIG_DECIMAL = new LiteralParseNode(BigDecimal.valueOf(Long.MIN_VALUE).abs()); - // See ParseNodeFactory.negate(), as MIN_LONG_AS_BIG_DECIMAL will be converted MIN_LONG if negated. - public static final ParseNode MIN_LONG = new LiteralParseNode(Long.MIN_VALUE); - - private final Object value; - private final PDataType type; - - public LiteralParseNode(Object value) { - this.type = PDataType.fromLiteral(value); - // This will make the value null if the value passed through represents null for the given type. - // For example, an empty string is treated as a null. - this.value = this.type == null ? null : this.type.toObject(value, this.type); - } + public static final List STAR = + Collections. singletonList(new LiteralParseNode(1)); + public static final ParseNode NULL = new LiteralParseNode(null); + public static final ParseNode ZERO = new LiteralParseNode(0); + public static final ParseNode ONE = new LiteralParseNode(1); + public static final ParseNode MINUS_ONE = new LiteralParseNode(-1L); + public static final ParseNode TRUE = new LiteralParseNode(true); + // Parser representation of Long.MIN_VALUE, as ABS(Long.MIN_VALUE) is too big to fit into a Long + public static final ParseNode MIN_LONG_AS_BIG_DECIMAL = + new LiteralParseNode(BigDecimal.valueOf(Long.MIN_VALUE).abs()); + // See ParseNodeFactory.negate(), as MIN_LONG_AS_BIG_DECIMAL will be converted MIN_LONG if + // negated. + public static final ParseNode MIN_LONG = new LiteralParseNode(Long.MIN_VALUE); - public LiteralParseNode(Object value, PDataType type) { - this.type = type; - // This will make the value null if the value passed through represents null for the given type. - // For example, an empty string is treated as a null. - this.value = this.type == null ? null : this.type.toObject(value, this.type); - } + private final Object value; + private final PDataType type; - public PDataType getType() { - return type; - } - - public Object getValue() { - return value; - } + public LiteralParseNode(Object value) { + this.type = PDataType.fromLiteral(value); + // This will make the value null if the value passed through represents null for the given type. + // For example, an empty string is treated as a null. + this.value = this.type == null ? 
null : this.type.toObject(value, this.type); + } - @Override - public boolean isStateless() { - return true; - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + public LiteralParseNode(Object value, PDataType type) { + this.type = type; + // This will make the value null if the value passed through represents null for the given type. + // For example, an empty string is treated as a null. + this.value = this.type == null ? null : this.type.toObject(value, this.type); + } - public byte[] getBytes() { - return type == null ? null : type.toBytes(value); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((type == null) ? 0 : type.hashCode()); - result = prime * result + ((value == null) ? 0 : value.hashCode()); - return result; - } + public PDataType getType() { + return type; + } + + public Object getValue() { + return value; + } + + @Override + public boolean isStateless() { + return true; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + public byte[] getBytes() { + return type == null ? null : type.toBytes(value); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((type == null) ? 0 : type.hashCode()); + result = prime * result + ((value == null) ? 0 : value.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - LiteralParseNode other = (LiteralParseNode) obj; - if (value == other.value) return true; - if (type == null) return false; - return type.isComparableTo(other.type) && type.compareTo(value, other.value, other.type) == 0; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + LiteralParseNode other = (LiteralParseNode) obj; + if (value == other.value) return true; + if (type == null) return false; + return type.isComparableTo(other.type) && type.compareTo(value, other.value, other.type) == 0; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - if (value == null) { - buf.append(" null "); - } else { - // TODO: move into PDataType? - if (type.isCoercibleTo(PTimestamp.INSTANCE)) { - buf.append(type); - buf.append(' '); - buf.append(type.toStringLiteral(value, null)); - } else { - buf.append(type.toStringLiteral(value, null)); - } - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + if (value == null) { + buf.append(" null "); + } else { + // TODO: move into PDataType? + if (type.isCoercibleTo(PTimestamp.INSTANCE)) { + buf.append(type); + buf.append(' '); + buf.append(type.toStringLiteral(value, null)); + } else { + buf.append(type.toStringLiteral(value, null)); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MaxAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MaxAggregateParseNode.java index 4d7d758e88c..7a7891327fa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MaxAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MaxAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,15 +25,15 @@
 import org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.expression.function.MaxAggregateFunction;
-
 public class MaxAggregateParseNode extends DelegateConstantToCountParseNode {
- public MaxAggregateParseNode(String name, List children, BuiltInFunctionInfo info) {
- super(name, children, info);
- }
-
- @Override
- public FunctionExpression create(List children, StatementContext context) throws SQLException {
- return new MaxAggregateFunction(children, getDelegateFunction(children,context));
- }
+ public MaxAggregateParseNode(String name, List children, BuiltInFunctionInfo info) {
+ super(name, children, info);
+ }
+
+ @Override
+ public FunctionExpression create(List children, StatementContext context)
+ throws SQLException {
+ return new MaxAggregateFunction(children, getDelegateFunction(children, context));
+ }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MinAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MinAggregateParseNode.java
index 31ef4784efd..62f135edef6 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MinAggregateParseNode.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MinAggregateParseNode.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -25,15 +25,15 @@
 import org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.expression.function.MinAggregateFunction;
-
 public class MinAggregateParseNode extends DelegateConstantToCountParseNode {
- public MinAggregateParseNode(String name, List children, BuiltInFunctionInfo info) {
- super(name, children, info);
- }
-
- @Override
- public FunctionExpression create(List children, StatementContext context) throws SQLException {
- return new MinAggregateFunction(children, getDelegateFunction(children,context));
- }
+ public MinAggregateParseNode(String name, List children, BuiltInFunctionInfo info) {
+ super(name, children, info);
+ }
+
+ @Override
+ public FunctionExpression create(List children, StatementContext context)
+ throws SQLException {
+ return new MinAggregateFunction(children, getDelegateFunction(children, context));
+ }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
index 15d539d91de..499cd004091 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ModulusParseNode.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,33 +21,28 @@
 import java.util.Collections;
 import java.util.List;
-
-
 /**
- *
 * Node representing modulus in a SQL expression
- *
- *
 * @since 0.1
 */
 public class ModulusParseNode extends ArithmeticParseNode {
- public static final String OPERATOR = "%";
+ public static final String OPERATOR = "%";
- @Override
- public String getOperator() {
- return OPERATOR;
- }
+ @Override
+ public String getOperator() {
+ return OPERATOR;
+ }
- ModulusParseNode(List children) {
- super(children);
- }
+ ModulusParseNode(List children) {
+ super(children);
+ }
- @Override
- public T accept(ParseNodeVisitor visitor) throws SQLException {
- List l = Collections.emptyList();
- if (visitor.visitEnter(this)) {
- l = acceptChildren(visitor);
- }
- return visitor.visitLeave(this, l);
+ @Override
+ public T accept(ParseNodeVisitor visitor) throws SQLException {
+ List l = Collections.emptyList();
+ if (visitor.visitEnter(this)) {
+ l = acceptChildren(visitor);
 }
+ return visitor.visitLeave(this, l);
+ }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
index 1fc5436476a..f6569d5aae4 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MultiplyParseNode.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,33 +21,28 @@
 import java.util.Collections;
 import java.util.List;
-
-
 /**
- *
 * Node representing multiplication in a SQL expression
- *
- *
 * @since 0.1
 */
 public class MultiplyParseNode extends ArithmeticParseNode {
- public static final String OPERATOR = "*";
+ public static final String OPERATOR = "*";
- @Override
- public String getOperator() {
- return OPERATOR;
- }
+ @Override
+ public String getOperator() {
+ return OPERATOR;
+ }
- MultiplyParseNode(List children) {
- super(children);
- }
+ MultiplyParseNode(List children) {
+ super(children);
+ }
- @Override
- public T accept(ParseNodeVisitor visitor) throws SQLException {
- List l = Collections.emptyList();
- if (visitor.visitEnter(this)) {
- l = acceptChildren(visitor);
- }
- return visitor.visitLeave(this, l);
+ @Override
+ public T accept(ParseNodeVisitor visitor) throws SQLException {
+ List l = Collections.emptyList();
+ if (visitor.visitEnter(this)) {
+ l = acceptChildren(visitor);
 }
+ return visitor.visitLeave(this, l);
+ }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MutableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MutableStatement.java
index 610b2d2d63c..8a0817c1ff9 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MutableStatement.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/MutableStatement.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,8 +21,8 @@ public abstract class MutableStatement implements BindableStatement { - @Override - public Operation getOperation() { - return Operation.UPSERT; - } + @Override + public Operation getOperation() { + return Operation.UPSERT; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedNode.java index 1957f0eeda4..584a1c36410 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,53 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import org.apache.phoenix.util.SchemaUtil; public class NamedNode { - private final String name; - private final boolean isCaseSensitive; - - public static NamedNode caseSensitiveNamedNode(String name) { - return new NamedNode(name,true); - } + private final String name; + private final boolean isCaseSensitive; + + public static NamedNode caseSensitiveNamedNode(String name) { + return new NamedNode(name, true); + } + + NamedNode(String name, boolean isCaseSensitive) { + this.name = name; + this.isCaseSensitive = isCaseSensitive; + } + + NamedNode(String name) { + this.name = SchemaUtil.normalizeIdentifier(name); + this.isCaseSensitive = name == null ? false : SchemaUtil.isCaseSensitive(name); + } - NamedNode(String name, boolean isCaseSensitive) { - this.name = name; - this.isCaseSensitive = isCaseSensitive; - } + public String getName() { + return name; + } - NamedNode(String name) { - this.name = SchemaUtil.normalizeIdentifier(name); - this.isCaseSensitive = name == null ? false : SchemaUtil.isCaseSensitive(name); - } + public boolean isCaseSensitive() { + return isCaseSensitive; + } - public String getName() { - return name; - } + @Override + public int hashCode() { + return name.hashCode(); + } - public boolean isCaseSensitive() { - return isCaseSensitive; - } - - @Override - public int hashCode() { - return name.hashCode(); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + NamedNode other = (NamedNode) obj; + return name.equals(other.name); + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - NamedNode other = (NamedNode)obj; - return name.equals(other.name); - } - - @Override - public String toString() { - return (isCaseSensitive ? "\"" : "" ) + name + (isCaseSensitive ? "\"" : "" ); - } + @Override + public String toString() { + return (isCaseSensitive ? "\"" : "") + name + (isCaseSensitive ? 
"\"" : ""); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedParseNode.java index 32dfc494bdd..ce1875d056b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,72 +17,60 @@ */ package org.apache.phoenix.parse; - - /** - * * Abstract node representing named nodes such as binds and column expressions in SQL - * - * * @since 0.1 */ -public abstract class NamedParseNode extends TerminalParseNode{ - private final NamedNode namedNode; - - NamedParseNode(NamedParseNode node) { - this.namedNode = node.namedNode; - } +public abstract class NamedParseNode extends TerminalParseNode { + private final NamedNode namedNode; - NamedParseNode(String name) { - this.namedNode = new NamedNode(name); - } - - NamedParseNode(String name, boolean isCaseSensitive) { - this.namedNode = new NamedNode(name, isCaseSensitive); - } + NamedParseNode(NamedParseNode node) { + this.namedNode = node.namedNode; + } - public String getName() { - return namedNode.getName(); - } + NamedParseNode(String name) { + this.namedNode = new NamedNode(name); + } - public boolean isCaseSensitive() { - return namedNode.isCaseSensitive(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result - + ((namedNode == null) ? 0 : namedNode.hashCode()); - return result; - } + NamedParseNode(String name, boolean isCaseSensitive) { + this.namedNode = new NamedNode(name, isCaseSensitive); + } + + public String getName() { + return namedNode.getName(); + } + + public boolean isCaseSensitive() { + return namedNode.isCaseSensitive(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((namedNode == null) ? 
0 : namedNode.hashCode()); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - NamedParseNode other = (NamedParseNode) obj; - if (namedNode == null) { - if (other.namedNode != null) - return false; - } else if (!namedNode.equals(other.namedNode)) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + NamedParseNode other = (NamedParseNode) obj; + if (namedNode == null) { + if (other.namedNode != null) return false; + } else if (!namedNode.equals(other.namedNode)) return false; + return true; + } - - public void toSQL(StringBuilder buf) { - if (isCaseSensitive()) { - buf.append('"'); - buf.append(getName()); - buf.append('"'); - } else { - buf.append(getName()); - } + public void toSQL(StringBuilder buf) { + if (isCaseSensitive()) { + buf.append('"'); + buf.append(getName()); + buf.append('"'); + } else { + buf.append(getName()); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedTableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedTableNode.java index a04c214ecf4..580fe619704 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedTableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NamedTableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,99 +22,98 @@ import java.util.List; import org.apache.phoenix.compile.ColumnResolver; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; /** * Node representing an explicit table reference in the FROM clause of SQL - * - * * @since 0.1 */ public class NamedTableNode extends ConcreteTableNode { - private final List dynColumns; - - public static NamedTableNode create (String alias, TableName name, List dynColumns) { - return new NamedTableNode(alias, name, dynColumns); - } - - public static NamedTableNode create (TableName name) { - return new NamedTableNode(null, name, Collections.emptyList()); - } - - public static NamedTableNode create (String schemaName, String tableName) { - return new NamedTableNode(null, TableName.create(schemaName, tableName), Collections.emptyList()); - } - - @Deprecated - NamedTableNode(String alias, TableName name) { - this(alias, name, ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE); - } - - @Deprecated - NamedTableNode(String alias, TableName name, List dynColumns) { - this(alias,name,dynColumns,ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE); - } - - NamedTableNode(String alias, TableName name, Double tableSamplingRate) { - super(alias, name, tableSamplingRate); - dynColumns = Collections. emptyList(); - } - - NamedTableNode(String alias, TableName name, List dynColumns, Double tableSamplingRate) { - super(alias, name, tableSamplingRate); - if (dynColumns != null) { - this.dynColumns = ImmutableList.copyOf(dynColumns); - } else { - this.dynColumns = Collections. 
emptyList(); - } - } + private final List dynColumns; - @Override - public T accept(TableNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + public static NamedTableNode create(String alias, TableName name, List dynColumns) { + return new NamedTableNode(alias, name, dynColumns); + } - public List getDynamicColumns() { - return dynColumns; - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(this.getName().toString()); - if (!dynColumns.isEmpty()) { - buf.append('('); - for (ColumnDef def : dynColumns) { - buf.append(def.toFullString()); - buf.append(','); - } - buf.setLength(buf.length()-1); - buf.append(')'); - } - if (this.getAlias() != null) buf.append(" " + this.getAlias()); - if (this.getTableSamplingRate() != null) buf.append(" TABLESAMPLE " + this.getTableSamplingRate()); - buf.append(' '); - } + public static NamedTableNode create(TableName name) { + return new NamedTableNode(null, name, Collections. emptyList()); + } + + public static NamedTableNode create(String schemaName, String tableName) { + return new NamedTableNode(null, TableName.create(schemaName, tableName), + Collections. emptyList()); + } + + @Deprecated + NamedTableNode(String alias, TableName name) { + this(alias, name, ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE); + } + + @Deprecated + NamedTableNode(String alias, TableName name, List dynColumns) { + this(alias, name, dynColumns, ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE); + } + + NamedTableNode(String alias, TableName name, Double tableSamplingRate) { + super(alias, name, tableSamplingRate); + dynColumns = Collections. emptyList(); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((dynColumns == null) ? 0 : dynColumns.hashCode()); - return result; + NamedTableNode(String alias, TableName name, List dynColumns, + Double tableSamplingRate) { + super(alias, name, tableSamplingRate); + if (dynColumns != null) { + this.dynColumns = ImmutableList.copyOf(dynColumns); + } else { + this.dynColumns = Collections. emptyList(); } + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - NamedTableNode other = (NamedTableNode)obj; - if (dynColumns == null) { - if (other.dynColumns != null) return false; - } else if (!dynColumns.equals(other.dynColumns)) return false; - return true; + @Override + public T accept(TableNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + public List getDynamicColumns() { + return dynColumns; + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(this.getName().toString()); + if (!dynColumns.isEmpty()) { + buf.append('('); + for (ColumnDef def : dynColumns) { + buf.append(def.toFullString()); + buf.append(','); + } + buf.setLength(buf.length() - 1); + buf.append(')'); } -} + if (this.getAlias() != null) buf.append(" " + this.getAlias()); + if (this.getTableSamplingRate() != null) + buf.append(" TABLESAMPLE " + this.getTableSamplingRate()); + buf.append(' '); + } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((dynColumns == null) ? 
0 : dynColumns.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (!super.equals(obj)) return false;
+ if (getClass() != obj.getClass()) return false;
+ NamedTableNode other = (NamedTableNode) obj;
+ if (dynColumns == null) {
+ if (other.dynColumns != null) return false;
+ } else if (!dynColumns.equals(other.dynColumns)) return false;
+ return true;
+ }
+}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
index bbbe00b13b5..6e4525cc96e 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotEqualParseNode.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,27 +19,23 @@
 import org.apache.hadoop.hbase.CompareOperator;
-
 /**
- *
 * Node representing a not equal expression {@code (!=,<>) } in SQL
- *
- *
 * @since 0.1
 */
 public class NotEqualParseNode extends ComparisonParseNode {
- NotEqualParseNode(ParseNode lhs, ParseNode rhs) {
- super(lhs, rhs);
- }
+ NotEqualParseNode(ParseNode lhs, ParseNode rhs) {
+ super(lhs, rhs);
+ }
- @Override
- public CompareOperator getFilterOp() {
- return CompareOperator.NOT_EQUAL;
- }
+ @Override
+ public CompareOperator getFilterOp() {
+ return CompareOperator.NOT_EQUAL;
+ }
- @Override
- public CompareOperator getInvertFilterOp() {
- return CompareOperator.NOT_EQUAL;
- }
+ @Override
+ public CompareOperator getInvertFilterOp() {
+ return CompareOperator.NOT_EQUAL;
+ }
 }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotParseNode.java
index 86ca1cf51ae..108cf503b3f 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotParseNode.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NotParseNode.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,34 +23,29 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing the NOT in SQL - * - * * @since 0.1 */ public class NotParseNode extends UnaryParseNode { - NotParseNode(ParseNode expr) { - super(expr); - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); - } + NotParseNode(ParseNode expr) { + super(expr); + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - buf.append(" NOT "); - children.get(0).toSQL(resolver, buf); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + buf.append(" NOT "); + children.get(0).toSQL(resolver, buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NthValueAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NthValueAggregateParseNode.java index a0495d43866..b236eeb05de 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NthValueAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/NthValueAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,12 +27,14 @@ public class NthValueAggregateParseNode extends DelegateConstantToCountParseNode { - public NthValueAggregateParseNode(String name, List children, FunctionParseNode.BuiltInFunctionInfo info) { - super(name, children, info); - } + public NthValueAggregateParseNode(String name, List children, + FunctionParseNode.BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new NthValueFunction(children, getDelegateFunction(children, context)); - } + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new NthValueFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OffsetNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OffsetNode.java index e84316dfa1f..a99cf9037a8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OffsetNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OffsetNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,40 +21,46 @@ import java.util.Objects; public class OffsetNode { - private final ParseNode node; + private final ParseNode node; - OffsetNode(ParseNode node) throws SQLException { - if(!(node instanceof BindParseNode || node instanceof LiteralParseNode || node instanceof ComparisonParseNode)) { - throw new SQLException("Bad Expression Passed To Offset, node of type" + node.getClass().getName()); - } - this.node = node; - } - - public ParseNode getOffsetParseNode() { - return node; + OffsetNode(ParseNode node) throws SQLException { + if ( + !(node instanceof BindParseNode || node instanceof LiteralParseNode + || node instanceof ComparisonParseNode) + ) { + throw new SQLException( + "Bad Expression Passed To Offset, node of type" + node.getClass().getName()); } + this.node = node; + } - /** - * As we usually consider RVC as having multiple binds treat bind as Integer offset. - * @return true for Literal or Bind parse nodes. - */ - public boolean isIntegerOffset() { - return (node instanceof BindParseNode) || (node instanceof LiteralParseNode); - } - - @Override - public String toString() { - return node.toString(); - } + public ParseNode getOffsetParseNode() { + return node; + } - @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - OffsetNode that = (OffsetNode) o; - return Objects.equals(node, that.node); - } + /** + * As we usually consider RVC as having multiple binds treat bind as Integer offset. + * @return true for Literal or Bind parse nodes. + */ + public boolean isIntegerOffset() { + return (node instanceof BindParseNode) || (node instanceof LiteralParseNode); + } - @Override public int hashCode() { - return Objects.hash(node); - } + @Override + public String toString() { + return node.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OffsetNode that = (OffsetNode) o; + return Objects.equals(node, that.node); + } + + @Override + public int hashCode() { + return Objects.hash(node); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OpenStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OpenStatement.java index ad905b0d12e..58a1cb1c2ec 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OpenStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OpenStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,21 +20,21 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class OpenStatement implements BindableStatement { - private final CursorName cursorName; + private final CursorName cursorName; - public OpenStatement(CursorName cursorName){ - this.cursorName = cursorName; - } + public OpenStatement(CursorName cursorName) { + this.cursorName = cursorName; + } - public String getCursorName(){ - return cursorName.getName(); - } + public String getCursorName() { + return cursorName.getName(); + } - public int getBindCount(){ - return 0; - } + public int getBindCount() { + return 0; + } - public Operation getOperation(){ - return Operation.UPSERT; - } + public Operation getOperation() { + return Operation.UPSERT; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrParseNode.java index 2a38819f56d..68e2e3009b7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,40 +23,35 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing an OR in SQL - * - * * @since 0.1 */ public class OrParseNode extends CompoundParseNode { - public static final String NAME = "OR"; + public static final String NAME = "OR"; - OrParseNode(List children) { - super(children); - } + OrParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - for (int i = 1 ; i < children.size(); i++) { - buf.append(" " + NAME + " "); - children.get(i).toSQL(resolver, buf); - } - buf.append(')'); + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + for (int i = 1; i < children.size(); i++) { + buf.append(" " + NAME + " "); + children.get(i).toSQL(resolver, buf); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrderByNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrderByNode.java index 39ddc8dbe1b..340b0d73c9b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrderByNode.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/OrderByNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,81 +20,78 @@ import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.schema.types.PInteger; - /** - * * Node representing an ORDER BY clause (including asc/desc and nulls first/last) in SQL - * - * * @since 0.1 */ public final class OrderByNode { - private final ParseNode child; - private final boolean nullsLast; - private final boolean orderAscending; - - OrderByNode(ParseNode child, boolean nullsLast, boolean orderAscending) { - this.child = child; - this.nullsLast = nullsLast; - this.orderAscending = orderAscending; - } - - public boolean isNullsLast() { - return nullsLast; - } - - public boolean isAscending() { - return orderAscending; - } - - public ParseNode getNode() { - return child; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((child == null) ? 0 : child.hashCode()); - result = prime * result + (nullsLast ? 1231 : 1237); - result = prime * result + (orderAscending ? 1231 : 1237); - return result; - } + private final ParseNode child; + private final boolean nullsLast; + private final boolean orderAscending; - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - OrderByNode other = (OrderByNode)obj; - if (child == null) { - if (other.child != null) return false; - } else if (!child.equals(other.child)) return false; - if (nullsLast != other.nullsLast) return false; - if (orderAscending != other.orderAscending) return false; - return true; - } + OrderByNode(ParseNode child, boolean nullsLast, boolean orderAscending) { + this.child = child; + this.nullsLast = nullsLast; + this.orderAscending = orderAscending; + } - @Override - public String toString() { - return child.toString() + (orderAscending ? " asc" : " desc") + " nulls " + (nullsLast ? "last" : "first"); - } + public boolean isNullsLast() { + return nullsLast; + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - child.toSQL(resolver, buf); - if (!orderAscending) buf.append(" DESC"); - if (nullsLast) buf.append(" NULLS LAST "); - } + public boolean isAscending() { + return orderAscending; + } - public boolean isIntegerLiteral() { - return child instanceof LiteralParseNode - && ((LiteralParseNode) child).getType() == PInteger.INSTANCE; - } + public ParseNode getNode() { + return child; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((child == null) ? 0 : child.hashCode()); + result = prime * result + (nullsLast ? 1231 : 1237); + result = prime * result + (orderAscending ? 
1231 : 1237); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + OrderByNode other = (OrderByNode) obj; + if (child == null) { + if (other.child != null) return false; + } else if (!child.equals(other.child)) return false; + if (nullsLast != other.nullsLast) return false; + if (orderAscending != other.orderAscending) return false; + return true; + } + + @Override + public String toString() { + return child.toString() + (orderAscending ? " asc" : " desc") + " nulls " + + (nullsLast ? "last" : "first"); + } + + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + child.toSQL(resolver, buf); + if (!orderAscending) buf.append(" DESC"); + if (nullsLast) buf.append(" NULLS LAST "); + } + + public boolean isIntegerLiteral() { + return child instanceof LiteralParseNode + && ((LiteralParseNode) child).getType() == PInteger.INSTANCE; + } - public Integer getValueIfIntegerLiteral() { - if (!isIntegerLiteral()) { - return null; - } - return (Integer) ((LiteralParseNode) child).getValue(); + public Integer getValueIfIntegerLiteral() { + if (!isIntegerLiteral()) { + return null; } + return (Integer) ((LiteralParseNode) child).getValue(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PFunction.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PFunction.java index f914fbae865..07a8dbb0171 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PFunction.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PFunction.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,247 +36,256 @@ public class PFunction implements PMetaDataEntity { - private PName tenantId = null; - private final PName functionName; - private List args; - private PName className; - private PName jarPath; - private PName returnType; - private PTableKey functionKey; - private long timeStamp; - private int estimatedSize; - private boolean temporary; - private boolean replace; - - public PFunction(long timeStamp) { // For index delete marker - this.timeStamp = timeStamp; - this.args = Collections.emptyList(); - this.functionName = null; - } + private PName tenantId = null; + private final PName functionName; + private List args; + private PName className; + private PName jarPath; + private PName returnType; + private PTableKey functionKey; + private long timeStamp; + private int estimatedSize; + private boolean temporary; + private boolean replace; - public PFunction(String functionName, List args, String returnType, - String className, String jarPath) { - this(functionName,args,returnType,className, jarPath, HConstants.LATEST_TIMESTAMP); - } + public PFunction(long timeStamp) { // For index delete marker + this.timeStamp = timeStamp; + this.args = Collections.emptyList(); + this.functionName = null; + } - public PFunction(String functionName, List args, String returnType, - String className, String jarPath, long timeStamp) { - this(null, functionName, args, returnType, className, jarPath, timeStamp); - } + public PFunction(String functionName, List args, String returnType, + String 
className, String jarPath) { + this(functionName, args, returnType, className, jarPath, HConstants.LATEST_TIMESTAMP); + } - public PFunction(PName tenantId, String functionName, List args, String returnType, - String className, String jarPath, long timeStamp) { - this(tenantId, functionName, args, returnType, className, jarPath, timeStamp, false); - } - - public PFunction(PFunction function, boolean temporary) { - this(function.getTenantId(), function.getFunctionName(), function.getFunctionArguments(), - function.getReturnType(), function.getClassName(), function.getJarPath(), function - .getTimeStamp(), temporary, function.isReplace()); - } + public PFunction(String functionName, List args, String returnType, + String className, String jarPath, long timeStamp) { + this(null, functionName, args, returnType, className, jarPath, timeStamp); + } + + public PFunction(PName tenantId, String functionName, List args, + String returnType, String className, String jarPath, long timeStamp) { + this(tenantId, functionName, args, returnType, className, jarPath, timeStamp, false); + } + + public PFunction(PFunction function, boolean temporary) { + this(function.getTenantId(), function.getFunctionName(), function.getFunctionArguments(), + function.getReturnType(), function.getClassName(), function.getJarPath(), + function.getTimeStamp(), temporary, function.isReplace()); + } + + public PFunction(PFunction function, boolean temporary, boolean isReplace) { + this(function.getTenantId(), function.getFunctionName(), function.getFunctionArguments(), + function.getReturnType(), function.getClassName(), function.getJarPath(), + function.getTimeStamp(), temporary, isReplace); + } + + public PFunction(PName tenantId, String functionName, List args, + String returnType, String className, String jarPath, long timeStamp, boolean temporary) { + this(tenantId, functionName, args, returnType, className, jarPath, timeStamp, temporary, false); + } - public PFunction(PFunction function, boolean temporary, boolean isReplace) { - this(function.getTenantId(), function.getFunctionName(), function.getFunctionArguments(), - function.getReturnType(), function.getClassName(), function.getJarPath(), function - .getTimeStamp(), temporary, isReplace); + public PFunction(PName tenantId, String functionName, List args, + String returnType, String className, String jarPath, long timeStamp, boolean temporary, + boolean replace) { + this.tenantId = tenantId; + this.functionName = PNameFactory.newName(functionName); + if (args == null) { + this.args = new ArrayList(); + } else { + this.args = args; } + this.className = PNameFactory.newName(className); + this.jarPath = jarPath == null ? null : PNameFactory.newName(jarPath); + this.returnType = PNameFactory.newName(returnType); + this.functionKey = new PTableKey(this.tenantId, this.functionName.getString()); + this.timeStamp = timeStamp; + this.estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE + + PNameFactory.getEstimatedSize(tenantId) + PNameFactory.getEstimatedSize(this.functionName) + + PNameFactory.getEstimatedSize(this.className) + + (jarPath == null ? 0 : PNameFactory.getEstimatedSize(this.jarPath)); + this.temporary = temporary; + this.replace = replace; + } + + public PFunction(PFunction function) { + this(function, function.isTemporaryFunction()); + } + + public String getFunctionName() { + return functionName == null ? 
null : functionName.getString(); + } + + public List getFunctionArguments() { + return args; + } + + public String getClassName() { + return className.getString(); + } + + public String getJarPath() { + return jarPath == null ? null : jarPath.getString(); + } + + public String getReturnType() { + return returnType.getString(); + } + + public PTableKey getKey() { + return this.functionKey; + } - public PFunction(PName tenantId, String functionName, List args, - String returnType, String className, String jarPath, long timeStamp, boolean temporary) { - this(tenantId, functionName, args, returnType, className, jarPath, timeStamp, temporary, - false); + public long getTimeStamp() { + return this.timeStamp; + } + + public PName getTenantId() { + return this.tenantId; + } + + public boolean isTemporaryFunction() { + return temporary; + } + + public static class FunctionArgument { + private final PName argumentType; + private final boolean isArrayType; + private final boolean isConstant; + private final LiteralExpression defaultValue; + private final LiteralExpression minValue; + private final LiteralExpression maxValue; + private short argPosition; + + public FunctionArgument(String argumentType, boolean isArrayType, boolean isConstant, + LiteralExpression defaultValue, LiteralExpression minValue, LiteralExpression maxValue) { + this.argumentType = PNameFactory.newName(argumentType); + this.isArrayType = isArrayType; + this.isConstant = isConstant; + this.defaultValue = defaultValue; + this.minValue = minValue; + this.maxValue = maxValue; } - public PFunction(PName tenantId, String functionName, List args, String returnType, - String className, String jarPath, long timeStamp, boolean temporary, boolean replace) { - this.tenantId = tenantId; - this.functionName = PNameFactory.newName(functionName); - if (args == null){ - this.args = new ArrayList(); - } else { - this.args = args; - } - this.className = PNameFactory.newName(className); - this.jarPath = jarPath == null ? null : PNameFactory.newName(jarPath); - this.returnType = PNameFactory.newName(returnType); - this.functionKey = new PTableKey(this.tenantId, this.functionName.getString()); - this.timeStamp = timeStamp; - this.estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + - 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE + - PNameFactory.getEstimatedSize(tenantId) + - PNameFactory.getEstimatedSize(this.functionName) + - PNameFactory.getEstimatedSize(this.className) + - (jarPath == null ? 0 : PNameFactory.getEstimatedSize(this.jarPath)); - this.temporary = temporary; - this.replace = replace; + public FunctionArgument(String argumentType, boolean isArrayType, boolean isConstant, + LiteralExpression defaultValue, LiteralExpression minValue, LiteralExpression maxValue, + short argPosition) { + this(argumentType, isArrayType, isConstant, defaultValue, minValue, maxValue); + this.argPosition = argPosition; } - public PFunction(PFunction function) { - this(function, function.isTemporaryFunction()); + public String getArgumentType() { + return argumentType.getString(); } - public String getFunctionName() { - return functionName == null ? 
null : functionName.getString(); + public boolean isConstant() { + return isConstant; } - public List getFunctionArguments() { - return args; + public boolean isArrayType() { + return isArrayType; } - public String getClassName() { - return className.getString(); + public LiteralExpression getDefaultValue() { + return defaultValue; } - public String getJarPath() { - return jarPath == null ? null : jarPath.getString(); + public LiteralExpression getMinValue() { + return minValue; } - public String getReturnType() { - return returnType.getString(); + public LiteralExpression getMaxValue() { + return maxValue; } - - public PTableKey getKey() { - return this.functionKey; + + public short getArgPosition() { + return argPosition; } - - public long getTimeStamp() { - return this.timeStamp; + } + + public static PFunctionProtos.PFunction toProto(PFunction function) { + PFunctionProtos.PFunction.Builder builder = PFunctionProtos.PFunction.newBuilder(); + if (function.getTenantId() != null) { + builder.setTenantId(ByteStringer.wrap(function.getTenantId().getBytes())); } - - public PName getTenantId() { - return this.tenantId; + builder.setFunctionName(function.getFunctionName()); + builder.setClassname(function.getClassName()); + if (function.getJarPath() != null) { + builder.setJarPath(function.getJarPath()); } - - public boolean isTemporaryFunction() { - return temporary; + builder.setReturnType(function.getReturnType()); + builder.setTimeStamp(function.getTimeStamp()); + for (FunctionArgument arg : function.getFunctionArguments()) { + PFunctionProtos.PFunctionArg.Builder argBuilder = PFunctionProtos.PFunctionArg.newBuilder(); + argBuilder.setArgumentType(arg.getArgumentType()); + argBuilder.setIsArrayType(arg.isArrayType); + argBuilder.setIsConstant(arg.isConstant); + if (arg.getDefaultValue() != null) { + argBuilder.setDefaultValue((String) arg.getDefaultValue().getValue()); + } + if (arg.getMinValue() != null) { + argBuilder.setMinValue((String) arg.getMinValue().getValue()); + } + if (arg.getMaxValue() != null) { + argBuilder.setMaxValue((String) arg.getMaxValue().getValue()); + } + builder.addArguments(argBuilder.build()); } - - public static class FunctionArgument { - private final PName argumentType; - private final boolean isArrayType; - private final boolean isConstant; - private final LiteralExpression defaultValue; - private final LiteralExpression minValue; - private final LiteralExpression maxValue; - private short argPosition; - - public FunctionArgument(String argumentType, boolean isArrayType, boolean isConstant, LiteralExpression defaultValue, - LiteralExpression minValue, LiteralExpression maxValue) { - this.argumentType = PNameFactory.newName(argumentType); - this.isArrayType = isArrayType; - this.isConstant = isConstant; - this.defaultValue = defaultValue; - this.minValue = minValue; - this.maxValue = maxValue; - } - public FunctionArgument(String argumentType, boolean isArrayType, boolean isConstant, LiteralExpression defaultValue, - LiteralExpression minValue, LiteralExpression maxValue, short argPosition) { - this(argumentType, isArrayType, isConstant, defaultValue, minValue, maxValue); - this.argPosition = argPosition; - } - - public String getArgumentType() { - return argumentType.getString(); - } - - public boolean isConstant() { - return isConstant; - } - - public boolean isArrayType() { - return isArrayType; - } - - public LiteralExpression getDefaultValue() { - return defaultValue; - } - - public LiteralExpression getMinValue() { - return minValue; - } - - public 
LiteralExpression getMaxValue() { - return maxValue; - } - - public short getArgPosition() { - return argPosition; - } + if (builder.hasIsReplace()) { + builder.setIsReplace(function.isReplace()); } - - public static PFunctionProtos.PFunction toProto(PFunction function) { - PFunctionProtos.PFunction.Builder builder = PFunctionProtos.PFunction.newBuilder(); - if(function.getTenantId() != null){ - builder.setTenantId(ByteStringer.wrap(function.getTenantId().getBytes())); - } - builder.setFunctionName(function.getFunctionName()); - builder.setClassname(function.getClassName()); - if (function.getJarPath() != null) { - builder.setJarPath(function.getJarPath()); - } - builder.setReturnType(function.getReturnType()); - builder.setTimeStamp(function.getTimeStamp()); - for(FunctionArgument arg: function.getFunctionArguments()) { - PFunctionProtos.PFunctionArg.Builder argBuilder = PFunctionProtos.PFunctionArg.newBuilder(); - argBuilder.setArgumentType(arg.getArgumentType()); - argBuilder.setIsArrayType(arg.isArrayType); - argBuilder.setIsConstant(arg.isConstant); - if(arg.getDefaultValue() != null) { - argBuilder.setDefaultValue((String)arg.getDefaultValue().getValue()); - } - if(arg.getMinValue() != null) { - argBuilder.setMinValue((String)arg.getMinValue().getValue()); - } - if(arg.getMaxValue() != null) { - argBuilder.setMaxValue((String)arg.getMaxValue().getValue()); - } - builder.addArguments(argBuilder.build()); - } - if(builder.hasIsReplace()) { - builder.setIsReplace(function.isReplace()); - } - return builder.build(); - } + return builder.build(); + } - public static PFunction createFromProto( - org.apache.phoenix.coprocessor.generated.PFunctionProtos.PFunction function) { - PName tenantId = null; - if(function.hasTenantId()){ - tenantId = PNameFactory.newName(function.getTenantId().toByteArray()); - } - String functionName = function.getFunctionName(); - long timeStamp = function.getTimeStamp(); - String className = function.getClassname(); - String jarPath = function.getJarPath(); - String returnType = function.getReturnType(); - List args = new ArrayList(function.getArgumentsCount()); - for(PFunctionArg arg: function.getArgumentsList()) { - String argType = arg.getArgumentType(); - boolean isArrayType = arg.hasIsArrayType()?arg.getIsArrayType():false; - PDataType dataType = isArrayType ? PDataType.fromTypeId(PDataType - .sqlArrayType(SchemaUtil.normalizeIdentifier(SchemaUtil - .normalizeIdentifier(argType)))) : PDataType - .fromSqlTypeName(SchemaUtil.normalizeIdentifier(argType)); - boolean isConstant = arg.hasIsConstant()?arg.getIsConstant():false; - String defaultValue = arg.hasDefaultValue()?arg.getDefaultValue():null; - String minValue = arg.hasMinValue()?arg.getMinValue():null; - String maxValue = arg.hasMaxValue()?arg.getMaxValue():null; - args.add(new FunctionArgument(argType, isArrayType, isConstant, - defaultValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(defaultValue))).getValue()), - minValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(minValue))).getValue()), - maxValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(maxValue))).getValue()))); - } - return new PFunction(tenantId, functionName, args, returnType, className, jarPath, - timeStamp, false, function.hasIsReplace() ? 
true : false); + public static PFunction + createFromProto(org.apache.phoenix.coprocessor.generated.PFunctionProtos.PFunction function) { + PName tenantId = null; + if (function.hasTenantId()) { + tenantId = PNameFactory.newName(function.getTenantId().toByteArray()); } - - @Override - public int getEstimatedSize() { - return estimatedSize; + String functionName = function.getFunctionName(); + long timeStamp = function.getTimeStamp(); + String className = function.getClassname(); + String jarPath = function.getJarPath(); + String returnType = function.getReturnType(); + List args = new ArrayList(function.getArgumentsCount()); + for (PFunctionArg arg : function.getArgumentsList()) { + String argType = arg.getArgumentType(); + boolean isArrayType = arg.hasIsArrayType() ? arg.getIsArrayType() : false; + PDataType dataType = isArrayType + ? PDataType.fromTypeId(PDataType + .sqlArrayType(SchemaUtil.normalizeIdentifier(SchemaUtil.normalizeIdentifier(argType)))) + : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(argType)); + boolean isConstant = arg.hasIsConstant() ? arg.getIsConstant() : false; + String defaultValue = arg.hasDefaultValue() ? arg.getDefaultValue() : null; + String minValue = arg.hasMinValue() ? arg.getMinValue() : null; + String maxValue = arg.hasMaxValue() ? arg.getMaxValue() : null; + args.add(new FunctionArgument(argType, isArrayType, isConstant, + defaultValue == null + ? null + : LiteralExpression + .newConstant((new LiteralParseNode(dataType.toObject(defaultValue))).getValue()), + minValue == null + ? null + : LiteralExpression + .newConstant((new LiteralParseNode(dataType.toObject(minValue))).getValue()), + maxValue == null + ? null + : LiteralExpression + .newConstant((new LiteralParseNode(dataType.toObject(maxValue))).getValue()))); } + return new PFunction(tenantId, functionName, args, returnType, className, jarPath, timeStamp, + false, function.hasIsReplace() ? true : false); + } - public boolean isReplace() { - return this.replace; - } -} + @Override + public int getEstimatedSize() { + return estimatedSize; + } + public boolean isReplace() { + return this.replace; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PSchema.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PSchema.java index 7a0ddc8f395..8ef346d94e9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PSchema.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PSchema.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,59 +28,59 @@ public class PSchema implements PMetaDataEntity { - private final PName schemaName; - private PTableKey schemaKey; - private long timeStamp; - private int estimatedSize; + private final PName schemaName; + private PTableKey schemaKey; + private long timeStamp; + private int estimatedSize; - public PSchema(long timeStamp) { // For index delete marker - this.timeStamp = timeStamp; - this.schemaName = null; - } + public PSchema(long timeStamp) { // For index delete marker + this.timeStamp = timeStamp; + this.schemaName = null; + } - public PSchema(String schemaName) { - this(schemaName, HConstants.LATEST_TIMESTAMP); - } + public PSchema(String schemaName) { + this(schemaName, HConstants.LATEST_TIMESTAMP); + } - public PSchema(String schemaName, long timeStamp) { - this.schemaName = PNameFactory.newName(SchemaUtil.normalizeIdentifier(schemaName)); - this.schemaKey = new PTableKey(null, this.schemaName.getString()); - this.timeStamp = timeStamp; - this.estimatedSize = SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE + PNameFactory.getEstimatedSize(this.schemaName); - } + public PSchema(String schemaName, long timeStamp) { + this.schemaName = PNameFactory.newName(SchemaUtil.normalizeIdentifier(schemaName)); + this.schemaKey = new PTableKey(null, this.schemaName.getString()); + this.timeStamp = timeStamp; + this.estimatedSize = + SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE + PNameFactory.getEstimatedSize(this.schemaName); + } - public PSchema(PSchema schema) { - this(schema.getSchemaName().toString(), schema.getTimeStamp()); - } + public PSchema(PSchema schema) { + this(schema.getSchemaName().toString(), schema.getTimeStamp()); + } - public String getSchemaName() { - return schemaName == null ? null : schemaName.getString(); - } + public String getSchemaName() { + return schemaName == null ? 
null : schemaName.getString(); + } - public PTableKey getSchemaKey() { - return schemaKey; - } + public PTableKey getSchemaKey() { + return schemaKey; + } - public long getTimeStamp() { - return timeStamp; - } + public long getTimeStamp() { + return timeStamp; + } - public static PSchemaProtos.PSchema toProto(PSchema schema) { - PSchemaProtos.PSchema.Builder builder = PSchemaProtos.PSchema.newBuilder(); - builder.setSchemaName(schema.getSchemaName()); - builder.setTimeStamp(schema.getTimeStamp()); - return builder.build(); - } + public static PSchemaProtos.PSchema toProto(PSchema schema) { + PSchemaProtos.PSchema.Builder builder = PSchemaProtos.PSchema.newBuilder(); + builder.setSchemaName(schema.getSchemaName()); + builder.setTimeStamp(schema.getTimeStamp()); + return builder.build(); + } - public static PSchema createFromProto(PSchemaProtos.PSchema schema) { - long timeStamp = schema.getTimeStamp(); - String schemaName = schema.getSchemaName(); - return new PSchema(schemaName, timeStamp); - } + public static PSchema createFromProto(PSchemaProtos.PSchema schema) { + long timeStamp = schema.getTimeStamp(); + String schemaName = schema.getSchemaName(); + return new PSchema(schemaName, timeStamp); + } - public int getEstimatedSize() { - return estimatedSize; - } + public int getEstimatedSize() { + return estimatedSize; + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseContext.java index bbe1bb665c0..9a67bcbd0b2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,46 +22,46 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class ParseContext { - private boolean isAggregate; - private boolean hasSequences; - - public ParseContext() { - } + private boolean isAggregate; + private boolean hasSequences; - public boolean isAggregate() { - return isAggregate; - } + public ParseContext() { + } + + public boolean isAggregate() { + return isAggregate; + } + + public void setAggregate(boolean isAggregate) { + this.isAggregate |= isAggregate; + } - public void setAggregate(boolean isAggregate) { - this.isAggregate |= isAggregate; + public boolean hasSequences() { + return hasSequences; + } + + public void hasSequences(boolean hasSequences) { + this.hasSequences |= hasSequences; + } + + public static class Stack { + private final List stack = Lists.newArrayListWithExpectedSize(5); + + public void push(ParseContext context) { + stack.add(context); } - public boolean hasSequences() { - return hasSequences; + public ParseContext pop() { + return stack.remove(stack.size() - 1); } - public void hasSequences(boolean hasSequences) { - this.hasSequences |= hasSequences; + public ParseContext peek() { + return stack.get(stack.size() - 1); } - public static class Stack { - private final List stack = Lists.newArrayListWithExpectedSize(5); - - public void push(ParseContext context) { - stack.add(context); - } - - public ParseContext pop() { - return stack.remove(stack.size()-1); - } - - public ParseContext peek() { - return stack.get(stack.size()-1); - } - - public boolean isEmpty() { - return stack.isEmpty(); - } + public boolean isEmpty() { + return stack.isEmpty(); } - + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseException.java index fba6477fee2..043e6a4b704 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,28 +18,25 @@ package org.apache.phoenix.parse; /** - * - * RuntimeException for exceptions occurring during parsing, - * since ANTLR doesn't handle typed exceptions well. - * - * + * RuntimeException for exceptions occurring during parsing, since ANTLR doesn't handle typed + * exceptions well. 
* @since 2.0 */ public class ParseException extends RuntimeException { - public ParseException() { - } + public ParseException() { + } - public ParseException(String msg) { - super(msg); - } + public ParseException(String msg) { + super(msg); + } - public ParseException(Throwable t) { - super(t); - } + public ParseException(Throwable t) { + super(t); + } - public ParseException(String msg, Throwable t) { - super(msg, t); - } + public ParseException(String msg, Throwable t) { + super(msg, t); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNode.java index 12d6737a446..f97ceae5054 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,87 +22,83 @@ import org.apache.phoenix.compile.ColumnResolver; - - - /** - * * Abstract base class for a parse node in SQL - * - * * @since 0.1 */ public abstract class ParseNode { - public abstract List getChildren(); - public abstract T accept(ParseNodeVisitor visitor) throws SQLException; - - public boolean isStateless() { - return false; - } - - /** - * Allows node to override what the alias is for a given node. - * Useful for a column reference, as JDBC says that the alias - * name for "a.b" should be "b" - * @return the alias to use for this node or null for no alias - */ - public String getAlias() { - return null; - } - - @Override - public final String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null, buf); - return buf.toString(); - } + public abstract List getChildren(); - /** - * Returns whether this ParseNode is a SubqueryParseNode - * or contains any SubqueryParseNode descendant. - */ - public boolean hasSubquery() { - SubqueryFinder finder = new SubqueryFinder(); - try { - this.accept(finder); - } catch (SQLException e) { - // Not possible. - } - return finder.hasSubquery; + public abstract T accept(ParseNodeVisitor visitor) throws SQLException; + + public boolean isStateless() { + return false; + } + + /** + * Allows node to override what the alias is for a given node. Useful for a column reference, as + * JDBC says that the alias name for "a.b" should be "b" + * @return the alias to use for this node or null for no alias + */ + public String getAlias() { + return null; + } + + @Override + public final String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } + + /** + * Returns whether this ParseNode is a SubqueryParseNode or contains any SubqueryParseNode + * descendant. + */ + public boolean hasSubquery() { + SubqueryFinder finder = new SubqueryFinder(); + try { + this.accept(finder); + } catch (SQLException e) { + // Not possible. } + return finder.hasSubquery; + } - public boolean hasJsonExpression() { - JsonFunctionFinder finder = new JsonFunctionFinder(); - try { - this.accept(finder); - } catch (SQLException e) { - // Not possible. 
- } - return finder.hasJsonFunction; + public boolean hasJsonExpression() { + JsonFunctionFinder finder = new JsonFunctionFinder(); + try { + this.accept(finder); + } catch (SQLException e) { + // Not possible. } + return finder.hasJsonFunction; + } - public abstract void toSQL(ColumnResolver resolver, StringBuilder buf); + public abstract void toSQL(ColumnResolver resolver, StringBuilder buf); - private static class SubqueryFinder extends StatelessTraverseAllParseNodeVisitor { - private boolean hasSubquery = false; + private static class SubqueryFinder extends StatelessTraverseAllParseNodeVisitor { + private boolean hasSubquery = false; - @Override - public Void visit(SubqueryParseNode node) throws SQLException { - hasSubquery = true; - return null; - } + @Override + public Void visit(SubqueryParseNode node) throws SQLException { + hasSubquery = true; + return null; } + } + + private static class JsonFunctionFinder extends StatelessTraverseAllParseNodeVisitor { + private boolean hasJsonFunction = false; - private static class JsonFunctionFinder extends StatelessTraverseAllParseNodeVisitor { - private boolean hasJsonFunction = false; - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - if (node instanceof JsonValueParseNode - || node instanceof JsonQueryParseNode - || node instanceof JsonModifyParseNode) { - hasJsonFunction = true; - } - return true; - } + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + if ( + node instanceof JsonValueParseNode || node instanceof JsonQueryParseNode + || node instanceof JsonModifyParseNode + ) { + hasJsonFunction = true; + } + return true; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java index dc3c1835cf3..10b9655a094 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,9 +28,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -47,6 +44,7 @@ import org.apache.phoenix.parse.JoinTableNode.JoinType; import org.apache.phoenix.parse.LikeParseNode.LikeType; import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.SortOrder; @@ -56,1005 +54,1062 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.SchemaUtil; - +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; +import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.SchemaUtil; /** - * * Factory used by parser to construct object model while parsing a SQL statement - * - * * @since 0.1 */ public class ParseNodeFactory { - private static final String ARRAY_ELEM = "ARRAY_ELEM"; - // TODO: Use Google's Reflection library instead to find aggregate functions - @SuppressWarnings("unchecked") - private static final List> CLIENT_SIDE_BUILT_IN_FUNCTIONS = Arrays.>asList( - CurrentDateFunction.class, - CurrentTimeFunction.class, - AvgAggregateFunction.class - ); - private static final Map BUILT_IN_FUNCTION_MAP = Maps.newHashMap(); - private static final Multimap BUILT_IN_FUNCTION_MULTIMAP = ArrayListMultimap.create(); - private static final BigDecimal MAX_LONG = BigDecimal.valueOf(Long.MAX_VALUE); - - /** - * - * Key used to look up a built-in function using the combination of - * the lowercase name and the number of arguments. This disambiguates - * the aggregate {@code MAX(
) } from the non aggregate {@code MAX(,) }. - * - * - * @since 0.1 - */ - public static class BuiltInFunctionKey { - private final String upperName; - private final int argCount; - - public BuiltInFunctionKey(String lowerName, int argCount) { - this.upperName = lowerName; - this.argCount = argCount; + private static final String ARRAY_ELEM = "ARRAY_ELEM"; + // TODO: Use Google's Reflection library instead to find aggregate functions + @SuppressWarnings("unchecked") + private static final List> CLIENT_SIDE_BUILT_IN_FUNCTIONS = + Arrays.> asList(CurrentDateFunction.class, + CurrentTimeFunction.class, AvgAggregateFunction.class); + private static final Map BUILT_IN_FUNCTION_MAP = + Maps.newHashMap(); + private static final Multimap BUILT_IN_FUNCTION_MULTIMAP = + ArrayListMultimap.create(); + private static final BigDecimal MAX_LONG = BigDecimal.valueOf(Long.MAX_VALUE); + + /** + * Key used to look up a built-in function using the combination of the lowercase name and the + * number of arguments. This disambiguates the aggregate {@code MAX() } from the non + * aggregate {@code MAX(,) }. + * @since 0.1 + */ + public static class BuiltInFunctionKey { + private final String upperName; + private final int argCount; + + public BuiltInFunctionKey(String lowerName, int argCount) { + this.upperName = lowerName; + this.argCount = argCount; + } + + @Override + public String toString() { + return upperName; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + argCount; + result = prime * result + ((upperName == null) ? 0 : upperName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + BuiltInFunctionKey other = (BuiltInFunctionKey) obj; + if (argCount != other.argCount) return false; + if (!upperName.equals(other.upperName)) return false; + return true; + } + } + + private static void addBuiltInFunction(Class f) throws Exception { + BuiltInFunction d = f.getAnnotation(BuiltInFunction.class); + if (d == null) { + return; + } + int nArgs = d.args().length; + BuiltInFunctionInfo value = new BuiltInFunctionInfo(f, d); + if (d.classType() != FunctionParseNode.FunctionClassType.ABSTRACT) { + BUILT_IN_FUNCTION_MULTIMAP.put(value.getName(), value); + } + if (d.classType() != FunctionParseNode.FunctionClassType.DERIVED) { + do { + // Add function to function map, throwing if conflicts found + // Add entry for each possible version of function based on arguments that are not required + // to be present (i.e. arg with default value) + BuiltInFunctionKey key = new BuiltInFunctionKey(value.getName(), nArgs); + if (BUILT_IN_FUNCTION_MAP.put(key, value) != null) { + throw new IllegalStateException( + "Multiple " + value.getName() + " functions with " + nArgs + " arguments"); } - - @Override - public String toString() { - return upperName; + } while (--nArgs >= 0 && d.args()[nArgs].defaultValue().length() > 0); + + // Look for default values that aren't at the end and throw + while (--nArgs >= 0) { + if (d.args()[nArgs].defaultValue().length() > 0) { + throw new IllegalStateException("Function " + value.getName() + + " has non trailing default value of '" + d.args()[nArgs].defaultValue() + + "'. 
Only trailing arguments may have default values"); } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + argCount; - result = prime * result + ((upperName == null) ? 0 : upperName.hashCode()); - return result; + } + } + } + + /** + * Reflect this class and populate static structures from it. Don't initialize in static block + * because we have a circular dependency + */ + private synchronized static void initBuiltInFunctionMap() { + if (!BUILT_IN_FUNCTION_MAP.isEmpty()) { + return; + } + Class f = null; + try { + // Reflection based parsing which yields direct explicit function evaluation at runtime + for (int i = 0; i < CLIENT_SIDE_BUILT_IN_FUNCTIONS.size(); i++) { + f = CLIENT_SIDE_BUILT_IN_FUNCTIONS.get(i); + addBuiltInFunction(f); + } + for (ExpressionType et : ExpressionType.values()) { + Class ec = et.getExpressionClass(); + if (FunctionExpression.class.isAssignableFrom(ec)) { + @SuppressWarnings("unchecked") + Class c = (Class) ec; + addBuiltInFunction(f = c); } + } + } catch (Exception e) { + throw new RuntimeException("Failed initialization of built-in functions at class '" + f + "'", + e); + } + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - BuiltInFunctionKey other = (BuiltInFunctionKey)obj; - if (argCount != other.argCount) return false; - if (!upperName.equals(other.upperName)) return false; - return true; - } - } + private static BuiltInFunctionInfo getInfo(String name, List children) { + return get(SchemaUtil.normalizeIdentifier(name), children); + } - private static void addBuiltInFunction(Class f) throws Exception { - BuiltInFunction d = f.getAnnotation(BuiltInFunction.class); - if (d == null) { - return; - } - int nArgs = d.args().length; - BuiltInFunctionInfo value = new BuiltInFunctionInfo(f, d); - if (d.classType() != FunctionParseNode.FunctionClassType.ABSTRACT) { - BUILT_IN_FUNCTION_MULTIMAP.put(value.getName(), value); - } - if (d.classType() != FunctionParseNode.FunctionClassType.DERIVED) { - do { - // Add function to function map, throwing if conflicts found - // Add entry for each possible version of function based on arguments that are not required to be present (i.e. arg with default value) - BuiltInFunctionKey key = new BuiltInFunctionKey(value.getName(), nArgs); - if (BUILT_IN_FUNCTION_MAP.put(key, value) != null) { - throw new IllegalStateException("Multiple " + value.getName() + " functions with " + nArgs + " arguments"); - } - } while (--nArgs >= 0 && d.args()[nArgs].defaultValue().length() > 0); - - // Look for default values that aren't at the end and throw - while (--nArgs >= 0) { - if (d.args()[nArgs].defaultValue().length() > 0) { - throw new IllegalStateException("Function " + value.getName() + " has non trailing default value of '" + d.args()[nArgs].defaultValue() + "'. Only trailing arguments may have default values"); - } - } - } - } - /** - * Reflect this class and populate static structures from it. 
- * Don't initialize in static block because we have a circular dependency + public static BuiltInFunctionInfo get(String normalizedName, List children) { + initBuiltInFunctionMap(); + BuiltInFunctionInfo info = + BUILT_IN_FUNCTION_MAP.get(new BuiltInFunctionKey(normalizedName, children.size())); + return info; + } + + public static Multimap getBuiltInFunctionMultimap() { + initBuiltInFunctionMap(); + return BUILT_IN_FUNCTION_MULTIMAP; + } + + public ParseNodeFactory() { + } + + private static AtomicInteger tempAliasCounter = new AtomicInteger(0); + + @VisibleForTesting + public static int getTempAliasCounterValue() { + return tempAliasCounter.get(); + } + + @VisibleForTesting + public static void setTempAliasCounterValue(int newValue) { + tempAliasCounter.set(newValue); + } + + public static String createTempAlias() { + return "$" + tempAliasCounter.incrementAndGet(); + } + + public ExplainStatement explain(BindableStatement statement, ExplainType explainType) { + return new ExplainStatement(statement, explainType); + } + + public AliasedNode aliasedNode(String alias, ParseNode expression) { + return new AliasedNode(alias, expression); + } + + public AddParseNode add(List children) { + return new AddParseNode(children); + } + + public SubtractParseNode subtract(List children) { + return new SubtractParseNode(children); + } + + public MultiplyParseNode multiply(List children) { + return new MultiplyParseNode(children); + } + + public ModulusParseNode modulus(List children) { + return new ModulusParseNode(children); + } + + public AndParseNode and(List children) { + return new AndParseNode(children); + } + + public FamilyWildcardParseNode family(String familyName) { + return new FamilyWildcardParseNode(familyName, false); + } + + public TableWildcardParseNode tableWildcard(TableName tableName) { + return new TableWildcardParseNode(tableName, false); + } + + public WildcardParseNode wildcard() { + return WildcardParseNode.INSTANCE; + } + + public BetweenParseNode between(ParseNode l, ParseNode r1, ParseNode r2, boolean negate) { + return new BetweenParseNode(l, r1, r2, negate); + } + + public BindParseNode bind(String bind) { + return new BindParseNode(bind); + } + + public StringConcatParseNode concat(List children) { + return new StringConcatParseNode(children); + } + + public ColumnParseNode column(TableName tableName, String columnName, String alias) { + return new ColumnParseNode(tableName, columnName, alias); + } + + public ColumnName columnName(String columnName) { + return new ColumnName(columnName); + } + + public ColumnName columnName(String familyName, String columnName) { + return new ColumnName(familyName, columnName); + } + + public PropertyName propertyName(String propertyName) { + return new PropertyName(propertyName); + } + + public PropertyName propertyName(String familyName, String propertyName) { + return new PropertyName(familyName, propertyName); + } + + public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isNull, + Integer maxLength, Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr, + boolean isRowTimestamp) { + return new ColumnDef(columnDefName, sqlTypeName, isNull, maxLength, scale, isPK, sortOrder, + expressionStr, null, isRowTimestamp); + } + + public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, + Integer arrSize, Boolean isNull, Integer maxLength, Integer scale, boolean isPK, + SortOrder sortOrder, String expressionStr, Integer encodedQualifier, boolean isRowTimestamp) { + 
return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, + isPK, sortOrder, expressionStr, encodedQualifier, isRowTimestamp); + } + + public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, + Integer arrSize, Boolean isNull, Integer maxLength, Integer scale, boolean isPK, + SortOrder sortOrder, String expressionStr, boolean isRowTimestamp) { + return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, + isPK, sortOrder, expressionStr, null, isRowTimestamp); + } + + public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, + Integer arrSize, Boolean isNull, Integer maxLength, Integer scale, boolean isPK, + SortOrder sortOrder, boolean isRowTimestamp) { + return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, + isPK, sortOrder, null, null, isRowTimestamp); + } + + public ColumnDefInPkConstraint columnDefInPkConstraint(ColumnName columnDefName, + SortOrder sortOrder, boolean isRowTimestamp) { + return new ColumnDefInPkConstraint(columnDefName, sortOrder, isRowTimestamp); + } + + public PrimaryKeyConstraint primaryKey(String name, List columnDefs) { + return new PrimaryKeyConstraint(name, columnDefs); + } + + public IndexKeyConstraint indexKey(List> parseNodeAndSortOrder) { + return new IndexKeyConstraint(parseNodeAndSortOrder); + } + + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows, Map cqCounters, boolean noVerify) { + return new CreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, + ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, noVerify); + } + + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows, Map cqCounters) { + return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, + baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, false); + } + + public CreateTableStatement createTable(TableName tableName, + ListMultimap> props, List columns, + PrimaryKeyConstraint pkConstraint, List splits, PTableType tableType, + boolean ifNotExists, TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, + Boolean immutableRows) { + return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, + baseTableName, tableTypeIdNode, bindCount, immutableRows, null, false); + } + + public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) { + return new CreateSchemaStatement(schemaName, ifNotExists); + } + + public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, + IndexKeyConstraint ikConstraint, List includeColumns, List splits, + ListMultimap> props, boolean ifNotExists, IndexType indexType, + boolean async, int bindCount, Map udfParseNodes, ParseNode where) { + return new CreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, splits, + props, ifNotExists, indexType, async, bindCount, udfParseNodes, where); + } + + public CreateCDCStatement 
createCDC(NamedNode cdcObj, TableName dataTable, + Set includeScopes, ListMultimap> props, + boolean ifNotExists, int bindCount) { + return new CreateCDCStatement(cdcObj, dataTable, includeScopes, props, ifNotExists, bindCount); + } + + public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith, + ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, + boolean cycle, boolean ifNotExits, int bindCount) { + return new CreateSequenceStatement(tableName, startsWith, incrementBy, cacheSize, minValue, + maxValue, cycle, ifNotExits, bindCount); + } + + public CreateFunctionStatement createFunction(PFunction functionInfo, boolean temporary, + boolean isReplace) { + return new CreateFunctionStatement(functionInfo, temporary, isReplace); + } + + public AddJarsStatement addJars(List jarPaths) { + return new AddJarsStatement(jarPaths); + } + + public ListJarsStatement listJars() { + return new ListJarsStatement(); + } + + public DeleteJarStatement deleteJar(LiteralParseNode jarPath) { + return new DeleteJarStatement(jarPath); + } + + public DropFunctionStatement dropFunction(String functionName, boolean ifExists) { + return new DropFunctionStatement(functionName, ifExists); + } + + public DropSequenceStatement dropSequence(TableName tableName, boolean ifExits, int bindCount) { + return new DropSequenceStatement(tableName, ifExits, bindCount); + } + + public SequenceValueParseNode currentValueFor(TableName tableName) { + return new SequenceValueParseNode(tableName, SequenceValueParseNode.Op.CURRENT_VALUE, null); + } + + public SequenceValueParseNode nextValueFor(TableName tableName, ParseNode numToAllocateNode) { + return new SequenceValueParseNode(tableName, SequenceValueParseNode.Op.NEXT_VALUE, + numToAllocateNode); + } + + public AddColumnStatement addColumn(NamedTableNode table, PTableType tableType, + List columnDefs, boolean ifNotExists, + ListMultimap> props, boolean cascade, List indexes) { + return new AddColumnStatement(table, tableType, columnDefs, ifNotExists, props, cascade, + indexes); + } + + public DropColumnStatement dropColumn(NamedTableNode table, PTableType tableType, + List columnNodes, boolean ifExists) { + return new DropColumnStatement(table, tableType, columnNodes, ifExists); + } + + public DropTableStatement dropTable(TableName tableName, PTableType tableType, boolean ifExists, + boolean cascade) { + return new DropTableStatement(tableName, tableType, ifExists, cascade, false); + } + + public DropIndexStatement dropIndex(NamedNode indexName, TableName tableName, boolean ifExists) { + return new DropIndexStatement(indexName, tableName, ifExists); + } + + public DropCDCStatement dropCDC(NamedNode cdcObjName, TableName tableName, boolean ifExists) { + return new DropCDCStatement(cdcObjName, tableName, ifExists); + } + + public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, + boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, + ListMultimap> props) { + return new AlterIndexStatement(indexTableNode, dataTableName, ifExists, state, isRebuildAll, + async, props); + } + + public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, + boolean ifExists, PIndexState state) { + return new AlterIndexStatement(indexTableNode, dataTableName, ifExists, state, false, false); + } + + public TraceStatement trace(boolean isTraceOn, double samplingRate) { + return new TraceStatement(isTraceOn, samplingRate); + } + + public AlterSessionStatement 
alterSession(Map props) { + return new AlterSessionStatement(props); + } + + public TableName table(String schemaName, String tableName) { + return TableName.createNormalized(schemaName, tableName); + } + + public NamedNode indexName(String name) { + return new NamedNode(name); + } + + public NamedNode cdcName(String name) { + return new NamedNode(name); + } + + @Deprecated + public NamedTableNode namedTable(String alias, TableName name) { + return new NamedTableNode(alias, name); + } + + @Deprecated + public NamedTableNode namedTable(String alias, TableName name, List dyn_columns) { + return new NamedTableNode(alias, name, dyn_columns); + } + + public NamedTableNode namedTable(String alias, TableName name, Double tableSamplingRate) { + return new NamedTableNode(alias, name, tableSamplingRate); + } + + public NamedTableNode namedTable(String alias, TableName name, List dyn_columns, + Double tableSamplingRate) { + return new NamedTableNode(alias, name, dyn_columns, tableSamplingRate); + } + + public NamedTableNode namedTable(String alias, TableName name, List dyn_columns, + LiteralParseNode tableSampleNode) { + Double tableSamplingRate; + if (tableSampleNode == null || tableSampleNode.getValue() == null) { + tableSamplingRate = ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE; + } else if (tableSampleNode.getValue() instanceof Integer) { + tableSamplingRate = (double) ((int) tableSampleNode.getValue()); + } else { + tableSamplingRate = ((BigDecimal) tableSampleNode.getValue()).doubleValue(); + } + return new NamedTableNode(alias, name, dyn_columns, tableSamplingRate); + } + + public BindTableNode bindTable(String alias, TableName name) { + return new BindTableNode(alias, name); + } + + public CaseParseNode caseWhen(List children) { + return new CaseParseNode(children); + } + + public DivideParseNode divide(List children) { + return new DivideParseNode(children); + } + + public UpdateStatisticsStatement updateStatistics(NamedTableNode table, + StatisticsCollectionScope scope, Map props) { + return new UpdateStatisticsStatement(table, scope, props); + } + + public ExecuteUpgradeStatement executeUpgrade() { + return new ExecuteUpgradeStatement(); + } + + public FunctionParseNode functionDistinct(String name, List args) { + if (CountAggregateFunction.NAME.equals(SchemaUtil.normalizeIdentifier(name))) { + BuiltInFunctionInfo info = + getInfo(SchemaUtil.normalizeIdentifier(DistinctCountAggregateFunction.NAME), args); + return new DistinctCountParseNode(DistinctCountAggregateFunction.NAME, args, info); + } else { + throw new UnsupportedOperationException("DISTINCT not supported with " + name); + } + } + + public FunctionParseNode arrayElemRef(List args) { + return function(ARRAY_ELEM, args); + } + + public FunctionParseNode function(String name, List args) { + BuiltInFunctionInfo info = getInfo(name, args); + if (info == null) { + return new UDFParseNode(name, args, info); + } + Constructor ctor = info.getNodeCtor(); + if (ctor == null) { + return info.isAggregate() + ? 
new AggregateFunctionParseNode(name, args, info) + : new FunctionParseNode(name, args, info); + } else { + try { + return ctor.newInstance(name, args, info); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + public FunctionParseNode function(String name, List valueNodes, + List columnNodes, boolean isAscending) { + + List args = + Lists.newArrayListWithExpectedSize(columnNodes.size() + valueNodes.size() + 1); + args.addAll(columnNodes); + args.add(new LiteralParseNode(Boolean.valueOf(isAscending))); + args.addAll(valueNodes); + + BuiltInFunctionInfo info = getInfo(name, args); + if (info == null) { + return new UDFParseNode(name, args, info); + } + Constructor ctor = info.getNodeCtor(); + if (ctor == null) { + return new AggregateFunctionWithinGroupParseNode(name, args, info); + } else { + try { + return ctor.newInstance(name, args, info); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + public HintNode hint(String hint) { + return new HintNode(hint); + } + + public InListParseNode inList(List children, boolean negate) { + return new InListParseNode(children, negate); + } + + public ExistsParseNode exists(ParseNode child, boolean negate) { + return new ExistsParseNode(child, negate); + } + + public InParseNode in(ParseNode l, ParseNode r, boolean negate, boolean isSubqueryDistinct) { + return new InParseNode(l, r, negate, isSubqueryDistinct); + } + + public IsNullParseNode isNull(ParseNode child, boolean negate) { + return new IsNullParseNode(child, negate); + } + + public JoinTableNode join(JoinType type, TableNode lhs, TableNode rhs, ParseNode on, + boolean singleValueOnly) { + return new JoinTableNode(type, lhs, rhs, on, singleValueOnly); + } + + public DerivedTableNode derivedTable(String alias, SelectStatement select) { + return new DerivedTableNode(alias, select); + } + + public LikeParseNode like(ParseNode lhs, ParseNode rhs, boolean negate, LikeType likeType) { + return new LikeParseNode(lhs, rhs, negate, likeType); + } + + public LiteralParseNode literal(Object value) { + return new LiteralParseNode(value); + } + + public LiteralParseNode realNumber(String text) { + return new LiteralParseNode(new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT)); + } + + public LiteralParseNode wholeNumber(String text) { + int length = text.length(); + // We know it'll fit into long, might still fit into int + if (length <= PDataType.LONG_PRECISION - 1) { + long l = Long.parseLong(text); + if (l <= Integer.MAX_VALUE) { + // Fits into int + return new LiteralParseNode((int) l); + } + return new LiteralParseNode(l); + } + // Might still fit into long + BigDecimal d = new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT); + if (d.compareTo(MAX_LONG) <= 0) { + return new LiteralParseNode(d.longValueExact()); + } + // Doesn't fit into long + return new LiteralParseNode(d); + } + + public LiteralParseNode intOrLong(String text) { + long l = Long.parseLong(text); + if (l <= Integer.MAX_VALUE) { + // Fits into int + return new LiteralParseNode((int) l); + } + return new LiteralParseNode(l); + } + + public LiteralParseNode hexLiteral(String text) { + // The lexer has already removed everything but the digits + int length = text.length(); + if (length % 2 != 0) { + throw new IllegalArgumentException("Hex literals must have an even number of digits"); + } + byte[] bytes = Bytes.fromHex(text); + return new LiteralParseNode(bytes, PBinary.INSTANCE); + } + + public String stringToHexLiteral(String in) { + String noSpace = in.replaceAll(" ", ""); + if 
(!noSpace.matches("^[0-9a-fA-F]+$")) { + throw new IllegalArgumentException( + "Hex literal continuation line has non hex digit characters"); + } + return noSpace; + } + + public LiteralParseNode binLiteral(String text) { + // The lexer has already removed everything but the digits + int length = text.length(); + if (length % 8 != 0) { + throw new IllegalArgumentException("Binary literals must have a multiple of 8 digits"); + } + byte[] bytes = ByteUtil.fromAscii(text.toCharArray()); + return new LiteralParseNode(bytes, PBinary.INSTANCE); + } + + public String stringToBinLiteral(String in) { + String noSpace = in.replaceAll(" ", ""); + if (!noSpace.matches("^[0-1]+$")) { + throw new IllegalArgumentException( + "Binary literal continuation line has non binary digit characters"); + } + return noSpace; + } + + public CastParseNode cast(ParseNode expression, String dataType, Integer maxLength, + Integer scale) { + return new CastParseNode(expression, dataType, maxLength, scale, false); + } + + public CastParseNode cast(ParseNode expression, PDataType dataType, Integer maxLength, + Integer scale) { + return new CastParseNode(expression, dataType, maxLength, scale, false); + } + + public CastParseNode cast(ParseNode expression, PDataType dataType, Integer maxLength, + Integer scale, boolean arr) { + return new CastParseNode(expression, dataType, maxLength, scale, arr); + } + + public CastParseNode cast(ParseNode expression, String dataType, Integer maxLength, Integer scale, + boolean arr) { + return new CastParseNode(expression, dataType, maxLength, scale, arr); + } + + public RowValueConstructorParseNode rowValueConstructor(List l) { + return new RowValueConstructorParseNode(l); + } + + private void checkTypeMatch(PDataType expectedType, PDataType actualType) throws SQLException { + if (!expectedType.isCoercibleTo(actualType)) { + throw TypeMismatchException.newException(expectedType, actualType); + } + } + + public LiteralParseNode literal(Object value, PDataType expectedType) throws SQLException { + PDataType actualType = PDataType.fromLiteral(value); + if (actualType != null && actualType != expectedType) { + checkTypeMatch(expectedType, actualType); + value = expectedType.toObject(value, actualType); + } + return new LiteralParseNode(value); + /* + * Object typedValue = expectedType.toObject(value.toString()); return new + * LiteralParseNode(typedValue); */ - private synchronized static void initBuiltInFunctionMap() { - if (!BUILT_IN_FUNCTION_MAP.isEmpty()) { - return; - } - Class f = null; - try { - // Reflection based parsing which yields direct explicit function evaluation at runtime - for (int i = 0; i < CLIENT_SIDE_BUILT_IN_FUNCTIONS.size(); i++) { - f = CLIENT_SIDE_BUILT_IN_FUNCTIONS.get(i); - addBuiltInFunction(f); - } - for (ExpressionType et : ExpressionType.values()) { - Class ec = et.getExpressionClass(); - if (FunctionExpression.class.isAssignableFrom(ec)) { - @SuppressWarnings("unchecked") - Class c = (Class)ec; - addBuiltInFunction(f = c); - } - } - } catch (Exception e) { - throw new RuntimeException("Failed initialization of built-in functions at class '" + f + "'", e); - } - } - - private static BuiltInFunctionInfo getInfo(String name, List children) { - return get(SchemaUtil.normalizeIdentifier(name), children); - } - - public static BuiltInFunctionInfo get(String normalizedName, List children) { - initBuiltInFunctionMap(); - BuiltInFunctionInfo info = BUILT_IN_FUNCTION_MAP.get(new BuiltInFunctionKey(normalizedName,children.size())); - return info; - } - - public static 
Multimap getBuiltInFunctionMultimap(){ - initBuiltInFunctionMap(); - return BUILT_IN_FUNCTION_MULTIMAP; - } - - public ParseNodeFactory() { - } - - private static AtomicInteger tempAliasCounter = new AtomicInteger(0); - - @VisibleForTesting - public static int getTempAliasCounterValue() { - return tempAliasCounter.get(); - } - - @VisibleForTesting - public static void setTempAliasCounterValue(int newValue) { - tempAliasCounter.set(newValue); - } - - public static String createTempAlias() { - return "$" + tempAliasCounter.incrementAndGet(); - } - - public ExplainStatement explain(BindableStatement statement, ExplainType explainType) { - return new ExplainStatement(statement, explainType); - } - - public AliasedNode aliasedNode(String alias, ParseNode expression) { - return new AliasedNode(alias, expression); - } - - public AddParseNode add(List children) { - return new AddParseNode(children); - } - - public SubtractParseNode subtract(List children) { - return new SubtractParseNode(children); - } - - public MultiplyParseNode multiply(List children) { - return new MultiplyParseNode(children); - } - - public ModulusParseNode modulus(List children) { - return new ModulusParseNode(children); - } - - public AndParseNode and(List children) { - return new AndParseNode(children); - } - - public FamilyWildcardParseNode family(String familyName){ - return new FamilyWildcardParseNode(familyName, false); - } - - public TableWildcardParseNode tableWildcard(TableName tableName) { - return new TableWildcardParseNode(tableName, false); - } - - public WildcardParseNode wildcard() { - return WildcardParseNode.INSTANCE; - } - - public BetweenParseNode between(ParseNode l, ParseNode r1, ParseNode r2, boolean negate) { - return new BetweenParseNode(l, r1, r2, negate); - } - - public BindParseNode bind(String bind) { - return new BindParseNode(bind); - } - - public StringConcatParseNode concat(List children) { - return new StringConcatParseNode(children); - } - - public ColumnParseNode column(TableName tableName, String columnName, String alias) { - return new ColumnParseNode(tableName, columnName, alias); - } - - public ColumnName columnName(String columnName) { - return new ColumnName(columnName); - } - - public ColumnName columnName(String familyName, String columnName) { - return new ColumnName(familyName, columnName); - } - - public PropertyName propertyName(String propertyName) { - return new PropertyName(propertyName); - } - - public PropertyName propertyName(String familyName, String propertyName) { - return new PropertyName(familyName, propertyName); - } - - public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isNull, Integer maxLength, Integer scale, boolean isPK, SortOrder sortOrder, String expressionStr, boolean isRowTimestamp) { - return new ColumnDef(columnDefName, sqlTypeName, isNull, maxLength, scale, isPK, sortOrder, expressionStr, null, isRowTimestamp); - } - - public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, - boolean isArray, Integer arrSize, Boolean isNull, - Integer maxLength, Integer scale, boolean isPK, - SortOrder sortOrder, String expressionStr, Integer encodedQualifier, - boolean isRowTimestamp) { - return new ColumnDef(columnDefName, sqlTypeName, - isArray, arrSize, isNull, - maxLength, scale, isPK, - sortOrder, expressionStr, encodedQualifier, isRowTimestamp); - } - - - public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, - boolean isArray, Integer arrSize, Boolean isNull, - Integer maxLength, Integer scale, 
boolean isPK, - SortOrder sortOrder, String expressionStr, boolean isRowTimestamp) { - return new ColumnDef(columnDefName, sqlTypeName, - isArray, arrSize, isNull, - maxLength, scale, isPK, - sortOrder, expressionStr, null, isRowTimestamp); - } - - public ColumnDef columnDef(ColumnName columnDefName, String sqlTypeName, boolean isArray, Integer arrSize, Boolean isNull, Integer maxLength, Integer scale, boolean isPK, - SortOrder sortOrder, boolean isRowTimestamp) { - return new ColumnDef(columnDefName, sqlTypeName, isArray, arrSize, isNull, maxLength, scale, isPK, sortOrder, null, null, isRowTimestamp); - } - - public ColumnDefInPkConstraint columnDefInPkConstraint(ColumnName columnDefName, SortOrder sortOrder, boolean isRowTimestamp) { - return new ColumnDefInPkConstraint(columnDefName, sortOrder, isRowTimestamp); - } - - public PrimaryKeyConstraint primaryKey(String name, List columnDefs) { - return new PrimaryKeyConstraint(name, columnDefs); - } - - public IndexKeyConstraint indexKey( List> parseNodeAndSortOrder) { - return new IndexKeyConstraint(parseNodeAndSortOrder); - } - - public CreateTableStatement createTable( - TableName tableName, ListMultimap> props, - List columns, PrimaryKeyConstraint pkConstraint, - List splits, PTableType tableType, boolean ifNotExists, - TableName baseTableName, ParseNode tableTypeIdNode, int bindCount, - Boolean immutableRows, Map cqCounters, boolean noVerify) { - return new CreateTableStatement(tableName, props, columns, pkConstraint, splits, tableType, - ifNotExists, baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, - noVerify); - } - - public CreateTableStatement createTable( - TableName tableName, ListMultimap> props, - List columns, PrimaryKeyConstraint pkConstraint, List splits, - PTableType tableType, boolean ifNotExists, TableName baseTableName, - ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows, - Map cqCounters) { - return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, - baseTableName, tableTypeIdNode, bindCount, immutableRows, cqCounters, false); - } - - public CreateTableStatement createTable( - TableName tableName, ListMultimap> props, - List columns, PrimaryKeyConstraint pkConstraint, List splits, - PTableType tableType, boolean ifNotExists, TableName baseTableName, - ParseNode tableTypeIdNode, int bindCount, Boolean immutableRows) { - return createTable(tableName, props, columns, pkConstraint, splits, tableType, ifNotExists, - baseTableName, tableTypeIdNode, bindCount, immutableRows, null, false); - } - - public CreateSchemaStatement createSchema(String schemaName, boolean ifNotExists) { - return new CreateSchemaStatement(schemaName, ifNotExists); - } - - public CreateIndexStatement createIndex(NamedNode indexName, NamedTableNode dataTable, - IndexKeyConstraint ikConstraint, List includeColumns, - List splits, ListMultimap> props, - boolean ifNotExists, IndexType indexType, boolean async, int bindCount, - Map udfParseNodes, ParseNode where) { - return new CreateIndexStatement(indexName, dataTable, ikConstraint, includeColumns, splits, - props, ifNotExists, indexType, async, bindCount, udfParseNodes, where); - } - - public CreateCDCStatement createCDC(NamedNode cdcObj, TableName dataTable, - Set includeScopes, - ListMultimap> props, - boolean ifNotExists, int bindCount) { - return new CreateCDCStatement(cdcObj, dataTable, includeScopes, - props, ifNotExists, bindCount); - } - - public CreateSequenceStatement createSequence(TableName tableName, ParseNode startsWith, - 
ParseNode incrementBy, ParseNode cacheSize, ParseNode minValue, ParseNode maxValue, - boolean cycle, boolean ifNotExits, int bindCount) { - return new CreateSequenceStatement(tableName, startsWith, incrementBy, cacheSize, minValue, - maxValue, cycle, ifNotExits, bindCount); - } - - public CreateFunctionStatement createFunction(PFunction functionInfo, boolean temporary, boolean isReplace) { - return new CreateFunctionStatement(functionInfo, temporary, isReplace); - } - - public AddJarsStatement addJars(List jarPaths) { - return new AddJarsStatement(jarPaths); - } - - public ListJarsStatement listJars() { - return new ListJarsStatement(); - } - - public DeleteJarStatement deleteJar(LiteralParseNode jarPath) { - return new DeleteJarStatement(jarPath); - } - - public DropFunctionStatement dropFunction(String functionName, boolean ifExists) { - return new DropFunctionStatement(functionName, ifExists); - } - - public DropSequenceStatement dropSequence(TableName tableName, boolean ifExits, int bindCount){ - return new DropSequenceStatement(tableName, ifExits, bindCount); - } - - public SequenceValueParseNode currentValueFor(TableName tableName) { - return new SequenceValueParseNode(tableName, SequenceValueParseNode.Op.CURRENT_VALUE, null); - } - - public SequenceValueParseNode nextValueFor(TableName tableName, ParseNode numToAllocateNode) { - return new SequenceValueParseNode(tableName, SequenceValueParseNode.Op.NEXT_VALUE, numToAllocateNode); - } - - public AddColumnStatement addColumn(NamedTableNode table, PTableType tableType, List columnDefs, boolean ifNotExists, ListMultimap> props, boolean cascade, Listindexes) { - return new AddColumnStatement(table, tableType, columnDefs, ifNotExists, props, cascade, indexes); - } - - public DropColumnStatement dropColumn(NamedTableNode table, PTableType tableType, List columnNodes, boolean ifExists) { - return new DropColumnStatement(table, tableType, columnNodes, ifExists); - } - - public DropTableStatement dropTable(TableName tableName, PTableType tableType, boolean ifExists, boolean cascade) { - return new DropTableStatement(tableName, tableType, ifExists, cascade, false); - } - - public DropIndexStatement dropIndex(NamedNode indexName, TableName tableName, boolean ifExists) { - return new DropIndexStatement(indexName, tableName, ifExists); - } - - public DropCDCStatement dropCDC(NamedNode cdcObjName, TableName tableName, boolean ifExists) { - return new DropCDCStatement(cdcObjName, tableName, ifExists); - } - - public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState state, boolean isRebuildAll, boolean async, ListMultimap> props) { - return new AlterIndexStatement(indexTableNode, dataTableName, ifExists, state, isRebuildAll, async, props); - } - - public AlterIndexStatement alterIndex(NamedTableNode indexTableNode, String dataTableName, boolean ifExists, PIndexState state) { - return new AlterIndexStatement(indexTableNode, dataTableName, ifExists, state, false, false); - } - - public TraceStatement trace(boolean isTraceOn, double samplingRate) { - return new TraceStatement(isTraceOn, samplingRate); - } - - public AlterSessionStatement alterSession(Map props) { - return new AlterSessionStatement(props); - } - - public TableName table(String schemaName, String tableName) { - return TableName.createNormalized(schemaName,tableName); - } - - public NamedNode indexName(String name) { - return new NamedNode(name); - } - - public NamedNode cdcName(String name) { - return new NamedNode(name); - 
} - - @Deprecated - public NamedTableNode namedTable(String alias, TableName name) { - return new NamedTableNode(alias, name); - } - - @Deprecated - public NamedTableNode namedTable(String alias, TableName name, List dyn_columns) { - return new NamedTableNode(alias, name,dyn_columns); - } - - public NamedTableNode namedTable(String alias, TableName name, Double tableSamplingRate) { - return new NamedTableNode(alias, name, tableSamplingRate); - } - - public NamedTableNode namedTable(String alias, TableName name, List dyn_columns, Double tableSamplingRate) { - return new NamedTableNode(alias, name,dyn_columns, tableSamplingRate); - } - - public NamedTableNode namedTable(String alias, TableName name, List dyn_columns, LiteralParseNode tableSampleNode) { - Double tableSamplingRate; - if (tableSampleNode == null || tableSampleNode.getValue() == null) { - tableSamplingRate = ConcreteTableNode.DEFAULT_TABLE_SAMPLING_RATE; - } else if (tableSampleNode.getValue() instanceof Integer) { - tableSamplingRate = (double)((int)tableSampleNode.getValue()); - } else { - tableSamplingRate=((BigDecimal) tableSampleNode.getValue()).doubleValue(); - } - return new NamedTableNode(alias, name, dyn_columns, tableSamplingRate); - } - - public BindTableNode bindTable(String alias, TableName name) { - return new BindTableNode(alias, name); - } - - public CaseParseNode caseWhen(List children) { - return new CaseParseNode(children); - } - - public DivideParseNode divide(List children) { - return new DivideParseNode(children); - } - - public UpdateStatisticsStatement updateStatistics(NamedTableNode table, StatisticsCollectionScope scope, Map props) { - return new UpdateStatisticsStatement(table, scope, props); - } - - public ExecuteUpgradeStatement executeUpgrade() { - return new ExecuteUpgradeStatement(); - } - - - public FunctionParseNode functionDistinct(String name, List args) { - if (CountAggregateFunction.NAME.equals(SchemaUtil.normalizeIdentifier(name))) { - BuiltInFunctionInfo info = getInfo( - SchemaUtil.normalizeIdentifier(DistinctCountAggregateFunction.NAME), args); - return new DistinctCountParseNode(DistinctCountAggregateFunction.NAME, args, info); - } else { - throw new UnsupportedOperationException("DISTINCT not supported with " + name); - } - } - - public FunctionParseNode arrayElemRef(List args) { - return function(ARRAY_ELEM, args); - } - - public FunctionParseNode function(String name, List args) { - BuiltInFunctionInfo info = getInfo(name, args); - if (info == null) { - return new UDFParseNode(name, args, info); - } - Constructor ctor = info.getNodeCtor(); - if (ctor == null) { - return info.isAggregate() - ? new AggregateFunctionParseNode(name, args, info) - : new FunctionParseNode(name, args, info); - } else { - try { - return ctor.newInstance(name, args, info); - } catch (Exception e) { - throw new RuntimeException(e); - } + } + + public LiteralParseNode literal(String value, String sqlTypeName) throws SQLException { + PDataType expectedType = sqlTypeName == null + ? 
null + : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); + if (expectedType == null || !expectedType.isCoercibleTo(PTimestamp.INSTANCE)) { + throw TypeMismatchException.newException(expectedType, PTimestamp.INSTANCE); + } + Object typedValue = expectedType.toObject(value); + return new LiteralParseNode(typedValue); + } + + public LiteralParseNode coerce(LiteralParseNode literalNode, PDataType expectedType) + throws SQLException { + PDataType actualType = literalNode.getType(); + if (actualType != null) { + Object before = literalNode.getValue(); + checkTypeMatch(expectedType, actualType); + Object after = expectedType.toObject(before, actualType); + if (before != after) { + literalNode = literal(after); + } + } + return literalNode; + } + + public ComparisonParseNode comparison(CompareOperator op, ParseNode lhs, ParseNode rhs) { + switch (op) { + case LESS: + return lt(lhs, rhs); + case LESS_OR_EQUAL: + return lte(lhs, rhs); + case EQUAL: + return equal(lhs, rhs); + case NOT_EQUAL: + return notEqual(lhs, rhs); + case GREATER_OR_EQUAL: + return gte(lhs, rhs); + case GREATER: + return gt(lhs, rhs); + default: + throw new IllegalArgumentException("Unexpcted CompareOp of " + op); + } + } + + public ArrayAnyComparisonNode arrayAny(ParseNode rhs, ComparisonParseNode compareNode) { + return new ArrayAnyComparisonNode(rhs, compareNode); + } + + public ArrayAllComparisonNode arrayAll(ParseNode rhs, ComparisonParseNode compareNode) { + return new ArrayAllComparisonNode(rhs, compareNode); + } + + public ArrayAnyComparisonNode wrapInAny(CompareOperator op, ParseNode lhs, ParseNode rhs) { + return new ArrayAnyComparisonNode(rhs, + comparison(op, lhs, elementRef(Arrays. asList(rhs, literal(1))))); + } + + public ArrayAllComparisonNode wrapInAll(CompareOperator op, ParseNode lhs, ParseNode rhs) { + return new ArrayAllComparisonNode(rhs, + comparison(op, lhs, elementRef(Arrays. asList(rhs, literal(1))))); + } + + public ArrayElemRefNode elementRef(List parseNode) { + return new ArrayElemRefNode(parseNode); + } + + public GreaterThanParseNode gt(ParseNode lhs, ParseNode rhs) { + return new GreaterThanParseNode(lhs, rhs); + } + + public GreaterThanOrEqualParseNode gte(ParseNode lhs, ParseNode rhs) { + return new GreaterThanOrEqualParseNode(lhs, rhs); + } + + public LessThanParseNode lt(ParseNode lhs, ParseNode rhs) { + return new LessThanParseNode(lhs, rhs); + } + + public LessThanOrEqualParseNode lte(ParseNode lhs, ParseNode rhs) { + return new LessThanOrEqualParseNode(lhs, rhs); + } + + public EqualParseNode equal(ParseNode lhs, ParseNode rhs) { + return new EqualParseNode(lhs, rhs); + } + + public ArrayConstructorNode upsertStmtArrayNode(List upsertStmtArray) { + return new ArrayConstructorNode(upsertStmtArray); + } + + public ParseNode negate(ParseNode child) { + // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each re-parsing + if ( + LiteralParseNode.ONE.equals(child) + && ((LiteralParseNode) child).getType().isCoercibleTo(PLong.INSTANCE) + ) { + return LiteralParseNode.MINUS_ONE; + } + // Special case to convert Long.MIN_VALUE back to a Long. We can't initially represent it + // as a Long in the parser because we only represent positive values as constants in the + // parser, and ABS(Long.MIN_VALUE) is too big to fit into a Long. So we convert it back here. 
+ if (LiteralParseNode.MIN_LONG_AS_BIG_DECIMAL.equals(child)) { + return LiteralParseNode.MIN_LONG; + } + return new MultiplyParseNode(Arrays.asList(child, LiteralParseNode.MINUS_ONE)); + } + + public NotEqualParseNode notEqual(ParseNode lhs, ParseNode rhs) { + return new NotEqualParseNode(lhs, rhs); + } + + public ParseNode not(ParseNode child) { + if (child instanceof ExistsParseNode) { + return exists(child.getChildren().get(0), !((ExistsParseNode) child).isNegate()); + } + + return new NotParseNode(child); + } + + public OrParseNode or(List children) { + return new OrParseNode(children); + } + + public OrderByNode orderBy(ParseNode expression, boolean nullsLast, boolean orderAscending) { + return new OrderByNode(expression, nullsLast, orderAscending); + } + + public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, + List select, ParseNode where, List groupBy, ParseNode having, + List orderBy, LimitNode limit, OffsetNode offset, int bindCount, + boolean isAggregate, boolean hasSequence, List selects, + Map udfParseNodes) { + + return new SelectStatement(from, hint, isDistinct, select, where, + groupBy == null ? Collections. emptyList() : groupBy, having, + orderBy == null ? Collections. emptyList() : orderBy, limit, offset, bindCount, + isAggregate, hasSequence, + selects == null ? Collections. emptyList() : selects, udfParseNodes); + } + + public UpsertStatement upsert(NamedTableNode table, HintNode hint, List columns, + List values, SelectStatement select, int bindCount, + Map udfParseNodes, List> onDupKeyPairs) { + return new UpsertStatement(table, hint, columns, values, select, bindCount, udfParseNodes, + onDupKeyPairs); + } + + public CursorName cursorName(String name) { + return new CursorName(name); + } + + public DeclareCursorStatement declareCursor(CursorName cursor, SelectStatement select) { + return new DeclareCursorStatement(cursor, select); + } + + public FetchStatement fetch(CursorName cursor, boolean isNext, int fetchLimit) { + return new FetchStatement(cursor, isNext, fetchLimit); + } + + public OpenStatement open(CursorName cursor) { + return new OpenStatement(cursor); + } + + public CloseStatement close(CursorName cursor) { + return new CloseStatement(cursor); + } + + public DeleteStatement delete(NamedTableNode table, HintNode hint, ParseNode node, + List orderBy, LimitNode limit, int bindCount, + Map udfParseNodes) { + return new DeleteStatement(table, hint, node, orderBy, limit, bindCount, udfParseNodes); + } + + public SelectStatement select(SelectStatement statement, ParseNode where) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), where, statement.getGroupBy(), statement.getHaving(), + statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, ParseNode where, ParseNode having) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), where, statement.getGroupBy(), having, statement.getOrderBy(), + statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, List select, + ParseNode where, List groupBy, ParseNode having, List orderBy) { + 
return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), select, where, + groupBy, having, orderBy, statement.getLimit(), statement.getOffset(), + statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), + statement.getSelects(), statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, TableNode table) { + return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), + statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), + statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, TableNode table, ParseNode where) { + return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), where, + statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(), + statement.getOffset(), statement.getBindCount(), statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, boolean isDistinct, + List select) { + return select(statement.getFrom(), statement.getHint(), isDistinct, select, + statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), + statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, boolean isDistinct, + List select, ParseNode where) { + return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, + statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(), + statement.getOffset(), statement.getBindCount(), statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, boolean isDistinct, + List select, ParseNode where, List groupBy, boolean isAggregate) { + return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, groupBy, + statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), + statement.getBindCount(), isAggregate, statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, List orderBy) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), statement.getWhere(), statement.getGroupBy(), statement.getHaving(), + orderBy, statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, HintNode hint) { + return hint == null || hint.isEmpty() + ? 
statement + : select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(), + statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), + statement.getLimit(), statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, HintNode hint, ParseNode where) { + return select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(), where, + statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(), + statement.getOffset(), statement.getBindCount(), statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, List orderBy, + LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), statement.getWhere(), statement.getGroupBy(), statement.getHaving(), + orderBy, limit, offset, bindCount, isAggregate || statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + + } + + public SelectStatement select(SelectStatement statement, LimitNode limit) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), statement.getWhere(), statement.getGroupBy(), statement.getHaving(), + statement.getOrderBy(), limit, statement.getOffset(), statement.getBindCount(), + statement.isAggregate(), statement.hasSequence(), statement.getSelects(), + statement.getUdfParseNodes()); + } + + public SelectStatement select(SelectStatement statement, List orderBy, + LimitNode limit, OffsetNode offset) { + return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), + statement.getSelect(), statement.getWhere(), statement.getGroupBy(), statement.getHaving(), + orderBy, limit, offset, statement.getBindCount(), statement.isAggregate(), + statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); + } + + public SelectStatement select(List statements, List orderBy, + LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate) { + if (statements.size() == 1) + return select(statements.get(0), orderBy, limit, offset, bindCount, isAggregate); + + // Get a list of adjusted aliases from a non-wildcard sub-select if any. + // We do not check the number of select nodes among all sub-selects, as + // it will be done later at compile stage. Empty or different aliases + // are ignored, since they cannot be referred by outer queries. + List aliases = Lists. newArrayList(); + Map udfParseNodes = new HashMap(1); + for (int i = 0; i < statements.size() && aliases.isEmpty(); i++) { + SelectStatement subselect = statements.get(i); + udfParseNodes.putAll(subselect.getUdfParseNodes()); + if (!subselect.hasWildcard()) { + for (AliasedNode aliasedNode : subselect.getSelect()) { + String alias = aliasedNode.getAlias(); + if (alias == null) { + alias = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias()); + } + aliases.add(alias == null ? 
createTempAlias() : alias); } + } } - public FunctionParseNode function(String name, List valueNodes, - List columnNodes, boolean isAscending) { - - List args = Lists.newArrayListWithExpectedSize(columnNodes.size() + valueNodes.size() + 1); - args.addAll(columnNodes); - args.add(new LiteralParseNode(Boolean.valueOf(isAscending))); - args.addAll(valueNodes); - - BuiltInFunctionInfo info = getInfo(name, args); - if (info == null) { - return new UDFParseNode(name,args,info); - } - Constructor ctor = info.getNodeCtor(); - if (ctor == null) { - return new AggregateFunctionWithinGroupParseNode(name, args, info); - } else { - try { - return ctor.newInstance(name, args, info); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - - public HintNode hint(String hint) { - return new HintNode(hint); - } - - public InListParseNode inList(List children, boolean negate) { - return new InListParseNode(children, negate); - } - - public ExistsParseNode exists(ParseNode child, boolean negate) { - return new ExistsParseNode(child, negate); - } - - public InParseNode in(ParseNode l, ParseNode r, boolean negate, boolean isSubqueryDistinct) { - return new InParseNode(l, r, negate, isSubqueryDistinct); - } - - public IsNullParseNode isNull(ParseNode child, boolean negate) { - return new IsNullParseNode(child, negate); - } - - public JoinTableNode join(JoinType type, TableNode lhs, TableNode rhs, ParseNode on, boolean singleValueOnly) { - return new JoinTableNode(type, lhs, rhs, on, singleValueOnly); - } - - public DerivedTableNode derivedTable (String alias, SelectStatement select) { - return new DerivedTableNode(alias, select); - } - - public LikeParseNode like(ParseNode lhs, ParseNode rhs, boolean negate, LikeType likeType) { - return new LikeParseNode(lhs, rhs, negate, likeType); - } - - public LiteralParseNode literal(Object value) { - return new LiteralParseNode(value); - } - - public LiteralParseNode realNumber(String text) { - return new LiteralParseNode(new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT)); - } - - public LiteralParseNode wholeNumber(String text) { - int length = text.length(); - // We know it'll fit into long, might still fit into int - if (length <= PDataType.LONG_PRECISION-1) { - long l = Long.parseLong(text); - if (l <= Integer.MAX_VALUE) { - // Fits into int - return new LiteralParseNode((int)l); - } - return new LiteralParseNode(l); - } - // Might still fit into long - BigDecimal d = new BigDecimal(text, PDataType.DEFAULT_MATH_CONTEXT); - if (d.compareTo(MAX_LONG) <= 0) { - return new LiteralParseNode(d.longValueExact()); - } - // Doesn't fit into long - return new LiteralParseNode(d); - } - - public LiteralParseNode intOrLong(String text) { - long l = Long.parseLong(text); - if (l <= Integer.MAX_VALUE) { - // Fits into int - return new LiteralParseNode((int)l); - } - return new LiteralParseNode(l); - } - - public LiteralParseNode hexLiteral(String text) { - // The lexer has already removed everything but the digits - int length = text.length(); - if (length % 2 != 0) { - throw new IllegalArgumentException("Hex literals must have an even number of digits"); - } - byte[] bytes = Bytes.fromHex(text); - return new LiteralParseNode(bytes, PBinary.INSTANCE); - } - - public String stringToHexLiteral(String in) { - String noSpace = in.replaceAll(" ", ""); - if (!noSpace.matches("^[0-9a-fA-F]+$")) { - throw new IllegalArgumentException( - "Hex literal continuation line has non hex digit characters"); - } - return noSpace; - } - - public LiteralParseNode binLiteral(String 
text) { - // The lexer has already removed everything but the digits - int length = text.length(); - if (length % 8 != 0) { - throw new IllegalArgumentException("Binary literals must have a multiple of 8 digits"); - } - byte[] bytes = ByteUtil.fromAscii(text.toCharArray()); - return new LiteralParseNode(bytes, PBinary.INSTANCE); - } - - public String stringToBinLiteral(String in) { - String noSpace = in.replaceAll(" ", ""); - if (!noSpace.matches("^[0-1]+$")) { - throw new IllegalArgumentException( - "Binary literal continuation line has non binary digit characters"); - } - return noSpace; - } - - public CastParseNode cast(ParseNode expression, String dataType, Integer maxLength, Integer scale) { - return new CastParseNode(expression, dataType, maxLength, scale, false); - } - - public CastParseNode cast(ParseNode expression, PDataType dataType, Integer maxLength, Integer scale) { - return new CastParseNode(expression, dataType, maxLength, scale, false); - } - - public CastParseNode cast(ParseNode expression, PDataType dataType, Integer maxLength, Integer scale, boolean arr) { - return new CastParseNode(expression, dataType, maxLength, scale, arr); - } - - public CastParseNode cast(ParseNode expression, String dataType, Integer maxLength, Integer scale, boolean arr) { - return new CastParseNode(expression, dataType, maxLength, scale, arr); - } - - public RowValueConstructorParseNode rowValueConstructor(List l) { - return new RowValueConstructorParseNode(l); - } - - private void checkTypeMatch (PDataType expectedType, PDataType actualType) throws SQLException { - if (!expectedType.isCoercibleTo(actualType)) { - throw TypeMismatchException.newException(expectedType, actualType); - } - } - - public LiteralParseNode literal(Object value, PDataType expectedType) throws SQLException { - PDataType actualType = PDataType.fromLiteral(value); - if (actualType != null && actualType != expectedType) { - checkTypeMatch(expectedType, actualType); - value = expectedType.toObject(value, actualType); - } - return new LiteralParseNode(value); - /* - Object typedValue = expectedType.toObject(value.toString()); - return new LiteralParseNode(typedValue); - */ - } - - public LiteralParseNode literal(String value, String sqlTypeName) throws SQLException { - PDataType expectedType = sqlTypeName == null ? 
null : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName)); - if (expectedType == null || !expectedType.isCoercibleTo(PTimestamp.INSTANCE)) { - throw TypeMismatchException.newException(expectedType, PTimestamp.INSTANCE); - } - Object typedValue = expectedType.toObject(value); - return new LiteralParseNode(typedValue); - } - - public LiteralParseNode coerce(LiteralParseNode literalNode, PDataType expectedType) throws SQLException { - PDataType actualType = literalNode.getType(); - if (actualType != null) { - Object before = literalNode.getValue(); - checkTypeMatch(expectedType, actualType); - Object after = expectedType.toObject(before, actualType); - if (before != after) { - literalNode = literal(after); - } - } - return literalNode; - } - - public ComparisonParseNode comparison(CompareOperator op, ParseNode lhs, ParseNode rhs) { - switch (op){ - case LESS: - return lt(lhs,rhs); - case LESS_OR_EQUAL: - return lte(lhs,rhs); - case EQUAL: - return equal(lhs,rhs); - case NOT_EQUAL: - return notEqual(lhs,rhs); - case GREATER_OR_EQUAL: - return gte(lhs,rhs); - case GREATER: - return gt(lhs,rhs); - default: - throw new IllegalArgumentException("Unexpcted CompareOp of " + op); - } - } - - public ArrayAnyComparisonNode arrayAny(ParseNode rhs, ComparisonParseNode compareNode) { - return new ArrayAnyComparisonNode(rhs, compareNode); - } - - public ArrayAllComparisonNode arrayAll(ParseNode rhs, ComparisonParseNode compareNode) { - return new ArrayAllComparisonNode(rhs, compareNode); - } - - public ArrayAnyComparisonNode wrapInAny(CompareOperator op, ParseNode lhs, ParseNode rhs) { - return new ArrayAnyComparisonNode(rhs, comparison(op, lhs, elementRef(Arrays.asList(rhs, literal(1))))); - } - - public ArrayAllComparisonNode wrapInAll(CompareOperator op, ParseNode lhs, ParseNode rhs) { - return new ArrayAllComparisonNode(rhs, comparison(op, lhs, elementRef(Arrays.asList(rhs, literal(1))))); - } - - public ArrayElemRefNode elementRef(List parseNode) { - return new ArrayElemRefNode(parseNode); - } - - public GreaterThanParseNode gt(ParseNode lhs, ParseNode rhs) { - return new GreaterThanParseNode(lhs, rhs); - } - - - public GreaterThanOrEqualParseNode gte(ParseNode lhs, ParseNode rhs) { - return new GreaterThanOrEqualParseNode(lhs, rhs); + List aliasedNodes; + if (aliases.isEmpty()) { + aliasedNodes = Lists.newArrayList(aliasedNode(null, wildcard())); + } else { + aliasedNodes = Lists.newArrayListWithExpectedSize(aliases.size()); + for (String alias : aliases) { + aliasedNodes.add(aliasedNode(alias, column(null, alias, alias))); + } } - public LessThanParseNode lt(ParseNode lhs, ParseNode rhs) { - return new LessThanParseNode(lhs, rhs); - } + return select(null, HintNode.EMPTY_HINT_NODE, false, aliasedNodes, null, null, null, orderBy, + limit, offset, bindCount, false, false, statements, udfParseNodes); + } + public SubqueryParseNode subquery(SelectStatement select, boolean expectSingleRow) { + return new SubqueryParseNode(select, expectSingleRow); + } - public LessThanOrEqualParseNode lte(ParseNode lhs, ParseNode rhs) { - return new LessThanOrEqualParseNode(lhs, rhs); - } + public LimitNode limit(BindParseNode b) { + return new LimitNode(b); + } - public EqualParseNode equal(ParseNode lhs, ParseNode rhs) { - return new EqualParseNode(lhs, rhs); - } + public LimitNode limit(LiteralParseNode l) { + return new LimitNode(l); + } - public ArrayConstructorNode upsertStmtArrayNode(List upsertStmtArray) { - return new ArrayConstructorNode(upsertStmtArray); - } + public OffsetNode 
offset(BindParseNode b) throws SQLException { + return new OffsetNode(b); + } - public ParseNode negate(ParseNode child) { - // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each re-parsing - if (LiteralParseNode.ONE.equals(child) && ((LiteralParseNode)child).getType().isCoercibleTo( - PLong.INSTANCE)) { - return LiteralParseNode.MINUS_ONE; - } - // Special case to convert Long.MIN_VALUE back to a Long. We can't initially represent it - // as a Long in the parser because we only represent positive values as constants in the - // parser, and ABS(Long.MIN_VALUE) is too big to fit into a Long. So we convert it back here. - if (LiteralParseNode.MIN_LONG_AS_BIG_DECIMAL.equals(child)) { - return LiteralParseNode.MIN_LONG; - } - return new MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE)); - } + public OffsetNode offset(LiteralParseNode l) throws SQLException { + return new OffsetNode(l); + } - public NotEqualParseNode notEqual(ParseNode lhs, ParseNode rhs) { - return new NotEqualParseNode(lhs, rhs); - } + public OffsetNode offset(ComparisonParseNode r) throws SQLException { + return new OffsetNode(r); + } - public ParseNode not(ParseNode child) { - if (child instanceof ExistsParseNode) { - return exists(child.getChildren().get(0), !((ExistsParseNode) child).isNegate()); - } - - return new NotParseNode(child); - } + public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) { + return new DropSchemaStatement(schemaName, ifExists, cascade); + } + public UseSchemaStatement useSchema(String schemaName) { + return new UseSchemaStatement(schemaName); + } - public OrParseNode or(List children) { - return new OrParseNode(children); - } + public ChangePermsStatement changePermsStatement(String permsString, boolean isSchemaName, + TableName tableName, String schemaName, boolean isGroupName, LiteralParseNode userOrGroup, + boolean isGrantStatement) { + return new ChangePermsStatement(permsString, isSchemaName, tableName, schemaName, isGroupName, + userOrGroup, isGrantStatement); + } + public ShowTablesStatement showTablesStatement(String schema, String pattern) { + return new ShowTablesStatement(schema, pattern); + } - public OrderByNode orderBy(ParseNode expression, boolean nullsLast, boolean orderAscending) { - return new OrderByNode(expression, nullsLast, orderAscending); - } - - public SelectStatement select(TableNode from, HintNode hint, boolean isDistinct, List select, ParseNode where, - List groupBy, ParseNode having, List orderBy, LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate, - boolean hasSequence, List selects, Map udfParseNodes) { - - return new SelectStatement(from, hint, isDistinct, select, where, groupBy == null ? Collections.emptyList() : groupBy, having, - orderBy == null ? Collections.emptyList() : orderBy, limit, offset, bindCount, isAggregate, hasSequence, selects == null ? 
Collections.emptyList() : selects, udfParseNodes); - } - - public UpsertStatement upsert(NamedTableNode table, HintNode hint, List columns, List values, - SelectStatement select, int bindCount, - Map udfParseNodes, - List> onDupKeyPairs) { - return new UpsertStatement(table, hint, columns, values, select, bindCount, udfParseNodes, onDupKeyPairs); - } - - public CursorName cursorName(String name){ - return new CursorName(name); - } + public ShowSchemasStatement showSchemasStatement(String pattern) { + return new ShowSchemasStatement(pattern); + } - public DeclareCursorStatement declareCursor(CursorName cursor, SelectStatement select){ - return new DeclareCursorStatement(cursor, select); - } - - public FetchStatement fetch(CursorName cursor, boolean isNext, int fetchLimit){ - return new FetchStatement(cursor, isNext, fetchLimit); - } - - public OpenStatement open(CursorName cursor){ - return new OpenStatement(cursor); - } - - public CloseStatement close(CursorName cursor){ - return new CloseStatement(cursor); - } - - public DeleteStatement delete(NamedTableNode table, HintNode hint, ParseNode node, List orderBy, LimitNode limit, int bindCount, Map udfParseNodes) { - return new DeleteStatement(table, hint, node, orderBy, limit, bindCount, udfParseNodes); - } - - public SelectStatement select(SelectStatement statement, ParseNode where) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), statement.getHaving(), - statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, ParseNode where, ParseNode having) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), having, - statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, List select, ParseNode where, List groupBy, ParseNode having, List orderBy) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), - select, where, groupBy, having, orderBy, statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, TableNode table) { - return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), statement.getWhere(), statement.getGroupBy(), - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, TableNode table, ParseNode where) { - return select(table, statement.getHint(), statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement 
select(SelectStatement statement, boolean isDistinct, List select) { - return select(statement.getFrom(), statement.getHint(), isDistinct, select, statement.getWhere(), statement.getGroupBy(), - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, boolean isDistinct, List select, ParseNode where) { - return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, statement.getGroupBy(), - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, boolean isDistinct, List select, ParseNode where, List groupBy, boolean isAggregate) { - return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, groupBy, - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), isAggregate, - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, List orderBy) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), - statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, statement.getLimit(), - statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, HintNode hint) { - return hint == null || hint.isEmpty() ? 
statement : select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(), - statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), - statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, HintNode hint, ParseNode where) { - return select(statement.getFrom(), hint, statement.isDistinct(), statement.getSelect(), where, statement.getGroupBy(), - statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getOffset(), statement.getBindCount(), statement.isAggregate(), - statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, List orderBy, LimitNode limit, OffsetNode offset, int bindCount, boolean isAggregate) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), - statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit, offset, - bindCount, isAggregate || statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - - } - - public SelectStatement select(SelectStatement statement, LimitNode limit) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), - statement.getWhere(), statement.getGroupBy(), statement.getHaving(), statement.getOrderBy(), limit, - statement.getOffset(), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), - statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(SelectStatement statement, List orderBy, LimitNode limit, OffsetNode offset) { - return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), statement.getSelect(), - statement.getWhere(), statement.getGroupBy(), statement.getHaving(), orderBy, limit,offset, - statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), statement.getSelects(), statement.getUdfParseNodes()); - } - - public SelectStatement select(List statements, List orderBy, LimitNode limit, - OffsetNode offset, int bindCount, boolean isAggregate) { - if (statements.size() == 1) return select(statements.get(0), orderBy, limit, offset, bindCount, isAggregate); - - // Get a list of adjusted aliases from a non-wildcard sub-select if any. - // We do not check the number of select nodes among all sub-selects, as - // it will be done later at compile stage. Empty or different aliases - // are ignored, since they cannot be referred by outer queries. - List aliases = Lists. newArrayList(); - Map udfParseNodes = new HashMap(1); - for (int i = 0; i < statements.size() && aliases.isEmpty(); i++) { - SelectStatement subselect = statements.get(i); - udfParseNodes.putAll(subselect.getUdfParseNodes()); - if (!subselect.hasWildcard()) { - for (AliasedNode aliasedNode : subselect.getSelect()) { - String alias = aliasedNode.getAlias(); - if (alias == null) { - alias = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias()); - } - aliases.add(alias == null ? 
createTempAlias() : alias); - } - } - } - - List aliasedNodes; - if (aliases.isEmpty()) { - aliasedNodes = Lists.newArrayList(aliasedNode(null, wildcard())); - } else { - aliasedNodes = Lists.newArrayListWithExpectedSize(aliases.size()); - for (String alias : aliases) { - aliasedNodes.add(aliasedNode(alias, column(null, alias, alias))); - } - } - - return select(null, HintNode.EMPTY_HINT_NODE, false, aliasedNodes, - null, null, null, orderBy, limit,offset, bindCount, false, false, statements, udfParseNodes); - } - - public SubqueryParseNode subquery(SelectStatement select, boolean expectSingleRow) { - return new SubqueryParseNode(select, expectSingleRow); - } - - public LimitNode limit(BindParseNode b) { - return new LimitNode(b); - } - - public LimitNode limit(LiteralParseNode l) { - return new LimitNode(l); - } - - public OffsetNode offset(BindParseNode b) throws SQLException { - return new OffsetNode(b); - } - - public OffsetNode offset(LiteralParseNode l) throws SQLException { - return new OffsetNode(l); - } - - public OffsetNode offset(ComparisonParseNode r) throws SQLException { - return new OffsetNode(r); - } - - public DropSchemaStatement dropSchema(String schemaName, boolean ifExists, boolean cascade) { - return new DropSchemaStatement(schemaName, ifExists, cascade); - } - - public UseSchemaStatement useSchema(String schemaName) { - return new UseSchemaStatement(schemaName); - } - - public ChangePermsStatement changePermsStatement(String permsString, boolean isSchemaName, TableName tableName - , String schemaName, boolean isGroupName, LiteralParseNode userOrGroup, boolean isGrantStatement) { - return new ChangePermsStatement(permsString, isSchemaName, tableName, schemaName, isGroupName, userOrGroup, isGrantStatement); - } - - public ShowTablesStatement showTablesStatement(String schema, String pattern) { - return new ShowTablesStatement(schema, pattern); - } - - public ShowSchemasStatement showSchemasStatement(String pattern) { - return new ShowSchemasStatement(pattern); - } - - public ShowCreateTable showCreateTable(TableName tableName) { - return new ShowCreateTableStatement(tableName); - } + public ShowCreateTable showCreateTable(TableName tableName) { + return new ShowCreateTableStatement(tableName); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java index 7b66c760e5a..fb4f71a8c7b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,11 +18,9 @@ package org.apache.phoenix.parse; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Set; import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.FromCompiler; @@ -30,613 +28,624 @@ import org.apache.phoenix.schema.AmbiguousColumnException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.ColumnRef; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; /** - * * Base class for visitors that rewrite the expression node hierarchy - * - * * @since 0.1 */ public class ParseNodeRewriter extends TraverseAllParseNodeVisitor { - protected static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - - public static ParseNode rewrite(ParseNode where, ParseNodeRewriter rewriter) throws SQLException { - if (where == null) { - return null; - } + protected static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); + + public static ParseNode rewrite(ParseNode where, ParseNodeRewriter rewriter) throws SQLException { + if (where == null) { + return null; + } + rewriter.reset(); + return where.accept(rewriter); + } + + /** + *
+   * Resolve the inner alias for the selectStament.
+   * For the following sql:
+   *   {@code select aid,sum(age) agesum from merge where age >= 11 and age <= 33 group by aid order by agesum }
+   * "agesum" is an alias of "sum(age)", so this method rewrites the above sql to:
+   *   {@code  select aid,sum(age) agesum from merge where age >= 11 and age <= 33 group by aid order by sum(age) }
+   * 
+ */ + public static SelectStatement resolveInternalAlias(SelectStatement selectStament, + PhoenixConnection phoenixConnection) throws SQLException { + ColumnResolver columnResolver = + FromCompiler.getResolverForQuery(selectStament, phoenixConnection); + ParseNodeRewriter parseNodeRewriter = + new ParseNodeRewriter(columnResolver, selectStament.getSelect().size()); + return ParseNodeRewriter.rewrite(selectStament, parseNodeRewriter); + } + + /** + * Rewrite the select statement by switching any constants to the right hand side of the + * expression. + * @param statement the select statement + * @return new select statement + */ + public static SelectStatement rewrite(SelectStatement statement, ParseNodeRewriter rewriter) + throws SQLException { + Map aliasMap = rewriter.getAliasMap(); + TableNode from = statement.getFrom(); + TableNode normFrom = from == null ? null : from.accept(new TableNodeRewriter(rewriter)); + ParseNode where = statement.getWhere(); + ParseNode normWhere = where; + if (where != null) { + rewriter.reset(); + normWhere = where.accept(rewriter); + } + OffsetNode offsetNode = statement.getOffset(); + ParseNode offset = null; + ParseNode normOffset = null; + if (offsetNode != null) { + offset = statement.getOffset().getOffsetParseNode(); + normOffset = offset; + if (offset != null && !statement.getOffset().isIntegerOffset()) { rewriter.reset(); - return where.accept(rewriter); - } - - /** - *
-     * Resolve the inner alias for the selectStament.
-     * For following sql:
-     *   {@code select aid,sum(age) agesum from merge where age >=11 and age <= 33 group by aid order by agesum }
-     * "agesum" is an alias of "sum(age)", so for this method, the above sql is rewritten to:
-     *   {@code  select aid,sum(age) agesum from merge where age >= 11 and <= 33 group by aid order by sum(age) }
-     * 
- * @param selectStament - * @param phoenixConnection - * @return - * @throws SQLException - */ - public static SelectStatement resolveInternalAlias( - SelectStatement selectStament, PhoenixConnection phoenixConnection) throws SQLException { - ColumnResolver columnResolver = FromCompiler.getResolverForQuery(selectStament, phoenixConnection); - ParseNodeRewriter parseNodeRewriter = - new ParseNodeRewriter(columnResolver, selectStament.getSelect().size()); - return ParseNodeRewriter.rewrite(selectStament, parseNodeRewriter); - } - /** - * Rewrite the select statement by switching any constants to the right hand side - * of the expression. - * @param statement the select statement - * @return new select statement - * @throws SQLException - */ - public static SelectStatement rewrite(SelectStatement statement, ParseNodeRewriter rewriter) throws SQLException { - Map aliasMap = rewriter.getAliasMap(); - TableNode from = statement.getFrom(); - TableNode normFrom = from == null ? null : from.accept(new TableNodeRewriter(rewriter)); - ParseNode where = statement.getWhere(); - ParseNode normWhere = where; - if (where != null) { - rewriter.reset(); - normWhere = where.accept(rewriter); + normOffset = offset.accept(rewriter); + } + } + + List selectNodes = statement.getSelect(); + List normSelectNodes = selectNodes; + for (int i = 0; i < selectNodes.size(); i++) { + AliasedNode aliasedNode = selectNodes.get(i); + ParseNode selectNode = aliasedNode.getNode(); + rewriter.reset(); + ParseNode normSelectNode = selectNode.accept(rewriter); + if (selectNode == normSelectNode) { + if (selectNodes != normSelectNodes) { + normSelectNodes.add(aliasedNode); } - OffsetNode offsetNode = statement.getOffset(); - ParseNode offset = null; - ParseNode normOffset = null; - if(offsetNode != null) { - offset = statement.getOffset().getOffsetParseNode(); - normOffset = offset; - if (offset != null && !statement.getOffset().isIntegerOffset()) { - rewriter.reset(); - normOffset = offset.accept(rewriter); - } + continue; + } + if (selectNodes == normSelectNodes) { + normSelectNodes = Lists.newArrayList(selectNodes.subList(0, i)); + } + AliasedNode normAliasedNode = NODE_FACTORY.aliasedNode( + aliasedNode.isCaseSensitve() ? '"' + aliasedNode.getAlias() + '"' : aliasedNode.getAlias(), + normSelectNode); + normSelectNodes.add(normAliasedNode); + } + // Add to map in separate pass so that we don't try to use aliases + // while processing the select expressions + if (aliasMap != null) { + for (int i = 0; i < normSelectNodes.size(); i++) { + AliasedNode aliasedNode = normSelectNodes.get(i); + ParseNode selectNode = aliasedNode.getNode(); + String alias = aliasedNode.getAlias(); + if (alias != null) { + aliasMap.put(alias, selectNode); } - - List selectNodes = statement.getSelect(); - List normSelectNodes = selectNodes; - for (int i = 0; i < selectNodes.size(); i++) { - AliasedNode aliasedNode = selectNodes.get(i); - ParseNode selectNode = aliasedNode.getNode(); - rewriter.reset(); - ParseNode normSelectNode = selectNode.accept(rewriter); - if (selectNode == normSelectNode) { - if (selectNodes != normSelectNodes) { - normSelectNodes.add(aliasedNode); - } - continue; - } - if (selectNodes == normSelectNodes) { - normSelectNodes = Lists.newArrayList(selectNodes.subList(0, i)); - } - AliasedNode normAliasedNode = NODE_FACTORY.aliasedNode(aliasedNode.isCaseSensitve() ? 
'"' + aliasedNode.getAlias() + '"' : aliasedNode.getAlias(), normSelectNode); - normSelectNodes.add(normAliasedNode); + } + } + + List groupByNodes = statement.getGroupBy(); + List normGroupByNodes = groupByNodes; + for (int i = 0; i < groupByNodes.size(); i++) { + ParseNode groupByNode = groupByNodes.get(i); + rewriter.reset(); + ParseNode normGroupByNode = groupByNode.accept(rewriter); + if (groupByNode == normGroupByNode) { + if (groupByNodes != normGroupByNodes) { + normGroupByNodes.add(groupByNode); } - // Add to map in separate pass so that we don't try to use aliases - // while processing the select expressions - if (aliasMap != null) { - for (int i = 0; i < normSelectNodes.size(); i++) { - AliasedNode aliasedNode = normSelectNodes.get(i); - ParseNode selectNode = aliasedNode.getNode(); - String alias = aliasedNode.getAlias(); - if (alias != null) { - aliasMap.put(alias, selectNode); - } - } + continue; + } + if (groupByNodes == normGroupByNodes) { + normGroupByNodes = Lists.newArrayList(groupByNodes.subList(0, i)); + } + normGroupByNodes.add(normGroupByNode); + } + + ParseNode having = statement.getHaving(); + ParseNode normHaving = having; + if (having != null) { + rewriter.reset(); + normHaving = having.accept(rewriter); + } + + List orderByNodes = statement.getOrderBy(); + List normOrderByNodes = orderByNodes; + for (int i = 0; i < orderByNodes.size(); i++) { + OrderByNode orderByNode = orderByNodes.get(i); + ParseNode node = orderByNode.getNode(); + rewriter.reset(); + ParseNode normNode = node.accept(rewriter); + if (node == normNode) { + if (orderByNodes != normOrderByNodes) { + normOrderByNodes.add(orderByNode); } - - List groupByNodes = statement.getGroupBy(); - List normGroupByNodes = groupByNodes; - for (int i = 0; i < groupByNodes.size(); i++) { - ParseNode groupByNode = groupByNodes.get(i); - rewriter.reset(); - ParseNode normGroupByNode = groupByNode.accept(rewriter); - if (groupByNode == normGroupByNode) { - if (groupByNodes != normGroupByNodes) { - normGroupByNodes.add(groupByNode); - } - continue; - } - if (groupByNodes == normGroupByNodes) { - normGroupByNodes = Lists.newArrayList(groupByNodes.subList(0, i)); - } - normGroupByNodes.add(normGroupByNode); + continue; + } + if (orderByNodes == normOrderByNodes) { + normOrderByNodes = Lists.newArrayList(orderByNodes.subList(0, i)); + } + normOrderByNodes + .add(NODE_FACTORY.orderBy(normNode, orderByNode.isNullsLast(), orderByNode.isAscending())); + } + + // Return new SELECT statement with updated WHERE clause + if ( + normFrom == from && normWhere == where && normHaving == having + && selectNodes == normSelectNodes && groupByNodes == normGroupByNodes + && orderByNodes == normOrderByNodes && normOffset == offset + ) { + return statement; + } + return NODE_FACTORY.select(normFrom, statement.getHint(), statement.isDistinct(), + normSelectNodes, normWhere, normGroupByNodes, normHaving, normOrderByNodes, + statement.getLimit(), normOffset == null ? 
null : new OffsetNode(normOffset), + statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), + statement.getSelects(), statement.getUdfParseNodes()); + } + + private Map getAliasMap() { + return aliasMap; + } + + private final ColumnResolver resolver; + private final Map aliasMap; + private int nodeCount; + + public boolean isTopLevel() { + return nodeCount == 0; + } + + protected ParseNodeRewriter() { + this.resolver = null; + this.aliasMap = null; + } + + protected ParseNodeRewriter(ColumnResolver resolver) { + this.resolver = resolver; + this.aliasMap = null; + } + + protected ParseNodeRewriter(ColumnResolver resolver, int maxAliasCount) { + this.resolver = resolver; + this.aliasMap = Maps.newHashMapWithExpectedSize(maxAliasCount); + } + + protected ColumnResolver getResolver() { + return resolver; + } + + protected void reset() { + this.nodeCount = 0; + } + + protected static interface CompoundNodeFactory { + ParseNode createNode(List children); + } + + protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, + CompoundNodeFactory factory) { + if (children.equals(node.getChildren())) { + return node; + } else { // Child nodes have been inverted (because a literal was found on LHS) + return factory.createNode(children); + } + } + + @Override + protected void enterParseNode(ParseNode node) { + } + + @Override + public ParseNode visitLeave(AndParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.and(children); + } + }); + } + + @Override + public ParseNode visitLeave(OrParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.or(children); + } + }); + } + + @Override + public ParseNode visitLeave(SubtractParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.subtract(children); + } + }); + } + + @Override + public ParseNode visitLeave(AddParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.add(children); + } + }); + } + + @Override + public ParseNode visitLeave(MultiplyParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.multiply(children); + } + }); + } + + @Override + public ParseNode visitLeave(DivideParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.divide(children); + } + }); + } + + @Override + public ParseNode visitLeave(ModulusParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.modulus(children); + } + }); + } + + @Override + public ParseNode visitLeave(final FunctionParseNode node, List nodes) + throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + 
return NODE_FACTORY.function(node.getName(), children); + } + }); + } + + @Override + public ParseNode visitLeave(CaseParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.caseWhen(children); + } + }); + } + + @Override + public ParseNode visitLeave(final LikeParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.like(children.get(0), children.get(1), node.isNegate(), + node.getLikeType()); + } + }); + } + + @Override + public ParseNode visitLeave(NotParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.not(children.get(0)); + } + }); + } + + @Override + public ParseNode visitLeave(final ExistsParseNode node, List nodes) + throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.exists(children.get(0), node.isNegate()); + } + }); + } + + @Override + public ParseNode visitLeave(final CastParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.cast(children.get(0), node.getDataType(), node.getMaxLength(), + node.getScale()); + } + }); + } + + @Override + public ParseNode visitLeave(final InListParseNode node, List nodes) + throws SQLException { + ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.inList(children, node.isNegate()); + } + }); + return normNode; + } + + @Override + public ParseNode visitLeave(final InParseNode node, List nodes) throws SQLException { + ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.in(children.get(0), children.get(1), node.isNegate(), + node.isSubqueryDistinct()); + } + }); + return normNode; + } + + @Override + public ParseNode visitLeave(final IsNullParseNode node, List nodes) + throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.isNull(children.get(0), node.isNegate()); + } + }); + } + + @Override + public ParseNode visitLeave(final ComparisonParseNode node, List nodes) + throws SQLException { + ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.comparison(node.getFilterOp(), children.get(0), children.get(1)); + } + }); + return normNode; + } + + @Override + public ParseNode visitLeave(final BetweenParseNode node, List nodes) + throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + if (node.isNegate()) { + return NODE_FACTORY.not(NODE_FACTORY.and(children)); + } else { + return NODE_FACTORY.and(children); } - - ParseNode having = statement.getHaving(); - ParseNode normHaving= having; - if (having != null) { - rewriter.reset(); - normHaving = 
having.accept(rewriter); + } + }); + } + + @Override + public ParseNode visit(ColumnParseNode node) throws SQLException { + // If we're resolving aliases and we have an unqualified ColumnParseNode, + // check if we find the name in our alias map. + if (aliasMap != null && node.getTableName() == null) { + ParseNode aliasedNode = aliasMap.get(node.getName()); + // If we found something, then try to resolve it unless the two nodes are the same + if (aliasedNode != null && !node.equals(aliasedNode)) { + ColumnRef ref; + try { + ref = resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); + } catch (ColumnNotFoundException e) { + // Not able to resolve alias as a column name as well, so we use the alias + return aliasedNode; } - - List orderByNodes = statement.getOrderBy(); - List normOrderByNodes = orderByNodes; - for (int i = 0; i < orderByNodes.size(); i++) { - OrderByNode orderByNode = orderByNodes.get(i); - ParseNode node = orderByNode.getNode(); - rewriter.reset(); - ParseNode normNode = node.accept(rewriter); - if (node == normNode) { - if (orderByNodes != normOrderByNodes) { - normOrderByNodes.add(orderByNode); - } - continue; - } - if (orderByNodes == normOrderByNodes) { - normOrderByNodes = Lists.newArrayList(orderByNodes.subList(0, i)); - } - normOrderByNodes.add(NODE_FACTORY.orderBy(normNode, orderByNode.isNullsLast(), orderByNode.isAscending())); + // We have resolved it to a column, so now check if the aliased node can be resolved as the + // same column + if (aliasedNode instanceof ColumnParseNode) { + ColumnParseNode aliasedColumnNode = (ColumnParseNode) aliasedNode; + ColumnRef aliasedRef = resolver.resolveColumn(aliasedColumnNode.getSchemaName(), + aliasedColumnNode.getTableName(), aliasedColumnNode.getName()); + if (aliasedRef.equals(ref)) { + return aliasedNode; + } } - - // Return new SELECT statement with updated WHERE clause - if (normFrom == from && - normWhere == where && - normHaving == having && - selectNodes == normSelectNodes && - groupByNodes == normGroupByNodes && - orderByNodes == normOrderByNodes && - normOffset == offset - ) { - return statement; + // Otherwise it means we have a conflict + throw new AmbiguousColumnException(node.getName()); + } + } + return node; + } + + @Override + public ParseNode visit(LiteralParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visit(BindParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visit(WildcardParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visit(TableWildcardParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visit(SubqueryParseNode node) throws SQLException { + return node; + } + + @Override + public List newElementList(int size) { + nodeCount += size; + return new ArrayList(size); + } + + @Override + public ParseNode visitLeave(StringConcatParseNode node, List l) throws SQLException { + return leaveCompoundNode(node, l, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.concat(children); + } + }); + } + + @Override + public void addElement(List l, ParseNode element) { + nodeCount--; + if (element != null) { + l.add(element); + } + } + + @Override + public ParseNode visitLeave(RowValueConstructorParseNode node, List children) + throws SQLException { + // Strip trailing 
nulls from rvc as they have no meaning + if (children.get(children.size() - 1) == null) { + children = Lists.newArrayList(children); + do { + children.remove(children.size() - 1); + } while (children.size() > 0 && children.get(children.size() - 1) == null); + // If we're down to a single child, it's not a rvc anymore + if (children.size() == 0) { + return null; + } + if (children.size() == 1) { + return children.get(0); + } + } + // Flatten nested row value constructors, as this makes little sense and adds no information + List flattenedChildren = children; + for (int i = 0; i < children.size(); i++) { + ParseNode child = children.get(i); + if (child instanceof RowValueConstructorParseNode) { + if (flattenedChildren == children) { + flattenedChildren = + Lists.newArrayListWithExpectedSize(children.size() + child.getChildren().size()); + flattenedChildren.addAll(children.subList(0, i)); } - return NODE_FACTORY.select(normFrom, statement.getHint(), statement.isDistinct(), - normSelectNodes, normWhere, normGroupByNodes, normHaving, normOrderByNodes, - statement.getLimit(), normOffset == null ? null : new OffsetNode(normOffset), statement.getBindCount(), statement.isAggregate(), statement.hasSequence(), - statement.getSelects(), statement.getUdfParseNodes()); - } - - private Map getAliasMap() { - return aliasMap; - } - - private final ColumnResolver resolver; - private final Map aliasMap; - private int nodeCount; - - public boolean isTopLevel() { - return nodeCount == 0; - } - - protected ParseNodeRewriter() { - this.resolver = null; - this.aliasMap = null; - } - - protected ParseNodeRewriter(ColumnResolver resolver) { - this.resolver = resolver; - this.aliasMap = null; - } - - protected ParseNodeRewriter(ColumnResolver resolver, int maxAliasCount) { - this.resolver = resolver; - this.aliasMap = Maps.newHashMapWithExpectedSize(maxAliasCount); - } - - protected ColumnResolver getResolver() { - return resolver; - } - - protected void reset() { - this.nodeCount = 0; - } - - protected static interface CompoundNodeFactory { - ParseNode createNode(List children); - } - - protected ParseNode leaveCompoundNode(CompoundParseNode node, List children, CompoundNodeFactory factory) { - if (children.equals(node.getChildren())) { - return node; - } else { // Child nodes have been inverted (because a literal was found on LHS) - return factory.createNode(children); - } - } - - @Override - protected void enterParseNode(ParseNode node) { - } - - @Override - public ParseNode visitLeave(AndParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.and(children); - } - }); - } - - @Override - public ParseNode visitLeave(OrParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.or(children); - } - }); - } - - @Override - public ParseNode visitLeave(SubtractParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.subtract(children); - } - }); - } - - @Override - public ParseNode visitLeave(AddParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return 
NODE_FACTORY.add(children); - } - }); - } - - @Override - public ParseNode visitLeave(MultiplyParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.multiply(children); - } - }); - } - - @Override - public ParseNode visitLeave(DivideParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.divide(children); - } - }); - } - - @Override - public ParseNode visitLeave(ModulusParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.modulus(children); - } - }); - } - - @Override - public ParseNode visitLeave(final FunctionParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.function(node.getName(),children); - } - }); - } - - @Override - public ParseNode visitLeave(CaseParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.caseWhen(children); - } - }); - } - - @Override - public ParseNode visitLeave(final LikeParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.like(children.get(0),children.get(1),node.isNegate(), node.getLikeType()); - } - }); - } - - @Override - public ParseNode visitLeave(NotParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.not(children.get(0)); - } - }); - } - - @Override - public ParseNode visitLeave(final ExistsParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.exists(children.get(0), node.isNegate()); - } - }); - } - - @Override - public ParseNode visitLeave(final CastParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.cast(children.get(0), node.getDataType(), node.getMaxLength(), node.getScale()); - } - }); - } - - @Override - public ParseNode visitLeave(final InListParseNode node, List nodes) throws SQLException { - ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.inList(children, node.isNegate()); - } - }); - return normNode; - } - - @Override - public ParseNode visitLeave(final InParseNode node, List nodes) throws SQLException { - ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.in(children.get(0), children.get(1), node.isNegate(), node.isSubqueryDistinct()); - } - }); - return normNode; - } - - @Override - public ParseNode visitLeave(final IsNullParseNode node, List 
nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.isNull(children.get(0), node.isNegate()); - } - }); - } - - @Override - public ParseNode visitLeave(final ComparisonParseNode node, List nodes) throws SQLException { - ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.comparison(node.getFilterOp(), children.get(0), children.get(1)); - } - }); - return normNode; - } - - @Override - public ParseNode visitLeave(final BetweenParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - if(node.isNegate()) { - return NODE_FACTORY.not(NODE_FACTORY.and(children)); - } else { - return NODE_FACTORY.and(children); - } - } - }); - } - - @Override - public ParseNode visit(ColumnParseNode node) throws SQLException { - // If we're resolving aliases and we have an unqualified ColumnParseNode, - // check if we find the name in our alias map. - if (aliasMap != null && node.getTableName() == null) { - ParseNode aliasedNode = aliasMap.get(node.getName()); - // If we found something, then try to resolve it unless the two nodes are the same - if (aliasedNode != null && !node.equals(aliasedNode)) { - ColumnRef ref; - try { - ref = resolver.resolveColumn(node.getSchemaName(), node.getTableName(), node.getName()); - } catch (ColumnNotFoundException e) { - // Not able to resolve alias as a column name as well, so we use the alias - return aliasedNode; - } - // We have resolved it to a column, so now check if the aliased node can be resolved as the same column - if (aliasedNode instanceof ColumnParseNode) { - ColumnParseNode aliasedColumnNode = (ColumnParseNode) aliasedNode; - ColumnRef aliasedRef = resolver.resolveColumn(aliasedColumnNode.getSchemaName(), aliasedColumnNode.getTableName(), aliasedColumnNode.getName()); - if (aliasedRef.equals(ref)) { - return aliasedNode; - } - } - // Otherwise it means we have a conflict - throw new AmbiguousColumnException(node.getName()); - } - } - return node; - } - - @Override - public ParseNode visit(LiteralParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visit(BindParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visit(WildcardParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visit(TableWildcardParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visit(FamilyWildcardParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visit(SubqueryParseNode node) throws SQLException { - return node; - } - - @Override - public List newElementList(int size) { - nodeCount += size; - return new ArrayList(size); - } - - @Override - public ParseNode visitLeave(StringConcatParseNode node, List l) throws SQLException { - return leaveCompoundNode(node, l, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.concat(children); - } - }); - } - - @Override - public void addElement(List l, ParseNode element) { - nodeCount--; - if (element != null) { - l.add(element); - } - } - - @Override - public ParseNode visitLeave(RowValueConstructorParseNode node, List children) throws SQLException { - // Strip trailing 
nulls from rvc as they have no meaning - if (children.get(children.size()-1) == null) { - children = Lists.newArrayList(children); - do { - children.remove(children.size()-1); - } while (children.size() > 0 && children.get(children.size()-1) == null); - // If we're down to a single child, it's not a rvc anymore - if (children.size() == 0) { - return null; - } - if (children.size() == 1) { - return children.get(0); - } - } - // Flatten nested row value constructors, as this makes little sense and adds no information - List flattenedChildren = children; - for (int i = 0; i < children.size(); i++) { - ParseNode child = children.get(i); - if (child instanceof RowValueConstructorParseNode) { - if (flattenedChildren == children) { - flattenedChildren = Lists.newArrayListWithExpectedSize(children.size() + child.getChildren().size()); - flattenedChildren.addAll(children.subList(0, i)); - } - flattenedChildren.addAll(child.getChildren()); - } - } - - return leaveCompoundNode(node, flattenedChildren, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.rowValueConstructor(children); - } - }); - } - - @Override - public ParseNode visit(SequenceValueParseNode node) throws SQLException { - return node; - } - - @Override - public ParseNode visitLeave(ArrayConstructorNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.upsertStmtArrayNode(children); - } - }); - } - - private static class TableNodeRewriter implements TableNodeVisitor { - private final ParseNodeRewriter parseNodeRewriter; - - public TableNodeRewriter(ParseNodeRewriter parseNodeRewriter) { - this.parseNodeRewriter = parseNodeRewriter; - } - - @Override - public TableNode visit(BindTableNode boundTableNode) throws SQLException { - return boundTableNode; - } - - @Override - public TableNode visit(JoinTableNode joinNode) throws SQLException { - TableNode lhsNode = joinNode.getLHS(); - TableNode rhsNode = joinNode.getRHS(); - ParseNode onNode = joinNode.getOnNode(); - TableNode normLhsNode = lhsNode.accept(this); - TableNode normRhsNode = rhsNode.accept(this); - parseNodeRewriter.reset(); - ParseNode normOnNode = onNode == null ? 
null : onNode.accept(parseNodeRewriter); - if (lhsNode == normLhsNode && rhsNode == normRhsNode && onNode == normOnNode) - return joinNode; - - return NODE_FACTORY.join(joinNode.getType(), normLhsNode, normRhsNode, normOnNode, joinNode.isSingleValueOnly()); - } - - @Override - public TableNode visit(NamedTableNode namedTableNode) throws SQLException { - return namedTableNode; - } - - @Override - public TableNode visit(DerivedTableNode subselectNode) throws SQLException { - return subselectNode; - } - } - - @Override - public ParseNode visitLeave(ArrayAnyComparisonNode node, final List nodes) throws SQLException { - ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.arrayAny(nodes.get(0), (ComparisonParseNode) nodes.get(1)); - } - }); - return normNode; - } - - @Override - public ParseNode visitLeave(ArrayAllComparisonNode node, final List nodes) throws SQLException { - ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.arrayAll(nodes.get(0), (ComparisonParseNode) nodes.get(1)); - } - }); - return normNode; - } - - @Override - public ParseNode visitLeave(ArrayElemRefNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.elementRef(children); - } - }); - } + flattenedChildren.addAll(child.getChildren()); + } + } + + return leaveCompoundNode(node, flattenedChildren, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.rowValueConstructor(children); + } + }); + } + + @Override + public ParseNode visit(SequenceValueParseNode node) throws SQLException { + return node; + } + + @Override + public ParseNode visitLeave(ArrayConstructorNode node, List nodes) + throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.upsertStmtArrayNode(children); + } + }); + } + + private static class TableNodeRewriter implements TableNodeVisitor { + private final ParseNodeRewriter parseNodeRewriter; + + public TableNodeRewriter(ParseNodeRewriter parseNodeRewriter) { + this.parseNodeRewriter = parseNodeRewriter; + } + + @Override + public TableNode visit(BindTableNode boundTableNode) throws SQLException { + return boundTableNode; + } + + @Override + public TableNode visit(JoinTableNode joinNode) throws SQLException { + TableNode lhsNode = joinNode.getLHS(); + TableNode rhsNode = joinNode.getRHS(); + ParseNode onNode = joinNode.getOnNode(); + TableNode normLhsNode = lhsNode.accept(this); + TableNode normRhsNode = rhsNode.accept(this); + parseNodeRewriter.reset(); + ParseNode normOnNode = onNode == null ? 
null : onNode.accept(parseNodeRewriter); + if (lhsNode == normLhsNode && rhsNode == normRhsNode && onNode == normOnNode) return joinNode; + + return NODE_FACTORY.join(joinNode.getType(), normLhsNode, normRhsNode, normOnNode, + joinNode.isSingleValueOnly()); + } + + @Override + public TableNode visit(NamedTableNode namedTableNode) throws SQLException { + return namedTableNode; + } + + @Override + public TableNode visit(DerivedTableNode subselectNode) throws SQLException { + return subselectNode; + } + } + + @Override + public ParseNode visitLeave(ArrayAnyComparisonNode node, final List nodes) + throws SQLException { + ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.arrayAny(nodes.get(0), (ComparisonParseNode) nodes.get(1)); + } + }); + return normNode; + } + + @Override + public ParseNode visitLeave(ArrayAllComparisonNode node, final List nodes) + throws SQLException { + ParseNode normNode = leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.arrayAll(nodes.get(0), (ComparisonParseNode) nodes.get(1)); + } + }); + return normNode; + } + + @Override + public ParseNode visitLeave(ArrayElemRefNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.elementRef(children); + } + }); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeVisitor.java index 50edf917005..04e5efb4ef1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,109 +20,134 @@ import java.sql.SQLException; import java.util.List; - - /** - * - * Visitor for ParseNode in the node tree. Uses composite - * visitor pattern with enter/leave calls for any - * compound expression node. Only supported SQL constructs - * have visit methods. Unsupported constructs fall through - * to {@link #visitEnter(CompoundParseNode)} for - * compound parse nodes and {@link #visit(ParseNode)} - * for terminal parse nodes. - * - * + * Visitor for ParseNode in the node tree. Uses composite visitor pattern with enter/leave calls for + * any compound expression node. Only supported SQL constructs have visit methods. Unsupported + * constructs fall through to {@link #visitEnter(CompoundParseNode)} for compound parse nodes and + * {@link #visit(ParseNode)} for terminal parse nodes. 
* @since 0.1 */ public interface ParseNodeVisitor { - public List newElementList(int size); - public void addElement(List a, E element); - - public boolean visitEnter(LikeParseNode node) throws SQLException; - public E visitLeave(LikeParseNode node, List l) throws SQLException; - - public boolean visitEnter(AndParseNode node) throws SQLException; - public E visitLeave(AndParseNode node, List l) throws SQLException; - - public boolean visitEnter(OrParseNode node) throws SQLException; - public E visitLeave(OrParseNode node, List l) throws SQLException; - - public boolean visitEnter(FunctionParseNode node) throws SQLException; - public E visitLeave(FunctionParseNode node, List l) throws SQLException; - - public boolean visitEnter(ComparisonParseNode node) throws SQLException; - public E visitLeave(ComparisonParseNode node, List l) throws SQLException; - - public boolean visitEnter(CaseParseNode node) throws SQLException; - public E visitLeave(CaseParseNode node, List l) throws SQLException; - - public boolean visitEnter(CompoundParseNode node) throws SQLException; - public E visitLeave(CompoundParseNode node, List l) throws SQLException; - - public boolean visitEnter(AddParseNode node) throws SQLException; - public E visitLeave(AddParseNode node, List l) throws SQLException; - - public boolean visitEnter(MultiplyParseNode node) throws SQLException; - public E visitLeave(MultiplyParseNode node, List l) throws SQLException; - - public boolean visitEnter(ModulusParseNode node) throws SQLException; - public E visitLeave(ModulusParseNode node, List l) throws SQLException; - - public boolean visitEnter(DivideParseNode node) throws SQLException; - public E visitLeave(DivideParseNode node, List l) throws SQLException; - - public boolean visitEnter(SubtractParseNode node) throws SQLException; - public E visitLeave(SubtractParseNode node, List l) throws SQLException; - - public boolean visitEnter(NotParseNode node) throws SQLException; - public E visitLeave(NotParseNode node, List l) throws SQLException; - - public boolean visitEnter(ExistsParseNode node) throws SQLException; - public E visitLeave(ExistsParseNode node, List l) throws SQLException; - - public boolean visitEnter(InListParseNode node) throws SQLException; - public E visitLeave(InListParseNode node, List l) throws SQLException; - - public boolean visitEnter(InParseNode node) throws SQLException; - public E visitLeave(InParseNode node, List l) throws SQLException; - - public boolean visitEnter(IsNullParseNode node) throws SQLException; - public E visitLeave(IsNullParseNode node, List l) throws SQLException; - - public E visit(ColumnParseNode node) throws SQLException; - public E visit(LiteralParseNode node) throws SQLException; - public E visit(BindParseNode node) throws SQLException; - public E visit(WildcardParseNode node) throws SQLException; - public E visit(TableWildcardParseNode node) throws SQLException; - public E visit(FamilyWildcardParseNode node) throws SQLException; - public E visit(SubqueryParseNode node) throws SQLException; - public E visit(ParseNode node) throws SQLException; - - public boolean visitEnter(StringConcatParseNode node) throws SQLException; - public E visitLeave(StringConcatParseNode node, List l) throws SQLException; - - public boolean visitEnter(BetweenParseNode node) throws SQLException; - public E visitLeave(BetweenParseNode node, List l) throws SQLException; - - public boolean visitEnter(CastParseNode node) throws SQLException; - public E visitLeave(CastParseNode node, List l) throws SQLException; - - 
public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException; - public E visitLeave(RowValueConstructorParseNode node, List l) throws SQLException; - - public boolean visitEnter(ArrayConstructorNode node) throws SQLException; - public E visitLeave(ArrayConstructorNode node, List l) throws SQLException; - public E visit(SequenceValueParseNode node) throws SQLException; - - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException; - public E visitLeave(ArrayAllComparisonNode node, List l) throws SQLException; - - public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException; - public E visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException; - - public boolean visitEnter(ArrayElemRefNode node) throws SQLException; - public E visitLeave(ArrayElemRefNode node, List l) throws SQLException; - - + public List newElementList(int size); + + public void addElement(List a, E element); + + public boolean visitEnter(LikeParseNode node) throws SQLException; + + public E visitLeave(LikeParseNode node, List l) throws SQLException; + + public boolean visitEnter(AndParseNode node) throws SQLException; + + public E visitLeave(AndParseNode node, List l) throws SQLException; + + public boolean visitEnter(OrParseNode node) throws SQLException; + + public E visitLeave(OrParseNode node, List l) throws SQLException; + + public boolean visitEnter(FunctionParseNode node) throws SQLException; + + public E visitLeave(FunctionParseNode node, List l) throws SQLException; + + public boolean visitEnter(ComparisonParseNode node) throws SQLException; + + public E visitLeave(ComparisonParseNode node, List l) throws SQLException; + + public boolean visitEnter(CaseParseNode node) throws SQLException; + + public E visitLeave(CaseParseNode node, List l) throws SQLException; + + public boolean visitEnter(CompoundParseNode node) throws SQLException; + + public E visitLeave(CompoundParseNode node, List l) throws SQLException; + + public boolean visitEnter(AddParseNode node) throws SQLException; + + public E visitLeave(AddParseNode node, List l) throws SQLException; + + public boolean visitEnter(MultiplyParseNode node) throws SQLException; + + public E visitLeave(MultiplyParseNode node, List l) throws SQLException; + + public boolean visitEnter(ModulusParseNode node) throws SQLException; + + public E visitLeave(ModulusParseNode node, List l) throws SQLException; + + public boolean visitEnter(DivideParseNode node) throws SQLException; + + public E visitLeave(DivideParseNode node, List l) throws SQLException; + + public boolean visitEnter(SubtractParseNode node) throws SQLException; + + public E visitLeave(SubtractParseNode node, List l) throws SQLException; + + public boolean visitEnter(NotParseNode node) throws SQLException; + + public E visitLeave(NotParseNode node, List l) throws SQLException; + + public boolean visitEnter(ExistsParseNode node) throws SQLException; + + public E visitLeave(ExistsParseNode node, List l) throws SQLException; + + public boolean visitEnter(InListParseNode node) throws SQLException; + + public E visitLeave(InListParseNode node, List l) throws SQLException; + + public boolean visitEnter(InParseNode node) throws SQLException; + + public E visitLeave(InParseNode node, List l) throws SQLException; + + public boolean visitEnter(IsNullParseNode node) throws SQLException; + + public E visitLeave(IsNullParseNode node, List l) throws SQLException; + + public E visit(ColumnParseNode node) throws SQLException; + + public E visit(LiteralParseNode node) 
throws SQLException; + + public E visit(BindParseNode node) throws SQLException; + + public E visit(WildcardParseNode node) throws SQLException; + + public E visit(TableWildcardParseNode node) throws SQLException; + + public E visit(FamilyWildcardParseNode node) throws SQLException; + + public E visit(SubqueryParseNode node) throws SQLException; + + public E visit(ParseNode node) throws SQLException; + + public boolean visitEnter(StringConcatParseNode node) throws SQLException; + + public E visitLeave(StringConcatParseNode node, List l) throws SQLException; + + public boolean visitEnter(BetweenParseNode node) throws SQLException; + + public E visitLeave(BetweenParseNode node, List l) throws SQLException; + + public boolean visitEnter(CastParseNode node) throws SQLException; + + public E visitLeave(CastParseNode node, List l) throws SQLException; + + public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException; + + public E visitLeave(RowValueConstructorParseNode node, List l) throws SQLException; + + public boolean visitEnter(ArrayConstructorNode node) throws SQLException; + + public E visitLeave(ArrayConstructorNode node, List l) throws SQLException; + + public E visit(SequenceValueParseNode node) throws SQLException; + + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException; + + public E visitLeave(ArrayAllComparisonNode node, List l) throws SQLException; + + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException; + + public E visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException; + + public boolean visitEnter(ArrayElemRefNode node) throws SQLException; + + public E visitLeave(ArrayElemRefNode node, List l) throws SQLException; + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PhoenixRowTimestampParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PhoenixRowTimestampParseNode.java index 2dcba1558aa..7c0d10ee7f9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PhoenixRowTimestampParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PhoenixRowTimestampParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.parse; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; + import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.expression.Expression; @@ -32,67 +35,67 @@ import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.util.SchemaUtil; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.List; - public class PhoenixRowTimestampParseNode extends FunctionParseNode { - PhoenixRowTimestampParseNode(String name, List children, - BuiltInFunctionInfo info) { - super(name, children, info); + PhoenixRowTimestampParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + /** + * Note: Although this ParseNode does not take any children, we are injecting an EMPTY_COLUMN + * KeyValueColumnExpression so that the EMPTY_COLUMN is evaluated during scan filter processing. + */ + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + + // PhoenixRowTimestampFunction does not take any parameters. + if (children.size() != 0) { + throw new IllegalArgumentException( + "PhoenixRowTimestampFunction does not take any parameters"); } - @Override - /** - * Note: Although this ParseNode does not take any children, we are injecting an EMPTY_COLUMN - * KeyValueColumnExpression so that the EMPTY_COLUMN is evaluated during scan filter processing. - */ - public FunctionExpression create(List children, StatementContext context) - throws SQLException { + // Get the empty column family and qualifier for the context. + PTable table = context.getCurrentTable().getTable(); + byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyColumnName = + table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - // PhoenixRowTimestampFunction does not take any parameters. - if (children.size() != 0) { - throw new IllegalArgumentException( - "PhoenixRowTimestampFunction does not take any parameters" - ); - } + // Create an empty column key value expression. + // This will cause the empty column key value to be evaluated during scan filter processing. + Expression emptyColumnExpression = new KeyValueColumnExpression(new PDatum() { + @Override + public boolean isNullable() { + return false; + } - // Get the empty column family and qualifier for the context. - PTable table = context.getCurrentTable().getTable(); - byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyColumnName = - table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS - ? QueryConstants.EMPTY_COLUMN_BYTES - : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + @Override + public PDataType getDataType() { + return PDate.INSTANCE; + } - // Create an empty column key value expression. - // This will cause the empty column key value to be evaluated during scan filter processing. 
- Expression emptyColumnExpression = new KeyValueColumnExpression(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PDate.INSTANCE; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, emptyColumnFamilyName, emptyColumnName); - List expressionList = Arrays.asList(new Expression[] {emptyColumnExpression}); - context.getScan().setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyColumnFamilyName); - context.getScan().setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyColumnName); - return new PhoenixRowTimestampFunction(expressionList); - } + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, emptyColumnFamilyName, emptyColumnName); + List expressionList = Arrays.asList(new Expression[] { emptyColumnExpression }); + context.getScan().setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, + emptyColumnFamilyName); + context.getScan().setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, + emptyColumnName); + return new PhoenixRowTimestampFunction(expressionList); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PrimaryKeyConstraint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PrimaryKeyConstraint.java index 802a87ff52a..9955c5fb3f3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PrimaryKeyConstraint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PrimaryKeyConstraint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,86 +24,89 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; public class PrimaryKeyConstraint extends NamedNode { - public static final PrimaryKeyConstraint EMPTY = new PrimaryKeyConstraint(null, Collections.emptyList()); + public static final PrimaryKeyConstraint EMPTY = + new PrimaryKeyConstraint(null, Collections. 
emptyList()); + + private final List> columns; + private final Map> columnNameToSortOrder; + private final Map> columnNameToRowTimestamp; + private final int numColumnsWithRowTimestamp; - private final List> columns; - private final Map> columnNameToSortOrder; - private final Map> columnNameToRowTimestamp; - private final int numColumnsWithRowTimestamp; - - public PrimaryKeyConstraint(String name, List columnDefs) { - super(name); - if (columnDefs == null) { - this.columns = Collections.>emptyList(); - this.columnNameToSortOrder = Collections.>emptyMap(); - this.columnNameToRowTimestamp = Collections.>emptyMap(); - numColumnsWithRowTimestamp = 0; - } else { - int numRowTimestampCols = 0; - List> l = new ArrayList<>(columnDefs.size()); - this.columnNameToSortOrder = Maps.newHashMapWithExpectedSize(columnDefs.size()); - this.columnNameToRowTimestamp = Maps.newHashMapWithExpectedSize(columnDefs.size()); - for (ColumnDefInPkConstraint colDef : columnDefs) { - Pair p = Pair.newPair(colDef.getColumnName(), colDef.getSortOrder()); - l.add(p); - this.columnNameToSortOrder.put(colDef.getColumnName(), p); - this.columnNameToRowTimestamp.put(colDef.getColumnName(), Pair.newPair(colDef.getColumnName(), colDef.isRowTimestamp())); - if (colDef.isRowTimestamp()) { - numRowTimestampCols++; - } - } - this.numColumnsWithRowTimestamp = numRowTimestampCols; - this.columns = ImmutableList.copyOf(l); + public PrimaryKeyConstraint(String name, List columnDefs) { + super(name); + if (columnDefs == null) { + this.columns = Collections.> emptyList(); + this.columnNameToSortOrder = Collections.> emptyMap(); + this.columnNameToRowTimestamp = + Collections.> emptyMap(); + numColumnsWithRowTimestamp = 0; + } else { + int numRowTimestampCols = 0; + List> l = new ArrayList<>(columnDefs.size()); + this.columnNameToSortOrder = Maps.newHashMapWithExpectedSize(columnDefs.size()); + this.columnNameToRowTimestamp = Maps.newHashMapWithExpectedSize(columnDefs.size()); + for (ColumnDefInPkConstraint colDef : columnDefs) { + Pair p = Pair.newPair(colDef.getColumnName(), colDef.getSortOrder()); + l.add(p); + this.columnNameToSortOrder.put(colDef.getColumnName(), p); + this.columnNameToRowTimestamp.put(colDef.getColumnName(), + Pair.newPair(colDef.getColumnName(), colDef.isRowTimestamp())); + if (colDef.isRowTimestamp()) { + numRowTimestampCols++; } + } + this.numColumnsWithRowTimestamp = numRowTimestampCols; + this.columns = ImmutableList.copyOf(l); } + } - public List> getColumnNames() { - return columns; - } - - public Pair getColumnWithSortOrder(ColumnName columnName) { - return columnNameToSortOrder.get(columnName); - } - - public boolean isColumnRowTimestamp(ColumnName columnName) { - return columnNameToRowTimestamp.get(columnName) != null && columnNameToRowTimestamp.get(columnName).getSecond() == Boolean.TRUE; - } - - public boolean contains(ColumnName columnName) { - return columnNameToSortOrder.containsKey(columnName); - } - - public int getNumColumnsWithRowTimestamp() { - return numColumnsWithRowTimestamp; - } - - @Override - public int hashCode() { - return super.hashCode(); - } + public List> getColumnNames() { + return columns; + } - @Override - public boolean equals(Object obj) { - return super.equals(obj); - } + public Pair getColumnWithSortOrder(ColumnName columnName) { + return columnNameToSortOrder.get(columnName); + } - @Override - public String toString() { - StringBuffer sb = new StringBuffer(); - for (Pair entry : columns) { - if(sb.length()!=0) { - sb.append(", "); - } - sb.append(entry.getFirst()); - 
if(entry.getSecond() != SortOrder.getDefault()) { - sb.append(" "+entry.getSecond()); - } - } - return sb.toString(); + public boolean isColumnRowTimestamp(ColumnName columnName) { + return columnNameToRowTimestamp.get(columnName) != null + && columnNameToRowTimestamp.get(columnName).getSecond() == Boolean.TRUE; + } + + public boolean contains(ColumnName columnName) { + return columnNameToSortOrder.containsKey(columnName); + } + + public int getNumColumnsWithRowTimestamp() { + return numColumnsWithRowTimestamp; + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + for (Pair entry : columns) { + if (sb.length() != 0) { + sb.append(", "); + } + sb.append(entry.getFirst()); + if (entry.getSecond() != SortOrder.getDefault()) { + sb.append(" " + entry.getSecond()); + } } + return sb.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PropertyName.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PropertyName.java index c6cc888f6b4..94b7e70fd23 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PropertyName.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/PropertyName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,29 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import org.apache.phoenix.util.SchemaUtil; public class PropertyName { - private final NamedNode familyName; - private final String propertyName; - - PropertyName(String familyName, String propertyName) { - this.familyName = familyName == null ? null : new NamedNode(familyName); - this.propertyName = SchemaUtil.normalizeIdentifier(propertyName);; - } + private final NamedNode familyName; + private final String propertyName; + + PropertyName(String familyName, String propertyName) { + this.familyName = familyName == null ? null : new NamedNode(familyName); + this.propertyName = SchemaUtil.normalizeIdentifier(propertyName); + ; + } - PropertyName(String columnName) { - this(null, columnName); - } + PropertyName(String columnName) { + this(null, columnName); + } - public String getFamilyName() { - return familyName == null ? "" : familyName.getName(); - } + public String getFamilyName() { + return familyName == null ? "" : familyName.getName(); + } - public String getPropertyName() { - return propertyName; - } -} \ No newline at end of file + public String getPropertyName() { + return propertyName; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java index 4d9840542a1..d58f3b60ba4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpReplaceParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,21 +35,20 @@ */ public class RegexpReplaceParseNode extends FunctionParseNode { - RegexpReplaceParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + RegexpReplaceParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public Expression create(List children, StatementContext context) - throws SQLException { - QueryServices services = context.getConnection().getQueryServices(); - boolean useByteBasedRegex = - services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, - QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); - if (useByteBasedRegex) { - return new ByteBasedRegexpReplaceFunction(children); - } else { - return new StringBasedRegexpReplaceFunction(children); - } + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + QueryServices services = context.getConnection().getQueryServices(); + boolean useByteBasedRegex = services.getProps().getBoolean( + QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); + if (useByteBasedRegex) { + return new ByteBasedRegexpReplaceFunction(children); + } else { + return new StringBasedRegexpReplaceFunction(children); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java index 74bee072f98..19894f5dd31 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSplitParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,21 +35,20 @@ */ public class RegexpSplitParseNode extends FunctionParseNode { - RegexpSplitParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + RegexpSplitParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public Expression create(List children, StatementContext context) - throws SQLException { - QueryServices services = context.getConnection().getQueryServices(); - boolean useByteBasedRegex = - services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, - QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); - if (useByteBasedRegex) { - return new ByteBasedRegexpSplitFunction(children); - } else { - return new StringBasedRegexpSplitFunction(children); - } + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + QueryServices services = context.getConnection().getQueryServices(); + boolean useByteBasedRegex = services.getProps().getBoolean( + QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); + if (useByteBasedRegex) { + return new ByteBasedRegexpSplitFunction(children); + } else { + return new StringBasedRegexpSplitFunction(children); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java index a9755500575..5b3b4239448 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RegexpSubstrParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,21 +35,20 @@ */ public class RegexpSubstrParseNode extends FunctionParseNode { - RegexpSubstrParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + RegexpSubstrParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public Expression create(List children, StatementContext context) - throws SQLException { - QueryServices services = context.getConnection().getQueryServices(); - boolean useByteBasedRegex = - services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, - QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); - if (useByteBasedRegex) { - return new ByteBasedRegexpSubstrFunction(children); - } else { - return new StringBasedRegexpSubstrFunction(children); - } + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + QueryServices services = context.getConnection().getQueryServices(); + boolean useByteBasedRegex = services.getProps().getBoolean( + QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); + if (useByteBasedRegex) { + return new ByteBasedRegexpSubstrFunction(children); + } else { + return new StringBasedRegexpSubstrFunction(children); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RoundParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RoundParseNode.java index 9bf4e7014a4..c90f9d8ede1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RoundParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RoundParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,59 +26,55 @@ import org.apache.phoenix.expression.function.RoundDecimalExpression; import org.apache.phoenix.expression.function.RoundFunction; import org.apache.phoenix.expression.function.RoundTimestampExpression; +import org.apache.phoenix.schema.TypeMismatchException; +import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.schema.TypeMismatchException; /** - * - * Parse node corresponding to {@link RoundFunction}. - * It also acts as a factory for creating the right kind of - * round expression according to the data type of the - * first child. - * - * + * Parse node corresponding to {@link RoundFunction}. It also acts as a factory for creating the + * right kind of round expression according to the data type of the first child. 
* @since 3.0.0 */ public class RoundParseNode extends FunctionParseNode { - RoundParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + RoundParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public Expression create(List children, StatementContext context) throws SQLException { - return getRoundExpression(children); - } + @Override + public Expression create(List children, StatementContext context) + throws SQLException { + return getRoundExpression(children); + } - public static Expression getRoundExpression(List children) throws SQLException { - final Expression firstChild = children.get(0); - final PDataType firstChildDataType = firstChild.getDataType(); - - if(firstChildDataType.isCoercibleTo(PDate.INSTANCE)) { - return RoundDateExpression.create(children); - } else if (firstChildDataType.isCoercibleTo(PTimestamp.INSTANCE)) { - return RoundTimestampExpression.create(children); - } else if(firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { - return RoundDecimalExpression.create(children); - } else { - throw TypeMismatchException.newException(firstChildDataType, "1"); - } - } - - /** - * When rounding off decimals, user need not specify the scale. In such cases, - * we need to prevent the function from getting evaluated as null. This is really - * a hack. A better way would have been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a - * way of associating default values for each permissible data type. - * Something like: @ Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} isConstant=true) - * Till then, this will have to do. - */ - @Override - public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { - return index == 0; + public static Expression getRoundExpression(List children) throws SQLException { + final Expression firstChild = children.get(0); + final PDataType firstChildDataType = firstChild.getDataType(); + + if (firstChildDataType.isCoercibleTo(PDate.INSTANCE)) { + return RoundDateExpression.create(children); + } else if (firstChildDataType.isCoercibleTo(PTimestamp.INSTANCE)) { + return RoundTimestampExpression.create(children); + } else if (firstChildDataType.isCoercibleTo(PDecimal.INSTANCE)) { + return RoundDecimalExpression.create(children); + } else { + throw TypeMismatchException.newException(firstChildDataType, "1"); } + } + + /** + * When rounding off decimals, user need not specify the scale. In such cases, we need to prevent + * the function from getting evaluated as null. This is really a hack. A better way would have + * been if {@link org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo} provided a way + * of associating default values for each permissible data type. Something like: @ + * Argument(allowedTypes={PDataType.VARCHAR, PDataType.INTEGER}, defaultValues = {"null", "1"} + * isConstant=true) Till then, this will have to do. 
+ */ + @Override + public boolean evalToNullIfParamIsNull(StatementContext context, int index) throws SQLException { + return index == 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java index 3d6d7f1bfb3..2659fd0fefe 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/RowValueConstructorParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,39 +24,36 @@ import org.apache.phoenix.compile.ColumnResolver; /** - * - * Node representing a row value constructor in SQL. - * - * + * Node representing a row value constructor in SQL. * @since 0.1 */ public class RowValueConstructorParseNode extends CompoundParseNode { - - public RowValueConstructorParseNode(List l) { - super(l); - } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + public RowValueConstructorParseNode(List l) { + super(l); + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - List children = getChildren(); - buf.append(' '); - buf.append('('); - if (!children.isEmpty()) { - for (ParseNode child : children) { - child.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - buf.append(')'); + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + List children = getChildren(); + buf.append(' '); + buf.append('('); + if (!children.isEmpty()) { + for (ParseNode child : children) { + child.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SQLParser.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SQLParser.java index b6b7de2937f..3b8d2815d5f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SQLParser.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SQLParser.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,242 +30,227 @@ import org.apache.phoenix.exception.PhoenixParserException; /** - * * SQL Parser for Phoenix - * - * * @since 0.1 */ public class SQLParser { - private static final ParseNodeFactory DEFAULT_NODE_FACTORY = new ParseNodeFactory(); + private static final ParseNodeFactory DEFAULT_NODE_FACTORY = new ParseNodeFactory(); - private final PhoenixSQLParser parser; + private final PhoenixSQLParser parser; - public static ParseNode parseCondition(String expression) throws SQLException { - if (expression == null) return null; - SQLParser parser = new SQLParser(expression); - return parser.parseExpression(); - } - - public SQLParser(String query) { - this(query,DEFAULT_NODE_FACTORY); - } + public static ParseNode parseCondition(String expression) throws SQLException { + if (expression == null) return null; + SQLParser parser = new SQLParser(expression); + return parser.parseExpression(); + } - public SQLParser(String query, ParseNodeFactory factory) { - PhoenixSQLLexer lexer; - try { - lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(new StringReader(query))); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - CommonTokenStream cts = new CommonTokenStream(lexer); - parser = new PhoenixSQLParser(cts); - parser.setParseNodeFactory(factory); - } + public SQLParser(String query) { + this(query, DEFAULT_NODE_FACTORY); + } - public SQLParser(Reader queryReader, ParseNodeFactory factory) throws IOException { - PhoenixSQLLexer lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(queryReader)); - CommonTokenStream cts = new CommonTokenStream(lexer); - parser = new PhoenixSQLParser(cts); - parser.setParseNodeFactory(factory); + public SQLParser(String query, ParseNodeFactory factory) { + PhoenixSQLLexer lexer; + try { + lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(new StringReader(query))); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible } + CommonTokenStream cts = new CommonTokenStream(lexer); + parser = new PhoenixSQLParser(cts); + parser.setParseNodeFactory(factory); + } - public SQLParser(Reader queryReader) throws IOException { - PhoenixSQLLexer lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(queryReader)); - CommonTokenStream cts = new CommonTokenStream(lexer); - parser = new PhoenixSQLParser(cts); - parser.setParseNodeFactory(DEFAULT_NODE_FACTORY); - } + public SQLParser(Reader queryReader, ParseNodeFactory factory) throws IOException { + PhoenixSQLLexer lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(queryReader)); + CommonTokenStream cts = new CommonTokenStream(lexer); + parser = new PhoenixSQLParser(cts); + parser.setParseNodeFactory(factory); + } + + public SQLParser(Reader queryReader) throws IOException { + PhoenixSQLLexer lexer = new PhoenixSQLLexer(new CaseInsensitiveReaderStream(queryReader)); + CommonTokenStream cts = new CommonTokenStream(lexer); + parser = new PhoenixSQLParser(cts); + parser.setParseNodeFactory(DEFAULT_NODE_FACTORY); + } - /** - * Parses the input as a series of semicolon-terminated SQL statements. 
- * @throws SQLException - */ - public BindableStatement nextStatement(ParseNodeFactory factory) throws SQLException { - try { - parser.resetBindCount(); - parser.setParseNodeFactory(factory); - BindableStatement statement = parser.nextStatement(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (UnsupportedOperationException e) { - throw new SQLFeatureNotSupportedException(e); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a series of semicolon-terminated SQL statements. + */ + public BindableStatement nextStatement(ParseNodeFactory factory) throws SQLException { + try { + parser.resetBindCount(); + parser.setParseNodeFactory(factory); + BindableStatement statement = parser.nextStatement(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (UnsupportedOperationException e) { + throw new SQLFeatureNotSupportedException(e); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL select or upsert statement. - * @throws SQLException - */ - public BindableStatement parseStatement() throws SQLException { - try { - BindableStatement statement = parser.statement(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (UnsupportedOperationException e) { - throw new SQLFeatureNotSupportedException(e); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL select or upsert statement. + */ + public BindableStatement parseStatement() throws SQLException { + try { + BindableStatement statement = parser.statement(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (UnsupportedOperationException e) { + throw new SQLFeatureNotSupportedException(e); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL select statement. - * Used only in tests - * @throws SQLException - */ - public SelectStatement parseQuery() throws SQLException { - try { - SelectStatement statement = parser.query(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL select statement. 
Used only in tests + */ + public SelectStatement parseQuery() throws SQLException { + try { + SelectStatement statement = parser.query(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL declare cursor statement. - * Used only in tests - * @throws SQLException - */ - public DeclareCursorStatement parseDeclareCursor() throws SQLException { - try { - DeclareCursorStatement statement = parser.declare_cursor_node(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL declare cursor statement. Used only in tests + */ + public DeclareCursorStatement parseDeclareCursor() throws SQLException { + try { + DeclareCursorStatement statement = parser.declare_cursor_node(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL cursor open statement. - * Used only in tests - * @throws SQLException - */ - public OpenStatement parseOpen() throws SQLException { - try { - OpenStatement statement = parser.cursor_open_node(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL cursor open statement. Used only in tests + */ + public OpenStatement parseOpen() throws SQLException { + try { + OpenStatement statement = parser.cursor_open_node(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL cursor close statement. - * Used only in tests - * @throws SQLException - */ - public CloseStatement parseClose() throws SQLException { - try { - CloseStatement statement = parser.cursor_close_node(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL cursor close statement. 
Used only in tests + */ + public CloseStatement parseClose() throws SQLException { + try { + CloseStatement statement = parser.cursor_close_node(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL cursor fetch statement. - * Used only in tests - * @throws SQLException - */ - public FetchStatement parseFetch() throws SQLException { - try { - FetchStatement statement = parser.cursor_fetch_node(); - return statement; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL cursor fetch statement. Used only in tests + */ + public FetchStatement parseFetch() throws SQLException { + try { + FetchStatement statement = parser.cursor_fetch_node(); + return statement; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL select statement. - * Used only in tests - * @throws SQLException - */ - public ParseNode parseExpression() throws SQLException { - try { - ParseNode node = parser.expression(); - return node; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL select statement. 
Used only in tests + */ + public ParseNode parseExpression() throws SQLException { + try { + ParseNode node = parser.expression(); + return node; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - /** - * Parses the input as a SQL literal - * @throws SQLException - */ - public LiteralParseNode parseLiteral() throws SQLException { - try { - LiteralParseNode literalNode = parser.literal(); - return literalNode; - } catch (RecognitionException e) { - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } catch (RuntimeException e) { - if (e.getCause() instanceof SQLException) { - throw (SQLException) e.getCause(); - } - throw PhoenixParserException.newException(e, parser.getTokenNames()); - } + /** + * Parses the input as a SQL literal + */ + public LiteralParseNode parseLiteral() throws SQLException { + try { + LiteralParseNode literalNode = parser.literal(); + return literalNode; + } catch (RecognitionException e) { + throw PhoenixParserException.newException(e, parser.getTokenNames()); + } catch (RuntimeException e) { + if (e.getCause() instanceof SQLException) { + throw (SQLException) e.getCause(); + } + throw PhoenixParserException.newException(e, parser.getTokenNames()); } + } - private static class CaseInsensitiveReaderStream extends ANTLRReaderStream { - CaseInsensitiveReaderStream(Reader script) throws IOException { - super(script); - } + private static class CaseInsensitiveReaderStream extends ANTLRReaderStream { + CaseInsensitiveReaderStream(Reader script) throws IOException { + super(script); + } - @Override - public int LA(int i) { - if (i == 0) { return 0; // undefined - } - if (i < 0) { - i++; // e.g., translate LA(-1) to use offset 0 - } + @Override + public int LA(int i) { + if (i == 0) { + return 0; // undefined + } + if (i < 0) { + i++; // e.g., translate LA(-1) to use offset 0 + } - if ((p + i - 1) >= n) { return CharStream.EOF; } - return Character.toLowerCase(data[p + i - 1]); - } + if ((p + i - 1) >= n) { + return CharStream.EOF; + } + return Character.toLowerCase(data[p + i - 1]); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatement.java index 3446eb73bcf..c03cb95b823 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,369 +31,360 @@ import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunctionInfo; /** - * * Top level node representing a SQL statement - * - * * @since 0.1 */ public class SelectStatement implements FilterableStatement { - public static final SelectStatement SELECT_STAR = - new SelectStatement( - null, null, false, - Arrays.asList(), - null, Collections.emptyList(), - null, Collections.emptyList(), - null, null, 0, false, false, Collections.emptyList(), new HashMap(1)); - public static final SelectStatement SELECT_ONE = - new SelectStatement( - null, null, false, - Collections.singletonList(new AliasedNode(null, LiteralParseNode.ONE)), - null, Collections.emptyList(), - null, Collections.emptyList(), - null, null, 0, false, false, Collections.emptyList(), new HashMap(1)); - public static final SelectStatement COUNT_ONE = - new SelectStatement( - null, null, false, - Collections.singletonList( - new AliasedNode(null, - new AggregateFunctionParseNode( - CountAggregateFunction.NORMALIZED_NAME, - LiteralParseNode.STAR, - new BuiltInFunctionInfo(CountAggregateFunction.class, CountAggregateFunction.class.getAnnotation(BuiltInFunction.class))))), - null, Collections.emptyList(), - null, Collections.emptyList(), - null,null, 0, true, false, Collections.emptyList(), new HashMap(1)); - public static SelectStatement create(SelectStatement select, HintNode hint) { - if (select.getHint() == hint || hint.isEmpty()) { - return select; - } - return new SelectStatement(select.getFrom(), hint, select.isDistinct(), - select.getSelect(), select.getWhere(), select.getGroupBy(), select.getHaving(), - select.getOrderBy(), select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); - } + public static final SelectStatement SELECT_STAR = new SelectStatement(null, null, false, + Arrays.asList(), null, Collections. emptyList(), null, + Collections. emptyList(), null, null, 0, false, false, + Collections. emptyList(), new HashMap(1)); + public static final SelectStatement SELECT_ONE = new SelectStatement(null, null, false, + Collections. singletonList(new AliasedNode(null, LiteralParseNode.ONE)), null, + Collections. emptyList(), null, Collections. emptyList(), null, null, 0, + false, false, Collections. emptyList(), new HashMap(1)); + public static final SelectStatement COUNT_ONE = new SelectStatement(null, null, false, + Collections. singletonList(new AliasedNode(null, + new AggregateFunctionParseNode(CountAggregateFunction.NORMALIZED_NAME, LiteralParseNode.STAR, + new BuiltInFunctionInfo(CountAggregateFunction.class, + CountAggregateFunction.class.getAnnotation(BuiltInFunction.class))))), + null, Collections. emptyList(), null, Collections. emptyList(), null, + null, 0, true, false, Collections. 
emptyList(), + new HashMap(1)); - public static SelectStatement create(SelectStatement select, TableNode tableNode, List selects) { - return new SelectStatement(tableNode, select.getHint(), select.isDistinct(), - selects, select.getWhere(), select.getGroupBy(), select.getHaving(), - select.getOrderBy(), select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); - } + public static SelectStatement create(SelectStatement select, HintNode hint) { + if (select.getHint() == hint || hint.isEmpty()) { + return select; + } + return new SelectStatement(select.getFrom(), hint, select.isDistinct(), select.getSelect(), + select.getWhere(), select.getGroupBy(), select.getHaving(), select.getOrderBy(), + select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), + select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); + } - public SelectStatement combine(ParseNode where) { - if (where == null) { - return this; - } - if (this.getWhere() != null) { - where = new AndParseNode(Arrays.asList(this.getWhere(), where)); - } - return new SelectStatement(this.getFrom(), this.getHint(), this.isDistinct(), - this.getSelect(), where, this.getGroupBy(), this.getHaving(), - this.getOrderBy(), this.getLimit(), this.getOffset(), this.getBindCount(), this.isAggregate(), this.hasSequence(), this.selects, this.udfParseNodes); - } - - public static SelectStatement create(SelectStatement select, List selects) { - return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), - selects, select.getWhere(), select.getGroupBy(), select.getHaving(), - select.getOrderBy(), select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); - } - - // Copy constructor for sub select statements in a union - public static SelectStatement create(SelectStatement select, List orderBy, LimitNode limit, - OffsetNode offset, boolean isAggregate) { - return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), select.getSelect(), - select.getWhere(), select.getGroupBy(), select.getHaving(), orderBy, limit, offset, - select.getBindCount(), isAggregate, select.hasSequence(), select.getSelects(), - select.getUdfParseNodes()); - } + public static SelectStatement create(SelectStatement select, TableNode tableNode, + List selects) { + return new SelectStatement(tableNode, select.getHint(), select.isDistinct(), selects, + select.getWhere(), select.getGroupBy(), select.getHaving(), select.getOrderBy(), + select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), + select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); + } - private final TableNode fromTable; - private final HintNode hint; - private final boolean isDistinct; - private final List select; - private final ParseNode where; - private final List groupBy; - private final ParseNode having; - private final List orderBy; - private final LimitNode limit; - private final int bindCount; - private final boolean isAggregate; - private final boolean hasSequence; - private final boolean hasWildcard; - private final List selects = new ArrayList(); - private final Map udfParseNodes; - private final OffsetNode offset; - - @Override - public final String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null,buf); - return buf.toString(); - } + public SelectStatement combine(ParseNode 
where) { + if (where == null) { + return this; + } + if (this.getWhere() != null) { + where = new AndParseNode(Arrays.asList(this.getWhere(), where)); + } + return new SelectStatement(this.getFrom(), this.getHint(), this.isDistinct(), this.getSelect(), + where, this.getGroupBy(), this.getHaving(), this.getOrderBy(), this.getLimit(), + this.getOffset(), this.getBindCount(), this.isAggregate(), this.hasSequence(), this.selects, + this.udfParseNodes); + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append("SELECT "); - if (hint != null) buf.append(hint); - if (isDistinct) buf.append("DISTINCT "); - for (AliasedNode selectNode : select) { - selectNode.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - if (fromTable != null) { - buf.append(" FROM "); - fromTable.toSQL(resolver, buf); - } - if (where != null) { - buf.append(" WHERE "); - where.toSQL(resolver, buf); - } - if (!groupBy.isEmpty()) { - buf.append(" GROUP BY "); - for (ParseNode node : groupBy) { - node.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - if (having != null) { - buf.append(" HAVING "); - having.toSQL(resolver, buf); - } - if (!orderBy.isEmpty()) { - buf.append(" ORDER BY "); - for (OrderByNode node : orderBy) { - node.toSQL(resolver, buf); - buf.append(','); - } - buf.setLength(buf.length()-1); - } - if (limit != null) { - buf.append(" LIMIT " + limit.toString()); - } - if (offset != null) { - buf.append(" OFFSET " + offset.toString()); - } - } - - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((fromTable == null) ? 0 : fromTable.hashCode()); - result = prime * result + ((groupBy == null) ? 0 : groupBy.hashCode()); - result = prime * result + ((having == null) ? 0 : having.hashCode()); - result = prime * result + ((hint == null) ? 0 : hint.hashCode()); - result = prime * result + (isDistinct ? 1231 : 1237); - result = prime * result + ((limit == null) ? 0 : limit.hashCode()); - result = prime * result + ((orderBy == null) ? 0 : orderBy.hashCode()); - result = prime * result + ((select == null) ? 0 : select.hashCode()); - result = prime * result + ((where == null) ? 
0 : where.hashCode()); - return result; - } + public static SelectStatement create(SelectStatement select, List selects) { + return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), selects, + select.getWhere(), select.getGroupBy(), select.getHaving(), select.getOrderBy(), + select.getLimit(), select.getOffset(), select.getBindCount(), select.isAggregate(), + select.hasSequence(), select.getSelects(), select.getUdfParseNodes()); + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - SelectStatement other = (SelectStatement)obj; - if (fromTable == null) { - if (other.fromTable != null) return false; - } else if (!fromTable.equals(other.fromTable)) return false; - if (groupBy == null) { - if (other.groupBy != null) return false; - } else if (!groupBy.equals(other.groupBy)) return false; - if (having == null) { - if (other.having != null) return false; - } else if (!having.equals(other.having)) return false; - if (hint == null) { - if (other.hint != null) return false; - } else if (!hint.equals(other.hint)) return false; - if (isDistinct != other.isDistinct) return false; - if (limit == null) { - if (other.limit != null) return false; - } else if (!limit.equals(other.limit)) return false; - if (orderBy == null) { - if (other.orderBy != null) return false; - } else if (!orderBy.equals(other.orderBy)) return false; - if (select == null) { - if (other.select != null) return false; - } else if (!select.equals(other.select)) return false; - if (where == null) { - if (other.where != null) return false; - } else if (!where.equals(other.where)) return false; - return true; - } + // Copy constructor for sub select statements in a union + public static SelectStatement create(SelectStatement select, List orderBy, + LimitNode limit, OffsetNode offset, boolean isAggregate) { + return new SelectStatement(select.getFrom(), select.getHint(), select.isDistinct(), + select.getSelect(), select.getWhere(), select.getGroupBy(), select.getHaving(), orderBy, + limit, offset, select.getBindCount(), isAggregate, select.hasSequence(), select.getSelects(), + select.getUdfParseNodes()); + } - // Count constant expressions - private static int countConstants(List nodes) { - int count = 0; - for (int i = 0; i < nodes.size(); i++) { - if (nodes.get(i).isStateless()) { - count++; - } - } - return count; - } - - protected SelectStatement(TableNode from, HintNode hint, boolean isDistinct, List select, - ParseNode where, List groupBy, ParseNode having, List orderBy, LimitNode limit, - OffsetNode offset, int bindCount, boolean isAggregate, boolean hasSequence, List selects, - Map udfParseNodes) { - this.fromTable = from; - this.hint = hint == null ? 
HintNode.EMPTY_HINT_NODE : hint; - this.isDistinct = isDistinct; - this.select = Collections.unmodifiableList(select); - this.where = where; - this.groupBy = Collections.unmodifiableList(groupBy); - this.having = having; - this.orderBy = Collections.unmodifiableList(orderBy); - this.limit = limit; - this.offset = offset; - this.bindCount = bindCount; - this.isAggregate = isAggregate || groupBy.size() != countConstants(groupBy) || this.having != null; - this.hasSequence = hasSequence; - boolean hasWildcard = false; - for (AliasedNode aliasedNode : select) { - ParseNode node = aliasedNode.getNode(); - if (node instanceof WildcardParseNode || node instanceof TableWildcardParseNode || node instanceof FamilyWildcardParseNode) { - hasWildcard = true; - break; - } - } - this.hasWildcard = hasWildcard; - if (!selects.isEmpty()) { - this.selects.addAll(selects); - } - this.udfParseNodes = udfParseNodes; - } - - @Override - public boolean isDistinct() { - return isDistinct; - } - - @Override - public LimitNode getLimit() { - return limit; - } - - /** - * This method should not be called during the early stage - * of the plan preparation phase since fromTable might not - * be ConcreteTableNode at that time(e.g., JoinTableNode). - * - * By the time getTableSamplingRate method is called, - * each select statements should have exactly one ConcreteTableNode, - * with its corresponding sampling rate associate with it. - */ - @Override - public Double getTableSamplingRate(){ - if(fromTable==null || !(fromTable instanceof ConcreteTableNode)) return null; - return ((ConcreteTableNode)fromTable).getTableSamplingRate(); - } - - @Override - public int getBindCount() { - return bindCount; - } - - public TableNode getFrom() { - return fromTable; - } - - @Override - public HintNode getHint() { - return hint; - } - - public List getSelect() { - return select; - } - /** - * Gets the where condition, or null if none. - */ - @Override - public ParseNode getWhere() { - return where; - } - - /** - * Gets the group-by, containing at least 1 element, or empty list, if none. - */ - public List getGroupBy() { - return groupBy; - } - - public ParseNode getHaving() { - return having; - } - - /** - * Gets the order-by, containing at least 1 element, or null, if none. 
- */ - @Override - public List getOrderBy() { - return orderBy; - } + private final TableNode fromTable; + private final HintNode hint; + private final boolean isDistinct; + private final List select; + private final ParseNode where; + private final List groupBy; + private final ParseNode having; + private final List orderBy; + private final LimitNode limit; + private final int bindCount; + private final boolean isAggregate; + private final boolean hasSequence; + private final boolean hasWildcard; + private final List selects = new ArrayList(); + private final Map udfParseNodes; + private final OffsetNode offset; - @Override - public boolean isAggregate() { - return isAggregate; - } + @Override + public final String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } - public boolean hasSequence() { - return hasSequence; - } + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append("SELECT "); + if (hint != null) buf.append(hint); + if (isDistinct) buf.append("DISTINCT "); + for (AliasedNode selectNode : select) { + selectNode.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); + if (fromTable != null) { + buf.append(" FROM "); + fromTable.toSQL(resolver, buf); + } + if (where != null) { + buf.append(" WHERE "); + where.toSQL(resolver, buf); + } + if (!groupBy.isEmpty()) { + buf.append(" GROUP BY "); + for (ParseNode node : groupBy) { + node.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); + } + if (having != null) { + buf.append(" HAVING "); + having.toSQL(resolver, buf); + } + if (!orderBy.isEmpty()) { + buf.append(" ORDER BY "); + for (OrderByNode node : orderBy) { + node.toSQL(resolver, buf); + buf.append(','); + } + buf.setLength(buf.length() - 1); + } + if (limit != null) { + buf.append(" LIMIT " + limit.toString()); + } + if (offset != null) { + buf.append(" OFFSET " + offset.toString()); + } + } - @Override - public Operation getOperation() { - return Operation.QUERY; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((fromTable == null) ? 0 : fromTable.hashCode()); + result = prime * result + ((groupBy == null) ? 0 : groupBy.hashCode()); + result = prime * result + ((having == null) ? 0 : having.hashCode()); + result = prime * result + ((hint == null) ? 0 : hint.hashCode()); + result = prime * result + (isDistinct ? 1231 : 1237); + result = prime * result + ((limit == null) ? 0 : limit.hashCode()); + result = prime * result + ((orderBy == null) ? 0 : orderBy.hashCode()); + result = prime * result + ((select == null) ? 0 : select.hashCode()); + result = prime * result + ((where == null) ? 
0 : where.hashCode()); + return result; + } - public boolean isJoin() { - return fromTable != null && fromTable instanceof JoinTableNode; - } - - public SelectStatement getInnerSelectStatement() { - if (fromTable == null || !(fromTable instanceof DerivedTableNode)) - return null; - - return ((DerivedTableNode) fromTable).getSelect(); - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + SelectStatement other = (SelectStatement) obj; + if (fromTable == null) { + if (other.fromTable != null) return false; + } else if (!fromTable.equals(other.fromTable)) return false; + if (groupBy == null) { + if (other.groupBy != null) return false; + } else if (!groupBy.equals(other.groupBy)) return false; + if (having == null) { + if (other.having != null) return false; + } else if (!having.equals(other.having)) return false; + if (hint == null) { + if (other.hint != null) return false; + } else if (!hint.equals(other.hint)) return false; + if (isDistinct != other.isDistinct) return false; + if (limit == null) { + if (other.limit != null) return false; + } else if (!limit.equals(other.limit)) return false; + if (orderBy == null) { + if (other.orderBy != null) return false; + } else if (!orderBy.equals(other.orderBy)) return false; + if (select == null) { + if (other.select != null) return false; + } else if (!select.equals(other.select)) return false; + if (where == null) { + if (other.where != null) return false; + } else if (!where.equals(other.where)) return false; + return true; + } - public boolean isUnion() { - return !getSelects().isEmpty(); - } + // Count constant expressions + private static int countConstants(List nodes) { + int count = 0; + for (int i = 0; i < nodes.size(); i++) { + if (nodes.get(i).isStateless()) { + count++; + } + } + return count; + } - public List getSelects() { - return selects; - } - - public boolean hasWildcard() { - return hasWildcard; - } + protected SelectStatement(TableNode from, HintNode hint, boolean isDistinct, + List select, ParseNode where, List groupBy, ParseNode having, + List orderBy, LimitNode limit, OffsetNode offset, int bindCount, + boolean isAggregate, boolean hasSequence, List selects, + Map udfParseNodes) { + this.fromTable = from; + this.hint = hint == null ? 
HintNode.EMPTY_HINT_NODE : hint; + this.isDistinct = isDistinct; + this.select = Collections.unmodifiableList(select); + this.where = where; + this.groupBy = Collections.unmodifiableList(groupBy); + this.having = having; + this.orderBy = Collections.unmodifiableList(orderBy); + this.limit = limit; + this.offset = offset; + this.bindCount = bindCount; + this.isAggregate = + isAggregate || groupBy.size() != countConstants(groupBy) || this.having != null; + this.hasSequence = hasSequence; + boolean hasWildcard = false; + for (AliasedNode aliasedNode : select) { + ParseNode node = aliasedNode.getNode(); + if ( + node instanceof WildcardParseNode || node instanceof TableWildcardParseNode + || node instanceof FamilyWildcardParseNode + ) { + hasWildcard = true; + break; + } + } + this.hasWildcard = hasWildcard; + if (!selects.isEmpty()) { + this.selects.addAll(selects); + } + this.udfParseNodes = udfParseNodes; + } - public Map getUdfParseNodes() { - return udfParseNodes; - } + @Override + public boolean isDistinct() { + return isDistinct; + } - @Override - public OffsetNode getOffset() { - return offset; - } + @Override + public LimitNode getLimit() { + return limit; + } - public boolean haveGroupBy() { - return this.getGroupBy() != null && this.getGroupBy().size() > 0 - || !this.isAggregate() - && this.isDistinct() - && this.getSelect() != null - && this.getSelect().size() > 0; - } + /** + * This method should not be called during the early stage of the plan preparation phase since + * fromTable might not be ConcreteTableNode at that time(e.g., JoinTableNode). By the time + * getTableSamplingRate method is called, each select statements should have exactly one + * ConcreteTableNode, with its corresponding sampling rate associate with it. + */ + @Override + public Double getTableSamplingRate() { + if (fromTable == null || !(fromTable instanceof ConcreteTableNode)) return null; + return ((ConcreteTableNode) fromTable).getTableSamplingRate(); + } - public boolean haveOrderBy() { - return this.getOrderBy() != null && this.getOrderBy().size() > 0; - } + @Override + public int getBindCount() { + return bindCount; + } + + public TableNode getFrom() { + return fromTable; + } + + @Override + public HintNode getHint() { + return hint; + } + + public List getSelect() { + return select; + } + + /** + * Gets the where condition, or null if none. + */ + @Override + public ParseNode getWhere() { + return where; + } + + /** + * Gets the group-by, containing at least 1 element, or empty list, if none. + */ + public List getGroupBy() { + return groupBy; + } + + public ParseNode getHaving() { + return having; + } + + /** + * Gets the order-by, containing at least 1 element, or null, if none. 
+ */ + @Override + public List getOrderBy() { + return orderBy; + } + + @Override + public boolean isAggregate() { + return isAggregate; + } + + public boolean hasSequence() { + return hasSequence; + } + + @Override + public Operation getOperation() { + return Operation.QUERY; + } + + public boolean isJoin() { + return fromTable != null && fromTable instanceof JoinTableNode; + } + + public SelectStatement getInnerSelectStatement() { + if (fromTable == null || !(fromTable instanceof DerivedTableNode)) return null; + + return ((DerivedTableNode) fromTable).getSelect(); + } + + public boolean isUnion() { + return !getSelects().isEmpty(); + } + + public List getSelects() { + return selects; + } + + public boolean hasWildcard() { + return hasWildcard; + } + + public Map getUdfParseNodes() { + return udfParseNodes; + } + + @Override + public OffsetNode getOffset() { + return offset; + } + + public boolean haveGroupBy() { + return this.getGroupBy() != null && this.getGroupBy().size() > 0 || !this.isAggregate() + && this.isDistinct() && this.getSelect() != null && this.getSelect().size() > 0; + } + + public boolean haveOrderBy() { + return this.getOrderBy() != null && this.getOrderBy().size() > 0; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatementRewriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatementRewriter.java index 3152abea337..f5d3759df56 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatementRewriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SelectStatementRewriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,177 +23,172 @@ import java.util.List; import java.util.Set; - - /** - * - * Class that creates a new select statement by filtering out nodes. - * Currently only supports filtering out boolean nodes (i.e. nodes - * that may be ANDed and ORed together. - * - * TODO: generize this - * + * Class that creates a new select statement by filtering out nodes. Currently only supports + * filtering out boolean nodes (i.e. nodes that may be ANDed and ORed together. TODO: generize this * @since 0.1 */ public class SelectStatementRewriter extends ParseNodeRewriter { - - /** - * Rewrite the select statement by filtering out expression nodes from the WHERE clause - * @param statement the select statement from which to filter. - * @param removeNodes expression nodes to filter out of WHERE clause. - * @return new select statement - * @throws SQLException - */ - public static SelectStatement removeFromWhereClause(SelectStatement statement, Set removeNodes) throws SQLException { - if (removeNodes.isEmpty()) { - return statement; - } - ParseNode where = statement.getWhere(); - SelectStatementRewriter rewriter = new SelectStatementRewriter(removeNodes); - where = where.accept(rewriter); - // Return new SELECT statement with updated WHERE clause - return NODE_FACTORY.select(statement, where); - } - - /** - * Rewrite the select statement by filtering out expression nodes from the HAVING clause - * and anding them with the WHERE clause. - * @param statement the select statement from which to move the nodes. 
- * @param moveNodes expression nodes to filter out of HAVING clause and add to WHERE clause. - * @return new select statement - * @throws SQLException - */ - public static SelectStatement moveFromHavingToWhereClause(SelectStatement statement, Set moveNodes) throws SQLException { - if (moveNodes.isEmpty()) { - return statement; - } - ParseNode andNode = NODE_FACTORY.and(new ArrayList(moveNodes)); - ParseNode having = statement.getHaving(); - SelectStatementRewriter rewriter = new SelectStatementRewriter(moveNodes); - having = having.accept(rewriter); - ParseNode where = statement.getWhere(); - if (where == null) { - where = andNode; - } else { - where = NODE_FACTORY.and(Arrays.asList(where,andNode)); - } - // Return new SELECT statement with updated WHERE and HAVING clauses - return NODE_FACTORY.select(statement, where, having); - } - - private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - private final Set removeNodes; - - private SelectStatementRewriter(Set removeNodes) { - this.removeNodes = removeNodes; - } - - private static interface CompoundNodeFactory { - ParseNode createNode(List children); - } - - private boolean enterCompoundNode(ParseNode node) { - if (removeNodes.contains(node)) { - return false; - } - return true; - } - - private ParseNode leaveCompoundNode(CompoundParseNode node, List children, CompoundNodeFactory factory) { - int newSize = children.size(); - int oldSize = node.getChildren().size(); - if (newSize == oldSize) { - return node; - } else if (newSize > 1) { - return factory.createNode(children); - } else if (newSize == 1) { - // TODO: keep or collapse? Maybe be helpful as context of where a problem occurs if a node could not be consumed - return(children.get(0)); - } else { - return null; - } - } - - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - return enterCompoundNode(node); - } + /** + * Rewrite the select statement by filtering out expression nodes from the WHERE clause + * @param statement the select statement from which to filter. + * @param removeNodes expression nodes to filter out of WHERE clause. + * @return new select statement + */ + public static SelectStatement removeFromWhereClause(SelectStatement statement, + Set removeNodes) throws SQLException { + if (removeNodes.isEmpty()) { + return statement; + } + ParseNode where = statement.getWhere(); + SelectStatementRewriter rewriter = new SelectStatementRewriter(removeNodes); + where = where.accept(rewriter); + // Return new SELECT statement with updated WHERE clause + return NODE_FACTORY.select(statement, where); + } - @Override - public ParseNode visitLeave(AndParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.and(children); - } - }); - } + /** + * Rewrite the select statement by filtering out expression nodes from the HAVING clause and + * anding them with the WHERE clause. + * @param statement the select statement from which to move the nodes. + * @param moveNodes expression nodes to filter out of HAVING clause and add to WHERE clause. 
+ * @return new select statement + */ + public static SelectStatement moveFromHavingToWhereClause(SelectStatement statement, + Set moveNodes) throws SQLException { + if (moveNodes.isEmpty()) { + return statement; + } + ParseNode andNode = NODE_FACTORY.and(new ArrayList(moveNodes)); + ParseNode having = statement.getHaving(); + SelectStatementRewriter rewriter = new SelectStatementRewriter(moveNodes); + having = having.accept(rewriter); + ParseNode where = statement.getWhere(); + if (where == null) { + where = andNode; + } else { + where = NODE_FACTORY.and(Arrays.asList(where, andNode)); + } + // Return new SELECT statement with updated WHERE and HAVING clauses + return NODE_FACTORY.select(statement, where, having); + } - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - return enterCompoundNode(node); - } + private static final ParseNodeFactory NODE_FACTORY = new ParseNodeFactory(); - @Override - public ParseNode visitLeave(OrParseNode node, List nodes) throws SQLException { - return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { - @Override - public ParseNode createNode(List children) { - return NODE_FACTORY.or(children); - } - }); - } - - @Override - public boolean visitEnter(ComparisonParseNode node) throws SQLException { - if (removeNodes.contains(node)) { - return false; - } - return true; - } + private final Set removeNodes; - @Override - public ParseNode visitLeave(ComparisonParseNode node, List c) throws SQLException { - return c.isEmpty() ? null : node; - } - - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - if (removeNodes.contains(node)) { - return false; - } - return true; - } - - @Override - public ParseNode visitLeave(LikeParseNode node, List c) throws SQLException { - return c.isEmpty() ? null : node; + private SelectStatementRewriter(Set removeNodes) { + this.removeNodes = removeNodes; + } + + private static interface CompoundNodeFactory { + ParseNode createNode(List children); + } + + private boolean enterCompoundNode(ParseNode node) { + if (removeNodes.contains(node)) { + return false; } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - if (removeNodes.contains(node)) { - return false; - } - return true; + return true; + } + + private ParseNode leaveCompoundNode(CompoundParseNode node, List children, + CompoundNodeFactory factory) { + int newSize = children.size(); + int oldSize = node.getChildren().size(); + if (newSize == oldSize) { + return node; + } else if (newSize > 1) { + return factory.createNode(children); + } else if (newSize == 1) { + // TODO: keep or collapse? 
Maybe be helpful as context of where a problem occurs if a node + // could not be consumed + return (children.get(0)); + } else { + return null; + } + } + + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + return enterCompoundNode(node); + } + + @Override + public ParseNode visitLeave(AndParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.and(children); + } + }); + } + + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + return enterCompoundNode(node); + } + + @Override + public ParseNode visitLeave(OrParseNode node, List nodes) throws SQLException { + return leaveCompoundNode(node, nodes, new CompoundNodeFactory() { + @Override + public ParseNode createNode(List children) { + return NODE_FACTORY.or(children); + } + }); + } + + @Override + public boolean visitEnter(ComparisonParseNode node) throws SQLException { + if (removeNodes.contains(node)) { + return false; } - - @Override - public ParseNode visitLeave(InListParseNode node, List c) throws SQLException { - return c.isEmpty() ? null : node; + return true; + } + + @Override + public ParseNode visitLeave(ComparisonParseNode node, List c) throws SQLException { + return c.isEmpty() ? null : node; + } + + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + if (removeNodes.contains(node)) { + return false; } - - @Override - public boolean visitEnter(InParseNode node) throws SQLException { - if (removeNodes.contains(node)) { - return false; - } - return true; + return true; + } + + @Override + public ParseNode visitLeave(LikeParseNode node, List c) throws SQLException { + return c.isEmpty() ? null : node; + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + if (removeNodes.contains(node)) { + return false; } - - @Override - public ParseNode visitLeave(InParseNode node, List c) throws SQLException { - return c.isEmpty() ? null : node; + return true; + } + + @Override + public ParseNode visitLeave(InListParseNode node, List c) throws SQLException { + return c.isEmpty() ? null : node; + } + + @Override + public boolean visitEnter(InParseNode node) throws SQLException { + if (removeNodes.contains(node)) { + return false; } + return true; + } + + @Override + public ParseNode visitLeave(InParseNode node, List c) throws SQLException { + return c.isEmpty() ? null : node; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java index 1fc670cda2b..867620fc79b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SequenceValueParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,94 +15,88 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.parse; import java.sql.SQLException; import org.apache.phoenix.compile.ColumnResolver; - public class SequenceValueParseNode extends TerminalParseNode { - public enum Op { - NEXT_VALUE("NEXT"), - CURRENT_VALUE("CURRENT"); - - private final String name; - Op(String name) { - this.name = name; - } - public String getName() { - return name; - }; - - } - private final TableName tableName; - private final Op op; - private final ParseNode numToAllocate; - - public SequenceValueParseNode(TableName tableName, Op op, ParseNode numToAllocate) { - this.tableName = tableName; - this.op = op; - this.numToAllocate = numToAllocate; - } - - public ParseNode getNumToAllocateNode() { - return numToAllocate; - } - - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } - - public TableName getTableName() { - return tableName; - } - - @Override - public boolean isStateless() { - return true; - } + public enum Op { + NEXT_VALUE("NEXT"), + CURRENT_VALUE("CURRENT"); - public Op getOp() { - return op; - } + private final String name; - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((op == null) ? 0 : op.hashCode()); - result = prime * result - + ((tableName == null) ? 0 : tableName.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - SequenceValueParseNode other = (SequenceValueParseNode) obj; - if (op != other.op) - return false; - if (tableName == null) { - if (other.tableName != null) - return false; - } else if (!tableName.equals(other.tableName)) - return false; - return true; - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append(op.getName()); - buf.append(" VALUE FOR "); - buf.append(tableName); + Op(String name) { + this.name = name; } -} \ No newline at end of file + + public String getName() { + return name; + }; + + } + + private final TableName tableName; + private final Op op; + private final ParseNode numToAllocate; + + public SequenceValueParseNode(TableName tableName, Op op, ParseNode numToAllocate) { + this.tableName = tableName; + this.op = op; + this.numToAllocate = numToAllocate; + } + + public ParseNode getNumToAllocateNode() { + return numToAllocate; + } + + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } + + public TableName getTableName() { + return tableName; + } + + @Override + public boolean isStateless() { + return true; + } + + public Op getOp() { + return op; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((op == null) ? 0 : op.hashCode()); + result = prime * result + ((tableName == null) ? 
0 : tableName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + SequenceValueParseNode other = (SequenceValueParseNode) obj; + if (op != other.op) return false; + if (tableName == null) { + if (other.tableName != null) return false; + } else if (!tableName.equals(other.tableName)) return false; + return true; + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append(op.getName()); + buf.append(" VALUE FOR "); + buf.append(tableName); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTable.java index 4fe77a7b4cd..b6910dd63d2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import org.apache.phoenix.jdbc.PhoenixStatement; @@ -24,15 +23,16 @@ * Parent class for SHOW CREATE TABLE statements. */ public class ShowCreateTable implements BindableStatement { - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - @Override - public PhoenixStatement.Operation getOperation() { - return PhoenixStatement.Operation.QUERY; - } + @Override + public PhoenixStatement.Operation getOperation() { + return PhoenixStatement.Operation.QUERY; + } - public ShowCreateTable() {} + public ShowCreateTable() { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTableStatement.java index 5f1c6d4c141..ac3e683b6ee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowCreateTableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,54 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; +import java.util.Objects; + import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.SchemaUtil; -import java.util.Objects; - /** * ParseNode implementation for SHOW CREATE TABLE statements. 
*/ public class ShowCreateTableStatement extends ShowCreateTable { - private TableName tableName; + private TableName tableName; - public ShowCreateTableStatement(TableName tn) { - tableName = tn; - } + public ShowCreateTableStatement(TableName tn) { + tableName = tn; + } - public TableName getTableName() { - return tableName; - } + public TableName getTableName() { + return tableName; + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - Preconditions.checkNotNull(buf); - buf.append("SHOW CREATE TABLE "); + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + Preconditions.checkNotNull(buf); + buf.append("SHOW CREATE TABLE "); - buf.append(SchemaUtil - .getEscapedTableName(tableName.getSchemaName(), tableName.getTableName())); - } + buf.append(SchemaUtil.getEscapedTableName(tableName.getSchemaName(), tableName.getTableName())); + } - @Override - public String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null, buf); - return buf.toString(); - } + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } - @Override - public boolean equals(Object other) { - if (!(other instanceof ShowCreateTableStatement)) return false; - ShowCreateTableStatement stmt = (ShowCreateTableStatement) other; - return Objects.equals(tableName, stmt.getTableName()); - } + @Override + public boolean equals(Object other) { + if (!(other instanceof ShowCreateTableStatement)) return false; + ShowCreateTableStatement stmt = (ShowCreateTableStatement) other; + return Objects.equals(tableName, stmt.getTableName()); + } - @Override - public int hashCode() { - return Objects.hash(tableName); - } + @Override + public int hashCode() { + return Objects.hash(tableName); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowSchemasStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowSchemasStatement.java index 8e95e0e0393..1cc11081924 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowSchemasStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowSchemasStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,56 +15,56 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.compile.ColumnResolver; +import java.util.Objects; import javax.annotation.Nullable; -import java.util.Objects; + +import org.apache.phoenix.compile.ColumnResolver; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** * ParseNode implementation for SHOW SCHEMAS sql. 
*/ public class ShowSchemasStatement extends ShowStatement { - @Nullable - private final String schemaPattern; + @Nullable + private final String schemaPattern; - public ShowSchemasStatement(String pattern) { - schemaPattern = pattern; - }; + public ShowSchemasStatement(String pattern) { + schemaPattern = pattern; + }; - @Nullable - protected String getSchemaPattern() { - return schemaPattern; - } + @Nullable + protected String getSchemaPattern() { + return schemaPattern; + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - Preconditions.checkNotNull(buf); - buf.append("SHOW SCHEMAS"); - if (schemaPattern != null) { - buf.append(" LIKE "); - buf.append("'").append(schemaPattern).append("'"); - } + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + Preconditions.checkNotNull(buf); + buf.append("SHOW SCHEMAS"); + if (schemaPattern != null) { + buf.append(" LIKE "); + buf.append("'").append(schemaPattern).append("'"); } + } - @Override - public String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null, buf); - return buf.toString(); - } + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } - @Override - public boolean equals(Object other) { - if (!(other instanceof ShowSchemasStatement)) return false; - ShowSchemasStatement stmt = (ShowSchemasStatement) other; - return Objects.equals(schemaPattern, stmt.getSchemaPattern()); - } + @Override + public boolean equals(Object other) { + if (!(other instanceof ShowSchemasStatement)) return false; + ShowSchemasStatement stmt = (ShowSchemasStatement) other; + return Objects.equals(schemaPattern, stmt.getSchemaPattern()); + } - @Override - public int hashCode() { - return Objects.hashCode(schemaPattern); - } + @Override + public int hashCode() { + return Objects.hashCode(schemaPattern); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowStatement.java index d4ab7a487e4..1a6e589ab9e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; import org.apache.phoenix.jdbc.PhoenixStatement; @@ -24,15 +23,16 @@ * Parent class for all SHOW statements. SHOW SCHEMAS, SHOW TABLES etc. 
*/ public class ShowStatement implements BindableStatement { - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - @Override - public PhoenixStatement.Operation getOperation() { - return PhoenixStatement.Operation.QUERY; - } + @Override + public PhoenixStatement.Operation getOperation() { + return PhoenixStatement.Operation.QUERY; + } - public ShowStatement () {} + public ShowStatement() { + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java index 0371a452dd3..73816a64696 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ShowTablesStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,78 +15,78 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.parse; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.compile.ColumnResolver; +import java.util.Objects; import javax.annotation.Nullable; -import java.util.Objects; + +import org.apache.phoenix.compile.ColumnResolver; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** * ParseNode implementation for SHOW TABLES {@code [IN ] }. */ public class ShowTablesStatement extends ShowStatement { - // Schema for table listing. null implies the the db for this connection is used. - @Nullable - private String targetSchema; + // Schema for table listing. null implies the the db for this connection is used. + @Nullable + private String targetSchema; - // Pattern to be matched while looking up for tables in 'targetSchema'. - // null implies everything is returned. - @Nullable - private String dbPattern; + // Pattern to be matched while looking up for tables in 'targetSchema'. + // null implies everything is returned. 
+ @Nullable + private String dbPattern; - public ShowTablesStatement() { - this(null, null); - } + public ShowTablesStatement() { + this(null, null); + } - public ShowTablesStatement(@Nullable String schema, @Nullable String pattern) { - targetSchema = schema; - dbPattern = pattern; - } + public ShowTablesStatement(@Nullable String schema, @Nullable String pattern) { + targetSchema = schema; + dbPattern = pattern; + } - @Nullable - public String getTargetSchema() { - return targetSchema; - } + @Nullable + public String getTargetSchema() { + return targetSchema; + } - @Nullable - public String getDbPattern() { - return dbPattern; - } + @Nullable + public String getDbPattern() { + return dbPattern; + } - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - Preconditions.checkNotNull(buf); - buf.append("SHOW TABLES"); - if (targetSchema != null) { - buf.append(" IN "); - buf.append(targetSchema); - buf.append(" "); - } - if (dbPattern != null) { - buf.append(" LIKE "); - buf.append("'").append(dbPattern).append("'"); - } + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + Preconditions.checkNotNull(buf); + buf.append("SHOW TABLES"); + if (targetSchema != null) { + buf.append(" IN "); + buf.append(targetSchema); + buf.append(" "); } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null, buf); - return buf.toString(); + if (dbPattern != null) { + buf.append(" LIKE "); + buf.append("'").append(dbPattern).append("'"); } + } - @Override - public boolean equals(Object other) { - if (!(other instanceof ShowTablesStatement)) return false; - ShowTablesStatement stmt = (ShowTablesStatement) other; - return Objects.equals(targetSchema, stmt.getTargetSchema()) && Objects.equals(dbPattern, - stmt.getDbPattern()); - } + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } - @Override - public int hashCode() { - return Objects.hash(targetSchema, dbPattern); - } + @Override + public boolean equals(Object other) { + if (!(other instanceof ShowTablesStatement)) return false; + ShowTablesStatement stmt = (ShowTablesStatement) other; + return Objects.equals(targetSchema, stmt.getTargetSchema()) + && Objects.equals(dbPattern, stmt.getDbPattern()); + } + + @Override + public int hashCode() { + return Objects.hash(targetSchema, dbPattern); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java index 603519eacab..b7c42fa226b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SingleTableStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,20 +18,20 @@ package org.apache.phoenix.parse; public abstract class SingleTableStatement extends MutableStatement { - private final NamedTableNode table; - private final int bindCount; + private final NamedTableNode table; + private final int bindCount; - public SingleTableStatement(NamedTableNode table, int bindCount) { - this.table = table; - this.bindCount = bindCount; - } - - public NamedTableNode getTable() { - return table; - } + public SingleTableStatement(NamedTableNode table, int bindCount) { + this.table = table; + this.bindCount = bindCount; + } - @Override - public int getBindCount() { - return bindCount; - } + public NamedTableNode getTable() { + return table; + } + + @Override + public int getBindCount() { + return bindCount; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StatelessTraverseAllParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StatelessTraverseAllParseNodeVisitor.java index e95b480dfda..df2deaf88d6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StatelessTraverseAllParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StatelessTraverseAllParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,138 +15,133 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.parse; import java.sql.SQLException; import java.util.List; public class StatelessTraverseAllParseNodeVisitor extends TraverseAllParseNodeVisitor { - @Override - protected void enterParseNode(ParseNode node) { - } - - @Override - public Void visitLeave(LikeParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(AndParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(OrParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(FunctionParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(ComparisonParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(CaseParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(AddParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(MultiplyParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(DivideParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(ModulusParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(SubtractParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(NotParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(ExistsParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(CastParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(InListParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(InParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(IsNullParseNode node, List l) - throws SQLException { - return null; - } - - @Override - public Void visitLeave(StringConcatParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(BetweenParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(ArrayConstructorNode node, List l) throws SQLException { - return null; - } - - @Override - public Void visitLeave(ArrayAllComparisonNode node, List l) - throws SQLException { - return null; - } - - @Override - public Void visitLeave(ArrayAnyComparisonNode node, List l) - throws SQLException { - return null; - } - - @Override - public Void visitLeave(ArrayElemRefNode node, List l) - throws SQLException { - return null; - } + @Override + protected void enterParseNode(ParseNode node) { + } + + @Override + public Void visitLeave(LikeParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(AndParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(OrParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(FunctionParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ComparisonParseNode node, List l) throws SQLException { + return null; + } + + @Override + 
public Void visitLeave(CaseParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(AddParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(MultiplyParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(DivideParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ModulusParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(SubtractParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(NotParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ExistsParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(CastParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(InListParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(InParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(IsNullParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(StringConcatParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(BetweenParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ArrayConstructorNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + return null; + } + + @Override + public Void visitLeave(ArrayElemRefNode node, List l) throws SQLException { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java index 5eba979ecea..fe6ec82fbd0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/StringConcatParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,41 +23,34 @@ import org.apache.phoenix.compile.ColumnResolver; - - - /** - * * Node representing || String concatenation in a SQL expression - * - * * @since 0.1 */ public class StringConcatParseNode extends CompoundParseNode { - StringConcatParseNode(List children) { - super(children); - } + StringConcatParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } - - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - List children = getChildren(); - children.get(0).toSQL(resolver, buf); - for (int i = 1 ; i < children.size(); i++) { - buf.append(" || "); - children.get(i).toSQL(resolver, buf); - } - buf.append(')'); + return visitor.visitLeave(this, l); + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + List children = getChildren(); + children.get(0).toSQL(resolver, buf); + for (int i = 1; i < children.size(); i++) { + buf.append(" || "); + children.get(i).toSQL(resolver, buf); } + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java index d73958e1990..a7640528e25 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubqueryParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,69 +21,58 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing a subquery in SQL - * - * * @since 0.1 */ public class SubqueryParseNode extends TerminalParseNode { - private final SelectStatement select; - private final boolean expectSingleRow; + private final SelectStatement select; + private final boolean expectSingleRow; + + SubqueryParseNode(SelectStatement select, boolean expectSingleRow) { + this.select = select; + this.expectSingleRow = expectSingleRow; + } + + public SelectStatement getSelectNode() { + return select; + } + + public boolean expectSingleRow() { + return expectSingleRow; + } - SubqueryParseNode(SelectStatement select, boolean expectSingleRow) { - this.select = select; - this.expectSingleRow = expectSingleRow; - } - - public SelectStatement getSelectNode() { - return select; - } - - public boolean expectSingleRow() { - return expectSingleRow; - } + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (expectSingleRow ? 1231 : 1237); + result = prime * result + ((select == null) ? 0 : select.hashCode()); + return result; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (expectSingleRow ? 1231 : 1237); - result = prime * result + ((select == null) ? 0 : select.hashCode()); - return result; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + SubqueryParseNode other = (SubqueryParseNode) obj; + if (expectSingleRow != other.expectSingleRow) return false; + if (select == null) { + if (other.select != null) return false; + } else if (!select.equals(other.select)) return false; + return true; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - SubqueryParseNode other = (SubqueryParseNode) obj; - if (expectSingleRow != other.expectSingleRow) - return false; - if (select == null) { - if (other.select != null) - return false; - } else if (!select.equals(other.select)) - return false; - return true; - } - - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append('('); - select.toSQL(resolver, buf); - buf.append(')'); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append('('); + select.toSQL(resolver, buf); + buf.append(')'); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java index decc3ac17b6..8b319a25fe0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SubtractParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,33 +21,28 @@ import java.util.Collections; import java.util.List; - - /** - * * Node representing subtraction in a SQL expression - * - * * @since 0.1 */ public class SubtractParseNode extends ArithmeticParseNode { - public static final String OPERATOR = "-"; + public static final String OPERATOR = "-"; - @Override - public String getOperator() { - return OPERATOR; - } + @Override + public String getOperator() { + return OPERATOR; + } - SubtractParseNode(List children) { - super(children); - } + SubtractParseNode(List children) { + super(children); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - List l = Collections.emptyList(); - if (visitor.visitEnter(this)) { - l = acceptChildren(visitor); - } - return visitor.visitLeave(this, l); + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + List l = Collections.emptyList(); + if (visitor.visitEnter(this)) { + l = acceptChildren(visitor); } + return visitor.visitLeave(this, l); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SumAggregateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SumAggregateParseNode.java index 9f54b0897d5..88969259d12 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SumAggregateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/SumAggregateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,15 +25,15 @@ import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.SumAggregateFunction; - public class SumAggregateParseNode extends DelegateConstantToCountParseNode { - public SumAggregateParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } - - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - return new SumAggregateFunction(children, getDelegateFunction(children,context)); - } + public SumAggregateParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + return new SumAggregateFunction(children, getDelegateFunction(children, context)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableName.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableName.java index 61bfa6b585c..60db2fb53a9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableName.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,79 +22,73 @@ public class TableName { - private final String tableName; - private final String schemaName; - private final boolean isTableNameCaseSensitive; - private final boolean isSchemaNameCaseSensitive; - - public static TableName createNormalized(String schemaName, String tableName) { - return new TableName(schemaName, tableName, true); - } - - public static TableName create(String schemaName, String tableName) { - return new TableName(schemaName, tableName, false); - } - - private TableName(String schemaName, String tableName, boolean normalize) { - this.schemaName = normalize ? SchemaUtil.normalizeIdentifier(schemaName) : schemaName; - this.isSchemaNameCaseSensitive = normalize ? SchemaUtil.isCaseSensitive(schemaName) : false; - this.tableName = normalize ? SchemaUtil.normalizeIdentifier(tableName) : tableName; - this.isTableNameCaseSensitive = normalize ? SchemaUtil.isCaseSensitive(tableName) : false; - } - - public boolean isTableNameCaseSensitive() { - return isTableNameCaseSensitive; - } + private final String tableName; + private final String schemaName; + private final boolean isTableNameCaseSensitive; + private final boolean isSchemaNameCaseSensitive; - public boolean isSchemaNameCaseSensitive() { - return isSchemaNameCaseSensitive; - } + public static TableName createNormalized(String schemaName, String tableName) { + return new TableName(schemaName, tableName, true); + } - public String getTableName() { - return tableName; - } + public static TableName create(String schemaName, String tableName) { + return new TableName(schemaName, tableName, false); + } - public String getSchemaName() { - return schemaName; - } - - @Override - public String toString() { - return (schemaName == null ? "" : ((isSchemaNameCaseSensitive ? "\"" : "") + schemaName - + (isSchemaNameCaseSensitive ? "\"" : "") + QueryConstants.NAME_SEPARATOR)) - + ((isTableNameCaseSensitive ? "\"" : "") + tableName + (isTableNameCaseSensitive ? "\"" : "")); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result - + ((schemaName == null) ? 0 : schemaName.hashCode()); - result = prime * result - + ((tableName == null) ? 0 : tableName.hashCode()); - return result; - } + private TableName(String schemaName, String tableName, boolean normalize) { + this.schemaName = normalize ? SchemaUtil.normalizeIdentifier(schemaName) : schemaName; + this.isSchemaNameCaseSensitive = normalize ? SchemaUtil.isCaseSensitive(schemaName) : false; + this.tableName = normalize ? SchemaUtil.normalizeIdentifier(tableName) : tableName; + this.isTableNameCaseSensitive = normalize ? 
SchemaUtil.isCaseSensitive(tableName) : false; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - TableName other = (TableName) obj; - if (schemaName == null) { - if (other.schemaName != null) - return false; - } else if (!schemaName.equals(other.schemaName)) - return false; - if (tableName == null) { - if (other.tableName != null) - return false; - } else if (!tableName.equals(other.tableName)) - return false; - return true; - } + public boolean isTableNameCaseSensitive() { + return isTableNameCaseSensitive; + } + + public boolean isSchemaNameCaseSensitive() { + return isSchemaNameCaseSensitive; + } + + public String getTableName() { + return tableName; + } + + public String getSchemaName() { + return schemaName; + } + + @Override + public String toString() { + return (schemaName == null + ? "" + : ((isSchemaNameCaseSensitive ? "\"" : "") + schemaName + + (isSchemaNameCaseSensitive ? "\"" : "") + QueryConstants.NAME_SEPARATOR)) + + ((isTableNameCaseSensitive ? "\"" : "") + tableName + + (isTableNameCaseSensitive ? "\"" : "")); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((schemaName == null) ? 0 : schemaName.hashCode()); + result = prime * result + ((tableName == null) ? 0 : tableName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + TableName other = (TableName) obj; + if (schemaName == null) { + if (other.schemaName != null) return false; + } else if (!schemaName.equals(other.schemaName)) return false; + if (tableName == null) { + if (other.tableName != null) return false; + } else if (!tableName.equals(other.tableName)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNode.java index 7c372346a54..2b7da76b927 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,34 +21,29 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Abstract base class for FROM clause data sources - * - * * @since 0.1 */ public abstract class TableNode { - private final String alias; + private final String alias; - TableNode(String alias) { - this.alias = alias; - } + TableNode(String alias) { + this.alias = alias; + } - public String getAlias() { - return alias; - } + public String getAlias() { + return alias; + } - @Override - public final String toString() { - StringBuilder buf = new StringBuilder(); - toSQL(null,buf); - return buf.toString(); - } + @Override + public final String toString() { + StringBuilder buf = new StringBuilder(); + toSQL(null, buf); + return buf.toString(); + } - public abstract T accept(TableNodeVisitor visitor) throws SQLException; - public abstract void toSQL(ColumnResolver resolver, StringBuilder buf); -} + public abstract T accept(TableNodeVisitor visitor) throws SQLException; + public abstract void toSQL(ColumnResolver resolver, StringBuilder buf); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNodeVisitor.java index 8d5e4e79a66..ddc7e1d9984 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,17 +19,16 @@ import java.sql.SQLException; - /** - * * Visitor for nodes in the FROM clause - * - * * @since 0.1 */ public interface TableNodeVisitor { - E visit(BindTableNode boundTableNode) throws SQLException; - E visit(JoinTableNode joinNode) throws SQLException; - E visit(NamedTableNode namedTableNode) throws SQLException; - E visit(DerivedTableNode subselectNode) throws SQLException; + E visit(BindTableNode boundTableNode) throws SQLException; + + E visit(JoinTableNode joinNode) throws SQLException; + + E visit(NamedTableNode namedTableNode) throws SQLException; + + E visit(DerivedTableNode subselectNode) throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java index 3ff5972fb06..f8b10b78041 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TableWildcardParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,75 +22,67 @@ import org.apache.phoenix.compile.ColumnResolver; public class TableWildcardParseNode extends NamedParseNode { - private final TableName tableName; - private final boolean isRewrite; - - public static TableWildcardParseNode create(TableName tableName, boolean isRewrite) { - return new TableWildcardParseNode(tableName, isRewrite); - } + private final TableName tableName; + private final boolean isRewrite; - TableWildcardParseNode(TableName tableName, boolean isRewrite) { - super(tableName.toString()); - this.tableName = tableName; - this.isRewrite = isRewrite; - } - - public TableName getTableName() { - return tableName; - } - - public boolean isRewrite() { - return isRewrite; - } + public static TableWildcardParseNode create(TableName tableName, boolean isRewrite) { + return new TableWildcardParseNode(tableName, isRewrite); + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + TableWildcardParseNode(TableName tableName, boolean isRewrite) { + super(tableName.toString()); + this.tableName = tableName; + this.isRewrite = isRewrite; + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (isRewrite ? 1231 : 1237); - result = prime * result - + ((tableName == null) ? 0 : tableName.hashCode()); - return result; - } + public TableName getTableName() { + return tableName; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - TableWildcardParseNode other = (TableWildcardParseNode) obj; - if (isRewrite != other.isRewrite) - return false; - if (tableName == null) { - if (other.tableName != null) - return false; - } else if (!tableName.equals(other.tableName)) - return false; - return true; - } + public boolean isRewrite() { + return isRewrite; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - toSQL(buf); - buf.append(".*"); - } + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - @Override - public boolean isWildcardNode() { - return true; - } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (isRewrite ? 1231 : 1237); + result = prime * result + ((tableName == null) ? 
0 : tableName.hashCode()); + return result; + } - @Override - public TableWildcardParseNode getRewritten() { - return new TableWildcardParseNode(tableName, true); - } -} + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + TableWildcardParseNode other = (TableWildcardParseNode) obj; + if (isRewrite != other.isRewrite) return false; + if (tableName == null) { + if (other.tableName != null) return false; + } else if (!tableName.equals(other.tableName)) return false; + return true; + } + + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + toSQL(buf); + buf.append(".*"); + } + @Override + public boolean isWildcardNode() { + return true; + } + + @Override + public TableWildcardParseNode getRewritten() { + return new TableWildcardParseNode(tableName, true); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TerminalParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TerminalParseNode.java index 78224e68d22..ef6a421e7dc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TerminalParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TerminalParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,23 +21,20 @@ import java.util.List; /** - * * Abstract node for expressions that have no children - * - * * @since 0.1 */ public abstract class TerminalParseNode extends ParseNode { - @Override - public final List getChildren() { - return Collections.emptyList(); - } + @Override + public final List getChildren() { + return Collections.emptyList(); + } - public boolean isWildcardNode() { - return false; - } + public boolean isWildcardNode() { + return false; + } - public TerminalParseNode getRewritten() { - return null; - } + public TerminalParseNode getRewritten() { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToCharParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToCharParseNode.java index 0234df4bd5e..4a2259b425d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToCharParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToCharParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,40 +27,42 @@ import org.apache.phoenix.expression.function.FunctionArgumentType; import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.ToCharFunction; -import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDecimal; import org.apache.phoenix.schema.types.PTimestamp; public class ToCharParseNode extends FunctionParseNode { - public ToCharParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public ToCharParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - PDataType dataType = children.get(0).getDataType(); - String formatString = (String)((LiteralExpression)children.get(1)).getValue(); // either date or number format string - Format formatter; - FunctionArgumentType type; - if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - if (formatString == null) { - formatString = context.getDateFormat(); - formatter = context.getDateFormatter(); - } else { - formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); - } - type = FunctionArgumentType.TEMPORAL; - } - else if (dataType.isCoercibleTo(PDecimal.INSTANCE)) { - if (formatString == null) - formatString = context.getNumberFormat(); - formatter = FunctionArgumentType.NUMERIC.getFormatter(formatString); - type = FunctionArgumentType.NUMERIC; - } - else { - throw new SQLException(dataType + " type is unsupported for TO_CHAR(). Numeric and temporal types are supported."); - } - return new ToCharFunction(children, type, formatString, formatter); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + String formatString = (String) ((LiteralExpression) children.get(1)).getValue(); // either date + // or number + // format + // string + Format formatter; + FunctionArgumentType type; + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + if (formatString == null) { + formatString = context.getDateFormat(); + formatter = context.getDateFormatter(); + } else { + formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); + } + type = FunctionArgumentType.TEMPORAL; + } else if (dataType.isCoercibleTo(PDecimal.INSTANCE)) { + if (formatString == null) formatString = context.getNumberFormat(); + formatter = FunctionArgumentType.NUMERIC.getFormatter(formatString); + type = FunctionArgumentType.NUMERIC; + } else { + throw new SQLException(dataType + + " type is unsupported for TO_CHAR(). 
Numeric and temporal types are supported."); } + return new ToCharFunction(children, type, formatString, formatter); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java index b84dfc33d24..302bb2f1961 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToDateParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,23 +26,23 @@ import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.ToDateFunction; - public class ToDateParseNode extends FunctionParseNode { - public ToDateParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public ToDateParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); - String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); - if (dateFormat == null) { - dateFormat = context.getDateFormat(); - } - if (timeZoneId == null) { - timeZoneId = context.getDateFormatTimeZoneId(); - } - return new ToDateFunction(children, dateFormat, timeZoneId); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); + String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); + if (dateFormat == null) { + dateFormat = context.getDateFormat(); + } + if (timeZoneId == null) { + timeZoneId = context.getDateFormatTimeZoneId(); } + return new ToDateFunction(children, dateFormat, timeZoneId); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToNumberParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToNumberParseNode.java index 9a1b80f0340..fb89bf5ea65 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToNumberParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToNumberParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.parse; import java.sql.SQLException; @@ -34,36 +33,38 @@ public class ToNumberParseNode extends FunctionParseNode { - ToNumberParseNode(String name, List children, - BuiltInFunctionInfo info) { - super(name, children, info); - } + ToNumberParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } + + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + PDataType dataType = children.get(0).getDataType(); + String formatString = (String) ((LiteralExpression) children.get(1)).getValue(); // either date + // or number + // format + // string + Format formatter = null; + FunctionArgumentType type; - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - PDataType dataType = children.get(0).getDataType(); - String formatString = (String)((LiteralExpression)children.get(1)).getValue(); // either date or number format string - Format formatter = null; - FunctionArgumentType type; - - if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - if (formatString == null) { - formatString = context.getDateFormat(); - formatter = context.getDateFormatter(); - } else { - formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); - } - type = FunctionArgumentType.TEMPORAL; - } - else if (dataType.isCoercibleTo(PChar.INSTANCE)) { - if (formatString != null) { - formatter = FunctionArgumentType.CHAR.getFormatter(formatString); - } - type = FunctionArgumentType.CHAR; - } - else { - throw new SQLException(dataType + " type is unsupported for TO_NUMBER(). Numeric and temporal types are supported."); - } - return new ToNumberFunction(children, type, formatString, formatter); + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + if (formatString == null) { + formatString = context.getDateFormat(); + formatter = context.getDateFormatter(); + } else { + formatter = FunctionArgumentType.TEMPORAL.getFormatter(formatString); + } + type = FunctionArgumentType.TEMPORAL; + } else if (dataType.isCoercibleTo(PChar.INSTANCE)) { + if (formatString != null) { + formatter = FunctionArgumentType.CHAR.getFormatter(formatString); + } + type = FunctionArgumentType.CHAR; + } else { + throw new SQLException(dataType + + " type is unsupported for TO_NUMBER(). Numeric and temporal types are supported."); } + return new ToNumberFunction(children, type, formatString, formatter); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java index eb84008a603..76adbfdacad 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimeParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,23 +26,23 @@ import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.ToTimeFunction; +public class ToTimeParseNode extends FunctionParseNode { -public class ToTimeParseNode extends FunctionParseNode { + public ToTimeParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - public ToTimeParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); + String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); + if (dateFormat == null) { + dateFormat = context.getTimeFormat(); } - - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); - String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); - if (dateFormat == null) { - dateFormat = context.getTimeFormat(); - } - if (timeZoneId == null) { - timeZoneId = context.getDateFormatTimeZoneId(); - } - return new ToTimeFunction(children, dateFormat, timeZoneId); + if (timeZoneId == null) { + timeZoneId = context.getDateFormatTimeZoneId(); } + return new ToTimeFunction(children, dateFormat, timeZoneId); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java index 771c812d417..34eb9ccafde 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/ToTimestampParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,23 +26,23 @@ import org.apache.phoenix.expression.function.FunctionExpression; import org.apache.phoenix.expression.function.ToTimestampFunction; +public class ToTimestampParseNode extends FunctionParseNode { -public class ToTimestampParseNode extends FunctionParseNode { + public ToTimestampParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } - public ToTimestampParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); + @Override + public FunctionExpression create(List children, StatementContext context) + throws SQLException { + String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); + String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); + if (dateFormat == null) { + dateFormat = context.getTimestampFormat(); } - - @Override - public FunctionExpression create(List children, StatementContext context) throws SQLException { - String dateFormat = (String) ((LiteralExpression) children.get(1)).getValue(); - String timeZoneId = (String) ((LiteralExpression) children.get(2)).getValue(); - if (dateFormat == null) { - dateFormat = context.getTimestampFormat(); - } - if (timeZoneId == null) { - timeZoneId = context.getDateFormatTimeZoneId(); - } - return new ToTimestampFunction(children, dateFormat, timeZoneId); + if (timeZoneId == null) { + timeZoneId = context.getDateFormatTimeZoneId(); } + return new ToTimestampFunction(children, dateFormat, timeZoneId); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java index 301fa56d3a9..fa49542e50e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,29 +21,29 @@ public class TraceStatement implements BindableStatement { - private final boolean traceOn; - private final double samplingRate; + private final boolean traceOn; + private final double samplingRate; - public TraceStatement(boolean isOn, double samplingRate) { - this.traceOn = isOn; - this.samplingRate = samplingRate; - } + public TraceStatement(boolean isOn, double samplingRate) { + this.traceOn = isOn; + this.samplingRate = samplingRate; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - @Override - public Operation getOperation() { - return Operation.QUERY; - } + @Override + public Operation getOperation() { + return Operation.QUERY; + } - public boolean isTraceOn() { - return traceOn; - } + public boolean isTraceOn() { + return traceOn; + } - public double getSamplingRate() { - return samplingRate; - } + public double getSamplingRate() { + return samplingRate; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseAllParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseAllParseNodeVisitor.java index bbe58d0634f..1297f4032d2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseAllParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseAllParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,206 +19,202 @@ import java.sql.SQLException; - /** - * * Visitor that traverses into all parse nodes - * - * * @since 0.1 */ public abstract class TraverseAllParseNodeVisitor extends BaseParseNodeVisitor { - protected abstract void enterParseNode(ParseNode node) throws SQLException; - - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(CaseParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ComparisonParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(NotParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ExistsParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(CastParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(InParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(IsNullParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(MultiplyParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(SubtractParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(AddParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(DivideParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ModulusParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(BetweenParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public T visit(ColumnParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(LiteralParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(BindParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(WildcardParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(TableWildcardParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(FamilyWildcardParseNode node) throws 
SQLException { - enterParseNode(node); - return null; - } - - @Override - public T visit(SubqueryParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public boolean visitEnter(StringConcatParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public T visit(SequenceValueParseNode node) throws SQLException { - enterParseNode(node); - return null; - } - - @Override - public boolean visitEnter(ArrayConstructorNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { - enterParseNode(node); - return true; - } - - @Override - public boolean visitEnter(ArrayElemRefNode node) throws SQLException { - enterParseNode(node); - return true; - } + protected abstract void enterParseNode(ParseNode node) throws SQLException; + + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(CaseParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ComparisonParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(NotParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ExistsParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(CastParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(InParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(IsNullParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(MultiplyParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(SubtractParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(AddParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(DivideParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ModulusParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(BetweenParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public T visit(ColumnParseNode 
node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(LiteralParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(BindParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(WildcardParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(TableWildcardParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(FamilyWildcardParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public T visit(SubqueryParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public boolean visitEnter(StringConcatParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public T visit(SequenceValueParseNode node) throws SQLException { + enterParseNode(node); + return null; + } + + @Override + public boolean visitEnter(ArrayConstructorNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { + enterParseNode(node); + return true; + } + + @Override + public boolean visitEnter(ArrayElemRefNode node) throws SQLException { + enterParseNode(node); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseNoParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseNoParseNodeVisitor.java index 7a8732ad8f8..ac6d92f9dbd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseNoParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraverseNoParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,291 +20,288 @@ import java.sql.SQLException; import java.util.List; - /** - * * Visitor that traverses into no parse nodes - * - * * @since 0.1 */ public abstract class TraverseNoParseNodeVisitor extends BaseParseNodeVisitor { - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(ComparisonParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(CaseParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(BetweenParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(LikeParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(NotParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(ExistsParseNode node) throws SQLException { - return false; - } - - @Override - public boolean visitEnter(CastParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(NotParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(ExistsParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(CastParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(InListParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(InParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(InParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(IsNullParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(IsNullParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visit(ColumnParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(LiteralParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(BindParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(WildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(TableWildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(FamilyWildcardParseNode node) throws SQLException { - return null; - } - - @Override - public T visit(SubqueryParseNode node) throws SQLException { - return null; - } - - @Override - public T visitLeave(AndParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(OrParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(FunctionParseNode node, 
List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(ComparisonParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(CaseParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(MultiplyParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(MultiplyParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(SubtractParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(SubtractParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(AddParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(AddParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(DivideParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(DivideParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ModulusParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(ModulusParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(StringConcatParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(StringConcatParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visitLeave(BetweenParseNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { - return null; - } - - @Override - public T visit(SequenceValueParseNode node) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ArrayConstructorNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(ArrayConstructorNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { - return false; - } - @Override - public T visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { - return null; - } - - - @Override - public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { - return false; - } - @Override - public T visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ArrayElemRefNode node) throws SQLException { - return false; - } - - @Override - public T visitLeave(ArrayElemRefNode node, List l) throws SQLException { - return null; - } + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(ComparisonParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(CaseParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + return false; + } + + @Override + public 
boolean visitEnter(BetweenParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(LikeParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(NotParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(ExistsParseNode node) throws SQLException { + return false; + } + + @Override + public boolean visitEnter(CastParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(NotParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(ExistsParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(CastParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(InListParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(InParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(InParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(IsNullParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(IsNullParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visit(ColumnParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(LiteralParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(BindParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(WildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(TableWildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(FamilyWildcardParseNode node) throws SQLException { + return null; + } + + @Override + public T visit(SubqueryParseNode node) throws SQLException { + return null; + } + + @Override + public T visitLeave(AndParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(OrParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(FunctionParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(ComparisonParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(CaseParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(MultiplyParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(MultiplyParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(SubtractParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(SubtractParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(AddParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(AddParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(DivideParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(DivideParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean 
visitEnter(ModulusParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(ModulusParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(StringConcatParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(StringConcatParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visitLeave(BetweenParseNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(RowValueConstructorParseNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(RowValueConstructorParseNode node, List l) throws SQLException { + return null; + } + + @Override + public T visit(SequenceValueParseNode node) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(ArrayConstructorNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(ArrayConstructorNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + return null; + } + + @Override + public boolean visitEnter(ArrayElemRefNode node) throws SQLException { + return false; + } + + @Override + public T visitLeave(ArrayElemRefNode node, List l) throws SQLException { + return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UDFParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UDFParseNode.java index c0b972f236e..8d5bd080eb9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UDFParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UDFParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +21,7 @@ public class UDFParseNode extends FunctionParseNode { - public UDFParseNode(String name, List children, BuiltInFunctionInfo info) { - super(name, children, info); - } + public UDFParseNode(String name, List children, BuiltInFunctionInfo info) { + super(name, children, info); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnaryParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnaryParseNode.java index 132feaf6389..401bd367070 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnaryParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnaryParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,14 +20,11 @@ import java.util.Collections; /** - * * Abstract node representing an expression that has a single child in SQL - * - * * @since 0.1 */ public abstract class UnaryParseNode extends CompoundParseNode { - UnaryParseNode(ParseNode expr) { - super(Collections.singletonList(expr)); - } + UnaryParseNode(ParseNode expr) { + super(Collections.singletonList(expr)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnsupportedAllParseNodeVisitor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnsupportedAllParseNodeVisitor.java index 8e6a84e0344..ec3d5852ee7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnsupportedAllParseNodeVisitor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UnsupportedAllParseNodeVisitor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,266 +21,260 @@ import java.sql.SQLFeatureNotSupportedException; import java.util.List; - /** - * - * Visitor that throws UnsupportedOperationException for every - * node. Meant to be sub-classed for the case of a small subset - * of nodes being supported, in which case only those applicable - * methods would be overridden. - * - * + * Visitor that throws UnsupportedOperationException for every node. Meant to be sub-classed for the + * case of a small subset of nodes being supported, in which case only those applicable methods + * would be overridden. 
* @since 0.1 */ abstract public class UnsupportedAllParseNodeVisitor extends BaseParseNodeVisitor { - @Override - public E visit(ColumnParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(LiteralParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(ArrayConstructorNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ArrayConstructorNode node, List l) - throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(BindParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(WildcardParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(TableWildcardParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(FamilyWildcardParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visit(SubqueryParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(AndParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(OrParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(FunctionParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(ComparisonParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(BetweenParseNode node) throws SQLException{ - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(AndParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(OrParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(FunctionParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ComparisonParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(LikeParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(LikeParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(NotParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(NotParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ExistsParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - 
@Override - public boolean visitEnter(ExistsParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(CastParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(CastParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(InListParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(InParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(BetweenParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(InListParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(InParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(IsNullParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(IsNullParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(AddParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(AddParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(SubtractParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(SubtractParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(MultiplyParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(MultiplyParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(ModulusParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ModulusParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(DivideParseNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(DivideParseNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public List newElementList(int size) { - return null; - } - - @Override - public void addElement(List a, E element) { - } - - @Override - public E visit(SequenceValueParseNode node) throws SQLException { - return null; - } - - @Override - public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { - throw new 
SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } - - @Override - public E visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { - throw new SQLFeatureNotSupportedException(node.toString()); - } + @Override + public E visit(ColumnParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(LiteralParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(ArrayConstructorNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ArrayConstructorNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(BindParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(WildcardParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(TableWildcardParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(FamilyWildcardParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visit(SubqueryParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(AndParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(OrParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(FunctionParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(ComparisonParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(BetweenParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(AndParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(OrParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(FunctionParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ComparisonParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(LikeParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(LikeParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(NotParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean 
visitEnter(NotParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ExistsParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(ExistsParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(CastParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(CastParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(InListParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(InParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(BetweenParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(InListParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(InParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(IsNullParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(IsNullParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(AddParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(AddParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(SubtractParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(SubtractParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(MultiplyParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(MultiplyParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(ModulusParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ModulusParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(DivideParseNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(DivideParseNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public List newElementList(int size) { + return null; + } + + @Override + public void addElement(List a, E element) { + } + + @Override + public E visit(SequenceValueParseNode node) throws SQLException { + return 
null; + } + + @Override + public boolean visitEnter(ArrayAnyComparisonNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ArrayAnyComparisonNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public boolean visitEnter(ArrayAllComparisonNode node) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } + + @Override + public E visitLeave(ArrayAllComparisonNode node, List l) throws SQLException { + throw new SQLFeatureNotSupportedException(node.toString()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java index 10f0b2fb4e3..c149f00ce74 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpdateStatisticsStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,30 +27,30 @@ import edu.umd.cs.findbugs.annotations.NonNull; - public class UpdateStatisticsStatement extends SingleTableStatement { - private final StatisticsCollectionScope scope; - private final Map props; - - public UpdateStatisticsStatement(NamedTableNode table, @NonNull StatisticsCollectionScope scope, Map props) { - super(table, 0); - this.scope = scope; - this.props = props; - } - - public boolean updateColumns() { - return scope == COLUMNS || scope == ALL; - } - - public boolean updateIndex() { - return scope == INDEX || scope == ALL; - } - - public boolean updateAll() { - return scope == ALL; - } - - public Map getProps() { - return props; - }; + private final StatisticsCollectionScope scope; + private final Map props; + + public UpdateStatisticsStatement(NamedTableNode table, @NonNull StatisticsCollectionScope scope, + Map props) { + super(table, 0); + this.scope = scope; + this.props = props; + } + + public boolean updateColumns() { + return scope == COLUMNS || scope == ALL; + } + + public boolean updateIndex() { + return scope == INDEX || scope == ALL; + } + + public boolean updateAll() { + return scope == ALL; + } + + public Map getProps() { + return props; + }; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpsertStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpsertStatement.java index fca746320ba..515d01142df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpsertStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UpsertStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,40 +24,40 @@ import org.apache.hadoop.hbase.util.Pair; public class UpsertStatement extends DMLStatement { - private final List columns; - private final List values; - private final SelectStatement select; - private final HintNode hint; - private final List> onDupKeyPairs; - - public UpsertStatement(NamedTableNode table, HintNode hint, List columns, - List values, SelectStatement select, int bindCount, - Map udfParseNodes, List> onDupKeyPairs) { - super(table, bindCount, udfParseNodes); - this.columns = columns == null ? Collections.emptyList() : columns; - this.values = values; - this.select = select; - this.hint = hint == null ? HintNode.EMPTY_HINT_NODE : hint; - this.onDupKeyPairs = onDupKeyPairs; - } - - public List getColumns() { - return columns; - } - - public List getValues() { - return values; - } - - public SelectStatement getSelect() { - return select; - } - - public HintNode getHint() { - return hint; - } - - public List> getOnDupKeyPairs() { - return onDupKeyPairs; - } + private final List columns; + private final List values; + private final SelectStatement select; + private final HintNode hint; + private final List> onDupKeyPairs; + + public UpsertStatement(NamedTableNode table, HintNode hint, List columns, + List values, SelectStatement select, int bindCount, + Map udfParseNodes, List> onDupKeyPairs) { + super(table, bindCount, udfParseNodes); + this.columns = columns == null ? Collections. emptyList() : columns; + this.values = values; + this.select = select; + this.hint = hint == null ? HintNode.EMPTY_HINT_NODE : hint; + this.onDupKeyPairs = onDupKeyPairs; + } + + public List getColumns() { + return columns; + } + + public List getValues() { + return values; + } + + public SelectStatement getSelect() { + return select; + } + + public HintNode getHint() { + return hint; + } + + public List> getOnDupKeyPairs() { + return onDupKeyPairs; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java index abba30963f5..07058b1edfc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/UseSchemaStatement.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,19 +20,19 @@ import org.apache.phoenix.util.StringUtil; public class UseSchemaStatement extends MutableStatement { - private final String schemaName; + private final String schemaName; - public UseSchemaStatement(String schemaName) { - this.schemaName = schemaName == null ? StringUtil.EMPTY_STRING : schemaName; - } + public UseSchemaStatement(String schemaName) { + this.schemaName = schemaName == null ? 
StringUtil.EMPTY_STRING : schemaName; + } - @Override - public int getBindCount() { - return 0; - } + @Override + public int getBindCount() { + return 0; + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java index 70cdbd2892d..8633812407c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/WildcardParseNode.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,71 +21,62 @@ import org.apache.phoenix.compile.ColumnResolver; - - /** - * * Node representing the selection of all columns (*) in the SELECT clause of SQL - * - * * @since 0.1 */ public class WildcardParseNode extends TerminalParseNode { - public static final String NAME = "*"; - public static final WildcardParseNode INSTANCE = new WildcardParseNode(false); - public static final WildcardParseNode REWRITE_INSTANCE = new WildcardParseNode(true); + public static final String NAME = "*"; + public static final WildcardParseNode INSTANCE = new WildcardParseNode(false); + public static final WildcardParseNode REWRITE_INSTANCE = new WildcardParseNode(true); - private final boolean isRewrite; + private final boolean isRewrite; - private WildcardParseNode(boolean isRewrite) { - this.isRewrite = isRewrite; - } + private WildcardParseNode(boolean isRewrite) { + this.isRewrite = isRewrite; + } - @Override - public T accept(ParseNodeVisitor visitor) throws SQLException { - return visitor.visit(this); - } + @Override + public T accept(ParseNodeVisitor visitor) throws SQLException { + return visitor.visit(this); + } - public boolean isRewrite() { - return isRewrite; - } + public boolean isRewrite() { + return isRewrite; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (isRewrite ? 1231 : 1237); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (isRewrite ? 
1231 : 1237); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - WildcardParseNode other = (WildcardParseNode) obj; - if (isRewrite != other.isRewrite) - return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + WildcardParseNode other = (WildcardParseNode) obj; + if (isRewrite != other.isRewrite) return false; + return true; + } - @Override - public void toSQL(ColumnResolver resolver, StringBuilder buf) { - buf.append(' '); - buf.append(NAME); - buf.append(' '); - } + @Override + public void toSQL(ColumnResolver resolver, StringBuilder buf) { + buf.append(' '); + buf.append(NAME); + buf.append(' '); + } - @Override - public boolean isWildcardNode() { - return true; - } + @Override + public boolean isWildcardNode() { + return true; + } - @Override - public WildcardParseNode getRewritten() { - return REWRITE_INSTANCE; - } + @Override + public WildcardParseNode getRewritten() { + return REWRITE_INSTANCE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java index 66528dc5c92..573bac3d8f8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/protobuf/ProtobufUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,8 +34,7 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos; import org.apache.phoenix.coprocessor.generated.PTableProtos; import org.apache.phoenix.coprocessor.generated.ServerCachingProtos; -import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos - .TaskMutateRequest; +import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos.TaskMutateRequest; import org.apache.phoenix.schema.PTableType; import com.google.protobuf.ByteString; @@ -43,120 +42,120 @@ public class ProtobufUtil { - /** - * Stores an exception encountered during RPC invocation so it can be passed back through to the - * client. 
- * @param controller the controller instance provided by the client when calling the service - * @param ioe the exception encountered - */ - public static void setControllerException(RpcController controller, IOException ioe) { - if (controller != null) { - if (controller instanceof ServerRpcController) { - ((ServerRpcController) controller).setFailedOn(ioe); - } else { - controller.setFailed(StringUtils.stringifyException(ioe)); - } - } - } - - public static PTableProtos.PTableType toPTableTypeProto(PTableType type) { - return PTableProtos.PTableType.values()[type.ordinal()]; - } - - public static PTableType toPTableType(PTableProtos.PTableType type) { - return PTableType.values()[type.ordinal()]; - } - - public static List getMutations(MetaDataProtos.CreateTableRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.DropSchemaRequest request) throws IOException { - return getMutations(request.getSchemaMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.DropFunctionRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.CreateFunctionRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.DropTableRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.AddColumnRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.DropColumnRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.UpdateIndexStateRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(MetaDataProtos.CreateSchemaRequest request) throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(TaskMutateRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - public static List getMutations(CreateViewAddChildLinkRequest request) - throws IOException { - return getMutations(request.getTableMetadataMutationsList()); - } - - /** - * Each ByteString entry is a byte array serialized from MutationProto instance - * @param mutations - * @throws IOException - */ - private static List getMutations(List mutations) - throws IOException { - List result = new ArrayList(); - for (ByteString mutation : mutations) { - MutationProto mProto = MutationProto.parseFrom(mutation); - result.add(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto)); - } - return result; - } - - public static MutationProto toProto(Mutation mutation) throws IOException { - MutationType type; - if (mutation instanceof Put) { - type = MutationType.PUT; - } else if (mutation instanceof Delete) { - type = MutationType.DELETE; - } else { - throw new IllegalArgumentException("Only Put and Delete are supported"); - } - return org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(type, mutation); - } - - public static ServerCachingProtos.ImmutableBytesWritable toProto(ImmutableBytesWritable w) { - 
ServerCachingProtos.ImmutableBytesWritable.Builder builder = - ServerCachingProtos.ImmutableBytesWritable.newBuilder(); - builder.setByteArray(ByteStringer.wrap(w.get())); - builder.setOffset(w.getOffset()); - builder.setLength(w.getLength()); - return builder.build(); - } - - public static ImmutableBytesWritable toImmutableBytesWritable(ServerCachingProtos.ImmutableBytesWritable proto) { - return new ImmutableBytesWritable(proto.getByteArray().toByteArray(), proto.getOffset(), proto.getLength()); - } + /** + * Stores an exception encountered during RPC invocation so it can be passed back through to the + * client. + * @param controller the controller instance provided by the client when calling the service + * @param ioe the exception encountered + */ + public static void setControllerException(RpcController controller, IOException ioe) { + if (controller != null) { + if (controller instanceof ServerRpcController) { + ((ServerRpcController) controller).setFailedOn(ioe); + } else { + controller.setFailed(StringUtils.stringifyException(ioe)); + } + } + } + + public static PTableProtos.PTableType toPTableTypeProto(PTableType type) { + return PTableProtos.PTableType.values()[type.ordinal()]; + } + + public static PTableType toPTableType(PTableProtos.PTableType type) { + return PTableType.values()[type.ordinal()]; + } + + public static List getMutations(MetaDataProtos.CreateTableRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.DropSchemaRequest request) + throws IOException { + return getMutations(request.getSchemaMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.DropFunctionRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.CreateFunctionRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.DropTableRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.AddColumnRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.DropColumnRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.UpdateIndexStateRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(MetaDataProtos.CreateSchemaRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(TaskMutateRequest request) throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + public static List getMutations(CreateViewAddChildLinkRequest request) + throws IOException { + return getMutations(request.getTableMetadataMutationsList()); + } + + /** + * Each ByteString entry is a byte array serialized from MutationProto instance + */ + private static List getMutations(List mutations) throws IOException { + List result = new ArrayList(); + for (ByteString mutation : mutations) { + MutationProto mProto = MutationProto.parseFrom(mutation); + 
result.add(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto)); + } + return result; + } + + public static MutationProto toProto(Mutation mutation) throws IOException { + MutationType type; + if (mutation instanceof Put) { + type = MutationType.PUT; + } else if (mutation instanceof Delete) { + type = MutationType.DELETE; + } else { + throw new IllegalArgumentException("Only Put and Delete are supported"); + } + return org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(type, mutation); + } + + public static ServerCachingProtos.ImmutableBytesWritable toProto(ImmutableBytesWritable w) { + ServerCachingProtos.ImmutableBytesWritable.Builder builder = + ServerCachingProtos.ImmutableBytesWritable.newBuilder(); + builder.setByteArray(ByteStringer.wrap(w.get())); + builder.setOffset(w.getOffset()); + builder.setLength(w.getLength()); + return builder.build(); + } + + public static ImmutableBytesWritable + toImmutableBytesWritable(ServerCachingProtos.ImmutableBytesWritable proto) { + return new ImmutableBytesWritable(proto.getByteArray().toByteArray(), proto.getOffset(), + proto.getLength()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/AdminUtilWithFallback.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/AdminUtilWithFallback.java index 0402910df13..7972bf19988 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/AdminUtilWithFallback.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/AdminUtilWithFallback.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; import java.io.IOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.TableState; @@ -34,29 +30,32 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Admin utility class specifically useful for running Admin APIs in the middle of the - * rolling upgrade from HBase 1.x to 2.x versions. Some Admin APIs fail to retrieve - * table state details from meta table if master is running on 1.x version and coprocs are - * running on 2.x version. Hence, as a fallback, server side coproc can directly perform - * zookeeper look-up to retrieve table state data. The fallback use-cases would not be - * encountered for fully upgraded 2.x cluster. + * Admin utility class specifically useful for running Admin APIs in the middle of the rolling + * upgrade from HBase 1.x to 2.x versions. Some Admin APIs fail to retrieve table state details from + * meta table if master is running on 1.x version and coprocs are running on 2.x version. Hence, as + * a fallback, server side coproc can directly perform zookeeper look-up to retrieve table state + * data. The fallback use-cases would not be encountered for fully upgraded 2.x cluster. 
*/ public class AdminUtilWithFallback { private static final Logger LOG = LoggerFactory.getLogger(AdminUtilWithFallback.class); public static boolean tableExists(Admin admin, TableName tableName) - throws IOException, InterruptedException { + throws IOException, InterruptedException { try { return admin.tableExists(tableName); } catch (IOException e) { - if (e instanceof NoSuchColumnFamilyException || (e.getCause() != null - && e.getCause() instanceof NoSuchColumnFamilyException)) { + if ( + e instanceof NoSuchColumnFamilyException + || (e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException) + ) { LOG.warn("Admin API to retrieve table existence failed due to missing CF in meta." - + " This should happen only when HBase master is running on 1.x and" - + " current regionserver is on 2.x. Falling back to retrieve info from ZK.", e); + + " This should happen only when HBase master is running on 1.x and" + + " current regionserver is on 2.x. Falling back to retrieve info from ZK.", e); return getTableStateFromZk(tableName, admin) != null; } throw e; @@ -64,15 +63,15 @@ public static boolean tableExists(Admin admin, TableName tableName) } private static ZooKeeperProtos.DeprecatedTableState.State getTableState(ZKWatcher zkw, - TableName tableName) throws KeeperException, InterruptedException { + TableName tableName) throws KeeperException, InterruptedException { String znode = - ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); + ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); byte[] data = ZKUtil.getData(zkw, znode); if (data != null && data.length > 0) { try { ProtobufUtil.expectPBMagicPrefix(data); ZooKeeperProtos.DeprecatedTableState.Builder builder = - ZooKeeperProtos.DeprecatedTableState.newBuilder(); + ZooKeeperProtos.DeprecatedTableState.newBuilder(); int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen); return builder.getState(); @@ -89,30 +88,29 @@ private static ZooKeeperProtos.DeprecatedTableState.State getTableState(ZKWatche } private static TableState.State getTableStateFromZk(TableName tableName, Admin admin) - throws IOException, InterruptedException { - try (ZKWatcher zkWatcher = new ZKWatcher(admin.getConfiguration(), "phoenix-admin-fallback", - null)) { - ZooKeeperProtos.DeprecatedTableState.State state = - getTableState(zkWatcher, tableName); + throws IOException, InterruptedException { + try (ZKWatcher zkWatcher = + new ZKWatcher(admin.getConfiguration(), "phoenix-admin-fallback", null)) { + ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkWatcher, tableName); if (state == null) { return null; } TableState.State tableState; switch (state) { - case ENABLED: - tableState = TableState.State.ENABLED; - break; - case DISABLED: - tableState = TableState.State.DISABLED; - break; - case DISABLING: - tableState = TableState.State.DISABLING; - break; - case ENABLING: - tableState = TableState.State.ENABLING; - break; - default: - throw new IOException("ZK state inconsistent"); + case ENABLED: + tableState = TableState.State.ENABLED; + break; + case DISABLED: + tableState = TableState.State.DISABLED; + break; + case DISABLING: + tableState = TableState.State.DISABLING; + break; + case ENABLING: + tableState = TableState.State.ENABLING; + break; + default: + throw new IOException("ZK state inconsistent"); } return tableState; } catch (KeeperException e) { diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java index e46c2628df1..58631cf5b71 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,56 +25,48 @@ import org.apache.phoenix.optimize.QueryOptimizer; import org.apache.phoenix.util.ReadOnlyProps; - - /** - * * Base class for QueryService implementors. - * - * * @since 0.1 */ public abstract class BaseQueryServicesImpl implements QueryServices { - private final ThreadPoolExecutor executor; - private final MemoryManager memoryManager; - private final ReadOnlyProps props; - private final QueryOptimizer queryOptimizer; - - public BaseQueryServicesImpl(ReadOnlyProps defaultProps, QueryServicesOptions options) { - this.executor = JobManager.createThreadPoolExec( - options.getKeepAliveMs(), - options.getThreadPoolSize(), - options.getQueueSize(), - options.isGlobalMetricsEnabled()); - this.memoryManager = new GlobalMemoryManager( - Runtime.getRuntime().maxMemory() * options.getMaxMemoryPerc() / 100); - this.props = options.getProps(defaultProps); - this.queryOptimizer = new QueryOptimizer(this); - } - - @Override - public ThreadPoolExecutor getExecutor() { - return executor; - } + private final ThreadPoolExecutor executor; + private final MemoryManager memoryManager; + private final ReadOnlyProps props; + private final QueryOptimizer queryOptimizer; + + public BaseQueryServicesImpl(ReadOnlyProps defaultProps, QueryServicesOptions options) { + this.executor = JobManager.createThreadPoolExec(options.getKeepAliveMs(), + options.getThreadPoolSize(), options.getQueueSize(), options.isGlobalMetricsEnabled()); + this.memoryManager = + new GlobalMemoryManager(Runtime.getRuntime().maxMemory() * options.getMaxMemoryPerc() / 100); + this.props = options.getProps(defaultProps); + this.queryOptimizer = new QueryOptimizer(this); + } + + @Override + public ThreadPoolExecutor getExecutor() { + return executor; + } - @Override - public MemoryManager getMemoryManager() { - return memoryManager; - } + @Override + public MemoryManager getMemoryManager() { + return memoryManager; + } - @Override - public final ReadOnlyProps getProps() { - return props; - } + @Override + public final ReadOnlyProps getProps() { + return props; + } - @Override - public void close() { - // Do not shutdown the executor as it prevents the Driver from being able - // to attempt to open a connection in the future. - } + @Override + public void close() { + // Do not shutdown the executor as it prevents the Driver from being able + // to attempt to open a connection in the future. 
+ } - @Override - public QueryOptimizer getOptimizer() { - return queryOptimizer; - } + @Override + public QueryOptimizer getOptimizer() { + return queryOptimizer; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildLinkMetaDataServiceCallBack.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildLinkMetaDataServiceCallBack.java index ab16a02c637..6c49f2106bd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildLinkMetaDataServiceCallBack.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildLinkMetaDataServiceCallBack.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,70 +15,65 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; -import com.google.protobuf.RpcController; +import java.io.IOException; +import java.util.List; + import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; -import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos - .ChildLinkMetaDataService; -import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos - .CreateViewAddChildLinkRequest; +import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.ChildLinkMetaDataService; +import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.CreateViewAddChildLinkRequest; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse; import org.apache.phoenix.protobuf.ProtobufUtil; -import java.io.IOException; -import java.util.List; +import com.google.protobuf.RpcController; /** - * Callable implementation for coprocessor endpoint associated with - * SYSTEM.CHILD_LINK + * Callable implementation for coprocessor endpoint associated with SYSTEM.CHILD_LINK */ class ChildLinkMetaDataServiceCallBack - implements Batch.Call { + implements Batch.Call { - private final List childLinkMutations; - private final RpcController controller; + private final List childLinkMutations; + private final RpcController controller; - public ChildLinkMetaDataServiceCallBack(RpcController controller, List childLinkMutations) { - this.controller = controller; - this.childLinkMutations = childLinkMutations; - } + public ChildLinkMetaDataServiceCallBack(RpcController controller, + List childLinkMutations) { + this.controller = controller; + this.childLinkMutations = childLinkMutations; + } - @Override - public MetaDataResponse call(ChildLinkMetaDataService instance) - throws IOException { - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback<>(); - CreateViewAddChildLinkRequest.Builder builder = - CreateViewAddChildLinkRequest.newBuilder(); - for (Mutation mutation : childLinkMutations) { - MutationProto mp = ProtobufUtil.toProto(mutation); - builder.addTableMetadataMutations(mp.toByteString()); - } - CreateViewAddChildLinkRequest build = builder.build(); - 
instance.createViewAddChildLink(controller, build, rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); + @Override + public MetaDataResponse call(ChildLinkMetaDataService instance) throws IOException { + BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>(); + CreateViewAddChildLinkRequest.Builder builder = CreateViewAddChildLinkRequest.newBuilder(); + for (Mutation mutation : childLinkMutations) { + MutationProto mp = ProtobufUtil.toProto(mutation); + builder.addTableMetadataMutations(mp.toByteString()); } + CreateViewAddChildLinkRequest build = builder.build(); + instance.createViewAddChildLink(controller, build, rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } - private void checkForRemoteExceptions(RpcController controller) throws IOException { - if (controller != null) { - if (controller instanceof ServerRpcController) { - if (((ServerRpcController)controller).getFailedOn() != null) { - throw ((ServerRpcController)controller).getFailedOn(); - } - } else { - if (((HBaseRpcController)controller).getFailed() != null) { - throw ((HBaseRpcController)controller).getFailed(); - } - } + private void checkForRemoteExceptions(RpcController controller) throws IOException { + if (controller != null) { + if (controller instanceof ServerRpcController) { + if (((ServerRpcController) controller).getFailedOn() != null) { + throw ((ServerRpcController) controller).getFailedOn(); + } + } else { + if (((HBaseRpcController) controller).getFailed() != null) { + throw ((HBaseRpcController) controller).getFailed(); } + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildQueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildQueryServices.java index e6da4074651..eeb9788f8ab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildQueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ChildQueryServices.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,25 +21,23 @@ import org.apache.phoenix.memory.MemoryManager; /** - * - * Child QueryServices that delegates through to global QueryService. - * Used to track memory used by each org to allow a max percentage threshold. - * - * + * Child QueryServices that delegates through to global QueryService. Used to track memory used by + * each org to allow a max percentage threshold. 
* @since 0.1 */ public class ChildQueryServices extends DelegateConnectionQueryServices { - private final MemoryManager memoryManager; - private static final int DEFAULT_MAX_ORG_MEMORY_PERC = 30; - - public ChildQueryServices(ConnectionQueryServices services) { - super(services); - int maxOrgMemPerc = getProps().getInt(MAX_TENANT_MEMORY_PERC_ATTRIB, DEFAULT_MAX_ORG_MEMORY_PERC); - this.memoryManager = new ChildMemoryManager(services.getMemoryManager(), maxOrgMemPerc); - } + private final MemoryManager memoryManager; + private static final int DEFAULT_MAX_ORG_MEMORY_PERC = 30; + + public ChildQueryServices(ConnectionQueryServices services) { + super(services); + int maxOrgMemPerc = + getProps().getInt(MAX_TENANT_MEMORY_PERC_ATTRIB, DEFAULT_MAX_ORG_MEMORY_PERC); + this.memoryManager = new ChildMemoryManager(services.getMemoryManager(), maxOrgMemPerc); + } - @Override - public MemoryManager getMemoryManager() { - return memoryManager; - } + @Override + public MemoryManager getMemoryManager() { + return memoryManager; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConfigurationFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConfigurationFactory.java index 3e6f8344862..c83c5e80ad7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConfigurationFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConfigurationFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,40 +25,36 @@ /** * Creates {@link Configuration} instances that contain HBase/Hadoop settings. - * - * * @since 2.0 */ public interface ConfigurationFactory { - /** - * @return Configuration containing HBase/Hadoop settings - */ - Configuration getConfiguration(); + /** Returns Configuration containing HBase/Hadoop settings */ + Configuration getConfiguration(); - Configuration getConfiguration(Configuration conf); + Configuration getConfiguration(Configuration conf); - /** - * Default implementation uses {@link org.apache.hadoop.hbase.HBaseConfiguration#create()}. - */ - static class ConfigurationFactoryImpl implements ConfigurationFactory { + /** + * Default implementation uses {@link org.apache.hadoop.hbase.HBaseConfiguration#create()}. 
+ */ + static class ConfigurationFactoryImpl implements ConfigurationFactory { + @Override + public Configuration getConfiguration() { + return PhoenixContextExecutor.callWithoutPropagation(new Callable() { @Override - public Configuration getConfiguration() { - return PhoenixContextExecutor.callWithoutPropagation(new Callable() { - @Override - public Configuration call() throws Exception { - return HBaseConfiguration.create(); - } - }); + public Configuration call() throws Exception { + return HBaseConfiguration.create(); } + }); + } + @Override + public Configuration getConfiguration(final Configuration conf) { + return PhoenixContextExecutor.callWithoutPropagation(new Callable() { @Override - public Configuration getConfiguration(final Configuration conf) { - return PhoenixContextExecutor.callWithoutPropagation(new Callable() { - @Override - public Configuration call() throws Exception { - return HBaseConfiguration.create(conf); - } - }); + public Configuration call() throws Exception { + return HBaseConfiguration.create(conf); } + }); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java index 528534e0cb3..72f52019ee6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,239 +57,262 @@ import org.apache.phoenix.transaction.PhoenixTransactionClient; import org.apache.phoenix.transaction.TransactionFactory; - public interface ConnectionQueryServices extends QueryServices, MetaDataMutated { - public static final int INITIAL_META_DATA_TABLE_CAPACITY = 100; - - /** - * Get (and create if necessary) a child QueryService for a given tenantId. - * The QueryService will be cached for the lifetime of the parent QueryService - * @param tenantId the organization ID - * @return the child QueryService - */ - public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId); - - /** - * Get Table by the given name. It is the callers - * responsibility to close the returned Table reference. - * - * @param tableName the name of the HTable - * @return Table interface. It is caller's responsibility to close this - * returned Table reference. - * @throws SQLException - */ - public Table getTable(byte[] tableName) throws SQLException; - - /** - * Get Table by the given name. It is the responsibility of callers - * to close the returned Table interface. This method uses additional Admin - * API to ensure if table exists before returning Table interface from - * Connection. If table does not exist, this method will throw - * {@link org.apache.phoenix.schema.TableNotFoundException} - * It is caller's responsibility to close returned Table reference. - * - * @param tableName the name of the Table - * @return Table interface. It is caller's responsibility to close this - * returned Table reference. - * @throws SQLException If something goes wrong while retrieving table - * interface from connection managed by implementor. 
If table does not - * exist, {@link org.apache.phoenix.schema.TableNotFoundException} will - * be thrown. - */ - Table getTableIfExists(byte[] tableName) throws SQLException; - - public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException; - - public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException; - - /** - * Retrieve the region metadata locations for all regions of the given table. - * This method is Deprecated. Use {@link #getAllTableRegions(byte[], int)} instead. - * - * @param tableName The table name. - * @return The list of table region locations. - * @throws SQLException If fails to retrieve region locations. - */ - @Deprecated - public List getAllTableRegions(byte[] tableName) throws SQLException; - - /** - * Retrieve the region metadata locations for all regions of the given table. - * The operation to retrieve the table region locations must be completed within - * the query timeout. - * - * @param tableName Table name. - * @param queryTimeout Phoenix query timeout. - * @return The list of region locations. - * @throws SQLException If fails to retrieve region locations. - */ - public List getAllTableRegions(byte[] tableName, int queryTimeout) - throws SQLException; - - /** - * Retrieve table region locations that cover the startRowKey and endRowKey. The start key - * of the first region of the returned list must be less than or equal to startRowKey. - * The end key of the last region of the returned list must be greater than or equal to - * endRowKey. - * This method is Deprecated. Use {@link #getTableRegions(byte[], byte[], byte[], int)} instead. - * - * @param tableName Table name. - * @param startRowKey Start RowKey. - * @param endRowKey End RowKey. - * @return The list of region locations that cover the startRowKey and endRowKey key boundary. - * @throws SQLException If fails to retrieve region locations. - */ - @Deprecated - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey) throws SQLException; - - /** - * Retrieve table region locations that cover the startRowKey and endRowKey. The start key - * of the first region of the returned list must be less than or equal to startRowKey. - * The end key of the last region of the returned list must be greater than or equal to - * endRowKey. The operation to retrieve the table region locations must be completed within - * the query timeout. - * - * @param tableName Table name. - * @param startRowKey Start RowKey. - * @param endRowKey End RowKey. - * @param queryTimeout Phoenix query timeout. - * @return The list of region locations that cover the startRowKey and endRowKey key boundary. - * @throws SQLException If fails to retrieve region locations. 
- */ - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey, - int queryTimeout) throws SQLException; - - public PhoenixConnection connect(String url, Properties info) throws SQLException; - - /** - * @param tableTimestamp timestamp of table if its present in the client side cache - * @param clientTimetamp if the client connection has an scn, or of the table is transactional - * the txn write pointer - * @return PTable for the given tenant id, schema and table name - */ - public MetaDataMutationResult getTable(PName tenantId, byte[] schemaName, byte[] tableName, - long tableTimestamp, long clientTimetamp) throws SQLException; - public MetaDataMutationResult getFunctions(PName tenantId, List> functionNameAndTimeStampPairs, long clientTimestamp) throws SQLException; - - public MetaDataMutationResult createTable(List tableMetaData, byte[] tableName, PTableType tableType, - Map tableProps, - List>> families, byte[][] splits, - boolean isNamespaceMapped, boolean allocateIndexId, - boolean isDoNotUpgradePropSet, PTable parentTable) throws SQLException; - public MetaDataMutationResult dropTable(List tableMetadata, PTableType tableType, boolean cascade) throws SQLException; - public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) throws SQLException; - public MetaDataMutationResult addColumn(List tableMetaData, - PTable table, - PTable parentTable, - PTable transformingNewTable, - Map>> properties, - Set colFamiliesForPColumnsToBeAdded, - List columns) throws SQLException; - public MetaDataMutationResult dropColumn(List tableMetadata, - PTableType tableType, PTable parentTable) throws SQLException; - public MetaDataMutationResult updateIndexState(List tableMetadata, String parentTableName) throws SQLException; - public MetaDataMutationResult updateIndexState(List tableMetadata, String parentTableName, Map>> stmtProperties, PTable table) throws SQLException; - - public MutationState updateData(MutationPlan plan) throws SQLException; - - public void init(String url, Properties props) throws SQLException; - - public int getLowestClusterHBaseVersion(); - public Admin getAdmin() throws SQLException; - void refreshLiveRegionServers() throws SQLException; - List getLiveRegionServers(); - - void clearTableRegionCache(TableName name) throws SQLException; - - boolean hasIndexWALCodec(); - - long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) throws SQLException; - long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException; - void validateSequences(List sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException; - void incrementSequences(List sequenceAllocation, long timestamp, long[] values, SQLException[] exceptions) throws SQLException; - long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException; - void returnSequences(List sequenceKeys, long timestamp, SQLException[] exceptions) throws SQLException; - - MetaDataMutationResult createFunction(List functionData, PFunction function, boolean temporary) throws SQLException; - void addConnection(PhoenixConnection connection) throws SQLException; - void removeConnection(PhoenixConnection connection) throws SQLException; - - /** - * @return the {@link KeyValueBuilder} that is valid for the locally installed version of HBase. 
- */ - public KeyValueBuilder getKeyValueBuilder(); - - public enum Feature {LOCAL_INDEX, RENEW_LEASE}; - public boolean supportsFeature(Feature feature); - - public String getUserName(); - public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException; - - public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException; - /** - * Removes cache {@link GuidePostsInfo} for the table with the given name. If no cached guideposts are present, this does nothing. - * - * @param key - */ - void invalidateStats(GuidePostsKey key); - - - public long clearCache() throws SQLException; - public int getSequenceSaltBuckets(); - - public long getRenewLeaseThresholdMilliSeconds(); - public boolean isRenewingLeasesEnabled(); - - public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) throws SQLException; - - MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) throws SQLException; - - public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) throws SQLException; - - boolean isUpgradeRequired(); - void clearUpgradeRequired(); - void upgradeSystemTables(String url, Properties props) throws SQLException; - - public Configuration getConfiguration(); - - public User getUser(); - - public QueryLoggerDisruptor getQueryDisruptor(); - - public PhoenixTransactionClient initTransactionClient(TransactionFactory.Provider provider) throws SQLException; - - /** - * Writes a cell to SYSTEM.MUTEX using checkAndPut to ensure only a single client can execute a - * particular task. The params are used to generate the rowkey. - * @return true if this client was able to successfully acquire the mutex - */ - public boolean writeMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException; - - /** - * Deletes a cell that was written to SYSTEM.MUTEX. The params are used to generate the rowkey. - */ - public void deleteMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException; - - /** - * Close all phoenix connections created using this CQS. - * - * @param reasonBuilder exception builder for building reasons why connection is closed. - */ - default void closeAllConnections(SQLExceptionInfo.Builder reasonBuilder) { - throw new UnsupportedOperationException(); - } - - PMetaData getMetaDataCache(); - public default ConnectionLimiter getConnectionLimiter() { - throw new UnsupportedOperationException(); - } - - int getConnectionCount(boolean isInternal); - - void invalidateServerMetadataCache(List requests) - throws Throwable; + public static final int INITIAL_META_DATA_TABLE_CAPACITY = 100; + + /** + * Get (and create if necessary) a child QueryService for a given tenantId. The QueryService will + * be cached for the lifetime of the parent QueryService + * @param tenantId the organization ID + * @return the child QueryService + */ + public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId); + + /** + * Get Table by the given name. It is the callers responsibility to close the returned Table + * reference. + * @param tableName the name of the HTable + * @return Table interface. It is caller's responsibility to close this returned Table reference. + */ + public Table getTable(byte[] tableName) throws SQLException; + + /** + * Get Table by the given name. 
It is the responsibility of callers to close the returned Table + * interface. This method uses additional Admin API to ensure if table exists before returning + * Table interface from Connection. If table does not exist, this method will throw + * {@link org.apache.phoenix.schema.TableNotFoundException} It is caller's responsibility to close + * returned Table reference. + * @param tableName the name of the Table + * @return Table interface. It is caller's responsibility to close this returned Table reference. + * @throws SQLException If something goes wrong while retrieving table interface from connection + * managed by implementor. If table does not exist, + * {@link org.apache.phoenix.schema.TableNotFoundException} will be thrown. + */ + Table getTableIfExists(byte[] tableName) throws SQLException; + + public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException; + + public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException; + + /** + * Retrieve the region metadata locations for all regions of the given table. This method is + * Deprecated. Use {@link #getAllTableRegions(byte[], int)} instead. + * @param tableName The table name. + * @return The list of table region locations. + * @throws SQLException If fails to retrieve region locations. + */ + @Deprecated + public List getAllTableRegions(byte[] tableName) throws SQLException; + + /** + * Retrieve the region metadata locations for all regions of the given table. The operation to + * retrieve the table region locations must be completed within the query timeout. + * @param tableName Table name. + * @param queryTimeout Phoenix query timeout. + * @return The list of region locations. + * @throws SQLException If fails to retrieve region locations. + */ + public List getAllTableRegions(byte[] tableName, int queryTimeout) + throws SQLException; + + /** + * Retrieve table region locations that cover the startRowKey and endRowKey. The start key of the + * first region of the returned list must be less than or equal to startRowKey. The end key of the + * last region of the returned list must be greater than or equal to endRowKey. This method is + * Deprecated. Use {@link #getTableRegions(byte[], byte[], byte[], int)} instead. + * @param tableName Table name. + * @param startRowKey Start RowKey. + * @param endRowKey End RowKey. + * @return The list of region locations that cover the startRowKey and endRowKey key boundary. + * @throws SQLException If fails to retrieve region locations. + */ + @Deprecated + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey) throws SQLException; + + /** + * Retrieve table region locations that cover the startRowKey and endRowKey. The start key of the + * first region of the returned list must be less than or equal to startRowKey. The end key of the + * last region of the returned list must be greater than or equal to endRowKey. The operation to + * retrieve the table region locations must be completed within the query timeout. + * @param tableName Table name. + * @param startRowKey Start RowKey. + * @param endRowKey End RowKey. + * @param queryTimeout Phoenix query timeout. + * @return The list of region locations that cover the startRowKey and endRowKey key boundary. + * @throws SQLException If fails to retrieve region locations. 
+ */ + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey, int queryTimeout) throws SQLException; + + public PhoenixConnection connect(String url, Properties info) throws SQLException; + + /** + * @param tableTimestamp timestamp of table if its present in the client side cache + * @param clientTimetamp if the client connection has an scn, or of the table is transactional the + * txn write pointer + * @return PTable for the given tenant id, schema and table name + */ + public MetaDataMutationResult getTable(PName tenantId, byte[] schemaName, byte[] tableName, + long tableTimestamp, long clientTimetamp) throws SQLException; + + public MetaDataMutationResult getFunctions(PName tenantId, + List> functionNameAndTimeStampPairs, long clientTimestamp) + throws SQLException; + + public MetaDataMutationResult createTable(List tableMetaData, byte[] tableName, + PTableType tableType, Map tableProps, + List>> families, byte[][] splits, boolean isNamespaceMapped, + boolean allocateIndexId, boolean isDoNotUpgradePropSet, PTable parentTable) throws SQLException; + + public MetaDataMutationResult dropTable(List tableMetadata, PTableType tableType, + boolean cascade) throws SQLException; + + public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) + throws SQLException; + + public MetaDataMutationResult addColumn(List tableMetaData, PTable table, + PTable parentTable, PTable transformingNewTable, + Map>> properties, Set colFamiliesForPColumnsToBeAdded, + List columns) throws SQLException; + + public MetaDataMutationResult dropColumn(List tableMetadata, PTableType tableType, + PTable parentTable) throws SQLException; + + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName) throws SQLException; + + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName, Map>> stmtProperties, PTable table) + throws SQLException; + + public MutationState updateData(MutationPlan plan) throws SQLException; + + public void init(String url, Properties props) throws SQLException; + + public int getLowestClusterHBaseVersion(); + + public Admin getAdmin() throws SQLException; + + void refreshLiveRegionServers() throws SQLException; + + List getLiveRegionServers(); + + void clearTableRegionCache(TableName name) throws SQLException; + + boolean hasIndexWALCodec(); + + long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, + long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) + throws SQLException; + + long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) + throws SQLException; + + void validateSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException; + + void incrementSequences(List sequenceAllocation, long timestamp, + long[] values, SQLException[] exceptions) throws SQLException; + + long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException; + + void returnSequences(List sequenceKeys, long timestamp, SQLException[] exceptions) + throws SQLException; + + MetaDataMutationResult createFunction(List functionData, PFunction function, + boolean temporary) throws SQLException; + + void addConnection(PhoenixConnection connection) throws SQLException; + + void removeConnection(PhoenixConnection connection) throws SQLException; + + /** + * Returns the {@link KeyValueBuilder} that is 
valid for the locally installed version of HBase. + */ + public KeyValueBuilder getKeyValueBuilder(); + + public enum Feature { + LOCAL_INDEX, + RENEW_LEASE + }; + + public boolean supportsFeature(Feature feature); + + public String getUserName(); + + public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, + final byte[] tableName, long clientTS) throws SQLException; + + public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException; + + /** + * Removes cache {@link GuidePostsInfo} for the table with the given name. If no cached guideposts + * are present, this does nothing. + */ + void invalidateStats(GuidePostsKey key); + + public long clearCache() throws SQLException; + + public int getSequenceSaltBuckets(); + + public long getRenewLeaseThresholdMilliSeconds(); + + public boolean isRenewingLeasesEnabled(); + + public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) + throws SQLException; + + MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) throws SQLException; + + public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) + throws SQLException; + + boolean isUpgradeRequired(); + + void clearUpgradeRequired(); + + void upgradeSystemTables(String url, Properties props) throws SQLException; + + public Configuration getConfiguration(); + + public User getUser(); + + public QueryLoggerDisruptor getQueryDisruptor(); + + public PhoenixTransactionClient initTransactionClient(TransactionFactory.Provider provider) + throws SQLException; + + /** + * Writes a cell to SYSTEM.MUTEX using checkAndPut to ensure only a single client can execute a + * particular task. The params are used to generate the rowkey. + * @return true if this client was able to successfully acquire the mutex + */ + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException; + + /** + * Deletes a cell that was written to SYSTEM.MUTEX. The params are used to generate the rowkey. + */ + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException; + + /** + * Close all phoenix connections created using this CQS. + * @param reasonBuilder exception builder for building reasons why connection is closed. + */ + default void closeAllConnections(SQLExceptionInfo.Builder reasonBuilder) { + throw new UnsupportedOperationException(); + } + + PMetaData getMetaDataCache(); + + public default ConnectionLimiter getConnectionLimiter() { + throw new UnsupportedOperationException(); + } + + int getConnectionCount(boolean isInternal); + + void invalidateServerMetadataCache(List requests) + throws Throwable; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java index b69b2c06d38..48627f66661 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.phoenix.query; + import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS; import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.MAX_VERSIONS; @@ -47,12 +48,13 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_HBASE_TABLE_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME; @@ -69,10 +71,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME; +import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HCONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_SERVICES_COUNTER; -import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY; import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES; import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS; import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS; @@ -138,8 +139,6 @@ import javax.annotation.concurrent.GuardedBy; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -160,8 +159,8 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -175,8 +174,8 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.ipc.controller.InvalidateMetadataCacheControllerFactory; -import org.apache.hadoop.hbase.ipc.controller.ServerToServerRpcController; import org.apache.hadoop.hbase.ipc.controller.ServerSideRPCControllerFactory; +import org.apache.hadoop.hbase.ipc.controller.ServerToServerRpcController; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; @@ -212,21 +211,21 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest; import org.apache.phoenix.coprocessor.generated.RegionServerEndpointProtos; import org.apache.phoenix.coprocessorclient.InvalidateServerMetadataCacheRequest; -import org.apache.phoenix.coprocessorclient.metrics.MetricsMetadataCachingSource; -import org.apache.phoenix.coprocessorclient.metrics.MetricsPhoenixCoprocessorSourceFactory; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MetaDataMutationResult; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; import org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants; +import org.apache.phoenix.coprocessorclient.metrics.MetricsMetadataCachingSource; +import org.apache.phoenix.coprocessorclient.metrics.MetricsPhoenixCoprocessorSourceFactory; import org.apache.phoenix.exception.InvalidRegionSplitPolicyException; import org.apache.phoenix.exception.PhoenixIOException; import org.apache.phoenix.exception.RetriableUpgradeException; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.exception.UpgradeBlockedException; import org.apache.phoenix.exception.UpgradeInProgressException; import org.apache.phoenix.exception.UpgradeNotRequiredException; import org.apache.phoenix.exception.UpgradeRequiredException; -import org.apache.phoenix.exception.UpgradeBlockedException; import org.apache.phoenix.execute.MutationState; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.hbase.index.util.VersionUtil; @@ -284,6 +283,17 @@ import org.apache.phoenix.schema.types.PUnsignedTinyint; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import 
org.apache.phoenix.transaction.PhoenixTransactionClient; import org.apache.phoenix.transaction.PhoenixTransactionContext; @@ -311,6390 +321,6501 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; -public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices { - private static final Logger LOGGER = - LoggerFactory.getLogger(ConnectionQueryServicesImpl.class); - private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100; - private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000; - private static final String ALTER_TABLE_SET_PROPS = - "ALTER TABLE %s SET %s=%s"; - private final GuidePostsCacheProvider - GUIDE_POSTS_CACHE_PROVIDER = new GuidePostsCacheProvider(); - protected final Configuration config; - - public ConnectionInfo getConnectionInfo() { - return connectionInfo; - } - - protected final ConnectionInfo connectionInfo; - // Copy of config.getProps(), but read-only to prevent synchronization that we - // don't need. - private final ReadOnlyProps props; - private final String userName; - private final User user; - private final ConcurrentHashMap childServices; - private final GuidePostsCacheWrapper tableStatsCache; - - // Cache the latest meta data here for future connections - // writes guarded by "latestMetaDataLock" - private volatile PMetaData latestMetaData; - private final Object latestMetaDataLock = new Object(); - - // Lowest HBase version on the cluster. 
- private int lowestClusterHBaseVersion = Integer.MAX_VALUE; - private boolean hasIndexWALCodec = true; - - @GuardedBy("connectionCountLock") - private int connectionCount = 0; - - @GuardedBy("connectionCountLock") - private int internalConnectionCount = 0; - - private final Object connectionCountLock = new Object(); - private final boolean returnSequenceValues ; - - private Connection connection; - private volatile boolean initialized; - private volatile int nSequenceSaltBuckets; - - // writes guarded by "this" - private volatile boolean closed; - - private volatile SQLException initializationException; - // setting this member variable guarded by "connectionCountLock" - private volatile ConcurrentMap sequenceMap = Maps.newConcurrentMap(); - private KeyValueBuilder kvBuilder; - - private final int renewLeaseTaskFrequency; - private final int renewLeasePoolSize; - private final int renewLeaseThreshold; - // List of queues instead of a single queue to provide reduced contention via lock striping - private final List>> connectionQueues; - private ScheduledExecutorService renewLeaseExecutor; - // Use TransactionFactory.Provider.values() here not TransactionFactory.Provider.available() - // because the array will be indexed by ordinal. - private PhoenixTransactionClient[] txClients = new PhoenixTransactionClient[Provider.values().length]; +public class ConnectionQueryServicesImpl extends DelegateQueryServices + implements ConnectionQueryServices { + private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class); + private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100; + private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000; + private static final String ALTER_TABLE_SET_PROPS = "ALTER TABLE %s SET %s=%s"; + private final GuidePostsCacheProvider GUIDE_POSTS_CACHE_PROVIDER = new GuidePostsCacheProvider(); + protected final Configuration config; + + public ConnectionInfo getConnectionInfo() { + return connectionInfo; + } + + protected final ConnectionInfo connectionInfo; + // Copy of config.getProps(), but read-only to prevent synchronization that we + // don't need. + private final ReadOnlyProps props; + private final String userName; + private final User user; + private final ConcurrentHashMap childServices; + private final GuidePostsCacheWrapper tableStatsCache; + + // Cache the latest meta data here for future connections + // writes guarded by "latestMetaDataLock" + private volatile PMetaData latestMetaData; + private final Object latestMetaDataLock = new Object(); + + // Lowest HBase version on the cluster. 
+ private int lowestClusterHBaseVersion = Integer.MAX_VALUE; + private boolean hasIndexWALCodec = true; + + @GuardedBy("connectionCountLock") + private int connectionCount = 0; + + @GuardedBy("connectionCountLock") + private int internalConnectionCount = 0; + + private final Object connectionCountLock = new Object(); + private final boolean returnSequenceValues; + + private Connection connection; + private volatile boolean initialized; + private volatile int nSequenceSaltBuckets; + + // writes guarded by "this" + private volatile boolean closed; + + private volatile SQLException initializationException; + // setting this member variable guarded by "connectionCountLock" + private volatile ConcurrentMap sequenceMap = Maps.newConcurrentMap(); + private KeyValueBuilder kvBuilder; + + private final int renewLeaseTaskFrequency; + private final int renewLeasePoolSize; + private final int renewLeaseThreshold; + // List of queues instead of a single queue to provide reduced contention via lock striping + private final List>> connectionQueues; + private ScheduledExecutorService renewLeaseExecutor; + // Use TransactionFactory.Provider.values() here not TransactionFactory.Provider.available() + // because the array will be indexed by ordinal. + private PhoenixTransactionClient[] txClients = + new PhoenixTransactionClient[Provider.values().length]; + /* + * We can have multiple instances of ConnectionQueryServices. By making the thread factory static, + * renew lease thread names will be unique across them. + */ + private static final ThreadFactory renewLeaseThreadFactory = new RenewLeaseThreadFactory(); + private final boolean renewLeaseEnabled; + private final boolean isAutoUpgradeEnabled; + private final AtomicBoolean upgradeRequired = new AtomicBoolean(false); + private final int maxConnectionsAllowed; + private final int maxInternalConnectionsAllowed; + private final boolean shouldThrottleNumConnections; + public static final byte[] MUTEX_LOCKED = "MUTEX_LOCKED".getBytes(StandardCharsets.UTF_8); + private ServerSideRPCControllerFactory serverSideRPCControllerFactory; + private boolean localIndexUpgradeRequired; + + private final boolean enableConnectionActivityLogging; + private final int loggingIntervalInMins; + + private final ConnectionLimiter connectionLimiter; + + // writes guarded by "liveRegionServersLock" + private volatile List liveRegionServers; + private final Object liveRegionServersLock = new Object(); + // Writes guarded by invalidateMetadataCacheConnLock + private Connection invalidateMetadataCacheConnection = null; + private final Object invalidateMetadataCacheConnLock = new Object(); + private MetricsMetadataCachingSource metricsMetadataCachingSource; + public static final String INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE = + "Cannot invalidate server metadata cache on a non-server connection"; + + private static interface FeatureSupported { + boolean isSupported(ConnectionQueryServices services); + } + + private final Map featureMap = + ImmutableMap. 
of(Feature.LOCAL_INDEX, new FeatureSupported() { + @Override + public boolean isSupported(ConnectionQueryServices services) { + int hbaseVersion = services.getLowestClusterHBaseVersion(); + return hbaseVersion < MetaDataProtocol.MIN_LOCAL_SI_VERSION_DISALLOW + || hbaseVersion > MetaDataProtocol.MAX_LOCAL_SI_VERSION_DISALLOW; + } + }, Feature.RENEW_LEASE, new FeatureSupported() { + @Override + public boolean isSupported(ConnectionQueryServices services) { + int hbaseVersion = services.getLowestClusterHBaseVersion(); + return hbaseVersion >= MetaDataProtocol.MIN_RENEW_LEASE_VERSION; + } + }); + private QueryLoggerDisruptor queryDisruptor; + + private PMetaData newEmptyMetaData() { + return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, + (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY + .getValue(getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)), + getProps()); + } + + /** + * Construct a ConnectionQueryServicesImpl that represents a connection to an HBase cluster. + * @param services base services from where we derive our default configuration + * @param connectionInfo to provide connection information + * @param info hbase configuration properties + */ + public ConnectionQueryServicesImpl(QueryServices services, ConnectionInfo connectionInfo, + Properties info) { + super(services); + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + for (Entry entry : services.getProps()) { + config.set(entry.getKey(), entry.getValue()); + } + if (info != null) { + for (Object key : info.keySet()) { + config.set((String) key, info.getProperty((String) key)); + } + } + for (Entry entry : connectionInfo.asProps()) { + config.set(entry.getKey(), entry.getValue()); + } + if (connectionInfo.getPrincipal() != null) { + config.set(QUERY_SERVICES_NAME, connectionInfo.getPrincipal()); + } + LOGGER.info(String.format("CQS initialized with connection query service : %s", + config.get(QUERY_SERVICES_NAME))); + this.connectionInfo = connectionInfo; + + // Without making a copy of the configuration we cons up, we lose some of our properties + // on the server side during testing. + this.config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config); + // Set the rpcControllerFactory if it is a server side connnection. + boolean isServerSideConnection = config.getBoolean(QueryUtil.IS_SERVER_CONNECTION, false); + if (isServerSideConnection) { + this.serverSideRPCControllerFactory = new ServerSideRPCControllerFactory(config); + } + // set replication required parameter + ConfigUtil.setReplicationConfigIfAbsent(this.config); + this.props = new ReadOnlyProps(this.config.iterator()); + this.userName = connectionInfo.getPrincipal(); + this.user = connectionInfo.getUser(); + this.latestMetaData = newEmptyMetaData(); + // TODO: should we track connection wide memory usage or just org-wide usage? 
+ // If connection-wide, create a MemoryManager here, otherwise just use the one from the delegate + this.childServices = new ConcurrentHashMap( + INITIAL_CHILD_SERVICES_CAPACITY); + // find the HBase version and use that to determine the KeyValueBuilder that should be used + String hbaseVersion = VersionInfo.getVersion(); + this.kvBuilder = KeyValueBuilder.get(hbaseVersion); + this.returnSequenceValues = props.getBoolean(QueryServices.RETURN_SEQUENCE_VALUES_ATTRIB, + QueryServicesOptions.DEFAULT_RETURN_SEQUENCE_VALUES); + this.renewLeaseEnabled = config.getBoolean(RENEW_LEASE_ENABLED, DEFAULT_RENEW_LEASE_ENABLED); + this.renewLeasePoolSize = + config.getInt(RENEW_LEASE_THREAD_POOL_SIZE, DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE); + this.renewLeaseThreshold = + config.getInt(RENEW_LEASE_THRESHOLD_MILLISECONDS, DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS); + this.renewLeaseTaskFrequency = config.getInt(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, + DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS); + List>> list = + Lists.newArrayListWithCapacity(renewLeasePoolSize); + for (int i = 0; i < renewLeasePoolSize; i++) { + LinkedBlockingQueue> queue = + new LinkedBlockingQueue>(); + list.add(queue); + } + connectionQueues = ImmutableList.copyOf(list); + + // A little bit of a smell to leak `this` here, but should not be a problem + this.tableStatsCache = + GUIDE_POSTS_CACHE_PROVIDER.getGuidePostsCache(props.get(GUIDE_POSTS_CACHE_FACTORY_CLASS, + QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS), this, config); + + this.isAutoUpgradeEnabled = + config.getBoolean(AUTO_UPGRADE_ENABLED, QueryServicesOptions.DEFAULT_AUTO_UPGRADE_ENABLED); + this.maxConnectionsAllowed = + config.getInt(QueryServices.CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS, + QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS); + this.maxInternalConnectionsAllowed = + config.getInt(QueryServices.INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS, + QueryServicesOptions.DEFAULT_INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS); + this.shouldThrottleNumConnections = + (maxConnectionsAllowed > 0) || (maxInternalConnectionsAllowed > 0); + this.enableConnectionActivityLogging = config.getBoolean(CONNECTION_ACTIVITY_LOGGING_ENABLED, + QueryServicesOptions.DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED); + this.loggingIntervalInMins = config.getInt(CONNECTION_ACTIVITY_LOGGING_INTERVAL, + QueryServicesOptions.DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS); + + if (enableConnectionActivityLogging) { + LoggingConnectionLimiter.Builder builder = + new LoggingConnectionLimiter.Builder(shouldThrottleNumConnections); + connectionLimiter = builder.withLoggingIntervalInMins(loggingIntervalInMins).withLogging(true) + .withMaxAllowed(this.maxConnectionsAllowed) + .withMaxInternalAllowed(this.maxInternalConnectionsAllowed).build(); + } else { + DefaultConnectionLimiter.Builder builder = + new DefaultConnectionLimiter.Builder(shouldThrottleNumConnections); + connectionLimiter = builder.withMaxAllowed(this.maxConnectionsAllowed) + .withMaxInternalAllowed(this.maxInternalConnectionsAllowed).build(); + } + + if (!QueryUtil.isServerConnection(props)) { + // Start queryDistruptor everytime as log level can be change at connection level as well, but + // we can avoid starting for server connections. 
+ try { + this.queryDisruptor = new QueryLoggerDisruptor(this.config); + } catch (SQLException e) { + LOGGER.warn("Unable to initiate query logging service !!"); + e.printStackTrace(); + } + } + nSequenceSaltBuckets = config.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB, + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + this.metricsMetadataCachingSource = + MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource(); + } + + private Connection openConnection(Configuration conf) throws SQLException { + Connection localConnection; + try { + localConnection = HBaseFactoryProvider.getHConnectionFactory().createConnection(conf); + GLOBAL_HCONNECTIONS_COUNTER.increment(); + LOGGER.info("HConnection established. Stacktrace for informational purposes: " + + localConnection + " " + LogUtil.getCallerStackTrace()); + } catch (IOException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) + .setRootCause(e).build().buildException(); + } + if (localConnection.isClosed()) { // TODO: why the heck doesn't this throw above? + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION).build() + .buildException(); + } + return localConnection; + } + + /** + * We create a long-lived hbase connection to run invalidate cache RPCs. We override + * CUSTOM_CONTROLLER_CONF_KEY to instantiate InvalidateMetadataCacheController which has a special + * priority for invalidate metadata cache operations. + * @return hbase connection + * @throws SQLException SQLException + */ + public Connection getInvalidateMetadataCacheConnection() throws SQLException { + if (invalidateMetadataCacheConnection != null) { + return invalidateMetadataCacheConnection; + } + + synchronized (invalidateMetadataCacheConnLock) { + Configuration clonedConfiguration = PropertiesUtil.cloneConfig(this.config); + clonedConfiguration.setClass(CUSTOM_CONTROLLER_CONF_KEY, + InvalidateMetadataCacheControllerFactory.class, RpcControllerFactory.class); + invalidateMetadataCacheConnection = openConnection(clonedConfiguration); + } + return invalidateMetadataCacheConnection; + } + + /** + * Close the HBase connection and decrement the counter. + * @throws IOException throws IOException + */ + private void closeConnection(Connection connection) throws IOException { + if (connection != null) { + connection.close(); + LOGGER.info("{} HConnection closed. 
Stacktrace for informational" + " purposes: {}", + connection, LogUtil.getCallerStackTrace()); + GLOBAL_HCONNECTIONS_COUNTER.decrement(); + } + } + + @Override + public Table getTable(byte[] tableName) throws SQLException { + try { + return HBaseFactoryProvider.getHTableFactory().getTable(tableName, connection, null); + } catch (IOException e) { + throw new SQLException(e); + } + } + + @Override + public Table getTableIfExists(byte[] tableName) throws SQLException { + try (Admin admin = getAdmin()) { + if (!AdminUtilWithFallback.tableExists(admin, TableName.valueOf(tableName))) { + throw new TableNotFoundException(SchemaUtil.getSchemaNameFromFullName(tableName), + SchemaUtil.getTableNameFromFullName(tableName)); + } + } catch (IOException | InterruptedException e) { + throw new SQLException(e); + } + return getTable(tableName); + } + + @Override + public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { + Table htable = getTable(tableName); + try { + return htable.getDescriptor(); + } catch (IOException e) { + if ( + e instanceof org.apache.hadoop.hbase.TableNotFoundException + || e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException + ) { + byte[][] schemaAndTableName = new byte[2][]; + SchemaUtil.getVarChars(tableName, schemaAndTableName); + throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]), + Bytes.toString(schemaAndTableName[1])); + } + throw new RuntimeException(e); + } finally { + Closeables.closeQuietly(htable); + } + } + + @Override + public ReadOnlyProps getProps() { + return props; + } + + /** + * Closes all the connections it has in its connectionQueues. + */ + @Override + public void closeAllConnections(SQLExceptionInfo.Builder reasonBuilder) { + for (LinkedBlockingQueue> queue : connectionQueues) { + for (WeakReference connectionReference : queue) { + PhoenixConnection connection = connectionReference.get(); + try { + if (connection != null && !connection.isClosed()) { + connection.close(reasonBuilder.build().buildException()); + } + } catch (SQLException e) { + LOGGER.warn("Exception while closing phoenix connection {}", connection, e); + } + } + } + } + + /** + * Closes the underlying connection to zookeeper. The QueryServices may not be used after that + * point. When a Connection is closed, this is not called, since these instances are pooled by the + * Driver. Instead, the Driver should call this if the QueryServices is ever removed from the pool + */ + @Override + public void close() throws SQLException { + if (closed) { + return; + } + synchronized (this) { + if (closed) { + return; + } + closed = true; + GLOBAL_QUERY_SERVICES_COUNTER.decrement(); + try { + if (this.queryDisruptor != null) { + this.queryDisruptor.close(); + } + } catch (Exception e) { + // Ignore + } + SQLException sqlE = null; + try { + // Attempt to return any unused sequences. + if (connection != null) returnAllSequences(this.sequenceMap); + } catch (SQLException e) { + sqlE = e; + } finally { + try { + childServices.clear(); + synchronized (latestMetaDataLock) { + latestMetaData = null; + latestMetaDataLock.notifyAll(); + } + try { + // close HBase connections. 
+ closeConnection(this.connection); + closeConnection(this.invalidateMetadataCacheConnection); + } finally { + if (renewLeaseExecutor != null) { + renewLeaseExecutor.shutdownNow(); + } + // shut down the tx client service if we created one to support transactions + for (PhoenixTransactionClient client : txClients) { + if (client != null) { + client.close(); + } + } + } + } catch (IOException e) { + if (sqlE == null) { + sqlE = ClientUtil.parseServerException(e); + } else { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } + } finally { + try { + tableStatsCache.invalidateAll(); + super.close(); + } catch (SQLException e) { + if (sqlE == null) { + sqlE = e; + } else { + sqlE.setNextException(e); + } + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + } + } + } + + protected ConnectionQueryServices newChildQueryService() { + return new ChildQueryServices(this); + } + + /** + * Get (and create if necessary) a child QueryService for a given tenantId. The QueryService will + * be cached for the lifetime of the parent QueryService + * @param tenantId the tenant ID + * @return the child QueryService + */ + @Override + public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId) { + ConnectionQueryServices childQueryService = childServices.get(tenantId); + if (childQueryService == null) { + childQueryService = newChildQueryService(); + ConnectionQueryServices prevQueryService = + childServices.putIfAbsent(tenantId, childQueryService); + return prevQueryService == null ? childQueryService : prevQueryService; + } + return childQueryService; + } + + @Override + public void clearTableRegionCache(TableName tableName) throws SQLException { + ((ClusterConnection) connection).clearRegionCache(tableName); + } + + public byte[] getNextRegionStartKey(HRegionLocation regionLocation, byte[] currentKey, + HRegionLocation prevRegionLocation) { + // in order to check the overlap/inconsistencies bad region info, we have to make sure + // the current endKey always increasing(compare the previous endKey) + + // conditionOne = true if the currentKey does not belong to the region boundaries specified + // by regionLocation i.e. if the currentKey is less than the region startKey or if the + // currentKey is greater than or equal to the region endKey. + + // conditionTwo = true if the previous region endKey is either not same as current region + // startKey or if the previous region endKey is greater than or equal to current region + // endKey. 
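// Illustrative example (assumption added for clarity; not part of the original spotless change):
// if the previous region covers [a, c) and the current lookup returns a region covering [b, e),
// conditionTwo below evaluates to true because the previous endKey "c" does not match the current
// startKey "b", so the overlap is flagged and the metadata-inconsistency counter is incremented.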
+ boolean conditionOne =
+ (Bytes.compareTo(regionLocation.getRegion().getStartKey(), currentKey) > 0
+ || Bytes.compareTo(regionLocation.getRegion().getEndKey(), currentKey) <= 0)
+ && !Bytes.equals(currentKey, HConstants.EMPTY_START_ROW)
+ && !Bytes.equals(regionLocation.getRegion().getEndKey(), HConstants.EMPTY_END_ROW);
+ boolean conditionTwo = prevRegionLocation != null
+ && (Bytes.compareTo(regionLocation.getRegion().getStartKey(),
+ prevRegionLocation.getRegion().getEndKey()) != 0
+ || Bytes.compareTo(regionLocation.getRegion().getEndKey(),
+ prevRegionLocation.getRegion().getEndKey()) <= 0)
+ && !Bytes.equals(prevRegionLocation.getRegion().getEndKey(), HConstants.EMPTY_START_ROW)
+ && !Bytes.equals(regionLocation.getRegion().getEndKey(), HConstants.EMPTY_END_ROW);
+ if (conditionOne || conditionTwo) {
+ GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.increment();
+ LOGGER.warn(
+ "HBase region overlap/inconsistencies on {} , current key: {} , region startKey:"
+ + " {} , region endKey: {} , prev region startKey: {} , prev region endKey: {}",
+ regionLocation, Bytes.toStringBinary(currentKey),
+ Bytes.toStringBinary(regionLocation.getRegion().getStartKey()),
+ Bytes.toStringBinary(regionLocation.getRegion().getEndKey()),
+ prevRegionLocation == null
+ ? "null"
+ : Bytes.toStringBinary(prevRegionLocation.getRegion().getStartKey()),
+ prevRegionLocation == null
+ ? "null"
+ : Bytes.toStringBinary(prevRegionLocation.getRegion().getEndKey()));
+ }
+ return regionLocation.getRegion().getEndKey();
+ }
+
+ /**
+ * {@inheritDoc}.
+ */
+ @Override
+ public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
+ int queryTimeout = this.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
+ QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+ return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+ queryTimeout);
+ }
+
+ /**
+ * {@inheritDoc}.
+ */
+ @Override
+ public List<HRegionLocation> getAllTableRegions(byte[] tableName, int queryTimeout)
+ throws SQLException {
+ return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+ queryTimeout);
+ }
+
+ /**
+ * {@inheritDoc}.
+ */
+ @Override
+ public List<HRegionLocation> getTableRegions(byte[] tableName, byte[] startRowKey,
+ byte[] endRowKey) throws SQLException {
+ int queryTimeout = this.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
+ QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
+ return getTableRegions(tableName, startRowKey, endRowKey, queryTimeout);
+ }
+
+ /**
+ * {@inheritDoc}.
+ */
+ @Override
+ public List<HRegionLocation> getTableRegions(final byte[] tableName, final byte[] startRowKey,
+ final byte[] endRowKey, final int queryTimeout) throws SQLException {
 /*
- * We can have multiple instances of ConnectionQueryServices. By making the thread factory
- * static, renew lease thread names will be unique across them.
- */ - private static final ThreadFactory renewLeaseThreadFactory = new RenewLeaseThreadFactory(); - private final boolean renewLeaseEnabled; - private final boolean isAutoUpgradeEnabled; - private final AtomicBoolean upgradeRequired = new AtomicBoolean(false); - private final int maxConnectionsAllowed; - private final int maxInternalConnectionsAllowed; - private final boolean shouldThrottleNumConnections; - public static final byte[] MUTEX_LOCKED = "MUTEX_LOCKED".getBytes(StandardCharsets.UTF_8); - private ServerSideRPCControllerFactory serverSideRPCControllerFactory; - private boolean localIndexUpgradeRequired; - - private final boolean enableConnectionActivityLogging; - private final int loggingIntervalInMins; - - private final ConnectionLimiter connectionLimiter; - - // writes guarded by "liveRegionServersLock" - private volatile List liveRegionServers; - private final Object liveRegionServersLock = new Object(); - // Writes guarded by invalidateMetadataCacheConnLock - private Connection invalidateMetadataCacheConnection = null; - private final Object invalidateMetadataCacheConnLock = new Object(); - private MetricsMetadataCachingSource metricsMetadataCachingSource; - public static final String INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE = - "Cannot invalidate server metadata cache on a non-server connection"; - - private static interface FeatureSupported { - boolean isSupported(ConnectionQueryServices services); - } - - private final Map featureMap = ImmutableMap.of( - Feature.LOCAL_INDEX, new FeatureSupported() { - @Override - public boolean isSupported(ConnectionQueryServices services) { - int hbaseVersion = services.getLowestClusterHBaseVersion(); - return hbaseVersion < MetaDataProtocol.MIN_LOCAL_SI_VERSION_DISALLOW || hbaseVersion > MetaDataProtocol.MAX_LOCAL_SI_VERSION_DISALLOW; - } - }, - Feature.RENEW_LEASE, new FeatureSupported() { - @Override - public boolean isSupported(ConnectionQueryServices services) { - int hbaseVersion = services.getLowestClusterHBaseVersion(); - return hbaseVersion >= MetaDataProtocol.MIN_RENEW_LEASE_VERSION; - } - }); - private QueryLoggerDisruptor queryDisruptor; - - private PMetaData newEmptyMetaData() { - return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, - (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)), - getProps()); - } - - /** - * Construct a ConnectionQueryServicesImpl that represents a connection to an HBase - * cluster. - * @param services base services from where we derive our default configuration - * @param connectionInfo to provide connection information - * @param info hbase configuration properties + * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting all + * region locations from the HTable doesn't. 
*/ - public ConnectionQueryServicesImpl(QueryServices services, ConnectionInfo connectionInfo, Properties info) { - super(services); - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - for (Entry entry : services.getProps()) { - config.set(entry.getKey(), entry.getValue()); - } - if (info != null) { - for (Object key : info.keySet()) { - config.set((String) key, info.getProperty((String) key)); - } - } - for (Entry entry : connectionInfo.asProps()) { - config.set(entry.getKey(), entry.getValue()); - } - if (connectionInfo.getPrincipal() != null) { - config.set(QUERY_SERVICES_NAME, connectionInfo.getPrincipal()); - } - LOGGER.info(String.format("CQS initialized with connection query service : %s", - config.get(QUERY_SERVICES_NAME))); - this.connectionInfo = connectionInfo; - - // Without making a copy of the configuration we cons up, we lose some of our properties - // on the server side during testing. - this.config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config); - //Set the rpcControllerFactory if it is a server side connnection. - boolean isServerSideConnection = config.getBoolean(QueryUtil.IS_SERVER_CONNECTION, false); - if (isServerSideConnection) { - this.serverSideRPCControllerFactory = new ServerSideRPCControllerFactory(config); - } - // set replication required parameter - ConfigUtil.setReplicationConfigIfAbsent(this.config); - this.props = new ReadOnlyProps(this.config.iterator()); - this.userName = connectionInfo.getPrincipal(); - this.user = connectionInfo.getUser(); - this.latestMetaData = newEmptyMetaData(); - // TODO: should we track connection wide memory usage or just org-wide usage? - // If connection-wide, create a MemoryManager here, otherwise just use the one from the delegate - this.childServices = new ConcurrentHashMap(INITIAL_CHILD_SERVICES_CAPACITY); - // find the HBase version and use that to determine the KeyValueBuilder that should be used - String hbaseVersion = VersionInfo.getVersion(); - this.kvBuilder = KeyValueBuilder.get(hbaseVersion); - this.returnSequenceValues = props.getBoolean(QueryServices.RETURN_SEQUENCE_VALUES_ATTRIB, QueryServicesOptions.DEFAULT_RETURN_SEQUENCE_VALUES); - this.renewLeaseEnabled = config.getBoolean(RENEW_LEASE_ENABLED, DEFAULT_RENEW_LEASE_ENABLED); - this.renewLeasePoolSize = config.getInt(RENEW_LEASE_THREAD_POOL_SIZE, DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE); - this.renewLeaseThreshold = config.getInt(RENEW_LEASE_THRESHOLD_MILLISECONDS, DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS); - this.renewLeaseTaskFrequency = config.getInt(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS); - List>> list = Lists.newArrayListWithCapacity(renewLeasePoolSize); - for (int i = 0; i < renewLeasePoolSize; i++) { - LinkedBlockingQueue> queue = new LinkedBlockingQueue>(); - list.add(queue); - } - connectionQueues = ImmutableList.copyOf(list); - - // A little bit of a smell to leak `this` here, but should not be a problem - this.tableStatsCache = GUIDE_POSTS_CACHE_PROVIDER.getGuidePostsCache(props.get(GUIDE_POSTS_CACHE_FACTORY_CLASS, - QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS), this, config); - - this.isAutoUpgradeEnabled = config.getBoolean(AUTO_UPGRADE_ENABLED, QueryServicesOptions.DEFAULT_AUTO_UPGRADE_ENABLED); - this.maxConnectionsAllowed = config.getInt(QueryServices.CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS, - QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS); - 
this.maxInternalConnectionsAllowed = config.getInt(QueryServices.INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS, - QueryServicesOptions.DEFAULT_INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS); - this.shouldThrottleNumConnections = (maxConnectionsAllowed > 0) || (maxInternalConnectionsAllowed > 0); - this.enableConnectionActivityLogging = - config.getBoolean(CONNECTION_ACTIVITY_LOGGING_ENABLED, - QueryServicesOptions.DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED); - this.loggingIntervalInMins = - config.getInt(CONNECTION_ACTIVITY_LOGGING_INTERVAL, - QueryServicesOptions.DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS); - - if (enableConnectionActivityLogging) { - LoggingConnectionLimiter.Builder builder = new LoggingConnectionLimiter.Builder(shouldThrottleNumConnections); - connectionLimiter = builder - .withLoggingIntervalInMins(loggingIntervalInMins) - .withLogging(true) - .withMaxAllowed(this.maxConnectionsAllowed) - .withMaxInternalAllowed(this.maxInternalConnectionsAllowed) - .build(); + int retryCount = 0; + int maxRetryCount = + config.getInt(PHOENIX_GET_REGIONS_RETRIES, DEFAULT_PHOENIX_GET_REGIONS_RETRIES); + TableName table = TableName.valueOf(tableName); + byte[] currentKey = null; + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long maxQueryEndTime = startTime + queryTimeout; + while (true) { + try { + // We could surface the package projected + // HConnectionImplementation.getNumberOfCachedRegionLocations + // to get the sizing info we need, but this would require a new class in the same package + // and a cast + // to this implementation class, so it's probably not worth it. + List locations = Lists.newArrayList(); + HRegionLocation prevRegionLocation = null; + currentKey = startRowKey; + do { + HRegionLocation regionLocation = + ((ClusterConnection) connection).getRegionLocation(table, currentKey, false); + currentKey = getNextRegionStartKey(regionLocation, currentKey, prevRegionLocation); + locations.add(regionLocation); + prevRegionLocation = regionLocation; + if ( + !Bytes.equals(endRowKey, HConstants.EMPTY_END_ROW) + && Bytes.compareTo(currentKey, endRowKey) >= 0 + ) { + break; + } + throwErrorIfQueryTimedOut(startRowKey, endRowKey, maxQueryEndTime, queryTimeout, table, + retryCount, currentKey); + } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)); + return locations; + } catch (org.apache.hadoop.hbase.TableNotFoundException e) { + TableNotFoundException ex = new TableNotFoundException(table.getNameAsString()); + e.initCause(ex); + throw ex; + } catch (IOException e) { + LOGGER.error( + "Exception encountered in getAllTableRegions for " + + "table: {}, retryCount: {} , currentKey: {} , startRowKey: {} ," + " endRowKey: {}", + table.getNameAsString(), retryCount, Bytes.toStringBinary(currentKey), + Bytes.toStringBinary(startRowKey), Bytes.toStringBinary(endRowKey), e); + if (retryCount++ < maxRetryCount) { + continue; + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e) + .build().buildException(); + } + } + } + + /** + * Throw Error if the metadata lookup takes longer than query timeout configured. + * @param startRowKey Start RowKey to begin the region metadata lookup from. + * @param endRowKey End RowKey to end the region metadata lookup at. + * @param maxQueryEndTime Max time to execute the metadata lookup. + * @param queryTimeout Query timeout. + * @param table Table Name. + * @param retryCount Retry Count. + * @param currentKey Current Key. 
+ * @throws SQLException Throw Error if the metadata lookup takes longer than query timeout. + */ + private static void throwErrorIfQueryTimedOut(byte[] startRowKey, byte[] endRowKey, + long maxQueryEndTime, int queryTimeout, TableName table, int retryCount, byte[] currentKey) + throws SQLException { + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + if (currentTime >= maxQueryEndTime) { + LOGGER.error( + "getTableRegions has exceeded query timeout {} ms." + + "Table: {}, retryCount: {} , currentKey: {} , " + "startRowKey: {} , endRowKey: {}", + queryTimeout, table.getNameAsString(), retryCount, Bytes.toStringBinary(currentKey), + Bytes.toStringBinary(startRowKey), Bytes.toStringBinary(endRowKey)); + final String message = "getTableRegions has exceeded query timeout " + queryTimeout + "ms"; + IOException e = new IOException(message); + throw new SQLTimeoutException(message, SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState(), + SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), e); + } + } + + public PMetaData getMetaDataCache() { + return latestMetaData; + } + + @Override + public int getConnectionCount(boolean isInternal) { + if (isInternal) { + return connectionLimiter.getInternalConnectionCount(); + } else { + return connectionLimiter.getConnectionCount(); + } + } + + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + synchronized (latestMetaDataLock) { + try { + throwConnectionClosedIfNullMetaData(); + // If existing table isn't older than new table, don't replace + // If a client opens a connection at an earlier timestamp, this can happen + PTableRef existingTableRef = latestMetaData + .getTableRef(new PTableKey(table.getTenantId(), table.getName().getString())); + PTable existingTable = existingTableRef.getTable(); + if (existingTable.getTimeStamp() > table.getTimeStamp()) { + return; + } + } catch (TableNotFoundException e) { + } + latestMetaData.addTable(table, resolvedTime); + latestMetaDataLock.notifyAll(); + } + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException { + synchronized (latestMetaDataLock) { + throwConnectionClosedIfNullMetaData(); + latestMetaData.updateResolvedTimestamp(table, resolvedTime); + latestMetaDataLock.notifyAll(); + } + } + + private static interface Mutator { + void mutate(PMetaData metaData) throws SQLException; + } + + /** + * Ensures that metaData mutations are handled in the correct order + */ + private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, + Mutator mutator) throws SQLException { + synchronized (latestMetaDataLock) { + throwConnectionClosedIfNullMetaData(); + PMetaData metaData = latestMetaData; + PTable table; + long endTime = + EnvironmentEdgeManager.currentTimeMillis() + DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS; + while (true) { + try { + try { + table = metaData.getTableRef(new PTableKey(tenantId, tableName)).getTable(); + /* + * If the table is at the prior sequence number, then we're good to go. We know if we've + * got this far, that the server validated the mutations, so we'd just need to wait + * until the other connection that mutated the same table is processed. + */ + if (table.getSequenceNumber() + 1 == tableSeqNum) { + // TODO: assert that timeStamp is bigger that table timeStamp? 
+ mutator.mutate(metaData); + break; + } else if (table.getSequenceNumber() >= tableSeqNum) { + LOGGER.warn("Attempt to cache older version of " + tableName + ": current= " + + table.getSequenceNumber() + ", new=" + tableSeqNum); + break; + } + } catch (TableNotFoundException e) { + } + long waitTime = endTime - EnvironmentEdgeManager.currentTimeMillis(); + // We waited long enough - just remove the table from the cache + // and the next time it's used it'll be pulled over from the server. + if (waitTime <= 0) { + LOGGER.warn("Unable to update meta data repo within " + + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS / 1000) + " seconds for " + tableName); + // There will never be a parentTableName here, as that would only + // be non null for an index an we never add/remove columns from an index. + metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP); + break; + } + latestMetaDataLock.wait(waitTime); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); // FIXME + } + } + latestMetaData = metaData; + latestMetaDataLock.notifyAll(); + return metaData; + } + } + + @Override + public void removeTable(PName tenantId, final String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + synchronized (latestMetaDataLock) { + throwConnectionClosedIfNullMetaData(); + latestMetaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp); + latestMetaDataLock.notifyAll(); + } + } + + @Override + public void removeColumn(final PName tenantId, final String tableName, + final List columnsToRemove, final long tableTimeStamp, final long tableSeqNum, + final long resolvedTime) throws SQLException { + metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() { + @Override + public void mutate(PMetaData metaData) throws SQLException { + try { + metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, + resolvedTime); + } catch (TableNotFoundException e) { + // The DROP TABLE may have been processed first, so just ignore. + } + } + }); + } + + /** + * Check that the supplied connection properties are set to valid values. + * @param info The properties to be validated. + * @throws IllegalArgumentException when a property is not set to a valid value. 
+ */ + private void validateConnectionProperties(Properties info) { + if (info.get(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB) != null) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Connection's " + DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB + " set to " + + info.get(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + } + ConnectionProperty.UPDATE_CACHE_FREQUENCY + .getValue(info.getProperty(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + } + } + + @Override + public PhoenixConnection connect(String url, Properties info) throws SQLException { + checkClosed(); + throwConnectionClosedIfNullMetaData(); + validateConnectionProperties(info); + + return new PhoenixConnection(this, url, info); + } + + private ColumnFamilyDescriptor generateColumnFamilyDescriptor( + Pair> family, PTableType tableType) throws SQLException { + ColumnFamilyDescriptorBuilder columnDescBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(family.getFirst()); + if (tableType != PTableType.VIEW) { + columnDescBuilder.setDataBlockEncoding(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING); + for (Entry entry : family.getSecond().entrySet()) { + String key = entry.getKey(); + Object value = entry.getValue(); + setHColumnDescriptorValue(columnDescBuilder, key, value); + } + } + return columnDescBuilder.build(); + } + + // Workaround HBASE-14737 + private static void setHColumnDescriptorValue(ColumnFamilyDescriptorBuilder columnDescBuilder, + String key, Object value) { + if (HConstants.VERSIONS.equals(key)) { + columnDescBuilder.setMaxVersions(getMaxVersion(value)); + } else { + columnDescBuilder.setValue(key, value == null ? null : value.toString()); + } + } + + private static int getMaxVersion(Object value) { + if (value == null) { + return -1; // HColumnDescriptor.UNINITIALIZED is private + } + if (value instanceof Number) { + return ((Number) value).intValue(); + } + String stringValue = value.toString(); + if (stringValue.isEmpty()) { + return -1; + } + return Integer.parseInt(stringValue); + } + + private void modifyColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder hcd, + Map props) throws SQLException { + for (Entry entry : props.entrySet()) { + String propName = entry.getKey(); + Object value = entry.getValue(); + setHColumnDescriptorValue(hcd, propName, value); + } + } + + private TableDescriptorBuilder generateTableDescriptor(byte[] physicalTableName, + byte[] parentPhysicalTableName, TableDescriptor existingDesc, PTableType tableType, + Map tableProps, List>> families, + byte[][] splits, boolean isNamespaceMapped) throws SQLException { + String defaultFamilyName = + (String) tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME); + TableDescriptorBuilder tableDescriptorBuilder = (existingDesc != null) + ? TableDescriptorBuilder.newBuilder(existingDesc) + : TableDescriptorBuilder.newBuilder(TableName.valueOf(physicalTableName)); + + ColumnFamilyDescriptor dataTableColDescForIndexTablePropSyncing = null; + boolean doNotAddGlobalIndexChecker = false; + if ( + tableType == PTableType.INDEX || MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName)) + ) { + byte[] defaultFamilyBytes = defaultFamilyName == null + ? 
QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES + : Bytes.toBytes(defaultFamilyName); + + final TableDescriptor baseTableDesc; + if (MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) { + // Handles indexes created on views for single-tenant tables and + // global indexes created on views of multi-tenant tables + baseTableDesc = this.getTableDescriptor(parentPhysicalTableName); + } else if (existingDesc == null) { + // Global/local index creation on top of a physical base table + baseTableDesc = this.getTableDescriptor(SchemaUtil.getPhysicalTableName( + Bytes.toBytes((String) tableProps.get(PhoenixDatabaseMetaData.DATA_TABLE_NAME)), + isNamespaceMapped).getName()); + } else { + // In case this a local index created on a view of a multi-tenant table, the + // PHYSICAL_DATA_TABLE_NAME points to the name of the view instead of the physical base + // table + baseTableDesc = existingDesc; + } + dataTableColDescForIndexTablePropSyncing = baseTableDesc.getColumnFamily(defaultFamilyBytes); + // It's possible that the table has specific column families and none of them are declared + // to be the DEFAULT_COLUMN_FAMILY, so we choose the first column family for syncing + // properties + if (dataTableColDescForIndexTablePropSyncing == null) { + dataTableColDescForIndexTablePropSyncing = baseTableDesc.getColumnFamilies()[0]; + } + if (baseTableDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { + // The base table still uses the old indexing + doNotAddGlobalIndexChecker = true; + } + } + // By default, do not automatically rebuild/catch up an index on a write failure + // Add table-specific properties to the table descriptor + for (Entry entry : tableProps.entrySet()) { + String key = entry.getKey(); + if (!TableProperty.isPhoenixTableProperty(key)) { + Object value = entry.getValue(); + tableDescriptorBuilder.setValue(key, value == null ? null : value.toString()); + } + } + + Map syncedProps = + MetaDataUtil.getSyncedProps(dataTableColDescForIndexTablePropSyncing); + // Add column family-specific properties to the table descriptor + for (Pair> family : families) { + // If family is only in phoenix description, add it. otherwise, modify its property + // accordingly. 
+ byte[] familyByte = family.getFirst(); + if (tableDescriptorBuilder.build().getColumnFamily(familyByte) == null) { + if (tableType == PTableType.VIEW) { + String fullTableName = Bytes.toString(physicalTableName); + throw new ReadOnlyTableException( + "The HBase column families for a read-only table must already exist", + SchemaUtil.getSchemaNameFromFullName(fullTableName), + SchemaUtil.getTableNameFromFullName(fullTableName), Bytes.toString(familyByte)); + } + + ColumnFamilyDescriptor columnDescriptor = generateColumnFamilyDescriptor(family, tableType); + // Keep certain index column family properties in sync with the base table + if ( + (tableType == PTableType.INDEX + || MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) + && (syncedProps != null && !syncedProps.isEmpty()) + ) { + ColumnFamilyDescriptorBuilder colFamDescBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(columnDescriptor); + modifyColumnFamilyDescriptor(colFamDescBuilder, syncedProps); + columnDescriptor = colFamDescBuilder.build(); + } + tableDescriptorBuilder.setColumnFamily(columnDescriptor); + } else { + if (tableType != PTableType.VIEW) { + ColumnFamilyDescriptor columnDescriptor = + tableDescriptorBuilder.build().getColumnFamily(familyByte); + if (columnDescriptor == null) { + throw new IllegalArgumentException("Unable to find column descriptor with family name " + + Bytes.toString(family.getFirst())); + } + ColumnFamilyDescriptorBuilder columnDescriptorBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(columnDescriptor); + modifyColumnFamilyDescriptor(columnDescriptorBuilder, family.getSecond()); + tableDescriptorBuilder.modifyColumnFamily(columnDescriptorBuilder.build()); + } + } + } + addCoprocessors(physicalTableName, tableDescriptorBuilder, tableType, tableProps, existingDesc, + doNotAddGlobalIndexChecker); + + // PHOENIX-3072: Set index priority if this is a system table or index table + if (tableType == PTableType.SYSTEM) { + tableDescriptorBuilder.setValue(QueryConstants.PRIORITY, + String.valueOf(IndexUtil.getMetadataPriority(config))); + } else if ( + tableType == PTableType.INDEX // Global, mutable index + && !isLocalIndexTable(tableDescriptorBuilder.build().getColumnFamilyNames()) + && !Boolean.TRUE.equals(tableProps.get(PhoenixDatabaseMetaData.IMMUTABLE_ROWS)) + ) { + tableDescriptorBuilder.setValue(QueryConstants.PRIORITY, + String.valueOf(IndexUtil.getIndexPriority(config))); + } + return tableDescriptorBuilder; + } + + private boolean isLocalIndexTable(Collection families) { + // no easier way to know local index table? 
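// Illustrative note (assumption; not part of the original change): local index column families
// are conventionally named with QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX (e.g. "L#0"),
// which is why the prefix check below is enough to recognize a local index table.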
+ for (byte[] family : families) { + if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + return true; + } + } + return false; + } + + private boolean isPhoenixTTLEnabled() { + return config.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } + + private void addCoprocessors(byte[] tableName, TableDescriptorBuilder builder, + PTableType tableType, Map tableProps, TableDescriptor existingDesc, + boolean doNotAddGlobalIndexChecker) throws SQLException { + // The phoenix jar must be available on HBase classpath + int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, + QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); + try { + TableDescriptor newDesc = builder.build(); + TransactionFactory.Provider provider = getTransactionProvider(tableProps); + boolean isTransactional = (provider != null); + + boolean indexRegionObserverEnabled = + config.getBoolean(QueryServices.INDEX_REGION_OBSERVER_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REGION_OBSERVER_ENABLED); + boolean isViewIndex = + TRUE_BYTES_AS_STRING.equals(tableProps.get(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME)); + boolean isServerSideMaskingEnabled = + config.getBoolean(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, + QueryServicesOptions.DEFAULT_SERVER_SIDE_MASKING_ENABLED); + + boolean isViewBaseTransactional = false; + if (!isTransactional && isViewIndex) { + if ( + tableProps.containsKey(TRANSACTIONAL) + && Boolean.TRUE.equals(tableProps.get(TRANSACTIONAL)) + ) { + isViewBaseTransactional = true; + } + } + + if ( + !isTransactional && !isViewBaseTransactional + && (tableType == PTableType.INDEX || isViewIndex) + ) { + if ( + !indexRegionObserverEnabled + && newDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME) + ) { + builder.removeCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME); + } else if ( + indexRegionObserverEnabled + && !newDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME) + && !isLocalIndexTable(newDesc.getColumnFamilyNames()) + ) { + if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); + } + if (!doNotAddGlobalIndexChecker) { + builder.setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME) + .setPriority(priority - 1).build()); + } + } + } + + if (!newDesc.hasCoprocessor(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME).setPriority(priority).build()); + } + if (!newDesc.hasCoprocessor(QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME) + .setPriority(priority).build()); + } + if (!newDesc.hasCoprocessor(QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME) + .setPriority(priority).build()); + } + if (!newDesc.hasCoprocessor(QueryConstants.SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME).setPriority(priority) + .build()); + } + + // TODO: better encapsulation for this + // Since indexes can't 
have indexes, don't install our indexing coprocessor for indexes. + // Also don't install on the SYSTEM.CATALOG and SYSTEM.STATS table because we use + // all-or-none mutate class which break when this coprocessor is installed (PHOENIX-1318). + if ( + (tableType != PTableType.INDEX && tableType != PTableType.VIEW && !isViewIndex) + && !SchemaUtil.isMetaTable(tableName) && !SchemaUtil.isStatsTable(tableName) + ) { + if (isTransactional) { + if (!newDesc.hasCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME) + .setPriority(priority).build()); + } + // For alter table, remove non transactional index coprocessor + if (newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.INDEXER_CLASSNAME); + } + if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); + } } else { - DefaultConnectionLimiter.Builder builder = new DefaultConnectionLimiter.Builder(shouldThrottleNumConnections); - connectionLimiter = builder - .withMaxAllowed(this.maxConnectionsAllowed) - .withMaxInternalAllowed(this.maxInternalConnectionsAllowed) - .build(); - } - - - if (!QueryUtil.isServerConnection(props)) { - //Start queryDistruptor everytime as log level can be change at connection level as well, but we can avoid starting for server connections. + // If exception on alter table to transition back to non transactional + if (newDesc.hasCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME); + } + // we only want to mess with the indexing coprocs if we're on the original + // CREATE statement. 
Otherwise, if we're on an ALTER or CREATE TABLE + // IF NOT EXISTS of an existing table, we should leave them unaltered, + // because they should be upgraded or downgraded using the IndexUpgradeTool + if (!doesPhoenixTableAlreadyExist(existingDesc)) { + if (indexRegionObserverEnabled) { + if (newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.INDEXER_CLASSNAME); + } + if (!newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { + Map opts = Maps.newHashMapWithExpectedSize(1); + opts.put(IndexUtil.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); + IndexUtil.enableIndexing(builder, IndexUtil.PHOENIX_INDEX_BUILDER_CLASSNAME, opts, + priority, QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); + } + } else { + if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { + builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); + } + if (!newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { + Map opts = Maps.newHashMapWithExpectedSize(1); + opts.put(IndexUtil.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); + IndexUtil.enableIndexing(builder, IndexUtil.PHOENIX_INDEX_BUILDER_CLASSNAME, opts, + priority, QueryConstants.INDEXER_CLASSNAME); + } + } + } + } + } + + if ( + (SchemaUtil.isStatsTable(tableName) || SchemaUtil.isMetaTable(tableName)) + && !newDesc.hasCoprocessor(QueryConstants.MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME) + ) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME).setPriority(priority) + .setProperties(Collections.emptyMap()).build()); + } + + Set familiesKeys = builder.build().getColumnFamilyNames(); + for (byte[] family : familiesKeys) { + if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + if ( + !newDesc.hasCoprocessor(QueryConstants.INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME) + ) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME) + .setPriority(priority).setProperties(Collections.emptyMap()).build()); + break; + } + } + } + + // Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix + // table + // stay on the same region. 
+ if (SchemaUtil.isMetaTable(tableName) || SchemaUtil.isFunctionTable(tableName)) { + if (!newDesc.hasCoprocessor(QueryConstants.META_DATA_ENDPOINT_IMPL_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.META_DATA_ENDPOINT_IMPL_CLASSNAME).setPriority(priority) + .setProperties(Collections.emptyMap()).build()); + } + if (SchemaUtil.isMetaTable(tableName)) { + if (!newDesc.hasCoprocessor(QueryConstants.META_DATA_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.META_DATA_REGION_OBSERVER_CLASSNAME) + .setPriority(priority + 1).setProperties(Collections.emptyMap()).build()); + } + } + } else if (SchemaUtil.isSequenceTable(tableName)) { + if (!newDesc.hasCoprocessor(QueryConstants.SEQUENCE_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.SEQUENCE_REGION_OBSERVER_CLASSNAME).setPriority(priority) + .setProperties(Collections.emptyMap()).build()); + } + } else if (SchemaUtil.isTaskTable(tableName)) { + if (!newDesc.hasCoprocessor(QueryConstants.TASK_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(QueryConstants.TASK_REGION_OBSERVER_CLASSNAME) + .setPriority(priority).setProperties(Collections.emptyMap()).build()); + } + if (!newDesc.hasCoprocessor(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME).setPriority(priority) + .setProperties(Collections.emptyMap()).build()); + } + } else if (SchemaUtil.isChildLinkTable(tableName)) { + if (!newDesc.hasCoprocessor(QueryConstants.CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME) + .setPriority(priority).setProperties(Collections.emptyMap()).build()); + } + } + + if (isTransactional) { + String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); + if (!newDesc.hasCoprocessor(coprocessorClassName)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(coprocessorClassName) + .setPriority(priority - 10).setProperties(Collections.emptyMap()).build()); + } + String coprocessorGCClassName = + provider.getTransactionProvider().getGCCoprocessorClassName(); + if (coprocessorGCClassName != null) { + if (!newDesc.hasCoprocessor(coprocessorGCClassName)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(coprocessorGCClassName) + .setPriority(priority - 10).setProperties(Collections.emptyMap()).build()); + } + } + } else { + // Remove all potential transactional coprocessors + for (TransactionFactory.Provider aprovider : TransactionFactory.Provider.available()) { + String coprocessorClassName = + aprovider.getTransactionProvider().getCoprocessorClassName(); + String coprocessorGCClassName = + aprovider.getTransactionProvider().getGCCoprocessorClassName(); + if (coprocessorClassName != null && newDesc.hasCoprocessor(coprocessorClassName)) { + builder.removeCoprocessor(coprocessorClassName); + } + if (coprocessorGCClassName != null && newDesc.hasCoprocessor(coprocessorGCClassName)) { + builder.removeCoprocessor(coprocessorGCClassName); + } + } + } + + // The priority for this co-processor should be set higher than the GlobalIndexChecker so that + // the read repair scans + // are intercepted by the TTLAwareRegionObserver and only the rows that are not 
ttl-expired + // are returned. + if (!SchemaUtil.isSystemTable(tableName)) { + if ( + !newDesc.hasCoprocessor(QueryConstants.PHOENIX_TTL_REGION_OBSERVER_CLASSNAME) + && isServerSideMaskingEnabled + ) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.PHOENIX_TTL_REGION_OBSERVER_CLASSNAME) + .setPriority(priority - 2).setProperties(Collections.emptyMap()).build()); + } + } + + if ( + Arrays.equals(tableName, + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, props).getName()) + ) { + if (!newDesc.hasCoprocessor(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME)) { + builder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME) + .setPriority(priority).setProperties(Collections.emptyMap()).build()); + } + } + + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + private TransactionFactory.Provider getTransactionProvider(Map tableProps) { + TransactionFactory.Provider provider = + (TransactionFactory.Provider) TableProperty.TRANSACTION_PROVIDER.getValue(tableProps); + return provider; + } + + private boolean doesPhoenixTableAlreadyExist(TableDescriptor existingDesc) { + // if the table descriptor already has Phoenix coprocs, we assume it's + // already gone through a Phoenix create statement once + if (existingDesc == null) { + return false; + } + boolean hasScanObserver = + existingDesc.hasCoprocessor(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME); + boolean hasUnAggObserver = + existingDesc.hasCoprocessor(QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME); + boolean hasGroupedObserver = + existingDesc.hasCoprocessor(QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME); + boolean hasIndexObserver = existingDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME) + || existingDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME) + || existingDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME); + return hasScanObserver && hasUnAggObserver && hasGroupedObserver && hasIndexObserver; + } + + private static interface RetriableOperation { + boolean checkForCompletion() throws TimeoutException, IOException; + + String getOperationName(); + } + + private void pollForUpdatedTableDescriptor(final Admin admin, + final TableDescriptor newTableDescriptor, final byte[] tableName) + throws InterruptedException, TimeoutException { + checkAndRetry(new RetriableOperation() { + + @Override + public String getOperationName() { + return "UpdateOrNewTableDescriptor"; + } + + @Override + public boolean checkForCompletion() throws TimeoutException, IOException { + TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(tableName)); + return newTableDescriptor.equals(tableDesc); + } + }); + } + + private void checkAndRetry(RetriableOperation op) throws InterruptedException, TimeoutException { + int maxRetries = ConnectionQueryServicesImpl.this.props.getInt( + QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, + QueryServicesOptions.DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK); + long sleepInterval = + ConnectionQueryServicesImpl.this.props.getLong(QueryServices.DELAY_FOR_SCHEMA_UPDATE_CHECK, + QueryServicesOptions.DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK); + boolean success = false; + int numTries = 1; + PhoenixStopWatch watch = new PhoenixStopWatch(); + watch.start(); + do { + try { + success = op.checkForCompletion(); + } catch (Exception ex) { + // If we encounter any exception on the first or last try, propagate the exception and 
fail. + // Else, we swallow the exception and retry till we reach maxRetries. + if (numTries == 1 || numTries == maxRetries) { + watch.stop(); + TimeoutException toThrow = new TimeoutException("Operation " + op.getOperationName() + + " didn't complete because of exception. Time elapsed: " + watch.elapsedMillis()); + toThrow.initCause(ex); + throw toThrow; + } + } + numTries++; + Thread.sleep(sleepInterval); + } while (numTries < maxRetries && !success); + + watch.stop(); + + if (!success) { + throw new TimeoutException("Operation " + op.getOperationName() + " didn't complete within " + + watch.elapsedMillis() + " ms " + "after trying " + numTries + "times."); + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Operation " + op.getOperationName() + " completed within " + + watch.elapsedMillis() + "ms " + "after trying " + numTries + " times."); + } + } + } + + private boolean allowOnlineTableSchemaUpdate() { + return props.getBoolean(QueryServices.ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, + QueryServicesOptions.DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE); + } + + /** + * Ensure that the HBase namespace is created/exists already + * @param schemaName Phoenix schema name for which we ensure existence of the HBase namespace + * @return true if we created the HBase namespace because it didn't already exist + * @throws SQLException If there is an exception creating the HBase namespace + */ + boolean ensureNamespaceCreated(String schemaName) throws SQLException { + SQLException sqlE = null; + boolean createdNamespace = false; + try (Admin admin = getAdmin()) { + if (!ClientUtil.isHBaseNamespaceAvailable(admin, schemaName)) { + NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create(schemaName).build(); + admin.createNamespace(namespaceDescriptor); + createdNamespace = true; + } + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } finally { + if (sqlE != null) { + throw sqlE; + } + } + return createdNamespace; + } + + /** Returns true if table was created and false if it already exists */ + + private TableDescriptor ensureTableCreated(byte[] physicalTableName, + byte[] parentPhysicalTableName, PTableType tableType, Map props, + List>> families, byte[][] splits, + boolean modifyExistingMetaData, boolean isNamespaceMapped, boolean isDoNotUpgradePropSet) + throws SQLException { + SQLException sqlE = null; + TableDescriptor existingDesc = null; + boolean isMetaTable = SchemaUtil.isMetaTable(physicalTableName); + boolean tableExist = true; + try (Admin admin = getAdmin()) { + final String quorum = ZKConfig.getZKQuorumServersString(config); + final String znode = this.getProps().get(HConstants.ZOOKEEPER_ZNODE_PARENT); + boolean createdNamespace = false; + LOGGER.debug("Found quorum: " + quorum + ":" + znode); + + if (isMetaTable) { + if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) { + try { + // SYSTEM namespace needs to be created via HBase APIs because "CREATE SCHEMA" statement + // tries to write + // its metadata in SYSTEM:CATALOG table. Without SYSTEM namespace, SYSTEM:CATALOG table + // cannot be created + createdNamespace = ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME); + } catch (PhoenixIOException e) { + // We could either: + // 1) Not access the NS descriptor. 
The NS may or may not exist at this point + // 2) We could not create the NS + // Regardless of the case 1 or 2, if we eventually try to migrate SYSTEM tables to the + // SYSTEM + // namespace using the {@link + // ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps)} method, + // if the NS does not exist, we will error as expected, or + // if the NS does exist and tables are already mapped, the check will exit gracefully + } + if ( + AdminUtilWithFallback.tableExists(admin, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, false)) + ) { + // SYSTEM.CATALOG exists, so at this point, we have 3 cases: + // 1) If server-side namespace mapping is disabled, drop the SYSTEM namespace if it was + // created + // above and throw Inconsistent namespace mapping exception + // 2) If server-side namespace mapping is enabled and SYSTEM.CATALOG needs to be + // upgraded, + // upgrade SYSTEM.CATALOG and also migrate SYSTEM tables to the SYSTEM namespace + // 3. If server-side namespace mapping is enabled and SYSTEM.CATALOG doesn't need to be + // upgraded, we still need to migrate SYSTEM tables to the SYSTEM namespace using the + // {@link ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps)} method (as part of + // {@link upgradeSystemTables(String, Properties)}) try { - this.queryDisruptor = new QueryLoggerDisruptor(this.config); - } catch (SQLException e) { - LOGGER.warn("Unable to initiate query logging service !!"); - e.printStackTrace(); - } + checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES); + } catch (SQLException possibleCompatException) { + // Handles Case 1: Drop the SYSTEM namespace in case it was created above + if ( + createdNamespace && possibleCompatException.getErrorCode() + == SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode() + ) { + ensureNamespaceDropped(QueryConstants.SYSTEM_SCHEMA_NAME); + } + // rethrow the SQLException + throw possibleCompatException; + } + // Thrown so we can force an upgrade which will just migrate SYSTEM tables to the SYSTEM + // namespace + throw new UpgradeRequiredException(MIN_SYSTEM_TABLE_TIMESTAMP); + } + } else if ( + AdminUtilWithFallback.tableExists(admin, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true)) + ) { + // If SYSTEM:CATALOG exists, but client-side namespace mapping for SYSTEM tables is + // disabled, throw an exception + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES) + .setMessage("Cannot initiate connection as " + + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true) + + " is found but client does not have " + IS_NAMESPACE_MAPPING_ENABLED + " enabled") + .build().buildException(); + } + // If DoNotUpgrade config is set only check namespace mapping and + // Client-server compatibility for system tables. 
+ if (isDoNotUpgradePropSet) { + try { + checkClientServerCompatibility( + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); + } catch (SQLException possibleCompatException) { + if ( + possibleCompatException + .getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException + ) { + throw new UpgradeRequiredException(MIN_SYSTEM_TABLE_TIMESTAMP); + } + throw possibleCompatException; + } + return null; + } + } + + try { + existingDesc = admin.getDescriptor(TableName.valueOf(physicalTableName)); + } catch (org.apache.hadoop.hbase.TableNotFoundException e) { + tableExist = false; + if (tableType == PTableType.VIEW) { + String fullTableName = Bytes.toString(physicalTableName); + throw new ReadOnlyTableException("An HBase table for a VIEW must already exist", + SchemaUtil.getSchemaNameFromFullName(fullTableName), + SchemaUtil.getTableNameFromFullName(fullTableName)); + } + } + + TableDescriptorBuilder newDesc = + generateTableDescriptor(physicalTableName, parentPhysicalTableName, existingDesc, tableType, + props, families, splits, isNamespaceMapped); + + if (!tableExist) { + if ( + SchemaUtil.isSystemTable(physicalTableName) && !isUpgradeRequired() + && (!isAutoUpgradeEnabled || isDoNotUpgradePropSet) + ) { + // Disallow creating the SYSTEM.CATALOG or SYSTEM:CATALOG HBase table + throw new UpgradeRequiredException(); + } + if ( + newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null + && Boolean.TRUE.equals(PBoolean.INSTANCE + .toObject(newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES))) + ) { + newDesc.setRegionSplitPolicyClassName(QueryConstants.INDEX_REGION_SPLIT_POLICY_CLASSNAME); } - nSequenceSaltBuckets = config.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB, - QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - this.metricsMetadataCachingSource = MetricsPhoenixCoprocessorSourceFactory.getInstance() - .getMetadataCachingSource(); - } - - private Connection openConnection(Configuration conf) throws SQLException { - Connection localConnection; try { - localConnection = HBaseFactoryProvider.getHConnectionFactory().createConnection(conf); - GLOBAL_HCONNECTIONS_COUNTER.increment(); - LOGGER.info("HConnection established. Stacktrace for informational purposes: " - + localConnection + " " + LogUtil.getCallerStackTrace()); - } catch (IOException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION) - .setRootCause(e).build().buildException(); - } - if (localConnection.isClosed()) { // TODO: why the heck doesn't this throw above? - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION).build().buildException(); - } - return localConnection; - } - - /** - * We create a long-lived hbase connection to run invalidate cache RPCs. We override - * CUSTOM_CONTROLLER_CONF_KEY to instantiate InvalidateMetadataCacheController which has - * a special priority for invalidate metadata cache operations. - * @return hbase connection - * @throws SQLException SQLException - */ - public Connection getInvalidateMetadataCacheConnection() throws SQLException { - if (invalidateMetadataCacheConnection != null) { - return invalidateMetadataCacheConnection; + if (splits == null) { + admin.createTable(newDesc.build()); + } else { + admin.createTable(newDesc.build(), splits); + } + } catch (TableExistsException e) { + // We can ignore this, as it just means that another client beat us + // to creating the HBase metadata. 
+ if (isMetaTable && !isUpgradeRequired()) { + checkClientServerCompatibility( + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); + } + return null; + } + if (isMetaTable && !isUpgradeRequired()) { + try { + checkClientServerCompatibility( + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); + } catch (SQLException possibleCompatException) { + if ( + possibleCompatException.getErrorCode() + == SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode() + ) { + try { + // In case we wrongly created SYSTEM.CATALOG or SYSTEM:CATALOG, we should drop it + disableTable(admin, TableName.valueOf(physicalTableName)); + admin.deleteTable(TableName.valueOf(physicalTableName)); + } catch (org.apache.hadoop.hbase.TableNotFoundException ignored) { + // Ignore this since it just means that another client with a similar set of + // incompatible configs and conditions beat us to dropping the SYSCAT HBase table + } + if ( + createdNamespace + && SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps()) + ) { + // We should drop the SYSTEM namespace which we just created, since + // server-side namespace mapping is disabled + ensureNamespaceDropped(QueryConstants.SYSTEM_SCHEMA_NAME); + } + } + // rethrow the SQLException + throw possibleCompatException; + } + + } + return null; + } else { + if (isMetaTable && !isUpgradeRequired()) { + checkClientServerCompatibility( + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); + } else { + for (Pair> family : families) { + if ( + (Bytes.toString(family.getFirst()) + .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) + ) { + newDesc + .setRegionSplitPolicyClassName(QueryConstants.INDEX_REGION_SPLIT_POLICY_CLASSNAME); + break; + } + } + } + + if (!modifyExistingMetaData) { + return existingDesc; // Caller already knows that no metadata was changed + } + TransactionFactory.Provider provider = getTransactionProvider(props); + boolean willBeTx = provider != null; + // If mapping an existing table as transactional, set property so that existing + // data is correctly read. + if (willBeTx) { + if (!equalTxCoprocessor(provider, existingDesc, newDesc.build())) { + // Cannot switch between different providers + if (hasTxCoprocessor(existingDesc)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SWITCH_TXN_PROVIDERS) + .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) + .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build() + .buildException(); + } + if ( + provider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.ALTER_NONTX_TO_TX) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL) + .setMessage(provider.name()) + .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) + .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build() + .buildException(); + } + newDesc.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE.toString()); + } + } else { + // If we think we're creating a non transactional table when it's already + // transactional, don't allow. 
+ if (hasTxCoprocessor(existingDesc)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX) + .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) + .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build() + .buildException(); + } + newDesc.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA)); } - - synchronized (invalidateMetadataCacheConnLock) { - Configuration clonedConfiguration = PropertiesUtil.cloneConfig(this.config); - clonedConfiguration.setClass(CUSTOM_CONTROLLER_CONF_KEY, - InvalidateMetadataCacheControllerFactory.class, RpcControllerFactory.class); - invalidateMetadataCacheConnection = openConnection(clonedConfiguration); + TableDescriptor result = newDesc.build(); + if (existingDesc.equals(result)) { + return null; // Indicate that no metadata was changed } - return invalidateMetadataCacheConnection; - } - /** - * Close the HBase connection and decrement the counter. - * @throws IOException throws IOException - */ - private void closeConnection(Connection connection) throws IOException { - if (connection != null) { - connection.close(); - LOGGER.info("{} HConnection closed. Stacktrace for informational" - + " purposes: {}", connection, LogUtil.getCallerStackTrace()); - GLOBAL_HCONNECTIONS_COUNTER.decrement(); + // Do not call modifyTable for SYSTEM tables + if (tableType != PTableType.SYSTEM) { + modifyTable(physicalTableName, newDesc.build(), true); } + return result; + } + + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); + } catch (TimeoutException e) { + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) + .setRootCause(e.getCause() != null ? e.getCause() : e).build().buildException(); + } finally { + if (sqlE != null) { + throw sqlE; + } + } + return null; // will never make it here + } + + /** + * If given TableDescriptorBuilder belongs to SYSTEM.TASK and if the table still does not have + * split policy setup as SystemTaskSplitPolicy, set it up and return true, else return false. This + * method is expected to return true only if it updated split policy (which should happen once + * during initial upgrade). + * @param tdBuilder table descriptor builder + * @return return true if split policy of SYSTEM.TASK is updated to SystemTaskSplitPolicy. + * @throws SQLException If SYSTEM.TASK already has custom split policy set up other than + * SystemTaskSplitPolicy + */ + @VisibleForTesting + public boolean updateAndConfirmSplitPolicyForTask(final TableDescriptorBuilder tdBuilder) + throws SQLException { + boolean isTaskTable = false; + TableName sysTaskTable = + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, props); + if (tdBuilder.build().getTableName().equals(sysTaskTable)) { + isTaskTable = true; + } + if (isTaskTable) { + final String actualSplitPolicy = tdBuilder.build().getRegionSplitPolicyClassName(); + final String targetSplitPolicy = QueryConstants.SYSTEM_TASK_SPLIT_POLICY_CLASSNAME; + if (!targetSplitPolicy.equals(actualSplitPolicy)) { + if (StringUtils.isNotEmpty(actualSplitPolicy)) { + // Rare possibility. 
pre-4.16 create DDL query + // doesn't have any split policy setup for SYSTEM.TASK + throw new InvalidRegionSplitPolicyException(QueryConstants.SYSTEM_SCHEMA_NAME, + SYSTEM_TASK_TABLE, ImmutableList.of("null", targetSplitPolicy), actualSplitPolicy); + } + tdBuilder.setRegionSplitPolicyClassName(targetSplitPolicy); + return true; + } } + return false; + } - @Override - public Table getTable(byte[] tableName) throws SQLException { + private static boolean hasTxCoprocessor(TableDescriptor descriptor) { + for (TransactionFactory.Provider provider : TransactionFactory.Provider.available()) { + String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); + if (coprocessorClassName != null && descriptor.hasCoprocessor(coprocessorClassName)) { + return true; + } + } + return false; + } + + private static boolean equalTxCoprocessor(TransactionFactory.Provider provider, + TableDescriptor existingDesc, TableDescriptor newDesc) { + String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); + return (coprocessorClassName != null && existingDesc.hasCoprocessor(coprocessorClassName) + && newDesc.hasCoprocessor(coprocessorClassName)); + } + + private void modifyTable(byte[] tableName, TableDescriptor newDesc, boolean shouldPoll) + throws IOException, InterruptedException, TimeoutException, SQLException { + TableName tn = TableName.valueOf(tableName); + try (Admin admin = getAdmin()) { + if (!allowOnlineTableSchemaUpdate()) { + disableTable(admin, tn); + admin.modifyTable(newDesc); // TODO: Update to TableDescriptor + admin.enableTable(tn); + } else { + admin.modifyTable(newDesc); // TODO: Update to TableDescriptor + if (shouldPoll) { + pollForUpdatedTableDescriptor(admin, newDesc, tableName); + } + } + } + } + + private static boolean hasIndexWALCodec(Long serverVersion) { + if (serverVersion == null) { + return true; + } + return MetaDataUtil.decodeHasIndexWALCodec(serverVersion); + } + + private void checkClientServerCompatibility(byte[] metaTable) + throws SQLException, AccessDeniedException { + StringBuilder errorMessage = new StringBuilder(); + int minHBaseVersion = Integer.MAX_VALUE; + boolean isTableNamespaceMappingEnabled = false; + long systemCatalogTimestamp = Long.MAX_VALUE; + long startTime = 0L; + long systemCatalogRpcTime; + Map results; + Table ht = null; + + try { + try { + startTime = EnvironmentEdgeManager.currentTimeMillis(); + ht = this.getTable(metaTable); + final byte[] tableKey = PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; + results = ht.coprocessorService(MetaDataService.class, tableKey, tableKey, + new Batch.Call() { + @Override + public GetVersionResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>(); + GetVersionRequest.Builder builder = GetVersionRequest.newBuilder(); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.getVersion(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_SYSTEM_TABLE_RPC_FAILURES, 1); + throw ClientUtil.parseServerException(e); + } finally { + systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - 
startTime; + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); + } + for (Map.Entry result : results.entrySet()) { + // This is the "phoenix.jar" is in-place, but server is out-of-sync with client case. + GetVersionResponse versionResponse = result.getValue(); + long serverJarVersion = versionResponse.getVersion(); + isTableNamespaceMappingEnabled |= + MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion); + + MetaDataUtil.ClientServerCompatibility compatibility = + MetaDataUtil.areClientAndServerCompatible(serverJarVersion); + if (!compatibility.getIsCompatible()) { + if (compatibility.getErrorCode() == SQLExceptionCode.OUTDATED_JARS.getErrorCode()) { + errorMessage.append("Newer Phoenix clients can't communicate with older " + + "Phoenix servers. Client version: " + MetaDataProtocol.CURRENT_CLIENT_VERSION + + "; Server version: " + getServerVersion(serverJarVersion) + + " The following servers require an updated " + + QueryConstants.DEFAULT_COPROCESS_JAR_NAME + + " to be put in the classpath of HBase."); + } else if ( + compatibility.getErrorCode() + == SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode() + ) { + errorMessage + .append("Major version of client is less than that of the server. Client version: " + + MetaDataProtocol.CURRENT_CLIENT_VERSION + "; Server version: " + + getServerVersion(serverJarVersion)); + } + } + hasIndexWALCodec = hasIndexWALCodec && hasIndexWALCodec(serverJarVersion); + if (minHBaseVersion > MetaDataUtil.decodeHBaseVersion(serverJarVersion)) { + minHBaseVersion = MetaDataUtil.decodeHBaseVersion(serverJarVersion); + } + // In case this is the first time connecting to this cluster, the system catalog table does + // not have an + // entry for itself yet, so we cannot get the timestamp and this will not be returned from + // the + // GetVersionResponse message object + if (versionResponse.hasSystemCatalogTimestamp()) { + systemCatalogTimestamp = + systemCatalogTimestamp < versionResponse.getSystemCatalogTimestamp() + ? 
systemCatalogTimestamp + : versionResponse.getSystemCatalogTimestamp(); + } + + if (compatibility.getErrorCode() != 0) { + if (compatibility.getErrorCode() == SQLExceptionCode.OUTDATED_JARS.getErrorCode()) { + errorMessage.setLength(errorMessage.length() - 1); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.OUTDATED_JARS) + .setMessage(errorMessage.toString()).build().buildException(); + } else if ( + compatibility.getErrorCode() + == SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode() + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR) + .setMessage(errorMessage.toString()).build().buildException(); + } + } + } + if ( + isTableNamespaceMappingEnabled + != SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, getProps()) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES) + .setMessage("Ensure that config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + + " is consistent on client and server.") + .build().buildException(); + } + lowestClusterHBaseVersion = minHBaseVersion; + } finally { + if (ht != null) { try { - return HBaseFactoryProvider.getHTableFactory().getTable(tableName, - connection, null); + ht.close(); } catch (IOException e) { - throw new SQLException(e); - } - } - - @Override - public Table getTableIfExists(byte[] tableName) throws SQLException { - try (Admin admin = getAdmin()) { - if (!AdminUtilWithFallback.tableExists(admin, TableName.valueOf(tableName))) { - throw new TableNotFoundException( - SchemaUtil.getSchemaNameFromFullName(tableName), - SchemaUtil.getTableNameFromFullName(tableName)); - } - } catch (IOException | InterruptedException e) { - throw new SQLException(e); - } - return getTable(tableName); - } - - @Override - public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { - Table htable = getTable(tableName); + LOGGER.warn("Could not close HTable", e); + } + } + } + + if (systemCatalogTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP) { + throw new UpgradeRequiredException(systemCatalogTimestamp); + } + } + + private String getServerVersion(long serverJarVersion) { + return (VersionUtil.decodeMajorVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion)) + + "." + VersionUtil.decodeMinorVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion)) + + "." 
+ VersionUtil.decodePatchVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion))); + } + + /** + * Invoke the SYSTEM.CHILD_LINK metadata coprocessor endpoint + * @param parentTableKey key corresponding to the parent of the view + * @param callable used to invoke the coprocessor endpoint to write links from a parent to + * its child view + * @return result of invoking the coprocessor endpoint + */ + private MetaDataMutationResult childLinkMetaDataCoprocessorExec(byte[] parentTableKey, + Batch.Call callable) throws SQLException { + try (Table htable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, this.getProps()) + .getName())) { + final Map results = htable.coprocessorService( + ChildLinkMetaDataService.class, parentTableKey, parentTableKey, callable); + assert (results.size() == 1); + MetaDataResponse result = results.values().iterator().next(); + return MetaDataMutationResult.constructFromProto(result); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } catch (Throwable t) { + throw new SQLException(t); + } + } + + @VisibleForTesting + protected RpcController getController() { + return getController(SYSTEM_CATALOG_HBASE_TABLE_NAME); + } + + /** + * If configured to use the server-server metadata handler pool for server side connections, use + * the {@link org.apache.hadoop.hbase.ipc.controller.ServerToServerRpcController} else use the + * ordinary handler pool {@link ServerRpcController} return the rpcController to use + */ + @VisibleForTesting + protected RpcController getController(TableName systemTableName) { + if (serverSideRPCControllerFactory != null) { + ServerToServerRpcController controller = serverSideRPCControllerFactory.newController(); + controller.setPriority(systemTableName); + return controller; + } else { + return new ServerRpcController(); + } + } + + @VisibleForTesting + public ConnectionLimiter getConnectionLimiter() { + return connectionLimiter; + } + + /** + * helper function to return the exception from the RPC + */ + + private void checkForRemoteExceptions(RpcController controller) throws IOException { + if (controller != null) { + if (controller instanceof ServerRpcController) { + if (((ServerRpcController) controller).getFailedOn() != null) { + throw ((ServerRpcController) controller).getFailedOn(); + } + } else { + if (((HBaseRpcController) controller).getFailed() != null) { + throw ((HBaseRpcController) controller).getFailed(); + } + } + } + } + + /** + * Invoke meta data coprocessor with one retry if the key was found to not be in the regions (due + * to a table split) + */ + private MetaDataMutationResult metaDataCoprocessorExec(String tableName, byte[] tableKey, + Batch.Call callable) throws SQLException { + return metaDataCoprocessorExec(tableName, tableKey, callable, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); + } + + /** + * Invoke meta data coprocessor with one retry if the key was found to not be in the regions (due + * to a table split) + */ + private MetaDataMutationResult metaDataCoprocessorExec(String tableName, byte[] tableKey, + Batch.Call callable, byte[] systemTableName) + throws SQLException { + Map results; + try { + boolean success = false; + boolean retried = false; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + while (true) { + if (retried) { + ((ClusterConnection) connection) + .relocateRegion(SchemaUtil.getPhysicalName(systemTableName, this.getProps()), tableKey); + } + + Table ht = + 
this.getTable(SchemaUtil.getPhysicalName(systemTableName, this.getProps()).getName()); try { - return htable.getDescriptor(); - } catch (IOException e) { - if (e instanceof org.apache.hadoop.hbase.TableNotFoundException - || e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) { - byte[][] schemaAndTableName = new byte[2][]; - SchemaUtil.getVarChars(tableName, schemaAndTableName); - throw new TableNotFoundException(Bytes.toString(schemaAndTableName[0]), Bytes.toString(schemaAndTableName[1])); - } - throw new RuntimeException(e); + results = ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable); + + assert (results.size() == 1); + MetaDataResponse result = results.values().iterator().next(); + if ( + result.getReturnCode() == MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION + || result.getReturnCode() == MetaDataProtos.MutationCode.FUNCTION_NOT_IN_REGION + ) { + if (retried) return MetaDataMutationResult.constructFromProto(result); + retried = true; + continue; + } + success = true; + return MetaDataMutationResult.constructFromProto(result); } finally { - Closeables.closeQuietly(htable); - } - } - - @Override - public ReadOnlyProps getProps() { - return props; + long systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); + if (success) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); + } else { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + NUM_SYSTEM_TABLE_RPC_FAILURES, 1); + } + Closeables.closeQuietly(ht); + } + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } catch (Throwable t) { + throw new SQLException(t); + } + } + + // Our property values are translated using toString, so we need to "string-ify" this. 
+ private static final String TRUE_BYTES_AS_STRING = Bytes.toString(PDataType.TRUE_BYTES); + + private void ensureViewIndexTableCreated(byte[] physicalTableName, byte[] parentPhysicalTableName, + Map tableProps, List>> families, + byte[][] splits, long timestamp, boolean isNamespaceMapped) throws SQLException { + byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); + + tableProps.put(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME, TRUE_BYTES_AS_STRING); + TableDescriptor desc = ensureTableCreated(physicalIndexName, parentPhysicalTableName, + PTableType.TABLE, tableProps, families, splits, true, isNamespaceMapped, false); + if (desc != null) { + if ( + !Boolean.TRUE.equals( + PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES))) + ) { + String fullTableName = Bytes.toString(physicalIndexName); + throw new TableAlreadyExistsException(SchemaUtil.getSchemaNameFromFullName(fullTableName), + SchemaUtil.getTableNameFromFullName(fullTableName), + "Unable to create shared physical table for indexes on views."); + } + } + } + + private void disableTable(Admin admin, TableName tableName) throws IOException { + try { + admin.disableTable(tableName); + } catch (TableNotEnabledException e) { + LOGGER.info("Table already disabled, continuing with next steps", e); + } + } + + private boolean ensureViewIndexTableDropped(byte[] physicalTableName, long timestamp) + throws SQLException { + byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); + boolean wasDeleted = false; + try (Admin admin = getAdmin()) { + try { + TableName physicalIndexTableName = TableName.valueOf(physicalIndexName); + TableDescriptor desc = admin.getDescriptor(physicalIndexTableName); + if ( + Boolean.TRUE.equals( + PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES))) + ) { + final ReadOnlyProps props = this.getProps(); + final boolean dropMetadata = + props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + if (dropMetadata) { + disableTable(admin, physicalIndexTableName); + admin.deleteTable(physicalIndexTableName); + clearTableRegionCache(physicalIndexTableName); + wasDeleted = true; + } else { + this.tableStatsCache.invalidateAll(desc); + } + } + } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { + // Ignore, as we may never have created a view index table + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + return wasDeleted; + } + + private boolean ensureLocalIndexTableDropped(byte[] physicalTableName, long timestamp) + throws SQLException { + TableDescriptor desc = null; + boolean wasDeleted = false; + try (Admin admin = getAdmin()) { + try { + desc = admin.getDescriptor(TableName.valueOf(physicalTableName)); + for (byte[] fam : desc.getColumnFamilyNames()) { + this.tableStatsCache.invalidate(new GuidePostsKey(physicalTableName, fam)); + } + final ReadOnlyProps props = this.getProps(); + final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + if (dropMetadata) { + List columnFamiles = new ArrayList<>(); + for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { + if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + columnFamiles.add(cf.getNameAsString()); + } + } + for (String cf : columnFamiles) { + admin.deleteColumnFamily(TableName.valueOf(physicalTableName), Bytes.toBytes(cf)); + } + clearTableRegionCache(TableName.valueOf(physicalTableName)); + wasDeleted = true; + } + } 
catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { + // Ignore, as we may never have created a view index table + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + return wasDeleted; + } + + @Override + public MetaDataMutationResult createTable(final List tableMetaData, + final byte[] physicalTableName, PTableType tableType, Map tableProps, + final List>> families, byte[][] splits, + boolean isNamespaceMapped, final boolean allocateIndexId, final boolean isDoNotUpgradePropSet, + final PTable parentTable) throws SQLException { + List childLinkMutations = MetaDataUtil.removeChildLinkMutations(tableMetaData); + byte[][] rowKeyMetadata = new byte[3][]; + Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetaData); + byte[] key = m.getRow(); + SchemaUtil.getVarChars(key, rowKeyMetadata); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + byte[] physicalTableNameBytes = physicalTableName != null + ? physicalTableName + : SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, isNamespaceMapped).getBytes(); + boolean localIndexTable = false; + for (Pair> family : families) { + if ( + Bytes.toString(family.getFirst()) + .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX) + ) { + localIndexTable = true; + break; + } + } + if ( + (tableType != PTableType.CDC) && ((tableType == PTableType.VIEW && physicalTableName != null) + || (tableType != PTableType.VIEW && (physicalTableName == null || localIndexTable))) + ) { + // For views this will ensure that metadata already exists + // For tables and indexes, this will create the metadata if it doesn't already exist + ensureTableCreated(physicalTableNameBytes, null, tableType, tableProps, families, splits, + true, isNamespaceMapped, isDoNotUpgradePropSet); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + if (tableType == PTableType.INDEX) { // Index on view + // Physical index table created up front for multi tenant + // TODO: if viewIndexId is Short.MIN_VALUE, then we don't need to attempt to create it + if (physicalTableName != null) { + if (!localIndexTable && !MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) { + // For view index, the physical table name is _IDX_+ logical table name format + ensureViewIndexTableCreated( + tenantIdBytes.length == 0 ? 
null : PNameFactory.newName(tenantIdBytes), + physicalTableName, MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped); + } + } + } else if (tableType == PTableType.TABLE && MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) { // Create + // view + // index + // table + // up + // front + // for + // multi + // tenant + // tables + ptr.set(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); + MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME_BYTES, + kvBuilder, ptr); + List>> familiesPlusDefault = null; + for (Pair> family : families) { + byte[] cf = family.getFirst(); + if (Bytes.compareTo(cf, 0, cf.length, ptr.get(), ptr.getOffset(), ptr.getLength()) == 0) { + familiesPlusDefault = families; + break; + } + } + // Don't override if default family already present + if (familiesPlusDefault == null) { + byte[] defaultCF = ByteUtil.copyKeyBytesIfNecessary(ptr); + // Only use splits if table is salted, otherwise it may not be applicable + // Always add default column family, as we don't know in advance if we'll need it + familiesPlusDefault = Lists.newArrayList(families); + familiesPlusDefault.add(new Pair>(defaultCF, + Collections. emptyMap())); + } + ensureViewIndexTableCreated(physicalTableNameBytes, physicalTableNameBytes, tableProps, + familiesPlusDefault, MetaDataUtil.isSalted(m, kvBuilder, ptr) ? splits : null, + MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped); + } + + // Avoid the client-server RPC if this is not a view creation + if (!childLinkMutations.isEmpty()) { + // Send mutations for parent-child links to SYSTEM.CHILD_LINK + // We invoke this using rowKey available in the first element + // of childLinkMutations. + final byte[] rowKey = childLinkMutations.get(0).getRow(); + final RpcController controller = + getController(PhoenixDatabaseMetaData.SYSTEM_LINK_HBASE_TABLE_NAME); + final MetaDataMutationResult result = childLinkMetaDataCoprocessorExec(rowKey, + new ChildLinkMetaDataServiceCallBack(controller, childLinkMutations)); + + switch (result.getMutationCode()) { + case UNABLE_TO_CREATE_CHILD_LINK: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_CREATE_CHILD_LINK) + .setSchemaName(Bytes.toString(schemaBytes)) + .setTableName(Bytes.toString(physicalTableNameBytes)).build().buildException(); + default: + break; + } } - /** - * Closes all the connections it has in its connectionQueues. 
- */ - @Override - public void closeAllConnections(SQLExceptionInfo.Builder reasonBuilder) { - for (LinkedBlockingQueue> queue : connectionQueues) { - for (WeakReference connectionReference : queue) { - PhoenixConnection connection = connectionReference.get(); - try { - if (connection != null && !connection.isClosed()) { - connection.close(reasonBuilder.build().buildException()); - } - } catch (SQLException e) { - LOGGER.warn("Exception while closing phoenix connection {}", connection, e); - } - } + // Send the remaining metadata mutations to SYSTEM.CATALOG + byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); + return metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>(); + CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); + for (Mutation m : tableMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + if (allocateIndexId) { + builder.setAllocateIndexId(allocateIndexId); + } + if (parentTable != null) { + builder.setParentTable(PTableImpl.toProto(parentTable)); + } + CreateTableRequest build = builder.build(); + instance.createTable(controller, build, rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + } + + @Override + public MetaDataMutationResult getTable(final PName tenantId, final byte[] schemaBytes, + final byte[] tableBytes, final long tableTimestamp, final long clientTimestamp) + throws SQLException { + final byte[] tenantIdBytes = tenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); + byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); + return metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + GetTableRequest.Builder builder = GetTableRequest.newBuilder(); + builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); + builder.setSchemaName(ByteStringer.wrap(schemaBytes)); + builder.setTableName(ByteStringer.wrap(tableBytes)); + builder.setTableTimestamp(tableTimestamp); + builder.setClientTimestamp(clientTimestamp); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.getTable(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + } + + @Override + public MetaDataMutationResult dropTable(final List tableMetaData, + final PTableType tableType, final boolean cascade) throws SQLException { + byte[][] rowKeyMetadata = new byte[3][]; + SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + byte[] tableKey = SchemaUtil.getTableKey( + tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes); + final MetaDataMutationResult result = metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + DropTableRequest.Builder builder = DropTableRequest.newBuilder(); + for (Mutation m : tableMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setTableType(tableType.getSerializedValue()); + builder.setCascade(cascade); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.dropTable(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + + final MutationCode code = result.getMutationCode(); + switch (code) { + case TABLE_ALREADY_EXISTS: + ReadOnlyProps props = this.getProps(); + boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + PTable table = result.getTable(); + if (dropMetadata) { + flushParentPhysicalTable(table); + dropTables(result.getTableNamesToDelete()); + } else { + invalidateTableStats(result.getTableNamesToDelete()); + } + long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData); + if (tableType == PTableType.TABLE) { + byte[] physicalName = table.getPhysicalName().getBytes(); + ensureViewIndexTableDropped(physicalName, timestamp); + ensureLocalIndexTableDropped(physicalName, timestamp); + 
tableStatsCache.invalidateAll(table); + } + break; + default: + break; + } + return result; + } + + /* + * PHOENIX-2915 while dropping index, flush data table to avoid stale WAL edits of indexes 1. + * Flush parent table if dropping view has indexes 2. Dropping table indexes 3. Dropping view + * indexes + */ + private void flushParentPhysicalTable(PTable table) throws SQLException { + byte[] parentPhysicalTableName = null; + if (PTableType.VIEW == table.getType()) { + if (!table.getIndexes().isEmpty()) { + parentPhysicalTableName = table.getPhysicalName().getBytes(); + } + } else if (PTableType.INDEX == table.getType()) { + PTable parentTable = getTable(table.getTenantId(), table.getParentName().getString(), + HConstants.LATEST_TIMESTAMP); + parentPhysicalTableName = parentTable.getPhysicalName().getBytes(); + } + if (parentPhysicalTableName != null) { + try { + flushTable(parentPhysicalTableName); + } catch (PhoenixIOException ex) { + if (ex.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) { + LOGGER.info("Flushing physical parent table " + Bytes.toString(parentPhysicalTableName) + + " of " + table.getName().getString() + " failed with : " + ex + " with cause: " + + ex.getCause() + " since the table has already been dropped"); + } else { + throw ex; } + } } + } + @Override + public MetaDataMutationResult dropFunction(final List functionData, + final boolean ifExists) throws SQLException { + byte[][] rowKeyMetadata = new byte[2][]; + byte[] key = functionData.get(0).getRow(); + SchemaUtil.getVarChars(key, rowKeyMetadata); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; + byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes); - /** - * Closes the underlying connection to zookeeper. The QueryServices - * may not be used after that point. When a Connection is closed, - * this is not called, since these instances are pooled by the - * Driver. 
Instead, the Driver should call this if the QueryServices - * is ever removed from the pool - */ - @Override - public void close() throws SQLException { - if (closed) { - return; - } - synchronized (this) { - if (closed) { - return; - } - closed = true; - GLOBAL_QUERY_SERVICES_COUNTER.decrement(); + final MetaDataMutationResult result = metaDataCoprocessorExec(null, functionKey, + new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + DropFunctionRequest.Builder builder = DropFunctionRequest.newBuilder(); + for (Mutation m : functionData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setIfExists(ifExists); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.dropFunction(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }, SYSTEM_FUNCTION_NAME_BYTES); + return result; + } + + private void invalidateTableStats(final List tableNamesToDelete) throws SQLException { + if (tableNamesToDelete != null) { + for (byte[] tableName : tableNamesToDelete) { + TableName tn = TableName.valueOf(tableName); + TableDescriptor htableDesc = this.getTableDescriptor(tableName); + tableStatsCache.invalidateAll(htableDesc); + } + } + } + + private void dropTable(byte[] tableNameToDelete) throws SQLException { + dropTables(Collections. singletonList(tableNameToDelete)); + } + + @VisibleForTesting + void dropTables(final List tableNamesToDelete) throws SQLException { + SQLException sqlE = null; + try (Admin admin = getAdmin()) { + if (tableNamesToDelete != null) { + for (byte[] tableName : tableNamesToDelete) { + try { + TableName tn = TableName.valueOf(tableName); + TableDescriptor htableDesc = this.getTableDescriptor(tableName); + disableTable(admin, tn); + admin.deleteTable(tn); + tableStatsCache.invalidateAll(htableDesc); + clearTableRegionCache(TableName.valueOf(tableName)); + } catch (TableNotFoundException ignore) { + } + } + } + + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + + private static Map createPropertiesMap(Map htableProps) { + Map props = Maps.newHashMapWithExpectedSize(htableProps.size()); + for (Map.Entry entry : htableProps.entrySet()) { + Bytes key = entry.getKey(); + Bytes value = entry.getValue(); + props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()), + Bytes.toString(value.get(), value.getOffset(), value.getLength())); + } + return props; + } + + private void ensureViewIndexTableCreated(PName tenantId, byte[] physicalIndexTableName, + long timestamp, boolean isNamespaceMapped) throws SQLException { + String name = Bytes + .toString(SchemaUtil.getParentTableNameFromIndexTable(physicalIndexTableName, + MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) + .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + PTable table = getTable(tenantId, name, timestamp); + ensureViewIndexTableCreated(table, timestamp, isNamespaceMapped); + } + + private PTable getTable(PName tenantId, String fullTableName, long timestamp) + throws SQLException { + PTable table; + try { + PMetaData metadata = latestMetaData; + throwConnectionClosedIfNullMetaData(); + table = 
metadata.getTableRef(new PTableKey(tenantId, fullTableName)).getTable(); + if (table.getTimeStamp() >= timestamp) { // Table in cache is newer than client timestamp + // which shouldn't be + // the case + throw new TableNotFoundException(table.getSchemaName().getString(), + table.getTableName().getString()); + } + } catch (TableNotFoundException e) { + byte[] schemaName = Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(fullTableName)); + byte[] tableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(fullTableName)); + MetaDataMutationResult result = + this.getTable(tenantId, schemaName, tableName, HConstants.LATEST_TIMESTAMP, timestamp); + table = result.getTable(); + if (table == null) { + throw e; + } + } + return table; + } + + private void ensureViewIndexTableCreated(PTable table, long timestamp, boolean isNamespaceMapped) + throws SQLException { + byte[] physicalTableName = table.getPhysicalName().getBytes(); + TableDescriptor htableDesc = this.getTableDescriptor(physicalTableName); + List>> families = + Lists.newArrayListWithExpectedSize(Math.max(1, table.getColumnFamilies().size() + 1)); + + // Create all column families that the parent table has + for (PColumnFamily family : table.getColumnFamilies()) { + byte[] familyName = family.getName().getBytes(); + Map familyProps = + createPropertiesMap(htableDesc.getColumnFamily(familyName).getValues()); + families.add(new Pair<>(familyName, familyProps)); + } + // Always create default column family, because we don't know in advance if we'll + // need it for an index with no covered columns. + byte[] defaultFamilyName = table.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES + : table.getDefaultFamilyName().getBytes(); + families.add(new Pair<>(defaultFamilyName, Collections. 
emptyMap())); + + byte[][] splits = null; + if (table.getBucketNum() != null) { + splits = SaltingUtil.getSalteByteSplitPoints(table.getBucketNum()); + } + + // Transfer over table values into tableProps + // TODO: encapsulate better + Map tableProps = createPropertiesMap(htableDesc.getValues()); + tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, table.isTransactional()); + tableProps.put(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, table.isImmutableRows()); + + // We got the properties of the physical base table but we need to create the view index table + // using logical name + byte[] viewPhysicalTableName = MetaDataUtil + .getNamespaceMappedName(table.getName(), isNamespaceMapped).getBytes(StandardCharsets.UTF_8); + ensureViewIndexTableCreated(viewPhysicalTableName, physicalTableName, tableProps, families, + splits, timestamp, isNamespaceMapped); + } + + @Override + public MetaDataMutationResult addColumn(final List tableMetaData, PTable table, + final PTable parentTable, final PTable transformingNewTable, + Map>> stmtProperties, + Set colFamiliesForPColumnsToBeAdded, List columns) throws SQLException { + List>> families = new ArrayList<>(stmtProperties.size()); + Map tableProps = new HashMap<>(); + Set tableDescriptors = Collections.emptySet(); + boolean nonTxToTx = false; + + Map oldToNewTableDescriptors = separateAndValidateProperties( + table, stmtProperties, colFamiliesForPColumnsToBeAdded, tableProps); + Set origTableDescriptors = new HashSet<>(oldToNewTableDescriptors.keySet()); + + TableDescriptor baseTableOrigDesc = this.getTableDescriptor(table.getPhysicalName().getBytes()); + TableDescriptor tableDescriptor = oldToNewTableDescriptors.get(baseTableOrigDesc); + + if (tableDescriptor != null) { + tableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size()); + nonTxToTx = Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA)); + /* + * If the table was transitioned from non transactional to transactional, we need to also + * transition the index tables. + */ + + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(tableDescriptor); + if (nonTxToTx) { + updateDescriptorForTx(table, tableProps, tableDescriptorBuilder, Boolean.TRUE.toString(), + tableDescriptors, origTableDescriptors, oldToNewTableDescriptors); + tableDescriptor = tableDescriptorBuilder.build(); + tableDescriptors.add(tableDescriptor); + } else { + tableDescriptors = new HashSet<>(oldToNewTableDescriptors.values()); + } + } + + boolean success = false; + boolean metaDataUpdated = !tableDescriptors.isEmpty(); + boolean pollingNeeded = + !(!tableProps.isEmpty() && families.isEmpty() && colFamiliesForPColumnsToBeAdded.isEmpty()); + MetaDataMutationResult result = null; + try { + boolean modifyHTable = true; + if (table.getType() == PTableType.VIEW) { + boolean canViewsAddNewCF = props.getBoolean(QueryServices.ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, + QueryServicesOptions.DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE); + // When adding a column to a view, base physical table should only be modified when new + // column families are being added. + modifyHTable = + canViewsAddNewCF && !existingColumnFamiliesForBaseTable(table.getPhysicalName()) + .containsAll(colFamiliesForPColumnsToBeAdded); + } + + // Special case for call during drop table to ensure that the empty column family exists. 
+ // In this, case we only include the table header row, as until we add schemaBytes and + // tableBytes + // as args to this function, we have no way of getting them in this case. + // TODO: change to if (tableMetaData.isEmpty()) once we pass through schemaBytes and + // tableBytes + // Also, could be used to update table descriptor property values on ALTER TABLE t SET + // prop=xxx + if ( + (tableMetaData.isEmpty()) || (tableMetaData.size() == 1 && tableMetaData.get(0).isEmpty()) + ) { + if (modifyHTable) { + sendHBaseMetaData(tableDescriptors, pollingNeeded); + } + return new MetaDataMutationResult(MutationCode.NO_OP, + EnvironmentEdgeManager.currentTimeMillis(), table); + } + byte[][] rowKeyMetaData = new byte[3][]; + PTableType tableType = table.getType(); + + Mutation m = tableMetaData.get(0); + byte[] rowKey = m.getRow(); + SchemaUtil.getVarChars(rowKey, rowKeyMetaData); + byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaBytes = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + final boolean addingColumns = columns != null && columns.size() > 0; + result = metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); + for (Mutation m : tableMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + if (parentTable != null) builder.setParentTable(PTableImpl.toProto(parentTable)); + if (transformingNewTable != null) { + builder.setTransformingNewTable(PTableImpl.toProto(transformingNewTable)); + } + builder.setAddingColumns(addingColumns); + instance.addColumn(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + + if ( + result.getMutationCode() == MutationCode.COLUMN_NOT_FOUND + || result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS + ) { // Success + success = true; + // Flush the table if transitioning DISABLE_WAL from TRUE to FALSE + if ( + MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.DISABLE_WAL_BYTES, kvBuilder, + ptr) && Boolean.FALSE.equals(PBoolean.INSTANCE.toObject(ptr)) + ) { + flushTable(table.getPhysicalName().getBytes()); + } + + if (tableType == PTableType.TABLE) { + // If we're changing MULTI_TENANT to true or false, create or drop the view index table + if ( + MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, kvBuilder, + ptr) + ) { + long timestamp = MetaDataUtil.getClientTimeStamp(m); + if ( + Boolean.TRUE + .equals(PBoolean.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength())) + ) { + this.ensureViewIndexTableCreated(table, timestamp, table.isNamespaceMapped()); + } else { + this.ensureViewIndexTableDropped(table.getPhysicalName().getBytes(), timestamp); + } + } + } + } + + 
if (modifyHTable && result.getMutationCode() != MutationCode.UNALLOWED_TABLE_MUTATION) { + sendHBaseMetaData(tableDescriptors, pollingNeeded); + } + } finally { + // If we weren't successful with our metadata update + // and we've already pushed the HBase metadata changes to the server + // and we've tried to go from non transactional to transactional + // then we must undo the metadata change otherwise the table will + // no longer function correctly. + // Note that if this fails, we're in a corrupt state. + if (!success && metaDataUpdated && nonTxToTx) { + sendHBaseMetaData(origTableDescriptors, pollingNeeded); + } + } + return result; + } + + private void updateDescriptorForTx(PTable table, Map tableProps, + TableDescriptorBuilder tableDescriptorBuilder, String txValue, + Set descriptorsToUpdate, Set origDescriptors, + Map oldToNewTableDescriptors) throws SQLException { + byte[] physicalTableName = table.getPhysicalName().getBytes(); + try (Admin admin = getAdmin()) { + TableDescriptor baseDesc = admin.getDescriptor(TableName.valueOf(physicalTableName)); + boolean hasOldIndexing = baseDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME); + setTransactional(physicalTableName, tableDescriptorBuilder, table.getType(), txValue, + tableProps, hasOldIndexing); + Map indexTableProps; + if (txValue == null) { + indexTableProps = Collections.emptyMap(); + } else { + indexTableProps = Maps.newHashMapWithExpectedSize(1); + indexTableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.valueOf(txValue)); + indexTableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, + tableProps.get(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER)); + } + for (PTable index : table.getIndexes()) { + TableDescriptor origIndexDesc = + admin.getDescriptor(TableName.valueOf(index.getPhysicalName().getBytes())); + TableDescriptor intermedIndexDesc = origIndexDesc; + // If we already wished to make modifications to this index table descriptor previously, we + // use the updated + // table descriptor to carry out further modifications + // See {@link ConnectionQueryServicesImpl#separateAndValidateProperties(PTable, Map, Set, + // Map)} + if (origDescriptors.contains(origIndexDesc)) { + intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); + // Remove any previous modification for this table descriptor because we will add + // the combined modification done in this method as well + descriptorsToUpdate.remove(intermedIndexDesc); + } else { + origDescriptors.add(origIndexDesc); + } + TableDescriptorBuilder indexDescriptorBuilder = + TableDescriptorBuilder.newBuilder(intermedIndexDesc); + if (index.getColumnFamilies().isEmpty()) { + byte[] dataFamilyName = SchemaUtil.getEmptyColumnFamily(table); + byte[] indexFamilyName = SchemaUtil.getEmptyColumnFamily(index); + ColumnFamilyDescriptorBuilder indexColDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(indexDescriptorBuilder.build().getColumnFamily(indexFamilyName)); + ColumnFamilyDescriptor tableColDescriptor = + tableDescriptorBuilder.build().getColumnFamily(dataFamilyName); + indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions()); + indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), + tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); + indexDescriptorBuilder.removeColumnFamily(indexFamilyName); + indexDescriptorBuilder.setColumnFamily(indexColDescriptor.build()); + } else { + for (PColumnFamily family : index.getColumnFamilies()) { + byte[] familyName = 
family.getName().getBytes(); + ColumnFamilyDescriptorBuilder indexColDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName)); + ColumnFamilyDescriptor tableColDescriptor = + tableDescriptorBuilder.build().getColumnFamily(familyName); + indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions()); + indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), + tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); + indexDescriptorBuilder.removeColumnFamily(familyName); + indexDescriptorBuilder.setColumnFamily(indexColDescriptor.build()); + } + } + setTransactional(index.getPhysicalName().getBytes(), indexDescriptorBuilder, + index.getType(), txValue, indexTableProps, hasOldIndexing); + descriptorsToUpdate.add(indexDescriptorBuilder.build()); + } + try { + TableDescriptor origIndexDesc = admin.getDescriptor( + TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(physicalTableName))); + TableDescriptor intermedIndexDesc = origIndexDesc; + if (origDescriptors.contains(origIndexDesc)) { + intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); + descriptorsToUpdate.remove(intermedIndexDesc); + } else { + origDescriptors.add(origIndexDesc); + } + TableDescriptorBuilder indexDescriptorBuilder = + TableDescriptorBuilder.newBuilder(intermedIndexDesc); + setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder); + setTransactional(MetaDataUtil.getViewIndexPhysicalName(physicalTableName), + indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps, hasOldIndexing); + descriptorsToUpdate.add(indexDescriptorBuilder.build()); + } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { + // Ignore, as we may never have created a view index table + } + try { + TableDescriptor origIndexDesc = admin.getDescriptor( + TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName))); + TableDescriptor intermedIndexDesc = origIndexDesc; + if (origDescriptors.contains(origIndexDesc)) { + intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); + descriptorsToUpdate.remove(intermedIndexDesc); + } else { + origDescriptors.add(origIndexDesc); + } + TableDescriptorBuilder indexDescriptorBuilder = + TableDescriptorBuilder.newBuilder(intermedIndexDesc); + setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder); + setTransactional(MetaDataUtil.getViewIndexPhysicalName(physicalTableName), + indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps, hasOldIndexing); + descriptorsToUpdate.add(indexDescriptorBuilder.build()); + } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { + // Ignore, as we may never have created a local index + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + private void setSharedIndexMaxVersion(PTable table, TableDescriptor tableDescriptor, + TableDescriptorBuilder indexDescriptorBuilder) { + if (table.getColumnFamilies().isEmpty()) { + byte[] familyName = SchemaUtil.getEmptyColumnFamily(table); + ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = ColumnFamilyDescriptorBuilder + .newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName)); + ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName); + indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions()); + 
indexColDescriptorBuilder.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), + tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); + indexDescriptorBuilder.removeColumnFamily(familyName); + indexDescriptorBuilder.setColumnFamily(indexColDescriptorBuilder.build()); + } else { + for (PColumnFamily family : table.getColumnFamilies()) { + byte[] familyName = family.getName().getBytes(); + ColumnFamilyDescriptor indexColDescriptor = + indexDescriptorBuilder.build().getColumnFamily(familyName); + if (indexColDescriptor != null) { + ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName); + ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(indexColDescriptor); + indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions()); + indexColDescriptorBuilder.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), + tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); + indexDescriptorBuilder.removeColumnFamily(familyName); + indexDescriptorBuilder.setColumnFamily(indexColDescriptorBuilder.build()); + } + } + } + } + + private void sendHBaseMetaData(Set tableDescriptors, boolean pollingNeeded) + throws SQLException { + SQLException sqlE = null; + for (TableDescriptor descriptor : tableDescriptors) { + try { + modifyTable(descriptor.getTableName().getName(), descriptor, pollingNeeded); + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); + } catch (TimeoutException e) { + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) + .setRootCause(e.getCause() != null ? 
e.getCause() : e).build().buildException(); + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + } + + private void setTransactional(byte[] physicalTableName, + TableDescriptorBuilder tableDescriptorBuilder, PTableType tableType, String txValue, + Map tableProps, boolean hasOldIndexing) throws SQLException { + if (txValue == null) { + tableDescriptorBuilder.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA)); + } else { + tableDescriptorBuilder.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, txValue); + } + this.addCoprocessors(physicalTableName, tableDescriptorBuilder, tableType, tableProps, null, + hasOldIndexing); + } + + private Map separateAndValidateProperties(PTable table, + Map>> properties, Set colFamiliesForPColumnsToBeAdded, + Map tableProps) throws SQLException { + Map> stmtFamiliesPropsMap = new HashMap<>(properties.size()); + Map commonFamilyProps = new HashMap<>(); + boolean addingColumns = + colFamiliesForPColumnsToBeAdded != null && !colFamiliesForPColumnsToBeAdded.isEmpty(); + HashSet existingColumnFamilies = existingColumnFamilies(table); + Map> allFamiliesProps = + new HashMap<>(existingColumnFamilies.size()); + boolean isTransactional = table.isTransactional(); + boolean willBeTransactional = false; + boolean isOrWillBeTransactional = isTransactional; + Integer newTTL = null; + Integer newPhoenixTTL = null; + Integer newReplicationScope = null; + KeepDeletedCells newKeepDeletedCells = null; + TransactionFactory.Provider txProvider = null; + for (String family : properties.keySet()) { + List> propsList = properties.get(family); + if (propsList != null && propsList.size() > 0) { + Map colFamilyPropsMap = new HashMap<>(propsList.size()); + for (Pair prop : propsList) { + String propName = prop.getFirst(); + Object propValue = prop.getSecond(); + if ( + (MetaDataUtil.isHTableProperty(propName) + || TableProperty.isPhoenixTableProperty(propName)) && addingColumns + ) { + // setting HTable and PhoenixTable properties while adding a column is not allowed. + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN) + .setMessage("Property: " + propName) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + if (MetaDataUtil.isHTableProperty(propName)) { + // Can't have a column family name for a property that's an HTableProperty + if (!family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY) + .setMessage("Column Family: " + family + ", Property: " + propName) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + tableProps.put(propName, propValue); + } else { + if (TableProperty.isPhoenixTableProperty(propName)) { + TableProperty tableProp = TableProperty.valueOf(propName); + tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), + table.getType()); + if (propName.equals(TTL)) { + if (table.getType() == PTableType.INDEX) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) + .setMessage("Property: " + propName).build().buildException(); + } + // Handle FOREVER and NONE case + propValue = convertForeverAndNoneTTLValue(propValue, isPhoenixTTLEnabled()); + // If Phoenix level TTL is enabled we are using TTL as phoenix + // Table level property. 
+ if (!isPhoenixTTLEnabled()) { + newTTL = ((Number) propValue).intValue(); + // Even though TTL is really a HColumnProperty we treat it + // specially. We enforce that all CFs have the same TTL. + commonFamilyProps.put(propName, propValue); + } else { + // Setting this here just to check if we need to throw Exception + // for Transaction's SET_TTL Feature. + newPhoenixTTL = ((Number) propValue).intValue(); + } + } else if ( + propName.equals(PhoenixDatabaseMetaData.TRANSACTIONAL) + && Boolean.TRUE.equals(propValue) + ) { + willBeTransactional = isOrWillBeTransactional = true; + tableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, propValue); + } else if ( + propName.equals(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER) && propValue != null + ) { + willBeTransactional = isOrWillBeTransactional = true; + tableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE); + txProvider = (Provider) TableProperty.TRANSACTION_PROVIDER.getValue(propValue); + tableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, txProvider); + } + } else { + if (MetaDataUtil.isHColumnProperty(propName)) { + if ( + table.getType() == PTableType.INDEX + && MetaDataUtil.propertyNotAllowedToBeOutOfSync(propName) + ) { + // We disallow index tables from overriding TTL, KEEP_DELETED_CELLS and + // REPLICATION_SCOPE, + // in order to avoid situations where indexes are not in sync with their data + // table + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) + .setMessage("Property: " + propName).build().buildException(); + } + if (family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { + if (propName.equals(KEEP_DELETED_CELLS)) { + newKeepDeletedCells = Boolean.valueOf(propValue.toString()) + ? KeepDeletedCells.TRUE + : KeepDeletedCells.FALSE; + } + if (propName.equals(REPLICATION_SCOPE)) { + newReplicationScope = ((Number) propValue).intValue(); + } + commonFamilyProps.put(propName, propValue); + } else if (MetaDataUtil.propertyNotAllowedToBeOutOfSync(propName)) { + // Don't allow specifying column families for TTL, KEEP_DELETED_CELLS and + // REPLICATION_SCOPE. + // These properties can only be applied for all column families of a table and + // can't be column family specific. + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY) + .setMessage("Property: " + propName).build().buildException(); + } else { + colFamilyPropsMap.put(propName, propValue); + } + } else { + // invalid property - neither of HTableProp, HColumnProp or PhoenixTableProp + // FIXME: This isn't getting triggered as currently a property gets evaluated + // as HTableProp if its neither HColumnProp or PhoenixTableProp. + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_PROPERTY) + .setMessage("Column Family: " + family + ", Property: " + propName) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + } + } + } + if (isOrWillBeTransactional && (newTTL != null || newPhoenixTTL != null)) { + TransactionFactory.Provider isOrWillBeTransactionProvider = + txProvider == null ? 
table.getTransactionProvider() : txProvider; + if ( + isOrWillBeTransactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.SET_TTL) + ) { + throw new SQLExceptionInfo.Builder(PhoenixTransactionProvider.Feature.SET_TTL.getCode()) + .setMessage(isOrWillBeTransactionProvider.name()) + .setSchemaName(table.getSchemaName().getString()) + .setTableName(table.getTableName().getString()).build().buildException(); + } + } + if (!colFamilyPropsMap.isEmpty()) { + stmtFamiliesPropsMap.put(family, colFamilyPropsMap); + } + + } + } + commonFamilyProps = Collections.unmodifiableMap(commonFamilyProps); + boolean isAddingPkColOnly = + colFamiliesForPColumnsToBeAdded.size() == 1 && colFamiliesForPColumnsToBeAdded.contains(null); + if (!commonFamilyProps.isEmpty()) { + if (!addingColumns) { + // Add the common family props to all existing column families + for (String existingColFamily : existingColumnFamilies) { + Map m = new HashMap<>(commonFamilyProps.size()); + m.putAll(commonFamilyProps); + allFamiliesProps.put(existingColFamily, m); + } + } else { + // Add the common family props to the column families of the columns being added + for (String colFamily : colFamiliesForPColumnsToBeAdded) { + if (colFamily != null) { + // only set properties for key value columns + Map m = new HashMap<>(commonFamilyProps.size()); + m.putAll(commonFamilyProps); + allFamiliesProps.put(colFamily, m); + } else if (isAddingPkColOnly) { + // Setting HColumnProperty for a pk column is invalid + // because it will be part of the row key and not a key value column family. + // However, if both pk cols as well as key value columns are getting added + // together, then its allowed. The above if block will make sure that we add properties + // only for the kv cols and not pk cols. + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE) + .build().buildException(); + } + } + } + } + + // Now go through the column family properties specified in the statement + // and merge them with the common family properties. + for (String f : stmtFamiliesPropsMap.keySet()) { + if (!addingColumns && !existingColumnFamilies.contains(f)) { + String schemaNameStr = + table.getSchemaName() == null ? null : table.getSchemaName().getString(); + String tableNameStr = + table.getTableName() == null ? null : table.getTableName().getString(); + throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, f); + } + if (addingColumns && !colFamiliesForPColumnsToBeAdded.contains(f)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED).build().buildException(); + } + Map commonProps = allFamiliesProps.get(f); + Map stmtProps = stmtFamiliesPropsMap.get(f); + if (commonProps != null) { + if (stmtProps != null) { + // merge common props with statement props for the family + commonProps.putAll(stmtProps); + } + } else { + // if no common props were specified, then assign family specific props + if (stmtProps != null) { + allFamiliesProps.put(f, stmtProps); + } + } + } + + // case when there is a column family being added but there are no props + // For ex - in DROP COLUMN when a new empty CF needs to be added since all + // the columns of the existing empty CF are getting dropped. 
Or the case + // when one is just adding a column for a column family like this: + // ALTER TABLE ADD CF.COL + for (String cf : colFamiliesForPColumnsToBeAdded) { + if (cf != null && allFamiliesProps.get(cf) == null) { + allFamiliesProps.put(cf, new HashMap()); + } + } + + if (table.getColumnFamilies().isEmpty() && !addingColumns && !commonFamilyProps.isEmpty()) { + allFamiliesProps.put(Bytes.toString(table.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES + : table.getDefaultFamilyName().getBytes()), commonFamilyProps); + } + + // Views are not allowed to have any of these properties. + if ( + table.getType() == PTableType.VIEW && (!stmtFamiliesPropsMap.isEmpty() + || !commonFamilyProps.isEmpty() || !tableProps.isEmpty()) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build() + .buildException(); + } + + TableDescriptorBuilder newTableDescriptorBuilder = null; + TableDescriptor origTableDescriptor = null; + // Store all old to new table descriptor mappings for the table as well as its global indexes + Map tableAndIndexDescriptorMappings = Collections.emptyMap(); + if (!allFamiliesProps.isEmpty() || !tableProps.isEmpty()) { + tableAndIndexDescriptorMappings = + Maps.newHashMapWithExpectedSize(3 + table.getIndexes().size()); + TableDescriptor existingTableDescriptor = + origTableDescriptor = this.getTableDescriptor(table.getPhysicalName().getBytes()); + newTableDescriptorBuilder = TableDescriptorBuilder.newBuilder(existingTableDescriptor); + if (!tableProps.isEmpty()) { + // add all the table properties to the new table descriptor + for (Entry entry : tableProps.entrySet()) { + newTableDescriptorBuilder.setValue(entry.getKey(), + entry.getValue() != null ? entry.getValue().toString() : null); + } + } + if (addingColumns) { + // Make sure that TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE for the new column family to + // be added stays in sync + // with the table's existing column families. Note that we use the new values for these + // properties in case we are + // altering their values. 
We also propagate these altered values to existing column families + // and indexes on the table below + setSyncedPropsForNewColumnFamilies(allFamiliesProps, table, newTableDescriptorBuilder, + newTTL, newKeepDeletedCells, newReplicationScope); + } + if (newTTL != null || newKeepDeletedCells != null || newReplicationScope != null) { + // Set properties to be kept in sync on all table column families of this table, even if + // they are not referenced here + setSyncedPropsForUnreferencedColumnFamilies( + this.getTableDescriptor(table.getPhysicalName().getBytes()), allFamiliesProps, newTTL, + newKeepDeletedCells, newReplicationScope); + } + + Integer defaultTxMaxVersions = null; + if (isOrWillBeTransactional) { + // Calculate default for max versions + Map emptyFamilyProps = + allFamiliesProps.get(SchemaUtil.getEmptyColumnFamilyAsString(table)); + if (emptyFamilyProps != null) { + defaultTxMaxVersions = (Integer) emptyFamilyProps.get(MAX_VERSIONS); + } + if (defaultTxMaxVersions == null) { + if (isTransactional) { + defaultTxMaxVersions = newTableDescriptorBuilder.build() + .getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions(); + } else { + defaultTxMaxVersions = + this.getProps().getInt(QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL); + } + } + if (willBeTransactional) { + // Set VERSIONS for all column families when transitioning to transactional + for (PColumnFamily family : table.getColumnFamilies()) { + if (!allFamiliesProps.containsKey(family.getName().getString())) { + Map familyProps = Maps.newHashMapWithExpectedSize(1); + familyProps.put(MAX_VERSIONS, defaultTxMaxVersions); + allFamiliesProps.put(family.getName().getString(), familyProps); + } + } + } + // Set transaction context TTL property based on HBase property if we're + // transitioning to become transactional or setting TTL on + // an already transactional table. + int ttl = getTTL(table, newTableDescriptorBuilder.build(), newTTL); + if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) { + for (Map.Entry> entry : allFamiliesProps.entrySet()) { + Map props = entry.getValue(); + if (props == null) { + allFamiliesProps.put(entry.getKey(), new HashMap<>()); + props = allFamiliesProps.get(entry.getKey()); + } else { + props = new HashMap<>(props); + } + // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? + props.put(PhoenixTransactionContext.PROPERTY_TTL, ttl); + // Remove HBase TTL if we're not transitioning an existing table to become transactional + // or if the existing transactional table wasn't originally non transactional. 
+ if ( + !willBeTransactional && !Boolean.valueOf(newTableDescriptorBuilder.build() + .getValue(PhoenixTransactionContext.READ_NON_TX_DATA)) + ) { + props.remove(TTL); + } + entry.setValue(props); + } + } + } + for (Entry> entry : allFamiliesProps.entrySet()) { + Map familyProps = entry.getValue(); + if (isOrWillBeTransactional) { + if (!familyProps.containsKey(MAX_VERSIONS)) { + familyProps.put(MAX_VERSIONS, defaultTxMaxVersions); + } + } + byte[] cf = Bytes.toBytes(entry.getKey()); + ColumnFamilyDescriptor colDescriptor = + newTableDescriptorBuilder.build().getColumnFamily(cf); + if (colDescriptor == null) { + // new column family + colDescriptor = + generateColumnFamilyDescriptor(new Pair<>(cf, familyProps), table.getType()); + newTableDescriptorBuilder.setColumnFamily(colDescriptor); + } else { + ColumnFamilyDescriptorBuilder colDescriptorBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(colDescriptor); + modifyColumnFamilyDescriptor(colDescriptorBuilder, familyProps); + colDescriptor = colDescriptorBuilder.build(); + newTableDescriptorBuilder.removeColumnFamily(cf); + newTableDescriptorBuilder.setColumnFamily(colDescriptor); + } + if (isOrWillBeTransactional) { + checkTransactionalVersionsValue(colDescriptor); + } + } + } + if (origTableDescriptor != null && newTableDescriptorBuilder != null) { + // Add the table descriptor mapping for the base table + tableAndIndexDescriptorMappings.put(origTableDescriptor, newTableDescriptorBuilder.build()); + } + + Map applyPropsToAllIndexColFams = + getNewSyncedPropsMap(newTTL, newKeepDeletedCells, newReplicationScope); + // Copy properties that need to be synced from the default column family of the base table to + // the column families of each of its indexes (including indexes on this base table's views) + // and store those table descriptor mappings as well + setSyncedPropertiesForTableIndexes(table, tableAndIndexDescriptorMappings, + applyPropsToAllIndexColFams); + return tableAndIndexDescriptorMappings; + } + + private void checkTransactionalVersionsValue(ColumnFamilyDescriptor colDescriptor) + throws SQLException { + int maxVersions = colDescriptor.getMaxVersions(); + if (maxVersions <= 1) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE) + .setFamilyName(colDescriptor.getNameAsString()).build().buildException(); + } + } + + private HashSet existingColumnFamiliesForBaseTable(PName baseTableName) + throws TableNotFoundException { + throwConnectionClosedIfNullMetaData(); + PTable table = + latestMetaData.getTableRef(new PTableKey(null, baseTableName.getString())).getTable(); + return existingColumnFamilies(table); + } + + public HashSet existingColumnFamilies(PTable table) { + List cfs = table.getColumnFamilies(); + HashSet cfNames = new HashSet<>(cfs.size()); + for (PColumnFamily cf : table.getColumnFamilies()) { + cfNames.add(cf.getName().getString()); + } + return cfNames; + } + + public static KeepDeletedCells getKeepDeletedCells(PTable table, TableDescriptor tableDesc, + KeepDeletedCells newKeepDeletedCells) throws SQLException { + // If we're setting KEEP_DELETED_CELLS now, then use that value. Otherwise, use the empty column + // family value + return (newKeepDeletedCells != null) + ? 
newKeepDeletedCells + : tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getKeepDeletedCells(); + } + + public static int getReplicationScope(PTable table, TableDescriptor tableDesc, + Integer newReplicationScope) throws SQLException { + // If we're setting replication scope now, then use that value. Otherwise, use the empty column + // family value + return (newReplicationScope != null) + ? newReplicationScope + : tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getScope(); + } + + public static int getTTL(PTable table, TableDescriptor tableDesc, Integer newTTL) + throws SQLException { + // If we're setting TTL now, then use that value. Otherwise, use empty column family value + return (newTTL != null) + ? newTTL + : tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getTimeToLive(); + } + + public static Object convertForeverAndNoneTTLValue(Object propValue, + boolean isPhoenixTTLEnabled) { + // Handle FOREVER and NONE value for TTL at HBase level TTL. + if (propValue instanceof String) { + String strValue = (String) propValue; + if ("FOREVER".equalsIgnoreCase(strValue)) { + propValue = HConstants.FOREVER; + } else if ("NONE".equalsIgnoreCase(strValue)) { + propValue = isPhoenixTTLEnabled ? TTL_NOT_DEFINED : HConstants.FOREVER; + } + } + return propValue; + } + + /** + * Keep the TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE properties of new column families in + * sync with the existing column families. Note that we use the new values for these properties in + * case they are passed from our alter table command, if not, we use the default column family's + * value for each property See {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} + * @param allFamiliesProps Map of all column family properties + * @param table original table + * @param tableDescBuilder new table descriptor builder + * @param newTTL new value of TTL + * @param newKeepDeletedCells new value of KEEP_DELETED_CELLS + * @param newReplicationScope new value of REPLICATION_SCOPE + */ + private void setSyncedPropsForNewColumnFamilies(Map> allFamiliesProps, + PTable table, TableDescriptorBuilder tableDescBuilder, Integer newTTL, + KeepDeletedCells newKeepDeletedCells, Integer newReplicationScope) throws SQLException { + if (!allFamiliesProps.isEmpty()) { + int ttl = getTTL(table, tableDescBuilder.build(), newTTL); + int replicationScope = + getReplicationScope(table, tableDescBuilder.build(), newReplicationScope); + KeepDeletedCells keepDeletedCells = + getKeepDeletedCells(table, tableDescBuilder.build(), newKeepDeletedCells); + for (Map.Entry> entry : allFamiliesProps.entrySet()) { + Map props = entry.getValue(); + if (props == null) { + allFamiliesProps.put(entry.getKey(), new HashMap<>()); + props = allFamiliesProps.get(entry.getKey()); + } + props.put(TTL, ttl); + props.put(KEEP_DELETED_CELLS, keepDeletedCells); + props.put(REPLICATION_SCOPE, replicationScope); + } + } + } + + private void setPropIfNotNull(Map propMap, String propName, Object propVal) { + if (propName != null && propVal != null) { + propMap.put(propName, propVal); + } + } + + private Map getNewSyncedPropsMap(Integer newTTL, + KeepDeletedCells newKeepDeletedCells, Integer newReplicationScope) { + Map newSyncedProps = Maps.newHashMapWithExpectedSize(3); + setPropIfNotNull(newSyncedProps, TTL, newTTL); + setPropIfNotNull(newSyncedProps, KEEP_DELETED_CELLS, newKeepDeletedCells); + setPropIfNotNull(newSyncedProps, REPLICATION_SCOPE, newReplicationScope); + return newSyncedProps; + } + + /** + * 
Set the new values for properties that are to be kept in sync amongst those column families of + * the table which are not referenced in the context of our alter table command, including the + * local index column family if it exists See + * {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} + * @param tableDesc original table descriptor + * @param allFamiliesProps Map of all column family properties + * @param newTTL new value of TTL + * @param newKeepDeletedCells new value of KEEP_DELETED_CELLS + * @param newReplicationScope new value of REPLICATION_SCOPE + */ + private void setSyncedPropsForUnreferencedColumnFamilies(TableDescriptor tableDesc, + Map> allFamiliesProps, Integer newTTL, + KeepDeletedCells newKeepDeletedCells, Integer newReplicationScope) { + for (ColumnFamilyDescriptor family : tableDesc.getColumnFamilies()) { + if (!allFamiliesProps.containsKey(family.getNameAsString())) { + allFamiliesProps.put(family.getNameAsString(), + getNewSyncedPropsMap(newTTL, newKeepDeletedCells, newReplicationScope)); + } + } + } + + /** + * Set properties to be kept in sync for global indexes of a table, as well as the physical table + * corresponding to indexes created on views of a table See + * {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} and + * @param table base table + * @param tableAndIndexDescriptorMappings old to new table descriptor mappings + * @param applyPropsToAllIndexesDefaultCF new properties to apply to all index column families + */ + private void setSyncedPropertiesForTableIndexes(PTable table, + Map tableAndIndexDescriptorMappings, + Map applyPropsToAllIndexesDefaultCF) throws SQLException { + if (applyPropsToAllIndexesDefaultCF == null || applyPropsToAllIndexesDefaultCF.isEmpty()) { + return; + } + + for (PTable indexTable : table.getIndexes()) { + if (indexTable.getIndexType() == PTable.IndexType.LOCAL) { + // local index tables are already handled when we sync all column families of a base table + continue; + } + TableDescriptor origIndexDescriptor = + this.getTableDescriptor(indexTable.getPhysicalName().getBytes()); + TableDescriptorBuilder newIndexDescriptorBuilder = + TableDescriptorBuilder.newBuilder(origIndexDescriptor); + + byte[] defaultIndexColFam = SchemaUtil.getEmptyColumnFamily(indexTable); + ColumnFamilyDescriptorBuilder indexDefaultColDescriptorBuilder = ColumnFamilyDescriptorBuilder + .newBuilder(origIndexDescriptor.getColumnFamily(defaultIndexColFam)); + modifyColumnFamilyDescriptor(indexDefaultColDescriptorBuilder, + applyPropsToAllIndexesDefaultCF); + newIndexDescriptorBuilder.removeColumnFamily(defaultIndexColFam); + newIndexDescriptorBuilder.setColumnFamily(indexDefaultColDescriptorBuilder.build()); + tableAndIndexDescriptorMappings.put(origIndexDescriptor, newIndexDescriptorBuilder.build()); + } + // Also keep properties for the physical view index table in sync + String viewIndexName = + MetaDataUtil.getViewIndexPhysicalName(table.getName(), table.isNamespaceMapped()); + if (!Strings.isNullOrEmpty(viewIndexName)) { + try { + TableDescriptor origViewIndexTableDescriptor = + this.getTableDescriptor(Bytes.toBytes(viewIndexName)); + TableDescriptorBuilder newViewIndexDescriptorBuilder = + TableDescriptorBuilder.newBuilder(origViewIndexTableDescriptor); + for (ColumnFamilyDescriptor cfd : origViewIndexTableDescriptor.getColumnFamilies()) { + ColumnFamilyDescriptorBuilder newCfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd); + modifyColumnFamilyDescriptor(newCfd, applyPropsToAllIndexesDefaultCF); + 
newViewIndexDescriptorBuilder.removeColumnFamily(cfd.getName()); + newViewIndexDescriptorBuilder.setColumnFamily(newCfd.build()); + } + tableAndIndexDescriptorMappings.put(origViewIndexTableDescriptor, + newViewIndexDescriptorBuilder.build()); + } catch (TableNotFoundException ignore) { + // Ignore since this means that a view index table does not exist for this table + } + } + } + + @Override + public MetaDataMutationResult dropColumn(final List tableMetaData, + final PTableType tableType, final PTable parentTable) throws SQLException { + byte[][] rowKeyMetadata = new byte[3][]; + SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); + MetaDataMutationResult result = metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + DropColumnRequest.Builder builder = DropColumnRequest.newBuilder(); + for (Mutation m : tableMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + if (parentTable != null) builder.setParentTable(PTableImpl.toProto(parentTable)); + instance.dropColumn(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + final MutationCode code = result.getMutationCode(); + switch (code) { + case TABLE_ALREADY_EXISTS: + final ReadOnlyProps props = this.getProps(); + final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + if (dropMetadata) { + dropTables(result.getTableNamesToDelete()); + } else { + invalidateTableStats(result.getTableNamesToDelete()); + } + break; + default: + break; + } + return result; + + } + + private PhoenixConnection removeNotNullConstraint(PhoenixConnection oldMetaConnection, + String schemaName, String tableName, long timestamp, String columnName) throws SQLException { + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + // Cannot go through DriverManager or you end up in an infinite loop because it'll call init + // again + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + SQLException sqlE = null; + try { + String dml = "UPSERT INTO " + SYSTEM_CATALOG_NAME + " (" + PhoenixDatabaseMetaData.TENANT_ID + + "," + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + "," + + PhoenixDatabaseMetaData.COLUMN_NAME + "," + PhoenixDatabaseMetaData.NULLABLE + + ") VALUES (null, ?, ?, ?, ?)"; + PreparedStatement stmt = metaConnection.prepareStatement(dml); + stmt.setString(1, schemaName); + stmt.setString(2, tableName); + stmt.setString(3, columnName); + stmt.setInt(4, ResultSetMetaData.columnNullable); + 
stmt.executeUpdate(); + metaConnection.commit(); + } catch (NewerTableAlreadyExistsException e) { + LOGGER.warn("Table already modified at this timestamp," + + " so assuming column already nullable: " + columnName); + } catch (SQLException e) { + LOGGER.warn("Add column failed due to:" + e); + sqlE = e; + } finally { + try { + oldMetaConnection.close(); + } catch (SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; + } + } + if (sqlE != null) { + throw sqlE; + } + } + return metaConnection; + } + + /** + * This closes the passed connection. + */ + private PhoenixConnection addColumn(PhoenixConnection oldMetaConnection, String tableName, + long timestamp, String columns, boolean addIfNotExists) throws SQLException { + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + // Cannot go through DriverManager or you end up in an infinite loop because it'll call init + // again + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + SQLException sqlE = null; + try { + metaConnection.createStatement().executeUpdate( + "ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns); + } catch (NewerTableAlreadyExistsException e) { + LOGGER.warn("Table already modified at this timestamp," + + " so assuming add of these columns already done: " + columns); + } catch (SQLException e) { + LOGGER.warn("Add column failed due to:" + e); + sqlE = e; + } finally { + try { + oldMetaConnection.close(); + } catch (SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; + } + } + if (sqlE != null) { + throw sqlE; + } + } + return metaConnection; + } + + /** + * Keeping this to use for further upgrades. This method closes the oldMetaConnection. 
+ */ + private PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection, + String tableName, long timestamp, String columns) throws SQLException { + return addColumn(oldMetaConnection, tableName, timestamp, columns, true); + } + + private void copyDataFromPhoenixTTLtoTTL(PhoenixConnection oldMetaConnection) throws IOException { + // If ViewTTL is enabled then only copy values from PHOENIX_TTL Column to TTL Column + if ( + oldMetaConnection.getQueryServices().getConfiguration().getBoolean(PHOENIX_VIEW_TTL_ENABLED, + DEFAULT_PHOENIX_VIEW_TTL_ENABLED) + ) { + // Increase the timeouts so that the scan queries during Copy Data do not timeout + // on large SYSCAT Tables + Map options = new HashMap<>(); + options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + copyTTLValuesFromPhoenixTTLColumnToTTLColumn(oldMetaConnection, options); + } + + } + + private void moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(PhoenixConnection oldMetaConnection) + throws IOException { + // Increase the timeouts so that the scan queries during Copy Data does not timeout + // on large SYSCAT Tables + Map options = new HashMap<>(); + options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + UpgradeUtil.moveHBaseLevelTTLToSYSCAT(oldMetaConnection, options); + } + + // Available for testing + protected long getSystemTableVersion() { + return MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP; + } + + // Available for testing + protected void setUpgradeRequired() { + this.upgradeRequired.set(true); + } + + // Available for testing + protected boolean isInitialized() { + return initialized; + } + + // Available for testing + protected void setInitialized(boolean isInitialized) { + initialized = isInitialized; + } + + // Available for testing + protected String getSystemCatalogTableDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TABLE_METADATA); + } + + protected String getSystemSequenceTableDDL(int nSaltBuckets) { + String schema = String.format(setSystemDDLProperties(QueryConstants.CREATE_SEQUENCE_METADATA)); + return Sequence.getCreateTableStatement(schema, nSaltBuckets); + } + + // Available for testing + protected String getFunctionTableDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_FUNCTION_METADATA); + } + + // Available for testing + protected String getLogTableDDL() { + return setSystemLogDDLProperties(QueryConstants.CREATE_LOG_METADATA); + } + + private String setSystemLogDDLProperties(String ddl) { + return String.format(ddl, + props.getInt(LOG_SALT_BUCKETS_ATTRIB, QueryServicesOptions.DEFAULT_LOG_SALT_BUCKETS)); + + } + + // Available for testing + protected String getChildLinkDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); + } + + protected String getMutexDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA); + } + + protected String getTaskDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TASK_METADATA); + } + + protected String getTransformDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TRANSFORM_METADATA); + } + + private String setSystemDDLProperties(String ddl) { + return String.format(ddl, + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, + 
+        QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS),
+      props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB,
+        QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS));
+  }
+
+  @Override
+  public void init(final String url, final Properties props) throws SQLException {
+    try {
+      PhoenixContextExecutor.call(new Callable<Void>() {
+        @Override
+        public Void call() throws Exception {
+          if (isInitialized()) {
+            if (initializationException != null) {
+              // Throw previous initialization exception, as we won't reuse this instance
+              throw initializationException;
+            }
+            return null;
+          }
+          synchronized (ConnectionQueryServicesImpl.this) {
+            if (isInitialized()) {
+              if (initializationException != null) {
+                // Throw previous initialization exception, as we won't reuse this instance
+                throw initializationException;
+              }
+              return null;
+            }
+
+            checkClosed();
+            boolean hConnectionEstablished = false;
+            boolean success = false;
             try {
-              if (this.queryDisruptor != null) {
-                this.queryDisruptor.close();
-              }
+              GLOBAL_QUERY_SERVICES_COUNTER.increment();
+              LOGGER.info("An instance of ConnectionQueryServices was created.");
+              connection = openConnection(config);
+              hConnectionEstablished = true;
+              boolean lastDDLTimestampValidationEnabled =
+                getProps().getBoolean(QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED,
+                  QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED);
+              if (lastDDLTimestampValidationEnabled) {
+                refreshLiveRegionServers();
+              }
+              String skipSystemExistenceCheck =
+                props.getProperty(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK);
+              if (skipSystemExistenceCheck != null && Boolean.valueOf(skipSystemExistenceCheck)) {
+                initialized = true;
+                success = true;
+                return null;
+              }
+              boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props);
+              Properties scnProps = PropertiesUtil.deepCopy(props);
+              scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
+                Long.toString(getSystemTableVersion()));
+              scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
+              String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
+              try (PhoenixConnection metaConnection =
+                new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl, scnProps)) {
+                try (Statement statement = metaConnection.createStatement()) {
+                  metaConnection.setRunningUpgrade(true);
+                  statement.executeUpdate(getSystemCatalogTableDDL());
+                } catch (NewerTableAlreadyExistsException ignore) {
+                  // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed
+                  // timestamp. A TableAlreadyExistsException is not thrown, since the table only
+                  // exists
+                  // *after* this fixed timestamp.
+ } catch (TableAlreadyExistsException e) { + long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) { + setUpgradeRequired(); + } + } catch (PhoenixIOException e) { + boolean foundAccessDeniedException = false; + // when running spark/map reduce jobs the ADE might be wrapped + // in a RemoteException + if ( + inspectIfAnyExceptionInChain(e, + Collections.< + Class> singletonList(AccessDeniedException.class)) + ) { + // Pass + LOGGER.warn("Could not check for Phoenix SYSTEM tables," + + " assuming they exist and are properly configured"); + checkClientServerCompatibility( + SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName()); + success = true; + } else + if ( + inspectIfAnyExceptionInChain(e, + Collections.> singletonList( + NamespaceNotFoundException.class)) + ) { + // This exception is only possible if SYSTEM namespace mapping is enabled and + // SYSTEM namespace is missing + // It implies that SYSTEM tables are not created and hence we shouldn't + // provide a connection + AccessDeniedException ade = new AccessDeniedException( + "Insufficient permissions to create SYSTEM namespace and SYSTEM Tables"); + initializationException = ClientUtil.parseServerException(ade); + } else { + initializationException = e; + } + return null; + } catch (UpgradeRequiredException e) { + // This will occur in 2 cases: + // 1. when SYSTEM.CATALOG doesn't exists + // 2. when SYSTEM.CATALOG exists, but client and + // server-side namespace mapping is enabled so + // we need to migrate SYSTEM tables to the SYSTEM namespace + setUpgradeRequired(); + } + + if (!ConnectionQueryServicesImpl.this.upgradeRequired.get()) { + if (!isDoNotUpgradePropSet) { + createOtherSystemTables(metaConnection); + // In case namespace mapping is enabled and system table to + // system namespace mapping is also enabled, create an entry + // for the SYSTEM namespace in the SYSCAT table, so that + // GRANT/REVOKE commands can work with SYSTEM Namespace + createSchemaIfNotExistsSystemNSMappingEnabled(metaConnection); + } + } else if (isAutoUpgradeEnabled && !isDoNotUpgradePropSet) { + // Upgrade is required and we are allowed to automatically upgrade + upgradeSystemTables(url, props); + } else { + // We expect the user to manually run the "EXECUTE UPGRADE" command first. + LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' " + + "before any other command"); + } + } + success = true; + } catch (RetriableUpgradeException e) { + // Set success to true and don't set the exception as an initializationException, + // because otherwise the client won't be able to retry establishing the connection. + success = true; + throw e; } catch (Exception e) { - // Ignore - } - SQLException sqlE = null; - try { - // Attempt to return any unused sequences. 
- if (connection != null) returnAllSequences(this.sequenceMap); - } catch (SQLException e) { - sqlE = e; + if (e instanceof SQLException) { + initializationException = (SQLException) e; + } else { + // wrap every other exception into a SQLException + initializationException = new SQLException(e); + } } finally { + if (success) { + scheduleRenewLeaseTasks(); + } + try { + if (!success && hConnectionEstablished) { + closeConnection(connection); + closeConnection(invalidateMetadataCacheConnection); + } + } catch (IOException e) { + SQLException ex = new SQLException(e); + if (initializationException != null) { + initializationException.setNextException(ex); + } else { + initializationException = ex; + } + } finally { try { - childServices.clear(); - synchronized (latestMetaDataLock) { - latestMetaData = null; - latestMetaDataLock.notifyAll(); - } - try { - // close HBase connections. - closeConnection(this.connection); - closeConnection(this.invalidateMetadataCacheConnection); - } finally { - if (renewLeaseExecutor != null) { - renewLeaseExecutor.shutdownNow(); - } - // shut down the tx client service if we created one to support transactions - for (PhoenixTransactionClient client : txClients) { - if (client != null) { - client.close(); - } - } - } - } catch (IOException e) { - if (sqlE == null) { - sqlE = ClientUtil.parseServerException(e); - } else { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } + if (initializationException != null) { + throw initializationException; + } } finally { - try { - tableStatsCache.invalidateAll(); - super.close(); - } catch (SQLException e) { - if (sqlE == null) { - sqlE = e; - } else { - sqlE.setNextException(e); - } - } finally { - if (sqlE != null) { throw sqlE; } - } - } - } + setInitialized(true); + } + } + } + } + return null; + } + }); + } catch (Exception e) { + Throwables.propagateIfInstanceOf(e, SQLException.class); + Throwables.propagate(e); + } + } + + void createSysMutexTableIfNotExists(Admin admin) throws IOException { + try { + if (checkIfSysMutexExistsAndModifyTTLIfRequired(admin)) { + return; + } + final TableName mutexTableName = SchemaUtil.getPhysicalTableName(SYSTEM_MUTEX_NAME, props); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(mutexTableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES) + .setTimeToLive(TTL_FOR_MUTEX).build()) + .build(); + admin.createTable(tableDesc); + } catch (IOException e) { + if ( + inspectIfAnyExceptionInChain(e, + Arrays.> asList(AccessDeniedException.class, + org.apache.hadoop.hbase.TableExistsException.class)) + ) { + // Ignore TableExistsException as another client might beat us during upgrade. + // Ignore AccessDeniedException, as it may be possible underpriviliged user trying to use + // the connection + // which doesn't required upgrade. + LOGGER.debug("Ignoring exception while creating mutex table" + + " during connection initialization: " + Throwables.getStackTraceAsString(e)); + } else { + throw e; + } + } + } + + /** + * Check if the SYSTEM MUTEX table exists. 
If it does, ensure that its TTL is correct and if not, + * modify its table descriptor + * @param admin HBase admin + * @return true if SYSTEM MUTEX exists already and false if it needs to be created + * @throws IOException thrown if there is an error getting the table descriptor + */ + @VisibleForTesting + boolean checkIfSysMutexExistsAndModifyTTLIfRequired(Admin admin) throws IOException { + TableDescriptor htd; + try { + htd = admin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)); + } catch (org.apache.hadoop.hbase.TableNotFoundException ignored) { + try { + // Try with the namespace mapping name + htd = admin.getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME)); + } catch (org.apache.hadoop.hbase.TableNotFoundException ignored2) { + return false; + } + } + + // The SYSTEM MUTEX table already exists so check its TTL + if (htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES).getTimeToLive() != TTL_FOR_MUTEX) { + LOGGER.debug( + "SYSTEM MUTEX already appears to exist, but has the wrong TTL. " + "Will modify the TTL"); + ColumnFamilyDescriptor hColFamDesc = ColumnFamilyDescriptorBuilder + .newBuilder(htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES)) + .setTimeToLive(TTL_FOR_MUTEX).build(); + htd = TableDescriptorBuilder.newBuilder(htd).modifyColumnFamily(hColFamDesc).build(); + admin.modifyTable(htd); + } else { + LOGGER + .debug("SYSTEM MUTEX already appears to exist with the correct TTL, " + "not creating it"); + } + return true; + } + + private boolean inspectIfAnyExceptionInChain(Throwable io, + List> ioList) { + boolean exceptionToIgnore = false; + for (Throwable t : Throwables.getCausalChain(io)) { + for (Class exception : ioList) { + exceptionToIgnore |= isExceptionInstanceOf(t, exception); + } + if (exceptionToIgnore) { + break; + } + + } + return exceptionToIgnore; + } + + private boolean isExceptionInstanceOf(Throwable io, Class exception) { + return exception.isInstance(io) || (io instanceof RemoteException + && (((RemoteException) io).getClassName().equals(exception.getName()))); + } + + List getSystemTableNamesInDefaultNamespace(Admin admin) throws IOException { + return Lists.newArrayList( + admin.listTableNames(Pattern.compile(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"))); // TODO: + // replace + // to + // pattern + } + + private void createOtherSystemTables(PhoenixConnection metaConnection) + throws SQLException, IOException { + try { + metaConnection.createStatement().execute(getSystemSequenceTableDDL(nSequenceSaltBuckets)); + // When creating the table above, DDL statements are + // used. However, the CFD level properties are not set + // via DDL commands, hence we are explicitly setting + // few properties using the Admin API below. 
+ updateSystemSequenceWithCacheOnWriteProps(metaConnection); + } catch (TableAlreadyExistsException e) { + nSequenceSaltBuckets = getSaltBuckets(e); + } + try { + metaConnection.createStatement().execute(QueryConstants.CREATE_STATS_TABLE_METADATA); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().execute(getFunctionTableDDL()); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().execute(getLogTableDDL()); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().executeUpdate(getChildLinkDDL()); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().executeUpdate(getMutexDDL()); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().executeUpdate(getTaskDDL()); + } catch (TableAlreadyExistsException ignore) { + } + try { + metaConnection.createStatement().executeUpdate(getTransformDDL()); + } catch (TableAlreadyExistsException ignore) { + } + } + + /** + * Create an entry for the SYSTEM namespace in the SYSCAT table in case namespace mapping is + * enabled and system table to system namespace mapping is also enabled. If not enabled, this + * method returns immediately without doing anything + */ + private void createSchemaIfNotExistsSystemNSMappingEnabled(PhoenixConnection metaConnection) + throws SQLException { + // HBase Namespace SYSTEM is assumed to be already created inside {@link + // ensureTableCreated(byte[], PTableType, + // Map, List>>, byte[][], boolean, boolean, + // boolean)}. + // This statement will create an entry for the SYSTEM namespace in the SYSCAT table, so that + // GRANT/REVOKE + // commands can work with SYSTEM Namespace. (See PHOENIX-4227 + // https://issues.apache.org/jira/browse/PHOENIX-4227) + if ( + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, + ConnectionQueryServicesImpl.this.getProps()) + ) { + try { + metaConnection.createStatement() + .execute("CREATE SCHEMA IF NOT EXISTS " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA); + } catch (NewerSchemaAlreadyExistsException e) { + // Older clients with appropriate perms may try getting a new connection + // This results in NewerSchemaAlreadyExistsException, so we can safely ignore it here + } catch (PhoenixIOException e) { + if ( + !Iterables + .isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class)) + ) { + // Ignore ADE + } else { + throw e; + } + } + } + } + + /** + * Upgrade the SYSCAT schema if required + * @return Phoenix connection object + */ + // Available for testing + protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection metaConnection, + long currentServerSideTableTimeStamp) + throws SQLException, IOException, TimeoutException, InterruptedException { + String columnsToAdd = ""; + // This will occur if we have an older SYSTEM.CATALOG and we need to update it to + // include any new columns we've added. 
+ if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) { + // We know that we always need to add the STORE_NULLS column for 4.3 release + columnsToAdd = addColumn(columnsToAdd, + PhoenixDatabaseMetaData.STORE_NULLS + " " + PBoolean.INSTANCE.getSqlTypeName()); + try (Admin admin = getAdmin()) { + List localIndexTables = + admin.listTableDescriptors(Pattern.compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*")); + for (TableDescriptor table : localIndexTables) { + if ( + table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null + && table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null + ) { + + table = + TableDescriptorBuilder.newBuilder(table) + .setValue(Bytes.toBytes(MetaDataUtil.PARENT_TABLE_KEY), Bytes.toBytes( + MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString()))) + .build(); + // Explicitly disable, modify and enable the table to ensure + // co-location of data and index regions. If we just modify the + // table descriptor when online schema change enabled may reopen + // the region in same region server instead of following data region. + disableTable(admin, table.getTableName()); + admin.modifyTable(table); + admin.enableTable(table.getTableName()); + } + } + } + } + + // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then + // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too. + // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed, + // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all + // the column names that have been added to SYSTEM.CATALOG since 4.0. + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { + columnsToAdd = addColumn(columnsToAdd, + PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + + PLong.INSTANCE.getSqlTypeName()); + } + + // If we have some new columns from 4.1-4.3 to add, add them now. + if (!columnsToAdd.isEmpty()) { + // Ugh..need to assign to another local variable to keep eclipse happy. + PhoenixConnection newMetaConnection = + addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, columnsToAdd); + metaConnection = newMetaConnection; + } + + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) { + columnsToAdd = + PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " " + PInteger.INSTANCE.getSqlTypeName(); + try { + metaConnection = addColumn(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, columnsToAdd, false); + upgradeTo4_5_0(metaConnection); + } catch (ColumnAlreadyExistsException ignored) { + /* + * Upgrade to 4.5 is a slightly special case. We use the fact that the column + * BASE_COLUMN_COUNT is already part of the meta-data schema as the signal that the server + * side upgrade has finished or is in progress. 
+ */ + LOGGER.debug("No need to run 4.5 upgrade"); + } + Properties p = PropertiesUtil.deepCopy(metaConnection.getClientInfo()); + p.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB); + p.remove(PhoenixRuntime.TENANT_ID_ATTRIB); + PhoenixConnection conn = + new PhoenixConnection(ConnectionQueryServicesImpl.this, metaConnection.getURL(), p); + try { + List tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn); + if (!tablesNeedingUpgrade.isEmpty()) { + LOGGER.warn("The following tables require upgrade due to a bug " + + "causing the row key to be incorrect for descending columns " + + "and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n" + + Joiner.on(' ').join(tablesNeedingUpgrade) + + "\nTo upgrade issue the \"bin/psql.py -u\" command."); + } + List unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn); + if (!unsupportedTables.isEmpty()) { + LOGGER.warn("The following tables use an unsupported " + + "VARBINARY DESC construct and need to be changed:\n" + + Joiner.on(' ').join(unsupportedTables)); + } + } catch (Exception ex) { + LOGGER.error("Unable to determine tables requiring upgrade due to PHOENIX-2067", ex); + } finally { + conn.close(); + } + } + // Add these columns one at a time so that if folks have run the upgrade code + // already for a snapshot, we'll still enter this block (and do the parts we + // haven't yet done). + // Add each column with different timestamp else the code assumes that the + // table is already modified at that timestamp resulting in not updating the + // second column with same timestamp + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0) { + columnsToAdd = + PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP + " " + PBoolean.INSTANCE.getSqlTypeName(); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) { + // Drop old stats table so that new stats table is created + metaConnection = + dropStatsTable(metaConnection, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3, + PhoenixDatabaseMetaData.TRANSACTIONAL + " " + PBoolean.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2, + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " + PLong.INSTANCE.getSqlTypeName()); + metaConnection = setImmutableTableIndexesImmutable(metaConnection, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1); + metaConnection = updateSystemCatalogTimestamp(metaConnection, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); + clearCache(); + } + + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 2, + PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED + " " + PBoolean.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 1, + PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, + PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA + " " + PBoolean.INSTANCE.getSqlTypeName()); + metaConnection = UpgradeUtil.disableViewIndexes(metaConnection); + if ( + getProps().getBoolean(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, + QueryServicesOptions.DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE) + ) { + localIndexUpgradeRequired = true; + } + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0); + clearCache(); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + " " + PLong.INSTANCE.getSqlTypeName()); + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0); + clearCache(); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0) { + metaConnection = addColumnQualifierColumn(metaConnection, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 3); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 2, + PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME + " " + + PTinyint.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 1, + PhoenixDatabaseMetaData.ENCODING_SCHEME + " " + PTinyint.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, + PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER + " " + + PInteger.INSTANCE.getSqlTypeName()); + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0); + clearCache(); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, + PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION + " " + + PBoolean.INSTANCE.getSqlTypeName()); + addParentToChildLinks(metaConnection); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0, + PhoenixDatabaseMetaData.TRANSACTION_PROVIDER + " " + PTinyint.INSTANCE.getSqlTypeName()); + try (Statement altQry = metaConnection.createStatement()) { + altQry.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_CATALOG + " SET " + + HConstants.VERSIONS + "= " + + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS) + + ",\n" + 
ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); + + altQry.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_FUNCTION + " SET " + + TableDescriptorBuilder.SPLIT_POLICY + "='" + + QueryConstants.SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME + "',\n" + HConstants.VERSIONS + + "= " + + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS) + + ",\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); + + altQry.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " + + TableDescriptorBuilder.SPLIT_POLICY + "='" + + QueryConstants.SYSTEM_STATS_SPLIT_POLICY_CLASSNAME + "'"); + } + } + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { + addViewIndexToParentLinks(metaConnection); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0, + PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE + " " + PInteger.INSTANCE.getSqlTypeName()); + } + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 - 3, + PhoenixDatabaseMetaData.PHOENIX_TTL + " " + PInteger.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 - 2, + PhoenixDatabaseMetaData.PHOENIX_TTL_HWM + " " + PInteger.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 - 1, + PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0, PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED + " " + + PBoolean.INSTANCE.getSqlTypeName()); + UpgradeUtil.bootstrapLastDDLTimestampForTablesAndViews(metaConnection); + + boolean isNamespaceMapping = SchemaUtil.isNamespaceMappingEnabled(null, getConfiguration()); + String tableName = PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; + if (isNamespaceMapping) { + tableName = + tableName.replace(QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR); + } + byte[] tableBytes = StringUtil.toBytes(tableName); + byte[] rowKey = SchemaUtil.getColumnKey(null, QueryConstants.SYSTEM_SCHEMA_NAME, + SYSTEM_CATALOG_TABLE, VIEW_INDEX_ID, PhoenixDatabaseMetaData.TABLE_FAMILY); + if ( + UpgradeUtil.isUpdateViewIndexIdColumnDataTypeFromShortToLongNeeded(metaConnection, rowKey, + tableBytes) + ) { + LOGGER.info("Updating VIEW_INDEX_ID data type to BIGINT."); + UpgradeUtil.updateViewIndexIdColumnDataTypeFromShortToLong(metaConnection, rowKey, + tableBytes); + } else { + LOGGER.info("Updating VIEW_INDEX_ID data type is not needed."); + } + try (Admin admin = metaConnection.getQueryServices().getAdmin()) { + TableDescriptorBuilder tdBuilder; + TableName sysCatPhysicalTableName = + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props); + tdBuilder = TableDescriptorBuilder.newBuilder(admin.getDescriptor(sysCatPhysicalTableName)); + if ( + 
!tdBuilder.build().hasCoprocessor(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME) + ) { + int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, + QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); + tdBuilder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME) + .setPriority(priority).setProperties(Collections.emptyMap()).build()); + admin.modifyTable(tdBuilder.build()); + pollForUpdatedTableDescriptor(admin, tdBuilder.build(), + sysCatPhysicalTableName.getName()); + } + } + } + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0) { + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 8, + PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 7, + PhoenixDatabaseMetaData.SCHEMA_VERSION + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 6, + PhoenixDatabaseMetaData.EXTERNAL_SCHEMA_ID + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 5, + PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 4, + PhoenixDatabaseMetaData.INDEX_WHERE + " " + PVarchar.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 3, + PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE + " " + PLong.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 2, + PhoenixDatabaseMetaData.CDC_INCLUDE_TABLE + " " + PVarchar.INSTANCE.getSqlTypeName()); + + /** + * TODO: Provide a path to copy existing data from PHOENIX_TTL to TTL column and then to DROP + * PHOENIX_TTL Column. See PHOENIX-7023 + */ + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 1, + PhoenixDatabaseMetaData.TTL + " " + PInteger.INSTANCE.getSqlTypeName()); + metaConnection = addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0, + PhoenixDatabaseMetaData.ROW_KEY_MATCHER + " " + PVarbinary.INSTANCE.getSqlTypeName()); + // Values in PHOENIX_TTL column will not be used for further release as PHOENIX_TTL column is + // being deprecated + // and will be removed in later release. To copy copyDataFromPhoenixTTLtoTTL(metaConnection) + // can be used but + // as that feature was not fully built we are not moving old value to new column + + // move TTL values stored in descriptor to SYSCAT TTL column. + moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(metaConnection); + UpgradeUtil.bootstrapLastDDLTimestampForIndexes(metaConnection); + } + return metaConnection; + } + + /** + * There is no other locking needed here since only one connection (on the same or different JVM) + * will be able to acquire the upgrade mutex via {@link #acquireUpgradeMutex(long)} . 
+ */ + @Override + public void upgradeSystemTables(final String url, final Properties props) throws SQLException { + PhoenixConnection metaConnection = null; + boolean success = false; + final Map systemTableToSnapshotMap = new HashMap<>(); + String sysCatalogTableName = null; + SQLException toThrow = null; + boolean acquiredMutexLock = false; + boolean moveChildLinks = false; + boolean syncAllTableAndIndexProps = false; + try { + if (!isUpgradeRequired()) { + throw new UpgradeNotRequiredException(); + } + Properties scnProps = PropertiesUtil.deepCopy(props); + scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); + scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB); + String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB); + metaConnection = new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl, scnProps); + metaConnection.setRunningUpgrade(true); + + // Always try to create SYSTEM.MUTEX table first since we need it to acquire the + // upgrade mutex. Upgrade or migration is not possible without the upgrade mutex + try (Admin admin = getAdmin()) { + createSysMutexTableIfNotExists(admin); + } + UpgradeRequiredException caughtUpgradeRequiredException = null; + TableAlreadyExistsException caughtTableAlreadyExistsException = null; + try { + metaConnection.createStatement().executeUpdate(getSystemCatalogTableDDL()); + } catch (NewerTableAlreadyExistsException ignore) { + // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed + // timestamp. A TableAlreadyExistsException is not thrown, since the table only exists + // *after* this fixed timestamp. + } catch (UpgradeRequiredException e) { + // This is thrown while trying to create SYSTEM:CATALOG to indicate that we must + // migrate SYSTEM tables to the SYSTEM namespace and/or upgrade SYSCAT if required + caughtUpgradeRequiredException = e; + } catch (TableAlreadyExistsException e) { + caughtTableAlreadyExistsException = e; + } + + if (caughtUpgradeRequiredException != null || caughtTableAlreadyExistsException != null) { + long currentServerSideTableTimeStamp; + if (caughtUpgradeRequiredException != null) { + currentServerSideTableTimeStamp = + caughtUpgradeRequiredException.getSystemCatalogTimeStamp(); + } else { + currentServerSideTableTimeStamp = + caughtTableAlreadyExistsException.getTable().getTimeStamp(); } - } - protected ConnectionQueryServices newChildQueryService() { - return new ChildQueryServices(this); - } + ReadOnlyProps readOnlyProps = metaConnection.getQueryServices().getProps(); + String skipUpgradeBlock = readOnlyProps.get(SKIP_UPGRADE_BLOCK_CHECK); - /** - * Get (and create if necessary) a child QueryService for a given tenantId. - * The QueryService will be cached for the lifetime of the parent QueryService - * @param tenantId the tenant ID - * @return the child QueryService - */ - @Override - public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId) { - ConnectionQueryServices childQueryService = childServices.get(tenantId); - if (childQueryService == null) { - childQueryService = newChildQueryService(); - ConnectionQueryServices prevQueryService = childServices.putIfAbsent(tenantId, childQueryService); - return prevQueryService == null ? 
childQueryService : prevQueryService; + if (skipUpgradeBlock == null || !Boolean.valueOf(skipUpgradeBlock)) { + checkUpgradeBlockMutex(); } - return childQueryService; - } - @Override - public void clearTableRegionCache(TableName tableName) throws SQLException { - ((ClusterConnection)connection).clearRegionCache(tableName); - } - - public byte[] getNextRegionStartKey(HRegionLocation regionLocation, byte[] currentKey, - HRegionLocation prevRegionLocation) { - // in order to check the overlap/inconsistencies bad region info, we have to make sure - // the current endKey always increasing(compare the previous endKey) - - // conditionOne = true if the currentKey does not belong to the region boundaries specified - // by regionLocation i.e. if the currentKey is less than the region startKey or if the - // currentKey is greater than or equal to the region endKey. - - // conditionTwo = true if the previous region endKey is either not same as current region - // startKey or if the previous region endKey is greater than or equal to current region - // endKey. - boolean conditionOne = - (Bytes.compareTo(regionLocation.getRegion().getStartKey(), currentKey) > 0 - || Bytes.compareTo(regionLocation.getRegion().getEndKey(), currentKey) <= 0) - && !Bytes.equals(currentKey, HConstants.EMPTY_START_ROW) - && !Bytes.equals(regionLocation.getRegion().getEndKey(), HConstants.EMPTY_END_ROW); - boolean conditionTwo = prevRegionLocation != null && ( - Bytes.compareTo(regionLocation.getRegion().getStartKey(), - prevRegionLocation.getRegion().getEndKey()) != 0 || - Bytes.compareTo(regionLocation.getRegion().getEndKey(), - prevRegionLocation.getRegion().getEndKey()) <= 0) - && !Bytes.equals(prevRegionLocation.getRegion().getEndKey(), HConstants.EMPTY_START_ROW) - && !Bytes.equals(regionLocation.getRegion().getEndKey(), HConstants.EMPTY_END_ROW); - if (conditionOne || conditionTwo) { - GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.increment(); - LOGGER.warn( - "HBase region overlap/inconsistencies on {} , current key: {} , region startKey:" - + " {} , region endKey: {} , prev region startKey: {} , prev region endKey: {}", - regionLocation, - Bytes.toStringBinary(currentKey), - Bytes.toStringBinary(regionLocation.getRegion().getStartKey()), - Bytes.toStringBinary(regionLocation.getRegion().getEndKey()), - prevRegionLocation == null ? - "null" : Bytes.toStringBinary(prevRegionLocation.getRegion().getStartKey()), - prevRegionLocation == null ? - "null" : Bytes.toStringBinary(prevRegionLocation.getRegion().getEndKey())); - } - return regionLocation.getRegion().getEndKey(); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getAllTableRegions(byte[] tableName) throws SQLException { - int queryTimeout = this.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - return getTableRegions(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, queryTimeout); - } - - /** - * {@inheritDoc}. 
- */ - @Override - public List getAllTableRegions(byte[] tableName, int queryTimeout) - throws SQLException { - return getTableRegions(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, queryTimeout); - } + acquiredMutexLock = + acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP); + LOGGER.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM " + + "namespace and/or upgrading " + sysCatalogTableName); + String snapshotName = + getSysTableSnapshotName(currentServerSideTableTimeStamp, SYSTEM_CATALOG_NAME); + createSnapshot(snapshotName, SYSTEM_CATALOG_NAME); + systemTableToSnapshotMap.put(SYSTEM_CATALOG_NAME, snapshotName); + LOGGER.info("Created snapshot {} for {}", snapshotName, SYSTEM_CATALOG_NAME); - /** - * {@inheritDoc}. - */ - @Override - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey) throws SQLException{ - int queryTimeout = this.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - return getTableRegions(tableName, startRowKey, endRowKey, queryTimeout); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getTableRegions(final byte[] tableName, final byte[] startRowKey, - final byte[] endRowKey, final int queryTimeout) - throws SQLException { - /* - * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting - * all region locations from the HTable doesn't. - */ - int retryCount = 0; - int maxRetryCount = - config.getInt(PHOENIX_GET_REGIONS_RETRIES, DEFAULT_PHOENIX_GET_REGIONS_RETRIES); - TableName table = TableName.valueOf(tableName); - byte[] currentKey = null; - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); - final long maxQueryEndTime = startTime + queryTimeout; - while (true) { - try { - // We could surface the package projected HConnectionImplementation.getNumberOfCachedRegionLocations - // to get the sizing info we need, but this would require a new class in the same package and a cast - // to this implementation class, so it's probably not worth it. 
- List locations = Lists.newArrayList(); - HRegionLocation prevRegionLocation = null; - currentKey = startRowKey; - do { - HRegionLocation regionLocation = - ((ClusterConnection) connection).getRegionLocation(table, - currentKey, false); - currentKey = - getNextRegionStartKey(regionLocation, currentKey, prevRegionLocation); - locations.add(regionLocation); - prevRegionLocation = regionLocation; - if (!Bytes.equals(endRowKey, HConstants.EMPTY_END_ROW) - && Bytes.compareTo(currentKey, endRowKey) >= 0) { - break; - } - throwErrorIfQueryTimedOut(startRowKey, endRowKey, maxQueryEndTime, - queryTimeout, table, retryCount, currentKey); - } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)); - return locations; - } catch (org.apache.hadoop.hbase.TableNotFoundException e) { - TableNotFoundException ex = new TableNotFoundException(table.getNameAsString()); - e.initCause(ex); - throw ex; - } catch (IOException e) { - LOGGER.error("Exception encountered in getAllTableRegions for " - + "table: {}, retryCount: {} , currentKey: {} , startRowKey: {} ," - + " endRowKey: {}", - table.getNameAsString(), - retryCount, - Bytes.toStringBinary(currentKey), - Bytes.toStringBinary(startRowKey), - Bytes.toStringBinary(endRowKey), - e); - if (retryCount++ < maxRetryCount) { - continue; - } - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e).build() - .buildException(); - } + // Snapshot qualifiers may only contain 'alphanumeric characters' and + // digits, hence : cannot be part of snapshot name + String mappedSnapshotName = + getSysTableSnapshotName(currentServerSideTableTimeStamp, "MAPPED." + SYSTEM_CATALOG_NAME); + createSnapshot(mappedSnapshotName, MAPPED_SYSTEM_CATALOG_NAME); + systemTableToSnapshotMap.put(MAPPED_SYSTEM_CATALOG_NAME, mappedSnapshotName); + LOGGER.info("Created snapshot {} for {}", mappedSnapshotName, MAPPED_SYSTEM_CATALOG_NAME); + + if (caughtUpgradeRequiredException != null) { + if ( + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, + ConnectionQueryServicesImpl.this.getProps()) + ) { + // If SYSTEM tables exist, they are migrated to HBase SYSTEM namespace + // If they don't exist or they're already migrated, this method will return + // immediately + ensureSystemTablesMigratedToSystemNamespace(); + LOGGER.debug("Migrated SYSTEM tables to SYSTEM namespace"); + } + } + + metaConnection = + upgradeSystemCatalogIfRequired(metaConnection, currentServerSideTableTimeStamp); + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { + moveChildLinks = true; + syncAllTableAndIndexProps = true; } - } - - /** - * Throw Error if the metadata lookup takes longer than query timeout configured. - * - * @param startRowKey Start RowKey to begin the region metadata lookup from. - * @param endRowKey End RowKey to end the region metadata lookup at. - * @param maxQueryEndTime Max time to execute the metadata lookup. - * @param queryTimeout Query timeout. - * @param table Table Name. - * @param retryCount Retry Count. - * @param currentKey Current Key. - * @throws SQLException Throw Error if the metadata lookup takes longer than query timeout. - */ - private static void throwErrorIfQueryTimedOut(byte[] startRowKey, byte[] endRowKey, - long maxQueryEndTime, - int queryTimeout, TableName table, int retryCount, - byte[] currentKey) throws SQLException { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - if (currentTime >= maxQueryEndTime) { - LOGGER.error("getTableRegions has exceeded query timeout {} ms." 
- + "Table: {}, retryCount: {} , currentKey: {} , " - + "startRowKey: {} , endRowKey: {}", - queryTimeout, - table.getNameAsString(), - retryCount, - Bytes.toStringBinary(currentKey), - Bytes.toStringBinary(startRowKey), - Bytes.toStringBinary(endRowKey) - ); - final String message = "getTableRegions has exceeded query timeout " + queryTimeout - + "ms"; - IOException e = new IOException(message); - throw new SQLTimeoutException(message, - SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState(), - SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), e); - } - } - - public PMetaData getMetaDataCache() { - return latestMetaData; - } - - @Override - public int getConnectionCount(boolean isInternal) { - if (isInternal) { - return connectionLimiter.getInternalConnectionCount(); + if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0) { + // Combine view index id sequences for the same physical view index table + // to avoid collisions. See PHOENIX-5132 and PHOENIX-5138 + try (PhoenixConnection conn = + new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl, props)) { + UpgradeUtil.mergeViewIndexIdSequences(metaConnection); + } catch (Exception mergeViewIndeIdException) { + LOGGER.warn("Merge view index id sequence failed! If possible, " + + "please run MergeViewIndexIdSequencesTool to avoid view index" + + "id collision. Error: " + mergeViewIndeIdException.getMessage()); + } + } + } + + // pass systemTableToSnapshotMap to capture more system table to + // snapshot entries + metaConnection = upgradeOtherSystemTablesIfRequired(metaConnection, moveChildLinks, + systemTableToSnapshotMap); + + // Once the system tables are upgraded the local index upgrade can be done + if (localIndexUpgradeRequired) { + LOGGER.info("Upgrading local indexes"); + metaConnection = UpgradeUtil.upgradeLocalIndexes(metaConnection); + } + + // Synchronize necessary properties amongst all column families of a base table + // and its indexes. 
See PHOENIX-3955 + if (syncAllTableAndIndexProps) { + syncTableAndIndexProperties(metaConnection); + } + + // In case namespace mapping is enabled and system table to system namespace mapping is also + // enabled, + // create an entry for the SYSTEM namespace in the SYSCAT table, so that GRANT/REVOKE commands + // can work + // with SYSTEM Namespace + createSchemaIfNotExistsSystemNSMappingEnabled(metaConnection); + + clearUpgradeRequired(); + success = true; + } catch (UpgradeInProgressException | UpgradeNotRequiredException e) { + // don't set it as initializationException because otherwise client won't be able to retry + throw e; + } catch (Exception e) { + if (e instanceof SQLException) { + toThrow = (SQLException) e; + } else { + // wrap every other exception into a SQLException + toThrow = new SQLException(e); + } + } finally { + try { + if (metaConnection != null) { + metaConnection.close(); + } + } catch (SQLException e) { + if (toThrow != null) { + toThrow.setNextException(e); } else { - return connectionLimiter.getConnectionCount(); + toThrow = e; } - } - - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - synchronized (latestMetaDataLock) { - try { - throwConnectionClosedIfNullMetaData(); - // If existing table isn't older than new table, don't replace - // If a client opens a connection at an earlier timestamp, this can happen - PTableRef existingTableRef = latestMetaData.getTableRef(new PTableKey( - table.getTenantId(), table.getName().getString())); - PTable existingTable = existingTableRef.getTable(); - if (existingTable.getTimeStamp() > table.getTimeStamp()) { - return; - } - } catch (TableNotFoundException e) {} - latestMetaData.addTable(table, resolvedTime); - latestMetaDataLock.notifyAll(); + } finally { + if (!success) { + LOGGER.warn( + "Failed upgrading System tables. 
" + "Snapshots for system tables created so far: {}", + systemTableToSnapshotMap); } - } - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTime) throws SQLException { - synchronized (latestMetaDataLock) { - throwConnectionClosedIfNullMetaData(); - latestMetaData.updateResolvedTimestamp(table, resolvedTime); - latestMetaDataLock.notifyAll(); + if (acquiredMutexLock) { + try { + releaseUpgradeMutex(); + } catch (IOException e) { + LOGGER.warn("Release of upgrade mutex failed ", e); + } + } + if (toThrow != null) { + throw toThrow; + } + } + } + } + + /** + * Create or upgrade SYSTEM tables other than SYSTEM.CATALOG + * @param metaConnection Phoenix connection + * @param moveChildLinks true if we need to move child links from SYSTEM.CATALOG to + * SYSTEM.CHILD_LINK + * @param systemTableToSnapshotMap table to snapshot map which can be where new entries of system + * table to it's corresponding created snapshot is added + * @return Phoenix connection + * @throws SQLException thrown by underlying upgrade system methods + * @throws IOException thrown by underlying upgrade system methods + */ + private PhoenixConnection upgradeOtherSystemTablesIfRequired(PhoenixConnection metaConnection, + boolean moveChildLinks, Map systemTableToSnapshotMap) + throws SQLException, IOException { + // if we are really going to perform upgrades of other system tables, + // by this point we would have already taken mutex lock, hence + // we can proceed with creation of snapshots and add table to + // snapshot entries in systemTableToSnapshotMap + metaConnection = upgradeSystemSequence(metaConnection, systemTableToSnapshotMap); + metaConnection = upgradeSystemStats(metaConnection, systemTableToSnapshotMap); + metaConnection = upgradeSystemTask(metaConnection, systemTableToSnapshotMap); + metaConnection = upgradeSystemFunction(metaConnection); + metaConnection = upgradeSystemTransform(metaConnection, systemTableToSnapshotMap); + metaConnection = upgradeSystemLog(metaConnection, systemTableToSnapshotMap); + metaConnection = upgradeSystemMutex(metaConnection); + + // As this is where the most time will be spent during an upgrade, + // especially when there are large number of views. + // Upgrade the SYSTEM.CHILD_LINK towards the end, + // so that any failures here can be handled/continued out of band. 
+ metaConnection = + upgradeSystemChildLink(metaConnection, moveChildLinks, systemTableToSnapshotMap); + return metaConnection; + } + + private PhoenixConnection upgradeSystemChildLink(PhoenixConnection metaConnection, + boolean moveChildLinks, Map systemTableToSnapshotMap) + throws SQLException, IOException { + try (Statement statement = metaConnection.createStatement()) { + statement.executeUpdate(getChildLinkDDL()); + } catch (TableAlreadyExistsException e) { + takeSnapshotOfSysTable(systemTableToSnapshotMap, e); + } + if (moveChildLinks) { + // Increase the timeouts so that the scan queries during moveOrCopyChildLinks do not timeout + // on large syscat's + Map options = new HashMap<>(); + options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + moveOrCopyChildLinks(metaConnection, options); + } + return metaConnection; + } + + @VisibleForTesting + public PhoenixConnection upgradeSystemSequence(PhoenixConnection metaConnection, + Map systemTableToSnapshotMap) throws SQLException, IOException { + try (Statement statement = metaConnection.createStatement()) { + String createSequenceTable = getSystemSequenceTableDDL(nSequenceSaltBuckets); + statement.executeUpdate(createSequenceTable); + } catch (NewerTableAlreadyExistsException e) { + // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed + // timestamp. + // A TableAlreadyExistsException is not thrown, since the table only exists *after* this + // fixed timestamp. + nSequenceSaltBuckets = getSaltBuckets(e); + } catch (TableAlreadyExistsException e) { + // take snapshot first + takeSnapshotOfSysTable(systemTableToSnapshotMap, e); + + // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to + // include + // any new columns we've added. + long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { + // If the table time stamp is before 4.1.0 then we need to add below columns + // to the SYSTEM.SEQUENCE table. 
+ String columnsToAdd = + PhoenixDatabaseMetaData.MIN_VALUE + " " + PLong.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.MAX_VALUE + " " + PLong.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.CYCLE_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " + PBoolean.INSTANCE.getSqlTypeName(); + addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd); + } + // If the table timestamp is before 4.2.1 then run the upgrade script + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1) { + if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSequenceSaltBuckets, e.getTable())) { + metaConnection.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP); + clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP); + clearTableRegionCache( + TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES)); + } + } else { + nSequenceSaltBuckets = getSaltBuckets(e); + } + updateSystemSequenceWithCacheOnWriteProps(metaConnection); + } + return metaConnection; + } + + private void updateSystemSequenceWithCacheOnWriteProps(PhoenixConnection metaConnection) + throws IOException, SQLException { + + try (Admin admin = getAdmin()) { + TableDescriptor oldTD = admin + .getDescriptor(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME, + metaConnection.getQueryServices().getProps())); + ColumnFamilyDescriptor oldCf = + oldTD.getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); + + // If the CacheOnWrite related properties are not set, lets set them. + if ( + !oldCf.isCacheBloomsOnWrite() || !oldCf.isCacheDataOnWrite() + || !oldCf.isCacheIndexesOnWrite() + ) { + ColumnFamilyDescriptorBuilder newCFBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(oldCf); + newCFBuilder.setCacheBloomsOnWrite(true); + newCFBuilder.setCacheDataOnWrite(true); + newCFBuilder.setCacheIndexesOnWrite(true); + + TableDescriptorBuilder newTD = TableDescriptorBuilder.newBuilder(oldTD); + newTD.modifyColumnFamily(newCFBuilder.build()); + admin.modifyTable(newTD.build()); + } + } + } + + private void takeSnapshotOfSysTable(Map systemTableToSnapshotMap, + TableAlreadyExistsException e) throws SQLException { + long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); + String tableName = e.getTable().getPhysicalName().getString(); + String snapshotName = getSysTableSnapshotName(currentServerSideTableTimeStamp, tableName); + // Snapshot qualifiers may only contain 'alphanumeric characters' and + // digits, hence : cannot be part of snapshot name + if (snapshotName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + snapshotName = getSysTableSnapshotName(currentServerSideTableTimeStamp, "MAPPED." 
+ tableName) + .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + } + createSnapshot(snapshotName, tableName); + systemTableToSnapshotMap.put(tableName, snapshotName); + LOGGER.info("Snapshot {} created for table {}", snapshotName, tableName); + } + + @VisibleForTesting + public PhoenixConnection upgradeSystemStats(PhoenixConnection metaConnection, + Map systemTableToSnapshotMap) + throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { + try (Statement statement = metaConnection.createStatement()) { + statement.executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA); + } catch (NewerTableAlreadyExistsException ignored) { + + } catch (TableAlreadyExistsException e) { + // take snapshot first + takeSnapshotOfSysTable(systemTableToSnapshotMap, e); + long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) { + metaConnection = addColumnsIfNotExists(metaConnection, SYSTEM_STATS_NAME, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, + PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT + " " + PLong.INSTANCE.getSqlTypeName()); + } + if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0) { + // The COLUMN_FAMILY column should be nullable as we create a row in it without + // any column family to mark when guideposts were last collected. + metaConnection = removeNotNullConstraint(metaConnection, SYSTEM_SCHEMA_NAME, + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, PhoenixDatabaseMetaData.COLUMN_FAMILY); + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0); + clearCache(); + } + if ( + UpgradeUtil.tableHasKeepDeleted(metaConnection, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME) + ) { + try (Statement altStmt = metaConnection.createStatement()) { + altStmt.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " + + KEEP_DELETED_CELLS + "='" + KeepDeletedCells.FALSE + "'"); + } + } + if ( + UpgradeUtil.tableHasMaxVersions(metaConnection, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME) + ) { + try (Statement altStats = metaConnection.createStatement()) { + altStats.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + + " SET " + HConstants.VERSIONS + " = '1' "); + } + } + } + return metaConnection; + } + + private PhoenixConnection upgradeSystemTask(PhoenixConnection metaConnection, + Map systemTableToSnapshotMap) throws SQLException, IOException { + try (Statement statement = metaConnection.createStatement()) { + statement.executeUpdate(getTaskDDL()); + } catch (NewerTableAlreadyExistsException ignored) { + + } catch (TableAlreadyExistsException e) { + // take snapshot first + takeSnapshotOfSysTable(systemTableToSnapshotMap, e); + long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); + if (currentServerSideTableTimeStamp <= MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { + String columnsToAdd = PhoenixDatabaseMetaData.TASK_STATUS + " " + + PVarchar.INSTANCE.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.TASK_END_TS + " " + + PTimestamp.INSTANCE.getSqlTypeName() + ", " + PhoenixDatabaseMetaData.TASK_PRIORITY + + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.TASK_DATA + " " + PVarchar.INSTANCE.getSqlTypeName(); + String taskTableFullName = + 
SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, SYSTEM_TASK_TABLE); + metaConnection = addColumnsIfNotExists(metaConnection, taskTableFullName, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd); + String altQuery = + String.format(ALTER_TABLE_SET_PROPS, taskTableFullName, TTL, TASK_TABLE_TTL); + try (PreparedStatement altQueryStmt = metaConnection.prepareStatement(altQuery)) { + altQueryStmt.executeUpdate(); + } + clearCache(); + } + // If SYSTEM.TASK does not have disabled regions split policy, + // set it up here while upgrading it + try (Admin admin = metaConnection.getQueryServices().getAdmin()) { + TableDescriptor td; + TableName tableName = + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, props); + td = admin.getDescriptor(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(td); + boolean isTableDescUpdated = false; + if (updateAndConfirmSplitPolicyForTask(tableDescriptorBuilder)) { + isTableDescUpdated = true; + } + if ( + !tableDescriptorBuilder.build() + .hasCoprocessor(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME) + ) { + int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, + QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); + tableDescriptorBuilder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME).setPriority(priority) + .setProperties(Collections.emptyMap()).build()); + isTableDescUpdated = true; + } + if (isTableDescUpdated) { + admin.modifyTable(tableDescriptorBuilder.build()); + pollForUpdatedTableDescriptor(admin, tableDescriptorBuilder.build(), tableName.getName()); + } + } catch (InterruptedException | TimeoutException ite) { + throw new SQLException( + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + " Upgrade is not confirmed"); + } + } + return metaConnection; + } + + private PhoenixConnection upgradeSystemTransform(PhoenixConnection metaConnection, + Map systemTableToSnapshotMap) throws SQLException { + try (Statement statement = metaConnection.createStatement()) { + statement.executeUpdate(getTransformDDL()); + } catch (TableAlreadyExistsException ignored) { + + } + return metaConnection; + } + + private PhoenixConnection upgradeSystemFunction(PhoenixConnection metaConnection) + throws SQLException { + try { + metaConnection.createStatement().executeUpdate(getFunctionTableDDL()); + } catch (TableAlreadyExistsException ignored) { + // Since we are not performing any action as part of upgrading + // SYSTEM.FUNCTION, we don't need to take snapshot as of this + // writing. However, if need arises to perform significant + // update, we should take snapshot just like other system tables. 
+ // e.g usages of takeSnapshotOfSysTable() + } + return metaConnection; + } + + @VisibleForTesting + public PhoenixConnection upgradeSystemLog(PhoenixConnection metaConnection, + Map systemTableToSnapshotMap) + throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { + try (Statement statement = metaConnection.createStatement()) { + statement.executeUpdate(getLogTableDDL()); + } catch (NewerTableAlreadyExistsException ignored) { + } catch (TableAlreadyExistsException e) { + // take snapshot first + takeSnapshotOfSysTable(systemTableToSnapshotMap, e); + if ( + UpgradeUtil.tableHasKeepDeleted(metaConnection, PhoenixDatabaseMetaData.SYSTEM_LOG_NAME) + ) { + try (Statement altLogStmt = metaConnection.createStatement()) { + altLogStmt.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_LOG_NAME + + " SET " + KEEP_DELETED_CELLS + "='" + KeepDeletedCells.FALSE + "'"); + } + } + if ( + UpgradeUtil.tableHasMaxVersions(metaConnection, PhoenixDatabaseMetaData.SYSTEM_LOG_NAME) + ) { + try (Statement altLogVer = metaConnection.createStatement()) { + altLogVer.executeUpdate("ALTER TABLE " + PhoenixDatabaseMetaData.SYSTEM_LOG_NAME + " SET " + + HConstants.VERSIONS + "='1'"); + } + } + } + return metaConnection; + } + + private PhoenixConnection upgradeSystemMutex(PhoenixConnection metaConnection) + throws SQLException { + try { + metaConnection.createStatement().executeUpdate(getMutexDDL()); + } catch (TableAlreadyExistsException ignored) { + // Since we are not performing any action as part of upgrading + // SYSTEM.MUTEX, we don't need to take snapshot as of this + // writing. However, if need arises to perform significant + // update, we should take snapshot just like other system tables. + // e.g usages of takeSnapshotOfSysTable() + } + return metaConnection; + } + + // Special method for adding the column qualifier column for 4.10. 
+ private PhoenixConnection addColumnQualifierColumn(PhoenixConnection oldMetaConnection, + Long timestamp) throws SQLException { + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + // Cannot go through DriverManager or you end up in an infinite loop because it'll call init + // again + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + metaConnection.setAutoCommit(false); + PTable sysCatalogPTable = metaConnection.getTable(SYSTEM_CATALOG_NAME); + int numColumns = sysCatalogPTable.getColumns().size(); + try (PreparedStatement mutateTable = + metaConnection.prepareStatement(MetaDataClient.MUTATE_TABLE)) { + mutateTable.setString(1, null); + mutateTable.setString(2, SYSTEM_CATALOG_SCHEMA); + mutateTable.setString(3, SYSTEM_CATALOG_TABLE); + mutateTable.setString(4, PTableType.SYSTEM.getSerializedValue()); + mutateTable.setLong(5, sysCatalogPTable.getSequenceNumber() + 1); + mutateTable.setInt(6, numColumns + 1); + mutateTable.execute(); + } + List tableMetadata = new ArrayList<>( + metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond()); + metaConnection.rollback(); + PColumn column = new PColumnImpl(PNameFactory.newName("COLUMN_QUALIFIER"), + PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME), PVarbinary.INSTANCE, null, null, true, + numColumns, SortOrder.ASC, null, null, false, null, false, false, + Bytes.toBytes("COLUMN_QUALIFIER"), timestamp); + String upsertColumnMetadata = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_NAME + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + NULLABLE + "," + COLUMN_SIZE + + "," + DECIMAL_DIGITS + "," + ORDINAL_POSITION + "," + SORT_ORDER + "," + DATA_TABLE_NAME + + "," + ARRAY_SIZE + "," + VIEW_CONSTANT + "," + IS_VIEW_REFERENCED + "," + PK_NAME + "," + + KEY_SEQ + "," + COLUMN_DEF + "," + IS_ROW_TIMESTAMP + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + try (PreparedStatement colUpsert = metaConnection.prepareStatement(upsertColumnMetadata)) { + colUpsert.setString(1, null); + colUpsert.setString(2, SYSTEM_CATALOG_SCHEMA); + colUpsert.setString(3, SYSTEM_CATALOG_TABLE); + colUpsert.setString(4, "COLUMN_QUALIFIER"); + colUpsert.setString(5, DEFAULT_COLUMN_FAMILY); + colUpsert.setInt(6, column.getDataType().getSqlType()); + colUpsert.setInt(7, ResultSetMetaData.columnNullable); + colUpsert.setNull(8, Types.INTEGER); + colUpsert.setNull(9, Types.INTEGER); + colUpsert.setInt(10, sysCatalogPTable.getBucketNum() != null ? numColumns : (numColumns + 1)); + colUpsert.setInt(11, SortOrder.ASC.getSystemValue()); + colUpsert.setString(12, null); + colUpsert.setNull(13, Types.INTEGER); + colUpsert.setBytes(14, null); + colUpsert.setBoolean(15, false); + colUpsert.setString(16, + sysCatalogPTable.getPKName() == null ? null : sysCatalogPTable.getPKName().getString()); + colUpsert.setNull(17, Types.SMALLINT); + colUpsert.setNull(18, Types.VARCHAR); + colUpsert.setBoolean(19, false); + colUpsert.execute(); + } + tableMetadata.addAll( + metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond()); + metaConnection.rollback(); + metaConnection.getQueryServices().addColumn(tableMetadata, sysCatalogPTable, null, null, + Collections.>> emptyMap(), Collections. 
emptySet(), + Lists.newArrayList(column)); + metaConnection.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp); + ConnectionQueryServicesImpl.this.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp); + clearCache(); + return metaConnection; + } + + private void deleteSnapshot(String snapshotName) throws SQLException, IOException { + try (Admin admin = getAdmin()) { + admin.deleteSnapshot(snapshotName); + LOGGER.info("Snapshot {} is deleted", snapshotName); + } + } + + private void createSnapshot(String snapshotName, String tableName) throws SQLException { + Admin admin = null; + SQLException sqlE = null; + try { + admin = getAdmin(); + admin.snapshot(snapshotName, TableName.valueOf(tableName)); + LOGGER.info("Successfully created snapshot " + snapshotName + " for " + tableName); + } catch (SnapshotCreationException e) { + if (e.getMessage().contains("doesn't exist")) { + LOGGER.warn("Could not create snapshot {}, table is missing." + snapshotName, e); + } else { + sqlE = new SQLException(e); + } + } catch (Exception e) { + sqlE = new SQLException(e); + } finally { + try { + if (admin != null) { + admin.close(); + } + } catch (Exception e) { + SQLException adminCloseEx = new SQLException(e); + if (sqlE == null) { + sqlE = adminCloseEx; + } else { + sqlE.setNextException(adminCloseEx); + } + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + } + + void ensureSystemTablesMigratedToSystemNamespace() + throws SQLException, IOException, IllegalArgumentException, InterruptedException { + if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) { + return; + } + + Table metatable = null; + try (Admin admin = getAdmin()) { + List tableNames = getSystemTableNamesInDefaultNamespace(admin); + // No tables exist matching "SYSTEM\..*", they are all already in "SYSTEM:.*" + if (tableNames.size() == 0) { + return; + } + // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:" + if (tableNames.size() > 9) { + LOGGER.warn("Expected 9 system tables but found " + tableNames.size() + ":" + tableNames); + } + + byte[] mappedSystemTable = SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()) + .getName(); + metatable = getTable(mappedSystemTable); + if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) { + if (!AdminUtilWithFallback.tableExists(admin, TableName.valueOf(mappedSystemTable))) { + LOGGER.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace."); + // Actual migration of SYSCAT table + UpgradeUtil.mapTableToNamespace(admin, metatable, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, this.getProps(), null, PTableType.SYSTEM, + null); + // Invalidate the client-side metadataCache + ConnectionQueryServicesImpl.this.removeTable(null, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0); + } + tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME); + } + for (TableName table : tableNames) { + LOGGER + .info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString())); + UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), this.getProps(), + null, PTableType.SYSTEM, null); + ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0); + } + + // Clear the server-side metadataCache when all tables are migrated so that the new PTable can + // be loaded with NS mapping + clearCache(); + } 
finally { + if (metatable != null) { + metatable.close(); + } + } + } + + /** + * Acquire distributed mutex of sorts to make sure only one JVM is able to run the upgrade code by + * making use of HBase's checkAndPut api. + * @return true if client won the race, false otherwise + */ + @VisibleForTesting + public boolean checkUpgradeBlockMutex() throws SQLException { + try (Table sysMutexTable = getSysMutexTable()) { + final byte[] rowKey = Bytes.toBytes("BLOCK_UPGRADE"); + + Get get = + new Get(rowKey).addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES); + Result r = sysMutexTable.get(get); + + if (!r.isEmpty()) { + throw new UpgradeBlockedException(); + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + return true; + } + + /** + * Acquire distributed mutex of sorts to make sure only one JVM is able to run the upgrade code by + * making use of HBase's checkAndPut api. + * @return true if client won the race, false otherwise + */ + @VisibleForTesting + public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp) throws SQLException { + Preconditions.checkArgument(currentServerSideTableTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP); + if ( + !writeMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null) + ) { + throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), + getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); + } + return true; + } + + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + try { + byte[] rowKey = columnName != null + ? SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, familyName) + : SchemaUtil.getTableKey(tenantId, schemaName, tableName); + // at this point the system mutex table should have been created or + // an exception thrown + try (Table sysMutexTable = getSysMutexTable()) { + Put put = new Put(rowKey); + put.addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES, MUTEX_LOCKED); + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(rowKey) + .ifNotExists(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES).build(put); + boolean checkAndPut = sysMutexTable.checkAndMutate(checkAndMutate).isSuccess(); + String processName = ManagementFactory.getRuntimeMXBean().getName(); + String msg = " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " + + tableName + " columnName : " + columnName + " familyName : " + familyName; + if (!checkAndPut) { + LOGGER.error(processName + " failed to acquire mutex for " + msg); + } else { + LOGGER.debug(processName + " acquired mutex for " + msg); + } + return checkAndPut; + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + @VisibleForTesting + public void releaseUpgradeMutex() throws IOException, SQLException { + deleteMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null); + } + + @Override + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + try { + byte[] rowKey = columnName != null + ? 
SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, familyName) + : SchemaUtil.getTableKey(tenantId, schemaName, tableName); + // at this point the system mutex table should have been created or + // an exception thrown + try (Table sysMutexTable = getSysMutexTable()) { + byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; + byte[] qualifier = PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES; + Delete delete = new Delete(rowKey); + delete.addColumn(family, qualifier); + sysMutexTable.delete(delete); + String processName = ManagementFactory.getRuntimeMXBean().getName(); + String msg = " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " + + tableName + " columnName : " + columnName + " familyName : " + familyName; + LOGGER.debug(processName + " released mutex for " + msg); + } + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + @VisibleForTesting + public Table getSysMutexTable() throws SQLException { + String tableNameAsString = SYSTEM_MUTEX_NAME; + Table table; + try { + table = getTableIfExists(Bytes.toBytes(tableNameAsString)); + } catch (TableNotFoundException e) { + tableNameAsString = tableNameAsString.replace(QueryConstants.NAME_SEPARATOR, + QueryConstants.NAMESPACE_SEPARATOR); + // if SYSTEM.MUTEX does not exist, we don't need to check + // for the existence of SYSTEM:MUTEX as it must exist, hence + // we can call getTable() here instead of getTableIfExists() + table = getTable(Bytes.toBytes(tableNameAsString)); + } + return table; + } + + private String addColumn(String columnsToAddSoFar, String columns) { + if (columnsToAddSoFar == null || columnsToAddSoFar.isEmpty()) { + return columns; + } else { + return columnsToAddSoFar + ", " + columns; + } + } + + /** + * Set IMMUTABLE_ROWS to true for all index tables over immutable tables. 
+ * @param oldMetaConnection connection over which to run the upgrade + * @param timestamp SCN at which to run the update + */ + private PhoenixConnection setImmutableTableIndexesImmutable(PhoenixConnection oldMetaConnection, + long timestamp) throws SQLException { + SQLException sqlE = null; + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + boolean autoCommit = metaConnection.getAutoCommit(); + try { + metaConnection.setAutoCommit(true); + metaConnection.createStatement().execute( + "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, IMMUTABLE_ROWS)\n" + + "SELECT A.TENANT_ID, A.TABLE_SCHEM,B.COLUMN_FAMILY,null,null,true\n" + + "FROM SYSTEM.CATALOG A JOIN SYSTEM.CATALOG B ON (\n" + + " A.TENANT_ID = B.TENANT_ID AND \n" + " A.TABLE_SCHEM = B.TABLE_SCHEM AND\n" + + " A.TABLE_NAME = B.TABLE_NAME AND\n" + " A.COLUMN_NAME = B.COLUMN_NAME AND\n" + + " B.LINK_TYPE = 1\n" + ")\n" + "WHERE A.COLUMN_FAMILY IS NULL AND\n" + + " B.COLUMN_FAMILY IS NOT NULL AND\n" + " A.IMMUTABLE_ROWS = TRUE"); + } catch (SQLException e) { + LOGGER.warn("exception during upgrading stats table:" + e); + sqlE = e; + } finally { + try { + metaConnection.setAutoCommit(autoCommit); + oldMetaConnection.close(); + } catch (SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; + } + } + if (sqlE != null) { + throw sqlE; + } + } + return metaConnection; + } + + /** + * Forces update of SYSTEM.CATALOG by setting column to existing value + */ + private PhoenixConnection updateSystemCatalogTimestamp(PhoenixConnection oldMetaConnection, + long timestamp) throws SQLException { + SQLException sqlE = null; + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + boolean autoCommit = metaConnection.getAutoCommit(); + try { + metaConnection.setAutoCommit(true); + metaConnection.createStatement().execute( + "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" + + "VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)"); + } catch (SQLException e) { + LOGGER.warn("exception during upgrading stats table:" + e); + sqlE = e; + } finally { + try { + metaConnection.setAutoCommit(autoCommit); + oldMetaConnection.close(); + } catch (SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; + } + } + if (sqlE != null) { + throw sqlE; + } + } + return metaConnection; + } + + private PhoenixConnection dropStatsTable(PhoenixConnection oldMetaConnection, long timestamp) + throws SQLException, IOException { + Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); + PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); + SQLException sqlE = null; + boolean wasCommit = metaConnection.getAutoCommit(); + try { + metaConnection.setAutoCommit(true); + metaConnection.createStatement() + .executeUpdate("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE " + + 
PhoenixDatabaseMetaData.TABLE_NAME + "='" + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE + + "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='" + SYSTEM_SCHEMA_NAME + "'"); + } catch (SQLException e) { + LOGGER.warn("exception during upgrading stats table:" + e); + sqlE = e; + } finally { + try { + metaConnection.setAutoCommit(wasCommit); + oldMetaConnection.close(); + } catch (SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; } + } + if (sqlE != null) { + throw sqlE; + } } + return metaConnection; + } - private static interface Mutator { - void mutate(PMetaData metaData) throws SQLException; + private void scheduleRenewLeaseTasks() { + if (isRenewingLeasesEnabled()) { + renewLeaseExecutor = + Executors.newScheduledThreadPool(renewLeasePoolSize, renewLeaseThreadFactory); + for (LinkedBlockingQueue> q : connectionQueues) { + renewLeaseExecutor.scheduleAtFixedRate(new RenewLeaseTask(q), 0, renewLeaseTaskFrequency, + TimeUnit.MILLISECONDS); + } } + } - /** - * Ensures that metaData mutations are handled in the correct order - */ - private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, Mutator mutator) throws SQLException { - synchronized (latestMetaDataLock) { - throwConnectionClosedIfNullMetaData(); - PMetaData metaData = latestMetaData; - PTable table; - long endTime = EnvironmentEdgeManager.currentTimeMillis() + - DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS; - while (true) { - try { - try { - table = metaData.getTableRef(new PTableKey(tenantId, tableName)).getTable(); - /* If the table is at the prior sequence number, then we're good to go. - * We know if we've got this far, that the server validated the mutations, - * so we'd just need to wait until the other connection that mutated the same - * table is processed. - */ - if (table.getSequenceNumber() + 1 == tableSeqNum) { - // TODO: assert that timeStamp is bigger that table timeStamp? - mutator.mutate(metaData); - break; - } else if (table.getSequenceNumber() >= tableSeqNum) { - LOGGER.warn("Attempt to cache older version of " + tableName + - ": current= " + table.getSequenceNumber() + - ", new=" + tableSeqNum); - break; - } - } catch (TableNotFoundException e) { - } - long waitTime = endTime - EnvironmentEdgeManager.currentTimeMillis(); - // We waited long enough - just remove the table from the cache - // and the next time it's used it'll be pulled over from the server. - if (waitTime <= 0) { - LOGGER.warn("Unable to update meta data repo within " + - (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + - " seconds for " + tableName); - // There will never be a parentTableName here, as that would only - // be non null for an index an we never add/remove columns from an index. 
- metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP); - break; - } - latestMetaDataLock.wait(waitTime); - } catch (InterruptedException e) { - // restore the interrupt status - Thread.currentThread().interrupt(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); // FIXME - } - } - latestMetaData = metaData; - latestMetaDataLock.notifyAll(); - return metaData; - } - } + private static class RenewLeaseThreadFactory implements ThreadFactory { + private static final AtomicInteger threadNumber = new AtomicInteger(1); + private static final String NAME_PREFIX = "PHOENIX-SCANNER-RENEW-LEASE-thread-"; @Override - public void removeTable(PName tenantId, final String tableName, String parentTableName, long tableTimeStamp) throws SQLException { - synchronized (latestMetaDataLock) { - throwConnectionClosedIfNullMetaData(); - latestMetaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp); - latestMetaDataLock.notifyAll(); - } - } - - @Override - public void removeColumn(final PName tenantId, final String tableName, final List columnsToRemove, final long tableTimeStamp, final long tableSeqNum, final long resolvedTime) throws SQLException { - metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() { - @Override - public void mutate(PMetaData metaData) throws SQLException { - try { - metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime); - } catch (TableNotFoundException e) { - // The DROP TABLE may have been processed first, so just ignore. - } - } - }); - } - - /** - * Check that the supplied connection properties are set to valid values. - * @param info The properties to be validated. - * @throws IllegalArgumentException when a property is not set to a valid value. - */ - private void validateConnectionProperties(Properties info) { - if (info.get(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB) != null) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Connection's " + DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB + " set to " + - info.get(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - } - ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - info.getProperty(DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - } - } - - @Override - public PhoenixConnection connect(String url, Properties info) throws SQLException { - checkClosed(); - throwConnectionClosedIfNullMetaData(); - validateConnectionProperties(info); - - return new PhoenixConnection(this, url, info); - } - - private ColumnFamilyDescriptor generateColumnFamilyDescriptor(Pair> family, PTableType tableType) throws SQLException { - ColumnFamilyDescriptorBuilder columnDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family.getFirst()); - if (tableType != PTableType.VIEW) { - columnDescBuilder.setDataBlockEncoding(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING); - for (Entry entry : family.getSecond().entrySet()) { - String key = entry.getKey(); - Object value = entry.getValue(); - setHColumnDescriptorValue(columnDescBuilder, key, value); - } - } - return columnDescBuilder.build(); - } - - // Workaround HBASE-14737 - private static void setHColumnDescriptorValue(ColumnFamilyDescriptorBuilder columnDescBuilder, String key, Object value) { - if (HConstants.VERSIONS.equals(key)) { - columnDescBuilder.setMaxVersions(getMaxVersion(value)); - } else { - columnDescBuilder.setValue(key, value == null ? 
null : value.toString()); - } - } - - private static int getMaxVersion(Object value) { - if (value == null) { - return -1; // HColumnDescriptor.UNINITIALIZED is private - } - if (value instanceof Number) { - return ((Number)value).intValue(); - } - String stringValue = value.toString(); - if (stringValue.isEmpty()) { - return -1; - } - return Integer.parseInt(stringValue); - } - - private void modifyColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder hcd, Map props) throws SQLException { - for (Entry entry : props.entrySet()) { - String propName = entry.getKey(); - Object value = entry.getValue(); - setHColumnDescriptorValue(hcd, propName, value); - } - } - - private TableDescriptorBuilder generateTableDescriptor(byte[] physicalTableName, byte[] parentPhysicalTableName, TableDescriptor existingDesc, - PTableType tableType, Map tableProps, List>> families, - byte[][] splits, boolean isNamespaceMapped) throws SQLException { - String defaultFamilyName = (String)tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME); - TableDescriptorBuilder tableDescriptorBuilder = (existingDesc != null) ?TableDescriptorBuilder.newBuilder(existingDesc) - : TableDescriptorBuilder.newBuilder(TableName.valueOf(physicalTableName)); - - ColumnFamilyDescriptor dataTableColDescForIndexTablePropSyncing = null; - boolean doNotAddGlobalIndexChecker = false; - if (tableType == PTableType.INDEX || MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) { - byte[] defaultFamilyBytes = - defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName); - - final TableDescriptor baseTableDesc; - if (MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) { - // Handles indexes created on views for single-tenant tables and - // global indexes created on views of multi-tenant tables - baseTableDesc = this.getTableDescriptor(parentPhysicalTableName); - } else if (existingDesc == null) { - // Global/local index creation on top of a physical base table - baseTableDesc = this.getTableDescriptor(SchemaUtil.getPhysicalTableName( - Bytes.toBytes((String) tableProps.get(PhoenixDatabaseMetaData.DATA_TABLE_NAME)), isNamespaceMapped) - .getName()); - } else { - // In case this a local index created on a view of a multi-tenant table, the - // PHYSICAL_DATA_TABLE_NAME points to the name of the view instead of the physical base table - baseTableDesc = existingDesc; - } - dataTableColDescForIndexTablePropSyncing = baseTableDesc.getColumnFamily(defaultFamilyBytes); - // It's possible that the table has specific column families and none of them are declared - // to be the DEFAULT_COLUMN_FAMILY, so we choose the first column family for syncing properties - if (dataTableColDescForIndexTablePropSyncing == null) { - dataTableColDescForIndexTablePropSyncing = baseTableDesc.getColumnFamilies()[0]; - } - if (baseTableDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { - // The base table still uses the old indexing - doNotAddGlobalIndexChecker = true; - } - } - // By default, do not automatically rebuild/catch up an index on a write failure - // Add table-specific properties to the table descriptor - for (Entry entry : tableProps.entrySet()) { - String key = entry.getKey(); - if (!TableProperty.isPhoenixTableProperty(key)) { - Object value = entry.getValue(); - tableDescriptorBuilder.setValue(key, value == null ? 
null : value.toString()); - } - } - - Map syncedProps = MetaDataUtil.getSyncedProps(dataTableColDescForIndexTablePropSyncing); - // Add column family-specific properties to the table descriptor - for (Pair> family : families) { - // If family is only in phoenix description, add it. otherwise, modify its property accordingly. - byte[] familyByte = family.getFirst(); - if (tableDescriptorBuilder.build().getColumnFamily(familyByte) == null) { - if (tableType == PTableType.VIEW) { - String fullTableName = Bytes.toString(physicalTableName); - throw new ReadOnlyTableException( - "The HBase column families for a read-only table must already exist", - SchemaUtil.getSchemaNameFromFullName(fullTableName), - SchemaUtil.getTableNameFromFullName(fullTableName), - Bytes.toString(familyByte)); - } - - ColumnFamilyDescriptor columnDescriptor = generateColumnFamilyDescriptor(family, tableType); - // Keep certain index column family properties in sync with the base table - if ((tableType == PTableType.INDEX || MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) && - (syncedProps != null && !syncedProps.isEmpty())) { - ColumnFamilyDescriptorBuilder colFamDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(columnDescriptor); - modifyColumnFamilyDescriptor(colFamDescBuilder, syncedProps); - columnDescriptor = colFamDescBuilder.build(); - } - tableDescriptorBuilder.setColumnFamily(columnDescriptor); - } else { - if (tableType != PTableType.VIEW) { - ColumnFamilyDescriptor columnDescriptor = tableDescriptorBuilder.build().getColumnFamily(familyByte); - if (columnDescriptor == null) { - throw new IllegalArgumentException("Unable to find column descriptor with family name " + Bytes.toString(family.getFirst())); - } - ColumnFamilyDescriptorBuilder columnDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(columnDescriptor); - modifyColumnFamilyDescriptor(columnDescriptorBuilder, family.getSecond()); - tableDescriptorBuilder.modifyColumnFamily(columnDescriptorBuilder.build()); - } - } - } - addCoprocessors(physicalTableName, tableDescriptorBuilder, - tableType, tableProps, existingDesc, doNotAddGlobalIndexChecker); - - // PHOENIX-3072: Set index priority if this is a system table or index table - if (tableType == PTableType.SYSTEM) { - tableDescriptorBuilder.setValue(QueryConstants.PRIORITY, - String.valueOf(IndexUtil.getMetadataPriority(config))); - } else if (tableType == PTableType.INDEX // Global, mutable index - && !isLocalIndexTable(tableDescriptorBuilder.build().getColumnFamilyNames()) - && !Boolean.TRUE.equals(tableProps.get(PhoenixDatabaseMetaData.IMMUTABLE_ROWS))) { - tableDescriptorBuilder.setValue(QueryConstants.PRIORITY, - String.valueOf(IndexUtil.getIndexPriority(config))); - } - return tableDescriptorBuilder; - } - - private boolean isLocalIndexTable(Collection families) { - // no easier way to know local index table? 
- for (byte[] family: families) { - if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - return true; - } - } - return false; - } - - private boolean isPhoenixTTLEnabled() { - return config.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); - } - - - private void addCoprocessors(byte[] tableName, TableDescriptorBuilder builder, - PTableType tableType, Map tableProps, TableDescriptor existingDesc, - boolean doNotAddGlobalIndexChecker) throws SQLException { - // The phoenix jar must be available on HBase classpath - int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); - try { - TableDescriptor newDesc = builder.build(); - TransactionFactory.Provider provider = getTransactionProvider(tableProps); - boolean isTransactional = (provider != null); - - boolean indexRegionObserverEnabled = config.getBoolean( - QueryServices.INDEX_REGION_OBSERVER_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REGION_OBSERVER_ENABLED); - boolean isViewIndex = TRUE_BYTES_AS_STRING - .equals(tableProps.get(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME)); - boolean isServerSideMaskingEnabled = config.getBoolean( - QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, - QueryServicesOptions.DEFAULT_SERVER_SIDE_MASKING_ENABLED); - - boolean isViewBaseTransactional = false; - if (!isTransactional && isViewIndex) { - if (tableProps.containsKey(TRANSACTIONAL) && - Boolean.TRUE.equals(tableProps.get(TRANSACTIONAL))) { - isViewBaseTransactional = true; - } - } - - if (!isTransactional && !isViewBaseTransactional - && (tableType == PTableType.INDEX || isViewIndex)) { - if (!indexRegionObserverEnabled && newDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME); - } else if (indexRegionObserverEnabled && !newDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME) && - !isLocalIndexTable(newDesc.getColumnFamilyNames())) { - if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); - } - if (!doNotAddGlobalIndexChecker) { - builder.setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME) - .setPriority(priority - 1).build()); - } - } - } - - if (!newDesc.hasCoprocessor(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME) - .setPriority(priority).build()); - } - if (!newDesc.hasCoprocessor(QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME) - .setPriority(priority).build()); - } - if (!newDesc.hasCoprocessor(QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME) - .setPriority(priority).build()); - } - if (!newDesc.hasCoprocessor(QueryConstants.SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(QueryConstants.SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME) - .setPriority(priority).build()); - } - - // TODO: better encapsulation for this - // Since indexes can't have indexes, don't install 
our indexing coprocessor for indexes. - // Also don't install on the SYSTEM.CATALOG and SYSTEM.STATS table because we use - // all-or-none mutate class which break when this coprocessor is installed (PHOENIX-1318). - if ((tableType != PTableType.INDEX && tableType != PTableType.VIEW && !isViewIndex) - && !SchemaUtil.isMetaTable(tableName) - && !SchemaUtil.isStatsTable(tableName)) { - if (isTransactional) { - if (!newDesc.hasCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME) - .setPriority(priority).build()); - } - // For alter table, remove non transactional index coprocessor - if (newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.INDEXER_CLASSNAME); - } - if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); - } - } else { - // If exception on alter table to transition back to non transactional - if (newDesc.hasCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME); - } - // we only want to mess with the indexing coprocs if we're on the original - // CREATE statement. Otherwise, if we're on an ALTER or CREATE TABLE - // IF NOT EXISTS of an existing table, we should leave them unaltered, - // because they should be upgraded or downgraded using the IndexUpgradeTool - if (!doesPhoenixTableAlreadyExist(existingDesc)) { - if (indexRegionObserverEnabled) { - if (newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.INDEXER_CLASSNAME); - } - if (!newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { - Map opts = Maps.newHashMapWithExpectedSize(1); - opts.put(IndexUtil.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); - IndexUtil.enableIndexing(builder, IndexUtil.PHOENIX_INDEX_BUILDER_CLASSNAME, - opts, priority, QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); - } - } else { - if (newDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME)) { - builder.removeCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME); - } - if (!newDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME)) { - Map opts = Maps.newHashMapWithExpectedSize(1); - opts.put(IndexUtil.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); - IndexUtil.enableIndexing(builder, IndexUtil.PHOENIX_INDEX_BUILDER_CLASSNAME, - opts, priority, QueryConstants.INDEXER_CLASSNAME); - } - } - } - } - } - - if ((SchemaUtil.isStatsTable(tableName) || SchemaUtil.isMetaTable(tableName)) - && !newDesc.hasCoprocessor(QueryConstants.MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - - Set familiesKeys = builder.build().getColumnFamilyNames(); - for (byte[] family: familiesKeys) { - if (Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - if (!newDesc.hasCoprocessor(QueryConstants.INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - 
.build()); - break; - } - } - } - - // Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix table - // stay on the same region. - if (SchemaUtil.isMetaTable(tableName) || SchemaUtil.isFunctionTable(tableName)) { - if (!newDesc.hasCoprocessor(QueryConstants.META_DATA_ENDPOINT_IMPL_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.META_DATA_ENDPOINT_IMPL_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - if (SchemaUtil.isMetaTable(tableName) ) { - if (!newDesc.hasCoprocessor(QueryConstants.META_DATA_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.META_DATA_REGION_OBSERVER_CLASSNAME) - .setPriority(priority + 1) - .setProperties(Collections.emptyMap()) - .build()); - } - } - } else if (SchemaUtil.isSequenceTable(tableName)) { - if (!newDesc.hasCoprocessor(QueryConstants.SEQUENCE_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.SEQUENCE_REGION_OBSERVER_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - } else if (SchemaUtil.isTaskTable(tableName)) { - if (!newDesc.hasCoprocessor(QueryConstants.TASK_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.TASK_REGION_OBSERVER_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - if (!newDesc.hasCoprocessor(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - } else if (SchemaUtil.isChildLinkTable(tableName)) { - if (!newDesc.hasCoprocessor(QueryConstants.CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - } - - if (isTransactional) { - String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); - if (!newDesc.hasCoprocessor(coprocessorClassName)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(coprocessorClassName) - .setPriority(priority - 10) - .setProperties(Collections.emptyMap()) - .build()); - } - String coprocessorGCClassName = provider.getTransactionProvider().getGCCoprocessorClassName(); - if (coprocessorGCClassName != null) { - if (!newDesc.hasCoprocessor(coprocessorGCClassName)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(coprocessorGCClassName) - .setPriority(priority - 10) - .setProperties(Collections.emptyMap()) - .build()); - } - } - } else { - // Remove all potential transactional coprocessors - for (TransactionFactory.Provider aprovider : TransactionFactory.Provider.available()) { - String coprocessorClassName = aprovider.getTransactionProvider().getCoprocessorClassName(); - String coprocessorGCClassName = aprovider.getTransactionProvider().getGCCoprocessorClassName(); - if (coprocessorClassName != null && newDesc.hasCoprocessor(coprocessorClassName)) { - builder.removeCoprocessor(coprocessorClassName); - } - if (coprocessorGCClassName != null && newDesc.hasCoprocessor(coprocessorGCClassName)) { - 
builder.removeCoprocessor(coprocessorGCClassName); - } - } - } - - // The priority for this co-processor should be set higher than the GlobalIndexChecker so that the read repair scans - // are intercepted by the TTLAwareRegionObserver and only the rows that are not ttl-expired are returned. - if (!SchemaUtil.isSystemTable(tableName)) { - if (!newDesc.hasCoprocessor(QueryConstants.PHOENIX_TTL_REGION_OBSERVER_CLASSNAME) && - isServerSideMaskingEnabled) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.PHOENIX_TTL_REGION_OBSERVER_CLASSNAME) - .setPriority(priority - 2) - .setProperties(Collections.emptyMap()) - .build()); - } - } - - if (Arrays.equals(tableName, SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, props).getName())) { - if (!newDesc.hasCoprocessor(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME)) { - builder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - } - } - - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - - private TransactionFactory.Provider getTransactionProvider(Map tableProps) { - TransactionFactory.Provider provider = (TransactionFactory.Provider)TableProperty.TRANSACTION_PROVIDER.getValue(tableProps); - return provider; - } - - private boolean doesPhoenixTableAlreadyExist(TableDescriptor existingDesc) { - //if the table descriptor already has Phoenix coprocs, we assume it's - //already gone through a Phoenix create statement once - if (existingDesc == null){ - return false; - } - boolean hasScanObserver = existingDesc.hasCoprocessor(QueryConstants.SCAN_REGION_OBSERVER_CLASSNAME); - boolean hasUnAggObserver = existingDesc.hasCoprocessor( - QueryConstants.UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME); - boolean hasGroupedObserver = existingDesc.hasCoprocessor( - QueryConstants.GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME); - boolean hasIndexObserver = existingDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME) - || existingDesc.hasCoprocessor(QueryConstants.INDEX_REGION_OBSERVER_CLASSNAME) - || existingDesc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME); - return hasScanObserver && hasUnAggObserver && hasGroupedObserver && hasIndexObserver; - } - - private static interface RetriableOperation { - boolean checkForCompletion() throws TimeoutException, IOException; - String getOperationName(); - } - - private void pollForUpdatedTableDescriptor(final Admin admin, final TableDescriptor newTableDescriptor, - final byte[] tableName) throws InterruptedException, TimeoutException { - checkAndRetry(new RetriableOperation() { - - @Override - public String getOperationName() { - return "UpdateOrNewTableDescriptor"; - } - - @Override - public boolean checkForCompletion() throws TimeoutException, IOException { - TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(tableName)); - return newTableDescriptor.equals(tableDesc); - } - }); - } - - private void checkAndRetry(RetriableOperation op) throws InterruptedException, TimeoutException { - int maxRetries = ConnectionQueryServicesImpl.this.props.getInt( - QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, - QueryServicesOptions.DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK); - long sleepInterval = ConnectionQueryServicesImpl.this.props - .getLong(QueryServices.DELAY_FOR_SCHEMA_UPDATE_CHECK, - QueryServicesOptions.DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK); - boolean success = false; - 
int numTries = 1; - PhoenixStopWatch watch = new PhoenixStopWatch(); - watch.start(); - do { - try { - success = op.checkForCompletion(); - } catch (Exception ex) { - // If we encounter any exception on the first or last try, propagate the exception and fail. - // Else, we swallow the exception and retry till we reach maxRetries. - if (numTries == 1 || numTries == maxRetries) { - watch.stop(); - TimeoutException toThrow = new TimeoutException("Operation " + op.getOperationName() - + " didn't complete because of exception. Time elapsed: " + watch.elapsedMillis()); - toThrow.initCause(ex); - throw toThrow; - } - } - numTries++; - Thread.sleep(sleepInterval); - } while (numTries < maxRetries && !success); - - watch.stop(); - - if (!success) { - throw new TimeoutException("Operation " + op.getOperationName() + " didn't complete within " - + watch.elapsedMillis() + " ms " - + "after trying " + numTries + "times."); - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Operation " - + op.getOperationName() - + " completed within " - + watch.elapsedMillis() - + "ms " - + "after trying " + numTries + " times." ); - } - } - } - - private boolean allowOnlineTableSchemaUpdate() { - return props.getBoolean( - QueryServices.ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, - QueryServicesOptions.DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE); - } - - /** - * Ensure that the HBase namespace is created/exists already - * @param schemaName Phoenix schema name for which we ensure existence of the HBase namespace - * @return true if we created the HBase namespace because it didn't already exist - * @throws SQLException If there is an exception creating the HBase namespace - */ - boolean ensureNamespaceCreated(String schemaName) throws SQLException { - SQLException sqlE = null; - boolean createdNamespace = false; - try (Admin admin = getAdmin()) { - if (!ClientUtil.isHBaseNamespaceAvailable(admin, schemaName)) { - NamespaceDescriptor namespaceDescriptor = - NamespaceDescriptor.create(schemaName).build(); - admin.createNamespace(namespaceDescriptor); - createdNamespace = true; - } - } catch (IOException e) { - sqlE = ClientUtil.parseServerException(e); - } finally { - if (sqlE != null) { throw sqlE; } - } - return createdNamespace; - } - - /** - * - * @param physicalTableName - * @param tableType - * @param props - * @param families - * @param splits - * @param modifyExistingMetaData - * @param isNamespaceMapped - * @param isDoNotUpgradePropSet - * @return true if table was created and false if it already exists - * @throws SQLException - */ - - private TableDescriptor ensureTableCreated(byte[] physicalTableName, byte[] parentPhysicalTableName, PTableType tableType, Map props, - List>> families, byte[][] splits, boolean modifyExistingMetaData, - boolean isNamespaceMapped, boolean isDoNotUpgradePropSet) throws SQLException { - SQLException sqlE = null; - TableDescriptor existingDesc = null; - boolean isMetaTable = SchemaUtil.isMetaTable(physicalTableName); - boolean tableExist = true; - try (Admin admin = getAdmin()) { - final String quorum = ZKConfig.getZKQuorumServersString(config); - final String znode = this.getProps().get(HConstants.ZOOKEEPER_ZNODE_PARENT); - boolean createdNamespace = false; - LOGGER.debug("Found quorum: " + quorum + ":" + znode); - - if (isMetaTable) { - if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) { - try { - // SYSTEM namespace needs to be created via HBase APIs because "CREATE SCHEMA" statement tries to write - // its metadata in SYSTEM:CATALOG table. 
Without SYSTEM namespace, SYSTEM:CATALOG table cannot be created - createdNamespace = ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME); - } catch (PhoenixIOException e) { - // We could either: - // 1) Not access the NS descriptor. The NS may or may not exist at this point - // 2) We could not create the NS - // Regardless of the case 1 or 2, if we eventually try to migrate SYSTEM tables to the SYSTEM - // namespace using the {@link ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps)} method, - // if the NS does not exist, we will error as expected, or - // if the NS does exist and tables are already mapped, the check will exit gracefully - } - if (AdminUtilWithFallback.tableExists(admin, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, false))) { - // SYSTEM.CATALOG exists, so at this point, we have 3 cases: - // 1) If server-side namespace mapping is disabled, drop the SYSTEM namespace if it was created - // above and throw Inconsistent namespace mapping exception - // 2) If server-side namespace mapping is enabled and SYSTEM.CATALOG needs to be upgraded, - // upgrade SYSTEM.CATALOG and also migrate SYSTEM tables to the SYSTEM namespace - // 3. If server-side namespace mapping is enabled and SYSTEM.CATALOG doesn't need to be - // upgraded, we still need to migrate SYSTEM tables to the SYSTEM namespace using the - // {@link ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps)} method (as part of - // {@link upgradeSystemTables(String, Properties)}) - try { - checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES); - } catch (SQLException possibleCompatException) { - // Handles Case 1: Drop the SYSTEM namespace in case it was created above - if (createdNamespace && possibleCompatException.getErrorCode() == - SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode()) { - ensureNamespaceDropped(QueryConstants.SYSTEM_SCHEMA_NAME); - } - // rethrow the SQLException - throw possibleCompatException; - } - // Thrown so we can force an upgrade which will just migrate SYSTEM tables to the SYSTEM namespace - throw new UpgradeRequiredException(MIN_SYSTEM_TABLE_TIMESTAMP); - } - } else if (AdminUtilWithFallback.tableExists(admin, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true))) { - // If SYSTEM:CATALOG exists, but client-side namespace mapping for SYSTEM tables is disabled, throw an exception - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES) - .setMessage("Cannot initiate connection as " - + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true) - + " is found but client does not have " - + IS_NAMESPACE_MAPPING_ENABLED + " enabled") - .build().buildException(); - } - // If DoNotUpgrade config is set only check namespace mapping and - // Client-server compatibility for system tables. 
- if (isDoNotUpgradePropSet) { - try { - checkClientServerCompatibility(SchemaUtil.getPhysicalName( - SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); - } catch (SQLException possibleCompatException) { - if (possibleCompatException.getCause() - instanceof org.apache.hadoop.hbase.TableNotFoundException) { - throw new UpgradeRequiredException(MIN_SYSTEM_TABLE_TIMESTAMP); - } - throw possibleCompatException; - } - return null; - } - } - - try { - existingDesc = admin.getDescriptor(TableName.valueOf(physicalTableName)); - } catch (org.apache.hadoop.hbase.TableNotFoundException e) { - tableExist = false; - if (tableType == PTableType.VIEW) { - String fullTableName = Bytes.toString(physicalTableName); - throw new ReadOnlyTableException( - "An HBase table for a VIEW must already exist", - SchemaUtil.getSchemaNameFromFullName(fullTableName), - SchemaUtil.getTableNameFromFullName(fullTableName)); - } - } - - TableDescriptorBuilder newDesc = generateTableDescriptor(physicalTableName, parentPhysicalTableName, existingDesc, tableType, props, families, - splits, isNamespaceMapped); - - if (!tableExist) { - if (SchemaUtil.isSystemTable(physicalTableName) && !isUpgradeRequired() && (!isAutoUpgradeEnabled || isDoNotUpgradePropSet)) { - // Disallow creating the SYSTEM.CATALOG or SYSTEM:CATALOG HBase table - throw new UpgradeRequiredException(); - } - if (newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) { - newDesc.setRegionSplitPolicyClassName(QueryConstants.INDEX_REGION_SPLIT_POLICY_CLASSNAME); - } - try { - if (splits == null) { - admin.createTable(newDesc.build()); - } else { - admin.createTable(newDesc.build(), splits); - } - } catch (TableExistsException e) { - // We can ignore this, as it just means that another client beat us - // to creating the HBase metadata. 
- if (isMetaTable && !isUpgradeRequired()) { - checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); - } - return null; - } - if (isMetaTable && !isUpgradeRequired()) { - try { - checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, - this.getProps()).getName()); - } catch (SQLException possibleCompatException) { - if (possibleCompatException.getErrorCode() == - SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode()) { - try { - // In case we wrongly created SYSTEM.CATALOG or SYSTEM:CATALOG, we should drop it - disableTable(admin, TableName.valueOf(physicalTableName)); - admin.deleteTable(TableName.valueOf(physicalTableName)); - } catch (org.apache.hadoop.hbase.TableNotFoundException ignored) { - // Ignore this since it just means that another client with a similar set of - // incompatible configs and conditions beat us to dropping the SYSCAT HBase table - } - if (createdNamespace && - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) { - // We should drop the SYSTEM namespace which we just created, since - // server-side namespace mapping is disabled - ensureNamespaceDropped(QueryConstants.SYSTEM_SCHEMA_NAME); - } - } - // rethrow the SQLException - throw possibleCompatException; - } - - } - return null; - } else { - if (isMetaTable && !isUpgradeRequired()) { - checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); - } else { - for (Pair> family: families) { - if ((Bytes.toString(family.getFirst()) - .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX))) { - newDesc.setRegionSplitPolicyClassName(QueryConstants.INDEX_REGION_SPLIT_POLICY_CLASSNAME); - break; - } - } - } - - if (!modifyExistingMetaData) { - return existingDesc; // Caller already knows that no metadata was changed - } - TransactionFactory.Provider provider = getTransactionProvider(props); - boolean willBeTx = provider != null; - // If mapping an existing table as transactional, set property so that existing - // data is correctly read. - if (willBeTx) { - if (!equalTxCoprocessor(provider, existingDesc, newDesc.build())) { - // Cannot switch between different providers - if (hasTxCoprocessor(existingDesc)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SWITCH_TXN_PROVIDERS) - .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) - .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build().buildException(); - } - if (provider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.ALTER_NONTX_TO_TX)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL) - .setMessage(provider.name()) - .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) - .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build().buildException(); - } - newDesc.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE.toString()); - } - } else { - // If we think we're creating a non transactional table when it's already - // transactional, don't allow. 
- if (hasTxCoprocessor(existingDesc)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX) - .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName)) - .setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build().buildException(); - } - newDesc.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA)); - } - TableDescriptor result = newDesc.build(); - if (existingDesc.equals(result)) { - return null; // Indicate that no metadata was changed - } - - // Do not call modifyTable for SYSTEM tables - if (tableType != PTableType.SYSTEM) { - modifyTable(physicalTableName, newDesc.build(), true); - } - return result; - } - - } catch (IOException e) { - sqlE = ClientUtil.parseServerException(e); - } catch (InterruptedException e) { - // restore the interrupt status - Thread.currentThread().interrupt(); - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException(); - } catch (TimeoutException e) { - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT).setRootCause(e.getCause() != null ? e.getCause() : e).build().buildException(); - } finally { - if (sqlE != null) { - throw sqlE; - } - } - return null; // will never make it here - } - - /** - * If given TableDescriptorBuilder belongs to SYSTEM.TASK and if the table - * still does not have split policy setup as SystemTaskSplitPolicy, set - * it up and return true, else return false. This method is expected - * to return true only if it updated split policy (which should happen - * once during initial upgrade). - * - * @param tdBuilder table descriptor builder - * @return return true if split policy of SYSTEM.TASK is updated to - * SystemTaskSplitPolicy. - * @throws SQLException If SYSTEM.TASK already has custom split policy - * set up other than SystemTaskSplitPolicy - */ - @VisibleForTesting - public boolean updateAndConfirmSplitPolicyForTask( - final TableDescriptorBuilder tdBuilder) throws SQLException { - boolean isTaskTable = false; - TableName sysTaskTable = SchemaUtil - .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, - props); - if (tdBuilder.build().getTableName().equals(sysTaskTable)) { - isTaskTable = true; - } - if (isTaskTable) { - final String actualSplitPolicy = tdBuilder.build() - .getRegionSplitPolicyClassName(); - final String targetSplitPolicy = QueryConstants.SYSTEM_TASK_SPLIT_POLICY_CLASSNAME; - if (!targetSplitPolicy.equals(actualSplitPolicy)) { - if (StringUtils.isNotEmpty(actualSplitPolicy)) { - // Rare possibility. 
pre-4.16 create DDL query - // doesn't have any split policy setup for SYSTEM.TASK - throw new InvalidRegionSplitPolicyException( - QueryConstants.SYSTEM_SCHEMA_NAME, SYSTEM_TASK_TABLE, - ImmutableList.of("null", targetSplitPolicy), - actualSplitPolicy); - } - tdBuilder.setRegionSplitPolicyClassName(targetSplitPolicy); - return true; - } - } - return false; - } - - private static boolean hasTxCoprocessor(TableDescriptor descriptor) { - for (TransactionFactory.Provider provider : TransactionFactory.Provider.available()) { - String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); - if (coprocessorClassName != null && descriptor.hasCoprocessor(coprocessorClassName)) { - return true; - } - } - return false; - } - - private static boolean equalTxCoprocessor(TransactionFactory.Provider provider, TableDescriptor existingDesc, TableDescriptor newDesc) { - String coprocessorClassName = provider.getTransactionProvider().getCoprocessorClassName(); - return (coprocessorClassName != null && existingDesc.hasCoprocessor(coprocessorClassName) && newDesc.hasCoprocessor(coprocessorClassName)); -} - - private void modifyTable(byte[] tableName, TableDescriptor newDesc, boolean shouldPoll) throws IOException, - InterruptedException, TimeoutException, SQLException { - TableName tn = TableName.valueOf(tableName); - try (Admin admin = getAdmin()) { - if (!allowOnlineTableSchemaUpdate()) { - disableTable(admin, tn); - admin.modifyTable(newDesc); // TODO: Update to TableDescriptor - admin.enableTable(tn); - } else { - admin.modifyTable(newDesc); // TODO: Update to TableDescriptor - if (shouldPoll) { - pollForUpdatedTableDescriptor(admin, newDesc, tableName); - } - } - } - } - - private static boolean hasIndexWALCodec(Long serverVersion) { - if (serverVersion == null) { - return true; - } - return MetaDataUtil.decodeHasIndexWALCodec(serverVersion); - } - - - private void checkClientServerCompatibility(byte[] metaTable) throws SQLException, - AccessDeniedException { - StringBuilder errorMessage = new StringBuilder(); - int minHBaseVersion = Integer.MAX_VALUE; - boolean isTableNamespaceMappingEnabled = false; - long systemCatalogTimestamp = Long.MAX_VALUE; - long startTime = 0L; - long systemCatalogRpcTime; - Map results; - Table ht = null; - - try { - try { - startTime = EnvironmentEdgeManager.currentTimeMillis(); - ht = this.getTable(metaTable); - final byte[] tableKey = PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; - results = - ht.coprocessorService(MetaDataService.class, tableKey, tableKey, - new Batch.Call() { - @Override - public GetVersionResponse call(MetaDataService instance) - throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback<>(); - GetVersionRequest.Builder builder = - GetVersionRequest.newBuilder(); - builder.setClientVersion( - VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, - PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.getVersion(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, NUM_SYSTEM_TABLE_RPC_FAILURES, 1); - throw ClientUtil.parseServerException(e); - } finally { - systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - 
TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, - TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); - } - for (Map.Entry result : results.entrySet()) { - // This is the "phoenix.jar" is in-place, but server is out-of-sync with client case. - GetVersionResponse versionResponse = result.getValue(); - long serverJarVersion = versionResponse.getVersion(); - isTableNamespaceMappingEnabled |= MetaDataUtil.decodeTableNamespaceMappingEnabled(serverJarVersion); - - MetaDataUtil.ClientServerCompatibility compatibility = MetaDataUtil.areClientAndServerCompatible(serverJarVersion); - if (!compatibility.getIsCompatible()) { - if (compatibility.getErrorCode() == SQLExceptionCode.OUTDATED_JARS.getErrorCode()) { - errorMessage.append("Newer Phoenix clients can't communicate with older " - + "Phoenix servers. Client version: " - + MetaDataProtocol.CURRENT_CLIENT_VERSION - + "; Server version: " - + getServerVersion(serverJarVersion) - + " The following servers require an updated " - + QueryConstants.DEFAULT_COPROCESS_JAR_NAME - + " to be put in the classpath of HBase."); - } else if (compatibility.getErrorCode() == SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) { - errorMessage.append("Major version of client is less than that of the server. Client version: " - + MetaDataProtocol.CURRENT_CLIENT_VERSION - + "; Server version: " - + getServerVersion(serverJarVersion)); - } - } - hasIndexWALCodec = hasIndexWALCodec && hasIndexWALCodec(serverJarVersion); - if (minHBaseVersion > MetaDataUtil.decodeHBaseVersion(serverJarVersion)) { - minHBaseVersion = MetaDataUtil.decodeHBaseVersion(serverJarVersion); - } - // In case this is the first time connecting to this cluster, the system catalog table does not have an - // entry for itself yet, so we cannot get the timestamp and this will not be returned from the - // GetVersionResponse message object - if (versionResponse.hasSystemCatalogTimestamp()) { - systemCatalogTimestamp = systemCatalogTimestamp < versionResponse.getSystemCatalogTimestamp() ? 
- systemCatalogTimestamp: versionResponse.getSystemCatalogTimestamp(); - } - - if (compatibility.getErrorCode() != 0) { - if (compatibility.getErrorCode() == SQLExceptionCode.OUTDATED_JARS.getErrorCode()) { - errorMessage.setLength(errorMessage.length()-1); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.OUTDATED_JARS).setMessage(errorMessage.toString()).build().buildException(); - } else if (compatibility.getErrorCode() == SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR).setMessage(errorMessage.toString()).build().buildException(); - } - } - } - if (isTableNamespaceMappingEnabled != SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, - getProps())) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES) - .setMessage( - "Ensure that config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED - + " is consistent on client and server.") - .build().buildException(); } - lowestClusterHBaseVersion = minHBaseVersion; - } finally { - if (ht != null) { - try { - ht.close(); - } catch (IOException e) { - LOGGER.warn("Could not close HTable", e); - } - } - } - - if (systemCatalogTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP) { - throw new UpgradeRequiredException(systemCatalogTimestamp); - } - } - - private String getServerVersion(long serverJarVersion) { - return (VersionUtil.decodeMajorVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion)) + "." - + VersionUtil.decodeMinorVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion)) + "." - + VersionUtil.decodePatchVersion(MetaDataUtil.decodePhoenixVersion(serverJarVersion))); - } - - /** - * Invoke the SYSTEM.CHILD_LINK metadata coprocessor endpoint - * @param parentTableKey key corresponding to the parent of the view - * @param callable used to invoke the coprocessor endpoint to write links from a parent to its child view - * @return result of invoking the coprocessor endpoint - * @throws SQLException - */ - private MetaDataMutationResult childLinkMetaDataCoprocessorExec(byte[] parentTableKey, - Batch.Call callable) throws SQLException { - try (Table htable = this.getTable(SchemaUtil.getPhysicalName( - PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, this.getProps()).getName())) - { - final Map results = - htable.coprocessorService(ChildLinkMetaDataService.class, parentTableKey, parentTableKey, callable); - assert(results.size() == 1); - MetaDataResponse result = results.values().iterator().next(); - return MetaDataMutationResult.constructFromProto(result); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } catch (Throwable t) { - throw new SQLException(t); - } - } - - @VisibleForTesting - protected RpcController getController() { - return getController(SYSTEM_CATALOG_HBASE_TABLE_NAME); - } - - /** - * If configured to use the server-server metadata handler pool for server side connections, - * use the {@link org.apache.hadoop.hbase.ipc.controller.ServerToServerRpcController} - * else use the ordinary handler pool {@link ServerRpcController} - * - * return the rpcController to use - * @return - */ - @VisibleForTesting - protected RpcController getController(TableName systemTableName) { - if (serverSideRPCControllerFactory != null) { - ServerToServerRpcController controller = serverSideRPCControllerFactory.newController(); - controller.setPriority(systemTableName); - return controller; - } else { - return new ServerRpcController(); - } - } - - @VisibleForTesting - public 
ConnectionLimiter getConnectionLimiter() { - return connectionLimiter; - } - /** - * helper function to return the exception from the RPC - * @param controller - * @throws IOException - */ - - private void checkForRemoteExceptions(RpcController controller) throws IOException { - if (controller != null) { - if (controller instanceof ServerRpcController) { - if (((ServerRpcController)controller).getFailedOn() != null) { - throw ((ServerRpcController)controller).getFailedOn(); - } - } else { - if (((HBaseRpcController)controller).getFailed() != null) { - throw ((HBaseRpcController)controller).getFailed(); - } - } - } - } - - /** - * Invoke meta data coprocessor with one retry if the key was found to not be in the regions - * (due to a table split) - */ - private MetaDataMutationResult metaDataCoprocessorExec(String tableName, byte[] tableKey, - Batch.Call callable) throws SQLException { - return metaDataCoprocessorExec(tableName, tableKey, callable, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); - } - - /** - * Invoke meta data coprocessor with one retry if the key was found to not be in the regions - * (due to a table split) - */ - private MetaDataMutationResult metaDataCoprocessorExec(String tableName, byte[] tableKey, - Batch.Call callable, byte[] systemTableName) throws SQLException { - Map results; - try { - boolean success = false; - boolean retried = false; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (true) { - if (retried) { - ((ClusterConnection) connection).relocateRegion( - SchemaUtil.getPhysicalName(systemTableName, this.getProps()), tableKey); - } - - Table ht = this.getTable(SchemaUtil.getPhysicalName(systemTableName, this.getProps()).getName()); - try { - results = ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable); - - assert(results.size() == 1); - MetaDataResponse result = results.values().iterator().next(); - if (result.getReturnCode() == MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION - || result.getReturnCode() == MetaDataProtos.MutationCode.FUNCTION_NOT_IN_REGION) { - if (retried) return MetaDataMutationResult.constructFromProto(result); - retried = true; - continue; - } - success = true; - return MetaDataMutationResult.constructFromProto(result); - } finally { - long systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, - TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); - if (success) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, - NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); - } else { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, - NUM_SYSTEM_TABLE_RPC_FAILURES, 1); - } - Closeables.closeQuietly(ht); - } - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } catch (Throwable t) { - throw new SQLException(t); - } - } - - // Our property values are translated using toString, so we need to "string-ify" this. 
- private static final String TRUE_BYTES_AS_STRING = Bytes.toString(PDataType.TRUE_BYTES); - - private void ensureViewIndexTableCreated(byte[] physicalTableName, byte[] parentPhysicalTableName, Map tableProps, - List>> families, byte[][] splits, long timestamp, - boolean isNamespaceMapped) throws SQLException { - byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); - - tableProps.put(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME, TRUE_BYTES_AS_STRING); - TableDescriptor desc = ensureTableCreated(physicalIndexName, parentPhysicalTableName, PTableType.TABLE, tableProps, families, splits, - true, isNamespaceMapped, false); - if (desc != null) { - if (!Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) { - String fullTableName = Bytes.toString(physicalIndexName); - throw new TableAlreadyExistsException( - SchemaUtil.getSchemaNameFromFullName(fullTableName), - SchemaUtil.getTableNameFromFullName(fullTableName), - "Unable to create shared physical table for indexes on views."); - } - } - } - - private void disableTable(Admin admin, TableName tableName) throws IOException { - try { - admin.disableTable(tableName); - } catch (TableNotEnabledException e) { - LOGGER.info("Table already disabled, continuing with next steps", e); - } - } - - private boolean ensureViewIndexTableDropped(byte[] physicalTableName, long timestamp) throws SQLException { - byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); - boolean wasDeleted = false; - try (Admin admin = getAdmin()) { - try { - TableName physicalIndexTableName = TableName.valueOf(physicalIndexName); - TableDescriptor desc = admin.getDescriptor(physicalIndexTableName); - if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) { - final ReadOnlyProps props = this.getProps(); - final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - if (dropMetadata) { - disableTable(admin, physicalIndexTableName); - admin.deleteTable(physicalIndexTableName); - clearTableRegionCache(physicalIndexTableName); - wasDeleted = true; - } else { - this.tableStatsCache.invalidateAll(desc); - } - } - } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { - // Ignore, as we may never have created a view index table - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - return wasDeleted; - } - - private boolean ensureLocalIndexTableDropped(byte[] physicalTableName, long timestamp) throws SQLException { - TableDescriptor desc = null; - boolean wasDeleted = false; - try (Admin admin = getAdmin()) { - try { - desc = admin.getDescriptor(TableName.valueOf(physicalTableName)); - for (byte[] fam : desc.getColumnFamilyNames()) { - this.tableStatsCache.invalidate(new GuidePostsKey(physicalTableName, fam)); - } - final ReadOnlyProps props = this.getProps(); - final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - if (dropMetadata) { - List columnFamiles = new ArrayList<>(); - for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { - if (cf.getNameAsString().startsWith( - QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - columnFamiles.add(cf.getNameAsString()); - } - } - for (String cf: columnFamiles) { - admin.deleteColumnFamily(TableName.valueOf(physicalTableName), Bytes.toBytes(cf)); - } - clearTableRegionCache(TableName.valueOf(physicalTableName)); - wasDeleted = true; - } - } catch 
(org.apache.hadoop.hbase.TableNotFoundException ignore) { - // Ignore, as we may never have created a view index table - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - return wasDeleted; - } - - @Override - public MetaDataMutationResult createTable(final List tableMetaData, final byte[] physicalTableName, - PTableType tableType, Map tableProps, - final List>> families, - byte[][] splits, boolean isNamespaceMapped, - final boolean allocateIndexId, final boolean isDoNotUpgradePropSet, - final PTable parentTable) throws SQLException { - List childLinkMutations = MetaDataUtil.removeChildLinkMutations(tableMetaData); - byte[][] rowKeyMetadata = new byte[3][]; - Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetaData); - byte[] key = m.getRow(); - SchemaUtil.getVarChars(key, rowKeyMetadata); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - byte[] physicalTableNameBytes = physicalTableName != null ? physicalTableName : - SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, isNamespaceMapped).getBytes(); - boolean localIndexTable = false; - for (Pair> family: families) { - if (Bytes.toString(family.getFirst()).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - localIndexTable = true; - break; - } - } - if ((tableType != PTableType.CDC) && ( - (tableType == PTableType.VIEW && physicalTableName != null) || - (tableType != PTableType.VIEW && (physicalTableName == null || localIndexTable)) - )) { - // For views this will ensure that metadata already exists - // For tables and indexes, this will create the metadata if it doesn't already exist - ensureTableCreated(physicalTableNameBytes, null, tableType, tableProps, families, splits, true, - isNamespaceMapped, isDoNotUpgradePropSet); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - if (tableType == PTableType.INDEX) { // Index on view - // Physical index table created up front for multi tenant - // TODO: if viewIndexId is Short.MIN_VALUE, then we don't need to attempt to create it - if (physicalTableName != null) { - if (!localIndexTable && !MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) { - // For view index, the physical table name is _IDX_+ logical table name format - ensureViewIndexTableCreated(tenantIdBytes.length == 0 ? 
null : PNameFactory.newName(tenantIdBytes), - physicalTableName, MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped); - } - } - } else if (tableType == PTableType.TABLE && MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) { // Create view index table up front for multi tenant tables - ptr.set(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); - MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME_BYTES, kvBuilder, ptr); - List>> familiesPlusDefault = null; - for (Pair> family : families) { - byte[] cf = family.getFirst(); - if (Bytes.compareTo(cf, 0, cf.length, ptr.get(), ptr.getOffset(),ptr.getLength()) == 0) { - familiesPlusDefault = families; - break; - } - } - // Don't override if default family already present - if (familiesPlusDefault == null) { - byte[] defaultCF = ByteUtil.copyKeyBytesIfNecessary(ptr); - // Only use splits if table is salted, otherwise it may not be applicable - // Always add default column family, as we don't know in advance if we'll need it - familiesPlusDefault = Lists.newArrayList(families); - familiesPlusDefault.add(new Pair>(defaultCF,Collections.emptyMap())); - } - ensureViewIndexTableCreated( - physicalTableNameBytes, physicalTableNameBytes, tableProps, familiesPlusDefault, - MetaDataUtil.isSalted(m, kvBuilder, ptr) ? splits : null, - MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped); - } - - // Avoid the client-server RPC if this is not a view creation - if (!childLinkMutations.isEmpty()) { - // Send mutations for parent-child links to SYSTEM.CHILD_LINK - // We invoke this using rowKey available in the first element - // of childLinkMutations. - final byte[] rowKey = childLinkMutations.get(0).getRow(); - final RpcController controller = getController(PhoenixDatabaseMetaData.SYSTEM_LINK_HBASE_TABLE_NAME); - final MetaDataMutationResult result = - childLinkMetaDataCoprocessorExec(rowKey, - new ChildLinkMetaDataServiceCallBack(controller, childLinkMutations)); - - switch (result.getMutationCode()) { - case UNABLE_TO_CREATE_CHILD_LINK: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_CREATE_CHILD_LINK) - .setSchemaName(Bytes.toString(schemaBytes)) - .setTableName(Bytes.toString(physicalTableNameBytes)).build().buildException(); - default: - break; - } - } - - // Send the remaining metadata mutations to SYSTEM.CATALOG - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); - return metaDataCoprocessorExec(SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback<>(); - CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); - for (Mutation m : tableMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - if (allocateIndexId) { - builder.setAllocateIndexId(allocateIndexId); - } - if (parentTable!=null) { - builder.setParentTable(PTableImpl.toProto(parentTable)); - } - CreateTableRequest build = builder.build(); - instance.createTable(controller, build, rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - } - - @Override - public 
MetaDataMutationResult getTable(final PName tenantId, final byte[] schemaBytes, - final byte[] tableBytes, final long tableTimestamp, final long clientTimestamp) throws SQLException { - final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); - return metaDataCoprocessorExec( SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - GetTableRequest.Builder builder = GetTableRequest.newBuilder(); - builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); - builder.setSchemaName(ByteStringer.wrap(schemaBytes)); - builder.setTableName(ByteStringer.wrap(tableBytes)); - builder.setTableTimestamp(tableTimestamp); - builder.setClientTimestamp(clientTimestamp); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.getTable(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - } - - @Override - public MetaDataMutationResult dropTable(final List tableMetaData, final PTableType tableType, - final boolean cascade) throws SQLException { - byte[][] rowKeyMetadata = new byte[3][]; - SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes); - final MetaDataMutationResult result = metaDataCoprocessorExec( - SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - DropTableRequest.Builder builder = DropTableRequest.newBuilder(); - for (Mutation m : tableMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setTableType(tableType.getSerializedValue()); - builder.setCascade(cascade); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.dropTable(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - - final MutationCode code = result.getMutationCode(); - switch(code) { - case TABLE_ALREADY_EXISTS: - ReadOnlyProps props = this.getProps(); - boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - PTable table = result.getTable(); - if (dropMetadata) { - flushParentPhysicalTable(table); - dropTables(result.getTableNamesToDelete()); - } else { - invalidateTableStats(result.getTableNamesToDelete()); - } - long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData); - if (tableType == PTableType.TABLE) { - byte[] physicalName = table.getPhysicalName().getBytes(); - ensureViewIndexTableDropped(physicalName, timestamp); - ensureLocalIndexTableDropped(physicalName, timestamp); - tableStatsCache.invalidateAll(table); - } - break; - default: - break; - } - return result; - } - - /* - * PHOENIX-2915 while dropping index, flush data table to avoid stale WAL edits of indexes 1. Flush parent table if - * dropping view has indexes 2. Dropping table indexes 3. 
Dropping view indexes - */ - private void flushParentPhysicalTable(PTable table) throws SQLException { - byte[] parentPhysicalTableName = null; - if (PTableType.VIEW == table.getType()) { - if (!table.getIndexes().isEmpty()) { - parentPhysicalTableName = table.getPhysicalName().getBytes(); - } - } else if (PTableType.INDEX == table.getType()) { - PTable parentTable = getTable(table.getTenantId(), table.getParentName().getString(), HConstants.LATEST_TIMESTAMP); - parentPhysicalTableName = parentTable.getPhysicalName().getBytes(); - } - if (parentPhysicalTableName != null) { - try { - flushTable(parentPhysicalTableName); - } catch (PhoenixIOException ex) { - if (ex.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) { - LOGGER.info("Flushing physical parent table " + Bytes.toString(parentPhysicalTableName) + " of " + table.getName() - .getString() + " failed with : " + ex + " with cause: " + ex.getCause() - + " since the table has already been dropped"); - } else { - throw ex; - } - } - } - } - - @Override - public MetaDataMutationResult dropFunction(final List functionData, final boolean ifExists) throws SQLException { - byte[][] rowKeyMetadata = new byte[2][]; - byte[] key = functionData.get(0).getRow(); - SchemaUtil.getVarChars(key, rowKeyMetadata); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; - byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes); - - final MetaDataMutationResult result = metaDataCoprocessorExec(null, functionKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - DropFunctionRequest.Builder builder = DropFunctionRequest.newBuilder(); - for (Mutation m : functionData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setIfExists(ifExists); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.dropFunction(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }, SYSTEM_FUNCTION_NAME_BYTES); - return result; - } - - private void invalidateTableStats(final List tableNamesToDelete) throws SQLException { - if (tableNamesToDelete != null) { - for (byte[] tableName : tableNamesToDelete) { - TableName tn = TableName.valueOf(tableName); - TableDescriptor htableDesc = this.getTableDescriptor(tableName); - tableStatsCache.invalidateAll(htableDesc); - } - } - } - - private void dropTable(byte[] tableNameToDelete) throws SQLException { - dropTables(Collections.singletonList(tableNameToDelete)); - } - - @VisibleForTesting - void dropTables(final List tableNamesToDelete) throws SQLException { - SQLException sqlE = null; - try (Admin admin = getAdmin()) { - if (tableNamesToDelete != null) { - for ( byte[] tableName : tableNamesToDelete ) { - try { - TableName tn = TableName.valueOf(tableName); - TableDescriptor htableDesc = this.getTableDescriptor(tableName); - disableTable(admin, tn); - admin.deleteTable(tn); - tableStatsCache.invalidateAll(htableDesc); - clearTableRegionCache(TableName.valueOf(tableName)); - } catch (TableNotFoundException ignore) { - } - } - } - - } catch (IOException e) { - sqlE = 
ClientUtil.parseServerException(e); - } finally { - if (sqlE != null) { - throw sqlE; - } - } - } - - private static Map createPropertiesMap(Map htableProps) { - Map props = Maps.newHashMapWithExpectedSize(htableProps.size()); - for (Map.Entry entry : htableProps.entrySet()) { - Bytes key = entry.getKey(); - Bytes value = entry.getValue(); - props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()), - Bytes.toString(value.get(), value.getOffset(), value.getLength())); - } - return props; - } - - private void ensureViewIndexTableCreated(PName tenantId, byte[] physicalIndexTableName, long timestamp, - boolean isNamespaceMapped) throws SQLException { - String name = Bytes - .toString(SchemaUtil.getParentTableNameFromIndexTable(physicalIndexTableName, - MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) - .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); - PTable table = getTable(tenantId, name, timestamp); - ensureViewIndexTableCreated(table, timestamp, isNamespaceMapped); - } - - private PTable getTable(PName tenantId, String fullTableName, long timestamp) throws SQLException { - PTable table; - try { - PMetaData metadata = latestMetaData; - throwConnectionClosedIfNullMetaData(); - table = metadata.getTableRef(new PTableKey(tenantId, fullTableName)).getTable(); - if (table.getTimeStamp() >= timestamp) { // Table in cache is newer than client timestamp which shouldn't be - // the case - throw new TableNotFoundException(table.getSchemaName().getString(), table.getTableName().getString()); - } - } catch (TableNotFoundException e) { - byte[] schemaName = Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(fullTableName)); - byte[] tableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(fullTableName)); - MetaDataMutationResult result = this.getTable(tenantId, schemaName, tableName, HConstants.LATEST_TIMESTAMP, - timestamp); - table = result.getTable(); - if (table == null) { throw e; } - } - return table; - } - - private void ensureViewIndexTableCreated(PTable table, long timestamp, boolean isNamespaceMapped) - throws SQLException { - byte[] physicalTableName = table.getPhysicalName().getBytes(); - TableDescriptor htableDesc = this.getTableDescriptor(physicalTableName); - List>> families = Lists.newArrayListWithExpectedSize(Math.max(1, table.getColumnFamilies().size() + 1)); - - // Create all column families that the parent table has - for (PColumnFamily family : table.getColumnFamilies()) { - byte[] familyName = family.getName().getBytes(); - Map familyProps = createPropertiesMap(htableDesc.getColumnFamily(familyName).getValues()); - families.add(new Pair<>(familyName, familyProps)); - } - // Always create default column family, because we don't know in advance if we'll - // need it for an index with no covered columns. - byte[] defaultFamilyName = table.getDefaultFamilyName() == null ? 
- QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : table.getDefaultFamilyName().getBytes(); - families.add(new Pair<>(defaultFamilyName, Collections.emptyMap())); - - byte[][] splits = null; - if (table.getBucketNum() != null) { - splits = SaltingUtil.getSalteByteSplitPoints(table.getBucketNum()); - } - - // Transfer over table values into tableProps - // TODO: encapsulate better - Map tableProps = createPropertiesMap(htableDesc.getValues()); - tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, table.isTransactional()); - tableProps.put(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, table.isImmutableRows()); - - // We got the properties of the physical base table but we need to create the view index table using logical name - byte[] viewPhysicalTableName = - MetaDataUtil.getNamespaceMappedName(table.getName(), isNamespaceMapped) - .getBytes(StandardCharsets.UTF_8); - ensureViewIndexTableCreated(viewPhysicalTableName, physicalTableName, tableProps, families, splits, timestamp, isNamespaceMapped); - } - - @Override - public MetaDataMutationResult addColumn(final List tableMetaData, - PTable table, - final PTable parentTable, - final PTable transformingNewTable, - Map>> stmtProperties, - Set colFamiliesForPColumnsToBeAdded, - List columns) throws SQLException { - List>> families = new ArrayList<>(stmtProperties.size()); - Map tableProps = new HashMap<>(); - Set tableDescriptors = Collections.emptySet(); - boolean nonTxToTx = false; - - Map oldToNewTableDescriptors = - separateAndValidateProperties(table, stmtProperties, colFamiliesForPColumnsToBeAdded, tableProps); - Set origTableDescriptors = new HashSet<>(oldToNewTableDescriptors.keySet()); - - TableDescriptor baseTableOrigDesc = this.getTableDescriptor(table.getPhysicalName().getBytes()); - TableDescriptor tableDescriptor = oldToNewTableDescriptors.get(baseTableOrigDesc); - - if (tableDescriptor != null) { - tableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size()); - nonTxToTx = Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA)); - /* - * If the table was transitioned from non transactional to transactional, we need - * to also transition the index tables. - */ - - TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableDescriptor); - if (nonTxToTx) { - updateDescriptorForTx(table, tableProps, tableDescriptorBuilder, Boolean.TRUE.toString(), - tableDescriptors, origTableDescriptors, oldToNewTableDescriptors); - tableDescriptor = tableDescriptorBuilder.build(); - tableDescriptors.add(tableDescriptor); - } else { - tableDescriptors = new HashSet<>(oldToNewTableDescriptors.values()); - } - } - - boolean success = false; - boolean metaDataUpdated = !tableDescriptors.isEmpty(); - boolean pollingNeeded = !(!tableProps.isEmpty() && families.isEmpty() && colFamiliesForPColumnsToBeAdded.isEmpty()); - MetaDataMutationResult result = null; - try { - boolean modifyHTable = true; - if (table.getType() == PTableType.VIEW) { - boolean canViewsAddNewCF = props.getBoolean(QueryServices.ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, - QueryServicesOptions.DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE); - // When adding a column to a view, base physical table should only be modified when new column families are being added. - modifyHTable = canViewsAddNewCF && !existingColumnFamiliesForBaseTable(table.getPhysicalName()).containsAll(colFamiliesForPColumnsToBeAdded); - } - - // Special case for call during drop table to ensure that the empty column family exists. 
- // In this, case we only include the table header row, as until we add schemaBytes and tableBytes - // as args to this function, we have no way of getting them in this case. - // TODO: change to if (tableMetaData.isEmpty()) once we pass through schemaBytes and tableBytes - // Also, could be used to update table descriptor property values on ALTER TABLE t SET prop=xxx - if ((tableMetaData.isEmpty()) || (tableMetaData.size() == 1 && tableMetaData.get(0).isEmpty())) { - if (modifyHTable) { - sendHBaseMetaData(tableDescriptors, pollingNeeded); - } - return new MetaDataMutationResult(MutationCode.NO_OP, EnvironmentEdgeManager.currentTimeMillis(), table); - } - byte[][] rowKeyMetaData = new byte[3][]; - PTableType tableType = table.getType(); - - Mutation m = tableMetaData.get(0); - byte[] rowKey = m.getRow(); - SchemaUtil.getVarChars(rowKey, rowKeyMetaData); - byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaBytes = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - final boolean addingColumns = columns != null && columns.size() > 0; - result = metaDataCoprocessorExec( - SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); - for (Mutation m : tableMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - if (parentTable!=null) - builder.setParentTable(PTableImpl.toProto(parentTable)); - if (transformingNewTable!=null) { - builder.setTransformingNewTable(PTableImpl.toProto(transformingNewTable)); - } - builder.setAddingColumns(addingColumns); - instance.addColumn(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - - if (result.getMutationCode() == MutationCode.COLUMN_NOT_FOUND || result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { // Success - success = true; - // Flush the table if transitioning DISABLE_WAL from TRUE to FALSE - if ( MetaDataUtil.getMutationValue(m,PhoenixDatabaseMetaData.DISABLE_WAL_BYTES, kvBuilder, ptr) - && Boolean.FALSE.equals(PBoolean.INSTANCE.toObject(ptr))) { - flushTable(table.getPhysicalName().getBytes()); - } - - if (tableType == PTableType.TABLE) { - // If we're changing MULTI_TENANT to true or false, create or drop the view index table - if (MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, kvBuilder, ptr)){ - long timestamp = MetaDataUtil.getClientTimeStamp(m); - if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength()))) { - this.ensureViewIndexTableCreated(table, timestamp,table.isNamespaceMapped()); - } else { - this.ensureViewIndexTableDropped(table.getPhysicalName().getBytes(), timestamp); - } - } - } - } - - if (modifyHTable && result.getMutationCode() != 
MutationCode.UNALLOWED_TABLE_MUTATION) { - sendHBaseMetaData(tableDescriptors, pollingNeeded); - } - } finally { - // If we weren't successful with our metadata update - // and we've already pushed the HBase metadata changes to the server - // and we've tried to go from non transactional to transactional - // then we must undo the metadata change otherwise the table will - // no longer function correctly. - // Note that if this fails, we're in a corrupt state. - if (!success && metaDataUpdated && nonTxToTx) { - sendHBaseMetaData(origTableDescriptors, pollingNeeded); - } - } - return result; - } - - private void updateDescriptorForTx(PTable table, Map tableProps, TableDescriptorBuilder tableDescriptorBuilder, - String txValue, Set descriptorsToUpdate, Set origDescriptors, - Map oldToNewTableDescriptors) throws SQLException { - byte[] physicalTableName = table.getPhysicalName().getBytes(); - try (Admin admin = getAdmin()) { - TableDescriptor baseDesc = admin.getDescriptor(TableName.valueOf(physicalTableName)); - boolean hasOldIndexing = baseDesc.hasCoprocessor(QueryConstants.INDEXER_CLASSNAME); - setTransactional(physicalTableName, tableDescriptorBuilder, table.getType(), txValue, tableProps, hasOldIndexing); - Map indexTableProps; - if (txValue == null) { - indexTableProps = Collections.emptyMap(); - } else { - indexTableProps = Maps.newHashMapWithExpectedSize(1); - indexTableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.valueOf(txValue)); - indexTableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, tableProps.get(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER)); - } - for (PTable index : table.getIndexes()) { - TableDescriptor origIndexDesc = admin.getDescriptor(TableName.valueOf(index.getPhysicalName().getBytes())); - TableDescriptor intermedIndexDesc = origIndexDesc; - // If we already wished to make modifications to this index table descriptor previously, we use the updated - // table descriptor to carry out further modifications - // See {@link ConnectionQueryServicesImpl#separateAndValidateProperties(PTable, Map, Set, Map)} - if (origDescriptors.contains(origIndexDesc)) { - intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); - // Remove any previous modification for this table descriptor because we will add - // the combined modification done in this method as well - descriptorsToUpdate.remove(intermedIndexDesc); - } else { - origDescriptors.add(origIndexDesc); - } - TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(intermedIndexDesc); - if (index.getColumnFamilies().isEmpty()) { - byte[] dataFamilyName = SchemaUtil.getEmptyColumnFamily(table); - byte[] indexFamilyName = SchemaUtil.getEmptyColumnFamily(index); - ColumnFamilyDescriptorBuilder indexColDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(indexFamilyName)); - ColumnFamilyDescriptor tableColDescriptor = tableDescriptorBuilder.build().getColumnFamily(dataFamilyName); - indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions()); - indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), - tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); - indexDescriptorBuilder.removeColumnFamily(indexFamilyName); - indexDescriptorBuilder.setColumnFamily(indexColDescriptor.build()); - } else { - for (PColumnFamily family : index.getColumnFamilies()) { - byte[] familyName = family.getName().getBytes(); - ColumnFamilyDescriptorBuilder indexColDescriptor = 
ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName)); - ColumnFamilyDescriptor tableColDescriptor = tableDescriptorBuilder.build().getColumnFamily(familyName); - indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions()); - indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), - tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); - indexDescriptorBuilder.removeColumnFamily(familyName); - indexDescriptorBuilder.setColumnFamily(indexColDescriptor.build()); - } - } - setTransactional(index.getPhysicalName().getBytes(), indexDescriptorBuilder, index.getType(), txValue, indexTableProps, hasOldIndexing); - descriptorsToUpdate.add(indexDescriptorBuilder.build()); - } - try { - TableDescriptor origIndexDesc = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(physicalTableName))); - TableDescriptor intermedIndexDesc = origIndexDesc; - if (origDescriptors.contains(origIndexDesc)) { - intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); - descriptorsToUpdate.remove(intermedIndexDesc); - } else { - origDescriptors.add(origIndexDesc); - } - TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(intermedIndexDesc); - setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder); - setTransactional(MetaDataUtil.getViewIndexPhysicalName(physicalTableName), indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps, hasOldIndexing); - descriptorsToUpdate.add(indexDescriptorBuilder.build()); - } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { - // Ignore, as we may never have created a view index table - } - try { - TableDescriptor origIndexDesc = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName))); - TableDescriptor intermedIndexDesc = origIndexDesc; - if (origDescriptors.contains(origIndexDesc)) { - intermedIndexDesc = oldToNewTableDescriptors.get(origIndexDesc); - descriptorsToUpdate.remove(intermedIndexDesc); - } else { - origDescriptors.add(origIndexDesc); - } - TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(intermedIndexDesc); - setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder); - setTransactional(MetaDataUtil.getViewIndexPhysicalName(physicalTableName), indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps, hasOldIndexing); - descriptorsToUpdate.add(indexDescriptorBuilder.build()); - } catch (org.apache.hadoop.hbase.TableNotFoundException ignore) { - // Ignore, as we may never have created a local index - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - - private void setSharedIndexMaxVersion(PTable table, TableDescriptor tableDescriptor, - TableDescriptorBuilder indexDescriptorBuilder) { - if (table.getColumnFamilies().isEmpty()) { - byte[] familyName = SchemaUtil.getEmptyColumnFamily(table); - ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName)); - ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName); - indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions()); - indexColDescriptorBuilder.setValue( Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL),tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); - 
indexDescriptorBuilder.removeColumnFamily(familyName); - indexDescriptorBuilder.setColumnFamily(indexColDescriptorBuilder.build()); - } else { - for (PColumnFamily family : table.getColumnFamilies()) { - byte[] familyName = family.getName().getBytes(); - ColumnFamilyDescriptor indexColDescriptor = indexDescriptorBuilder.build().getColumnFamily(familyName); - if (indexColDescriptor != null) { - ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName); - ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(indexColDescriptor); - indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions()); - indexColDescriptorBuilder.setValue( Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL),tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL))); - indexDescriptorBuilder.removeColumnFamily(familyName); - indexDescriptorBuilder.setColumnFamily(indexColDescriptorBuilder.build()); - } - } - } - } - - private void sendHBaseMetaData(Set tableDescriptors, boolean pollingNeeded) throws SQLException { - SQLException sqlE = null; - for (TableDescriptor descriptor : tableDescriptors) { - try { - modifyTable(descriptor.getTableName().getName(), descriptor, pollingNeeded); - } catch (IOException e) { - sqlE = ClientUtil.parseServerException(e); - } catch (InterruptedException e) { - // restore the interrupt status - Thread.currentThread().interrupt(); - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException(); - } catch (TimeoutException e) { - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT).setRootCause(e.getCause() != null ? e.getCause() : e).build().buildException(); - } finally { - if (sqlE != null) { - throw sqlE; - } - } - } - } - private void setTransactional(byte[] physicalTableName, TableDescriptorBuilder tableDescriptorBuilder, PTableType tableType, String txValue, Map tableProps, boolean hasOldIndexing) throws SQLException { - if (txValue == null) { - tableDescriptorBuilder.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA)); - } else { - tableDescriptorBuilder.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, txValue); - } - this.addCoprocessors(physicalTableName, tableDescriptorBuilder, tableType, tableProps, null, hasOldIndexing); - } - - private Map separateAndValidateProperties(PTable table, - Map>> properties, Set colFamiliesForPColumnsToBeAdded, - Map tableProps) throws SQLException { - Map> stmtFamiliesPropsMap = new HashMap<>(properties.size()); - Map commonFamilyProps = new HashMap<>(); - boolean addingColumns = colFamiliesForPColumnsToBeAdded != null && !colFamiliesForPColumnsToBeAdded.isEmpty(); - HashSet existingColumnFamilies = existingColumnFamilies(table); - Map> allFamiliesProps = new HashMap<>(existingColumnFamilies.size()); - boolean isTransactional = table.isTransactional(); - boolean willBeTransactional = false; - boolean isOrWillBeTransactional = isTransactional; - Integer newTTL = null; - Integer newPhoenixTTL = null; - Integer newReplicationScope = null; - KeepDeletedCells newKeepDeletedCells = null; - TransactionFactory.Provider txProvider = null; - for (String family : properties.keySet()) { - List> propsList = properties.get(family); - if (propsList != null && propsList.size() > 0) { - Map colFamilyPropsMap = new HashMap<>(propsList.size()); - for (Pair prop : propsList) { - String propName = prop.getFirst(); - Object propValue = prop.getSecond(); - 
if ((MetaDataUtil.isHTableProperty(propName) || TableProperty.isPhoenixTableProperty(propName)) && addingColumns) { - // setting HTable and PhoenixTable properties while adding a column is not allowed. - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_TABLE_PROPERTY_ADD_COLUMN) - .setMessage("Property: " + propName) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build() - .buildException(); - } - if (MetaDataUtil.isHTableProperty(propName)) { - // Can't have a column family name for a property that's an HTableProperty - if (!family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY) - .setMessage("Column Family: " + family + ", Property: " + propName) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build() - .buildException(); - } - tableProps.put(propName, propValue); - } else { - if (TableProperty.isPhoenixTableProperty(propName)) { - TableProperty tableProp = TableProperty.valueOf(propName); - tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), table.getType()); - if (propName.equals(TTL)) { - if (table.getType() == PTableType.INDEX) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) - .setMessage("Property: " + propName).build() - .buildException(); - } - //Handle FOREVER and NONE case - propValue = convertForeverAndNoneTTLValue(propValue, isPhoenixTTLEnabled()); - //If Phoenix level TTL is enabled we are using TTL as phoenix - //Table level property. - if (!isPhoenixTTLEnabled()) { - newTTL = ((Number) propValue).intValue(); - //Even though TTL is really a HColumnProperty we treat it - //specially. We enforce that all CFs have the same TTL. - commonFamilyProps.put(propName, propValue); - } else { - //Setting this here just to check if we need to throw Exception - //for Transaction's SET_TTL Feature. - newPhoenixTTL = ((Number) propValue).intValue(); - } - } else if (propName.equals(PhoenixDatabaseMetaData.TRANSACTIONAL) && Boolean.TRUE.equals(propValue)) { - willBeTransactional = isOrWillBeTransactional = true; - tableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, propValue); - } else if (propName.equals(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER) && propValue != null) { - willBeTransactional = isOrWillBeTransactional = true; - tableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.TRUE); - txProvider = (Provider)TableProperty.TRANSACTION_PROVIDER.getValue(propValue); - tableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, txProvider); - } - } else { - if (MetaDataUtil.isHColumnProperty(propName)) { - if (table.getType() == PTableType.INDEX && MetaDataUtil.propertyNotAllowedToBeOutOfSync(propName)) { - // We disallow index tables from overriding TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE, - // in order to avoid situations where indexes are not in sync with their data table - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) - .setMessage("Property: " + propName).build() - .buildException(); - } - if (family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { - if (propName.equals(KEEP_DELETED_CELLS)) { - newKeepDeletedCells = - Boolean.valueOf(propValue.toString()) ? 
KeepDeletedCells.TRUE : KeepDeletedCells.FALSE; - } - if (propName.equals(REPLICATION_SCOPE)) { - newReplicationScope = ((Number)propValue).intValue(); - } - commonFamilyProps.put(propName, propValue); - } else if (MetaDataUtil.propertyNotAllowedToBeOutOfSync(propName)) { - // Don't allow specifying column families for TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE. - // These properties can only be applied for all column families of a table and can't be column family specific. - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY) - .setMessage("Property: " + propName).build() - .buildException(); - } else { - colFamilyPropsMap.put(propName, propValue); - } - } else { - // invalid property - neither of HTableProp, HColumnProp or PhoenixTableProp - // FIXME: This isn't getting triggered as currently a property gets evaluated - // as HTableProp if its neither HColumnProp or PhoenixTableProp. - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_PROPERTY) - .setMessage("Column Family: " + family + ", Property: " + propName) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build() - .buildException(); - } - } - } - } - if (isOrWillBeTransactional && (newTTL != null || newPhoenixTTL != null)) { - TransactionFactory.Provider isOrWillBeTransactionProvider = txProvider == null ? table.getTransactionProvider() : txProvider; - if (isOrWillBeTransactionProvider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.SET_TTL)) { - throw new SQLExceptionInfo.Builder(PhoenixTransactionProvider.Feature.SET_TTL.getCode()) - .setMessage(isOrWillBeTransactionProvider.name()) - .setSchemaName(table.getSchemaName().getString()) - .setTableName(table.getTableName().getString()) - .build() - .buildException(); - } - } - if (!colFamilyPropsMap.isEmpty()) { - stmtFamiliesPropsMap.put(family, colFamilyPropsMap); - } - - } - } - commonFamilyProps = Collections.unmodifiableMap(commonFamilyProps); - boolean isAddingPkColOnly = colFamiliesForPColumnsToBeAdded.size() == 1 && colFamiliesForPColumnsToBeAdded.contains(null); - if (!commonFamilyProps.isEmpty()) { - if (!addingColumns) { - // Add the common family props to all existing column families - for (String existingColFamily : existingColumnFamilies) { - Map m = new HashMap<>(commonFamilyProps.size()); - m.putAll(commonFamilyProps); - allFamiliesProps.put(existingColFamily, m); - } - } else { - // Add the common family props to the column families of the columns being added - for (String colFamily : colFamiliesForPColumnsToBeAdded) { - if (colFamily != null) { - // only set properties for key value columns - Map m = new HashMap<>(commonFamilyProps.size()); - m.putAll(commonFamilyProps); - allFamiliesProps.put(colFamily, m); - } else if (isAddingPkColOnly) { - // Setting HColumnProperty for a pk column is invalid - // because it will be part of the row key and not a key value column family. - // However, if both pk cols as well as key value columns are getting added - // together, then its allowed. The above if block will make sure that we add properties - // only for the kv cols and not pk cols. - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE) - .build().buildException(); - } - } - } - } - - // Now go through the column family properties specified in the statement - // and merge them with the common family properties. 
- for (String f : stmtFamiliesPropsMap.keySet()) { - if (!addingColumns && !existingColumnFamilies.contains(f)) { - String schemaNameStr = table.getSchemaName()==null?null:table.getSchemaName().getString(); - String tableNameStr = table.getTableName()==null?null:table.getTableName().getString(); - throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, f); - } - if (addingColumns && !colFamiliesForPColumnsToBeAdded.contains(f)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_PROPERTY_FOR_COLUMN_NOT_ADDED).build().buildException(); - } - Map commonProps = allFamiliesProps.get(f); - Map stmtProps = stmtFamiliesPropsMap.get(f); - if (commonProps != null) { - if (stmtProps != null) { - // merge common props with statement props for the family - commonProps.putAll(stmtProps); - } - } else { - // if no common props were specified, then assign family specific props - if (stmtProps != null) { - allFamiliesProps.put(f, stmtProps); - } - } - } - - // case when there is a column family being added but there are no props - // For ex - in DROP COLUMN when a new empty CF needs to be added since all - // the columns of the existing empty CF are getting dropped. Or the case - // when one is just adding a column for a column family like this: - // ALTER TABLE ADD CF.COL - for (String cf : colFamiliesForPColumnsToBeAdded) { - if (cf != null && allFamiliesProps.get(cf) == null) { - allFamiliesProps.put(cf, new HashMap()); - } - } - - if (table.getColumnFamilies().isEmpty() && !addingColumns && !commonFamilyProps.isEmpty()) { - allFamiliesProps.put(Bytes.toString(table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : table.getDefaultFamilyName().getBytes() ), commonFamilyProps); - } - - // Views are not allowed to have any of these properties. - if (table.getType() == PTableType.VIEW && (!stmtFamiliesPropsMap.isEmpty() || !commonFamilyProps.isEmpty() || !tableProps.isEmpty())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build() - .buildException(); - } - - TableDescriptorBuilder newTableDescriptorBuilder = null; - TableDescriptor origTableDescriptor = null; - // Store all old to new table descriptor mappings for the table as well as its global indexes - Map tableAndIndexDescriptorMappings = Collections.emptyMap(); - if (!allFamiliesProps.isEmpty() || !tableProps.isEmpty()) { - tableAndIndexDescriptorMappings = Maps.newHashMapWithExpectedSize(3 + table.getIndexes().size()); - TableDescriptor existingTableDescriptor = origTableDescriptor = this.getTableDescriptor(table.getPhysicalName().getBytes()); - newTableDescriptorBuilder = TableDescriptorBuilder.newBuilder(existingTableDescriptor); - if (!tableProps.isEmpty()) { - // add all the table properties to the new table descriptor - for (Entry entry : tableProps.entrySet()) { - newTableDescriptorBuilder.setValue(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null); - } - } - if (addingColumns) { - // Make sure that TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE for the new column family to be added stays in sync - // with the table's existing column families. Note that we use the new values for these properties in case we are - // altering their values. 
We also propagate these altered values to existing column families and indexes on the table below - setSyncedPropsForNewColumnFamilies(allFamiliesProps, table, newTableDescriptorBuilder, newTTL, newKeepDeletedCells, newReplicationScope); - } - if (newTTL != null || newKeepDeletedCells != null || newReplicationScope != null) { - // Set properties to be kept in sync on all table column families of this table, even if they are not referenced here - setSyncedPropsForUnreferencedColumnFamilies(this.getTableDescriptor(table.getPhysicalName().getBytes()), - allFamiliesProps, newTTL, newKeepDeletedCells, newReplicationScope); - } - - Integer defaultTxMaxVersions = null; - if (isOrWillBeTransactional) { - // Calculate default for max versions - Map emptyFamilyProps = allFamiliesProps.get(SchemaUtil.getEmptyColumnFamilyAsString(table)); - if (emptyFamilyProps != null) { - defaultTxMaxVersions = (Integer)emptyFamilyProps.get(MAX_VERSIONS); - } - if (defaultTxMaxVersions == null) { - if (isTransactional) { - defaultTxMaxVersions = newTableDescriptorBuilder.build() - .getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions(); - } else { - defaultTxMaxVersions = - this.getProps().getInt( - QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL); - } - } - if (willBeTransactional) { - // Set VERSIONS for all column families when transitioning to transactional - for (PColumnFamily family : table.getColumnFamilies()) { - if (!allFamiliesProps.containsKey(family.getName().getString())) { - Map familyProps = Maps.newHashMapWithExpectedSize(1); - familyProps.put(MAX_VERSIONS, defaultTxMaxVersions); - allFamiliesProps.put(family.getName().getString(), familyProps); - } - } - } - // Set transaction context TTL property based on HBase property if we're - // transitioning to become transactional or setting TTL on - // an already transactional table. - int ttl = getTTL(table, newTableDescriptorBuilder.build(), newTTL); - if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) { - for (Map.Entry> entry : allFamiliesProps.entrySet()) { - Map props = entry.getValue(); - if (props == null) { - allFamiliesProps.put(entry.getKey(), new HashMap<>()); - props = allFamiliesProps.get(entry.getKey()); - } else { - props = new HashMap<>(props); - } - // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? - props.put(PhoenixTransactionContext.PROPERTY_TTL, ttl); - // Remove HBase TTL if we're not transitioning an existing table to become transactional - // or if the existing transactional table wasn't originally non transactional. 
- if (!willBeTransactional && !Boolean.valueOf(newTableDescriptorBuilder.build().getValue(PhoenixTransactionContext.READ_NON_TX_DATA))) { - props.remove(TTL); - } - entry.setValue(props); - } - } - } - for (Entry> entry : allFamiliesProps.entrySet()) { - Map familyProps = entry.getValue(); - if (isOrWillBeTransactional) { - if (!familyProps.containsKey(MAX_VERSIONS)) { - familyProps.put(MAX_VERSIONS, defaultTxMaxVersions); - } - } - byte[] cf = Bytes.toBytes(entry.getKey()); - ColumnFamilyDescriptor colDescriptor = newTableDescriptorBuilder.build().getColumnFamily(cf); - if (colDescriptor == null) { - // new column family - colDescriptor = generateColumnFamilyDescriptor(new Pair<>(cf, familyProps), table.getType()); - newTableDescriptorBuilder.setColumnFamily(colDescriptor); - } else { - ColumnFamilyDescriptorBuilder colDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(colDescriptor); - modifyColumnFamilyDescriptor(colDescriptorBuilder, familyProps); - colDescriptor = colDescriptorBuilder.build(); - newTableDescriptorBuilder.removeColumnFamily(cf); - newTableDescriptorBuilder.setColumnFamily(colDescriptor); - } - if (isOrWillBeTransactional) { - checkTransactionalVersionsValue(colDescriptor); - } - } - } - if (origTableDescriptor != null && newTableDescriptorBuilder != null) { - // Add the table descriptor mapping for the base table - tableAndIndexDescriptorMappings.put(origTableDescriptor, newTableDescriptorBuilder.build()); - } - - Map applyPropsToAllIndexColFams = getNewSyncedPropsMap(newTTL, newKeepDeletedCells, newReplicationScope); - // Copy properties that need to be synced from the default column family of the base table to - // the column families of each of its indexes (including indexes on this base table's views) - // and store those table descriptor mappings as well - setSyncedPropertiesForTableIndexes(table, tableAndIndexDescriptorMappings, applyPropsToAllIndexColFams); - return tableAndIndexDescriptorMappings; - } - - private void checkTransactionalVersionsValue(ColumnFamilyDescriptor colDescriptor) throws SQLException { - int maxVersions = colDescriptor.getMaxVersions(); - if (maxVersions <= 1) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE) - .setFamilyName(colDescriptor.getNameAsString()) - .build().buildException(); - } - } - - private HashSet existingColumnFamiliesForBaseTable(PName baseTableName) throws TableNotFoundException { - throwConnectionClosedIfNullMetaData(); - PTable table = latestMetaData.getTableRef(new PTableKey(null, baseTableName.getString())).getTable(); - return existingColumnFamilies(table); - } - - public HashSet existingColumnFamilies(PTable table) { - List cfs = table.getColumnFamilies(); - HashSet cfNames = new HashSet<>(cfs.size()); - for (PColumnFamily cf : table.getColumnFamilies()) { - cfNames.add(cf.getName().getString()); - } - return cfNames; - } - - public static KeepDeletedCells getKeepDeletedCells(PTable table, TableDescriptor tableDesc, - KeepDeletedCells newKeepDeletedCells) throws SQLException { - // If we're setting KEEP_DELETED_CELLS now, then use that value. Otherwise, use the empty column family value - return (newKeepDeletedCells != null) ? - newKeepDeletedCells : - tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getKeepDeletedCells(); - } - - public static int getReplicationScope(PTable table, TableDescriptor tableDesc, - Integer newReplicationScope) throws SQLException { - // If we're setting replication scope now, then use that value. 
Otherwise, use the empty column family value - return (newReplicationScope != null) ? - newReplicationScope : - tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getScope(); - } - - public static int getTTL(PTable table, TableDescriptor tableDesc, Integer newTTL) throws SQLException { - // If we're setting TTL now, then use that value. Otherwise, use empty column family value - return (newTTL != null) ? - newTTL : - tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getTimeToLive(); - } - - public static Object convertForeverAndNoneTTLValue(Object propValue, boolean isPhoenixTTLEnabled) { - //Handle FOREVER and NONE value for TTL at HBase level TTL. - if (propValue instanceof String) { - String strValue = (String) propValue; - if ("FOREVER".equalsIgnoreCase(strValue)) { - propValue = HConstants.FOREVER; - } else if ("NONE".equalsIgnoreCase(strValue)) { - propValue = isPhoenixTTLEnabled ? TTL_NOT_DEFINED : HConstants.FOREVER; - } - } - return propValue; - } - - /** - * Keep the TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE properties of new column families - * in sync with the existing column families. Note that we use the new values for these properties in case they - * are passed from our alter table command, if not, we use the default column family's value for each property - * See {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} - * @param allFamiliesProps Map of all column family properties - * @param table original table - * @param tableDescBuilder new table descriptor builder - * @param newTTL new value of TTL - * @param newKeepDeletedCells new value of KEEP_DELETED_CELLS - * @param newReplicationScope new value of REPLICATION_SCOPE - * @throws SQLException - */ - private void setSyncedPropsForNewColumnFamilies(Map> allFamiliesProps, PTable table, - TableDescriptorBuilder tableDescBuilder, Integer newTTL, KeepDeletedCells newKeepDeletedCells, - Integer newReplicationScope) throws SQLException { - if (!allFamiliesProps.isEmpty()) { - int ttl = getTTL(table, tableDescBuilder.build(), newTTL); - int replicationScope = getReplicationScope(table, tableDescBuilder.build(), newReplicationScope); - KeepDeletedCells keepDeletedCells = getKeepDeletedCells(table, tableDescBuilder.build(), newKeepDeletedCells); - for (Map.Entry> entry : allFamiliesProps.entrySet()) { - Map props = entry.getValue(); - if (props == null) { - allFamiliesProps.put(entry.getKey(), new HashMap<>()); - props = allFamiliesProps.get(entry.getKey()); - } - props.put(TTL, ttl); - props.put(KEEP_DELETED_CELLS, keepDeletedCells); - props.put(REPLICATION_SCOPE, replicationScope); - } - } - } - - private void setPropIfNotNull(Map propMap, String propName, Object propVal) { - if (propName!= null && propVal != null) { - propMap.put(propName, propVal); - } - } - - private Map getNewSyncedPropsMap(Integer newTTL, KeepDeletedCells newKeepDeletedCells, Integer newReplicationScope) { - Map newSyncedProps = Maps.newHashMapWithExpectedSize(3); - setPropIfNotNull(newSyncedProps, TTL, newTTL); - setPropIfNotNull(newSyncedProps,KEEP_DELETED_CELLS, newKeepDeletedCells); - setPropIfNotNull(newSyncedProps, REPLICATION_SCOPE, newReplicationScope); - return newSyncedProps; - } - - /** - * Set the new values for properties that are to be kept in sync amongst those column families of the table which are - * not referenced in the context of our alter table command, including the local index column family if it exists - * See {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} - * 
@param tableDesc original table descriptor - * @param allFamiliesProps Map of all column family properties - * @param newTTL new value of TTL - * @param newKeepDeletedCells new value of KEEP_DELETED_CELLS - * @param newReplicationScope new value of REPLICATION_SCOPE - * @return - */ - private void setSyncedPropsForUnreferencedColumnFamilies(TableDescriptor tableDesc, Map> allFamiliesProps, - Integer newTTL, KeepDeletedCells newKeepDeletedCells, Integer newReplicationScope) { - for (ColumnFamilyDescriptor family: tableDesc.getColumnFamilies()) { - if (!allFamiliesProps.containsKey(family.getNameAsString())) { - allFamiliesProps.put(family.getNameAsString(), - getNewSyncedPropsMap(newTTL, newKeepDeletedCells, newReplicationScope)); - } - } - } - - /** - * Set properties to be kept in sync for global indexes of a table, as well as - * the physical table corresponding to indexes created on views of a table - * See {@link MetaDataUtil#SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES} and - * @param table base table - * @param tableAndIndexDescriptorMappings old to new table descriptor mappings - * @param applyPropsToAllIndexesDefaultCF new properties to apply to all index column families - * @throws SQLException - */ - private void setSyncedPropertiesForTableIndexes(PTable table, - Map tableAndIndexDescriptorMappings, - Map applyPropsToAllIndexesDefaultCF) throws SQLException { - if (applyPropsToAllIndexesDefaultCF == null || applyPropsToAllIndexesDefaultCF.isEmpty()) { - return; - } - - for (PTable indexTable: table.getIndexes()) { - if (indexTable.getIndexType() == PTable.IndexType.LOCAL) { - // local index tables are already handled when we sync all column families of a base table - continue; - } - TableDescriptor origIndexDescriptor = this.getTableDescriptor(indexTable.getPhysicalName().getBytes()); - TableDescriptorBuilder newIndexDescriptorBuilder = TableDescriptorBuilder.newBuilder(origIndexDescriptor); - - byte[] defaultIndexColFam = SchemaUtil.getEmptyColumnFamily(indexTable); - ColumnFamilyDescriptorBuilder indexDefaultColDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(origIndexDescriptor.getColumnFamily(defaultIndexColFam)); - modifyColumnFamilyDescriptor(indexDefaultColDescriptorBuilder, applyPropsToAllIndexesDefaultCF); - newIndexDescriptorBuilder.removeColumnFamily(defaultIndexColFam); - newIndexDescriptorBuilder.setColumnFamily(indexDefaultColDescriptorBuilder.build()); - tableAndIndexDescriptorMappings.put(origIndexDescriptor, newIndexDescriptorBuilder.build()); - } - // Also keep properties for the physical view index table in sync - String viewIndexName = MetaDataUtil.getViewIndexPhysicalName(table.getName(), table.isNamespaceMapped()); - if (!Strings.isNullOrEmpty(viewIndexName)) { - try { - TableDescriptor origViewIndexTableDescriptor = this.getTableDescriptor(Bytes.toBytes(viewIndexName)); - TableDescriptorBuilder newViewIndexDescriptorBuilder = - TableDescriptorBuilder.newBuilder(origViewIndexTableDescriptor); - for (ColumnFamilyDescriptor cfd: origViewIndexTableDescriptor.getColumnFamilies()) { - ColumnFamilyDescriptorBuilder newCfd = - ColumnFamilyDescriptorBuilder.newBuilder(cfd); - modifyColumnFamilyDescriptor(newCfd, applyPropsToAllIndexesDefaultCF); - newViewIndexDescriptorBuilder.removeColumnFamily(cfd.getName()); - newViewIndexDescriptorBuilder.setColumnFamily(newCfd.build()); - } - tableAndIndexDescriptorMappings.put(origViewIndexTableDescriptor, newViewIndexDescriptorBuilder.build()); - } catch (TableNotFoundException ignore) { - // Ignore since this 
means that a view index table does not exist for this table - } - } - } - - @Override - public MetaDataMutationResult dropColumn(final List tableMetaData, - final PTableType tableType, - final PTable parentTable) throws SQLException { - byte[][] rowKeyMetadata = new byte[3][]; - SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes); - MetaDataMutationResult result = metaDataCoprocessorExec( - SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - DropColumnRequest.Builder builder = DropColumnRequest.newBuilder(); - for (Mutation m : tableMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - if (parentTable!=null) - builder.setParentTable(PTableImpl.toProto(parentTable)); - instance.dropColumn(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - final MutationCode code = result.getMutationCode(); - switch(code) { - case TABLE_ALREADY_EXISTS: - final ReadOnlyProps props = this.getProps(); - final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - if (dropMetadata) { - dropTables(result.getTableNamesToDelete()); - } else { - invalidateTableStats(result.getTableNamesToDelete()); - } - break; - default: - break; - } - return result; - - } - - private PhoenixConnection removeNotNullConstraint(PhoenixConnection oldMetaConnection, String schemaName, String tableName, long timestamp, String columnName) throws SQLException { - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - SQLException sqlE = null; - try { - String dml = "UPSERT INTO " + SYSTEM_CATALOG_NAME + " (" + PhoenixDatabaseMetaData.TENANT_ID + "," - + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + "," - + PhoenixDatabaseMetaData.COLUMN_NAME + "," - + PhoenixDatabaseMetaData.NULLABLE + ") VALUES (null, ?, ?, ?, ?)"; - PreparedStatement stmt = metaConnection.prepareStatement(dml); - stmt.setString(1, schemaName); - stmt.setString(2, tableName); - stmt.setString(3, columnName); - stmt.setInt(4, ResultSetMetaData.columnNullable); - stmt.executeUpdate(); - metaConnection.commit(); - } catch (NewerTableAlreadyExistsException e) { - LOGGER.warn("Table already modified at this timestamp," + - " so assuming column already nullable: " + columnName); - } catch (SQLException e) { - LOGGER.warn("Add column failed due to:" + e); - sqlE = e; - } finally { - try { - 
oldMetaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } - if (sqlE != null) { - throw sqlE; - } - } - return metaConnection; - } - /** - * This closes the passed connection. - */ - private PhoenixConnection addColumn(PhoenixConnection oldMetaConnection, String tableName, long timestamp, String columns, boolean addIfNotExists) throws SQLException { - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - SQLException sqlE = null; - try { - metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns ); - } catch (NewerTableAlreadyExistsException e) { - LOGGER.warn("Table already modified at this timestamp," + - " so assuming add of these columns already done: " + columns); - } catch (SQLException e) { - LOGGER.warn("Add column failed due to:" + e); - sqlE = e; - } finally { - try { - oldMetaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } - if (sqlE != null) { - throw sqlE; - } - } - return metaConnection; - } - - /** - * Keeping this to use for further upgrades. This method closes the oldMetaConnection. - */ - private PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection, - String tableName, long timestamp, String columns) throws SQLException { - return addColumn(oldMetaConnection, tableName, timestamp, columns, true); - } - - private void copyDataFromPhoenixTTLtoTTL(PhoenixConnection oldMetaConnection) throws IOException { - //If ViewTTL is enabled then only copy values from PHOENIX_TTL Column to TTL Column - if (oldMetaConnection.getQueryServices().getConfiguration().getBoolean(PHOENIX_VIEW_TTL_ENABLED, - DEFAULT_PHOENIX_VIEW_TTL_ENABLED)) { - // Increase the timeouts so that the scan queries during Copy Data do not timeout - // on large SYSCAT Tables - Map options = new HashMap<>(); - options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - copyTTLValuesFromPhoenixTTLColumnToTTLColumn(oldMetaConnection, options); - } - - } - - private void moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(PhoenixConnection oldMetaConnection) throws IOException { - // Increase the timeouts so that the scan queries during Copy Data does not timeout - // on large SYSCAT Tables - Map options = new HashMap<>(); - options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - UpgradeUtil.moveHBaseLevelTTLToSYSCAT(oldMetaConnection, options); - } - - // Available for testing - protected long getSystemTableVersion() { - return MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP; - } - - - // Available for testing - protected void setUpgradeRequired() { - this.upgradeRequired.set(true); - } - - // Available for testing - protected boolean isInitialized() { - return initialized; - } - - // Available for testing - protected void setInitialized(boolean isInitialized) { 
- initialized = isInitialized; - } - - // Available for testing - protected String getSystemCatalogTableDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TABLE_METADATA); - } - - protected String getSystemSequenceTableDDL(int nSaltBuckets) { - String schema = String.format(setSystemDDLProperties(QueryConstants.CREATE_SEQUENCE_METADATA)); - return Sequence.getCreateTableStatement(schema, nSaltBuckets); - } - - // Available for testing - protected String getFunctionTableDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_FUNCTION_METADATA); - } - - // Available for testing - protected String getLogTableDDL() { - return setSystemLogDDLProperties(QueryConstants.CREATE_LOG_METADATA); - } - - private String setSystemLogDDLProperties(String ddl) { - return String.format(ddl, props.getInt(LOG_SALT_BUCKETS_ATTRIB, QueryServicesOptions.DEFAULT_LOG_SALT_BUCKETS)); - - } - - // Available for testing - protected String getChildLinkDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); - } - - protected String getMutexDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA); - } - - protected String getTaskDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TASK_METADATA); - } - - protected String getTransformDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TRANSFORM_METADATA); - } - - private String setSystemDDLProperties(String ddl) { - return String.format(ddl, - props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS), - props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); - } - - @Override - public void init(final String url, final Properties props) throws SQLException { - try { - PhoenixContextExecutor.call(new Callable() { - @Override - public Void call() throws Exception { - if (isInitialized()) { - if (initializationException != null) { - // Throw previous initialization exception, as we won't resuse this instance - throw initializationException; - } - return null; - } - synchronized (ConnectionQueryServicesImpl.this) { - if (isInitialized()) { - if (initializationException != null) { - // Throw previous initialization exception, as we won't resuse this instance - throw initializationException; - } - return null; - } - - checkClosed(); - boolean hConnectionEstablished = false; - boolean success = false; - try { - GLOBAL_QUERY_SERVICES_COUNTER.increment(); - LOGGER.info("An instance of ConnectionQueryServices was created."); - connection = openConnection(config); - hConnectionEstablished = true; - boolean lastDDLTimestampValidationEnabled - = getProps().getBoolean( - QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, - QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED); - if (lastDDLTimestampValidationEnabled) { - refreshLiveRegionServers(); - } - String skipSystemExistenceCheck = - props.getProperty(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK); - if (skipSystemExistenceCheck != null && - Boolean.valueOf(skipSystemExistenceCheck)) { - initialized = true; - success = true; - return null; - } - boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props); - Properties scnProps = PropertiesUtil.deepCopy(props); - scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, - Long.toString(getSystemTableVersion())); - scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB); - try (PhoenixConnection metaConnection = 
new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl, - scnProps)) { - try (Statement statement = - metaConnection.createStatement()) { - metaConnection.setRunningUpgrade(true); - statement.executeUpdate( - getSystemCatalogTableDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed - // timestamp. A TableAlreadyExistsException is not thrown, since the table only exists - // *after* this fixed timestamp. - } catch (TableAlreadyExistsException e) { - long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); - if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) { - setUpgradeRequired(); - } - } catch (PhoenixIOException e) { - boolean foundAccessDeniedException = false; - // when running spark/map reduce jobs the ADE might be wrapped - // in a RemoteException - if (inspectIfAnyExceptionInChain(e, Collections - .> singletonList(AccessDeniedException.class))) { - // Pass - LOGGER.warn("Could not check for Phoenix SYSTEM tables," + - " assuming they exist and are properly configured"); - checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName()); - success = true; - } else if (inspectIfAnyExceptionInChain(e, - Collections.> singletonList( - NamespaceNotFoundException.class))) { - // This exception is only possible if SYSTEM namespace mapping is enabled and SYSTEM namespace is missing - // It implies that SYSTEM tables are not created and hence we shouldn't provide a connection - AccessDeniedException ade = new AccessDeniedException("Insufficient permissions to create SYSTEM namespace and SYSTEM Tables"); - initializationException = ClientUtil.parseServerException(ade); - } else { - initializationException = e; - } - return null; - } catch (UpgradeRequiredException e) { - // This will occur in 2 cases: - // 1. when SYSTEM.CATALOG doesn't exists - // 2. when SYSTEM.CATALOG exists, but client and - // server-side namespace mapping is enabled so - // we need to migrate SYSTEM tables to the SYSTEM namespace - setUpgradeRequired(); - } - - if (!ConnectionQueryServicesImpl.this.upgradeRequired.get()) { - if (!isDoNotUpgradePropSet) { - createOtherSystemTables(metaConnection); - // In case namespace mapping is enabled and system table to - // system namespace mapping is also enabled, create an entry - // for the SYSTEM namespace in the SYSCAT table, so that - // GRANT/REVOKE commands can work with SYSTEM Namespace - createSchemaIfNotExistsSystemNSMappingEnabled(metaConnection); - } - } else if (isAutoUpgradeEnabled && !isDoNotUpgradePropSet) { - // Upgrade is required and we are allowed to automatically upgrade - upgradeSystemTables(url, props); - } else { - // We expect the user to manually run the "EXECUTE UPGRADE" command first. - LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' " - + "before any other command"); - } - } - success = true; - } catch (RetriableUpgradeException e) { - // Set success to true and don't set the exception as an initializationException, - // because otherwise the client won't be able to retry establishing the connection. 
- success = true; - throw e; - } catch (Exception e) { - if (e instanceof SQLException) { - initializationException = (SQLException) e; - } else { - // wrap every other exception into a SQLException - initializationException = new SQLException(e); - } - } finally { - if (success) { - scheduleRenewLeaseTasks(); - } - try { - if (!success && hConnectionEstablished) { - closeConnection(connection); - closeConnection(invalidateMetadataCacheConnection); - } - } catch (IOException e) { - SQLException ex = new SQLException(e); - if (initializationException != null) { - initializationException.setNextException(ex); - } else { - initializationException = ex; - } - } finally { - try { - if (initializationException != null) { - throw initializationException; - } - } finally { - setInitialized(true); - } - } - } - } - return null; - } - }); - } catch (Exception e) { - Throwables.propagateIfInstanceOf(e, SQLException.class); - Throwables.propagate(e); - } - } - - void createSysMutexTableIfNotExists(Admin admin) throws IOException { - try { - if (checkIfSysMutexExistsAndModifyTTLIfRequired(admin)) { - return; - } - final TableName mutexTableName = SchemaUtil.getPhysicalTableName( - SYSTEM_MUTEX_NAME, props); - TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(mutexTableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES) - .setTimeToLive(TTL_FOR_MUTEX).build()) - .build(); - admin.createTable(tableDesc); - } - catch (IOException e) { - if (inspectIfAnyExceptionInChain(e, Arrays.> asList( - AccessDeniedException.class, org.apache.hadoop.hbase.TableExistsException.class))) { - // Ignore TableExistsException as another client might beat us during upgrade. - // Ignore AccessDeniedException, as it may be possible underpriviliged user trying to use the connection - // which doesn't required upgrade. - LOGGER.debug("Ignoring exception while creating mutex table" + - " during connection initialization: " - + Throwables.getStackTraceAsString(e)); - } else { - throw e; - } - } - } - /** - * Check if the SYSTEM MUTEX table exists. If it does, ensure that its TTL is correct and if - * not, modify its table descriptor - * @param admin HBase admin - * @return true if SYSTEM MUTEX exists already and false if it needs to be created - * @throws IOException thrown if there is an error getting the table descriptor - */ - @VisibleForTesting - boolean checkIfSysMutexExistsAndModifyTTLIfRequired(Admin admin) throws IOException { - TableDescriptor htd; - try { - htd = admin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)); - } catch (org.apache.hadoop.hbase.TableNotFoundException ignored) { - try { - // Try with the namespace mapping name - htd = admin.getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, - SYSTEM_MUTEX_TABLE_NAME)); - } catch (org.apache.hadoop.hbase.TableNotFoundException ignored2) { - return false; - } - } - - // The SYSTEM MUTEX table already exists so check its TTL - if (htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES).getTimeToLive() != TTL_FOR_MUTEX) { - LOGGER.debug("SYSTEM MUTEX already appears to exist, but has the wrong TTL. 
" + - "Will modify the TTL"); - ColumnFamilyDescriptor hColFamDesc = ColumnFamilyDescriptorBuilder - .newBuilder(htd.getColumnFamily(SYSTEM_MUTEX_FAMILY_NAME_BYTES)) - .setTimeToLive(TTL_FOR_MUTEX) - .build(); - htd = TableDescriptorBuilder - .newBuilder(htd) - .modifyColumnFamily(hColFamDesc) - .build(); - admin.modifyTable(htd); - } else { - LOGGER.debug("SYSTEM MUTEX already appears to exist with the correct TTL, " + - "not creating it"); - } - return true; - } - - private boolean inspectIfAnyExceptionInChain(Throwable io, List> ioList) { - boolean exceptionToIgnore = false; - for (Throwable t : Throwables.getCausalChain(io)) { - for (Class exception : ioList) { - exceptionToIgnore |= isExceptionInstanceOf(t, exception); - } - if (exceptionToIgnore) { - break; - } - - } - return exceptionToIgnore; - } - - private boolean isExceptionInstanceOf(Throwable io, Class exception) { - return exception.isInstance(io) || (io instanceof RemoteException - && (((RemoteException)io).getClassName().equals(exception.getName()))); - } - - List getSystemTableNamesInDefaultNamespace(Admin admin) throws IOException { - return Lists.newArrayList(admin.listTableNames(Pattern.compile(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"))); // TODO: replace to pattern - } - - private void createOtherSystemTables(PhoenixConnection metaConnection) throws SQLException, IOException { - try { - metaConnection.createStatement().execute(getSystemSequenceTableDDL(nSequenceSaltBuckets)); - // When creating the table above, DDL statements are - // used. However, the CFD level properties are not set - // via DDL commands, hence we are explicitly setting - // few properties using the Admin API below. - updateSystemSequenceWithCacheOnWriteProps(metaConnection); - } catch (TableAlreadyExistsException e) { - nSequenceSaltBuckets = getSaltBuckets(e); - } - try { - metaConnection.createStatement().execute(QueryConstants.CREATE_STATS_TABLE_METADATA); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().execute(getFunctionTableDDL()); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().execute(getLogTableDDL()); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().executeUpdate(getChildLinkDDL()); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().executeUpdate(getMutexDDL()); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().executeUpdate(getTaskDDL()); - } catch (TableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement().executeUpdate(getTransformDDL()); - } catch (TableAlreadyExistsException ignore) {} - } - - /** - * Create an entry for the SYSTEM namespace in the SYSCAT table in case namespace mapping is enabled and system table - * to system namespace mapping is also enabled. If not enabled, this method returns immediately without doing anything - * @param metaConnection - * @throws SQLException - */ - private void createSchemaIfNotExistsSystemNSMappingEnabled(PhoenixConnection metaConnection) throws SQLException { - // HBase Namespace SYSTEM is assumed to be already created inside {@link ensureTableCreated(byte[], PTableType, - // Map, List>>, byte[][], boolean, boolean, boolean)}. - // This statement will create an entry for the SYSTEM namespace in the SYSCAT table, so that GRANT/REVOKE - // commands can work with SYSTEM Namespace. 
(See PHOENIX-4227 https://issues.apache.org/jira/browse/PHOENIX-4227) - if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, - ConnectionQueryServicesImpl.this.getProps())) { - try { - metaConnection.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " - + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA); - } catch (NewerSchemaAlreadyExistsException e) { - // Older clients with appropriate perms may try getting a new connection - // This results in NewerSchemaAlreadyExistsException, so we can safely ignore it here - } catch (PhoenixIOException e) { - if (!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class))) { - // Ignore ADE - } else { - throw e; - } - } - } - } - - /** - * Upgrade the SYSCAT schema if required - * @param metaConnection - * @param currentServerSideTableTimeStamp - * @return Phoenix connection object - * @throws SQLException - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException - */ - // Available for testing - protected PhoenixConnection upgradeSystemCatalogIfRequired(PhoenixConnection metaConnection, - long currentServerSideTableTimeStamp) throws SQLException, IOException, TimeoutException, InterruptedException { - String columnsToAdd = ""; - // This will occur if we have an older SYSTEM.CATALOG and we need to update it to - // include any new columns we've added. - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) { - // We know that we always need to add the STORE_NULLS column for 4.3 release - columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS - + " " + PBoolean.INSTANCE.getSqlTypeName()); - try (Admin admin = getAdmin()) { - List localIndexTables = - admin.listTableDescriptors(Pattern - .compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*")); - for (TableDescriptor table : localIndexTables) { - if (table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null - && table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null) { - - table=TableDescriptorBuilder.newBuilder(table).setValue(Bytes.toBytes(MetaDataUtil.PARENT_TABLE_KEY), - Bytes.toBytes(MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString()))).build(); - // Explicitly disable, modify and enable the table to ensure - // co-location of data and index regions. If we just modify the - // table descriptor when online schema change enabled may reopen - // the region in same region server instead of following data region. - disableTable(admin, table.getTableName()); - admin.modifyTable(table); - admin.enableTable(table.getTableName()); - } - } - } - } - - // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then - // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too. - // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed, - // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all - // the column names that have been added to SYSTEM.CATALOG since 4.0. - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { - columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.INDEX_TYPE + " " - + PUnsignedTinyint.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " - + PLong.INSTANCE.getSqlTypeName()); - } - - // If we have some new columns from 4.1-4.3 to add, add them now. - if (!columnsToAdd.isEmpty()) { - // Ugh..need to assign to another local variable to keep eclipse happy. 
- PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0, columnsToAdd); - metaConnection = newMetaConnection; - } - - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) { - columnsToAdd = PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " " - + PInteger.INSTANCE.getSqlTypeName(); - try { - metaConnection = addColumn(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0, columnsToAdd, - false); - upgradeTo4_5_0(metaConnection); - } catch (ColumnAlreadyExistsException ignored) { - /* - * Upgrade to 4.5 is a slightly special case. We use the fact that the - * column BASE_COLUMN_COUNT is already part of the meta-data schema as the - * signal that the server side upgrade has finished or is in progress. - */ - LOGGER.debug("No need to run 4.5 upgrade"); - } - Properties p = PropertiesUtil.deepCopy(metaConnection.getClientInfo()); - p.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB); - p.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - PhoenixConnection conn = new PhoenixConnection( - ConnectionQueryServicesImpl.this, metaConnection.getURL(), p); - try { - List tablesNeedingUpgrade = UpgradeUtil - .getPhysicalTablesWithDescRowKey(conn); - if (!tablesNeedingUpgrade.isEmpty()) { - LOGGER.warn("The following tables require upgrade due to a bug " + - "causing the row key to be incorrect for descending columns " + - "and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n" - + Joiner.on(' ').join(tablesNeedingUpgrade) - + "\nTo upgrade issue the \"bin/psql.py -u\" command."); - } - List unsupportedTables = UpgradeUtil - .getPhysicalTablesWithDescVarbinaryRowKey(conn); - if (!unsupportedTables.isEmpty()) { - LOGGER.warn("The following tables use an unsupported " + - "VARBINARY DESC construct and need to be changed:\n" - + Joiner.on(' ').join(unsupportedTables)); - } - } catch (Exception ex) { - LOGGER.error( - "Unable to determine tables requiring upgrade due to PHOENIX-2067", - ex); - } finally { - conn.close(); - } - } - // Add these columns one at a time so that if folks have run the upgrade code - // already for a snapshot, we'll still enter this block (and do the parts we - // haven't yet done). 
- // Add each column with different timestamp else the code assumes that the - // table is already modified at that timestamp resulting in not updating the - // second column with same timestamp - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0) { - columnsToAdd = PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP + " " - + PBoolean.INSTANCE.getSqlTypeName(); - metaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_6_0, columnsToAdd); - } - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) { - // Drop old stats table so that new stats table is created - metaConnection = dropStatsTable(metaConnection, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 4); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 3, - PhoenixDatabaseMetaData.TRANSACTIONAL + " " - + PBoolean.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 2, - PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + " " - + PLong.INSTANCE.getSqlTypeName()); - metaConnection = setImmutableTableIndexesImmutable(metaConnection, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 - 1); - metaConnection = updateSystemCatalogTimestamp(metaConnection, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0); - clearCache(); - } - - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 2, - PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED + " " - + PBoolean.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0 - 1, - PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ + " " - + PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0, - PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA + " " - + PBoolean.INSTANCE.getSqlTypeName()); - metaConnection = UpgradeUtil.disableViewIndexes(metaConnection); - if (getProps().getBoolean(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, - QueryServicesOptions.DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE)) { - localIndexUpgradeRequired = true; - } - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_8_0); - clearCache(); - } - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, - PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + " " - + PLong.INSTANCE.getSqlTypeName()); - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0); - clearCache(); - } - if 
(currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0) { - metaConnection = addColumnQualifierColumn(metaConnection, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 3); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 2, - PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME + " " - + PTinyint.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 - 1, - PhoenixDatabaseMetaData.ENCODING_SCHEME + " " - + PTinyint.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, - PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER + " " - + PInteger.INSTANCE.getSqlTypeName()); - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0); - clearCache(); - } - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, - PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION + " " - + PBoolean.INSTANCE.getSqlTypeName()); - addParentToChildLinks(metaConnection); - } - if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0, - PhoenixDatabaseMetaData.TRANSACTION_PROVIDER + " " - + PTinyint.INSTANCE.getSqlTypeName()); - try (Statement altQry = metaConnection.createStatement()) { - altQry.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_CATALOG + " SET " - + HConstants.VERSIONS + "= " - + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions - .DEFAULT_SYSTEM_MAX_VERSIONS) + ",\n" - + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" - + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, - QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); - - altQry.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_FUNCTION + " SET " - + TableDescriptorBuilder.SPLIT_POLICY + "='" - + QueryConstants.SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME + "',\n" - + HConstants.VERSIONS + "= " - + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions - .DEFAULT_SYSTEM_MAX_VERSIONS) + ",\n" - + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" - + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, - QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); - - altQry.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " - + TableDescriptorBuilder.SPLIT_POLICY + "='" - + QueryConstants.SYSTEM_STATS_SPLIT_POLICY_CLASSNAME + "'"); - } - } - if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { - addViewIndexToParentLinks(metaConnection); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0, - PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE + " " - + PInteger.INSTANCE.getSqlTypeName()); - } - if (currentServerSideTableTimeStamp < 
MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 - 3, - PhoenixDatabaseMetaData.PHOENIX_TTL + " " - + PInteger.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists( - metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 - 2, - PhoenixDatabaseMetaData.PHOENIX_TTL_HWM + " " - + PInteger.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 -1, - PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP + " " - + PLong.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0, - PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED - + " " + PBoolean.INSTANCE.getSqlTypeName()); - UpgradeUtil.bootstrapLastDDLTimestampForTablesAndViews(metaConnection); - - boolean isNamespaceMapping = - SchemaUtil.isNamespaceMappingEnabled(null, getConfiguration()); - String tableName = PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; - if (isNamespaceMapping) { - tableName = tableName.replace( - QueryConstants.NAME_SEPARATOR, - QueryConstants.NAMESPACE_SEPARATOR); - } - byte[] tableBytes = StringUtil.toBytes(tableName); - byte[] rowKey = SchemaUtil.getColumnKey(null, - QueryConstants.SYSTEM_SCHEMA_NAME, - SYSTEM_CATALOG_TABLE, VIEW_INDEX_ID, - PhoenixDatabaseMetaData.TABLE_FAMILY); - if (UpgradeUtil.isUpdateViewIndexIdColumnDataTypeFromShortToLongNeeded - (metaConnection, rowKey, tableBytes)) { - LOGGER.info("Updating VIEW_INDEX_ID data type to BIGINT."); - UpgradeUtil.updateViewIndexIdColumnDataTypeFromShortToLong( - metaConnection, rowKey, tableBytes); - } else { - LOGGER.info("Updating VIEW_INDEX_ID data type is not needed."); - } - try (Admin admin = metaConnection.getQueryServices().getAdmin()) { - TableDescriptorBuilder tdBuilder; - TableName sysCatPhysicalTableName = SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props); - tdBuilder = TableDescriptorBuilder.newBuilder( - admin.getDescriptor(sysCatPhysicalTableName)); - if (!tdBuilder.build().hasCoprocessor( - QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME)) { - int priority = props.getInt( - QueryServices.COPROCESSOR_PRIORITY_ATTRIB, - QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); - tdBuilder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - admin.modifyTable(tdBuilder.build()); - pollForUpdatedTableDescriptor(admin, tdBuilder.build(), - sysCatPhysicalTableName.getName()); - } - } - } - if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0) { - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 8, - PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME + " " - + PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 7, - PhoenixDatabaseMetaData.SCHEMA_VERSION + " " - + PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 6, - PhoenixDatabaseMetaData.EXTERNAL_SCHEMA_ID + " " - 
+ PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 5, - PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME + " " - + PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 4, - PhoenixDatabaseMetaData.INDEX_WHERE + " " - + PVarchar.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 3, - PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE + " " - + PLong.INSTANCE.getSqlTypeName()); - metaConnection = - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 2, - PhoenixDatabaseMetaData.CDC_INCLUDE_TABLE + " " - + PVarchar.INSTANCE.getSqlTypeName()); - - /** - * TODO: Provide a path to copy existing data from PHOENIX_TTL to TTL column and then - * to DROP PHOENIX_TTL Column. See PHOENIX-7023 - */ - metaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0 - 1, - PhoenixDatabaseMetaData.TTL + " " + PInteger.INSTANCE.getSqlTypeName()); - metaConnection = addColumnsIfNotExists(metaConnection, - PhoenixDatabaseMetaData.SYSTEM_CATALOG, MIN_SYSTEM_TABLE_TIMESTAMP_5_3_0, - PhoenixDatabaseMetaData.ROW_KEY_MATCHER + " " - + PVarbinary.INSTANCE.getSqlTypeName()); - //Values in PHOENIX_TTL column will not be used for further release as PHOENIX_TTL column is being deprecated - //and will be removed in later release. To copy copyDataFromPhoenixTTLtoTTL(metaConnection) can be used but - //as that feature was not fully built we are not moving old value to new column - - //move TTL values stored in descriptor to SYSCAT TTL column. - moveTTLFromHBaseLevelTTLToPhoenixLevelTTL(metaConnection); - UpgradeUtil.bootstrapLastDDLTimestampForIndexes(metaConnection); - } - return metaConnection; - } - - /** - * There is no other locking needed here since only one connection (on the same or different JVM) will be able to - * acquire the upgrade mutex via {@link #acquireUpgradeMutex(long)} . - */ - @Override - public void upgradeSystemTables(final String url, final Properties props) throws SQLException { - PhoenixConnection metaConnection = null; - boolean success = false; - final Map systemTableToSnapshotMap = new HashMap<>(); - String sysCatalogTableName = null; - SQLException toThrow = null; - boolean acquiredMutexLock = false; - boolean moveChildLinks = false; - boolean syncAllTableAndIndexProps = false; - try { - if (!isUpgradeRequired()) { - throw new UpgradeNotRequiredException(); - } - Properties scnProps = PropertiesUtil.deepCopy(props); - scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, - Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); - scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB); - metaConnection = new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl, - scnProps); - metaConnection.setRunningUpgrade(true); - - // Always try to create SYSTEM.MUTEX table first since we need it to acquire the - // upgrade mutex. 
Upgrade or migration is not possible without the upgrade mutex - try (Admin admin = getAdmin()) { - createSysMutexTableIfNotExists(admin); - } - UpgradeRequiredException caughtUpgradeRequiredException = null; - TableAlreadyExistsException caughtTableAlreadyExistsException = null; - try { - metaConnection.createStatement().executeUpdate(getSystemCatalogTableDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - // Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed - // timestamp. A TableAlreadyExistsException is not thrown, since the table only exists - // *after* this fixed timestamp. - } catch (UpgradeRequiredException e) { - // This is thrown while trying to create SYSTEM:CATALOG to indicate that we must - // migrate SYSTEM tables to the SYSTEM namespace and/or upgrade SYSCAT if required - caughtUpgradeRequiredException = e; - } catch (TableAlreadyExistsException e) { - caughtTableAlreadyExistsException = e; - } - - if (caughtUpgradeRequiredException != null - || caughtTableAlreadyExistsException != null) { - long currentServerSideTableTimeStamp; - if (caughtUpgradeRequiredException != null) { - currentServerSideTableTimeStamp = - caughtUpgradeRequiredException.getSystemCatalogTimeStamp(); - } else { - currentServerSideTableTimeStamp = - caughtTableAlreadyExistsException.getTable().getTimeStamp(); - } - - ReadOnlyProps readOnlyProps = metaConnection.getQueryServices().getProps(); - String skipUpgradeBlock = readOnlyProps.get(SKIP_UPGRADE_BLOCK_CHECK); - - if (skipUpgradeBlock == null || !Boolean.valueOf(skipUpgradeBlock)) { - checkUpgradeBlockMutex(); - } - - acquiredMutexLock = acquireUpgradeMutex( - MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP); - LOGGER.debug( - "Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM " - + "namespace and/or upgrading " + sysCatalogTableName); - String snapshotName = getSysTableSnapshotName(currentServerSideTableTimeStamp, - SYSTEM_CATALOG_NAME); - createSnapshot(snapshotName, SYSTEM_CATALOG_NAME); - systemTableToSnapshotMap.put(SYSTEM_CATALOG_NAME, snapshotName); - LOGGER.info("Created snapshot {} for {}", snapshotName, SYSTEM_CATALOG_NAME); - - // Snapshot qualifiers may only contain 'alphanumeric characters' and - // digits, hence : cannot be part of snapshot name - String mappedSnapshotName = getSysTableSnapshotName(currentServerSideTableTimeStamp, - "MAPPED." + SYSTEM_CATALOG_NAME); - createSnapshot(mappedSnapshotName, MAPPED_SYSTEM_CATALOG_NAME); - systemTableToSnapshotMap.put(MAPPED_SYSTEM_CATALOG_NAME, mappedSnapshotName); - LOGGER.info("Created snapshot {} for {}", - mappedSnapshotName, MAPPED_SYSTEM_CATALOG_NAME); - - if (caughtUpgradeRequiredException != null) { - if (SchemaUtil.isNamespaceMappingEnabled( - PTableType.SYSTEM, ConnectionQueryServicesImpl.this.getProps())) { - // If SYSTEM tables exist, they are migrated to HBase SYSTEM namespace - // If they don't exist or they're already migrated, this method will return - //immediately - ensureSystemTablesMigratedToSystemNamespace(); - LOGGER.debug("Migrated SYSTEM tables to SYSTEM namespace"); - } - } - - metaConnection = upgradeSystemCatalogIfRequired(metaConnection, currentServerSideTableTimeStamp); - if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { - moveChildLinks = true; - syncAllTableAndIndexProps = true; - } - if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0) { - //Combine view index id sequences for the same physical view index table - //to avoid collisions. 
See PHOENIX-5132 and PHOENIX-5138 - try (PhoenixConnection conn = new PhoenixConnection( - ConnectionQueryServicesImpl.this, globalUrl, - props)) { - UpgradeUtil.mergeViewIndexIdSequences(metaConnection); - } catch (Exception mergeViewIndeIdException) { - LOGGER.warn("Merge view index id sequence failed! If possible, " + - "please run MergeViewIndexIdSequencesTool to avoid view index" + - "id collision. Error: " + mergeViewIndeIdException.getMessage()); - } - } - } - - // pass systemTableToSnapshotMap to capture more system table to - // snapshot entries - metaConnection = upgradeOtherSystemTablesIfRequired(metaConnection, - moveChildLinks, systemTableToSnapshotMap); - - // Once the system tables are upgraded the local index upgrade can be done - if (localIndexUpgradeRequired) { - LOGGER.info("Upgrading local indexes"); - metaConnection = UpgradeUtil.upgradeLocalIndexes(metaConnection); - } - - // Synchronize necessary properties amongst all column families of a base table - // and its indexes. See PHOENIX-3955 - if (syncAllTableAndIndexProps) { - syncTableAndIndexProperties(metaConnection); - } - - // In case namespace mapping is enabled and system table to system namespace mapping is also enabled, - // create an entry for the SYSTEM namespace in the SYSCAT table, so that GRANT/REVOKE commands can work - // with SYSTEM Namespace - createSchemaIfNotExistsSystemNSMappingEnabled(metaConnection); - - clearUpgradeRequired(); - success = true; - } catch (UpgradeInProgressException | UpgradeNotRequiredException e) { - // don't set it as initializationException because otherwise client won't be able to retry - throw e; - } catch (Exception e) { - if (e instanceof SQLException) { - toThrow = (SQLException)e; - } else { - // wrap every other exception into a SQLException - toThrow = new SQLException(e); - } - } finally { - try { - if (metaConnection != null) { - metaConnection.close(); - } - } catch (SQLException e) { - if (toThrow != null) { - toThrow.setNextException(e); - } else { - toThrow = e; - } - } finally { - if (!success) { - LOGGER.warn("Failed upgrading System tables. 
" + - "Snapshots for system tables created so far: {}", - systemTableToSnapshotMap); - } - if (acquiredMutexLock) { - try { - releaseUpgradeMutex(); - } catch (IOException e) { - LOGGER.warn("Release of upgrade mutex failed ", e); - } - } - if (toThrow != null) { - throw toThrow; - } - } - } - } - - /** - * Create or upgrade SYSTEM tables other than SYSTEM.CATALOG - * @param metaConnection Phoenix connection - * @param moveChildLinks true if we need to move child links from SYSTEM.CATALOG to - * SYSTEM.CHILD_LINK - * @param systemTableToSnapshotMap table to snapshot map which can be - * where new entries of system table to it's corresponding created - * snapshot is added - * @return Phoenix connection - * @throws SQLException thrown by underlying upgrade system methods - * @throws IOException thrown by underlying upgrade system methods - */ - private PhoenixConnection upgradeOtherSystemTablesIfRequired( - PhoenixConnection metaConnection, boolean moveChildLinks, - Map systemTableToSnapshotMap) - throws SQLException, IOException { - // if we are really going to perform upgrades of other system tables, - // by this point we would have already taken mutex lock, hence - // we can proceed with creation of snapshots and add table to - // snapshot entries in systemTableToSnapshotMap - metaConnection = upgradeSystemSequence(metaConnection, - systemTableToSnapshotMap); - metaConnection = upgradeSystemStats(metaConnection, - systemTableToSnapshotMap); - metaConnection = upgradeSystemTask(metaConnection, - systemTableToSnapshotMap); - metaConnection = upgradeSystemFunction(metaConnection); - metaConnection = upgradeSystemTransform(metaConnection, systemTableToSnapshotMap); - metaConnection = upgradeSystemLog(metaConnection, systemTableToSnapshotMap); - metaConnection = upgradeSystemMutex(metaConnection); - - // As this is where the most time will be spent during an upgrade, - // especially when there are large number of views. - // Upgrade the SYSTEM.CHILD_LINK towards the end, - // so that any failures here can be handled/continued out of band. 
- metaConnection = upgradeSystemChildLink(metaConnection, moveChildLinks, - systemTableToSnapshotMap); - return metaConnection; - } - - private PhoenixConnection upgradeSystemChildLink( - PhoenixConnection metaConnection, boolean moveChildLinks, - Map systemTableToSnapshotMap) throws SQLException, IOException { - try (Statement statement = metaConnection.createStatement()) { - statement.executeUpdate(getChildLinkDDL()); - } catch (TableAlreadyExistsException e) { - takeSnapshotOfSysTable(systemTableToSnapshotMap, e); - } - if (moveChildLinks) { - // Increase the timeouts so that the scan queries during moveOrCopyChildLinks do not timeout on large syscat's - Map options = new HashMap<>(); - options.put(HConstants.HBASE_RPC_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - options.put(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - moveOrCopyChildLinks(metaConnection, options); - } - return metaConnection; - } - - @VisibleForTesting - public PhoenixConnection upgradeSystemSequence( - PhoenixConnection metaConnection, - Map systemTableToSnapshotMap) throws SQLException, IOException { - try (Statement statement = metaConnection.createStatement()) { - String createSequenceTable = getSystemSequenceTableDDL(nSequenceSaltBuckets); - statement.executeUpdate(createSequenceTable); - } catch (NewerTableAlreadyExistsException e) { - // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed - // timestamp. - // A TableAlreadyExistsException is not thrown, since the table only exists *after* this - // fixed timestamp. - nSequenceSaltBuckets = getSaltBuckets(e); - } catch (TableAlreadyExistsException e) { - // take snapshot first - takeSnapshotOfSysTable(systemTableToSnapshotMap, e); - - // This will occur if we have an older SYSTEM.SEQUENCE and we need to update it to - // include - // any new columns we've added. - long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); - if (currentServerSideTableTimeStamp < - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { - // If the table time stamp is before 4.1.0 then we need to add below columns - // to the SYSTEM.SEQUENCE table. 
- String columnsToAdd = PhoenixDatabaseMetaData.MIN_VALUE + " " - + PLong.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.MAX_VALUE + " " - + PLong.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.CYCLE_FLAG + " " - + PBoolean.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG + " " - + PBoolean.INSTANCE.getSqlTypeName(); - addColumnsIfNotExists(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd); - } - // If the table timestamp is before 4.2.1 then run the upgrade script - if (currentServerSideTableTimeStamp < - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1) { - if (UpgradeUtil.upgradeSequenceTable(metaConnection, nSequenceSaltBuckets, - e.getTable())) { - metaConnection.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP); - clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY, - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES, - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP); - clearTableRegionCache(TableName.valueOf( - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES)); - } - } else { - nSequenceSaltBuckets = getSaltBuckets(e); - } - updateSystemSequenceWithCacheOnWriteProps(metaConnection); - } - return metaConnection; - } - - private void updateSystemSequenceWithCacheOnWriteProps(PhoenixConnection metaConnection) throws - IOException, SQLException { - - try (Admin admin = getAdmin()) { - TableDescriptor oldTD = admin.getDescriptor( - SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME, - metaConnection.getQueryServices().getProps())); - ColumnFamilyDescriptor oldCf = oldTD.getColumnFamily( - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); - - // If the CacheOnWrite related properties are not set, lets set them. - if (!oldCf.isCacheBloomsOnWrite() || !oldCf.isCacheDataOnWrite() - || !oldCf.isCacheIndexesOnWrite()) { - ColumnFamilyDescriptorBuilder newCFBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(oldCf); - newCFBuilder.setCacheBloomsOnWrite(true); - newCFBuilder.setCacheDataOnWrite(true); - newCFBuilder.setCacheIndexesOnWrite(true); - - TableDescriptorBuilder newTD = TableDescriptorBuilder.newBuilder(oldTD); - newTD.modifyColumnFamily(newCFBuilder.build()); - admin.modifyTable(newTD.build()); - } - } - } - - private void takeSnapshotOfSysTable( - Map systemTableToSnapshotMap, - TableAlreadyExistsException e) throws SQLException { - long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); - String tableName = e.getTable().getPhysicalName().getString(); - String snapshotName = getSysTableSnapshotName( - currentServerSideTableTimeStamp, tableName); - // Snapshot qualifiers may only contain 'alphanumeric characters' and - // digits, hence : cannot be part of snapshot name - if (snapshotName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - snapshotName = getSysTableSnapshotName( - currentServerSideTableTimeStamp, "MAPPED." + tableName). 
- replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); - } - createSnapshot(snapshotName, tableName); - systemTableToSnapshotMap.put(tableName, snapshotName); - LOGGER.info("Snapshot {} created for table {}", snapshotName, - tableName); - } - - @VisibleForTesting - public PhoenixConnection upgradeSystemStats( - PhoenixConnection metaConnection, - Map systemTableToSnapshotMap) throws - SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { - try (Statement statement = metaConnection.createStatement()) { - statement.executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA); - } catch (NewerTableAlreadyExistsException ignored) { - - } catch (TableAlreadyExistsException e) { - // take snapshot first - takeSnapshotOfSysTable(systemTableToSnapshotMap, e); - long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); - if (currentServerSideTableTimeStamp < - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) { - metaConnection = addColumnsIfNotExists( - metaConnection, - SYSTEM_STATS_NAME, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, - PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT + " " - + PLong.INSTANCE.getSqlTypeName()); - } - if (currentServerSideTableTimeStamp < - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0) { - // The COLUMN_FAMILY column should be nullable as we create a row in it without - // any column family to mark when guideposts were last collected. - metaConnection = removeNotNullConstraint(metaConnection, - SYSTEM_SCHEMA_NAME, - PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0, - PhoenixDatabaseMetaData.COLUMN_FAMILY); - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_STATS_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0); - clearCache(); - } - if (UpgradeUtil.tableHasKeepDeleted( - metaConnection, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)) { - try (Statement altStmt = metaConnection.createStatement()) { - altStmt.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " - + KEEP_DELETED_CELLS + "='" + KeepDeletedCells.FALSE + "'"); - } - } - if (UpgradeUtil.tableHasMaxVersions( - metaConnection, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)) { - try (Statement altStats = metaConnection.createStatement()) { - altStats.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " SET " - + HConstants.VERSIONS + " = '1' "); - } - } - } - return metaConnection; - } - - private PhoenixConnection upgradeSystemTask( - PhoenixConnection metaConnection, - Map systemTableToSnapshotMap) - throws SQLException, IOException { - try (Statement statement = metaConnection.createStatement()) { - statement.executeUpdate(getTaskDDL()); - } catch (NewerTableAlreadyExistsException ignored) { - - } catch (TableAlreadyExistsException e) { - // take snapshot first - takeSnapshotOfSysTable(systemTableToSnapshotMap, e); - long currentServerSideTableTimeStamp = e.getTable().getTimeStamp(); - if (currentServerSideTableTimeStamp <= - MIN_SYSTEM_TABLE_TIMESTAMP_4_15_0) { - String columnsToAdd = - PhoenixDatabaseMetaData.TASK_STATUS + " " + - PVarchar.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.TASK_END_TS + " " + - PTimestamp.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.TASK_PRIORITY + " " + - PUnsignedTinyint.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.TASK_DATA + " " + - PVarchar.INSTANCE.getSqlTypeName(); - String taskTableFullName = 
SchemaUtil.getTableName(SYSTEM_CATALOG_SCHEMA, - SYSTEM_TASK_TABLE); - metaConnection = - addColumnsIfNotExists(metaConnection, taskTableFullName, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd); - String altQuery = String.format(ALTER_TABLE_SET_PROPS, - taskTableFullName, TTL, TASK_TABLE_TTL); - try (PreparedStatement altQueryStmt = metaConnection.prepareStatement(altQuery)) { - altQueryStmt.executeUpdate(); - } - clearCache(); - } - // If SYSTEM.TASK does not have disabled regions split policy, - // set it up here while upgrading it - try (Admin admin = metaConnection.getQueryServices().getAdmin()) { - TableDescriptor td; - TableName tableName = SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, props); - td = admin.getDescriptor(tableName); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(td); - boolean isTableDescUpdated = false; - if (updateAndConfirmSplitPolicyForTask( - tableDescriptorBuilder)) { - isTableDescUpdated = true; - } - if (!tableDescriptorBuilder.build().hasCoprocessor( - QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME)) { - int priority = props.getInt( - QueryServices.COPROCESSOR_PRIORITY_ATTRIB, - QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY); - tableDescriptorBuilder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(QueryConstants.TASK_META_DATA_ENDPOINT_CLASSNAME) - .setPriority(priority) - .setProperties(Collections.emptyMap()) - .build()); - isTableDescUpdated=true; - } - if (isTableDescUpdated) { - admin.modifyTable(tableDescriptorBuilder.build()); - pollForUpdatedTableDescriptor(admin, - tableDescriptorBuilder.build(), tableName.getName()); - } - } catch (InterruptedException | TimeoutException ite) { - throw new SQLException(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME - + " Upgrade is not confirmed"); - } - } - return metaConnection; - } - - private PhoenixConnection upgradeSystemTransform( - PhoenixConnection metaConnection, - Map systemTableToSnapshotMap) - throws SQLException { - try (Statement statement = metaConnection.createStatement()) { - statement.executeUpdate(getTransformDDL()); - } catch (TableAlreadyExistsException ignored) { - - } - return metaConnection; - } - - private PhoenixConnection upgradeSystemFunction(PhoenixConnection metaConnection) - throws SQLException { - try { - metaConnection.createStatement().executeUpdate(getFunctionTableDDL()); - } catch (TableAlreadyExistsException ignored) { - // Since we are not performing any action as part of upgrading - // SYSTEM.FUNCTION, we don't need to take snapshot as of this - // writing. However, if need arises to perform significant - // update, we should take snapshot just like other system tables. 
- // e.g usages of takeSnapshotOfSysTable() - } - return metaConnection; - } - - @VisibleForTesting - public PhoenixConnection upgradeSystemLog(PhoenixConnection metaConnection, - Map systemTableToSnapshotMap) - throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { - try (Statement statement = metaConnection.createStatement()) { - statement.executeUpdate(getLogTableDDL()); - } catch (NewerTableAlreadyExistsException ignored) { - } catch (TableAlreadyExistsException e) { - // take snapshot first - takeSnapshotOfSysTable(systemTableToSnapshotMap, e); - if (UpgradeUtil.tableHasKeepDeleted( - metaConnection, PhoenixDatabaseMetaData.SYSTEM_LOG_NAME) ) { - try (Statement altLogStmt = metaConnection.createStatement()) { - altLogStmt.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_LOG_NAME + " SET " - + KEEP_DELETED_CELLS + "='" + KeepDeletedCells.FALSE + "'"); - } - } - if (UpgradeUtil.tableHasMaxVersions( - metaConnection, PhoenixDatabaseMetaData.SYSTEM_LOG_NAME)) { - try (Statement altLogVer = metaConnection.createStatement()) { - altLogVer.executeUpdate("ALTER TABLE " - + PhoenixDatabaseMetaData.SYSTEM_LOG_NAME + " SET " - + HConstants.VERSIONS + "='1'"); - } - } - } - return metaConnection; - } - - private PhoenixConnection upgradeSystemMutex(PhoenixConnection metaConnection) - throws SQLException { - try { - metaConnection.createStatement().executeUpdate(getMutexDDL()); - } catch (TableAlreadyExistsException ignored) { - // Since we are not performing any action as part of upgrading - // SYSTEM.MUTEX, we don't need to take snapshot as of this - // writing. However, if need arises to perform significant - // update, we should take snapshot just like other system tables. - // e.g usages of takeSnapshotOfSysTable() - } - return metaConnection; - } - - - // Special method for adding the column qualifier column for 4.10. 
- private PhoenixConnection addColumnQualifierColumn(PhoenixConnection oldMetaConnection, Long timestamp) throws SQLException { - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - metaConnection.setAutoCommit(false); - PTable sysCatalogPTable = metaConnection.getTable(SYSTEM_CATALOG_NAME); - int numColumns = sysCatalogPTable.getColumns().size(); - try (PreparedStatement mutateTable = metaConnection.prepareStatement(MetaDataClient.MUTATE_TABLE)) { - mutateTable.setString(1, null); - mutateTable.setString(2, SYSTEM_CATALOG_SCHEMA); - mutateTable.setString(3, SYSTEM_CATALOG_TABLE); - mutateTable.setString(4, PTableType.SYSTEM.getSerializedValue()); - mutateTable.setLong(5, sysCatalogPTable.getSequenceNumber() + 1); - mutateTable.setInt(6, numColumns + 1); - mutateTable.execute(); - } - List tableMetadata = new ArrayList<>( - metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next() - .getSecond()); - metaConnection.rollback(); - PColumn column = new PColumnImpl(PNameFactory.newName("COLUMN_QUALIFIER"), - PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME), PVarbinary.INSTANCE, null, null, true, numColumns, - SortOrder.ASC, null, null, false, null, false, false, - Bytes.toBytes("COLUMN_QUALIFIER"), timestamp); - String upsertColumnMetadata = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - NULLABLE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - DATA_TABLE_NAME + "," + - ARRAY_SIZE + "," + - VIEW_CONSTANT + "," + - IS_VIEW_REFERENCED + "," + - PK_NAME + "," + - KEY_SEQ + "," + - COLUMN_DEF + "," + - IS_ROW_TIMESTAMP + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - try (PreparedStatement colUpsert = metaConnection.prepareStatement(upsertColumnMetadata)) { - colUpsert.setString(1, null); - colUpsert.setString(2, SYSTEM_CATALOG_SCHEMA); - colUpsert.setString(3, SYSTEM_CATALOG_TABLE); - colUpsert.setString(4, "COLUMN_QUALIFIER"); - colUpsert.setString(5, DEFAULT_COLUMN_FAMILY); - colUpsert.setInt(6, column.getDataType().getSqlType()); - colUpsert.setInt(7, ResultSetMetaData.columnNullable); - colUpsert.setNull(8, Types.INTEGER); - colUpsert.setNull(9, Types.INTEGER); - colUpsert.setInt(10, sysCatalogPTable.getBucketNum() != null ? numColumns : (numColumns + 1)); - colUpsert.setInt(11, SortOrder.ASC.getSystemValue()); - colUpsert.setString(12, null); - colUpsert.setNull(13, Types.INTEGER); - colUpsert.setBytes(14, null); - colUpsert.setBoolean(15, false); - colUpsert.setString(16, sysCatalogPTable.getPKName() == null ? 
null : sysCatalogPTable.getPKName().getString()); - colUpsert.setNull(17, Types.SMALLINT); - colUpsert.setNull(18, Types.VARCHAR); - colUpsert.setBoolean(19, false); - colUpsert.execute(); - } - tableMetadata.addAll(metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond()); - metaConnection.rollback(); - metaConnection.getQueryServices().addColumn(tableMetadata, sysCatalogPTable, null,null, Collections.>>emptyMap(), Collections.emptySet(), Lists.newArrayList(column)); - metaConnection.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp); - ConnectionQueryServicesImpl.this.removeTable(null, - SYSTEM_CATALOG_NAME, null, - timestamp); - clearCache(); - return metaConnection; - } - - private void deleteSnapshot(String snapshotName) - throws SQLException, IOException { - try (Admin admin = getAdmin()) { - admin.deleteSnapshot(snapshotName); - LOGGER.info("Snapshot {} is deleted", snapshotName); - } - } - - private void createSnapshot(String snapshotName, String tableName) - throws SQLException { - Admin admin = null; - SQLException sqlE = null; - try { - admin = getAdmin(); - admin.snapshot(snapshotName, TableName.valueOf(tableName)); - LOGGER.info("Successfully created snapshot " + snapshotName + " for " - + tableName); - } catch (SnapshotCreationException e) { - if (e.getMessage().contains("doesn't exist")) { - LOGGER.warn("Could not create snapshot {}, table is missing." + snapshotName, e); - } else { - sqlE = new SQLException(e); - } - } catch (Exception e) { - sqlE = new SQLException(e); - } finally { - try { - if (admin != null) { - admin.close(); - } - } catch (Exception e) { - SQLException adminCloseEx = new SQLException(e); - if (sqlE == null) { - sqlE = adminCloseEx; - } else { - sqlE.setNextException(adminCloseEx); - } - } finally { - if (sqlE != null) { - throw sqlE; - } - } - } - } - - void ensureSystemTablesMigratedToSystemNamespace() - throws SQLException, IOException, IllegalArgumentException, InterruptedException { - if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) { return; } - - Table metatable = null; - try (Admin admin = getAdmin()) { - List tableNames = getSystemTableNamesInDefaultNamespace(admin); - // No tables exist matching "SYSTEM\..*", they are all already in "SYSTEM:.*" - if (tableNames.size() == 0) { return; } - // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:" - if (tableNames.size() > 9) { - LOGGER.warn("Expected 9 system tables but found " + tableNames.size() + ":" + tableNames); - } - - byte[] mappedSystemTable = SchemaUtil - .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName(); - metatable = getTable(mappedSystemTable); - if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) { - if (!AdminUtilWithFallback.tableExists(admin, - TableName.valueOf(mappedSystemTable))) { - LOGGER.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace."); - // Actual migration of SYSCAT table - UpgradeUtil.mapTableToNamespace(admin, metatable, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, this.getProps(), null, PTableType.SYSTEM, - null); - // Invalidate the client-side metadataCache - ConnectionQueryServicesImpl.this.removeTable(null, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0); - } - tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME); - } - for (TableName table : tableNames) { - LOGGER.info(String.format("Migrating %s table to SYSTEM 
namespace.", table.getNameAsString())); - UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), this.getProps(), null, PTableType.SYSTEM, - null); - ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0); - } - - // Clear the server-side metadataCache when all tables are migrated so that the new PTable can be loaded with NS mapping - clearCache(); - } finally { - if (metatable != null) { - metatable.close(); - } - } - } - - /** - * Acquire distributed mutex of sorts to make sure only one JVM is able to run the upgrade code by - * making use of HBase's checkAndPut api. - * - * @return true if client won the race, false otherwise - * @throws SQLException - */ - @VisibleForTesting - public boolean checkUpgradeBlockMutex() - throws SQLException { - try (Table sysMutexTable = getSysMutexTable()) { - final byte[] rowKey = Bytes.toBytes("BLOCK_UPGRADE"); - - Get get = new Get(rowKey).addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES); - Result r = sysMutexTable.get(get); - - if (!r.isEmpty()) { - throw new UpgradeBlockedException(); - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - return true; - } - - /** - * Acquire distributed mutex of sorts to make sure only one JVM is able to run the upgrade code by - * making use of HBase's checkAndPut api. - * - * @return true if client won the race, false otherwise - * @throws SQLException - */ - @VisibleForTesting - public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp) - throws SQLException { - Preconditions.checkArgument(currentServerSideTableTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP); - if (!writeMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null)) { - throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp), - getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); - } - return true; - } - - @Override - public boolean writeMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException { - try { - byte[] rowKey = columnName != null - ? 
SchemaUtil.getColumnKey(tenantId, schemaName, tableName, - columnName, familyName) - : SchemaUtil.getTableKey(tenantId, schemaName, tableName); - // at this point the system mutex table should have been created or - // an exception thrown - try (Table sysMutexTable = getSysMutexTable()) { - Put put = new Put(rowKey); - put.addColumn(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES, MUTEX_LOCKED); - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(rowKey) - .ifNotExists(SYSTEM_MUTEX_FAMILY_NAME_BYTES, SYSTEM_MUTEX_COLUMN_NAME_BYTES) - .build(put); - boolean checkAndPut = - sysMutexTable.checkAndMutate(checkAndMutate).isSuccess(); - String processName = ManagementFactory.getRuntimeMXBean().getName(); - String msg = - " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " - + tableName + " columnName : " + columnName + " familyName : " - + familyName; - if (!checkAndPut) { - LOGGER.error(processName + " failed to acquire mutex for "+ msg); - } - else { - LOGGER.debug(processName + " acquired mutex for "+ msg); - } - return checkAndPut; - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - - @VisibleForTesting - public void releaseUpgradeMutex() throws IOException, SQLException { - deleteMutexCell(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, null, null); - } - - - @Override - public void deleteMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException { - try { - byte[] rowKey = columnName != null - ? SchemaUtil.getColumnKey(tenantId, schemaName, tableName, - columnName, familyName) - : SchemaUtil.getTableKey(tenantId, schemaName, tableName); - // at this point the system mutex table should have been created or - // an exception thrown - try (Table sysMutexTable = getSysMutexTable()) { - byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES; - byte[] qualifier = PhoenixDatabaseMetaData.SYSTEM_MUTEX_COLUMN_NAME_BYTES; - Delete delete = new Delete(rowKey); - delete.addColumn(family, qualifier); - sysMutexTable.delete(delete); - String processName = ManagementFactory.getRuntimeMXBean().getName(); - String msg = - " tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : " - + tableName + " columnName : " + columnName + " familyName : " - + familyName; - LOGGER.debug(processName + " released mutex for "+ msg); - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - - @VisibleForTesting - public Table getSysMutexTable() throws SQLException { - String tableNameAsString = SYSTEM_MUTEX_NAME; - Table table; - try { - table = getTableIfExists(Bytes.toBytes(tableNameAsString)); - } catch (TableNotFoundException e) { - tableNameAsString = tableNameAsString.replace( - QueryConstants.NAME_SEPARATOR, - QueryConstants.NAMESPACE_SEPARATOR); - // if SYSTEM.MUTEX does not exist, we don't need to check - // for the existence of SYSTEM:MUTEX as it must exist, hence - // we can call getTable() here instead of getTableIfExists() - table = getTable(Bytes.toBytes(tableNameAsString)); - } - return table; - } - - private String addColumn(String columnsToAddSoFar, String columns) { - if (columnsToAddSoFar == null || columnsToAddSoFar.isEmpty()) { - return columns; - } else { - return columnsToAddSoFar + ", " + columns; - } - } - - /** - * Set IMMUTABLE_ROWS to true for all index tables over immutable tables. 
- * @param oldMetaConnection connection over which to run the upgrade - * @param timestamp SCN at which to run the update - * @throws SQLException - */ - private PhoenixConnection setImmutableTableIndexesImmutable(PhoenixConnection oldMetaConnection, long timestamp) throws SQLException { - SQLException sqlE = null; - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - boolean autoCommit = metaConnection.getAutoCommit(); - try { - metaConnection.setAutoCommit(true); - metaConnection.createStatement().execute( - "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, IMMUTABLE_ROWS)\n" + - "SELECT A.TENANT_ID, A.TABLE_SCHEM,B.COLUMN_FAMILY,null,null,true\n" + - "FROM SYSTEM.CATALOG A JOIN SYSTEM.CATALOG B ON (\n" + - " A.TENANT_ID = B.TENANT_ID AND \n" + - " A.TABLE_SCHEM = B.TABLE_SCHEM AND\n" + - " A.TABLE_NAME = B.TABLE_NAME AND\n" + - " A.COLUMN_NAME = B.COLUMN_NAME AND\n" + - " B.LINK_TYPE = 1\n" + - ")\n" + - "WHERE A.COLUMN_FAMILY IS NULL AND\n" + - " B.COLUMN_FAMILY IS NOT NULL AND\n" + - " A.IMMUTABLE_ROWS = TRUE"); - } catch (SQLException e) { - LOGGER.warn("exception during upgrading stats table:" + e); - sqlE = e; - } finally { - try { - metaConnection.setAutoCommit(autoCommit); - oldMetaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } - if (sqlE != null) { - throw sqlE; - } - } - return metaConnection; - } - - - - /** - * Forces update of SYSTEM.CATALOG by setting column to existing value - * @param oldMetaConnection - * @param timestamp - * @return - * @throws SQLException - */ - private PhoenixConnection updateSystemCatalogTimestamp(PhoenixConnection oldMetaConnection, long timestamp) throws SQLException { - SQLException sqlE = null; - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - boolean autoCommit = metaConnection.getAutoCommit(); - try { - metaConnection.setAutoCommit(true); - metaConnection.createStatement().execute( - "UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" + - "VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)"); - } catch (SQLException e) { - LOGGER.warn("exception during upgrading stats table:" + e); - sqlE = e; - } finally { - try { - metaConnection.setAutoCommit(autoCommit); - oldMetaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } - if (sqlE != null) { - throw sqlE; - } - } - return metaConnection; - } - - private PhoenixConnection dropStatsTable(PhoenixConnection oldMetaConnection, long timestamp) - throws SQLException, IOException { - Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp)); - PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props); - SQLException sqlE = null; - boolean wasCommit = metaConnection.getAutoCommit(); - try { - metaConnection.setAutoCommit(true); - 
metaConnection.createStatement() - .executeUpdate("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE " - + PhoenixDatabaseMetaData.TABLE_NAME + "='" + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE - + "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='" - + SYSTEM_SCHEMA_NAME + "'"); - } catch (SQLException e) { - LOGGER.warn("exception during upgrading stats table:" + e); - sqlE = e; - } finally { - try { - metaConnection.setAutoCommit(wasCommit); - oldMetaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } - if (sqlE != null) { - throw sqlE; - } - } - return metaConnection; - } - - private void scheduleRenewLeaseTasks() { - if (isRenewingLeasesEnabled()) { - renewLeaseExecutor = - Executors.newScheduledThreadPool(renewLeasePoolSize, renewLeaseThreadFactory); - for (LinkedBlockingQueue> q : connectionQueues) { - renewLeaseExecutor.scheduleAtFixedRate(new RenewLeaseTask(q), 0, - renewLeaseTaskFrequency, TimeUnit.MILLISECONDS); - } - } - } - - private static class RenewLeaseThreadFactory implements ThreadFactory { - private static final AtomicInteger threadNumber = new AtomicInteger(1); - private static final String NAME_PREFIX = "PHOENIX-SCANNER-RENEW-LEASE-thread-"; - - @Override - public Thread newThread(Runnable r) { - Thread t = new Thread(r, NAME_PREFIX + threadNumber.getAndIncrement()); - t.setDaemon(true); - return t; - } - } - - private static int getSaltBuckets(TableAlreadyExistsException e) { - PTable table = e.getTable(); - Integer sequenceSaltBuckets = table == null ? null : table.getBucketNum(); - return sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets; - } - - @Override - public MutationState updateData(MutationPlan plan) throws SQLException { - MutationState state = plan.execute(); - plan.getContext().getConnection().commit(); - return state; - } - - @Override - public int getLowestClusterHBaseVersion() { - return lowestClusterHBaseVersion; - } - - @Override - public boolean hasIndexWALCodec() { - return hasIndexWALCodec; - } - - /** - * Clears the Phoenix meta data cache on each region server - * @throws SQLException - */ - @Override - public long clearCache() throws SQLException { - synchronized (latestMetaDataLock) { - latestMetaData = newEmptyMetaData(); - } - tableStatsCache.invalidateAll(); - long startTime = 0L; - long systemCatalogRpcTime; - Map results; - try (Table htable = - this.getTable( - SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, - this.getProps()).getName())) { - try { - startTime = EnvironmentEdgeManager.currentTimeMillis(); - results = htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, new Batch.Call() { - @Override - public Long call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder(); - builder.setClientVersion( - VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, - PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.clearCache(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get().getUnfreedBytes(); - } - }); - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null,NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); - } catch(Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, NUM_SYSTEM_TABLE_RPC_FAILURES, 1); - 
throw ClientUtil.parseServerException(e); - } finally { - systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, - TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); - } - - long unfreedBytes = 0; - for (Map.Entry result : results.entrySet()) { - if (result.getValue() != null) { - unfreedBytes += result.getValue(); - } - } - return unfreedBytes; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } catch (Throwable e) { - // wrap all other exceptions in a SQLException - throw new SQLException(e); - } - } - - private void flushTable(byte[] tableName) throws SQLException { - Admin admin = getAdmin(); - try { - admin.flush(TableName.valueOf(tableName)); - } catch (IOException e) { - throw new PhoenixIOException(e); -// } catch (InterruptedException e) { -// // restore the interrupt status -// Thread.currentThread().interrupt(); -// throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build() -// .buildException(); - } finally { - Closeables.closeQuietly(admin); - } - } - - @Override - public void refreshLiveRegionServers() throws SQLException { - synchronized (liveRegionServersLock) { - try (Admin admin = getAdmin()) { - this.liveRegionServers = new ArrayList<>(admin.getRegionServers(true)); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - LOGGER.info("Refreshed list of live region servers."); - } - - @Override - public List getLiveRegionServers() { - return this.liveRegionServers; - } - - @Override - public Admin getAdmin() throws SQLException { - try { - return connection.getAdmin(); - } catch (IOException e) { - throw new PhoenixIOException(e); - } - } - - @Override - public MetaDataMutationResult updateIndexState(final List tableMetaData, String parentTableName) throws SQLException { - byte[][] rowKeyMetadata = new byte[3][]; - SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); - byte[] tableKey = - SchemaUtil.getTableKey(rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX], - rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX], - rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]); - byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - return metaDataCoprocessorExec( - SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, - SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), - tableKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder(); - for (Mutation m : tableMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.updateIndexState(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - } - - @Override - public MetaDataMutationResult updateIndexState(final List tableMetaData, String parentTableName, Map>> stmtProperties, PTable table) throws SQLException { - if (stmtProperties == null) { - return 
updateIndexState(tableMetaData,parentTableName); - } - - Map oldToNewTableDescriptors = - separateAndValidateProperties(table, stmtProperties, new HashSet<>(), new HashMap<>()); - TableDescriptor origTableDescriptor = this.getTableDescriptor(table.getPhysicalName().getBytes()); - TableDescriptor newTableDescriptor = oldToNewTableDescriptors.remove(origTableDescriptor); - Set modifiedTableDescriptors = Collections.emptySet(); - if (newTableDescriptor != null) { - modifiedTableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size()); - modifiedTableDescriptors.add(newTableDescriptor); - } - sendHBaseMetaData(modifiedTableDescriptors, true); - return updateIndexState(tableMetaData, parentTableName); - } - - @Override - public long createSequence(String tenantId, String schemaName, String sequenceName, - long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, - boolean cycle, long timestamp) throws SQLException { - SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets); - Sequence newSequences = new Sequence(sequenceKey); - Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences); - if (sequence == null) { - sequence = newSequences; - } - try { - sequence.getLock().lock(); - // Now that we have the lock we need, create the sequence - Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle); - Table htable = this.getTable(SchemaUtil - .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName()); - try { - Result result = htable.append(append); - return sequence.createSequence(result, minValue, maxValue, cycle); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } finally { - Closeables.closeQuietly(htable); - } - } finally { - sequence.getLock().unlock(); - } - } - - @Override - public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException { - SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets); - Sequence newSequences = new Sequence(sequenceKey); - Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences); - if (sequence == null) { - sequence = newSequences; - } - try { - sequence.getLock().lock(); - // Now that we have the lock we need, create the sequence - Append append = sequence.dropSequence(timestamp); - Table htable = this.getTable(SchemaUtil - .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName()); - try { - Result result = htable.append(append); - return sequence.dropSequence(result); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } finally { - Closeables.closeQuietly(htable); - } - } finally { - sequence.getLock().unlock(); - } - } - - /** - * Gets the current sequence value - * @throws SQLException if cached sequence cannot be found - */ - @Override - public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { - Sequence sequence = sequenceMap.get(sequenceKey); - if (sequence == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) - .setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) - .build().buildException(); - } - sequence.getLock().lock(); - try { - return sequence.currentValue(timestamp); - } catch (EmptySequenceCacheException e) { - throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) - .setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) - .build().buildException(); - } finally { - sequence.getLock().unlock(); - } - } - - /** - * Verifies that sequences exist and reserves values for them if reserveValues is true - */ - @Override - public void validateSequences(List sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { - incrementSequenceValues(sequenceAllocations, timestamp, values, exceptions, action); - } - - /** - * Increment any of the set of sequences that need more values. These are the sequences - * that are asking for the next value within a given statement. The returned sequences - * are the ones that were not found because they were deleted by another client. - * @param sequenceAllocations sorted list of sequence kyes - * @param timestamp - * @throws SQLException if any of the sequences cannot be found - * - */ - @Override - public void incrementSequences(List sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions) throws SQLException { - incrementSequenceValues(sequenceAllocations, timestamp, values, exceptions, Sequence.ValueOp.INCREMENT_SEQUENCE); - } - - private void incrementSequenceValues(List sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException { - List sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size()); - for (SequenceAllocation sequenceAllocation : sequenceAllocations) { - SequenceKey key = sequenceAllocation.getSequenceKey(); - Sequence newSequences = new Sequence(key); - Sequence sequence = getSequence(sequenceAllocation); - if (sequence == null) { - sequence = newSequences; - } - sequences.add(sequence); - } - try { - for (Sequence sequence : sequences) { - sequence.getLock().lock(); - } - // Now that we have all the locks we need, increment the sequences - List incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size()); - List toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size()); - int[] indexes = new int[sequences.size()]; - for (int i = 0; i < sequences.size(); i++) { - Sequence sequence = sequences.get(i); - try { - values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations()); - } catch (EmptySequenceCacheException e) { - indexes[toIncrementList.size()] = i; - toIncrementList.add(sequence); - Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations()); - incrementBatch.add(inc); - } catch (SQLException e) { - exceptions[i] = e; - } - } - if (toIncrementList.isEmpty()) { - return; - } - Table hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,this.getProps()).getName()); - Object[] resultObjects = new Object[incrementBatch.size()]; - SQLException sqlE = null; - try { - hTable.batch(incrementBatch, resultObjects); - } catch (IOException e) { - sqlE = ClientUtil.parseServerException(e); - } catch (InterruptedException e) { - // restore the interrupt status - Thread.currentThread().interrupt(); - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); // FIXME ? 
- } finally { - try { - hTable.close(); - } catch (IOException e) { - if (sqlE == null) { - sqlE = ClientUtil.parseServerException(e); - } else { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } - } - if (sqlE != null) { - throw sqlE; - } - } - for (int i=0;i compareSequenceKeysWithoutTenant(key, entry.getKey())) - .findFirst() - .map(Entry::getValue) - .orElse(null); - } else { - return sequence; - } - } - } - - private boolean compareSequenceKeysWithoutTenant(SequenceKey keyToCompare, SequenceKey availableKey) { - if (availableKey.getTenantId() != null) { - return false; - } - boolean sameSchema = keyToCompare.getSchemaName() == null ? availableKey.getSchemaName() == null : - keyToCompare.getSchemaName().equals(availableKey.getSchemaName()); - if (!sameSchema) { - return false; - } - return keyToCompare.getSequenceName().equals(availableKey.getSequenceName()); - } - - @Override - public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, - final long clientTS) throws SQLException { - // clear the meta data cache for the table here - boolean success = false; + public Thread newThread(Runnable r) { + Thread t = new Thread(r, NAME_PREFIX + threadNumber.getAndIncrement()); + t.setDaemon(true); + return t; + } + } + + private static int getSaltBuckets(TableAlreadyExistsException e) { + PTable table = e.getTable(); + Integer sequenceSaltBuckets = table == null ? null : table.getBucketNum(); + return sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets; + } + + @Override + public MutationState updateData(MutationPlan plan) throws SQLException { + MutationState state = plan.execute(); + plan.getContext().getConnection().commit(); + return state; + } + + @Override + public int getLowestClusterHBaseVersion() { + return lowestClusterHBaseVersion; + } + + @Override + public boolean hasIndexWALCodec() { + return hasIndexWALCodec; + } + + /** + * Clears the Phoenix meta data cache on each region server + */ + @Override + public long clearCache() throws SQLException { + synchronized (latestMetaDataLock) { + latestMetaData = newEmptyMetaData(); + } + tableStatsCache.invalidateAll(); + long startTime = 0L; + long systemCatalogRpcTime; + Map results; + try (Table htable = this.getTable( + SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()) + .getName())) { + try { + startTime = EnvironmentEdgeManager.currentTimeMillis(); + results = htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, new Batch.Call() { + @Override + public Long call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder(); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.clearCache(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get().getUnfreedBytes(); + } + }); + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_SYSTEM_TABLE_RPC_FAILURES, 1); + throw ClientUtil.parseServerException(e); + } finally { + systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + 
TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); + } + + long unfreedBytes = 0; + for (Map.Entry result : results.entrySet()) { + if (result.getValue() != null) { + unfreedBytes += result.getValue(); + } + } + return unfreedBytes; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } catch (Throwable e) { + // wrap all other exceptions in a SQLException + throw new SQLException(e); + } + } + + private void flushTable(byte[] tableName) throws SQLException { + Admin admin = getAdmin(); + try { + admin.flush(TableName.valueOf(tableName)); + } catch (IOException e) { + throw new PhoenixIOException(e); + // } catch (InterruptedException e) { + // // restore the interrupt status + // Thread.currentThread().interrupt(); + // throw new + // SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build() + // .buildException(); + } finally { + Closeables.closeQuietly(admin); + } + } + + @Override + public void refreshLiveRegionServers() throws SQLException { + synchronized (liveRegionServersLock) { + try (Admin admin = getAdmin()) { + this.liveRegionServers = new ArrayList<>(admin.getRegionServers(true)); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + LOGGER.info("Refreshed list of live region servers."); + } + + @Override + public List getLiveRegionServers() { + return this.liveRegionServers; + } + + @Override + public Admin getAdmin() throws SQLException { + try { + return connection.getAdmin(); + } catch (IOException e) { + throw new PhoenixIOException(e); + } + } + + @Override + public MetaDataMutationResult updateIndexState(final List tableMetaData, + String parentTableName) throws SQLException { + byte[][] rowKeyMetadata = new byte[3][]; + SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata); + byte[] tableKey = + SchemaUtil.getTableKey(rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX], + rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX], + rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]); + byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + return metaDataCoprocessorExec( + SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.props)).toString(), + tableKey, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder(); + for (Mutation m : tableMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.updateIndexState(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + } + + @Override + public MetaDataMutationResult updateIndexState(final List tableMetaData, + String parentTableName, Map>> stmtProperties, PTable table) + throws SQLException { + if (stmtProperties == null) { + return updateIndexState(tableMetaData, parentTableName); + } + + Map oldToNewTableDescriptors = + separateAndValidateProperties(table, 
stmtProperties, new HashSet<>(), new HashMap<>()); + TableDescriptor origTableDescriptor = + this.getTableDescriptor(table.getPhysicalName().getBytes()); + TableDescriptor newTableDescriptor = oldToNewTableDescriptors.remove(origTableDescriptor); + Set modifiedTableDescriptors = Collections.emptySet(); + if (newTableDescriptor != null) { + modifiedTableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size()); + modifiedTableDescriptors.add(newTableDescriptor); + } + sendHBaseMetaData(modifiedTableDescriptors, true); + return updateIndexState(tableMetaData, parentTableName); + } + + @Override + public long createSequence(String tenantId, String schemaName, String sequenceName, + long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, + long timestamp) throws SQLException { + SequenceKey sequenceKey = + new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets); + Sequence newSequences = new Sequence(sequenceKey); + Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences); + if (sequence == null) { + sequence = newSequences; + } + try { + sequence.getLock().lock(); + // Now that we have the lock we need, create the sequence + Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, + minValue, maxValue, cycle); + Table htable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()) + .getName()); + try { + Result result = htable.append(append); + return sequence.createSequence(result, minValue, maxValue, cycle); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } finally { + Closeables.closeQuietly(htable); + } + } finally { + sequence.getLock().unlock(); + } + } + + @Override + public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) + throws SQLException { + SequenceKey sequenceKey = + new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets); + Sequence newSequences = new Sequence(sequenceKey); + Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences); + if (sequence == null) { + sequence = newSequences; + } + try { + sequence.getLock().lock(); + // Now that we have the lock we need, create the sequence + Append append = sequence.dropSequence(timestamp); + Table htable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()) + .getName()); + try { + Result result = htable.append(append); + return sequence.dropSequence(result); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } finally { + Closeables.closeQuietly(htable); + } + } finally { + sequence.getLock().unlock(); + } + } + + /** + * Gets the current sequence value + * @throws SQLException if cached sequence cannot be found + */ + @Override + public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { + Sequence sequence = sequenceMap.get(sequenceKey); + if (sequence == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) + .setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) + .build().buildException(); + } + sequence.getLock().lock(); + try { + return sequence.currentValue(timestamp); + } catch (EmptySequenceCacheException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) + 
.setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) + .build().buildException(); + } finally { + sequence.getLock().unlock(); + } + } + + /** + * Verifies that sequences exist and reserves values for them if reserveValues is true + */ + @Override + public void validateSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { + incrementSequenceValues(sequenceAllocations, timestamp, values, exceptions, action); + } + + /** + * Increment any of the set of sequences that need more values. These are the sequences that are + * asking for the next value within a given statement. The returned sequences are the ones that + * were not found because they were deleted by another client. + * @param sequenceAllocations sorted list of sequence kyes + * @throws SQLException if any of the sequences cannot be found + */ + @Override + public void incrementSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions) throws SQLException { + incrementSequenceValues(sequenceAllocations, timestamp, values, exceptions, + Sequence.ValueOp.INCREMENT_SEQUENCE); + } + + private void incrementSequenceValues(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException { + List sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size()); + for (SequenceAllocation sequenceAllocation : sequenceAllocations) { + SequenceKey key = sequenceAllocation.getSequenceKey(); + Sequence newSequences = new Sequence(key); + Sequence sequence = getSequence(sequenceAllocation); + if (sequence == null) { + sequence = newSequences; + } + sequences.add(sequence); + } + try { + for (Sequence sequence : sequences) { + sequence.getLock().lock(); + } + // Now that we have all the locks we need, increment the sequences + List incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size()); + List toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size()); + int[] indexes = new int[sequences.size()]; + for (int i = 0; i < sequences.size(); i++) { + Sequence sequence = sequences.get(i); try { - SQLException sqlE = null; - long startTime = 0L; - long systemCatalogRpcTime; - Table htable = this.getTable(SchemaUtil - .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName()); - - try { - startTime = EnvironmentEdgeManager.currentTimeMillis(); - htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - new Batch.Call() { - @Override - public ClearTableFromCacheResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder(); - builder.setTenantId(ByteStringer.wrap(tenantId)); - builder.setTableName(ByteStringer.wrap(tableName)); - builder.setSchemaName(ByteStringer.wrap(schemaName)); - builder.setClientTimestamp(clientTS); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.clearTableFromCache(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - success = true; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } catch (Throwable e) { - sqlE = new 
SQLException(e); - } finally { - try { - htable.close(); - systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), - TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); - if (success) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), - NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); - } else { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), - NUM_SYSTEM_TABLE_RPC_FAILURES, 1); - } - } catch (IOException e) { - if (sqlE == null) { - sqlE = ClientUtil.parseServerException(e); - } else { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } - } finally { - if (sqlE != null) { throw sqlE; } - } - } - } catch (Exception e) { - throw new SQLException(ClientUtil.parseServerException(e)); - } - } - - @Override - public void returnSequences(List keys, long timestamp, SQLException[] exceptions) throws SQLException { - List sequences = Lists.newArrayListWithExpectedSize(keys.size()); - for (SequenceKey key : keys) { - Sequence newSequences = new Sequence(key); - Sequence sequence = sequenceMap.putIfAbsent(key, newSequences); - if (sequence == null) { - sequence = newSequences; - } - sequences.add(sequence); - } + values[i] = + sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations()); + } catch (EmptySequenceCacheException e) { + indexes[toIncrementList.size()] = i; + toIncrementList.add(sequence); + Increment inc = + sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations()); + incrementBatch.add(inc); + } catch (SQLException e) { + exceptions[i] = e; + } + } + if (toIncrementList.isEmpty()) { + return; + } + Table hTable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()) + .getName()); + Object[] resultObjects = new Object[incrementBatch.size()]; + SQLException sqlE = null; + try { + hTable.batch(incrementBatch, resultObjects); + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); // FIXME ? 
+ } finally { try { - for (Sequence sequence : sequences) { - sequence.getLock().lock(); - } - // Now that we have all the locks we need, attempt to return the unused sequence values - List mutations = Lists.newArrayListWithExpectedSize(sequences.size()); - List toReturnList = Lists.newArrayListWithExpectedSize(sequences.size()); - int[] indexes = new int[sequences.size()]; - for (int i = 0; i < sequences.size(); i++) { - Sequence sequence = sequences.get(i); - try { - Append append = sequence.newReturn(timestamp); - toReturnList.add(sequence); - mutations.add(append); - } catch (EmptySequenceCacheException ignore) { // Nothing to return, so ignore - } - } - if (toReturnList.isEmpty()) { - return; - } - Table hTable = this.getTable(SchemaUtil - .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName()); - Object[] resultObjects = null; - SQLException sqlE = null; - try { - hTable.batch(mutations, resultObjects); - } catch (IOException e){ - sqlE = ClientUtil.parseServerException(e); - } catch (InterruptedException e){ - // restore the interrupt status - Thread.currentThread().interrupt(); - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); // FIXME ? - } finally { - try { - hTable.close(); - } catch (IOException e) { - if (sqlE == null) { - sqlE = ClientUtil.parseServerException(e); - } else { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } - } - if (sqlE != null) { - throw sqlE; - } - } - for (int i=0;i sequenceMap) throws SQLException { - List mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size()); - for (Sequence sequence : sequenceMap.values()) { - mutations.addAll(sequence.newReturns()); + hTable.close(); + } catch (IOException e) { + if (sqlE == null) { + sqlE = ClientUtil.parseServerException(e); + } else { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } } - if (mutations.isEmpty()) { - return; + if (sqlE != null) { + throw sqlE; } - Table hTable = this.getTable( - SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName()); - SQLException sqlE = null; + } + for (int i = 0; i < resultObjects.length; i++) { + Sequence sequence = toIncrementList.get(i); + Result result = (Result) resultObjects[i]; + try { + long numToAllocate = Bytes.toLong( + incrementBatch.get(i).getAttribute(SequenceRegionObserverConstants.NUM_TO_ALLOCATE)); + values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate); + } catch (SQLException e) { + exceptions[indexes[i]] = e; + } + } + } finally { + for (Sequence sequence : sequences) { + sequence.getLock().unlock(); + } + } + } + + /** + * checks if sequenceAllocation's sequence there in sequenceMap, also returns Global Sequences + * from Tenant sequenceAllocations + */ + + private Sequence getSequence(SequenceAllocation sequenceAllocation) { + SequenceKey key = sequenceAllocation.getSequenceKey(); + if (key.getTenantId() == null) { + return sequenceMap.putIfAbsent(key, new Sequence(key)); + } else { + Sequence sequence = sequenceMap.get(key); + if (sequence == null) { + return sequenceMap.entrySet().stream() + .filter(entry -> compareSequenceKeysWithoutTenant(key, entry.getKey())).findFirst() + .map(Entry::getValue).orElse(null); + } else { + return sequence; + } + } + } + + private boolean compareSequenceKeysWithoutTenant(SequenceKey keyToCompare, + SequenceKey availableKey) { + if (availableKey.getTenantId() != null) { + return false; + } + boolean 
sameSchema = keyToCompare.getSchemaName() == null + ? availableKey.getSchemaName() == null + : keyToCompare.getSchemaName().equals(availableKey.getSchemaName()); + if (!sameSchema) { + return false; + } + return keyToCompare.getSequenceName().equals(availableKey.getSequenceName()); + } + + @Override + public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, + final byte[] tableName, final long clientTS) throws SQLException { + // clear the meta data cache for the table here + boolean success = false; + try { + SQLException sqlE = null; + long startTime = 0L; + long systemCatalogRpcTime; + Table htable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()) + .getName()); + + try { + startTime = EnvironmentEdgeManager.currentTimeMillis(); + htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, new Batch.Call() { + @Override + public ClearTableFromCacheResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder(); + builder.setTenantId(ByteStringer.wrap(tenantId)); + builder.setTableName(ByteStringer.wrap(tableName)); + builder.setSchemaName(ByteStringer.wrap(schemaName)); + builder.setClientTimestamp(clientTS); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.clearTableFromCache(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + success = true; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } catch (Throwable e) { + sqlE = new SQLException(e); + } finally { try { - hTable.batch(mutations, null); + htable.close(); + systemCatalogRpcTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), + TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, systemCatalogRpcTime); + if (success) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), + NUM_SYSTEM_TABLE_RPC_SUCCESS, 1); + } else { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(Bytes.toString(tableName), + NUM_SYSTEM_TABLE_RPC_FAILURES, 1); + } } catch (IOException e) { + if (sqlE == null) { sqlE = ClientUtil.parseServerException(e); - } catch (InterruptedException e) { - // restore the interrupt status - Thread.currentThread().interrupt(); - sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); // FIXME ? + } else { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } } finally { - try { - hTable.close(); - } catch (IOException e) { - if (sqlE == null) { - sqlE = ClientUtil.parseServerException(e); - } else { - sqlE.setNextException(ClientUtil.parseServerException(e)); - } - } - if (sqlE != null) { - throw sqlE; - } - } - } - - @Override - public void addConnection(PhoenixConnection connection) throws SQLException { - if (returnSequenceValues || shouldThrottleNumConnections) { - synchronized (connectionCountLock) { - connectionLimiter.acquireConnection(connection); - } + if (sqlE != null) { + throw sqlE; + } } - // If lease renewal isn't enabled, these are never cleaned up. 
Tracking when renewals - // aren't enabled also (presently) has no purpose. - if (isRenewingLeasesEnabled()) { - connectionQueues.get(getQueueIndex(connection)).add(new WeakReference(connection)); - } - } - - @Override - public void removeConnection(PhoenixConnection connection) throws SQLException { - if (returnSequenceValues) { - ConcurrentMap formerSequenceMap = null; - synchronized (connectionCountLock) { - if (!connection.isInternalConnection()) { - if (connectionLimiter.isLastConnection()) { - if (!this.sequenceMap.isEmpty()) { - formerSequenceMap = this.sequenceMap; - this.sequenceMap = Maps.newConcurrentMap(); - } - } - } - } - // Since we're using the former sequenceMap, we can do this outside - // the lock. - if (formerSequenceMap != null) { - // When there are no more connections, attempt to return any sequences - returnAllSequences(formerSequenceMap); - } - } - if (returnSequenceValues || connectionLimiter.isShouldThrottleNumConnections()) { //still need to decrement connection count - synchronized (connectionCountLock) { - connectionLimiter.returnConnection(connection); - } - } - } - - private int getQueueIndex(PhoenixConnection conn) { - return ThreadLocalRandom.current().nextInt(renewLeasePoolSize); - } - - @Override - public KeyValueBuilder getKeyValueBuilder() { - return this.kvBuilder; - } - - @Override - public boolean supportsFeature(Feature feature) { - FeatureSupported supported = featureMap.get(feature); - if (supported == null) { - return false; - } - return supported.isSupported(this); - } - - @Override - public String getUserName() { - return userName; - } - - @Override - public User getUser() { - return user; - } - - @VisibleForTesting - public void checkClosed() { - if (closed) { - throwConnectionClosedException(); - } - } - - private void throwConnectionClosedIfNullMetaData() { - if (latestMetaData == null) { - throwConnectionClosedException(); - } - } - - private void throwConnectionClosedException() { - throw new IllegalStateException("Connection to the cluster is closed"); + } + } catch (Exception e) { + throw new SQLException(ClientUtil.parseServerException(e)); } + } - @Override - public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException { + @Override + public void returnSequences(List keys, long timestamp, SQLException[] exceptions) + throws SQLException { + List sequences = Lists.newArrayListWithExpectedSize(keys.size()); + for (SequenceKey key : keys) { + Sequence newSequences = new Sequence(key); + Sequence sequence = sequenceMap.putIfAbsent(key, newSequences); + if (sequence == null) { + sequence = newSequences; + } + sequences.add(sequence); + } + try { + for (Sequence sequence : sequences) { + sequence.getLock().lock(); + } + // Now that we have all the locks we need, attempt to return the unused sequence values + List mutations = Lists.newArrayListWithExpectedSize(sequences.size()); + List toReturnList = Lists.newArrayListWithExpectedSize(sequences.size()); + int[] indexes = new int[sequences.size()]; + for (int i = 0; i < sequences.size(); i++) { + Sequence sequence = sequences.get(i); try { - return tableStatsCache.get(key); - } catch (ExecutionException e) { - throw ClientUtil.parseServerException(e); + Append append = sequence.newReturn(timestamp); + toReturnList.add(sequence); + mutations.add(append); + } catch (EmptySequenceCacheException ignore) { // Nothing to return, so ignore + } + } + if (toReturnList.isEmpty()) { + return; + } + Table hTable = this.getTable(SchemaUtil + 
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()) + .getName()); + Object[] resultObjects = null; + SQLException sqlE = null; + try { + hTable.batch(mutations, resultObjects); + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); // FIXME ? + } finally { + try { + hTable.close(); + } catch (IOException e) { + if (sqlE == null) { + sqlE = ClientUtil.parseServerException(e); + } else { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } } - } - - @Override - public int getSequenceSaltBuckets() { - return nSequenceSaltBuckets; - } - - @Override - public void addFunction(PFunction function) throws SQLException { - synchronized (latestMetaDataLock) { - try { - throwConnectionClosedIfNullMetaData(); - // If existing table isn't older than new table, don't replace - // If a client opens a connection at an earlier timestamp, this can happen - PFunction existingFunction = latestMetaData.getFunction(new PTableKey(function.getTenantId(), function.getFunctionName())); - if (existingFunction.getTimeStamp() >= function.getTimeStamp()) { - return; - } - } catch (FunctionNotFoundException e) {} - latestMetaData.addFunction(function); - latestMetaDataLock.notifyAll(); + if (sqlE != null) { + throw sqlE; } - } - - @Override - public void removeFunction(PName tenantId, String function, long functionTimeStamp) - throws SQLException { - synchronized (latestMetaDataLock) { - throwConnectionClosedIfNullMetaData(); - latestMetaData.removeFunction(tenantId, function, functionTimeStamp); - latestMetaDataLock.notifyAll(); + } + for (int i = 0; i < resultObjects.length; i++) { + Sequence sequence = toReturnList.get(i); + Result result = (Result) resultObjects[i]; + try { + sequence.returnValue(result); + } catch (SQLException e) { + exceptions[indexes[i]] = e; } + } + } finally { + for (Sequence sequence : sequences) { + sequence.getLock().unlock(); + } } + } - @Override - public MetaDataMutationResult getFunctions(PName tenantId, final List> functions, - final long clientTimestamp) throws SQLException { - final byte[] tenantIdBytes = tenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); - return metaDataCoprocessorExec(null, tenantIdBytes, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - GetFunctionsRequest.Builder builder = GetFunctionsRequest.newBuilder(); - builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); - for (Pair function: functions) { - builder.addFunctionNames(ByteStringer.wrap(function.getFirst())); - builder.addFunctionTimestamps(function.getSecond().longValue()); - } - builder.setClientTimestamp(clientTimestamp); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.getFunctions(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }, SYSTEM_FUNCTION_NAME_BYTES); - - } - - @Override - public MetaDataMutationResult getSchema(final String schemaName, final long clientTimestamp) throws SQLException { - return metaDataCoprocessorExec(null, SchemaUtil.getSchemaKey(schemaName), - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - GetSchemaRequest.Builder builder = GetSchemaRequest.newBuilder(); - builder.setSchemaName(schemaName); - builder.setClientTimestamp(clientTimestamp); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, - PHOENIX_PATCH_NUMBER)); - instance.getSchema(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - - } - - // TODO the mutations should be added to System functions table. 
- @Override - public MetaDataMutationResult createFunction(final List functionData, - final PFunction function, final boolean temporary) throws SQLException { - byte[][] rowKeyMetadata = new byte[2][]; - Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(functionData); - byte[] key = m.getRow(); - SchemaUtil.getVarChars(key, rowKeyMetadata); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; - byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes); - MetaDataMutationResult result = metaDataCoprocessorExec(null, functionKey, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - CreateFunctionRequest.Builder builder = CreateFunctionRequest.newBuilder(); - for (Mutation m : functionData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setTemporary(temporary); - builder.setReplace(function.isReplace()); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.createFunction(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }, SYSTEM_FUNCTION_NAME_BYTES); - return result; - } - - @VisibleForTesting - static class RenewLeaseTask implements Runnable { - - private final LinkedBlockingQueue> connectionsQueue; - private final Random random = new Random(); - private static final int MAX_WAIT_TIME = 1000; - - RenewLeaseTask(LinkedBlockingQueue> queue) { - this.connectionsQueue = queue; - } - - private void waitForRandomDuration() throws InterruptedException { - new CountDownLatch(1).await(random.nextInt(MAX_WAIT_TIME), MILLISECONDS); - } - - private static class InternalRenewLeaseTaskException extends Exception { - public InternalRenewLeaseTaskException(String msg) { - super(msg); - } - } - + // Take no locks, as this only gets run when there are no open connections + // so there's no danger of contention. + private void returnAllSequences(ConcurrentMap sequenceMap) + throws SQLException { + List mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size()); + for (Sequence sequence : sequenceMap.values()) { + mutations.addAll(sequence.newReturns()); + } + if (mutations.isEmpty()) { + return; + } + Table hTable = this.getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()) + .getName()); + SQLException sqlE = null; + try { + hTable.batch(mutations, null); + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } catch (InterruptedException e) { + // restore the interrupt status + Thread.currentThread().interrupt(); + sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e) + .build().buildException(); // FIXME ? 
+ } finally { + try { + hTable.close(); + } catch (IOException e) { + if (sqlE == null) { + sqlE = ClientUtil.parseServerException(e); + } else { + sqlE.setNextException(ClientUtil.parseServerException(e)); + } + } + if (sqlE != null) { + throw sqlE; + } + } + } + + @Override + public void addConnection(PhoenixConnection connection) throws SQLException { + if (returnSequenceValues || shouldThrottleNumConnections) { + synchronized (connectionCountLock) { + connectionLimiter.acquireConnection(connection); + } + } + // If lease renewal isn't enabled, these are never cleaned up. Tracking when renewals + // aren't enabled also (presently) has no purpose. + if (isRenewingLeasesEnabled()) { + connectionQueues.get(getQueueIndex(connection)) + .add(new WeakReference(connection)); + } + } + + @Override + public void removeConnection(PhoenixConnection connection) throws SQLException { + if (returnSequenceValues) { + ConcurrentMap formerSequenceMap = null; + synchronized (connectionCountLock) { + if (!connection.isInternalConnection()) { + if (connectionLimiter.isLastConnection()) { + if (!this.sequenceMap.isEmpty()) { + formerSequenceMap = this.sequenceMap; + this.sequenceMap = Maps.newConcurrentMap(); + } + } + } + } + // Since we're using the former sequenceMap, we can do this outside + // the lock. + if (formerSequenceMap != null) { + // When there are no more connections, attempt to return any sequences + returnAllSequences(formerSequenceMap); + } + } + if (returnSequenceValues || connectionLimiter.isShouldThrottleNumConnections()) { // still need + // to + // decrement + // connection + // count + synchronized (connectionCountLock) { + connectionLimiter.returnConnection(connection); + } + } + } + + private int getQueueIndex(PhoenixConnection conn) { + return ThreadLocalRandom.current().nextInt(renewLeasePoolSize); + } + + @Override + public KeyValueBuilder getKeyValueBuilder() { + return this.kvBuilder; + } + + @Override + public boolean supportsFeature(Feature feature) { + FeatureSupported supported = featureMap.get(feature); + if (supported == null) { + return false; + } + return supported.isSupported(this); + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public User getUser() { + return user; + } + + @VisibleForTesting + public void checkClosed() { + if (closed) { + throwConnectionClosedException(); + } + } + + private void throwConnectionClosedIfNullMetaData() { + if (latestMetaData == null) { + throwConnectionClosedException(); + } + } + + private void throwConnectionClosedException() { + throw new IllegalStateException("Connection to the cluster is closed"); + } + + @Override + public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException { + try { + return tableStatsCache.get(key); + } catch (ExecutionException e) { + throw ClientUtil.parseServerException(e); + } + } + + @Override + public int getSequenceSaltBuckets() { + return nSequenceSaltBuckets; + } + + @Override + public void addFunction(PFunction function) throws SQLException { + synchronized (latestMetaDataLock) { + try { + throwConnectionClosedIfNullMetaData(); + // If existing table isn't older than new table, don't replace + // If a client opens a connection at an earlier timestamp, this can happen + PFunction existingFunction = latestMetaData + .getFunction(new PTableKey(function.getTenantId(), function.getFunctionName())); + if (existingFunction.getTimeStamp() >= function.getTimeStamp()) { + return; + } + } catch (FunctionNotFoundException e) { + } + 
latestMetaData.addFunction(function); + latestMetaDataLock.notifyAll(); + } + } + + @Override + public void removeFunction(PName tenantId, String function, long functionTimeStamp) + throws SQLException { + synchronized (latestMetaDataLock) { + throwConnectionClosedIfNullMetaData(); + latestMetaData.removeFunction(tenantId, function, functionTimeStamp); + latestMetaDataLock.notifyAll(); + } + } + + @Override + public MetaDataMutationResult getFunctions(PName tenantId, + final List> functions, final long clientTimestamp) throws SQLException { + final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); + return metaDataCoprocessorExec(null, tenantIdBytes, + new Batch.Call() { @Override - public void run() { - try { - int numConnections = connectionsQueue.size(); - boolean wait = true; - // We keep adding items to the end of the queue. So to stop the loop, iterate only up to - // whatever the current count is. - while (numConnections > 0) { - if (wait) { - // wait for some random duration to prevent all threads from renewing lease at - // the same time. - waitForRandomDuration(); - wait = false; - } - // It is guaranteed that this poll won't hang indefinitely because this is the - // only thread that removes items from the queue. Still adding a 1 ms timeout - // for sanity check. - WeakReference connRef = - connectionsQueue.poll(1, TimeUnit.MILLISECONDS); - if (connRef == null) { - throw new InternalRenewLeaseTaskException( - "Connection ref found to be null. This is a bug. Some other thread removed items from the connection queue."); - } - PhoenixConnection conn = connRef.get(); - if (conn != null && !conn.isClosed()) { - LinkedBlockingQueue> scannerQueue = - conn.getScanners(); - // We keep adding items to the end of the queue. So to stop the loop, - // iterate only up to whatever the current count is. - int numScanners = scannerQueue.size(); - int renewed = 0; - long start = EnvironmentEdgeManager.currentTimeMillis(); - while (numScanners > 0) { - // It is guaranteed that this poll won't hang indefinitely because this is the - // only thread that removes items from the queue. Still adding a 1 ms timeout - // for sanity check. - WeakReference ref = - scannerQueue.poll(1, TimeUnit.MILLISECONDS); - if (ref == null) { - throw new InternalRenewLeaseTaskException( - "TableResulIterator ref found to be null. This is a bug. Some other thread removed items from the scanner queue."); - } - TableResultIterator scanningItr = ref.get(); - if (scanningItr != null) { - RenewLeaseStatus status = scanningItr.renewLease(); - switch (status) { - case RENEWED: - renewed++; - // add it back at the tail - scannerQueue.offer(new WeakReference( - scanningItr)); - LOGGER.info("Lease renewed for scanner: " + scanningItr); - break; - // Scanner not initialized probably because next() hasn't been called on it yet. Enqueue it back to attempt lease renewal later. - case UNINITIALIZED: - // Threshold not yet reached. Re-enqueue to renew lease later. - case THRESHOLD_NOT_REACHED: - // Another scanner operation in progress. Re-enqueue to attempt renewing lease later. - case LOCK_NOT_ACQUIRED: - // add it back at the tail - scannerQueue.offer(new WeakReference( - scanningItr)); - break; - // if lease wasn't renewed or scanner was closed, don't add the - // scanner back to the queue. 
- case CLOSED: - case NOT_SUPPORTED: - break; - } - } - numScanners--; - } - if (renewed > 0) { - LOGGER.info("Renewed leases for " + renewed + " scanner/s in " - + (EnvironmentEdgeManager.currentTimeMillis() - start) + " ms "); - } - connectionsQueue.offer(connRef); - } - numConnections--; - } - } catch (InternalRenewLeaseTaskException e) { - LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e); - // clear up the queue since the task is about to be unscheduled. - connectionsQueue.clear(); - // throw an exception since we want the task execution to be suppressed because we just encountered an - // exception that happened because of a bug. - throw new RuntimeException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); // restore the interrupt status - LOGGER.error("Thread interrupted when renewing lease.", e); - } catch (Exception e) { - LOGGER.error("Exception thrown when renewing lease ", e); - // don't drain the queue and swallow the exception in this case since we don't want the task - // execution to be suppressed because renewing lease of a scanner failed. - } catch (Throwable e) { - LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e); - connectionsQueue.clear(); // clear up the queue since the task is about to be unscheduled. - throw new RuntimeException(e); - } + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + GetFunctionsRequest.Builder builder = GetFunctionsRequest.newBuilder(); + builder.setTenantId(ByteStringer.wrap(tenantIdBytes)); + for (Pair function : functions) { + builder.addFunctionNames(ByteStringer.wrap(function.getFirst())); + builder.addFunctionTimestamps(function.getSecond().longValue()); + } + builder.setClientTimestamp(clientTimestamp); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.getFunctions(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }, SYSTEM_FUNCTION_NAME_BYTES); + + } + + @Override + public MetaDataMutationResult getSchema(final String schemaName, final long clientTimestamp) + throws SQLException { + return metaDataCoprocessorExec(null, SchemaUtil.getSchemaKey(schemaName), + new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + GetSchemaRequest.Builder builder = GetSchemaRequest.newBuilder(); + builder.setSchemaName(schemaName); + builder.setClientTimestamp(clientTimestamp); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.getSchema(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + + } + + // TODO the mutations should be added to System functions table. 
+ @Override + public MetaDataMutationResult createFunction(final List functionData, + final PFunction function, final boolean temporary) throws SQLException { + byte[][] rowKeyMetadata = new byte[2][]; + Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(functionData); + byte[] key = m.getRow(); + SchemaUtil.getVarChars(key, rowKeyMetadata); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; + byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes); + MetaDataMutationResult result = metaDataCoprocessorExec(null, functionKey, + new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(SYSTEM_FUNCTION_HBASE_TABLE_NAME); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + CreateFunctionRequest.Builder builder = CreateFunctionRequest.newBuilder(); + for (Mutation m : functionData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setTemporary(temporary); + builder.setReplace(function.isReplace()); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.createFunction(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); } - } - - @Override - public long getRenewLeaseThresholdMilliSeconds() { - return renewLeaseThreshold; - } + }, SYSTEM_FUNCTION_NAME_BYTES); + return result; + } - @Override - public boolean isRenewingLeasesEnabled() { - return supportsFeature(ConnectionQueryServices.Feature.RENEW_LEASE) && renewLeaseEnabled; - } + @VisibleForTesting + static class RenewLeaseTask implements Runnable { - @Override - public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException { - /* - * Use HConnection.getRegionLocation as it uses the cache in HConnection, to get the region - * to which specified row belongs to. 
- */ - int retryCount = 0, maxRetryCount = 1; - while (true) { - TableName table = TableName.valueOf(tableName); - try { - return connection.getRegionLocator(table).getRegionLocation(row, false); - } catch (org.apache.hadoop.hbase.TableNotFoundException e) { - String fullName = Bytes.toString(tableName); - throw new TableNotFoundException(SchemaUtil.getSchemaNameFromFullName(fullName), SchemaUtil.getTableNameFromFullName(fullName)); - } catch (IOException e) { - LOGGER.error("Exception encountered in getTableRegionLocation for " - + "table: {}, retryCount: {}", table.getNameAsString(), retryCount, e); - if (retryCount++ < maxRetryCount) { // One retry, in case split occurs while navigating - continue; - } - throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL) - .setRootCause(e).build().buildException(); - } - } - } + private final LinkedBlockingQueue> connectionsQueue; + private final Random random = new Random(); + private static final int MAX_WAIT_TIME = 1000; - @Override - public MetaDataMutationResult createSchema(final List schemaMutations, final String schemaName) - throws SQLException { - ensureNamespaceCreated(schemaName); - Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations); - byte[] key = m.getRow(); - MetaDataMutationResult result = metaDataCoprocessorExec(null, key, - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - CreateSchemaRequest.Builder builder = CreateSchemaRequest.newBuilder(); - for (Mutation m : schemaMutations) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setSchemaName(schemaName); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, - PHOENIX_PATCH_NUMBER)); - instance.createSchema(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - return result; + RenewLeaseTask(LinkedBlockingQueue> queue) { + this.connectionsQueue = queue; } - @Override - public void addSchema(PSchema schema) throws SQLException { - latestMetaData.addSchema(schema); + private void waitForRandomDuration() throws InterruptedException { + new CountDownLatch(1).await(random.nextInt(MAX_WAIT_TIME), MILLISECONDS); } - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - latestMetaData.removeSchema(schema, schemaTimeStamp); + private static class InternalRenewLeaseTaskException extends Exception { + public InternalRenewLeaseTaskException(String msg) { + super(msg); + } } @Override - public MetaDataMutationResult dropSchema(final List schemaMetaData, final String schemaName) - throws SQLException { - final MetaDataMutationResult result = metaDataCoprocessorExec(null, SchemaUtil.getSchemaKey(schemaName), - new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - RpcController controller = getController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback(); - DropSchemaRequest.Builder builder = DropSchemaRequest.newBuilder(); - for (Mutation m : schemaMetaData) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addSchemaMetadataMutations(mp.toByteString()); + public void run() { + try { + int numConnections = connectionsQueue.size(); + boolean wait = true; + // We keep adding items to the end of the queue. 
So to stop the loop, iterate only up to + // whatever the current count is. + while (numConnections > 0) { + if (wait) { + // wait for some random duration to prevent all threads from renewing lease at + // the same time. + waitForRandomDuration(); + wait = false; + } + // It is guaranteed that this poll won't hang indefinitely because this is the + // only thread that removes items from the queue. Still adding a 1 ms timeout + // for sanity check. + WeakReference connRef = + connectionsQueue.poll(1, TimeUnit.MILLISECONDS); + if (connRef == null) { + throw new InternalRenewLeaseTaskException( + "Connection ref found to be null. This is a bug. Some other thread removed items from the connection queue."); + } + PhoenixConnection conn = connRef.get(); + if (conn != null && !conn.isClosed()) { + LinkedBlockingQueue> scannerQueue = + conn.getScanners(); + // We keep adding items to the end of the queue. So to stop the loop, + // iterate only up to whatever the current count is. + int numScanners = scannerQueue.size(); + int renewed = 0; + long start = EnvironmentEdgeManager.currentTimeMillis(); + while (numScanners > 0) { + // It is guaranteed that this poll won't hang indefinitely because this is the + // only thread that removes items from the queue. Still adding a 1 ms timeout + // for sanity check. + WeakReference ref = scannerQueue.poll(1, TimeUnit.MILLISECONDS); + if (ref == null) { + throw new InternalRenewLeaseTaskException( + "TableResulIterator ref found to be null. This is a bug. Some other thread removed items from the scanner queue."); + } + TableResultIterator scanningItr = ref.get(); + if (scanningItr != null) { + RenewLeaseStatus status = scanningItr.renewLease(); + switch (status) { + case RENEWED: + renewed++; + // add it back at the tail + scannerQueue.offer(new WeakReference(scanningItr)); + LOGGER.info("Lease renewed for scanner: " + scanningItr); + break; + // Scanner not initialized probably because next() hasn't been called on it yet. + // Enqueue it back to attempt lease renewal later. + case UNINITIALIZED: + // Threshold not yet reached. Re-enqueue to renew lease later. + case THRESHOLD_NOT_REACHED: + // Another scanner operation in progress. Re-enqueue to attempt renewing lease + // later. + case LOCK_NOT_ACQUIRED: + // add it back at the tail + scannerQueue.offer(new WeakReference(scanningItr)); + break; + // if lease wasn't renewed or scanner was closed, don't add the + // scanner back to the queue. 
+ case CLOSED: + case NOT_SUPPORTED: + break; } - builder.setSchemaName(schemaName); - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, - PHOENIX_PATCH_NUMBER)); - instance.dropSchema(controller, builder.build(), rpcCallback); - checkForRemoteExceptions(controller); - return rpcCallback.get(); - } - }); - - final MutationCode code = result.getMutationCode(); - switch (code) { - case SCHEMA_ALREADY_EXISTS: - ReadOnlyProps props = this.getProps(); - boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - if (dropMetadata) { - ensureNamespaceDropped(schemaName); - } - break; - default: - break; - } - return result; - } - - private void ensureNamespaceDropped(String schemaName) throws SQLException { - SQLException sqlE = null; - try (Admin admin = getAdmin()) { - final String quorum = ZKConfig.getZKQuorumServersString(config); - final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT); - LOGGER.debug("Found quorum: " + quorum + ":" + znode); - if (ClientUtil.isHBaseNamespaceAvailable(admin, schemaName)) { - admin.deleteNamespace(schemaName); - } - } catch (IOException e) { - sqlE = ClientUtil.parseServerException(e); - } finally { - if (sqlE != null) { throw sqlE; } - } - } - - /** - * Manually adds {@link GuidePostsInfo} for a table to the client-side cache. Not a - * {@link ConnectionQueryServices} method. Exposed for testing purposes. - * - * @param key Table name - * @param info Stats instance - */ - public void addTableStats(GuidePostsKey key, GuidePostsInfo info) { - this.tableStatsCache.put(Objects.requireNonNull(key), Objects.requireNonNull(info)); - } - - @Override - public void invalidateStats(GuidePostsKey key) { - this.tableStatsCache.invalidate(Objects.requireNonNull(key)); - } - - @Override - public boolean isUpgradeRequired() { - return upgradeRequired.get(); - } - - @Override - public void clearUpgradeRequired() { - upgradeRequired.set(false); - } - - @Override - public Configuration getConfiguration() { - return config; - } - - @Override - public QueryLoggerDisruptor getQueryDisruptor() { - return this.queryDisruptor; - } - - @Override - public synchronized PhoenixTransactionClient initTransactionClient(Provider provider) throws SQLException { - PhoenixTransactionClient client = txClients[provider.ordinal()]; - if (client == null) { - client = txClients[provider.ordinal()] = provider.getTransactionProvider().getTransactionClient(config, connectionInfo); - } - return client; - } - - @VisibleForTesting - public List>> getCachedConnections() { - return connectionQueues; - } - - /** - * Invalidate metadata cache from all region servers for the given list of - * InvalidateServerMetadataCacheRequest. 
- * @throws Throwable - */ - public void invalidateServerMetadataCache(List requests) - throws Throwable { - boolean invalidateCacheEnabled = - config.getBoolean(PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED, - DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED); - if (!invalidateCacheEnabled) { - LOGGER.info("Skip invalidating server metadata cache since conf property" - + " phoenix.metadata.invalidate.cache.enabled is set to false"); - return; - } - if (!QueryUtil.isServerConnection(props)) { - LOGGER.warn(INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE); - throw new Exception(INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE); - } - - metricsMetadataCachingSource.incrementMetadataCacheInvalidationOperationsCount(); - Admin admin = getInvalidateMetadataCacheConnection().getAdmin(); - // This will incur an extra RPC to the master. This RPC is required since we want to - // get current list of regionservers. - Collection serverNames = admin.getRegionServers(true); - PhoenixStopWatch stopWatch = new PhoenixStopWatch().start(); - try { - invalidateServerMetadataCacheWithRetries(admin, serverNames, requests, false); - metricsMetadataCachingSource.incrementMetadataCacheInvalidationSuccessCount(); - } catch (Throwable t) { - metricsMetadataCachingSource.incrementMetadataCacheInvalidationFailureCount(); - throw t; - } finally { - metricsMetadataCachingSource - .addMetadataCacheInvalidationTotalTime(stopWatch.stop().elapsedMillis()); - } - } - - /** - * Invalidate metadata cache on all regionservers with retries for the given list of - * InvalidateServerMetadataCacheRequest. Each InvalidateServerMetadataCacheRequest contains - * tenantID, schema name and table name. - * We retry once before failing the operation. - * - * @param admin - * @param serverNames - * @param invalidateCacheRequests - * @param isRetry - * @throws Throwable + } + numScanners--; + } + if (renewed > 0) { + LOGGER.info("Renewed leases for " + renewed + " scanner/s in " + + (EnvironmentEdgeManager.currentTimeMillis() - start) + " ms "); + } + connectionsQueue.offer(connRef); + } + numConnections--; + } + } catch (InternalRenewLeaseTaskException e) { + LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e); + // clear up the queue since the task is about to be unscheduled. + connectionsQueue.clear(); + // throw an exception since we want the task execution to be suppressed because we just + // encountered an + // exception that happened because of a bug. + throw new RuntimeException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); // restore the interrupt status + LOGGER.error("Thread interrupted when renewing lease.", e); + } catch (Exception e) { + LOGGER.error("Exception thrown when renewing lease ", e); + // don't drain the queue and swallow the exception in this case since we don't want the task + // execution to be suppressed because renewing lease of a scanner failed. + } catch (Throwable e) { + LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e); + connectionsQueue.clear(); // clear up the queue since the task is about to be unscheduled. 
+ throw new RuntimeException(e); + } + } + } + + @Override + public long getRenewLeaseThresholdMilliSeconds() { + return renewLeaseThreshold; + } + + @Override + public boolean isRenewingLeasesEnabled() { + return supportsFeature(ConnectionQueryServices.Feature.RENEW_LEASE) && renewLeaseEnabled; + } + + @Override + public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException { + /* + * Use HConnection.getRegionLocation as it uses the cache in HConnection, to get the region to + * which specified row belongs to. */ - private void invalidateServerMetadataCacheWithRetries(Admin admin, - Collection serverNames, - List invalidateCacheRequests, - boolean isRetry) throws Throwable { - RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest protoRequest = - getRequest(invalidateCacheRequests); - List> futures = new ArrayList<>(); - Map map = new HashMap<>(); - int poolSize = config.getInt( - PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE, - QueryServicesOptions.DEFAULT_PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE); - ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("metadata-cache-invalidation-pool-%d"); - ExecutorService executor = Executors.newFixedThreadPool(poolSize, builder.build()); - for (ServerName serverName : serverNames) { - CompletableFuture future = CompletableFuture.runAsync(() -> { - try { - PhoenixStopWatch innerWatch = new PhoenixStopWatch().start(); - for (InvalidateServerMetadataCacheRequest invalidateCacheRequest - : invalidateCacheRequests) { - LOGGER.info("Sending invalidate metadata cache for {} to region server:" - + " {}", invalidateCacheRequest.toString(), serverName); - } - - RegionServerEndpointProtos.RegionServerEndpointService.BlockingInterface - service = RegionServerEndpointProtos.RegionServerEndpointService - .newBlockingStub(admin.coprocessorService(serverName)); - // The timeout for this particular request is managed by config parameter: - // hbase.rpc.timeout. Even if the future times out, this runnable can be in - // RUNNING state and will not be interrupted. - // We use the controller set in hbase connection. - service.invalidateServerMetadataCache(null, protoRequest); - long cacheInvalidationTime = innerWatch.stop().elapsedMillis(); - LOGGER.info("Invalidating metadata cache" - + " on region server: {} completed successfully and it took {} ms", - serverName, cacheInvalidationTime); - metricsMetadataCachingSource - .addMetadataCacheInvalidationRpcTime(cacheInvalidationTime); - } catch (ServiceException se) { - LOGGER.error("Invalidating metadata cache failed for regionserver {}", - serverName, se); - IOException ioe = ClientUtil.parseServiceException(se); - throw new CompletionException(ioe); - } - }, executor); - futures.add(future); - map.put(future, serverName); - } - // Here we create one master like future which tracks individual future - // for each region server. 
- CompletableFuture allFutures = CompletableFuture.allOf( - futures.toArray(new CompletableFuture[0])); - long metadataCacheInvalidationTimeoutMs = config.getLong( - PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS, - PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS_DEFAULT); + int retryCount = 0, maxRetryCount = 1; + while (true) { + TableName table = TableName.valueOf(tableName); + try { + return connection.getRegionLocator(table).getRegionLocation(row, false); + } catch (org.apache.hadoop.hbase.TableNotFoundException e) { + String fullName = Bytes.toString(tableName); + throw new TableNotFoundException(SchemaUtil.getSchemaNameFromFullName(fullName), + SchemaUtil.getTableNameFromFullName(fullName)); + } catch (IOException e) { + LOGGER.error( + "Exception encountered in getTableRegionLocation for " + "table: {}, retryCount: {}", + table.getNameAsString(), retryCount, e); + if (retryCount++ < maxRetryCount) { // One retry, in case split occurs while navigating + continue; + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e) + .build().buildException(); + } + } + } + + @Override + public MetaDataMutationResult createSchema(final List schemaMutations, + final String schemaName) throws SQLException { + ensureNamespaceCreated(schemaName); + Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations); + byte[] key = m.getRow(); + MetaDataMutationResult result = + metaDataCoprocessorExec(null, key, new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + CreateSchemaRequest.Builder builder = CreateSchemaRequest.newBuilder(); + for (Mutation m : schemaMutations) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); + } + builder.setSchemaName(schemaName); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.createSchema(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + return result; + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + latestMetaData.addSchema(schema); + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + latestMetaData.removeSchema(schema, schemaTimeStamp); + } + + @Override + public MetaDataMutationResult dropSchema(final List schemaMetaData, + final String schemaName) throws SQLException { + final MetaDataMutationResult result = metaDataCoprocessorExec(null, + SchemaUtil.getSchemaKey(schemaName), new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + RpcController controller = getController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + DropSchemaRequest.Builder builder = DropSchemaRequest.newBuilder(); + for (Mutation m : schemaMetaData) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addSchemaMetadataMutations(mp.toByteString()); + } + builder.setSchemaName(schemaName); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.dropSchema(controller, builder.build(), rpcCallback); + checkForRemoteExceptions(controller); + return rpcCallback.get(); + } + }); + + final MutationCode code = result.getMutationCode(); + switch (code) { + case 
SCHEMA_ALREADY_EXISTS: + ReadOnlyProps props = this.getProps(); + boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + if (dropMetadata) { + ensureNamespaceDropped(schemaName); + } + break; + default: + break; + } + return result; + } + + private void ensureNamespaceDropped(String schemaName) throws SQLException { + SQLException sqlE = null; + try (Admin admin = getAdmin()) { + final String quorum = ZKConfig.getZKQuorumServersString(config); + final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT); + LOGGER.debug("Found quorum: " + quorum + ":" + znode); + if (ClientUtil.isHBaseNamespaceAvailable(admin, schemaName)) { + admin.deleteNamespace(schemaName); + } + } catch (IOException e) { + sqlE = ClientUtil.parseServerException(e); + } finally { + if (sqlE != null) { + throw sqlE; + } + } + } + + /** + * Manually adds {@link GuidePostsInfo} for a table to the client-side cache. Not a + * {@link ConnectionQueryServices} method. Exposed for testing purposes. + * @param key Table name + * @param info Stats instance + */ + public void addTableStats(GuidePostsKey key, GuidePostsInfo info) { + this.tableStatsCache.put(Objects.requireNonNull(key), Objects.requireNonNull(info)); + } + + @Override + public void invalidateStats(GuidePostsKey key) { + this.tableStatsCache.invalidate(Objects.requireNonNull(key)); + } + + @Override + public boolean isUpgradeRequired() { + return upgradeRequired.get(); + } + + @Override + public void clearUpgradeRequired() { + upgradeRequired.set(false); + } + + @Override + public Configuration getConfiguration() { + return config; + } + + @Override + public QueryLoggerDisruptor getQueryDisruptor() { + return this.queryDisruptor; + } + + @Override + public synchronized PhoenixTransactionClient initTransactionClient(Provider provider) + throws SQLException { + PhoenixTransactionClient client = txClients[provider.ordinal()]; + if (client == null) { + client = txClients[provider.ordinal()] = + provider.getTransactionProvider().getTransactionClient(config, connectionInfo); + } + return client; + } + + @VisibleForTesting + public List>> getCachedConnections() { + return connectionQueues; + } + + /** + * Invalidate metadata cache from all region servers for the given list of + * InvalidateServerMetadataCacheRequest. + */ + public void invalidateServerMetadataCache(List requests) + throws Throwable { + boolean invalidateCacheEnabled = config.getBoolean(PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED, + DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED); + if (!invalidateCacheEnabled) { + LOGGER.info("Skip invalidating server metadata cache since conf property" + + " phoenix.metadata.invalidate.cache.enabled is set to false"); + return; + } + if (!QueryUtil.isServerConnection(props)) { + LOGGER.warn(INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE); + throw new Exception(INVALIDATE_SERVER_METADATA_CACHE_EX_MESSAGE); + } + + metricsMetadataCachingSource.incrementMetadataCacheInvalidationOperationsCount(); + Admin admin = getInvalidateMetadataCacheConnection().getAdmin(); + // This will incur an extra RPC to the master. This RPC is required since we want to + // get current list of regionservers. 
+ Collection serverNames = admin.getRegionServers(true); + PhoenixStopWatch stopWatch = new PhoenixStopWatch().start(); + try { + invalidateServerMetadataCacheWithRetries(admin, serverNames, requests, false); + metricsMetadataCachingSource.incrementMetadataCacheInvalidationSuccessCount(); + } catch (Throwable t) { + metricsMetadataCachingSource.incrementMetadataCacheInvalidationFailureCount(); + throw t; + } finally { + metricsMetadataCachingSource + .addMetadataCacheInvalidationTotalTime(stopWatch.stop().elapsedMillis()); + } + } + + /** + * Invalidate metadata cache on all regionservers with retries for the given list of + * InvalidateServerMetadataCacheRequest. Each InvalidateServerMetadataCacheRequest contains + * tenantID, schema name and table name. We retry once before failing the operation. + */ + private void invalidateServerMetadataCacheWithRetries(Admin admin, + Collection serverNames, + List invalidateCacheRequests, boolean isRetry) + throws Throwable { + RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest protoRequest = + getRequest(invalidateCacheRequests); + List> futures = new ArrayList<>(); + Map map = new HashMap<>(); + int poolSize = config.getInt(PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE, + QueryServicesOptions.DEFAULT_PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE); + ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("metadata-cache-invalidation-pool-%d"); + ExecutorService executor = Executors.newFixedThreadPool(poolSize, builder.build()); + for (ServerName serverName : serverNames) { + CompletableFuture future = CompletableFuture.runAsync(() -> { try { - allFutures.get(metadataCacheInvalidationTimeoutMs, TimeUnit.MILLISECONDS); - } catch (Throwable t) { - List failedServers = getFailedServers(futures, map); - LOGGER.error("Invalidating metadata cache for failed for region servers: {}", - failedServers, t); - if (isRetry) { - // If this is a retry attempt then just fail the operation. - if (allFutures.isCompletedExceptionally()) { - if (t instanceof ExecutionException) { - t = t.getCause(); - } - } - throw t; - } else { - // This is the first attempt, we can retry once. - // Indicate that this is a retry attempt. - invalidateServerMetadataCacheWithRetries(admin, failedServers, - invalidateCacheRequests, true); - } - } finally { - executor.shutdown(); - } - } - - /** - * Get the list of regionservers that failed the invalidateCache rpc. - * @param futures futtures - * @param map map of future to server names - * @return the list of servers that failed the invalidateCache RPC. - */ - private List getFailedServers(List> futures, - Map map) { - List failedServers = new ArrayList<>(); - for (CompletableFuture completedFuture : futures) { - if (!completedFuture.isDone()) { - // If this task is still running, cancel it and keep in retry list. - ServerName sn = map.get(completedFuture); - failedServers.add(sn); - // Even though we cancel this future but it doesn't interrupt the executing thread. - completedFuture.cancel(true); - } else if (completedFuture.isCompletedExceptionally() - || completedFuture.isCancelled()) { - // This means task is done but completed with exception - // or was canceled. Add it to retry list. 
- ServerName sn = map.get(completedFuture); - failedServers.add(sn); - } - } - return failedServers; - } - - private RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest getRequest( - List requests) { - RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest.Builder builder = - RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest.newBuilder(); - for (InvalidateServerMetadataCacheRequest request: requests) { - RegionServerEndpointProtos.InvalidateServerMetadataCache.Builder innerBuilder - = RegionServerEndpointProtos.InvalidateServerMetadataCache.newBuilder(); - innerBuilder.setTenantId(ByteStringer.wrap(request.getTenantId())); - innerBuilder.setSchemaName(ByteStringer.wrap(request.getSchemaName())); - innerBuilder.setTableName(ByteStringer.wrap(request.getTableName())); - builder.addInvalidateServerMetadataCacheRequests(innerBuilder.build()); - } - return builder.build(); - } + PhoenixStopWatch innerWatch = new PhoenixStopWatch().start(); + for (InvalidateServerMetadataCacheRequest invalidateCacheRequest : invalidateCacheRequests) { + LOGGER.info("Sending invalidate metadata cache for {} to region server:" + " {}", + invalidateCacheRequest.toString(), serverName); + } + + RegionServerEndpointProtos.RegionServerEndpointService.BlockingInterface service = + RegionServerEndpointProtos.RegionServerEndpointService + .newBlockingStub(admin.coprocessorService(serverName)); + // The timeout for this particular request is managed by config parameter: + // hbase.rpc.timeout. Even if the future times out, this runnable can be in + // RUNNING state and will not be interrupted. + // We use the controller set in hbase connection. + service.invalidateServerMetadataCache(null, protoRequest); + long cacheInvalidationTime = innerWatch.stop().elapsedMillis(); + LOGGER.info( + "Invalidating metadata cache" + + " on region server: {} completed successfully and it took {} ms", + serverName, cacheInvalidationTime); + metricsMetadataCachingSource.addMetadataCacheInvalidationRpcTime(cacheInvalidationTime); + } catch (ServiceException se) { + LOGGER.error("Invalidating metadata cache failed for regionserver {}", serverName, se); + IOException ioe = ClientUtil.parseServiceException(se); + throw new CompletionException(ioe); + } + }, executor); + futures.add(future); + map.put(future, serverName); + } + // Here we create one master like future which tracks individual future + // for each region server. + CompletableFuture allFutures = + CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + long metadataCacheInvalidationTimeoutMs = + config.getLong(PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS, + PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS_DEFAULT); + try { + allFutures.get(metadataCacheInvalidationTimeoutMs, TimeUnit.MILLISECONDS); + } catch (Throwable t) { + List failedServers = getFailedServers(futures, map); + LOGGER.error("Invalidating metadata cache for failed for region servers: {}", failedServers, + t); + if (isRetry) { + // If this is a retry attempt then just fail the operation. + if (allFutures.isCompletedExceptionally()) { + if (t instanceof ExecutionException) { + t = t.getCause(); + } + } + throw t; + } else { + // This is the first attempt, we can retry once. + // Indicate that this is a retry attempt. + invalidateServerMetadataCacheWithRetries(admin, failedServers, invalidateCacheRequests, + true); + } + } finally { + executor.shutdown(); + } + } + + /** + * Get the list of regionservers that failed the invalidateCache rpc. 
+ * @param futures futtures + * @param map map of future to server names + * @return the list of servers that failed the invalidateCache RPC. + */ + private List getFailedServers(List> futures, + Map map) { + List failedServers = new ArrayList<>(); + for (CompletableFuture completedFuture : futures) { + if (!completedFuture.isDone()) { + // If this task is still running, cancel it and keep in retry list. + ServerName sn = map.get(completedFuture); + failedServers.add(sn); + // Even though we cancel this future but it doesn't interrupt the executing thread. + completedFuture.cancel(true); + } else if (completedFuture.isCompletedExceptionally() || completedFuture.isCancelled()) { + // This means task is done but completed with exception + // or was canceled. Add it to retry list. + ServerName sn = map.get(completedFuture); + failedServers.add(sn); + } + } + return failedServers; + } + + private RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest + getRequest(List requests) { + RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest.Builder builder = + RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest.newBuilder(); + for (InvalidateServerMetadataCacheRequest request : requests) { + RegionServerEndpointProtos.InvalidateServerMetadataCache.Builder innerBuilder = + RegionServerEndpointProtos.InvalidateServerMetadataCache.newBuilder(); + innerBuilder.setTenantId(ByteStringer.wrap(request.getTenantId())); + innerBuilder.setSchemaName(ByteStringer.wrap(request.getSchemaName())); + innerBuilder.setTableName(ByteStringer.wrap(request.getTableName())); + builder.addInvalidateServerMetadataCacheRequests(innerBuilder.build()); + } + return builder.build(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java index 6b39eeb50b9..1a6abe34209 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -86,6 +86,8 @@ import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.transaction.PhoenixTransactionClient; import org.apache.phoenix.transaction.TransactionFactory.Provider; import org.apache.phoenix.util.ConfigUtil; @@ -98,772 +100,787 @@ import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.SequenceUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - /** - * - * Implementation of ConnectionQueryServices used in testing where no connection to - * an hbase cluster is necessary. - * - * + * Implementation of ConnectionQueryServices used in testing where no connection to an hbase cluster + * is necessary. 
* @since 0.1 */ -public class ConnectionlessQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices { - private static ServerName SERVER_NAME = ServerName.parseServerName(HConstants.LOCALHOST + Addressing.HOSTNAME_PORT_SEPARATOR + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); - private static final GuidePostsCacheProvider - GUIDE_POSTS_CACHE_PROVIDER = new GuidePostsCacheProvider(); - private final ReadOnlyProps props; - private PMetaData metaData; - private final Map sequenceMap = Maps.newHashMap(); - private final String userName; - private KeyValueBuilder kvBuilder; - private volatile boolean initialized; - private volatile SQLException initializationException; - private final Map> tableSplits = Maps.newHashMap(); - private final GuidePostsCacheWrapper guidePostsCache; - private final Configuration config; - - private User user; - - public ConnectionlessQueryServicesImpl(QueryServices services, ConnectionInfo connInfo, Properties info) { - super(services); - userName = connInfo.getPrincipal(); - user = connInfo.getUser(); - metaData = newEmptyMetaData(); - - // Use KeyValueBuilder that builds real KeyValues, as our test utils require this - this.kvBuilder = GenericKeyValueBuilder.INSTANCE; - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - for (Entry entry : services.getProps()) { - config.set(entry.getKey(), entry.getValue()); - } - if (info != null) { - for (Object key : info.keySet()) { - config.set((String) key, info.getProperty((String) key)); - } - } - for (Entry entry : connInfo.asProps()) { - config.set(entry.getKey(), entry.getValue()); - } - - // Without making a copy of the configuration we cons up, we lose some of our properties - // on the server side during testing. 
- this.config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config); - - // set replication required parameter - ConfigUtil.setReplicationConfigIfAbsent(this.config); - this.props = new ReadOnlyProps(this.config.iterator()); - - this.guidePostsCache = GUIDE_POSTS_CACHE_PROVIDER.getGuidePostsCache(props.get(GUIDE_POSTS_CACHE_FACTORY_CLASS, - QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS), null, config); - } - - private PMetaData newEmptyMetaData() { - long updateCacheFrequency = (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - // We cannot have zero update cache frequency for connectionless query services as we need to keep the - // metadata in the cache (i.e., in memory) - if (updateCacheFrequency == 0) { - updateCacheFrequency = Long.MAX_VALUE; - } - return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, updateCacheFrequency, getProps()); - } - - protected String getSystemCatalogTableDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TABLE_METADATA); - } - - protected String getSystemSequenceTableDDL(int nSaltBuckets) { - String schema = String.format(setSystemDDLProperties(QueryConstants.CREATE_SEQUENCE_METADATA)); - return Sequence.getCreateTableStatement(schema, nSaltBuckets); - } - - protected String getFunctionTableDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_FUNCTION_METADATA); - } - - protected String getLogTableDDL() { - return setSystemLogDDLProperties(QueryConstants.CREATE_LOG_METADATA); - } - - private String setSystemLogDDLProperties(String ddl) { - return String.format(ddl, props.getInt(LOG_SALT_BUCKETS_ATTRIB, QueryServicesOptions.DEFAULT_LOG_SALT_BUCKETS)); - - } - - protected String getChildLinkDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); - } - - protected String getMutexDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA); - } - - protected String getTaskDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TASK_METADATA); - } - - protected String getTransformDDL() { - return setSystemDDLProperties(QueryConstants.CREATE_TRANSFORM_METADATA); - } - - private String setSystemDDLProperties(String ddl) { - return String.format(ddl, - props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS), - props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); - } - - @Override - public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable childId) { - return this; // Just reuse the same query services - } - - @Override - public Table getTable(byte[] tableName) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public Table getTableIfExists(byte[] tableName) { - throw new UnsupportedOperationException(); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getAllTableRegions(byte[] tableName) throws SQLException { - return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getAllTableRegions(byte[] tableName, int queryTimeout) - throws SQLException { - return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, - queryTimeout); - } - - /** - * {@inheritDoc}. 
- */ - @Override - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey) throws SQLException { - return getTableRegions(tableName, startRowKey, endRowKey, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey, int queryTimeout) - throws SQLException { - List regions = tableSplits.get(Bytes.toString(tableName)); - if (regions != null) { - return regions; +public class ConnectionlessQueryServicesImpl extends DelegateQueryServices + implements ConnectionQueryServices { + private static ServerName SERVER_NAME = ServerName.parseServerName(HConstants.LOCALHOST + + Addressing.HOSTNAME_PORT_SEPARATOR + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); + private static final GuidePostsCacheProvider GUIDE_POSTS_CACHE_PROVIDER = + new GuidePostsCacheProvider(); + private final ReadOnlyProps props; + private PMetaData metaData; + private final Map sequenceMap = Maps.newHashMap(); + private final String userName; + private KeyValueBuilder kvBuilder; + private volatile boolean initialized; + private volatile SQLException initializationException; + private final Map> tableSplits = Maps.newHashMap(); + private final GuidePostsCacheWrapper guidePostsCache; + private final Configuration config; + + private User user; + + public ConnectionlessQueryServicesImpl(QueryServices services, ConnectionInfo connInfo, + Properties info) { + super(services); + userName = connInfo.getPrincipal(); + user = connInfo.getUser(); + metaData = newEmptyMetaData(); + + // Use KeyValueBuilder that builds real KeyValues, as our test utils require this + this.kvBuilder = GenericKeyValueBuilder.INSTANCE; + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + for (Entry entry : services.getProps()) { + config.set(entry.getKey(), entry.getValue()); + } + if (info != null) { + for (Object key : info.keySet()) { + config.set((String) key, info.getProperty((String) key)); + } + } + for (Entry entry : connInfo.asProps()) { + config.set(entry.getKey(), entry.getValue()); + } + + // Without making a copy of the configuration we cons up, we lose some of our properties + // on the server side during testing. 
+ this.config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(config); + + // set replication required parameter + ConfigUtil.setReplicationConfigIfAbsent(this.config); + this.props = new ReadOnlyProps(this.config.iterator()); + + this.guidePostsCache = + GUIDE_POSTS_CACHE_PROVIDER.getGuidePostsCache(props.get(GUIDE_POSTS_CACHE_FACTORY_CLASS, + QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS), null, config); + } + + private PMetaData newEmptyMetaData() { + long updateCacheFrequency = (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY + .getValue(getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + // We cannot have zero update cache frequency for connectionless query services as we need to + // keep the + // metadata in the cache (i.e., in memory) + if (updateCacheFrequency == 0) { + updateCacheFrequency = Long.MAX_VALUE; + } + return new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, updateCacheFrequency, getProps()); + } + + protected String getSystemCatalogTableDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TABLE_METADATA); + } + + protected String getSystemSequenceTableDDL(int nSaltBuckets) { + String schema = String.format(setSystemDDLProperties(QueryConstants.CREATE_SEQUENCE_METADATA)); + return Sequence.getCreateTableStatement(schema, nSaltBuckets); + } + + protected String getFunctionTableDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_FUNCTION_METADATA); + } + + protected String getLogTableDDL() { + return setSystemLogDDLProperties(QueryConstants.CREATE_LOG_METADATA); + } + + private String setSystemLogDDLProperties(String ddl) { + return String.format(ddl, + props.getInt(LOG_SALT_BUCKETS_ATTRIB, QueryServicesOptions.DEFAULT_LOG_SALT_BUCKETS)); + + } + + protected String getChildLinkDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_CHILD_LINK_METADATA); + } + + protected String getMutexDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_MUTEX_METADATA); + } + + protected String getTaskDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TASK_METADATA); + } + + protected String getTransformDDL() { + return setSystemDDLProperties(QueryConstants.CREATE_TRANSFORM_METADATA); + } + + private String setSystemDDLProperties(String ddl) { + return String.format(ddl, + props.getInt(DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_MAX_VERSIONS), + props.getBoolean(DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB, + QueryServicesOptions.DEFAULT_SYSTEM_KEEP_DELETED_CELLS)); + } + + @Override + public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable childId) { + return this; // Just reuse the same query services + } + + @Override + public Table getTable(byte[] tableName) throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public Table getTableIfExists(byte[] tableName) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getAllTableRegions(byte[] tableName) throws SQLException { + return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getAllTableRegions(byte[] tableName, int queryTimeout) + throws SQLException { + return getTableRegions(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + queryTimeout); + } + + /** + * {@inheritDoc}. 
+ */ + @Override + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey) throws SQLException { + return getTableRegions(tableName, startRowKey, endRowKey, + QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey, int queryTimeout) throws SQLException { + List regions = tableSplits.get(Bytes.toString(tableName)); + if (regions != null) { + return regions; + } + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)) + .setStartKey(startRowKey).setStartKey(endRowKey).build(); + return Collections.singletonList(new HRegionLocation(hri, SERVER_NAME, -1)); + } + + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + metaData.addTable(table, resolvedTime); + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { + metaData.updateResolvedTimestamp(table, resolvedTimestamp); + } + + @Override + public void removeTable(PName tenantId, String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + metaData.removeTable(tenantId, tableName, parentTableName, tableTimeStamp); + } + + @Override + public void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { + metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, + resolvedTime); + } + + @Override + public PhoenixConnection connect(String url, Properties info) throws SQLException { + return new PhoenixConnection(this, url, info); + } + + @Override + public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, + long tableTimestamp, long clientTimestamp) throws SQLException { + // Return result that will cause client to use it's own metadata instead of needing + // to get anything from the server (since we don't have a connection) + try { + String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes); + PTable table = metaData.getTableRef(new PTableKey(tenantId, fullTableName)).getTable(); + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table); + } catch (TableNotFoundException e) { + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null); + } + } + + private static byte[] getTableName(List tableMetaData, byte[] physicalTableName) { + if (physicalTableName != null) { + return physicalTableName; + } + byte[][] rowKeyMetadata = new byte[3][]; + Mutation m = MetaDataUtil.getTableHeaderRow(tableMetaData); + byte[] key = m.getRow(); + SchemaUtil.getVarChars(key, rowKeyMetadata); + byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + return SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes); + } + + private static List generateRegionLocations(byte[] physicalName, + byte[][] splits) { + byte[] startKey = HConstants.EMPTY_START_ROW; + List regions = Lists.newArrayListWithExpectedSize(splits.length); + for (byte[] split : splits) { + regions.add(new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.valueOf(physicalName)) + .setStartKey(startKey).setEndKey(split).build(), SERVER_NAME, -1)); + startKey = split; + } + regions.add(new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.valueOf(physicalName)) + 
.setStartKey(startKey).setEndKey(HConstants.EMPTY_END_ROW).build(), SERVER_NAME, -1)); + return regions; + } + + @Override + public MetaDataMutationResult createTable(List tableMetaData, byte[] physicalName, + PTableType tableType, Map tableProps, + List>> families, byte[][] splits, boolean isNamespaceMapped, + boolean allocateIndexId, boolean isDoNotUpgradePropSet, PTable parentTable) + throws SQLException { + if ( + tableType == PTableType.INDEX + && IndexUtil.isLocalIndexFamily(Bytes.toString(families.iterator().next().getFirst())) + ) { + Object dataTableName = tableProps.get(PhoenixDatabaseMetaData.DATA_TABLE_NAME); + List regionLocations = tableSplits.get(dataTableName); + byte[] tableName = getTableName(tableMetaData, physicalName); + tableSplits.put(Bytes.toString(tableName), regionLocations); + } else if (splits != null) { + byte[] tableName = getTableName(tableMetaData, physicalName); + tableSplits.put(Bytes.toString(tableName), generateRegionLocations(tableName, splits)); + } + if (!allocateIndexId) { + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null); + } else { + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null, Long.MIN_VALUE, + MetaDataUtil.getViewIndexIdDataType()); + } + } + + @Override + public MetaDataMutationResult dropTable(List tableMetadata, PTableType tableType, + boolean cascade) throws SQLException { + byte[] tableName = getTableName(tableMetadata, null); + tableSplits.remove(Bytes.toString(tableName)); + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null); + } + + @Override + public MetaDataMutationResult addColumn(List tableMetaData, PTable table, + PTable parentTable, PTable transformingNewTable, + Map>> properties, Set colFamiliesForPColumnsToBeAdded, + List columnsToBeAdded) throws SQLException { + List columns = Lists.newArrayList(table.getColumns()); + columns.addAll(columnsToBeAdded); + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, + PTableImpl.builderWithColumns(table, columns).build()); + } + + @Override + public MetaDataMutationResult dropColumn(List tableMetadata, PTableType tableType, + PTable parentTable) throws SQLException { + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null); + } + + @Override + public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, + long clientTS) throws SQLException { + } + + // TODO: share this with ConnectionQueryServicesImpl + @Override + public void init(String url, Properties props) throws SQLException { + if (initialized) { + if (initializationException != null) { + throw initializationException; + } + return; + } + synchronized (this) { + if (initialized) { + if (initializationException != null) { + throw initializationException; } - RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)) - .setStartKey(startRowKey) - .setStartKey(endRowKey) - .build(); - return Collections.singletonList(new HRegionLocation(hri, SERVER_NAME, -1)); - } - - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - metaData.addTable(table, resolvedTime); - } - - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { - metaData.updateResolvedTimestamp(table, resolvedTimestamp); - } - - @Override - public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) - throws SQLException { - metaData.removeTable(tenantId, tableName, parentTableName, 
tableTimeStamp); - } - - @Override - public void removeColumn(PName tenantId, String tableName, List columnsToRemove, long tableTimeStamp, - long tableSeqNum, long resolvedTime) throws SQLException { - metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime); - } - - - @Override - public PhoenixConnection connect(String url, Properties info) throws SQLException { - return new PhoenixConnection(this, url, info); - } - - @Override - public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException { - // Return result that will cause client to use it's own metadata instead of needing - // to get anything from the server (since we don't have a connection) + return; + } + SQLException sqlE = null; + PhoenixConnection metaConnection = null; + try { + Properties scnProps = PropertiesUtil.deepCopy(props); + scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); + scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB); + String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB); + metaConnection = new PhoenixConnection(this, globalUrl, scnProps); + metaConnection.setRunningUpgrade(true); try { - String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes); - PTable table = metaData.getTableRef(new PTableKey(tenantId, fullTableName)).getTable(); - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table); - } catch (TableNotFoundException e) { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null); - } - } - - private static byte[] getTableName(List tableMetaData, byte[] physicalTableName) { - if (physicalTableName != null) { - return physicalTableName; - } - byte[][] rowKeyMetadata = new byte[3][]; - Mutation m = MetaDataUtil.getTableHeaderRow(tableMetaData); - byte[] key = m.getRow(); - SchemaUtil.getVarChars(key, rowKeyMetadata); - byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - return SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes); - } - - private static List generateRegionLocations(byte[] physicalName, byte[][] splits) { - byte[] startKey = HConstants.EMPTY_START_ROW; - List regions = Lists.newArrayListWithExpectedSize(splits.length); - for (byte[] split : splits) { - regions.add(new HRegionLocation(RegionInfoBuilder - .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey) - .setEndKey(split).build(), SERVER_NAME, -1)); - startKey = split; - } - regions.add(new HRegionLocation(RegionInfoBuilder - .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey) - .setEndKey(HConstants.EMPTY_END_ROW).build(), SERVER_NAME, -1)); - return regions; - } - - @Override - public MetaDataMutationResult createTable(List tableMetaData, byte[] physicalName, PTableType tableType, - Map tableProps, List>> families, byte[][] splits, - boolean isNamespaceMapped, boolean allocateIndexId, boolean isDoNotUpgradePropSet, PTable parentTable) throws SQLException { - if (tableType == PTableType.INDEX && IndexUtil.isLocalIndexFamily(Bytes.toString(families.iterator().next().getFirst()))) { - Object dataTableName = tableProps.get(PhoenixDatabaseMetaData.DATA_TABLE_NAME); - List regionLocations = tableSplits.get(dataTableName); - byte[] tableName = getTableName(tableMetaData, physicalName); - 
tableSplits.put(Bytes.toString(tableName), regionLocations); - } else if (splits != null) { - byte[] tableName = getTableName(tableMetaData, physicalName); - tableSplits.put(Bytes.toString(tableName), generateRegionLocations(tableName, splits)); - } - if (!allocateIndexId) { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null); - } else { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null, Long.MIN_VALUE, MetaDataUtil.getViewIndexIdDataType()); - } - } - - @Override - public MetaDataMutationResult dropTable(List tableMetadata, PTableType tableType, boolean cascade) throws SQLException { - byte[] tableName = getTableName(tableMetadata, null); - tableSplits.remove(Bytes.toString(tableName)); - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null); - } - - @Override - public MetaDataMutationResult addColumn(List tableMetaData, - PTable table, - PTable parentTable, - PTable transformingNewTable, - Map>> properties, - Set colFamiliesForPColumnsToBeAdded, - List columnsToBeAdded) throws SQLException { - List columns = Lists.newArrayList(table.getColumns()); - columns.addAll(columnsToBeAdded); - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, - PTableImpl.builderWithColumns(table, columns).build()); - } - - @Override - public MetaDataMutationResult dropColumn(List tableMetadata, - PTableType tableType, - PTable parentTable) throws SQLException { - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, null); - } - - @Override - public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS) - throws SQLException {} - // TODO: share this with ConnectionQueryServicesImpl - @Override - public void init(String url, Properties props) throws SQLException { - if (initialized) { - if (initializationException != null) { - throw initializationException; - } - return; - } - synchronized (this) { - if (initialized) { - if (initializationException != null) { - throw initializationException; - } - return; - } - SQLException sqlE = null; - PhoenixConnection metaConnection = null; - try { - Properties scnProps = PropertiesUtil.deepCopy(props); - scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); - scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB); - metaConnection = new PhoenixConnection(this, globalUrl, scnProps); - metaConnection.setRunningUpgrade(true); - try { - metaConnection.createStatement().executeUpdate(getSystemCatalogTableDDL()); - } catch (TableAlreadyExistsException ignore) { - // Ignore, as this will happen if the SYSTEM.TABLE already exists at this fixed timestamp. - // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp. - } - try { - int nSaltBuckets = getSequenceSaltBuckets(); - String createTableStatement = getSystemSequenceTableDDL(nSaltBuckets); - metaConnection.createStatement().executeUpdate(createTableStatement); - } catch (NewerTableAlreadyExistsException ignore) { - // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed timestamp. - // A TableAlreadyExistsException is not thrown, since the table only exists *after* this fixed timestamp. 
- } - try { - metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA); - } catch (NewerTableAlreadyExistsException ignore) { - // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed - // timestamp. - // A TableAlreadyExistsException is not thrown, since the table only exists *after* this - // fixed timestamp. - } - - try { - metaConnection.createStatement().executeUpdate(getFunctionTableDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - } - try { - metaConnection.createStatement().executeUpdate(getLogTableDDL()); - } catch (NewerTableAlreadyExistsException ignore) {} - try { - metaConnection.createStatement() - .executeUpdate(getChildLinkDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - } - try { - metaConnection.createStatement() - .executeUpdate(getMutexDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - } - try { - metaConnection.createStatement() - .executeUpdate(getTaskDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - } - try { - metaConnection.createStatement() - .executeUpdate(getTransformDDL()); - } catch (NewerTableAlreadyExistsException ignore) { - } - } catch (SQLException e) { - sqlE = e; - } finally { - try { - if (metaConnection != null) metaConnection.close(); - } catch (SQLException e) { - if (sqlE != null) { - sqlE.setNextException(e); - } else { - sqlE = e; - } - } finally { - try { - if (sqlE != null) { - initializationException = sqlE; - throw sqlE; - } - } finally { - initialized = true; - } - } - } + metaConnection.createStatement().executeUpdate(getSystemCatalogTableDDL()); + } catch (TableAlreadyExistsException ignore) { + // Ignore, as this will happen if the SYSTEM.TABLE already exists at this fixed timestamp. + // A TableAlreadyExistsException is not thrown, since the table only exists *after* this + // fixed timestamp. } - } - - @Override - public MutationState updateData(MutationPlan plan) throws SQLException { - return new MutationState(0, 0, plan.getContext().getConnection()); - } - - @Override - public int getLowestClusterHBaseVersion() { - return Integer.MAX_VALUE; // Allow everything for connectionless - } - - @Override - public void refreshLiveRegionServers() throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public List getLiveRegionServers() { - throw new UnsupportedOperationException(); - } - - @Override - public Admin getAdmin() throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public MetaDataMutationResult updateIndexState(List tableMetadata, String parentTableName) throws SQLException { - byte[][] rowKeyMetadata = new byte[3][]; - SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata); - Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) { - throw new IllegalStateException(); + try { + int nSaltBuckets = getSequenceSaltBuckets(); + String createTableStatement = getSystemSequenceTableDDL(nSaltBuckets); + metaConnection.createStatement().executeUpdate(createTableStatement); + } catch (NewerTableAlreadyExistsException ignore) { + // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed + // timestamp. + // A TableAlreadyExistsException is not thrown, since the table only exists *after* this + // fixed timestamp. 
} - PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]); - byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]); - String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]); - String indexTableName = SchemaUtil.getTableName(schemaName, indexName); - PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes); - PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable(); - index = PTableImpl.builderWithColumns(index, getColumnsToClone(index)) - .setState(newState == PIndexState.USABLE ? PIndexState.ACTIVE : - newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState) - .build(); - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index); - } - - @Override - public MetaDataMutationResult updateIndexState(List tableMetadata, - String parentTableName, Map>> stmtProperties, - PTable table) throws SQLException { - return updateIndexState(tableMetadata,parentTableName); - } - - @Override - public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { - return null; - } - - @Override - public void clearTableRegionCache(TableName tableName) throws SQLException { - } - - @Override - public boolean hasIndexWALCodec() { - return true; - } - - @Override - public long createSequence(String tenantId, String schemaName, String sequenceName, - long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, - boolean cycle, long timestamp) throws SQLException { - SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets()); - if (sequenceMap.get(key) != null) { - throw new SequenceAlreadyExistsException(schemaName, sequenceName); + try { + metaConnection.createStatement() + .executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA); + } catch (NewerTableAlreadyExistsException ignore) { + // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed + // timestamp. + // A TableAlreadyExistsException is not thrown, since the table only exists *after* this + // fixed timestamp. 
} - sequenceMap.put(key, new SequenceInfo(startWith, incrementBy, minValue, maxValue, 1l, cycle)) ; - return timestamp; - } - @Override - public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException { - SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets()); - if (sequenceMap.remove(key) == null) { - throw new SequenceNotFoundException(schemaName, sequenceName); + try { + metaConnection.createStatement().executeUpdate(getFunctionTableDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - return timestamp; - } - - @Override - public void validateSequences(List sequenceAllocations, long timestamp, - long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { - int i = 0; - for (SequenceAllocation sequenceAllocation : sequenceAllocations) { - SequenceInfo info = sequenceMap.get(sequenceAllocation.getSequenceKey()); - if (info == null) { - exceptions[i] = new SequenceNotFoundException(sequenceAllocation.getSequenceKey().getSchemaName(), sequenceAllocation.getSequenceKey().getSequenceName()); - } else { - values[i] = info.sequenceValue; - } - i++; + try { + metaConnection.createStatement().executeUpdate(getLogTableDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - } - - @Override - public void incrementSequences(List sequenceAllocations, long timestamp, - long[] values, SQLException[] exceptions) throws SQLException { - int i = 0; - for (SequenceAllocation sequenceAllocation : sequenceAllocations) { - SequenceKey key = sequenceAllocation.getSequenceKey(); - SequenceInfo info = sequenceMap.get(key); - if (info == null) { - exceptions[i] = new SequenceNotFoundException( - key.getSchemaName(), key.getSequenceName()); - } else { - boolean increaseSeq = info.incrementBy > 0; - if (info.limitReached) { - SQLExceptionCode code = increaseSeq ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE - : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; - exceptions[i] = new SQLExceptionInfo.Builder(code).build().buildException(); - } else { - values[i] = info.sequenceValue; - info.sequenceValue += info.incrementBy * info.cacheSize; - info.limitReached = SequenceUtil.checkIfLimitReached(info); - if (info.limitReached && info.cycle) { - info.sequenceValue = increaseSeq ? 
info.minValue : info.maxValue; - info.limitReached = false; - } - } - } - i++; - } - i = 0; - for (SQLException e : exceptions) { - if (e != null) { - sequenceMap.remove(sequenceAllocations.get(i).getSequenceKey()); - } - i++; + try { + metaConnection.createStatement().executeUpdate(getChildLinkDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - } - - @Override - public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { - SequenceInfo info = sequenceMap.get(sequenceKey); - if (info == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) - .setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) - .build().buildException(); + try { + metaConnection.createStatement().executeUpdate(getMutexDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - return info.sequenceValue; - } - - @Override - public void returnSequences(List sequenceKeys, long timestamp, SQLException[] exceptions) - throws SQLException { - } - - @Override - public void addConnection(PhoenixConnection connection) throws SQLException { - } - - @Override - public void removeConnection(PhoenixConnection connection) throws SQLException { - } - - @Override - public KeyValueBuilder getKeyValueBuilder() { - return this.kvBuilder; - } - - @Override - public boolean supportsFeature(Feature feature) { - return true; - } - - @Override - public String getUserName() { - return userName; - } - - @Override - public GuidePostsInfo getTableStats(GuidePostsKey key) { - GuidePostsInfo info = null; try { - info = guidePostsCache.get(key); - } catch(ExecutionException e){ - return GuidePostsInfo.NO_GUIDEPOST; + metaConnection.createStatement().executeUpdate(getTaskDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - if (null == info) { - return GuidePostsInfo.NO_GUIDEPOST; + try { + metaConnection.createStatement().executeUpdate(getTransformDDL()); + } catch (NewerTableAlreadyExistsException ignore) { } - return info; - } - - @Override - public long clearCache() throws SQLException { - return 0; - } - - @Override - public int getSequenceSaltBuckets() { - return getProps().getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB, - QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - } - - public MetaDataMutationResult createFunction(List functionData, PFunction function, boolean temporary) - throws SQLException { - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0l, null); - } - - @Override - public void addFunction(PFunction function) throws SQLException { - this.metaData.addFunction(function); - } - - @Override - public void removeFunction(PName tenantId, String function, long functionTimeStamp) - throws SQLException { - this.metaData.removeFunction(tenantId, function, functionTimeStamp); - } - - @Override - public MetaDataMutationResult getFunctions(PName tenantId, - List> functionNameAndTimeStampPairs, long clientTimestamp) - throws SQLException { - List functions = new ArrayList(functionNameAndTimeStampPairs.size()); - for(Pair functionInfo: functionNameAndTimeStampPairs) { - try { - PFunction function2 = metaData.getFunction(new PTableKey(tenantId, Bytes.toString(functionInfo.getFirst()))); - functions.add(function2); - } catch (FunctionNotFoundException e) { - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0, null); + } catch (SQLException e) { + sqlE = e; + } finally { + try { + if (metaConnection != null) metaConnection.close(); + } catch 
(SQLException e) { + if (sqlE != null) { + sqlE.setNextException(e); + } else { + sqlE = e; + } + } finally { + try { + if (sqlE != null) { + initializationException = sqlE; + throw sqlE; } + } finally { + initialized = true; + } } - if(functions.isEmpty()) { - return null; + } + } + } + + @Override + public MutationState updateData(MutationPlan plan) throws SQLException { + return new MutationState(0, 0, plan.getContext().getConnection()); + } + + @Override + public int getLowestClusterHBaseVersion() { + return Integer.MAX_VALUE; // Allow everything for connectionless + } + + @Override + public void refreshLiveRegionServers() throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public List getLiveRegionServers() { + throw new UnsupportedOperationException(); + } + + @Override + public Admin getAdmin() throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName) throws SQLException { + byte[][] rowKeyMetadata = new byte[3][]; + SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata); + Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) { + throw new IllegalStateException(); + } + PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]); + byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]); + String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]); + String indexTableName = SchemaUtil.getTableName(schemaName, indexName); + PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes); + PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable(); + index = PTableImpl.builderWithColumns(index, getColumnsToClone(index)) + .setState(newState == PIndexState.USABLE ? PIndexState.ACTIVE + : newState == PIndexState.UNUSABLE ? 
PIndexState.INACTIVE + : newState) + .build(); + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index); + } + + @Override + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName, Map>> stmtProperties, PTable table) + throws SQLException { + return updateIndexState(tableMetadata, parentTableName); + } + + @Override + public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { + return null; + } + + @Override + public void clearTableRegionCache(TableName tableName) throws SQLException { + } + + @Override + public boolean hasIndexWALCodec() { + return true; + } + + @Override + public long createSequence(String tenantId, String schemaName, String sequenceName, + long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, + long timestamp) throws SQLException { + SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets()); + if (sequenceMap.get(key) != null) { + throw new SequenceAlreadyExistsException(schemaName, sequenceName); + } + sequenceMap.put(key, new SequenceInfo(startWith, incrementBy, minValue, maxValue, 1l, cycle)); + return timestamp; + } + + @Override + public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) + throws SQLException { + SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets()); + if (sequenceMap.remove(key) == null) { + throw new SequenceNotFoundException(schemaName, sequenceName); + } + return timestamp; + } + + @Override + public void validateSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { + int i = 0; + for (SequenceAllocation sequenceAllocation : sequenceAllocations) { + SequenceInfo info = sequenceMap.get(sequenceAllocation.getSequenceKey()); + if (info == null) { + exceptions[i] = + new SequenceNotFoundException(sequenceAllocation.getSequenceKey().getSchemaName(), + sequenceAllocation.getSequenceKey().getSequenceName()); + } else { + values[i] = info.sequenceValue; + } + i++; + } + } + + @Override + public void incrementSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions) throws SQLException { + int i = 0; + for (SequenceAllocation sequenceAllocation : sequenceAllocations) { + SequenceKey key = sequenceAllocation.getSequenceKey(); + SequenceInfo info = sequenceMap.get(key); + if (info == null) { + exceptions[i] = new SequenceNotFoundException(key.getSchemaName(), key.getSequenceName()); + } else { + boolean increaseSeq = info.incrementBy > 0; + if (info.limitReached) { + SQLExceptionCode code = increaseSeq + ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE + : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; + exceptions[i] = new SQLExceptionInfo.Builder(code).build().buildException(); + } else { + values[i] = info.sequenceValue; + info.sequenceValue += info.incrementBy * info.cacheSize; + info.limitReached = SequenceUtil.checkIfLimitReached(info); + if (info.limitReached && info.cycle) { + info.sequenceValue = increaseSeq ? 
info.minValue : info.maxValue; + info.limitReached = false; + } } - return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, functions, true); - } - - @Override - public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) - throws SQLException { - return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, null); - } - - @Override - public long getRenewLeaseThresholdMilliSeconds() { - return 0; - } - - @Override - public boolean isRenewingLeasesEnabled() { - return false; - } - - public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException { - List regions = tableSplits.get(Bytes.toString(tableName)); - if (regions != null) { - for (HRegionLocation region : regions) { - if (Bytes.compareTo(region.getRegion().getStartKey(), row) <= 0 - && Bytes.compareTo(region.getRegion().getEndKey(), row) > 0) { - return region; - } - } - } - return new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW) - .build(), SERVER_NAME, -1); - } - - @Override - public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) { - return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0l, null); - } - - @Override - public void addSchema(PSchema schema) throws SQLException { - this.metaData.addSchema(schema); - } - - @Override - public MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) throws SQLException { - return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0, null); - } - - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - metaData.removeSchema(schema, schemaTimeStamp); - } - - @Override - public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) { - return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, 0, null); - } - - /** - * Manually adds {@link GuidePostsInfo} for a table to the client-side cache. Not a - * {@link ConnectionQueryServices} method. Exposed for testing purposes. 
- * - * @param key - * @param info - */ - public void addTableStats(GuidePostsKey key, GuidePostsInfo info) { - this.guidePostsCache.put(Objects.requireNonNull(key), info); - } - - @Override - public void invalidateStats(GuidePostsKey key) { - this.guidePostsCache.invalidate(Objects.requireNonNull(key)); - } - - @Override - public void upgradeSystemTables(String url, Properties props) throws SQLException {} - - @Override - public boolean isUpgradeRequired() { - return false; - } - - @Override - public void clearUpgradeRequired() {} - - @Override - public Configuration getConfiguration() { - return config; - } - - @Override - public User getUser() { - return user; - } - - @Override - public QueryLoggerDisruptor getQueryDisruptor() { - return null; - } - - @Override - public PhoenixTransactionClient initTransactionClient(Provider provider) { - return null; // Client is not necessary - } - - @Override - public boolean writeMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException { - return true; - } - - @Override - public void deleteMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException { - } - @Override - public PMetaData getMetaDataCache() { - return metaData; - } - - @Override - public int getConnectionCount(boolean isInternal) { - return 0; - } - - @Override - public void invalidateServerMetadataCache(List requests) - throws Throwable { - // No-op - } + } + i++; + } + i = 0; + for (SQLException e : exceptions) { + if (e != null) { + sequenceMap.remove(sequenceAllocations.get(i).getSequenceKey()); + } + i++; + } + } + + @Override + public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { + SequenceInfo info = sequenceMap.get(sequenceKey); + if (info == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE) + .setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()) + .build().buildException(); + } + return info.sequenceValue; + } + + @Override + public void returnSequences(List sequenceKeys, long timestamp, + SQLException[] exceptions) throws SQLException { + } + + @Override + public void addConnection(PhoenixConnection connection) throws SQLException { + } + + @Override + public void removeConnection(PhoenixConnection connection) throws SQLException { + } + + @Override + public KeyValueBuilder getKeyValueBuilder() { + return this.kvBuilder; + } + + @Override + public boolean supportsFeature(Feature feature) { + return true; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public GuidePostsInfo getTableStats(GuidePostsKey key) { + GuidePostsInfo info = null; + try { + info = guidePostsCache.get(key); + } catch (ExecutionException e) { + return GuidePostsInfo.NO_GUIDEPOST; + } + if (null == info) { + return GuidePostsInfo.NO_GUIDEPOST; + } + return info; + } + + @Override + public long clearCache() throws SQLException { + return 0; + } + + @Override + public int getSequenceSaltBuckets() { + return getProps().getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB, + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + } + + public MetaDataMutationResult createFunction(List functionData, PFunction function, + boolean temporary) throws SQLException { + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0l, null); + } + + @Override + public void addFunction(PFunction function) throws 
SQLException { + this.metaData.addFunction(function); + } + + @Override + public void removeFunction(PName tenantId, String function, long functionTimeStamp) + throws SQLException { + this.metaData.removeFunction(tenantId, function, functionTimeStamp); + } + + @Override + public MetaDataMutationResult getFunctions(PName tenantId, + List> functionNameAndTimeStampPairs, long clientTimestamp) + throws SQLException { + List functions = new ArrayList(functionNameAndTimeStampPairs.size()); + for (Pair functionInfo : functionNameAndTimeStampPairs) { + try { + PFunction function2 = + metaData.getFunction(new PTableKey(tenantId, Bytes.toString(functionInfo.getFirst()))); + functions.add(function2); + } catch (FunctionNotFoundException e) { + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, 0, null); + } + } + if (functions.isEmpty()) { + return null; + } + return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, functions, true); + } + + @Override + public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) + throws SQLException { + return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, 0, null); + } + + @Override + public long getRenewLeaseThresholdMilliSeconds() { + return 0; + } + + @Override + public boolean isRenewingLeasesEnabled() { + return false; + } + + public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException { + List regions = tableSplits.get(Bytes.toString(tableName)); + if (regions != null) { + for (HRegionLocation region : regions) { + if ( + Bytes.compareTo(region.getRegion().getStartKey(), row) <= 0 + && Bytes.compareTo(region.getRegion().getEndKey(), row) > 0 + ) { + return region; + } + } + } + return new HRegionLocation( + RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)) + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build(), + SERVER_NAME, -1); + } + + @Override + public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) { + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0l, null); + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + this.metaData.addSchema(schema); + } + + @Override + public MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) + throws SQLException { + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, 0, null); + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + metaData.removeSchema(schema, schemaTimeStamp); + } + + @Override + public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) { + return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, 0, null); + } + + /** + * Manually adds {@link GuidePostsInfo} for a table to the client-side cache. Not a + * {@link ConnectionQueryServices} method. Exposed for testing purposes. 
+ */ + public void addTableStats(GuidePostsKey key, GuidePostsInfo info) { + this.guidePostsCache.put(Objects.requireNonNull(key), info); + } + + @Override + public void invalidateStats(GuidePostsKey key) { + this.guidePostsCache.invalidate(Objects.requireNonNull(key)); + } + + @Override + public void upgradeSystemTables(String url, Properties props) throws SQLException { + } + + @Override + public boolean isUpgradeRequired() { + return false; + } + + @Override + public void clearUpgradeRequired() { + } + + @Override + public Configuration getConfiguration() { + return config; + } + + @Override + public User getUser() { + return user; + } + + @Override + public QueryLoggerDisruptor getQueryDisruptor() { + return null; + } + + @Override + public PhoenixTransactionClient initTransactionClient(Provider provider) { + return null; // Client is not necessary + } + + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + return true; + } + + @Override + public void deleteMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + } + + @Override + public PMetaData getMetaDataCache() { + return metaData; + } + + @Override + public int getConnectionCount(boolean isInternal) { + return 0; + } + + @Override + public void invalidateServerMetadataCache(List requests) + throws Throwable { + // No-op + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DefaultGuidePostsCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DefaultGuidePostsCacheFactory.java index 96227d5a333..ff5169ce662 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DefaultGuidePostsCacheFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DefaultGuidePostsCacheFactory.java @@ -1,11 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.query; @@ -13,33 +21,34 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_STATS_COLLECTION_ENABLED; import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ReadOnlyProps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class DefaultGuidePostsCacheFactory implements GuidePostsCacheFactory { - private static final Logger LOGGER = LoggerFactory.getLogger(DefaultGuidePostsCacheFactory.class); + private static final Logger LOGGER = LoggerFactory.getLogger(DefaultGuidePostsCacheFactory.class); - @Override - public PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices queryServices, ReadOnlyProps readOnlyProps, - Configuration config) { - Preconditions.checkNotNull(config); - - final boolean isStatsEnabled = config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED); - if (queryServices == null || !isStatsEnabled) { - LOGGER.info("Using EmptyStatsLoader from DefaultGuidePostsCacheFactory"); - return new EmptyStatsLoader(); - } - return new StatsLoaderImpl(queryServices); - } + @Override + public PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices queryServices, + ReadOnlyProps readOnlyProps, Configuration config) { + Preconditions.checkNotNull(config); - @Override - public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, Configuration config) { - LOGGER.debug("DefaultGuidePostsCacheFactory guide post cache construction."); - PhoenixStatsCacheLoader cacheLoader = new PhoenixStatsCacheLoader(phoenixStatsLoader, config); - return new GuidePostsCacheImpl(cacheLoader, config); + final boolean isStatsEnabled = + config.getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED); + if (queryServices == null || !isStatsEnabled) { + LOGGER.info("Using EmptyStatsLoader from DefaultGuidePostsCacheFactory"); + return new EmptyStatsLoader(); } + return new StatsLoaderImpl(queryServices); + } + + @Override + public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, + Configuration config) { + LOGGER.debug("DefaultGuidePostsCacheFactory guide post cache construction."); + PhoenixStatsCacheLoader cacheLoader = new PhoenixStatsCacheLoader(phoenixStatsLoader, config); + return new GuidePostsCacheImpl(cacheLoader, config); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java index 07ff39a781f..146ebd512ee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,417 +57,417 @@ import org.apache.phoenix.transaction.PhoenixTransactionClient; import org.apache.phoenix.transaction.TransactionFactory.Provider; - -public class DelegateConnectionQueryServices extends DelegateQueryServices implements ConnectionQueryServices { - - public DelegateConnectionQueryServices(ConnectionQueryServices delegate) { - super(delegate); - } - - @Override - public ConnectionQueryServices getDelegate() { - return (ConnectionQueryServices)super.getDelegate(); - } - - @Override - public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId) { - return getDelegate().getChildQueryServices(tenantId); - } - - @Override - public Table getTable(byte[] tableName) throws SQLException { - return getDelegate().getTable(tableName); - } - - @Override - public Table getTableIfExists(byte[] tableName) throws SQLException { - return getDelegate().getTableIfExists(tableName); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getAllTableRegions(byte[] tableName) throws SQLException { - return getDelegate().getAllTableRegions(tableName); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getAllTableRegions(byte[] tableName, int queryTimeout) - throws SQLException { - return getDelegate().getAllTableRegions(tableName, queryTimeout); - } - - /** - * {@inheritDoc}. - */ - @Override - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey) throws SQLException { - return getDelegate().getTableRegions(tableName, startRowKey, endRowKey); - } - - /** - * {@inheritDoc}. 
- */ - @Override - public List getTableRegions(byte[] tableName, byte[] startRowKey, - byte[] endRowKey, int queryTimeout) - throws SQLException { - return getDelegate().getTableRegions(tableName, startRowKey, endRowKey, queryTimeout); - } - - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - getDelegate().addTable(table, resolvedTime); - } - - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { - getDelegate().updateResolvedTimestamp(table, resolvedTimestamp); - } - - @Override - public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) - throws SQLException { - getDelegate().removeTable(tenantId, tableName, parentTableName, tableTimeStamp); - } - - @Override - public void removeColumn(PName tenantId, String tableName, List columnsToRemove, long tableTimeStamp, - long tableSeqNum, long resolvedTime) throws SQLException { - getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, resolvedTime); - } - - @Override - public PhoenixConnection connect(String url, Properties info) throws SQLException { - return getDelegate().connect(url, info); - } - - @Override - public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, long tableTimestamp, long clientTimestamp) throws SQLException { - return getDelegate().getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, clientTimestamp); - } - - @Override - public MetaDataMutationResult createTable(List tableMetaData, byte[] physicalName, PTableType tableType, - Map tableProps, - List>> families, byte[][] splits, - boolean isNamespaceMapped, boolean allocateIndexId, - boolean isDoNotUpgradePropSet, PTable parentTable) throws SQLException { - return getDelegate().createTable(tableMetaData, physicalName, tableType, tableProps, families, splits, - isNamespaceMapped, allocateIndexId, isDoNotUpgradePropSet, parentTable); - } - - @Override - public MetaDataMutationResult dropTable(List tabeMetaData, PTableType tableType, boolean cascade) throws SQLException { - return getDelegate().dropTable(tabeMetaData, tableType, cascade); - } - - @Override - public MetaDataMutationResult addColumn(List tableMetaData, - PTable table, - PTable parentTable, - PTable transformingNewTable, - Map>> properties, - Set colFamiliesForPColumnsToBeAdded, - List columns) throws SQLException { - return getDelegate().addColumn(tableMetaData, table, parentTable, transformingNewTable, - properties, colFamiliesForPColumnsToBeAdded, columns); - } - - - @Override - public MetaDataMutationResult dropColumn(List tabeMetaData, - PTableType tableType, - PTable parentTable) throws SQLException { - return getDelegate().dropColumn(tabeMetaData, tableType, parentTable); - } - - @Override - public MetaDataMutationResult updateIndexState(List tableMetadata, String parentTableName) throws SQLException { - return getDelegate().updateIndexState(tableMetadata, parentTableName); - } - - @Override public MetaDataMutationResult updateIndexState(List tableMetadata, - String parentTableName, Map>> stmtProperties, - PTable table) throws SQLException { - return getDelegate().updateIndexState(tableMetadata, parentTableName, stmtProperties,table); - } - - @Override - public void init(String url, Properties props) throws SQLException { - getDelegate().init(url, props); - } - - @Override - public MutationState updateData(MutationPlan plan) throws SQLException { - return getDelegate().updateData(plan); 
- } - - @Override - public int getLowestClusterHBaseVersion() { - return getDelegate().getLowestClusterHBaseVersion(); - } - - @Override - public void refreshLiveRegionServers() throws SQLException { - getDelegate().refreshLiveRegionServers(); - } - - @Override - public List getLiveRegionServers() { - return getDelegate().getLiveRegionServers(); - } - - @Override - public Admin getAdmin() throws SQLException { - return getDelegate().getAdmin(); - } - - @Override - public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { - return getDelegate().getTableDescriptor(tableName); - } - - @Override - public void clearTableRegionCache(TableName tableName) throws SQLException { - getDelegate().clearTableRegionCache(tableName); - } - - @Override - public boolean hasIndexWALCodec() { - return getDelegate().hasIndexWALCodec(); - } - - @Override - public long createSequence(String tenantId, String schemaName, String sequenceName, - long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, - boolean cycle, long timestamp) throws SQLException { - return getDelegate().createSequence(tenantId, schemaName, sequenceName, startWith, - incrementBy, cacheSize, minValue, maxValue, cycle, timestamp); - } - - @Override - public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) - throws SQLException { - return getDelegate().dropSequence(tenantId, schemaName, sequenceName, timestamp); - } - - @Override - public void validateSequences(List sequenceAllocations, long timestamp, - long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { - getDelegate().validateSequences(sequenceAllocations, timestamp, values, exceptions, action); - } - - @Override - public void incrementSequences(List sequenceAllocations, long timestamp, - long[] values, SQLException[] exceptions) throws SQLException { - getDelegate().incrementSequences(sequenceAllocations, timestamp, values, exceptions); - } - - @Override - public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { - return getDelegate().currentSequenceValue(sequenceKey, timestamp); - } - - @Override - public void returnSequences(List sequenceKeys, long timestamp, SQLException[] exceptions) - throws SQLException { - getDelegate().returnSequences(sequenceKeys, timestamp, exceptions); - } - - @Override - public void addConnection(PhoenixConnection connection) throws SQLException { - getDelegate().addConnection(connection); - } - - @Override - public void removeConnection(PhoenixConnection connection) throws SQLException { - getDelegate().removeConnection(connection); - } - - @Override - public KeyValueBuilder getKeyValueBuilder() { - return getDelegate().getKeyValueBuilder(); - } - - @Override - public boolean supportsFeature(Feature feature) { - return getDelegate().supportsFeature(feature); - } - - @Override - public String getUserName() { - return getDelegate().getUserName(); - } - - @Override - public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS) - throws SQLException { - getDelegate().clearTableFromCache(tenantId, schemaName, tableName, clientTS); - } - - @Override - public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException { - return getDelegate().getTableStats(key); - } - - - @Override - public long clearCache() throws SQLException { - return getDelegate().clearCache(); - } - - @Override - public int getSequenceSaltBuckets() { - return 
getDelegate().getSequenceSaltBuckets(); - } - - @Override - public MetaDataMutationResult createFunction(List functionData, PFunction function, boolean temporary) - throws SQLException { - return getDelegate().createFunction(functionData, function, temporary); - } - - @Override - public void addFunction(PFunction function) throws SQLException { - getDelegate().addFunction(function); - } - - @Override - public void removeFunction(PName tenantId, String function, long functionTimeStamp) - throws SQLException { - getDelegate().removeFunction(tenantId, function, functionTimeStamp); - } - - @Override - public MetaDataMutationResult getFunctions(PName tenantId, - List> functionNameAndTimeStampPairs, long clientTimestamp) - throws SQLException { - return getDelegate().getFunctions(tenantId, functionNameAndTimeStampPairs, clientTimestamp); - } - - @Override - public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) - throws SQLException { - return getDelegate().dropFunction(tableMetadata, ifExists); - } - - @Override - public long getRenewLeaseThresholdMilliSeconds() { - return getDelegate().getRenewLeaseThresholdMilliSeconds(); - } - - @Override - public boolean isRenewingLeasesEnabled() { - return getDelegate().isRenewingLeasesEnabled(); - } - - @Override - public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) - throws SQLException { - return getDelegate().getTableRegionLocation(tableName, row); - } - - @Override - public void addSchema(PSchema schema) throws SQLException { - getDelegate().addSchema(schema); - } - - @Override - public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) throws SQLException { - return getDelegate().createSchema(schemaMutations, schemaName); - } - - @Override - public MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) throws SQLException { - return getDelegate().getSchema(schemaName, clientTimestamp); - } - - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - getDelegate().removeSchema(schema, schemaTimeStamp); - } - - @Override - public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) throws SQLException { - return getDelegate().dropSchema(schemaMetaData, schemaName); - } - - @Override - public void invalidateStats(GuidePostsKey key) { - getDelegate().invalidateStats(key); - } - - @Override - public void upgradeSystemTables(String url, Properties props) throws SQLException { - getDelegate().upgradeSystemTables(url, props); - } - - @Override - public boolean isUpgradeRequired() { - return getDelegate().isUpgradeRequired(); - } - - @Override - public void clearUpgradeRequired() { - getDelegate().clearUpgradeRequired(); - } - - @Override - public Configuration getConfiguration() { - return getDelegate().getConfiguration(); - } - - @Override - public User getUser() { - return getDelegate().getUser(); - } - - @Override - public QueryLoggerDisruptor getQueryDisruptor() { - return getDelegate().getQueryDisruptor(); - } - - @Override - public PhoenixTransactionClient initTransactionClient(Provider provider) throws SQLException { - return getDelegate().initTransactionClient(provider); - } - - @Override - public boolean writeMutexCell(String tenantId, String schemaName, String tableName, - String columnName, String familyName) throws SQLException { - return getDelegate() - .writeMutexCell(tenantId, schemaName, tableName, columnName, familyName); - } - - @Override - public void deleteMutexCell(String tenantId, String schemaName, 
String tableName, - String columnName, String familyName) throws SQLException { - getDelegate().deleteMutexCell(tenantId, schemaName, tableName, columnName, familyName); - } - - @Override - public PMetaData getMetaDataCache() { - return getDelegate().getMetaDataCache(); - } - - public ConnectionLimiter getConnectionLimiter() { - return getDelegate().getConnectionLimiter(); - } - - @Override - public int getConnectionCount(boolean isInternal) { - return getDelegate().getConnectionCount(isInternal); - } - - @Override - public void invalidateServerMetadataCache(List requests) - throws Throwable { - getDelegate().invalidateServerMetadataCache(requests); - } -} \ No newline at end of file +public class DelegateConnectionQueryServices extends DelegateQueryServices + implements ConnectionQueryServices { + + public DelegateConnectionQueryServices(ConnectionQueryServices delegate) { + super(delegate); + } + + @Override + public ConnectionQueryServices getDelegate() { + return (ConnectionQueryServices) super.getDelegate(); + } + + @Override + public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId) { + return getDelegate().getChildQueryServices(tenantId); + } + + @Override + public Table getTable(byte[] tableName) throws SQLException { + return getDelegate().getTable(tableName); + } + + @Override + public Table getTableIfExists(byte[] tableName) throws SQLException { + return getDelegate().getTableIfExists(tableName); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getAllTableRegions(byte[] tableName) throws SQLException { + return getDelegate().getAllTableRegions(tableName); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getAllTableRegions(byte[] tableName, int queryTimeout) + throws SQLException { + return getDelegate().getAllTableRegions(tableName, queryTimeout); + } + + /** + * {@inheritDoc}. + */ + @Override + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey) throws SQLException { + return getDelegate().getTableRegions(tableName, startRowKey, endRowKey); + } + + /** + * {@inheritDoc}. 
+ */ + @Override + public List getTableRegions(byte[] tableName, byte[] startRowKey, + byte[] endRowKey, int queryTimeout) throws SQLException { + return getDelegate().getTableRegions(tableName, startRowKey, endRowKey, queryTimeout); + } + + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + getDelegate().addTable(table, resolvedTime); + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { + getDelegate().updateResolvedTimestamp(table, resolvedTimestamp); + } + + @Override + public void removeTable(PName tenantId, String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + getDelegate().removeTable(tenantId, tableName, parentTableName, tableTimeStamp); + } + + @Override + public void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { + getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, + resolvedTime); + } + + @Override + public PhoenixConnection connect(String url, Properties info) throws SQLException { + return getDelegate().connect(url, info); + } + + @Override + public MetaDataMutationResult getTable(PName tenantId, byte[] schemaBytes, byte[] tableBytes, + long tableTimestamp, long clientTimestamp) throws SQLException { + return getDelegate().getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, + clientTimestamp); + } + + @Override + public MetaDataMutationResult createTable(List tableMetaData, byte[] physicalName, + PTableType tableType, Map tableProps, + List>> families, byte[][] splits, boolean isNamespaceMapped, + boolean allocateIndexId, boolean isDoNotUpgradePropSet, PTable parentTable) + throws SQLException { + return getDelegate().createTable(tableMetaData, physicalName, tableType, tableProps, families, + splits, isNamespaceMapped, allocateIndexId, isDoNotUpgradePropSet, parentTable); + } + + @Override + public MetaDataMutationResult dropTable(List tabeMetaData, PTableType tableType, + boolean cascade) throws SQLException { + return getDelegate().dropTable(tabeMetaData, tableType, cascade); + } + + @Override + public MetaDataMutationResult addColumn(List tableMetaData, PTable table, + PTable parentTable, PTable transformingNewTable, + Map>> properties, Set colFamiliesForPColumnsToBeAdded, + List columns) throws SQLException { + return getDelegate().addColumn(tableMetaData, table, parentTable, transformingNewTable, + properties, colFamiliesForPColumnsToBeAdded, columns); + } + + @Override + public MetaDataMutationResult dropColumn(List tabeMetaData, PTableType tableType, + PTable parentTable) throws SQLException { + return getDelegate().dropColumn(tabeMetaData, tableType, parentTable); + } + + @Override + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName) throws SQLException { + return getDelegate().updateIndexState(tableMetadata, parentTableName); + } + + @Override + public MetaDataMutationResult updateIndexState(List tableMetadata, + String parentTableName, Map>> stmtProperties, PTable table) + throws SQLException { + return getDelegate().updateIndexState(tableMetadata, parentTableName, stmtProperties, table); + } + + @Override + public void init(String url, Properties props) throws SQLException { + getDelegate().init(url, props); + } + + @Override + public MutationState updateData(MutationPlan plan) throws SQLException { + return getDelegate().updateData(plan); 
+ } + + @Override + public int getLowestClusterHBaseVersion() { + return getDelegate().getLowestClusterHBaseVersion(); + } + + @Override + public void refreshLiveRegionServers() throws SQLException { + getDelegate().refreshLiveRegionServers(); + } + + @Override + public List getLiveRegionServers() { + return getDelegate().getLiveRegionServers(); + } + + @Override + public Admin getAdmin() throws SQLException { + return getDelegate().getAdmin(); + } + + @Override + public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException { + return getDelegate().getTableDescriptor(tableName); + } + + @Override + public void clearTableRegionCache(TableName tableName) throws SQLException { + getDelegate().clearTableRegionCache(tableName); + } + + @Override + public boolean hasIndexWALCodec() { + return getDelegate().hasIndexWALCodec(); + } + + @Override + public long createSequence(String tenantId, String schemaName, String sequenceName, + long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, + long timestamp) throws SQLException { + return getDelegate().createSequence(tenantId, schemaName, sequenceName, startWith, incrementBy, + cacheSize, minValue, maxValue, cycle, timestamp); + } + + @Override + public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) + throws SQLException { + return getDelegate().dropSequence(tenantId, schemaName, sequenceName, timestamp); + } + + @Override + public void validateSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions, Sequence.ValueOp action) throws SQLException { + getDelegate().validateSequences(sequenceAllocations, timestamp, values, exceptions, action); + } + + @Override + public void incrementSequences(List sequenceAllocations, long timestamp, + long[] values, SQLException[] exceptions) throws SQLException { + getDelegate().incrementSequences(sequenceAllocations, timestamp, values, exceptions); + } + + @Override + public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException { + return getDelegate().currentSequenceValue(sequenceKey, timestamp); + } + + @Override + public void returnSequences(List sequenceKeys, long timestamp, + SQLException[] exceptions) throws SQLException { + getDelegate().returnSequences(sequenceKeys, timestamp, exceptions); + } + + @Override + public void addConnection(PhoenixConnection connection) throws SQLException { + getDelegate().addConnection(connection); + } + + @Override + public void removeConnection(PhoenixConnection connection) throws SQLException { + getDelegate().removeConnection(connection); + } + + @Override + public KeyValueBuilder getKeyValueBuilder() { + return getDelegate().getKeyValueBuilder(); + } + + @Override + public boolean supportsFeature(Feature feature) { + return getDelegate().supportsFeature(feature); + } + + @Override + public String getUserName() { + return getDelegate().getUserName(); + } + + @Override + public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, + long clientTS) throws SQLException { + getDelegate().clearTableFromCache(tenantId, schemaName, tableName, clientTS); + } + + @Override + public GuidePostsInfo getTableStats(GuidePostsKey key) throws SQLException { + return getDelegate().getTableStats(key); + } + + @Override + public long clearCache() throws SQLException { + return getDelegate().clearCache(); + } + + @Override + public int getSequenceSaltBuckets() { + return 
getDelegate().getSequenceSaltBuckets(); + } + + @Override + public MetaDataMutationResult createFunction(List functionData, PFunction function, + boolean temporary) throws SQLException { + return getDelegate().createFunction(functionData, function, temporary); + } + + @Override + public void addFunction(PFunction function) throws SQLException { + getDelegate().addFunction(function); + } + + @Override + public void removeFunction(PName tenantId, String function, long functionTimeStamp) + throws SQLException { + getDelegate().removeFunction(tenantId, function, functionTimeStamp); + } + + @Override + public MetaDataMutationResult getFunctions(PName tenantId, + List> functionNameAndTimeStampPairs, long clientTimestamp) + throws SQLException { + return getDelegate().getFunctions(tenantId, functionNameAndTimeStampPairs, clientTimestamp); + } + + @Override + public MetaDataMutationResult dropFunction(List tableMetadata, boolean ifExists) + throws SQLException { + return getDelegate().dropFunction(tableMetadata, ifExists); + } + + @Override + public long getRenewLeaseThresholdMilliSeconds() { + return getDelegate().getRenewLeaseThresholdMilliSeconds(); + } + + @Override + public boolean isRenewingLeasesEnabled() { + return getDelegate().isRenewingLeasesEnabled(); + } + + @Override + public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException { + return getDelegate().getTableRegionLocation(tableName, row); + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + getDelegate().addSchema(schema); + } + + @Override + public MetaDataMutationResult createSchema(List schemaMutations, String schemaName) + throws SQLException { + return getDelegate().createSchema(schemaMutations, schemaName); + } + + @Override + public MetaDataMutationResult getSchema(String schemaName, long clientTimestamp) + throws SQLException { + return getDelegate().getSchema(schemaName, clientTimestamp); + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + getDelegate().removeSchema(schema, schemaTimeStamp); + } + + @Override + public MetaDataMutationResult dropSchema(List schemaMetaData, String schemaName) + throws SQLException { + return getDelegate().dropSchema(schemaMetaData, schemaName); + } + + @Override + public void invalidateStats(GuidePostsKey key) { + getDelegate().invalidateStats(key); + } + + @Override + public void upgradeSystemTables(String url, Properties props) throws SQLException { + getDelegate().upgradeSystemTables(url, props); + } + + @Override + public boolean isUpgradeRequired() { + return getDelegate().isUpgradeRequired(); + } + + @Override + public void clearUpgradeRequired() { + getDelegate().clearUpgradeRequired(); + } + + @Override + public Configuration getConfiguration() { + return getDelegate().getConfiguration(); + } + + @Override + public User getUser() { + return getDelegate().getUser(); + } + + @Override + public QueryLoggerDisruptor getQueryDisruptor() { + return getDelegate().getQueryDisruptor(); + } + + @Override + public PhoenixTransactionClient initTransactionClient(Provider provider) throws SQLException { + return getDelegate().initTransactionClient(provider); + } + + @Override + public boolean writeMutexCell(String tenantId, String schemaName, String tableName, + String columnName, String familyName) throws SQLException { + return getDelegate().writeMutexCell(tenantId, schemaName, tableName, columnName, familyName); + } + + @Override + public void deleteMutexCell(String tenantId, String schemaName, 
String tableName, + String columnName, String familyName) throws SQLException { + getDelegate().deleteMutexCell(tenantId, schemaName, tableName, columnName, familyName); + } + + @Override + public PMetaData getMetaDataCache() { + return getDelegate().getMetaDataCache(); + } + + public ConnectionLimiter getConnectionLimiter() { + return getDelegate().getConnectionLimiter(); + } + + @Override + public int getConnectionCount(boolean isInternal) { + return getDelegate().getConnectionCount(isInternal); + } + + @Override + public void invalidateServerMetadataCache(List requests) + throws Throwable { + getDelegate().invalidateServerMetadataCache(requests); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateQueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateQueryServices.java index ac241e0b29d..0c0ce50e61f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateQueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/DelegateQueryServices.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,49 +24,43 @@ import org.apache.phoenix.optimize.QueryOptimizer; import org.apache.phoenix.util.ReadOnlyProps; - - /** - * - * Class that delegates QueryService calls through to - * a parent QueryService. - * - * + * Class that delegates QueryService calls through to a parent QueryService. * @since 0.1 */ public class DelegateQueryServices implements QueryServices { - private final QueryServices parent; - - public DelegateQueryServices(QueryServices queryServices) { - parent = queryServices; - } + private final QueryServices parent; + + public DelegateQueryServices(QueryServices queryServices) { + parent = queryServices; + } + + public QueryServices getDelegate() { + return parent; + } - public QueryServices getDelegate() { - return parent; - } - - @Override - public ThreadPoolExecutor getExecutor() { - return parent.getExecutor(); - } + @Override + public ThreadPoolExecutor getExecutor() { + return parent.getExecutor(); + } - @Override - public MemoryManager getMemoryManager() { - return parent.getMemoryManager(); - } + @Override + public MemoryManager getMemoryManager() { + return parent.getMemoryManager(); + } - @Override - public void close() throws SQLException { - parent.close(); - } + @Override + public void close() throws SQLException { + parent.close(); + } - @Override - public ReadOnlyProps getProps() { - return parent.getProps(); - } + @Override + public ReadOnlyProps getProps() { + return parent.getProps(); + } - @Override - public QueryOptimizer getOptimizer() { - return parent.getOptimizer(); - } + @Override + public QueryOptimizer getOptimizer() { + return parent.getOptimizer(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/EmptyStatsLoader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/EmptyStatsLoader.java index 76024a3f12f..65e742a5194 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/EmptyStatsLoader.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/EmptyStatsLoader.java @@ -1,35 +1,43 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one 
or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.query; import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; /** - * {@link PhoenixStatsLoader} implementation for the Stats Loader. - * Empty stats loader if stats are disabled + * {@link PhoenixStatsLoader} implementation for the Stats Loader. Empty stats loader if stats are + * disabled */ class EmptyStatsLoader implements PhoenixStatsLoader { - @Override - public boolean needsLoad() { - return false; - } + @Override + public boolean needsLoad() { + return false; + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { - return GuidePostsInfo.NO_GUIDEPOST; - } + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { + return GuidePostsInfo.NO_GUIDEPOST; + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) throws Exception { - return GuidePostsInfo.NO_GUIDEPOST; - } + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) + throws Exception { + return GuidePostsInfo.NO_GUIDEPOST; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCache.java index 5eb2b977605..06b91d8c4e4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCache.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +17,18 @@ */ package org.apache.phoenix.query; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.stats.GuidePostsInfo; -import org.apache.phoenix.schema.stats.GuidePostsKey; import java.util.concurrent.ExecutionException; +import org.apache.phoenix.schema.stats.GuidePostsInfo; +import org.apache.phoenix.schema.stats.GuidePostsKey; public interface GuidePostsCache { - GuidePostsInfo get(GuidePostsKey key) throws ExecutionException; + GuidePostsInfo get(GuidePostsKey key) throws ExecutionException; - void put(GuidePostsKey key, GuidePostsInfo info); + void put(GuidePostsKey key, GuidePostsInfo info); - void invalidate(GuidePostsKey key); + void invalidate(GuidePostsKey key); - void invalidateAll(); + void invalidateAll(); -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheFactory.java index 7d0cbaa7049..c21c4804068 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,33 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.query; import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.util.ReadOnlyProps; /** - * Interface for configurable GuidePostsCache interface construction - * Class is meant to be defined in the ConnectionQueryServices property - * Implementations must provide a default constructor + * Interface for configurable GuidePostsCache interface construction Class is meant to be defined in + * the ConnectionQueryServices property Implementations must provide a default constructor */ public interface GuidePostsCacheFactory { - /** - * Interface for a PhoenixStatsLoader - * @param clientConnectionQueryServices current client connectionQueryServices note not - * necessary to use this connection - * @param readOnlyProps properties from HBase configuration - * @param config a Configuration for the current Phoenix/Hbase - * @return PhoenixStatsLoader interface - */ - PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices clientConnectionQueryServices, ReadOnlyProps readOnlyProps, Configuration config); + /** + * Interface for a PhoenixStatsLoader + * @param clientConnectionQueryServices current client connectionQueryServices note not necessary + * to use this connection + * @param readOnlyProps properties from HBase configuration + * @param config a Configuration for the current Phoenix/Hbase + * @return PhoenixStatsLoader interface + */ + PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices clientConnectionQueryServices, + ReadOnlyProps readOnlyProps, Configuration config); - /** - * @param phoenixStatsLoader The passed in stats loader will come from getPhoenixStatsLoader - * @param config a Configuration for the current Phoenix/Hbase - * @return GuidePostsCache interface - */ - GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, Configuration config); + /** + * @param phoenixStatsLoader The passed in stats loader will come from getPhoenixStatsLoader + * @param config a Configuration for the current Phoenix/Hbase + * @return GuidePostsCache interface + */ + GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, Configuration config); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java index 8de051f3c55..2285f6146e6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache; @@ -33,116 +31,112 @@ import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalNotification; import org.apache.phoenix.thirdparty.com.google.common.cache.Weigher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * "Client-side" cache for storing {@link GuidePostsInfo} for a column family. Intended to decouple * Phoenix from a specific version of Guava's cache. */ public class GuidePostsCacheImpl implements GuidePostsCache { - private static final Logger logger = LoggerFactory.getLogger(GuidePostsCacheImpl.class); - - private final LoadingCache cache; + private static final Logger logger = LoggerFactory.getLogger(GuidePostsCacheImpl.class); - public GuidePostsCacheImpl(PhoenixStatsCacheLoader cacheLoader, Configuration config) { - Preconditions.checkNotNull(cacheLoader); + private final LoadingCache cache; - // Number of millis to expire cache values after write - final long statsUpdateFrequency = config.getLong( - QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, - QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS); + public GuidePostsCacheImpl(PhoenixStatsCacheLoader cacheLoader, Configuration config) { + Preconditions.checkNotNull(cacheLoader); - // Maximum total weight (size in bytes) of stats entries - final long maxTableStatsCacheSize = config.getLongBytes( - QueryServices.STATS_MAX_CACHE_SIZE, - QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE); + // Number of millis to expire cache values after write + final long statsUpdateFrequency = config.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS); - cache = CacheBuilder.newBuilder() - // Refresh entries a given amount of time after they were written - .refreshAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS) - // Maximum total weight (size in bytes) of stats entries - .maximumWeight(maxTableStatsCacheSize) - // Defer actual size to the PTableStats.getEstimatedSize() - .weigher(new Weigher() { - @Override public int weigh(GuidePostsKey key, GuidePostsInfo info) { - return info.getEstimatedSize(); - } - }) - // Log removals at TRACE for debugging - .removalListener(new PhoenixStatsCacheRemovalListener()) - // Automatically load the cache when entries need to be refreshed - .build(cacheLoader); - } + // Maximum total weight (size in bytes) of stats entries + final long maxTableStatsCacheSize = config.getLongBytes(QueryServices.STATS_MAX_CACHE_SIZE, + QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE); - /** - * Returns the underlying cache. Try to use the provided methods instead of accessing the cache - * directly. 
- */ - LoadingCache getCache() { - return cache; - } - - /** - * Returns the PTableStats for the given tableName, using the provided - * valueLoader if no such mapping exists. - * - * @see com.google.common.cache.LoadingCache#get(Object) - */ - @Override - public GuidePostsInfo get(GuidePostsKey key) throws ExecutionException { - return getCache().get(key); - } - - /** - * Cache the given stats to the cache for the given tableName. - * - * @see com.google.common.cache.Cache#put(Object, Object) - */ - @Override - public void put(GuidePostsKey key, GuidePostsInfo info) { - getCache().put(Objects.requireNonNull(key), Objects.requireNonNull(info)); - } - - /** - * Removes the mapping for tableName if it exists. - * - * @see com.google.common.cache.Cache#invalidate(Object) - */ - @Override - public void invalidate(GuidePostsKey key) { - getCache().invalidate(Objects.requireNonNull(key)); - } - - /** - * Removes all mappings from the cache. - * - * @see com.google.common.cache.Cache#invalidateAll() - */ - @Override - public void invalidateAll() { - getCache().invalidateAll(); - } - - /** - * A {@link RemovalListener} implementation to track evictions from the table stats cache. - */ - static class PhoenixStatsCacheRemovalListener implements - RemovalListener { + cache = CacheBuilder.newBuilder() + // Refresh entries a given amount of time after they were written + .refreshAfterWrite(statsUpdateFrequency, TimeUnit.MILLISECONDS) + // Maximum total weight (size in bytes) of stats entries + .maximumWeight(maxTableStatsCacheSize) + // Defer actual size to the PTableStats.getEstimatedSize() + .weigher(new Weigher() { @Override - public void onRemoval(RemovalNotification notification) { - if (logger.isTraceEnabled()) { - final RemovalCause cause = notification.getCause(); - if (wasEvicted(cause)) { - GuidePostsKey key = notification.getKey(); - logger.trace("Cached stats for {} with size={}bytes was evicted due to cause={}", - new Object[] {key, notification.getValue().getEstimatedSize(), - cause}); - } - } + public int weigh(GuidePostsKey key, GuidePostsInfo info) { + return info.getEstimatedSize(); } - - boolean wasEvicted(RemovalCause cause) { - // This is actually a method on RemovalCause but isn't exposed - return RemovalCause.EXPLICIT != cause && RemovalCause.REPLACED != cause; + }) + // Log removals at TRACE for debugging + .removalListener(new PhoenixStatsCacheRemovalListener()) + // Automatically load the cache when entries need to be refreshed + .build(cacheLoader); + } + + /** + * Returns the underlying cache. Try to use the provided methods instead of accessing the cache + * directly. + */ + LoadingCache getCache() { + return cache; + } + + /** + * Returns the PTableStats for the given tableName, using the provided + * valueLoader if no such mapping exists. + * @see com.google.common.cache.LoadingCache#get(Object) + */ + @Override + public GuidePostsInfo get(GuidePostsKey key) throws ExecutionException { + return getCache().get(key); + } + + /** + * Cache the given stats to the cache for the given tableName. + * @see com.google.common.cache.Cache#put(Object, Object) + */ + @Override + public void put(GuidePostsKey key, GuidePostsInfo info) { + getCache().put(Objects.requireNonNull(key), Objects.requireNonNull(info)); + } + + /** + * Removes the mapping for tableName if it exists. 
+ * @see com.google.common.cache.Cache#invalidate(Object) + */ + @Override + public void invalidate(GuidePostsKey key) { + getCache().invalidate(Objects.requireNonNull(key)); + } + + /** + * Removes all mappings from the cache. + * @see com.google.common.cache.Cache#invalidateAll() + */ + @Override + public void invalidateAll() { + getCache().invalidateAll(); + } + + /** + * A {@link RemovalListener} implementation to track evictions from the table stats cache. + */ + static class PhoenixStatsCacheRemovalListener + implements RemovalListener { + @Override + public void onRemoval(RemovalNotification notification) { + if (logger.isTraceEnabled()) { + final RemovalCause cause = notification.getCause(); + if (wasEvicted(cause)) { + GuidePostsKey key = notification.getKey(); + logger.trace("Cached stats for {} with size={}bytes was evicted due to cause={}", + new Object[] { key, notification.getValue().getEstimatedSize(), cause }); } + } + } + + boolean wasEvicted(RemovalCause cause) { + // This is actually a method on RemovalCause but isn't exposed + return RemovalCause.EXPLICIT != cause && RemovalCause.REPLACED != cause; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheProvider.java index 2051df48916..e7afc3ec226 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheProvider.java @@ -1,11 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.query; @@ -13,67 +21,68 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.exception.PhoenixNonRetryableRuntimeException; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.InstanceResolver; import org.apache.phoenix.util.ReadOnlyProps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class GuidePostsCacheProvider { - private static final Logger LOGGER = LoggerFactory.getLogger(GuidePostsCacheProvider.class); + private static final Logger LOGGER = LoggerFactory.getLogger(GuidePostsCacheProvider.class); - GuidePostsCacheFactory guidePostsCacheFactory = null; + GuidePostsCacheFactory guidePostsCacheFactory = null; - @VisibleForTesting - GuidePostsCacheFactory loadAndGetGuidePostsCacheFactory(String classString) { - Preconditions.checkNotNull(classString); - if (guidePostsCacheFactory == null) { - try { + @VisibleForTesting + GuidePostsCacheFactory loadAndGetGuidePostsCacheFactory(String classString) { + Preconditions.checkNotNull(classString); + if (guidePostsCacheFactory == null) { + try { - Class clazz = Class.forName(classString); - if (!GuidePostsCacheFactory.class.isAssignableFrom(clazz)) { - String msg = String.format( - "Could not load/instantiate class %s is not an instance of GuidePostsCacheFactory", - classString); - LOGGER.error(msg); - throw new PhoenixNonRetryableRuntimeException(msg); - } + Class clazz = Class.forName(classString); + if (!GuidePostsCacheFactory.class.isAssignableFrom(clazz)) { + String msg = String.format( + "Could not load/instantiate class %s is not an instance of GuidePostsCacheFactory", + classString); + LOGGER.error(msg); + throw new PhoenixNonRetryableRuntimeException(msg); + } - List factoryList = InstanceResolver.get(GuidePostsCacheFactory.class, null); - for (GuidePostsCacheFactory factory : factoryList) { - if (clazz.isInstance(factory)) { - guidePostsCacheFactory = factory; - LOGGER.info(String.format("Sucessfully loaded class for GuidePostsCacheFactor of type: %s", - classString)); - break; - } - } - if (guidePostsCacheFactory == null) { - String msg = String.format("Could not load/instantiate class %s", classString); - LOGGER.error(msg); - throw new PhoenixNonRetryableRuntimeException(msg); - } - } catch (ClassNotFoundException e) { - LOGGER.error(String.format("Could not load/instantiate class %s", classString), e); - throw new PhoenixNonRetryableRuntimeException(e); - } + List factoryList = + InstanceResolver.get(GuidePostsCacheFactory.class, null); + for (GuidePostsCacheFactory factory : factoryList) { + if (clazz.isInstance(factory)) { + guidePostsCacheFactory = factory; + LOGGER.info(String.format( + "Sucessfully loaded class for GuidePostsCacheFactor of type: %s", classString)); + break; + } } - return guidePostsCacheFactory; + if (guidePostsCacheFactory == null) { + String msg = String.format("Could not load/instantiate class %s", classString); + LOGGER.error(msg); + throw new PhoenixNonRetryableRuntimeException(msg); + } + } catch (ClassNotFoundException e) { + LOGGER.error(String.format("Could not load/instantiate class %s", classString), e); + throw new PhoenixNonRetryableRuntimeException(e); + } } + return guidePostsCacheFactory; + } - public GuidePostsCacheWrapper 
getGuidePostsCache(String classStr, ConnectionQueryServices queryServices, - Configuration config) { - ReadOnlyProps props = null; - if (queryServices != null) { - props = queryServices.getProps(); - } - GuidePostsCacheFactory guidePostCacheFactory = loadAndGetGuidePostsCacheFactory(classStr); - PhoenixStatsLoader phoenixStatsLoader = guidePostsCacheFactory.getPhoenixStatsLoader(queryServices, props, - config); - GuidePostsCache guidePostsCache = guidePostCacheFactory.getGuidePostsCache(phoenixStatsLoader, config); - return new GuidePostsCacheWrapper(guidePostsCache); + public GuidePostsCacheWrapper getGuidePostsCache(String classStr, + ConnectionQueryServices queryServices, Configuration config) { + ReadOnlyProps props = null; + if (queryServices != null) { + props = queryServices.getProps(); } + GuidePostsCacheFactory guidePostCacheFactory = loadAndGetGuidePostsCacheFactory(classStr); + PhoenixStatsLoader phoenixStatsLoader = + guidePostsCacheFactory.getPhoenixStatsLoader(queryServices, props, config); + GuidePostsCache guidePostsCache = + guidePostCacheFactory.getGuidePostsCache(phoenixStatsLoader, config); + return new GuidePostsCacheWrapper(guidePostsCache); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheWrapper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheWrapper.java index 2f9182dd995..8c0fc1bbaa6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheWrapper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/GuidePostsCacheWrapper.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,59 +17,59 @@ */ package org.apache.phoenix.query; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import java.util.List; +import java.util.concurrent.ExecutionException; + import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.SchemaUtil; -import java.util.List; -import java.util.concurrent.ExecutionException; - public class GuidePostsCacheWrapper { - private final GuidePostsCache guidePostsCache; + private final GuidePostsCache guidePostsCache; - GuidePostsCacheWrapper(GuidePostsCache guidePostsCache){ - this.guidePostsCache = guidePostsCache; - } + GuidePostsCacheWrapper(GuidePostsCache guidePostsCache) { + this.guidePostsCache = guidePostsCache; + } - GuidePostsInfo get(GuidePostsKey key) throws ExecutionException { - return guidePostsCache.get(key); - } + GuidePostsInfo get(GuidePostsKey key) throws ExecutionException { + return guidePostsCache.get(key); + } - void put(GuidePostsKey key, GuidePostsInfo info){ - guidePostsCache.put(key,info); - } + void put(GuidePostsKey key, GuidePostsInfo info) { + guidePostsCache.put(key, info); + } - void invalidate(GuidePostsKey key){ - guidePostsCache.invalidate(key); - } + void invalidate(GuidePostsKey key) { + guidePostsCache.invalidate(key); + } - void invalidateAll(){ - guidePostsCache.invalidateAll(); - } + void invalidateAll() { + guidePostsCache.invalidateAll(); + } - public void invalidateAll(TableDescriptor htableDesc) { - Preconditions.checkNotNull(htableDesc); - byte[] tableName = htableDesc.getTableName().getName(); - for (byte[] fam : htableDesc.getColumnFamilyNames()) { - invalidate(new GuidePostsKey(tableName, fam)); - } + public void invalidateAll(TableDescriptor htableDesc) { + Preconditions.checkNotNull(htableDesc); + byte[] tableName = htableDesc.getTableName().getName(); + for (byte[] fam : htableDesc.getColumnFamilyNames()) { + invalidate(new GuidePostsKey(tableName, fam)); } + } - public void invalidateAll(PTable table) { - Preconditions.checkNotNull(table); - byte[] physicalName = table.getPhysicalName().getBytes(); - List families = table.getColumnFamilies(); - if (families.isEmpty()) { - invalidate(new GuidePostsKey(physicalName, SchemaUtil.getEmptyColumnFamily(table))); - } else { - for (PColumnFamily family : families) { - invalidate(new GuidePostsKey(physicalName, family.getName().getBytes())); - } - } + public void invalidateAll(PTable table) { + Preconditions.checkNotNull(table); + byte[] physicalName = table.getPhysicalName().getBytes(); + List families = table.getColumnFamilies(); + if (families.isEmpty()) { + invalidate(new GuidePostsKey(physicalName, SchemaUtil.getEmptyColumnFamily(table))); + } else { + for (PColumnFamily family : families) { + invalidate(new GuidePostsKey(physicalName, family.getName().getBytes())); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java index 780dbfd4a29..8d87570d5c8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HBaseFactoryProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,36 +20,33 @@ import org.apache.phoenix.util.InstanceResolver; /** - * Manages factories that provide extension points for HBase. - * - * Dependent modules may register their own implementations of the following using {@link java.util.ServiceLoader}: + * Manages factories that provide extension points for HBase. Dependent modules may register their + * own implementations of the following using {@link java.util.ServiceLoader}: *
<ul>
- *   <li>{@link ConfigurationFactory}</li>
- *   <li>{@link HTableFactory}</li>
- *   <li>{@link HConnectionFactory}</li>
+ * <li>{@link ConfigurationFactory}</li>
+ * <li>{@link HTableFactory}</li>
+ * <li>{@link HConnectionFactory}</li>
 * </ul>
- * * If a custom implementation is not registered, the default implementations will be used. - * - * * @since 0.2 */ public class HBaseFactoryProvider { - private static final HTableFactory DEFAULT_HTABLE_FACTORY = new HTableFactory.HTableFactoryImpl(); - private static final HConnectionFactory DEFAULT_HCONNECTION_FACTORY = - new HConnectionFactory.HConnectionFactoryImpl(); - private static final ConfigurationFactory DEFAULT_CONFIGURATION_FACTORY = new ConfigurationFactory.ConfigurationFactoryImpl(); + private static final HTableFactory DEFAULT_HTABLE_FACTORY = new HTableFactory.HTableFactoryImpl(); + private static final HConnectionFactory DEFAULT_HCONNECTION_FACTORY = + new HConnectionFactory.HConnectionFactoryImpl(); + private static final ConfigurationFactory DEFAULT_CONFIGURATION_FACTORY = + new ConfigurationFactory.ConfigurationFactoryImpl(); - public static HTableFactory getHTableFactory() { - return InstanceResolver.getSingleton(HTableFactory.class, DEFAULT_HTABLE_FACTORY); - } + public static HTableFactory getHTableFactory() { + return InstanceResolver.getSingleton(HTableFactory.class, DEFAULT_HTABLE_FACTORY); + } - public static HConnectionFactory getHConnectionFactory() { - return InstanceResolver.getSingleton(HConnectionFactory.class, DEFAULT_HCONNECTION_FACTORY); - } + public static HConnectionFactory getHConnectionFactory() { + return InstanceResolver.getSingleton(HConnectionFactory.class, DEFAULT_HCONNECTION_FACTORY); + } - public static ConfigurationFactory getConfigurationFactory() { - return InstanceResolver.getSingleton(ConfigurationFactory.class, DEFAULT_CONFIGURATION_FACTORY); - } + public static ConfigurationFactory getConfigurationFactory() { + return InstanceResolver.getSingleton(ConfigurationFactory.class, DEFAULT_CONFIGURATION_FACTORY); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HConnectionFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HConnectionFactory.java index c3e0eb54dcf..4ce6c7023b6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HConnectionFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HConnectionFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,26 +25,23 @@ /** * Factory for creating HConnection - * - * */ public interface HConnectionFactory { - /** - * Creates HConnection to access HBase clusters. - * - * @param conf object - * @return A HConnection instance - */ - Connection createConnection(Configuration conf) throws IOException; + /** + * Creates HConnection to access HBase clusters. + * @param conf object + * @return A HConnection instance + */ + Connection createConnection(Configuration conf) throws IOException; - /** - * Default implementation. Uses standard HBase HConnections. - */ - static class HConnectionFactoryImpl implements HConnectionFactory { - @Override - public Connection createConnection(Configuration conf) throws IOException { - return ConnectionFactory.createConnection(conf); - } + /** + * Default implementation. Uses standard HBase HConnections. 
+ */ + static class HConnectionFactoryImpl implements HConnectionFactory { + @Override + public Connection createConnection(Configuration conf) throws IOException { + return ConnectionFactory.createConnection(conf); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HTableFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HTableFactory.java index 10a531f1982..9b6927346d3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/HTableFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/HTableFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,30 +26,28 @@ /** * Creates clients to access HBase tables. - * - * * @since 0.2 */ public interface HTableFactory { - /** - * Creates an HBase client using an externally managed HConnection and Thread pool. - * - * @param tableName Name of the table. - * @param connection HConnection to use. - * @param pool ExecutorService to use. - * @return An client to access an HBase table. - * @throws IOException if a server or network exception occurs - */ - Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException; + /** + * Creates an HBase client using an externally managed HConnection and Thread pool. + * @param tableName Name of the table. + * @param connection HConnection to use. + * @param pool ExecutorService to use. + * @return An client to access an HBase table. + * @throws IOException if a server or network exception occurs + */ + Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException; - /** - * Default implementation. Uses standard HBase HTables. - */ - static class HTableFactoryImpl implements HTableFactory { - @Override - public Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException { - // Let the HBase client manage the thread pool instead of passing ours through - return connection.getTable(TableName.valueOf(tableName)); - } + /** + * Default implementation. Uses standard HBase HTables. + */ + static class HTableFactoryImpl implements HTableFactory { + @Override + public Table getTable(byte[] tableName, Connection connection, ExecutorService pool) + throws IOException { + // Let the HBase client manage the thread pool instead of passing ours through + return connection.getTable(TableName.valueOf(tableName)); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ITGuidePostsCacheFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ITGuidePostsCacheFactory.java index 61a91755c8c..908f87b4639 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/ITGuidePostsCacheFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/ITGuidePostsCacheFactory.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. 
The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.query; @@ -20,33 +27,33 @@ * Test Class Only used to verify in e2e tests */ public class ITGuidePostsCacheFactory implements GuidePostsCacheFactory { - public static final ConcurrentHashMap map = - new ConcurrentHashMap<>(); - private static AtomicInteger count = new AtomicInteger(); - private Integer key; - - public ITGuidePostsCacheFactory() { - key = count.getAndIncrement(); - map.put(key, new DefaultGuidePostsCacheFactory()); - } - - public static int getCount() { - return count.get(); - } - - public static ConcurrentHashMap getMap(){ - return map; - } - - @Override - public PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices queryServices, - ReadOnlyProps readOnlyProps, Configuration config) { - return map.get(key).getPhoenixStatsLoader(queryServices, readOnlyProps, config); - } - - @Override - public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, - Configuration config) { - return map.get(key).getGuidePostsCache(phoenixStatsLoader, config); - } + public static final ConcurrentHashMap map = + new ConcurrentHashMap<>(); + private static AtomicInteger count = new AtomicInteger(); + private Integer key; + + public ITGuidePostsCacheFactory() { + key = count.getAndIncrement(); + map.put(key, new DefaultGuidePostsCacheFactory()); + } + + public static int getCount() { + return count.get(); + } + + public static ConcurrentHashMap getMap() { + return map; + } + + @Override + public PhoenixStatsLoader getPhoenixStatsLoader(ConnectionQueryServices queryServices, + ReadOnlyProps readOnlyProps, Configuration config) { + return map.get(key).getPhoenixStatsLoader(queryServices, readOnlyProps, config); + } + + @Override + public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, + Configuration config) { + return map.get(key).getGuidePostsCache(phoenixStatsLoader, config); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/KeyRange.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/KeyRange.java index 4bde3cf07c5..77db758b891 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/KeyRange.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/query/KeyRange.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,700 +42,722 @@ import edu.umd.cs.findbugs.annotations.NonNull; /** - * * Class that represents an upper/lower bound key range. - * - * * @since 0.1 */ public class KeyRange implements Writable { - public enum Bound { LOWER, UPPER }; - private static final byte[] DEGENERATE_KEY = new byte[] {1}; - public static final byte[] UNBOUND = new byte[0]; - public static final byte[] NULL_BOUND = new byte[0]; - /** - * KeyRange for variable length null values. Since we need to represent this using an empty byte array (which - * is what we use for upper/lower bound), we create this range using the private constructor rather than - * going through the static creation method (where this would not be possible). - */ - public static final KeyRange IS_NULL_RANGE = new KeyRange(NULL_BOUND, true, NULL_BOUND, true, false); - /** - * KeyRange for non null variable length values. Since we need to represent this using an empty byte array (which - * is what we use for upper/lower bound), we create this range using the private constructor rather than going - * through the static creation method (where this would not be possible). - */ - public static final KeyRange IS_NOT_NULL_RANGE = new KeyRange(ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY), true, UNBOUND, false, false); - - /** - * KeyRange for an empty key range - */ - public static final KeyRange EMPTY_RANGE = new KeyRange(DEGENERATE_KEY, false, DEGENERATE_KEY, false, false); - - /** - * KeyRange that contains all values - */ - public static final KeyRange EVERYTHING_RANGE = new KeyRange(UNBOUND, false, UNBOUND, false, false); - - public static final Function POINT = new Function() { - @Override - public KeyRange apply(byte[] input) { - return new KeyRange(input, true, input, true, false); - } - }; - public static final Comparator COMPARATOR = new Comparator() { - @Override public int compare(KeyRange o1, KeyRange o2) { - int result = Boolean.compare(o2.lowerUnbound(), o1.lowerUnbound()); - if (result != 0) { - return result; - } - result = Bytes.BYTES_COMPARATOR.compare(o1.getLowerRange(), o2.getLowerRange()); - if (result != 0) { - return result; - } - result = Boolean.compare(o2.isLowerInclusive(), o1.isLowerInclusive()); - if (result != 0) { - return result; - } - result = Boolean.compare(o1.upperUnbound(), o2.upperUnbound()); - if (result != 0) { - return result; - } - result = Bytes.BYTES_COMPARATOR.compare(o1.getUpperRange(), o2.getUpperRange()); - if (result != 0) { - return result; - } - return Boolean.compare(o2.isUpperInclusive(), o1.isUpperInclusive()); - } - }; - - public static final Comparator DESC_COMPARATOR = new Comparator() { - @Override public int compare(KeyRange o1, KeyRange o2) { - int result = Boolean.compare(o2.lowerUnbound(), o1.lowerUnbound()); - if (result != 0) { - return result; - } - result = DescVarLengthFastByteComparisons.compareTo(o1.getLowerRange(), 0, o1.getLowerRange().length, - o2.getLowerRange(), 0, o2.getLowerRange().length); - if (result != 0) { - return result; - } - result = Boolean.compare(o2.isLowerInclusive(), o1.isLowerInclusive()); - if (result != 0) { - 
return result; - } - result = Boolean.compare(o1.upperUnbound(), o2.upperUnbound()); - if (result != 0) { - return result; - } - result = DescVarLengthFastByteComparisons.compareTo(o1.getUpperRange(), 0, o1.getUpperRange().length, - o2.getUpperRange(), 0, o2.getUpperRange().length); - if (result != 0) { - return result; - } - return Boolean.compare(o2.isUpperInclusive(), o1.isUpperInclusive()); - } - }; - - protected byte[] lowerRange; - protected boolean lowerInclusive; - protected byte[] upperRange; - protected boolean upperInclusive; - protected boolean isSingleKey; - protected boolean inverted = false; - - public static KeyRange getKeyRange(byte[] point) { - return getKeyRange(point, true, point, true); - } - - public static KeyRange getKeyRange(byte[] lowerRange, byte[] upperRange) { - return getKeyRange(lowerRange, true, upperRange, false); - } - - private static KeyRange getSingleton(byte[] lowerRange, boolean lowerInclusive, - byte[] upperRange, boolean upperInclusive) { - return getSingleton(lowerRange, lowerInclusive, - upperRange, upperInclusive, false); - } - - private static KeyRange getSingleton(byte[] lowerRange, boolean lowerInclusive, - byte[] upperRange, boolean upperInclusive, boolean inverted) { - if (lowerRange == null || upperRange == null) { - return EMPTY_RANGE; - } - if (lowerRange.length == 0 && upperRange.length == 0) { - // Need singleton to represent NULL range so it gets treated differently - // than an unbound RANGE. - return lowerInclusive && upperInclusive ? IS_NULL_RANGE : EVERYTHING_RANGE; - } - if ((lowerRange.length != 0 || lowerRange == NULL_BOUND) - && (upperRange.length != 0 || upperRange == NULL_BOUND)) { - int cmp; - if (inverted) { - // Allow illegal ranges to be defined. These will be fixed during processing. - cmp = - Bytes.compareTo(SortOrder.invert(upperRange, 0, upperRange.length), - SortOrder.invert(lowerRange, 0, lowerRange.length)); - } else { - cmp = Bytes.compareTo(lowerRange, upperRange); - } - if (cmp > 0 || (cmp == 0 && !(lowerInclusive && upperInclusive))) { - return EMPTY_RANGE; - } - } - return null; - } - - public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, - byte[] upperRange, boolean upperInclusive) { - return getKeyRange(lowerRange, lowerInclusive, - upperRange, upperInclusive, false); - } - public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, - byte[] upperRange, boolean upperInclusive, boolean inverted) { - KeyRange range = getSingleton(lowerRange, lowerInclusive, upperRange, upperInclusive, inverted); - if (range != null) { - return range; - } - boolean unboundLower = false; - boolean unboundUpper = false; - if (lowerRange.length == 0 && lowerRange != NULL_BOUND) { - lowerRange = UNBOUND; - lowerInclusive = false; - unboundLower = true; - } - if (upperRange.length == 0 && upperRange != NULL_BOUND) { - upperRange = UNBOUND; - upperInclusive = false; - unboundUpper = true; - } - - return new KeyRange(lowerRange, unboundLower ? false : lowerInclusive, - upperRange, unboundUpper ? 
false : upperInclusive, inverted); - } - - public static KeyRange read(DataInput input) throws IOException { - KeyRange range = new KeyRange(); - range.readFields(input); - // Translate to singleton after reading - KeyRange singletonRange = getSingleton(range.lowerRange, range.lowerInclusive, range.upperRange, range.upperInclusive); - if (singletonRange != null) { - return singletonRange; - } - // Otherwise, just keep the range we read - return range; - } - - protected KeyRange() { - this.lowerRange = DEGENERATE_KEY; - this.lowerInclusive = false; - this.upperRange = DEGENERATE_KEY; - this.upperInclusive = false; - this.isSingleKey = false; - } - - protected KeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, boolean upperInclusive, boolean inverted) { - this.lowerRange = lowerRange; - this.lowerInclusive = lowerInclusive; - this.upperRange = upperRange; - this.upperInclusive = upperInclusive; - this.inverted = inverted; - init(); - } - - private void init() { - this.isSingleKey = lowerRange != UNBOUND && upperRange != UNBOUND - && lowerInclusive && upperInclusive && Bytes.compareTo(lowerRange, upperRange) == 0; - } - - public byte[] getRange(Bound bound) { - return bound == Bound.LOWER ? getLowerRange() : getUpperRange(); - } - - public boolean isInclusive(Bound bound) { - return bound == Bound.LOWER ? isLowerInclusive() : isUpperInclusive(); - } - - public boolean isUnbound(Bound bound) { - return bound == Bound.LOWER ? lowerUnbound() : upperUnbound(); - } - - public boolean isSingleKey() { - return isSingleKey; - } - - public int compareLowerToUpperBound(ImmutableBytesWritable ptr, boolean isInclusive, BytesComparator comparator) { - return compareLowerToUpperBound(ptr.get(), ptr.getOffset(), ptr.getLength(), isInclusive, comparator); - } - - public int compareLowerToUpperBound(ImmutableBytesWritable ptr, BytesComparator comparator) { - return compareLowerToUpperBound(ptr, true, comparator); - } - - public int compareUpperToLowerBound(ImmutableBytesWritable ptr, boolean isInclusive, BytesComparator comparator) { - return compareUpperToLowerBound(ptr.get(), ptr.getOffset(), ptr.getLength(), isInclusive, comparator); - } - - public int compareUpperToLowerBound(ImmutableBytesWritable ptr, BytesComparator comparator) { - return compareUpperToLowerBound(ptr, true, comparator); - } - - public int compareLowerToUpperBound( byte[] b, int o, int l, BytesComparator comparator) { - return compareLowerToUpperBound(b,o,l,true, comparator); - } - - public int compareLowerToUpperBound( byte[] b, BytesComparator comparator) { - return compareLowerToUpperBound(b,0,b.length, comparator); - } - - /** - * Compares a lower bound against an upper bound - * @param b upper bound byte array - * @param o upper bound offset - * @param l upper bound length - * @param isInclusive upper bound inclusive - * @param comparator comparator used to do compare the byte array using offset and length - * @return -1 if the lower bound is less than the upper bound, - * 1 if the lower bound is greater than the upper bound, - * and 0 if they are equal. 
- */ - public int compareLowerToUpperBound( byte[] b, int o, int l, boolean isInclusive, BytesComparator comparator) { - if (lowerUnbound() || b == KeyRange.UNBOUND) { - return -1; - } - int cmp = comparator.compare(lowerRange, 0, lowerRange.length, b, o, l); - if (cmp > 0) { - return 1; - } - if (cmp < 0) { - return -1; - } - if (lowerInclusive && isInclusive) { - return 0; - } - return 1; - } - - public int compareUpperToLowerBound(byte[] b, BytesComparator comparator) { - return compareUpperToLowerBound(b,0,b.length, comparator); - } - - public int compareUpperToLowerBound(byte[] b, int o, int l, BytesComparator comparator) { - return compareUpperToLowerBound(b,o,l, true, comparator); - } - - public int compareUpperToLowerBound(byte[] b, int o, int l, boolean isInclusive, BytesComparator comparator) { - if (upperUnbound() || b == KeyRange.UNBOUND) { - return 1; - } - int cmp = comparator.compare(upperRange, 0, upperRange.length, b, o, l); - if (cmp > 0) { - return 1; - } - if (cmp < 0) { - return -1; - } - if (upperInclusive && isInclusive) { - return 0; - } - return -1; - } - - public byte[] getLowerRange() { - return lowerRange; - } - - public boolean isLowerInclusive() { - return lowerInclusive; - } - - public byte[] getUpperRange() { - return upperRange; - } - - public boolean isUpperInclusive() { - return upperInclusive; - } - - public boolean isUnbound() { - return lowerUnbound() || upperUnbound(); - } - - public boolean upperUnbound() { - return upperRange == UNBOUND; - } - - public boolean lowerUnbound() { - return lowerRange == UNBOUND; - } - + public enum Bound { + LOWER, + UPPER + }; + + private static final byte[] DEGENERATE_KEY = new byte[] { 1 }; + public static final byte[] UNBOUND = new byte[0]; + public static final byte[] NULL_BOUND = new byte[0]; + /** + * KeyRange for variable length null values. Since we need to represent this using an empty byte + * array (which is what we use for upper/lower bound), we create this range using the private + * constructor rather than going through the static creation method (where this would not be + * possible). + */ + public static final KeyRange IS_NULL_RANGE = + new KeyRange(NULL_BOUND, true, NULL_BOUND, true, false); + /** + * KeyRange for non null variable length values. Since we need to represent this using an empty + * byte array (which is what we use for upper/lower bound), we create this range using the private + * constructor rather than going through the static creation method (where this would not be + * possible). + */ + public static final KeyRange IS_NOT_NULL_RANGE = new KeyRange( + ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY), true, UNBOUND, false, false); + + /** + * KeyRange for an empty key range + */ + public static final KeyRange EMPTY_RANGE = + new KeyRange(DEGENERATE_KEY, false, DEGENERATE_KEY, false, false); + + /** + * KeyRange that contains all values + */ + public static final KeyRange EVERYTHING_RANGE = + new KeyRange(UNBOUND, false, UNBOUND, false, false); + + public static final Function POINT = new Function() { @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Arrays.hashCode(lowerRange); - if (lowerRange != null) - result = prime * result + (lowerInclusive ? 1231 : 1237); - result = prime * result + Arrays.hashCode(upperRange); - if (upperRange != null) - result = prime * result + (upperInclusive ? 
1231 : 1237); - return result; + public KeyRange apply(byte[] input) { + return new KeyRange(input, true, input, true, false); } - + }; + public static final Comparator COMPARATOR = new Comparator() { @Override - public String toString() { - if (isSingleKey()) { - return Bytes.toStringBinary(lowerRange); - } - return (lowerInclusive ? "[" : - "(") + (lowerUnbound() ? "*" : - Bytes.toStringBinary(lowerRange)) + " - " + (upperUnbound() ? "*" : - Bytes.toStringBinary(upperRange)) + (upperInclusive ? "]" : ")" ); + public int compare(KeyRange o1, KeyRange o2) { + int result = Boolean.compare(o2.lowerUnbound(), o1.lowerUnbound()); + if (result != 0) { + return result; + } + result = Bytes.BYTES_COMPARATOR.compare(o1.getLowerRange(), o2.getLowerRange()); + if (result != 0) { + return result; + } + result = Boolean.compare(o2.isLowerInclusive(), o1.isLowerInclusive()); + if (result != 0) { + return result; + } + result = Boolean.compare(o1.upperUnbound(), o2.upperUnbound()); + if (result != 0) { + return result; + } + result = Bytes.BYTES_COMPARATOR.compare(o1.getUpperRange(), o2.getUpperRange()); + if (result != 0) { + return result; + } + return Boolean.compare(o2.isUpperInclusive(), o1.isUpperInclusive()); } + }; + public static final Comparator DESC_COMPARATOR = new Comparator() { @Override - public boolean equals(Object o) { - if (!(o instanceof KeyRange)) { - return false; - } - KeyRange that = (KeyRange)o; - return Bytes.compareTo(this.lowerRange,that.lowerRange) == 0 && this.lowerInclusive == that.lowerInclusive && - Bytes.compareTo(this.upperRange, that.upperRange) == 0 && this.upperInclusive == that.upperInclusive; - } - - public KeyRange intersect(KeyRange range) { - byte[] newLowerRange; - byte[] newUpperRange; - boolean newLowerInclusive; - boolean newUpperInclusive; - // Special case for null, is it is never included another range - // except for null itself. 
- if (this == IS_NULL_RANGE && range == IS_NULL_RANGE) { - return IS_NULL_RANGE; - } else if(this == IS_NULL_RANGE || range == IS_NULL_RANGE) { - return EMPTY_RANGE; - } - if (lowerUnbound()) { - newLowerRange = range.lowerRange; - newLowerInclusive = range.lowerInclusive; - } else if (range.lowerUnbound()) { - newLowerRange = lowerRange; - newLowerInclusive = lowerInclusive; + public int compare(KeyRange o1, KeyRange o2) { + int result = Boolean.compare(o2.lowerUnbound(), o1.lowerUnbound()); + if (result != 0) { + return result; + } + result = DescVarLengthFastByteComparisons.compareTo(o1.getLowerRange(), 0, + o1.getLowerRange().length, o2.getLowerRange(), 0, o2.getLowerRange().length); + if (result != 0) { + return result; + } + result = Boolean.compare(o2.isLowerInclusive(), o1.isLowerInclusive()); + if (result != 0) { + return result; + } + result = Boolean.compare(o1.upperUnbound(), o2.upperUnbound()); + if (result != 0) { + return result; + } + result = DescVarLengthFastByteComparisons.compareTo(o1.getUpperRange(), 0, + o1.getUpperRange().length, o2.getUpperRange(), 0, o2.getUpperRange().length); + if (result != 0) { + return result; + } + return Boolean.compare(o2.isUpperInclusive(), o1.isUpperInclusive()); + } + }; + + protected byte[] lowerRange; + protected boolean lowerInclusive; + protected byte[] upperRange; + protected boolean upperInclusive; + protected boolean isSingleKey; + protected boolean inverted = false; + + public static KeyRange getKeyRange(byte[] point) { + return getKeyRange(point, true, point, true); + } + + public static KeyRange getKeyRange(byte[] lowerRange, byte[] upperRange) { + return getKeyRange(lowerRange, true, upperRange, false); + } + + private static KeyRange getSingleton(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive) { + return getSingleton(lowerRange, lowerInclusive, upperRange, upperInclusive, false); + } + + private static KeyRange getSingleton(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive, boolean inverted) { + if (lowerRange == null || upperRange == null) { + return EMPTY_RANGE; + } + if (lowerRange.length == 0 && upperRange.length == 0) { + // Need singleton to represent NULL range so it gets treated differently + // than an unbound RANGE. + return lowerInclusive && upperInclusive ? IS_NULL_RANGE : EVERYTHING_RANGE; + } + if ( + (lowerRange.length != 0 || lowerRange == NULL_BOUND) + && (upperRange.length != 0 || upperRange == NULL_BOUND) + ) { + int cmp; + if (inverted) { + // Allow illegal ranges to be defined. These will be fixed during processing. 
+ cmp = Bytes.compareTo(SortOrder.invert(upperRange, 0, upperRange.length), + SortOrder.invert(lowerRange, 0, lowerRange.length)); + } else { + cmp = Bytes.compareTo(lowerRange, upperRange); + } + if (cmp > 0 || (cmp == 0 && !(lowerInclusive && upperInclusive))) { + return EMPTY_RANGE; + } + } + return null; + } + + public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive) { + return getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, false); + } + + public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive, boolean inverted) { + KeyRange range = getSingleton(lowerRange, lowerInclusive, upperRange, upperInclusive, inverted); + if (range != null) { + return range; + } + boolean unboundLower = false; + boolean unboundUpper = false; + if (lowerRange.length == 0 && lowerRange != NULL_BOUND) { + lowerRange = UNBOUND; + lowerInclusive = false; + unboundLower = true; + } + if (upperRange.length == 0 && upperRange != NULL_BOUND) { + upperRange = UNBOUND; + upperInclusive = false; + unboundUpper = true; + } + + return new KeyRange(lowerRange, unboundLower ? false : lowerInclusive, upperRange, + unboundUpper ? false : upperInclusive, inverted); + } + + public static KeyRange read(DataInput input) throws IOException { + KeyRange range = new KeyRange(); + range.readFields(input); + // Translate to singleton after reading + KeyRange singletonRange = + getSingleton(range.lowerRange, range.lowerInclusive, range.upperRange, range.upperInclusive); + if (singletonRange != null) { + return singletonRange; + } + // Otherwise, just keep the range we read + return range; + } + + protected KeyRange() { + this.lowerRange = DEGENERATE_KEY; + this.lowerInclusive = false; + this.upperRange = DEGENERATE_KEY; + this.upperInclusive = false; + this.isSingleKey = false; + } + + protected KeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive, boolean inverted) { + this.lowerRange = lowerRange; + this.lowerInclusive = lowerInclusive; + this.upperRange = upperRange; + this.upperInclusive = upperInclusive; + this.inverted = inverted; + init(); + } + + private void init() { + this.isSingleKey = lowerRange != UNBOUND && upperRange != UNBOUND && lowerInclusive + && upperInclusive && Bytes.compareTo(lowerRange, upperRange) == 0; + } + + public byte[] getRange(Bound bound) { + return bound == Bound.LOWER ? getLowerRange() : getUpperRange(); + } + + public boolean isInclusive(Bound bound) { + return bound == Bound.LOWER ? isLowerInclusive() : isUpperInclusive(); + } + + public boolean isUnbound(Bound bound) { + return bound == Bound.LOWER ? 
lowerUnbound() : upperUnbound(); + } + + public boolean isSingleKey() { + return isSingleKey; + } + + public int compareLowerToUpperBound(ImmutableBytesWritable ptr, boolean isInclusive, + BytesComparator comparator) { + return compareLowerToUpperBound(ptr.get(), ptr.getOffset(), ptr.getLength(), isInclusive, + comparator); + } + + public int compareLowerToUpperBound(ImmutableBytesWritable ptr, BytesComparator comparator) { + return compareLowerToUpperBound(ptr, true, comparator); + } + + public int compareUpperToLowerBound(ImmutableBytesWritable ptr, boolean isInclusive, + BytesComparator comparator) { + return compareUpperToLowerBound(ptr.get(), ptr.getOffset(), ptr.getLength(), isInclusive, + comparator); + } + + public int compareUpperToLowerBound(ImmutableBytesWritable ptr, BytesComparator comparator) { + return compareUpperToLowerBound(ptr, true, comparator); + } + + public int compareLowerToUpperBound(byte[] b, int o, int l, BytesComparator comparator) { + return compareLowerToUpperBound(b, o, l, true, comparator); + } + + public int compareLowerToUpperBound(byte[] b, BytesComparator comparator) { + return compareLowerToUpperBound(b, 0, b.length, comparator); + } + + /** + * Compares a lower bound against an upper bound + * @param b upper bound byte array + * @param o upper bound offset + * @param l upper bound length + * @param isInclusive upper bound inclusive + * @param comparator comparator used to do compare the byte array using offset and length + * @return -1 if the lower bound is less than the upper bound, 1 if the lower bound is greater + * than the upper bound, and 0 if they are equal. + */ + public int compareLowerToUpperBound(byte[] b, int o, int l, boolean isInclusive, + BytesComparator comparator) { + if (lowerUnbound() || b == KeyRange.UNBOUND) { + return -1; + } + int cmp = comparator.compare(lowerRange, 0, lowerRange.length, b, o, l); + if (cmp > 0) { + return 1; + } + if (cmp < 0) { + return -1; + } + if (lowerInclusive && isInclusive) { + return 0; + } + return 1; + } + + public int compareUpperToLowerBound(byte[] b, BytesComparator comparator) { + return compareUpperToLowerBound(b, 0, b.length, comparator); + } + + public int compareUpperToLowerBound(byte[] b, int o, int l, BytesComparator comparator) { + return compareUpperToLowerBound(b, o, l, true, comparator); + } + + public int compareUpperToLowerBound(byte[] b, int o, int l, boolean isInclusive, + BytesComparator comparator) { + if (upperUnbound() || b == KeyRange.UNBOUND) { + return 1; + } + int cmp = comparator.compare(upperRange, 0, upperRange.length, b, o, l); + if (cmp > 0) { + return 1; + } + if (cmp < 0) { + return -1; + } + if (upperInclusive && isInclusive) { + return 0; + } + return -1; + } + + public byte[] getLowerRange() { + return lowerRange; + } + + public boolean isLowerInclusive() { + return lowerInclusive; + } + + public byte[] getUpperRange() { + return upperRange; + } + + public boolean isUpperInclusive() { + return upperInclusive; + } + + public boolean isUnbound() { + return lowerUnbound() || upperUnbound(); + } + + public boolean upperUnbound() { + return upperRange == UNBOUND; + } + + public boolean lowerUnbound() { + return lowerRange == UNBOUND; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(lowerRange); + if (lowerRange != null) result = prime * result + (lowerInclusive ? 
1231 : 1237); + result = prime * result + Arrays.hashCode(upperRange); + if (upperRange != null) result = prime * result + (upperInclusive ? 1231 : 1237); + return result; + } + + @Override + public String toString() { + if (isSingleKey()) { + return Bytes.toStringBinary(lowerRange); + } + return (lowerInclusive ? "[" : "(") + (lowerUnbound() ? "*" : Bytes.toStringBinary(lowerRange)) + + " - " + (upperUnbound() ? "*" : Bytes.toStringBinary(upperRange)) + + (upperInclusive ? "]" : ")"); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof KeyRange)) { + return false; + } + KeyRange that = (KeyRange) o; + return Bytes.compareTo(this.lowerRange, that.lowerRange) == 0 + && this.lowerInclusive == that.lowerInclusive + && Bytes.compareTo(this.upperRange, that.upperRange) == 0 + && this.upperInclusive == that.upperInclusive; + } + + public KeyRange intersect(KeyRange range) { + byte[] newLowerRange; + byte[] newUpperRange; + boolean newLowerInclusive; + boolean newUpperInclusive; + // Special case for null, is it is never included another range + // except for null itself. + if (this == IS_NULL_RANGE && range == IS_NULL_RANGE) { + return IS_NULL_RANGE; + } else if (this == IS_NULL_RANGE || range == IS_NULL_RANGE) { + return EMPTY_RANGE; + } + if (lowerUnbound()) { + newLowerRange = range.lowerRange; + newLowerInclusive = range.lowerInclusive; + } else if (range.lowerUnbound()) { + newLowerRange = lowerRange; + newLowerInclusive = lowerInclusive; + } else { + int cmp = Bytes.compareTo(lowerRange, range.lowerRange); + if (cmp != 0 || lowerInclusive == range.lowerInclusive) { + if (cmp <= 0) { + newLowerRange = range.lowerRange; + newLowerInclusive = range.lowerInclusive; } else { - int cmp = Bytes.compareTo(lowerRange, range.lowerRange); - if (cmp != 0 || lowerInclusive == range.lowerInclusive) { - if (cmp <= 0) { - newLowerRange = range.lowerRange; - newLowerInclusive = range.lowerInclusive; - } else { - newLowerRange = lowerRange; - newLowerInclusive = lowerInclusive; - } - } else { // Same lower range, but one is not inclusive - newLowerRange = range.lowerRange; - newLowerInclusive = false; - } - } - if (upperUnbound()) { - newUpperRange = range.upperRange; - newUpperInclusive = range.upperInclusive; - } else if (range.upperUnbound()) { - newUpperRange = upperRange; - newUpperInclusive = upperInclusive; + newLowerRange = lowerRange; + newLowerInclusive = lowerInclusive; + } + } else { // Same lower range, but one is not inclusive + newLowerRange = range.lowerRange; + newLowerInclusive = false; + } + } + if (upperUnbound()) { + newUpperRange = range.upperRange; + newUpperInclusive = range.upperInclusive; + } else if (range.upperUnbound()) { + newUpperRange = upperRange; + newUpperInclusive = upperInclusive; + } else { + int cmp = Bytes.compareTo(upperRange, range.upperRange); + if (cmp != 0 || upperInclusive == range.upperInclusive) { + if (cmp >= 0) { + newUpperRange = range.upperRange; + newUpperInclusive = range.upperInclusive; } else { - int cmp = Bytes.compareTo(upperRange, range.upperRange); - if (cmp != 0 || upperInclusive == range.upperInclusive) { - if (cmp >= 0) { - newUpperRange = range.upperRange; - newUpperInclusive = range.upperInclusive; - } else { - newUpperRange = upperRange; - newUpperInclusive = upperInclusive; - } - } else { // Same upper range, but one is not inclusive - newUpperRange = range.upperRange; - newUpperInclusive = false; - } - } - if (newLowerRange == lowerRange && newLowerInclusive == lowerInclusive - && newUpperRange == upperRange && 
newUpperInclusive == upperInclusive) { - return this; - } - return getKeyRange(newLowerRange, newLowerInclusive, newUpperRange, newUpperInclusive, - this.inverted && range.inverted); - } - - public static boolean isDegenerate(byte[] lowerRange, byte[] upperRange) { - return lowerRange == KeyRange.EMPTY_RANGE.getLowerRange() && upperRange == KeyRange.EMPTY_RANGE.getUpperRange(); - } - - public static boolean areAllSingleKey(List rowKeyRanges) { - if(rowKeyRanges == null || rowKeyRanges.isEmpty()) { - return false; - } - for(KeyRange rowKeyRange : rowKeyRanges) { - if(!rowKeyRange.isSingleKey()) { - return false; - } - } - return true; - } - - /** - * @return list of at least size 1 - */ - @NonNull - public static List coalesce(List keyRanges) { - List tmp = new ArrayList(); - for (KeyRange keyRange : keyRanges) { - if (EMPTY_RANGE == keyRange) { - continue; - } - if (EVERYTHING_RANGE == keyRange) { - tmp.clear(); - tmp.add(keyRange); - break; - } - tmp.add(keyRange); - } - if (tmp.size() == 1) { - return tmp; - } - if (tmp.size() == 0) { - return Collections.singletonList(EMPTY_RANGE); - } - - Collections.sort(tmp, COMPARATOR); - List tmp2 = new ArrayList(); - KeyRange range = tmp.get(0); - for (int i=1; i rowKeyRanges) { + if (rowKeyRanges == null || rowKeyRanges.isEmpty()) { + return false; + } + for (KeyRange rowKeyRange : rowKeyRanges) { + if (!rowKeyRange.isSingleKey()) { + return false; + } + } + return true; + } + + /** Returns list of at least size 1 */ + @NonNull + public static List coalesce(List keyRanges) { + List tmp = new ArrayList(); + for (KeyRange keyRange : keyRanges) { + if (EMPTY_RANGE == keyRange) { + continue; + } + if (EVERYTHING_RANGE == keyRange) { + tmp.clear(); + tmp.add(keyRange); + break; + } + tmp.add(keyRange); + } + if (tmp.size() == 1) { + return tmp; + } + if (tmp.size() == 0) { + return Collections.singletonList(EMPTY_RANGE); + } + + Collections.sort(tmp, COMPARATOR); + List tmp2 = new ArrayList(); + KeyRange range = tmp.get(0); + for (int i = 1; i < tmp.size(); i++) { + KeyRange otherRange = tmp.get(i); + KeyRange intersect = range.intersect(otherRange); + if (EMPTY_RANGE == intersect) { tmp2.add(range); - List tmp3 = new ArrayList(); - range = tmp2.get(0); - for (int i=1; i tmp3 = new ArrayList(); + range = tmp2.get(0); + for (int i = 1; i < tmp2.size(); i++) { + KeyRange otherRange = tmp2.get(i); + assert !range.upperUnbound(); + assert !otherRange.lowerUnbound(); + if ( + range.isUpperInclusive() != otherRange.isLowerInclusive() + && Bytes.equals(range.getUpperRange(), otherRange.getLowerRange()) + ) { + range = KeyRange.getKeyRange(range.getLowerRange(), range.isLowerInclusive(), + otherRange.getUpperRange(), otherRange.isUpperInclusive()); + } else { tmp3.add(range); - - return tmp3; - } - - public KeyRange union(KeyRange other) { - if (EMPTY_RANGE == other) return this; - if (EMPTY_RANGE == this) return other; - byte[] newLower, newUpper; - boolean newLowerInclusive, newUpperInclusive; - if (this.lowerUnbound() || other.lowerUnbound()) { - newLower = UNBOUND; - newLowerInclusive = false; - } else { - int lowerCmp = Bytes.compareTo(this.lowerRange, other.lowerRange); - if (lowerCmp < 0) { - newLower = lowerRange; - newLowerInclusive = lowerInclusive; - } else if (lowerCmp == 0) { - newLower = lowerRange; - newLowerInclusive = this.lowerInclusive || other.lowerInclusive; - } else { - newLower = other.lowerRange; - newLowerInclusive = other.lowerInclusive; - } - } - - if (this.upperUnbound() || other.upperUnbound()) { - newUpper = UNBOUND; - 
newUpperInclusive = false; - } else { - int upperCmp = Bytes.compareTo(this.upperRange, other.upperRange); - if (upperCmp > 0) { - newUpper = upperRange; - newUpperInclusive = this.upperInclusive; - } else if (upperCmp == 0) { - newUpper = upperRange; - newUpperInclusive = this.upperInclusive || other.upperInclusive; - } else { - newUpper = other.upperRange; - newUpperInclusive = other.upperInclusive; - } - } - return KeyRange.getKeyRange(newLower, newLowerInclusive, newUpper, newUpperInclusive); - } - - public static List of(List keys) { - return Lists.transform(keys, POINT); - } - - public static int compareUpperRange(KeyRange rowKeyRange1,KeyRange rowKeyRange2) { - int result = Boolean.compare(rowKeyRange1.upperUnbound(), rowKeyRange2.upperUnbound()); - if (result != 0) { - return result; - } - result = Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), rowKeyRange2.getUpperRange()); - if (result != 0) { - return result; - } - return Boolean.compare(rowKeyRange1.isUpperInclusive(), rowKeyRange2.isUpperInclusive()); - } - - public static List intersect(List rowKeyRanges1, List rowKeyRanges2) { - List newRowKeyRanges1=coalesce(rowKeyRanges1); - List newRowKeyRanges2=coalesce(rowKeyRanges2); - Iterator iter1=newRowKeyRanges1.iterator(); - Iterator iter2=newRowKeyRanges2.iterator(); - - List result = new LinkedList(); - KeyRange rowKeyRange1=null; - KeyRange rowKeyRange2=null; - while(true) { - if(rowKeyRange1==null) { - if(!iter1.hasNext()) { - break; - } - rowKeyRange1=iter1.next(); - } - if(rowKeyRange2==null) { - if(!iter2.hasNext()) { - break; - } - rowKeyRange2=iter2.next(); - } - KeyRange intersectedRowKeyRange=rowKeyRange1.intersect(rowKeyRange2); - if(intersectedRowKeyRange!=EMPTY_RANGE) { - result.add(intersectedRowKeyRange); - } - int cmp=compareUpperRange(rowKeyRange1, rowKeyRange2); - if(cmp < 0) { - //move iter1 - rowKeyRange1=null; - } else if(cmp > 0) { - //move iter2 - rowKeyRange2=null; - } else { - //move iter1 and iter2 - rowKeyRange1=rowKeyRange2=null; - } - } - if (result.size() == 0) { - return Collections.singletonList(KeyRange.EMPTY_RANGE); - } - return result; - } - - // The range generated here is possibly invalid - public KeyRange invert() { - // these special ranges do not get inverted because we - // represent NULL in the same way for ASC and DESC. 
- if (this == IS_NOT_NULL_RANGE || this == IS_NULL_RANGE) { - return this; - } - byte[] lowerBound = this.getLowerRange(); - if (!this.lowerUnbound()) { - lowerBound = SortOrder.invert(lowerBound, 0, lowerBound.length); - } - byte[] upperBound; - if (this.isSingleKey()) { - upperBound = lowerBound; - } else { - upperBound = this.getUpperRange(); - if (!this.upperUnbound()) { - upperBound = SortOrder.invert(upperBound, 0, upperBound.length); - } - } - return KeyRange.getKeyRange(upperBound, this.isUpperInclusive(), lowerBound, this.isLowerInclusive(), !this.inverted); - } - - @Override - public void readFields(DataInput in) throws IOException { - int len = WritableUtils.readVInt(in); - if (len == 0) { - lowerRange = KeyRange.UNBOUND; - lowerInclusive = false; - } else { - if (len < 0) { - lowerInclusive = false; - lowerRange = new byte[-len - 1]; - in.readFully(lowerRange); - } else { - lowerInclusive = true; - lowerRange = new byte[len - 1]; - in.readFully(lowerRange); - } - } - len = WritableUtils.readVInt(in); - if (len == 0) { - upperRange = KeyRange.UNBOUND; - upperInclusive = false; - } else { - if (len < 0) { - upperInclusive = false; - upperRange = new byte[-len - 1]; - in.readFully(upperRange); - } else { - upperInclusive = true; - upperRange = new byte[len - 1]; - in.readFully(upperRange); - } - } - init(); - } - - private void writeBound(Bound bound, DataOutput out) throws IOException { - // Encode unbound by writing a zero - if (isUnbound(bound)) { - WritableUtils.writeVInt(out, 0); - return; - } - // Otherwise, inclusive is positive and exclusive is negative, offset by 1 - byte[] range = getRange(bound); - if (isInclusive(bound)){ - WritableUtils.writeVInt(out, range.length+1); - } else { - WritableUtils.writeVInt(out, -(range.length+1)); - } - out.write(range); - } - - @Override - public void write(DataOutput out) throws IOException { - writeBound(Bound.LOWER, out); - writeBound(Bound.UPPER, out); - } - - public KeyRange prependRange(byte[] bytes, int offset, int length) { - if (length == 0 || this == EVERYTHING_RANGE) { - return this; - } - byte[] lowerRange = this.getLowerRange(); - if (!this.lowerUnbound()) { - byte[] newLowerRange = new byte[length + lowerRange.length]; - System.arraycopy(bytes, offset, newLowerRange, 0, length); - System.arraycopy(lowerRange, 0, newLowerRange, length, lowerRange.length); - lowerRange = newLowerRange; - } - byte[] upperRange = this.getUpperRange(); - if (!this.upperUnbound()) { - byte[] newUpperRange = new byte[length + upperRange.length]; - System.arraycopy(bytes, offset, newUpperRange, 0, length); - System.arraycopy(upperRange, 0, newUpperRange, length, upperRange.length); - upperRange = newUpperRange; - } - return getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); - } -} \ No newline at end of file + range = otherRange; + } + } + tmp3.add(range); + + return tmp3; + } + + public KeyRange union(KeyRange other) { + if (EMPTY_RANGE == other) return this; + if (EMPTY_RANGE == this) return other; + byte[] newLower, newUpper; + boolean newLowerInclusive, newUpperInclusive; + if (this.lowerUnbound() || other.lowerUnbound()) { + newLower = UNBOUND; + newLowerInclusive = false; + } else { + int lowerCmp = Bytes.compareTo(this.lowerRange, other.lowerRange); + if (lowerCmp < 0) { + newLower = lowerRange; + newLowerInclusive = lowerInclusive; + } else if (lowerCmp == 0) { + newLower = lowerRange; + newLowerInclusive = this.lowerInclusive || other.lowerInclusive; + } else { + newLower = other.lowerRange; + newLowerInclusive = 
other.lowerInclusive; + } + } + + if (this.upperUnbound() || other.upperUnbound()) { + newUpper = UNBOUND; + newUpperInclusive = false; + } else { + int upperCmp = Bytes.compareTo(this.upperRange, other.upperRange); + if (upperCmp > 0) { + newUpper = upperRange; + newUpperInclusive = this.upperInclusive; + } else if (upperCmp == 0) { + newUpper = upperRange; + newUpperInclusive = this.upperInclusive || other.upperInclusive; + } else { + newUpper = other.upperRange; + newUpperInclusive = other.upperInclusive; + } + } + return KeyRange.getKeyRange(newLower, newLowerInclusive, newUpper, newUpperInclusive); + } + + public static List of(List keys) { + return Lists.transform(keys, POINT); + } + + public static int compareUpperRange(KeyRange rowKeyRange1, KeyRange rowKeyRange2) { + int result = Boolean.compare(rowKeyRange1.upperUnbound(), rowKeyRange2.upperUnbound()); + if (result != 0) { + return result; + } + result = + Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), rowKeyRange2.getUpperRange()); + if (result != 0) { + return result; + } + return Boolean.compare(rowKeyRange1.isUpperInclusive(), rowKeyRange2.isUpperInclusive()); + } + + public static List intersect(List rowKeyRanges1, + List rowKeyRanges2) { + List newRowKeyRanges1 = coalesce(rowKeyRanges1); + List newRowKeyRanges2 = coalesce(rowKeyRanges2); + Iterator iter1 = newRowKeyRanges1.iterator(); + Iterator iter2 = newRowKeyRanges2.iterator(); + + List result = new LinkedList(); + KeyRange rowKeyRange1 = null; + KeyRange rowKeyRange2 = null; + while (true) { + if (rowKeyRange1 == null) { + if (!iter1.hasNext()) { + break; + } + rowKeyRange1 = iter1.next(); + } + if (rowKeyRange2 == null) { + if (!iter2.hasNext()) { + break; + } + rowKeyRange2 = iter2.next(); + } + KeyRange intersectedRowKeyRange = rowKeyRange1.intersect(rowKeyRange2); + if (intersectedRowKeyRange != EMPTY_RANGE) { + result.add(intersectedRowKeyRange); + } + int cmp = compareUpperRange(rowKeyRange1, rowKeyRange2); + if (cmp < 0) { + // move iter1 + rowKeyRange1 = null; + } else if (cmp > 0) { + // move iter2 + rowKeyRange2 = null; + } else { + // move iter1 and iter2 + rowKeyRange1 = rowKeyRange2 = null; + } + } + if (result.size() == 0) { + return Collections.singletonList(KeyRange.EMPTY_RANGE); + } + return result; + } + + // The range generated here is possibly invalid + public KeyRange invert() { + // these special ranges do not get inverted because we + // represent NULL in the same way for ASC and DESC. 
+ if (this == IS_NOT_NULL_RANGE || this == IS_NULL_RANGE) { + return this; + } + byte[] lowerBound = this.getLowerRange(); + if (!this.lowerUnbound()) { + lowerBound = SortOrder.invert(lowerBound, 0, lowerBound.length); + } + byte[] upperBound; + if (this.isSingleKey()) { + upperBound = lowerBound; + } else { + upperBound = this.getUpperRange(); + if (!this.upperUnbound()) { + upperBound = SortOrder.invert(upperBound, 0, upperBound.length); + } + } + return KeyRange.getKeyRange(upperBound, this.isUpperInclusive(), lowerBound, + this.isLowerInclusive(), !this.inverted); + } + + @Override + public void readFields(DataInput in) throws IOException { + int len = WritableUtils.readVInt(in); + if (len == 0) { + lowerRange = KeyRange.UNBOUND; + lowerInclusive = false; + } else { + if (len < 0) { + lowerInclusive = false; + lowerRange = new byte[-len - 1]; + in.readFully(lowerRange); + } else { + lowerInclusive = true; + lowerRange = new byte[len - 1]; + in.readFully(lowerRange); + } + } + len = WritableUtils.readVInt(in); + if (len == 0) { + upperRange = KeyRange.UNBOUND; + upperInclusive = false; + } else { + if (len < 0) { + upperInclusive = false; + upperRange = new byte[-len - 1]; + in.readFully(upperRange); + } else { + upperInclusive = true; + upperRange = new byte[len - 1]; + in.readFully(upperRange); + } + } + init(); + } + + private void writeBound(Bound bound, DataOutput out) throws IOException { + // Encode unbound by writing a zero + if (isUnbound(bound)) { + WritableUtils.writeVInt(out, 0); + return; + } + // Otherwise, inclusive is positive and exclusive is negative, offset by 1 + byte[] range = getRange(bound); + if (isInclusive(bound)) { + WritableUtils.writeVInt(out, range.length + 1); + } else { + WritableUtils.writeVInt(out, -(range.length + 1)); + } + out.write(range); + } + + @Override + public void write(DataOutput out) throws IOException { + writeBound(Bound.LOWER, out); + writeBound(Bound.UPPER, out); + } + + public KeyRange prependRange(byte[] bytes, int offset, int length) { + if (length == 0 || this == EVERYTHING_RANGE) { + return this; + } + byte[] lowerRange = this.getLowerRange(); + if (!this.lowerUnbound()) { + byte[] newLowerRange = new byte[length + lowerRange.length]; + System.arraycopy(bytes, offset, newLowerRange, 0, length); + System.arraycopy(lowerRange, 0, newLowerRange, length, lowerRange.length); + lowerRange = newLowerRange; + } + byte[] upperRange = this.getUpperRange(); + if (!this.upperUnbound()) { + byte[] newUpperRange = new byte[length + upperRange.length]; + System.arraycopy(bytes, offset, newUpperRange, 0, length); + System.arraycopy(upperRange, 0, newUpperRange, length, upperRange.length); + upperRange = newUpperRange; + } + return getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/MetaDataMutated.java index e7ce65c83c1..57e5a93d19a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/MetaDataMutated.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/MetaDataMutated.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,21 +26,26 @@ import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PTable; - /** - * * Interface for applying schema mutations to our client-side schema cache - * - * * @since 0.1 */ public interface MetaDataMutated { - void addTable(PTable table, long resolvedTime) throws SQLException; - void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException; - void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException; - void removeColumn(PName tenantId, String tableName, List columnsToRemove, long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException; - void addFunction(PFunction function) throws SQLException; - void removeFunction(PName tenantId, String function, long functionTimeStamp) throws SQLException; - void addSchema(PSchema schema) throws SQLException; - void removeSchema(PSchema schema, long schemaTimeStamp); + void addTable(PTable table, long resolvedTime) throws SQLException; + + void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException; + + void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) + throws SQLException; + + void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException; + + void addFunction(PFunction function) throws SQLException; + + void removeFunction(PName tenantId, String function, long functionTimeStamp) throws SQLException; + + void addSchema(PSchema schema) throws SQLException; + + void removeSchema(PSchema schema, long schemaTimeStamp); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java index 911d7d183a0..b8011ba7748 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,82 +17,74 @@ */ package org.apache.phoenix.query; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.schema.stats.GuidePostsInfo; +import org.apache.phoenix.schema.stats.GuidePostsKey; import org.apache.phoenix.thirdparty.com.google.common.cache.CacheLoader; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ListenableFutureTask; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.schema.stats.GuidePostsInfo; -import org.apache.phoenix.schema.stats.GuidePostsKey; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; - /** * {@link CacheLoader} asynchronous implementation for the Phoenix Table Stats cache. */ public class PhoenixStatsCacheLoader extends CacheLoader { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class); - final private PhoenixStatsLoader statsLoader; - private static volatile ExecutorService executor; + final private PhoenixStatsLoader statsLoader; + private static volatile ExecutorService executor; - public PhoenixStatsCacheLoader(PhoenixStatsLoader statsLoader, Configuration config) { - this.statsLoader = statsLoader; + public PhoenixStatsCacheLoader(PhoenixStatsLoader statsLoader, Configuration config) { + this.statsLoader = statsLoader; + if (executor == null) { + synchronized (PhoenixStatsCacheLoader.class) { if (executor == null) { - synchronized (PhoenixStatsCacheLoader.class) { - if (executor == null) { - // The size of the thread pool used for refreshing cached table stats - final int statsCacheThreadPoolSize = config.getInt( - QueryServices.STATS_CACHE_THREAD_POOL_SIZE, - QueryServicesOptions.DEFAULT_STATS_CACHE_THREAD_POOL_SIZE); - final ThreadFactory threadFactory = - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("PHOENIX-STATS-CACHE-LOADER-thread-%s") - .build(); - executor = - Executors.newFixedThreadPool(statsCacheThreadPoolSize, threadFactory); - } - } + // The size of the thread pool used for refreshing cached table stats + final int statsCacheThreadPoolSize = + config.getInt(QueryServices.STATS_CACHE_THREAD_POOL_SIZE, + QueryServicesOptions.DEFAULT_STATS_CACHE_THREAD_POOL_SIZE); + final ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("PHOENIX-STATS-CACHE-LOADER-thread-%s").build(); + executor = Executors.newFixedThreadPool(statsCacheThreadPoolSize, threadFactory); } + } } + } - @Override - public GuidePostsInfo load(GuidePostsKey statsKey) throws Exception { - return statsLoader.loadStats(statsKey); - } + @Override + public 
GuidePostsInfo load(GuidePostsKey statsKey) throws Exception { + return statsLoader.loadStats(statsKey); + } - @Override - public ListenableFuture reload( - final GuidePostsKey key, - GuidePostsInfo prevGuidepostInfo) - { - if (statsLoader.needsLoad()) { - // schedule asynchronous task - ListenableFutureTask task = - ListenableFutureTask.create(new Callable() { - public GuidePostsInfo call() { - try { - return statsLoader.loadStats(key, prevGuidepostInfo); - } catch (Exception e) { - LOGGER.warn("Unable to load stats from table: " + key.toString(), e); - return prevGuidepostInfo; - } - } - }); - executor.execute(task); - return task; - } - else { - return Futures.immediateFuture(prevGuidepostInfo); - } + @Override + public ListenableFuture reload(final GuidePostsKey key, + GuidePostsInfo prevGuidepostInfo) { + if (statsLoader.needsLoad()) { + // schedule asynchronous task + ListenableFutureTask task = + ListenableFutureTask.create(new Callable() { + public GuidePostsInfo call() { + try { + return statsLoader.loadStats(key, prevGuidepostInfo); + } catch (Exception e) { + LOGGER.warn("Unable to load stats from table: " + key.toString(), e); + return prevGuidepostInfo; + } + } + }); + executor.execute(task); + return task; + } else { + return Futures.immediateFuture(prevGuidepostInfo); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsLoader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsLoader.java index eda5e56450d..71694f3d616 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsLoader.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PhoenixStatsLoader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,34 +24,28 @@ * The interface for talking to underneath layers to load stats from stats table for a given key */ public interface PhoenixStatsLoader { - /** - * Use to check whether this is the time to load stats from stats table. - * There are two cases: - * a. After a specified duration has passed - * b. The stats on server side (e.g. in stats table) has been updated - * - * @return boolean indicates whether we need to load stats or not - */ - boolean needsLoad(); + /** + * Use to check whether this is the time to load stats from stats table. There are two cases: a. + * After a specified duration has passed b. The stats on server side (e.g. 
in stats table) has + * been updated + * @return boolean indicates whether we need to load stats or not + */ + boolean needsLoad(); - /** - * Called by client stats cache to load stats from underneath layers - * - * @param statsKey the stats key used to search the stats on server side (in stats table) - * @throws Exception - * - * @return GuidePostsInfo retrieved from sever side - */ - GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception; + /** + * Called by client stats cache to load stats from underneath layers + * @param statsKey the stats key used to search the stats on server side (in stats table) + * @return GuidePostsInfo retrieved from sever side + */ + GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception; - /** - * Called by client stats cache to load stats from underneath layers - * - * @param statsKey the stats key used to search the stats on server side (in stats table) - * @param prevGuidepostInfo the existing stats cached on the client side or GuidePostsInfo.NO_GUIDEPOST - * @throws Exception - * - * @return GuidePostsInfo retrieved from sever side - */ - GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) throws Exception; + /** + * Called by client stats cache to load stats from underneath layers + * @param statsKey the stats key used to search the stats on server side (in stats table) + * @param prevGuidepostInfo the existing stats cached on the client side or + * GuidePostsInfo.NO_GUIDEPOST + * @return GuidePostsInfo retrieved from sever side + */ + GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) + throws Exception; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyNotAllowedException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyNotAllowedException.java index 9d9eaf9921a..8cf7cfba566 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyNotAllowedException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyNotAllowedException.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,10 +27,12 @@ public class PropertyNotAllowedException extends SQLException { private static final long serialVersionUID = 1L; private final Properties offendingProperties; - public PropertyNotAllowedException(Properties offendingProperties){ - this.offendingProperties=offendingProperties; + public PropertyNotAllowedException(Properties offendingProperties) { + this.offendingProperties = offendingProperties; } - public Properties getOffendingProperties(){ return this.offendingProperties; } + public Properties getOffendingProperties() { + return this.offendingProperties; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicy.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicy.java index 239b89f5288..26dd22a4d1b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicy.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,19 +16,16 @@ * limitations under the License. */ package org.apache.phoenix.query; + import java.util.Properties; /** - * Filters {@link Properties} instances based on property policy. - * - * Provided properties, check if each property inside is whitelisted, - * if not, throws IllegalArgumentException. - * For best code practice, throws the offending properties list along - * with exception + * Filters {@link Properties} instances based on property policy. Provided properties, check if each + * property inside is whitelisted, if not, throws IllegalArgumentException. For best code practice, + * throws the offending properties list along with exception An example will be: * - * An example will be: - *
- *{@code 
+ * 
+ *{@code
  *public class Customized PropertyPolicy implements PropertyPolicy {
  *  final static Set propertiesKeyAllowed = Collections.unmodifiableSet(
  *      new HashSet<>(asList("DriverMajorVersion","DriverMinorVersion","DriverName","CurrentSCN")));
@@ -46,20 +43,19 @@
  *  }
  *}
  *}
- *
+ *
*/ public interface PropertyPolicy { - /** - * @param properties - * @throws IllegalArgumentException - */ - void evaluate(Properties properties) throws PropertyNotAllowedException; + /** + */ + void evaluate(Properties properties) throws PropertyNotAllowedException; - /** - * Default implementation allows all properties. - */ - static class PropertyPolicyImpl implements PropertyPolicy { - @Override - public void evaluate(Properties properties) throws PropertyNotAllowedException{} + /** + * Default implementation allows all properties. + */ + static class PropertyPolicyImpl implements PropertyPolicy { + @Override + public void evaluate(Properties properties) throws PropertyNotAllowedException { } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java index 4ba4c566a72..99ad0b53eb2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/PropertyPolicyProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,20 +20,18 @@ import org.apache.phoenix.util.InstanceResolver; /** - * Manages providers that provide property policy for Phoenix. - * - * Dependent modules may register their own implementations of the following using {@link java.util.ServiceLoader}: + * Manages providers that provide property policy for Phoenix. Dependent modules may register their + * own implementations of the following using {@link java.util.ServiceLoader}: *
- * <ul>
- *     <li>{@link PropertyPolicy}</li>
- * </ul>
+ * <ul>
+ * <li>{@link PropertyPolicy}</li>
+ * </ul>
- * * If a custom implementation is not registered, the default implementations will be used. - * */ public class PropertyPolicyProvider { - private static final PropertyPolicy DEFAULT_PROPERTY_POLICY = new PropertyPolicy.PropertyPolicyImpl(); + private static final PropertyPolicy DEFAULT_PROPERTY_POLICY = + new PropertyPolicy.PropertyPolicyImpl(); - public static PropertyPolicy getPropertyPolicy(){ - return InstanceResolver.getSingleton(PropertyPolicy.class, DEFAULT_PROPERTY_POLICY); - } + public static PropertyPolicy getPropertyPolicy() { + return InstanceResolver.getSingleton(PropertyPolicy.class, DEFAULT_PROPERTY_POLICY); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java index ccadb5be196..44f0c7e7582 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryConstants.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,24 +17,6 @@ */ package org.apache.phoenix.query; - -import java.math.BigDecimal; -import java.nio.charset.StandardCharsets; - -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.coprocessorclient.MetaDataProtocol; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.monitoring.MetricType; -import org.apache.phoenix.schema.PName; -import org.apache.phoenix.schema.PNameFactory; -import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; -import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.TableProperty; - import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; @@ -94,6 +76,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT; @@ -178,466 +161,391 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE; + +import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; + +import org.apache.hadoop.hbase.HConstants; +import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.coprocessorclient.MetaDataProtocol; +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.monitoring.MetricType; +import org.apache.phoenix.schema.PName; +import org.apache.phoenix.schema.PNameFactory; +import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; +import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.TableProperty; /** - * * Constants used during querying - * - * * @since 0.1 */ public interface QueryConstants { - String NAME_SEPARATOR = "."; - String NAMESPACE_SEPARATOR = ":"; - String CHILD_VIEW_INDEX_NAME_SEPARATOR = "#"; - byte[] NAMESPACE_SEPARATOR_BYTES = Bytes.toBytes(NAMESPACE_SEPARATOR); - byte NAMESPACE_SEPARATOR_BYTE = NAMESPACE_SEPARATOR_BYTES[0]; - String NAME_SEPARATOR_REGEX = "\\" + NAME_SEPARATOR; - byte[] NAME_SEPARATOR_BYTES = Bytes.toBytes(NAME_SEPARATOR); - byte NAME_SEPARATOR_BYTE = NAME_SEPARATOR_BYTES[0]; - String NULL_DISPLAY_TEXT = ""; - long UNSET_TIMESTAMP = -1; - - enum JoinType {INNER, LEFT_OUTER} - String SYSTEM_SCHEMA_NAME = "SYSTEM"; - byte[] SYSTEM_SCHEMA_NAME_BYTES = Bytes.toBytes(SYSTEM_SCHEMA_NAME); - String OFFSET_ROW_KEY = "_OFFSET_"; - byte[] OFFSET_ROW_KEY_BYTES = Bytes.toBytes(OFFSET_ROW_KEY); - String GROUPED_AGGREGATOR_VALUE = "_GA_"; - byte[] GROUPED_AGGREGATOR_VALUE_BYTES = Bytes.toBytes(GROUPED_AGGREGATOR_VALUE); - - long AGG_TIMESTAMP = HConstants.LATEST_TIMESTAMP; - /** - * Key used for a single row aggregation where there is no group by - */ - byte[] UNGROUPED_AGG_ROW_KEY = Bytes.toBytes("a"); - - /** BEGIN Set of reserved column qualifiers **/ - - String RESERVED_COLUMN_FAMILY = "_v"; - byte[] RESERVED_COLUMN_FAMILY_BYTES = Bytes.toBytes(RESERVED_COLUMN_FAMILY); - - byte[] VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES; - byte[] VALUE_COLUMN_QUALIFIER = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(1); - - byte[] ARRAY_VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES; - byte[] ARRAY_VALUE_COLUMN_QUALIFIER = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(2); - - PName SINGLE_COLUMN_NAME = PNameFactory.newNormalizedName("s"); - PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s"); - byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes(); - byte[] SINGLE_COLUMN_FAMILY = SINGLE_COLUMN_FAMILY_NAME.getBytes(); - - /** END Set of reserved column qualifiers **/ - - byte[] TRUE = new byte[] {1}; - - /** - * The priority property for an hbase table. This is already in HTD, but older versions of - * HBase do not have this, so we re-defined it here. Once Phoenix is HBase-1.3+, we can remote. - */ - String PRIORITY = "PRIORITY"; - - /** - * Separator used between variable length keys for a composite key. - * Variable length data types may not use this byte value. 
- */ - byte SEPARATOR_BYTE = (byte) 0; - byte[] SEPARATOR_BYTE_ARRAY = new byte[] {SEPARATOR_BYTE}; - byte DESC_SEPARATOR_BYTE = SortOrder.invert(SEPARATOR_BYTE); - byte[] DESC_SEPARATOR_BYTE_ARRAY = new byte[] {DESC_SEPARATOR_BYTE}; - - byte[] VARBINARY_ENCODED_SEPARATOR_BYTES = new byte[] {0x00, 0x01}; - byte[] DESC_VARBINARY_ENCODED_SEPARATOR_BYTES = - SortOrder.invert(VARBINARY_ENCODED_SEPARATOR_BYTES, 0, 2); - - byte[] ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR = - Bytes.toBytes("_ROW_KEY_VALUE_ACCESSOR_ENCODED_SEPARATOR_"); - - String DEFAULT_COPROCESS_JAR_NAME = "phoenix-[version]-server.jar"; - - int MILLIS_IN_DAY = 1000 * 60 * 60 * 24; - String EMPTY_COLUMN_NAME = "_0"; - // For transactional tables, the value of our empty key value can no longer be empty - // since empty values are treated as column delete markers. - byte[] EMPTY_COLUMN_BYTES = Bytes.toBytes(EMPTY_COLUMN_NAME); - ImmutableBytesPtr EMPTY_COLUMN_BYTES_PTR = new ImmutableBytesPtr(EMPTY_COLUMN_BYTES); - Integer ENCODED_EMPTY_COLUMN_NAME = 0; - byte[] ENCODED_EMPTY_COLUMN_BYTES = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode( - ENCODED_EMPTY_COLUMN_NAME); - String EMPTY_COLUMN_VALUE = "x"; - byte[] EMPTY_COLUMN_VALUE_BYTES = Bytes.toBytes(EMPTY_COLUMN_VALUE); - ImmutableBytesPtr EMPTY_COLUMN_VALUE_BYTES_PTR = new ImmutableBytesPtr( - EMPTY_COLUMN_VALUE_BYTES); - byte[] ENCODED_EMPTY_COLUMN_VALUE_BYTES = Bytes.toBytes(EMPTY_COLUMN_VALUE); - String DEFAULT_COLUMN_FAMILY = "0"; - byte[] DEFAULT_COLUMN_FAMILY_BYTES = Bytes.toBytes(DEFAULT_COLUMN_FAMILY); - ImmutableBytesPtr DEFAULT_COLUMN_FAMILY_BYTES_PTR = new ImmutableBytesPtr( - DEFAULT_COLUMN_FAMILY_BYTES); - - byte VERIFIED_BYTE = 1; - byte UNVERIFIED_BYTE = 2; - byte[] VERIFIED_BYTES = new byte[] { VERIFIED_BYTE }; - byte[] UNVERIFIED_BYTES = new byte[] { UNVERIFIED_BYTE }; - ImmutableBytesPtr VERIFIED_BYTES_PTR = new ImmutableBytesPtr(VERIFIED_BYTES); - ImmutableBytesPtr UNVERIFIED_BYTES_PTR = new ImmutableBytesPtr(UNVERIFIED_BYTES); - - // column qualifier of the single key value used to store all columns for the - // COLUMNS_STORED_IN_SINGLE_CELL storage scheme - String SINGLE_KEYVALUE_COLUMN_QUALIFIER = "1"; - byte[] SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES = Bytes.toBytes(SINGLE_KEYVALUE_COLUMN_QUALIFIER); - ImmutableBytesPtr SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR = new ImmutableBytesPtr( - SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); - - String LOCAL_INDEX_COLUMN_FAMILY_PREFIX = "L#"; - byte[] LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES = Bytes.toBytes(LOCAL_INDEX_COLUMN_FAMILY_PREFIX); - - String DEFAULT_LOCAL_INDEX_COLUMN_FAMILY = LOCAL_INDEX_COLUMN_FAMILY_PREFIX + - DEFAULT_COLUMN_FAMILY; - byte[] DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES = Bytes.toBytes( - DEFAULT_LOCAL_INDEX_COLUMN_FAMILY); - ImmutableBytesPtr DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR = new ImmutableBytesPtr( - DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES); - - String ALL_FAMILY_PROPERTIES_KEY = ""; - String SYSTEM_TABLE_PK_NAME = "pk"; - - double MILLIS_TO_NANOS_CONVERTOR = Math.pow(10, 6); - BigDecimal BD_MILLIS_NANOS_CONVERSION = BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR); - BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY); - int MAX_ALLOWED_NANOS = 999999999; - int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100; - int BASE_TABLE_BASE_COLUMN_COUNT = -1; - - // String constants for the server side class names, so that we don't need the server jar - // on the client side - final String METADATA_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.schema.MetaDataSplitPolicy"; - 
final String SYSTEM_STATS_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.schema.SystemStatsSplitPolicy"; - final String SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.schema.SystemFunctionSplitPolicy"; - final String SYSTEM_TASK_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.schema.SystemTaskSplitPolicy"; - final String INDEX_REGION_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.hbase.index.IndexRegionSplitPolicy"; - - - final String GLOBAL_INDEX_CHECKER_CLASSNAME = "org.apache.phoenix.index.GlobalIndexChecker"; - final String INDEX_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.hbase.index.IndexRegionObserver"; - final String PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME = "org.apache.phoenix.index.PhoenixTransactionalIndexer"; - final String LOCAL_INDEX_SPLITTER_CLASSNAME = "org.apache.hadoop.hbase.regionserver.LocalIndexSplitter"; - - final String INDEXER_CLASSNAME = "org.apache.phoenix.hbase.index.Indexer"; - final String SCAN_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.ScanRegionObserver"; - final String UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver"; - final String GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver"; - final String SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME = "org.apache.phoenix.coprocessor.ServerCachingEndpointImpl"; - - final String MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME = "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint"; - final String INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME = "org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator"; - final String META_DATA_ENDPOINT_IMPL_CLASSNAME = "org.apache.phoenix.coprocessor.MetaDataEndpointImpl"; - final String META_DATA_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.MetaDataRegionObserver"; - final String SEQUENCE_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.SequenceRegionObserver"; - final String TASK_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.TaskRegionObserver"; - final String TASK_META_DATA_ENDPOINT_CLASSNAME = "org.apache.phoenix.coprocessor.TaskMetaDataEndpoint"; - final String CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME = "org.apache.phoenix.coprocessor.ChildLinkMetaDataEndpoint"; - final String PHOENIX_TTL_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver"; - final String SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.SystemCatalogRegionObserver"; - - // custom TagType - byte VIEW_MODIFIED_PROPERTY_TAG_TYPE = (byte) 70; - - String CDC_JSON_COL_NAME = "CDC JSON"; - String CDC_EVENT_TYPE = "event_type"; - String CDC_PRE_IMAGE = "pre_image"; - String CDC_POST_IMAGE = "post_image"; - String CDC_CHANGE_IMAGE = "change_image"; - String CDC_UPSERT_EVENT_TYPE = "upsert"; - String CDC_DELETE_EVENT_TYPE = "delete"; - String SPLITS_FILE = "SPLITS_FILE"; - - /** - * We mark counter values 0 to 10 as reserved. Value 0 is used by - * {@link #ENCODED_EMPTY_COLUMN_NAME}. Values 1-10 - * are reserved for special column qualifiers returned by Phoenix co-processors. - */ - int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11; - String CREATE_TABLE_METADATA = - // Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists - // exception and add columns to the SYSTEM.TABLE dynamically. 
- "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"(\n" + - // PK columns - TENANT_ID + " VARCHAR NULL," + - TABLE_SCHEM + " VARCHAR NULL," + - TABLE_NAME + " VARCHAR NOT NULL," + - COLUMN_NAME + " VARCHAR NULL," + // null for table row - COLUMN_FAMILY + " VARCHAR NULL," + // using for CF to uniqueness for columns - // Table metadata (will be null for column rows) - TABLE_SEQ_NUM + " BIGINT," + - TABLE_TYPE + " CHAR(1)," + - PK_NAME + " VARCHAR," + - COLUMN_COUNT + " INTEGER," + - SALT_BUCKETS + " INTEGER," + - DATA_TABLE_NAME + " VARCHAR," + - INDEX_STATE + " CHAR(1),\n" + - IMMUTABLE_ROWS + " BOOLEAN,\n" + - VIEW_STATEMENT + " VARCHAR,\n" + - DEFAULT_COLUMN_FAMILY_NAME + " VARCHAR,\n" + - DISABLE_WAL + " BOOLEAN,\n" + - MULTI_TENANT + " BOOLEAN,\n" + - VIEW_TYPE + " UNSIGNED_TINYINT,\n" + - VIEW_INDEX_ID + " BIGINT,\n" + - VIEW_INDEX_ID_DATA_TYPE + " INTEGER,\n" + - PHOENIX_TTL + " BIGINT,\n" + - PHOENIX_TTL_HWM + " BIGINT,\n" + - LAST_DDL_TIMESTAMP + " BIGINT, \n" + - CHANGE_DETECTION_ENABLED + " BOOLEAN, \n" + - SCHEMA_VERSION + " VARCHAR, \n" + - EXTERNAL_SCHEMA_ID + " VARCHAR, \n" + - STREAMING_TOPIC_NAME + " VARCHAR, \n" + - INDEX_WHERE + " VARCHAR, \n" + - MAX_LOOKBACK_AGE + " BIGINT, \n" + - CDC_INCLUDE_TABLE + " VARCHAR, \n" + - TTL + " VARCHAR, \n" + - ROW_KEY_MATCHER + " VARBINARY, \n" + - // Column metadata (will be null for table row) - DATA_TYPE + " INTEGER," + - COLUMN_SIZE + " INTEGER," + - DECIMAL_DIGITS + " INTEGER," + - NULLABLE + " INTEGER," + - ORDINAL_POSITION + " INTEGER," + - SORT_ORDER + " INTEGER," + - ARRAY_SIZE + " INTEGER,\n" + - VIEW_CONSTANT + " VARBINARY,\n" + - IS_VIEW_REFERENCED + " BOOLEAN,\n" + - KEY_SEQ + " SMALLINT,\n" + - // Link metadata (only set on rows linking table to index or view) - LINK_TYPE + " UNSIGNED_TINYINT,\n" + - // Unused - TYPE_NAME + " VARCHAR," + - REMARKS + " VARCHAR," + - SELF_REFERENCING_COL_NAME + " VARCHAR," + - REF_GENERATION + " VARCHAR," + - BUFFER_LENGTH + " INTEGER," + - NUM_PREC_RADIX + " INTEGER," + - COLUMN_DEF + " VARCHAR," + - SQL_DATA_TYPE + " INTEGER," + - SQL_DATETIME_SUB + " INTEGER," + - CHAR_OCTET_LENGTH + " INTEGER," + - IS_NULLABLE + " VARCHAR," + - SCOPE_CATALOG + " VARCHAR," + - SCOPE_SCHEMA + " VARCHAR," + - SCOPE_TABLE + " VARCHAR," + - SOURCE_DATA_TYPE + " SMALLINT," + - IS_AUTOINCREMENT + " VARCHAR," + - INDEX_TYPE + " UNSIGNED_TINYINT," + - INDEX_DISABLE_TIMESTAMP + " BIGINT," + - STORE_NULLS + " BOOLEAN," + - BASE_COLUMN_COUNT + " INTEGER," + - // Column metadata (will be null for table row) - IS_ROW_TIMESTAMP + " BOOLEAN, " + - TRANSACTIONAL + " BOOLEAN," + - UPDATE_CACHE_FREQUENCY + " BIGINT," + - IS_NAMESPACE_MAPPED + " BOOLEAN," + - AUTO_PARTITION_SEQ + " VARCHAR," + - APPEND_ONLY_SCHEMA + " BOOLEAN," + - GUIDE_POSTS_WIDTH + " BIGINT," + - COLUMN_QUALIFIER + " VARBINARY," + - IMMUTABLE_STORAGE_SCHEME + " TINYINT, " + - ENCODING_SCHEME + " TINYINT, " + - COLUMN_QUALIFIER_COUNTER + " INTEGER, " + - USE_STATS_FOR_PARALLELIZATION + " BOOLEAN, " + - TRANSACTION_PROVIDER + " TINYINT, " + - PHYSICAL_TABLE_NAME + " VARCHAR," + - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," - + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + - // Install split policy to prevent a tenant's metadata from being split across regions. 
- TableDescriptorBuilder.SPLIT_POLICY + "='" + METADATA_SPLIT_POLICY_CLASSNAME + - "',\n" + TRANSACTIONAL + "=" + Boolean.FALSE; - - String CREATE_STATS_TABLE_METADATA = - "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_STATS_TABLE + "\"(\n" + - // PK columns - PHYSICAL_NAME + " VARCHAR NOT NULL," + - COLUMN_FAMILY + " VARCHAR," + - GUIDE_POST_KEY + " VARBINARY," + - GUIDE_POSTS_WIDTH + " BIGINT," + - LAST_STATS_UPDATE_TIME+ " DATE, "+ - GUIDE_POSTS_ROW_COUNT+ " BIGINT, "+ - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" - + PHYSICAL_NAME + "," - + COLUMN_FAMILY + ","+ GUIDE_POST_KEY+"))\n" + - // Install split policy to prevent a physical table's stats from being split - // across regions. - TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_STATS_SPLIT_POLICY_CLASSNAME + "',\n" + - TRANSACTIONAL + "=" + Boolean.FALSE; - - String CREATE_SEQUENCE_METADATA = - "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + TYPE_SEQUENCE + "\"(\n" + - TENANT_ID + " VARCHAR NULL," + - SEQUENCE_SCHEMA + " VARCHAR NULL, \n" + - SEQUENCE_NAME + " VARCHAR NOT NULL, \n" + - START_WITH + " BIGINT, \n" + - CURRENT_VALUE + " BIGINT, \n" + - INCREMENT_BY + " BIGINT, \n" + - CACHE_SIZE + " BIGINT, \n" + - // the following three columns were added in 3.1/4.1 - MIN_VALUE + " BIGINT, \n" + - MAX_VALUE + " BIGINT, \n" + - CYCLE_FLAG + " BOOLEAN, \n" + - LIMIT_REACHED_FLAG + " BOOLEAN \n" + - " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + - TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n"+ - TRANSACTIONAL + "=" + Boolean.FALSE; - - String UPGRADE_TABLE_SNAPSHOT_PREFIX = "_UPGRADING_TABLE_"; - - String CREATE_FUNCTION_METADATA = - "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_FUNCTION_TABLE + "\"(\n" + - // Pk columns - TENANT_ID + " VARCHAR NULL," + - FUNCTION_NAME + " VARCHAR NOT NULL, \n" + - NUM_ARGS + " INTEGER, \n" + - // Function metadata (will be null for argument row) - CLASS_NAME + " VARCHAR, \n" + - JAR_PATH + " VARCHAR, \n" + - RETURN_TYPE + " VARCHAR, \n" + - // Argument metadata (will be null for function row) - TYPE + " VARCHAR, \n" + - ARG_POSITION + " VARBINARY, \n" + - IS_ARRAY + " BOOLEAN, \n" + - IS_CONSTANT + " BOOLEAN, \n" + - DEFAULT_VALUE + " VARCHAR, \n" + - MIN_VALUE + " VARCHAR, \n" + - MAX_VALUE + " VARCHAR, \n" + - " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ", " + - FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n"+ - // Install split policy to prevent a tenant's metadata from being split across regions. 
- TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME + - "',\n" + TRANSACTIONAL + "=" + Boolean.FALSE; - - String CREATE_LOG_METADATA = - "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"(\n" + - // Pk columns - START_TIME + " DECIMAL, \n" + - TABLE_NAME + " VARCHAR, \n" + - QUERY_ID + " VARCHAR NOT NULL,\n" + - TENANT_ID + " VARCHAR ," + - USER + " VARCHAR , \n" + - CLIENT_IP + " VARCHAR, \n" + - // Function metadata (will be null for argument row) - QUERY + " VARCHAR, \n" + - EXPLAIN_PLAN + " VARCHAR, \n" + - // Argument metadata (will be null for function row) - NO_OF_RESULTS_ITERATED + " BIGINT, \n" + - QUERY_STATUS + " VARCHAR, \n" + - EXCEPTION_TRACE + " VARCHAR, \n" + - GLOBAL_SCAN_DETAILS + " VARCHAR, \n" + - BIND_PARAMETERS + " VARCHAR, \n" + - SCAN_METRICS_JSON + " VARCHAR, \n" + - MetricType.getMetricColumnsDetails()+"\n"+ - " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + - " PRIMARY KEY (START_TIME, TABLE_NAME, QUERY_ID))\n" + - SALT_BUCKETS + "=%s,\n"+ - TRANSACTIONAL + "=" + Boolean.FALSE+ ",\n" + - ColumnFamilyDescriptorBuilder.TTL + "=" + MetaDataProtocol.DEFAULT_LOG_TTL+",\n"+ - TableProperty.IMMUTABLE_STORAGE_SCHEME.toString() + " = " + - ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.name() + ",\n" + - TableProperty.COLUMN_ENCODED_BYTES.toString()+" = 1"; - - byte[] OFFSET_FAMILY = "f_offset".getBytes(StandardCharsets.UTF_8); - byte[] OFFSET_COLUMN = "c_offset".getBytes(StandardCharsets.UTF_8); - String LAST_SCAN = "LAST_SCAN"; - String HASH_JOIN_CACHE_RETRIES = "hashjoin.client.retries.number"; - int DEFAULT_HASH_JOIN_CACHE_RETRIES = 5; - - // Links from parent to child views are stored in a separate table for - // scalability - String CREATE_CHILD_LINK_METADATA = "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + - SYSTEM_CHILD_LINK_TABLE + "\"(\n" + - // PK columns - TENANT_ID + " VARCHAR NULL," + - TABLE_SCHEM + " VARCHAR NULL," + - TABLE_NAME + " VARCHAR NOT NULL," + - COLUMN_NAME + " VARCHAR NULL," + - COLUMN_FAMILY + " VARCHAR NULL," + - LINK_TYPE + " UNSIGNED_TINYINT,\n" + - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + - TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + - COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + - TRANSACTIONAL + "=" + Boolean.FALSE; - - String CREATE_MUTEX_METADATA = - "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + - SYSTEM_MUTEX_TABLE_NAME + "\"(\n" + - // Pk columns - TENANT_ID + " VARCHAR NULL," + - TABLE_SCHEM + " VARCHAR NULL," + - TABLE_NAME + " VARCHAR NOT NULL," + - COLUMN_NAME + " VARCHAR NULL," + // null for table row - COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + - TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + - TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + - ColumnFamilyDescriptorBuilder.TTL + "=" + TTL_FOR_MUTEX; - - String CREATE_TASK_METADATA = - "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_TASK_TABLE + "\"(\n" + - // PK columns - TASK_TYPE + " UNSIGNED_TINYINT NOT NULL," + - TASK_TS + " TIMESTAMP NOT NULL," + - TENANT_ID + " VARCHAR NULL," + - TABLE_SCHEM + " VARCHAR NULL," + - TABLE_NAME + " VARCHAR NOT NULL,\n" + - // Non-PK columns - TASK_STATUS + " VARCHAR NULL," + - 
TASK_END_TS + " TIMESTAMP NULL," + - TASK_PRIORITY + " UNSIGNED_TINYINT NULL," + - TASK_DATA + " VARCHAR NULL,\n" + - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + - TASK_TYPE + "," + TASK_TS + " ROW_TIMESTAMP," + TENANT_ID + "," + TABLE_SCHEM + "," + - TABLE_NAME + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.TTL + "=" + TASK_TABLE_TTL + ",\n" + // 10 days - TableDescriptorBuilder.SPLIT_POLICY + "='" - + SYSTEM_TASK_SPLIT_POLICY_CLASSNAME + "',\n" + - TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + - STORE_NULLS + "=" + Boolean.TRUE; - - - String CREATE_TRANSFORM_METADATA = "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" - + SYSTEM_TRANSFORM_TABLE + "\"(\n" + - // PK columns - TENANT_ID + " VARCHAR NULL,\n" + - TABLE_SCHEM + " VARCHAR NULL," + - LOGICAL_TABLE_NAME + " VARCHAR NOT NULL,\n" + - // Non-PK columns - NEW_PHYS_TABLE_NAME + " VARCHAR,\n" + - TRANSFORM_TYPE + " INTEGER," + - LOGICAL_PARENT_NAME + " VARCHAR NULL,\n" + // If this is an index, Logical_Parent_Name is the data table name. Index name is not unique. - TRANSFORM_STATUS + " VARCHAR NULL," + - TRANSFORM_JOB_ID + " VARCHAR NULL," + - TRANSFORM_RETRY_COUNT + " INTEGER NULL," + - TRANSFORM_START_TS + " TIMESTAMP NULL," + - TRANSFORM_LAST_STATE_TS + " TIMESTAMP NULL," + - OLD_METADATA + " VARBINARY NULL,\n" + - NEW_METADATA + " VARCHAR NULL,\n" + - TRANSFORM_FUNCTION + " VARCHAR NULL\n" + - "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + - TENANT_ID + "," + TABLE_SCHEM + "," + LOGICAL_TABLE_NAME + "))\n" + - HConstants.VERSIONS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + - ColumnFamilyDescriptorBuilder.TTL + "=" + TRANSFORM_TABLE_TTL + ",\n" + // 90 days - TableDescriptorBuilder.SPLIT_POLICY + "='" - + SYSTEM_TASK_SPLIT_POLICY_CLASSNAME + "',\n" + - TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + - STORE_NULLS + "=" + Boolean.TRUE; + String NAME_SEPARATOR = "."; + String NAMESPACE_SEPARATOR = ":"; + String CHILD_VIEW_INDEX_NAME_SEPARATOR = "#"; + byte[] NAMESPACE_SEPARATOR_BYTES = Bytes.toBytes(NAMESPACE_SEPARATOR); + byte NAMESPACE_SEPARATOR_BYTE = NAMESPACE_SEPARATOR_BYTES[0]; + String NAME_SEPARATOR_REGEX = "\\" + NAME_SEPARATOR; + byte[] NAME_SEPARATOR_BYTES = Bytes.toBytes(NAME_SEPARATOR); + byte NAME_SEPARATOR_BYTE = NAME_SEPARATOR_BYTES[0]; + String NULL_DISPLAY_TEXT = ""; + long UNSET_TIMESTAMP = -1; + + enum JoinType { + INNER, + LEFT_OUTER + } + + String SYSTEM_SCHEMA_NAME = "SYSTEM"; + byte[] SYSTEM_SCHEMA_NAME_BYTES = Bytes.toBytes(SYSTEM_SCHEMA_NAME); + String OFFSET_ROW_KEY = "_OFFSET_"; + byte[] OFFSET_ROW_KEY_BYTES = Bytes.toBytes(OFFSET_ROW_KEY); + String GROUPED_AGGREGATOR_VALUE = "_GA_"; + byte[] GROUPED_AGGREGATOR_VALUE_BYTES = Bytes.toBytes(GROUPED_AGGREGATOR_VALUE); + + long AGG_TIMESTAMP = HConstants.LATEST_TIMESTAMP; + /** + * Key used for a single row aggregation where there is no group by + */ + byte[] UNGROUPED_AGG_ROW_KEY = Bytes.toBytes("a"); + + /** BEGIN Set of reserved column qualifiers **/ + + String RESERVED_COLUMN_FAMILY = "_v"; + byte[] RESERVED_COLUMN_FAMILY_BYTES = Bytes.toBytes(RESERVED_COLUMN_FAMILY); + + byte[] VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES; + byte[] VALUE_COLUMN_QUALIFIER = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(1); + + byte[] ARRAY_VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES; + byte[] ARRAY_VALUE_COLUMN_QUALIFIER = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(2); + + PName SINGLE_COLUMN_NAME = 
PNameFactory.newNormalizedName("s"); + PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s"); + byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes(); + byte[] SINGLE_COLUMN_FAMILY = SINGLE_COLUMN_FAMILY_NAME.getBytes(); + + /** END Set of reserved column qualifiers **/ + + byte[] TRUE = new byte[] { 1 }; + + /** + * The priority property for an hbase table. This is already in HTD, but older versions of HBase + * do not have this, so we re-defined it here. Once Phoenix is HBase-1.3+, we can remote. + */ + String PRIORITY = "PRIORITY"; + + /** + * Separator used between variable length keys for a composite key. Variable length data types may + * not use this byte value. + */ + byte SEPARATOR_BYTE = (byte) 0; + byte[] SEPARATOR_BYTE_ARRAY = new byte[] { SEPARATOR_BYTE }; + byte DESC_SEPARATOR_BYTE = SortOrder.invert(SEPARATOR_BYTE); + byte[] DESC_SEPARATOR_BYTE_ARRAY = new byte[] { DESC_SEPARATOR_BYTE }; + + byte[] VARBINARY_ENCODED_SEPARATOR_BYTES = new byte[] { 0x00, 0x01 }; + byte[] DESC_VARBINARY_ENCODED_SEPARATOR_BYTES = + SortOrder.invert(VARBINARY_ENCODED_SEPARATOR_BYTES, 0, 2); + + byte[] ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR = + Bytes.toBytes("_ROW_KEY_VALUE_ACCESSOR_ENCODED_SEPARATOR_"); + + String DEFAULT_COPROCESS_JAR_NAME = "phoenix-[version]-server.jar"; + + int MILLIS_IN_DAY = 1000 * 60 * 60 * 24; + String EMPTY_COLUMN_NAME = "_0"; + // For transactional tables, the value of our empty key value can no longer be empty + // since empty values are treated as column delete markers. + byte[] EMPTY_COLUMN_BYTES = Bytes.toBytes(EMPTY_COLUMN_NAME); + ImmutableBytesPtr EMPTY_COLUMN_BYTES_PTR = new ImmutableBytesPtr(EMPTY_COLUMN_BYTES); + Integer ENCODED_EMPTY_COLUMN_NAME = 0; + byte[] ENCODED_EMPTY_COLUMN_BYTES = + QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(ENCODED_EMPTY_COLUMN_NAME); + String EMPTY_COLUMN_VALUE = "x"; + byte[] EMPTY_COLUMN_VALUE_BYTES = Bytes.toBytes(EMPTY_COLUMN_VALUE); + ImmutableBytesPtr EMPTY_COLUMN_VALUE_BYTES_PTR = new ImmutableBytesPtr(EMPTY_COLUMN_VALUE_BYTES); + byte[] ENCODED_EMPTY_COLUMN_VALUE_BYTES = Bytes.toBytes(EMPTY_COLUMN_VALUE); + String DEFAULT_COLUMN_FAMILY = "0"; + byte[] DEFAULT_COLUMN_FAMILY_BYTES = Bytes.toBytes(DEFAULT_COLUMN_FAMILY); + ImmutableBytesPtr DEFAULT_COLUMN_FAMILY_BYTES_PTR = + new ImmutableBytesPtr(DEFAULT_COLUMN_FAMILY_BYTES); + + byte VERIFIED_BYTE = 1; + byte UNVERIFIED_BYTE = 2; + byte[] VERIFIED_BYTES = new byte[] { VERIFIED_BYTE }; + byte[] UNVERIFIED_BYTES = new byte[] { UNVERIFIED_BYTE }; + ImmutableBytesPtr VERIFIED_BYTES_PTR = new ImmutableBytesPtr(VERIFIED_BYTES); + ImmutableBytesPtr UNVERIFIED_BYTES_PTR = new ImmutableBytesPtr(UNVERIFIED_BYTES); + + // column qualifier of the single key value used to store all columns for the + // COLUMNS_STORED_IN_SINGLE_CELL storage scheme + String SINGLE_KEYVALUE_COLUMN_QUALIFIER = "1"; + byte[] SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES = Bytes.toBytes(SINGLE_KEYVALUE_COLUMN_QUALIFIER); + ImmutableBytesPtr SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR = + new ImmutableBytesPtr(SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); + + String LOCAL_INDEX_COLUMN_FAMILY_PREFIX = "L#"; + byte[] LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES = Bytes.toBytes(LOCAL_INDEX_COLUMN_FAMILY_PREFIX); + + String DEFAULT_LOCAL_INDEX_COLUMN_FAMILY = + LOCAL_INDEX_COLUMN_FAMILY_PREFIX + DEFAULT_COLUMN_FAMILY; + byte[] DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES = Bytes.toBytes(DEFAULT_LOCAL_INDEX_COLUMN_FAMILY); + ImmutableBytesPtr DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR = + new 
ImmutableBytesPtr(DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES); + + String ALL_FAMILY_PROPERTIES_KEY = ""; + String SYSTEM_TABLE_PK_NAME = "pk"; + + double MILLIS_TO_NANOS_CONVERTOR = Math.pow(10, 6); + BigDecimal BD_MILLIS_NANOS_CONVERSION = BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR); + BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY); + int MAX_ALLOWED_NANOS = 999999999; + int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100; + int BASE_TABLE_BASE_COLUMN_COUNT = -1; + + // String constants for the server side class names, so that we don't need the server jar + // on the client side + final String METADATA_SPLIT_POLICY_CLASSNAME = "org.apache.phoenix.schema.MetaDataSplitPolicy"; + final String SYSTEM_STATS_SPLIT_POLICY_CLASSNAME = + "org.apache.phoenix.schema.SystemStatsSplitPolicy"; + final String SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME = + "org.apache.phoenix.schema.SystemFunctionSplitPolicy"; + final String SYSTEM_TASK_SPLIT_POLICY_CLASSNAME = + "org.apache.phoenix.schema.SystemTaskSplitPolicy"; + final String INDEX_REGION_SPLIT_POLICY_CLASSNAME = + "org.apache.phoenix.hbase.index.IndexRegionSplitPolicy"; + + final String GLOBAL_INDEX_CHECKER_CLASSNAME = "org.apache.phoenix.index.GlobalIndexChecker"; + final String INDEX_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.hbase.index.IndexRegionObserver"; + final String PHOENIX_TRANSACTIONAL_INDEXER_CLASSNAME = + "org.apache.phoenix.index.PhoenixTransactionalIndexer"; + final String LOCAL_INDEX_SPLITTER_CLASSNAME = + "org.apache.hadoop.hbase.regionserver.LocalIndexSplitter"; + + final String INDEXER_CLASSNAME = "org.apache.phoenix.hbase.index.Indexer"; + final String SCAN_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.ScanRegionObserver"; + final String UNGROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver"; + final String GROUPED_AGGREGATE_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver"; + final String SERVER_CACHING_ENDPOINT_IMPL_CLASSNAME = + "org.apache.phoenix.coprocessor.ServerCachingEndpointImpl"; + + final String MULTI_ROW_MUTATION_ENDPOINT_CLASSNAME = + "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint"; + final String INDEX_HALF_STORE_FILE_READER_GENERATOR_CLASSNAME = + "org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator"; + final String META_DATA_ENDPOINT_IMPL_CLASSNAME = + "org.apache.phoenix.coprocessor.MetaDataEndpointImpl"; + final String META_DATA_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.MetaDataRegionObserver"; + final String SEQUENCE_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.SequenceRegionObserver"; + final String TASK_REGION_OBSERVER_CLASSNAME = "org.apache.phoenix.coprocessor.TaskRegionObserver"; + final String TASK_META_DATA_ENDPOINT_CLASSNAME = + "org.apache.phoenix.coprocessor.TaskMetaDataEndpoint"; + final String CHILD_LINK_META_DATA_ENDPOINT_CLASSNAME = + "org.apache.phoenix.coprocessor.ChildLinkMetaDataEndpoint"; + final String PHOENIX_TTL_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.PhoenixTTLRegionObserver"; + final String SYSTEM_CATALOG_REGION_OBSERVER_CLASSNAME = + "org.apache.phoenix.coprocessor.SystemCatalogRegionObserver"; + + // custom TagType + byte VIEW_MODIFIED_PROPERTY_TAG_TYPE = (byte) 70; + + String CDC_JSON_COL_NAME = "CDC JSON"; + String CDC_EVENT_TYPE = "event_type"; + String CDC_PRE_IMAGE = "pre_image"; + String CDC_POST_IMAGE = "post_image"; + String CDC_CHANGE_IMAGE = 
"change_image"; + String CDC_UPSERT_EVENT_TYPE = "upsert"; + String CDC_DELETE_EVENT_TYPE = "delete"; + String SPLITS_FILE = "SPLITS_FILE"; + + /** + * We mark counter values 0 to 10 as reserved. Value 0 is used by + * {@link #ENCODED_EMPTY_COLUMN_NAME}. Values 1-10 are reserved for special column qualifiers + * returned by Phoenix co-processors. + */ + int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11; + String CREATE_TABLE_METADATA = + // Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists + // exception and add columns to the SYSTEM.TABLE dynamically. + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"(\n" + + // PK columns + TENANT_ID + " VARCHAR NULL," + TABLE_SCHEM + " VARCHAR NULL," + TABLE_NAME + + " VARCHAR NOT NULL," + COLUMN_NAME + " VARCHAR NULL," + // null for table row + COLUMN_FAMILY + " VARCHAR NULL," + // using for CF to uniqueness for columns + // Table metadata (will be null for column rows) + TABLE_SEQ_NUM + " BIGINT," + TABLE_TYPE + " CHAR(1)," + PK_NAME + " VARCHAR," + COLUMN_COUNT + + " INTEGER," + SALT_BUCKETS + " INTEGER," + DATA_TABLE_NAME + " VARCHAR," + INDEX_STATE + + " CHAR(1),\n" + IMMUTABLE_ROWS + " BOOLEAN,\n" + VIEW_STATEMENT + " VARCHAR,\n" + + DEFAULT_COLUMN_FAMILY_NAME + " VARCHAR,\n" + DISABLE_WAL + " BOOLEAN,\n" + MULTI_TENANT + + " BOOLEAN,\n" + VIEW_TYPE + " UNSIGNED_TINYINT,\n" + VIEW_INDEX_ID + " BIGINT,\n" + + VIEW_INDEX_ID_DATA_TYPE + " INTEGER,\n" + PHOENIX_TTL + " BIGINT,\n" + PHOENIX_TTL_HWM + + " BIGINT,\n" + LAST_DDL_TIMESTAMP + " BIGINT, \n" + CHANGE_DETECTION_ENABLED + + " BOOLEAN, \n" + SCHEMA_VERSION + " VARCHAR, \n" + EXTERNAL_SCHEMA_ID + " VARCHAR, \n" + + STREAMING_TOPIC_NAME + " VARCHAR, \n" + INDEX_WHERE + " VARCHAR, \n" + MAX_LOOKBACK_AGE + + " BIGINT, \n" + CDC_INCLUDE_TABLE + " VARCHAR, \n" + TTL + " VARCHAR, \n" + ROW_KEY_MATCHER + + " VARBINARY, \n" + + // Column metadata (will be null for table row) + DATA_TYPE + " INTEGER," + COLUMN_SIZE + " INTEGER," + DECIMAL_DIGITS + " INTEGER," + NULLABLE + + " INTEGER," + ORDINAL_POSITION + " INTEGER," + SORT_ORDER + " INTEGER," + ARRAY_SIZE + + " INTEGER,\n" + VIEW_CONSTANT + " VARBINARY,\n" + IS_VIEW_REFERENCED + " BOOLEAN,\n" + + KEY_SEQ + " SMALLINT,\n" + + // Link metadata (only set on rows linking table to index or view) + LINK_TYPE + " UNSIGNED_TINYINT,\n" + + // Unused + TYPE_NAME + " VARCHAR," + REMARKS + " VARCHAR," + SELF_REFERENCING_COL_NAME + " VARCHAR," + + REF_GENERATION + " VARCHAR," + BUFFER_LENGTH + " INTEGER," + NUM_PREC_RADIX + " INTEGER," + + COLUMN_DEF + " VARCHAR," + SQL_DATA_TYPE + " INTEGER," + SQL_DATETIME_SUB + " INTEGER," + + CHAR_OCTET_LENGTH + " INTEGER," + IS_NULLABLE + " VARCHAR," + SCOPE_CATALOG + " VARCHAR," + + SCOPE_SCHEMA + " VARCHAR," + SCOPE_TABLE + " VARCHAR," + SOURCE_DATA_TYPE + " SMALLINT," + + IS_AUTOINCREMENT + " VARCHAR," + INDEX_TYPE + " UNSIGNED_TINYINT," + INDEX_DISABLE_TIMESTAMP + + " BIGINT," + STORE_NULLS + " BOOLEAN," + BASE_COLUMN_COUNT + " INTEGER," + + // Column metadata (will be null for table row) + IS_ROW_TIMESTAMP + " BOOLEAN, " + TRANSACTIONAL + " BOOLEAN," + UPDATE_CACHE_FREQUENCY + + " BIGINT," + IS_NAMESPACE_MAPPED + " BOOLEAN," + AUTO_PARTITION_SEQ + " VARCHAR," + + APPEND_ONLY_SCHEMA + " BOOLEAN," + GUIDE_POSTS_WIDTH + " BIGINT," + COLUMN_QUALIFIER + + " VARBINARY," + IMMUTABLE_STORAGE_SCHEME + " TINYINT, " + ENCODING_SCHEME + " TINYINT, " + + COLUMN_QUALIFIER_COUNTER + " INTEGER, " + USE_STATS_FOR_PARALLELIZATION + " BOOLEAN, " + + TRANSACTION_PROVIDER + " TINYINT, " + 
PHYSICAL_TABLE_NAME + " VARCHAR," + "CONSTRAINT " + + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + HConstants.VERSIONS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + + // Install split policy to prevent a tenant's metadata from being split across regions. + TableDescriptorBuilder.SPLIT_POLICY + "='" + METADATA_SPLIT_POLICY_CLASSNAME + "',\n" + + TRANSACTIONAL + "=" + Boolean.FALSE; + + String CREATE_STATS_TABLE_METADATA = + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_STATS_TABLE + "\"(\n" + + // PK columns + PHYSICAL_NAME + " VARCHAR NOT NULL," + COLUMN_FAMILY + " VARCHAR," + GUIDE_POST_KEY + + " VARBINARY," + GUIDE_POSTS_WIDTH + " BIGINT," + LAST_STATS_UPDATE_TIME + " DATE, " + + GUIDE_POSTS_ROW_COUNT + " BIGINT, " + "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + + " PRIMARY KEY (" + PHYSICAL_NAME + "," + COLUMN_FAMILY + "," + GUIDE_POST_KEY + "))\n" + + // Install split policy to prevent a physical table's stats from being split + // across regions. + TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_STATS_SPLIT_POLICY_CLASSNAME + "',\n" + + TRANSACTIONAL + "=" + Boolean.FALSE; + + String CREATE_SEQUENCE_METADATA = "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + TYPE_SEQUENCE + + "\"(\n" + TENANT_ID + " VARCHAR NULL," + SEQUENCE_SCHEMA + " VARCHAR NULL, \n" + SEQUENCE_NAME + + " VARCHAR NOT NULL, \n" + START_WITH + " BIGINT, \n" + CURRENT_VALUE + " BIGINT, \n" + + INCREMENT_BY + " BIGINT, \n" + CACHE_SIZE + " BIGINT, \n" + + // the following three columns were added in 3.1/4.1 + MIN_VALUE + " BIGINT, \n" + MAX_VALUE + " BIGINT, \n" + CYCLE_FLAG + " BOOLEAN, \n" + + LIMIT_REACHED_FLAG + " BOOLEAN \n" + " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + + TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" + HConstants.VERSIONS + + "=%s,\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + TRANSACTIONAL + "=" + + Boolean.FALSE; + + String UPGRADE_TABLE_SNAPSHOT_PREFIX = "_UPGRADING_TABLE_"; + + String CREATE_FUNCTION_METADATA = + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_FUNCTION_TABLE + "\"(\n" + + // Pk columns + TENANT_ID + " VARCHAR NULL," + FUNCTION_NAME + " VARCHAR NOT NULL, \n" + NUM_ARGS + + " INTEGER, \n" + + // Function metadata (will be null for argument row) + CLASS_NAME + " VARCHAR, \n" + JAR_PATH + " VARCHAR, \n" + RETURN_TYPE + " VARCHAR, \n" + + // Argument metadata (will be null for function row) + TYPE + " VARCHAR, \n" + ARG_POSITION + " VARBINARY, \n" + IS_ARRAY + " BOOLEAN, \n" + + IS_CONSTANT + " BOOLEAN, \n" + DEFAULT_VALUE + " VARCHAR, \n" + MIN_VALUE + " VARCHAR, \n" + + MAX_VALUE + " VARCHAR, \n" + " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + + TENANT_ID + ", " + FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" + + HConstants.VERSIONS + "=%s,\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + + + // Install split policy to prevent a tenant's metadata from being split across regions. 
+ TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_FUNCTION_SPLIT_POLICY_CLASSNAME + "',\n" + + TRANSACTIONAL + "=" + Boolean.FALSE; + + String CREATE_LOG_METADATA = + "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"(\n" + + // Pk columns + START_TIME + " DECIMAL, \n" + TABLE_NAME + " VARCHAR, \n" + QUERY_ID + " VARCHAR NOT NULL,\n" + + TENANT_ID + " VARCHAR ," + USER + " VARCHAR , \n" + CLIENT_IP + " VARCHAR, \n" + + // Function metadata (will be null for argument row) + QUERY + " VARCHAR, \n" + EXPLAIN_PLAN + " VARCHAR, \n" + + // Argument metadata (will be null for function row) + NO_OF_RESULTS_ITERATED + " BIGINT, \n" + QUERY_STATUS + " VARCHAR, \n" + EXCEPTION_TRACE + + " VARCHAR, \n" + GLOBAL_SCAN_DETAILS + " VARCHAR, \n" + BIND_PARAMETERS + " VARCHAR, \n" + + SCAN_METRICS_JSON + " VARCHAR, \n" + MetricType.getMetricColumnsDetails() + "\n" + + " CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (START_TIME, TABLE_NAME, QUERY_ID))\n" + + SALT_BUCKETS + "=%s,\n" + TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + + ColumnFamilyDescriptorBuilder.TTL + "=" + MetaDataProtocol.DEFAULT_LOG_TTL + ",\n" + + TableProperty.IMMUTABLE_STORAGE_SCHEME.toString() + " = " + + ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.name() + ",\n" + + TableProperty.COLUMN_ENCODED_BYTES.toString() + " = 1"; + + byte[] OFFSET_FAMILY = "f_offset".getBytes(StandardCharsets.UTF_8); + byte[] OFFSET_COLUMN = "c_offset".getBytes(StandardCharsets.UTF_8); + String LAST_SCAN = "LAST_SCAN"; + String HASH_JOIN_CACHE_RETRIES = "hashjoin.client.retries.number"; + int DEFAULT_HASH_JOIN_CACHE_RETRIES = 5; + + // Links from parent to child views are stored in a separate table for + // scalability + String CREATE_CHILD_LINK_METADATA = + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CHILD_LINK_TABLE + "\"(\n" + + // PK columns + TENANT_ID + " VARCHAR NULL," + TABLE_SCHEM + " VARCHAR NULL," + TABLE_NAME + + " VARCHAR NOT NULL," + COLUMN_NAME + " VARCHAR NULL," + COLUMN_FAMILY + " VARCHAR NULL," + + LINK_TYPE + " UNSIGNED_TINYINT,\n" + "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + + "))\n" + HConstants.VERSIONS + "=%s,\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + + "=%s,\n" + TRANSACTIONAL + "=" + Boolean.FALSE; + + String CREATE_MUTEX_METADATA = + "CREATE IMMUTABLE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_MUTEX_TABLE_NAME + "\"(\n" + + // Pk columns + TENANT_ID + " VARCHAR NULL," + TABLE_SCHEM + " VARCHAR NULL," + TABLE_NAME + + " VARCHAR NOT NULL," + COLUMN_NAME + " VARCHAR NULL," + // null for table row + COLUMN_FAMILY + " VARCHAR NULL " + // using for CF to uniqueness for columns + "CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + TABLE_SCHEM + "," + + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" + HConstants.VERSIONS + + "=%s,\n" + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + TRANSACTIONAL + "=" + + Boolean.FALSE + ",\n" + ColumnFamilyDescriptorBuilder.TTL + "=" + TTL_FOR_MUTEX; + + String CREATE_TASK_METADATA = + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_TASK_TABLE + "\"(\n" + + // PK columns + TASK_TYPE + " UNSIGNED_TINYINT NOT NULL," + TASK_TS + " TIMESTAMP NOT NULL," + TENANT_ID + + " VARCHAR NULL," + TABLE_SCHEM + " VARCHAR NULL," + TABLE_NAME + " VARCHAR NOT NULL,\n" + + // Non-PK columns + TASK_STATUS + " VARCHAR NULL," + TASK_END_TS + " TIMESTAMP NULL," + TASK_PRIORITY + 
+ " UNSIGNED_TINYINT NULL," + TASK_DATA + " VARCHAR NULL,\n" + "CONSTRAINT " + + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TASK_TYPE + "," + TASK_TS + " ROW_TIMESTAMP," + + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "))\n" + HConstants.VERSIONS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.TTL + "=" + TASK_TABLE_TTL + ",\n" + // 10 days + TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_TASK_SPLIT_POLICY_CLASSNAME + "',\n" + + TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + STORE_NULLS + "=" + Boolean.TRUE; + + String CREATE_TRANSFORM_METADATA = + "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_TRANSFORM_TABLE + "\"(\n" + + // PK columns + TENANT_ID + " VARCHAR NULL,\n" + TABLE_SCHEM + " VARCHAR NULL," + LOGICAL_TABLE_NAME + + " VARCHAR NOT NULL,\n" + + // Non-PK columns + NEW_PHYS_TABLE_NAME + " VARCHAR,\n" + TRANSFORM_TYPE + " INTEGER," + LOGICAL_PARENT_NAME + + " VARCHAR NULL,\n" + // If this is an index, Logical_Parent_Name is the data table name. + // Index name is not unique. + TRANSFORM_STATUS + " VARCHAR NULL," + TRANSFORM_JOB_ID + " VARCHAR NULL," + + TRANSFORM_RETRY_COUNT + " INTEGER NULL," + TRANSFORM_START_TS + " TIMESTAMP NULL," + + TRANSFORM_LAST_STATE_TS + " TIMESTAMP NULL," + OLD_METADATA + " VARBINARY NULL,\n" + + NEW_METADATA + " VARCHAR NULL,\n" + TRANSFORM_FUNCTION + " VARCHAR NULL\n" + "CONSTRAINT " + + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + TABLE_SCHEM + "," + + LOGICAL_TABLE_NAME + "))\n" + HConstants.VERSIONS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=%s,\n" + + ColumnFamilyDescriptorBuilder.TTL + "=" + TRANSFORM_TABLE_TTL + ",\n" + // 90 days + TableDescriptorBuilder.SPLIT_POLICY + "='" + SYSTEM_TASK_SPLIT_POLICY_CLASSNAME + "',\n" + + TRANSACTIONAL + "=" + Boolean.FALSE + ",\n" + STORE_NULLS + "=" + Boolean.TRUE; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java index 27e2bfed6cd..39cea601f5a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,529 +27,590 @@ import net.jcip.annotations.Immutable; - - /** - * - * Interface to group together services needed during querying. The - * parameters that may be set in {@link org.apache.hadoop.conf.Configuration} - * are documented here: https://github.com/forcedotcom/phoenix/wiki/Tuning - * - * + * Interface to group together services needed during querying. 
The parameters that may be set in + * {@link org.apache.hadoop.conf.Configuration} are documented here: + * https://github.com/forcedotcom/phoenix/wiki/Tuning * @since 0.1 */ @Immutable public interface QueryServices extends SQLCloseable { - public static final String KEEP_ALIVE_MS_ATTRIB = "phoenix.query.keepAliveMs"; - public static final String THREAD_POOL_SIZE_ATTRIB = "phoenix.query.threadPoolSize"; - public static final String QUEUE_SIZE_ATTRIB = "phoenix.query.queueSize"; - public static final String THREAD_TIMEOUT_MS_ATTRIB = "phoenix.query.timeoutMs"; - public static final String SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB = - "phoenix.query.server.spoolThresholdBytes"; - public static final String CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB = - "phoenix.query.client.spoolThresholdBytes"; - public static final String CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB = - "phoenix.query.client.orderBy.spooling.enabled"; - public static final String CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB = - "phoenix.query.client.join.spooling.enabled"; - public static final String SERVER_ORDERBY_SPOOLING_ENABLED_ATTRIB = - "phoenix.query.server.orderBy.spooling.enabled"; - public static final String HBASE_CLIENT_KEYTAB = "hbase.myclient.keytab"; - public static final String HBASE_CLIENT_PRINCIPAL = "hbase.myclient.principal"; - String QUERY_SERVICES_NAME = "phoenix.query.services.name"; - public static final String SPOOL_DIRECTORY = "phoenix.spool.directory"; - public static final String AUTO_COMMIT_ATTRIB = "phoenix.connection.autoCommit"; - // consistency configuration setting - public static final String CONSISTENCY_ATTRIB = "phoenix.connection.consistency"; - public static final String SCHEMA_ATTRIB = "phoenix.connection.schema"; - public static final String IS_NAMESPACE_MAPPING_ENABLED = "phoenix.schema.isNamespaceMappingEnabled"; - public static final String IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE = "phoenix.schema.mapSystemTablesToNamespace"; - // joni byte regex engine setting - public static final String USE_BYTE_BASED_REGEX_ATTRIB = "phoenix.regex.byteBased"; - public static final String DRIVER_SHUTDOWN_TIMEOUT_MS = "phoenix.shutdown.timeoutMs"; - public static final String CLIENT_INDEX_ASYNC_THRESHOLD = "phoenix.index.async.threshold"; - - /** - * max size to spool the the result into - * ${java.io.tmpdir}/ResultSpoolerXXX.bin if - * QueryServices#SPOOL_THRESHOLD_BYTES_ATTRIB is reached. - *
- * default is unlimited(-1) - *
- * if the threshold is reached, a {@link SpoolTooBigToDiskException } will be thrown - */ - public static final String MAX_SPOOL_TO_DISK_BYTES_ATTRIB = "phoenix.query.maxSpoolToDiskBytes"; - - /** - * Number of records to read per chunk when streaming records of a basic scan. - */ - public static final String SCAN_RESULT_CHUNK_SIZE = "phoenix.query.scanResultChunkSize"; - - public static final String MAX_MEMORY_PERC_ATTRIB = "phoenix.query.maxGlobalMemoryPercentage"; - public static final String MAX_TENANT_MEMORY_PERC_ATTRIB = "phoenix.query.maxTenantMemoryPercentage"; - public static final String MAX_SERVER_CACHE_SIZE_ATTRIB = "phoenix.query.maxServerCacheBytes"; - public static final String APPLY_TIME_ZONE_DISPLACMENT_ATTRIB = "phoenix.query.applyTimeZoneDisplacement"; - public static final String DATE_FORMAT_TIMEZONE_ATTRIB = "phoenix.query.dateFormatTimeZone"; - public static final String DATE_FORMAT_ATTRIB = "phoenix.query.dateFormat"; - public static final String TIME_FORMAT_ATTRIB = "phoenix.query.timeFormat"; - public static final String TIMESTAMP_FORMAT_ATTRIB = "phoenix.query.timestampFormat"; - - public static final String NUMBER_FORMAT_ATTRIB = "phoenix.query.numberFormat"; - public static final String CALL_QUEUE_ROUND_ROBIN_ATTRIB = "ipc.server.callqueue.roundrobin"; - public static final String SCAN_CACHE_SIZE_ATTRIB = "hbase.client.scanner.caching"; - public static final String MAX_MUTATION_SIZE_ATTRIB = "phoenix.mutate.maxSize"; - public static final String MAX_MUTATION_SIZE_BYTES_ATTRIB = "phoenix.mutate.maxSizeBytes"; - public static final String HBASE_CLIENT_KEYVALUE_MAXSIZE = "hbase.client.keyvalue.maxsize"; - - public static final String MUTATE_BATCH_SIZE_ATTRIB = "phoenix.mutate.batchSize"; - public static final String MUTATE_BATCH_SIZE_BYTES_ATTRIB = "phoenix.mutate.batchSizeBytes"; - public static final String MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxServerCacheTimeToLiveMs"; - public static final String MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxServerCachePersistenceTimeToLiveMs"; - - @Deprecated // Use FORCE_ROW_KEY_ORDER instead. 
- public static final String ROW_KEY_ORDER_SALTED_TABLE_ATTRIB = "phoenix.query.rowKeyOrderSaltedTable"; - - public static final String USE_INDEXES_ATTRIB = "phoenix.query.useIndexes"; - @Deprecated // use the IMMUTABLE keyword while creating the table - public static final String IMMUTABLE_ROWS_ATTRIB = "phoenix.mutate.immutableRows"; - public static final String INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB = "phoenix.index.mutableBatchSizeThreshold"; - public static final String DROP_METADATA_ATTRIB = "phoenix.schema.dropMetaData"; - public static final String GROUPBY_SPILLABLE_ATTRIB = "phoenix.groupby.spillable"; - public static final String GROUPBY_SPILL_FILES_ATTRIB = "phoenix.groupby.spillFiles"; - public static final String GROUPBY_MAX_CACHE_SIZE_ATTRIB = "phoenix.groupby.maxCacheSize"; - public static final String GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB = "phoenix.groupby.estimatedDistinctValues"; - public static final String AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB = "phoenix.aggregate.chunk_size_increase"; - - public static final String CALL_QUEUE_PRODUCER_ATTRIB_NAME = "CALL_QUEUE_PRODUCER"; - - public static final String MASTER_INFO_PORT_ATTRIB = "hbase.master.info.port"; - public static final String REGIONSERVER_INFO_PORT_ATTRIB = "hbase.regionserver.info.port"; - public static final String HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB = "hbase.client.scanner.timeout.period"; - public static final String RPC_TIMEOUT_ATTRIB = "hbase.rpc.timeout"; - public static final String DYNAMIC_JARS_DIR_KEY = "hbase.dynamic.jars.dir"; - @Deprecated // Use HConstants directly - public static final String ZOOKEEPER_QUORUM_ATTRIB = "hbase.zookeeper.quorum"; - @Deprecated // Use HConstants directly - public static final String ZOOKEEPER_PORT_ATTRIB = "hbase.zookeeper.property.clientPort"; - @Deprecated // Use HConstants directly - public static final String ZOOKEEPER_ROOT_NODE_ATTRIB = "zookeeper.znode.parent"; - public static final String DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB = "phoenix.distinct.value.compress.threshold"; - public static final String SEQUENCE_CACHE_SIZE_ATTRIB = "phoenix.sequence.cacheSize"; - public static final String MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheTimeToLiveMs"; - public static final String MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB = "phoenix.coprocessor.maxMetaDataCacheSize"; - public static final String MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB = "phoenix.client.maxMetaDataCacheSize"; - public static final String HA_GROUP_NAME_ATTRIB = "phoenix.ha.group"; - public static final String AUTO_UPGRADE_WHITELIST_ATTRIB = "phoenix.client.autoUpgradeWhiteList"; - // Mainly for testing to force spilling - public static final String MAX_MEMORY_SIZE_ATTRIB = "phoenix.query.maxGlobalMemorySize"; - - // The following config settings is to deal with SYSTEM.CATALOG moves(PHOENIX-916) among region servers - public static final String CLOCK_SKEW_INTERVAL_ATTRIB = "phoenix.clock.skew.interval"; - - // A master switch if to enable auto rebuild an index which failed to be updated previously - public static final String INDEX_FAILURE_HANDLING_REBUILD_ATTRIB = "phoenix.index.failure.handling.rebuild"; - public static final String INDEX_FAILURE_HANDLING_REBUILD_PERIOD = "phoenix.index.failure.handling.rebuild.period"; - public static final String INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB = "phoenix.index.rebuild.query.timeout"; - public static final String INDEX_REBUILD_RPC_TIMEOUT_ATTRIB = "phoenix.index.rebuild.rpc.timeout"; - public static final String 
INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB = "phoenix.index.rebuild.client.scanner.timeout"; - public static final String INDEX_REBUILD_RPC_RETRIES_COUNTER = "phoenix.index.rebuild.rpc.retries.counter"; - public static final String INDEX_REBUILD_RPC_RETRY_PAUSE_TIME = "phoenix.index.rebuild.rpc.retry.pause"; - - // Time interval to check if there is an index needs to be rebuild - public static final String INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB = - "phoenix.index.failure.handling.rebuild.interval"; - public static final String INDEX_REBUILD_TASK_INITIAL_DELAY = "phoenix.index.rebuild.task.initial.delay"; - public static final String START_TRUNCATE_TASK_DELAY = "phoenix.start.truncate.task.delay"; - - public static final String INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE = "phoenix.index.rebuild.batch.perTable"; - // If index disable timestamp is older than this threshold, then index rebuild task won't attempt to rebuild it - public static final String INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = "phoenix.index.rebuild.disabletimestamp.threshold"; - // threshold number of ms an index has been in PENDING_DISABLE, beyond which we consider it disabled - public static final String INDEX_PENDING_DISABLE_THRESHOLD = "phoenix.index.pending.disable.threshold"; - - // Block writes to data table when index write fails - public static final String INDEX_FAILURE_BLOCK_WRITE = "phoenix.index.failure.block.write"; - public static final String INDEX_FAILURE_DISABLE_INDEX = "phoenix.index.failure.disable.index"; - public static final String INDEX_FAILURE_THROW_EXCEPTION_ATTRIB = "phoenix.index.failure.throw.exception"; - public static final String INDEX_FAILURE_KILL_SERVER = "phoenix.index.failure.unhandled.killserver"; - - public static final String INDEX_CREATE_DEFAULT_STATE = "phoenix.index.create.default.state"; - - // Index will be partially re-built from index disable time stamp - following overlap time - @Deprecated - public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB = - "phoenix.index.failure.handling.rebuild.overlap.time"; - public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME_ATTRIB = - "phoenix.index.failure.handling.rebuild.overlap.backward.time"; - public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB = - "phoenix.index.failure.handling.rebuild.overlap.forward.time"; - public static final String INDEX_PRIOIRTY_ATTRIB = "phoenix.index.rpc.priority"; - public static final String METADATA_PRIOIRTY_ATTRIB = "phoenix.metadata.rpc.priority"; - public static final String SERVER_SIDE_PRIOIRTY_ATTRIB = "phoenix.serverside.rpc.priority"; - String INVALIDATE_METADATA_CACHE_PRIORITY_ATTRIB = - "phoenix.invalidate.metadata.cache.rpc.priority"; - - public static final String ALLOW_LOCAL_INDEX_ATTRIB = "phoenix.index.allowLocalIndex"; - - // Retries when doing server side writes to SYSTEM.CATALOG - public static final String METADATA_WRITE_RETRIES_NUMBER = "phoenix.metadata.rpc.retries.number"; - public static final String METADATA_WRITE_RETRY_PAUSE = "phoenix.metadata.rpc.pause"; - - // Config parameters for for configuring tracing - public static final String TRACING_FREQ_ATTRIB = "phoenix.trace.frequency"; - public static final String TRACING_PAGE_SIZE_ATTRIB = "phoenix.trace.read.pagesize"; - public static final String TRACING_PROBABILITY_THRESHOLD_ATTRIB = "phoenix.trace.probability.threshold"; - public static final String TRACING_STATS_TABLE_NAME_ATTRIB = "phoenix.trace.statsTableName"; - 
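// A hedged sketch (not part of the patch) of how a client typically overrides the tuning keys
// declared in this interface. The ZooKeeper quorum in the URL and the chosen values are
// placeholders; many of these keys can also (or only) be set in hbase-site.xml on the client
// classpath, so treat per-connection overrides as illustrative rather than authoritative.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class TuningOverrideSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Keys are the literal string values of the constants above, e.g. THREAD_POOL_SIZE_ATTRIB
    // and HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB.
    props.setProperty("phoenix.query.threadPoolSize", "128");
    props.setProperty("hbase.client.scanner.timeout.period", "600000");
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk1,zk2,zk3:2181", props);
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5")) {
      while (rs.next()) {
        System.out.println(rs.getString(1));
      }
    }
  }
}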
public static final String TRACING_CUSTOM_ANNOTATION_ATTRIB_PREFIX = "phoenix.trace.custom.annotation."; - public static final String TRACING_ENABLED = "phoenix.trace.enabled"; - public static final String TRACING_BATCH_SIZE = "phoenix.trace.batchSize"; - public static final String TRACING_THREAD_POOL_SIZE = "phoenix.trace.threadPoolSize"; - public static final String TRACING_TRACE_BUFFER_SIZE = "phoenix.trace.traceBufferSize"; - - public static final String USE_REVERSE_SCAN_ATTRIB = "phoenix.query.useReverseScan"; - - // Config parameters for stats collection - public static final String STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.updateFrequency"; - public static final String MIN_STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.minUpdateFrequency"; - public static final String STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB = "phoenix.stats.guidepost.width"; - public static final String STATS_GUIDEPOST_PER_REGION_ATTRIB = "phoenix.stats.guidepost.per.region"; - public static final String STATS_USE_CURRENT_TIME_ATTRIB = "phoenix.stats.useCurrentTime"; - - public static final String RUN_UPDATE_STATS_ASYNC = "phoenix.update.stats.command.async"; - public static final String STATS_SERVER_POOL_SIZE = "phoenix.stats.pool.size"; - public static final String COMMIT_STATS_ASYNC = "phoenix.stats.commit.async"; - // Maximum size in bytes taken up by cached table stats in the client - public static final String STATS_MAX_CACHE_SIZE = "phoenix.stats.cache.maxSize"; - // The size of the thread pool used for refreshing cached table stats in stats client cache - public static final String STATS_CACHE_THREAD_POOL_SIZE = "phoenix.stats.cache.threadPoolSize"; - - public static final String LOG_SALT_BUCKETS_ATTRIB = "phoenix.log.saltBuckets"; - public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets"; - public static final String COPROCESSOR_PRIORITY_ATTRIB = "phoenix.coprocessor.priority"; - public static final String EXPLAIN_CHUNK_COUNT_ATTRIB = "phoenix.explain.displayChunkCount"; - public static final String EXPLAIN_ROW_COUNT_ATTRIB = "phoenix.explain.displayRowCount"; - public static final String ALLOW_ONLINE_TABLE_SCHEMA_UPDATE = "hbase.online.schema.update.enable"; - public static final String NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK = "phoenix.schema.change.retries"; - public static final String DELAY_FOR_SCHEMA_UPDATE_CHECK = "phoenix.schema.change.delay"; - public static final String DEFAULT_STORE_NULLS_ATTRIB = "phoenix.table.default.store.nulls"; - public static final String DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB = "phoenix.table.istransactional.default"; - public static final String DEFAULT_TRANSACTION_PROVIDER_ATTRIB = "phoenix.table.transaction.provider.default"; - public static final String GLOBAL_METRICS_ENABLED = "phoenix.query.global.metrics.enabled"; - - public static final String TABLE_LEVEL_METRICS_ENABLED = "phoenix.monitoring.tableMetrics.enabled"; - public static final String METRIC_PUBLISHER_ENABLED = "phoenix.monitoring.metricsPublisher.enabled"; - public static final String METRIC_PUBLISHER_CLASS_NAME = "phoenix.monitoring.metricProvider.className"; - public static final String ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS = "phoenix.monitoring.allowedTableNames.list"; - - // Tag Name to determine the Phoenix Client Type - public static final String CLIENT_METRICS_TAG = "phoenix.client.metrics.tag"; - - // Transaction related configs - public static final String TRANSACTIONS_ENABLED = "phoenix.transactions.enabled"; - // Controls whether or not uncommitted data is automatically 
sent to HBase - // at the end of a statement execution when transaction state is passed through. - public static final String AUTO_FLUSH_ATTRIB = "phoenix.transactions.autoFlush"; - - // rpc queue configs - public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.rpc.index.handler.count"; - public static final String METADATA_HANDLER_COUNT_ATTRIB = "phoenix.rpc.metadata.handler.count"; - public static final String SERVER_SIDE_HANDLER_COUNT_ATTRIB = "phoenix.rpc.serverside.handler.count"; - String INVALIDATE_CACHE_HANDLER_COUNT_ATTRIB = "phoenix.rpc.invalidate.cache.handler.count"; - - public static final String FORCE_ROW_KEY_ORDER_ATTRIB = "phoenix.query.force.rowkeyorder"; - public static final String ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB = "phoenix.functions.allowUserDefinedFunctions"; - public static final String COLLECT_REQUEST_LEVEL_METRICS = "phoenix.query.request.metrics.enabled"; - public static final String ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE = "phoenix.view.allowNewColumnFamily"; - public static final String RETURN_SEQUENCE_VALUES_ATTRIB = "phoenix.sequence.returnValues"; - public static final String EXTRA_JDBC_ARGUMENTS_ATTRIB = "phoenix.jdbc.extra.arguments"; - - public static final String MAX_VERSIONS_TRANSACTIONAL_ATTRIB = "phoenix.transactions.maxVersions"; - - // metadata configs - public static final String DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB = "phoenix.system.default.keep.deleted.cells"; - public static final String DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB = "phoenix.system.default.max.versions"; - - public static final String RENEW_LEASE_ENABLED = "phoenix.scanner.lease.renew.enabled"; - public static final String RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS = "phoenix.scanner.lease.renew.interval"; - public static final String RENEW_LEASE_THRESHOLD_MILLISECONDS = "phoenix.scanner.lease.threshold"; - public static final String RENEW_LEASE_THREAD_POOL_SIZE = "phoenix.scanner.lease.pool.size"; - public static final String HCONNECTION_POOL_CORE_SIZE = "hbase.hconnection.threads.core"; - public static final String HCONNECTION_POOL_MAX_SIZE = "hbase.hconnection.threads.max"; - public static final String HTABLE_MAX_THREADS = "hbase.htable.threads.max"; - // time to wait before running second index population upsert select (so that any pending batches of rows on region server are also written to index) - public static final String INDEX_POPULATION_SLEEP_TIME = "phoenix.index.population.wait.time"; - public static final String LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB = "phoenix.client.localIndexUpgrade"; - public static final String LIMITED_QUERY_SERIAL_THRESHOLD = "phoenix.limited.query.serial.threshold"; - - //currently BASE64 and ASCII is supported - public static final String UPLOAD_BINARY_DATA_TYPE_ENCODING = "phoenix.upload.binaryDataType.encoding"; - // Toggle for server-written updates to SYSTEM.CATALOG - public static final String PHOENIX_ACLS_ENABLED = "phoenix.acls.enabled"; - - public static final String INDEX_ASYNC_BUILD_ENABLED = "phoenix.index.async.build.enabled"; - - public static final String MAX_INDEXES_PER_TABLE = "phoenix.index.maxIndexesPerTable"; - - public static final String CLIENT_CACHE_ENCODING = "phoenix.table.client.cache.encoding"; - public static final String AUTO_UPGRADE_ENABLED = "phoenix.autoupgrade.enabled"; - - public static final String CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS = - "phoenix.client.connection.max.duration"; - - //max number of connections from a single client to a single cluster. 0 is unlimited. 
- public static final String CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = - "phoenix.client.connection.max.allowed.connections"; - //max number of connections from a single client to a single cluster. 0 is unlimited. - public static final String INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS = - "phoenix.internal.connection.max.allowed.connections"; - public static final String CONNECTION_ACTIVITY_LOGGING_ENABLED = - "phoenix.connection.activity.logging.enabled"; - public static final String CONNECTION_ACTIVITY_LOGGING_INTERVAL = - "phoenix.connection.activity.logging.interval"; - public static final String DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB = "phoenix.default.column.encoded.bytes.attrib"; - public static final String DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB = "phoenix.default.immutable.storage.scheme"; - public static final String DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB = "phoenix.default.multitenant.immutable.storage.scheme"; - - - public static final String STATS_COLLECTION_ENABLED = "phoenix.stats.collection.enabled"; - public static final String USE_STATS_FOR_PARALLELIZATION = "phoenix.use.stats.parallelization"; - - // whether to enable server side RS -> RS calls for upsert select statements - public static final String ENABLE_SERVER_UPSERT_SELECT ="phoenix.client.enable.server.upsert.select"; - - public static final String PROPERTY_POLICY_PROVIDER_ENABLED = "phoenix.property.policy.provider.enabled"; - - // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM) - public static final String ENABLE_SERVER_SIDE_DELETE_MUTATIONS ="phoenix.client.enable.server.delete.mutations"; - public static final String ENABLE_SERVER_SIDE_UPSERT_MUTATIONS ="phoenix.client.enable.server.upsert.mutations"; - - //Update Cache Frequency default config attribute - public static final String DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB = "phoenix.default.update.cache.frequency"; - - //Update Cache Frequency for indexes in PENDING_DISABLE state - public static final String UPDATE_CACHE_FREQUENCY_FOR_PENDING_DISABLED_INDEX - = "phoenix.update.cache.frequency.pending.disable.index"; - - // whether to validate last ddl timestamps during client operations - public static final String LAST_DDL_TIMESTAMP_VALIDATION_ENABLED = "phoenix.ddl.timestamp.validation.enabled"; - - // Whether to enable cost-based-decision in the query optimizer - public static final String COST_BASED_OPTIMIZER_ENABLED = "phoenix.costbased.optimizer.enabled"; - public static final String SMALL_SCAN_THRESHOLD_ATTRIB = "phoenix.query.smallScanThreshold"; - public static final String WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB = - "phoenix.query.wildcard.dynamicColumns"; - public static final String LOG_LEVEL = "phoenix.log.level"; - public static final String AUDIT_LOG_LEVEL = "phoenix.audit.log.level"; - public static final String LOG_BUFFER_SIZE = "phoenix.log.buffer.size"; - public static final String LOG_BUFFER_WAIT_STRATEGY = "phoenix.log.wait.strategy"; - public static final String LOG_SAMPLE_RATE = "phoenix.log.sample.rate"; - public static final String LOG_HANDLER_COUNT = "phoenix.log.handler.count"; - - public static final String SYSTEM_CATALOG_SPLITTABLE = "phoenix.system.catalog.splittable"; - - // The parameters defined for handling task stored in table SYSTEM.TASK - // The time interval between periodic scans of table SYSTEM.TASK - public static final String TASK_HANDLING_INTERVAL_MS_ATTRIB = "phoenix.task.handling.interval.ms"; - // The maximum time for a task to stay in table SYSTEM.TASK - public static final 
String TASK_HANDLING_MAX_INTERVAL_MS_ATTRIB = "phoenix.task.handling.maxInterval.ms"; - // The initial delay before the first task from table SYSTEM.TASK is handled - public static final String TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB = "phoenix.task.handling.initial.delay.ms"; - // The minimum age of an unverified global index row to be eligible for deletion - public static final String GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB = "phoenix.global.index.row.age.threshold.to.delete.ms"; - // Enable the IndexRegionObserver Coprocessor - public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = "phoenix.index.region.observer.enabled"; - // Whether IndexRegionObserver/GlobalIndexChecker is enabled for all tables - public static final String INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB = "phoenix.index.region.observer.enabled.all.tables"; - // Enable Phoenix server paging - public static final String PHOENIX_SERVER_PAGING_ENABLED_ATTRIB = "phoenix.server.paging.enabled"; - // Enable support for long view index(default is false) - public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled"; - // The number of index rows to be rebuild in one RPC call - public static final String INDEX_REBUILD_PAGE_SIZE_IN_ROWS = "phoenix.index.rebuild_page_size_in_rows"; - // The number of index rows to be scanned in one RPC call - String INDEX_PAGE_SIZE_IN_ROWS = "phoenix.index.page_size_in_rows"; - // Flag indicating that server side masking of ttl expired rows is enabled. - public static final String PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED = "phoenix.ttl.server_side.masking.enabled"; - // The time limit on the amount of work to be done in one RPC call - public static final String PHOENIX_SERVER_PAGE_SIZE_MS = "phoenix.server.page.size.ms"; - // Phoenix TTL implemented by CompactionScanner and TTLRegionScanner is enabled - public static final String PHOENIX_TABLE_TTL_ENABLED = "phoenix.table.ttl.enabled"; - // Copied here to avoid dependency on hbase-server - public static final String WAL_EDIT_CODEC_ATTRIB = "hbase.regionserver.wal.codec"; - //Property to know whether TTL at View Level is enabled - public static final String PHOENIX_VIEW_TTL_ENABLED = "phoenix.view.ttl.enabled"; - - public static final String PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT = "phoenix.view.ttl.tenant_views_per_scan.limit"; - - // Before 4.15 when we created a view we included the parent table column metadata in the view - // metadata. After PHOENIX-3534 we allow SYSTEM.CATALOG to split and no longer store the parent - // table column metadata along with the child view metadata. When we resolve a child view, we - // resolve its ancestors and include their columns. - // Also, before 4.15 when we added a column to a base table we would have to propagate the - // column metadata to all its child views. After PHOENIX-3534 we no longer propagate metadata - // changes from a parent to its children (we just resolve its ancestors and include their columns) - // - // The following config is used to continue writing the parent table column metadata while - // creating a view and also prevent metadata changes to a parent table/view that needs to be - // propagated to its children. 
This is done to allow rollback of the splittable SYSTEM.CATALOG - // feature - // - // By default this config is false meaning that rolling back the upgrade is not possible - // If this config is true and you want to rollback the upgrade be sure to run the sql commands in - // UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD links in SYSTEM.CATALOG. This is needed - // as from 4.15 onwards the PARENT->CHILD links are stored in a separate SYSTEM.CHILD_LINK table. - public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = - "phoenix.allow.system.catalog.rollback"; - - // Phoenix parameter used to indicate what implementation is used for providing the client - // stats guide post cache. - // QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS is used if this is not provided - public static final String GUIDE_POSTS_CACHE_FACTORY_CLASS = "phoenix.guide.posts.cache.factory.class"; - - public static final String PENDING_MUTATIONS_DDL_THROW_ATTRIB = "phoenix.pending.mutations.before.ddl.throw"; - - // The range of bins for latency metrics for histogram. - public static final String PHOENIX_HISTOGRAM_LATENCY_RANGES = "phoenix.histogram.latency.ranges"; - // The range of bins for size metrics for histogram. - public static final String PHOENIX_HISTOGRAM_SIZE_RANGES = "phoenix.histogram.size.ranges"; - - // Connection Query Service Metrics Configs - String CONNECTION_QUERY_SERVICE_METRICS_ENABLED = "phoenix.conn.query.service.metrics.enabled"; - String CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_CLASSNAME = - "phoenix.monitoring.connection.query.service.metricProvider.className"; - String CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED = - "phoenix.conn.query.service.metricsPublisher.enabled"; - // The range of bins for Connection Query Service Metrics of histogram. - String CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES = - "phoenix.conn.query.service.histogram.size.ranges"; - - // This config is used to move (copy and delete) the child links from the SYSTEM.CATALOG to SYSTEM.CHILD_LINK table. - // As opposed to a copy and async (out of band) delete. - public static final String MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED = "phoenix.move.child_link.during.upgrade"; - - /** - * Parameter to indicate the source of operation attribute. - * It can include metadata about the customer, service, etc. - */ - String SOURCE_OPERATION_ATTRIB = "phoenix.source.operation"; - - // The max point keys that can be generated for large in list clause - public static final String MAX_IN_LIST_SKIP_SCAN_SIZE = "phoenix.max.inList.skipScan.size"; - - /** - * Parameter to skip the system tables existence check to avoid unnecessary calls to - * Region server holding the SYSTEM.CATALOG table in batch oriented jobs. - */ - String SKIP_SYSTEM_TABLES_EXISTENCE_CHECK = "phoenix.skip.system.tables.existence.check"; - - /** - * Parameter to skip the minimum version check for system table upgrades - */ - String SKIP_UPGRADE_BLOCK_CHECK = "phoenix.skip.upgrade.block.check"; - - /** - * Config key to represent max region locations to be displayed as part of the Explain plan - * output. 
- */ - String MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN = - "phoenix.max.region.locations.size.explain.plan"; - - /** - * Parameter to disable the server merges for hinted uncovered indexes - */ - String SERVER_MERGE_FOR_UNCOVERED_INDEX = "phoenix.query.global.server.merge.enable"; - String PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS = - "phoenix.metadata.cache.invalidation.timeoutMs"; - // Default to 10 seconds. - long PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS_DEFAULT = 10 * 1000; - String PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED = "phoenix.metadata.invalidate.cache.enabled"; - - String PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE = - "phoenix.metadata.cache.invalidation.threadPool.size"; - /** - * Param to determine whether client can disable validation to figure out if any of the - * descendent views extend primary key of their parents. Since this is a bit of - * expensive call, we can opt in to disable it. By default, this check will always be performed - * while creating index (PHOENIX-7067) on any table or view. This config can be used for - * disabling other subtree validation purpose as well. - */ - String DISABLE_VIEW_SUBTREE_VALIDATION = "phoenix.disable.view.subtree.validation"; - - boolean DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION = false; - - /** - * Param to enable updatable view restriction that only mark view as updatable if rows - * cannot overlap with other updatable views. - */ - String PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED = - "phoenix.updatable.view.restriction.enabled"; - - boolean DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED = false; - - /** - * Only used by tests: parameter to determine num of regionservers to be created by - * MiniHBaseCluster. - */ - String TESTS_MINI_CLUSTER_NUM_REGION_SERVERS = "phoenix.tests.minicluster.numregionservers"; - - - /** - * Config to inject any processing after the client retrieves dummy result from the server. - */ - String PHOENIX_POST_DUMMY_PROCESS = "phoenix.scanning.result.post.dummy.process"; - - /** - * Config to inject any processing after the client retrieves valid result from the server. - */ - String PHOENIX_POST_VALID_PROCESS = "phoenix.scanning.result.post.valid.process"; - - /** - * New start rowkey to be used by paging region scanner for the scan. - */ - String PHOENIX_PAGING_NEW_SCAN_START_ROWKEY = "phoenix.paging.start.newscan.startrow"; - - /** - * New start rowkey to be included by paging region scanner for the scan. The value of the - * attribute is expected to be boolean. - */ - String PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE = - "phoenix.paging.start.newscan.startrow.include"; - - /** - * Num of retries while retrieving the region location details for the given table. - */ - String PHOENIX_GET_REGIONS_RETRIES = "phoenix.get.table.regions.retries"; - - int DEFAULT_PHOENIX_GET_REGIONS_RETRIES = 10; - - String PHOENIX_GET_METADATA_READ_LOCK_ENABLED = "phoenix.get.metadata.read.lock.enabled"; - - /** - * If server side metadata cache is empty, take Phoenix writeLock for the given row - * and make sure we can acquire the writeLock within the configurable duration. 
- */ - String PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT = - "phoenix.metadata.update.rowlock.timeout"; - - long DEFAULT_PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT = 60000; - - /** - * Get executor service used for parallel scans - */ - public ThreadPoolExecutor getExecutor(); - /** - * Get the memory manager used to track memory usage - */ - public MemoryManager getMemoryManager(); - - /** - * Get the properties from the HBase configuration in a - * read-only structure that avoids any synchronization - */ - public ReadOnlyProps getProps(); - - /** - * Get query optimizer used to choose the best query plan - */ - public QueryOptimizer getOptimizer(); + public static final String KEEP_ALIVE_MS_ATTRIB = "phoenix.query.keepAliveMs"; + public static final String THREAD_POOL_SIZE_ATTRIB = "phoenix.query.threadPoolSize"; + public static final String QUEUE_SIZE_ATTRIB = "phoenix.query.queueSize"; + public static final String THREAD_TIMEOUT_MS_ATTRIB = "phoenix.query.timeoutMs"; + public static final String SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB = + "phoenix.query.server.spoolThresholdBytes"; + public static final String CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB = + "phoenix.query.client.spoolThresholdBytes"; + public static final String CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB = + "phoenix.query.client.orderBy.spooling.enabled"; + public static final String CLIENT_JOIN_SPOOLING_ENABLED_ATTRIB = + "phoenix.query.client.join.spooling.enabled"; + public static final String SERVER_ORDERBY_SPOOLING_ENABLED_ATTRIB = + "phoenix.query.server.orderBy.spooling.enabled"; + public static final String HBASE_CLIENT_KEYTAB = "hbase.myclient.keytab"; + public static final String HBASE_CLIENT_PRINCIPAL = "hbase.myclient.principal"; + String QUERY_SERVICES_NAME = "phoenix.query.services.name"; + public static final String SPOOL_DIRECTORY = "phoenix.spool.directory"; + public static final String AUTO_COMMIT_ATTRIB = "phoenix.connection.autoCommit"; + // consistency configuration setting + public static final String CONSISTENCY_ATTRIB = "phoenix.connection.consistency"; + public static final String SCHEMA_ATTRIB = "phoenix.connection.schema"; + public static final String IS_NAMESPACE_MAPPING_ENABLED = + "phoenix.schema.isNamespaceMappingEnabled"; + public static final String IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE = + "phoenix.schema.mapSystemTablesToNamespace"; + // joni byte regex engine setting + public static final String USE_BYTE_BASED_REGEX_ATTRIB = "phoenix.regex.byteBased"; + public static final String DRIVER_SHUTDOWN_TIMEOUT_MS = "phoenix.shutdown.timeoutMs"; + public static final String CLIENT_INDEX_ASYNC_THRESHOLD = "phoenix.index.async.threshold"; + + /** + * max size to spool the the result into ${java.io.tmpdir}/ResultSpoolerXXX.bin if + * QueryServices#SPOOL_THRESHOLD_BYTES_ATTRIB is reached. + *
+
+   * default is unlimited(-1)
+   *
+ * if the threshold is reached, a {@link SpoolTooBigToDiskException } will be thrown + */ + public static final String MAX_SPOOL_TO_DISK_BYTES_ATTRIB = "phoenix.query.maxSpoolToDiskBytes"; + + /** + * Number of records to read per chunk when streaming records of a basic scan. + */ + public static final String SCAN_RESULT_CHUNK_SIZE = "phoenix.query.scanResultChunkSize"; + + public static final String MAX_MEMORY_PERC_ATTRIB = "phoenix.query.maxGlobalMemoryPercentage"; + public static final String MAX_TENANT_MEMORY_PERC_ATTRIB = + "phoenix.query.maxTenantMemoryPercentage"; + public static final String MAX_SERVER_CACHE_SIZE_ATTRIB = "phoenix.query.maxServerCacheBytes"; + public static final String APPLY_TIME_ZONE_DISPLACMENT_ATTRIB = + "phoenix.query.applyTimeZoneDisplacement"; + public static final String DATE_FORMAT_TIMEZONE_ATTRIB = "phoenix.query.dateFormatTimeZone"; + public static final String DATE_FORMAT_ATTRIB = "phoenix.query.dateFormat"; + public static final String TIME_FORMAT_ATTRIB = "phoenix.query.timeFormat"; + public static final String TIMESTAMP_FORMAT_ATTRIB = "phoenix.query.timestampFormat"; + + public static final String NUMBER_FORMAT_ATTRIB = "phoenix.query.numberFormat"; + public static final String CALL_QUEUE_ROUND_ROBIN_ATTRIB = "ipc.server.callqueue.roundrobin"; + public static final String SCAN_CACHE_SIZE_ATTRIB = "hbase.client.scanner.caching"; + public static final String MAX_MUTATION_SIZE_ATTRIB = "phoenix.mutate.maxSize"; + public static final String MAX_MUTATION_SIZE_BYTES_ATTRIB = "phoenix.mutate.maxSizeBytes"; + public static final String HBASE_CLIENT_KEYVALUE_MAXSIZE = "hbase.client.keyvalue.maxsize"; + + public static final String MUTATE_BATCH_SIZE_ATTRIB = "phoenix.mutate.batchSize"; + public static final String MUTATE_BATCH_SIZE_BYTES_ATTRIB = "phoenix.mutate.batchSizeBytes"; + public static final String MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB = + "phoenix.coprocessor.maxServerCacheTimeToLiveMs"; + public static final String MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB = + "phoenix.coprocessor.maxServerCachePersistenceTimeToLiveMs"; + + @Deprecated // Use FORCE_ROW_KEY_ORDER instead. 
+ public static final String ROW_KEY_ORDER_SALTED_TABLE_ATTRIB = + "phoenix.query.rowKeyOrderSaltedTable"; + + public static final String USE_INDEXES_ATTRIB = "phoenix.query.useIndexes"; + @Deprecated // use the IMMUTABLE keyword while creating the table + public static final String IMMUTABLE_ROWS_ATTRIB = "phoenix.mutate.immutableRows"; + public static final String INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB = + "phoenix.index.mutableBatchSizeThreshold"; + public static final String DROP_METADATA_ATTRIB = "phoenix.schema.dropMetaData"; + public static final String GROUPBY_SPILLABLE_ATTRIB = "phoenix.groupby.spillable"; + public static final String GROUPBY_SPILL_FILES_ATTRIB = "phoenix.groupby.spillFiles"; + public static final String GROUPBY_MAX_CACHE_SIZE_ATTRIB = "phoenix.groupby.maxCacheSize"; + public static final String GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB = + "phoenix.groupby.estimatedDistinctValues"; + public static final String AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB = + "phoenix.aggregate.chunk_size_increase"; + + public static final String CALL_QUEUE_PRODUCER_ATTRIB_NAME = "CALL_QUEUE_PRODUCER"; + + public static final String MASTER_INFO_PORT_ATTRIB = "hbase.master.info.port"; + public static final String REGIONSERVER_INFO_PORT_ATTRIB = "hbase.regionserver.info.port"; + public static final String HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB = + "hbase.client.scanner.timeout.period"; + public static final String RPC_TIMEOUT_ATTRIB = "hbase.rpc.timeout"; + public static final String DYNAMIC_JARS_DIR_KEY = "hbase.dynamic.jars.dir"; + @Deprecated // Use HConstants directly + public static final String ZOOKEEPER_QUORUM_ATTRIB = "hbase.zookeeper.quorum"; + @Deprecated // Use HConstants directly + public static final String ZOOKEEPER_PORT_ATTRIB = "hbase.zookeeper.property.clientPort"; + @Deprecated // Use HConstants directly + public static final String ZOOKEEPER_ROOT_NODE_ATTRIB = "zookeeper.znode.parent"; + public static final String DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB = + "phoenix.distinct.value.compress.threshold"; + public static final String SEQUENCE_CACHE_SIZE_ATTRIB = "phoenix.sequence.cacheSize"; + public static final String MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB = + "phoenix.coprocessor.maxMetaDataCacheTimeToLiveMs"; + public static final String MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB = + "phoenix.coprocessor.maxMetaDataCacheSize"; + public static final String MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB = + "phoenix.client.maxMetaDataCacheSize"; + public static final String HA_GROUP_NAME_ATTRIB = "phoenix.ha.group"; + public static final String AUTO_UPGRADE_WHITELIST_ATTRIB = "phoenix.client.autoUpgradeWhiteList"; + // Mainly for testing to force spilling + public static final String MAX_MEMORY_SIZE_ATTRIB = "phoenix.query.maxGlobalMemorySize"; + + // The following config settings is to deal with SYSTEM.CATALOG moves(PHOENIX-916) among region + // servers + public static final String CLOCK_SKEW_INTERVAL_ATTRIB = "phoenix.clock.skew.interval"; + + // A master switch if to enable auto rebuild an index which failed to be updated previously + public static final String INDEX_FAILURE_HANDLING_REBUILD_ATTRIB = + "phoenix.index.failure.handling.rebuild"; + public static final String INDEX_FAILURE_HANDLING_REBUILD_PERIOD = + "phoenix.index.failure.handling.rebuild.period"; + public static final String INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB = + "phoenix.index.rebuild.query.timeout"; + public static final String INDEX_REBUILD_RPC_TIMEOUT_ATTRIB = "phoenix.index.rebuild.rpc.timeout"; + public 
static final String INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB = + "phoenix.index.rebuild.client.scanner.timeout"; + public static final String INDEX_REBUILD_RPC_RETRIES_COUNTER = + "phoenix.index.rebuild.rpc.retries.counter"; + public static final String INDEX_REBUILD_RPC_RETRY_PAUSE_TIME = + "phoenix.index.rebuild.rpc.retry.pause"; + + // Time interval to check if there is an index needs to be rebuild + public static final String INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB = + "phoenix.index.failure.handling.rebuild.interval"; + public static final String INDEX_REBUILD_TASK_INITIAL_DELAY = + "phoenix.index.rebuild.task.initial.delay"; + public static final String START_TRUNCATE_TASK_DELAY = "phoenix.start.truncate.task.delay"; + + public static final String INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE = + "phoenix.index.rebuild.batch.perTable"; + // If index disable timestamp is older than this threshold, then index rebuild task won't attempt + // to rebuild it + public static final String INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = + "phoenix.index.rebuild.disabletimestamp.threshold"; + // threshold number of ms an index has been in PENDING_DISABLE, beyond which we consider it + // disabled + public static final String INDEX_PENDING_DISABLE_THRESHOLD = + "phoenix.index.pending.disable.threshold"; + + // Block writes to data table when index write fails + public static final String INDEX_FAILURE_BLOCK_WRITE = "phoenix.index.failure.block.write"; + public static final String INDEX_FAILURE_DISABLE_INDEX = "phoenix.index.failure.disable.index"; + public static final String INDEX_FAILURE_THROW_EXCEPTION_ATTRIB = + "phoenix.index.failure.throw.exception"; + public static final String INDEX_FAILURE_KILL_SERVER = + "phoenix.index.failure.unhandled.killserver"; + + public static final String INDEX_CREATE_DEFAULT_STATE = "phoenix.index.create.default.state"; + + // Index will be partially re-built from index disable time stamp - following overlap time + @Deprecated + public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB = + "phoenix.index.failure.handling.rebuild.overlap.time"; + public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME_ATTRIB = + "phoenix.index.failure.handling.rebuild.overlap.backward.time"; + public static final String INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB = + "phoenix.index.failure.handling.rebuild.overlap.forward.time"; + public static final String INDEX_PRIOIRTY_ATTRIB = "phoenix.index.rpc.priority"; + public static final String METADATA_PRIOIRTY_ATTRIB = "phoenix.metadata.rpc.priority"; + public static final String SERVER_SIDE_PRIOIRTY_ATTRIB = "phoenix.serverside.rpc.priority"; + String INVALIDATE_METADATA_CACHE_PRIORITY_ATTRIB = + "phoenix.invalidate.metadata.cache.rpc.priority"; + + public static final String ALLOW_LOCAL_INDEX_ATTRIB = "phoenix.index.allowLocalIndex"; + + // Retries when doing server side writes to SYSTEM.CATALOG + public static final String METADATA_WRITE_RETRIES_NUMBER = "phoenix.metadata.rpc.retries.number"; + public static final String METADATA_WRITE_RETRY_PAUSE = "phoenix.metadata.rpc.pause"; + + // Config parameters for for configuring tracing + public static final String TRACING_FREQ_ATTRIB = "phoenix.trace.frequency"; + public static final String TRACING_PAGE_SIZE_ATTRIB = "phoenix.trace.read.pagesize"; + public static final String TRACING_PROBABILITY_THRESHOLD_ATTRIB = + "phoenix.trace.probability.threshold"; + public static final String 
TRACING_STATS_TABLE_NAME_ATTRIB = "phoenix.trace.statsTableName"; + public static final String TRACING_CUSTOM_ANNOTATION_ATTRIB_PREFIX = + "phoenix.trace.custom.annotation."; + public static final String TRACING_ENABLED = "phoenix.trace.enabled"; + public static final String TRACING_BATCH_SIZE = "phoenix.trace.batchSize"; + public static final String TRACING_THREAD_POOL_SIZE = "phoenix.trace.threadPoolSize"; + public static final String TRACING_TRACE_BUFFER_SIZE = "phoenix.trace.traceBufferSize"; + + public static final String USE_REVERSE_SCAN_ATTRIB = "phoenix.query.useReverseScan"; + + // Config parameters for stats collection + public static final String STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.updateFrequency"; + public static final String MIN_STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.minUpdateFrequency"; + public static final String STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB = "phoenix.stats.guidepost.width"; + public static final String STATS_GUIDEPOST_PER_REGION_ATTRIB = + "phoenix.stats.guidepost.per.region"; + public static final String STATS_USE_CURRENT_TIME_ATTRIB = "phoenix.stats.useCurrentTime"; + + public static final String RUN_UPDATE_STATS_ASYNC = "phoenix.update.stats.command.async"; + public static final String STATS_SERVER_POOL_SIZE = "phoenix.stats.pool.size"; + public static final String COMMIT_STATS_ASYNC = "phoenix.stats.commit.async"; + // Maximum size in bytes taken up by cached table stats in the client + public static final String STATS_MAX_CACHE_SIZE = "phoenix.stats.cache.maxSize"; + // The size of the thread pool used for refreshing cached table stats in stats client cache + public static final String STATS_CACHE_THREAD_POOL_SIZE = "phoenix.stats.cache.threadPoolSize"; + + public static final String LOG_SALT_BUCKETS_ATTRIB = "phoenix.log.saltBuckets"; + public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets"; + public static final String COPROCESSOR_PRIORITY_ATTRIB = "phoenix.coprocessor.priority"; + public static final String EXPLAIN_CHUNK_COUNT_ATTRIB = "phoenix.explain.displayChunkCount"; + public static final String EXPLAIN_ROW_COUNT_ATTRIB = "phoenix.explain.displayRowCount"; + public static final String ALLOW_ONLINE_TABLE_SCHEMA_UPDATE = "hbase.online.schema.update.enable"; + public static final String NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK = "phoenix.schema.change.retries"; + public static final String DELAY_FOR_SCHEMA_UPDATE_CHECK = "phoenix.schema.change.delay"; + public static final String DEFAULT_STORE_NULLS_ATTRIB = "phoenix.table.default.store.nulls"; + public static final String DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB = + "phoenix.table.istransactional.default"; + public static final String DEFAULT_TRANSACTION_PROVIDER_ATTRIB = + "phoenix.table.transaction.provider.default"; + public static final String GLOBAL_METRICS_ENABLED = "phoenix.query.global.metrics.enabled"; + + public static final String TABLE_LEVEL_METRICS_ENABLED = + "phoenix.monitoring.tableMetrics.enabled"; + public static final String METRIC_PUBLISHER_ENABLED = + "phoenix.monitoring.metricsPublisher.enabled"; + public static final String METRIC_PUBLISHER_CLASS_NAME = + "phoenix.monitoring.metricProvider.className"; + public static final String ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS = + "phoenix.monitoring.allowedTableNames.list"; + + // Tag Name to determine the Phoenix Client Type + public static final String CLIENT_METRICS_TAG = "phoenix.client.metrics.tag"; + + // Transaction related configs + public static final String TRANSACTIONS_ENABLED = 
"phoenix.transactions.enabled"; + // Controls whether or not uncommitted data is automatically sent to HBase + // at the end of a statement execution when transaction state is passed through. + public static final String AUTO_FLUSH_ATTRIB = "phoenix.transactions.autoFlush"; + + // rpc queue configs + public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.rpc.index.handler.count"; + public static final String METADATA_HANDLER_COUNT_ATTRIB = "phoenix.rpc.metadata.handler.count"; + public static final String SERVER_SIDE_HANDLER_COUNT_ATTRIB = + "phoenix.rpc.serverside.handler.count"; + String INVALIDATE_CACHE_HANDLER_COUNT_ATTRIB = "phoenix.rpc.invalidate.cache.handler.count"; + + public static final String FORCE_ROW_KEY_ORDER_ATTRIB = "phoenix.query.force.rowkeyorder"; + public static final String ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB = + "phoenix.functions.allowUserDefinedFunctions"; + public static final String COLLECT_REQUEST_LEVEL_METRICS = + "phoenix.query.request.metrics.enabled"; + public static final String ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE = + "phoenix.view.allowNewColumnFamily"; + public static final String RETURN_SEQUENCE_VALUES_ATTRIB = "phoenix.sequence.returnValues"; + public static final String EXTRA_JDBC_ARGUMENTS_ATTRIB = "phoenix.jdbc.extra.arguments"; + + public static final String MAX_VERSIONS_TRANSACTIONAL_ATTRIB = "phoenix.transactions.maxVersions"; + + // metadata configs + public static final String DEFAULT_SYSTEM_KEEP_DELETED_CELLS_ATTRIB = + "phoenix.system.default.keep.deleted.cells"; + public static final String DEFAULT_SYSTEM_MAX_VERSIONS_ATTRIB = + "phoenix.system.default.max.versions"; + + public static final String RENEW_LEASE_ENABLED = "phoenix.scanner.lease.renew.enabled"; + public static final String RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS = + "phoenix.scanner.lease.renew.interval"; + public static final String RENEW_LEASE_THRESHOLD_MILLISECONDS = "phoenix.scanner.lease.threshold"; + public static final String RENEW_LEASE_THREAD_POOL_SIZE = "phoenix.scanner.lease.pool.size"; + public static final String HCONNECTION_POOL_CORE_SIZE = "hbase.hconnection.threads.core"; + public static final String HCONNECTION_POOL_MAX_SIZE = "hbase.hconnection.threads.max"; + public static final String HTABLE_MAX_THREADS = "hbase.htable.threads.max"; + // time to wait before running second index population upsert select (so that any pending batches + // of rows on region server are also written to index) + public static final String INDEX_POPULATION_SLEEP_TIME = "phoenix.index.population.wait.time"; + public static final String LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB = "phoenix.client.localIndexUpgrade"; + public static final String LIMITED_QUERY_SERIAL_THRESHOLD = + "phoenix.limited.query.serial.threshold"; + + // currently BASE64 and ASCII is supported + public static final String UPLOAD_BINARY_DATA_TYPE_ENCODING = + "phoenix.upload.binaryDataType.encoding"; + // Toggle for server-written updates to SYSTEM.CATALOG + public static final String PHOENIX_ACLS_ENABLED = "phoenix.acls.enabled"; + + public static final String INDEX_ASYNC_BUILD_ENABLED = "phoenix.index.async.build.enabled"; + + public static final String MAX_INDEXES_PER_TABLE = "phoenix.index.maxIndexesPerTable"; + + public static final String CLIENT_CACHE_ENCODING = "phoenix.table.client.cache.encoding"; + public static final String AUTO_UPGRADE_ENABLED = "phoenix.autoupgrade.enabled"; + + public static final String CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS = + 
"phoenix.client.connection.max.duration"; + + // max number of connections from a single client to a single cluster. 0 is unlimited. + public static final String CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = + "phoenix.client.connection.max.allowed.connections"; + // max number of connections from a single client to a single cluster. 0 is unlimited. + public static final String INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS = + "phoenix.internal.connection.max.allowed.connections"; + public static final String CONNECTION_ACTIVITY_LOGGING_ENABLED = + "phoenix.connection.activity.logging.enabled"; + public static final String CONNECTION_ACTIVITY_LOGGING_INTERVAL = + "phoenix.connection.activity.logging.interval"; + public static final String DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB = + "phoenix.default.column.encoded.bytes.attrib"; + public static final String DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB = + "phoenix.default.immutable.storage.scheme"; + public static final String DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB = + "phoenix.default.multitenant.immutable.storage.scheme"; + + public static final String STATS_COLLECTION_ENABLED = "phoenix.stats.collection.enabled"; + public static final String USE_STATS_FOR_PARALLELIZATION = "phoenix.use.stats.parallelization"; + + // whether to enable server side RS -> RS calls for upsert select statements + public static final String ENABLE_SERVER_UPSERT_SELECT = + "phoenix.client.enable.server.upsert.select"; + + public static final String PROPERTY_POLICY_PROVIDER_ENABLED = + "phoenix.property.policy.provider.enabled"; + + // whether to trigger mutations on the server at all (UPSERT/DELETE or DELETE FROM) + public static final String ENABLE_SERVER_SIDE_DELETE_MUTATIONS = + "phoenix.client.enable.server.delete.mutations"; + public static final String ENABLE_SERVER_SIDE_UPSERT_MUTATIONS = + "phoenix.client.enable.server.upsert.mutations"; + + // Update Cache Frequency default config attribute + public static final String DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB = + "phoenix.default.update.cache.frequency"; + + // Update Cache Frequency for indexes in PENDING_DISABLE state + public static final String UPDATE_CACHE_FREQUENCY_FOR_PENDING_DISABLED_INDEX = + "phoenix.update.cache.frequency.pending.disable.index"; + + // whether to validate last ddl timestamps during client operations + public static final String LAST_DDL_TIMESTAMP_VALIDATION_ENABLED = + "phoenix.ddl.timestamp.validation.enabled"; + + // Whether to enable cost-based-decision in the query optimizer + public static final String COST_BASED_OPTIMIZER_ENABLED = "phoenix.costbased.optimizer.enabled"; + public static final String SMALL_SCAN_THRESHOLD_ATTRIB = "phoenix.query.smallScanThreshold"; + public static final String WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB = + "phoenix.query.wildcard.dynamicColumns"; + public static final String LOG_LEVEL = "phoenix.log.level"; + public static final String AUDIT_LOG_LEVEL = "phoenix.audit.log.level"; + public static final String LOG_BUFFER_SIZE = "phoenix.log.buffer.size"; + public static final String LOG_BUFFER_WAIT_STRATEGY = "phoenix.log.wait.strategy"; + public static final String LOG_SAMPLE_RATE = "phoenix.log.sample.rate"; + public static final String LOG_HANDLER_COUNT = "phoenix.log.handler.count"; + + public static final String SYSTEM_CATALOG_SPLITTABLE = "phoenix.system.catalog.splittable"; + + // The parameters defined for handling task stored in table SYSTEM.TASK + // The time interval between periodic scans of table SYSTEM.TASK + public static final String 
TASK_HANDLING_INTERVAL_MS_ATTRIB = "phoenix.task.handling.interval.ms"; + // The maximum time for a task to stay in table SYSTEM.TASK + public static final String TASK_HANDLING_MAX_INTERVAL_MS_ATTRIB = + "phoenix.task.handling.maxInterval.ms"; + // The initial delay before the first task from table SYSTEM.TASK is handled + public static final String TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB = + "phoenix.task.handling.initial.delay.ms"; + // The minimum age of an unverified global index row to be eligible for deletion + public static final String GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB = + "phoenix.global.index.row.age.threshold.to.delete.ms"; + // Enable the IndexRegionObserver Coprocessor + public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = + "phoenix.index.region.observer.enabled"; + // Whether IndexRegionObserver/GlobalIndexChecker is enabled for all tables + public static final String INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES_ATTRIB = + "phoenix.index.region.observer.enabled.all.tables"; + // Enable Phoenix server paging + public static final String PHOENIX_SERVER_PAGING_ENABLED_ATTRIB = "phoenix.server.paging.enabled"; + // Enable support for long view index(default is false) + public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled"; + // The number of index rows to be rebuild in one RPC call + public static final String INDEX_REBUILD_PAGE_SIZE_IN_ROWS = + "phoenix.index.rebuild_page_size_in_rows"; + // The number of index rows to be scanned in one RPC call + String INDEX_PAGE_SIZE_IN_ROWS = "phoenix.index.page_size_in_rows"; + // Flag indicating that server side masking of ttl expired rows is enabled. + public static final String PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED = + "phoenix.ttl.server_side.masking.enabled"; + // The time limit on the amount of work to be done in one RPC call + public static final String PHOENIX_SERVER_PAGE_SIZE_MS = "phoenix.server.page.size.ms"; + // Phoenix TTL implemented by CompactionScanner and TTLRegionScanner is enabled + public static final String PHOENIX_TABLE_TTL_ENABLED = "phoenix.table.ttl.enabled"; + // Copied here to avoid dependency on hbase-server + public static final String WAL_EDIT_CODEC_ATTRIB = "hbase.regionserver.wal.codec"; + // Property to know whether TTL at View Level is enabled + public static final String PHOENIX_VIEW_TTL_ENABLED = "phoenix.view.ttl.enabled"; + + public static final String PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT = + "phoenix.view.ttl.tenant_views_per_scan.limit"; + + // Before 4.15 when we created a view we included the parent table column metadata in the view + // metadata. After PHOENIX-3534 we allow SYSTEM.CATALOG to split and no longer store the parent + // table column metadata along with the child view metadata. When we resolve a child view, we + // resolve its ancestors and include their columns. + // Also, before 4.15 when we added a column to a base table we would have to propagate the + // column metadata to all its child views. After PHOENIX-3534 we no longer propagate metadata + // changes from a parent to its children (we just resolve its ancestors and include their columns) + // + // The following config is used to continue writing the parent table column metadata while + // creating a view and also prevent metadata changes to a parent table/view that needs to be + // propagated to its children. 
This is done to allow rollback of the splittable SYSTEM.CATALOG + // feature + // + // By default this config is false meaning that rolling back the upgrade is not possible + // If this config is true and you want to rollback the upgrade be sure to run the sql commands in + // UpgradeUtil.addParentToChildLink which will recreate the PARENT->CHILD links in SYSTEM.CATALOG. + // This is needed + // as from 4.15 onwards the PARENT->CHILD links are stored in a separate SYSTEM.CHILD_LINK table. + public static final String ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = + "phoenix.allow.system.catalog.rollback"; + + // Phoenix parameter used to indicate what implementation is used for providing the client + // stats guide post cache. + // QueryServicesOptions.DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS is used if this is not provided + public static final String GUIDE_POSTS_CACHE_FACTORY_CLASS = + "phoenix.guide.posts.cache.factory.class"; + + public static final String PENDING_MUTATIONS_DDL_THROW_ATTRIB = + "phoenix.pending.mutations.before.ddl.throw"; + + // The range of bins for latency metrics for histogram. + public static final String PHOENIX_HISTOGRAM_LATENCY_RANGES = "phoenix.histogram.latency.ranges"; + // The range of bins for size metrics for histogram. + public static final String PHOENIX_HISTOGRAM_SIZE_RANGES = "phoenix.histogram.size.ranges"; + + // Connection Query Service Metrics Configs + String CONNECTION_QUERY_SERVICE_METRICS_ENABLED = "phoenix.conn.query.service.metrics.enabled"; + String CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_CLASSNAME = + "phoenix.monitoring.connection.query.service.metricProvider.className"; + String CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED = + "phoenix.conn.query.service.metricsPublisher.enabled"; + // The range of bins for Connection Query Service Metrics of histogram. + String CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES = + "phoenix.conn.query.service.histogram.size.ranges"; + + // This config is used to move (copy and delete) the child links from the SYSTEM.CATALOG to + // SYSTEM.CHILD_LINK table. + // As opposed to a copy and async (out of band) delete. + public static final String MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED = + "phoenix.move.child_link.during.upgrade"; + + /** + * Parameter to indicate the source of operation attribute. It can include metadata about the + * customer, service, etc. + */ + String SOURCE_OPERATION_ATTRIB = "phoenix.source.operation"; + + // The max point keys that can be generated for large in list clause + public static final String MAX_IN_LIST_SKIP_SCAN_SIZE = "phoenix.max.inList.skipScan.size"; + + /** + * Parameter to skip the system tables existence check to avoid unnecessary calls to Region server + * holding the SYSTEM.CATALOG table in batch oriented jobs. + */ + String SKIP_SYSTEM_TABLES_EXISTENCE_CHECK = "phoenix.skip.system.tables.existence.check"; + + /** + * Parameter to skip the minimum version check for system table upgrades + */ + String SKIP_UPGRADE_BLOCK_CHECK = "phoenix.skip.upgrade.block.check"; + + /** + * Config key to represent max region locations to be displayed as part of the Explain plan + * output. 
+ */ + String MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN = "phoenix.max.region.locations.size.explain.plan"; + + /** + * Parameter to disable the server merges for hinted uncovered indexes + */ + String SERVER_MERGE_FOR_UNCOVERED_INDEX = "phoenix.query.global.server.merge.enable"; + String PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS = + "phoenix.metadata.cache.invalidation.timeoutMs"; + // Default to 10 seconds. + long PHOENIX_METADATA_CACHE_INVALIDATION_TIMEOUT_MS_DEFAULT = 10 * 1000; + String PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED = "phoenix.metadata.invalidate.cache.enabled"; + + String PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE = + "phoenix.metadata.cache.invalidation.threadPool.size"; + /** + * Param to determine whether client can disable validation to figure out if any of the descendent + * views extend primary key of their parents. Since this is a bit of expensive call, we can opt in + * to disable it. By default, this check will always be performed while creating index + * (PHOENIX-7067) on any table or view. This config can be used for disabling other subtree + * validation purpose as well. + */ + String DISABLE_VIEW_SUBTREE_VALIDATION = "phoenix.disable.view.subtree.validation"; + + boolean DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION = false; + + /** + * Param to enable updatable view restriction that only mark view as updatable if rows cannot + * overlap with other updatable views. + */ + String PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED = "phoenix.updatable.view.restriction.enabled"; + + boolean DEFAULT_PHOENIX_UPDATABLE_VIEW_RESTRICTION_ENABLED = false; + + /** + * Only used by tests: parameter to determine num of regionservers to be created by + * MiniHBaseCluster. + */ + String TESTS_MINI_CLUSTER_NUM_REGION_SERVERS = "phoenix.tests.minicluster.numregionservers"; + + /** + * Config to inject any processing after the client retrieves dummy result from the server. + */ + String PHOENIX_POST_DUMMY_PROCESS = "phoenix.scanning.result.post.dummy.process"; + + /** + * Config to inject any processing after the client retrieves valid result from the server. + */ + String PHOENIX_POST_VALID_PROCESS = "phoenix.scanning.result.post.valid.process"; + + /** + * New start rowkey to be used by paging region scanner for the scan. + */ + String PHOENIX_PAGING_NEW_SCAN_START_ROWKEY = "phoenix.paging.start.newscan.startrow"; + + /** + * New start rowkey to be included by paging region scanner for the scan. The value of the + * attribute is expected to be boolean. + */ + String PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE = + "phoenix.paging.start.newscan.startrow.include"; + + /** + * Num of retries while retrieving the region location details for the given table. + */ + String PHOENIX_GET_REGIONS_RETRIES = "phoenix.get.table.regions.retries"; + + int DEFAULT_PHOENIX_GET_REGIONS_RETRIES = 10; + + String PHOENIX_GET_METADATA_READ_LOCK_ENABLED = "phoenix.get.metadata.read.lock.enabled"; + + /** + * If server side metadata cache is empty, take Phoenix writeLock for the given row and make sure + * we can acquire the writeLock within the configurable duration. 
+ */ + String PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT = "phoenix.metadata.update.rowlock.timeout"; + + long DEFAULT_PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT = 60000; + + /** + * Get executor service used for parallel scans + */ + public ThreadPoolExecutor getExecutor(); + + /** + * Get the memory manager used to track memory usage + */ + public MemoryManager getMemoryManager(); + + /** + * Get the properties from the HBase configuration in a read-only structure that avoids any + * synchronization + */ + public ReadOnlyProps getProps(); + + /** + * Get query optimizer used to choose the best query plan + */ + public QueryOptimizer getOptimizer(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesImpl.java index 564da60e6ad..2c9413f40cb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,20 +19,13 @@ import org.apache.phoenix.util.ReadOnlyProps; - - - - /** - * * Real implementation of QueryServices for use in runtime and perf testing - * - * * @since 0.1 */ public final class QueryServicesImpl extends BaseQueryServicesImpl { - - public QueryServicesImpl(ReadOnlyProps defaultProps) { - super(defaultProps, QueryServicesOptions.withDefaults()); - } + + public QueryServicesImpl(ReadOnlyProps defaultProps) { + super(defaultProps, QueryServicesOptions.withDefaults()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java index 73e72c36e41..bc7527be7d6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +24,7 @@ import static org.apache.phoenix.query.QueryServices.AUTO_UPGRADE_ENABLED; import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_PRODUCER_ATTRIB_NAME; import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_ROUND_ROBIN_ATTRIB; +import static org.apache.phoenix.query.QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD; import static org.apache.phoenix.query.QueryServices.CLIENT_METRICS_TAG; import static org.apache.phoenix.query.QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB; import static org.apache.phoenix.query.QueryServices.COLLECT_REQUEST_LEVEL_METRICS; @@ -32,8 +33,8 @@ import static org.apache.phoenix.query.QueryServices.CONNECTION_ACTIVITY_LOGGING_INTERVAL; import static org.apache.phoenix.query.QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES; import static org.apache.phoenix.query.QueryServices.CONNECTION_QUERY_SERVICE_METRICS_ENABLED; -import static org.apache.phoenix.query.QueryServices.CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED; import static org.apache.phoenix.query.QueryServices.CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_CLASSNAME; +import static org.apache.phoenix.query.QueryServices.CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED; import static org.apache.phoenix.query.QueryServices.COST_BASED_OPTIMIZER_ENABLED; import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_ATTRIB; import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB; @@ -61,6 +62,7 @@ import static org.apache.phoenix.query.QueryServices.LOG_SAMPLE_RATE; import static org.apache.phoenix.query.QueryServices.MASTER_INFO_PORT_ATTRIB; import static org.apache.phoenix.query.QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB; +import static org.apache.phoenix.query.QueryServices.MAX_IN_LIST_SKIP_SCAN_SIZE; import static org.apache.phoenix.query.QueryServices.MAX_MEMORY_PERC_ATTRIB; import static org.apache.phoenix.query.QueryServices.MAX_MUTATION_SIZE_ATTRIB; import static org.apache.phoenix.query.QueryServices.MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN; @@ -75,6 +77,7 @@ import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB; import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK; import static org.apache.phoenix.query.QueryServices.PHOENIX_ACLS_ENABLED; +import static org.apache.phoenix.query.QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED; import static org.apache.phoenix.query.QueryServices.QUERY_SERVICES_NAME; import static org.apache.phoenix.query.QueryServices.QUEUE_SIZE_ATTRIB; import static org.apache.phoenix.query.QueryServices.REGIONSERVER_INFO_PORT_ATTRIB; @@ -111,22 +114,19 @@ import static org.apache.phoenix.query.QueryServices.USE_BYTE_BASED_REGEX_ATTRIB; import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB; import static org.apache.phoenix.query.QueryServices.USE_STATS_FOR_PARALLELIZATION; -import static org.apache.phoenix.query.QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD; -import static org.apache.phoenix.query.QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED; -import static org.apache.phoenix.query.QueryServices.MAX_IN_LIST_SKIP_SCAN_SIZE; import static org.apache.phoenix.query.QueryServices.WAL_EDIT_CODEC_ATTRIB; import java.util.Map.Entry; -import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.schema.ConnectionProperty; -import org.apache.phoenix.schema.PIndexState; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.ipc.controller.ClientRpcControllerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.log.LogLevel; +import org.apache.phoenix.schema.ConnectionProperty; +import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; import org.apache.phoenix.schema.PTableRefFactory; @@ -137,862 +137,883 @@ /** * Options for {@link QueryServices}. - * - * * @since 0.1 */ public class QueryServicesOptions { - public static final int DEFAULT_KEEP_ALIVE_MS = 60000; - public static final int DEFAULT_THREAD_POOL_SIZE = 128; - public static final int DEFAULT_QUEUE_SIZE = 5000; - public static final int UNLIMITED_QUEUE_SIZE = -1; - public static final int DEFAULT_THREAD_TIMEOUT_MS = 600000; // 10min - public static final int DEFAULT_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m - public static final int DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m - public static final int DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m - public static final boolean DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED = true; - public static final boolean DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED = true; - public static final boolean DEFAULT_SERVER_ORDERBY_SPOOLING_ENABLED = true; - public static final String DEFAULT_SPOOL_DIRECTORY = System.getProperty("java.io.tmpdir"); - public static final int DEFAULT_MAX_MEMORY_PERC = 15; // 15% of heap - public static final int DEFAULT_MAX_TENANT_MEMORY_PERC = 100; - public static final long DEFAULT_MAX_SERVER_CACHE_SIZE = 1024 * 1024 * 100; // 100 Mb - public static final int DEFAULT_TARGET_QUERY_CONCURRENCY = 32; - public static final int DEFAULT_MAX_QUERY_CONCURRENCY = 64; - public static final String DEFAULT_DATE_FORMAT = DateUtil.DEFAULT_DATE_FORMAT; - public static final String DEFAULT_DATE_FORMAT_TIMEZONE = DateUtil.DEFAULT_TIME_ZONE_ID; - public static final boolean DEFAULT_CALL_QUEUE_ROUND_ROBIN = true; - public static final int DEFAULT_MAX_MUTATION_SIZE = 500000; - public static final int DEFAULT_MAX_MUTATION_SIZE_BYTES = 104857600; // 100 Mb - public static final int DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE = 10485760; // 10 Mb - public static final boolean DEFAULT_USE_INDEXES = true; // Use indexes - public static final boolean DEFAULT_IMMUTABLE_ROWS = false; // Tables rows may be updated - public static final boolean DEFAULT_DROP_METADATA = true; // Drop meta data also. - public static final long DEFAULT_DRIVER_SHUTDOWN_TIMEOUT_MS = 5 * 1000; // Time to wait in ShutdownHook to exit gracefully. 
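The client-side defaults above correspond to the QueryServices keys defined earlier in this patch, and they are ordinary connection properties that a client can override. The following is only a minimal sketch, assuming a Phoenix client driver on the classpath and a reachable cluster; the JDBC URL and the chosen values are placeholders, not recommendations.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

public class PhoenixClientConfigSketch {
  public static void main(String[] args) throws SQLException {
    Properties props = new Properties();
    // Keys mirror the QueryServices constants in this patch; values are examples only.
    props.setProperty("phoenix.query.threadPoolSize", "64");
    props.setProperty("phoenix.query.timeoutMs", "120000");
    props.setProperty("phoenix.mutate.batchSize", "500");
    // Placeholder URL; use the quorum of the target cluster.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props)) {
      System.out.println("autoCommit=" + conn.getAutoCommit());
    }
  }
}
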
- public static final boolean DEFAULT_TRACING_ENABLED = false; - public static final int DEFAULT_TRACING_THREAD_POOL_SIZE = 5; - public static final int DEFAULT_TRACING_BATCH_SIZE = 100; - public static final int DEFAULT_TRACING_TRACE_BUFFER_SIZE = 1000; - public static final int DEFAULT_MAX_INDEXES_PER_TABLE = 10; - public static final int DEFAULT_CLIENT_INDEX_ASYNC_THRESHOLD = 0; - public static final boolean DEFAULT_SERVER_SIDE_MASKING_ENABLED = false; - - public final static int DEFAULT_MUTATE_BATCH_SIZE = 100; // Batch size for UPSERT SELECT and DELETE - //Batch size in bytes for UPSERT, SELECT and DELETE. By default, 2MB - public final static long DEFAULT_MUTATE_BATCH_SIZE_BYTES = 2097152; - // The only downside of it being out-of-sync is that the parallelization of the scan won't be as balanced as it could be. - public static final int DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS = 30000; // 30 sec (with no activity) - public static final int DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS = 30 * 60000; // 30 minutes - public static final int DEFAULT_SCAN_CACHE_SIZE = 1000; - public static final int DEFAULT_MAX_INTRA_REGION_PARALLELIZATION = DEFAULT_MAX_QUERY_CONCURRENCY; - public static final int DEFAULT_DISTINCT_VALUE_COMPRESS_THRESHOLD = 1024 * 1024 * 1; // 1 Mb - public static final int DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE = 1024 * 1024 * 1; // 1 Mb - public static final int DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD = 3; - public static final long DEFAULT_MAX_SPOOL_TO_DISK_BYTES = 1024000000; - // Only the first chunked batches are fetched in parallel, so this default - // should be on the relatively bigger side of things. Bigger means more - // latency and client-side spooling/buffering. Smaller means less initial - // latency and less parallelization. 
- public static final long DEFAULT_SCAN_RESULT_CHUNK_SIZE = 2999; - public static final boolean DEFAULT_IS_NAMESPACE_MAPPING_ENABLED = false; - public static final boolean DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE = true; - public static final int DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE = 50000; - - // - // Spillable GroupBy - SPGBY prefix - // - // Enable / disable spillable group by - public static final boolean DEFAULT_GROUPBY_SPILLABLE = true; - // Number of spill files / partitions the keys are distributed to - // Each spill file fits 2GB of data - public static final int DEFAULT_GROUPBY_SPILL_FILES = 2; - // Max size of 1st level main memory cache in bytes --> upper bound - public static final long DEFAULT_GROUPBY_MAX_CACHE_MAX = 1024L*1024L*100L; // 100 Mb - - public static final long DEFAULT_SEQUENCE_CACHE_SIZE = 100; // reserve 100 sequences at a time - public static final int GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN = 10; - public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS = 60000 * 30; // 30 mins - public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L*1024L*20L; // 20 Mb - public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L*1024L*10L; // 10 Mb - public static final int DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES = 1000; - public static final int DEFAULT_CLOCK_SKEW_INTERVAL = 2000; - public static final boolean DEFAULT_INDEX_FAILURE_HANDLING_REBUILD = true; // auto rebuild on - public static final boolean DEFAULT_INDEX_FAILURE_BLOCK_WRITE = false; - public static final boolean DEFAULT_INDEX_FAILURE_DISABLE_INDEX = true; - public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; - public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 60000; // 60 secs - public static final long DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY = 10000; // 10 secs - public static final long DEFAULT_START_TRUNCATE_TASK_DELAY = 20000; // 20 secs - public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME = 1; // 1 ms - public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME = 60000 * 3; // 3 mins - // 30 min rpc timeout * 5 tries, with 2100ms total pause time between retries - public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 30000 * 60) + 2100; - public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 30000 * 60; // 30 mins - public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 30000 * 60; // 30 mins - public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 5 total tries at rpc level - public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = 60000 * 60 * 24; // 24 hrs - public static final long DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD = 30000; // 30 secs - - /** - * HConstants#HIGH_QOS is the max we will see to a standard table. 
We go higher to differentiate - * and give some room for things in the middle - */ - public static final int DEFAULT_SERVER_SIDE_PRIORITY = 500; - public static final int DEFAULT_INDEX_PRIORITY = 1000; - public static final int DEFAULT_METADATA_PRIORITY = 2000; - public static final int DEFAULT_INVALIDATE_METADATA_CACHE_PRIORITY = 3000; - public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true; - public static final int DEFAULT_INDEX_HANDLER_COUNT = 30; - public static final int DEFAULT_METADATA_HANDLER_COUNT = 30; - public static final int DEFAULT_SERVERSIDE_HANDLER_COUNT = 30; - public static final int DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT = 10; - public static final int DEFAULT_SYSTEM_MAX_VERSIONS = 1; - public static final boolean DEFAULT_SYSTEM_KEEP_DELETED_CELLS = false; - - // Retries when doing server side writes to SYSTEM.CATALOG - // 20 retries with 100 pause = 230 seconds total retry time - public static final int DEFAULT_METADATA_WRITE_RETRIES_NUMBER = 20; - public static final int DEFAULT_METADATA_WRITE_RETRY_PAUSE = 100; - - public static final int DEFAULT_TRACING_PAGE_SIZE = 100; - /** - * Configuration key to overwrite the tablename that should be used as the target table - */ - public static final String DEFAULT_TRACING_STATS_TABLE_NAME = "SYSTEM.TRACING_STATS"; - public static final String DEFAULT_TRACING_FREQ = Tracing.Frequency.NEVER.getKey(); - public static final double DEFAULT_TRACING_PROBABILITY_THRESHOLD = 0.05; - - public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 15 * 60000; // 15min - public static final int DEFAULT_STATS_GUIDEPOST_PER_REGION = 0; // Uses guidepost width by default - // Since we're not taking into account the compression done by FAST_DIFF in our - // counting of the bytes, default guidepost width to 100MB * 3 (where 3 is the - // compression we're getting) - public static final long DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES = 3* 100 * 1024 *1024; - public static final boolean DEFAULT_STATS_USE_CURRENT_TIME = true; - public static final boolean DEFAULT_RUN_UPDATE_STATS_ASYNC = true; - public static final boolean DEFAULT_COMMIT_STATS_ASYNC = true; - public static final int DEFAULT_STATS_POOL_SIZE = 4; - // Maximum size (in bytes) that cached table stats should take upm - public static final long DEFAULT_STATS_MAX_CACHE_SIZE = 256 * 1024 * 1024; - // Allow stats collection to be initiated by client multiple times immediately - public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0; - public static final int DEFAULT_STATS_CACHE_THREAD_POOL_SIZE = 4; - - public static final boolean DEFAULT_USE_REVERSE_SCAN = true; - - public static final String DEFAULT_CREATE_INDEX_STATE= PIndexState.BUILDING.toString(); - public static final boolean DEFAULT_DISABLE_ON_DROP = false; - - /** - * Use only first time SYSTEM.SEQUENCE table is created. - */ - public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 0; - /** - * Default value for coprocessor priority is between SYSTEM and USER priority. - */ - public static final int DEFAULT_COPROCESSOR_PRIORITY = Coprocessor.PRIORITY_SYSTEM/2 + Coprocessor.PRIORITY_USER/2; // Divide individually to prevent any overflow - public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = true; - public static final boolean DEFAULT_EXPLAIN_ROW_COUNT = true; - public static final boolean DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE = true; - public static final int DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK = 10; - public static final long DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK = 5 * 1000; // 5 seconds. 
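A note on DEFAULT_COPROCESSOR_PRIORITY above: the "Divide individually to prevent any overflow" comment refers to the fact that averaging two large ints as (a + b) / 2 can overflow, while a / 2 + b / 2 cannot. A minimal sketch with placeholder values (not the real HBase Coprocessor priority constants):

public class MidpointOverflowSketch {
  public static void main(String[] args) {
    int prioritySystem = 1_800_000_000; // placeholder for a large priority constant
    int priorityUser = 1_800_000_000;   // placeholder for a large priority constant
    int naive = (prioritySystem + priorityUser) / 2; // sum overflows, result is negative
    int safe = prioritySystem / 2 + priorityUser / 2; // stays within int range
    System.out.println("naive=" + naive + ", safe=" + safe);
  }
}
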
- public static final boolean DEFAULT_STORE_NULLS = false; - - // TODO Change this to true as part of PHOENIX-1543 - // We'll also need this for transactions to work correctly - public static final boolean DEFAULT_AUTO_COMMIT = false; - public static final boolean DEFAULT_TABLE_ISTRANSACTIONAL = false; - public static final String DEFAULT_TRANSACTION_PROVIDER = TransactionFactory.Provider.getDefault().name(); - public static final boolean DEFAULT_TRANSACTIONS_ENABLED = false; - public static final boolean DEFAULT_IS_GLOBAL_METRICS_ENABLED = true; - public static final boolean DEFAULT_IS_TABLE_LEVEL_METRICS_ENABLED = false; - public static final boolean DEFAULT_IS_METRIC_PUBLISHER_ENABLED = false; - public static final String DEFAULT_ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS = null; //All the tables metrics will be allowed. - public static final String DEFAULT_METRIC_PUBLISHER_CLASS_NAME = "org.apache.phoenix.monitoring.JmxMetricProvider"; - public static final String DEFAULT_CLIENT_METRICS_TAG = "FAT_CLIENT"; - - public static final boolean DEFAULT_TRANSACTIONAL = false; - public static final boolean DEFAULT_MULTI_TENANT = false; - public static final boolean DEFAULT_AUTO_FLUSH = false; - - private static final String DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY = ClientRpcControllerFactory.class.getName(); - - public static final String DEFAULT_CONSISTENCY_LEVEL = Consistency.STRONG.toString(); - - public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = false; - public static final boolean DEFAULT_FORCE_ROW_KEY_ORDER = false; - public static final boolean DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS = false; - public static final boolean DEFAULT_REQUEST_LEVEL_METRICS_ENABLED = false; - public static final boolean DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE = true; - public static final int DEFAULT_MAX_VERSIONS_TRANSACTIONAL = Integer.MAX_VALUE; - - public static final boolean DEFAULT_RETURN_SEQUENCE_VALUES = false; - public static final String DEFAULT_EXTRA_JDBC_ARGUMENTS = ""; - - public static final long DEFAULT_INDEX_POPULATION_SLEEP_TIME = 5000; - - // Phoenix Connection Query Service configuration Defaults - public static final String DEFAULT_QUERY_SERVICES_NAME = "DEFAULT_CQSN"; - public static final String DEFAULT_CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES = - "1, 10, 100, 500, 1000"; - public static final boolean DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED = - false; - public static final boolean DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED = false; - - public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true; - public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS = - DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 2; - public static final int DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS = - (3 * DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD) / 4; - public static final int DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE = 10; - public static final boolean DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE = true; - public static final float DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD = 0.2f; - - public static final boolean DEFAULT_INDEX_ASYNC_BUILD_ENABLED = true; - - public static final String DEFAULT_CLIENT_CACHE_ENCODING = PTableRefFactory.Encoding.OBJECT.toString(); - public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true; - public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 86400000; - public static final int DEFAULT_COLUMN_ENCODED_BYTES = QualifierEncodingScheme.TWO_BYTE_QUALIFIERS.getSerializedMetadataValue(); - public static final String 
DEFAULT_IMMUTABLE_STORAGE_SCHEME = ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.toString(); - public static final String DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME = ImmutableStorageScheme.ONE_CELL_PER_COLUMN.toString(); - - - //by default, max connections from one client to one cluster is unlimited - public static final int DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0; - //by default, max internal connections from one client to one cluster is unlimited - public static final int DEFAULT_INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0; - - public static final boolean DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED = false; - public static final int DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS = 15; - public static final boolean DEFAULT_STATS_COLLECTION_ENABLED = true; - public static final boolean DEFAULT_USE_STATS_FOR_PARALLELIZATION = true; - - //Security defaults - public static final boolean DEFAULT_PHOENIX_ACLS_ENABLED = false; - - public static final int DEFAULT_SMALL_SCAN_THRESHOLD = 100; - - /** - * Metadata caching configs, see https://issues.apache.org/jira/browse/PHOENIX-6883. - * Disable the boolean flags and set UCF=always to disable the caching re-design. - * - * Disable caching re-design if you use Online Data Format Change since the cutover logic - * is currently incompatible and clients may not learn about the physical table change. - * See https://issues.apache.org/jira/browse/PHOENIX-7284. - * - * Disable caching re-design if your clients will not have ADMIN perms to call region server - * RPC. See https://issues.apache.org/jira/browse/HBASE-28508 - */ - public static final long DEFAULT_UPDATE_CACHE_FREQUENCY - = (long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS"); - public static final boolean DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED = false; - public static final boolean DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED = false; - public static final int DEFAULT_PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE = 20; - - // default system task handling interval in milliseconds - public static final long DEFAULT_TASK_HANDLING_INTERVAL_MS = 60*1000; // 1 min - public static final long DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS = 30*60*1000; // 30 min - public static final long DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS = 10*1000; // 10 sec - - public static final long DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS = 7*24*60*60*1000; /* 7 days */ - public static final boolean DEFAULT_INDEX_REGION_OBSERVER_ENABLED = true; - - public static final String DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES = Boolean.toString(true); - public static final boolean DEFAULT_PHOENIX_SERVER_PAGING_ENABLED = true; - public static final long DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS = 32 * 1024; - public static final long DEFAULT_INDEX_PAGE_SIZE_IN_ROWS = 32 * 1024; - - public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false; - - public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true; - - public static final String DEFAULT_SCHEMA = null; - public static final String DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING = "BASE64"; // for backward compatibility, till - // 4.10, psql and CSVBulkLoad - // expects binary data to be base 64 - // encoded - // RS -> RS calls for upsert select statements are disabled by default - public static final boolean DEFAULT_ENABLE_SERVER_UPSERT_SELECT = false; - - // By default generally allow server trigger mutations - public static final boolean 
DEFAULT_ENABLE_SERVER_SIDE_DELETE_MUTATIONS = true; - public static final boolean DEFAULT_ENABLE_SERVER_SIDE_UPSERT_MUTATIONS = true; - - public static final boolean DEFAULT_COST_BASED_OPTIMIZER_ENABLED = false; - public static final boolean DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB = false; - public static final String DEFAULT_LOGGING_LEVEL = LogLevel.OFF.name(); - public static final String DEFAULT_AUDIT_LOGGING_LEVEL = LogLevel.OFF.name(); - public static final String DEFAULT_LOG_SAMPLE_RATE = "1.0"; - public static final int DEFAULT_LOG_SALT_BUCKETS = 32; - public static final int DEFAULT_SALT_BUCKETS = 0; - - public static final boolean DEFAULT_SYSTEM_CATALOG_SPLITTABLE = true; - - public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = "org.apache.phoenix.query.DefaultGuidePostsCacheFactory"; - - public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false; - - public static final boolean DEFAULT_PENDING_MUTATIONS_DDL_THROW = false; - public static final boolean DEFAULT_SKIP_SYSTEM_TABLES_EXISTENCE_CHECK = false; - public static final boolean DEFAULT_MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED = true; - public static final int DEFAULT_TIMEOUT_DURING_UPGRADE_MS = 60000 * 30; // 30 mins - public static final int DEFAULT_SCAN_PAGE_SIZE = 32768; - public static final boolean DEFAULT_APPLY_TIME_ZONE_DISPLACMENT = false; - public static final boolean DEFAULT_PHOENIX_TABLE_TTL_ENABLED = true; - public static final boolean DEFAULT_PHOENIX_VIEW_TTL_ENABLED = true; - public static final int DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT = 100; - - public static final int DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN = 5; - public static final boolean DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX = true; - - public static final boolean DEFAULT_PHOENIX_GET_METADATA_READ_LOCK_ENABLED = true; - - - private final Configuration config; - - private QueryServicesOptions(Configuration config) { - this.config = config; - } - - public ReadOnlyProps getProps(ReadOnlyProps defaultProps) { - return new ReadOnlyProps(defaultProps, config.iterator()); - } - - public QueryServicesOptions setAll(ReadOnlyProps props) { - for (Entry entry : props) { - config.set(entry.getKey(), entry.getValue()); - } - return this; - } - - public static QueryServicesOptions withDefaults() { - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - QueryServicesOptions options = new QueryServicesOptions(config) - .setIfUnset(STATS_USE_CURRENT_TIME_ATTRIB, DEFAULT_STATS_USE_CURRENT_TIME) - .setIfUnset(RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC) - .setIfUnset(COMMIT_STATS_ASYNC, DEFAULT_COMMIT_STATS_ASYNC) - .setIfUnset(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS) - .setIfUnset(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE) - .setIfUnset(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE) - .setIfUnset(THREAD_TIMEOUT_MS_ATTRIB, DEFAULT_THREAD_TIMEOUT_MS) - .setIfUnset(CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES) - .setIfUnset(SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES) - .setIfUnset(SPOOL_DIRECTORY, DEFAULT_SPOOL_DIRECTORY) - .setIfUnset(MAX_MEMORY_PERC_ATTRIB, DEFAULT_MAX_MEMORY_PERC) - .setIfUnset(MAX_TENANT_MEMORY_PERC_ATTRIB, DEFAULT_MAX_TENANT_MEMORY_PERC) - .setIfUnset(MAX_SERVER_CACHE_SIZE_ATTRIB, DEFAULT_MAX_SERVER_CACHE_SIZE) - .setIfUnset(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE) - .setIfUnset(DATE_FORMAT_ATTRIB, DEFAULT_DATE_FORMAT) - .setIfUnset(DATE_FORMAT_TIMEZONE_ATTRIB, 
DEFAULT_DATE_FORMAT_TIMEZONE) - .setIfUnset(STATS_UPDATE_FREQ_MS_ATTRIB, DEFAULT_STATS_UPDATE_FREQ_MS) - .setIfUnset(MIN_STATS_UPDATE_FREQ_MS_ATTRIB, DEFAULT_MIN_STATS_UPDATE_FREQ_MS) - .setIfUnset(STATS_CACHE_THREAD_POOL_SIZE, DEFAULT_STATS_CACHE_THREAD_POOL_SIZE) - .setIfUnset(CALL_QUEUE_ROUND_ROBIN_ATTRIB, DEFAULT_CALL_QUEUE_ROUND_ROBIN) - .setIfUnset(MAX_MUTATION_SIZE_ATTRIB, DEFAULT_MAX_MUTATION_SIZE) - .setIfUnset(ROW_KEY_ORDER_SALTED_TABLE_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER) - .setIfUnset(USE_INDEXES_ATTRIB, DEFAULT_USE_INDEXES) - .setIfUnset(IMMUTABLE_ROWS_ATTRIB, DEFAULT_IMMUTABLE_ROWS) - .setIfUnset(INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD) - .setIfUnset(MAX_SPOOL_TO_DISK_BYTES_ATTRIB, DEFAULT_MAX_SPOOL_TO_DISK_BYTES) - .setIfUnset(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA) - .setIfUnset(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE) - .setIfUnset(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX) - .setIfUnset(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES) - .setIfUnset(SEQUENCE_CACHE_SIZE_ATTRIB, DEFAULT_SEQUENCE_CACHE_SIZE) - .setIfUnset(SCAN_RESULT_CHUNK_SIZE, DEFAULT_SCAN_RESULT_CHUNK_SIZE) - .setIfUnset(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE) - .setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK) - .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK) - .setIfUnset(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED) - .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY) - .setIfUnset(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX) - .setIfUnset(FORCE_ROW_KEY_ORDER_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER) - .setIfUnset(COLLECT_REQUEST_LEVEL_METRICS, DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) - .setIfUnset(ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE) - .setIfUnset(ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE) - .setIfUnset(RENEW_LEASE_THRESHOLD_MILLISECONDS, DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS) - .setIfUnset(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS) - .setIfUnset(RENEW_LEASE_THREAD_POOL_SIZE, DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE) - .setIfUnset(IS_NAMESPACE_MAPPING_ENABLED, DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) - .setIfUnset(IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE) - .setIfUnset(LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE) - .setIfUnset(AUTO_UPGRADE_ENABLED, DEFAULT_AUTO_UPGRADE_ENABLED) - .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING) - .setIfUnset(TRACING_ENABLED, DEFAULT_TRACING_ENABLED) - .setIfUnset(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE) - .setIfUnset(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE) - .setIfUnset(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED) - .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION) - .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION) - .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING) - .setIfUnset(COST_BASED_OPTIMIZER_ENABLED, DEFAULT_COST_BASED_OPTIMIZER_ENABLED) - .setIfUnset(PHOENIX_ACLS_ENABLED, DEFAULT_PHOENIX_ACLS_ENABLED) - .setIfUnset(LOG_LEVEL, DEFAULT_LOGGING_LEVEL) - .setIfUnset(LOG_SAMPLE_RATE, DEFAULT_LOG_SAMPLE_RATE) - 
.setIfUnset("data.tx.pre.014.changeset.key", Boolean.FALSE.toString()) - .setIfUnset(CLIENT_METRICS_TAG, DEFAULT_CLIENT_METRICS_TAG) - .setIfUnset(CLIENT_INDEX_ASYNC_THRESHOLD, DEFAULT_CLIENT_INDEX_ASYNC_THRESHOLD) - .setIfUnset(PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, DEFAULT_SERVER_SIDE_MASKING_ENABLED) - .setIfUnset(QUERY_SERVICES_NAME, DEFAULT_QUERY_SERVICES_NAME) - .setIfUnset(INDEX_CREATE_DEFAULT_STATE, DEFAULT_CREATE_INDEX_STATE) - .setIfUnset(CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, - DEFAULT_CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES) - .setIfUnset(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, - DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED) - .setIfUnset(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED, - DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED) - .setIfUnset(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK, - DEFAULT_SKIP_SYSTEM_TABLES_EXISTENCE_CHECK) - .setIfUnset(MAX_IN_LIST_SKIP_SCAN_SIZE, DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE) - .setIfUnset(MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN, - DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN) - .setIfUnset(SERVER_MERGE_FOR_UNCOVERED_INDEX, - DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX) - .setIfUnset(MAX_IN_LIST_SKIP_SCAN_SIZE, DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE) - .setIfUnset(CONNECTION_ACTIVITY_LOGGING_ENABLED, DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED) - .setIfUnset(CONNECTION_ACTIVITY_LOGGING_INTERVAL, DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS); - - // HBase sets this to 1, so we reset it to something more appropriate. - // Hopefully HBase will change this, because we can't know if a user set - // it to 1, so we'll change it. - int scanCaching = config.getInt(SCAN_CACHE_SIZE_ATTRIB, 0); - if (scanCaching == 1) { - config.setInt(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE); - } else if (scanCaching <= 0) { // Provides the user with a way of setting it to 1 - config.setInt(SCAN_CACHE_SIZE_ATTRIB, 1); - } - return options; - } - - public Configuration getConfiguration() { - return config; - } - - private QueryServicesOptions setIfUnset(String name, int value) { - config.setIfUnset(name, Integer.toString(value)); - return this; - } - - private QueryServicesOptions setIfUnset(String name, boolean value) { - config.setIfUnset(name, Boolean.toString(value)); - return this; - } - - private QueryServicesOptions setIfUnset(String name, long value) { - config.setIfUnset(name, Long.toString(value)); - return this; - } - - private QueryServicesOptions setIfUnset(String name, String value) { - config.setIfUnset(name, value); - return this; - } - - public QueryServicesOptions setKeepAliveMs(int keepAliveMs) { - return set(KEEP_ALIVE_MS_ATTRIB, keepAliveMs); - } - - public QueryServicesOptions setThreadPoolSize(int threadPoolSize) { - return set(THREAD_POOL_SIZE_ATTRIB, threadPoolSize); - } - - public QueryServicesOptions setQueueSize(int queueSize) { - config.setInt(QUEUE_SIZE_ATTRIB, queueSize); - return this; - } - - public QueryServicesOptions setThreadTimeoutMs(int threadTimeoutMs) { - return set(THREAD_TIMEOUT_MS_ATTRIB, threadTimeoutMs); - } - - public QueryServicesOptions setClientSpoolThresholdBytes(long spoolThresholdBytes) { - return set(CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, spoolThresholdBytes); - } - - public QueryServicesOptions setServerSpoolThresholdBytes(long spoolThresholdBytes) { - return set(SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, spoolThresholdBytes); - } - - public QueryServicesOptions setSpoolDirectory(String spoolDirectory) { - return set(SPOOL_DIRECTORY, spoolDirectory); - } - - public 
QueryServicesOptions setMaxMemoryPerc(int maxMemoryPerc) { - return set(MAX_MEMORY_PERC_ATTRIB, maxMemoryPerc); - } - - public QueryServicesOptions setMaxTenantMemoryPerc(int maxTenantMemoryPerc) { - return set(MAX_TENANT_MEMORY_PERC_ATTRIB, maxTenantMemoryPerc); - } - - public QueryServicesOptions setMaxServerCacheSize(long maxServerCacheSize) { - return set(MAX_SERVER_CACHE_SIZE_ATTRIB, maxServerCacheSize); - } - - public QueryServicesOptions setMaxServerMetaDataCacheSize(long maxMetaDataCacheSize) { - return set(MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize); - } - - public QueryServicesOptions setMaxClientMetaDataCacheSize(long maxMetaDataCacheSize) { - return set(MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize); - } - - public QueryServicesOptions setScanFetchSize(int scanFetchSize) { - return set(SCAN_CACHE_SIZE_ATTRIB, scanFetchSize); - } - - public QueryServicesOptions setDateFormat(String dateFormat) { - return set(DATE_FORMAT_ATTRIB, dateFormat); - } - - public QueryServicesOptions setCallQueueRoundRobin(boolean isRoundRobin) { - return set(CALL_QUEUE_PRODUCER_ATTRIB_NAME, isRoundRobin); - } - - public QueryServicesOptions setMaxMutateSize(int maxMutateSize) { - return set(MAX_MUTATION_SIZE_ATTRIB, maxMutateSize); - } - - @Deprecated - public QueryServicesOptions setMutateBatchSize(int mutateBatchSize) { - return set(MUTATE_BATCH_SIZE_ATTRIB, mutateBatchSize); - } - - public QueryServicesOptions setDropMetaData(boolean dropMetadata) { - return set(DROP_METADATA_ATTRIB, dropMetadata); - } - - public QueryServicesOptions setGroupBySpill(boolean enabled) { - return set(GROUPBY_SPILLABLE_ATTRIB, enabled); - } - - public QueryServicesOptions setGroupBySpillMaxCacheSize(long size) { - return set(GROUPBY_MAX_CACHE_SIZE_ATTRIB, size); - } - - public QueryServicesOptions setGroupBySpillNumSpillFiles(long num) { - return set(GROUPBY_SPILL_FILES_ATTRIB, num); - } - - QueryServicesOptions set(String name, boolean value) { - config.set(name, Boolean.toString(value)); - return this; - } - - QueryServicesOptions set(String name, int value) { - config.set(name, Integer.toString(value)); - return this; - } - - QueryServicesOptions set(String name, String value) { - config.set(name, value); - return this; - } - - QueryServicesOptions set(String name, long value) { - config.set(name, Long.toString(value)); - return this; - } - - public int getKeepAliveMs() { - return config.getInt(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS); - } - - public int getThreadPoolSize() { - return config.getInt(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE); - } - - public int getQueueSize() { - return config.getInt(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE); - } - - public int getMaxMemoryPerc() { - return config.getInt(MAX_MEMORY_PERC_ATTRIB, DEFAULT_MAX_MEMORY_PERC); - } - - public int getMaxMutateSize() { - return config.getInt(MAX_MUTATION_SIZE_ATTRIB, DEFAULT_MAX_MUTATION_SIZE); - } - - @Deprecated - public int getMutateBatchSize() { - return config.getInt(MUTATE_BATCH_SIZE_ATTRIB, DEFAULT_MUTATE_BATCH_SIZE); - } - - public String getClientMetricTag() { - return config.get(QueryServices.CLIENT_METRICS_TAG, DEFAULT_CLIENT_METRICS_TAG); - } - - public boolean isUseIndexes() { - return config.getBoolean(USE_INDEXES_ATTRIB, DEFAULT_USE_INDEXES); - } - - public boolean isImmutableRows() { - return config.getBoolean(IMMUTABLE_ROWS_ATTRIB, DEFAULT_IMMUTABLE_ROWS); - } - - public boolean isDropMetaData() { - return config.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - } - - public 
boolean isSpillableGroupByEnabled() { - return config.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE); - } - - public long getSpillableGroupByMaxCacheSize() { - return config.getLongBytes(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX); - } - - public int getSpillableGroupByNumSpillFiles() { - return config.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES); - } - - public boolean isTracingEnabled() { - return config.getBoolean(TRACING_ENABLED, DEFAULT_TRACING_ENABLED); - } - - public QueryServicesOptions setTracingEnabled(boolean enable) { - config.setBoolean(TRACING_ENABLED, enable); - return this; - } - - public int getTracingThreadPoolSize() { - return config.getInt(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE); - } - - public int getTracingBatchSize() { - return config.getInt(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE); - } - - public int getTracingTraceBufferSize() { - return config.getInt(TRACING_TRACE_BUFFER_SIZE, DEFAULT_TRACING_TRACE_BUFFER_SIZE); - } - - public String getTableName() { - return config.get(TRACING_STATS_TABLE_NAME_ATTRIB, DEFAULT_TRACING_STATS_TABLE_NAME); - } - - - public boolean isGlobalMetricsEnabled() { - return config.getBoolean(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED); - } - - public String getMetricPublisherClass() { - return config.get(METRIC_PUBLISHER_CLASS_NAME, DEFAULT_METRIC_PUBLISHER_CLASS_NAME); - } - - public String getAllowedListTableNames() { - return config.get(ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS, - DEFAULT_ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS); - } - - public boolean isTableLevelMetricsEnabled() { - return config - .getBoolean(TABLE_LEVEL_METRICS_ENABLED, DEFAULT_IS_TABLE_LEVEL_METRICS_ENABLED); - } - - public void setTableLevelMetricsEnabled() { - set(TABLE_LEVEL_METRICS_ENABLED, true); - } - - public boolean isMetricPublisherEnabled() { - return config.getBoolean(METRIC_PUBLISHER_ENABLED, DEFAULT_IS_METRIC_PUBLISHER_ENABLED); - } - - public boolean isConnectionQueryServiceMetricsEnabled() { - return config.getBoolean(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, - DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED); - } - - public boolean isConnectionQueryServiceMetricsPublisherEnabled() { - return config.getBoolean(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED, - DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED); - } - - public String getQueryServicesName() { - return config.get(QUERY_SERVICES_NAME, DEFAULT_QUERY_SERVICES_NAME); - } - - public void setConnectionQueryServiceMetricsEnabled() { - set(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, true); - } - - public String getConnectionQueryServiceMetricsPublisherClass() { - return config.get(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_CLASSNAME, - DEFAULT_METRIC_PUBLISHER_CLASS_NAME); - } - - @VisibleForTesting - public void setAllowedListForTableLevelMetrics(String tableNameList){ - set(ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS,tableNameList); - } - - - public boolean isUseByteBasedRegex() { - return config.getBoolean(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX); - } - - public int getScanCacheSize() { - return config.getInt(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE); - } - - public QueryServicesOptions setMaxServerCacheTTLMs(int ttl) { - return set(MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, ttl); - } - - public QueryServicesOptions setMasterInfoPort(int port) { - return set(MASTER_INFO_PORT_ATTRIB, port); - } - - public QueryServicesOptions setRegionServerInfoPort(int port) { - return 
set(REGIONSERVER_INFO_PORT_ATTRIB, port); - } - - public QueryServicesOptions setRegionServerLeasePeriodMs(int period) { - return set(HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB, period); - } - - public QueryServicesOptions setRpcTimeoutMs(int timeout) { - return set(RPC_TIMEOUT_ATTRIB, timeout); - } - - public QueryServicesOptions setUseIndexes(boolean useIndexes) { - return set(USE_INDEXES_ATTRIB, useIndexes); - } - - public QueryServicesOptions setImmutableRows(boolean isImmutableRows) { - return set(IMMUTABLE_ROWS_ATTRIB, isImmutableRows); - } - - public QueryServicesOptions setWALEditCodec(String walEditCodec) { - return set(WAL_EDIT_CODEC_ATTRIB, walEditCodec); - } - - public QueryServicesOptions setStatsHistogramDepthBytes(long byteDepth) { - return set(STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, byteDepth); - } - - public QueryServicesOptions setStatsUpdateFrequencyMs(int frequencyMs) { - return set(STATS_UPDATE_FREQ_MS_ATTRIB, frequencyMs); - } - - public QueryServicesOptions setMinStatsUpdateFrequencyMs(int frequencyMs) { - return set(MIN_STATS_UPDATE_FREQ_MS_ATTRIB, frequencyMs); - } - - public QueryServicesOptions setStatsCacheThreadPoolSize(int threadPoolSize) { - return set(STATS_CACHE_THREAD_POOL_SIZE, threadPoolSize); - } - - public QueryServicesOptions setSequenceSaltBuckets(int saltBuckets) { - config.setInt(SEQUENCE_SALT_BUCKETS_ATTRIB, saltBuckets); - return this; - } - - public QueryServicesOptions setExplainChunkCount(boolean showChunkCount) { - config.setBoolean(EXPLAIN_CHUNK_COUNT_ATTRIB, showChunkCount); - return this; - } - - public QueryServicesOptions setTransactionsEnabled(boolean transactionsEnabled) { - config.setBoolean(TRANSACTIONS_ENABLED, transactionsEnabled); - return this; - } - - public QueryServicesOptions setExplainRowCount(boolean showRowCount) { - config.setBoolean(EXPLAIN_ROW_COUNT_ATTRIB, showRowCount); - return this; - } - - public QueryServicesOptions setAllowOnlineSchemaUpdate(boolean allow) { - config.setBoolean(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, allow); - return this; - } - - public QueryServicesOptions setNumRetriesForSchemaChangeCheck(int numRetries) { - config.setInt(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, numRetries); - return this; - } - - public QueryServicesOptions setDelayInMillisForSchemaChangeCheck(long delayInMillis) { - config.setLong(DELAY_FOR_SCHEMA_UPDATE_CHECK, delayInMillis); - return this; - - } - - public QueryServicesOptions setUseByteBasedRegex(boolean flag) { - config.setBoolean(USE_BYTE_BASED_REGEX_ATTRIB, flag); - return this; - } - - public QueryServicesOptions setForceRowKeyOrder(boolean forceRowKeyOrder) { - config.setBoolean(FORCE_ROW_KEY_ORDER_ATTRIB, forceRowKeyOrder); - return this; - } - - public QueryServicesOptions setExtraJDBCArguments(String extraArgs) { - config.set(EXTRA_JDBC_ARGUMENTS_ATTRIB, extraArgs); - return this; - } - - public QueryServicesOptions setRunUpdateStatsAsync(boolean flag) { - config.setBoolean(RUN_UPDATE_STATS_ASYNC, flag); - return this; - } - - public QueryServicesOptions setCommitStatsAsync(boolean flag) { - config.setBoolean(COMMIT_STATS_ASYNC, flag); - return this; - } - - public QueryServicesOptions setEnableRenewLease(boolean enable) { - config.setBoolean(RENEW_LEASE_ENABLED, enable); - return this; - } - - public QueryServicesOptions setIndexHandlerCount(int count) { - config.setInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, count); - return this; - } - - public QueryServicesOptions setMetadataHandlerCount(int count) { - config.setInt(QueryServices.METADATA_HANDLER_COUNT_ATTRIB, count); - return 
this; - } - - public QueryServicesOptions setHConnectionPoolCoreSize(int count) { - config.setInt(QueryServices.HCONNECTION_POOL_CORE_SIZE, count); - return this; - } - - public QueryServicesOptions setHConnectionPoolMaxSize(int count) { - config.setInt(QueryServices.HCONNECTION_POOL_MAX_SIZE, count); - return this; - } - - public QueryServicesOptions setMaxThreadsPerHTable(int count) { - config.setInt(QueryServices.HTABLE_MAX_THREADS, count); - return this; - } - - public QueryServicesOptions setDefaultIndexPopulationWaitTime(long waitTime) { - config.setLong(INDEX_POPULATION_SLEEP_TIME, waitTime); - return this; - } - - public QueryServicesOptions setUseStatsForParallelization(boolean flag) { - config.setBoolean(USE_STATS_FOR_PARALLELIZATION, flag); - return this; - } - - public QueryServicesOptions setIndexRebuildTaskInitialDelay(long waitTime) { - config.setLong(INDEX_REBUILD_TASK_INITIAL_DELAY, waitTime); - return this; - } - - public QueryServicesOptions setSequenceCacheSize(long sequenceCacheSize) { - config.setLong(SEQUENCE_CACHE_SIZE_ATTRIB, sequenceCacheSize); - return this; - } + public static final int DEFAULT_KEEP_ALIVE_MS = 60000; + public static final int DEFAULT_THREAD_POOL_SIZE = 128; + public static final int DEFAULT_QUEUE_SIZE = 5000; + public static final int UNLIMITED_QUEUE_SIZE = -1; + public static final int DEFAULT_THREAD_TIMEOUT_MS = 600000; // 10min + public static final int DEFAULT_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m + public static final int DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m + public static final int DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES = 1024 * 1024 * 20; // 20m + public static final boolean DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED = true; + public static final boolean DEFAULT_CLIENT_JOIN_SPOOLING_ENABLED = true; + public static final boolean DEFAULT_SERVER_ORDERBY_SPOOLING_ENABLED = true; + public static final String DEFAULT_SPOOL_DIRECTORY = System.getProperty("java.io.tmpdir"); + public static final int DEFAULT_MAX_MEMORY_PERC = 15; // 15% of heap + public static final int DEFAULT_MAX_TENANT_MEMORY_PERC = 100; + public static final long DEFAULT_MAX_SERVER_CACHE_SIZE = 1024 * 1024 * 100; // 100 Mb + public static final int DEFAULT_TARGET_QUERY_CONCURRENCY = 32; + public static final int DEFAULT_MAX_QUERY_CONCURRENCY = 64; + public static final String DEFAULT_DATE_FORMAT = DateUtil.DEFAULT_DATE_FORMAT; + public static final String DEFAULT_DATE_FORMAT_TIMEZONE = DateUtil.DEFAULT_TIME_ZONE_ID; + public static final boolean DEFAULT_CALL_QUEUE_ROUND_ROBIN = true; + public static final int DEFAULT_MAX_MUTATION_SIZE = 500000; + public static final int DEFAULT_MAX_MUTATION_SIZE_BYTES = 104857600; // 100 Mb + public static final int DEFAULT_HBASE_CLIENT_KEYVALUE_MAXSIZE = 10485760; // 10 Mb + public static final boolean DEFAULT_USE_INDEXES = true; // Use indexes + public static final boolean DEFAULT_IMMUTABLE_ROWS = false; // Tables rows may be updated + public static final boolean DEFAULT_DROP_METADATA = true; // Drop meta data also. + public static final long DEFAULT_DRIVER_SHUTDOWN_TIMEOUT_MS = 5 * 1000; // Time to wait in + // ShutdownHook to exit + // gracefully. 
+ public static final boolean DEFAULT_TRACING_ENABLED = false; + public static final int DEFAULT_TRACING_THREAD_POOL_SIZE = 5; + public static final int DEFAULT_TRACING_BATCH_SIZE = 100; + public static final int DEFAULT_TRACING_TRACE_BUFFER_SIZE = 1000; + public static final int DEFAULT_MAX_INDEXES_PER_TABLE = 10; + public static final int DEFAULT_CLIENT_INDEX_ASYNC_THRESHOLD = 0; + public static final boolean DEFAULT_SERVER_SIDE_MASKING_ENABLED = false; + + public final static int DEFAULT_MUTATE_BATCH_SIZE = 100; // Batch size for UPSERT SELECT and + // DELETE + // Batch size in bytes for UPSERT, SELECT and DELETE. By default, 2MB + public final static long DEFAULT_MUTATE_BATCH_SIZE_BYTES = 2097152; + // The only downside of it being out-of-sync is that the parallelization of the scan won't be as + // balanced as it could be. + public static final int DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS = 30000; // 30 sec (with no + // activity) + public static final int DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS = 30 * 60000; // 30 + // minutes + public static final int DEFAULT_SCAN_CACHE_SIZE = 1000; + public static final int DEFAULT_MAX_INTRA_REGION_PARALLELIZATION = DEFAULT_MAX_QUERY_CONCURRENCY; + public static final int DEFAULT_DISTINCT_VALUE_COMPRESS_THRESHOLD = 1024 * 1024 * 1; // 1 Mb + public static final int DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE = 1024 * 1024 * 1; // 1 Mb + public static final int DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD = 3; + public static final long DEFAULT_MAX_SPOOL_TO_DISK_BYTES = 1024000000; + // Only the first chunked batches are fetched in parallel, so this default + // should be on the relatively bigger side of things. Bigger means more + // latency and client-side spooling/buffering. Smaller means less initial + // latency and less parallelization. 
+ public static final long DEFAULT_SCAN_RESULT_CHUNK_SIZE = 2999; + public static final boolean DEFAULT_IS_NAMESPACE_MAPPING_ENABLED = false; + public static final boolean DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE = true; + public static final int DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE = 50000; + + // + // Spillable GroupBy - SPGBY prefix + // + // Enable / disable spillable group by + public static final boolean DEFAULT_GROUPBY_SPILLABLE = true; + // Number of spill files / partitions the keys are distributed to + // Each spill file fits 2GB of data + public static final int DEFAULT_GROUPBY_SPILL_FILES = 2; + // Max size of 1st level main memory cache in bytes --> upper bound + public static final long DEFAULT_GROUPBY_MAX_CACHE_MAX = 1024L * 1024L * 100L; // 100 Mb + + public static final long DEFAULT_SEQUENCE_CACHE_SIZE = 100; // reserve 100 sequences at a time + public static final int GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN = 10; + public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS = 60000 * 30; // 30 + // mins + public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L * 1024L * 20L; // 20 Mb + public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L * 1024L * 10L; // 10 Mb + public static final int DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES = 1000; + public static final int DEFAULT_CLOCK_SKEW_INTERVAL = 2000; + public static final boolean DEFAULT_INDEX_FAILURE_HANDLING_REBUILD = true; // auto rebuild on + public static final boolean DEFAULT_INDEX_FAILURE_BLOCK_WRITE = false; + public static final boolean DEFAULT_INDEX_FAILURE_DISABLE_INDEX = true; + public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; + public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 60000; // 60 secs + public static final long DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY = 10000; // 10 secs + public static final long DEFAULT_START_TRUNCATE_TASK_DELAY = 20000; // 20 secs + public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME = 1; // 1 ms + public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME = 60000 * 3; // 3 + // mins + // 30 min rpc timeout * 5 tries, with 2100ms total pause time between retries + public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 30000 * 60) + 2100; + public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 30000 * 60; // 30 mins + public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 30000 * 60; // 30 mins + public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 5 total tries at rpc + // level + public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD = 60000 * 60 * 24; // 24 + // hrs + public static final long DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD = 30000; // 30 secs + + /** + * HConstants#HIGH_QOS is the max we will see to a standard table. 
We go higher to differentiate + * and give some room for things in the middle + */ + public static final int DEFAULT_SERVER_SIDE_PRIORITY = 500; + public static final int DEFAULT_INDEX_PRIORITY = 1000; + public static final int DEFAULT_METADATA_PRIORITY = 2000; + public static final int DEFAULT_INVALIDATE_METADATA_CACHE_PRIORITY = 3000; + public static final boolean DEFAULT_ALLOW_LOCAL_INDEX = true; + public static final int DEFAULT_INDEX_HANDLER_COUNT = 30; + public static final int DEFAULT_METADATA_HANDLER_COUNT = 30; + public static final int DEFAULT_SERVERSIDE_HANDLER_COUNT = 30; + public static final int DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT = 10; + public static final int DEFAULT_SYSTEM_MAX_VERSIONS = 1; + public static final boolean DEFAULT_SYSTEM_KEEP_DELETED_CELLS = false; + + // Retries when doing server side writes to SYSTEM.CATALOG + // 20 retries with 100 pause = 230 seconds total retry time + public static final int DEFAULT_METADATA_WRITE_RETRIES_NUMBER = 20; + public static final int DEFAULT_METADATA_WRITE_RETRY_PAUSE = 100; + + public static final int DEFAULT_TRACING_PAGE_SIZE = 100; + /** + * Configuration key to overwrite the tablename that should be used as the target table + */ + public static final String DEFAULT_TRACING_STATS_TABLE_NAME = "SYSTEM.TRACING_STATS"; + public static final String DEFAULT_TRACING_FREQ = Tracing.Frequency.NEVER.getKey(); + public static final double DEFAULT_TRACING_PROBABILITY_THRESHOLD = 0.05; + + public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 15 * 60000; // 15min + public static final int DEFAULT_STATS_GUIDEPOST_PER_REGION = 0; // Uses guidepost width by default + // Since we're not taking into account the compression done by FAST_DIFF in our + // counting of the bytes, default guidepost width to 100MB * 3 (where 3 is the + // compression we're getting) + public static final long DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES = 3 * 100 * 1024 * 1024; + public static final boolean DEFAULT_STATS_USE_CURRENT_TIME = true; + public static final boolean DEFAULT_RUN_UPDATE_STATS_ASYNC = true; + public static final boolean DEFAULT_COMMIT_STATS_ASYNC = true; + public static final int DEFAULT_STATS_POOL_SIZE = 4; + // Maximum size (in bytes) that cached table stats should take upm + public static final long DEFAULT_STATS_MAX_CACHE_SIZE = 256 * 1024 * 1024; + // Allow stats collection to be initiated by client multiple times immediately + public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0; + public static final int DEFAULT_STATS_CACHE_THREAD_POOL_SIZE = 4; + + public static final boolean DEFAULT_USE_REVERSE_SCAN = true; + + public static final String DEFAULT_CREATE_INDEX_STATE = PIndexState.BUILDING.toString(); + public static final boolean DEFAULT_DISABLE_ON_DROP = false; + + /** + * Use only first time SYSTEM.SEQUENCE table is created. + */ + public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 0; + /** + * Default value for coprocessor priority is between SYSTEM and USER priority. + */ + public static final int DEFAULT_COPROCESSOR_PRIORITY = + Coprocessor.PRIORITY_SYSTEM / 2 + Coprocessor.PRIORITY_USER / 2; // Divide individually to + // prevent any overflow + public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = true; + public static final boolean DEFAULT_EXPLAIN_ROW_COUNT = true; + public static final boolean DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE = true; + public static final int DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK = 10; + public static final long DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK = 5 * 1000; // 5 seconds. 
+ public static final boolean DEFAULT_STORE_NULLS = false; + + // TODO Change this to true as part of PHOENIX-1543 + // We'll also need this for transactions to work correctly + public static final boolean DEFAULT_AUTO_COMMIT = false; + public static final boolean DEFAULT_TABLE_ISTRANSACTIONAL = false; + public static final String DEFAULT_TRANSACTION_PROVIDER = + TransactionFactory.Provider.getDefault().name(); + public static final boolean DEFAULT_TRANSACTIONS_ENABLED = false; + public static final boolean DEFAULT_IS_GLOBAL_METRICS_ENABLED = true; + public static final boolean DEFAULT_IS_TABLE_LEVEL_METRICS_ENABLED = false; + public static final boolean DEFAULT_IS_METRIC_PUBLISHER_ENABLED = false; + public static final String DEFAULT_ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS = null; // All the tables + // metrics will be + // allowed. + public static final String DEFAULT_METRIC_PUBLISHER_CLASS_NAME = + "org.apache.phoenix.monitoring.JmxMetricProvider"; + public static final String DEFAULT_CLIENT_METRICS_TAG = "FAT_CLIENT"; + + public static final boolean DEFAULT_TRANSACTIONAL = false; + public static final boolean DEFAULT_MULTI_TENANT = false; + public static final boolean DEFAULT_AUTO_FLUSH = false; + + private static final String DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY = + ClientRpcControllerFactory.class.getName(); + + public static final String DEFAULT_CONSISTENCY_LEVEL = Consistency.STRONG.toString(); + + public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = false; + public static final boolean DEFAULT_FORCE_ROW_KEY_ORDER = false; + public static final boolean DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS = false; + public static final boolean DEFAULT_REQUEST_LEVEL_METRICS_ENABLED = false; + public static final boolean DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE = true; + public static final int DEFAULT_MAX_VERSIONS_TRANSACTIONAL = Integer.MAX_VALUE; + + public static final boolean DEFAULT_RETURN_SEQUENCE_VALUES = false; + public static final String DEFAULT_EXTRA_JDBC_ARGUMENTS = ""; + + public static final long DEFAULT_INDEX_POPULATION_SLEEP_TIME = 5000; + + // Phoenix Connection Query Service configuration Defaults + public static final String DEFAULT_QUERY_SERVICES_NAME = "DEFAULT_CQSN"; + public static final String DEFAULT_CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES = + "1, 10, 100, 500, 1000"; + public static final boolean DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED = false; + public static final boolean DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED = false; + + public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true; + public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS = + DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 2; + public static final int DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS = + (3 * DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD) / 4; + public static final int DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE = 10; + public static final boolean DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE = true; + public static final float DEFAULT_LIMITED_QUERY_SERIAL_THRESHOLD = 0.2f; + + public static final boolean DEFAULT_INDEX_ASYNC_BUILD_ENABLED = true; + + public static final String DEFAULT_CLIENT_CACHE_ENCODING = + PTableRefFactory.Encoding.OBJECT.toString(); + public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true; + public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 86400000; + public static final int DEFAULT_COLUMN_ENCODED_BYTES = + QualifierEncodingScheme.TWO_BYTE_QUALIFIERS.getSerializedMetadataValue(); + public 
static final String DEFAULT_IMMUTABLE_STORAGE_SCHEME = + ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.toString(); + public static final String DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME = + ImmutableStorageScheme.ONE_CELL_PER_COLUMN.toString(); + + // by default, max connections from one client to one cluster is unlimited + public static final int DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0; + // by default, max internal connections from one client to one cluster is unlimited + public static final int DEFAULT_INTERNAL_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0; + + public static final boolean DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED = false; + public static final int DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS = 15; + public static final boolean DEFAULT_STATS_COLLECTION_ENABLED = true; + public static final boolean DEFAULT_USE_STATS_FOR_PARALLELIZATION = true; + + // Security defaults + public static final boolean DEFAULT_PHOENIX_ACLS_ENABLED = false; + + public static final int DEFAULT_SMALL_SCAN_THRESHOLD = 100; + + /** + * Metadata caching configs, see https://issues.apache.org/jira/browse/PHOENIX-6883. Disable the + * boolean flags and set UCF=always to disable the caching re-design. Disable caching re-design if + * you use Online Data Format Change since the cutover logic is currently incompatible and clients + * may not learn about the physical table change. See + * https://issues.apache.org/jira/browse/PHOENIX-7284. Disable caching re-design if your clients + * will not have ADMIN perms to call region server RPC. See + * https://issues.apache.org/jira/browse/HBASE-28508 + */ + public static final long DEFAULT_UPDATE_CACHE_FREQUENCY = + (long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS"); + public static final boolean DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED = false; + public static final boolean DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED = false; + public static final int DEFAULT_PHOENIX_METADATA_CACHE_INVALIDATION_THREAD_POOL_SIZE = 20; + + // default system task handling interval in milliseconds + public static final long DEFAULT_TASK_HANDLING_INTERVAL_MS = 60 * 1000; // 1 min + public static final long DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS = 30 * 60 * 1000; // 30 min + public static final long DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS = 10 * 1000; // 10 sec + + public static final long DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS = + 7 * 24 * 60 * 60 * 1000; /* 7 days */ + public static final boolean DEFAULT_INDEX_REGION_OBSERVER_ENABLED = true; + + public static final String DEFAULT_INDEX_REGION_OBSERVER_ENABLED_ALL_TABLES = + Boolean.toString(true); + public static final boolean DEFAULT_PHOENIX_SERVER_PAGING_ENABLED = true; + public static final long DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS = 32 * 1024; + public static final long DEFAULT_INDEX_PAGE_SIZE_IN_ROWS = 32 * 1024; + + public static final boolean DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK = false; + + public static final boolean DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED = true; + + public static final String DEFAULT_SCHEMA = null; + public static final String DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING = "BASE64"; // for backward + // compatibility, + // till + // 4.10, psql and + // CSVBulkLoad + // expects binary + // data to be base + // 64 + // encoded + // RS -> RS calls for upsert select statements are disabled by default + public static final boolean DEFAULT_ENABLE_SERVER_UPSERT_SELECT = false; + + // By default generally allow server trigger 
mutations + public static final boolean DEFAULT_ENABLE_SERVER_SIDE_DELETE_MUTATIONS = true; + public static final boolean DEFAULT_ENABLE_SERVER_SIDE_UPSERT_MUTATIONS = true; + + public static final boolean DEFAULT_COST_BASED_OPTIMIZER_ENABLED = false; + public static final boolean DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB = false; + public static final String DEFAULT_LOGGING_LEVEL = LogLevel.OFF.name(); + public static final String DEFAULT_AUDIT_LOGGING_LEVEL = LogLevel.OFF.name(); + public static final String DEFAULT_LOG_SAMPLE_RATE = "1.0"; + public static final int DEFAULT_LOG_SALT_BUCKETS = 32; + public static final int DEFAULT_SALT_BUCKETS = 0; + + public static final boolean DEFAULT_SYSTEM_CATALOG_SPLITTABLE = true; + + public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = + "org.apache.phoenix.query.DefaultGuidePostsCacheFactory"; + + public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false; + + public static final boolean DEFAULT_PENDING_MUTATIONS_DDL_THROW = false; + public static final boolean DEFAULT_SKIP_SYSTEM_TABLES_EXISTENCE_CHECK = false; + public static final boolean DEFAULT_MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED = true; + public static final int DEFAULT_TIMEOUT_DURING_UPGRADE_MS = 60000 * 30; // 30 mins + public static final int DEFAULT_SCAN_PAGE_SIZE = 32768; + public static final boolean DEFAULT_APPLY_TIME_ZONE_DISPLACMENT = false; + public static final boolean DEFAULT_PHOENIX_TABLE_TTL_ENABLED = true; + public static final boolean DEFAULT_PHOENIX_VIEW_TTL_ENABLED = true; + public static final int DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT = 100; + + public static final int DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN = 5; + public static final boolean DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX = true; + + public static final boolean DEFAULT_PHOENIX_GET_METADATA_READ_LOCK_ENABLED = true; + + private final Configuration config; + + private QueryServicesOptions(Configuration config) { + this.config = config; + } + + public ReadOnlyProps getProps(ReadOnlyProps defaultProps) { + return new ReadOnlyProps(defaultProps, config.iterator()); + } + + public QueryServicesOptions setAll(ReadOnlyProps props) { + for (Entry entry : props) { + config.set(entry.getKey(), entry.getValue()); + } + return this; + } + + public static QueryServicesOptions withDefaults() { + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + QueryServicesOptions options = new QueryServicesOptions(config) + .setIfUnset(STATS_USE_CURRENT_TIME_ATTRIB, DEFAULT_STATS_USE_CURRENT_TIME) + .setIfUnset(RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC) + .setIfUnset(COMMIT_STATS_ASYNC, DEFAULT_COMMIT_STATS_ASYNC) + .setIfUnset(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS) + .setIfUnset(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE) + .setIfUnset(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE) + .setIfUnset(THREAD_TIMEOUT_MS_ATTRIB, DEFAULT_THREAD_TIMEOUT_MS) + .setIfUnset(CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES) + .setIfUnset(SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES) + .setIfUnset(SPOOL_DIRECTORY, DEFAULT_SPOOL_DIRECTORY) + .setIfUnset(MAX_MEMORY_PERC_ATTRIB, DEFAULT_MAX_MEMORY_PERC) + .setIfUnset(MAX_TENANT_MEMORY_PERC_ATTRIB, DEFAULT_MAX_TENANT_MEMORY_PERC) + .setIfUnset(MAX_SERVER_CACHE_SIZE_ATTRIB, DEFAULT_MAX_SERVER_CACHE_SIZE) + .setIfUnset(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE) + .setIfUnset(DATE_FORMAT_ATTRIB, DEFAULT_DATE_FORMAT) + 
.setIfUnset(DATE_FORMAT_TIMEZONE_ATTRIB, DEFAULT_DATE_FORMAT_TIMEZONE) + .setIfUnset(STATS_UPDATE_FREQ_MS_ATTRIB, DEFAULT_STATS_UPDATE_FREQ_MS) + .setIfUnset(MIN_STATS_UPDATE_FREQ_MS_ATTRIB, DEFAULT_MIN_STATS_UPDATE_FREQ_MS) + .setIfUnset(STATS_CACHE_THREAD_POOL_SIZE, DEFAULT_STATS_CACHE_THREAD_POOL_SIZE) + .setIfUnset(CALL_QUEUE_ROUND_ROBIN_ATTRIB, DEFAULT_CALL_QUEUE_ROUND_ROBIN) + .setIfUnset(MAX_MUTATION_SIZE_ATTRIB, DEFAULT_MAX_MUTATION_SIZE) + .setIfUnset(ROW_KEY_ORDER_SALTED_TABLE_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER) + .setIfUnset(USE_INDEXES_ATTRIB, DEFAULT_USE_INDEXES) + .setIfUnset(IMMUTABLE_ROWS_ATTRIB, DEFAULT_IMMUTABLE_ROWS) + .setIfUnset(INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, + DEFAULT_INDEX_MUTATE_BATCH_SIZE_THRESHOLD) + .setIfUnset(MAX_SPOOL_TO_DISK_BYTES_ATTRIB, DEFAULT_MAX_SPOOL_TO_DISK_BYTES) + .setIfUnset(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA) + .setIfUnset(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE) + .setIfUnset(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX) + .setIfUnset(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES) + .setIfUnset(SEQUENCE_CACHE_SIZE_ATTRIB, DEFAULT_SEQUENCE_CACHE_SIZE) + .setIfUnset(SCAN_RESULT_CHUNK_SIZE, DEFAULT_SCAN_RESULT_CHUNK_SIZE) + .setIfUnset(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE) + .setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK) + .setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK) + .setIfUnset(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED) + .setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, + DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY) + .setIfUnset(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX) + .setIfUnset(FORCE_ROW_KEY_ORDER_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER) + .setIfUnset(COLLECT_REQUEST_LEVEL_METRICS, DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) + .setIfUnset(ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE) + .setIfUnset(ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE) + .setIfUnset(RENEW_LEASE_THRESHOLD_MILLISECONDS, DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS) + .setIfUnset(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, + DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS) + .setIfUnset(RENEW_LEASE_THREAD_POOL_SIZE, DEFAULT_RENEW_LEASE_THREAD_POOL_SIZE) + .setIfUnset(IS_NAMESPACE_MAPPING_ENABLED, DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) + .setIfUnset(IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE) + .setIfUnset(LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, DEFAULT_LOCAL_INDEX_CLIENT_UPGRADE) + .setIfUnset(AUTO_UPGRADE_ENABLED, DEFAULT_AUTO_UPGRADE_ENABLED) + .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING) + .setIfUnset(TRACING_ENABLED, DEFAULT_TRACING_ENABLED) + .setIfUnset(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE) + .setIfUnset(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE) + .setIfUnset(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED) + .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION) + .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION) + .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING) + .setIfUnset(COST_BASED_OPTIMIZER_ENABLED, DEFAULT_COST_BASED_OPTIMIZER_ENABLED) + .setIfUnset(PHOENIX_ACLS_ENABLED, DEFAULT_PHOENIX_ACLS_ENABLED) + .setIfUnset(LOG_LEVEL, DEFAULT_LOGGING_LEVEL) + 
.setIfUnset(LOG_SAMPLE_RATE, DEFAULT_LOG_SAMPLE_RATE) + .setIfUnset("data.tx.pre.014.changeset.key", Boolean.FALSE.toString()) + .setIfUnset(CLIENT_METRICS_TAG, DEFAULT_CLIENT_METRICS_TAG) + .setIfUnset(CLIENT_INDEX_ASYNC_THRESHOLD, DEFAULT_CLIENT_INDEX_ASYNC_THRESHOLD) + .setIfUnset(PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, DEFAULT_SERVER_SIDE_MASKING_ENABLED) + .setIfUnset(QUERY_SERVICES_NAME, DEFAULT_QUERY_SERVICES_NAME) + .setIfUnset(INDEX_CREATE_DEFAULT_STATE, DEFAULT_CREATE_INDEX_STATE) + .setIfUnset(CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, + DEFAULT_CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES) + .setIfUnset(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, + DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED) + .setIfUnset(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED, + DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED) + .setIfUnset(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK, DEFAULT_SKIP_SYSTEM_TABLES_EXISTENCE_CHECK) + .setIfUnset(MAX_IN_LIST_SKIP_SCAN_SIZE, DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE) + .setIfUnset(MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN, + DEFAULT_MAX_REGION_LOCATIONS_SIZE_EXPLAIN_PLAN) + .setIfUnset(SERVER_MERGE_FOR_UNCOVERED_INDEX, DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX) + .setIfUnset(MAX_IN_LIST_SKIP_SCAN_SIZE, DEFAULT_MAX_IN_LIST_SKIP_SCAN_SIZE) + .setIfUnset(CONNECTION_ACTIVITY_LOGGING_ENABLED, DEFAULT_CONNECTION_ACTIVITY_LOGGING_ENABLED) + .setIfUnset(CONNECTION_ACTIVITY_LOGGING_INTERVAL, + DEFAULT_CONNECTION_ACTIVITY_LOGGING_INTERVAL_IN_MINS); + + // HBase sets this to 1, so we reset it to something more appropriate. + // Hopefully HBase will change this, because we can't know if a user set + // it to 1, so we'll change it. + int scanCaching = config.getInt(SCAN_CACHE_SIZE_ATTRIB, 0); + if (scanCaching == 1) { + config.setInt(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE); + } else if (scanCaching <= 0) { // Provides the user with a way of setting it to 1 + config.setInt(SCAN_CACHE_SIZE_ATTRIB, 1); + } + return options; + } + + public Configuration getConfiguration() { + return config; + } + + private QueryServicesOptions setIfUnset(String name, int value) { + config.setIfUnset(name, Integer.toString(value)); + return this; + } + + private QueryServicesOptions setIfUnset(String name, boolean value) { + config.setIfUnset(name, Boolean.toString(value)); + return this; + } + + private QueryServicesOptions setIfUnset(String name, long value) { + config.setIfUnset(name, Long.toString(value)); + return this; + } + + private QueryServicesOptions setIfUnset(String name, String value) { + config.setIfUnset(name, value); + return this; + } + + public QueryServicesOptions setKeepAliveMs(int keepAliveMs) { + return set(KEEP_ALIVE_MS_ATTRIB, keepAliveMs); + } + + public QueryServicesOptions setThreadPoolSize(int threadPoolSize) { + return set(THREAD_POOL_SIZE_ATTRIB, threadPoolSize); + } + + public QueryServicesOptions setQueueSize(int queueSize) { + config.setInt(QUEUE_SIZE_ATTRIB, queueSize); + return this; + } + + public QueryServicesOptions setThreadTimeoutMs(int threadTimeoutMs) { + return set(THREAD_TIMEOUT_MS_ATTRIB, threadTimeoutMs); + } + + public QueryServicesOptions setClientSpoolThresholdBytes(long spoolThresholdBytes) { + return set(CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, spoolThresholdBytes); + } + + public QueryServicesOptions setServerSpoolThresholdBytes(long spoolThresholdBytes) { + return set(SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, spoolThresholdBytes); + } + + public QueryServicesOptions setSpoolDirectory(String spoolDirectory) { + return 
set(SPOOL_DIRECTORY, spoolDirectory); + } + + public QueryServicesOptions setMaxMemoryPerc(int maxMemoryPerc) { + return set(MAX_MEMORY_PERC_ATTRIB, maxMemoryPerc); + } + + public QueryServicesOptions setMaxTenantMemoryPerc(int maxTenantMemoryPerc) { + return set(MAX_TENANT_MEMORY_PERC_ATTRIB, maxTenantMemoryPerc); + } + + public QueryServicesOptions setMaxServerCacheSize(long maxServerCacheSize) { + return set(MAX_SERVER_CACHE_SIZE_ATTRIB, maxServerCacheSize); + } + + public QueryServicesOptions setMaxServerMetaDataCacheSize(long maxMetaDataCacheSize) { + return set(MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize); + } + + public QueryServicesOptions setMaxClientMetaDataCacheSize(long maxMetaDataCacheSize) { + return set(MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, maxMetaDataCacheSize); + } + + public QueryServicesOptions setScanFetchSize(int scanFetchSize) { + return set(SCAN_CACHE_SIZE_ATTRIB, scanFetchSize); + } + + public QueryServicesOptions setDateFormat(String dateFormat) { + return set(DATE_FORMAT_ATTRIB, dateFormat); + } + + public QueryServicesOptions setCallQueueRoundRobin(boolean isRoundRobin) { + return set(CALL_QUEUE_PRODUCER_ATTRIB_NAME, isRoundRobin); + } + + public QueryServicesOptions setMaxMutateSize(int maxMutateSize) { + return set(MAX_MUTATION_SIZE_ATTRIB, maxMutateSize); + } + + @Deprecated + public QueryServicesOptions setMutateBatchSize(int mutateBatchSize) { + return set(MUTATE_BATCH_SIZE_ATTRIB, mutateBatchSize); + } + + public QueryServicesOptions setDropMetaData(boolean dropMetadata) { + return set(DROP_METADATA_ATTRIB, dropMetadata); + } + + public QueryServicesOptions setGroupBySpill(boolean enabled) { + return set(GROUPBY_SPILLABLE_ATTRIB, enabled); + } + + public QueryServicesOptions setGroupBySpillMaxCacheSize(long size) { + return set(GROUPBY_MAX_CACHE_SIZE_ATTRIB, size); + } + + public QueryServicesOptions setGroupBySpillNumSpillFiles(long num) { + return set(GROUPBY_SPILL_FILES_ATTRIB, num); + } + + QueryServicesOptions set(String name, boolean value) { + config.set(name, Boolean.toString(value)); + return this; + } + + QueryServicesOptions set(String name, int value) { + config.set(name, Integer.toString(value)); + return this; + } + + QueryServicesOptions set(String name, String value) { + config.set(name, value); + return this; + } + + QueryServicesOptions set(String name, long value) { + config.set(name, Long.toString(value)); + return this; + } + + public int getKeepAliveMs() { + return config.getInt(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS); + } + + public int getThreadPoolSize() { + return config.getInt(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE); + } + + public int getQueueSize() { + return config.getInt(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE); + } + + public int getMaxMemoryPerc() { + return config.getInt(MAX_MEMORY_PERC_ATTRIB, DEFAULT_MAX_MEMORY_PERC); + } + + public int getMaxMutateSize() { + return config.getInt(MAX_MUTATION_SIZE_ATTRIB, DEFAULT_MAX_MUTATION_SIZE); + } + + @Deprecated + public int getMutateBatchSize() { + return config.getInt(MUTATE_BATCH_SIZE_ATTRIB, DEFAULT_MUTATE_BATCH_SIZE); + } + + public String getClientMetricTag() { + return config.get(QueryServices.CLIENT_METRICS_TAG, DEFAULT_CLIENT_METRICS_TAG); + } + + public boolean isUseIndexes() { + return config.getBoolean(USE_INDEXES_ATTRIB, DEFAULT_USE_INDEXES); + } + + public boolean isImmutableRows() { + return config.getBoolean(IMMUTABLE_ROWS_ATTRIB, DEFAULT_IMMUTABLE_ROWS); + } + + public boolean isDropMetaData() { + return 
config.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + } + + public boolean isSpillableGroupByEnabled() { + return config.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE); + } + + public long getSpillableGroupByMaxCacheSize() { + return config.getLongBytes(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX); + } + + public int getSpillableGroupByNumSpillFiles() { + return config.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES); + } + + public boolean isTracingEnabled() { + return config.getBoolean(TRACING_ENABLED, DEFAULT_TRACING_ENABLED); + } + + public QueryServicesOptions setTracingEnabled(boolean enable) { + config.setBoolean(TRACING_ENABLED, enable); + return this; + } + + public int getTracingThreadPoolSize() { + return config.getInt(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE); + } + + public int getTracingBatchSize() { + return config.getInt(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE); + } + + public int getTracingTraceBufferSize() { + return config.getInt(TRACING_TRACE_BUFFER_SIZE, DEFAULT_TRACING_TRACE_BUFFER_SIZE); + } + + public String getTableName() { + return config.get(TRACING_STATS_TABLE_NAME_ATTRIB, DEFAULT_TRACING_STATS_TABLE_NAME); + } + + public boolean isGlobalMetricsEnabled() { + return config.getBoolean(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED); + } + + public String getMetricPublisherClass() { + return config.get(METRIC_PUBLISHER_CLASS_NAME, DEFAULT_METRIC_PUBLISHER_CLASS_NAME); + } + + public String getAllowedListTableNames() { + return config.get(ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS, + DEFAULT_ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS); + } + + public boolean isTableLevelMetricsEnabled() { + return config.getBoolean(TABLE_LEVEL_METRICS_ENABLED, DEFAULT_IS_TABLE_LEVEL_METRICS_ENABLED); + } + + public void setTableLevelMetricsEnabled() { + set(TABLE_LEVEL_METRICS_ENABLED, true); + } + + public boolean isMetricPublisherEnabled() { + return config.getBoolean(METRIC_PUBLISHER_ENABLED, DEFAULT_IS_METRIC_PUBLISHER_ENABLED); + } + + public boolean isConnectionQueryServiceMetricsEnabled() { + return config.getBoolean(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, + DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_ENABLED); + } + + public boolean isConnectionQueryServiceMetricsPublisherEnabled() { + return config.getBoolean(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED, + DEFAULT_IS_CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_ENABLED); + } + + public String getQueryServicesName() { + return config.get(QUERY_SERVICES_NAME, DEFAULT_QUERY_SERVICES_NAME); + } + + public void setConnectionQueryServiceMetricsEnabled() { + set(CONNECTION_QUERY_SERVICE_METRICS_ENABLED, true); + } + + public String getConnectionQueryServiceMetricsPublisherClass() { + return config.get(CONNECTION_QUERY_SERVICE_METRICS_PUBLISHER_CLASSNAME, + DEFAULT_METRIC_PUBLISHER_CLASS_NAME); + } + + @VisibleForTesting + public void setAllowedListForTableLevelMetrics(String tableNameList) { + set(ALLOWED_LIST_FOR_TABLE_LEVEL_METRICS, tableNameList); + } + + public boolean isUseByteBasedRegex() { + return config.getBoolean(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX); + } + + public int getScanCacheSize() { + return config.getInt(SCAN_CACHE_SIZE_ATTRIB, DEFAULT_SCAN_CACHE_SIZE); + } + + public QueryServicesOptions setMaxServerCacheTTLMs(int ttl) { + return set(MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, ttl); + } + + public QueryServicesOptions setMasterInfoPort(int port) { + return set(MASTER_INFO_PORT_ATTRIB, port); + } + + 
public QueryServicesOptions setRegionServerInfoPort(int port) { + return set(REGIONSERVER_INFO_PORT_ATTRIB, port); + } + + public QueryServicesOptions setRegionServerLeasePeriodMs(int period) { + return set(HBASE_CLIENT_SCANNER_TIMEOUT_ATTRIB, period); + } + + public QueryServicesOptions setRpcTimeoutMs(int timeout) { + return set(RPC_TIMEOUT_ATTRIB, timeout); + } + + public QueryServicesOptions setUseIndexes(boolean useIndexes) { + return set(USE_INDEXES_ATTRIB, useIndexes); + } + + public QueryServicesOptions setImmutableRows(boolean isImmutableRows) { + return set(IMMUTABLE_ROWS_ATTRIB, isImmutableRows); + } + + public QueryServicesOptions setWALEditCodec(String walEditCodec) { + return set(WAL_EDIT_CODEC_ATTRIB, walEditCodec); + } + + public QueryServicesOptions setStatsHistogramDepthBytes(long byteDepth) { + return set(STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, byteDepth); + } + + public QueryServicesOptions setStatsUpdateFrequencyMs(int frequencyMs) { + return set(STATS_UPDATE_FREQ_MS_ATTRIB, frequencyMs); + } + + public QueryServicesOptions setMinStatsUpdateFrequencyMs(int frequencyMs) { + return set(MIN_STATS_UPDATE_FREQ_MS_ATTRIB, frequencyMs); + } + + public QueryServicesOptions setStatsCacheThreadPoolSize(int threadPoolSize) { + return set(STATS_CACHE_THREAD_POOL_SIZE, threadPoolSize); + } + + public QueryServicesOptions setSequenceSaltBuckets(int saltBuckets) { + config.setInt(SEQUENCE_SALT_BUCKETS_ATTRIB, saltBuckets); + return this; + } + + public QueryServicesOptions setExplainChunkCount(boolean showChunkCount) { + config.setBoolean(EXPLAIN_CHUNK_COUNT_ATTRIB, showChunkCount); + return this; + } + + public QueryServicesOptions setTransactionsEnabled(boolean transactionsEnabled) { + config.setBoolean(TRANSACTIONS_ENABLED, transactionsEnabled); + return this; + } + + public QueryServicesOptions setExplainRowCount(boolean showRowCount) { + config.setBoolean(EXPLAIN_ROW_COUNT_ATTRIB, showRowCount); + return this; + } + + public QueryServicesOptions setAllowOnlineSchemaUpdate(boolean allow) { + config.setBoolean(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, allow); + return this; + } + + public QueryServicesOptions setNumRetriesForSchemaChangeCheck(int numRetries) { + config.setInt(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, numRetries); + return this; + } + + public QueryServicesOptions setDelayInMillisForSchemaChangeCheck(long delayInMillis) { + config.setLong(DELAY_FOR_SCHEMA_UPDATE_CHECK, delayInMillis); + return this; + + } + + public QueryServicesOptions setUseByteBasedRegex(boolean flag) { + config.setBoolean(USE_BYTE_BASED_REGEX_ATTRIB, flag); + return this; + } + + public QueryServicesOptions setForceRowKeyOrder(boolean forceRowKeyOrder) { + config.setBoolean(FORCE_ROW_KEY_ORDER_ATTRIB, forceRowKeyOrder); + return this; + } + + public QueryServicesOptions setExtraJDBCArguments(String extraArgs) { + config.set(EXTRA_JDBC_ARGUMENTS_ATTRIB, extraArgs); + return this; + } + + public QueryServicesOptions setRunUpdateStatsAsync(boolean flag) { + config.setBoolean(RUN_UPDATE_STATS_ASYNC, flag); + return this; + } + + public QueryServicesOptions setCommitStatsAsync(boolean flag) { + config.setBoolean(COMMIT_STATS_ASYNC, flag); + return this; + } + + public QueryServicesOptions setEnableRenewLease(boolean enable) { + config.setBoolean(RENEW_LEASE_ENABLED, enable); + return this; + } + + public QueryServicesOptions setIndexHandlerCount(int count) { + config.setInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, count); + return this; + } + + public QueryServicesOptions setMetadataHandlerCount(int count) { + 
config.setInt(QueryServices.METADATA_HANDLER_COUNT_ATTRIB, count); + return this; + } + + public QueryServicesOptions setHConnectionPoolCoreSize(int count) { + config.setInt(QueryServices.HCONNECTION_POOL_CORE_SIZE, count); + return this; + } + + public QueryServicesOptions setHConnectionPoolMaxSize(int count) { + config.setInt(QueryServices.HCONNECTION_POOL_MAX_SIZE, count); + return this; + } + + public QueryServicesOptions setMaxThreadsPerHTable(int count) { + config.setInt(QueryServices.HTABLE_MAX_THREADS, count); + return this; + } + + public QueryServicesOptions setDefaultIndexPopulationWaitTime(long waitTime) { + config.setLong(INDEX_POPULATION_SLEEP_TIME, waitTime); + return this; + } + + public QueryServicesOptions setUseStatsForParallelization(boolean flag) { + config.setBoolean(USE_STATS_FOR_PARALLELIZATION, flag); + return this; + } + + public QueryServicesOptions setIndexRebuildTaskInitialDelay(long waitTime) { + config.setLong(INDEX_REBUILD_TASK_INITIAL_DELAY, waitTime); + return this; + } + + public QueryServicesOptions setSequenceCacheSize(long sequenceCacheSize) { + config.setLong(SEQUENCE_CACHE_SIZE_ATTRIB, sequenceCacheSize); + return this; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/StatsLoaderImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/StatsLoaderImpl.java index bc90d32c42b..69c32dcf8c5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/StatsLoaderImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/StatsLoaderImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; +import java.io.IOException; +import java.util.Objects; + import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -30,75 +32,72 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Objects; - /** * {@link PhoenixStatsLoader} implementation for the Stats Loader. */ class StatsLoaderImpl implements PhoenixStatsLoader { - private static final Logger LOGGER = LoggerFactory.getLogger(StatsLoaderImpl.class); + private static final Logger LOGGER = LoggerFactory.getLogger(StatsLoaderImpl.class); - private final ConnectionQueryServices queryServices; + private final ConnectionQueryServices queryServices; - public StatsLoaderImpl(ConnectionQueryServices queryServices){ - this.queryServices = queryServices; - } + public StatsLoaderImpl(ConnectionQueryServices queryServices) { + this.queryServices = queryServices; + } - @Override - public boolean needsLoad() { - // For now, whenever it's called, we try to load stats from stats table - // no matter it has been updated or not. - // Here are the possible optimizations we can do here: - // 1. Load stats from the stats table only when the stats get updated on the server side. - // 2. Support different refresh cycle for different tables. 
- return true; - } + @Override + public boolean needsLoad() { + // For now, whenever it's called, we try to load stats from stats table + // no matter it has been updated or not. + // Here are the possible optimizations we can do here: + // 1. Load stats from the stats table only when the stats get updated on the server side. + // 2. Support different refresh cycle for different tables. + return true; + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { - return loadStats(statsKey, GuidePostsInfo.NO_GUIDEPOST); - } + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { + return loadStats(statsKey, GuidePostsInfo.NO_GUIDEPOST); + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) throws Exception { - assert(prevGuidepostInfo != null); + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) + throws Exception { + assert (prevGuidepostInfo != null); - TableName tableName = SchemaUtil.getPhysicalName( - PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, - queryServices.getProps()); - Table statsHTable = queryServices.getTable(tableName.getName()); + TableName tableName = SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, queryServices.getProps()); + Table statsHTable = queryServices.getTable(tableName.getName()); - try { - GuidePostsInfo guidePostsInfo = StatisticsUtil.readStatistics(statsHTable, statsKey, - HConstants.LATEST_TIMESTAMP); - traceStatsUpdate(statsKey, guidePostsInfo); - return guidePostsInfo; - } catch (TableNotFoundException e) { - // On a fresh install, stats might not yet be created, don't warn about this. - LOGGER.debug("Unable to locate Phoenix stats table: " + tableName.toString(), e); - return prevGuidepostInfo; - } catch (IOException e) { - LOGGER.warn("Unable to read from stats table: " + tableName.toString(), e); - return prevGuidepostInfo; - } finally { - try { - statsHTable.close(); - } catch (IOException e) { - // Log, but continue. We have our stats anyway now. - LOGGER.warn("Unable to close stats table: " + tableName.toString(), e); - } - } + try { + GuidePostsInfo guidePostsInfo = + StatisticsUtil.readStatistics(statsHTable, statsKey, HConstants.LATEST_TIMESTAMP); + traceStatsUpdate(statsKey, guidePostsInfo); + return guidePostsInfo; + } catch (TableNotFoundException e) { + // On a fresh install, stats might not yet be created, don't warn about this. + LOGGER.debug("Unable to locate Phoenix stats table: " + tableName.toString(), e); + return prevGuidepostInfo; + } catch (IOException e) { + LOGGER.warn("Unable to read from stats table: " + tableName.toString(), e); + return prevGuidepostInfo; + } finally { + try { + statsHTable.close(); + } catch (IOException e) { + // Log, but continue. We have our stats anyway now. + LOGGER.warn("Unable to close stats table: " + tableName.toString(), e); + } } + } - /** - * Logs a trace message for newly inserted entries to the stats cache. - */ - void traceStatsUpdate(GuidePostsKey key, GuidePostsInfo info) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Updating local TableStats cache (id={}) for {}, size={}bytes", - new Object[] { Objects.hashCode(this), key, info.getEstimatedSize()}); - } + /** + * Logs a trace message for newly inserted entries to the stats cache. 
+ */ + void traceStatsUpdate(GuidePostsKey key, GuidePostsInfo info) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Updating local TableStats cache (id={}) for {}, size={}bytes", + new Object[] { Objects.hashCode(this), key, info.getEstimatedSize() }); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousColumnException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousColumnException.java index f66083dd4ed..43b9ce33fd1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousColumnException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousColumnException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,28 +23,26 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * - * Exception thrown when a column name is used without being qualified with an alias - * and more than one table contains that column. - * - * + * Exception thrown when a column name is used without being qualified with an alias and more than + * one table contains that column. * @since 0.1 */ public class AmbiguousColumnException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.AMBIGUOUS_COLUMN; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.AMBIGUOUS_COLUMN; - public AmbiguousColumnException() { - super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public AmbiguousColumnException() { + super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), + code.getErrorCode()); + } - public AmbiguousColumnException(String columnName) { - super(new SQLExceptionInfo.Builder(code).setColumnName(columnName).build().toString(), - code.getSQLState(), code.getErrorCode()); - } + public AmbiguousColumnException(String columnName) { + super(new SQLExceptionInfo.Builder(code).setColumnName(columnName).build().toString(), + code.getSQLState(), code.getErrorCode()); + } - public AmbiguousColumnException(String columnName, Throwable cause) { - super(new SQLExceptionInfo.Builder(code).setColumnName(columnName).build().toString(), - code.getSQLState(), code.getErrorCode(), cause); - } + public AmbiguousColumnException(String columnName, Throwable cause) { + super(new SQLExceptionInfo.Builder(code).setColumnName(columnName).build().toString(), + code.getSQLState(), code.getErrorCode(), cause); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousTableException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousTableException.java index fea93305492..7e832efa5d9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousTableException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/AmbiguousTableException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,28 +23,26 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * - * Exception thrown when a table name is used without being qualified with an alias - * and more than one schema contains that table. - * - * + * Exception thrown when a table name is used without being qualified with an alias and more than + * one schema contains that table. * @since 0.1 */ public class AmbiguousTableException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.AMBIGUOUS_TABLE; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.AMBIGUOUS_TABLE; - public AmbiguousTableException() { - super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), code.getErrorCode(), null); - } + public AmbiguousTableException() { + super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), + code.getErrorCode(), null); + } - public AmbiguousTableException(String tableName) { - super(new SQLExceptionInfo.Builder(code).setTableName(tableName).toString(), - code.getSQLState(), code.getErrorCode(), null); - } + public AmbiguousTableException(String tableName) { + super(new SQLExceptionInfo.Builder(code).setTableName(tableName).toString(), code.getSQLState(), + code.getErrorCode(), null); + } - public AmbiguousTableException(String tableName, Throwable cause) { - super(new SQLExceptionInfo.Builder(code).setTableName(tableName).toString(), - code.getSQLState(), code.getErrorCode(), cause); - } + public AmbiguousTableException(String tableName, Throwable cause) { + super(new SQLExceptionInfo.Builder(code).setTableName(tableName).toString(), code.getSQLState(), + code.getErrorCode(), cause); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ArgumentTypeMismatchException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ArgumentTypeMismatchException.java index 8e5188d9ca9..16956c68873 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ArgumentTypeMismatchException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ArgumentTypeMismatchException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,40 +20,44 @@ import java.sql.SQLException; import java.util.Arrays; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Collections2; +import javax.annotation.Nullable; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDataTypeFactory; - -import javax.annotation.Nullable; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Collections2; /** - * Exception thrown when we try to use use an argument that has the wrong type. - * - * + * Exception thrown when we try to use use an argument that has the wrong type. * @since 1.0 */ public class ArgumentTypeMismatchException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.TYPE_MISMATCH; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.TYPE_MISMATCH; - public ArgumentTypeMismatchException(PDataType expected, PDataType actual, String location) { - super(new SQLExceptionInfo.Builder(code).setMessage("expected: " + expected + " but was: " + actual + " at " + location).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public ArgumentTypeMismatchException(PDataType expected, PDataType actual, String location) { + super(new SQLExceptionInfo.Builder(code) + .setMessage("expected: " + expected + " but was: " + actual + " at " + location).build() + .toString(), code.getSQLState(), code.getErrorCode()); + } - public ArgumentTypeMismatchException(Class[] expecteds, PDataType actual, String location) { - this(Arrays.toString(Collections2.transform(Arrays.asList(expecteds), - new Function, PDataType>() { - @Nullable @Override - public PDataType apply(@Nullable Class input) { - return PDataTypeFactory.getInstance().instanceFromClass(input); - } - }).toArray()), actual.toString(), location); - } + public ArgumentTypeMismatchException(Class[] expecteds, PDataType actual, + String location) { + this(Arrays.toString(Collections2 + .transform(Arrays.asList(expecteds), new Function, PDataType>() { + @Nullable + @Override + public PDataType apply(@Nullable Class input) { + return PDataTypeFactory.getInstance().instanceFromClass(input); + } + }).toArray()), actual.toString(), location); + } - public ArgumentTypeMismatchException(String expected, String actual, String location) { - super(new SQLExceptionInfo.Builder(code).setMessage("expected: " + expected + " but was: " + actual + " at " + location).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public ArgumentTypeMismatchException(String expected, String actual, String location) { + super(new SQLExceptionInfo.Builder(code) + .setMessage("expected: " + expected + " but was: " + actual + " at " + location).build() + .toString(), code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnAlreadyExistsException.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnAlreadyExistsException.java index 07b36243c4f..e713437d55c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,39 +22,34 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - /** - * * Exception thrown when a column already exists. - * - * * @since 0.1 */ public class ColumnAlreadyExistsException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.COLUMN_EXIST_IN_DEF; - private final String schemaName; - private final String tableName; - private final String columnName; - - public ColumnAlreadyExistsException(String schemaName, String tableName, String columnName) { - super(new SQLExceptionInfo.Builder(code).setColumnName(columnName) - .setSchemaName(schemaName).setTableName(tableName).build().toString(), - code.getSQLState(), code.getErrorCode(), null); - this.schemaName = schemaName; - this.tableName = tableName; - this.columnName = columnName; - } - - public String getTableName() { - return tableName; - } - - public String getSchemaName() { - return schemaName; - } - - public String getColumnName() { - return columnName; - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.COLUMN_EXIST_IN_DEF; + private final String schemaName; + private final String tableName; + private final String columnName; + + public ColumnAlreadyExistsException(String schemaName, String tableName, String columnName) { + super(new SQLExceptionInfo.Builder(code).setColumnName(columnName).setSchemaName(schemaName) + .setTableName(tableName).build().toString(), code.getSQLState(), code.getErrorCode(), null); + this.schemaName = schemaName; + this.tableName = tableName; + this.columnName = columnName; + } + + public String getTableName() { + return tableName; + } + + public String getSchemaName() { + return schemaName; + } + + public String getColumnName() { + return columnName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnFamilyNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnFamilyNotFoundException.java index d6902e229c8..f5bdb738f72 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnFamilyNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnFamilyNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,24 +21,21 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when a family name could not be found in the schema - * - * * @since 0.1 */ public class ColumnFamilyNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.COLUMN_FAMILY_NOT_FOUND; - private final String familyName; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.COLUMN_FAMILY_NOT_FOUND; + private final String familyName; - public ColumnFamilyNotFoundException(String schemaName, String tableName, String familyName) { - super(new SQLExceptionInfo.Builder(code).setFamilyName(familyName).build().toString(), - code.getSQLState(), code.getErrorCode(), schemaName, tableName,null); - this.familyName = familyName; - } + public ColumnFamilyNotFoundException(String schemaName, String tableName, String familyName) { + super(new SQLExceptionInfo.Builder(code).setFamilyName(familyName).build().toString(), + code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); + this.familyName = familyName; + } - public String getFamilyName() { - return familyName; - } + public String getFamilyName() { + return familyName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnMetaDataOps.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnMetaDataOps.java index f8d8a830a42..caea08fc30a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnMetaDataOps.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnMetaDataOps.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,22 +17,6 @@ */ package org.apache.phoenix.schema; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.exception.SQLExceptionInfo; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.parse.ColumnDef; -import org.apache.phoenix.parse.ColumnName; -import org.apache.phoenix.parse.PrimaryKeyConstraint; -import org.apache.phoenix.query.QueryConstants; -import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.apache.phoenix.util.SchemaUtil; - -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Types; - import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY; @@ -57,154 +41,186 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT; import static org.apache.phoenix.schema.MetaDataClient.ALTER_SYSCATALOG_TABLE_UPGRADE; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Types; + +import org.apache.hadoop.hbase.util.Pair; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.exception.SQLExceptionInfo; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.parse.ColumnDef; +import org.apache.phoenix.parse.ColumnName; +import org.apache.phoenix.parse.PrimaryKeyConstraint; +import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.apache.phoenix.util.SchemaUtil; + public class ColumnMetaDataOps { - public static final String UPSERT_COLUMN = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - NULLABLE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - DATA_TABLE_NAME + "," + // write this both in the column and table rows for access by metadata APIs - ARRAY_SIZE + "," + - VIEW_CONSTANT + "," + - IS_VIEW_REFERENCED + "," + - PK_NAME + "," + // write this both in the column and table rows for access by metadata APIs - KEY_SEQ + "," + - COLUMN_DEF + "," + - COLUMN_QUALIFIER + ", " + - IS_ROW_TIMESTAMP + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + public static final String UPSERT_COLUMN = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_NAME + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + NULLABLE + "," + COLUMN_SIZE + "," + + DECIMAL_DIGITS + "," + ORDINAL_POSITION + "," + SORT_ORDER + "," + DATA_TABLE_NAME + "," + // write + // this + // both + // in + // the + // column + // and + // table + // rows + // for + // access + // by + // metadata + // APIs + ARRAY_SIZE + "," + VIEW_CONSTANT + "," + IS_VIEW_REFERENCED + "," + PK_NAME + "," + // write + // this both + // in the + // column + // and table + // rows for + // 
access by + // metadata + // APIs + KEY_SEQ + "," + COLUMN_DEF + "," + COLUMN_QUALIFIER + ", " + IS_ROW_TIMESTAMP + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - public static void addColumnMutation(PhoenixConnection connection, String tenantId, String schemaName, String tableName, PColumn column, String parentTableName, String pkName, Short keySeq, boolean isSalted) throws SQLException { - addColumnMutationInternal(connection, tenantId, schemaName, tableName, column, parentTableName, pkName, keySeq, isSalted); - } + public static void addColumnMutation(PhoenixConnection connection, String tenantId, + String schemaName, String tableName, PColumn column, String parentTableName, String pkName, + Short keySeq, boolean isSalted) throws SQLException { + addColumnMutationInternal(connection, tenantId, schemaName, tableName, column, parentTableName, + pkName, keySeq, isSalted); + } - public static void addColumnMutation(PhoenixConnection connection, String schemaName, String tableName, PColumn column, String parentTableName, String pkName, Short keySeq, boolean isSalted) throws SQLException { - addColumnMutationInternal(connection, connection.getTenantId() == null ? null : connection.getTenantId().getString() - , schemaName, tableName, column, parentTableName, pkName, keySeq, isSalted); - } + public static void addColumnMutation(PhoenixConnection connection, String schemaName, + String tableName, PColumn column, String parentTableName, String pkName, Short keySeq, + boolean isSalted) throws SQLException { + addColumnMutationInternal(connection, + connection.getTenantId() == null ? null : connection.getTenantId().getString(), schemaName, + tableName, column, parentTableName, pkName, keySeq, isSalted); + } - private static void addColumnMutationInternal(PhoenixConnection connection, String tenantId, String schemaName, String tableName, PColumn column, String parentTableName, String pkName, Short keySeq, boolean isSalted) throws SQLException { - String addColumnSqlToUse = connection.isRunningUpgrade() - && tableName.equals(SYSTEM_CATALOG_TABLE) - && schemaName.equals(SYSTEM_CATALOG_SCHEMA) ? ALTER_SYSCATALOG_TABLE_UPGRADE - : UPSERT_COLUMN; + private static void addColumnMutationInternal(PhoenixConnection connection, String tenantId, + String schemaName, String tableName, PColumn column, String parentTableName, String pkName, + Short keySeq, boolean isSalted) throws SQLException { + String addColumnSqlToUse = connection.isRunningUpgrade() + && tableName.equals(SYSTEM_CATALOG_TABLE) && schemaName.equals(SYSTEM_CATALOG_SCHEMA) + ? ALTER_SYSCATALOG_TABLE_UPGRADE + : UPSERT_COLUMN; - try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) { - colUpsert.setString(1, tenantId); - colUpsert.setString(2, schemaName); - colUpsert.setString(3, tableName); - colUpsert.setString(4, column.getName().getString()); - colUpsert.setString(5, column.getFamilyName() == null ? null : column.getFamilyName().getString()); - colUpsert.setInt(6, column.getDataType().getSqlType()); - colUpsert.setInt(7, SchemaUtil.getIsNullableInt(column.isNullable())); - if (column.getMaxLength() == null) { - colUpsert.setNull(8, Types.INTEGER); - } else { - colUpsert.setInt(8, column.getMaxLength()); - } - if (column.getScale() == null) { - colUpsert.setNull(9, Types.INTEGER); - } else { - colUpsert.setInt(9, column.getScale()); - } - colUpsert.setInt(10, column.getPosition() + (isSalted ? 
0 : 1)); - colUpsert.setInt(11, column.getSortOrder().getSystemValue()); - colUpsert.setString(12, parentTableName); - if (column.getArraySize() == null) { - colUpsert.setNull(13, Types.INTEGER); - } else { - colUpsert.setInt(13, column.getArraySize()); - } - colUpsert.setBytes(14, column.getViewConstant()); - colUpsert.setBoolean(15, column.isViewReferenced()); - colUpsert.setString(16, pkName); - if (keySeq == null) { - colUpsert.setNull(17, Types.SMALLINT); - } else { - colUpsert.setShort(17, keySeq); - } - if (column.getExpressionStr() == null) { - colUpsert.setNull(18, Types.VARCHAR); - } else { - colUpsert.setString(18, column.getExpressionStr()); - } - //Do not try to set extra columns when using ALTER_SYSCATALOG_TABLE_UPGRADE - if (colUpsert.getParameterMetaData().getParameterCount() > 18) { - if (column.getColumnQualifierBytes() == null) { - colUpsert.setNull(19, Types.VARBINARY); - } else { - colUpsert.setBytes(19, column.getColumnQualifierBytes()); - } - colUpsert.setBoolean(20, column.isRowTimestamp()); - } - colUpsert.execute(); + try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) { + colUpsert.setString(1, tenantId); + colUpsert.setString(2, schemaName); + colUpsert.setString(3, tableName); + colUpsert.setString(4, column.getName().getString()); + colUpsert.setString(5, + column.getFamilyName() == null ? null : column.getFamilyName().getString()); + colUpsert.setInt(6, column.getDataType().getSqlType()); + colUpsert.setInt(7, SchemaUtil.getIsNullableInt(column.isNullable())); + if (column.getMaxLength() == null) { + colUpsert.setNull(8, Types.INTEGER); + } else { + colUpsert.setInt(8, column.getMaxLength()); + } + if (column.getScale() == null) { + colUpsert.setNull(9, Types.INTEGER); + } else { + colUpsert.setInt(9, column.getScale()); + } + colUpsert.setInt(10, column.getPosition() + (isSalted ? 
0 : 1)); + colUpsert.setInt(11, column.getSortOrder().getSystemValue()); + colUpsert.setString(12, parentTableName); + if (column.getArraySize() == null) { + colUpsert.setNull(13, Types.INTEGER); + } else { + colUpsert.setInt(13, column.getArraySize()); + } + colUpsert.setBytes(14, column.getViewConstant()); + colUpsert.setBoolean(15, column.isViewReferenced()); + colUpsert.setString(16, pkName); + if (keySeq == null) { + colUpsert.setNull(17, Types.SMALLINT); + } else { + colUpsert.setShort(17, keySeq); + } + if (column.getExpressionStr() == null) { + colUpsert.setNull(18, Types.VARCHAR); + } else { + colUpsert.setString(18, column.getExpressionStr()); + } + // Do not try to set extra columns when using ALTER_SYSCATALOG_TABLE_UPGRADE + if (colUpsert.getParameterMetaData().getParameterCount() > 18) { + if (column.getColumnQualifierBytes() == null) { + colUpsert.setNull(19, Types.VARBINARY); + } else { + colUpsert.setBytes(19, column.getColumnQualifierBytes()); } + colUpsert.setBoolean(20, column.isRowTimestamp()); + } + colUpsert.execute(); } + } - public static PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint, String defaultColumnFamily, - boolean addingToPK, byte[] columnQualifierBytes, boolean isImmutableRows) throws SQLException { - try { - ColumnName columnDefName = def.getColumnDefName(); - SortOrder sortOrder = def.getSortOrder(); - boolean isPK = def.isPK(); - boolean isRowTimestamp = def.isRowTimestamp(); - if (pkConstraint != null) { - Pair pkSortOrder = pkConstraint.getColumnWithSortOrder(columnDefName); - if (pkSortOrder != null) { - isPK = true; - sortOrder = pkSortOrder.getSecond(); - isRowTimestamp = pkConstraint.isColumnRowTimestamp(columnDefName); - } - } - String columnName = columnDefName.getColumnName(); - if (isPK && sortOrder == SortOrder.DESC && def.getDataType() == PVarbinary.INSTANCE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED) - .setColumnName(columnName) - .build().buildException(); - } + public static PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint, + String defaultColumnFamily, boolean addingToPK, byte[] columnQualifierBytes, + boolean isImmutableRows) throws SQLException { + try { + ColumnName columnDefName = def.getColumnDefName(); + SortOrder sortOrder = def.getSortOrder(); + boolean isPK = def.isPK(); + boolean isRowTimestamp = def.isRowTimestamp(); + if (pkConstraint != null) { + Pair pkSortOrder = + pkConstraint.getColumnWithSortOrder(columnDefName); + if (pkSortOrder != null) { + isPK = true; + sortOrder = pkSortOrder.getSecond(); + isRowTimestamp = pkConstraint.isColumnRowTimestamp(columnDefName); + } + } + String columnName = columnDefName.getColumnName(); + if (isPK && sortOrder == SortOrder.DESC && def.getDataType() == PVarbinary.INSTANCE) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED) + .setColumnName(columnName).build().buildException(); + } - PName familyName = null; - if (def.isPK() && !pkConstraint.getColumnNames().isEmpty() ) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS) - .setColumnName(columnName).build().buildException(); - } - boolean isNull = def.isNull(); - if (def.getColumnDefName().getFamilyName() != null) { - String family = def.getColumnDefName().getFamilyName(); - if (isPK) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME) - .setColumnName(columnName).setFamilyName(family).build().buildException(); - } else if 
(!def.isNull() && !isImmutableRows) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) - .setColumnName(columnName).setFamilyName(family).build().buildException(); - } - familyName = PNameFactory.newName(family); - } else if (!isPK) { - familyName = PNameFactory.newName(defaultColumnFamily == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : defaultColumnFamily); - } + PName familyName = null; + if (def.isPK() && !pkConstraint.getColumnNames().isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS) + .setColumnName(columnName).build().buildException(); + } + boolean isNull = def.isNull(); + if (def.getColumnDefName().getFamilyName() != null) { + String family = def.getColumnDefName().getFamilyName(); + if (isPK) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME) + .setColumnName(columnName).setFamilyName(family).build().buildException(); + } else if (!def.isNull() && !isImmutableRows) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) + .setColumnName(columnName).setFamilyName(family).build().buildException(); + } + familyName = PNameFactory.newName(family); + } else if (!isPK) { + familyName = PNameFactory.newName( + defaultColumnFamily == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : defaultColumnFamily); + } - if (isPK && !addingToPK && pkConstraint.getColumnNames().size() <= 1) { - if (def.isNull() && def.isNullSet()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_PK_MAY_NOT_BE_NULL) - .setColumnName(columnName).build().buildException(); - } - isNull = false; - } - PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, def.getDataType(), - def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), - null, false, def.getExpression(), isRowTimestamp, - false, columnQualifierBytes, EnvironmentEdgeManager.currentTimeMillis()); - return column; - } catch (IllegalArgumentException e) { // Based on precondition check in constructor - throw new SQLException(e); + if (isPK && !addingToPK && pkConstraint.getColumnNames().size() <= 1) { + if (def.isNull() && def.isNullSet()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_PK_MAY_NOT_BE_NULL) + .setColumnName(columnName).build().buildException(); } + isNull = false; + } + PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, + def.getDataType(), def.getMaxLength(), def.getScale(), isNull, position, sortOrder, + def.getArraySize(), null, false, def.getExpression(), isRowTimestamp, false, + columnQualifierBytes, EnvironmentEdgeManager.currentTimeMillis()); + return column; + } catch (IllegalArgumentException e) { // Based on precondition check in constructor + throw new SQLException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnModifier.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnModifier.java index e7a71f7fce0..e6be95d1484 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnModifier.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnModifier.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,130 +18,131 @@ package org.apache.phoenix.schema; import org.apache.hadoop.hbase.CompareOperator; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * A ColumnModifier implementation modifies how bytes are stored in a primary key column. - * The {@link ColumnModifier#apply apply} method is called when the bytes for a specific column are first written to HBase and again - * when they are read back. Phoenix attemps to minimize calls to apply when bytes are read out of HBase. - * - * + * A ColumnModifier implementation modifies how bytes are stored in a primary key column. The + * {@link ColumnModifier#apply apply} method is called when the bytes for a specific column are + * first written to HBase and again when they are read back. Phoenix attemps to minimize calls to + * apply when bytes are read out of HBase. * @since 1.2 */ public enum ColumnModifier { - /** - * Invert the bytes in the src byte array to support descending ordering of row keys. - */ - SORT_DESC(1) { - @Override - public byte[] apply(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length) { - Preconditions.checkNotNull(src); - Preconditions.checkNotNull(dest); - for (int i = 0; i < length; i++) { - dest[dstOffset+i] = (byte)(src[srcOffset+i] ^ 0xFF); - } - return dest; - } - - @Override - public byte apply(byte b) { - return (byte)(b ^ 0xFF); - } + /** + * Invert the bytes in the src byte array to support descending ordering of row keys. + */ + SORT_DESC(1) { + @Override + public byte[] apply(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length) { + Preconditions.checkNotNull(src); + Preconditions.checkNotNull(dest); + for (int i = 0; i < length; i++) { + dest[dstOffset + i] = (byte) (src[srcOffset + i] ^ 0xFF); + } + return dest; + } - @Override - public CompareOperator transform(CompareOperator op) { - switch (op) { - case EQUAL: - return op; - case GREATER: - return CompareOperator.LESS; - case GREATER_OR_EQUAL: - return CompareOperator.LESS_OR_EQUAL; - case LESS: - return CompareOperator.GREATER; - case LESS_OR_EQUAL: - return CompareOperator.GREATER_OR_EQUAL; - default: - throw new IllegalArgumentException("Unknown operator " + op); - } - } + @Override + public byte apply(byte b) { + return (byte) (b ^ 0xFF); + } - @Override - public byte[] apply(byte[] src, int srcOffset, int length) { - return apply(src, srcOffset, new byte[length], 0, length); - } - }; - - private final int serializationId; - - ColumnModifier(int serializationId) { - this.serializationId = serializationId; + @Override + public CompareOperator transform(CompareOperator op) { + switch (op) { + case EQUAL: + return op; + case GREATER: + return CompareOperator.LESS; + case GREATER_OR_EQUAL: + return CompareOperator.LESS_OR_EQUAL; + case LESS: + return CompareOperator.GREATER; + case LESS_OR_EQUAL: + return CompareOperator.GREATER_OR_EQUAL; + default: + throw new IllegalArgumentException("Unknown operator " + op); + } } - - public int getSerializationId() { - return serializationId; + + @Override + public byte[] apply(byte[] src, int srcOffset, int length) { + return apply(src, srcOffset, new byte[length], 0, length); } - /** - * Returns the ColumnModifier for the specified DDL stmt keyword. 
- */ - public static ColumnModifier fromDDLValue(String modifier) { - if (modifier == null) { - return null; - } else if (modifier.equalsIgnoreCase("ASC")) { - return null; - } else if (modifier.equalsIgnoreCase("DESC")) { - return SORT_DESC; - } else { - return null; - } + }; + + private final int serializationId; + + ColumnModifier(int serializationId) { + this.serializationId = serializationId; + } + + public int getSerializationId() { + return serializationId; + } + + /** + * Returns the ColumnModifier for the specified DDL stmt keyword. + */ + public static ColumnModifier fromDDLValue(String modifier) { + if (modifier == null) { + return null; + } else if (modifier.equalsIgnoreCase("ASC")) { + return null; + } else if (modifier.equalsIgnoreCase("DESC")) { + return SORT_DESC; + } else { + return null; } + } - /** - * Returns the ColumnModifier for the specified internal value. - */ - public static ColumnModifier fromSystemValue(int value) { - for (ColumnModifier mod : ColumnModifier.values()) { - if (mod.getSerializationId() == value) { - return mod; - } - } - return null; + /** + * Returns the ColumnModifier for the specified internal value. + */ + public static ColumnModifier fromSystemValue(int value) { + for (ColumnModifier mod : ColumnModifier.values()) { + if (mod.getSerializationId() == value) { + return mod; + } } + return null; + } - /** - * Returns an internal value representing the specified ColumnModifier. - */ - public static int toSystemValue(ColumnModifier columnModifier) { - if (columnModifier == null) { - return 0; - } - return columnModifier.getSerializationId(); + /** + * Returns an internal value representing the specified ColumnModifier. + */ + public static int toSystemValue(ColumnModifier columnModifier) { + if (columnModifier == null) { + return 0; } + return columnModifier.getSerializationId(); + } + + /** + * Copies the bytes from source array to destination array and applies the column modifier + * operation on the bytes starting at the specified offsets. The column modifier is applied to the + * number of bytes matching the specified length. + * @param src the source byte array to copy from, cannot be null + * @param srcOffset the offset into the source byte array at which to begin. + * @param dest the destination byte array into which to transfer the modified bytes. + * @param dstOffset the offset into the destination byte array at which to begin + * @param length the number of bytes for which to apply the modification + * @return the destination byte array + */ + public abstract byte[] apply(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length); + + /** + * Copies the bytes from source array to a newly allocated destination array and applies the + * column modifier operation on the bytes starting at the specified offsets. The column modifier + * is applied to the number of bytes matching the specified length. + * @param src the source byte array to copy from, cannot be null + * @param srcOffset the offset into the source byte array at which to begin. + * @param length the number of bytes for which to apply the modification + * @return the newly allocated destination byte array + */ + public abstract byte[] apply(byte[] src, int srcOffset, int length); + + public abstract byte apply(byte b); - /** - * Copies the bytes from source array to destination array and applies the column modifier operation on the bytes - * starting at the specified offsets. The column modifier is applied to the number of bytes matching the - * specified length. 
- * @param src the source byte array to copy from, cannot be null - * @param srcOffset the offset into the source byte array at which to begin. - * @param dest the destination byte array into which to transfer the modified bytes. - * @param dstOffset the offset into the destination byte array at which to begin - * @param length the number of bytes for which to apply the modification - * @return the destination byte array - */ - public abstract byte[] apply(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length); - /** - * Copies the bytes from source array to a newly allocated destination array and applies the column - * modifier operation on the bytes starting at the specified offsets. The column modifier is applied - * to the number of bytes matching the specified length. - * @param src the source byte array to copy from, cannot be null - * @param srcOffset the offset into the source byte array at which to begin. - * @param length the number of bytes for which to apply the modification - * @return the newly allocated destination byte array - */ - public abstract byte[] apply(byte[] src, int srcOffset, int length); - public abstract byte apply(byte b); - - public abstract CompareOperator transform(CompareOperator op); + public abstract CompareOperator transform(CompareOperator op); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnNotFoundException.java index 00b65f2299a..ccfca402902 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,32 +20,30 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - /** - * - * Exception thrown when a column name referenced in a select - * statement cannot be found in any table. - * - * + * Exception thrown when a column name referenced in a select statement cannot be found in any + * table. 
* @since 0.1 */ public class ColumnNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.COLUMN_NOT_FOUND; - private final String columnName; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.COLUMN_NOT_FOUND; + private final String columnName; - public ColumnNotFoundException(String columnName) { - this(null, null, null, columnName); - } + public ColumnNotFoundException(String columnName) { + this(null, null, null, columnName); + } - public ColumnNotFoundException(String schemaName, String tableName, String familyName, String columnName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) - .setFamilyName(familyName).setColumnName(columnName).build().toString(), - code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); - this.columnName = columnName; - } + public ColumnNotFoundException(String schemaName, String tableName, String familyName, + String columnName) { + super( + new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setFamilyName(familyName).setColumnName(columnName).build().toString(), + code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); + this.columnName = columnName; + } - public String getColumnName() { - return columnName; - } + public String getColumnName() { + return columnName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnRef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnRef.java index 2618e0fa6b8..d342700f836 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnRef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,8 +21,6 @@ import java.sql.SQLException; import java.util.Arrays; -import net.jcip.annotations.Immutable; - import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.compile.ExpressionCompiler; import org.apache.phoenix.compile.StatementContext; @@ -41,136 +39,139 @@ import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; +import net.jcip.annotations.Immutable; /** - * * Class that represents a reference to a PColumn in a PTable - * - * * @since 0.1 */ @Immutable public class ColumnRef { - private final TableRef tableRef; - private final int columnPosition; - private final int pkSlotPosition; - - protected ColumnRef(ColumnRef columnRef, long timeStamp) { - this.tableRef = new TableRef(columnRef.tableRef, timeStamp); - this.columnPosition = columnRef.columnPosition; - this.pkSlotPosition = columnRef.pkSlotPosition; + private final TableRef tableRef; + private final int columnPosition; + private final int pkSlotPosition; + + protected ColumnRef(ColumnRef columnRef, long timeStamp) { + this.tableRef = new TableRef(columnRef.tableRef, timeStamp); + this.columnPosition = columnRef.columnPosition; + this.pkSlotPosition = columnRef.pkSlotPosition; + } + + public ColumnRef(TableRef tableRef, String familyName, String columnName) + throws MetaDataEntityNotFoundException { + this(tableRef, tableRef.getTable().getColumnFamily(familyName) + .getPColumnForColumnName(columnName).getPosition()); + } + + public ColumnRef(TableRef tableRef, int columnPosition) { + if (tableRef == null) { + throw new NullPointerException(); } - - public ColumnRef(TableRef tableRef, String familyName, String columnName) throws MetaDataEntityNotFoundException { - this(tableRef, tableRef.getTable().getColumnFamily(familyName).getPColumnForColumnName(columnName).getPosition()); + if (columnPosition < 0 || columnPosition >= tableRef.getTable().getColumns().size()) { + throw new IllegalArgumentException("Column position of " + columnPosition + + " must be between 0 and " + tableRef.getTable().getColumns().size() + " for table " + + tableRef.getTable().getName().getString()); } - - public ColumnRef(TableRef tableRef, int columnPosition) { - if (tableRef == null) { - throw new NullPointerException(); - } - if (columnPosition < 0 || columnPosition >= tableRef.getTable().getColumns().size()) { - throw new IllegalArgumentException("Column position of " + columnPosition + " must be between 0 and " + tableRef.getTable().getColumns().size() + " for table " + tableRef.getTable().getName().getString()); - } - this.tableRef = tableRef; - this.columnPosition = columnPosition; - PColumn column = getColumn(); - int i = -1; - if (SchemaUtil.isPKColumn(column)) { - for (PColumn pkColumn : tableRef.getTable().getPKColumns()) { - i++; - if (pkColumn == column) { - break; - } - } + this.tableRef = tableRef; + this.columnPosition = columnPosition; + PColumn column = getColumn(); + int i = -1; + if (SchemaUtil.isPKColumn(column)) { + for (PColumn pkColumn : tableRef.getTable().getPKColumns()) { + i++; + if (pkColumn == column) { + break; } - pkSlotPosition = i; + } } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + columnPosition; - result = prime * 
result + tableRef.hashCode(); - return result; + pkSlotPosition = i; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + columnPosition; + result = prime * result + tableRef.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ColumnRef other = (ColumnRef) obj; + if (columnPosition != other.columnPosition) return false; + if (!tableRef.equals(other.tableRef)) return false; + return true; + } + + public Expression newColumnExpression() throws SQLException { + return newColumnExpression(false, false); + } + + public Expression newColumnExpression(boolean schemaNameCaseSensitive, + boolean colNameCaseSensitive) throws SQLException { + PTable table = tableRef.getTable(); + PColumn column = this.getColumn(); + String displayName = + tableRef.getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive); + if (SchemaUtil.isPKColumn(column)) { + return new RowKeyColumnExpression(column, + new RowKeyValueAccessor(table.getPKColumns(), pkSlotPosition), displayName); } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ColumnRef other = (ColumnRef)obj; - if (columnPosition != other.columnPosition) return false; - if (!tableRef.equals(other.tableRef)) return false; - return true; + if ( + table.getType() == PTableType.PROJECTED || table.getType() == PTableType.SUBQUERY + || table.getType() == PTableType.CDC + ) { + return new ProjectedColumnExpression(column, table, displayName); } - public Expression newColumnExpression() throws SQLException { - return newColumnExpression(false, false); + Expression expression = + table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ? 
new SingleCellColumnExpression(column, displayName, table.getEncodingScheme(), + table.getImmutableStorageScheme()) + : new KeyValueColumnExpression(column, displayName); + + if (column.getExpressionStr() != null) { + String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + PhoenixRuntime.CONNECTIONLESS; + PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class); + StatementContext context = new StatementContext(new PhoenixStatement(conn)); + + ExpressionCompiler compiler = new ExpressionCompiler(context); + ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression(); + Expression defaultExpression = defaultParseNode.accept(compiler); + if (!ExpressionUtil.isNull(defaultExpression, new ImmutableBytesWritable())) { + return new DefaultValueExpression(Arrays.asList(expression, defaultExpression)); + } } + return expression; + } - public Expression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) throws SQLException { - PTable table = tableRef.getTable(); - PColumn column = this.getColumn(); - String displayName = tableRef.getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive); - if (SchemaUtil.isPKColumn(column)) { - return new RowKeyColumnExpression( - column, - new RowKeyValueAccessor(table.getPKColumns(), pkSlotPosition), - displayName); - } - - if (table.getType() == PTableType.PROJECTED || table.getType() == PTableType.SUBQUERY || - table.getType() == PTableType.CDC) { - return new ProjectedColumnExpression(column, table, displayName); - } + public ColumnRef cloneAtTimestamp(long timestamp) { + return new ColumnRef(this, timestamp); + } - Expression expression = table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? 
- new SingleCellColumnExpression(column, displayName, - table.getEncodingScheme(), table.getImmutableStorageScheme()) - : new KeyValueColumnExpression(column, displayName); - - if (column.getExpressionStr() != null) { - String url = PhoenixRuntime.JDBC_PROTOCOL - + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + PhoenixRuntime.CONNECTIONLESS; - PhoenixConnection conn = - DriverManager.getConnection(url).unwrap(PhoenixConnection.class); - StatementContext context = new StatementContext(new PhoenixStatement(conn)); - - ExpressionCompiler compiler = new ExpressionCompiler(context); - ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression(); - Expression defaultExpression = defaultParseNode.accept(compiler); - if (!ExpressionUtil.isNull(defaultExpression, new ImmutableBytesWritable())) { - return new DefaultValueExpression(Arrays.asList(expression, defaultExpression)); - } - } - return expression; - } + public int getColumnPosition() { + return columnPosition; + } - public ColumnRef cloneAtTimestamp(long timestamp) { - return new ColumnRef(this, timestamp); - } + public int getPKSlotPosition() { + return pkSlotPosition; + } - public int getColumnPosition() { - return columnPosition; - } - - public int getPKSlotPosition() { - return pkSlotPosition; - } - - public PColumn getColumn() { - return tableRef.getTable().getColumns().get(columnPosition); - } + public PColumn getColumn() { + return tableRef.getTable().getColumns().get(columnPosition); + } - public PTable getTable() { - return tableRef.getTable(); - } - - public TableRef getTableRef() { - return tableRef; - } + public PTable getTable() { + return tableRef.getTable(); + } + + public TableRef getTableRef() { + return tableRef; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueDecoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueDecoder.java index 5ae72d1c6c5..0712170d3c4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueDecoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueDecoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,12 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; /** - * Interface to decode column values that are stored in a byte[] + * Interface to decode column values that are stored in a byte[] */ public interface ColumnValueDecoder { - /** - * sets the ptr to the column value at the given index - * @return false if the column value is absent (used to support DEFAULT expressions) or else true - */ - boolean decode(ImmutableBytesWritable ptr, int index); + /** + * sets the ptr to the column value at the given index + * @return false if the column value is absent (used to support DEFAULT expressions) or else true + */ + boolean decode(ImmutableBytesWritable ptr, int index); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueEncoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueEncoder.java index 5e930bdb17b..1035b3aa808 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueEncoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ColumnValueEncoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,27 +19,24 @@ import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; - /** * Interface to encode column values into a serialized byte[] that will be stored in a single cell - * The last byte of the serialized byte[] should be the serialized value of the {@link ImmutableStorageScheme} - * that was used. + * The last byte of the serialized byte[] should be the serialized value of the + * {@link ImmutableStorageScheme} that was used. */ public interface ColumnValueEncoder { - - /** - * append a column value to the array - */ - void appendValue(byte[] bytes, int offset, int length); - - /** - * append a value that is not present to the array (used to support DEFAULT expressions) - */ - void appendAbsentValue(); - - /** - * @return the encoded byte[] that contains the serialized column values - */ - byte[] encode(); - -} \ No newline at end of file + + /** + * append a column value to the array + */ + void appendValue(byte[] bytes, int offset, int length); + + /** + * append a value that is not present to the array (used to support DEFAULT expressions) + */ + void appendAbsentValue(); + + /** Returns the encoded byte[] that contains the serialized column values */ + byte[] encode(); + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ComparisonNotSupportedException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ComparisonNotSupportedException.java index 8449c77bf98..0015a99a076 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ComparisonNotSupportedException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ComparisonNotSupportedException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,8 +22,8 @@ import org.apache.phoenix.schema.types.PDataType; public class ComparisonNotSupportedException extends RuntimeException { - public ComparisonNotSupportedException(PDataType pDataType) { - super(new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) - .setMessage(" for type " + pDataType.toString()).build().buildException()); - } -} \ No newline at end of file + public ComparisonNotSupportedException(PDataType pDataType) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.COMPARISON_UNSUPPORTED) + .setMessage(" for type " + pDataType.toString()).build().buildException()); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConcurrentTableMutationException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConcurrentTableMutationException.java index f4ee33f0d42..474136b8e6f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConcurrentTableMutationException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConcurrentTableMutationException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,25 +22,24 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - public class ConcurrentTableMutationException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.CONCURRENT_TABLE_MUTATION; - private final String schemaName; - private final String tableName; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.CONCURRENT_TABLE_MUTATION; + private final String schemaName; + private final String tableName; - public ConcurrentTableMutationException(String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).build().toString(), - code.getSQLState(), code.getErrorCode()); - this.schemaName = schemaName; - this.tableName = tableName; - } + public ConcurrentTableMutationException(String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .build().toString(), code.getSQLState(), code.getErrorCode()); + this.schemaName = schemaName; + this.tableName = tableName; + } - public String getTableName() { - return tableName; - } + public String getTableName() { + return tableName; + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConnectionProperty.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConnectionProperty.java index c44490e0ca2..1671daac7ea 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConnectionProperty.java +++ 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConnectionProperty.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,35 +21,35 @@ import org.apache.phoenix.query.QueryServicesOptions; public enum ConnectionProperty { - /** - * Connection level property phoenix.default.update.cache.frequency - */ - UPDATE_CACHE_FREQUENCY() { - @Override - public Object getValue(String value) { - if (value == null) { - return QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY; - } - - if ("ALWAYS".equalsIgnoreCase(value)) { - return 0L; - } + /** + * Connection level property phoenix.default.update.cache.frequency + */ + UPDATE_CACHE_FREQUENCY() { + @Override + public Object getValue(String value) { + if (value == null) { + return QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY; + } - if ("NEVER".equalsIgnoreCase(value)) { - return Long.MAX_VALUE; - } + if ("ALWAYS".equalsIgnoreCase(value)) { + return 0L; + } - try { - return Long.parseLong(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Connection's " + - QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB + - " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); - } - } - }; + if ("NEVER".equalsIgnoreCase(value)) { + return Long.MAX_VALUE; + } - public Object getValue(String value) { - return value; + try { + return Long.parseLong(value); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "Connection's " + QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB + + " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); + } } + }; + + public Object getValue(String value) { + return value; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConstraintViolationException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConstraintViolationException.java index a86b16ab6a6..fcb447ed507 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConstraintViolationException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ConstraintViolationException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,26 +21,22 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * - * Exception thrown when a schema constraint is violated at the - * time of data insertion. - * - * + * Exception thrown when a schema constraint is violated at the time of data insertion. 
* @since 180 */ public class ConstraintViolationException extends RuntimeException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; + + public ConstraintViolationException() { + this((String) null); + } + + public ConstraintViolationException(String message) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.CONSTRAINT_VIOLATION).setMessage(message) + .build().buildException()); + } - public ConstraintViolationException() { - this((String)null); - } - - public ConstraintViolationException(String message) { - super(new SQLExceptionInfo.Builder( - SQLExceptionCode.CONSTRAINT_VIOLATION).setMessage(message).build().buildException()); - } - - public ConstraintViolationException(Throwable cause) { - super(cause); // Already wrapped - don't rewrap - } + public ConstraintViolationException(Throwable cause) { + super(cause); // Already wrapped - don't rewrap + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateColumn.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateColumn.java index 4d9abaf9f14..db34328e23f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateColumn.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,102 +20,103 @@ import org.apache.phoenix.util.SizedUtil; public class DelegateColumn extends DelegateDatum implements PColumn { - - public DelegateColumn(PColumn delegate) { - super(delegate); - } - - @Override - protected PColumn getDelegate() { - return (PColumn)super.getDelegate(); - } - - @Override - public PName getName() { - return getDelegate().getName(); - } - - @Override - public SortOrder getSortOrder() { - return getDelegate().getSortOrder(); - } - - @Override - public PName getFamilyName() { - return getDelegate().getFamilyName(); - } - - @Override - public int getPosition() { - return getDelegate().getPosition(); - } - - @Override - public Integer getArraySize() { - return getDelegate().getArraySize(); - } - - @Override - public byte[] getViewConstant() { - return getDelegate().getViewConstant(); - } - - @Override - public int getEstimatedSize() { - return SizedUtil.OBJECT_SIZE + getDelegate().getEstimatedSize(); - } - - @Override - public boolean isViewReferenced() { - return getDelegate().isViewReferenced(); - } - - @Override - public String getExpressionStr() { - return getDelegate().getExpressionStr(); - } - - @Override - public long getTimestamp() { - return getDelegate().getTimestamp(); - } - - @Override - public boolean isDerived() { - return getDelegate().isDerived(); - } - - @Override - public boolean isExcluded() { - return getDelegate().isExcluded(); - } - - @Override - public boolean isRowTimestamp() { - return getDelegate().isRowTimestamp(); - } - - @Override - public String toString() { - return getDelegate().toString(); - } - - @Override - public boolean isDynamic() { - return getDelegate().isDynamic(); - } - - @Override - public int hashCode() { - return getDelegate().hashCode(); - } - - @Override - public boolean equals(Object o) { - return getDelegate().equals(o); - } - @Override - public byte[] 
getColumnQualifierBytes() { - return getDelegate().getColumnQualifierBytes(); - } + + public DelegateColumn(PColumn delegate) { + super(delegate); + } + + @Override + protected PColumn getDelegate() { + return (PColumn) super.getDelegate(); + } + + @Override + public PName getName() { + return getDelegate().getName(); + } + + @Override + public SortOrder getSortOrder() { + return getDelegate().getSortOrder(); + } + + @Override + public PName getFamilyName() { + return getDelegate().getFamilyName(); + } + + @Override + public int getPosition() { + return getDelegate().getPosition(); + } + + @Override + public Integer getArraySize() { + return getDelegate().getArraySize(); + } + + @Override + public byte[] getViewConstant() { + return getDelegate().getViewConstant(); + } + + @Override + public int getEstimatedSize() { + return SizedUtil.OBJECT_SIZE + getDelegate().getEstimatedSize(); + } + + @Override + public boolean isViewReferenced() { + return getDelegate().isViewReferenced(); + } + + @Override + public String getExpressionStr() { + return getDelegate().getExpressionStr(); + } + + @Override + public long getTimestamp() { + return getDelegate().getTimestamp(); + } + + @Override + public boolean isDerived() { + return getDelegate().isDerived(); + } + + @Override + public boolean isExcluded() { + return getDelegate().isExcluded(); + } + + @Override + public boolean isRowTimestamp() { + return getDelegate().isRowTimestamp(); + } + + @Override + public String toString() { + return getDelegate().toString(); + } + + @Override + public boolean isDynamic() { + return getDelegate().isDynamic(); + } + + @Override + public int hashCode() { + return getDelegate().hashCode(); + } + + @Override + public boolean equals(Object o) { + return getDelegate().equals(o); + } + + @Override + public byte[] getColumnQualifierBytes() { + return getDelegate().getColumnQualifierBytes(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateDatum.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateDatum.java index 6e18cc52b48..da1ac8f674a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateDatum.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateDatum.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,38 +20,38 @@ import org.apache.phoenix.schema.types.PDataType; public class DelegateDatum implements PDatum { - private final PDatum delegate; - - public DelegateDatum(PDatum delegate) { - this.delegate = delegate; - } - - @Override - public boolean isNullable() { - return delegate.isNullable(); - } - - @Override - public PDataType getDataType() { - return delegate.getDataType(); - } - - @Override - public Integer getMaxLength() { - return delegate.getMaxLength(); - } - - @Override - public Integer getScale() { - return delegate.getScale(); - } - - @Override - public SortOrder getSortOrder() { - return delegate.getSortOrder(); - } - - protected PDatum getDelegate() { - return delegate; - } + private final PDatum delegate; + + public DelegateDatum(PDatum delegate) { + this.delegate = delegate; + } + + @Override + public boolean isNullable() { + return delegate.isNullable(); + } + + @Override + public PDataType getDataType() { + return delegate.getDataType(); + } + + @Override + public Integer getMaxLength() { + return delegate.getMaxLength(); + } + + @Override + public Integer getScale() { + return delegate.getScale(); + } + + @Override + public SortOrder getSortOrder() { + return delegate.getSortOrder(); + } + + protected PDatum getDelegate() { + return delegate; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateSQLException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateSQLException.java index 9ed4805a83f..bade6399e8f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateSQLException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateSQLException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,42 +21,42 @@ import java.util.Iterator; public class DelegateSQLException extends SQLException { - private final SQLException delegate; - private final String msg; - - public DelegateSQLException(SQLException e, String msg) { - this.delegate = e; - this.msg = e.getMessage() + msg; - } - - @Override - public String getMessage() { - return msg; - } - - @Override - public String getSQLState() { - return delegate.getSQLState(); - } - - @Override - public int getErrorCode() { - return delegate.getErrorCode(); - } - - @Override - public SQLException getNextException() { - return delegate.getNextException(); - } - - @Override - public void setNextException(SQLException ex) { - delegate.setNextException(ex); - } - - @Override - public Iterator iterator() { - return delegate.iterator(); - } + private final SQLException delegate; + private final String msg; + + public DelegateSQLException(SQLException e, String msg) { + this.delegate = e; + this.msg = e.getMessage() + msg; + } + + @Override + public String getMessage() { + return msg; + } + + @Override + public String getSQLState() { + return delegate.getSQLState(); + } + + @Override + public int getErrorCode() { + return delegate.getErrorCode(); + } + + @Override + public SQLException getNextException() { + return delegate.getNextException(); + } + + @Override + public void setNextException(SQLException ex) { + delegate.setNextException(ex); + } + + @Override + public Iterator iterator() { + return delegate.iterator(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateTable.java index e67e876dcd2..dbe20b3c67f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/DelegateTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,417 +33,435 @@ import org.apache.phoenix.transaction.TransactionFactory; public class DelegateTable implements PTable { - @Override - public long getTimeStamp() { - return delegate.getTimeStamp(); - } - - @Override - public long getIndexDisableTimestamp() { - return delegate.getIndexDisableTimestamp(); - } - - @Override - public boolean isIndexStateDisabled() { - return delegate.isIndexStateDisabled(); - } - - @Override - public long getSequenceNumber() { - return delegate.getSequenceNumber(); - } - - @Override - public PName getName() { - return delegate.getName(); - } - - @Override - public PName getSchemaName() { - return delegate.getSchemaName(); - } - - @Override - public PName getTableName() { - return delegate.getTableName(); - } - - @Override - public PName getTenantId() { - return delegate.getTenantId(); - } - - @Override - public PTableType getType() { - return delegate.getType(); - } - - @Override - public PName getPKName() { - return delegate.getPKName(); - } - - @Override - public List getPKColumns() { - return delegate.getPKColumns(); - } - - @Override - public List getColumns() { - return delegate.getColumns(); - } - - @Override - public List getExcludedColumns() { - return delegate.getExcludedColumns(); - } - - @Override - public List getColumnFamilies() { - return delegate.getColumnFamilies(); - } - - @Override - public boolean hasOnlyPkColumns() { - return delegate.hasOnlyPkColumns(); - } - - @Override - public PColumnFamily getColumnFamily(byte[] family) throws ColumnFamilyNotFoundException { - return delegate.getColumnFamily(family); - } - - @Override - public PColumnFamily getColumnFamily(String family) throws ColumnFamilyNotFoundException { - return delegate.getColumnFamily(family); - } - - @Override - public PColumn getColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException { - return delegate.getColumnForColumnName(name); - } - - @Override - public PColumn getPKColumn(String name) throws ColumnNotFoundException { - return delegate.getPKColumn(name); - } - - @Override - public PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, boolean hasOnDupKey, byte[]... values) { - return delegate.newRow(builder, ts, key, hasOnDupKey, values); - } - - @Override - public PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, byte[]... 
values) { - return delegate.newRow(builder, key, hasOnDupKey, values); - } - - @Override - public int newKey(ImmutableBytesWritable key, byte[][] values) { - return delegate.newKey(key, values); - } - - @Override - public RowKeySchema getRowKeySchema() { - return delegate.getRowKeySchema(); - } - - @Override - public Integer getBucketNum() { - return delegate.getBucketNum(); - } - - @Override - public List getIndexes() { return delegate.getIndexes(); } - - @Override - public PTable getTransformingNewTable() { return delegate.getTransformingNewTable(); } - - @Override - public PIndexState getIndexState() { - return delegate.getIndexState(); - } - - @Override - public PName getParentName() { - return delegate.getParentName(); - } - - @Override - public PName getParentTableName() { - return delegate.getParentTableName(); - } - - @Override - public PName getBaseTableLogicalName() { - return delegate.getBaseTableLogicalName(); - } - - @Override - public List getPhysicalNames() { - return delegate.getPhysicalNames(); - } - - @Override - public PName getPhysicalName() { - return delegate.getPhysicalName(); - } - - @Override - public PName getPhysicalName(boolean returnColValueFromSyscat) { - return delegate.getPhysicalName(returnColValueFromSyscat); - } - - @Override - public boolean isImmutableRows() { - return delegate.isImmutableRows(); - } - - @Override - public boolean getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) - throws SQLException { - return delegate.getIndexMaintainers(ptr, connection); - } - - @Override - public IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) - throws SQLException { - return delegate.getIndexMaintainer(dataTable, connection); - } - - @Override - public IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, - PhoenixConnection connection) throws SQLException { - return delegate.getIndexMaintainer(dataTable, cdcTable, connection); - } - - @Override - public TransformMaintainer getTransformMaintainer(PTable oldTable, PhoenixConnection connection) { - return delegate.getTransformMaintainer(oldTable, connection); - } - - @Override - public PName getDefaultFamilyName() { - return delegate.getDefaultFamilyName(); - } - - @Override - public boolean isWALDisabled() { - return delegate.isWALDisabled(); - } - - @Override - public boolean isMultiTenant() { - return delegate.isMultiTenant(); - } - - @Override - public boolean getStoreNulls() { - return delegate.getStoreNulls(); - } - - @Override - public ViewType getViewType() { - return delegate.getViewType(); - } - - @Override - public String getViewStatement() { - return delegate.getViewStatement(); - } - - @Override - public Long getViewIndexId() { - return delegate.getViewIndexId(); - } - - @Override - public PDataType getviewIndexIdType() { - return delegate.getviewIndexIdType(); - } - - @Override - public PTableKey getKey() { - return delegate.getKey(); - } - - @Override - public int getEstimatedSize() { - return delegate.getEstimatedSize(); - } - - @Override - public IndexType getIndexType() { - return delegate.getIndexType(); - } - - private final PTable delegate; - - public DelegateTable(PTable delegate) { - this.delegate = delegate; - } - - @Override - public PName getParentSchemaName() { - return delegate.getParentSchemaName(); - } - - @Override - public TransactionFactory.Provider getTransactionProvider() { - return delegate.getTransactionProvider(); - } - - @Override - public final boolean isTransactional() { - return 
delegate.isTransactional(); - } - - @Override - public int getBaseColumnCount() { - return delegate.getBaseColumnCount(); - } - - @Override - public boolean rowKeyOrderOptimizable() { - return delegate.rowKeyOrderOptimizable(); - } - - @Override - public int getRowTimestampColPos() { - return delegate.getRowTimestampColPos(); - } - - @Override - public String toString() { - return delegate.toString(); - } - - @Override - public long getUpdateCacheFrequency() { - return delegate.getUpdateCacheFrequency(); - } - - @Override - public boolean isNamespaceMapped() { - return delegate.isNamespaceMapped(); - } - - @Override - public String getAutoPartitionSeqName() { - return delegate.getAutoPartitionSeqName(); - } - - @Override - public boolean isAppendOnlySchema() { - return delegate.isAppendOnlySchema(); - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return delegate.equals(obj); - } - - @Override - public ImmutableStorageScheme getImmutableStorageScheme() { - return delegate.getImmutableStorageScheme(); - } - - @Override - public PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) throws ColumnNotFoundException, AmbiguousColumnException { - return delegate.getColumnForColumnQualifier(cf, cq); - } - - @Override - public EncodedCQCounter getEncodedCQCounter() { - return delegate.getEncodedCQCounter(); - } - - @Override - public QualifierEncodingScheme getEncodingScheme() { - return delegate.getEncodingScheme(); - } - - @Override - public Boolean useStatsForParallelization() { - return delegate.useStatsForParallelization(); - } - - @Override public boolean hasViewModifiedUpdateCacheFrequency() { - return delegate.hasViewModifiedUpdateCacheFrequency(); - } - - @Override public boolean hasViewModifiedUseStatsForParallelization() { - return delegate.hasViewModifiedUseStatsForParallelization(); - } - - @Override public int getTTL() { - return delegate.getTTL(); - } - - @Override - public Long getLastDDLTimestamp() { - return delegate.getLastDDLTimestamp(); - } - - @Override - public boolean isChangeDetectionEnabled() { - return delegate.isChangeDetectionEnabled(); - } - - @Override - public String getSchemaVersion() { - return delegate.getSchemaVersion(); - } - - @Override - public String getExternalSchemaId() { - return delegate.getExternalSchemaId(); - } - - @Override - public String getStreamingTopicName() { return delegate.getStreamingTopicName(); } - - @Override - public Set getCDCIncludeScopes() { - return delegate.getCDCIncludeScopes(); - } - - public byte[] getRowKeyMatcher() { - return delegate.getRowKeyMatcher(); - } - - public String getIndexWhere() { - return delegate.getIndexWhere(); - } - - @Override - public Map getAncestorLastDDLTimestampMap() { - return delegate.getAncestorLastDDLTimestampMap(); - } - - @Override - public Expression getIndexWhereExpression(PhoenixConnection connection) - throws SQLException { - return delegate.getIndexWhereExpression(connection); - } - - @Override - public Set getIndexWhereColumns(PhoenixConnection connection) - throws SQLException { - return delegate.getIndexWhereColumns(connection); - } - - @Override - public Long getMaxLookbackAge() { - return delegate.getMaxLookbackAge(); - } - - @Override public Map getPropertyValues() { return delegate.getPropertyValues(); } - - @Override public Map getDefaultPropertyValues() { return delegate.getDefaultPropertyValues(); } + @Override + public long getTimeStamp() { + return delegate.getTimeStamp(); + } + + @Override + 
public long getIndexDisableTimestamp() { + return delegate.getIndexDisableTimestamp(); + } + + @Override + public boolean isIndexStateDisabled() { + return delegate.isIndexStateDisabled(); + } + + @Override + public long getSequenceNumber() { + return delegate.getSequenceNumber(); + } + + @Override + public PName getName() { + return delegate.getName(); + } + + @Override + public PName getSchemaName() { + return delegate.getSchemaName(); + } + + @Override + public PName getTableName() { + return delegate.getTableName(); + } + + @Override + public PName getTenantId() { + return delegate.getTenantId(); + } + + @Override + public PTableType getType() { + return delegate.getType(); + } + + @Override + public PName getPKName() { + return delegate.getPKName(); + } + + @Override + public List getPKColumns() { + return delegate.getPKColumns(); + } + + @Override + public List getColumns() { + return delegate.getColumns(); + } + + @Override + public List getExcludedColumns() { + return delegate.getExcludedColumns(); + } + + @Override + public List getColumnFamilies() { + return delegate.getColumnFamilies(); + } + + @Override + public boolean hasOnlyPkColumns() { + return delegate.hasOnlyPkColumns(); + } + + @Override + public PColumnFamily getColumnFamily(byte[] family) throws ColumnFamilyNotFoundException { + return delegate.getColumnFamily(family); + } + + @Override + public PColumnFamily getColumnFamily(String family) throws ColumnFamilyNotFoundException { + return delegate.getColumnFamily(family); + } + + @Override + public PColumn getColumnForColumnName(String name) + throws ColumnNotFoundException, AmbiguousColumnException { + return delegate.getColumnForColumnName(name); + } + + @Override + public PColumn getPKColumn(String name) throws ColumnNotFoundException { + return delegate.getPKColumn(name); + } + + @Override + public PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, + boolean hasOnDupKey, byte[]... values) { + return delegate.newRow(builder, ts, key, hasOnDupKey, values); + } + + @Override + public PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, + byte[]... 
values) { + return delegate.newRow(builder, key, hasOnDupKey, values); + } + + @Override + public int newKey(ImmutableBytesWritable key, byte[][] values) { + return delegate.newKey(key, values); + } + + @Override + public RowKeySchema getRowKeySchema() { + return delegate.getRowKeySchema(); + } + + @Override + public Integer getBucketNum() { + return delegate.getBucketNum(); + } + + @Override + public List getIndexes() { + return delegate.getIndexes(); + } + + @Override + public PTable getTransformingNewTable() { + return delegate.getTransformingNewTable(); + } + + @Override + public PIndexState getIndexState() { + return delegate.getIndexState(); + } + + @Override + public PName getParentName() { + return delegate.getParentName(); + } + + @Override + public PName getParentTableName() { + return delegate.getParentTableName(); + } + + @Override + public PName getBaseTableLogicalName() { + return delegate.getBaseTableLogicalName(); + } + + @Override + public List getPhysicalNames() { + return delegate.getPhysicalNames(); + } + + @Override + public PName getPhysicalName() { + return delegate.getPhysicalName(); + } + + @Override + public PName getPhysicalName(boolean returnColValueFromSyscat) { + return delegate.getPhysicalName(returnColValueFromSyscat); + } + + @Override + public boolean isImmutableRows() { + return delegate.isImmutableRows(); + } + + @Override + public boolean getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) + throws SQLException { + return delegate.getIndexMaintainers(ptr, connection); + } + + @Override + public IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) + throws SQLException { + return delegate.getIndexMaintainer(dataTable, connection); + } + + @Override + public IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, + PhoenixConnection connection) throws SQLException { + return delegate.getIndexMaintainer(dataTable, cdcTable, connection); + } + + @Override + public TransformMaintainer getTransformMaintainer(PTable oldTable, PhoenixConnection connection) { + return delegate.getTransformMaintainer(oldTable, connection); + } + + @Override + public PName getDefaultFamilyName() { + return delegate.getDefaultFamilyName(); + } + + @Override + public boolean isWALDisabled() { + return delegate.isWALDisabled(); + } + + @Override + public boolean isMultiTenant() { + return delegate.isMultiTenant(); + } + + @Override + public boolean getStoreNulls() { + return delegate.getStoreNulls(); + } + + @Override + public ViewType getViewType() { + return delegate.getViewType(); + } + + @Override + public String getViewStatement() { + return delegate.getViewStatement(); + } + + @Override + public Long getViewIndexId() { + return delegate.getViewIndexId(); + } + + @Override + public PDataType getviewIndexIdType() { + return delegate.getviewIndexIdType(); + } + + @Override + public PTableKey getKey() { + return delegate.getKey(); + } + + @Override + public int getEstimatedSize() { + return delegate.getEstimatedSize(); + } + + @Override + public IndexType getIndexType() { + return delegate.getIndexType(); + } + + private final PTable delegate; + + public DelegateTable(PTable delegate) { + this.delegate = delegate; + } + + @Override + public PName getParentSchemaName() { + return delegate.getParentSchemaName(); + } + + @Override + public TransactionFactory.Provider getTransactionProvider() { + return delegate.getTransactionProvider(); + } + + @Override + public final boolean isTransactional() { + return 
delegate.isTransactional(); + } + + @Override + public int getBaseColumnCount() { + return delegate.getBaseColumnCount(); + } + + @Override + public boolean rowKeyOrderOptimizable() { + return delegate.rowKeyOrderOptimizable(); + } + + @Override + public int getRowTimestampColPos() { + return delegate.getRowTimestampColPos(); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public long getUpdateCacheFrequency() { + return delegate.getUpdateCacheFrequency(); + } + + @Override + public boolean isNamespaceMapped() { + return delegate.isNamespaceMapped(); + } + + @Override + public String getAutoPartitionSeqName() { + return delegate.getAutoPartitionSeqName(); + } + + @Override + public boolean isAppendOnlySchema() { + return delegate.isAppendOnlySchema(); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public ImmutableStorageScheme getImmutableStorageScheme() { + return delegate.getImmutableStorageScheme(); + } + + @Override + public PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) + throws ColumnNotFoundException, AmbiguousColumnException { + return delegate.getColumnForColumnQualifier(cf, cq); + } + + @Override + public EncodedCQCounter getEncodedCQCounter() { + return delegate.getEncodedCQCounter(); + } + + @Override + public QualifierEncodingScheme getEncodingScheme() { + return delegate.getEncodingScheme(); + } + + @Override + public Boolean useStatsForParallelization() { + return delegate.useStatsForParallelization(); + } + + @Override + public boolean hasViewModifiedUpdateCacheFrequency() { + return delegate.hasViewModifiedUpdateCacheFrequency(); + } + + @Override + public boolean hasViewModifiedUseStatsForParallelization() { + return delegate.hasViewModifiedUseStatsForParallelization(); + } + + @Override + public int getTTL() { + return delegate.getTTL(); + } + + @Override + public Long getLastDDLTimestamp() { + return delegate.getLastDDLTimestamp(); + } + + @Override + public boolean isChangeDetectionEnabled() { + return delegate.isChangeDetectionEnabled(); + } + + @Override + public String getSchemaVersion() { + return delegate.getSchemaVersion(); + } + + @Override + public String getExternalSchemaId() { + return delegate.getExternalSchemaId(); + } + + @Override + public String getStreamingTopicName() { + return delegate.getStreamingTopicName(); + } + + @Override + public Set getCDCIncludeScopes() { + return delegate.getCDCIncludeScopes(); + } + + public byte[] getRowKeyMatcher() { + return delegate.getRowKeyMatcher(); + } + + public String getIndexWhere() { + return delegate.getIndexWhere(); + } + + @Override + public Map getAncestorLastDDLTimestampMap() { + return delegate.getAncestorLastDDLTimestampMap(); + } + + @Override + public Expression getIndexWhereExpression(PhoenixConnection connection) throws SQLException { + return delegate.getIndexWhereExpression(connection); + } + + @Override + public Set getIndexWhereColumns(PhoenixConnection connection) + throws SQLException { + return delegate.getIndexWhereColumns(connection); + } + + @Override + public Long getMaxLookbackAge() { + return delegate.getMaxLookbackAge(); + } + + @Override + public Map getPropertyValues() { + return delegate.getPropertyValues(); + } + + @Override + public Map getDefaultPropertyValues() { + return delegate.getDefaultPropertyValues(); + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/EmptySequenceCacheException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/EmptySequenceCacheException.java index e6f6007b037..5fc1b82a23c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/EmptySequenceCacheException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/EmptySequenceCacheException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema; import java.sql.SQLException; @@ -24,16 +23,16 @@ import org.apache.phoenix.exception.SQLExceptionInfo; public class EmptySequenceCacheException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.EMPTY_SEQUENCE_CACHE; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.EMPTY_SEQUENCE_CACHE; - public EmptySequenceCacheException() { - this(null,null); - } + public EmptySequenceCacheException() { + this(null, null); + } - public EmptySequenceCacheException(String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(ERROR_CODE).setSchemaName(schemaName).setTableName(tableName).build().toString(), - ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); - } + public EmptySequenceCacheException(String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(ERROR_CODE).setSchemaName(schemaName).setTableName(tableName) + .build().toString(), ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteQueryNotApplicableException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteQueryNotApplicableException.java index 9e93d935ef6..b02c30a9845 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteQueryNotApplicableException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteQueryNotApplicableException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,19 +24,24 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class ExecuteQueryNotApplicableException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE; - - public ExecuteQueryNotApplicableException(Operation op) { - super(new SQLExceptionInfo.Builder(code).setMessage("Disallowed operation: " + op.name()).build().toString(), code.getSQLState(), code.getErrorCode()); - } - - public ExecuteQueryNotApplicableException(String query) { - super(new SQLExceptionInfo.Builder(code).setMessage("Query: " + query).build().toString(), code.getSQLState(), code.getErrorCode()); - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE; - public ExecuteQueryNotApplicableException(String command, String statement) { - super(new SQLExceptionInfo.Builder(code).setMessage("Command: " + command + ". Statement: " + statement).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public ExecuteQueryNotApplicableException(Operation op) { + super(new SQLExceptionInfo.Builder(code).setMessage("Disallowed operation: " + op.name()) + .build().toString(), code.getSQLState(), code.getErrorCode()); + } + + public ExecuteQueryNotApplicableException(String query) { + super(new SQLExceptionInfo.Builder(code).setMessage("Query: " + query).build().toString(), + code.getSQLState(), code.getErrorCode()); + } + + public ExecuteQueryNotApplicableException(String command, String statement) { + super( + new SQLExceptionInfo.Builder(code) + .setMessage("Command: " + command + ". Statement: " + statement).build().toString(), + code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteUpdateNotApplicableException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteUpdateNotApplicableException.java index 8df1e062add..866f0a16287 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteUpdateNotApplicableException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ExecuteUpdateNotApplicableException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,18 +24,23 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; public class ExecuteUpdateNotApplicableException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE; - - public ExecuteUpdateNotApplicableException(Operation op) { - super(new SQLExceptionInfo.Builder(code).setMessage("Disallowed operation: " + op.name()).build().toString(), code.getSQLState(), code.getErrorCode()); - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE; - public ExecuteUpdateNotApplicableException(String query) { - super(new SQLExceptionInfo.Builder(code).setMessage("Query: " + query).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public ExecuteUpdateNotApplicableException(Operation op) { + super(new SQLExceptionInfo.Builder(code).setMessage("Disallowed operation: " + op.name()) + .build().toString(), code.getSQLState(), code.getErrorCode()); + } - public ExecuteUpdateNotApplicableException(String command, String statement) { - super(new SQLExceptionInfo.Builder(code).setMessage("Command: " + command + ". Statement: " + statement).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public ExecuteUpdateNotApplicableException(String query) { + super(new SQLExceptionInfo.Builder(code).setMessage("Query: " + query).build().toString(), + code.getSQLState(), code.getErrorCode()); + } + + public ExecuteUpdateNotApplicableException(String command, String statement) { + super( + new SQLExceptionInfo.Builder(code) + .setMessage("Command: " + command + ". Statement: " + statement).build().toString(), + code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionAlreadyExistsException.java index 91b9d0705df..616f9547c72 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,35 +24,35 @@ import org.apache.phoenix.parse.PFunction; public class FunctionAlreadyExistsException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.FUNCTION_ALREADY_EXIST; - private final PFunction function; - private final String functionName; - - public FunctionAlreadyExistsException(String functionName) { - this(functionName, null, null); - } - - public FunctionAlreadyExistsException(String functionName, String msg) { - this(functionName, msg, null); - } - - public FunctionAlreadyExistsException(String functionName, PFunction function) { - this(functionName, null, function); - } - - public FunctionAlreadyExistsException(String functionName, String msg, PFunction function) { - super(new SQLExceptionInfo.Builder(code).setFunctionName(functionName).setMessage(msg).build().toString(), - code.getSQLState(), code.getErrorCode()); - this.functionName = functionName; - this.function = function; - } - - public String getFunctionName() { - return functionName; - } - - public PFunction getFunction() { - return function; - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.FUNCTION_ALREADY_EXIST; + private final PFunction function; + private final String functionName; + + public FunctionAlreadyExistsException(String functionName) { + this(functionName, null, null); + } + + public FunctionAlreadyExistsException(String functionName, String msg) { + this(functionName, msg, null); + } + + public FunctionAlreadyExistsException(String functionName, PFunction function) { + this(functionName, null, function); + } + + public FunctionAlreadyExistsException(String functionName, String msg, PFunction function) { + super(new SQLExceptionInfo.Builder(code).setFunctionName(functionName).setMessage(msg).build() + .toString(), code.getSQLState(), code.getErrorCode()); + this.functionName = functionName; + this.function = function; + } + + public String getFunctionName() { + return functionName; + } + + public PFunction getFunction() { + return function; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionNotFoundException.java index 09763cd1e97..4c2048ad4ce 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/FunctionNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,31 +22,31 @@ import org.apache.phoenix.exception.SQLExceptionInfo; public class FunctionNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.FUNCTION_UNDEFINED; - private final String functionName; - private final long timestamp; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.FUNCTION_UNDEFINED; + private final String functionName; + private final long timestamp; - public FunctionNotFoundException(FunctionNotFoundException e, long timestamp) { - this(e.functionName, timestamp); - } + public FunctionNotFoundException(FunctionNotFoundException e, long timestamp) { + this(e.functionName, timestamp); + } - public FunctionNotFoundException(String functionName) { - this(functionName, HConstants.LATEST_TIMESTAMP); - } - - public FunctionNotFoundException(String functionName, long timestamp) { - super(new SQLExceptionInfo.Builder(code).setFunctionName(functionName).build().toString(), - code.getSQLState(), code.getErrorCode(), null, null, null); - this.functionName = functionName; - this.timestamp = timestamp; - } + public FunctionNotFoundException(String functionName) { + this(functionName, HConstants.LATEST_TIMESTAMP); + } - public String getFunctionName() { - return functionName; - } + public FunctionNotFoundException(String functionName, long timestamp) { + super(new SQLExceptionInfo.Builder(code).setFunctionName(functionName).build().toString(), + code.getSQLState(), code.getErrorCode(), null, null, null); + this.functionName = functionName; + this.timestamp = timestamp; + } - public long getTimeStamp() { - return timestamp; - } + public String getFunctionName() { + return functionName; + } + + public long getTimeStamp() { + return timestamp; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IllegalDataException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IllegalDataException.java index 0053e51b683..ff48ffaab0e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IllegalDataException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IllegalDataException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,27 +21,23 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when an invalid or illegal data value is found - * - * * @since 0.1 */ public class IllegalDataException extends ConstraintViolationException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; + + public IllegalDataException() { + this((String) null); + } + + public IllegalDataException(String message) { + super(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA).setMessage(message).build() + .buildException()); + } - public IllegalDataException() { - this((String)null); - } - - public IllegalDataException(String message) { - super(new SQLExceptionInfo.Builder( - SQLExceptionCode.ILLEGAL_DATA).setMessage(message) - .build().buildException()); - } + public IllegalDataException(Throwable cause) { + super(cause); // Already wrapped - don't rewrap + } - public IllegalDataException(Throwable cause) { - super(cause); // Already wrapped - don't rewrap - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexNotFoundException.java index 953933ee0f7..38242b9792b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
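One way to read the "Already wrapped - don't rewrap" constructor above is as a wrap-once convention: a checked cause is wrapped in the unchecked exception exactly one time, and a cause that is already wrapped is passed through untouched. A hedged, self-contained sketch of that convention with hypothetical names (this is not Phoenix's ConstraintViolationException hierarchy itself):

// Hypothetical names; illustrates wrap-once, not Phoenix's actual class hierarchy.
final class IllegalDataRuntimeException extends RuntimeException {
  private static final long serialVersionUID = 1L;

  IllegalDataRuntimeException(Throwable cause) {
    super(cause);
  }

  static RuntimeException wrapOnce(Throwable cause) {
    return cause instanceof IllegalDataRuntimeException
      ? (IllegalDataRuntimeException) cause // already wrapped - don't rewrap
      : new IllegalDataRuntimeException(cause);
  }
}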
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,22 +22,22 @@ import org.apache.phoenix.util.SchemaUtil; public class IndexNotFoundException extends TableNotFoundException { - private static SQLExceptionCode code = SQLExceptionCode.INDEX_UNDEFINED; + private static SQLExceptionCode code = SQLExceptionCode.INDEX_UNDEFINED; - public IndexNotFoundException(IndexNotFoundException e, long timestamp) { - this(e.getSchemaName(),e.getTableName(), timestamp); - } + public IndexNotFoundException(IndexNotFoundException e, long timestamp) { + this(e.getSchemaName(), e.getTableName(), timestamp); + } - public IndexNotFoundException(String tableName) { - this(SchemaUtil.getSchemaNameFromFullName(tableName), - SchemaUtil.getTableNameFromFullName(tableName)); - } + public IndexNotFoundException(String tableName) { + this(SchemaUtil.getSchemaNameFromFullName(tableName), + SchemaUtil.getTableNameFromFullName(tableName)); + } - public IndexNotFoundException(String schemaName, String tableName) { - this(schemaName, tableName, HConstants.LATEST_TIMESTAMP); - } + public IndexNotFoundException(String schemaName, String tableName) { + this(schemaName, tableName, HConstants.LATEST_TIMESTAMP); + } - public IndexNotFoundException(String schemaName, String tableName, long timestamp) { - super(schemaName, tableName, timestamp, code, false); - } + public IndexNotFoundException(String schemaName, String tableName, long timestamp) { + super(schemaName, tableName, timestamp, code, false); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexUncoveredDataColumnRef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexUncoveredDataColumnRef.java index 2bffdbb5c39..4ee65a98558 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexUncoveredDataColumnRef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/IndexUncoveredDataColumnRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,62 +29,66 @@ import org.apache.phoenix.util.IndexUtil; /** - * Even when a column is not covered by an index table for a given query, we may still want to - * use index in the query plan and fetch the missing columns from the data table rows on the - * server side. This class is used to keep track of such data columns. + * Even when a column is not covered by an index table for a given query, we may still want to use + * index in the query plan and fetch the missing columns from the data table rows on the server + * side. This class is used to keep track of such data columns. 
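 * For example, if an index on T(A) is chosen for the query SELECT A, C FROM T, column C is not
 * stored in the index row; an IndexUncoveredDataColumnRef for C lets the server fetch C from the
 * corresponding data table row while still scanning the index. (T, A and C are illustrative.)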
*/ public class IndexUncoveredDataColumnRef extends ColumnRef { - final private int position; - // Despite the final keyword, columns IS mutable, and must not be used for equality/hashCode - final private Set columns; - private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); + final private int position; + // Despite the final keyword, columns IS mutable, and must not be used for equality/hashCode + final private Set columns; + private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); - public IndexUncoveredDataColumnRef(StatementContext context, TableRef tRef, String indexColumnName) - throws MetaDataEntityNotFoundException, SQLException { - super(FromCompiler.getResolver( - FACTORY.namedTable( - null, - TableName.create(tRef.getTable().getSchemaName().getString(), tRef.getTable() - .getParentTableName().getString())), context.getConnection(), false) - .resolveTable(context.getCurrentTable().getTable().getSchemaName().getString(), - tRef.getTable().getParentTableName().getString()), - IndexUtil.getDataColumnFamilyName(indexColumnName), IndexUtil - .getDataColumnName(indexColumnName)); - position = context.getDataColumnPosition(this.getColumn()); - columns = context.getDataColumns(); - } + public IndexUncoveredDataColumnRef(StatementContext context, TableRef tRef, + String indexColumnName) throws MetaDataEntityNotFoundException, SQLException { + super( + FromCompiler + .getResolver(FACTORY.namedTable(null, + TableName.create(tRef.getTable().getSchemaName().getString(), + tRef.getTable().getParentTableName().getString())), + context.getConnection(), false) + .resolveTable(context.getCurrentTable().getTable().getSchemaName().getString(), + tRef.getTable().getParentTableName().getString()), + IndexUtil.getDataColumnFamilyName(indexColumnName), + IndexUtil.getDataColumnName(indexColumnName)); + position = context.getDataColumnPosition(this.getColumn()); + columns = context.getDataColumns(); + } - protected IndexUncoveredDataColumnRef(IndexUncoveredDataColumnRef indexDataColumnRef, long timestamp) { - super(indexDataColumnRef, timestamp); - this.position = indexDataColumnRef.position; - this.columns = indexDataColumnRef.columns; - } + protected IndexUncoveredDataColumnRef(IndexUncoveredDataColumnRef indexDataColumnRef, + long timestamp) { + super(indexDataColumnRef, timestamp); + this.position = indexDataColumnRef.position; + this.columns = indexDataColumnRef.columns; + } - @Override - public ColumnRef cloneAtTimestamp(long timestamp) { - return new IndexUncoveredDataColumnRef(this, timestamp); - } + @Override + public ColumnRef cloneAtTimestamp(long timestamp) { + return new IndexUncoveredDataColumnRef(this, timestamp); + } - @Override - public ColumnExpression newColumnExpression(boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) { - String displayName = this.getTableRef().getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive); - return new ProjectedColumnExpression(this.getColumn(), columns, position, displayName); - } + @Override + public ColumnExpression newColumnExpression(boolean schemaNameCaseSensitive, + boolean colNameCaseSensitive) { + String displayName = + this.getTableRef().getColumnDisplayName(this, schemaNameCaseSensitive, colNameCaseSensitive); + return new ProjectedColumnExpression(this.getColumn(), columns, position, displayName); + } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + position; - return result; - } + @Override + public int 
hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + position; + return result; + } - @Override - public boolean equals(Object o) { - if (!super.equals(o)) { - return false; - } - IndexUncoveredDataColumnRef that = (IndexUncoveredDataColumnRef) o; - return position == that.position; + @Override + public boolean equals(Object o) { + if (!super.equals(o)) { + return false; } + IndexUncoveredDataColumnRef that = (IndexUncoveredDataColumnRef) o; + return position == that.position; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java index a8d6c7dce1a..0f4492b9fe8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,6 @@ import java.util.List; -import net.jcip.annotations.Immutable; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; @@ -32,216 +30,212 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.util.ByteUtil; +import net.jcip.annotations.Immutable; /** - * - * Simple flat schema over a byte array where fields may be any of {@link org.apache.phoenix.schema.types.PDataType}. - * Optimized for positional access by index. - * - * + * Simple flat schema over a byte array where fields may be any of + * {@link org.apache.phoenix.schema.types.PDataType}. Optimized for positional access by index. 
* @since 0.1 */ @Immutable public class KeyValueSchema extends ValueSchema { - - public KeyValueSchema() { - } - - protected KeyValueSchema(int minNullable, List fields) { - super(minNullable, fields); - } - public static class KeyValueSchemaBuilder extends ValueSchemaBuilder { + public KeyValueSchema() { + } - public KeyValueSchemaBuilder(int minNullable) { - super(minNullable); - } - - @Override - public KeyValueSchema build() { - List condensedFields = buildFields(); - return new KeyValueSchema(this.minNullable, condensedFields); - } + protected KeyValueSchema(int minNullable, List fields) { + super(minNullable, fields); + } - @Override - public KeyValueSchemaBuilder setMaxFields(int nFields) { - super.setMaxFields(nFields); - return this; - } - - public KeyValueSchemaBuilder addField(PDatum datum) { - super.addField(datum, fields.size() >= this.minNullable, SortOrder.getDefault()); - return this; - } + public static class KeyValueSchemaBuilder extends ValueSchemaBuilder { + + public KeyValueSchemaBuilder(int minNullable) { + super(minNullable); } - - public boolean isNull(int position, ValueBitSet bitSet) { - int nBit = position - getMinNullable(); - return (nBit >= 0 && !bitSet.get(nBit)); + + @Override + public KeyValueSchema build() { + List condensedFields = buildFields(); + return new KeyValueSchema(this.minNullable, condensedFields); } - - private static byte[] ensureSize(byte[] b, int offset, int size) { - if (size > b.length) { - byte[] bBigger = new byte[Math.max(b.length * 2, size)]; - System.arraycopy(b, 0, bBigger, 0, b.length); - return bBigger; - } - return b; + + @Override + public KeyValueSchemaBuilder setMaxFields(int nFields) { + super.setMaxFields(nFields); + return this; } - /** - * @return byte representation of the KeyValueSchema - */ - public byte[] toBytes(Expression[] expressions, ValueBitSet valueSet, ImmutableBytesWritable ptr) { - return toBytes(null, expressions, valueSet, ptr); + public KeyValueSchemaBuilder addField(PDatum datum) { + super.addField(datum, fields.size() >= this.minNullable, SortOrder.getDefault()); + return this; } - - /** - * @return byte representation of the KeyValueSchema - */ - public byte[] toBytes(Tuple tuple, Expression[] expressions, ValueBitSet valueSet, ImmutableBytesWritable ptr) { - int offset = 0; - int index = 0; - valueSet.clear(); - int minNullableIndex = getMinNullable(); - byte[] b = new byte[getEstimatedValueLength() + valueSet.getEstimatedLength()]; - List fields = getFields(); - // We can get away with checking if only nulls are left in the outer loop, - // since repeating fields will not span the non-null/null boundary. - for (int i = 0; i < fields.size(); i++) { - Field field = fields.get(i); - PDataType type = field.getDataType(); - for (int j = 0; j < field.getCount(); j++) { - if (expressions[index].evaluate(tuple, ptr) && ptr.getLength() > 0) { // Skip null values - if (index >= minNullableIndex) { - valueSet.set(index - minNullableIndex); - } - if (!type.isFixedWidth()) { - b = ensureSize(b, offset, offset + getVarLengthBytes(ptr.getLength())); - offset = writeVarLengthField(ptr, b, offset); - } else { - int nBytes = ptr.getLength(); - b = ensureSize(b, offset, offset + nBytes); - System.arraycopy(ptr.get(), ptr.getOffset(), b, offset, nBytes); - offset += nBytes; - } - } - index++; - } - } - // Add information about which values were set at end of value, - // so that we can quickly access them without needing to walk - // through the values using the schema. 
- // TODO: if there aren't any non null values, don't serialize anything - b = ensureSize(b, offset, offset + valueSet.getEstimatedLength()); - offset = valueSet.toBytes(b, offset); - - if (offset == b.length) { - return b; - } else { - byte[] bExact = new byte[offset]; - System.arraycopy(b, 0, bExact, 0, offset); - return bExact; - } + } + + public boolean isNull(int position, ValueBitSet bitSet) { + int nBit = position - getMinNullable(); + return (nBit >= 0 && !bitSet.get(nBit)); + } + + private static byte[] ensureSize(byte[] b, int offset, int size) { + if (size > b.length) { + byte[] bBigger = new byte[Math.max(b.length * 2, size)]; + System.arraycopy(b, 0, bBigger, 0, b.length); + return bBigger; } + return b; + } - /** - * Extract value out of a cell encoded with {@link - * org.apache.phoenix.schema.PTable.ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS} - * - * @param cell The cell, exepected to have an encoded value. - * @param expression The expression - * @param ptr The pointer in which the extracted value can be found, if successful. - * @return {@code true} on success. - */ - public boolean extractValue(Cell cell, SingleCellColumnExpression expression, - ImmutableBytesWritable ptr) { - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - List fields = getFields(); - for (int i = 0; i < fields.size(); i++) { - Field field = fields.get(i); - for (int j = 0; j < field.getCount(); j++) { - if (expression.evaluate(ptr) && ptr.getLength() > 0) { - return true; - } - } + /** Returns byte representation of the KeyValueSchema */ + public byte[] toBytes(Expression[] expressions, ValueBitSet valueSet, + ImmutableBytesWritable ptr) { + return toBytes(null, expressions, valueSet, ptr); + } + + /** Returns byte representation of the KeyValueSchema */ + public byte[] toBytes(Tuple tuple, Expression[] expressions, ValueBitSet valueSet, + ImmutableBytesWritable ptr) { + int offset = 0; + int index = 0; + valueSet.clear(); + int minNullableIndex = getMinNullable(); + byte[] b = new byte[getEstimatedValueLength() + valueSet.getEstimatedLength()]; + List fields = getFields(); + // We can get away with checking if only nulls are left in the outer loop, + // since repeating fields will not span the non-null/null boundary. + for (int i = 0; i < fields.size(); i++) { + Field field = fields.get(i); + PDataType type = field.getDataType(); + for (int j = 0; j < field.getCount(); j++) { + if (expressions[index].evaluate(tuple, ptr) && ptr.getLength() > 0) { // Skip null values + if (index >= minNullableIndex) { + valueSet.set(index - minNullableIndex); + } + if (!type.isFixedWidth()) { + b = ensureSize(b, offset, offset + getVarLengthBytes(ptr.getLength())); + offset = writeVarLengthField(ptr, b, offset); + } else { + int nBytes = ptr.getLength(); + b = ensureSize(b, offset, offset + nBytes); + System.arraycopy(ptr.get(), ptr.getOffset(), b, offset, nBytes); + offset += nBytes; + } } - return false; + index++; + } } + // Add information about which values were set at end of value, + // so that we can quickly access them without needing to walk + // through the values using the schema. 
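      // Resulting layout: non-null values in schema order (null values are skipped entirely),
      // variable-width values prefixed with a vint length and fixed-width values written raw,
      // followed by the serialized ValueBitSet; the buffer is trimmed to the exact size below.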
+ // TODO: if there aren't any non null values, don't serialize anything + b = ensureSize(b, offset, offset + valueSet.getEstimatedLength()); + offset = valueSet.toBytes(b, offset); - private int getVarLengthBytes(int length) { - return length + WritableUtils.getVIntSize(length); - } - - private int writeVarLengthField(ImmutableBytesWritable ptr, byte[] b, int offset) { - int length = ptr.getLength(); - offset += ByteUtil.vintToBytes(b, offset, length); - System.arraycopy(ptr.get(), ptr.getOffset(), b, offset, length); - offset += length; - return offset; + if (offset == b.length) { + return b; + } else { + byte[] bExact = new byte[offset]; + System.arraycopy(b, 0, bExact, 0, offset); + return bExact; } + } - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - public Boolean iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr, int position, ValueBitSet valueBitSet) { - ptr.set(src, srcOffset, 0); - int maxOffset = srcOffset + srcLength; - Boolean hasValue = null; - for (int i = 0; i < position; i++) { - hasValue = next(ptr, i, maxOffset, valueBitSet); + /** + * Extract value out of a cell encoded with + * {@link org.apache.phoenix.schema.PTable.ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS} + * @param cell The cell, exepected to have an encoded value. + * @param expression The expression + * @param ptr The pointer in which the extracted value can be found, if successful. + * @return {@code true} on success. + */ + public boolean extractValue(Cell cell, SingleCellColumnExpression expression, + ImmutableBytesWritable ptr) { + ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + List fields = getFields(); + for (int i = 0; i < fields.size(); i++) { + Field field = fields.get(i); + for (int j = 0; j < field.getCount(); j++) { + if (expression.evaluate(ptr) && ptr.getLength() > 0) { + return true; } - return hasValue; + } } - - public Boolean iterator(ImmutableBytesWritable srcPtr, ImmutableBytesWritable ptr, int position, ValueBitSet valueSet) { - return iterator(srcPtr.get(),srcPtr.getOffset(),srcPtr.getLength(), ptr, position, valueSet); + return false; + } + + private int getVarLengthBytes(int length) { + return length + WritableUtils.getVIntSize(length); + } + + private int writeVarLengthField(ImmutableBytesWritable ptr, byte[] b, int offset) { + int length = ptr.getLength(); + offset += ByteUtil.vintToBytes(b, offset, length); + System.arraycopy(ptr.get(), ptr.getOffset(), b, offset, length); + offset += length; + return offset; + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + public Boolean iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr, + int position, ValueBitSet valueBitSet) { + ptr.set(src, srcOffset, 0); + int maxOffset = srcOffset + srcLength; + Boolean hasValue = null; + for (int i = 0; i < position; i++) { + hasValue = next(ptr, i, maxOffset, valueBitSet); } - - public Boolean iterator(ImmutableBytesWritable ptr, int position, ValueBitSet valueSet) { - return iterator(ptr, ptr, position, valueSet); + return hasValue; + } + + public Boolean iterator(ImmutableBytesWritable srcPtr, ImmutableBytesWritable ptr, int position, + ValueBitSet valueSet) { + return iterator(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), ptr, position, valueSet); + } + + public Boolean iterator(ImmutableBytesWritable ptr, int position, 
ValueBitSet valueSet) { + return iterator(ptr, ptr, position, valueSet); + } + + public Boolean iterator(ImmutableBytesWritable ptr) { + return iterator(ptr, ptr, 0, ValueBitSet.EMPTY_VALUE_BITSET); + } + + /** + * Move the bytes ptr to the next position relative to the current ptr + * @param ptr bytes pointer pointing to the value at the positional index provided. + * @param position zero-based index of the next field in the value schema + * @param maxOffset max possible offset value when iterating + * @return true if a value was found and ptr was set, false if the value is null and ptr was not + * set, and null if the value is null and there are no more values + */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + public Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset, + ValueBitSet valueSet) { + if (ptr.getOffset() + ptr.getLength() >= maxOffset) { + ptr.set(ptr.get(), maxOffset, 0); + return null; } - - public Boolean iterator(ImmutableBytesWritable ptr) { - return iterator(ptr, ptr, 0, ValueBitSet.EMPTY_VALUE_BITSET); + if (position >= getFieldCount()) { + return null; } - - /** - * Move the bytes ptr to the next position relative to the current ptr - * @param ptr bytes pointer pointing to the value at the positional index - * provided. - * @param position zero-based index of the next field in the value schema - * @param maxOffset max possible offset value when iterating - * @return true if a value was found and ptr was set, false if the value is null and ptr was not - * set, and null if the value is null and there are no more values - */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - public Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset, ValueBitSet valueSet) { - if (ptr.getOffset() + ptr.getLength() >= maxOffset) { - ptr.set(ptr.get(), maxOffset, 0); - return null; - } - if (position >= getFieldCount()) { - return null; - } - // Move the pointer past the current value and set length - // to 0 to ensure you never set the ptr past the end of the - // backing byte array. - ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength(), 0); - if (!isNull(position, valueSet)) { - Field field = this.getField(position); - int length = field.getDataType().isFixedWidth() ? - field.getByteSize() : ByteUtil.vintFromBytes(ptr); - if (ptr.getOffset() + length > maxOffset) { - throw new RuntimeException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) - .setMessage("Expected length of at least " + length + " bytes, but had " + (maxOffset - - ptr.getOffset())).build().buildException()); - } - ptr.set(ptr.get(),ptr.getOffset(),length); - return ptr.getLength() > 0; - } - return false; + // Move the pointer past the current value and set length + // to 0 to ensure you never set the ptr past the end of the + // backing byte array. + ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength(), 0); + if (!isNull(position, valueSet)) { + Field field = this.getField(position); + int length = + field.getDataType().isFixedWidth() ? 
field.getByteSize() : ByteUtil.vintFromBytes(ptr); + if (ptr.getOffset() + length > maxOffset) { + throw new RuntimeException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage("Expected length of at least " + length + " bytes, but had " + + (maxOffset - ptr.getOffset())) + .build().buildException()); + } + ptr.set(ptr.get(), ptr.getOffset(), length); + return ptr.getLength() > 0; } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeBytesExceededException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeBytesExceededException.java index 95f25bd6970..abbd3aa1011 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeBytesExceededException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeBytesExceededException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,33 +17,30 @@ */ package org.apache.phoenix.schema; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.exception.SQLExceptionInfo.Builder; -import java.sql.SQLException; - /** - * - * Exception thrown when MutationState size is bigger than - * maximum allowed number of Bytes - * + * Exception thrown when MutationState size is bigger than maximum allowed number of Bytes */ public class MaxMutationSizeBytesExceededException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED; - public MaxMutationSizeBytesExceededException() { - super(new Builder(code).build().toString(), code.getSQLState(), code.getErrorCode(), null); - } + public MaxMutationSizeBytesExceededException() { + super(new Builder(code).build().toString(), code.getSQLState(), code.getErrorCode(), null); + } - public MaxMutationSizeBytesExceededException(long maxMutationSizeBytes, - long mutationSizeBytes) { - super(new SQLExceptionInfo.Builder(code).setMaxMutationSizeBytes(maxMutationSizeBytes) - .setMutationSizeBytes(mutationSizeBytes).build().toString(), - code.getSQLState(), code.getErrorCode(), null); - } + public MaxMutationSizeBytesExceededException(long maxMutationSizeBytes, long mutationSizeBytes) { + super( + new SQLExceptionInfo.Builder(code).setMaxMutationSizeBytes(maxMutationSizeBytes) + .setMutationSizeBytes(mutationSizeBytes).build().toString(), + code.getSQLState(), code.getErrorCode(), null); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeExceededException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeExceededException.java index 9fcf8c22a51..2429f5d14eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeExceededException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxMutationSizeExceededException.java @@ -7,7 +7,7 @@ * "License"); you may not 
use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,29 +17,28 @@ */ package org.apache.phoenix.schema; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; -import java.sql.SQLException; /** - * - * Exception thrown when MutationState size is bigger than - * maximum allowed number of rows - * + * Exception thrown when MutationState size is bigger than maximum allowed number of rows */ public class MaxMutationSizeExceededException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED; - public MaxMutationSizeExceededException() { - super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), - code.getErrorCode(), null); - } + public MaxMutationSizeExceededException() { + super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), + code.getErrorCode(), null); + } - public MaxMutationSizeExceededException(int maxMutationSize, int mutationSize) { - super(new SQLExceptionInfo.Builder(code).setMaxMutationSize(maxMutationSize) - .setMutationSize(mutationSize).build().toString(), - code.getSQLState(), code.getErrorCode(), null); - } + public MaxMutationSizeExceededException(int maxMutationSize, int mutationSize) { + super( + new SQLExceptionInfo.Builder(code).setMaxMutationSize(maxMutationSize) + .setMutationSize(mutationSize).build().toString(), + code.getSQLState(), code.getErrorCode(), null); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxPhoenixColumnSizeExceededException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxPhoenixColumnSizeExceededException.java index 64e01d5eaee..494ef66c236 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxPhoenixColumnSizeExceededException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MaxPhoenixColumnSizeExceededException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,30 +17,30 @@ */ package org.apache.phoenix.schema; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; -import java.sql.SQLException; - /** - * - * Exception thrown when MutationState row Column Cell size is bigger than - * maximum allowed number - * + * Exception thrown when MutationState row Column Cell size is bigger than maximum allowed number */ -public class MaxPhoenixColumnSizeExceededException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.MAX_HBASE_CLIENT_KEYVALUE_MAXSIZE_EXCEEDED; +public class MaxPhoenixColumnSizeExceededException extends SQLException { + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = + SQLExceptionCode.MAX_HBASE_CLIENT_KEYVALUE_MAXSIZE_EXCEEDED; - public MaxPhoenixColumnSizeExceededException() { - super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), code.getErrorCode(), null); - } + public MaxPhoenixColumnSizeExceededException() { + super(new SQLExceptionInfo.Builder(code).build().toString(), code.getSQLState(), + code.getErrorCode(), null); + } - public MaxPhoenixColumnSizeExceededException(String columnInfo, int maxMutationCellSizeBytes, - int mutationCellSizeBytes) { - super(new SQLExceptionInfo.Builder(code).setMaxPhoenixColumnSizeBytes(maxMutationCellSizeBytes) - .setPhoenixColumnSizeBytes(mutationCellSizeBytes).build().toString() + ". " + columnInfo, - code.getSQLState(), code.getErrorCode(), null); - } -} \ No newline at end of file + public MaxPhoenixColumnSizeExceededException(String columnInfo, int maxMutationCellSizeBytes, + int mutationCellSizeBytes) { + super( + new SQLExceptionInfo.Builder(code).setMaxPhoenixColumnSizeBytes(maxMutationCellSizeBytes) + .setPhoenixColumnSizeBytes(mutationCellSizeBytes).build().toString() + ". " + columnInfo, + code.getSQLState(), code.getErrorCode(), null); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 6851b6fe31d..c5e8a76da01 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
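The three size-limit exceptions above (maximum mutation rows, maximum mutation bytes, maximum cell size) are thrown when a pending client-side batch outgrows its configured ceilings. A minimal sketch of that kind of guard, using a plain SQLException and hypothetical limit fields rather than the specific Phoenix subclasses and settings:

import java.sql.SQLException;

// Hypothetical guard; the class, field and method names are placeholders, not Phoenix APIs.
final class MutationBatchGuard {
  private final int maxRows;   // configured row ceiling
  private final long maxBytes; // configured byte ceiling

  MutationBatchGuard(int maxRows, long maxBytes) {
    this.maxRows = maxRows;
    this.maxBytes = maxBytes;
  }

  void check(int pendingRows, long pendingBytes) throws SQLException {
    if (pendingRows > maxRows) {
      throw new SQLException("Mutation row count " + pendingRows + " exceeds maximum " + maxRows);
    }
    if (pendingBytes > maxBytes) {
      throw new SQLException(
        "Mutation size " + pendingBytes + " bytes exceeds maximum " + maxBytes);
    }
  }
}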
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,22 +17,14 @@ */ package org.apache.phoenix.schema; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.RUN_UPDATE_STATS_ASYNC_ATTRIB; +import static org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants.INDEX_NAME; +import static org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants.REBUILD_ALL; import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_TRANSFORM_TRANSACTIONAL_TABLE; import static org.apache.phoenix.exception.SQLExceptionCode.ERROR_WRITING_TO_SCHEMA_REGISTRY; -import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_ALREADY_EXIST; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CDC_INCLUDE_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; -import static org.apache.phoenix.query.QueryConstants.SPLITS_FILE; -import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME; -import static org.apache.phoenix.query.QueryServices.INDEX_CREATE_DEFAULT_STATE; -import static org.apache.phoenix.schema.PTableType.CDC; -import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSet; -import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize; import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS; import static org.apache.phoenix.exception.SQLExceptionCode.PARENT_TABLE_NOT_FOUND; +import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_ALREADY_EXIST; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; @@ -40,6 +32,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CDC_INCLUDE_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT; @@ -72,6 +65,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT; @@ -88,12 +82,14 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCHEMA_VERSION; import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYNC_INDEX_CREATED_DATE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAMESPACE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM; @@ -101,6 +97,8 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION; @@ -108,17 +106,16 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE; -import static org.apache.phoenix.query.QueryServices.DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION; -import static org.apache.phoenix.query.QueryServices.DISABLE_VIEW_SUBTREE_VALIDATION; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.RUN_UPDATE_STATS_ASYNC_ATTRIB; -import static org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants.INDEX_NAME; -import static org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants.REBUILD_ALL; import static org.apache.phoenix.monitoring.MetricType.NUM_METADATA_LOOKUP_FAILURES; import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY; import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; +import static org.apache.phoenix.query.QueryConstants.SPLITS_FILE; +import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME; +import static org.apache.phoenix.query.QueryServices.DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION; +import static org.apache.phoenix.query.QueryServices.DISABLE_VIEW_SUBTREE_VALIDATION; import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB; +import static org.apache.phoenix.query.QueryServices.INDEX_CREATE_DEFAULT_STATE; import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA; import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC; import static 
org.apache.phoenix.schema.ColumnMetaDataOps.addColumnMutation; @@ -129,11 +126,14 @@ import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; import static org.apache.phoenix.schema.PTable.ViewType.MAPPED; import static org.apache.phoenix.schema.PTable.ViewType.UPDATABLE; +import static org.apache.phoenix.schema.PTableType.CDC; import static org.apache.phoenix.schema.PTableType.INDEX; import static org.apache.phoenix.schema.PTableType.TABLE; import static org.apache.phoenix.schema.PTableType.VIEW; import static org.apache.phoenix.schema.types.PDataType.FALSE_BYTES; import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; +import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSet; +import static org.apache.phoenix.thirdparty.com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize; import java.io.BufferedReader; import java.io.File; @@ -156,29 +156,19 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.Properties; import java.util.Set; -import java.util.HashSet; -import java.util.Objects; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.expression.function.PhoenixRowTimestampFunction; -import org.apache.phoenix.parse.CreateCDCStatement; -import org.apache.phoenix.parse.DropCDCStatement; -import org.apache.hadoop.hbase.client.Table; -import org.apache.phoenix.coprocessorclient.TableInfo; -import org.apache.phoenix.query.ConnectionlessQueryServicesImpl; -import org.apache.phoenix.query.DelegateQueryServices; -import org.apache.phoenix.query.ConnectionQueryServicesImpl; -import org.apache.phoenix.schema.task.SystemTaskParams; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; @@ -188,6 +178,7 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.access.AccessControlClient; @@ -210,20 +201,14 @@ import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MetaDataMutationResult; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.SharedTableState; -import org.apache.phoenix.schema.stats.GuidePostsInfo; -import org.apache.phoenix.schema.transform.TransformClient; -import org.apache.phoenix.util.ClientUtil; -import org.apache.phoenix.util.CDCUtil; -import org.apache.phoenix.util.TaskMetaDataServiceCallBack; -import org.apache.phoenix.util.ValidateLastDDLTimestampUtil; -import org.apache.phoenix.util.ViewUtil; -import org.apache.phoenix.util.JacksonUtil; +import org.apache.phoenix.coprocessorclient.TableInfo; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.execute.MutationState; import org.apache.phoenix.expression.Determinism; import 
org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.RowKeyColumnExpression; +import org.apache.phoenix.expression.function.PhoenixRowTimestampFunction; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -237,12 +222,14 @@ import org.apache.phoenix.parse.ColumnDef; import org.apache.phoenix.parse.ColumnDefInPkConstraint; import org.apache.phoenix.parse.ColumnName; +import org.apache.phoenix.parse.CreateCDCStatement; import org.apache.phoenix.parse.CreateFunctionStatement; import org.apache.phoenix.parse.CreateIndexStatement; import org.apache.phoenix.parse.CreateSchemaStatement; import org.apache.phoenix.parse.CreateSequenceStatement; import org.apache.phoenix.parse.CreateTableStatement; import org.apache.phoenix.parse.DeclareCursorStatement; +import org.apache.phoenix.parse.DropCDCStatement; import org.apache.phoenix.parse.DropColumnStatement; import org.apache.phoenix.parse.DropFunctionStatement; import org.apache.phoenix.parse.DropIndexStatement; @@ -264,6 +251,9 @@ import org.apache.phoenix.parse.UseSchemaStatement; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.ConnectionQueryServices.Feature; +import org.apache.phoenix.query.ConnectionQueryServicesImpl; +import org.apache.phoenix.query.ConnectionlessQueryServicesImpl; +import org.apache.phoenix.query.DelegateQueryServices; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; @@ -274,9 +264,12 @@ import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; import org.apache.phoenix.schema.PTable.QualifierEncodingScheme.QualifierOutOfRangeException; import org.apache.phoenix.schema.PTable.ViewType; +import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; import org.apache.phoenix.schema.stats.StatisticsUtil; +import org.apache.phoenix.schema.task.SystemTaskParams; import org.apache.phoenix.schema.task.Task; +import org.apache.phoenix.schema.transform.TransformClient; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PLong; @@ -284,15 +277,26 @@ import org.apache.phoenix.schema.types.PUnsignedLong; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Ints; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.transaction.PhoenixTransactionProvider; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.transaction.TransactionFactory.Provider; import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.CDCUtil; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CursorUtil; import 
org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.IndexUtil; +import org.apache.phoenix.util.JacksonUtil; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PhoenixRuntime; @@ -301,6305 +305,6492 @@ import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; +import org.apache.phoenix.util.TaskMetaDataServiceCallBack; import org.apache.phoenix.util.TransactionUtil; import org.apache.phoenix.util.UpgradeUtil; +import org.apache.phoenix.util.ValidateLastDDLTimestampUtil; +import org.apache.phoenix.util.ViewUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Ints; - public class MetaDataClient { - private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataClient.class); - - private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); - private static final String SET_ASYNC_CREATED_DATE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + - ") VALUES (?, ?, ?, ?)"; - - private static final String SET_INDEX_SYNC_CREATED_DATE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - SYNC_INDEX_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + - ") VALUES (?, ?, ?, ?)"; - - private static final String CREATE_TABLE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - TABLE_TYPE + "," + - TABLE_SEQ_NUM + "," + - COLUMN_COUNT + "," + - SALT_BUCKETS + "," + - PK_NAME + "," + - DATA_TABLE_NAME + "," + - INDEX_STATE + "," + - IMMUTABLE_ROWS + "," + - DEFAULT_COLUMN_FAMILY_NAME + "," + - VIEW_STATEMENT + "," + - DISABLE_WAL + "," + - MULTI_TENANT + "," + - VIEW_TYPE + "," + - INDEX_TYPE + "," + - STORE_NULLS + "," + - BASE_COLUMN_COUNT + "," + - TRANSACTION_PROVIDER + "," + - UPDATE_CACHE_FREQUENCY + "," + - IS_NAMESPACE_MAPPED + "," + - AUTO_PARTITION_SEQ + "," + - APPEND_ONLY_SCHEMA + "," + - GUIDE_POSTS_WIDTH + "," + - IMMUTABLE_STORAGE_SCHEME + "," + - ENCODING_SCHEME + "," + - USE_STATS_FOR_PARALLELIZATION +"," + - VIEW_INDEX_ID_DATA_TYPE +"," + - CHANGE_DETECTION_ENABLED + "," + - PHYSICAL_TABLE_NAME + "," + - SCHEMA_VERSION + "," + - STREAMING_TOPIC_NAME + "," + - INDEX_WHERE + "," + - MAX_LOOKBACK_AGE + "," + - CDC_INCLUDE_TABLE + "," + - TTL + "," + - ROW_KEY_MATCHER + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " + - "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - private static final String CREATE_SCHEMA = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE - + "\"( " + TABLE_SCHEM + "," + TABLE_NAME + ") VALUES (?,?)"; - - public static final String 
CREATE_LINK = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + "," + - LINK_TYPE + "," + - TABLE_SEQ_NUM +","+ // this is actually set to the parent table's sequence number - TABLE_TYPE + - ") VALUES (?, ?, ?, ?, ?, ?, ?)"; - - - public static final String CREATE_VIEW_LINK = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + "," + - LINK_TYPE + "," + - PARENT_TENANT_ID + " " + PVarchar.INSTANCE.getSqlTypeName() + // Dynamic column for now to prevent schema change - ") VALUES (?, ?, ?, ?, ?, ?)"; - - public static final String UPDATE_ENCODED_COLUMN_COUNTER = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + ", " + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + "," + - COLUMN_QUALIFIER_COUNTER + - ") VALUES (?, ?, ?, ?, ?)"; - - private static final String CREATE_CHILD_LINK = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - LINK_TYPE + - ") VALUES (?, ?, ?, ?, ?, ?)"; - - private static final String CREATE_VIEW_INDEX_PARENT_LINK = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + "," + - LINK_TYPE + - ") VALUES (?, ?, ?, ?, ?)"; - - private static final String INCREMENT_SEQ_NUM = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - TABLE_SEQ_NUM + - ") VALUES (?, ?, ?, ?)"; - public static final String MUTATE_TABLE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - TABLE_TYPE + "," + - TABLE_SEQ_NUM + "," + - COLUMN_COUNT + - ") VALUES (?, ?, ?, ?, ?, ?)"; - public static final String UPDATE_INDEX_STATE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - INDEX_STATE + "," + - ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + - ") VALUES (?, ?, ?, ?, ?)"; - - private static final String UPDATE_INDEX_REBUILD_ASYNC_STATE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + - ") VALUES (?, ?, ?, ?)"; - - public static final String UPDATE_INDEX_STATE_TO_ACTIVE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - INDEX_STATE + "," + - INDEX_DISABLE_TIMESTAMP +","+ - ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + - ") VALUES (?, ?, ?, ?, ?, ?)"; - - /* - * Custom sql to add a column to SYSTEM.CATALOG table during upgrade. - * We can't use the regular ColumnMetaDataOps.UPSERT_COLUMN sql because the COLUMN_QUALIFIER column - * was added in 4.10. And so if upgrading from let's say 4.7, we won't be able to - * find the COLUMN_QUALIFIER column which the INSERT_COLUMN_ALTER_TABLE sql expects. 
- */ - public static final String ALTER_SYSCATALOG_TABLE_UPGRADE = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - NULLABLE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - DATA_TABLE_NAME + "," + // write this both in the column and table rows for access by metadata APIs - ARRAY_SIZE + "," + - VIEW_CONSTANT + "," + - IS_VIEW_REFERENCED + "," + - PK_NAME + "," + // write this both in the column and table rows for access by metadata APIs - KEY_SEQ + "," + - COLUMN_DEF + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - private static final String UPDATE_COLUMN_POSITION = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" ( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - ORDINAL_POSITION + - ") VALUES (?, ?, ?, ?, ?, ?)"; - private static final String CREATE_FUNCTION = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_FUNCTION_TABLE + "\" ( " + - TENANT_ID +","+ - FUNCTION_NAME + "," + - NUM_ARGS + "," + - CLASS_NAME + "," + - JAR_PATH + "," + - RETURN_TYPE + - ") VALUES (?, ?, ?, ?, ?, ?)"; - private static final String INSERT_FUNCTION_ARGUMENT = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_FUNCTION_TABLE + "\" ( " + - TENANT_ID +","+ - FUNCTION_NAME + "," + - TYPE + "," + - ARG_POSITION +","+ - IS_ARRAY + "," + - IS_CONSTANT + "," + - DEFAULT_VALUE + "," + - MIN_VALUE + "," + - MAX_VALUE + - ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - public static final String EMPTY_TABLE = " "; - - private final PhoenixConnection connection; - - public MetaDataClient(PhoenixConnection connection) { - this.connection = connection; - } - - public PhoenixConnection getConnection() { - return connection; - } - - public long getCurrentTime(String schemaName, String tableName) throws SQLException { - MetaDataMutationResult result = updateCache(schemaName, tableName, true); - return result.getMutationTime(); - } - - /** - * Update the cache with the latest as of the connection scn. 
- * @param schemaName - * @param tableName - * @return the timestamp from the server, negative if the table was added to the cache and positive otherwise - * @throws SQLException - */ - public MetaDataMutationResult updateCache(String schemaName, String tableName) throws SQLException { - return updateCache(schemaName, tableName, false); - } - - public MetaDataMutationResult updateCache(String schemaName, String tableName, - boolean alwaysHitServer) throws SQLException { - return updateCache(connection.getTenantId(), schemaName, tableName, alwaysHitServer); + private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataClient.class); + + private static final ParseNodeFactory FACTORY = new ParseNodeFactory(); + private static final String SET_ASYNC_CREATED_DATE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ") VALUES (?, ?, ?, ?)"; + + private static final String SET_INDEX_SYNC_CREATED_DATE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + SYNC_INDEX_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ") VALUES (?, ?, ?, ?)"; + + private static final String CREATE_TABLE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + TABLE_TYPE + "," + TABLE_SEQ_NUM + "," + COLUMN_COUNT + "," + SALT_BUCKETS + "," + PK_NAME + + "," + DATA_TABLE_NAME + "," + INDEX_STATE + "," + IMMUTABLE_ROWS + "," + + DEFAULT_COLUMN_FAMILY_NAME + "," + VIEW_STATEMENT + "," + DISABLE_WAL + "," + MULTI_TENANT + + "," + VIEW_TYPE + "," + INDEX_TYPE + "," + STORE_NULLS + "," + BASE_COLUMN_COUNT + "," + + TRANSACTION_PROVIDER + "," + UPDATE_CACHE_FREQUENCY + "," + IS_NAMESPACE_MAPPED + "," + + AUTO_PARTITION_SEQ + "," + APPEND_ONLY_SCHEMA + "," + GUIDE_POSTS_WIDTH + "," + + IMMUTABLE_STORAGE_SCHEME + "," + ENCODING_SCHEME + "," + USE_STATS_FOR_PARALLELIZATION + "," + + VIEW_INDEX_ID_DATA_TYPE + "," + CHANGE_DETECTION_ENABLED + "," + PHYSICAL_TABLE_NAME + "," + + SCHEMA_VERSION + "," + STREAMING_TOPIC_NAME + "," + INDEX_WHERE + "," + MAX_LOOKBACK_AGE + "," + + CDC_INCLUDE_TABLE + "," + TTL + "," + ROW_KEY_MATCHER + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " + + "?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + private static final String CREATE_SCHEMA = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TABLE_SCHEM + "," + TABLE_NAME + ") VALUES (?,?)"; + + public static final String CREATE_LINK = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_FAMILY + "," + LINK_TYPE + "," + TABLE_SEQ_NUM + "," + // this is actually set to the + // parent table's sequence + // number + TABLE_TYPE + ") VALUES (?, ?, ?, ?, ?, ?, ?)"; + + public static final String CREATE_VIEW_LINK = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_FAMILY + "," + LINK_TYPE + "," + + PARENT_TENANT_ID + " " + PVarchar.INSTANCE.getSqlTypeName() + // Dynamic column for now to + // prevent schema change + ") VALUES (?, ?, ?, ?, ?, ?)"; + + public static final String UPDATE_ENCODED_COLUMN_COUNTER = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + 
SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + ", " + TABLE_SCHEM + "," + TABLE_NAME + + "," + COLUMN_FAMILY + "," + COLUMN_QUALIFIER_COUNTER + ") VALUES (?, ?, ?, ?, ?)"; + + private static final String CREATE_CHILD_LINK = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_NAME + "," + COLUMN_FAMILY + "," + LINK_TYPE + ") VALUES (?, ?, ?, ?, ?, ?)"; + + private static final String CREATE_VIEW_INDEX_PARENT_LINK = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_FAMILY + "," + LINK_TYPE + ") VALUES (?, ?, ?, ?, ?)"; + + private static final String INCREMENT_SEQ_NUM = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + TABLE_SEQ_NUM + ") VALUES (?, ?, ?, ?)"; + public static final String MUTATE_TABLE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + TABLE_TYPE + "," + TABLE_SEQ_NUM + "," + COLUMN_COUNT + ") VALUES (?, ?, ?, ?, ?, ?)"; + public static final String UPDATE_INDEX_STATE = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + INDEX_STATE + "," + ASYNC_REBUILD_TIMESTAMP + " " + + PLong.INSTANCE.getSqlTypeName() + ") VALUES (?, ?, ?, ?, ?)"; + + private static final String UPDATE_INDEX_REBUILD_ASYNC_STATE = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + ASYNC_REBUILD_TIMESTAMP + " " + + PLong.INSTANCE.getSqlTypeName() + ") VALUES (?, ?, ?, ?)"; + + public static final String UPDATE_INDEX_STATE_TO_ACTIVE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + INDEX_STATE + "," + INDEX_DISABLE_TIMESTAMP + "," + ASYNC_REBUILD_TIMESTAMP + " " + + PLong.INSTANCE.getSqlTypeName() + ") VALUES (?, ?, ?, ?, ?, ?)"; + + /* + * Custom sql to add a column to SYSTEM.CATALOG table during upgrade. We can't use the regular + * ColumnMetaDataOps.UPSERT_COLUMN sql because the COLUMN_QUALIFIER column was added in 4.10. And + * so if upgrading from let's say 4.7, we won't be able to find the COLUMN_QUALIFIER column which + * the INSERT_COLUMN_ALTER_TABLE sql expects. 
+ */ + public static final String ALTER_SYSCATALOG_TABLE_UPGRADE = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_NAME + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + NULLABLE + "," + COLUMN_SIZE + "," + + DECIMAL_DIGITS + "," + ORDINAL_POSITION + "," + SORT_ORDER + "," + DATA_TABLE_NAME + "," + // write + // this + // both + // in + // the + // column + // and + // table + // rows + // for + // access + // by + // metadata + // APIs + ARRAY_SIZE + "," + VIEW_CONSTANT + "," + IS_VIEW_REFERENCED + "," + PK_NAME + "," + // write + // this both + // in the + // column + // and table + // rows for + // access by + // metadata + // APIs + KEY_SEQ + "," + COLUMN_DEF + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + private static final String UPDATE_COLUMN_POSITION = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" ( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "," + + ORDINAL_POSITION + ") VALUES (?, ?, ?, ?, ?, ?)"; + private static final String CREATE_FUNCTION = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_FUNCTION_TABLE + "\" ( " + TENANT_ID + "," + FUNCTION_NAME + "," + NUM_ARGS + "," + + CLASS_NAME + "," + JAR_PATH + "," + RETURN_TYPE + ") VALUES (?, ?, ?, ?, ?, ?)"; + private static final String INSERT_FUNCTION_ARGUMENT = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + + ".\"" + SYSTEM_FUNCTION_TABLE + "\" ( " + TENANT_ID + "," + FUNCTION_NAME + "," + TYPE + "," + + ARG_POSITION + "," + IS_ARRAY + "," + IS_CONSTANT + "," + DEFAULT_VALUE + "," + MIN_VALUE + + "," + MAX_VALUE + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + public static final String EMPTY_TABLE = " "; + + private final PhoenixConnection connection; + + public MetaDataClient(PhoenixConnection connection) { + this.connection = connection; + } + + public PhoenixConnection getConnection() { + return connection; + } + + public long getCurrentTime(String schemaName, String tableName) throws SQLException { + MetaDataMutationResult result = updateCache(schemaName, tableName, true); + return result.getMutationTime(); + } + + /** + * Update the cache with the latest as of the connection scn. + * @return the timestamp from the server, negative if the table was added to the cache and + * positive otherwise + */ + public MetaDataMutationResult updateCache(String schemaName, String tableName) + throws SQLException { + return updateCache(schemaName, tableName, false); + } + + public MetaDataMutationResult updateCache(String schemaName, String tableName, + boolean alwaysHitServer) throws SQLException { + return updateCache(connection.getTenantId(), schemaName, tableName, alwaysHitServer); + } + + public MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName, + boolean alwaysHitServer) throws SQLException { + return updateCache(tenantId, schemaName, tableName, alwaysHitServer, null); + } + + /** + * Update the cache with the latest as of the connection scn. 
+ * @return the timestamp from the server, negative if the function was added to the cache and + * positive otherwise + */ + public MetaDataMutationResult updateCache(List functionNames) throws SQLException { + return updateCache(functionNames, false); + } + + private MetaDataMutationResult updateCache(List functionNames, boolean alwaysHitServer) + throws SQLException { + return updateCache(connection.getTenantId(), functionNames, alwaysHitServer); + } + + public MetaDataMutationResult updateCache(PName tenantId, List functionNames) + throws SQLException { + return updateCache(tenantId, functionNames, false); + } + + private long getClientTimeStamp() { + Long scn = connection.getSCN(); + long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + return clientTimeStamp; + } + + private long getCurrentScn() { + Long scn = connection.getSCN(); + long currentScn = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + return currentScn; + } + + public MetaDataMutationResult updateCache(PName origTenantId, String schemaName, String tableName, + boolean alwaysHitServer, Long resolvedTimestamp) throws SQLException { // TODO: pass byte[] + // herez + boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName); + // System tables must always have a null tenantId + PName tenantId = systemTable ? null : origTenantId; + PTable table = null; + PTableRef tableRef = null; + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + long tableTimestamp = HConstants.LATEST_TIMESTAMP; + long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP; + int tryCount = 0; + // for tenant specific connection, look up the global table without using tenantId + int maxTryCount = tenantId == null ? 1 : 2; + do { + try { + tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName)); + table = tableRef.getTable(); + tableTimestamp = table.getTimeStamp(); + tableResolvedTimestamp = table.getTimeStamp(); + break; + } catch (TableNotFoundException e) { + tenantId = null; + } + } while (++tryCount < maxTryCount); + // reset the tenantId if the global table isn't found in the cache + if (table == null) { + tenantId = systemTable ? null : origTenantId; } - public MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName, boolean alwaysHitServer) throws SQLException { - return updateCache(tenantId, schemaName, tableName, alwaysHitServer, null); + // start a txn if all table are transactional by default or if we found the table in the cache + // and it is transactional + // TODO if system tables become transactional remove the check + boolean isTransactional = (table != null && table.isTransactional()); + if (isTransactional) { + connection.getMutationState().startTransaction(table.getTransactionProvider()); } - - /** - * Update the cache with the latest as of the connection scn. 
- * @param functionNames - * @return the timestamp from the server, negative if the function was added to the cache and positive otherwise - * @throws SQLException - */ - public MetaDataMutationResult updateCache(List functionNames) throws SQLException { - return updateCache(functionNames, false); + // this is to allow the connection to see SYSTEM tables during an upgrade since they are not + // cached at the connection level + if ( + connection.isRunningUpgrade() && systemTable && connection.getSCN() != null + && connection.getSCN() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP + ) { + resolvedTimestamp = HConstants.LATEST_TIMESTAMP; + } else { + resolvedTimestamp = resolvedTimestamp == null + ? TransactionUtil.getResolvedTimestamp(connection, isTransactional, + HConstants.LATEST_TIMESTAMP) + : resolvedTimestamp; } - private MetaDataMutationResult updateCache(List functionNames, boolean alwaysHitServer) throws SQLException { - return updateCache(connection.getTenantId(), functionNames, alwaysHitServer); + if ( + avoidRpcToGetTable(alwaysHitServer, resolvedTimestamp, systemTable, table, tableRef, + tableResolvedTimestamp) + ) { + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, + QueryConstants.UNSET_TIMESTAMP, table); } - public MetaDataMutationResult updateCache(PName tenantId, List functionNames) throws SQLException { - return updateCache(tenantId, functionNames, false); + MetaDataMutationResult result; + // if we are looking up an index on a child view that is inherited from its + // parent, then we need to resolve the parent of the child view which will also + // load any of its indexes instead of trying to load the inherited view index + // which doesn't exist in SYSTEM.CATALOG + if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { + String parentViewName = SchemaUtil.getSchemaNameFromFullName(tableName, + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + // recursively look up the parent view as we could have inherited this index from an ancestor + // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) + // the view index name will be V2#V1#VIndex + result = updateCache(origTenantId, SchemaUtil.getSchemaNameFromFullName(parentViewName), + SchemaUtil.getTableNameFromFullName(parentViewName), alwaysHitServer, resolvedTimestamp); + if (result.getTable() != null) { + try { + tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName)); + table = tableRef.getTable(); + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, + tableRef.getResolvedTimeStamp(), table); + } catch (TableNotFoundException e) { + // reset the result as we looked up the parent view + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + QueryConstants.UNSET_TIMESTAMP, null); + } + } + } else { + tryCount = 0; + do { + final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName); + final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName); + ConnectionQueryServices queryServices = connection.getQueryServices(); + result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, + resolvedTimestamp); + // if the table was assumed to be non transactional, but is actually transactional + // then re-resolve as of the right timestamp + if (result.getTable() != null && result.getTable().isTransactional() && !isTransactional) { + long resolveTimestamp = TransactionUtil.getResolvedTimestamp(connection, + result.getTable().isTransactional(), HConstants.LATEST_TIMESTAMP); + // Reresolve if table 
timestamp is past timestamp as of which we should see data + if (result.getTable().getTimeStamp() >= resolveTimestamp) { + result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, + resolveTimestamp); + } + } + + if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) { + if ( + result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS + && result.getTable() == null + ) { + result.setTable(table); + } + if (result.getTable() != null) { + addTableToCache(result, alwaysHitServer); + } + return result; + } + MutationCode code = result.getMutationCode(); + PTable resultTable = result.getTable(); + // We found an updated table, so update our cache + if (resultTable != null) { + // Cache table, even if multi-tenant table found for null tenant_id + // These may be accessed by tenant-specific connections, as the + // tenant_id will always be added to mask other tenants data. + // Otherwise, a tenant would be required to create a VIEW first + // which is not really necessary unless you want to filter or add + // columns + addTableToCache(result, alwaysHitServer); + return result; + } else { + // if (result.getMutationCode() == MutationCode.NEWER_TABLE_FOUND) { + // TODO: No table exists at the clientTimestamp, but a newer one exists. + // Since we disallow creation or modification of a table earlier than the latest + // timestamp, we can handle this such that we don't ask the + // server again. + if (table != null) { + // Ensures that table in result is set to table found in our cache. + if (code == MutationCode.TABLE_ALREADY_EXISTS) { + result.setTable(table); + // Although this table is up-to-date, the parent table may not be. + // In this case, we update the parent table which may in turn pull + // in indexes to add to this table. + long resolvedTime = TransactionUtil.getResolvedTime(connection, result); + if ( + addColumnsIndexesAndLastDDLTimestampsFromAncestors(result, resolvedTimestamp, true, + false) + ) { + updateIndexesWithAncestorMap(result); + connection.addTable(result.getTable(), resolvedTime); + } else { + // if we aren't adding the table, we still need to update the + // resolved time of the table + connection.updateResolvedTimestamp(table, resolvedTime); + } + return result; + } + // If table was not found at the current time stamp and we have one cached, + // remove it. + // Otherwise, we're up to date, so there's nothing to do. + if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) { + connection.removeTable(origTenantId, fullTableName, + table.getParentName() == null ? null : table.getParentName().getString(), + table.getTimeStamp()); + } + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_METADATA_LOOKUP_FAILURES, 1); + } + } + tenantId = null; // Try again with global tenantId + } while (++tryCount < maxTryCount); } - private long getClientTimeStamp() { - Long scn = connection.getSCN(); - long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - return clientTimeStamp; + return result; + } + + // Do not make rpc to getTable if + // 1. table is a system table that does not have a ROW_TIMESTAMP column OR + // 2. table was already resolved as of that timestamp OR + // 3. table does not have a ROW_TIMESTAMP column and age is less then UPDATE_CACHE_FREQUENCY + // 3a. Get the effective UPDATE_CACHE_FREQUENCY for checking the age in the following precedence + // order: + // Table-level property > Connection-level property > Default value. 
+ private boolean avoidRpcToGetTable(boolean alwaysHitServer, Long resolvedTimestamp, + boolean systemTable, PTable table, PTableRef tableRef, long tableResolvedTimestamp) { + if (table != null && !alwaysHitServer) { + if ( + systemTable && table.getRowTimestampColPos() == -1 + || resolvedTimestamp == tableResolvedTimestamp + ) { + return true; + } + + final long effectiveUpdateCacheFreq; + final String ucfInfoForLogging; // Only used for logging purposes + + boolean overrideUcfToAlways = false; + if (table.getType() == INDEX) { + overrideUcfToAlways = PIndexState.PENDING_DISABLE.equals(table.getIndexState()) + || !IndexMaintainer.sendIndexMaintainer(table); + } + if (!overrideUcfToAlways && !table.getIndexes().isEmpty()) { + List indexes = table.getIndexes(); + List maintainedIndexes = + Lists.newArrayList(IndexMaintainer.maintainedIndexes(indexes.iterator())); + // The maintainedIndexes contain only the indexes that are used by clients + // while generating the mutations. If all the indexes are usable by clients, + // we don't need to override UPDATE_CACHE_FREQUENCY. However, if any index is + // not in usable state by the client mutations, we should override + // UPDATE_CACHE_FREQUENCY to default value so that we make getTable() RPC calls + // until all index states change to ACTIVE, BUILDING or other usable states. + overrideUcfToAlways = indexes.size() != maintainedIndexes.size(); + } + + // What if the table is created with UPDATE_CACHE_FREQUENCY explicitly set to ALWAYS? + // i.e. explicitly set to 0. We should ideally be checking for something like + // hasUpdateCacheFrequency(). + + // always fetch an Index in PENDING_DISABLE state to retrieve server timestamp + // QueryOptimizer needs that to decide whether the index can be used + if (overrideUcfToAlways) { + effectiveUpdateCacheFreq = + (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS"); + ucfInfoForLogging = "override-to-always"; + } else if ( + table.getUpdateCacheFrequency() != QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY + ) { + effectiveUpdateCacheFreq = table.getUpdateCacheFrequency(); + ucfInfoForLogging = "table-level"; + } else { + effectiveUpdateCacheFreq = + (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue(connection.getQueryServices() + .getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + ucfInfoForLogging = connection.getQueryServices().getProps() + .get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB) != null + ? "connection-level" + : "default"; + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Using " + ucfInfoForLogging + " Update Cache Frequency (value = " + + effectiveUpdateCacheFreq + "ms) for " + table.getName() + + (table.getTenantId() != null ? 
", Tenant ID: " + table.getTenantId() : "")); + } + + return MetaDataUtil.avoidMetadataRPC(connection, table, tableRef, effectiveUpdateCacheFreq); } + return false; + } + + public MetaDataMutationResult updateCache(String schemaName) throws SQLException { + return updateCache(schemaName, false); + } + + public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) + throws SQLException { + long clientTimeStamp = getClientTimeStamp(); + PSchema schema = null; + try { + schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName)); + if (schema != null && !alwaysHitServer) { + return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, + QueryConstants.UNSET_TIMESTAMP); + } + } catch (SchemaNotFoundException e) { - private long getCurrentScn() { - Long scn = connection.getSCN(); - long currentScn = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - return currentScn; } - - public MetaDataMutationResult updateCache(PName origTenantId, String schemaName, String tableName, - boolean alwaysHitServer, Long resolvedTimestamp) throws SQLException { // TODO: pass byte[] herez - boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName); - // System tables must always have a null tenantId - PName tenantId = systemTable ? null : origTenantId; - PTable table = null; - PTableRef tableRef = null; - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - long tableTimestamp = HConstants.LATEST_TIMESTAMP; - long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP; - int tryCount = 0; - // for tenant specific connection, look up the global table without using tenantId - int maxTryCount = tenantId == null ? 1 : 2; - do { - try { - tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName)); - table = tableRef.getTable(); - tableTimestamp = table.getTimeStamp(); - tableResolvedTimestamp = table.getTimeStamp(); - break; - } catch (TableNotFoundException e) { - tenantId = null; - } - } while (++tryCount < maxTryCount); - // reset the tenantId if the global table isn't found in the cache - if (table==null) { - tenantId = systemTable ? null : origTenantId; - } - - // start a txn if all table are transactional by default or if we found the table in the cache and it is transactional - // TODO if system tables become transactional remove the check - boolean isTransactional = (table!=null && table.isTransactional()); - if (isTransactional) { - connection.getMutationState().startTransaction(table.getTransactionProvider()); - } - // this is to allow the connection to see SYSTEM tables during an upgrade since they are not cached at the connection level - if (connection.isRunningUpgrade() && systemTable && connection.getSCN() != null && connection.getSCN() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP) { - resolvedTimestamp = HConstants.LATEST_TIMESTAMP; - } - else { - resolvedTimestamp = resolvedTimestamp==null ? 
TransactionUtil.getResolvedTimestamp(connection, isTransactional, HConstants.LATEST_TIMESTAMP) : resolvedTimestamp; - } - - if (avoidRpcToGetTable(alwaysHitServer, resolvedTimestamp, systemTable, table, tableRef, - tableResolvedTimestamp)) { - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, - QueryConstants.UNSET_TIMESTAMP, table); - } - - MetaDataMutationResult result; - // if we are looking up an index on a child view that is inherited from its - // parent, then we need to resolve the parent of the child view which will also - // load any of its indexes instead of trying to load the inherited view index - // which doesn't exist in SYSTEM.CATALOG - if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - String parentViewName = - SchemaUtil.getSchemaNameFromFullName(tableName, - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - // recursively look up the parent view as we could have inherited this index from an ancestor - // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) - // the view index name will be V2#V1#VIndex - result = - updateCache(origTenantId, SchemaUtil.getSchemaNameFromFullName(parentViewName), - SchemaUtil.getTableNameFromFullName(parentViewName), alwaysHitServer, - resolvedTimestamp); - if (result.getTable() != null) { - try { - tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName)); - table = tableRef.getTable(); - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, - tableRef.getResolvedTimeStamp(), table); - } catch (TableNotFoundException e) { - // reset the result as we looked up the parent view - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - QueryConstants.UNSET_TIMESTAMP, null); - } - } - } - else { - tryCount = 0; - do { - final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName); - final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName); - ConnectionQueryServices queryServices = connection.getQueryServices(); - result = - queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, resolvedTimestamp); - // if the table was assumed to be non transactional, but is actually transactional - // then re-resolve as of the right timestamp - if (result.getTable() != null - && result.getTable().isTransactional() - && !isTransactional) { - long resolveTimestamp = TransactionUtil.getResolvedTimestamp(connection, - result.getTable().isTransactional(), - HConstants.LATEST_TIMESTAMP); - // Reresolve if table timestamp is past timestamp as of which we should see data - if (result.getTable().getTimeStamp() >= resolveTimestamp) { - result = queryServices.getTable(tenantId, schemaBytes, - tableBytes, tableTimestamp, resolveTimestamp); - } - } - - if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) { - if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS - && result.getTable() == null) { - result.setTable(table); - } - if (result.getTable()!=null) { - addTableToCache(result, alwaysHitServer); - } - return result; - } - MutationCode code = result.getMutationCode(); - PTable resultTable = result.getTable(); - // We found an updated table, so update our cache - if (resultTable != null) { - // Cache table, even if multi-tenant table found for null tenant_id - // These may be accessed by tenant-specific connections, as the - // tenant_id will always be added to mask other tenants data. 
- // Otherwise, a tenant would be required to create a VIEW first - // which is not really necessary unless you want to filter or add - // columns - addTableToCache(result, alwaysHitServer); - return result; - } else { - // if (result.getMutationCode() == MutationCode.NEWER_TABLE_FOUND) { - // TODO: No table exists at the clientTimestamp, but a newer one exists. - // Since we disallow creation or modification of a table earlier than the latest - // timestamp, we can handle this such that we don't ask the - // server again. - if (table != null) { - // Ensures that table in result is set to table found in our cache. - if (code == MutationCode.TABLE_ALREADY_EXISTS) { - result.setTable(table); - // Although this table is up-to-date, the parent table may not be. - // In this case, we update the parent table which may in turn pull - // in indexes to add to this table. - long resolvedTime = TransactionUtil.getResolvedTime(connection, result); - if (addColumnsIndexesAndLastDDLTimestampsFromAncestors(result, - resolvedTimestamp, true, false)) { - updateIndexesWithAncestorMap(result); - connection.addTable(result.getTable(), resolvedTime); - } else { - // if we aren't adding the table, we still need to update the - // resolved time of the table - connection.updateResolvedTimestamp(table, resolvedTime); - } - return result; - } - // If table was not found at the current time stamp and we have one cached, - // remove it. - // Otherwise, we're up to date, so there's nothing to do. - if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) { - connection - .removeTable(origTenantId, fullTableName, - table.getParentName() == null ? null - : table.getParentName().getString(), - table.getTimeStamp()); - } - TableMetricsManager.updateMetricsForSystemCatalogTableMethod( - null, NUM_METADATA_LOOKUP_FAILURES, 1); - } - } - tenantId = null; // Try again with global tenantId - } while (++tryCount < maxTryCount); + MetaDataMutationResult result; + + result = connection.getQueryServices().getSchema(schemaName, clientTimeStamp); + return result; + } + + public MetaDataMutationResult updateCache(PName tenantId, List functionNames, + boolean alwaysHitServer) throws SQLException { // TODO: pass byte[] herez + long clientTimeStamp = getClientTimeStamp(); + List functions = new ArrayList(functionNames.size()); + List functionTimeStamps = new ArrayList(functionNames.size()); + Iterator iterator = functionNames.iterator(); + while (iterator.hasNext()) { + PFunction function = null; + try { + String functionName = iterator.next(); + function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName)); + if ( + function != null && !alwaysHitServer && function.getTimeStamp() == clientTimeStamp - 1 + ) { + functions.add(function); + iterator.remove(); + continue; + } + if (function != null && function.getTimeStamp() != clientTimeStamp - 1) { + functionTimeStamps.add(function.getTimeStamp()); + } else { + functionTimeStamps.add(HConstants.LATEST_TIMESTAMP); } + } catch (FunctionNotFoundException e) { + functionTimeStamps.add(HConstants.LATEST_TIMESTAMP); + } + } + // Don't bother with server call: we can't possibly find a newer function + if (functionNames.isEmpty()) { + return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, + QueryConstants.UNSET_TIMESTAMP, functions, true); + } + int maxTryCount = tenantId == null ? 
1 : 2; + int tryCount = 0; + MetaDataMutationResult result; + + do { + List> functionsToFecth = + new ArrayList>(functionNames.size()); + for (int i = 0; i < functionNames.size(); i++) { + functionsToFecth.add(new Pair(PVarchar.INSTANCE.toBytes(functionNames.get(i)), + functionTimeStamps.get(i))); + } + result = + connection.getQueryServices().getFunctions(tenantId, functionsToFecth, clientTimeStamp); + + MutationCode code = result.getMutationCode(); + // We found an updated table, so update our cache + if (result.getFunctions() != null && !result.getFunctions().isEmpty()) { + result.getFunctions().addAll(functions); + addFunctionToCache(result); return result; + } else { + if (code == MutationCode.FUNCTION_ALREADY_EXISTS) { + result.getFunctions().addAll(functions); + addFunctionToCache(result); + return result; + } + if (code == MutationCode.FUNCTION_NOT_FOUND && tryCount + 1 == maxTryCount) { + for (Pair f : functionsToFecth) { + connection.removeFunction(tenantId, Bytes.toString(f.getFirst()), f.getSecond()); + } + // TODO removeFunctions all together from cache when + throw new FunctionNotFoundException(functionNames.toString() + " not found"); + } + } + tenantId = null; // Try again with global tenantId + } while (++tryCount < maxTryCount); + + return result; + } + + /** + * Looks up the ancestors of views and view indexes and adds inherited columns and also any + * indexes of the ancestors that can be used + * @param result the result from updating the cache for the current + * table. + * @param resolvedTimestamp timestamp at which child table was resolved + * @param alwaysAddAncestorColumnsAndIndexes flag that determines whether we should recalculate + * all inherited columns and indexes that can be used in + * the view and + * @param alwaysHitServerForAncestors flag that determines whether we should fetch latest + * metadata for ancestors from the server + * @return true if the PTable contained by result was modified and false otherwise + * @throws SQLException if the physical table cannot be found + */ + private boolean addColumnsIndexesAndLastDDLTimestampsFromAncestors(MetaDataMutationResult result, + Long resolvedTimestamp, boolean alwaysAddAncestorColumnsAndIndexes, + boolean alwaysHitServerForAncestors) throws SQLException { + PTable table = result.getTable(); + boolean hasIndexId = table.getViewIndexId() != null; + if ( + table.getType() == PTableType.INDEX + || (table.getType() == PTableType.VIEW && table.getViewType() != ViewType.MAPPED) + ) { + String tableName = null; + try { + String parentName = table.getParentName().getString(); + String parentSchemaName = SchemaUtil.getSchemaNameFromFullName(parentName); + tableName = SchemaUtil.getTableNameFromFullName(parentName); + MetaDataMutationResult parentResult = updateCache(connection.getTenantId(), + parentSchemaName, tableName, alwaysHitServerForAncestors, resolvedTimestamp); + PTable parentTable = parentResult.getTable(); + if (parentResult.getMutationCode() == MutationCode.TABLE_NOT_FOUND || parentTable == null) { + // Try once more with different tenant id (connection can be global but view could be + // tenant + parentResult = + updateCache(table.getTenantId(), parentSchemaName, tableName, false, resolvedTimestamp); + parentTable = parentResult.getTable(); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("addColumnsAndIndexesFromAncestors parent logical name " + + table.getBaseTableLogicalName().getString() + " parent name " + + table.getParentName().getString() + " tableName=" + table.getName()); + } + if 
(parentResult.getMutationCode() == MutationCode.TABLE_NOT_FOUND || parentTable == null) { + // this mean the parent table was dropped and the child views have not yet been + // dropped by the TaskRegionObserver + String schemaName = + table.getSchemaName() != null ? table.getSchemaName().getString() : null; + throw new TableNotFoundException(schemaName, parentName); + } + // only inherit columns view indexes (and not local indexes on regular tables which also + // have a viewIndexId) + if (hasIndexId && parentTable.getType() != PTableType.VIEW) { + return false; + } + // if alwaysAddAncestorColumnsAndIndexes is false we only recalculate if the ancestor table + // or table + // was updated from the server + if ( + !alwaysAddAncestorColumnsAndIndexes && !result.wasUpdated() && !parentResult.wasUpdated() + ) { + return false; + } + + // only need to inherit columns and indexes for view indexes and views + if (!table.getType().equals(PTableType.INDEX) || hasIndexId) { + PTable pTableWithDerivedColumnsAndIndexes = + ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, parentTable); + result.setTable(getPTableWithAncestorLastDDLTimestampMap( + pTableWithDerivedColumnsAndIndexes, parentTable)); + } else { + result.setTable(getPTableWithAncestorLastDDLTimestampMap(table, parentTable)); + } + return true; + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } + return false; + } + + /** + * Update the indexes within this result's table with ancestor->last_ddl_timestamp map. + */ + private void updateIndexesWithAncestorMap(MetaDataMutationResult result) throws SQLException { + PTable table = result.getTable(); + if (table.getIndexes().isEmpty()) { + return; + } + List newIndexes = new ArrayList<>(table.getIndexes().size()); + for (PTable index : table.getIndexes()) { + newIndexes.add(getPTableWithAncestorLastDDLTimestampMap(index, table)); + } + result.setTable(PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) + .setIndexes(newIndexes).build()); + } + + /** + * Creates a new PTable object from the provided pTable and with the ancestorLastDDLTimestampMap + * Copy the map of the parent and add the last_ddl_timestamp of the parent in the map. + */ + private PTable getPTableWithAncestorLastDDLTimestampMap(PTable pTable, PTable parentTable) + throws SQLException { + Map ancestorMap = new HashMap<>(parentTable.getAncestorLastDDLTimestampMap()); + // this method can be called for an index and a view which inherited this index + // from its ancestors, skip adding the view as an ancestor of the index. + if (pTable.getParentName().equals(parentTable.getName())) { + ancestorMap.put(parentTable.getKey(), parentTable.getLastDDLTimestamp()); + } + return PTableImpl.builderWithColumns(pTable, PTableImpl.getColumnsToClone(pTable)) + .setAncestorLastDDLTimestampMap(ancestorMap).build(); + } + + private void addFunctionArgMutation(String functionName, FunctionArgument arg, + PreparedStatement argUpsert, int position) throws SQLException { + argUpsert.setString(1, + connection.getTenantId() == null ? null : connection.getTenantId().getString()); + argUpsert.setString(2, functionName); + argUpsert.setString(3, arg.getArgumentType()); + byte[] bytes = Bytes.toBytes((short) position); + argUpsert.setBytes(4, bytes); + argUpsert.setBoolean(5, arg.isArrayType()); + argUpsert.setBoolean(6, arg.isConstant()); + argUpsert.setString(7, arg.getDefaultValue() == null ? 
null : arg.getDefaultValue().toString()); + argUpsert.setString(8, arg.getMinValue() == null ? null : arg.getMinValue().toString()); + argUpsert.setString(9, arg.getMaxValue() == null ? null : arg.getMaxValue().toString()); + argUpsert.execute(); + } + + public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, + String viewStatement, ViewType viewType, PDataType viewIndexIdType, byte[] rowKeyMatcher, + byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException { + TableName tableName = statement.getTableName(); + Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); + Map commonFamilyProps = + Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); + populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, + statement.getTableType(), false); + + splits = processSplits(tableProps, splits); + boolean isAppendOnlySchema = false; + long updateCacheFrequency = (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue(connection + .getQueryServices().getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + Long updateCacheFrequencyProp = + (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps); + if (parent == null) { + Boolean appendOnlySchemaProp = + (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps); + if (appendOnlySchemaProp != null) { + isAppendOnlySchema = appendOnlySchemaProp; + } + if (updateCacheFrequencyProp != null) { + updateCacheFrequency = updateCacheFrequencyProp; + } + } else { + isAppendOnlySchema = parent.isAppendOnlySchema(); + updateCacheFrequency = (updateCacheFrequencyProp != null) + ? updateCacheFrequencyProp + : parent.getUpdateCacheFrequency(); + } + // updateCacheFrequency cannot be set to ALWAYS if isAppendOnlySchema is true + if (isAppendOnlySchema && updateCacheFrequency == 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPDATE_CACHE_FREQUENCY_INVALID) + .setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build() + .buildException(); + } + Boolean immutableProp = (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps); + if (statement.immutableRows() != null && immutableProp != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID) + .setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()).build() + .buildException(); } - // Do not make rpc to getTable if - // 1. table is a system table that does not have a ROW_TIMESTAMP column OR - // 2. table was already resolved as of that timestamp OR - // 3. table does not have a ROW_TIMESTAMP column and age is less then UPDATE_CACHE_FREQUENCY - // 3a. Get the effective UPDATE_CACHE_FREQUENCY for checking the age in the following precedence order: - // Table-level property > Connection-level property > Default value. 
- private boolean avoidRpcToGetTable(boolean alwaysHitServer, Long resolvedTimestamp, - boolean systemTable, PTable table, PTableRef tableRef, long tableResolvedTimestamp) { - if (table != null && !alwaysHitServer) { - if (systemTable && table.getRowTimestampColPos() == -1 || - resolvedTimestamp == tableResolvedTimestamp) { - return true; - } - - final long effectiveUpdateCacheFreq; - final String ucfInfoForLogging; // Only used for logging purposes - - boolean overrideUcfToAlways = false; - if (table.getType() == INDEX) { - overrideUcfToAlways = - PIndexState.PENDING_DISABLE.equals(table.getIndexState()) || - !IndexMaintainer.sendIndexMaintainer(table); - } - if (!overrideUcfToAlways && !table.getIndexes().isEmpty()) { - List indexes = table.getIndexes(); - List maintainedIndexes = - Lists.newArrayList(IndexMaintainer.maintainedIndexes(indexes.iterator())); - // The maintainedIndexes contain only the indexes that are used by clients - // while generating the mutations. If all the indexes are usable by clients, - // we don't need to override UPDATE_CACHE_FREQUENCY. However, if any index is - // not in usable state by the client mutations, we should override - // UPDATE_CACHE_FREQUENCY to default value so that we make getTable() RPC calls - // until all index states change to ACTIVE, BUILDING or other usable states. - overrideUcfToAlways = indexes.size() != maintainedIndexes.size(); - } - - // What if the table is created with UPDATE_CACHE_FREQUENCY explicitly set to ALWAYS? - // i.e. explicitly set to 0. We should ideally be checking for something like - // hasUpdateCacheFrequency(). - - //always fetch an Index in PENDING_DISABLE state to retrieve server timestamp - //QueryOptimizer needs that to decide whether the index can be used - if (overrideUcfToAlways) { - effectiveUpdateCacheFreq = - (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS"); - ucfInfoForLogging = "override-to-always"; - } else if (table.getUpdateCacheFrequency() - != QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY) { - effectiveUpdateCacheFreq = table.getUpdateCacheFrequency(); - ucfInfoForLogging = "table-level"; - } else { - effectiveUpdateCacheFreq = - (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - ucfInfoForLogging = connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB) != null ? - "connection-level" : "default"; - } - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Using " + ucfInfoForLogging + " Update Cache Frequency (value = " + - effectiveUpdateCacheFreq + "ms) for " + table.getName() + - (table.getTenantId() != null ? 
", Tenant ID: " + table.getTenantId() : "")); - } - - return MetaDataUtil - .avoidMetadataRPC(connection, table, tableRef, effectiveUpdateCacheFreq); - } - return false; + PTable table = null; + // if the APPEND_ONLY_SCHEMA attribute is true first check if the table is present in the cache + // if it is add columns that are not already present + if (isAppendOnlySchema) { + // look up the table in the cache + MetaDataMutationResult result = + updateCache(tableName.getSchemaName(), tableName.getTableName()); + if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { + table = result.getTable(); + if (!statement.ifNotExists()) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName.toString(), + NUM_METADATA_LOOKUP_FAILURES, 1); + throw new NewerTableAlreadyExistsException(tableName.getSchemaName(), + tableName.getTableName(), table); + } + + List columnDefs = statement.getColumnDefs(); + PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint(); + // get the list of columns to add + for (ColumnDef columnDef : columnDefs) { + if (pkConstraint.contains(columnDef.getColumnDefName())) { + columnDef.setIsPK(true); + } + } + // if there are new columns to add + return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(), true, + NamedTableNode.create(statement.getTableName()), statement.getTableType(), false, null); + } + } + table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewIndexIdType, + rowKeyMatcher, viewColumnConstants, isViewColumnReferenced, false, null, null, null, + tableProps, commonFamilyProps); + + if (table == null || table.getType() == PTableType.VIEW || statement.isNoVerify() /* + * || table. + * isTransactional + * () + */) { + return new MutationState(0, 0, connection); + } + // Hack to get around the case when an SCN is specified on the connection. + // In this case, we won't see the table we just created yet, so we hack + // around it by forcing the compiler to not resolve anything. + PostDDLCompiler compiler = new PostDDLCompiler(connection); + // connection.setAutoCommit(true); + // Execute any necessary data updates + Long scn = connection.getSCN(); + long ts = (scn == null ? table.getTimeStamp() : scn); + // Getting the schema through the current connection doesn't work when the connection has an scn + // specified + // Since the table won't be added to the current connection. + TableRef tableRef = new TableRef(null, table, ts, false); + byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); + MutationPlan plan = + compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, ts); + return connection.getQueryServices().updateData(plan); + } + + /* + * Create splits either from the provided splits or reading from SPLITS_FILE. + */ + private byte[][] processSplits(Map tableProperties, byte[][] splits) + throws SQLException { + String splitFilesLocation = (String) tableProperties.get(SPLITS_FILE); + if (splitFilesLocation == null || splitFilesLocation.isEmpty()) { + splitFilesLocation = null; } - public MetaDataMutationResult updateCache(String schemaName) throws SQLException { - return updateCache(schemaName, false); + // Both splits and split file location are not passed, so return empty split. 
+ if (splits.length == 0 && splitFilesLocation == null) { + return splits; } - public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) throws SQLException { - long clientTimeStamp = getClientTimeStamp(); - PSchema schema = null; - try { - schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName)); - if (schema != null - && !alwaysHitServer) { return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, - QueryConstants.UNSET_TIMESTAMP); } - } catch (SchemaNotFoundException e) { + // Both splits[] and splitFileLocation are provided. Throw an exception in this case. + if (splits.length != 0 && splitFilesLocation != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLITS_AND_SPLIT_FILE_EXISTS).build() + .buildException(); + } + // This means we only have splits[] and no split file location is specified + if (splitFilesLocation == null) { + return splits; + } + // This means splits[] is empty and split file location is not null. + File splitFile = new File(splitFilesLocation); + // Check if file exists and is a file not a directory. + if (!splitFile.exists() || !splitFile.isFile()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_FILE_DONT_EXIST).build() + .buildException(); + } + List splitsListFromFile = new ArrayList<>(); + Path path = Paths.get(splitFilesLocation); + try (BufferedReader reader = Files.newBufferedReader(path)) { + String line; + while ((line = reader.readLine()) != null) { + splitsListFromFile.add(Bytes.toBytes(line)); + } + } catch (IOException ioe) { + LOGGER.warn("Exception while reading splits file", ioe); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_OPEN_SPLIT_FILE).build() + .buildException(); + } + return splitsListFromFile.toArray(new byte[splitsListFromFile.size()][]); + } + + /** + * Populate properties for the table and common properties for all column families of the table + * @param statementProps Properties specified in SQL statement + * @param tableProps Properties for an HTableDescriptor and Phoenix Table Properties + * @param commonFamilyProps Properties common to all column families + * @param tableType Used to distinguish between index creation vs. base table creation + * paths + */ + private void populatePropertyMaps(ListMultimap> statementProps, + Map tableProps, Map commonFamilyProps, PTableType tableType, + boolean isCDCIndex) throws SQLException { + // Somewhat hacky way of determining if property is for HColumnDescriptor or HTableDescriptor + ColumnFamilyDescriptor defaultDescriptor = + ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); + if (!statementProps.isEmpty()) { + Collection> propsList = + statementProps.get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY); + for (Pair prop : propsList) { + if ( + tableType == PTableType.INDEX && !isCDCIndex + && MetaDataUtil.propertyNotAllowedToBeOutOfSync(prop.getFirst()) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) + .setMessage("Property: " + prop.getFirst()).build().buildException(); + } + // Keeping TTL value as Phoenix Table property irrespective of PhoenixTTLEnabled or + // not to store the value in SYSCAT. To keep PTableImpl.getTTL() consistent + // for client side. 
+ if (prop.getFirst().equalsIgnoreCase(TTL) && tableType != PTableType.SYSTEM) { + tableProps.put(prop.getFirst(), prop.getSecond()); + if (!isPhoenixTTLEnabled()) { + // Handling FOREVER and NONE case for TTL when phoenix.table.ttl.enable is false. + Object value = + ConnectionQueryServicesImpl.convertForeverAndNoneTTLValue(prop.getSecond(), false); + commonFamilyProps.put(prop.getFirst(), value); + } + // If phoenix.table.ttl.enabled is true doesn't store TTL as columnFamilyProp + continue; + } + + // HTableDescriptor property or Phoenix Table Property + if (defaultDescriptor.getValue(Bytes.toBytes(prop.getFirst())) == null) { + // See PHOENIX-4891 + if (tableType == PTableType.INDEX && UPDATE_CACHE_FREQUENCY.equals(prop.getFirst())) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX).build() + .buildException(); + } + tableProps.put(prop.getFirst(), prop.getSecond()); + } else { // HColumnDescriptor property + commonFamilyProps.put(prop.getFirst(), prop.getSecond()); } - MetaDataMutationResult result; - - result = connection.getQueryServices().getSchema(schemaName, clientTimeStamp); - return result; + } + } + } + + private boolean isPhoenixTTLEnabled() { + return connection.getQueryServices().getConfiguration().getBoolean( + QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } + + private boolean isViewTTLEnabled() { + return connection.getQueryServices().getConfiguration().getBoolean( + QueryServices.PHOENIX_VIEW_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED); + } + + public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt) + throws SQLException { + // Don't mistakenly commit pending rows + connection.rollback(); + // Check before updating the stats if we have reached the configured time to reupdate the stats + // once again + ColumnResolver resolver = FromCompiler.getResolver(updateStatisticsStmt, connection); + PTable table = resolver.getTables().get(0).getTable(); + long rowCount = 0; + if (updateStatisticsStmt.updateColumns()) { + rowCount += updateStatisticsInternal(table.getPhysicalName(), table, + updateStatisticsStmt.getProps(), true); + } + if (updateStatisticsStmt.updateIndex()) { + // TODO: If our table is a VIEW with multiple indexes or a TABLE with local indexes, + // we may be doing more work that we have to here. We should union the scan ranges + // across all indexes in that case so that we don't re-calculate the same stats + // multiple times. + for (PTable index : table.getIndexes()) { + // If the table is a view, then we will end up calling update stats + // here for all the view indexes on it. We take care of local indexes later. + if (index.getIndexType() != IndexType.LOCAL) { + if (table.getType() != PTableType.VIEW) { + rowCount += updateStatisticsInternal(index.getPhysicalName(), index, + updateStatisticsStmt.getProps(), true); + } else { + rowCount += updateStatisticsInternal(table.getPhysicalName(), index, + updateStatisticsStmt.getProps(), true); + } + } + } + /* + * Update stats for local indexes. This takes care of local indexes on the the table as well + * as local indexes on any views on it. + */ + PName physicalName = table.getPhysicalName(); + List localCFs = + MetaDataUtil.getLocalIndexColumnFamilies(connection, physicalName.getBytes()); + if (!localCFs.isEmpty()) { + /* + * We need to pass checkLastStatsUpdateTime as false here. Local indexes are on the same + * table as the physical table. 
So when the user has requested to update stats for both + * table and indexes on it, we need to make sure that we don't re-check LAST_UPDATE_STATS + * time. If we don't do that then we will end up *not* collecting stats for local indexes + * which would be bad. Note, that this also means we don't have a way of controlling how + * often update stats can run for local indexes. Consider the case when the user calls + * UPDATE STATS TABLE followed by UPDATE STATS TABLE INDEX. When the second statement is + * being executed, this causes us to skip the check and execute stats collection possibly a + * bit too frequently. + */ + rowCount += updateStatisticsInternal(physicalName, table, updateStatisticsStmt.getProps(), + localCFs, false); + } + // If analyzing the indexes of a multi-tenant table or a table with view indexes + // then analyze all of those indexes too. + if (table.getType() != PTableType.VIEW) { + if ( + table.isMultiTenant() + || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName()) + ) { + final PName viewIndexPhysicalTableName = PNameFactory + .newName(MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes())); + PTable indexLogicalTable = new DelegateTable(table) { + @Override + public PName getPhysicalName() { + return viewIndexPhysicalTableName; + } + }; + /* + * Note for future maintainers: local indexes whether on a table or on a view, reside on + * the same physical table as the base table and not the view index table. So below call + * is collecting stats only for non-local view indexes. + */ + rowCount += updateStatisticsInternal(viewIndexPhysicalTableName, indexLogicalTable, + updateStatisticsStmt.getProps(), true); + } + } + } + final long count = rowCount; + return new MutationState(1, 1000, connection) { + @Override + public long getUpdateCount() { + return count; + } + }; + } + + private long updateStatisticsInternal(PName physicalName, PTable logicalTable, + Map statsProps, boolean checkLastStatsUpdateTime) throws SQLException { + return updateStatisticsInternal(physicalName, logicalTable, statsProps, null, + checkLastStatsUpdateTime); + } + + private long updateStatisticsInternal(PName physicalName, PTable logicalTable, + Map statsProps, List cfs, boolean checkLastStatsUpdateTime) + throws SQLException { + ReadOnlyProps props = connection.getQueryServices().getProps(); + final long msMinBetweenUpdates = props.getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MIN_STATS_UPDATE_FREQ_MS); + Long scn = connection.getSCN(); + // Always invalidate the cache + long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn; + long msSinceLastUpdate = Long.MAX_VALUE; + if (checkLastStatsUpdateTime) { + String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE " + PHYSICAL_NAME + "= ? 
AND " + + COLUMN_FAMILY + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL"; + try (PreparedStatement selectStatsStmt = connection.prepareStatement(query)) { + selectStatsStmt.setString(1, physicalName.getString()); + try (ResultSet rs = selectStatsStmt.executeQuery(query)) { + if (rs.next()) { + msSinceLastUpdate = rs.getLong(1) - rs.getLong(2); + } + } + } + } + long rowCount = 0; + if (msSinceLastUpdate >= msMinBetweenUpdates) { + /* + * Execute a COUNT(*) through PostDDLCompiler as we need to use the logicalTable passed + * through, since it may not represent a "real" table in the case of the view indexes of a + * base table. + */ + PostDDLCompiler compiler = new PostDDLCompiler(connection); + // even if table is transactional, while calculating stats we scan the table + // non-transactionally to + // view all the data belonging to the table + PTable nonTxnLogicalTable = new DelegateTable(logicalTable) { + @Override + public TransactionFactory.Provider getTransactionProvider() { + return null; + } + }; + TableRef tableRef = new TableRef(null, nonTxnLogicalTable, clientTimeStamp, false); + MutationPlan plan = + compiler.compile(Collections.singletonList(tableRef), null, cfs, null, clientTimeStamp); + Scan scan = plan.getContext().getScan(); + StatisticsUtil.setScanAttributes(scan, statsProps); + boolean runUpdateStatsAsync = + props.getBoolean(QueryServices.RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC); + scan.setAttribute(RUN_UPDATE_STATS_ASYNC_ATTRIB, + runUpdateStatsAsync ? TRUE_BYTES : FALSE_BYTES); + MutationState mutationState = plan.execute(); + rowCount = mutationState.getUpdateCount(); } - public MetaDataMutationResult updateCache(PName tenantId, List functionNames, - boolean alwaysHitServer) throws SQLException { // TODO: pass byte[] herez - long clientTimeStamp = getClientTimeStamp(); - List functions = new ArrayList(functionNames.size()); - List functionTimeStamps = new ArrayList(functionNames.size()); - Iterator iterator = functionNames.iterator(); - while (iterator.hasNext()) { - PFunction function = null; - try { - String functionName = iterator.next(); - function = - connection.getMetaDataCache().getFunction( - new PTableKey(tenantId, functionName)); - if (function != null && !alwaysHitServer - && function.getTimeStamp() == clientTimeStamp - 1) { - functions.add(function); - iterator.remove(); - continue; - } - if (function != null && function.getTimeStamp() != clientTimeStamp - 1) { - functionTimeStamps.add(function.getTimeStamp()); - } else { - functionTimeStamps.add(HConstants.LATEST_TIMESTAMP); - } - } catch (FunctionNotFoundException e) { - functionTimeStamps.add(HConstants.LATEST_TIMESTAMP); - } - } - // Don't bother with server call: we can't possibly find a newer function - if (functionNames.isEmpty()) { - return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS,QueryConstants.UNSET_TIMESTAMP,functions, true); + /* + * Update the stats table so that client will pull the new one with the updated stats. Even if + * we don't run the command due to the last update time, invalidate the cache. This supports + * scenarios in which a major compaction was manually initiated and the client wants the + * modified stats to be reflected immediately. 
+ */ + if (cfs == null) { + List families = logicalTable.getColumnFamilies(); + if (families.isEmpty()) { + connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), + SchemaUtil.getEmptyColumnFamily(logicalTable))); + } else { + for (PColumnFamily family : families) { + connection.getQueryServices().invalidateStats( + new GuidePostsKey(physicalName.getBytes(), family.getName().getBytes())); + } + } + } else { + for (byte[] cf : cfs) { + connection.getQueryServices() + .invalidateStats(new GuidePostsKey(physicalName.getBytes(), cf)); + } + } + return rowCount; + } + + private MutationState buildIndexAtTimeStamp(PTable index, NamedTableNode dataTableNode) + throws SQLException { + // If our connection is at a fixed point-in-time, we need to open a new + // connection so that our new index table is visible. + Properties props = new Properties(connection.getClientInfo()); + props.setProperty(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, Long.toString(connection.getSCN() + 1)); + PhoenixConnection conn = + new PhoenixConnection(connection, connection.getQueryServices(), props); + MetaDataClient newClientAtNextTimeStamp = new MetaDataClient(conn); + + // Re-resolve the tableRef from the now newer connection + conn.setAutoCommit(true); + ColumnResolver resolver = FromCompiler.getResolver(dataTableNode, conn); + TableRef tableRef = resolver.getTables().get(0); + boolean success = false; + SQLException sqlException = null; + try { + MutationState state = newClientAtNextTimeStamp.buildIndex(index, tableRef); + success = true; + return state; + } catch (SQLException e) { + sqlException = e; + } finally { + try { + conn.close(); + } catch (SQLException e) { + if (sqlException == null) { + // If we're not in the middle of throwing another exception + // then throw the exception we got on close. + if (success) { + sqlException = e; + } + } else { + sqlException.setNextException(e); } - - int maxTryCount = tenantId == null ? 
1 : 2; - int tryCount = 0; - MetaDataMutationResult result; - - do { - List> functionsToFecth = new ArrayList>(functionNames.size()); - for (int i = 0; i< functionNames.size(); i++) { - functionsToFecth.add(new Pair(PVarchar.INSTANCE.toBytes(functionNames.get(i)), functionTimeStamps.get(i))); - } - result = connection.getQueryServices().getFunctions(tenantId, functionsToFecth, clientTimeStamp); - - MutationCode code = result.getMutationCode(); - // We found an updated table, so update our cache - if (result.getFunctions() != null && !result.getFunctions().isEmpty()) { - result.getFunctions().addAll(functions); - addFunctionToCache(result); - return result; - } else { - if (code == MutationCode.FUNCTION_ALREADY_EXISTS) { - result.getFunctions().addAll(functions); - addFunctionToCache(result); - return result; - } - if (code == MutationCode.FUNCTION_NOT_FOUND && tryCount + 1 == maxTryCount) { - for (Pair f : functionsToFecth) { - connection.removeFunction(tenantId, Bytes.toString(f.getFirst()), - f.getSecond()); - } - // TODO removeFunctions all together from cache when - throw new FunctionNotFoundException(functionNames.toString() + " not found"); - } - } - tenantId = null; // Try again with global tenantId - } while (++tryCount < maxTryCount); - - return result; + } + if (sqlException != null) { + throw sqlException; + } + } + throw new IllegalStateException(); // impossible + } + + private MutationPlan getMutationPlanForBuildingIndex(PTable index, TableRef dataTableRef) + throws SQLException { + if (index.getIndexType() == IndexType.LOCAL) { + PostLocalIndexDDLCompiler compiler = + new PostLocalIndexDDLCompiler(connection, getFullTableName(dataTableRef)); + return compiler.compile(index); + } else if (dataTableRef.getTable().isTransactional()) { + PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef); + return compiler.compile(index); + } else { + ServerBuildIndexCompiler compiler = + new ServerBuildIndexCompiler(connection, getFullTableName(dataTableRef)); + return compiler.compile(index); + } + } + + private MutationState buildIndex(PTable index, TableRef dataTableRef) throws SQLException { + AlterIndexStatement indexStatement = null; + boolean wasAutoCommit = connection.getAutoCommit(); + try { + connection.setAutoCommit(true); + MutationPlan mutationPlan = getMutationPlanForBuildingIndex(index, dataTableRef); + Scan scan = mutationPlan.getContext().getScan(); + Long scn = connection.getSCN(); + try { + if (ScanUtil.isDefaultTimeRange(scan.getTimeRange())) { + if (scn == null) { + scn = mutationPlan.getContext().getCurrentTime(); + } + scan.setTimeRange(dataTableRef.getLowerBoundTimeStamp(), scn); + } + } catch (IOException e) { + throw new SQLException(e); + } + + // execute index population upsert select + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + MutationState state = connection.getQueryServices().updateData(mutationPlan); + long firstUpsertSelectTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + + // for global indexes on non transactional tables we might have to + // run a second index population upsert select to handle data rows + // that were being written on the server while the index was created. + // TODO: this sleep time is really arbitrary. If any query is in progress + // while the index is being built, we're depending on this sleep + // waiting them out. Instead we should have a means of waiting until + // all in progress queries are complete (though I'm not sure that's + // feasible). See PHOENIX-4092. 
+      long sleepTime =
+        connection.getQueryServices().getProps().getLong(QueryServices.INDEX_POPULATION_SLEEP_TIME,
+          QueryServicesOptions.DEFAULT_INDEX_POPULATION_SLEEP_TIME);
+      if (!dataTableRef.getTable().isTransactional() && sleepTime > 0) {
+        long delta = sleepTime - firstUpsertSelectTime;
+        if (delta > 0) {
+          try {
+            Thread.sleep(delta);
+          } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION)
+              .setRootCause(e).build().buildException();
+          }
+        }
+        // set the min timestamp of second index upsert select some time before the index
+        // was created
+        long minTimestamp = index.getTimeStamp() - firstUpsertSelectTime;
+        try {
+          // TODO: Use scn or LATEST_TIMESTAMP here? It's possible that a DML statement
+          // ran and ended up with timestamps later than this time. If we use a later
+          // timestamp, we'll need to run the partial index rebuilder here as it's
+          // possible that the updates to the table were made (such as deletes) after
+          // the scn, which would not be properly reflected by this mechanism.
+          // See PHOENIX-4092.
+          mutationPlan.getContext().getScan().setTimeRange(minTimestamp, scn);
+        } catch (IOException e) {
+          throw new SQLException(e);
+        }
+        MutationState newMutationState = connection.getQueryServices().updateData(mutationPlan);
+        state.join(newMutationState);
+      }
+
+      indexStatement =
+        FACTORY
+          .alterIndex(
+            FACTORY.namedTable(null,
+              TableName.create(index.getSchemaName().getString(),
+                index.getTableName().getString())),
+            dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
+      alterIndex(indexStatement);
+
+      return state;
+    } finally {
+      connection.setAutoCommit(wasAutoCommit);
+    }
+  }
+
+  private String getFullTableName(TableRef dataTableRef) {
+    String schemaName = dataTableRef.getTable().getSchemaName().getString();
+    String tableName = dataTableRef.getTable().getTableName().getString();
+    String fullName = schemaName == null
+      ? ("\"" + tableName + "\"")
+      : ("\"" + schemaName + "\"" + QueryConstants.NAME_SEPARATOR + "\"" + tableName + "\"");
+    return fullName;
+  }
+
+  public MutationState declareCursor(DeclareCursorStatement statement, QueryPlan queryPlan)
+    throws SQLException {
+    CursorUtil.declareCursor(statement, queryPlan);
+    return new MutationState(0, 0, connection);
+  }
+
+  public MutationState open(OpenStatement statement) throws SQLException {
+    CursorUtil.openCursor(statement, connection);
+    return new MutationState(0, 0, connection);
+  }
+
+  public MutationState close(CloseStatement statement) throws SQLException {
+    CursorUtil.closeCursor(statement);
+    return new MutationState(0, 0, connection);
+  }
+
+  /**
+   * Support long viewIndexId only if client has explicitly set the
+   * QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'.
+   */
+  private PDataType getViewIndexDataType() throws SQLException {
+    boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean(
+      QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB,
+      QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED);
+    return supportsLongViewIndexId
+      ? MetaDataUtil.getViewIndexIdDataType()
+      : MetaDataUtil.getLegacyViewIndexIdDataType();
+  }
+
+  /**
+   * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and
+   * calling MetaDataClient.createTable.
In doing so, we perform the following translations: 1) + * Change the type of any columns being indexed to types that support null if the column is + * nullable. For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type + * supports null when it's in the row key while a BIGINT does not. 2) Append any row key column + * from the data table that is not in the indexed column list. Our indexes rely on having a 1:1 + * correspondence between the index and data rows. 3) Change the name of the columns to include + * the column family. For example, if you have a column named "B" in a column family named "A", + * the indexed column name will be "A:B". This makes it easy to translate the column references in + * a query to the correct column references in an index table regardless of whether the column + * reference is prefixed with the column family name or not. It also has the side benefit of + * allowing the same named column in different column families to both be listed as an index + * column. + * @return MutationState from population of index table from data table + */ + public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) + throws SQLException { + IndexKeyConstraint ik = statement.getIndexConstraint(); + TableName indexTableName = statement.getIndexTableName(); + + Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); + Map commonFamilyProps = + Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); + populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, PTableType.INDEX, + CDCUtil + .isCDCIndex(SchemaUtil.getTableNameFromFullName(statement.getIndexTableName().toString()))); + List> indexParseNodeAndSortOrderList = + ik.getParseNodeAndSortOrderList(); + List includedColumns = statement.getIncludeColumns(); + TableRef tableRef = null; + PTable table = null; + boolean allocateIndexId = false; + boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL; + int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion(); + if (isLocalIndex) { + if ( + !connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, + QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + } + Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); + String physicalSchemaName = null; + String physicalTableName = null; + PTable dataTable = null; + try { + ColumnResolver resolver = + FromCompiler.getResolverForCreateIndex(statement, connection, statement.getUdfParseNodes()); + tableRef = resolver.getTables().get(0); + Date asyncCreatedDate = null; + if (statement.isAsync()) { + asyncCreatedDate = new Date(tableRef.getCurrentTime()); + } + dataTable = tableRef.getTable(); + boolean isTenantConnection = connection.getTenantId() != null; + if (isTenantConnection) { + if (dataTable.getType() != PTableType.VIEW) { + throw new SQLFeatureNotSupportedException( + "An index may only be created for a VIEW through a tenant-specific connection"); + } + } + if (!dataTable.isImmutableRows()) { + if (hbaseVersion < MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) { + throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + boolean tableWithRowTimestampCol = dataTable.getRowTimestampColPos() != -1; + if (tableWithRowTimestampCol) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_CREATE_INDEX_ON_MUTABLE_TABLE_WITH_ROWTIMESTAMP) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + } + if ( + dataTable.isTransactional() && isLocalIndex + && dataTable.getTransactionProvider().getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.ALLOW_LOCAL_INDEX) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE) + .setMessage(dataTable.getTransactionProvider().name()) + .setTableName(indexTableName.getTableName()).build().buildException(); + } + int posOffset = 0; + List pkColumns = dataTable.getPKColumns(); + Set unusedPkColumns; + if (dataTable.getBucketNum() != null) { // Ignore SALT column + unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1); + posOffset++; + } else { + unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()); + } + for (int i = posOffset; i < pkColumns.size(); i++) { + PColumn column = pkColumns.get(i); + unusedPkColumns.add(new RowKeyColumnExpression(column, + new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\"")); + } + List allPkColumns = + Lists.newArrayListWithExpectedSize(unusedPkColumns.size()); + List columnDefs = Lists.newArrayListWithExpectedSize( + includedColumns.size() + indexParseNodeAndSortOrderList.size()); + + /* + * Allocate an index ID in two circumstances: 1) for a local index, as all local indexes will + * reside in the same HBase table 2) for a view on an index. 
+ */ + if ( + isLocalIndex + || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED) + ) { + allocateIndexId = true; + PDataType dataType = getViewIndexDataType(); + ColumnName colName = + ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName()); + allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false)); + columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, + false, SortOrder.getDefault(), null, false)); + } + + if (dataTable.isMultiTenant()) { + PColumn col = dataTable.getPKColumns().get(posOffset); + RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, + new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString()); + unusedPkColumns.remove(columnExpression); + PDataType dataType = IndexUtil.getIndexColumnDataType(col); + ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); + allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false)); + columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), + col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), + col.getName().getString(), col.isRowTimestamp())); + } + + PhoenixStatement phoenixStatment = new PhoenixStatement(connection); + StatementContext context = new StatementContext(phoenixStatment, resolver); + IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context); + Set indexedColumnNames = + Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size()); + for (Pair pair : indexParseNodeAndSortOrderList) { + ParseNode parseNode = pair.getFirst(); + // normalize the parse node + parseNode = StatementNormalizer.normalize(parseNode, resolver); + // compile the parseNode to get an expression + expressionIndexCompiler.reset(); + Expression expression = parseNode.accept(expressionIndexCompiler); + if (expressionIndexCompiler.isAggregate()) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException(); + } + if (expressionIndexCompiler.isJsonFragment()) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.JSON_FRAGMENT_NOT_ALLOWED_IN_INDEX_EXPRESSION).build() + .buildException(); + } + if ( + !(expression.getDeterminism() == Determinism.ALWAYS + || expression.getDeterminism() == Determinism.PER_ROW) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build() + .buildException(); + } + if (expression.isStateless()) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException(); + } + unusedPkColumns.remove(expression); + + // Go through parse node to get string as otherwise we + // can lose information during compilation + StringBuilder buf = new StringBuilder(); + parseNode.toSQL(resolver, buf); + // need to escape backslash as this expression will be re-parsed later + String expressionStr = StringUtil.escapeBackslash(buf.toString()); + + ColumnName colName = null; + ColumnRef colRef = expressionIndexCompiler.getColumnRef(); + boolean isRowTimestamp = false; + if (colRef != null) { + // if this is a regular column + PColumn column = colRef.getColumn(); + String columnFamilyName = + column.getFamilyName() != null ? 
column.getFamilyName().getString() : null; + colName = ColumnName.caseSensitiveColumnName( + IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString())); + isRowTimestamp = column.isRowTimestamp(); + } else { + // if this is an expression + // TODO column names cannot have double quotes, remove this once this PHOENIX-1621 is + // fixed + String name = expressionStr.replaceAll("\"", "'"); + colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name)); + } + indexedColumnNames.add(colName); + PDataType dataType = + IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType()); + allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp)); + columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), + expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, + pair.getSecond(), expressionStr, isRowTimestamp)); + } + + // Next all the PK columns from the data table that aren't indexed + if (!unusedPkColumns.isEmpty()) { + for (RowKeyColumnExpression colExpression : unusedPkColumns) { + PColumn col = dataTable.getPKColumns().get(colExpression.getPosition()); + // Don't add columns with constant values from updatable views, as + // we don't need these in the index + if (col.getViewConstant() == null) { + ColumnName colName = + ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); + allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), + col.isRowTimestamp())); + PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), + colExpression.getDataType()); + columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), + colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), + false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp())); + } + } + } + + // Last all the included columns (minus any PK columns) + for (ColumnName colName : includedColumns) { + PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()) + .getColumn(); + colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); + // Check for duplicates between indexed and included columns + if (indexedColumnNames.contains(colName)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build() + .buildException(); + } + if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) { + // Need to re-create ColumnName, since the above one won't have the column family name + colName = ColumnName.caseSensitiveColumnName(isLocalIndex + ? 
IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()) + : col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col)); + columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), + col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), + col.getExpressionStr(), col.isRowTimestamp())); + } + } + + Configuration config = connection.getQueryServices().getConfiguration(); + if ( + !connection.getQueryServices().getProps().getBoolean(DISABLE_VIEW_SUBTREE_VALIDATION, + DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION) + ) { + verifyIfDescendentViewsExtendPk(dataTable, config); + } + // for view indexes + if (dataTable.getType() == PTableType.VIEW) { + String physicalName = dataTable.getPhysicalName().getString(); + physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName); + physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName); + List requiredCols = Lists.newArrayList(indexedColumnNames); + requiredCols.addAll(includedColumns); + for (ColumnName colName : requiredCols) { + // acquire the mutex using the global physical table name to + // prevent this column from being dropped while the view is being created + String colNameSeparatedByDot = colName.getColumnName() + .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + // indexed column name have a ':' between the column family and column name + // We would like to have '.' like in other column names + boolean acquiredMutex = + writeCell(null, physicalSchemaName, physicalTableName, colNameSeparatedByDot); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + acquiredColumnMutexSet.add(colNameSeparatedByDot); + } + } + + long threshold = Long.parseLong(config.get(QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD)); + + if (threshold > 0 && !statement.isAsync()) { + Set columnFamilies = new HashSet<>(); + for (ColumnDef column : columnDefs) { + try { + String columnFamily = + IndexUtil.getDataColumnFamilyName(column.getColumnDefName().getColumnName()); + columnFamilies.add(!columnFamily.equals("") ? columnFamily + : dataTable.getDefaultFamilyName() != null + ? 
dataTable.getDefaultFamilyName().toString() + : QueryConstants.DEFAULT_COLUMN_FAMILY); + } catch (Exception ignored) { + ; // We ignore any exception during this phase + } + } + long estimatedBytes = 0; + for (String colFamily : columnFamilies) { + GuidePostsInfo gps = connection.getQueryServices().getTableStats(new GuidePostsKey( + Bytes.toBytes(tableRef.getTable().toString()), Bytes.toBytes(colFamily))); + long[] byteCounts = gps.getByteCounts(); + for (long byteCount : byteCounts) { + estimatedBytes += byteCount; + } + + if (threshold < estimatedBytes) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ABOVE_INDEX_NON_ASYNC_THRESHOLD) + .build().buildException(); + } + } + } + + // Set DEFAULT_COLUMN_FAMILY_NAME of index to match data table + // We need this in the props so that the correct column family is created + if ( + dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW + && !allocateIndexId + ) { + statement.getProps().put("", new Pair(DEFAULT_COLUMN_FAMILY_NAME, + dataTable.getDefaultFamilyName().getString())); + } + PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns); + + tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, + dataTable.getPhysicalName().getString()); + CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, + statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, + statement.ifNotExists(), null, statement.getWhere(), statement.getBindCount(), null); + table = createTableInternal(tableStatement, splits, dataTable, null, null, + getViewIndexDataType(), null, null, null, allocateIndexId, statement.getIndexType(), + asyncCreatedDate, null, tableProps, commonFamilyProps); + } finally { + deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); + } + if (table == null) { + return new MutationState(0, 0, connection); } - /** - * Looks up the ancestors of views and view indexes and adds inherited columns and - * also any indexes of the ancestors that can be used - * - * @param result the result from updating the cache for the current table. 
- * @param resolvedTimestamp timestamp at which child table was resolved - * @param alwaysAddAncestorColumnsAndIndexes flag that determines whether we should recalculate - * all inherited columns and indexes that can be used in the view and - * @param alwaysHitServerForAncestors flag that determines whether we should fetch latest - * metadata for ancestors from the server - * @return true if the PTable contained by result was modified and false otherwise - * @throws SQLException if the physical table cannot be found - */ - private boolean addColumnsIndexesAndLastDDLTimestampsFromAncestors( - MetaDataMutationResult result, Long resolvedTimestamp, - boolean alwaysAddAncestorColumnsAndIndexes, - boolean alwaysHitServerForAncestors) - throws SQLException { - PTable table = result.getTable(); - boolean hasIndexId = table.getViewIndexId() != null; - if (table.getType() == PTableType.INDEX - || (table.getType() == PTableType.VIEW && table.getViewType() != ViewType.MAPPED)) { - String tableName = null; - try { - String parentName = table.getParentName().getString(); - String parentSchemaName = SchemaUtil.getSchemaNameFromFullName(parentName); - tableName = SchemaUtil.getTableNameFromFullName(parentName); - MetaDataMutationResult parentResult = updateCache(connection.getTenantId(), parentSchemaName, tableName, - alwaysHitServerForAncestors, resolvedTimestamp); - PTable parentTable = parentResult.getTable(); - if (parentResult.getMutationCode() == MutationCode.TABLE_NOT_FOUND || parentTable == null) { - // Try once more with different tenant id (connection can be global but view could be tenant - parentResult = - updateCache(table.getTenantId(), parentSchemaName, tableName, false, - resolvedTimestamp); - parentTable = parentResult.getTable(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("addColumnsAndIndexesFromAncestors parent logical name " + table.getBaseTableLogicalName().getString() + " parent name " + table.getParentName().getString() + " tableName=" + table.getName()); - } - if (parentResult.getMutationCode() == MutationCode.TABLE_NOT_FOUND || parentTable == null) { - // this mean the parent table was dropped and the child views have not yet been - // dropped by the TaskRegionObserver - String schemaName = table.getSchemaName() != null ? table.getSchemaName().getString() : null; - throw new TableNotFoundException(schemaName, parentName); - } - // only inherit columns view indexes (and not local indexes on regular tables which also have a viewIndexId) - if (hasIndexId && parentTable.getType() != PTableType.VIEW) { - return false; - } - // if alwaysAddAncestorColumnsAndIndexes is false we only recalculate if the ancestor table or table - // was updated from the server - if (!alwaysAddAncestorColumnsAndIndexes && !result.wasUpdated() && !parentResult.wasUpdated()) { - return false; - } + if (LOGGER.isInfoEnabled()) + LOGGER.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp()); + boolean asyncIndexBuildEnabled = + connection.getQueryServices().getProps().getBoolean(QueryServices.INDEX_ASYNC_BUILD_ENABLED, + QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED); + // In async process, we return immediately as the MR job needs to be triggered . 
+ if (statement.isAsync() && asyncIndexBuildEnabled) { + return new MutationState(0, 0, connection); + } - // only need to inherit columns and indexes for view indexes and views - if (!table.getType().equals(PTableType.INDEX) || hasIndexId) { - PTable pTableWithDerivedColumnsAndIndexes - = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, - table, parentTable); - result.setTable(getPTableWithAncestorLastDDLTimestampMap( - pTableWithDerivedColumnsAndIndexes, parentTable)); - } else { - result.setTable(getPTableWithAncestorLastDDLTimestampMap( - table, parentTable)); - } - return true; - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } - return false; + // If we create index in create_disabled state, we will build them later + if (table.getIndexState() == PIndexState.CREATE_DISABLE) { + return new MutationState(0, 0, connection); } - /** - * Update the indexes within this result's table with ancestor->last_ddl_timestamp map. - */ - private void updateIndexesWithAncestorMap(MetaDataMutationResult result) throws SQLException { - PTable table = result.getTable(); - if (table.getIndexes().isEmpty()) { - return; - } - List newIndexes = new ArrayList<>(table.getIndexes().size()); - for (PTable index : table.getIndexes()) { - newIndexes.add(getPTableWithAncestorLastDDLTimestampMap(index, table)); - } - result.setTable(PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) - .setIndexes(newIndexes).build()); + // If our connection is at a fixed point-in-time, we need to open a new + // connection so that our new index table is visible. + if (connection.getSCN() != null) { + return buildIndexAtTimeStamp(table, statement.getTable()); } - /** - * Creates a new PTable object from the provided pTable and with the ancestorLastDDLTimestampMap - * Copy the map of the parent and add the last_ddl_timestamp of the parent in the map. - * @param pTable - * @param parentTable - */ - private PTable getPTableWithAncestorLastDDLTimestampMap(PTable pTable, PTable parentTable) - throws SQLException { - Map ancestorMap - = new HashMap<>(parentTable.getAncestorLastDDLTimestampMap()); - // this method can be called for an index and a view which inherited this index - // from its ancestors, skip adding the view as an ancestor of the index. - if (pTable.getParentName().equals(parentTable.getName())) { - ancestorMap.put(parentTable.getKey(), parentTable.getLastDDLTimestamp()); - } - return PTableImpl.builderWithColumns(pTable, PTableImpl.getColumnsToClone(pTable)) - .setAncestorLastDDLTimestampMap(ancestorMap) - .build(); - } - - private void addFunctionArgMutation(String functionName, FunctionArgument arg, PreparedStatement argUpsert, int position) throws SQLException { - argUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString()); - argUpsert.setString(2, functionName); - argUpsert.setString(3, arg.getArgumentType()); - byte[] bytes = Bytes.toBytes((short)position); - argUpsert.setBytes(4, bytes); - argUpsert.setBoolean(5, arg.isArrayType()); - argUpsert.setBoolean(6, arg.isConstant()); - argUpsert.setString(7, arg.getDefaultValue() == null? null: arg.getDefaultValue().toString()); - argUpsert.setString(8, arg.getMinValue() == null? null: arg.getMinValue().toString()); - argUpsert.setString(9, arg.getMaxValue() == null? 
null: arg.getMaxValue().toString()); - argUpsert.execute(); - } - - public MutationState createTable( - CreateTableStatement statement, - byte[][] splits, - PTable parent, - String viewStatement, - ViewType viewType, - PDataType viewIndexIdType, - byte[] rowKeyMatcher, - byte[][] viewColumnConstants, - BitSet isViewColumnReferenced - ) throws SQLException { - TableName tableName = statement.getTableName(); - Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); - Map commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); - populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, - statement.getTableType(), false); - - splits = processSplits(tableProps, splits); - boolean isAppendOnlySchema = false; - long updateCacheFrequency = (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps); - if (parent==null) { - Boolean appendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps); - if (appendOnlySchemaProp != null) { - isAppendOnlySchema = appendOnlySchemaProp; - } - if (updateCacheFrequencyProp != null) { - updateCacheFrequency = updateCacheFrequencyProp; - } - } - else { - isAppendOnlySchema = parent.isAppendOnlySchema(); - updateCacheFrequency = (updateCacheFrequencyProp != null) ? - updateCacheFrequencyProp : parent.getUpdateCacheFrequency(); - } - // updateCacheFrequency cannot be set to ALWAYS if isAppendOnlySchema is true - if (isAppendOnlySchema && updateCacheFrequency==0) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPDATE_CACHE_FREQUENCY_INVALID) - .setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()) - .build().buildException(); - } - Boolean immutableProp = (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps); - if (statement.immutableRows()!=null && immutableProp!=null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID) - .setSchemaName(tableName.getSchemaName()).setTableName(tableName.getTableName()) - .build().buildException(); - } - - PTable table = null; - // if the APPEND_ONLY_SCHEMA attribute is true first check if the table is present in the cache - // if it is add columns that are not already present - if (isAppendOnlySchema) { - // look up the table in the cache - MetaDataMutationResult result = updateCache(tableName.getSchemaName(), tableName.getTableName()); - if (result.getMutationCode()==MutationCode.TABLE_ALREADY_EXISTS) { - table = result.getTable(); - if (!statement.ifNotExists()) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod( - tableName.toString(), NUM_METADATA_LOOKUP_FAILURES, 1); - throw new NewerTableAlreadyExistsException(tableName.getSchemaName(), tableName.getTableName(), table); - } + MutationState state = buildIndex(table, tableRef); + // If client is validating LAST_DDL_TIMESTAMPS, parent's last_ddl_timestamp changed + // so remove it from client's cache. It will be refreshed when table is accessed next time. 
+ if (ValidateLastDDLTimestampUtil.getValidateLastDdlTimestampEnabled(connection)) { + connection.removeTable(connection.getTenantId(), dataTable.getName().getString(), null, + dataTable.getTimeStamp()); + } + return state; + } + + public MutationState createCDC(CreateCDCStatement statement) throws SQLException { + ColumnResolver resolver = + FromCompiler.getResolver(NamedTableNode.create(statement.getDataTable()), connection); + TableRef tableRef = resolver.getTables().get(0); + PTable dataTable = tableRef.getTable(); + + Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); + Map commonFamilyProps = + Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); + populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, PTableType.CDC, + false); + Properties props = connection.getClientInfo(); + props.put(INDEX_CREATE_DEFAULT_STATE, "ACTIVE"); + + String dataTableFullName = SchemaUtil.getTableName(statement.getDataTable().getSchemaName(), + statement.getDataTable().getTableName()); + String createIndexSql = + "CREATE UNCOVERED INDEX " + (statement.isIfNotExists() ? "IF NOT EXISTS " : "") + + CDCUtil.getCDCIndexName(statement.getCdcObjName().getName()) + " ON " + dataTableFullName + + " (" + PhoenixRowTimestampFunction.NAME + "()) ASYNC"; + List indexProps = new ArrayList<>(); + indexProps.add("REPLICATION_SCOPE=0"); + Object saltBucketNum = TableProperty.SALT_BUCKETS.getValue(tableProps); + if (saltBucketNum != null) { + indexProps.add("SALT_BUCKETS=" + saltBucketNum); + } + Object columnEncodedBytes = TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); + if (columnEncodedBytes != null) { + indexProps.add("COLUMN_ENCODED_BYTES=" + columnEncodedBytes); + } + createIndexSql = createIndexSql + " " + String.join(", ", indexProps); + try (Connection internalConnection = + QueryUtil.getConnection(props, connection.getQueryServices().getConfiguration())) { + PhoenixStatement pstmt = new PhoenixStatement((PhoenixConnection) internalConnection); + pstmt.execute(createIndexSql); + } catch (SQLException e) { + if (e.getErrorCode() == TABLE_ALREADY_EXIST.getErrorCode()) { + throw new SQLExceptionInfo.Builder(TABLE_ALREADY_EXIST) + .setTableName(statement.getCdcObjName().getName()).setRootCause(e).build() + .buildException(); + } + throw e; + } - List columnDefs = statement.getColumnDefs(); - PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint(); - // get the list of columns to add - for (ColumnDef columnDef : columnDefs) { - if (pkConstraint.contains(columnDef.getColumnDefName())) { - columnDef.setIsPK(true); - } - } - // if there are new columns to add - return addColumn(table, columnDefs, statement.getProps(), statement.ifNotExists(), - true, NamedTableNode.create(statement.getTableName()), statement.getTableType(), false, null); - } - } - table = createTableInternal( - statement, - splits, - parent, - viewStatement, - viewType, - viewIndexIdType, - rowKeyMatcher, - viewColumnConstants, - isViewColumnReferenced, - false, - null, - null, - null, - tableProps, - commonFamilyProps - ); - - if (table == null || table.getType() == PTableType.VIEW - || statement.isNoVerify() /*|| table.isTransactional()*/) { - return new MutationState(0, 0, connection); + List pkColumns = dataTable.getPKColumns(); + List columnDefs = new ArrayList<>(); + List pkColumnDefs = new ArrayList<>(); + int pkOffset = dataTable.getBucketNum() != null ? 
1 : 0; + for (int i = pkOffset; i < pkColumns.size(); ++i) { + PColumn pcol = pkColumns.get(i); + columnDefs.add(FACTORY.columnDef(FACTORY.columnName(pcol.getName().getString()), + pcol.getDataType().getSqlTypeName(), false, null, false, pcol.getMaxLength(), + pcol.getScale(), false, pcol.getSortOrder(), "", null, false)); + pkColumnDefs + .add(FACTORY.columnDefInPkConstraint(FACTORY.columnName(pcol.getName().getString()), + pcol.getSortOrder(), pcol.isRowTimestamp())); + } + columnDefs.add(FACTORY.columnDef(FACTORY.columnName(QueryConstants.CDC_JSON_COL_NAME), + PVarchar.INSTANCE.getSqlTypeName(), false, null, true, null, null, false, + SortOrder.getDefault(), "", null, false)); + tableProps = new HashMap<>(); + if (dataTable.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) { + // CDC table doesn't need SINGLE_CELL_ARRAY_WITH_OFFSETS encoding, so override it. + tableProps.put(TableProperty.IMMUTABLE_STORAGE_SCHEME.getPropertyName(), + ONE_CELL_PER_COLUMN.name()); + } + if (dataTable.isMultiTenant()) { + tableProps.put(TableProperty.MULTI_TENANT.getPropertyName(), Boolean.TRUE); + } + CreateTableStatement tableStatement = FACTORY.createTable( + FACTORY.table(dataTable.getSchemaName().getString(), statement.getCdcObjName().getName()), + null, columnDefs, FACTORY.primaryKey(null, pkColumnDefs), Collections.emptyList(), + PTableType.CDC, statement.isIfNotExists(), null, null, statement.getBindCount(), null); + createTableInternal(tableStatement, null, dataTable, null, null, null, null, null, null, false, + null, null, statement.getIncludeScopes(), tableProps, commonFamilyProps); + return new MutationState(0, 0, connection); + } + + /** + * Go through all the descendent views from the child view hierarchy and find if any of the + * descendent views extends the primary key, throw error. + * @param tableOrView view or table on which the index is being created. + * @param config the configuration. + * @throws SQLException if any of the descendent views extends pk or if something goes wrong while + * querying descendent view hierarchy. + */ + private void verifyIfDescendentViewsExtendPk(PTable tableOrView, Configuration config) + throws SQLException { + if (connection.getQueryServices() instanceof ConnectionlessQueryServicesImpl) { + return; + } + if (connection.getQueryServices() instanceof DelegateQueryServices) { + DelegateQueryServices services = (DelegateQueryServices) connection.getQueryServices(); + if (services.getDelegate() instanceof ConnectionlessQueryServicesImpl) { + return; + } + } + byte[] systemChildLinkTable = SchemaUtil.isNamespaceMappingEnabled(null, config) + ? SYSTEM_CHILD_LINK_NAMESPACE_BYTES + : SYSTEM_CHILD_LINK_NAME_BYTES; + try (Table childLinkTable = connection.getQueryServices().getTable(systemChildLinkTable)) { + byte[] tenantId = + connection.getTenantId() == null ? 
null : connection.getTenantId().getBytes(); + byte[] schemaNameBytes = tableOrView.getSchemaName().getBytes(); + byte[] viewOrTableName = tableOrView.getTableName().getBytes(); + Pair, List> descViews = + ViewUtil.findAllDescendantViews(childLinkTable, config, tenantId, schemaNameBytes, + viewOrTableName, HConstants.LATEST_TIMESTAMP, false); + List legitimateChildViews = descViews.getFirst(); + int dataTableOrViewPkCols = tableOrView.getPKColumns().size(); + if (legitimateChildViews != null && legitimateChildViews.size() > 0) { + for (PTable childView : legitimateChildViews) { + if (childView.getPKColumns().size() > dataTableOrViewPkCols) { + LOGGER.error("Creation of view index not allowed as child view {}" + " extends pk", + childView.getName()); + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_CREATE_INDEX_CHILD_VIEWS_EXTEND_PK).build().buildException(); + } } - // Hack to get around the case when an SCN is specified on the connection. - // In this case, we won't see the table we just created yet, so we hack - // around it by forcing the compiler to not resolve anything. - PostDDLCompiler compiler = new PostDDLCompiler(connection); - //connection.setAutoCommit(true); - // Execute any necessary data updates - Long scn = connection.getSCN(); - long ts = (scn == null ? table.getTimeStamp() : scn); - // Getting the schema through the current connection doesn't work when the connection has an scn specified - // Since the table won't be added to the current connection. - TableRef tableRef = new TableRef(null, table, ts, false); - byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); - MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, ts); - return connection.getQueryServices().updateData(plan); + } + } catch (IOException e) { + LOGGER.error("Error while retrieving descendent views", e); + throw new SQLException(e); + } + } + + public MutationState dropSequence(DropSequenceStatement statement) throws SQLException { + Long scn = connection.getSCN(); + long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + String schemaName = + connection.getSchema() != null && statement.getSequenceName().getSchemaName() == null + ? connection.getSchema() + : statement.getSequenceName().getSchemaName(); + String sequenceName = statement.getSequenceName().getTableName(); + String tenantId = + connection.getTenantId() == null ? null : connection.getTenantId().getString(); + try { + connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName, timestamp); + } catch (SequenceNotFoundException e) { + if (statement.ifExists()) { + return new MutationState(0, 0, connection); + } + throw e; + } + return new MutationState(1, 1000, connection); + } + + public MutationState createSequence(CreateSequenceStatement statement, long startWith, + long incrementBy, long cacheSize, long minValue, long maxValue) throws SQLException { + Long scn = connection.getSCN(); + long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + String tenantId = + connection.getTenantId() == null ? 
null : connection.getTenantId().getString(); + String schemaName = statement.getSequenceName().getSchemaName(); + if (SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) { + if (schemaName == null || schemaName.equals(StringUtil.EMPTY_STRING)) { + schemaName = connection.getSchema(); + } + if (schemaName != null) { + FromCompiler.getResolverForSchema(schemaName, connection); + } + } + return createSequence(tenantId, schemaName, statement.getSequenceName().getTableName(), + statement.ifNotExists(), startWith, incrementBy, cacheSize, statement.getCycle(), minValue, + maxValue, timestamp); + } + + private MutationState createSequence(String tenantId, String schemaName, String sequenceName, + boolean ifNotExists, long startWith, long incrementBy, long cacheSize, boolean cycle, + long minValue, long maxValue, long timestamp) throws SQLException { + try { + connection.getQueryServices().createSequence(tenantId, schemaName, sequenceName, startWith, + incrementBy, cacheSize, minValue, maxValue, cycle, timestamp); + } catch (SequenceAlreadyExistsException e) { + if (ifNotExists) { + return new MutationState(0, 0, connection); + } + throw e; + } + return new MutationState(1, 1000, connection); + } + + public MutationState createFunction(CreateFunctionStatement stmt) throws SQLException { + boolean wasAutoCommit = connection.getAutoCommit(); + connection.rollback(); + try { + PFunction function = + new PFunction(stmt.getFunctionInfo(), stmt.isTemporary(), stmt.isReplace()); + connection.setAutoCommit(false); + String tenantIdStr = + connection.getTenantId() == null ? null : connection.getTenantId().getString(); + List functionData = + Lists.newArrayListWithExpectedSize(function.getFunctionArguments().size() + 1); + + List args = function.getFunctionArguments(); + try (PreparedStatement argUpsert = connection.prepareStatement(INSERT_FUNCTION_ARGUMENT)) { + for (int i = 0; i < args.size(); i++) { + FunctionArgument arg = args.get(i); + addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i); + } + functionData.addAll(connection.getMutationState().toMutations().next().getSecond()); + connection.rollback(); + } + + try (PreparedStatement functionUpsert = connection.prepareStatement(CREATE_FUNCTION)) { + functionUpsert.setString(1, tenantIdStr); + functionUpsert.setString(2, function.getFunctionName()); + functionUpsert.setInt(3, function.getFunctionArguments().size()); + functionUpsert.setString(4, function.getClassName()); + functionUpsert.setString(5, function.getJarPath()); + functionUpsert.setString(6, function.getReturnType()); + functionUpsert.execute(); + functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond()); + connection.rollback(); + } + MetaDataMutationResult result = + connection.getQueryServices().createFunction(functionData, function, stmt.isTemporary()); + MutationCode code = result.getMutationCode(); + switch (code) { + case FUNCTION_ALREADY_EXISTS: + if (!function.isReplace()) { + throw new FunctionAlreadyExistsException(function.getFunctionName(), + result.getFunctions().get(0)); + } else { + connection.removeFunction(function.getTenantId(), function.getFunctionName(), + result.getMutationTime()); + addFunctionToCache(result); + } + case NEWER_FUNCTION_FOUND: + // Add function to ConnectionQueryServices so it's cached, but don't add + // it to this connection as we can't see it. 
+ throw new NewerFunctionAlreadyExistsException(function.getFunctionName(), + result.getFunctions().get(0)); + default: + List functions = new ArrayList(1); + functions.add(function); + result = new MetaDataMutationResult(code, result.getMutationTime(), functions, true); + if (function.isReplace()) { + connection.removeFunction(function.getTenantId(), function.getFunctionName(), + result.getMutationTime()); + } + addFunctionToCache(result); + } + } finally { + connection.setAutoCommit(wasAutoCommit); + } + return new MutationState(1, 1000, connection); + } + + private static ColumnDef findColumnDefOrNull(List colDefs, ColumnName colName) { + for (ColumnDef colDef : colDefs) { + if (colDef.getColumnDefName().getColumnName().equals(colName.getColumnName())) { + return colDef; + } + } + return null; + } + + private static boolean checkAndValidateRowTimestampCol(ColumnDef colDef, + PrimaryKeyConstraint pkConstraint, boolean rowTimeStampColAlreadyFound, PTableType tableType) + throws SQLException { + + ColumnName columnDefName = colDef.getColumnDefName(); + if ( + tableType == VIEW + && (pkConstraint.getNumColumnsWithRowTimestamp() > 0 || colDef.isRowTimestamp()) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW) + .setColumnName(columnDefName.getColumnName()).build().buildException(); } - /* - Create splits either from the provided splits or reading from SPLITS_FILE. + * For indexes we have already validated that the data table has the right kind and number of + * row_timestamp columns. So we don't need to perform any extra validations for them. */ - private byte[][] processSplits(Map tableProperties, byte[][] splits) - throws SQLException { - String splitFilesLocation = (String) tableProperties.get(SPLITS_FILE); - if (splitFilesLocation == null || splitFilesLocation.isEmpty()) { - splitFilesLocation = null; - } - - // Both splits and split file location are not passed, so return empty split. - if (splits.length == 0 && splitFilesLocation == null) { - return splits; + if (tableType == TABLE) { + boolean isColumnDeclaredRowTimestamp = + colDef.isRowTimestamp() || pkConstraint.isColumnRowTimestamp(columnDefName); + if (isColumnDeclaredRowTimestamp) { + boolean isColumnPartOfPk = colDef.isPK() || pkConstraint.contains(columnDefName); + // A column can be declared as ROW_TIMESTAMP only if it is part of the primary key + if (isColumnDeclaredRowTimestamp && !isColumnPartOfPk) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_PK_COL_ONLY) + .setColumnName(columnDefName.getColumnName()).build().buildException(); } - // Both splits[] and splitFileLocation are provided. Throw an exception in this case. - if (splits.length != 0 && splitFilesLocation != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLITS_AND_SPLIT_FILE_EXISTS) - .build().buildException(); + // A column can be declared as ROW_TIMESTAMP only if it can be represented as a long + PDataType dataType = colDef.getDataType(); + if ( + isColumnDeclaredRowTimestamp && (dataType != PLong.INSTANCE + && dataType != PUnsignedLong.INSTANCE && !dataType.isCoercibleTo(PTimestamp.INSTANCE)) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE) + .setColumnName(columnDefName.getColumnName()).build().buildException(); } - // This means we only have splits[] and no split file location is specified - if (splitFilesLocation == null) { - return splits; - } - // This means splits[] is empty and split file location is not null. 
- File splitFile = new File(splitFilesLocation); - // Check if file exists and is a file not a directory. - if (!splitFile.exists() || !splitFile.isFile()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_FILE_DONT_EXIST) - .build().buildException(); - } - List splitsListFromFile = new ArrayList<>(); - Path path = Paths.get(splitFilesLocation); - try (BufferedReader reader = Files.newBufferedReader(path)) { - String line; - while ((line = reader.readLine()) != null) { - splitsListFromFile.add(Bytes.toBytes(line)); - } - } catch (IOException ioe) { - LOGGER.warn("Exception while reading splits file", ioe); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_OPEN_SPLIT_FILE) - .build().buildException(); + // Only one column can be declared as a ROW_TIMESTAMP column + if (rowTimeStampColAlreadyFound && isColumnDeclaredRowTimestamp) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_ONE_PK_COL_ONLY) + .setColumnName(columnDefName.getColumnName()).build().buildException(); } - return splitsListFromFile.toArray(new byte[splitsListFromFile.size()][]); + return true; + } } - - /** - * Populate properties for the table and common properties for all column families of the table - * @param statementProps Properties specified in SQL statement - * @param tableProps Properties for an HTableDescriptor and Phoenix Table Properties - * @param commonFamilyProps Properties common to all column families - * @param tableType Used to distinguish between index creation vs. base table creation paths - * @throws SQLException - */ - private void populatePropertyMaps(ListMultimap> statementProps, Map tableProps, - Map commonFamilyProps, PTableType tableType, boolean isCDCIndex) throws SQLException { - // Somewhat hacky way of determining if property is for HColumnDescriptor or HTableDescriptor - ColumnFamilyDescriptor defaultDescriptor = ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); - if (!statementProps.isEmpty()) { - Collection> propsList = statementProps.get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY); - for (Pair prop : propsList) { - if (tableType == PTableType.INDEX && !isCDCIndex && - MetaDataUtil.propertyNotAllowedToBeOutOfSync(prop.getFirst())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) - .setMessage("Property: " + prop.getFirst()).build() - .buildException(); - } - //Keeping TTL value as Phoenix Table property irrespective of PhoenixTTLEnabled or - //not to store the value in SYSCAT. To keep PTableImpl.getTTL() consistent - //for client side. - if (prop.getFirst().equalsIgnoreCase(TTL) && tableType != PTableType.SYSTEM) { - tableProps.put(prop.getFirst(), prop.getSecond()); - if (!isPhoenixTTLEnabled()) { - //Handling FOREVER and NONE case for TTL when phoenix.table.ttl.enable is false. 
- Object value = ConnectionQueryServicesImpl.convertForeverAndNoneTTLValue(prop.getSecond(), false); - commonFamilyProps.put(prop.getFirst(), value); - } - //If phoenix.table.ttl.enabled is true doesn't store TTL as columnFamilyProp - continue; - } - - // HTableDescriptor property or Phoenix Table Property - if (defaultDescriptor.getValue(Bytes.toBytes(prop.getFirst())) == null) { - // See PHOENIX-4891 - if (tableType == PTableType.INDEX && UPDATE_CACHE_FREQUENCY.equals(prop.getFirst())) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX) - .build() - .buildException(); - } - tableProps.put(prop.getFirst(), prop.getSecond()); - } else { // HColumnDescriptor property - commonFamilyProps.put(prop.getFirst(), prop.getSecond()); - } - } + return false; + } + + /** + * While adding or dropping columns we write a cell to the SYSTEM.MUTEX table with the rowkey of + * the physical table to prevent conflicting concurrent modifications. For eg two client adding a + * column with the same name but different datatype, or once client dropping a column on a base + * table while another client creating a view or view index that requires the dropped column + */ + private boolean writeCell(String tenantId, String schemaName, String tableName, String columnName) + throws SQLException { + return connection.getQueryServices().writeMutexCell(tenantId, schemaName, tableName, columnName, + null); + } + + /** + * Remove the cell that was written to to the SYSTEM.MUTEX table with the rowkey of the physical + * table + */ + private void deleteCell(String tenantId, String schemaName, String tableName, String columnName) + throws SQLException { + connection.getQueryServices().deleteMutexCell(tenantId, schemaName, tableName, columnName, + null); + } + + /** + * Populate the properties for each column family referenced in the create table statement + * @param familyNames column families referenced in the create table statement + * @param commonFamilyProps properties common to all column families + * @param statement create table statement + * @param defaultFamilyName the default column family name + * @param isLocalIndex true if in the create local index path + * @param familyPropList list containing pairs of column families and their corresponding + * properties + */ + private void populateFamilyPropsList(Map familyNames, + Map commonFamilyProps, CreateTableStatement statement, String defaultFamilyName, + boolean isLocalIndex, final List>> familyPropList) + throws SQLException { + for (PName familyName : familyNames.values()) { + String fam = familyName.getString(); + Collection> propsForCF = + statement.getProps().get(IndexUtil.getActualColumnFamilyName(fam)); + // No specific properties for this column family, so add the common family properties + if (propsForCF.isEmpty()) { + familyPropList.add(new Pair<>(familyName.getBytes(), commonFamilyProps)); + } else { + Map combinedFamilyProps = + Maps.newHashMapWithExpectedSize(propsForCF.size() + commonFamilyProps.size()); + combinedFamilyProps.putAll(commonFamilyProps); + for (Pair prop : propsForCF) { + // Don't allow specifying column families for TTL, KEEP_DELETED_CELLS and + // REPLICATION_SCOPE. + // These properties can only be applied for all column families of a table and can't be + // column family specific. 
+ // See PHOENIX-3955 + if ( + !fam.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) + && MetaDataUtil.propertyNotAllowedToBeOutOfSync(prop.getFirst()) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY) + .setMessage("Property: " + prop.getFirst()).build().buildException(); + } + combinedFamilyProps.put(prop.getFirst(), prop.getSecond()); } + familyPropList.add(new Pair<>(familyName.getBytes(), combinedFamilyProps)); + } } - private boolean isPhoenixTTLEnabled() { - return connection.getQueryServices().getConfiguration(). - getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + if (familyNames.isEmpty()) { + // If there are no family names, use the default column family name. This also takes care of + // the case when + // the table ddl has only PK cols present (which means familyNames is empty). + byte[] cf = defaultFamilyName == null + ? (!isLocalIndex + ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES + : QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES) + : Bytes.toBytes(defaultFamilyName); + familyPropList.add(new Pair<>(cf, commonFamilyProps)); } - - private boolean isViewTTLEnabled() { - return connection.getQueryServices().getConfiguration(). - getBoolean(QueryServices.PHOENIX_VIEW_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED); + } + + /*** + * Get TTL defined for given entity (Index, View or Table) in hierarchy * For an index it will + * return TTL defined either from parent table or from parent view's hierarchy if it is defined. * + * For view it will return TTL defined from its parent table or from parent view's hierarchy if it + * is defined * For table it will just return TTL_NOT_DEFINED as it has no parent. + * @param parent entity's parent + * @return TTL from hierarchy if defined otherwise TTL_NOT_DEFINED. + * @throws TableNotFoundException if not able ot find any table in hierarchy + */ + private Integer checkAndGetTTLFromHierarchy(PTable parent) throws SQLException { + return parent != null + ? (parent.getType() == TABLE + ? parent.getTTL() + : (parent.getType() == VIEW && parent.getViewType() != MAPPED + ? getTTLFromViewHierarchy(parent) + : TTL_NOT_DEFINED)) + : TTL_NOT_DEFINED; + } + + /** + * Get TTL defined for the given View if it is defined in hierarchy. + * @return appropriate TTL from Views defined above for the entity calling. + * @throws TableNotFoundException if not able to find any table in hierarchy + */ + private Integer getTTLFromViewHierarchy(PTable view) throws SQLException { + return view.getTTL() != TTL_NOT_DEFINED + ? Integer.valueOf(view.getTTL()) + : (checkIfParentIsTable(view) + ? 
PhoenixRuntime.getTable(connection, view.getPhysicalNames().get(0).toString()).getTTL() + : getTTLFromViewHierarchy( + PhoenixRuntime.getTable(connection, view.getParentName().toString()))); + } + + private boolean checkIfParentIsTable(PTable view) { + PName parentName = view.getParentName(); + if (parentName == null) { + // means this is a view on dataTable + return true; } + return parentName.getString().equals(view.getPhysicalName().getString()); + } + + private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, + final PTable parent, String viewStatement, ViewType viewType, PDataType viewIndexIdType, + final byte[] rowKeyMatcher, final byte[][] viewColumnConstants, + final BitSet isViewColumnReferenced, boolean allocateIndexId, IndexType indexType, + Date asyncCreatedDate, Set cdcIncludeScopes, + Map tableProps, Map commonFamilyProps) throws SQLException { + final PTableType tableType = statement.getTableType(); + boolean wasAutoCommit = connection.getAutoCommit(); + TableName tableNameNode = null; + boolean allowSystemCatalogRollback = connection.getQueryServices().getProps().getBoolean( + QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, + QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); + Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); + String parentPhysicalName = (parent != null && parent.getPhysicalName() != null) + ? parent.getPhysicalName().getString() + : null; + String parentPhysicalSchemaName = + parentPhysicalName != null ? SchemaUtil.getSchemaNameFromFullName(parentPhysicalName) : null; + String parentPhysicalTableName = + parentPhysicalName != null ? SchemaUtil.getTableNameFromFullName(parentPhysicalName) : null; + connection.rollback(); + try { + connection.setAutoCommit(false); + List tableMetaData = + Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3); + + tableNameNode = statement.getTableName(); + final String schemaName = + connection.getSchema() != null && tableNameNode.getSchemaName() == null + ? connection.getSchema() + : tableNameNode.getSchemaName(); + final String tableName = tableNameNode.getTableName(); + String parentTableName = null; + PName tenantId = connection.getTenantId(); + String tenantIdStr = tenantId == null ? null : tenantId.getString(); + Long scn = connection.getSCN(); + long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + boolean multiTenant = false; + boolean storeNulls = false; + TransactionFactory.Provider transactionProvider = + (parent != null) ? parent.getTransactionProvider() : null; + Integer saltBucketNum = null; + String defaultFamilyName = null; + boolean isImmutableRows = false; + boolean isAppendOnlySchema = false; + List physicalNames = Collections.emptyList(); + boolean addSaltColumn = false; + boolean rowKeyOrderOptimizable = true; + Long timestamp = null; + boolean isNamespaceMapped = parent == null + ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps()) + : parent.isNamespaceMapped(); + boolean isLocalIndex = indexType == IndexType.LOCAL; + QualifierEncodingScheme encodingScheme = NON_ENCODED_QUALIFIERS; + ImmutableStorageScheme immutableStorageScheme = ONE_CELL_PER_COLUMN; + int baseTableColumnCount = tableType == PTableType.VIEW + ? 
parent.getColumns().size() + : QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; + + Integer ttl = TTL_NOT_DEFINED; + Integer ttlFromHierarchy = TTL_NOT_DEFINED; + Integer ttlProp = (Integer) TableProperty.TTL.getValue(tableProps); + + // Validate TTL prop value if set + if (ttlProp != null) { + if (ttlProp < 0) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage(String.format("entity = %s, TTL value should be > 0", tableName)).build() + .buildException(); + } + if (!isViewTTLEnabled() && tableType == VIEW) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_TTL_NOT_ENABLED) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } - public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt) - throws SQLException { - // Don't mistakenly commit pending rows - connection.rollback(); - // Check before updating the stats if we have reached the configured time to reupdate the stats once again - ColumnResolver resolver = FromCompiler.getResolver(updateStatisticsStmt, connection); - PTable table = resolver.getTables().get(0).getTable(); - long rowCount = 0; - if (updateStatisticsStmt.updateColumns()) { - rowCount += updateStatisticsInternal(table.getPhysicalName(), table, updateStatisticsStmt.getProps(), true); - } - if (updateStatisticsStmt.updateIndex()) { - // TODO: If our table is a VIEW with multiple indexes or a TABLE with local indexes, - // we may be doing more work that we have to here. We should union the scan ranges - // across all indexes in that case so that we don't re-calculate the same stats - // multiple times. - for (PTable index : table.getIndexes()) { - // If the table is a view, then we will end up calling update stats - // here for all the view indexes on it. We take care of local indexes later. - if (index.getIndexType() != IndexType.LOCAL) { - if (table.getType() != PTableType.VIEW) { - rowCount += updateStatisticsInternal(index.getPhysicalName(), index, - updateStatisticsStmt.getProps(), true); - } else { - rowCount += updateStatisticsInternal(table.getPhysicalName(), index, - updateStatisticsStmt.getProps(), true); - } - } - } - /* - * Update stats for local indexes. This takes care of local indexes on the the table - * as well as local indexes on any views on it. - */ - PName physicalName = table.getPhysicalName(); - List localCFs = MetaDataUtil.getLocalIndexColumnFamilies(connection, physicalName.getBytes()); - if (!localCFs.isEmpty()) { - /* - * We need to pass checkLastStatsUpdateTime as false here. Local indexes are on the - * same table as the physical table. So when the user has requested to update stats - * for both table and indexes on it, we need to make sure that we don't re-check - * LAST_UPDATE_STATS time. If we don't do that then we will end up *not* collecting - * stats for local indexes which would be bad. - * - * Note, that this also means we don't have a way of controlling how often update - * stats can run for local indexes. Consider the case when the user calls UPDATE STATS TABLE - * followed by UPDATE STATS TABLE INDEX. When the second statement is being executed, - * this causes us to skip the check and execute stats collection possibly a bit too frequently. - */ - rowCount += updateStatisticsInternal(physicalName, table, updateStatisticsStmt.getProps(), localCFs, false); - } - // If analyzing the indexes of a multi-tenant table or a table with view indexes - // then analyze all of those indexes too. 
- if (table.getType() != PTableType.VIEW) { - if (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) { - final PName viewIndexPhysicalTableName = PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes())); - PTable indexLogicalTable = new DelegateTable(table) { - @Override - public PName getPhysicalName() { - return viewIndexPhysicalTableName; - } - }; - /* - * Note for future maintainers: local indexes whether on a table or on a view, - * reside on the same physical table as the base table and not the view index - * table. So below call is collecting stats only for non-local view indexes. - */ - rowCount += updateStatisticsInternal(viewIndexPhysicalTableName, indexLogicalTable, updateStatisticsStmt.getProps(), true); - } - } - } - final long count = rowCount; - return new MutationState(1, 1000, connection) { - @Override - public long getUpdateCount() { - return count; - } - }; - } - - private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map statsProps, boolean checkLastStatsUpdateTime) throws SQLException { - return updateStatisticsInternal(physicalName, logicalTable, statsProps, null, checkLastStatsUpdateTime); - } - - private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map statsProps, List cfs, boolean checkLastStatsUpdateTime) throws SQLException { - ReadOnlyProps props = connection.getQueryServices().getProps(); - final long msMinBetweenUpdates = props - .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MIN_STATS_UPDATE_FREQ_MS); - Long scn = connection.getSCN(); - // Always invalidate the cache - long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn; - long msSinceLastUpdate = Long.MAX_VALUE; - if (checkLastStatsUpdateTime) { - String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " - + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME - + " WHERE " + PHYSICAL_NAME + "= ? AND " + COLUMN_FAMILY - + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL"; - try (PreparedStatement selectStatsStmt = connection.prepareStatement(query)) { - selectStatsStmt.setString(1, physicalName.getString()); - try (ResultSet rs = selectStatsStmt.executeQuery(query)) { - if (rs.next()) { - msSinceLastUpdate = rs.getLong(1) - rs.getLong(2); - } - } - } - } - long rowCount = 0; - if (msSinceLastUpdate >= msMinBetweenUpdates) { - /* - * Execute a COUNT(*) through PostDDLCompiler as we need to use the logicalTable passed through, - * since it may not represent a "real" table in the case of the view indexes of a base table. - */ - PostDDLCompiler compiler = new PostDDLCompiler(connection); - //even if table is transactional, while calculating stats we scan the table non-transactionally to - //view all the data belonging to the table - PTable nonTxnLogicalTable = new DelegateTable(logicalTable) { - @Override - public TransactionFactory.Provider getTransactionProvider() { - return null; - } - }; - TableRef tableRef = new TableRef(null, nonTxnLogicalTable, clientTimeStamp, false); - MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), null, cfs, null, clientTimeStamp); - Scan scan = plan.getContext().getScan(); - StatisticsUtil.setScanAttributes(scan, statsProps); - boolean runUpdateStatsAsync = props.getBoolean(QueryServices.RUN_UPDATE_STATS_ASYNC, DEFAULT_RUN_UPDATE_STATS_ASYNC); - scan.setAttribute(RUN_UPDATE_STATS_ASYNC_ATTRIB, runUpdateStatsAsync ? 
TRUE_BYTES : FALSE_BYTES); - MutationState mutationState = plan.execute(); - rowCount = mutationState.getUpdateCount(); + if (tableType != TABLE && (tableType != VIEW || viewType != UPDATABLE)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); } - - /* - * Update the stats table so that client will pull the new one with the updated stats. - * Even if we don't run the command due to the last update time, invalidate the cache. - * This supports scenarios in which a major compaction was manually initiated and the - * client wants the modified stats to be reflected immediately. - */ - if (cfs == null) { - List families = logicalTable.getColumnFamilies(); - if (families.isEmpty()) { - connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), SchemaUtil.getEmptyColumnFamily(logicalTable))); - } else { - for (PColumnFamily family : families) { - connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), family.getName().getBytes())); - } - } - } else { - for (byte[] cf : cfs) { - connection.getQueryServices().invalidateStats(new GuidePostsKey(physicalName.getBytes(), cf)); - } + ttlFromHierarchy = checkAndGetTTLFromHierarchy(parent); + if (ttlFromHierarchy != TTL_NOT_DEFINED) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TTL_ALREADY_DEFINED_IN_HIERARCHY) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - return rowCount; - } - - private MutationState buildIndexAtTimeStamp(PTable index, NamedTableNode dataTableNode) throws SQLException { - // If our connection is at a fixed point-in-time, we need to open a new - // connection so that our new index table is visible. - Properties props = new Properties(connection.getClientInfo()); - props.setProperty(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, Long.toString(connection.getSCN()+1)); - PhoenixConnection conn = new PhoenixConnection(connection, connection.getQueryServices(), props); - MetaDataClient newClientAtNextTimeStamp = new MetaDataClient(conn); - // Re-resolve the tableRef from the now newer connection - conn.setAutoCommit(true); - ColumnResolver resolver = FromCompiler.getResolver(dataTableNode, conn); - TableRef tableRef = resolver.getTables().get(0); - boolean success = false; - SQLException sqlException = null; - try { - MutationState state = newClientAtNextTimeStamp.buildIndex(index, tableRef); - success = true; - return state; - } catch (SQLException e) { - sqlException = e; - } finally { - try { - conn.close(); - } catch (SQLException e) { - if (sqlException == null) { - // If we're not in the middle of throwing another exception - // then throw the exception we got on close. 
- if (success) { - sqlException = e; - } - } else { - sqlException.setNextException(e); - } - } - if (sqlException != null) { - throw sqlException; - } - } - throw new IllegalStateException(); // impossible - } - - private MutationPlan getMutationPlanForBuildingIndex(PTable index, TableRef dataTableRef) throws SQLException { - if (index.getIndexType() == IndexType.LOCAL) { - PostLocalIndexDDLCompiler compiler = - new PostLocalIndexDDLCompiler(connection, getFullTableName(dataTableRef)); - return compiler.compile(index); - } else if (dataTableRef.getTable().isTransactional()){ - PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef); - return compiler.compile(index); + ttl = ttlProp; + } else { + ttlFromHierarchy = checkAndGetTTLFromHierarchy(parent); + } + + Boolean isChangeDetectionEnabledProp = + (Boolean) TableProperty.CHANGE_DETECTION_ENABLED.getValue(tableProps); + verifyChangeDetectionTableType(tableType, isChangeDetectionEnabledProp); + + String schemaVersion = (String) TableProperty.SCHEMA_VERSION.getValue(tableProps); + String streamingTopicName = (String) TableProperty.STREAMING_TOPIC_NAME.getValue(tableProps); + Long maxLookbackAge = (Long) TableProperty.MAX_LOOKBACK_AGE.getValue(tableProps); + + if (maxLookbackAge != null && tableType != TABLE) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + String cdcIncludeScopesStr = + cdcIncludeScopes == null ? null : CDCUtil.makeChangeScopeStringFromEnums(cdcIncludeScopes); + + if (parent != null && tableType == PTableType.INDEX) { + timestamp = TransactionUtil.getTableTimestamp(connection, transactionProvider != null, + transactionProvider); + isImmutableRows = parent.isImmutableRows(); + isAppendOnlySchema = parent.isAppendOnlySchema(); + + // Index on view + // TODO: Can we support a multi-tenant index directly on a multi-tenant + // table instead of only a view? We don't have anywhere to put the link + // from the table to the index, though. + if ( + isLocalIndex + || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED) + ) { + PName physicalName = parent.getPhysicalName(); + + saltBucketNum = parent.getBucketNum(); + addSaltColumn = (saltBucketNum != null && !isLocalIndex); + defaultFamilyName = parent.getDefaultFamilyName() == null + ? null + : parent.getDefaultFamilyName().getString(); + if (isLocalIndex) { + defaultFamilyName = parent.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY + : IndexUtil.getLocalIndexColumnFamily(parent.getDefaultFamilyName().getString()); + saltBucketNum = null; + // Set physical name of local index table + physicalNames = + Collections.singletonList(PNameFactory.newName(physicalName.getBytes())); + } else { + defaultFamilyName = parent.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY + : parent.getDefaultFamilyName().getString(); + // Set physical name of view index table + // Parent is a view and this is an index so we need to get _IDX_+logical name of base + // table. + // parent.getPhysicalName is Schema.Physical of base and we can't use it since the _IDX_ + // table is logical name of the base. + // parent.getName is the view name. 
parent.getBaseTableLogicalName is the logical name + // of the base table + PName parentName = parent.getBaseTableLogicalName(); + physicalNames = Collections.singletonList(PNameFactory + .newName(MetaDataUtil.getViewIndexPhysicalName(parentName, isNamespaceMapped))); + } + } + + multiTenant = parent.isMultiTenant(); + storeNulls = parent.getStoreNulls(); + parentTableName = parent.getTableName().getString(); + // Pass through data table sequence number so we can check it hasn't changed + try ( + PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM)) { + incrementStatement.setString(1, tenantIdStr); + incrementStatement.setString(2, schemaName); + incrementStatement.setString(3, parentTableName); + incrementStatement.setLong(4, parent.getSequenceNumber()); + incrementStatement.execute(); + // Get list of mutations and add to table meta data that will be passed to server + // to guarantee order. This row will always end up last + tableMetaData + .addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); + connection.rollback(); + } + + // Add row linking from data table row to index table row + try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK)) { + linkStatement.setString(1, tenantIdStr); + linkStatement.setString(2, schemaName); + linkStatement.setString(3, parentTableName); + linkStatement.setString(4, tableName); + linkStatement.setByte(5, LinkType.INDEX_TABLE.getSerializedValue()); + linkStatement.setLong(6, parent.getSequenceNumber()); + linkStatement.setString(7, PTableType.INDEX.getSerializedValue()); + linkStatement.execute(); + } + + // Add row linking index table to parent table for indexes on views + if (parent.getType() == PTableType.VIEW) { + try (PreparedStatement linkStatement = + connection.prepareStatement(CREATE_VIEW_INDEX_PARENT_LINK)) { + linkStatement.setString(1, tenantIdStr); + linkStatement.setString(2, schemaName); + linkStatement.setString(3, tableName); + linkStatement.setString(4, parent.getName().getString()); + linkStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue()); + linkStatement.execute(); + } + } + } + + PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint(); + String pkName = null; + List> pkColumnsNames = + Collections.> emptyList(); + Iterator> pkColumnsIterator = Collections.emptyIterator(); + if (pkConstraint != null) { + pkColumnsNames = pkConstraint.getColumnNames(); + pkColumnsIterator = pkColumnsNames.iterator(); + pkName = pkConstraint.getName(); + } + + // Although unusual, it's possible to set a mapped VIEW as having immutable rows. + // This tells Phoenix that you're managing the index maintenance yourself. + if ( + tableType != PTableType.INDEX + && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED) + ) { + // TODO remove TableProperty.IMMUTABLE_ROWS at the next major release + Boolean isImmutableRowsProp = statement.immutableRows() != null + ? 
statement.immutableRows() + : (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps); + if (isImmutableRowsProp == null) { + isImmutableRows = connection.getQueryServices().getProps().getBoolean( + QueryServices.IMMUTABLE_ROWS_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_ROWS); } else { - ServerBuildIndexCompiler compiler = new ServerBuildIndexCompiler(connection, getFullTableName(dataTableRef)); - return compiler.compile(index); - } - } - - private MutationState buildIndex(PTable index, TableRef dataTableRef) throws SQLException { - AlterIndexStatement indexStatement = null; - boolean wasAutoCommit = connection.getAutoCommit(); - try { - connection.setAutoCommit(true); - MutationPlan mutationPlan = getMutationPlanForBuildingIndex(index, dataTableRef); - Scan scan = mutationPlan.getContext().getScan(); - Long scn = connection.getSCN(); - try { - if (ScanUtil.isDefaultTimeRange(scan.getTimeRange())) { - if (scn == null) { - scn = mutationPlan.getContext().getCurrentTime(); - } - scan.setTimeRange(dataTableRef.getLowerBoundTimeStamp(), scn); - } - } catch (IOException e) { - throw new SQLException(e); - } - - // execute index population upsert select - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - MutationState state = connection.getQueryServices().updateData(mutationPlan); - long firstUpsertSelectTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; - - // for global indexes on non transactional tables we might have to - // run a second index population upsert select to handle data rows - // that were being written on the server while the index was created. - // TODO: this sleep time is really arbitrary. If any query is in progress - // while the index is being built, we're depending on this sleep - // waiting them out. Instead we should have a means of waiting until - // all in progress queries are complete (though I'm not sure that's - // feasible). See PHOENIX-4092. - long sleepTime = - connection - .getQueryServices() - .getProps() - .getLong(QueryServices.INDEX_POPULATION_SLEEP_TIME, - QueryServicesOptions.DEFAULT_INDEX_POPULATION_SLEEP_TIME); - if (!dataTableRef.getTable().isTransactional() && sleepTime > 0) { - long delta = sleepTime - firstUpsertSelectTime; - if (delta > 0) { - try { - Thread.sleep(delta); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION) - .setRootCause(e).build().buildException(); - } - } - // set the min timestamp of second index upsert select some time before the index - // was created - long minTimestamp = index.getTimeStamp() - firstUpsertSelectTime; - try { - // TODO: Use scn or LATEST_TIMESTAMP here? It's possible that a DML statement - // ran and ended up with timestamps later than this time. If we use a later - // timestamp, we'll need to run the partial index rebuilder here as it's - // possible that the updates to the table were made (such as deletes) after - // the scn which would not be properly reflected correctly this mechanism. - // See PHOENIX-4092. 
- mutationPlan.getContext().getScan().setTimeRange(minTimestamp, scn); - } catch (IOException e) { - throw new SQLException(e); - } - MutationState newMutationState = - connection.getQueryServices().updateData(mutationPlan); - state.join(newMutationState); - } - - indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null, - TableName.create(index.getSchemaName().getString(), index.getTableName().getString())), - dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE); - alterIndex(indexStatement); - - return state; - } finally { - connection.setAutoCommit(wasAutoCommit); - } - } - - private String getFullTableName(TableRef dataTableRef) { - String schemaName = dataTableRef.getTable().getSchemaName().getString(); - String tableName = dataTableRef.getTable().getTableName().getString(); - String fullName = - schemaName == null ? ("\"" + tableName + "\"") : ("\"" + schemaName + "\"" - + QueryConstants.NAME_SEPARATOR + "\"" + tableName + "\""); - return fullName; - } - - public MutationState declareCursor(DeclareCursorStatement statement, QueryPlan queryPlan) throws SQLException { - CursorUtil.declareCursor(statement, queryPlan); - return new MutationState(0, 0, connection); - } - - public MutationState open(OpenStatement statement) throws SQLException { - CursorUtil.openCursor(statement, connection); - return new MutationState(0, 0, connection); - } - - public MutationState close(CloseStatement statement) throws SQLException { - CursorUtil.closeCursor(statement); - return new MutationState(0, 0, connection); - } - - /** - * Supprort long viewIndexId only if client has explicitly set - * the QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'. - * @return - */ - private PDataType getViewIndexDataType() throws SQLException { - boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean( - QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED); - return supportsLongViewIndexId ? MetaDataUtil.getViewIndexIdDataType() : MetaDataUtil.getLegacyViewIndexIdDataType(); - } - - /** - * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling - * MetaDataClient.createTable. In doing so, we perform the following translations: - * 1) Change the type of any columns being indexed to types that support null if the column is nullable. - * For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null - * when it's in the row key while a BIGINT does not. - * 2) Append any row key column from the data table that is not in the indexed column list. Our indexes - * rely on having a 1:1 correspondence between the index and data rows. - * 3) Change the name of the columns to include the column family. For example, if you have a column - * named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy - * to translate the column references in a query to the correct column references in an index table - * regardless of whether the column reference is prefixed with the column family name or not. It also - * has the side benefit of allowing the same named column in different column families to both be - * listed as an index column. 
- * @param statement - * @param splits - * @return MutationState from population of index table from data table - * @throws SQLException - */ - public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException { - IndexKeyConstraint ik = statement.getIndexConstraint(); - TableName indexTableName = statement.getIndexTableName(); - - Map tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size()); - Map commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1); - populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, PTableType.INDEX, - CDCUtil.isCDCIndex(SchemaUtil - .getTableNameFromFullName(statement.getIndexTableName().toString()))); - List> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList(); - List includedColumns = statement.getIncludeColumns(); - TableRef tableRef = null; - PTable table = null; - boolean allocateIndexId = false; - boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL; - int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion(); - if (isLocalIndex) { - if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException(); - } - if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException(); - } - } - Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); - String physicalSchemaName = null; - String physicalTableName = null; - PTable dataTable = null; - try { - ColumnResolver resolver - = FromCompiler.getResolverForCreateIndex( - statement, connection, statement.getUdfParseNodes()); - tableRef = resolver.getTables().get(0); - Date asyncCreatedDate = null; - if (statement.isAsync()) { - asyncCreatedDate = new Date(tableRef.getCurrentTime()); - } - dataTable = tableRef.getTable(); - boolean isTenantConnection = connection.getTenantId() != null; - if (isTenantConnection) { - if (dataTable.getType() != PTableType.VIEW) { - throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection"); - } - } - if (!dataTable.isImmutableRows()) { - if (hbaseVersion < MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException(); - } - if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException(); - } - boolean tableWithRowTimestampCol = dataTable.getRowTimestampColPos() != -1; - if (tableWithRowTimestampCol) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_INDEX_ON_MUTABLE_TABLE_WITH_ROWTIMESTAMP).setTableName(indexTableName.getTableName()).build().buildException(); - } - } - if (dataTable.isTransactional() - && isLocalIndex - && dataTable.getTransactionProvider().getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.ALLOW_LOCAL_INDEX)) { - throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE).setMessage(dataTable.getTransactionProvider().name()).setTableName(indexTableName.getTableName()).build().buildException(); - } - int posOffset = 0; - List pkColumns = dataTable.getPKColumns(); - Set unusedPkColumns; - if (dataTable.getBucketNum() != null) { // Ignore SALT column - unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()-1); - posOffset++; - } else { - unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size()); - } - for (int i = posOffset; i < pkColumns.size(); i++) { - PColumn column = pkColumns.get(i); - unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\""+column.getName().getString()+"\"")); - } - List allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size()); - List columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size()); - - /* - * Allocate an index ID in two circumstances: - * 1) for a local index, as all local indexes will reside in the same HBase table - * 2) for a view on an index. - */ - if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) { - allocateIndexId = true; - PDataType dataType = getViewIndexDataType(); - ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName()); - allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false)); - columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null, false)); - } - - if (dataTable.isMultiTenant()) { - PColumn col = dataTable.getPKColumns().get(posOffset); - RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString()); - unusedPkColumns.remove(columnExpression); - PDataType dataType = IndexUtil.getIndexColumnDataType(col); - ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); - allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false)); - columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString(), col.isRowTimestamp())); - } - - PhoenixStatement phoenixStatment = new PhoenixStatement(connection); - StatementContext context = new StatementContext(phoenixStatment, resolver); - IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context); - Set indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size()); - for (Pair pair : indexParseNodeAndSortOrderList) { - ParseNode parseNode = pair.getFirst(); - // normalize the parse node - parseNode = StatementNormalizer.normalize(parseNode, resolver); - // compile the parseNode to get an expression - expressionIndexCompiler.reset(); - Expression expression = parseNode.accept(expressionIndexCompiler); - if (expressionIndexCompiler.isAggregate()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException(); - } - if (expressionIndexCompiler.isJsonFragment()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.JSON_FRAGMENT_NOT_ALLOWED_IN_INDEX_EXPRESSION).build() - .buildException(); - } - if (!(expression.getDeterminism() == Determinism.ALWAYS || expression.getDeterminism() == 
Determinism.PER_ROW)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException(); - } - if (expression.isStateless()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException(); - } - unusedPkColumns.remove(expression); - - // Go through parse node to get string as otherwise we - // can lose information during compilation - StringBuilder buf = new StringBuilder(); - parseNode.toSQL(resolver, buf); - // need to escape backslash as this expression will be re-parsed later - String expressionStr = StringUtil.escapeBackslash(buf.toString()); - - ColumnName colName = null; - ColumnRef colRef = expressionIndexCompiler.getColumnRef(); - boolean isRowTimestamp = false; - if (colRef!=null) { - // if this is a regular column - PColumn column = colRef.getColumn(); - String columnFamilyName = column.getFamilyName()!=null ? column.getFamilyName().getString() : null; - colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString())); - isRowTimestamp = column.isRowTimestamp(); - } - else { - // if this is an expression - // TODO column names cannot have double quotes, remove this once this PHOENIX-1621 is fixed - String name = expressionStr.replaceAll("\"", "'"); - colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name)); - } - indexedColumnNames.add(colName); - PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType()); - allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp)); - columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp)); - } - - // Next all the PK columns from the data table that aren't indexed - if (!unusedPkColumns.isEmpty()) { - for (RowKeyColumnExpression colExpression : unusedPkColumns) { - PColumn col = dataTable.getPKColumns().get(colExpression.getPosition()); - // Don't add columns with constant values from updatable views, as - // we don't need these in the index - if (col.getViewConstant() == null) { - ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); - allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), col.isRowTimestamp())); - PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType()); - columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), - colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), - false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp())); - } - } - } - - // Last all the included columns (minus any PK columns) - for (ColumnName colName : includedColumns) { - PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn(); - colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col)); - // Check for duplicates between indexed and included columns - if (indexedColumnNames.contains(colName)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException(); - } - if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) { - // Need to re-create ColumnName, since the above one won't have the column family name - colName = 
ColumnName.caseSensitiveColumnName(isLocalIndex?IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()):col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col)); - columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), col.getExpressionStr(), col.isRowTimestamp())); - } - } - - Configuration config = connection.getQueryServices().getConfiguration(); - if (!connection.getQueryServices().getProps() - .getBoolean(DISABLE_VIEW_SUBTREE_VALIDATION, - DEFAULT_DISABLE_VIEW_SUBTREE_VALIDATION)) { - verifyIfDescendentViewsExtendPk(dataTable, config); - } - // for view indexes - if (dataTable.getType() == PTableType.VIEW) { - String physicalName = dataTable.getPhysicalName().getString(); - physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName); - physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName); - List requiredCols = Lists.newArrayList(indexedColumnNames); - requiredCols.addAll(includedColumns); - for (ColumnName colName : requiredCols) { - // acquire the mutex using the global physical table name to - // prevent this column from being dropped while the view is being created - String colNameSeparatedByDot = colName.getColumnName() - .replace(QueryConstants.NAMESPACE_SEPARATOR, - QueryConstants.NAME_SEPARATOR); - // indexed column name have a ':' between the column family and column name - // We would like to have '.' like in other column names - boolean acquiredMutex = writeCell(null, physicalSchemaName, physicalTableName, - colNameSeparatedByDot); - if (!acquiredMutex) { - throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); - } - acquiredColumnMutexSet.add(colNameSeparatedByDot); - } - } - - long threshold = Long.parseLong(config.get(QueryServices.CLIENT_INDEX_ASYNC_THRESHOLD)); - - if (threshold > 0 && !statement.isAsync()) { - Set columnFamilies = new HashSet<>(); - for (ColumnDef column : columnDefs){ - try { - String columnFamily = IndexUtil - .getDataColumnFamilyName(column.getColumnDefName().getColumnName()); - columnFamilies.add(!columnFamily.equals("") ? columnFamily - : dataTable.getDefaultFamilyName()!= null ? 
- dataTable.getDefaultFamilyName().toString() - : QueryConstants.DEFAULT_COLUMN_FAMILY); - } catch (Exception ignored){ - ; // We ignore any exception during this phase - } - } - long estimatedBytes = 0; - for (String colFamily : columnFamilies) { - GuidePostsInfo gps = connection.getQueryServices().getTableStats( - new GuidePostsKey(Bytes.toBytes(tableRef.getTable().toString()), - Bytes.toBytes(colFamily))); - long[] byteCounts = gps.getByteCounts(); - for (long byteCount : byteCounts) { - estimatedBytes += byteCount; - } - - if (threshold < estimatedBytes) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.ABOVE_INDEX_NON_ASYNC_THRESHOLD) - .build().buildException(); - } - } - } - - // Set DEFAULT_COLUMN_FAMILY_NAME of index to match data table - // We need this in the props so that the correct column family is created - if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && !allocateIndexId) { - statement.getProps().put("", new Pair(DEFAULT_COLUMN_FAMILY_NAME,dataTable.getDefaultFamilyName().getString())); - } - PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns); - - tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getPhysicalName().getString()); - CreateTableStatement tableStatement = FACTORY.createTable( - indexTableName, - statement.getProps(), - columnDefs, - pk, - statement.getSplitNodes(), - PTableType.INDEX, - statement.ifNotExists(), - null, - statement.getWhere(), - statement.getBindCount(), - null - ); - table = createTableInternal( - tableStatement, - splits, - dataTable, - null, - null, - getViewIndexDataType(), - null, - null, - null, - allocateIndexId, - statement.getIndexType(), - asyncCreatedDate, - null, - tableProps, - commonFamilyProps - ); - } - finally { - deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); - } - if (table == null) { - return new MutationState(0, 0, connection); - } - - if (LOGGER.isInfoEnabled()) LOGGER.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp()); - boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean( - QueryServices.INDEX_ASYNC_BUILD_ENABLED, - QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED); - // In async process, we return immediately as the MR job needs to be triggered . - if (statement.isAsync() && asyncIndexBuildEnabled) { - return new MutationState(0, 0, connection); - } - - // If we create index in create_disabled state, we will build them later - if (table.getIndexState() == PIndexState.CREATE_DISABLE) { - return new MutationState(0, 0, connection); - } - - // If our connection is at a fixed point-in-time, we need to open a new - // connection so that our new index table is visible. - if (connection.getSCN() != null) { - return buildIndexAtTimeStamp(table, statement.getTable()); - } - - MutationState state = buildIndex(table, tableRef); - // If client is validating LAST_DDL_TIMESTAMPS, parent's last_ddl_timestamp changed - // so remove it from client's cache. It will be refreshed when table is accessed next time. 
- if (ValidateLastDDLTimestampUtil.getValidateLastDdlTimestampEnabled(connection)) { - connection.removeTable(connection.getTenantId(), dataTable.getName().getString(), - null, dataTable.getTimeStamp()); - } - return state; - } - - public MutationState createCDC(CreateCDCStatement statement) throws SQLException { - ColumnResolver resolver = FromCompiler.getResolver(NamedTableNode.create(statement.getDataTable()), connection); - TableRef tableRef = resolver.getTables().get(0); - PTable dataTable = tableRef.getTable(); - - Map tableProps = Maps.newHashMapWithExpectedSize( - statement.getProps().size()); - Map commonFamilyProps = Maps.newHashMapWithExpectedSize( - statement.getProps().size() + 1); - populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps, PTableType.CDC, - false); - Properties props = connection.getClientInfo(); - props.put(INDEX_CREATE_DEFAULT_STATE, "ACTIVE"); - - String - dataTableFullName = - SchemaUtil.getTableName(statement.getDataTable().getSchemaName(), - statement.getDataTable().getTableName()); - String - createIndexSql = - "CREATE UNCOVERED INDEX " + (statement.isIfNotExists() ? "IF NOT EXISTS " : "") - + CDCUtil.getCDCIndexName(statement.getCdcObjName().getName()) - + " ON " + dataTableFullName + " (" - + PhoenixRowTimestampFunction.NAME + "()) ASYNC"; - List indexProps = new ArrayList<>(); - indexProps.add("REPLICATION_SCOPE=0"); - Object saltBucketNum = TableProperty.SALT_BUCKETS.getValue(tableProps); + isImmutableRows = isImmutableRowsProp; + } + } + if (tableType == PTableType.TABLE) { + Boolean isAppendOnlySchemaProp = + (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps); + isAppendOnlySchema = isAppendOnlySchemaProp != null ? isAppendOnlySchemaProp : false; + } + + // Can't set any of these on views or shared indexes on views + if (tableType != PTableType.VIEW && tableType != PTableType.CDC && !allocateIndexId) { + saltBucketNum = (Integer) TableProperty.SALT_BUCKETS.getValue(tableProps); if (saltBucketNum != null) { - indexProps.add("SALT_BUCKETS=" + saltBucketNum); - } - Object columnEncodedBytes = TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); - if (columnEncodedBytes != null) { - indexProps.add("COLUMN_ENCODED_BYTES=" + columnEncodedBytes); - } - createIndexSql = createIndexSql + " " + String.join(", ", indexProps); - try (Connection internalConnection = QueryUtil.getConnection(props, connection.getQueryServices().getConfiguration())) { - PhoenixStatement pstmt = new PhoenixStatement((PhoenixConnection) internalConnection); - pstmt.execute(createIndexSql); - } catch (SQLException e) { - if (e.getErrorCode() == TABLE_ALREADY_EXIST.getErrorCode()) { - throw new SQLExceptionInfo.Builder(TABLE_ALREADY_EXIST).setTableName( - statement.getCdcObjName().getName()).setRootCause( - e).build().buildException(); - } - throw e; - } - - List pkColumns = dataTable.getPKColumns(); - List columnDefs = new ArrayList<>(); - List pkColumnDefs = new ArrayList<>(); - int pkOffset = dataTable.getBucketNum() != null ? 
1 : 0; - for (int i = pkOffset; i < pkColumns.size(); ++i) { - PColumn pcol = pkColumns.get(i); - columnDefs.add(FACTORY.columnDef(FACTORY.columnName(pcol.getName().getString()), - pcol.getDataType().getSqlTypeName(), false, null, false, pcol.getMaxLength(), - pcol.getScale(), false, pcol.getSortOrder(), "", null, false)); - pkColumnDefs.add(FACTORY.columnDefInPkConstraint(FACTORY.columnName( - pcol.getName().getString()), pcol.getSortOrder(), pcol.isRowTimestamp())); - } - columnDefs.add(FACTORY.columnDef(FACTORY.columnName(QueryConstants.CDC_JSON_COL_NAME), - PVarchar.INSTANCE.getSqlTypeName(), false, null, true, null, - null, false, SortOrder.getDefault(), "", null, false)); - tableProps = new HashMap<>(); - if (dataTable.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) { - // CDC table doesn't need SINGLE_CELL_ARRAY_WITH_OFFSETS encoding, so override it. - tableProps.put(TableProperty.IMMUTABLE_STORAGE_SCHEME.getPropertyName(), - ONE_CELL_PER_COLUMN.name()); - } - if (dataTable.isMultiTenant()) { - tableProps.put(TableProperty.MULTI_TENANT.getPropertyName(), Boolean.TRUE); - } - CreateTableStatement tableStatement = FACTORY.createTable( - FACTORY.table(dataTable.getSchemaName().getString(), statement.getCdcObjName().getName()), - null, columnDefs, FACTORY.primaryKey(null, pkColumnDefs), - Collections.emptyList(), PTableType.CDC, statement.isIfNotExists(), null, null, - statement.getBindCount(), null); - createTableInternal(tableStatement, null, dataTable, null, null, null, null, - null, null, false, null, - null, statement.getIncludeScopes(), tableProps, commonFamilyProps); - return new MutationState(0, 0, connection); - } - - /** - * Go through all the descendent views from the child view hierarchy and find if any of the - * descendent views extends the primary key, throw error. - * - * @param tableOrView view or table on which the index is being created. - * @param config the configuration. - * @throws SQLException if any of the descendent views extends pk or if something goes wrong - * while querying descendent view hierarchy. - */ - private void verifyIfDescendentViewsExtendPk(PTable tableOrView, Configuration config) - throws SQLException { - if (connection.getQueryServices() instanceof ConnectionlessQueryServicesImpl) { - return; - } - if (connection.getQueryServices() instanceof DelegateQueryServices) { - DelegateQueryServices services = (DelegateQueryServices) connection.getQueryServices(); - if (services.getDelegate() instanceof ConnectionlessQueryServicesImpl) { - return; - } - } - byte[] systemChildLinkTable = SchemaUtil.isNamespaceMappingEnabled(null, config) ? - SYSTEM_CHILD_LINK_NAMESPACE_BYTES : - SYSTEM_CHILD_LINK_NAME_BYTES; - try (Table childLinkTable = - connection.getQueryServices().getTable(systemChildLinkTable)) { - byte[] tenantId = connection.getTenantId() == null ? 
null - : connection.getTenantId().getBytes(); - byte[] schemaNameBytes = tableOrView.getSchemaName().getBytes(); - byte[] viewOrTableName = tableOrView.getTableName().getBytes(); - Pair, List> descViews = - ViewUtil.findAllDescendantViews( - childLinkTable, - config, - tenantId, - schemaNameBytes, - viewOrTableName, - HConstants.LATEST_TIMESTAMP, - false); - List legitimateChildViews = descViews.getFirst(); - int dataTableOrViewPkCols = tableOrView.getPKColumns().size(); - if (legitimateChildViews != null && legitimateChildViews.size() > 0) { - for (PTable childView : legitimateChildViews) { - if (childView.getPKColumns().size() > dataTableOrViewPkCols) { - LOGGER.error("Creation of view index not allowed as child view {}" - + " extends pk", childView.getName()); - throw new SQLExceptionInfo.Builder( - SQLExceptionCode - .CANNOT_CREATE_INDEX_CHILD_VIEWS_EXTEND_PK) - .build() - .buildException(); - } - } - } - } catch (IOException e) { - LOGGER.error("Error while retrieving descendent views", e); - throw new SQLException(e); - } - } - - public MutationState dropSequence(DropSequenceStatement statement) throws SQLException { - Long scn = connection.getSCN(); - long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - String schemaName = connection.getSchema() != null && statement.getSequenceName().getSchemaName() == null - ? connection.getSchema() : statement.getSequenceName().getSchemaName(); - String sequenceName = statement.getSequenceName().getTableName(); - String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString(); - try { - connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName, timestamp); - } catch (SequenceNotFoundException e) { - if (statement.ifExists()) { - return new MutationState(0, 0, connection); - } - throw e; - } - return new MutationState(1, 1000, connection); - } - - public MutationState createSequence(CreateSequenceStatement statement, long startWith, - long incrementBy, long cacheSize, long minValue, long maxValue) throws SQLException { - Long scn = connection.getSCN(); - long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - String tenantId = - connection.getTenantId() == null ? 
null : connection.getTenantId().getString(); - String schemaName=statement.getSequenceName().getSchemaName(); - if (SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) { - if (schemaName == null || schemaName.equals(StringUtil.EMPTY_STRING)) { - schemaName = connection.getSchema(); - } - if (schemaName != null) { - FromCompiler.getResolverForSchema(schemaName, connection); - } - } - return createSequence(tenantId, schemaName, statement - .getSequenceName().getTableName(), statement.ifNotExists(), startWith, incrementBy, - cacheSize, statement.getCycle(), minValue, maxValue, timestamp); - } - - private MutationState createSequence(String tenantId, String schemaName, String sequenceName, - boolean ifNotExists, long startWith, long incrementBy, long cacheSize, boolean cycle, - long minValue, long maxValue, long timestamp) throws SQLException { - try { - connection.getQueryServices().createSequence(tenantId, schemaName, sequenceName, - startWith, incrementBy, cacheSize, minValue, maxValue, cycle, timestamp); - } catch (SequenceAlreadyExistsException e) { - if (ifNotExists) { - return new MutationState(0, 0, connection); - } - throw e; - } - return new MutationState(1, 1000, connection); - } - - public MutationState createFunction(CreateFunctionStatement stmt) throws SQLException { - boolean wasAutoCommit = connection.getAutoCommit(); - connection.rollback(); - try { - PFunction function = new PFunction(stmt.getFunctionInfo(), stmt.isTemporary(), stmt.isReplace()); - connection.setAutoCommit(false); - String tenantIdStr = connection.getTenantId() == null ? null : connection.getTenantId().getString(); - List functionData = Lists.newArrayListWithExpectedSize(function.getFunctionArguments().size() + 1); - - List args = function.getFunctionArguments(); - try (PreparedStatement argUpsert = connection.prepareStatement(INSERT_FUNCTION_ARGUMENT)) { - for (int i = 0; i < args.size(); i++) { - FunctionArgument arg = args.get(i); - addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i); - } - functionData.addAll(connection.getMutationState().toMutations().next().getSecond()); - connection.rollback(); - } - - try (PreparedStatement functionUpsert = connection.prepareStatement(CREATE_FUNCTION)) { - functionUpsert.setString(1, tenantIdStr); - functionUpsert.setString(2, function.getFunctionName()); - functionUpsert.setInt(3, function.getFunctionArguments().size()); - functionUpsert.setString(4, function.getClassName()); - functionUpsert.setString(5, function.getJarPath()); - functionUpsert.setString(6, function.getReturnType()); - functionUpsert.execute(); - functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond()); - connection.rollback(); - } - MetaDataMutationResult result = connection.getQueryServices().createFunction(functionData, function, stmt.isTemporary()); - MutationCode code = result.getMutationCode(); - switch(code) { - case FUNCTION_ALREADY_EXISTS: - if (!function.isReplace()) { - throw new FunctionAlreadyExistsException(function.getFunctionName(), result - .getFunctions().get(0)); - } else { - connection.removeFunction(function.getTenantId(), function.getFunctionName(), - result.getMutationTime()); - addFunctionToCache(result); - } - case NEWER_FUNCTION_FOUND: - // Add function to ConnectionQueryServices so it's cached, but don't add - // it to this connection as we can't see it. 
- throw new NewerFunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0)); - default: - List functions = new ArrayList(1); - functions.add(function); - result = new MetaDataMutationResult(code, result.getMutationTime(), functions, true); - if (function.isReplace()) { - connection.removeFunction(function.getTenantId(), function.getFunctionName(), - result.getMutationTime()); - } - addFunctionToCache(result); - } - } finally { - connection.setAutoCommit(wasAutoCommit); - } - return new MutationState(1, 1000, connection); - } - - private static ColumnDef findColumnDefOrNull(List colDefs, ColumnName colName) { - for (ColumnDef colDef : colDefs) { - if (colDef.getColumnDefName().getColumnName().equals(colName.getColumnName())) { - return colDef; - } - } - return null; - } - - private static boolean checkAndValidateRowTimestampCol(ColumnDef colDef, PrimaryKeyConstraint pkConstraint, - boolean rowTimeStampColAlreadyFound, PTableType tableType) throws SQLException { - - ColumnName columnDefName = colDef.getColumnDefName(); - if (tableType == VIEW && (pkConstraint.getNumColumnsWithRowTimestamp() > 0 || colDef.isRowTimestamp())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW) - .setColumnName(columnDefName.getColumnName()).build().buildException(); + if (saltBucketNum < 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM).build() + .buildException(); + } + } + // Salt the index table if the data table is salted + if (saltBucketNum == null) { + if (parent != null) { + saltBucketNum = parent.getBucketNum(); + } + } else if (saltBucketNum.intValue() == 0) { + saltBucketNum = null; // Provides a way for an index to not be salted if its data table is + // salted + } + addSaltColumn = (saltBucketNum != null); + } + + // Can't set MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an INDEX or a non mapped VIEW + if ( + tableType != PTableType.INDEX + && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED) + ) { + Boolean multiTenantProp = (Boolean) tableProps.get(PhoenixDatabaseMetaData.MULTI_TENANT); + multiTenant = Boolean.TRUE.equals(multiTenantProp); + defaultFamilyName = (String) TableProperty.DEFAULT_COLUMN_FAMILY.getValue(tableProps); + } + + boolean disableWAL = false; + Boolean disableWALProp = (Boolean) TableProperty.DISABLE_WAL.getValue(tableProps); + if (disableWALProp != null) { + disableWAL = disableWALProp; + } + long updateCacheFrequency = + (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue(connection.getQueryServices() + .getProps().get(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); + if (tableType == PTableType.INDEX && parent != null) { + updateCacheFrequency = parent.getUpdateCacheFrequency(); + } + Long updateCacheFrequencyProp = + (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps); + if (tableType != PTableType.INDEX && updateCacheFrequencyProp != null) { + updateCacheFrequency = updateCacheFrequencyProp; + } + + String physicalTableName = (String) TableProperty.PHYSICAL_TABLE_NAME.getValue(tableProps); + String autoPartitionSeq = (String) TableProperty.AUTO_PARTITION_SEQ.getValue(tableProps); + Long guidePostsWidth = (Long) TableProperty.GUIDE_POSTS_WIDTH.getValue(tableProps); + + // We only allow setting guide post width for a base table + if (guidePostsWidth != null && tableType != PTableType.TABLE) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH) + 
.setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + + Boolean storeNullsProp = (Boolean) TableProperty.STORE_NULLS.getValue(tableProps); + if (storeNullsProp == null) { + if (parent == null) { + storeNulls = connection.getQueryServices().getProps().getBoolean( + QueryServices.DEFAULT_STORE_NULLS_ATTRIB, QueryServicesOptions.DEFAULT_STORE_NULLS); + tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.valueOf(storeNulls)); + } + } else { + storeNulls = storeNullsProp; + } + Boolean transactionalProp = (Boolean) TableProperty.TRANSACTIONAL.getValue(tableProps); + TransactionFactory.Provider transactionProviderProp = + (TransactionFactory.Provider) TableProperty.TRANSACTION_PROVIDER.getValue(tableProps); + if ((transactionalProp != null || transactionProviderProp != null) && parent != null) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + if (parent == null) { + boolean transactional; + if (transactionProviderProp != null) { + transactional = true; + } else if (transactionalProp == null) { + transactional = connection.getQueryServices().getProps().getBoolean( + QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, + QueryServicesOptions.DEFAULT_TABLE_ISTRANSACTIONAL); + } else { + transactional = transactionalProp; + } + if (transactional) { + if (transactionProviderProp == null) { + transactionProvider = (TransactionFactory.Provider) TableProperty.TRANSACTION_PROVIDER + .getValue(connection.getQueryServices().getProps().get( + QueryServices.DEFAULT_TRANSACTION_PROVIDER_ATTRIB, + QueryServicesOptions.DEFAULT_TRANSACTION_PROVIDER)); + } else { + transactionProvider = transactionProviderProp; + } + } + } + boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean( + QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); + // can't create a transactional table if transactions are not enabled + if (!transactionsEnabled && transactionProvider != null) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + // can't create a transactional table if it has a row timestamp column + if (pkConstraint.getNumColumnsWithRowTimestamp() > 0 && transactionProvider != null) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + if ( + (isPhoenixTTLEnabled() + ? ttlProp != null + : TableProperty.TTL.getValue(commonFamilyProps) != null) + && transactionProvider != null + && transactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.SET_TTL) + ) { + throw new SQLExceptionInfo.Builder(PhoenixTransactionProvider.Feature.SET_TTL.getCode()) + .setMessage(transactionProvider.name()).setSchemaName(schemaName).setTableName(tableName) + .build().buildException(); + } + + // Put potentially inferred value into tableProps as it's used by the createTable call below + // to determine which coprocessors to install on the new table. + tableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, transactionProvider); + if (transactionProvider != null) { + // If TTL set, use transaction context TTL property name instead + // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? 
+ Object transactionTTL = commonFamilyProps.remove(ColumnFamilyDescriptorBuilder.TTL); + if (transactionTTL != null) { + commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, transactionTTL); + } + } + + Boolean useStatsForParallelizationProp = + (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps); + + boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId; + if (transactionProvider != null) { + // We turn on storeNulls for transactional tables for compatibility. This was required + // when Tephra was a supported txn engine option. After PHOENIX-6627, this may no longer + // be necessary. + // Tephra would have converted normal delete markers on the server which could mess up + // our secondary index code as the changes get committed prior to the + // maintenance code being able to see the prior state to update the rows correctly. + // A future tnx engine might do the same? + if (Boolean.FALSE.equals(storeNullsProp)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + storeNulls = true; + tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE); + + if (!sharedTable) { + Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS); + if (maxVersionsProp == null) { + if (parent != null) { + TableDescriptor desc = connection.getQueryServices() + .getTableDescriptor(parent.getPhysicalName().getBytes()); + if (desc != null) { + maxVersionsProp = + desc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions(); + } + } + if (maxVersionsProp == null) { + maxVersionsProp = connection.getQueryServices().getProps().getInt( + QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL); + } + commonFamilyProps.put(HConstants.VERSIONS, maxVersionsProp); + } + } + } + timestamp = timestamp == null + ? TransactionUtil.getTableTimestamp(connection, transactionProvider != null, + transactionProvider) + : timestamp; + + // Delay this check as it is supported to have IMMUTABLE_ROWS and SALT_BUCKETS defined on + // views + if (sharedTable) { + if (tableProps.get(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME) != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } + if (SchemaUtil.hasHTableDescriptorProps(tableProps)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build() + .buildException(); + } + } + + List colDefs = statement.getColumnDefs(); + LinkedHashMap columns; + LinkedHashSet pkColumns; + + if (tenantId != null && !sharedTable) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + if (autoPartitionSeq != null) { + int autoPartitionColIndex = multiTenant ? 
1 : 0; + PDataType dataType = colDefs.get(autoPartitionColIndex).getDataType(); + if (!PLong.INSTANCE.isCastableTo(dataType)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + } + + if (tableType == PTableType.VIEW) { + physicalNames = + Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString())); + if (viewType == ViewType.MAPPED) { + columns = Maps.newLinkedHashMap(); + pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size()); + } else { + // Propagate property values to VIEW. + // TODO: formalize the known set of these properties + // Manually transfer the ROW_KEY_ORDER_OPTIMIZABLE_BYTES from parent as we don't + // want to add this hacky flag to the schema (see PHOENIX-2067). + rowKeyOrderOptimizable = parent.rowKeyOrderOptimizable(); + if (rowKeyOrderOptimizable) { + UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetaData, + SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName), clientTimeStamp); + } + multiTenant = parent.isMultiTenant(); + saltBucketNum = parent.getBucketNum(); + isAppendOnlySchema = parent.isAppendOnlySchema(); + isImmutableRows = parent.isImmutableRows(); + if (updateCacheFrequencyProp == null) { + // set to the parent value if the property is not set on the view + updateCacheFrequency = parent.getUpdateCacheFrequency(); + } + disableWAL = (disableWALProp == null ? parent.isWALDisabled() : disableWALProp); + defaultFamilyName = parent.getDefaultFamilyName() == null + ? null + : parent.getDefaultFamilyName().getString(); + // TODO PHOENIX-4766 Add an options to stop sending parent metadata when creating views + List allColumns = parent.getColumns(); + if (saltBucketNum != null) { // Don't include salt column in columns, as it should not + // have it when created + allColumns = allColumns.subList(1, allColumns.size()); + } + columns = new LinkedHashMap(allColumns.size() + colDefs.size()); + for (PColumn column : allColumns) { + columns.put(column, column); + } + pkColumns = newLinkedHashSet(parent.getPKColumns()); + + // Add row linking view to its parent + try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK)) { + linkStatement.setString(1, tenantIdStr); + linkStatement.setString(2, schemaName); + linkStatement.setString(3, tableName); + linkStatement.setString(4, parent.getName().getString()); + linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue()); + linkStatement.setString(6, + parent.getTenantId() == null ? null : parent.getTenantId().getString()); + linkStatement.execute(); + } + // Add row linking parent to view + // TODO From 4.16 write the child links to SYSTEM.CHILD_LINK directly + try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_CHILD_LINK)) { + linkStatement.setString(1, + parent.getTenantId() == null ? null : parent.getTenantId().getString()); + linkStatement.setString(2, + parent.getSchemaName() == null ? 
null : parent.getSchemaName().getString()); + linkStatement.setString(3, parent.getTableName().getString()); + linkStatement.setString(4, tenantIdStr); + linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName)); + linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue()); + linkStatement.execute(); + } + } + } else { + columns = new LinkedHashMap(colDefs.size()); + pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1); // in case salted + } + + if (tableType == PTableType.CDC) { + if (parent.getType() == VIEW) { + physicalNames = Collections.singletonList(PNameFactory.newName(MetaDataUtil + .getViewIndexPhysicalName(parent.getBaseTableLogicalName(), isNamespaceMapped))); + } else { + physicalNames = Collections.singletonList(PNameFactory + .newName(SchemaUtil.getTableName(schemaName, CDCUtil.getCDCIndexName(tableName)))); + } + } + + // Don't add link for mapped view, as it just points back to itself and causes the drop to + // fail because it looks like there's always a view associated with it. + if (!physicalNames.isEmpty()) { + // Upsert physical name for mapped view only if the full physical table name is different + // than the full table name + // Otherwise, we end up with a self-referencing link and then cannot ever drop the view. + if ( + viewType != ViewType.MAPPED || (!physicalNames.get(0).getString() + .equals(SchemaUtil.getTableName(schemaName, tableName)) + && !physicalNames.get(0).getString() + .equals(SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped) + .getString())) + ) { + // Add row linking from data table row to physical table row + try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK)) { + for (PName physicalName : physicalNames) { + linkStatement.setString(1, tenantIdStr); + linkStatement.setString(2, schemaName); + linkStatement.setString(3, tableName); + linkStatement.setString(4, physicalName.getString()); + linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue()); + if (tableType == PTableType.VIEW) { + if (parent.getType() == PTableType.TABLE) { + linkStatement.setString(4, SchemaUtil.getTableName( + parent.getSchemaName().getString(), parent.getTableName().getString())); + linkStatement.setLong(6, parent.getSequenceNumber()); + } else { // This is a grandchild view, find the physical base table + PTable logicalTable = connection.getTable( + new PTableKey(null, SchemaUtil.replaceNamespaceSeparator(physicalName))); + linkStatement.setString(4, + SchemaUtil.getTableName(logicalTable.getSchemaName().getString(), + logicalTable.getTableName().getString())); + linkStatement.setLong(6, logicalTable.getSequenceNumber()); + } + // Set link to logical name + linkStatement.setString(7, null); + } else { + linkStatement.setLong(6, parent.getSequenceNumber()); + linkStatement.setString(7, PTableType.INDEX.getSerializedValue()); + } + linkStatement.execute(); + } + } + tableMetaData + .addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); + connection.rollback(); + } + } + + Map familyNames = Maps.newLinkedHashMap(); + boolean rowTimeStampColumnAlreadyFound = false; + int positionOffset = columns.size(); + if (saltBucketNum != null) { + positionOffset++; + if (addSaltColumn) { + pkColumns.add(SaltingUtil.SALTING_COLUMN); + } + } + int pkPositionOffset = pkColumns.size(); + int position = positionOffset; + EncodedCQCounter cqCounter = NULL_COUNTER; + Map changedCqCounters = new HashMap<>(colDefs.size()); + // Check for duplicate column 
qualifiers + Map> inputCqCounters = new HashMap<>(); + PTable viewPhysicalTable = null; + if (tableType == PTableType.VIEW) { /* - * For indexes we have already validated that the data table has the right kind and number of row_timestamp - * columns. So we don't need to perform any extra validations for them. + * We can't control what column qualifiers are used in HTable mapped to Phoenix views. So we + * are not able to encode column names. */ - if (tableType == TABLE) { - boolean isColumnDeclaredRowTimestamp = colDef.isRowTimestamp() || pkConstraint.isColumnRowTimestamp(columnDefName); - if (isColumnDeclaredRowTimestamp) { - boolean isColumnPartOfPk = colDef.isPK() || pkConstraint.contains(columnDefName); - // A column can be declared as ROW_TIMESTAMP only if it is part of the primary key - if (isColumnDeclaredRowTimestamp && !isColumnPartOfPk) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_PK_COL_ONLY) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - - // A column can be declared as ROW_TIMESTAMP only if it can be represented as a long - PDataType dataType = colDef.getDataType(); - if (isColumnDeclaredRowTimestamp && (dataType != PLong.INSTANCE && dataType != PUnsignedLong.INSTANCE && !dataType.isCoercibleTo(PTimestamp.INSTANCE))) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - - // Only one column can be declared as a ROW_TIMESTAMP column - if (rowTimeStampColAlreadyFound && isColumnDeclaredRowTimestamp) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_ONE_PK_COL_ONLY) - .setColumnName(columnDefName.getColumnName()).build().buildException(); - } - return true; - } - } - return false; - } - - /** - * While adding or dropping columns we write a cell to the SYSTEM.MUTEX table with the rowkey of the - * physical table to prevent conflicting concurrent modifications. 
For eg two client adding a column - * with the same name but different datatype, or once client dropping a column on a base table - * while another client creating a view or view index that requires the dropped column - */ - private boolean writeCell(String tenantId, String schemaName, String tableName, String columnName) - throws SQLException { - return connection.getQueryServices().writeMutexCell(tenantId, schemaName, tableName, columnName, null); - } - - /** - * Remove the cell that was written to to the SYSTEM.MUTEX table with the rowkey of the physical table - */ - private void deleteCell(String tenantId, String schemaName, String tableName, String columnName) - throws SQLException { - connection.getQueryServices().deleteMutexCell(tenantId, schemaName, tableName, columnName, null); - } - - /** - * - * Populate the properties for each column family referenced in the create table statement - * @param familyNames column families referenced in the create table statement - * @param commonFamilyProps properties common to all column families - * @param statement create table statement - * @param defaultFamilyName the default column family name - * @param isLocalIndex true if in the create local index path - * @param familyPropList list containing pairs of column families and their corresponding properties - * @throws SQLException - */ - private void populateFamilyPropsList(Map familyNames, Map commonFamilyProps, - CreateTableStatement statement, String defaultFamilyName, boolean isLocalIndex, - final List>> familyPropList) throws SQLException { - for (PName familyName : familyNames.values()) { - String fam = familyName.getString(); - Collection> propsForCF = - statement.getProps().get(IndexUtil.getActualColumnFamilyName(fam)); - // No specific properties for this column family, so add the common family properties - if (propsForCF.isEmpty()) { - familyPropList.add(new Pair<>(familyName.getBytes(),commonFamilyProps)); - } else { - Map combinedFamilyProps = Maps.newHashMapWithExpectedSize(propsForCF.size() + commonFamilyProps.size()); - combinedFamilyProps.putAll(commonFamilyProps); - for (Pair prop : propsForCF) { - // Don't allow specifying column families for TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE. - // These properties can only be applied for all column families of a table and can't be column family specific. - // See PHOENIX-3955 - if (!fam.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && MetaDataUtil.propertyNotAllowedToBeOutOfSync(prop.getFirst())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY) - .setMessage("Property: " + prop.getFirst()) - .build() - .buildException(); - } - combinedFamilyProps.put(prop.getFirst(), prop.getSecond()); - } - familyPropList.add(new Pair<>(familyName.getBytes(),combinedFamilyProps)); - } - } - - if (familyNames.isEmpty()) { - // If there are no family names, use the default column family name. This also takes care of the case when - // the table ddl has only PK cols present (which means familyNames is empty). - byte[] cf = - defaultFamilyName == null ? (!isLocalIndex? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES - : QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES) - : Bytes.toBytes(defaultFamilyName); - familyPropList.add(new Pair<>(cf, commonFamilyProps)); - } - } - - - /*** - * Get TTL defined for given entity (Index, View or Table) in hierarchy - * * For an index it will return TTL defined either from parent table - * or from parent view's hierarchy if it is defined. 
- * * For view it will return TTL defined from its parent table or from parent view's hierarchy - * if it is defined - * * For table it will just return TTL_NOT_DEFINED as it has no parent. - * @param parent entity's parent - * @return TTL from hierarchy if defined otherwise TTL_NOT_DEFINED. - * @throws TableNotFoundException if not able ot find any table in hierarchy - */ - private Integer checkAndGetTTLFromHierarchy(PTable parent) throws SQLException { - return parent != null ? (parent.getType() == TABLE ? parent.getTTL() - : (parent.getType() == VIEW && parent.getViewType() != MAPPED ? getTTLFromViewHierarchy(parent) : TTL_NOT_DEFINED)) - : TTL_NOT_DEFINED; - } - - /** - * Get TTL defined for the given View if it is defined in hierarchy. - * @param view - * @return appropriate TTL from Views defined above for the entity calling. - * @throws TableNotFoundException if not able to find any table in hierarchy - */ - private Integer getTTLFromViewHierarchy(PTable view) throws SQLException { - return view.getTTL() != TTL_NOT_DEFINED - ? Integer.valueOf(view.getTTL()) : (checkIfParentIsTable(view) - ? PhoenixRuntime.getTable(connection, view.getPhysicalNames().get(0).toString()).getTTL() - : getTTLFromViewHierarchy(PhoenixRuntime.getTable(connection, view.getParentName().toString()))); - } - - private boolean checkIfParentIsTable(PTable view) { - PName parentName = view.getParentName(); - if (parentName == null) { - //means this is a view on dataTable - return true; - } - return parentName.getString().equals(view.getPhysicalName().getString()); - } - - private PTable createTableInternal( - CreateTableStatement statement, - byte[][] splits, - final PTable parent, - String viewStatement, - ViewType viewType, - PDataType viewIndexIdType, - final byte[] rowKeyMatcher, - final byte[][] viewColumnConstants, - final BitSet isViewColumnReferenced, - boolean allocateIndexId, - IndexType indexType, - Date asyncCreatedDate, - Set cdcIncludeScopes, - Map tableProps, - Map commonFamilyProps) throws SQLException { - final PTableType tableType = statement.getTableType(); - boolean wasAutoCommit = connection.getAutoCommit(); - TableName tableNameNode = null; - boolean allowSystemCatalogRollback = - connection.getQueryServices().getProps().getBoolean( - QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, - QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); - Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); - String parentPhysicalName = - (parent!=null && parent.getPhysicalName()!=null) ? parent.getPhysicalName().getString() : null; - String parentPhysicalSchemaName = parentPhysicalName!=null ? - SchemaUtil.getSchemaNameFromFullName(parentPhysicalName) : null; - String parentPhysicalTableName = parentPhysicalName!=null ? - SchemaUtil.getTableNameFromFullName(parentPhysicalName) : null; - connection.rollback(); - try { - connection.setAutoCommit(false); - List tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3); - - tableNameNode = statement.getTableName(); - final String schemaName = connection.getSchema() != null && tableNameNode.getSchemaName() == null ? connection.getSchema() : tableNameNode.getSchemaName(); - final String tableName = tableNameNode.getTableName(); - String parentTableName = null; - PName tenantId = connection.getTenantId(); - String tenantIdStr = tenantId == null ? null : tenantId.getString(); - Long scn = connection.getSCN(); - long clientTimeStamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; - boolean multiTenant = false; - boolean storeNulls = false; - TransactionFactory.Provider transactionProvider = (parent!= null) ? parent.getTransactionProvider() : null; - Integer saltBucketNum = null; - String defaultFamilyName = null; - boolean isImmutableRows = false; - boolean isAppendOnlySchema = false; - List physicalNames = Collections.emptyList(); - boolean addSaltColumn = false; - boolean rowKeyOrderOptimizable = true; - Long timestamp = null; - boolean isNamespaceMapped = parent == null - ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps()) - : parent.isNamespaceMapped(); - boolean isLocalIndex = indexType == IndexType.LOCAL; - QualifierEncodingScheme encodingScheme = NON_ENCODED_QUALIFIERS; - ImmutableStorageScheme immutableStorageScheme = ONE_CELL_PER_COLUMN; - int baseTableColumnCount = - tableType == PTableType.VIEW ? parent.getColumns().size() - : QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; - - Integer ttl = TTL_NOT_DEFINED; - Integer ttlFromHierarchy = TTL_NOT_DEFINED; - Integer ttlProp = (Integer) TableProperty.TTL.getValue(tableProps); - - // Validate TTL prop value if set - if (ttlProp != null) { - if (ttlProp < 0) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) - .setMessage(String.format("entity = %s, TTL value should be > 0", - tableName)) - .build() - .buildException(); - } - if (!isViewTTLEnabled() && tableType == VIEW) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. - VIEW_TTL_NOT_ENABLED) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - - if (tableType != TABLE && (tableType != VIEW || viewType != UPDATABLE)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. - TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - ttlFromHierarchy = checkAndGetTTLFromHierarchy(parent); - if (ttlFromHierarchy != TTL_NOT_DEFINED) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. - TTL_ALREADY_DEFINED_IN_HIERARCHY) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - - ttl = ttlProp; - } else { - ttlFromHierarchy = checkAndGetTTLFromHierarchy(parent); - } - - Boolean isChangeDetectionEnabledProp = - (Boolean) TableProperty.CHANGE_DETECTION_ENABLED.getValue(tableProps); - verifyChangeDetectionTableType(tableType, isChangeDetectionEnabledProp); - - String schemaVersion = (String) TableProperty.SCHEMA_VERSION.getValue(tableProps); - String streamingTopicName = (String) TableProperty.STREAMING_TOPIC_NAME.getValue(tableProps); - Long maxLookbackAge = (Long) TableProperty.MAX_LOOKBACK_AGE.getValue(tableProps); - - if (maxLookbackAge != null && tableType != TABLE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. - MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - String cdcIncludeScopesStr = cdcIncludeScopes == null ? null : - CDCUtil.makeChangeScopeStringFromEnums(cdcIncludeScopes); - - if (parent != null && tableType == PTableType.INDEX) { - timestamp = TransactionUtil.getTableTimestamp(connection, transactionProvider != null, transactionProvider); - isImmutableRows = parent.isImmutableRows(); - isAppendOnlySchema = parent.isAppendOnlySchema(); - - // Index on view - // TODO: Can we support a multi-tenant index directly on a multi-tenant - // table instead of only a view? 
We don't have anywhere to put the link - // from the table to the index, though. - if (isLocalIndex || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED)) { - PName physicalName = parent.getPhysicalName(); - - saltBucketNum = parent.getBucketNum(); - addSaltColumn = (saltBucketNum != null && !isLocalIndex); - defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString(); - if (isLocalIndex) { - defaultFamilyName = - parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY - : IndexUtil.getLocalIndexColumnFamily(parent.getDefaultFamilyName().getString()); - saltBucketNum = null; - // Set physical name of local index table - physicalNames = Collections.singletonList(PNameFactory.newName(physicalName.getBytes())); - } else { - defaultFamilyName = parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : parent.getDefaultFamilyName().getString(); - // Set physical name of view index table - // Parent is a view and this is an index so we need to get _IDX_+logical name of base table. - // parent.getPhysicalName is Schema.Physical of base and we can't use it since the _IDX_ table is logical name of the base. - // parent.getName is the view name. parent.getBaseTableLogicalName is the logical name of the base table - PName parentName = parent.getBaseTableLogicalName(); - physicalNames = Collections.singletonList(PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(parentName, isNamespaceMapped))); - } - } - - multiTenant = parent.isMultiTenant(); - storeNulls = parent.getStoreNulls(); - parentTableName = parent.getTableName().getString(); - // Pass through data table sequence number so we can check it hasn't changed - try (PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM)) { - incrementStatement.setString(1, tenantIdStr); - incrementStatement.setString(2, schemaName); - incrementStatement.setString(3, parentTableName); - incrementStatement.setLong(4, parent.getSequenceNumber()); - incrementStatement.execute(); - // Get list of mutations and add to table meta data that will be passed to server - // to guarantee order. 
This row will always end up last - tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); - connection.rollback(); - } - - // Add row linking from data table row to index table row - try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK)) { - linkStatement.setString(1, tenantIdStr); - linkStatement.setString(2, schemaName); - linkStatement.setString(3, parentTableName); - linkStatement.setString(4, tableName); - linkStatement.setByte(5, LinkType.INDEX_TABLE.getSerializedValue()); - linkStatement.setLong(6, parent.getSequenceNumber()); - linkStatement.setString(7, PTableType.INDEX.getSerializedValue()); - linkStatement.execute(); - } - - // Add row linking index table to parent table for indexes on views - if (parent.getType() == PTableType.VIEW) { - try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_INDEX_PARENT_LINK)) { - linkStatement.setString(1, tenantIdStr); - linkStatement.setString(2, schemaName); - linkStatement.setString(3, tableName); - linkStatement.setString(4, parent.getName().getString()); - linkStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue()); - linkStatement.execute(); - } - } - } - - PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint(); - String pkName = null; - List> pkColumnsNames = Collections.>emptyList(); - Iterator> pkColumnsIterator = Collections.emptyIterator(); - if (pkConstraint != null) { - pkColumnsNames = pkConstraint.getColumnNames(); - pkColumnsIterator = pkColumnsNames.iterator(); - pkName = pkConstraint.getName(); - } - - // Although unusual, it's possible to set a mapped VIEW as having immutable rows. - // This tells Phoenix that you're managing the index maintenance yourself. - if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) { - // TODO remove TableProperty.IMMUTABLE_ROWS at the next major release - Boolean isImmutableRowsProp = statement.immutableRows()!=null? statement.immutableRows() : - (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps); - if (isImmutableRowsProp == null) { - isImmutableRows = connection.getQueryServices().getProps().getBoolean(QueryServices.IMMUTABLE_ROWS_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_ROWS); - } else { - isImmutableRows = isImmutableRowsProp; - } - } - if (tableType == PTableType.TABLE) { - Boolean isAppendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps); - isAppendOnlySchema = isAppendOnlySchemaProp!=null ? 
isAppendOnlySchemaProp : false; - } - - // Can't set any of these on views or shared indexes on views - if (tableType != PTableType.VIEW && tableType != PTableType.CDC && !allocateIndexId) { - saltBucketNum = (Integer) TableProperty.SALT_BUCKETS.getValue(tableProps); - if (saltBucketNum != null) { - if (saltBucketNum < 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM).build().buildException(); - } - } - // Salt the index table if the data table is salted - if (saltBucketNum == null) { - if (parent != null) { - saltBucketNum = parent.getBucketNum(); - } - } else if (saltBucketNum.intValue() == 0) { - saltBucketNum = null; // Provides a way for an index to not be salted if its data table is salted - } - addSaltColumn = (saltBucketNum != null); - } - - // Can't set MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an INDEX or a non mapped VIEW - if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) { - Boolean multiTenantProp = (Boolean) tableProps.get(PhoenixDatabaseMetaData.MULTI_TENANT); - multiTenant = Boolean.TRUE.equals(multiTenantProp); - defaultFamilyName = (String)TableProperty.DEFAULT_COLUMN_FAMILY.getValue(tableProps); - } - - boolean disableWAL = false; - Boolean disableWALProp = (Boolean) TableProperty.DISABLE_WAL.getValue(tableProps); - if (disableWALProp != null) { - disableWAL = disableWALProp; - } - long updateCacheFrequency = (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue( - connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB)); - if (tableType == PTableType.INDEX && parent != null) { - updateCacheFrequency = parent.getUpdateCacheFrequency(); - } - Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps); - if (tableType != PTableType.INDEX && updateCacheFrequencyProp != null) { - updateCacheFrequency = updateCacheFrequencyProp; - } - - String physicalTableName = (String) TableProperty.PHYSICAL_TABLE_NAME.getValue(tableProps); - String autoPartitionSeq = (String) TableProperty.AUTO_PARTITION_SEQ.getValue(tableProps); - Long guidePostsWidth = (Long) TableProperty.GUIDE_POSTS_WIDTH.getValue(tableProps); - - // We only allow setting guide post width for a base table - if (guidePostsWidth != null && tableType != PTableType.TABLE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - - Boolean storeNullsProp = (Boolean) TableProperty.STORE_NULLS.getValue(tableProps); - if (storeNullsProp == null) { - if (parent == null) { - storeNulls = connection.getQueryServices().getProps().getBoolean( - QueryServices.DEFAULT_STORE_NULLS_ATTRIB, - QueryServicesOptions.DEFAULT_STORE_NULLS); - tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.valueOf(storeNulls)); - } - } else { - storeNulls = storeNullsProp; - } - Boolean transactionalProp = (Boolean) TableProperty.TRANSACTIONAL.getValue(tableProps); - TransactionFactory.Provider transactionProviderProp = (TransactionFactory.Provider) TableProperty.TRANSACTION_PROVIDER.getValue(tableProps); - if ((transactionalProp != null || transactionProviderProp != null) && parent != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - if (parent == null) { - boolean transactional; - 
if (transactionProviderProp != null) { - transactional = true; - } else if (transactionalProp == null) { - transactional = connection.getQueryServices().getProps().getBoolean( - QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, - QueryServicesOptions.DEFAULT_TABLE_ISTRANSACTIONAL); - } else { - transactional = transactionalProp; - } - if (transactional) { - if (transactionProviderProp == null) { - transactionProvider = (TransactionFactory.Provider)TableProperty.TRANSACTION_PROVIDER.getValue( - connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_TRANSACTION_PROVIDER_ATTRIB, - QueryServicesOptions.DEFAULT_TRANSACTION_PROVIDER)); - } else { - transactionProvider = transactionProviderProp; - } - } - } - boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); - // can't create a transactional table if transactions are not enabled - if (!transactionsEnabled && transactionProvider != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - // can't create a transactional table if it has a row timestamp column - if (pkConstraint.getNumColumnsWithRowTimestamp() > 0 && transactionProvider != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - if ((isPhoenixTTLEnabled() ? ttlProp != null - : TableProperty.TTL.getValue(commonFamilyProps) != null) - && transactionProvider != null - && transactionProvider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.SET_TTL)) { - throw new SQLExceptionInfo.Builder(PhoenixTransactionProvider.Feature.SET_TTL.getCode()) - .setMessage(transactionProvider.name()) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - - // Put potentially inferred value into tableProps as it's used by the createTable call below - // to determine which coprocessors to install on the new table. - tableProps.put(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, transactionProvider); - if (transactionProvider != null) { - // If TTL set, use transaction context TTL property name instead - // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? - Object transactionTTL = commonFamilyProps.remove(ColumnFamilyDescriptorBuilder.TTL); - if (transactionTTL != null) { - commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, transactionTTL); - } - } - - Boolean useStatsForParallelizationProp = - (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps); - - boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId; - if (transactionProvider != null) { - // We turn on storeNulls for transactional tables for compatibility. This was required - // when Tephra was a supported txn engine option. After PHOENIX-6627, this may no longer - // be necessary. - // Tephra would have converted normal delete markers on the server which could mess up - // our secondary index code as the changes get committed prior to the - // maintenance code being able to see the prior state to update the rows correctly. - // A future tnx engine might do the same? 
- if (Boolean.FALSE.equals(storeNullsProp)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - storeNulls = true; - tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE); - - if (!sharedTable) { - Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS); - if (maxVersionsProp == null) { - if (parent != null) { - TableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes()); - if (desc != null) { - maxVersionsProp = desc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions(); - } - } - if (maxVersionsProp == null) { - maxVersionsProp = connection.getQueryServices().getProps().getInt( - QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL); - } - commonFamilyProps.put(HConstants.VERSIONS, maxVersionsProp); - } - } - } - timestamp = timestamp==null ? TransactionUtil.getTableTimestamp(connection, transactionProvider != null, transactionProvider) : timestamp; - - // Delay this check as it is supported to have IMMUTABLE_ROWS and SALT_BUCKETS defined on views - if (sharedTable) { - if (tableProps.get(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME) != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - if (SchemaUtil.hasHTableDescriptorProps(tableProps)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build() - .buildException(); - } - } - - List colDefs = statement.getColumnDefs(); - LinkedHashMap columns; - LinkedHashSet pkColumns; - - if (tenantId != null && !sharedTable) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - if (autoPartitionSeq!=null) { - int autoPartitionColIndex = multiTenant ? 1 : 0; - PDataType dataType = colDefs.get(autoPartitionColIndex).getDataType(); - if (!PLong.INSTANCE.isCastableTo(dataType)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - } - - if (tableType == PTableType.VIEW) { - physicalNames = Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString())); - if (viewType == ViewType.MAPPED) { - columns = Maps.newLinkedHashMap(); - pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size()); - } else { - // Propagate property values to VIEW. - // TODO: formalize the known set of these properties - // Manually transfer the ROW_KEY_ORDER_OPTIMIZABLE_BYTES from parent as we don't - // want to add this hacky flag to the schema (see PHOENIX-2067). 
- rowKeyOrderOptimizable = parent.rowKeyOrderOptimizable(); - if (rowKeyOrderOptimizable) { - UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetaData, SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName), clientTimeStamp); - } - multiTenant = parent.isMultiTenant(); - saltBucketNum = parent.getBucketNum(); - isAppendOnlySchema = parent.isAppendOnlySchema(); - isImmutableRows = parent.isImmutableRows(); - if (updateCacheFrequencyProp == null) { - // set to the parent value if the property is not set on the view - updateCacheFrequency = parent.getUpdateCacheFrequency(); - } - disableWAL = (disableWALProp == null ? parent.isWALDisabled() : disableWALProp); - defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString(); - // TODO PHOENIX-4766 Add an options to stop sending parent metadata when creating views - List allColumns = parent.getColumns(); - if (saltBucketNum != null) { // Don't include salt column in columns, as it should not have it when created - allColumns = allColumns.subList(1, allColumns.size()); - } - columns = new LinkedHashMap(allColumns.size() + colDefs.size()); - for (PColumn column : allColumns) { - columns.put(column, column); - } - pkColumns = newLinkedHashSet(parent.getPKColumns()); - - // Add row linking view to its parent - try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK)) { - linkStatement.setString(1, tenantIdStr); - linkStatement.setString(2, schemaName); - linkStatement.setString(3, tableName); - linkStatement.setString(4, parent.getName().getString()); - linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue()); - linkStatement.setString(6, parent.getTenantId() == null ? null : parent.getTenantId().getString()); - linkStatement.execute(); - } - // Add row linking parent to view - // TODO From 4.16 write the child links to SYSTEM.CHILD_LINK directly - try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_CHILD_LINK)) { - linkStatement.setString(1, parent.getTenantId() == null ? null : parent.getTenantId().getString()); - linkStatement.setString(2, parent.getSchemaName() == null ? null : parent.getSchemaName().getString()); - linkStatement.setString(3, parent.getTableName().getString()); - linkStatement.setString(4, tenantIdStr); - linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName)); - linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue()); - linkStatement.execute(); - } - } - } else { - columns = new LinkedHashMap(colDefs.size()); - pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1); // in case salted - } - - if (tableType == PTableType.CDC) { - if (parent.getType() == VIEW) { - physicalNames = Collections.singletonList( - PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName( - parent.getBaseTableLogicalName(), isNamespaceMapped))); - } - else { - physicalNames = Collections.singletonList( - PNameFactory.newName(SchemaUtil.getTableName(schemaName, - CDCUtil.getCDCIndexName(tableName)))); - } - } - - // Don't add link for mapped view, as it just points back to itself and causes the drop to - // fail because it looks like there's always a view associated with it. - if (!physicalNames.isEmpty()) { - // Upsert physical name for mapped view only if the full physical table name is different than the full table name - // Otherwise, we end up with a self-referencing link and then cannot ever drop the view. 
- if (viewType != ViewType.MAPPED - || (!physicalNames.get(0).getString().equals(SchemaUtil.getTableName(schemaName, tableName)) - && !physicalNames.get(0).getString().equals(SchemaUtil.getPhysicalHBaseTableName( - schemaName, tableName, isNamespaceMapped).getString()))) { - // Add row linking from data table row to physical table row - try (PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK)) { - for (PName physicalName : physicalNames) { - linkStatement.setString(1, tenantIdStr); - linkStatement.setString(2, schemaName); - linkStatement.setString(3, tableName); - linkStatement.setString(4, physicalName.getString()); - linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue()); - if (tableType == PTableType.VIEW) { - if (parent.getType() == PTableType.TABLE) { - linkStatement.setString(4, SchemaUtil.getTableName(parent.getSchemaName().getString(), parent.getTableName().getString())); - linkStatement.setLong(6, parent.getSequenceNumber()); - } else { //This is a grandchild view, find the physical base table - PTable logicalTable = connection.getTable(new PTableKey(null, SchemaUtil.replaceNamespaceSeparator(physicalName))); - linkStatement.setString(4, SchemaUtil.getTableName(logicalTable.getSchemaName().getString(), logicalTable.getTableName().getString())); - linkStatement.setLong(6, logicalTable.getSequenceNumber()); - } - // Set link to logical name - linkStatement.setString(7, null); - } else { - linkStatement.setLong(6, parent.getSequenceNumber()); - linkStatement.setString(7, PTableType.INDEX.getSerializedValue()); - } - linkStatement.execute(); - } - } - tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); - connection.rollback(); - } - } - - Map familyNames = Maps.newLinkedHashMap(); - boolean rowTimeStampColumnAlreadyFound = false; - int positionOffset = columns.size(); - if (saltBucketNum != null) { - positionOffset++; - if (addSaltColumn) { - pkColumns.add(SaltingUtil.SALTING_COLUMN); - } - } - int pkPositionOffset = pkColumns.size(); - int position = positionOffset; - EncodedCQCounter cqCounter = NULL_COUNTER; - Map changedCqCounters = new HashMap<>(colDefs.size()); - // Check for duplicate column qualifiers - Map> inputCqCounters = new HashMap<>(); - PTable viewPhysicalTable = null; - if (tableType == PTableType.VIEW) { - /* - * We can't control what column qualifiers are used in HTable mapped to Phoenix views. So we are not - * able to encode column names. - */ - if (viewType != MAPPED) { - /* - * For regular phoenix views, use the storage scheme of the physical table since they all share the - * the same HTable. Views always use the base table's column qualifier counter for doling out - * encoded column qualifier. - */ - viewPhysicalTable = connection.getTable(physicalNames.get(0).getString()); - immutableStorageScheme = viewPhysicalTable.getImmutableStorageScheme(); - encodingScheme = viewPhysicalTable.getEncodingScheme(); - if (EncodedColumnsUtil.usesEncodedColumnNames(viewPhysicalTable)) { - cqCounter = viewPhysicalTable.getEncodedCQCounter(); - } - } - } - // System tables have hard-coded column qualifiers. So we can't use column encoding for them. - else if (!SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaName, tableName)))|| SchemaUtil.isLogTable(schemaName, tableName)) { - /* - * Indexes inherit the storage scheme of the parent data tables. Otherwise, we always attempt to - * create tables with encoded column names. 
- * - * Also of note is the case with shared indexes i.e. local indexes and view indexes. In these cases, - * column qualifiers for covered columns don't have to be unique because rows of the logical indexes are - * partitioned by the virtue of indexId present in the row key. As such, different shared indexes can use - * potentially overlapping column qualifiers. - * - */ - if (parent != null) { - Byte encodingSchemeSerializedByte = (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); - // Table has encoding scheme defined - if (encodingSchemeSerializedByte != null) { - encodingScheme = getEncodingScheme(tableProps, schemaName, tableName, transactionProvider); - } else { - encodingScheme = parent.getEncodingScheme(); - } - - ImmutableStorageScheme immutableStorageSchemeProp = (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME.getValue(tableProps); - if (immutableStorageSchemeProp == null) { - immutableStorageScheme = parent.getImmutableStorageScheme(); - } else { - checkImmutableStorageSchemeForIndex(immutableStorageSchemeProp, schemaName, tableName, transactionProvider); - immutableStorageScheme = immutableStorageSchemeProp; - } - - if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) { - if (encodingScheme == NON_ENCODED_QUALIFIERS) { - if (encodingSchemeSerializedByte != null) { - // encoding scheme is set as non-encoded on purpose, so we should fail - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } else { - // encoding scheme is inherited from parent but it is not compatible with Single Cell. - encodingScheme = - QualifierEncodingScheme.fromSerializedValue( - (byte) QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES); - } - } - } - - if (tableType != CDC && - parent.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS && - immutableStorageScheme == ONE_CELL_PER_COLUMN) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE) - .setSchemaName(schemaName).setTableName(tableName).build() - .buildException(); - } - LOGGER.info(String.format("STORAGE--ENCODING: %s--%s", immutableStorageScheme, encodingScheme)); - } else { - encodingScheme = getEncodingScheme(tableProps, schemaName, tableName, transactionProvider); - - ImmutableStorageScheme immutableStorageSchemeProp = - (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME - .getValue(tableProps); - if (immutableStorageSchemeProp == null) { - // Ignore default if transactional and column encoding is not supported - if (transactionProvider == null || - !transactionProvider.getTransactionProvider().isUnsupported( - PhoenixTransactionProvider.Feature.COLUMN_ENCODING)) { - if (multiTenant) { - immutableStorageScheme = - ImmutableStorageScheme - .valueOf(connection - .getQueryServices() - .getProps() - .get( - QueryServices.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, - QueryServicesOptions.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME)); - } else { - if (isImmutableRows) { - immutableStorageScheme = - ImmutableStorageScheme - .valueOf(connection - .getQueryServices() - .getProps() - .get( - QueryServices.DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, - QueryServicesOptions.DEFAULT_IMMUTABLE_STORAGE_SCHEME)); - } else { - immutableStorageScheme = ONE_CELL_PER_COLUMN; - } - } - } - } else { - immutableStorageScheme = isImmutableRows ? 
immutableStorageSchemeProp : ONE_CELL_PER_COLUMN; - checkImmutableStorageSchemeForIndex(immutableStorageScheme, schemaName, tableName, transactionProvider); - } - if (immutableStorageScheme != ONE_CELL_PER_COLUMN - && encodingScheme == NON_ENCODED_QUALIFIERS) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) - .setSchemaName(schemaName).setTableName(tableName).build() - .buildException(); - } - } - cqCounter = encodingScheme != NON_ENCODED_QUALIFIERS ? new EncodedCQCounter() : NULL_COUNTER; - if (encodingScheme != NON_ENCODED_QUALIFIERS && statement.getFamilyCQCounters() != null) - { - for (Map.Entry cq : statement.getFamilyCQCounters().entrySet()) { - if (cq.getValue() < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_CQ) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - cqCounter.setValue(cq.getKey(), cq.getValue()); - changedCqCounters.put(cq.getKey(), cqCounter.getNextQualifier(cq.getKey())); - inputCqCounters.putIfAbsent(cq.getKey(), new HashSet()); - } - } - } - - boolean wasPKDefined = false; - // Keep track of all columns that are newly added to a view - Set viewNewColumnPositions = - Sets.newHashSetWithExpectedSize(colDefs.size()); - Set pkColumnNames = new HashSet<>(); - for (PColumn pColumn : pkColumns) { - pkColumnNames.add(pColumn.getName().toString()); - } - for (ColumnDef colDef : colDefs) { - rowTimeStampColumnAlreadyFound = checkAndValidateRowTimestampCol(colDef, pkConstraint, rowTimeStampColumnAlreadyFound, tableType); - if (colDef.isPK()) { // i.e. the column is declared as CREATE TABLE COLNAME DATATYPE PRIMARY KEY... - if (wasPKDefined) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS) - .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); - } - wasPKDefined = true; - } else { - // do not allow setting NOT-NULL constraint on non-primary columns. - if ( !colDef.isNull() && !isImmutableRows && - ( wasPKDefined || !isPkColumn(pkConstraint, colDef))) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) - .setSchemaName(schemaName) - .setTableName(tableName) - .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); - } - } - ColumnName columnDefName = colDef.getColumnDefName(); - String colDefFamily = columnDefName.getFamilyName(); - boolean isPkColumn = isPkColumn(pkConstraint, colDef); - String cqCounterFamily = null; - if (!isPkColumn) { - if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS && encodingScheme != NON_ENCODED_QUALIFIERS) { - // For this scheme we track column qualifier counters at the column family level. - cqCounterFamily = colDefFamily != null ? colDefFamily : (defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY); - } else { - // For other schemes, column qualifier counters are tracked using the default column family. - cqCounterFamily = defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY; - } - } - // Use position as column qualifier if APPEND_ONLY_SCHEMA to prevent gaps in - // the column encoding (PHOENIX-4737). 
- Integer encodedCQ = null; - if (!isPkColumn) { - if (colDef.getEncodedQualifier() != null && encodingScheme != NON_ENCODED_QUALIFIERS) { - if (cqCounter.getNextQualifier(cqCounterFamily) > ENCODED_CQ_COUNTER_INITIAL_VALUE && - !inputCqCounters.containsKey(cqCounterFamily)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_CQ) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - - if (statement.getFamilyCQCounters() == null || - statement.getFamilyCQCounters().get(cqCounterFamily) == null) { - if (colDef.getEncodedQualifier() >= cqCounter.getNextQualifier(cqCounterFamily)) { - cqCounter.setValue(cqCounterFamily, colDef.getEncodedQualifier()); - cqCounter.increment(cqCounterFamily); - } - changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily)); - } - - encodedCQ = colDef.getEncodedQualifier(); - if (encodedCQ < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE || - encodedCQ >= cqCounter.getNextQualifier(cqCounterFamily)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_CQ) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - - inputCqCounters.putIfAbsent(cqCounterFamily, new HashSet()); - Set familyCounters = inputCqCounters.get(cqCounterFamily); - if (!familyCounters.add(encodedCQ)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_CQ) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - } else { - if (inputCqCounters.containsKey(cqCounterFamily)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_CQ) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - - if (isAppendOnlySchema) { - encodedCQ = Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + position); - } else { - encodedCQ = cqCounter.getNextQualifier(cqCounterFamily); - } - } - } - byte[] columnQualifierBytes = null; - try { - columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(columnDefName.getColumnName(), encodedCQ, encodingScheme, isPkColumn); - } - catch (QualifierOutOfRangeException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, columnQualifierBytes, isImmutableRows); - if (!isAppendOnlySchema && colDef.getEncodedQualifier() == null - && cqCounter.increment(cqCounterFamily)) { - changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily)); - } - if (SchemaUtil.isPKColumn(column)) { - // TODO: remove this constraint? 
- if (pkColumnsIterator.hasNext() && !column.getName().getString().equals(pkColumnsIterator.next().getFirst().getColumnName())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER) - .setSchemaName(schemaName) - .setTableName(tableName) - .setColumnName(column.getName().getString()) - .build().buildException(); - } - if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) { - throwIfLastPKOfParentIsVariableLength(parent, schemaName, tableName, colDef); - } - if (!pkColumns.add(column)) { - throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString()); - } - } - // check for duplicate column - if (isDuplicateColumn(columns, pkColumnNames, column)) { - throw new ColumnAlreadyExistsException(schemaName, tableName, - column.getName().getString()); - } else if (tableType == VIEW) { - viewNewColumnPositions.add(column.getPosition()); - } - if (isPkColumn) { - pkColumnNames.add(column.getName().toString()); - } - if ((colDef.getDataType() == PVarbinary.INSTANCE || colDef.getDataType().isArrayType()) - && SchemaUtil.isPKColumn(column) - && pkColumnsIterator.hasNext()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_IN_ROW_KEY) - .setSchemaName(schemaName) - .setTableName(tableName) - .setColumnName(column.getName().getString()) - .build().buildException(); - } - if (column.getFamilyName() != null) { - familyNames.put( - IndexUtil.getActualColumnFamilyName(column.getFamilyName().getString()), - column.getFamilyName()); - } - } - - // We need a PK definition for a TABLE or mapped VIEW - if (!wasPKDefined && pkColumnsNames.isEmpty() && tableType != PTableType.VIEW && viewType != ViewType.MAPPED) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING) - .setSchemaName(schemaName) - .setTableName(tableName) - .build().buildException(); - } - if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - pkPositionOffset) { // Then a column name in the primary key constraint wasn't resolved - Iterator> pkColumnNamesIterator = pkColumnsNames.iterator(); - while (pkColumnNamesIterator.hasNext()) { - ColumnName colName = pkColumnNamesIterator.next().getFirst(); - ColumnDef colDef = findColumnDefOrNull(colDefs, colName); - if (colDef == null) { - throw new ColumnNotFoundException(schemaName, tableName, null, colName.getColumnName()); - } - if (colDef.getColumnDefName().getFamilyName() != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME) - .setSchemaName(schemaName) - .setTableName(tableName) - .setColumnName(colDef.getColumnDefName().getColumnName() ) - .setFamilyName(colDef.getColumnDefName().getFamilyName()) - .build().buildException(); - } - } - // The above should actually find the specific one, but just in case... 
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT) - .setSchemaName(schemaName) - .setTableName(tableName) - .build().buildException(); - } - - if (!statement.getProps().isEmpty()) { - for (String familyName : statement.getProps().keySet()) { - if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { - if (familyNames.get(familyName) == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY) - .setFamilyName(familyName).build().buildException(); - } else if (statement.getTableType() == PTableType.VIEW) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException(); - } - } - } - } - throwIfInsufficientColumns(schemaName, tableName, pkColumns, saltBucketNum!=null, multiTenant); - - List>> familyPropList = Lists.newArrayListWithExpectedSize(familyNames.size()); - populateFamilyPropsList(familyNames, commonFamilyProps, statement, defaultFamilyName, isLocalIndex, familyPropList); - - // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists - if (SchemaUtil.isMetaTable(schemaName,tableName)) { - // TODO: what about stats for system catalog? - PName newSchemaName = PNameFactory.newName(schemaName); - // Column names and qualifiers and hardcoded for system tables. - PTable table = new PTableImpl.Builder() - .setType(tableType) - .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP) - .setIndexDisableTimestamp(0L) - .setSequenceNumber(PTable.INITIAL_SEQ_NUM) - .setImmutableRows(isImmutableRows) - .setDisableWAL(Boolean.TRUE.equals(disableWAL)) - .setMultiTenant(false) - .setStoreNulls(false) - .setViewIndexIdType(viewIndexIdType) - .setIndexType(indexType) - .setUpdateCacheFrequency(0) - .setNamespaceMapped(isNamespaceMapped) - .setAutoPartitionSeqName(autoPartitionSeq) - .setAppendOnlySchema(isAppendOnlySchema) - .setImmutableStorageScheme(ONE_CELL_PER_COLUMN) - .setQualifierEncodingScheme(NON_ENCODED_QUALIFIERS) - .setBaseColumnCount(QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) - .setUseStatsForParallelization(true) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(tenantId) - .setSchemaName(newSchemaName) - .setTableName(PNameFactory.newName(tableName)) - .setPkName(PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME)) - .setDefaultFamilyName(defaultFamilyName == null ? null : - PNameFactory.newName(defaultFamilyName)) - .setRowKeyOrderOptimizable(true) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(columns.values()) - .setLastDDLTimestamp(0L) - .setIndexWhere(statement.getWhereClause() == null ? null - : statement.getWhereClause().toString()) - .setRowKeyMatcher(rowKeyMatcher) - .setTTL(TTL_NOT_DEFINED) - .build(); - connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP); - } - - // Update column qualifier counters - if (EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme)) { - // Store the encoded column counter for phoenix entities that have their own hbase - // tables i.e. base tables and indexes. - String schemaNameToUse = tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName; - String tableNameToUse = tableType == VIEW ? 
viewPhysicalTable.getTableName().getString() : tableName; - boolean sharedIndex = tableType == PTableType.INDEX && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW); - // For local indexes and indexes on views, pass on the the tenant id since all their meta-data rows have - // tenant ids in there. - String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null; - // When a view adds its own columns, then we need to increase the sequence number of the base table - // too since we want clients to get the latest PTable of the base table. - for (Entry entry : changedCqCounters.entrySet()) { - try (PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) { - linkStatement.setString(1, tenantIdToUse); - linkStatement.setString(2, schemaNameToUse); - linkStatement.setString(3, tableNameToUse); - linkStatement.setString(4, entry.getKey()); - linkStatement.setInt(5, entry.getValue()); - linkStatement.execute(); - } - } - if (tableType == VIEW && !changedCqCounters.isEmpty()) { - try (PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM)) { - incrementStatement.setString(1, null); - incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString()); - incrementStatement.setString(3, viewPhysicalTable.getTableName().getString()); - incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1); - incrementStatement.execute(); - } - } - if (connection.getMutationState().toMutations(timestamp).hasNext()) { - tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); - connection.rollback(); - } - } - - short nextKeySeq = 0; - - List columnMetadata = Lists.newArrayListWithExpectedSize(columns.size()); - boolean isRegularView = (tableType == PTableType.VIEW && viewType!=ViewType.MAPPED); - for (Map.Entry entry : columns.entrySet()) { - PColumn column = entry.getValue(); - final int columnPosition = column.getPosition(); - // For client-side cache, we need to update the column - // set the autoPartition column attributes - if (parent != null && parent.getAutoPartitionSeqName() != null - && parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)).equals(column)) { - entry.setValue(column = new DelegateColumn(column) { - @Override - public byte[] getViewConstant() { - // set to non-null value so that we will generate a Put that - // will be set correctly on the server - return QueryConstants.EMPTY_COLUMN_VALUE_BYTES; - } - - @Override - public boolean isViewReferenced() { - return true; - } - }); - } else if (isViewColumnReferenced != null) { - if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) { - entry.setValue(column = new DelegateColumn(column) { - @Override - public byte[] getViewConstant() { - return viewColumnConstants[columnPosition]; - } - - @Override - public boolean isViewReferenced() { - return isViewColumnReferenced.get(columnPosition); - } - }); - } else { - entry.setValue(column = new DelegateColumn(column) { - @Override - public boolean isViewReferenced() { - return isViewColumnReferenced.get(columnPosition); - } - }); - } - - // if the base table column is referenced in the view - // or if we are adding a new column during view creation - if (isViewColumnReferenced.get(columnPosition) || - viewNewColumnPositions.contains( - columnPosition)) { - // acquire the mutex using the global physical table - // name to prevent this column from being dropped 
- // while the view is being created or to prevent - // a conflicting column from being added to a parent - // in case the view creation adds new columns - boolean acquiredMutex = writeCell( - null, - parentPhysicalSchemaName, - parentPhysicalTableName, - column.toString()); - if (!acquiredMutex) { - throw new ConcurrentTableMutationException( - parentPhysicalSchemaName, - parentPhysicalTableName); - } - acquiredColumnMutexSet.add(column.toString()); - } - } - Short keySeq = SchemaUtil.isPKColumn(column) ? ++nextKeySeq : null; - // Prior to PHOENIX-3534 we were sending the parent table column metadata while creating a - // child view, now that we combine columns by resolving the parent table hierarchy we - // don't need to include the parent table columns while creating a view - // If QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK is true we continue - // to store the parent table column metadata along with the child view metadata - // so that we can rollback the upgrade if required. - if (allowSystemCatalogRollback || !isRegularView - || columnPosition >= baseTableColumnCount) { - addColumnMutation(connection, schemaName, tableName, column, parentTableName, - pkName, keySeq, saltBucketNum != null); - columnMetadata.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); - connection.rollback(); - } - } - - // add the columns in reverse order since we reverse the list later - Collections.reverse(columnMetadata); - tableMetaData.addAll(columnMetadata); - String dataTableName = parent == null || tableType == PTableType.VIEW ? null : parent.getTableName().getString(); - PIndexState defaultCreateState; - String defaultCreateStateString = connection.getClientInfo(INDEX_CREATE_DEFAULT_STATE); - if (defaultCreateStateString == null) { - defaultCreateStateString = connection.getQueryServices().getConfiguration().get( - INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE); - } - defaultCreateState = PIndexState.valueOf(defaultCreateStateString); - if (defaultCreateState == PIndexState.CREATE_DISABLE) { - if (indexType == IndexType.LOCAL || sharedTable) { - defaultCreateState = PIndexState.BUILDING; - } - } - PIndexState indexState = parent == null || - (tableType == PTableType.VIEW || tableType == PTableType.CDC) ? - null : defaultCreateState; - if (indexState == null && tableProps.containsKey(INDEX_STATE)) { - indexState = PIndexState.fromSerializedValue(tableProps.get(INDEX_STATE).toString()); - } - PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE); - tableUpsert.setString(1, tenantIdStr); - tableUpsert.setString(2, schemaName); - tableUpsert.setString(3, tableName); - tableUpsert.setString(4, tableType.getSerializedValue()); - tableUpsert.setLong(5, PTable.INITIAL_SEQ_NUM); - tableUpsert.setInt(6, position); - if (saltBucketNum != null) { - tableUpsert.setInt(7, saltBucketNum); - } else { - tableUpsert.setNull(7, Types.INTEGER); - } - tableUpsert.setString(8, pkName); - tableUpsert.setString(9, dataTableName); - tableUpsert.setString(10, indexState == null ? 
null : indexState.getSerializedValue()); - tableUpsert.setBoolean(11, isImmutableRows); - tableUpsert.setString(12, defaultFamilyName); - if (parent != null && parent.getAutoPartitionSeqName() != null && viewStatement==null) { - // set to non-null value so that we will generate a Put that - // will be set correctly on the server - tableUpsert.setString(13, QueryConstants.EMPTY_COLUMN_VALUE); - } - else { - tableUpsert.setString(13, viewStatement); - } - tableUpsert.setBoolean(14, disableWAL); - tableUpsert.setBoolean(15, multiTenant); - if (viewType == null) { - tableUpsert.setNull(16, Types.TINYINT); - } else { - tableUpsert.setByte(16, viewType.getSerializedValue()); - } - if (indexType == null) { - tableUpsert.setNull(17, Types.TINYINT); - } else { - tableUpsert.setByte(17, indexType.getSerializedValue()); - } - tableUpsert.setBoolean(18, storeNulls); - if (parent != null && tableType == PTableType.VIEW) { - tableUpsert.setInt(19, parent.getColumns().size()); - } else { - tableUpsert.setInt(19, BASE_TABLE_BASE_COLUMN_COUNT); - } - if (transactionProvider == null) { - tableUpsert.setNull(20, Types.TINYINT); - } else { - tableUpsert.setByte(20, transactionProvider.getCode()); - } - tableUpsert.setLong(21, updateCacheFrequency); - tableUpsert.setBoolean(22, isNamespaceMapped); - if (autoPartitionSeq == null) { - tableUpsert.setNull(23, Types.VARCHAR); - } else { - tableUpsert.setString(23, autoPartitionSeq); - } - tableUpsert.setBoolean(24, isAppendOnlySchema); - if (guidePostsWidth == null) { - tableUpsert.setNull(25, Types.BIGINT); - } else { - tableUpsert.setLong(25, guidePostsWidth); - } - tableUpsert.setByte(26, immutableStorageScheme.getSerializedMetadataValue()); - tableUpsert.setByte(27, encodingScheme.getSerializedMetadataValue()); - if (useStatsForParallelizationProp == null) { - tableUpsert.setNull(28, Types.BOOLEAN); - } else { - tableUpsert.setBoolean(28, useStatsForParallelizationProp); - } - if (indexType == IndexType.LOCAL || - (parent != null && parent.getType() == PTableType.VIEW - && tableType == PTableType.INDEX)) { - tableUpsert.setInt(29, viewIndexIdType.getSqlType()); - } else { - tableUpsert.setNull(29, Types.NULL); - } - - if (isChangeDetectionEnabledProp == null) { - tableUpsert.setNull(30, Types.BOOLEAN); - } else { - tableUpsert.setBoolean(30, isChangeDetectionEnabledProp); - } - - if (physicalTableName == null){ - tableUpsert.setNull(31, Types.VARCHAR); - } else { - tableUpsert.setString(31, physicalTableName); - } - - if (schemaVersion == null) { - tableUpsert.setNull(32, Types.VARCHAR); - } else { - tableUpsert.setString(32, schemaVersion); - } - - if (streamingTopicName == null) { - tableUpsert.setNull(33, Types.VARCHAR); - } else { - tableUpsert.setString(33, streamingTopicName); - } - - if (tableType == INDEX && statement.getWhereClause() != null) { - tableUpsert.setString(34, statement.getWhereClause().toString()); - } else { - tableUpsert.setNull(34, Types.VARCHAR); - } - if (maxLookbackAge == null) { - tableUpsert.setNull(35, Types.BIGINT); - } - else { - tableUpsert.setLong(35, maxLookbackAge); - } - - if (cdcIncludeScopesStr == null) { - tableUpsert.setNull(36, Types.VARCHAR); - } else { - tableUpsert.setString(36, cdcIncludeScopesStr); - } - - if (ttl == null || ttl == TTL_NOT_DEFINED) { - tableUpsert.setNull(37, Types.VARCHAR); - } else { - tableUpsert.setString(37, String.valueOf(ttl)); - } - - if ((rowKeyMatcher == null) || - Bytes.compareTo(rowKeyMatcher, HConstants.EMPTY_BYTE_ARRAY) == 0) { - tableUpsert.setNull(38, Types.VARBINARY); - } else { 
- tableUpsert.setBytes(38, rowKeyMatcher); - } - - tableUpsert.execute(); - - if (asyncCreatedDate != null) { - try (PreparedStatement setAsync = connection.prepareStatement(SET_ASYNC_CREATED_DATE)) { - setAsync.setString(1, tenantIdStr); - setAsync.setString(2, schemaName); - setAsync.setString(3, tableName); - setAsync.setDate(4, asyncCreatedDate); - setAsync.execute(); - } - } else { - Date syncCreatedDate = new Date(EnvironmentEdgeManager.currentTimeMillis()); - try (PreparedStatement setSync = connection.prepareStatement(SET_INDEX_SYNC_CREATED_DATE)) { - setSync.setString(1, tenantIdStr); - setSync.setString(2, schemaName); - setSync.setString(3, tableName); - setSync.setDate(4, syncCreatedDate); - setSync.execute(); - } - } - tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); - connection.rollback(); - - /* - * The table metadata must be in the following order: - * 1) table header row - * 2) ordered column rows - * 3) parent table header row - */ - Collections.reverse(tableMetaData); - - if (indexType != IndexType.LOCAL) { - splits = SchemaUtil.processSplits(splits, pkColumns, saltBucketNum, connection.getQueryServices().getProps().getBoolean( - QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER)); - } - - // Modularized this code for unit testing - PName parentName = physicalNames !=null && physicalNames.size() > 0 ? physicalNames.get(0) : null; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("createTable tableName=" + tableName + " parent=" + (parent == null ? "" : parent.getTableName() + "-" + parent.getPhysicalName()) + " parent physical=" + parentName + "-" + (physicalNames.size() > 0 ? physicalNames.get(0) : "null") + " viewType " + viewType + allocateIndexId); - } - MetaDataMutationResult result = connection.getQueryServices().createTable(tableMetaData - ,viewType == ViewType.MAPPED || allocateIndexId ? 
physicalNames.get(0).getBytes() - : null, tableType, tableProps, familyPropList, splits, isNamespaceMapped, - allocateIndexId, UpgradeUtil.isNoUpgradeSet(connection.getClientInfo()), parent); - MutationCode code = result.getMutationCode(); - try { - if (code != MutationCode.TABLE_NOT_FOUND) { - boolean tableAlreadyExists = handleCreateTableMutationCode(result, code, statement, - schemaName, tableName, parent); - if (tableAlreadyExists) { - return null; - } - } - // If the parent table of the view has the auto partition sequence name attribute, - // set the view statement and relevant partition column attributes correctly - if (parent != null && parent.getAutoPartitionSeqName() != null) { - final PColumn autoPartitionCol = parent.getPKColumns().get(MetaDataUtil - .getAutoPartitionColIndex(parent)); - final Long autoPartitionNum = Long.valueOf(result.getAutoPartitionNum()); - columns.put(autoPartitionCol, new DelegateColumn(autoPartitionCol) { - @Override - public byte[] getViewConstant() { - PDataType dataType = autoPartitionCol.getDataType(); - Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE); - byte[] bytes = new byte[dataType.getByteSize() + 1]; - dataType.toBytes(val, bytes, 0); - return bytes; - } - - @Override - public boolean isViewReferenced() { - return true; - } - }); - String viewPartitionClause = QueryUtil.getViewPartitionClause(MetaDataUtil - .getAutoPartitionColumnName(parent), autoPartitionNum); - if (viewStatement != null) { - viewStatement = viewStatement + " AND " + viewPartitionClause; - } else { - viewStatement = QueryUtil.getViewStatement(parent.getSchemaName().getString(), - parent.getTableName().getString(), viewPartitionClause); - } - } - PName newSchemaName = PNameFactory.newName(schemaName); - /* - * It doesn't hurt for the PTable of views to have the cqCounter. However, views always - * rely on the parent table's counter to dole out encoded column qualifiers. So setting - * the counter as NULL_COUNTER for extra safety. - */ - EncodedCQCounter cqCounterToBe = tableType == PTableType.VIEW ? NULL_COUNTER : cqCounter; - PTable table = new PTableImpl.Builder() - .setType(tableType) - .setState(indexState) - .setTimeStamp(timestamp != null ? timestamp : result.getMutationTime()) - .setIndexDisableTimestamp(0L) - .setSequenceNumber(PTable.INITIAL_SEQ_NUM) - .setImmutableRows(isImmutableRows) - .setViewStatement(viewStatement) - .setDisableWAL(Boolean.TRUE.equals(disableWAL)) - .setMultiTenant(multiTenant) - .setStoreNulls(storeNulls) - .setViewType(viewType) - .setViewIndexIdType(viewIndexIdType) - .setViewIndexId(result.getViewIndexId()) - .setIndexType(indexType) - .setTransactionProvider(transactionProvider) - .setUpdateCacheFrequency(updateCacheFrequency) - .setNamespaceMapped(isNamespaceMapped) - .setAutoPartitionSeqName(autoPartitionSeq) - .setAppendOnlySchema(isAppendOnlySchema) - .setImmutableStorageScheme(immutableStorageScheme) - .setQualifierEncodingScheme(encodingScheme) - .setBaseColumnCount(baseTableColumnCount) - .setEncodedCQCounter(cqCounterToBe) - .setUseStatsForParallelization(useStatsForParallelizationProp) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(tenantId) - .setSchemaName(newSchemaName) - .setTableName(PNameFactory.newName(tableName)) - .setPkName(pkName == null ? null : PNameFactory.newName(pkName)) - .setDefaultFamilyName(defaultFamilyName == null ? 
- null : PNameFactory.newName(defaultFamilyName)) - .setRowKeyOrderOptimizable(rowKeyOrderOptimizable) - .setBucketNum(saltBucketNum) - .setIndexes(Collections.emptyList()) - .setParentSchemaName((parent == null) ? null : parent.getSchemaName()) - .setParentTableName((parent == null) ? null : parent.getTableName()) - .setPhysicalNames(ImmutableList.copyOf(physicalNames)) - .setColumns(columns.values()) - .setViewModifiedUpdateCacheFrequency(tableType == PTableType.VIEW && - parent != null && - parent.getUpdateCacheFrequency() != updateCacheFrequency) - .setViewModifiedUseStatsForParallelization(tableType == PTableType.VIEW && - parent != null && - parent.useStatsForParallelization() - != useStatsForParallelizationProp) - .setLastDDLTimestamp(result.getTable() != null ? - result.getTable().getLastDDLTimestamp() : null) - .setIsChangeDetectionEnabled(isChangeDetectionEnabledProp) - .setSchemaVersion(schemaVersion) - .setExternalSchemaId(result.getTable() != null ? - result.getTable().getExternalSchemaId() : null) - .setStreamingTopicName(streamingTopicName) - .setIndexWhere(statement.getWhereClause() == null ? null - : statement.getWhereClause().toString()) - .setMaxLookbackAge(maxLookbackAge) - .setCDCIncludeScopes(cdcIncludeScopes) - .setTTL(ttl == null || ttl == TTL_NOT_DEFINED ? ttlFromHierarchy : ttl) - .setRowKeyMatcher(rowKeyMatcher) - .build(); - result = new MetaDataMutationResult(code, result.getMutationTime(), table, true); - addTableToCache(result, false); - return table; - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNameNode.toString(), - NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } finally { - connection.setAutoCommit(wasAutoCommit); - deleteMutexCells(parentPhysicalSchemaName, parentPhysicalTableName, - acquiredColumnMutexSet); - } - } - - private boolean isDuplicateColumn(LinkedHashMap columns, - Set pkColumnNames, PColumn column) { - // either column name is same within same CF or column name within - // default CF is same as any of PK column - return columns.put(column, column) != null - || (column.getFamilyName() != null - && DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().toString()) - && pkColumnNames.contains(column.getName().toString())); - } - - private void verifyChangeDetectionTableType(PTableType tableType, Boolean isChangeDetectionEnabledProp) throws SQLException { - if (isChangeDetectionEnabledProp != null && isChangeDetectionEnabledProp) { - if (tableType != TABLE && tableType != VIEW) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY) - .build().buildException(); - } - } - } - - private QualifierEncodingScheme getEncodingScheme(Map tableProps, String schemaName, String tableName, TransactionFactory.Provider transactionProvider) - throws SQLException { - QualifierEncodingScheme encodingScheme = null; - Byte encodingSchemeSerializedByte = (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); - if (encodingSchemeSerializedByte == null) { - if (tableProps.containsKey(ENCODING_SCHEME)) { - encodingSchemeSerializedByte = QualifierEncodingScheme.valueOf(((String) tableProps.get(ENCODING_SCHEME))).getSerializedMetadataValue(); - } - } - if (encodingSchemeSerializedByte == null) { - // Ignore default if transactional and column encoding is not supported (as with OMID) - if (transactionProvider == null || !transactionProvider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) ) { - 
encodingSchemeSerializedByte = (byte)connection.getQueryServices().getProps().getInt(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, - QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES); - encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte); - } else { - encodingScheme = NON_ENCODED_QUALIFIERS; - } - } else { - encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte); - if (encodingScheme != NON_ENCODED_QUALIFIERS && transactionProvider != null && transactionProvider.getTransactionProvider() - .isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER) - .setSchemaName(schemaName).setTableName(tableName).setMessage(transactionProvider.name()).build().buildException(); - } - } - - return encodingScheme; - } - - private void checkImmutableStorageSchemeForIndex(ImmutableStorageScheme immutableStorageSchemeProp, String schemaName, String tableName, TransactionFactory.Provider transactionProvider) - throws SQLException { - if (immutableStorageSchemeProp != ONE_CELL_PER_COLUMN && transactionProvider != null && transactionProvider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) ) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.UNSUPPORTED_STORAGE_FORMAT_FOR_TXN_PROVIDER) - .setSchemaName(schemaName).setTableName(tableName) - .setMessage(transactionProvider.name()) - .build() - .buildException(); - } - } - - /* This method handles mutation codes sent by phoenix server, except for TABLE_NOT_FOUND which - * is considered to be a success code. If TABLE_ALREADY_EXISTS in hbase, we don't need to add - * it in ConnectionQueryServices and we return result as true. However if code is - * NEWER_TABLE_FOUND and it does not exists in statement then we return false because we need to - * add it ConnectionQueryServices. For other mutation codes it throws related SQLException. - * If server is throwing new mutation code which is not being handled by client then it throws - * SQLException stating the server side Mutation code. - */ - @VisibleForTesting - public boolean handleCreateTableMutationCode(MetaDataMutationResult result, MutationCode code, - CreateTableStatement statement, String schemaName, String tableName, - PTable parent) throws SQLException { - switch(code) { - case TABLE_ALREADY_EXISTS: - if (result.getTable() != null) { - addTableToCache(result, false); - } - if (!statement.ifNotExists()) { - throw new TableAlreadyExistsException(schemaName, tableName, result.getTable()); - } - return true; - case NEWER_TABLE_FOUND: - // Add table to ConnectionQueryServices so it's cached, but don't add - // it to this connection as we can't see it. 
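// A small, hypothetical summary of the contract spelled out in the comment above this
// method (illustrative only; the real code maps the remaining mutation codes to
// SQLExceptions with schema/table context):
enum CreateTableOutcome { SUCCESS, ALREADY_EXISTS_NOOP, CACHE_ONLY, FAIL }

final class MutationCodeContractSketch {
    static CreateTableOutcome classify(String mutationCode, boolean ifNotExists) {
        switch (mutationCode) {
            case "TABLE_NOT_FOUND":      return CreateTableOutcome.SUCCESS;   // treated as success
            case "TABLE_ALREADY_EXISTS": return ifNotExists ? CreateTableOutcome.ALREADY_EXISTS_NOOP
                                                            : CreateTableOutcome.FAIL;
            case "NEWER_TABLE_FOUND":    return ifNotExists ? CreateTableOutcome.CACHE_ONLY
                                                            : CreateTableOutcome.FAIL;
            default:                     return CreateTableOutcome.FAIL;      // surfaced as SQLException
        }
    }
}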
- if (!statement.ifNotExists()) { - throw new NewerTableAlreadyExistsException(schemaName, tableName, - result.getTable()); - } - return false; - case UNALLOWED_TABLE_MUTATION: - throwsSQLExceptionUtil("CANNOT_MUTATE_TABLE",schemaName,tableName); - case CONCURRENT_TABLE_MUTATION: - addTableToCache(result, false); - throw new ConcurrentTableMutationException(schemaName, tableName); - case AUTO_PARTITION_SEQUENCE_NOT_FOUND: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_PARTITION_SEQUENCE_UNDEFINED) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - case CANNOT_COERCE_AUTO_PARTITION_ID: - case UNABLE_TO_CREATE_CHILD_LINK: - case PARENT_TABLE_NOT_FOUND: - case TABLE_NOT_IN_REGION: - throwsSQLExceptionUtil(String.valueOf(code), schemaName, tableName); - case TOO_MANY_INDEXES: - case UNABLE_TO_UPDATE_PARENT_TABLE: - throwsSQLExceptionUtil(String.valueOf(code), SchemaUtil.getSchemaNameFromFullName( - parent.getPhysicalName().getString()),SchemaUtil.getTableNameFromFullName( - parent.getPhysicalName().getString())); - case ERROR_WRITING_TO_SCHEMA_REGISTRY: - throw new SQLExceptionInfo.Builder(ERROR_WRITING_TO_SCHEMA_REGISTRY) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - default: - // Cannot use SQLExecptionInfo here since not all mutation codes have their - // corresponding codes in the enum SQLExceptionCode - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEXPECTED_MUTATION_CODE) - .setSchemaName(schemaName).setTableName(tableName).setMessage("mutation code: " - + code).build().buildException(); - } - } - - private void throwsSQLExceptionUtil(String code,String schemaName, String tableName) - throws SQLException { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.valueOf(code)) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - - private static boolean isPkColumn(PrimaryKeyConstraint pkConstraint, ColumnDef colDef) { - return colDef.isPK() || (pkConstraint != null && pkConstraint.contains(colDef.getColumnDefName())); - } - - /** - * A table can be a parent table to tenant-specific tables if all of the following conditions are true: - *

- * FOR TENANT-SPECIFIC TABLES WITH TENANT_TYPE_ID SPECIFIED:
- * 1. It has 3 or more PK columns AND
- * 2. First PK (tenant id) column is not nullable AND
- * 3. First PK column's data type is either VARCHAR or CHAR AND
- * 4. Second PK (tenant type id) column is not nullable AND
- * 5. Second PK column's data type is either VARCHAR or CHAR
- * FOR TENANT-SPECIFIC TABLES WITH NO TENANT_TYPE_ID SPECIFIED:
- * 1. It has 2 or more PK columns AND
- * 2. First PK (tenant id) column is not nullable AND
- * 3. First PK column's data type is either VARCHAR or CHAR
- */ - private static void throwIfInsufficientColumns(String schemaName, String tableName, Collection columns, boolean isSalted, boolean isMultiTenant) throws SQLException { - if (!isMultiTenant) { - return; - } - int nPKColumns = columns.size() - (isSalted ? 1 : 0); - if (nPKColumns < 2) { - throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS).setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - Iterator iterator = columns.iterator(); - if (isSalted) { - iterator.next(); - } - // Tenant ID must be VARCHAR or CHAR and be NOT NULL - // NOT NULL is a requirement, since otherwise the table key would conflict - // potentially with the global table definition. - PColumn tenantIdCol = iterator.next(); - if ( tenantIdCol.isNullable()) { - throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS).setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - } - - public MutationState dropTable(DropTableStatement statement) throws SQLException { - String schemaName = connection.getSchema() != null && statement.getTableName().getSchemaName() == null - ? connection.getSchema() : statement.getTableName().getSchemaName(); - String tableName = statement.getTableName().getTableName(); - return dropTable(schemaName, tableName, null, statement.getTableType(), statement.ifExists(), - statement.cascade(), statement.getSkipAddingParentColumns()); - } - - public MutationState dropFunction(DropFunctionStatement statement) throws SQLException { - return dropFunction(statement.getFunctionName(), statement.ifExists()); - } - - public MutationState dropIndex(DropIndexStatement statement) throws SQLException { - String schemaName = statement.getTableName().getSchemaName(); - String tableName = statement.getIndexName().getName(); - String parentTableName = statement.getTableName().getTableName(); - return dropTable(schemaName, tableName, parentTableName, PTableType.INDEX, statement.ifExists(), false, false); - } - - public MutationState dropCDC(DropCDCStatement statement) throws SQLException { - String schemaName = statement.getTableName().getSchemaName(); - String cdcTableName = statement.getCdcObjName().getName(); - String parentTableName = statement.getTableName().getTableName(); - // Dropping the virtual CDC Table - dropTable(schemaName, cdcTableName, parentTableName, PTableType.CDC, statement.ifExists(), - false, false); - - String indexName = CDCUtil.getCDCIndexName(statement.getCdcObjName().getName()); - // Dropping the uncovered index associated with the CDC Table - try { - return dropTable(schemaName, indexName, parentTableName, PTableType.INDEX, - statement.ifExists(), false, false); - } catch (SQLException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.fromErrorCode(e.getErrorCode())) - .setTableName(statement.getCdcObjName().getName()).setRootCause(e.getCause()) - .build().buildException(); - } - } - - private MutationState dropFunction(String functionName, - boolean ifExists) throws SQLException { - connection.rollback(); - boolean wasAutoCommit = connection.getAutoCommit(); - try { - PName tenantId = connection.getTenantId(); - byte[] key = - SchemaUtil.getFunctionKey(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY - : tenantId.getBytes(), Bytes.toBytes(functionName)); - Long scn = connection.getSCN(); - long clientTimeStamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; - try { - PFunction function = connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName)); - if (function.isTemporaryFunction()) { - connection.removeFunction(tenantId, functionName, clientTimeStamp); - return new MutationState(0, 0, connection); - } - } catch(FunctionNotFoundException e) { - - } - List functionMetaData = Lists.newArrayListWithExpectedSize(2); - Delete functionDelete = new Delete(key, clientTimeStamp); - functionMetaData.add(functionDelete); - MetaDataMutationResult result = connection.getQueryServices().dropFunction(functionMetaData, ifExists); - MutationCode code = result.getMutationCode(); - switch (code) { - case FUNCTION_NOT_FOUND: - if (!ifExists) { - throw new FunctionNotFoundException(functionName); - } - break; - default: - connection.removeFunction(tenantId, functionName, result.getMutationTime()); - break; - } - return new MutationState(0, 0, connection); - } finally { - connection.setAutoCommit(wasAutoCommit); - } - } - - MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType, - boolean ifExists, boolean cascade, boolean skipAddingParentColumns) throws SQLException { - // Checking the parent table whether exists - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - try { - PTable ptable = connection.getTable(fullTableName); - if (parentTableName != null &&!parentTableName.equals(ptable.getParentTableName().getString())) { - throw new SQLExceptionInfo.Builder(PARENT_TABLE_NOT_FOUND) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - } catch (TableNotFoundException e) { - if (!ifExists && !e.isThrownToForceReReadForTransformingTable()) { - if (tableType == PTableType.INDEX) - throw new IndexNotFoundException(e.getSchemaName(), - e.getTableName(), e.getTimeStamp()); - throw e; - } - } - - connection.rollback(); - boolean wasAutoCommit = connection.getAutoCommit(); - PName tenantId = connection.getTenantId(); - String tenantIdStr = tenantId == null ? null : tenantId.getString(); - try { - byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName); - Long scn = connection.getSCN(); - long clientTimeStamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; - List tableMetaData = Lists.newArrayListWithExpectedSize(2); - Delete tableDelete = new Delete(key, clientTimeStamp); - tableMetaData.add(tableDelete); - boolean hasViewIndexTable = false; - if (parentTableName != null) { - byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName); - Delete linkDelete = new Delete(linkKey, clientTimeStamp); - tableMetaData.add(linkDelete); - } - MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade); - MutationCode code = result.getMutationCode(); - PTable table = result.getTable(); - switch (code) { - case TABLE_NOT_FOUND: - if (!ifExists) { throw new TableNotFoundException(schemaName, tableName); } - break; - case NEWER_TABLE_FOUND: - throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable()); - case UNALLOWED_TABLE_MUTATION: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - case UNABLE_TO_DELETE_CHILD_LINK: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_DELETE_CHILD_LINK) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - default: - connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime()); - - if (table != null) { - boolean dropMetaData = connection.getQueryServices().getProps() - .getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - long ts = (scn == null ? result.getMutationTime() : scn); - List tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size()); - connection.setAutoCommit(true); - if (tableType == PTableType.VIEW) { - for (PTable index : table.getIndexes()) { - tableRefs.add(new TableRef(null, index, ts, false)); - } - } else { - dropMetaData = result.getTable().getViewIndexId() == null && - connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - // Create empty table and schema - they're only used to get the name from - // PName name, PTableType type, long timeStamp, long sequenceNumber, List columns - // All multi-tenant tables have a view index table, so no need to check in that case - if (parentTableName == null) { - hasViewIndexTable = true;// keeping always true for deletion of stats if view index present - // or not - MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), - table.isNamespaceMapped()); - byte[] viewIndexPhysicalName = MetaDataUtil - .getViewIndexPhysicalName(table.getPhysicalName().getBytes()); - if (!dropMetaData) { - // we need to drop rows only when actually view index exists - try (Admin admin = connection.getQueryServices().getAdmin()) { - hasViewIndexTable = admin.tableExists(org.apache.hadoop.hbase.TableName.valueOf(viewIndexPhysicalName)); - } catch (IOException e1) { - // absorbing as it is not critical check - } - } - } - if (tableType == PTableType.TABLE - && (table.isMultiTenant() || hasViewIndexTable)) { - if (hasViewIndexTable) { - byte[] viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()); - String viewIndexSchemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName); - String viewIndexTableName = SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName); - PName viewIndexName = PNameFactory.newName(SchemaUtil.getTableName(viewIndexSchemaName, viewIndexTableName)); - - PTable viewIndexTable 
= new PTableImpl.Builder() - .setName(viewIndexName) - .setKey(new PTableKey(tenantId, viewIndexName.getString())) - .setSchemaName(PNameFactory.newName(viewIndexSchemaName)) - .setTableName(PNameFactory.newName(viewIndexTableName)) - .setType(PTableType.VIEW) - .setViewType(ViewType.MAPPED) - .setTimeStamp(ts) - .setPkColumns(Collections.emptyList()) - .setAllColumns(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .setIndexes(Collections.emptyList()) - .setFamilyAttributes(table.getColumnFamilies()) - .setPhysicalNames(Collections.emptyList()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setImmutableStorageScheme(table.getImmutableStorageScheme()) - .setQualifierEncodingScheme(table.getEncodingScheme()) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .build(); - tableRefs.add(new TableRef(null, viewIndexTable, ts, false)); - } - } - tableRefs.add(new TableRef(null, table, ts, false)); - // TODO: Let the standard mutable secondary index maintenance handle this? - for (PTable index : table.getIndexes()) { - tableRefs.add(new TableRef(null, index, ts, false)); - } - } - if (!dropMetaData) { - MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, - Collections. emptyList(), ts); - // Delete everything in the column. You'll still be able to do queries at earlier timestamps - return connection.getQueryServices().updateData(plan); - } - } - break; - } - return new MutationState(0, 0, connection); - } finally { - connection.setAutoCommit(wasAutoCommit); - } - } - - private MutationCode processMutationResult(String schemaName, String tableName, MetaDataMutationResult result) throws SQLException { - final MutationCode mutationCode = result.getMutationCode(); - PName tenantId = connection.getTenantId(); - switch (mutationCode) { - case TABLE_NOT_FOUND: - // Only called for add/remove column so parentTableName will always be null - connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), null, HConstants.LATEST_TIMESTAMP); - throw new TableNotFoundException(schemaName, tableName); - case UNALLOWED_TABLE_MUTATION: - String columnName = null; - String familyName = null; - String msg = null; - // TODO: better to return error code - if (result.getColumnName() != null) { - familyName = result.getFamilyName() == null ? null : Bytes.toString(result.getFamilyName()); - columnName = Bytes.toString(result.getColumnName()); - msg = "Cannot add/drop column referenced by VIEW"; - } - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE) - .setSchemaName(schemaName).setTableName(tableName).setFamilyName(familyName).setColumnName(columnName).setMessage(msg).build().buildException(); - case UNALLOWED_SCHEMA_MUTATION: - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_SET_OR_ALTER_TTL) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - case NO_OP: - case COLUMN_ALREADY_EXISTS: - case COLUMN_NOT_FOUND: - break; - case CONCURRENT_TABLE_MUTATION: - addTableToCache(result, false); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection)); - } - throw new ConcurrentTableMutationException(schemaName, tableName); - case NEWER_TABLE_FOUND: - // TODO: update cache? 
- // if (result.getTable() != null) { - // connection.addTable(result.getTable()); - // } - throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable()); - case NO_PK_COLUMNS: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - case TABLE_ALREADY_EXISTS: - break; - case ERROR_WRITING_TO_SCHEMA_REGISTRY: - throw new SQLExceptionInfo.Builder(ERROR_WRITING_TO_SCHEMA_REGISTRY). - setSchemaName(schemaName).setTableName(tableName).build().buildException(); - default: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEXPECTED_MUTATION_CODE).setSchemaName(schemaName) - .setTableName(tableName).setMessage("mutation code: " + mutationCode).build().buildException(); - } - return mutationCode; - } - - private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, - MetaPropertiesEvaluated metaPropertiesEvaluated) - throws SQLException { - return incrementTableSeqNum(table, expectedType, columnCountDelta, - metaPropertiesEvaluated.getIsTransactional(), - metaPropertiesEvaluated.getTransactionProvider(), - metaPropertiesEvaluated.getUpdateCacheFrequency(), - metaPropertiesEvaluated.getIsImmutableRows(), - metaPropertiesEvaluated.getDisableWAL(), - metaPropertiesEvaluated.getMultiTenant(), - metaPropertiesEvaluated.getStoreNulls(), - metaPropertiesEvaluated.getGuidePostWidth(), - metaPropertiesEvaluated.getAppendOnlySchema(), - metaPropertiesEvaluated.getImmutableStorageScheme(), - metaPropertiesEvaluated.getUseStatsForParallelization(), - metaPropertiesEvaluated.getTTL(), - metaPropertiesEvaluated.isChangeDetectionEnabled(), - metaPropertiesEvaluated.getPhysicalTableName(), - metaPropertiesEvaluated.getSchemaVersion(), - metaPropertiesEvaluated.getColumnEncodedBytes(), - metaPropertiesEvaluated.getStreamingTopicName(), - metaPropertiesEvaluated.getMaxLookbackAge()); - } - - private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, Boolean isTransactional, - Long updateCacheFrequency, String physicalTableName, - String schemaVersion, QualifierEncodingScheme columnEncodedBytes) throws SQLException { - return incrementTableSeqNum(table, expectedType, columnCountDelta, isTransactional, null, - updateCacheFrequency, null, null, null, null, -1L, null, null, null,null, false, physicalTableName, - schemaVersion, columnEncodedBytes, null, null); - } - - private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, - Boolean isTransactional, TransactionFactory.Provider transactionProvider, - Long updateCacheFrequency, Boolean isImmutableRows, Boolean disableWAL, - Boolean isMultiTenant, Boolean storeNulls, Long guidePostWidth, Boolean appendOnlySchema, - ImmutableStorageScheme immutableStorageScheme, Boolean useStatsForParallelization, - Integer ttl, Boolean isChangeDetectionEnabled, String physicalTableName, String schemaVersion, - QualifierEncodingScheme columnEncodedBytes, String streamingTopicName, Long maxLookbackAge) - throws SQLException { - String schemaName = table.getSchemaName().getString(); - String tableName = table.getTableName().getString(); - // Ordinal position is 1-based and we don't count SALT column in ordinal position - int totalColumnCount = table.getColumns().size() + (table.getBucketNum() == null ? 0 : -1); - final long seqNum = table.getSequenceNumber() + 1; - String tenantId = connection.getTenantId() == null ? 
null : connection.getTenantId().getString(); - PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE); - try { - tableUpsert.setString(1, tenantId); - tableUpsert.setString(2, schemaName); - tableUpsert.setString(3, tableName); - tableUpsert.setString(4, expectedType.getSerializedValue()); - tableUpsert.setLong(5, seqNum); - tableUpsert.setInt(6, totalColumnCount + columnCountDelta); - tableUpsert.execute(); - } finally { - tableUpsert.close(); - } - if (isImmutableRows != null) { - mutateBooleanProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_ROWS, isImmutableRows); - } - if (disableWAL != null) { - mutateBooleanProperty(connection,tenantId, schemaName, tableName, DISABLE_WAL, disableWAL); - } - if (isMultiTenant != null) { - mutateBooleanProperty(connection,tenantId, schemaName, tableName, MULTI_TENANT, isMultiTenant); - } - if (storeNulls != null) { - mutateBooleanProperty(connection,tenantId, schemaName, tableName, STORE_NULLS, storeNulls); - } - if (isTransactional != null) { - mutateBooleanProperty(connection,tenantId, schemaName, tableName, TRANSACTIONAL, isTransactional); - } - if (transactionProvider !=null) { - mutateByteProperty(connection, tenantId, schemaName, tableName, TRANSACTION_PROVIDER, transactionProvider.getCode()); - } - if (updateCacheFrequency != null) { - mutateLongProperty(connection,tenantId, schemaName, tableName, UPDATE_CACHE_FREQUENCY, updateCacheFrequency); - } - if (guidePostWidth == null || guidePostWidth >= 0) { - mutateLongProperty(connection, tenantId, schemaName, tableName, GUIDE_POSTS_WIDTH, guidePostWidth); - } - if (appendOnlySchema !=null) { - mutateBooleanProperty(connection, tenantId, schemaName, tableName, APPEND_ONLY_SCHEMA, appendOnlySchema); - } - if (columnEncodedBytes !=null) { - mutateByteProperty(connection, tenantId, schemaName, tableName, ENCODING_SCHEME, columnEncodedBytes.getSerializedMetadataValue()); - } - if (immutableStorageScheme !=null) { - mutateStringProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_STORAGE_SCHEME, immutableStorageScheme.name()); - } - if (useStatsForParallelization != null) { - mutateBooleanProperty(connection, tenantId, schemaName, tableName, USE_STATS_FOR_PARALLELIZATION, useStatsForParallelization); - } - if (ttl != null) { - mutateStringProperty(connection, tenantId, schemaName, tableName, TTL, - ttl == TTL_NOT_DEFINED ? 
null : String.valueOf(ttl)); - } - if (isChangeDetectionEnabled != null) { - mutateBooleanProperty(connection, tenantId, schemaName, tableName, CHANGE_DETECTION_ENABLED, isChangeDetectionEnabled); - } - if (!Strings.isNullOrEmpty(physicalTableName)) { - mutateStringProperty(connection, tenantId, schemaName, tableName, PHYSICAL_TABLE_NAME, physicalTableName); - } - if (!Strings.isNullOrEmpty(schemaVersion)) { - mutateStringProperty(connection, tenantId, schemaName, tableName, SCHEMA_VERSION, schemaVersion); - } - if (!Strings.isNullOrEmpty(streamingTopicName)) { - mutateStringProperty(connection, tenantId, schemaName, tableName, STREAMING_TOPIC_NAME, streamingTopicName); - } - if (maxLookbackAge != null) { - mutateLongProperty(connection, tenantId, schemaName, tableName, MAX_LOOKBACK_AGE, maxLookbackAge); - } - return seqNum; - } - - public static void mutateTransformProperties(Connection connection, String tenantId, String schemaName, String tableName, - String physicalTableName, - ImmutableStorageScheme immutableStorageScheme, - QualifierEncodingScheme columnEncodedBytes) throws SQLException { - if (columnEncodedBytes !=null) { - mutateByteProperty(connection, tenantId, schemaName, tableName, ENCODING_SCHEME, columnEncodedBytes.getSerializedMetadataValue()); - } - if (immutableStorageScheme !=null) { - mutateByteProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_STORAGE_SCHEME, immutableStorageScheme.getSerializedMetadataValue()); - } - if (!Strings.isNullOrEmpty(physicalTableName)) { - mutateStringProperty(connection, tenantId, schemaName, tableName, PHYSICAL_TABLE_NAME, physicalTableName); - } - } - - private static void mutateBooleanProperty(Connection connection, String tenantId, String schemaName, String tableName, - String propertyName, boolean propertyValue) throws SQLException { - String updatePropertySql = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - propertyName + - ") VALUES (?, ?, ?, ?)"; - try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { - tableBoolUpsert.setString(1, tenantId); - tableBoolUpsert.setString(2, schemaName); - tableBoolUpsert.setString(3, tableName); - tableBoolUpsert.setBoolean(4, propertyValue); - tableBoolUpsert.execute(); - } - } - - private static void mutateLongProperty(Connection connection, String tenantId, String schemaName, String tableName, - String propertyName, Long propertyValue) throws SQLException { - String updatePropertySql = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - propertyName + - ") VALUES (?, ?, ?, ?)"; - try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { - tableBoolUpsert.setString(1, tenantId); - tableBoolUpsert.setString(2, schemaName); - tableBoolUpsert.setString(3, tableName); - if (propertyValue == null) { - tableBoolUpsert.setNull(4, Types.BIGINT); - } else { - tableBoolUpsert.setLong(4, propertyValue); - } - tableBoolUpsert.execute(); - } - } - - private static void mutateIntegerProperty(Connection connection, String tenantId, - String schemaName, String tableName, String propertyName, Integer propertyValue) - throws SQLException { - String updatePropertySql = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" - + SYSTEM_CATALOG_TABLE + "\"( " - + TENANT_ID + "," - + TABLE_SCHEM + "," - + TABLE_NAME + "," - + propertyName - + ") VALUES (?, 
?, ?, ?)"; - try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { - tableBoolUpsert.setString(1, tenantId); - tableBoolUpsert.setString(2, schemaName); - tableBoolUpsert.setString(3, tableName); - if (propertyValue == null) { - tableBoolUpsert.setNull(4, Types.INTEGER); - } else { - tableBoolUpsert.setInt(4, propertyValue); - } - tableBoolUpsert.execute(); - } - } - - private static void mutateByteProperty(Connection connection, String tenantId, String schemaName, String tableName, - String propertyName, Byte propertyValue) throws SQLException { - String updatePropertySql = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - propertyName + - ") VALUES (?, ?, ?, ?)"; - try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { - tableBoolUpsert.setString(1, tenantId); - tableBoolUpsert.setString(2, schemaName); - tableBoolUpsert.setString(3, tableName); - if (propertyValue == null) { - tableBoolUpsert.setNull(4, Types.TINYINT); - } else { - tableBoolUpsert.setByte(4, propertyValue); - } - tableBoolUpsert.execute(); - } - } - - private static void mutateStringProperty(Connection connection, String tenantId, String schemaName, String tableName, - String propertyName, String propertyValue) throws SQLException { - String updatePropertySql = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - propertyName + - ") VALUES (?, ?, ?, ?)"; - try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { - tableBoolUpsert.setString(1, tenantId); - tableBoolUpsert.setString(2, schemaName); - tableBoolUpsert.setString(3, tableName); - tableBoolUpsert.setString(4, propertyValue); - tableBoolUpsert.execute(); - } - } - - public MutationState addColumn(AddColumnStatement statement) throws SQLException { - PTable table = FromCompiler.getResolver(statement, connection).getTables().get(0).getTable(); - return addColumn(table, statement.getColumnDefs(), statement.getProps(), statement.ifNotExists(), false, statement.getTable(), statement.getTableType(), statement.isCascade(), statement.getIndexes()); - } - - public MutationState addColumn(PTable table, List origColumnDefs, - ListMultimap> stmtProperties, boolean ifNotExists, - boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType, boolean cascade, List indexes) - throws SQLException { - connection.rollback(); - List indexesPTable = Lists.newArrayListWithExpectedSize(indexes != null ? - indexes.size() : table.getIndexes().size()); - Map indexToColumnSizeMap = new HashMap<>(); - - // if cascade keyword is passed and indexes are provided either implicitly or explicitly - if (cascade && (indexes == null || !indexes.isEmpty())) { - indexesPTable = getIndexesPTableForCascade(indexes, table); - if (indexesPTable.size() == 0) { - // go back to regular behavior of altering the table/view - cascade = false; - } else { - for (PTable index : indexesPTable) { - indexToColumnSizeMap.put(index, index.getColumns().size()); - } - } - } - boolean wasAutoCommit = connection.getAutoCommit(); - List columns = Lists.newArrayListWithExpectedSize(origColumnDefs != null ? 
- origColumnDefs.size() : 0); - PName tenantId = connection.getTenantId(); - boolean sharedIndex = tableType == PTableType.INDEX && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null); - String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null; - String schemaName = table.getSchemaName().getString(); - String tableName = table.getTableName().getString(); - PName physicalName = table.getPhysicalName(); - String physicalSchemaName = - SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); - String physicalTableName = - SchemaUtil.getTableNameFromFullName(physicalName.getString()); - Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); - boolean acquiredBaseTableMutex = false; - try { - connection.setAutoCommit(false); - - List columnDefs; - if ((table.isAppendOnlySchema() || ifNotExists) && origColumnDefs != null) { - // only make the rpc if we are adding new columns - columnDefs = Lists.newArrayList(); - for (ColumnDef columnDef : origColumnDefs) { - String familyName = columnDef.getColumnDefName().getFamilyName(); - String columnName = columnDef.getColumnDefName().getColumnName(); - if (familyName != null) { - try { - PColumnFamily columnFamily = table.getColumnFamily(familyName); - columnFamily.getPColumnForColumnName(columnName); - if (!ifNotExists) { - throw new ColumnAlreadyExistsException(schemaName, tableName, - columnName); - } - } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) { - columnDefs.add(columnDef); - } - } else { - try { - table.getColumnForColumnName(columnName); - if (!ifNotExists) { - throw new ColumnAlreadyExistsException(schemaName, tableName, - columnName); - } - } catch (ColumnNotFoundException e) { - columnDefs.add(columnDef); - } - } - } - } else { - columnDefs = origColumnDefs == null ? Collections.emptyList() : origColumnDefs; - } - - boolean retried = false; - boolean changingPhoenixTableProperty = false; - MutableBoolean areWeIntroducingTTLAtThisLevel = new MutableBoolean(false); - MetaProperties metaProperties = new MetaProperties(); - while (true) { - Map>> properties=new HashMap<>(stmtProperties.size());; - metaProperties = loadStmtProperties(stmtProperties,properties,table,removeTableProps); - - ColumnResolver resolver = FromCompiler.getResolver(namedTableNode, connection); - table = resolver.getTables().get(0).getTable(); - int nIndexes = table.getIndexes().size(); - int numCols = columnDefs.size(); - int nNewColumns = numCols; - List tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1)); - List columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection)); - } - - int position = table.getColumns().size(); - - boolean addPKColumns = columnDefs.stream().anyMatch(ColumnDef::isPK); - if (addPKColumns) { - List currentPKs = table.getPKColumns(); - PColumn lastPK = currentPKs.get(currentPKs.size()-1); - // Disallow adding columns if the last column in the primary key is VARBIANRY - // or ARRAY. 
- if (lastPK.getDataType() == PVarbinary.INSTANCE || lastPK.getDataType().isArrayType()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_LAST_PK) - .setColumnName(lastPK.getName().getString()).build().buildException(); - } - // Disallow adding columns if last column in the primary key is fixed width - // and nullable. - if (lastPK.isNullable() && lastPK.getDataType().isFixedWidth()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULLABLE_FIXED_WIDTH_LAST_PK) - .setColumnName(lastPK.getName().getString()).build().buildException(); - } - } - - MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated(); - changingPhoenixTableProperty = evaluateStmtProperties(metaProperties,metaPropertiesEvaluated,table,schemaName,tableName,areWeIntroducingTTLAtThisLevel); - if (areWeIntroducingTTLAtThisLevel.booleanValue()) { - //As we are introducing TTL for the first time at this level, we need to check - //if TTL is already defined up or down in the hierarchy. - Integer ttlAlreadyDefined = TTL_NOT_DEFINED; - //Check up the hierarchy - if (table.getType() != PTableType.TABLE) { - ttlAlreadyDefined = checkAndGetTTLFromHierarchy(PhoenixRuntime.getTableNoCache(connection, table.getParentName().toString())); - } - if (ttlAlreadyDefined != TTL_NOT_DEFINED) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. - TTL_ALREADY_DEFINED_IN_HIERARCHY) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() - .buildException(); - } - - /** - * To check if TTL is defined at any of the child below we are checking it at - * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl#mutateColumn(List, - * ColumnMutator, int, PTable, PTable, boolean)} level where in function - * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl# - * validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], - * byte[], byte[], List, int)} we are already traversing through - * allDescendantViews. - */ - - - - } - - boolean isTransformNeeded = TransformClient.checkIsTransformNeeded(metaProperties, schemaName, table, tableName, null, tenantIdToUse, connection); - if (isTransformNeeded) { - // We can add a support for these later. For now, not supported. - if (MetaDataUtil.hasLocalIndexTable(connection, physicalTableName.getBytes())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_TRANSFORM_TABLE_WITH_LOCAL_INDEX) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - if (table.isAppendOnlySchema()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_TRANSFORM_TABLE_WITH_APPEND_ONLY_SCHEMA) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - if (table.isTransactional()) { - throw new SQLExceptionInfo.Builder(CANNOT_TRANSFORM_TRANSACTIONAL_TABLE) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - } - - // If changing isImmutableRows to true or it's not being changed and is already true - boolean willBeImmutableRows = Boolean.TRUE.equals(metaPropertiesEvaluated.getIsImmutableRows()) || (metaPropertiesEvaluated.getIsImmutableRows() == null && table.isImmutableRows()); - boolean willBeTxnl = metaProperties.getNonTxToTx(); - Long timeStamp = TransactionUtil.getTableTimestamp(connection, table.isTransactional() || willBeTxnl, table.isTransactional() ? 
table.getTransactionProvider() : metaPropertiesEvaluated.getTransactionProvider()); - int numPkColumnsAdded = 0; - Set colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>(); - Set families = new LinkedHashSet<>(); - PTable tableForCQCounters = tableType == PTableType.VIEW - ? connection.getTable(table.getPhysicalName().getString()) - : table; - EncodedCQCounter cqCounterToUse = tableForCQCounters.getEncodedCQCounter(); - Map changedCqCounters = new HashMap<>(numCols); - if (numCols > 0 ) { - StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver); - short nextKeySeq = SchemaUtil.getMaxKeySeq(table); - for ( ColumnDef colDef : columnDefs) { - if (colDef != null && !colDef.isNull()) { - if (colDef.isPK()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY) - .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); - } else if (!willBeImmutableRows) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) - .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); - } - } - if (colDef != null && colDef.isPK() && table.getType() == VIEW && table.getViewType() != MAPPED) { - throwIfLastPKOfParentIsVariableLength(getParentOfView(table), schemaName, tableName, colDef); - } - if (colDef != null && colDef.isRowTimestamp()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY) - .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); - } - if (!colDef.validateDefault(context, null)) { - colDef = new ColumnDef(colDef, null); // Remove DEFAULT as it's not necessary - } - String familyName = null; - Integer encodedCQ = null; - if (!colDef.isPK()) { - String colDefFamily = colDef.getColumnDefName().getFamilyName(); - ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); - String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) ? - tableForCQCounters.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY; - if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.LOCAL) { - defaultColumnFamily = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + defaultColumnFamily; - } - if (storageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) { - familyName = colDefFamily != null ? colDefFamily : defaultColumnFamily; - } else { - familyName = defaultColumnFamily; - } - encodedCQ = table.isAppendOnlySchema() ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + position) : cqCounterToUse.getNextQualifier(familyName); - if (!table.isAppendOnlySchema() && cqCounterToUse.increment(familyName)) { - changedCqCounters.put(familyName, - cqCounterToUse.getNextQualifier(familyName)); - } - } - byte[] columnQualifierBytes = null; - try { - columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(colDef.getColumnDefName().getColumnName(), encodedCQ, table, colDef.isPK()); - } - catch (QualifierOutOfRangeException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED) - .setSchemaName(schemaName) - .setTableName(tableName).build().buildException(); - } - PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? 
null : table.getDefaultFamilyName().getString(), true, columnQualifierBytes, willBeImmutableRows); - HashMap indexToIndexColumnMap = null; - if (cascade) { - indexToIndexColumnMap = getPTablePColumnHashMapForCascade(indexesPTable, willBeImmutableRows, - colDef, familyName, indexToColumnSizeMap); - } - - columns.add(column); - String pkName = null; - Short keySeq = null; - - // TODO: support setting properties on other families? - if (column.getFamilyName() == null) { - ++numPkColumnsAdded; - pkName = table.getPKName() == null ? null : table.getPKName().getString(); - keySeq = ++nextKeySeq; - } else { - families.add(column.getFamilyName().getString()); - } - colFamiliesForPColumnsToBeAdded.add(column.getFamilyName() == null ? null : column.getFamilyName().getString()); - addColumnMutation(connection, schemaName, tableName, column, null, pkName, keySeq, table.getBucketNum() != null); - // add new columns for given indexes one by one - if (cascade) { - for (PTable index: indexesPTable) { - LOGGER.info("Adding column "+column.getName().getString()+" to "+index.getTableName().toString()); - addColumnMutation(connection, schemaName, index.getTableName().getString(), indexToIndexColumnMap.get(index), null, "", keySeq, index.getBucketNum() != null); - } - } - } - - // Add any new PK columns to end of index PK - if (numPkColumnsAdded > 0) { - // create PK column list that includes the newly created columns - List pkColumns = Lists.newArrayListWithExpectedSize(table.getPKColumns().size()+numPkColumnsAdded); - pkColumns.addAll(table.getPKColumns()); - for (int i=0; i0 || metaProperties.getNonTxToTx() || - metaPropertiesEvaluated.getUpdateCacheFrequency() != null)) { - for (PTable index : table.getIndexes()) { - incrementTableSeqNum(index, index.getType(), numPkColumnsAdded, - metaProperties.getNonTxToTx() ? 
Boolean.TRUE : null, - metaPropertiesEvaluated.getUpdateCacheFrequency(), - metaPropertiesEvaluated.getPhysicalTableName(), - metaPropertiesEvaluated.getSchemaVersion(), - metaProperties.getColumnEncodedBytesProp()); - } - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - } - - if (cascade) { - for (PTable index : indexesPTable) { - incrementTableSeqNum(index, index.getType(), columnDefs.size(), - Boolean.FALSE, - metaPropertiesEvaluated.getUpdateCacheFrequency(), - metaPropertiesEvaluated.getPhysicalTableName(), - metaPropertiesEvaluated.getSchemaVersion(), - metaPropertiesEvaluated.getColumnEncodedBytes()); - } - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - } - - long seqNum = 0; - if (changingPhoenixTableProperty || columnDefs.size() > 0) { - seqNum = incrementTableSeqNum(table, tableType, columnDefs.size(), metaPropertiesEvaluated); - - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - } - - PTable transformingNewTable = null; - if (isTransformNeeded) { - try { - transformingNewTable = TransformClient.addTransform(connection, tenantIdToUse, table, metaProperties, seqNum, PTable.TransformType.METADATA_TRANSFORM); - } catch (SQLException ex) { - connection.rollback(); - throw ex; - } - } - - // Force the table header row to be first - Collections.reverse(tableMetaData); - // Add column metadata afterwards, maintaining the order so columns have more predictable ordinal position - tableMetaData.addAll(columnMetaData); - if (!changedCqCounters.isEmpty()) { - try (PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) { - for (Entry entry : changedCqCounters.entrySet()) { - linkStatement.setString(1, tenantIdToUse); - linkStatement.setString(2, tableForCQCounters.getSchemaName().getString()); - linkStatement.setString(3, tableForCQCounters.getTableName().getString()); - linkStatement.setString(4, entry.getKey()); - linkStatement.setInt(5, entry.getValue()); - linkStatement.execute(); - } - } - - // When a view adds its own columns, then we need to increase the sequence number of the base table - // too since we want clients to get the latest PTable of the base table. - if (tableType == VIEW) { - try (PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM)) { - incrementStatement.setString(1, null); - incrementStatement.setString(2, tableForCQCounters.getSchemaName().getString()); - incrementStatement.setString(3, tableForCQCounters.getTableName().getString()); - incrementStatement.setLong(4, tableForCQCounters.getSequenceNumber() + 1); - incrementStatement.execute(); - } - } - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - } - - byte[] family = families.size() > 0 ? 
- families.iterator().next().getBytes(StandardCharsets.UTF_8) : null; - - // Figure out if the empty column family is changing as a result of adding the new column - byte[] emptyCF = null; - byte[] projectCF = null; - if (table.getType() != PTableType.VIEW && family != null) { - if (table.getColumnFamilies().isEmpty()) { - emptyCF = family; - } else { - try { - table.getColumnFamily(family); - } catch (ColumnFamilyNotFoundException e) { - projectCF = family; - emptyCF = SchemaUtil.getEmptyColumnFamily(table); - } - } - } - - if (EncodedColumnsUtil.usesEncodedColumnNames(table) - && stmtProperties.isEmpty() && !acquiredBaseTableMutex) { - // For tables that use column encoding acquire a mutex on - // the base table as we need to update the encoded column - // qualifier counter on the base table. Not applicable to - // ALTER TABLE/VIEW SET statements because - // we don't update the column qualifier counter while - // setting property, hence the check: stmtProperties.isEmpty() - acquiredBaseTableMutex = writeCell(null, physicalSchemaName, - physicalTableName, null); - if (!acquiredBaseTableMutex) { - throw new ConcurrentTableMutationException( - physicalSchemaName, physicalTableName); - } - } - for (PColumn pColumn : columns) { - // acquire the mutex using the global physical table name to - // prevent creating the same column on a table or view with - // a conflicting type etc - boolean acquiredMutex = writeCell(null, physicalSchemaName, physicalTableName, - pColumn.toString()); - if (!acquiredMutex && !acquiredColumnMutexSet.contains(pColumn.toString())) { - throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); - } - acquiredColumnMutexSet.add(pColumn.toString()); - } - MetaDataMutationResult result = connection.getQueryServices().addColumn(tableMetaData, table, - getParentTable(table), transformingNewTable, properties, colFamiliesForPColumnsToBeAdded, columns); - - try { - MutationCode code = processMutationResult(schemaName, tableName, result); - if (code == MutationCode.COLUMN_ALREADY_EXISTS) { - addTableToCache(result, false); - if (!ifNotExists) { - throw new ColumnAlreadyExistsException(schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns)); - } - return new MutationState(0, 0, connection); - } - // Only update client side cache if we aren't adding a PK column to a table with indexes or - // transitioning a table from non transactional to transactional. - // We could update the cache manually then too, it'd just be a pain. - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - long resolvedTimeStamp = TransactionUtil.getResolvedTime(connection, result); - if (table.getIndexes().isEmpty() || (numPkColumnsAdded==0 && ! 
metaProperties.getNonTxToTx())) { - addTableToCache(result, false, resolvedTimeStamp); - table = result.getTable(); - } else { - // remove the table from the cache, it will be fetched from the server the - // next time it is resolved - connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp); - } - // Delete rows in view index if we haven't dropped it already - // We only need to do this if the multiTenant transitioned to false - if (table.getType() == PTableType.TABLE - && Boolean.FALSE.equals(metaPropertiesEvaluated.getMultiTenant()) - && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) { - connection.setAutoCommit(true); - MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), table.isNamespaceMapped()); - // If we're not dropping metadata, then make sure no rows are left in - // our view index physical table. - // TODO: remove this, as the DROP INDEX commands run when the DROP VIEW - // commands are run would remove all rows already. - if (!connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA)) { - Long scn = connection.getSCN(); - long ts = (scn == null ? result.getMutationTime() : scn); - byte[] viewIndexPhysicalName = MetaDataUtil - .getViewIndexPhysicalName(table.getPhysicalName().getBytes()); - String viewIndexSchemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName); - String viewIndexTableName = SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName); - PName viewIndexName = PNameFactory.newName(SchemaUtil.getTableName(viewIndexSchemaName, viewIndexTableName)); - - PTable viewIndexTable = new PTableImpl.Builder() - .setName(viewIndexName) - .setKey(new PTableKey(tenantId, viewIndexName.getString())) - .setSchemaName(PNameFactory.newName(viewIndexSchemaName)) - .setTableName(PNameFactory.newName(viewIndexTableName)) - .setType(PTableType.VIEW) - .setViewType(ViewType.MAPPED) - .setTimeStamp(ts) - .setPkColumns(Collections.emptyList()) - .setAllColumns(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .setIndexes(Collections.emptyList()) - .setFamilyAttributes(table.getColumnFamilies()) - .setPhysicalNames(Collections.emptyList()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setImmutableStorageScheme(table.getImmutableStorageScheme()) - .setQualifierEncodingScheme(table.getEncodingScheme()) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .build(); - List tableRefs = Collections.singletonList(new TableRef(null, viewIndexTable, ts, false)); - MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, - Collections.emptyList(), ts); - connection.getQueryServices().updateData(plan); - } - } - if (transformingNewTable != null) { - connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp); - connection.getQueryServices().clearCache(); - } - if (emptyCF != null) { - Long scn = connection.getSCN(); - connection.setAutoCommit(true); - // Delete everything in the column. You'll still be able to do queries at earlier timestamps - long ts = (scn == null ? result.getMutationTime() : scn); - MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(new TableRef(null, table, ts, false)), emptyCF, projectCF == null ? 
null : Collections.singletonList(projectCF), null, ts); - return connection.getQueryServices().updateData(plan); - } - return new MutationState(0, 0, connection); - } catch (ConcurrentTableMutationException e) { - if (retried) { - throw e; - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection)); - } - retried = true; - } catch(Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, - NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } - } finally { - connection.setAutoCommit(wasAutoCommit); - if (acquiredBaseTableMutex) { - // release the mutex on the physical table (used to prevent concurrent conflicting - // add column changes) - deleteCell(null, physicalSchemaName, physicalTableName, null); - } - deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); - } - } - - private List getIndexesPTableForCascade(List indexes, PTable table) throws SQLException { - boolean isView = table.getType().equals(PTableType.VIEW); - List indexesPTable = new ArrayList<>(); - - // when indexes is null, that means ALL keyword is passed and - // we ll collect all global indexes for cascading - if (indexes == null) { - indexesPTable.addAll(table.getIndexes()); - for (PTable index : table.getIndexes()) { - // a child view has access to its parents indexes, - // this if clause ensures we only get the indexes that - // are only created on the view itself. - if (index.getIndexType().equals(IndexType.LOCAL) - || (isView && index.getTableName().toString().contains( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR))) { - indexesPTable.remove(index); - } - } - } else { - List indexesParam = Lists.newArrayListWithExpectedSize(indexes.size()); - for (NamedNode index : indexes) { - indexesParam.add(index.getName()); - } - // gets the PTable for list of indexes passed in the function - // if all the names in parameter list are correct, indexesParam list should be empty - // by end of the loop - for (PTable index : table.getIndexes()) { - if (index.getIndexType().equals(IndexType.LOCAL)) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX) - .setTableName(index.getName().getString()) - .build() - .buildException(); - } - if (indexesParam.remove(index.getTableName().getString())) { - indexesPTable.add(index); - } - } - // indexesParam has index names that are not correct - if (!indexesParam.isEmpty()) { - throw new SQLExceptionInfo - .Builder(SQLExceptionCode.INCORRECT_INDEX_NAME) - .setTableName(StringUtils.join(",", indexesParam)) - .build() - .buildException(); - } - } - return indexesPTable; - } - - private HashMap getPTablePColumnHashMapForCascade(List indexesPTable, - boolean willBeImmutableRows, ColumnDef colDef, String familyName, Map indexToColumnSizeMap) throws SQLException { - HashMap indexColumn; - if (colDef.isPK()) { - //only supported for non pk column + if (viewType != MAPPED) { + /* + * For regular phoenix views, use the storage scheme of the physical table since they all + * share the the same HTable. Views always use the base table's column qualifier counter + * for doling out encoded column qualifier. 
+ */ + viewPhysicalTable = connection.getTable(physicalNames.get(0).getString()); + immutableStorageScheme = viewPhysicalTable.getImmutableStorageScheme(); + encodingScheme = viewPhysicalTable.getEncodingScheme(); + if (EncodedColumnsUtil.usesEncodedColumnNames(viewPhysicalTable)) { + cqCounter = viewPhysicalTable.getEncodedCQCounter(); + } + } + } + // System tables have hard-coded column qualifiers. So we can't use column encoding for them. + else if ( + !SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaName, tableName))) + || SchemaUtil.isLogTable(schemaName, tableName) + ) { + /* + * Indexes inherit the storage scheme of the parent data tables. Otherwise, we always + * attempt to create tables with encoded column names. Also of note is the case with shared + * indexes i.e. local indexes and view indexes. In these cases, column qualifiers for + * covered columns don't have to be unique because rows of the logical indexes are + * partitioned by the virtue of indexId present in the row key. As such, different shared + * indexes can use potentially overlapping column qualifiers. + */ + if (parent != null) { + Byte encodingSchemeSerializedByte = + (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); + // Table has encoding scheme defined + if (encodingSchemeSerializedByte != null) { + encodingScheme = + getEncodingScheme(tableProps, schemaName, tableName, transactionProvider); + } else { + encodingScheme = parent.getEncodingScheme(); + } + + ImmutableStorageScheme immutableStorageSchemeProp = + (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME.getValue(tableProps); + if (immutableStorageSchemeProp == null) { + immutableStorageScheme = parent.getImmutableStorageScheme(); + } else { + checkImmutableStorageSchemeForIndex(immutableStorageSchemeProp, schemaName, tableName, + transactionProvider); + immutableStorageScheme = immutableStorageSchemeProp; + } + + if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) { + if (encodingScheme == NON_ENCODED_QUALIFIERS) { + if (encodingSchemeSerializedByte != null) { + // encoding scheme is set as non-encoded on purpose, so we should fail + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } else { + // encoding scheme is inherited from parent but it is not compatible with Single + // Cell. 
+ encodingScheme = QualifierEncodingScheme + .fromSerializedValue((byte) QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES); + } + } + } + + if ( + tableType != CDC && parent.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS + && immutableStorageScheme == ONE_CELL_PER_COLUMN + ) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_PK) - .build() - .buildException(); - } - indexColumn = new HashMap(indexesPTable.size()); - ColumnName - indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(familyName, colDef.getColumnDefName().getColumnName())); - ColumnDef indexColDef = FACTORY.columnDef(indexColName, - colDef.getDataType().getSqlTypeName(), colDef.isNull(), - colDef.getMaxLength(), colDef.getScale(), false, - colDef.getSortOrder(), colDef.getExpression(), colDef.isRowTimestamp()); - // TODO: add support to specify tenant owned indexes in the DDL statement with CASCADE executed with Global connection - for (PTable index : indexesPTable) { - int iPos = indexToColumnSizeMap.get(index); - EncodedCQCounter cqCounterToUse = index.getEncodedCQCounter(); - int baseCount = 0; - baseCount = (cqCounterToUse != null && cqCounterToUse.getNextQualifier(familyName)!=null) ? cqCounterToUse.getNextQualifier(familyName) : 0 ; - Integer encodedCQ = index.isAppendOnlySchema() ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + iPos) : baseCount + iPos; - byte[] columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(indexColDef.getColumnDefName().getColumnName(), encodedCQ, index, indexColDef.isPK()); - PColumn iColumn = newColumn(iPos, indexColDef, null, index.getDefaultFamilyName() == null ? null : index.getDefaultFamilyName().getString(), false, columnQualifierBytes, willBeImmutableRows); - indexColumn.put(index, iColumn); - indexToColumnSizeMap.put(index, iPos+1); - } - return indexColumn; - } - - private void deleteMutexCells(String physicalSchemaName, String physicalTableName, Set acquiredColumnMutexSet) throws SQLException { - if (!acquiredColumnMutexSet.isEmpty()) { - for (String columnName : acquiredColumnMutexSet) { - // release the mutex (used to prevent concurrent conflicting add column changes) - deleteCell(null, physicalSchemaName, physicalTableName, columnName); + SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + LOGGER.info( + String.format("STORAGE--ENCODING: %s--%s", immutableStorageScheme, encodingScheme)); + } else { + encodingScheme = + getEncodingScheme(tableProps, schemaName, tableName, transactionProvider); + + ImmutableStorageScheme immutableStorageSchemeProp = + (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME.getValue(tableProps); + if (immutableStorageSchemeProp == null) { + // Ignore default if transactional and column encoding is not supported + if ( + transactionProvider == null || !transactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) + ) { + if (multiTenant) { + immutableStorageScheme = + ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get( + QueryServices.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, + QueryServicesOptions.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME)); + } else { + if (isImmutableRows) { + immutableStorageScheme = + ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get( + QueryServices.DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, + 
QueryServicesOptions.DEFAULT_IMMUTABLE_STORAGE_SCHEME)); + } else { + immutableStorageScheme = ONE_CELL_PER_COLUMN; + } + } + } + } else { + immutableStorageScheme = + isImmutableRows ? immutableStorageSchemeProp : ONE_CELL_PER_COLUMN; + checkImmutableStorageSchemeForIndex(immutableStorageScheme, schemaName, tableName, + transactionProvider); + } + if ( + immutableStorageScheme != ONE_CELL_PER_COLUMN + && encodingScheme == NON_ENCODED_QUALIFIERS + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + } + cqCounter = + encodingScheme != NON_ENCODED_QUALIFIERS ? new EncodedCQCounter() : NULL_COUNTER; + if (encodingScheme != NON_ENCODED_QUALIFIERS && statement.getFamilyCQCounters() != null) { + for (Map.Entry cq : statement.getFamilyCQCounters().entrySet()) { + if (cq.getValue() < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_CQ) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } + cqCounter.setValue(cq.getKey(), cq.getValue()); + changedCqCounters.put(cq.getKey(), cqCounter.getNextQualifier(cq.getKey())); + inputCqCounters.putIfAbsent(cq.getKey(), new HashSet()); + } + } + } + + boolean wasPKDefined = false; + // Keep track of all columns that are newly added to a view + Set viewNewColumnPositions = Sets.newHashSetWithExpectedSize(colDefs.size()); + Set pkColumnNames = new HashSet<>(); + for (PColumn pColumn : pkColumns) { + pkColumnNames.add(pColumn.getName().toString()); + } + for (ColumnDef colDef : colDefs) { + rowTimeStampColumnAlreadyFound = checkAndValidateRowTimestampCol(colDef, pkConstraint, + rowTimeStampColumnAlreadyFound, tableType); + if (colDef.isPK()) { // i.e. the column is declared as CREATE TABLE COLNAME DATATYPE PRIMARY + // KEY... + if (wasPKDefined) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS) + .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); + } + wasPKDefined = true; + } else { + // do not allow setting NOT-NULL constraint on non-primary columns. + if ( + !colDef.isNull() && !isImmutableRows + && (wasPKDefined || !isPkColumn(pkConstraint, colDef)) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) + .setSchemaName(schemaName).setTableName(tableName) + .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); + } } - } - - private String dropColumnMutations(PTable table, List columnsToDrop) throws SQLException { - String tenantId = connection.getTenantId() == null ? "" : connection.getTenantId().getString(); - String schemaName = table.getSchemaName().getString(); - String tableName = table.getTableName().getString(); - String familyName = null; - /* - * Generate a fully qualified RVC with an IN clause, since that's what our optimizer can - * handle currently. 
If/when the optimizer handles (A and ((B AND C) OR (D AND E))) we - * can factor out the tenant ID, schema name, and table name columns - */ - StringBuilder buf = new StringBuilder("DELETE FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" WHERE "); - buf.append("(" + - TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + - COLUMN_NAME + ", " + COLUMN_FAMILY + ") IN ("); - for (PColumn columnToDrop : columnsToDrop) { - buf.append("('" + tenantId + "'"); - buf.append(",'" + schemaName + "'"); - buf.append(",'" + tableName + "'"); - buf.append(",'" + columnToDrop.getName().getString() + "'"); - buf.append(",'" + (columnToDrop.getFamilyName() == null ? "" : columnToDrop.getFamilyName().getString()) + "'),"); - } - buf.setCharAt(buf.length()-1, ')'); - - try (PreparedStatement delCol = connection.prepareStatement(buf.toString())) { - delCol.execute(); - } - Collections.sort(columnsToDrop,new Comparator () { - @Override - public int compare(PColumn left, PColumn right) { - return Ints.compare(left.getPosition(), right.getPosition()); + ColumnName columnDefName = colDef.getColumnDefName(); + String colDefFamily = columnDefName.getFamilyName(); + boolean isPkColumn = isPkColumn(pkConstraint, colDef); + String cqCounterFamily = null; + if (!isPkColumn) { + if ( + immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS + && encodingScheme != NON_ENCODED_QUALIFIERS + ) { + // For this scheme we track column qualifier counters at the column family level. + cqCounterFamily = colDefFamily != null + ? colDefFamily + : (defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY); + } else { + // For other schemes, column qualifier counters are tracked using the default column + // family. + cqCounterFamily = defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY; + } + } + // Use position as column qualifier if APPEND_ONLY_SCHEMA to prevent gaps in + // the column encoding (PHOENIX-4737). + Integer encodedCQ = null; + if (!isPkColumn) { + if (colDef.getEncodedQualifier() != null && encodingScheme != NON_ENCODED_QUALIFIERS) { + if ( + cqCounter.getNextQualifier(cqCounterFamily) > ENCODED_CQ_COUNTER_INITIAL_VALUE + && !inputCqCounters.containsKey(cqCounterFamily) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_CQ) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - }); - - boolean isSalted = table.getBucketNum() != null; - int columnsToDropIndex = 0; - try (PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION)) { - colUpdate.setString(1, tenantId); - colUpdate.setString(2, schemaName); - colUpdate.setString(3, tableName); - for (int i = columnsToDrop.get(columnsToDropIndex).getPosition() + 1; - i < table.getColumns().size(); i++) { - PColumn column = table.getColumns().get(i); - if (columnsToDrop.contains(column)) { - columnsToDropIndex++; - continue; - } - colUpdate.setString(4, column.getName().getString()); - colUpdate.setString(5, column.getFamilyName() == null - ? null : column.getFamilyName().getString()); - // Adjust position to not include the salt column - colUpdate.setInt(6, - column.getPosition() - columnsToDropIndex - (isSalted ? 
1 : 0)); - colUpdate.execute(); + + if ( + statement.getFamilyCQCounters() == null + || statement.getFamilyCQCounters().get(cqCounterFamily) == null + ) { + if (colDef.getEncodedQualifier() >= cqCounter.getNextQualifier(cqCounterFamily)) { + cqCounter.setValue(cqCounterFamily, colDef.getEncodedQualifier()); + cqCounter.increment(cqCounterFamily); + } + changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily)); } - } - return familyName; - } - /** - * Calculate what the new column family will be after the column is dropped, returning null - * if unchanged. - * @param table table containing column to drop - * @param columnToDrop column being dropped - * @return the new column family or null if unchanged. - */ - private static byte[] getNewEmptyColumnFamilyOrNull (PTable table, PColumn columnToDrop) { - if (table.getType() != PTableType.VIEW && !SchemaUtil.isPKColumn(columnToDrop) && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName()) && table.getColumnFamilies().get(0).getColumns().size() == 1) { - return SchemaUtil.getEmptyColumnFamily(table.getDefaultFamilyName(), table.getColumnFamilies().subList(1, table.getColumnFamilies().size()), table.getIndexType() == IndexType.LOCAL); - } - // If unchanged, return null - return null; - } + encodedCQ = colDef.getEncodedQualifier(); + if ( + encodedCQ < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + || encodedCQ >= cqCounter.getNextQualifier(cqCounterFamily) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_CQ) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } - private PTable getParentTable(PTable table) throws SQLException { - PTable parentTable = null; - boolean hasIndexId = table.getViewIndexId() != null; - if ( (table.getType()==PTableType.INDEX && hasIndexId) - || (table.getType() == PTableType.VIEW && table.getViewType() != ViewType.MAPPED)) { - parentTable = connection.getTable(table.getParentName().getString()); - if (parentTable==null) { - String schemaName = table.getSchemaName()!=null ? 
table.getSchemaName().getString() : null; - throw new TableNotFoundException(schemaName, table.getTableName().getString()); + inputCqCounters.putIfAbsent(cqCounterFamily, new HashSet()); + Set familyCounters = inputCqCounters.get(cqCounterFamily); + if (!familyCounters.add(encodedCQ)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_CQ) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - // only inherit columns view indexes (and not local indexes - // on regular tables which also have a viewIndexId) - if (hasIndexId && parentTable.getType() != PTableType.VIEW) { - return null; + } else { + if (inputCqCounters.containsKey(cqCounterFamily)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MISSING_CQ) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - } - return parentTable; - } - - public MutationState dropColumn(DropColumnStatement statement) throws SQLException { - connection.rollback(); - boolean wasAutoCommit = connection.getAutoCommit(); - Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); - String physicalSchemaName = null; - String physicalTableName = null; - try { - connection.setAutoCommit(false); - PName tenantId = connection.getTenantId(); - TableName tableNameNode = statement.getTable().getName(); - String schemaName = tableNameNode.getSchemaName(); - String tableName = tableNameNode.getTableName(); - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - boolean retried = false; - while (true) { - final ColumnResolver resolver = FromCompiler.getResolver(statement, connection); - TableRef tableRef = resolver.getTables().get(0); - PTable table = tableRef.getTable(); - PName physicalName = table.getPhysicalName(); - physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); - physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName.getString()); - - List columnRefs = statement.getColumnRefs(); - if (columnRefs == null) { - columnRefs = Lists.newArrayListWithCapacity(0); - } - List columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size()); - List indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size()); - List tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size())); - List tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size()); - - for (ColumnName column : columnRefs) { - ColumnRef columnRef = null; - try { - columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName()); - } catch (ColumnNotFoundException e) { - if (statement.ifExists()) { - return new MutationState(0, 0, connection); - } - throw e; - } - PColumn columnToDrop = columnRef.getColumn(); - tableColumnsToDrop.add(columnToDrop); - if (SchemaUtil.isPKColumn(columnToDrop)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK) - .setColumnName(columnToDrop.getName().getString()).build().buildException(); - } - else if (table.isAppendOnlySchema()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA) - .setColumnName(columnToDrop.getName().getString()).build().buildException(); - } - else if (columnToDrop.isViewReferenced()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_VIEW_REFERENCED_COL) - .setColumnName(columnToDrop.getName().getString()).build().buildException(); - } - columnsToDrop.add(new 
ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition())); - // check if client is already holding a mutex from previous retry - if (!acquiredColumnMutexSet.contains(columnToDrop.toString())) { - boolean acquiredMutex = writeCell(null, physicalSchemaName, - physicalTableName, columnToDrop.toString()); - if (!acquiredMutex) { - throw new ConcurrentTableMutationException(physicalSchemaName, - physicalTableName); - } - acquiredColumnMutexSet.add(columnToDrop.toString()); - } - } - dropColumnMutations(table, tableColumnsToDrop); - boolean removedIndexTableOrColumn=false; - Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null; - for (PTable index : table.getIndexes()) { - IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); - // get the covered columns - List indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size()); - Set> indexedColsInfo = indexMaintainer.getIndexedColumnInfo(); - Set coveredCols = indexMaintainer.getCoveredColumns(); - for (PColumn columnToDrop : tableColumnsToDrop) { - Pair columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString()); - ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null - : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes()); - boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo); - if (isColumnIndexed) { - if (index.getViewIndexId() == null) { - indexesToDrop.add(new TableRef(index)); - } - connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp()); - removedIndexTableOrColumn = true; - } else if (coveredCols.contains(colDropRef)) { - String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop); - PColumn indexColumn = index.getColumnForColumnName(indexColumnName); - indexColumnsToDrop.add(indexColumn); - // add the index column to be dropped so that we actually delete the column values - columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition())); - removedIndexTableOrColumn = true; - } - } - if (!indexColumnsToDrop.isEmpty()) { - long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), - null, null, null, null, null); - dropColumnMutations(index, indexColumnsToDrop); - long clientTimestamp = MutationState.getTableTimestamp(timeStamp, connection.getSCN()); - connection.removeColumn(tenantId, index.getName().getString(), - indexColumnsToDrop, clientTimestamp, indexTableSeqNum, - TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp)); - } - } - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - - long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), - null, null, null, null, null); - tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); - // Force table header to be first in list - Collections.reverse(tableMetaData); - - /* - * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column - * in a column family that was the empty column family. In that case, we have to pick another one. If there are no other - * ones, then we need to create our default empty column family. 
Note that this may no longer be necessary once we - * support declaring what the empty column family is on a table, as: - * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it - * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating - * the empty column family name on the PTable. - */ - for (ColumnRef columnRefToDrop : columnsToDrop) { - PTable tableContainingColumnToDrop = columnRefToDrop.getTable(); - byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn()); - if (emptyCF != null) { - try { - tableContainingColumnToDrop.getColumnFamily(emptyCF); - } catch (ColumnFamilyNotFoundException e) { - // Only if it's not already a column family do we need to ensure it's created - Map>> family = new HashMap<>(1); - family.put(Bytes.toString(emptyCF), Collections.>emptyList()); - // Just use a Put without any key values as the Mutation, as addColumn will treat this specially - // TODO: pass through schema name and table name instead to these methods as it's cleaner - byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); - if (tenantIdBytes == null) tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY; - connection.getQueryServices().addColumn( - Collections.singletonList(new Put(SchemaUtil.getTableKey - (tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), - tableContainingColumnToDrop.getTableName().getBytes()))), - tableContainingColumnToDrop, null, null,family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.emptyList()); - - } - } - } - - MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, - statement.getTableType(), getParentTable(table)); - try { - MutationCode code = processMutationResult(schemaName, tableName, result); - if (code == MutationCode.COLUMN_NOT_FOUND) { - addTableToCache(result, false); - if (!statement.ifExists()) { - throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName())); - } - return new MutationState(0, 0, connection); - } - // If we've done any index metadata updates, don't bother trying to update - // client-side cache as it would be too painful. Just let it pull it over from - // the server when needed. - if (tableColumnsToDrop.size() > 0) { - //need to remove the cached table because the DDL timestamp changed. We - // also need to remove it if we dropped an indexed column - connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp()); - } - // If we have a VIEW, then only delete the metadata, and leave the table data alone - if (table.getType() != PTableType.VIEW) { - MutationState state = null; - connection.setAutoCommit(true); - Long scn = connection.getSCN(); - // Delete everything in the column. You'll still be able to do queries at earlier timestamps - long ts = (scn == null ? 
result.getMutationTime() : scn); - PostDDLCompiler compiler = new PostDDLCompiler(connection); - - boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); - // if the index is a local index or view index it uses a shared physical table - // so we need to issue deletes markers for all the rows of the index - final List tableRefsToDrop = Lists.newArrayList(); - Map> tenantIdTableRefMap = Maps.newHashMap(); - if (result.getSharedTablesToDelete() != null) { - for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) { - ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); - QualifierEncodingScheme qualifierEncodingScheme = table.getEncodingScheme(); - List columns = sharedTableState.getColumns(); - if (table.getBucketNum() != null) { - columns = columns.subList(1, columns.size()); - } - - PTableImpl viewIndexTable = new PTableImpl.Builder() - .setPkColumns(Collections.emptyList()) - .setAllColumns(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .setIndexes(Collections.emptyList()) - .setFamilyAttributes(table.getColumnFamilies()) - .setType(PTableType.INDEX) - .setTimeStamp(ts) - .setMultiTenant(table.isMultiTenant()) - .setViewIndexIdType(sharedTableState.getViewIndexIdType()) - .setViewIndexId(sharedTableState.getViewIndexId()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setAppendOnlySchema(false) - .setImmutableStorageScheme(storageScheme == null ? - ImmutableStorageScheme.ONE_CELL_PER_COLUMN : storageScheme) - .setQualifierEncodingScheme(qualifierEncodingScheme == null ? - QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : qualifierEncodingScheme) - .setEncodedCQCounter(table.getEncodedCQCounter()) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(sharedTableState.getTenantId()) - .setSchemaName(sharedTableState.getSchemaName()) - .setTableName(sharedTableState.getTableName()) - .setRowKeyOrderOptimizable(false) - .setBucketNum(table.getBucketNum()) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(sharedTableState.getPhysicalNames() == null ? 
- ImmutableList.of() : - ImmutableList.copyOf(sharedTableState.getPhysicalNames())) - .setColumns(columns) - .build(); - TableRef indexTableRef = new TableRef(viewIndexTable); - PName indexTableTenantId = sharedTableState.getTenantId(); - if (indexTableTenantId == null) { - tableRefsToDrop.add(indexTableRef); - } else { - if (!tenantIdTableRefMap.containsKey( - indexTableTenantId.getString())) { - tenantIdTableRefMap.put(indexTableTenantId.getString(), - Lists.newArrayList()); - } - tenantIdTableRefMap.get(indexTableTenantId.getString()) - .add(indexTableRef); - } - - } - } - // if dropMetaData is false delete all rows for the indexes (if it was true - // they would have been dropped in ConnectionQueryServices.dropColumn) - if (!dropMetaData) { - tableRefsToDrop.addAll(indexesToDrop); - } - // Drop any index tables that had the dropped column in the PK - state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.emptyList(), ts)); - - // Drop any tenant-specific indexes - if (!tenantIdTableRefMap.isEmpty()) { - for (Entry> entry : tenantIdTableRefMap.entrySet()) { - String indexTenantId = entry.getKey(); - Properties props = new Properties(connection.getClientInfo()); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId); - try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) { - PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn); - state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.emptyList(), ts)); - } - } - } - - // TODO For immutable tables, if the storage scheme is not ONE_CELL_PER_COLUMN we will remove the column values at compaction time - // See https://issues.apache.org/jira/browse/PHOENIX-3605 - if (!table.isImmutableRows() || table.getImmutableStorageScheme()==ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - // Update empty key value column if necessary - for (ColumnRef droppedColumnRef : columnsToDrop) { - // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts - // to get any updates from the region server. - // TODO: move this into PostDDLCompiler - // TODO: consider filtering mutable indexes here, but then the issue is that - // we'd need to force an update of the data row empty key value if a mutable - // secondary index is changing its empty key value family. - droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts); - TableRef droppedColumnTableRef = droppedColumnRef.getTableRef(); - PColumn droppedColumn = droppedColumnRef.getColumn(); - MutationPlan plan = compiler.compile( - Collections.singletonList(droppedColumnTableRef), - getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), - null, - Collections.singletonList(droppedColumn), - ts); - state = connection.getQueryServices().updateData(plan); - } - } - // Return the last MutationState - return state; - } - return new MutationState(0, 0, connection); - } catch (ConcurrentTableMutationException e) { - if (retried) { - throw e; - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Caught ConcurrentTableMutationException for table " - + SchemaUtil.getTableName(e.getSchemaName(), e.getTableName()) - + ". 
Will update cache and try again...", connection)); - } - updateCache(connection.getTenantId(), - e.getSchemaName(), e.getTableName(), true); - retried = true; - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } + if (isAppendOnlySchema) { + encodedCQ = Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + position); + } else { + encodedCQ = cqCounter.getNextQualifier(cqCounterFamily); } - } finally { - connection.setAutoCommit(wasAutoCommit); - deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); + } } - } - - public MutationState alterIndex(AlterIndexStatement statement) throws SQLException { - connection.rollback(); - boolean wasAutoCommit = connection.getAutoCommit(); - String dataTableName; - long seqNum = 0L; + byte[] columnQualifierBytes = null; try { - dataTableName = statement.getTableName(); - final String indexName = statement.getTable().getName().getTableName(); - boolean isAsync = statement.isAsync(); - boolean isRebuildAll = statement.isRebuildAll(); - String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString(); - PTable table = FromCompiler.getIndexResolver(statement, connection) - .getTables().get(0).getTable(); - - String schemaName = statement.getTable().getName().getSchemaName(); - String tableName = table.getTableName().getString(); - - Map>> properties=new HashMap<>(statement.getProps().size());; - MetaProperties metaProperties = loadStmtProperties(statement.getProps(),properties,table,false); - - boolean isTransformNeeded = TransformClient.checkIsTransformNeeded(metaProperties, schemaName, table, indexName, dataTableName, tenantId, connection); - MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated(); - boolean changingPhoenixTableProperty= evaluateStmtProperties(metaProperties,metaPropertiesEvaluated,table,schemaName,tableName,new MutableBoolean(false)); - - PIndexState newIndexState = statement.getIndexState(); - - if (isAsync && newIndexState != PIndexState.REBUILD) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.ASYNC_NOT_ALLOWED) - .setMessage(" ASYNC building of index is allowed only with REBUILD index state") - .setSchemaName(schemaName).setTableName(indexName).build().buildException(); } - - if (newIndexState == PIndexState.REBUILD) { - newIndexState = PIndexState.BUILDING; + columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes( + columnDefName.getColumnName(), encodedCQ, encodingScheme, isPkColumn); + } catch (QualifierOutOfRangeException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, + columnQualifierBytes, isImmutableRows); + if ( + !isAppendOnlySchema && colDef.getEncodedQualifier() == null + && cqCounter.increment(cqCounterFamily) + ) { + changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily)); + } + if (SchemaUtil.isPKColumn(column)) { + // TODO: remove this constraint? 
+ if ( + pkColumnsIterator.hasNext() && !column.getName().getString() + .equals(pkColumnsIterator.next().getFirst().getColumnName()) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER) + .setSchemaName(schemaName).setTableName(tableName) + .setColumnName(column.getName().getString()).build().buildException(); + } + if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) { + throwIfLastPKOfParentIsVariableLength(parent, schemaName, tableName, colDef); + } + if (!pkColumns.add(column)) { + throw new ColumnAlreadyExistsException(schemaName, tableName, + column.getName().getString()); + } + } + // check for duplicate column + if (isDuplicateColumn(columns, pkColumnNames, column)) { + throw new ColumnAlreadyExistsException(schemaName, tableName, + column.getName().getString()); + } else if (tableType == VIEW) { + viewNewColumnPositions.add(column.getPosition()); + } + if (isPkColumn) { + pkColumnNames.add(column.getName().toString()); + } + if ( + (colDef.getDataType() == PVarbinary.INSTANCE || colDef.getDataType().isArrayType()) + && SchemaUtil.isPKColumn(column) && pkColumnsIterator.hasNext() + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_IN_ROW_KEY) + .setSchemaName(schemaName).setTableName(tableName) + .setColumnName(column.getName().getString()).build().buildException(); + } + if (column.getFamilyName() != null) { + familyNames.put(IndexUtil.getActualColumnFamilyName(column.getFamilyName().getString()), + column.getFamilyName()); + } + } + + // We need a PK definition for a TABLE or mapped VIEW + if ( + !wasPKDefined && pkColumnsNames.isEmpty() && tableType != PTableType.VIEW + && viewType != ViewType.MAPPED + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + if ( + !pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - pkPositionOffset + ) { // Then a column name in the primary key constraint wasn't resolved + Iterator> pkColumnNamesIterator = pkColumnsNames.iterator(); + while (pkColumnNamesIterator.hasNext()) { + ColumnName colName = pkColumnNamesIterator.next().getFirst(); + ColumnDef colDef = findColumnDefOrNull(colDefs, colName); + if (colDef == null) { + throw new ColumnNotFoundException(schemaName, tableName, null, colName.getColumnName()); + } + if (colDef.getColumnDefName().getFamilyName() != null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME) + .setSchemaName(schemaName).setTableName(tableName) + .setColumnName(colDef.getColumnDefName().getColumnName()) + .setFamilyName(colDef.getColumnDefName().getFamilyName()).build().buildException(); + } + } + // The above should actually find the specific one, but just in case... 
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + + if (!statement.getProps().isEmpty()) { + for (String familyName : statement.getProps().keySet()) { + if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) { + if (familyNames.get(familyName) == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY) + .setFamilyName(familyName).build().buildException(); + } else if (statement.getTableType() == PTableType.VIEW) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build() + .buildException(); } - connection.setAutoCommit(false); - // Confirm index table is valid and up-to-date - TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0); - PreparedStatement tableUpsert = null; - try { - if (newIndexState == PIndexState.ACTIVE){ - tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE); - } else { - tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE); - } - tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString()); - tableUpsert.setString(2, schemaName); - tableUpsert.setString(3, indexName); - tableUpsert.setString(4, newIndexState.getSerializedValue()); - tableUpsert.setLong(5, 0); - if (newIndexState == PIndexState.ACTIVE){ - tableUpsert.setLong(6, 0); - } - tableUpsert.execute(); - } finally { - if (tableUpsert != null) { - tableUpsert.close(); - } + } + } + } + throwIfInsufficientColumns(schemaName, tableName, pkColumns, saltBucketNum != null, + multiTenant); + + List>> familyPropList = + Lists.newArrayListWithExpectedSize(familyNames.size()); + populateFamilyPropsList(familyNames, commonFamilyProps, statement, defaultFamilyName, + isLocalIndex, familyPropList); + + // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists + if (SchemaUtil.isMetaTable(schemaName, tableName)) { + // TODO: what about stats for system catalog? + PName newSchemaName = PNameFactory.newName(schemaName); + // Column names and qualifiers and hardcoded for system tables. + PTable table = new PTableImpl.Builder().setType(tableType) + .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP).setIndexDisableTimestamp(0L) + .setSequenceNumber(PTable.INITIAL_SEQ_NUM).setImmutableRows(isImmutableRows) + .setDisableWAL(Boolean.TRUE.equals(disableWAL)).setMultiTenant(false).setStoreNulls(false) + .setViewIndexIdType(viewIndexIdType).setIndexType(indexType).setUpdateCacheFrequency(0) + .setNamespaceMapped(isNamespaceMapped).setAutoPartitionSeqName(autoPartitionSeq) + .setAppendOnlySchema(isAppendOnlySchema).setImmutableStorageScheme(ONE_CELL_PER_COLUMN) + .setQualifierEncodingScheme(NON_ENCODED_QUALIFIERS) + .setBaseColumnCount(QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT) + .setEncodedCQCounter(PTable.EncodedCQCounter.NULL_COUNTER) + .setUseStatsForParallelization(true).setExcludedColumns(ImmutableList. of()) + .setTenantId(tenantId).setSchemaName(newSchemaName) + .setTableName(PNameFactory.newName(tableName)) + .setPkName(PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME)) + .setDefaultFamilyName( + defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName)) + .setRowKeyOrderOptimizable(true).setIndexes(Collections. emptyList()) + .setPhysicalNames(ImmutableList. of()).setColumns(columns.values()) + .setLastDDLTimestamp(0L) + .setIndexWhere( + statement.getWhereClause() == null ? 
null : statement.getWhereClause().toString()) + .setRowKeyMatcher(rowKeyMatcher).setTTL(TTL_NOT_DEFINED).build(); + connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP); + } + + // Update column qualifier counters + if (EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme)) { + // Store the encoded column counter for phoenix entities that have their own hbase + // tables i.e. base tables and indexes. + String schemaNameToUse = + tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName; + String tableNameToUse = + tableType == VIEW ? viewPhysicalTable.getTableName().getString() : tableName; + boolean sharedIndex = tableType == PTableType.INDEX + && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW); + // For local indexes and indexes on views, pass on the the tenant id since all their + // meta-data rows have + // tenant ids in there. + String tenantIdToUse = connection.getTenantId() != null && sharedIndex + ? connection.getTenantId().getString() + : null; + // When a view adds its own columns, then we need to increase the sequence number of the + // base table + // too since we want clients to get the latest PTable of the base table. + for (Entry entry : changedCqCounters.entrySet()) { + try (PreparedStatement linkStatement = + connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) { + linkStatement.setString(1, tenantIdToUse); + linkStatement.setString(2, schemaNameToUse); + linkStatement.setString(3, tableNameToUse); + linkStatement.setString(4, entry.getKey()); + linkStatement.setInt(5, entry.getValue()); + linkStatement.execute(); + } + } + if (tableType == VIEW && !changedCqCounters.isEmpty()) { + try ( + PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM)) { + incrementStatement.setString(1, null); + incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString()); + incrementStatement.setString(3, viewPhysicalTable.getTableName().getString()); + incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1); + incrementStatement.execute(); + } + } + if (connection.getMutationState().toMutations(timestamp).hasNext()) { + tableMetaData + .addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); + connection.rollback(); + } + } + + short nextKeySeq = 0; + + List columnMetadata = Lists.newArrayListWithExpectedSize(columns.size()); + boolean isRegularView = (tableType == PTableType.VIEW && viewType != ViewType.MAPPED); + for (Map.Entry entry : columns.entrySet()) { + PColumn column = entry.getValue(); + final int columnPosition = column.getPosition(); + // For client-side cache, we need to update the column + // set the autoPartition column attributes + if ( + parent != null && parent.getAutoPartitionSeqName() != null + && parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)) + .equals(column) + ) { + entry.setValue(column = new DelegateColumn(column) { + @Override + public byte[] getViewConstant() { + // set to non-null value so that we will generate a Put that + // will be set correctly on the server + return QueryConstants.EMPTY_COLUMN_VALUE_BYTES; } - Long timeStamp = indexRef.getTable().isTransactional() ? 
indexRef.getTimeStamp() : null; - List tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond(); - connection.rollback(); - - if (changingPhoenixTableProperty) { - seqNum = incrementTableSeqNum(table,statement.getTableType(), 0, metaPropertiesEvaluated); - tableMetadata.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); - connection.rollback(); + @Override + public boolean isViewReferenced() { + return true; + } + }); + } else if (isViewColumnReferenced != null) { + if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) { + entry.setValue(column = new DelegateColumn(column) { + @Override + public byte[] getViewConstant() { + return viewColumnConstants[columnPosition]; + } + + @Override + public boolean isViewReferenced() { + return isViewColumnReferenced.get(columnPosition); + } + }); + } else { + entry.setValue(column = new DelegateColumn(column) { + @Override + public boolean isViewReferenced() { + return isViewColumnReferenced.get(columnPosition); + } + }); + } + + // if the base table column is referenced in the view + // or if we are adding a new column during view creation + if ( + isViewColumnReferenced.get(columnPosition) + || viewNewColumnPositions.contains(columnPosition) + ) { + // acquire the mutex using the global physical table + // name to prevent this column from being dropped + // while the view is being created or to prevent + // a conflicting column from being added to a parent + // in case the view creation adds new columns + boolean acquiredMutex = + writeCell(null, parentPhysicalSchemaName, parentPhysicalTableName, column.toString()); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(parentPhysicalSchemaName, + parentPhysicalTableName); + } + acquiredColumnMutexSet.add(column.toString()); + } + } + Short keySeq = SchemaUtil.isPKColumn(column) ? ++nextKeySeq : null; + // Prior to PHOENIX-3534 we were sending the parent table column metadata while creating a + // child view, now that we combine columns by resolving the parent table hierarchy we + // don't need to include the parent table columns while creating a view + // If QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK is true we continue + // to store the parent table column metadata along with the child view metadata + // so that we can rollback the upgrade if required. + if ( + allowSystemCatalogRollback || !isRegularView || columnPosition >= baseTableColumnCount + ) { + addColumnMutation(connection, schemaName, tableName, column, parentTableName, pkName, + keySeq, saltBucketNum != null); + columnMetadata + .addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); + connection.rollback(); + } + } + + // add the columns in reverse order since we reverse the list later + Collections.reverse(columnMetadata); + tableMetaData.addAll(columnMetadata); + String dataTableName = + parent == null || tableType == PTableType.VIEW ? 
null : parent.getTableName().getString(); + PIndexState defaultCreateState; + String defaultCreateStateString = connection.getClientInfo(INDEX_CREATE_DEFAULT_STATE); + if (defaultCreateStateString == null) { + defaultCreateStateString = connection.getQueryServices().getConfiguration() + .get(INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE); + } + defaultCreateState = PIndexState.valueOf(defaultCreateStateString); + if (defaultCreateState == PIndexState.CREATE_DISABLE) { + if (indexType == IndexType.LOCAL || sharedTable) { + defaultCreateState = PIndexState.BUILDING; + } + } + PIndexState indexState = + parent == null || (tableType == PTableType.VIEW || tableType == PTableType.CDC) + ? null + : defaultCreateState; + if (indexState == null && tableProps.containsKey(INDEX_STATE)) { + indexState = PIndexState.fromSerializedValue(tableProps.get(INDEX_STATE).toString()); + } + PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE); + tableUpsert.setString(1, tenantIdStr); + tableUpsert.setString(2, schemaName); + tableUpsert.setString(3, tableName); + tableUpsert.setString(4, tableType.getSerializedValue()); + tableUpsert.setLong(5, PTable.INITIAL_SEQ_NUM); + tableUpsert.setInt(6, position); + if (saltBucketNum != null) { + tableUpsert.setInt(7, saltBucketNum); + } else { + tableUpsert.setNull(7, Types.INTEGER); + } + tableUpsert.setString(8, pkName); + tableUpsert.setString(9, dataTableName); + tableUpsert.setString(10, indexState == null ? null : indexState.getSerializedValue()); + tableUpsert.setBoolean(11, isImmutableRows); + tableUpsert.setString(12, defaultFamilyName); + if (parent != null && parent.getAutoPartitionSeqName() != null && viewStatement == null) { + // set to non-null value so that we will generate a Put that + // will be set correctly on the server + tableUpsert.setString(13, QueryConstants.EMPTY_COLUMN_VALUE); + } else { + tableUpsert.setString(13, viewStatement); + } + tableUpsert.setBoolean(14, disableWAL); + tableUpsert.setBoolean(15, multiTenant); + if (viewType == null) { + tableUpsert.setNull(16, Types.TINYINT); + } else { + tableUpsert.setByte(16, viewType.getSerializedValue()); + } + if (indexType == null) { + tableUpsert.setNull(17, Types.TINYINT); + } else { + tableUpsert.setByte(17, indexType.getSerializedValue()); + } + tableUpsert.setBoolean(18, storeNulls); + if (parent != null && tableType == PTableType.VIEW) { + tableUpsert.setInt(19, parent.getColumns().size()); + } else { + tableUpsert.setInt(19, BASE_TABLE_BASE_COLUMN_COUNT); + } + if (transactionProvider == null) { + tableUpsert.setNull(20, Types.TINYINT); + } else { + tableUpsert.setByte(20, transactionProvider.getCode()); + } + tableUpsert.setLong(21, updateCacheFrequency); + tableUpsert.setBoolean(22, isNamespaceMapped); + if (autoPartitionSeq == null) { + tableUpsert.setNull(23, Types.VARCHAR); + } else { + tableUpsert.setString(23, autoPartitionSeq); + } + tableUpsert.setBoolean(24, isAppendOnlySchema); + if (guidePostsWidth == null) { + tableUpsert.setNull(25, Types.BIGINT); + } else { + tableUpsert.setLong(25, guidePostsWidth); + } + tableUpsert.setByte(26, immutableStorageScheme.getSerializedMetadataValue()); + tableUpsert.setByte(27, encodingScheme.getSerializedMetadataValue()); + if (useStatsForParallelizationProp == null) { + tableUpsert.setNull(28, Types.BOOLEAN); + } else { + tableUpsert.setBoolean(28, useStatsForParallelizationProp); + } + if ( + indexType == IndexType.LOCAL || (parent != null && parent.getType() == PTableType.VIEW + && 
tableType == PTableType.INDEX) + ) { + tableUpsert.setInt(29, viewIndexIdType.getSqlType()); + } else { + tableUpsert.setNull(29, Types.NULL); + } + + if (isChangeDetectionEnabledProp == null) { + tableUpsert.setNull(30, Types.BOOLEAN); + } else { + tableUpsert.setBoolean(30, isChangeDetectionEnabledProp); + } + + if (physicalTableName == null) { + tableUpsert.setNull(31, Types.VARCHAR); + } else { + tableUpsert.setString(31, physicalTableName); + } + + if (schemaVersion == null) { + tableUpsert.setNull(32, Types.VARCHAR); + } else { + tableUpsert.setString(32, schemaVersion); + } + + if (streamingTopicName == null) { + tableUpsert.setNull(33, Types.VARCHAR); + } else { + tableUpsert.setString(33, streamingTopicName); + } + + if (tableType == INDEX && statement.getWhereClause() != null) { + tableUpsert.setString(34, statement.getWhereClause().toString()); + } else { + tableUpsert.setNull(34, Types.VARCHAR); + } + if (maxLookbackAge == null) { + tableUpsert.setNull(35, Types.BIGINT); + } else { + tableUpsert.setLong(35, maxLookbackAge); + } + + if (cdcIncludeScopesStr == null) { + tableUpsert.setNull(36, Types.VARCHAR); + } else { + tableUpsert.setString(36, cdcIncludeScopesStr); + } + + if (ttl == null || ttl == TTL_NOT_DEFINED) { + tableUpsert.setNull(37, Types.VARCHAR); + } else { + tableUpsert.setString(37, String.valueOf(ttl)); + } + + if ( + (rowKeyMatcher == null) || Bytes.compareTo(rowKeyMatcher, HConstants.EMPTY_BYTE_ARRAY) == 0 + ) { + tableUpsert.setNull(38, Types.VARBINARY); + } else { + tableUpsert.setBytes(38, rowKeyMatcher); + } + + tableUpsert.execute(); + + if (asyncCreatedDate != null) { + try (PreparedStatement setAsync = connection.prepareStatement(SET_ASYNC_CREATED_DATE)) { + setAsync.setString(1, tenantIdStr); + setAsync.setString(2, schemaName); + setAsync.setString(3, tableName); + setAsync.setDate(4, asyncCreatedDate); + setAsync.execute(); + } + } else { + Date syncCreatedDate = new Date(EnvironmentEdgeManager.currentTimeMillis()); + try (PreparedStatement setSync = connection.prepareStatement(SET_INDEX_SYNC_CREATED_DATE)) { + setSync.setString(1, tenantIdStr); + setSync.setString(2, schemaName); + setSync.setString(3, tableName); + setSync.setDate(4, syncCreatedDate); + setSync.execute(); + } + } + tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond()); + connection.rollback(); + + /* + * The table metadata must be in the following order: 1) table header row 2) ordered column + * rows 3) parent table header row + */ + Collections.reverse(tableMetaData); + + if (indexType != IndexType.LOCAL) { + splits = SchemaUtil.processSplits(splits, pkColumns, saltBucketNum, + connection.getQueryServices().getProps().getBoolean( + QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, + QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER)); + } + + // Modularized this code for unit testing + PName parentName = + physicalNames != null && physicalNames.size() > 0 ? physicalNames.get(0) : null; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("createTable tableName=" + tableName + " parent=" + + (parent == null ? "" : parent.getTableName() + "-" + parent.getPhysicalName()) + + " parent physical=" + parentName + "-" + + (physicalNames.size() > 0 ? physicalNames.get(0) : "null") + " viewType " + viewType + + allocateIndexId); + } + MetaDataMutationResult result = connection.getQueryServices().createTable(tableMetaData, + viewType == ViewType.MAPPED || allocateIndexId ? 
physicalNames.get(0).getBytes() : null, + tableType, tableProps, familyPropList, splits, isNamespaceMapped, allocateIndexId, + UpgradeUtil.isNoUpgradeSet(connection.getClientInfo()), parent); + MutationCode code = result.getMutationCode(); + try { + if (code != MutationCode.TABLE_NOT_FOUND) { + boolean tableAlreadyExists = + handleCreateTableMutationCode(result, code, statement, schemaName, tableName, parent); + if (tableAlreadyExists) { + return null; + } + } + // If the parent table of the view has the auto partition sequence name attribute, + // set the view statement and relevant partition column attributes correctly + if (parent != null && parent.getAutoPartitionSeqName() != null) { + final PColumn autoPartitionCol = + parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)); + final Long autoPartitionNum = Long.valueOf(result.getAutoPartitionNum()); + columns.put(autoPartitionCol, new DelegateColumn(autoPartitionCol) { + @Override + public byte[] getViewConstant() { + PDataType dataType = autoPartitionCol.getDataType(); + Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE); + byte[] bytes = new byte[dataType.getByteSize() + 1]; + dataType.toBytes(val, bytes, 0); + return bytes; } - MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName, properties, table); - - try { - MutationCode code = result.getMutationCode(); - if (code == MutationCode.TABLE_NOT_FOUND) { - throw new TableNotFoundException(schemaName, indexName); - } - if (code == MutationCode.UNALLOWED_TABLE_MUTATION) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION) - .setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + newIndexState) - .setSchemaName(schemaName).setTableName(indexName).build().buildException(); - } - - if (isTransformNeeded) { - if (indexRef.getTable().getViewIndexId() != null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_TRANSFORM_LOCAL_OR_VIEW_INDEX) - .setSchemaName(schemaName).setTableName(indexName).build().buildException(); - } - try { - TransformClient.addTransform(connection, tenantId, table, metaProperties, seqNum, PTable.TransformType.METADATA_TRANSFORM); - } catch (SQLException ex) { - connection.rollback(); - throw ex; - } - } - - if (code == MutationCode.TABLE_ALREADY_EXISTS) { - if (result.getTable() != null) { // To accommodate connection-less update of index state - addTableToCache(result, false); - // Set so that we get the table below with the potentially modified rowKeyOrderOptimizable flag set - indexRef.setTable(result.getTable()); - if (newIndexState == PIndexState.BUILDING && isAsync) { - if (isRebuildAll) { - List tasks = Task.queryTaskTable(connection, null, schemaName, tableName, PTable.TaskType.INDEX_REBUILD, - tenantId, indexName); - if (tasks == null || tasks.size() == 0) { - Timestamp ts = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - Map props = new HashMap() {{ - put(INDEX_NAME, indexName); - put(REBUILD_ALL, true); - }}; - try { - String json = JacksonUtil.getObjectWriter().writeValueAsString(props); - List sysTaskUpsertMutations = Task.getMutationsForAddTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(connection) - .setTaskType( - PTable.TaskType.INDEX_REBUILD) - .setTenantId(tenantId) - .setSchemaName(schemaName) - .setTableName(dataTableName) - .setTaskStatus( - PTable.TaskStatus.CREATED.toString()) - .setData(json) - .setPriority(null) - .setStartTs(ts) - .setEndTs(null) - 
.setAccessCheckEnabled(true) - .build()); - byte[] rowKey = sysTaskUpsertMutations - .get(0).getRow(); - MetaDataMutationResult metaDataMutationResult = - Task.taskMetaDataCoprocessorExec(connection, rowKey, - new TaskMetaDataServiceCallBack(sysTaskUpsertMutations)); - if (MutationCode.UNABLE_TO_UPSERT_TASK.equals( - metaDataMutationResult.getMutationCode())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_UPSERT_TASK) - .setSchemaName(SYSTEM_SCHEMA_NAME) - .setTableName(SYSTEM_TASK_TABLE).build().buildException(); - } - } catch (IOException e) { - throw new SQLException("Exception happened while adding a System.Task" + e.toString()); - } - } - } else { - try { - tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE); - tableUpsert.setString(1, connection.getTenantId() == null ? - null : - connection.getTenantId().getString()); - tableUpsert.setString(2, schemaName); - tableUpsert.setString(3, indexName); - long beginTimestamp = result.getTable().getTimeStamp(); - tableUpsert.setLong(4, beginTimestamp); - tableUpsert.execute(); - connection.commit(); - } finally { - if (tableUpsert != null) { - tableUpsert.close(); - } - } - } - } - } - } - if (newIndexState == PIndexState.BUILDING && !isAsync) { - PTable index = indexRef.getTable(); - // First delete any existing rows of the index - if (IndexUtil.isGlobalIndex(index) && index.getViewIndexId() == null) { - //for a global index of a normal base table, it's safe to just truncate and - //rebuild. We preserve splits to reduce the amount of splitting we need to do - //during rebuild - org.apache.hadoop.hbase.TableName physicalTableName = - org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes()); - try (Admin admin = connection.getQueryServices().getAdmin()) { - admin.disableTable(physicalTableName); - admin.truncateTable(physicalTableName, true); - //trunateTable automatically re-enables when it's done - } catch (IOException ie) { - String failedTable = physicalTableName.getNameAsString(); - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE). - setMessage("Error when truncating index table [" + failedTable + - "] before rebuilding: " + ie.getMessage()). - setTableName(failedTable).build().buildException(); - } - } else { - Long scn = connection.getSCN(); - long ts = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; - MutationPlan plan = new PostDDLCompiler(connection) - .compile(Collections.singletonList(indexRef), null, - null, Collections.emptyList(), ts); - connection.getQueryServices().updateData(plan); - } - NamedTableNode dataTableNode = NamedTableNode.create(null, - TableName.create(schemaName, dataTableName), Collections.emptyList()); - // Next rebuild the index - connection.setAutoCommit(true); - if (connection.getSCN() != null) { - return buildIndexAtTimeStamp(index, dataTableNode); - } - TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0); - return buildIndex(index, dataTableRef); - } - - return new MutationState(1, 1000, connection); - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(dataTableName, NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } catch (TableNotFoundException e) { - if (!statement.ifExists()) { - throw e; - } - return new MutationState(0, 0, connection); - } finally { - connection.setAutoCommit(wasAutoCommit); - } + @Override + public boolean isViewReferenced() { + return true; + } + }); + String viewPartitionClause = QueryUtil.getViewPartitionClause( + MetaDataUtil.getAutoPartitionColumnName(parent), autoPartitionNum); + if (viewStatement != null) { + viewStatement = viewStatement + " AND " + viewPartitionClause; + } else { + viewStatement = QueryUtil.getViewStatement(parent.getSchemaName().getString(), + parent.getTableName().getString(), viewPartitionClause); + } + } + PName newSchemaName = PNameFactory.newName(schemaName); + /* + * It doesn't hurt for the PTable of views to have the cqCounter. However, views always rely + * on the parent table's counter to dole out encoded column qualifiers. So setting the + * counter as NULL_COUNTER for extra safety. + */ + EncodedCQCounter cqCounterToBe = tableType == PTableType.VIEW ? NULL_COUNTER : cqCounter; + PTable table = new PTableImpl.Builder().setType(tableType).setState(indexState) + .setTimeStamp(timestamp != null ? timestamp : result.getMutationTime()) + .setIndexDisableTimestamp(0L).setSequenceNumber(PTable.INITIAL_SEQ_NUM) + .setImmutableRows(isImmutableRows).setViewStatement(viewStatement) + .setDisableWAL(Boolean.TRUE.equals(disableWAL)).setMultiTenant(multiTenant) + .setStoreNulls(storeNulls).setViewType(viewType).setViewIndexIdType(viewIndexIdType) + .setViewIndexId(result.getViewIndexId()).setIndexType(indexType) + .setTransactionProvider(transactionProvider).setUpdateCacheFrequency(updateCacheFrequency) + .setNamespaceMapped(isNamespaceMapped).setAutoPartitionSeqName(autoPartitionSeq) + .setAppendOnlySchema(isAppendOnlySchema).setImmutableStorageScheme(immutableStorageScheme) + .setQualifierEncodingScheme(encodingScheme).setBaseColumnCount(baseTableColumnCount) + .setEncodedCQCounter(cqCounterToBe) + .setUseStatsForParallelization(useStatsForParallelizationProp) + .setExcludedColumns(ImmutableList. of()).setTenantId(tenantId) + .setSchemaName(newSchemaName).setTableName(PNameFactory.newName(tableName)) + .setPkName(pkName == null ? null : PNameFactory.newName(pkName)) + .setDefaultFamilyName( + defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName)) + .setRowKeyOrderOptimizable(rowKeyOrderOptimizable).setBucketNum(saltBucketNum) + .setIndexes(Collections. emptyList()) + .setParentSchemaName((parent == null) ? null : parent.getSchemaName()) + .setParentTableName((parent == null) ? 
null : parent.getTableName()) + .setPhysicalNames(ImmutableList.copyOf(physicalNames)).setColumns(columns.values()) + .setViewModifiedUpdateCacheFrequency(tableType == PTableType.VIEW && parent != null + && parent.getUpdateCacheFrequency() != updateCacheFrequency) + .setViewModifiedUseStatsForParallelization(tableType == PTableType.VIEW && parent != null + && parent.useStatsForParallelization() != useStatsForParallelizationProp) + .setLastDDLTimestamp( + result.getTable() != null ? result.getTable().getLastDDLTimestamp() : null) + .setIsChangeDetectionEnabled(isChangeDetectionEnabledProp).setSchemaVersion(schemaVersion) + .setExternalSchemaId( + result.getTable() != null ? result.getTable().getExternalSchemaId() : null) + .setStreamingTopicName(streamingTopicName) + .setIndexWhere( + statement.getWhereClause() == null ? null : statement.getWhereClause().toString()) + .setMaxLookbackAge(maxLookbackAge).setCDCIncludeScopes(cdcIncludeScopes) + .setTTL(ttl == null || ttl == TTL_NOT_DEFINED ? ttlFromHierarchy : ttl) + .setRowKeyMatcher(rowKeyMatcher).build(); + result = new MetaDataMutationResult(code, result.getMutationTime(), table, true); + addTableToCache(result, false); + return table; + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNameNode.toString(), + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } finally { + connection.setAutoCommit(wasAutoCommit); + deleteMutexCells(parentPhysicalSchemaName, parentPhysicalTableName, acquiredColumnMutexSet); } - - private void addTableToCache(MetaDataMutationResult result, boolean alwaysHitServerForAncestors) - throws SQLException { - addTableToCache(result, alwaysHitServerForAncestors, - TransactionUtil.getResolvedTime(connection, result)); + } + + private boolean isDuplicateColumn(LinkedHashMap columns, + Set pkColumnNames, PColumn column) { + // either column name is same within same CF or column name within + // default CF is same as any of PK column + return columns.put(column, column) != null || (column.getFamilyName() != null + && DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().toString()) + && pkColumnNames.contains(column.getName().toString())); + } + + private void verifyChangeDetectionTableType(PTableType tableType, + Boolean isChangeDetectionEnabledProp) throws SQLException { + if (isChangeDetectionEnabledProp != null && isChangeDetectionEnabledProp) { + if (tableType != TABLE && tableType != VIEW) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CHANGE_DETECTION_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY).build() + .buildException(); + } } - - private void addTableToCache(MetaDataMutationResult result, boolean alwaysHitServerForAncestors, - long timestamp) throws SQLException { - addColumnsIndexesAndLastDDLTimestampsFromAncestors(result, null, - false, alwaysHitServerForAncestors); - updateIndexesWithAncestorMap(result); - connection.addTable(result.getTable(), timestamp); + } + + private QualifierEncodingScheme getEncodingScheme(Map tableProps, + String schemaName, String tableName, TransactionFactory.Provider transactionProvider) + throws SQLException { + QualifierEncodingScheme encodingScheme = null; + Byte encodingSchemeSerializedByte = + (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps); + if (encodingSchemeSerializedByte == null) { + if (tableProps.containsKey(ENCODING_SCHEME)) { + encodingSchemeSerializedByte = QualifierEncodingScheme + .valueOf(((String) tableProps.get(ENCODING_SCHEME))).getSerializedMetadataValue(); + } + } + if (encodingSchemeSerializedByte 
== null) { + // Ignore default if transactional and column encoding is not supported (as with OMID) + if ( + transactionProvider == null || !transactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) + ) { + encodingSchemeSerializedByte = (byte) connection.getQueryServices().getProps().getInt( + QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, + QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES); + encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte); + } else { + encodingScheme = NON_ENCODED_QUALIFIERS; + } + } else { + encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte); + if ( + encodingScheme != NON_ENCODED_QUALIFIERS && transactionProvider != null + && transactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER).setSchemaName(schemaName) + .setTableName(tableName).setMessage(transactionProvider.name()).build() + .buildException(); + } } - private void addFunctionToCache(MetaDataMutationResult result) throws SQLException { - for (PFunction function: result.getFunctions()) { - connection.addFunction(function); + return encodingScheme; + } + + private void checkImmutableStorageSchemeForIndex( + ImmutableStorageScheme immutableStorageSchemeProp, String schemaName, String tableName, + TransactionFactory.Provider transactionProvider) throws SQLException { + if ( + immutableStorageSchemeProp != ONE_CELL_PER_COLUMN && transactionProvider != null + && transactionProvider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.COLUMN_ENCODING) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.UNSUPPORTED_STORAGE_FORMAT_FOR_TXN_PROVIDER).setSchemaName(schemaName) + .setTableName(tableName).setMessage(transactionProvider.name()).build().buildException(); + } + } + + /* + * This method handles mutation codes sent by phoenix server, except for TABLE_NOT_FOUND which is + * considered to be a success code. If TABLE_ALREADY_EXISTS in hbase, we don't need to add it in + * ConnectionQueryServices and we return result as true. However if code is NEWER_TABLE_FOUND and + * it does not exists in statement then we return false because we need to add it + * ConnectionQueryServices. For other mutation codes it throws related SQLException. If server is + * throwing new mutation code which is not being handled by client then it throws SQLException + * stating the server side Mutation code. + */ + @VisibleForTesting + public boolean handleCreateTableMutationCode(MetaDataMutationResult result, MutationCode code, + CreateTableStatement statement, String schemaName, String tableName, PTable parent) + throws SQLException { + switch (code) { + case TABLE_ALREADY_EXISTS: + if (result.getTable() != null) { + addTableToCache(result, false); + } + if (!statement.ifNotExists()) { + throw new TableAlreadyExistsException(schemaName, tableName, result.getTable()); + } + return true; + case NEWER_TABLE_FOUND: + // Add table to ConnectionQueryServices so it's cached, but don't add + // it to this connection as we can't see it. 
+ if (!statement.ifNotExists()) { + throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable()); } + return false; + case UNALLOWED_TABLE_MUTATION: + throwsSQLExceptionUtil("CANNOT_MUTATE_TABLE", schemaName, tableName); + case CONCURRENT_TABLE_MUTATION: + addTableToCache(result, false); + throw new ConcurrentTableMutationException(schemaName, tableName); + case AUTO_PARTITION_SEQUENCE_NOT_FOUND: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_PARTITION_SEQUENCE_UNDEFINED) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + case CANNOT_COERCE_AUTO_PARTITION_ID: + case UNABLE_TO_CREATE_CHILD_LINK: + case PARENT_TABLE_NOT_FOUND: + case TABLE_NOT_IN_REGION: + throwsSQLExceptionUtil(String.valueOf(code), schemaName, tableName); + case TOO_MANY_INDEXES: + case UNABLE_TO_UPDATE_PARENT_TABLE: + throwsSQLExceptionUtil(String.valueOf(code), + SchemaUtil.getSchemaNameFromFullName(parent.getPhysicalName().getString()), + SchemaUtil.getTableNameFromFullName(parent.getPhysicalName().getString())); + case ERROR_WRITING_TO_SCHEMA_REGISTRY: + throw new SQLExceptionInfo.Builder(ERROR_WRITING_TO_SCHEMA_REGISTRY) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + default: + // Cannot use SQLExecptionInfo here since not all mutation codes have their + // corresponding codes in the enum SQLExceptionCode + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEXPECTED_MUTATION_CODE) + .setSchemaName(schemaName).setTableName(tableName).setMessage("mutation code: " + code) + .build().buildException(); } - - private void addSchemaToCache(MetaDataMutationResult result) throws SQLException { - connection.addSchema(result.getSchema()); + } + + private void throwsSQLExceptionUtil(String code, String schemaName, String tableName) + throws SQLException { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.valueOf(code)).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + + private static boolean isPkColumn(PrimaryKeyConstraint pkConstraint, ColumnDef colDef) { + return colDef.isPK() + || (pkConstraint != null && pkConstraint.contains(colDef.getColumnDefName())); + } + + /** + * A table can be a parent table to tenant-specific tables if all of the following conditions are + * true: + *
+   * FOR TENANT-SPECIFIC TABLES WITH TENANT_TYPE_ID SPECIFIED:
+   * 1. It has 3 or more PK columns AND
+   * 2. First PK (tenant id) column is not nullible AND
+   * 3. Firsts PK column's data type is either VARCHAR or CHAR AND
+   * 4. Second PK (tenant type id) column is not nullible AND
+   * 5. Second PK column data type is either VARCHAR or CHAR
+   *
+   * FOR TENANT-SPECIFIC TABLES WITH NO TENANT_TYPE_ID SPECIFIED:
+   * 1. It has 2 or more PK columns AND
+   * 2. First PK (tenant id) column is not nullible AND
+   * 3. Firsts PK column's data type is either VARCHAR or CHAR
+   *
+ */ + private static void throwIfInsufficientColumns(String schemaName, String tableName, + Collection columns, boolean isSalted, boolean isMultiTenant) throws SQLException { + if (!isMultiTenant) { + return; } - - private void throwIfLastPKOfParentIsVariableLength(PTable parent, String viewSchemaName, String viewName, ColumnDef col) throws SQLException { - // if the last pk column is variable length then we read all the - // bytes of the rowkey without looking for a separator byte see - // https://issues.apache.org/jira/browse/PHOENIX-978?focusedCommentId=14617847&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-14617847 - // so we cannot add a pk column to a view if the last pk column of the parent is variable length - if (isLastPKVariableLength(parent)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MODIFY_VIEW_PK) - .setSchemaName(viewSchemaName) - .setTableName(viewName) - .setColumnName(col.getColumnDefName().getColumnName()) - .build().buildException(); } + int nPKColumns = columns.size() - (isSalted ? 1 : 0); + if (nPKColumns < 2) { + throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - - private boolean isLastPKVariableLength(PTable table) { - List pkColumns = table.getPKColumns(); - return !pkColumns.get(pkColumns.size()-1).getDataType().isFixedWidth(); + Iterator iterator = columns.iterator(); + if (isSalted) { + iterator.next(); } - - private PTable getParentOfView(PTable view) throws SQLException { - return connection - .getTable(new PTableKey(view.getTenantId(), view.getParentName().getString())); + // Tenant ID must be VARCHAR or CHAR and be NOT NULL + // NOT NULL is a requirement, since otherwise the table key would conflict + // potentially with the global table definition. 
+ PColumn tenantIdCol = iterator.next(); + if (tenantIdCol.isNullable()) { + throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - - public MutationState createSchema(CreateSchemaStatement create) throws SQLException { - boolean wasAutoCommit = connection.getAutoCommit(); - connection.rollback(); - try { - if (!SchemaUtil.isNamespaceMappingEnabled(null, - connection.getQueryServices() - .getProps())) { throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CREATE_SCHEMA_NOT_ALLOWED).setSchemaName(create.getSchemaName()) - .build().buildException(); } - boolean isIfNotExists = create.isIfNotExists(); - PSchema schema = new PSchema(create.getSchemaName()); - // Use SchemaName from PSchema object to get the normalized SchemaName - // See PHOENIX-4424 for details - validateSchema(schema.getSchemaName()); - connection.setAutoCommit(false); - List schemaMutations; - - try (PreparedStatement schemaUpsert = connection.prepareStatement(CREATE_SCHEMA)) { - schemaUpsert.setString(1, schema.getSchemaName()); - schemaUpsert.setString(2, MetaDataClient.EMPTY_TABLE); - schemaUpsert.execute(); - schemaMutations = connection.getMutationState().toMutations(null).next().getSecond(); - connection.rollback(); - } - MetaDataMutationResult result = connection.getQueryServices().createSchema(schemaMutations, - schema.getSchemaName()); - MutationCode code = result.getMutationCode(); - try { - switch (code) { - case SCHEMA_ALREADY_EXISTS: - if (result.getSchema() != null) { - addSchemaToCache(result); - } - if (!isIfNotExists) { - throw new SchemaAlreadyExistsException(schema.getSchemaName()); - } - break; - case NEWER_SCHEMA_FOUND: - throw new NewerSchemaAlreadyExistsException(schema.getSchemaName()); - default: - result = new MetaDataMutationResult(code, schema, result.getMutationTime()); - addSchemaToCache(result); - } - } catch(Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - } finally { - connection.setAutoCommit(wasAutoCommit); - } - return new MutationState(0, 0, connection); + } + + public MutationState dropTable(DropTableStatement statement) throws SQLException { + String schemaName = + connection.getSchema() != null && statement.getTableName().getSchemaName() == null + ? 
connection.getSchema() + : statement.getTableName().getSchemaName(); + String tableName = statement.getTableName().getTableName(); + return dropTable(schemaName, tableName, null, statement.getTableType(), statement.ifExists(), + statement.cascade(), statement.getSkipAddingParentColumns()); + } + + public MutationState dropFunction(DropFunctionStatement statement) throws SQLException { + return dropFunction(statement.getFunctionName(), statement.ifExists()); + } + + public MutationState dropIndex(DropIndexStatement statement) throws SQLException { + String schemaName = statement.getTableName().getSchemaName(); + String tableName = statement.getIndexName().getName(); + String parentTableName = statement.getTableName().getTableName(); + return dropTable(schemaName, tableName, parentTableName, PTableType.INDEX, statement.ifExists(), + false, false); + } + + public MutationState dropCDC(DropCDCStatement statement) throws SQLException { + String schemaName = statement.getTableName().getSchemaName(); + String cdcTableName = statement.getCdcObjName().getName(); + String parentTableName = statement.getTableName().getTableName(); + // Dropping the virtual CDC Table + dropTable(schemaName, cdcTableName, parentTableName, PTableType.CDC, statement.ifExists(), + false, false); + + String indexName = CDCUtil.getCDCIndexName(statement.getCdcObjName().getName()); + // Dropping the uncovered index associated with the CDC Table + try { + return dropTable(schemaName, indexName, parentTableName, PTableType.INDEX, + statement.ifExists(), false, false); + } catch (SQLException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.fromErrorCode(e.getErrorCode())) + .setTableName(statement.getCdcObjName().getName()).setRootCause(e.getCause()).build() + .buildException(); } - - private void validateSchema(String schemaName) throws SQLException { - if (SchemaUtil.NOT_ALLOWED_SCHEMA_LIST.contains(schemaName)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.SCHEMA_NOT_ALLOWED) - .setSchemaName(schemaName).build().buildException(); } + } + + private MutationState dropFunction(String functionName, boolean ifExists) throws SQLException { + connection.rollback(); + boolean wasAutoCommit = connection.getAutoCommit(); + try { + PName tenantId = connection.getTenantId(); + byte[] key = SchemaUtil.getFunctionKey( + tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(), + Bytes.toBytes(functionName)); + Long scn = connection.getSCN(); + long clientTimeStamp = scn == null ? 
HConstants.LATEST_TIMESTAMP : scn; + try { + PFunction function = + connection.getMetaDataCache().getFunction(new PTableKey(tenantId, functionName)); + if (function.isTemporaryFunction()) { + connection.removeFunction(tenantId, functionName, clientTimeStamp); + return new MutationState(0, 0, connection); + } + } catch (FunctionNotFoundException e) { + + } + List functionMetaData = Lists.newArrayListWithExpectedSize(2); + Delete functionDelete = new Delete(key, clientTimeStamp); + functionMetaData.add(functionDelete); + MetaDataMutationResult result = + connection.getQueryServices().dropFunction(functionMetaData, ifExists); + MutationCode code = result.getMutationCode(); + switch (code) { + case FUNCTION_NOT_FOUND: + if (!ifExists) { + throw new FunctionNotFoundException(functionName); + } + break; + default: + connection.removeFunction(tenantId, functionName, result.getMutationTime()); + break; + } + return new MutationState(0, 0, connection); + } finally { + connection.setAutoCommit(wasAutoCommit); } - - public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException { - connection.rollback(); - boolean wasAutoCommit = connection.getAutoCommit(); - try { - PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName()); - String schemaName = schema.getSchemaName(); - boolean ifExists = executableDropSchemaStatement.ifExists(); - byte[] key = SchemaUtil.getSchemaKey(schemaName); - - Long scn = connection.getSCN(); - long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - List schemaMetaData = Lists.newArrayListWithExpectedSize(2); - Delete schemaDelete = new Delete(key, clientTimeStamp); - schemaMetaData.add(schemaDelete); - MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName); - MutationCode code = result.getMutationCode(); - schema = result.getSchema(); - try { - switch (code) { - case SCHEMA_NOT_FOUND: - if (!ifExists) { - throw new SchemaNotFoundException(schemaName); - } - break; - case NEWER_SCHEMA_FOUND: - throw new NewerSchemaAlreadyExistsException(schemaName); - case TABLES_EXIST_ON_SCHEMA: - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName) - .build().buildException(); - default: - connection.removeSchema(schema, result.getMutationTime()); - break; - } - } catch (Throwable e) { - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, NUM_METADATA_LOOKUP_FAILURES, 1); - throw e; - } - return new MutationState(0, 0, connection); - } finally { - connection.setAutoCommit(wasAutoCommit); - } + } + + MutationState dropTable(String schemaName, String tableName, String parentTableName, + PTableType tableType, boolean ifExists, boolean cascade, boolean skipAddingParentColumns) + throws SQLException { + // Checking the parent table whether exists + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + try { + PTable ptable = connection.getTable(fullTableName); + if ( + parentTableName != null && !parentTableName.equals(ptable.getParentTableName().getString()) + ) { + throw new SQLExceptionInfo.Builder(PARENT_TABLE_NOT_FOUND).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + } catch (TableNotFoundException e) { + if (!ifExists && !e.isThrownToForceReReadForTransformingTable()) { + if (tableType == PTableType.INDEX) + throw new IndexNotFoundException(e.getSchemaName(), e.getTableName(), e.getTimeStamp()); + throw e; + } } - public MutationState 
useSchema(UseSchemaStatement useSchemaStatement) throws SQLException { - // As we allow default namespace mapped to empty schema, so this is to reset schema in connection - if (useSchemaStatement.getSchemaName().equals(StringUtil.EMPTY_STRING)) { - connection.setSchema(null); - } else { - FromCompiler.getResolverForSchema(useSchemaStatement, connection) - .resolveSchema(useSchemaStatement.getSchemaName()); - connection.setSchema(useSchemaStatement.getSchemaName()); - } - return new MutationState(0, 0, connection); + connection.rollback(); + boolean wasAutoCommit = connection.getAutoCommit(); + PName tenantId = connection.getTenantId(); + String tenantIdStr = tenantId == null ? null : tenantId.getString(); + try { + byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName); + Long scn = connection.getSCN(); + long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + List tableMetaData = Lists.newArrayListWithExpectedSize(2); + Delete tableDelete = new Delete(key, clientTimeStamp); + tableMetaData.add(tableDelete); + boolean hasViewIndexTable = false; + if (parentTableName != null) { + byte[] linkKey = + MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName); + Delete linkDelete = new Delete(linkKey, clientTimeStamp); + tableMetaData.add(linkDelete); + } + MetaDataMutationResult result = + connection.getQueryServices().dropTable(tableMetaData, tableType, cascade); + MutationCode code = result.getMutationCode(); + PTable table = result.getTable(); + switch (code) { + case TABLE_NOT_FOUND: + if (!ifExists) { + throw new TableNotFoundException(schemaName, tableName); + } + break; + case NEWER_TABLE_FOUND: + throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable()); + case UNALLOWED_TABLE_MUTATION: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + case UNABLE_TO_DELETE_CHILD_LINK: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_DELETE_CHILD_LINK) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + default: + connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), + parentTableName, result.getMutationTime()); + + if (table != null) { + boolean dropMetaData = connection.getQueryServices().getProps() + .getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + long ts = (scn == null ? 
result.getMutationTime() : scn); + List tableRefs = + Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size()); + connection.setAutoCommit(true); + if (tableType == PTableType.VIEW) { + for (PTable index : table.getIndexes()) { + tableRefs.add(new TableRef(null, index, ts, false)); + } + } else { + dropMetaData = + result.getTable().getViewIndexId() == null && connection.getQueryServices() + .getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + // Create empty table and schema - they're only used to get the name from + // PName name, PTableType type, long timeStamp, long sequenceNumber, List + // columns + // All multi-tenant tables have a view index table, so no need to check in that case + if (parentTableName == null) { + hasViewIndexTable = true;// keeping always true for deletion of stats if view index + // present + // or not + MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), + table.isNamespaceMapped()); + byte[] viewIndexPhysicalName = + MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()); + if (!dropMetaData) { + // we need to drop rows only when actually view index exists + try (Admin admin = connection.getQueryServices().getAdmin()) { + hasViewIndexTable = admin.tableExists( + org.apache.hadoop.hbase.TableName.valueOf(viewIndexPhysicalName)); + } catch (IOException e1) { + // absorbing as it is not critical check + } + } + } + if (tableType == PTableType.TABLE && (table.isMultiTenant() || hasViewIndexTable)) { + if (hasViewIndexTable) { + byte[] viewIndexPhysicalName = + MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()); + String viewIndexSchemaName = + SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName); + String viewIndexTableName = + SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName); + PName viewIndexName = PNameFactory + .newName(SchemaUtil.getTableName(viewIndexSchemaName, viewIndexTableName)); + + PTable viewIndexTable = new PTableImpl.Builder().setName(viewIndexName) + .setKey(new PTableKey(tenantId, viewIndexName.getString())) + .setSchemaName(PNameFactory.newName(viewIndexSchemaName)) + .setTableName(PNameFactory.newName(viewIndexTableName)).setType(PTableType.VIEW) + .setViewType(ViewType.MAPPED).setTimeStamp(ts) + .setPkColumns(Collections. emptyList()) + .setAllColumns(Collections. emptyList()) + .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) + .setIndexes(Collections. emptyList()) + .setFamilyAttributes(table.getColumnFamilies()) + .setPhysicalNames(Collections. emptyList()) + .setNamespaceMapped(table.isNamespaceMapped()) + .setImmutableStorageScheme(table.getImmutableStorageScheme()) + .setQualifierEncodingScheme(table.getEncodingScheme()) + .setUseStatsForParallelization(table.useStatsForParallelization()).build(); + tableRefs.add(new TableRef(null, viewIndexTable, ts, false)); + } + } + tableRefs.add(new TableRef(null, table, ts, false)); + // TODO: Let the standard mutable secondary index maintenance handle this? + for (PTable index : table.getIndexes()) { + tableRefs.add(new TableRef(null, index, ts, false)); + } + } + if (!dropMetaData) { + MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, + Collections. emptyList(), ts); + // Delete everything in the column. 
You'll still be able to do queries at earlier + // timestamps + return connection.getQueryServices().updateData(plan); + } + } + break; + } + return new MutationState(0, 0, connection); + } finally { + connection.setAutoCommit(wasAutoCommit); } - - private MetaProperties loadStmtProperties(ListMultimap> stmtProperties, Map>> properties, PTable table, boolean removeTableProps) - throws SQLException { - MetaProperties metaProperties = new MetaProperties(); - for (String family : stmtProperties.keySet()) { - List> origPropsList = stmtProperties.get(family); - List> propsList = Lists.newArrayListWithExpectedSize(origPropsList.size()); - for (Pair prop : origPropsList) { - String propName = prop.getFirst(); - if (TableProperty.isPhoenixTableProperty(propName)) { - TableProperty tableProp = TableProperty.valueOf(propName); - tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), table.getType()); - Object value = tableProp.getValue(prop.getSecond()); - if (propName.equals(PTable.IS_IMMUTABLE_ROWS_PROP_NAME)) { - metaProperties.setImmutableRowsProp((Boolean)value); - } else if (propName.equals(PhoenixDatabaseMetaData.MULTI_TENANT)) { - metaProperties.setMultiTenantProp((Boolean)value); - } else if (propName.equals(DISABLE_WAL)) { - metaProperties.setDisableWALProp((Boolean)value); - } else if (propName.equals(STORE_NULLS)) { - metaProperties.setStoreNullsProp((Boolean)value); - } else if (propName.equals(TRANSACTIONAL)) { - metaProperties.setIsTransactionalProp((Boolean)value); - } else if (propName.equals(TRANSACTION_PROVIDER)) { - metaProperties.setTransactionProviderProp((TransactionFactory.Provider) value); - } else if (propName.equals(UPDATE_CACHE_FREQUENCY)) { - metaProperties.setUpdateCacheFrequencyProp((Long)value); - } else if (propName.equals(PHYSICAL_TABLE_NAME)) { - metaProperties.setPhysicalTableNameProp((String) value); - } else if (propName.equals(GUIDE_POSTS_WIDTH)) { - metaProperties.setGuidePostWidth((Long)value); - } else if (propName.equals(APPEND_ONLY_SCHEMA)) { - metaProperties.setAppendOnlySchemaProp((Boolean) value); - } else if (propName.equalsIgnoreCase(IMMUTABLE_STORAGE_SCHEME)) { - metaProperties.setImmutableStorageSchemeProp((ImmutableStorageScheme)value); - } else if (propName.equalsIgnoreCase(COLUMN_ENCODED_BYTES)) { - metaProperties.setColumnEncodedBytesProp(QualifierEncodingScheme.fromSerializedValue((byte)value)); - } else if (propName.equalsIgnoreCase(USE_STATS_FOR_PARALLELIZATION)) { - metaProperties.setUseStatsForParallelizationProp((Boolean)value); - } else if (propName.equalsIgnoreCase(TTL)) { - metaProperties.setTTL((Integer) value); - } else if (propName.equalsIgnoreCase(CHANGE_DETECTION_ENABLED)) { - metaProperties.setChangeDetectionEnabled((Boolean) value); - } else if (propName.equalsIgnoreCase(PHYSICAL_TABLE_NAME)) { - metaProperties.setPhysicalTableName((String) value); - } else if (propName.equalsIgnoreCase(SCHEMA_VERSION)) { - metaProperties.setSchemaVersion((String) value); - } else if (propName.equalsIgnoreCase(STREAMING_TOPIC_NAME)) { - metaProperties.setStreamingTopicName((String) value); - } else if (propName.equalsIgnoreCase(MAX_LOOKBACK_AGE)) { - metaProperties.setMaxLookbackAge((Long) value); - } - } - // if removeTableProps is true only add the property if it is not an HTable or Phoenix Table property - if (!removeTableProps || (!TableProperty.isPhoenixTableProperty(propName) && !MetaDataUtil.isHTableProperty(propName))) { - propsList.add(prop); - } - } - properties.put(family, propsList); - } - return 
metaProperties; + } + + private MutationCode processMutationResult(String schemaName, String tableName, + MetaDataMutationResult result) throws SQLException { + final MutationCode mutationCode = result.getMutationCode(); + PName tenantId = connection.getTenantId(); + switch (mutationCode) { + case TABLE_NOT_FOUND: + // Only called for add/remove column so parentTableName will always be null + connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), null, + HConstants.LATEST_TIMESTAMP); + throw new TableNotFoundException(schemaName, tableName); + case UNALLOWED_TABLE_MUTATION: + String columnName = null; + String familyName = null; + String msg = null; + // TODO: better to return error code + if (result.getColumnName() != null) { + familyName = + result.getFamilyName() == null ? null : Bytes.toString(result.getFamilyName()); + columnName = Bytes.toString(result.getColumnName()); + msg = "Cannot add/drop column referenced by VIEW"; + } + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE) + .setSchemaName(schemaName).setTableName(tableName).setFamilyName(familyName) + .setColumnName(columnName).setMessage(msg).build().buildException(); + case UNALLOWED_SCHEMA_MUTATION: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_OR_ALTER_TTL) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + case NO_OP: + case COLUMN_ALREADY_EXISTS: + case COLUMN_NOT_FOUND: + break; + case CONCURRENT_TABLE_MUTATION: + addTableToCache(result, false); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), + connection)); + } + throw new ConcurrentTableMutationException(schemaName, tableName); + case NEWER_TABLE_FOUND: + // TODO: update cache? 
+ // if (result.getTable() != null) { + // connection.addTable(result.getTable()); + // } + throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable()); + case NO_PK_COLUMNS: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + case TABLE_ALREADY_EXISTS: + break; + case ERROR_WRITING_TO_SCHEMA_REGISTRY: + throw new SQLExceptionInfo.Builder(ERROR_WRITING_TO_SCHEMA_REGISTRY) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + default: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNEXPECTED_MUTATION_CODE) + .setSchemaName(schemaName).setTableName(tableName) + .setMessage("mutation code: " + mutationCode).build().buildException(); } - - private boolean evaluateStmtProperties(MetaProperties metaProperties, - MetaPropertiesEvaluated metaPropertiesEvaluated, - PTable table, String schemaName, String tableName, - MutableBoolean areWeIntroducingTTLAtThisLevel) - throws SQLException { - boolean changingPhoenixTableProperty = false; - - if (metaProperties.getImmutableRowsProp() != null) { - if (metaProperties.getImmutableRowsProp().booleanValue() != table.isImmutableRows()) { - if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - metaPropertiesEvaluated.setIsImmutableRows(metaProperties.getImmutableRowsProp()); - changingPhoenixTableProperty = true; - } - } - - if (metaProperties.getImmutableRowsProp() != null && table.getType() != INDEX) { - if (metaProperties.getImmutableRowsProp().booleanValue() != table.isImmutableRows()) { - metaPropertiesEvaluated.setIsImmutableRows(metaProperties.getImmutableRowsProp()); - changingPhoenixTableProperty = true; - } - } - - if (metaProperties.getMultiTenantProp() != null) { - if (metaProperties.getMultiTenantProp().booleanValue() != table.isMultiTenant()) { - metaPropertiesEvaluated.setMultiTenant(metaProperties.getMultiTenantProp()); - changingPhoenixTableProperty = true; - } - } - - if (metaProperties.getDisableWALProp() != null) { - if (metaProperties.getDisableWALProp().booleanValue() != table.isWALDisabled()) { - metaPropertiesEvaluated.setDisableWAL(metaProperties.getDisableWALProp()); - changingPhoenixTableProperty = true; - } + return mutationCode; + } + + private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, + MetaPropertiesEvaluated metaPropertiesEvaluated) throws SQLException { + return incrementTableSeqNum(table, expectedType, columnCountDelta, + metaPropertiesEvaluated.getIsTransactional(), + metaPropertiesEvaluated.getTransactionProvider(), + metaPropertiesEvaluated.getUpdateCacheFrequency(), + metaPropertiesEvaluated.getIsImmutableRows(), metaPropertiesEvaluated.getDisableWAL(), + metaPropertiesEvaluated.getMultiTenant(), metaPropertiesEvaluated.getStoreNulls(), + metaPropertiesEvaluated.getGuidePostWidth(), metaPropertiesEvaluated.getAppendOnlySchema(), + metaPropertiesEvaluated.getImmutableStorageScheme(), + metaPropertiesEvaluated.getUseStatsForParallelization(), metaPropertiesEvaluated.getTTL(), + metaPropertiesEvaluated.isChangeDetectionEnabled(), + metaPropertiesEvaluated.getPhysicalTableName(), metaPropertiesEvaluated.getSchemaVersion(), + metaPropertiesEvaluated.getColumnEncodedBytes(), + 
metaPropertiesEvaluated.getStreamingTopicName(), metaPropertiesEvaluated.getMaxLookbackAge()); + } + + private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, + Boolean isTransactional, Long updateCacheFrequency, String physicalTableName, + String schemaVersion, QualifierEncodingScheme columnEncodedBytes) throws SQLException { + return incrementTableSeqNum(table, expectedType, columnCountDelta, isTransactional, null, + updateCacheFrequency, null, null, null, null, -1L, null, null, null, null, false, + physicalTableName, schemaVersion, columnEncodedBytes, null, null); + } + + private long incrementTableSeqNum(PTable table, PTableType expectedType, int columnCountDelta, + Boolean isTransactional, TransactionFactory.Provider transactionProvider, + Long updateCacheFrequency, Boolean isImmutableRows, Boolean disableWAL, Boolean isMultiTenant, + Boolean storeNulls, Long guidePostWidth, Boolean appendOnlySchema, + ImmutableStorageScheme immutableStorageScheme, Boolean useStatsForParallelization, Integer ttl, + Boolean isChangeDetectionEnabled, String physicalTableName, String schemaVersion, + QualifierEncodingScheme columnEncodedBytes, String streamingTopicName, Long maxLookbackAge) + throws SQLException { + String schemaName = table.getSchemaName().getString(); + String tableName = table.getTableName().getString(); + // Ordinal position is 1-based and we don't count SALT column in ordinal position + int totalColumnCount = table.getColumns().size() + (table.getBucketNum() == null ? 0 : -1); + final long seqNum = table.getSequenceNumber() + 1; + String tenantId = + connection.getTenantId() == null ? null : connection.getTenantId().getString(); + PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE); + try { + tableUpsert.setString(1, tenantId); + tableUpsert.setString(2, schemaName); + tableUpsert.setString(3, tableName); + tableUpsert.setString(4, expectedType.getSerializedValue()); + tableUpsert.setLong(5, seqNum); + tableUpsert.setInt(6, totalColumnCount + columnCountDelta); + tableUpsert.execute(); + } finally { + tableUpsert.close(); + } + if (isImmutableRows != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_ROWS, + isImmutableRows); + } + if (disableWAL != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, DISABLE_WAL, disableWAL); + } + if (isMultiTenant != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, MULTI_TENANT, + isMultiTenant); + } + if (storeNulls != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, STORE_NULLS, storeNulls); + } + if (isTransactional != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, TRANSACTIONAL, + isTransactional); + } + if (transactionProvider != null) { + mutateByteProperty(connection, tenantId, schemaName, tableName, TRANSACTION_PROVIDER, + transactionProvider.getCode()); + } + if (updateCacheFrequency != null) { + mutateLongProperty(connection, tenantId, schemaName, tableName, UPDATE_CACHE_FREQUENCY, + updateCacheFrequency); + } + if (guidePostWidth == null || guidePostWidth >= 0) { + mutateLongProperty(connection, tenantId, schemaName, tableName, GUIDE_POSTS_WIDTH, + guidePostWidth); + } + if (appendOnlySchema != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, APPEND_ONLY_SCHEMA, + appendOnlySchema); + } + if (columnEncodedBytes != null) { + mutateByteProperty(connection, tenantId, schemaName, tableName, ENCODING_SCHEME, + 
columnEncodedBytes.getSerializedMetadataValue()); + } + if (immutableStorageScheme != null) { + mutateStringProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_STORAGE_SCHEME, + immutableStorageScheme.name()); + } + if (useStatsForParallelization != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, + USE_STATS_FOR_PARALLELIZATION, useStatsForParallelization); + } + if (ttl != null) { + mutateStringProperty(connection, tenantId, schemaName, tableName, TTL, + ttl == TTL_NOT_DEFINED ? null : String.valueOf(ttl)); + } + if (isChangeDetectionEnabled != null) { + mutateBooleanProperty(connection, tenantId, schemaName, tableName, CHANGE_DETECTION_ENABLED, + isChangeDetectionEnabled); + } + if (!Strings.isNullOrEmpty(physicalTableName)) { + mutateStringProperty(connection, tenantId, schemaName, tableName, PHYSICAL_TABLE_NAME, + physicalTableName); + } + if (!Strings.isNullOrEmpty(schemaVersion)) { + mutateStringProperty(connection, tenantId, schemaName, tableName, SCHEMA_VERSION, + schemaVersion); + } + if (!Strings.isNullOrEmpty(streamingTopicName)) { + mutateStringProperty(connection, tenantId, schemaName, tableName, STREAMING_TOPIC_NAME, + streamingTopicName); + } + if (maxLookbackAge != null) { + mutateLongProperty(connection, tenantId, schemaName, tableName, MAX_LOOKBACK_AGE, + maxLookbackAge); + } + return seqNum; + } + + public static void mutateTransformProperties(Connection connection, String tenantId, + String schemaName, String tableName, String physicalTableName, + ImmutableStorageScheme immutableStorageScheme, QualifierEncodingScheme columnEncodedBytes) + throws SQLException { + if (columnEncodedBytes != null) { + mutateByteProperty(connection, tenantId, schemaName, tableName, ENCODING_SCHEME, + columnEncodedBytes.getSerializedMetadataValue()); + } + if (immutableStorageScheme != null) { + mutateByteProperty(connection, tenantId, schemaName, tableName, IMMUTABLE_STORAGE_SCHEME, + immutableStorageScheme.getSerializedMetadataValue()); + } + if (!Strings.isNullOrEmpty(physicalTableName)) { + mutateStringProperty(connection, tenantId, schemaName, tableName, PHYSICAL_TABLE_NAME, + physicalTableName); + } + } + + private static void mutateBooleanProperty(Connection connection, String tenantId, + String schemaName, String tableName, String propertyName, boolean propertyValue) + throws SQLException { + String updatePropertySql = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + propertyName + ") VALUES (?, ?, ?, ?)"; + try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { + tableBoolUpsert.setString(1, tenantId); + tableBoolUpsert.setString(2, schemaName); + tableBoolUpsert.setString(3, tableName); + tableBoolUpsert.setBoolean(4, propertyValue); + tableBoolUpsert.execute(); + } + } + + private static void mutateLongProperty(Connection connection, String tenantId, String schemaName, + String tableName, String propertyName, Long propertyValue) throws SQLException { + String updatePropertySql = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + propertyName + ") VALUES (?, ?, ?, ?)"; + try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { + tableBoolUpsert.setString(1, tenantId); + tableBoolUpsert.setString(2, schemaName); + tableBoolUpsert.setString(3, tableName); + if (propertyValue == null) { + 
tableBoolUpsert.setNull(4, Types.BIGINT); + } else { + tableBoolUpsert.setLong(4, propertyValue); + } + tableBoolUpsert.execute(); + } + } + + private static void mutateIntegerProperty(Connection connection, String tenantId, + String schemaName, String tableName, String propertyName, Integer propertyValue) + throws SQLException { + String updatePropertySql = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + propertyName + ") VALUES (?, ?, ?, ?)"; + try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { + tableBoolUpsert.setString(1, tenantId); + tableBoolUpsert.setString(2, schemaName); + tableBoolUpsert.setString(3, tableName); + if (propertyValue == null) { + tableBoolUpsert.setNull(4, Types.INTEGER); + } else { + tableBoolUpsert.setInt(4, propertyValue); + } + tableBoolUpsert.execute(); + } + } + + private static void mutateByteProperty(Connection connection, String tenantId, String schemaName, + String tableName, String propertyName, Byte propertyValue) throws SQLException { + String updatePropertySql = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + propertyName + ") VALUES (?, ?, ?, ?)"; + try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { + tableBoolUpsert.setString(1, tenantId); + tableBoolUpsert.setString(2, schemaName); + tableBoolUpsert.setString(3, tableName); + if (propertyValue == null) { + tableBoolUpsert.setNull(4, Types.TINYINT); + } else { + tableBoolUpsert.setByte(4, propertyValue); + } + tableBoolUpsert.execute(); + } + } + + private static void mutateStringProperty(Connection connection, String tenantId, + String schemaName, String tableName, String propertyName, String propertyValue) + throws SQLException { + String updatePropertySql = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + propertyName + ") VALUES (?, ?, ?, ?)"; + try (PreparedStatement tableBoolUpsert = connection.prepareStatement(updatePropertySql)) { + tableBoolUpsert.setString(1, tenantId); + tableBoolUpsert.setString(2, schemaName); + tableBoolUpsert.setString(3, tableName); + tableBoolUpsert.setString(4, propertyValue); + tableBoolUpsert.execute(); + } + } + + public MutationState addColumn(AddColumnStatement statement) throws SQLException { + PTable table = FromCompiler.getResolver(statement, connection).getTables().get(0).getTable(); + return addColumn(table, statement.getColumnDefs(), statement.getProps(), + statement.ifNotExists(), false, statement.getTable(), statement.getTableType(), + statement.isCascade(), statement.getIndexes()); + } + + public MutationState addColumn(PTable table, List origColumnDefs, + ListMultimap> stmtProperties, boolean ifNotExists, + boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType, boolean cascade, + List indexes) throws SQLException { + connection.rollback(); + List indexesPTable = Lists + .newArrayListWithExpectedSize(indexes != null ? 
indexes.size() : table.getIndexes().size()); + Map indexToColumnSizeMap = new HashMap<>(); + + // if cascade keyword is passed and indexes are provided either implicitly or explicitly + if (cascade && (indexes == null || !indexes.isEmpty())) { + indexesPTable = getIndexesPTableForCascade(indexes, table); + if (indexesPTable.size() == 0) { + // go back to regular behavior of altering the table/view + cascade = false; + } else { + for (PTable index : indexesPTable) { + indexToColumnSizeMap.put(index, index.getColumns().size()); } - - if (metaProperties.getUpdateCacheFrequencyProp() != null) { - // See PHOENIX-4891 - if (table.getType() == PTableType.INDEX) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX) - .build() - .buildException(); + } + } + boolean wasAutoCommit = connection.getAutoCommit(); + List columns = + Lists.newArrayListWithExpectedSize(origColumnDefs != null ? origColumnDefs.size() : 0); + PName tenantId = connection.getTenantId(); + boolean sharedIndex = tableType == PTableType.INDEX + && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null); + String tenantIdToUse = + connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null; + String schemaName = table.getSchemaName().getString(); + String tableName = table.getTableName().getString(); + PName physicalName = table.getPhysicalName(); + String physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); + String physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName.getString()); + Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); + boolean acquiredBaseTableMutex = false; + try { + connection.setAutoCommit(false); + + List columnDefs; + if ((table.isAppendOnlySchema() || ifNotExists) && origColumnDefs != null) { + // only make the rpc if we are adding new columns + columnDefs = Lists.newArrayList(); + for (ColumnDef columnDef : origColumnDefs) { + String familyName = columnDef.getColumnDefName().getFamilyName(); + String columnName = columnDef.getColumnDefName().getColumnName(); + if (familyName != null) { + try { + PColumnFamily columnFamily = table.getColumnFamily(familyName); + columnFamily.getPColumnForColumnName(columnName); + if (!ifNotExists) { + throw new ColumnAlreadyExistsException(schemaName, tableName, columnName); + } + } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) { + columnDefs.add(columnDef); + } + } else { + try { + table.getColumnForColumnName(columnName); + if (!ifNotExists) { + throw new ColumnAlreadyExistsException(schemaName, tableName, columnName); + } + } catch (ColumnNotFoundException e) { + columnDefs.add(columnDef); + } + } + } + } else { + columnDefs = origColumnDefs == null ? Collections. 
emptyList() : origColumnDefs; + } + + boolean retried = false; + boolean changingPhoenixTableProperty = false; + MutableBoolean areWeIntroducingTTLAtThisLevel = new MutableBoolean(false); + MetaProperties metaProperties = new MetaProperties(); + while (true) { + Map>> properties = new HashMap<>(stmtProperties.size()); + ; + metaProperties = loadStmtProperties(stmtProperties, properties, table, removeTableProps); + + ColumnResolver resolver = FromCompiler.getResolver(namedTableNode, connection); + table = resolver.getTables().get(0).getTable(); + int nIndexes = table.getIndexes().size(); + int numCols = columnDefs.size(); + int nNewColumns = numCols; + List tableMetaData = + Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1)); + List columnMetaData = + Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Resolved table to " + table.getName().getString() + " with seqNum " + + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + + table.getColumns().size() + " columns: " + table.getColumns(), + connection)); + } + + int position = table.getColumns().size(); + + boolean addPKColumns = columnDefs.stream().anyMatch(ColumnDef::isPK); + if (addPKColumns) { + List currentPKs = table.getPKColumns(); + PColumn lastPK = currentPKs.get(currentPKs.size() - 1); + // Disallow adding columns if the last column in the primary key is VARBIANRY + // or ARRAY. + if (lastPK.getDataType() == PVarbinary.INSTANCE || lastPK.getDataType().isArrayType()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_LAST_PK) + .setColumnName(lastPK.getName().getString()).build().buildException(); + } + // Disallow adding columns if last column in the primary key is fixed width + // and nullable. + if (lastPK.isNullable() && lastPK.getDataType().isFixedWidth()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULLABLE_FIXED_WIDTH_LAST_PK) + .setColumnName(lastPK.getName().getString()).build().buildException(); + } + } + + MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated(); + changingPhoenixTableProperty = evaluateStmtProperties(metaProperties, + metaPropertiesEvaluated, table, schemaName, tableName, areWeIntroducingTTLAtThisLevel); + if (areWeIntroducingTTLAtThisLevel.booleanValue()) { + // As we are introducing TTL for the first time at this level, we need to check + // if TTL is already defined up or down in the hierarchy. + Integer ttlAlreadyDefined = TTL_NOT_DEFINED; + // Check up the hierarchy + if (table.getType() != PTableType.TABLE) { + ttlAlreadyDefined = checkAndGetTTLFromHierarchy( + PhoenixRuntime.getTableNoCache(connection, table.getParentName().toString())); + } + if (ttlAlreadyDefined != TTL_NOT_DEFINED) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TTL_ALREADY_DEFINED_IN_HIERARCHY) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + + /** + * To check if TTL is defined at any of the child below we are checking it at + * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl#mutateColumn(List, ColumnMutator, int, PTable, PTable, boolean)} + * level where in function + * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl# validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], byte[], byte[], List, int)} + * we are already traversing through allDescendantViews. 
+ */ + + } + + boolean isTransformNeeded = TransformClient.checkIsTransformNeeded(metaProperties, + schemaName, table, tableName, null, tenantIdToUse, connection); + if (isTransformNeeded) { + // We can add a support for these later. For now, not supported. + if (MetaDataUtil.hasLocalIndexTable(connection, physicalTableName.getBytes())) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_TRANSFORM_TABLE_WITH_LOCAL_INDEX).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + if (table.isAppendOnlySchema()) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_TRANSFORM_TABLE_WITH_APPEND_ONLY_SCHEMA) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + if (table.isTransactional()) { + throw new SQLExceptionInfo.Builder(CANNOT_TRANSFORM_TRANSACTIONAL_TABLE) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + } + + // If changing isImmutableRows to true or it's not being changed and is already true + boolean willBeImmutableRows = + Boolean.TRUE.equals(metaPropertiesEvaluated.getIsImmutableRows()) + || (metaPropertiesEvaluated.getIsImmutableRows() == null && table.isImmutableRows()); + boolean willBeTxnl = metaProperties.getNonTxToTx(); + Long timeStamp = + TransactionUtil.getTableTimestamp(connection, table.isTransactional() || willBeTxnl, + table.isTransactional() + ? table.getTransactionProvider() + : metaPropertiesEvaluated.getTransactionProvider()); + int numPkColumnsAdded = 0; + Set colFamiliesForPColumnsToBeAdded = new LinkedHashSet<>(); + Set families = new LinkedHashSet<>(); + PTable tableForCQCounters = tableType == PTableType.VIEW + ? connection.getTable(table.getPhysicalName().getString()) + : table; + EncodedCQCounter cqCounterToUse = tableForCQCounters.getEncodedCQCounter(); + Map changedCqCounters = new HashMap<>(numCols); + if (numCols > 0) { + StatementContext context = + new StatementContext(new PhoenixStatement(connection), resolver); + short nextKeySeq = SchemaUtil.getMaxKeySeq(table); + for (ColumnDef colDef : columnDefs) { + if (colDef != null && !colDef.isNull()) { + if (colDef.isPK()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY) + .setColumnName(colDef.getColumnDefName().getColumnName()).build() + .buildException(); + } else if (!willBeImmutableRows) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL) + .setColumnName(colDef.getColumnDefName().getColumnName()).build() + .buildException(); + } + } + if ( + colDef != null && colDef.isPK() && table.getType() == VIEW + && table.getViewType() != MAPPED + ) { + throwIfLastPKOfParentIsVariableLength(getParentOfView(table), schemaName, tableName, + colDef); + } + if (colDef != null && colDef.isRowTimestamp()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY) + .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException(); + } + if (!colDef.validateDefault(context, null)) { + colDef = new ColumnDef(colDef, null); // Remove DEFAULT as it's not necessary } - if (metaProperties.getUpdateCacheFrequencyProp().longValue() != table.getUpdateCacheFrequency()) { - metaPropertiesEvaluated.setUpdateCacheFrequency(metaProperties.getUpdateCacheFrequencyProp()); - changingPhoenixTableProperty = true; + String familyName = null; + Integer encodedCQ = null; + if (!colDef.isPK()) { + String colDefFamily = colDef.getColumnDefName().getFamilyName(); + ImmutableStorageScheme storageScheme = 
table.getImmutableStorageScheme(); + String defaultColumnFamily = tableForCQCounters.getDefaultFamilyName() != null + && !Strings.isNullOrEmpty(tableForCQCounters.getDefaultFamilyName().getString()) + ? tableForCQCounters.getDefaultFamilyName().getString() + : DEFAULT_COLUMN_FAMILY; + if (table.getType() == PTableType.INDEX && table.getIndexType() == IndexType.LOCAL) { + defaultColumnFamily = + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + defaultColumnFamily; + } + if (storageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS) { + familyName = colDefFamily != null ? colDefFamily : defaultColumnFamily; + } else { + familyName = defaultColumnFamily; + } + encodedCQ = table.isAppendOnlySchema() + ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + position) + : cqCounterToUse.getNextQualifier(familyName); + if (!table.isAppendOnlySchema() && cqCounterToUse.increment(familyName)) { + changedCqCounters.put(familyName, cqCounterToUse.getNextQualifier(familyName)); + } + } + byte[] columnQualifierBytes = null; + try { + columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes( + colDef.getColumnDefName().getColumnName(), encodedCQ, table, colDef.isPK()); + } catch (QualifierOutOfRangeException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - } - - if (metaProperties.getAppendOnlySchemaProp() !=null) { - if (metaProperties.getAppendOnlySchemaProp() != table.isAppendOnlySchema()) { - metaPropertiesEvaluated.setAppendOnlySchema(metaProperties.getAppendOnlySchemaProp()); - changingPhoenixTableProperty = true; + PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, + table.getDefaultFamilyName() == null + ? null + : table.getDefaultFamilyName().getString(), + true, columnQualifierBytes, willBeImmutableRows); + HashMap indexToIndexColumnMap = null; + if (cascade) { + indexToIndexColumnMap = getPTablePColumnHashMapForCascade(indexesPTable, + willBeImmutableRows, colDef, familyName, indexToColumnSizeMap); } - } - if (metaProperties.getColumnEncodedBytesProp() != null) { - if (metaProperties.getColumnEncodedBytesProp() != table.getEncodingScheme()) { - // Transform is needed, so we will not be setting it here. We set the boolean to increment sequence num - changingPhoenixTableProperty = true; - } - } + columns.add(column); + String pkName = null; + Short keySeq = null; - if (metaProperties.getImmutableStorageSchemeProp()!=null) { - if (metaProperties.getImmutableStorageSchemeProp() != table.getImmutableStorageScheme()) { - // Transform is needed, so we will not be setting it here. We set the boolean to increment sequence num - changingPhoenixTableProperty = true; + // TODO: support setting properties on other families? + if (column.getFamilyName() == null) { + ++numPkColumnsAdded; + pkName = table.getPKName() == null ? null : table.getPKName().getString(); + keySeq = ++nextKeySeq; + } else { + families.add(column.getFamilyName().getString()); + } + colFamiliesForPColumnsToBeAdded + .add(column.getFamilyName() == null ? 
null : column.getFamilyName().getString()); + addColumnMutation(connection, schemaName, tableName, column, null, pkName, keySeq, + table.getBucketNum() != null); + // add new columns for given indexes one by one + if (cascade) { + for (PTable index : indexesPTable) { + LOGGER.info("Adding column " + column.getName().getString() + " to " + + index.getTableName().toString()); + addColumnMutation(connection, schemaName, index.getTableName().getString(), + indexToIndexColumnMap.get(index), null, "", keySeq, index.getBucketNum() != null); + } + } + } + + // Add any new PK columns to end of index PK + if (numPkColumnsAdded > 0) { + // create PK column list that includes the newly created columns + List pkColumns = + Lists.newArrayListWithExpectedSize(table.getPKColumns().size() + numPkColumnsAdded); + pkColumns.addAll(table.getPKColumns()); + for (int i = 0; i < numCols; ++i) { + if (columnDefs.get(i).isPK()) { + pkColumns.add(columns.get(i)); + } + } + int pkSlotPosition = table.getPKColumns().size() - 1; + for (PTable index : table.getIndexes()) { + short nextIndexKeySeq = SchemaUtil.getMaxKeySeq(index); + int indexPosition = index.getColumns().size(); + for (int i = 0; i < numCols; ++i) { + ColumnDef colDef = columnDefs.get(i); + if (colDef.isPK()) { + PDataType indexColDataType = + IndexUtil.getIndexColumnDataType(colDef.isNull(), colDef.getDataType()); + ColumnName indexColName = ColumnName.caseSensitiveColumnName( + IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName())); + Expression expression = new RowKeyColumnExpression(columns.get(i), + new RowKeyValueAccessor(pkColumns, pkSlotPosition)); + ColumnDef indexColDef = + FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), + colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, + colDef.getSortOrder(), expression.toString(), colDef.isRowTimestamp()); + PColumn indexColumn = newColumn(indexPosition++, indexColDef, + PrimaryKeyConstraint.EMPTY, null, true, null, willBeImmutableRows); + addColumnMutation(connection, schemaName, index.getTableName().getString(), + indexColumn, index.getParentTableName().getString(), + index.getPKName() == null ? null : index.getPKName().getString(), + ++nextIndexKeySeq, index.getBucketNum() != null); + } + } + } + ++pkSlotPosition; + } + columnMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } else { + // Check that HBase configured properly for mutable secondary indexing + // if we're changing from an immutable table to a mutable table and we + // have existing indexes. 
+ if ( + Boolean.FALSE.equals(metaPropertiesEvaluated.getIsImmutableRows()) + && !table.getIndexes().isEmpty() + ) { + int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion(); + if (hbaseVersion < MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - } - - // Get immutableStorageScheme and encoding and check compatibility - ImmutableStorageScheme immutableStorageScheme = table.getImmutableStorageScheme(); - if (metaProperties.getImmutableStorageSchemeProp() != null) { - immutableStorageScheme = metaProperties.getImmutableStorageSchemeProp(); - } - QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); - if (metaProperties.getColumnEncodedBytesProp() != null) { - encodingScheme = metaProperties.getColumnEncodedBytesProp(); - } - if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS && encodingScheme == NON_ENCODED_QUALIFIERS) { - // encoding scheme is set as non-encoded on purpose, so we should fail - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - - if (metaProperties.getGuidePostWidth() == null || metaProperties.getGuidePostWidth() >= 0) { - metaPropertiesEvaluated.setGuidePostWidth(metaProperties.getGuidePostWidth()); - changingPhoenixTableProperty = true; - } - - if (metaProperties.getStoreNullsProp() != null) { - if (metaProperties.getStoreNullsProp().booleanValue() != table.getStoreNulls()) { - metaPropertiesEvaluated.setStoreNulls(metaProperties.getStoreNullsProp()); - changingPhoenixTableProperty = true; + if (!connection.getQueryServices().hasIndexWALCodec() && !table.isTransactional()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } - } + } + if (Boolean.TRUE.equals(metaPropertiesEvaluated.getMultiTenant())) { + throwIfInsufficientColumns(schemaName, tableName, table.getPKColumns(), + table.getBucketNum() != null, metaPropertiesEvaluated.getMultiTenant()); + } + } + + if ( + !table.getIndexes().isEmpty() && (numPkColumnsAdded > 0 || metaProperties.getNonTxToTx() + || metaPropertiesEvaluated.getUpdateCacheFrequency() != null) + ) { + for (PTable index : table.getIndexes()) { + incrementTableSeqNum(index, index.getType(), numPkColumnsAdded, + metaProperties.getNonTxToTx() ? 
Boolean.TRUE : null, + metaPropertiesEvaluated.getUpdateCacheFrequency(), + metaPropertiesEvaluated.getPhysicalTableName(), + metaPropertiesEvaluated.getSchemaVersion(), + metaProperties.getColumnEncodedBytesProp()); + } + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } + + if (cascade) { + for (PTable index : indexesPTable) { + incrementTableSeqNum(index, index.getType(), columnDefs.size(), Boolean.FALSE, + metaPropertiesEvaluated.getUpdateCacheFrequency(), + metaPropertiesEvaluated.getPhysicalTableName(), + metaPropertiesEvaluated.getSchemaVersion(), + metaPropertiesEvaluated.getColumnEncodedBytes()); + } + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } + + long seqNum = 0; + if (changingPhoenixTableProperty || columnDefs.size() > 0) { + seqNum = + incrementTableSeqNum(table, tableType, columnDefs.size(), metaPropertiesEvaluated); + + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } + + PTable transformingNewTable = null; + if (isTransformNeeded) { + try { + transformingNewTable = TransformClient.addTransform(connection, tenantIdToUse, table, + metaProperties, seqNum, PTable.TransformType.METADATA_TRANSFORM); + } catch (SQLException ex) { + connection.rollback(); + throw ex; + } + } + + // Force the table header row to be first + Collections.reverse(tableMetaData); + // Add column metadata afterwards, maintaining the order so columns have more predictable + // ordinal position + tableMetaData.addAll(columnMetaData); + if (!changedCqCounters.isEmpty()) { + try (PreparedStatement linkStatement = + connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) { + for (Entry entry : changedCqCounters.entrySet()) { + linkStatement.setString(1, tenantIdToUse); + linkStatement.setString(2, tableForCQCounters.getSchemaName().getString()); + linkStatement.setString(3, tableForCQCounters.getTableName().getString()); + linkStatement.setString(4, entry.getKey()); + linkStatement.setInt(5, entry.getValue()); + linkStatement.execute(); + } + } + + // When a view adds its own columns, then we need to increase the sequence number of the + // base table + // too since we want clients to get the latest PTable of the base table. + if (tableType == VIEW) { + try (PreparedStatement incrementStatement = + connection.prepareStatement(INCREMENT_SEQ_NUM)) { + incrementStatement.setString(1, null); + incrementStatement.setString(2, tableForCQCounters.getSchemaName().getString()); + incrementStatement.setString(3, tableForCQCounters.getTableName().getString()); + incrementStatement.setLong(4, tableForCQCounters.getSequenceNumber() + 1); + incrementStatement.execute(); + } + } + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } + + byte[] family = + families.size() > 0 ? 
families.iterator().next().getBytes(StandardCharsets.UTF_8) : null; + + // Figure out if the empty column family is changing as a result of adding the new column + byte[] emptyCF = null; + byte[] projectCF = null; + if (table.getType() != PTableType.VIEW && family != null) { + if (table.getColumnFamilies().isEmpty()) { + emptyCF = family; + } else { + try { + table.getColumnFamily(family); + } catch (ColumnFamilyNotFoundException e) { + projectCF = family; + emptyCF = SchemaUtil.getEmptyColumnFamily(table); + } + } + } + + if ( + EncodedColumnsUtil.usesEncodedColumnNames(table) && stmtProperties.isEmpty() + && !acquiredBaseTableMutex + ) { + // For tables that use column encoding acquire a mutex on + // the base table as we need to update the encoded column + // qualifier counter on the base table. Not applicable to + // ALTER TABLE/VIEW SET statements because + // we don't update the column qualifier counter while + // setting property, hence the check: stmtProperties.isEmpty() + acquiredBaseTableMutex = writeCell(null, physicalSchemaName, physicalTableName, null); + if (!acquiredBaseTableMutex) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + } + for (PColumn pColumn : columns) { + // acquire the mutex using the global physical table name to + // prevent creating the same column on a table or view with + // a conflicting type etc + boolean acquiredMutex = + writeCell(null, physicalSchemaName, physicalTableName, pColumn.toString()); + if (!acquiredMutex && !acquiredColumnMutexSet.contains(pColumn.toString())) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + acquiredColumnMutexSet.add(pColumn.toString()); + } + MetaDataMutationResult result = + connection.getQueryServices().addColumn(tableMetaData, table, getParentTable(table), + transformingNewTable, properties, colFamiliesForPColumnsToBeAdded, columns); - if (metaProperties.getUseStatsForParallelizationProp() != null - && (table.useStatsForParallelization() == null - || (metaProperties.getUseStatsForParallelizationProp().booleanValue() != table - .useStatsForParallelization()))) { - metaPropertiesEvaluated.setUseStatsForParallelization(metaProperties.getUseStatsForParallelizationProp()); - changingPhoenixTableProperty = true; - } - - if (metaProperties.getIsTransactionalProp() != null) { - if (metaProperties.getIsTransactionalProp().booleanValue() != table.isTransactional()) { - metaPropertiesEvaluated.setIsTransactional(metaProperties.getIsTransactionalProp()); - // Note: Going from transactional to non transactional used to be not supportable because - // it would have required rewriting the cell timestamps and doing a major compaction to - // remove Tephra specific delete markers. After PHOENIX-6627, Tephra has been removed. - // For now we continue to reject the request. 
- if (!metaPropertiesEvaluated.getIsTransactional()) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - // cannot create a transactional table if transactions are disabled - boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean( - QueryServices.TRANSACTIONS_ENABLED, - QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); - if (!transactionsEnabled) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED) - .setSchemaName(schemaName).setTableName(tableName).build().buildException(); - } - // cannot make a table transactional if it has a row timestamp column - if (SchemaUtil.hasRowTimestampColumn(table)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP) - .setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - TransactionFactory.Provider provider = metaProperties.getTransactionProviderProp(); - if (provider == null) { - provider = (Provider) - TableProperty.TRANSACTION_PROVIDER.getValue( - connection.getQueryServices().getProps().get( - QueryServices.DEFAULT_TRANSACTION_PROVIDER_ATTRIB, - QueryServicesOptions.DEFAULT_TRANSACTION_PROVIDER)); - } - metaPropertiesEvaluated.setTransactionProvider(provider); - if (provider.getTransactionProvider().isUnsupported(PhoenixTransactionProvider.Feature.ALTER_NONTX_TO_TX)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL) - .setMessage(provider.name() + ". ") - .setSchemaName(schemaName) - .setTableName(tableName) - .build().buildException(); - } - changingPhoenixTableProperty = true; - metaProperties.setNonTxToTx(true); + try { + MutationCode code = processMutationResult(schemaName, tableName, result); + if (code == MutationCode.COLUMN_ALREADY_EXISTS) { + addTableToCache(result, false); + if (!ifNotExists) { + throw new ColumnAlreadyExistsException(schemaName, tableName, + SchemaUtil.findExistingColumn(result.getTable(), columns)); } - } + return new MutationState(0, 0, connection); + } + // Only update client side cache if we aren't adding a PK column to a table with indexes + // or + // transitioning a table from non transactional to transactional. + // We could update the cache manually then too, it'd just be a pain. + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + long resolvedTimeStamp = TransactionUtil.getResolvedTime(connection, result); + if ( + table.getIndexes().isEmpty() + || (numPkColumnsAdded == 0 && !metaProperties.getNonTxToTx()) + ) { + addTableToCache(result, false, resolvedTimeStamp); + table = result.getTable(); + } else { + // remove the table from the cache, it will be fetched from the server the + // next time it is resolved + connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp); + } + // Delete rows in view index if we haven't dropped it already + // We only need to do this if the multiTenant transitioned to false + if ( + table.getType() == PTableType.TABLE + && Boolean.FALSE.equals(metaPropertiesEvaluated.getMultiTenant()) + && MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName()) + ) { + connection.setAutoCommit(true); + MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName(), + table.isNamespaceMapped()); + // If we're not dropping metadata, then make sure no rows are left in + // our view index physical table. 
+ // TODO: remove this, as the DROP INDEX commands run when the DROP VIEW + // commands are run would remove all rows already. + if ( + !connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, + DEFAULT_DROP_METADATA) + ) { + Long scn = connection.getSCN(); + long ts = (scn == null ? result.getMutationTime() : scn); + byte[] viewIndexPhysicalName = + MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()); + String viewIndexSchemaName = + SchemaUtil.getSchemaNameFromFullName(viewIndexPhysicalName); + String viewIndexTableName = + SchemaUtil.getTableNameFromFullName(viewIndexPhysicalName); + PName viewIndexName = PNameFactory + .newName(SchemaUtil.getTableName(viewIndexSchemaName, viewIndexTableName)); + + PTable viewIndexTable = new PTableImpl.Builder().setName(viewIndexName) + .setKey(new PTableKey(tenantId, viewIndexName.getString())) + .setSchemaName(PNameFactory.newName(viewIndexSchemaName)) + .setTableName(PNameFactory.newName(viewIndexTableName)).setType(PTableType.VIEW) + .setViewType(ViewType.MAPPED).setTimeStamp(ts) + .setPkColumns(Collections. emptyList()) + .setAllColumns(Collections. emptyList()) + .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) + .setIndexes(Collections. emptyList()) + .setFamilyAttributes(table.getColumnFamilies()) + .setPhysicalNames(Collections. emptyList()) + .setNamespaceMapped(table.isNamespaceMapped()) + .setImmutableStorageScheme(table.getImmutableStorageScheme()) + .setQualifierEncodingScheme(table.getEncodingScheme()) + .setUseStatsForParallelization(table.useStatsForParallelization()).build(); + List tableRefs = + Collections.singletonList(new TableRef(null, viewIndexTable, ts, false)); + MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, + Collections. emptyList(), ts); + connection.getQueryServices().updateData(plan); + } + } + if (transformingNewTable != null) { + connection.removeTable(tenantId, fullTableName, null, resolvedTimeStamp); + connection.getQueryServices().clearCache(); + } + if (emptyCF != null) { + Long scn = connection.getSCN(); + connection.setAutoCommit(true); + // Delete everything in the column. You'll still be able to do queries at earlier + // timestamps + long ts = (scn == null ? result.getMutationTime() : scn); + MutationPlan plan = new PostDDLCompiler(connection).compile( + Collections.singletonList(new TableRef(null, table, ts, false)), emptyCF, + projectCF == null ? null : Collections.singletonList(projectCF), null, ts); + return connection.getQueryServices().updateData(plan); + } + return new MutationState(0, 0, connection); + } catch (ConcurrentTableMutationException e) { + if (retried) { + throw e; + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Caught ConcurrentTableMutationException for table " + + SchemaUtil.getTableName(schemaName, tableName) + ". 
Will try again...", + connection)); + } + retried = true; + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } + } finally { + connection.setAutoCommit(wasAutoCommit); + if (acquiredBaseTableMutex) { + // release the mutex on the physical table (used to prevent concurrent conflicting + // add column changes) + deleteCell(null, physicalSchemaName, physicalTableName, null); + } + deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); + } + } + + private List getIndexesPTableForCascade(List indexes, PTable table) + throws SQLException { + boolean isView = table.getType().equals(PTableType.VIEW); + List indexesPTable = new ArrayList<>(); + + // when indexes is null, that means ALL keyword is passed and + // we ll collect all global indexes for cascading + if (indexes == null) { + indexesPTable.addAll(table.getIndexes()); + for (PTable index : table.getIndexes()) { + // a child view has access to its parents indexes, + // this if clause ensures we only get the indexes that + // are only created on the view itself. + if ( + index.getIndexType().equals(IndexType.LOCAL) || (isView && index.getTableName().toString() + .contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) + ) { + indexesPTable.remove(index); + } + } + } else { + List indexesParam = Lists.newArrayListWithExpectedSize(indexes.size()); + for (NamedNode index : indexes) { + indexesParam.add(index.getName()); + } + // gets the PTable for list of indexes passed in the function + // if all the names in parameter list are correct, indexesParam list should be empty + // by end of the loop + for (PTable index : table.getIndexes()) { + if (index.getIndexType().equals(IndexType.LOCAL)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_LOCAL_INDEX) + .setTableName(index.getName().getString()).build().buildException(); + } + if (indexesParam.remove(index.getTableName().getString())) { + indexesPTable.add(index); + } + } + // indexesParam has index names that are not correct + if (!indexesParam.isEmpty()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCORRECT_INDEX_NAME) + .setTableName(StringUtils.join(",", indexesParam)).build().buildException(); + } + } + return indexesPTable; + } + + private HashMap getPTablePColumnHashMapForCascade(List indexesPTable, + boolean willBeImmutableRows, ColumnDef colDef, String familyName, + Map indexToColumnSizeMap) throws SQLException { + HashMap indexColumn; + if (colDef.isPK()) { + // only supported for non pk column + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_SUPPORTED_CASCADE_FEATURE_PK).build() + .buildException(); + } + indexColumn = new HashMap(indexesPTable.size()); + ColumnName indexColName = ColumnName.caseSensitiveColumnName( + IndexUtil.getIndexColumnName(familyName, colDef.getColumnDefName().getColumnName())); + ColumnDef indexColDef = FACTORY.columnDef(indexColName, colDef.getDataType().getSqlTypeName(), + colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), false, colDef.getSortOrder(), + colDef.getExpression(), colDef.isRowTimestamp()); + // TODO: add support to specify tenant owned indexes in the DDL statement with CASCADE executed + // with Global connection + for (PTable index : indexesPTable) { + int iPos = indexToColumnSizeMap.get(index); + EncodedCQCounter cqCounterToUse = index.getEncodedCQCounter(); + int baseCount = 0; + baseCount = (cqCounterToUse != null && 
cqCounterToUse.getNextQualifier(familyName) != null) + ? cqCounterToUse.getNextQualifier(familyName) + : 0; + Integer encodedCQ = index.isAppendOnlySchema() + ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + iPos) + : baseCount + iPos; + byte[] columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes( + indexColDef.getColumnDefName().getColumnName(), encodedCQ, index, indexColDef.isPK()); + PColumn iColumn = newColumn(iPos, indexColDef, null, + index.getDefaultFamilyName() == null ? null : index.getDefaultFamilyName().getString(), + false, columnQualifierBytes, willBeImmutableRows); + indexColumn.put(index, iColumn); + indexToColumnSizeMap.put(index, iPos + 1); + } + return indexColumn; + } + + private void deleteMutexCells(String physicalSchemaName, String physicalTableName, + Set acquiredColumnMutexSet) throws SQLException { + if (!acquiredColumnMutexSet.isEmpty()) { + for (String columnName : acquiredColumnMutexSet) { + // release the mutex (used to prevent concurrent conflicting add column changes) + deleteCell(null, physicalSchemaName, physicalTableName, columnName); + } + } + } + + private String dropColumnMutations(PTable table, List columnsToDrop) + throws SQLException { + String tenantId = connection.getTenantId() == null ? "" : connection.getTenantId().getString(); + String schemaName = table.getSchemaName().getString(); + String tableName = table.getTableName().getString(); + String familyName = null; + /* + * Generate a fully qualified RVC with an IN clause, since that's what our optimizer can handle + * currently. If/when the optimizer handles (A and ((B AND C) OR (D AND E))) we can factor out + * the tenant ID, schema name, and table name columns + */ + StringBuilder buf = new StringBuilder( + "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" WHERE "); + buf.append("(" + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + ", " + + COLUMN_FAMILY + ") IN ("); + for (PColumn columnToDrop : columnsToDrop) { + buf.append("('" + tenantId + "'"); + buf.append(",'" + schemaName + "'"); + buf.append(",'" + tableName + "'"); + buf.append(",'" + columnToDrop.getName().getString() + "'"); + buf.append(",'" + + (columnToDrop.getFamilyName() == null ? "" : columnToDrop.getFamilyName().getString()) + + "'),"); + } + buf.setCharAt(buf.length() - 1, ')'); - if (metaProperties.getTTL() != null) { - if (table.getType() == PTableType.INDEX) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) - .build() - .buildException(); + try (PreparedStatement delCol = connection.prepareStatement(buf.toString())) { + delCol.execute(); + } + Collections.sort(columnsToDrop, new Comparator() { + @Override + public int compare(PColumn left, PColumn right) { + return Ints.compare(left.getPosition(), right.getPosition()); + } + }); + + boolean isSalted = table.getBucketNum() != null; + int columnsToDropIndex = 0; + try (PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION)) { + colUpdate.setString(1, tenantId); + colUpdate.setString(2, schemaName); + colUpdate.setString(3, tableName); + for (int i = columnsToDrop.get(columnsToDropIndex).getPosition() + 1; i + < table.getColumns().size(); i++) { + PColumn column = table.getColumns().get(i); + if (columnsToDrop.contains(column)) { + columnsToDropIndex++; + continue; + } + colUpdate.setString(4, column.getName().getString()); + colUpdate.setString(5, + column.getFamilyName() == null ? 
null : column.getFamilyName().getString()); + // Adjust position to not include the salt column + colUpdate.setInt(6, column.getPosition() - columnsToDropIndex - (isSalted ? 1 : 0)); + colUpdate.execute(); + } + } + return familyName; + } + + /** + * Calculate what the new column family will be after the column is dropped, returning null if + * unchanged. + * @param table table containing column to drop + * @param columnToDrop column being dropped + * @return the new column family or null if unchanged. + */ + private static byte[] getNewEmptyColumnFamilyOrNull(PTable table, PColumn columnToDrop) { + if ( + table.getType() != PTableType.VIEW && !SchemaUtil.isPKColumn(columnToDrop) + && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName()) + && table.getColumnFamilies().get(0).getColumns().size() == 1 + ) { + return SchemaUtil.getEmptyColumnFamily(table.getDefaultFamilyName(), + table.getColumnFamilies().subList(1, table.getColumnFamilies().size()), + table.getIndexType() == IndexType.LOCAL); + } + // If unchanged, return null + return null; + } + + private PTable getParentTable(PTable table) throws SQLException { + PTable parentTable = null; + boolean hasIndexId = table.getViewIndexId() != null; + if ( + (table.getType() == PTableType.INDEX && hasIndexId) + || (table.getType() == PTableType.VIEW && table.getViewType() != ViewType.MAPPED) + ) { + parentTable = connection.getTable(table.getParentName().getString()); + if (parentTable == null) { + String schemaName = + table.getSchemaName() != null ? table.getSchemaName().getString() : null; + throw new TableNotFoundException(schemaName, table.getTableName().getString()); + } + // only inherit columns view indexes (and not local indexes + // on regular tables which also have a viewIndexId) + if (hasIndexId && parentTable.getType() != PTableType.VIEW) { + return null; + } + } + return parentTable; + } + + public MutationState dropColumn(DropColumnStatement statement) throws SQLException { + connection.rollback(); + boolean wasAutoCommit = connection.getAutoCommit(); + Set acquiredColumnMutexSet = Sets.newHashSetWithExpectedSize(3); + String physicalSchemaName = null; + String physicalTableName = null; + try { + connection.setAutoCommit(false); + PName tenantId = connection.getTenantId(); + TableName tableNameNode = statement.getTable().getName(); + String schemaName = tableNameNode.getSchemaName(); + String tableName = tableNameNode.getTableName(); + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + boolean retried = false; + while (true) { + final ColumnResolver resolver = FromCompiler.getResolver(statement, connection); + TableRef tableRef = resolver.getTables().get(0); + PTable table = tableRef.getTable(); + PName physicalName = table.getPhysicalName(); + physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName.getString()); + physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName.getString()); + + List columnRefs = statement.getColumnRefs(); + if (columnRefs == null) { + columnRefs = Lists.newArrayListWithCapacity(0); + } + List columnsToDrop = + Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size()); + List indexesToDrop = + Lists.newArrayListWithExpectedSize(table.getIndexes().size()); + List tableMetaData = Lists.newArrayListWithExpectedSize( + (table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size())); + List tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size()); + + for (ColumnName 
column : columnRefs) { + ColumnRef columnRef = null; + try { + columnRef = + resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName()); + } catch (ColumnNotFoundException e) { + if (statement.ifExists()) { + return new MutationState(0, 0, connection); } + throw e; + } + PColumn columnToDrop = columnRef.getColumn(); + tableColumnsToDrop.add(columnToDrop); + if (SchemaUtil.isPKColumn(columnToDrop)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK) + .setColumnName(columnToDrop.getName().getString()).build().buildException(); + } else if (table.isAppendOnlySchema()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA) + .setColumnName(columnToDrop.getName().getString()).build().buildException(); + } else if (columnToDrop.isViewReferenced()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_VIEW_REFERENCED_COL) + .setColumnName(columnToDrop.getName().getString()).build().buildException(); + } + columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition())); + // check if client is already holding a mutex from previous retry + if (!acquiredColumnMutexSet.contains(columnToDrop.toString())) { + boolean acquiredMutex = + writeCell(null, physicalSchemaName, physicalTableName, columnToDrop.toString()); + if (!acquiredMutex) { + throw new ConcurrentTableMutationException(physicalSchemaName, physicalTableName); + } + acquiredColumnMutexSet.add(columnToDrop.toString()); + } + } + + dropColumnMutations(table, tableColumnsToDrop); + boolean removedIndexTableOrColumn = false; + Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null; + for (PTable index : table.getIndexes()) { + IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); + // get the covered columns + List indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size()); + Set> indexedColsInfo = indexMaintainer.getIndexedColumnInfo(); + Set coveredCols = indexMaintainer.getCoveredColumns(); + for (PColumn columnToDrop : tableColumnsToDrop) { + Pair columnToDropInfo = new Pair<>( + columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString()); + ColumnReference colDropRef = new ColumnReference( + columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), + columnToDrop.getColumnQualifierBytes()); + boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo); + if (isColumnIndexed) { + if (index.getViewIndexId() == null) { + indexesToDrop.add(new TableRef(index)); + } + connection.removeTable(tenantId, + SchemaUtil.getTableName(schemaName, index.getName().getString()), + index.getParentName() == null ? 
null : index.getParentName().getString(), + index.getTimeStamp()); + removedIndexTableOrColumn = true; + } else if (coveredCols.contains(colDropRef)) { + String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop); + PColumn indexColumn = index.getColumnForColumnName(indexColumnName); + indexColumnsToDrop.add(indexColumn); + // add the index column to be dropped so that we actually delete the column values + columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition())); + removedIndexTableOrColumn = true; + } + } + if (!indexColumnsToDrop.isEmpty()) { + long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), + -indexColumnsToDrop.size(), null, null, null, null, null); + dropColumnMutations(index, indexColumnsToDrop); + long clientTimestamp = MutationState.getTableTimestamp(timeStamp, connection.getSCN()); + connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, + clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, + index.isTransactional(), clientTimestamp)); + } + } + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); - if (!isViewTTLEnabled() && table.getType() == PTableType.VIEW) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.VIEW_TTL_NOT_ENABLED) - .build() - .buildException(); - } + long seqNum = incrementTableSeqNum(table, statement.getTableType(), + -tableColumnsToDrop.size(), null, null, null, null, null); + tableMetaData + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + // Force table header to be first in list + Collections.reverse(tableMetaData); - if (table.getType() != PTableType.TABLE && (table.getType() != PTableType.VIEW || - table.getViewType() != UPDATABLE)) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY) - .build() - .buildException(); - } - if (metaProperties.getTTL() != table.getTTL()) { - metaPropertiesEvaluated.setTTL(metaProperties.getTTL()); - changingPhoenixTableProperty = true; + /* + * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if + * we drop the last column in a column family that was the empty column family. In that + * case, we have to pick another one. If there are no other ones, then we need to create our + * default empty column family. Note that this may no longer be necessary once we support + * declaring what the empty column family is on a table, as: - If you declare it, we'd just + * ensure it's created at DDL time and never switch what it is unless you change it - If you + * don't declare it, we can just continue to use the old empty column family in this case, + * dynamically updating the empty column family name on the PTable. 
+ */ + for (ColumnRef columnRefToDrop : columnsToDrop) { + PTable tableContainingColumnToDrop = columnRefToDrop.getTable(); + byte[] emptyCF = + getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn()); + if (emptyCF != null) { + try { + tableContainingColumnToDrop.getColumnFamily(emptyCF); + } catch (ColumnFamilyNotFoundException e) { + // Only if it's not already a column family do we need to ensure it's created + Map>> family = new HashMap<>(1); + family.put(Bytes.toString(emptyCF), Collections.> emptyList()); + // Just use a Put without any key values as the Mutation, as addColumn will treat this + // specially + // TODO: pass through schema name and table name instead to these methods as it's + // cleaner + byte[] tenantIdBytes = + connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); + if (tenantIdBytes == null) tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY; + connection.getQueryServices().addColumn( + Collections. singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, + tableContainingColumnToDrop.getSchemaName().getBytes(), + tableContainingColumnToDrop.getTableName().getBytes()))), + tableContainingColumnToDrop, null, null, family, + Sets.newHashSet(Bytes.toString(emptyCF)), Collections. emptyList()); + + } + } + } + + MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, + statement.getTableType(), getParentTable(table)); + try { + MutationCode code = processMutationResult(schemaName, tableName, result); + if (code == MutationCode.COLUMN_NOT_FOUND) { + addTableToCache(result, false); + if (!statement.ifExists()) { + throw new ColumnNotFoundException(schemaName, tableName, + Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName())); } - //Updating Introducing TTL variable to true so that we will check if TTL is already - //defined in hierarchy or not. - areWeIntroducingTTLAtThisLevel.setTrue(); - } + return new MutationState(0, 0, connection); + } + // If we've done any index metadata updates, don't bother trying to update + // client-side cache as it would be too painful. Just let it pull it over from + // the server when needed. + if (tableColumnsToDrop.size() > 0) { + // need to remove the cached table because the DDL timestamp changed. We + // also need to remove it if we dropped an indexed column + connection.removeTable(tenantId, tableName, + table.getParentName() == null ? null : table.getParentName().getString(), + table.getTimeStamp()); + } + // If we have a VIEW, then only delete the metadata, and leave the table data alone + if (table.getType() != PTableType.VIEW) { + MutationState state = null; + connection.setAutoCommit(true); + Long scn = connection.getSCN(); + // Delete everything in the column. You'll still be able to do queries at earlier + // timestamps + long ts = (scn == null ? 
result.getMutationTime() : scn); + PostDDLCompiler compiler = new PostDDLCompiler(connection); - if (metaProperties.isChangeDetectionEnabled() != null) { - verifyChangeDetectionTableType(table.getType(), - metaProperties.isChangeDetectionEnabled()); - if (!metaProperties.isChangeDetectionEnabled().equals(table.isChangeDetectionEnabled())) { - metaPropertiesEvaluated.setChangeDetectionEnabled(metaProperties.isChangeDetectionEnabled()); - changingPhoenixTableProperty = true; - } - } + boolean dropMetaData = connection.getQueryServices().getProps() + .getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA); + // if the index is a local index or view index it uses a shared physical table + // so we need to issue deletes markers for all the rows of the index + final List tableRefsToDrop = Lists.newArrayList(); + Map> tenantIdTableRefMap = Maps.newHashMap(); + if (result.getSharedTablesToDelete() != null) { + for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) { + ImmutableStorageScheme storageScheme = table.getImmutableStorageScheme(); + QualifierEncodingScheme qualifierEncodingScheme = table.getEncodingScheme(); + List columns = sharedTableState.getColumns(); + if (table.getBucketNum() != null) { + columns = columns.subList(1, columns.size()); + } + + PTableImpl viewIndexTable = + new PTableImpl.Builder().setPkColumns(Collections. emptyList()) + .setAllColumns(Collections. emptyList()) + .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) + .setIndexes(Collections. emptyList()) + .setFamilyAttributes(table.getColumnFamilies()).setType(PTableType.INDEX) + .setTimeStamp(ts).setMultiTenant(table.isMultiTenant()) + .setViewIndexIdType(sharedTableState.getViewIndexIdType()) + .setViewIndexId(sharedTableState.getViewIndexId()) + .setNamespaceMapped(table.isNamespaceMapped()).setAppendOnlySchema(false) + .setImmutableStorageScheme(storageScheme == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : storageScheme) + .setQualifierEncodingScheme(qualifierEncodingScheme == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : qualifierEncodingScheme) + .setEncodedCQCounter(table.getEncodedCQCounter()) + .setUseStatsForParallelization(table.useStatsForParallelization()) + .setExcludedColumns(ImmutableList. of()) + .setTenantId(sharedTableState.getTenantId()) + .setSchemaName(sharedTableState.getSchemaName()) + .setTableName(sharedTableState.getTableName()).setRowKeyOrderOptimizable(false) + .setBucketNum(table.getBucketNum()).setIndexes(Collections. emptyList()) + .setPhysicalNames(sharedTableState.getPhysicalNames() == null + ? ImmutableList. of() + : ImmutableList.copyOf(sharedTableState.getPhysicalNames())) + .setColumns(columns).build(); + TableRef indexTableRef = new TableRef(viewIndexTable); + PName indexTableTenantId = sharedTableState.getTenantId(); + if (indexTableTenantId == null) { + tableRefsToDrop.add(indexTableRef); + } else { + if (!tenantIdTableRefMap.containsKey(indexTableTenantId.getString())) { + tenantIdTableRefMap.put(indexTableTenantId.getString(), + Lists. newArrayList()); + } + tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef); + } + + } + } + // if dropMetaData is false delete all rows for the indexes (if it was true + // they would have been dropped in ConnectionQueryServices.dropColumn) + if (!dropMetaData) { + tableRefsToDrop.addAll(indexesToDrop); + } + // Drop any index tables that had the dropped column in the PK + state = connection.getQueryServices().updateData( + compiler.compile(tableRefsToDrop, null, null, Collections. 
emptyList(), ts)); + + // Drop any tenant-specific indexes + if (!tenantIdTableRefMap.isEmpty()) { + for (Entry> entry : tenantIdTableRefMap.entrySet()) { + String indexTenantId = entry.getKey(); + Properties props = new Properties(connection.getClientInfo()); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId); + try (PhoenixConnection tenantConn = + new PhoenixConnection(connection, connection.getQueryServices(), props)) { + PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn); + state = tenantConn.getQueryServices().updateData(dropCompiler + .compile(entry.getValue(), null, null, Collections. emptyList(), ts)); + } + } + } + + // TODO For immutable tables, if the storage scheme is not ONE_CELL_PER_COLUMN we will + // remove the column values at compaction time + // See https://issues.apache.org/jira/browse/PHOENIX-3605 + if ( + !table.isImmutableRows() + || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN + ) { + // Update empty key value column if necessary + for (ColumnRef droppedColumnRef : columnsToDrop) { + // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts + // to get any updates from the region server. + // TODO: move this into PostDDLCompiler + // TODO: consider filtering mutable indexes here, but then the issue is that + // we'd need to force an update of the data row empty key value if a mutable + // secondary index is changing its empty key value family. + droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts); + TableRef droppedColumnTableRef = droppedColumnRef.getTableRef(); + PColumn droppedColumn = droppedColumnRef.getColumn(); + MutationPlan plan = + compiler.compile(Collections.singletonList(droppedColumnTableRef), + getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), + null, Collections.singletonList(droppedColumn), ts); + state = connection.getQueryServices().updateData(plan); + } + } + // Return the last MutationState + return state; + } + return new MutationState(0, 0, connection); + } catch (ConcurrentTableMutationException e) { + if (retried) { + throw e; + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + + SchemaUtil.getTableName(e.getSchemaName(), e.getTableName()) + + ". Will update cache and try again...", connection)); + } + updateCache(connection.getTenantId(), e.getSchemaName(), e.getTableName(), true); + retried = true; + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableName, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } + } finally { + connection.setAutoCommit(wasAutoCommit); + deleteMutexCells(physicalSchemaName, physicalTableName, acquiredColumnMutexSet); + } + } + + public MutationState alterIndex(AlterIndexStatement statement) throws SQLException { + connection.rollback(); + boolean wasAutoCommit = connection.getAutoCommit(); + String dataTableName; + long seqNum = 0L; + try { + dataTableName = statement.getTableName(); + final String indexName = statement.getTable().getName().getTableName(); + boolean isAsync = statement.isAsync(); + boolean isRebuildAll = statement.isRebuildAll(); + String tenantId = + connection.getTenantId() == null ? 
null : connection.getTenantId().getString(); + PTable table = + FromCompiler.getIndexResolver(statement, connection).getTables().get(0).getTable(); + + String schemaName = statement.getTable().getName().getSchemaName(); + String tableName = table.getTableName().getString(); + + Map>> properties = + new HashMap<>(statement.getProps().size()); + ; + MetaProperties metaProperties = + loadStmtProperties(statement.getProps(), properties, table, false); + + boolean isTransformNeeded = TransformClient.checkIsTransformNeeded(metaProperties, schemaName, + table, indexName, dataTableName, tenantId, connection); + MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated(); + boolean changingPhoenixTableProperty = evaluateStmtProperties(metaProperties, + metaPropertiesEvaluated, table, schemaName, tableName, new MutableBoolean(false)); + + PIndexState newIndexState = statement.getIndexState(); + + if (isAsync && newIndexState != PIndexState.REBUILD) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.ASYNC_NOT_ALLOWED) + .setMessage(" ASYNC building of index is allowed only with REBUILD index state") + .setSchemaName(schemaName).setTableName(indexName).build().buildException(); + } + + if (newIndexState == PIndexState.REBUILD) { + newIndexState = PIndexState.BUILDING; + } + connection.setAutoCommit(false); + // Confirm index table is valid and up-to-date + TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0); + PreparedStatement tableUpsert = null; + try { + if (newIndexState == PIndexState.ACTIVE) { + tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE); + } else { + tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE); + } + tableUpsert.setString(1, + connection.getTenantId() == null ? null : connection.getTenantId().getString()); + tableUpsert.setString(2, schemaName); + tableUpsert.setString(3, indexName); + tableUpsert.setString(4, newIndexState.getSerializedValue()); + tableUpsert.setLong(5, 0); + if (newIndexState == PIndexState.ACTIVE) { + tableUpsert.setLong(6, 0); + } + tableUpsert.execute(); + } finally { + if (tableUpsert != null) { + tableUpsert.close(); + } + } + Long timeStamp = indexRef.getTable().isTransactional() ? 
indexRef.getTimeStamp() : null; + List tableMetadata = + connection.getMutationState().toMutations(timeStamp).next().getSecond(); + connection.rollback(); + + if (changingPhoenixTableProperty) { + seqNum = incrementTableSeqNum(table, statement.getTableType(), 0, metaPropertiesEvaluated); + tableMetadata + .addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); + connection.rollback(); + } - if (!Strings.isNullOrEmpty(metaProperties.getPhysicalTableNameProp())) { - if (!metaProperties.getPhysicalTableNameProp().equals(table.getPhysicalName(true))) { - metaPropertiesEvaluated.setPhysicalTableName(metaProperties.getPhysicalTableNameProp()); - changingPhoenixTableProperty = true; - } - } + MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, + dataTableName, properties, table); - if (!Strings.isNullOrEmpty(metaProperties.getSchemaVersion())) { - if (!metaProperties.getSchemaVersion().equals(table.getSchemaVersion())) { - metaPropertiesEvaluated.setSchemaVersion(metaProperties.getSchemaVersion()); - changingPhoenixTableProperty = true; - } + try { + MutationCode code = result.getMutationCode(); + if (code == MutationCode.TABLE_NOT_FOUND) { + throw new TableNotFoundException(schemaName, indexName); + } + if (code == MutationCode.UNALLOWED_TABLE_MUTATION) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION) + .setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + + newIndexState) + .setSchemaName(schemaName).setTableName(indexName).build().buildException(); } - if (!Strings.isNullOrEmpty(metaProperties.getStreamingTopicName())) { - if (!metaProperties.getStreamingTopicName().equals(table.getStreamingTopicName())) { - metaPropertiesEvaluated. - setStreamingTopicName(metaProperties.getStreamingTopicName()); - changingPhoenixTableProperty = true; - } + if (isTransformNeeded) { + if (indexRef.getTable().getViewIndexId() != null) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_TRANSFORM_LOCAL_OR_VIEW_INDEX).setSchemaName(schemaName) + .setTableName(indexName).build().buildException(); + } + try { + TransformClient.addTransform(connection, tenantId, table, metaProperties, seqNum, + PTable.TransformType.METADATA_TRANSFORM); + } catch (SQLException ex) { + connection.rollback(); + throw ex; + } } - if (metaProperties.getMaxLookbackAge() != null) { - if (table.getType() != TABLE) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode. 
- MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY) - .setSchemaName(schemaName) - .setTableName(tableName) - .build() + if (code == MutationCode.TABLE_ALREADY_EXISTS) { + if (result.getTable() != null) { // To accommodate connection-less update of index state + addTableToCache(result, false); + // Set so that we get the table below with the potentially modified + // rowKeyOrderOptimizable flag set + indexRef.setTable(result.getTable()); + if (newIndexState == PIndexState.BUILDING && isAsync) { + if (isRebuildAll) { + List tasks = Task.queryTaskTable(connection, null, schemaName, + tableName, PTable.TaskType.INDEX_REBUILD, tenantId, indexName); + if (tasks == null || tasks.size() == 0) { + Timestamp ts = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + Map props = new HashMap() { + { + put(INDEX_NAME, indexName); + put(REBUILD_ALL, true); + } + }; + try { + String json = JacksonUtil.getObjectWriter().writeValueAsString(props); + List sysTaskUpsertMutations = + Task.getMutationsForAddTask(new SystemTaskParams.SystemTaskParamsBuilder() + .setConn(connection).setTaskType(PTable.TaskType.INDEX_REBUILD) + .setTenantId(tenantId).setSchemaName(schemaName).setTableName(dataTableName) + .setTaskStatus(PTable.TaskStatus.CREATED.toString()).setData(json) + .setPriority(null).setStartTs(ts).setEndTs(null).setAccessCheckEnabled(true) + .build()); + byte[] rowKey = sysTaskUpsertMutations.get(0).getRow(); + MetaDataMutationResult metaDataMutationResult = + Task.taskMetaDataCoprocessorExec(connection, rowKey, + new TaskMetaDataServiceCallBack(sysTaskUpsertMutations)); + if ( + MutationCode.UNABLE_TO_UPSERT_TASK + .equals(metaDataMutationResult.getMutationCode()) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_UPSERT_TASK) + .setSchemaName(SYSTEM_SCHEMA_NAME).setTableName(SYSTEM_TASK_TABLE).build() .buildException(); - } - if (! Objects.equals(metaProperties.getMaxLookbackAge(), table.getMaxLookbackAge())) { - metaPropertiesEvaluated.setMaxLookbackAge(metaProperties.getMaxLookbackAge()); - changingPhoenixTableProperty = true; - } + } + } catch (IOException e) { + throw new SQLException( + "Exception happened while adding a System.Task" + e.toString()); + } + } + } else { + try { + tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE); + tableUpsert.setString(1, + connection.getTenantId() == null ? null : connection.getTenantId().getString()); + tableUpsert.setString(2, schemaName); + tableUpsert.setString(3, indexName); + long beginTimestamp = result.getTable().getTimeStamp(); + tableUpsert.setLong(4, beginTimestamp); + tableUpsert.execute(); + connection.commit(); + } finally { + if (tableUpsert != null) { + tableUpsert.close(); + } + } + } + } + } + } + if (newIndexState == PIndexState.BUILDING && !isAsync) { + PTable index = indexRef.getTable(); + // First delete any existing rows of the index + if (IndexUtil.isGlobalIndex(index) && index.getViewIndexId() == null) { + // for a global index of a normal base table, it's safe to just truncate and + // rebuild. 
We preserve splits to reduce the amount of splitting we need to do + // during rebuild + org.apache.hadoop.hbase.TableName physicalTableName = + org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes()); + try (Admin admin = connection.getQueryServices().getAdmin()) { + admin.disableTable(physicalTableName); + admin.truncateTable(physicalTableName, true); + // trunateTable automatically re-enables when it's done + } catch (IOException ie) { + String failedTable = physicalTableName.getNameAsString(); + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE) + .setMessage("Error when truncating index table [" + failedTable + + "] before rebuilding: " + ie.getMessage()) + .setTableName(failedTable).build().buildException(); + } + } else { + Long scn = connection.getSCN(); + long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + MutationPlan plan = + new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, + null, Collections. emptyList(), ts); + connection.getQueryServices().updateData(plan); + } + NamedTableNode dataTableNode = NamedTableNode.create(null, + TableName.create(schemaName, dataTableName), Collections. emptyList()); + // Next rebuild the index + connection.setAutoCommit(true); + if (connection.getSCN() != null) { + return buildIndexAtTimeStamp(index, dataTableNode); + } + TableRef dataTableRef = + FromCompiler.getResolver(dataTableNode, connection).getTables().get(0); + return buildIndex(index, dataTableRef); } - return changingPhoenixTableProperty; + return new MutationState(1, 1000, connection); + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(dataTableName, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } catch (TableNotFoundException e) { + if (!statement.ifExists()) { + throw e; + } + return new MutationState(0, 0, connection); + } finally { + connection.setAutoCommit(wasAutoCommit); } + } + + private void addTableToCache(MetaDataMutationResult result, boolean alwaysHitServerForAncestors) + throws SQLException { + addTableToCache(result, alwaysHitServerForAncestors, + TransactionUtil.getResolvedTime(connection, result)); + } + + private void addTableToCache(MetaDataMutationResult result, boolean alwaysHitServerForAncestors, + long timestamp) throws SQLException { + addColumnsIndexesAndLastDDLTimestampsFromAncestors(result, null, false, + alwaysHitServerForAncestors); + updateIndexesWithAncestorMap(result); + connection.addTable(result.getTable(), timestamp); + } + + private void addFunctionToCache(MetaDataMutationResult result) throws SQLException { + for (PFunction function : result.getFunctions()) { + connection.addFunction(function); + } + } + + private void addSchemaToCache(MetaDataMutationResult result) throws SQLException { + connection.addSchema(result.getSchema()); + } + + private void throwIfLastPKOfParentIsVariableLength(PTable parent, String viewSchemaName, + String viewName, ColumnDef col) throws SQLException { + // if the last pk column is variable length then we read all the + // bytes of the rowkey without looking for a separator byte see + // https://issues.apache.org/jira/browse/PHOENIX-978?focusedCommentId=14617847&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-14617847 + // so we cannot add a pk column to a view if the last pk column of the parent is variable length + if (isLastPKVariableLength(parent)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MODIFY_VIEW_PK) + 
.setSchemaName(viewSchemaName).setTableName(viewName) + .setColumnName(col.getColumnDefName().getColumnName()).build().buildException(); + } + } + + private boolean isLastPKVariableLength(PTable table) { + List pkColumns = table.getPKColumns(); + return !pkColumns.get(pkColumns.size() - 1).getDataType().isFixedWidth(); + } + + private PTable getParentOfView(PTable view) throws SQLException { + return connection.getTable(new PTableKey(view.getTenantId(), view.getParentName().getString())); + } + + public MutationState createSchema(CreateSchemaStatement create) throws SQLException { + boolean wasAutoCommit = connection.getAutoCommit(); + connection.rollback(); + try { + if (!SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CREATE_SCHEMA_NOT_ALLOWED) + .setSchemaName(create.getSchemaName()).build().buildException(); + } + boolean isIfNotExists = create.isIfNotExists(); + PSchema schema = new PSchema(create.getSchemaName()); + // Use SchemaName from PSchema object to get the normalized SchemaName + // See PHOENIX-4424 for details + validateSchema(schema.getSchemaName()); + connection.setAutoCommit(false); + List schemaMutations; + + try (PreparedStatement schemaUpsert = connection.prepareStatement(CREATE_SCHEMA)) { + schemaUpsert.setString(1, schema.getSchemaName()); + schemaUpsert.setString(2, MetaDataClient.EMPTY_TABLE); + schemaUpsert.execute(); + schemaMutations = connection.getMutationState().toMutations(null).next().getSecond(); + connection.rollback(); + } + MetaDataMutationResult result = + connection.getQueryServices().createSchema(schemaMutations, schema.getSchemaName()); + MutationCode code = result.getMutationCode(); + try { + switch (code) { + case SCHEMA_ALREADY_EXISTS: + if (result.getSchema() != null) { + addSchemaToCache(result); + } + if (!isIfNotExists) { + throw new SchemaAlreadyExistsException(schema.getSchemaName()); + } + break; + case NEWER_SCHEMA_FOUND: + throw new NewerSchemaAlreadyExistsException(schema.getSchemaName()); + default: + result = new MetaDataMutationResult(code, schema, result.getMutationTime()); + addSchemaToCache(result); + } + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + } finally { + connection.setAutoCommit(wasAutoCommit); + } + return new MutationState(0, 0, connection); + } - public static class MetaProperties { - private Boolean isImmutableRowsProp = null; - private Boolean multiTenantProp = null; - private Boolean disableWALProp = null; - private Boolean storeNullsProp = null; - private TransactionFactory.Provider transactionProviderProp = null; - private Boolean isTransactionalProp = null; - private Long updateCacheFrequencyProp = null; - private String physicalTableNameProp = null; - private QualifierEncodingScheme columnEncodedBytesProp = null; - private Boolean appendOnlySchemaProp = null; - private Long guidePostWidth = -1L; - private ImmutableStorageScheme immutableStorageSchemeProp = null; - private Boolean useStatsForParallelizationProp = null; - private boolean nonTxToTx = false; - private Integer ttl = null; - private Boolean isChangeDetectionEnabled = null; - private String physicalTableName = null; - private String schemaVersion = null; - private String streamingTopicName = null; + private void validateSchema(String schemaName) throws SQLException { + if (SchemaUtil.NOT_ALLOWED_SCHEMA_LIST.contains(schemaName)) { + throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.SCHEMA_NOT_ALLOWED) + .setSchemaName(schemaName).build().buildException(); + } + } + + public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) + throws SQLException { + connection.rollback(); + boolean wasAutoCommit = connection.getAutoCommit(); + try { + PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName()); + String schemaName = schema.getSchemaName(); + boolean ifExists = executableDropSchemaStatement.ifExists(); + byte[] key = SchemaUtil.getSchemaKey(schemaName); + + Long scn = connection.getSCN(); + long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + List schemaMetaData = Lists.newArrayListWithExpectedSize(2); + Delete schemaDelete = new Delete(key, clientTimeStamp); + schemaMetaData.add(schemaDelete); + MetaDataMutationResult result = + connection.getQueryServices().dropSchema(schemaMetaData, schemaName); + MutationCode code = result.getMutationCode(); + schema = result.getSchema(); + try { + switch (code) { + case SCHEMA_NOT_FOUND: + if (!ifExists) { + throw new SchemaNotFoundException(schemaName); + } + break; + case NEWER_SCHEMA_FOUND: + throw new NewerSchemaAlreadyExistsException(schemaName); + case TABLES_EXIST_ON_SCHEMA: + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA) + .setSchemaName(schemaName).build().buildException(); + default: + connection.removeSchema(schema, result.getMutationTime()); + break; + } + } catch (Throwable e) { + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(null, + NUM_METADATA_LOOKUP_FAILURES, 1); + throw e; + } + return new MutationState(0, 0, connection); + } finally { + connection.setAutoCommit(wasAutoCommit); + } + } + + public MutationState useSchema(UseSchemaStatement useSchemaStatement) throws SQLException { + // As we allow default namespace mapped to empty schema, so this is to reset schema in + // connection + if (useSchemaStatement.getSchemaName().equals(StringUtil.EMPTY_STRING)) { + connection.setSchema(null); + } else { + FromCompiler.getResolverForSchema(useSchemaStatement, connection) + .resolveSchema(useSchemaStatement.getSchemaName()); + connection.setSchema(useSchemaStatement.getSchemaName()); + } + return new MutationState(0, 0, connection); + } + + private MetaProperties loadStmtProperties( + ListMultimap> stmtProperties, + Map>> properties, PTable table, boolean removeTableProps) + throws SQLException { + MetaProperties metaProperties = new MetaProperties(); + for (String family : stmtProperties.keySet()) { + List> origPropsList = stmtProperties.get(family); + List> propsList = + Lists.newArrayListWithExpectedSize(origPropsList.size()); + for (Pair prop : origPropsList) { + String propName = prop.getFirst(); + if (TableProperty.isPhoenixTableProperty(propName)) { + TableProperty tableProp = TableProperty.valueOf(propName); + tableProp.validate(true, !family.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY), + table.getType()); + Object value = tableProp.getValue(prop.getSecond()); + if (propName.equals(PTable.IS_IMMUTABLE_ROWS_PROP_NAME)) { + metaProperties.setImmutableRowsProp((Boolean) value); + } else if (propName.equals(PhoenixDatabaseMetaData.MULTI_TENANT)) { + metaProperties.setMultiTenantProp((Boolean) value); + } else if (propName.equals(DISABLE_WAL)) { + metaProperties.setDisableWALProp((Boolean) value); + } else if (propName.equals(STORE_NULLS)) { + metaProperties.setStoreNullsProp((Boolean) value); + } else if (propName.equals(TRANSACTIONAL)) { + 
metaProperties.setIsTransactionalProp((Boolean) value); + } else if (propName.equals(TRANSACTION_PROVIDER)) { + metaProperties.setTransactionProviderProp((TransactionFactory.Provider) value); + } else if (propName.equals(UPDATE_CACHE_FREQUENCY)) { + metaProperties.setUpdateCacheFrequencyProp((Long) value); + } else if (propName.equals(PHYSICAL_TABLE_NAME)) { + metaProperties.setPhysicalTableNameProp((String) value); + } else if (propName.equals(GUIDE_POSTS_WIDTH)) { + metaProperties.setGuidePostWidth((Long) value); + } else if (propName.equals(APPEND_ONLY_SCHEMA)) { + metaProperties.setAppendOnlySchemaProp((Boolean) value); + } else if (propName.equalsIgnoreCase(IMMUTABLE_STORAGE_SCHEME)) { + metaProperties.setImmutableStorageSchemeProp((ImmutableStorageScheme) value); + } else if (propName.equalsIgnoreCase(COLUMN_ENCODED_BYTES)) { + metaProperties + .setColumnEncodedBytesProp(QualifierEncodingScheme.fromSerializedValue((byte) value)); + } else if (propName.equalsIgnoreCase(USE_STATS_FOR_PARALLELIZATION)) { + metaProperties.setUseStatsForParallelizationProp((Boolean) value); + } else if (propName.equalsIgnoreCase(TTL)) { + metaProperties.setTTL((Integer) value); + } else if (propName.equalsIgnoreCase(CHANGE_DETECTION_ENABLED)) { + metaProperties.setChangeDetectionEnabled((Boolean) value); + } else if (propName.equalsIgnoreCase(PHYSICAL_TABLE_NAME)) { + metaProperties.setPhysicalTableName((String) value); + } else if (propName.equalsIgnoreCase(SCHEMA_VERSION)) { + metaProperties.setSchemaVersion((String) value); + } else if (propName.equalsIgnoreCase(STREAMING_TOPIC_NAME)) { + metaProperties.setStreamingTopicName((String) value); + } else if (propName.equalsIgnoreCase(MAX_LOOKBACK_AGE)) { + metaProperties.setMaxLookbackAge((Long) value); + } + } + // if removeTableProps is true only add the property if it is not an HTable or Phoenix Table + // property + if ( + !removeTableProps || (!TableProperty.isPhoenixTableProperty(propName) + && !MetaDataUtil.isHTableProperty(propName)) + ) { + propsList.add(prop); + } + } + properties.put(family, propsList); + } + return metaProperties; + } + + private boolean evaluateStmtProperties(MetaProperties metaProperties, + MetaPropertiesEvaluated metaPropertiesEvaluated, PTable table, String schemaName, + String tableName, MutableBoolean areWeIntroducingTTLAtThisLevel) throws SQLException { + boolean changingPhoenixTableProperty = false; + + if (metaProperties.getImmutableRowsProp() != null) { + if (metaProperties.getImmutableRowsProp().booleanValue() != table.isImmutableRows()) { + if (table.getImmutableStorageScheme() != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } + metaPropertiesEvaluated.setIsImmutableRows(metaProperties.getImmutableRowsProp()); + changingPhoenixTableProperty = true; + } + } - private Long maxLookbackAge = null; + if (metaProperties.getImmutableRowsProp() != null && table.getType() != INDEX) { + if (metaProperties.getImmutableRowsProp().booleanValue() != table.isImmutableRows()) { + metaPropertiesEvaluated.setIsImmutableRows(metaProperties.getImmutableRowsProp()); + changingPhoenixTableProperty = true; + } + } - public Boolean getImmutableRowsProp() { - return isImmutableRowsProp; - } + if (metaProperties.getMultiTenantProp() != null) { + if (metaProperties.getMultiTenantProp().booleanValue() != table.isMultiTenant()) { + 
metaPropertiesEvaluated.setMultiTenant(metaProperties.getMultiTenantProp()); + changingPhoenixTableProperty = true; + } + } - public void setImmutableRowsProp(Boolean isImmutableRowsProp) { - this.isImmutableRowsProp = isImmutableRowsProp; - } + if (metaProperties.getDisableWALProp() != null) { + if (metaProperties.getDisableWALProp().booleanValue() != table.isWALDisabled()) { + metaPropertiesEvaluated.setDisableWAL(metaProperties.getDisableWALProp()); + changingPhoenixTableProperty = true; + } + } - public Boolean getMultiTenantProp() { - return multiTenantProp; - } + if (metaProperties.getUpdateCacheFrequencyProp() != null) { + // See PHOENIX-4891 + if (table.getType() == PTableType.INDEX) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_SET_OR_ALTER_UPDATE_CACHE_FREQ_FOR_INDEX).build() + .buildException(); + } + if ( + metaProperties.getUpdateCacheFrequencyProp().longValue() != table.getUpdateCacheFrequency() + ) { + metaPropertiesEvaluated + .setUpdateCacheFrequency(metaProperties.getUpdateCacheFrequencyProp()); + changingPhoenixTableProperty = true; + } + } - public void setMultiTenantProp(Boolean multiTenantProp) { - this.multiTenantProp = multiTenantProp; - } + if (metaProperties.getAppendOnlySchemaProp() != null) { + if (metaProperties.getAppendOnlySchemaProp() != table.isAppendOnlySchema()) { + metaPropertiesEvaluated.setAppendOnlySchema(metaProperties.getAppendOnlySchemaProp()); + changingPhoenixTableProperty = true; + } + } - public Boolean getDisableWALProp() { - return disableWALProp; - } + if (metaProperties.getColumnEncodedBytesProp() != null) { + if (metaProperties.getColumnEncodedBytesProp() != table.getEncodingScheme()) { + // Transform is needed, so we will not be setting it here. We set the boolean to increment + // sequence num + changingPhoenixTableProperty = true; + } + } - public void setDisableWALProp(Boolean disableWALProp) { - this.disableWALProp = disableWALProp; - } + if (metaProperties.getImmutableStorageSchemeProp() != null) { + if (metaProperties.getImmutableStorageSchemeProp() != table.getImmutableStorageScheme()) { + // Transform is needed, so we will not be setting it here. 
We set the boolean to increment + // sequence num + changingPhoenixTableProperty = true; + } + } - public Boolean getStoreNullsProp() { - return storeNullsProp; - } + // Get immutableStorageScheme and encoding and check compatibility + ImmutableStorageScheme immutableStorageScheme = table.getImmutableStorageScheme(); + if (metaProperties.getImmutableStorageSchemeProp() != null) { + immutableStorageScheme = metaProperties.getImmutableStorageSchemeProp(); + } + QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); + if (metaProperties.getColumnEncodedBytesProp() != null) { + encodingScheme = metaProperties.getColumnEncodedBytesProp(); + } + if ( + immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS + && encodingScheme == NON_ENCODED_QUALIFIERS + ) { + // encoding scheme is set as non-encoded on purpose, so we should fail + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); + } - public void setStoreNullsProp(Boolean storeNullsProp) { - this.storeNullsProp = storeNullsProp; - } + if (metaProperties.getGuidePostWidth() == null || metaProperties.getGuidePostWidth() >= 0) { + metaPropertiesEvaluated.setGuidePostWidth(metaProperties.getGuidePostWidth()); + changingPhoenixTableProperty = true; + } - public TransactionFactory.Provider getTransactionProviderProp() { - return transactionProviderProp; - } + if (metaProperties.getStoreNullsProp() != null) { + if (metaProperties.getStoreNullsProp().booleanValue() != table.getStoreNulls()) { + metaPropertiesEvaluated.setStoreNulls(metaProperties.getStoreNullsProp()); + changingPhoenixTableProperty = true; + } + } - public void setTransactionProviderProp(TransactionFactory.Provider transactionProviderProp) { - this.transactionProviderProp = transactionProviderProp; - } + if ( + metaProperties.getUseStatsForParallelizationProp() != null + && (table.useStatsForParallelization() == null + || (metaProperties.getUseStatsForParallelizationProp().booleanValue() + != table.useStatsForParallelization())) + ) { + metaPropertiesEvaluated + .setUseStatsForParallelization(metaProperties.getUseStatsForParallelizationProp()); + changingPhoenixTableProperty = true; + } - public Boolean getIsTransactionalProp() { - return isTransactionalProp; + if (metaProperties.getIsTransactionalProp() != null) { + if (metaProperties.getIsTransactionalProp().booleanValue() != table.isTransactional()) { + metaPropertiesEvaluated.setIsTransactional(metaProperties.getIsTransactionalProp()); + // Note: Going from transactional to non transactional used to be not supportable because + // it would have required rewriting the cell timestamps and doing a major compaction to + // remove Tephra specific delete markers. After PHOENIX-6627, Tephra has been removed. + // For now we continue to reject the request. 
+ if (!metaPropertiesEvaluated.getIsTransactional()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX) + .setSchemaName(schemaName).setTableName(tableName).build().buildException(); } + // cannot create a transactional table if transactions are disabled + boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean( + QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED); + if (!transactionsEnabled) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_IF_TXNS_DISABLED).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + // cannot make a table transactional if it has a row timestamp column + if (SchemaUtil.hasRowTimestampColumn(table)) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_ALTER_TO_BE_TXN_WITH_ROW_TIMESTAMP).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + TransactionFactory.Provider provider = metaProperties.getTransactionProviderProp(); + if (provider == null) { + provider = (Provider) TableProperty.TRANSACTION_PROVIDER.getValue(connection + .getQueryServices().getProps().get(QueryServices.DEFAULT_TRANSACTION_PROVIDER_ATTRIB, + QueryServicesOptions.DEFAULT_TRANSACTION_PROVIDER)); + } + metaPropertiesEvaluated.setTransactionProvider(provider); + if ( + provider.getTransactionProvider() + .isUnsupported(PhoenixTransactionProvider.Feature.ALTER_NONTX_TO_TX) + ) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL) + .setMessage(provider.name() + ". ").setSchemaName(schemaName).setTableName(tableName) + .build().buildException(); + } + changingPhoenixTableProperty = true; + metaProperties.setNonTxToTx(true); + } + } - public void setIsTransactionalProp(Boolean isTransactionalProp) { - this.isTransactionalProp = isTransactionalProp; - } + if (metaProperties.getTTL() != null) { + if (table.getType() == PTableType.INDEX) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_OR_ALTER_PROPERTY_FOR_INDEX) + .build().buildException(); + } + + if (!isViewTTLEnabled() && table.getType() == PTableType.VIEW) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_TTL_NOT_ENABLED).build() + .buildException(); + } + + if ( + table.getType() != PTableType.TABLE + && (table.getType() != PTableType.VIEW || table.getViewType() != UPDATABLE) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TTL_SUPPORTED_FOR_TABLES_AND_VIEWS_ONLY) + .build().buildException(); + } + if (metaProperties.getTTL() != table.getTTL()) { + metaPropertiesEvaluated.setTTL(metaProperties.getTTL()); + changingPhoenixTableProperty = true; + } + // Updating Introducing TTL variable to true so that we will check if TTL is already + // defined in hierarchy or not. 
+ areWeIntroducingTTLAtThisLevel.setTrue(); + } - public void setPhysicalTableNameProp(String physicalTableNameProp) { - this.physicalTableNameProp = physicalTableNameProp; - } + if (metaProperties.isChangeDetectionEnabled() != null) { + verifyChangeDetectionTableType(table.getType(), metaProperties.isChangeDetectionEnabled()); + if (!metaProperties.isChangeDetectionEnabled().equals(table.isChangeDetectionEnabled())) { + metaPropertiesEvaluated + .setChangeDetectionEnabled(metaProperties.isChangeDetectionEnabled()); + changingPhoenixTableProperty = true; + } + } - public String getPhysicalTableNameProp() { - return this.physicalTableNameProp; - } + if (!Strings.isNullOrEmpty(metaProperties.getPhysicalTableNameProp())) { + if (!metaProperties.getPhysicalTableNameProp().equals(table.getPhysicalName(true))) { + metaPropertiesEvaluated.setPhysicalTableName(metaProperties.getPhysicalTableNameProp()); + changingPhoenixTableProperty = true; + } + } - public Long getUpdateCacheFrequencyProp() { - return updateCacheFrequencyProp; - } + if (!Strings.isNullOrEmpty(metaProperties.getSchemaVersion())) { + if (!metaProperties.getSchemaVersion().equals(table.getSchemaVersion())) { + metaPropertiesEvaluated.setSchemaVersion(metaProperties.getSchemaVersion()); + changingPhoenixTableProperty = true; + } + } - public void setUpdateCacheFrequencyProp(Long updateCacheFrequencyProp) { - this.updateCacheFrequencyProp = updateCacheFrequencyProp; - } + if (!Strings.isNullOrEmpty(metaProperties.getStreamingTopicName())) { + if (!metaProperties.getStreamingTopicName().equals(table.getStreamingTopicName())) { + metaPropertiesEvaluated.setStreamingTopicName(metaProperties.getStreamingTopicName()); + changingPhoenixTableProperty = true; + } + } - public Boolean getAppendOnlySchemaProp() { - return appendOnlySchemaProp; - } + if (metaProperties.getMaxLookbackAge() != null) { + if (table.getType() != TABLE) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.MAX_LOOKBACK_AGE_SUPPORTED_FOR_TABLES_ONLY).setSchemaName(schemaName) + .setTableName(tableName).build().buildException(); + } + if (!Objects.equals(metaProperties.getMaxLookbackAge(), table.getMaxLookbackAge())) { + metaPropertiesEvaluated.setMaxLookbackAge(metaProperties.getMaxLookbackAge()); + changingPhoenixTableProperty = true; + } + } - public void setAppendOnlySchemaProp(Boolean appendOnlySchemaProp) { - this.appendOnlySchemaProp = appendOnlySchemaProp; - } + return changingPhoenixTableProperty; + } + + public static class MetaProperties { + private Boolean isImmutableRowsProp = null; + private Boolean multiTenantProp = null; + private Boolean disableWALProp = null; + private Boolean storeNullsProp = null; + private TransactionFactory.Provider transactionProviderProp = null; + private Boolean isTransactionalProp = null; + private Long updateCacheFrequencyProp = null; + private String physicalTableNameProp = null; + private QualifierEncodingScheme columnEncodedBytesProp = null; + private Boolean appendOnlySchemaProp = null; + private Long guidePostWidth = -1L; + private ImmutableStorageScheme immutableStorageSchemeProp = null; + private Boolean useStatsForParallelizationProp = null; + private boolean nonTxToTx = false; + private Integer ttl = null; + private Boolean isChangeDetectionEnabled = null; + private String physicalTableName = null; + private String schemaVersion = null; + private String streamingTopicName = null; + + private Long maxLookbackAge = null; + + public Boolean getImmutableRowsProp() { + return isImmutableRowsProp; + } - public Long 
getGuidePostWidth() { - return guidePostWidth; - } + public void setImmutableRowsProp(Boolean isImmutableRowsProp) { + this.isImmutableRowsProp = isImmutableRowsProp; + } - public void setGuidePostWidth(Long guidePostWidth) { - this.guidePostWidth = guidePostWidth; - } + public Boolean getMultiTenantProp() { + return multiTenantProp; + } - public ImmutableStorageScheme getImmutableStorageSchemeProp() { - return immutableStorageSchemeProp; - } + public void setMultiTenantProp(Boolean multiTenantProp) { + this.multiTenantProp = multiTenantProp; + } - public void setImmutableStorageSchemeProp( - ImmutableStorageScheme immutableStorageSchemeProp) { - this.immutableStorageSchemeProp = immutableStorageSchemeProp; - } + public Boolean getDisableWALProp() { + return disableWALProp; + } - public QualifierEncodingScheme getColumnEncodedBytesProp() { - return columnEncodedBytesProp; - } + public void setDisableWALProp(Boolean disableWALProp) { + this.disableWALProp = disableWALProp; + } - public void setColumnEncodedBytesProp( - QualifierEncodingScheme columnEncodedBytesProp) { - this.columnEncodedBytesProp = columnEncodedBytesProp; - } + public Boolean getStoreNullsProp() { + return storeNullsProp; + } - public Boolean getUseStatsForParallelizationProp() { - return useStatsForParallelizationProp; - } + public void setStoreNullsProp(Boolean storeNullsProp) { + this.storeNullsProp = storeNullsProp; + } - public void setUseStatsForParallelizationProp(Boolean useStatsForParallelizationProp) { - this.useStatsForParallelizationProp = useStatsForParallelizationProp; - } + public TransactionFactory.Provider getTransactionProviderProp() { + return transactionProviderProp; + } - public boolean getNonTxToTx() { - return nonTxToTx; - } + public void setTransactionProviderProp(TransactionFactory.Provider transactionProviderProp) { + this.transactionProviderProp = transactionProviderProp; + } - public void setNonTxToTx(boolean nonTxToTx) { - this.nonTxToTx = nonTxToTx; - } + public Boolean getIsTransactionalProp() { + return isTransactionalProp; + } - public Integer getTTL() { - return ttl; - } + public void setIsTransactionalProp(Boolean isTransactionalProp) { + this.isTransactionalProp = isTransactionalProp; + } - public void setTTL(Integer ttl) { - this.ttl = ttl; - } + public void setPhysicalTableNameProp(String physicalTableNameProp) { + this.physicalTableNameProp = physicalTableNameProp; + } - public Boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } + public String getPhysicalTableNameProp() { + return this.physicalTableNameProp; + } - public void setChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { - this.isChangeDetectionEnabled = isChangeDetectionEnabled; - } + public Long getUpdateCacheFrequencyProp() { + return updateCacheFrequencyProp; + } - public String getPhysicalTableName() { - return physicalTableName; - } + public void setUpdateCacheFrequencyProp(Long updateCacheFrequencyProp) { + this.updateCacheFrequencyProp = updateCacheFrequencyProp; + } - public void setPhysicalTableName(String physicalTableName) { - this.physicalTableName = physicalTableName; - } + public Boolean getAppendOnlySchemaProp() { + return appendOnlySchemaProp; + } - public String getSchemaVersion() { - return schemaVersion; - } + public void setAppendOnlySchemaProp(Boolean appendOnlySchemaProp) { + this.appendOnlySchemaProp = appendOnlySchemaProp; + } - public void setSchemaVersion(String schemaVersion) { - this.schemaVersion = schemaVersion; - } + public Long getGuidePostWidth() { + return 
guidePostWidth; + } - public String getStreamingTopicName() { return streamingTopicName; } + public void setGuidePostWidth(Long guidePostWidth) { + this.guidePostWidth = guidePostWidth; + } - public void setStreamingTopicName(String streamingTopicName) { - this.streamingTopicName = streamingTopicName; - } + public ImmutableStorageScheme getImmutableStorageSchemeProp() { + return immutableStorageSchemeProp; + } - public Long getMaxLookbackAge() { - return maxLookbackAge; - } + public void setImmutableStorageSchemeProp(ImmutableStorageScheme immutableStorageSchemeProp) { + this.immutableStorageSchemeProp = immutableStorageSchemeProp; + } - public void setMaxLookbackAge(Long maxLookbackAge) { - this.maxLookbackAge = maxLookbackAge; - } + public QualifierEncodingScheme getColumnEncodedBytesProp() { + return columnEncodedBytesProp; } - private static class MetaPropertiesEvaluated { - private Boolean isImmutableRows; - private Boolean multiTenant = null; - private Boolean disableWAL = null; - private Long updateCacheFrequency = null; - private Boolean appendOnlySchema = null; - private Long guidePostWidth = -1L; - private ImmutableStorageScheme immutableStorageScheme = null; - private QualifierEncodingScheme columnEncodedBytes = null; - private Boolean storeNulls = null; - private Boolean useStatsForParallelization = null; - private Boolean isTransactional = null; - private TransactionFactory.Provider transactionProvider = null; - private Integer ttl = null; - private Boolean isChangeDetectionEnabled = null; - private String physicalTableName = null; - private String schemaVersion = null; - private String streamingTopicName = null; + public void setColumnEncodedBytesProp(QualifierEncodingScheme columnEncodedBytesProp) { + this.columnEncodedBytesProp = columnEncodedBytesProp; + } - private Long maxLookbackAge = null; + public Boolean getUseStatsForParallelizationProp() { + return useStatsForParallelizationProp; + } - public Boolean getIsImmutableRows() { - return isImmutableRows; - } + public void setUseStatsForParallelizationProp(Boolean useStatsForParallelizationProp) { + this.useStatsForParallelizationProp = useStatsForParallelizationProp; + } - public void setIsImmutableRows(Boolean isImmutableRows) { - this.isImmutableRows = isImmutableRows; - } + public boolean getNonTxToTx() { + return nonTxToTx; + } - public Boolean getMultiTenant() { - return multiTenant; - } + public void setNonTxToTx(boolean nonTxToTx) { + this.nonTxToTx = nonTxToTx; + } - public void setMultiTenant(Boolean multiTenant) { - this.multiTenant = multiTenant; - } + public Integer getTTL() { + return ttl; + } - public Boolean getDisableWAL() { - return disableWAL; - } + public void setTTL(Integer ttl) { + this.ttl = ttl; + } - public void setDisableWAL(Boolean disableWAL) { - this.disableWAL = disableWAL; - } + public Boolean isChangeDetectionEnabled() { + return isChangeDetectionEnabled; + } - public Long getUpdateCacheFrequency() { - return updateCacheFrequency; - } + public void setChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { + this.isChangeDetectionEnabled = isChangeDetectionEnabled; + } - public void setUpdateCacheFrequency(Long updateCacheFrequency) { - this.updateCacheFrequency = updateCacheFrequency; - } + public String getPhysicalTableName() { + return physicalTableName; + } - public Boolean getAppendOnlySchema() { - return appendOnlySchema; - } + public void setPhysicalTableName(String physicalTableName) { + this.physicalTableName = physicalTableName; + } - public void setAppendOnlySchema(Boolean 
appendOnlySchema) { - this.appendOnlySchema = appendOnlySchema; - } + public String getSchemaVersion() { + return schemaVersion; + } - public Long getGuidePostWidth() { - return guidePostWidth; - } + public void setSchemaVersion(String schemaVersion) { + this.schemaVersion = schemaVersion; + } - public void setGuidePostWidth(Long guidePostWidth) { - this.guidePostWidth = guidePostWidth; - } + public String getStreamingTopicName() { + return streamingTopicName; + } - public ImmutableStorageScheme getImmutableStorageScheme() { - return immutableStorageScheme; - } + public void setStreamingTopicName(String streamingTopicName) { + this.streamingTopicName = streamingTopicName; + } - public void setImmutableStorageScheme(ImmutableStorageScheme immutableStorageScheme) { - this.immutableStorageScheme = immutableStorageScheme; - } + public Long getMaxLookbackAge() { + return maxLookbackAge; + } - public QualifierEncodingScheme getColumnEncodedBytes() { - return columnEncodedBytes; - } + public void setMaxLookbackAge(Long maxLookbackAge) { + this.maxLookbackAge = maxLookbackAge; + } + } + + private static class MetaPropertiesEvaluated { + private Boolean isImmutableRows; + private Boolean multiTenant = null; + private Boolean disableWAL = null; + private Long updateCacheFrequency = null; + private Boolean appendOnlySchema = null; + private Long guidePostWidth = -1L; + private ImmutableStorageScheme immutableStorageScheme = null; + private QualifierEncodingScheme columnEncodedBytes = null; + private Boolean storeNulls = null; + private Boolean useStatsForParallelization = null; + private Boolean isTransactional = null; + private TransactionFactory.Provider transactionProvider = null; + private Integer ttl = null; + private Boolean isChangeDetectionEnabled = null; + private String physicalTableName = null; + private String schemaVersion = null; + private String streamingTopicName = null; + + private Long maxLookbackAge = null; + + public Boolean getIsImmutableRows() { + return isImmutableRows; + } - public void setColumnEncodedBytes(QualifierEncodingScheme columnEncodedBytes) { - this.columnEncodedBytes = columnEncodedBytes; - } - public Boolean getStoreNulls() { - return storeNulls; - } + public void setIsImmutableRows(Boolean isImmutableRows) { + this.isImmutableRows = isImmutableRows; + } - public void setStoreNulls(Boolean storeNulls) { - this.storeNulls = storeNulls; - } + public Boolean getMultiTenant() { + return multiTenant; + } - public Boolean getUseStatsForParallelization() { - return useStatsForParallelization; - } + public void setMultiTenant(Boolean multiTenant) { + this.multiTenant = multiTenant; + } - public void setUseStatsForParallelization(Boolean useStatsForParallelization) { - this.useStatsForParallelization = useStatsForParallelization; - } + public Boolean getDisableWAL() { + return disableWAL; + } - public Boolean getIsTransactional() { - return isTransactional; - } + public void setDisableWAL(Boolean disableWAL) { + this.disableWAL = disableWAL; + } - public void setIsTransactional(Boolean isTransactional) { - this.isTransactional = isTransactional; - } - - public TransactionFactory.Provider getTransactionProvider() { - return transactionProvider; - } + public Long getUpdateCacheFrequency() { + return updateCacheFrequency; + } - public void setTransactionProvider(TransactionFactory.Provider transactionProvider) { - this.transactionProvider = transactionProvider; - } + public void setUpdateCacheFrequency(Long updateCacheFrequency) { + this.updateCacheFrequency = 
updateCacheFrequency; + } - public Integer getTTL() { return ttl; } + public Boolean getAppendOnlySchema() { + return appendOnlySchema; + } - public void setTTL(Integer ttl) { this.ttl = ttl; } + public void setAppendOnlySchema(Boolean appendOnlySchema) { + this.appendOnlySchema = appendOnlySchema; + } - public Boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } + public Long getGuidePostWidth() { + return guidePostWidth; + } - public void setChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { - this.isChangeDetectionEnabled = isChangeDetectionEnabled; - } + public void setGuidePostWidth(Long guidePostWidth) { + this.guidePostWidth = guidePostWidth; + } - public String getPhysicalTableName() { - return physicalTableName; - } + public ImmutableStorageScheme getImmutableStorageScheme() { + return immutableStorageScheme; + } - public void setPhysicalTableName(String physicalTableName) { - this.physicalTableName = physicalTableName; - } + public void setImmutableStorageScheme(ImmutableStorageScheme immutableStorageScheme) { + this.immutableStorageScheme = immutableStorageScheme; + } - public String getSchemaVersion() { - return schemaVersion; - } + public QualifierEncodingScheme getColumnEncodedBytes() { + return columnEncodedBytes; + } - public void setSchemaVersion(String schemaVersion) { - this.schemaVersion = schemaVersion; - } + public void setColumnEncodedBytes(QualifierEncodingScheme columnEncodedBytes) { + this.columnEncodedBytes = columnEncodedBytes; + } - public String getStreamingTopicName() { return streamingTopicName; } + public Boolean getStoreNulls() { + return storeNulls; + } - public void setStreamingTopicName(String streamingTopicName) { - this.streamingTopicName = streamingTopicName; - } + public void setStoreNulls(Boolean storeNulls) { + this.storeNulls = storeNulls; + } - public Long getMaxLookbackAge() { - return maxLookbackAge; - } + public Boolean getUseStatsForParallelization() { + return useStatsForParallelization; + } - public void setMaxLookbackAge(Long maxLookbackAge) { - this.maxLookbackAge = maxLookbackAge; - } + public void setUseStatsForParallelization(Boolean useStatsForParallelization) { + this.useStatsForParallelization = useStatsForParallelization; } + public Boolean getIsTransactional() { + return isTransactional; + } - /** - * GRANT/REVOKE statements use this method to update HBase acl's - * Perms can be changed at Schema, Table or User level - * @throws SQLException - */ - public MutationState changePermissions(ChangePermsStatement changePermsStatement) throws SQLException { + public void setIsTransactional(Boolean isTransactional) { + this.isTransactional = isTransactional; + } - LOGGER.info(changePermsStatement.toString()); + public TransactionFactory.Provider getTransactionProvider() { + return transactionProvider; + } - try(Admin admin = connection.getQueryServices().getAdmin()) { - ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection(); + public void setTransactionProvider(TransactionFactory.Provider transactionProvider) { + this.transactionProvider = transactionProvider; + } - if (changePermsStatement.getSchemaName() != null) { - // SYSTEM.CATALOG doesn't have any entry for "default" HBase namespace, hence we will bypass the check - if (!changePermsStatement.getSchemaName() - .equals(SchemaUtil.SCHEMA_FOR_DEFAULT_NAMESPACE)) { - FromCompiler.getResolverForSchema(changePermsStatement.getSchemaName(), - connection); - } + public Integer getTTL() { + return ttl; + } - 
changePermsOnSchema(clusterConnection, changePermsStatement); - } else if (changePermsStatement.getTableName() != null) { - PTable inputTable = connection.getTable(SchemaUtil. - normalizeFullTableName(changePermsStatement.getTableName().toString())); - if (!(PTableType.TABLE.equals(inputTable.getType()) || PTableType.SYSTEM.equals(inputTable.getType()))) { - throw new AccessDeniedException("Cannot GRANT or REVOKE permissions on INDEX TABLES or VIEWS"); - } + public void setTTL(Integer ttl) { + this.ttl = ttl; + } - // Changing perms on base table and update the perms for global and view indexes - // Views and local indexes are not physical tables and hence update perms is not needed - changePermsOnTables(clusterConnection, admin, changePermsStatement, inputTable); - } else { + public Boolean isChangeDetectionEnabled() { + return isChangeDetectionEnabled; + } - // User can be given perms at the global level - changePermsOnUser(clusterConnection, changePermsStatement); - } + public void setChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { + this.isChangeDetectionEnabled = isChangeDetectionEnabled; + } - } catch (SQLException e) { - // Bubble up the SQL Exception - throw e; - } catch (Throwable throwable) { - // To change perms, the user must have ADMIN perms on that scope, otherwise it throws ADE - // Wrap around ADE and other exceptions to PhoenixIOException - throw ClientUtil.parseServerException(throwable); - } + public String getPhysicalTableName() { + return physicalTableName; + } - return new MutationState(0, 0, connection); + public void setPhysicalTableName(String physicalTableName) { + this.physicalTableName = physicalTableName; } - private void changePermsOnSchema(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement) throws Throwable { - if (changePermsStatement.isGrantStatement()) { - AccessControlClient.grant(clusterConnection, changePermsStatement.getSchemaName(), changePermsStatement.getName(), changePermsStatement.getPermsList()); - } else { - AccessControlClient.revoke(clusterConnection, changePermsStatement.getSchemaName(), changePermsStatement.getName(), Permission.Action.values()); - } + public String getSchemaVersion() { + return schemaVersion; } - private void changePermsOnTables(ClusterConnection clusterConnection, Admin admin, ChangePermsStatement changePermsStatement, PTable inputTable) throws Throwable { + public void setSchemaVersion(String schemaVersion) { + this.schemaVersion = schemaVersion; + } - org.apache.hadoop.hbase.TableName tableName = SchemaUtil.getPhysicalTableName - (inputTable.getPhysicalName().getBytes(), inputTable.isNamespaceMapped()); + public String getStreamingTopicName() { + return streamingTopicName; + } - changePermsOnTable(clusterConnection, changePermsStatement, tableName); + public void setStreamingTopicName(String streamingTopicName) { + this.streamingTopicName = streamingTopicName; + } - boolean schemaInconsistency = false; - List inconsistentTables = null; + public Long getMaxLookbackAge() { + return maxLookbackAge; + } - for (PTable indexTable : inputTable.getIndexes()) { - // Local Indexes don't correspond to new physical table, they are just stored in separate CF of base table. 
- if (indexTable.getIndexType().equals(IndexType.LOCAL)) { - continue; - } - if (inputTable.isNamespaceMapped() != indexTable.isNamespaceMapped()) { - schemaInconsistency = true; - if (inconsistentTables == null) { - inconsistentTables = new ArrayList<>(); - } - inconsistentTables.add(indexTable); - continue; - } - LOGGER.info("Updating permissions for Index Table: " + - indexTable.getName() + " Base Table: " + inputTable.getName()); - tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), indexTable.isNamespaceMapped()); - changePermsOnTable(clusterConnection, changePermsStatement, tableName); - } + public void setMaxLookbackAge(Long maxLookbackAge) { + this.maxLookbackAge = maxLookbackAge; + } + } + + /** + * GRANT/REVOKE statements use this method to update HBase acl's Perms can be changed at Schema, + * Table or User level + */ + public MutationState changePermissions(ChangePermsStatement changePermsStatement) + throws SQLException { + + LOGGER.info(changePermsStatement.toString()); + + try (Admin admin = connection.getQueryServices().getAdmin()) { + ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection(); + + if (changePermsStatement.getSchemaName() != null) { + // SYSTEM.CATALOG doesn't have any entry for "default" HBase namespace, hence we will bypass + // the check + if (!changePermsStatement.getSchemaName().equals(SchemaUtil.SCHEMA_FOR_DEFAULT_NAMESPACE)) { + FromCompiler.getResolverForSchema(changePermsStatement.getSchemaName(), connection); + } + + changePermsOnSchema(clusterConnection, changePermsStatement); + } else if (changePermsStatement.getTableName() != null) { + PTable inputTable = connection.getTable( + SchemaUtil.normalizeFullTableName(changePermsStatement.getTableName().toString())); + if ( + !(PTableType.TABLE.equals(inputTable.getType()) + || PTableType.SYSTEM.equals(inputTable.getType())) + ) { + throw new AccessDeniedException( + "Cannot GRANT or REVOKE permissions on INDEX TABLES or VIEWS"); + } + + // Changing perms on base table and update the perms for global and view indexes + // Views and local indexes are not physical tables and hence update perms is not needed + changePermsOnTables(clusterConnection, admin, changePermsStatement, inputTable); + } else { + + // User can be given perms at the global level + changePermsOnUser(clusterConnection, changePermsStatement); + } + + } catch (SQLException e) { + // Bubble up the SQL Exception + throw e; + } catch (Throwable throwable) { + // To change perms, the user must have ADMIN perms on that scope, otherwise it throws ADE + // Wrap around ADE and other exceptions to PhoenixIOException + throw ClientUtil.parseServerException(throwable); + } - if (schemaInconsistency) { - for (PTable table : inconsistentTables) { - LOGGER.error("Fail to propagate permissions to Index Table: " + table.getName()); - } - throw new TablesNotInSyncException(inputTable.getTableName().getString(), - inconsistentTables.get(0).getTableName().getString(), "Namespace properties"); - } - - // There will be only a single View Index Table for all the indexes created on views - byte[] viewIndexTableBytes = MetaDataUtil.getViewIndexPhysicalName(inputTable.getPhysicalName().getBytes()); - tableName = org.apache.hadoop.hbase.TableName.valueOf(viewIndexTableBytes); - boolean viewIndexTableExists = admin.tableExists(tableName); - if (viewIndexTableExists) { - LOGGER.info("Updating permissions for View Index Table: " + - Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName()); - 
changePermsOnTable(clusterConnection, changePermsStatement, tableName); - } else { - if (inputTable.isMultiTenant()) { - LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName()); - LOGGER.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString()); - throw new TablesNotInSyncException(inputTable.getTableName().getString(), - Bytes.toString(viewIndexTableBytes), " View Index table should exist for MultiTenant tables"); - } - } + return new MutationState(0, 0, connection); + } + + private void changePermsOnSchema(ClusterConnection clusterConnection, + ChangePermsStatement changePermsStatement) throws Throwable { + if (changePermsStatement.isGrantStatement()) { + AccessControlClient.grant(clusterConnection, changePermsStatement.getSchemaName(), + changePermsStatement.getName(), changePermsStatement.getPermsList()); + } else { + AccessControlClient.revoke(clusterConnection, changePermsStatement.getSchemaName(), + changePermsStatement.getName(), Permission.Action.values()); + } + } + + private void changePermsOnTables(ClusterConnection clusterConnection, Admin admin, + ChangePermsStatement changePermsStatement, PTable inputTable) throws Throwable { + + org.apache.hadoop.hbase.TableName tableName = SchemaUtil.getPhysicalTableName( + inputTable.getPhysicalName().getBytes(), inputTable.isNamespaceMapped()); + + changePermsOnTable(clusterConnection, changePermsStatement, tableName); + + boolean schemaInconsistency = false; + List inconsistentTables = null; + + for (PTable indexTable : inputTable.getIndexes()) { + // Local Indexes don't correspond to new physical table, they are just stored in separate CF + // of base table. + if (indexTable.getIndexType().equals(IndexType.LOCAL)) { + continue; + } + if (inputTable.isNamespaceMapped() != indexTable.isNamespaceMapped()) { + schemaInconsistency = true; + if (inconsistentTables == null) { + inconsistentTables = new ArrayList<>(); + } + inconsistentTables.add(indexTable); + continue; + } + LOGGER.info("Updating permissions for Index Table: " + indexTable.getName() + " Base Table: " + + inputTable.getName()); + tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), + indexTable.isNamespaceMapped()); + changePermsOnTable(clusterConnection, changePermsStatement, tableName); } - private void changePermsOnTable(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement, org.apache.hadoop.hbase.TableName tableName) - throws Throwable { - if (changePermsStatement.isGrantStatement()) { - AccessControlClient.grant(clusterConnection, tableName, changePermsStatement.getName(), - null, null, changePermsStatement.getPermsList()); - } else { - AccessControlClient.revoke(clusterConnection, tableName, changePermsStatement.getName(), - null, null, Permission.Action.values()); - } + if (schemaInconsistency) { + for (PTable table : inconsistentTables) { + LOGGER.error("Fail to propagate permissions to Index Table: " + table.getName()); + } + throw new TablesNotInSyncException(inputTable.getTableName().getString(), + inconsistentTables.get(0).getTableName().getString(), "Namespace properties"); } - private void changePermsOnUser(ClusterConnection clusterConnection, ChangePermsStatement changePermsStatement) - throws Throwable { - if (changePermsStatement.isGrantStatement()) { - AccessControlClient.grant(clusterConnection, changePermsStatement.getName(), changePermsStatement.getPermsList()); - } else { - AccessControlClient.revoke(clusterConnection, 
changePermsStatement.getName(), Permission.Action.values()); - } + // There will be only a single View Index Table for all the indexes created on views + byte[] viewIndexTableBytes = + MetaDataUtil.getViewIndexPhysicalName(inputTable.getPhysicalName().getBytes()); + tableName = org.apache.hadoop.hbase.TableName.valueOf(viewIndexTableBytes); + boolean viewIndexTableExists = admin.tableExists(tableName); + if (viewIndexTableExists) { + LOGGER.info("Updating permissions for View Index Table: " + + Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName()); + changePermsOnTable(clusterConnection, changePermsStatement, tableName); + } else { + if (inputTable.isMultiTenant()) { + LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName()); + LOGGER.error( + "Fail to propagate permissions to view Index Table: " + tableName.getNameAsString()); + throw new TablesNotInSyncException(inputTable.getTableName().getString(), + Bytes.toString(viewIndexTableBytes), + " View Index table should exist for MultiTenant tables"); + } + } + } + + private void changePermsOnTable(ClusterConnection clusterConnection, + ChangePermsStatement changePermsStatement, org.apache.hadoop.hbase.TableName tableName) + throws Throwable { + if (changePermsStatement.isGrantStatement()) { + AccessControlClient.grant(clusterConnection, tableName, changePermsStatement.getName(), null, + null, changePermsStatement.getPermsList()); + } else { + AccessControlClient.revoke(clusterConnection, tableName, changePermsStatement.getName(), null, + null, Permission.Action.values()); + } + } + + private void changePermsOnUser(ClusterConnection clusterConnection, + ChangePermsStatement changePermsStatement) throws Throwable { + if (changePermsStatement.isGrantStatement()) { + AccessControlClient.grant(clusterConnection, changePermsStatement.getName(), + changePermsStatement.getPermsList()); + } else { + AccessControlClient.revoke(clusterConnection, changePermsStatement.getName(), + Permission.Action.values()); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataEntityNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataEntityNotFoundException.java index a52381cfbd0..6f643fc4c55 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataEntityNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataEntityNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,13 +20,13 @@ import java.sql.SQLException; public abstract class MetaDataEntityNotFoundException extends SQLException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private final String schemaName; private final String tableName; public MetaDataEntityNotFoundException(String reason, String sqlState, int code, - String schemaName, String tableName, Throwable cause) { + String schemaName, String tableName, Throwable cause) { super(reason, sqlState, code, cause); this.schemaName = schemaName; this.tableName = tableName; diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerFunctionAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerFunctionAlreadyExistsException.java index 85457a0e21c..266ed1889b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerFunctionAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerFunctionAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,20 +20,20 @@ import org.apache.phoenix.parse.PFunction; public class NewerFunctionAlreadyExistsException extends FunctionAlreadyExistsException { - private static final long serialVersionUID = 1L; - private final PFunction function; + private static final long serialVersionUID = 1L; + private final PFunction function; - public NewerFunctionAlreadyExistsException(String functionName) { - this(functionName, null); - } + public NewerFunctionAlreadyExistsException(String functionName) { + this(functionName, null); + } - public NewerFunctionAlreadyExistsException(String functionName, PFunction function) { - super(functionName); - this.function = function; - } + public NewerFunctionAlreadyExistsException(String functionName, PFunction function) { + super(functionName); + this.function = function; + } - public PFunction getFunction() { - return this.function; - } + public PFunction getFunction() { + return this.function; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java index b90845c0c04..01cf93cf238 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerSchemaAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,9 +18,9 @@ package org.apache.phoenix.schema; public class NewerSchemaAlreadyExistsException extends SchemaAlreadyExistsException { - private static final long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; - public NewerSchemaAlreadyExistsException(String schemaName) { - super(schemaName); - } + public NewerSchemaAlreadyExistsException(String schemaName) { + super(schemaName); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerTableAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerTableAlreadyExistsException.java index 54044858882..e5ca3f59028 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerTableAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/NewerTableAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,19 +18,19 @@ package org.apache.phoenix.schema; public class NewerTableAlreadyExistsException extends TableAlreadyExistsException { - private static final long serialVersionUID = 1L; - private final PTable table; + private static final long serialVersionUID = 1L; + private final PTable table; - public NewerTableAlreadyExistsException(String schemaName, String tableName) { - this(schemaName, tableName, null); - } + public NewerTableAlreadyExistsException(String schemaName, String tableName) { + this(schemaName, tableName, null); + } - public NewerTableAlreadyExistsException(String schemaName, String tableName, PTable table) { - super(schemaName, tableName); - this.table = table; - } + public NewerTableAlreadyExistsException(String schemaName, String tableName, PTable table) { + super(schemaName, tableName); + this.table = table; + } - public PTable getTable() { - return table; - } + public PTable getTable() { + return table; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumn.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumn.java index 2e518c49911..371b1c470f7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumn.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,61 +17,44 @@ */ package org.apache.phoenix.schema; - /** * Definition of a Phoenix column - * - * * @since 0.1 */ public interface PColumn extends PDatum { - /** - * @return the name of the column - */ - PName getName(); + /** Returns the name of the column */ + PName getName(); + + /** Returns the name of the column family */ + PName getFamilyName(); + + /** Returns the zero-based ordinal position of the column */ + int getPosition(); + + /** Returns the declared array size or zero if this is not an array */ + Integer getArraySize(); + + byte[] getViewConstant(); + + boolean isViewReferenced(); + + int getEstimatedSize(); + + String getExpressionStr(); + + /** Returns the cell timestamp associated with this PColumn */ + long getTimestamp(); - /** - * @return the name of the column family - */ - PName getFamilyName(); + /** Returns is the column derived from some other table / view or not */ + boolean isDerived(); - /** - * @return the zero-based ordinal position of the column - */ - int getPosition(); - - /** - * @return the declared array size or zero if this is not an array - */ - Integer getArraySize(); - - byte[] getViewConstant(); - - boolean isViewReferenced(); - - int getEstimatedSize(); - - String getExpressionStr(); + boolean isExcluded(); - /** - * @return the cell timestamp associated with this PColumn - */ - long getTimestamp(); + /** Returns whether this column represents/stores the hbase cell timestamp. */ + boolean isRowTimestamp(); - /** - * @return is the column derived from some other table / view or not - */ - boolean isDerived(); + boolean isDynamic(); - boolean isExcluded(); - - /** - * @return whether this column represents/stores the hbase cell timestamp. - */ - boolean isRowTimestamp(); - - boolean isDynamic(); - - byte[] getColumnQualifierBytes(); + byte[] getColumnQualifierBytes(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamily.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamily.java index c4c383e7f43..da316269ad8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamily.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamily.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,41 +20,34 @@ import java.util.Collection; /** - * * Definition of a Phoenix Column Family - * - * * @since 0.1 */ public interface PColumnFamily { - - /** - * @return The column family name. - */ - PName getName(); - - /** - * @return All the PColumns in this column family. - */ - Collection getColumns(); - - /** - * @return The PColumn for the specified column name. - * @throws ColumnNotFoundException if the column cannot be found - */ - PColumn getPColumnForColumnNameBytes(byte[] columnNameBytes) throws ColumnNotFoundException; - - /** - * @return The PColumn for the specified column name. 
- * @throws ColumnNotFoundException if the column cannot be found - */ - PColumn getPColumnForColumnName(String columnName) throws ColumnNotFoundException; - - int getEstimatedSize(); - - /** - * @return The PColumn for the specified column qualifier. - * @throws ColumnNotFoundException if the column cannot be found - */ - PColumn getPColumnForColumnQualifier(byte[] cq) throws ColumnNotFoundException; -} \ No newline at end of file + + /** Returns The column family name. */ + PName getName(); + + /** Returns All the PColumns in this column family. */ + Collection getColumns(); + + /** + * @return The PColumn for the specified column name. + * @throws ColumnNotFoundException if the column cannot be found + */ + PColumn getPColumnForColumnNameBytes(byte[] columnNameBytes) throws ColumnNotFoundException; + + /** + * @return The PColumn for the specified column name. + * @throws ColumnNotFoundException if the column cannot be found + */ + PColumn getPColumnForColumnName(String columnName) throws ColumnNotFoundException; + + int getEstimatedSize(); + + /** + * @return The PColumn for the specified column qualifier. + * @throws ColumnNotFoundException if the column cannot be found + */ + PColumn getPColumnForColumnQualifier(byte[] cq) throws ColumnNotFoundException; +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java index b894687fef0..89f8291ea3f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnFamilyImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,87 +21,89 @@ import java.util.Map; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.util.SizedUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSortedMap; +import org.apache.phoenix.util.SchemaUtil; +import org.apache.phoenix.util.SizedUtil; public class PColumnFamilyImpl implements PColumnFamily { - private final PName name; - private final List columns; - private final Map columnNamesByStrings; - private final Map columnNamesByBytes; - private final Map columnsByQualifiers; - private final int estimatedSize; + private final PName name; + private final List columns; + private final Map columnNamesByStrings; + private final Map columnNamesByBytes; + private final Map columnsByQualifiers; + private final int estimatedSize; - @Override - public int getEstimatedSize() { - return estimatedSize; - } - - public PColumnFamilyImpl(PName name, List columns) { - Preconditions.checkNotNull(name); - // Include guidePosts also in estimating the size - long estimatedSize = SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 5 + SizedUtil.INT_SIZE + name.getEstimatedSize() + - SizedUtil.sizeOfMap(columns.size()) * 2 + SizedUtil.sizeOfArrayList(columns.size()); - 
this.name = name; - this.columns = ImmutableList.copyOf(columns); - ImmutableMap.Builder columnNamesByStringBuilder = ImmutableMap.builder(); - ImmutableSortedMap.Builder columnNamesByBytesBuilder = ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR); - ImmutableSortedMap.Builder columnsByQualifiersBuilder = ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR); - for (PColumn column : columns) { - estimatedSize += column.getEstimatedSize(); - columnNamesByBytesBuilder.put(column.getName().getBytes(), column); - columnNamesByStringBuilder.put(column.getName().getString(), column); - // In certain cases like JOIN, PK columns are assigned a column family. So they - // are not evaluated as a PK column. However, their column qualifier bytes are - // still null. - if (!SchemaUtil.isPKColumn(column) && column.getColumnQualifierBytes() != null) { - columnsByQualifiersBuilder.put(column.getColumnQualifierBytes(), column); - } - } - this.columnNamesByBytes = columnNamesByBytesBuilder.build(); - this.columnNamesByStrings = columnNamesByStringBuilder.build(); - this.columnsByQualifiers = columnsByQualifiersBuilder.build(); - this.estimatedSize = (int)estimatedSize; - } - - @Override - public PName getName() { - return name; - } + @Override + public int getEstimatedSize() { + return estimatedSize; + } - @Override - public List getColumns() { - return columns; + public PColumnFamilyImpl(PName name, List columns) { + Preconditions.checkNotNull(name); + // Include guidePosts also in estimating the size + long estimatedSize = SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 5 + SizedUtil.INT_SIZE + + name.getEstimatedSize() + SizedUtil.sizeOfMap(columns.size()) * 2 + + SizedUtil.sizeOfArrayList(columns.size()); + this.name = name; + this.columns = ImmutableList.copyOf(columns); + ImmutableMap.Builder columnNamesByStringBuilder = ImmutableMap.builder(); + ImmutableSortedMap.Builder columnNamesByBytesBuilder = + ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR); + ImmutableSortedMap.Builder columnsByQualifiersBuilder = + ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR); + for (PColumn column : columns) { + estimatedSize += column.getEstimatedSize(); + columnNamesByBytesBuilder.put(column.getName().getBytes(), column); + columnNamesByStringBuilder.put(column.getName().getString(), column); + // In certain cases like JOIN, PK columns are assigned a column family. So they + // are not evaluated as a PK column. However, their column qualifier bytes are + // still null. 
+ if (!SchemaUtil.isPKColumn(column) && column.getColumnQualifierBytes() != null) { + columnsByQualifiersBuilder.put(column.getColumnQualifierBytes(), column); + } } + this.columnNamesByBytes = columnNamesByBytesBuilder.build(); + this.columnNamesByStrings = columnNamesByStringBuilder.build(); + this.columnsByQualifiers = columnsByQualifiersBuilder.build(); + this.estimatedSize = (int) estimatedSize; + } - @Override - public PColumn getPColumnForColumnNameBytes(byte[] columnNameBytes) throws ColumnNotFoundException { - PColumn column = columnNamesByBytes.get(columnNameBytes); - if (column == null) { - throw new ColumnNotFoundException(Bytes.toString(columnNameBytes)); - } - return column; - } - - @Override - public PColumn getPColumnForColumnName(String columnName) throws ColumnNotFoundException { - PColumn column = columnNamesByStrings.get(columnName); - if (column == null) { - throw new ColumnNotFoundException(columnName); - } - return column; + @Override + public PName getName() { + return name; + } + + @Override + public List getColumns() { + return columns; + } + + @Override + public PColumn getPColumnForColumnNameBytes(byte[] columnNameBytes) + throws ColumnNotFoundException { + PColumn column = columnNamesByBytes.get(columnNameBytes); + if (column == null) { + throw new ColumnNotFoundException(Bytes.toString(columnNameBytes)); } - - - //TODO: samarth think about backward compatibility here - @Override - public PColumn getPColumnForColumnQualifier(byte[] cq) throws ColumnNotFoundException { - Preconditions.checkNotNull(cq); - return columnsByQualifiers.get(cq); + return column; + } + + @Override + public PColumn getPColumnForColumnName(String columnName) throws ColumnNotFoundException { + PColumn column = columnNamesByStrings.get(columnName); + if (column == null) { + throw new ColumnNotFoundException(columnName); } + return column; + } + + // TODO: samarth think about backward compatibility here + @Override + public PColumn getPColumnForColumnQualifier(byte[] cq) throws ColumnNotFoundException { + Preconditions.checkNotNull(cq); + return columnsByQualifiers.get(cq); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnImpl.java index e3123a7502f..08cea836b6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PColumnImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,344 +22,354 @@ import org.apache.phoenix.coprocessor.generated.PTableProtos; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.SizedUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class PColumnImpl implements PColumn { - private PName name; - private PName familyName; - private PDataType dataType; - private Integer maxLength; - private Integer scale; - private boolean nullable; - private int position; - private SortOrder sortOrder; - private Integer arraySize; - private byte[] viewConstant; - private boolean isViewReferenced; - private String expressionStr; - private boolean isRowTimestamp; - private boolean isDynamic; - private byte[] columnQualifierBytes; - private boolean derived; - private long timestamp; - - public PColumnImpl() { - } + private PName name; + private PName familyName; + private PDataType dataType; + private Integer maxLength; + private Integer scale; + private boolean nullable; + private int position; + private SortOrder sortOrder; + private Integer arraySize; + private byte[] viewConstant; + private boolean isViewReferenced; + private String expressionStr; + private boolean isRowTimestamp; + private boolean isDynamic; + private byte[] columnQualifierBytes; + private boolean derived; + private long timestamp; - public PColumnImpl(PColumn column, int position) { - this(column, column.isDerived(), position); - } + public PColumnImpl() { + } - public PColumnImpl(PColumn column, byte[] viewConstant, boolean isViewReferenced) { - this(column.getName(), column.getFamilyName(), column.getDataType(), column.getMaxLength(), - column.getScale(), column.isNullable(), column.getPosition(), column.getSortOrder(), column.getArraySize(), viewConstant, isViewReferenced, column.getExpressionStr(), column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(), - column.getTimestamp(), column.isDerived()); - } + public PColumnImpl(PColumn column, int position) { + this(column, column.isDerived(), position); + } - public PColumnImpl(PColumn column, boolean derivedColumn, int position) { - this(column, derivedColumn, position, column.getViewConstant()); - } + public PColumnImpl(PColumn column, byte[] viewConstant, boolean isViewReferenced) { + this(column.getName(), column.getFamilyName(), column.getDataType(), column.getMaxLength(), + column.getScale(), column.isNullable(), column.getPosition(), column.getSortOrder(), + column.getArraySize(), viewConstant, isViewReferenced, column.getExpressionStr(), + column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(), + column.getTimestamp(), column.isDerived()); + } - public PColumnImpl(PColumn column, boolean derivedColumn, int position, byte[] viewConstant) { - this(column.getName(), column.getFamilyName(), column.getDataType(), column.getMaxLength(), - column.getScale(), column.isNullable(), position, column.getSortOrder(), column.getArraySize(), viewConstant, column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(), - 
column.getTimestamp(), derivedColumn); - } - - public PColumnImpl(PName name, PName familyName, PDataType dataType, Integer maxLength, Integer scale, boolean nullable, - int position, SortOrder sortOrder, Integer arrSize, byte[] viewConstant, boolean isViewReferenced, String expressionStr, boolean isRowTimestamp, boolean isDynamic, - byte[] columnQualifierBytes, long timestamp) { - this(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, viewConstant, isViewReferenced, expressionStr, isRowTimestamp, isDynamic, columnQualifierBytes, timestamp, false); - } + public PColumnImpl(PColumn column, boolean derivedColumn, int position) { + this(column, derivedColumn, position, column.getViewConstant()); + } - public PColumnImpl(PName name, PName familyName, PDataType dataType, Integer maxLength, Integer scale, boolean nullable, - int position, SortOrder sortOrder, Integer arrSize, byte[] viewConstant, boolean isViewReferenced, String expressionStr, boolean isRowTimestamp, boolean isDynamic, - byte[] columnQualifierBytes, long timestamp, boolean derived) { - init(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, viewConstant, isViewReferenced, expressionStr, isRowTimestamp, isDynamic, columnQualifierBytes, timestamp, derived); - } + public PColumnImpl(PColumn column, boolean derivedColumn, int position, byte[] viewConstant) { + this(column.getName(), column.getFamilyName(), column.getDataType(), column.getMaxLength(), + column.getScale(), column.isNullable(), position, column.getSortOrder(), + column.getArraySize(), viewConstant, column.isViewReferenced(), column.getExpressionStr(), + column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(), + column.getTimestamp(), derivedColumn); + } - private PColumnImpl(PName familyName, PName columnName, Long timestamp) { - this.familyName = familyName; - this.name = columnName; - this.derived = true; - if (timestamp!=null) { - this.timestamp = timestamp; - } - } + public PColumnImpl(PName name, PName familyName, PDataType dataType, Integer maxLength, + Integer scale, boolean nullable, int position, SortOrder sortOrder, Integer arrSize, + byte[] viewConstant, boolean isViewReferenced, String expressionStr, boolean isRowTimestamp, + boolean isDynamic, byte[] columnQualifierBytes, long timestamp) { + this(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, + viewConstant, isViewReferenced, expressionStr, isRowTimestamp, isDynamic, + columnQualifierBytes, timestamp, false); + } - // a excluded column (a column that was derived from a parent but that has been deleted) is - // denoted by a column that has a null type - public static PColumnImpl createExcludedColumn(PName familyName, PName columnName, Long timestamp) { - return new PColumnImpl(familyName, columnName, timestamp); - } - - private void init(PName name, PName familyName, PDataType dataType, Integer maxLength, - Integer scale, boolean nullable, int position, SortOrder sortOrder, Integer arrSize, - byte[] viewConstant, boolean isViewReferenced, String expressionStr, - boolean isRowTimestamp, boolean isDynamic, byte[] columnQualifierBytes, long timestamp, - boolean derived) { - Preconditions.checkNotNull(sortOrder); - this.dataType = dataType; - if (familyName == null) { - // Allow nullable columns in PK, but only if they're variable length. - // Variable length types may be null, since we use a null-byte terminator - // (which is a disallowed character in variable length types). 
However, - // fixed width types do not have a way of representing null. - // TODO: we may be able to allow this for columns at the end of the PK - Preconditions.checkArgument(!nullable || !dataType.isFixedWidth(), - "PK columns may not be both fixed width and nullable: " + name.getString()); - } - this.name = name; - this.familyName = familyName == null ? null : familyName; - this.maxLength = maxLength; - this.scale = scale; - this.nullable = nullable; - this.position = position; - this.sortOrder = sortOrder; - this.arraySize = arrSize; - this.viewConstant = viewConstant; - this.isViewReferenced = isViewReferenced; - this.expressionStr = expressionStr; - this.isRowTimestamp = isRowTimestamp; - this.isDynamic = isDynamic; - this.columnQualifierBytes = columnQualifierBytes; - this.timestamp = timestamp; - this.derived = derived; - } + public PColumnImpl(PName name, PName familyName, PDataType dataType, Integer maxLength, + Integer scale, boolean nullable, int position, SortOrder sortOrder, Integer arrSize, + byte[] viewConstant, boolean isViewReferenced, String expressionStr, boolean isRowTimestamp, + boolean isDynamic, byte[] columnQualifierBytes, long timestamp, boolean derived) { + init(name, familyName, dataType, maxLength, scale, nullable, position, sortOrder, arrSize, + viewConstant, isViewReferenced, expressionStr, isRowTimestamp, isDynamic, + columnQualifierBytes, timestamp, derived); + } - @Override - public int getEstimatedSize() { - return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 8 + SizedUtil.INT_OBJECT_SIZE * 3 + SizedUtil.INT_SIZE + - name.getEstimatedSize() + (familyName == null ? 0 : familyName.getEstimatedSize()) + - (viewConstant == null ? 0 : (SizedUtil.ARRAY_SIZE + viewConstant.length)); - } + private PColumnImpl(PName familyName, PName columnName, Long timestamp) { + this.familyName = familyName; + this.name = columnName; + this.derived = true; + if (timestamp != null) { + this.timestamp = timestamp; + } + } - @Override - public PName getName() { - return name; - } + // a excluded column (a column that was derived from a parent but that has been deleted) is + // denoted by a column that has a null type + public static PColumnImpl createExcludedColumn(PName familyName, PName columnName, + Long timestamp) { + return new PColumnImpl(familyName, columnName, timestamp); + } - @Override - public PName getFamilyName() { - return familyName; - } + private void init(PName name, PName familyName, PDataType dataType, Integer maxLength, + Integer scale, boolean nullable, int position, SortOrder sortOrder, Integer arrSize, + byte[] viewConstant, boolean isViewReferenced, String expressionStr, boolean isRowTimestamp, + boolean isDynamic, byte[] columnQualifierBytes, long timestamp, boolean derived) { + Preconditions.checkNotNull(sortOrder); + this.dataType = dataType; + if (familyName == null) { + // Allow nullable columns in PK, but only if they're variable length. + // Variable length types may be null, since we use a null-byte terminator + // (which is a disallowed character in variable length types). However, + // fixed width types do not have a way of representing null. + // TODO: we may be able to allow this for columns at the end of the PK + Preconditions.checkArgument(!nullable || !dataType.isFixedWidth(), + "PK columns may not be both fixed width and nullable: " + name.getString()); + } + this.name = name; + this.familyName = familyName == null ? 
null : familyName; + this.maxLength = maxLength; + this.scale = scale; + this.nullable = nullable; + this.position = position; + this.sortOrder = sortOrder; + this.arraySize = arrSize; + this.viewConstant = viewConstant; + this.isViewReferenced = isViewReferenced; + this.expressionStr = expressionStr; + this.isRowTimestamp = isRowTimestamp; + this.isDynamic = isDynamic; + this.columnQualifierBytes = columnQualifierBytes; + this.timestamp = timestamp; + this.derived = derived; + } - @Override - public PDataType getDataType() { - return dataType; - } + @Override + public int getEstimatedSize() { + return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 8 + SizedUtil.INT_OBJECT_SIZE * 3 + + SizedUtil.INT_SIZE + name.getEstimatedSize() + + (familyName == null ? 0 : familyName.getEstimatedSize()) + + (viewConstant == null ? 0 : (SizedUtil.ARRAY_SIZE + viewConstant.length)); + } - @Override - public Integer getMaxLength() { - return maxLength; - } + @Override + public PName getName() { + return name; + } - @Override - public Integer getScale() { - return scale; - } - - @Override - public String getExpressionStr() { - return expressionStr; - } + @Override + public PName getFamilyName() { + return familyName; + } - @Override - public long getTimestamp() { - return timestamp; - } + @Override + public PDataType getDataType() { + return dataType; + } - @Override - public boolean isExcluded() { - return dataType == null; - } + @Override + public Integer getMaxLength() { + return maxLength; + } - @Override - public boolean isNullable() { - return nullable; - } + @Override + public Integer getScale() { + return scale; + } - @Override - public int getPosition() { - return position; - } - - @Override - public SortOrder getSortOrder() { - return sortOrder; - } + @Override + public String getExpressionStr() { + return expressionStr; + } - @Override - public String toString() { - return (familyName == null ? "" : familyName.toString() + QueryConstants.NAME_SEPARATOR) + name.toString(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((familyName == null) ? 0 : familyName.hashCode()); - result = prime * result + ((name == null) ? 0 : name.hashCode()); - return result; - } + @Override + public long getTimestamp() { + return timestamp; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (! (obj instanceof PColumn) ) return false; - PColumn other = (PColumn)obj; - if (familyName == null) { - if (other.getFamilyName() != null) return false; - } else if (!familyName.equals(other.getFamilyName())) return false; - if (name == null) { - if (other.getName() != null) return false; - } else if (!name.equals(other.getName())) return false; - return true; - } + @Override + public boolean isExcluded() { + return dataType == null; + } - @Override - public Integer getArraySize() { - return arraySize; - } + @Override + public boolean isNullable() { + return nullable; + } - @Override - public byte[] getViewConstant() { - return viewConstant; - } + @Override + public int getPosition() { + return position; + } + + @Override + public SortOrder getSortOrder() { + return sortOrder; + } + + @Override + public String toString() { + return (familyName == null ? "" : familyName.toString() + QueryConstants.NAME_SEPARATOR) + + name.toString(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((familyName == null) ? 
0 : familyName.hashCode()); + result = prime * result + ((name == null) ? 0 : name.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (!(obj instanceof PColumn)) return false; + PColumn other = (PColumn) obj; + if (familyName == null) { + if (other.getFamilyName() != null) return false; + } else if (!familyName.equals(other.getFamilyName())) return false; + if (name == null) { + if (other.getName() != null) return false; + } else if (!name.equals(other.getName())) return false; + return true; + } + + @Override + public Integer getArraySize() { + return arraySize; + } + + @Override + public byte[] getViewConstant() { + return viewConstant; + } - @Override - public boolean isViewReferenced() { - return isViewReferenced; + @Override + public boolean isViewReferenced() { + return isViewReferenced; + } + + @Override + public boolean isRowTimestamp() { + return isRowTimestamp; + } + + @Override + public boolean isDynamic() { + return isDynamic; + } + + @Override + public byte[] getColumnQualifierBytes() { + // Needed for backward compatibility + if (!SchemaUtil.isPKColumn(this) && columnQualifierBytes == null) { + return this.name.getBytes(); + } + return columnQualifierBytes; + } + + /** + * Create a PColumn instance from PBed PColumn instance + */ + public static PColumn createFromProto(PTableProtos.PColumn column) { + byte[] columnNameBytes = column.getColumnNameBytes().toByteArray(); + PName columnName = PNameFactory.newName(columnNameBytes); + PName familyName = null; + if (column.hasFamilyNameBytes()) { + familyName = PNameFactory.newName(column.getFamilyNameBytes().toByteArray()); + } + PDataType dataType = + column.hasDataType() ? PDataType.fromSqlTypeName(column.getDataType()) : null; + Integer maxLength = null; + if (column.hasMaxLength()) { + maxLength = column.getMaxLength(); + } + Integer scale = null; + if (column.hasScale()) { + scale = column.getScale(); + } + boolean nullable = column.getNullable(); + int position = column.getPosition(); + SortOrder sortOrder = SortOrder.fromSystemValue(column.getSortOrder()); + Integer arraySize = null; + if (column.hasArraySize()) { + arraySize = column.getArraySize(); + } + byte[] viewConstant = null; + if (column.hasViewConstant()) { + viewConstant = column.getViewConstant().toByteArray(); + } + boolean isViewReferenced = false; + if (column.hasViewReferenced()) { + isViewReferenced = column.getViewReferenced(); + } + String expressionStr = null; + if (column.hasExpression()) { + expressionStr = column.getExpression(); + } + boolean isRowTimestamp = column.getIsRowTimestamp(); + boolean isDynamic = false; + if (column.hasIsDynamic()) { + isDynamic = column.getIsDynamic(); + } + byte[] columnQualifierBytes = null; + if (column.hasColumnQualifierBytes()) { + columnQualifierBytes = column.getColumnQualifierBytes().toByteArray(); + } + long timestamp = HConstants.LATEST_TIMESTAMP; + if (column.hasTimestamp()) { + timestamp = column.getTimestamp(); + } + boolean derived = false; + if (column.hasDerived()) { + derived = column.getDerived(); + } + return new PColumnImpl(columnName, familyName, dataType, maxLength, scale, nullable, position, + sortOrder, arraySize, viewConstant, isViewReferenced, expressionStr, isRowTimestamp, + isDynamic, columnQualifierBytes, timestamp, derived); + } + + public static PTableProtos.PColumn toProto(PColumn column) { + PTableProtos.PColumn.Builder builder = PTableProtos.PColumn.newBuilder(); + 
builder.setColumnNameBytes(ByteStringer.wrap(column.getName().getBytes())); + if (column.getFamilyName() != null) { + builder.setFamilyNameBytes(ByteStringer.wrap(column.getFamilyName().getBytes())); } - - @Override - public boolean isRowTimestamp() { - return isRowTimestamp; + if (column.getDataType() != null) { + builder.setDataType(column.getDataType().getSqlTypeName()); } - - @Override - public boolean isDynamic() { - return isDynamic; + if (column.getMaxLength() != null) { + builder.setMaxLength(column.getMaxLength()); } - - @Override - public byte[] getColumnQualifierBytes() { - // Needed for backward compatibility - if (!SchemaUtil.isPKColumn(this) && columnQualifierBytes == null) { - return this.name.getBytes(); - } - return columnQualifierBytes; + if (column.getScale() != null) { + builder.setScale(column.getScale()); } - - /** - * Create a PColumn instance from PBed PColumn instance - * - * @param column - */ - public static PColumn createFromProto(PTableProtos.PColumn column) { - byte[] columnNameBytes = column.getColumnNameBytes().toByteArray(); - PName columnName = PNameFactory.newName(columnNameBytes); - PName familyName = null; - if (column.hasFamilyNameBytes()) { - familyName = PNameFactory.newName(column.getFamilyNameBytes().toByteArray()); - } - PDataType dataType = column.hasDataType() ? PDataType.fromSqlTypeName(column.getDataType()) : null; - Integer maxLength = null; - if (column.hasMaxLength()) { - maxLength = column.getMaxLength(); - } - Integer scale = null; - if (column.hasScale()) { - scale = column.getScale(); - } - boolean nullable = column.getNullable(); - int position = column.getPosition(); - SortOrder sortOrder = SortOrder.fromSystemValue(column.getSortOrder()); - Integer arraySize = null; - if (column.hasArraySize()) { - arraySize = column.getArraySize(); - } - byte[] viewConstant = null; - if (column.hasViewConstant()) { - viewConstant = column.getViewConstant().toByteArray(); - } - boolean isViewReferenced = false; - if (column.hasViewReferenced()) { - isViewReferenced = column.getViewReferenced(); - } - String expressionStr = null; - if (column.hasExpression()) { - expressionStr = column.getExpression(); - } - boolean isRowTimestamp = column.getIsRowTimestamp(); - boolean isDynamic = false; - if (column.hasIsDynamic()) { - isDynamic = column.getIsDynamic(); - } - byte[] columnQualifierBytes = null; - if (column.hasColumnQualifierBytes()) { - columnQualifierBytes = column.getColumnQualifierBytes().toByteArray(); - } - long timestamp = HConstants.LATEST_TIMESTAMP; - if (column.hasTimestamp()) { - timestamp = column.getTimestamp(); - } - boolean derived = false; - if (column.hasDerived()) { - derived = column.getDerived(); - } - return new PColumnImpl(columnName, familyName, dataType, maxLength, scale, nullable, position, sortOrder, - arraySize, viewConstant, isViewReferenced, expressionStr, isRowTimestamp, isDynamic, columnQualifierBytes, - timestamp, derived); + builder.setNullable(column.isNullable()); + builder.setPosition(column.getPosition()); + if (column.getSortOrder() != null) { + builder.setSortOrder(column.getSortOrder().getSystemValue()); } - - public static PTableProtos.PColumn toProto(PColumn column) { - PTableProtos.PColumn.Builder builder = PTableProtos.PColumn.newBuilder(); - builder.setColumnNameBytes(ByteStringer.wrap(column.getName().getBytes())); - if (column.getFamilyName() != null) { - builder.setFamilyNameBytes(ByteStringer.wrap(column.getFamilyName().getBytes())); - } - if (column.getDataType()!=null) { - 
builder.setDataType(column.getDataType().getSqlTypeName()); - } - if (column.getMaxLength() != null) { - builder.setMaxLength(column.getMaxLength()); - } - if (column.getScale() != null) { - builder.setScale(column.getScale()); - } - builder.setNullable(column.isNullable()); - builder.setPosition(column.getPosition()); - if (column.getSortOrder()!=null) { - builder.setSortOrder(column.getSortOrder().getSystemValue()); - } - if (column.getArraySize() != null) { - builder.setArraySize(column.getArraySize()); - } - if (column.getViewConstant() != null) { - builder.setViewConstant(ByteStringer.wrap(column.getViewConstant())); - } - builder.setViewReferenced(column.isViewReferenced()); - - if (column.getExpressionStr() != null) { - builder.setExpression(column.getExpressionStr()); - } - builder.setIsRowTimestamp(column.isRowTimestamp()); - if (column.getColumnQualifierBytes() != null) { - builder.setColumnQualifierBytes(ByteStringer.wrap(column.getColumnQualifierBytes())); - } - if (column.getTimestamp() != HConstants.LATEST_TIMESTAMP) { - builder.setTimestamp(column.getTimestamp()); - } - builder.setDerived(column.isDerived()); - return builder.build(); + if (column.getArraySize() != null) { + builder.setArraySize(column.getArraySize()); + } + if (column.getViewConstant() != null) { + builder.setViewConstant(ByteStringer.wrap(column.getViewConstant())); } + builder.setViewReferenced(column.isViewReferenced()); - public boolean isDerived() { - return derived; + if (column.getExpressionStr() != null) { + builder.setExpression(column.getExpressionStr()); + } + builder.setIsRowTimestamp(column.isRowTimestamp()); + if (column.getColumnQualifierBytes() != null) { + builder.setColumnQualifierBytes(ByteStringer.wrap(column.getColumnQualifierBytes())); } + if (column.getTimestamp() != HConstants.LATEST_TIMESTAMP) { + builder.setTimestamp(column.getTimestamp()); + } + builder.setDerived(column.isDerived()); + return builder.build(); + } + + public boolean isDerived() { + return derived; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PDatum.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PDatum.java index d6082a71d26..6db453f3a0d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PDatum.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PDatum.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,30 +20,22 @@ import org.apache.phoenix.schema.types.PDataType; public interface PDatum { - /** - * @return is this column nullable? - */ - boolean isNullable(); + /** Returns is this column nullable? */ + boolean isNullable(); - /** - * @return data type of the column - */ - PDataType getDataType(); + /** Returns data type of the column */ + PDataType getDataType(); - /** - * @return the actual length of the column. For decimal, it would be its precision. For char or - * varchar, it would be the maximum length as specified during schema definition. - */ - Integer getMaxLength(); + /** + * @return the actual length of the column. For decimal, it would be its precision. For char or + * varchar, it would be the maximum length as specified during schema definition. 
+ */ + Integer getMaxLength(); + + /** Returns scale of a decimal number. */ + Integer getScale(); + + /** Returns The SortOrder for this column, never null */ + SortOrder getSortOrder(); - /** - * @return scale of a decimal number. - */ - Integer getScale(); - - /** - * @return The SortOrder for this column, never null - */ - SortOrder getSortOrder(); - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PIndexState.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PIndexState.java index 6d4dd7d6826..198866ceaf6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PIndexState.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PIndexState.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,87 +20,92 @@ import org.apache.phoenix.schema.types.PVarchar; public enum PIndexState { - BUILDING("b"), - USABLE("e"), - UNUSABLE("d"), - ACTIVE("a"), - INACTIVE("i"), - DISABLE("x"), - REBUILD("r"), - PENDING_ACTIVE("p"), - // Used when disabling an index on write failure (PHOENIX-4130) - // When an index write fails, it is put in this state, and we let the client retry the mutation - // After retries are exhausted, the client should mark the index as disabled, but if that - // doesn't happen, then the index is considered disabled if it's been in this state too long - PENDING_DISABLE("w"), - //When we create/drop some indexes in one cluster with a replication peer, the peer doesn't immediately have this - //index and Hbase throws a replication error when we try to write into indexes that don't have a matching table. - //To remediate this issue, we can optionally create indexes in CREATE_DISABLED state and enable them after all - //the replication peers have the table. Similar for drop. - CREATE_DISABLE("c"); + BUILDING("b"), + USABLE("e"), + UNUSABLE("d"), + ACTIVE("a"), + INACTIVE("i"), + DISABLE("x"), + REBUILD("r"), + PENDING_ACTIVE("p"), + // Used when disabling an index on write failure (PHOENIX-4130) + // When an index write fails, it is put in this state, and we let the client retry the mutation + // After retries are exhausted, the client should mark the index as disabled, but if that + // doesn't happen, then the index is considered disabled if it's been in this state too long + PENDING_DISABLE("w"), + // When we create/drop some indexes in one cluster with a replication peer, the peer doesn't + // immediately have this + // index and Hbase throws a replication error when we try to write into indexes that don't have a + // matching table. + // To remediate this issue, we can optionally create indexes in CREATE_DISABLED state and enable + // them after all + // the replication peers have the table. Similar for drop. 
+ CREATE_DISABLE("c"); - private final String serializedValue; - private final byte[] serializedBytes; - private final byte[] nameBytesValue; + private final String serializedValue; + private final byte[] serializedBytes; + private final byte[] nameBytesValue; - private PIndexState(String value) { - this.serializedValue = value; - this.serializedBytes = PVarchar.INSTANCE.toBytes(value); - this.nameBytesValue = PVarchar.INSTANCE.toBytes(this.toString()); - } + private PIndexState(String value) { + this.serializedValue = value; + this.serializedBytes = PVarchar.INSTANCE.toBytes(value); + this.nameBytesValue = PVarchar.INSTANCE.toBytes(this.toString()); + } - public String getSerializedValue() { - return serializedValue; - } + public String getSerializedValue() { + return serializedValue; + } - public byte[] getSerializedBytes() { - return serializedBytes; - } + public byte[] getSerializedBytes() { + return serializedBytes; + } - public byte[] toBytes() { - return nameBytesValue; - } + public byte[] toBytes() { + return nameBytesValue; + } - public boolean isDisabled() { - return (this == DISABLE || this == CREATE_DISABLE); - } + public boolean isDisabled() { + return (this == DISABLE || this == CREATE_DISABLE); + } - private static final PIndexState[] FROM_VALUE; - private static final int FROM_VALUE_OFFSET; - static { - int minChar = Integer.MAX_VALUE; - int maxChar = Integer.MIN_VALUE; - for (PIndexState state: PIndexState.values()) { - char c = state.getSerializedValue().charAt(0); - if (c < minChar) { - minChar = c; - } - if (c > maxChar) { - maxChar = c; - } - } - FROM_VALUE_OFFSET = minChar; - FROM_VALUE = new PIndexState[maxChar - minChar + 1]; - for (PIndexState state: PIndexState.values()) { - FROM_VALUE[state.getSerializedValue().charAt(0) - minChar] = state; - } + private static final PIndexState[] FROM_VALUE; + private static final int FROM_VALUE_OFFSET; + static { + int minChar = Integer.MAX_VALUE; + int maxChar = Integer.MIN_VALUE; + for (PIndexState state : PIndexState.values()) { + char c = state.getSerializedValue().charAt(0); + if (c < minChar) { + minChar = c; + } + if (c > maxChar) { + maxChar = c; + } + } + FROM_VALUE_OFFSET = minChar; + FROM_VALUE = new PIndexState[maxChar - minChar + 1]; + for (PIndexState state : PIndexState.values()) { + FROM_VALUE[state.getSerializedValue().charAt(0) - minChar] = state; } + } - public static PIndexState fromSerializedValue(String serializedValue) { - if (serializedValue.length() == 1) { - int i = serializedValue.charAt(0) - FROM_VALUE_OFFSET; - if (i >= 0 && i < FROM_VALUE.length && FROM_VALUE[i] != null) { - return FROM_VALUE[i]; - } - } - throw new IllegalArgumentException("Unable to PIndexState enum for serialized value of '" + serializedValue + "'"); + public static PIndexState fromSerializedValue(String serializedValue) { + if (serializedValue.length() == 1) { + int i = serializedValue.charAt(0) - FROM_VALUE_OFFSET; + if (i >= 0 && i < FROM_VALUE.length && FROM_VALUE[i] != null) { + return FROM_VALUE[i]; + } } + throw new IllegalArgumentException( + "Unable to PIndexState enum for serialized value of '" + serializedValue + "'"); + } - public static PIndexState fromSerializedValue(byte serializedByte) { - int i = serializedByte - FROM_VALUE_OFFSET; - if (i >= 0 && i < FROM_VALUE.length && FROM_VALUE[i] != null) { - return FROM_VALUE[i]; - } - throw new IllegalArgumentException("Unable to PIndexState enum for serialized value of '" + (char)serializedByte + "'"); + public static PIndexState fromSerializedValue(byte serializedByte) 
{ + int i = serializedByte - FROM_VALUE_OFFSET; + if (i >= 0 && i < FROM_VALUE.length && FROM_VALUE[i] != null) { + return FROM_VALUE[i]; } + throw new IllegalArgumentException( + "Unable to PIndexState enum for serialized value of '" + (char) serializedByte + "'"); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaData.java index fd5ebce0c96..5a03d899f9b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,15 +23,23 @@ public interface PMetaData extends MetaDataMutated, Iterable { - public static interface Pruner { - public boolean prune(PTable table); - public boolean prune(PFunction function); - } - public int size(); - public PTableRef getTableRef(PTableKey key) throws TableNotFoundException; - public void pruneTables(Pruner pruner); - public PFunction getFunction(PTableKey key) throws FunctionNotFoundException; - public void pruneFunctions(Pruner pruner); - public long getAge(PTableRef ref); - public PSchema getSchema(PTableKey key) throws SchemaNotFoundException; + public static interface Pruner { + public boolean prune(PTable table); + + public boolean prune(PFunction function); + } + + public int size(); + + public PTableRef getTableRef(PTableKey key) throws TableNotFoundException; + + public void pruneTables(Pruner pruner); + + public PFunction getFunction(PTableKey key) throws FunctionNotFoundException; + + public void pruneFunctions(Pruner pruner); + + public long getAge(PTableRef ref); + + public PSchema getSchema(PTableKey key) throws SchemaNotFoundException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java index 767969785ae..28061ae7e47 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,114 +25,108 @@ import org.apache.phoenix.monitoring.GlobalClientMetrics; import org.apache.phoenix.parse.PFunction; import org.apache.phoenix.parse.PSchema; -import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; +import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalListener; import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalNotification; import org.apache.phoenix.thirdparty.com.google.common.cache.Weigher; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TimeKeeper; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; class PMetaDataCache { - private static final Logger LOGGER = LoggerFactory.getLogger(PMetaDataCache.class); - - private final TimeKeeper timeKeeper; - private final Cache tables; - final Map functions; - final Map schemas; + private static final Logger LOGGER = LoggerFactory.getLogger(PMetaDataCache.class); - public PMetaDataCache(int initialCapacity, long maxByteSize, - TimeKeeper timeKeeper) { - this.tables = CacheBuilder.newBuilder() - .removalListener(new RemovalListener() { - @Override - public void onRemoval(RemovalNotification notification) { - String key = notification.getKey().toString(); - LOGGER.debug("Expiring " + key + " because of " - + notification.getCause().name()); - if (notification.wasEvicted()) { - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_EVICTION_COUNTER - .increment(); - } else { - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_REMOVAL_COUNTER - .increment(); - } - if (notification.getValue() != null) { - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE - .update(-notification.getValue().getEstimatedSize()); - } - } - }) - .maximumWeight(maxByteSize) - .weigher(new Weigher() { - @Override - public int weigh(PTableKey tableKey, PTableRef tableRef) { - if (PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals( - SchemaUtil.getSchemaNameFromFullName(tableKey.getName()))) { - // Ensure there is always room for system tables - return 0; - } - return tableRef.getEstimatedSize(); - } - }) - .build(); - this.functions = new ConcurrentHashMap<>(initialCapacity); - this.schemas = new ConcurrentHashMap<>(initialCapacity); - this.timeKeeper = timeKeeper; - } - - public PTableRef get(PTableKey key) { - PTableRef tableAccess = this.tables.getIfPresent(key); - return tableAccess; - } + private final TimeKeeper timeKeeper; + private final Cache tables; + final Map functions; + final Map schemas; - PTable put(PTableKey key, PTableRef ref) { - PTableRef oldTableRef = tables.asMap().put(key, ref); - if (oldTableRef == null) { - return null; + public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper) { + this.tables = + CacheBuilder.newBuilder().removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification notification) { + String key = notification.getKey().toString(); + LOGGER.debug("Expiring " + key + " because of " + notification.getCause().name()); + if (notification.wasEvicted()) { + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_EVICTION_COUNTER.increment(); + } else { + 
GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_REMOVAL_COUNTER.increment(); + } + if (notification.getValue() != null) { + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(-notification.getValue().getEstimatedSize()); + } } - return oldTableRef.getTable(); - } + }).maximumWeight(maxByteSize).weigher(new Weigher() { + @Override + public int weigh(PTableKey tableKey, PTableRef tableRef) { + if ( + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + .equals(SchemaUtil.getSchemaNameFromFullName(tableKey.getName())) + ) { + // Ensure there is always room for system tables + return 0; + } + return tableRef.getEstimatedSize(); + } + }).build(); + this.functions = new ConcurrentHashMap<>(initialCapacity); + this.schemas = new ConcurrentHashMap<>(initialCapacity); + this.timeKeeper = timeKeeper; + } - public long getAge(PTableRef ref) { - return timeKeeper.getCurrentTime() - ref.getCreateTime(); + public PTableRef get(PTableKey key) { + PTableRef tableAccess = this.tables.getIfPresent(key); + return tableAccess; + } + + PTable put(PTableKey key, PTableRef ref) { + PTableRef oldTableRef = tables.asMap().put(key, ref); + if (oldTableRef == null) { + return null; } - - public PTable remove(PTableKey key) { - PTableRef value = tables.getIfPresent(key); - tables.invalidate(key); - if (value == null) { - return null; - } - return value.getTable(); + return oldTableRef.getTable(); + } + + public long getAge(PTableRef ref) { + return timeKeeper.getCurrentTime() - ref.getCreateTime(); + } + + public PTable remove(PTableKey key) { + PTableRef value = tables.getIfPresent(key); + tables.invalidate(key); + if (value == null) { + return null; } - - public Iterator iterator() { - final Iterator iterator = this.tables.asMap().values().iterator(); - return new Iterator() { + return value.getTable(); + } - @Override - public boolean hasNext() { - return iterator.hasNext(); - } + public Iterator iterator() { + final Iterator iterator = this.tables.asMap().values().iterator(); + return new Iterator() { - @Override - public PTable next() { - return iterator.next().getTable(); - } + @Override + public boolean hasNext() { + return iterator.hasNext(); + } - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - }; - } + @Override + public PTable next() { + return iterator.next().getTable(); + } - public long size() { - return this.tables.size(); - } -} \ No newline at end of file + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + public long size() { + return this.tables.size(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataEntity.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataEntity.java index 09017777346..246e082420e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataEntity.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataEntity.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +18,5 @@ package org.apache.phoenix.schema; public interface PMetaDataEntity { - public int getEstimatedSize(); + public int getEstimatedSize(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java index 2029868dede..8c1677bdec2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,332 +23,348 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.function.Consumer; -import org.apache.phoenix.monitoring.GlobalClientMetrics; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.hbase.HConstants; +import org.apache.phoenix.monitoring.GlobalClientMetrics; import org.apache.phoenix.parse.PFunction; import org.apache.phoenix.parse.PSchema; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TimeKeeper; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Client-side cache of MetaData, not thread safe. Internally uses a LinkedHashMap that evicts the * oldest entries when size grows beyond the maxSize specified at create time. 
*/ public class PMetaDataImpl implements PMetaData { - - private PMetaDataCache metaData; - private final TimeKeeper timeKeeper; - private final PTableRefFactory tableRefFactory; - private final long updateCacheFrequency; - private HashMap physicalNameToLogicalTableMap = new HashMap<>(); - - public PMetaDataImpl(int initialCapacity, long updateCacheFrequency, ReadOnlyProps props) { - this(initialCapacity, updateCacheFrequency, TimeKeeper.SYSTEM, props); - } - public PMetaDataImpl(int initialCapacity, long updateCacheFrequency, TimeKeeper timeKeeper, ReadOnlyProps props) { + private PMetaDataCache metaData; + private final TimeKeeper timeKeeper; + private final PTableRefFactory tableRefFactory; + private final long updateCacheFrequency; + private HashMap physicalNameToLogicalTableMap = new HashMap<>(); - this(new PMetaDataCache(initialCapacity, props.getLong( - QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE), timeKeeper), - timeKeeper, PTableRefFactory.getFactory(props), - updateCacheFrequency); - } + public PMetaDataImpl(int initialCapacity, long updateCacheFrequency, ReadOnlyProps props) { + this(initialCapacity, updateCacheFrequency, TimeKeeper.SYSTEM, props); + } + + public PMetaDataImpl(int initialCapacity, long updateCacheFrequency, TimeKeeper timeKeeper, + ReadOnlyProps props) { + + this( + new PMetaDataCache(initialCapacity, + props.getLong(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE), + timeKeeper), + timeKeeper, PTableRefFactory.getFactory(props), updateCacheFrequency); + } + + private PMetaDataImpl(PMetaDataCache metaData, TimeKeeper timeKeeper, + PTableRefFactory tableRefFactory, long updateCacheFrequency) { + this.timeKeeper = timeKeeper; + this.metaData = metaData; + this.tableRefFactory = tableRefFactory; + this.updateCacheFrequency = updateCacheFrequency; + } - private PMetaDataImpl(PMetaDataCache metaData, TimeKeeper timeKeeper, - PTableRefFactory tableRefFactory, long updateCacheFrequency) { - this.timeKeeper = timeKeeper; - this.metaData = metaData; - this.tableRefFactory = tableRefFactory; - this.updateCacheFrequency = updateCacheFrequency; + private void updateGlobalMetric(PTableRef pTableRef) { + if (pTableRef != null && pTableRef.getTable() != null) { + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_HIT_COUNTER.increment(); + } else { + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_MISS_COUNTER.increment(); } + } - private void updateGlobalMetric(PTableRef pTableRef) { - if (pTableRef != null && pTableRef.getTable() != null) { - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_HIT_COUNTER.increment(); - } - else { - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_MISS_COUNTER.increment(); - } + @Override + public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { + if (physicalNameToLogicalTableMap.containsKey(key.getName())) { + key = physicalNameToLogicalTableMap.get(key.getName()); + } + PTableRef ref = metaData.get(key); + if (!key.getName().contains(QueryConstants.SYSTEM_SCHEMA_NAME)) { + updateGlobalMetric(ref); } + if (ref == null) { + throw new TableNotFoundException(key.getName()); + } + return ref; + } - @Override - public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { - if (physicalNameToLogicalTableMap.containsKey(key.getName())) { - key = physicalNameToLogicalTableMap.get(key.getName()); - } - PTableRef ref = metaData.get(key); - if 
(!key.getName().contains(QueryConstants.SYSTEM_SCHEMA_NAME)) { - updateGlobalMetric(ref); - } - if (ref == null) { - throw new TableNotFoundException(key.getName()); - } - return ref; + @Override + public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { + PFunction function = metaData.functions.get(key); + if (function == null) { + throw new FunctionNotFoundException(key.getName()); } + return function; + } + + @Override + public int size() { + return (int) metaData.size(); + } + + // TODO The tables with zero update cache frequency should not be inserted to the cache. However, + // Phoenix + // uses the cache as the temporary memory during all operations currently. When this behavior + // changes, we can use + // useMetaDataCache to determine if a table should be inserted to the cache. + private boolean useMetaDataCache(PTable table) { + return table.getType() == PTableType.SYSTEM || table.getUpdateCacheFrequency() != 0 + || updateCacheFrequency != 0; + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { + metaData.put(table.getKey(), + tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTimestamp)); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(table.getEstimatedSize()); + } - @Override - public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { - PFunction function = metaData.functions.get(key); - if (function == null) { - throw new FunctionNotFoundException(key.getName()); + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + PTableRef tableRef = + tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime); + PTableKey key = table.getKey(); + PTable newParentTable = null; + PTableRef newParentTableRef = null; + long parentResolvedTimestamp = resolvedTime; + if (table.getType() == PTableType.INDEX) { // Upsert new index table into parent data table list + String parentName = table.getParentName().getString(); + PTableRef oldParentRef = metaData.get(new PTableKey(table.getTenantId(), parentName)); + // If parentTable isn't cached, that's ok we can skip this + if (oldParentRef != null) { + List oldIndexes = oldParentRef.getTable().getIndexes(); + List newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size() + 1); + newIndexes.addAll(oldIndexes); + for (int i = 0; i < newIndexes.size(); i++) { + PTable index = newIndexes.get(i); + if (index.getName().equals(table.getName())) { + newIndexes.remove(i); + break; + } } - return function; + newIndexes.add(table); + newParentTable = PTableImpl + .builderWithColumns(oldParentRef.getTable(), getColumnsToClone(oldParentRef.getTable())) + .setIndexes(newIndexes).setTimeStamp(table.getTimeStamp()).build(); + newParentTableRef = tableRefFactory.makePTableRef(newParentTable, + this.timeKeeper.getCurrentTime(), parentResolvedTimestamp); + } } - @Override - public int size() { - return (int) metaData.size(); + if (newParentTable != null) { // Upsert new index table into parent data table list + metaData.put(newParentTable.getKey(), newParentTableRef); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(newParentTable.getEstimatedSize()); } - // TODO The tables with zero update cache frequency should not be inserted to the cache. 
However, Phoenix - // uses the cache as the temporary memory during all operations currently. When this behavior changes, we can use - // useMetaDataCache to determine if a table should be inserted to the cache. - private boolean useMetaDataCache(PTable table) { - return table.getType() == PTableType.SYSTEM - || table.getUpdateCacheFrequency() != 0 - || updateCacheFrequency != 0; + metaData.put(table.getKey(), tableRef); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(table.getEstimatedSize()); + + for (PTable index : table.getIndexes()) { + metaData.put(index.getKey(), + tableRefFactory.makePTableRef(index, this.timeKeeper.getCurrentTime(), resolvedTime)); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(index.getEstimatedSize()); } - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { - metaData.put(table.getKey(), tableRefFactory.makePTableRef(table, - this.timeKeeper.getCurrentTime(), resolvedTimestamp)); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - table.getEstimatedSize()); + if ( + table.getPhysicalName(true) != null + && !Strings.isNullOrEmpty(table.getPhysicalName(true).getString()) + && !table.getPhysicalName(true).getString().equals(table.getTableName().getString()) + ) { + String physicalTableName = table.getPhysicalName(true).getString() + .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + String physicalTableFullName = SchemaUtil.getTableName( + table.getSchemaName() != null ? 
table.getSchemaName().getString() : null, + physicalTableName); + this.physicalNameToLogicalTableMap.put(physicalTableFullName, key); } + } - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - PTableRef tableRef = tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime); - PTableKey key = table.getKey(); - PTable newParentTable = null; - PTableRef newParentTableRef = null; - long parentResolvedTimestamp = resolvedTime; - if (table.getType() == PTableType.INDEX) { // Upsert new index table into parent data table list - String parentName = table.getParentName().getString(); - PTableRef oldParentRef = metaData.get(new PTableKey(table.getTenantId(), parentName)); - // If parentTable isn't cached, that's ok we can skip this - if (oldParentRef != null) { - List oldIndexes = oldParentRef.getTable().getIndexes(); - List newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size() + 1); - newIndexes.addAll(oldIndexes); - for (int i = 0; i < newIndexes.size(); i++) { - PTable index = newIndexes.get(i); - if (index.getName().equals(table.getName())) { - newIndexes.remove(i); - break; - } - } - newIndexes.add(table); - newParentTable = PTableImpl.builderWithColumns(oldParentRef.getTable(), - getColumnsToClone(oldParentRef.getTable())) - .setIndexes(newIndexes) - .setTimeStamp(table.getTimeStamp()) - .build(); - newParentTableRef = tableRefFactory.makePTableRef(newParentTable, this.timeKeeper.getCurrentTime(), parentResolvedTimestamp); + @Override + public void removeTable(PName tenantId, String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + PTableRef parentTableRef = null; + PTableKey key = new PTableKey(tenantId, tableName); + if (metaData.get(key) == null) { + if (parentTableName != null) { + parentTableRef = metaData.get(new PTableKey(tenantId, parentTableName)); + } + if (parentTableRef == null) { + return; + } + } else { + PTable table = metaData.remove(key); + for (PTable index : table.getIndexes()) { + metaData.remove(index.getKey()); + } + if (table.getParentName() != null) { + parentTableRef = metaData.get(new PTableKey(tenantId, table.getParentName().getString())); + } + } + // also remove its reference from parent table + if (parentTableRef != null) { + List oldIndexes = parentTableRef.getTable().getIndexes(); + if (oldIndexes != null && !oldIndexes.isEmpty()) { + List newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size()); + newIndexes.addAll(oldIndexes); + for (int i = 0; i < newIndexes.size(); i++) { + PTable index = newIndexes.get(i); + if (index.getName().getString().equals(tableName)) { + newIndexes.remove(i); + PTableImpl.Builder parentTableBuilder = + PTableImpl.builderWithColumns(parentTableRef.getTable(), + getColumnsToClone(parentTableRef.getTable())).setIndexes(newIndexes); + if (tableTimeStamp != HConstants.LATEST_TIMESTAMP) { + parentTableBuilder.setTimeStamp(tableTimeStamp); } - } - - if (newParentTable != null) { // Upsert new index table into parent data table list - metaData.put(newParentTable.getKey(), newParentTableRef); + PTable parentTable = parentTableBuilder.build(); + metaData.put(parentTable.getKey(), tableRefFactory.makePTableRef(parentTable, + this.timeKeeper.getCurrentTime(), parentTableRef.getResolvedTimeStamp())); GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - newParentTable.getEstimatedSize()); - } - metaData.put(table.getKey(), 
tableRef); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - table.getEstimatedSize()); - - for (PTable index : table.getIndexes()) { - metaData.put(index.getKey(), tableRefFactory.makePTableRef(index, this.timeKeeper.getCurrentTime(), resolvedTime)); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - index.getEstimatedSize()); - } - if (table.getPhysicalName(true) != null && - !Strings.isNullOrEmpty(table.getPhysicalName(true).getString()) && !table.getPhysicalName(true).getString().equals(table.getTableName().getString())) { - String physicalTableName = table.getPhysicalName(true).getString().replace( - QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); - String physicalTableFullName = SchemaUtil.getTableName(table.getSchemaName() != null ? table.getSchemaName().getString() : null, physicalTableName); - this.physicalNameToLogicalTableMap.put(physicalTableFullName, key); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(parentTable.getEstimatedSize()); + break; + } } + } } + } - @Override - public void removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException { - PTableRef parentTableRef = null; - PTableKey key = new PTableKey(tenantId, tableName); - if (metaData.get(key) == null) { - if (parentTableName != null) { - parentTableRef = metaData.get(new PTableKey(tenantId, parentTableName)); - } - if (parentTableRef == null) { - return; - } - } else { - PTable table = metaData.remove(key); - for (PTable index : table.getIndexes()) { - metaData.remove(index.getKey()); - } - if (table.getParentName() != null) { - parentTableRef = metaData.get(new PTableKey(tenantId, table.getParentName().getString())); - } - } - // also remove its reference from parent table - if (parentTableRef != null) { - List oldIndexes = parentTableRef.getTable().getIndexes(); - if(oldIndexes != null && !oldIndexes.isEmpty()) { - List newIndexes = Lists.newArrayListWithExpectedSize(oldIndexes.size()); - newIndexes.addAll(oldIndexes); - for (int i = 0; i < newIndexes.size(); i++) { - PTable index = newIndexes.get(i); - if (index.getName().getString().equals(tableName)) { - newIndexes.remove(i); - PTableImpl.Builder parentTableBuilder = - PTableImpl.builderWithColumns(parentTableRef.getTable(), - getColumnsToClone(parentTableRef.getTable())) - .setIndexes(newIndexes); - if (tableTimeStamp != HConstants.LATEST_TIMESTAMP) { - parentTableBuilder.setTimeStamp(tableTimeStamp); - } - PTable parentTable = parentTableBuilder.build(); - metaData.put(parentTable.getKey(), tableRefFactory.makePTableRef(parentTable, this.timeKeeper.getCurrentTime(), parentTableRef.getResolvedTimeStamp())); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - parentTable.getEstimatedSize()); - break; - } - } - } - } + @Override + public void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { + PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName)); + if (tableRef == null) { + return; } - - @Override - public void removeColumn(PName tenantId, String tableName, List columnsToRemove, long tableTimeStamp, long tableSeqNum, long 
resolvedTime) throws SQLException { - PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName)); - if (tableRef == null) { - return; - } - PTable table = tableRef.getTable(); - PMetaDataCache tables = metaData; - for (PColumn columnToRemove : columnsToRemove) { - PColumn column; - String familyName = columnToRemove.getFamilyName().getString(); - if (familyName == null) { - column = table.getPKColumn(columnToRemove.getName().getString()); - } else { - column = table.getColumnFamily(familyName).getPColumnForColumnName(columnToRemove.getName().getString()); - } - int positionOffset = 0; - int position = column.getPosition(); - List oldColumns = table.getColumns(); - if (table.getBucketNum() != null) { - position--; - positionOffset = 1; - oldColumns = oldColumns.subList(positionOffset, oldColumns.size()); - } - List columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1); - columns.addAll(oldColumns.subList(0, position)); - // Update position of columns that follow removed column - for (int i = position+1; i < oldColumns.size(); i++) { - PColumn oldColumn = oldColumns.get(i); - PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced(), oldColumn.getExpressionStr(), oldColumn.isRowTimestamp(), oldColumn.isDynamic(), oldColumn.getColumnQualifierBytes(), - oldColumn.getTimestamp()); - columns.add(newColumn); - } - table = PTableImpl.builderWithColumns(table, columns) - .setTimeStamp(tableTimeStamp) - .setSequenceNumber(tableSeqNum) - .build(); - } - tables.put(table.getKey(), tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime)); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - table.getEstimatedSize()); + PTable table = tableRef.getTable(); + PMetaDataCache tables = metaData; + for (PColumn columnToRemove : columnsToRemove) { + PColumn column; + String familyName = columnToRemove.getFamilyName().getString(); + if (familyName == null) { + column = table.getPKColumn(columnToRemove.getName().getString()); + } else { + column = table.getColumnFamily(familyName) + .getPColumnForColumnName(columnToRemove.getName().getString()); + } + int positionOffset = 0; + int position = column.getPosition(); + List oldColumns = table.getColumns(); + if (table.getBucketNum() != null) { + position--; + positionOffset = 1; + oldColumns = oldColumns.subList(positionOffset, oldColumns.size()); + } + List columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1); + columns.addAll(oldColumns.subList(0, position)); + // Update position of columns that follow removed column + for (int i = position + 1; i < oldColumns.size(); i++) { + PColumn oldColumn = oldColumns.get(i); + PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), + oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), + oldColumn.isNullable(), i - 1 + positionOffset, oldColumn.getSortOrder(), + oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced(), + oldColumn.getExpressionStr(), oldColumn.isRowTimestamp(), oldColumn.isDynamic(), + oldColumn.getColumnQualifierBytes(), oldColumn.getTimestamp()); + columns.add(newColumn); + } + table = 
PTableImpl.builderWithColumns(table, columns).setTimeStamp(tableTimeStamp) + .setSequenceNumber(tableSeqNum).build(); } + tables.put(table.getKey(), + tableRefFactory.makePTableRef(table, this.timeKeeper.getCurrentTime(), resolvedTime)); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(table.getEstimatedSize()); + } - @Override - public void pruneTables(Pruner pruner) { - List keysToPrune = Lists.newArrayListWithExpectedSize(this.size()); - for (PTable table : this) { - if (pruner.prune(table)) { - keysToPrune.add(table.getKey()); - } - } - if (!keysToPrune.isEmpty()) { - for (PTableKey key : keysToPrune) { - metaData.remove(key); - } - } + @Override + public void pruneTables(Pruner pruner) { + List keysToPrune = Lists.newArrayListWithExpectedSize(this.size()); + for (PTable table : this) { + if (pruner.prune(table)) { + keysToPrune.add(table.getKey()); + } } - - @Override - public Iterator iterator() { - return metaData.iterator(); + if (!keysToPrune.isEmpty()) { + for (PTableKey key : keysToPrune) { + metaData.remove(key); + } } + } - @Override - public void addFunction(PFunction function) throws SQLException { - this.metaData.functions.put(function.getKey(), function); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - function.getEstimatedSize()); - } + @Override + public Iterator iterator() { + return metaData.iterator(); + } - @Override - public void removeFunction(PName tenantId, String function, long functionTimeStamp) - throws SQLException { - this.metaData.functions.remove(new PTableKey(tenantId, function)); - } + @Override + public void addFunction(PFunction function) throws SQLException { + this.metaData.functions.put(function.getKey(), function); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(function.getEstimatedSize()); + } - @Override - public void pruneFunctions(Pruner pruner) { - List keysToPrune = Lists.newArrayListWithExpectedSize(this.size()); - for (PFunction function : this.metaData.functions.values()) { - if (pruner.prune(function)) { - keysToPrune.add(function.getKey()); - } - } - if (!keysToPrune.isEmpty()) { - for (PTableKey key : keysToPrune) { - metaData.functions.remove(key); - } - } - } + @Override + public void removeFunction(PName tenantId, String function, long functionTimeStamp) + throws SQLException { + this.metaData.functions.remove(new PTableKey(tenantId, function)); + } - @Override - public long getAge(PTableRef ref) { - return this.metaData.getAge(ref); + @Override + public void pruneFunctions(Pruner pruner) { + List keysToPrune = Lists.newArrayListWithExpectedSize(this.size()); + for (PFunction function : this.metaData.functions.values()) { + if (pruner.prune(function)) { + keysToPrune.add(function.getKey()); + } } - - @Override - public void addSchema(PSchema schema) throws SQLException { - this.metaData.schemas.put(schema.getSchemaKey(), schema); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); - GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE.update( - schema.getEstimatedSize()); + if (!keysToPrune.isEmpty()) { + for (PTableKey key : keysToPrune) { + metaData.functions.remove(key); + } } + } - @Override - public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { - 
PSchema schema = metaData.schemas.get(key); - if (schema == null) { throw new SchemaNotFoundException(key.getName()); } - return schema; - } + @Override + public long getAge(PTableRef ref) { + return this.metaData.getAge(ref); + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + this.metaData.schemas.put(schema.getSchemaKey(), schema); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ADD_COUNTER.increment(); + GlobalClientMetrics.GLOBAL_CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE + .update(schema.getEstimatedSize()); + } - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - this.metaData.schemas.remove(schema.getSchemaKey()); + @Override + public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { + PSchema schema = metaData.schemas.get(key); + if (schema == null) { + throw new SchemaNotFoundException(key.getName()); } + return schema; + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + this.metaData.schemas.remove(schema.getSchemaKey()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PName.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PName.java index 0e1337c5c86..43cc56167d6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PName.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,86 +21,79 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.util.ByteUtil; - /** - * - * Interface to encapsulate both the client-side name - * together with the server-side name for a named object - * - * + * Interface to encapsulate both the client-side name together with the server-side name for a named + * object * @since 0.1 */ public interface PName { - public static PName EMPTY_NAME = new PName() { - @Override - public String getString() { - return ""; - } + public static PName EMPTY_NAME = new PName() { + @Override + public String getString() { + return ""; + } + + @Override + public byte[] getBytes() { + return ByteUtil.EMPTY_BYTE_ARRAY; + } + + @Override + public String toString() { + return getString(); + } + + @Override + public ImmutableBytesPtr getBytesPtr() { + return ByteUtil.EMPTY_BYTE_ARRAY_PTR; + } + + @Override + public int getEstimatedSize() { + return 0; + } + }; + public static PName EMPTY_COLUMN_NAME = new PName() { + @Override + public String getString() { + return QueryConstants.EMPTY_COLUMN_NAME; + } + + @Override + public byte[] getBytes() { + return QueryConstants.EMPTY_COLUMN_BYTES; + } - @Override - public byte[] getBytes() { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - - @Override - public String toString() { - return getString(); - } + @Override + public String toString() { + return getString(); + } - @Override - public ImmutableBytesPtr getBytesPtr() { - return ByteUtil.EMPTY_BYTE_ARRAY_PTR; - } + @Override + public ImmutableBytesPtr getBytesPtr() { + return QueryConstants.EMPTY_COLUMN_BYTES_PTR; + } - @Override - public int getEstimatedSize() { - return 0; - } - }; - public static PName EMPTY_COLUMN_NAME = new PName() { - @Override - public String getString() { - return 
QueryConstants.EMPTY_COLUMN_NAME; - } + @Override + public int getEstimatedSize() { + return 0; + } + }; - @Override - public byte[] getBytes() { - return QueryConstants.EMPTY_COLUMN_BYTES; - } - - @Override - public String toString() { - return getString(); - } + /** + * Get the client-side, normalized name as referenced in a SQL statement. + * @return the normalized string name + */ + String getString(); - @Override - public ImmutableBytesPtr getBytesPtr() { - return QueryConstants.EMPTY_COLUMN_BYTES_PTR; - } + /** + * Get the server-side name as referenced in HBase-related APIs such as Scan, Filter, etc. + * @return the name as a byte array + */ + byte[] getBytes(); - @Override - public int getEstimatedSize() { - return 0; - } - }; - /** - * Get the client-side, normalized name as referenced - * in a SQL statement. - * @return the normalized string name - */ - String getString(); - - /** - * Get the server-side name as referenced in HBase-related - * APIs such as Scan, Filter, etc. - * @return the name as a byte array - */ - byte[] getBytes(); + /** Returns a pointer to the underlying bytes */ + ImmutableBytesPtr getBytesPtr(); - /** - * @return a pointer to the underlying bytes - */ - ImmutableBytesPtr getBytesPtr(); - - int getEstimatedSize(); + int getEstimatedSize(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameFactory.java index f7b869d2364..f8108bdd94b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema; import org.apache.hadoop.hbase.util.Bytes; @@ -23,35 +22,35 @@ import org.apache.phoenix.util.SchemaUtil; public class PNameFactory { - public static int getEstimatedSize(PName name) { - return name == null ? 0 : name.getEstimatedSize(); - } + public static int getEstimatedSize(PName name) { + return name == null ? 0 : name.getEstimatedSize(); + } - private PNameFactory() { - } + private PNameFactory() { + } - public static PName newNormalizedName(String name) { - return newName(SchemaUtil.normalizeIdentifier(name)); - } - - public static PName newName(String name) { - return name == null || name.isEmpty() ? PName.EMPTY_NAME : - name.equals(QueryConstants.EMPTY_COLUMN_NAME ) ? PName.EMPTY_COLUMN_NAME : - new PNameImpl(name); - } - - public static PName newName(byte[] bytes) { - return bytes == null || bytes.length == 0 ? PName.EMPTY_NAME : - Bytes.compareTo(bytes, QueryConstants.EMPTY_COLUMN_BYTES) == 0 ? PName.EMPTY_COLUMN_NAME : - new PNameImpl(bytes); - } + public static PName newNormalizedName(String name) { + return newName(SchemaUtil.normalizeIdentifier(name)); + } + + public static PName newName(String name) { + return name == null || name.isEmpty() ? PName.EMPTY_NAME + : name.equals(QueryConstants.EMPTY_COLUMN_NAME) ? 
PName.EMPTY_COLUMN_NAME + : new PNameImpl(name); + } + + public static PName newName(byte[] bytes) { + return bytes == null || bytes.length == 0 ? PName.EMPTY_NAME + : Bytes.compareTo(bytes, QueryConstants.EMPTY_COLUMN_BYTES) == 0 ? PName.EMPTY_COLUMN_NAME + : new PNameImpl(bytes); + } - public static PName newName(byte[] bytes, int offset, int length) { - if (bytes == null || length == 0) { - return PName.EMPTY_NAME; - } - byte[] buf = new byte[length]; - System.arraycopy(bytes, offset, buf, 0, length); - return new PNameImpl(buf); + public static PName newName(byte[] bytes, int offset, int length) { + if (bytes == null || length == 0) { + return PName.EMPTY_NAME; } + byte[] buf = new byte[length]; + System.arraycopy(bytes, offset, buf, 0, length); + return new PNameImpl(buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameImpl.java index bccf7bf5422..27c1eab1cd6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PNameImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,94 +17,94 @@ */ package org.apache.phoenix.schema; -import net.jcip.annotations.Immutable; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.SizedUtil; +import net.jcip.annotations.Immutable; + @Immutable public class PNameImpl implements PName { + /** + */ + private static class PNameImplData { + /** */ + public String stringName; + /** */ + public byte[] bytesName; + /** */ + public volatile ImmutableBytesPtr ptr; + /** + * */ - private static class PNameImplData { - /** */ - public String stringName; - /** */ - public byte[] bytesName; - /** */ - public volatile ImmutableBytesPtr ptr; - - /** - * - */ - public PNameImplData() { - } + public PNameImplData() { } - private PNameImplData data = new PNameImplData(); + } + private PNameImplData data = new PNameImplData(); - @Override - public int getEstimatedSize() { - return SizedUtil.OBJECT_SIZE * 3 + SizedUtil.ARRAY_SIZE + SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + - data.stringName.length() * SizedUtil.CHAR_SIZE + data.bytesName.length; - } + @Override + public int getEstimatedSize() { + return SizedUtil.OBJECT_SIZE * 3 + SizedUtil.ARRAY_SIZE + SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + + data.stringName.length() * SizedUtil.CHAR_SIZE + data.bytesName.length; + } - PNameImpl(String name) { - this.data.stringName = name; - this.data.bytesName = Bytes.toBytes(name); - } + PNameImpl(String name) { + this.data.stringName = name; + this.data.bytesName = Bytes.toBytes(name); + } - PNameImpl(byte[] name) { - this.data.stringName = Bytes.toString(name); - this.data.bytesName = name; - } + PNameImpl(byte[] name) { + this.data.stringName = Bytes.toString(name); + this.data.bytesName = name; + } - @Override - public String getString() { - return data.stringName; - } + @Override + public String getString() { + return data.stringName; + } - @Override - public byte[] getBytes() { - return data.bytesName; - } + @Override + public byte[] getBytes() { + return data.bytesName; + } - @Override - public ImmutableBytesPtr getBytesPtr() { + @Override + public ImmutableBytesPtr getBytesPtr() { + if (data.ptr == null) { + synchronized 
(data.bytesName) { if (data.ptr == null) { - synchronized (data.bytesName) { - if (data.ptr == null) { - this.data.ptr = new ImmutableBytesPtr(data.bytesName); - } - } + this.data.ptr = new ImmutableBytesPtr(data.bytesName); } - return this.data.ptr; + } } + return this.data.ptr; + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + data.stringName.hashCode(); - return result; - } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + data.stringName.hashCode(); + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (! (obj instanceof PName) ) return false; - PName other = (PName)obj; - if (hashCode() != other.hashCode()) return false; - // Compare normalized stringName for equality, since bytesName - // may differ since it remains case sensitive. - if (!getString().equals(other.getString())) return false; - return true; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (!(obj instanceof PName)) return false; + PName other = (PName) obj; + if (hashCode() != other.hashCode()) return false; + // Compare normalized stringName for equality, since bytesName + // may differ since it remains case sensitive. + if (!getString().equals(other.getString())) return false; + return true; + } - @Override - public String toString() { - return data.stringName; - } + @Override + public String toString() { + return data.stringName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PRow.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PRow.java index 1e9379fad63..ad52c89e435 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PRow.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PRow.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,67 +21,57 @@ import java.util.Map; import org.apache.hadoop.hbase.client.Mutation; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; /** - * - * Provide a client API for updating rows. The updates are processed in - * the calling order. Calling setValue after calling delete will cause the - * delete to be canceled. Conversely, calling delete after calling - * setValue will cause all prior setValue calls to be canceled. - * - * + * Provide a client API for updating rows. The updates are processed in the calling order. Calling + * setValue after calling delete will cause the delete to be canceled. Conversely, calling delete + * after calling setValue will cause all prior setValue calls to be canceled. * @since 0.1 */ public interface PRow { - Map DELETE_MARKER = ImmutableMap.of(); + Map DELETE_MARKER = ImmutableMap.of(); + + /** + * Get the list of {@link org.apache.hadoop.hbase.client.Mutation} used to update an HTable after + * all mutations through calls to {@link #setValue(PColumn, byte[])} or {@link #delete()}. 
+ * @return the list of mutations representing all changes made to a row + * @throws ConstraintViolationException if row data violates schema constraint + */ + public List toRowMutations(); - /** - * Get the list of {@link org.apache.hadoop.hbase.client.Mutation} used to - * update an HTable after all mutations through calls to - * {@link #setValue(PColumn, byte[])} or {@link #delete()}. - * @return the list of mutations representing all changes made to a row - * @throws ConstraintViolationException if row data violates schema - * constraint - */ - public List toRowMutations(); - - /** - * Set a column value in the row - * @param col the column for which the value is being set - * @param value the value - * @throws ConstraintViolationException if row data violates schema - * constraint - */ - public void setValue(PColumn col, byte[] value); + /** + * Set a column value in the row + * @param col the column for which the value is being set + * @param value the value + * @throws ConstraintViolationException if row data violates schema constraint + */ + public void setValue(PColumn col, byte[] value); - /** - * Set attributes for the Put operations involving dynamic columns. These attributes are - * persisted as cells under a reserved qualifier for the dynamic column metadata so that we - * can resolve them for wildcard queries without requiring the user to provide the data type - * of the dynamic columns. See PHOENIX-374 - * @return true if attributes for dynamic columns are added, otherwise false - */ - public boolean setAttributesForDynamicColumnsIfReqd(); + /** + * Set attributes for the Put operations involving dynamic columns. These attributes are persisted + * as cells under a reserved qualifier for the dynamic column metadata so that we can resolve them + * for wildcard queries without requiring the user to provide the data type of the dynamic + * columns. See PHOENIX-374 + * @return true if attributes for dynamic columns are added, otherwise false + */ + public boolean setAttributesForDynamicColumnsIfReqd(); - /** - * Set an attribute to indicate that we must process dynamic column metadata for the mutation. - * This is set if the configuration for supporting dynamic columns in wildcard queries is on - * and there are actually dynamic columns for which we need to add metadata. - * In case of old clients or for clients where this configuration is off, or for clients where - * this configuration is on and there are no dynamic columns to process in the mutation, this - * attribute will not be set. - * If this attribute is not set, we can avoid unnecessary iterations over each mutation's - * column families. See - * {@link org.apache.phoenix.coprocessor.ScanRegionObserver#preBatchMutate(ObserverContext, - * MiniBatchOperationInProgress)} - */ - public void setAttributeToProcessDynamicColumnsMetadata(); + /** + * Set an attribute to indicate that we must process dynamic column metadata for the mutation. + * This is set if the configuration for supporting dynamic columns in wildcard queries is on and + * there are actually dynamic columns for which we need to add metadata. In case of old clients or + * for clients where this configuration is off, or for clients where this configuration is on and + * there are no dynamic columns to process in the mutation, this attribute will not be set. If + * this attribute is not set, we can avoid unnecessary iterations over each mutation's column + * families. 
See + * {@link org.apache.phoenix.coprocessor.ScanRegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} + */ + public void setAttributeToProcessDynamicColumnsMetadata(); - /** - * Delete the row. Note that a delete take precedence over any - * values that may have been set before or after the delete call. - */ - public void delete(); + /** + * Delete the row. Note that a delete take precedence over any values that may have been set + * before or after the delete call. + */ + public void delete(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java index 42c76540dc5..b2bffe31217 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PSynchronizedMetaData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,192 +30,176 @@ public class PSynchronizedMetaData implements PMetaData { - @GuardedBy("readWriteLock") - private PMetaData delegate; - private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - - public PSynchronizedMetaData(PMetaData metadata) { - this.delegate = metadata; - } - - @Override - public Iterator iterator() { - readWriteLock.readLock().lock(); - try { - return delegate.iterator(); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public int size() { - readWriteLock.readLock().lock(); - try { - return delegate.size(); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public void addTable(PTable table, long resolvedTime) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.addTable(table, resolvedTime); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { - readWriteLock.readLock().lock(); - try { - return delegate.getTableRef(key); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.updateResolvedTimestamp(table, resolvedTimestamp); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void pruneTables(Pruner pruner) { - readWriteLock.writeLock().lock(); - try { - delegate.pruneTables(pruner); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { - readWriteLock.readLock().lock(); - try { - return delegate.getFunction(key); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public void removeTable(PName tenantId, String tableName, String parentTableName, - long tableTimeStamp) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.removeTable(tenantId, tableName, parentTableName, tableTimeStamp); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void pruneFunctions(Pruner pruner) { - 
readWriteLock.writeLock().lock(); - try { - delegate.pruneFunctions(pruner); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public long getAge(PTableRef ref) { - readWriteLock.readLock().lock(); - try { - return delegate.getAge(ref); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { - readWriteLock.readLock().lock(); - try { - return delegate.getSchema(key); - } - finally { - readWriteLock.readLock().unlock(); - } - } - - @Override - public void removeColumn(PName tenantId, String tableName, List columnsToRemove, - long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, - resolvedTime); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void addFunction(PFunction function) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.addFunction(function); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void removeFunction(PName tenantId, String function, long functionTimeStamp) - throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.removeFunction(tenantId, function, functionTimeStamp); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void addSchema(PSchema schema) throws SQLException { - readWriteLock.writeLock().lock(); - try { - delegate.addSchema(schema); - } - finally { - readWriteLock.writeLock().unlock(); - } - } - - @Override - public void removeSchema(PSchema schema, long schemaTimeStamp) { - readWriteLock.writeLock().lock(); - try { - delegate.removeSchema(schema, schemaTimeStamp); - } - finally { - readWriteLock.writeLock().unlock(); - } - } + @GuardedBy("readWriteLock") + private PMetaData delegate; + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + + public PSynchronizedMetaData(PMetaData metadata) { + this.delegate = metadata; + } + + @Override + public Iterator iterator() { + readWriteLock.readLock().lock(); + try { + return delegate.iterator(); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public int size() { + readWriteLock.readLock().lock(); + try { + return delegate.size(); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public void addTable(PTable table, long resolvedTime) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.addTable(table, resolvedTime); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public PTableRef getTableRef(PTableKey key) throws TableNotFoundException { + readWriteLock.readLock().lock(); + try { + return delegate.getTableRef(key); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public void updateResolvedTimestamp(PTable table, long resolvedTimestamp) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.updateResolvedTimestamp(table, resolvedTimestamp); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void pruneTables(Pruner pruner) { + readWriteLock.writeLock().lock(); + try { + delegate.pruneTables(pruner); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public PFunction getFunction(PTableKey key) throws FunctionNotFoundException { + readWriteLock.readLock().lock(); + try { 
+ return delegate.getFunction(key); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public void removeTable(PName tenantId, String tableName, String parentTableName, + long tableTimeStamp) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.removeTable(tenantId, tableName, parentTableName, tableTimeStamp); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void pruneFunctions(Pruner pruner) { + readWriteLock.writeLock().lock(); + try { + delegate.pruneFunctions(pruner); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public long getAge(PTableRef ref) { + readWriteLock.readLock().lock(); + try { + return delegate.getAge(ref); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public PSchema getSchema(PTableKey key) throws SchemaNotFoundException { + readWriteLock.readLock().lock(); + try { + return delegate.getSchema(key); + } finally { + readWriteLock.readLock().unlock(); + } + } + + @Override + public void removeColumn(PName tenantId, String tableName, List columnsToRemove, + long tableTimeStamp, long tableSeqNum, long resolvedTime) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum, + resolvedTime); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void addFunction(PFunction function) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.addFunction(function); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void removeFunction(PName tenantId, String function, long functionTimeStamp) + throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.removeFunction(tenantId, function, functionTimeStamp); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void addSchema(PSchema schema) throws SQLException { + readWriteLock.writeLock().lock(); + try { + delegate.addSchema(schema); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + @Override + public void removeSchema(PSchema schema, long schemaTimeStamp) { + readWriteLock.writeLock().lock(); + try { + delegate.removeSchema(schema, schemaTimeStamp); + } finally { + readWriteLock.writeLock().unlock(); + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTable.java index 4ace717a06f..0d74f5b2945 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.schema; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.util.EncodedColumnsUtil.isReservedColumnQualifier; import java.io.DataOutputStream; @@ -46,1070 +46,1079 @@ import org.apache.phoenix.schema.types.PArrayDataTypeEncoder; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.TrustedByteArrayOutputStream; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - - /** * Definition of a Phoenix table - * - * * @since 0.1 */ public interface PTable extends PMetaDataEntity { - public static final long INITIAL_SEQ_NUM = 0; - public static final String IS_IMMUTABLE_ROWS_PROP_NAME = "IMMUTABLE_ROWS"; - public static final boolean DEFAULT_DISABLE_WAL = false; - public static final boolean DEFAULT_IMMUTABLE_ROWS = false; - static final Integer NO_SALTING = -1; - - public enum ViewType { - MAPPED((byte)1), - READ_ONLY((byte)2), - UPDATABLE((byte)3); - - private final byte[] byteValue; - private final byte serializedValue; - - ViewType(byte serializedValue) { - this.serializedValue = serializedValue; - this.byteValue = Bytes.toBytes(this.name()); - } + public static final long INITIAL_SEQ_NUM = 0; + public static final String IS_IMMUTABLE_ROWS_PROP_NAME = "IMMUTABLE_ROWS"; + public static final boolean DEFAULT_DISABLE_WAL = false; + public static final boolean DEFAULT_IMMUTABLE_ROWS = false; + static final Integer NO_SALTING = -1; + + public enum ViewType { + MAPPED((byte) 1), + READ_ONLY((byte) 2), + UPDATABLE((byte) 3); + + private final byte[] byteValue; + private final byte serializedValue; + + ViewType(byte serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); + } - public byte[] getBytes() { - return byteValue; - } + public byte[] getBytes() { + return byteValue; + } - public boolean isReadOnly() { - return this != UPDATABLE; - } + public boolean isReadOnly() { + return this != UPDATABLE; + } - public byte getSerializedValue() { - return this.serializedValue; - } + public byte getSerializedValue() { + return this.serializedValue; + } - public static ViewType fromSerializedValue(byte serializedValue) { - if (serializedValue < 1 || serializedValue > ViewType.values().length) { - throw new IllegalArgumentException("Invalid ViewType " + serializedValue); - } - return ViewType.values()[serializedValue-1]; - } + public static ViewType fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > ViewType.values().length) { + throw new IllegalArgumentException("Invalid ViewType " + serializedValue); + } + return ViewType.values()[serializedValue - 1]; + } - public ViewType combine(ViewType otherType) { - if (otherType == null) { - return this; - } - if (this == UPDATABLE && otherType == 
UPDATABLE) { - return UPDATABLE; - } - return READ_ONLY; - } + public ViewType combine(ViewType otherType) { + if (otherType == null) { + return this; + } + if (this == UPDATABLE && otherType == UPDATABLE) { + return UPDATABLE; + } + return READ_ONLY; } + } - public enum IndexType { - GLOBAL((byte)1), // Covered Global - LOCAL((byte)2), // Covered Local - UNCOVERED_GLOBAL((byte)3); // Uncovered Global + public enum IndexType { + GLOBAL((byte) 1), // Covered Global + LOCAL((byte) 2), // Covered Local + UNCOVERED_GLOBAL((byte) 3); // Uncovered Global - private final byte[] byteValue; - private final byte serializedValue; + private final byte[] byteValue; + private final byte serializedValue; - IndexType(byte serializedValue) { - this.serializedValue = serializedValue; - this.byteValue = Bytes.toBytes(this.name()); - } + IndexType(byte serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); + } - public byte[] getBytes() { - return byteValue; - } + public byte[] getBytes() { + return byteValue; + } - public byte getSerializedValue() { - return this.serializedValue; - } + public byte getSerializedValue() { + return this.serializedValue; + } - public static IndexType getDefault() { - return GLOBAL; - } + public static IndexType getDefault() { + return GLOBAL; + } - public static IndexType fromToken(String token) { - return IndexType.valueOf(token.trim().toUpperCase()); - } + public static IndexType fromToken(String token) { + return IndexType.valueOf(token.trim().toUpperCase()); + } - public static IndexType fromSerializedValue(byte serializedValue) { - if (serializedValue < 1 || serializedValue > IndexType.values().length) { - throw new IllegalArgumentException("Invalid IndexType " + serializedValue); - } - return IndexType.values()[serializedValue-1]; - } + public static IndexType fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > IndexType.values().length) { + throw new IllegalArgumentException("Invalid IndexType " + serializedValue); + } + return IndexType.values()[serializedValue - 1]; } + } - public enum LinkType { - /** - * Link from a table to its index table - */ - INDEX_TABLE((byte)1), - /** - * Link from a view or index to its physical table - */ - PHYSICAL_TABLE((byte)2), - /** - * Link from a view to its parent table - */ - PARENT_TABLE((byte)3), - /** - * Link from a parent table to its child view - * (these are stored in SYSTEM.CHILD_LINK for scalability) - */ - CHILD_TABLE((byte)4), - /** - * Link for an excluded (dropped) column - */ - EXCLUDED_COLUMN((byte)5), - /** - * Link from an index on a view to its parent table - */ - VIEW_INDEX_PARENT_TABLE((byte)6), - /** - * Link from the old table to the new transforming table - */ - TRANSFORMING_NEW_TABLE((byte)7); - - private final byte[] byteValue; - private final byte serializedValue; - private final byte[] serializedByteArrayValue; - - LinkType(byte serializedValue) { - this.serializedValue = serializedValue; - this.byteValue = Bytes.toBytes(this.name()); - this.serializedByteArrayValue = new byte[] { serializedValue }; - } + public enum LinkType { + /** + * Link from a table to its index table + */ + INDEX_TABLE((byte) 1), + /** + * Link from a view or index to its physical table + */ + PHYSICAL_TABLE((byte) 2), + /** + * Link from a view to its parent table + */ + PARENT_TABLE((byte) 3), + /** + * Link from a parent table to its child view (these are stored in SYSTEM.CHILD_LINK for + * scalability) + */ + CHILD_TABLE((byte) 4), + /** + 
* Link for an excluded (dropped) column + */ + EXCLUDED_COLUMN((byte) 5), + /** + * Link from an index on a view to its parent table + */ + VIEW_INDEX_PARENT_TABLE((byte) 6), + /** + * Link from the old table to the new transforming table + */ + TRANSFORMING_NEW_TABLE((byte) 7); - public byte[] getBytes() { - return byteValue; - } + private final byte[] byteValue; + private final byte serializedValue; + private final byte[] serializedByteArrayValue; - public byte getSerializedValue() { - return this.serializedValue; - } + LinkType(byte serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); + this.serializedByteArrayValue = new byte[] { serializedValue }; + } - public byte[] getSerializedValueAsByteArray() { - return serializedByteArrayValue; - } + public byte[] getBytes() { + return byteValue; + } - public static LinkType fromSerializedValue(byte serializedValue) { - if (serializedValue < 1 || serializedValue > LinkType.values().length) { - return null; - } - return LinkType.values()[serializedValue-1]; - } + public byte getSerializedValue() { + return this.serializedValue; } - public enum TaskType { - DROP_CHILD_VIEWS((byte)1), - INDEX_REBUILD((byte)2), - TRANSFORM_MONITOR((byte)3); + public byte[] getSerializedValueAsByteArray() { + return serializedByteArrayValue; + } - private final byte[] byteValue; - private final byte serializedValue; + public static LinkType fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > LinkType.values().length) { + return null; + } + return LinkType.values()[serializedValue - 1]; + } + } - TaskType(byte serializedValue) { - this.serializedValue = serializedValue; - this.byteValue = Bytes.toBytes(this.name()); - } + public enum TaskType { + DROP_CHILD_VIEWS((byte) 1), + INDEX_REBUILD((byte) 2), + TRANSFORM_MONITOR((byte) 3); - public byte[] getBytes() { - return byteValue; - } + private final byte[] byteValue; + private final byte serializedValue; - public byte getSerializedValue() { - return this.serializedValue; - } - public static TaskType getDefault() { - return DROP_CHILD_VIEWS; - } - public static TaskType fromSerializedValue(byte serializedValue) { - if (serializedValue < 1 || serializedValue > TaskType.values().length) { - throw new IllegalArgumentException("Invalid TaskType " + serializedValue); - } - return TaskType.values()[serializedValue-1]; - } + TaskType(byte serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); } - public enum TaskStatus { - CREATED { - public String toString() { - return "CREATED"; - } - }, - STARTED { - public String toString() { - return "STARTED"; - } - }, - COMPLETED { - public String toString() { - return "COMPLETED"; - } - }, - FAILED { - public String toString() { - return "FAILED"; - } - }, - RETRY { - public String toString() { - return "RETRY"; - } - }, + public byte[] getBytes() { + return byteValue; } - public enum TransformType { - METADATA_TRANSFORM((byte)1), - METADATA_TRANSFORM_PARTIAL((byte)2); + public byte getSerializedValue() { + return this.serializedValue; + } - private final byte[] byteValue; - private final int serializedValue; + public static TaskType getDefault() { + return DROP_CHILD_VIEWS; + } - TransformType(int serializedValue) { - this.serializedValue = serializedValue; - this.byteValue = Bytes.toBytes(this.name()); - } + public static TaskType fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > 
TaskType.values().length) { + throw new IllegalArgumentException("Invalid TaskType " + serializedValue); + } + return TaskType.values()[serializedValue - 1]; + } + } + + public enum TaskStatus { + CREATED { + public String toString() { + return "CREATED"; + } + }, + STARTED { + public String toString() { + return "STARTED"; + } + }, + COMPLETED { + public String toString() { + return "COMPLETED"; + } + }, + FAILED { + public String toString() { + return "FAILED"; + } + }, + RETRY { + public String toString() { + return "RETRY"; + } + }, + } + + public enum TransformType { + METADATA_TRANSFORM((byte) 1), + METADATA_TRANSFORM_PARTIAL((byte) 2); + + private final byte[] byteValue; + private final int serializedValue; + + TransformType(int serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); + } - public byte[] getBytes() { - return byteValue; - } + public byte[] getBytes() { + return byteValue; + } - public int getSerializedValue() { - return this.serializedValue; - } - public static TransformType getDefault() { - return METADATA_TRANSFORM; - } - public static TransformType fromSerializedValue(int serializedValue) { - if (serializedValue < 1 || serializedValue > TransformType.values().length) { - throw new IllegalArgumentException("Invalid TransformType " + serializedValue); - } - return TransformType.values()[serializedValue-1]; - } - public static TransformType getPartialTransform(TransformType transformType) { - if (transformType == METADATA_TRANSFORM) { - return METADATA_TRANSFORM_PARTIAL; - } - return null; - } - public static boolean isPartialTransform(TransformType transformType){ - List partials = new ArrayList<>(); - partials.add(PTable.TransformType.METADATA_TRANSFORM_PARTIAL); - return partials.contains(transformType); - } + public int getSerializedValue() { + return this.serializedValue; } - public enum TransformStatus { - CREATED { - public String toString() { - return "CREATED"; - } - }, - STARTED { - public String toString() { - return "STARTED"; - } - }, - PENDING_CUTOVER { - public String toString() { - return "PENDING_CUTOVER"; - } - }, - COMPLETED { - public String toString() { - return "COMPLETED"; - } - }, - FAILED { - public String toString() { - return "FAILED"; - } - }, - PAUSED { - public String toString() { - return "PAUSED"; - } - }, + public static TransformType getDefault() { + return METADATA_TRANSFORM; } - public enum ImmutableStorageScheme implements ColumnValueEncoderDecoderSupplier { - ONE_CELL_PER_COLUMN((byte)1) { - @Override - public ColumnValueEncoder getEncoder(int numElements) { - throw new UnsupportedOperationException(); - } - - @Override - public ColumnValueDecoder getDecoder() { - throw new UnsupportedOperationException(); - } - }, - // stores a single cell per column family that contains all serialized column values - SINGLE_CELL_ARRAY_WITH_OFFSETS((byte)2, PArrayDataType.IMMUTABLE_SERIALIZATION_V2) { - @Override - public ColumnValueEncoder getEncoder(int numElements) { - PDataType type = PVarbinary.INSTANCE; - int estimatedSize = PArrayDataType.estimateSize(numElements, type); - TrustedByteArrayOutputStream byteStream = new TrustedByteArrayOutputStream(estimatedSize); - DataOutputStream oStream = new DataOutputStream(byteStream); - return new PArrayDataTypeEncoder(byteStream, oStream, numElements, type, SortOrder.ASC, false, getSerializationVersion()); - } - - @Override - public ColumnValueDecoder getDecoder() { - return new PArrayDataTypeDecoder(); - } - }; - - private final byte 
serializedValue; - private byte serializationVersion; - - private ImmutableStorageScheme(byte serializedValue) { - this.serializedValue = serializedValue; - } + public static TransformType fromSerializedValue(int serializedValue) { + if (serializedValue < 1 || serializedValue > TransformType.values().length) { + throw new IllegalArgumentException("Invalid TransformType " + serializedValue); + } + return TransformType.values()[serializedValue - 1]; + } - private ImmutableStorageScheme(byte serializedValue, byte serializationVersion) { - this.serializedValue = serializedValue; - this.serializationVersion = serializationVersion; - } + public static TransformType getPartialTransform(TransformType transformType) { + if (transformType == METADATA_TRANSFORM) { + return METADATA_TRANSFORM_PARTIAL; + } + return null; + } - public byte getSerializedMetadataValue() { - return this.serializedValue; - } + public static boolean isPartialTransform(TransformType transformType) { + List partials = new ArrayList<>(); + partials.add(PTable.TransformType.METADATA_TRANSFORM_PARTIAL); + return partials.contains(transformType); + } + } + + public enum TransformStatus { + CREATED { + public String toString() { + return "CREATED"; + } + }, + STARTED { + public String toString() { + return "STARTED"; + } + }, + PENDING_CUTOVER { + public String toString() { + return "PENDING_CUTOVER"; + } + }, + COMPLETED { + public String toString() { + return "COMPLETED"; + } + }, + FAILED { + public String toString() { + return "FAILED"; + } + }, + PAUSED { + public String toString() { + return "PAUSED"; + } + }, + } + + public enum ImmutableStorageScheme implements ColumnValueEncoderDecoderSupplier { + ONE_CELL_PER_COLUMN((byte) 1) { + @Override + public ColumnValueEncoder getEncoder(int numElements) { + throw new UnsupportedOperationException(); + } + + @Override + public ColumnValueDecoder getDecoder() { + throw new UnsupportedOperationException(); + } + }, + // stores a single cell per column family that contains all serialized column values + SINGLE_CELL_ARRAY_WITH_OFFSETS((byte) 2, PArrayDataType.IMMUTABLE_SERIALIZATION_V2) { + @Override + public ColumnValueEncoder getEncoder(int numElements) { + PDataType type = PVarbinary.INSTANCE; + int estimatedSize = PArrayDataType.estimateSize(numElements, type); + TrustedByteArrayOutputStream byteStream = new TrustedByteArrayOutputStream(estimatedSize); + DataOutputStream oStream = new DataOutputStream(byteStream); + return new PArrayDataTypeEncoder(byteStream, oStream, numElements, type, SortOrder.ASC, + false, getSerializationVersion()); + } + + @Override + public ColumnValueDecoder getDecoder() { + return new PArrayDataTypeDecoder(); + } + }; + + private final byte serializedValue; + private byte serializationVersion; + + private ImmutableStorageScheme(byte serializedValue) { + this.serializedValue = serializedValue; + } - public byte getSerializationVersion() { - return this.serializationVersion; - } + private ImmutableStorageScheme(byte serializedValue, byte serializationVersion) { + this.serializedValue = serializedValue; + this.serializationVersion = serializationVersion; + } - @VisibleForTesting - void setSerializationVersion(byte serializationVersion) { - this.serializationVersion = serializationVersion; - } + public byte getSerializedMetadataValue() { + return this.serializedValue; + } - public static ImmutableStorageScheme fromSerializedValue(byte serializedValue) { - if (serializedValue < 1 || serializedValue > ImmutableStorageScheme.values().length) { - return null; - } 
- return ImmutableStorageScheme.values()[serializedValue-1]; - } + public byte getSerializationVersion() { + return this.serializationVersion; + } + @VisibleForTesting + void setSerializationVersion(byte serializationVersion) { + this.serializationVersion = serializationVersion; } - - interface ColumnValueEncoderDecoderSupplier { - ColumnValueEncoder getEncoder(int numElements); - ColumnValueDecoder getDecoder(); + + public static ImmutableStorageScheme fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > ImmutableStorageScheme.values().length) { + return null; + } + return ImmutableStorageScheme.values()[serializedValue - 1]; } - - public enum QualifierEncodingScheme implements QualifierEncoderDecoder { - NON_ENCODED_QUALIFIERS((byte)0, null) { - @Override - public byte[] encode(int value) { - throw new UnsupportedOperationException(); - } - - @Override - public int decode(byte[] bytes) { - throw new UnsupportedOperationException(); - } - - @Override - public int decode(byte[] bytes, int offset, int length) { - throw new UnsupportedOperationException(); - } - - @Override - public String toString() { - return name(); - } - }, - ONE_BYTE_QUALIFIERS((byte)1, 255) { - private final int c = Math.abs(Byte.MIN_VALUE); - - @Override - public byte[] encode(int value) { - if (isReservedColumnQualifier(value)) { - return FOUR_BYTE_QUALIFIERS.encode(value); - } - if (value < 0 || value > maxQualifier) { - throw new QualifierOutOfRangeException(0, maxQualifier); - } - return new byte[]{(byte)(value - c)}; - } - - @Override - public int decode(byte[] bytes) { - if (bytes.length == 4) { - return getReservedQualifier(bytes); - } - if (bytes.length != 1) { - throw new InvalidQualifierBytesException(1, bytes.length); - } - return bytes[0] + c; - } - - @Override - public int decode(byte[] bytes, int offset, int length) { - if (length == 4) { - return getReservedQualifier(bytes, offset, length); - } - if (length != 1) { - throw new InvalidQualifierBytesException(1, length); - } - return bytes[offset] + c; - } - - @Override - public String toString() { - return name(); - } - }, - TWO_BYTE_QUALIFIERS((byte)2, 65535) { - private final int c = Math.abs(Short.MIN_VALUE); - - @Override - public byte[] encode(int value) { - if (isReservedColumnQualifier(value)) { - return FOUR_BYTE_QUALIFIERS.encode(value); - } - if (value < 0 || value > maxQualifier) { - throw new QualifierOutOfRangeException(0, maxQualifier); - } - return Bytes.toBytes((short)(value - c)); - } - - @Override - public int decode(byte[] bytes) { - if (bytes.length == 4) { - return getReservedQualifier(bytes); - } - if (bytes.length != 2) { - throw new InvalidQualifierBytesException(2, bytes.length); - } - return Bytes.toShort(bytes) + c; - } - - @Override - public int decode(byte[] bytes, int offset, int length) { - if (length == 4) { - return getReservedQualifier(bytes, offset, length); - } - if (length != 2) { - throw new InvalidQualifierBytesException(2, length); - } - return Bytes.toShort(bytes, offset, length) + c; - } - - @Override - public String toString() { - return name(); - } - }, - THREE_BYTE_QUALIFIERS((byte)3, 16777215) { - @Override - public byte[] encode(int value) { - if (isReservedColumnQualifier(value)) { - return FOUR_BYTE_QUALIFIERS.encode(value); - } - if (value < 0 || value > maxQualifier) { - throw new QualifierOutOfRangeException(0, maxQualifier); - } - byte[] arr = Bytes.toBytes(value); - return new byte[]{arr[1], arr[2], arr[3]}; - } - - @Override - public int decode(byte[] bytes) { - 
if (bytes.length == 4) { - return getReservedQualifier(bytes); - } - if (bytes.length != 3) { - throw new InvalidQualifierBytesException(2, bytes.length); - } - byte[] toReturn = new byte[4]; - toReturn[1] = bytes[0]; - toReturn[2] = bytes[1]; - toReturn[3] = bytes[2]; - return Bytes.toInt(toReturn); - } - - @Override - public int decode(byte[] bytes, int offset, int length) { - if (length == 4) { - return getReservedQualifier(bytes, offset, length); - } - if (length != 3) { - throw new InvalidQualifierBytesException(3, length); - } - byte[] toReturn = new byte[4]; - toReturn[1] = bytes[offset]; - toReturn[2] = bytes[offset + 1]; - toReturn[3] = bytes[offset + 2]; - return Bytes.toInt(toReturn); - } - - @Override - public String toString() { - return name(); - } - }, - FOUR_BYTE_QUALIFIERS((byte)4, Integer.MAX_VALUE) { - @Override - public byte[] encode(int value) { - if (value < 0) { - throw new QualifierOutOfRangeException(0, maxQualifier); - } - return Bytes.toBytes(value); - } - - @Override - public int decode(byte[] bytes) { - if (bytes.length != 4) { - throw new InvalidQualifierBytesException(4, bytes.length); - } - return Bytes.toInt(bytes); - } - - @Override - public int decode(byte[] bytes, int offset, int length) { - if (length != 4) { - throw new InvalidQualifierBytesException(4, length); - } - return Bytes.toInt(bytes, offset, length); - } - - @Override - public String toString() { - return name(); - } - }; - - final byte metadataValue; - final Integer maxQualifier; - - public byte getSerializedMetadataValue() { - return this.metadataValue; + + } + + interface ColumnValueEncoderDecoderSupplier { + ColumnValueEncoder getEncoder(int numElements); + + ColumnValueDecoder getDecoder(); + } + + public enum QualifierEncodingScheme implements QualifierEncoderDecoder { + NON_ENCODED_QUALIFIERS((byte) 0, null) { + @Override + public byte[] encode(int value) { + throw new UnsupportedOperationException(); + } + + @Override + public int decode(byte[] bytes) { + throw new UnsupportedOperationException(); + } + + @Override + public int decode(byte[] bytes, int offset, int length) { + throw new UnsupportedOperationException(); + } + + @Override + public String toString() { + return name(); + } + }, + ONE_BYTE_QUALIFIERS((byte) 1, 255) { + private final int c = Math.abs(Byte.MIN_VALUE); + + @Override + public byte[] encode(int value) { + if (isReservedColumnQualifier(value)) { + return FOUR_BYTE_QUALIFIERS.encode(value); + } + if (value < 0 || value > maxQualifier) { + throw new QualifierOutOfRangeException(0, maxQualifier); } + return new byte[] { (byte) (value - c) }; + } - public static QualifierEncodingScheme fromSerializedValue(byte serializedValue) { - if (serializedValue < 0 || serializedValue >= QualifierEncodingScheme.values().length) { - return null; - } - return QualifierEncodingScheme.values()[serializedValue]; + @Override + public int decode(byte[] bytes) { + if (bytes.length == 4) { + return getReservedQualifier(bytes); } - - @Override - public Integer getMaxQualifier() { - return maxQualifier; + if (bytes.length != 1) { + throw new InvalidQualifierBytesException(1, bytes.length); } + return bytes[0] + c; + } - private QualifierEncodingScheme(byte serializedMetadataValue, Integer maxQualifier) { - this.metadataValue = serializedMetadataValue; - this.maxQualifier = maxQualifier; + @Override + public int decode(byte[] bytes, int offset, int length) { + if (length == 4) { + return getReservedQualifier(bytes, offset, length); } - - @VisibleForTesting - public static class 
QualifierOutOfRangeException extends RuntimeException { - public QualifierOutOfRangeException(int minQualifier, int maxQualifier) { - super("Qualifier out of range (" + minQualifier + ", " + maxQualifier + ")"); - } + if (length != 1) { + throw new InvalidQualifierBytesException(1, length); } - - @VisibleForTesting - public static class InvalidQualifierBytesException extends RuntimeException { - public InvalidQualifierBytesException(int expectedLength, int actualLength) { - super("Invalid number of qualifier bytes. Expected length: " + expectedLength + ". Actual: " + actualLength); - } + return bytes[offset] + c; + } + + @Override + public String toString() { + return name(); + } + }, + TWO_BYTE_QUALIFIERS((byte) 2, 65535) { + private final int c = Math.abs(Short.MIN_VALUE); + + @Override + public byte[] encode(int value) { + if (isReservedColumnQualifier(value)) { + return FOUR_BYTE_QUALIFIERS.encode(value); } + if (value < 0 || value > maxQualifier) { + throw new QualifierOutOfRangeException(0, maxQualifier); + } + return Bytes.toBytes((short) (value - c)); + } - /** - * We generate our column qualifiers in the reserved range 0-10 using the FOUR_BYTE_QUALIFIERS - * encoding. When adding Cells corresponding to the reserved qualifiers to the - * EncodedColumnQualifierCells list, we need to make sure that we use the FOUR_BYTE_QUALIFIERS - * scheme to decode the correct int value. - */ - private static int getReservedQualifier(byte[] bytes) { - checkArgument(bytes.length == 4); - int number = FOUR_BYTE_QUALIFIERS.decode(bytes); - if (!isReservedColumnQualifier(number)) { - throw new InvalidQualifierBytesException(4, bytes.length); - } - return number; + @Override + public int decode(byte[] bytes) { + if (bytes.length == 4) { + return getReservedQualifier(bytes); + } + if (bytes.length != 2) { + throw new InvalidQualifierBytesException(2, bytes.length); + } + return Bytes.toShort(bytes) + c; + } + + @Override + public int decode(byte[] bytes, int offset, int length) { + if (length == 4) { + return getReservedQualifier(bytes, offset, length); + } + if (length != 2) { + throw new InvalidQualifierBytesException(2, length); + } + return Bytes.toShort(bytes, offset, length) + c; + } + + @Override + public String toString() { + return name(); + } + }, + THREE_BYTE_QUALIFIERS((byte) 3, 16777215) { + @Override + public byte[] encode(int value) { + if (isReservedColumnQualifier(value)) { + return FOUR_BYTE_QUALIFIERS.encode(value); + } + if (value < 0 || value > maxQualifier) { + throw new QualifierOutOfRangeException(0, maxQualifier); + } + byte[] arr = Bytes.toBytes(value); + return new byte[] { arr[1], arr[2], arr[3] }; + } + + @Override + public int decode(byte[] bytes) { + if (bytes.length == 4) { + return getReservedQualifier(bytes); + } + if (bytes.length != 3) { + throw new InvalidQualifierBytesException(2, bytes.length); + } + byte[] toReturn = new byte[4]; + toReturn[1] = bytes[0]; + toReturn[2] = bytes[1]; + toReturn[3] = bytes[2]; + return Bytes.toInt(toReturn); + } + + @Override + public int decode(byte[] bytes, int offset, int length) { + if (length == 4) { + return getReservedQualifier(bytes, offset, length); + } + if (length != 3) { + throw new InvalidQualifierBytesException(3, length); + } + byte[] toReturn = new byte[4]; + toReturn[1] = bytes[offset]; + toReturn[2] = bytes[offset + 1]; + toReturn[3] = bytes[offset + 2]; + return Bytes.toInt(toReturn); + } + + @Override + public String toString() { + return name(); + } + }, + FOUR_BYTE_QUALIFIERS((byte) 4, Integer.MAX_VALUE) { + 
@Override + public byte[] encode(int value) { + if (value < 0) { + throw new QualifierOutOfRangeException(0, maxQualifier); } + return Bytes.toBytes(value); + } - /** - * We generate our column qualifiers in the reserved range 0-10 using the FOUR_BYTE_QUALIFIERS - * encoding. When adding Cells corresponding to the reserved qualifiers to the - * EncodedColumnQualifierCells list, we need to make sure that we use the FOUR_BYTE_QUALIFIERS - * scheme to decode the correct int value. - */ - private static int getReservedQualifier(byte[] bytes, int offset, int length) { - checkArgument(length == 4); - int number = FOUR_BYTE_QUALIFIERS.decode(bytes, offset, length); - if (!isReservedColumnQualifier(number)) { - throw new InvalidQualifierBytesException(4, length); - } - return number; + @Override + public int decode(byte[] bytes) { + if (bytes.length != 4) { + throw new InvalidQualifierBytesException(4, bytes.length); } + return Bytes.toInt(bytes); + } + + @Override + public int decode(byte[] bytes, int offset, int length) { + if (length != 4) { + throw new InvalidQualifierBytesException(4, length); + } + return Bytes.toInt(bytes, offset, length); + } + + @Override + public String toString() { + return name(); + } + }; + + final byte metadataValue; + final Integer maxQualifier; + + public byte getSerializedMetadataValue() { + return this.metadataValue; } - - interface QualifierEncoderDecoder { - byte[] encode(int value); - int decode(byte[] bytes); - int decode(byte[] bytes, int offset, int length); - Integer getMaxQualifier(); + + public static QualifierEncodingScheme fromSerializedValue(byte serializedValue) { + if (serializedValue < 0 || serializedValue >= QualifierEncodingScheme.values().length) { + return null; + } + return QualifierEncodingScheme.values()[serializedValue]; } - long getTimeStamp(); - long getSequenceNumber(); - long getIndexDisableTimestamp(); + @Override + public Integer getMaxQualifier() { + return maxQualifier; + } - boolean isIndexStateDisabled(); + private QualifierEncodingScheme(byte serializedMetadataValue, Integer maxQualifier) { + this.metadataValue = serializedMetadataValue; + this.maxQualifier = maxQualifier; + } + + @VisibleForTesting + public static class QualifierOutOfRangeException extends RuntimeException { + public QualifierOutOfRangeException(int minQualifier, int maxQualifier) { + super("Qualifier out of range (" + minQualifier + ", " + maxQualifier + ")"); + } + } + + @VisibleForTesting + public static class InvalidQualifierBytesException extends RuntimeException { + public InvalidQualifierBytesException(int expectedLength, int actualLength) { + super("Invalid number of qualifier bytes. Expected length: " + expectedLength + ". Actual: " + + actualLength); + } + } /** - * @return table name + * We generate our column qualifiers in the reserved range 0-10 using the FOUR_BYTE_QUALIFIERS + * encoding. When adding Cells corresponding to the reserved qualifiers to the + * EncodedColumnQualifierCells list, we need to make sure that we use the FOUR_BYTE_QUALIFIERS + * scheme to decode the correct int value. 
*/ - PName getName(); - PName getSchemaName(); - PName getTableName(); - PName getTenantId(); + private static int getReservedQualifier(byte[] bytes) { + checkArgument(bytes.length == 4); + int number = FOUR_BYTE_QUALIFIERS.decode(bytes); + if (!isReservedColumnQualifier(number)) { + throw new InvalidQualifierBytesException(4, bytes.length); + } + return number; + } /** - * @return the table type + * We generate our column qualifiers in the reserved range 0-10 using the FOUR_BYTE_QUALIFIERS + * encoding. When adding Cells corresponding to the reserved qualifiers to the + * EncodedColumnQualifierCells list, we need to make sure that we use the FOUR_BYTE_QUALIFIERS + * scheme to decode the correct int value. */ - PTableType getType(); + private static int getReservedQualifier(byte[] bytes, int offset, int length) { + checkArgument(length == 4); + int number = FOUR_BYTE_QUALIFIERS.decode(bytes, offset, length); + if (!isReservedColumnQualifier(number)) { + throw new InvalidQualifierBytesException(4, length); + } + return number; + } + } - PName getPKName(); + interface QualifierEncoderDecoder { + byte[] encode(int value); + + int decode(byte[] bytes); + + int decode(byte[] bytes, int offset, int length); + + Integer getMaxQualifier(); + } + + long getTimeStamp(); + + long getSequenceNumber(); + + long getIndexDisableTimestamp(); + + boolean isIndexStateDisabled(); + + /** Returns table name */ + PName getName(); + + PName getSchemaName(); + + PName getTableName(); + + PName getTenantId(); + + /** Returns the table type */ + PTableType getType(); + + PName getPKName(); + + /** + * Get the PK columns ordered by position. + * @return a list of the PK columns + */ + List getPKColumns(); + + /** + * Get all columns ordered by position. + * @return a list of all columns + */ + List getColumns(); + + /** + * Get all excluded columns + * @return a list of excluded columns + */ + List getExcludedColumns(); + + /** Returns A list of the column families of this table ordered by position. */ + List getColumnFamilies(); + + /** + * Return true if the table only has pk columns and no non-pk columns. + * @return true if the table only has pk columns and no non-pk columns. + */ + boolean hasOnlyPkColumns(); + + /** + * Get the column family with the given name + * @param family the column family name + * @return the PColumnFamily with the given name + * @throws ColumnFamilyNotFoundException if the column family cannot be found + */ + PColumnFamily getColumnFamily(byte[] family) throws ColumnFamilyNotFoundException; + + PColumnFamily getColumnFamily(String family) throws ColumnFamilyNotFoundException; + + /** + * Get the column with the given string name. + * @param name the column name + * @return the PColumn with the given name + * @throws ColumnNotFoundException if no column with the given name can be found + * @throws AmbiguousColumnException if multiple columns are found with the given name + */ + PColumn getColumnForColumnName(String name) + throws ColumnNotFoundException, AmbiguousColumnException; + + /** + * Get the column with the given column qualifier. 
+ * @param cf column family bytes + * @param cq qualifier bytes + * @return the PColumn with the given column qualifier + * @throws ColumnNotFoundException if no column with the given column qualifier can be found + * @throws AmbiguousColumnException if multiple columns are found with the given column qualifier + */ + PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) + throws ColumnNotFoundException, AmbiguousColumnException; + + /** + * Get the PK column with the given name. + * @param name the column name + * @return the PColumn with the given name + * @throws ColumnNotFoundException if no PK column with the given name can be found + */ + PColumn getPKColumn(String name) throws ColumnNotFoundException; + + /** + * Creates a new row at the specified timestamp using the key for the PK values (from + * {@link #newKey(ImmutableBytesWritable, byte[][])} and the optional key values specified using + * values. + * @param ts the timestamp that the key value will have when committed + * @param key the row key of the key value + * @param hasOnDupKey true if row has an ON DUPLICATE KEY clause and false otherwise. + * @param values the optional key values + * @return the new row. Use {@link org.apache.phoenix.schema.PRow#toRowMutations()} to generate + * the Row to send to the HBase server. + * @throws ConstraintViolationException if row data violates schema constraint + */ + PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, boolean hasOnDupKey, + byte[]... values); + + /** + * Creates a new row for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[][])} and + * the optional key values specified using values. The timestamp of the key value will be set by + * the HBase server. + * @param key the row key of the key value + * @param hasOnDupKey true if row has an ON DUPLICATE KEY clause and false otherwise. + * @param values the optional key values + * @return the new row. Use {@link org.apache.phoenix.schema.PRow#toRowMutations()} to generate + * the row to send to the HBase server. + * @throws ConstraintViolationException if row data violates schema constraint + */ + PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, + byte[]... values); + + /** + * Formulates a row key using the values provided. The values must be in the same order as + * {@link #getPKColumns()}. + * @param key bytes pointer that will be filled in with the row key + * @param values the PK column values + * @return the number of values that were used from values to set the row key + */ + int newKey(ImmutableBytesWritable key, byte[][] values); + + RowKeySchema getRowKeySchema(); + + /** + * Return the number of buckets used by this table for salting. If the table does not use salting, + * returns null. + * @return number of buckets used by this table for salting, or null if salting is not used. + */ + Integer getBucketNum(); + + /** + * Return the list of indexes defined on this table. + * @return the list of indexes. + */ + List getIndexes(); + + /** + * Return the new version of the table if it is going through transform. + * @return the new table. + */ + PTable getTransformingNewTable(); + + /** + * For a table of index type, return the state of the table. + * @return the state of the index. + */ + PIndexState getIndexState(); + + /** + * @return the full name of the parent view for a view or data table for an index table or null if + * this is not a view or index table. 
Also returns null for a view of a data table + * (use @getPhysicalName for this case) + */ + PName getParentName(); + + /** + * @return the table name of the parent view for a view or data table for an index table or null + * if this is not a view or index table. Also returns null for a view of a data table + * (use @getPhysicalTableName for this case) + */ + PName getParentTableName(); + + /** + * @return the logical full name of the base table. In case of the view index, it is the + * _IDX_+logical name of base table Ex: For hierarchical views like tableLogicalName --> + * view1 --> view2, for view2, returns sc.tableLogicalName For view2, getParentTableName + * returns view1 and getBaseTableLogicalName returns sc.tableLogicalName + */ + PName getBaseTableLogicalName(); + + /** + * @return the schema name of the parent view for a view or data table for an index table or null + * if this is not a view or index table. Also returns null for view of a data table + * (use @getPhysicalSchemaName for this case) + */ + PName getParentSchemaName(); + + /** + * For a view, return the name of table in Phoenix that physically stores data. Currently a single + * name, but when views are allowed over multiple tables, will become multi-valued. + * @return the name of the physical table storing the data. + */ + public List getPhysicalNames(); + + /** + * For a view, return the name of table in HBase that physically stores data. + * @return the name of the physical HBase table storing the data. + */ + PName getPhysicalName(); + + /** + * If returnColValueFromSyscat is true, returns the column value set in the syscat. Otherwise, + * behaves like getPhysicalName() + * @return the name of the physical HBase table storing the data. + */ + PName getPhysicalName(boolean returnColValueFromSyscat); - /** - * Get the PK columns ordered by position. - * @return a list of the PK columns - */ - List getPKColumns(); + boolean isImmutableRows(); + + boolean getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) + throws SQLException; + + IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) + throws SQLException; - /** - * Get all columns ordered by position. - * @return a list of all columns - */ - List getColumns(); - - /** - * Get all excluded columns - * @return a list of excluded columns - */ - List getExcludedColumns(); + IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, + PhoenixConnection connection) throws SQLException; - /** - * @return A list of the column families of this table - * ordered by position. - */ - List getColumnFamilies(); + TransformMaintainer getTransformMaintainer(PTable oldTable, PhoenixConnection connection); - /** - * Return true if the table only has pk columns and no non-pk columns. - * - * @return true if the table only has pk columns and no non-pk columns. - */ - boolean hasOnlyPkColumns(); + PName getDefaultFamilyName(); - /** - * Get the column family with the given name - * @param family the column family name - * @return the PColumnFamily with the given name - * @throws ColumnFamilyNotFoundException if the column family cannot be found - */ - PColumnFamily getColumnFamily(byte[] family) throws ColumnFamilyNotFoundException; + boolean isWALDisabled(); - PColumnFamily getColumnFamily(String family) throws ColumnFamilyNotFoundException; + boolean isMultiTenant(); - /** - * Get the column with the given string name. 
- * @param name the column name - * @return the PColumn with the given name - * @throws ColumnNotFoundException if no column with the given name - * can be found - * @throws AmbiguousColumnException if multiple columns are found with the given name - */ - PColumn getColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException; - - /** - * Get the column with the given column qualifier. - * @param cf column family bytes - * @param cq qualifier bytes - * @return the PColumn with the given column qualifier - * @throws ColumnNotFoundException if no column with the given column qualifier can be found - * @throws AmbiguousColumnException if multiple columns are found with the given column qualifier - */ - PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) throws ColumnNotFoundException, AmbiguousColumnException; - - /** - * Get the PK column with the given name. - * @param name the column name - * @return the PColumn with the given name - * @throws ColumnNotFoundException if no PK column with the given name - * can be found - * @throws ColumnNotFoundException - */ - PColumn getPKColumn(String name) throws ColumnNotFoundException; + boolean getStoreNulls(); - /** - * Creates a new row at the specified timestamp using the key - * for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[][])} - * and the optional key values specified using values. - * @param ts the timestamp that the key value will have when committed - * @param key the row key of the key value - * @param hasOnDupKey true if row has an ON DUPLICATE KEY clause and false otherwise. - * @param values the optional key values - * @return the new row. Use {@link org.apache.phoenix.schema.PRow#toRowMutations()} to - * generate the Row to send to the HBase server. - * @throws ConstraintViolationException if row data violates schema - * constraint - */ - PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, boolean hasOnDupKey, byte[]... values); + boolean isTransactional(); - /** - * Creates a new row for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[][])} - * and the optional key values specified using values. The timestamp of the key value - * will be set by the HBase server. - * @param key the row key of the key value - * @param hasOnDupKey true if row has an ON DUPLICATE KEY clause and false otherwise. - * @param values the optional key values - * @return the new row. Use {@link org.apache.phoenix.schema.PRow#toRowMutations()} to - * generate the row to send to the HBase server. - * @throws ConstraintViolationException if row data violates schema - * constraint - */ - PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, byte[]... values); + TransactionFactory.Provider getTransactionProvider(); - /** - * Formulates a row key using the values provided. The values must be in - * the same order as {@link #getPKColumns()}. - * @param key bytes pointer that will be filled in with the row key - * @param values the PK column values - * @return the number of values that were used from values to set - * the row key - */ - int newKey(ImmutableBytesWritable key, byte[][] values); + ViewType getViewType(); - RowKeySchema getRowKeySchema(); + String getViewStatement(); - /** - * Return the number of buckets used by this table for salting. If the table does - * not use salting, returns null. - * @return number of buckets used by this table for salting, or null if salting is not used. 
- */ - Integer getBucketNum(); + Long getViewIndexId(); - /** - * Return the list of indexes defined on this table. - * @return the list of indexes. - */ - List getIndexes(); + PDataType getviewIndexIdType(); - /** - * Return the new version of the table if it is going through transform. - * @return the new table. - */ - PTable getTransformingNewTable(); + PTableKey getKey(); - /** - * For a table of index type, return the state of the table. - * @return the state of the index. - */ - PIndexState getIndexState(); + IndexType getIndexType(); - /** - * @return the full name of the parent view for a view or data table for an index table - * or null if this is not a view or index table. Also returns null for a view of a data table - * (use @getPhysicalName for this case) - */ - PName getParentName(); - /** - * @return the table name of the parent view for a view or data table for an index table - * or null if this is not a view or index table. Also returns null for a view of a data table - * (use @getPhysicalTableName for this case) - */ - PName getParentTableName(); + int getBaseColumnCount(); - /** - * @return the logical full name of the base table. In case of the view index, it is the _IDX_+logical name of base table - * Ex: For hierarchical views like tableLogicalName --> view1 --> view2, for view2, returns sc.tableLogicalName - * For view2, getParentTableName returns view1 and getBaseTableLogicalName returns sc.tableLogicalName - */ - PName getBaseTableLogicalName(); + /** + * Determines whether or not we may optimize out an ORDER BY or do a GROUP BY in-place when the + * optimizer tells us it's possible. This is due to PHOENIX-2067 and only applicable for tables + * using DESC primary key column(s) which have not been upgraded. + * @return true if optimizations row key order optimizations are possible + */ + boolean rowKeyOrderOptimizable(); - /** - * @return the schema name of the parent view for a view or data table for an index table - * or null if this is not a view or index table. Also returns null for view of a data table - * (use @getPhysicalSchemaName for this case) - */ - PName getParentSchemaName(); + /** + * @return Position of the column with {@link PColumn#isRowTimestamp()} as true. -1 if there is no + * such column. + */ + int getRowTimestampColPos(); - /** - * For a view, return the name of table in Phoenix that physically stores data. - * Currently a single name, but when views are allowed over multiple tables, will become multi-valued. - * @return the name of the physical table storing the data. - */ - public List getPhysicalNames(); + long getUpdateCacheFrequency(); - /** - * For a view, return the name of table in HBase that physically stores data. - * @return the name of the physical HBase table storing the data. - */ - PName getPhysicalName(); - /** - * If returnColValueFromSyscat is true, returns the column value set in the syscat. - * Otherwise, behaves like getPhysicalName() - * @return the name of the physical HBase table storing the data. 
- */ - PName getPhysicalName(boolean returnColValueFromSyscat); - - boolean isImmutableRows(); - boolean getIndexMaintainers(ImmutableBytesWritable ptr, PhoenixConnection connection) - throws SQLException; - IndexMaintainer getIndexMaintainer(PTable dataTable, PhoenixConnection connection) - throws SQLException; - IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, - PhoenixConnection connection) throws SQLException; - TransformMaintainer getTransformMaintainer(PTable oldTable, PhoenixConnection connection); - PName getDefaultFamilyName(); - - boolean isWALDisabled(); - boolean isMultiTenant(); - boolean getStoreNulls(); - boolean isTransactional(); - TransactionFactory.Provider getTransactionProvider(); - - ViewType getViewType(); - String getViewStatement(); - Long getViewIndexId(); - PDataType getviewIndexIdType(); - PTableKey getKey(); - - IndexType getIndexType(); - int getBaseColumnCount(); + boolean isNamespaceMapped(); - /** - * Determines whether or not we may optimize out an ORDER BY or do a GROUP BY - * in-place when the optimizer tells us it's possible. This is due to PHOENIX-2067 - * and only applicable for tables using DESC primary key column(s) which have - * not been upgraded. - * @return true if optimizations row key order optimizations are possible - */ - boolean rowKeyOrderOptimizable(); - - /** - * @return Position of the column with {@link PColumn#isRowTimestamp()} as true. - * -1 if there is no such column. - */ - int getRowTimestampColPos(); - long getUpdateCacheFrequency(); - boolean isNamespaceMapped(); + /** + * @return The sequence name used to get the unique identifier for views that are automatically + * partitioned. + */ + String getAutoPartitionSeqName(); - /** - * @return The sequence name used to get the unique identifier for views - * that are automatically partitioned. - */ - String getAutoPartitionSeqName(); - - /** - * @return true if the you can only add (and never delete) columns to the table, - * you are also not allowed to delete the table - */ - boolean isAppendOnlySchema(); - ImmutableStorageScheme getImmutableStorageScheme(); - QualifierEncodingScheme getEncodingScheme(); - EncodedCQCounter getEncodedCQCounter(); - Boolean useStatsForParallelization(); - boolean hasViewModifiedUpdateCacheFrequency(); - boolean hasViewModifiedUseStatsForParallelization(); - Map getPropertyValues(); - Map getDefaultPropertyValues(); + /** + * @return true if the you can only add (and never delete) columns to the table, you are also not + * allowed to delete the table + */ + boolean isAppendOnlySchema(); - /** - * @return The TTL duration associated with the entity when Phoenix level TTL is enabled. - */ - int getTTL(); + ImmutableStorageScheme getImmutableStorageScheme(); - /** - * @return the last timestamp at which this entity had its data shape created or modified (e - * .g, create entity, adding or dropping a column. Not affected by changing table properties - */ - Long getLastDDLTimestamp(); + QualifierEncodingScheme getEncodingScheme(); - /** - * @return Whether change detection is enabled on a given table or view. If it is, we will - * annotate write-ahead logs with additional metadata - */ - boolean isChangeDetectionEnabled(); + EncodedCQCounter getEncodedCQCounter(); - /** - * @return User-provided string identifying the application version that last created or modified this schema - * object. Used only on tables, views, and indexes. 
- */ - String getSchemaVersion(); + Boolean useStatsForParallelization(); - /** - * @return String provided by an external schema registry to be used to lookup the schema for - * a Phoenix table or view in the registry. - */ - String getExternalSchemaId(); + boolean hasViewModifiedUpdateCacheFrequency(); - /** - * @return Optional string to be used for a logical topic name that change detection capture - * will use to persist changes for this table or view - */ - String getStreamingTopicName(); + boolean hasViewModifiedUseStatsForParallelization(); - /** - * @return Optional string that represents the default include scopes to be used for CDC queries. - */ - Set getCDCIncludeScopes(); + Map getPropertyValues(); - /** - * - * @return the optional where clause in string used for partial indexes - */ - String getIndexWhere(); + Map getDefaultPropertyValues(); - /** - * @return the map of all ancestors to their LAST_DDL_TIMESTAMP - */ - Map getAncestorLastDDLTimestampMap(); + /** Returns The TTL duration associated with the entity when Phoenix level TTL is enabled. */ + int getTTL(); + + /** + * @return the last timestamp at which this entity had its data shape created or modified (e .g, + * create entity, adding or dropping a column. Not affected by changing table properties + */ + Long getLastDDLTimestamp(); + + /** + * @return Whether change detection is enabled on a given table or view. If it is, we will + * annotate write-ahead logs with additional metadata + */ + boolean isChangeDetectionEnabled(); + + /** + * @return User-provided string identifying the application version that last created or modified + * this schema object. Used only on tables, views, and indexes. + */ + String getSchemaVersion(); + + /** + * @return String provided by an external schema registry to be used to lookup the schema for a + * Phoenix table or view in the registry. + */ + String getExternalSchemaId(); + + /** + * @return Optional string to be used for a logical topic name that change detection capture will + * use to persist changes for this table or view + */ + String getStreamingTopicName(); + + /** + * Returns Optional string that represents the default include scopes to be used for CDC queries. + */ + Set getCDCIncludeScopes(); + + /** Returns the optional where clause in string used for partial indexes */ + String getIndexWhere(); + + /** Returns the map of all ancestors to their LAST_DDL_TIMESTAMP */ + Map getAncestorLastDDLTimestampMap(); + + /** + * @param connection PhoenixConnection + * @return the optional where clause in DNF expression used for partial indexes + */ + Expression getIndexWhereExpression(PhoenixConnection connection) throws SQLException; + + /** Returns the set of column references for the columns included in the index where clause */ + Set getIndexWhereColumns(PhoenixConnection connection) throws SQLException; + + /** + * Returns: Table level max lookback age if configured else null. + */ + Long getMaxLookbackAge(); + + /** + * @return Prefixed KeyRange generated by the expression representing the view statement. In other + * words this will be one-to-one mapping between view and PREFIXED KeyRange that'll exist. + */ + byte[] getRowKeyMatcher(); + + /** + * Class to help track encoded column qualifier counters per column family. 
+ */ + public class EncodedCQCounter { + + private final Map familyCounters = new HashMap<>(); /** - * - * @param connection PhoenixConnection - * @return the optional where clause in DNF expression used for partial indexes - * @throws SQLException + * Copy constructor + * @return copy of the passed counter */ - Expression getIndexWhereExpression(PhoenixConnection connection) throws SQLException; + public static EncodedCQCounter copy(EncodedCQCounter counterToCopy) { + EncodedCQCounter cqCounter = new EncodedCQCounter(); + for (Entry e : counterToCopy.values().entrySet()) { + cqCounter.setValue(e.getKey(), e.getValue()); + } + return cqCounter; + } + + public static final EncodedCQCounter NULL_COUNTER = new EncodedCQCounter() { + + @Override + public Integer getNextQualifier(String columnFamily) { + return null; + } + + @Override + public void setValue(String columnFamily, Integer value) { + } + + @Override + public boolean increment(String columnFamily) { + return false; + } + + @Override + public Map values() { + return Collections.emptyMap(); + } + + }; /** - * - * @param connection - * @return the set of column references for the columns included in the index where clause - * @throws SQLException + * Get the next qualifier to be used for the column family. This method also ends up + * initializing the counter if the column family already doesn't have one. */ - Set getIndexWhereColumns(PhoenixConnection connection) throws SQLException; + @Nullable + public Integer getNextQualifier(String columnFamily) { + Integer counter = familyCounters.get(columnFamily); + if (counter == null) { + counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; + familyCounters.put(columnFamily, counter); + } + return counter; + } + + public void setValue(String columnFamily, Integer value) { + familyCounters.put(columnFamily, value); + } + /** Returns true if the counter was incremented, false otherwise. */ + public boolean increment(String columnFamily) { + if (columnFamily == null) { + return false; + } + Integer counter = familyCounters.get(columnFamily); + if (counter == null) { + counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; + } + counter++; + familyCounters.put(columnFamily, counter); + return true; + } + + public Map values() { + return Collections.unmodifiableMap(familyCounters); + } + + } + + enum CDCChangeScope { /** - * Returns: Table level max lookback age if configured else null. + * Include only the actual change in image. */ - Long getMaxLookbackAge(); + CHANGE, /** - * @return Prefixed KeyRange generated by the expression representing the view statement. In - * other words this will be one-to-one mapping between view and PREFIXED KeyRange that'll exist. + * Include only the pre image (state prior to the change) of the row. */ - byte[] getRowKeyMatcher(); + PRE, /** - * Class to help track encoded column qualifier counters per column family. + * Include only the post image (state past the change) of the row. 
*/ - public class EncodedCQCounter { - - private final Map familyCounters = new HashMap<>(); - - /** - * Copy constructor - * @param counterToCopy - * @return copy of the passed counter - */ - public static EncodedCQCounter copy(EncodedCQCounter counterToCopy) { - EncodedCQCounter cqCounter = new EncodedCQCounter(); - for (Entry e : counterToCopy.values().entrySet()) { - cqCounter.setValue(e.getKey(), e.getValue()); - } - return cqCounter; - } - - public static final EncodedCQCounter NULL_COUNTER = new EncodedCQCounter() { - - @Override - public Integer getNextQualifier(String columnFamily) { - return null; - } - - @Override - public void setValue(String columnFamily, Integer value) { - } - - @Override - public boolean increment(String columnFamily) { - return false; - } - - @Override - public Map values() { - return Collections.emptyMap(); - } - - }; - - /** - * Get the next qualifier to be used for the column family. - * This method also ends up initializing the counter if the - * column family already doesn't have one. - */ - @Nullable - public Integer getNextQualifier(String columnFamily) { - Integer counter = familyCounters.get(columnFamily); - if (counter == null) { - counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; - familyCounters.put(columnFamily, counter); - } - return counter; - } - - public void setValue(String columnFamily, Integer value) { - familyCounters.put(columnFamily, value); - } - - /** - * - * @param columnFamily - * @return true if the counter was incremented, false otherwise. - */ - public boolean increment(String columnFamily) { - if (columnFamily == null) { - return false; - } - Integer counter = familyCounters.get(columnFamily); - if (counter == null) { - counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; - } - counter++; - familyCounters.put(columnFamily, counter); - return true; - } - - public Map values() { - return Collections.unmodifiableMap(familyCounters); - } - - } - - enum CDCChangeScope { - /** - * Include only the actual change in image. - */ - CHANGE, - - /** - * Include only the pre image (state prior to the change) of the row. - */ - PRE, - - /** - * Include only the post image (state past the change) of the row. - */ - POST, - } + POST, + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java index 8caa0f72e63..86af7ea6a0c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,20 +24,20 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_ENCODED_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION; import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES; @@ -52,66 +52,6 @@ import static org.apache.phoenix.schema.TableProperty.DEFAULT_COLUMN_FAMILY; import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.util.ByteStringer; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.compile.ExpressionCompiler; -import org.apache.phoenix.compile.FromCompiler; -import org.apache.phoenix.compile.QueryPlan; -import org.apache.phoenix.compile.StatementContext; -import org.apache.phoenix.coprocessor.generated.DynamicColumnMetaDataProtos; -import org.apache.phoenix.coprocessor.generated.PTableProtos; -import org.apache.phoenix.exception.DataExceedsCapacityException; -import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.LiteralExpression; -import org.apache.phoenix.expression.SingleCellConstructorExpression; -import org.apache.phoenix.hbase.index.covered.update.ColumnReference; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.hbase.index.util.KeyValueBuilder; -import 
org.apache.phoenix.index.IndexMaintainer; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.jdbc.PhoenixStatement; -import org.apache.phoenix.parse.ParseNode; -import org.apache.phoenix.parse.SQLParser; -import org.apache.phoenix.protobuf.ProtobufUtil; -import org.apache.phoenix.query.QueryConstants; -import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder; -import org.apache.phoenix.schema.transform.TransformMaintainer; -import org.apache.phoenix.schema.types.PBinary; -import org.apache.phoenix.schema.types.PChar; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PDouble; -import org.apache.phoenix.schema.types.PFloat; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSortedMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.transaction.TransactionFactory; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.EncodedColumnsUtil; -import org.apache.phoenix.util.MetaDataUtil; -import org.apache.phoenix.util.PhoenixRuntime; -import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.util.SizedUtil; -import org.apache.phoenix.util.TrustedByteArrayOutputStream; - import java.io.IOException; import java.sql.DriverManager; import java.sql.SQLException; @@ -122,18 +62,14 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import javax.annotation.Nonnull; import javax.annotation.Nonnull; -import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Delete; @@ -157,10 +93,10 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; +import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixPreparedStatement; import org.apache.phoenix.jdbc.PhoenixStatement; -import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.parse.ParseNode; import org.apache.phoenix.parse.SQLParser; import org.apache.phoenix.protobuf.ProtobufUtil; @@ -174,7 +110,6 @@ import org.apache.phoenix.schema.types.PFloat; import org.apache.phoenix.schema.types.PVarbinaryEncoded; import 
org.apache.phoenix.schema.types.PVarchar; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.base.Objects; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; @@ -198,2416 +133,2405 @@ import org.apache.phoenix.util.TrustedByteArrayOutputStream; /** - * - * Base class for PTable implementors. Provides abstraction for - * storing data in a single column (ColumnLayout.SINGLE) or in - * multiple columns (ColumnLayout.MULTI). - * + * Base class for PTable implementors. Provides abstraction for storing data in a single column + * (ColumnLayout.SINGLE) or in multiple columns (ColumnLayout.MULTI). * @since 0.1 */ public class PTableImpl implements PTable { - private static final int VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS = 0; - private static final int VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS = 1; + private static final int VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS = 0; + private static final int VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS = 1; + private IndexMaintainer indexMaintainer; + private TransformMaintainer transformMaintainer; + private ImmutableBytesWritable indexMaintainersPtr; + + private final PTableKey key; + private final PName name; + private final PName schemaName; + private final PName tableName; + private final PName physicalTableNameColumnInSyscat; + private final PName tenantId; + private final PTableType type; + private final PIndexState state; + private final long sequenceNumber; + private final long timeStamp; + private final long indexDisableTimestamp; + // Have MultiMap for String->PColumn (may need family qualifier) + private final List pkColumns; + private final List allColumns; + // columns that were inherited from a parent table but that were dropped in the view + private final List excludedColumns; + private final List families; + private final Map familyByBytes; + private final Map familyByString; + private final ListMultimap columnsByName; + private final Map kvColumnsByQualifiers; + private final PName pkName; + private final Integer bucketNum; + private final RowKeySchema rowKeySchema; + // Indexes associated with this table. + private final List indexes; + // If the table is going through transform, we have this. + private final PTable transformingNewTable; + + // Data table name that the index is created on. 
+ private final PName parentName; + private final PName parentSchemaName; + private final PName parentTableName; + private final PName baseTableLogicalName; + private final List physicalNames; + private final boolean isImmutableRows; + private final PName defaultFamilyName; + private final String viewStatement; + private final boolean disableWAL; + private final boolean multiTenant; + private final boolean storeNulls; + private final TransactionFactory.Provider transactionProvider; + private final ViewType viewType; + private final PDataType viewIndexIdType; + private final Long viewIndexId; + private final int estimatedSize; + private final IndexType indexType; + private final int baseColumnCount; + private final boolean rowKeyOrderOptimizable; // TODO: remove when required that tables have been + // upgrade for PHOENIX-2067 + private final boolean hasColumnsRequiringUpgrade; // TODO: remove when required that tables have + // been upgrade for PHOENIX-2067 + private final int rowTimestampColPos; + private final long updateCacheFrequency; + private final boolean isNamespaceMapped; + private final String autoPartitionSeqName; + private final boolean isAppendOnlySchema; + private final ImmutableStorageScheme immutableStorageScheme; + private final QualifierEncodingScheme qualifierEncodingScheme; + private final EncodedCQCounter encodedCQCounter; + private final Boolean useStatsForParallelization; + private final int ttl; + private final BitSet viewModifiedPropSet; + private final Long lastDDLTimestamp; + private final boolean isChangeDetectionEnabled; + private Map propertyValues; + private String schemaVersion; + private String externalSchemaId; + private String streamingTopicName; + private byte[] rowKeyMatcher; + private String indexWhere; + private Expression indexWhereExpression; + private Set indexWhereColumns; + private Long maxLookbackAge; + private Map ancestorLastDDLTimestampMap; + private Set cdcIncludeScopes; + + public static class Builder { + private PTableKey key; + private PName name; + private PName schemaName = PName.EMPTY_NAME; + private PName tableName = PName.EMPTY_NAME; + private PName physicalTableName = PName.EMPTY_NAME; + private PName tenantId; + private PTableType type; + private PIndexState state; + private long sequenceNumber; + private long timeStamp; + private long indexDisableTimestamp; + private List pkColumns; + private List allColumns; + private List excludedColumns; + private List families; + private Map familyByBytes; + private Map familyByString; + private ListMultimap columnsByName; + private Map kvColumnsByQualifiers; + private PName pkName; + private Integer bucketNum; + private RowKeySchema rowKeySchema; + private List indexes; + private PTable transformingNewTable; + private PName parentName; + private PName parentSchemaName; + private PName parentTableName; + private PName baseTableLogicalName; + private List physicalNames; + private boolean isImmutableRows; private IndexMaintainer indexMaintainer; - private TransformMaintainer transformMaintainer; private ImmutableBytesWritable indexMaintainersPtr; - - private final PTableKey key; - private final PName name; - private final PName schemaName; - private final PName tableName; - private final PName physicalTableNameColumnInSyscat; - private final PName tenantId; - private final PTableType type; - private final PIndexState state; - private final long sequenceNumber; - private final long timeStamp; - private final long indexDisableTimestamp; - // Have MultiMap for String->PColumn (may need family 
qualifier) - private final List pkColumns; - private final List allColumns; - // columns that were inherited from a parent table but that were dropped in the view - private final List excludedColumns; - private final List families; - private final Map familyByBytes; - private final Map familyByString; - private final ListMultimap columnsByName; - private final Map kvColumnsByQualifiers; - private final PName pkName; - private final Integer bucketNum; - private final RowKeySchema rowKeySchema; - // Indexes associated with this table. - private final List indexes; - // If the table is going through transform, we have this. - private final PTable transformingNewTable; - - // Data table name that the index is created on. - private final PName parentName; - private final PName parentSchemaName; - private final PName parentTableName; - private final PName baseTableLogicalName; - private final List physicalNames; - private final boolean isImmutableRows; - private final PName defaultFamilyName; - private final String viewStatement; - private final boolean disableWAL; - private final boolean multiTenant; - private final boolean storeNulls; - private final TransactionFactory.Provider transactionProvider; - private final ViewType viewType; - private final PDataType viewIndexIdType; - private final Long viewIndexId; - private final int estimatedSize; - private final IndexType indexType; - private final int baseColumnCount; - private final boolean rowKeyOrderOptimizable; // TODO: remove when required that tables have been upgrade for PHOENIX-2067 - private final boolean hasColumnsRequiringUpgrade; // TODO: remove when required that tables have been upgrade for PHOENIX-2067 - private final int rowTimestampColPos; - private final long updateCacheFrequency; - private final boolean isNamespaceMapped; - private final String autoPartitionSeqName; - private final boolean isAppendOnlySchema; - private final ImmutableStorageScheme immutableStorageScheme; - private final QualifierEncodingScheme qualifierEncodingScheme; - private final EncodedCQCounter encodedCQCounter; - private final Boolean useStatsForParallelization; - private final int ttl; - private final BitSet viewModifiedPropSet; - private final Long lastDDLTimestamp; - private final boolean isChangeDetectionEnabled; - private Map propertyValues; + private PName defaultFamilyName; + private String viewStatement; + private boolean disableWAL; + private boolean multiTenant; + private boolean storeNulls; + private TransactionFactory.Provider transactionProvider; + private ViewType viewType; + private PDataType viewIndexIdType; + private Long viewIndexId; + private int estimatedSize; + private IndexType indexType; + private int baseColumnCount; + private boolean rowKeyOrderOptimizable; + private boolean hasColumnsRequiringUpgrade; + private int rowTimestampColPos; + private long updateCacheFrequency; + private boolean isNamespaceMapped; + private String autoPartitionSeqName; + private boolean isAppendOnlySchema; + private ImmutableStorageScheme immutableStorageScheme; + private QualifierEncodingScheme qualifierEncodingScheme; + private EncodedCQCounter encodedCQCounter; + private Boolean useStatsForParallelization; + private Long lastDDLTimestamp; + private boolean isChangeDetectionEnabled = false; + private Map propertyValues = new HashMap<>(); private String schemaVersion; private String externalSchemaId; private String streamingTopicName; - private byte[] rowKeyMatcher; + private Set cdcIncludeScopes; private String indexWhere; - private Expression 
indexWhereExpression; - private Set indexWhereColumns; private Long maxLookbackAge; - private Map ancestorLastDDLTimestampMap; - private Set cdcIncludeScopes; + private Map ancestorLastDDLTimestampMap = new HashMap<>(); + private int ttl; + private byte[] rowKeyMatcher; - public static class Builder { - private PTableKey key; - private PName name; - private PName schemaName = PName.EMPTY_NAME; - private PName tableName = PName.EMPTY_NAME; - private PName physicalTableName = PName.EMPTY_NAME; - private PName tenantId; - private PTableType type; - private PIndexState state; - private long sequenceNumber; - private long timeStamp; - private long indexDisableTimestamp; - private List pkColumns; - private List allColumns; - private List excludedColumns; - private List families; - private Map familyByBytes; - private Map familyByString; - private ListMultimap columnsByName; - private Map kvColumnsByQualifiers; - private PName pkName; - private Integer bucketNum; - private RowKeySchema rowKeySchema; - private List indexes; - private PTable transformingNewTable; - private PName parentName; - private PName parentSchemaName; - private PName parentTableName; - private PName baseTableLogicalName; - private List physicalNames; - private boolean isImmutableRows; - private IndexMaintainer indexMaintainer; - private ImmutableBytesWritable indexMaintainersPtr; - private PName defaultFamilyName; - private String viewStatement; - private boolean disableWAL; - private boolean multiTenant; - private boolean storeNulls; - private TransactionFactory.Provider transactionProvider; - private ViewType viewType; - private PDataType viewIndexIdType; - private Long viewIndexId; - private int estimatedSize; - private IndexType indexType; - private int baseColumnCount; - private boolean rowKeyOrderOptimizable; - private boolean hasColumnsRequiringUpgrade; - private int rowTimestampColPos; - private long updateCacheFrequency; - private boolean isNamespaceMapped; - private String autoPartitionSeqName; - private boolean isAppendOnlySchema; - private ImmutableStorageScheme immutableStorageScheme; - private QualifierEncodingScheme qualifierEncodingScheme; - private EncodedCQCounter encodedCQCounter; - private Boolean useStatsForParallelization; - private Long lastDDLTimestamp; - private boolean isChangeDetectionEnabled = false; - private Map propertyValues = new HashMap<>(); - private String schemaVersion; - private String externalSchemaId; - private String streamingTopicName; - private Set cdcIncludeScopes; - private String indexWhere; - private Long maxLookbackAge; - private Map ancestorLastDDLTimestampMap = new HashMap<>(); - private int ttl; - private byte[] rowKeyMatcher; - - // Used to denote which properties a view has explicitly modified - private BitSet viewModifiedPropSet = new BitSet(3); - // Optionally set columns for the builder, but not for the actual PTable - private Collection columns; - - public Builder setKey(PTableKey key) { - this.key = key; - return this; - } + // Used to denote which properties a view has explicitly modified + private BitSet viewModifiedPropSet = new BitSet(3); + // Optionally set columns for the builder, but not for the actual PTable + private Collection columns; - public Builder setName(PName name) { - this.name = name; - return this; - } + public Builder setKey(PTableKey key) { + this.key = key; + return this; + } - public Builder setSchemaName(PName schemaName) { - this.schemaName = schemaName; - return this; - } + public Builder setName(PName name) { + this.name = name; + return 
this; + } - public Builder setTableName(PName tableName) { - this.tableName = tableName; - return this; - } + public Builder setSchemaName(PName schemaName) { + this.schemaName = schemaName; + return this; + } - public Builder setTenantId(PName tenantId) { - this.tenantId = tenantId; - return this; - } + public Builder setTableName(PName tableName) { + this.tableName = tableName; + return this; + } - public Builder setType(PTableType type) { - this.type = type; - return this; - } + public Builder setTenantId(PName tenantId) { + this.tenantId = tenantId; + return this; + } - public Builder setState(PIndexState state) { - if (state != null) { - propertyValues.put(INDEX_STATE, state.getSerializedValue()); - } - this.state = state; - return this; - } + public Builder setType(PTableType type) { + this.type = type; + return this; + } - public Builder setSequenceNumber(long sequenceNumber) { - this.sequenceNumber = sequenceNumber; - return this; - } + public Builder setState(PIndexState state) { + if (state != null) { + propertyValues.put(INDEX_STATE, state.getSerializedValue()); + } + this.state = state; + return this; + } - public Builder setTimeStamp(long timeStamp) { - this.timeStamp = timeStamp; - return this; - } + public Builder setSequenceNumber(long sequenceNumber) { + this.sequenceNumber = sequenceNumber; + return this; + } - public Builder setIndexDisableTimestamp(long indexDisableTimestamp) { - this.indexDisableTimestamp = indexDisableTimestamp; - return this; - } + public Builder setTimeStamp(long timeStamp) { + this.timeStamp = timeStamp; + return this; + } - public Builder setPkColumns(List pkColumns) { - this.pkColumns = pkColumns; - return this; - } + public Builder setIndexDisableTimestamp(long indexDisableTimestamp) { + this.indexDisableTimestamp = indexDisableTimestamp; + return this; + } - public Builder setAllColumns(List allColumns) { - this.allColumns = allColumns; - return this; - } + public Builder setPkColumns(List pkColumns) { + this.pkColumns = pkColumns; + return this; + } - public Builder setExcludedColumns(List excludedColumns) { - this.excludedColumns = excludedColumns; - return this; - } + public Builder setAllColumns(List allColumns) { + this.allColumns = allColumns; + return this; + } - public Builder setFamilyAttributes(List families) { - this.familyByBytes = Maps.newHashMapWithExpectedSize(families.size()); - this.familyByString = Maps.newHashMapWithExpectedSize(families.size()); - for (PColumnFamily family : families) { - familyByBytes.put(family.getName().getBytes(), family); - familyByString.put(family.getName().getString(), family); - } - this.families = families; - return this; - } + public Builder setExcludedColumns(List excludedColumns) { + this.excludedColumns = excludedColumns; + return this; + } - public Builder setFamilies(List families) { - this.families = families; - return this; - } + public Builder setFamilyAttributes(List families) { + this.familyByBytes = Maps.newHashMapWithExpectedSize(families.size()); + this.familyByString = Maps.newHashMapWithExpectedSize(families.size()); + for (PColumnFamily family : families) { + familyByBytes.put(family.getName().getBytes(), family); + familyByString.put(family.getName().getString(), family); + } + this.families = families; + return this; + } - public Builder setFamilyByBytes(Map familyByBytes) { - this.familyByBytes = familyByBytes; - return this; - } + public Builder setFamilies(List families) { + this.families = families; + return this; + } - public Builder setFamilyByString(Map familyByString) { - 
this.familyByString = familyByString; - return this; - } + public Builder setFamilyByBytes(Map familyByBytes) { + this.familyByBytes = familyByBytes; + return this; + } - public Builder setColumnsByName(ListMultimap columnsByName) { - this.columnsByName = columnsByName; - return this; - } + public Builder setFamilyByString(Map familyByString) { + this.familyByString = familyByString; + return this; + } - public Builder setKvColumnsByQualifiers(Map kvColumnsByQualifiers) { - this.kvColumnsByQualifiers = kvColumnsByQualifiers; - return this; - } + public Builder setColumnsByName(ListMultimap columnsByName) { + this.columnsByName = columnsByName; + return this; + } - public Builder setPkName(PName pkName) { - this.pkName = pkName; - return this; - } + public Builder + setKvColumnsByQualifiers(Map kvColumnsByQualifiers) { + this.kvColumnsByQualifiers = kvColumnsByQualifiers; + return this; + } - public Builder setBucketNum(Integer bucketNum) { - if(bucketNum!=null) { - propertyValues.put(SALT_BUCKETS, String.valueOf(bucketNum)); - } - this.bucketNum = bucketNum; - return this; - } + public Builder setPkName(PName pkName) { + this.pkName = pkName; + return this; + } - public Builder setRowKeySchema(RowKeySchema rowKeySchema) { - this.rowKeySchema = rowKeySchema; - return this; - } + public Builder setBucketNum(Integer bucketNum) { + if (bucketNum != null) { + propertyValues.put(SALT_BUCKETS, String.valueOf(bucketNum)); + } + this.bucketNum = bucketNum; + return this; + } - public Builder setIndexes(List indexes) { - this.indexes = indexes; - return this; - } + public Builder setRowKeySchema(RowKeySchema rowKeySchema) { + this.rowKeySchema = rowKeySchema; + return this; + } - public Builder setTransformingNewTable(PTable transformingNewTable) { - this.transformingNewTable = transformingNewTable; - return this; - } + public Builder setIndexes(List indexes) { + this.indexes = indexes; + return this; + } - public Builder setParentName(PName parentName) { - this.parentName = parentName; - return this; - } + public Builder setTransformingNewTable(PTable transformingNewTable) { + this.transformingNewTable = transformingNewTable; + return this; + } - public Builder setParentSchemaName(PName parentSchemaName) { - this.parentSchemaName = parentSchemaName; - return this; - } + public Builder setParentName(PName parentName) { + this.parentName = parentName; + return this; + } - public Builder setParentTableName(PName parentTableName) { - this.parentTableName = parentTableName; - return this; - } + public Builder setParentSchemaName(PName parentSchemaName) { + this.parentSchemaName = parentSchemaName; + return this; + } - public Builder setBaseTableLogicalName(PName baseTableLogicalName) { - this.baseTableLogicalName = baseTableLogicalName; - return this; - } + public Builder setParentTableName(PName parentTableName) { + this.parentTableName = parentTableName; + return this; + } - public Builder setPhysicalNames(List physicalNames) { - this.physicalNames = physicalNames; - return this; - } + public Builder setBaseTableLogicalName(PName baseTableLogicalName) { + this.baseTableLogicalName = baseTableLogicalName; + return this; + } - public Builder setImmutableRows(boolean immutableRows) { - propertyValues.put(IMMUTABLE_ROWS, String.valueOf(immutableRows)); - isImmutableRows = immutableRows; - return this; - } + public Builder setPhysicalNames(List physicalNames) { + this.physicalNames = physicalNames; + return this; + } - public Builder setIndexMaintainer(IndexMaintainer indexMaintainer) { - 
this.indexMaintainer = indexMaintainer; - return this; - } + public Builder setImmutableRows(boolean immutableRows) { + propertyValues.put(IMMUTABLE_ROWS, String.valueOf(immutableRows)); + isImmutableRows = immutableRows; + return this; + } - public Builder setIndexMaintainersPtr(ImmutableBytesWritable indexMaintainersPtr) { - this.indexMaintainersPtr = indexMaintainersPtr; - return this; - } + public Builder setIndexMaintainer(IndexMaintainer indexMaintainer) { + this.indexMaintainer = indexMaintainer; + return this; + } - public Builder setDefaultFamilyName(PName defaultFamilyName) { - if (defaultFamilyName != null){ - propertyValues.put(DEFAULT_COLUMN_FAMILY_NAME, defaultFamilyName.getString()); - } - this.defaultFamilyName = defaultFamilyName; - return this; - } + public Builder setIndexMaintainersPtr(ImmutableBytesWritable indexMaintainersPtr) { + this.indexMaintainersPtr = indexMaintainersPtr; + return this; + } - public Builder setViewStatement(String viewStatement) { - this.viewStatement = viewStatement; - return this; - } + public Builder setDefaultFamilyName(PName defaultFamilyName) { + if (defaultFamilyName != null) { + propertyValues.put(DEFAULT_COLUMN_FAMILY_NAME, defaultFamilyName.getString()); + } + this.defaultFamilyName = defaultFamilyName; + return this; + } - public Builder setDisableWAL(boolean disableWAL) { - propertyValues.put(DISABLE_WAL, String.valueOf(disableWAL)); - this.disableWAL = disableWAL; - return this; - } + public Builder setViewStatement(String viewStatement) { + this.viewStatement = viewStatement; + return this; + } - public Builder setMultiTenant(boolean multiTenant) { - propertyValues.put(MULTI_TENANT, String.valueOf(multiTenant)); - this.multiTenant = multiTenant; - return this; - } + public Builder setDisableWAL(boolean disableWAL) { + propertyValues.put(DISABLE_WAL, String.valueOf(disableWAL)); + this.disableWAL = disableWAL; + return this; + } - public Builder setStoreNulls(boolean storeNulls) { - this.storeNulls = storeNulls; - return this; - } + public Builder setMultiTenant(boolean multiTenant) { + propertyValues.put(MULTI_TENANT, String.valueOf(multiTenant)); + this.multiTenant = multiTenant; + return this; + } - public Builder setTransactionProvider(TransactionFactory.Provider transactionProvider) { - if(transactionProvider != null) { - propertyValues.put(TRANSACTION_PROVIDER, String.valueOf(transactionProvider)); - } - this.transactionProvider = transactionProvider; - return this; - } + public Builder setStoreNulls(boolean storeNulls) { + this.storeNulls = storeNulls; + return this; + } - public Builder setViewType(ViewType viewType) { - this.viewType = viewType; - return this; - } + public Builder setTransactionProvider(TransactionFactory.Provider transactionProvider) { + if (transactionProvider != null) { + propertyValues.put(TRANSACTION_PROVIDER, String.valueOf(transactionProvider)); + } + this.transactionProvider = transactionProvider; + return this; + } - public Builder setViewIndexIdType(PDataType viewIndexIdType) { - this.viewIndexIdType = viewIndexIdType; - return this; - } + public Builder setViewType(ViewType viewType) { + this.viewType = viewType; + return this; + } - public Builder setViewIndexId(Long viewIndexId) { - this.viewIndexId = viewIndexId; - return this; - } + public Builder setViewIndexIdType(PDataType viewIndexIdType) { + this.viewIndexIdType = viewIndexIdType; + return this; + } - public Builder setEstimatedSize(int estimatedSize) { - this.estimatedSize = estimatedSize; - return this; - } + public Builder 
setViewIndexId(Long viewIndexId) { + this.viewIndexId = viewIndexId; + return this; + } - public Builder setIndexType(IndexType indexType) { - this.indexType = indexType; - return this; - } + public Builder setEstimatedSize(int estimatedSize) { + this.estimatedSize = estimatedSize; + return this; + } - public Builder setBaseColumnCount(int baseColumnCount) { - this.baseColumnCount = baseColumnCount; - return this; - } + public Builder setIndexType(IndexType indexType) { + this.indexType = indexType; + return this; + } - public Builder setRowKeyOrderOptimizable(boolean rowKeyOrderOptimizable) { - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - return this; - } + public Builder setBaseColumnCount(int baseColumnCount) { + this.baseColumnCount = baseColumnCount; + return this; + } - public Builder setHasColumnsRequiringUpgrade(boolean hasColumnsRequiringUpgrade) { - this.hasColumnsRequiringUpgrade = hasColumnsRequiringUpgrade; - return this; - } + public Builder setRowKeyOrderOptimizable(boolean rowKeyOrderOptimizable) { + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + return this; + } - public Builder setRowTimestampColPos(int rowTimestampColPos) { - this.rowTimestampColPos = rowTimestampColPos; - return this; - } + public Builder setHasColumnsRequiringUpgrade(boolean hasColumnsRequiringUpgrade) { + this.hasColumnsRequiringUpgrade = hasColumnsRequiringUpgrade; + return this; + } - public Builder setPhysicalTableName(PName physicalTableName) { - if (physicalTableName != null) { - propertyValues.put(PHYSICAL_TABLE_NAME, String.valueOf(physicalTableName)); - } - if (this.physicalTableName.equals(PName.EMPTY_NAME) && physicalTableName == null) { - //don't override a "blank" PName with null. - return this; - } - this.physicalTableName = physicalTableName; - return this; - } + public Builder setRowTimestampColPos(int rowTimestampColPos) { + this.rowTimestampColPos = rowTimestampColPos; + return this; + } - public Builder setUpdateCacheFrequency(long updateCacheFrequency) { - propertyValues.put(UPDATE_CACHE_FREQUENCY, String.valueOf(updateCacheFrequency)); - this.updateCacheFrequency = updateCacheFrequency; - return this; - } + public Builder setPhysicalTableName(PName physicalTableName) { + if (physicalTableName != null) { + propertyValues.put(PHYSICAL_TABLE_NAME, String.valueOf(physicalTableName)); + } + if (this.physicalTableName.equals(PName.EMPTY_NAME) && physicalTableName == null) { + // don't override a "blank" PName with null. 
+ return this; + } + this.physicalTableName = physicalTableName; + return this; + } - public Builder setNamespaceMapped(boolean namespaceMapped) { - isNamespaceMapped = namespaceMapped; - return this; - } + public Builder setUpdateCacheFrequency(long updateCacheFrequency) { + propertyValues.put(UPDATE_CACHE_FREQUENCY, String.valueOf(updateCacheFrequency)); + this.updateCacheFrequency = updateCacheFrequency; + return this; + } - public Builder setAutoPartitionSeqName(String autoPartitionSeqName) { - propertyValues.put(AUTO_PARTITION_SEQ, autoPartitionSeqName); - this.autoPartitionSeqName = autoPartitionSeqName; - return this; - } + public Builder setNamespaceMapped(boolean namespaceMapped) { + isNamespaceMapped = namespaceMapped; + return this; + } - public Builder setAppendOnlySchema(boolean appendOnlySchema) { - propertyValues.put(APPEND_ONLY_SCHEMA, String.valueOf(appendOnlySchema)); - isAppendOnlySchema = appendOnlySchema; - return this; - } + public Builder setAutoPartitionSeqName(String autoPartitionSeqName) { + propertyValues.put(AUTO_PARTITION_SEQ, autoPartitionSeqName); + this.autoPartitionSeqName = autoPartitionSeqName; + return this; + } - public Builder setImmutableStorageScheme(ImmutableStorageScheme immutableStorageScheme) { - propertyValues.put(IMMUTABLE_STORAGE_SCHEME, immutableStorageScheme.toString()); - this.immutableStorageScheme = immutableStorageScheme; - return this; - } + public Builder setAppendOnlySchema(boolean appendOnlySchema) { + propertyValues.put(APPEND_ONLY_SCHEMA, String.valueOf(appendOnlySchema)); + isAppendOnlySchema = appendOnlySchema; + return this; + } - public Builder setQualifierEncodingScheme(QualifierEncodingScheme qualifierEncodingScheme) { - propertyValues.put(ENCODING_SCHEME, qualifierEncodingScheme.toString()); - this.qualifierEncodingScheme = qualifierEncodingScheme; - return this; - } + public Builder setImmutableStorageScheme(ImmutableStorageScheme immutableStorageScheme) { + propertyValues.put(IMMUTABLE_STORAGE_SCHEME, immutableStorageScheme.toString()); + this.immutableStorageScheme = immutableStorageScheme; + return this; + } - public Builder setEncodedCQCounter(EncodedCQCounter encodedCQCounter) { - this.encodedCQCounter = encodedCQCounter; - return this; - } + public Builder setQualifierEncodingScheme(QualifierEncodingScheme qualifierEncodingScheme) { + propertyValues.put(ENCODING_SCHEME, qualifierEncodingScheme.toString()); + this.qualifierEncodingScheme = qualifierEncodingScheme; + return this; + } - public Builder setUseStatsForParallelization(Boolean useStatsForParallelization) { - if(useStatsForParallelization!=null) { - propertyValues.put(USE_STATS_FOR_PARALLELIZATION, String.valueOf(useStatsForParallelization)); - } - this.useStatsForParallelization = useStatsForParallelization; - return this; - } + public Builder setEncodedCQCounter(EncodedCQCounter encodedCQCounter) { + this.encodedCQCounter = encodedCQCounter; + return this; + } - public Builder setViewModifiedUpdateCacheFrequency(boolean modified) { - this.viewModifiedPropSet.set(VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS, - modified); - return this; - } + public Builder setUseStatsForParallelization(Boolean useStatsForParallelization) { + if (useStatsForParallelization != null) { + propertyValues.put(USE_STATS_FOR_PARALLELIZATION, + String.valueOf(useStatsForParallelization)); + } + this.useStatsForParallelization = useStatsForParallelization; + return this; + } - public Builder setViewModifiedUseStatsForParallelization(boolean modified) { - 
this.viewModifiedPropSet.set(VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS, - modified); - return this; - } + public Builder setViewModifiedUpdateCacheFrequency(boolean modified) { + this.viewModifiedPropSet.set(VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS, modified); + return this; + } - public Builder setTTL(int ttl) { - propertyValues.put(TTL, String.valueOf(ttl)); - this.ttl = ttl; - return this; - } + public Builder setViewModifiedUseStatsForParallelization(boolean modified) { + this.viewModifiedPropSet.set(VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS, + modified); + return this; + } - /** - * Note: When set in the builder, we must call {@link Builder#initDerivedAttributes()} - * before building the PTable in order to correctly populate other attributes of the PTable - * @param columns PColumns to be set in the builder - * @return PTableImpl.Builder object - */ - public Builder setColumns(Collection columns) { - this.columns = columns; - return this; - } + public Builder setTTL(int ttl) { + propertyValues.put(TTL, String.valueOf(ttl)); + this.ttl = ttl; + return this; + } - public Builder addOrSetColumns(Collection changedColumns) { - if (this.columns == null || this.columns.size() == 0) { - //no need to merge, just take the changes as the complete set of PColumns - this.columns = changedColumns; - } else { - //We have to merge the old and new columns, keeping the columns in the original order - List existingColumnList = Lists.newArrayList(this.columns); - List columnsToAdd = Lists.newArrayList(); - //create a new list that's almost a copy of this.columns, but everywhere there's - //a "newer" PColumn of an existing column in the parameter, replace it with the - //newer version - for (PColumn newColumn : changedColumns) { - int indexOf = existingColumnList.indexOf(newColumn); - if (indexOf != -1) { - existingColumnList.set(indexOf, newColumn); - } else { - columnsToAdd.add(newColumn); - } - } - //now tack on any completely new columns at the end - existingColumnList.addAll(columnsToAdd); - this.columns = existingColumnList; - } - return this; - } + /** + * Note: When set in the builder, we must call {@link Builder#initDerivedAttributes()} before + * building the PTable in order to correctly populate other attributes of the PTable + * @param columns PColumns to be set in the builder + * @return PTableImpl.Builder object + */ + public Builder setColumns(Collection columns) { + this.columns = columns; + return this; + } - public Builder setLastDDLTimestamp(Long lastDDLTimestamp) { - this.lastDDLTimestamp = lastDDLTimestamp; - return this; + public Builder addOrSetColumns(Collection changedColumns) { + if (this.columns == null || this.columns.size() == 0) { + // no need to merge, just take the changes as the complete set of PColumns + this.columns = changedColumns; + } else { + // We have to merge the old and new columns, keeping the columns in the original order + List existingColumnList = Lists.newArrayList(this.columns); + List columnsToAdd = Lists.newArrayList(); + // create a new list that's almost a copy of this.columns, but everywhere there's + // a "newer" PColumn of an existing column in the parameter, replace it with the + // newer version + for (PColumn newColumn : changedColumns) { + int indexOf = existingColumnList.indexOf(newColumn); + if (indexOf != -1) { + existingColumnList.set(indexOf, newColumn); + } else { + columnsToAdd.add(newColumn); + } } + // now tack on any completely new columns at the end + existingColumnList.addAll(columnsToAdd); + 
this.columns = existingColumnList; + } + return this; + } - public Builder setIsChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { - if (isChangeDetectionEnabled != null) { - this.isChangeDetectionEnabled = isChangeDetectionEnabled; - } - return this; - } + public Builder setLastDDLTimestamp(Long lastDDLTimestamp) { + this.lastDDLTimestamp = lastDDLTimestamp; + return this; + } - public Builder setSchemaVersion(String schemaVersion) { - if (schemaVersion != null) { - this.schemaVersion = schemaVersion; - } - return this; - } + public Builder setIsChangeDetectionEnabled(Boolean isChangeDetectionEnabled) { + if (isChangeDetectionEnabled != null) { + this.isChangeDetectionEnabled = isChangeDetectionEnabled; + } + return this; + } - public Builder setExternalSchemaId(String externalSchemaId) { - if (externalSchemaId != null) { - this.externalSchemaId = externalSchemaId; - } - return this; - } + public Builder setSchemaVersion(String schemaVersion) { + if (schemaVersion != null) { + this.schemaVersion = schemaVersion; + } + return this; + } - public Builder setStreamingTopicName(String streamingTopicName) { - if (streamingTopicName != null) { - this.streamingTopicName = streamingTopicName; - } - return this; - } + public Builder setExternalSchemaId(String externalSchemaId) { + if (externalSchemaId != null) { + this.externalSchemaId = externalSchemaId; + } + return this; + } - public Builder setRowKeyMatcher(byte[] rowKeyMatcher) { - if (rowKeyMatcher != null) { - this.rowKeyMatcher = rowKeyMatcher; - } - return this; - } + public Builder setStreamingTopicName(String streamingTopicName) { + if (streamingTopicName != null) { + this.streamingTopicName = streamingTopicName; + } + return this; + } - public Builder setIndexWhere(String indexWhere) { - if (indexWhere != null) { - this.indexWhere = indexWhere; - } - return this; - } + public Builder setRowKeyMatcher(byte[] rowKeyMatcher) { + if (rowKeyMatcher != null) { + this.rowKeyMatcher = rowKeyMatcher; + } + return this; + } - public Builder setMaxLookbackAge(Long maxLookbackAge) { - if (maxLookbackAge != null) { - propertyValues.put(MAX_LOOKBACK_AGE, String.valueOf(maxLookbackAge)); - } - this.maxLookbackAge = maxLookbackAge; - return this; - } + public Builder setIndexWhere(String indexWhere) { + if (indexWhere != null) { + this.indexWhere = indexWhere; + } + return this; + } - public Builder setAncestorLastDDLTimestampMap(Map map) { - this.ancestorLastDDLTimestampMap = map; - return this; - } + public Builder setMaxLookbackAge(Long maxLookbackAge) { + if (maxLookbackAge != null) { + propertyValues.put(MAX_LOOKBACK_AGE, String.valueOf(maxLookbackAge)); + } + this.maxLookbackAge = maxLookbackAge; + return this; + } - public Builder setCDCIncludeScopes(Set cdcIncludeScopes) { - if (cdcIncludeScopes != null) { - this.cdcIncludeScopes = cdcIncludeScopes; - } - return this; - } + public Builder setAncestorLastDDLTimestampMap(Map map) { + this.ancestorLastDDLTimestampMap = map; + return this; + } - /** - * Populate derivable attributes of the PTable - * @return PTableImpl.Builder object - * @throws SQLException - */ - private Builder initDerivedAttributes() throws SQLException { - checkTenantId(this.tenantId); - Preconditions.checkNotNull(this.schemaName); - Preconditions.checkNotNull(this.tableName); - Preconditions.checkNotNull(this.columns); - Preconditions.checkNotNull(this.indexes); - Preconditions.checkNotNull(this.physicalNames); - //hasColumnsRequiringUpgrade and rowKeyOrderOptimizable are booleans and can never be - // null, so no 
need to check them - PName fullName = PNameFactory.newName(SchemaUtil.getTableName( - this.schemaName.getString(), this.tableName.getString())); - int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + - 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE + - PNameFactory.getEstimatedSize(this.tenantId) + - PNameFactory.getEstimatedSize(this.schemaName) + - PNameFactory.getEstimatedSize(this.tableName) + - PNameFactory.getEstimatedSize(this.pkName) + - PNameFactory.getEstimatedSize(this.parentTableName) + - PNameFactory.getEstimatedSize(this.defaultFamilyName); - int numPKColumns = 0; - List pkColumns; - PColumn[] allColumns; - if (this.bucketNum != null) { - // Add salt column to allColumns and pkColumns, but don't add to - // columnsByName, since it should not be addressable via name. - allColumns = new PColumn[this.columns.size()+1]; - allColumns[SALTING_COLUMN.getPosition()] = SALTING_COLUMN; - pkColumns = Lists.newArrayListWithExpectedSize(this.columns.size()+1); - ++numPKColumns; - } else { - allColumns = new PColumn[this.columns.size()]; - pkColumns = Lists.newArrayListWithExpectedSize(this.columns.size()); - } - // Must do this as with the new method of storing diffs, we just care about - // ordinal position relative order and not the true ordinal value itself. - List sortedColumns = Lists.newArrayList(this.columns); - Collections.sort(sortedColumns, new Comparator() { - @Override - public int compare(PColumn o1, PColumn o2) { - return Integer.compare(o1.getPosition(), o2.getPosition()); - } - }); - - // With the new uncovered index code, we pass the data table columns to the index - // PTable. This wreaks havoc with the code used disambiguate column qualifiers. - // localColumns only holds the actual columns of the table, and not external references - List localColumns = new ArrayList<>(this.columns.size()); - - //TODO should we just pass the global indexref columns separately instead ? - for (PColumn column : sortedColumns) { - if (!(column instanceof ProjectedColumn && ((ProjectedColumn) column) - .getSourceColumnRef() instanceof IndexUncoveredDataColumnRef)) { - localColumns.add(column); - } - } + public Builder setCDCIncludeScopes(Set cdcIncludeScopes) { + if (cdcIncludeScopes != null) { + this.cdcIncludeScopes = cdcIncludeScopes; + } + return this; + } - int position = 0; - if (this.bucketNum != null) { - position = 1; - } - ListMultimap populateColumnsByName = - ArrayListMultimap.create(this.columns.size(), 1); - for (PColumn column : sortedColumns) { - allColumns[position] = column; - position++; - PName familyName = column.getFamilyName(); - if (familyName == null) { - ++numPKColumns; - } - String columnName = column.getName().getString(); - if (populateColumnsByName.put(columnName, column)) { - int count = 0; - for (PColumn dupColumn : populateColumnsByName.get(columnName)) { - if (Objects.equal(familyName, dupColumn.getFamilyName())) { - count++; - if (count > 1) { - throw new ColumnAlreadyExistsException(this.schemaName.getString(), - fullName.getString(), columnName); - } - } - } - } - } - Map populateKvColumnsByQualifiers = - Maps.newHashMapWithExpectedSize(localColumns.size()); - for (PColumn column : localColumns) { - byte[] cq = column.getColumnQualifierBytes(); - String cf = column.getFamilyName() != null ? 
- column.getFamilyName().getString() : null; - if (cf != null && cq != null) { - KVColumnFamilyQualifier info = new KVColumnFamilyQualifier(cf, cq); - if (populateKvColumnsByQualifiers.get(info) != null) { - throw new ColumnAlreadyExistsException(this.schemaName.getString(), - fullName.getString(), column.getName().getString()); - } - populateKvColumnsByQualifiers.put(info, column); - } - } - estimatedSize += SizedUtil.sizeOfMap(allColumns.length, SizedUtil.POINTER_SIZE, - SizedUtil.sizeOfArrayList(1)); // for multi-map - estimatedSize += SizedUtil.sizeOfMap(numPKColumns) + - SizedUtil.sizeOfMap(allColumns.length); - - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(numPKColumns); - // Two pass so that column order in column families matches overall column order - // and to ensure that column family order is constant - int maxExpectedSize = allColumns.length - numPKColumns; - // Maintain iteration order so that column families are ordered as they are listed - Map> familyMap = Maps.newLinkedHashMap(); - PColumn rowTimestampCol = null; - boolean hasColsRequiringUpgrade = false; - for (PColumn column : allColumns) { - PName familyName = column.getFamilyName(); - if (familyName == null) { - hasColsRequiringUpgrade |= - (column.getSortOrder() == SortOrder.DESC - && (!column.getDataType().isFixedWidth() - || column.getDataType() == PChar.INSTANCE - || column.getDataType() == PFloat.INSTANCE - || column.getDataType() == PDouble.INSTANCE - || column.getDataType() == PBinary.INSTANCE) ) - || (column.getSortOrder() == SortOrder.ASC - && column.getDataType() == PBinary.INSTANCE - && column.getMaxLength() != null - && column.getMaxLength() > 1); - pkColumns.add(column); - if (column.isRowTimestamp()) { - rowTimestampCol = column; - } - estimatedSize += column.getEstimatedSize(); // PK columns - builder.addField(column, column.isNullable(), column.getSortOrder()); - } - } - for (PColumn column : localColumns) { - PName familyName = column.getFamilyName(); - if (familyName != null) { - List columnsInFamily = familyMap.get(familyName); - if (columnsInFamily == null) { - columnsInFamily = Lists.newArrayListWithExpectedSize(maxExpectedSize); - familyMap.put(familyName, columnsInFamily); - } - columnsInFamily.add(column); - } - } - int rowTimestampColPos; - if (rowTimestampCol != null) { - rowTimestampColPos = pkColumns.indexOf(rowTimestampCol); - } else { - rowTimestampColPos = -1; + /** + * Populate derivable attributes of the PTable + * @return PTableImpl.Builder object + */ + private Builder initDerivedAttributes() throws SQLException { + checkTenantId(this.tenantId); + Preconditions.checkNotNull(this.schemaName); + Preconditions.checkNotNull(this.tableName); + Preconditions.checkNotNull(this.columns); + Preconditions.checkNotNull(this.indexes); + Preconditions.checkNotNull(this.physicalNames); + // hasColumnsRequiringUpgrade and rowKeyOrderOptimizable are booleans and can never be + // null, so no need to check them + PName fullName = PNameFactory + .newName(SchemaUtil.getTableName(this.schemaName.getString(), this.tableName.getString())); + int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE + + PNameFactory.getEstimatedSize(this.tenantId) + + PNameFactory.getEstimatedSize(this.schemaName) + + PNameFactory.getEstimatedSize(this.tableName) + PNameFactory.getEstimatedSize(this.pkName) + + PNameFactory.getEstimatedSize(this.parentTableName) + + 
PNameFactory.getEstimatedSize(this.defaultFamilyName); + int numPKColumns = 0; + List pkColumns; + PColumn[] allColumns; + if (this.bucketNum != null) { + // Add salt column to allColumns and pkColumns, but don't add to + // columnsByName, since it should not be addressable via name. + allColumns = new PColumn[this.columns.size() + 1]; + allColumns[SALTING_COLUMN.getPosition()] = SALTING_COLUMN; + pkColumns = Lists.newArrayListWithExpectedSize(this.columns.size() + 1); + ++numPKColumns; + } else { + allColumns = new PColumn[this.columns.size()]; + pkColumns = Lists.newArrayListWithExpectedSize(this.columns.size()); + } + // Must do this as with the new method of storing diffs, we just care about + // ordinal position relative order and not the true ordinal value itself. + List sortedColumns = Lists.newArrayList(this.columns); + Collections.sort(sortedColumns, new Comparator() { + @Override + public int compare(PColumn o1, PColumn o2) { + return Integer.compare(o1.getPosition(), o2.getPosition()); + } + }); + + // With the new uncovered index code, we pass the data table columns to the index + // PTable. This wreaks havoc with the code used disambiguate column qualifiers. + // localColumns only holds the actual columns of the table, and not external references + List localColumns = new ArrayList<>(this.columns.size()); + + // TODO should we just pass the global indexref columns separately instead ? + for (PColumn column : sortedColumns) { + if ( + !(column instanceof ProjectedColumn && ((ProjectedColumn) column) + .getSourceColumnRef() instanceof IndexUncoveredDataColumnRef) + ) { + localColumns.add(column); + } + } + + int position = 0; + if (this.bucketNum != null) { + position = 1; + } + ListMultimap populateColumnsByName = + ArrayListMultimap.create(this.columns.size(), 1); + for (PColumn column : sortedColumns) { + allColumns[position] = column; + position++; + PName familyName = column.getFamilyName(); + if (familyName == null) { + ++numPKColumns; + } + String columnName = column.getName().getString(); + if (populateColumnsByName.put(columnName, column)) { + int count = 0; + for (PColumn dupColumn : populateColumnsByName.get(columnName)) { + if (Objects.equal(familyName, dupColumn.getFamilyName())) { + count++; + if (count > 1) { + throw new ColumnAlreadyExistsException(this.schemaName.getString(), + fullName.getString(), columnName); + } } - - Iterator>> iterator = familyMap.entrySet().iterator(); - PColumnFamily[] families = new PColumnFamily[familyMap.size()]; - ImmutableMap.Builder familyByString = ImmutableMap.builder(); - ImmutableSortedMap.Builder familyByBytes = ImmutableSortedMap - .orderedBy(Bytes.BYTES_COMPARATOR); - for (int i = 0; i < families.length; i++) { - Map.Entry> entry = iterator.next(); - PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue()); - families[i] = family; - familyByString.put(family.getName().getString(), family); - familyByBytes.put(family.getName().getBytes(), family); - estimatedSize += family.getEstimatedSize(); + } + } + } + Map populateKvColumnsByQualifiers = + Maps.newHashMapWithExpectedSize(localColumns.size()); + for (PColumn column : localColumns) { + byte[] cq = column.getColumnQualifierBytes(); + String cf = column.getFamilyName() != null ? 
column.getFamilyName().getString() : null; + if (cf != null && cq != null) { + KVColumnFamilyQualifier info = new KVColumnFamilyQualifier(cf, cq); + if (populateKvColumnsByQualifiers.get(info) != null) { + throw new ColumnAlreadyExistsException(this.schemaName.getString(), + fullName.getString(), column.getName().getString()); + } + populateKvColumnsByQualifiers.put(info, column); + } + } + estimatedSize += SizedUtil.sizeOfMap(allColumns.length, SizedUtil.POINTER_SIZE, + SizedUtil.sizeOfArrayList(1)); // for multi-map + estimatedSize += SizedUtil.sizeOfMap(numPKColumns) + SizedUtil.sizeOfMap(allColumns.length); + + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(numPKColumns); + // Two pass so that column order in column families matches overall column order + // and to ensure that column family order is constant + int maxExpectedSize = allColumns.length - numPKColumns; + // Maintain iteration order so that column families are ordered as they are listed + Map> familyMap = Maps.newLinkedHashMap(); + PColumn rowTimestampCol = null; + boolean hasColsRequiringUpgrade = false; + for (PColumn column : allColumns) { + PName familyName = column.getFamilyName(); + if (familyName == null) { + hasColsRequiringUpgrade |= (column.getSortOrder() == SortOrder.DESC + && (!column.getDataType().isFixedWidth() || column.getDataType() == PChar.INSTANCE + || column.getDataType() == PFloat.INSTANCE || column.getDataType() == PDouble.INSTANCE + || column.getDataType() == PBinary.INSTANCE)) + || (column.getSortOrder() == SortOrder.ASC && column.getDataType() == PBinary.INSTANCE + && column.getMaxLength() != null && column.getMaxLength() > 1); + pkColumns.add(column); + if (column.isRowTimestamp()) { + rowTimestampCol = column; + } + estimatedSize += column.getEstimatedSize(); // PK columns + builder.addField(column, column.isNullable(), column.getSortOrder()); + } + } + for (PColumn column : localColumns) { + PName familyName = column.getFamilyName(); + if (familyName != null) { + List columnsInFamily = familyMap.get(familyName); + if (columnsInFamily == null) { + columnsInFamily = Lists.newArrayListWithExpectedSize(maxExpectedSize); + familyMap.put(familyName, columnsInFamily); + } + columnsInFamily.add(column); + } + } + int rowTimestampColPos; + if (rowTimestampCol != null) { + rowTimestampColPos = pkColumns.indexOf(rowTimestampCol); + } else { + rowTimestampColPos = -1; + } + + Iterator>> iterator = familyMap.entrySet().iterator(); + PColumnFamily[] families = new PColumnFamily[familyMap.size()]; + ImmutableMap.Builder familyByString = ImmutableMap.builder(); + ImmutableSortedMap.Builder familyByBytes = + ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR); + for (int i = 0; i < families.length; i++) { + Map.Entry> entry = iterator.next(); + PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue()); + families[i] = family; + familyByString.put(family.getName().getString(), family); + familyByBytes.put(family.getName().getBytes(), family); + estimatedSize += family.getEstimatedSize(); + } + estimatedSize += SizedUtil.sizeOfArrayList(families.length); + estimatedSize += SizedUtil.sizeOfMap(families.length) * 2; + for (PTable index : this.indexes) { + estimatedSize += index.getEstimatedSize(); + } + if (transformingNewTable != null) { + estimatedSize += transformingNewTable.getEstimatedSize(); + } + + estimatedSize += PNameFactory.getEstimatedSize(this.parentName); + for (PName physicalName : this.physicalNames) { + estimatedSize += physicalName.getEstimatedSize(); + } + // Populate 
the derived fields and return the builder + return this.setName(fullName).setKey(new PTableKey(this.tenantId, fullName.getString())) + .setParentName(this.parentTableName == null + ? null + : PNameFactory.newName(SchemaUtil.getTableName( + this.parentSchemaName != null ? this.parentSchemaName.getString() : null, + this.parentTableName.getString()))) + .setColumnsByName(populateColumnsByName) + .setKvColumnsByQualifiers(populateKvColumnsByQualifiers) + .setAllColumns(ImmutableList.copyOf(allColumns)) + .setHasColumnsRequiringUpgrade(hasColsRequiringUpgrade | this.hasColumnsRequiringUpgrade) + .setPkColumns(ImmutableList.copyOf(pkColumns)).setRowTimestampColPos(rowTimestampColPos) + // after hasDescVarLengthColumns is calculated + .setRowKeySchema(builder + .rowKeyOrderOptimizable(this.rowKeyOrderOptimizable || !this.hasColumnsRequiringUpgrade) + .build()) + .setFamilies(ImmutableList.copyOf(families)).setFamilyByBytes(familyByBytes.build()) + .setFamilyByString(familyByString.build()) + .setEstimatedSize(estimatedSize + this.rowKeySchema.getEstimatedSize()); + } + + public PTableImpl build() throws SQLException { + // Note that we call initDerivedAttributes to populate derivable attributes if + // this.columns is set in the PTableImpl.Builder object + return (this.columns == null) + ? new PTableImpl(this) + : new PTableImpl(this.initDerivedAttributes()); + } + + } + + @VisibleForTesting + PTableImpl() { + this(new PTableImpl.Builder().setIndexes(Collections.emptyList()) + .setPhysicalNames(Collections.emptyList()).setRowKeySchema(RowKeySchema.EMPTY_SCHEMA)); + } + + // Private constructor used by the builder + private PTableImpl(Builder builder) { + this.key = builder.key; + this.name = builder.name; + this.schemaName = builder.schemaName; + this.tableName = builder.tableName; + this.physicalTableNameColumnInSyscat = builder.physicalTableName; + this.tenantId = builder.tenantId; + this.type = builder.type; + this.state = builder.state; + this.sequenceNumber = builder.sequenceNumber; + this.timeStamp = builder.timeStamp; + this.indexDisableTimestamp = builder.indexDisableTimestamp; + this.pkColumns = builder.pkColumns; + this.allColumns = builder.allColumns; + this.excludedColumns = builder.excludedColumns; + this.families = builder.families; + this.familyByBytes = builder.familyByBytes; + this.familyByString = builder.familyByString; + this.columnsByName = builder.columnsByName; + this.kvColumnsByQualifiers = builder.kvColumnsByQualifiers; + this.pkName = builder.pkName; + this.bucketNum = builder.bucketNum; + this.rowKeySchema = builder.rowKeySchema; + this.indexes = builder.indexes; + this.transformingNewTable = builder.transformingNewTable; + this.parentName = builder.parentName; + this.parentSchemaName = builder.parentSchemaName; + this.parentTableName = builder.parentTableName; + this.baseTableLogicalName = builder.baseTableLogicalName; + this.physicalNames = builder.physicalNames; + this.isImmutableRows = builder.isImmutableRows; + this.indexMaintainer = builder.indexMaintainer; + this.indexMaintainersPtr = builder.indexMaintainersPtr; + this.defaultFamilyName = builder.defaultFamilyName; + this.viewStatement = builder.viewStatement; + this.disableWAL = builder.disableWAL; + this.multiTenant = builder.multiTenant; + this.storeNulls = builder.storeNulls; + this.transactionProvider = builder.transactionProvider; + this.viewType = builder.viewType; + this.viewIndexIdType = builder.viewIndexIdType; + this.viewIndexId = builder.viewIndexId; + this.estimatedSize = builder.estimatedSize; + 
this.indexType = builder.indexType; + this.baseColumnCount = builder.baseColumnCount; + this.rowKeyOrderOptimizable = builder.rowKeyOrderOptimizable; + this.hasColumnsRequiringUpgrade = builder.hasColumnsRequiringUpgrade; + this.rowTimestampColPos = builder.rowTimestampColPos; + this.updateCacheFrequency = builder.updateCacheFrequency; + this.isNamespaceMapped = builder.isNamespaceMapped; + this.autoPartitionSeqName = builder.autoPartitionSeqName; + this.isAppendOnlySchema = builder.isAppendOnlySchema; + this.immutableStorageScheme = builder.immutableStorageScheme; + this.qualifierEncodingScheme = builder.qualifierEncodingScheme; + this.encodedCQCounter = builder.encodedCQCounter; + this.useStatsForParallelization = builder.useStatsForParallelization; + this.ttl = builder.ttl; + this.viewModifiedPropSet = builder.viewModifiedPropSet; + this.propertyValues = builder.propertyValues; + this.lastDDLTimestamp = builder.lastDDLTimestamp; + this.isChangeDetectionEnabled = builder.isChangeDetectionEnabled; + this.schemaVersion = builder.schemaVersion; + this.externalSchemaId = builder.externalSchemaId; + this.streamingTopicName = builder.streamingTopicName; + this.cdcIncludeScopes = builder.cdcIncludeScopes; + this.indexWhere = builder.indexWhere; + this.maxLookbackAge = builder.maxLookbackAge; + this.ancestorLastDDLTimestampMap = builder.ancestorLastDDLTimestampMap; + this.rowKeyMatcher = builder.rowKeyMatcher; + } + + // When cloning table, ignore the salt column as it will be added back in the constructor + public static List getColumnsToClone(PTable table) { + return table == null + ? Collections. emptyList() + : (table.getBucketNum() == null + ? table.getColumns() + : table.getColumns().subList(1, table.getColumns().size())); + } + + /** + * Get a PTableImpl.Builder from an existing PTable and set the builder columns + * @param table Original PTable + * @param columns Columns to set in the builder for the new PTable to be constructed + * @return PTable builder object based on an existing PTable + */ + public static PTableImpl.Builder builderWithColumns(PTable table, Collection columns) { + return builderFromExisting(table).setColumns(columns); + } + + /** + * Get a PTableImpl.Builder from an existing PTable + * @param table Original PTable + */ + public static PTableImpl.Builder builderFromExisting(PTable table) { + return new PTableImpl.Builder().setType(table.getType()).setState(table.getIndexState()) + .setTimeStamp(table.getTimeStamp()).setIndexDisableTimestamp(table.getIndexDisableTimestamp()) + .setSequenceNumber(table.getSequenceNumber()).setImmutableRows(table.isImmutableRows()) + .setViewStatement(table.getViewStatement()).setDisableWAL(table.isWALDisabled()) + .setMultiTenant(table.isMultiTenant()).setStoreNulls(table.getStoreNulls()) + .setViewType(table.getViewType()).setViewIndexIdType(table.getviewIndexIdType()) + .setViewIndexId(table.getViewIndexId()).setIndexType(table.getIndexType()) + .setTransactionProvider(table.getTransactionProvider()) + .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) + .setNamespaceMapped(table.isNamespaceMapped()) + .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) + .setAppendOnlySchema(table.isAppendOnlySchema()) + .setImmutableStorageScheme(table.getImmutableStorageScheme() == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : table.getImmutableStorageScheme()) + .setQualifierEncodingScheme(table.getEncodingScheme() == null + ? 
QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : table.getEncodingScheme()) + .setBaseColumnCount(table.getBaseColumnCount()) + .setEncodedCQCounter(table.getEncodedCQCounter()) + .setUseStatsForParallelization(table.useStatsForParallelization()) + .setExcludedColumns(table.getExcludedColumns() == null + ? ImmutableList.of() + : ImmutableList.copyOf(table.getExcludedColumns())) + .setTenantId(table.getTenantId()).setSchemaName(table.getSchemaName()) + .setTableName(table.getTableName()).setPhysicalTableName(table.getPhysicalName(true)) + .setPkName(table.getPKName()).setDefaultFamilyName(table.getDefaultFamilyName()) + .setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()).setBucketNum(table.getBucketNum()) + .setIndexes(table.getIndexes() == null ? Collections.emptyList() : table.getIndexes()) + .setTransformingNewTable(table.getTransformingNewTable()) + .setParentSchemaName(table.getParentSchemaName()) + .setParentTableName(table.getParentTableName()) + .setBaseTableLogicalName(table.getBaseTableLogicalName()) + .setPhysicalNames(table.getPhysicalNames() == null + ? ImmutableList.of() + : ImmutableList.copyOf(table.getPhysicalNames())) + .setViewModifiedUseStatsForParallelization(table.hasViewModifiedUseStatsForParallelization()) + .setViewModifiedUpdateCacheFrequency(table.hasViewModifiedUpdateCacheFrequency()) + .setLastDDLTimestamp(table.getLastDDLTimestamp()) + .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) + .setSchemaVersion(table.getSchemaVersion()).setExternalSchemaId(table.getExternalSchemaId()) + .setStreamingTopicName(table.getStreamingTopicName()).setIndexWhere(table.getIndexWhere()) + .setMaxLookbackAge(table.getMaxLookbackAge()).setCDCIncludeScopes(table.getCDCIncludeScopes()) + .setAncestorLastDDLTimestampMap(table.getAncestorLastDDLTimestampMap()).setTTL(table.getTTL()) + .setRowKeyMatcher(table.getRowKeyMatcher()); + } + + @Override + public long getUpdateCacheFrequency() { + return updateCacheFrequency; + } + + @Override + public boolean isMultiTenant() { + return multiTenant; + } + + @Override + public boolean getStoreNulls() { + return storeNulls; + } + + @Override + public ViewType getViewType() { + return viewType; + } + + @Override + public int getEstimatedSize() { + return estimatedSize; + } + + public static void checkTenantId(PName tenantId) { + // tenantId should be null or not empty + Preconditions.checkArgument(tenantId == null || tenantId.getBytes().length > 0); + } + + @Override + public boolean isImmutableRows() { + return isImmutableRows; + } + + @Override + public String toString() { + return name.getString(); + } + + @Override + public List getPKColumns() { + return pkColumns; + } + + @Override + public final PName getName() { + return name; + } + + @Override + public final PName getSchemaName() { + return schemaName; + } + + @Override + public final PName getTableName() { + return tableName; + } + + @Override + public final PTableType getType() { + return type; + } + + @Override + public final List getColumnFamilies() { + return families; + } + + @Override + public boolean hasOnlyPkColumns() { + return allColumns.stream().allMatch(SchemaUtil::isPKColumn); + } + + @Override + public int newKey(ImmutableBytesWritable key, byte[][] values) { + List columns = getPKColumns(); + int nValues = values.length; + while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) { + nValues--; + } + for (PColumn column : columns) { + if (column.getExpressionStr() != null) { + nValues++; + } + } + int i = 0; + 
TrustedByteArrayOutputStream os = + new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this)); + try { + Integer bucketNum = this.getBucketNum(); + if (bucketNum != null) { + // Write place holder for salt byte + i++; + os.write(QueryConstants.SEPARATOR_BYTE_ARRAY); + } + int nColumns = columns.size(); + PDataType type = null; + SortOrder sortOrder = null; + boolean wasNull = false; + + while (i < nValues && i < nColumns) { + // Separate variable length column values in key with zero byte + if (type != null && !type.isFixedWidth()) { + os.write( + SchemaUtil.getSeparatorBytes(type, rowKeyOrderOptimizable(), wasNull, sortOrder)); + } + PColumn column = columns.get(i); + sortOrder = column.getSortOrder(); + type = column.getDataType(); + // This will throw if the value is null and the type doesn't allow null + byte[] byteValue = values[i++]; + if (byteValue == null) { + if (column.getExpressionStr() != null) { + try { + String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + PhoenixRuntime.CONNECTIONLESS; + PhoenixConnection conn = + DriverManager.getConnection(url).unwrap(PhoenixConnection.class); + StatementContext context = new StatementContext(new PhoenixStatement(conn)); + + ExpressionCompiler compiler = new ExpressionCompiler(context); + ParseNode defaultParseNode = + new SQLParser(column.getExpressionStr()).parseExpression(); + Expression defaultExpression = defaultParseNode.accept(compiler); + defaultExpression.evaluate(null, key); + column.getDataType().coerceBytes(key, null, defaultExpression.getDataType(), + defaultExpression.getMaxLength(), defaultExpression.getScale(), + defaultExpression.getSortOrder(), column.getMaxLength(), column.getScale(), + column.getSortOrder()); + byteValue = ByteUtil.copyKeyBytesIfNecessary(key); + } catch (SQLException e) { // should not be possible + throw new ConstraintViolationException( + name.getString() + "." + column.getName().getString() + + " failed to compile default value expression of " + column.getExpressionStr()); } - estimatedSize += SizedUtil.sizeOfArrayList(families.length); - estimatedSize += SizedUtil.sizeOfMap(families.length) * 2; - for (PTable index : this.indexes) { - estimatedSize += index.getEstimatedSize(); + } else { + byteValue = ByteUtil.EMPTY_BYTE_ARRAY; + } + } + wasNull = byteValue.length == 0; + // An empty byte array return value means null. Do this, + // since a type may have muliple representations of null. + // For example, VARCHAR treats both null and an empty string + // as null. This way we don't need to leak that part of the + // implementation outside of PDataType by checking the value + // here. + if (byteValue.length == 0 && !column.isNullable()) { + throw new ConstraintViolationException( + name.getString() + "." 
+ column.getName().getString() + " may not be null"); + } + Integer maxLength = column.getMaxLength(); + Integer scale = column.getScale(); + key.set(byteValue); + if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) { + throw new DataExceedsCapacityException(column.getDataType(), maxLength, column.getScale(), + column.getName().getString()); + } + key.set(byteValue); + type.pad(key, maxLength, sortOrder); + byteValue = ByteUtil.copyKeyBytesIfNecessary(key); + os.write(byteValue, 0, byteValue.length); + } + // Need trailing byte for DESC columns + if (type != null && !type.isFixedWidth()) { + if (type != PVarbinaryEncoded.INSTANCE) { + if ( + SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder) + == QueryConstants.DESC_SEPARATOR_BYTE + ) { + os.write(QueryConstants.DESC_SEPARATOR_BYTE); + } + } else { + byte[] separatorBytes = SchemaUtil + .getSeparatorBytesForVarBinaryEncoded(rowKeyOrderOptimizable(), wasNull, sortOrder); + if (separatorBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { + os.write(separatorBytes, 0, separatorBytes.length); + } + } + } + // If some non null pk values aren't set, then throw + if (i < nColumns) { + PColumn column = columns.get(i); + if (column.getDataType().isFixedWidth() || !column.isNullable()) { + throw new ConstraintViolationException( + name.getString() + "." + column.getName().getString() + " may not be null"); + } + } + if (nValues == 0) { + throw new ConstraintViolationException( + "Primary key may not be null (" + name.getString() + ")"); + } + byte[] buf = os.getBuffer(); + int size = os.size(); + if (bucketNum != null) { + buf[0] = SaltingUtil.getSaltingByte(buf, 1, size - 1, bucketNum); + } + key.set(buf, 0, size); + return i; + } finally { + try { + os.close(); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + } + } + + private PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, int i, + boolean hasOnDupKey, byte[]... values) { + PRow row = new PRowImpl(builder, key, ts, getBucketNum(), hasOnDupKey); + if (i < values.length) { + for (PColumnFamily family : getColumnFamilies()) { + for (PColumn column : family.getColumns()) { + row.setValue(column, values[i++]); + if (i == values.length) return row; + } + } + } + return row; + } + + @Override + public PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, + boolean hasOnDupKey, byte[]... values) { + return newRow(builder, ts, key, 0, hasOnDupKey, values); + } + + @Override + public PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, + byte[]... values) { + return newRow(builder, HConstants.LATEST_TIMESTAMP, key, hasOnDupKey, values); + } + + @Override + public PColumn getColumnForColumnName(String name) + throws ColumnNotFoundException, AmbiguousColumnException { + String schemaNameStr = schemaName == null ? null : schemaName.getString(); + String tableNameStr = tableName == null ? 
null : tableName.getString(); + + // Throw exception if trying to create a column name that does not exist + if (columnsByName == null) { + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); + } + + List columns = columnsByName.get(name); + int size = columns.size(); + if (size == 0) { + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); + + } + if (size > 1) { + for (PColumn column : columns) { + if ( + column.getFamilyName() == null + || QueryConstants.DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().getString()) + ) { + // Allow ambiguity with PK column or column in the default column family, + // since a PK column cannot be prefixed and a user would not know how to + // prefix a column in the default column family. + return column; + } + } + throw new AmbiguousColumnException(name); + } + return columns.get(0); + } + + @Override + public PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) + throws ColumnNotFoundException, AmbiguousColumnException { + Preconditions.checkNotNull(cq); + if (!EncodedColumnsUtil.usesEncodedColumnNames(this) || cf == null) { + String columnName = (String) PVarchar.INSTANCE.toObject(cq); + return getColumnForColumnName(columnName); + } else { + String family = (String) PVarchar.INSTANCE.toObject(cf); + PColumn col = kvColumnsByQualifiers.get(new KVColumnFamilyQualifier(family, cq)); + if (col == null) { + String schemaNameStr = schemaName == null ? null : schemaName.getString(); + String tableNameStr = tableName == null ? null : tableName.getString(); + throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, + "No column found for column qualifier " + qualifierEncodingScheme.decode(cq)); + } + return col; + } + } + + /** + * PRow implementation for ColumnLayout.MULTI mode which stores column values across multiple + * hbase columns. + * @since 0.1 + */ + private class PRowImpl implements PRow { + private final byte[] key; + private final ImmutableBytesWritable keyPtr; + // default to the generic builder, and only override when we know on the client + private final KeyValueBuilder kvBuilder; + + private Mutation setValues; + private Delete unsetValues; + private Mutation deleteRow; + private final long ts; + private final boolean hasOnDupKey; + // map from column name to value + private Map columnToValueMap; + // Map from the column family name to the list of dynamic columns in that column family. + // If there are no dynamic columns in a column family, the key for that column family + // will not exist in the map, rather than the corresponding value being an empty list. 
+ private Map> colFamToDynamicColumnsMapping; + + PRowImpl(KeyValueBuilder kvBuilder, ImmutableBytesWritable key, long ts, Integer bucketNum, + boolean hasOnDupKey) { + this.kvBuilder = kvBuilder; + this.ts = ts; + this.hasOnDupKey = hasOnDupKey; + if (bucketNum != null) { + this.key = SaltingUtil.getSaltedKey(key, bucketNum); + this.keyPtr = new ImmutableBytesPtr(this.key); + } else { + this.keyPtr = new ImmutableBytesPtr(key); + this.key = ByteUtil.copyKeyBytesIfNecessary(key); + } + this.columnToValueMap = Maps.newHashMapWithExpectedSize(1); + this.colFamToDynamicColumnsMapping = Maps.newHashMapWithExpectedSize(1); + newMutations(); + } + + private void newMutations() { + Mutation put = new Put(this.key); + Delete delete = new Delete(this.key); + if (isWALDisabled()) { + put.setDurability(Durability.SKIP_WAL); + delete.setDurability(Durability.SKIP_WAL); + } + this.setValues = put; + this.unsetValues = delete; + } + + @Override + public List toRowMutations() { + List mutations = new ArrayList(3); + if (deleteRow != null) { + // Include only deleteRow mutation if present because it takes precedence over all others + mutations.add(deleteRow); + } else { + // store all columns for a given column family in a single cell instead of one column per + // cell in order to improve write performance + if ( + immutableStorageScheme != null + && immutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN + ) { + Put put = new Put(this.key); + if (isWALDisabled()) { + put.setDurability(Durability.SKIP_WAL); + } + // the setValues Put contains one cell per column, we need to convert it to a Put that + // contains a cell with all columns for a given column family + for (PColumnFamily family : families) { + byte[] columnFamily = family.getName().getBytes(); + Collection columns = family.getColumns(); + int maxEncodedColumnQualifier = Integer.MIN_VALUE; + for (PColumn column : columns) { + int qualifier = qualifierEncodingScheme.decode(column.getColumnQualifierBytes()); + maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, qualifier); } - if (transformingNewTable!=null) { - estimatedSize += transformingNewTable.getEstimatedSize(); + Expression[] colValues = + EncodedColumnsUtil.createColumnExpressionArray(maxEncodedColumnQualifier); + for (PColumn column : columns) { + if (columnToValueMap.containsKey(column)) { + int colIndex = qualifierEncodingScheme.decode(column.getColumnQualifierBytes()) + - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 1; + colValues[colIndex] = new LiteralExpression(columnToValueMap.get(column)); + } } - estimatedSize += PNameFactory.getEstimatedSize(this.parentName); - for (PName physicalName : this.physicalNames) { - estimatedSize += physicalName.getEstimatedSize(); + List children = Arrays.asList(colValues); + // we use SingleCellConstructorExpression to serialize all the columns into a single + // byte[] + SingleCellConstructorExpression singleCellConstructorExpression = + new SingleCellConstructorExpression(immutableStorageScheme, children); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + singleCellConstructorExpression.evaluate(null, ptr); + ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily); + addQuietly(put, kvBuilder.buildPut(keyPtr, colFamilyPtr, + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR, ts, ptr)); + } + // Preserve the attributes of the original mutation + Map attrsMap = setValues.getAttributesMap(); + setValues = put; + for (String attrKey : attrsMap.keySet()) { + 
setValues.setAttribute(attrKey, attrsMap.get(attrKey)); + } + } + // Because we cannot enforce a not null constraint on a KV column (since we don't know if + // the row exists when + // we upsert it), so instead add a KV that is always empty. This allows us to imitate SQL + // semantics given the + // way HBase works. + Pair emptyKvInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(PTableImpl.this); + addQuietly(setValues, + kvBuilder.buildPut(keyPtr, SchemaUtil.getEmptyColumnFamilyPtr(PTableImpl.this), + new ImmutableBytesPtr(emptyKvInfo.getFirst()), ts, + new ImmutableBytesPtr(emptyKvInfo.getSecond()))); + mutations.add(setValues); + if (!unsetValues.isEmpty()) { + mutations.add(unsetValues); + } + } + return mutations; + } + + private void removeIfPresent(Mutation m, byte[] family, byte[] qualifier) { + Map> familyMap = m.getFamilyCellMap(); + List kvs = familyMap.get(family); + if (kvs != null) { + Iterator iterator = kvs.iterator(); + while (iterator.hasNext()) { + Cell kv = iterator.next(); + if ( + Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0 + ) { + iterator.remove(); + break; + } + } + } + } + + @Override + public void setValue(PColumn column, byte[] byteValue) { + deleteRow = null; + byte[] family = column.getFamilyName().getBytes(); + byte[] qualifier = column.getColumnQualifierBytes(); + ImmutableBytesPtr qualifierPtr = new ImmutableBytesPtr(qualifier); + PDataType type = column.getDataType(); + // Check null, since some types have no byte representation for null + if (byteValue == null) { + byteValue = ByteUtil.EMPTY_BYTE_ARRAY; + } + boolean isNull = type.isNull(byteValue); + if (isNull && !column.isNullable()) { + throw new ConstraintViolationException( + name.getString() + "." + column.getName().getString() + " may not be null"); + } else if (isNull && PTableImpl.this.isImmutableRows() && column.getExpressionStr() == null) { + // Store nulls for immutable tables otherwise default value would be used + removeIfPresent(setValues, family, qualifier); + removeIfPresent(unsetValues, family, qualifier); + } else + if (isNull && !getStoreNulls() && !this.hasOnDupKey && column.getExpressionStr() == null) { + // Cannot use column delete marker when row has ON DUPLICATE KEY clause + // because we cannot change a Delete mutation to a Put mutation in the + // case of updates occurring due to the execution of the clause. 
+ removeIfPresent(setValues, family, qualifier); + deleteQuietly(unsetValues, kvBuilder, kvBuilder.buildDeleteColumns(keyPtr, + column.getFamilyName().getBytesPtr(), qualifierPtr, ts)); + } else { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(byteValue); + Integer maxLength = column.getMaxLength(); + Integer scale = column.getScale(); + SortOrder sortOrder = column.getSortOrder(); + if (!type.isSizeCompatible(ptr, null, type, sortOrder, null, null, maxLength, scale)) { + throw new DataExceedsCapacityException(column.getDataType(), maxLength, + column.getScale(), column.getName().getString()); + } + ptr.set(byteValue); + type.pad(ptr, maxLength, sortOrder); + removeIfPresent(unsetValues, family, qualifier); + // store all columns for a given column family in a single cell instead of one column per + // cell in order to improve write performance + // we don't need to do anything with unsetValues as it is only used when storeNulls is + // false, storeNulls is always true when storeColsInSingleCell is true + if (immutableStorageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { + columnToValueMap.put(column, ptr.get()); + } else { + removeIfPresent(unsetValues, family, qualifier); + addQuietly(setValues, kvBuilder.buildPut(keyPtr, column.getFamilyName().getBytesPtr(), + qualifierPtr, ts, ptr)); + } + String fam = Bytes.toString(family); + if (column.isDynamic()) { + if (!this.colFamToDynamicColumnsMapping.containsKey(fam)) { + this.colFamToDynamicColumnsMapping.put(fam, new ArrayList<>()); } - // Populate the derived fields and return the builder - return this.setName(fullName) - .setKey(new PTableKey(this.tenantId, fullName.getString())) - .setParentName(this.parentTableName == null ? null : - PNameFactory.newName(SchemaUtil.getTableName( - this.parentSchemaName != null ? - this.parentSchemaName.getString() : null, - this.parentTableName.getString()))) - .setColumnsByName(populateColumnsByName) - .setKvColumnsByQualifiers(populateKvColumnsByQualifiers) - .setAllColumns(ImmutableList.copyOf(allColumns)) - .setHasColumnsRequiringUpgrade(hasColsRequiringUpgrade - | this.hasColumnsRequiringUpgrade) - .setPkColumns(ImmutableList.copyOf(pkColumns)) - .setRowTimestampColPos(rowTimestampColPos) - // after hasDescVarLengthColumns is calculated - .setRowKeySchema(builder.rowKeyOrderOptimizable( - this.rowKeyOrderOptimizable || !this.hasColumnsRequiringUpgrade) - .build()) - .setFamilies(ImmutableList.copyOf(families)) - .setFamilyByBytes(familyByBytes.build()) - .setFamilyByString(familyByString.build()) - .setEstimatedSize(estimatedSize + this.rowKeySchema.getEstimatedSize()); + this.colFamToDynamicColumnsMapping.get(fam).add(column); + } } - - public PTableImpl build() throws SQLException { - // Note that we call initDerivedAttributes to populate derivable attributes if - // this.columns is set in the PTableImpl.Builder object - return (this.columns == null) ? 
new PTableImpl(this) : - new PTableImpl(this.initDerivedAttributes()); - } - - } - - @VisibleForTesting - PTableImpl() { - this(new PTableImpl.Builder() - .setIndexes(Collections.emptyList()) - .setPhysicalNames(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA)); - } - - // Private constructor used by the builder - private PTableImpl(Builder builder) { - this.key = builder.key; - this.name = builder.name; - this.schemaName = builder.schemaName; - this.tableName = builder.tableName; - this.physicalTableNameColumnInSyscat = builder.physicalTableName; - this.tenantId = builder.tenantId; - this.type = builder.type; - this.state = builder.state; - this.sequenceNumber = builder.sequenceNumber; - this.timeStamp = builder.timeStamp; - this.indexDisableTimestamp = builder.indexDisableTimestamp; - this.pkColumns = builder.pkColumns; - this.allColumns = builder.allColumns; - this.excludedColumns = builder.excludedColumns; - this.families = builder.families; - this.familyByBytes = builder.familyByBytes; - this.familyByString = builder.familyByString; - this.columnsByName = builder.columnsByName; - this.kvColumnsByQualifiers = builder.kvColumnsByQualifiers; - this.pkName = builder.pkName; - this.bucketNum = builder.bucketNum; - this.rowKeySchema = builder.rowKeySchema; - this.indexes = builder.indexes; - this.transformingNewTable = builder.transformingNewTable; - this.parentName = builder.parentName; - this.parentSchemaName = builder.parentSchemaName; - this.parentTableName = builder.parentTableName; - this.baseTableLogicalName = builder.baseTableLogicalName; - this.physicalNames = builder.physicalNames; - this.isImmutableRows = builder.isImmutableRows; - this.indexMaintainer = builder.indexMaintainer; - this.indexMaintainersPtr = builder.indexMaintainersPtr; - this.defaultFamilyName = builder.defaultFamilyName; - this.viewStatement = builder.viewStatement; - this.disableWAL = builder.disableWAL; - this.multiTenant = builder.multiTenant; - this.storeNulls = builder.storeNulls; - this.transactionProvider = builder.transactionProvider; - this.viewType = builder.viewType; - this.viewIndexIdType = builder.viewIndexIdType; - this.viewIndexId = builder.viewIndexId; - this.estimatedSize = builder.estimatedSize; - this.indexType = builder.indexType; - this.baseColumnCount = builder.baseColumnCount; - this.rowKeyOrderOptimizable = builder.rowKeyOrderOptimizable; - this.hasColumnsRequiringUpgrade = builder.hasColumnsRequiringUpgrade; - this.rowTimestampColPos = builder.rowTimestampColPos; - this.updateCacheFrequency = builder.updateCacheFrequency; - this.isNamespaceMapped = builder.isNamespaceMapped; - this.autoPartitionSeqName = builder.autoPartitionSeqName; - this.isAppendOnlySchema = builder.isAppendOnlySchema; - this.immutableStorageScheme = builder.immutableStorageScheme; - this.qualifierEncodingScheme = builder.qualifierEncodingScheme; - this.encodedCQCounter = builder.encodedCQCounter; - this.useStatsForParallelization = builder.useStatsForParallelization; - this.ttl = builder.ttl; - this.viewModifiedPropSet = builder.viewModifiedPropSet; - this.propertyValues = builder.propertyValues; - this.lastDDLTimestamp = builder.lastDDLTimestamp; - this.isChangeDetectionEnabled = builder.isChangeDetectionEnabled; - this.schemaVersion = builder.schemaVersion; - this.externalSchemaId = builder.externalSchemaId; - this.streamingTopicName = builder.streamingTopicName; - this.cdcIncludeScopes = builder.cdcIncludeScopes; - this.indexWhere = builder.indexWhere; - this.maxLookbackAge = 
builder.maxLookbackAge; - this.ancestorLastDDLTimestampMap = builder.ancestorLastDDLTimestampMap; - this.rowKeyMatcher = builder.rowKeyMatcher; - } - - // When cloning table, ignore the salt column as it will be added back in the constructor - public static List getColumnsToClone(PTable table) { - return table == null ? Collections. emptyList() : - (table.getBucketNum() == null ? table.getColumns() : - table.getColumns().subList(1, table.getColumns().size())); - } - - /** - * Get a PTableImpl.Builder from an existing PTable and set the builder columns - * @param table Original PTable - * @param columns Columns to set in the builder for the new PTable to be constructed - * @return PTable builder object based on an existing PTable - */ - public static PTableImpl.Builder builderWithColumns(PTable table, Collection columns) { - return builderFromExisting(table).setColumns(columns); - } - - /** - * Get a PTableImpl.Builder from an existing PTable - * @param table Original PTable - */ - public static PTableImpl.Builder builderFromExisting(PTable table) { - return new PTableImpl.Builder() - .setType(table.getType()) - .setState(table.getIndexState()) - .setTimeStamp(table.getTimeStamp()) - .setIndexDisableTimestamp(table.getIndexDisableTimestamp()) - .setSequenceNumber(table.getSequenceNumber()) - .setImmutableRows(table.isImmutableRows()) - .setViewStatement(table.getViewStatement()) - .setDisableWAL(table.isWALDisabled()) - .setMultiTenant(table.isMultiTenant()) - .setStoreNulls(table.getStoreNulls()) - .setViewType(table.getViewType()) - .setViewIndexIdType(table.getviewIndexIdType()) - .setViewIndexId(table.getViewIndexId()) - .setIndexType(table.getIndexType()) - .setTransactionProvider(table.getTransactionProvider()) - .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) - .setAppendOnlySchema(table.isAppendOnlySchema()) - .setImmutableStorageScheme(table.getImmutableStorageScheme() == null ? - ImmutableStorageScheme.ONE_CELL_PER_COLUMN : table.getImmutableStorageScheme()) - .setQualifierEncodingScheme(table.getEncodingScheme() == null ? - QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : table.getEncodingScheme()) - .setBaseColumnCount(table.getBaseColumnCount()) - .setEncodedCQCounter(table.getEncodedCQCounter()) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .setExcludedColumns(table.getExcludedColumns() == null ? - ImmutableList.of() : ImmutableList.copyOf(table.getExcludedColumns())) - .setTenantId(table.getTenantId()) - .setSchemaName(table.getSchemaName()) - .setTableName(table.getTableName()) - .setPhysicalTableName(table.getPhysicalName(true)) - .setPkName(table.getPKName()) - .setDefaultFamilyName(table.getDefaultFamilyName()) - .setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()) - .setBucketNum(table.getBucketNum()) - .setIndexes(table.getIndexes() == null ? - Collections.emptyList() : table.getIndexes()) - .setTransformingNewTable(table.getTransformingNewTable()) - .setParentSchemaName(table.getParentSchemaName()) - .setParentTableName(table.getParentTableName()) - .setBaseTableLogicalName(table.getBaseTableLogicalName()) - .setPhysicalNames(table.getPhysicalNames() == null ? 
- ImmutableList.of() : ImmutableList.copyOf(table.getPhysicalNames())) - .setViewModifiedUseStatsForParallelization(table - .hasViewModifiedUseStatsForParallelization()) - .setViewModifiedUpdateCacheFrequency(table.hasViewModifiedUpdateCacheFrequency()) - .setLastDDLTimestamp(table.getLastDDLTimestamp()) - .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) - .setSchemaVersion(table.getSchemaVersion()) - .setExternalSchemaId(table.getExternalSchemaId()) - .setStreamingTopicName(table.getStreamingTopicName()) - .setIndexWhere(table.getIndexWhere()) - .setMaxLookbackAge(table.getMaxLookbackAge()) - .setCDCIncludeScopes(table.getCDCIncludeScopes()) - .setAncestorLastDDLTimestampMap(table.getAncestorLastDDLTimestampMap()) - .setTTL(table.getTTL()) - .setRowKeyMatcher(table.getRowKeyMatcher()); - } - - @Override - public long getUpdateCacheFrequency() { - return updateCacheFrequency; - } - - @Override - public boolean isMultiTenant() { - return multiTenant; - } - - @Override - public boolean getStoreNulls() { - return storeNulls; - } - - @Override - public ViewType getViewType() { - return viewType; - } - - @Override - public int getEstimatedSize() { - return estimatedSize; - } - - public static void checkTenantId(PName tenantId) { - // tenantId should be null or not empty - Preconditions.checkArgument(tenantId == null || tenantId.getBytes().length > 0); - } - - @Override - public boolean isImmutableRows() { - return isImmutableRows; - } - - @Override - public String toString() { - return name.getString(); - } - - @Override - public List getPKColumns() { - return pkColumns; - } - - @Override - public final PName getName() { - return name; - } - - @Override - public final PName getSchemaName() { - return schemaName; - } - - @Override - public final PName getTableName() { - return tableName; - } - - @Override - public final PTableType getType() { - return type; - } - - @Override - public final List getColumnFamilies() { - return families; - } - - @Override - public boolean hasOnlyPkColumns() { - return allColumns.stream().allMatch(SchemaUtil::isPKColumn); - } - - @Override - public int newKey(ImmutableBytesWritable key, byte[][] values) { - List columns = getPKColumns(); - int nValues = values.length; - while (nValues > 0 && (values[nValues-1] == null || values[nValues-1].length == 0)) { - nValues--; - } - for (PColumn column : columns) { - if (column.getExpressionStr() != null) { - nValues++; - } - } - int i = 0; - TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this)); - try { - Integer bucketNum = this.getBucketNum(); - if (bucketNum != null) { - // Write place holder for salt byte - i++; - os.write(QueryConstants.SEPARATOR_BYTE_ARRAY); - } - int nColumns = columns.size(); - PDataType type = null; - SortOrder sortOrder = null; - boolean wasNull = false; - - while (i < nValues && i < nColumns) { - // Separate variable length column values in key with zero byte - if (type != null && !type.isFixedWidth()) { - os.write(SchemaUtil.getSeparatorBytes(type, - rowKeyOrderOptimizable(), - wasNull, - sortOrder)); - } - PColumn column = columns.get(i); - sortOrder = column.getSortOrder(); - type = column.getDataType(); - // This will throw if the value is null and the type doesn't allow null - byte[] byteValue = values[i++]; - if (byteValue == null) { - if (column.getExpressionStr() != null) { - try { - String url = PhoenixRuntime.JDBC_PROTOCOL - + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + PhoenixRuntime.CONNECTIONLESS; - PhoenixConnection conn = 
DriverManager.getConnection(url) - .unwrap(PhoenixConnection.class); - StatementContext context = - new StatementContext(new PhoenixStatement(conn)); - - ExpressionCompiler compiler = new ExpressionCompiler(context); - ParseNode defaultParseNode = - new SQLParser(column.getExpressionStr()).parseExpression(); - Expression defaultExpression = defaultParseNode.accept(compiler); - defaultExpression.evaluate(null, key); - column.getDataType().coerceBytes(key, null, - defaultExpression.getDataType(), - defaultExpression.getMaxLength(), defaultExpression.getScale(), - defaultExpression.getSortOrder(), - column.getMaxLength(), column.getScale(), - column.getSortOrder()); - byteValue = ByteUtil.copyKeyBytesIfNecessary(key); - } catch (SQLException e) { // should not be possible - throw new ConstraintViolationException(name.getString() + "." - + column.getName().getString() - + " failed to compile default value expression of " - + column.getExpressionStr()); - } - } - else { - byteValue = ByteUtil.EMPTY_BYTE_ARRAY; - } - } - wasNull = byteValue.length == 0; - // An empty byte array return value means null. Do this, - // since a type may have muliple representations of null. - // For example, VARCHAR treats both null and an empty string - // as null. This way we don't need to leak that part of the - // implementation outside of PDataType by checking the value - // here. - if (byteValue.length == 0 && !column.isNullable()) { - throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null"); - } - Integer maxLength = column.getMaxLength(); - Integer scale = column.getScale(); - key.set(byteValue); - if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) { - throw new DataExceedsCapacityException(column.getDataType(), maxLength, - column.getScale(), column.getName().getString()); - } - key.set(byteValue); - type.pad(key, maxLength, sortOrder); - byteValue = ByteUtil.copyKeyBytesIfNecessary(key); - os.write(byteValue, 0, byteValue.length); - } - // Need trailing byte for DESC columns - if (type != null && !type.isFixedWidth()) { - if (type != PVarbinaryEncoded.INSTANCE) { - if (SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder) - == QueryConstants.DESC_SEPARATOR_BYTE) { - os.write(QueryConstants.DESC_SEPARATOR_BYTE); - } - } else { - byte[] separatorBytes = - SchemaUtil.getSeparatorBytesForVarBinaryEncoded(rowKeyOrderOptimizable(), - wasNull, sortOrder); - if (separatorBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { - os.write(separatorBytes, 0, separatorBytes.length); - } - } - } - // If some non null pk values aren't set, then throw - if (i < nColumns) { - PColumn column = columns.get(i); - if (column.getDataType().isFixedWidth() || !column.isNullable()) { - throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null"); - } - } - if (nValues == 0) { - throw new ConstraintViolationException("Primary key may not be null ("+ name.getString() + ")"); - } - byte[] buf = os.getBuffer(); - int size = os.size(); - if (bucketNum != null) { - buf[0] = SaltingUtil.getSaltingByte(buf, 1, size-1, bucketNum); - } - key.set(buf,0,size); - return i; - } finally { - try { - os.close(); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - } - } - - private PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, int i, boolean hasOnDupKey, byte[]... 
values) { - PRow row = new PRowImpl(builder, key, ts, getBucketNum(), hasOnDupKey); - if (i < values.length) { - for (PColumnFamily family : getColumnFamilies()) { - for (PColumn column : family.getColumns()) { - row.setValue(column, values[i++]); - if (i == values.length) - return row; - } - } - } - return row; - } - - @Override - public PRow newRow(KeyValueBuilder builder, long ts, ImmutableBytesWritable key, - boolean hasOnDupKey, byte[]... values) { - return newRow(builder, ts, key, 0, hasOnDupKey, values); - } - - @Override - public PRow newRow(KeyValueBuilder builder, ImmutableBytesWritable key, boolean hasOnDupKey, byte[]... values) { - return newRow(builder, HConstants.LATEST_TIMESTAMP, key, hasOnDupKey, values); - } - - @Override - public PColumn getColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException { - String schemaNameStr = schemaName == null ? null : schemaName.getString(); - String tableNameStr = tableName == null ? null : tableName.getString(); - - //Throw exception if trying to create a column name that does not exist - if (columnsByName == null) { - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); - } - - List columns = columnsByName.get(name); - int size = columns.size(); - if (size == 0) { - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); - - } - if (size > 1) { - for (PColumn column : columns) { - if (column.getFamilyName() == null || QueryConstants.DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().getString())) { - // Allow ambiguity with PK column or column in the default column family, - // since a PK column cannot be prefixed and a user would not know how to - // prefix a column in the default column family. - return column; - } - } - throw new AmbiguousColumnException(name); - } - return columns.get(0); - } - - @Override - public PColumn getColumnForColumnQualifier(byte[] cf, byte[] cq) throws ColumnNotFoundException, AmbiguousColumnException { - Preconditions.checkNotNull(cq); - if (!EncodedColumnsUtil.usesEncodedColumnNames(this) || cf == null) { - String columnName = (String)PVarchar.INSTANCE.toObject(cq); - return getColumnForColumnName(columnName); - } else { - String family = (String)PVarchar.INSTANCE.toObject(cf); - PColumn col = kvColumnsByQualifiers.get(new KVColumnFamilyQualifier(family, cq)); - if (col == null) { - String schemaNameStr = schemaName==null?null:schemaName.getString(); - String tableNameStr = tableName==null?null:tableName.getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, - "No column found for column qualifier " + qualifierEncodingScheme.decode(cq)); - } - return col; - } - } + } /** - * - * PRow implementation for ColumnLayout.MULTI mode which stores column - * values across multiple hbase columns. - * - * - * @since 0.1 + * Add attributes to the Put mutations indicating that we need to add shadow cells to Puts to + * store dynamic column metadata. 
See + * {@link org.apache.phoenix.coprocessor.ScanRegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} */ - private class PRowImpl implements PRow { - private final byte[] key; - private final ImmutableBytesWritable keyPtr; - // default to the generic builder, and only override when we know on the client - private final KeyValueBuilder kvBuilder; - - private Mutation setValues; - private Delete unsetValues; - private Mutation deleteRow; - private final long ts; - private final boolean hasOnDupKey; - // map from column name to value - private Map columnToValueMap; - // Map from the column family name to the list of dynamic columns in that column family. - // If there are no dynamic columns in a column family, the key for that column family - // will not exist in the map, rather than the corresponding value being an empty list. - private Map> colFamToDynamicColumnsMapping; - - PRowImpl(KeyValueBuilder kvBuilder, ImmutableBytesWritable key, long ts, Integer bucketNum, boolean hasOnDupKey) { - this.kvBuilder = kvBuilder; - this.ts = ts; - this.hasOnDupKey = hasOnDupKey; - if (bucketNum != null) { - this.key = SaltingUtil.getSaltedKey(key, bucketNum); - this.keyPtr = new ImmutableBytesPtr(this.key); - } else { - this.keyPtr = new ImmutableBytesPtr(key); - this.key = ByteUtil.copyKeyBytesIfNecessary(key); - } - this.columnToValueMap = Maps.newHashMapWithExpectedSize(1); - this.colFamToDynamicColumnsMapping = Maps.newHashMapWithExpectedSize(1); - newMutations(); - } - - private void newMutations() { - Mutation put = new Put(this.key); - Delete delete = new Delete(this.key); - if (isWALDisabled()) { - put.setDurability(Durability.SKIP_WAL); - delete.setDurability(Durability.SKIP_WAL); - } - this.setValues = put; - this.unsetValues = delete; - } - - @Override - public List toRowMutations() { - List mutations = new ArrayList(3); - if (deleteRow != null) { - // Include only deleteRow mutation if present because it takes precedence over all others - mutations.add(deleteRow); - } else { - // store all columns for a given column family in a single cell instead of one column per cell in order to improve write performance - if (immutableStorageScheme != null && immutableStorageScheme != ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - Put put = new Put(this.key); - if (isWALDisabled()) { - put.setDurability(Durability.SKIP_WAL); - } - // the setValues Put contains one cell per column, we need to convert it to a Put that contains a cell with all columns for a given column family - for (PColumnFamily family : families) { - byte[] columnFamily = family.getName().getBytes(); - Collection columns = family.getColumns(); - int maxEncodedColumnQualifier = Integer.MIN_VALUE; - for (PColumn column : columns) { - int qualifier = qualifierEncodingScheme.decode(column.getColumnQualifierBytes()); - maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, qualifier); - } - Expression[] colValues = EncodedColumnsUtil.createColumnExpressionArray(maxEncodedColumnQualifier); - for (PColumn column : columns) { - if (columnToValueMap.containsKey(column)) { - int colIndex = qualifierEncodingScheme.decode(column.getColumnQualifierBytes())-QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE+1; - colValues[colIndex] = new LiteralExpression(columnToValueMap.get(column)); - } - } - - List children = Arrays.asList(colValues); - // we use SingleCellConstructorExpression to serialize all the columns into a single byte[] - SingleCellConstructorExpression singleCellConstructorExpression = new 
SingleCellConstructorExpression(immutableStorageScheme, children); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - singleCellConstructorExpression.evaluate(null, ptr); - ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily); - addQuietly(put, kvBuilder.buildPut(keyPtr, - colFamilyPtr, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES_PTR, ts, ptr)); - } - // Preserve the attributes of the original mutation - Map attrsMap = setValues.getAttributesMap(); - setValues = put; - for (String attrKey : attrsMap.keySet()) { - setValues.setAttribute(attrKey, attrsMap.get(attrKey)); - } - } - // Because we cannot enforce a not null constraint on a KV column (since we don't know if the row exists when - // we upsert it), so instead add a KV that is always empty. This allows us to imitate SQL semantics given the - // way HBase works. - Pair emptyKvInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(PTableImpl.this); - addQuietly(setValues, kvBuilder.buildPut(keyPtr, - SchemaUtil.getEmptyColumnFamilyPtr(PTableImpl.this), - new ImmutableBytesPtr(emptyKvInfo.getFirst()), ts, - new ImmutableBytesPtr(emptyKvInfo.getSecond()))); - mutations.add(setValues); - if (!unsetValues.isEmpty()) { - mutations.add(unsetValues); - } - } - return mutations; - } - - private void removeIfPresent(Mutation m, byte[] family, byte[] qualifier) { - Map> familyMap = m.getFamilyCellMap(); - List kvs = familyMap.get(family); - if (kvs != null) { - Iterator iterator = kvs.iterator(); - while (iterator.hasNext()) { - Cell kv = iterator.next(); - if (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), - qualifier, 0, qualifier.length) == 0) { - iterator.remove(); - break; - } - } - } - } - - @Override - public void setValue(PColumn column, byte[] byteValue) { - deleteRow = null; - byte[] family = column.getFamilyName().getBytes(); - byte[] qualifier = column.getColumnQualifierBytes(); - ImmutableBytesPtr qualifierPtr = new ImmutableBytesPtr(qualifier); - PDataType type = column.getDataType(); - // Check null, since some types have no byte representation for null - if (byteValue == null) { - byteValue = ByteUtil.EMPTY_BYTE_ARRAY; - } - boolean isNull = type.isNull(byteValue); - if (isNull && !column.isNullable()) { - throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + - " may not be null"); - } else if (isNull && PTableImpl.this.isImmutableRows() && column.getExpressionStr() == null) { - // Store nulls for immutable tables otherwise default value would be used - removeIfPresent(setValues, family, qualifier); - removeIfPresent(unsetValues, family, qualifier); - } else if (isNull && !getStoreNulls() && !this.hasOnDupKey && column.getExpressionStr() == null) { - // Cannot use column delete marker when row has ON DUPLICATE KEY clause - // because we cannot change a Delete mutation to a Put mutation in the - // case of updates occurring due to the execution of the clause. 
- removeIfPresent(setValues, family, qualifier); - deleteQuietly(unsetValues, kvBuilder, kvBuilder.buildDeleteColumns(keyPtr, column - .getFamilyName().getBytesPtr(), qualifierPtr, ts)); - } else { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(byteValue); - Integer maxLength = column.getMaxLength(); - Integer scale = column.getScale(); - SortOrder sortOrder = column.getSortOrder(); - if (!type.isSizeCompatible(ptr, null, type, sortOrder, null, null, maxLength, scale)) { - throw new DataExceedsCapacityException(column.getDataType(), maxLength, - column.getScale(), column.getName().getString()); - } - ptr.set(byteValue); - type.pad(ptr, maxLength, sortOrder); - removeIfPresent(unsetValues, family, qualifier); - // store all columns for a given column family in a single cell instead of one column per cell in order to improve write performance - // we don't need to do anything with unsetValues as it is only used when storeNulls is false, storeNulls is always true when storeColsInSingleCell is true - if (immutableStorageScheme == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - columnToValueMap.put(column, ptr.get()); - } - else { - removeIfPresent(unsetValues, family, qualifier); - addQuietly(setValues, kvBuilder.buildPut(keyPtr, - column.getFamilyName().getBytesPtr(), qualifierPtr, - ts, ptr)); - } - String fam = Bytes.toString(family); - if (column.isDynamic()) { - if (!this.colFamToDynamicColumnsMapping.containsKey(fam)) { - this.colFamToDynamicColumnsMapping.put(fam, new ArrayList<>()); - } - this.colFamToDynamicColumnsMapping.get(fam).add(column); - } - } - } - - /** - * Add attributes to the Put mutations indicating that we need to add shadow cells to Puts - * to store dynamic column metadata. See - * {@link org.apache.phoenix.coprocessor.ScanRegionObserver#preBatchMutate(ObserverContext, - * MiniBatchOperationInProgress)} - */ - public boolean setAttributesForDynamicColumnsIfReqd() { - if (this.colFamToDynamicColumnsMapping == null || - this.colFamToDynamicColumnsMapping.isEmpty()) { - return false; - } - boolean attrsForDynColsSet = false; - for (Entry> colFamToDynColsList : - this.colFamToDynamicColumnsMapping.entrySet()) { - DynamicColumnMetaDataProtos.DynamicColumnMetaData.Builder builder = - DynamicColumnMetaDataProtos.DynamicColumnMetaData.newBuilder(); - for (PColumn dynCol : colFamToDynColsList.getValue()) { - builder.addDynamicColumns(PColumnImpl.toProto(dynCol)); - } - if (builder.getDynamicColumnsCount() != 0) { - // The attribute key is the column family name and the value is the - // serialized list of dynamic columns - setValues.setAttribute(colFamToDynColsList.getKey(), - builder.build().toByteArray()); - attrsForDynColsSet = true; - } - } - return attrsForDynColsSet; - } - - @Override public void setAttributeToProcessDynamicColumnsMetadata() { - setValues.setAttribute(DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION, TRUE_BYTES); - } - - @Override - public void delete() { - newMutations(); - Delete delete = new Delete(key); - if (families.isEmpty()) { - delete.addFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts); - } else { - for (PColumnFamily colFamily : families) { - delete.addFamily(colFamily.getName().getBytes(), ts); - } - } - deleteRow = delete; - if (isWALDisabled()) { - deleteRow.setDurability(Durability.SKIP_WAL); - } - } - - } - - @Override - public PColumnFamily getColumnFamily(String familyName) throws ColumnFamilyNotFoundException { - PColumnFamily family = familyByString.get(familyName); - if (family == null) { - String 
schemaNameStr = schemaName==null?null:schemaName.getString(); - String tableNameStr = tableName==null?null:tableName.getString(); - throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, familyName); - } - return family; - } - - @Override - public PColumnFamily getColumnFamily(byte[] familyBytes) throws ColumnFamilyNotFoundException { - PColumnFamily family = familyByBytes.get(familyBytes); - if (family == null) { - String familyName = Bytes.toString(familyBytes); - String schemaNameStr = schemaName==null?null:schemaName.getString(); - String tableNameStr = tableName==null?null:tableName.getString(); - throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, familyName); - } - return family; - } - - @Override - public List getColumns() { - return allColumns; - } - - @Override - public List getExcludedColumns() { - return excludedColumns; - } - - @Override - public long getSequenceNumber() { - return sequenceNumber; - } - - @Override - public long getTimeStamp() { - return timeStamp; - } - - @Override - public long getIndexDisableTimestamp() { - return indexDisableTimestamp; - } - - @Override - public boolean isIndexStateDisabled() { - return getIndexState()!= null && getIndexState().isDisabled(); - } - - @Override - public PColumn getPKColumn(String name) throws ColumnNotFoundException { - List columns = columnsByName.get(name); - int size = columns.size(); - if (size == 0) { - String schemaNameStr = schemaName==null?null:schemaName.getString(); - String tableNameStr = tableName==null?null:tableName.getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); - } - if (size > 1) { - do { - PColumn column = columns.get(--size); - if (column.getFamilyName() == null) { - return column; - } - } while (size > 0); - String schemaNameStr = schemaName==null?null:schemaName.getString(); - String tableNameStr = tableName==null?null:tableName.getString(); - throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name); - } - return columns.get(0); - } - - @Override - public PName getPKName() { - return pkName; - } - - @Override - public RowKeySchema getRowKeySchema() { - return rowKeySchema; - } - - @Override - public Integer getBucketNum() { - return bucketNum; - } - - @Override - public List getIndexes() { - return indexes; - } - - @Override - public PTable getTransformingNewTable() { - return transformingNewTable; - } - - @Override - public PIndexState getIndexState() { - return state; - } - - @Override - public PName getParentTableName() { - // a view on a table will not have a parent name but will have a physical table name (which is the parent) - return (type!=PTableType.VIEW || parentName!=null) ? parentTableName : - PNameFactory.newName(SchemaUtil.getTableNameFromFullName(getPhysicalName().getBytes())); - } - - @Override - public PName getParentName() { - // a view on a table will not have a parent name but will have a physical table name (which is the parent) - return (type!=PTableType.VIEW || parentName!=null) ? parentName : - ((baseTableLogicalName != null && !Strings.isNullOrEmpty(baseTableLogicalName.getString()))? 
baseTableLogicalName - : getPhysicalName()); - } - - @Override - public PName getBaseTableLogicalName() { - PName result = null; - if (baseTableLogicalName != null && !Strings.isNullOrEmpty(baseTableLogicalName.getString())) { - result = baseTableLogicalName; - } else { - if (parentName != null) { - result = parentName; - } else { - if (type == PTableType.VIEW) { - result = getPhysicalName(); - } else if (type == PTableType.INDEX) { - result = SchemaUtil.getTableName(parentSchemaName, parentTableName); - } - } - } - return result; - } - - @Override - public synchronized TransformMaintainer getTransformMaintainer(PTable oldTable, PhoenixConnection connection) { - if (transformMaintainer == null) { - transformMaintainer = TransformMaintainer.create(oldTable, this, connection); - } - return transformMaintainer; - } - - @Override - public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable, - PhoenixConnection connection) - throws SQLException { - return getIndexMaintainer(dataTable, null, connection); - } - - @Override - public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, - PhoenixConnection connection) throws SQLException { - if (indexMaintainer == null) { - indexMaintainer = IndexMaintainer.create(dataTable, cdcTable, this, connection); - } - return indexMaintainer; - } - - @Override - public synchronized boolean getIndexMaintainers(ImmutableBytesWritable ptr, - PhoenixConnection connection) throws SQLException { - if (indexMaintainersPtr == null || indexMaintainersPtr.getLength()==0) { - indexMaintainersPtr = new ImmutableBytesWritable(); - if (indexes.isEmpty() && transformingNewTable == null) { - indexMaintainersPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } else { - IndexMaintainer.serialize(this, indexMaintainersPtr, connection); - } - } - ptr.set(indexMaintainersPtr.get(), indexMaintainersPtr.getOffset(), indexMaintainersPtr.getLength()); - return indexMaintainersPtr.getLength() > 0; - } - - @Override - public PName getPhysicalName() { - // For views, physicalName is base table physical name. There might be a case where the Phoenix table is pointing to another physical table. - // In that case, physicalTableName is not null - if (physicalNames.isEmpty()) { - if (physicalTableNameColumnInSyscat != null && !Strings.isNullOrEmpty( - physicalTableNameColumnInSyscat.getString())) { - return SchemaUtil.getPhysicalHBaseTableName(schemaName, - physicalTableNameColumnInSyscat, isNamespaceMapped); - } - return SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped); - } else { - return PNameFactory.newName(physicalNames.get(0).getBytes()); - } - } - - @Override - public PName getPhysicalName(boolean returnColValueFromSyscat) { - if (returnColValueFromSyscat) { - return physicalTableNameColumnInSyscat; - } else { - return getPhysicalName(); - } - } - - @Override - public List getPhysicalNames() { - return !physicalNames.isEmpty() ? 
physicalNames : Lists.newArrayList(getPhysicalName()); - } - - @Override - public PName getDefaultFamilyName() { - return defaultFamilyName; - } - - @Override - public String getViewStatement() { - return viewStatement; - } - - @Override - public boolean isWALDisabled() { - return disableWAL; - } - - @Override - public Long getViewIndexId() { - return viewIndexId; - } - - @Override - public PDataType getviewIndexIdType() { - return viewIndexIdType; - } - - @Override - public PName getTenantId() { - return tenantId; - } - - @Override - public IndexType getIndexType() { - return indexType; - } - - /** - * Construct a PTable instance from ProtoBuffered PTable instance - * @param table - */ - public static PTable createFromProto(PTableProtos.PTable table) { - if (table==null) - return null; - PName tenantId = null; - if(table.hasTenantId()){ - tenantId = PNameFactory.newName(table.getTenantId().toByteArray()); - } - PName schemaName = PNameFactory.newName(table.getSchemaNameBytes().toByteArray()); - PName tableName = PNameFactory.newName(table.getTableNameBytes().toByteArray()); - PName physicalTableName = null; - if (table.getPhysicalTableNameBytes() != null) { - physicalTableName = PNameFactory.newName(table.getPhysicalTableNameBytes().toByteArray()); - } - PTableType tableType = PTableType.values()[table.getTableType().ordinal()]; - PIndexState indexState = null; - if (table.hasIndexState()) { - indexState = PIndexState.fromSerializedValue(table.getIndexState()); - } - Long viewIndexId = null; - if (table.hasViewIndexId()) { - viewIndexId = table.getViewIndexId(); - } - PDataType viewIndexIdType = table.hasViewIndexIdType() - ? PDataType.fromTypeId(table.getViewIndexIdType()) - : MetaDataUtil.getLegacyViewIndexIdDataType(); - IndexType indexType = IndexType.getDefault(); - if(table.hasIndexType()){ - indexType = IndexType.fromSerializedValue(table.getIndexType().toByteArray()[0]); - } - long sequenceNumber = table.getSequenceNumber(); - long timeStamp = table.getTimeStamp(); - long indexDisableTimestamp = table.getIndexDisableTimestamp(); - PName pkName = null; - if (table.hasPkNameBytes()) { - pkName = PNameFactory.newName(table.getPkNameBytes().toByteArray()); - } - int bucketNum = table.getBucketNum(); - List columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); - for (PTableProtos.PColumn curPColumnProto : table.getColumnsList()) { - columns.add(PColumnImpl.createFromProto(curPColumnProto)); - } - List indexes = Lists.newArrayListWithExpectedSize(table.getIndexesCount()); - for (PTableProtos.PTable curPTableProto : table.getIndexesList()) { - indexes.add(createFromProto(curPTableProto)); - } - - PTable transformingNewTable = null; - if (table.hasTransformingNewTable()){ - PTableProtos.PTable curTransformingPTableProto = table.getTransformingNewTable(); - transformingNewTable = createFromProto(curTransformingPTableProto); - } - boolean isImmutableRows = table.getIsImmutableRows(); - PName parentSchemaName = null; - PName parentTableName = null; - PName parentLogicalName = null; - if (table.hasParentNameBytes()) { - parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName((table.getParentNameBytes().toByteArray()))); - parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(table.getParentNameBytes().toByteArray())); - } - if (table.getBaseTableLogicalNameBytes() != null) { - parentLogicalName = PNameFactory.newName(table.getBaseTableLogicalNameBytes().toByteArray()); - } - PName defaultFamilyName = null; - if 
(table.hasDefaultFamilyName()) { - defaultFamilyName = PNameFactory.newName(table.getDefaultFamilyName().toByteArray()); - } - boolean disableWAL = table.getDisableWAL(); - boolean multiTenant = table.getMultiTenant(); - boolean storeNulls = table.getStoreNulls(); - TransactionFactory.Provider transactionProvider = null; - if (table.hasTransactionProvider()) { - transactionProvider = TransactionFactory.Provider.fromCode(table.getTransactionProvider()); - } else if (table.hasTransactional()) { - // For backward compatibility prior to transactionProvider field - transactionProvider = TransactionFactory.Provider.NOTAVAILABLE; - } - ViewType viewType = null; - String viewStatement = null; - if (tableType == PTableType.VIEW) { - viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]); - } - if(table.hasViewStatement()){ - viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray()); - } - List physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount()); - for(int i = 0; i < table.getPhysicalNamesCount(); i++) { - physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray())); - } - int baseColumnCount = -1; - if (table.hasBaseColumnCount()) { - baseColumnCount = table.getBaseColumnCount(); - } - - boolean rowKeyOrderOptimizable = false; - if (table.hasRowKeyOrderOptimizable()) { - rowKeyOrderOptimizable = table.getRowKeyOrderOptimizable(); - } - long updateCacheFrequency = 0; - if (table.hasUpdateCacheFrequency()) { - updateCacheFrequency = table.getUpdateCacheFrequency(); - } - boolean isNamespaceMapped=false; - if (table.hasIsNamespaceMapped()) { - isNamespaceMapped = table.getIsNamespaceMapped(); - } - String autoPartitionSeqName = null; - if (table.hasAutoParititonSeqName()) { - autoPartitionSeqName = table.getAutoParititonSeqName(); - } - boolean isAppendOnlySchema = false; - if (table.hasIsAppendOnlySchema()) { - isAppendOnlySchema = table.getIsAppendOnlySchema(); - } - // For backward compatibility. Clients older than 4.10 will always have non-encoded immutable tables. - ImmutableStorageScheme storageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; - if (table.hasStorageScheme()) { - storageScheme = ImmutableStorageScheme.fromSerializedValue(table.getStorageScheme().toByteArray()[0]); - } - // For backward compatibility. Clients older than 4.10 will always have non-encoded qualifiers. 
- QualifierEncodingScheme qualifierEncodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - if (table.hasEncodingScheme()) { - qualifierEncodingScheme = QualifierEncodingScheme.fromSerializedValue(table.getEncodingScheme().toByteArray()[0]); - } - EncodedCQCounter encodedColumnQualifierCounter; - if ((!EncodedColumnsUtil.usesEncodedColumnNames(qualifierEncodingScheme) || tableType == PTableType.VIEW)) { - encodedColumnQualifierCounter = PTable.EncodedCQCounter.NULL_COUNTER; - } - else { - encodedColumnQualifierCounter = new EncodedCQCounter(); - if (table.getEncodedCQCountersList() != null) { - for (org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter cqCounterFromProto : table.getEncodedCQCountersList()) { - encodedColumnQualifierCounter.setValue(cqCounterFromProto.getColFamily(), cqCounterFromProto.getCounter()); - } - } - } - Boolean useStatsForParallelization = null; - if (table.hasUseStatsForParallelization()) { - useStatsForParallelization = table.getUseStatsForParallelization(); - } - - // for older clients just use the value of the properties that are set on the view - boolean viewModifiedUpdateCacheFrequency = true; - boolean viewModifiedUseStatsForParallelization = true; - if (table.hasViewModifiedUpdateCacheFrequency()) { - viewModifiedUpdateCacheFrequency = table.getViewModifiedUpdateCacheFrequency(); - } - if (table.hasViewModifiedUseStatsForParallelization()) { - viewModifiedUseStatsForParallelization = table.getViewModifiedUseStatsForParallelization(); - } - Long lastDDLTimestamp = null; - if (table.hasLastDDLTimestamp()) { - lastDDLTimestamp = table.getLastDDLTimestamp(); - } - boolean isChangeDetectionEnabled = false; - if (table.hasChangeDetectionEnabled()) { - isChangeDetectionEnabled = table.getChangeDetectionEnabled(); - } - String schemaVersion = null; - if (table.hasSchemaVersion()) { - schemaVersion = (String) PVarchar.INSTANCE.toObject(table.getSchemaVersion().toByteArray()); - } - String externalSchemaId = null; - if (table.hasExternalSchemaId()) { - externalSchemaId = - (String) PVarchar.INSTANCE.toObject(table.getExternalSchemaId().toByteArray()); - } - String streamingTopicName = null; - if (table.hasStreamingTopicName()) { - streamingTopicName = - (String) PVarchar.INSTANCE.toObject(table.getStreamingTopicName().toByteArray()); - } - String indexWhere = null; - if (table.hasIndexWhere()) { - indexWhere = - (String) PVarchar.INSTANCE.toObject(table.getIndexWhere().toByteArray()); - } - Long maxLookbackAge = null; - if (table.hasMaxLookbackAge()) { - maxLookbackAge = table.getMaxLookbackAge(); - } - String cdcIncludeScopesStr = null; - if (table.hasCDCIncludeScopes()) { - cdcIncludeScopesStr = table.getCDCIncludeScopes(); - } - - Integer ttl = TTL_NOT_DEFINED; - if (table.hasTtl()) { - String ttlStr = (String) PVarchar.INSTANCE.toObject(table.getTtl().toByteArray()); - ttl = Integer.parseInt(ttlStr); - } - - byte[] rowKeyMatcher = null; - if (table.hasRowKeyMatcher()) { - rowKeyMatcher = table.getRowKeyMatcher().toByteArray(); - } - - try { - return new PTableImpl.Builder() - .setType(tableType) - .setState(indexState) - .setTimeStamp(timeStamp) - .setIndexDisableTimestamp(indexDisableTimestamp) - .setSequenceNumber(sequenceNumber) - .setImmutableRows(isImmutableRows) - .setViewStatement(viewStatement) - .setDisableWAL(disableWAL) - .setMultiTenant(multiTenant) - .setStoreNulls(storeNulls) - .setViewType(viewType) - .setViewIndexIdType(viewIndexIdType) - .setViewIndexId(viewIndexId) - .setIndexType(indexType) - 
.setTransactionProvider(transactionProvider) - .setUpdateCacheFrequency(updateCacheFrequency) - .setNamespaceMapped(isNamespaceMapped) - .setAutoPartitionSeqName(autoPartitionSeqName) - .setAppendOnlySchema(isAppendOnlySchema) - // null check for backward compatibility and sanity. If any of the two below is null, - // then it means the table is a non-encoded table. - .setImmutableStorageScheme(storageScheme == null ? - ImmutableStorageScheme.ONE_CELL_PER_COLUMN : storageScheme) - .setQualifierEncodingScheme(qualifierEncodingScheme == null ? - QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : qualifierEncodingScheme) - .setBaseColumnCount(baseColumnCount) - .setEncodedCQCounter(encodedColumnQualifierCounter) - .setUseStatsForParallelization(useStatsForParallelization) - .setExcludedColumns(ImmutableList.of()) - .setTenantId(tenantId) - .setSchemaName(schemaName) - .setTableName(tableName) - .setPhysicalTableName(physicalTableName) - .setPkName(pkName) - .setDefaultFamilyName(defaultFamilyName) - .setRowKeyOrderOptimizable(rowKeyOrderOptimizable) - .setBucketNum((bucketNum == NO_SALTING) ? null : bucketNum) - .setIndexes(indexes == null ? Collections.emptyList() : indexes) - .setTransformingNewTable(transformingNewTable) - .setParentSchemaName(parentSchemaName) - .setParentTableName(parentTableName) - .setBaseTableLogicalName(parentLogicalName) - .setPhysicalNames(physicalNames == null ? - ImmutableList.of() : ImmutableList.copyOf(physicalNames)) - .setColumns(columns) - .setViewModifiedUpdateCacheFrequency(viewModifiedUpdateCacheFrequency) - .setViewModifiedUseStatsForParallelization(viewModifiedUseStatsForParallelization) - .setLastDDLTimestamp(lastDDLTimestamp) - .setIsChangeDetectionEnabled(isChangeDetectionEnabled) - .setSchemaVersion(schemaVersion) - .setExternalSchemaId(externalSchemaId) - .setStreamingTopicName(streamingTopicName) - .setCDCIncludeScopes( - CDCUtil.makeChangeScopeEnumsFromString(cdcIncludeScopesStr)) - .setIndexWhere(indexWhere) - .setMaxLookbackAge(maxLookbackAge) - .setTTL(ttl) - .setRowKeyMatcher(rowKeyMatcher) - .build(); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - } - - public static PTableProtos.PTable toProto(PTable table) { - PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder(); - if (table.getTenantId() != null) { - builder.setTenantId(ByteStringer.wrap(table.getTenantId().getBytes())); - } - builder.setSchemaNameBytes(ByteStringer.wrap(table.getSchemaName().getBytes())); - builder.setTableNameBytes(ByteStringer.wrap(table.getTableName().getBytes())); - if (table.getPhysicalName(true) == null) { - builder.setPhysicalTableNameBytes(ByteStringer.wrap(table.getTableName().getBytes())); - } else { - builder.setPhysicalTableNameBytes(ByteStringer.wrap(table.getPhysicalName(true).getBytes())); - } - builder.setTableType(ProtobufUtil.toPTableTypeProto(table.getType())); - if (table.getIndexState() != null) { - builder.setIndexState(table.getIndexState().getSerializedValue()); - } - if (table.getType() == PTableType.INDEX) { - if (table.getViewIndexId() != null) { - builder.setViewIndexId(table.getViewIndexId()); - builder.setViewIndexIdType(table.getviewIndexIdType().getSqlType()); - } - if (table.getIndexType() != null) { - builder.setIndexType(ByteStringer - .wrap(new byte[] { table.getIndexType().getSerializedValue() })); - } - } - builder.setSequenceNumber(table.getSequenceNumber()); - builder.setTimeStamp(table.getTimeStamp()); - PName tmp = table.getPKName(); - if (tmp != null) { - 
builder.setPkNameBytes(ByteStringer.wrap(tmp.getBytes())); - } - Integer bucketNum = table.getBucketNum(); - int offset = 0; - if (bucketNum == null) { - builder.setBucketNum(NO_SALTING); - } else { - offset = 1; - builder.setBucketNum(bucketNum); - } - List columns = table.getColumns(); - int columnSize = columns.size(); - for (int i = offset; i < columnSize; i++) { - PColumn column = columns.get(i); - builder.addColumns(PColumnImpl.toProto(column)); - } - List indexes = table.getIndexes(); - for (PTable curIndex : indexes) { - builder.addIndexes(toProto(curIndex)); - } - PTable transformingNewTable = table.getTransformingNewTable(); - if (transformingNewTable != null) { - builder.setTransformingNewTable(toProto(transformingNewTable)); - } - builder.setIsImmutableRows(table.isImmutableRows()); - // TODO remove this field in 5.0 release - if (table.getParentName() != null) { - builder.setDataTableNameBytes(ByteStringer.wrap(table.getParentTableName().getBytes())); - } - if (table.getParentName() != null) { - builder.setParentNameBytes(ByteStringer.wrap(table.getParentName().getBytes())); - } - if (table.getBaseTableLogicalName() != null) { - builder.setBaseTableLogicalNameBytes(ByteStringer.wrap(table.getBaseTableLogicalName().getBytes())); - } - if (table.getDefaultFamilyName() != null) { - builder.setDefaultFamilyName(ByteStringer.wrap(table.getDefaultFamilyName().getBytes())); - } - builder.setDisableWAL(table.isWALDisabled()); - builder.setMultiTenant(table.isMultiTenant()); - builder.setStoreNulls(table.getStoreNulls()); - if (table.getTransactionProvider() != null) { - builder.setTransactionProvider(table.getTransactionProvider().getCode()); - } - if (table.getType() == PTableType.VIEW) { - builder.setViewType( - ByteStringer.wrap(new byte[] { table.getViewType().getSerializedValue() })); - } - if (table.getViewStatement() != null) { - builder.setViewStatement(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getViewStatement()))); - } - for (int i = 0; i < table.getPhysicalNames().size(); i++) { - builder.addPhysicalNames(ByteStringer.wrap(table.getPhysicalNames().get(i).getBytes())); - } - builder.setBaseColumnCount(table.getBaseColumnCount()); - builder.setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()); - builder.setUpdateCacheFrequency(table.getUpdateCacheFrequency()); - builder.setIndexDisableTimestamp(table.getIndexDisableTimestamp()); - builder.setIsNamespaceMapped(table.isNamespaceMapped()); - if (table.getAutoPartitionSeqName() != null) { - builder.setAutoParititonSeqName(table.getAutoPartitionSeqName()); - } - builder.setIsAppendOnlySchema(table.isAppendOnlySchema()); - if (table.getImmutableStorageScheme() != null) { - builder.setStorageScheme(ByteStringer.wrap(new byte[] { - table.getImmutableStorageScheme().getSerializedMetadataValue() })); - } - if (table.getEncodedCQCounter() != null) { - Map values = table.getEncodedCQCounter().values(); - for (Entry cqCounter : values.entrySet()) { - org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.Builder - cqBuilder = - org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.newBuilder(); - cqBuilder.setColFamily(cqCounter.getKey()); - cqBuilder.setCounter(cqCounter.getValue()); - builder.addEncodedCQCounters(cqBuilder.build()); - } - } - if (table.getEncodingScheme() != null) { - builder.setEncodingScheme(ByteStringer - .wrap(new byte[] { table.getEncodingScheme().getSerializedMetadataValue() })); - } - if (table.useStatsForParallelization() != null) { - 
builder.setUseStatsForParallelization(table.useStatsForParallelization()); - } - builder.setViewModifiedUpdateCacheFrequency(table.hasViewModifiedUpdateCacheFrequency()); - builder.setViewModifiedUseStatsForParallelization(table.hasViewModifiedUseStatsForParallelization()); - if (table.getLastDDLTimestamp() != null) { - builder.setLastDDLTimestamp(table.getLastDDLTimestamp()); - } - builder.setChangeDetectionEnabled(table.isChangeDetectionEnabled()); - if (table.getSchemaVersion() != null) { - builder.setSchemaVersion(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getSchemaVersion()))); - } - if (table.getExternalSchemaId() != null) { - builder.setExternalSchemaId(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getExternalSchemaId()))); - } - if (table.getStreamingTopicName() != null) { - builder.setStreamingTopicName(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getStreamingTopicName()))); - } - if (table.getIndexWhere() != null) { - builder.setIndexWhere(ByteStringer.wrap(PVarchar.INSTANCE.toBytes( - table.getIndexWhere()))); - } - if (table.getMaxLookbackAge() != null) { - builder.setMaxLookbackAge(table.getMaxLookbackAge()); - } - builder.setCDCIncludeScopes(CDCUtil.makeChangeScopeStringFromEnums( - table.getCDCIncludeScopes() != null ? table.getCDCIncludeScopes() - : Collections.EMPTY_SET)); - - builder.setTtl(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(String.valueOf(table.getTTL())))); - - if (table.getRowKeyMatcher() != null) { - builder.setRowKeyMatcher(ByteStringer.wrap(table.getRowKeyMatcher())); - } - return builder.build(); - } - - @Override - public PTableKey getKey() { - return key; - } - - @Override - public PName getParentSchemaName() { - // a view on a table will not have a parent name but will have a physical table name (which is the parent) - return (type!=PTableType.VIEW || parentName!=null) ? 
parentSchemaName : - PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(getPhysicalName().getBytes())); - } - - @Override - public TransactionFactory.Provider getTransactionProvider() { - return transactionProvider; - } - - @Override - public final boolean isTransactional() { - return transactionProvider != null; - } - - @Override - public int getBaseColumnCount() { - return baseColumnCount; - } - - @Override - public boolean rowKeyOrderOptimizable() { - return rowKeyOrderOptimizable || !hasColumnsRequiringUpgrade; - } - - @Override - public int getRowTimestampColPos() { - return rowTimestampColPos; - } - - @Override - public boolean isNamespaceMapped() { - return isNamespaceMapped; - } - - @Override - public String getAutoPartitionSeqName() { - return autoPartitionSeqName; - } - - @Override - public boolean isAppendOnlySchema() { - return isAppendOnlySchema; + public boolean setAttributesForDynamicColumnsIfReqd() { + if ( + this.colFamToDynamicColumnsMapping == null || this.colFamToDynamicColumnsMapping.isEmpty() + ) { + return false; + } + boolean attrsForDynColsSet = false; + for (Entry> colFamToDynColsList : this.colFamToDynamicColumnsMapping + .entrySet()) { + DynamicColumnMetaDataProtos.DynamicColumnMetaData.Builder builder = + DynamicColumnMetaDataProtos.DynamicColumnMetaData.newBuilder(); + for (PColumn dynCol : colFamToDynColsList.getValue()) { + builder.addDynamicColumns(PColumnImpl.toProto(dynCol)); + } + if (builder.getDynamicColumnsCount() != 0) { + // The attribute key is the column family name and the value is the + // serialized list of dynamic columns + setValues.setAttribute(colFamToDynColsList.getKey(), builder.build().toByteArray()); + attrsForDynColsSet = true; + } + } + return attrsForDynColsSet; + } + + @Override + public void setAttributeToProcessDynamicColumnsMetadata() { + setValues.setAttribute(DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION, TRUE_BYTES); + } + + @Override + public void delete() { + newMutations(); + Delete delete = new Delete(key); + if (families.isEmpty()) { + delete.addFamily(SchemaUtil.getEmptyColumnFamily(PTableImpl.this), ts); + } else { + for (PColumnFamily colFamily : families) { + delete.addFamily(colFamily.getName().getBytes(), ts); + } + } + deleteRow = delete; + if (isWALDisabled()) { + deleteRow.setDurability(Durability.SKIP_WAL); + } + } + + } + + @Override + public PColumnFamily getColumnFamily(String familyName) throws ColumnFamilyNotFoundException { + PColumnFamily family = familyByString.get(familyName); + if (family == null) { + String schemaNameStr = schemaName == null ? null : schemaName.getString(); + String tableNameStr = tableName == null ? null : tableName.getString(); + throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, familyName); + } + return family; + } + + @Override + public PColumnFamily getColumnFamily(byte[] familyBytes) throws ColumnFamilyNotFoundException { + PColumnFamily family = familyByBytes.get(familyBytes); + if (family == null) { + String familyName = Bytes.toString(familyBytes); + String schemaNameStr = schemaName == null ? null : schemaName.getString(); + String tableNameStr = tableName == null ? 
null : tableName.getString();
+      throw new ColumnFamilyNotFoundException(schemaNameStr, tableNameStr, familyName);
+    }
+    return family;
+  }
+
+  @Override
+  public List getColumns() {
+    return allColumns;
+  }
+
+  @Override
+  public List getExcludedColumns() {
+    return excludedColumns;
+  }
+
+  @Override
+  public long getSequenceNumber() {
+    return sequenceNumber;
+  }
+
+  @Override
+  public long getTimeStamp() {
+    return timeStamp;
+  }
+
+  @Override
+  public long getIndexDisableTimestamp() {
+    return indexDisableTimestamp;
+  }
+
+  @Override
+  public boolean isIndexStateDisabled() {
+    return getIndexState() != null && getIndexState().isDisabled();
+  }
+
+  @Override
+  public PColumn getPKColumn(String name) throws ColumnNotFoundException {
+    List columns = columnsByName.get(name);
+    int size = columns.size();
+    if (size == 0) {
+      String schemaNameStr = schemaName == null ? null : schemaName.getString();
+      String tableNameStr = tableName == null ? null : tableName.getString();
+      throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name);
+    }
+    if (size > 1) {
+      do {
+        PColumn column = columns.get(--size);
+        if (column.getFamilyName() == null) {
+          return column;
+        }
+      } while (size > 0);
+      String schemaNameStr = schemaName == null ? null : schemaName.getString();
+      String tableNameStr = tableName == null ? null : tableName.getString();
+      throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, name);
+    }
+    return columns.get(0);
+  }
+
+  @Override
+  public PName getPKName() {
+    return pkName;
+  }
+
+  @Override
+  public RowKeySchema getRowKeySchema() {
+    return rowKeySchema;
+  }
+
+  @Override
+  public Integer getBucketNum() {
+    return bucketNum;
+  }
+
+  @Override
+  public List getIndexes() {
+    return indexes;
+  }
+
+  @Override
+  public PTable getTransformingNewTable() {
+    return transformingNewTable;
+  }
+
+  @Override
+  public PIndexState getIndexState() {
+    return state;
+  }
+
+  @Override
+  public PName getParentTableName() {
+    // a view on a table will not have a parent name but will have a physical table name (which is
+    // the parent)
+    return (type != PTableType.VIEW || parentName != null)
+      ? parentTableName
+      : PNameFactory.newName(SchemaUtil.getTableNameFromFullName(getPhysicalName().getBytes()));
+  }
+
+  @Override
+  public PName getParentName() {
+    // a view on a table will not have a parent name but will have a physical table name (which is
+    // the parent)
+    return (type != PTableType.VIEW || parentName != null)
+      ? parentName
+      : ((baseTableLogicalName != null && !Strings.isNullOrEmpty(baseTableLogicalName.getString()))
+        ?
baseTableLogicalName + : getPhysicalName()); + } + + @Override + public PName getBaseTableLogicalName() { + PName result = null; + if (baseTableLogicalName != null && !Strings.isNullOrEmpty(baseTableLogicalName.getString())) { + result = baseTableLogicalName; + } else { + if (parentName != null) { + result = parentName; + } else { + if (type == PTableType.VIEW) { + result = getPhysicalName(); + } else if (type == PTableType.INDEX) { + result = SchemaUtil.getTableName(parentSchemaName, parentTableName); + } + } + } + return result; + } + + @Override + public synchronized TransformMaintainer getTransformMaintainer(PTable oldTable, + PhoenixConnection connection) { + if (transformMaintainer == null) { + transformMaintainer = TransformMaintainer.create(oldTable, this, connection); + } + return transformMaintainer; + } + + @Override + public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable, + PhoenixConnection connection) throws SQLException { + return getIndexMaintainer(dataTable, null, connection); + } + + @Override + public synchronized IndexMaintainer getIndexMaintainer(PTable dataTable, PTable cdcTable, + PhoenixConnection connection) throws SQLException { + if (indexMaintainer == null) { + indexMaintainer = IndexMaintainer.create(dataTable, cdcTable, this, connection); + } + return indexMaintainer; + } + + @Override + public synchronized boolean getIndexMaintainers(ImmutableBytesWritable ptr, + PhoenixConnection connection) throws SQLException { + if (indexMaintainersPtr == null || indexMaintainersPtr.getLength() == 0) { + indexMaintainersPtr = new ImmutableBytesWritable(); + if (indexes.isEmpty() && transformingNewTable == null) { + indexMaintainersPtr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + IndexMaintainer.serialize(this, indexMaintainersPtr, connection); + } + } + ptr.set(indexMaintainersPtr.get(), indexMaintainersPtr.getOffset(), + indexMaintainersPtr.getLength()); + return indexMaintainersPtr.getLength() > 0; + } + + @Override + public PName getPhysicalName() { + // For views, physicalName is base table physical name. There might be a case where the Phoenix + // table is pointing to another physical table. + // In that case, physicalTableName is not null + if (physicalNames.isEmpty()) { + if ( + physicalTableNameColumnInSyscat != null + && !Strings.isNullOrEmpty(physicalTableNameColumnInSyscat.getString()) + ) { + return SchemaUtil.getPhysicalHBaseTableName(schemaName, physicalTableNameColumnInSyscat, + isNamespaceMapped); + } + return SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped); + } else { + return PNameFactory.newName(physicalNames.get(0).getBytes()); + } + } + + @Override + public PName getPhysicalName(boolean returnColValueFromSyscat) { + if (returnColValueFromSyscat) { + return physicalTableNameColumnInSyscat; + } else { + return getPhysicalName(); + } + } + + @Override + public List getPhysicalNames() { + return !physicalNames.isEmpty() ? 
physicalNames : Lists.newArrayList(getPhysicalName()); + } + + @Override + public PName getDefaultFamilyName() { + return defaultFamilyName; + } + + @Override + public String getViewStatement() { + return viewStatement; + } + + @Override + public boolean isWALDisabled() { + return disableWAL; + } + + @Override + public Long getViewIndexId() { + return viewIndexId; + } + + @Override + public PDataType getviewIndexIdType() { + return viewIndexIdType; + } + + @Override + public PName getTenantId() { + return tenantId; + } + + @Override + public IndexType getIndexType() { + return indexType; + } + + /** + * Construct a PTable instance from ProtoBuffered PTable instance + */ + public static PTable createFromProto(PTableProtos.PTable table) { + if (table == null) return null; + PName tenantId = null; + if (table.hasTenantId()) { + tenantId = PNameFactory.newName(table.getTenantId().toByteArray()); + } + PName schemaName = PNameFactory.newName(table.getSchemaNameBytes().toByteArray()); + PName tableName = PNameFactory.newName(table.getTableNameBytes().toByteArray()); + PName physicalTableName = null; + if (table.getPhysicalTableNameBytes() != null) { + physicalTableName = PNameFactory.newName(table.getPhysicalTableNameBytes().toByteArray()); + } + PTableType tableType = PTableType.values()[table.getTableType().ordinal()]; + PIndexState indexState = null; + if (table.hasIndexState()) { + indexState = PIndexState.fromSerializedValue(table.getIndexState()); + } + Long viewIndexId = null; + if (table.hasViewIndexId()) { + viewIndexId = table.getViewIndexId(); + } + PDataType viewIndexIdType = table.hasViewIndexIdType() + ? PDataType.fromTypeId(table.getViewIndexIdType()) + : MetaDataUtil.getLegacyViewIndexIdDataType(); + IndexType indexType = IndexType.getDefault(); + if (table.hasIndexType()) { + indexType = IndexType.fromSerializedValue(table.getIndexType().toByteArray()[0]); + } + long sequenceNumber = table.getSequenceNumber(); + long timeStamp = table.getTimeStamp(); + long indexDisableTimestamp = table.getIndexDisableTimestamp(); + PName pkName = null; + if (table.hasPkNameBytes()) { + pkName = PNameFactory.newName(table.getPkNameBytes().toByteArray()); + } + int bucketNum = table.getBucketNum(); + List columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); + for (PTableProtos.PColumn curPColumnProto : table.getColumnsList()) { + columns.add(PColumnImpl.createFromProto(curPColumnProto)); + } + List indexes = Lists.newArrayListWithExpectedSize(table.getIndexesCount()); + for (PTableProtos.PTable curPTableProto : table.getIndexesList()) { + indexes.add(createFromProto(curPTableProto)); + } + + PTable transformingNewTable = null; + if (table.hasTransformingNewTable()) { + PTableProtos.PTable curTransformingPTableProto = table.getTransformingNewTable(); + transformingNewTable = createFromProto(curTransformingPTableProto); + } + boolean isImmutableRows = table.getIsImmutableRows(); + PName parentSchemaName = null; + PName parentTableName = null; + PName parentLogicalName = null; + if (table.hasParentNameBytes()) { + parentSchemaName = PNameFactory + .newName(SchemaUtil.getSchemaNameFromFullName((table.getParentNameBytes().toByteArray()))); + parentTableName = PNameFactory + .newName(SchemaUtil.getTableNameFromFullName(table.getParentNameBytes().toByteArray())); + } + if (table.getBaseTableLogicalNameBytes() != null) { + parentLogicalName = PNameFactory.newName(table.getBaseTableLogicalNameBytes().toByteArray()); + } + PName defaultFamilyName = null; + if 
(table.hasDefaultFamilyName()) { + defaultFamilyName = PNameFactory.newName(table.getDefaultFamilyName().toByteArray()); + } + boolean disableWAL = table.getDisableWAL(); + boolean multiTenant = table.getMultiTenant(); + boolean storeNulls = table.getStoreNulls(); + TransactionFactory.Provider transactionProvider = null; + if (table.hasTransactionProvider()) { + transactionProvider = TransactionFactory.Provider.fromCode(table.getTransactionProvider()); + } else if (table.hasTransactional()) { + // For backward compatibility prior to transactionProvider field + transactionProvider = TransactionFactory.Provider.NOTAVAILABLE; + } + ViewType viewType = null; + String viewStatement = null; + if (tableType == PTableType.VIEW) { + viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]); + } + if (table.hasViewStatement()) { + viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray()); + } + List physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount()); + for (int i = 0; i < table.getPhysicalNamesCount(); i++) { + physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray())); + } + int baseColumnCount = -1; + if (table.hasBaseColumnCount()) { + baseColumnCount = table.getBaseColumnCount(); + } + + boolean rowKeyOrderOptimizable = false; + if (table.hasRowKeyOrderOptimizable()) { + rowKeyOrderOptimizable = table.getRowKeyOrderOptimizable(); + } + long updateCacheFrequency = 0; + if (table.hasUpdateCacheFrequency()) { + updateCacheFrequency = table.getUpdateCacheFrequency(); + } + boolean isNamespaceMapped = false; + if (table.hasIsNamespaceMapped()) { + isNamespaceMapped = table.getIsNamespaceMapped(); + } + String autoPartitionSeqName = null; + if (table.hasAutoParititonSeqName()) { + autoPartitionSeqName = table.getAutoParititonSeqName(); + } + boolean isAppendOnlySchema = false; + if (table.hasIsAppendOnlySchema()) { + isAppendOnlySchema = table.getIsAppendOnlySchema(); + } + // For backward compatibility. Clients older than 4.10 will always have non-encoded immutable + // tables. + ImmutableStorageScheme storageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN; + if (table.hasStorageScheme()) { + storageScheme = + ImmutableStorageScheme.fromSerializedValue(table.getStorageScheme().toByteArray()[0]); + } + // For backward compatibility. Clients older than 4.10 will always have non-encoded qualifiers. 
+ QualifierEncodingScheme qualifierEncodingScheme = + QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + if (table.hasEncodingScheme()) { + qualifierEncodingScheme = + QualifierEncodingScheme.fromSerializedValue(table.getEncodingScheme().toByteArray()[0]); + } + EncodedCQCounter encodedColumnQualifierCounter; + if ( + (!EncodedColumnsUtil.usesEncodedColumnNames(qualifierEncodingScheme) + || tableType == PTableType.VIEW) + ) { + encodedColumnQualifierCounter = PTable.EncodedCQCounter.NULL_COUNTER; + } else { + encodedColumnQualifierCounter = new EncodedCQCounter(); + if (table.getEncodedCQCountersList() != null) { + for (org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter cqCounterFromProto : table + .getEncodedCQCountersList()) { + encodedColumnQualifierCounter.setValue(cqCounterFromProto.getColFamily(), + cqCounterFromProto.getCounter()); + } + } + } + Boolean useStatsForParallelization = null; + if (table.hasUseStatsForParallelization()) { + useStatsForParallelization = table.getUseStatsForParallelization(); + } + + // for older clients just use the value of the properties that are set on the view + boolean viewModifiedUpdateCacheFrequency = true; + boolean viewModifiedUseStatsForParallelization = true; + if (table.hasViewModifiedUpdateCacheFrequency()) { + viewModifiedUpdateCacheFrequency = table.getViewModifiedUpdateCacheFrequency(); + } + if (table.hasViewModifiedUseStatsForParallelization()) { + viewModifiedUseStatsForParallelization = table.getViewModifiedUseStatsForParallelization(); + } + Long lastDDLTimestamp = null; + if (table.hasLastDDLTimestamp()) { + lastDDLTimestamp = table.getLastDDLTimestamp(); + } + boolean isChangeDetectionEnabled = false; + if (table.hasChangeDetectionEnabled()) { + isChangeDetectionEnabled = table.getChangeDetectionEnabled(); + } + String schemaVersion = null; + if (table.hasSchemaVersion()) { + schemaVersion = (String) PVarchar.INSTANCE.toObject(table.getSchemaVersion().toByteArray()); + } + String externalSchemaId = null; + if (table.hasExternalSchemaId()) { + externalSchemaId = + (String) PVarchar.INSTANCE.toObject(table.getExternalSchemaId().toByteArray()); + } + String streamingTopicName = null; + if (table.hasStreamingTopicName()) { + streamingTopicName = + (String) PVarchar.INSTANCE.toObject(table.getStreamingTopicName().toByteArray()); + } + String indexWhere = null; + if (table.hasIndexWhere()) { + indexWhere = (String) PVarchar.INSTANCE.toObject(table.getIndexWhere().toByteArray()); + } + Long maxLookbackAge = null; + if (table.hasMaxLookbackAge()) { + maxLookbackAge = table.getMaxLookbackAge(); + } + String cdcIncludeScopesStr = null; + if (table.hasCDCIncludeScopes()) { + cdcIncludeScopesStr = table.getCDCIncludeScopes(); + } + + Integer ttl = TTL_NOT_DEFINED; + if (table.hasTtl()) { + String ttlStr = (String) PVarchar.INSTANCE.toObject(table.getTtl().toByteArray()); + ttl = Integer.parseInt(ttlStr); + } + + byte[] rowKeyMatcher = null; + if (table.hasRowKeyMatcher()) { + rowKeyMatcher = table.getRowKeyMatcher().toByteArray(); + } + + try { + return new PTableImpl.Builder().setType(tableType).setState(indexState) + .setTimeStamp(timeStamp).setIndexDisableTimestamp(indexDisableTimestamp) + .setSequenceNumber(sequenceNumber).setImmutableRows(isImmutableRows) + .setViewStatement(viewStatement).setDisableWAL(disableWAL).setMultiTenant(multiTenant) + .setStoreNulls(storeNulls).setViewType(viewType).setViewIndexIdType(viewIndexIdType) + .setViewIndexId(viewIndexId).setIndexType(indexType) + 
.setTransactionProvider(transactionProvider).setUpdateCacheFrequency(updateCacheFrequency) + .setNamespaceMapped(isNamespaceMapped).setAutoPartitionSeqName(autoPartitionSeqName) + .setAppendOnlySchema(isAppendOnlySchema) + // null check for backward compatibility and sanity. If any of the two below is null, + // then it means the table is a non-encoded table. + .setImmutableStorageScheme( + storageScheme == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : storageScheme) + .setQualifierEncodingScheme(qualifierEncodingScheme == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : qualifierEncodingScheme) + .setBaseColumnCount(baseColumnCount).setEncodedCQCounter(encodedColumnQualifierCounter) + .setUseStatsForParallelization(useStatsForParallelization) + .setExcludedColumns(ImmutableList.of()).setTenantId(tenantId).setSchemaName(schemaName) + .setTableName(tableName).setPhysicalTableName(physicalTableName).setPkName(pkName) + .setDefaultFamilyName(defaultFamilyName).setRowKeyOrderOptimizable(rowKeyOrderOptimizable) + .setBucketNum((bucketNum == NO_SALTING) ? null : bucketNum) + .setIndexes(indexes == null ? Collections.emptyList() : indexes) + .setTransformingNewTable(transformingNewTable).setParentSchemaName(parentSchemaName) + .setParentTableName(parentTableName).setBaseTableLogicalName(parentLogicalName) + .setPhysicalNames( + physicalNames == null ? ImmutableList.of() : ImmutableList.copyOf(physicalNames)) + .setColumns(columns).setViewModifiedUpdateCacheFrequency(viewModifiedUpdateCacheFrequency) + .setViewModifiedUseStatsForParallelization(viewModifiedUseStatsForParallelization) + .setLastDDLTimestamp(lastDDLTimestamp).setIsChangeDetectionEnabled(isChangeDetectionEnabled) + .setSchemaVersion(schemaVersion).setExternalSchemaId(externalSchemaId) + .setStreamingTopicName(streamingTopicName) + .setCDCIncludeScopes(CDCUtil.makeChangeScopeEnumsFromString(cdcIncludeScopesStr)) + .setIndexWhere(indexWhere).setMaxLookbackAge(maxLookbackAge).setTTL(ttl) + .setRowKeyMatcher(rowKeyMatcher).build(); + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } + } + + public static PTableProtos.PTable toProto(PTable table) { + PTableProtos.PTable.Builder builder = PTableProtos.PTable.newBuilder(); + if (table.getTenantId() != null) { + builder.setTenantId(ByteStringer.wrap(table.getTenantId().getBytes())); + } + builder.setSchemaNameBytes(ByteStringer.wrap(table.getSchemaName().getBytes())); + builder.setTableNameBytes(ByteStringer.wrap(table.getTableName().getBytes())); + if (table.getPhysicalName(true) == null) { + builder.setPhysicalTableNameBytes(ByteStringer.wrap(table.getTableName().getBytes())); + } else { + builder.setPhysicalTableNameBytes(ByteStringer.wrap(table.getPhysicalName(true).getBytes())); + } + builder.setTableType(ProtobufUtil.toPTableTypeProto(table.getType())); + if (table.getIndexState() != null) { + builder.setIndexState(table.getIndexState().getSerializedValue()); + } + if (table.getType() == PTableType.INDEX) { + if (table.getViewIndexId() != null) { + builder.setViewIndexId(table.getViewIndexId()); + builder.setViewIndexIdType(table.getviewIndexIdType().getSqlType()); + } + if (table.getIndexType() != null) { + builder.setIndexType( + ByteStringer.wrap(new byte[] { table.getIndexType().getSerializedValue() })); + } + } + builder.setSequenceNumber(table.getSequenceNumber()); + builder.setTimeStamp(table.getTimeStamp()); + PName tmp = table.getPKName(); + if (tmp != null) { + builder.setPkNameBytes(ByteStringer.wrap(tmp.getBytes())); + } + 
Integer bucketNum = table.getBucketNum(); + int offset = 0; + if (bucketNum == null) { + builder.setBucketNum(NO_SALTING); + } else { + offset = 1; + builder.setBucketNum(bucketNum); + } + List columns = table.getColumns(); + int columnSize = columns.size(); + for (int i = offset; i < columnSize; i++) { + PColumn column = columns.get(i); + builder.addColumns(PColumnImpl.toProto(column)); + } + List indexes = table.getIndexes(); + for (PTable curIndex : indexes) { + builder.addIndexes(toProto(curIndex)); + } + PTable transformingNewTable = table.getTransformingNewTable(); + if (transformingNewTable != null) { + builder.setTransformingNewTable(toProto(transformingNewTable)); + } + builder.setIsImmutableRows(table.isImmutableRows()); + // TODO remove this field in 5.0 release + if (table.getParentName() != null) { + builder.setDataTableNameBytes(ByteStringer.wrap(table.getParentTableName().getBytes())); + } + if (table.getParentName() != null) { + builder.setParentNameBytes(ByteStringer.wrap(table.getParentName().getBytes())); + } + if (table.getBaseTableLogicalName() != null) { + builder.setBaseTableLogicalNameBytes( + ByteStringer.wrap(table.getBaseTableLogicalName().getBytes())); + } + if (table.getDefaultFamilyName() != null) { + builder.setDefaultFamilyName(ByteStringer.wrap(table.getDefaultFamilyName().getBytes())); + } + builder.setDisableWAL(table.isWALDisabled()); + builder.setMultiTenant(table.isMultiTenant()); + builder.setStoreNulls(table.getStoreNulls()); + if (table.getTransactionProvider() != null) { + builder.setTransactionProvider(table.getTransactionProvider().getCode()); + } + if (table.getType() == PTableType.VIEW) { + builder + .setViewType(ByteStringer.wrap(new byte[] { table.getViewType().getSerializedValue() })); + } + if (table.getViewStatement() != null) { + builder + .setViewStatement(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getViewStatement()))); + } + for (int i = 0; i < table.getPhysicalNames().size(); i++) { + builder.addPhysicalNames(ByteStringer.wrap(table.getPhysicalNames().get(i).getBytes())); + } + builder.setBaseColumnCount(table.getBaseColumnCount()); + builder.setRowKeyOrderOptimizable(table.rowKeyOrderOptimizable()); + builder.setUpdateCacheFrequency(table.getUpdateCacheFrequency()); + builder.setIndexDisableTimestamp(table.getIndexDisableTimestamp()); + builder.setIsNamespaceMapped(table.isNamespaceMapped()); + if (table.getAutoPartitionSeqName() != null) { + builder.setAutoParititonSeqName(table.getAutoPartitionSeqName()); + } + builder.setIsAppendOnlySchema(table.isAppendOnlySchema()); + if (table.getImmutableStorageScheme() != null) { + builder.setStorageScheme(ByteStringer + .wrap(new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() })); + } + if (table.getEncodedCQCounter() != null) { + Map values = table.getEncodedCQCounter().values(); + for (Entry cqCounter : values.entrySet()) { + org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.Builder cqBuilder = + org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.newBuilder(); + cqBuilder.setColFamily(cqCounter.getKey()); + cqBuilder.setCounter(cqCounter.getValue()); + builder.addEncodedCQCounters(cqBuilder.build()); + } + } + if (table.getEncodingScheme() != null) { + builder.setEncodingScheme( + ByteStringer.wrap(new byte[] { table.getEncodingScheme().getSerializedMetadataValue() })); + } + if (table.useStatsForParallelization() != null) { + builder.setUseStatsForParallelization(table.useStatsForParallelization()); + } + 
builder.setViewModifiedUpdateCacheFrequency(table.hasViewModifiedUpdateCacheFrequency()); + builder + .setViewModifiedUseStatsForParallelization(table.hasViewModifiedUseStatsForParallelization()); + if (table.getLastDDLTimestamp() != null) { + builder.setLastDDLTimestamp(table.getLastDDLTimestamp()); + } + builder.setChangeDetectionEnabled(table.isChangeDetectionEnabled()); + if (table.getSchemaVersion() != null) { + builder + .setSchemaVersion(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getSchemaVersion()))); + } + if (table.getExternalSchemaId() != null) { + builder.setExternalSchemaId( + ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getExternalSchemaId()))); + } + if (table.getStreamingTopicName() != null) { + builder.setStreamingTopicName( + ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getStreamingTopicName()))); + } + if (table.getIndexWhere() != null) { + builder.setIndexWhere(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(table.getIndexWhere()))); + } + if (table.getMaxLookbackAge() != null) { + builder.setMaxLookbackAge(table.getMaxLookbackAge()); + } + builder.setCDCIncludeScopes(CDCUtil.makeChangeScopeStringFromEnums( + table.getCDCIncludeScopes() != null ? table.getCDCIncludeScopes() : Collections.EMPTY_SET)); + + builder.setTtl(ByteStringer.wrap(PVarchar.INSTANCE.toBytes(String.valueOf(table.getTTL())))); + + if (table.getRowKeyMatcher() != null) { + builder.setRowKeyMatcher(ByteStringer.wrap(table.getRowKeyMatcher())); + } + return builder.build(); + } + + @Override + public PTableKey getKey() { + return key; + } + + @Override + public PName getParentSchemaName() { + // a view on a table will not have a parent name but will have a physical table name (which is + // the parent) + return (type != PTableType.VIEW || parentName != null) + ? parentSchemaName + : PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(getPhysicalName().getBytes())); + } + + @Override + public TransactionFactory.Provider getTransactionProvider() { + return transactionProvider; + } + + @Override + public final boolean isTransactional() { + return transactionProvider != null; + } + + @Override + public int getBaseColumnCount() { + return baseColumnCount; + } + + @Override + public boolean rowKeyOrderOptimizable() { + return rowKeyOrderOptimizable || !hasColumnsRequiringUpgrade; + } + + @Override + public int getRowTimestampColPos() { + return rowTimestampColPos; + } + + @Override + public boolean isNamespaceMapped() { + return isNamespaceMapped; + } + + @Override + public String getAutoPartitionSeqName() { + return autoPartitionSeqName; + } + + @Override + public boolean isAppendOnlySchema() { + return isAppendOnlySchema; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((key == null) ? 
0 : key.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj == null) return false;
+    if (!(obj instanceof PTable)) return false;
+    PTable other = (PTable) obj;
+    if (key == null) {
+      if (other.getKey() != null) return false;
+    } else if (!key.equals(other.getKey())) return false;
+    return true;
+  }
+
+  @Override
+  public ImmutableStorageScheme getImmutableStorageScheme() {
+    return immutableStorageScheme;
+  }
+
+  @Override
+  public EncodedCQCounter getEncodedCQCounter() {
+    return encodedCQCounter;
+  }
+
+  @Override
+  public QualifierEncodingScheme getEncodingScheme() {
+    return qualifierEncodingScheme;
+  }
+
+  @Override
+  public Boolean useStatsForParallelization() {
+    return useStatsForParallelization;
+  }
+
+  @Override
+  public int getTTL() {
+    return ttl;
+  }
+
+  @Override
+  public boolean hasViewModifiedUpdateCacheFrequency() {
+    return viewModifiedPropSet.get(VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS);
+  }
+
+  @Override
+  public boolean hasViewModifiedUseStatsForParallelization() {
+    return viewModifiedPropSet.get(VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS);
+  }
+
+  @Override
+  public Long getLastDDLTimestamp() {
+    return lastDDLTimestamp;
+  }
+
+  @Override
+  public boolean isChangeDetectionEnabled() {
+    return isChangeDetectionEnabled;
+  }
+
+  @Override
+  public String getSchemaVersion() {
+    return schemaVersion;
+  }
+
+  @Override
+  public String getExternalSchemaId() {
+    return externalSchemaId;
+  }
+
+  @Override
+  public String getStreamingTopicName() {
+    return streamingTopicName;
+  }
+
+  @Override
+  public Set getCDCIncludeScopes() {
+    return cdcIncludeScopes;
+  }
+
+  @Override
+  public String getIndexWhere() {
+    return indexWhere;
+  }
+
+  @Override
+  public Long getMaxLookbackAge() {
+    return maxLookbackAge;
+  }
+
+  @Override
+  public Map getAncestorLastDDLTimestampMap() {
+    return ancestorLastDDLTimestampMap;
+  }
+
+  private void buildIndexWhereExpression(PhoenixConnection connection) throws SQLException {
+    PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(connection,
+      "select * from " + SchemaUtil.getTableName(parentSchemaName, parentTableName).getString()
+        + " where " + indexWhere);
+    QueryPlan plan = pstmt.compileQuery();
+    ParseNode where = plan.getStatement().getWhere();
+    plan.getContext().setResolver(FromCompiler.getResolver(plan.getTableRef()));
+    indexWhereExpression = transformDNF(where, plan.getContext());
+    indexWhereColumns =
+      Sets.newHashSetWithExpectedSize(plan.getContext().getWhereConditionColumns().size());
+    for (Pair column : plan.getContext().getWhereConditionColumns()) {
+      indexWhereColumns.add(new ColumnReference(column.getFirst(), column.getSecond()));
+    }
+  }
+
+  @Override
+  public Expression getIndexWhereExpression(PhoenixConnection connection) throws SQLException {
+    if (indexWhereExpression == null && indexWhere != null) {
+      buildIndexWhereExpression(connection);
+    }
+    return indexWhereExpression;
+  }
+
+  @Override
+  public Set getIndexWhereColumns(PhoenixConnection connection)
+    throws SQLException {
+    if (indexWhereColumns == null && indexWhere != null) {
+      buildIndexWhereExpression(connection);
+    }
+    return indexWhereColumns;
+  }
+
+  @Override
+  public byte[] getRowKeyMatcher() {
+    return rowKeyMatcher;
+  }
+
+  private static final class KVColumnFamilyQualifier {
+    @Nonnull
+    private final String colFamilyName;
+    @Nonnull
+    private final byte[] colQualifier;
+
+    public KVColumnFamilyQualifier(String colFamilyName, byte[] colQualifier) {
+ Preconditions.checkArgument(colFamilyName != null && colQualifier != null, + "None of the arguments, column family name or column qualifier can be null"); + this.colFamilyName = colFamilyName; + this.colQualifier = colQualifier; } @Override public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - return result; + final int prime = 31; + int result = 1; + result = prime * result + colFamilyName.hashCode(); + result = prime * result + Arrays.hashCode(colQualifier); + return result; } @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (! (obj instanceof PTable)) return false; - PTable other = (PTable) obj; - if (key == null) { - if (other.getKey() != null) return false; - } else if (!key.equals(other.getKey())) return false; - return true; - } - - @Override - public ImmutableStorageScheme getImmutableStorageScheme() { - return immutableStorageScheme; - } - - @Override - public EncodedCQCounter getEncodedCQCounter() { - return encodedCQCounter; - } - - @Override - public QualifierEncodingScheme getEncodingScheme() { - return qualifierEncodingScheme; - } - - @Override - public Boolean useStatsForParallelization() { - return useStatsForParallelization; - } - - @Override - public int getTTL() { - return ttl; - } - - @Override public boolean hasViewModifiedUpdateCacheFrequency() { - return viewModifiedPropSet.get(VIEW_MODIFIED_UPDATE_CACHE_FREQUENCY_BIT_SET_POS); - } - - @Override public boolean hasViewModifiedUseStatsForParallelization() { - return viewModifiedPropSet.get(VIEW_MODIFIED_USE_STATS_FOR_PARALLELIZATION_BIT_SET_POS); - } - - @Override - public Long getLastDDLTimestamp() { - return lastDDLTimestamp; - } - - @Override - public boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } - - @Override - public String getSchemaVersion() { - return schemaVersion; - } - - @Override - public String getExternalSchemaId() { - return externalSchemaId; - } - - @Override - public String getStreamingTopicName() { - return streamingTopicName; - } - - @Override - public Set getCDCIncludeScopes() { - return cdcIncludeScopes; - } - - @Override - public String getIndexWhere() { - return indexWhere; - } - - @Override - public Long getMaxLookbackAge() { - return maxLookbackAge; - } - - @Override - public Map getAncestorLastDDLTimestampMap() { - return ancestorLastDDLTimestampMap; - } - - private void buildIndexWhereExpression(PhoenixConnection connection) throws SQLException { - PhoenixPreparedStatement - pstmt = - new PhoenixPreparedStatement(connection, - "select * from " + SchemaUtil.getTableName(parentSchemaName, parentTableName).getString() + " where " + indexWhere); - QueryPlan plan = pstmt.compileQuery(); - ParseNode where = plan.getStatement().getWhere(); - plan.getContext().setResolver(FromCompiler.getResolver(plan.getTableRef())); - indexWhereExpression = transformDNF(where, plan.getContext()); - indexWhereColumns = - Sets.newHashSetWithExpectedSize(plan.getContext().getWhereConditionColumns().size()); - for (Pair column : plan.getContext().getWhereConditionColumns()) { - indexWhereColumns.add(new ColumnReference(column.getFirst(), column.getSecond())); - } - } - @Override - public Expression getIndexWhereExpression(PhoenixConnection connection) throws SQLException { - if (indexWhereExpression == null && indexWhere != null) { - buildIndexWhereExpression(connection); - } - return indexWhereExpression; - } - - @Override - public Set 
getIndexWhereColumns(PhoenixConnection connection) - throws SQLException { - if (indexWhereColumns == null && indexWhere != null) { - buildIndexWhereExpression(connection); - } - return indexWhereColumns; - } - - @Override - public byte[] getRowKeyMatcher() { - return rowKeyMatcher; - } - - private static final class KVColumnFamilyQualifier { - @Nonnull - private final String colFamilyName; - @Nonnull - private final byte[] colQualifier; - - public KVColumnFamilyQualifier(String colFamilyName, byte[] colQualifier) { - Preconditions.checkArgument(colFamilyName != null && colQualifier != null, - "None of the arguments, column family name or column qualifier can be null"); - this.colFamilyName = colFamilyName; - this.colQualifier = colQualifier; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + colFamilyName.hashCode(); - result = prime * result + Arrays.hashCode(colQualifier); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - KVColumnFamilyQualifier other = (KVColumnFamilyQualifier) obj; - if (!colFamilyName.equals(other.colFamilyName)) return false; - if (!Arrays.equals(colQualifier, other.colQualifier)) return false; - return true; - } - - } - - @Override - public Map getPropertyValues() { - return Collections.unmodifiableMap(propertyValues); - } - - @Override - public Map getDefaultPropertyValues() { - Map map = new HashMap<>(); - map.put(DISABLE_WAL, String.valueOf(DEFAULT_DISABLE_WAL)); - map.put(IMMUTABLE_ROWS, String.valueOf(DEFAULT_IMMUTABLE_ROWS)); - map.put(TRANSACTION_PROVIDER, DEFAULT_TRANSACTION_PROVIDER); - map.put(IMMUTABLE_STORAGE_SCHEME, DEFAULT_IMMUTABLE_STORAGE_SCHEME); - map.put(COLUMN_ENCODED_BYTES, String.valueOf(DEFAULT_COLUMN_ENCODED_BYTES)); - map.put(UPDATE_CACHE_FREQUENCY, String.valueOf(DEFAULT_UPDATE_CACHE_FREQUENCY)); - map.put(USE_STATS_FOR_PARALLELIZATION, String.valueOf(DEFAULT_USE_STATS_FOR_PARALLELIZATION)); - map.put(TRANSACTIONAL, String.valueOf(DEFAULT_TRANSACTIONAL)); - map.put(MULTI_TENANT, String.valueOf(DEFAULT_MULTI_TENANT)); - map.put(SALT_BUCKETS, String.valueOf(DEFAULT_SALT_BUCKETS)); - map.put(DEFAULT_COLUMN_FAMILY_NAME, String.valueOf(DEFAULT_COLUMN_FAMILY)); - return Collections.unmodifiableMap(map); - } + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + KVColumnFamilyQualifier other = (KVColumnFamilyQualifier) obj; + if (!colFamilyName.equals(other.colFamilyName)) return false; + if (!Arrays.equals(colQualifier, other.colQualifier)) return false; + return true; + } + + } + + @Override + public Map getPropertyValues() { + return Collections.unmodifiableMap(propertyValues); + } + + @Override + public Map getDefaultPropertyValues() { + Map map = new HashMap<>(); + map.put(DISABLE_WAL, String.valueOf(DEFAULT_DISABLE_WAL)); + map.put(IMMUTABLE_ROWS, String.valueOf(DEFAULT_IMMUTABLE_ROWS)); + map.put(TRANSACTION_PROVIDER, DEFAULT_TRANSACTION_PROVIDER); + map.put(IMMUTABLE_STORAGE_SCHEME, DEFAULT_IMMUTABLE_STORAGE_SCHEME); + map.put(COLUMN_ENCODED_BYTES, String.valueOf(DEFAULT_COLUMN_ENCODED_BYTES)); + map.put(UPDATE_CACHE_FREQUENCY, String.valueOf(DEFAULT_UPDATE_CACHE_FREQUENCY)); + map.put(USE_STATS_FOR_PARALLELIZATION, String.valueOf(DEFAULT_USE_STATS_FOR_PARALLELIZATION)); + map.put(TRANSACTIONAL, String.valueOf(DEFAULT_TRANSACTIONAL)); + map.put(MULTI_TENANT, 
String.valueOf(DEFAULT_MULTI_TENANT)); + map.put(SALT_BUCKETS, String.valueOf(DEFAULT_SALT_BUCKETS)); + map.put(DEFAULT_COLUMN_FAMILY_NAME, String.valueOf(DEFAULT_COLUMN_FAMILY)); + return Collections.unmodifiableMap(map); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableKey.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableKey.java index a0204b85415..07cf435bce3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableKey.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableKey.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,69 +18,71 @@ package org.apache.phoenix.schema; import org.apache.phoenix.query.QueryConstants; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.SchemaUtil; public class PTableKey { - private final PName tenantId; - private final String name; - private final String schemaName; - private final String tableName; - - public PTableKey(PName tenantId, String name) { - Preconditions.checkNotNull(name); - this.tenantId = tenantId; - if (name.indexOf(QueryConstants.NAMESPACE_SEPARATOR) != -1) { - this.name = name.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); - } else { - this.name = name; - } - this.schemaName = SchemaUtil.getSchemaNameFromFullName(this.name); - this.tableName = SchemaUtil.getTableNameFromFullName(this.name); - } + private final PName tenantId; + private final String name; + private final String schemaName; + private final String tableName; - public PName getTenantId() { - return tenantId; + public PTableKey(PName tenantId, String name) { + Preconditions.checkNotNull(name); + this.tenantId = tenantId; + if (name.indexOf(QueryConstants.NAMESPACE_SEPARATOR) != -1) { + this.name = name.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + } else { + this.name = name; } + this.schemaName = SchemaUtil.getSchemaNameFromFullName(this.name); + this.tableName = SchemaUtil.getTableNameFromFullName(this.name); + } - public String getName() { - return name; - } + public PName getTenantId() { + return tenantId; + } - public String getSchemaName() { - return schemaName; - } + public String getName() { + return name; + } - public String getTableName() { - return tableName; - } - - @Override - public String toString() { - return name + ((tenantId == null || tenantId.getBytes().length==0) ? "" : " for " + tenantId.getString()); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((tenantId == null || tenantId.getBytes().length==0) ? 
0 : tenantId.hashCode()); - result = prime * result + name.hashCode(); - return result; - } + public String getSchemaName() { + return schemaName; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - PTableKey other = (PTableKey)obj; - if (!name.equals(other.name)) return false; - if (tenantId == null) { - if (other.tenantId != null) return false; - } else if (!tenantId.equals(other.tenantId)) return false; - return true; - } + public String getTableName() { + return tableName; + } + + @Override + public String toString() { + return name + ((tenantId == null || tenantId.getBytes().length == 0) + ? "" + : " for " + tenantId.getString()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((tenantId == null || tenantId.getBytes().length == 0) ? 0 : tenantId.hashCode()); + result = prime * result + name.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + PTableKey other = (PTableKey) obj; + if (!name.equals(other.name)) return false; + if (tenantId == null) { + if (other.tenantId != null) return false; + } else if (!tenantId.equals(other.tenantId)) return false; + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRef.java index 79ecf43b660..4716c23dffc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,32 +18,32 @@ package org.apache.phoenix.schema; public abstract class PTableRef { - - protected final int estSize; - protected final long createTime; - protected final long resolvedTimeStamp; - - public PTableRef(long createTime, long resolvedTime, int estimatedSize) { - this.estSize = estimatedSize; - this.resolvedTimeStamp = resolvedTime; - this.createTime = createTime; - } - - /** - * Tracks how long this entry has been in the cache - * @return time in milliseconds for how long this entry has been in the cache. - */ - public long getCreateTime() { - return createTime; - } - - public abstract PTable getTable(); - public long getResolvedTimeStamp() { - return resolvedTimeStamp; - } - - public int getEstimatedSize() { - return estSize; - } -} \ No newline at end of file + protected final int estSize; + protected final long createTime; + protected final long resolvedTimeStamp; + + public PTableRef(long createTime, long resolvedTime, int estimatedSize) { + this.estSize = estimatedSize; + this.resolvedTimeStamp = resolvedTime; + this.createTime = createTime; + } + + /** + * Tracks how long this entry has been in the cache + * @return time in milliseconds for how long this entry has been in the cache. 
+ */ + public long getCreateTime() { + return createTime; + } + + public abstract PTable getTable(); + + public long getResolvedTimeStamp() { + return resolvedTimeStamp; + } + + public int getEstimatedSize() { + return estSize; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java index 14eb23559d1..2d8e47074a3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,31 +22,31 @@ import org.apache.phoenix.util.ReadOnlyProps; public class PTableRefFactory { - public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) { - return new PTableRefImpl(table, lastAccessTime, resolvedTime, table.getEstimatedSize()); - } + public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) { + return new PTableRefImpl(table, lastAccessTime, resolvedTime, table.getEstimatedSize()); + } - public PTableRef makePTableRef(PTableRef tableRef) { - return new PTableRefImpl(tableRef); - } + public PTableRef makePTableRef(PTableRef tableRef) { + return new PTableRefImpl(tableRef); + } - private static final PTableRefFactory INSTANCE = new PTableRefFactory(); + private static final PTableRefFactory INSTANCE = new PTableRefFactory(); - public static enum Encoding { - OBJECT, PROTOBUF - }; + public static enum Encoding { + OBJECT, + PROTOBUF + }; - public static PTableRefFactory getFactory(ReadOnlyProps props) { - String encodingEnumString = - props.get(QueryServices.CLIENT_CACHE_ENCODING, - QueryServicesOptions.DEFAULT_CLIENT_CACHE_ENCODING); - Encoding encoding = Encoding.valueOf(encodingEnumString.toUpperCase()); - switch (encoding) { - case PROTOBUF: - return SerializedPTableRefFactory.getFactory(); - case OBJECT: - default: - return INSTANCE; - } + public static PTableRefFactory getFactory(ReadOnlyProps props) { + String encodingEnumString = props.get(QueryServices.CLIENT_CACHE_ENCODING, + QueryServicesOptions.DEFAULT_CLIENT_CACHE_ENCODING); + Encoding encoding = Encoding.valueOf(encodingEnumString.toUpperCase()); + switch (encoding) { + case PROTOBUF: + return SerializedPTableRefFactory.getFactory(); + case OBJECT: + default: + return INSTANCE; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java index db1cdc358d6..20ea515f4cd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableRefImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,23 +17,22 @@ */ package org.apache.phoenix.schema; - public class PTableRefImpl extends PTableRef { - - private final PTable table; - - public PTableRefImpl(PTable table, long lastAccessTime, long resolvedTime, int estimatedSize) { - super(lastAccessTime, resolvedTime, estimatedSize); - this.table = table; - } - public PTableRefImpl(PTableRef tableRef) { - super(tableRef.getCreateTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize()); - this.table = tableRef.getTable(); - } + private final PTable table; + + public PTableRefImpl(PTable table, long lastAccessTime, long resolvedTime, int estimatedSize) { + super(lastAccessTime, resolvedTime, estimatedSize); + this.table = table; + } + + public PTableRefImpl(PTableRef tableRef) { + super(tableRef.getCreateTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize()); + this.table = tableRef.getTable(); + } - @Override - public PTable getTable() { - return table; - } + @Override + public PTable getTable() { + return table; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableType.java index d89f1bd36ae..8266f61bc91 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/PTableType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,92 +21,94 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - public enum PTableType { - SYSTEM("s", "SYSTEM TABLE"), - TABLE("u", "TABLE"), - VIEW("v", "VIEW"), - INDEX("i", "INDEX"), - PROJECTED("p", "PROJECTED"), - CDC("c", "CDC"), - SUBQUERY("q", "SUBQUERY"), - ; + SYSTEM("s", "SYSTEM TABLE"), + TABLE("u", "TABLE"), + VIEW("v", "VIEW"), + INDEX("i", "INDEX"), + PROJECTED("p", "PROJECTED"), + CDC("c", "CDC"), + SUBQUERY("q", "SUBQUERY"),; - private final PName value; - private final String serializedValue; - - private PTableType(String serializedValue, String value) { - this.serializedValue = serializedValue; - this.value = PNameFactory.newName(value); - } - - public String getSerializedValue() { - return serializedValue; - } - - public PName getValue() { - return value; - } - - @Override - public String toString() { - return value.getString(); + private final PName value; + private final String serializedValue; + + private PTableType(String serializedValue, String value) { + this.serializedValue = serializedValue; + this.value = PNameFactory.newName(value); + } + + public String getSerializedValue() { + return serializedValue; + } + + public PName getValue() { + return value; + } + + @Override + public String toString() { + return value.getString(); + } + + private static final PTableType[] FROM_SERIALIZED_VALUE; + private static final int FROM_SERIALIZED_VALUE_OFFSET; + private static final Map FROM_VALUE = + 
Maps.newHashMapWithExpectedSize(PTableType.values().length); + + static { + int minChar = Integer.MAX_VALUE; + int maxChar = Integer.MIN_VALUE; + for (PTableType type : PTableType.values()) { + char c = type.getSerializedValue().charAt(0); + if (c < minChar) { + minChar = c; + } + if (c > maxChar) { + maxChar = c; + } } - - private static final PTableType[] FROM_SERIALIZED_VALUE; - private static final int FROM_SERIALIZED_VALUE_OFFSET; - private static final Map FROM_VALUE = Maps.newHashMapWithExpectedSize(PTableType.values().length); - - static { - int minChar = Integer.MAX_VALUE; - int maxChar = Integer.MIN_VALUE; - for (PTableType type : PTableType.values()) { - char c = type.getSerializedValue().charAt(0); - if (c < minChar) { - minChar = c; - } - if (c > maxChar) { - maxChar = c; - } - } - FROM_SERIALIZED_VALUE_OFFSET = minChar; - FROM_SERIALIZED_VALUE = new PTableType[maxChar - minChar + 1]; - for (PTableType type : PTableType.values()) { - FROM_SERIALIZED_VALUE[type.getSerializedValue().charAt(0) - minChar] = type; - } + FROM_SERIALIZED_VALUE_OFFSET = minChar; + FROM_SERIALIZED_VALUE = new PTableType[maxChar - minChar + 1]; + for (PTableType type : PTableType.values()) { + FROM_SERIALIZED_VALUE[type.getSerializedValue().charAt(0) - minChar] = type; } - - static { - for (PTableType type : PTableType.values()) { - if (FROM_VALUE.put(type.getValue().getString(),type) != null) { - throw new IllegalStateException("Duplicate PTableType value of " + type.getValue().getString() + " is not allowed"); - } - } + } + + static { + for (PTableType type : PTableType.values()) { + if (FROM_VALUE.put(type.getValue().getString(), type) != null) { + throw new IllegalStateException( + "Duplicate PTableType value of " + type.getValue().getString() + " is not allowed"); + } } - - public static PTableType fromValue(String value) { - PTableType type = FROM_VALUE.get(value); - if (type == null) { - throw new IllegalArgumentException("Unable to PTableType enum for value of '" + value + "'"); - } - return type; + } + + public static PTableType fromValue(String value) { + PTableType type = FROM_VALUE.get(value); + if (type == null) { + throw new IllegalArgumentException("Unable to PTableType enum for value of '" + value + "'"); } - - public static PTableType fromSerializedValue(String serializedValue) { - if (serializedValue.length() == 1) { - int i = serializedValue.charAt(0) - FROM_SERIALIZED_VALUE_OFFSET; - if (i >= 0 && i < FROM_SERIALIZED_VALUE.length && FROM_SERIALIZED_VALUE[i] != null) { - return FROM_SERIALIZED_VALUE[i]; - } - } - throw new IllegalArgumentException("Unable to PTableType enum for serialized value of '" + serializedValue + "'"); + return type; + } + + public static PTableType fromSerializedValue(String serializedValue) { + if (serializedValue.length() == 1) { + int i = serializedValue.charAt(0) - FROM_SERIALIZED_VALUE_OFFSET; + if (i >= 0 && i < FROM_SERIALIZED_VALUE.length && FROM_SERIALIZED_VALUE[i] != null) { + return FROM_SERIALIZED_VALUE[i]; + } } - - public static PTableType fromSerializedValue(byte serializedByte) { - int i = serializedByte - FROM_SERIALIZED_VALUE_OFFSET; - if (i >= 0 && i < FROM_SERIALIZED_VALUE.length && FROM_SERIALIZED_VALUE[i] != null) { - return FROM_SERIALIZED_VALUE[i]; - } - throw new IllegalArgumentException("Unable to PTableType enum for serialized value of '" + (char)serializedByte + "'"); + throw new IllegalArgumentException( + "Unable to PTableType enum for serialized value of '" + serializedValue + "'"); + } + + public static PTableType 
fromSerializedValue(byte serializedByte) { + int i = serializedByte - FROM_SERIALIZED_VALUE_OFFSET; + if (i >= 0 && i < FROM_SERIALIZED_VALUE.length && FROM_SERIALIZED_VALUE[i] != null) { + return FROM_SERIALIZED_VALUE[i]; } + throw new IllegalArgumentException( + "Unable to PTableType enum for serialized value of '" + (char) serializedByte + "'"); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java index d8759825149..db0fed2d58f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,50 +18,51 @@ package org.apache.phoenix.schema; public class ProjectedColumn extends DelegateColumn { - - private final PName name; - private final PName familyName; - private final int position; - private final boolean nullable; - private final ColumnRef sourceColumnRef; - private final byte[] cq; - public ProjectedColumn(PName name, PName familyName, int position, boolean nullable, ColumnRef sourceColumnRef, byte[] cq) { - super(sourceColumnRef.getColumn()); - this.name = name; - this.familyName = familyName; - this.position = position; - this.nullable = nullable; - this.sourceColumnRef = sourceColumnRef; - this.cq = cq; - } - - @Override - public PName getName() { - return name; - } - - @Override - public PName getFamilyName() { - return familyName; - } - - @Override - public int getPosition() { - return position; - } - - @Override - public boolean isNullable() { - return nullable; - } - - @Override - public byte[] getColumnQualifierBytes() { - return cq; - } - - public ColumnRef getSourceColumnRef() { - return sourceColumnRef; - } + private final PName name; + private final PName familyName; + private final int position; + private final boolean nullable; + private final ColumnRef sourceColumnRef; + private final byte[] cq; + + public ProjectedColumn(PName name, PName familyName, int position, boolean nullable, + ColumnRef sourceColumnRef, byte[] cq) { + super(sourceColumnRef.getColumn()); + this.name = name; + this.familyName = familyName; + this.position = position; + this.nullable = nullable; + this.sourceColumnRef = sourceColumnRef; + this.cq = cq; + } + + @Override + public PName getName() { + return name; + } + + @Override + public PName getFamilyName() { + return familyName; + } + + @Override + public int getPosition() { + return position; + } + + @Override + public boolean isNullable() { + return nullable; + } + + @Override + public byte[] getColumnQualifierBytes() { + return cq; + } + + public ColumnRef getSourceColumnRef() { + return sourceColumnRef; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ReadOnlyTableException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ReadOnlyTableException.java index 71115d8b75c..d4a801f9d03 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ReadOnlyTableException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ReadOnlyTableException.java @@ -7,7 
+7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,33 +23,36 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when an attempt is made to modify or write to a read-only table. - * - * * @since 0.1 */ public class ReadOnlyTableException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.READ_ONLY_TABLE; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.READ_ONLY_TABLE; - public ReadOnlyTableException(String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).toString(), - code.getSQLState(), code.getErrorCode()); - } + public ReadOnlyTableException(String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .toString(), code.getSQLState(), code.getErrorCode()); + } - public ReadOnlyTableException(String message, String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).setMessage(message).toString(), - code.getSQLState(), code.getErrorCode()); - } + public ReadOnlyTableException(String message, String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setMessage(message).toString(), code.getSQLState(), code.getErrorCode()); + } - public ReadOnlyTableException(String message, String schemaName, String tableName, Throwable cause) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).setRootCause(cause).setMessage(message).toString(), - code.getSQLState(), code.getErrorCode(), cause); - } + public ReadOnlyTableException(String message, String schemaName, String tableName, + Throwable cause) { + super( + new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setRootCause(cause).setMessage(message).toString(), + code.getSQLState(), code.getErrorCode(), cause); + } - public ReadOnlyTableException(String message, String schemaName, String tableName, String familyName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).setFamilyName(familyName).setMessage(message).toString(), - code.getSQLState(), code.getErrorCode()); - } + public ReadOnlyTableException(String message, String schemaName, String tableName, + String familyName) { + super( + new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setFamilyName(familyName).setMessage(message).toString(), + code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeySchema.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeySchema.java index e3241cdc16e..942a9362509 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeySchema.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeySchema.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,462 +27,467 @@ import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.SchemaUtil; - /** - * - * Schema for the bytes in a RowKey. For the RowKey, we use a null byte - * to terminate a variable length type, while for KeyValue bytes we - * write the length of the var char preceding the value. We can't do - * that for a RowKey because it would affect the sort order. - * - * + * Schema for the bytes in a RowKey. For the RowKey, we use a null byte to terminate a variable + * length type, while for KeyValue bytes we write the length of the var char preceding the value. We + * can't do that for a RowKey because it would affect the sort order. * @since 0.1 */ public class RowKeySchema extends ValueSchema { - public static final RowKeySchema EMPTY_SCHEMA = new RowKeySchema(0,Collections.emptyList(), true) - ; + public static final RowKeySchema EMPTY_SCHEMA = + new RowKeySchema(0, Collections. emptyList(), true); - private static final int SEPARATOR_OFFSET_NON_ENCODED_TYPES = 1; - private static final int SEPARATOR_OFFSET_ENCODED_TYPES = 2; + private static final int SEPARATOR_OFFSET_NON_ENCODED_TYPES = 1; + private static final int SEPARATOR_OFFSET_ENCODED_TYPES = 2; - public RowKeySchema() { - } - - protected RowKeySchema(int minNullable, List fields, boolean rowKeyOrderOptimizable) { - super(minNullable, fields, rowKeyOrderOptimizable); - } + public RowKeySchema() { + } - public static class RowKeySchemaBuilder extends ValueSchemaBuilder { - private boolean rowKeyOrderOptimizable = false; - - public RowKeySchemaBuilder(int maxFields) { - super(maxFields); - setMaxFields(maxFields); - } - - @Override - public RowKeySchemaBuilder addField(PDatum datum, boolean isNullable, SortOrder sortOrder) { - super.addField(datum, isNullable, sortOrder); - return this; - } + protected RowKeySchema(int minNullable, List fields, boolean rowKeyOrderOptimizable) { + super(minNullable, fields, rowKeyOrderOptimizable); + } - public RowKeySchemaBuilder rowKeyOrderOptimizable(boolean rowKeyOrderOptimizable) { - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - return this; - } + public static class RowKeySchemaBuilder extends ValueSchemaBuilder { + private boolean rowKeyOrderOptimizable = false; - @Override - public RowKeySchema build() { - List condensedFields = buildFields(); - return new RowKeySchema(this.minNullable, condensedFields, rowKeyOrderOptimizable); - } + public RowKeySchemaBuilder(int maxFields) { + super(maxFields); + setMaxFields(maxFields); } - public boolean rowKeyOrderOptimizable() { - return rowKeyOrderOptimizable; + @Override + public RowKeySchemaBuilder addField(PDatum datum, boolean isNullable, SortOrder sortOrder) { + super.addField(datum, isNullable, sortOrder); + return this; } - public int getMaxFields() { - return this.getMinNullable(); + public RowKeySchemaBuilder rowKeyOrderOptimizable(boolean rowKeyOrderOptimizable) { + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + return this; } - // "iterator" initialization methods that initialize a bytes ptr with a row key for further navigation - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - public Boolean iterator(byte[] src, int srcOffset, int srcLength, 
ImmutableBytesWritable ptr, int position,int extraColumnSpan) { - Boolean hasValue = null; - ptr.set(src, srcOffset, 0); - int maxOffset = srcOffset + srcLength; - for (int i = 0; i < position; i++) { - hasValue = next(ptr, i, maxOffset); - } - if(extraColumnSpan > 0) { - readExtraFields(ptr, position, maxOffset, extraColumnSpan); - } - return hasValue; + @Override + public RowKeySchema build() { + List condensedFields = buildFields(); + return new RowKeySchema(this.minNullable, condensedFields, rowKeyOrderOptimizable); } + } - public Boolean iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr, int position) { - return iterator(src, srcOffset,srcLength, ptr, position,0); - } - - public Boolean iterator(ImmutableBytesWritable srcPtr, ImmutableBytesWritable ptr, int position) { - return iterator(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), ptr, position); - } - - public Boolean iterator(byte[] src, ImmutableBytesWritable ptr, int position) { - return iterator(src, 0, src.length, ptr, position); + public boolean rowKeyOrderOptimizable() { + return rowKeyOrderOptimizable; + } + + public int getMaxFields() { + return this.getMinNullable(); + } + + // "iterator" initialization methods that initialize a bytes ptr with a row key for further + // navigation + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + public Boolean iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr, + int position, int extraColumnSpan) { + Boolean hasValue = null; + ptr.set(src, srcOffset, 0); + int maxOffset = srcOffset + srcLength; + for (int i = 0; i < position; i++) { + hasValue = next(ptr, i, maxOffset); } - - public int iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr) { - int maxOffset = srcOffset + srcLength; - iterator(src, srcOffset, srcLength, ptr, 0); - return maxOffset; + if (extraColumnSpan > 0) { + readExtraFields(ptr, position, maxOffset, extraColumnSpan); } - - public int iterator(byte[] src, ImmutableBytesWritable ptr) { - return iterator(src, 0, src.length, ptr); + return hasValue; + } + + public Boolean iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr, + int position) { + return iterator(src, srcOffset, srcLength, ptr, position, 0); + } + + public Boolean iterator(ImmutableBytesWritable srcPtr, ImmutableBytesWritable ptr, int position) { + return iterator(srcPtr.get(), srcPtr.getOffset(), srcPtr.getLength(), ptr, position); + } + + public Boolean iterator(byte[] src, ImmutableBytesWritable ptr, int position) { + return iterator(src, 0, src.length, ptr, position); + } + + public int iterator(byte[] src, int srcOffset, int srcLength, ImmutableBytesWritable ptr) { + int maxOffset = srcOffset + srcLength; + iterator(src, srcOffset, srcLength, ptr, 0); + return maxOffset; + } + + public int iterator(byte[] src, ImmutableBytesWritable ptr) { + return iterator(src, 0, src.length, ptr); + } + + public int iterator(ImmutableBytesWritable ptr) { + return iterator(ptr.get(), ptr.getOffset(), ptr.getLength(), ptr); + } + + // navigation methods that "select" different chunks of the row key held in a bytes ptr + + /** + * Move the bytes ptr to the next position in the row key relative to its current position. You + * must have a complete row key. Use @link {@link #position(ImmutableBytesWritable, int, int)} if + * you have a partial row key. 
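To make the separator convention described in the RowKeySchema class comment concrete, here is a minimal standalone sketch in plain Java, not the Phoenix API, that splits a composite row key into its variable-length fields by scanning for the zero separator byte. It assumes ASC sort order and non-encoded types, where the last field carries no trailing separator; the class name and helper are illustrative only.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Simplified sketch: variable-length row key fields are terminated by a zero
// separator byte (ASC, non-encoded types), so finding the "next field" means
// scanning forward to the next 0x00. Illustrative names, not Phoenix API.
public class RowKeySeparatorSketch {
  static List<byte[]> splitVarLengthFields(byte[] rowKey) {
    List<byte[]> fields = new ArrayList<>();
    int start = 0;
    for (int i = 0; i <= rowKey.length; i++) {
      // The last field has no trailing separator; treat end-of-key like one.
      if (i == rowKey.length || rowKey[i] == 0) {
        fields.add(Arrays.copyOfRange(rowKey, start, i));
        start = i + 1;
      }
    }
    return fields;
  }

  public static void main(String[] args) {
    // Two VARCHAR-like fields "a1" and "b22" concatenated with a zero separator.
    byte[] rowKey = { 'a', '1', 0, 'b', '2', '2' };
    for (byte[] field : splitVarLengthFields(rowKey)) {
      System.out.println(new String(field, StandardCharsets.UTF_8)); // prints "a1" then "b22"
    }
  }
}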
+ * @param ptr bytes pointer pointing to the value at the positional index provided. + * @param position zero-based index of the next field in the value schema + * @param maxOffset max possible offset value when iterating + * @return true if a value was found and ptr was set, false if the value is null and ptr was not + * set, and null if the value is null and there are no more values + */ + public Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset) { + return next(ptr, position, maxOffset, false); + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + private Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset, boolean isFirst) { + if (ptr.getOffset() + ptr.getLength() >= maxOffset) { + ptr.set(ptr.get(), maxOffset, 0); + return null; } - - public int iterator(ImmutableBytesWritable ptr) { - return iterator(ptr.get(),ptr.getOffset(),ptr.getLength(), ptr); + if (position >= getFieldCount()) { + return null; } - - // navigation methods that "select" different chunks of the row key held in a bytes ptr - - /** - * Move the bytes ptr to the next position in the row key relative to its current position. You - * must have a complete row key. Use @link {@link #position(ImmutableBytesWritable, int, int)} - * if you have a partial row key. - * @param ptr bytes pointer pointing to the value at the positional index provided. - * @param position zero-based index of the next field in the value schema - * @param maxOffset max possible offset value when iterating - * @return true if a value was found and ptr was set, false if the value is null and ptr was not - * set, and null if the value is null and there are no more values - */ - public Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset) { - return next(ptr, position, maxOffset, false); + // Move the pointer past the current value and set length + // to 0 to ensure you never set the ptr past the end of the + // backing byte array. + ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength(), 0); + // If positioned at SEPARATOR_BYTE, skip it. + // Don't look back at previous fields if this is our first next call, as + // we may have a partial key for RVCs that doesn't include the leading field. + if (position > 0 && !isFirst && !getField(position - 1).getDataType().isFixedWidth()) { + if (getField(position - 1).getDataType() != PVarbinaryEncoded.INSTANCE) { + ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength() + SEPARATOR_OFFSET_NON_ENCODED_TYPES, + 0); + } else { + ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength() + SEPARATOR_OFFSET_ENCODED_TYPES, 0); + } } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - private Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset, boolean isFirst) { - if (ptr.getOffset() + ptr.getLength() >= maxOffset) { - ptr.set(ptr.get(), maxOffset, 0); - return null; - } - if (position >= getFieldCount()) { - return null; - } - // Move the pointer past the current value and set length - // to 0 to ensure you never set the ptr past the end of the - // backing byte array. - ptr.set(ptr.get(), ptr.getOffset() + ptr.getLength(), 0); - // If positioned at SEPARATOR_BYTE, skip it. - // Don't look back at previous fields if this is our first next call, as - // we may have a partial key for RVCs that doesn't include the leading field. 
- if (position > 0 && !isFirst && !getField(position - 1).getDataType().isFixedWidth()) { - if (getField(position - 1).getDataType() != PVarbinaryEncoded.INSTANCE) { - ptr.set(ptr.get(), - ptr.getOffset() + ptr.getLength() + SEPARATOR_OFFSET_NON_ENCODED_TYPES, 0); - } else { - ptr.set(ptr.get(), - ptr.getOffset() + ptr.getLength() + SEPARATOR_OFFSET_ENCODED_TYPES, 0); - } + Field field = this.getField(position); + if (field.getDataType().isFixedWidth()) { + // It is possible that the number of remaining row key bytes are less than the fixed + // width size. See PHOENIX-3968. + ptr.set(ptr.get(), ptr.getOffset(), + Math.min(maxOffset - ptr.getOffset(), field.getByteSize())); + } else { + if (position + 1 == getFieldCount()) { + // Last field has no terminator unless it's descending sort order + int len = maxOffset - ptr.getOffset(); + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + ptr.set(ptr.get(), ptr.getOffset(), + maxOffset - ptr.getOffset() + - (SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, len == 0, field) + == QueryConstants.DESC_SEPARATOR_BYTE ? 1 : 0)); + } else { + boolean ascSort = + !rowKeyOrderOptimizable || len == 0 || field.getSortOrder() == SortOrder.ASC; + int lastFieldTerminatorLen = ascSort ? 0 : 2; + ptr.set(ptr.get(), ptr.getOffset(), maxOffset - ptr.getOffset() - lastFieldTerminatorLen); } - Field field = this.getField(position); - if (field.getDataType().isFixedWidth()) { - // It is possible that the number of remaining row key bytes are less than the fixed - // width size. See PHOENIX-3968. - ptr.set(ptr.get(), ptr.getOffset(), - Math.min(maxOffset - ptr.getOffset(), field.getByteSize())); + } else { + byte[] buf = ptr.get(); + int offset = ptr.getOffset(); + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + if (offset < maxOffset && buf[offset] != QueryConstants.SEPARATOR_BYTE) { + byte sepByte = SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, field); + do { + offset++; + } while (offset < maxOffset && buf[offset] != sepByte); + } + ptr.set(buf, ptr.getOffset(), offset - ptr.getOffset()); } else { - if (position + 1 == getFieldCount()) { - // Last field has no terminator unless it's descending sort order - int len = maxOffset - ptr.getOffset(); - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - ptr.set(ptr.get(), ptr.getOffset(), maxOffset - ptr.getOffset() - ( - SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, len == 0, field) - == QueryConstants.DESC_SEPARATOR_BYTE ? 1 : 0)); - } else { - boolean ascSort = !rowKeyOrderOptimizable || len == 0 - || field.getSortOrder() == SortOrder.ASC; - int lastFieldTerminatorLen = ascSort ? 0 : 2; - ptr.set(ptr.get(), ptr.getOffset(), - maxOffset - ptr.getOffset() - lastFieldTerminatorLen); - } - } else { - byte[] buf = ptr.get(); - int offset = ptr.getOffset(); - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - if (offset < maxOffset && buf[offset] != QueryConstants.SEPARATOR_BYTE) { - byte sepByte = - SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, field); - do { - offset++; - } while (offset < maxOffset && buf[offset] != sepByte); - } - ptr.set(buf, ptr.getOffset(), offset - ptr.getOffset()); - } else { - if (offset < maxOffset && !SchemaUtil - .areSeparatorBytesForVarBinaryEncoded(buf, offset, SortOrder.ASC)) { - boolean ascSort = - !rowKeyOrderOptimizable || field.getSortOrder() == SortOrder.ASC; - do { - offset++; - } while (offset < maxOffset - && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, offset, - ascSort ? 
SortOrder.ASC : SortOrder.DESC)); - } - ptr.set(buf, ptr.getOffset(), offset - ptr.getOffset()); - } - } + if ( + offset < maxOffset + && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, offset, SortOrder.ASC) + ) { + boolean ascSort = !rowKeyOrderOptimizable || field.getSortOrder() == SortOrder.ASC; + do { + offset++; + } while ( + offset < maxOffset && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, offset, + ascSort ? SortOrder.ASC : SortOrder.DESC) + ); + } + ptr.set(buf, ptr.getOffset(), offset - ptr.getOffset()); } - return ptr.getLength() > 0; + } } + return ptr.getLength() > 0; + } - /** - * Like {@link #next(org.apache.hadoop.hbase.io.ImmutableBytesWritable, int, int)}, but also - * includes the next {@code extraSpan} additional fields in the bytes ptr. - * This allows multiple fields to be treated as one concatenated whole. - * @param ptr bytes pointer pointing to the value at the positional index provided. - * @param position zero-based index of the next field in the value schema - * @param maxOffset max possible offset value when iterating - * @param extraSpan the number of extra fields to expand the ptr to contain - * @return true if a value was found and ptr was set, false if the value is null and ptr was not - * set, and null if the value is null and there are no more values - */ - public int next(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) { - if (next(ptr, position, maxOffset) == null) { - return position-1; - } - return readExtraFields(ptr, position + 1, maxOffset, extraSpan); + /** + * Like {@link #next(org.apache.hadoop.hbase.io.ImmutableBytesWritable, int, int)}, but also + * includes the next {@code extraSpan} additional fields in the bytes ptr. This allows multiple + * fields to be treated as one concatenated whole. + * @param ptr bytes pointer pointing to the value at the positional index provided. + * @param position zero-based index of the next field in the value schema + * @param maxOffset max possible offset value when iterating + * @param extraSpan the number of extra fields to expand the ptr to contain + * @return true if a value was found and ptr was set, false if the value is null and ptr was not + * set, and null if the value is null and there are no more values + */ + public int next(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) { + if (next(ptr, position, maxOffset) == null) { + return position - 1; } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - public Boolean previous(ImmutableBytesWritable ptr, int position, int minOffset) { - if (position < 0) { - return null; - } - Field field = this.getField(position); - if (field.getDataType().isFixedWidth()) { - ptr.set(ptr.get(), ptr.getOffset()-field.getByteSize(), field.getByteSize()); - return true; - } - // If ptr has length of zero, it is assumed that we're at the end of the row key - int offsetAdjustment = position + 1 == this.getFieldCount() || ptr.getLength() == 0 ? 
0 : 1; - if (position == 0) { - ptr.set(ptr.get(), minOffset, ptr.getOffset() - minOffset - offsetAdjustment); - return true; - } - field = this.getField(position-1); - // Field before the one we want to position at is variable length - // In this case, we can search backwards for our separator byte - // to determine the length - if (!field.getDataType().isFixedWidth()) { - byte[] buf = ptr.get(); - int offset = ptr.getOffset()-1-offsetAdjustment; - // Separator always zero byte if zero length - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - if (offset > minOffset && buf[offset] != QueryConstants.SEPARATOR_BYTE) { - byte sepByte = - SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, field); - do { - offset--; - } while (offset > minOffset && buf[offset] != sepByte); - } - } else { - if (offset > minOffset && SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, - offset, SortOrder.ASC)) { - boolean ascSort = - !rowKeyOrderOptimizable || field.getSortOrder() == SortOrder.ASC; - do { - offset--; - } while (offset > minOffset && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded( - buf, offset, ascSort ? SortOrder.ASC : SortOrder.DESC)); - } - } - if (offset == minOffset) { // shouldn't happen - ptr.set(buf, minOffset, ptr.getOffset()-minOffset-1); - } else { - ptr.set(buf,offset+1,ptr.getOffset()-1-offsetAdjustment-offset); // Don't include null terminator in length - } - return true; - } - int i,fixedOffset = field.getByteSize(); - for (i = position-2; i >= 0 && this.getField(i).getDataType().isFixedWidth(); i--) { - fixedOffset += this.getField(i).getByteSize(); + return readExtraFields(ptr, position + 1, maxOffset, extraSpan); + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + public Boolean previous(ImmutableBytesWritable ptr, int position, int minOffset) { + if (position < 0) { + return null; + } + Field field = this.getField(position); + if (field.getDataType().isFixedWidth()) { + ptr.set(ptr.get(), ptr.getOffset() - field.getByteSize(), field.getByteSize()); + return true; + } + // If ptr has length of zero, it is assumed that we're at the end of the row key + int offsetAdjustment = position + 1 == this.getFieldCount() || ptr.getLength() == 0 ? 
0 : 1; + if (position == 0) { + ptr.set(ptr.get(), minOffset, ptr.getOffset() - minOffset - offsetAdjustment); + return true; + } + field = this.getField(position - 1); + // Field before the one we want to position at is variable length + // In this case, we can search backwards for our separator byte + // to determine the length + if (!field.getDataType().isFixedWidth()) { + byte[] buf = ptr.get(); + int offset = ptr.getOffset() - 1 - offsetAdjustment; + // Separator always zero byte if zero length + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + if (offset > minOffset && buf[offset] != QueryConstants.SEPARATOR_BYTE) { + byte sepByte = SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, field); + do { + offset--; + } while (offset > minOffset && buf[offset] != sepByte); } - // All of the previous fields are fixed width, so we can calculate the offset - // based on the total fixed offset - if (i < 0) { - int length = ptr.getOffset() - fixedOffset - minOffset - offsetAdjustment; - ptr.set(ptr.get(),minOffset+fixedOffset, length); - return true; + } else { + if ( + offset > minOffset + && SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, offset, SortOrder.ASC) + ) { + boolean ascSort = !rowKeyOrderOptimizable || field.getSortOrder() == SortOrder.ASC; + do { + offset--; + } while ( + offset > minOffset && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(buf, offset, + ascSort ? SortOrder.ASC : SortOrder.DESC) + ); } - // Otherwise we're stuck with starting from the minOffset and working all the way forward, - // because we can't infer the length of the previous position. - return iterator(ptr.get(), minOffset, ptr.getOffset() - minOffset - offsetAdjustment, ptr, position+1); + } + if (offset == minOffset) { // shouldn't happen + ptr.set(buf, minOffset, ptr.getOffset() - minOffset - 1); + } else { + ptr.set(buf, offset + 1, ptr.getOffset() - 1 - offsetAdjustment - offset); // Don't include + // null + // terminator in + // length + } + return true; + } + int i, fixedOffset = field.getByteSize(); + for (i = position - 2; i >= 0 && this.getField(i).getDataType().isFixedWidth(); i--) { + fixedOffset += this.getField(i).getByteSize(); + } + // All of the previous fields are fixed width, so we can calculate the offset + // based on the total fixed offset + if (i < 0) { + int length = ptr.getOffset() - fixedOffset - minOffset - offsetAdjustment; + ptr.set(ptr.get(), minOffset + fixedOffset, length); + return true; } + // Otherwise we're stuck with starting from the minOffset and working all the way forward, + // because we can't infer the length of the previous position. 
+ return iterator(ptr.get(), minOffset, ptr.getOffset() - minOffset - offsetAdjustment, ptr, + position + 1); + } - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="NP_BOOLEAN_RETURN_NULL", - justification="Designed to return null.") - public Boolean reposition(ImmutableBytesWritable ptr, int oldPosition, int newPosition, int minOffset, int maxOffset) { - if (newPosition == oldPosition) { - return ptr.getLength() > 0; + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_BOOLEAN_RETURN_NULL", + justification = "Designed to return null.") + public Boolean reposition(ImmutableBytesWritable ptr, int oldPosition, int newPosition, + int minOffset, int maxOffset) { + if (newPosition == oldPosition) { + return ptr.getLength() > 0; + } + Boolean hasValue = null; + if (newPosition > oldPosition) { + do { + hasValue = next(ptr, ++oldPosition, maxOffset); + } while (hasValue != null && oldPosition < newPosition); + } else { + int nVarLengthFromBeginning = 0; + for (int i = 0; i <= newPosition; i++) { + if (!this.getField(i).getDataType().isFixedWidth()) { + nVarLengthFromBeginning++; } - Boolean hasValue = null; - if (newPosition > oldPosition) { - do { - hasValue = next(ptr, ++oldPosition, maxOffset); - } while (hasValue != null && oldPosition < newPosition) ; - } else { - int nVarLengthFromBeginning = 0; - for (int i = 0; i <= newPosition; i++) { - if (!this.getField(i).getDataType().isFixedWidth()) { - nVarLengthFromBeginning++; - } - } - int nVarLengthBetween = 0; - for (int i = oldPosition - 1; i >= newPosition; i--) { - if (!this.getField(i).getDataType().isFixedWidth()) { - nVarLengthBetween++; - } - } - if (nVarLengthBetween > nVarLengthFromBeginning) { - return iterator(ptr.get(), minOffset, maxOffset-minOffset, ptr, newPosition+1); - } - do { - hasValue = previous(ptr, --oldPosition, minOffset); - } while (hasValue != null && oldPosition > newPosition); + } + int nVarLengthBetween = 0; + for (int i = oldPosition - 1; i >= newPosition; i--) { + if (!this.getField(i).getDataType().isFixedWidth()) { + nVarLengthBetween++; } - - return hasValue; + } + if (nVarLengthBetween > nVarLengthFromBeginning) { + return iterator(ptr.get(), minOffset, maxOffset - minOffset, ptr, newPosition + 1); + } + do { + hasValue = previous(ptr, --oldPosition, minOffset); + } while (hasValue != null && oldPosition > newPosition); } - /** - * Like {@link #reposition(org.apache.hadoop.hbase.io.ImmutableBytesWritable, int, int, int, int)}, - * but also includes the next {@code extraSpan} additional fields in the bytes ptr. - * This allows multiple fields to be treated as one concatenated whole. - * @param extraSpan the number of extra fields to expand the ptr to contain. - */ - public Boolean reposition(ImmutableBytesWritable ptr, int oldPosition, int newPosition, int minOffset, int maxOffset, int extraSpan) { - Boolean returnValue = reposition(ptr, oldPosition, newPosition, minOffset, maxOffset); - readExtraFields(ptr, newPosition + 1, maxOffset, extraSpan); - return returnValue; - } - - - /** - * Positions ptr at the part of the row key for the field at endPosition, - * starting from the field at position. - * @param ptr bytes pointer that points to row key being traversed. - * @param position the starting field position - * @param endPosition the ending field position - * @return true if the row key has a value at endPosition with ptr pointing to - * that value and false otherwise with ptr not necessarily set. 
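As a rough illustration of what positioning a pointer over fields [position, endPosition] involves, the following self-contained sketch walks a byte array the same way: fixed-width fields advance by their declared byte size, variable-width fields advance to the next zero separator. The layout, the widths array, and the helper names are assumptions chosen for the example, not the Phoenix schema API.

import java.nio.charset.StandardCharsets;

// Simplified analogue of positioning over row key fields [position, endPosition].
// width > 0 means a fixed-width field; width == 0 means variable width,
// terminated by a zero separator (ASC, non-encoded types). Illustrative only.
public class RowKeyPositionSketch {
  static int[] span(byte[] key, int[] widths, int position, int endPosition) {
    int offset = 0;
    int start = -1, end = -1;
    for (int field = 0; field <= endPosition; field++) {
      if (field == position) {
        start = offset;               // first byte of the requested span
      }
      if (widths[field] > 0) {        // fixed-width field: advance by its size
        offset += widths[field];
      } else {                        // variable-width field: scan to separator
        while (offset < key.length && key[offset] != 0) {
          offset++;
        }
      }
      end = offset;                   // end of this field's value bytes
      if (widths[field] == 0 && offset < key.length) {
        offset++;                     // step over the zero separator
      }
    }
    return new int[] { start, end - start };
  }

  public static void main(String[] args) {
    // Layout: 2-byte fixed field | VARCHAR "abc" | 0x00 | VARCHAR "de" (last, no separator)
    byte[] key = { 1, 2, 'a', 'b', 'c', 0, 'd', 'e' };
    int[] widths = { 2, 0, 0 };
    int[] s1 = span(key, widths, 1, 1);
    int[] s2 = span(key, widths, 2, 2);
    System.out.println(new String(key, s1[0], s1[1], StandardCharsets.UTF_8)); // prints "abc"
    System.out.println(new String(key, s2[0], s2[1], StandardCharsets.UTF_8)); // prints "de"
  }
}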
- */ - public boolean position(ImmutableBytesWritable ptr, int position, int endPosition) { - int maxOffset = ptr.getLength(); - this.iterator(ptr); // initialize for iteration - boolean isFirst = true; - while (position <= endPosition) { - if (this.next(ptr, position++, maxOffset, isFirst) == null) { - return false; - } - isFirst = false; - } - return true; + return hasValue; + } + + /** + * Like + * {@link #reposition(org.apache.hadoop.hbase.io.ImmutableBytesWritable, int, int, int, int)}, but + * also includes the next {@code extraSpan} additional fields in the bytes ptr. This allows + * multiple fields to be treated as one concatenated whole. + * @param extraSpan the number of extra fields to expand the ptr to contain. + */ + public Boolean reposition(ImmutableBytesWritable ptr, int oldPosition, int newPosition, + int minOffset, int maxOffset, int extraSpan) { + Boolean returnValue = reposition(ptr, oldPosition, newPosition, minOffset, maxOffset); + readExtraFields(ptr, newPosition + 1, maxOffset, extraSpan); + return returnValue; + } + + /** + * Positions ptr at the part of the row key for the field at endPosition, starting from the field + * at position. + * @param ptr bytes pointer that points to row key being traversed. + * @param position the starting field position + * @param endPosition the ending field position + * @return true if the row key has a value at endPosition with ptr pointing to that value and + * false otherwise with ptr not necessarily set. + */ + public boolean position(ImmutableBytesWritable ptr, int position, int endPosition) { + int maxOffset = ptr.getLength(); + this.iterator(ptr); // initialize for iteration + boolean isFirst = true; + while (position <= endPosition) { + if (this.next(ptr, position++, maxOffset, isFirst) == null) { + return false; + } + isFirst = false; } + return true; + } + /** + * Extends the boundaries of the {@code ptr} to contain the next {@code extraSpan} fields in the + * row key. + * @param ptr bytes pointer pointing to the value at the positional index provided. + * @param position row key position of the first extra key to read + * @param maxOffset the maximum offset into the bytes pointer to allow + * @param extraSpan the number of extra fields to expand the ptr to contain. + */ + private int readExtraFields(ImmutableBytesWritable ptr, int position, int maxOffset, + int extraSpan) { + int initialOffset = ptr.getOffset(); - /** - * Extends the boundaries of the {@code ptr} to contain the next {@code extraSpan} fields in the row key. - * @param ptr bytes pointer pointing to the value at the positional index provided. - * @param position row key position of the first extra key to read - * @param maxOffset the maximum offset into the bytes pointer to allow - * @param extraSpan the number of extra fields to expand the ptr to contain. - */ - private int readExtraFields(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) { - int initialOffset = ptr.getOffset(); - - int i = 0; - Boolean hasValue = Boolean.FALSE; - for(i = 0; i < extraSpan; i++) { - hasValue = next(ptr, position + i, maxOffset); - - if(hasValue == null) { - break; - } - } + int i = 0; + Boolean hasValue = Boolean.FALSE; + for (i = 0; i < extraSpan; i++) { + hasValue = next(ptr, position + i, maxOffset); - int finalLength = ptr.getOffset() - initialOffset + ptr.getLength(); - ptr.set(ptr.get(), initialOffset, finalLength); - return position + i - (Boolean.FALSE.equals(hasValue) ? 
1 : 0); + if (hasValue == null) { + break; + } } - public int computeMaxSpan(int pkPos, KeyRange result, ImmutableBytesWritable ptr) { - int maxOffset = iterator(result.getLowerRange(), ptr); - int lowerSpan = 0; - int i = pkPos; - while (this.next(ptr, i++, maxOffset) != null) { - lowerSpan++; - } - int upperSpan = 0; - i = pkPos; - maxOffset = iterator(result.getUpperRange(), ptr); - while (this.next(ptr, i++, maxOffset) != null) { - upperSpan++; - } - return Math.max(Math.max(lowerSpan, upperSpan), 1); + int finalLength = ptr.getOffset() - initialOffset + ptr.getLength(); + ptr.set(ptr.get(), initialOffset, finalLength); + return position + i - (Boolean.FALSE.equals(hasValue) ? 1 : 0); + } + + public int computeMaxSpan(int pkPos, KeyRange result, ImmutableBytesWritable ptr) { + int maxOffset = iterator(result.getLowerRange(), ptr); + int lowerSpan = 0; + int i = pkPos; + while (this.next(ptr, i++, maxOffset) != null) { + lowerSpan++; + } + int upperSpan = 0; + i = pkPos; + maxOffset = iterator(result.getUpperRange(), ptr); + while (this.next(ptr, i++, maxOffset) != null) { + upperSpan++; } + return Math.max(Math.max(lowerSpan, upperSpan), 1); + } - public int computeMinSpan(int pkPos, KeyRange keyRange, ImmutableBytesWritable ptr) { - if (keyRange == KeyRange.EVERYTHING_RANGE) { - return 0; - } - int lowerSpan = Integer.MAX_VALUE; - byte[] range = keyRange.getLowerRange(); - if (range != KeyRange.UNBOUND) { - lowerSpan = 0; - int maxOffset = iterator(range, ptr); - int i = pkPos; - while (this.next(ptr, i++, maxOffset) != null) { - lowerSpan++; - } - } - int upperSpan = Integer.MAX_VALUE; - range = keyRange.getUpperRange(); - if (range != KeyRange.UNBOUND) { - upperSpan = 0; - int maxOffset = iterator(range, ptr); - int i = pkPos; - while (this.next(ptr, i++, maxOffset) != null) { - upperSpan++; - } - } - return Math.min(lowerSpan, upperSpan); + public int computeMinSpan(int pkPos, KeyRange keyRange, ImmutableBytesWritable ptr) { + if (keyRange == KeyRange.EVERYTHING_RANGE) { + return 0; + } + int lowerSpan = Integer.MAX_VALUE; + byte[] range = keyRange.getLowerRange(); + if (range != KeyRange.UNBOUND) { + lowerSpan = 0; + int maxOffset = iterator(range, ptr); + int i = pkPos; + while (this.next(ptr, i++, maxOffset) != null) { + lowerSpan++; + } } + int upperSpan = Integer.MAX_VALUE; + range = keyRange.getUpperRange(); + if (range != KeyRange.UNBOUND) { + upperSpan = 0; + int maxOffset = iterator(range, ptr); + int i = pkPos; + while (this.next(ptr, i++, maxOffset) != null) { + upperSpan++; + } + } + return Math.min(lowerSpan, upperSpan); + } - /** - * Clip the left hand portion of the keyRange up to the spansToClip. If keyRange is shorter in - * spans than spansToClip, the portion of the range that exists will be returned. - * @param pkPos the leading pk position of the keyRange. - * @param keyRange the key range to clip - * @param spansToClip the number of spans to clip - * @param ptr an ImmutableBytesWritable to use for temporary storage. 
- * @return the clipped portion of the keyRange - */ - public KeyRange clipLeft(int pkPos, KeyRange keyRange, int spansToClip, ImmutableBytesWritable ptr) { - if (spansToClip < 0) { - throw new IllegalArgumentException("Cannot specify a negative spansToClip (" + spansToClip + ")"); - } - if (spansToClip == 0) { - return keyRange; - } - byte[] lowerRange = keyRange.getLowerRange(); - if (lowerRange != KeyRange.UNBOUND) { - ptr.set(lowerRange); - this.position(ptr, pkPos, pkPos+spansToClip-1); - ptr.set(lowerRange, 0, ptr.getOffset() + ptr.getLength()); - lowerRange = ByteUtil.copyKeyBytesIfNecessary(ptr); - } - byte[] upperRange = keyRange.getUpperRange(); - if (upperRange != KeyRange.UNBOUND) { - ptr.set(upperRange); - this.position(ptr, pkPos, pkPos+spansToClip-1); - ptr.set(upperRange, 0, ptr.getOffset() + ptr.getLength()); - upperRange = ByteUtil.copyKeyBytesIfNecessary(ptr); - } - //Have to update the bounds to inclusive - //Consider a partial key on pk columns (INT A, INT B, ....) and a predicate (A,B) > (3,5) - //This initial key as a row key would look like (x0305 - *] - //If we were to clip the left to (x03 - *], we would skip values like (3,6) - return KeyRange.getKeyRange(lowerRange, true, upperRange, true); + /** + * Clip the left hand portion of the keyRange up to the spansToClip. If keyRange is shorter in + * spans than spansToClip, the portion of the range that exists will be returned. + * @param pkPos the leading pk position of the keyRange. + * @param keyRange the key range to clip + * @param spansToClip the number of spans to clip + * @param ptr an ImmutableBytesWritable to use for temporary storage. + * @return the clipped portion of the keyRange + */ + public KeyRange clipLeft(int pkPos, KeyRange keyRange, int spansToClip, + ImmutableBytesWritable ptr) { + if (spansToClip < 0) { + throw new IllegalArgumentException( + "Cannot specify a negative spansToClip (" + spansToClip + ")"); + } + if (spansToClip == 0) { + return keyRange; + } + byte[] lowerRange = keyRange.getLowerRange(); + if (lowerRange != KeyRange.UNBOUND) { + ptr.set(lowerRange); + this.position(ptr, pkPos, pkPos + spansToClip - 1); + ptr.set(lowerRange, 0, ptr.getOffset() + ptr.getLength()); + lowerRange = ByteUtil.copyKeyBytesIfNecessary(ptr); + } + byte[] upperRange = keyRange.getUpperRange(); + if (upperRange != KeyRange.UNBOUND) { + ptr.set(upperRange); + this.position(ptr, pkPos, pkPos + spansToClip - 1); + ptr.set(upperRange, 0, ptr.getOffset() + ptr.getLength()); + upperRange = ByteUtil.copyKeyBytesIfNecessary(ptr); } + // Have to update the bounds to inclusive + // Consider a partial key on pk columns (INT A, INT B, ....) and a predicate (A,B) > (3,5) + // This initial key as a row key would look like (x0305 - *] + // If we were to clip the left to (x03 - *], we would skip values like (3,6) + return KeyRange.getKeyRange(lowerRange, true, upperRange, true); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java index a5cadf8ea9f..2f537a71534 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowKeyValueAccessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,9 +26,6 @@ import java.util.Iterator; import java.util.List; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; @@ -39,450 +36,446 @@ import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.JacksonUtil; import org.apache.phoenix.util.SchemaUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * * Class that encapsulates accessing a value stored in the row key. - * - * * @since 0.1 */ public class RowKeyValueAccessor implements Writable { - private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyValueAccessor.class); + private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyValueAccessor.class); - /** - * Constructor solely for use during deserialization. Should not - * otherwise be used. - */ - public RowKeyValueAccessor() { - } + /** + * Constructor solely for use during deserialization. Should not otherwise be used. + */ + public RowKeyValueAccessor() { + } - /** - * The class used to keep list of booleans representing whether the data type of the given - * field is of type VARBINARY_ENCODED. For example, let's say we have variable length - * data types in the order (VARCHAR, VARBINARY_ENCODED, VARCHAR). The corresponding object - * of this class will have binaryEncodedDataTypes list with values (false, true, false). - * The array of this class is serialized and deserialized together with other fields of - * RowKeyValueAccessor. The value of the list determines which separator bytes need to be - * used while iterating through the rowkey bytes. - */ - static class ListOfEncodedTypeFlags { - private final List binaryEncodedDataTypes = new ArrayList<>(); + /** + * The class used to keep list of booleans representing whether the data type of the given field + * is of type VARBINARY_ENCODED. For example, let's say we have variable length data types in the + * order (VARCHAR, VARBINARY_ENCODED, VARCHAR). The corresponding object of this class will have + * binaryEncodedDataTypes list with values (false, true, false). The array of this class is + * serialized and deserialized together with other fields of RowKeyValueAccessor. The value of the + * list determines which separator bytes need to be used while iterating through the rowkey bytes. + */ + static class ListOfEncodedTypeFlags { + private final List binaryEncodedDataTypes = new ArrayList<>(); - public void addBinaryEncodedDataTypes(boolean val) { - this.binaryEncodedDataTypes.add(val); - } + public void addBinaryEncodedDataTypes(boolean val) { + this.binaryEncodedDataTypes.add(val); + } - public List getBinaryEncodedDataTypes() { - return binaryEncodedDataTypes; - } + public List getBinaryEncodedDataTypes() { + return binaryEncodedDataTypes; } + } - /** - * The class used to keep list of booleans representing whether the order of the given - * field is Ascending. For example, let's say we have rowkey with pk columns - * (A ASC, B DESC, C ASC). The corresponding object of this class will have sortOrderAsc list - * with values (true, false, true). The array of this class is serialized and deserialized - * together with other fields of RowKeyValueAccessor. 
The value of the list determines which - * separator bytes need to be used while iterating through the rowkey bytes. - */ - static class SortOrderList { - private final List sortOrderAsc = new ArrayList<>(); + /** + * The class used to keep list of booleans representing whether the order of the given field is + * Ascending. For example, let's say we have rowkey with pk columns (A ASC, B DESC, C ASC). The + * corresponding object of this class will have sortOrderAsc list with values (true, false, true). + * The array of this class is serialized and deserialized together with other fields of + * RowKeyValueAccessor. The value of the list determines which separator bytes need to be used + * while iterating through the rowkey bytes. + */ + static class SortOrderList { + private final List sortOrderAsc = new ArrayList<>(); - public void addSortOrderAsc(boolean val) { - this.sortOrderAsc.add(val); - } + public void addSortOrderAsc(boolean val) { + this.sortOrderAsc.add(val); + } - public List getSortOrderAsc() { - return sortOrderAsc; - } + public List getSortOrderAsc() { + return sortOrderAsc; } + } - /** - * Constructor to compile access to the value in the row key formed from - * a list of PData. - * - * @param data the list of data that make up the key - * @param index the zero-based index of the data item to access. - */ - public RowKeyValueAccessor(List data, int index) { - this.index = index; - int[] offsets = new int[data.size()]; - ListOfEncodedTypeFlags[] listOfEncodedTypesLists = - new ListOfEncodedTypeFlags[data.size()]; - SortOrderList[] sortOrderLists = new SortOrderList[data.size()]; - int nOffsets = 0; - Iterator iterator = data.iterator(); - PDatum datum = iterator.next(); - int pos = 0; - while (pos < index) { - int offset = 0; - if (datum.getDataType().isFixedWidth()) { - // For continuous fixed width data type columns, accumulate how many - // of them contains ASC and DESC order types. - ListOfEncodedTypeFlags encodedTypesLists = new ListOfEncodedTypeFlags(); - SortOrderList sortOrders = new SortOrderList(); - do { - encodedTypesLists.addBinaryEncodedDataTypes(false); - sortOrders.addSortOrderAsc(datum.getSortOrder() == SortOrder.ASC); - // For non parameterized types such as BIGINT, the type will return its max length. - // For parameterized types, for example CHAR(10) the type cannot know the max length, - // so in this case, the max length is retrieved from the datum. - Integer maxLength = datum.getDataType().getByteSize(); - offset += maxLength == null ? datum.getMaxLength() : maxLength; - datum = iterator.next(); - pos++; - } while (pos < index && datum.getDataType().isFixedWidth()); - offsets[nOffsets] = offset; // Encode fixed byte offset as positive - listOfEncodedTypesLists[nOffsets] = encodedTypesLists; - sortOrderLists[nOffsets++] = sortOrders; - } else { - // For continuous variable length data type columns, accumulate how many - // of them contains ASC and DESC order types. And how many of them contains - // VARBINARY_ENCODED and other variable length types. This information is - // crucial to figure out which separator bytes to use while going through each - // column value. 
- ListOfEncodedTypeFlags encodedTypesLists = new ListOfEncodedTypeFlags(); - SortOrderList sortOrders = new SortOrderList(); - do { - encodedTypesLists.addBinaryEncodedDataTypes( - datum.getDataType() == PVarbinaryEncoded.INSTANCE); - sortOrders.addSortOrderAsc(datum.getSortOrder() == SortOrder.ASC); - offset++; // Count the number of variable length columns - datum = iterator.next(); - pos++; - } while (pos < index && !datum.getDataType().isFixedWidth()); - offsets[nOffsets] = -offset; // Encode number of variable length columns as negative - listOfEncodedTypesLists[nOffsets] = encodedTypesLists; - sortOrderLists[nOffsets++] = sortOrders; - } - } - if (nOffsets < offsets.length) { - this.offsets = Arrays.copyOf(offsets, nOffsets); - this.listOfEncodedTypesLists = Arrays.copyOf(listOfEncodedTypesLists, nOffsets); - this.sortOrderLists = Arrays.copyOf(sortOrderLists, nOffsets); - } else { - this.offsets = offsets; - this.listOfEncodedTypesLists = listOfEncodedTypesLists; - this.sortOrderLists = sortOrderLists; - } - // Remember this so that we don't bother looking for the null separator byte in this case - this.isFixedLength = datum.getDataType().isFixedWidth(); - this.hasSeparator = !isFixedLength && iterator.hasNext(); + /** + * Constructor to compile access to the value in the row key formed from a list of PData. + * @param data the list of data that make up the key + * @param index the zero-based index of the data item to access. + */ + public RowKeyValueAccessor(List data, int index) { + this.index = index; + int[] offsets = new int[data.size()]; + ListOfEncodedTypeFlags[] listOfEncodedTypesLists = new ListOfEncodedTypeFlags[data.size()]; + SortOrderList[] sortOrderLists = new SortOrderList[data.size()]; + int nOffsets = 0; + Iterator iterator = data.iterator(); + PDatum datum = iterator.next(); + int pos = 0; + while (pos < index) { + int offset = 0; + if (datum.getDataType().isFixedWidth()) { + // For continuous fixed width data type columns, accumulate how many + // of them contains ASC and DESC order types. + ListOfEncodedTypeFlags encodedTypesLists = new ListOfEncodedTypeFlags(); + SortOrderList sortOrders = new SortOrderList(); + do { + encodedTypesLists.addBinaryEncodedDataTypes(false); + sortOrders.addSortOrderAsc(datum.getSortOrder() == SortOrder.ASC); + // For non parameterized types such as BIGINT, the type will return its max length. + // For parameterized types, for example CHAR(10) the type cannot know the max length, + // so in this case, the max length is retrieved from the datum. + Integer maxLength = datum.getDataType().getByteSize(); + offset += maxLength == null ? datum.getMaxLength() : maxLength; + datum = iterator.next(); + pos++; + } while (pos < index && datum.getDataType().isFixedWidth()); + offsets[nOffsets] = offset; // Encode fixed byte offset as positive + listOfEncodedTypesLists[nOffsets] = encodedTypesLists; + sortOrderLists[nOffsets++] = sortOrders; + } else { + // For continuous variable length data type columns, accumulate how many + // of them contains ASC and DESC order types. And how many of them contains + // VARBINARY_ENCODED and other variable length types. This information is + // crucial to figure out which separator bytes to use while going through each + // column value. 
+ ListOfEncodedTypeFlags encodedTypesLists = new ListOfEncodedTypeFlags(); + SortOrderList sortOrders = new SortOrderList(); + do { + encodedTypesLists + .addBinaryEncodedDataTypes(datum.getDataType() == PVarbinaryEncoded.INSTANCE); + sortOrders.addSortOrderAsc(datum.getSortOrder() == SortOrder.ASC); + offset++; // Count the number of variable length columns + datum = iterator.next(); + pos++; + } while (pos < index && !datum.getDataType().isFixedWidth()); + offsets[nOffsets] = -offset; // Encode number of variable length columns as negative + listOfEncodedTypesLists[nOffsets] = encodedTypesLists; + sortOrderLists[nOffsets++] = sortOrders; + } } - - RowKeyValueAccessor(int[] offsets, boolean isFixedLength, boolean hasSeparator) { - this.offsets = offsets; - this.isFixedLength = isFixedLength; - this.hasSeparator = hasSeparator; + if (nOffsets < offsets.length) { + this.offsets = Arrays.copyOf(offsets, nOffsets); + this.listOfEncodedTypesLists = Arrays.copyOf(listOfEncodedTypesLists, nOffsets); + this.sortOrderLists = Arrays.copyOf(sortOrderLists, nOffsets); + } else { + this.offsets = offsets; + this.listOfEncodedTypesLists = listOfEncodedTypesLists; + this.sortOrderLists = sortOrderLists; } + // Remember this so that we don't bother looking for the null separator byte in this case + this.isFixedLength = datum.getDataType().isFixedWidth(); + this.hasSeparator = !isFixedLength && iterator.hasNext(); + } - private int index = -1; // Only available on client side - private int[] offsets; - /** - * An array of BinaryEncodedTypesLists. Each element of BinaryEncodedTypesLists consists of - * list of booleans representing whether the given column is of type VARBINARY_ENCODED or other - * variable length data type. All the continuous fixed length as well as variable length - * data type columns are clubbed together. For example, let's say we have columns - * (A INTEGER, B FLOAT, C DOUBLE, D VARCHAR, E VARBINARY_ENCODED, F VARCHAR). Here, we have - * three continuous fixed-length columns and three continuous variable length columns. Hence, - * binaryEncodedTypesLists array consists of only two elements. binaryEncodedTypesLists[0] - * consists of List as [false, false, false] because INTEGER, FLOAT and DOUBLE are of - * fixed length data type. Whereas binaryEncodedTypesLists[1] consists of List as - * [false, true, false] because D and F are not VARBINARY_ENCODED whereas E is - * VARBINARY_ENCODED. - */ - private ListOfEncodedTypeFlags[] listOfEncodedTypesLists; - /** - * An array of SortOrderLists. Each element of SortOrderLists consists of list of booleans - * representing whether the given column is ASC of DESC order. All the continuous fixed length - * as well as variable length data type columns are clubbed together. For example, let's say - * we have columns - * (A INTEGER, B FLOAT DESC, C DOUBLE, D VARCHAR DESC, E VARBINARY_ENCODED, F VARCHAR). - * Here, we have three continuous fixed-length columns and three continuous variable length - * columns. Hence, sortOrderLists array consists of only two elements. sortOrderLists[0] - * consists of List as [true, false, true] because A and C are ASC whereas B is DESC - * ordered column. sortOrderLists[1] consists of List as [false, true, true] because - * D is DESC whereas E and F are ASC ordered columns. 
- */ - private SortOrderList[] sortOrderLists; - private boolean isFixedLength; - private boolean hasSeparator; + RowKeyValueAccessor(int[] offsets, boolean isFixedLength, boolean hasSeparator) { + this.offsets = offsets; + this.isFixedLength = isFixedLength; + this.hasSeparator = hasSeparator; + } - public int getIndex() { - return index; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (hasSeparator ? 1231 : 1237); - result = prime * result + (isFixedLength ? 1231 : 1237); - result = prime * result + Arrays.hashCode(offsets); - return result; - } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - RowKeyValueAccessor other = (RowKeyValueAccessor)obj; - if (hasSeparator != other.hasSeparator) return false; - if (isFixedLength != other.isFixedLength) return false; - if (!Arrays.equals(offsets, other.offsets)) return false; - return true; - } + private int index = -1; // Only available on client side + private int[] offsets; + /** + * An array of BinaryEncodedTypesLists. Each element of BinaryEncodedTypesLists consists of list + * of booleans representing whether the given column is of type VARBINARY_ENCODED or other + * variable length data type. All the continuous fixed length as well as variable length data type + * columns are clubbed together. For example, let's say we have columns (A INTEGER, B FLOAT, C + * DOUBLE, D VARCHAR, E VARBINARY_ENCODED, F VARCHAR). Here, we have three continuous fixed-length + * columns and three continuous variable length columns. Hence, binaryEncodedTypesLists array + * consists of only two elements. binaryEncodedTypesLists[0] consists of List as [false, + * false, false] because INTEGER, FLOAT and DOUBLE are of fixed length data type. Whereas + * binaryEncodedTypesLists[1] consists of List as [false, true, false] because D and F + * are not VARBINARY_ENCODED whereas E is VARBINARY_ENCODED. + */ + private ListOfEncodedTypeFlags[] listOfEncodedTypesLists; + /** + * An array of SortOrderLists. Each element of SortOrderLists consists of list of booleans + * representing whether the given column is ASC of DESC order. All the continuous fixed length as + * well as variable length data type columns are clubbed together. For example, let's say we have + * columns (A INTEGER, B FLOAT DESC, C DOUBLE, D VARCHAR DESC, E VARBINARY_ENCODED, F VARCHAR). + * Here, we have three continuous fixed-length columns and three continuous variable length + * columns. Hence, sortOrderLists array consists of only two elements. sortOrderLists[0] consists + * of List as [true, false, true] because A and C are ASC whereas B is DESC ordered + * column. sortOrderLists[1] consists of List as [false, true, true] because D is DESC + * whereas E and F are ASC ordered columns. 
+ */ + private SortOrderList[] sortOrderLists; + private boolean isFixedLength; + private boolean hasSeparator; - @Override - public String toString() { - return "RowKeyValueAccessor [offsets=" + Arrays.toString(offsets) + ", isFixedLength=" + isFixedLength - + ", hasSeparator=" + hasSeparator + "]"; - } + public int getIndex() { + return index; + } - @Override - public void readFields(DataInput input) throws IOException { - // Decode hasSeparator and isFixedLength from vint storing offset array length - int length = WritableUtils.readVInt(input); - hasSeparator = (length & 0x02) != 0; - isFixedLength = (length & 0x01) != 0; - length >>= 2; - offsets = ByteUtil.deserializeVIntArray(input, length); + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (hasSeparator ? 1231 : 1237); + result = prime * result + (isFixedLength ? 1231 : 1237); + result = prime * result + Arrays.hashCode(offsets); + return result; + } - this.listOfEncodedTypesLists = null; - this.sortOrderLists = null; + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + RowKeyValueAccessor other = (RowKeyValueAccessor) obj; + if (hasSeparator != other.hasSeparator) return false; + if (isFixedLength != other.isFixedLength) return false; + if (!Arrays.equals(offsets, other.offsets)) return false; + return true; + } - // New client that supports new structure of RowKeyValueAccessor with additional fields - // need to differentiate serialization from old client that does not support additional - // fields of RowKeyValueAccessor. This is specifically required because multiple - // Expressions are serialized and deserialized together. - // We expect to use DataInputBuffer or DataInputStream only. Both of them support - // reading bytes from the byte stream without moving the cursor. While DataInputBuffer - // provides access to the underlying byte buffer, DataInputStream supports mark and reset - // functions to read the bytes and re-adjust the cursor back to original position. - // If mark is not supported, we should not risk reading additional bytes as it can - // move the cursor forward and the deserialization of other Expressions can fail. - // It is very important to read the separator bytes - // ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR without moving the pointer forward. If the - // pointer is moved forward, old client that does not support new fields of - // RowKeyValueAccessor would likely fail while running against new server that supports - // new fields of RowKeyValueAccessor. 
- if (input instanceof DataInputBuffer) { - DataInputBuffer dataInputBuffer = (DataInputBuffer) input; - int offset = dataInputBuffer.getPosition(); - int len = dataInputBuffer.getLength(); - if ((offset + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) > len) { - return; - } - byte[] data = dataInputBuffer.getData(); - if (!Bytes.equals(data, offset, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, 0, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length)) { - return; - } - } else if (input instanceof DataInputStream) { - DataInputStream dataInputStream = (DataInputStream) input; - if (dataInputStream.markSupported()) { - dataInputStream.mark( - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length * 1000); - byte[] data = - new byte[QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length]; - try { - int bytesRead = dataInputStream.read(data); - if (bytesRead - != QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) { - dataInputStream.reset(); - return; - } - } catch (IOException e) { - // This can happen if EOF is reached while reading the data i.e. the stream - // does not have ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length num of bytes. - dataInputStream.reset(); - return; - } - if (!Bytes.equals(data, 0, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, 0, - QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length)) { - dataInputStream.reset(); - return; - } - dataInputStream.reset(); - } else { - LOGGER.warn("DataInputStream {} does not support mark.", dataInputStream); - return; - } - } else { - LOGGER.error("Type of DataInput is neither DataInputBuffer nor DataInputStream. " - + "This is not expected. Do not attempt deserialization of " - + "binaryEncodedTypesLists and sortOrderLists for compatibility purpose." 
- + " input: {}", input); - return; - } + @Override + public String toString() { + return "RowKeyValueAccessor [offsets=" + Arrays.toString(offsets) + ", isFixedLength=" + + isFixedLength + ", hasSeparator=" + hasSeparator + "]"; + } - byte[] bytes = new byte[QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length]; - input.readFully(bytes, 0, bytes.length); + @Override + public void readFields(DataInput input) throws IOException { + // Decode hasSeparator and isFixedLength from vint storing offset array length + int length = WritableUtils.readVInt(input); + hasSeparator = (length & 0x02) != 0; + isFixedLength = (length & 0x01) != 0; + length >>= 2; + offsets = ByteUtil.deserializeVIntArray(input, length); - int binaryEncodedTypesListsLen = WritableUtils.readVInt(input); - if (binaryEncodedTypesListsLen == 0) { - this.listOfEncodedTypesLists = new ListOfEncodedTypeFlags[0]; - } else { - byte[] binaryEncodedTypesListsBytes = new byte[binaryEncodedTypesListsLen]; - input.readFully(binaryEncodedTypesListsBytes, 0, binaryEncodedTypesListsLen); - this.listOfEncodedTypesLists = JacksonUtil.getObjectReader() - .readValue(binaryEncodedTypesListsBytes, ListOfEncodedTypeFlags[].class); - } + this.listOfEncodedTypesLists = null; + this.sortOrderLists = null; - int sortOrderListsLen = WritableUtils.readVInt(input); - if (sortOrderListsLen == 0) { - this.sortOrderLists = new SortOrderList[0]; - } else { - byte[] sortOrdersListsBytes = new byte[sortOrderListsLen]; - input.readFully(sortOrdersListsBytes, 0, sortOrderListsLen); - this.sortOrderLists = JacksonUtil.getObjectReader() - .readValue(sortOrdersListsBytes, SortOrderList[].class); + // New client that supports new structure of RowKeyValueAccessor with additional fields + // need to differentiate serialization from old client that does not support additional + // fields of RowKeyValueAccessor. This is specifically required because multiple + // Expressions are serialized and deserialized together. + // We expect to use DataInputBuffer or DataInputStream only. Both of them support + // reading bytes from the byte stream without moving the cursor. While DataInputBuffer + // provides access to the underlying byte buffer, DataInputStream supports mark and reset + // functions to read the bytes and re-adjust the cursor back to original position. + // If mark is not supported, we should not risk reading additional bytes as it can + // move the cursor forward and the deserialization of other Expressions can fail. + // It is very important to read the separator bytes + // ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR without moving the pointer forward. If the + // pointer is moved forward, old client that does not support new fields of + // RowKeyValueAccessor would likely fail while running against new server that supports + // new fields of RowKeyValueAccessor. 
+ if (input instanceof DataInputBuffer) { + DataInputBuffer dataInputBuffer = (DataInputBuffer) input; + int offset = dataInputBuffer.getPosition(); + int len = dataInputBuffer.getLength(); + if ((offset + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) > len) { + return; + } + byte[] data = dataInputBuffer.getData(); + if ( + !Bytes.equals(data, offset, QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length, + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, 0, + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) + ) { + return; + } + } else if (input instanceof DataInputStream) { + DataInputStream dataInputStream = (DataInputStream) input; + if (dataInputStream.markSupported()) { + dataInputStream + .mark(QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length * 1000); + byte[] data = new byte[QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length]; + try { + int bytesRead = dataInputStream.read(data); + if (bytesRead != QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) { + dataInputStream.reset(); + return; + } + } catch (IOException e) { + // This can happen if EOF is reached while reading the data i.e. the stream + // does not have ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length num of bytes. + dataInputStream.reset(); + return; + } + if ( + !Bytes.equals(data, 0, QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length, + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, 0, + QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length) + ) { + dataInputStream.reset(); + return; } + dataInputStream.reset(); + } else { + LOGGER.warn("DataInputStream {} does not support mark.", dataInputStream); + return; + } + } else { + LOGGER.error( + "Type of DataInput is neither DataInputBuffer nor DataInputStream. " + + "This is not expected. Do not attempt deserialization of " + + "binaryEncodedTypesLists and sortOrderLists for compatibility purpose." + " input: {}", + input); + return; } - @Override - public void write(DataOutput output) throws IOException { - // Encode hasSeparator and isFixedLength into vint storing offset array length - // (since there's plenty of room) - int length = offsets.length << 2; - length |= (hasSeparator ? 1 << 1 : 0) | (isFixedLength ? 1 : 0); - ByteUtil.serializeVIntArray(output, offsets, length); + byte[] bytes = new byte[QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR.length]; + input.readFully(bytes, 0, bytes.length); - // New client that supports new structure of RowKeyValueAccessor with additional fields - // need to differentiate serialization from old client that does not support additional - // fields of RowKeyValueAccessor. This is specifically required because multiple - // Expressions are serialized and deserialized together. - // So, let's write separator bytes as ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, followed - // by serialization of new fields (binaryEncodedTypesLists and sortOrderLists). 
- output.write(QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR); - if (this.listOfEncodedTypesLists.length == 0) { - WritableUtils.writeVInt(output, 0); - } else { - byte[] binaryEncodedTypesListsBytes = - JacksonUtil.getObjectWriter().writeValueAsBytes(this.listOfEncodedTypesLists); - WritableUtils.writeVInt(output, binaryEncodedTypesListsBytes.length); - if (binaryEncodedTypesListsBytes.length > 0) { - output.write(binaryEncodedTypesListsBytes); - } - } + int binaryEncodedTypesListsLen = WritableUtils.readVInt(input); + if (binaryEncodedTypesListsLen == 0) { + this.listOfEncodedTypesLists = new ListOfEncodedTypeFlags[0]; + } else { + byte[] binaryEncodedTypesListsBytes = new byte[binaryEncodedTypesListsLen]; + input.readFully(binaryEncodedTypesListsBytes, 0, binaryEncodedTypesListsLen); + this.listOfEncodedTypesLists = JacksonUtil.getObjectReader() + .readValue(binaryEncodedTypesListsBytes, ListOfEncodedTypeFlags[].class); + } - if (this.sortOrderLists.length == 0) { - WritableUtils.writeVInt(output, 0); - } else { - byte[] sortOrdersListsBytes = - JacksonUtil.getObjectWriter().writeValueAsBytes(this.sortOrderLists); - WritableUtils.writeVInt(output, sortOrdersListsBytes.length); - if (sortOrdersListsBytes.length > 0) { - output.write(sortOrdersListsBytes); - } - } + int sortOrderListsLen = WritableUtils.readVInt(input); + if (sortOrderListsLen == 0) { + this.sortOrderLists = new SortOrderList[0]; + } else { + byte[] sortOrdersListsBytes = new byte[sortOrderListsLen]; + input.readFully(sortOrdersListsBytes, 0, sortOrderListsLen); + this.sortOrderLists = + JacksonUtil.getObjectReader().readValue(sortOrdersListsBytes, SortOrderList[].class); } - - private static boolean isSeparatorByte(byte b) { - return b == QueryConstants.SEPARATOR_BYTE || b == QueryConstants.DESC_SEPARATOR_BYTE; + } + + @Override + public void write(DataOutput output) throws IOException { + // Encode hasSeparator and isFixedLength into vint storing offset array length + // (since there's plenty of room) + int length = offsets.length << 2; + length |= (hasSeparator ? 1 << 1 : 0) | (isFixedLength ? 1 : 0); + ByteUtil.serializeVIntArray(output, offsets, length); + + // New client that supports new structure of RowKeyValueAccessor with additional fields + // need to differentiate serialization from old client that does not support additional + // fields of RowKeyValueAccessor. This is specifically required because multiple + // Expressions are serialized and deserialized together. + // So, let's write separator bytes as ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR, followed + // by serialization of new fields (binaryEncodedTypesLists and sortOrderLists). 
+ output.write(QueryConstants.ROW_KEY_VAL_ACCESSOR_NEW_FIELDS_SEPARATOR); + if (this.listOfEncodedTypesLists.length == 0) { + WritableUtils.writeVInt(output, 0); + } else { + byte[] binaryEncodedTypesListsBytes = + JacksonUtil.getObjectWriter().writeValueAsBytes(this.listOfEncodedTypesLists); + WritableUtils.writeVInt(output, binaryEncodedTypesListsBytes.length); + if (binaryEncodedTypesListsBytes.length > 0) { + output.write(binaryEncodedTypesListsBytes); + } } - /** - * Calculate the byte offset in the row key to the start of the PK column value - * @param keyBuffer the byte array of the row key - * @param keyOffset the offset in the byte array of where the key begins - * @return byte offset to the start of the PK column value - */ - public int getOffset(byte[] keyBuffer, int keyOffset) { - if (this.listOfEncodedTypesLists == null && this.sortOrderLists == null) { - return getOffsetWithNullTypeAndOrderInfo(keyBuffer, keyOffset); - } - // Use encoded offsets to navigate through row key buffer - for (int i = 0; i < offsets.length; i++) { - int offset = offsets[i]; - ListOfEncodedTypeFlags binaryEncodedTypesList = this.listOfEncodedTypesLists[i]; - SortOrderList sortOrderList = this.sortOrderLists[i]; - if (offset >= 0) { // If offset is non negative, it's a byte offset - keyOffset += offset; - } else { // Else, a negative offset is the number of variable length values to skip - int pos = 0; - while (offset++ < 0) { - boolean isVarBinaryEncoded = - binaryEncodedTypesList.getBinaryEncodedDataTypes().get(pos); - boolean sortOrderAsc = sortOrderList.getSortOrderAsc().get(pos++); - if (!isVarBinaryEncoded) { - while (keyOffset < keyBuffer.length && !isSeparatorByte( - keyBuffer[keyOffset++])) { - // empty - } - } else { - while (keyOffset < keyBuffer.length - && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(keyBuffer, - keyOffset++, sortOrderAsc ? 
SortOrder.ASC : SortOrder.DESC)) { - // empty - } - if (keyOffset < keyBuffer.length) { - keyOffset++; - } - } - } - } - } - return keyOffset; + if (this.sortOrderLists.length == 0) { + WritableUtils.writeVInt(output, 0); + } else { + byte[] sortOrdersListsBytes = + JacksonUtil.getObjectWriter().writeValueAsBytes(this.sortOrderLists); + WritableUtils.writeVInt(output, sortOrdersListsBytes.length); + if (sortOrdersListsBytes.length > 0) { + output.write(sortOrdersListsBytes); + } } + } + + private static boolean isSeparatorByte(byte b) { + return b == QueryConstants.SEPARATOR_BYTE || b == QueryConstants.DESC_SEPARATOR_BYTE; + } - public int getOffsetWithNullTypeAndOrderInfo(byte[] keyBuffer, int keyOffset) { - // Use encoded offsets to navigate through row key buffer - for (int offset : offsets) { - if (offset >= 0) { // If offset is non negative, it's a byte offset - keyOffset += offset; - } else { // Else, a negative offset is the number of variable length values to skip - while (offset++ < 0) { - // FIXME: keyOffset < keyBuffer.length required because HBase passes bogus keys to filter to position scan (HBASE-6562) - while (keyOffset < keyBuffer.length && !isSeparatorByte( - keyBuffer[keyOffset++])) { - } - } + /** + * Calculate the byte offset in the row key to the start of the PK column value + * @param keyBuffer the byte array of the row key + * @param keyOffset the offset in the byte array of where the key begins + * @return byte offset to the start of the PK column value + */ + public int getOffset(byte[] keyBuffer, int keyOffset) { + if (this.listOfEncodedTypesLists == null && this.sortOrderLists == null) { + return getOffsetWithNullTypeAndOrderInfo(keyBuffer, keyOffset); + } + // Use encoded offsets to navigate through row key buffer + for (int i = 0; i < offsets.length; i++) { + int offset = offsets[i]; + ListOfEncodedTypeFlags binaryEncodedTypesList = this.listOfEncodedTypesLists[i]; + SortOrderList sortOrderList = this.sortOrderLists[i]; + if (offset >= 0) { // If offset is non negative, it's a byte offset + keyOffset += offset; + } else { // Else, a negative offset is the number of variable length values to skip + int pos = 0; + while (offset++ < 0) { + boolean isVarBinaryEncoded = binaryEncodedTypesList.getBinaryEncodedDataTypes().get(pos); + boolean sortOrderAsc = sortOrderList.getSortOrderAsc().get(pos++); + if (!isVarBinaryEncoded) { + while (keyOffset < keyBuffer.length && !isSeparatorByte(keyBuffer[keyOffset++])) { + // empty + } + } else { + while ( + keyOffset < keyBuffer.length + && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(keyBuffer, keyOffset++, + sortOrderAsc ? SortOrder.ASC : SortOrder.DESC) + ) { + // empty + } + if (keyOffset < keyBuffer.length) { + keyOffset++; } + } } - return keyOffset; + } } + return keyOffset; + } - /** - * Calculate the length of the PK column value - * @param keyBuffer the byte array of the row key - * @param keyOffset the offset in the byte array of where the key begins - * @param maxOffset maximum offset to use while calculating length - * @param type the data type of the column. - * @param sortOrder sort order. 
- * @return the length of the PK column value - */ - public int getLength(byte[] keyBuffer, int keyOffset, int maxOffset, PDataType type, - SortOrder sortOrder) { - if (!hasSeparator) { - if (type == PVarbinaryEncoded.INSTANCE) { - if (sortOrder == null || sortOrder == SortOrder.ASC) { - return maxOffset - keyOffset; - } else if (sortOrder == SortOrder.DESC) { - return maxOffset - keyOffset - 2; - } - } else { - return maxOffset - keyOffset - ( - keyBuffer[maxOffset - 1] == QueryConstants.DESC_SEPARATOR_BYTE ? 1 : 0); - } + public int getOffsetWithNullTypeAndOrderInfo(byte[] keyBuffer, int keyOffset) { + // Use encoded offsets to navigate through row key buffer + for (int offset : offsets) { + if (offset >= 0) { // If offset is non negative, it's a byte offset + keyOffset += offset; + } else { // Else, a negative offset is the number of variable length values to skip + while (offset++ < 0) { + // FIXME: keyOffset < keyBuffer.length required because HBase passes bogus keys to filter + // to position scan (HBASE-6562) + while (keyOffset < keyBuffer.length && !isSeparatorByte(keyBuffer[keyOffset++])) { + } } - int offset = keyOffset; - if (type == PVarbinaryEncoded.INSTANCE) { - while (offset < maxOffset && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(keyBuffer, - offset, sortOrder)) { - offset++; - } - } else { - while (offset < maxOffset && !isSeparatorByte(keyBuffer[offset])) { - offset++; - } + } + } + return keyOffset; + } + + /** + * Calculate the length of the PK column value + * @param keyBuffer the byte array of the row key + * @param keyOffset the offset in the byte array of where the key begins + * @param maxOffset maximum offset to use while calculating length + * @param type the data type of the column. + * @param sortOrder sort order. + * @return the length of the PK column value + */ + public int getLength(byte[] keyBuffer, int keyOffset, int maxOffset, PDataType type, + SortOrder sortOrder) { + if (!hasSeparator) { + if (type == PVarbinaryEncoded.INSTANCE) { + if (sortOrder == null || sortOrder == SortOrder.ASC) { + return maxOffset - keyOffset; + } else if (sortOrder == SortOrder.DESC) { + return maxOffset - keyOffset - 2; } - return offset - keyOffset; + } else { + return maxOffset - keyOffset + - (keyBuffer[maxOffset - 1] == QueryConstants.DESC_SEPARATOR_BYTE ? 1 : 0); + } + } + int offset = keyOffset; + if (type == PVarbinaryEncoded.INSTANCE) { + while ( + offset < maxOffset + && !SchemaUtil.areSeparatorBytesForVarBinaryEncoded(keyBuffer, offset, sortOrder) + ) { + offset++; + } + } else { + while (offset < maxOffset && !isSeparatorByte(keyBuffer[offset])) { + offset++; + } } + return offset - keyOffset; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetInternalErrorException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetInternalErrorException.java index 5e2622c9b55..70a06049692 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetInternalErrorException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetInternalErrorException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,16 +23,15 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when a RVC Offset is not coercible to a PK or index of a table - * */ public class RowValueConstructorOffsetInternalErrorException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_INTERNAL_ERROR; - final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = + SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_INTERNAL_ERROR; + final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); - public RowValueConstructorOffsetInternalErrorException(String additionalInfo) { - super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); - } + public RowValueConstructorOffsetInternalErrorException(String additionalInfo) { + super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotAllowedInQueryException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotAllowedInQueryException.java index 1ab02857885..e5941ef812b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotAllowedInQueryException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotAllowedInQueryException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,16 +23,15 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when a RVC Offset is not coercible to a PK or index of a table - * */ public class RowValueConstructorOffsetNotAllowedInQueryException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_ALLOWED_IN_QUERY; - final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = + SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_ALLOWED_IN_QUERY; + final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); - public RowValueConstructorOffsetNotAllowedInQueryException(String additionalInfo) { - super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); - } + public RowValueConstructorOffsetNotAllowedInQueryException(String additionalInfo) { + super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotCoercibleException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotCoercibleException.java index 4183d0cbe18..d13892c742b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotCoercibleException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/RowValueConstructorOffsetNotCoercibleException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,16 +23,15 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * * Exception thrown when a RVC Offset is not coercible to a PK or index of a table - * */ public class RowValueConstructorOffsetNotCoercibleException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_COERCIBLE; - final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = + SQLExceptionCode.ROW_VALUE_CONSTRUCTOR_OFFSET_NOT_COERCIBLE; + final private static String BASE_MESSAGE = new SQLExceptionInfo.Builder(code).build().toString(); - public RowValueConstructorOffsetNotCoercibleException(String additionalInfo) { - super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); - } + public RowValueConstructorOffsetNotCoercibleException(String additionalInfo) { + super(BASE_MESSAGE + " " + additionalInfo, code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SaltingUtil.java index cfa8fe2bfe9..dcf9824991a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SaltingUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SaltingUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,101 +23,99 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.query.KeyRange; -import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder; import org.apache.phoenix.schema.types.PBinary; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Utility methods related to transparent salting of row keys. */ public class SaltingUtil { - public static final int NUM_SALTING_BYTES = 1; - public static final Integer MAX_BUCKET_NUM = 256; // Unsigned byte. - public static final String SALTING_COLUMN_NAME = "_SALT"; - public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY"; - public static final PColumnImpl SALTING_COLUMN = new PColumnImpl( - PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, SortOrder.getDefault(), 0, null, false, null, false, false, null, - HConstants.LATEST_TIMESTAMP); + public static final int NUM_SALTING_BYTES = 1; + public static final Integer MAX_BUCKET_NUM = 256; // Unsigned byte. 
+ public static final String SALTING_COLUMN_NAME = "_SALT"; + public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY"; + public static final PColumnImpl SALTING_COLUMN = new PColumnImpl( + PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, + SortOrder.getDefault(), 0, null, false, null, false, false, null, HConstants.LATEST_TIMESTAMP); - public static List generateAllSaltingRanges(int bucketNum) { - List allRanges = Lists.newArrayListWithExpectedSize(bucketNum); - for (int i=0; i generateAllSaltingRanges(int bucketNum) { + List allRanges = Lists.newArrayListWithExpectedSize(bucketNum); + for (int i = 0; i < bucketNum; i++) { + byte[] saltByte = new byte[] { (byte) i }; + allRanges.add( + SALTING_COLUMN.getDataType().getKeyRange(saltByte, true, saltByte, true, SortOrder.ASC)); } + return allRanges; + } - public static byte[][] getSalteByteSplitPoints(int saltBucketNum) { - byte[][] splits = new byte[saltBucketNum-1][]; - for (int i = 1; i < saltBucketNum; i++) { - splits[i-1] = new byte[] {(byte) i}; - } - return splits; + public static byte[][] getSalteByteSplitPoints(int saltBucketNum) { + byte[][] splits = new byte[saltBucketNum - 1][]; + for (int i = 1; i < saltBucketNum; i++) { + splits[i - 1] = new byte[] { (byte) i }; } + return splits; + } - // Compute the hash of the key value stored in key and set its first byte as the value. The - // first byte of key should be left empty as a place holder for the salting byte. - public static byte[] getSaltedKey(ImmutableBytesWritable key, int bucketNum) { - byte[] keyBytes = new byte[key.getLength()]; - byte saltByte = getSaltingByte(key.get(), key.getOffset() + 1, key.getLength() - 1, bucketNum); - keyBytes[0] = saltByte; - System.arraycopy(key.get(), key.getOffset() + 1, keyBytes, 1, key.getLength() - 1); - return keyBytes; - } + // Compute the hash of the key value stored in key and set its first byte as the value. The + // first byte of key should be left empty as a place holder for the salting byte. + public static byte[] getSaltedKey(ImmutableBytesWritable key, int bucketNum) { + byte[] keyBytes = new byte[key.getLength()]; + byte saltByte = getSaltingByte(key.get(), key.getOffset() + 1, key.getLength() - 1, bucketNum); + keyBytes[0] = saltByte; + System.arraycopy(key.get(), key.getOffset() + 1, keyBytes, 1, key.getLength() - 1); + return keyBytes; + } - // Generate the bucket byte given a byte array and the number of buckets. - public static byte getSaltingByte(byte[] value, int offset, int length, int bucketNum) { - int hash = calculateHashCode(value, offset, length); - return (byte) Math.abs(hash % bucketNum); - } + // Generate the bucket byte given a byte array and the number of buckets. + public static byte getSaltingByte(byte[] value, int offset, int length, int bucketNum) { + int hash = calculateHashCode(value, offset, length); + return (byte) Math.abs(hash % bucketNum); + } - private static int calculateHashCode(byte a[], int offset, int length) { - if (a == null) - return 0; - int result = 1; - for (int i = offset; i < offset + length; i++) { - result = 31 * result + a[i]; - } - return result; + private static int calculateHashCode(byte a[], int offset, int length) { + if (a == null) return 0; + int result = 1; + for (int i = offset; i < offset + length; i++) { + result = 31 * result + a[i]; } + return result; + } - public static KeyRange addSaltByte(byte[] startKey, KeyRange minMaxRange) { - byte saltByte = startKey.length == 0 ? 
0 : startKey[0]; - byte[] lowerRange = minMaxRange.getLowerRange(); - if(!minMaxRange.lowerUnbound()) { - byte[] newLowerRange = new byte[lowerRange.length + 1]; - newLowerRange[0] = saltByte; - System.arraycopy(lowerRange, 0, newLowerRange, 1, lowerRange.length); - lowerRange = newLowerRange; - } - byte[] upperRange = minMaxRange.getUpperRange(); + public static KeyRange addSaltByte(byte[] startKey, KeyRange minMaxRange) { + byte saltByte = startKey.length == 0 ? 0 : startKey[0]; + byte[] lowerRange = minMaxRange.getLowerRange(); + if (!minMaxRange.lowerUnbound()) { + byte[] newLowerRange = new byte[lowerRange.length + 1]; + newLowerRange[0] = saltByte; + System.arraycopy(lowerRange, 0, newLowerRange, 1, lowerRange.length); + lowerRange = newLowerRange; + } + byte[] upperRange = minMaxRange.getUpperRange(); - if(!minMaxRange.upperUnbound()) { - byte[] newUpperRange = new byte[upperRange.length + 1]; - newUpperRange[0] = saltByte; - System.arraycopy(upperRange, 0, newUpperRange, 1, upperRange.length); - upperRange = newUpperRange; - } - return KeyRange.getKeyRange(lowerRange, upperRange); + if (!minMaxRange.upperUnbound()) { + byte[] newUpperRange = new byte[upperRange.length + 1]; + newUpperRange[0] = saltByte; + System.arraycopy(upperRange, 0, newUpperRange, 1, upperRange.length); + upperRange = newUpperRange; } + return KeyRange.getKeyRange(lowerRange, upperRange); + } - public static void addRegionStartKeyToScanStartAndStopRows(byte[] startKey, byte[] endKey, Scan scan) { - if (startKey.length == 0 && endKey.length == 0) return; - byte[] prefixBytes = startKey.length != 0 ? startKey : new byte[endKey.length]; - byte[] newStartRow = new byte[scan.getStartRow().length + prefixBytes.length]; - System.arraycopy(prefixBytes, 0, newStartRow, 0, prefixBytes.length); - System.arraycopy(scan.getStartRow(), 0, newStartRow, prefixBytes.length, scan.getStartRow().length); - scan.withStartRow(newStartRow); - if (scan.getStopRow().length != 0) { - byte[] newStopRow = new byte[scan.getStopRow().length + prefixBytes.length]; - System.arraycopy(prefixBytes, 0, newStopRow, 0, prefixBytes.length); - System.arraycopy(scan.getStopRow(), 0, newStopRow, prefixBytes.length, scan.getStopRow().length); - scan.withStopRow(newStopRow); - } + public static void addRegionStartKeyToScanStartAndStopRows(byte[] startKey, byte[] endKey, + Scan scan) { + if (startKey.length == 0 && endKey.length == 0) return; + byte[] prefixBytes = startKey.length != 0 ? 
startKey : new byte[endKey.length]; + byte[] newStartRow = new byte[scan.getStartRow().length + prefixBytes.length]; + System.arraycopy(prefixBytes, 0, newStartRow, 0, prefixBytes.length); + System.arraycopy(scan.getStartRow(), 0, newStartRow, prefixBytes.length, + scan.getStartRow().length); + scan.withStartRow(newStartRow); + if (scan.getStopRow().length != 0) { + byte[] newStopRow = new byte[scan.getStopRow().length + prefixBytes.length]; + System.arraycopy(prefixBytes, 0, newStopRow, 0, prefixBytes.length); + System.arraycopy(scan.getStopRow(), 0, newStopRow, prefixBytes.length, + scan.getStopRow().length); + scan.withStopRow(newStopRow); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java index 2fc5f78606b..94e3765bf9e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,32 +22,28 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - /** - * * Exception thrown when a schema name already exists - * - * * @since 0.1 */ public class SchemaAlreadyExistsException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_ALREADY_EXISTS; - private final String schemaName; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_ALREADY_EXISTS; + private final String schemaName; - public SchemaAlreadyExistsException(String schemaName) { - this(schemaName, null); - } + public SchemaAlreadyExistsException(String schemaName) { + this(schemaName, null); + } - public SchemaAlreadyExistsException(String schemaName, String msg) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setMessage(msg).build().toString(), - code.getSQLState(), code.getErrorCode()); - this.schemaName = schemaName; + public SchemaAlreadyExistsException(String schemaName, String msg) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setMessage(msg).build() + .toString(), code.getSQLState(), code.getErrorCode()); + this.schemaName = schemaName; - } + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java index 32541c1af6e..c309e27bbd9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SchemaNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,25 +22,25 @@ import org.apache.phoenix.exception.SQLExceptionInfo; public class SchemaNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_NOT_FOUND; - private final long timestamp; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.SCHEMA_NOT_FOUND; + private final long timestamp; - public SchemaNotFoundException(SchemaNotFoundException e, long timestamp) { - this(e.getSchemaName(), timestamp); - } + public SchemaNotFoundException(SchemaNotFoundException e, long timestamp) { + this(e.getSchemaName(), timestamp); + } - public SchemaNotFoundException(String schemaName) { - this(schemaName, HConstants.LATEST_TIMESTAMP); - } + public SchemaNotFoundException(String schemaName) { + this(schemaName, HConstants.LATEST_TIMESTAMP); + } - public SchemaNotFoundException(String schemaName, long timestamp) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).build().toString(), code.getSQLState(), - code.getErrorCode(), schemaName, null, null); - this.timestamp = timestamp; - } + public SchemaNotFoundException(String schemaName, long timestamp) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).build().toString(), + code.getSQLState(), code.getErrorCode(), schemaName, null, null); + this.timestamp = timestamp; + } - public long getTimeStamp() { - return timestamp; - } + public long getTimeStamp() { + return timestamp; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/Sequence.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/Sequence.java index a9eff9bc3ad..4de58223aeb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/Sequence.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/Sequence.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE_BYTES; @@ -53,587 +52,627 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.math.LongMath; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.SequenceUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.math.LongMath; - public class Sequence { - public static final int SUCCESS = 0; - - public enum ValueOp { - VALIDATE_SEQUENCE, // Check that the sequence statements are valid, during statement compilation - INCREMENT_SEQUENCE, // Perform the seqence operations, during execution - NOOP // Do not do anything, for compiling unbound prepared statements - }; - public enum MetaOp {CREATE_SEQUENCE, DROP_SEQUENCE, RETURN_SEQUENCE}; - - // create empty Sequence key values used while created a sequence row - private static final Cell CURRENT_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CURRENT_VALUE_BYTES); - private static final Cell INCREMENT_BY_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, INCREMENT_BY_BYTES); - private static final Cell CACHE_SIZE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CACHE_SIZE_BYTES); - private static final Cell MIN_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MIN_VALUE_BYTES); - private static final Cell MAX_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MAX_VALUE_BYTES); - private static final Cell CYCLE_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CYCLE_FLAG_BYTES); - private static final Cell LIMIT_REACHED_KV = org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, LIMIT_REACHED_FLAG_BYTES); - private static final List SEQUENCE_KV_COLUMNS = Arrays.asList( - CURRENT_VALUE_KV, - INCREMENT_BY_KV, - CACHE_SIZE_KV, - // The following three columns were added in 3.1/4.1 - MIN_VALUE_KV, - MAX_VALUE_KV, - CYCLE_KV, - LIMIT_REACHED_KV - ); - static { - Collections.sort(SEQUENCE_KV_COLUMNS, CellComparatorImpl.COMPARATOR); - } - // Pre-compute index of sequence key values to prevent binary search - private static final int CURRENT_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CURRENT_VALUE_KV); - private static final int INCREMENT_BY_INDEX = SEQUENCE_KV_COLUMNS.indexOf(INCREMENT_BY_KV); - private static final int CACHE_SIZE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CACHE_SIZE_KV); - private static final int MIN_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(MIN_VALUE_KV); - private static final int MAX_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(MAX_VALUE_KV); - private static final int CYCLE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CYCLE_KV); - private static final int LIMIT_REACHED_INDEX = SEQUENCE_KV_COLUMNS.indexOf(LIMIT_REACHED_KV); - - public static final int NUM_SEQUENCE_KEY_VALUES = SEQUENCE_KV_COLUMNS.size(); - private 
static final EmptySequenceCacheException EMPTY_SEQUENCE_CACHE_EXCEPTION = new EmptySequenceCacheException(); - - private final SequenceKey key; - private final ReentrantLock lock; - private List values; - - public Sequence(SequenceKey key) { - if (key == null) throw new NullPointerException(); - this.key = key; - this.lock = new ReentrantLock(); - } - - private void insertSequenceValue(SequenceValue value) { - if (values == null) { - values = Lists.newArrayListWithExpectedSize(1); - values.add(value); - } else { - int i = values.size()-1; - while (i >= 0 && values.get(i).timestamp > value.timestamp) { - i--; - } - // Don't insert another value if there's one at the same timestamp that is a delete - if (i >= 0 && values.get(i).timestamp == value.timestamp) { - if (values.get(i).isDeleted) { - throw new IllegalStateException("Unexpected delete marker at timestamp " + value.timestamp + " for "+ key); - } - values.set(i, value); - } else { - values.add(i+1, value); - } - } + public static final int SUCCESS = 0; + + public enum ValueOp { + VALIDATE_SEQUENCE, // Check that the sequence statements are valid, during statement compilation + INCREMENT_SEQUENCE, // Perform the seqence operations, during execution + NOOP // Do not do anything, for compiling unbound prepared statements + }; + + public enum MetaOp { + CREATE_SEQUENCE, + DROP_SEQUENCE, + RETURN_SEQUENCE + }; + + // create empty Sequence key values used while created a sequence row + private static final Cell CURRENT_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CURRENT_VALUE_BYTES); + private static final Cell INCREMENT_BY_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, INCREMENT_BY_BYTES); + private static final Cell CACHE_SIZE_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CACHE_SIZE_BYTES); + private static final Cell MIN_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MIN_VALUE_BYTES); + private static final Cell MAX_VALUE_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, MAX_VALUE_BYTES); + private static final Cell CYCLE_KV = org.apache.hadoop.hbase.KeyValueUtil + .createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, SYSTEM_SEQUENCE_FAMILY_BYTES, CYCLE_FLAG_BYTES); + private static final Cell LIMIT_REACHED_KV = + org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, + SYSTEM_SEQUENCE_FAMILY_BYTES, LIMIT_REACHED_FLAG_BYTES); + private static final List SEQUENCE_KV_COLUMNS = + Arrays. 
asList(CURRENT_VALUE_KV, INCREMENT_BY_KV, CACHE_SIZE_KV, + // The following three columns were added in 3.1/4.1 + MIN_VALUE_KV, MAX_VALUE_KV, CYCLE_KV, LIMIT_REACHED_KV); + static { + Collections.sort(SEQUENCE_KV_COLUMNS, CellComparatorImpl.COMPARATOR); + } + // Pre-compute index of sequence key values to prevent binary search + private static final int CURRENT_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CURRENT_VALUE_KV); + private static final int INCREMENT_BY_INDEX = SEQUENCE_KV_COLUMNS.indexOf(INCREMENT_BY_KV); + private static final int CACHE_SIZE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CACHE_SIZE_KV); + private static final int MIN_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(MIN_VALUE_KV); + private static final int MAX_VALUE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(MAX_VALUE_KV); + private static final int CYCLE_INDEX = SEQUENCE_KV_COLUMNS.indexOf(CYCLE_KV); + private static final int LIMIT_REACHED_INDEX = SEQUENCE_KV_COLUMNS.indexOf(LIMIT_REACHED_KV); + + public static final int NUM_SEQUENCE_KEY_VALUES = SEQUENCE_KV_COLUMNS.size(); + private static final EmptySequenceCacheException EMPTY_SEQUENCE_CACHE_EXCEPTION = + new EmptySequenceCacheException(); + + private final SequenceKey key; + private final ReentrantLock lock; + private List values; + + public Sequence(SequenceKey key) { + if (key == null) throw new NullPointerException(); + this.key = key; + this.lock = new ReentrantLock(); + } + + private void insertSequenceValue(SequenceValue value) { + if (values == null) { + values = Lists.newArrayListWithExpectedSize(1); + values.add(value); + } else { + int i = values.size() - 1; + while (i >= 0 && values.get(i).timestamp > value.timestamp) { + i--; + } + // Don't insert another value if there's one at the same timestamp that is a delete + if (i >= 0 && values.get(i).timestamp == value.timestamp) { + if (values.get(i).isDeleted) { + throw new IllegalStateException( + "Unexpected delete marker at timestamp " + value.timestamp + " for " + key); + } + values.set(i, value); + } else { + values.add(i + 1, value); + } + } + } + + private SequenceValue findSequenceValue(long timestamp) { + if (values == null) { + return null; } - - private SequenceValue findSequenceValue(long timestamp) { - if (values == null) { - return null; - } - int i = values.size()-1; - while (i >= 0 && values.get(i).timestamp >= timestamp) { - i--; - } - if (i < 0) { - return null; - } - SequenceValue value = values.get(i); - return value.isDeleted ? null : value; - } - - private long increment(SequenceValue value, ValueOp op, long numToAllocate) throws SQLException { - boolean increasingSeq = value.incrementBy > 0 && op != ValueOp.VALIDATE_SEQUENCE; - // check if the the sequence has already reached the min/max limit - if (value.limitReached && op != ValueOp.VALIDATE_SEQUENCE) { - if (value.cycle) { - value.limitReached=false; - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } else { - SQLExceptionCode code = - increasingSeq ? 
SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE - : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; - throw SequenceUtil.getException(this.key.getSchemaName(), - this.key.getSequenceName(), code); - } - } - - long returnValue = value.currentValue; - if (op == ValueOp.INCREMENT_SEQUENCE) { - boolean overflowOrUnderflow=false; - // advance currentValue while checking for overflow - try { - // advance by numToAllocate * the increment amount - value.currentValue = LongMath.checkedAdd(value.currentValue, numToAllocate * value.incrementBy); - } catch (ArithmeticException e) { - overflowOrUnderflow = true; - } - - // set the limitReached flag (which will be checked the next time increment is called) - // if overflow or limit was reached - if (overflowOrUnderflow || (increasingSeq && value.currentValue > value.maxValue) - || (!increasingSeq && value.currentValue < value.minValue)) { - value.limitReached=true; - } - } - return returnValue; + int i = values.size() - 1; + while (i >= 0 && values.get(i).timestamp >= timestamp) { + i--; } + if (i < 0) { + return null; + } + SequenceValue value = values.get(i); + return value.isDeleted ? null : value; + } - public long incrementValue(long timestamp, ValueOp op, long numToAllocate) throws SQLException { - SequenceValue value = findSequenceValue(timestamp); - if (value == null) { - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } - - if (isSequenceCacheExhausted(numToAllocate, value)) { - if (op == ValueOp.VALIDATE_SEQUENCE) { - return value.currentValue; - } - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } - return increment(value, op, numToAllocate); - } - - /** - * This method first checks whether value.currentValue = value.nextValue, this check is what - * determines whether we need to refresh the cache when evaluating NEXT VALUE FOR. Once - * current value reaches the next value we know the cache is exhausted as we give sequence - * values out one at time. - * - * However for bulk allocations, evaluated by NEXT VALUE FOR, we need a different check - * @see isSequenceCacheExhaustedForBulkAllocation - * - * Using the bulk allocation method for determining if the cache is exhausted for both cases - * works in most of the cases, however when dealing with CYCLEs and overflow and underflow, things - * break down due to things like sign changes that can happen if we overflow from a positive to - * a negative number and vice versa. Therefore, leaving both checks in place. - * - */ - private boolean isSequenceCacheExhausted(final long numToAllocate, final SequenceValue value) throws SQLException { - return value.currentValue == value.nextValue || (SequenceUtil.isBulkAllocation(numToAllocate) && isSequenceCacheExhaustedForBulkAllocation(numToAllocate, value)); - } - - /** - * This method checks whether there are sufficient values in the SequenceValue - * cached on the client to allocate the requested number of slots. It handles - * decreasing and increasing sequences as well as any overflows or underflows - * encountered. 
- */ - private boolean isSequenceCacheExhaustedForBulkAllocation(final long numToAllocate, final SequenceValue value) throws SQLException { - long targetSequenceValue; - - performValidationForBulkAllocation(numToAllocate, value); - - try { - targetSequenceValue = LongMath.checkedAdd(value.currentValue, numToAllocate * value.incrementBy); - } catch (ArithmeticException e) { - // Perform a CheckedAdd to make sure if over/underflow - // We don't treat this as the cache being exhausted as the current value may be valid in the case - // of no cycle, logic in increment() will take care of detecting we've hit the limit of the sequence - return false; - } + private long increment(SequenceValue value, ValueOp op, long numToAllocate) throws SQLException { + boolean increasingSeq = value.incrementBy > 0 && op != ValueOp.VALIDATE_SEQUENCE; + // check if the the sequence has already reached the min/max limit + if (value.limitReached && op != ValueOp.VALIDATE_SEQUENCE) { + if (value.cycle) { + value.limitReached = false; + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; + } else { + SQLExceptionCode code = increasingSeq + ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE + : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; + throw SequenceUtil.getException(this.key.getSchemaName(), this.key.getSequenceName(), code); + } + } - if (value.incrementBy > 0) { - return targetSequenceValue > value.nextValue; - } else { - return targetSequenceValue < value.nextValue; - } + long returnValue = value.currentValue; + if (op == ValueOp.INCREMENT_SEQUENCE) { + boolean overflowOrUnderflow = false; + // advance currentValue while checking for overflow + try { + // advance by numToAllocate * the increment amount + value.currentValue = + LongMath.checkedAdd(value.currentValue, numToAllocate * value.incrementBy); + } catch (ArithmeticException e) { + overflowOrUnderflow = true; + } + + // set the limitReached flag (which will be checked the next time increment is called) + // if overflow or limit was reached + if ( + overflowOrUnderflow || (increasingSeq && value.currentValue > value.maxValue) + || (!increasingSeq && value.currentValue < value.minValue) + ) { + value.limitReached = true; + } + } + return returnValue; + } + + public long incrementValue(long timestamp, ValueOp op, long numToAllocate) throws SQLException { + SequenceValue value = findSequenceValue(timestamp); + if (value == null) { + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; } - - /** - * @throws SQLException with the correct error code if sequence limit is reached with - * this request for allocation or we attempt to perform a bulk allocation on a sequence - * with cycles. - */ - private void performValidationForBulkAllocation(final long numToAllocate, final SequenceValue value) - throws SQLException { - boolean increasingSeq = value.incrementBy > 0 ? 
true : false; - - // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true - // Check for this here so we fail on expression evaluation and don't allow corner case - // whereby a client requests less than cached number of slots on sequence with cycle to succeed - if (value.cycle && !SequenceUtil.isCycleAllowed(numToAllocate)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } - - if (SequenceUtil.checkIfLimitReached(value.currentValue, value.minValue, value.maxValue, value.incrementBy, value.cacheSize, numToAllocate)) { - throw new SQLExceptionInfo.Builder(SequenceUtil.getLimitReachedErrorCode(increasingSeq)) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } + + if (isSequenceCacheExhausted(numToAllocate, value)) { + if (op == ValueOp.VALIDATE_SEQUENCE) { + return value.currentValue; + } + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; + } + return increment(value, op, numToAllocate); + } + + /** + * This method first checks whether value.currentValue = value.nextValue, this check is what + * determines whether we need to refresh the cache when evaluating NEXT VALUE FOR. Once current + * value reaches the next value we know the cache is exhausted as we give sequence values out one + * at time. However for bulk allocations, evaluated by NEXT VALUE FOR, we need a different + * check + * @see isSequenceCacheExhaustedForBulkAllocation Using the bulk allocation method for determining + * if the cache is exhausted for both cases works in most of the cases, however when dealing + * with CYCLEs and overflow and underflow, things break down due to things like sign changes + * that can happen if we overflow from a positive to a negative number and vice versa. + * Therefore, leaving both checks in place. + */ + private boolean isSequenceCacheExhausted(final long numToAllocate, final SequenceValue value) + throws SQLException { + return value.currentValue == value.nextValue || (SequenceUtil.isBulkAllocation(numToAllocate) + && isSequenceCacheExhaustedForBulkAllocation(numToAllocate, value)); + } + + /** + * This method checks whether there are sufficient values in the SequenceValue cached on the + * client to allocate the requested number of slots. It handles decreasing and increasing + * sequences as well as any overflows or underflows encountered. 
+ */ + private boolean isSequenceCacheExhaustedForBulkAllocation(final long numToAllocate, + final SequenceValue value) throws SQLException { + long targetSequenceValue; + + performValidationForBulkAllocation(numToAllocate, value); + + try { + targetSequenceValue = + LongMath.checkedAdd(value.currentValue, numToAllocate * value.incrementBy); + } catch (ArithmeticException e) { + // Perform a CheckedAdd to make sure if over/underflow + // We don't treat this as the cache being exhausted as the current value may be valid in the + // case + // of no cycle, logic in increment() will take care of detecting we've hit the limit of the + // sequence + return false; } - public List newReturns() { - if (values == null) { - return Collections.emptyList(); - } - List appends = Lists.newArrayListWithExpectedSize(values.size()); - for (SequenceValue value : values) { - if (value.isInitialized() && value.currentValue != value.nextValue) { - appends.add(newReturn(value)); - } - } - return appends; + if (value.incrementBy > 0) { + return targetSequenceValue > value.nextValue; + } else { + return targetSequenceValue < value.nextValue; } - - public Append newReturn(long timestamp) throws EmptySequenceCacheException { - SequenceValue value = findSequenceValue(timestamp); - if (value == null) { - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } - if (value.currentValue == value.nextValue) { - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } - return newReturn(value); - } - - private Append newReturn(SequenceValue value) { - byte[] key = this.key.getKey(); - Append append = new Append(key); - byte[] opBuf = new byte[] {(byte)MetaOp.RETURN_SEQUENCE.ordinal()}; - append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, opBuf); - append.setAttribute(SequenceRegionObserverConstants.CURRENT_VALUE_ATTRIB, PLong.INSTANCE.toBytes(value.nextValue)); - Map> familyMap = append.getFamilyCellMap(); - familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.asList( - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PLong.INSTANCE.toBytes(value.currentValue)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, PBoolean.INSTANCE.toBytes(value.limitReached)) - )); - return append; - } - - public long currentValue(long timestamp) throws EmptySequenceCacheException { - SequenceValue value = findSequenceValue(timestamp); - if (value == null || value.isUnitialized()) { - throw EMPTY_SEQUENCE_CACHE_EXCEPTION; - } - return value.currentValue - value.incrementBy; - } - - public ReentrantLock getLock() { - return lock; - } - - public SequenceKey getKey() { - return key; - } - - public long incrementValue(Result result, ValueOp op, long numToAllocate) throws SQLException { - // In this case, we don't definitely know the timestamp of the deleted sequence, - // but we know anything older is likely deleted. Worse case, we remove a sequence - // from the cache that we shouldn't have which will cause a gap in sequence values. - // In that case, we might get an error that a curr value was done on a sequence - // before a next val was. Not sure how to prevent that. 
- if (result.rawCells().length == 1) { - Cell errorKV = result.rawCells()[0]; - int errorCode = PInteger.INSTANCE.getCodec().decodeInt(errorKV.getValueArray(), errorKV.getValueOffset(), SortOrder.getDefault()); - SQLExceptionCode code = SQLExceptionCode.fromErrorCode(errorCode); - // TODO: We could have the server return the timestamps of the - // delete markers and we could insert them here, but this seems - // like overkill. - // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { - // } - throw new SQLExceptionInfo.Builder(code) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } - // If we found the sequence, we update our cache with the new value - SequenceValue value = new SequenceValue(result, op, numToAllocate); - insertSequenceValue(value); - return increment(value, op, numToAllocate); - } - - - public Increment newIncrement(long timestamp, Sequence.ValueOp action, long numToAllocate) { - byte[] incKey = key.getKey(); - byte[] incValue = Bytes.toBytes((long)action.ordinal()); - Increment inc = new Increment(incKey); - // It doesn't matter what we set the amount too - we always use the values we get - // from the Get we do to prevent any race conditions. All columns that get added - // are returned with their current value - try { - inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); - inc.setAttribute(SequenceRegionObserverConstants.NUM_TO_ALLOCATE, Bytes.toBytes(numToAllocate)); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - for (Cell kv : SEQUENCE_KV_COLUMNS) { - try { - // Store the timestamp on the cell as well as HBase 1.2 seems to not - // be serializing over the time range (see HBASE-15698). - Cell cell = new KeyValue(incKey, 0, incKey.length, - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), - kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), - timestamp, - KeyValue.Type.Put, - incValue, 0, incValue.length); - inc.add(cell); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - } - return inc; - } - - /** - * Returns a KeyValue from the input result row - * @param kv an empty KeyValue used only to get the column family and column qualifier of the - * key value to be returned (if the sequence row is from a previous version) - * @param cellIndex index of the KeyValue to be returned (if the sequence row is from a previous version - * @return KeyValue - */ - private static Cell getKeyValue(Result r, Cell kv, int cellIndex) { - Cell[] cells = r.rawCells(); - // if the sequence row is from a previous version then MIN_VALUE, MAX_VALUE, CYCLE and LIMIT_REACHED key values are not present, - // the sequence row has only three columns (INCREMENT_BY, CACHE_SIZE and CURRENT_VALUE) and the order of the cells - // in the array returned by rawCells() is not what what we expect so use getColumnLatestCell() to get the cell we want - return cells.length == NUM_SEQUENCE_KEY_VALUES ? 
cells[cellIndex] : - r.getColumnLatestCell(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); - } - - private static Cell getKeyValue(Result r, Cell kv) { - return getKeyValue(r, kv, SEQUENCE_KV_COLUMNS.indexOf(kv)); - } - - public static Cell getCurrentValueKV(Result r) { - return getKeyValue(r, CURRENT_VALUE_KV, CURRENT_VALUE_INDEX); - } - - public static Cell getIncrementByKV(Result r) { - return getKeyValue(r, INCREMENT_BY_KV, INCREMENT_BY_INDEX); - } - - public static Cell getCacheSizeKV(Result r) { - return getKeyValue(r, CACHE_SIZE_KV, CACHE_SIZE_INDEX); - } - - public static Cell getMinValueKV(Result r) { - return getKeyValue(r, MIN_VALUE_KV, MIN_VALUE_INDEX); - } - - public static Cell getMaxValueKV(Result r) { - return getKeyValue(r, MAX_VALUE_KV, MAX_VALUE_INDEX); - } - - public static Cell getCycleKV(Result r) { - return getKeyValue(r, CYCLE_KV, CYCLE_INDEX); - } - - public static Cell getLimitReachedKV(Result r) { - return getKeyValue(r, LIMIT_REACHED_KV, LIMIT_REACHED_INDEX); - } - - public static void replaceCurrentValueKV(List kvs, Cell currentValueKV) { - kvs.set(CURRENT_VALUE_INDEX, currentValueKV); - } - - public static void replaceMinValueKV(List kvs, Cell minValueKV) { - kvs.set(MIN_VALUE_INDEX, minValueKV); - } - - public static void replaceMaxValueKV(List kvs, Cell maxValueKV) { - kvs.set(MAX_VALUE_INDEX, maxValueKV); - } - - public static void replaceCycleValueKV(List kvs, Cell cycleValueKV) { - kvs.set(CYCLE_INDEX, cycleValueKV); - } - public static void replaceLimitReachedKV(List kvs, Cell limitReachedKV) { - kvs.set(LIMIT_REACHED_INDEX, limitReachedKV); - } - - /** - * Returns the KeyValues of r if it contains the expected number of KeyValues, - * else returns a list of KeyValues corresponding to SEQUENCE_KV_COLUMNS - */ - public static List getCells(Result r, int numKVs) { - // if the sequence row is from a previous version - if (r.rawCells().length == numKVs ) - return Lists.newArrayList(r.rawCells()); - // else we need to handle missing MIN_VALUE, MAX_VALUE, CYCLE and LIMIT_REACHED KeyValues - List cellList = Lists.newArrayListWithCapacity(NUM_SEQUENCE_KEY_VALUES); - for (Cell kv : SEQUENCE_KV_COLUMNS) { - cellList.add(getKeyValue(r,kv)); - } - return cellList; - } - - private static final class SequenceValue { - public final long incrementBy; - public final long timestamp; - public final long cacheSize; - - public long currentValue; - public long nextValue; - public long minValue; - public long maxValue; - public boolean cycle; - public boolean isDeleted; - public boolean limitReached; - - public SequenceValue(long timestamp, long minValue, long maxValue, boolean cycle) { - this(timestamp, false); - this.minValue = minValue; - this.maxValue = maxValue; - this.cycle = cycle; - } - - public SequenceValue(long timestamp, boolean isDeleted) { - this.timestamp = timestamp; - this.isDeleted = isDeleted; - this.incrementBy = 0; - this.limitReached = false; - this.cacheSize = 0; - } - - public boolean isInitialized() { - return this.incrementBy != 0; - } - - public boolean isUnitialized() { - return this.incrementBy == 0; - } - - public SequenceValue(Result r, ValueOp op, long numToAllocate) { - Cell currentValueKV = getCurrentValueKV(r); - Cell incrementByKV = getIncrementByKV(r); - Cell cacheSizeKV = getCacheSizeKV(r); - Cell minValueKV = getMinValueKV(r); - Cell maxValueKV = getMaxValueKV(r); - Cell cycleKV = getCycleKV(r); - this.timestamp = currentValueKV.getTimestamp(); - 
this.nextValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault()); - this.incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault()); - this.cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), cacheSizeKV.getValueOffset(), SortOrder.getDefault()); - this.minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), minValueKV.getValueOffset(), SortOrder.getDefault()); - this.maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), maxValueKV.getValueOffset(), SortOrder.getDefault()); - this.cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), cycleKV.getValueOffset(), cycleKV.getValueLength()); - this.limitReached = false; - currentValue = nextValue; - - if (op != ValueOp.VALIDATE_SEQUENCE) { - // We can't just take the max of numToAllocate and cacheSize - // We need to handle a valid edgecase where a client requests bulk allocation of - // a number of slots that are less than cache size of the sequence - currentValue -= incrementBy * (SequenceUtil.isBulkAllocation(numToAllocate) ? numToAllocate : cacheSize); - } - } + } + + /** + * @throws SQLException with the correct error code if sequence limit is reached with this request + * for allocation or we attempt to perform a bulk allocation on a sequence + * with cycles. + */ + private void performValidationForBulkAllocation(final long numToAllocate, + final SequenceValue value) throws SQLException { + boolean increasingSeq = value.incrementBy > 0 ? true : false; + + // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true + // Check for this here so we fail on expression evaluation and don't allow corner case + // whereby a client requests less than cached number of slots on sequence with cycle to succeed + if (value.cycle && !SequenceUtil.isCycleAllowed(numToAllocate)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED) + .setSchemaName(key.getSchemaName()).setTableName(key.getSequenceName()).build() + .buildException(); } - public boolean returnValue(Result result) throws SQLException { - Cell statusKV = result.rawCells()[0]; - if (statusKV.getValueLength() == 0) { // No error, but unable to return sequence values - return false; - } - long timestamp = statusKV.getTimestamp(); - int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), SortOrder.getDefault()); - if (statusCode == SUCCESS) { // Success - update nextValue down to currentValue - SequenceValue value = findSequenceValue(timestamp); - if (value == null) { - throw new EmptySequenceCacheException(key.getSchemaName(),key.getSequenceName()); - } - return true; - } - SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); - // TODO: We could have the server return the timestamps of the - // delete markers and we could insert them here, but this seems - // like overkill. 
- // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { - // } - throw new SQLExceptionInfo.Builder(code) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } - - public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) { - byte[] key = this.key.getKey(); - Append append = new Append(key); - append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.CREATE_SEQUENCE.ordinal()}); - if (timestamp != HConstants.LATEST_TIMESTAMP) { - append.setAttribute(SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp)); - } - Map> familyMap = append.getFamilyCellMap(); - byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith); - familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.asList( - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)), - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES) - )); - return append; - } - - public long createSequence(Result result, long minValue, long maxValue, boolean cycle) throws SQLException { - Cell statusKV = result.rawCells()[0]; - long timestamp = statusKV.getTimestamp(); - int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), SortOrder.getDefault()); - if (statusCode == 0) { // Success - add sequence value and return timestamp - SequenceValue value = new SequenceValue(timestamp, minValue, maxValue, cycle); - insertSequenceValue(value); - return timestamp; - } - SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); - throw new SQLExceptionInfo.Builder(code) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } - - public Append dropSequence(long timestamp) { - byte[] key = this.key.getKey(); - Append append = new Append(key); - append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.DROP_SEQUENCE.ordinal()}); - if (timestamp != 
HConstants.LATEST_TIMESTAMP) { - append.setAttribute(SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp)); - } - Map> familyMap = append.getFamilyCellMap(); - familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.asList( - PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY))); - return append; - } - - public long dropSequence(Result result) throws SQLException { - Cell statusKV = result.rawCells()[0]; - long timestamp = statusKV.getTimestamp(); - int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), statusKV.getValueOffset(), SortOrder.getDefault()); - SQLExceptionCode code = statusCode == 0 ? null : SQLExceptionCode.fromErrorCode(statusCode); - if (code == null) { - // Insert delete marker so that point-in-time sequences work - insertSequenceValue(new SequenceValue(timestamp, true)); - return timestamp; - } - // TODO: We could have the server return the timestamps of the - // delete markers and we could insert them here, but this seems - // like overkill. - // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { - // } - throw new SQLExceptionInfo.Builder(code) - .setSchemaName(key.getSchemaName()) - .setTableName(key.getSequenceName()) - .build().buildException(); - } - - public static String getCreateTableStatement(String schema, int nSaltBuckets) { - if (nSaltBuckets <= 0) { - return schema; - } - return schema + "," + PhoenixDatabaseMetaData.SALT_BUCKETS + "=" + nSaltBuckets; + if ( + SequenceUtil.checkIfLimitReached(value.currentValue, value.minValue, value.maxValue, + value.incrementBy, value.cacheSize, numToAllocate) + ) { + throw new SQLExceptionInfo.Builder(SequenceUtil.getLimitReachedErrorCode(increasingSeq)) + .setSchemaName(key.getSchemaName()).setTableName(key.getSequenceName()).build() + .buildException(); + } + } + + public List newReturns() { + if (values == null) { + return Collections.emptyList(); + } + List appends = Lists.newArrayListWithExpectedSize(values.size()); + for (SequenceValue value : values) { + if (value.isInitialized() && value.currentValue != value.nextValue) { + appends.add(newReturn(value)); + } + } + return appends; + } + + public Append newReturn(long timestamp) throws EmptySequenceCacheException { + SequenceValue value = findSequenceValue(timestamp); + if (value == null) { + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; + } + if (value.currentValue == value.nextValue) { + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; + } + return newReturn(value); + } + + private Append newReturn(SequenceValue value) { + byte[] key = this.key.getKey(); + Append append = new Append(key); + byte[] opBuf = new byte[] { (byte) MetaOp.RETURN_SEQUENCE.ordinal() }; + append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, opBuf); + append.setAttribute(SequenceRegionObserverConstants.CURRENT_VALUE_ATTRIB, + PLong.INSTANCE.toBytes(value.nextValue)); + Map> familyMap = append.getFamilyCellMap(); + familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + Arrays. 
asList( + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, + PLong.INSTANCE.toBytes(value.currentValue)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, + PBoolean.INSTANCE.toBytes(value.limitReached)))); + return append; + } + + public long currentValue(long timestamp) throws EmptySequenceCacheException { + SequenceValue value = findSequenceValue(timestamp); + if (value == null || value.isUnitialized()) { + throw EMPTY_SEQUENCE_CACHE_EXCEPTION; + } + return value.currentValue - value.incrementBy; + } + + public ReentrantLock getLock() { + return lock; + } + + public SequenceKey getKey() { + return key; + } + + public long incrementValue(Result result, ValueOp op, long numToAllocate) throws SQLException { + // In this case, we don't definitely know the timestamp of the deleted sequence, + // but we know anything older is likely deleted. Worse case, we remove a sequence + // from the cache that we shouldn't have which will cause a gap in sequence values. + // In that case, we might get an error that a curr value was done on a sequence + // before a next val was. Not sure how to prevent that. + if (result.rawCells().length == 1) { + Cell errorKV = result.rawCells()[0]; + int errorCode = PInteger.INSTANCE.getCodec().decodeInt(errorKV.getValueArray(), + errorKV.getValueOffset(), SortOrder.getDefault()); + SQLExceptionCode code = SQLExceptionCode.fromErrorCode(errorCode); + // TODO: We could have the server return the timestamps of the + // delete markers and we could insert them here, but this seems + // like overkill. + // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { + // } + throw new SQLExceptionInfo.Builder(code).setSchemaName(key.getSchemaName()) + .setTableName(key.getSequenceName()).build().buildException(); + } + // If we found the sequence, we update our cache with the new value + SequenceValue value = new SequenceValue(result, op, numToAllocate); + insertSequenceValue(value); + return increment(value, op, numToAllocate); + } + + public Increment newIncrement(long timestamp, Sequence.ValueOp action, long numToAllocate) { + byte[] incKey = key.getKey(); + byte[] incValue = Bytes.toBytes((long) action.ordinal()); + Increment inc = new Increment(incKey); + // It doesn't matter what we set the amount too - we always use the values we get + // from the Get we do to prevent any race conditions. All columns that get added + // are returned with their current value + try { + inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); + inc.setAttribute(SequenceRegionObserverConstants.NUM_TO_ALLOCATE, + Bytes.toBytes(numToAllocate)); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + for (Cell kv : SEQUENCE_KV_COLUMNS) { + try { + // Store the timestamp on the cell as well as HBase 1.2 seems to not + // be serializing over the time range (see HBASE-15698). 
+ Cell cell = + new KeyValue(incKey, 0, incKey.length, kv.getFamilyArray(), kv.getFamilyOffset(), + kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), timestamp, KeyValue.Type.Put, incValue, 0, incValue.length); + inc.add(cell); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } + } + return inc; + } + + /** + * Returns a KeyValue from the input result row + * @param kv an empty KeyValue used only to get the column family and column qualifier of + * the key value to be returned (if the sequence row is from a previous version) + * @param cellIndex index of the KeyValue to be returned (if the sequence row is from a previous + * version + */ + private static Cell getKeyValue(Result r, Cell kv, int cellIndex) { + Cell[] cells = r.rawCells(); + // if the sequence row is from a previous version then MIN_VALUE, MAX_VALUE, CYCLE and + // LIMIT_REACHED key values are not present, + // the sequence row has only three columns (INCREMENT_BY, CACHE_SIZE and CURRENT_VALUE) and the + // order of the cells + // in the array returned by rawCells() is not what what we expect so use getColumnLatestCell() + // to get the cell we want + return cells.length == NUM_SEQUENCE_KEY_VALUES + ? cells[cellIndex] + : r.getColumnLatestCell(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); + } + + private static Cell getKeyValue(Result r, Cell kv) { + return getKeyValue(r, kv, SEQUENCE_KV_COLUMNS.indexOf(kv)); + } + + public static Cell getCurrentValueKV(Result r) { + return getKeyValue(r, CURRENT_VALUE_KV, CURRENT_VALUE_INDEX); + } + + public static Cell getIncrementByKV(Result r) { + return getKeyValue(r, INCREMENT_BY_KV, INCREMENT_BY_INDEX); + } + + public static Cell getCacheSizeKV(Result r) { + return getKeyValue(r, CACHE_SIZE_KV, CACHE_SIZE_INDEX); + } + + public static Cell getMinValueKV(Result r) { + return getKeyValue(r, MIN_VALUE_KV, MIN_VALUE_INDEX); + } + + public static Cell getMaxValueKV(Result r) { + return getKeyValue(r, MAX_VALUE_KV, MAX_VALUE_INDEX); + } + + public static Cell getCycleKV(Result r) { + return getKeyValue(r, CYCLE_KV, CYCLE_INDEX); + } + + public static Cell getLimitReachedKV(Result r) { + return getKeyValue(r, LIMIT_REACHED_KV, LIMIT_REACHED_INDEX); + } + + public static void replaceCurrentValueKV(List kvs, Cell currentValueKV) { + kvs.set(CURRENT_VALUE_INDEX, currentValueKV); + } + + public static void replaceMinValueKV(List kvs, Cell minValueKV) { + kvs.set(MIN_VALUE_INDEX, minValueKV); + } + + public static void replaceMaxValueKV(List kvs, Cell maxValueKV) { + kvs.set(MAX_VALUE_INDEX, maxValueKV); + } + + public static void replaceCycleValueKV(List kvs, Cell cycleValueKV) { + kvs.set(CYCLE_INDEX, cycleValueKV); + } + + public static void replaceLimitReachedKV(List kvs, Cell limitReachedKV) { + kvs.set(LIMIT_REACHED_INDEX, limitReachedKV); + } + + /** + * Returns the KeyValues of r if it contains the expected number of KeyValues, else returns a list + * of KeyValues corresponding to SEQUENCE_KV_COLUMNS + */ + public static List getCells(Result r, int numKVs) { + // if the sequence row is from a previous version + if (r.rawCells().length == numKVs) return Lists.newArrayList(r.rawCells()); + // else we need to handle missing MIN_VALUE, MAX_VALUE, CYCLE and LIMIT_REACHED KeyValues + List cellList = Lists.newArrayListWithCapacity(NUM_SEQUENCE_KEY_VALUES); + for (Cell kv : SEQUENCE_KV_COLUMNS) { + cellList.add(getKeyValue(r, 
kv)); + } + return cellList; + } + + private static final class SequenceValue { + public final long incrementBy; + public final long timestamp; + public final long cacheSize; + + public long currentValue; + public long nextValue; + public long minValue; + public long maxValue; + public boolean cycle; + public boolean isDeleted; + public boolean limitReached; + + public SequenceValue(long timestamp, long minValue, long maxValue, boolean cycle) { + this(timestamp, false); + this.minValue = minValue; + this.maxValue = maxValue; + this.cycle = cycle; + } + + public SequenceValue(long timestamp, boolean isDeleted) { + this.timestamp = timestamp; + this.isDeleted = isDeleted; + this.incrementBy = 0; + this.limitReached = false; + this.cacheSize = 0; + } + + public boolean isInitialized() { + return this.incrementBy != 0; + } + + public boolean isUnitialized() { + return this.incrementBy == 0; + } + + public SequenceValue(Result r, ValueOp op, long numToAllocate) { + Cell currentValueKV = getCurrentValueKV(r); + Cell incrementByKV = getIncrementByKV(r); + Cell cacheSizeKV = getCacheSizeKV(r); + Cell minValueKV = getMinValueKV(r); + Cell maxValueKV = getMaxValueKV(r); + Cell cycleKV = getCycleKV(r); + this.timestamp = currentValueKV.getTimestamp(); + this.nextValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), + currentValueKV.getValueOffset(), SortOrder.getDefault()); + this.incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), + incrementByKV.getValueOffset(), SortOrder.getDefault()); + this.cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), + cacheSizeKV.getValueOffset(), SortOrder.getDefault()); + this.minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), + minValueKV.getValueOffset(), SortOrder.getDefault()); + this.maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), + maxValueKV.getValueOffset(), SortOrder.getDefault()); + this.cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), + cycleKV.getValueOffset(), cycleKV.getValueLength()); + this.limitReached = false; + currentValue = nextValue; + + if (op != ValueOp.VALIDATE_SEQUENCE) { + // We can't just take the max of numToAllocate and cacheSize + // We need to handle a valid edgecase where a client requests bulk allocation of + // a number of slots that are less than cache size of the sequence + currentValue -= + incrementBy * (SequenceUtil.isBulkAllocation(numToAllocate) ? numToAllocate : cacheSize); + } + } + } + + public boolean returnValue(Result result) throws SQLException { + Cell statusKV = result.rawCells()[0]; + if (statusKV.getValueLength() == 0) { // No error, but unable to return sequence values + return false; + } + long timestamp = statusKV.getTimestamp(); + int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), + statusKV.getValueOffset(), SortOrder.getDefault()); + if (statusCode == SUCCESS) { // Success - update nextValue down to currentValue + SequenceValue value = findSequenceValue(timestamp); + if (value == null) { + throw new EmptySequenceCacheException(key.getSchemaName(), key.getSequenceName()); + } + return true; + } + SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); + // TODO: We could have the server return the timestamps of the + // delete markers and we could insert them here, but this seems + // like overkill. 
+ // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { + // } + throw new SQLExceptionInfo.Builder(code).setSchemaName(key.getSchemaName()) + .setTableName(key.getSequenceName()).build().buildException(); + } + + public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, + long minValue, long maxValue, boolean cycle) { + byte[] key = this.key.getKey(); + Append append = new Append(key); + append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, + new byte[] { (byte) MetaOp.CREATE_SEQUENCE.ordinal() }); + if (timestamp != HConstants.LATEST_TIMESTAMP) { + append.setAttribute(SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB, + Bytes.toBytes(timestamp)); + } + Map> familyMap = append.getFamilyCellMap(); + byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith); + familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays. asList( + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)), + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES))); + return append; + } + + public long createSequence(Result result, long minValue, long maxValue, boolean cycle) + throws SQLException { + Cell statusKV = result.rawCells()[0]; + long timestamp = statusKV.getTimestamp(); + int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), + statusKV.getValueOffset(), SortOrder.getDefault()); + if (statusCode == 0) { // Success - add sequence value and return timestamp + SequenceValue value = new SequenceValue(timestamp, minValue, maxValue, cycle); + insertSequenceValue(value); + return timestamp; + } + SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); + throw new SQLExceptionInfo.Builder(code).setSchemaName(key.getSchemaName()) + .setTableName(key.getSequenceName()).build().buildException(); + } + + public Append dropSequence(long timestamp) { + byte[] key = this.key.getKey(); + Append append = new Append(key); + append.setAttribute(SequenceRegionObserverConstants.OPERATION_ATTRIB, + new byte[] { (byte) MetaOp.DROP_SEQUENCE.ordinal() }); + if 
(timestamp != HConstants.LATEST_TIMESTAMP) { + append.setAttribute(SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB, + Bytes.toBytes(timestamp)); + } + Map> familyMap = append.getFamilyCellMap(); + familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + Arrays. asList( + PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY))); + return append; + } + + public long dropSequence(Result result) throws SQLException { + Cell statusKV = result.rawCells()[0]; + long timestamp = statusKV.getTimestamp(); + int statusCode = PInteger.INSTANCE.getCodec().decodeInt(statusKV.getValueArray(), + statusKV.getValueOffset(), SortOrder.getDefault()); + SQLExceptionCode code = statusCode == 0 ? null : SQLExceptionCode.fromErrorCode(statusCode); + if (code == null) { + // Insert delete marker so that point-in-time sequences work + insertSequenceValue(new SequenceValue(timestamp, true)); + return timestamp; + } + // TODO: We could have the server return the timestamps of the + // delete markers and we could insert them here, but this seems + // like overkill. + // if (code == SQLExceptionCode.SEQUENCE_UNDEFINED) { + // } + throw new SQLExceptionInfo.Builder(code).setSchemaName(key.getSchemaName()) + .setTableName(key.getSequenceName()).build().buildException(); + } + + public static String getCreateTableStatement(String schema, int nSaltBuckets) { + if (nSaltBuckets <= 0) { + return schema; } + return schema + "," + PhoenixDatabaseMetaData.SALT_BUCKETS + "=" + nSaltBuckets; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAllocation.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAllocation.java index e24c2ad0e83..ab8c82011df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAllocation.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAllocation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,50 +20,44 @@ import edu.umd.cs.findbugs.annotations.SuppressWarnings; /** - * A SequenceKey and the number of slots requested to be allocated for the sequence. - * It binds these two together to allow operations such as sorting - * a Collection of SequenceKeys and at the same time preserving the associated requested - * number of slots to allocate. - * - * This class delegates hashCode, equals and compareTo to @see{SequenceKey}. - * + * A SequenceKey and the number of slots requested to be allocated for the sequence. It binds these + * two together to allow operations such as sorting a Collection of SequenceKeys and at the same + * time preserving the associated requested number of slots to allocate. This class delegates + * hashCode, equals and compareTo to @see{SequenceKey}. 
*/ public class SequenceAllocation implements Comparable { - - private final SequenceKey sequenceKey; - private final long numAllocations; - - public SequenceAllocation(SequenceKey sequenceKey, long numAllocations) { - this.sequenceKey = sequenceKey; - this.numAllocations = numAllocations; - } - - - public SequenceKey getSequenceKey() { - return sequenceKey; - } + private final SequenceKey sequenceKey; + private final long numAllocations; + + public SequenceAllocation(SequenceKey sequenceKey, long numAllocations) { + this.sequenceKey = sequenceKey; + this.numAllocations = numAllocations; + } + + public SequenceKey getSequenceKey() { + return sequenceKey; + } + + public long getNumAllocations() { + return numAllocations; + } + + @Override + public int hashCode() { + return sequenceKey.hashCode(); + } - public long getNumAllocations() { - return numAllocations; - } + @Override + @SuppressWarnings(value = "EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS", + justification = "Checked in called function") + public boolean equals(Object obj) { + return sequenceKey.equals(obj); + } + @Override + public int compareTo(SequenceAllocation that) { + return sequenceKey.compareTo(that.sequenceKey); + } - @Override - public int hashCode() { - return sequenceKey.hashCode(); - } - - @Override - @SuppressWarnings(value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS", - justification="Checked in called function") - public boolean equals(Object obj) { - return sequenceKey.equals(obj); - } - - @Override - public int compareTo(SequenceAllocation that) { - return sequenceKey.compareTo(that.sequenceKey); - } - } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAlreadyExistsException.java index 0cb725a313c..87d2922d993 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,29 +18,28 @@ package org.apache.phoenix.schema; import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - public class SequenceAlreadyExistsException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.SEQUENCE_ALREADY_EXIST; - private final String schemaName; - private final String sequenceName; - + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.SEQUENCE_ALREADY_EXIST; + private final String schemaName; + private final String sequenceName; - public SequenceAlreadyExistsException(String schemaName, String sequenceName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(sequenceName).build().toString(), - code.getSQLState(), code.getErrorCode()); - this.schemaName = schemaName; - this.sequenceName = sequenceName; - } + public SequenceAlreadyExistsException(String schemaName, String sequenceName) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(sequenceName) + .build().toString(), code.getSQLState(), code.getErrorCode()); + this.schemaName = schemaName; + this.sequenceName = sequenceName; + } - public String getSequenceName() { - return sequenceName; - } + public String getSequenceName() { + return sequenceName; + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceInfo.java index be4455be197..de386f029f2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceInfo.java @@ -1,31 +1,39 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema; public class SequenceInfo { - public long sequenceValue; - public final long incrementBy; - public final long minValue; - public final long maxValue; - public final long cacheSize; - public final boolean cycle; - public boolean limitReached; + public long sequenceValue; + public final long incrementBy; + public final long minValue; + public final long maxValue; + public final long cacheSize; + public final boolean cycle; + public boolean limitReached; - public SequenceInfo(long sequenceValue, long incrementBy, long minValue, long maxValue, long cacheSize, boolean cycle) { - this.sequenceValue = sequenceValue; - this.incrementBy = incrementBy; - this.minValue = minValue; - this.maxValue = maxValue; - this.cacheSize = cacheSize; - this.cycle = cycle; - this.limitReached = false; - } -} \ No newline at end of file + public SequenceInfo(long sequenceValue, long incrementBy, long minValue, long maxValue, + long cacheSize, boolean cycle) { + this.sequenceValue = sequenceValue; + this.incrementBy = incrementBy; + this.minValue = minValue; + this.maxValue = maxValue; + this.cacheSize = cacheSize; + this.cycle = cycle; + this.limitReached = false; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceKey.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceKey.java index f83fc04adf2..54e3a8e2272 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceKey.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceKey.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,71 +23,79 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.util.ByteUtil; - public class SequenceKey implements Comparable { - private final String tenantId; - private final String schemaName; - private final String sequenceName; - private final byte[] key; - - public SequenceKey(String tenantId, String schemaName, String sequenceName, int nBuckets) { - this.tenantId = tenantId; - this.schemaName = schemaName; - this.sequenceName = sequenceName; - this.key = ByteUtil.concat((nBuckets <= 0 ? ByteUtil.EMPTY_BYTE_ARRAY : QueryConstants.SEPARATOR_BYTE_ARRAY), tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(sequenceName)); - if (nBuckets > 0) { - key[0] = SaltingUtil.getSaltingByte(key, SaltingUtil.NUM_SALTING_BYTES, key.length - SaltingUtil.NUM_SALTING_BYTES, nBuckets); - } + private final String tenantId; + private final String schemaName; + private final String sequenceName; + private final byte[] key; + + public SequenceKey(String tenantId, String schemaName, String sequenceName, int nBuckets) { + this.tenantId = tenantId; + this.schemaName = schemaName; + this.sequenceName = sequenceName; + this.key = ByteUtil.concat( + (nBuckets <= 0 ? ByteUtil.EMPTY_BYTE_ARRAY : QueryConstants.SEPARATOR_BYTE_ARRAY), + tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + QueryConstants.SEPARATOR_BYTE_ARRAY, + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(sequenceName)); + if (nBuckets > 0) { + key[0] = SaltingUtil.getSaltingByte(key, SaltingUtil.NUM_SALTING_BYTES, + key.length - SaltingUtil.NUM_SALTING_BYTES, nBuckets); } + } - public byte[] getKey() { - return key; + public byte[] getKey() { + return key; - } - public String getTenantId() { - return tenantId; - } + } - public String getSchemaName() { - return schemaName; - } + public String getTenantId() { + return tenantId; + } - public String getSequenceName() { - return sequenceName; - } + public String getSchemaName() { + return schemaName; + } - @Override - public int compareTo(SequenceKey that) { - int c = Objects.equals(this.tenantId, that.getTenantId()) ? 0 - : this.tenantId == null ? -1 : that.getTenantId() == null ? 1 - : this.tenantId.compareTo(that.getTenantId()); - if (c == 0) { - c = Objects.equals(this.schemaName, that.getSchemaName()) ? 0 - : this.schemaName == null ? -1 : that.getSchemaName() == null ? 1 - : this.schemaName.compareTo(that.getSchemaName()); - if (c == 0) { - return sequenceName.compareTo(that.getSequenceName()); - } - } - return c; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode()); - result = prime * result + ((schemaName == null) ? 0 : schemaName.hashCode()); - result = prime * result + sequenceName.hashCode(); - return result; - } + public String getSequenceName() { + return sequenceName; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - SequenceKey other = (SequenceKey)obj; - return this.compareTo(other) == 0; + @Override + public int compareTo(SequenceKey that) { + int c = Objects.equals(this.tenantId, that.getTenantId()) ? 0 + : this.tenantId == null ? -1 + : that.getTenantId() == null ? 1 + : this.tenantId.compareTo(that.getTenantId()); + if (c == 0) { + c = Objects.equals(this.schemaName, that.getSchemaName()) ? 0 + : this.schemaName == null ? -1 + : that.getSchemaName() == null ? 1 + : this.schemaName.compareTo(that.getSchemaName()); + if (c == 0) { + return sequenceName.compareTo(that.getSequenceName()); + } } -} \ No newline at end of file + return c; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode()); + result = prime * result + ((schemaName == null) ? 
0 : schemaName.hashCode()); + result = prime * result + sequenceName.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + SequenceKey other = (SequenceKey) obj; + return this.compareTo(other) == 0; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceNotFoundException.java index 29ff87b47d2..c1565b1c278 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SequenceNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,17 +20,16 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - public class SequenceNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.SEQUENCE_UNDEFINED; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.SEQUENCE_UNDEFINED; - public SequenceNotFoundException(String tableName) { - this(null, tableName); - } + public SequenceNotFoundException(String tableName) { + this(null, tableName); + } - public SequenceNotFoundException(String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).build().toString(), - code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); - } + public SequenceNotFoundException(String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .build().toString(), code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java index 6c51b8fb7ee..ce2adf4d0aa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,25 +23,26 @@ public class SerializedPTableRef extends PTableRef { - private final byte[] tableBytes; - - public SerializedPTableRef(byte[] tableBytes, long lastAccessTime, long resolvedTime, int estimatedSize) { - super(lastAccessTime, resolvedTime, tableBytes.length); - this.tableBytes = tableBytes; - } - - public SerializedPTableRef(PTableRef tableRef) { - super(tableRef.getCreateTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize()); - this.tableBytes = ((SerializedPTableRef)tableRef).tableBytes; - } - - @Override - public PTable getTable() { - try { - return PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(tableBytes)); - } catch (IOException e) { - throw new RuntimeException(e); - } + private final byte[] tableBytes; + + public SerializedPTableRef(byte[] tableBytes, long lastAccessTime, long resolvedTime, + int estimatedSize) { + super(lastAccessTime, resolvedTime, tableBytes.length); + this.tableBytes = tableBytes; + } + + public SerializedPTableRef(PTableRef tableRef) { + super(tableRef.getCreateTime(), tableRef.getResolvedTimeStamp(), tableRef.getEstimatedSize()); + this.tableBytes = ((SerializedPTableRef) tableRef).tableBytes; + } + + @Override + public PTable getTable() { + try { + return PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(tableBytes)); + } catch (IOException e) { + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java index 5da1fd62829..40157a89e90 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SerializedPTableRefFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,20 +18,21 @@ package org.apache.phoenix.schema; class SerializedPTableRefFactory extends PTableRefFactory { - @Override - public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) { - byte[] serializedBytes = PTableImpl.toProto(table).toByteArray(); - return new SerializedPTableRef(serializedBytes, lastAccessTime, resolvedTime, table.getEstimatedSize()); - } - - @Override - public PTableRef makePTableRef(PTableRef tableRef) { - return new SerializedPTableRef(tableRef); - } - - private static final SerializedPTableRefFactory INSTANCE = new SerializedPTableRefFactory(); - - public static PTableRefFactory getFactory() { - return INSTANCE; - } -} \ No newline at end of file + @Override + public PTableRef makePTableRef(PTable table, long lastAccessTime, long resolvedTime) { + byte[] serializedBytes = PTableImpl.toProto(table).toByteArray(); + return new SerializedPTableRef(serializedBytes, lastAccessTime, resolvedTime, + table.getEstimatedSize()); + } + + @Override + public PTableRef makePTableRef(PTableRef tableRef) { + return new SerializedPTableRef(tableRef); + } + + private static final SerializedPTableRefFactory INSTANCE = new SerializedPTableRefFactory(); + + public static PTableRefFactory getFactory() { + return INSTANCE; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SortOrder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SortOrder.java index a169b9df1eb..d8e43f16962 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SortOrder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/SortOrder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,133 +18,128 @@ package org.apache.phoenix.schema; import org.apache.hadoop.hbase.CompareOperator; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * Specifies the sort order on disk of row key columns. The default is ASC. - * - * HBase always stores row keys in ascending order resulting in scans to also be - * sorted by ascending row keys. This enum is used to associate a sort order - * with each row key column to allow storing row key columns in descending - * order. - * - * The often cited example of when you may want to do this is a row key that has - * a date component. If all other parts of the row key are equal, a scan would - * return the data from least recent to most recent; to get the scan to return - * the most recent data first, the time component of the row key can be marked - * as "desc". - * - * Internally, the bits of values for columns marked as "desc" are inverted before handing - * them to HBase to persist; they are inverted again when read back out. 
- * - * - * Example DDL: - * - * CREATE TABLE Events(event_type INTEGER NOT NULL, event_date DATE NOT NULL, event_name VARCHAR NOT NULL - * CONSTRAINT PK PRIMARY KEY (event_type, event_date DESC)) - * + * Specifies the sort order on disk of row key columns. The default is ASC. HBase always stores row + * keys in ascending order resulting in scans to also be sorted by ascending row keys. This enum is + * used to associate a sort order with each row key column to allow storing row key columns in + * descending order. The often cited example of when you may want to do this is a row key that has a + * date component. If all other parts of the row key are equal, a scan would return the data from + * least recent to most recent; to get the scan to return the most recent data first, the time + * component of the row key can be marked as "desc". Internally, the bits of values for columns + * marked as "desc" are inverted before handing them to HBase to persist; they are inverted again + * when read back out. Example DDL: CREATE TABLE Events(event_type INTEGER NOT NULL, event_date DATE + * NOT NULL, event_name VARCHAR NOT NULL CONSTRAINT PK PRIMARY KEY (event_type, event_date DESC)) * @since 1.2 */ public enum SortOrder { - ASC(2) { - @Override - public CompareOperator transform(CompareOperator op) { - return op; - } - - @Override - public byte normalize(byte b) { - return b; - } - }, - - DESC(1) { - @Override - public CompareOperator transform(CompareOperator op) { - switch (op) { - case EQUAL: return op; - case NOT_EQUAL: return op; - case NO_OP: return op; - case GREATER: return CompareOperator.LESS; - case GREATER_OR_EQUAL: return CompareOperator.LESS_OR_EQUAL; - case LESS: return CompareOperator.GREATER; - case LESS_OR_EQUAL: return CompareOperator.GREATER_OR_EQUAL; - } - throw new IllegalArgumentException("Add the missing case statement!"); - } - - @Override - public byte normalize(byte b) { - return SortOrder.invert(b); - } - }; - - /** - * The default order that row keys are stored in. - */ - public static SortOrder getDefault() { - return ASC; - } - - public static byte[] invert(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length) { - Preconditions.checkNotNull(src); - Preconditions.checkNotNull(dest); - for (int i = 0; i < length; i++) { - dest[dstOffset + i] = (byte) (src[srcOffset + i] ^ 0xFF); - } - return dest; - } - - public static byte[] invert(byte[] src, int srcOffset, int length) { - return invert(src, srcOffset, new byte[length], 0, length); - } - - public static byte invert(byte b) { - return (byte) (b ^ 0xFF); - } - - /** - * Returns the SortOrder instance for the specified DDL stmt keyword. - */ - public static SortOrder fromDDLValue(String sortOrder) { - Preconditions.checkArgument(sortOrder != null); - if (sortOrder.equalsIgnoreCase("ASC")) { - return ASC; - } else if (sortOrder.equalsIgnoreCase("DESC")) { - return DESC; - } else { - throw new IllegalArgumentException("Unknown SortOrder: " + sortOrder); - } - } - - /** - * Returns the SortOrder instance for the specified internal value. - */ - public static SortOrder fromSystemValue(int value) { - for (SortOrder mod : SortOrder.values()) { - if (mod.getSystemValue() == value) { - return mod; - } - } - return getDefault(); - } - - private final int serializationId; - - private SortOrder(int serializationId) { - this.serializationId = serializationId; - } - - /** - * Returns an internal value representing the specified SortOrder. 
- */ - public int getSystemValue() { - return serializationId; - } - - public abstract CompareOperator transform(CompareOperator op); - public abstract byte normalize(byte b); + ASC(2) { + @Override + public CompareOperator transform(CompareOperator op) { + return op; + } + + @Override + public byte normalize(byte b) { + return b; + } + }, + + DESC(1) { + @Override + public CompareOperator transform(CompareOperator op) { + switch (op) { + case EQUAL: + return op; + case NOT_EQUAL: + return op; + case NO_OP: + return op; + case GREATER: + return CompareOperator.LESS; + case GREATER_OR_EQUAL: + return CompareOperator.LESS_OR_EQUAL; + case LESS: + return CompareOperator.GREATER; + case LESS_OR_EQUAL: + return CompareOperator.GREATER_OR_EQUAL; + } + throw new IllegalArgumentException("Add the missing case statement!"); + } + + @Override + public byte normalize(byte b) { + return SortOrder.invert(b); + } + }; + + /** + * The default order that row keys are stored in. + */ + public static SortOrder getDefault() { + return ASC; + } + + public static byte[] invert(byte[] src, int srcOffset, byte[] dest, int dstOffset, int length) { + Preconditions.checkNotNull(src); + Preconditions.checkNotNull(dest); + for (int i = 0; i < length; i++) { + dest[dstOffset + i] = (byte) (src[srcOffset + i] ^ 0xFF); + } + return dest; + } + + public static byte[] invert(byte[] src, int srcOffset, int length) { + return invert(src, srcOffset, new byte[length], 0, length); + } + + public static byte invert(byte b) { + return (byte) (b ^ 0xFF); + } + + /** + * Returns the SortOrder instance for the specified DDL stmt keyword. + */ + public static SortOrder fromDDLValue(String sortOrder) { + Preconditions.checkArgument(sortOrder != null); + if (sortOrder.equalsIgnoreCase("ASC")) { + return ASC; + } else if (sortOrder.equalsIgnoreCase("DESC")) { + return DESC; + } else { + throw new IllegalArgumentException("Unknown SortOrder: " + sortOrder); + } + } + + /** + * Returns the SortOrder instance for the specified internal value. + */ + public static SortOrder fromSystemValue(int value) { + for (SortOrder mod : SortOrder.values()) { + if (mod.getSystemValue() == value) { + return mod; + } + } + return getDefault(); + } + + private final int serializationId; + + private SortOrder(int serializationId) { + this.serializationId = serializationId; + } + + /** + * Returns an internal value representing the specified SortOrder. + */ + public int getSystemValue() { + return serializationId; + } + + public abstract CompareOperator transform(CompareOperator op); + + public abstract byte normalize(byte b); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/StaleRegionBoundaryCacheException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/StaleRegionBoundaryCacheException.java index eb9d8758d5e..96f05107efc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/StaleRegionBoundaryCacheException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/StaleRegionBoundaryCacheException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,23 +24,25 @@ import org.apache.phoenix.util.SchemaUtil; public class StaleRegionBoundaryCacheException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.STALE_REGION_BOUNDARY_CACHE; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode ERROR_CODE = SQLExceptionCode.STALE_REGION_BOUNDARY_CACHE; - public StaleRegionBoundaryCacheException() { - this(null, null); - } + public StaleRegionBoundaryCacheException() { + this(null, null); + } - public StaleRegionBoundaryCacheException(byte[] fullTableName) { - this(SchemaUtil.getSchemaNameFromFullName(fullTableName),SchemaUtil.getTableNameFromFullName(fullTableName)); - } + public StaleRegionBoundaryCacheException(byte[] fullTableName) { + this(SchemaUtil.getSchemaNameFromFullName(fullTableName), + SchemaUtil.getTableNameFromFullName(fullTableName)); + } - public StaleRegionBoundaryCacheException(String fullTableName) { - this(SchemaUtil.getSchemaNameFromFullName(fullTableName),SchemaUtil.getTableNameFromFullName(fullTableName)); - } + public StaleRegionBoundaryCacheException(String fullTableName) { + this(SchemaUtil.getSchemaNameFromFullName(fullTableName), + SchemaUtil.getTableNameFromFullName(fullTableName)); + } - public StaleRegionBoundaryCacheException(String schemaName, String tableName) { - super(new SQLExceptionInfo.Builder(ERROR_CODE).setSchemaName(schemaName).setTableName(tableName).build().toString(), - ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); - } + public StaleRegionBoundaryCacheException(String schemaName, String tableName) { + super(new SQLExceptionInfo.Builder(ERROR_CODE).setSchemaName(schemaName).setTableName(tableName) + .build().toString(), ERROR_CODE.getSQLState(), ERROR_CODE.getErrorCode(), null); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java index 2b4eaeb7609..2bcc33c5f62 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableAlreadyExistsException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,50 +22,47 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; - /** - * * Exception thrown when a table name could not be found in the schema - * - * * @since 0.1 */ public class TableAlreadyExistsException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.TABLE_ALREADY_EXIST; - private final String schemaName; - private final String tableName; - private final PTable table; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.TABLE_ALREADY_EXIST; + private final String schemaName; + private final String tableName; + private final PTable table; + + public TableAlreadyExistsException(String schemaName, String tableName) { + this(schemaName, tableName, null, null); + } - public TableAlreadyExistsException(String schemaName, String tableName) { - this(schemaName, tableName, null, null); - } + public TableAlreadyExistsException(String schemaName, String tableName, String msg) { + this(schemaName, tableName, msg, null); + } - public TableAlreadyExistsException(String schemaName, String tableName, String msg) { - this(schemaName, tableName, msg, null); - } + public TableAlreadyExistsException(String schemaName, String tableName, PTable table) { + this(schemaName, tableName, null, table); + } - public TableAlreadyExistsException(String schemaName, String tableName, PTable table) { - this(schemaName, tableName, null, table); - } + public TableAlreadyExistsException(String schemaName, String tableName, String msg, + PTable table) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setMessage(msg).build().toString(), code.getSQLState(), code.getErrorCode()); + this.schemaName = schemaName; + this.tableName = tableName; + this.table = table; + } - public TableAlreadyExistsException(String schemaName, String tableName, String msg, PTable table) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).setMessage(msg).build().toString(), - code.getSQLState(), code.getErrorCode()); - this.schemaName = schemaName; - this.tableName = tableName; - this.table = table; - } + public String getTableName() { + return tableName; + } - public String getTableName() { - return tableName; - } + public String getSchemaName() { + return schemaName; + } - public String getSchemaName() { - return schemaName; - } - - public PTable getTable() { - return table; - } + public PTable getTable() { + return table; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java index 48da43f18ec..ce5bae567ca 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableNotFoundException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,55 +18,56 @@ package org.apache.phoenix.schema; import org.apache.hadoop.hbase.HConstants; - import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.util.SchemaUtil; - /** - * * Exception thrown when a table name could not be found in the schema - * - * * @since 0.1 */ public class TableNotFoundException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.TABLE_UNDEFINED; - private boolean thrownToForceReReadForTransformingTable = false; - private final long timestamp; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.TABLE_UNDEFINED; + private boolean thrownToForceReReadForTransformingTable = false; + private final long timestamp; + + public TableNotFoundException(TableNotFoundException e, long timestamp) { + this(e.getSchemaName(), e.getTableName(), timestamp); + } + + public TableNotFoundException(String tableName) { + this(SchemaUtil.getSchemaNameFromFullName(tableName), + SchemaUtil.getTableNameFromFullName(tableName)); + } - public TableNotFoundException(TableNotFoundException e, long timestamp) { - this(e.getSchemaName(),e.getTableName(), timestamp); - } + public TableNotFoundException(String tableName, boolean thrownForForce) { + this(SchemaUtil.getSchemaNameFromFullName(tableName), + SchemaUtil.getTableNameFromFullName(tableName), HConstants.LATEST_TIMESTAMP, code, + thrownForForce); + } - public TableNotFoundException(String tableName) { - this(SchemaUtil.getSchemaNameFromFullName(tableName), SchemaUtil.getTableNameFromFullName(tableName)); - } + public TableNotFoundException(String schemaName, String tableName) { + this(schemaName, tableName, HConstants.LATEST_TIMESTAMP); + } - public TableNotFoundException(String tableName, boolean thrownForForce) { - this(SchemaUtil.getSchemaNameFromFullName(tableName), SchemaUtil.getTableNameFromFullName(tableName), - HConstants.LATEST_TIMESTAMP, code, thrownForForce); - } + public TableNotFoundException(String schemaName, String tableName, long timestamp) { + this(schemaName, tableName, timestamp, code, false); + } - public TableNotFoundException(String schemaName, String tableName) { - this(schemaName, tableName, HConstants.LATEST_TIMESTAMP); - } - - public TableNotFoundException(String schemaName, String tableName, long timestamp) { - this(schemaName, tableName, timestamp, code, false); - } + public TableNotFoundException(String schemaName, String tableName, long timestamp, + SQLExceptionCode code, boolean thrownForForceReRead) { + super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .build().toString(), code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); + this.timestamp = timestamp; + this.thrownToForceReReadForTransformingTable = thrownForForceReRead; + } - public TableNotFoundException(String schemaName, String tableName, long timestamp, SQLExceptionCode code, boolean thrownForForceReRead) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName).build().toString(), - code.getSQLState(), code.getErrorCode(), schemaName, tableName, 
null); - this.timestamp = timestamp; - this.thrownToForceReReadForTransformingTable = thrownForForceReRead; - } + public long getTimeStamp() { + return timestamp; + } - public long getTimeStamp() { - return timestamp; - } - public boolean isThrownToForceReReadForTransformingTable() { return thrownToForceReReadForTransformingTable;}; + public boolean isThrownToForceReReadForTransformingTable() { + return thrownToForceReReadForTransformingTable; + }; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableProperty.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableProperty.java index 7f42c421246..168ba178cee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableProperty.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableProperty.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,425 +34,450 @@ import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; +import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.schema.types.PLong; public enum TableProperty { - @Deprecated // use the IMMUTABLE keyword while creating the table - IMMUTABLE_ROWS(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, true, true, false) { - @Override - public Object getPTableValue(PTable table) { - return table.isImmutableRows(); - } - }, - - MULTI_TENANT(PhoenixDatabaseMetaData.MULTI_TENANT, true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.isMultiTenant(); - } - }, + @Deprecated // use the IMMUTABLE keyword while creating the table + IMMUTABLE_ROWS(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, true, true, false) { + @Override + public Object getPTableValue(PTable table) { + return table.isImmutableRows(); + } + }, - DISABLE_WAL(PhoenixDatabaseMetaData.DISABLE_WAL, true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.isWALDisabled(); - } - }, + MULTI_TENANT(PhoenixDatabaseMetaData.MULTI_TENANT, true, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.isMultiTenant(); + } + }, - SALT_BUCKETS(PhoenixDatabaseMetaData.SALT_BUCKETS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, SALT_ONLY_ON_CREATE_TABLE, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.getBucketNum(); - } - }, + DISABLE_WAL(PhoenixDatabaseMetaData.DISABLE_WAL, true, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.isWALDisabled(); + } + }, - DEFAULT_COLUMN_FAMILY(DEFAULT_COLUMN_FAMILY_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.getDefaultFamilyName(); - } - }, + SALT_BUCKETS(PhoenixDatabaseMetaData.SALT_BUCKETS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, + false, SALT_ONLY_ON_CREATE_TABLE, false, false) { + @Override + public Object getPTableValue(PTable table) { + return 
table.getBucketNum(); + } + }, - STORE_NULLS(PhoenixDatabaseMetaData.STORE_NULLS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.getStoreNulls(); - } - }, + DEFAULT_COLUMN_FAMILY(DEFAULT_COLUMN_FAMILY_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, + DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.getDefaultFamilyName(); + } + }, - TRANSACTIONAL(PhoenixDatabaseMetaData.TRANSACTIONAL, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.isTransactional(); - } - }, + STORE_NULLS(PhoenixDatabaseMetaData.STORE_NULLS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, + false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.getStoreNulls(); + } + }, - TRANSACTION_PROVIDER(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.getTransactionProvider(); - } - @Override - public Object getValue(Object value) { - try { - return value == null ? null : TransactionFactory.Provider.valueOf(value.toString()); - } catch (IllegalArgumentException e) { - throw new RuntimeException(new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_TRANSACTION_PROVIDER) - .setMessage(value.toString()) - .build().buildException()); - } - } - }, - - UPDATE_CACHE_FREQUENCY(PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY, true, true, true) { - @Override - public Object getValue(Object value) { - if (value == null) { - return null; - } - - if (value instanceof String) { - String strValue = (String) value; - if ("ALWAYS".equalsIgnoreCase(strValue)) { - return 0L; - } - - if ("NEVER".equalsIgnoreCase(strValue)) { - return Long.MAX_VALUE; - } - - throw new IllegalArgumentException("Table's " + - PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + - " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); - } - - if (value instanceof Integer || value instanceof Long) { - return ((Number) value).longValue(); - } - - throw new IllegalArgumentException("Table's " + - PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + - " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); - } + TRANSACTIONAL(PhoenixDatabaseMetaData.TRANSACTIONAL, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, + true, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.isTransactional(); + } + }, - @Override - public Object getPTableValue(PTable table) { - return table.getUpdateCacheFrequency(); - } - }, + TRANSACTION_PROVIDER(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.getTransactionProvider(); + } - AUTO_PARTITION_SEQ(PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) { - @Override - public Object getValue(Object value) { - return value == null ? null : SchemaUtil.normalizeIdentifier(value.toString()); - } + @Override + public Object getValue(Object value) { + try { + return value == null ? 
null : TransactionFactory.Provider.valueOf(value.toString()); + } catch (IllegalArgumentException e) { + throw new RuntimeException( + new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_TRANSACTION_PROVIDER) + .setMessage(value.toString()).build().buildException()); + } + } + }, - @Override - public Object getPTableValue(PTable table) { - return table.getAutoPartitionSeqName(); - } - }, + UPDATE_CACHE_FREQUENCY(PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY, true, true, true) { + @Override + public Object getValue(Object value) { + if (value == null) { + return null; + } - APPEND_ONLY_SCHEMA(PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, false) { - @Override - public Object getPTableValue(PTable table) { - return table.isAppendOnlySchema(); - } - }, - GUIDE_POSTS_WIDTH(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH, true, false, false) { - @Override - public Object getValue(Object value) { - return value == null ? null : ((Number) value).longValue(); + if (value instanceof String) { + String strValue = (String) value; + if ("ALWAYS".equalsIgnoreCase(strValue)) { + return 0L; } - @Override - public Object getPTableValue(PTable table) { - return null; - } - - }, - - COLUMN_ENCODED_BYTES(PhoenixDatabaseMetaData.ENCODING_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public Object getValue(Object value) { - if (value instanceof String) { - String strValue = (String) value; - if ("NONE".equalsIgnoreCase(strValue)) { - return (byte)0; - } - } else { - return value == null ? null : ((Number) value).byteValue(); - } - return value; + if ("NEVER".equalsIgnoreCase(strValue)) { + return Long.MAX_VALUE; } - @Override - public Object getPTableValue(PTable table) { - return table.getEncodingScheme(); - } + throw new IllegalArgumentException( + "Table's " + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + + " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); + } - }, + if (value instanceof Integer || value instanceof Long) { + return ((Number) value).longValue(); + } - // Same as COLUMN_ENCODED_BYTES. If we don't have this one, isPhoenixProperty returns false. - ENCODING_SCHEME(PhoenixDatabaseMetaData.ENCODING_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public Object getValue(Object value) { - return COLUMN_ENCODED_BYTES.getValue(value); - } + throw new IllegalArgumentException("Table's " + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY + + " can only be set to 'ALWAYS', 'NEVER' or a millisecond numeric value."); + } - @Override - public Object getPTableValue(PTable table) { - return table.getEncodingScheme(); - } + @Override + public Object getPTableValue(PTable table) { + return table.getUpdateCacheFrequency(); + } + }, - }, - - IMMUTABLE_STORAGE_SCHEME(PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override - public ImmutableStorageScheme getValue(Object value) { - if (value == null) { - return null; - } else if (value instanceof String) { - String strValue = (String) value; - return ImmutableStorageScheme.valueOf(strValue.toUpperCase()); - } else { - throw new IllegalArgumentException("Immutable storage scheme table property must be a string"); - } - } + AUTO_PARTITION_SEQ(PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) { + @Override + public Object getValue(Object value) { + return value == null ? 
null : SchemaUtil.normalizeIdentifier(value.toString()); + } - @Override - public Object getPTableValue(PTable table) { - return table.getImmutableStorageScheme(); - } - - }, - - USE_STATS_FOR_PARALLELIZATION(PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION, true, true, true) { - @Override - public Object getValue(Object value) { - if (value == null) { - return null; - } else if (value instanceof Boolean) { - return value; - } else { - throw new IllegalArgumentException("Use stats for parallelization property can only be either true or false"); - } - } + @Override + public Object getPTableValue(PTable table) { + return table.getAutoPartitionSeqName(); + } + }, - @Override - public Object getPTableValue(PTable table) { - return table.useStatsForParallelization(); - } - }, - - TTL(PhoenixDatabaseMetaData.TTL, COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY, true, true, true) { - /** - * PHOENIX_TTL can take any values ranging between 0 < PHOENIX_TTL <= HConstants.LATEST_TIMESTAMP. - * special values :- - * NONE or 0L => Not Defined. - * FOREVER => HConstants.LATEST_TIMESTAMP - * - * @param value - * @return - */ - @Override - public Object getValue(Object value) { - if (value instanceof String) { - String strValue = (String) value; - if ("FOREVER".equalsIgnoreCase(strValue)) { - return HConstants.FOREVER; - } else if ("NONE".equalsIgnoreCase(strValue)) { - return TTL_NOT_DEFINED; - } - } else if (value != null) { - //Not converting to milli-seconds for better understanding at compaction and masking - //stage. As HBase Descriptor level gives this value in seconds. - return ((Number) value).intValue(); - } - return value; - } + APPEND_ONLY_SCHEMA(PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, false) { + @Override + public Object getPTableValue(PTable table) { + return table.isAppendOnlySchema(); + } + }, + GUIDE_POSTS_WIDTH(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH, true, false, false) { + @Override + public Object getValue(Object value) { + return value == null ? null : ((Number) value).longValue(); + } - @Override - public Object getPTableValue(PTable table) { - return table.getTTL(); - } - }, - - CHANGE_DETECTION_ENABLED(PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED, true, true, true) { - /** - * CHANGE_DETECTION_ENABLED is a boolean that can take TRUE or FALSE - */ - @Override - public Object getValue(Object value) { - if (value == null) { - return null; - } else if (value instanceof Boolean) { - return value; - } else { - throw new IllegalArgumentException("CHANGE_DETECTION_ENABLED property can only be" + - " either true or false"); - } - } + @Override + public Object getPTableValue(PTable table) { + return null; + } - @Override - public Object getPTableValue(PTable table) { - return table.isChangeDetectionEnabled(); - } - }, + }, - PHYSICAL_TABLE_NAME(PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { - @Override public Object getPTableValue(PTable table) { - return table.getPhysicalName(true); - } - }, + COLUMN_ENCODED_BYTES(PhoenixDatabaseMetaData.ENCODING_SCHEME, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { + @Override + public Object getValue(Object value) { + if (value instanceof String) { + String strValue = (String) value; + if ("NONE".equalsIgnoreCase(strValue)) { + return (byte) 0; + } + } else { + return value == null ? 
null : ((Number) value).byteValue(); + } + return value; + } - SCHEMA_VERSION(PhoenixDatabaseMetaData.SCHEMA_VERSION, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, true) { - @Override - public Object getValue(Object value) { - return value == null ? null : SchemaUtil.normalizeIdentifier(value.toString()); - } + @Override + public Object getPTableValue(PTable table) { + return table.getEncodingScheme(); + } - @Override public Object getPTableValue(PTable table) { - return table.getSchemaVersion(); - } - }, + }, - STREAMING_TOPIC_NAME(PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, true) { - @Override - public Object getValue(Object value) { - return value == null ? null : value.toString(); - } + // Same as COLUMN_ENCODED_BYTES. If we don't have this one, isPhoenixProperty returns false. + ENCODING_SCHEME(PhoenixDatabaseMetaData.ENCODING_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, + true, false, false) { + @Override + public Object getValue(Object value) { + return COLUMN_ENCODED_BYTES.getValue(value); + } - @Override public Object getPTableValue(PTable table) { - return table.getStreamingTopicName(); - } - }, - - MAX_LOOKBACK_AGE(PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE, true, false, false) { - @Override - public Object getValue(Object value) { - if (value == null) { - return null; - } - else if (value instanceof Integer) { - return Long.valueOf((Integer) value); - } - else if (value instanceof Long) { - return value; - } - else { - throw new IllegalArgumentException("Table level MAX_LOOKBACK_AGE should be a " + PLong.INSTANCE.getSqlTypeName() + " value in milli-seconds"); - } - } + @Override + public Object getPTableValue(PTable table) { + return table.getEncodingScheme(); + } - @Override - public Object getPTableValue(PTable table) { - return table.getMaxLookbackAge(); - } - }, + }, + + IMMUTABLE_STORAGE_SCHEME(PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { + @Override + public ImmutableStorageScheme getValue(Object value) { + if (value == null) { + return null; + } else if (value instanceof String) { + String strValue = (String) value; + return ImmutableStorageScheme.valueOf(strValue.toUpperCase()); + } else { + throw new IllegalArgumentException( + "Immutable storage scheme table property must be a string"); + } + } - INCLUDE(PhoenixDatabaseMetaData.CDC_INCLUDE_NAME, COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY, - true, false, false) { - @Override - public Object getPTableValue(PTable table) { - return table.getCDCIncludeScopes(); - } - }; + @Override + public Object getPTableValue(PTable table) { + return table.getImmutableStorageScheme(); + } - private final String propertyName; - private final SQLExceptionCode colFamSpecifiedException; - private final boolean isMutable; // whether or not a property can be changed through statements like ALTER TABLE. 
- private final SQLExceptionCode mutatingImmutablePropException; - private final boolean isValidOnView; - private final boolean isMutableOnView; + }, - private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) { - this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView); + USE_STATS_FOR_PARALLELIZATION(PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION, true, true, + true) { + @Override + public Object getValue(Object value) { + if (value == null) { + return null; + } else if (value instanceof Boolean) { + return value; + } else { + throw new IllegalArgumentException( + "Use stats for parallelization property can only be either true or false"); + } } - private TableProperty(String propertyName, SQLExceptionCode colFamilySpecifiedException, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) { - this(propertyName, colFamilySpecifiedException, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView); + @Override + public Object getPTableValue(PTable table) { + return table.useStatsForParallelization(); + } + }, + + TTL(PhoenixDatabaseMetaData.TTL, COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY, true, true, true) { + /** + * PHOENIX_TTL can take any values ranging between 0 < PHOENIX_TTL <= + * HConstants.LATEST_TIMESTAMP. special values :- NONE or 0L => Not Defined. FOREVER => + * HConstants.LATEST_TIMESTAMP + */ + @Override + public Object getValue(Object value) { + if (value instanceof String) { + String strValue = (String) value; + if ("FOREVER".equalsIgnoreCase(strValue)) { + return HConstants.FOREVER; + } else if ("NONE".equalsIgnoreCase(strValue)) { + return TTL_NOT_DEFINED; + } + } else if (value != null) { + // Not converting to milli-seconds for better understanding at compaction and masking + // stage. As HBase Descriptor level gives this value in seconds. 
+ return ((Number) value).intValue(); + } + return value; } - private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView, SQLExceptionCode isMutatingException) { - this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, isMutatingException, isValidOnView, isMutableOnView); + @Override + public Object getPTableValue(PTable table) { + return table.getTTL(); } + }, - private TableProperty(String propertyName, SQLExceptionCode colFamSpecifiedException, boolean isMutable, SQLExceptionCode mutatingException, boolean isValidOnView, boolean isMutableOnView) { - this.propertyName = propertyName; - this.colFamSpecifiedException = colFamSpecifiedException; - this.isMutable = isMutable; - this.mutatingImmutablePropException = mutatingException; - this.isValidOnView = isValidOnView; - this.isMutableOnView = isMutableOnView; + CHANGE_DETECTION_ENABLED(PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED, true, true, true) { + /** + * CHANGE_DETECTION_ENABLED is a boolean that can take TRUE or FALSE + */ + @Override + public Object getValue(Object value) { + if (value == null) { + return null; + } else if (value instanceof Boolean) { + return value; + } else { + throw new IllegalArgumentException( + "CHANGE_DETECTION_ENABLED property can only be" + " either true or false"); + } } - public static boolean isPhoenixTableProperty(String property) { - try { - TableProperty.valueOf(property); - } catch (IllegalArgumentException e) { - return false; - } - return true; + @Override + public Object getPTableValue(PTable table) { + return table.isChangeDetectionEnabled(); } + }, - public Object getValue(Object value) { - return value; + PHYSICAL_TABLE_NAME(PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.getPhysicalName(true); } + }, - public Object getValue(Map props) { - return getValue(props.get(this.toString())); + SCHEMA_VERSION(PhoenixDatabaseMetaData.SCHEMA_VERSION, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, + true, true, true) { + @Override + public Object getValue(Object value) { + return value == null ? null : SchemaUtil.normalizeIdentifier(value.toString()); } - // isQualified is true if column family name is specified in property name - public void validate(boolean isMutating, boolean isQualified, PTableType tableType) throws SQLException { - checkForColumnFamily(isQualified); - checkIfApplicableForView(tableType); - checkForMutability(isMutating,tableType); + @Override + public Object getPTableValue(PTable table) { + return table.getSchemaVersion(); } + }, - private void checkForColumnFamily(boolean isQualified) throws SQLException { - if (isQualified) { - throw new SQLExceptionInfo.Builder(colFamSpecifiedException).setMessage(". Property: " + propertyName).build().buildException(); - } + STREAMING_TOPIC_NAME(PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME, + COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, true) { + @Override + public Object getValue(Object value) { + return value == null ? null : value.toString(); } - private void checkForMutability(boolean isMutating, PTableType tableType) throws SQLException { - if (isMutating && !isMutable) { - throw new SQLExceptionInfo.Builder(mutatingImmutablePropException).setMessage(". 
Property: " + propertyName).build().buildException(); - } - if (isMutating && tableType == PTableType.VIEW && !isMutableOnView) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW).setMessage(". Property: " + propertyName).build().buildException(); - } + @Override + public Object getPTableValue(PTable table) { + return table.getStreamingTopicName(); } + }, - private void checkIfApplicableForView(PTableType tableType) - throws SQLException { - if (tableType == PTableType.VIEW && !isValidOnView) { - throw new SQLExceptionInfo.Builder( - VIEW_WITH_PROPERTIES).setMessage("Property: " + propertyName).build().buildException(); - } + MAX_LOOKBACK_AGE(PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE, true, false, false) { + @Override + public Object getValue(Object value) { + if (value == null) { + return null; + } else if (value instanceof Integer) { + return Long.valueOf((Integer) value); + } else if (value instanceof Long) { + return value; + } else { + throw new IllegalArgumentException("Table level MAX_LOOKBACK_AGE should be a " + + PLong.INSTANCE.getSqlTypeName() + " value in milli-seconds"); + } } - public String getPropertyName() { - return propertyName; + @Override + public Object getPTableValue(PTable table) { + return table.getMaxLookbackAge(); } + }, - public boolean isValidOnView() { - return isValidOnView; + INCLUDE(PhoenixDatabaseMetaData.CDC_INCLUDE_NAME, COLUMN_FAMILY_NOT_ALLOWED_FOR_PROPERTY, true, + false, false) { + @Override + public Object getPTableValue(PTable table) { + return table.getCDCIncludeScopes(); + } + }; + + private final String propertyName; + private final SQLExceptionCode colFamSpecifiedException; + private final boolean isMutable; // whether or not a property can be changed through statements + // like ALTER TABLE. 
+ private final SQLExceptionCode mutatingImmutablePropException; + private final boolean isValidOnView; + private final boolean isMutableOnView; + + private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, + boolean isMutableOnView) { + this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, CANNOT_ALTER_PROPERTY, + isValidOnView, isMutableOnView); + } + + private TableProperty(String propertyName, SQLExceptionCode colFamilySpecifiedException, + boolean isMutable, boolean isValidOnView, boolean isMutableOnView) { + this(propertyName, colFamilySpecifiedException, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, + isMutableOnView); + } + + private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, + boolean isMutableOnView, SQLExceptionCode isMutatingException) { + this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, isMutatingException, + isValidOnView, isMutableOnView); + } + + private TableProperty(String propertyName, SQLExceptionCode colFamSpecifiedException, + boolean isMutable, SQLExceptionCode mutatingException, boolean isValidOnView, + boolean isMutableOnView) { + this.propertyName = propertyName; + this.colFamSpecifiedException = colFamSpecifiedException; + this.isMutable = isMutable; + this.mutatingImmutablePropException = mutatingException; + this.isValidOnView = isValidOnView; + this.isMutableOnView = isMutableOnView; + } + + public static boolean isPhoenixTableProperty(String property) { + try { + TableProperty.valueOf(property); + } catch (IllegalArgumentException e) { + return false; + } + return true; + } + + public Object getValue(Object value) { + return value; + } + + public Object getValue(Map props) { + return getValue(props.get(this.toString())); + } + + // isQualified is true if column family name is specified in property name + public void validate(boolean isMutating, boolean isQualified, PTableType tableType) + throws SQLException { + checkForColumnFamily(isQualified); + checkIfApplicableForView(tableType); + checkForMutability(isMutating, tableType); + } + + private void checkForColumnFamily(boolean isQualified) throws SQLException { + if (isQualified) { + throw new SQLExceptionInfo.Builder(colFamSpecifiedException) + .setMessage(". Property: " + propertyName).build().buildException(); } + } - public boolean isMutable() { - return isMutable; + private void checkForMutability(boolean isMutating, PTableType tableType) throws SQLException { + if (isMutating && !isMutable) { + throw new SQLExceptionInfo.Builder(mutatingImmutablePropException) + .setMessage(". Property: " + propertyName).build().buildException(); } + if (isMutating && tableType == PTableType.VIEW && !isMutableOnView) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW) + .setMessage(". 
Property: " + propertyName).build().buildException(); + } + } - public boolean isMutableOnView() { - return isMutableOnView; + private void checkIfApplicableForView(PTableType tableType) throws SQLException { + if (tableType == PTableType.VIEW && !isValidOnView) { + throw new SQLExceptionInfo.Builder(VIEW_WITH_PROPERTIES) + .setMessage("Property: " + propertyName).build().buildException(); } + } + + public String getPropertyName() { + return propertyName; + } + + public boolean isValidOnView() { + return isValidOnView; + } + + public boolean isMutable() { + return isMutable; + } + + public boolean isMutableOnView() { + return isMutableOnView; + } - abstract public Object getPTableValue(PTable table); + abstract public Object getPTableValue(PTable table); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableRef.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableRef.java index 64f13acf6b9..3a3ecfb74b2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableRef.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TableRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,159 +27,163 @@ import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.SchemaUtil; - - public class TableRef { - public static final TableRef EMPTY_TABLE_REF = createEmptyTableRef(); - - private PTable table; - private long upperBoundTimeStamp; - private final String alias; - private final long lowerBoundTimeStamp; - private final boolean hasDynamicCols; - private final long currentTime; - private boolean hinted; - - private static TableRef createEmptyTableRef() { - try { - return new TableRef(new PTableImpl.Builder() - .setIndexes(Collections.emptyList()) - .setPhysicalNames(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .build()); - } catch (SQLException e) { - // Should never happen - return null; - } - } - - public TableRef(TableRef tableRef) { - this(tableRef.alias, tableRef.table, tableRef.upperBoundTimeStamp, - tableRef.lowerBoundTimeStamp, tableRef.hasDynamicCols, tableRef.hinted); - } - - public TableRef(TableRef tableRef, long timeStamp) { - this(tableRef.alias, tableRef.table, timeStamp, tableRef.lowerBoundTimeStamp, - tableRef.hasDynamicCols, tableRef.hinted); - } - - public TableRef(TableRef tableRef, String alias) { - this(alias, tableRef.table, tableRef.upperBoundTimeStamp, tableRef.lowerBoundTimeStamp, - tableRef.hasDynamicCols, tableRef.hinted); - } - - public TableRef(PTable table) { - this(null, table, QueryConstants.UNSET_TIMESTAMP, false); - } - - public TableRef(PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp) { - this(null, table, upperBoundTimeStamp, lowerBoundTimeStamp, false, false); - } - - public TableRef(String alias, PTable table, long upperBoundTimeStamp, boolean hasDynamicCols) { - this(alias, table, upperBoundTimeStamp, 0, hasDynamicCols, false); - } - - public TableRef(String alias, PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp, - boolean hasDynamicCols) { - this(alias, table, upperBoundTimeStamp, lowerBoundTimeStamp, hasDynamicCols, false); - } - - public TableRef(String alias, PTable table, long 
upperBoundTimeStamp, long lowerBoundTimeStamp, - boolean hasDynamicCols, boolean hinted) { - this.alias = alias; - this.table = table; - // if UPDATE_CACHE_FREQUENCY is set, always let the server set timestamps - this.upperBoundTimeStamp = table.getUpdateCacheFrequency()!=0 ? QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp; - this.currentTime = upperBoundTimeStamp; - this.lowerBoundTimeStamp = lowerBoundTimeStamp; - this.hasDynamicCols = hasDynamicCols; - this.hinted = hinted; - } - - public PTable getTable() { - return table; - } - - public void setTable(PTable value) { - this.table = value; - } - - public void setTimeStamp(long timeStamp) { - this.upperBoundTimeStamp = timeStamp; - } - - public String getTableAlias() { - return alias; - } - - public boolean isHinted() { - return hinted; - } - - public void setHinted(boolean hinted) { - this.hinted = hinted; - } - - public String getColumnDisplayName(ColumnRef ref, boolean cfCaseSensitive, boolean cqCaseSensitive) { - String cf = null; - String cq = null; - PColumn column = ref.getColumn(); - String name = column.getName().getString(); - boolean isIndex = IndexUtil.isIndexColumn(name); - if ((table.getType() == PTableType.PROJECTED && TupleProjectionCompiler.PROJECTED_TABLE_SCHEMA.equals(table.getSchemaName())) - || table.getType() == PTableType.SUBQUERY) { - cq = name; - } - else if (SchemaUtil.isPKColumn(column)) { - cq = isIndex ? IndexUtil.getDataColumnName(name) : name; - } - else { - String defaultFamilyName = table.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : table.getDefaultFamilyName().getString(); - // Translate to the data table column name - String dataFamilyName = isIndex ? IndexUtil.getDataColumnFamilyName(name) : column.getFamilyName().getString() ; - cf = (table.getIndexType()==IndexType.LOCAL? IndexUtil.getActualColumnFamilyName(defaultFamilyName):defaultFamilyName).equals(dataFamilyName) ? null : dataFamilyName; - cq = isIndex ? IndexUtil.getDataColumnName(name) : name; - } - - cf = (cf!=null && cfCaseSensitive) ? "\"" + cf + "\"" : cf; - cq = cqCaseSensitive ? "\"" + cq + "\"" : cq; - return SchemaUtil.getColumnDisplayName(cf, cq); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = alias == null ? 0 : alias.hashCode(); - result = prime * result + ( this.table.getName()!=null ? 
this.table.getName().hashCode() : 0); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - TableRef other = (TableRef)obj; - if (!Objects.equals(alias, other.alias)) return false; - if (!Objects.equals(table.getName(), other.table.getName())) return false; - return true; - } - - public long getTimeStamp() { - return this.upperBoundTimeStamp; - } - - public long getLowerBoundTimeStamp() { - return this.lowerBoundTimeStamp; - } - - public boolean hasDynamicCols() { - return hasDynamicCols; - } - - public long getCurrentTime() { - return this.currentTime; - } + public static final TableRef EMPTY_TABLE_REF = createEmptyTableRef(); + + private PTable table; + private long upperBoundTimeStamp; + private final String alias; + private final long lowerBoundTimeStamp; + private final boolean hasDynamicCols; + private final long currentTime; + private boolean hinted; + + private static TableRef createEmptyTableRef() { + try { + return new TableRef(new PTableImpl.Builder().setIndexes(Collections.emptyList()) + .setPhysicalNames(Collections.emptyList()).setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) + .build()); + } catch (SQLException e) { + // Should never happen + return null; + } + } + + public TableRef(TableRef tableRef) { + this(tableRef.alias, tableRef.table, tableRef.upperBoundTimeStamp, tableRef.lowerBoundTimeStamp, + tableRef.hasDynamicCols, tableRef.hinted); + } + + public TableRef(TableRef tableRef, long timeStamp) { + this(tableRef.alias, tableRef.table, timeStamp, tableRef.lowerBoundTimeStamp, + tableRef.hasDynamicCols, tableRef.hinted); + } + + public TableRef(TableRef tableRef, String alias) { + this(alias, tableRef.table, tableRef.upperBoundTimeStamp, tableRef.lowerBoundTimeStamp, + tableRef.hasDynamicCols, tableRef.hinted); + } + + public TableRef(PTable table) { + this(null, table, QueryConstants.UNSET_TIMESTAMP, false); + } + + public TableRef(PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp) { + this(null, table, upperBoundTimeStamp, lowerBoundTimeStamp, false, false); + } + + public TableRef(String alias, PTable table, long upperBoundTimeStamp, boolean hasDynamicCols) { + this(alias, table, upperBoundTimeStamp, 0, hasDynamicCols, false); + } + + public TableRef(String alias, PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp, + boolean hasDynamicCols) { + this(alias, table, upperBoundTimeStamp, lowerBoundTimeStamp, hasDynamicCols, false); + } + + public TableRef(String alias, PTable table, long upperBoundTimeStamp, long lowerBoundTimeStamp, + boolean hasDynamicCols, boolean hinted) { + this.alias = alias; + this.table = table; + // if UPDATE_CACHE_FREQUENCY is set, always let the server set timestamps + this.upperBoundTimeStamp = + table.getUpdateCacheFrequency() != 0 ? 
QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp; + this.currentTime = upperBoundTimeStamp; + this.lowerBoundTimeStamp = lowerBoundTimeStamp; + this.hasDynamicCols = hasDynamicCols; + this.hinted = hinted; + } + + public PTable getTable() { + return table; + } + + public void setTable(PTable value) { + this.table = value; + } + + public void setTimeStamp(long timeStamp) { + this.upperBoundTimeStamp = timeStamp; + } + + public String getTableAlias() { + return alias; + } + + public boolean isHinted() { + return hinted; + } + + public void setHinted(boolean hinted) { + this.hinted = hinted; + } + + public String getColumnDisplayName(ColumnRef ref, boolean cfCaseSensitive, + boolean cqCaseSensitive) { + String cf = null; + String cq = null; + PColumn column = ref.getColumn(); + String name = column.getName().getString(); + boolean isIndex = IndexUtil.isIndexColumn(name); + if ( + (table.getType() == PTableType.PROJECTED + && TupleProjectionCompiler.PROJECTED_TABLE_SCHEMA.equals(table.getSchemaName())) + || table.getType() == PTableType.SUBQUERY + ) { + cq = name; + } else if (SchemaUtil.isPKColumn(column)) { + cq = isIndex ? IndexUtil.getDataColumnName(name) : name; + } else { + String defaultFamilyName = table.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY + : table.getDefaultFamilyName().getString(); + // Translate to the data table column name + String dataFamilyName = + isIndex ? IndexUtil.getDataColumnFamilyName(name) : column.getFamilyName().getString(); + cf = (table.getIndexType() == IndexType.LOCAL + ? IndexUtil.getActualColumnFamilyName(defaultFamilyName) + : defaultFamilyName).equals(dataFamilyName) ? null : dataFamilyName; + cq = isIndex ? IndexUtil.getDataColumnName(name) : name; + } + + cf = (cf != null && cfCaseSensitive) ? "\"" + cf + "\"" : cf; + cq = cqCaseSensitive ? "\"" + cq + "\"" : cq; + return SchemaUtil.getColumnDisplayName(cf, cq); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = alias == null ? 0 : alias.hashCode(); + result = prime * result + (this.table.getName() != null ? this.table.getName().hashCode() : 0); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + TableRef other = (TableRef) obj; + if (!Objects.equals(alias, other.alias)) return false; + if (!Objects.equals(table.getName(), other.table.getName())) return false; + return true; + } + + public long getTimeStamp() { + return this.upperBoundTimeStamp; + } + + public long getLowerBoundTimeStamp() { + return this.lowerBoundTimeStamp; + } + + public boolean hasDynamicCols() { + return hasDynamicCols; + } + + public long getCurrentTime() { + return this.currentTime; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TablesNotInSyncException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TablesNotInSyncException.java index dac5b7f5ff1..727c5006f44 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TablesNotInSyncException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TablesNotInSyncException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,23 +17,25 @@ */ package org.apache.phoenix.schema; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; -import java.sql.SQLException; - /** - * Exception to raise when multiple tables differ in specified properties - * This can happen since Apache Phoenix code doesn't work atomically for many parts - * For example, Base table and index tables are inconsistent in namespace mapping - * OR View Index table doesn't exist for multi-tenant base table + * Exception to raise when multiple tables differ in specified properties This can happen since + * Apache Phoenix code doesn't work atomically for many parts For example, Base table and index + * tables are inconsistent in namespace mapping OR View Index table doesn't exist for multi-tenant + * base table */ public class TablesNotInSyncException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.TABLES_NOT_IN_SYNC; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.TABLES_NOT_IN_SYNC; - public TablesNotInSyncException(String table1, String table2, String diff) { - super(new SQLExceptionInfo.Builder(code).setMessage("Table: " + table1 + " and Table: " + table2 + " differ in " + diff).build().toString(), code.getSQLState(), code.getErrorCode()); - } + public TablesNotInSyncException(String table1, String table2, String diff) { + super(new SQLExceptionInfo.Builder(code) + .setMessage("Table: " + table1 + " and Table: " + table2 + " differ in " + diff).build() + .toString(), code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java index 0cd15b0c7d4..f01d0161c43 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/TypeMismatchException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,35 +25,34 @@ /** * Exception thrown when we try to convert one type into a different incompatible type. 
- * - * * @since 1.0 */ public class TypeMismatchException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.TYPE_MISMATCH; - - public TypeMismatchException(String msg) { - super(new SQLExceptionInfo.Builder(code).setMessage(msg).build().toString(), code.getSQLState(), code.getErrorCode()); - } - - public static TypeMismatchException newException(PDataType lhs) { - return new TypeMismatchException(getMessage(lhs,null,null)); - } - - public static TypeMismatchException newException(PDataType lhs, String location) { - return new TypeMismatchException(getMessage(lhs,null,location)); - } - - public static TypeMismatchException newException(PDataType lhs, PDataType rhs) { - return new TypeMismatchException(getMessage(lhs,rhs,null)); - } - - public static TypeMismatchException newException(PDataType lhs, PDataType rhs, String location) { - return new TypeMismatchException(getMessage(lhs,rhs,location)); - } - - public static String getMessage(PDataType lhs, PDataType rhs, String location) { - return lhs + (rhs == null ? "" : " and " + rhs) + (location == null ? "" : " for " + location); - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.TYPE_MISMATCH; + + public TypeMismatchException(String msg) { + super(new SQLExceptionInfo.Builder(code).setMessage(msg).build().toString(), code.getSQLState(), + code.getErrorCode()); + } + + public static TypeMismatchException newException(PDataType lhs) { + return new TypeMismatchException(getMessage(lhs, null, null)); + } + + public static TypeMismatchException newException(PDataType lhs, String location) { + return new TypeMismatchException(getMessage(lhs, null, location)); + } + + public static TypeMismatchException newException(PDataType lhs, PDataType rhs) { + return new TypeMismatchException(getMessage(lhs, rhs, null)); + } + + public static TypeMismatchException newException(PDataType lhs, PDataType rhs, String location) { + return new TypeMismatchException(getMessage(lhs, rhs, location)); + } + + public static String getMessage(PDataType lhs, PDataType rhs, String location) { + return lhs + (rhs == null ? "" : " and " + rhs) + (location == null ? "" : " for " + location); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/UpsertColumnsValuesMismatchException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/UpsertColumnsValuesMismatchException.java index f63fab1b4de..12cb55cedf9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/UpsertColumnsValuesMismatchException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/UpsertColumnsValuesMismatchException.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,20 +22,24 @@ import org.apache.phoenix.exception.SQLExceptionInfo; public class UpsertColumnsValuesMismatchException extends MetaDataEntityNotFoundException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH; - private final long timestamp; - public UpsertColumnsValuesMismatchException(String schemaName, String tableName, String message) { - this(schemaName, tableName, message, HConstants.LATEST_TIMESTAMP); - } - public UpsertColumnsValuesMismatchException(String schemaName, String tableName, String message, long timestamp) { - super(new SQLExceptionInfo.Builder(code).setSchemaName(schemaName). - setTableName(tableName).setMessage(message).build().toString(), code.getSQLState(), - code.getErrorCode(), schemaName, tableName, null); - this.timestamp = timestamp; - } + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH; + private final long timestamp; - public long getTimeStamp() { - return timestamp; - } + public UpsertColumnsValuesMismatchException(String schemaName, String tableName, String message) { + this(schemaName, tableName, message, HConstants.LATEST_TIMESTAMP); + } + + public UpsertColumnsValuesMismatchException(String schemaName, String tableName, String message, + long timestamp) { + super( + new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .setMessage(message).build().toString(), + code.getSQLState(), code.getErrorCode(), schemaName, tableName, null); + this.timestamp = timestamp; + } + + public long getTimeStamp() { + return timestamp; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueBitSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueBitSet.java index 7931659f3d6..f77170a76f4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueBitSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueBitSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,183 +21,178 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.util.SizedUtil; - /** - * - * Class to track whether or not a value is null. - * The value is a zero-based position in the schema provided. - * - * + * Class to track whether or not a value is null. The value is a zero-based position in the schema + * provided. 
* @since 0.1 - * */ public class ValueBitSet { - public final static ValueBitSet EMPTY_VALUE_BITSET = new ValueBitSet(); - private static final int BITS_PER_LONG = 64; - private static final int BITS_PER_SHORT = 16; - private final long[] bits; - private final ValueSchema schema; - - private int maxSetBit = -1; - - public static ValueBitSet newInstance(ValueSchema schema) { - if (schema.getFieldCount() == schema.getMinNullable()) { - return EMPTY_VALUE_BITSET; - } - return new ValueBitSet(schema); - } - - private ValueBitSet() { - schema = null; - bits = new long[0]; - } - - private ValueBitSet(ValueSchema schema) { - this.schema = schema; - bits = new long[Math.max(1,(schema.getFieldCount() - schema.getMinNullable() + BITS_PER_LONG -1) / BITS_PER_LONG)]; - } - - public int getMaxSetBit() { - return maxSetBit; - } - - private boolean isVarLength() { - return schema == null ? false : schema.getFieldCount() - schema.getMinNullable() > BITS_PER_SHORT; - } - - public int getNullCount(int nBit, int nFields) { - if (schema == null) { - return 0; - } - int count = 0; - int index = nBit/BITS_PER_LONG; - // Shift right based on the bit index, because we aren't interested in the bits before this. - int shiftRight = nBit % BITS_PER_LONG; - int bitsToLeft = BITS_PER_LONG - shiftRight; - // Shift left based on the number of fields we're interested in counting. - int shiftLeft = Math.max(0, (BITS_PER_LONG - nFields)); - // Mask off the bits of interest by shifting the bitset. - count += Math.min(nFields, bitsToLeft) - (Long.bitCount((bits[index] >>> shiftRight) << shiftLeft)); - // Subtract from the number of fields the total number of possible fields we looked at - nFields -= bitsToLeft; - if (nFields > 0) { - // If more fields to count, then walk through the successive long bits - while (nFields > BITS_PER_LONG) { - count += BITS_PER_LONG - Long.bitCount(bits[++index]); - nFields -= BITS_PER_LONG; - } - // Count the final remaining fields - if (nFields > 0) { - count += nFields - Long.bitCount(bits[++index] << (BITS_PER_LONG - nFields)); - } - } - return count; - } - - /** - * Serialize the value bit set into a byte array. The byte array - * is expected to have enough room (use {@link #getEstimatedLength()} - * to ensure enough room exists. - * @param b the byte array into which to put the serialized bit set - * @param offset the offset into the byte array - * @return the incremented offset - */ - public int toBytes(byte[] b, int offset) { - if (schema == null) { - return offset; - } - // If the total number of possible values is bigger than 16 bits (the - // size of a short), then serialize the long array followed by the - // array length. - if (isVarLength()) { - short nLongs = (short)((maxSetBit + BITS_PER_LONG) / BITS_PER_LONG); - for (int i = 0; i < nLongs; i++) { - offset = Bytes.putLong(b, offset, bits[i]); - } - offset = Bytes.putShort(b, offset, nLongs); - } else { - // Else if the number of values is less than or equal to 16, - // serialize the bits directly into a short. 
- offset = Bytes.putShort(b, offset, (short)bits[0]); - } - return offset; - } - - public void clear() { - Arrays.fill(bits, 0); - maxSetBit = -1; - } - - public boolean get(int nBit) { - int lIndex = nBit / BITS_PER_LONG; - int bIndex = nBit % BITS_PER_LONG; - return (bits[lIndex] & (1L << bIndex)) != 0; - } - - public void set(int nBit) { - int lIndex = nBit / BITS_PER_LONG; - int bIndex = nBit % BITS_PER_LONG; - bits[lIndex] |= (1L << bIndex); - maxSetBit = Math.max(maxSetBit, nBit); - } - - public void or(ImmutableBytesWritable ptr) { - or(ptr, isVarLength() ? Bytes.SIZEOF_SHORT + 1 : Bytes.SIZEOF_SHORT); - } - - public void or(ImmutableBytesWritable ptr, int length) { - if (schema == null || length == 0) { - return; - } - if (length > Bytes.SIZEOF_SHORT) { - int offset = ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT; - short nLongs = Bytes.toShort(ptr.get(), offset); - offset -= nLongs * Bytes.SIZEOF_LONG; - for (int i = 0; i < nLongs; i++) { - bits[i] |= Bytes.toLong(ptr.get(), offset); - offset += Bytes.SIZEOF_LONG; - } - maxSetBit = Math.max(maxSetBit, nLongs * BITS_PER_LONG - 1); - } else { - long l = Bytes.toShort(ptr.get(), ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT); - bits[0] |= l; - maxSetBit = Math.max(maxSetBit, (bits[0] == 0 ? 0 : BITS_PER_SHORT) - 1); - } - - } - - /** - * @return Max serialization size - */ - public int getEstimatedLength() { - if (schema == null) { - return 0; - } - return Bytes.SIZEOF_SHORT + (isVarLength() ? (maxSetBit + BITS_PER_LONG) / BITS_PER_LONG * Bytes.SIZEOF_LONG : 0); - } - - public static int getSize(int nBits) { - return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.INT_SIZE + (nBits + BITS_PER_LONG - 1) / BITS_PER_LONG * Bytes.SIZEOF_LONG; + public final static ValueBitSet EMPTY_VALUE_BITSET = new ValueBitSet(); + private static final int BITS_PER_LONG = 64; + private static final int BITS_PER_SHORT = 16; + private final long[] bits; + private final ValueSchema schema; + + private int maxSetBit = -1; + + public static ValueBitSet newInstance(ValueSchema schema) { + if (schema.getFieldCount() == schema.getMinNullable()) { + return EMPTY_VALUE_BITSET; } - - /** - * @return Size of object in memory - */ - public int getSize() { - if (schema == null) { - return 0; - } - return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.LONG_SIZE * bits.length + SizedUtil.INT_SIZE; + return new ValueBitSet(schema); + } + + private ValueBitSet() { + schema = null; + bits = new long[0]; + } + + private ValueBitSet(ValueSchema schema) { + this.schema = schema; + bits = new long[Math.max(1, + (schema.getFieldCount() - schema.getMinNullable() + BITS_PER_LONG - 1) / BITS_PER_LONG)]; + } + + public int getMaxSetBit() { + return maxSetBit; + } + + private boolean isVarLength() { + return schema == null + ? false + : schema.getFieldCount() - schema.getMinNullable() > BITS_PER_SHORT; + } + + public int getNullCount(int nBit, int nFields) { + if (schema == null) { + return 0; + } + int count = 0; + int index = nBit / BITS_PER_LONG; + // Shift right based on the bit index, because we aren't interested in the bits before this. + int shiftRight = nBit % BITS_PER_LONG; + int bitsToLeft = BITS_PER_LONG - shiftRight; + // Shift left based on the number of fields we're interested in counting. + int shiftLeft = Math.max(0, (BITS_PER_LONG - nFields)); + // Mask off the bits of interest by shifting the bitset. 
+ count += + Math.min(nFields, bitsToLeft) - (Long.bitCount((bits[index] >>> shiftRight) << shiftLeft)); + // Subtract from the number of fields the total number of possible fields we looked at + nFields -= bitsToLeft; + if (nFields > 0) { + // If more fields to count, then walk through the successive long bits + while (nFields > BITS_PER_LONG) { + count += BITS_PER_LONG - Long.bitCount(bits[++index]); + nFields -= BITS_PER_LONG; + } + // Count the final remaining fields + if (nFields > 0) { + count += nFields - Long.bitCount(bits[++index] << (BITS_PER_LONG - nFields)); + } + } + return count; + } + + /** + * Serialize the value bit set into a byte array. The byte array is expected to have enough room + * (use {@link #getEstimatedLength()} to ensure enough room exists. + * @param b the byte array into which to put the serialized bit set + * @param offset the offset into the byte array + * @return the incremented offset + */ + public int toBytes(byte[] b, int offset) { + if (schema == null) { + return offset; + } + // If the total number of possible values is bigger than 16 bits (the + // size of a short), then serialize the long array followed by the + // array length. + if (isVarLength()) { + short nLongs = (short) ((maxSetBit + BITS_PER_LONG) / BITS_PER_LONG); + for (int i = 0; i < nLongs; i++) { + offset = Bytes.putLong(b, offset, bits[i]); + } + offset = Bytes.putShort(b, offset, nLongs); + } else { + // Else if the number of values is less than or equal to 16, + // serialize the bits directly into a short. + offset = Bytes.putShort(b, offset, (short) bits[0]); + } + return offset; + } + + public void clear() { + Arrays.fill(bits, 0); + maxSetBit = -1; + } + + public boolean get(int nBit) { + int lIndex = nBit / BITS_PER_LONG; + int bIndex = nBit % BITS_PER_LONG; + return (bits[lIndex] & (1L << bIndex)) != 0; + } + + public void set(int nBit) { + int lIndex = nBit / BITS_PER_LONG; + int bIndex = nBit % BITS_PER_LONG; + bits[lIndex] |= (1L << bIndex); + maxSetBit = Math.max(maxSetBit, nBit); + } + + public void or(ImmutableBytesWritable ptr) { + or(ptr, isVarLength() ? Bytes.SIZEOF_SHORT + 1 : Bytes.SIZEOF_SHORT); + } + + public void or(ImmutableBytesWritable ptr, int length) { + if (schema == null || length == 0) { + return; + } + if (length > Bytes.SIZEOF_SHORT) { + int offset = ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT; + short nLongs = Bytes.toShort(ptr.get(), offset); + offset -= nLongs * Bytes.SIZEOF_LONG; + for (int i = 0; i < nLongs; i++) { + bits[i] |= Bytes.toLong(ptr.get(), offset); + offset += Bytes.SIZEOF_LONG; + } + maxSetBit = Math.max(maxSetBit, nLongs * BITS_PER_LONG - 1); + } else { + long l = Bytes.toShort(ptr.get(), ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_SHORT); + bits[0] |= l; + maxSetBit = Math.max(maxSetBit, (bits[0] == 0 ? 0 : BITS_PER_SHORT) - 1); } - public void or(ValueBitSet isSet) { - for (int i = 0; i < bits.length; i++) { - bits[i] |= isSet.bits[i]; - } - maxSetBit = Math.max(maxSetBit, isSet.maxSetBit); + } + + /** Returns Max serialization size */ + public int getEstimatedLength() { + if (schema == null) { + return 0; + } + return Bytes.SIZEOF_SHORT + + (isVarLength() ? 
(maxSetBit + BITS_PER_LONG) / BITS_PER_LONG * Bytes.SIZEOF_LONG : 0); + } + + public static int getSize(int nBits) { + return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + + SizedUtil.INT_SIZE + (nBits + BITS_PER_LONG - 1) / BITS_PER_LONG * Bytes.SIZEOF_LONG; + } + + /** Returns Size of object in memory */ + public int getSize() { + if (schema == null) { + return 0; + } + return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.ARRAY_SIZE + + SizedUtil.LONG_SIZE * bits.length + SizedUtil.INT_SIZE; + } + + public void or(ValueBitSet isSet) { + for (int i = 0; i < bits.length; i++) { + bits[i] |= isSet.bits[i]; } + maxSetBit = Math.max(maxSetBit, isSet.maxSetBit); + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java index 49814c0eb7a..0009839c6e9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueRangeExcpetion.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,16 +23,18 @@ import org.apache.phoenix.exception.SQLExceptionInfo; /** - * Exception thrown when we try to use use an argument that has the wrong type. - * - * + * Exception thrown when we try to use use an argument that has the wrong type. * @since 1.1.2 */ -public class ValueRangeExcpetion extends SQLException{ - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.VALUE_OUTSIDE_RANGE; - - public ValueRangeExcpetion(Object minValue, Object maxValue, Object actualValue, String location){ - super(new SQLExceptionInfo.Builder(code).setMessage("expected: [" + minValue + " , " + maxValue + "] but was: " + actualValue + " at " + location).build().toString(), code.getSQLState(), code.getErrorCode()); - } +public class ValueRangeExcpetion extends SQLException { + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.VALUE_OUTSIDE_RANGE; + + public ValueRangeExcpetion(Object minValue, Object maxValue, Object actualValue, + String location) { + super( + new SQLExceptionInfo.Builder(code).setMessage("expected: [" + minValue + " , " + maxValue + + "] but was: " + actualValue + " at " + location).build().toString(), + code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueSchema.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueSchema.java index cafe11e7278..5ed58837160 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueSchema.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/ValueSchema.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,364 +27,354 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.SizedUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.SizedUtil; /** - * - * Simple flat schema over a byte array where fields may be any of {@link org.apache.phoenix.schema.types.PDataType}. - * Optimized for positional access by index. - * - * + * Simple flat schema over a byte array where fields may be any of + * {@link org.apache.phoenix.schema.types.PDataType}. Optimized for positional access by index. * @since 0.1 */ public abstract class ValueSchema implements Writable { - public static final int ESTIMATED_VARIABLE_LENGTH_SIZE = 10; - private int[] fieldIndexByPosition; - private List fields; - private int estimatedLength; - private boolean isFixedLength; - private boolean isMaxLength; - private int minNullable; - // Only applicable for RowKeySchema (and only due to PHOENIX-2067), but - // added here as this is where serialization is done (and we need to - // maintain the same serialization shape for b/w compat). - protected boolean rowKeyOrderOptimizable; - - public ValueSchema() { + public static final int ESTIMATED_VARIABLE_LENGTH_SIZE = 10; + private int[] fieldIndexByPosition; + private List fields; + private int estimatedLength; + private boolean isFixedLength; + private boolean isMaxLength; + private int minNullable; + // Only applicable for RowKeySchema (and only due to PHOENIX-2067), but + // added here as this is where serialization is done (and we need to + // maintain the same serialization shape for b/w compat). 
+ protected boolean rowKeyOrderOptimizable; + + public ValueSchema() { + } + + protected ValueSchema(int minNullable, List fields) { + this(minNullable, fields, true); + } + + protected ValueSchema(int minNullable, List fields, boolean rowKeyOrderOptimizable) { + init(minNullable, fields, rowKeyOrderOptimizable); + } + + @Override + public String toString() { + return fields.toString(); + } + + public int getEstimatedSize() { // Memory size of ValueSchema + int count = fieldIndexByPosition.length; + return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE * (4 + count) + + SizedUtil.ARRAY_SIZE + count * Field.ESTIMATED_SIZE + SizedUtil.sizeOfArrayList(count); + } + + private void init(int minNullable, List fields, boolean rowKeyOrderOptimizable) { + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + this.minNullable = minNullable; + this.fields = ImmutableList.copyOf(fields); + int estimatedLength = 0; + boolean isMaxLength = true, isFixedLength = true; + int positions = 0; + for (Field field : fields) { + int fieldEstLength = 0; + PDataType type = field.getDataType(); + if (type != null) { + Integer byteSize = type.getByteSize(); + if (type.isFixedWidth()) { + fieldEstLength += field.getByteSize(); + } else { + isFixedLength = false; + // Account for vint for length if not fixed + if (byteSize == null) { + isMaxLength = false; + fieldEstLength += ESTIMATED_VARIABLE_LENGTH_SIZE; + } else { + fieldEstLength += WritableUtils.getVIntSize(byteSize); + fieldEstLength = byteSize; + } + } + } + positions += field.getCount(); + estimatedLength += fieldEstLength * field.getCount(); } - - protected ValueSchema(int minNullable, List fields) { - this(minNullable, fields, true); + fieldIndexByPosition = new int[positions]; + for (int i = 0, j = 0; i < fields.size(); i++) { + Field field = fields.get(i); + Arrays.fill(fieldIndexByPosition, j, j + field.getCount(), i); + j += field.getCount(); } - - protected ValueSchema(int minNullable, List fields, boolean rowKeyOrderOptimizable) { - init(minNullable, fields, rowKeyOrderOptimizable); + this.isFixedLength = isFixedLength; + this.isMaxLength = isMaxLength; + this.estimatedLength = estimatedLength; + } + + public int getFieldCount() { + return fieldIndexByPosition.length; + } + + public List getFields() { + return fields; + } + + /** Returns true if all types are fixed width */ + public boolean isFixedLength() { + return isFixedLength; + } + + /** + * @return true if {@link #getEstimatedValueLength()} returns the maximum length of a serialized + * value for this schema + */ + public boolean isMaxLength() { + return isMaxLength; + } + + /** Returns estimated size in bytes of a serialized value for this schema */ + public int getEstimatedValueLength() { + return estimatedLength; + } + + /** + * Non-nullable fields packed to the left so that we do not need to store trailing nulls. Knowing + * the minimum position of a nullable field enables this. + * @return the minimum position of a nullable field + */ + public int getMinNullable() { + return minNullable; + } + + public static final class Field implements Writable, PDatum { + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + byteSize; + result = prime * result + (type == null ? 0 : type.hashCode()); + result = prime * result + sortOrder.hashCode(); + result = prime * result + (isNullable ? 
1231 : 1237); + return result; } - + @Override - public String toString() { - return fields.toString(); + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + Field other = (Field) obj; + if (byteSize != other.byteSize) return false; + if (sortOrder != other.sortOrder) return false; + if (isNullable != other.isNullable) return false; + if (type != other.type) return false; + return true; } - - public int getEstimatedSize() { // Memory size of ValueSchema - int count = fieldIndexByPosition.length; - return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE * (4 + count) + - SizedUtil.ARRAY_SIZE + count * Field.ESTIMATED_SIZE + SizedUtil.sizeOfArrayList(count); + + public static final int ESTIMATED_SIZE = + SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 2 + SizedUtil.INT_SIZE * 3; + + private int count; + private PDataType type; + private int byteSize = 0; + private boolean isNullable; + private SortOrder sortOrder; + + public Field() { } - private void init(int minNullable, List fields, boolean rowKeyOrderOptimizable) { - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; - this.minNullable = minNullable; - this.fields = ImmutableList.copyOf(fields); - int estimatedLength = 0; - boolean isMaxLength = true, isFixedLength = true; - int positions = 0; - for (Field field : fields) { - int fieldEstLength = 0; - PDataType type = field.getDataType(); - if (type != null) { - Integer byteSize = type.getByteSize(); - if (type.isFixedWidth()) { - fieldEstLength += field.getByteSize(); - } else { - isFixedLength = false; - // Account for vint for length if not fixed - if (byteSize == null) { - isMaxLength = false; - fieldEstLength += ESTIMATED_VARIABLE_LENGTH_SIZE; - } else { - fieldEstLength += WritableUtils.getVIntSize(byteSize); - fieldEstLength = byteSize; - } - } - } - positions += field.getCount(); - estimatedLength += fieldEstLength * field.getCount(); - } - fieldIndexByPosition = new int[positions]; - for (int i = 0, j= 0; i < fields.size(); i++) { - Field field = fields.get(i); - Arrays.fill(fieldIndexByPosition, j, j + field.getCount(), i); - j += field.getCount(); + private Field(PDatum datum, boolean isNullable, int count, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + this.type = datum.getDataType(); + this.sortOrder = sortOrder; + this.count = count; + this.isNullable = isNullable; + if (this.type != null && this.type.isFixedWidth() && this.type.getByteSize() == null) { + if (datum.getMaxLength() != null) { + this.byteSize = datum.getMaxLength(); } - this.isFixedLength = isFixedLength; - this.isMaxLength = isMaxLength; - this.estimatedLength = estimatedLength; + } } - - public int getFieldCount() { - return fieldIndexByPosition.length; + + @Override + public String toString() { + return (count == 1 ? "" : count + " * ") + type + (byteSize == 0 ? "" : "(" + byteSize + ")") + + (isNullable ? "" : " NOT NULL") + (sortOrder == SortOrder.ASC ? 
"" : " " + sortOrder); } - - public List getFields() { - return fields; + + private Field(Field field, int count) { + this.type = field.getDataType(); + this.byteSize = field.byteSize; + this.count = count; + this.sortOrder = field.getSortOrder(); } - - /** - * @return true if all types are fixed width - */ - public boolean isFixedLength() { - return isFixedLength; + + @Override + public final SortOrder getSortOrder() { + return sortOrder; } - - /** - * @return true if {@link #getEstimatedValueLength()} returns the maximum length - * of a serialized value for this schema - */ - public boolean isMaxLength() { - return isMaxLength; + + @Override + public final PDataType getDataType() { + return type; } - - /** - * @return estimated size in bytes of a serialized value for this schema - */ - public int getEstimatedValueLength() { - return estimatedLength; + + @Override + public final boolean isNullable() { + return isNullable; } - - /** - * Non-nullable fields packed to the left so that we do not need to store trailing nulls. - * Knowing the minimum position of a nullable field enables this. - * @return the minimum position of a nullable field - */ - public int getMinNullable() { - return minNullable; + + public final int getByteSize() { + return type.getByteSize() == null ? byteSize : type.getByteSize(); } - - public static final class Field implements Writable, PDatum { - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + byteSize; - result = prime * result + (type == null ? 0 : type.hashCode()); - result = prime * result + sortOrder.hashCode(); - result = prime * result + (isNullable ? 1231 : 1237); - return result; - } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - Field other = (Field)obj; - if (byteSize != other.byteSize) return false; - if (sortOrder != other.sortOrder) return false; - if (isNullable != other.isNullable) return false; - if (type != other.type) return false; - return true; - } - - public static final int ESTIMATED_SIZE = SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE * 2 + SizedUtil.INT_SIZE * 3; - - private int count; - private PDataType type; - private int byteSize = 0; - private boolean isNullable; - private SortOrder sortOrder; - - public Field() { - } - - private Field(PDatum datum, boolean isNullable, int count, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - this.type = datum.getDataType(); - this.sortOrder = sortOrder; - this.count = count; - this.isNullable = isNullable; - if (this.type != null && this.type.isFixedWidth() && this.type.getByteSize() == null) { - if (datum.getMaxLength() != null) { - this.byteSize = datum.getMaxLength(); - } - } - } - - @Override - public String toString() { - return (count == 1 ? "" : count + " * ") - + type - + (byteSize == 0 ? "" : "(" + byteSize + ")") - + (isNullable ? "" : " NOT NULL") - + (sortOrder == SortOrder.ASC ? "" : " " + sortOrder); - } - - private Field(Field field, int count) { - this.type = field.getDataType(); - this.byteSize = field.byteSize; - this.count = count; - this.sortOrder = field.getSortOrder(); - } - - @Override - public final SortOrder getSortOrder() { - return sortOrder; - } - - @Override - public final PDataType getDataType() { - return type; - } - - @Override - public final boolean isNullable() { - return isNullable; - } - - public final int getByteSize() { - return type.getByteSize() == null ? 
byteSize : type.getByteSize(); - } - - public final int getCount() { - return count; - } + public final int getCount() { + return count; + } - @Override - public Integer getMaxLength() { - return type.isFixedWidth() ? byteSize : null; - } + @Override + public Integer getMaxLength() { + return type.isFixedWidth() ? byteSize : null; + } - @Override - public Integer getScale() { - return null; - } + @Override + public Integer getScale() { + return null; + } - @Override - public void readFields(DataInput input) throws IOException { - // Encode isNullable in sign bit of type ordinal (offset by 1, since ordinal could be 0) - int typeOrdinal = WritableUtils.readVInt(input); - if (typeOrdinal < 0) { - typeOrdinal *= -1; - this.isNullable = true; - } - this.type = PDataType.values()[typeOrdinal-1]; - this.count = WritableUtils.readVInt(input); - if (this.count < 0) { - this.count *= -1; - this.sortOrder = SortOrder.DESC; - } else { - this.sortOrder = SortOrder.ASC; - } - if (this.type.isFixedWidth() && this.type.getByteSize() == null) { - this.byteSize = WritableUtils.readVInt(input); - } - } + @Override + public void readFields(DataInput input) throws IOException { + // Encode isNullable in sign bit of type ordinal (offset by 1, since ordinal could be 0) + int typeOrdinal = WritableUtils.readVInt(input); + if (typeOrdinal < 0) { + typeOrdinal *= -1; + this.isNullable = true; + } + this.type = PDataType.values()[typeOrdinal - 1]; + this.count = WritableUtils.readVInt(input); + if (this.count < 0) { + this.count *= -1; + this.sortOrder = SortOrder.DESC; + } else { + this.sortOrder = SortOrder.ASC; + } + if (this.type.isFixedWidth() && this.type.getByteSize() == null) { + this.byteSize = WritableUtils.readVInt(input); + } + } - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, (type.ordinal() + 1) * (this.isNullable ? -1 : 1)); - WritableUtils.writeVInt(output, count * (sortOrder == SortOrder.ASC ? 1 : -1)); - if (type.isFixedWidth() && type.getByteSize() == null) { - WritableUtils.writeVInt(output, byteSize); - } - } + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, (type.ordinal() + 1) * (this.isNullable ? -1 : 1)); + WritableUtils.writeVInt(output, count * (sortOrder == SortOrder.ASC ? 1 : -1)); + if (type.isFixedWidth() && type.getByteSize() == null) { + WritableUtils.writeVInt(output, byteSize); + } } - - public abstract static class ValueSchemaBuilder { - protected List fields = new ArrayList(); - protected int nFields = Integer.MAX_VALUE; - protected final int minNullable; - - public ValueSchemaBuilder(int minNullable) { - this.minNullable = minNullable; - } - - protected List buildFields() { - List condensedFields = new ArrayList(fields.size()); - for (int i = 0; i < Math.min(nFields,fields.size()); ) { - Field field = fields.get(i); - int count = 1; - while ( ++i < fields.size() && field.equals(fields.get(i))) { - count++; - } - condensedFields.add(count == 1 ? 
field : new Field(field,count)); - } - return condensedFields; - } + } - abstract public ValueSchema build(); + public abstract static class ValueSchemaBuilder { + protected List fields = new ArrayList(); + protected int nFields = Integer.MAX_VALUE; + protected final int minNullable; - public ValueSchemaBuilder setMaxFields(int nFields) { - this.nFields = nFields; - return this; - } - - protected ValueSchemaBuilder addField(PDatum datum, boolean isNullable, SortOrder sortOrder) { - if(fields.size() >= nFields) { - throw new IllegalArgumentException("Adding too many fields to Schema (max " + nFields + ")"); - } - fields.add(new Field(datum, isNullable, 1, sortOrder)); - return this; + public ValueSchemaBuilder(int minNullable) { + this.minNullable = minNullable; + } + + protected List buildFields() { + List condensedFields = new ArrayList(fields.size()); + for (int i = 0; i < Math.min(nFields, fields.size());) { + Field field = fields.get(i); + int count = 1; + while (++i < fields.size() && field.equals(fields.get(i))) { + count++; } + condensedFields.add(count == 1 ? field : new Field(field, count)); + } + return condensedFields; } - - public int getEstimatedByteSize() { - int size = 0; - size += WritableUtils.getVIntSize(minNullable); - size += WritableUtils.getVIntSize(fields.size()); - size += fields.size() * 3; - return size; + + abstract public ValueSchema build(); + + public ValueSchemaBuilder setMaxFields(int nFields) { + this.nFields = nFields; + return this; } - - public Field getField(int position) { - return fields.get(fieldIndexByPosition[position]); + + protected ValueSchemaBuilder addField(PDatum datum, boolean isNullable, SortOrder sortOrder) { + if (fields.size() >= nFields) { + throw new IllegalArgumentException( + "Adding too many fields to Schema (max " + nFields + ")"); + } + fields.add(new Field(datum, isNullable, 1, sortOrder)); + return this; } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + minNullable; - for (Field field : fields) { - result = prime * result + field.hashCode(); - } - return result; + } + + public int getEstimatedByteSize() { + int size = 0; + size += WritableUtils.getVIntSize(minNullable); + size += WritableUtils.getVIntSize(fields.size()); + size += fields.size() * 3; + return size; + } + + public Field getField(int position) { + return fields.get(fieldIndexByPosition[position]); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + minNullable; + for (Field field : fields) { + result = prime * result + field.hashCode(); } + return result; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ValueSchema other = (ValueSchema)obj; - if (minNullable != other.minNullable) return false; - if (fields.size() != other.fields.size()) return false; - for (int i = 0; i < fields.size(); i++) { - if (!fields.get(i).equals(other.fields.get(i))) - return false; - } - return true; + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ValueSchema other = (ValueSchema) obj; + if (minNullable != other.minNullable) return false; + if (fields.size() != other.fields.size()) return false; + for (int i = 0; i < fields.size(); i++) { + if (!fields.get(i).equals(other.fields.get(i))) return false; } - - @Override - public void 
readFields(DataInput in) throws IOException { - int minNullable = WritableUtils.readVInt(in); - int nFields = WritableUtils.readVInt(in); - boolean rowKeyOrderOptimizable = false; - if (nFields < 0) { - rowKeyOrderOptimizable = true; - nFields *= -1; - } - List fields = Lists.newArrayListWithExpectedSize(nFields); - for (int i = 0; i < nFields; i++) { - Field field = new Field(); - field.readFields(in); - fields.add(field); - } - init(minNullable, fields, rowKeyOrderOptimizable); + return true; + } + + @Override + public void readFields(DataInput in) throws IOException { + int minNullable = WritableUtils.readVInt(in); + int nFields = WritableUtils.readVInt(in); + boolean rowKeyOrderOptimizable = false; + if (nFields < 0) { + rowKeyOrderOptimizable = true; + nFields *= -1; } - - @Override - public void write(DataOutput out) throws IOException { - WritableUtils.writeVInt(out, minNullable); - WritableUtils.writeVInt(out, fields.size() * (rowKeyOrderOptimizable ? -1 : 1)); - for (int i = 0; i < fields.size(); i++) { - fields.get(i).write(out); - } + List fields = Lists.newArrayListWithExpectedSize(nFields); + for (int i = 0; i < nFields; i++) { + Field field = new Field(); + field.readFields(in); + fields.add(field); + } + init(minNullable, fields, rowKeyOrderOptimizable); + } + + @Override + public void write(DataOutput out) throws IOException { + WritableUtils.writeVInt(out, minNullable); + WritableUtils.writeVInt(out, fields.size() * (rowKeyOrderOptimizable ? -1 : 1)); + for (int i = 0; i < fields.size(); i++) { + fields.get(i).write(out); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaRegistryRepository.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaRegistryRepository.java index 34785aa71ce..382dafe3bcf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaRegistryRepository.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaRegistryRepository.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,75 +17,74 @@ */ package org.apache.phoenix.schema.export; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.util.EnvironmentEdgeManager; - import java.io.IOException; import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.util.EnvironmentEdgeManager; + /** * Default in-memory implementation of SchemaRegistryRepository. 
Not intended for production use */ public class DefaultSchemaRegistryRepository implements SchemaRegistryRepository { - public static final String DEFAULT_SCHEMA_NAME = "default_schema"; - public static final String DEFAULT_TENANT_ID = "global"; + public static final String DEFAULT_SCHEMA_NAME = "default_schema"; + public static final String DEFAULT_TENANT_ID = "global"; - private static final String SEPARATOR = "*"; + private static final String SEPARATOR = "*"; - Map schemaMap = new HashMap(); + Map schemaMap = new HashMap(); - @Override - public void init(Configuration conf) throws IOException { + @Override + public void init(Configuration conf) throws IOException { - } + } - @Override - public String exportSchema(SchemaWriter writer, PTable table) throws IOException { - String schemaId = getSchemaId(table); - schemaMap.put(schemaId, writer.exportSchema(table)); - return schemaId; - } + @Override + public String exportSchema(SchemaWriter writer, PTable table) throws IOException { + String schemaId = getSchemaId(table); + schemaMap.put(schemaId, writer.exportSchema(table)); + return schemaId; + } - @Override - public String getSchemaById(String schemaId) throws IOException { - return schemaMap.get(schemaId); - } + @Override + public String getSchemaById(String schemaId) throws IOException { + return schemaMap.get(schemaId); + } - @Override - public String getSchemaByTable(PTable table) throws IOException { - return schemaMap.get(getSchemaId(table)); - } + @Override + public String getSchemaByTable(PTable table) throws IOException { + return schemaMap.get(getSchemaId(table)); + } - @Override - public void close() throws IOException { - schemaMap.clear(); - } + @Override + public void close() throws IOException { + schemaMap.clear(); + } - public static String getSchemaId(PTable table) { - String schemaMetadataName = getSchemaMetadataName(table); - String version = table.getSchemaVersion() != null ? table.getSchemaVersion() : - table.getLastDDLTimestamp() != null ? table.getLastDDLTimestamp().toString() : - Long.toString(EnvironmentEdgeManager.currentTimeMillis()); + public static String getSchemaId(PTable table) { + String schemaMetadataName = getSchemaMetadataName(table); + String version = table.getSchemaVersion() != null ? table.getSchemaVersion() + : table.getLastDDLTimestamp() != null ? table.getLastDDLTimestamp().toString() + : Long.toString(EnvironmentEdgeManager.currentTimeMillis()); - //tenant*schema*table*version-id - return String.format("%s" + SEPARATOR + "%s", schemaMetadataName, - version); - } + // tenant*schema*table*version-id + return String.format("%s" + SEPARATOR + "%s", schemaMetadataName, version); + } - private static String getSchemaMetadataName(PTable table) { - String schemaGroup = getSchemaGroup(table); - return schemaGroup + SEPARATOR + table.getTableName().getString(); - } + private static String getSchemaMetadataName(PTable table) { + String schemaGroup = getSchemaGroup(table); + return schemaGroup + SEPARATOR + table.getTableName().getString(); + } - private static String getSchemaGroup(PTable table) { - String tenantId = (table.getTenantId() != null) ? table.getTenantId().getString() : - DEFAULT_TENANT_ID; - String schemaName = table.getSchemaName().getString(); - if (schemaName == null) { - schemaName = DEFAULT_SCHEMA_NAME; - } - return tenantId + SEPARATOR + schemaName; + private static String getSchemaGroup(PTable table) { + String tenantId = + (table.getTenantId() != null) ? 
table.getTenantId().getString() : DEFAULT_TENANT_ID; + String schemaName = table.getSchemaName().getString(); + if (schemaName == null) { + schemaName = DEFAULT_SCHEMA_NAME; } + return tenantId + SEPARATOR + schemaName; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaWriter.java index f9930cd4d94..ab1df619fa2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaWriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/DefaultSchemaWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,28 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.export; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; -import java.io.IOException; - public class DefaultSchemaWriter implements SchemaWriter { - @Override - public void init(Configuration conf) throws IOException { + @Override + public void init(Configuration conf) throws IOException { - } + } - @Override - public String exportSchema(PTable table) throws IOException { - return PTableImpl.toProto(table).toString(); - } + @Override + public String exportSchema(PTable table) throws IOException { + return PTableImpl.toProto(table).toString(); + } - @Override - public void close() throws IOException { + @Override + public void close() throws IOException { - } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaImporter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaImporter.java index 89c0fdd6ea7..3ffa3b3d53f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaImporter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaImporter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.export; -import org.apache.phoenix.schema.PTable; - import java.io.IOException; +import org.apache.phoenix.schema.PTable; + /** * Interface for importing schemas stored externally from Phoenix into Phoenix by converting the * schema into a PTable */ public interface SchemaImporter { - /** - * - * @param schema String form of an external schema. The expected format of the schema depends - * on the implementation of the class. - * @return a Phoenix PTable - */ - PTable getTableFromSchema(String schema) throws IOException; + /** + * @param schema String form of an external schema. 
The expected format of the schema depends on + * the implementation of the class. + * @return a Phoenix PTable + */ + PTable getTableFromSchema(String schema) throws IOException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepository.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepository.java index 30f4a8d2e19..ae14d859d4b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepository.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepository.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,54 +17,52 @@ */ package org.apache.phoenix.schema.export; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.schema.PTable; - import java.io.Closeable; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.schema.PTable; + /** * Interface for exporting a Phoenix object (e.g a table or view) to an external schema repository - * The choice of which schema repository, and the transport mechanism, are deferred to - * implementing classes + * The choice of which schema repository, and the transport mechanism, are deferred to implementing + * classes */ public interface SchemaRegistryRepository extends Closeable { - String SCHEMA_WRITER_IMPL_KEY = - "org.apache.phoenix.export.schemawriter.impl"; - String SCHEMA_REGISTRY_IMPL_KEY = - "org.apache.phoenix.export.schemaregistry.impl"; + String SCHEMA_WRITER_IMPL_KEY = "org.apache.phoenix.export.schemawriter.impl"; + String SCHEMA_REGISTRY_IMPL_KEY = "org.apache.phoenix.export.schemaregistry.impl"; - /** - * Optional method for any necessary bootstrapping to connect to the external schema registry - * @param conf Configuration object with necessary parameters to talk to the external schema - * registry - * @throws IOException Exception if something goes wrong in connecting to the registry - */ - void init(Configuration conf) throws IOException; + /** + * Optional method for any necessary bootstrapping to connect to the external schema registry + * @param conf Configuration object with necessary parameters to talk to the external schema + * registry + * @throws IOException Exception if something goes wrong in connecting to the registry + */ + void init(Configuration conf) throws IOException; - /** - * Export a Phoenix PTable into an external schema registry by reformatting it into a suitable - * form. - * @param writer An object which can translate a PTable into a String suitable for the external - * schema registry - * @param table a Phoenix PTable for a table or view - * @return Schema id generated by the schema registry, represented as a string. - * @throws IOException Exception if something goes wrong in constructing or sending the schema - */ - String exportSchema(SchemaWriter writer, PTable table) throws IOException; + /** + * Export a Phoenix PTable into an external schema registry by reformatting it into a suitable + * form. 
+ * @param writer An object which can translate a PTable into a String suitable for the external + * schema registry + * @param table a Phoenix PTable for a table or view + * @return Schema id generated by the schema registry, represented as a string. + * @throws IOException Exception if something goes wrong in constructing or sending the schema + */ + String exportSchema(SchemaWriter writer, PTable table) throws IOException; - /** - * Return a schema from an external schema repository by its unique identifier - * @param schemaId schema identifier - * @return a schema - */ - String getSchemaById(String schemaId) throws IOException; + /** + * Return a schema from an external schema repository by its unique identifier + * @param schemaId schema identifier + * @return a schema + */ + String getSchemaById(String schemaId) throws IOException; - /** - * Return a schema from an external schema repository using information on a PTable - * @param table a Phoenix PTable for a table or view - * @return a schema - */ - String getSchemaByTable(PTable table) throws IOException; + /** + * Return a schema from an external schema repository using information on a PTable + * @param table a Phoenix PTable for a table or view + * @return a schema + */ + String getSchemaByTable(PTable table) throws IOException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepositoryFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepositoryFactory.java index d20a48797eb..581f3c1137e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepositoryFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaRegistryRepositoryFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,53 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.export; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - public final class SchemaRegistryRepositoryFactory { - private static final Logger LOGGER = LoggerFactory.getLogger(SchemaRegistryRepository.class); - private static SchemaRegistryRepository exporter; + private static final Logger LOGGER = LoggerFactory.getLogger(SchemaRegistryRepository.class); + private static SchemaRegistryRepository exporter; - public synchronized static SchemaRegistryRepository getSchemaRegistryRepository(Configuration conf) - throws IOException { - if (exporter != null) { - return exporter; - } + public synchronized static SchemaRegistryRepository + getSchemaRegistryRepository(Configuration conf) throws IOException { + if (exporter != null) { + return exporter; + } + try { + String className = conf.get(SchemaRegistryRepository.SCHEMA_REGISTRY_IMPL_KEY); + if (className == null) { + exporter = new DefaultSchemaRegistryRepository(); + } else { + Class clazz = + (Class) Class.forName(className); + exporter = clazz.newInstance(); + } + exporter.init(conf); + return exporter; + } catch (Exception e) { + LOGGER.error("Error constructing SchemaRegistryExporter object", e); + if (exporter != null) { try { - String className = conf.get(SchemaRegistryRepository.SCHEMA_REGISTRY_IMPL_KEY); - if (className == null) { - exporter = new DefaultSchemaRegistryRepository(); - } else { - Class clazz = - (Class) Class.forName(className); - exporter = clazz.newInstance(); - } - exporter.init(conf); - return exporter; - } catch (Exception e) { - LOGGER.error("Error constructing SchemaRegistryExporter object", e); - if (exporter != null) { - try { - exporter.close(); - exporter = null; - } catch (IOException innerE) { - LOGGER.error("Error closing incorrectly constructed SchemaRegistryExporter", e); - } - } - throw new IOException(e); + exporter.close(); + exporter = null; + } catch (IOException innerE) { + LOGGER.error("Error closing incorrectly constructed SchemaRegistryExporter", e); } + } + throw new IOException(e); } + } - public synchronized static void close() throws IOException { - if (exporter != null) { - exporter.close(); - exporter = null; - } + public synchronized static void close() throws IOException { + if (exporter != null) { + exporter.close(); + exporter = null; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriter.java index bef63527de7..527b85ce944 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,37 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.export; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.schema.PTable; - import java.io.Closeable; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.schema.PTable; + /** * Interface for classes to implement for converting Phoenix schema data, captured in a PTable, into - * some other format interpretable by an external system. Possible implementing classes would include - * converters to Avro, Protobuf, Thrift, various dialects of SQL, - * or other similar cross-platform data description languages. + * some other format interpretable by an external system. Possible implementing classes would + * include converters to Avro, Protobuf, Thrift, various dialects of SQL, or other similar + * cross-platform data description languages. */ public interface SchemaWriter extends Closeable { - public static final String SCHEMA_REGISTRY_IMPL_KEY = - "org.apache.phoenix.export.schemawriter.impl"; + public static final String SCHEMA_REGISTRY_IMPL_KEY = + "org.apache.phoenix.export.schemawriter.impl"; - /** - * Initialize the schema writer with appropriate configuration - * @param conf a Configuration object - * @throws IOException if something goes wrong during initialization - */ - void init(Configuration conf) throws IOException; + /** + * Initialize the schema writer with appropriate configuration + * @param conf a Configuration object + * @throws IOException if something goes wrong during initialization + */ + void init(Configuration conf) throws IOException; - /** - * Given a Phoenix PTable, output a schema document readable by some external system. - * @param table A Phoenix PTable describing a table or view - * @return a String interpretable as a data format schema in an external system - */ - String exportSchema(PTable table) throws IOException; + /** + * Given a Phoenix PTable, output a schema document readable by some external system. + * @param table A Phoenix PTable describing a table or view + * @return a String interpretable as a data format schema in an external system + */ + String exportSchema(PTable table) throws IOException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriterFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriterFactory.java index a47709ba76c..0ca1be8713e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriterFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/export/SchemaWriterFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,27 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.export; -import org.apache.hadoop.conf.Configuration; - import java.io.IOException; +import org.apache.hadoop.conf.Configuration; + public class SchemaWriterFactory { - public static SchemaWriter getSchemaWriter(Configuration conf) - throws IOException { - try { - String className = conf.get(SchemaRegistryRepository.SCHEMA_WRITER_IMPL_KEY); - if (className == null) { - return new DefaultSchemaWriter(); - } - Class clazz = - (Class) Class.forName(className); - return clazz.newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - ClassCastException e) { - throw new IOException(e); - } + public static SchemaWriter getSchemaWriter(Configuration conf) throws IOException { + try { + String className = conf.get(SchemaRegistryRepository.SCHEMA_WRITER_IMPL_KEY); + if (className == null) { + return new DefaultSchemaWriter(); + } + Class clazz = (Class) Class.forName(className); + return clazz.newInstance(); + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | ClassCastException e) { + throw new IOException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSource.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSource.java index 873a9f2d304..a165c37747b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSource.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,240 +15,241 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.metrics; public interface MetricsMetadataSource { - // Metrics2 and JMX constants - String METRICS_NAME = "PhoenixMetadata"; - String METRICS_CONTEXT = "phoenix"; - String METRICS_DESCRIPTION = "Metrics about the Phoenix MetadataEndpoint"; - String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - - String CREATE_EXPORT_COUNT = "createExportCount"; - String CREATE_EXPORT_COUNT_DESC = "Count of CREATE DDL statements exported to schema registry"; - - String CREATE_EXPORT_FAILURE_COUNT = "createExportFailureCount"; - String CREATE_EXPORT_FAILURE_COUNT_DESC = "Count of create DDL that failed on export " - + "to schema registry"; - - String CREATE_EXPORT_TIME = "createExportTime"; - String CREATE_EXPORT_TIME_DESC = "Time taken while exporting CREATE DDL statements to schema registry"; + // Metrics2 and JMX constants + String METRICS_NAME = "PhoenixMetadata"; + String METRICS_CONTEXT = "phoenix"; + String METRICS_DESCRIPTION = "Metrics about the Phoenix MetadataEndpoint"; + String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String CREATE_EXPORT_FAILURE_TIME = "createExportFailureTime"; - String CREATE_EXPORT_FAILURE_TIME_DESC = "Time taken while failing to export " - + "CREATE DDL to schema registry"; + String CREATE_EXPORT_COUNT = "createExportCount"; + String CREATE_EXPORT_COUNT_DESC = "Count of CREATE DDL statements exported to schema registry"; - String ALTER_EXPORT_COUNT = "alterExportCount"; - String ALTER_EXPORT_COUNT_DESC = "Count of ALTER DDL statements exported to schema registry"; + String CREATE_EXPORT_FAILURE_COUNT = "createExportFailureCount"; + String CREATE_EXPORT_FAILURE_COUNT_DESC = + "Count of create DDL that failed on export " + "to schema registry"; - String ALTER_EXPORT_FAILURE_COUNT = "alterExportFailureCount"; - String ALTER_EXPORT_FAILURE_COUNT_DESC = "Count of ALTER DDL that failed on export " - + "to schema registry"; + String CREATE_EXPORT_TIME = "createExportTime"; + String CREATE_EXPORT_TIME_DESC = + "Time taken while exporting CREATE DDL statements to schema registry"; - String ALTER_EXPORT_TIME = "alterExportTime"; - String ALTER_EXPORT_TIME_DESC = "Time taken while exporting ALTER DDL statements to schema registry"; + String CREATE_EXPORT_FAILURE_TIME = "createExportFailureTime"; + String CREATE_EXPORT_FAILURE_TIME_DESC = + "Time taken while failing to export " + "CREATE DDL to schema registry"; - String ALTER_EXPORT_FAILURE_TIME = "alterExportFailureTime"; - String ALTER_EXPORT_FAILURE_TIME_DESC = "Time taken while failing to export " - + "ALTER DDL to schema registry"; + String ALTER_EXPORT_COUNT = "alterExportCount"; + String ALTER_EXPORT_COUNT_DESC = "Count of ALTER DDL statements exported to schema registry"; - String CREATE_TABLE_COUNT = "createTableCount"; - String CREATE_TABLE_COUNT_DESC = "Count of CREATE TABLE DDL statements"; + String ALTER_EXPORT_FAILURE_COUNT = "alterExportFailureCount"; + String ALTER_EXPORT_FAILURE_COUNT_DESC = + "Count of ALTER DDL that failed on export " + "to schema registry"; - String CREATE_VIEW_COUNT = "createViewCount"; - String CREATE_VIEW_COUNT_DESC = "Count of CREATE VIEW DDL statements"; + String ALTER_EXPORT_TIME = "alterExportTime"; + String ALTER_EXPORT_TIME_DESC = + "Time taken while exporting ALTER DDL statements to schema registry"; - String CREATE_INDEX_COUNT = "createIndexCount"; - String CREATE_INDEX_COUNT_DESC = "Count of CREATE INDEX DDL statements"; + String ALTER_EXPORT_FAILURE_TIME = "alterExportFailureTime"; + String 
ALTER_EXPORT_FAILURE_TIME_DESC = + "Time taken while failing to export " + "ALTER DDL to schema registry"; - String CREATE_SCHEMA_COUNT = "createSchemaCount"; - String CREATE_SCHEMA_COUNT_DESC = "Count of CREATE SCHEMA DDL statements"; + String CREATE_TABLE_COUNT = "createTableCount"; + String CREATE_TABLE_COUNT_DESC = "Count of CREATE TABLE DDL statements"; - String CREATE_FUNCTION_COUNT = "createFunctionCount"; - String CREATE_FUNCTION_COUNT_DESC = "Count of CREATE FUNCTION DDL statements"; + String CREATE_VIEW_COUNT = "createViewCount"; + String CREATE_VIEW_COUNT_DESC = "Count of CREATE VIEW DDL statements"; - String ALTER_ADD_COLUMN_COUNT = "alterAddColumnCount"; - String ALTER_ADD_COLUMN_COUNT_DESC = "Count of ALTER statements that add columns"; + String CREATE_INDEX_COUNT = "createIndexCount"; + String CREATE_INDEX_COUNT_DESC = "Count of CREATE INDEX DDL statements"; - String ALTER_DROP_COLUMN_COUNT = "alterDropColumnCount"; - String ALTER_DROP_COLUMN_COUNT_DESC = "Count of ALTER statements that drop columns"; + String CREATE_SCHEMA_COUNT = "createSchemaCount"; + String CREATE_SCHEMA_COUNT_DESC = "Count of CREATE SCHEMA DDL statements"; - String DROP_TABLE_COUNT = "dropTableCount"; - String DROP_TABLE_COUNT_DESC = "Count of DROP TABLE DDL statements"; + String CREATE_FUNCTION_COUNT = "createFunctionCount"; + String CREATE_FUNCTION_COUNT_DESC = "Count of CREATE FUNCTION DDL statements"; - String DROP_VIEW_COUNT = "dropViewCount"; - String DROP_VIEW_COUNT_DESC = "Count of DROP VIEW DDL statements"; + String ALTER_ADD_COLUMN_COUNT = "alterAddColumnCount"; + String ALTER_ADD_COLUMN_COUNT_DESC = "Count of ALTER statements that add columns"; - String DROP_INDEX_COUNT = "dropIndexCount"; - String DROP_INDEX_COUNT_DESC = "Count of DROP INDEX DDL statements"; + String ALTER_DROP_COLUMN_COUNT = "alterDropColumnCount"; + String ALTER_DROP_COLUMN_COUNT_DESC = "Count of ALTER statements that drop columns"; - String DROP_SCHEMA_COUNT = "dropSchemaCount"; - String DROP_SCHEMA_COUNT_DESC = "Count of DROP SCHEMA DDL statements"; + String DROP_TABLE_COUNT = "dropTableCount"; + String DROP_TABLE_COUNT_DESC = "Count of DROP TABLE DDL statements"; - String DROP_FUNCTION_COUNT = "dropFunctionCount"; - String DROP_FUNCTION_COUNT_DESC = "Count of DROP FUNCTION DDL statements"; + String DROP_VIEW_COUNT = "dropViewCount"; + String DROP_VIEW_COUNT_DESC = "Count of DROP VIEW DDL statements"; - String METADATA_CACHE_ESTIMATED_USED_SIZE = "metadataCacheEstimatedUsedSize"; - String METADATA_CACHE_ESTIMATED_USED_SIZE_DESC = "Estimated used size of the metadata cache"; + String DROP_INDEX_COUNT = "dropIndexCount"; + String DROP_INDEX_COUNT_DESC = "Count of DROP INDEX DDL statements"; - String METADATA_CACHE_HIT_COUNT = "metadataCacheHitCount"; - String METADATA_CACHE_HIT_COUNT_DESC = "Hit count of the metadata cache"; + String DROP_SCHEMA_COUNT = "dropSchemaCount"; + String DROP_SCHEMA_COUNT_DESC = "Count of DROP SCHEMA DDL statements"; - String METADATA_CACHE_MISS_COUNT = "metadataCacheMissCount"; - String METADATA_CACHE_MISS_COUNT_DESC = "Miss count of the metadata cache"; + String DROP_FUNCTION_COUNT = "dropFunctionCount"; + String DROP_FUNCTION_COUNT_DESC = "Count of DROP FUNCTION DDL statements"; + + String METADATA_CACHE_ESTIMATED_USED_SIZE = "metadataCacheEstimatedUsedSize"; + String METADATA_CACHE_ESTIMATED_USED_SIZE_DESC = "Estimated used size of the metadata cache"; - String METADATA_CACHE_EVICTION_COUNT = "metadataCacheEvictionCount"; - String METADATA_CACHE_EVICTION_COUNT_DESC = "Eviction count of the 
metadata cache"; + String METADATA_CACHE_HIT_COUNT = "metadataCacheHitCount"; + String METADATA_CACHE_HIT_COUNT_DESC = "Hit count of the metadata cache"; - String METADATA_CACHE_REMOVAL_COUNT = "metadataCacheRemovalCount"; - String METADATA_CACHE_REMOVAL_COUNT_DESC = "Removal count of the metadata cache"; - - String METADATA_CACHE_ADD_COUNT = "metadataCacheAddCount"; - String METADATA_CACHE_ADD_COUNT_DESC = "Add count of the metadata cache"; - - // TODO: metrics for addIndexToTable and dropIndexes - - /** - * Updates the count of successful requests to the schema registry for CREATE statements - */ - void incrementCreateExportCount(); - - /** - * Updates the histogram of time taken to update the schema registry for CREATE statements - * @param t Time taken - */ - void updateCreateExportTime(long t); - - /** - * Updates the count of unsuccessful requests to the schema registry for CREATE statements - */ - void incrementCreateExportFailureCount(); - - /** - * Updates the histogram of time taken trying and failing to - * update the schema registry for CREATE statements - * @param t time taken - */ - void updateCreateExportFailureTime(long t); - - /** - * Updates the count of successful requests to the schema registry for ALTER statements - */ - void incrementAlterExportCount(); - - /** - * Updates the histogram of time taken updating the schema registry for ALTER statements - * @param t time taken - */ - void updateAlterExportTime(long t); - - /** - * Updates the count of unsuccessful requests to the schema registry for ALTER statements - */ - void incrementAlterExportFailureCount(); - - /** - * Updates the histogram of time taken trying and failing to update the schema registry for - * ALTER statements - * @param t time taken - */ - void updateAlterExportFailureTime(long t); - - /** - * Updates the count of successful CREATE TABLE DDL operations - */ - void incrementCreateTableCount(); - - /** - * Updates the count of successful CREATE VIEW DDL operations - */ - void incrementCreateViewCount(); - - /** - * Updates the count of successful CREATE INDEX DDL operations - */ - void incrementCreateIndexCount(); - - /** - * Updates the count of successful CREATE SCHEMA DDL operations - */ - void incrementCreateSchemaCount(); - - /** - * Updates the count of successful CREATE FUNCTION DDL operations - */ - void incrementCreateFunctionCount(); - - /** - * Updates the count of successful ALTER DDL operations that add columns - */ - void incrementAlterAddColumnCount(); - - /** - * Updates the count of successful ALTER DDL operations that drop columns - */ - void incrementAlterDropColumnCount(); - - /** - * Updates the count of successful DROP TABLE DDL operations - */ - void incrementDropTableCount(); - - /** - * Updates the count of successful DROP VIEW DDL operations - */ - void incrementDropViewCount(); - - /** - * Updates the count of successful DROP INDEX DDL operations - */ - void incrementDropIndexCount(); - - /** - * Updates the count of successful DROP SCHEMA DDL operations - */ - void incrementDropSchemaCount(); - - /** - * Updates the count of successful DROP FUNCTION DDL operations - */ - void incrementDropFunctionCount(); - - /** - * Increases the estimated used size of metadata cache - * @param estimatedSize the estimated size to be increased - */ - void incrementMetadataCacheUsedSize(long estimatedSize); - - /** - * Decreases the estimated used size of metadata cache - * @param estimatedSize the estimated size to be decreased - */ - void decrementMetadataCacheUsedSize(long 
estimatedSize); - - /** - * Updates the count of metadata cache hit - */ - void incrementMetadataCacheHitCount(); - - /** - * Updates the count of metadata cache miss - */ - void incrementMetadataCacheMissCount(); - - /** - * Updates the count of metadata cache eviction - */ - void incrementMetadataCacheEvictionCount(); - - /** - * Updates the count of metadata cache removal - */ - void incrementMetadataCacheRemovalCount(); - - /** - * Updates the count of metadata cache add - */ - void incrementMetadataCacheAddCount(); + String METADATA_CACHE_MISS_COUNT = "metadataCacheMissCount"; + String METADATA_CACHE_MISS_COUNT_DESC = "Miss count of the metadata cache"; + + String METADATA_CACHE_EVICTION_COUNT = "metadataCacheEvictionCount"; + String METADATA_CACHE_EVICTION_COUNT_DESC = "Eviction count of the metadata cache"; + + String METADATA_CACHE_REMOVAL_COUNT = "metadataCacheRemovalCount"; + String METADATA_CACHE_REMOVAL_COUNT_DESC = "Removal count of the metadata cache"; + + String METADATA_CACHE_ADD_COUNT = "metadataCacheAddCount"; + String METADATA_CACHE_ADD_COUNT_DESC = "Add count of the metadata cache"; + + // TODO: metrics for addIndexToTable and dropIndexes + + /** + * Updates the count of successful requests to the schema registry for CREATE statements + */ + void incrementCreateExportCount(); + + /** + * Updates the histogram of time taken to update the schema registry for CREATE statements + * @param t Time taken + */ + void updateCreateExportTime(long t); + + /** + * Updates the count of unsuccessful requests to the schema registry for CREATE statements + */ + void incrementCreateExportFailureCount(); + + /** + * Updates the histogram of time taken trying and failing to update the schema registry for CREATE + * statements + * @param t time taken + */ + void updateCreateExportFailureTime(long t); + + /** + * Updates the count of successful requests to the schema registry for ALTER statements + */ + void incrementAlterExportCount(); + + /** + * Updates the histogram of time taken updating the schema registry for ALTER statements + * @param t time taken + */ + void updateAlterExportTime(long t); + + /** + * Updates the count of unsuccessful requests to the schema registry for ALTER statements + */ + void incrementAlterExportFailureCount(); + + /** + * Updates the histogram of time taken trying and failing to update the schema registry for ALTER + * statements + * @param t time taken + */ + void updateAlterExportFailureTime(long t); + + /** + * Updates the count of successful CREATE TABLE DDL operations + */ + void incrementCreateTableCount(); + + /** + * Updates the count of successful CREATE VIEW DDL operations + */ + void incrementCreateViewCount(); + + /** + * Updates the count of successful CREATE INDEX DDL operations + */ + void incrementCreateIndexCount(); + + /** + * Updates the count of successful CREATE SCHEMA DDL operations + */ + void incrementCreateSchemaCount(); + + /** + * Updates the count of successful CREATE FUNCTION DDL operations + */ + void incrementCreateFunctionCount(); + + /** + * Updates the count of successful ALTER DDL operations that add columns + */ + void incrementAlterAddColumnCount(); + + /** + * Updates the count of successful ALTER DDL operations that drop columns + */ + void incrementAlterDropColumnCount(); + + /** + * Updates the count of successful DROP TABLE DDL operations + */ + void incrementDropTableCount(); + + /** + * Updates the count of successful DROP VIEW DDL operations + */ + void incrementDropViewCount(); + + /** + * Updates the count of 
successful DROP INDEX DDL operations + */ + void incrementDropIndexCount(); + + /** + * Updates the count of successful DROP SCHEMA DDL operations + */ + void incrementDropSchemaCount(); + + /** + * Updates the count of successful DROP FUNCTION DDL operations + */ + void incrementDropFunctionCount(); + + /** + * Increases the estimated used size of metadata cache + * @param estimatedSize the estimated size to be increased + */ + void incrementMetadataCacheUsedSize(long estimatedSize); + + /** + * Decreases the estimated used size of metadata cache + * @param estimatedSize the estimated size to be decreased + */ + void decrementMetadataCacheUsedSize(long estimatedSize); + + /** + * Updates the count of metadata cache hit + */ + void incrementMetadataCacheHitCount(); + + /** + * Updates the count of metadata cache miss + */ + void incrementMetadataCacheMissCount(); + + /** + * Updates the count of metadata cache eviction + */ + void incrementMetadataCacheEvictionCount(); + + /** + * Updates the count of metadata cache removal + */ + void incrementMetadataCacheRemovalCount(); + + /** + * Updates the count of metadata cache add + */ + void incrementMetadataCacheAddCount(); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceFactory.java index 29e914ff3f2..95c4f97050c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,28 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.metrics; /** * Factory class for creating {@link MetricsMetadataSource} instances */ public class MetricsMetadataSourceFactory { - private static final MetricsMetadataSourceFactory INSTANCE = new MetricsMetadataSourceFactory(); + private static final MetricsMetadataSourceFactory INSTANCE = new MetricsMetadataSourceFactory(); - private volatile MetricsMetadataSource metricsMetadataSource; + private volatile MetricsMetadataSource metricsMetadataSource; - private MetricsMetadataSourceFactory() {} + private MetricsMetadataSourceFactory() { + } - public static MetricsMetadataSourceFactory getInstance() { - return INSTANCE; - } + public static MetricsMetadataSourceFactory getInstance() { + return INSTANCE; + } - public static synchronized MetricsMetadataSource getMetadataMetricsSource() { - if (INSTANCE.metricsMetadataSource == null) { - INSTANCE.metricsMetadataSource = new MetricsMetadataSourceImpl(); - } - return INSTANCE.metricsMetadataSource; + public static synchronized MetricsMetadataSource getMetadataMetricsSource() { + if (INSTANCE.metricsMetadataSource == null) { + INSTANCE.metricsMetadataSource = new MetricsMetadataSourceImpl(); } + return INSTANCE.metricsMetadataSource; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceImpl.java index 4af91ff1cca..e7310d87629 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/metrics/MetricsMetadataSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema.metrics; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,219 +23,238 @@ public class MetricsMetadataSourceImpl extends BaseSourceImpl implements MetricsMetadataSource { - private final MutableFastCounter createExportCount; - private final MetricHistogram createExportTimeHisto; - - private final MutableFastCounter createExportFailureCount; - private final MetricHistogram createExportFailureTimeHisto; - - private final MutableFastCounter alterExportCount; - private final MetricHistogram alterExportTimeHisto; - - private final MutableFastCounter alterExportFailureCount; - private final MetricHistogram alterExportFailureTimeHisto; - - private final MutableFastCounter createTableCount; - private final MutableFastCounter createViewCount; - private final MutableFastCounter createIndexCount; - private final MutableFastCounter createSchemaCount; - private final MutableFastCounter createFunctionCount; - - private final MutableFastCounter alterAddColumnCount; - private final MutableFastCounter alterDropColumnCount; - - private final MutableFastCounter dropTableCount; - private final MutableFastCounter dropViewCount; - private final MutableFastCounter dropIndexCount; - private final MutableFastCounter dropSchemaCount; - private final MutableFastCounter dropFunctionCount; - - private final MutableFastCounter metadataCacheUsedSize; - private final MutableFastCounter metadataCacheHitCount; - private final MutableFastCounter metadataCacheMissCount; - private final MutableFastCounter metadataCacheEvictionCount; - private final MutableFastCounter metadataCacheRemovalCount; - private final MutableFastCounter metadataCacheAddCount; - - public MetricsMetadataSourceImpl() { - this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); - } - - public MetricsMetadataSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { - super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - - createExportCount = getMetricsRegistry().newCounter(CREATE_EXPORT_COUNT, - CREATE_EXPORT_COUNT_DESC, 0L); - createExportTimeHisto = getMetricsRegistry().newHistogram(CREATE_EXPORT_TIME, CREATE_EXPORT_TIME_DESC); - - createExportFailureCount = getMetricsRegistry().newCounter(CREATE_EXPORT_FAILURE_COUNT, - CREATE_EXPORT_FAILURE_COUNT_DESC, 0L); - createExportFailureTimeHisto = getMetricsRegistry().newHistogram(CREATE_EXPORT_FAILURE_TIME, - CREATE_EXPORT_FAILURE_TIME_DESC); - - alterExportCount = getMetricsRegistry().newCounter(ALTER_EXPORT_COUNT, - ALTER_EXPORT_COUNT_DESC, 0L); - alterExportTimeHisto = getMetricsRegistry().newHistogram(ALTER_EXPORT_TIME, ALTER_EXPORT_TIME_DESC); - - alterExportFailureCount = getMetricsRegistry().newCounter(ALTER_EXPORT_FAILURE_COUNT, - ALTER_EXPORT_FAILURE_COUNT_DESC, 0L); - alterExportFailureTimeHisto = getMetricsRegistry().newHistogram(ALTER_EXPORT_FAILURE_TIME, - ALTER_EXPORT_FAILURE_TIME_DESC); - - createTableCount = getMetricsRegistry().newCounter(CREATE_TABLE_COUNT, - CREATE_TABLE_COUNT_DESC, 0L); - createViewCount = getMetricsRegistry().newCounter(CREATE_VIEW_COUNT, - CREATE_VIEW_COUNT_DESC, 0L); - createIndexCount = getMetricsRegistry().newCounter(CREATE_INDEX_COUNT, - CREATE_INDEX_COUNT_DESC, 0L); - createFunctionCount = getMetricsRegistry().newCounter(CREATE_FUNCTION_COUNT, - CREATE_FUNCTION_COUNT_DESC, 0L); - createSchemaCount = getMetricsRegistry().newCounter(CREATE_SCHEMA_COUNT, - CREATE_SCHEMA_COUNT_DESC, 0L); - - alterAddColumnCount = 
getMetricsRegistry().newCounter(ALTER_ADD_COLUMN_COUNT, - ALTER_ADD_COLUMN_COUNT_DESC, 0L); - alterDropColumnCount = getMetricsRegistry().newCounter(ALTER_DROP_COLUMN_COUNT, - ALTER_DROP_COLUMN_COUNT_DESC, 0L); - - dropTableCount = getMetricsRegistry().newCounter(DROP_TABLE_COUNT, - DROP_TABLE_COUNT_DESC, 0L); - dropViewCount = getMetricsRegistry().newCounter(DROP_VIEW_COUNT, - DROP_VIEW_COUNT_DESC, 0L); - dropIndexCount = getMetricsRegistry().newCounter(DROP_INDEX_COUNT, - DROP_INDEX_COUNT_DESC, 0L); - dropSchemaCount = getMetricsRegistry().newCounter(DROP_SCHEMA_COUNT, - DROP_SCHEMA_COUNT_DESC, 0L); - dropFunctionCount = getMetricsRegistry().newCounter(DROP_FUNCTION_COUNT, - DROP_FUNCTION_COUNT_DESC, 0L); - - metadataCacheUsedSize = getMetricsRegistry().newCounter(METADATA_CACHE_ESTIMATED_USED_SIZE, - METADATA_CACHE_ESTIMATED_USED_SIZE_DESC, 0L); - metadataCacheHitCount = getMetricsRegistry().newCounter(METADATA_CACHE_HIT_COUNT, - METADATA_CACHE_HIT_COUNT_DESC, 0L); - metadataCacheMissCount = getMetricsRegistry().newCounter(METADATA_CACHE_MISS_COUNT, - METADATA_CACHE_MISS_COUNT_DESC, 0L); - metadataCacheEvictionCount = getMetricsRegistry().newCounter(METADATA_CACHE_EVICTION_COUNT, - METADATA_CACHE_EVICTION_COUNT_DESC, 0L); - metadataCacheRemovalCount = getMetricsRegistry().newCounter(METADATA_CACHE_REMOVAL_COUNT, - METADATA_CACHE_REMOVAL_COUNT_DESC, 0L); - metadataCacheAddCount = getMetricsRegistry().newCounter(METADATA_CACHE_ADD_COUNT, - METADATA_CACHE_ADD_COUNT_DESC, 0L); - } - - @Override public void incrementCreateExportCount() { - createExportCount.incr(); - } - - @Override public void updateCreateExportTime(long t) { - createExportTimeHisto.add(t); - } - - @Override public void incrementCreateExportFailureCount() { - createExportFailureCount.incr(); - } - - @Override public void updateCreateExportFailureTime(long t) { - createExportFailureTimeHisto.add(t); - } - - @Override public void incrementAlterExportCount() { - alterExportCount.incr(); - } - - @Override public void updateAlterExportTime(long t) { - alterExportTimeHisto.add(t); - } - - @Override public void incrementAlterExportFailureCount() { - alterExportFailureCount.incr(); - } - - @Override public void updateAlterExportFailureTime(long t) { - alterExportFailureTimeHisto.add(t); - } - - @Override public void incrementCreateTableCount() { - createTableCount.incr(); - } - - @Override public void incrementCreateViewCount() { - createViewCount.incr(); - } - - @Override public void incrementCreateIndexCount() { - createIndexCount.incr(); - } - - @Override public void incrementCreateSchemaCount() { - createSchemaCount.incr(); - } - - @Override public void incrementCreateFunctionCount() { - createFunctionCount.incr(); - } - - @Override public void incrementAlterAddColumnCount() { - alterAddColumnCount.incr(); - } - - @Override public void incrementAlterDropColumnCount() { - alterDropColumnCount.incr(); - } - - @Override public void incrementDropTableCount() { - dropTableCount.incr(); - } - - @Override public void incrementDropViewCount() { - dropViewCount.incr(); - } - - @Override public void incrementDropIndexCount() { - dropIndexCount.incr(); - } - - @Override public void incrementDropSchemaCount() { - dropSchemaCount.incr(); - } - - @Override public void incrementDropFunctionCount() { - dropFunctionCount.incr(); - } - - @Override - public void incrementMetadataCacheUsedSize(long estimatedSize) { - metadataCacheUsedSize.incr(estimatedSize); - } - - @Override - public void decrementMetadataCacheUsedSize(long estimatedSize) { - 
metadataCacheUsedSize.incr(-estimatedSize); - } - - @Override - public void incrementMetadataCacheHitCount() { - metadataCacheHitCount.incr(); - } - - @Override - public void incrementMetadataCacheMissCount() { - metadataCacheMissCount.incr(); - } - - @Override - public void incrementMetadataCacheEvictionCount() { - metadataCacheEvictionCount.incr(); - } - - @Override - public void incrementMetadataCacheRemovalCount() { - metadataCacheRemovalCount.incr(); - } - - @Override - public void incrementMetadataCacheAddCount() { - metadataCacheAddCount.incr(); - } + private final MutableFastCounter createExportCount; + private final MetricHistogram createExportTimeHisto; + + private final MutableFastCounter createExportFailureCount; + private final MetricHistogram createExportFailureTimeHisto; + + private final MutableFastCounter alterExportCount; + private final MetricHistogram alterExportTimeHisto; + + private final MutableFastCounter alterExportFailureCount; + private final MetricHistogram alterExportFailureTimeHisto; + + private final MutableFastCounter createTableCount; + private final MutableFastCounter createViewCount; + private final MutableFastCounter createIndexCount; + private final MutableFastCounter createSchemaCount; + private final MutableFastCounter createFunctionCount; + + private final MutableFastCounter alterAddColumnCount; + private final MutableFastCounter alterDropColumnCount; + + private final MutableFastCounter dropTableCount; + private final MutableFastCounter dropViewCount; + private final MutableFastCounter dropIndexCount; + private final MutableFastCounter dropSchemaCount; + private final MutableFastCounter dropFunctionCount; + + private final MutableFastCounter metadataCacheUsedSize; + private final MutableFastCounter metadataCacheHitCount; + private final MutableFastCounter metadataCacheMissCount; + private final MutableFastCounter metadataCacheEvictionCount; + private final MutableFastCounter metadataCacheRemovalCount; + private final MutableFastCounter metadataCacheAddCount; + + public MetricsMetadataSourceImpl() { + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); + } + + public MetricsMetadataSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { + super(metricsName, metricsDescription, metricsContext, metricsJmxContext); + + createExportCount = + getMetricsRegistry().newCounter(CREATE_EXPORT_COUNT, CREATE_EXPORT_COUNT_DESC, 0L); + createExportTimeHisto = + getMetricsRegistry().newHistogram(CREATE_EXPORT_TIME, CREATE_EXPORT_TIME_DESC); + + createExportFailureCount = getMetricsRegistry().newCounter(CREATE_EXPORT_FAILURE_COUNT, + CREATE_EXPORT_FAILURE_COUNT_DESC, 0L); + createExportFailureTimeHisto = getMetricsRegistry().newHistogram(CREATE_EXPORT_FAILURE_TIME, + CREATE_EXPORT_FAILURE_TIME_DESC); + + alterExportCount = + getMetricsRegistry().newCounter(ALTER_EXPORT_COUNT, ALTER_EXPORT_COUNT_DESC, 0L); + alterExportTimeHisto = + getMetricsRegistry().newHistogram(ALTER_EXPORT_TIME, ALTER_EXPORT_TIME_DESC); + + alterExportFailureCount = getMetricsRegistry().newCounter(ALTER_EXPORT_FAILURE_COUNT, + ALTER_EXPORT_FAILURE_COUNT_DESC, 0L); + alterExportFailureTimeHisto = + getMetricsRegistry().newHistogram(ALTER_EXPORT_FAILURE_TIME, ALTER_EXPORT_FAILURE_TIME_DESC); + + createTableCount = + getMetricsRegistry().newCounter(CREATE_TABLE_COUNT, CREATE_TABLE_COUNT_DESC, 0L); + createViewCount = + getMetricsRegistry().newCounter(CREATE_VIEW_COUNT, CREATE_VIEW_COUNT_DESC, 0L); + createIndexCount 
= + getMetricsRegistry().newCounter(CREATE_INDEX_COUNT, CREATE_INDEX_COUNT_DESC, 0L); + createFunctionCount = + getMetricsRegistry().newCounter(CREATE_FUNCTION_COUNT, CREATE_FUNCTION_COUNT_DESC, 0L); + createSchemaCount = + getMetricsRegistry().newCounter(CREATE_SCHEMA_COUNT, CREATE_SCHEMA_COUNT_DESC, 0L); + + alterAddColumnCount = + getMetricsRegistry().newCounter(ALTER_ADD_COLUMN_COUNT, ALTER_ADD_COLUMN_COUNT_DESC, 0L); + alterDropColumnCount = + getMetricsRegistry().newCounter(ALTER_DROP_COLUMN_COUNT, ALTER_DROP_COLUMN_COUNT_DESC, 0L); + + dropTableCount = getMetricsRegistry().newCounter(DROP_TABLE_COUNT, DROP_TABLE_COUNT_DESC, 0L); + dropViewCount = getMetricsRegistry().newCounter(DROP_VIEW_COUNT, DROP_VIEW_COUNT_DESC, 0L); + dropIndexCount = getMetricsRegistry().newCounter(DROP_INDEX_COUNT, DROP_INDEX_COUNT_DESC, 0L); + dropSchemaCount = + getMetricsRegistry().newCounter(DROP_SCHEMA_COUNT, DROP_SCHEMA_COUNT_DESC, 0L); + dropFunctionCount = + getMetricsRegistry().newCounter(DROP_FUNCTION_COUNT, DROP_FUNCTION_COUNT_DESC, 0L); + + metadataCacheUsedSize = getMetricsRegistry().newCounter(METADATA_CACHE_ESTIMATED_USED_SIZE, + METADATA_CACHE_ESTIMATED_USED_SIZE_DESC, 0L); + metadataCacheHitCount = + getMetricsRegistry().newCounter(METADATA_CACHE_HIT_COUNT, METADATA_CACHE_HIT_COUNT_DESC, 0L); + metadataCacheMissCount = getMetricsRegistry().newCounter(METADATA_CACHE_MISS_COUNT, + METADATA_CACHE_MISS_COUNT_DESC, 0L); + metadataCacheEvictionCount = getMetricsRegistry().newCounter(METADATA_CACHE_EVICTION_COUNT, + METADATA_CACHE_EVICTION_COUNT_DESC, 0L); + metadataCacheRemovalCount = getMetricsRegistry().newCounter(METADATA_CACHE_REMOVAL_COUNT, + METADATA_CACHE_REMOVAL_COUNT_DESC, 0L); + metadataCacheAddCount = + getMetricsRegistry().newCounter(METADATA_CACHE_ADD_COUNT, METADATA_CACHE_ADD_COUNT_DESC, 0L); + } + + @Override + public void incrementCreateExportCount() { + createExportCount.incr(); + } + + @Override + public void updateCreateExportTime(long t) { + createExportTimeHisto.add(t); + } + + @Override + public void incrementCreateExportFailureCount() { + createExportFailureCount.incr(); + } + + @Override + public void updateCreateExportFailureTime(long t) { + createExportFailureTimeHisto.add(t); + } + + @Override + public void incrementAlterExportCount() { + alterExportCount.incr(); + } + + @Override + public void updateAlterExportTime(long t) { + alterExportTimeHisto.add(t); + } + + @Override + public void incrementAlterExportFailureCount() { + alterExportFailureCount.incr(); + } + + @Override + public void updateAlterExportFailureTime(long t) { + alterExportFailureTimeHisto.add(t); + } + + @Override + public void incrementCreateTableCount() { + createTableCount.incr(); + } + + @Override + public void incrementCreateViewCount() { + createViewCount.incr(); + } + + @Override + public void incrementCreateIndexCount() { + createIndexCount.incr(); + } + + @Override + public void incrementCreateSchemaCount() { + createSchemaCount.incr(); + } + + @Override + public void incrementCreateFunctionCount() { + createFunctionCount.incr(); + } + + @Override + public void incrementAlterAddColumnCount() { + alterAddColumnCount.incr(); + } + + @Override + public void incrementAlterDropColumnCount() { + alterDropColumnCount.incr(); + } + + @Override + public void incrementDropTableCount() { + dropTableCount.incr(); + } + + @Override + public void incrementDropViewCount() { + dropViewCount.incr(); + } + + @Override + public void incrementDropIndexCount() { + dropIndexCount.incr(); + } + + @Override + public 
void incrementDropSchemaCount() { + dropSchemaCount.incr(); + } + + @Override + public void incrementDropFunctionCount() { + dropFunctionCount.incr(); + } + + @Override + public void incrementMetadataCacheUsedSize(long estimatedSize) { + metadataCacheUsedSize.incr(estimatedSize); + } + + @Override + public void decrementMetadataCacheUsedSize(long estimatedSize) { + metadataCacheUsedSize.incr(-estimatedSize); + } + + @Override + public void incrementMetadataCacheHitCount() { + metadataCacheHitCount.incr(); + } + + @Override + public void incrementMetadataCacheMissCount() { + metadataCacheMissCount.incr(); + } + + @Override + public void incrementMetadataCacheEvictionCount() { + metadataCacheEvictionCount.incr(); + } + + @Override + public void incrementMetadataCacheRemovalCount() { + metadataCacheRemovalCount.incr(); + } + + @Override + public void incrementMetadataCacheAddCount() { + metadataCacheAddCount.incr(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java index 6d5e2a54805..bb59f843475 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,138 +22,125 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.SizedUtil; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; - import edu.umd.cs.findbugs.annotations.SuppressWarnings; /** - * A class that holds the guidePosts of a region and also allows combining the - * guidePosts of different regions when the GuidePostsInfo is formed for a table. + * A class that holds the guidePosts of a region and also allows combining the guidePosts of + * different regions when the GuidePostsInfo is formed for a table. */ public class GuidePostsInfo { - public final static GuidePostsInfo NO_GUIDEPOST = - new GuidePostsInfo(Collections. emptyList(), - new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), - Collections. emptyList(), 0, 0, Collections. emptyList()) { - @Override - public int getEstimatedSize() { - return 0; - } - }; - - public final static byte[] EMPTY_GUIDEPOST_KEY = ByteUtil.EMPTY_BYTE_ARRAY; - - /** - * the total number of guidePosts for the table combining all the guidePosts per region per cf. 
- */ - private final ImmutableBytesWritable guidePosts; - /** - * Maximum length of a guidePost collected - */ - private final int maxLength; - /** - * Number of guidePosts - */ - private final int guidePostsCount; - /** - * The rowCounts of each guidePost traversed - */ - private final long[] rowCounts; - /** - * The bytecounts of each guidePost traversed - */ - private final long[] byteCounts; - /** - * Estimate of byte size of this instance - */ - private final int estimatedSize; - /** - * The timestamps at which guideposts were created/updated - */ - private final long[] gpTimestamps; - - /** - * Constructor that creates GuidePostsInfo per region - * - * @param byteCounts - * The bytecounts of each guidePost traversed - * @param guidePosts - * Prefix byte encoded guidePosts - * @param rowCounts - * The rowCounts of each guidePost traversed - * @param maxLength - * Maximum length of a guidePost collected - * @param guidePostsCount - * Number of guidePosts - * @param updateTimes - * Times at which guidePosts were updated/created - */ - public GuidePostsInfo(List byteCounts, ImmutableBytesWritable guidePosts, List rowCounts, int maxLength, - int guidePostsCount, List updateTimes) { - this.guidePosts = new ImmutableBytesWritable(guidePosts); - this.maxLength = maxLength; - this.guidePostsCount = guidePostsCount; - this.rowCounts = Longs.toArray(rowCounts); - this.byteCounts = Longs.toArray(byteCounts); - this.gpTimestamps = Longs.toArray(updateTimes); - // Those Java equivalents of sizeof() in C/C++, mentioned on the Web, might be overkilled here. - int estimatedSize = SizedUtil.OBJECT_SIZE - + SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE + guidePosts.getLength() // guidePosts - + SizedUtil.INT_SIZE // maxLength - + SizedUtil.INT_SIZE // guidePostsCount - + SizedUtil.ARRAY_SIZE + this.rowCounts.length * SizedUtil.LONG_SIZE // rowCounts - + SizedUtil.ARRAY_SIZE + this.byteCounts.length * SizedUtil.LONG_SIZE // byteCounts - + SizedUtil.ARRAY_SIZE + this.gpTimestamps.length * SizedUtil.LONG_SIZE // gpTimestamps - + SizedUtil.INT_SIZE; // estimatedSize - this.estimatedSize = estimatedSize; - } - - public ImmutableBytesWritable getGuidePosts() { - return guidePosts; + public final static GuidePostsInfo NO_GUIDEPOST = new GuidePostsInfo( + Collections. emptyList(), new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), + Collections. emptyList(), 0, 0, Collections. emptyList()) { + @Override + public int getEstimatedSize() { + return 0; } + }; - public int getGuidePostsCount() { - return guidePostsCount; - } - - public int getMaxLength() { - return maxLength; - } + public final static byte[] EMPTY_GUIDEPOST_KEY = ByteUtil.EMPTY_BYTE_ARRAY; - public long[] getRowCounts() { - return rowCounts; - } + /** + * the total number of guidePosts for the table combining all the guidePosts per region per cf. 
+ */ + private final ImmutableBytesWritable guidePosts; + /** + * Maximum length of a guidePost collected + */ + private final int maxLength; + /** + * Number of guidePosts + */ + private final int guidePostsCount; + /** + * The rowCounts of each guidePost traversed + */ + private final long[] rowCounts; + /** + * The bytecounts of each guidePost traversed + */ + private final long[] byteCounts; + /** + * Estimate of byte size of this instance + */ + private final int estimatedSize; + /** + * The timestamps at which guideposts were created/updated + */ + private final long[] gpTimestamps; - public long[] getByteCounts() { - return byteCounts; - } + /** + * Constructor that creates GuidePostsInfo per region The bytecounts of each guidePost traversed + * Prefix byte encoded guidePosts The rowCounts of each guidePost traversed Maximum length of a + * guidePost collected Number of guidePosts Times at which guidePosts were updated/created + */ + public GuidePostsInfo(List byteCounts, ImmutableBytesWritable guidePosts, + List rowCounts, int maxLength, int guidePostsCount, List updateTimes) { + this.guidePosts = new ImmutableBytesWritable(guidePosts); + this.maxLength = maxLength; + this.guidePostsCount = guidePostsCount; + this.rowCounts = Longs.toArray(rowCounts); + this.byteCounts = Longs.toArray(byteCounts); + this.gpTimestamps = Longs.toArray(updateTimes); + // Those Java equivalents of sizeof() in C/C++, mentioned on the Web, might be overkilled here. + int estimatedSize = + SizedUtil.OBJECT_SIZE + SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE + guidePosts.getLength() // guidePosts + + SizedUtil.INT_SIZE // maxLength + + SizedUtil.INT_SIZE // guidePostsCount + + SizedUtil.ARRAY_SIZE + this.rowCounts.length * SizedUtil.LONG_SIZE // rowCounts + + SizedUtil.ARRAY_SIZE + this.byteCounts.length * SizedUtil.LONG_SIZE // byteCounts + + SizedUtil.ARRAY_SIZE + this.gpTimestamps.length * SizedUtil.LONG_SIZE // gpTimestamps + + SizedUtil.INT_SIZE; // estimatedSize + this.estimatedSize = estimatedSize; + } - public long[] getGuidePostTimestamps() { - return gpTimestamps; - } + public ImmutableBytesWritable getGuidePosts() { + return guidePosts; + } - public int getEstimatedSize() { - return estimatedSize; - } + public int getGuidePostsCount() { + return guidePostsCount; + } - @SuppressWarnings(value="EC_ARRAY_AND_NONARRAY", - justification="ImmutableBytesWritable DOES implement equals(byte])") - public boolean isEmptyGuidePost() { - return guidePosts.equals(EMPTY_GUIDEPOST_KEY) && guidePostsCount == 0 - && byteCounts.length == 1 && gpTimestamps.length == 1; - } + public int getMaxLength() { + return maxLength; + } - public static GuidePostsInfo createEmptyGuidePost(long byteCount, long guidePostUpdateTime) { - return new GuidePostsInfo(Collections.singletonList(byteCount), - new ImmutableBytesWritable(EMPTY_GUIDEPOST_KEY), Collections. emptyList(), 0, - 0, Collections. 
singletonList(guidePostUpdateTime)); - } - - public static boolean isEmptyGpsKey(byte[] key) { - return Bytes.equals(key, GuidePostsInfo.EMPTY_GUIDEPOST_KEY); - } + public long[] getRowCounts() { + return rowCounts; + } + + public long[] getByteCounts() { + return byteCounts; + } + + public long[] getGuidePostTimestamps() { + return gpTimestamps; + } + + public int getEstimatedSize() { + return estimatedSize; + } + + @SuppressWarnings(value = "EC_ARRAY_AND_NONARRAY", + justification = "ImmutableBytesWritable DOES implement equals(byte])") + public boolean isEmptyGuidePost() { + return guidePosts.equals(EMPTY_GUIDEPOST_KEY) && guidePostsCount == 0 && byteCounts.length == 1 + && gpTimestamps.length == 1; + } + + public static GuidePostsInfo createEmptyGuidePost(long byteCount, long guidePostUpdateTime) { + return new GuidePostsInfo(Collections.singletonList(byteCount), + new ImmutableBytesWritable(EMPTY_GUIDEPOST_KEY), Collections. emptyList(), 0, 0, + Collections. singletonList(guidePostUpdateTime)); + } + + public static boolean isEmptyGpsKey(byte[] key) { + return Bytes.equals(key, GuidePostsInfo.EMPTY_GUIDEPOST_KEY); + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java index 8e0bd296463..924e1af0ced 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfoBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,108 +32,109 @@ */ public class GuidePostsInfoBuilder { - private PrefixByteEncoder encoder; - private ImmutableBytesWritable lastRow; - private ImmutableBytesWritable guidePosts=new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - private int guidePostsCount; - - /** - * The rowCount that is flattened across the total number of guide posts. + private PrefixByteEncoder encoder; + private ImmutableBytesWritable lastRow; + private ImmutableBytesWritable guidePosts = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + private int guidePostsCount; + + /** + * The rowCount that is flattened across the total number of guide posts. 
+ */ + private long rowCount = 0; + + /** + * Maximum length of a guidePost collected + */ + private int maxLength; + private DataOutputStream output; + private TrustedByteArrayOutputStream stream; + private List rowCounts = new ArrayList(); + private List byteCounts = new ArrayList(); + private List guidePostsTimestamps = new ArrayList(); + + public boolean isEmpty() { + return rowCounts.size() == 0; + } + + public List getRowCounts() { + return rowCounts; + } + + public List getByteCounts() { + return byteCounts; + } + + public List getGuidePostsTimestamps() { + return guidePostsTimestamps; + } + + public int getMaxLength() { + return maxLength; + } + + public GuidePostsInfoBuilder() { + this.stream = new TrustedByteArrayOutputStream(1); + this.output = new DataOutputStream(stream); + this.encoder = new PrefixByteEncoder(); + lastRow = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + } + + public boolean addGuidePostOnCollection(ImmutableBytesWritable row, long byteCount, + long rowCount) { + /* + * When collecting guideposts, we don't care about the time at which guide post is being + * created/updated at. So passing it as 0 here. The update/create timestamp is important when we + * are reading guideposts out of the SYSTEM.STATS table. */ - private long rowCount = 0; - - /** - * Maximum length of a guidePost collected - */ - private int maxLength; - private DataOutputStream output; - private TrustedByteArrayOutputStream stream; - private List rowCounts = new ArrayList(); - private List byteCounts = new ArrayList(); - private List guidePostsTimestamps = new ArrayList(); - - public boolean isEmpty() { - return rowCounts.size() == 0; - } - - public List getRowCounts() { - return rowCounts; - } - - public List getByteCounts() { - return byteCounts; - } - - public List getGuidePostsTimestamps() { - return guidePostsTimestamps; - } - - public int getMaxLength() { - return maxLength; - } - public GuidePostsInfoBuilder(){ - this.stream = new TrustedByteArrayOutputStream(1); - this.output = new DataOutputStream(stream); - this.encoder=new PrefixByteEncoder(); - lastRow = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - } - - public boolean addGuidePostOnCollection(ImmutableBytesWritable row, long byteCount, - long rowCount) { - /* - * When collecting guideposts, we don't care about the time at which guide post is being - * created/updated at. So passing it as 0 here. The update/create timestamp is important - * when we are reading guideposts out of the SYSTEM.STATS table. - */ - return trackGuidePost(row, byteCount, rowCount, 0); - } - - /** - * Track a new guide post - * @param row number of rows in the guidepost - * @param byteCount number of bytes in the guidepost - * @param updateTimestamp time at which guidepost was created/updated. 
- */ - public boolean trackGuidePost(ImmutableBytesWritable row, long byteCount, long rowCount, - long updateTimestamp) { - if (row.getLength() != 0 && lastRow.compareTo(row) < 0) { - try { - encoder.encode(output, row.get(), row.getOffset(), row.getLength()); - rowCounts.add(rowCount); - byteCounts.add(byteCount); - guidePostsTimestamps.add(updateTimestamp); - this.guidePostsCount++; - this.maxLength = encoder.getMaxLength(); - lastRow = row; - return true; - } catch (IOException e) { - return false; - } - } + return trackGuidePost(row, byteCount, rowCount, 0); + } + + /** + * Track a new guide post + * @param row number of rows in the guidepost + * @param byteCount number of bytes in the guidepost + * @param updateTimestamp time at which guidepost was created/updated. + */ + public boolean trackGuidePost(ImmutableBytesWritable row, long byteCount, long rowCount, + long updateTimestamp) { + if (row.getLength() != 0 && lastRow.compareTo(row) < 0) { + try { + encoder.encode(output, row.get(), row.getOffset(), row.getLength()); + rowCounts.add(rowCount); + byteCounts.add(byteCount); + guidePostsTimestamps.add(updateTimestamp); + this.guidePostsCount++; + this.maxLength = encoder.getMaxLength(); + lastRow = row; + return true; + } catch (IOException e) { return false; + } } + return false; + } - public GuidePostsInfo build() { - this.guidePosts.set(stream.getBuffer(), 0, stream.size()); - GuidePostsInfo guidePostsInfo = new GuidePostsInfo(this.byteCounts, this.guidePosts, this.rowCounts, - this.maxLength, this.guidePostsCount, this.guidePostsTimestamps); - return guidePostsInfo; - } + public GuidePostsInfo build() { + this.guidePosts.set(stream.getBuffer(), 0, stream.size()); + GuidePostsInfo guidePostsInfo = new GuidePostsInfo(this.byteCounts, this.guidePosts, + this.rowCounts, this.maxLength, this.guidePostsCount, this.guidePostsTimestamps); + return guidePostsInfo; + } - public void incrementRowCount() { - this.rowCount++; - } + public void incrementRowCount() { + this.rowCount++; + } - public void resetRowCount() { - this.rowCount = 0; - } + public void resetRowCount() { + this.rowCount = 0; + } - public long getRowCount() { - return rowCount; - } - - public boolean hasGuidePosts() { - return guidePostsCount > 0; - } + public long getRowCount() { + return rowCount; + } + + public boolean hasGuidePosts() { + return guidePostsCount > 0; + } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsKey.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsKey.java index 3c158d5cb60..c79fb6bac7d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsKey.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/GuidePostsKey.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,63 +22,62 @@ import javax.annotation.Nonnull; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * * Key for the client-side caching of the guideposts information - * */ public final class GuidePostsKey { - private final int hashCode; - @Nonnull private final byte[] physicalName; - @Nonnull private final byte[] columnFamily; - - public GuidePostsKey(byte[] physicalName, byte[] columnFamily) { - Preconditions.checkNotNull(physicalName); - Preconditions.checkNotNull(columnFamily); - this.physicalName = physicalName; - this.columnFamily = columnFamily; - this.hashCode = computeHashCode(); - } - - public byte[] getPhysicalName() { - return physicalName; - } + private final int hashCode; + @Nonnull + private final byte[] physicalName; + @Nonnull + private final byte[] columnFamily; + + public GuidePostsKey(byte[] physicalName, byte[] columnFamily) { + Preconditions.checkNotNull(physicalName); + Preconditions.checkNotNull(columnFamily); + this.physicalName = physicalName; + this.columnFamily = columnFamily; + this.hashCode = computeHashCode(); + } + + public byte[] getPhysicalName() { + return physicalName; + } + + public byte[] getColumnFamily() { + return columnFamily; + } + + @Override + public int hashCode() { + return hashCode; + } - public byte[] getColumnFamily() { - return columnFamily; - } + private int computeHashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(columnFamily); + result = prime * result + Arrays.hashCode(physicalName); + return result; + } - @Override - public int hashCode() { - return hashCode; - } - - private int computeHashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Arrays.hashCode(columnFamily); - result = prime * result + Arrays.hashCode(physicalName); - return result; - } + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + GuidePostsKey other = (GuidePostsKey) obj; + if (other.hashCode != this.hashCode) return false; + if (!Arrays.equals(columnFamily, other.columnFamily)) return false; + if (!Arrays.equals(physicalName, other.physicalName)) return false; + return true; + } - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - GuidePostsKey other = (GuidePostsKey)obj; - if (other.hashCode != this.hashCode) return false; - if (!Arrays.equals(columnFamily, other.columnFamily)) return false; - if (!Arrays.equals(physicalName, other.physicalName)) return false; - return true; - } - - @Override - public String toString() { - return "GuidePostsKey[physicalName=" + Bytes.toStringBinary(physicalName) - + ",columnFamily=" + Bytes.toStringBinary(columnFamily) + "]"; - } + @Override + public String toString() { + return "GuidePostsKey[physicalName=" + Bytes.toStringBinary(physicalName) + ",columnFamily=" + + Bytes.toStringBinary(columnFamily) + "]"; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java index fabbad4967a..67be1322a4b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,133 +29,133 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.util.ByteUtil; - import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.phoenix.util.ByteUtil; /** * Singleton that is used to track state associated with regions undergoing stats collection at the * region server's JVM level. */ public class StatisticsCollectionRunTracker { - private static volatile StatisticsCollectionRunTracker INSTANCE; - private final Set updateStatsRegions = Collections - .newSetFromMap(new ConcurrentHashMap()); - private final Set compactingRegions = Collections - .newSetFromMap(new ConcurrentHashMap()); - private final ExecutorService executor; - - // Constants added for testing purposes - public static final long CONCURRENT_UPDATE_STATS_ROW_COUNT = -100l; - public static final long COMPACTION_UPDATE_STATS_ROW_COUNT = -200l; - - public static StatisticsCollectionRunTracker getInstance(Configuration config) { - StatisticsCollectionRunTracker result = INSTANCE; + private static volatile StatisticsCollectionRunTracker INSTANCE; + private final Set updateStatsRegions = + Collections.newSetFromMap(new ConcurrentHashMap()); + private final Set compactingRegions = + Collections.newSetFromMap(new ConcurrentHashMap()); + private final ExecutorService executor; + + // Constants added for testing purposes + public static final long CONCURRENT_UPDATE_STATS_ROW_COUNT = -100l; + public static final long COMPACTION_UPDATE_STATS_ROW_COUNT = -200l; + + public static StatisticsCollectionRunTracker getInstance(Configuration config) { + StatisticsCollectionRunTracker result = INSTANCE; + if (result == null) { + synchronized (StatisticsCollectionRunTracker.class) { + result = INSTANCE; if (result == null) { - synchronized (StatisticsCollectionRunTracker.class) { - result = INSTANCE; - if (result == null) { - INSTANCE = result = new StatisticsCollectionRunTracker(config); - } - } + INSTANCE = result = new StatisticsCollectionRunTracker(config); } - return result; + } } - - private StatisticsCollectionRunTracker(Configuration config) { - int poolSize = - config.getInt(QueryServices.STATS_SERVER_POOL_SIZE, - QueryServicesOptions.DEFAULT_STATS_POOL_SIZE); - ThreadFactoryBuilder builder = - new ThreadFactoryBuilder().setDaemon(true).setNameFormat( - "phoenix-update-statistics-%s"); - executor = Executors.newFixedThreadPool(poolSize, builder.build()); - } - - /** - * @param regionInfo for the region that should be marked as undergoing stats collection via - * major compaction. - * @return true if the region wasn't already marked for stats collection via compaction, false - * otherwise. 
- */ - public boolean addCompactingRegion(RegionInfo regionInfo) { - return compactingRegions.add(regionInfo); + return result; + } + + private StatisticsCollectionRunTracker(Configuration config) { + int poolSize = config.getInt(QueryServices.STATS_SERVER_POOL_SIZE, + QueryServicesOptions.DEFAULT_STATS_POOL_SIZE); + ThreadFactoryBuilder builder = + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("phoenix-update-statistics-%s"); + executor = Executors.newFixedThreadPool(poolSize, builder.build()); + } + + /** + * @param regionInfo for the region that should be marked as undergoing stats collection via major + * compaction. + * @return true if the region wasn't already marked for stats collection via compaction, false + * otherwise. + */ + public boolean addCompactingRegion(RegionInfo regionInfo) { + return compactingRegions.add(regionInfo); + } + + /** + * @param regionInfo for the region that should be unmarked as undergoing stats collection via + * major compaction. + * @return true if the region was marked for stats collection via compaction, false otherwise. + */ + public boolean removeCompactingRegion(RegionInfo regionInfo) { + return compactingRegions.remove(regionInfo); + } + + /** + * @param regionInfo for the region to check for. + * @return true if stats are being collected for the region via major compaction, false otherwise. + */ + public boolean areStatsBeingCollectedOnCompaction(RegionInfo regionInfo) { + return compactingRegions.contains(regionInfo); + } + + /** + * @param regionInfo for the region to run UPDATE STATISTICS command on. + * @return true if UPDATE STATISTICS wasn't already running on the region, false otherwise. + */ + public boolean addUpdateStatsCommandRegion(RegionInfo regionInfo, Set familySet) { + return updateStatsRegions.add(new ColumnFamilyRegionInfo(regionInfo, familySet)); + } + + /** + * @param regionInfo for the region to mark as not running UPDATE STATISTICS command on. + * @return true if UPDATE STATISTICS was running on the region, false otherwise. + */ + public boolean removeUpdateStatsCommandRegion(RegionInfo regionInfo, Set familySet) { + return updateStatsRegions.remove(new ColumnFamilyRegionInfo(regionInfo, familySet)); + } + + /** + * Enqueues the task for execution. + * @param + * @param c task to execute + */ + public Future runTask(Callable c) { + return executor.submit(c); + } + + private static class ColumnFamilyRegionInfo { + private final RegionInfo regionInfo; + private final Set familySet; + + public ColumnFamilyRegionInfo(RegionInfo regionInfo, Set familySet) { + this.regionInfo = regionInfo; + this.familySet = familySet; } - /** - * @param regionInfo for the region that should be unmarked as undergoing stats collection via - * major compaction. - * @return true if the region was marked for stats collection via compaction, false otherwise. - */ - public boolean removeCompactingRegion(RegionInfo regionInfo) { - return compactingRegions.remove(regionInfo); + public RegionInfo getRegionInfo() { + return regionInfo; } - /** - * @param regionInfo for the region to check for. - * @return true if stats are being collected for the region via major compaction, false - * otherwise. - */ - public boolean areStatsBeingCollectedOnCompaction(RegionInfo regionInfo) { - return compactingRegions.contains(regionInfo); + public Set getFamilySet() { + return familySet; } - /** - * @param regionInfo for the region to run UPDATE STATISTICS command on. 
- * @param familySet - * @return true if UPDATE STATISTICS wasn't already running on the region, false otherwise. - */ - public boolean addUpdateStatsCommandRegion(RegionInfo regionInfo, Set familySet) { - return updateStatsRegions.add(new ColumnFamilyRegionInfo(regionInfo, familySet)); + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ColumnFamilyRegionInfo)) { + return false; + } + + ColumnFamilyRegionInfo c = (ColumnFamilyRegionInfo) obj; + return c.getRegionInfo().equals(this.regionInfo) + && ByteUtil.match(this.familySet, c.getFamilySet()); } - /** - * @param regionInfo for the region to mark as not running UPDATE STATISTICS command on. - * @return true if UPDATE STATISTICS was running on the region, false otherwise. - */ - public boolean removeUpdateStatsCommandRegion(RegionInfo regionInfo, Set familySet) { - return updateStatsRegions.remove(new ColumnFamilyRegionInfo(regionInfo, familySet)); - } - - /** - * Enqueues the task for execution. - * @param - * @param c task to execute - */ - public Future runTask(Callable c) { - return executor.submit(c); - } - - private static class ColumnFamilyRegionInfo { - private final RegionInfo regionInfo; - private final Set familySet; - - public ColumnFamilyRegionInfo(RegionInfo regionInfo, Set familySet) { - this.regionInfo = regionInfo; - this.familySet = familySet; - } - - public RegionInfo getRegionInfo() { - return regionInfo; - } - - public Set getFamilySet() { - return familySet; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { return true; } - if (!(obj instanceof ColumnFamilyRegionInfo)) { return false; } - - ColumnFamilyRegionInfo c = (ColumnFamilyRegionInfo)obj; - return c.getRegionInfo().equals(this.regionInfo) && ByteUtil.match(this.familySet, c.getFamilySet()); - } - - @Override - public int hashCode() { - return this.getRegionInfo().hashCode(); - } + @Override + public int hashCode() { + return this.getRegionInfo().hashCode(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionScope.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionScope.java index b82aeae704b..63098ea424c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionScope.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionScope.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -18,9 +18,11 @@ package org.apache.phoenix.schema.stats; public enum StatisticsCollectionScope { - COLUMNS, INDEX, ALL; + COLUMNS, + INDEX, + ALL; - public static StatisticsCollectionScope getDefault() { - return ALL; - } + public static StatisticsCollectionScope getDefault() { + return ALL; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java index 8856925c6df..0636b026fab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,210 +44,230 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - /** * Simple utility class for managing multiple key parts of the statistic */ public class StatisticsUtil { - /** - * Indication to client that the statistics estimates were not - * calculated based on statistics but instead are based on row - * limits from the query. - */ - public static final long NOT_STATS_BASED_TS = 0; - - private static final Set DISABLE_STATS = Sets.newHashSetWithExpectedSize(8); - // TODO: make this declarative through new DISABLE_STATS column on SYSTEM.CATALOG table. - // Also useful would be a USE_CURRENT_TIME_FOR_STATS column on SYSTEM.CATALOG table. - static { - DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)); - DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME)); - DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); - DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)); - DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)); - DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,true)); - DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES,true)); - DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,true)); - DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,true)); - DISABLE_STATS.add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES,true)); - } - - private StatisticsUtil() { - // private ctor for utility classes - } - + /** + * Indication to client that the statistics estimates were not calculated based on statistics but + * instead are based on row limits from the query. 
+ */ + public static final long NOT_STATS_BASED_TS = 0; - /** Number of parts in our complex key */ - protected static final int NUM_KEY_PARTS = 3; - - public static byte[] getRowKey(byte[] table, ImmutableBytesWritable fam, byte[] guidePostStartKey) { - return getRowKey(table, fam, new ImmutableBytesWritable(guidePostStartKey,0,guidePostStartKey.length)); - } + private static final Set DISABLE_STATS = Sets.newHashSetWithExpectedSize(8); + // TODO: make this declarative through new DISABLE_STATS column on SYSTEM.CATALOG table. + // Also useful would be a USE_CURRENT_TIME_FOR_STATS column on SYSTEM.CATALOG table. + static { + DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)); + DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME)); + DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); + DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)); + DISABLE_STATS.add(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)); + DISABLE_STATS.add( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true)); + DISABLE_STATS.add( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES, true)); + DISABLE_STATS.add( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true)); + DISABLE_STATS + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, true)); + DISABLE_STATS + .add(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true)); + } - public static byte[] getRowKey(byte[] table, ImmutableBytesWritable fam, ImmutableBytesWritable guidePostStartKey) { - // always starts with the source table - int guidePostLength = guidePostStartKey.getLength(); - boolean hasGuidePost = guidePostLength > 0; - byte[] rowKey = new byte[table.length + fam.getLength() + guidePostLength + (hasGuidePost ? 2 : 1)]; - int offset = 0; - System.arraycopy(table, 0, rowKey, offset, table.length); - offset += table.length; - rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC - System.arraycopy(fam.get(), fam.getOffset(), rowKey, offset, fam.getLength()); - if (hasGuidePost) { - offset += fam.getLength(); - rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC - System.arraycopy(guidePostStartKey.get(), 0, rowKey, offset, guidePostLength); - } - return rowKey; + private StatisticsUtil() { + // private ctor for utility classes + } + + /** Number of parts in our complex key */ + protected static final int NUM_KEY_PARTS = 3; + + public static byte[] getRowKey(byte[] table, ImmutableBytesWritable fam, + byte[] guidePostStartKey) { + return getRowKey(table, fam, + new ImmutableBytesWritable(guidePostStartKey, 0, guidePostStartKey.length)); + } + + public static byte[] getRowKey(byte[] table, ImmutableBytesWritable fam, + ImmutableBytesWritable guidePostStartKey) { + // always starts with the source table + int guidePostLength = guidePostStartKey.getLength(); + boolean hasGuidePost = guidePostLength > 0; + byte[] rowKey = + new byte[table.length + fam.getLength() + guidePostLength + (hasGuidePost ? 
2 : 1)]; + int offset = 0; + System.arraycopy(table, 0, rowKey, offset, table.length); + offset += table.length; + rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC + System.arraycopy(fam.get(), fam.getOffset(), rowKey, offset, fam.getLength()); + if (hasGuidePost) { + offset += fam.getLength(); + rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC + System.arraycopy(guidePostStartKey.get(), 0, rowKey, offset, guidePostLength); } + return rowKey; + } - private static byte[] getStartKey(byte[] table, ImmutableBytesWritable fam) { - return getKey(table, fam, false); + private static byte[] getStartKey(byte[] table, ImmutableBytesWritable fam) { + return getKey(table, fam, false); + } + + private static byte[] getEndKey(byte[] table, ImmutableBytesWritable fam) { + byte[] key = getKey(table, fam, true); + ByteUtil.nextKey(key, key.length); + return key; + } + + private static byte[] getKey(byte[] table, ImmutableBytesWritable fam, + boolean terminateWithSeparator) { + // always starts with the source table and column family + byte[] rowKey = new byte[table.length + fam.getLength() + 1 + (terminateWithSeparator ? 1 : 0)]; + int offset = 0; + System.arraycopy(table, 0, rowKey, offset, table.length); + offset += table.length; + rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC + System.arraycopy(fam.get(), fam.getOffset(), rowKey, offset, fam.getLength()); + offset += fam.getLength(); + if (terminateWithSeparator) { + rowKey[offset] = QueryConstants.SEPARATOR_BYTE; } - - private static byte[] getEndKey(byte[] table, ImmutableBytesWritable fam) { - byte[] key = getKey(table, fam, true); - ByteUtil.nextKey(key, key.length); - return key; + return rowKey; + } + + public static byte[] getAdjustedKey(byte[] key, byte[] tableNameBytes, ImmutableBytesWritable cf, + boolean nextKey) { + if (Bytes.compareTo(key, ByteUtil.EMPTY_BYTE_ARRAY) != 0) { + return getRowKey(tableNameBytes, cf, key); } - - private static byte[] getKey(byte[] table, ImmutableBytesWritable fam, boolean terminateWithSeparator) { - // always starts with the source table and column family - byte[] rowKey = new byte[table.length + fam.getLength() + 1 + (terminateWithSeparator ? 
1 : 0)]; - int offset = 0; - System.arraycopy(table, 0, rowKey, offset, table.length); - offset += table.length; - rowKey[offset++] = QueryConstants.SEPARATOR_BYTE; // assumes stats table columns not DESC - System.arraycopy(fam.get(), fam.getOffset(), rowKey, offset, fam.getLength()); - offset += fam.getLength(); - if (terminateWithSeparator) { - rowKey[offset] = QueryConstants.SEPARATOR_BYTE; - } - return rowKey; + key = getKey(tableNameBytes, cf, nextKey); + if (nextKey) { + ByteUtil.nextKey(key, key.length); } + return key; + } - public static byte[] getAdjustedKey(byte[] key, byte[] tableNameBytes, ImmutableBytesWritable cf, boolean nextKey) { - if (Bytes.compareTo(key, ByteUtil.EMPTY_BYTE_ARRAY) != 0) { - return getRowKey(tableNameBytes, cf, key); + public static GuidePostsInfo readStatistics(Table statsHTable, GuidePostsKey key, + long clientTimeStamp) throws IOException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(key.getColumnFamily()); + byte[] tableNameBytes = key.getPhysicalName(); + byte[] startKey = getStartKey(tableNameBytes, ptr); + byte[] endKey = getEndKey(tableNameBytes, ptr); + Scan s = MetaDataUtil.newTableRowsScan(startKey, endKey, MetaDataProtocol.MIN_TABLE_TIMESTAMP, + clientTimeStamp); + s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); + s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES); + s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); + GuidePostsInfoBuilder guidePostsInfoBuilder = new GuidePostsInfoBuilder(); + Cell current = null; + GuidePostsInfo emptyGuidePost = null; + try (ResultScanner scanner = statsHTable.getScanner(s)) { + Result result = null; + while ((result = scanner.next()) != null) { + CellScanner cellScanner = result.cellScanner(); + long rowCount = 0; + long byteCount = 0; + while (cellScanner.advance()) { + current = cellScanner.current(); + if ( + Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), + current.getQualifierLength(), PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, 0, + PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES.length) + ) { + rowCount = PLong.INSTANCE.getCodec().decodeLong(current.getValueArray(), + current.getValueOffset(), SortOrder.getDefault()); + } else if ( + Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), + current.getQualifierLength(), PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, 0, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES.length) + ) { + byteCount = PLong.INSTANCE.getCodec().decodeLong(current.getValueArray(), + current.getValueOffset(), SortOrder.getDefault()); + } } - key = getKey(tableNameBytes, cf, nextKey); - if (nextKey) { - ByteUtil.nextKey(key, key.length); + if (current != null) { + int tableNameLength = tableNameBytes.length + 1; + int cfOffset = current.getRowOffset() + tableNameLength; + int cfLength = getVarCharLength(current.getRowArray(), cfOffset, + current.getRowLength() - tableNameLength); + ptr.set(current.getRowArray(), cfOffset, cfLength); + byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr); + byte[] newGPStartKey = + getGuidePostsInfoFromRowKey(tableNameBytes, cfName, result.getRow()); + boolean isEmptyGuidePost = GuidePostsInfo.isEmptyGpsKey(newGPStartKey); + // Use the timestamp of the cell as the time at which guidepost was + // created/updated + long guidePostUpdateTime = current.getTimestamp(); + if (isEmptyGuidePost) { + 
emptyGuidePost = GuidePostsInfo.createEmptyGuidePost(byteCount, guidePostUpdateTime); + } else { + guidePostsInfoBuilder.trackGuidePost(new ImmutableBytesWritable(newGPStartKey), + byteCount, rowCount, guidePostUpdateTime); + } } - return key; + } } + // We write a row with an empty KeyValue in the case that stats were generated but without + // enough data + // for any guideposts. If we have no rows, it means stats were never generated. + return current == null ? GuidePostsInfo.NO_GUIDEPOST + : guidePostsInfoBuilder.isEmpty() ? emptyGuidePost + : guidePostsInfoBuilder.build(); + } - public static GuidePostsInfo readStatistics(Table statsHTable, GuidePostsKey key, long clientTimeStamp) - throws IOException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(key.getColumnFamily()); - byte[] tableNameBytes = key.getPhysicalName(); - byte[] startKey = getStartKey(tableNameBytes, ptr); - byte[] endKey = getEndKey(tableNameBytes, ptr); - Scan s = MetaDataUtil.newTableRowsScan(startKey, endKey, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp); - s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); - s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES); - s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); - GuidePostsInfoBuilder guidePostsInfoBuilder = new GuidePostsInfoBuilder(); - Cell current = null; - GuidePostsInfo emptyGuidePost = null; - try (ResultScanner scanner = statsHTable.getScanner(s)) { - Result result = null; - while ((result = scanner.next()) != null) { - CellScanner cellScanner = result.cellScanner(); - long rowCount = 0; - long byteCount = 0; - while (cellScanner.advance()) { - current = cellScanner.current(); - if (Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), - current.getQualifierLength(), PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, 0, - PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES.length)) { - rowCount = PLong.INSTANCE.getCodec().decodeLong(current.getValueArray(), - current.getValueOffset(), SortOrder.getDefault()); - } else if (Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), - current.getQualifierLength(), PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, 0, - PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES.length)) { - byteCount = PLong.INSTANCE.getCodec().decodeLong(current.getValueArray(), - current.getValueOffset(), SortOrder.getDefault()); - } - } - if (current != null) { - int tableNameLength = tableNameBytes.length + 1; - int cfOffset = current.getRowOffset() + tableNameLength; - int cfLength = getVarCharLength(current.getRowArray(), cfOffset, - current.getRowLength() - tableNameLength); - ptr.set(current.getRowArray(), cfOffset, cfLength); - byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr); - byte[] newGPStartKey = getGuidePostsInfoFromRowKey(tableNameBytes, cfName, result.getRow()); - boolean isEmptyGuidePost = GuidePostsInfo.isEmptyGpsKey(newGPStartKey); - // Use the timestamp of the cell as the time at which guidepost was - // created/updated - long guidePostUpdateTime = current.getTimestamp(); - if (isEmptyGuidePost) { - emptyGuidePost = - GuidePostsInfo.createEmptyGuidePost(byteCount, guidePostUpdateTime); - } else { - guidePostsInfoBuilder.trackGuidePost( - new ImmutableBytesWritable(newGPStartKey), byteCount, rowCount, - guidePostUpdateTime); - } - } - } + public static long getGuidePostDepth(int 
guidepostPerRegion, long guidepostWidth, + TableDescriptor tableDesc) { + if (guidepostPerRegion > 0) { + long maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE; + if (tableDesc != null) { + long tableMaxFileSize = tableDesc.getMaxFileSize(); + if (tableMaxFileSize >= 0) { + maxFileSize = tableMaxFileSize; } - // We write a row with an empty KeyValue in the case that stats were generated but without enough data - // for any guideposts. If we have no rows, it means stats were never generated. - return current == null ? GuidePostsInfo.NO_GUIDEPOST : guidePostsInfoBuilder.isEmpty() ? emptyGuidePost : guidePostsInfoBuilder.build(); + } + return maxFileSize / guidepostPerRegion; + } else { + return guidepostWidth; } + } - public static long getGuidePostDepth(int guidepostPerRegion, long guidepostWidth, TableDescriptor tableDesc) { - if (guidepostPerRegion > 0) { - long maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE; - if (tableDesc != null) { - long tableMaxFileSize = tableDesc.getMaxFileSize(); - if (tableMaxFileSize >= 0) { - maxFileSize = tableMaxFileSize; - } - } - return maxFileSize / guidepostPerRegion; - } else { - return guidepostWidth; - } - } - - public static byte[] getGuidePostsInfoFromRowKey(byte[] tableNameBytes, byte[] fam, byte[] row) { - if (row.length > tableNameBytes.length + 1 + fam.length) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int gpOffset = tableNameBytes.length + 1 + fam.length + 1; - ptr.set(row, gpOffset, row.length - gpOffset); - return ByteUtil.copyKeyBytesIfNecessary(ptr); - } - return ByteUtil.EMPTY_BYTE_ARRAY; - } - - public static boolean isStatsEnabled(TableName tableName) { - return !DISABLE_STATS.contains(tableName); + public static byte[] getGuidePostsInfoFromRowKey(byte[] tableNameBytes, byte[] fam, byte[] row) { + if (row.length > tableNameBytes.length + 1 + fam.length) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int gpOffset = tableNameBytes.length + 1 + fam.length + 1; + ptr.set(row, gpOffset, row.length - gpOffset); + return ByteUtil.copyKeyBytesIfNecessary(ptr); } + return ByteUtil.EMPTY_BYTE_ARRAY; + } - public static void setScanAttributes(Scan scan, Map statsProps) { - scan.setCacheBlocks(false); - scan.readAllVersions(); - scan.setAttribute(ANALYZE_TABLE, TRUE_BYTES); - if (statsProps != null) { - Object gp_width = statsProps.get(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB); - if (gp_width != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_WIDTH_BYTES, PLong.INSTANCE.toBytes(gp_width)); - } - Object gp_per_region = statsProps.get(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB); - if (gp_per_region != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_PER_REGION, PInteger.INSTANCE.toBytes(gp_per_region)); - } - } + public static boolean isStatsEnabled(TableName tableName) { + return !DISABLE_STATS.contains(tableName); + } + + public static void setScanAttributes(Scan scan, Map statsProps) { + scan.setCacheBlocks(false); + scan.readAllVersions(); + scan.setAttribute(ANALYZE_TABLE, TRUE_BYTES); + if (statsProps != null) { + Object gp_width = statsProps.get(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB); + if (gp_width != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_WIDTH_BYTES, + PLong.INSTANCE.toBytes(gp_width)); + } + Object gp_per_region = statsProps.get(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB); + if (gp_per_region != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_PER_REGION, + 
PInteger.INSTANCE.toBytes(gp_per_region)); + } } - -} \ No newline at end of file + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatsCollectionDisabledOnServerException.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatsCollectionDisabledOnServerException.java index 05d66f6043c..b3e7b72b208 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatsCollectionDisabledOnServerException.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/stats/StatsCollectionDisabledOnServerException.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,18 +17,17 @@ */ package org.apache.phoenix.schema.stats; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; -import java.sql.SQLException; - public class StatsCollectionDisabledOnServerException extends SQLException { - private static final long serialVersionUID = 1L; - private static SQLExceptionCode code = SQLExceptionCode.STATS_COLLECTION_DISABLED_ON_SERVER; + private static final long serialVersionUID = 1L; + private static SQLExceptionCode code = SQLExceptionCode.STATS_COLLECTION_DISABLED_ON_SERVER; - public StatsCollectionDisabledOnServerException() { - super(new SQLExceptionInfo.Builder(code) - .setMessage(code.getMessage()).build().toString(), - code.getSQLState(), code.getErrorCode()); - } + public StatsCollectionDisabledOnServerException() { + super(new SQLExceptionInfo.Builder(code).setMessage(code.getMessage()).build().toString(), + code.getSQLState(), code.getErrorCode()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java index 17402565ae5..c0563522066 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/SystemTaskParams.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,174 +15,167 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.task; +import java.sql.Timestamp; + import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.schema.PTable; -import java.sql.Timestamp; - /** - * Task params to be used while upserting records in SYSTEM.TASK table. - * This POJO is mainly used while upserting(and committing) or generating - * upsert mutations plan in {@link Task} class + * Task params to be used while upserting records in SYSTEM.TASK table. This POJO is mainly used + * while upserting(and committing) or generating upsert mutations plan in {@link Task} class */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = {"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}, +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = { "EI_EXPOSE_REP", "EI_EXPOSE_REP2" }, justification = "endTs and startTs are not used for mutation") public class SystemTaskParams { - private final PhoenixConnection conn; - private final PTable.TaskType taskType; - private final String tenantId; - private final String schemaName; - private final String tableName; - private final String taskStatus; - private final String data; - private final Integer priority; - private final Timestamp startTs; - private final Timestamp endTs; - private final boolean accessCheckEnabled; - - public SystemTaskParams(PhoenixConnection conn, PTable.TaskType taskType, - String tenantId, String schemaName, String tableName, - String taskStatus, String data, Integer priority, Timestamp startTs, - Timestamp endTs, boolean accessCheckEnabled) { - this.conn = conn; - this.taskType = taskType; - this.tenantId = tenantId; - this.schemaName = schemaName; - this.tableName = tableName; - this.taskStatus = taskStatus; - this.data = data; - this.priority = priority; - this.startTs = startTs; - this.endTs = endTs; - this.accessCheckEnabled = accessCheckEnabled; - } - - public PhoenixConnection getConn() { - return conn; + private final PhoenixConnection conn; + private final PTable.TaskType taskType; + private final String tenantId; + private final String schemaName; + private final String tableName; + private final String taskStatus; + private final String data; + private final Integer priority; + private final Timestamp startTs; + private final Timestamp endTs; + private final boolean accessCheckEnabled; + + public SystemTaskParams(PhoenixConnection conn, PTable.TaskType taskType, String tenantId, + String schemaName, String tableName, String taskStatus, String data, Integer priority, + Timestamp startTs, Timestamp endTs, boolean accessCheckEnabled) { + this.conn = conn; + this.taskType = taskType; + this.tenantId = tenantId; + this.schemaName = schemaName; + this.tableName = tableName; + this.taskStatus = taskStatus; + this.data = data; + this.priority = priority; + this.startTs = startTs; + this.endTs = endTs; + this.accessCheckEnabled = accessCheckEnabled; + } + + public PhoenixConnection getConn() { + return conn; + } + + public PTable.TaskType getTaskType() { + return taskType; + } + + public String getTenantId() { + return tenantId; + } + + public String getSchemaName() { + return schemaName; + } + + public String getTableName() { + 
return tableName; + } + + public String getTaskStatus() { + return taskStatus; + } + + public String getData() { + return data; + } + + public Integer getPriority() { + return priority; + } + + public Timestamp getStartTs() { + return startTs; + } + + public Timestamp getEndTs() { + return endTs; + } + + public boolean isAccessCheckEnabled() { + return accessCheckEnabled; + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = { "EI_EXPOSE_REP", "EI_EXPOSE_REP2" }, + justification = "endTs and startTs are not used for mutation") + public static class SystemTaskParamsBuilder { + + private PhoenixConnection conn; + private PTable.TaskType taskType; + private String tenantId; + private String schemaName; + private String tableName; + private String taskStatus; + private String data; + private Integer priority; + private Timestamp startTs; + private Timestamp endTs; + private boolean accessCheckEnabled; + + public SystemTaskParamsBuilder setConn(PhoenixConnection conn) { + this.conn = conn; + return this; } - public PTable.TaskType getTaskType() { - return taskType; + public SystemTaskParamsBuilder setTaskType(PTable.TaskType taskType) { + this.taskType = taskType; + return this; } - public String getTenantId() { - return tenantId; + public SystemTaskParamsBuilder setTenantId(String tenantId) { + this.tenantId = tenantId; + return this; } - public String getSchemaName() { - return schemaName; + public SystemTaskParamsBuilder setSchemaName(String schemaName) { + this.schemaName = schemaName; + return this; } - public String getTableName() { - return tableName; + public SystemTaskParamsBuilder setTableName(String tableName) { + this.tableName = tableName; + return this; } - public String getTaskStatus() { - return taskStatus; + public SystemTaskParamsBuilder setTaskStatus(String taskStatus) { + this.taskStatus = taskStatus; + return this; } - public String getData() { - return data; + public SystemTaskParamsBuilder setData(String data) { + this.data = data; + return this; } - public Integer getPriority() { - return priority; + public SystemTaskParamsBuilder setPriority(Integer priority) { + this.priority = priority; + return this; } - public Timestamp getStartTs() { - return startTs; + public SystemTaskParamsBuilder setStartTs(Timestamp startTs) { + this.startTs = startTs; + return this; } - public Timestamp getEndTs() { - return endTs; + public SystemTaskParamsBuilder setEndTs(Timestamp endTs) { + this.endTs = endTs; + return this; } - public boolean isAccessCheckEnabled() { - return accessCheckEnabled; + public SystemTaskParamsBuilder setAccessCheckEnabled(boolean accessCheckEnabled) { + this.accessCheckEnabled = accessCheckEnabled; + return this; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = {"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}, - justification = "endTs and startTs are not used for mutation") - public static class SystemTaskParamsBuilder { - - private PhoenixConnection conn; - private PTable.TaskType taskType; - private String tenantId; - private String schemaName; - private String tableName; - private String taskStatus; - private String data; - private Integer priority; - private Timestamp startTs; - private Timestamp endTs; - private boolean accessCheckEnabled; - - public SystemTaskParamsBuilder setConn(PhoenixConnection conn) { - this.conn = conn; - return this; - } - - public SystemTaskParamsBuilder setTaskType(PTable.TaskType taskType) { - this.taskType = taskType; - return this; - } - - public SystemTaskParamsBuilder setTenantId(String tenantId) { - 
this.tenantId = tenantId; - return this; - } - - public SystemTaskParamsBuilder setSchemaName(String schemaName) { - this.schemaName = schemaName; - return this; - } - - public SystemTaskParamsBuilder setTableName(String tableName) { - this.tableName = tableName; - return this; - } - - public SystemTaskParamsBuilder setTaskStatus(String taskStatus) { - this.taskStatus = taskStatus; - return this; - } - - public SystemTaskParamsBuilder setData(String data) { - this.data = data; - return this; - } - - public SystemTaskParamsBuilder setPriority(Integer priority) { - this.priority = priority; - return this; - } - - public SystemTaskParamsBuilder setStartTs(Timestamp startTs) { - this.startTs = startTs; - return this; - } - - public SystemTaskParamsBuilder setEndTs(Timestamp endTs) { - this.endTs = endTs; - return this; - } - - public SystemTaskParamsBuilder setAccessCheckEnabled( - boolean accessCheckEnabled) { - this.accessCheckEnabled = accessCheckEnabled; - return this; - } - - public SystemTaskParams build() { - return new SystemTaskParams(conn, taskType, tenantId, schemaName, - tableName, taskStatus, data, priority, startTs, endTs, - accessCheckEnabled); - } + public SystemTaskParams build() { + return new SystemTaskParams(conn, taskType, tenantId, schemaName, tableName, taskStatus, data, + priority, startTs, endTs, accessCheckEnabled); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/Task.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/Task.java index bd1d912b03f..e4ee2a992c9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/Task.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/task/Task.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,19 @@ */ package org.apache.phoenix.schema.task; +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + import org.apache.commons.lang3.StringUtils; -import org.apache.phoenix.schema.transform.SystemTransformRecord; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Mutation; @@ -32,6 +42,8 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.transform.SystemTransformRecord; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.QueryUtil; @@ -39,386 +51,346 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - import edu.umd.cs.findbugs.annotations.SuppressWarnings; -@SuppressWarnings(value="SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING", - justification="Not possible to avoid") +@SuppressWarnings(value = "SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING", + justification = "Not possible to avoid") public class Task { - public static final Logger LOGGER = LoggerFactory.getLogger(Task.class); - - protected static List executeStatementAndGetTaskMutations( - PhoenixConnection conn, PreparedStatement stmt) - throws SQLException { - stmt.execute(); - // retrieve mutations for SYSTEM.TASK upsert query - Iterator>> iterator = - conn.getMutationState().toMutations(); - return iterator.next().getSecond(); - } - - protected static PreparedStatement setValuesToAddTaskPS(PreparedStatement stmt, PTable.TaskType taskType, - String tenantId, String schemaName, String tableName, String taskStatus, String data, - Integer priority, Timestamp startTs, Timestamp endTs) throws SQLException { - stmt.setByte(1, taskType.getSerializedValue()); - if (tenantId != null) { - stmt.setString(2, tenantId); - } else { - stmt.setNull(2, Types.VARCHAR); - } - if (schemaName != null) { - stmt.setString(3, schemaName); - } else { - stmt.setNull(3, Types.VARCHAR); - } - stmt.setString(4, tableName); - if (taskStatus != null) { - stmt.setString(5, taskStatus); - } else { - stmt.setString(5, PTable.TaskStatus.CREATED.toString()); - } - if (priority != null) { - stmt.setInt(6, priority); - } else { - byte defaultPri = 4; - stmt.setInt(6, defaultPri); - } - if (startTs == null) { - startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - } - stmt.setTimestamp(7, startTs); - if (endTs != null) { - stmt.setTimestamp(8, endTs); - } else { - if 
(taskStatus != null && taskStatus.equals(PTable.TaskStatus.COMPLETED.toString())) { - endTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - stmt.setTimestamp(8, endTs); - } else { - stmt.setNull(8, Types.TIMESTAMP); - } - } - if (data != null) { - stmt.setString(9, data); - } else { - stmt.setNull(9, Types.VARCHAR); - } - return stmt; - } - - protected static PreparedStatement addTaskAndGetStatement( - SystemTaskParams systemTaskParams, PhoenixConnection connection) - throws IOException { - PreparedStatement stmt; - try { - stmt = connection.prepareStatement("UPSERT INTO " + - PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + " ( " + - PhoenixDatabaseMetaData.TASK_TYPE + ", " + - PhoenixDatabaseMetaData.TENANT_ID + ", " + - PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + - PhoenixDatabaseMetaData.TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TASK_STATUS + ", " + - PhoenixDatabaseMetaData.TASK_PRIORITY + ", " + - PhoenixDatabaseMetaData.TASK_TS + ", " + - PhoenixDatabaseMetaData.TASK_END_TS + ", " + - PhoenixDatabaseMetaData.TASK_DATA + - " ) VALUES(?,?,?,?,?,?,?,?,?)"); - stmt = setValuesToAddTaskPS(stmt, systemTaskParams.getTaskType(), - systemTaskParams.getTenantId(), - systemTaskParams.getSchemaName(), - systemTaskParams.getTableName(), - systemTaskParams.getTaskStatus(), systemTaskParams.getData(), - systemTaskParams.getPriority(), systemTaskParams.getStartTs(), - systemTaskParams.getEndTs()); - LOGGER.info("Adding task type: {} , tableName: {} , taskStatus: {}" - + " , startTs: {} , endTs: {}", systemTaskParams.getTaskType(), - systemTaskParams.getTableName(), - systemTaskParams.getTaskStatus(), systemTaskParams.getStartTs(), - systemTaskParams.getEndTs()); - } catch (SQLException e) { - throw new IOException(e); - } - return stmt; - } - - public static List getMutationsForAddTask( - SystemTaskParams systemTaskParams) - throws IOException, SQLException { - PhoenixConnection curConn = systemTaskParams.getConn(); - Configuration conf = curConn.getQueryServices().getConfiguration(); - // create new connection as we do not want to mix up mutationState - // with existing connection - try (PhoenixConnection newConnection = - QueryUtil.getConnectionOnServer(curConn.getClientInfo(), conf) - .unwrap(PhoenixConnection.class)) { - try (PreparedStatement statement = addTaskAndGetStatement( - systemTaskParams, newConnection)) { - return executeStatementAndGetTaskMutations(newConnection, - statement); - } - } + public static final Logger LOGGER = LoggerFactory.getLogger(Task.class); + + protected static List executeStatementAndGetTaskMutations(PhoenixConnection conn, + PreparedStatement stmt) throws SQLException { + stmt.execute(); + // retrieve mutations for SYSTEM.TASK upsert query + Iterator>> iterator = conn.getMutationState().toMutations(); + return iterator.next().getSecond(); + } + + protected static PreparedStatement setValuesToAddTaskPS(PreparedStatement stmt, + PTable.TaskType taskType, String tenantId, String schemaName, String tableName, + String taskStatus, String data, Integer priority, Timestamp startTs, Timestamp endTs) + throws SQLException { + stmt.setByte(1, taskType.getSerializedValue()); + if (tenantId != null) { + stmt.setString(2, tenantId); + } else { + stmt.setNull(2, Types.VARCHAR); + } + if (schemaName != null) { + stmt.setString(3, schemaName); + } else { + stmt.setNull(3, Types.VARCHAR); + } + stmt.setString(4, tableName); + if (taskStatus != null) { + stmt.setString(5, taskStatus); + } else { + stmt.setString(5, PTable.TaskStatus.CREATED.toString()); + } + if 
(priority != null) { + stmt.setInt(6, priority); + } else { + byte defaultPri = 4; + stmt.setInt(6, defaultPri); + } + if (startTs == null) { + startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + } + stmt.setTimestamp(7, startTs); + if (endTs != null) { + stmt.setTimestamp(8, endTs); + } else { + if (taskStatus != null && taskStatus.equals(PTable.TaskStatus.COMPLETED.toString())) { + endTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + stmt.setTimestamp(8, endTs); + } else { + stmt.setNull(8, Types.TIMESTAMP); + } + } + if (data != null) { + stmt.setString(9, data); + } else { + stmt.setNull(9, Types.VARCHAR); + } + return stmt; + } + + protected static PreparedStatement addTaskAndGetStatement(SystemTaskParams systemTaskParams, + PhoenixConnection connection) throws IOException { + PreparedStatement stmt; + try { + stmt = connection.prepareStatement("UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + + " ( " + PhoenixDatabaseMetaData.TASK_TYPE + ", " + PhoenixDatabaseMetaData.TENANT_ID + + ", " + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + PhoenixDatabaseMetaData.TABLE_NAME + + ", " + PhoenixDatabaseMetaData.TASK_STATUS + ", " + PhoenixDatabaseMetaData.TASK_PRIORITY + + ", " + PhoenixDatabaseMetaData.TASK_TS + ", " + PhoenixDatabaseMetaData.TASK_END_TS + ", " + + PhoenixDatabaseMetaData.TASK_DATA + " ) VALUES(?,?,?,?,?,?,?,?,?)"); + stmt = setValuesToAddTaskPS(stmt, systemTaskParams.getTaskType(), + systemTaskParams.getTenantId(), systemTaskParams.getSchemaName(), + systemTaskParams.getTableName(), systemTaskParams.getTaskStatus(), + systemTaskParams.getData(), systemTaskParams.getPriority(), systemTaskParams.getStartTs(), + systemTaskParams.getEndTs()); + LOGGER.info( + "Adding task type: {} , tableName: {} , taskStatus: {}" + " , startTs: {} , endTs: {}", + systemTaskParams.getTaskType(), systemTaskParams.getTableName(), + systemTaskParams.getTaskStatus(), systemTaskParams.getStartTs(), + systemTaskParams.getEndTs()); + } catch (SQLException e) { + throw new IOException(e); + } + return stmt; + } + + public static List getMutationsForAddTask(SystemTaskParams systemTaskParams) + throws IOException, SQLException { + PhoenixConnection curConn = systemTaskParams.getConn(); + Configuration conf = curConn.getQueryServices().getConfiguration(); + // create new connection as we do not want to mix up mutationState + // with existing connection + try (PhoenixConnection newConnection = QueryUtil + .getConnectionOnServer(curConn.getClientInfo(), conf).unwrap(PhoenixConnection.class)) { + try (PreparedStatement statement = addTaskAndGetStatement(systemTaskParams, newConnection)) { + return executeStatementAndGetTaskMutations(newConnection, statement); + } + } + } + + /** + * Invoke SYSTEM.TASK metadata coprocessor endpoint + * @param connection Phoenix Connection + * @param rowKey key corresponding to SYSTEM.TASK mutation + * @param callable used to invoke the coprocessor endpoint to upsert records in SYSTEM.TASK + * @return result of invoking the coprocessor endpoint + * @throws SQLException If something goes wrong while executing co + */ + public static MetaDataMutationResult taskMetaDataCoprocessorExec( + final PhoenixConnection connection, final byte[] rowKey, + final Batch.Call callable) throws SQLException { + TableName tableName = SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, + connection.getQueryServices().getProps()); + try (Table table = connection.getQueryServices().getTable(tableName.getName())) { + final Map results 
= + table.coprocessorService(TaskMetaDataService.class, rowKey, rowKey, callable); + assert results.size() == 1; + MetaDataResponse result = results.values().iterator().next(); + return MetaDataMutationResult.constructFromProto(result); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } catch (Throwable t) { + throw new SQLException(t); + } + } + + private static List populateTasks(Connection connection, String taskQuery) + throws SQLException { + List result = new ArrayList<>(); + try (PreparedStatement taskStatement = connection.prepareStatement(taskQuery); + ResultSet rs = taskStatement.executeQuery()) { + while (rs.next()) { + // delete child views only if the parent table is deleted from the system catalog + TaskRecord taskRecord = parseResult(rs); + result.add(taskRecord); + } + } + return result; + } + + public static List queryTaskTable(Connection connection, Timestamp ts, String schema, + String tableName, PTable.TaskType taskType, String tenantId, String indexName) + throws SQLException { + String taskQuery = "SELECT " + PhoenixDatabaseMetaData.TASK_TS + ", " + + PhoenixDatabaseMetaData.TENANT_ID + ", " + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + + PhoenixDatabaseMetaData.TABLE_NAME + ", " + PhoenixDatabaseMetaData.TASK_STATUS + ", " + + PhoenixDatabaseMetaData.TASK_TYPE + ", " + PhoenixDatabaseMetaData.TASK_PRIORITY + ", " + + PhoenixDatabaseMetaData.TASK_DATA + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME; + taskQuery += " WHERE " + PhoenixDatabaseMetaData.TABLE_NAME + " ='" + tableName + "' AND " + + PhoenixDatabaseMetaData.TASK_TYPE + "=" + taskType.getSerializedValue(); + if (!Strings.isNullOrEmpty(tenantId)) { + taskQuery += " AND " + PhoenixDatabaseMetaData.TENANT_ID + "='" + tenantId + "' "; } - /** - * Invoke SYSTEM.TASK metadata coprocessor endpoint - * - * @param connection Phoenix Connection - * @param rowKey key corresponding to SYSTEM.TASK mutation - * @param callable used to invoke the coprocessor endpoint to upsert - * records in SYSTEM.TASK - * @return result of invoking the coprocessor endpoint - * @throws SQLException If something goes wrong while executing co - */ - public static MetaDataMutationResult taskMetaDataCoprocessorExec( - final PhoenixConnection connection, final byte[] rowKey, - final Batch.Call callable) - throws SQLException { - TableName tableName = SchemaUtil.getPhysicalName( - PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, - connection.getQueryServices().getProps()); - try (Table table = - connection.getQueryServices().getTable(tableName.getName())) { - final Map results = - table.coprocessorService(TaskMetaDataService.class, rowKey, - rowKey, callable); - assert results.size() == 1; - MetaDataResponse result = results.values().iterator().next(); - return MetaDataMutationResult.constructFromProto(result); - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } catch (Throwable t) { - throw new SQLException(t); - } + if (!Strings.isNullOrEmpty(schema)) { + taskQuery += " AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='" + schema + "' "; } - private static List populateTasks(Connection connection, String taskQuery) - throws SQLException { - List result = new ArrayList<>(); - try (PreparedStatement taskStatement = connection.prepareStatement(taskQuery); - ResultSet rs = taskStatement.executeQuery()) { - while (rs.next()) { - // delete child views only if the parent table is deleted from the system catalog - TaskRecord taskRecord = parseResult(rs); - result.add(taskRecord); - } - } - return 
result; - } - - public static List queryTaskTable(Connection connection, Timestamp ts, - String schema, String tableName, - PTable.TaskType taskType, String tenantId, String indexName) - throws SQLException { - String taskQuery = "SELECT " + - PhoenixDatabaseMetaData.TASK_TS + ", " + - PhoenixDatabaseMetaData.TENANT_ID + ", " + - PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + - PhoenixDatabaseMetaData.TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TASK_STATUS + ", " + - PhoenixDatabaseMetaData.TASK_TYPE + ", " + - PhoenixDatabaseMetaData.TASK_PRIORITY + ", " + - PhoenixDatabaseMetaData.TASK_DATA + - " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME; - taskQuery += " WHERE " + - PhoenixDatabaseMetaData.TABLE_NAME + " ='" + tableName + "' AND " + - PhoenixDatabaseMetaData.TASK_TYPE + "=" + taskType.getSerializedValue(); - if (!Strings.isNullOrEmpty(tenantId)) { - taskQuery += " AND " + PhoenixDatabaseMetaData.TENANT_ID + "='" + tenantId + "' "; - } - - if (!Strings.isNullOrEmpty(schema)) { - taskQuery += " AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='" + schema + "' "; - } - - if (!Strings.isNullOrEmpty(indexName)) { - taskQuery += " AND " + PhoenixDatabaseMetaData.TASK_DATA + " LIKE '%" + indexName + "%'"; - } - - List taskRecords = populateTasks(connection, taskQuery); - List result = new ArrayList(); - if (ts != null) { - // Adding TASK_TS to the where clause did not work. It returns empty when directly querying with the timestamp. - for (TaskRecord tr : taskRecords) { - if (tr.getTimeStamp().equals(ts)) { - result.add(tr); - } - } - } else { - result = taskRecords; - } - - return result; - } - - public static List queryTaskTable(Connection connection, String[] excludedTaskStatus) - throws SQLException { - String taskQuery = "SELECT " + - PhoenixDatabaseMetaData.TASK_TS + ", " + - PhoenixDatabaseMetaData.TENANT_ID + ", " + - PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + - PhoenixDatabaseMetaData.TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TASK_STATUS + ", " + - PhoenixDatabaseMetaData.TASK_TYPE + ", " + - PhoenixDatabaseMetaData.TASK_PRIORITY + ", " + - PhoenixDatabaseMetaData.TASK_DATA + - " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME; - if (excludedTaskStatus != null && excludedTaskStatus.length > 0) { - taskQuery += " WHERE " + PhoenixDatabaseMetaData.TASK_STATUS + " IS NULL OR " + - PhoenixDatabaseMetaData.TASK_STATUS + " NOT IN ("; - String[] values = new String[excludedTaskStatus.length]; - for (int i=0; i < excludedTaskStatus.length; i++) { - values[i] = String.format("'%s'", excludedTaskStatus[i].trim()); - } - - //Delimit with comma - taskQuery += String.join(",", values); - taskQuery += ")"; - } + if (!Strings.isNullOrEmpty(indexName)) { + taskQuery += " AND " + PhoenixDatabaseMetaData.TASK_DATA + " LIKE '%" + indexName + "%'"; + } - return populateTasks(connection, taskQuery); - } - - public static TaskRecord parseResult(ResultSet rs) throws SQLException { - TaskRecord taskRecord = new TaskRecord(); - taskRecord.setTimeStamp(rs.getTimestamp(PhoenixDatabaseMetaData.TASK_TS)); - taskRecord.setTenantId(rs.getString(PhoenixDatabaseMetaData.TENANT_ID)); - taskRecord.setTenantIdBytes(rs.getBytes(PhoenixDatabaseMetaData.TENANT_ID)); - taskRecord.setSchemaName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); - taskRecord.setSchemaNameBytes(rs.getBytes(PhoenixDatabaseMetaData.TABLE_SCHEM)); - taskRecord.setTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); - taskRecord.setTableNameBytes(rs.getBytes(PhoenixDatabaseMetaData.TABLE_NAME)); - 
taskRecord.setStatus(rs.getString(PhoenixDatabaseMetaData.TASK_STATUS)); - taskRecord.setTaskType(PTable.TaskType.fromSerializedValue(rs.getByte(PhoenixDatabaseMetaData.TASK_TYPE ))); - taskRecord.setPriority(rs.getInt(PhoenixDatabaseMetaData.TASK_PRIORITY)); - taskRecord.setData(rs.getString(PhoenixDatabaseMetaData.TASK_DATA)); - return taskRecord; - } - - public static class TaskRecord { - private String tenantId; - private Timestamp timeStamp; - private byte[] tenantIdBytes; - private String schemaName= null; - private byte[] schemaNameBytes; - private String tableName = null; - private byte[] tableNameBytes; - - private PTable.TaskType taskType; - private String status; - private int priority; - private String data; - - public String getTenantId() { - return tenantId; - } + List taskRecords = populateTasks(connection, taskQuery); + List result = new ArrayList(); + if (ts != null) { + // Adding TASK_TS to the where clause did not work. It returns empty when directly querying + // with the timestamp. + for (TaskRecord tr : taskRecords) { + if (tr.getTimeStamp().equals(ts)) { + result.add(tr); + } + } + } else { + result = taskRecords; + } - public void setTenantId(String tenantId) { - this.tenantId = tenantId; - } + return result; + } + + public static List queryTaskTable(Connection connection, String[] excludedTaskStatus) + throws SQLException { + String taskQuery = "SELECT " + PhoenixDatabaseMetaData.TASK_TS + ", " + + PhoenixDatabaseMetaData.TENANT_ID + ", " + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + + PhoenixDatabaseMetaData.TABLE_NAME + ", " + PhoenixDatabaseMetaData.TASK_STATUS + ", " + + PhoenixDatabaseMetaData.TASK_TYPE + ", " + PhoenixDatabaseMetaData.TASK_PRIORITY + ", " + + PhoenixDatabaseMetaData.TASK_DATA + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME; + if (excludedTaskStatus != null && excludedTaskStatus.length > 0) { + taskQuery += " WHERE " + PhoenixDatabaseMetaData.TASK_STATUS + " IS NULL OR " + + PhoenixDatabaseMetaData.TASK_STATUS + " NOT IN ("; + String[] values = new String[excludedTaskStatus.length]; + for (int i = 0; i < excludedTaskStatus.length; i++) { + values[i] = String.format("'%s'", excludedTaskStatus[i].trim()); + } + + // Delimit with comma + taskQuery += String.join(",", values); + taskQuery += ")"; + } - public Timestamp getTimeStamp() { - return timeStamp; - } + return populateTasks(connection, taskQuery); + } + + public static TaskRecord parseResult(ResultSet rs) throws SQLException { + TaskRecord taskRecord = new TaskRecord(); + taskRecord.setTimeStamp(rs.getTimestamp(PhoenixDatabaseMetaData.TASK_TS)); + taskRecord.setTenantId(rs.getString(PhoenixDatabaseMetaData.TENANT_ID)); + taskRecord.setTenantIdBytes(rs.getBytes(PhoenixDatabaseMetaData.TENANT_ID)); + taskRecord.setSchemaName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); + taskRecord.setSchemaNameBytes(rs.getBytes(PhoenixDatabaseMetaData.TABLE_SCHEM)); + taskRecord.setTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); + taskRecord.setTableNameBytes(rs.getBytes(PhoenixDatabaseMetaData.TABLE_NAME)); + taskRecord.setStatus(rs.getString(PhoenixDatabaseMetaData.TASK_STATUS)); + taskRecord.setTaskType( + PTable.TaskType.fromSerializedValue(rs.getByte(PhoenixDatabaseMetaData.TASK_TYPE))); + taskRecord.setPriority(rs.getInt(PhoenixDatabaseMetaData.TASK_PRIORITY)); + taskRecord.setData(rs.getString(PhoenixDatabaseMetaData.TASK_DATA)); + return taskRecord; + } + + public static class TaskRecord { + private String tenantId; + private Timestamp timeStamp; + private byte[] 
tenantIdBytes; + private String schemaName = null; + private byte[] schemaNameBytes; + private String tableName = null; + private byte[] tableNameBytes; + + private PTable.TaskType taskType; + private String status; + private int priority; + private String data; + + public String getTenantId() { + return tenantId; + } - public void setTimeStamp(Timestamp timeStamp) { - this.timeStamp = timeStamp; - } + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } - public byte[] getTenantIdBytes() { - return tenantIdBytes; - } + public Timestamp getTimeStamp() { + return timeStamp; + } - public void setTenantIdBytes(byte[] tenantIdBytes) { - this.tenantIdBytes = tenantIdBytes; - } + public void setTimeStamp(Timestamp timeStamp) { + this.timeStamp = timeStamp; + } - public String getSchemaName() { - return schemaName; - } + public byte[] getTenantIdBytes() { + return tenantIdBytes; + } - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } + public void setTenantIdBytes(byte[] tenantIdBytes) { + this.tenantIdBytes = tenantIdBytes; + } - public byte[] getSchemaNameBytes() { - return schemaNameBytes; - } + public String getSchemaName() { + return schemaName; + } - public void setSchemaNameBytes(byte[] schemaNameBytes) { - this.schemaNameBytes = schemaNameBytes; - } + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } - public String getTableName() { - return tableName; - } + public byte[] getSchemaNameBytes() { + return schemaNameBytes; + } - public void setTableName(String tableName) { - this.tableName = tableName; - } + public void setSchemaNameBytes(byte[] schemaNameBytes) { + this.schemaNameBytes = schemaNameBytes; + } - public byte[] getTableNameBytes() { - return tableNameBytes; - } + public String getTableName() { + return tableName; + } - public void setTableNameBytes(byte[] tableNameBytes) { - this.tableNameBytes = tableNameBytes; - } + public void setTableName(String tableName) { + this.tableName = tableName; + } - public String getData() { - if (data == null) { - return ""; - } - return data; - } + public byte[] getTableNameBytes() { + return tableNameBytes; + } - public int getPriority() { - return priority; - } + public void setTableNameBytes(byte[] tableNameBytes) { + this.tableNameBytes = tableNameBytes; + } - public void setPriority(int priority) { - this.priority = priority; - } + public String getData() { + if (data == null) { + return ""; + } + return data; + } - public void setData(String data) { - this.data = data; - } + public int getPriority() { + return priority; + } - public String getStatus() { - return status; - } + public void setPriority(int priority) { + this.priority = priority; + } - public void setStatus(String status) { - this.status = status; - } + public void setData(String data) { + this.data = data; + } - public PTable.TaskType getTaskType() { - return taskType; - } + public String getStatus() { + return status; + } - public void setTaskType(PTable.TaskType taskType) { - this.taskType = taskType; - } + public void setStatus(String status) { + this.status = status; + } - public boolean isMatchingTask(SystemTransformRecord transformRecord) { - if (getTaskType() != PTable.TaskType.TRANSFORM_MONITOR) { - return false; - } - if (StringUtils.equals(transformRecord.getLogicalTableName(), getTableName()) - && StringUtils.equals(transformRecord.getTenantId(), getTenantId()) - && StringUtils.equals(transformRecord.getSchemaName(), getSchemaName())) { - return true; - } - return false; - } + 
public PTable.TaskType getTaskType() { + return taskType; + } + + public void setTaskType(PTable.TaskType taskType) { + this.taskType = taskType; + } + + public boolean isMatchingTask(SystemTransformRecord transformRecord) { + if (getTaskType() != PTable.TaskType.TRANSFORM_MONITOR) { + return false; + } + if ( + StringUtils.equals(transformRecord.getLogicalTableName(), getTableName()) + && StringUtils.equals(transformRecord.getTenantId(), getTenantId()) + && StringUtils.equals(transformRecord.getSchemaName(), getSchemaName()) + ) { + return true; + } + return false; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaExtractionProcessor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaExtractionProcessor.java index ea69320f36b..388c7032932 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaExtractionProcessor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaExtractionProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,26 @@ */ package org.apache.phoenix.schema.tool; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY; +import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; +import static org.apache.phoenix.util.MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + import org.apache.commons.lang3.math.NumberUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; @@ -41,680 +61,678 @@ import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Collections; -import java.util.HashSet; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.stream.Collectors; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY; -import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; -import static org.apache.phoenix.util.MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES; - public class SchemaExtractionProcessor implements SchemaProcessor { - Map defaultProps = new HashMap<>(); - Map 
definedProps = new HashMap<>(); - - private static final String CREATE_TABLE = "CREATE TABLE %s"; - private static final String CREATE_INDEX = "CREATE %sINDEX %s ON %s"; - private static final String CREATE_VIEW = "CREATE VIEW %s%s AS SELECT * FROM %s%s"; - private static final List QUOTE_PROPERTIES = - //Copying here, because this only exists in Hbase 2.5+ - Arrays.asList(new String[] {"hbase.store.file-tracker.impl"}); - - private PTable table; - private Configuration conf; - private String ddl = null; - private String tenantId; - private boolean shouldGenerateWithDefaults = false; - private boolean isPhoenixTTLEnabled = true; - - public SchemaExtractionProcessor(String tenantId, Configuration conf, - String pSchemaName, String pTableName) - throws SQLException { - this.tenantId = tenantId; - this.conf = conf; - this.table = getPTable(pSchemaName, pTableName); - this.isPhoenixTTLEnabled = conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); - } - - public SchemaExtractionProcessor(String tenantId, Configuration conf, - PTable pTable, boolean shouldGenerateWithDefaults) - throws SQLException { - this.tenantId = tenantId; - this.conf = conf; - this.table = pTable; - this.shouldGenerateWithDefaults = shouldGenerateWithDefaults; - this.isPhoenixTTLEnabled = conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); - } - - @Override - public String process() throws Exception { - if (ddl != null) { - return ddl; - } - if (this.table.getType().equals(PTableType.TABLE)) { - ddl = extractCreateTableDDL(this.table); - } else if (this.table.getType().equals(PTableType.INDEX)) { - ddl = extractCreateIndexDDL(this.table); - } else if (this.table.getType().equals(PTableType.VIEW)) { - ddl = extractCreateViewDDL(this.table); - } - return ddl; - } - - protected String extractCreateIndexDDL(PTable indexPTable) - throws SQLException, IOException { - String quotedIndexTableName = SchemaUtil - .getFullTableNameWithQuotes(null, indexPTable.getTableName().getString()); - - String baseTableName = indexPTable.getParentTableName().getString(); - String baseTableFullName = indexPTable.getSchemaName().getString() + "." 
+ baseTableName; - PTable dataPTable = getPTable(baseTableFullName); - - String quotedBaseTableFullName = SchemaUtil - .getFullTableNameWithQuotes(indexPTable.getSchemaName().getString(), baseTableName); - - String defaultCF = SchemaUtil.getEmptyColumnFamilyAsString(indexPTable); - String indexedColumnsString = getIndexedColumnsString(indexPTable, dataPTable, defaultCF); - String coveredColumnsString = getCoveredColumnsString(indexPTable, defaultCF); - if (shouldGenerateWithDefaults) { - populateDefaultProperties(indexPTable); - setPTableProperties(indexPTable); - ConnectionQueryServices cqsi = getCQSIObject(); - TableDescriptor htd = getTableDescriptor(cqsi, table); - setHTableProperties(htd); - } - - String propertiesString = convertPropertiesToString(true); - return generateIndexDDLString(quotedBaseTableFullName, indexedColumnsString, coveredColumnsString, - indexPTable.getIndexType().equals(PTable.IndexType.LOCAL), quotedIndexTableName, propertiesString); - } - - //TODO: Indexed on an expression - //TODO: test with different CF - private String getIndexedColumnsString(PTable indexPTable, PTable dataPTable, String defaultCF) { - - List indexPK = indexPTable.getPKColumns(); - List dataPK = dataPTable.getPKColumns(); - List indexPKName = new ArrayList<>(); - List dataPKName = new ArrayList<>(); - Map indexSortOrderMap = new HashMap<>(); - StringBuilder indexedColumnsBuilder = new StringBuilder(); - - for (PColumn indexedColumn : indexPK) { - String indexColumn = extractIndexColumn(indexedColumn.getExpressionStr(), defaultCF); - if (indexColumn == null) { - continue; - } - indexPKName.add(indexColumn); - indexSortOrderMap.put(indexColumn, indexedColumn.getSortOrder()); - } - for (PColumn pColumn : dataPK) { - dataPKName.add(pColumn.getName().getString()); - } - - // This is added because of PHOENIX-2340 - String tenantIdColumn = dataPKName.get(0); - if (dataPTable.isMultiTenant() && indexPKName.contains(tenantIdColumn)) { - indexPKName.remove(tenantIdColumn); - } - - for (String column : indexPKName) { - if (indexedColumnsBuilder.length()!=0) { - indexedColumnsBuilder.append(", "); - } - indexedColumnsBuilder.append(column); - if (indexSortOrderMap.containsKey(column) - && indexSortOrderMap.get(column) != SortOrder.getDefault()) { - indexedColumnsBuilder.append(" "); - indexedColumnsBuilder.append(indexSortOrderMap.get(column)); - } - } - return indexedColumnsBuilder.toString(); - } - - private List getSymmetricDifferencePColumns(List firstList, List secondList) { - List effectivePK = new ArrayList<>(); - for (PColumn column : firstList) { - if (secondList.contains(column)) { - continue; - } - effectivePK.add(column); - } - for (PColumn column : secondList) { - if (firstList.contains(column)) { - continue; - } - effectivePK.add(column); - } - return effectivePK; - } - - private String extractIndexColumn(String columnName, String defaultCF) { - if (columnName == null) { - return null; - } - String [] columnNameSplit = columnName.split(":"); - if (columnNameSplit[0].equals("") || columnNameSplit[0].equalsIgnoreCase(defaultCF) || - (defaultCF.startsWith("L#") && columnNameSplit[0].equalsIgnoreCase(defaultCF.substring(2)))) { - return formatColumnOrExpression(columnNameSplit[1]); - } else { - if (columnNameSplit.length > 1) { - String schema = SchemaUtil.formatSchemaName(columnNameSplit[0]); - String name = SchemaUtil.formatColumnName(columnNameSplit[1]); - return String.format("%s.%s", schema, name); - } else { - return formatColumnOrExpression(columnNameSplit[0]); - } - } - } - - private 
String formatColumnOrExpression(String columnOrExpression) { - if (columnOrExpression.startsWith("(")) { - //Expressions like (a*b) are always parenthesised - return columnOrExpression.substring(1, columnOrExpression.length()-1); - } else if (columnOrExpression.contains("(")) { - //Expressions like like func(a) are always have a parenthesis - return columnOrExpression; + Map defaultProps = new HashMap<>(); + Map definedProps = new HashMap<>(); + + private static final String CREATE_TABLE = "CREATE TABLE %s"; + private static final String CREATE_INDEX = "CREATE %sINDEX %s ON %s"; + private static final String CREATE_VIEW = "CREATE VIEW %s%s AS SELECT * FROM %s%s"; + private static final List QUOTE_PROPERTIES = + // Copying here, because this only exists in Hbase 2.5+ + Arrays.asList(new String[] { "hbase.store.file-tracker.impl" }); + + private PTable table; + private Configuration conf; + private String ddl = null; + private String tenantId; + private boolean shouldGenerateWithDefaults = false; + private boolean isPhoenixTTLEnabled = true; + + public SchemaExtractionProcessor(String tenantId, Configuration conf, String pSchemaName, + String pTableName) throws SQLException { + this.tenantId = tenantId; + this.conf = conf; + this.table = getPTable(pSchemaName, pTableName); + this.isPhoenixTTLEnabled = conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } + + public SchemaExtractionProcessor(String tenantId, Configuration conf, PTable pTable, + boolean shouldGenerateWithDefaults) throws SQLException { + this.tenantId = tenantId; + this.conf = conf; + this.table = pTable; + this.shouldGenerateWithDefaults = shouldGenerateWithDefaults; + this.isPhoenixTTLEnabled = conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } + + @Override + public String process() throws Exception { + if (ddl != null) { + return ddl; + } + if (this.table.getType().equals(PTableType.TABLE)) { + ddl = extractCreateTableDDL(this.table); + } else if (this.table.getType().equals(PTableType.INDEX)) { + ddl = extractCreateIndexDDL(this.table); + } else if (this.table.getType().equals(PTableType.VIEW)) { + ddl = extractCreateViewDDL(this.table); + } + return ddl; + } + + protected String extractCreateIndexDDL(PTable indexPTable) throws SQLException, IOException { + String quotedIndexTableName = + SchemaUtil.getFullTableNameWithQuotes(null, indexPTable.getTableName().getString()); + + String baseTableName = indexPTable.getParentTableName().getString(); + String baseTableFullName = indexPTable.getSchemaName().getString() + "." 
+ baseTableName; + PTable dataPTable = getPTable(baseTableFullName); + + String quotedBaseTableFullName = + SchemaUtil.getFullTableNameWithQuotes(indexPTable.getSchemaName().getString(), baseTableName); + + String defaultCF = SchemaUtil.getEmptyColumnFamilyAsString(indexPTable); + String indexedColumnsString = getIndexedColumnsString(indexPTable, dataPTable, defaultCF); + String coveredColumnsString = getCoveredColumnsString(indexPTable, defaultCF); + if (shouldGenerateWithDefaults) { + populateDefaultProperties(indexPTable); + setPTableProperties(indexPTable); + ConnectionQueryServices cqsi = getCQSIObject(); + TableDescriptor htd = getTableDescriptor(cqsi, table); + setHTableProperties(htd); + } + + String propertiesString = convertPropertiesToString(true); + return generateIndexDDLString(quotedBaseTableFullName, indexedColumnsString, + coveredColumnsString, indexPTable.getIndexType().equals(PTable.IndexType.LOCAL), + quotedIndexTableName, propertiesString); + } + + // TODO: Indexed on an expression + // TODO: test with different CF + private String getIndexedColumnsString(PTable indexPTable, PTable dataPTable, String defaultCF) { + + List indexPK = indexPTable.getPKColumns(); + List dataPK = dataPTable.getPKColumns(); + List indexPKName = new ArrayList<>(); + List dataPKName = new ArrayList<>(); + Map indexSortOrderMap = new HashMap<>(); + StringBuilder indexedColumnsBuilder = new StringBuilder(); + + for (PColumn indexedColumn : indexPK) { + String indexColumn = extractIndexColumn(indexedColumn.getExpressionStr(), defaultCF); + if (indexColumn == null) { + continue; + } + indexPKName.add(indexColumn); + indexSortOrderMap.put(indexColumn, indexedColumn.getSortOrder()); + } + for (PColumn pColumn : dataPK) { + dataPKName.add(pColumn.getName().getString()); + } + + // This is added because of PHOENIX-2340 + String tenantIdColumn = dataPKName.get(0); + if (dataPTable.isMultiTenant() && indexPKName.contains(tenantIdColumn)) { + indexPKName.remove(tenantIdColumn); + } + + for (String column : indexPKName) { + if (indexedColumnsBuilder.length() != 0) { + indexedColumnsBuilder.append(", "); + } + indexedColumnsBuilder.append(column); + if ( + indexSortOrderMap.containsKey(column) + && indexSortOrderMap.get(column) != SortOrder.getDefault() + ) { + indexedColumnsBuilder.append(" "); + indexedColumnsBuilder.append(indexSortOrderMap.get(column)); + } + } + return indexedColumnsBuilder.toString(); + } + + private List getSymmetricDifferencePColumns(List firstList, + List secondList) { + List effectivePK = new ArrayList<>(); + for (PColumn column : firstList) { + if (secondList.contains(column)) { + continue; + } + effectivePK.add(column); + } + for (PColumn column : secondList) { + if (firstList.contains(column)) { + continue; + } + effectivePK.add(column); + } + return effectivePK; + } + + private String extractIndexColumn(String columnName, String defaultCF) { + if (columnName == null) { + return null; + } + String[] columnNameSplit = columnName.split(":"); + if ( + columnNameSplit[0].equals("") || columnNameSplit[0].equalsIgnoreCase(defaultCF) + || (defaultCF.startsWith("L#") + && columnNameSplit[0].equalsIgnoreCase(defaultCF.substring(2))) + ) { + return formatColumnOrExpression(columnNameSplit[1]); + } else { + if (columnNameSplit.length > 1) { + String schema = SchemaUtil.formatSchemaName(columnNameSplit[0]); + String name = SchemaUtil.formatColumnName(columnNameSplit[1]); + return String.format("%s.%s", schema, name); + } else { + return formatColumnOrExpression(columnNameSplit[0]); + } + } 
+ } + + private String formatColumnOrExpression(String columnOrExpression) { + if (columnOrExpression.startsWith("(")) { + // Expressions like (a*b) are always parenthesised + return columnOrExpression.substring(1, columnOrExpression.length() - 1); + } else if (columnOrExpression.contains("(")) { + // Expressions like like func(a) are always have a parenthesis + return columnOrExpression; + } else { + // If there are no parentheses, this is a column name + return SchemaUtil.formatIndexColumnName(columnOrExpression); + } + } + + private String getCoveredColumnsString(PTable indexPTable, String defaultCF) { + StringBuilder coveredColumnsBuilder = new StringBuilder(); + List pkColumns = indexPTable.getColumns(); + for (PColumn cc : pkColumns) { + if (coveredColumnsBuilder.length() != 0) { + coveredColumnsBuilder.append(", "); + } + if (cc.getFamilyName() != null) { + String indexColumn = extractIndexColumn(cc.getName().getString(), defaultCF); + if (indexColumn != null) { + coveredColumnsBuilder.append(indexColumn); + } + } + } + return coveredColumnsBuilder.toString(); + } + + protected String generateIndexDDLString(String quotedBaseTableFullName, + String indexedColumnString, String coveredColumnString, boolean local, + String quotedIndexTableName, String properties) { + StringBuilder outputBuilder = new StringBuilder(String.format(CREATE_INDEX, + local ? "LOCAL " : "", quotedIndexTableName, quotedBaseTableFullName)); + outputBuilder.append("("); + outputBuilder.append(indexedColumnString); + outputBuilder.append(")"); + if (!coveredColumnString.equals("")) { + outputBuilder.append(" INCLUDE ("); + outputBuilder.append(coveredColumnString); + outputBuilder.append(")"); + } + outputBuilder.append(properties); + return outputBuilder.toString(); + } + + PTable getPTable(String pTableFullName) throws SQLException { + try (Connection conn = getConnection()) { + return conn.unwrap(PhoenixConnection.class).getTable(pTableFullName); + } + } + + protected String extractCreateViewDDL(PTable table) throws SQLException { + String pSchemaName = table.getSchemaName().getString(); + String pTableName = table.getTableName().getString(); + String baseTableName = table.getParentTableName().getString(); + String quotedBaseTableName = SchemaUtil.getFullTableNameWithQuotes(pSchemaName, baseTableName); + + String baseTableFullName = pSchemaName + "." + baseTableName; + PTable baseTable = getPTable(baseTableFullName); + String columnInfoString = getColumnInfoStringForView(table, baseTable); + + String whereClause = table.getViewStatement(); + if (whereClause != null) { + whereClause = whereClause.substring(whereClause.indexOf("WHERE")); + } + return generateCreateViewDDL(columnInfoString, quotedBaseTableName, + whereClause == null ? 
"" : " " + whereClause, pSchemaName, pTableName); + } + + private String generateCreateViewDDL(String columnInfoString, String quotedBaseTableName, + String whereClause, String pSchemaName, String pTableName) { + String quotedViewFullName = SchemaUtil.getFullTableNameWithQuotes(pSchemaName, pTableName); + StringBuilder outputBuilder = new StringBuilder(String.format(CREATE_VIEW, quotedViewFullName, + columnInfoString, quotedBaseTableName, whereClause)); + return outputBuilder.toString(); + } + + public String extractCreateTableDDL(PTable table) throws IOException, SQLException { + String pSchemaName = table.getSchemaName().getString(); + String pTableName = table.getTableName().getString(); + + ConnectionQueryServices cqsi = getCQSIObject(); + TableDescriptor htd = getTableDescriptor(cqsi, table); + ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies(); + populateDefaultProperties(table); + setPTableProperties(table); + setHTableProperties(htd); + setHColumnFamilyProperties(hcds); + + String columnInfoString = getColumnInfoStringForTable(table); + String propertiesString = convertPropertiesToString(false); + String columnQualifierString = convertColumnQualifierCountersToString(table); + + return generateTableDDLString(columnInfoString, propertiesString, columnQualifierString, + pSchemaName, pTableName); + } + + private String generateTableDDLString(String columnInfoString, String propertiesString, + String columnQualifierString, String pSchemaName, String pTableName) { + String quotedTableFullName = SchemaUtil.getFullTableNameWithQuotes(pSchemaName, pTableName); + StringBuilder outputBuilder = + new StringBuilder(String.format(CREATE_TABLE, quotedTableFullName)); + outputBuilder.append(columnInfoString).append(" ").append(propertiesString) + .append(columnQualifierString); + return outputBuilder.toString(); + } + + private void populateDefaultProperties(PTable table) { + Map propsMap = ColumnFamilyDescriptorBuilder.getDefaultValues(); + for (Map.Entry entry : propsMap.entrySet()) { + String key = entry.getKey(); + String value = entry.getValue(); + defaultProps.put(key, value); + if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.BLOOMFILTER)) { + defaultProps.put(key, "ROW"); + } + if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.COMPRESSION)) { + defaultProps.put(key, "NONE"); + } + if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING)) { + defaultProps.put(key, String.valueOf(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING)); + } + } + defaultProps.putAll(table.getDefaultPropertyValues()); + } + + private void setHTableProperties(TableDescriptor htd) { + Map propsMap = htd.getValues(); + for (Map.Entry entry : propsMap.entrySet()) { + Bytes key = entry.getKey(); + Bytes value = entry.getValue(); + if ( + Bytes.toString(key.get()).contains("coprocessor") + || Bytes.toString(key.get()).contains(TableDescriptorBuilder.IS_META) + ) { + continue; + } + defaultProps.put(Bytes.toString(key.get()), "false"); + definedProps.put(Bytes.toString(key.get()), Bytes.toString(value.get())); + } + } + + private void setHColumnFamilyProperties(ColumnFamilyDescriptor[] columnDescriptors) { + Map propsMap = columnDescriptors[0].getValues(); + for (Map.Entry entry : propsMap.entrySet()) { + Bytes key = entry.getKey(); + Bytes globalValue = entry.getValue(); + if (Bytes.toString(key.get()).equalsIgnoreCase(TTL) && isPhoenixTTLEnabled) { + continue; + } + Map cfToPropertyValueMap = new HashMap(); + Set cfPropertyValueSet = new HashSet<>(); + for (ColumnFamilyDescriptor 
columnDescriptor : columnDescriptors) { + String columnFamilyName = Bytes.toString(columnDescriptor.getName()); + Bytes value = columnDescriptor.getValues().get(key); + // check if it is universal properties + if (SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.contains(Bytes.toString(key.get()))) { + definedProps.put(Bytes.toString(key.get()), Bytes.toString(value.get())); + break; + } + cfToPropertyValueMap.put(columnFamilyName, Bytes.toString(value.get())); + cfPropertyValueSet.add(value); + } + if (cfPropertyValueSet.size() > 1) { + for (Map.Entry mapEntry : cfToPropertyValueMap.entrySet()) { + definedProps.put(String.format("%s.%s", mapEntry.getKey(), Bytes.toString(key.get())), + mapEntry.getValue()); + } + } else { + definedProps.put(Bytes.toString(key.get()), Bytes.toString(globalValue.get())); + } + } + } + + private void setPTableProperties(PTable table) { + Map map = table.getPropertyValues(); + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + String value = entry.getValue(); + if (value != null) { + if (!key.equalsIgnoreCase(TTL)) { + definedProps.put(key, value); } else { - //If there are no parentheses, this is a column name - return SchemaUtil.formatIndexColumnName(columnOrExpression); - } - } - - private String getCoveredColumnsString(PTable indexPTable, String defaultCF) { - StringBuilder coveredColumnsBuilder = new StringBuilder(); - List pkColumns = indexPTable.getColumns(); - for (PColumn cc : pkColumns) { - if (coveredColumnsBuilder.length()!=0) { - coveredColumnsBuilder.append(", "); - } - if (cc.getFamilyName()!=null) { - String indexColumn = extractIndexColumn(cc.getName().getString(), defaultCF); - if (indexColumn != null) { - coveredColumnsBuilder.append(indexColumn); - } - } - } - return coveredColumnsBuilder.toString(); - } - - protected String generateIndexDDLString(String quotedBaseTableFullName, String indexedColumnString, - String coveredColumnString, boolean local, String quotedIndexTableName, String properties) { - StringBuilder outputBuilder = new StringBuilder(String.format(CREATE_INDEX, - local ? "LOCAL " : "", quotedIndexTableName, quotedBaseTableFullName)); - outputBuilder.append("("); - outputBuilder.append(indexedColumnString); - outputBuilder.append(")"); - if (!coveredColumnString.equals("")) { - outputBuilder.append(" INCLUDE ("); - outputBuilder.append(coveredColumnString); - outputBuilder.append(")"); - } - outputBuilder.append(properties); - return outputBuilder.toString(); - } - - PTable getPTable(String pTableFullName) throws SQLException { - try (Connection conn = getConnection()) { - return conn.unwrap(PhoenixConnection.class).getTable(pTableFullName); - } - } - - protected String extractCreateViewDDL(PTable table) throws SQLException { - String pSchemaName = table.getSchemaName().getString(); - String pTableName = table.getTableName().getString(); - String baseTableName = table.getParentTableName().getString(); - String quotedBaseTableName = SchemaUtil - .getFullTableNameWithQuotes(pSchemaName, baseTableName); - - String baseTableFullName = pSchemaName + "." + baseTableName; - PTable baseTable = getPTable(baseTableFullName); - String columnInfoString = getColumnInfoStringForView(table, baseTable); - - String whereClause = table.getViewStatement(); - if (whereClause != null) { - whereClause = whereClause.substring(whereClause.indexOf("WHERE")); - } - return generateCreateViewDDL(columnInfoString, quotedBaseTableName, - whereClause == null ? 
"" : " "+whereClause, pSchemaName, pTableName); - } - - private String generateCreateViewDDL(String columnInfoString, String quotedBaseTableName, - String whereClause, String pSchemaName, String pTableName) { - String quotedViewFullName = SchemaUtil.getFullTableNameWithQuotes(pSchemaName, pTableName); - StringBuilder outputBuilder = new StringBuilder(String.format(CREATE_VIEW, - quotedViewFullName, columnInfoString, quotedBaseTableName, whereClause)); - return outputBuilder.toString(); - } - - public String extractCreateTableDDL(PTable table) throws IOException, SQLException { - String pSchemaName = table.getSchemaName().getString(); - String pTableName = table.getTableName().getString(); - - ConnectionQueryServices cqsi = getCQSIObject(); - TableDescriptor htd = getTableDescriptor(cqsi, table); - ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies(); - populateDefaultProperties(table); - setPTableProperties(table); - setHTableProperties(htd); - setHColumnFamilyProperties(hcds); - - String columnInfoString = getColumnInfoStringForTable(table); - String propertiesString = convertPropertiesToString(false); - String columnQualifierString = convertColumnQualifierCountersToString(table); - - return generateTableDDLString(columnInfoString, propertiesString, columnQualifierString, - pSchemaName, pTableName); - } - - private String generateTableDDLString(String columnInfoString, String propertiesString, - String columnQualifierString, String pSchemaName, String pTableName) { - String quotedTableFullName = SchemaUtil.getFullTableNameWithQuotes(pSchemaName, pTableName); - StringBuilder outputBuilder = new StringBuilder(String.format(CREATE_TABLE, - quotedTableFullName)); - outputBuilder.append(columnInfoString).append(" ").append(propertiesString) - .append(columnQualifierString); - return outputBuilder.toString(); - } - - private void populateDefaultProperties(PTable table) { - Map propsMap = ColumnFamilyDescriptorBuilder.getDefaultValues(); - for (Map.Entry entry : propsMap.entrySet()) { - String key = entry.getKey(); - String value = entry.getValue(); - defaultProps.put(key, value); - if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.BLOOMFILTER)) { - defaultProps.put(key, "ROW"); - } - if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.COMPRESSION)) { - defaultProps.put(key, "NONE"); - } - if (key.equalsIgnoreCase(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING)) { - defaultProps.put(key, String.valueOf(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING)); - } - } - defaultProps.putAll(table.getDefaultPropertyValues()); - } - - private void setHTableProperties(TableDescriptor htd) { - Map propsMap = htd.getValues(); - for (Map.Entry entry : propsMap.entrySet()) { - Bytes key = entry.getKey(); - Bytes value = entry.getValue(); - if (Bytes.toString(key.get()).contains("coprocessor") || Bytes.toString(key.get()).contains( - TableDescriptorBuilder.IS_META)) { - continue; - } - defaultProps.put(Bytes.toString(key.get()), "false"); - definedProps.put(Bytes.toString(key.get()), Bytes.toString(value.get())); - } - } - - private void setHColumnFamilyProperties(ColumnFamilyDescriptor[] columnDescriptors) { - Map propsMap = columnDescriptors[0].getValues(); - for (Map.Entry entry : propsMap.entrySet()) { - Bytes key = entry.getKey(); - Bytes globalValue = entry.getValue(); - if (Bytes.toString(key.get()).equalsIgnoreCase(TTL) && isPhoenixTTLEnabled) { - continue; - } - Map cfToPropertyValueMap = new HashMap(); - Set cfPropertyValueSet = new HashSet<>(); - for (ColumnFamilyDescriptor columnDescriptor: 
columnDescriptors) { - String columnFamilyName = Bytes.toString(columnDescriptor.getName()); - Bytes value = columnDescriptor.getValues().get(key); - // check if it is universal properties - if (SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.contains(Bytes.toString(key.get()))) { - definedProps.put(Bytes.toString(key.get()), Bytes.toString(value.get())); - break; - } - cfToPropertyValueMap.put(columnFamilyName, Bytes.toString(value.get())); - cfPropertyValueSet.add(value); - } - if (cfPropertyValueSet.size() > 1) { - for (Map.Entry mapEntry: cfToPropertyValueMap.entrySet()) { - definedProps.put(String.format("%s.%s", mapEntry.getKey(), Bytes.toString(key.get())), mapEntry.getValue()); - } - } else { - definedProps.put(Bytes.toString(key.get()), Bytes.toString(globalValue.get())); - } - } - } - - private void setPTableProperties(PTable table) { - Map map = table.getPropertyValues(); - for (Map.Entry entry : map.entrySet()) { - String key = entry.getKey(); - String value = entry.getValue(); - if (value != null) { - if (!key.equalsIgnoreCase(TTL)) { - definedProps.put(key, value); - } else { - if (isPhoenixTTLEnabled && Integer.parseInt(value) != TTL_NOT_DEFINED) { - definedProps.put(key, value); - } - } - } - } - } - - private TableDescriptor getTableDescriptor(ConnectionQueryServices cqsi, PTable table) - throws SQLException, IOException { - try (Admin admin = cqsi.getAdmin()) { - return admin.getDescriptor(TableName.valueOf( - table.getPhysicalName().getString())); - } - } - - private String convertColumnQualifierCountersToString(PTable table) { - StringBuilder cqBuilder = new StringBuilder(); - if (shouldGenerateWithDefaults) { - return cqBuilder.toString(); - } - Map cqCounterValues = table.getEncodedCQCounter().values(); - ArrayList cqCounters = new ArrayList<>(cqCounterValues.size()); - - for (Map.Entry entry : cqCounterValues.entrySet()) { - Boolean include = table.getColumns().stream() - .filter(c -> !table.getPKColumns().contains(c)) - .filter(pColumn -> table.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS ? 
- pColumn.getFamilyName().getString().equalsIgnoreCase(entry.getKey()) : true) - .map(o -> table.getEncodingScheme().decode(o.getColumnQualifierBytes())) - .max(Integer::compare).map(maxCounter -> maxCounter != entry.getValue() - 1) - .orElse(false); - if (include) { - String def = "'" + entry.getKey() + "'=" + entry.getValue().toString(); - cqCounters.add(def); - } - } - if (cqCounters.size() > 0) { - cqBuilder.append(" COLUMN_QUALIFIER_COUNTER"); - cqBuilder.append(" ("); - cqBuilder.append(StringUtils.join( ", ", cqCounters)); - cqBuilder.append(')'); - } - return cqBuilder.toString(); - } - - private String convertPropertiesToString(boolean forIndex) { - StringBuilder optionBuilder = new StringBuilder(); - for (Map.Entry entry : definedProps.entrySet()) { - String key = entry.getKey(); - String value = entry.getValue(); - String columnFamilyName = QueryConstants.DEFAULT_COLUMN_FAMILY; - - String[] colPropKey = key.split("\\."); - if (QUOTE_PROPERTIES.contains(key)) { - key = "\"" + key + "\""; - } else if (colPropKey.length > 1) { - columnFamilyName = colPropKey[0]; - key = colPropKey[1]; - } - - if (value!=null && (shouldGenerateWithDefaults || (defaultProps.get(key) != null && !value.equals(defaultProps.get(key))))) { - if (forIndex) { - // cannot set these for index - if (key.equals(UPDATE_CACHE_FREQUENCY)) { - continue; - } - } - - if (key.contains("TTL") && definedProps.containsKey(TRANSACTION_PROVIDER) - && definedProps.get(TRANSACTION_PROVIDER).equalsIgnoreCase("OMID")) { - // TTL is unsupported for OMID transactional table - continue; - } - - if (optionBuilder.length() != 0) { - optionBuilder.append(", "); - } - key = columnFamilyName.equals(QueryConstants.DEFAULT_COLUMN_FAMILY)? - key : String.format("\"%s\".%s", columnFamilyName, key); - // properties value that corresponds to a number will not need single quotes around it - // properties value that corresponds to a boolean value will not need single quotes around it - if (!(NumberUtils.isNumber(value)) && - !(value.equalsIgnoreCase(Boolean.TRUE.toString()) ||value.equalsIgnoreCase(Boolean.FALSE.toString()))) { - value= "'" + value + "'"; - } - optionBuilder.append(key + "=" + value); - } - } - return optionBuilder.toString(); - } - - private PTable getPTable(String pSchemaName, String pTableName) throws SQLException { - String pTableFullName = SchemaUtil.getQualifiedTableName(pSchemaName, pTableName); - return getPTable(pTableFullName); - } - - private ConnectionQueryServices getCQSIObject() throws SQLException { - try(Connection conn = getConnection()) { - return conn.unwrap(PhoenixConnection.class).getQueryServices(); - } - } - - public Connection getConnection() throws SQLException { - if (tenantId!=null) { - conf.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - return ConnectionUtil.getInputConnection(conf); - } - - private String getColumnInfoStringForTable(PTable table) { - StringBuilder colInfo = new StringBuilder(); - List columns = table.getBucketNum() == null ? table.getColumns() : table.getColumns().subList(1, table.getColumns().size()); - List pkColumns = table.getBucketNum() == null ? 
table.getPKColumns() : table.getColumns().subList(1, table.getPKColumns().size()); - - return getColumnInfoString(table, colInfo, columns, pkColumns); - } - - private boolean hasEncodedQualifier(PTable table) - { - return table.getColumns().size() > 0 - && !shouldGenerateWithDefaults - && table.getType() == PTableType.TABLE - && table.getEncodingScheme() != PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - } - - private boolean areEncodedIdsComplete(List encodedIds, Integer initialID, - Integer lastEncodedID) { - if (encodedIds.size() == 0) { - return true; - } - if (encodedIds.get(0) > initialID || - encodedIds.get(encodedIds.size() - 1) < lastEncodedID) { - return false; - } - for (int i = 1; i < encodedIds.size(); i++) { - if (encodedIds.get(i - 1) + 1 != encodedIds.get(i)) { - return false; - } - } - return true; - } - - private List getNonConsecutiveQualifierFamilies(PTable table) { - List ret = new ArrayList<>(); - if (!hasEncodedQualifier(table)) { - return ret; - } - - PTable.QualifierEncodingScheme scheme = table.getEncodingScheme(); - PTable.EncodedCQCounter encodedCQCounter = table.getEncodedCQCounter(); - if (table.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) { - // For this scheme we track column qualifier counters at the column family level - for (PColumnFamily colFamily : table.getColumnFamilies()) { - String colFamilyName = colFamily.getName().getString(); - List encodedIds = colFamily.getColumns().stream() - .filter(c -> !table.getPKColumns().contains(c)) - .map(pColumn -> scheme.decode(pColumn.getColumnQualifierBytes())) - .collect(Collectors.toList()); - Collections.sort(encodedIds); - if (!areEncodedIdsComplete(encodedIds, - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE, - encodedCQCounter.getNextQualifier(colFamilyName) - 1)) { - ret.add(colFamilyName); - } - } - } else { - // For other schemes, column qualifier counters are tracked using the default column - // family. - List encodedIds = table.getColumns().stream() - .filter(c -> !table.getPKColumns().contains(c)) - .map(pColumn -> scheme.decode(pColumn.getColumnQualifierBytes())) - .collect(Collectors.toList()); - Collections.sort(encodedIds); - String defaultFamilyName = table.getDefaultFamilyName() == null ? 
- QueryConstants.DEFAULT_COLUMN_FAMILY - : table.getDefaultFamilyName().getString(); - if (!areEncodedIdsComplete(encodedIds, - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE, - encodedCQCounter.getNextQualifier(defaultFamilyName) - 1)) { - ret = table.getColumnFamilies().stream() - .map(pColumnFamily -> pColumnFamily.getName().getString()) - .collect(Collectors.toList()); - } - } - return ret; - } - - private String getColumnInfoString(PTable table, StringBuilder colInfo, List columns, - List pkColumns) { - List nonConsecutiveCounterFamilies = getNonConsecutiveQualifierFamilies(table); - ArrayList colDefs = new ArrayList<>(columns.size()); - for (PColumn col : columns) { - String def = extractColumn(col); - if (pkColumns.size() == 1 && pkColumns.contains(col)) { - def += " PRIMARY KEY" + extractPKColumnAttributes(col); - } - if (!pkColumns.contains(col) - && nonConsecutiveCounterFamilies.contains(col.getFamilyName().getString())) { - def += " ENCODED_QUALIFIER " + - table.getEncodingScheme().decode(col.getColumnQualifierBytes()); - } - colDefs.add(def); - } - if (colDefs.size() > 0) { - colInfo.append('('); - colInfo.append(StringUtils.join( ", " ,colDefs)); - } - if (pkColumns.size() > 1) { - // multi column primary key - String - pkConstraint = - String.format(" CONSTRAINT %s PRIMARY KEY (%s)", table.getPKName().getString(), - extractPKConstraint(pkColumns)); - colInfo.append(pkConstraint); - } - if (colDefs.size() > 0) { - colInfo.append(')'); - } - return colInfo.toString(); - } - - private String getColumnInfoStringForView(PTable table, PTable baseTable) { - StringBuilder colInfo = new StringBuilder(); - - List columns = table.getColumns(); - List pkColumns = table.getPKColumns(); - - List baseColumns = baseTable.getColumns(); - List basePkColumns = baseTable.getPKColumns(); - - columns = getSymmetricDifferencePColumns(baseColumns, columns); - pkColumns = getSymmetricDifferencePColumns(basePkColumns, pkColumns); - - return getColumnInfoString(table, colInfo, columns, pkColumns); - } - - private String extractColumn(PColumn column) { - String colName = SchemaUtil.formatColumnName(column.getName().getString()); - if (column.getFamilyName() != null){ - String colFamilyName = SchemaUtil.formatSchemaName(column.getFamilyName().getString()); - // check if it is default column family name - colName = colFamilyName.equals(QueryConstants.DEFAULT_COLUMN_FAMILY) ? colName : - String.format("%s.%s", colFamilyName, colName); - } - boolean isArrayType = column.getDataType().isArrayType(); - String type = column.getDataType().getSqlTypeName(); - Integer maxLength = column.getMaxLength(); - Integer arrSize = column.getArraySize(); - Integer scale = column.getScale(); - StringBuilder buf = new StringBuilder(colName); - buf.append(' '); - - if (isArrayType) { - String arrayPrefix = type.split("\\s+")[0]; - buf.append(arrayPrefix); - appendMaxLengthAndScale(buf, maxLength, scale); - buf.append(' '); - buf.append("ARRAY"); - if (arrSize != null) { - buf.append('['); - buf.append(arrSize); - buf.append(']'); - } - } else { - buf.append(type); - appendMaxLengthAndScale(buf, maxLength, scale); - } - - if (!column.isNullable()) { - buf.append(' '); - buf.append("NOT NULL"); - } - - return buf.toString(); - } - - private void appendMaxLengthAndScale(StringBuilder buf, Integer maxLength, Integer scale){ - if (maxLength != null) { - buf.append('('); - buf.append(maxLength); - if (scale != null) { - buf.append(','); - buf.append(scale); // has both max length and scale. 
For ex- decimal(10,2) - } - buf.append(')'); - } - } - - private String extractPKColumnAttributes(PColumn column) { - StringBuilder buf = new StringBuilder(); - - if (column.getSortOrder() != SortOrder.getDefault()) { - buf.append(' '); - buf.append(column.getSortOrder().toString()); - } - - if (column.isRowTimestamp()) { - buf.append(' '); - buf.append("ROW_TIMESTAMP"); - } - - return buf.toString(); - } - - private String extractPKConstraint(List pkColumns) { - ArrayList colDefs = new ArrayList<>(pkColumns.size()); - for (PColumn pkCol : pkColumns) { - colDefs.add(SchemaUtil.formatColumnName(pkCol.getName().getString()) + extractPKColumnAttributes(pkCol)); - } - return StringUtils.join(", ", colDefs); - } + if (isPhoenixTTLEnabled && Integer.parseInt(value) != TTL_NOT_DEFINED) { + definedProps.put(key, value); + } + } + } + } + } + + private TableDescriptor getTableDescriptor(ConnectionQueryServices cqsi, PTable table) + throws SQLException, IOException { + try (Admin admin = cqsi.getAdmin()) { + return admin.getDescriptor(TableName.valueOf(table.getPhysicalName().getString())); + } + } + + private String convertColumnQualifierCountersToString(PTable table) { + StringBuilder cqBuilder = new StringBuilder(); + if (shouldGenerateWithDefaults) { + return cqBuilder.toString(); + } + Map cqCounterValues = table.getEncodedCQCounter().values(); + ArrayList cqCounters = new ArrayList<>(cqCounterValues.size()); + + for (Map.Entry entry : cqCounterValues.entrySet()) { + Boolean include = table.getColumns().stream().filter(c -> !table.getPKColumns().contains(c)) + .filter(pColumn -> table.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS + ? pColumn.getFamilyName().getString().equalsIgnoreCase(entry.getKey()) + : true) + .map(o -> table.getEncodingScheme().decode(o.getColumnQualifierBytes())) + .max(Integer::compare).map(maxCounter -> maxCounter != entry.getValue() - 1).orElse(false); + if (include) { + String def = "'" + entry.getKey() + "'=" + entry.getValue().toString(); + cqCounters.add(def); + } + } + if (cqCounters.size() > 0) { + cqBuilder.append(" COLUMN_QUALIFIER_COUNTER"); + cqBuilder.append(" ("); + cqBuilder.append(StringUtils.join(", ", cqCounters)); + cqBuilder.append(')'); + } + return cqBuilder.toString(); + } + + private String convertPropertiesToString(boolean forIndex) { + StringBuilder optionBuilder = new StringBuilder(); + for (Map.Entry entry : definedProps.entrySet()) { + String key = entry.getKey(); + String value = entry.getValue(); + String columnFamilyName = QueryConstants.DEFAULT_COLUMN_FAMILY; + + String[] colPropKey = key.split("\\."); + if (QUOTE_PROPERTIES.contains(key)) { + key = "\"" + key + "\""; + } else if (colPropKey.length > 1) { + columnFamilyName = colPropKey[0]; + key = colPropKey[1]; + } + + if ( + value != null && (shouldGenerateWithDefaults + || (defaultProps.get(key) != null && !value.equals(defaultProps.get(key)))) + ) { + if (forIndex) { + // cannot set these for index + if (key.equals(UPDATE_CACHE_FREQUENCY)) { + continue; + } + } + + if ( + key.contains("TTL") && definedProps.containsKey(TRANSACTION_PROVIDER) + && definedProps.get(TRANSACTION_PROVIDER).equalsIgnoreCase("OMID") + ) { + // TTL is unsupported for OMID transactional table + continue; + } + + if (optionBuilder.length() != 0) { + optionBuilder.append(", "); + } + key = columnFamilyName.equals(QueryConstants.DEFAULT_COLUMN_FAMILY) + ? 
key + : String.format("\"%s\".%s", columnFamilyName, key); + // properties value that corresponds to a number will not need single quotes around it + // properties value that corresponds to a boolean value will not need single quotes around + // it + if ( + !(NumberUtils.isNumber(value)) && !(value.equalsIgnoreCase(Boolean.TRUE.toString()) + || value.equalsIgnoreCase(Boolean.FALSE.toString())) + ) { + value = "'" + value + "'"; + } + optionBuilder.append(key + "=" + value); + } + } + return optionBuilder.toString(); + } + + private PTable getPTable(String pSchemaName, String pTableName) throws SQLException { + String pTableFullName = SchemaUtil.getQualifiedTableName(pSchemaName, pTableName); + return getPTable(pTableFullName); + } + + private ConnectionQueryServices getCQSIObject() throws SQLException { + try (Connection conn = getConnection()) { + return conn.unwrap(PhoenixConnection.class).getQueryServices(); + } + } + + public Connection getConnection() throws SQLException { + if (tenantId != null) { + conf.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } + return ConnectionUtil.getInputConnection(conf); + } + + private String getColumnInfoStringForTable(PTable table) { + StringBuilder colInfo = new StringBuilder(); + List columns = table.getBucketNum() == null + ? table.getColumns() + : table.getColumns().subList(1, table.getColumns().size()); + List pkColumns = table.getBucketNum() == null + ? table.getPKColumns() + : table.getColumns().subList(1, table.getPKColumns().size()); + + return getColumnInfoString(table, colInfo, columns, pkColumns); + } + + private boolean hasEncodedQualifier(PTable table) { + return table.getColumns().size() > 0 && !shouldGenerateWithDefaults + && table.getType() == PTableType.TABLE + && table.getEncodingScheme() != PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + } + + private boolean areEncodedIdsComplete(List encodedIds, Integer initialID, + Integer lastEncodedID) { + if (encodedIds.size() == 0) { + return true; + } + if (encodedIds.get(0) > initialID || encodedIds.get(encodedIds.size() - 1) < lastEncodedID) { + return false; + } + for (int i = 1; i < encodedIds.size(); i++) { + if (encodedIds.get(i - 1) + 1 != encodedIds.get(i)) { + return false; + } + } + return true; + } + + private List getNonConsecutiveQualifierFamilies(PTable table) { + List ret = new ArrayList<>(); + if (!hasEncodedQualifier(table)) { + return ret; + } + + PTable.QualifierEncodingScheme scheme = table.getEncodingScheme(); + PTable.EncodedCQCounter encodedCQCounter = table.getEncodedCQCounter(); + if (table.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) { + // For this scheme we track column qualifier counters at the column family level + for (PColumnFamily colFamily : table.getColumnFamilies()) { + String colFamilyName = colFamily.getName().getString(); + List encodedIds = + colFamily.getColumns().stream().filter(c -> !table.getPKColumns().contains(c)) + .map(pColumn -> scheme.decode(pColumn.getColumnQualifierBytes())) + .collect(Collectors.toList()); + Collections.sort(encodedIds); + if ( + !areEncodedIdsComplete(encodedIds, QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE, + encodedCQCounter.getNextQualifier(colFamilyName) - 1) + ) { + ret.add(colFamilyName); + } + } + } else { + // For other schemes, column qualifier counters are tracked using the default column + // family. 
+ List encodedIds = + table.getColumns().stream().filter(c -> !table.getPKColumns().contains(c)) + .map(pColumn -> scheme.decode(pColumn.getColumnQualifierBytes())) + .collect(Collectors.toList()); + Collections.sort(encodedIds); + String defaultFamilyName = table.getDefaultFamilyName() == null + ? QueryConstants.DEFAULT_COLUMN_FAMILY + : table.getDefaultFamilyName().getString(); + if ( + !areEncodedIdsComplete(encodedIds, QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE, + encodedCQCounter.getNextQualifier(defaultFamilyName) - 1) + ) { + ret = table.getColumnFamilies().stream() + .map(pColumnFamily -> pColumnFamily.getName().getString()).collect(Collectors.toList()); + } + } + return ret; + } + + private String getColumnInfoString(PTable table, StringBuilder colInfo, List columns, + List pkColumns) { + List nonConsecutiveCounterFamilies = getNonConsecutiveQualifierFamilies(table); + ArrayList colDefs = new ArrayList<>(columns.size()); + for (PColumn col : columns) { + String def = extractColumn(col); + if (pkColumns.size() == 1 && pkColumns.contains(col)) { + def += " PRIMARY KEY" + extractPKColumnAttributes(col); + } + if ( + !pkColumns.contains(col) + && nonConsecutiveCounterFamilies.contains(col.getFamilyName().getString()) + ) { + def += + " ENCODED_QUALIFIER " + table.getEncodingScheme().decode(col.getColumnQualifierBytes()); + } + colDefs.add(def); + } + if (colDefs.size() > 0) { + colInfo.append('('); + colInfo.append(StringUtils.join(", ", colDefs)); + } + if (pkColumns.size() > 1) { + // multi column primary key + String pkConstraint = String.format(" CONSTRAINT %s PRIMARY KEY (%s)", + table.getPKName().getString(), extractPKConstraint(pkColumns)); + colInfo.append(pkConstraint); + } + if (colDefs.size() > 0) { + colInfo.append(')'); + } + return colInfo.toString(); + } + + private String getColumnInfoStringForView(PTable table, PTable baseTable) { + StringBuilder colInfo = new StringBuilder(); + + List columns = table.getColumns(); + List pkColumns = table.getPKColumns(); + + List baseColumns = baseTable.getColumns(); + List basePkColumns = baseTable.getPKColumns(); + + columns = getSymmetricDifferencePColumns(baseColumns, columns); + pkColumns = getSymmetricDifferencePColumns(basePkColumns, pkColumns); + + return getColumnInfoString(table, colInfo, columns, pkColumns); + } + + private String extractColumn(PColumn column) { + String colName = SchemaUtil.formatColumnName(column.getName().getString()); + if (column.getFamilyName() != null) { + String colFamilyName = SchemaUtil.formatSchemaName(column.getFamilyName().getString()); + // check if it is default column family name + colName = colFamilyName.equals(QueryConstants.DEFAULT_COLUMN_FAMILY) + ? 
colName + : String.format("%s.%s", colFamilyName, colName); + } + boolean isArrayType = column.getDataType().isArrayType(); + String type = column.getDataType().getSqlTypeName(); + Integer maxLength = column.getMaxLength(); + Integer arrSize = column.getArraySize(); + Integer scale = column.getScale(); + StringBuilder buf = new StringBuilder(colName); + buf.append(' '); + + if (isArrayType) { + String arrayPrefix = type.split("\\s+")[0]; + buf.append(arrayPrefix); + appendMaxLengthAndScale(buf, maxLength, scale); + buf.append(' '); + buf.append("ARRAY"); + if (arrSize != null) { + buf.append('['); + buf.append(arrSize); + buf.append(']'); + } + } else { + buf.append(type); + appendMaxLengthAndScale(buf, maxLength, scale); + } + + if (!column.isNullable()) { + buf.append(' '); + buf.append("NOT NULL"); + } + + return buf.toString(); + } + + private void appendMaxLengthAndScale(StringBuilder buf, Integer maxLength, Integer scale) { + if (maxLength != null) { + buf.append('('); + buf.append(maxLength); + if (scale != null) { + buf.append(','); + buf.append(scale); // has both max length and scale. For ex- decimal(10,2) + } + buf.append(')'); + } + } + + private String extractPKColumnAttributes(PColumn column) { + StringBuilder buf = new StringBuilder(); + + if (column.getSortOrder() != SortOrder.getDefault()) { + buf.append(' '); + buf.append(column.getSortOrder().toString()); + } + + if (column.isRowTimestamp()) { + buf.append(' '); + buf.append("ROW_TIMESTAMP"); + } + + return buf.toString(); + } + + private String extractPKConstraint(List pkColumns) { + ArrayList colDefs = new ArrayList<>(pkColumns.size()); + for (PColumn pkCol : pkColumns) { + colDefs.add(SchemaUtil.formatColumnName(pkCol.getName().getString()) + + extractPKColumnAttributes(pkCol)); + } + return StringUtils.join(", ", colDefs); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaProcessor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaProcessor.java index babd9527cd4..33a23dc486b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaProcessor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,5 +18,5 @@ package org.apache.phoenix.schema.tool; public interface SchemaProcessor { - String process() throws Exception; + String process() throws Exception; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSQLUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSQLUtil.java index 5ce73cb8f1f..87e606dd685 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSQLUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSQLUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,9 @@ */ package org.apache.phoenix.schema.tool; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.parse.ColumnDef; import org.apache.phoenix.parse.ColumnName; @@ -25,155 +27,149 @@ import org.apache.phoenix.parse.CreateTableStatement; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; import org.apache.phoenix.util.SchemaUtil; -import java.util.List; -import java.util.Map; - public class SchemaSQLUtil { - protected static String getCreateTableSQL(CreateTableStatement createStmt) { - if (createStmt == null) { - return ""; - } - StringBuffer sb = new StringBuffer() - .append("CREATE "+createStmt.getTableType() + " "); - if (createStmt.ifNotExists()) { - sb.append("IF NOT EXISTS "); - } - sb.append(createStmt.getTableName()).append("\n") - .append(getColumnDefListToString(createStmt)) - .append("\nCONSTRAINT "+createStmt.getPrimaryKeyConstraint().getName()+" PRIMARY KEY") - .append(" ("+createStmt.getPrimaryKeyConstraint().toString()+"))" - .replaceAll(",", ",\n")); - if (createStmt.getTableType().equals(PTableType.VIEW)) { - sb.append("\nAS SELECT * FROM " + createStmt.getBaseTableName()); - if (createStmt.getWhereClause()!=null) { - sb.append(" WHERE " +createStmt.getWhereClause()); - } - } - appendProperties(sb, createStmt.getProps()); - return sb.toString(); + protected static String getCreateTableSQL(CreateTableStatement createStmt) { + if (createStmt == null) { + return ""; } - - protected static String getCreateIndexSQL(CreateIndexStatement createStmt) { - if (createStmt == null) { - return ""; - } - StringBuffer sb = new StringBuffer() - .append("CREATE" - + (createStmt.getIndexType().equals(PTable.IndexType.LOCAL) ? 
" "+createStmt.getIndexType() : "") - + " INDEX "); - if (createStmt.ifNotExists()) { - sb.append("IF NOT EXISTS "); - } - sb.append(createStmt.getIndexTableName().getTableName()).append("\n") - .append("ON "+createStmt.getTable().getName()) - .append("("+createStmt.getIndexConstraint().toString()).append(")"); - if (createStmt.getIncludeColumns()!=null && !createStmt.getIncludeColumns().isEmpty()) { - sb.append("\nINCLUDE "); - sb.append(getColumnListToString(createStmt.getIncludeColumns())); - } - if (createStmt.isAsync()) { - sb.append(" ASYNC"); - } - appendProperties(sb, createStmt.getProps()); - return sb.toString(); + StringBuffer sb = new StringBuffer().append("CREATE " + createStmt.getTableType() + " "); + if (createStmt.ifNotExists()) { + sb.append("IF NOT EXISTS "); } + sb.append(createStmt.getTableName()).append("\n").append(getColumnDefListToString(createStmt)) + .append("\nCONSTRAINT " + createStmt.getPrimaryKeyConstraint().getName() + " PRIMARY KEY") + .append(" (" + createStmt.getPrimaryKeyConstraint().toString() + "))".replaceAll(",", ",\n")); + if (createStmt.getTableType().equals(PTableType.VIEW)) { + sb.append("\nAS SELECT * FROM " + createStmt.getBaseTableName()); + if (createStmt.getWhereClause() != null) { + sb.append(" WHERE " + createStmt.getWhereClause()); + } + } + appendProperties(sb, createStmt.getProps()); + return sb.toString(); + } - private static String getColumnListToString(List columnNames) { - StringBuffer sb = new StringBuffer(); - for(ColumnName cName : columnNames) { - if (sb.length()==0) { - sb.append("("); - } - sb.append(cName.toString()).append(",\n"); - } - if (sb.length()!=0) { - sb.deleteCharAt(sb.length()-1).deleteCharAt(sb.length()-1); - sb.append(")"); - } - return sb.toString(); + protected static String getCreateIndexSQL(CreateIndexStatement createStmt) { + if (createStmt == null) { + return ""; + } + StringBuffer sb = + new StringBuffer().append("CREATE" + (createStmt.getIndexType().equals(PTable.IndexType.LOCAL) + ? 
" " + createStmt.getIndexType() + : "") + " INDEX "); + if (createStmt.ifNotExists()) { + sb.append("IF NOT EXISTS "); } + sb.append(createStmt.getIndexTableName().getTableName()).append("\n") + .append("ON " + createStmt.getTable().getName()) + .append("(" + createStmt.getIndexConstraint().toString()).append(")"); + if (createStmt.getIncludeColumns() != null && !createStmt.getIncludeColumns().isEmpty()) { + sb.append("\nINCLUDE "); + sb.append(getColumnListToString(createStmt.getIncludeColumns())); + } + if (createStmt.isAsync()) { + sb.append(" ASYNC"); + } + appendProperties(sb, createStmt.getProps()); + return sb.toString(); + } - private static String getColumnDefListToString(CreateTableStatement createStatement) { - List colDef = createStatement.getColumnDefs(); - StringBuffer sb = new StringBuffer(); - for(ColumnDef cDef : colDef) { - String columnString = getColumnInfoString(cDef); - if (sb.length()==0) { - sb.append("("); - } else { - sb.append(",\n"); - } - sb.append(columnString); - } - return sb.toString(); + private static String getColumnListToString(List columnNames) { + StringBuffer sb = new StringBuffer(); + for (ColumnName cName : columnNames) { + if (sb.length() == 0) { + sb.append("("); + } + sb.append(cName.toString()).append(",\n"); + } + if (sb.length() != 0) { + sb.deleteCharAt(sb.length() - 1).deleteCharAt(sb.length() - 1); + sb.append(")"); } + return sb.toString(); + } - private static String getColumnInfoString(ColumnDef cDef) { - String colName = cDef.getColumnDefName().toString(); - boolean isArrayType = cDef.getDataType().isArrayType(); - String type = cDef.getDataType().getSqlTypeName(); - Integer maxLength = cDef.getMaxLength(); - Integer arrSize = cDef.getArraySize(); - Integer scale = cDef.getScale(); - StringBuilder buf = new StringBuilder(colName); - buf.append(' '); - if (isArrayType) { - String arrayPrefix = type.split("\\s+")[0]; - buf.append(arrayPrefix); - appendMaxLengthAndScale(buf, maxLength, scale); - buf.append(' '); - buf.append("ARRAY"); - if (arrSize != null) { - buf.append('['); - buf.append(arrSize); - buf.append(']'); - } - } else { - buf.append(type); - appendMaxLengthAndScale(buf, maxLength, scale); - } + private static String getColumnDefListToString(CreateTableStatement createStatement) { + List colDef = createStatement.getColumnDefs(); + StringBuffer sb = new StringBuffer(); + for (ColumnDef cDef : colDef) { + String columnString = getColumnInfoString(cDef); + if (sb.length() == 0) { + sb.append("("); + } else { + sb.append(",\n"); + } + sb.append(columnString); + } + return sb.toString(); + } - if (!cDef.isNull()) { - buf.append(' '); - buf.append("NOT NULL"); - } - if(cDef.getExpression()!=null) { - buf.append(" DEFAULT "); - buf.append(cDef.getExpression()); - } + private static String getColumnInfoString(ColumnDef cDef) { + String colName = cDef.getColumnDefName().toString(); + boolean isArrayType = cDef.getDataType().isArrayType(); + String type = cDef.getDataType().getSqlTypeName(); + Integer maxLength = cDef.getMaxLength(); + Integer arrSize = cDef.getArraySize(); + Integer scale = cDef.getScale(); + StringBuilder buf = new StringBuilder(colName); + buf.append(' '); + if (isArrayType) { + String arrayPrefix = type.split("\\s+")[0]; + buf.append(arrayPrefix); + appendMaxLengthAndScale(buf, maxLength, scale); + buf.append(' '); + buf.append("ARRAY"); + if (arrSize != null) { + buf.append('['); + buf.append(arrSize); + buf.append(']'); + } + } else { + buf.append(type); + appendMaxLengthAndScale(buf, maxLength, scale); + } - 
return buf.toString(); + if (!cDef.isNull()) { + buf.append(' '); + buf.append("NOT NULL"); + } + if (cDef.getExpression() != null) { + buf.append(" DEFAULT "); + buf.append(cDef.getExpression()); } - private static void appendMaxLengthAndScale(StringBuilder buf, Integer maxLength, Integer scale){ - if (maxLength != null) { - buf.append('('); - buf.append(maxLength); - if (scale != null) { - buf.append(','); - buf.append(scale); // has both max length and scale. For ex- decimal(10,2) - } - buf.append(')'); - } + return buf.toString(); + } + + private static void appendMaxLengthAndScale(StringBuilder buf, Integer maxLength, Integer scale) { + if (maxLength != null) { + buf.append('('); + buf.append(maxLength); + if (scale != null) { + buf.append(','); + buf.append(scale); // has both max length and scale. For ex- decimal(10,2) + } + buf.append(')'); } + } - private static void appendProperties(StringBuffer sb, - ListMultimap> props) { - if (props != null && !props.isEmpty()) { - sb.append("\n"); - for (Map.Entry> entry : props.entries()) { - String prop = entry.getValue().getFirst(); - if (prop.contains(".")) { - prop = SchemaUtil.getEscapedArgument(prop); - } - sb.append(prop).append("=") - .append(entry.getValue().getSecond()); - sb.append(","); - } - sb.deleteCharAt(sb.length()-1); + private static void appendProperties(StringBuffer sb, + ListMultimap> props) { + if (props != null && !props.isEmpty()) { + sb.append("\n"); + for (Map.Entry> entry : props.entries()) { + String prop = entry.getValue().getFirst(); + if (prop.contains(".")) { + prop = SchemaUtil.getEscapedArgument(prop); } + sb.append(prop).append("=").append(entry.getValue().getSecond()); + sb.append(","); + } + sb.deleteCharAt(sb.length() - 1); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSynthesisProcessor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSynthesisProcessor.java index 761621a754a..ca4f94f2fa7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSynthesisProcessor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaSynthesisProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,20 @@ */ package org.apache.phoenix.schema.tool; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import static org.apache.phoenix.schema.tool.SchemaSQLUtil.getCreateIndexSQL; +import static org.apache.phoenix.schema.tool.SchemaSQLUtil.getCreateTableSQL; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.parse.AddColumnStatement; import org.apache.phoenix.parse.BindableStatement; @@ -33,205 +45,189 @@ import org.apache.phoenix.parse.PrimaryKeyConstraint; import org.apache.phoenix.parse.SQLParser; import org.apache.phoenix.schema.SortOrder; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.phoenix.schema.tool.SchemaSQLUtil.getCreateIndexSQL; -import static org.apache.phoenix.schema.tool.SchemaSQLUtil.getCreateTableSQL; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; public class SchemaSynthesisProcessor implements SchemaProcessor { - public static final String - ENTITY_NAME_IN_BASE_AND_ALTER_DDL_DON_T_MATCH = - "Entity name in base and alter DDL don't match"; - public static final String - UNSUPPORTED_DDL_EXCEPTION = - "SchemaTool in Synth mode is supported for CREATE TABLE/VIEW/INDEX ddls"; - private final String ddlFile; + public static final String ENTITY_NAME_IN_BASE_AND_ALTER_DDL_DON_T_MATCH = + "Entity name in base and alter DDL don't match"; + public static final String UNSUPPORTED_DDL_EXCEPTION = + "SchemaTool in Synth mode is supported for CREATE TABLE/VIEW/INDEX ddls"; + private final String ddlFile; - public SchemaSynthesisProcessor(String ddlFile) { - this.ddlFile = ddlFile; - } + public SchemaSynthesisProcessor(String ddlFile) { + this.ddlFile = ddlFile; + } - @Override - public String process() throws Exception { - List allDDL = getQueriesFromFile(ddlFile); - String ddl = null; - for(String s : allDDL) { - ddl = synthesize(ddl, s); - } - return ddl == null ? "" :ddl; + @Override + public String process() throws Exception { + List allDDL = getQueriesFromFile(ddlFile); + String ddl = null; + for (String s : allDDL) { + ddl = synthesize(ddl, s); } + return ddl == null ? 
"" : ddl; + } - private String synthesize(String baseDDL, String nextDDL) throws Exception { - if (baseDDL == null && nextDDL != null) { - BindableStatement bStmt = new SQLParser(nextDDL).parseStatement(); - if (bStmt instanceof CreateTableStatement || bStmt instanceof CreateIndexStatement) { - return nextDDL; - } - throw new Exception(UNSUPPORTED_DDL_EXCEPTION); - } - BindableStatement createStatement = new SQLParser(baseDDL).parseStatement(); - BindableStatement alterStatement = new SQLParser(nextDDL).parseStatement(); - if (createStatement instanceof CreateTableStatement) { - CreateTableStatement newCreateStmt = null; - CreateTableStatement createStmt = (CreateTableStatement) createStatement; - if (alterStatement instanceof AddColumnStatement) { - newCreateStmt = - getCreateTableStatement((AddColumnStatement) alterStatement, createStmt); - } else if (alterStatement instanceof DropColumnStatement) { - newCreateStmt = - getCreateTableStatement((DropColumnStatement) alterStatement, createStmt); - } else if (alterStatement instanceof DropTableStatement) { - return null; - } - return getCreateTableSQL(newCreateStmt); - } else if (createStatement instanceof CreateIndexStatement) { - if (alterStatement instanceof DropIndexStatement) { - return null; - } - CreateIndexStatement newCreateIndexStmt = - getCreateIndexStatement(alterStatement, (CreateIndexStatement) createStatement); - return getCreateIndexSQL(newCreateIndexStmt); - } else { - throw new Exception(UNSUPPORTED_DDL_EXCEPTION); - } + private String synthesize(String baseDDL, String nextDDL) throws Exception { + if (baseDDL == null && nextDDL != null) { + BindableStatement bStmt = new SQLParser(nextDDL).parseStatement(); + if (bStmt instanceof CreateTableStatement || bStmt instanceof CreateIndexStatement) { + return nextDDL; + } + throw new Exception(UNSUPPORTED_DDL_EXCEPTION); + } + BindableStatement createStatement = new SQLParser(baseDDL).parseStatement(); + BindableStatement alterStatement = new SQLParser(nextDDL).parseStatement(); + if (createStatement instanceof CreateTableStatement) { + CreateTableStatement newCreateStmt = null; + CreateTableStatement createStmt = (CreateTableStatement) createStatement; + if (alterStatement instanceof AddColumnStatement) { + newCreateStmt = getCreateTableStatement((AddColumnStatement) alterStatement, createStmt); + } else if (alterStatement instanceof DropColumnStatement) { + newCreateStmt = getCreateTableStatement((DropColumnStatement) alterStatement, createStmt); + } else if (alterStatement instanceof DropTableStatement) { + return null; + } + return getCreateTableSQL(newCreateStmt); + } else if (createStatement instanceof CreateIndexStatement) { + if (alterStatement instanceof DropIndexStatement) { + return null; + } + CreateIndexStatement newCreateIndexStmt = + getCreateIndexStatement(alterStatement, (CreateIndexStatement) createStatement); + return getCreateIndexSQL(newCreateIndexStmt); + } else { + throw new Exception(UNSUPPORTED_DDL_EXCEPTION); } + } - private CreateIndexStatement getCreateIndexStatement(BindableStatement alterStatement, CreateIndexStatement createStatement) throws Exception { - CreateIndexStatement newCreateIndexStmt = null; - String tableName = createStatement.getIndexTableName().toString(); - String tableNameInAlter = ((AddColumnStatement)alterStatement).getTable().toString().trim(); - sanityCheck(tableName, tableNameInAlter); - AddColumnStatement addStmt = (AddColumnStatement) alterStatement; - if (addStmt.getColumnDefs() == null) { - ListMultimap> - finalProps = - 
getEffectiveProperties(addStmt, createStatement.getProps()); - newCreateIndexStmt = new CreateIndexStatement(createStatement, finalProps); - } - return newCreateIndexStmt; + private CreateIndexStatement getCreateIndexStatement(BindableStatement alterStatement, + CreateIndexStatement createStatement) throws Exception { + CreateIndexStatement newCreateIndexStmt = null; + String tableName = createStatement.getIndexTableName().toString(); + String tableNameInAlter = ((AddColumnStatement) alterStatement).getTable().toString().trim(); + sanityCheck(tableName, tableNameInAlter); + AddColumnStatement addStmt = (AddColumnStatement) alterStatement; + if (addStmt.getColumnDefs() == null) { + ListMultimap> finalProps = + getEffectiveProperties(addStmt, createStatement.getProps()); + newCreateIndexStmt = new CreateIndexStatement(createStatement, finalProps); } + return newCreateIndexStmt; + } - private CreateTableStatement getCreateTableStatement(DropColumnStatement alterStatement, - CreateTableStatement createStmt) throws Exception { - CreateTableStatement newCreateStmt = null; - String tableName = createStmt.getTableName().toString(); - String tableNameInAlter = alterStatement.getTable().toString().trim(); - sanityCheck(tableName, tableNameInAlter); - List oldColumnDef = createStmt.getColumnDefs(); - List newColumnDef = new ArrayList<>(); - newColumnDef.addAll(oldColumnDef); - DropColumnStatement dropStmt = alterStatement; - for(ColumnName cName : dropStmt.getColumnRefs()) { - for(ColumnDef colDef : oldColumnDef) { - if (colDef.getColumnDefName().equals(cName)) { - newColumnDef.remove(colDef); - break; - } - } + private CreateTableStatement getCreateTableStatement(DropColumnStatement alterStatement, + CreateTableStatement createStmt) throws Exception { + CreateTableStatement newCreateStmt = null; + String tableName = createStmt.getTableName().toString(); + String tableNameInAlter = alterStatement.getTable().toString().trim(); + sanityCheck(tableName, tableNameInAlter); + List oldColumnDef = createStmt.getColumnDefs(); + List newColumnDef = new ArrayList<>(); + newColumnDef.addAll(oldColumnDef); + DropColumnStatement dropStmt = alterStatement; + for (ColumnName cName : dropStmt.getColumnRefs()) { + for (ColumnDef colDef : oldColumnDef) { + if (colDef.getColumnDefName().equals(cName)) { + newColumnDef.remove(colDef); + break; } - newCreateStmt = new CreateTableStatement(createStmt, newColumnDef); - return newCreateStmt; + } } + newCreateStmt = new CreateTableStatement(createStmt, newColumnDef); + return newCreateStmt; + } - private CreateTableStatement getCreateTableStatement(AddColumnStatement alterStatement, - CreateTableStatement createStmt) throws Exception { - CreateTableStatement newCreateStmt = null; - String tableName = createStmt.getTableName().toString(); - String tableNameInAlter = alterStatement.getTable().toString().trim(); - sanityCheck(tableName, tableNameInAlter); - AddColumnStatement addStmt = alterStatement; - List oldColDef = createStmt.getColumnDefs(); - List newColDef = new ArrayList<>(); - if (addStmt.getColumnDefs() == null) { - ListMultimap> - finalProps = getEffectiveProperties(addStmt, createStmt.getProps()); - newCreateStmt = new CreateTableStatement(createStmt, finalProps, oldColDef); - } else { - newColDef.addAll(oldColDef); - newColDef.addAll(addStmt.getColumnDefs()); - PrimaryKeyConstraint oldPKConstraint = createStmt.getPrimaryKeyConstraint(); - List pkList = new ArrayList<>(); - for(Pair entry : oldPKConstraint.getColumnNames()) { - ColumnDefInPkConstraint cd = new - 
ColumnDefInPkConstraint(entry.getFirst(), entry.getSecond(), oldPKConstraint.isColumnRowTimestamp(entry - .getFirst())); - pkList.add(cd); - } - for(ColumnDef cd : addStmt.getColumnDefs()) { - if(cd.isPK()) { - ColumnDefInPkConstraint cdpk = new ColumnDefInPkConstraint(cd.getColumnDefName(), cd.getSortOrder(), cd.isRowTimestamp()); - pkList.add(cdpk); - } - } - PrimaryKeyConstraint pkConstraint = new PrimaryKeyConstraint(oldPKConstraint.getName(), pkList); - newCreateStmt = new CreateTableStatement(createStmt, pkConstraint, newColDef); + private CreateTableStatement getCreateTableStatement(AddColumnStatement alterStatement, + CreateTableStatement createStmt) throws Exception { + CreateTableStatement newCreateStmt = null; + String tableName = createStmt.getTableName().toString(); + String tableNameInAlter = alterStatement.getTable().toString().trim(); + sanityCheck(tableName, tableNameInAlter); + AddColumnStatement addStmt = alterStatement; + List oldColDef = createStmt.getColumnDefs(); + List newColDef = new ArrayList<>(); + if (addStmt.getColumnDefs() == null) { + ListMultimap> finalProps = + getEffectiveProperties(addStmt, createStmt.getProps()); + newCreateStmt = new CreateTableStatement(createStmt, finalProps, oldColDef); + } else { + newColDef.addAll(oldColDef); + newColDef.addAll(addStmt.getColumnDefs()); + PrimaryKeyConstraint oldPKConstraint = createStmt.getPrimaryKeyConstraint(); + List pkList = new ArrayList<>(); + for (Pair entry : oldPKConstraint.getColumnNames()) { + ColumnDefInPkConstraint cd = new ColumnDefInPkConstraint(entry.getFirst(), + entry.getSecond(), oldPKConstraint.isColumnRowTimestamp(entry.getFirst())); + pkList.add(cd); + } + for (ColumnDef cd : addStmt.getColumnDefs()) { + if (cd.isPK()) { + ColumnDefInPkConstraint cdpk = new ColumnDefInPkConstraint(cd.getColumnDefName(), + cd.getSortOrder(), cd.isRowTimestamp()); + pkList.add(cdpk); } - return newCreateStmt; + } + PrimaryKeyConstraint pkConstraint = + new PrimaryKeyConstraint(oldPKConstraint.getName(), pkList); + newCreateStmt = new CreateTableStatement(createStmt, pkConstraint, newColDef); } + return newCreateStmt; + } - private void sanityCheck(String tableName, String tableNameInAlter) throws Exception { - if (!tableName.equalsIgnoreCase(tableNameInAlter)) { - throw new Exception(ENTITY_NAME_IN_BASE_AND_ALTER_DDL_DON_T_MATCH); - } + private void sanityCheck(String tableName, String tableNameInAlter) throws Exception { + if (!tableName.equalsIgnoreCase(tableNameInAlter)) { + throw new Exception(ENTITY_NAME_IN_BASE_AND_ALTER_DDL_DON_T_MATCH); } + } - private ListMultimap> getEffectiveProperties( - AddColumnStatement addStmt, ListMultimap> oldProps) { - Map oldPropMap = new HashMap(); - Map changePropMap = new HashMap(); + private ListMultimap> getEffectiveProperties( + AddColumnStatement addStmt, ListMultimap> oldProps) { + Map oldPropMap = new HashMap(); + Map changePropMap = new HashMap(); - for (Pair value : oldProps.values()) { - oldPropMap.put(value.getFirst(),value.getSecond()); - } - for (Pair value : addStmt.getProps().values()) { - changePropMap.put(value.getFirst(),value.getSecond()); - } + for (Pair value : oldProps.values()) { + oldPropMap.put(value.getFirst(), value.getSecond()); + } + for (Pair value : addStmt.getProps().values()) { + changePropMap.put(value.getFirst(), value.getSecond()); + } - oldPropMap.putAll(changePropMap); - ListMultimap> - finalProps = - ArrayListMultimap.>create(); - for (Map.Entry entry : oldPropMap.entrySet()) { - finalProps.put("", Pair.newPair(entry.getKey(), 
entry.getValue())); - } - return finalProps; + oldPropMap.putAll(changePropMap); + ListMultimap> finalProps = + ArrayListMultimap.> create(); + for (Map.Entry entry : oldPropMap.entrySet()) { + finalProps.put("", Pair.newPair(entry.getKey(), entry.getValue())); } + return finalProps; + } - private List getQueriesFromFile(String ddlFile) throws IOException { - StringBuilder sb = new StringBuilder(); - File file = new File(ddlFile); - BufferedReader br = new BufferedReader(new InputStreamReader( - new FileInputStream(file), StandardCharsets.UTF_8)); - String st; - while ((st = br.readLine()) != null) { - sb.append(st).append("\n"); - } - String trimmedQuery = sb.toString().trim(); - if (trimmedQuery.contains("/*") && trimmedQuery.contains("*/")) { - trimmedQuery = trimmedQuery.substring(trimmedQuery.lastIndexOf("*/") + 2); - } - String [] queries = trimmedQuery.split(";"); - List output = new ArrayList<>(); - for(String query: queries) { - StringBuilder newSb = new StringBuilder(query); - char lastChar = newSb.charAt(newSb.length() - 1); - // DDL in the file should not have a ; at the end - // remove the last char if it is ; or \n - if (lastChar == '\n' || lastChar == ';') { - newSb.deleteCharAt(newSb.length() - 1); - } - output.add(newSb.toString().trim()); - } - return output; + private List getQueriesFromFile(String ddlFile) throws IOException { + StringBuilder sb = new StringBuilder(); + File file = new File(ddlFile); + BufferedReader br = + new BufferedReader(new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)); + String st; + while ((st = br.readLine()) != null) { + sb.append(st).append("\n"); + } + String trimmedQuery = sb.toString().trim(); + if (trimmedQuery.contains("/*") && trimmedQuery.contains("*/")) { + trimmedQuery = trimmedQuery.substring(trimmedQuery.lastIndexOf("*/") + 2); + } + String[] queries = trimmedQuery.split(";"); + List output = new ArrayList<>(); + for (String query : queries) { + StringBuilder newSb = new StringBuilder(query); + char lastChar = newSb.charAt(newSb.length() - 1); + // DDL in the file should not have a ; at the end + // remove the last char if it is ; or \n + if (lastChar == '\n' || lastChar == ';') { + newSb.deleteCharAt(newSb.length() - 1); + } + output.add(newSb.toString().trim()); } + return output; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java index f000b980640..ad62080bee8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tool/SchemaTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,12 @@ */ package org.apache.phoenix.schema.tool; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; @@ -24,14 +30,6 @@ import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hbase.HBaseConfiguration; - -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,135 +37,135 @@ public class SchemaTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(SchemaTool.class); - private static final Option HELP_OPTION = new Option("h", "help", - false, "Help"); - private static final Option MODE_OPTION = new Option("m", "mode", true, - "[Required] Takes either synth or extract value"); - private static final Option DDL_OPTION = new Option("d", "ddl", true, - "[Required with synth mode] SQL file that has one or more ddl statements" - + " for the same entity"); - private static final Option TABLE_OPTION = new Option("tb", "table", true, - "[Required with extract mode] Table name ex. table1"); - private static final Option SCHEMA_OPTION = new Option("s", "schema", true, - "[Optional] Schema name ex. schema"); - private static final Option TENANT_OPTION = new Option("t", "tenant", true, - "[Optional] Tenant Id ex. 
abc"); - - private String pTableName; - private String pSchemaName; - private String tenantId; - private Enum mode; - - protected static Configuration conf; - private String output; - private String ddlFile; - private String alterDDLFile; - - @Override - public int run(String[] args) throws Exception { - try { - populateToolAttributes(args); - SchemaProcessor processor=null; - if(Mode.SYNTH.equals(mode)) { - processor = new SchemaSynthesisProcessor(ddlFile); - } else if(Mode.EXTRACT.equals(mode)) { - conf = HBaseConfiguration.addHbaseResources(getConf()); - processor = new SchemaExtractionProcessor(tenantId, conf, pSchemaName, pTableName); - } else { - throw new Exception(mode+" is not accepted, provide [synth or extract]"); - } - output = processor.process(); - LOGGER.info("Effective DDL with " + mode.toString() +": " + output); - return 0; - } catch (Exception e) { - e.printStackTrace(); - return -1; - } + private static final Logger LOGGER = LoggerFactory.getLogger(SchemaTool.class); + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + private static final Option MODE_OPTION = + new Option("m", "mode", true, "[Required] Takes either synth or extract value"); + private static final Option DDL_OPTION = new Option("d", "ddl", true, + "[Required with synth mode] SQL file that has one or more ddl statements" + + " for the same entity"); + private static final Option TABLE_OPTION = + new Option("tb", "table", true, "[Required with extract mode] Table name ex. table1"); + private static final Option SCHEMA_OPTION = + new Option("s", "schema", true, "[Optional] Schema name ex. schema"); + private static final Option TENANT_OPTION = + new Option("t", "tenant", true, "[Optional] Tenant Id ex. abc"); + + private String pTableName; + private String pSchemaName; + private String tenantId; + private Enum mode; + + protected static Configuration conf; + private String output; + private String ddlFile; + private String alterDDLFile; + + @Override + public int run(String[] args) throws Exception { + try { + populateToolAttributes(args); + SchemaProcessor processor = null; + if (Mode.SYNTH.equals(mode)) { + processor = new SchemaSynthesisProcessor(ddlFile); + } else if (Mode.EXTRACT.equals(mode)) { + conf = HBaseConfiguration.addHbaseResources(getConf()); + processor = new SchemaExtractionProcessor(tenantId, conf, pSchemaName, pTableName); + } else { + throw new Exception(mode + " is not accepted, provide [synth or extract]"); + } + output = processor.process(); + LOGGER.info("Effective DDL with " + mode.toString() + ": " + output); + return 0; + } catch (Exception e) { + e.printStackTrace(); + return -1; } - - public String getOutput() { - return output; + } + + public String getOutput() { + return output; + } + + private void populateToolAttributes(String[] args) { + try { + CommandLine cmdLine = parseOptions(args); + mode = Mode.valueOf(cmdLine.getOptionValue(MODE_OPTION.getOpt())); + ddlFile = cmdLine.getOptionValue(DDL_OPTION.getOpt()); + pTableName = cmdLine.getOptionValue(TABLE_OPTION.getOpt()); + pSchemaName = cmdLine.getOptionValue(SCHEMA_OPTION.getOpt()); + tenantId = cmdLine.getOptionValue(TENANT_OPTION.getOpt()); + LOGGER.info("Schema Tool initiated: " + StringUtils.join(args, ",")); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); } - - private void populateToolAttributes(String[] args) { - try { - CommandLine cmdLine = parseOptions(args); - mode = Mode.valueOf(cmdLine.getOptionValue(MODE_OPTION.getOpt())); - ddlFile = 
cmdLine.getOptionValue(DDL_OPTION.getOpt()); - pTableName = cmdLine.getOptionValue(TABLE_OPTION.getOpt()); - pSchemaName = cmdLine.getOptionValue(SCHEMA_OPTION.getOpt()); - tenantId = cmdLine.getOptionValue(TENANT_OPTION.getOpt()); - LOGGER.info("Schema Tool initiated: " + StringUtils.join( args, ",")); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - } + } + + @SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "null path call calls System.exit()") + private CommandLine parseOptions(String[] args) { + final Options options = getOptions(); + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("severe parsing command line options: " + e.getMessage(), options); } - - @SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="null path call calls System.exit()") - private CommandLine parseOptions(String[] args) { - final Options options = getOptions(); - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("severe parsing command line options: " + e.getMessage(), - options); - } - if(cmdLine == null) { - printHelpAndExit("parsed command line object is null", options); - } - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - if (!(cmdLine.hasOption(TABLE_OPTION.getOpt())) - && cmdLine.getOptionValue(MODE_OPTION.getOpt()).equalsIgnoreCase(Mode.EXTRACT.toString())) { - throw new IllegalStateException("Table name should be passed with EXTRACT mode" - +TABLE_OPTION.getLongOpt()); - } - if ((!(cmdLine.hasOption(DDL_OPTION.getOpt()))) - && cmdLine.getOptionValue(MODE_OPTION.getOpt()).equalsIgnoreCase(Mode.SYNTH.toString())) { - throw new IllegalStateException("ddl option should be passed with SYNTH mode" - + DDL_OPTION.getLongOpt()); - } - return cmdLine; + if (cmdLine == null) { + printHelpAndExit("parsed command line object is null", options); } - - enum Mode { - SYNTH, - EXTRACT + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - - private Options getOptions() { - final Options options = new Options(); - options.addOption(TABLE_OPTION); - options.addOption(MODE_OPTION); - options.addOption(DDL_OPTION); - SCHEMA_OPTION.setOptionalArg(true); - options.addOption(SCHEMA_OPTION); - TENANT_OPTION.setOptionalArg(true); - options.addOption(TENANT_OPTION); - return options; - } - - private void printHelpAndExit(String severeMessage, Options options) { - System.err.println(severeMessage); - printHelpAndExit(options, 1); + if ( + !(cmdLine.hasOption(TABLE_OPTION.getOpt())) + && cmdLine.getOptionValue(MODE_OPTION.getOpt()).equalsIgnoreCase(Mode.EXTRACT.toString()) + ) { + throw new IllegalStateException( + "Table name should be passed with EXTRACT mode" + TABLE_OPTION.getLongOpt()); } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); - } - - public static void main (String[] args) throws Exception { - int result = ToolRunner.run(new SchemaTool(), args); - System.exit(result); + if ( + (!(cmdLine.hasOption(DDL_OPTION.getOpt()))) + && 
cmdLine.getOptionValue(MODE_OPTION.getOpt()).equalsIgnoreCase(Mode.SYNTH.toString()) + ) { + throw new IllegalStateException( + "ddl option should be passed with SYNTH mode" + DDL_OPTION.getLongOpt()); } + return cmdLine; + } + + enum Mode { + SYNTH, + EXTRACT + } + + private Options getOptions() { + final Options options = new Options(); + options.addOption(TABLE_OPTION); + options.addOption(MODE_OPTION); + options.addOption(DDL_OPTION); + SCHEMA_OPTION.setOptionalArg(true); + options.addOption(SCHEMA_OPTION); + TENANT_OPTION.setOptionalArg(true); + options.addOption(TENANT_OPTION); + return options; + } + + private void printHelpAndExit(String severeMessage, Options options) { + System.err.println(severeMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + public static void main(String[] args) throws Exception { + int result = ToolRunner.run(new SchemaTool(), args); + System.exit(result); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/SystemTransformRecord.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/SystemTransformRecord.java index d6fec5ae9c9..63a71f4533a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/SystemTransformRecord.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/SystemTransformRecord.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,264 +15,270 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.transform; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.util.EnvironmentEdgeManager; - import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Timestamp; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.util.EnvironmentEdgeManager; + /** - * Task params to be used while upserting records in SYSTEM.TRANSFORM table. - * This POJO is mainly used while upserting(and committing) or generating - * upsert mutations plan in {@link Transform} class + * Task params to be used while upserting records in SYSTEM.TRANSFORM table. 
This POJO is mainly + * used while upserting(and committing) or generating upsert mutations plan in {@link Transform} + * class */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = {"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}, +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = { "EI_EXPOSE_REP", "EI_EXPOSE_REP2" }, justification = "lastStateTs and startTs are not used for mutation") public class SystemTransformRecord { - private final PTable.TransformType transformType; - private final String schemaName; - private final String logicalTableName; - private final String tenantId; - private final String logicalParentName; - private final String newPhysicalTableName; - private final String transformStatus; - private final String transformJobId; - private final Integer transformRetryCount; - private final Timestamp startTs; - private final Timestamp lastStateTs; - private final byte[] oldMetadata; - private final String newMetadata; - private final String transformFunction; - - public SystemTransformRecord(PTable.TransformType transformType, - String schemaName, String logicalTableName, String tenantId, String newPhysicalTableName, String logicalParentName, - String transformStatus, String transformJobId, Integer transformRetryCount, Timestamp startTs, - Timestamp lastStateTs, byte[] oldMetadata, String newMetadata, String transformFunction) { - this.transformType = transformType; - this.schemaName = schemaName; - this.tenantId = tenantId; - this.logicalTableName = logicalTableName; - this.newPhysicalTableName = newPhysicalTableName; - this.logicalParentName = logicalParentName; - this.transformStatus = transformStatus; - this.transformJobId = transformJobId; - this.transformRetryCount = transformRetryCount; - this.startTs = startTs; - this.lastStateTs = lastStateTs; - this.oldMetadata = oldMetadata; - this.newMetadata = newMetadata; - this.transformFunction = transformFunction; + private final PTable.TransformType transformType; + private final String schemaName; + private final String logicalTableName; + private final String tenantId; + private final String logicalParentName; + private final String newPhysicalTableName; + private final String transformStatus; + private final String transformJobId; + private final Integer transformRetryCount; + private final Timestamp startTs; + private final Timestamp lastStateTs; + private final byte[] oldMetadata; + private final String newMetadata; + private final String transformFunction; + + public SystemTransformRecord(PTable.TransformType transformType, String schemaName, + String logicalTableName, String tenantId, String newPhysicalTableName, String logicalParentName, + String transformStatus, String transformJobId, Integer transformRetryCount, Timestamp startTs, + Timestamp lastStateTs, byte[] oldMetadata, String newMetadata, String transformFunction) { + this.transformType = transformType; + this.schemaName = schemaName; + this.tenantId = tenantId; + this.logicalTableName = logicalTableName; + this.newPhysicalTableName = newPhysicalTableName; + this.logicalParentName = logicalParentName; + this.transformStatus = transformStatus; + this.transformJobId = transformJobId; + this.transformRetryCount = transformRetryCount; + this.startTs = startTs; + this.lastStateTs = lastStateTs; + this.oldMetadata = oldMetadata; + this.newMetadata = newMetadata; + this.transformFunction = transformFunction; + } + + public String getString() { + return String.format( + "transformType: %s, schameName: %s, logicalTableName: %s, newPhysicalTableName: %s, 
logicalParentName: %s, status: %s", + String.valueOf(transformType), String.valueOf(schemaName), String.valueOf(logicalTableName), + String.valueOf(newPhysicalTableName), String.valueOf(logicalParentName), + String.valueOf(transformStatus)); + } + + public PTable.TransformType getTransformType() { + return transformType; + } + + public String getSchemaName() { + return schemaName; + } + + public String getTenantId() { + return tenantId; + } + + public String getLogicalTableName() { + return logicalTableName; + } + + public String getLogicalParentName() { + return logicalParentName; + } + + public String getNewPhysicalTableName() { + return newPhysicalTableName; + } + + public String getTransformStatus() { + return transformStatus; + } + + public String getTransformJobId() { + return transformJobId; + } + + public int getTransformRetryCount() { + return transformRetryCount; + } + + public Timestamp getTransformStartTs() { + return startTs; + } + + public Timestamp getTransformLastStateTs() { + return lastStateTs; + } + + public byte[] getOldMetadata() { + return oldMetadata; + } + + public String getNewMetadata() { + return newMetadata; + } + + public String getTransformFunction() { + return transformFunction; + } + + public boolean isActive() { + return (transformStatus.equals(PTable.TransformStatus.STARTED.name()) + || transformStatus.equals(PTable.TransformStatus.CREATED.name()) + || transformStatus.equals(PTable.TransformStatus.PENDING_CUTOVER.name())); + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = { "EI_EXPOSE_REP", "EI_EXPOSE_REP2" }, + justification = "lastStateTs and startTs are not used for mutation") + public static class SystemTransformBuilder { + + private PTable.TransformType transformType = PTable.TransformType.METADATA_TRANSFORM; + private String schemaName; + private String tenantId; + private String logicalTableName; + private String logicalParentName; + private String newPhysicalTableName; + private String transformStatus = PTable.TransformStatus.CREATED.name(); + private String transformJobId; + private int transformRetryCount = 0; + private Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + private Timestamp lastStateTs; + private byte[] oldMetadata; + private String newMetadata; + private String transformFunction; + + public SystemTransformBuilder() { + } - public String getString() { - return String.format("transformType: %s, schameName: %s, logicalTableName: %s, newPhysicalTableName: %s, logicalParentName: %s, status: %s" - , String.valueOf(transformType), String.valueOf(schemaName), String.valueOf(logicalTableName), String.valueOf(newPhysicalTableName), - String.valueOf(logicalParentName), String.valueOf(transformStatus)); + public SystemTransformBuilder(SystemTransformRecord systemTransformRecord) { + this.setTransformType(systemTransformRecord.getTransformType()); + this.setTenantId(systemTransformRecord.getTenantId()); + this.setSchemaName(systemTransformRecord.getSchemaName()); + this.setLogicalTableName(systemTransformRecord.getLogicalTableName()); + this.setNewPhysicalTableName(systemTransformRecord.getNewPhysicalTableName()); + this.setLogicalParentName(systemTransformRecord.getLogicalParentName()); + this.setTransformStatus(systemTransformRecord.getTransformStatus()); + this.setTransformJobId(systemTransformRecord.getTransformJobId()); + this.setTransformRetryCount(systemTransformRecord.getTransformRetryCount()); + this.setStartTs(systemTransformRecord.getTransformStartTs()); + 
this.setLastStateTs(systemTransformRecord.getTransformLastStateTs()); + this.setOldMetadata(systemTransformRecord.getOldMetadata()); + this.setNewMetadata(systemTransformRecord.getNewMetadata()); + this.setTransformFunction(systemTransformRecord.getTransformFunction()); } - public PTable.TransformType getTransformType() { - return transformType; + public SystemTransformBuilder setTransformType(PTable.TransformType transformType) { + this.transformType = transformType; + return this; } - public String getSchemaName() { - return schemaName; + public SystemTransformBuilder setSchemaName(String schemaName) { + this.schemaName = schemaName; + return this; } - public String getTenantId() { - return tenantId; + public SystemTransformBuilder setLogicalTableName(String tableName) { + this.logicalTableName = tableName; + return this; } - public String getLogicalTableName() { - return logicalTableName; + public SystemTransformBuilder setTenantId(String tenant) { + this.tenantId = tenant; + return this; } - public String getLogicalParentName() { - return logicalParentName; + public SystemTransformBuilder setLogicalParentName(String name) { + this.logicalParentName = name; + return this; } - public String getNewPhysicalTableName() { - return newPhysicalTableName; + public SystemTransformBuilder setNewPhysicalTableName(String tableName) { + this.newPhysicalTableName = tableName; + return this; } - public String getTransformStatus() { - return transformStatus; + public SystemTransformBuilder setTransformStatus(String transformStatus) { + this.transformStatus = transformStatus; + return this; } - public String getTransformJobId() { - return transformJobId; + public SystemTransformBuilder setTransformJobId(String transformJobId) { + this.transformJobId = transformJobId; + return this; } - public int getTransformRetryCount() { - return transformRetryCount; + public SystemTransformBuilder setOldMetadata(byte[] oldMetadata) { + this.oldMetadata = oldMetadata; + return this; } - public Timestamp getTransformStartTs() { - return startTs; + public SystemTransformBuilder setNewMetadata(String newMetadata) { + this.newMetadata = newMetadata; + return this; } - public Timestamp getTransformLastStateTs() { - return lastStateTs; + public SystemTransformBuilder setTransformRetryCount(int transformRetryCount) { + this.transformRetryCount = transformRetryCount; + return this; } - public byte[] getOldMetadata() { - return oldMetadata; + public SystemTransformBuilder setStartTs(Timestamp startTs) { + this.startTs = startTs; + return this; } - public String getNewMetadata() { - return newMetadata; + + public SystemTransformBuilder setLastStateTs(Timestamp ts) { + this.lastStateTs = ts; + return this; + } + + public SystemTransformBuilder setTransformFunction(String transformFunction) { + this.transformFunction = transformFunction; + return this; } - public String getTransformFunction() { return transformFunction; } - public boolean isActive() { - return (transformStatus.equals(PTable.TransformStatus.STARTED.name()) - || transformStatus.equals(PTable.TransformStatus.CREATED.name()) - || transformStatus.equals(PTable.TransformStatus.PENDING_CUTOVER.name())); + public SystemTransformRecord build() { + Timestamp lastTs = lastStateTs; + if ( + lastTs == null && transformStatus != null + && transformStatus.equals(PTable.TaskStatus.COMPLETED.toString()) + ) { + lastTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + } + return new SystemTransformRecord(transformType, schemaName, logicalTableName, tenantId, + 
newPhysicalTableName, logicalParentName, transformStatus, transformJobId, + transformRetryCount, startTs, lastTs, oldMetadata, newMetadata, transformFunction); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = {"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}, - justification = "lastStateTs and startTs are not used for mutation") - public static class SystemTransformBuilder { - - private PTable.TransformType transformType = PTable.TransformType.METADATA_TRANSFORM; - private String schemaName; - private String tenantId; - private String logicalTableName; - private String logicalParentName; - private String newPhysicalTableName; - private String transformStatus = PTable.TransformStatus.CREATED.name(); - private String transformJobId; - private int transformRetryCount =0; - private Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - private Timestamp lastStateTs; - private byte[] oldMetadata; - private String newMetadata; - private String transformFunction; - - public SystemTransformBuilder() { - - } - - public SystemTransformBuilder(SystemTransformRecord systemTransformRecord) { - this.setTransformType(systemTransformRecord.getTransformType()); - this.setTenantId(systemTransformRecord.getTenantId()); - this.setSchemaName(systemTransformRecord.getSchemaName()); - this.setLogicalTableName(systemTransformRecord.getLogicalTableName()); - this.setNewPhysicalTableName(systemTransformRecord.getNewPhysicalTableName()); - this.setLogicalParentName(systemTransformRecord.getLogicalParentName()); - this.setTransformStatus(systemTransformRecord.getTransformStatus()); - this.setTransformJobId(systemTransformRecord.getTransformJobId()); - this.setTransformRetryCount(systemTransformRecord.getTransformRetryCount()); - this.setStartTs(systemTransformRecord.getTransformStartTs()); - this.setLastStateTs(systemTransformRecord.getTransformLastStateTs()); - this.setOldMetadata(systemTransformRecord.getOldMetadata()); - this.setNewMetadata(systemTransformRecord.getNewMetadata()); - this.setTransformFunction(systemTransformRecord.getTransformFunction()); - } - - public SystemTransformBuilder setTransformType(PTable.TransformType transformType) { - this.transformType = transformType; - return this; - } - - public SystemTransformBuilder setSchemaName(String schemaName) { - this.schemaName = schemaName; - return this; - } - - public SystemTransformBuilder setLogicalTableName(String tableName) { - this.logicalTableName = tableName; - return this; - } - - public SystemTransformBuilder setTenantId(String tenant) { - this.tenantId = tenant; - return this; - } - - public SystemTransformBuilder setLogicalParentName(String name) { - this.logicalParentName = name; - return this; - } - - public SystemTransformBuilder setNewPhysicalTableName(String tableName) { - this.newPhysicalTableName = tableName; - return this; - } - - public SystemTransformBuilder setTransformStatus(String transformStatus) { - this.transformStatus = transformStatus; - return this; - } - - public SystemTransformBuilder setTransformJobId(String transformJobId) { - this.transformJobId = transformJobId; - return this; - } - - public SystemTransformBuilder setOldMetadata(byte[] oldMetadata) { - this.oldMetadata = oldMetadata; - return this; - } - - public SystemTransformBuilder setNewMetadata(String newMetadata) { - this.newMetadata = newMetadata; - return this; - } - - public SystemTransformBuilder setTransformRetryCount(int transformRetryCount) { - this.transformRetryCount = transformRetryCount; - return this; - } - - public 
SystemTransformBuilder setStartTs(Timestamp startTs) { - this.startTs = startTs; - return this; - } - - public SystemTransformBuilder setLastStateTs(Timestamp ts) { - this.lastStateTs = ts; - return this; - } - - public SystemTransformBuilder setTransformFunction(String transformFunction) { - this.transformFunction = transformFunction; - return this; - } - - public SystemTransformRecord build() { - Timestamp lastTs = lastStateTs; - if (lastTs == null && transformStatus != null && transformStatus.equals(PTable.TaskStatus.COMPLETED.toString())) { - lastTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - } - return new SystemTransformRecord(transformType, schemaName, - logicalTableName, tenantId, newPhysicalTableName, logicalParentName, transformStatus, transformJobId, transformRetryCount, startTs, lastTs, - oldMetadata, newMetadata, transformFunction); - } - - public static SystemTransformRecord build(ResultSet resultSet) throws SQLException { - int col = 1; - SystemTransformBuilder builder = new SystemTransformBuilder(); - builder.setTenantId(resultSet.getString(col++)); - builder.setSchemaName(resultSet.getString(col++)); - builder.setLogicalTableName(resultSet.getString(col++)); - builder.setNewPhysicalTableName(resultSet.getString(col++)); - builder.setTransformType(PTable.TransformType.fromSerializedValue(resultSet.getByte(col++))); - builder.setLogicalParentName(resultSet.getString(col++)); - builder.setTransformStatus(resultSet.getString(col++)); - builder.setTransformJobId(resultSet.getString(col++)); - builder.setTransformRetryCount(resultSet.getInt(col++)); - builder.setStartTs(resultSet.getTimestamp(col++)); - builder.setLastStateTs(resultSet.getTimestamp(col++)); - builder.setOldMetadata(resultSet.getBytes(col++)); - builder.setNewMetadata(resultSet.getString(col++)); - builder.setTransformFunction(resultSet.getString(col++)); - - return builder.build(); - } + public static SystemTransformRecord build(ResultSet resultSet) throws SQLException { + int col = 1; + SystemTransformBuilder builder = new SystemTransformBuilder(); + builder.setTenantId(resultSet.getString(col++)); + builder.setSchemaName(resultSet.getString(col++)); + builder.setLogicalTableName(resultSet.getString(col++)); + builder.setNewPhysicalTableName(resultSet.getString(col++)); + builder.setTransformType(PTable.TransformType.fromSerializedValue(resultSet.getByte(col++))); + builder.setLogicalParentName(resultSet.getString(col++)); + builder.setTransformStatus(resultSet.getString(col++)); + builder.setTransformJobId(resultSet.getString(col++)); + builder.setTransformRetryCount(resultSet.getInt(col++)); + builder.setStartTs(resultSet.getTimestamp(col++)); + builder.setLastStateTs(resultSet.getTimestamp(col++)); + builder.setOldMetadata(resultSet.getBytes(col++)); + builder.setNewMetadata(resultSet.getString(col++)); + builder.setTransformFunction(resultSet.getString(col++)); + + return builder.build(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformClient.java index 202040295b9..25f3001414c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,27 @@ */ package org.apache.phoenix.schema.transform; -import com.fasterxml.jackson.core.JsonProcessingException; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSFORM_STATUS; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.DEFAULT_TRANSFORM_MONITOR_ENABLED; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.TRANSFORM_MONITOR_ENABLED; +import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME; +import static org.apache.phoenix.query.QueryServices.INDEX_CREATE_DEFAULT_STATE; +import static org.apache.phoenix.schema.MetaDataClient.CREATE_LINK; +import static org.apache.phoenix.schema.PTableType.INDEX; +import static org.apache.phoenix.schema.PTableType.VIEW; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.util.Bytes; @@ -50,382 +70,368 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.Collections; -import java.util.List; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TASK_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSFORM_STATUS; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.DEFAULT_TRANSFORM_MONITOR_ENABLED; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.TRANSFORM_MONITOR_ENABLED; -import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME; -import static org.apache.phoenix.query.QueryServices.INDEX_CREATE_DEFAULT_STATE; -import static org.apache.phoenix.schema.MetaDataClient.CREATE_LINK; -import static org.apache.phoenix.schema.PTableType.INDEX; -import static org.apache.phoenix.schema.PTableType.VIEW; +import com.fasterxml.jackson.core.JsonProcessingException; public class TransformClient { - private static final Logger LOGGER = LoggerFactory.getLogger(TransformClient.class); - private static final String TRANSFORM_SELECT = "SELECT " + - PhoenixDatabaseMetaData.TENANT_ID + ", " + - PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + - PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + ", " + - PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TRANSFORM_TYPE + ", " + - PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + ", " + - TRANSFORM_STATUS + ", " + - PhoenixDatabaseMetaData.TRANSFORM_JOB_ID + ", " + - PhoenixDatabaseMetaData.TRANSFORM_RETRY_COUNT + ", " + - PhoenixDatabaseMetaData.TRANSFORM_START_TS + ", " + - 
PhoenixDatabaseMetaData.TRANSFORM_LAST_STATE_TS + ", " + - PhoenixDatabaseMetaData.OLD_METADATA + " , " + - PhoenixDatabaseMetaData.NEW_METADATA + " , " + - PhoenixDatabaseMetaData.TRANSFORM_FUNCTION + - " FROM " + PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME; + private static final Logger LOGGER = LoggerFactory.getLogger(TransformClient.class); + private static final String TRANSFORM_SELECT = "SELECT " + PhoenixDatabaseMetaData.TENANT_ID + + ", " + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + + ", " + PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + ", " + + PhoenixDatabaseMetaData.TRANSFORM_TYPE + ", " + PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + + ", " + TRANSFORM_STATUS + ", " + PhoenixDatabaseMetaData.TRANSFORM_JOB_ID + ", " + + PhoenixDatabaseMetaData.TRANSFORM_RETRY_COUNT + ", " + + PhoenixDatabaseMetaData.TRANSFORM_START_TS + ", " + + PhoenixDatabaseMetaData.TRANSFORM_LAST_STATE_TS + ", " + PhoenixDatabaseMetaData.OLD_METADATA + + " , " + PhoenixDatabaseMetaData.NEW_METADATA + " , " + + PhoenixDatabaseMetaData.TRANSFORM_FUNCTION + " FROM " + + PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME; - public static SystemTransformRecord getTransformRecord( - PName schema, PName logicalTableName, PName logicalParentName, PName tenantId, PhoenixConnection connection) throws SQLException { - return getTransformRecordFromDB((schema==null?null:schema.getString()) - , (logicalTableName==null?null:logicalTableName.getString()) - , (logicalParentName==null?null:logicalParentName.getString()) - , (tenantId==null?null:tenantId.getString()), connection); - } + public static SystemTransformRecord getTransformRecord(PName schema, PName logicalTableName, + PName logicalParentName, PName tenantId, PhoenixConnection connection) throws SQLException { + return getTransformRecordFromDB((schema == null ? null : schema.getString()), + (logicalTableName == null ? null : logicalTableName.getString()), + (logicalParentName == null ? null : logicalParentName.getString()), + (tenantId == null ? null : tenantId.getString()), connection); + } - public static SystemTransformRecord getTransformRecord( - String schema, String logicalTableName, String logicalParentName, String tenantId, PhoenixConnection connection) throws SQLException { - return getTransformRecordFromDB(schema, logicalTableName, logicalParentName, tenantId, connection); - } + public static SystemTransformRecord getTransformRecord(String schema, String logicalTableName, + String logicalParentName, String tenantId, PhoenixConnection connection) throws SQLException { + return getTransformRecordFromDB(schema, logicalTableName, logicalParentName, tenantId, + connection); + } - public static SystemTransformRecord getTransformRecordFromDB( - String schema, String logicalTableName, String logicalParentName, String tenantId, PhoenixConnection connection) throws SQLException { - if (SYSTEM_TRANSFORM_NAME.equals(SchemaUtil.getTableName(schema, logicalTableName))) { - // Cannot query itself - return null; - } - String sql = TRANSFORM_SELECT + " WHERE " + - (Strings.isNullOrEmpty(tenantId) ? "" : (PhoenixDatabaseMetaData.TENANT_ID + " ='" + tenantId + "' AND ")) + - (Strings.isNullOrEmpty(schema) ? "" : (PhoenixDatabaseMetaData.TABLE_SCHEM + " ='" + schema + "' AND ")) + - PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + " ='" + logicalTableName + "'" + - (Strings.isNullOrEmpty(logicalParentName) ? 
"" : (" AND " + PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + "='" + logicalParentName + "'")); - try (ResultSet resultSet = ((PhoenixPreparedStatement) connection.prepareStatement( - sql)).executeQuery()) { - if (resultSet.next()) { - return SystemTransformRecord.SystemTransformBuilder.build(resultSet); - } - LOGGER.info("Could not find System.Transform record with " + sql); - return null; - } + public static SystemTransformRecord getTransformRecordFromDB(String schema, + String logicalTableName, String logicalParentName, String tenantId, + PhoenixConnection connection) throws SQLException { + if (SYSTEM_TRANSFORM_NAME.equals(SchemaUtil.getTableName(schema, logicalTableName))) { + // Cannot query itself + return null; } - - private static boolean isTransformNeeded(MetaDataClient.MetaProperties metaProperties, PTable table){ - if (metaProperties.getImmutableStorageSchemeProp()!=null - && metaProperties.getImmutableStorageSchemeProp() != table.getImmutableStorageScheme()) { - // Transform is needed - return true; - } - if (metaProperties.getColumnEncodedBytesProp()!=null - && metaProperties.getColumnEncodedBytesProp() != table.getEncodingScheme()) { - return true; - } - return false; + String sql = TRANSFORM_SELECT + " WHERE " + + (Strings.isNullOrEmpty(tenantId) + ? "" + : (PhoenixDatabaseMetaData.TENANT_ID + " ='" + tenantId + "' AND ")) + + (Strings.isNullOrEmpty(schema) + ? "" + : (PhoenixDatabaseMetaData.TABLE_SCHEM + " ='" + schema + "' AND ")) + + PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + " ='" + logicalTableName + "'" + + (Strings.isNullOrEmpty(logicalParentName) + ? "" + : (" AND " + PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + "='" + logicalParentName + "'")); + try (ResultSet resultSet = + ((PhoenixPreparedStatement) connection.prepareStatement(sql)).executeQuery()) { + if (resultSet.next()) { + return SystemTransformRecord.SystemTransformBuilder.build(resultSet); + } + LOGGER.info("Could not find System.Transform record with " + sql); + return null; } + } - public static boolean checkIsTransformNeeded(MetaDataClient.MetaProperties metaProperties, String schemaName, - PTable table, String logicalTableName, String parentTableName, - String tenantId, PhoenixConnection connection) throws SQLException { - boolean isTransformNeeded = isTransformNeeded(metaProperties, table); - if (isTransformNeeded) { - SystemTransformRecord existingTransform = getTransformRecord(schemaName, logicalTableName, parentTableName, tenantId,connection); - if (existingTransform != null && existingTransform.isActive()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.CANNOT_TRANSFORM_ALREADY_TRANSFORMING_TABLE) - .setMessage(" Only one transform at a time is allowed ") - .setSchemaName(schemaName).setTableName(logicalTableName).build().buildException(); - } - } - return isTransformNeeded; + private static boolean isTransformNeeded(MetaDataClient.MetaProperties metaProperties, + PTable table) { + if ( + metaProperties.getImmutableStorageSchemeProp() != null + && metaProperties.getImmutableStorageSchemeProp() != table.getImmutableStorageScheme() + ) { + // Transform is needed + return true; + } + if ( + metaProperties.getColumnEncodedBytesProp() != null + && metaProperties.getColumnEncodedBytesProp() != table.getEncodingScheme() + ) { + return true; } + return false; + } - protected static SystemTransformRecord getTransformRecord(PhoenixConnection connection, PTableType tableType, PName schemaName, - PName tableName, PName dataTableName, PName tenantId, - PName parentLogicalName) throws 
SQLException { + public static boolean checkIsTransformNeeded(MetaDataClient.MetaProperties metaProperties, + String schemaName, PTable table, String logicalTableName, String parentTableName, + String tenantId, PhoenixConnection connection) throws SQLException { + boolean isTransformNeeded = isTransformNeeded(metaProperties, table); + if (isTransformNeeded) { + SystemTransformRecord existingTransform = + getTransformRecord(schemaName, logicalTableName, parentTableName, tenantId, connection); + if (existingTransform != null && existingTransform.isActive()) { + throw new SQLExceptionInfo.Builder( + SQLExceptionCode.CANNOT_TRANSFORM_ALREADY_TRANSFORMING_TABLE) + .setMessage(" Only one transform at a time is allowed ").setSchemaName(schemaName) + .setTableName(logicalTableName).build().buildException(); + } + } + return isTransformNeeded; + } - if (tableType == PTableType.TABLE) { - return getTransformRecord(schemaName, tableName, null, tenantId, connection); + protected static SystemTransformRecord getTransformRecord(PhoenixConnection connection, + PTableType tableType, PName schemaName, PName tableName, PName dataTableName, PName tenantId, + PName parentLogicalName) throws SQLException { - } else if (tableType == INDEX) { - return getTransformRecord(schemaName, tableName, dataTableName, tenantId, connection); - } else if (tableType == VIEW) { - if (parentLogicalName == null) { - LOGGER.warn("View doesn't seem to have a parent"); - return null; - } - return getTransformRecord(SchemaUtil.getSchemaNameFromFullName(parentLogicalName.getString()), - SchemaUtil.getTableNameFromFullName(parentLogicalName.getString()), null, tenantId == null ? null : tenantId.getString(), connection); - } + if (tableType == PTableType.TABLE) { + return getTransformRecord(schemaName, tableName, null, tenantId, connection); + } else if (tableType == INDEX) { + return getTransformRecord(schemaName, tableName, dataTableName, tenantId, connection); + } else if (tableType == VIEW) { + if (parentLogicalName == null) { + LOGGER.warn("View doesn't seem to have a parent"); return null; + } + return getTransformRecord(SchemaUtil.getSchemaNameFromFullName(parentLogicalName.getString()), + SchemaUtil.getTableNameFromFullName(parentLogicalName.getString()), null, + tenantId == null ? null : tenantId.getString(), connection); } - private static String generateNewTableName(String schema, String logicalTableName, long seqNum) { - // TODO: Support schema versioning as well. - String newName = String.format("%s_%d", SchemaUtil.getTableName(schema, logicalTableName), seqNum); - return newName; - } + return null; + } + + private static String generateNewTableName(String schema, String logicalTableName, long seqNum) { + // TODO: Support schema versioning as well. + String newName = + String.format("%s_%d", SchemaUtil.getTableName(schema, logicalTableName), seqNum); + return newName; + } - public static PTable addTransform(PhoenixConnection connection, String tenantId, PTable table, MetaDataClient.MetaProperties changingProperties, - long sequenceNum, PTable.TransformType transformType) throws SQLException { - try { - String newMetadata = JacksonUtil.getObjectWriter().writeValueAsString(changingProperties); - byte[] oldMetadata = PTableImpl.toProto(table).toByteArray(); - String newPhysicalTableName = ""; - SystemTransformRecord.SystemTransformBuilder transformBuilder = new SystemTransformRecord.SystemTransformBuilder(); - String schema = table.getSchemaName()!=null ? 
table.getSchemaName().getString() : null; - String logicalTableName = table.getTableName().getString(); - transformBuilder.setSchemaName(schema); - transformBuilder.setLogicalTableName(logicalTableName); - transformBuilder.setTenantId(tenantId); - if (table.getType() == INDEX) { - transformBuilder.setLogicalParentName(table.getParentName().getString()); - } - // TODO: add more ways of finding out what transform type this is - transformBuilder.setTransformType(transformType); - // TODO: calculate old and new metadata - transformBuilder.setNewMetadata(newMetadata); - transformBuilder.setOldMetadata(oldMetadata); - PIndexState defaultCreateState = PIndexState.valueOf(connection.getQueryServices().getConfiguration(). - get(INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE)); - if (defaultCreateState == PIndexState.CREATE_DISABLE) { - // Create a paused transform. This can be enabled later by calling TransformTool resume - transformBuilder.setTransformStatus(PTable.TransformStatus.PAUSED.name()); - } - if (Strings.isNullOrEmpty(newPhysicalTableName)) { - newPhysicalTableName = generateNewTableName(schema, logicalTableName, sequenceNum); - } - transformBuilder.setNewPhysicalTableName(newPhysicalTableName); - return addTransform(table, changingProperties, transformBuilder.build(), sequenceNum, connection); - } catch (JsonProcessingException ex) { - LOGGER.error("addTransform failed", ex); - throw new SQLException("Adding transform failed with JsonProcessingException"); - } catch (SQLException ex) { - throw ex; - } catch(Exception ex) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.valueOf("CANNOT_MUTATE_TABLE")) - .setSchemaName((table.getSchemaName() == null? null: table.getSchemaName().getString())) - .setRootCause(ex) - .setTableName(table.getName().getString()).build().buildException(); - } + public static PTable addTransform(PhoenixConnection connection, String tenantId, PTable table, + MetaDataClient.MetaProperties changingProperties, long sequenceNum, + PTable.TransformType transformType) throws SQLException { + try { + String newMetadata = JacksonUtil.getObjectWriter().writeValueAsString(changingProperties); + byte[] oldMetadata = PTableImpl.toProto(table).toByteArray(); + String newPhysicalTableName = ""; + SystemTransformRecord.SystemTransformBuilder transformBuilder = + new SystemTransformRecord.SystemTransformBuilder(); + String schema = table.getSchemaName() != null ? table.getSchemaName().getString() : null; + String logicalTableName = table.getTableName().getString(); + transformBuilder.setSchemaName(schema); + transformBuilder.setLogicalTableName(logicalTableName); + transformBuilder.setTenantId(tenantId); + if (table.getType() == INDEX) { + transformBuilder.setLogicalParentName(table.getParentName().getString()); + } + // TODO: add more ways of finding out what transform type this is + transformBuilder.setTransformType(transformType); + // TODO: calculate old and new metadata + transformBuilder.setNewMetadata(newMetadata); + transformBuilder.setOldMetadata(oldMetadata); + PIndexState defaultCreateState = + PIndexState.valueOf(connection.getQueryServices().getConfiguration() + .get(INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE)); + if (defaultCreateState == PIndexState.CREATE_DISABLE) { + // Create a paused transform. 
This can be enabled later by calling TransformTool resume + transformBuilder.setTransformStatus(PTable.TransformStatus.PAUSED.name()); + } + if (Strings.isNullOrEmpty(newPhysicalTableName)) { + newPhysicalTableName = generateNewTableName(schema, logicalTableName, sequenceNum); + } + transformBuilder.setNewPhysicalTableName(newPhysicalTableName); + return addTransform(table, changingProperties, transformBuilder.build(), sequenceNum, + connection); + } catch (JsonProcessingException ex) { + LOGGER.error("addTransform failed", ex); + throw new SQLException("Adding transform failed with JsonProcessingException"); + } catch (SQLException ex) { + throw ex; + } catch (Exception ex) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.valueOf("CANNOT_MUTATE_TABLE")) + .setSchemaName((table.getSchemaName() == null ? null : table.getSchemaName().getString())) + .setRootCause(ex).setTableName(table.getName().getString()).build().buildException(); } + } - protected static PTable addTransform( - PTable table, MetaDataClient.MetaProperties changedProps, SystemTransformRecord systemTransformParams, - long sequenceNum, PhoenixConnection connection) throws Exception { - PName newTableName = PNameFactory.newName(systemTransformParams.getNewPhysicalTableName()); - PName newTableNameWithoutSchema = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(systemTransformParams.getNewPhysicalTableName())); - PIndexState defaultCreateState = PIndexState.valueOf(connection.getQueryServices().getConfiguration(). - get(INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE)); - PTable newTable = new PTableImpl.Builder() - .setTableName(newTableNameWithoutSchema) - .setParentTableName(table.getParentTableName()) - .setBaseTableLogicalName(table.getBaseTableLogicalName()) - .setPhysicalTableName(newTableNameWithoutSchema) - .setState(defaultCreateState) - .setAllColumns(table.getColumns()) - .setAppendOnlySchema(table.isAppendOnlySchema()) - .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) - .setBaseColumnCount(table.getBaseColumnCount()) - .setBucketNum(table.getBucketNum()) - .setDefaultFamilyName(table.getDefaultFamilyName()) - .setDisableWAL(table.isWALDisabled()) - .setEstimatedSize(table.getEstimatedSize()) - .setFamilies(table.getColumnFamilies()) - .setImmutableRows(table.isImmutableRows()) - .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) - .setIndexType(table.getIndexType()) - .setIndexes(Collections.emptyList()) - .setName(newTableName) - .setMultiTenant(table.isMultiTenant()) - .setParentName(table.getParentName()) - .setParentSchemaName(table.getParentSchemaName()) - .setNamespaceMapped(table.isNamespaceMapped()) - .setSchemaName(table.getSchemaName()) - .setPkColumns(table.getPKColumns()) - .setPkName(table.getPKName()) - .setRowKeySchema(table.getRowKeySchema()) - .setStoreNulls(table.getStoreNulls()) - .setTenantId(table.getTenantId()) - .setType(table.getType()) - // SchemaExtractor uses physical name to get the table descriptor from. 
So we use the existing table here - .setPhysicalNames(ImmutableList.copyOf(table.getPhysicalNames())) - .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) - .setTransactionProvider(table.getTransactionProvider()) - .setUseStatsForParallelization(table.useStatsForParallelization()) - .setSchemaVersion(table.getSchemaVersion()) - .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) - .setStreamingTopicName(table.getStreamingTopicName()) - .setMaxLookbackAge(table.getMaxLookbackAge()) - // Transformables - .setImmutableStorageScheme( - (changedProps.getImmutableStorageSchemeProp() != null? changedProps.getImmutableStorageSchemeProp():table.getImmutableStorageScheme())) - .setQualifierEncodingScheme( - (changedProps.getColumnEncodedBytesProp() != null? changedProps.getColumnEncodedBytesProp() : table.getEncodingScheme())) - .build(); - SchemaExtractionProcessor schemaExtractionProcessor = new SchemaExtractionProcessor(systemTransformParams.getTenantId(), - connection.getQueryServices().getConfiguration(), newTable, true); - String ddl = schemaExtractionProcessor.process(); - LOGGER.info("Creating transforming table via " + ddl); - connection.createStatement().execute(ddl); - upsertTransform(systemTransformParams, connection); + protected static PTable addTransform(PTable table, MetaDataClient.MetaProperties changedProps, + SystemTransformRecord systemTransformParams, long sequenceNum, PhoenixConnection connection) + throws Exception { + PName newTableName = PNameFactory.newName(systemTransformParams.getNewPhysicalTableName()); + PName newTableNameWithoutSchema = PNameFactory.newName( + SchemaUtil.getTableNameFromFullName(systemTransformParams.getNewPhysicalTableName())); + PIndexState defaultCreateState = + PIndexState.valueOf(connection.getQueryServices().getConfiguration() + .get(INDEX_CREATE_DEFAULT_STATE, QueryServicesOptions.DEFAULT_CREATE_INDEX_STATE)); + PTable newTable = new PTableImpl.Builder().setTableName(newTableNameWithoutSchema) + .setParentTableName(table.getParentTableName()) + .setBaseTableLogicalName(table.getBaseTableLogicalName()) + .setPhysicalTableName(newTableNameWithoutSchema).setState(defaultCreateState) + .setAllColumns(table.getColumns()).setAppendOnlySchema(table.isAppendOnlySchema()) + .setAutoPartitionSeqName(table.getAutoPartitionSeqName()) + .setBaseColumnCount(table.getBaseColumnCount()).setBucketNum(table.getBucketNum()) + .setDefaultFamilyName(table.getDefaultFamilyName()).setDisableWAL(table.isWALDisabled()) + .setEstimatedSize(table.getEstimatedSize()).setFamilies(table.getColumnFamilies()) + .setImmutableRows(table.isImmutableRows()) + .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) + .setIndexType(table.getIndexType()).setIndexes(Collections. emptyList()) + .setName(newTableName).setMultiTenant(table.isMultiTenant()) + .setParentName(table.getParentName()).setParentSchemaName(table.getParentSchemaName()) + .setNamespaceMapped(table.isNamespaceMapped()).setSchemaName(table.getSchemaName()) + .setPkColumns(table.getPKColumns()).setPkName(table.getPKName()) + .setRowKeySchema(table.getRowKeySchema()).setStoreNulls(table.getStoreNulls()) + .setTenantId(table.getTenantId()).setType(table.getType()) + // SchemaExtractor uses physical name to get the table descriptor from. 
So we use the existing + // table here + .setPhysicalNames(ImmutableList.copyOf(table.getPhysicalNames())) + .setUpdateCacheFrequency(table.getUpdateCacheFrequency()) + .setTransactionProvider(table.getTransactionProvider()) + .setUseStatsForParallelization(table.useStatsForParallelization()) + .setSchemaVersion(table.getSchemaVersion()) + .setIsChangeDetectionEnabled(table.isChangeDetectionEnabled()) + .setStreamingTopicName(table.getStreamingTopicName()) + .setMaxLookbackAge(table.getMaxLookbackAge()) + // Transformables + .setImmutableStorageScheme((changedProps.getImmutableStorageSchemeProp() != null + ? changedProps.getImmutableStorageSchemeProp() + : table.getImmutableStorageScheme())) + .setQualifierEncodingScheme((changedProps.getColumnEncodedBytesProp() != null + ? changedProps.getColumnEncodedBytesProp() + : table.getEncodingScheme())) + .build(); + SchemaExtractionProcessor schemaExtractionProcessor = + new SchemaExtractionProcessor(systemTransformParams.getTenantId(), + connection.getQueryServices().getConfiguration(), newTable, true); + String ddl = schemaExtractionProcessor.process(); + LOGGER.info("Creating transforming table via " + ddl); + connection.createStatement().execute(ddl); + upsertTransform(systemTransformParams, connection); - // Add row linking from old table row to new table row - addTransformTableLink(connection, systemTransformParams.getTenantId(), systemTransformParams.getSchemaName(), - systemTransformParams.getLogicalTableName(), newTableName, sequenceNum); + // Add row linking from old table row to new table row + addTransformTableLink(connection, systemTransformParams.getTenantId(), + systemTransformParams.getSchemaName(), systemTransformParams.getLogicalTableName(), + newTableName, sequenceNum); - // Also add the transforming new table link to views - TableViewFinderResult childViewsResult = ViewUtil.findChildViews(connection, systemTransformParams.getTenantId() - , systemTransformParams.getSchemaName(), systemTransformParams.getLogicalTableName()); - for (TableInfo view : childViewsResult.getLinks()) { - addTransformTableLink(connection, view.getTenantId()==null? null: Bytes.toString(view.getTenantId()), - (view.getSchemaName()==null? null: Bytes.toString(view.getSchemaName())), Bytes.toString(view.getTableName()) - , newTableName, sequenceNum); - } + // Also add the transforming new table link to views + TableViewFinderResult childViewsResult = + ViewUtil.findChildViews(connection, systemTransformParams.getTenantId(), + systemTransformParams.getSchemaName(), systemTransformParams.getLogicalTableName()); + for (TableInfo view : childViewsResult.getLinks()) { + addTransformTableLink(connection, + view.getTenantId() == null ? null : Bytes.toString(view.getTenantId()), + (view.getSchemaName() == null ? 
null : Bytes.toString(view.getSchemaName())), + Bytes.toString(view.getTableName()), newTableName, sequenceNum); + } - if (defaultCreateState != PIndexState.CREATE_DISABLE) { - // add a monitoring task - addTransformMonitorTask(connection, connection.getQueryServices().getConfiguration(), systemTransformParams, - PTable.TaskStatus.CREATED, new Timestamp(EnvironmentEdgeManager.currentTimeMillis()), null); - } else { - LOGGER.info("Transform will not be monitored until it is resumed again."); - } - return newTable; + if (defaultCreateState != PIndexState.CREATE_DISABLE) { + // add a monitoring task + addTransformMonitorTask(connection, connection.getQueryServices().getConfiguration(), + systemTransformParams, PTable.TaskStatus.CREATED, + new Timestamp(EnvironmentEdgeManager.currentTimeMillis()), null); + } else { + LOGGER.info("Transform will not be monitored until it is resumed again."); } + return newTable; + } - public static void upsertTransform( - SystemTransformRecord systemTransformParams, PhoenixConnection connection) throws SQLException { - try (PreparedStatement stmt = connection.prepareStatement("UPSERT INTO " + - PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME + " ( " + - PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + - PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TENANT_ID + "," + - PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + ", " + - PhoenixDatabaseMetaData.TRANSFORM_TYPE + ", " + - PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + ", " + - TRANSFORM_STATUS + ", " + - PhoenixDatabaseMetaData.TRANSFORM_JOB_ID + ", " + - PhoenixDatabaseMetaData.TRANSFORM_RETRY_COUNT + ", " + - PhoenixDatabaseMetaData.TRANSFORM_START_TS + ", " + - PhoenixDatabaseMetaData.TRANSFORM_LAST_STATE_TS + ", " + - PhoenixDatabaseMetaData.OLD_METADATA + " , " + - PhoenixDatabaseMetaData.NEW_METADATA + " , " + - PhoenixDatabaseMetaData.TRANSFORM_FUNCTION + - " ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")) { - int colNum = 1; - if (systemTransformParams.getSchemaName() != null) { - stmt.setString(colNum++, systemTransformParams.getSchemaName()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } - stmt.setString(colNum++, systemTransformParams.getLogicalTableName()); - if (systemTransformParams.getTenantId() != null) { - stmt.setString(colNum++, systemTransformParams.getTenantId()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } - stmt.setString(colNum++, systemTransformParams.getNewPhysicalTableName()); + public static void upsertTransform(SystemTransformRecord systemTransformParams, + PhoenixConnection connection) throws SQLException { + try (PreparedStatement stmt = connection.prepareStatement("UPSERT INTO " + + PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME + " ( " + PhoenixDatabaseMetaData.TABLE_SCHEM + + ", " + PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + ", " + PhoenixDatabaseMetaData.TENANT_ID + + "," + PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + ", " + + PhoenixDatabaseMetaData.TRANSFORM_TYPE + ", " + PhoenixDatabaseMetaData.LOGICAL_PARENT_NAME + + ", " + TRANSFORM_STATUS + ", " + PhoenixDatabaseMetaData.TRANSFORM_JOB_ID + ", " + + PhoenixDatabaseMetaData.TRANSFORM_RETRY_COUNT + ", " + + PhoenixDatabaseMetaData.TRANSFORM_START_TS + ", " + + PhoenixDatabaseMetaData.TRANSFORM_LAST_STATE_TS + ", " + + PhoenixDatabaseMetaData.OLD_METADATA + " , " + PhoenixDatabaseMetaData.NEW_METADATA + " , " + + PhoenixDatabaseMetaData.TRANSFORM_FUNCTION + " ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")) { + int colNum = 1; + if (systemTransformParams.getSchemaName() != null) { + 
stmt.setString(colNum++, systemTransformParams.getSchemaName()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } + stmt.setString(colNum++, systemTransformParams.getLogicalTableName()); + if (systemTransformParams.getTenantId() != null) { + stmt.setString(colNum++, systemTransformParams.getTenantId()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } + stmt.setString(colNum++, systemTransformParams.getNewPhysicalTableName()); - stmt.setInt(colNum++, systemTransformParams.getTransformType().getSerializedValue()); - if (systemTransformParams.getLogicalParentName() != null) { - stmt.setString(colNum++, systemTransformParams.getLogicalParentName()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } + stmt.setInt(colNum++, systemTransformParams.getTransformType().getSerializedValue()); + if (systemTransformParams.getLogicalParentName() != null) { + stmt.setString(colNum++, systemTransformParams.getLogicalParentName()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } - stmt.setString(colNum++, systemTransformParams.getTransformStatus()); + stmt.setString(colNum++, systemTransformParams.getTransformStatus()); - if (systemTransformParams.getTransformJobId() != null) { - stmt.setString(colNum++, systemTransformParams.getTransformJobId()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } - stmt.setInt(colNum++, systemTransformParams.getTransformRetryCount()); + if (systemTransformParams.getTransformJobId() != null) { + stmt.setString(colNum++, systemTransformParams.getTransformJobId()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } + stmt.setInt(colNum++, systemTransformParams.getTransformRetryCount()); - stmt.setTimestamp(colNum++, systemTransformParams.getTransformStartTs()); + stmt.setTimestamp(colNum++, systemTransformParams.getTransformStartTs()); - if (systemTransformParams.getTransformLastStateTs() != null) { - stmt.setTimestamp(colNum++, systemTransformParams.getTransformLastStateTs()); - } else { - stmt.setNull(colNum++, Types.TIMESTAMP); - } - if (systemTransformParams.getOldMetadata() != null) { - stmt.setBytes(colNum++, systemTransformParams.getOldMetadata()); - } else { - stmt.setNull(colNum++, Types.VARBINARY); - } - if (systemTransformParams.getNewMetadata() != null) { - stmt.setString(colNum++, systemTransformParams.getNewMetadata()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } - if (systemTransformParams.getTransformFunction() != null) { - stmt.setString(colNum++, systemTransformParams.getTransformFunction()); - } else { - stmt.setNull(colNum++, Types.VARCHAR); - } - LOGGER.info("Adding transform type: " - + systemTransformParams.getString()); - stmt.execute(); - } + if (systemTransformParams.getTransformLastStateTs() != null) { + stmt.setTimestamp(colNum++, systemTransformParams.getTransformLastStateTs()); + } else { + stmt.setNull(colNum++, Types.TIMESTAMP); + } + if (systemTransformParams.getOldMetadata() != null) { + stmt.setBytes(colNum++, systemTransformParams.getOldMetadata()); + } else { + stmt.setNull(colNum++, Types.VARBINARY); + } + if (systemTransformParams.getNewMetadata() != null) { + stmt.setString(colNum++, systemTransformParams.getNewMetadata()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } + if (systemTransformParams.getTransformFunction() != null) { + stmt.setString(colNum++, systemTransformParams.getTransformFunction()); + } else { + stmt.setNull(colNum++, Types.VARCHAR); + } + LOGGER.info("Adding transform type: " + systemTransformParams.getString()); + stmt.execute(); } + } - private static 
void addTransformTableLink(Connection connection, String tenantId, String schemaName, String tableName, - PName newTableName, long sequenceNum) throws SQLException { - PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK); - linkStatement.setString(1, tenantId); - linkStatement.setString(2, schemaName); - linkStatement.setString(3,tableName); - linkStatement.setString(4, newTableName.getString()); - linkStatement.setByte(5, PTable.LinkType.TRANSFORMING_NEW_TABLE.getSerializedValue()); - linkStatement.setLong(6, sequenceNum); - linkStatement.setString(7, PTableType.TABLE.getSerializedValue()); - linkStatement.execute(); - } + private static void addTransformTableLink(Connection connection, String tenantId, + String schemaName, String tableName, PName newTableName, long sequenceNum) throws SQLException { + PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK); + linkStatement.setString(1, tenantId); + linkStatement.setString(2, schemaName); + linkStatement.setString(3, tableName); + linkStatement.setString(4, newTableName.getString()); + linkStatement.setByte(5, PTable.LinkType.TRANSFORMING_NEW_TABLE.getSerializedValue()); + linkStatement.setLong(6, sequenceNum); + linkStatement.setString(7, PTableType.TABLE.getSerializedValue()); + linkStatement.execute(); + } - public static void addTransformMonitorTask(PhoenixConnection connection, Configuration configuration, SystemTransformRecord systemTransformRecord, - PTable.TaskStatus taskStatus, Timestamp startTimestamp, Timestamp endTimestamp) throws IOException, SQLException { - boolean transformMonitorEnabled = configuration.getBoolean(TRANSFORM_MONITOR_ENABLED, DEFAULT_TRANSFORM_MONITOR_ENABLED); - if (!transformMonitorEnabled) { - LOGGER.warn("TransformMonitor is not enabled. Monitoring/retrying TransformTool and doing cutover will not be done automatically"); - return; - } + public static void addTransformMonitorTask(PhoenixConnection connection, + Configuration configuration, SystemTransformRecord systemTransformRecord, + PTable.TaskStatus taskStatus, Timestamp startTimestamp, Timestamp endTimestamp) + throws IOException, SQLException { + boolean transformMonitorEnabled = + configuration.getBoolean(TRANSFORM_MONITOR_ENABLED, DEFAULT_TRANSFORM_MONITOR_ENABLED); + if (!transformMonitorEnabled) { + LOGGER.warn( + "TransformMonitor is not enabled. 
Monitoring/retrying TransformTool and doing cutover will not be done automatically"); + return; + } - List sysTaskUpsertMutations = Task.getMutationsForAddTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(connection) - .setTaskType(PTable.TaskType.TRANSFORM_MONITOR) - .setTenantId(systemTransformRecord.getTenantId()) - .setSchemaName(systemTransformRecord.getSchemaName()) - .setTableName(systemTransformRecord.getLogicalTableName()) - .setTaskStatus(taskStatus.toString()) - .setStartTs(startTimestamp) - .setEndTs(endTimestamp) - .setAccessCheckEnabled(true) - .build()); - byte[] rowKey = sysTaskUpsertMutations - .get(0).getRow(); - MetaDataProtocol.MetaDataMutationResult metaDataMutationResult = - Task.taskMetaDataCoprocessorExec(connection, rowKey, - new TaskMetaDataServiceCallBack(sysTaskUpsertMutations)); - if (MetaDataProtocol.MutationCode.UNABLE_TO_UPSERT_TASK.equals( - metaDataMutationResult.getMutationCode())) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_UPSERT_TASK) - .setSchemaName(SYSTEM_SCHEMA_NAME) - .setTableName(SYSTEM_TASK_TABLE).build().buildException(); - } + List sysTaskUpsertMutations = + Task.getMutationsForAddTask(new SystemTaskParams.SystemTaskParamsBuilder().setConn(connection) + .setTaskType(PTable.TaskType.TRANSFORM_MONITOR) + .setTenantId(systemTransformRecord.getTenantId()) + .setSchemaName(systemTransformRecord.getSchemaName()) + .setTableName(systemTransformRecord.getLogicalTableName()) + .setTaskStatus(taskStatus.toString()).setStartTs(startTimestamp).setEndTs(endTimestamp) + .setAccessCheckEnabled(true).build()); + byte[] rowKey = sysTaskUpsertMutations.get(0).getRow(); + MetaDataProtocol.MetaDataMutationResult metaDataMutationResult = + Task.taskMetaDataCoprocessorExec(connection, rowKey, + new TaskMetaDataServiceCallBack(sysTaskUpsertMutations)); + if ( + MetaDataProtocol.MutationCode.UNABLE_TO_UPSERT_TASK + .equals(metaDataMutationResult.getMutationCode()) + ) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_UPSERT_TASK) + .setSchemaName(SYSTEM_SCHEMA_NAME).setTableName(SYSTEM_TASK_TABLE).build().buildException(); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformMaintainer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformMaintainer.java index 4a20afea1f3..2f7d5834c63 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformMaintainer.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/transform/TransformMaintainer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,22 @@ */ package org.apache.phoenix.schema.transform; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; -import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.ByteStringer; @@ -30,7 +41,6 @@ import org.apache.phoenix.coprocessor.generated.ServerCachingProtos; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.ExpressionType; - import org.apache.phoenix.hbase.index.ValueGetter; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; @@ -46,503 +56,551 @@ import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.RowKeySchema; import org.apache.phoenix.schema.SaltingUtil; - import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TrustedByteArrayOutputStream; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - - public class TransformMaintainer extends IndexMaintainer { - private boolean isMultiTenant; - // expressions that are not present in the row key of the old table, the expression can also refer to a regular column - private List newTableExpressions; - private Set newTableColumns; - - private List newTableColumnTypes; - private int newTableColumnCount; - private byte[] newTableName; - private int nNewTableSaltBuckets; - private byte[] oldTableEmptyKeyValueCF; - private ImmutableBytesPtr emptyKeyValueCFPtr; - private int nOldTableCFs; - private boolean newTableWALDisabled; - private boolean newTableImmutableRows; - private Set allColumns; - - // Transient state - private final boolean isOldTableSalted; - private final RowKeySchema oldTableRowKeySchema; - - private int estimatedNewTableRowKeyBytes; - private ColumnReference newTableEmptyKeyValueRef; - private ColumnReference oldTableEmptyKeyValueRef; - private boolean newTableRowKeyOrderOptimizable; - - 
private PTable.QualifierEncodingScheme newTableEncodingScheme; - private PTable.ImmutableStorageScheme newTableImmutableStorageScheme; - private PTable.QualifierEncodingScheme oldTableEncodingScheme; - private PTable.ImmutableStorageScheme oldTableImmutableStorageScheme; - /* - * The first part of the pair is column family name - * and second part is the column name. The reason we need to track this state is because for certain storage schemes - * like ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS, the column for which we need to generate an new - * table put/delete is different from the old columns in the phoenix schema. - */ - private Set> newTableColumnsInfo; - /* - * Map of covered columns where a key is column reference for a column in the data table - * and value is column reference for corresponding column in the new table. - */ - private Map coveredColumnsMap; - - private String logicalNewTableName; - - public static TransformMaintainer create(PTable oldTable, PTable newTable, PhoenixConnection connection) { - if (oldTable.getType() == PTableType.INDEX) { - throw new IllegalArgumentException(); - } - TransformMaintainer maintainer = new TransformMaintainer(oldTable, newTable, connection); - return maintainer; - } - - private TransformMaintainer(RowKeySchema oldRowKeySchema, boolean isOldTableSalted) { - super(oldRowKeySchema, isOldTableSalted); - this.oldTableRowKeySchema = oldRowKeySchema; - this.isOldTableSalted = isOldTableSalted; + private boolean isMultiTenant; + // expressions that are not present in the row key of the old table, the expression can also refer + // to a regular column + private List newTableExpressions; + private Set newTableColumns; + + private List newTableColumnTypes; + private int newTableColumnCount; + private byte[] newTableName; + private int nNewTableSaltBuckets; + private byte[] oldTableEmptyKeyValueCF; + private ImmutableBytesPtr emptyKeyValueCFPtr; + private int nOldTableCFs; + private boolean newTableWALDisabled; + private boolean newTableImmutableRows; + private Set allColumns; + + // Transient state + private final boolean isOldTableSalted; + private final RowKeySchema oldTableRowKeySchema; + + private int estimatedNewTableRowKeyBytes; + private ColumnReference newTableEmptyKeyValueRef; + private ColumnReference oldTableEmptyKeyValueRef; + private boolean newTableRowKeyOrderOptimizable; + + private PTable.QualifierEncodingScheme newTableEncodingScheme; + private PTable.ImmutableStorageScheme newTableImmutableStorageScheme; + private PTable.QualifierEncodingScheme oldTableEncodingScheme; + private PTable.ImmutableStorageScheme oldTableImmutableStorageScheme; + /* + * The first part of the pair is column family name and second part is the column name. The reason + * we need to track this state is because for certain storage schemes like + * ImmutableStorageScheme#SINGLE_CELL_ARRAY_WITH_OFFSETS, the column for which we need to generate + * an new table put/delete is different from the old columns in the phoenix schema. + */ + private Set> newTableColumnsInfo; + /* + * Map of covered columns where a key is column reference for a column in the data table and value + * is column reference for corresponding column in the new table. 
+ */ + private Map coveredColumnsMap; + + private String logicalNewTableName; + + public static TransformMaintainer create(PTable oldTable, PTable newTable, + PhoenixConnection connection) { + if (oldTable.getType() == PTableType.INDEX) { + throw new IllegalArgumentException(); } - - public Set getAllColumns() { - return allColumns; - } - - public Set getCoveredColumns() { - return coveredColumnsMap.keySet(); - } - - private TransformMaintainer(final PTable oldTable, final PTable newTable, PhoenixConnection connection) { - this(oldTable.getRowKeySchema(), oldTable.getBucketNum() != null); - this.newTableRowKeyOrderOptimizable = newTable.rowKeyOrderOptimizable(); - this.isMultiTenant = oldTable.isMultiTenant(); - - this.newTableEncodingScheme = newTable.getEncodingScheme() == null ? PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : newTable.getEncodingScheme(); - this.newTableImmutableStorageScheme = newTable.getImmutableStorageScheme() == null ? PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : newTable.getImmutableStorageScheme(); - this.oldTableEncodingScheme = oldTable.getEncodingScheme() == null ? PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : oldTable.getEncodingScheme(); - this.oldTableImmutableStorageScheme = oldTable.getImmutableStorageScheme() == null ? PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : oldTable.getImmutableStorageScheme(); - - this.newTableName = newTable.getPhysicalName().getBytes(); - boolean newTableWALDisabled = newTable.isWALDisabled(); - int nNewTableColumns = newTable.getColumns().size(); - int nNewTablePKColumns = newTable.getPKColumns().size(); - - List oldTablePKColumns = oldTable.getPKColumns(); - - this.newTableColumnCount = oldTablePKColumns.size(); - - this.newTableColumnTypes = Lists.newArrayListWithExpectedSize(nNewTablePKColumns); - this.newTableExpressions = Lists.newArrayListWithExpectedSize(nNewTableColumns); - this.coveredColumnsMap = Maps.newHashMapWithExpectedSize(nNewTableColumns - nNewTablePKColumns); - this.nNewTableSaltBuckets = newTable.getBucketNum() == null ? 
0 : newTable.getBucketNum(); - this.oldTableEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(oldTable); - this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(newTable); - this.nOldTableCFs = oldTable.getColumnFamilies().size(); - this.newTableWALDisabled = newTableWALDisabled; - this.newTableImmutableRows = newTable.isImmutableRows(); - this.newTableColumnsInfo = Sets.newHashSetWithExpectedSize(nNewTableColumns - nNewTablePKColumns); - - for (int i = 0; i < newTable.getColumnFamilies().size(); i++) { - PColumnFamily family = newTable.getColumnFamilies().get(i); - for (PColumn newColumn : family.getColumns()) { - PColumn oldColumn = getColumnOrNull(oldTable, newColumn.getName().getString(), newColumn.getFamilyName().getString()); - // This can happen during deletion where we don't need covered columns - if (oldColumn != null) { - byte[] oldColumnCq = oldColumn.getColumnQualifierBytes(); - byte[] newColumnCq = newColumn.getColumnQualifierBytes(); - this.coveredColumnsMap.put(new ColumnReference(oldColumn.getFamilyName().getBytes(), oldColumnCq), - new ColumnReference(newColumn.getFamilyName().getBytes(), newColumnCq)); - } - } + TransformMaintainer maintainer = new TransformMaintainer(oldTable, newTable, connection); + return maintainer; + } + + private TransformMaintainer(RowKeySchema oldRowKeySchema, boolean isOldTableSalted) { + super(oldRowKeySchema, isOldTableSalted); + this.oldTableRowKeySchema = oldRowKeySchema; + this.isOldTableSalted = isOldTableSalted; + } + + public Set getAllColumns() { + return allColumns; + } + + public Set getCoveredColumns() { + return coveredColumnsMap.keySet(); + } + + private TransformMaintainer(final PTable oldTable, final PTable newTable, + PhoenixConnection connection) { + this(oldTable.getRowKeySchema(), oldTable.getBucketNum() != null); + this.newTableRowKeyOrderOptimizable = newTable.rowKeyOrderOptimizable(); + this.isMultiTenant = oldTable.isMultiTenant(); + + this.newTableEncodingScheme = newTable.getEncodingScheme() == null + ? PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : newTable.getEncodingScheme(); + this.newTableImmutableStorageScheme = newTable.getImmutableStorageScheme() == null + ? PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : newTable.getImmutableStorageScheme(); + this.oldTableEncodingScheme = oldTable.getEncodingScheme() == null + ? PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : oldTable.getEncodingScheme(); + this.oldTableImmutableStorageScheme = oldTable.getImmutableStorageScheme() == null + ? PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : oldTable.getImmutableStorageScheme(); + + this.newTableName = newTable.getPhysicalName().getBytes(); + boolean newTableWALDisabled = newTable.isWALDisabled(); + int nNewTableColumns = newTable.getColumns().size(); + int nNewTablePKColumns = newTable.getPKColumns().size(); + + List oldTablePKColumns = oldTable.getPKColumns(); + + this.newTableColumnCount = oldTablePKColumns.size(); + + this.newTableColumnTypes = Lists.newArrayListWithExpectedSize(nNewTablePKColumns); + this.newTableExpressions = Lists.newArrayListWithExpectedSize(nNewTableColumns); + this.coveredColumnsMap = Maps.newHashMapWithExpectedSize(nNewTableColumns - nNewTablePKColumns); + this.nNewTableSaltBuckets = newTable.getBucketNum() == null ? 
0 : newTable.getBucketNum(); + this.oldTableEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(oldTable); + this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(newTable); + this.nOldTableCFs = oldTable.getColumnFamilies().size(); + this.newTableWALDisabled = newTableWALDisabled; + this.newTableImmutableRows = newTable.isImmutableRows(); + this.newTableColumnsInfo = + Sets.newHashSetWithExpectedSize(nNewTableColumns - nNewTablePKColumns); + + for (int i = 0; i < newTable.getColumnFamilies().size(); i++) { + PColumnFamily family = newTable.getColumnFamilies().get(i); + for (PColumn newColumn : family.getColumns()) { + PColumn oldColumn = getColumnOrNull(oldTable, newColumn.getName().getString(), + newColumn.getFamilyName().getString()); + // This can happen during deletion where we don't need covered columns + if (oldColumn != null) { + byte[] oldColumnCq = oldColumn.getColumnQualifierBytes(); + byte[] newColumnCq = newColumn.getColumnQualifierBytes(); + this.coveredColumnsMap.put( + new ColumnReference(oldColumn.getFamilyName().getBytes(), oldColumnCq), + new ColumnReference(newColumn.getFamilyName().getBytes(), newColumnCq)); } - this.logicalNewTableName = newTable.getName().getString(); - initCachedState(); + } } - - public static PColumn getColumnOrNull(PTable table, String columnName, String familyName) { - PColumnFamily family; - try { - family = table.getColumnFamily(familyName); - } catch (ColumnFamilyNotFoundException e) { - return null; - } - try { - return family.getPColumnForColumnName(columnName); - } catch (ColumnNotFoundException e) { - return null; - } + this.logicalNewTableName = newTable.getName().getString(); + initCachedState(); + } + + public static PColumn getColumnOrNull(PTable table, String columnName, String familyName) { + PColumnFamily family; + try { + family = table.getColumnFamily(familyName); + } catch (ColumnFamilyNotFoundException e) { + return null; } - - public Set getAllColumnsForDataTable() { - Set result = Sets.newLinkedHashSetWithExpectedSize(newTableExpressions.size() + coveredColumnsMap.size()); - result.addAll(newTableColumns); - for (ColumnReference colRef : coveredColumnsMap.keySet()) { - if (oldTableImmutableStorageScheme == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - result.add(colRef); - } else { - result.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); - } - } - return result; + try { + return family.getPColumnForColumnName(columnName); + } catch (ColumnNotFoundException e) { + return null; } - - public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) { - return this.buildDataRowKey(indexRowKeyPtr, viewConstants, false); + } + + public Set getAllColumnsForDataTable() { + Set result = + Sets.newLinkedHashSetWithExpectedSize(newTableExpressions.size() + coveredColumnsMap.size()); + result.addAll(newTableColumns); + for (ColumnReference colRef : coveredColumnsMap.keySet()) { + if (oldTableImmutableStorageScheme == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + result.add(colRef); + } else { + result.add(new ColumnReference(colRef.getFamily(), + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); + } } - - /* - * Build the old table row key - */ - public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants, - boolean truncateEndSeparators) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - TrustedByteArrayOutputStream stream = new 
TrustedByteArrayOutputStream(estimatedNewTableRowKeyBytes); - DataOutput output = new DataOutputStream(stream); - - try { - int dataPosOffset = 0; - int maxRowKeyOffset = indexRowKeyPtr.getLength(); - - oldTableRowKeySchema.iterator(indexRowKeyPtr, ptr, 0); - // The oldTableRowKeySchema includes the salt byte field, - while (oldTableRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset) != null) { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - if (!oldTableRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { - output.writeByte(SchemaUtil.getSeparatorByte(oldTableRowKeySchema.rowKeyOrderOptimizable(), ptr.getLength()==0 - , oldTableRowKeySchema.getField(dataPosOffset))); - } - dataPosOffset++; - } - - byte[] oldTableRowKey = stream.getBuffer(); - return oldTableRowKey; - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + return result; + } + + public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) { + return this.buildDataRowKey(indexRowKeyPtr, viewConstants, false); + } + + /* + * Build the old table row key + */ + public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants, + boolean truncateEndSeparators) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + TrustedByteArrayOutputStream stream = + new TrustedByteArrayOutputStream(estimatedNewTableRowKeyBytes); + DataOutput output = new DataOutputStream(stream); + + try { + int dataPosOffset = 0; + int maxRowKeyOffset = indexRowKeyPtr.getLength(); + + oldTableRowKeySchema.iterator(indexRowKeyPtr, ptr, 0); + // The oldTableRowKeySchema includes the salt byte field, + while (oldTableRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset) != null) { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + if (!oldTableRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { + output + .writeByte(SchemaUtil.getSeparatorByte(oldTableRowKeySchema.rowKeyOrderOptimizable(), + ptr.getLength() == 0, oldTableRowKeySchema.getField(dataPosOffset))); } + dataPosOffset++; + } + + byte[] oldTableRowKey = stream.getBuffer(); + return oldTableRowKey; + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } } - - /** - * Init calculated state reading/creating - */ - private void initCachedState() { - this.allColumns = Sets.newLinkedHashSetWithExpectedSize(newTableExpressions.size() + coveredColumnsMap.size()); - - byte[] newTableEmptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(newTableEncodingScheme).getFirst(); - byte[] oldTableEmptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(oldTableEncodingScheme).getFirst(); - newTableEmptyKeyValueRef = new ColumnReference(oldTableEmptyKeyValueCF, newTableEmptyKvQualifier); - oldTableEmptyKeyValueRef = new ColumnReference(oldTableEmptyKeyValueCF, oldTableEmptyKvQualifier); - this.newTableColumns = Sets.newLinkedHashSetWithExpectedSize(this.newTableColumnCount); - - for (ColumnReference colRef : coveredColumnsMap.keySet()) { - if (newTableImmutableStorageScheme == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { - newTableColumns.add(colRef); - } else { - newTableColumns.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); - } - } - } - - /** - * 
For client-side to serialize TransformMaintainer for a given table - * - * @param oldTable old table - * @param ptr bytes pointer to hold returned serialized value - * @param newTable new table to serialize - */ - public static void serialize(PTable oldTable, ImmutableBytesWritable ptr, - PTable newTable, PhoenixConnection connection) { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - DataOutputStream output = new DataOutputStream(stream); - try { - // Encode data table salting - WritableUtils.writeVInt(output, oldTable.getBucketNum() == null ? 1 : -1); - // Write out data row key schema once, since it's the same - oldTable.getRowKeySchema().write(output); - org.apache.phoenix.coprocessor.generated.ServerCachingProtos.TransformMaintainer proto = - TransformMaintainer.toProto(newTable.getTransformMaintainer(oldTable, connection)); - byte[] protoBytes = proto.toByteArray(); - WritableUtils.writeVInt(output, protoBytes.length); - output.write(protoBytes); - - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - ptr.set(stream.toByteArray(), 0, stream.size()); + } + + /** + * Init calculated state reading/creating + */ + private void initCachedState() { + this.allColumns = + Sets.newLinkedHashSetWithExpectedSize(newTableExpressions.size() + coveredColumnsMap.size()); + + byte[] newTableEmptyKvQualifier = + EncodedColumnsUtil.getEmptyKeyValueInfo(newTableEncodingScheme).getFirst(); + byte[] oldTableEmptyKvQualifier = + EncodedColumnsUtil.getEmptyKeyValueInfo(oldTableEncodingScheme).getFirst(); + newTableEmptyKeyValueRef = + new ColumnReference(oldTableEmptyKeyValueCF, newTableEmptyKvQualifier); + oldTableEmptyKeyValueRef = + new ColumnReference(oldTableEmptyKeyValueCF, oldTableEmptyKvQualifier); + this.newTableColumns = Sets.newLinkedHashSetWithExpectedSize(this.newTableColumnCount); + + for (ColumnReference colRef : coveredColumnsMap.keySet()) { + if (newTableImmutableStorageScheme == PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN) { + newTableColumns.add(colRef); + } else { + newTableColumns.add(new ColumnReference(colRef.getFamily(), + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES)); + } } - - @Override - public Iterator iterator() { - return newTableColumns.iterator(); + } + + /** + * For client-side to serialize TransformMaintainer for a given table + * @param oldTable old table + * @param ptr bytes pointer to hold returned serialized value + * @param newTable new table to serialize + */ + public static void serialize(PTable oldTable, ImmutableBytesWritable ptr, PTable newTable, + PhoenixConnection connection) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + DataOutputStream output = new DataOutputStream(stream); + try { + // Encode data table salting + WritableUtils.writeVInt(output, oldTable.getBucketNum() == null ? 
1 : -1); + // Write out data row key schema once, since it's the same + oldTable.getRowKeySchema().write(output); + org.apache.phoenix.coprocessor.generated.ServerCachingProtos.TransformMaintainer proto = + TransformMaintainer.toProto(newTable.getTransformMaintainer(oldTable, connection)); + byte[] protoBytes = proto.toByteArray(); + WritableUtils.writeVInt(output, protoBytes.length); + output.write(protoBytes); + + } catch (IOException e) { + throw new RuntimeException(e); // Impossible } - - public static ServerCachingProtos.TransformMaintainer toProto(TransformMaintainer maintainer) throws IOException { - ServerCachingProtos.TransformMaintainer.Builder builder = ServerCachingProtos.TransformMaintainer.newBuilder(); - builder.setSaltBuckets(maintainer.nNewTableSaltBuckets); - builder.setIsMultiTenant(maintainer.isMultiTenant); - - for (ColumnReference colRef : maintainer.newTableColumns) { - ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); - builder.addNewTableColumns(cRefBuilder.build()); - } - - for (Map.Entry e : maintainer.coveredColumnsMap.entrySet()) { - ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - ColumnReference dataTableColRef = e.getKey(); - cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier())); - builder.addOldTableColRefForCoveredColumns(cRefBuilder.build()); - ColumnReference newTableColRef = e.getValue(); - cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); - cRefBuilder.setFamily(ByteStringer.wrap(newTableColRef.getFamily())); - cRefBuilder.setQualifier(ByteStringer.wrap(newTableColRef.getQualifier())); - builder.addNewTableColRefForCoveredColumns(cRefBuilder.build()); - } - - builder.setNewTableColumnCount(maintainer.newTableColumnCount); - builder.setNewTableName(ByteStringer.wrap(maintainer.newTableName)); - builder.setNewTableRowKeyOrderOptimizable(maintainer.newTableRowKeyOrderOptimizable); - builder.setOldTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.oldTableEmptyKeyValueCF)); - ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = ServerCachingProtos.ImmutableBytesWritable.newBuilder(); - ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get())); - ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength()); - ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset()); - builder.setEmptyKeyValueColFamily(ibwBuilder.build()); - try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { - DataOutput output = new DataOutputStream(stream); - for (Expression expression : maintainer.newTableExpressions) { - WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); - expression.write(output); - } - builder.setNewTableExpressions(ByteStringer.wrap(stream.toByteArray())); - } - - builder.setNumDataTableColFamilies(maintainer.nOldTableCFs); - builder.setNewTableWalDisabled(maintainer.newTableWALDisabled); - builder.setNewTableRowKeyByteSize(maintainer.estimatedNewTableRowKeyBytes); - builder.setNewTableImmutable(maintainer.newTableImmutableRows); - for (Pair p : maintainer.newTableColumnsInfo) { - ServerCachingProtos.ColumnInfo.Builder ciBuilder = ServerCachingProtos.ColumnInfo.newBuilder(); - if (p.getFirst() != null) { - 
ciBuilder.setFamilyName(p.getFirst()); - } - ciBuilder.setColumnName(p.getSecond()); - builder.addNewTableColumnInfo(ciBuilder.build()); - } - builder.setNewTableEncodingScheme(maintainer.newTableEncodingScheme.getSerializedMetadataValue()); - builder.setNewTableImmutableStorageScheme(maintainer.newTableImmutableStorageScheme.getSerializedMetadataValue()); - builder.setLogicalNewTableName(maintainer.logicalNewTableName); - builder.setOldTableEncodingScheme(maintainer.oldTableEncodingScheme.getSerializedMetadataValue()); - builder.setOldTableImmutableStorageScheme(maintainer.oldTableImmutableStorageScheme.getSerializedMetadataValue()); - return builder.build(); + ptr.set(stream.toByteArray(), 0, stream.size()); + } + + @Override + public Iterator iterator() { + return newTableColumns.iterator(); + } + + public static ServerCachingProtos.TransformMaintainer toProto(TransformMaintainer maintainer) + throws IOException { + ServerCachingProtos.TransformMaintainer.Builder builder = + ServerCachingProtos.TransformMaintainer.newBuilder(); + builder.setSaltBuckets(maintainer.nNewTableSaltBuckets); + builder.setIsMultiTenant(maintainer.isMultiTenant); + + for (ColumnReference colRef : maintainer.newTableColumns) { + ServerCachingProtos.ColumnReference.Builder cRefBuilder = + ServerCachingProtos.ColumnReference.newBuilder(); + cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier())); + builder.addNewTableColumns(cRefBuilder.build()); } - public static TransformMaintainer fromProto(ServerCachingProtos.TransformMaintainer proto, RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException { - TransformMaintainer maintainer = new TransformMaintainer(dataTableRowKeySchema, isDataTableSalted); - maintainer.nNewTableSaltBuckets = proto.getSaltBuckets(); - maintainer.isMultiTenant = proto.getIsMultiTenant(); - List newTableColList = proto.getNewTableColumnsList(); - maintainer.newTableColumns = new HashSet(newTableColList.size()); - for (ServerCachingProtos.ColumnReference colRefFromProto : newTableColList) { - maintainer.newTableColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray())); - } - - maintainer.newTableName = proto.getNewTableName().toByteArray(); - if (proto.getNewTableColumnCount() != -1) { - maintainer.newTableColumnCount = proto.getNewTableColumnCount(); - } - - maintainer.newTableRowKeyOrderOptimizable = proto.getNewTableRowKeyOrderOptimizable(); - maintainer.oldTableEmptyKeyValueCF = proto.getOldTableEmptyKeyValueColFamily().toByteArray(); - ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily(); - maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength()); - - maintainer.nOldTableCFs = proto.getNumDataTableColFamilies(); - maintainer.newTableWALDisabled = proto.getNewTableWalDisabled(); - maintainer.estimatedNewTableRowKeyBytes = proto.getNewTableRowKeyByteSize(); - maintainer.newTableImmutableRows = proto.getNewTableImmutable(); - List newTblColumnInfoList = proto.getNewTableColumnInfoList(); - maintainer.newTableColumnsInfo = Sets.newHashSet(); - for (ServerCachingProtos.ColumnInfo info : newTblColumnInfoList) { - maintainer.newTableColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName())); - } - maintainer.newTableExpressions = new ArrayList<>(); - try 
(ByteArrayInputStream stream = new ByteArrayInputStream(proto.getNewTableExpressions().toByteArray())) { - DataInput input = new DataInputStream(stream); - while (stream.available() > 0) { - int expressionOrdinal = WritableUtils.readVInt(input); - Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(input); - maintainer.newTableExpressions.add(expression); - } - } - // proto doesn't support single byte so need an explicit cast here - maintainer.newTableEncodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getNewTableEncodingScheme()); - maintainer.newTableImmutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getNewTableImmutableStorageScheme()); - maintainer.oldTableEncodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getOldTableEncodingScheme()); - maintainer.oldTableImmutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getOldTableImmutableStorageScheme()); - - List oldTableColRefsForCoveredColumnsList = proto.getOldTableColRefForCoveredColumnsList(); - List newTableColRefsForCoveredColumnsList = proto.getNewTableColRefForCoveredColumnsList(); - maintainer.coveredColumnsMap = Maps.newHashMapWithExpectedSize(oldTableColRefsForCoveredColumnsList.size()); - Iterator newTableColRefItr = newTableColRefsForCoveredColumnsList.iterator(); - for (ServerCachingProtos.ColumnReference colRefFromProto : oldTableColRefsForCoveredColumnsList) { - ColumnReference oldTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()); - ColumnReference newTableColRef; - ServerCachingProtos.ColumnReference fromProto = newTableColRefItr.next(); - newTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), fromProto.getQualifier().toByteArray()); - maintainer.coveredColumnsMap.put(oldTableColRef, newTableColRef); - } - maintainer.logicalNewTableName = proto.getLogicalNewTableName(); - maintainer.initCachedState(); - return maintainer; + for (Map.Entry e : maintainer.coveredColumnsMap.entrySet()) { + ServerCachingProtos.ColumnReference.Builder cRefBuilder = + ServerCachingProtos.ColumnReference.newBuilder(); + ColumnReference dataTableColRef = e.getKey(); + cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier())); + builder.addOldTableColRefForCoveredColumns(cRefBuilder.build()); + ColumnReference newTableColRef = e.getValue(); + cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder(); + cRefBuilder.setFamily(ByteStringer.wrap(newTableColRef.getFamily())); + cRefBuilder.setQualifier(ByteStringer.wrap(newTableColRef.getQualifier())); + builder.addNewTableColRefForCoveredColumns(cRefBuilder.build()); } - - public static List deserialize(byte[] buf) { - return deserialize(buf, 0, buf.length); + builder.setNewTableColumnCount(maintainer.newTableColumnCount); + builder.setNewTableName(ByteStringer.wrap(maintainer.newTableName)); + builder.setNewTableRowKeyOrderOptimizable(maintainer.newTableRowKeyOrderOptimizable); + builder + .setOldTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.oldTableEmptyKeyValueCF)); + ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = + ServerCachingProtos.ImmutableBytesWritable.newBuilder(); + ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get())); + 
ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength()); + ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset()); + builder.setEmptyKeyValueColFamily(ibwBuilder.build()); + try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { + DataOutput output = new DataOutputStream(stream); + for (Expression expression : maintainer.newTableExpressions) { + WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal()); + expression.write(output); + } + builder.setNewTableExpressions(ByteStringer.wrap(stream.toByteArray())); } - private static List deserialize(byte[] buf, int offset, int length) { - List maintainers = Collections.emptyList(); - if (length > 0) { - ByteArrayInputStream stream = new ByteArrayInputStream(buf, offset, length); - DataInput input = new DataInputStream(stream); - try { - int size = WritableUtils.readVInt(input); - boolean isDataTableSalted = size < 0; - size = Math.abs(size); - RowKeySchema rowKeySchema = new RowKeySchema(); - rowKeySchema.readFields(input); - maintainers = Lists.newArrayListWithExpectedSize(size); - for (int i = 0; i < size; i++) { - int protoSize = WritableUtils.readVInt(input); - byte[] b = new byte[protoSize]; - input.readFully(b); - ServerCachingProtos.TransformMaintainer proto = ServerCachingProtos.TransformMaintainer.parseFrom(b); - maintainers.add(TransformMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); - } - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } - } - return maintainers; + builder.setNumDataTableColFamilies(maintainer.nOldTableCFs); + builder.setNewTableWalDisabled(maintainer.newTableWALDisabled); + builder.setNewTableRowKeyByteSize(maintainer.estimatedNewTableRowKeyBytes); + builder.setNewTableImmutable(maintainer.newTableImmutableRows); + for (Pair p : maintainer.newTableColumnsInfo) { + ServerCachingProtos.ColumnInfo.Builder ciBuilder = + ServerCachingProtos.ColumnInfo.newBuilder(); + if (p.getFirst() != null) { + ciBuilder.setFamilyName(p.getFirst()); + } + ciBuilder.setColumnName(p.getSecond()); + builder.addNewTableColumnInfo(ciBuilder.build()); } - - // Return new table's name - public byte[] getIndexTableName() { - return newTableName; + builder + .setNewTableEncodingScheme(maintainer.newTableEncodingScheme.getSerializedMetadataValue()); + builder.setNewTableImmutableStorageScheme( + maintainer.newTableImmutableStorageScheme.getSerializedMetadataValue()); + builder.setLogicalNewTableName(maintainer.logicalNewTableName); + builder + .setOldTableEncodingScheme(maintainer.oldTableEncodingScheme.getSerializedMetadataValue()); + builder.setOldTableImmutableStorageScheme( + maintainer.oldTableImmutableStorageScheme.getSerializedMetadataValue()); + return builder.build(); + } + + public static TransformMaintainer fromProto(ServerCachingProtos.TransformMaintainer proto, + RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException { + TransformMaintainer maintainer = + new TransformMaintainer(dataTableRowKeySchema, isDataTableSalted); + maintainer.nNewTableSaltBuckets = proto.getSaltBuckets(); + maintainer.isMultiTenant = proto.getIsMultiTenant(); + List newTableColList = proto.getNewTableColumnsList(); + maintainer.newTableColumns = new HashSet(newTableColList.size()); + for (ServerCachingProtos.ColumnReference colRefFromProto : newTableColList) { + maintainer.newTableColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), + colRefFromProto.getQualifier().toByteArray())); } - // Builds new table's rowkey using 
the old table's rowkey. - // This method will change when we support rowkey related transforms - public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean isNewTableSalted = nNewTableSaltBuckets > 0; - - try (TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedNewTableRowKeyBytes)){ - DataOutput output = new DataOutputStream(stream); - - if (isNewTableSalted) { - output.write(0); // will be set at end to new table salt byte - } - // The oldTableRowKeySchema includes the salt byte field, - // so we must adjust for that here. - int dataPosOffset = isOldTableSalted ? 1 : 0 ; - //BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); - // Skip data table salt byte - int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength(); - oldTableRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset); - - // Write new table row key - int trailingVariableWidthColumnNum = 0; - while (oldTableRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset) != null) { - output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); - if (!oldTableRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { - output.writeByte(SchemaUtil.getSeparatorByte(newTableRowKeyOrderOptimizable, ptr.getLength()==0 - , oldTableRowKeySchema.getField(dataPosOffset))); - trailingVariableWidthColumnNum++; - } else { - trailingVariableWidthColumnNum = 0; - } - - dataPosOffset++; - } - - byte[] newTableRowKey = stream.getBuffer(); - // Remove trailing nulls - int length = stream.size(); - // The existing code does not eliminate the separator if the data type is not nullable. It not clear why. - // The actual bug is in the calculation of maxTrailingNulls with view indexes. So, in order not to impact some other cases, we should keep minLength check here. - while (trailingVariableWidthColumnNum > 0 && length > 0 && newTableRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) { - length--; - trailingVariableWidthColumnNum--; - } - - if (isNewTableSalted) { - // Set salt byte - byte saltByte = SaltingUtil.getSaltingByte(newTableRowKey, SaltingUtil.NUM_SALTING_BYTES, length-SaltingUtil.NUM_SALTING_BYTES, nNewTableSaltBuckets); - newTableRowKey[0] = saltByte; - } - return newTableRowKey.length == length ? 
newTableRowKey : Arrays.copyOf(newTableRowKey, length); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + maintainer.newTableName = proto.getNewTableName().toByteArray(); + if (proto.getNewTableColumnCount() != -1) { + maintainer.newTableColumnCount = proto.getNewTableColumnCount(); } - public Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, ImmutableBytesWritable oldRowKeyPtr, - long ts, byte[] regionStartKey, byte[] regionEndKey, boolean verified) throws IOException { - byte[] newRowKey = this.buildRowKey(valueGetter, oldRowKeyPtr, regionStartKey, regionEndKey, ts); - return buildUpdateMutation(kvBuilder, valueGetter, oldRowKeyPtr, ts, regionStartKey, regionEndKey, - newRowKey, this.getEmptyKeyValueFamily(), coveredColumnsMap, - newTableEmptyKeyValueRef, newTableWALDisabled, oldTableImmutableStorageScheme, newTableImmutableStorageScheme, - newTableEncodingScheme, oldTableEncodingScheme, verified); + maintainer.newTableRowKeyOrderOptimizable = proto.getNewTableRowKeyOrderOptimizable(); + maintainer.oldTableEmptyKeyValueCF = proto.getOldTableEmptyKeyValueColFamily().toByteArray(); + ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = + proto.getEmptyKeyValueColFamily(); + maintainer.emptyKeyValueCFPtr = + new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), + emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength()); + + maintainer.nOldTableCFs = proto.getNumDataTableColFamilies(); + maintainer.newTableWALDisabled = proto.getNewTableWalDisabled(); + maintainer.estimatedNewTableRowKeyBytes = proto.getNewTableRowKeyByteSize(); + maintainer.newTableImmutableRows = proto.getNewTableImmutable(); + List newTblColumnInfoList = proto.getNewTableColumnInfoList(); + maintainer.newTableColumnsInfo = Sets.newHashSet(); + for (ServerCachingProtos.ColumnInfo info : newTblColumnInfoList) { + maintainer.newTableColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName())); } - - public ImmutableBytesPtr getEmptyKeyValueFamily() { - return emptyKeyValueCFPtr; + maintainer.newTableExpressions = new ArrayList<>(); + try (ByteArrayInputStream stream = + new ByteArrayInputStream(proto.getNewTableExpressions().toByteArray())) { + DataInput input = new DataInputStream(stream); + while (stream.available() > 0) { + int expressionOrdinal = WritableUtils.readVInt(input); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(input); + maintainer.newTableExpressions.add(expression); + } } - - public byte[] getEmptyKeyValueQualifier() { - return newTableEmptyKeyValueRef.getQualifier(); + // proto doesn't support single byte so need an explicit cast here + maintainer.newTableEncodingScheme = + PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getNewTableEncodingScheme()); + maintainer.newTableImmutableStorageScheme = PTable.ImmutableStorageScheme + .fromSerializedValue((byte) proto.getNewTableImmutableStorageScheme()); + maintainer.oldTableEncodingScheme = + PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getOldTableEncodingScheme()); + maintainer.oldTableImmutableStorageScheme = PTable.ImmutableStorageScheme + .fromSerializedValue((byte) proto.getOldTableImmutableStorageScheme()); + + List oldTableColRefsForCoveredColumnsList = + proto.getOldTableColRefForCoveredColumnsList(); + List newTableColRefsForCoveredColumnsList = + proto.getNewTableColRefForCoveredColumnsList(); + maintainer.coveredColumnsMap = + 
Maps.newHashMapWithExpectedSize(oldTableColRefsForCoveredColumnsList.size()); + Iterator newTableColRefItr = + newTableColRefsForCoveredColumnsList.iterator(); + for (ServerCachingProtos.ColumnReference colRefFromProto : oldTableColRefsForCoveredColumnsList) { + ColumnReference oldTableColRef = new ColumnReference( + colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()); + ColumnReference newTableColRef; + ServerCachingProtos.ColumnReference fromProto = newTableColRefItr.next(); + newTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), + fromProto.getQualifier().toByteArray()); + maintainer.coveredColumnsMap.put(oldTableColRef, newTableColRef); } - - public byte[] getDataEmptyKeyValueCF() { - return oldTableEmptyKeyValueCF; + maintainer.logicalNewTableName = proto.getLogicalNewTableName(); + maintainer.initCachedState(); + return maintainer; + } + + public static List deserialize(byte[] buf) { + return deserialize(buf, 0, buf.length); + } + + private static List deserialize(byte[] buf, int offset, int length) { + List maintainers = Collections.emptyList(); + if (length > 0) { + ByteArrayInputStream stream = new ByteArrayInputStream(buf, offset, length); + DataInput input = new DataInputStream(stream); + try { + int size = WritableUtils.readVInt(input); + boolean isDataTableSalted = size < 0; + size = Math.abs(size); + RowKeySchema rowKeySchema = new RowKeySchema(); + rowKeySchema.readFields(input); + maintainers = Lists.newArrayListWithExpectedSize(size); + for (int i = 0; i < size; i++) { + int protoSize = WritableUtils.readVInt(input); + byte[] b = new byte[protoSize]; + input.readFully(b); + ServerCachingProtos.TransformMaintainer proto = + ServerCachingProtos.TransformMaintainer.parseFrom(b); + maintainers.add(TransformMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted)); + } + } catch (IOException e) { + throw new RuntimeException(e); // Impossible + } } + return maintainers; + } + + // Return new table's name + public byte[] getIndexTableName() { + return newTableName; + } + + // Builds new table's rowkey using the old table's rowkey. + // This method will change when we support rowkey related transforms + public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, + byte[] regionStartKey, byte[] regionEndKey, long ts) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean isNewTableSalted = nNewTableSaltBuckets > 0; + + try (TrustedByteArrayOutputStream stream = + new TrustedByteArrayOutputStream(estimatedNewTableRowKeyBytes)) { + DataOutput output = new DataOutputStream(stream); + + if (isNewTableSalted) { + output.write(0); // will be set at end to new table salt byte + } + // The oldTableRowKeySchema includes the salt byte field, + // so we must adjust for that here. + int dataPosOffset = isOldTableSalted ? 
1 : 0; + // BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet(); + // Skip data table salt byte + int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength(); + oldTableRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset); + + // Write new table row key + int trailingVariableWidthColumnNum = 0; + while (oldTableRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset) != null) { + output.write(ptr.get(), ptr.getOffset(), ptr.getLength()); + if (!oldTableRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) { + output.writeByte(SchemaUtil.getSeparatorByte(newTableRowKeyOrderOptimizable, + ptr.getLength() == 0, oldTableRowKeySchema.getField(dataPosOffset))); + trailingVariableWidthColumnNum++; + } else { + trailingVariableWidthColumnNum = 0; + } - public byte[] getEmptyKeyValueQualifierForDataTable() { - return oldTableEmptyKeyValueRef.getQualifier(); + dataPosOffset++; + } + + byte[] newTableRowKey = stream.getBuffer(); + // Remove trailing nulls + int length = stream.size(); + // The existing code does not eliminate the separator if the data type is not nullable. It not + // clear why. + // The actual bug is in the calculation of maxTrailingNulls with view indexes. So, in order + // not to impact some other cases, we should keep minLength check here. + while ( + trailingVariableWidthColumnNum > 0 && length > 0 + && newTableRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE + ) { + length--; + trailingVariableWidthColumnNum--; + } + + if (isNewTableSalted) { + // Set salt byte + byte saltByte = SaltingUtil.getSaltingByte(newTableRowKey, SaltingUtil.NUM_SALTING_BYTES, + length - SaltingUtil.NUM_SALTING_BYTES, nNewTableSaltBuckets); + newTableRowKey[0] = saltByte; + } + return newTableRowKey.length == length + ? newTableRowKey + : Arrays.copyOf(newTableRowKey, length); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible } -} \ No newline at end of file + } + + public Put buildUpdateMutation(KeyValueBuilder kvBuilder, ValueGetter valueGetter, + ImmutableBytesWritable oldRowKeyPtr, long ts, byte[] regionStartKey, byte[] regionEndKey, + boolean verified) throws IOException { + byte[] newRowKey = + this.buildRowKey(valueGetter, oldRowKeyPtr, regionStartKey, regionEndKey, ts); + return buildUpdateMutation(kvBuilder, valueGetter, oldRowKeyPtr, ts, regionStartKey, + regionEndKey, newRowKey, this.getEmptyKeyValueFamily(), coveredColumnsMap, + newTableEmptyKeyValueRef, newTableWALDisabled, oldTableImmutableStorageScheme, + newTableImmutableStorageScheme, newTableEncodingScheme, oldTableEncodingScheme, verified); + } + + public ImmutableBytesPtr getEmptyKeyValueFamily() { + return emptyKeyValueCFPtr; + } + + public byte[] getEmptyKeyValueQualifier() { + return newTableEmptyKeyValueRef.getQualifier(); + } + + public byte[] getDataEmptyKeyValueCF() { + return oldTableEmptyKeyValueCF; + } + + public byte[] getEmptyKeyValueQualifierForDataTable() { + return oldTableEmptyKeyValueRef.getQualifier(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java index 058c0e48679..97cf8d438e9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,50 +22,49 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - public abstract class BaseTuple implements Tuple { - @Override - public int size() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isImmutable() { - throw new UnsupportedOperationException(); - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - throw new UnsupportedOperationException(); - } + @Override + public int size() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isImmutable() { + throw new UnsupportedOperationException(); + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell getValue(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + throw new UnsupportedOperationException(); + } - @Override - public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { - throw new UnsupportedOperationException(); - } - - @Override - public Cell getValue(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public Cell getValue(byte [] family, byte [] qualifier) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean getValue(byte [] family, byte [] qualifier, ImmutableBytesWritable ptr) { - throw new UnsupportedOperationException(); - } + @Override + public long getSequenceValue(int index) { + throw new UnsupportedOperationException(); + } - @Override - public long getSequenceValue(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public void setKeyValues(List values) { - throw new UnsupportedOperationException(); - } + @Override + public void setKeyValues(List values) { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java index 7cd3acc6a7c..88272975291 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,54 +23,54 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; public class DelegateTuple implements Tuple { - private final Tuple delegate; - - public DelegateTuple(Tuple delegate) { - this.delegate = delegate; - } - - @Override - public int size() { - return delegate.size(); - } + private final Tuple delegate; - @Override - public boolean isImmutable() { - return delegate.isImmutable(); - } + public DelegateTuple(Tuple delegate) { + this.delegate = delegate; + } - @Override - public void getKey(ImmutableBytesWritable ptr) { - delegate.getKey(ptr); - } + @Override + public int size() { + return delegate.size(); + } - @Override - public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { - return delegate.mergeWithDynColsListBytesAndGetValue(index, dynColsList); - } + @Override + public boolean isImmutable() { + return delegate.isImmutable(); + } - @Override - public Cell getValue(int index) { - return delegate.getValue(index); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + delegate.getKey(ptr); + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - return delegate.getValue(family, qualifier); - } + @Override + public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList) { + return delegate.mergeWithDynColsListBytesAndGetValue(index, dynColsList); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { - return delegate.getValue(family, qualifier, ptr); - } + @Override + public Cell getValue(int index) { + return delegate.getValue(index); + } - @Override - public long getSequenceValue(int index) { - return delegate.getSequenceValue(index); - } + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + return delegate.getValue(family, qualifier); + } - @Override - public void setKeyValues(List values) { - delegate.setKeyValues(values); - } + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + return delegate.getValue(family, qualifier, ptr); + } + + @Override + public long getSequenceValue(int index) { + return delegate.getSequenceValue(index); + } + + @Override + public void setKeyValues(List values) { + delegate.setKeyValues(values); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java index e280de7c54e..dadd99af43d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,9 @@ */ package org.apache.phoenix.schema.tuple; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.util.ScanUtil.isDummy; import java.util.Collection; @@ -57,560 +57,555 @@ @NotThreadSafe public class EncodedColumnQualiferCellsList implements List { - private int minQualifier; - private int maxQualifier; - private int nonReservedRangeOffset; - private final Cell[] array; - private int numNonNullElements; - private int firstNonNullElementIdx = -1; - private static final int RESERVED_RANGE_SIZE = - ENCODED_CQ_COUNTER_INITIAL_VALUE - ENCODED_EMPTY_COLUMN_NAME; - // Used by iterators to figure out if the list was structurally modified. - private int modCount = 0; - private final QualifierEncodingScheme encodingScheme; - - public EncodedColumnQualiferCellsList(int minQ, int maxQ, - QualifierEncodingScheme encodingScheme) { - checkArgument(minQ <= maxQ, "Invalid arguments. Min: " + minQ + ". Max: " + maxQ); - this.minQualifier = minQ; - this.maxQualifier = maxQ; - int size = 0; - if (maxQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { - size = RESERVED_RANGE_SIZE; - } else if (minQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { - size = (maxQ - minQ + 1); - } else { - size = RESERVED_RANGE_SIZE + (maxQ - minQ + 1); - } - this.array = new Cell[size]; - this.nonReservedRangeOffset = - minQ > ENCODED_CQ_COUNTER_INITIAL_VALUE ? 
minQ - ENCODED_CQ_COUNTER_INITIAL_VALUE - : 0; - this.encodingScheme = encodingScheme; - } - - @Override - public int size() { - return numNonNullElements; - } - - @Override - public boolean isEmpty() { - return numNonNullElements == 0; - } - - @Override - public boolean contains(Object o) { - return indexOf(o) >= 0; - } - - @Override - public Object[] toArray() { - Object[] toReturn = new Object[numNonNullElements]; - int counter = 0; - if (numNonNullElements > 0) { - for (int i = 0; i < array.length; i++) { - if (array[i] != null) { - toReturn[counter++] = array[i]; - } - } - } - return toReturn; - } - - @Override - @SuppressWarnings("unchecked") - public T[] toArray(T[] a) { - T[] toReturn = - (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), - numNonNullElements); - int counter = 0; - for (int i = 0; i < array.length; i++) { - if (array[i] != null) { - toReturn[counter++] = (T) array[i]; - } - } - return toReturn; - } - - @Override - public boolean add(Cell e) { - if (e == null) { - throw new NullPointerException(); - } - if (isDummy(e)) { - array[0] = e; - firstNonNullElementIdx = 0; - numNonNullElements = 1; - return true; - } - int columnQualifier = - encodingScheme.decode(e.getQualifierArray(), e.getQualifierOffset(), - e.getQualifierLength()); - - checkQualifierRange(columnQualifier); - int idx = getArrayIndex(columnQualifier); - if (array[idx] == null) { - numNonNullElements++; - } - array[idx] = e; - if (firstNonNullElementIdx == -1) { - firstNonNullElementIdx = idx; - } else if (idx < firstNonNullElementIdx) { - firstNonNullElementIdx = idx; + private int minQualifier; + private int maxQualifier; + private int nonReservedRangeOffset; + private final Cell[] array; + private int numNonNullElements; + private int firstNonNullElementIdx = -1; + private static final int RESERVED_RANGE_SIZE = + ENCODED_CQ_COUNTER_INITIAL_VALUE - ENCODED_EMPTY_COLUMN_NAME; + // Used by iterators to figure out if the list was structurally modified. + private int modCount = 0; + private final QualifierEncodingScheme encodingScheme; + + public EncodedColumnQualiferCellsList(int minQ, int maxQ, + QualifierEncodingScheme encodingScheme) { + checkArgument(minQ <= maxQ, "Invalid arguments. Min: " + minQ + ". Max: " + maxQ); + this.minQualifier = minQ; + this.maxQualifier = maxQ; + int size = 0; + if (maxQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + size = RESERVED_RANGE_SIZE; + } else if (minQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + size = (maxQ - minQ + 1); + } else { + size = RESERVED_RANGE_SIZE + (maxQ - minQ + 1); + } + this.array = new Cell[size]; + this.nonReservedRangeOffset = + minQ > ENCODED_CQ_COUNTER_INITIAL_VALUE ? 
minQ - ENCODED_CQ_COUNTER_INITIAL_VALUE : 0; + this.encodingScheme = encodingScheme; + } + + @Override + public int size() { + return numNonNullElements; + } + + @Override + public boolean isEmpty() { + return numNonNullElements == 0; + } + + @Override + public boolean contains(Object o) { + return indexOf(o) >= 0; + } + + @Override + public Object[] toArray() { + Object[] toReturn = new Object[numNonNullElements]; + int counter = 0; + if (numNonNullElements > 0) { + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + toReturn[counter++] = array[i]; + } + } + } + return toReturn; + } + + @Override + @SuppressWarnings("unchecked") + public T[] toArray(T[] a) { + T[] toReturn = (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + numNonNullElements); + int counter = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + toReturn[counter++] = (T) array[i]; + } + } + return toReturn; + } + + @Override + public boolean add(Cell e) { + if (e == null) { + throw new NullPointerException(); + } + if (isDummy(e)) { + array[0] = e; + firstNonNullElementIdx = 0; + numNonNullElements = 1; + return true; + } + int columnQualifier = + encodingScheme.decode(e.getQualifierArray(), e.getQualifierOffset(), e.getQualifierLength()); + + checkQualifierRange(columnQualifier); + int idx = getArrayIndex(columnQualifier); + if (array[idx] == null) { + numNonNullElements++; + } + array[idx] = e; + if (firstNonNullElementIdx == -1) { + firstNonNullElementIdx = idx; + } else if (idx < firstNonNullElementIdx) { + firstNonNullElementIdx = idx; + } + modCount++; + /* + * Note that we don't care about equality of the element being added with the element already + * present at the index. + */ + return true; + } + + @Override + public boolean remove(Object o) { + if (o == null) { + return false; + } + Cell e = (Cell) o; + int i = 0; + while (i < array.length) { + if (array[i] != null && array[i].equals(e)) { + array[i] = null; + numNonNullElements--; + if (numNonNullElements == 0) { + firstNonNullElementIdx = -1; + } else if (firstNonNullElementIdx == i) { + // the element being removed was the first non-null element we knew + adjustFirstNonNullElement(); } modCount++; - /* - * Note that we don't care about equality of the element being added with the element - * already present at the index. 
- */ return true; + } + i++; + } + return false; + } + + @Override + public boolean containsAll(Collection c) { + boolean containsAll = true; + Iterator itr = c.iterator(); + while (itr.hasNext()) { + containsAll &= (indexOf(itr.next()) >= 0); + } + return containsAll; + } + + @Override + public boolean addAll(Collection c) { + boolean changed = false; + for (Cell cell : c) { + if (c == null) { + throw new NullPointerException(); + } + changed |= add(cell); + } + return changed; + } + + @Override + public boolean addAll(int index, Collection c) { + throwGenericUnsupportedOperationException(); + return false; + } + + @Override + public boolean removeAll(Collection c) { + Iterator itr = c.iterator(); + boolean changed = false; + while (itr.hasNext()) { + changed |= remove(itr.next()); + } + return changed; + } + + @Override + public boolean retainAll(Collection collection) { + boolean changed = false; + // Optimize if the passed collection is an instance of EncodedColumnQualiferCellsList + if (collection instanceof EncodedColumnQualiferCellsList) { + EncodedColumnQualiferCellsList list = (EncodedColumnQualiferCellsList) collection; + ListIterator listItr = this.listIterator(); + while (listItr.hasNext()) { + Cell cellInThis = listItr.next(); + int qualifier = encodingScheme.decode(cellInThis.getQualifierArray(), + cellInThis.getQualifierOffset(), cellInThis.getQualifierLength()); + try { + Cell cellInParam = list.getCellForColumnQualifier(qualifier); + if (cellInParam != null && cellInParam.equals(cellInThis)) { + continue; + } + listItr.remove(); + changed = true; + } catch (IndexOutOfBoundsException expected) { + // this could happen when the qualifier of cellInParam lies out of + // the range of this list. + listItr.remove(); + changed = true; + } + } + } else { + throw new UnsupportedOperationException( + "Operation only supported for collections of type EncodedColumnQualiferCellsList"); + } + return changed; + } + + @Override + public void clear() { + for (int i = 0; i < array.length; i++) { + array[i] = null; + } + firstNonNullElementIdx = -1; + numNonNullElements = 0; + modCount++; + } + + @Override + public Cell get(int index) { + rangeCheck(index); + int numNonNullElementsFound = 0; + for (int i = firstNonNullElementIdx; i < array.length; i++) { + if (array[i] != null) { + numNonNullElementsFound++; + if (numNonNullElementsFound == index + 1) { + return array[i]; + } + } + } + throw new IllegalStateException("There was no element present in the list at index " + index + + " even though number of elements in the list are " + size()); + } + + @Override + public Cell set(int index, Cell e) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public void add(int index, Cell element) { + throwGenericUnsupportedOperationException(); + } + + @Override + public Cell remove(int index) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public int indexOf(Object o) { + if (o == null || isEmpty()) { + return -1; + } else { + int numNonNull = -1; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + numNonNull++; + } + if (o.equals(array[i])) { + return numNonNull; + } + } + } + return -1; + } + + @Override + public int lastIndexOf(Object o) { + if (o == null || isEmpty()) { + return -1; + } + int lastIndex = numNonNullElements; + for (int i = array.length - 1; i >= 0; i--) { + if (array[i] != null) { + lastIndex--; + } + if (o.equals(array[i])) { + return lastIndex; + } + } + return -1; + } + + @Override + public 
ListIterator listIterator() { + return new ListItr(); + } + + @Override + public ListIterator listIterator(int index) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public List subList(int fromIndex, int toIndex) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public Iterator iterator() { + return new Itr(); + } + + /** + * @param qualifierBytes bytes of the column qualifier which serves as the index + * @return {@link Cell} at the index + */ + public Cell getCellForColumnQualifier(byte[] qualifierBytes) { + int columnQualifier = encodingScheme.decode(qualifierBytes); + return getCellForColumnQualifier(columnQualifier); + } + + /** + * @param qualifierBytes bytes of the column qualifier which serves as the index + * @param offset offset in the byte array + * @param length length starting from offset + * @return {@link Cell} at the index + */ + public Cell getCellForColumnQualifier(byte[] qualifierBytes, int offset, int length) { + int columnQualifier = encodingScheme.decode(qualifierBytes, offset, length); + return getCellForColumnQualifier(columnQualifier); + } + + private void adjustFirstNonNullElement() { + int i = firstNonNullElementIdx; + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + firstNonNullElementIdx = i; + } else { + firstNonNullElementIdx = -1; + } + + } + + private Cell getCellForColumnQualifier(int columnQualifier) { + checkQualifierRange(columnQualifier); + int idx = getArrayIndex(columnQualifier); + Cell c = array[idx]; + return c; + } + + public Cell getFirstCell() { + if (firstNonNullElementIdx == -1) { + throw new NoSuchElementException("No elements present in the list"); + } + return array[firstNonNullElementIdx]; + } + + private void checkQualifierRange(int qualifier) { + if (qualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + return; // space in the array for reserved range is always allocated. 
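Aside (illustrative only, not part of the patch): the slot arithmetic used by EncodedColumnQualiferCellsList above is easier to read outside diff form. A minimal standalone sketch of the same rule, where EMPTY and INITIAL are stand-in constants for ENCODED_EMPTY_COLUMN_NAME and ENCODED_CQ_COUNTER_INITIAL_VALUE (the real values live in QueryConstants):

    // Illustrative sketch only: mirrors getArrayIndex() and the constructor's offset rule
    // from EncodedColumnQualiferCellsList. EMPTY and INITIAL are assumed stand-ins.
    public final class QualifierIndexSketch {
      static final int EMPTY = 0;    // assumed value of ENCODED_EMPTY_COLUMN_NAME
      static final int INITIAL = 11; // assumed value of ENCODED_CQ_COUNTER_INITIAL_VALUE

      // Reserved qualifiers map to their own value; the rest shift down by the offset.
      static int arrayIndex(int qualifier, int nonReservedRangeOffset) {
        if (qualifier < INITIAL) {
          return qualifier;
        }
        return qualifier - nonReservedRangeOffset;
      }

      public static void main(String[] args) {
        int minQ = 15;
        int maxQ = 20;
        // Same offset and size rules as the constructor above.
        int offset = minQ > INITIAL ? minQ - INITIAL : 0;
        int size = (INITIAL - EMPTY) + (maxQ - minQ + 1);
        System.out.println(size);                   // 17 slots in the backing array
        System.out.println(arrayIndex(5, offset));  // 5  -> stays in the reserved range
        System.out.println(arrayIndex(15, offset)); // 11 -> first non-reserved slot
        System.out.println(arrayIndex(20, offset)); // 16 -> last slot
      }
    }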
+ } + if (qualifier < minQualifier || qualifier > maxQualifier) { + throw new IndexOutOfBoundsException("Qualifier " + qualifier + + " is out of the valid range - (" + minQualifier + ", " + maxQualifier + ")"); + } + } + + private void rangeCheck(int index) { + if (index < 0 || index >= size()) { + throw new IndexOutOfBoundsException(); + } + } + + private int getArrayIndex(int columnQualifier) { + checkArgument(columnQualifier >= ENCODED_EMPTY_COLUMN_NAME); + if (columnQualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + return columnQualifier; + } + return columnQualifier - nonReservedRangeOffset; + } + + private void throwGenericUnsupportedOperationException() { + throw new UnsupportedOperationException( + "Operation cannot be supported because it potentially violates the invariance contract of this list implementation"); + } + + private class Itr implements Iterator { + protected int nextIndex = 0; + protected int lastRet = -1; + protected int expectedModCount = modCount; + + private Itr() { + moveForward(true); } @Override - public boolean remove(Object o) { - if (o == null) { - return false; - } - Cell e = (Cell) o; - int i = 0; - while (i < array.length) { - if (array[i] != null && array[i].equals(e)) { - array[i] = null; - numNonNullElements--; - if (numNonNullElements == 0) { - firstNonNullElementIdx = -1; - } else if (firstNonNullElementIdx == i) { - // the element being removed was the first non-null element we knew - adjustFirstNonNullElement(); - } - modCount++; - return true; - } - i++; - } - return false; + public boolean hasNext() { + return nextIndex != -1; } @Override - public boolean containsAll(Collection c) { - boolean containsAll = true; - Iterator itr = c.iterator(); - while (itr.hasNext()) { - containsAll &= (indexOf(itr.next()) >= 0); - } - return containsAll; + public Cell next() { + checkForCoModification(); + if (!hasNext()) { + throw new NoSuchElementException(); + } + Cell next = array[nextIndex]; + lastRet = nextIndex; + moveForward(false); + modCount++; + expectedModCount = modCount; + return next; } @Override - public boolean addAll(Collection c) { - boolean changed = false; - for (Cell cell : c) { - if (c == null) { - throw new NullPointerException(); - } - changed |= add(cell); - } - return changed; + public void remove() { + if (lastRet < 0) { + throw new IllegalStateException(); + } + checkForCoModification(); + array[lastRet] = null; + if (firstNonNullElementIdx == lastRet) { + // the element being removed was the first non-null element we knew + adjustFirstNonNullElement(); + } + lastRet = -1; + numNonNullElements--; + modCount++; + expectedModCount = modCount; } - @Override - public boolean addAll(int index, Collection c) { - throwGenericUnsupportedOperationException(); - return false; + protected void moveForward(boolean init) { + int i = init ? 
0 : nextIndex + 1; + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + nextIndex = i; + } else { + nextIndex = -1; + } } - @Override - public boolean removeAll(Collection c) { - Iterator itr = c.iterator(); - boolean changed = false; - while (itr.hasNext()) { - changed |= remove(itr.next()); - } - return changed; + protected void checkForCoModification() { + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } } - @Override - public boolean retainAll(Collection collection) { - boolean changed = false; - // Optimize if the passed collection is an instance of EncodedColumnQualiferCellsList - if (collection instanceof EncodedColumnQualiferCellsList) { - EncodedColumnQualiferCellsList list = (EncodedColumnQualiferCellsList) collection; - ListIterator listItr = this.listIterator(); - while (listItr.hasNext()) { - Cell cellInThis = listItr.next(); - int qualifier = - encodingScheme.decode(cellInThis.getQualifierArray(), - cellInThis.getQualifierOffset(), cellInThis.getQualifierLength()); - try { - Cell cellInParam = list.getCellForColumnQualifier(qualifier); - if (cellInParam != null && cellInParam.equals(cellInThis)) { - continue; - } - listItr.remove(); - changed = true; - } catch (IndexOutOfBoundsException expected) { - // this could happen when the qualifier of cellInParam lies out of - // the range of this list. - listItr.remove(); - changed = true; - } - } - } else { - throw new UnsupportedOperationException( - "Operation only supported for collections of type EncodedColumnQualiferCellsList"); - } - return changed; - } + } - @Override - public void clear() { - for (int i = 0; i < array.length; i++) { - array[i] = null; - } - firstNonNullElementIdx = -1; - numNonNullElements = 0; - modCount++; - } + private class ListItr extends Itr implements ListIterator { + private int previousIndex = -1; - @Override - public Cell get(int index) { - rangeCheck(index); - int numNonNullElementsFound = 0; - for (int i = firstNonNullElementIdx; i < array.length; i++) { - if (array[i] != null) { - numNonNullElementsFound++; - if (numNonNullElementsFound == index + 1) { - return array[i]; - } - } - } - throw new IllegalStateException("There was no element present in the list at index " + index - + " even though number of elements in the list are " + size()); + private ListItr() { + moveForward(true); } @Override - public Cell set(int index, Cell e) { - throwGenericUnsupportedOperationException(); - return null; + public boolean hasNext() { + return nextIndex != -1; } @Override - public void add(int index, Cell element) { - throwGenericUnsupportedOperationException(); + public boolean hasPrevious() { + return previousIndex != -1; } @Override - public Cell remove(int index) { - throwGenericUnsupportedOperationException(); - return null; + public Cell previous() { + if (previousIndex == -1) { + throw new NoSuchElementException(); + } + checkForCoModification(); + lastRet = previousIndex; + movePointersBackward(); + return array[lastRet]; } @Override - public int indexOf(Object o) { - if (o == null || isEmpty()) { - return -1; - } else { - int numNonNull = -1; - for (int i = 0; i < array.length; i++) { - if (array[i] != null) { - numNonNull++; - } - if (o.equals(array[i])) { - return numNonNull; - } - } - } - return -1; + public int nextIndex() { + return nextIndex; } @Override - public int lastIndexOf(Object o) { - if (o == null || isEmpty()) { - return -1; - } - int lastIndex = numNonNullElements; - for (int i = array.length - 1; i >= 0; 
i--) { - if (array[i] != null) { - lastIndex--; - } - if (o.equals(array[i])) { - return lastIndex; - } - } - return -1; + public int previousIndex() { + return previousIndex; } @Override - public ListIterator listIterator() { - return new ListItr(); + public void remove() { + if (lastRet == nextIndex) { + moveNextPointer(nextIndex); + } + super.remove(); + expectedModCount = modCount; } @Override - public ListIterator listIterator(int index) { - throwGenericUnsupportedOperationException(); - return null; + public void set(Cell e) { + if (lastRet == -1) { + throw new IllegalStateException(); + } + int columnQualifier = encodingScheme.decode(e.getQualifierArray(), e.getQualifierOffset(), + e.getQualifierLength()); + int idx = getArrayIndex(columnQualifier); + if (idx != lastRet) { + throw new IllegalArgumentException("Cell " + e + " with column qualifier " + columnQualifier + + " belongs at index " + idx + ". It cannot be added at the position " + lastRet + + " to which the previous next() or previous() was pointing to."); + } + EncodedColumnQualiferCellsList.this.add(e); + expectedModCount = modCount; } @Override - public List subList(int fromIndex, int toIndex) { - throwGenericUnsupportedOperationException(); - return null; + public void add(Cell e) { + throwGenericUnsupportedOperationException(); } @Override - public Iterator iterator() { - return new Itr(); - } - - /** - * @param qualifierBytes bytes of the column qualifier which serves as the index - * @return {@link Cell} at the index - */ - public Cell getCellForColumnQualifier(byte[] qualifierBytes) { - int columnQualifier = encodingScheme.decode(qualifierBytes); - return getCellForColumnQualifier(columnQualifier); - } - - /** - * @param qualifierBytes bytes of the column qualifier which serves as the index - * @param offset offset in the byte array - * @param length length starting from offset - * @return {@link Cell} at the index - */ - public Cell getCellForColumnQualifier(byte[] qualifierBytes, int offset, int length) { - int columnQualifier = encodingScheme.decode(qualifierBytes, offset, length); - return getCellForColumnQualifier(columnQualifier); - } - - private void adjustFirstNonNullElement() { - int i = firstNonNullElementIdx; - while (i < array.length && (array[i]) == null) { - i++; - } - if (i < array.length) { - firstNonNullElementIdx = i; - } else { - firstNonNullElementIdx = -1; - } - - } - private Cell getCellForColumnQualifier(int columnQualifier) { - checkQualifierRange(columnQualifier); - int idx = getArrayIndex(columnQualifier); - Cell c = array[idx]; - return c; - } - - public Cell getFirstCell() { - if (firstNonNullElementIdx == -1) { - throw new NoSuchElementException("No elements present in the list"); - } - return array[firstNonNullElementIdx]; - } - - private void checkQualifierRange(int qualifier) { - if (qualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { - return; // space in the array for reserved range is always allocated. 
- } - if (qualifier < minQualifier || qualifier > maxQualifier) { - throw new IndexOutOfBoundsException("Qualifier " + qualifier - + " is out of the valid range - (" + minQualifier + ", " + maxQualifier + ")"); - } - } - - private void rangeCheck(int index) { - if (index < 0 || index >= size()) { - throw new IndexOutOfBoundsException(); - } - } - - private int getArrayIndex(int columnQualifier) { - checkArgument(columnQualifier >= ENCODED_EMPTY_COLUMN_NAME); - if (columnQualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { - return columnQualifier; - } - return columnQualifier - nonReservedRangeOffset; - } - - private void throwGenericUnsupportedOperationException() { - throw new UnsupportedOperationException( - "Operation cannot be supported because it potentially violates the invariance contract of this list implementation"); - } - - private class Itr implements Iterator { - protected int nextIndex = 0; - protected int lastRet = -1; - protected int expectedModCount = modCount; - - private Itr() { - moveForward(true); - } - - @Override - public boolean hasNext() { - return nextIndex != -1; - } - - @Override - public Cell next() { - checkForCoModification(); - if (!hasNext()) { - throw new NoSuchElementException(); - } - Cell next = array[nextIndex]; - lastRet = nextIndex; - moveForward(false); - modCount++; - expectedModCount = modCount; - return next; - } - - @Override - public void remove() { - if (lastRet < 0) { - throw new IllegalStateException(); - } - checkForCoModification(); - array[lastRet] = null; - if (firstNonNullElementIdx == lastRet) { - // the element being removed was the first non-null element we knew - adjustFirstNonNullElement(); - } - lastRet = -1; - numNonNullElements--; - modCount++; - expectedModCount = modCount; - } - - protected void moveForward(boolean init) { - int i = init ? 0 : nextIndex + 1; - while (i < array.length && (array[i]) == null) { - i++; - } - if (i < array.length) { - nextIndex = i; - } else { - nextIndex = -1; - } - } - - protected void checkForCoModification() { - if (modCount != expectedModCount) { - throw new ConcurrentModificationException(); - } - } - - } - - private class ListItr extends Itr implements ListIterator { - private int previousIndex = -1; - - private ListItr() { - moveForward(true); - } - - @Override - public boolean hasNext() { - return nextIndex != -1; - } - - @Override - public boolean hasPrevious() { - return previousIndex != -1; - } - - @Override - public Cell previous() { - if (previousIndex == -1) { - throw new NoSuchElementException(); - } - checkForCoModification(); - lastRet = previousIndex; - movePointersBackward(); - return array[lastRet]; - } - - @Override - public int nextIndex() { - return nextIndex; - } - - @Override - public int previousIndex() { - return previousIndex; - } - - @Override - public void remove() { - if (lastRet == nextIndex) { - moveNextPointer(nextIndex); - } - super.remove(); - expectedModCount = modCount; - } - - @Override - public void set(Cell e) { - if (lastRet == -1) { - throw new IllegalStateException(); - } - int columnQualifier = - encodingScheme.decode(e.getQualifierArray(), e.getQualifierOffset(), - e.getQualifierLength()); - int idx = getArrayIndex(columnQualifier); - if (idx != lastRet) { - throw new IllegalArgumentException("Cell " + e + " with column qualifier " - + columnQualifier + " belongs at index " + idx - + ". 
It cannot be added at the position " + lastRet - + " to which the previous next() or previous() was pointing to."); - } - EncodedColumnQualiferCellsList.this.add(e); - expectedModCount = modCount; - } - - @Override - public void add(Cell e) { - throwGenericUnsupportedOperationException(); - } - - @Override - protected void moveForward(boolean init) { - if (!init) { - previousIndex = nextIndex; - } - int i = init ? 0 : nextIndex + 1; - moveNextPointer(i); - } - - private void moveNextPointer(int i) { - while (i < array.length && (array[i]) == null) { - i++; - } - if (i < array.length) { - nextIndex = i; - } else { - nextIndex = -1; - } - } - - private void movePointersBackward() { - nextIndex = previousIndex; - int i = previousIndex - 1; - movePreviousPointer(i); - } - - private void movePreviousPointer(int i) { - for (; i >= 0; i--) { - if (array[i] != null) { - previousIndex = i; - break; - } - } - if (i < 0) { - previousIndex = -1; - } - } - } + protected void moveForward(boolean init) { + if (!init) { + previousIndex = nextIndex; + } + int i = init ? 0 : nextIndex + 1; + moveNextPointer(i); + } + + private void moveNextPointer(int i) { + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + nextIndex = i; + } else { + nextIndex = -1; + } + } + + private void movePointersBackward() { + nextIndex = previousIndex; + int i = previousIndex - 1; + movePreviousPointer(i); + } + + private void movePreviousPointer(int i) { + for (; i >= 0; i--) { + if (array[i] != null) { + previousIndex = i; + break; + } + } + if (i < 0) { + previousIndex = -1; + } + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java index bfa63ba142b..a5e2e98a28c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,59 +31,58 @@ * a particular cell will fail. 
*/ public class MultiKeyValueTuple extends BaseTuple { - private List values; - - public MultiKeyValueTuple(List values) { - setKeyValues(values); - } - - public MultiKeyValueTuple() { - } + private List values; - /** Caller must not modify the list that is passed here */ - @Override - public void setKeyValues(List values) { - this.values = values; - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - Cell value = values.get(0); - ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); - } + public MultiKeyValueTuple(List values) { + setKeyValues(values); + } - @Override - public boolean isImmutable() { - return true; - } + public MultiKeyValueTuple() { + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, values, family, qualifier); - } + /** Caller must not modify the list that is passed here */ + @Override + public void setKeyValues(List values) { + this.values = values; + } - @Override - public String toString() { - return values.toString(); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + Cell value = values.get(0); + ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } - @Override - public int size() { - return values.size(); - } + @Override + public boolean isImmutable() { + return true; + } - @Override - public Cell getValue(int index) { - return values.get(index); - } + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, values, family, + qualifier); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - Cell kv = getValue(family, qualifier); - if (kv == null) - return false; - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; - } + @Override + public String toString() { + return values.toString(); + } + + @Override + public int size() { + return values.size(); + } + + @Override + public Cell getValue(int index) { + return values.get(index); + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell kv = getValue(family, qualifier); + if (kv == null) return false; + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java index 7b00ef7b1e4..3954ab324d1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,61 +30,61 @@ * a MultiKeyValueTuple where we have to do a binary search in the List. 
*/ public class PositionBasedMultiKeyValueTuple extends BaseTuple { - private EncodedColumnQualiferCellsList values; + private EncodedColumnQualiferCellsList values; - public PositionBasedMultiKeyValueTuple() { - } + public PositionBasedMultiKeyValueTuple() { + } - public PositionBasedMultiKeyValueTuple(List values) { - checkArgument(values instanceof EncodedColumnQualiferCellsList, - "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); - this.values = (EncodedColumnQualiferCellsList) values; - } + public PositionBasedMultiKeyValueTuple(List values) { + checkArgument(values instanceof EncodedColumnQualiferCellsList, + "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); + this.values = (EncodedColumnQualiferCellsList) values; + } - /** Caller must not modify the list that is passed here */ - @Override - public void setKeyValues(List values) { - checkArgument(values instanceof EncodedColumnQualiferCellsList, - "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); - this.values = (EncodedColumnQualiferCellsList) values; - } + /** Caller must not modify the list that is passed here */ + @Override + public void setKeyValues(List values) { + checkArgument(values instanceof EncodedColumnQualiferCellsList, + "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); + this.values = (EncodedColumnQualiferCellsList) values; + } - @Override - public void getKey(ImmutableBytesWritable ptr) { - Cell value = values.getFirstCell(); - ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + Cell value = values.getFirstCell(); + ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } - @Override - public boolean isImmutable() { - return true; - } + @Override + public boolean isImmutable() { + return true; + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - return values.getCellForColumnQualifier(qualifier); - } + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + return values.getCellForColumnQualifier(qualifier); + } - @Override - public String toString() { - return values.toString(); - } + @Override + public String toString() { + return values.toString(); + } - @Override - public int size() { - return values.size(); - } + @Override + public int size() { + return values.size(); + } - @Override - public Cell getValue(int index) { - return values.get(index); - } + @Override + public Cell getValue(int index) { + return values.get(index); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { - Cell kv = getValue(family, qualifier); - if (kv == null) return false; - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; - } + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell kv = getValue(family, qualifier); + if (kv == null) return false; + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java index 34b2d5a7cc1..b0af2dbac5c 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,103 +23,100 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.util.EncodedColumnsUtil; public class PositionBasedResultTuple extends BaseTuple { - private final EncodedColumnQualiferCellsList cells; - - public PositionBasedResultTuple(List list) { - checkArgument(list instanceof EncodedColumnQualiferCellsList, "Invalid list type"); - this.cells = (EncodedColumnQualiferCellsList)list; + private final EncodedColumnQualiferCellsList cells; + + public PositionBasedResultTuple(List list) { + checkArgument(list instanceof EncodedColumnQualiferCellsList, "Invalid list type"); + this.cells = (EncodedColumnQualiferCellsList) list; + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { + Cell value = cells.getFirstCell(); + ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } + + @Override + public boolean isImmutable() { + return true; + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + return cells.getCellForColumnQualifier(qualifier); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("keyvalues="); + if (this.cells == null || this.cells.isEmpty()) { + sb.append("NONE"); + return sb.toString(); } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - Cell value = cells.getFirstCell(); - ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + sb.append("{"); + boolean moreThanOne = false; + for (Cell kv : this.cells) { + if (moreThanOne) { + sb.append(", \n"); + } else { + moreThanOne = true; + } + sb.append(kv.toString() + "/value=" + + Bytes.toString(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); } + sb.append("}\n"); + return sb.toString(); + } - @Override - public boolean isImmutable() { - return true; - } + @Override + public int size() { + return cells.size(); + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - return cells.getCellForColumnQualifier(qualifier); - } + @Override + public Cell getValue(int index) { + return index == 0 ? 
cells.getFirstCell() : cells.get(index); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("keyvalues="); - if(this.cells == null || this.cells.isEmpty()) { - sb.append("NONE"); - return sb.toString(); - } - sb.append("{"); - boolean moreThanOne = false; - for(Cell kv : this.cells) { - if(moreThanOne) { - sb.append(", \n"); - } else { - moreThanOne = true; - } - sb.append(kv.toString()+"/value="+Bytes.toString(kv.getValueArray(), - kv.getValueOffset(), kv.getValueLength())); - } - sb.append("}\n"); - return sb.toString(); + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell kv = getValue(family, qualifier); + if (kv == null) return false; + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } + + public Iterator getTupleIterator() { + return new TupleIterator(cells.iterator()); + } + + private static class TupleIterator implements Iterator { + + private final Iterator delegate; + + private TupleIterator(Iterator delegate) { + this.delegate = delegate; } @Override - public int size() { - return cells.size(); + public boolean hasNext() { + return delegate.hasNext(); } @Override - public Cell getValue(int index) { - return index == 0 ? cells.getFirstCell() : cells.get(index); + public Cell next() { + return delegate.next(); } @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - Cell kv = getValue(family, qualifier); - if (kv == null) - return false; - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; - } - - public Iterator getTupleIterator() { - return new TupleIterator(cells.iterator()); - } - - private static class TupleIterator implements Iterator { - - private final Iterator delegate; - private TupleIterator(Iterator delegate) { - this.delegate = delegate; - } - - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public Cell next() { - return delegate.next(); - } - - @Override - public void remove() { - delegate.remove(); - } - + public void remove() { + delegate.remove(); } + + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java index 825728f4c34..e2e4af961ae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,77 +28,75 @@ import org.apache.phoenix.util.PhoenixKeyValueUtil; /** - * * Wrapper around {@link Result} that implements Phoenix's {@link Tuple} interface. - * */ public class ResultTuple extends BaseTuple { - private final Result result; - public static final ResultTuple EMPTY_TUPLE = new ResultTuple(Result.create(Collections.emptyList())); - public ResultTuple(Result result) { - this.result = result; - } - - public Result getResult() { - return this.result; - } + private final Result result; + public static final ResultTuple EMPTY_TUPLE = + new ResultTuple(Result.create(Collections. 
emptyList())); - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(result.getRow()); - } + public ResultTuple(Result result) { + this.result = result; + } - @Override - public boolean isImmutable() { - return true; - } + public Result getResult() { + return this.result; + } - @Override - public Cell getValue(byte[] family, byte[] qualifier) { - return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, - result.rawCells(), family, qualifier); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(result.getRow()); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("keyvalues="); - if(this.result == null || this.result.isEmpty()) { - sb.append("NONE"); - return sb.toString(); - } - sb.append("{"); - boolean moreThanOne = false; - for(Cell kv : this.result.listCells()) { - if(moreThanOne) { - sb.append(", \n"); - } else { - moreThanOne = true; - } - sb.append(kv.toString()+"/value="+Bytes.toString(kv.getValueArray(), - kv.getValueOffset(), kv.getValueLength())); - } - sb.append("}\n"); + @Override + public boolean isImmutable() { + return true; + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { + return PhoenixKeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE, result.rawCells(), + family, qualifier); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("keyvalues="); + if (this.result == null || this.result.isEmpty()) { + sb.append("NONE"); return sb.toString(); } - - @Override - public int size() { - return result.size(); + sb.append("{"); + boolean moreThanOne = false; + for (Cell kv : this.result.listCells()) { + if (moreThanOne) { + sb.append(", \n"); + } else { + moreThanOne = true; + } + sb.append(kv.toString() + "/value=" + + Bytes.toString(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); } + sb.append("}\n"); + return sb.toString(); + } - @Override - public KeyValue getValue(int index) { - return PhoenixKeyValueUtil.maybeCopyCell(result.rawCells()[index]); - } + @Override + public int size() { + return result.size(); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - Cell kv = getValue(family, qualifier); - if (kv == null) - return false; - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; - } -} \ No newline at end of file + @Override + public KeyValue getValue(int index) { + return PhoenixKeyValueUtil.maybeCopyCell(result.rawCells()[index]); + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + Cell kv = getValue(family, qualifier); + if (kv == null) return false; + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java index c2895b8488b..c4552029b70 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/SingleKeyValueTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,101 +21,99 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; - public class SingleKeyValueTuple extends BaseTuple { - private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; - private Cell cell; - private ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - - public SingleKeyValueTuple() { - } - - public SingleKeyValueTuple(Cell cell) { - if (cell == null) { - throw new NullPointerException(); - } - setCell(cell); - } - - public boolean hasKey() { - return rowKeyPtr.get() != UNITIALIZED_KEY_BUFFER; - } - - public void reset() { - this.cell = null; - rowKeyPtr.set(UNITIALIZED_KEY_BUFFER); - } - - public void setCell(Cell cell) { - if (cell == null) { - throw new IllegalArgumentException(); - } - this.cell = cell; - setKey(cell); - } - - public void setKey(ImmutableBytesWritable ptr) { - rowKeyPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - } - - public void setKey(Cell cell) { - if (cell == null) { - throw new IllegalArgumentException(); - } - rowKeyPtr.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength()); - } - - @Override - public Cell getValue(byte[] cf, byte[] cq) { - return cell; - } + private static final byte[] UNITIALIZED_KEY_BUFFER = new byte[0]; + private Cell cell; + private ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(UNITIALIZED_KEY_BUFFER); - @Override - public boolean isImmutable() { - return true; + public SingleKeyValueTuple() { + } + + public SingleKeyValueTuple(Cell cell) { + if (cell == null) { + throw new NullPointerException(); } + setCell(cell); + } + + public boolean hasKey() { + return rowKeyPtr.get() != UNITIALIZED_KEY_BUFFER; + } + + public void reset() { + this.cell = null; + rowKeyPtr.set(UNITIALIZED_KEY_BUFFER); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("SingleKeyValueTuple["); - if (cell == null) { - if (rowKeyPtr.get() == UNITIALIZED_KEY_BUFFER) { - sb.append("null"); - } else { - sb.append(Bytes.toStringBinary(rowKeyPtr.get(),rowKeyPtr.getOffset(),rowKeyPtr.getLength())); - } - } else { - sb.append(cell.toString()); - } - sb.append("]"); - return sb.toString(); + public void setCell(Cell cell) { + if (cell == null) { + throw new IllegalArgumentException(); } + this.cell = cell; + setKey(cell); + } - @Override - public int size() { - return cell == null ? 
0 : 1; + public void setKey(ImmutableBytesWritable ptr) { + rowKeyPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + } + + public void setKey(Cell cell) { + if (cell == null) { + throw new IllegalArgumentException(); } + rowKeyPtr.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength()); + } - @Override - public Cell getValue(int index) { - if (index != 0 || cell == null) { - throw new IndexOutOfBoundsException(Integer.toString(index)); - } - return cell; + @Override + public Cell getValue(byte[] cf, byte[] cq) { + return cell; + } + + @Override + public boolean isImmutable() { + return true; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("SingleKeyValueTuple["); + if (cell == null) { + if (rowKeyPtr.get() == UNITIALIZED_KEY_BUFFER) { + sb.append("null"); + } else { + sb.append( + Bytes.toStringBinary(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength())); + } + } else { + sb.append(cell.toString()); } + sb.append("]"); + return sb.toString(); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - if (cell == null) - return false; - ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return true; + @Override + public int size() { + return cell == null ? 0 : 1; + } + + @Override + public Cell getValue(int index) { + if (index != 0 || cell == null) { + throw new IndexOutOfBoundsException(Integer.toString(index)); } + return cell; + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + if (cell == null) return false; + ptr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java index d42cd2d3873..a1ec76bf1ca 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/Tuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,82 +23,70 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; /** - * - * Interface representing an ordered list of KeyValues returned as the - * result of a query. Each tuple represents a row (i.e. all its KeyValues - * will have the same key), and each KeyValue represents a column value. - * - * + * Interface representing an ordered list of KeyValues returned as the result of a query. Each tuple + * represents a row (i.e. all its KeyValues will have the same key), and each KeyValue represents a + * column value. * @since 0.1 */ public interface Tuple { - /** - * @return Number of KeyValues contained by the Tuple. - */ - public int size(); - - /** - * Determines whether or not the Tuple is immutable (the typical case) - * or will potentially have additional KeyValues added to it (the case - * during filter evaluation when we see one KeyValue at a time). 
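Aside (illustrative only, not part of the patch): a minimal caller-side sketch of the Tuple accessors documented above. TuplePrinter is a hypothetical helper, not a Phoenix class; it uses only size() and getValue(int) as declared in the interface.

    // Illustrative sketch only: walks an immutable Tuple by index.
    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.schema.tuple.Tuple;

    public final class TuplePrinter {
      private TuplePrinter() {
      }

      // Indexes are zero-based and bounded by size(), as the javadoc above describes.
      public static void print(Tuple tuple) {
        for (int i = 0; i < tuple.size(); i++) {
          Cell cell = tuple.getValue(i);
          System.out.println(cell);
        }
      }
    }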
- * @return true if Tuple is immutable and false otherwise. - */ - public boolean isImmutable(); - - /** - * Get the row key for the Tuple - * @param ptr the bytes pointer that will be updated to point to - * the key buffer. - */ - public void getKey(ImmutableBytesWritable ptr); + /** Returns Number of KeyValues contained by the Tuple. */ + public int size(); + + /** + * Determines whether or not the Tuple is immutable (the typical case) or will potentially have + * additional KeyValues added to it (the case during filter evaluation when we see one KeyValue at + * a time). + * @return true if Tuple is immutable and false otherwise. + */ + public boolean isImmutable(); + + /** + * Get the row key for the Tuple + * @param ptr the bytes pointer that will be updated to point to the key buffer. + */ + public void getKey(ImmutableBytesWritable ptr); + + /** + * Get the KeyValue at the given index whose value is concatenated with the serialized list of + * dynamic column PColumns for that row key. + * @param index the zero-based KeyValue index between 0 and {@link #size()} exclusive + * @param dynColsList the serialized list of dynamic column PColumns + * @return the KeyValue at the given index + * @throws IndexOutOfBoundsException if an invalid index is used + */ + public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList); + + /** + * Get the KeyValue at the given index. + * @param index the zero-based KeyValue index between 0 and {@link #size()} exclusive + * @return the KeyValue at the given index + * @throws IndexOutOfBoundsException if an invalid index is used + */ + public Cell getValue(int index); + + /** + * Get the KeyValue contained by the Tuple with the given family and qualifier name. + * @param family the column family of the KeyValue being retrieved + * @param qualifier the column qualify of the KeyValue being retrieved + * @return the KeyValue with the given family and qualifier name or null if not found. + */ + public Cell getValue(byte[] family, byte[] qualifier); + + /** + * Get the value byte array of the KeyValue contained by the Tuple with the given family and + * qualifier name. + * @param family the column family of the KeyValue being retrieved + * @param qualifier the column qualify of the KeyValue being retrieved + * @param ptr the bytes pointer that will be updated to point to the value buffer. + * @return true if the KeyValue with the given family and qualifier name exists; otherwise false. + */ + public boolean getValue(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr); - /** - * Get the KeyValue at the given index whose value is concatenated with the serialized list of - * dynamic column PColumns for that row key. - * @param index the zero-based KeyValue index between 0 and {@link #size()} exclusive - * @param dynColsList the serialized list of dynamic column PColumns - * @return the KeyValue at the given index - * @throws IndexOutOfBoundsException if an invalid index is used - */ - public Cell mergeWithDynColsListBytesAndGetValue(int index, byte[] dynColsList); + /** + * Get the sequence value given the sequence index. May only be evaluated on the client-side. + * @return the current or next sequence value + */ + public long getSequenceValue(int index); - /** - * Get the KeyValue at the given index. 
- * @param index the zero-based KeyValue index between 0 and {@link #size()} exclusive - * @return the KeyValue at the given index - * @throws IndexOutOfBoundsException if an invalid index is used - */ - public Cell getValue(int index); - - /** - * Get the KeyValue contained by the Tuple with the given family and - * qualifier name. - * @param family the column family of the KeyValue being retrieved - * @param qualifier the column qualify of the KeyValue being retrieved - * @return the KeyValue with the given family and qualifier name or - * null if not found. - */ - public Cell getValue(byte [] family, byte [] qualifier); - - /** - * Get the value byte array of the KeyValue contained by the Tuple with - * the given family and qualifier name. - * @param family the column family of the KeyValue being retrieved - * @param qualifier the column qualify of the KeyValue being retrieved - * @param ptr the bytes pointer that will be updated to point to the - * value buffer. - * @return true if the KeyValue with the given family and qualifier name - * exists; otherwise false. - */ - public boolean getValue(byte [] family, byte [] qualifier, ImmutableBytesWritable ptr); - - /** - * Get the sequence value given the sequence index. May only be evaluated - * on the client-side. - * @param index - * @return the current or next sequence value - */ - public long getSequenceValue(int index); - - public void setKeyValues(List values); + public void setKeyValues(List values); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java index e25be80ac93..3c0592f505c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/tuple/ValueGetterTuple.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,86 +28,84 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference; /** - * * Class used to construct a {@link Tuple} in order to evaluate an {@link Expression} */ public class ValueGetterTuple extends BaseTuple { - private final ValueGetter valueGetter; - private final long ts; - - public ValueGetterTuple(ValueGetter valueGetter, long ts) { - this.valueGetter = valueGetter; - this.ts = ts; - } - - public ValueGetterTuple() { - this.valueGetter = null; - this.ts = HConstants.LATEST_TIMESTAMP; - } - - @Override - public void getKey(ImmutableBytesWritable ptr) { - ptr.set(valueGetter.getRowKey()); - } + private final ValueGetter valueGetter; + private final long ts; - @Override - public boolean isImmutable() { - return true; - } + public ValueGetterTuple(ValueGetter valueGetter, long ts) { + this.valueGetter = valueGetter; + this.ts = ts; + } - public KeyValue getValueUnsafe(byte[] family, byte[] qualifier) { - try { - return valueGetter.getLatestKeyValue(new ColumnReference(family, qualifier), ts); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + public ValueGetterTuple() { + this.valueGetter = null; + this.ts = HConstants.LATEST_TIMESTAMP; + } - @Override - public KeyValue getValue(byte[] family, byte[] qualifier) { - KeyValue kv = getValueUnsafe(family, qualifier); - if (kv != null) { - return kv; - } - byte[] rowKey = valueGetter.getRowKey(); - byte[] valueBytes = HConstants.EMPTY_BYTE_ARRAY; - return new KeyValue(rowKey, 0, rowKey.length, family, 0, family.length, qualifier, 0, qualifier.length, ts, Type.Put, valueBytes, 0, 0); - } + @Override + public void getKey(ImmutableBytesWritable ptr) { + ptr.set(valueGetter.getRowKey()); + } - @Override - public String toString() { - throw new UnsupportedOperationException(); - } + @Override + public boolean isImmutable() { + return true; + } - @Override - public int size() { - throw new UnsupportedOperationException(); + public KeyValue getValueUnsafe(byte[] family, byte[] qualifier) { + try { + return valueGetter.getLatestKeyValue(new ColumnReference(family, qualifier), ts); + } catch (IOException e) { + throw new RuntimeException(e); } + } - @Override - public KeyValue getValue(int index) { - throw new UnsupportedOperationException(); + @Override + public KeyValue getValue(byte[] family, byte[] qualifier) { + KeyValue kv = getValueUnsafe(family, qualifier); + if (kv != null) { + return kv; } + byte[] rowKey = valueGetter.getRowKey(); + byte[] valueBytes = HConstants.EMPTY_BYTE_ARRAY; + return new KeyValue(rowKey, 0, rowKey.length, family, 0, family.length, qualifier, 0, + qualifier.length, ts, Type.Put, valueBytes, 0, 0); + } + + @Override + public String toString() { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + + @Override + public KeyValue getValue(int index) { + throw new UnsupportedOperationException(); + } - @Override - public boolean getValue(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - KeyValue kv = getValue(family, qualifier); - if (kv == null) { - return false; - } - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; + @Override + public boolean getValue(byte[] family, 
byte[] qualifier, ImmutableBytesWritable ptr) { + KeyValue kv = getValue(family, qualifier); + if (kv == null) { + return false; } + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } - public boolean getValueUnsafe(byte[] family, byte[] qualifier, - ImmutableBytesWritable ptr) { - KeyValue kv = getValueUnsafe(family, qualifier); - if (kv == null) { - return false; - } - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return true; + public boolean getValueUnsafe(byte[] family, byte[] qualifier, ImmutableBytesWritable ptr) { + KeyValue kv = getValueUnsafe(family, qualifier); + if (kv == null) { + return false; } + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java index f98e1669dd7..1f83b499e4d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,1151 +42,1258 @@ import edu.umd.cs.findbugs.annotations.SuppressWarnings; /** - * The datatype for PColummns that are Arrays. Any variable length array would follow the below order. Every element - * would be seperated by a seperator byte '0'. Null elements are counted and once a first non null element appears we - * write the count of the nulls prefixed with a seperator byte. Trailing nulls are not taken into account. The last non - * null element is followed by two seperator bytes. For eg {@code a, b, null, null, c, null -> 65 0 66 0 0 2 67 0 0 0 a null - * null null b c null d -> 65 0 0 3 66 0 67 0 0 1 68 0 0 0 }. The reason we use this serialization format is to allow the - * byte array of arrays of the same type to be directly comparable against each other. This prevents a costly - * deserialization on compare and allows an array column to be used as the last column in a primary key constraint. + * The datatype for PColummns that are Arrays. Any variable length array would follow the below + * order. Every element would be seperated by a seperator byte '0'. Null elements are counted and + * once a first non null element appears we write the count of the nulls prefixed with a seperator + * byte. Trailing nulls are not taken into account. The last non null element is followed by two + * seperator bytes. For eg {@code a, b, null, null, c, null -> 65 0 66 0 0 2 67 0 0 0 a null + * null null b c null d -> 65 0 0 3 66 0 67 0 0 1 68 0 0 0 }. The reason we use this serialization + * format is to allow the byte array of arrays of the same type to be directly comparable against + * each other. This prevents a costly deserialization on compare and allows an array column to be + * used as the last column in a primary key constraint. 
*/ public abstract class PArrayDataType extends PDataType { - @Override - public final int getResultSetSqlType() { - return Types.ARRAY; + @Override + public final int getResultSetSqlType() { + return Types.ARRAY; + } + + @Override + public final void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength, + Integer desiredScale, SortOrder desiredModifier, boolean expectedRowKeyOrderOptimizable) { + coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale, this, + actualModifer, desiredModifier, expectedRowKeyOrderOptimizable); + } + + @Override + public final void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength, + Integer desiredScale, SortOrder desiredModifier) { + coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale, this, + actualModifer, desiredModifier, true); + } + + // array serialization format where bytes can be used as part of the row key + public static final byte SORTABLE_SERIALIZATION_VERSION = 1; + // array serialization format where bytes are immutable (does not support prepend/append or + // sorting) + @Deprecated + public static final byte IMMUTABLE_SERIALIZATION_VERSION = 2; + // array serialization format where bytes are immutable (does not support prepend/append or + // sorting) + // differs from V1 in that nulls are not serialized + // we rely only on offsets to determine the presence of nulls + public static final byte IMMUTABLE_SERIALIZATION_V2 = 3; + + protected PArrayDataType(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, + int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } + + public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, SortOrder sortOrder) { + return SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, sortOrder); + } + + public byte[] toBytes(Object object, PDataType baseType, SortOrder sortOrder) { + return toBytes(object, baseType, sortOrder, true); + } + + /** + * Ensures that the provided {@code object} is a PhoenixArray, attempting a conversion in the case + * when it is not. 
+ */ + PhoenixArray toPhoenixArray(Object object, PDataType baseType) { + if (object instanceof PhoenixArray) { + return (PhoenixArray) object; } - - @Override - public final void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength, - Integer desiredScale, SortOrder desiredModifier, boolean expectedRowKeyOrderOptimizable) { - coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale, - this, actualModifer, desiredModifier, expectedRowKeyOrderOptimizable); + if (!(object instanceof Array)) { + throw new IllegalArgumentException("Expected an Array but got " + object.getClass()); } - - @Override - public final void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength, - Integer desiredScale, SortOrder desiredModifier) { - coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale, - this, actualModifer, desiredModifier, true); + Array arr = (Array) object; + try { + Object untypedArrayData = arr.getArray(); + if (!(untypedArrayData instanceof Object[])) { + throw new IllegalArgumentException("Array data is required to be Object[] but data for " + + arr.getClass() + " is " + untypedArrayData.getClass()); + } + return this.getArrayFactory().newArray(baseType, (Object[]) untypedArrayData); + } catch (SQLException e) { + throw new IllegalArgumentException("Could not convert Array data", e); } + } - // array serialization format where bytes can be used as part of the row key - public static final byte SORTABLE_SERIALIZATION_VERSION = 1; - // array serialization format where bytes are immutable (does not support prepend/append or sorting) - @Deprecated - public static final byte IMMUTABLE_SERIALIZATION_VERSION = 2; - // array serialization format where bytes are immutable (does not support prepend/append or sorting) - // differs from V1 in that nulls are not serialized - // we rely only on offsets to determine the presence of nulls - public static final byte IMMUTABLE_SERIALIZATION_V2 = 3; - - protected PArrayDataType(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); + public byte[] toBytes(Object object, PDataType baseType, SortOrder sortOrder, + boolean rowKeyOrderOptimizable) { + if (object == null) { + throw new ConstraintViolationException(this + " may not be null"); } - - public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, SortOrder sortOrder) { - return SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, false, sortOrder); + PhoenixArray arr = toPhoenixArray(object, baseType); + int noOfElements = arr.numElements; + if (noOfElements == 0) { + return ByteUtil.EMPTY_BYTE_ARRAY; } - - public byte[] toBytes(Object object, PDataType baseType, SortOrder sortOrder) { - return toBytes(object, baseType, sortOrder, true); + TrustedByteArrayOutputStream byteStream = null; + if (!baseType.isFixedWidth()) { + Pair nullsVsNullRepeationCounter = new Pair<>(); + int size = estimateByteSize(object, nullsVsNullRepeationCounter, + PDataType.fromTypeId((baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE))); + size += ((2 * Bytes.SIZEOF_BYTE) + + (noOfElements - nullsVsNullRepeationCounter.getFirst()) * Bytes.SIZEOF_BYTE) + + (nullsVsNullRepeationCounter.getSecond() * 2 * Bytes.SIZEOF_BYTE); + // Assume an offset array that fit into 
Short.MAX_VALUE. Also not considering nulls that could + // be > 255 + // In both of these cases, finally an array copy would happen + int capacity = noOfElements * Bytes.SIZEOF_SHORT; + // Here the int for noofelements, byte for the version, int for the offsetarray position and 2 + // bytes for the + // end seperator + byteStream = new TrustedByteArrayOutputStream( + size + capacity + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT); + } else { + int elemLength = (arr.getMaxLength() == null ? baseType.getByteSize() : arr.getMaxLength()); + int size = elemLength * noOfElements; + // Here the int for noofelements, byte for the version + byteStream = new TrustedByteArrayOutputStream(size); } - - /** - * Ensures that the provided {@code object} is a PhoenixArray, attempting a conversion in the - * case when it is not. - */ - PhoenixArray toPhoenixArray(Object object, PDataType baseType) { - if (object instanceof PhoenixArray) { - return (PhoenixArray) object; - } - if (!(object instanceof Array)) { - throw new IllegalArgumentException("Expected an Array but got " + object.getClass()); - } - Array arr = (Array) object; - try { - Object untypedArrayData = arr.getArray(); - if (!(untypedArrayData instanceof Object[])) { - throw new IllegalArgumentException("Array data is required to be Object[] but data for " - + arr.getClass() + " is " + untypedArrayData.getClass()); - } - return this.getArrayFactory().newArray(baseType, (Object[]) untypedArrayData); - } catch (SQLException e) { - throw new IllegalArgumentException("Could not convert Array data", e); - } - } - - public byte[] toBytes(Object object, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { - if (object == null) { throw new ConstraintViolationException(this + " may not be null"); } - PhoenixArray arr = toPhoenixArray(object, baseType); - int noOfElements = arr.numElements; - if (noOfElements == 0) { return ByteUtil.EMPTY_BYTE_ARRAY; } - TrustedByteArrayOutputStream byteStream = null; - if (!baseType.isFixedWidth()) { - Pair nullsVsNullRepeationCounter = new Pair<>(); - int size = estimateByteSize(object, nullsVsNullRepeationCounter, - PDataType.fromTypeId((baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE))); - size += ((2 * Bytes.SIZEOF_BYTE) + (noOfElements - nullsVsNullRepeationCounter.getFirst()) - * Bytes.SIZEOF_BYTE) - + (nullsVsNullRepeationCounter.getSecond() * 2 * Bytes.SIZEOF_BYTE); - // Assume an offset array that fit into Short.MAX_VALUE. Also not considering nulls that could be > 255 - // In both of these cases, finally an array copy would happen - int capacity = noOfElements * Bytes.SIZEOF_SHORT; - // Here the int for noofelements, byte for the version, int for the offsetarray position and 2 bytes for the - // end seperator - byteStream = new TrustedByteArrayOutputStream(size + capacity + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE - + Bytes.SIZEOF_INT); - } else { - int elemLength = (arr.getMaxLength() == null ? 
baseType.getByteSize() : arr.getMaxLength()); - int size = elemLength * noOfElements; - // Here the int for noofelements, byte for the version - byteStream = new TrustedByteArrayOutputStream(size); - } - DataOutputStream oStream = new DataOutputStream(byteStream); - // Handles bit inversion also - return createArrayBytes(byteStream, oStream, arr, noOfElements, baseType, sortOrder, rowKeyOrderOptimizable); + DataOutputStream oStream = new DataOutputStream(byteStream); + // Handles bit inversion also + return createArrayBytes(byteStream, oStream, arr, noOfElements, baseType, sortOrder, + rowKeyOrderOptimizable); + } + + public static int serializeNulls(DataOutputStream oStream, int nulls) throws IOException { + // We need to handle 3 different cases here + // 1) Arrays with repeating nulls in the middle which is less than 255 + // 2) Arrays with repeating nulls in the middle which is less than 255 but greater than + // bytes.MAX_VALUE + // 3) Arrays with repeating nulls in the middle greaterh than 255 + // Take a case where we have two arrays that has the following elements + // Array 1 - size : 240, elements = abc, bcd, null, null, bcd,null,null......,null, abc + // Array 2 - size : 16 : elements = abc, bcd, null, null, bcd, null, null...null, abc + // In both case the elements and the value array will be the same but the Array 1 is actually + // smaller because it + // has more nulls. + // Now we should have mechanism to show that we treat arrays with more nulls as lesser. Hence in + // the above case + // as + // 240 > Bytes.MAX_VALUE, by always inverting the number of nulls we would get a +ve value + // For Array 2, by inverting we would get a -ve value. On comparison Array 2 > Array 1. + // Now for cases where the number of nulls is greater than 255, we would write an those many + // (byte)1, it is + // bigger than 255. + // This would ensure that we don't compare with triple zero which is used as an end byte + if (nulls > 0) { + oStream.write(QueryConstants.SEPARATOR_BYTE); + int nMultiplesOver255 = nulls / 255; + while (nMultiplesOver255-- > 0) { + // Don't write a zero byte, as we need to ensure that the only triple zero + // byte occurs at the end of the array (i.e. the terminator byte for the + // element plus the double zero byte at the end of the array). + oStream.write((byte) 1); + } + int nRemainingNulls = nulls % 255; // From 0 to 254 + // Write a byte for the remaining null elements + if (nRemainingNulls > 0) { + // Remaining null elements is from 1 to 254. + // Subtract one and invert so that more remaining nulls becomes smaller than less + // remaining nulls and min byte value is always greater than 1, the repeating value + // used for arrays with more than 255 repeating null elements. + // The reason we invert is that an array with less null elements has a non + // null element sooner than an array with more null elements. Thus, the more + // null elements you have, the smaller the array becomes. 
+ byte nNullByte = SortOrder.invert((byte) (nRemainingNulls - 1)); + oStream.write(nNullByte); // Single byte for repeating nulls + } } + return 0; + } - public static int serializeNulls(DataOutputStream oStream, int nulls) throws IOException { - // We need to handle 3 different cases here - // 1) Arrays with repeating nulls in the middle which is less than 255 - // 2) Arrays with repeating nulls in the middle which is less than 255 but greater than bytes.MAX_VALUE - // 3) Arrays with repeating nulls in the middle greaterh than 255 - // Take a case where we have two arrays that has the following elements - // Array 1 - size : 240, elements = abc, bcd, null, null, bcd,null,null......,null, abc - // Array 2 - size : 16 : elements = abc, bcd, null, null, bcd, null, null...null, abc - // In both case the elements and the value array will be the same but the Array 1 is actually smaller because it - // has more nulls. - // Now we should have mechanism to show that we treat arrays with more nulls as lesser. Hence in the above case - // as - // 240 > Bytes.MAX_VALUE, by always inverting the number of nulls we would get a +ve value - // For Array 2, by inverting we would get a -ve value. On comparison Array 2 > Array 1. - // Now for cases where the number of nulls is greater than 255, we would write an those many (byte)1, it is - // bigger than 255. - // This would ensure that we don't compare with triple zero which is used as an end byte - if (nulls > 0) { - oStream.write(QueryConstants.SEPARATOR_BYTE); - int nMultiplesOver255 = nulls / 255; - while (nMultiplesOver255-- > 0) { - // Don't write a zero byte, as we need to ensure that the only triple zero - // byte occurs at the end of the array (i.e. the terminator byte for the - // element plus the double zero byte at the end of the array). - oStream.write((byte)1); - } - int nRemainingNulls = nulls % 255; // From 0 to 254 - // Write a byte for the remaining null elements - if (nRemainingNulls > 0) { - // Remaining null elements is from 1 to 254. - // Subtract one and invert so that more remaining nulls becomes smaller than less - // remaining nulls and min byte value is always greater than 1, the repeating value - // used for arrays with more than 255 repeating null elements. - // The reason we invert is that an array with less null elements has a non - // null element sooner than an array with more null elements. Thus, the more - // null elements you have, the smaller the array becomes. 
- byte nNullByte = SortOrder.invert((byte)(nRemainingNulls - 1)); - oStream.write(nNullByte); // Single byte for repeating nulls - } - } - return 0; + public static int serializeNulls(byte[] bytes, int position, int nulls) { + int nMultiplesOver255 = nulls / 255; + while (nMultiplesOver255-- > 0) { + bytes[position++] = 1; } - - public static int serializeNulls(byte[] bytes, int position, int nulls) { - int nMultiplesOver255 = nulls / 255; - while (nMultiplesOver255-- > 0) { - bytes[position++] = 1; - } - int nRemainingNulls = nulls % 255; - if (nRemainingNulls > 0) { - byte nNullByte = SortOrder.invert((byte)(nRemainingNulls - 1)); - bytes[position++] = nNullByte; - } - return position; + int nRemainingNulls = nulls % 255; + if (nRemainingNulls > 0) { + byte nNullByte = SortOrder.invert((byte) (nRemainingNulls - 1)); + bytes[position++] = nNullByte; } - - public static void writeEndSeperatorForVarLengthArray(DataOutputStream oStream, SortOrder sortOrder) throws IOException { - writeEndSeperatorForVarLengthArray(oStream, sortOrder, true); + return position; + } + + public static void writeEndSeperatorForVarLengthArray(DataOutputStream oStream, + SortOrder sortOrder) throws IOException { + writeEndSeperatorForVarLengthArray(oStream, sortOrder, true); + } + + public static void writeEndSeperatorForVarLengthArray(DataOutputStream oStream, + SortOrder sortOrder, boolean rowKeyOrderOptimizable) throws IOException { + byte sepByte = getSeparatorByte(rowKeyOrderOptimizable, sortOrder); + oStream.write(sepByte); + oStream.write(sepByte); + } + + // this method is only for append/prepend/concat operations which are only supported for the + // SORTABLE_SERIALIZATION_VERSION + public static boolean useShortForOffsetArray(int maxoffset) { + return useShortForOffsetArray(maxoffset, SORTABLE_SERIALIZATION_VERSION); + } + + public static boolean useShortForOffsetArray(int maxoffset, byte serializationVersion) { + if ( + serializationVersion == IMMUTABLE_SERIALIZATION_VERSION + || serializationVersion == IMMUTABLE_SERIALIZATION_V2 + ) { + return (maxoffset <= Short.MAX_VALUE && maxoffset >= Short.MIN_VALUE); } - - public static void writeEndSeperatorForVarLengthArray(DataOutputStream oStream, SortOrder sortOrder, boolean rowKeyOrderOptimizable) - throws IOException { - byte sepByte = getSeparatorByte(rowKeyOrderOptimizable, sortOrder); - oStream.write(sepByte); - oStream.write(sepByte); + // If the max offset is less than Short.MAX_VALUE then offset array can use short + else if (maxoffset <= (2 * Short.MAX_VALUE)) { + return true; } - - // this method is only for append/prepend/concat operations which are only supported for the SORTABLE_SERIALIZATION_VERSION - public static boolean useShortForOffsetArray(int maxoffset) { - return useShortForOffsetArray(maxoffset, SORTABLE_SERIALIZATION_VERSION); + // else offset array can use Int + return false; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + PhoenixArray array = (PhoenixArray) object; + if (array == null || array.baseType == null) { + return 0; } - - public static boolean useShortForOffsetArray(int maxoffset, byte serializationVersion) { - if (serializationVersion == IMMUTABLE_SERIALIZATION_VERSION || serializationVersion == IMMUTABLE_SERIALIZATION_V2) { - return (maxoffset <= Short.MAX_VALUE && maxoffset >= Short.MIN_VALUE ); - } - // If the max offset is less than Short.MAX_VALUE then offset array can use short - else if (maxoffset <= (2 * Short.MAX_VALUE)) { return true; } - // else offset array can use Int - 
return false; + return estimateByteSize(object, null, + PDataType.fromTypeId((array.baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE))); + } + + // Estimates the size of the given array and also calculates the number of nulls and its + // repetition factor + public int estimateByteSize(Object o, Pair nullsVsNullRepeationCounter, + PDataType baseType) { + if (baseType.isFixedWidth()) { + return baseType.getByteSize(); } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - PhoenixArray array = (PhoenixArray)object; - if (array == null || array.baseType == null) { return 0; } - return estimateByteSize(object, null, - PDataType.fromTypeId((array.baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE))); - } - - // Estimates the size of the given array and also calculates the number of nulls and its repetition factor - public int estimateByteSize(Object o, Pair nullsVsNullRepeationCounter, PDataType baseType) { - if (baseType.isFixedWidth()) { return baseType.getByteSize(); } - if (baseType.isArrayType()) { - PhoenixArray array = (PhoenixArray)o; - int noOfElements = array.numElements; - int totalVarSize = 0; - int nullsRepeationCounter = 0; - int nulls = 0; - int totalNulls = 0; - for (int i = 0; i < noOfElements; i++) { - totalVarSize += array.estimateByteSize(i); - if (!PDataType.fromTypeId((baseType.getSqlType() - PDataType.ARRAY_TYPE_BASE)).isFixedWidth()) { - if (array.isNull(i)) { - nulls++; - } else { - if (nulls > 0) { - totalNulls += nulls; - nulls = 0; - nullsRepeationCounter++; - } - } - } - } - if (nullsVsNullRepeationCounter != null) { - if (nulls > 0) { - totalNulls += nulls; - // do not increment nullsRepeationCounter to identify trailing nulls - } - nullsVsNullRepeationCounter.setFirst(totalNulls); - nullsVsNullRepeationCounter.setSecond(nullsRepeationCounter); + if (baseType.isArrayType()) { + PhoenixArray array = (PhoenixArray) o; + int noOfElements = array.numElements; + int totalVarSize = 0; + int nullsRepeationCounter = 0; + int nulls = 0; + int totalNulls = 0; + for (int i = 0; i < noOfElements; i++) { + totalVarSize += array.estimateByteSize(i); + if ( + !PDataType.fromTypeId((baseType.getSqlType() - PDataType.ARRAY_TYPE_BASE)).isFixedWidth() + ) { + if (array.isNull(i)) { + nulls++; + } else { + if (nulls > 0) { + totalNulls += nulls; + nulls = 0; + nullsRepeationCounter++; } - return totalVarSize; + } } - // Non fixed width types must override this - throw new UnsupportedOperationException(); - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - return targetType.isCoercibleTo(targetType, value); - } - - public boolean isCoercibleTo(PDataType targetType, PDataType expectedTargetType) { - if (!targetType.isArrayType()) { - return false; - } else { - PDataType targetElementType = PDataType.fromTypeId(targetType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - PDataType expectedTargetElementType = PDataType.fromTypeId(expectedTargetType.getSqlType() - - PDataType.ARRAY_TYPE_BASE); - return expectedTargetElementType.isCoercibleTo(targetElementType); + } + if (nullsVsNullRepeationCounter != null) { + if (nulls > 0) { + totalNulls += nulls; + // do not increment nullsRepeationCounter to identify trailing nulls } + nullsVsNullRepeationCounter.setFirst(totalNulls); + nullsVsNullRepeationCounter.setSecond(nullsRepeationCounter); + } + return totalVarSize; } - - @Override - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, SortOrder sortOrder, - Integer maxLength, Integer scale, 
Integer desiredMaxLength, Integer desiredScale) { - if (value == null) return true; - PhoenixArray pArr = (PhoenixArray)value; - PDataType baseType = PDataType.fromTypeId(srcType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - // Since we only have a value and no byte[], use an empty length byte[] as otherwise - // isSizeCompatible will attempt to interpret the array ptr as a ptr to an element. - ImmutableBytesWritable elementPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - for (int i = 0; i < pArr.numElements; i++) { - Object val = pArr.getElement(i); - if (!baseType.isSizeCompatible(elementPtr, val, baseType, sortOrder, srcType.getMaxLength(val), scale, - desiredMaxLength, desiredScale)) { return false; } - } - return true; + // Non fixed width types must override this + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + return targetType.isCoercibleTo(targetType, value); + } + + public boolean isCoercibleTo(PDataType targetType, PDataType expectedTargetType) { + if (!targetType.isArrayType()) { + return false; + } else { + PDataType targetElementType = + PDataType.fromTypeId(targetType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + PDataType expectedTargetElementType = + PDataType.fromTypeId(expectedTargetType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + return expectedTargetElementType.isCoercibleTo(targetElementType); } - - @SuppressWarnings(value="RC_REF_COMPARISON", - justification="PDataTypes are expected to be singletons") - private void coerceBytes(ImmutableBytesWritable ptr, Object value, PDataType actualType, Integer maxLength, - Integer scale, Integer desiredMaxLength, Integer desiredScale, PDataType desiredType, - SortOrder actualSortOrder, SortOrder desiredSortOrder, - boolean expectedRowKeyOrderOptimizable) { - if (ptr.getLength() == 0) { // a zero length ptr means null which will not be coerced to anything different - return; - } - PDataType baseType = PDataType.fromTypeId(actualType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - PDataType desiredBaseType = PDataType.fromTypeId(desiredType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - if ((Objects.equal(maxLength, desiredMaxLength) || maxLength == null || desiredMaxLength == null) - && actualType.isBytesComparableWith(desiredType) - && baseType.isFixedWidth() == desiredBaseType.isFixedWidth() - && actualSortOrder == desiredSortOrder - && (desiredSortOrder == SortOrder.ASC || desiredBaseType.isFixedWidth() || isRowKeyOrderOptimized(actualType, actualSortOrder, ptr) == expectedRowKeyOrderOptimizable)) { - return; - } - PhoenixArray pArr; - if (value == null || actualType != desiredType) { - value = toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), baseType, actualSortOrder, maxLength, - desiredScale, desiredBaseType); - pArr = (PhoenixArray)value; - // VARCHAR <=> CHAR - if (baseType.isFixedWidth() != desiredBaseType.isFixedWidth()) { - if (!pArr.isPrimitiveType()) { - pArr = new PhoenixArray(pArr, desiredMaxLength); - } - } - // Coerce to new max length when only max lengths differ - if (actualType == desiredType && !pArr.isPrimitiveType() && maxLength != null - && maxLength != desiredMaxLength) { - pArr = new PhoenixArray(pArr, desiredMaxLength); - } - baseType = desiredBaseType; - } else { - pArr = (PhoenixArray) value; - if (!Objects.equal(maxLength, desiredMaxLength)) { - pArr = new PhoenixArray(pArr, desiredMaxLength); - } - } - ptr.set(toBytes(pArr, baseType, desiredSortOrder, expectedRowKeyOrderOptimizable)); + } + + @Override 
+ public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + if (value == null) return true; + PhoenixArray pArr = (PhoenixArray) value; + PDataType baseType = PDataType.fromTypeId(srcType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + // Since we only have a value and no byte[], use an empty length byte[] as otherwise + // isSizeCompatible will attempt to interpret the array ptr as a ptr to an element. + ImmutableBytesWritable elementPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + for (int i = 0; i < pArr.numElements; i++) { + Object val = pArr.getElement(i); + if ( + !baseType.isSizeCompatible(elementPtr, val, baseType, sortOrder, srcType.getMaxLength(val), + scale, desiredMaxLength, desiredScale) + ) { + return false; + } } - - public static boolean isRowKeyOrderOptimized(PDataType type, SortOrder sortOrder, ImmutableBytesWritable ptr) { - return isRowKeyOrderOptimized(type, sortOrder, ptr.get(), ptr.getOffset(), ptr.getLength()); + return true; + } + + @SuppressWarnings(value = "RC_REF_COMPARISON", + justification = "PDataTypes are expected to be singletons") + private void coerceBytes(ImmutableBytesWritable ptr, Object value, PDataType actualType, + Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale, + PDataType desiredType, SortOrder actualSortOrder, SortOrder desiredSortOrder, + boolean expectedRowKeyOrderOptimizable) { + if (ptr.getLength() == 0) { // a zero length ptr means null which will not be coerced to + // anything different + return; } - - public static boolean isRowKeyOrderOptimized(PDataType type, SortOrder sortOrder, byte[] buf, int offset, int length) { - PDataType baseType = PDataType.fromTypeId(type.getSqlType() - PDataType.ARRAY_TYPE_BASE); - return isRowKeyOrderOptimized(baseType.isFixedWidth(), sortOrder, buf, offset, length); + PDataType baseType = PDataType.fromTypeId(actualType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + PDataType desiredBaseType = + PDataType.fromTypeId(desiredType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + if ( + (Objects.equal(maxLength, desiredMaxLength) || maxLength == null || desiredMaxLength == null) + && actualType.isBytesComparableWith(desiredType) + && baseType.isFixedWidth() == desiredBaseType.isFixedWidth() + && actualSortOrder == desiredSortOrder + && (desiredSortOrder == SortOrder.ASC || desiredBaseType.isFixedWidth() + || isRowKeyOrderOptimized(actualType, actualSortOrder, ptr) + == expectedRowKeyOrderOptimizable) + ) { + return; } - - private static boolean isRowKeyOrderOptimized(boolean isFixedWidth, SortOrder sortOrder, byte[] buf, int offset, int length) { - if (length == 0 || sortOrder == SortOrder.ASC || isFixedWidth) { - return true; + PhoenixArray pArr; + if (value == null || actualType != desiredType) { + value = toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), baseType, actualSortOrder, + maxLength, desiredScale, desiredBaseType); + pArr = (PhoenixArray) value; + // VARCHAR <=> CHAR + if (baseType.isFixedWidth() != desiredBaseType.isFixedWidth()) { + if (!pArr.isPrimitiveType()) { + pArr = new PhoenixArray(pArr, desiredMaxLength); } - int offsetToHeaderOffset = offset + length - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_INT * 2; - int offsetToSeparatorByte = Bytes.readAsInt(buf, offsetToHeaderOffset, Bytes.SIZEOF_INT) - 1; - return buf[offsetToSeparatorByte] == QueryConstants.DESC_SEPARATOR_BYTE; + } + // Coerce to new max length when 
only max lengths differ + if ( + actualType == desiredType && !pArr.isPrimitiveType() && maxLength != null + && maxLength != desiredMaxLength + ) { + pArr = new PhoenixArray(pArr, desiredMaxLength); + } + baseType = desiredBaseType; + } else { + pArr = (PhoenixArray) value; + if (!Objects.equal(maxLength, desiredMaxLength)) { + pArr = new PhoenixArray(pArr, desiredMaxLength); + } } - - @Override - public Object toObject(String value) { - throw new IllegalArgumentException("This operation is not suppported"); + ptr.set(toBytes(pArr, baseType, desiredSortOrder, expectedRowKeyOrderOptimizable)); + } + + public static boolean isRowKeyOrderOptimized(PDataType type, SortOrder sortOrder, + ImmutableBytesWritable ptr) { + return isRowKeyOrderOptimized(type, sortOrder, ptr.get(), ptr.getOffset(), ptr.getLength()); + } + + public static boolean isRowKeyOrderOptimized(PDataType type, SortOrder sortOrder, byte[] buf, + int offset, int length) { + PDataType baseType = PDataType.fromTypeId(type.getSqlType() - PDataType.ARRAY_TYPE_BASE); + return isRowKeyOrderOptimized(baseType.isFixedWidth(), sortOrder, buf, offset, length); + } + + private static boolean isRowKeyOrderOptimized(boolean isFixedWidth, SortOrder sortOrder, + byte[] buf, int offset, int length) { + if (length == 0 || sortOrder == SortOrder.ASC || isFixedWidth) { + return true; } - - public Object toObject(byte[] bytes, int offset, int length, PDataType baseType, SortOrder sortOrder, - Integer maxLength, Integer scale, PDataType desiredDataType) { - return createPhoenixArray(bytes, offset, length, sortOrder, baseType, maxLength, desiredDataType); + int offsetToHeaderOffset = offset + length - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_INT * 2; + int offsetToSeparatorByte = Bytes.readAsInt(buf, offsetToHeaderOffset, Bytes.SIZEOF_INT) - 1; + return buf[offsetToSeparatorByte] == QueryConstants.DESC_SEPARATOR_BYTE; + } + + @Override + public Object toObject(String value) { + throw new IllegalArgumentException("This operation is not suppported"); + } + + public Object toObject(byte[] bytes, int offset, int length, PDataType baseType, + SortOrder sortOrder, Integer maxLength, Integer scale, PDataType desiredDataType) { + return createPhoenixArray(bytes, offset, length, sortOrder, baseType, maxLength, + desiredDataType); + } + + static int getOffset(byte[] bytes, int arrayIndex, boolean useShort, int indexOffset, + byte serializationVersion) { + return Math + .abs(getSerializedOffset(bytes, arrayIndex, useShort, indexOffset, serializationVersion)); + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + if (java.sql.Array.class.isAssignableFrom(jdbcType)) { + return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); } - - static int getOffset(byte[] bytes, int arrayIndex, boolean useShort, int indexOffset, byte serializationVersion) { - return Math.abs(getSerializedOffset(bytes, arrayIndex, useShort, indexOffset, serializationVersion)); + throw newMismatchException(actualType, jdbcType); + } + + static int getSerializedOffset(byte[] bytes, int arrayIndex, boolean useShort, int indexOffset, + byte serializationVersion) { + int offset; + if (useShort) { + offset = indexOffset + (Bytes.SIZEOF_SHORT * arrayIndex); + return Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT) + + (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION + || serializationVersion == 
PArrayDataType.IMMUTABLE_SERIALIZATION_V2 + ? 0 + : Short.MAX_VALUE); + } else { + offset = indexOffset + (Bytes.SIZEOF_INT * arrayIndex); + return Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT); } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - if (java.sql.Array.class.isAssignableFrom(jdbcType)) { - return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } - throw newMismatchException(actualType, jdbcType); + } + + private static int getOffset(ByteBuffer indexBuffer, int arrayIndex, boolean useShort, + int indexOffset) { + int offset; + if (useShort) { + offset = indexBuffer.getShort() + Short.MAX_VALUE; + } else { + offset = indexBuffer.getInt(); } - - static int getSerializedOffset(byte[] bytes, int arrayIndex, boolean useShort, int indexOffset, byte serializationVersion) { - int offset; - if (useShort) { - offset = indexOffset + (Bytes.SIZEOF_SHORT * arrayIndex); - return Bytes.toShort(bytes, offset, Bytes.SIZEOF_SHORT) - + (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION - || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_V2 ? 0 - : Short.MAX_VALUE); - } else { - offset = indexOffset + (Bytes.SIZEOF_INT * arrayIndex); - return Bytes.toInt(bytes, offset, Bytes.SIZEOF_INT); - } - } - - private static int getOffset(ByteBuffer indexBuffer, int arrayIndex, boolean useShort, int indexOffset) { - int offset; - if (useShort) { - offset = indexBuffer.getShort() + Short.MAX_VALUE; - } else { - offset = indexBuffer.getInt(); - } - return offset; + return offset; + } + + @Override + public Object toObject(Object object, PDataType actualType) { + return toPhoenixArray(object, arrayBaseType(actualType)); + } + + public Object toObject(Object object, PDataType actualType, SortOrder sortOrder) { + // How to use the sortOrder ? Just reverse the elements + return toObject(object, actualType); + } + + /** + * creates array bytes using the SORTABLE_SERIALIZATION_VERSION format + * @param rowKeyOrderOptimizable TODO + */ + private byte[] createArrayBytes(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, + PhoenixArray array, int noOfElements, PDataType baseType, SortOrder sortOrder, + boolean rowKeyOrderOptimizable) { + PArrayDataTypeEncoder builder = new PArrayDataTypeEncoder(byteStream, oStream, noOfElements, + baseType, sortOrder, rowKeyOrderOptimizable); + for (int i = 0; i < noOfElements; i++) { + byte[] bytes = array.toBytes(i); + builder.appendValue(bytes); } - - @Override - public Object toObject(Object object, PDataType actualType) { - return toPhoenixArray(object, arrayBaseType(actualType)); + return builder.encode(); + } + + private static byte[] generateEmptyArrayBytes(PDataType baseType, SortOrder sortOrder) { + PArrayDataTypeEncoder encoder = new PArrayDataTypeEncoder(baseType, sortOrder); + byte[] arrayBytes = encoder.encode(); + if (arrayBytes == null) { + arrayBytes = ByteUtil.EMPTY_BYTE_ARRAY; } - - public Object toObject(Object object, PDataType actualType, SortOrder sortOrder) { - // How to use the sortOrder ? Just reverse the elements - return toObject(object, actualType); + return arrayBytes; + } + + /** + * Appends an item to array. Uses the ptr bytes of item and the array bytes to create new array + * bytes with appended item bytes, then sets the new array bytes to ptr. 
+ * @param ptr holds the bytes of the item to be added to array + * @param arrayBytes byte [] form of phoenix array + * @param length arrayBytes length + * @param offset arrayBytes offset + * @param arrayLength length of the array + * @param maxLength maximum length of the item to be added + * @param sortOrder sort order of the elements in array + */ + public static boolean appendItemToArray(ImmutableBytesWritable ptr, int length, int offset, + byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, + SortOrder sortOrder) { + if (ptr.getLength() == 0) { + ptr.set(arrayBytes, offset, length); + return true; } - /** - * creates array bytes using the SORTABLE_SERIALIZATION_VERSION format - * @param rowKeyOrderOptimizable TODO - */ - private byte[] createArrayBytes(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, - PhoenixArray array, int noOfElements, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { - PArrayDataTypeEncoder builder = - new PArrayDataTypeEncoder(byteStream, oStream, noOfElements, baseType, sortOrder, rowKeyOrderOptimizable); - for (int i = 0; i < noOfElements; i++) { - byte[] bytes = array.toBytes(i); - builder.appendValue(bytes); - } - return builder.encode(); + // If the arrayBytes is null or empty, generate an empty array which will get filled in below + if (arrayBytes.length == 0) { + arrayBytes = generateEmptyArrayBytes(baseType, sortOrder); + offset = 0; + length = arrayBytes.length; } - private static byte[] generateEmptyArrayBytes(PDataType baseType, SortOrder sortOrder) { - PArrayDataTypeEncoder encoder = new PArrayDataTypeEncoder(baseType, sortOrder); - byte[] arrayBytes = encoder.encode(); - if (arrayBytes == null) { - arrayBytes = ByteUtil.EMPTY_BYTE_ARRAY; - } - return arrayBytes; - } - - /** - * Appends an item to array. Uses the ptr bytes of item and the array bytes to create new array bytes with appended item bytes, - * then sets the new array bytes to ptr. - * - * @param ptr holds the bytes of the item to be added to array - * @param arrayBytes byte [] form of phoenix array - * @param length arrayBytes length - * @param offset arrayBytes offset - * @param arrayLength length of the array - * @param maxLength maximum length of the item to be added - * @param sortOrder sort order of the elements in array - */ - public static boolean appendItemToArray(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, - PDataType baseType, int arrayLength, Integer maxLength, SortOrder sortOrder) { - if (ptr.getLength() == 0) { - ptr.set(arrayBytes, offset, length); - return true; - } - - // If the arrayBytes is null or empty, generate an empty array which will get filled in below - if (arrayBytes.length == 0) { - arrayBytes = generateEmptyArrayBytes(baseType, sortOrder); - offset = 0; - length = arrayBytes.length; - } + int elementLength = maxLength == null ? ptr.getLength() : maxLength; - int elementLength = maxLength == null ? 
ptr.getLength() : maxLength; + // padding + if (elementLength > ptr.getLength()) { + baseType.pad(ptr, elementLength, sortOrder); + } - // padding - if (elementLength > ptr.getLength()) { - baseType.pad(ptr, elementLength, sortOrder); - } + int elementOffset = ptr.getOffset(); + byte[] elementBytes = ptr.get(); - int elementOffset = ptr.getOffset(); - byte[] elementBytes = ptr.get(); - - byte[] newArray; - if (!baseType.isFixedWidth()) { - byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; - int offsetArrayPosition = Bytes.toInt(arrayBytes, offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); - int offsetArrayLength = length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - - Bytes.SIZEOF_BYTE; - - // checks whether offset array consists of shorts or integers - boolean useInt = arrayLength == 0 ? false : offsetArrayLength / Math.abs(arrayLength) == Bytes.SIZEOF_INT; - boolean convertToInt = false; - - int newElementPosition = offsetArrayPosition - 2 * Bytes.SIZEOF_BYTE; - - if (!useInt) { - if (PArrayDataType.useShortForOffsetArray(newElementPosition)) { - newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE]; - } else { - newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT - + Bytes.SIZEOF_BYTE]; - convertToInt = true; - } - } else { - newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; - } + byte[] newArray; + if (!baseType.isFixedWidth()) { + byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; + int offsetArrayPosition = Bytes.toInt(arrayBytes, + offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, + Bytes.SIZEOF_INT); + int offsetArrayLength = + length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; - int newOffsetArrayPosition = newElementPosition + elementLength + 3 * Bytes.SIZEOF_BYTE; + // checks whether offset array consists of shorts or integers + boolean useInt = + arrayLength == 0 ? 
false : offsetArrayLength / Math.abs(arrayLength) == Bytes.SIZEOF_INT; + boolean convertToInt = false; - System.arraycopy(arrayBytes, offset, newArray, 0, newElementPosition); - // Write separator explicitly, as it may not be 0 - byte sepByte = getSeparatorByte(isRowKeyOrderOptimized(false, sortOrder, arrayBytes, offset, length), sortOrder); - newArray[newOffsetArrayPosition-3] = sepByte; // Separator for new value - newArray[newOffsetArrayPosition-2] = sepByte; // Double byte separator - newArray[newOffsetArrayPosition-1] = sepByte; - System.arraycopy(elementBytes, elementOffset, newArray, newElementPosition, elementLength); + int newElementPosition = offsetArrayPosition - 2 * Bytes.SIZEOF_BYTE; - int factor = (int)Math.signum(arrayLength); - if (factor == 0) { - factor = 1; - } - arrayLength = (Math.abs(arrayLength) + 1) * factor; - if (useInt) { - System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, newOffsetArrayPosition, - offsetArrayLength); - Bytes.putInt(newArray, newOffsetArrayPosition + offsetArrayLength, newElementPosition); - - writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, arrayBytes[offset - + length - 1], true); - } else { - if (!convertToInt) { - System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, newOffsetArrayPosition, - offsetArrayLength); - Bytes.putShort(newArray, newOffsetArrayPosition + offsetArrayLength, - (short)(newElementPosition - Short.MAX_VALUE)); - - writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, arrayBytes[offset - + length - 1], false); - } else { - int off = newOffsetArrayPosition; - for (int arrayIndex = 0; arrayIndex < Math.abs(arrayLength) - 1; arrayIndex++) { - Bytes.putInt(newArray, off, - getOffset(arrayBytes, arrayIndex, true, offsetArrayPosition + offset, serializationVersion)); - off += Bytes.SIZEOF_INT; - } - - Bytes.putInt(newArray, off, newElementPosition); - Bytes.putInt(newArray, off + Bytes.SIZEOF_INT, newOffsetArrayPosition); - Bytes.putInt(newArray, off + 2 * Bytes.SIZEOF_INT, -arrayLength); - Bytes.putByte(newArray, off + 3 * Bytes.SIZEOF_INT, arrayBytes[offset + length - 1]); - - } - } + if (!useInt) { + if (PArrayDataType.useShortForOffsetArray(newElementPosition)) { + newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE]; } else { - newArray = new byte[length + elementLength]; - - System.arraycopy(arrayBytes, offset, newArray, 0, length); - System.arraycopy(elementBytes, elementOffset, newArray, length, elementLength); + newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT + + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + convertToInt = true; } + } else { + newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + } + + int newOffsetArrayPosition = newElementPosition + elementLength + 3 * Bytes.SIZEOF_BYTE; + + System.arraycopy(arrayBytes, offset, newArray, 0, newElementPosition); + // Write separator explicitly, as it may not be 0 + byte sepByte = getSeparatorByte( + isRowKeyOrderOptimized(false, sortOrder, arrayBytes, offset, length), sortOrder); + newArray[newOffsetArrayPosition - 3] = sepByte; // Separator for new value + newArray[newOffsetArrayPosition - 2] = sepByte; // Double byte separator + newArray[newOffsetArrayPosition - 1] = sepByte; + System.arraycopy(elementBytes, elementOffset, newArray, newElementPosition, elementLength); + + int factor = (int) Math.signum(arrayLength); + if (factor == 0) { + factor = 1; + } + arrayLength = 
(Math.abs(arrayLength) + 1) * factor; + if (useInt) { + System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, newOffsetArrayPosition, + offsetArrayLength); + Bytes.putInt(newArray, newOffsetArrayPosition + offsetArrayLength, newElementPosition); + + writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, + arrayBytes[offset + length - 1], true); + } else { + if (!convertToInt) { + System.arraycopy(arrayBytes, offset + offsetArrayPosition, newArray, + newOffsetArrayPosition, offsetArrayLength); + Bytes.putShort(newArray, newOffsetArrayPosition + offsetArrayLength, + (short) (newElementPosition - Short.MAX_VALUE)); + + writeEndBytes(newArray, newOffsetArrayPosition, offsetArrayLength, arrayLength, + arrayBytes[offset + length - 1], false); + } else { + int off = newOffsetArrayPosition; + for (int arrayIndex = 0; arrayIndex < Math.abs(arrayLength) - 1; arrayIndex++) { + Bytes.putInt(newArray, off, getOffset(arrayBytes, arrayIndex, true, + offsetArrayPosition + offset, serializationVersion)); + off += Bytes.SIZEOF_INT; + } + + Bytes.putInt(newArray, off, newElementPosition); + Bytes.putInt(newArray, off + Bytes.SIZEOF_INT, newOffsetArrayPosition); + Bytes.putInt(newArray, off + 2 * Bytes.SIZEOF_INT, -arrayLength); + Bytes.putByte(newArray, off + 3 * Bytes.SIZEOF_INT, arrayBytes[offset + length - 1]); - ptr.set(newArray); + } + } + } else { + newArray = new byte[length + elementLength]; - return true; + System.arraycopy(arrayBytes, offset, newArray, 0, length); + System.arraycopy(elementBytes, elementOffset, newArray, length, elementLength); } - private static void writeEndBytes(byte[] array, int newOffsetArrayPosition, int offsetArrayLength, int arrayLength, - byte header, boolean useInt) { - int byteSize = useInt ? Bytes.SIZEOF_INT : Bytes.SIZEOF_SHORT; - - Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize, newOffsetArrayPosition); - Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize + Bytes.SIZEOF_INT, arrayLength); - Bytes.putByte(array, newOffsetArrayPosition + offsetArrayLength + byteSize + 2 * Bytes.SIZEOF_INT, header); + ptr.set(newArray); + + return true; + } + + private static void writeEndBytes(byte[] array, int newOffsetArrayPosition, int offsetArrayLength, + int arrayLength, byte header, boolean useInt) { + int byteSize = useInt ? Bytes.SIZEOF_INT : Bytes.SIZEOF_SHORT; + + Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize, + newOffsetArrayPosition); + Bytes.putInt(array, newOffsetArrayPosition + offsetArrayLength + byteSize + Bytes.SIZEOF_INT, + arrayLength); + Bytes.putByte(array, + newOffsetArrayPosition + offsetArrayLength + byteSize + 2 * Bytes.SIZEOF_INT, header); + } + + public static boolean prependItemToArray(ImmutableBytesWritable ptr, int length, int offset, + byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, + SortOrder sortOrder) { + int elementLength = maxLength == null ? ptr.getLength() : maxLength; + if (ptr.getLength() == 0) { + elementLength = 0; + } + // If the arrayBytes is null or empty, generate an empty array which will get filled in below + if (arrayBytes.length == 0) { + arrayBytes = generateEmptyArrayBytes(baseType, sortOrder); + offset = 0; + length = arrayBytes.length; } - public static boolean prependItemToArray(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, - PDataType baseType, int arrayLength, Integer maxLength, SortOrder sortOrder) { - int elementLength = maxLength == null ? 
ptr.getLength() : maxLength; - if (ptr.getLength() == 0) { - elementLength = 0; - } - // If the arrayBytes is null or empty, generate an empty array which will get filled in below - if (arrayBytes.length == 0) { - arrayBytes = generateEmptyArrayBytes(baseType, sortOrder); - offset = 0; - length = arrayBytes.length; + // padding + if (elementLength > ptr.getLength()) { + baseType.pad(ptr, elementLength, sortOrder); + } + int elementOffset = ptr.getOffset(); + byte[] elementBytes = ptr.get(); + + byte[] newArray; + if (!baseType.isFixedWidth()) { + byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; + int offsetArrayPosition = Bytes.toInt(arrayBytes, + offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, + Bytes.SIZEOF_INT); + int offsetArrayLength = + length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; + arrayLength = Math.abs(arrayLength); + + // checks whether offset array consists of shorts or integers + boolean useInt = + arrayLength == 0 ? false : offsetArrayLength / arrayLength == Bytes.SIZEOF_INT; + boolean convertToInt = false; + int endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, + offsetArrayPosition + offset, serializationVersion) + elementLength + Bytes.SIZEOF_BYTE; + int newOffsetArrayPosition; + int lengthIncrease; + int firstNonNullElementPosition = 0; + int currentPosition = 0; + // handle the case where prepended element is null + if (elementLength == 0) { + int nulls = 1; + // counts the number of nulls which are already at the beginning of the array + for (int index = 0; index < arrayLength; index++) { + int currOffset = getOffset(arrayBytes, index, !useInt, offsetArrayPosition + offset, + serializationVersion); + if (arrayBytes[offset + currOffset] == QueryConstants.SEPARATOR_BYTE) { + nulls++; + } else { + // gets the offset of the first element after nulls at the beginning + firstNonNullElementPosition = currOffset; + break; + } } - // padding - if (elementLength > ptr.getLength()) { - baseType.pad(ptr, elementLength, sortOrder); - } - int elementOffset = ptr.getOffset(); - byte[] elementBytes = ptr.get(); - - byte[] newArray; - if (!baseType.isFixedWidth()) { - byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; - int offsetArrayPosition = Bytes.toInt(arrayBytes, offset + length - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); - int offsetArrayLength = length - offsetArrayPosition - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - - Bytes.SIZEOF_BYTE; - arrayLength = Math.abs(arrayLength); - - // checks whether offset array consists of shorts or integers - boolean useInt = arrayLength == 0 ? 
false : offsetArrayLength / arrayLength == Bytes.SIZEOF_INT; - boolean convertToInt = false; - int endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, offsetArrayPosition + offset, serializationVersion) - + elementLength + Bytes.SIZEOF_BYTE; - int newOffsetArrayPosition; - int lengthIncrease; - int firstNonNullElementPosition = 0; - int currentPosition = 0; - // handle the case where prepended element is null - if (elementLength == 0) { - int nulls = 1; - // counts the number of nulls which are already at the beginning of the array - for (int index = 0; index < arrayLength; index++) { - int currOffset = getOffset(arrayBytes, index, !useInt, offsetArrayPosition + offset, serializationVersion); - if (arrayBytes[offset + currOffset] == QueryConstants.SEPARATOR_BYTE) { - nulls++; - } else { - // gets the offset of the first element after nulls at the beginning - firstNonNullElementPosition = currOffset; - break; - } - } - - int nMultiplesOver255 = nulls / 255; - int nRemainingNulls = nulls % 255; - - // Calculates the increase in length due to prepending the null - // There is a length increase only when nRemainingNulls == 1 - // nRemainingNulls == 1 and nMultiplesOver255 == 0 means there were no nulls at the beginning - // previously. - // At that case we need to increase the length by two bytes, one for separator byte and one for null - // count. - // ex: initial array - 65 0 66 0 0 0 after prepending null - 0 1(inverted) 65 0 66 0 0 0 - // nRemainingNulls == 1 and nMultiplesOver255 != 0 means there were null at the beginning previously. - // In this case due to prepending nMultiplesOver255 is increased by 1. - // We need to increase the length by one byte to store increased that. - // ex: initial array - 0 1 65 0 66 0 0 0 after prepending null - 0 1 1(inverted) 65 0 66 0 0 0 - // nRemainingNulls == 0 case. - // ex: initial array - 0 254(inverted) 65 0 66 0 0 0 after prepending null - 0 1 65 0 66 0 0 0 - // nRemainingNulls > 1 case. - // ex: initial array - 0 45(inverted) 65 0 66 0 0 0 after prepending null - 0 46(inverted) 65 0 66 0 0 0 - lengthIncrease = nRemainingNulls == 1 ? (nMultiplesOver255 == 0 ? 
2 * Bytes.SIZEOF_BYTE - : Bytes.SIZEOF_BYTE) : 0; - endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, offsetArrayPosition + offset, serializationVersion) - + lengthIncrease; - if (!useInt) { - if (PArrayDataType.useShortForOffsetArray(endElementPosition)) { - newArray = new byte[length + Bytes.SIZEOF_SHORT + lengthIncrease]; - } else { - newArray = new byte[length + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT - + lengthIncrease]; - convertToInt = true; - } - } else { - newArray = new byte[length + Bytes.SIZEOF_INT + lengthIncrease]; - } - newArray[currentPosition] = QueryConstants.SEPARATOR_BYTE; - currentPosition++; - - newOffsetArrayPosition = offsetArrayPosition + lengthIncrease; - // serialize nulls at the beginning - currentPosition = serializeNulls(newArray, currentPosition, nulls); - } else { - if (!useInt) { - if (PArrayDataType.useShortForOffsetArray(endElementPosition)) { - newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE]; - } else { - newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT - + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; - convertToInt = true; - } - } else { - newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; - } - newOffsetArrayPosition = offsetArrayPosition + Bytes.SIZEOF_BYTE + elementLength; - - lengthIncrease = elementLength + Bytes.SIZEOF_BYTE; - System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength); - // Explicitly set separator byte since for DESC it won't be 0. - newArray[elementLength] = getSeparatorByte(isRowKeyOrderOptimized(false, sortOrder, arrayBytes, offset, length), sortOrder); - currentPosition += elementLength + Bytes.SIZEOF_BYTE; - } + int nMultiplesOver255 = nulls / 255; + int nRemainingNulls = nulls % 255; - System.arraycopy(arrayBytes, firstNonNullElementPosition + offset, newArray, currentPosition, - offsetArrayPosition); - - arrayLength = arrayLength + 1; - // writes the new offset and changes the previous offsets - if (useInt || convertToInt) { - writeNewOffsets(arrayBytes, newArray, false, !useInt, newOffsetArrayPosition, arrayLength, - offsetArrayPosition, offset, lengthIncrease, length); - } else { - writeNewOffsets(arrayBytes, newArray, true, true, newOffsetArrayPosition, arrayLength, - offsetArrayPosition, offset, lengthIncrease, length); - } + // Calculates the increase in length due to prepending the null + // There is a length increase only when nRemainingNulls == 1 + // nRemainingNulls == 1 and nMultiplesOver255 == 0 means there were no nulls at the + // beginning + // previously. + // At that case we need to increase the length by two bytes, one for separator byte and one + // for null + // count. + // ex: initial array - 65 0 66 0 0 0 after prepending null - 0 1(inverted) 65 0 66 0 0 0 + // nRemainingNulls == 1 and nMultiplesOver255 != 0 means there were null at the beginning + // previously. + // In this case due to prepending nMultiplesOver255 is increased by 1. + // We need to increase the length by one byte to store increased that. + // ex: initial array - 0 1 65 0 66 0 0 0 after prepending null - 0 1 1(inverted) 65 0 66 0 0 + // 0 + // nRemainingNulls == 0 case. + // ex: initial array - 0 254(inverted) 65 0 66 0 0 0 after prepending null - 0 1 65 0 66 0 0 + // 0 + // nRemainingNulls > 1 case. + // ex: initial array - 0 45(inverted) 65 0 66 0 0 0 after prepending null - 0 46(inverted) + // 65 0 66 0 0 0 + lengthIncrease = nRemainingNulls == 1 + ? (nMultiplesOver255 == 0 ? 
2 * Bytes.SIZEOF_BYTE : Bytes.SIZEOF_BYTE) + : 0; + endElementPosition = getOffset(arrayBytes, arrayLength - 1, !useInt, + offsetArrayPosition + offset, serializationVersion) + lengthIncrease; + if (!useInt) { + if (PArrayDataType.useShortForOffsetArray(endElementPosition)) { + newArray = new byte[length + Bytes.SIZEOF_SHORT + lengthIncrease]; + } else { + newArray = new byte[length + arrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT + + lengthIncrease]; + convertToInt = true; + } } else { - newArray = new byte[length + elementLength]; - - System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength); - System.arraycopy(arrayBytes, offset, newArray, elementLength, length); + newArray = new byte[length + Bytes.SIZEOF_INT + lengthIncrease]; } + newArray[currentPosition] = QueryConstants.SEPARATOR_BYTE; + currentPosition++; + + newOffsetArrayPosition = offsetArrayPosition + lengthIncrease; + // serialize nulls at the beginning + currentPosition = serializeNulls(newArray, currentPosition, nulls); + } else { + if (!useInt) { + if (PArrayDataType.useShortForOffsetArray(endElementPosition)) { + newArray = new byte[length + elementLength + Bytes.SIZEOF_SHORT + Bytes.SIZEOF_BYTE]; + } else { + newArray = new byte[length + elementLength + arrayLength * Bytes.SIZEOF_SHORT + + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + convertToInt = true; + } + } else { + newArray = new byte[length + elementLength + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + } + newOffsetArrayPosition = offsetArrayPosition + Bytes.SIZEOF_BYTE + elementLength; + + lengthIncrease = elementLength + Bytes.SIZEOF_BYTE; + System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength); + // Explicitly set separator byte since for DESC it won't be 0. + newArray[elementLength] = getSeparatorByte( + isRowKeyOrderOptimized(false, sortOrder, arrayBytes, offset, length), sortOrder); + currentPosition += elementLength + Bytes.SIZEOF_BYTE; + } + + System.arraycopy(arrayBytes, firstNonNullElementPosition + offset, newArray, currentPosition, + offsetArrayPosition); + + arrayLength = arrayLength + 1; + // writes the new offset and changes the previous offsets + if (useInt || convertToInt) { + writeNewOffsets(arrayBytes, newArray, false, !useInt, newOffsetArrayPosition, arrayLength, + offsetArrayPosition, offset, lengthIncrease, length); + } else { + writeNewOffsets(arrayBytes, newArray, true, true, newOffsetArrayPosition, arrayLength, + offsetArrayPosition, offset, lengthIncrease, length); + } + } else { + newArray = new byte[length + elementLength]; + + System.arraycopy(elementBytes, elementOffset, newArray, 0, elementLength); + System.arraycopy(arrayBytes, offset, newArray, elementLength, length); + } - ptr.set(newArray); - return true; + ptr.set(newArray); + return true; + } + + private static void writeNewOffsets(byte[] arrayBytes, byte[] newArray, boolean useShortNew, + boolean useShortPrevious, int newOffsetArrayPosition, int arrayLength, int offsetArrayPosition, + int offset, int offsetShift, int length) { + int currentPosition = newOffsetArrayPosition; + int offsetArrayElementSize = useShortNew ? 
Bytes.SIZEOF_SHORT : Bytes.SIZEOF_INT; + if (useShortNew) { + Bytes.putShort(newArray, currentPosition, (short) (0 - Short.MAX_VALUE)); + } else { + Bytes.putInt(newArray, currentPosition, 0); } - private static void writeNewOffsets(byte[] arrayBytes, byte[] newArray, boolean useShortNew, - boolean useShortPrevious, int newOffsetArrayPosition, int arrayLength, int offsetArrayPosition, int offset, - int offsetShift, int length) { - int currentPosition = newOffsetArrayPosition; - int offsetArrayElementSize = useShortNew ? Bytes.SIZEOF_SHORT : Bytes.SIZEOF_INT; + currentPosition += offsetArrayElementSize; + boolean nullsAtBeginning = true; + byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; + for (int arrayIndex = 0; arrayIndex < arrayLength - 1; arrayIndex++) { + int oldOffset = getOffset(arrayBytes, arrayIndex, useShortPrevious, + offsetArrayPosition + offset, serializationVersion); + if (arrayBytes[offset + oldOffset] == QueryConstants.SEPARATOR_BYTE && nullsAtBeginning) { if (useShortNew) { - Bytes.putShort(newArray, currentPosition, (short)(0 - Short.MAX_VALUE)); + Bytes.putShort(newArray, currentPosition, (short) (oldOffset - Short.MAX_VALUE)); } else { - Bytes.putInt(newArray, currentPosition, 0); - } - - currentPosition += offsetArrayElementSize; - boolean nullsAtBeginning = true; - byte serializationVersion = arrayBytes[offset + length - Bytes.SIZEOF_BYTE]; - for (int arrayIndex = 0; arrayIndex < arrayLength - 1; arrayIndex++) { - int oldOffset = getOffset(arrayBytes, arrayIndex, useShortPrevious, offsetArrayPosition + offset, serializationVersion); - if (arrayBytes[offset + oldOffset] == QueryConstants.SEPARATOR_BYTE && nullsAtBeginning) { - if (useShortNew) { - Bytes.putShort(newArray, currentPosition, (short)(oldOffset - Short.MAX_VALUE)); - } else { - Bytes.putInt(newArray, currentPosition, oldOffset); - } - } else { - if (useShortNew) { - Bytes.putShort(newArray, currentPosition, (short)(oldOffset + offsetShift - Short.MAX_VALUE)); - } else { - Bytes.putInt(newArray, currentPosition, oldOffset + offsetShift); - } - nullsAtBeginning = false; - } - currentPosition += offsetArrayElementSize; + Bytes.putInt(newArray, currentPosition, oldOffset); } - - Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition); - currentPosition += Bytes.SIZEOF_INT; - Bytes.putInt(newArray, currentPosition, useShortNew ? 
arrayLength : -arrayLength); - currentPosition += Bytes.SIZEOF_INT; - Bytes.putByte(newArray, currentPosition, arrayBytes[offset + length - 1]); - } - - public static boolean concatArrays(ImmutableBytesWritable ptr, int array1BytesLength, int array1BytesOffset, - byte[] array1Bytes, PDataType baseType, int actualLengthOfArray1, int actualLengthOfArray2) { - int array2BytesLength = ptr.getLength(); - int array2BytesOffset = ptr.getOffset(); - byte[] array2Bytes = ptr.get(); - - byte[] newArray; - - if (!baseType.isFixedWidth()) { - byte serializationVersion1 = array1Bytes[array1BytesOffset + array1BytesLength - Bytes.SIZEOF_BYTE]; - int offsetArrayPositionArray1 = Bytes.toInt(array1Bytes, array1BytesOffset + array1BytesLength - - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); - int offsetArrayPositionArray2 = Bytes.toInt(array2Bytes, array2BytesOffset + array2BytesLength - - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); - int offsetArrayLengthArray1 = array1BytesLength - offsetArrayPositionArray1 - Bytes.SIZEOF_INT - - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; - int offsetArrayLengthArray2 = array2BytesLength - offsetArrayPositionArray2 - Bytes.SIZEOF_INT - - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; - int newArrayLength = actualLengthOfArray1 + actualLengthOfArray2; - int nullsAtTheEndOfArray1 = 0; - int nullsAtTheBeginningOfArray2 = 0; - // checks whether offset array consists of shorts or integers - boolean useIntArray1 = offsetArrayLengthArray1 / actualLengthOfArray1 == Bytes.SIZEOF_INT; - boolean useIntArray2 = offsetArrayLengthArray2 / actualLengthOfArray2 == Bytes.SIZEOF_INT; - boolean useIntNewArray = false; - // count nulls at the end of array 1 - for (int index = actualLengthOfArray1 - 1; index > -1; index--) { - int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset + offsetArrayPositionArray1, serializationVersion1); - if (array1Bytes[array1BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE || array1Bytes[array1BytesOffset + offset] == QueryConstants.DESC_SEPARATOR_BYTE) { - nullsAtTheEndOfArray1++; - } else { - break; - } - } - // count nulls at the beginning of the array 2 - int array2FirstNonNullElementOffset = 0; - int array2FirstNonNullIndex = 0; - byte serializationVersion2 = array2Bytes[array2BytesOffset + array2BytesLength - Bytes.SIZEOF_BYTE]; - for (int index = 0; index < actualLengthOfArray2; index++) { - int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); - if (array2Bytes[array2BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE) { - nullsAtTheBeginningOfArray2++; - } else { - array2FirstNonNullIndex = index; - array2FirstNonNullElementOffset = offset; - break; - } - } - int nullsInMiddleAfterConcat = nullsAtTheEndOfArray1 + nullsAtTheBeginningOfArray2; - int bytesForNullsBefore = nullsAtTheBeginningOfArray2 / 255 - + (nullsAtTheBeginningOfArray2 % 255 == 0 ? 0 : 1); - int bytesForNullsAfter = nullsInMiddleAfterConcat / 255 + (nullsInMiddleAfterConcat % 255 == 0 ? 0 : 1); - // Increase of length required to store nulls - int lengthIncreaseForNulls = bytesForNullsAfter - bytesForNullsBefore; - // Length increase incremented by one when there were no nulls at the beginning of array and when there are - // nulls at the end of array 1 as we need to allocate a byte for separator byte in this case. - lengthIncreaseForNulls += nullsAtTheBeginningOfArray2 == 0 && nullsAtTheEndOfArray1 != 0 ? 
Bytes.SIZEOF_BYTE - : 0; - int newOffsetArrayPosition = offsetArrayPositionArray1 + offsetArrayPositionArray2 + lengthIncreaseForNulls - - 2 * Bytes.SIZEOF_BYTE; - int endElementPositionOfArray2 = getOffset(array2Bytes, actualLengthOfArray2 - 1, !useIntArray2, - array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); - int newEndElementPosition = lengthIncreaseForNulls + endElementPositionOfArray2 + offsetArrayPositionArray1 - - 2 * Bytes.SIZEOF_BYTE; - // Creates a byte array to store the concatenated array - if (PArrayDataType.useShortForOffsetArray(newEndElementPosition)) { - newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT - + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; - } else { - useIntNewArray = true; - newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_INT + Bytes.SIZEOF_INT - + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; - } - - int currentPosition = 0; - // Copies all the elements from array 1 to new array - System.arraycopy(array1Bytes, array1BytesOffset, newArray, currentPosition, offsetArrayPositionArray1 - 2 - * Bytes.SIZEOF_BYTE); - currentPosition = offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE; - int array2StartingPosition = currentPosition; - currentPosition += nullsInMiddleAfterConcat != 0 ? 1 : 0; - // Writes nulls in the middle of the array. - currentPosition = serializeNulls(newArray, currentPosition, nullsInMiddleAfterConcat); - // Copies the elements from array 2 beginning from the first non null element. - System.arraycopy(array2Bytes, array2BytesOffset + array2FirstNonNullElementOffset, newArray, - currentPosition, offsetArrayPositionArray2 - array2FirstNonNullElementOffset); - currentPosition += offsetArrayPositionArray2 - array2FirstNonNullElementOffset; - - // Writing offset arrays - if (useIntNewArray) { - // offsets for the elements from array 1. Simply copied. - for (int index = 0; index < actualLengthOfArray1; index++) { - int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset - + offsetArrayPositionArray1, serializationVersion1); - Bytes.putInt(newArray, currentPosition, offset); - currentPosition += Bytes.SIZEOF_INT; - } - // offsets for nulls in the middle - for (int index = 0; index < array2FirstNonNullIndex; index++) { - int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset - + offsetArrayPositionArray2, serializationVersion2); - Bytes.putInt(newArray, currentPosition, offset + array2StartingPosition); - currentPosition += Bytes.SIZEOF_INT; - } - // offsets for the elements from the first non null element from array 2 - int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter - + (bytesForNullsAfter == 0 ? 0 : Bytes.SIZEOF_BYTE); - for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) { - int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset - + offsetArrayPositionArray2, serializationVersion2); - Bytes.putInt(newArray, currentPosition, offset - array2FirstNonNullElementOffset - + part2NonNullStartingPosition); - currentPosition += Bytes.SIZEOF_INT; - } - } else { - // offsets for the elements from array 1. Simply copied. 
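The null handling above collapses the trailing nulls of the first array and the leading nulls of the second array into a single run in the middle of the concatenated value, and every run of nulls costs one count byte per block of 255. The following standalone sketch works through that arithmetic; it is illustrative only, is not part of this patch, and the helper simply mirrors the inline expressions used above.

  // Illustrative only: byte-count bookkeeping for nulls merged in the middle.
  class NullCountSketch {
    static int bytesForNulls(int nulls) {
      return nulls / 255 + (nulls % 255 == 0 ? 0 : 1);
    }

    public static void main(String[] args) {
      int nullsAtTheEndOfArray1 = 2;
      int nullsAtTheBeginningOfArray2 = 254;
      int nullsInMiddleAfterConcat = nullsAtTheEndOfArray1 + nullsAtTheBeginningOfArray2; // 256
      int lengthIncreaseForNulls =
          bytesForNulls(nullsInMiddleAfterConcat) - bytesForNulls(nullsAtTheBeginningOfArray2); // 2 - 1
      // one extra separator byte is only needed when array 2 had no leading nulls
      if (nullsAtTheBeginningOfArray2 == 0 && nullsAtTheEndOfArray1 != 0) {
        lengthIncreaseForNulls += 1; // Bytes.SIZEOF_BYTE
      }
      System.out.println(lengthIncreaseForNulls); // 1
    }
  }

So merging 2 trailing nulls with 254 leading nulls crosses a 255 boundary and costs exactly one additional count byte, which is the quantity the surrounding code adds to the new offset-array position.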
- for (int index = 0; index < actualLengthOfArray1; index++) { - int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset - + offsetArrayPositionArray1, serializationVersion1); - Bytes.putShort(newArray, currentPosition, (short)(offset - Short.MAX_VALUE)); - currentPosition += Bytes.SIZEOF_SHORT; - } - // offsets for nulls in the middle - for (int index = 0; index < array2FirstNonNullIndex; index++) { - int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset - + offsetArrayPositionArray2, serializationVersion2); - Bytes.putShort(newArray, currentPosition, - (short)(offset + array2StartingPosition - Short.MAX_VALUE)); - currentPosition += Bytes.SIZEOF_SHORT; - } - // offsets for the elements from the first non null element from array 2 - int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter - + (bytesForNullsAfter == 0 ? 0 : Bytes.SIZEOF_BYTE); - for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) { - int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset - + offsetArrayPositionArray2, serializationVersion2); - Bytes.putShort(newArray, currentPosition, (short)(offset - array2FirstNonNullElementOffset - + part2NonNullStartingPosition - Short.MAX_VALUE)); - currentPosition += Bytes.SIZEOF_SHORT; - } - } - Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition); - currentPosition += Bytes.SIZEOF_INT; - Bytes.putInt(newArray, currentPosition, useIntNewArray ? -newArrayLength : newArrayLength); - currentPosition += Bytes.SIZEOF_INT; - Bytes.putByte(newArray, currentPosition, array1Bytes[array1BytesOffset + array1BytesLength - 1]); + } else { + if (useShortNew) { + Bytes.putShort(newArray, currentPosition, + (short) (oldOffset + offsetShift - Short.MAX_VALUE)); } else { - newArray = new byte[array1BytesLength + array2BytesLength]; - System.arraycopy(array1Bytes, array1BytesOffset, newArray, 0, array1BytesLength); - System.arraycopy(array2Bytes, array2BytesOffset, newArray, array1BytesLength, array2BytesLength); + Bytes.putInt(newArray, currentPosition, oldOffset + offsetShift); } - ptr.set(newArray); - return true; + nullsAtBeginning = false; + } + currentPosition += offsetArrayElementSize; } - public static boolean arrayToString(ImmutableBytesWritable ptr, PhoenixArray array, String delimiter, String nullString, SortOrder sortOrder) { - StringBuilder result = new StringBuilder(); - boolean delimiterPending = false; - for (int i = 0; i < array.getDimensions() - 1; i++) { - Object element = array.getElement(i); - if (element == null) { - if (nullString != null) { - result.append(nullString); - } - } else { - result.append(element.toString()); - delimiterPending = true; - } - if (nullString != null || (array.getElement(i + 1) != null && delimiterPending)) { - result.append(delimiter); - delimiterPending = false; - } - } - Object element = array.getElement(array.getDimensions() - 1); - if (element == null) { - if (nullString != null) { - result.append(nullString); - } + Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition); + currentPosition += Bytes.SIZEOF_INT; + Bytes.putInt(newArray, currentPosition, useShortNew ? 
arrayLength : -arrayLength); + currentPosition += Bytes.SIZEOF_INT; + Bytes.putByte(newArray, currentPosition, arrayBytes[offset + length - 1]); + } + + public static boolean concatArrays(ImmutableBytesWritable ptr, int array1BytesLength, + int array1BytesOffset, byte[] array1Bytes, PDataType baseType, int actualLengthOfArray1, + int actualLengthOfArray2) { + int array2BytesLength = ptr.getLength(); + int array2BytesOffset = ptr.getOffset(); + byte[] array2Bytes = ptr.get(); + + byte[] newArray; + + if (!baseType.isFixedWidth()) { + byte serializationVersion1 = + array1Bytes[array1BytesOffset + array1BytesLength - Bytes.SIZEOF_BYTE]; + int offsetArrayPositionArray1 = Bytes.toInt(array1Bytes, array1BytesOffset + array1BytesLength + - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); + int offsetArrayPositionArray2 = Bytes.toInt(array2Bytes, array2BytesOffset + array2BytesLength + - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT); + int offsetArrayLengthArray1 = array1BytesLength - offsetArrayPositionArray1 - Bytes.SIZEOF_INT + - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; + int offsetArrayLengthArray2 = array2BytesLength - offsetArrayPositionArray2 - Bytes.SIZEOF_INT + - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE; + int newArrayLength = actualLengthOfArray1 + actualLengthOfArray2; + int nullsAtTheEndOfArray1 = 0; + int nullsAtTheBeginningOfArray2 = 0; + // checks whether offset array consists of shorts or integers + boolean useIntArray1 = offsetArrayLengthArray1 / actualLengthOfArray1 == Bytes.SIZEOF_INT; + boolean useIntArray2 = offsetArrayLengthArray2 / actualLengthOfArray2 == Bytes.SIZEOF_INT; + boolean useIntNewArray = false; + // count nulls at the end of array 1 + for (int index = actualLengthOfArray1 - 1; index > -1; index--) { + int offset = getOffset(array1Bytes, index, !useIntArray1, + array1BytesOffset + offsetArrayPositionArray1, serializationVersion1); + if ( + array1Bytes[array1BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE + || array1Bytes[array1BytesOffset + offset] == QueryConstants.DESC_SEPARATOR_BYTE + ) { + nullsAtTheEndOfArray1++; } else { - result.append(element.toString()); + break; } - ptr.set(PVarchar.INSTANCE.toBytes(result.toString(), sortOrder)); - return true; - } - - public static boolean stringToArray(ImmutableBytesWritable ptr, String string, String delimiter, String nullString, SortOrder sortOrder) { - Pattern pattern = Pattern.compile(Pattern.quote(delimiter)); - String[] array; - if (delimiter.length() != 0) { - array = pattern.split(string); - if (nullString != null) { - for (int i = 0; i < array.length; i++) { - if (array[i].equals(nullString)) { - array[i] = null; - } - } - } + } + // count nulls at the beginning of the array 2 + int array2FirstNonNullElementOffset = 0; + int array2FirstNonNullIndex = 0; + byte serializationVersion2 = + array2Bytes[array2BytesOffset + array2BytesLength - Bytes.SIZEOF_BYTE]; + for (int index = 0; index < actualLengthOfArray2; index++) { + int offset = getOffset(array2Bytes, index, !useIntArray2, + array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + if (array2Bytes[array2BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE) { + nullsAtTheBeginningOfArray2++; } else { - array = string.split("(?!^)"); + array2FirstNonNullIndex = index; + array2FirstNonNullElementOffset = offset; + break; + } + } + int nullsInMiddleAfterConcat = nullsAtTheEndOfArray1 + nullsAtTheBeginningOfArray2; + int bytesForNullsBefore = + nullsAtTheBeginningOfArray2 / 255 + 
(nullsAtTheBeginningOfArray2 % 255 == 0 ? 0 : 1); + int bytesForNullsAfter = + nullsInMiddleAfterConcat / 255 + (nullsInMiddleAfterConcat % 255 == 0 ? 0 : 1); + // Increase of length required to store nulls + int lengthIncreaseForNulls = bytesForNullsAfter - bytesForNullsBefore; + // Length increase incremented by one when there were no nulls at the beginning of array and + // when there are + // nulls at the end of array 1 as we need to allocate a byte for separator byte in this case. + lengthIncreaseForNulls += + nullsAtTheBeginningOfArray2 == 0 && nullsAtTheEndOfArray1 != 0 ? Bytes.SIZEOF_BYTE : 0; + int newOffsetArrayPosition = offsetArrayPositionArray1 + offsetArrayPositionArray2 + + lengthIncreaseForNulls - 2 * Bytes.SIZEOF_BYTE; + int endElementPositionOfArray2 = getOffset(array2Bytes, actualLengthOfArray2 - 1, + !useIntArray2, array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + int newEndElementPosition = lengthIncreaseForNulls + endElementPositionOfArray2 + + offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE; + // Creates a byte array to store the concatenated array + if (PArrayDataType.useShortForOffsetArray(newEndElementPosition)) { + newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_SHORT + + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + } else { + useIntNewArray = true; + newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_INT + + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE]; + } + + int currentPosition = 0; + // Copies all the elements from array 1 to new array + System.arraycopy(array1Bytes, array1BytesOffset, newArray, currentPosition, + offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE); + currentPosition = offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE; + int array2StartingPosition = currentPosition; + currentPosition += nullsInMiddleAfterConcat != 0 ? 1 : 0; + // Writes nulls in the middle of the array. + currentPosition = serializeNulls(newArray, currentPosition, nullsInMiddleAfterConcat); + // Copies the elements from array 2 beginning from the first non null element. + System.arraycopy(array2Bytes, array2BytesOffset + array2FirstNonNullElementOffset, newArray, + currentPosition, offsetArrayPositionArray2 - array2FirstNonNullElementOffset); + currentPosition += offsetArrayPositionArray2 - array2FirstNonNullElementOffset; + + // Writing offset arrays + if (useIntNewArray) { + // offsets for the elements from array 1. Simply copied. + for (int index = 0; index < actualLengthOfArray1; index++) { + int offset = getOffset(array1Bytes, index, !useIntArray1, + array1BytesOffset + offsetArrayPositionArray1, serializationVersion1); + Bytes.putInt(newArray, currentPosition, offset); + currentPosition += Bytes.SIZEOF_INT; + } + // offsets for nulls in the middle + for (int index = 0; index < array2FirstNonNullIndex; index++) { + int offset = getOffset(array2Bytes, index, !useIntArray2, + array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + Bytes.putInt(newArray, currentPosition, offset + array2StartingPosition); + currentPosition += Bytes.SIZEOF_INT; + } + // offsets for the elements from the first non null element from array 2 + int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter + + (bytesForNullsAfter == 0 ? 
0 : Bytes.SIZEOF_BYTE); + for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) { + int offset = getOffset(array2Bytes, index, !useIntArray2, + array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + Bytes.putInt(newArray, currentPosition, + offset - array2FirstNonNullElementOffset + part2NonNullStartingPosition); + currentPosition += Bytes.SIZEOF_INT; + } + } else { + // offsets for the elements from array 1. Simply copied. + for (int index = 0; index < actualLengthOfArray1; index++) { + int offset = getOffset(array1Bytes, index, !useIntArray1, + array1BytesOffset + offsetArrayPositionArray1, serializationVersion1); + Bytes.putShort(newArray, currentPosition, (short) (offset - Short.MAX_VALUE)); + currentPosition += Bytes.SIZEOF_SHORT; + } + // offsets for nulls in the middle + for (int index = 0; index < array2FirstNonNullIndex; index++) { + int offset = getOffset(array2Bytes, index, !useIntArray2, + array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + Bytes.putShort(newArray, currentPosition, + (short) (offset + array2StartingPosition - Short.MAX_VALUE)); + currentPosition += Bytes.SIZEOF_SHORT; + } + // offsets for the elements from the first non null element from array 2 + int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter + + (bytesForNullsAfter == 0 ? 0 : Bytes.SIZEOF_BYTE); + for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) { + int offset = getOffset(array2Bytes, index, !useIntArray2, + array2BytesOffset + offsetArrayPositionArray2, serializationVersion2); + Bytes.putShort(newArray, currentPosition, (short) (offset + - array2FirstNonNullElementOffset + part2NonNullStartingPosition - Short.MAX_VALUE)); + currentPosition += Bytes.SIZEOF_SHORT; } - PhoenixArray phoenixArray = new PhoenixArray(PVarchar.INSTANCE, array); - ptr.set(PVarcharArray.INSTANCE.toBytes(phoenixArray, PVarchar.INSTANCE, sortOrder)); - return true; + } + Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition); + currentPosition += Bytes.SIZEOF_INT; + Bytes.putInt(newArray, currentPosition, useIntNewArray ? 
-newArrayLength : newArrayLength); + currentPosition += Bytes.SIZEOF_INT; + Bytes.putByte(newArray, currentPosition, + array1Bytes[array1BytesOffset + array1BytesLength - 1]); + } else { + newArray = new byte[array1BytesLength + array2BytesLength]; + System.arraycopy(array1Bytes, array1BytesOffset, newArray, 0, array1BytesLength); + System.arraycopy(array2Bytes, array2BytesOffset, newArray, array1BytesLength, + array2BytesLength); } - - public static int serializeOffsetArrayIntoStream(DataOutputStream oStream, TrustedByteArrayOutputStream byteStream, - int noOfElements, int maxOffset, int[] offsetPos, byte serializationVersion) throws IOException { - int offsetPosition = (byteStream.size()); - byte[] offsetArr = null; - boolean useInt = true; - if (PArrayDataType.useShortForOffsetArray(maxOffset, serializationVersion)) { - offsetArr = new byte[PArrayDataType.initOffsetArray(noOfElements, Bytes.SIZEOF_SHORT)]; - useInt = false; - } else { - offsetArr = new byte[PArrayDataType.initOffsetArray(noOfElements, Bytes.SIZEOF_INT)]; - noOfElements = -noOfElements; + ptr.set(newArray); + return true; + } + + public static boolean arrayToString(ImmutableBytesWritable ptr, PhoenixArray array, + String delimiter, String nullString, SortOrder sortOrder) { + StringBuilder result = new StringBuilder(); + boolean delimiterPending = false; + for (int i = 0; i < array.getDimensions() - 1; i++) { + Object element = array.getElement(i); + if (element == null) { + if (nullString != null) { + result.append(nullString); } - int off = 0; - if (useInt) { - for (int pos : offsetPos) { - Bytes.putInt(offsetArr, off, pos); - off += Bytes.SIZEOF_INT; - } - } else { - for (int pos : offsetPos) { - short val = - serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION - || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_V2 - ? 
(short) pos - : (short) (pos - Short.MAX_VALUE); - Bytes.putShort(offsetArr, off, val); - off += Bytes.SIZEOF_SHORT; - } + } else { + result.append(element.toString()); + delimiterPending = true; + } + if (nullString != null || (array.getElement(i + 1) != null && delimiterPending)) { + result.append(delimiter); + delimiterPending = false; + } + } + Object element = array.getElement(array.getDimensions() - 1); + if (element == null) { + if (nullString != null) { + result.append(nullString); + } + } else { + result.append(element.toString()); + } + ptr.set(PVarchar.INSTANCE.toBytes(result.toString(), sortOrder)); + return true; + } + + public static boolean stringToArray(ImmutableBytesWritable ptr, String string, String delimiter, + String nullString, SortOrder sortOrder) { + Pattern pattern = Pattern.compile(Pattern.quote(delimiter)); + String[] array; + if (delimiter.length() != 0) { + array = pattern.split(string); + if (nullString != null) { + for (int i = 0; i < array.length; i++) { + if (array[i].equals(nullString)) { + array[i] = null; + } } - oStream.write(offsetArr); - oStream.writeInt(offsetPosition); - return noOfElements; + } + } else { + array = string.split("(?!^)"); } - - public static void serializeHeaderInfoIntoStream(DataOutputStream oStream, int noOfElements, byte serializationVersion) throws IOException { - // No of elements - oStream.writeInt(noOfElements); - // Version of the array - oStream.write(serializationVersion); + PhoenixArray phoenixArray = new PhoenixArray(PVarchar.INSTANCE, array); + ptr.set(PVarcharArray.INSTANCE.toBytes(phoenixArray, PVarchar.INSTANCE, sortOrder)); + return true; + } + + public static int serializeOffsetArrayIntoStream(DataOutputStream oStream, + TrustedByteArrayOutputStream byteStream, int noOfElements, int maxOffset, int[] offsetPos, + byte serializationVersion) throws IOException { + int offsetPosition = (byteStream.size()); + byte[] offsetArr = null; + boolean useInt = true; + if (PArrayDataType.useShortForOffsetArray(maxOffset, serializationVersion)) { + offsetArr = new byte[PArrayDataType.initOffsetArray(noOfElements, Bytes.SIZEOF_SHORT)]; + useInt = false; + } else { + offsetArr = new byte[PArrayDataType.initOffsetArray(noOfElements, Bytes.SIZEOF_INT)]; + noOfElements = -noOfElements; } - - public static int initOffsetArray(int noOfElements, int baseSize) { - // for now create an offset array equal to the noofelements - return noOfElements * baseSize; + int off = 0; + if (useInt) { + for (int pos : offsetPos) { + Bytes.putInt(offsetArr, off, pos); + off += Bytes.SIZEOF_INT; + } + } else { + for (int pos : offsetPos) { + short val = serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION + || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_V2 + ? 
(short) pos + : (short) (pos - Short.MAX_VALUE); + Bytes.putShort(offsetArr, off, val); + off += Bytes.SIZEOF_SHORT; + } } - - // Any variable length array would follow the below order - // Every element would be seperated by a seperator byte '0' - // Null elements are counted and once a first non null element appears we - // write the count of the nulls prefixed with a seperator byte - // Trailing nulls are not taken into account - // The last non null element is followed by two seperator bytes - // For eg - // a, b, null, null, c, null would be - // 65 0 66 0 0 2 67 0 0 0 - // a null null null b c null d would be - // 65 0 0 3 66 0 67 0 0 1 68 0 0 0 - // Follow the above example to understand how this works - private Object createPhoenixArray(byte[] bytes, int offset, int length, SortOrder sortOrder, - PDataType baseDataType, Integer maxLength, PDataType desiredDataType) { - if (bytes == null || length == 0) { return null; } - Object[] elements; - if (!baseDataType.isFixedWidth()) { - ByteBuffer buffer = ByteBuffer.wrap(bytes, offset, length); - int initPos = buffer.position(); - buffer.position((buffer.limit() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT))); - int noOfElements = buffer.getInt(); - boolean useShort = true; - int baseSize = Bytes.SIZEOF_SHORT; - if (noOfElements < 0) { - noOfElements = -noOfElements; - baseSize = Bytes.SIZEOF_INT; - useShort = false; - } - if (baseDataType == desiredDataType) { - elements = (Object[])java.lang.reflect.Array.newInstance(baseDataType.getJavaClass(), noOfElements); - } else { - elements = (Object[])java.lang.reflect.Array.newInstance(desiredDataType.getJavaClass(), noOfElements); - } - buffer.position(buffer.limit() - (Bytes.SIZEOF_BYTE + (2 * Bytes.SIZEOF_INT))); - int indexOffset = buffer.getInt(); - buffer.position(initPos); - buffer.position(indexOffset + initPos); - ByteBuffer indexArr = ByteBuffer.allocate(initOffsetArray(noOfElements, baseSize)); - byte[] array = indexArr.array(); - buffer.get(array); - int countOfElementsRead = 0; - int i = 0; - int currOffset = -1; - int nextOff = -1; - boolean foundNull = false; - if (noOfElements != 0) { - while (countOfElementsRead <= noOfElements) { - if (countOfElementsRead == 0) { - currOffset = getOffset(indexArr, countOfElementsRead, useShort, indexOffset); - countOfElementsRead++; - } else { - currOffset = nextOff; - } - if (countOfElementsRead == noOfElements) { - nextOff = indexOffset - 2; - } else { - nextOff = getOffset(indexArr, countOfElementsRead + 1, useShort, indexOffset); - } - countOfElementsRead++; - if ((bytes[currOffset + initPos] != QueryConstants.SEPARATOR_BYTE && bytes[currOffset + initPos] != QueryConstants.DESC_SEPARATOR_BYTE) && foundNull) { - // Found a non null element - foundNull = false; - } - if (bytes[currOffset + initPos] == QueryConstants.SEPARATOR_BYTE || bytes[currOffset + initPos] == QueryConstants.DESC_SEPARATOR_BYTE) { - // Null element - foundNull = true; - i++; - continue; - } - int elementLength = nextOff - currOffset; - buffer.position(currOffset + initPos); - // Subtract the seperator from the element length - byte[] val = new byte[elementLength - 1]; - buffer.get(val); - if (baseDataType == desiredDataType) { - elements[i++] = baseDataType.toObject(val, sortOrder); - } else { - elements[i++] = desiredDataType.toObject(val, sortOrder, baseDataType); - } - } - } - } else { - int elemLength = (maxLength == null ? 
baseDataType.getByteSize() : maxLength); - int noOfElements = length / elemLength; - if (baseDataType == desiredDataType) { - elements = (Object[])java.lang.reflect.Array.newInstance(baseDataType.getJavaClass(), noOfElements); - } else { - elements = (Object[])java.lang.reflect.Array.newInstance(desiredDataType.getJavaClass(), noOfElements); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - for (int i = 0; i < noOfElements; i++) { - ptr.set(bytes, offset + i * elemLength, elemLength); - if (baseDataType == desiredDataType) { - elements[i] = baseDataType.toObject(ptr, sortOrder); - } else { - elements[i] = desiredDataType.toObject(ptr, baseDataType, sortOrder); - } - } + oStream.write(offsetArr); + oStream.writeInt(offsetPosition); + return noOfElements; + } + + public static void serializeHeaderInfoIntoStream(DataOutputStream oStream, int noOfElements, + byte serializationVersion) throws IOException { + // No of elements + oStream.writeInt(noOfElements); + // Version of the array + oStream.write(serializationVersion); + } + + public static int initOffsetArray(int noOfElements, int baseSize) { + // for now create an offset array equal to the noofelements + return noOfElements * baseSize; + } + + // Any variable length array would follow the below order + // Every element would be seperated by a seperator byte '0' + // Null elements are counted and once a first non null element appears we + // write the count of the nulls prefixed with a seperator byte + // Trailing nulls are not taken into account + // The last non null element is followed by two seperator bytes + // For eg + // a, b, null, null, c, null would be + // 65 0 66 0 0 2 67 0 0 0 + // a null null null b c null d would be + // 65 0 0 3 66 0 67 0 0 1 68 0 0 0 + // Follow the above example to understand how this works + private Object createPhoenixArray(byte[] bytes, int offset, int length, SortOrder sortOrder, + PDataType baseDataType, Integer maxLength, PDataType desiredDataType) { + if (bytes == null || length == 0) { + return null; + } + Object[] elements; + if (!baseDataType.isFixedWidth()) { + ByteBuffer buffer = ByteBuffer.wrap(bytes, offset, length); + int initPos = buffer.position(); + buffer.position((buffer.limit() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT))); + int noOfElements = buffer.getInt(); + boolean useShort = true; + int baseSize = Bytes.SIZEOF_SHORT; + if (noOfElements < 0) { + noOfElements = -noOfElements; + baseSize = Bytes.SIZEOF_INT; + useShort = false; + } + if (baseDataType == desiredDataType) { + elements = + (Object[]) java.lang.reflect.Array.newInstance(baseDataType.getJavaClass(), noOfElements); + } else { + elements = (Object[]) java.lang.reflect.Array.newInstance(desiredDataType.getJavaClass(), + noOfElements); + } + buffer.position(buffer.limit() - (Bytes.SIZEOF_BYTE + (2 * Bytes.SIZEOF_INT))); + int indexOffset = buffer.getInt(); + buffer.position(initPos); + buffer.position(indexOffset + initPos); + ByteBuffer indexArr = ByteBuffer.allocate(initOffsetArray(noOfElements, baseSize)); + byte[] array = indexArr.array(); + buffer.get(array); + int countOfElementsRead = 0; + int i = 0; + int currOffset = -1; + int nextOff = -1; + boolean foundNull = false; + if (noOfElements != 0) { + while (countOfElementsRead <= noOfElements) { + if (countOfElementsRead == 0) { + currOffset = getOffset(indexArr, countOfElementsRead, useShort, indexOffset); + countOfElementsRead++; + } else { + currOffset = nextOff; + } + if (countOfElementsRead == noOfElements) { + nextOff = indexOffset - 2; + } else { 
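          // Illustration (not from the patch itself): the serialization comment above
          // gives {a, b, null, null, c, null} -> 65 0 66 0 0 2 67 0 0 0 for the data
          // section. In this loop each element's span is nextOff - currOffset, the
          // trailing separator byte is dropped when the value is copied
          // (new byte[elementLength - 1] below), and a slot whose first byte is a
          // separator byte is treated as a null and skipped, which is what the
          // foundNull flag tracks.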
+ nextOff = getOffset(indexArr, countOfElementsRead + 1, useShort, indexOffset); + } + countOfElementsRead++; + if ( + (bytes[currOffset + initPos] != QueryConstants.SEPARATOR_BYTE + && bytes[currOffset + initPos] != QueryConstants.DESC_SEPARATOR_BYTE) && foundNull + ) { + // Found a non null element + foundNull = false; + } + if ( + bytes[currOffset + initPos] == QueryConstants.SEPARATOR_BYTE + || bytes[currOffset + initPos] == QueryConstants.DESC_SEPARATOR_BYTE + ) { + // Null element + foundNull = true; + i++; + continue; + } + int elementLength = nextOff - currOffset; + buffer.position(currOffset + initPos); + // Subtract the seperator from the element length + byte[] val = new byte[elementLength - 1]; + buffer.get(val); + if (baseDataType == desiredDataType) { + elements[i++] = baseDataType.toObject(val, sortOrder); + } else { + elements[i++] = desiredDataType.toObject(val, sortOrder, baseDataType); + } } + } + } else { + int elemLength = (maxLength == null ? baseDataType.getByteSize() : maxLength); + int noOfElements = length / elemLength; + if (baseDataType == desiredDataType) { + elements = + (Object[]) java.lang.reflect.Array.newInstance(baseDataType.getJavaClass(), noOfElements); + } else { + elements = (Object[]) java.lang.reflect.Array.newInstance(desiredDataType.getJavaClass(), + noOfElements); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + for (int i = 0; i < noOfElements; i++) { + ptr.set(bytes, offset + i * elemLength, elemLength); if (baseDataType == desiredDataType) { - return PArrayDataType.instantiatePhoenixArray(baseDataType, elements); + elements[i] = baseDataType.toObject(ptr, sortOrder); } else { - return PArrayDataType.instantiatePhoenixArray(desiredDataType, elements); + elements[i] = desiredDataType.toObject(ptr, baseDataType, sortOrder); } + } } - - public static PhoenixArray instantiatePhoenixArray(PDataType actualType, Object[] elements) { - return PDataType.instantiatePhoenixArray(actualType, elements); + if (baseDataType == desiredDataType) { + return PArrayDataType.instantiatePhoenixArray(baseDataType, elements); + } else { + return PArrayDataType.instantiatePhoenixArray(desiredDataType, elements); } + } - @Override - public int compareTo(Object lhs, Object rhs) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - PhoenixArray lhsArr = (PhoenixArray)lhs; - PhoenixArray rhsArr = (PhoenixArray)rhs; - if (lhsArr.equals(rhsArr)) { return 0; } - return 1; - } + public static PhoenixArray instantiatePhoenixArray(PDataType actualType, Object[] elements) { + return PDataType.instantiatePhoenixArray(actualType, elements); + } - public static int getArrayLength(ImmutableBytesWritable ptr, PDataType baseType, Integer maxLength) { - byte[] bytes = ptr.get(); - if (ptr.getLength() == 0) { - return 0; - } - if (baseType.isFixedWidth()) { - int elemLength = maxLength == null ? baseType.getByteSize() : maxLength; - return (ptr.getLength() / elemLength); - } - // In case where the number of elements is greater than SHORT.MAX_VALUE we do negate the number of - // elements. 
So it is always better to return the absolute value - return (Bytes.toInt(bytes, (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT)))); + @Override + public int compareTo(Object lhs, Object rhs) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + PhoenixArray lhsArr = (PhoenixArray) lhs; + PhoenixArray rhsArr = (PhoenixArray) rhs; + if (lhsArr.equals(rhsArr)) { + return 0; + } + return 1; + } + + public static int getArrayLength(ImmutableBytesWritable ptr, PDataType baseType, + Integer maxLength) { + byte[] bytes = ptr.get(); + if (ptr.getLength() == 0) { + return 0; + } + if (baseType.isFixedWidth()) { + int elemLength = maxLength == null ? baseType.getByteSize() : maxLength; + return (ptr.getLength() / elemLength); + } + // In case where the number of elements is greater than SHORT.MAX_VALUE we do negate the number + // of + // elements. So it is always better to return the absolute value + return (Bytes.toInt(bytes, + (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT)))); + } + + public static int estimateSize(int size, PDataType baseType) { + if (baseType.isFixedWidth() && baseType.getByteSize() != null) { + return baseType.getByteSize() * size; + } else { + return size * ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; } - public static int estimateSize(int size, PDataType baseType) { - if (baseType.isFixedWidth() && baseType.getByteSize() != null) { - return baseType.getByteSize() * size; - } else { - return size * ValueSchema.ESTIMATED_VARIABLE_LENGTH_SIZE; - } + } + public Object getSampleValue(PDataType baseType, Integer arrayLength, Integer elemLength) { + Preconditions.checkArgument(arrayLength == null || arrayLength >= 0); + if (arrayLength == null) { + arrayLength = 1; } - - public Object getSampleValue(PDataType baseType, Integer arrayLength, Integer elemLength) { - Preconditions.checkArgument(arrayLength == null || arrayLength >= 0); - if (arrayLength == null) { - arrayLength = 1; - } - Object[] array = new Object[arrayLength]; - for (int i = 0; i < arrayLength; i++) { - array[i] = baseType.getSampleValue(elemLength, arrayLength); - } - return instantiatePhoenixArray(baseType, array); + Object[] array = new Object[arrayLength]; + for (int i = 0; i < arrayLength; i++) { + array[i] = baseType.getSampleValue(elemLength, arrayLength); } - - @Override - public String toStringLiteral(Object o, Format formatter) { - StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "["); - PhoenixArray array = (PhoenixArray)o; - PDataType baseType = PDataType.arrayBaseType(this); - int len = array.getDimensions(); - if (len != 0) { - for (int i = 0; i < len; i++) { - buf.append(baseType.toStringLiteral(array.getElement(i), null)); - buf.append(','); - } - buf.setLength(buf.length() - 1); - } - buf.append(']'); - return buf.toString(); + return instantiatePhoenixArray(baseType, array); + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + StringBuilder buf = new StringBuilder(PArrayDataType.ARRAY_TYPE_SUFFIX + "["); + PhoenixArray array = (PhoenixArray) o; + PDataType baseType = PDataType.arrayBaseType(this); + int len = array.getDimensions(); + if (len != 0) { + for (int i = 0; i < len; i++) { + buf.append(baseType.toStringLiteral(array.getElement(i), null)); + buf.append(','); + } + buf.setLength(buf.length() - 1); } + buf.append(']'); + return buf.toString(); + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeDecoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeDecoder.java index 22fa46c5c5b..0eff5125942 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeDecoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeDecoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,144 +26,157 @@ import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.ByteUtil; - public class PArrayDataTypeDecoder implements ColumnValueDecoder { - - @Override - public boolean decode(ImmutableBytesWritable ptr, int index) { - return PArrayDataTypeDecoder.positionAtArrayElement(ptr, index, PVarbinary.INSTANCE, null); - } - public static boolean positionAtArrayElement(Tuple tuple, ImmutableBytesWritable ptr, int index, - Expression arrayExpr, PDataType pDataType, Integer maxLen) { - if (!arrayExpr.evaluate(tuple, ptr)) { - return false; - } else if (ptr.getLength() == 0) { return true; } - - // Given a ptr to the entire array, set ptr to point to a particular element within that array - // given the type of an array element (see comments in PDataTypeForArray) - return positionAtArrayElement(ptr, index - 1, pDataType, maxLen); + @Override + public boolean decode(ImmutableBytesWritable ptr, int index) { + return PArrayDataTypeDecoder.positionAtArrayElement(ptr, index, PVarbinary.INSTANCE, null); + } + + public static boolean positionAtArrayElement(Tuple tuple, ImmutableBytesWritable ptr, int index, + Expression arrayExpr, PDataType pDataType, Integer maxLen) { + if (!arrayExpr.evaluate(tuple, ptr)) { + return false; + } else if (ptr.getLength() == 0) { + return true; } - public static boolean positionAtArrayElement(ImmutableBytesWritable ptr, int arrayIndex, PDataType baseDataType, - Integer byteSize) { - byte[] bytes = ptr.get(); - int initPos = ptr.getOffset(); - if (!baseDataType.isFixedWidth()) { - byte serializationVersion = bytes[ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_BYTE]; - int noOfElements = Bytes.toInt(bytes, - (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT)), Bytes.SIZEOF_INT); - boolean useShort = true; - if (noOfElements < 0) { - noOfElements = -noOfElements; - useShort = false; - } - if (arrayIndex >= noOfElements) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return false; - } - - int indexOffset = Bytes.toInt(bytes, - (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT))) + ptr.getOffset(); - // Skip those many offsets as given in the arrayIndex - // If suppose there are 5 elements in the array and the arrayIndex = 3 - // This means we need to read the 4th element of the array - // So inorder to know the length of the 4th element we will read the offset of 4th element and the - // offset of 5th element. - // Subtracting the offset of 5th element and 4th element will give the length of 4th element - // So we could just skip reading the other elements. 
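To make the offset arithmetic described in the comment above concrete, here is a small illustrative sketch. It is not part of this change; it assumes the sortable serialization version and a non-null element that is not the last one, and it uses a plain int[] in place of the serialized offset section that the getOffset()/getSerializedOffset() helpers read.

  class ElementLengthSketch {
    // length of element k = start of element k+1 minus start of element k,
    // minus the single separator byte that terminates element k
    static int elementLength(int[] offsets, int arrayIndex) {
      return offsets[arrayIndex + 1] - offsets[arrayIndex] - 1;
    }

    public static void main(String[] args) {
      int[] offsets = {0, 2, 4, 6, 8};               // five single-byte elements
      System.out.println(elementLength(offsets, 3)); // 1
    }
  }

For the case in the comment (5 elements, arrayIndex = 3), only the offsets of the 4th and 5th elements are read, which is why the remaining elements can be skipped entirely.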
- int currOffset = PArrayDataType.getSerializedOffset(bytes, arrayIndex, useShort, indexOffset, serializationVersion); - if (currOffset<0) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - return false; - } - int elementLength = 0; - if (arrayIndex == (noOfElements - 1)) { - // in the original IMMUTABLE_SERIALIZATION_VERSION (v1), for nulls we store - // (separatorByte, #_of_nulls) in the data. Because of the separatorByte, we can't - // distinguish between nulls and actual data values that start with the separator - // byte. We do a hack here to limit the damage by checking offsets - if the prior - // offset had a length of 0, then we know we're storing 2 or more nulls. However, we - // still can't fix the case distinguishing a single null from a short value. There - // are two kinds of separatorByte, so the results will be potentially incorrect for - // 2 short values that correspond to (separatorByte, 1) - if (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) { - elementLength = indexOffset - (currOffset + initPos); - if (isNullValue(arrayIndex, bytes, initPos, serializationVersion, useShort, indexOffset, currOffset, elementLength)) { - elementLength = 0; - } - } else { - int separatorBytes = serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION ? 3 : 0; - elementLength = isSeparatorByte(bytes, initPos, currOffset) && serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION ? 0 : indexOffset - - (currOffset + initPos) - separatorBytes; - } - } else { - if (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) { - elementLength = PArrayDataType.getOffset(bytes, arrayIndex + 1, - useShort, indexOffset, serializationVersion) - - currOffset; - if (isNullValue(arrayIndex, bytes, initPos, serializationVersion, useShort, indexOffset, currOffset, elementLength)) { - elementLength = 0; - } - } else { - int separatorByte = - serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION - ? 1 - : 0; - elementLength = - isSeparatorByte(bytes, initPos, currOffset) - && serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION - ? 
0 - : PArrayDataType.getOffset(bytes, arrayIndex + 1, - useShort, indexOffset, serializationVersion) - - currOffset - separatorByte; - } - } - ptr.set(bytes, currOffset + initPos, elementLength); + // Given a ptr to the entire array, set ptr to point to a particular element within that array + // given the type of an array element (see comments in PDataTypeForArray) + return positionAtArrayElement(ptr, index - 1, pDataType, maxLen); + } + + public static boolean positionAtArrayElement(ImmutableBytesWritable ptr, int arrayIndex, + PDataType baseDataType, Integer byteSize) { + byte[] bytes = ptr.get(); + int initPos = ptr.getOffset(); + if (!baseDataType.isFixedWidth()) { + byte serializationVersion = bytes[ptr.getOffset() + ptr.getLength() - Bytes.SIZEOF_BYTE]; + int noOfElements = Bytes.toInt(bytes, + (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT)), + Bytes.SIZEOF_INT); + boolean useShort = true; + if (noOfElements < 0) { + noOfElements = -noOfElements; + useShort = false; + } + if (arrayIndex >= noOfElements) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return false; + } + + int indexOffset = Bytes.toInt(bytes, + (ptr.getOffset() + ptr.getLength() - (Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT))) + + ptr.getOffset(); + // Skip those many offsets as given in the arrayIndex + // If suppose there are 5 elements in the array and the arrayIndex = 3 + // This means we need to read the 4th element of the array + // So inorder to know the length of the 4th element we will read the offset of 4th element and + // the + // offset of 5th element. + // Subtracting the offset of 5th element and 4th element will give the length of 4th element + // So we could just skip reading the other elements. + int currOffset = PArrayDataType.getSerializedOffset(bytes, arrayIndex, useShort, indexOffset, + serializationVersion); + if (currOffset < 0) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + return false; + } + int elementLength = 0; + if (arrayIndex == (noOfElements - 1)) { + // in the original IMMUTABLE_SERIALIZATION_VERSION (v1), for nulls we store + // (separatorByte, #_of_nulls) in the data. Because of the separatorByte, we can't + // distinguish between nulls and actual data values that start with the separator + // byte. We do a hack here to limit the damage by checking offsets - if the prior + // offset had a length of 0, then we know we're storing 2 or more nulls. However, we + // still can't fix the case distinguishing a single null from a short value. There + // are two kinds of separatorByte, so the results will be potentially incorrect for + // 2 short values that correspond to (separatorByte, 1) + if (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) { + elementLength = indexOffset - (currOffset + initPos); + if ( + isNullValue(arrayIndex, bytes, initPos, serializationVersion, useShort, indexOffset, + currOffset, elementLength) + ) { + elementLength = 0; + } } else { - int elemByteSize = (byteSize == null ? baseDataType.getByteSize() : byteSize); - int offset = arrayIndex * elemByteSize; - if (offset >= ptr.getLength()) { - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - } else { - ptr.set(bytes, ptr.getOffset() + offset, elemByteSize); - } + int separatorBytes = + serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION ? 3 : 0; + elementLength = isSeparatorByte(bytes, initPos, currOffset) + && serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION + ? 
0 + : indexOffset - (currOffset + initPos) - separatorBytes; } - return true; + } else { + if (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) { + elementLength = PArrayDataType.getOffset(bytes, arrayIndex + 1, useShort, indexOffset, + serializationVersion) - currOffset; + if ( + isNullValue(arrayIndex, bytes, initPos, serializationVersion, useShort, indexOffset, + currOffset, elementLength) + ) { + elementLength = 0; + } + } else { + int separatorByte = + serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION ? 1 : 0; + elementLength = isSeparatorByte(bytes, initPos, currOffset) + && serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION + ? 0 + : PArrayDataType.getOffset(bytes, arrayIndex + 1, useShort, indexOffset, + serializationVersion) - currOffset - separatorByte; + } + } + ptr.set(bytes, currOffset + initPos, elementLength); + } else { + int elemByteSize = (byteSize == null ? baseDataType.getByteSize() : byteSize); + int offset = arrayIndex * elemByteSize; + if (offset >= ptr.getLength()) { + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + } else { + ptr.set(bytes, ptr.getOffset() + offset, elemByteSize); + } } + return true; + } - // returns true if the prior element in the array is a null - private static boolean isNullValue(int arrayIndex, byte[] bytes, int initPos, - byte serializationVersion, boolean useShort, int indexOffset, int currOffset, - int elementLength) { - if (isSeparatorByte(bytes, initPos, currOffset)) { - if (isPriorValueZeroLength(arrayIndex, bytes, - serializationVersion, useShort, indexOffset, currOffset)) { - return true; - } else { - // if there's no prior null, there can be at most 1 null - if (elementLength == 2) { - // nullByte calculation comes from the encoding of one null - // see PArrayDataType#serializeNulls - byte nullByte = SortOrder.invert((byte)(0)); - if (bytes[initPos+currOffset+1] == nullByte) { - return true; - } - } - } + // returns true if the prior element in the array is a null + private static boolean isNullValue(int arrayIndex, byte[] bytes, int initPos, + byte serializationVersion, boolean useShort, int indexOffset, int currOffset, + int elementLength) { + if (isSeparatorByte(bytes, initPos, currOffset)) { + if ( + isPriorValueZeroLength(arrayIndex, bytes, serializationVersion, useShort, indexOffset, + currOffset) + ) { + return true; + } else { + // if there's no prior null, there can be at most 1 null + if (elementLength == 2) { + // nullByte calculation comes from the encoding of one null + // see PArrayDataType#serializeNulls + byte nullByte = SortOrder.invert((byte) (0)); + if (bytes[initPos + currOffset + 1] == nullByte) { + return true; + } } - return false; + } } + return false; + } - // checks prior value length by subtracting offset of the previous item from the current offset - private static boolean isPriorValueZeroLength(int arrayIndex, byte[] bytes, byte serializationVersion, - boolean useShort, int indexOffset, int currOffset) { - return arrayIndex > 0 && currOffset - PArrayDataType.getOffset(bytes, arrayIndex - 1, - useShort, indexOffset, serializationVersion) == 0; - } + // checks prior value length by subtracting offset of the previous item from the current offset + private static boolean isPriorValueZeroLength(int arrayIndex, byte[] bytes, + byte serializationVersion, boolean useShort, int indexOffset, int currOffset) { + return arrayIndex > 0 && currOffset + - PArrayDataType.getOffset(bytes, arrayIndex - 1, useShort, indexOffset, serializationVersion) + == 0; + } - private 
static boolean isSeparatorByte(byte[] bytes, int initPos, int currOffset) { - return bytes[currOffset + initPos] == QueryConstants.SEPARATOR_BYTE || bytes[currOffset + initPos] == QueryConstants.DESC_SEPARATOR_BYTE; - } + private static boolean isSeparatorByte(byte[] bytes, int initPos, int currOffset) { + return bytes[currOffset + initPos] == QueryConstants.SEPARATOR_BYTE + || bytes[currOffset + initPos] == QueryConstants.DESC_SEPARATOR_BYTE; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeEncoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeEncoder.java index da1a42b44d7..f55dab0c6c7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeEncoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PArrayDataTypeEncoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,204 +38,216 @@ import org.apache.phoenix.util.TrustedByteArrayOutputStream; public class PArrayDataTypeEncoder implements ColumnValueEncoder { - static private final int BYTE_ARRAY_DEFAULT_SIZE = 128; - - private PDataType baseType; - private SortOrder sortOrder; - private List offsetPos; - private TrustedByteArrayOutputStream byteStream; - private DataOutputStream oStream; - private int nulls; - private byte serializationVersion; - private boolean rowKeyOrderOptimizable; - - public PArrayDataTypeEncoder(PDataType baseType, SortOrder sortOrder) { - this(new TrustedByteArrayOutputStream(BYTE_ARRAY_DEFAULT_SIZE), new LinkedList(), baseType, sortOrder, true); - } - - public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, - int numElements, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable, byte serializationVersion) { - this(byteStream, oStream, new ArrayList(numElements), baseType, sortOrder, rowKeyOrderOptimizable, serializationVersion); - } - - public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, - int numElements, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { - this(byteStream, oStream, new ArrayList(numElements), baseType, sortOrder, rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); - } - - public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, - List offsetPos, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { - this(byteStream, new DataOutputStream(byteStream), offsetPos, baseType, sortOrder, rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); - } - - public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, - List offsetPos, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable, byte serializationVersion) { - this.baseType = baseType; - this.sortOrder = sortOrder; - this.offsetPos = offsetPos; - this.byteStream = byteStream; - this.oStream = oStream; - this.nulls = 0; - this.serializationVersion = serializationVersion; - this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + static private final int BYTE_ARRAY_DEFAULT_SIZE = 128; + + private PDataType baseType; + 
private SortOrder sortOrder; + private List offsetPos; + private TrustedByteArrayOutputStream byteStream; + private DataOutputStream oStream; + private int nulls; + private byte serializationVersion; + private boolean rowKeyOrderOptimizable; + + public PArrayDataTypeEncoder(PDataType baseType, SortOrder sortOrder) { + this(new TrustedByteArrayOutputStream(BYTE_ARRAY_DEFAULT_SIZE), new LinkedList(), + baseType, sortOrder, true); + } + + public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, + int numElements, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable, + byte serializationVersion) { + this(byteStream, oStream, new ArrayList(numElements), baseType, sortOrder, + rowKeyOrderOptimizable, serializationVersion); + } + + public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, + int numElements, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { + this(byteStream, oStream, new ArrayList(numElements), baseType, sortOrder, + rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); + } + + public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, List offsetPos, + PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) { + this(byteStream, new DataOutputStream(byteStream), offsetPos, baseType, sortOrder, + rowKeyOrderOptimizable, PArrayDataType.SORTABLE_SERIALIZATION_VERSION); + } + + public PArrayDataTypeEncoder(TrustedByteArrayOutputStream byteStream, DataOutputStream oStream, + List offsetPos, PDataType baseType, SortOrder sortOrder, + boolean rowKeyOrderOptimizable, byte serializationVersion) { + this.baseType = baseType; + this.sortOrder = sortOrder; + this.offsetPos = offsetPos; + this.byteStream = byteStream; + this.oStream = oStream; + this.nulls = 0; + this.serializationVersion = serializationVersion; + this.rowKeyOrderOptimizable = rowKeyOrderOptimizable; + } + + private void close() { + try { + if (byteStream != null) byteStream.close(); + if (oStream != null) oStream.close(); + byteStream = null; + oStream = null; + } catch (IOException ioe) { } + } - private void close() { - try { - if (byteStream != null) byteStream.close(); - if (oStream != null) oStream.close(); - byteStream = null; - oStream = null; - } catch (IOException ioe) {} + // used to represent the absence of a value + @Override + public void appendAbsentValue() { + if ( + (serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION + || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_V2) + && !baseType.isFixedWidth() + ) { + offsetPos.add(-byteStream.size()); + nulls++; + } else { + throw new UnsupportedOperationException("Cannot represent an absent element"); } - - // used to represent the absence of a value - @Override - public void appendAbsentValue() { - if ((serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION - || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_V2) - && !baseType.isFixedWidth()) { - offsetPos.add(-byteStream.size()); - nulls++; + } + + public void appendValue(byte[] bytes) { + appendValue(bytes, 0, bytes.length); + } + + @Override + public void appendValue(byte[] bytes, int offset, int len) { + try { + // track the offset position here from the size of the byteStream + if (!baseType.isFixedWidth()) { + // Any variable length array would follow the below order + // Every element would be seperated by a seperator byte '0' + // Null elements are counted and once a first non 
null element appears we + // write the count of the nulls prefixed with a seperator byte + // Trailing nulls are not taken into account + // The last non null element is followed by two seperator bytes + // For eg + // a, b, null, null, c, null would be + // 65 0 66 0 0 2 67 0 0 0 + // a null null null b c null d would be + // 65 0 0 3 66 0 67 0 0 1 68 0 0 0 + if (len == 0) { + offsetPos.add(byteStream.size()); + nulls++; + } else { + // we don't serialize nulls for IMMUTABLE_SERIALIZATION_V2 + if ( + serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION + || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION + ) { + nulls = PArrayDataType.serializeNulls(oStream, nulls); + } + offsetPos.add(byteStream.size()); + if (sortOrder == SortOrder.DESC) { + SortOrder.invert(bytes, offset, bytes, offset, len); + offset = 0; + } + oStream.write(bytes, offset, len); + if (serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION) { + oStream.write(PArrayDataType.getSeparatorByte(rowKeyOrderOptimizable, sortOrder)); + } } - else { - throw new UnsupportedOperationException("Cannot represent an absent element"); + } else { + // No nulls for fixed length + if (sortOrder == SortOrder.DESC) { + SortOrder.invert(bytes, offset, bytes, offset, len); + offset = 0; } + oStream.write(bytes, offset, len); + } + } catch (IOException e) { } + } - public void appendValue(byte[] bytes) { - appendValue(bytes, 0, bytes.length); - } - - @Override - public void appendValue(byte[] bytes, int offset, int len) { - try { - // track the offset position here from the size of the byteStream - if (!baseType.isFixedWidth()) { - // Any variable length array would follow the below order - // Every element would be seperated by a seperator byte '0' - // Null elements are counted and once a first non null element appears we - // write the count of the nulls prefixed with a seperator byte - // Trailing nulls are not taken into account - // The last non null element is followed by two seperator bytes - // For eg - // a, b, null, null, c, null would be - // 65 0 66 0 0 2 67 0 0 0 - // a null null null b c null d would be - // 65 0 0 3 66 0 67 0 0 1 68 0 0 0 - if (len == 0) { - offsetPos.add(byteStream.size()); - nulls++; - } else { - // we don't serialize nulls for IMMUTABLE_SERIALIZATION_V2 - if (serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION - || serializationVersion == PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) { - nulls = PArrayDataType.serializeNulls(oStream, nulls); - } - offsetPos.add(byteStream.size()); - if (sortOrder == SortOrder.DESC) { - SortOrder.invert(bytes, offset, bytes, offset, len); - offset = 0; - } - oStream.write(bytes, offset, len); - if (serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION) { - oStream.write(PArrayDataType.getSeparatorByte(rowKeyOrderOptimizable, sortOrder)); - } - } - } else { - // No nulls for fixed length - if (sortOrder == SortOrder.DESC) { - SortOrder.invert(bytes, offset, bytes, offset, len); - offset = 0; - } - oStream.write(bytes, offset, len); - } - } catch (IOException e) {} + @Override + public byte[] encode() { + try { + if (!baseType.isFixedWidth()) { + int noOfElements = offsetPos.size(); + int[] offsetPosArray = new int[noOfElements]; + int index = 0, maxOffset = 0; + for (Integer i : offsetPos) { + maxOffset = offsetPosArray[index] = i; + ++index; + } + if (serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION) { + // Double seperator byte to show end of the non null array + 
PArrayDataType.writeEndSeperatorForVarLengthArray(oStream, sortOrder, + rowKeyOrderOptimizable); + } + noOfElements = PArrayDataType.serializeOffsetArrayIntoStream(oStream, byteStream, + noOfElements, maxOffset, offsetPosArray, serializationVersion); + PArrayDataType.serializeHeaderInfoIntoStream(oStream, noOfElements, serializationVersion); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(byteStream.getBuffer(), 0, byteStream.size()); + return ByteUtil.copyKeyBytesIfNecessary(ptr); + } catch (IOException e) { + } finally { + close(); } + return null; + } - @Override - public byte[] encode() { - try { - if (!baseType.isFixedWidth()) { - int noOfElements = offsetPos.size(); - int[] offsetPosArray = new int[noOfElements]; - int index = 0, maxOffset = 0; - for (Integer i : offsetPos) { - maxOffset = offsetPosArray[index] = i; - ++index; - } - if (serializationVersion == PArrayDataType.SORTABLE_SERIALIZATION_VERSION) { - // Double seperator byte to show end of the non null array - PArrayDataType.writeEndSeperatorForVarLengthArray(oStream, sortOrder, rowKeyOrderOptimizable); - } - noOfElements = PArrayDataType.serializeOffsetArrayIntoStream(oStream, byteStream, noOfElements, - maxOffset, offsetPosArray, serializationVersion); - PArrayDataType.serializeHeaderInfoIntoStream(oStream, noOfElements, serializationVersion); + /** + * @param colValueMap map from column to value + * @return estimated encoded size + */ + public static int getEstimatedByteSize(PTable table, int rowLength, + Map colValueMap) { + // iterate over column familiies + int rowSize = 0; + for (PColumnFamily family : table.getColumnFamilies()) { + Collection columns = family.getColumns(); + // we add a non null value to the start so that we can represent absent values in the array + // with negative offsets + int numColumns = columns.size() + 1; + int cellSize = 1; + int nulls = 0; + int maxOffset = 0; + // iterate over columns + for (PColumn column : columns) { + if (colValueMap.containsKey(column)) { + byte[] colValue = colValueMap.get(column); + // the column value is null + if (colValue == null || colValue.length == 0) { + ++nulls; + maxOffset = cellSize; + } else { + // count the bytes written to serialize nulls + if (nulls > 0) { + cellSize += (1 + Math.ceil(nulls / 255.0)); + nulls = 0; } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(byteStream.getBuffer(), 0, byteStream.size()); - return ByteUtil.copyKeyBytesIfNecessary(ptr); - } catch (IOException e) {} finally { - close(); + maxOffset = cellSize; + cellSize += colValue.length; + } } - return null; - } - - /** - * @param colValueMap map from column to value - * @return estimated encoded size - */ - public static int getEstimatedByteSize(PTable table, int rowLength, - Map colValueMap) { - // iterate over column familiies - int rowSize = 0; - for (PColumnFamily family : table.getColumnFamilies()) { - Collection columns = family.getColumns(); - // we add a non null value to the start so that we can represent absent values in the array with negative offsets - int numColumns = columns.size() + 1; - int cellSize = 1; - int nulls = 0; - int maxOffset = 0; - // iterate over columns - for (PColumn column : columns) { - if (colValueMap.containsKey(column)) { - byte[] colValue = colValueMap.get(column); - // the column value is null - if (colValue == null || colValue.length == 0) { - ++nulls; - maxOffset = cellSize; - } else { - // count the bytes written to serialize nulls - if (nulls > 0) { - cellSize += (1 + Math.ceil(nulls / 
255.0)); - nulls = 0; - } - maxOffset = cellSize; - cellSize += colValue.length; - } - } - // the column value is absent - else { - ++nulls; - maxOffset = cellSize; - } - } - // count the bytes used for the offset array - cellSize += - PArrayDataType.useShortForOffsetArray(maxOffset, - PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) - ? numColumns * Bytes.SIZEOF_SHORT - : numColumns * Bytes.SIZEOF_INT; - cellSize += 4; - // count the bytes used for header information - cellSize += 5; - // add the size of the single cell containing all column values - rowSize += - KeyValue.getKeyValueDataStructureSize(rowLength, - family.getName().getBytes().length, - QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES.length, cellSize); + // the column value is absent + else { + ++nulls; + maxOffset = cellSize; } - return rowSize; + } + // count the bytes used for the offset array + cellSize += PArrayDataType.useShortForOffsetArray(maxOffset, + PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION) + ? numColumns * Bytes.SIZEOF_SHORT + : numColumns * Bytes.SIZEOF_INT; + cellSize += 4; + // count the bytes used for header information + cellSize += 5; + // add the size of the single cell containing all column values + rowSize += + KeyValue.getKeyValueDataStructureSize(rowLength, family.getName().getBytes().length, + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES.length, cellSize); } - -} \ No newline at end of file + return rowSize; + } + +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinary.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinary.java index d35e8a5bf5f..f9d7451c95a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinary.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinary.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,174 +29,174 @@ public class PBinary extends PBinaryBase { - public static final PBinary INSTANCE = new PBinary(); + public static final PBinary INSTANCE = new PBinary(); - private PBinary() { - super("BINARY", Types.BINARY, byte[].class, null, 23); - } - - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, - Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, - Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { - PVarbinary.INSTANCE.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, - actualModifier, desiredMaxLength, desiredScale, expectedModifier); - if (ptr.getLength() > 0 && null != desiredMaxLength && null != expectedModifier) { - pad(ptr, desiredMaxLength, expectedModifier); - } - } - - @Override - public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { - if (b == null || b.length >= maxLength) { - return b; - } - byte[] newBytes = new byte[maxLength]; - System.arraycopy(b, 0, newBytes, 0, b.length); - if (sortOrder == SortOrder.DESC) { - Arrays.fill(newBytes, b.length, maxLength, QueryConstants.DESC_SEPARATOR_BYTE); - } - return newBytes; - } - - @Override - public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) { - if (ptr.getLength() >= maxLength) { - return; - } - byte[] newBytes = new byte[maxLength]; - System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength()); - if (sortOrder == SortOrder.DESC) { - Arrays.fill(newBytes, ptr.getLength(), maxLength, QueryConstants.DESC_SEPARATOR_BYTE); - } - ptr.set(newBytes); - } - - @Override - public Object pad(Object object, Integer maxLength) { - byte[] b = (byte[]) object; - int length = (b == null ? 
0 : b.length); - if (length == maxLength) { - return object; - } - if (length > maxLength) { - throw new DataExceedsCapacityException(this, maxLength, null, null); - } - byte[] newBytes = new byte[maxLength]; - System.arraycopy(b, 0, newBytes, 0, length); - - return newBytes; - } - - @Override - public byte[] toBytes(Object object) { // Delegate to VARBINARY - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - return PVarbinary.INSTANCE.toBytes(object); - } + private PBinary() { + super("BINARY", Types.BINARY, byte[].class, null, 23); + } - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - return PVarbinary.INSTANCE.toBytes(object, bytes, offset); - - } - - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - byte[] bytes = toBytes(object); - if (sortOrder == SortOrder.DESC) { - return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); - } - return bytes; - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - if (!actualType.isCoercibleTo(this)) { - throwConstraintViolationException(actualType, this); - } - return PVarbinary.INSTANCE.toObject(bytes, offset, length, actualType, sortOrder); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - return actualType.toBytes(object); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public int estimateByteSize(Object o) { - byte[] value = (byte[]) o; - return value == null ? 1 : value.length; - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PVarbinary.INSTANCE); - } - - @Override - public Integer estimateByteSizeFromLength(Integer length) { - return length; - } - - @Override - public Integer getByteSize() { - return null; - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == null && rhs == null) { - return 0; - } else if (lhs == null) { - return -1; - } else if (rhs == null) { - return 1; - } - if (equalsAny(rhsType, PVarbinary.INSTANCE, PBinary.INSTANCE, PVarbinaryEncoded.INSTANCE)) { - return Bytes.compareTo((byte[]) lhs, (byte[]) rhs); - } else { - byte[] rhsBytes = rhsType.toBytes(rhs); - return Bytes.compareTo((byte[]) lhs, rhsBytes); - } - } - - @Override - public Integer getMaxLength(Object o) { - if (o == null) { - return null; - } - byte[] value = (byte[]) o; - return value.length; - } - - @Override - public Object toObject(String value) { - return PVarbinary.INSTANCE.toObject(value); - } - - @Override - public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { - return PVarbinary.INSTANCE.toStringLiteral(b, offset, length, formatter); - } - - @Override - public String toStringLiteral(Object o, Format formatter) { - return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return PVarbinary.INSTANCE.getSampleValue(maxLength, arrayLength); - } + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, + Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, + Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { + PVarbinary.INSTANCE.coerceBytes(ptr, o, actualType, 
actualMaxLength, actualScale, + actualModifier, desiredMaxLength, desiredScale, expectedModifier); + if (ptr.getLength() > 0 && null != desiredMaxLength && null != expectedModifier) { + pad(ptr, desiredMaxLength, expectedModifier); + } + } + + @Override + public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { + if (b == null || b.length >= maxLength) { + return b; + } + byte[] newBytes = new byte[maxLength]; + System.arraycopy(b, 0, newBytes, 0, b.length); + if (sortOrder == SortOrder.DESC) { + Arrays.fill(newBytes, b.length, maxLength, QueryConstants.DESC_SEPARATOR_BYTE); + } + return newBytes; + } + + @Override + public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) { + if (ptr.getLength() >= maxLength) { + return; + } + byte[] newBytes = new byte[maxLength]; + System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength()); + if (sortOrder == SortOrder.DESC) { + Arrays.fill(newBytes, ptr.getLength(), maxLength, QueryConstants.DESC_SEPARATOR_BYTE); + } + ptr.set(newBytes); + } + + @Override + public Object pad(Object object, Integer maxLength) { + byte[] b = (byte[]) object; + int length = (b == null ? 0 : b.length); + if (length == maxLength) { + return object; + } + if (length > maxLength) { + throw new DataExceedsCapacityException(this, maxLength, null, null); + } + byte[] newBytes = new byte[maxLength]; + System.arraycopy(b, 0, newBytes, 0, length); + + return newBytes; + } + + @Override + public byte[] toBytes(Object object) { // Delegate to VARBINARY + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + return PVarbinary.INSTANCE.toBytes(object); + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + return PVarbinary.INSTANCE.toBytes(object, bytes, offset); + + } + + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + byte[] bytes = toBytes(object); + if (sortOrder == SortOrder.DESC) { + return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); + } + return bytes; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (!actualType.isCoercibleTo(this)) { + throwConstraintViolationException(actualType, this); + } + return PVarbinary.INSTANCE.toObject(bytes, offset, length, actualType, sortOrder); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + return actualType.toBytes(object); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public int estimateByteSize(Object o) { + byte[] value = (byte[]) o; + return value == null ? 
1 : value.length; + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PVarbinary.INSTANCE); + } + + @Override + public Integer estimateByteSizeFromLength(Integer length) { + return length; + } + + @Override + public Integer getByteSize() { + return null; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == null && rhs == null) { + return 0; + } else if (lhs == null) { + return -1; + } else if (rhs == null) { + return 1; + } + if (equalsAny(rhsType, PVarbinary.INSTANCE, PBinary.INSTANCE, PVarbinaryEncoded.INSTANCE)) { + return Bytes.compareTo((byte[]) lhs, (byte[]) rhs); + } else { + byte[] rhsBytes = rhsType.toBytes(rhs); + return Bytes.compareTo((byte[]) lhs, rhsBytes); + } + } + + @Override + public Integer getMaxLength(Object o) { + if (o == null) { + return null; + } + byte[] value = (byte[]) o; + return value.length; + } + + @Override + public Object toObject(String value) { + return PVarbinary.INSTANCE.toObject(value); + } + + @Override + public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { + return PVarbinary.INSTANCE.toStringLiteral(b, offset, length, formatter); + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + return toStringLiteral((byte[]) o, 0, ((byte[]) o).length, formatter); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return PVarbinary.INSTANCE.getSampleValue(maxLength, arrayLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryArray.java index 523f774a1bc..6e95fe8e8a4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,66 +21,72 @@ public class PBinaryArray extends PArrayDataType { - public static final PBinaryArray INSTANCE = new PBinaryArray(); + public static final PBinaryArray INSTANCE = new PBinaryArray(); - private PBinaryArray() { - super("BINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PBinary.INSTANCE.getSqlType(), PhoenixArray.class, null, 28); - } + private PBinaryArray() { + super("BINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PBinary.INSTANCE.getSqlType(), + PhoenixArray.class, null, 28); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PBinary.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PBinary.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PBinary.INSTANCE, sortOrder, maxLength, scale, PBinary.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PBinary.INSTANCE, sortOrder, maxLength, scale, + PBinary.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { return true; } - PhoenixArray pArr = (PhoenixArray)value; - Object[] charArr = (Object[])pArr.array; - for (Object i : charArr) { - if (!super.isCoercibleTo(PBinary.INSTANCE, i)) { return false; } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PBinary.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] charArr = (Object[]) pArr.array; + for (Object i : charArr) { + if (!super.isCoercibleTo(PBinary.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PBinary.INSTANCE, 
arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java index 178e331eaa7..c2af8ca891c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,106 +24,107 @@ public abstract class PBinaryBase extends PDataType { - protected PBinaryBase(String sqlTypeName, int sqlType, Class clazz, - org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); + protected PBinaryBase(String sqlTypeName, int sqlType, Class clazz, + org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } + + public void getByte(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, + ImmutableBytesWritable outPtr) { + getByte(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, outPtr); + } + + public void getByte(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, + ImmutableBytesWritable outPtr) { + byte ret = bytes[offset + off]; + if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); + outPtr.set(PInteger.INSTANCE.toBytes(Integer.valueOf(ret))); + } + + public void setByte(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, byte newValue, + ImmutableBytesWritable outPtr) { + setByte(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, newValue, outPtr); + } + + public void setByte(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, + byte newValue, ImmutableBytesWritable outPtr) { + byte[] ret; + if (sortOrder == SortOrder.ASC) { + ret = new byte[length]; + System.arraycopy(bytes, offset, ret, 0, length); + } else { + ret = SortOrder.invert(bytes, offset, length); } - - public void getByte(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, - ImmutableBytesWritable outPtr) { - getByte(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, outPtr); - } - - public void getByte(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, - ImmutableBytesWritable outPtr) { - byte ret = bytes[offset + off]; - if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); - outPtr.set(PInteger.INSTANCE.toBytes(Integer.valueOf(ret))); - } - - public void setByte(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, byte newValue, - ImmutableBytesWritable outPtr) { - setByte(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, newValue, outPtr); - } - - public void setByte(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, - byte newValue, ImmutableBytesWritable outPtr) { - byte[] ret; - if (sortOrder == SortOrder.ASC) { - ret = new byte[length]; - System.arraycopy(bytes, offset, ret, 0, length); - } else { - ret = SortOrder.invert(bytes, offset, length); - } - ret[off] = newValue; - outPtr.set(ret); - } - - public void getBit(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, - 
ImmutableBytesWritable outPtr) { - getBit(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, outPtr); - } - - public void getBit(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, - ImmutableBytesWritable outPtr) { - byte ret = bytes[offset + (off / Byte.SIZE)]; - if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); - ret &= 1 << (off % Byte.SIZE); - ret = (ret != 0) ? (byte) 1 : (byte) 0; - outPtr.set(PInteger.INSTANCE.toBytes(Integer.valueOf(ret))); - } - - public void setBit(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, byte newValue, - ImmutableBytesWritable outPtr) { - setBit(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, newValue, outPtr); - } - - public void setBit(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, - byte newValue, ImmutableBytesWritable outPtr) { - byte ret = bytes[offset + (off / Byte.SIZE)]; - if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); - ret = (byte) ((ret & (~(1 << (off % Byte.SIZE)))) | (newValue << (off % Byte.SIZE))); - setByte(bytes, offset, length, sortOrder, off / Byte.SIZE, ret, outPtr); - } - - public void octetLength(ImmutableBytesWritable ptr, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - octetLength(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, outPtr); - } - - public void octetLength(byte[] bytes, int offset, int length, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - bytes = new byte[PInteger.INSTANCE.getByteSize()]; - PInteger.INSTANCE.getCodec().encodeInt(length, bytes, 0); - outPtr.set(bytes); - } - - @Override - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, - SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) { - if (ptr.getLength() != 0 && desiredMaxLength != null) { - if (maxLength == null) { // If not specified, compute - if (value != null && srcType instanceof PBinaryBase) { // Use value if provided - maxLength = ((byte[])value).length; - } else { // Else use ptr, coercing (which is likely a noop) - this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, desiredScale, sortOrder, true); - maxLength = ptr.getLength(); - } - } - return maxLength <= desiredMaxLength; + ret[off] = newValue; + outPtr.set(ret); + } + + public void getBit(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, + ImmutableBytesWritable outPtr) { + getBit(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, outPtr); + } + + public void getBit(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, + ImmutableBytesWritable outPtr) { + byte ret = bytes[offset + (off / Byte.SIZE)]; + if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); + ret &= 1 << (off % Byte.SIZE); + ret = (ret != 0) ? 
(byte) 1 : (byte) 0; + outPtr.set(PInteger.INSTANCE.toBytes(Integer.valueOf(ret))); + } + + public void setBit(ImmutableBytesWritable ptr, SortOrder sortOrder, int offset, byte newValue, + ImmutableBytesWritable outPtr) { + setBit(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, offset, newValue, outPtr); + } + + public void setBit(byte[] bytes, int offset, int length, SortOrder sortOrder, int off, + byte newValue, ImmutableBytesWritable outPtr) { + byte ret = bytes[offset + (off / Byte.SIZE)]; + if (sortOrder == SortOrder.DESC) ret = SortOrder.invert(ret); + ret = (byte) ((ret & (~(1 << (off % Byte.SIZE)))) | (newValue << (off % Byte.SIZE))); + setByte(bytes, offset, length, sortOrder, off / Byte.SIZE, ret, outPtr); + } + + public void octetLength(ImmutableBytesWritable ptr, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + octetLength(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, outPtr); + } + + public void octetLength(byte[] bytes, int offset, int length, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + bytes = new byte[PInteger.INSTANCE.getByteSize()]; + PInteger.INSTANCE.getCodec().encodeInt(length, bytes, 0); + outPtr.set(bytes); + } + + @Override + public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + if (ptr.getLength() != 0 && desiredMaxLength != null) { + if (maxLength == null) { // If not specified, compute + if (value != null && srcType instanceof PBinaryBase) { // Use value if provided + maxLength = ((byte[]) value).length; + } else { // Else use ptr, coercing (which is likely a noop) + this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, + desiredScale, sortOrder, true); + maxLength = ptr.getLength(); } - return true; + } + return maxLength <= desiredMaxLength; } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - if (jdbcType == byte[].class) { - return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } else { - // TODO we could duplicate getString() for String type - throw newMismatchException(actualType, jdbcType); - } + return true; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + if (jdbcType == byte[].class) { + return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + } else { + // TODO we could duplicate getString() for String type + throw newMismatchException(actualType, jdbcType); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBoolean.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBoolean.java index 35e38486b01..3a794c5f395 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBoolean.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBoolean.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,156 +22,156 @@ import java.sql.Types; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.primitives.Booleans; public class PBoolean extends PDataType { - public static final PBoolean INSTANCE = new PBoolean(); - - private PBoolean() { - super("BOOLEAN", Types.BOOLEAN, Boolean.class, null, 21); - } - - @Override - public byte[] toBytes(Object object) { - if (object == null) { - // TODO: review - return null? - throw newIllegalDataException(this + " may not be null"); - } - return ((Boolean) object).booleanValue() ? TRUE_BYTES : FALSE_BYTES; - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - // TODO: review - return null? - throw newIllegalDataException(this + " may not be null"); - } - bytes[offset] = ((Boolean) object).booleanValue() ? TRUE_BYTE : FALSE_BYTE; - return BOOLEAN_LENGTH; - } - - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - if (object == null) { - // TODO: review - return null? - throw newIllegalDataException(this + " may not be null"); - } - return ((Boolean) object).booleanValue() ^ sortOrder == SortOrder.ASC ? - FALSE_BYTES : - TRUE_BYTES; - } - - @Override - public Boolean toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - Preconditions.checkNotNull(sortOrder); - if (length == 0) { - return null; - } - if (actualType == this) { - if (length > 1) { - throw newIllegalDataException("BOOLEAN may only be a single byte"); - } - return ((bytes[offset] == FALSE_BYTE ^ sortOrder == SortOrder.DESC) ? - Boolean.FALSE : - Boolean.TRUE); - } else if (actualType == PDecimal.INSTANCE) { - // false translated to the ZERO_BYTE - return sortOrder == SortOrder.DESC ? 
SortOrder.invert(bytes[offset]) != ZERO_BYTE : bytes[offset] != ZERO_BYTE; - } - throwConstraintViolationException(actualType, this); - return null; - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - // FIXME according to the JDBC spec, we should support all these types: - // TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, - // BOOLEAN, CHAR, VARCHAR, LONGVARCHAR - if (Boolean.class.isAssignableFrom(jdbcType)) { - return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } - throw newMismatchException(actualType, jdbcType); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return super.isCoercibleTo(targetType) || targetType.equals(PBinary.INSTANCE); - } - - @Override - public boolean isCastableTo(PDataType targetType) { - // Allow cast to BOOLEAN so it can be used in an index or group by - return super.isCastableTo(targetType) || targetType.equals(PDecimal.INSTANCE); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public Integer getByteSize() { - return BOOLEAN_LENGTH; - } - - @Override - public int estimateByteSize(Object o) { - return BOOLEAN_LENGTH; - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - return Booleans.compare((Boolean) lhs, (Boolean) rhs); - } - - @Override - public Object toObject(String value) { - return Boolean.parseBoolean(value); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - if (actualType == this || object == null) { - return object; - } - if (actualType == PVarbinary.INSTANCE || actualType == PBinary.INSTANCE) { - byte[] bytes = (byte[]) object; - return toObject(bytes, 0, bytes.length); - } - if (actualType == PDecimal.INSTANCE) { - return ((BigDecimal) object).equals(BigDecimal.ZERO) ? Boolean.FALSE : Boolean.TRUE; - } - return throwConstraintViolationException(actualType, this); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return RANDOM.get().nextBoolean(); - } - - @Override - public PhoenixArrayFactory getArrayFactory() { - return new PhoenixArrayFactory() { - - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray.PrimitiveBooleanPhoenixArray(type, elements); - } - }; - } + public static final PBoolean INSTANCE = new PBoolean(); + + private PBoolean() { + super("BOOLEAN", Types.BOOLEAN, Boolean.class, null, 21); + } + + @Override + public byte[] toBytes(Object object) { + if (object == null) { + // TODO: review - return null? + throw newIllegalDataException(this + " may not be null"); + } + return ((Boolean) object).booleanValue() ? TRUE_BYTES : FALSE_BYTES; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + // TODO: review - return null? + throw newIllegalDataException(this + " may not be null"); + } + bytes[offset] = ((Boolean) object).booleanValue() ? TRUE_BYTE : FALSE_BYTE; + return BOOLEAN_LENGTH; + } + + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + if (object == null) { + // TODO: review - return null? 
+ throw newIllegalDataException(this + " may not be null"); + } + return ((Boolean) object).booleanValue() ^ sortOrder == SortOrder.ASC + ? FALSE_BYTES + : TRUE_BYTES; + } + + @Override + public Boolean toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + Preconditions.checkNotNull(sortOrder); + if (length == 0) { + return null; + } + if (actualType == this) { + if (length > 1) { + throw newIllegalDataException("BOOLEAN may only be a single byte"); + } + return ((bytes[offset] == FALSE_BYTE ^ sortOrder == SortOrder.DESC) + ? Boolean.FALSE + : Boolean.TRUE); + } else if (actualType == PDecimal.INSTANCE) { + // false translated to the ZERO_BYTE + return sortOrder == SortOrder.DESC + ? SortOrder.invert(bytes[offset]) != ZERO_BYTE + : bytes[offset] != ZERO_BYTE; + } + throwConstraintViolationException(actualType, this); + return null; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + // FIXME according to the JDBC spec, we should support all these types: + // TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, + // BOOLEAN, CHAR, VARCHAR, LONGVARCHAR + if (Boolean.class.isAssignableFrom(jdbcType)) { + return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + } + throw newMismatchException(actualType, jdbcType); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return super.isCoercibleTo(targetType) || targetType.equals(PBinary.INSTANCE); + } + + @Override + public boolean isCastableTo(PDataType targetType) { + // Allow cast to BOOLEAN so it can be used in an index or group by + return super.isCastableTo(targetType) || targetType.equals(PDecimal.INSTANCE); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return BOOLEAN_LENGTH; + } + + @Override + public int estimateByteSize(Object o) { + return BOOLEAN_LENGTH; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + return Booleans.compare((Boolean) lhs, (Boolean) rhs); + } + + @Override + public Object toObject(String value) { + return Boolean.parseBoolean(value); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + if (actualType == this || object == null) { + return object; + } + if (actualType == PVarbinary.INSTANCE || actualType == PBinary.INSTANCE) { + byte[] bytes = (byte[]) object; + return toObject(bytes, 0, bytes.length); + } + if (actualType == PDecimal.INSTANCE) { + return ((BigDecimal) object).equals(BigDecimal.ZERO) ? 
Boolean.FALSE : Boolean.TRUE; + } + return throwConstraintViolationException(actualType, this); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return RANDOM.get().nextBoolean(); + } + + @Override + public PhoenixArrayFactory getArrayFactory() { + return new PhoenixArrayFactory() { + + @Override + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray.PrimitiveBooleanPhoenixArray(type, elements); + } + }; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBooleanArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBooleanArray.java index 742b0de480e..0817e253e3b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBooleanArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBooleanArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,66 +22,72 @@ public class PBooleanArray extends PArrayDataType { - public static final PBooleanArray INSTANCE = new PBooleanArray(); + public static final PBooleanArray INSTANCE = new PBooleanArray(); - private PBooleanArray() { - super("BOOLEAN ARRAY", PDataType.ARRAY_TYPE_BASE + PBoolean.INSTANCE.getSqlType(), PhoenixArray.class, null, 25); - } + private PBooleanArray() { + super("BOOLEAN ARRAY", PDataType.ARRAY_TYPE_BASE + PBoolean.INSTANCE.getSqlType(), + PhoenixArray.class, null, 25); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PBoolean.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PBoolean.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PBoolean.INSTANCE, sortOrder, maxLength, scale, PBoolean.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PBoolean.INSTANCE, sortOrder, maxLength, scale, + PBoolean.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean 
isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { return true; } - PrimitiveBooleanPhoenixArray pArr = (PrimitiveBooleanPhoenixArray)value; - boolean[] booleanArr = (boolean[])pArr.array; - for (boolean b : booleanArr) { - if (!super.isCoercibleTo(PInteger.INSTANCE, b)) { return false; } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PBoolean.INSTANCE, arrayLength, maxLength); + PrimitiveBooleanPhoenixArray pArr = (PrimitiveBooleanPhoenixArray) value; + boolean[] booleanArr = (boolean[]) pArr.array; + for (boolean b : booleanArr) { + if (!super.isCoercibleTo(PInteger.INSTANCE, b)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PBoolean.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBson.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBson.java index 6944f2cbcd6..29a54aa5e8c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBson.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PBson.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,122 +15,120 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.types; -import org.bson.BsonDocument; -import org.bson.RawBsonDocument; -import org.bson.codecs.BsonDocumentCodec; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.util.ByteUtil; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonDocumentCodec; /** *
<p>
- * A Phoenix data type to represent Bson. The Bson can represent Scalar types as well as - * Complex nested types in Binary Encoded JSON. + * A Phoenix data type to represent Bson. The Bson can represent Scalar types as well as Complex + * nested types in Binary Encoded JSON. *
</p>
*/ public class PBson extends PVarbinary { - public static final PBson INSTANCE = new PBson(); - - private PBson() { - super("BSON", PDataType.BSON_TYPE, byte[].class, null, 49); - } - - @Override - public boolean canBePrimaryKey() { - return false; - } + public static final PBson INSTANCE = new PBson(); - @Override - public boolean isComparisonSupported() { - return false; - } + private PBson() { + super("BSON", PDataType.BSON_TYPE, byte[].class, null, 49); + } - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - byte[] b = toBytes(object); - System.arraycopy(b, 0, bytes, offset, b.length); - return b.length; - } + @Override + public boolean canBePrimaryKey() { + return false; + } - @Override - public byte[] toBytes(Object object) { - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - if (!(object instanceof BsonDocument)) { - throw new IllegalArgumentException("The object should be of type BsonDocument"); - } - if (object instanceof RawBsonDocument) { - return Bytes.toBytes(((RawBsonDocument) object).getByteBuffer().asNIO()); - } else { - RawBsonDocument rawBsonDocument = - new RawBsonDocument((BsonDocument) object, new BsonDocumentCodec()); - return Bytes.toBytes((rawBsonDocument).getByteBuffer().asNIO()); - } - } + @Override + public boolean isComparisonSupported() { + return false; + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - @SuppressWarnings("rawtypes") PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - return new RawBsonDocument(bytes, offset, length); + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; } - - @Override - public Object toObject(Object object, @SuppressWarnings("rawtypes") PDataType actualType) { - if (object == null) { - return null; - } - if (equalsAny(actualType, PVarchar.INSTANCE)) { - return toObject((String) object); - } - return object; + byte[] b = toBytes(object); + System.arraycopy(b, 0, bytes, offset, b.length); + return b.length; + } + + @Override + public byte[] toBytes(Object object) { + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; } - - @Override - public Object toObject(String value) { - if (value == null || value.isEmpty()) { - return null; - } - return RawBsonDocument.parse(value); + if (!(object instanceof BsonDocument)) { + throw new IllegalArgumentException("The object should be of type BsonDocument"); } - - @Override - public boolean isCoercibleTo(@SuppressWarnings("rawtypes") PDataType targetType) { - return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); + if (object instanceof RawBsonDocument) { + return Bytes.toBytes(((RawBsonDocument) object).getByteBuffer().asNIO()); + } else { + RawBsonDocument rawBsonDocument = + new RawBsonDocument((BsonDocument) object, new BsonDocumentCodec()); + return Bytes.toBytes((rawBsonDocument).getByteBuffer().asNIO()); } - - @Override - public int estimateByteSize(Object o) { - byte[] value = toBytes(o); - return value == null ? 
1 : value.length; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, + @SuppressWarnings("rawtypes") PDataType actualType, SortOrder sortOrder, Integer maxLength, + Integer scale) { + if (length == 0) { + return null; } + return new RawBsonDocument(bytes, offset, length); + } - @Override - public Integer getByteSize() { - return null; + @Override + public Object toObject(Object object, @SuppressWarnings("rawtypes") PDataType actualType) { + if (object == null) { + return null; } - - @Override - public boolean isBytesComparableWith(@SuppressWarnings("rawtypes") PDataType otherType) { - return otherType == PVarbinary.INSTANCE; + if (equalsAny(actualType, PVarchar.INSTANCE)) { + return toObject((String) object); } + return object; + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - String mapStr = "{\"map\":{\"attr_0\":{\"s\":\"val_0\"}}}"; - return this.toObject(mapStr); + @Override + public Object toObject(String value) { + if (value == null || value.isEmpty()) { + return null; } + return RawBsonDocument.parse(value); + } + + @Override + public boolean isCoercibleTo(@SuppressWarnings("rawtypes") PDataType targetType) { + return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); + } + + @Override + public int estimateByteSize(Object o) { + byte[] value = toBytes(o); + return value == null ? 1 : value.length; + } + + @Override + public Integer getByteSize() { + return null; + } + + @Override + public boolean isBytesComparableWith(@SuppressWarnings("rawtypes") PDataType otherType) { + return otherType == PVarbinary.INSTANCE; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + String mapStr = "{\"map\":{\"attr_0\":{\"s\":\"val_0\"}}}"; + return this.toObject(mapStr); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PChar.java index b36768d5c1d..d690c3fbdb7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PChar.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PChar.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,11 +26,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.exception.DataExceedsCapacityException; import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; - /** * Fixed length single byte characters */ @@ -42,205 +41,211 @@ private PChar() { super("CHAR", Types.CHAR, String.class, null, 1); } - @Override - public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) { - if (ptr.getLength() >= maxLength) { - return; - } - byte[] newBytes = new byte[maxLength]; - System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength()); - Arrays.fill(newBytes, ptr.getLength(), maxLength, sortOrder == SortOrder.ASC ? 
StringUtil.SPACE_UTF8 : StringUtil.INVERTED_SPACE_UTF8); - ptr.set(newBytes); + @Override + public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) { + if (ptr.getLength() >= maxLength) { + return; } + byte[] newBytes = new byte[maxLength]; + System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength()); + Arrays.fill(newBytes, ptr.getLength(), maxLength, + sortOrder == SortOrder.ASC ? StringUtil.SPACE_UTF8 : StringUtil.INVERTED_SPACE_UTF8); + ptr.set(newBytes); + } - @Override - public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { - if (b == null || b.length >= maxLength) { - return b; - } - byte[] newBytes = new byte[maxLength]; - System.arraycopy(b, 0, newBytes, 0, b.length); - Arrays.fill(newBytes, b.length, maxLength, sortOrder == SortOrder.ASC ? StringUtil.SPACE_UTF8 : StringUtil.INVERTED_SPACE_UTF8); - return newBytes; + @Override + public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { + if (b == null || b.length >= maxLength) { + return b; } + byte[] newBytes = new byte[maxLength]; + System.arraycopy(b, 0, newBytes, 0, b.length); + Arrays.fill(newBytes, b.length, maxLength, + sortOrder == SortOrder.ASC ? StringUtil.SPACE_UTF8 : StringUtil.INVERTED_SPACE_UTF8); + return newBytes; + } - @Override - public Object pad(Object object, Integer maxLength) { - String s = (String) object; - if (s == null) { - return Strings.padEnd("", maxLength, ' '); - } - if (s.length() == maxLength) { - return object; - } - if (s.length() > maxLength) { - throw new DataExceedsCapacityException(this, maxLength, null, null); - } - return Strings.padEnd(s, maxLength, ' '); + @Override + public Object pad(Object object, Integer maxLength) { + String s = (String) object; + if (s == null) { + return Strings.padEnd("", maxLength, ' '); } - - @Override - public byte[] toBytes(Object object) { - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - byte[] b = PVarchar.INSTANCE.toBytes(object); - if (b.length != ((String) object).length()) { - throw newIllegalDataException("CHAR types may only contain single byte characters."); - } - return b; + if (s.length() == maxLength) { + return object; } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - int len = PVarchar.INSTANCE.toBytes(object, bytes, offset); - if (len != ((String) object).length()) { - throw newIllegalDataException("CHAR types may only contain single byte characters."); - } - return len; + if (s.length() > maxLength) { + throw new DataExceedsCapacityException(this, maxLength, null, null); } + return Strings.padEnd(s, maxLength, ' '); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - if (!actualType.isCoercibleTo(this)) { - throwConstraintViolationException(actualType, this); - } - length = StringUtil.getUnpaddedCharLength(bytes, offset, length, sortOrder); - if (sortOrder == SortOrder.DESC) { - bytes = SortOrder.invert(bytes, offset, length); - offset = 0; - } - // TODO: UTF-8 decoder that will invert as it decodes - String s = Bytes.toString(bytes, offset, length); - if (length != s.length()) { - throw newIllegalDataException("CHAR types may only contain single byte characters."); - } - return s; + @Override + public byte[] toBytes(Object object) { + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; 
} - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - if (String.class.isAssignableFrom(jdbcType)) { - //We don't actually get here, we shortcut the String case in ResultSet - return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } - throw newMismatchException(actualType, jdbcType); + byte[] b = PVarchar.INSTANCE.toBytes(object); + if (b.length != ((String) object).length()) { + throw newIllegalDataException("CHAR types may only contain single byte characters."); } + return b; + } - @Override - public Object toObject(Object object, PDataType actualType) { - if (equalsAny(actualType, PVarchar.INSTANCE, this)) { - String s = (String) object; - return s == null || s.length() > 0 ? s : null; - } - return throwConstraintViolationException(actualType,this); + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PVarchar.INSTANCE, PBinary.INSTANCE, PVarbinary.INSTANCE); + int len = PVarchar.INSTANCE.toBytes(object, bytes, offset); + if (len != ((String) object).length()) { + throw newIllegalDataException("CHAR types may only contain single byte characters."); } + return len; + } - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, - Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, - Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { - if (o != null && actualType.equals(PVarchar.INSTANCE) && ((String)o).length() != ptr.getLength()) { - throw newIllegalDataException("CHAR types may only contain single byte characters."); - } - super.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, desiredScale, expectedModifier); - if (ptr.getLength() > 0 && desiredMaxLength != null && - desiredMaxLength > ptr.getLength()) { - pad(ptr, desiredMaxLength, expectedModifier); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (length == 0) { + return null; } - - @Override - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, - SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) { - if (ptr.getLength() != 0 && desiredMaxLength != null) { - if (maxLength == null) { - if (value != null && srcType == INSTANCE) { // Use value if provided - maxLength = ((String)value).length(); - } else { - this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, desiredScale, sortOrder, true); - maxLength = ptr.getLength(); // Only single byte characters - } - } - return maxLength <= desiredMaxLength; - } - return true; + if (!actualType.isCoercibleTo(this)) { + throwConstraintViolationException(actualType, this); } - - @Override - public boolean isFixedWidth() { - return true; + length = StringUtil.getUnpaddedCharLength(bytes, offset, length, sortOrder); + if (sortOrder == SortOrder.DESC) { + bytes = SortOrder.invert(bytes, offset, length); + offset = 0; } - - @Override - public Integer getByteSize() { - return null; + // TODO: UTF-8 decoder that will invert as it decodes + String s = 
Bytes.toString(bytes, offset, length); + if (length != s.length()) { + throw newIllegalDataException("CHAR types may only contain single byte characters."); } + return s; + } - @Override - public Integer getMaxLength(Object o) { - if (o == null) { - return null; - } - String value = (String) o; - return value.length(); + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + if (String.class.isAssignableFrom(jdbcType)) { + // We don't actually get here, we shortcut the String case in ResultSet + return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); } + throw newMismatchException(actualType, jdbcType); + } - @Override - public int estimateByteSize(Object o) { - String value = (String) o; - return value.length(); + @Override + public Object toObject(Object object, PDataType actualType) { + if (equalsAny(actualType, PVarchar.INSTANCE, this)) { + String s = (String) object; + return s == null || s.length() > 0 ? s : null; } + return throwConstraintViolationException(actualType, this); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PVarchar.INSTANCE, PBinary.INSTANCE, PVarbinary.INSTANCE); + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return PVarchar.INSTANCE.compareTo(lhs, rhs, rhsType); + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, + Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, + Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { + if ( + o != null && actualType.equals(PVarchar.INSTANCE) && ((String) o).length() != ptr.getLength() + ) { + throw newIllegalDataException("CHAR types may only contain single byte characters."); + } + super.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, + desiredMaxLength, desiredScale, expectedModifier); + if (ptr.getLength() > 0 && desiredMaxLength != null && desiredMaxLength > ptr.getLength()) { + pad(ptr, desiredMaxLength, expectedModifier); } + } - @Override - public Object toObject(String value) { - if (StringUtil.hasMultiByteChars(value)) { - throw newIllegalDataException("CHAR types may only contain single byte characters."); + @Override + public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + if (ptr.getLength() != 0 && desiredMaxLength != null) { + if (maxLength == null) { + if (value != null && srcType == INSTANCE) { // Use value if provided + maxLength = ((String) value).length(); + } else { + this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, + desiredScale, sortOrder, true); + maxLength = ptr.getLength(); // Only single byte characters + } } - return value; + return maxLength <= desiredMaxLength; } + return true; + } - @Override - public Integer estimateByteSizeFromLength(Integer length) { - return length; - } + @Override + public boolean isFixedWidth() { + return true; + } - @Override - public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType.equals(PVarchar.INSTANCE); - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public String toStringLiteral(byte[] b, int offset, int length, 
Format formatter) { - return PVarchar.INSTANCE.toStringLiteral(b, offset, length, formatter); + @Override + public Integer getMaxLength(Object o) { + if (o == null) { + return null; } + String value = (String) o; + return value.length(); + } - @Override - public String toStringLiteral(Object o, Format formatter) { - return PVarchar.INSTANCE.toStringLiteral(o, formatter); - } + @Override + public int estimateByteSize(Object o) { + String value = (String) o; + return value.length(); + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return PVarchar.INSTANCE.getSampleValue(maxLength, arrayLength); + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return PVarchar.INSTANCE.compareTo(lhs, rhs, rhsType); + } + + @Override + public Object toObject(String value) { + if (StringUtil.hasMultiByteChars(value)) { + throw newIllegalDataException("CHAR types may only contain single byte characters."); } + return value; + } + + @Override + public Integer estimateByteSizeFromLength(Integer length) { + return length; + } + + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return super.isBytesComparableWith(otherType) || otherType.equals(PVarchar.INSTANCE); + } + + @Override + public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { + return PVarchar.INSTANCE.toStringLiteral(b, offset, length, formatter); + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + return PVarchar.INSTANCE.toStringLiteral(o, formatter); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return PVarchar.INSTANCE.getSampleValue(maxLength, arrayLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PCharArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PCharArray.java index a740c7f3306..b70c4bd2ca4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PCharArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PCharArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,72 +21,72 @@ public class PCharArray extends PArrayDataType { - public static final PCharArray INSTANCE = new PCharArray(); + public static final PCharArray INSTANCE = new PCharArray(); - private PCharArray() { - super("CHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PChar.INSTANCE.getSqlType(), PhoenixArray.class, - null, 29); - } + private PCharArray() { + super("CHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PChar.INSTANCE.getSqlType(), PhoenixArray.class, + null, 29); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PChar.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PChar.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PChar.INSTANCE, sortOrder, maxLength, scale, - PChar.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PChar.INSTANCE, sortOrder, maxLength, scale, + PChar.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] charArr = (Object[]) pArr.array; - for (Object i : charArr) { - if (!super.isCoercibleTo(PChar.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PChar.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] charArr = (Object[]) pArr.array; + for (Object i : charArr) { + if (!super.isCoercibleTo(PChar.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PChar.INSTANCE, arrayLength, maxLength); + } } diff 
--git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataType.java index 1a76bafc47b..9ae67c15f76 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,1226 +37,1271 @@ import org.apache.phoenix.schema.ConstraintViolationException; import org.apache.phoenix.schema.IllegalDataException; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ScanUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.math.LongMath; import org.apache.phoenix.thirdparty.com.google.common.primitives.Doubles; import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; - +import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.ScanUtil; import org.bson.BsonDocument; -import org.bson.RawBsonDocument; /** * The data types of PColumns */ public abstract class PDataType implements DataType, Comparable> { - private final String sqlTypeName; - private final int sqlType; - private final Class clazz; - private final byte[] clazzNameBytes; - private final byte[] sqlTypeNameBytes; - private final PDataCodec codec; - private final int ordinal; - - protected PDataType(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, int ordinal) { - this.sqlTypeName = sqlTypeName; - this.sqlType = sqlType; - this.clazz = clazz; - this.clazzNameBytes = Bytes.toBytes(clazz.getName()); - this.sqlTypeNameBytes = Bytes.toBytes(sqlTypeName); - this.codec = codec; - this.ordinal = ordinal; - } - - public static PDataType[] values() { - return PDataTypeFactory.getInstance().getOrderedTypes(); - } - - public int ordinal() { - return ordinal; - } - - @SuppressWarnings("unchecked") - @Override - public Class encodedClass() { - return getJavaClass(); - } - - public boolean isCastableTo(PDataType targetType) { - return isComparableTo(targetType); - } - - public final PDataCodec getCodec() { - return codec; - } - - public boolean isBytesComparableWith(PDataType otherType) { - return equalsAny(this, otherType, PVarbinary.INSTANCE, PBinary.INSTANCE, - PVarbinaryEncoded.INSTANCE); - } - - /** - * @return true if {@link PDataType} can be declared as primary key otherwise false. - */ - public boolean canBePrimaryKey() { - return true; - } - - /** - * @return true if {@link PDataType} supports equality operators (=,!=,<,>,<=,>=) otherwise - * false. 
- */ - public boolean isComparisonSupported() { - return true; - } - - public int estimateByteSize(Object o) { - if (isFixedWidth()) { return getByteSize(); } - if (isArrayType()) { - PhoenixArray array = (PhoenixArray)o; - int noOfElements = array.numElements; - int totalVarSize = 0; - for (int i = 0; i < noOfElements; i++) { - totalVarSize += array.estimateByteSize(i); - } - return totalVarSize; - } - // Non fixed width types must override this - throw new UnsupportedOperationException(); - } - - public Integer getMaxLength(Object o) { - return null; - } - - public Integer getScale(Object o) { - return null; - } - - /** - * Estimate the byte size from the type length. For example, for char, byte size would be the same as length. For - * decimal, byte size would have no correlation with the length. - */ - public Integer estimateByteSizeFromLength(Integer length) { - if (isFixedWidth()) { return getByteSize(); } - if (isArrayType()) { return null; } - // If not fixed width, default to say the byte size is the same as length. - return length; - } - - public final String getSqlTypeName() { - return sqlTypeName; - } - - public final int getSqlType() { - return sqlType; - } - - public final Class getJavaClass() { - return clazz; - } - - public boolean isArrayType() { - return false; - } - - public final int compareTo(byte[] lhs, int lhsOffset, int lhsLength, SortOrder lhsSortOrder, byte[] rhs, - int rhsOffset, int rhsLength, SortOrder rhsSortOrder, PDataType rhsType) { - Preconditions.checkNotNull(lhsSortOrder); - Preconditions.checkNotNull(rhsSortOrder); - if (this.isBytesComparableWith(rhsType)) { // directly compare the bytes - // Special case as we may be comparing two arrays that have different separator characters due to PHOENIX-2067 - if (!this.isArrayType() || !rhsType.isArrayType() || - PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength) == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength)) { - // Ignore trailing zero bytes if fixed byte length (for example TIMESTAMP compared to DATE) - if (lhsLength != rhsLength && this.isFixedWidth() && rhsType.isFixedWidth() && this.getByteSize() != null && rhsType.getByteSize() != null) { - if (lhsLength > rhsLength) { - int minOffset = lhsOffset + rhsLength; - for (int i = lhsOffset + lhsLength - 1; i >= minOffset && lhsSortOrder.normalize(lhs[i]) == 0; i--,lhsLength--) { - } - } else { - int minOffset = rhsOffset + lhsLength; - for (int i = rhsOffset + rhsLength - 1; i >= minOffset && rhsSortOrder.normalize(rhs[i]) == 0; i--,rhsLength--) { - } - } - } - return compareTo(lhs, lhsOffset, lhsLength, lhsSortOrder, rhs, rhsOffset, rhsLength, rhsSortOrder); + private final String sqlTypeName; + private final int sqlType; + private final Class clazz; + private final byte[] clazzNameBytes; + private final byte[] sqlTypeNameBytes; + private final PDataCodec codec; + private final int ordinal; + + protected PDataType(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, int ordinal) { + this.sqlTypeName = sqlTypeName; + this.sqlType = sqlType; + this.clazz = clazz; + this.clazzNameBytes = Bytes.toBytes(clazz.getName()); + this.sqlTypeNameBytes = Bytes.toBytes(sqlTypeName); + this.codec = codec; + this.ordinal = ordinal; + } + + public static PDataType[] values() { + return PDataTypeFactory.getInstance().getOrderedTypes(); + } + + public int ordinal() { + return ordinal; + } + + @SuppressWarnings("unchecked") + @Override + public Class encodedClass() { + return getJavaClass(); 
+ } + + public boolean isCastableTo(PDataType targetType) { + return isComparableTo(targetType); + } + + public final PDataCodec getCodec() { + return codec; + } + + public boolean isBytesComparableWith(PDataType otherType) { + return equalsAny(this, otherType, PVarbinary.INSTANCE, PBinary.INSTANCE, + PVarbinaryEncoded.INSTANCE); + } + + /** Returns true if {@link PDataType} can be declared as primary key otherwise false. */ + public boolean canBePrimaryKey() { + return true; + } + + /** + * Returns true if {@link PDataType} supports equality operators (=,!=,<,>,<=,>=) otherwise false. + */ + public boolean isComparisonSupported() { + return true; + } + + public int estimateByteSize(Object o) { + if (isFixedWidth()) { + return getByteSize(); + } + if (isArrayType()) { + PhoenixArray array = (PhoenixArray) o; + int noOfElements = array.numElements; + int totalVarSize = 0; + for (int i = 0; i < noOfElements; i++) { + totalVarSize += array.estimateByteSize(i); + } + return totalVarSize; + } + // Non fixed width types must override this + throw new UnsupportedOperationException(); + } + + public Integer getMaxLength(Object o) { + return null; + } + + public Integer getScale(Object o) { + return null; + } + + /** + * Estimate the byte size from the type length. For example, for char, byte size would be the same + * as length. For decimal, byte size would have no correlation with the length. + */ + public Integer estimateByteSizeFromLength(Integer length) { + if (isFixedWidth()) { + return getByteSize(); + } + if (isArrayType()) { + return null; + } + // If not fixed width, default to say the byte size is the same as length. + return length; + } + + public final String getSqlTypeName() { + return sqlTypeName; + } + + public final int getSqlType() { + return sqlType; + } + + public final Class getJavaClass() { + return clazz; + } + + public boolean isArrayType() { + return false; + } + + public final int compareTo(byte[] lhs, int lhsOffset, int lhsLength, SortOrder lhsSortOrder, + byte[] rhs, int rhsOffset, int rhsLength, SortOrder rhsSortOrder, PDataType rhsType) { + Preconditions.checkNotNull(lhsSortOrder); + Preconditions.checkNotNull(rhsSortOrder); + if (this.isBytesComparableWith(rhsType)) { // directly compare the bytes + // Special case as we may be comparing two arrays that have different separator characters due + // to PHOENIX-2067 + if ( + !this.isArrayType() || !rhsType.isArrayType() + || PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength) + == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, + rhsLength) + ) { + // Ignore trailing zero bytes if fixed byte length (for example TIMESTAMP compared to DATE) + if ( + lhsLength != rhsLength && this.isFixedWidth() && rhsType.isFixedWidth() + && this.getByteSize() != null && rhsType.getByteSize() != null + ) { + if (lhsLength > rhsLength) { + int minOffset = lhsOffset + rhsLength; + for (int i = lhsOffset + lhsLength - 1; i >= minOffset + && lhsSortOrder.normalize(lhs[i]) == 0; i--, lhsLength--) { } - } - PDataCodec lhsCodec = this.getCodec(); - if ( lhsCodec == null ) { - byte[] rhsConverted; - Object o = this.toObject(rhs, rhsOffset, rhsLength, rhsType, rhsSortOrder); - - // No lhs native type representation, so convert rhsType to bytes representation of lhs type - // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to convert both. 
- if ( this.isArrayType() && PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength) == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength)) { - rhsConverted = ((PArrayDataType)this).toBytes(o, PArrayDataType.arrayBaseType(this), lhsSortOrder, PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength)); - } else { - rhsConverted = this.toBytes(o); - if (lhsSortOrder == SortOrder.DESC) { - lhs = SortOrder.invert(lhs, lhsOffset, new byte[lhsLength], 0, lhsLength); - lhsOffset = 0; - } + } else { + int minOffset = rhsOffset + lhsLength; + for (int i = rhsOffset + rhsLength - 1; i >= minOffset + && rhsSortOrder.normalize(rhs[i]) == 0; i--, rhsLength--) { } - return Bytes.compareTo(lhs, lhsOffset, lhsLength, rhsConverted, 0, rhsConverted.length); + } } - PDataCodec rhsCodec = rhsType.getCodec(); - if (rhsCodec == null) { - byte[] lhsConverted; - Object o = rhsType.toObject(lhs, lhsOffset, lhsLength, this, lhsSortOrder); - - // No rhs native type representation, so convert lhsType to bytes representation of rhs type - // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to convert both. - if ( rhsType.isArrayType() && PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength) == PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength)) { - lhsConverted = ((PArrayDataType)rhsType).toBytes(o, PArrayDataType.arrayBaseType(rhsType), rhsSortOrder, PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength)); - } else { - lhsConverted = rhsType.toBytes(o); - if (rhsSortOrder == SortOrder.DESC) { - rhs = SortOrder.invert(rhs, rhsOffset, new byte[rhsLength], 0, rhsLength); - } - } - return Bytes.compareTo(lhsConverted, 0, lhsConverted.length, rhs, rhsOffset, rhsLength); + return compareTo(lhs, lhsOffset, lhsLength, lhsSortOrder, rhs, rhsOffset, rhsLength, + rhsSortOrder); + } + } + PDataCodec lhsCodec = this.getCodec(); + if (lhsCodec == null) { + byte[] rhsConverted; + Object o = this.toObject(rhs, rhsOffset, rhsLength, rhsType, rhsSortOrder); + + // No lhs native type representation, so convert rhsType to bytes representation of lhs type + // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to + // convert both. 
+ if ( + this.isArrayType() + && PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength) + == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, + rhsLength) + ) { + rhsConverted = + ((PArrayDataType) this).toBytes(o, PArrayDataType.arrayBaseType(this), lhsSortOrder, + PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength)); + } else { + rhsConverted = this.toBytes(o); + if (lhsSortOrder == SortOrder.DESC) { + lhs = SortOrder.invert(lhs, lhsOffset, new byte[lhsLength], 0, lhsLength); + lhsOffset = 0; } - // convert to native and compare - if ( (this.isCoercibleTo(PLong.INSTANCE) || this.isCoercibleTo(PDate.INSTANCE)) && - (rhsType.isCoercibleTo(PLong.INSTANCE) || rhsType.isCoercibleTo(PDate.INSTANCE)) ) { - return Longs.compare(this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder), rhsType.getCodec() - .decodeLong(rhs, rhsOffset, rhsSortOrder)); - } else if (isDoubleOrFloat(this) && isDoubleOrFloat(rhsType)) { // native double to double comparison - return Doubles.compare(this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder), rhsType.getCodec() - .decodeDouble(rhs, rhsOffset, rhsSortOrder)); - } else { // native float/double to long comparison - float fvalue = 0.0F; - double dvalue = 0.0; - long lvalue = 0; - boolean isFloat = false; - int invert = 1; - - if (this.isCoercibleTo(PLong.INSTANCE)) { - lvalue = this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder); - } else if (this.getClass() == PFloat.class) { - isFloat = true; - fvalue = this.getCodec().decodeFloat(lhs, lhsOffset, lhsSortOrder); - } else if (this.isCoercibleTo(PDouble.INSTANCE)) { - dvalue = this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder); - } - if (rhsType.isCoercibleTo(PLong.INSTANCE)) { - lvalue = rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder); - } else if (rhsType == PFloat.INSTANCE) { - invert = -1; - isFloat = true; - fvalue = rhsType.getCodec().decodeFloat(rhs, rhsOffset, rhsSortOrder); - } else if (rhsType.isCoercibleTo(PDouble.INSTANCE)) { - invert = -1; - dvalue = rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder); - } - // Invert the comparison if float/double value is on the RHS - return invert * (isFloat ? compareFloatToLong(fvalue, lvalue) : compareDoubleToLong(dvalue, lvalue)); + } + return Bytes.compareTo(lhs, lhsOffset, lhsLength, rhsConverted, 0, rhsConverted.length); + } + PDataCodec rhsCodec = rhsType.getCodec(); + if (rhsCodec == null) { + byte[] lhsConverted; + Object o = rhsType.toObject(lhs, lhsOffset, lhsLength, this, lhsSortOrder); + + // No rhs native type representation, so convert lhsType to bytes representation of rhs type + // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to + // convert both. 
+ if ( + rhsType.isArrayType() && PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, + rhsOffset, rhsLength) + == PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength) + ) { + lhsConverted = ((PArrayDataType) rhsType).toBytes(o, PArrayDataType.arrayBaseType(rhsType), + rhsSortOrder, + PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength)); + } else { + lhsConverted = rhsType.toBytes(o); + if (rhsSortOrder == SortOrder.DESC) { + rhs = SortOrder.invert(rhs, rhsOffset, new byte[rhsLength], 0, rhsLength); } - } + } + return Bytes.compareTo(lhsConverted, 0, lhsConverted.length, rhs, rhsOffset, rhsLength); + } + // convert to native and compare + if ( + (this.isCoercibleTo(PLong.INSTANCE) || this.isCoercibleTo(PDate.INSTANCE)) + && (rhsType.isCoercibleTo(PLong.INSTANCE) || rhsType.isCoercibleTo(PDate.INSTANCE)) + ) { + return Longs.compare(this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder), + rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder)); + } else if (isDoubleOrFloat(this) && isDoubleOrFloat(rhsType)) { // native double to double + // comparison + return Doubles.compare(this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder), + rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder)); + } else { // native float/double to long comparison + float fvalue = 0.0F; + double dvalue = 0.0; + long lvalue = 0; + boolean isFloat = false; + int invert = 1; + + if (this.isCoercibleTo(PLong.INSTANCE)) { + lvalue = this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder); + } else if (this.getClass() == PFloat.class) { + isFloat = true; + fvalue = this.getCodec().decodeFloat(lhs, lhsOffset, lhsSortOrder); + } else if (this.isCoercibleTo(PDouble.INSTANCE)) { + dvalue = this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder); + } + if (rhsType.isCoercibleTo(PLong.INSTANCE)) { + lvalue = rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder); + } else if (rhsType == PFloat.INSTANCE) { + invert = -1; + isFloat = true; + fvalue = rhsType.getCodec().decodeFloat(rhs, rhsOffset, rhsSortOrder); + } else if (rhsType.isCoercibleTo(PDouble.INSTANCE)) { + invert = -1; + dvalue = rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder); + } + // Invert the comparison if float/double value is on the RHS + return invert + * (isFloat ? compareFloatToLong(fvalue, lvalue) : compareDoubleToLong(dvalue, lvalue)); + } + } + + public static boolean isDoubleOrFloat(PDataType type) { + return type == PFloat.INSTANCE || type == PDouble.INSTANCE || type == PUnsignedFloat.INSTANCE + || type == PUnsignedDouble.INSTANCE; + } + + /** + * Compares a float against a long. Behaves better than {@link #compareDoubleToLong(double, long)} + * for float values outside of Integer.MAX_VALUE and Integer.MIN_VALUE. a float value a long value + * @return -1 if f is less than l, 1 if f is greater than l, and 0 if f is equal to l + */ + private static int compareFloatToLong(float f, long l) { + if (f > Integer.MAX_VALUE || f < Integer.MIN_VALUE) { + return f < l ? -1 : f > l ? 1 : 0; + } + long diff = (long) f - l; + return Long.signum(diff); + } + + /** + * Compares a double against a long. 
a double value a long value + * @return -1 if d is less than l, 1 if d is greater than l, and 0 if d is equal to l + */ + private static int compareDoubleToLong(double d, long l) { + if (d > Long.MAX_VALUE) { + return 1; + } + if (d < Long.MIN_VALUE) { + return -1; + } + long diff = (long) d - l; + return Long.signum(diff); + } - public static boolean isDoubleOrFloat(PDataType type) { - return type == PFloat.INSTANCE || type == PDouble.INSTANCE || type == PUnsignedFloat.INSTANCE - || type == PUnsignedDouble.INSTANCE; + protected static void checkForSufficientLength(byte[] b, int offset, int requiredLength) { + if (b.length < offset + requiredLength) { + throw new RuntimeException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage("Expected length of at least " + requiredLength + " bytes, but had " + + (b.length - offset)) + .build().buildException()); } + } - /** - * Compares a float against a long. Behaves better than {@link #compareDoubleToLong(double, long)} for float values - * outside of Integer.MAX_VALUE and Integer.MIN_VALUE. - * - * @param f - * a float value - * @param l - * a long value - * @return -1 if f is less than l, 1 if f is greater than l, and 0 if f is equal to l - */ - private static int compareFloatToLong(float f, long l) { - if (f > Integer.MAX_VALUE || f < Integer.MIN_VALUE) { return f < l ? -1 : f > l ? 1 : 0; } - long diff = (long)f - l; - return Long.signum(diff); - } + protected static Void throwConstraintViolationException(PDataType source, PDataType target) { + throw new ConstraintViolationException( + new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) + .setMessage(source + " cannot be coerced to " + target).build().buildException()); + } - /** - * Compares a double against a long. - * - * @param d - * a double value - * @param l - * a long value - * @return -1 if d is less than l, 1 if d is greater than l, and 0 if d is equal to l - */ - private static int compareDoubleToLong(double d, long l) { - if (d > Long.MAX_VALUE) { return 1; } - if (d < Long.MIN_VALUE) { return -1; } - long diff = (long)d - l; - return Long.signum(diff); - } + protected static SQLException newMismatchException(PDataType source, Class target) { + return new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) + .setMessage(source + " cannot be retrieved as " + target).build().buildException(); + } - protected static void checkForSufficientLength(byte[] b, int offset, int requiredLength) { - if (b.length < offset + requiredLength) { throw new RuntimeException(new SQLExceptionInfo.Builder( - SQLExceptionCode.ILLEGAL_DATA) - .setMessage("Expected length of at least " + requiredLength + " bytes, but had " + (b.length - offset)) - .build().buildException()); } - } + protected static RuntimeException newIllegalDataException() { + return new IllegalDataException( + new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA).build().buildException()); + } - protected static Void throwConstraintViolationException(PDataType source, PDataType target) { - throw new ConstraintViolationException(new SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage(source + " cannot be coerced to " + target).build().buildException()); - } + protected static RuntimeException newIllegalDataException(String msg) { + return new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setMessage(msg).build().buildException()); + } - protected static SQLException newMismatchException(PDataType source, Class target) { - return new 
SQLExceptionInfo.Builder(SQLExceptionCode.TYPE_MISMATCH) - .setMessage(source + " cannot be retrieved as " + target).build().buildException(); - } + protected static RuntimeException newIllegalDataException(Exception e) { + return new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA) + .setRootCause(e).build().buildException()); + } - protected static RuntimeException newIllegalDataException() { - return new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA).build() - .buildException()); - } + @Override + public boolean equals(Object o) { + // PDataTypes are expected to be singletons. + // TODO: this doesn't jive with HBase's DataType + if (o == null) return false; + return getClass() == o.getClass(); + } - protected static RuntimeException newIllegalDataException(String msg) { - return new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA).setMessage(msg) - .build().buildException()); - } - - protected static RuntimeException newIllegalDataException(Exception e) { - return new IllegalDataException(new SQLExceptionInfo.Builder(SQLExceptionCode.ILLEGAL_DATA).setRootCause(e) - .build().buildException()); - } - - @Override - public boolean equals(Object o) { - // PDataTypes are expected to be singletons. - // TODO: this doesn't jive with HBase's DataType - if (o == null) return false; - return getClass() == o.getClass(); - } - - /** - * @return true when {@code lhs} equals any of {@code rhs}. - */ - public static boolean equalsAny(PDataType lhs, PDataType... rhs) { - for (int i = 0; i < rhs.length; i++) { - if (lhs.equals(rhs[i])) return true; - } - return false; + /** Returns true when {@code lhs} equals any of {@code rhs}. */ + public static boolean equalsAny(PDataType lhs, PDataType... 
rhs) { + for (int i = 0; i < rhs.length; i++) { + if (lhs.equals(rhs[i])) return true; } + return false; + } - public static interface PDataCodec { - public long decodeLong(ImmutableBytesWritable ptr, SortOrder sortOrder); + public static interface PDataCodec { + public long decodeLong(ImmutableBytesWritable ptr, SortOrder sortOrder); - public long decodeLong(byte[] b, int o, SortOrder sortOrder); + public long decodeLong(byte[] b, int o, SortOrder sortOrder); - public int decodeInt(ImmutableBytesWritable ptr, SortOrder sortOrder); + public int decodeInt(ImmutableBytesWritable ptr, SortOrder sortOrder); - public int decodeInt(byte[] b, int o, SortOrder sortOrder); + public int decodeInt(byte[] b, int o, SortOrder sortOrder); - public byte decodeByte(ImmutableBytesWritable ptr, SortOrder sortOrder); + public byte decodeByte(ImmutableBytesWritable ptr, SortOrder sortOrder); - public byte decodeByte(byte[] b, int o, SortOrder sortOrder); + public byte decodeByte(byte[] b, int o, SortOrder sortOrder); - public short decodeShort(ImmutableBytesWritable ptr, SortOrder sortOrder); + public short decodeShort(ImmutableBytesWritable ptr, SortOrder sortOrder); - public short decodeShort(byte[] b, int o, SortOrder sortOrder); + public short decodeShort(byte[] b, int o, SortOrder sortOrder); - public float decodeFloat(ImmutableBytesWritable ptr, SortOrder sortOrder); + public float decodeFloat(ImmutableBytesWritable ptr, SortOrder sortOrder); - public float decodeFloat(byte[] b, int o, SortOrder sortOrder); + public float decodeFloat(byte[] b, int o, SortOrder sortOrder); - public double decodeDouble(ImmutableBytesWritable ptr, SortOrder sortOrder); + public double decodeDouble(ImmutableBytesWritable ptr, SortOrder sortOrder); - public double decodeDouble(byte[] b, int o, SortOrder sortOrder); + public double decodeDouble(byte[] b, int o, SortOrder sortOrder); - public int encodeLong(long v, ImmutableBytesWritable ptr); + public int encodeLong(long v, ImmutableBytesWritable ptr); - public int encodeLong(long v, byte[] b, int o); + public int encodeLong(long v, byte[] b, int o); - public int encodeInt(int v, ImmutableBytesWritable ptr); + public int encodeInt(int v, ImmutableBytesWritable ptr); - public int encodeInt(int v, byte[] b, int o); + public int encodeInt(int v, byte[] b, int o); - public int encodeByte(byte v, ImmutableBytesWritable ptr); + public int encodeByte(byte v, ImmutableBytesWritable ptr); - public int encodeByte(byte v, byte[] b, int o); + public int encodeByte(byte v, byte[] b, int o); - public int encodeShort(short v, ImmutableBytesWritable ptr); + public int encodeShort(short v, ImmutableBytesWritable ptr); - public int encodeShort(short v, byte[] b, int o); + public int encodeShort(short v, byte[] b, int o); - public int encodeFloat(float v, ImmutableBytesWritable ptr); + public int encodeFloat(float v, ImmutableBytesWritable ptr); - public int encodeFloat(float v, byte[] b, int o); + public int encodeFloat(float v, byte[] b, int o); - public int encodeDouble(double v, ImmutableBytesWritable ptr); + public int encodeDouble(double v, ImmutableBytesWritable ptr); - public int encodeDouble(double v, byte[] b, int o); + public int encodeDouble(double v, byte[] b, int o); - public PhoenixArrayFactory getPhoenixArrayFactory(); - } - - public static abstract class BaseCodec implements PDataCodec { - @Override - public int decodeInt(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeInt(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public long 
decodeLong(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeLong(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public byte decodeByte(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeByte(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public short decodeShort(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeShort(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public float decodeFloat(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeFloat(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { - throw new UnsupportedOperationException(); - } - - @Override - public double decodeDouble(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return decodeDouble(ptr.get(), ptr.getOffset(), sortOrder); - } - - @Override - public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeInt(int v, ImmutableBytesWritable ptr) { - return encodeInt(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeLong(long v, ImmutableBytesWritable ptr) { - return encodeLong(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeByte(byte v, ImmutableBytesWritable ptr) { - return encodeByte(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeShort(short v, ImmutableBytesWritable ptr) { - return encodeShort(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeFloat(float v, ImmutableBytesWritable ptr) { - return encodeFloat(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeDouble(double v, ImmutableBytesWritable ptr) { - return encodeDouble(v, ptr.get(), ptr.getOffset()); - } - - @Override - public int encodeInt(int v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeLong(long v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeByte(byte v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeShort(short v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeFloat(float v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - - @Override - public int encodeDouble(double v, byte[] b, int o) { - throw new UnsupportedOperationException(); - } - } - - public static final int MAX_PRECISION = 38; - // Max precision guaranteed to fit into a long (and this should be plenty) - public static final int MIN_DECIMAL_AVG_SCALE = 4; - public static final MathContext DEFAULT_MATH_CONTEXT = new MathContext(MAX_PRECISION, RoundingMode.HALF_UP); - public static final int DEFAULT_SCALE = 0; - - protected static final Integer MAX_BIG_DECIMAL_BYTES = 21; - protected static final Integer MAX_TIMESTAMP_BYTES = Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; - - protected static final byte ZERO_BYTE = (byte)0x80; - protected static final byte NEG_TERMINAL_BYTE = (byte)102; - protected static final int EXP_BYTE_OFFSET = 65; - protected static final int POS_DIGIT_OFFSET = 1; - protected static final int NEG_DIGIT_OFFSET = 101; - protected static final BigInteger MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE); - protected static final BigInteger MIN_LONG = BigInteger.valueOf(Long.MIN_VALUE); - protected static final long MAX_LONG_FOR_DESERIALIZE = Long.MAX_VALUE / 1000; - protected static final BigInteger 
ONE_HUNDRED = BigInteger.valueOf(100); - - protected static final byte FALSE_BYTE = 0; - protected static final byte TRUE_BYTE = 1; - public static final byte[] FALSE_BYTES = new byte[] { FALSE_BYTE }; - public static final byte[] TRUE_BYTES = new byte[] { TRUE_BYTE }; - public static final byte[] NULL_BYTES = ByteUtil.EMPTY_BYTE_ARRAY; - protected static final Integer BOOLEAN_LENGTH = 1; - - public final static Integer ZERO = 0; - public final static Integer INT_PRECISION = 10; - public final static Integer LONG_PRECISION = 19; - public final static Integer SHORT_PRECISION = 5; - public final static Integer BYTE_PRECISION = 3; - public final static Integer DOUBLE_PRECISION = 15; - - public static final int ARRAY_TYPE_BASE = 3000; - public static final int JSON_TYPE = 5000; - public static final int BSON_TYPE = 7000; - public static final int VARBINARY_ENCODED_TYPE = 9000; - public static final String ARRAY_TYPE_SUFFIX = "ARRAY"; - - protected static final ThreadLocal RANDOM = new ThreadLocal() { - @Override - protected Random initialValue() { - return new Random(); - } - }; - - /** - * Serialize a BigDecimal into a variable length byte array in such a way that it is binary comparable. - * - * @param v - * the BigDecimal - * @param result - * the byte array to contain the serialized bytes. Max size necessary would be 21 bytes. - * @param length - * the number of bytes required to store the big decimal. May be adjusted down if it exceeds - * {@link #MAX_BIG_DECIMAL_BYTES} - * @return the number of bytes that make up the serialized BigDecimal - */ - protected static int toBytes(BigDecimal v, byte[] result, final int offset, int length) { - // From scale to exponent byte (if BigDecimal is positive): (-(scale+(scale % 2 == 0 : 0 : 1)) / 2 + 65) | 0x80 - // If scale % 2 is 1 (i.e. it's odd), then multiple last base-100 digit by 10 - // For example: new BigDecimal(BigInteger.valueOf(1), -4); - // (byte)((-(-4+0) / 2 + 65) | 0x80) = -61 - // From scale to exponent byte (if BigDecimal is negative): ~(-(scale+1)/2 + 65 + 128) & 0x7F - // For example: new BigDecimal(BigInteger.valueOf(1), 2); - // ~(-2/2 + 65 + 128) & 0x7F = 63 - int signum = v.signum(); - if (signum == 0) { - result[offset] = ZERO_BYTE; - return 1; - } - int index = offset + length; - int scale = v.scale(); - int expOffset = scale % 2 * (scale < 0 ? -1 : 1); - // In order to get twice as much of a range for scale, it - // is multiplied by 2. If the scale is an odd number, then - // the first digit is multiplied by 10 to make up for the - // scale being off by one. - int multiplyBy; - BigInteger divideBy; - if (expOffset == 0) { - multiplyBy = 1; - divideBy = ONE_HUNDRED; - } else { - multiplyBy = 10; - divideBy = BigInteger.TEN; - } - // Normalize the scale based on what is necessary to end up with a base 100 decimal (i.e. 
10.123e3) - int digitOffset; - BigInteger compareAgainst; - if (signum == 1) { - digitOffset = POS_DIGIT_OFFSET; - compareAgainst = MAX_LONG; - scale -= (length - 2) * 2; - result[offset] = (byte)((-(scale + expOffset) / 2 + EXP_BYTE_OFFSET) | 0x80); - } else { - digitOffset = NEG_DIGIT_OFFSET; - compareAgainst = MIN_LONG; - // Scale adjustment shouldn't include terminal byte in length - scale -= (length - 2 - 1) * 2; - result[offset] = (byte)(~(-(scale + expOffset) / 2 + EXP_BYTE_OFFSET + 128) & 0x7F); - if (length <= MAX_BIG_DECIMAL_BYTES) { - result[--index] = NEG_TERMINAL_BYTE; - } else { - // Adjust length and offset down because we don't have enough room - length = MAX_BIG_DECIMAL_BYTES; - index = offset + length; - } - } - BigInteger bi = v.unscaledValue(); - // Use BigDecimal arithmetic until we can fit into a long - while (bi.compareTo(compareAgainst) * signum > 0) { - BigInteger[] dandr = bi.divideAndRemainder(divideBy); - bi = dandr[0]; - int digit = dandr[1].intValue(); - result[--index] = (byte)(digit * multiplyBy + digitOffset); - multiplyBy = 1; - divideBy = ONE_HUNDRED; - } - long l = bi.longValue(); - do { - long divBy = 100 / multiplyBy; - long digit = l % divBy; - l /= divBy; - result[--index] = (byte)(digit * multiplyBy + digitOffset); - multiplyBy = 1; - } while (l != 0); - - return length; - } - - /** - * Deserialize a variable length byte array into a BigDecimal. Note that because of the normalization that gets done - * to the scale, if you roundtrip a BigDecimal, it may not be equal before and after. However, the before and after - * number will always compare to be equal {@code (i.e. .compareTo() == 0) } - * - * @param bytes - * the bytes containing the number - * @param offset - * the offset into the byte array - * @param length - * the length of the serialized BigDecimal - * @return the BigDecimal value. - */ - protected static BigDecimal toBigDecimal(byte[] bytes, int offset, int length) { - // From exponent byte back to scale: ( & 0x7F) - 65) * 2 - // For example, (((-63 & 0x7F) - 65) & 0xFF) * 2 = 0 - // Another example: ((-64 & 0x7F) - 65) * 2 = -2 (then swap the sign for the scale) - // If number is negative, going from exponent byte back to scale: (byte)((~ - 65 - 128) * 2) - // For example: new BigDecimal(new BigInteger("-1"), -2); - // (byte)((~61 - 65 - 128) * 2) = 2, so scale is -2 - // Potentially, when switching back, the scale can be added by one and the trailing zero dropped - // For digits, just do a mod 100 on the BigInteger. Use long if BigInteger fits - if (length == 1 && bytes[offset] == ZERO_BYTE) { return BigDecimal.ZERO; } - int signum = ((bytes[offset] & 0x80) == 0) ? -1 : 1; - int scale; - int index; - int digitOffset; - long multiplier = 100L; - int begIndex = offset + 1; - if (signum == 1) { - scale = (byte)(((bytes[offset] & 0x7F) - 65) * -2); - index = offset + length; - digitOffset = POS_DIGIT_OFFSET; - } else { - scale = (byte)((~bytes[offset] - 65 - 128) * -2); - index = offset + length - (bytes[offset + length - 1] == NEG_TERMINAL_BYTE ? 
1 : 0); - digitOffset = -NEG_DIGIT_OFFSET; - } - length = index - offset; - long l = signum * bytes[--index] - digitOffset; - if (l % 10 == 0) { // trailing zero - scale--; // drop trailing zero and compensate in the scale - l /= 10; - multiplier = 10; - } - // Use long arithmetic for as long as we can - while (index > begIndex) { - if (l >= MAX_LONG_FOR_DESERIALIZE || multiplier >= Long.MAX_VALUE / 100) { - multiplier = LongMath.divide(multiplier, 100L, RoundingMode.UNNECESSARY); - break; // Exit loop early so we don't overflow our multiplier - } - int digit100 = signum * bytes[--index] - digitOffset; - l += digit100 * multiplier; - multiplier = LongMath.checkedMultiply(multiplier, 100); - } - - BigInteger bi; - // If still more digits, switch to BigInteger arithmetic - if (index > begIndex) { - bi = BigInteger.valueOf(l); - BigInteger biMultiplier = BigInteger.valueOf(multiplier).multiply(ONE_HUNDRED); - do { - int digit100 = signum * bytes[--index] - digitOffset; - bi = bi.add(biMultiplier.multiply(BigInteger.valueOf(digit100))); - biMultiplier = biMultiplier.multiply(ONE_HUNDRED); - } while (index > begIndex); - if (signum == -1) { - bi = bi.negate(); - } - } else { - bi = BigInteger.valueOf(l * signum); - } - // Update the scale based on the precision - scale += (length - 2) * 2; - BigDecimal v = new BigDecimal(bi, scale); - return v; - } - - // Calculate the precision and scale of a raw decimal bytes. Returns the values as an int - // array. The first value is precision, the second value is scale. - // Default scope for testing - protected static int[] getDecimalPrecisionAndScale(byte[] bytes, int offset, int length, SortOrder sortOrder) { - // 0, which should have no precision nor scale. - if (length == 1 && sortOrder.normalize(bytes[offset]) == ZERO_BYTE) { return new int[] { 0, 0 }; } - int signum = ((sortOrder.normalize(bytes[offset]) & 0x80) == 0) ? -1 : 1; - int scale; - int index; - int digitOffset; - if (signum == 1) { - scale = (byte)(((sortOrder.normalize(bytes[offset]) & 0x7F) - 65) * -2); - index = offset + length; - digitOffset = POS_DIGIT_OFFSET; - } else { - scale = (byte)((~sortOrder.normalize(bytes[offset]) - 65 - 128) * -2); - index = offset + length - (sortOrder.normalize(bytes[offset + length - 1]) == NEG_TERMINAL_BYTE ? 1 : 0); - digitOffset = -NEG_DIGIT_OFFSET; - } - length = index - offset; - int precision = 2 * (length - 1); - int d = signum * sortOrder.normalize(bytes[--index]) - digitOffset; - if (d % 10 == 0) { // trailing zero - // drop trailing zero and compensate in the scale and precision. - d /= 10; - scale--; - precision -= 1; - } - d = signum * sortOrder.normalize(bytes[offset + 1]) - digitOffset; - if (d < 10) { // Leading single digit - // Compensate in the precision. - precision -= 1; - } - // Update the scale based on the precision - scale += (length - 2) * 2; - if (scale < 0) { - precision = precision - scale; - scale = 0; - } - return new int[] { precision, scale }; - } - - public boolean isCoercibleTo(PDataType targetType) { - return this.equals(targetType) || targetType.equals(PVarbinary.INSTANCE) - || targetType.equals(PVarbinaryEncoded.INSTANCE); - } - - // Specialized on enums to take into account type hierarchy (i.e. 
UNSIGNED_LONG is comparable to INTEGER) - public boolean isComparableTo(PDataType targetType) { - return targetType.isCoercibleTo(this) || this.isCoercibleTo(targetType); - } - - public boolean isCoercibleTo(PDataType targetType, Object value) { - return isCoercibleTo(targetType); - } - - /** - * Checks whether or not the value represented by value (or ptr if value is null) is compatible in terms - * of size with the desired max length and scale. The srcType must be coercible to this type. - * @param ptr bytes pointer for the value - * @param value object representation of the value. May be null in which case ptr will be used - * @param srcType the type of the value - * @param sortOrder the sort order of the value - * @param maxLength the max length of the source value or null if not applicable - * @param scale the scale of the source value or null if not applicable - * @param desiredMaxLength the desired max length for the value to be coerced - * @param desiredScale the desired scale for the value to be coerced - * @return true if the value may be coerced without losing precision and false otherwise. - */ - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, SortOrder sortOrder, - Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) { - return true; - } - - public int compareTo(byte[] b1, byte[] b2) { - return compareTo(b1, 0, b1.length, SortOrder.getDefault(), b2, 0, b2.length, SortOrder.getDefault()); - } - - public final int compareTo(ImmutableBytesWritable ptr1, ImmutableBytesWritable ptr2) { - return compareTo(ptr1.get(), ptr1.getOffset(), ptr1.getLength(), SortOrder.getDefault(), ptr2.get(), - ptr2.getOffset(), ptr2.getLength(), SortOrder.getDefault()); - } - - public final int compareTo(byte[] ba1, int offset1, int length1, SortOrder so1, byte[] ba2, int offset2, - int length2, SortOrder so2) { - Preconditions.checkNotNull(so1); - Preconditions.checkNotNull(so2); - if (so1 != so2) { - int length = Math.min(length1, length2); - for (int i = 0; i < length; i++) { - byte b1 = ba1[offset1 + i]; - byte b2 = ba2[offset2 + i]; - if (so1 == SortOrder.DESC) { - b1 = SortOrder.invert(b1); - } else { - b2 = SortOrder.invert(b2); - } - int c = b1 - b2; - if (c != 0) { return c; } - } - return (length1 - length2); - } - return (so1 == SortOrder.DESC ? -1 : 1) * ScanUtil.getComparator(length1 == length2, so1).compare(ba1, offset1, length1, ba2, offset2, length2); - } - - public final int compareTo(ImmutableBytesWritable ptr1, SortOrder ptr1SortOrder, ImmutableBytesWritable ptr2, - SortOrder ptr2SortOrder, PDataType type2) { - return compareTo(ptr1.get(), ptr1.getOffset(), ptr1.getLength(), ptr1SortOrder, ptr2.get(), ptr2.getOffset(), - ptr2.getLength(), ptr2SortOrder, type2); - } - - public int compareTo(Object lhs, Object rhs) { - return compareTo(lhs, rhs, this); - } - - /* - * We need an empty byte array to mean null, since we have no other representation in the row key for null. 
- */ - public final boolean isNull(byte[] value) { - return value == null || value.length == 0; - } - - public byte[] toBytes(Object object, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - byte[] bytes = toBytes(object); - if (sortOrder == SortOrder.DESC) { - SortOrder.invert(bytes, 0, bytes, 0, bytes.length); - } - return bytes; - } - - public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, Integer actualMaxLength, - Integer actualScale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier, boolean expectedRowKeyOrderOptimizable) { - coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, desiredScale, - expectedModifier); - } - - public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, Integer actualMaxLength, - Integer actualScale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - Preconditions.checkNotNull(actualModifier); - Preconditions.checkNotNull(expectedModifier); - if (ptr.getLength() == 0) { return; } - if (this.isBytesComparableWith(actualType)) { // No coerce necessary - if (actualModifier == expectedModifier) { return; } - byte[] b = ptr.copyBytes(); - SortOrder.invert(b, 0, b, 0, b.length); - ptr.set(b); - return; - } - - // Optimization for cases in which we already have the object around - if (o == null) { - o = actualType.toObject(ptr, actualType, actualModifier); - } - - o = toObject(o, actualType); - byte[] b = toBytes(o, expectedModifier); - ptr.set(b); - } - - public final void coerceBytes(ImmutableBytesWritable ptr, PDataType actualType, SortOrder actualModifier, - SortOrder expectedModifier) { - coerceBytes(ptr, null, actualType, null, null, actualModifier, null, null, expectedModifier); - } - - public final void coerceBytes(ImmutableBytesWritable ptr, PDataType actualType, SortOrder actualModifier, - SortOrder expectedModifier, Integer desiredMaxLength) { - coerceBytes(ptr, null, actualType, null, null, actualModifier, desiredMaxLength, null, expectedModifier); - } - - protected static boolean isNonNegativeDate(java.util.Date date) { - return (date == null || date.getTime() >= 0); - } - - //FIXME this is misnamed - protected static void throwIfNonNegativeDate(java.util.Date date) { - if (!isNonNegativeDate(date)) { throw newIllegalDataException("Value may not be negative(" + date + ")"); } - } - - protected static boolean isNonNegativeNumber(Number v) { - return v == null || v.longValue() >= 0; - } - - //FIXME this is misnamed - protected static void throwIfNonNegativeNumber(Number v) { - if (!isNonNegativeNumber(v)) { throw newIllegalDataException("Value may not be negative(" + v + ")"); } - } + public PhoenixArrayFactory getPhoenixArrayFactory(); + } + public static abstract class BaseCodec implements PDataCodec { @Override - public boolean isNullable() { - return false; + public int decodeInt(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeInt(ptr.get(), ptr.getOffset(), sortOrder); } - public abstract Integer getByteSize(); - @Override - public int encodedLength(T val) { - // default implementation based on existing PDataType methods. - return getByteSize(); + public long decodeLong(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeLong(ptr.get(), ptr.getOffset(), sortOrder); } @Override - public int skip(PositionedByteRange pbr) { - // default implementation based on existing PDataType methods. 
- int len = getByteSize(); - pbr.setPosition(pbr.getPosition() + len); - return len; + public byte decodeByte(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeByte(ptr.get(), ptr.getOffset(), sortOrder); } @Override - public boolean isOrderPreserving() { - return true; + public short decodeShort(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeShort(ptr.get(), ptr.getOffset(), sortOrder); } @Override - public boolean isSkippable() { - return true; + public float decodeFloat(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeFloat(ptr.get(), ptr.getOffset(), sortOrder); } @Override - public Order getOrder() { - return Order.ASCENDING; + public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { + throw new UnsupportedOperationException(); } - public abstract boolean isFixedWidth(); - - public abstract int compareTo(Object lhs, Object rhs, PDataType rhsType); - @Override - public int compareTo(PDataType other) { - return Integer.compare(this.ordinal(), other.ordinal()); + public double decodeDouble(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return decodeDouble(ptr.get(), ptr.getOffset(), sortOrder); } - /** - * Convert from the object representation of a data type value into the serialized byte form. - * - * @param object - * the object to convert - * @param bytes - * the byte array into which to put the serialized form of object - * @param offset - * the offset from which to start writing the serialized form - * @return the byte length of the serialized object - */ - public abstract int toBytes(Object object, byte[] bytes, int offset); - @Override - public int encode(PositionedByteRange pbr, T val) { - // default implementation based on existing PDataType methods. - int pos = pbr.getPosition(); - pbr.put(toBytes(val)); - return pbr.getPosition() - pos; + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { + throw new UnsupportedOperationException(); } @Override - public String toString() { - return sqlTypeName; + public int encodeInt(int v, ImmutableBytesWritable ptr) { + return encodeInt(v, ptr.get(), ptr.getOffset()); } - public abstract byte[] toBytes(Object object); - - /** - * Convert from a string to the object representation of a given type - * - * @param value - * a stringified value - * @return the object representation of a string value - */ - public abstract Object toObject(String value); - - /* - * Each enum must override this to define the set of object it may be coerced to - */ - public abstract Object toObject(Object object, PDataType actualType); - - /* - * Each enum must override this to define the set of objects it may create - */ - public abstract Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale); - - public abstract Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException; - - @SuppressWarnings("unchecked") @Override - public T decode(PositionedByteRange pbr) { - // default implementation based on existing PDataType methods. 
- byte[] b = new byte[getByteSize()]; - pbr.get(b); - return (T)toObject(b, 0, b.length, this, SortOrder.ASC, getMaxLength(null), getScale(null)); + public int encodeLong(long v, ImmutableBytesWritable ptr) { + return encodeLong(v, ptr.get(), ptr.getOffset()); } - /* - * Return a valid object of this enum type - */ - public abstract Object getSampleValue(Integer maxLength, Integer arrayLength); - - public final Object getSampleValue() { - return getSampleValue(null); - } - - public final Object getSampleValue(Integer maxLength) { - return getSampleValue(maxLength, null); - } - - public final Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder) { - return toObject(bytes, offset, length, actualType, sortOrder, null, null); - } - - public final Object toObject(byte[] bytes, int offset, int length, PDataType actualType) { - return toObject(bytes, offset, length, actualType, SortOrder.getDefault()); - } - - public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType) { - return toObject(ptr, actualType, SortOrder.getDefault()); - } - - public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, SortOrder sortOrder) { - return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder); - } - - public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder, maxLength, scale); + @Override + public int encodeByte(byte v, ImmutableBytesWritable ptr) { + return encodeByte(v, ptr.get(), ptr.getOffset()); } - public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder, - maxLength, scale, jdbcType); + @Override + public int encodeShort(short v, ImmutableBytesWritable ptr) { + return encodeShort(v, ptr.get(), ptr.getOffset()); } - public final Object toObject(ImmutableBytesWritable ptr, SortOrder sortOrder, Integer maxLength, Integer scale) { - return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), this, sortOrder, maxLength, scale); + @Override + public int encodeFloat(float v, ImmutableBytesWritable ptr) { + return encodeFloat(v, ptr.get(), ptr.getOffset()); } - public final Object toObject(ImmutableBytesWritable ptr) { - return toObject(ptr.get(), ptr.getOffset(), ptr.getLength()); + @Override + public int encodeDouble(double v, ImmutableBytesWritable ptr) { + return encodeDouble(v, ptr.get(), ptr.getOffset()); } - public final Object toObject(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), this, sortOrder); + @Override + public int encodeInt(int v, byte[] b, int o) { + throw new UnsupportedOperationException(); } - public final Object toObject(byte[] bytes, int offset, int length) { - return toObject(bytes, offset, length, this); + @Override + public int encodeLong(long v, byte[] b, int o) { + throw new UnsupportedOperationException(); } - public final Object toObject(byte[] bytes) { - return toObject(bytes, SortOrder.getDefault()); + @Override + public int encodeByte(byte v, byte[] b, int o) { + throw new UnsupportedOperationException(); } - public final Object toObject(byte[] bytes, SortOrder sortOrder) { - return toObject(bytes, 0, bytes.length, this, 
sortOrder); + @Override + public int encodeShort(short v, byte[] b, int o) { + throw new UnsupportedOperationException(); } - public final Object toObject(byte[] bytes, SortOrder sortOrder, PDataType actualType) { - return toObject(bytes, 0, bytes.length, actualType, sortOrder); + @Override + public int encodeFloat(float v, byte[] b, int o) { + throw new UnsupportedOperationException(); } - public static PDataType fromSqlTypeName(String sqlTypeName) { - for (PDataType t : PDataTypeFactory.getInstance().getTypes()) { - if (t.getSqlTypeName().equalsIgnoreCase(sqlTypeName)) return t; + @Override + public int encodeDouble(double v, byte[] b, int o) { + throw new UnsupportedOperationException(); + } + } + + public static final int MAX_PRECISION = 38; + // Max precision guaranteed to fit into a long (and this should be plenty) + public static final int MIN_DECIMAL_AVG_SCALE = 4; + public static final MathContext DEFAULT_MATH_CONTEXT = + new MathContext(MAX_PRECISION, RoundingMode.HALF_UP); + public static final int DEFAULT_SCALE = 0; + + protected static final Integer MAX_BIG_DECIMAL_BYTES = 21; + protected static final Integer MAX_TIMESTAMP_BYTES = Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; + + protected static final byte ZERO_BYTE = (byte) 0x80; + protected static final byte NEG_TERMINAL_BYTE = (byte) 102; + protected static final int EXP_BYTE_OFFSET = 65; + protected static final int POS_DIGIT_OFFSET = 1; + protected static final int NEG_DIGIT_OFFSET = 101; + protected static final BigInteger MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE); + protected static final BigInteger MIN_LONG = BigInteger.valueOf(Long.MIN_VALUE); + protected static final long MAX_LONG_FOR_DESERIALIZE = Long.MAX_VALUE / 1000; + protected static final BigInteger ONE_HUNDRED = BigInteger.valueOf(100); + + protected static final byte FALSE_BYTE = 0; + protected static final byte TRUE_BYTE = 1; + public static final byte[] FALSE_BYTES = new byte[] { FALSE_BYTE }; + public static final byte[] TRUE_BYTES = new byte[] { TRUE_BYTE }; + public static final byte[] NULL_BYTES = ByteUtil.EMPTY_BYTE_ARRAY; + protected static final Integer BOOLEAN_LENGTH = 1; + + public final static Integer ZERO = 0; + public final static Integer INT_PRECISION = 10; + public final static Integer LONG_PRECISION = 19; + public final static Integer SHORT_PRECISION = 5; + public final static Integer BYTE_PRECISION = 3; + public final static Integer DOUBLE_PRECISION = 15; + + public static final int ARRAY_TYPE_BASE = 3000; + public static final int JSON_TYPE = 5000; + public static final int BSON_TYPE = 7000; + public static final int VARBINARY_ENCODED_TYPE = 9000; + public static final String ARRAY_TYPE_SUFFIX = "ARRAY"; + + protected static final ThreadLocal RANDOM = new ThreadLocal() { + @Override + protected Random initialValue() { + return new Random(); + } + }; + + /** + * Serialize a BigDecimal into a variable length byte array in such a way that it is binary + * comparable. the BigDecimal the byte array to contain the serialized bytes. Max size necessary + * would be 21 bytes. the number of bytes required to store the big decimal. May be adjusted down + * if it exceeds {@link #MAX_BIG_DECIMAL_BYTES} + * @return the number of bytes that make up the serialized BigDecimal + */ + protected static int toBytes(BigDecimal v, byte[] result, final int offset, int length) { + // From scale to exponent byte (if BigDecimal is positive): (-(scale+(scale % 2 == 0 : 0 : 1)) / + // 2 + 65) | 0x80 + // If scale % 2 is 1 (i.e. 
it's odd), then multiple last base-100 digit by 10 + // For example: new BigDecimal(BigInteger.valueOf(1), -4); + // (byte)((-(-4+0) / 2 + 65) | 0x80) = -61 + // From scale to exponent byte (if BigDecimal is negative): ~(-(scale+1)/2 + 65 + 128) & 0x7F + // For example: new BigDecimal(BigInteger.valueOf(1), 2); + // ~(-2/2 + 65 + 128) & 0x7F = 63 + int signum = v.signum(); + if (signum == 0) { + result[offset] = ZERO_BYTE; + return 1; + } + int index = offset + length; + int scale = v.scale(); + int expOffset = scale % 2 * (scale < 0 ? -1 : 1); + // In order to get twice as much of a range for scale, it + // is multiplied by 2. If the scale is an odd number, then + // the first digit is multiplied by 10 to make up for the + // scale being off by one. + int multiplyBy; + BigInteger divideBy; + if (expOffset == 0) { + multiplyBy = 1; + divideBy = ONE_HUNDRED; + } else { + multiplyBy = 10; + divideBy = BigInteger.TEN; + } + // Normalize the scale based on what is necessary to end up with a base 100 decimal (i.e. + // 10.123e3) + int digitOffset; + BigInteger compareAgainst; + if (signum == 1) { + digitOffset = POS_DIGIT_OFFSET; + compareAgainst = MAX_LONG; + scale -= (length - 2) * 2; + result[offset] = (byte) ((-(scale + expOffset) / 2 + EXP_BYTE_OFFSET) | 0x80); + } else { + digitOffset = NEG_DIGIT_OFFSET; + compareAgainst = MIN_LONG; + // Scale adjustment shouldn't include terminal byte in length + scale -= (length - 2 - 1) * 2; + result[offset] = (byte) (~(-(scale + expOffset) / 2 + EXP_BYTE_OFFSET + 128) & 0x7F); + if (length <= MAX_BIG_DECIMAL_BYTES) { + result[--index] = NEG_TERMINAL_BYTE; + } else { + // Adjust length and offset down because we don't have enough room + length = MAX_BIG_DECIMAL_BYTES; + index = offset + length; + } + } + BigInteger bi = v.unscaledValue(); + // Use BigDecimal arithmetic until we can fit into a long + while (bi.compareTo(compareAgainst) * signum > 0) { + BigInteger[] dandr = bi.divideAndRemainder(divideBy); + bi = dandr[0]; + int digit = dandr[1].intValue(); + result[--index] = (byte) (digit * multiplyBy + digitOffset); + multiplyBy = 1; + divideBy = ONE_HUNDRED; + } + long l = bi.longValue(); + do { + long divBy = 100 / multiplyBy; + long digit = l % divBy; + l /= divBy; + result[--index] = (byte) (digit * multiplyBy + digitOffset); + multiplyBy = 1; + } while (l != 0); + + return length; + } + + /** + * Deserialize a variable length byte array into a BigDecimal. Note that because of the + * normalization that gets done to the scale, if you roundtrip a BigDecimal, it may not be equal + * before and after. However, the before and after number will always compare to be equal + * {@code (i.e. .compareTo() == 0) } the bytes containing the number the offset + * into the byte array the length of the serialized BigDecimal + * @return the BigDecimal value. + */ + protected static BigDecimal toBigDecimal(byte[] bytes, int offset, int length) { + // From exponent byte back to scale: ( & 0x7F) - 65) * 2 + // For example, (((-63 & 0x7F) - 65) & 0xFF) * 2 = 0 + // Another example: ((-64 & 0x7F) - 65) * 2 = -2 (then swap the sign for the scale) + // If number is negative, going from exponent byte back to scale: (byte)((~ - 65 + // - 128) * 2) + // For example: new BigDecimal(new BigInteger("-1"), -2); + // (byte)((~61 - 65 - 128) * 2) = 2, so scale is -2 + // Potentially, when switching back, the scale can be added by one and the trailing zero dropped + // For digits, just do a mod 100 on the BigInteger. 
Use long if BigInteger fits + if (length == 1 && bytes[offset] == ZERO_BYTE) { + return BigDecimal.ZERO; + } + int signum = ((bytes[offset] & 0x80) == 0) ? -1 : 1; + int scale; + int index; + int digitOffset; + long multiplier = 100L; + int begIndex = offset + 1; + if (signum == 1) { + scale = (byte) (((bytes[offset] & 0x7F) - 65) * -2); + index = offset + length; + digitOffset = POS_DIGIT_OFFSET; + } else { + scale = (byte) ((~bytes[offset] - 65 - 128) * -2); + index = offset + length - (bytes[offset + length - 1] == NEG_TERMINAL_BYTE ? 1 : 0); + digitOffset = -NEG_DIGIT_OFFSET; + } + length = index - offset; + long l = signum * bytes[--index] - digitOffset; + if (l % 10 == 0) { // trailing zero + scale--; // drop trailing zero and compensate in the scale + l /= 10; + multiplier = 10; + } + // Use long arithmetic for as long as we can + while (index > begIndex) { + if (l >= MAX_LONG_FOR_DESERIALIZE || multiplier >= Long.MAX_VALUE / 100) { + multiplier = LongMath.divide(multiplier, 100L, RoundingMode.UNNECESSARY); + break; // Exit loop early so we don't overflow our multiplier + } + int digit100 = signum * bytes[--index] - digitOffset; + l += digit100 * multiplier; + multiplier = LongMath.checkedMultiply(multiplier, 100); + } + + BigInteger bi; + // If still more digits, switch to BigInteger arithmetic + if (index > begIndex) { + bi = BigInteger.valueOf(l); + BigInteger biMultiplier = BigInteger.valueOf(multiplier).multiply(ONE_HUNDRED); + do { + int digit100 = signum * bytes[--index] - digitOffset; + bi = bi.add(biMultiplier.multiply(BigInteger.valueOf(digit100))); + biMultiplier = biMultiplier.multiply(ONE_HUNDRED); + } while (index > begIndex); + if (signum == -1) { + bi = bi.negate(); + } + } else { + bi = BigInteger.valueOf(l * signum); + } + // Update the scale based on the precision + scale += (length - 2) * 2; + BigDecimal v = new BigDecimal(bi, scale); + return v; + } + + // Calculate the precision and scale of a raw decimal bytes. Returns the values as an int + // array. The first value is precision, the second value is scale. + // Default scope for testing + protected static int[] getDecimalPrecisionAndScale(byte[] bytes, int offset, int length, + SortOrder sortOrder) { + // 0, which should have no precision nor scale. + if (length == 1 && sortOrder.normalize(bytes[offset]) == ZERO_BYTE) { + return new int[] { 0, 0 }; + } + int signum = ((sortOrder.normalize(bytes[offset]) & 0x80) == 0) ? -1 : 1; + int scale; + int index; + int digitOffset; + if (signum == 1) { + scale = (byte) (((sortOrder.normalize(bytes[offset]) & 0x7F) - 65) * -2); + index = offset + length; + digitOffset = POS_DIGIT_OFFSET; + } else { + scale = (byte) ((~sortOrder.normalize(bytes[offset]) - 65 - 128) * -2); + index = offset + length + - (sortOrder.normalize(bytes[offset + length - 1]) == NEG_TERMINAL_BYTE ? 1 : 0); + digitOffset = -NEG_DIGIT_OFFSET; + } + length = index - offset; + int precision = 2 * (length - 1); + int d = signum * sortOrder.normalize(bytes[--index]) - digitOffset; + if (d % 10 == 0) { // trailing zero + // drop trailing zero and compensate in the scale and precision. + d /= 10; + scale--; + precision -= 1; + } + d = signum * sortOrder.normalize(bytes[offset + 1]) - digitOffset; + if (d < 10) { // Leading single digit + // Compensate in the precision. 
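The comments above spell out the base-100, offset-and-exponent encoding that makes serialized DECIMAL values sort correctly as raw bytes. A minimal sketch of that contract through the public PDecimal API (PDecimal and Bytes are referenced elsewhere in this diff; comparing with the unsigned Bytes.compareTo is an assumption about how the encoded keys are ordered):

import java.math.BigDecimal;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.schema.types.PDecimal;

public class DecimalEncodingSketch {
  public static void main(String[] args) {
    // Binary comparable: unsigned byte order follows numeric order, even across scales.
    byte[] smaller = PDecimal.INSTANCE.toBytes(new BigDecimal("123.45"));
    byte[] larger = PDecimal.INSTANCE.toBytes(new BigDecimal("123.5"));
    System.out.println(Bytes.compareTo(smaller, larger) < 0); // expected: true

    // A round trip may drop a trailing zero, so equals() can change while
    // compareTo() stays 0, exactly as the toBigDecimal javadoc warns.
    BigDecimal original = new BigDecimal("1.10");
    BigDecimal roundTripped =
      (BigDecimal) PDecimal.INSTANCE.toObject(PDecimal.INSTANCE.toBytes(original));
    System.out.println(original.compareTo(roundTripped) == 0); // expected: true
  }
}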
+ precision -= 1; + } + // Update the scale based on the precision + scale += (length - 2) * 2; + if (scale < 0) { + precision = precision - scale; + scale = 0; + } + return new int[] { precision, scale }; + } + + public boolean isCoercibleTo(PDataType targetType) { + return this.equals(targetType) || targetType.equals(PVarbinary.INSTANCE) + || targetType.equals(PVarbinaryEncoded.INSTANCE); + } + + // Specialized on enums to take into account type hierarchy (i.e. UNSIGNED_LONG is comparable to + // INTEGER) + public boolean isComparableTo(PDataType targetType) { + return targetType.isCoercibleTo(this) || this.isCoercibleTo(targetType); + } + + public boolean isCoercibleTo(PDataType targetType, Object value) { + return isCoercibleTo(targetType); + } + + /** + * Checks whether or not the value represented by value (or ptr if value is null) is compatible in + * terms of size with the desired max length and scale. The srcType must be coercible to this + * type. + * @param ptr bytes pointer for the value + * @param value object representation of the value. May be null in which case ptr will + * be used + * @param srcType the type of the value + * @param sortOrder the sort order of the value + * @param maxLength the max length of the source value or null if not applicable + * @param scale the scale of the source value or null if not applicable + * @param desiredMaxLength the desired max length for the value to be coerced + * @param desiredScale the desired scale for the value to be coerced + * @return true if the value may be coerced without losing precision and false otherwise. + */ + public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + return true; + } + + public int compareTo(byte[] b1, byte[] b2) { + return compareTo(b1, 0, b1.length, SortOrder.getDefault(), b2, 0, b2.length, + SortOrder.getDefault()); + } + + public final int compareTo(ImmutableBytesWritable ptr1, ImmutableBytesWritable ptr2) { + return compareTo(ptr1.get(), ptr1.getOffset(), ptr1.getLength(), SortOrder.getDefault(), + ptr2.get(), ptr2.getOffset(), ptr2.getLength(), SortOrder.getDefault()); + } + + public final int compareTo(byte[] ba1, int offset1, int length1, SortOrder so1, byte[] ba2, + int offset2, int length2, SortOrder so2) { + Preconditions.checkNotNull(so1); + Preconditions.checkNotNull(so2); + if (so1 != so2) { + int length = Math.min(length1, length2); + for (int i = 0; i < length; i++) { + byte b1 = ba1[offset1 + i]; + byte b2 = ba2[offset2 + i]; + if (so1 == SortOrder.DESC) { + b1 = SortOrder.invert(b1); + } else { + b2 = SortOrder.invert(b2); } - throw newIllegalDataException("Unsupported sql type: " + sqlTypeName); - } - - public static int sqlArrayType(String sqlTypeName) { - PDataType fromSqlTypeName = fromSqlTypeName(sqlTypeName); - return fromSqlTypeName.getSqlType() + PDataType.ARRAY_TYPE_BASE; - } - - protected static interface PhoenixArrayFactory { - PhoenixArray newArray(PDataType type, Object[] elements); - } - - public static PDataType fromTypeId(int typeId) { - for (PDataType t : PDataTypeFactory.getInstance().getTypes()) { - if (t.getSqlType() == typeId) return t; + int c = b1 - b2; + if (c != 0) { + return c; } - throw newIllegalDataException("Unsupported sql type: " + typeId); - } - - public String getJavaClassName() { - return getJavaClass().getName(); + } + return (length1 - length2); + } + return (so1 == SortOrder.DESC ? 
-1 : 1) * ScanUtil.getComparator(length1 == length2, so1) + .compare(ba1, offset1, length1, ba2, offset2, length2); + } + + public final int compareTo(ImmutableBytesWritable ptr1, SortOrder ptr1SortOrder, + ImmutableBytesWritable ptr2, SortOrder ptr2SortOrder, PDataType type2) { + return compareTo(ptr1.get(), ptr1.getOffset(), ptr1.getLength(), ptr1SortOrder, ptr2.get(), + ptr2.getOffset(), ptr2.getLength(), ptr2SortOrder, type2); + } + + public int compareTo(Object lhs, Object rhs) { + return compareTo(lhs, rhs, this); + } + + /* + * We need an empty byte array to mean null, since we have no other representation in the row key + * for null. + */ + public final boolean isNull(byte[] value) { + return value == null || value.length == 0; + } + + public byte[] toBytes(Object object, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + byte[] bytes = toBytes(object); + if (sortOrder == SortOrder.DESC) { + SortOrder.invert(bytes, 0, bytes, 0, bytes.length); + } + return bytes; + } + + public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, + Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, + Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier, + boolean expectedRowKeyOrderOptimizable) { + coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + } + + public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, + Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, + Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { + Preconditions.checkNotNull(actualModifier); + Preconditions.checkNotNull(expectedModifier); + if (ptr.getLength() == 0) { + return; + } + if (this.isBytesComparableWith(actualType)) { // No coerce necessary + if (actualModifier == expectedModifier) { + return; + } + byte[] b = ptr.copyBytes(); + SortOrder.invert(b, 0, b, 0, b.length); + ptr.set(b); + return; + } + + // Optimization for cases in which we already have the object around + if (o == null) { + o = actualType.toObject(ptr, actualType, actualModifier); + } + + o = toObject(o, actualType); + byte[] b = toBytes(o, expectedModifier); + ptr.set(b); + } + + public final void coerceBytes(ImmutableBytesWritable ptr, PDataType actualType, + SortOrder actualModifier, SortOrder expectedModifier) { + coerceBytes(ptr, null, actualType, null, null, actualModifier, null, null, expectedModifier); + } + + public final void coerceBytes(ImmutableBytesWritable ptr, PDataType actualType, + SortOrder actualModifier, SortOrder expectedModifier, Integer desiredMaxLength) { + coerceBytes(ptr, null, actualType, null, null, actualModifier, desiredMaxLength, null, + expectedModifier); + } + + protected static boolean isNonNegativeDate(java.util.Date date) { + return (date == null || date.getTime() >= 0); + } + + // FIXME this is misnamed + protected static void throwIfNonNegativeDate(java.util.Date date) { + if (!isNonNegativeDate(date)) { + throw newIllegalDataException("Value may not be negative(" + date + ")"); + } + } + + protected static boolean isNonNegativeNumber(Number v) { + return v == null || v.longValue() >= 0; + } + + // FIXME this is misnamed + protected static void throwIfNonNegativeNumber(Number v) { + if (!isNonNegativeNumber(v)) { + throw newIllegalDataException("Value may not be negative(" + v + ")"); + } + } + + @Override + public boolean isNullable() { + return false; + } + + public abstract Integer 
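Because the eight-argument compareTo above re-inverts whichever operand is stored DESC before comparing, operands with mixed sort orders still compare by value. A small sketch, using PInteger as an arbitrary fixed-width type (PInteger itself is an assumption here; toBytes(Object, SortOrder) and the eight-argument compareTo are the methods shown above):

import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;

public class MixedSortOrderCompareSketch {
  public static void main(String[] args) {
    byte[] asc5 = PInteger.INSTANCE.toBytes(5);                  // ascending encoding of 5
    byte[] desc7 = PInteger.INSTANCE.toBytes(7, SortOrder.DESC); // inverted (descending) encoding of 7
    // The DESC operand is inverted back inside compareTo, so the result reflects
    // the values themselves: 5 < 7 regardless of each operand's on-disk order.
    int cmp = PInteger.INSTANCE.compareTo(asc5, 0, asc5.length, SortOrder.ASC,
      desc7, 0, desc7.length, SortOrder.DESC);
    System.out.println(cmp < 0); // expected: true
  }
}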
getByteSize(); + + @Override + public int encodedLength(T val) { + // default implementation based on existing PDataType methods. + return getByteSize(); + } + + @Override + public int skip(PositionedByteRange pbr) { + // default implementation based on existing PDataType methods. + int len = getByteSize(); + pbr.setPosition(pbr.getPosition() + len); + return len; + } + + @Override + public boolean isOrderPreserving() { + return true; + } + + @Override + public boolean isSkippable() { + return true; + } + + @Override + public Order getOrder() { + return Order.ASCENDING; + } + + public abstract boolean isFixedWidth(); + + public abstract int compareTo(Object lhs, Object rhs, PDataType rhsType); + + @Override + public int compareTo(PDataType other) { + return Integer.compare(this.ordinal(), other.ordinal()); + } + + /** + * Convert from the object representation of a data type value into the serialized byte form. the + * object to convert the byte array into which to put the serialized form of object the offset + * from which to start writing the serialized form + * @return the byte length of the serialized object + */ + public abstract int toBytes(Object object, byte[] bytes, int offset); + + @Override + public int encode(PositionedByteRange pbr, T val) { + // default implementation based on existing PDataType methods. + int pos = pbr.getPosition(); + pbr.put(toBytes(val)); + return pbr.getPosition() - pos; + } + + @Override + public String toString() { + return sqlTypeName; + } + + public abstract byte[] toBytes(Object object); + + /** + * Convert from a string to the object representation of a given type a stringified value + * @return the object representation of a string value + */ + public abstract Object toObject(String value); + + /* + * Each enum must override this to define the set of object it may be coerced to + */ + public abstract Object toObject(Object object, PDataType actualType); + + /* + * Each enum must override this to define the set of objects it may create + */ + public abstract Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale); + + public abstract Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException; + + @SuppressWarnings("unchecked") + @Override + public T decode(PositionedByteRange pbr) { + // default implementation based on existing PDataType methods. 
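The encodedLength/encode/skip/decode defaults above are what let a fixed-width PDataType act as an HBase DataType over a PositionedByteRange. A rough round-trip sketch, assuming HBase's SimplePositionedMutableByteRange as the buffer implementation and PInteger as the fixed-width type (both are assumptions; only PositionedByteRange and the default methods appear above):

import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
import org.apache.phoenix.schema.types.PInteger;

public class DataTypeBridgeSketch {
  public static void main(String[] args) {
    PositionedByteRange buf =
      new SimplePositionedMutableByteRange(PInteger.INSTANCE.getByteSize());
    PInteger.INSTANCE.encode(buf, 42); // default encode(): pbr.put(toBytes(val))
    buf.setPosition(0);
    Object decoded = PInteger.INSTANCE.decode(buf); // default decode(): fixed-width read + toObject
    System.out.println(decoded); // expected: 42
  }
}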
+ byte[] b = new byte[getByteSize()]; + pbr.get(b); + return (T) toObject(b, 0, b.length, this, SortOrder.ASC, getMaxLength(null), getScale(null)); + } + + /* + * Return a valid object of this enum type + */ + public abstract Object getSampleValue(Integer maxLength, Integer arrayLength); + + public final Object getSampleValue() { + return getSampleValue(null); + } + + public final Object getSampleValue(Integer maxLength) { + return getSampleValue(maxLength, null); + } + + public final Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder) { + return toObject(bytes, offset, length, actualType, sortOrder, null, null); + } + + public final Object toObject(byte[] bytes, int offset, int length, PDataType actualType) { + return toObject(bytes, offset, length, actualType, SortOrder.getDefault()); + } + + public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType) { + return toObject(ptr, actualType, SortOrder.getDefault()); + } + + public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, + SortOrder sortOrder) { + return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder); + } + + public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder, + maxLength, scale); + } + + public final Object toObject(ImmutableBytesWritable ptr, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), actualType, sortOrder, + maxLength, scale, jdbcType); + } + + public final Object toObject(ImmutableBytesWritable ptr, SortOrder sortOrder, Integer maxLength, + Integer scale) { + return this.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), this, sortOrder, maxLength, + scale); + } + + public final Object toObject(ImmutableBytesWritable ptr) { + return toObject(ptr.get(), ptr.getOffset(), ptr.getLength()); + } + + public final Object toObject(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), this, sortOrder); + } + + public final Object toObject(byte[] bytes, int offset, int length) { + return toObject(bytes, offset, length, this); + } + + public final Object toObject(byte[] bytes) { + return toObject(bytes, SortOrder.getDefault()); + } + + public final Object toObject(byte[] bytes, SortOrder sortOrder) { + return toObject(bytes, 0, bytes.length, this, sortOrder); + } + + public final Object toObject(byte[] bytes, SortOrder sortOrder, PDataType actualType) { + return toObject(bytes, 0, bytes.length, actualType, sortOrder); + } + + public static PDataType fromSqlTypeName(String sqlTypeName) { + for (PDataType t : PDataTypeFactory.getInstance().getTypes()) { + if (t.getSqlTypeName().equalsIgnoreCase(sqlTypeName)) return t; + } + throw newIllegalDataException("Unsupported sql type: " + sqlTypeName); + } + + public static int sqlArrayType(String sqlTypeName) { + PDataType fromSqlTypeName = fromSqlTypeName(sqlTypeName); + return fromSqlTypeName.getSqlType() + PDataType.ARRAY_TYPE_BASE; + } + + protected static interface PhoenixArrayFactory { + PhoenixArray newArray(PDataType type, Object[] elements); + } + + public static PDataType fromTypeId(int typeId) { + for (PDataType t : PDataTypeFactory.getInstance().getTypes()) { + if (t.getSqlType() == typeId) 
return t; + } + throw newIllegalDataException("Unsupported sql type: " + typeId); + } + + public String getJavaClassName() { + return getJavaClass().getName(); + } + + public byte[] getJavaClassNameBytes() { + return clazzNameBytes; + } + + public byte[] getSqlTypeNameBytes() { + return sqlTypeNameBytes; + } + + /** + * By default returns sqlType for the PDataType, however it allows unknown types (our unsigned + * types) to return the regular corresponding sqlType so that tools like SQuirrel correctly + * display values of this type. + * @return integer representing the SQL type for display of a result set of this type + */ + public int getResultSetSqlType() { + return this.sqlType; + } + + public KeyRange getKeyRange(byte[] point, SortOrder order) { + return getKeyRange(point, true, point, true, order); + } + + public final String toStringLiteral(ImmutableBytesWritable ptr, Format formatter) { + return toStringLiteral(ptr.get(), ptr.getOffset(), ptr.getLength(), formatter); + } + + public final String toStringLiteral(byte[] b, Format formatter) { + return toStringLiteral(b, 0, b.length, formatter); + } + + public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { + Object o = toObject(b, offset, length); + return toStringLiteral(o, formatter); + } + + public String toStringLiteral(Object o, Format formatter) { + if (o == null) { + return String.valueOf(o); + } + if (formatter != null) { + return formatter.format(o); + } + return o.toString(); + } + + public String toStringLiteral(Object o) { + // use default formatter when one is unspecified + return toStringLiteral(o, null); + } + + private static final PhoenixArrayFactory DEFAULT_ARRAY_FACTORY = new PhoenixArrayFactory() { + @Override + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray(type, elements); } + }; - public byte[] getJavaClassNameBytes() { - return clazzNameBytes; - } + public PhoenixArrayFactory getArrayFactory() { + if (getCodec() != null) return getCodec().getPhoenixArrayFactory(); + else return DEFAULT_ARRAY_FACTORY; + } - public byte[] getSqlTypeNameBytes() { - return sqlTypeNameBytes; - } + public static PhoenixArray instantiatePhoenixArray(PDataType actualType, Object[] elements) { + return actualType.getArrayFactory().newArray(actualType, elements); + } - /** - * By default returns sqlType for the PDataType, however it allows unknown types (our unsigned types) to return the - * regular corresponding sqlType so that tools like SQuirrel correctly display values of this type. - * - * @return integer representing the SQL type for display of a result set of this type + public KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive, SortOrder order) { + /* + * Force lower bound to be inclusive for fixed width keys because it makes comparisons less + * expensive when you can count on one bound or the other being inclusive. Comparing two fixed + * width exclusive bounds against each other is inherently more expensive, because you need to + * take into account if the bigger key is equal to the next key after the smaller key. For + * example: (A-B] compared against [A-B) An exclusive lower bound A is bigger than an exclusive + * upper bound B. Forcing a fixed width exclusive lower bound key to be inclusive prevents us + * from having to do this extra logic in the compare function. 
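fromSqlTypeName, sqlArrayType and fromTypeId above resolve types by scanning the registered PDataType instances, with array types addressed as the base sqlType plus ARRAY_TYPE_BASE (3000). A short lookup sketch (the printed names depend on each type's registered sqlTypeName, which is not asserted here):

import org.apache.phoenix.schema.types.PDataType;

public class TypeLookupSketch {
  public static void main(String[] args) {
    // Name-based lookup is case-insensitive per fromSqlTypeName() above.
    PDataType varchar = PDataType.fromSqlTypeName("varchar");
    // Array types are addressed as base sqlType + ARRAY_TYPE_BASE.
    int varcharArrayId = PDataType.sqlArrayType("VARCHAR");
    PDataType varcharArray = PDataType.fromTypeId(varcharArrayId);
    // toString() returns the sqlTypeName, so this prints VARCHAR and its ARRAY counterpart.
    System.out.println(varchar + " / " + varcharArray);
  }
}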
*/ - public int getResultSetSqlType() { - return this.sqlType; - } - - public KeyRange getKeyRange(byte[] point, SortOrder order) { - return getKeyRange(point, true, point, true, order); - } - - public final String toStringLiteral(ImmutableBytesWritable ptr, Format formatter) { - return toStringLiteral(ptr.get(), ptr.getOffset(), ptr.getLength(), formatter); - } - - public final String toStringLiteral(byte[] b, Format formatter) { - return toStringLiteral(b, 0, b.length, formatter); - } - - public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { - Object o = toObject(b, offset, length); - return toStringLiteral(o, formatter); - } - - public String toStringLiteral(Object o, Format formatter) { - if (o == null) { - return String.valueOf(o); - } - if (formatter != null) { - return formatter.format(o); - } - return o.toString(); - } - - public String toStringLiteral(Object o) { - // use default formatter when one is unspecified - return toStringLiteral(o, null); - } - - private static final PhoenixArrayFactory DEFAULT_ARRAY_FACTORY = new PhoenixArrayFactory() { - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray(type, elements); + if (lowerRange != KeyRange.UNBOUND && !lowerInclusive && isFixedWidth()) { + lowerRange = ByteUtil.nextKey(lowerRange); + if (lowerRange == null) { // overflow + lowerRange = KeyRange.UNBOUND; + } + lowerInclusive = true; + } + return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, + order == SortOrder.DESC); + } + + // TODO this could be improved by some lookup tables instead of iterating over all types + public static PDataType fromLiteral(Object value) { + if (value == null) { + return null; + } + for (PDataType type : PDataType.values()) { + if (type.isArrayType()) { + if (type.getJavaClass().isInstance(value)) { + if (type.isArrayType()) { + PhoenixArray arr = (PhoenixArray) value; + if ((type.getSqlType() == arr.baseType.sqlType + PDataType.ARRAY_TYPE_BASE)) { + return type; + } + } else { + return type; + } } - }; - - public PhoenixArrayFactory getArrayFactory() { - if (getCodec() != null) - return getCodec().getPhoenixArrayFactory(); - else - return DEFAULT_ARRAY_FACTORY; - } - - public static PhoenixArray instantiatePhoenixArray(PDataType actualType, Object[] elements) { - return actualType.getArrayFactory().newArray(actualType, elements); - } - public KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, - boolean upperInclusive, SortOrder order) { - /* - * Force lower bound to be inclusive for fixed width keys because it makes comparisons less - * expensive when you can count on one bound or the other being inclusive. Comparing two - * fixed width exclusive bounds against each other is inherently more expensive, because you - * need to take into account if the bigger key is equal to the next key after the smaller - * key. For example: (A-B] compared against [A-B) An exclusive lower bound A is bigger than - * an exclusive upper bound B. Forcing a fixed width exclusive lower bound key to be - * inclusive prevents us from having to do this extra logic in the compare function. 
- */ - if (lowerRange != KeyRange.UNBOUND && !lowerInclusive && isFixedWidth()) { - lowerRange = ByteUtil.nextKey(lowerRange); - if (lowerRange == null) { // overflow - lowerRange = KeyRange.UNBOUND; + if (value instanceof PhoenixArray) { + PhoenixArray arr = (PhoenixArray) value; + if ( + (type.getSqlType() == arr.baseType.sqlType + PDataType.ARRAY_TYPE_BASE) + && type.getJavaClass().isInstance(value) + ) { + return type; + } + } else if (value instanceof Array) { + Array arr = (Array) value; + try { + // Does the array's component type make sense for what we were told it is + if (arr.getBaseType() == type.getSqlType() - PDataType.ARRAY_TYPE_BASE) { + return type; } - lowerInclusive = true; + } catch (SQLException e) { + /* Passthrough to fail */ } } - return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, - order == SortOrder.DESC); - } - - //TODO this could be improved by some lookup tables instead of iterating over all types - public static PDataType fromLiteral(Object value) { - if (value == null) { - return null; + } else if (value instanceof BsonDocument) { + if (type == PBson.INSTANCE) { + return type; } - for (PDataType type : PDataType.values()) { - if (type.isArrayType()) { - if(type.getJavaClass().isInstance(value)){ - if (type.isArrayType()) { - PhoenixArray arr = (PhoenixArray) value; - if ((type.getSqlType() == arr.baseType.sqlType - + PDataType.ARRAY_TYPE_BASE)) { - return type; - } - } else { - return type; - } - } - - if (value instanceof PhoenixArray) { - PhoenixArray arr = (PhoenixArray)value; - if ((type.getSqlType() == arr.baseType.sqlType + PDataType.ARRAY_TYPE_BASE) - && type.getJavaClass().isInstance(value)) { - return type; - } - } else if (value instanceof Array) { - Array arr = (Array) value; - try { - // Does the array's component type make sense for what we were told it is - if (arr.getBaseType() == type.getSqlType() - PDataType.ARRAY_TYPE_BASE) { - return type; - } - } catch (SQLException e) { /* Passthrough to fail */ } - } - } else if (value instanceof BsonDocument) { - if (type == PBson.INSTANCE) { - return type; - } - } else { - if (type.getJavaClass().isInstance(value)) { - return type; - } - } + } else { + if (type.getJavaClass().isInstance(value)) { + return type; } - throw new UnsupportedOperationException("Unsupported literal value [" + value + "] of type " - + value.getClass().getName()); + } } + throw new UnsupportedOperationException( + "Unsupported literal value [" + value + "] of type " + value.getClass().getName()); + } - public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { - throw new UnsupportedOperationException("Operation not supported for type " + this); - } + public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { + throw new UnsupportedOperationException("Operation not supported for type " + this); + } - public long getMillis(ImmutableBytesWritable ptr, SortOrder sortOrder) { - throw new UnsupportedOperationException("Operation not supported for type " + this); - } + public long getMillis(ImmutableBytesWritable ptr, SortOrder sortOrder) { + throw new UnsupportedOperationException("Operation not supported for type " + this); + } - public Object pad(Object object, Integer maxLength) { - return object; - } + public Object pad(Object object, Integer maxLength) { + return object; + } - public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {} - public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { return b; } + public void 
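The getKeyRange comment above explains why an exclusive lower bound on a fixed-width type is rewritten as an inclusive bound on ByteUtil.nextKey(lowerRange). A minimal sketch of that normalization, again assuming PInteger as the fixed-width type (KeyRange, ByteUtil and SortOrder are the classes referenced above):

import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;

public class FixedWidthKeyRangeSketch {
  public static void main(String[] args) {
    byte[] lower = PInteger.INSTANCE.toBytes(5);
    byte[] upper = PInteger.INSTANCE.toBytes(10);
    // The exclusive lower bound is replaced with an inclusive bound on the next key,
    // i.e. effectively [6, 10] for the ascending INTEGER encoding.
    KeyRange range = PInteger.INSTANCE.getKeyRange(lower, false, upper, true, SortOrder.ASC);
    System.out.println(range);
  }
}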
pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) { + } - public static PDataType arrayBaseType(PDataType arrayType) { - Preconditions.checkArgument(arrayType.isArrayType(), "Not a phoenix array type"); - return fromTypeId(arrayType.getSqlType() - ARRAY_TYPE_BASE); - } + public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) { + return b; + } + + public static PDataType arrayBaseType(PDataType arrayType) { + Preconditions.checkArgument(arrayType.isArrayType(), "Not a phoenix array type"); + return fromTypeId(arrayType.getSqlType() - ARRAY_TYPE_BASE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataTypeFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataTypeFactory.java index 89fb7ce7205..648426bd90c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataTypeFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDataTypeFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -50,13 +50,13 @@ private PDataTypeFactory() { public int compare(PDataType o1, PDataType o2) { return Integer.compare(o1.ordinal(), o2.ordinal()); } - }); // TODO: replace with ServiceLoader or some other plugin system + }); // TODO: replace with ServiceLoader or some other plugin system unsignedtypes = new TreeSet<>(new Comparator() { - @Override - public int compare(PDataType o1, PDataType o2) { - return Integer.compare(o1.ordinal(), o2.ordinal()); - } - }); // TODO: replace with ServiceLoader or some other plugin system + @Override + public int compare(PDataType o1, PDataType o2) { + return Integer.compare(o1.ordinal(), o2.ordinal()); + } + }); // TODO: replace with ServiceLoader or some other plugin system types.add(PBinary.INSTANCE); types.add(PBinaryArray.INSTANCE); types.add(PChar.INSTANCE); @@ -133,15 +133,15 @@ public int compare(PDataType o1, PDataType o2) { } javaClassToInstance = new HashMap<>(types.size()); for (PDataType t : types) { - Class javaClass = t.getJavaClass(); - // The first match - javaClassToInstance.putIfAbsent(javaClass, t); + Class javaClass = t.getJavaClass(); + // The first match + javaClassToInstance.putIfAbsent(javaClass, t); } javaClassToUnsignedInstance = new HashMap<>(types.size()); for (PDataType t : unsignedtypes) { - Class javaClass = t.getJavaClass(); - // The first match - javaClassToInstance.putIfAbsent(javaClass, t); + Class javaClass = t.getJavaClass(); + // The first match + javaClassToInstance.putIfAbsent(javaClass, t); } orderedTypes = types.toArray(new PDataType[types.size()]); } @@ -159,10 +159,10 @@ public PDataType instanceFromClass(Class clazz) { } public PDataType instanceFromJavaClass(Class clazz, PDataType actualType) { - if (unsignedtypes.contains(actualType)) { - return javaClassToUnsignedInstance.get(clazz); - } else { - return javaClassToInstance.get(clazz); - } + if (unsignedtypes.contains(actualType)) { + return javaClassToUnsignedInstance.get(clazz); + } else { + return javaClassToInstance.get(clazz); + } } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDate.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDate.java index c98a8eb4af3..e06b05e9b78 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDate.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDate.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,216 +32,220 @@ public class PDate extends PDataType { - public static final PDate INSTANCE = new PDate(); + public static final PDate INSTANCE = new PDate(); + + private PDate() { + super("DATE", Types.DATE, Date.class, new DateCodec(), 11); // After TIMESTAMP and DATE to + // ensure toLiteral finds those + // first + } + + @Override + public byte[] toBytes(Object object) { + byte[] bytes = new byte[getByteSize()]; + toBytes(object, bytes, 0); + return bytes; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + getCodec().encodeLong(0l, bytes, offset); + return this.getByteSize(); + } + getCodec().encodeLong(((java.util.Date) object).getTime(), bytes, offset); + return this.getByteSize(); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + if (object == null) { + return null; + } + if (equalsAny(actualType, PTime.INSTANCE, PUnsignedTime.INSTANCE)) { + return new Date(((java.sql.Time) object).getTime()); + } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + return new Date(((java.sql.Timestamp) object).getTime()); + } else if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE)) { + return object; + } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { + return new Date((Long) object); + } else if (actualType == PDecimal.INSTANCE) { + return new Date(((BigDecimal) object).longValueExact()); + } else if (actualType == PVarchar.INSTANCE) { + return DateUtil.parseDate((String) object); + } + return throwConstraintViolationException(actualType, this); + } + + @Override + public Date toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + if (l == 0) { + return null; + } + if (actualType.getCodec() != null) { + return new Date(actualType.getCodec().decodeLong(b, o, sortOrder)); + } else if (actualType == PTimestamp.INSTANCE) { + return new Date(PDate.INSTANCE.getCodec().decodeLong(b, o, sortOrder)); + } else if (actualType == PUnsignedTimestamp.INSTANCE) { + return new Date(PUnsignedDate.INSTANCE.getCodec().decodeLong(b, o, sortOrder)); + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); + return new Date(bd.longValueExact()); + } + throwConstraintViolationException(actualType, this); + return null; + } + + // Keep this in sync with PUnsignedDate + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + java.sql.Date sqlDate = + toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + return dateToClass(sqlDate, actualType, jdbcType); + } + + Object dateToClass(java.sql.Date sqlDate, PDataType 
actualType, Class jdbcType) + throws SQLException { + // FIXME java.time.Local conversions use ISO chronology, unlike the rest of Phoenix. + if (jdbcType == java.time.LocalDate.class) { + // FIXME this does a lot of unnecessary computation. + return java.time.LocalDateTime + .ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), ZoneOffset.UTC).toLocalDate(); + } else if (jdbcType == java.time.LocalDateTime.class) { + // This is NOT JDBC compliant, but is useful because Dates are really Timestamps. + // We cannot use toInstant(), as that nulls the time fields. + return java.time.LocalDateTime.ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), + ZoneOffset.UTC); + } else if (jdbcType == java.time.LocalTime.class) { + // This is NOT JDBC compliant, but is useful because Dates are really Timestamps. + return java.time.LocalDateTime + .ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), ZoneOffset.UTC).toLocalTime(); + } else if (jdbcType == java.sql.Date.class) { + return sqlDate; + } else if (jdbcType == java.sql.Time.class) { + return new java.sql.Time(sqlDate.getTime()); + } else if (jdbcType == java.sql.Timestamp.class) { + return new java.sql.Timestamp(sqlDate.getTime()); + } else if (jdbcType == java.util.Date.class) { + return new java.util.Date(sqlDate.getTime()); + } + throw newMismatchException(actualType, jdbcType); + } + + @Override + public boolean isCastableTo(PDataType targetType) { + return super.isCastableTo(targetType) + || equalsAny(targetType, PDecimal.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, PDate.INSTANCE, PTime.INSTANCE, PTimestamp.INSTANCE, + PVarbinary.INSTANCE, PBinary.INSTANCE); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value != null) { + if ( + equalsAny(targetType, PUnsignedTimestamp.INSTANCE, PUnsignedDate.INSTANCE, + PUnsignedTime.INSTANCE) + ) { + return ((java.util.Date) value).getTime() >= 0; + } + } + return super.isCoercibleTo(targetType, value); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return Bytes.SIZEOF_LONG; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + if (rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) { + return -rhsType.compareTo(rhs, lhs, PTime.INSTANCE); + } + return ((java.util.Date) lhs).compareTo((java.util.Date) rhs); + } + + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + return DateUtil.parseDate(value); + } + + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return super.isBytesComparableWith(otherType) || otherType == PTime.INSTANCE + || otherType == PTimestamp.INSTANCE || otherType == PLong.INSTANCE; + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + if (formatter == null) { + // If default formatter has not been overridden, + // use default one. + formatter = DateUtil.DEFAULT_DATE_FORMATTER; + } + return null == o + ? 
String.valueOf(o) + : "'" + StringUtil.escapeStringConstant(super.toStringLiteral(o, formatter)) + "'"; + } + + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, + Integer desiredScale, SortOrder expectedModifier) { + // Decrease size of TIMESTAMP to size of DATE and continue coerce + if (ptr.getLength() > getByteSize()) { + ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); + } + super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return new Date((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + static class DateCodec extends PLong.LongCodec { - private PDate() { - super("DATE", Types.DATE, Date.class, - new DateCodec(), 11); // After TIMESTAMP and DATE to ensure toLiteral finds those first - } - - @Override - public byte[] toBytes(Object object) { - byte[] bytes = new byte[getByteSize()]; - toBytes(object, bytes, 0); - return bytes; - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - getCodec().encodeLong(0l, bytes, offset); - return this.getByteSize(); - } - getCodec().encodeLong(((java.util.Date) object).getTime(), bytes, offset); - return this.getByteSize(); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - if (object == null) { - return null; - } - if (equalsAny(actualType, PTime.INSTANCE, PUnsignedTime.INSTANCE)) { - return new Date(((java.sql.Time) object).getTime()); - } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { - return new Date(((java.sql.Timestamp) object).getTime()); - } else if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE)) { - return object; - } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { - return new Date((Long) object); - } else if (actualType == PDecimal.INSTANCE) { - return new Date(((BigDecimal) object).longValueExact()); - } else if (actualType == PVarchar.INSTANCE) { - return DateUtil.parseDate((String) object); - } - return throwConstraintViolationException(actualType, this); - } - - @Override - public Date toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - if (l == 0) { - return null; - } - if (actualType.getCodec() != null ) { - return new Date(actualType.getCodec().decodeLong(b, o, sortOrder)); - } else if (actualType == PTimestamp.INSTANCE) { - return new Date(PDate.INSTANCE.getCodec().decodeLong(b, o, sortOrder)); - } else if (actualType == PUnsignedTimestamp.INSTANCE) { - return new Date(PUnsignedDate.INSTANCE.getCodec().decodeLong(b, o, sortOrder)); - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); - return new Date(bd.longValueExact()); - } - throwConstraintViolationException(actualType, this); - return null; - } - - // Keep this in sync with PUnsignedDate - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - java.sql.Date sqlDate = - toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - return dateToClass(sqlDate, actualType, jdbcType); - } - - Object 
dateToClass(java.sql.Date sqlDate, PDataType actualType, Class jdbcType) - throws SQLException { - // FIXME java.time.Local conversions use ISO chronology, unlike the rest of Phoenix. - if (jdbcType == java.time.LocalDate.class) { - // FIXME this does a lot of unnecessary computation. - return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), ZoneOffset.UTC) - .toLocalDate(); - } else if (jdbcType == java.time.LocalDateTime.class) { - // This is NOT JDBC compliant, but is useful because Dates are really Timestamps. - // We cannot use toInstant(), as that nulls the time fields. - return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), ZoneOffset.UTC); - } else if (jdbcType == java.time.LocalTime.class) { - // This is NOT JDBC compliant, but is useful because Dates are really Timestamps. - return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlDate.getTime()), ZoneOffset.UTC) - .toLocalTime(); - } else if (jdbcType == java.sql.Date.class) { - return sqlDate; - } else if (jdbcType == java.sql.Time.class) { - return new java.sql.Time(sqlDate.getTime()); - } else if (jdbcType == java.sql.Timestamp.class) { - return new java.sql.Timestamp(sqlDate.getTime()); - } else if (jdbcType == java.util.Date.class) { - return new java.util.Date(sqlDate.getTime()); - } - throw newMismatchException(actualType, jdbcType); - } - - @Override - public boolean isCastableTo(PDataType targetType) { - return super.isCastableTo(targetType) || - equalsAny(targetType, PDecimal.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, PDate.INSTANCE, PTime.INSTANCE, PTimestamp.INSTANCE, PVarbinary.INSTANCE, PBinary.INSTANCE); - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value != null) { - if (equalsAny(targetType, PUnsignedTimestamp.INSTANCE, PUnsignedDate.INSTANCE, - PUnsignedTime.INSTANCE)) { - return ((java.util.Date) value).getTime() >= 0; - } - } - return super.isCoercibleTo(targetType, value); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public Integer getByteSize() { - return Bytes.SIZEOF_LONG; - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - if (rhsType == PTimestamp.INSTANCE || rhsType == PUnsignedTimestamp.INSTANCE) { - return -rhsType.compareTo(rhs, lhs, PTime.INSTANCE); - } - return ((java.util.Date) lhs).compareTo((java.util.Date) rhs); - } - - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - return DateUtil.parseDate(value); - } - - @Override - public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType == PTime.INSTANCE || otherType == PTimestamp.INSTANCE || otherType == PLong.INSTANCE; - } - - @Override - public String toStringLiteral(Object o, Format formatter) { - if (formatter == null) { - // If default formatter has not been overridden, - // use default one. - formatter = DateUtil.DEFAULT_DATE_FORMATTER; - } - return null == o ? 
String.valueOf(o) : "'" - + StringUtil.escapeStringConstant(super.toStringLiteral(o, formatter)) + "'"; - } - - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - // Decrease size of TIMESTAMP to size of DATE and continue coerce - if (ptr.getLength() > getByteSize()) { - ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); - } - super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, - desiredScale, expectedModifier); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new Date((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); - } - - static class DateCodec extends PLong.LongCodec { - - @Override - public int decodeInt(byte[] b, int o, SortOrder sortOrder) { - throw new UnsupportedOperationException(); - } + @Override + public int decodeInt(byte[] b, int o, SortOrder sortOrder) { + throw new UnsupportedOperationException(); + } + + @Override + public PhoenixArrayFactory getPhoenixArrayFactory() { + return new PhoenixArrayFactory() { @Override - public PhoenixArrayFactory getPhoenixArrayFactory() { - return new PhoenixArrayFactory() { - - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray(type, elements); - } - }; + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray(type, elements); } + }; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDateArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDateArray.java index a07418caa6a..c5b38031812 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDateArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDateArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,72 +23,72 @@ public class PDateArray extends PArrayDataType { - public static final PDateArray INSTANCE = new PDateArray(); + public static final PDateArray INSTANCE = new PDateArray(); - private PDateArray() { - super("DATE ARRAY", PDataType.ARRAY_TYPE_BASE + PDate.INSTANCE.getSqlType(), PhoenixArray.class, - null, 40); - } + private PDateArray() { + super("DATE ARRAY", PDataType.ARRAY_TYPE_BASE + PDate.INSTANCE.getSqlType(), PhoenixArray.class, + null, 40); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PDate.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PDate.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PDate.INSTANCE, sortOrder, maxLength, scale, - PDate.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PDate.INSTANCE, sortOrder, maxLength, scale, + PDate.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] dateArr = (Object[]) pArr.array; - for (Object i : dateArr) { - if (!super.isCoercibleTo(PDate.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PDate.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] dateArr = (Object[]) pArr.array; + for (Object i : dateArr) { + if (!super.isCoercibleTo(PDate.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PDate.INSTANCE, arrayLength, maxLength); + } } diff 
--git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimal.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimal.java index a0480b5ae82..469b6fbf0bc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimal.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimal.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,426 +27,430 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.TypeMismatchException; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.NumberUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class PDecimal extends PRealNumber { - public static final PDecimal INSTANCE = new PDecimal(); + public static final PDecimal INSTANCE = new PDecimal(); - private static final BigDecimal MIN_DOUBLE_AS_BIG_DECIMAL = - BigDecimal.valueOf(-Double.MAX_VALUE); - private static final BigDecimal MAX_DOUBLE_AS_BIG_DECIMAL = - BigDecimal.valueOf(Double.MAX_VALUE); - private static final BigDecimal MIN_FLOAT_AS_BIG_DECIMAL = - BigDecimal.valueOf(-Float.MAX_VALUE); - private static final BigDecimal MAX_FLOAT_AS_BIG_DECIMAL = - BigDecimal.valueOf(Float.MAX_VALUE); + private static final BigDecimal MIN_DOUBLE_AS_BIG_DECIMAL = BigDecimal.valueOf(-Double.MAX_VALUE); + private static final BigDecimal MAX_DOUBLE_AS_BIG_DECIMAL = BigDecimal.valueOf(Double.MAX_VALUE); + private static final BigDecimal MIN_FLOAT_AS_BIG_DECIMAL = BigDecimal.valueOf(-Float.MAX_VALUE); + private static final BigDecimal MAX_FLOAT_AS_BIG_DECIMAL = BigDecimal.valueOf(Float.MAX_VALUE); - private PDecimal() { - super("DECIMAL", Types.DECIMAL, BigDecimal.class, null, 8); - } + private PDecimal() { + super("DECIMAL", Types.DECIMAL, BigDecimal.class, null, 8); + } - @Override - public byte[] toBytes(Object object) { - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - BigDecimal v = (BigDecimal) object; - v = NumberUtil.normalize(v); - int len = getLength(v); - byte[] result = new byte[Math.min(len, MAX_BIG_DECIMAL_BYTES)]; - PDataType.toBytes(v, result, 0, len); - return result; + @Override + public byte[] toBytes(Object object) { + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; } + BigDecimal v = (BigDecimal) object; + v = NumberUtil.normalize(v); + int len = getLength(v); + byte[] result = new byte[Math.min(len, MAX_BIG_DECIMAL_BYTES)]; + PDataType.toBytes(v, result, 0, len); + return result; + } - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - BigDecimal v = (BigDecimal) object; - v = NumberUtil.normalize(v); - int len = getLength(v); - return PDataType.toBytes(v, bytes, offset, len); + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; } + BigDecimal v = (BigDecimal) object; + v = NumberUtil.normalize(v); + int len = getLength(v); + return PDataType.toBytes(v, bytes, offset, len); + } - private 
int getLength(BigDecimal v) { - int signum = v.signum(); - if (signum == 0) { // Special case for zero - return 1; - } - /* - * Size of DECIMAL includes: - * 1) one byte for exponent - * 2) one byte for terminal byte if negative - * 3) one byte for every two digits with the following caveats: - * a) add one to round up in the case when there is an odd number of digits - * b) add one in the case that the scale is odd to account for 10x of lowest significant digit - * (basically done to increase the range of exponents that can be represented) - */ - return (signum < 0 ? 2 : 1) + (v.precision() + 1 + (v.scale() % 2 == 0 ? 0 : 1)) / 2; + private int getLength(BigDecimal v) { + int signum = v.signum(); + if (signum == 0) { // Special case for zero + return 1; } + /* + * Size of DECIMAL includes: 1) one byte for exponent 2) one byte for terminal byte if negative + * 3) one byte for every two digits with the following caveats: a) add one to round up in the + * case when there is an odd number of digits b) add one in the case that the scale is odd to + * account for 10x of lowest significant digit (basically done to increase the range of + * exponents that can be represented) + */ + return (signum < 0 ? 2 : 1) + (v.precision() + 1 + (v.scale() % 2 == 0 ? 0 : 1)) / 2; + } - @Override - public int estimateByteSize(Object o) { - if (o == null) { - return 1; - } - BigDecimal v = (BigDecimal) o; - // TODO: should we strip zeros and round here too? - return Math.min(getLength(v), MAX_BIG_DECIMAL_BYTES); + @Override + public int estimateByteSize(Object o) { + if (o == null) { + return 1; } + BigDecimal v = (BigDecimal) o; + // TODO: should we strip zeros and round here too? + return Math.min(getLength(v), MAX_BIG_DECIMAL_BYTES); + } - @Override - public Integer getMaxLength(Object o) { - if (o == null) { - return MAX_PRECISION; - } - BigDecimal v = (BigDecimal) o; - return v.precision(); + @Override + public Integer getMaxLength(Object o) { + if (o == null) { + return MAX_PRECISION; } + BigDecimal v = (BigDecimal) o; + return v.precision(); + } - @Override - public Integer getScale(Object o) { - return null; - } + @Override + public Integer getScale(Object o) { + return null; + } - @Override - public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - Preconditions.checkNotNull(sortOrder); - if (l == 0) { - return null; - } - if (actualType == PDecimal.INSTANCE) { - if (sortOrder == SortOrder.DESC) { - b = SortOrder.invert(b, o, new byte[l], 0, l); - o = 0; - } - return toBigDecimal(b, o, l); - } else if (equalsAny(actualType, PDate.INSTANCE, PTime.INSTANCE, PUnsignedDate.INSTANCE, - PUnsignedTime.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, - PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, - PUnsignedTinyint.INSTANCE)) { - return BigDecimal.valueOf(actualType.getCodec().decodeLong(b, o, sortOrder)); - } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { - return BigDecimal.valueOf(actualType.getCodec().decodeFloat(b, o, sortOrder)); - } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { - return BigDecimal.valueOf(actualType.getCodec().decodeDouble(b, o, sortOrder)); - } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { - long millisPart = DateUtil.getCodecFor(actualType).decodeLong(b, o, sortOrder); - int nanoPart = PUnsignedInt.INSTANCE.getCodec().decodeInt(b, o + Bytes.SIZEOF_LONG, 
sortOrder); - BigDecimal nanosPart = BigDecimal.valueOf( - (nanoPart % QueryConstants.MILLIS_TO_NANOS_CONVERTOR) - / QueryConstants.MILLIS_TO_NANOS_CONVERTOR); - return BigDecimal.valueOf(millisPart).add(nanosPart); - } else if (actualType == PBoolean.INSTANCE) { - return (Boolean) PBoolean.INSTANCE.toObject(b, o, l, actualType, sortOrder) ? - BigDecimal.ONE : - BigDecimal.ZERO; - } - return throwConstraintViolationException(actualType, this); + @Override + public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + Preconditions.checkNotNull(sortOrder); + if (l == 0) { + return null; } - - @Override - public Object toObject(Object object, PDataType actualType) { - if (object == null) { - return null; - } - if (equalsAny(actualType, PInteger.INSTANCE, PUnsignedInt.INSTANCE)) { - return BigDecimal.valueOf((Integer) object); - } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { - return BigDecimal.valueOf((Long) object); - } else if (equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE)) { - return BigDecimal.valueOf((Short) object); - } else if (equalsAny(actualType, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE)) { - return BigDecimal.valueOf((Byte) object); - } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { - return BigDecimal.valueOf((Float) object); - } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { - return BigDecimal.valueOf((Double) object); - } else if (actualType == PDecimal.INSTANCE) { - return object; - } else if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, - PUnsignedTime.INSTANCE)) { - java.util.Date d = (java.util.Date) object; - return BigDecimal.valueOf(d.getTime()); - } else if (equalsAny(actualType, PTimestamp.INSTANCE, - PUnsignedTimestamp.INSTANCE)) { - Timestamp ts = (Timestamp) object; - long millisPart = ts.getTime(); - BigDecimal nanosPart = BigDecimal.valueOf( - (ts.getNanos() % QueryConstants.MILLIS_TO_NANOS_CONVERTOR) - / QueryConstants.MILLIS_TO_NANOS_CONVERTOR); - BigDecimal value = BigDecimal.valueOf(millisPart).add(nanosPart); - return value; - } else if (actualType == PBoolean.INSTANCE) { - return ((Boolean) object) ? 
BigDecimal.ONE : BigDecimal.ZERO; - } - return throwConstraintViolationException(actualType, this); + if (actualType == PDecimal.INSTANCE) { + if (sortOrder == SortOrder.DESC) { + b = SortOrder.invert(b, o, new byte[l], 0, l); + o = 0; + } + return toBigDecimal(b, o, l); + } else if ( + equalsAny(actualType, PDate.INSTANCE, PTime.INSTANCE, PUnsignedDate.INSTANCE, + PUnsignedTime.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, + PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, + PUnsignedTinyint.INSTANCE) + ) { + return BigDecimal.valueOf(actualType.getCodec().decodeLong(b, o, sortOrder)); + } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { + return BigDecimal.valueOf(actualType.getCodec().decodeFloat(b, o, sortOrder)); + } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { + return BigDecimal.valueOf(actualType.getCodec().decodeDouble(b, o, sortOrder)); + } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + long millisPart = DateUtil.getCodecFor(actualType).decodeLong(b, o, sortOrder); + int nanoPart = + PUnsignedInt.INSTANCE.getCodec().decodeInt(b, o + Bytes.SIZEOF_LONG, sortOrder); + BigDecimal nanosPart = + BigDecimal.valueOf((nanoPart % QueryConstants.MILLIS_TO_NANOS_CONVERTOR) + / QueryConstants.MILLIS_TO_NANOS_CONVERTOR); + return BigDecimal.valueOf(millisPart).add(nanosPart); + } else if (actualType == PBoolean.INSTANCE) { + return (Boolean) PBoolean.INSTANCE.toObject(b, o, l, actualType, sortOrder) + ? BigDecimal.ONE + : BigDecimal.ZERO; } + return throwConstraintViolationException(actualType, this); + } - @Override - public boolean isFixedWidth() { - return false; + @Override + public Object toObject(Object object, PDataType actualType) { + if (object == null) { + return null; } - - @Override - public Integer getByteSize() { - return MAX_BIG_DECIMAL_BYTES; + if (equalsAny(actualType, PInteger.INSTANCE, PUnsignedInt.INSTANCE)) { + return BigDecimal.valueOf((Integer) object); + } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { + return BigDecimal.valueOf((Long) object); + } else if (equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE)) { + return BigDecimal.valueOf((Short) object); + } else if (equalsAny(actualType, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE)) { + return BigDecimal.valueOf((Byte) object); + } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { + return BigDecimal.valueOf((Float) object); + } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { + return BigDecimal.valueOf((Double) object); + } else if (actualType == PDecimal.INSTANCE) { + return object; + } else if ( + equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, + PUnsignedTime.INSTANCE) + ) { + java.util.Date d = (java.util.Date) object; + return BigDecimal.valueOf(d.getTime()); + } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + Timestamp ts = (Timestamp) object; + long millisPart = ts.getTime(); + BigDecimal nanosPart = + BigDecimal.valueOf((ts.getNanos() % QueryConstants.MILLIS_TO_NANOS_CONVERTOR) + / QueryConstants.MILLIS_TO_NANOS_CONVERTOR); + BigDecimal value = BigDecimal.valueOf(millisPart).add(nanosPart); + return value; + } else if (actualType == PBoolean.INSTANCE) { + return ((Boolean) object) ? 
BigDecimal.ONE : BigDecimal.ZERO; } + return throwConstraintViolationException(actualType, this); + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - if (rhsType == PDecimal.INSTANCE) { - return ((BigDecimal) lhs).compareTo((BigDecimal) rhs); - } - return -rhsType.compareTo(rhs, lhs, this); - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public boolean isCastableTo(PDataType targetType) { - return super.isCastableTo(targetType) || targetType.isCoercibleTo( - PTimestamp.INSTANCE) || targetType.equals(PBoolean.INSTANCE); - } + @Override + public Integer getByteSize() { + return MAX_BIG_DECIMAL_BYTES; + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value != null) { - BigDecimal bd; - if (equalsAny(targetType, PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE, - PUnsignedSmallint.INSTANCE, PUnsignedTinyint.INSTANCE)) { - bd = (BigDecimal) value; - if (bd.signum() == -1) { - return false; - } - } else if (targetType.equals(PLong.INSTANCE)) { - bd = (BigDecimal) value; - try { - bd.longValueExact(); - return true; - } catch (ArithmeticException e) { - return false; - } - } else if (targetType.equals(PInteger.INSTANCE)) { - bd = (BigDecimal) value; - try { - bd.intValueExact(); - return true; - } catch (ArithmeticException e) { - return false; - } - } else if (targetType.equals(PSmallint.INSTANCE)) { - bd = (BigDecimal) value; - try { - bd.shortValueExact(); - return true; - } catch (ArithmeticException e) { - return false; - } - } else if (targetType.equals(PTinyint.INSTANCE)) { - bd = (BigDecimal) value; - try { - bd.byteValueExact(); - return true; - } catch (ArithmeticException e) { - return false; - } - } else if (targetType.equals(PUnsignedFloat.INSTANCE)) { - bd = (BigDecimal) value; - try { - BigDecimal maxFloat = MAX_FLOAT_AS_BIG_DECIMAL; - boolean isNegtive = (bd.signum() == -1); - return bd.compareTo(maxFloat) <= 0 && !isNegtive; - } catch (Exception e) { - return false; - } - } else if (targetType.equals(PFloat.INSTANCE)) { - bd = (BigDecimal) value; - try { - BigDecimal maxFloat = MAX_FLOAT_AS_BIG_DECIMAL; - // Float.MIN_VALUE should not be used here, as this is the - // smallest in terms of closest to zero. 
- BigDecimal minFloat = MIN_FLOAT_AS_BIG_DECIMAL; - return bd.compareTo(maxFloat) <= 0 && bd.compareTo(minFloat) >= 0; - } catch (Exception e) { - return false; - } - } else if (targetType.equals(PUnsignedDouble.INSTANCE)) { - bd = (BigDecimal) value; - try { - BigDecimal maxDouble = MAX_DOUBLE_AS_BIG_DECIMAL; - boolean isNegtive = (bd.signum() == -1); - return bd.compareTo(maxDouble) <= 0 && !isNegtive; - } catch (Exception e) { - return false; - } - } else if (targetType.equals(PDouble.INSTANCE)) { - bd = (BigDecimal) value; - try { - BigDecimal maxDouble = MAX_DOUBLE_AS_BIG_DECIMAL; - BigDecimal minDouble = MIN_DOUBLE_AS_BIG_DECIMAL; - return bd.compareTo(maxDouble) <= 0 && bd.compareTo(minDouble) >= 0; - } catch (Exception e) { - return false; - } - } - } - return super.isCoercibleTo(targetType, value); + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; } + if (rhsType == PDecimal.INSTANCE) { + return ((BigDecimal) lhs).compareTo((BigDecimal) rhs); + } + return -rhsType.compareTo(rhs, lhs, this); + } - @Override - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, - SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) { - if (ptr.getLength() == 0) { - return true; - } - // Any numeric type fits into a DECIMAL - if (srcType != PDecimal.INSTANCE) { - if(!srcType.isCoercibleTo(this)) { - throw new IllegalArgumentException(TypeMismatchException.newException(srcType, this)); - } - return true; + @Override + public boolean isCastableTo(PDataType targetType) { + return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE) + || targetType.equals(PBoolean.INSTANCE); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value != null) { + BigDecimal bd; + if ( + equalsAny(targetType, PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE, + PUnsignedSmallint.INSTANCE, PUnsignedTinyint.INSTANCE) + ) { + bd = (BigDecimal) value; + if (bd.signum() == -1) { + return false; } - // Use the scale from the value if provided, as it prevents a deserialization. - // The maxLength and scale for the underlying expression are ignored, because they - // are not relevant in this case: for example a DECIMAL(10,2) may be assigned to a - // DECIMAL(5,0) as long as the value fits. 
- if (value != null) { - BigDecimal v = (BigDecimal) value; - maxLength = v.precision(); - scale = v.scale(); - } else { - this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, desiredScale, sortOrder, true); - int[] v = getDecimalPrecisionAndScale(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); - maxLength = v[0]; - scale = v[1]; + } else if (targetType.equals(PLong.INSTANCE)) { + bd = (BigDecimal) value; + try { + bd.longValueExact(); + return true; + } catch (ArithmeticException e) { + return false; } - if (desiredMaxLength != null && desiredScale != null && maxLength != null && scale != null - && ((desiredMaxLength - desiredScale) < (maxLength - scale))) { - return false; + } else if (targetType.equals(PInteger.INSTANCE)) { + bd = (BigDecimal) value; + try { + bd.intValueExact(); + return true; + } catch (ArithmeticException e) { + return false; } - return true; - } - - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - if (desiredScale == null) { - // deiredScale not available, or we do not have scale requirement, delegate to parents. - super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, - desiredScale, expectedModifier); - return; + } else if (targetType.equals(PSmallint.INSTANCE)) { + bd = (BigDecimal) value; + try { + bd.shortValueExact(); + return true; + } catch (ArithmeticException e) { + return false; } - if (ptr.getLength() == 0) { - return; + } else if (targetType.equals(PTinyint.INSTANCE)) { + bd = (BigDecimal) value; + try { + bd.byteValueExact(); + return true; + } catch (ArithmeticException e) { + return false; } - if (scale == null) { - if (object != null) { - BigDecimal v = (BigDecimal) object; - scale = v.scale(); - } else { - int[] v = getDecimalPrecisionAndScale(ptr.get(), ptr.getOffset(), ptr.getLength(), actualModifier); - scale = v[1]; - } + } else if (targetType.equals(PUnsignedFloat.INSTANCE)) { + bd = (BigDecimal) value; + try { + BigDecimal maxFloat = MAX_FLOAT_AS_BIG_DECIMAL; + boolean isNegtive = (bd.signum() == -1); + return bd.compareTo(maxFloat) <= 0 && !isNegtive; + } catch (Exception e) { + return false; } - if (this == actualType && scale <= desiredScale) { - // No coerce and rescale necessary - return; - } else { - BigDecimal decimal; - // Rescale is necessary. - if (object != null) { // value object is passed in. - decimal = (BigDecimal) toObject(object, actualType); - } else { // only value bytes is passed in, need to convert to object first. - decimal = (BigDecimal) toObject(ptr); - } - decimal = decimal.setScale(desiredScale, BigDecimal.ROUND_DOWN); - ptr.set(toBytes(decimal)); + } else if (targetType.equals(PFloat.INSTANCE)) { + bd = (BigDecimal) value; + try { + BigDecimal maxFloat = MAX_FLOAT_AS_BIG_DECIMAL; + // Float.MIN_VALUE should not be used here, as this is the + // smallest in terms of closest to zero. 
+ BigDecimal minFloat = MIN_FLOAT_AS_BIG_DECIMAL; + return bd.compareTo(maxFloat) <= 0 && bd.compareTo(minFloat) >= 0; + } catch (Exception e) { + return false; } - } - - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; + } else if (targetType.equals(PUnsignedDouble.INSTANCE)) { + bd = (BigDecimal) value; + try { + BigDecimal maxDouble = MAX_DOUBLE_AS_BIG_DECIMAL; + boolean isNegtive = (bd.signum() == -1); + return bd.compareTo(maxDouble) <= 0 && !isNegtive; + } catch (Exception e) { + return false; } + } else if (targetType.equals(PDouble.INSTANCE)) { + bd = (BigDecimal) value; try { - return new BigDecimal(value); - } catch (NumberFormatException e) { - throw newIllegalDataException(e); + BigDecimal maxDouble = MAX_DOUBLE_AS_BIG_DECIMAL; + BigDecimal minDouble = MIN_DOUBLE_AS_BIG_DECIMAL; + return bd.compareTo(maxDouble) <= 0 && bd.compareTo(minDouble) >= 0; + } catch (Exception e) { + return false; } + } } + return super.isCoercibleTo(targetType, value); + } - @Override - public Integer estimateByteSizeFromLength(Integer length) { - // No association of runtime byte size from decimal precision. - return null; + @Override + public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + if (ptr.getLength() == 0) { + return true; + } + // Any numeric type fits into a DECIMAL + if (srcType != PDecimal.INSTANCE) { + if (!srcType.isCoercibleTo(this)) { + throw new IllegalArgumentException(TypeMismatchException.newException(srcType, this)); + } + return true; } + // Use the scale from the value if provided, as it prevents a deserialization. + // The maxLength and scale for the underlying expression are ignored, because they + // are not relevant in this case: for example a DECIMAL(10,2) may be assigned to a + // DECIMAL(5,0) as long as the value fits. + if (value != null) { + BigDecimal v = (BigDecimal) value; + maxLength = v.precision(); + scale = v.scale(); + } else { + this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, + desiredScale, sortOrder, true); + int[] v = getDecimalPrecisionAndScale(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); + maxLength = v[0]; + scale = v[1]; + } + if ( + desiredMaxLength != null && desiredScale != null && maxLength != null && scale != null + && ((desiredMaxLength - desiredScale) < (maxLength - scale)) + ) { + return false; + } + return true; + } - @Override - public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { - if (formatter == null) { - BigDecimal o = (BigDecimal) toObject(b, offset, length); - return o.toPlainString(); - } - return super.toStringLiteral(b, offset, length, formatter); + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, + Integer desiredScale, SortOrder expectedModifier) { + if (desiredScale == null) { + // deiredScale not available, or we do not have scale requirement, delegate to parents. 
+ super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + return; } + if (ptr.getLength() == 0) { + return; + } + if (scale == null) { + if (object != null) { + BigDecimal v = (BigDecimal) object; + scale = v.scale(); + } else { + int[] v = + getDecimalPrecisionAndScale(ptr.get(), ptr.getOffset(), ptr.getLength(), actualModifier); + scale = v[1]; + } + } + if (this == actualType && scale <= desiredScale) { + // No coerce and rescale necessary + return; + } else { + BigDecimal decimal; + // Rescale is necessary. + if (object != null) { // value object is passed in. + decimal = (BigDecimal) toObject(object, actualType); + } else { // only value bytes is passed in, need to convert to object first. + decimal = (BigDecimal) toObject(ptr); + } + decimal = decimal.setScale(desiredScale, BigDecimal.ROUND_DOWN); + ptr.set(toBytes(decimal)); + } + } - @Override - public String toStringLiteral(Object o, Format formatter) { - if (formatter == null) { - if(o == null) { - return String.valueOf(o); - } - return ((BigDecimal)o).toPlainString(); - } - return super.toStringLiteral(o, formatter); + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + try { + return new BigDecimal(value); + } catch (NumberFormatException e) { + throw newIllegalDataException(e); + } + } + + @Override + public Integer estimateByteSizeFromLength(Integer length) { + // No association of runtime byte size from decimal precision. + return null; + } + + @Override + public String toStringLiteral(byte[] b, int offset, int length, Format formatter) { + if (formatter == null) { + BigDecimal o = (BigDecimal) toObject(b, offset, length); + return o.toPlainString(); } + return super.toStringLiteral(b, offset, length, formatter); + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new BigDecimal((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + @Override + public String toStringLiteral(Object o, Format formatter) { + if (formatter == null) { + if (o == null) { + return String.valueOf(o); + } + return ((BigDecimal) o).toPlainString(); } + return super.toStringLiteral(o, formatter); + } - // take details from org.apache.phoenix.schema.types.PDataType#toBigDecimal(byte[], int, int) - @Override - public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, - Integer scale) { - byte signByte; - if (sortOrder == SortOrder.DESC) { - signByte = SortOrder.invert(bytes[offset]); - } else { - signByte = bytes[offset]; - } - if (length == 1 && signByte == ZERO_BYTE) { - return 0; - } - return ((signByte & 0x80) == 0) ? -1 : 1; + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return new BigDecimal((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + // take details from org.apache.phoenix.schema.types.PDataType#toBigDecimal(byte[], int, int) + @Override + public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, + Integer scale) { + byte signByte; + if (sortOrder == SortOrder.DESC) { + signByte = SortOrder.invert(bytes[offset]); + } else { + signByte = bytes[offset]; + } + if (length == 1 && signByte == ZERO_BYTE) { + return 0; } + return ((signByte & 0x80) == 0) ? 
-1 : 1; + } - @Override - public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - if (sortOrder == SortOrder.DESC) { - bytes = SortOrder.invert(bytes, offset, new byte[length], 0, length); - offset = 0; - } - BigDecimal bigDecimal = toBigDecimal(bytes, offset, length); - outPtr.set(toBytes(bigDecimal.abs())); + @Override + public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + if (sortOrder == SortOrder.DESC) { + bytes = SortOrder.invert(bytes, offset, new byte[length], 0, length); + offset = 0; } + BigDecimal bigDecimal = toBigDecimal(bytes, offset, length); + outPtr.set(toBytes(bigDecimal.abs())); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimalArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimalArray.java index 101546a4939..f24a594a584 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimalArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDecimalArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,66 +23,72 @@ public class PDecimalArray extends PArrayDataType { - public static final PDecimalArray INSTANCE = new PDecimalArray(); + public static final PDecimalArray INSTANCE = new PDecimalArray(); - private PDecimalArray() { - super("DECIMAL ARRAY", PDataType.ARRAY_TYPE_BASE + PDecimal.INSTANCE.getSqlType(), PhoenixArray.class, null, 35); - } + private PDecimalArray() { + super("DECIMAL ARRAY", PDataType.ARRAY_TYPE_BASE + PDecimal.INSTANCE.getSqlType(), + PhoenixArray.class, null, 35); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PDecimal.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PDecimal.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PDecimal.INSTANCE, sortOrder, maxLength, scale, PDecimal.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, 
length, PDecimal.INSTANCE, sortOrder, maxLength, scale, + PDecimal.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { return true; } - PhoenixArray pArr = (PhoenixArray)value; - Object[] decimalArr = (Object[])pArr.array; - for (Object i : decimalArr) { - if (!super.isCoercibleTo(PDecimal.INSTANCE, i)) { return false; } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PDecimal.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] decimalArr = (Object[]) pArr.array; + for (Object i : decimalArr) { + if (!super.isCoercibleTo(PDecimal.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PDecimal.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDouble.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDouble.java index 350478bc4cb..c84dbe6b0c0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDouble.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDouble.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.primitives.Doubles; @@ -35,15 +34,15 @@ private PDouble() { @Override public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } if (rhsType == PDecimal.INSTANCE) { return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).doubleValue())); } @@ -92,8 +91,7 @@ public int toBytes(Object object, byte[] bytes, int offset) { if (object == null) { throw newIllegalDataException(this + " may not be null"); } - return this.getCodec().encodeDouble(((Number) object).doubleValue(), - bytes, offset); + return this.getCodec().encodeDouble(((Number) object).doubleValue(), bytes, offset); } @Override @@ -139,15 +137,17 @@ public Object toObject(Object object, PDataType actualType) { } @Override - public Double toObject(byte[] b, int o, int l, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { + public Double toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { if (l <= 0) { return null; } - if (equalsAny(actualType, PDouble.INSTANCE, 
PUnsignedDouble.INSTANCE, PFloat.INSTANCE, + if ( + equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, - PUnsignedTinyint.INSTANCE)) { + PUnsignedTinyint.INSTANCE) + ) { return actualType.getCodec().decodeDouble(b, o, sortOrder); } else if (actualType == PDecimal.INSTANCE) { BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); @@ -164,13 +164,10 @@ public boolean isCoercibleTo(PDataType targetType, Object value) { if (targetType.equals(PUnsignedDouble.INSTANCE)) { return d >= 0; } else if (targetType.equals(PFloat.INSTANCE)) { - return Double.isNaN(d) - || d == Double.POSITIVE_INFINITY - || d == Double.NEGATIVE_INFINITY - || (d >= -Float.MAX_VALUE && d <= Float.MAX_VALUE); + return Double.isNaN(d) || d == Double.POSITIVE_INFINITY || d == Double.NEGATIVE_INFINITY + || (d >= -Float.MAX_VALUE && d <= Float.MAX_VALUE); } else if (targetType.equals(PUnsignedFloat.INSTANCE)) { - return Double.isNaN(d) || d == Double.POSITIVE_INFINITY - || (d >= 0 && d <= Float.MAX_VALUE); + return Double.isNaN(d) || d == Double.POSITIVE_INFINITY || (d >= 0 && d <= Float.MAX_VALUE); } else if (targetType.equals(PUnsignedLong.INSTANCE)) { return (d >= 0 && d <= Long.MAX_VALUE); } else if (targetType.equals(PLong.INSTANCE)) { @@ -209,7 +206,7 @@ public long decodeLong(byte[] b, int o, SortOrder sortOrder) { double v = decodeDouble(b, o, sortOrder); if (v < Long.MIN_VALUE || v > Long.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Long without changing its value"); + "Value " + v + " cannot be cast to Long without changing its value"); } return (long) v; } @@ -219,7 +216,7 @@ public int decodeInt(byte[] b, int o, SortOrder sortOrder) { double v = decodeDouble(b, o, sortOrder); if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Integer without changing its value"); + "Value " + v + " cannot be cast to Integer without changing its value"); } return (int) v; } @@ -229,7 +226,7 @@ public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { double v = decodeDouble(b, o, sortOrder); if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Byte without changing its value"); + "Value " + v + " cannot be cast to Byte without changing its value"); } return (byte) v; } @@ -239,7 +236,7 @@ public short decodeShort(byte[] b, int o, SortOrder sortOrder) { double v = decodeDouble(b, o, sortOrder); if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Short without changing its value"); + "Value " + v + " cannot be cast to Short without changing its value"); } return (short) v; } @@ -250,15 +247,15 @@ public double decodeDouble(byte[] bytes, int o, SortOrder sortOrder) { checkForSufficientLength(bytes, o, Bytes.SIZEOF_LONG); long l; if (sortOrder == SortOrder.DESC) { - // Copied from Bytes.toLong(), but without using the toLongUnsafe - // TODO: would it be possible to use the toLongUnsafe? - l = 0; - for(int i = o; i < o + Bytes.SIZEOF_LONG; i++) { - l <<= 8; - l ^= (bytes[i] ^ 0xff) & 0xFF; - } + // Copied from Bytes.toLong(), but without using the toLongUnsafe + // TODO: would it be possible to use the toLongUnsafe? 
+ l = 0; + for (int i = o; i < o + Bytes.SIZEOF_LONG; i++) { + l <<= 8; + l ^= (bytes[i] ^ 0xff) & 0xFF; + } } else { - l = Bytes.toLong(bytes, o); + l = Bytes.toLong(bytes, o); } l--; l ^= (~l >> Long.SIZE - 1) | Long.MIN_VALUE; @@ -268,13 +265,14 @@ public double decodeDouble(byte[] bytes, int o, SortOrder sortOrder) { @Override public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { double v = decodeDouble(b, o, sortOrder); - if (Double.isNaN(v) || v == Double.NEGATIVE_INFINITY - || v == Double.POSITIVE_INFINITY - || (v >= -Float.MAX_VALUE && v <= Float.MAX_VALUE)) { + if ( + Double.isNaN(v) || v == Double.NEGATIVE_INFINITY || v == Double.POSITIVE_INFINITY + || (v >= -Float.MAX_VALUE && v <= Float.MAX_VALUE) + ) { return (float) v; } else { throw newIllegalDataException( - "Value " + v + " cannot be cast to Float without changing its value"); + "Value " + v + " cannot be cast to Float without changing its value"); } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDoubleArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDoubleArray.java index d5852293f01..db34eae4ae1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDoubleArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PDoubleArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,72 +21,73 @@ public class PDoubleArray extends PArrayDataType { - public static final PDoubleArray INSTANCE = new PDoubleArray(); + public static final PDoubleArray INSTANCE = new PDoubleArray(); - private PDoubleArray() { - super("DOUBLE ARRAY", PDataType.ARRAY_TYPE_BASE + PDouble.INSTANCE.getSqlType(), - PhoenixArray.class, null, 34); - } + private PDoubleArray() { + super("DOUBLE ARRAY", PDataType.ARRAY_TYPE_BASE + PDouble.INSTANCE.getSqlType(), + PhoenixArray.class, null, 34); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PDouble.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PDouble.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PDouble.INSTANCE, sortOrder, maxLength, scale, - 
PDouble.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PDouble.INSTANCE, sortOrder, maxLength, scale, + PDouble.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveDoublePhoenixArray pArr = (PhoenixArray.PrimitiveDoublePhoenixArray) value; - double[] doubleArr = (double[]) pArr.array; - for (double i : doubleArr) { - if (!super.isCoercibleTo(PDouble.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PDouble.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveDoublePhoenixArray pArr = + (PhoenixArray.PrimitiveDoublePhoenixArray) value; + double[] doubleArr = (double[]) pArr.array; + for (double i : doubleArr) { + if (!super.isCoercibleTo(PDouble.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PDouble.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloat.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloat.java index b4869db201a..72fecdfc043 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloat.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PFloat extends PRealNumber { @@ -80,8 +79,7 @@ public int toBytes(Object object, byte[] bytes, int offset) { if (object == null) { throw newIllegalDataException(this + " may not be null"); } - return this.getCodec().encodeFloat(((Number) object).floatValue(), - bytes, offset); + return this.getCodec().encodeFloat(((Number) object).floatValue(), bytes, offset); } @Override @@ -106,14 +104,14 @@ public Object toObject(Object object, PDataType actualType) { return object; } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { double d = (Double) object; - if (Double.isNaN(d) - || d == Double.POSITIVE_INFINITY - || d == Double.NEGATIVE_INFINITY - || (d >= -Float.MAX_VALUE && d <= Float.MAX_VALUE)) { + if ( + Double.isNaN(d) || d == Double.POSITIVE_INFINITY || d == Double.NEGATIVE_INFINITY + || (d >= -Float.MAX_VALUE && d <= Float.MAX_VALUE) + ) { return (float) d; } else { throw newIllegalDataException( - actualType + " value " + d + " cannot be cast to Float without changing its value"); + actualType + " value " + d + " cannot be cast to Float without changing its value"); } } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { f = (Long) object; @@ -136,14 +134,16 @@ public Object toObject(Object object, PDataType actualType) { @Override public Float toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { if (l <= 0) { return null; } - if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, + if ( + equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, PUnsignedDouble.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, - PUnsignedTinyint.INSTANCE)) { + PUnsignedTinyint.INSTANCE) + ) { return actualType.getCodec().decodeFloat(b, o, sortOrder); } else if (actualType == PDecimal.INSTANCE) { BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); @@ -198,7 +198,7 @@ public long decodeLong(byte[] b, int o, SortOrder sortOrder) { float v = decodeFloat(b, o, sortOrder); if (v < Long.MIN_VALUE || v > Long.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Long without changing its value"); + "Value " + v + " cannot be cast to Long without changing its value"); } return (long) v; } @@ -208,7 +208,7 @@ public int decodeInt(byte[] b, int o, SortOrder sortOrder) { float v = decodeFloat(b, o, sortOrder); if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Integer without changing its value"); + "Value " + v + " cannot be cast to Integer without changing its value"); } return (int) v; } @@ -218,7 +218,7 @@ public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { float v = decodeFloat(b, o, sortOrder); if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast 
to Byte without changing its value"); + "Value " + v + " cannot be cast to Byte without changing its value"); } return (byte) v; } @@ -228,14 +228,13 @@ public short decodeShort(byte[] b, int o, SortOrder sortOrder) { float v = decodeFloat(b, o, sortOrder); if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Short without changing its value"); + "Value " + v + " cannot be cast to Short without changing its value"); } return (short) v; } @Override - public double decodeDouble(byte[] b, int o, - SortOrder sortOrder) { + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { return decodeFloat(b, o, sortOrder); } @@ -245,13 +244,13 @@ public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { checkForSufficientLength(b, o, Bytes.SIZEOF_INT); int value; if (sortOrder == SortOrder.DESC) { - value = 0; - for(int i = o; i < (o + Bytes.SIZEOF_INT); i++) { - value <<= 8; - value ^= (b[i] ^ 0xff) & 0xFF; - } + value = 0; + for (int i = o; i < (o + Bytes.SIZEOF_INT); i++) { + value <<= 8; + value ^= (b[i] ^ 0xff) & 0xFF; + } } else { - value = Bytes.toInt(b, o); + value = Bytes.toInt(b, o); } value--; value ^= (~value >> Integer.SIZE - 1) | Integer.MIN_VALUE; @@ -280,13 +279,14 @@ public int encodeByte(byte v, byte[] b, int o) { @Override public int encodeDouble(double v, byte[] b, int o) { - if (Double.isNaN(v) || v == Double.POSITIVE_INFINITY - || v == Double.NEGATIVE_INFINITY - || (v >= -Float.MAX_VALUE && v <= Float.MAX_VALUE)) { + if ( + Double.isNaN(v) || v == Double.POSITIVE_INFINITY || v == Double.NEGATIVE_INFINITY + || (v >= -Float.MAX_VALUE && v <= Float.MAX_VALUE) + ) { return encodeFloat((float) v, b, o); } else { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Float without changing its value"); + "Value " + v + " cannot be encoded as an Float without changing its value"); } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloatArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloatArray.java index 6005c3e83c7..548f8ffcddb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloatArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PFloatArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PFloatArray extends PArrayDataType { - public static final PFloatArray INSTANCE = new PFloatArray(); + public static final PFloatArray INSTANCE = new PFloatArray(); - private PFloatArray() { - super("FLOAT ARRAY", PDataType.ARRAY_TYPE_BASE + PFloat.INSTANCE.getSqlType(), - PhoenixArray.class, null, 33); - } + private PFloatArray() { + super("FLOAT ARRAY", PDataType.ARRAY_TYPE_BASE + PFloat.INSTANCE.getSqlType(), + PhoenixArray.class, null, 33); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PFloat.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PFloat.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PFloat.INSTANCE, sortOrder, maxLength, scale, - PFloat.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PFloat.INSTANCE, sortOrder, maxLength, scale, + PFloat.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveFloatPhoenixArray pArr = (PhoenixArray.PrimitiveFloatPhoenixArray) value; - float[] floatArr = (float[]) pArr.array; - for (float i : floatArr) { - if (!super.isCoercibleTo(PFloat.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PFloat.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveFloatPhoenixArray pArr = (PhoenixArray.PrimitiveFloatPhoenixArray) value; + float[] floatArr = (float[]) pArr.array; + for (float i : floatArr) { + if (!super.isCoercibleTo(PFloat.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object 
getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PFloat.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PInteger.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PInteger.java index 2e22c6059fc..ad4ceaf83d1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PInteger.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PInteger.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PInteger extends PWholeNumber { @@ -62,22 +61,24 @@ public Object toObject(Object object, PDataType actualType) { long l = (Long) o; if (l < Integer.MIN_VALUE || l > Integer.MAX_VALUE) { throw newIllegalDataException( - actualType + " value " + l + " cannot be cast to Integer without changing its value"); + actualType + " value " + l + " cannot be cast to Integer without changing its value"); } int v = (int) l; return v; } @Override - public Integer toObject(byte[] b, int o, int l, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { + public Integer toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { if (l == 0) { return null; } - if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, + if ( + equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, - PUnsignedDouble.INSTANCE)) { + PUnsignedDouble.INSTANCE) + ) { return actualType.getCodec().decodeInt(b, o, sortOrder); } else if (actualType == PDecimal.INSTANCE) { BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); @@ -91,8 +92,10 @@ public Integer toObject(byte[] b, int o, int l, PDataType actualType, public boolean isCoercibleTo(PDataType targetType, Object value) { if (value != null) { int i; - if (equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, - PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE)) { + if ( + equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, + PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE) + ) { i = (Integer) value; return i >= 0; } else if (targetType.equals(PUnsignedSmallint.INSTANCE)) { @@ -172,8 +175,7 @@ public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { } @Override - public double decodeDouble(byte[] b, int o, - SortOrder sortOrder) { + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { return decodeInt(b, o, sortOrder); } @@ -210,7 +212,7 @@ public int encodeInt(int v, byte[] b, int o) { public int encodeFloat(float v, byte[] b, int o) { if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Integer without changing its value"); + "Value " + v + " cannot be 
encoded as an Integer without changing its value"); } return encodeInt((int) v, b, o); } @@ -219,7 +221,7 @@ public int encodeFloat(float v, byte[] b, int o) { public int encodeDouble(double v, byte[] b, int o) { if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Integer without changing its value"); + "Value " + v + " cannot be encoded as an Integer without changing its value"); } return encodeInt((int) v, b, o); } @@ -228,7 +230,7 @@ public int encodeDouble(double v, byte[] b, int o) { public int encodeLong(long v, byte[] b, int o) { if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Integer without changing its value"); + "Value " + v + " cannot be encoded as an Integer without changing its value"); } return encodeInt((int) v, b, o); } @@ -238,7 +240,7 @@ public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { int v = decodeInt(b, o, sortOrder); if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Byte without changing its value"); + "Value " + v + " cannot be cast to Byte without changing its value"); } return (byte) v; } @@ -248,7 +250,7 @@ public short decodeShort(byte[] b, int o, SortOrder sortOrder) { int v = decodeInt(b, o, sortOrder); if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be cast to Short without changing its value"); + "Value " + v + " cannot be cast to Short without changing its value"); } return (short) v; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PIntegerArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PIntegerArray.java index b317cfe90f1..47863621bae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PIntegerArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PIntegerArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PIntegerArray extends PArrayDataType { - public static final PIntegerArray INSTANCE = new PIntegerArray(); + public static final PIntegerArray INSTANCE = new PIntegerArray(); - private PIntegerArray() { - super("INTEGER ARRAY", PDataType.ARRAY_TYPE_BASE + PInteger.INSTANCE.getSqlType(), - PhoenixArray.class, null, 24); - } + private PIntegerArray() { + super("INTEGER ARRAY", PDataType.ARRAY_TYPE_BASE + PInteger.INSTANCE.getSqlType(), + PhoenixArray.class, null, 24); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PInteger.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PInteger.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PInteger.INSTANCE, sortOrder, maxLength, scale, - PInteger.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PInteger.INSTANCE, sortOrder, maxLength, scale, + PInteger.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveIntPhoenixArray pArr = (PhoenixArray.PrimitiveIntPhoenixArray) value; - int[] intArr = (int[]) pArr.array; - for (int i : intArr) { - if (!super.isCoercibleTo(PInteger.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PInteger.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveIntPhoenixArray pArr = (PhoenixArray.PrimitiveIntPhoenixArray) value; + int[] intArr = (int[]) pArr.array; + for (int i : intArr) { + if (!super.isCoercibleTo(PInteger.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public 
Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PInteger.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PJson.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PJson.java index 748d5b61e94..519ee88340c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PJson.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PJson.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema.types; import org.apache.phoenix.schema.SortOrder; @@ -35,96 +34,96 @@ */ public class PJson extends PVarbinary { - public static final PJson INSTANCE = new PJson(); - private JsonDataFormat jsonDataFormat; - - private PJson() { - super("JSON", PDataType.JSON_TYPE, byte[].class, null, 48); - jsonDataFormat = JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); - } + public static final PJson INSTANCE = new PJson(); + private JsonDataFormat jsonDataFormat; - @Override - public boolean canBePrimaryKey() { - return false; - } + private PJson() { + super("JSON", PDataType.JSON_TYPE, byte[].class, null, 48); + jsonDataFormat = JsonDataFormatFactory.getJsonDataFormat(JsonDataFormatFactory.DataFormat.BSON); + } - @Override - public boolean isComparisonSupported() { - return false; - } + @Override + public boolean canBePrimaryKey() { + return false; + } - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - byte[] b = toBytes(object); - System.arraycopy(b, 0, bytes, offset, b.length); - return b.length; + @Override + public boolean isComparisonSupported() { + return false; + } + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; } + byte[] b = toBytes(object); + System.arraycopy(b, 0, bytes, offset, b.length); + return b.length; - @Override - public byte[] toBytes(Object object) { - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - return jsonDataFormat.toBytes(object); - } + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - @SuppressWarnings("rawtypes") PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - return jsonDataFormat.toObject(bytes, offset, length); + @Override + public byte[] toBytes(Object object) { + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; } - - @Override - public Object toObject(Object object, @SuppressWarnings("rawtypes") PDataType actualType) { - if (object == null) { - return null; - } - if (equalsAny(actualType, PVarchar.INSTANCE)) { - return toObject((String) object); - } - return object; + return jsonDataFormat.toBytes(object); + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, + @SuppressWarnings("rawtypes") PDataType actualType, SortOrder sortOrder, Integer maxLength, + Integer scale) { + if (length == 0) { + return null; } + return 
jsonDataFormat.toObject(bytes, offset, length); + } - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - return jsonDataFormat.toObject(value); + @Override + public Object toObject(Object object, @SuppressWarnings("rawtypes") PDataType actualType) { + if (object == null) { + return null; } - - @Override - public boolean isCoercibleTo(@SuppressWarnings("rawtypes") PDataType targetType) { - return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); - - } - - @Override - public int estimateByteSize(Object o) { - return jsonDataFormat.estimateByteSize(o); - } - - @Override - public Integer getByteSize() { - return null; - } - - @Override - public boolean isBytesComparableWith(@SuppressWarnings("rawtypes") PDataType otherType) { - return otherType == PVarbinary.INSTANCE || otherType == PJson.INSTANCE; + if (equalsAny(actualType, PVarchar.INSTANCE)) { + return toObject((String) object); } + return object; + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - String json = "{a : 1}"; - return this.toObject(json); + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; } + return jsonDataFormat.toObject(value); + } + + @Override + public boolean isCoercibleTo(@SuppressWarnings("rawtypes") PDataType targetType) { + return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); + + } + + @Override + public int estimateByteSize(Object o) { + return jsonDataFormat.estimateByteSize(o); + } + + @Override + public Integer getByteSize() { + return null; + } + + @Override + public boolean isBytesComparableWith(@SuppressWarnings("rawtypes") PDataType otherType) { + return otherType == PVarbinary.INSTANCE || otherType == PJson.INSTANCE; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + String json = "{a : 1}"; + return this.toObject(json); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLong.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLong.java index 2bc8e71c567..557666d24b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLong.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLong.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,337 +23,345 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.primitives.Doubles; import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; public class PLong extends PWholeNumber { - public static final PLong INSTANCE = new PLong(); + public static final PLong INSTANCE = new PLong(); - private PLong() { - super("BIGINT", Types.BIGINT, Long.class, new LongCodec(), 2); - } + private PLong() { + super("BIGINT", Types.BIGINT, Long.class, new LongCodec(), 2); + } - @Override - public Integer getScale(Object o) { - return ZERO; + @Override + public Integer getScale(Object o) { + return ZERO; + } + + @Override + public byte[] toBytes(Object object) { + byte[] b = new byte[Bytes.SIZEOF_LONG]; + toBytes(object, b, 0); + return b; + } + + @Override + public int toBytes(Object object, byte[] b, int o) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); } + return this.getCodec().encodeLong(((Number) object).longValue(), b, o); + } - @Override - public byte[] toBytes(Object object) { - byte[] b = new byte[Bytes.SIZEOF_LONG]; - toBytes(object, b, 0); - return b; + @Override + public Object toObject(Object object, PDataType actualType) { + if (object == null) { + return null; + } + long s; + if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { + return object; + } else if (equalsAny(actualType, PUnsignedInt.INSTANCE, PInteger.INSTANCE)) { + s = (Integer) object; + return s; + } else if (equalsAny(actualType, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE)) { + s = (Byte) object; + return s; + } else if (equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE)) { + s = (Short) object; + return s; + } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { + Float f = (Float) object; + if (f > Long.MAX_VALUE || f < Long.MIN_VALUE) { + throw newIllegalDataException( + actualType + " value " + f + " cannot be cast to Long without changing its value"); + } + s = f.longValue(); + return s; + } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { + Double de = (Double) object; + if (de > Long.MAX_VALUE || de < Long.MIN_VALUE) { + throw newIllegalDataException( + actualType + " value " + de + " cannot be cast to Long without changing its value"); + } + s = de.longValue(); + return s; + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal d = (BigDecimal) object; + return d.longValueExact(); + } else if ( + equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, + PUnsignedTime.INSTANCE) + ) { + java.util.Date date = (java.util.Date) object; + return date.getTime(); + } + return throwConstraintViolationException(actualType, this); + } + + @Override + public Long toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + if (l == 0) { + return null; + } + if ( + equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, + PUnsignedInt.INSTANCE, PSmallint.INSTANCE, 
PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, + PUnsignedTinyint.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, + PUnsignedDouble.INSTANCE, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, + PUnsignedTime.INSTANCE) + ) { + return actualType.getCodec().decodeLong(b, o, sortOrder); + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); + return bd.longValueExact(); } + throwConstraintViolationException(actualType, this); + return null; + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + // In general, don't allow conversion of LONG to INTEGER. There are times when + // we check isComparableTo for a more relaxed check and then throw a runtime + // exception if we overflow + return equalsAny(targetType, this, PDecimal.INSTANCE, PVarbinary.INSTANCE, PBinary.INSTANCE, + PDouble.INSTANCE); + } + + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, + Integer desiredScale, SortOrder expectedModifier) { + + // Decrease size of TIMESTAMP to size of LONG and continue coerce + if (ptr.getLength() > getByteSize() && actualType.isCoercibleTo(PTimestamp.INSTANCE)) { + ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); + } + super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + } + + @Override + public boolean isComparableTo(PDataType targetType) { + return PDecimal.INSTANCE.isComparableTo(targetType); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value != null) { + long l; + if ( + equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, + PUnsignedLong.INSTANCE) + ) { + l = (Long) value; + return l >= 0; + } else if (targetType.equals(PUnsignedInt.INSTANCE)) { + l = (Long) value; + return (l >= 0 && l <= Integer.MAX_VALUE); + } else if (targetType.equals(PInteger.INSTANCE)) { + l = (Long) value; + return (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE); + } else if (targetType.equals(PUnsignedSmallint.INSTANCE)) { + l = (Long) value; + return (l >= 0 && l <= Short.MAX_VALUE); + } else if (targetType.equals(PSmallint.INSTANCE)) { + l = (Long) value; + return (l >= Short.MIN_VALUE && l <= Short.MAX_VALUE); + } else if (targetType.equals(PTinyint.INSTANCE)) { + l = (Long) value; + return (l >= Byte.MIN_VALUE && l <= Byte.MAX_VALUE); + } else if (targetType.equals(PUnsignedTinyint.INSTANCE)) { + l = (Long) value; + return (l >= 0 && l <= Byte.MAX_VALUE); + } + } + return super.isCoercibleTo(targetType, value); + } + + @Override + public boolean isCastableTo(PDataType targetType) { + return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return Bytes.SIZEOF_LONG; + } + + @Override + public Integer getMaxLength(Object o) { + return LONG_PRECISION; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + if (rhsType == PDecimal.INSTANCE) { + return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).longValue())); + } else if ( + equalsAny(rhsType, PDouble.INSTANCE, PFloat.INSTANCE, 
PUnsignedDouble.INSTANCE, + PUnsignedFloat.INSTANCE) + ) { + return Doubles.compare(((Number) lhs).doubleValue(), ((Number) rhs).doubleValue()); + } + return Longs.compare(((Number) lhs).longValue(), ((Number) rhs).longValue()); + } - @Override - public int toBytes(Object object, byte[] b, int o) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - return this.getCodec().encodeLong(((Number) object).longValue(), b, o); + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + try { + return Long.parseLong(value); + } catch (NumberFormatException e) { + throw newIllegalDataException(e); } + } - @Override - public Object toObject(Object object, PDataType actualType) { - if (object == null) { - return null; - } - long s; - if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { - return object; - } else if (equalsAny(actualType, PUnsignedInt.INSTANCE, - PInteger.INSTANCE)) { - s = (Integer) object; - return s; - } else if (equalsAny(actualType, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE)) { - s = (Byte) object; - return s; - } else if (equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE)) { - s = (Short) object; - return s; - } else if (equalsAny(actualType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) { - Float f = (Float) object; - if (f > Long.MAX_VALUE || f < Long.MIN_VALUE) { - throw newIllegalDataException( - actualType + " value " + f + " cannot be cast to Long without changing its value"); - } - s = f.longValue(); - return s; - } else if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) { - Double de = (Double) object; - if (de > Long.MAX_VALUE || de < Long.MIN_VALUE) { - throw newIllegalDataException( - actualType + " value " + de + " cannot be cast to Long without changing its value"); - } - s = de.longValue(); - return s; - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal d = (BigDecimal) object; - return d.longValueExact(); - } else if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, - PUnsignedTime.INSTANCE)) { - java.util.Date date = (java.util.Date) object; - return date.getTime(); - } - return throwConstraintViolationException(actualType, this); + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + long val = RANDOM.get().nextLong(); + if (val == Long.MIN_VALUE) { + return Long.MAX_VALUE; } + return Math.abs(val); + } + + static class LongCodec extends BaseCodec { @Override - public Long toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - if (l == 0) { - return null; - } - if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE, - PInteger.INSTANCE, PUnsignedInt.INSTANCE, PSmallint.INSTANCE, - PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, PUnsignedTinyint.INSTANCE, PFloat.INSTANCE, - PUnsignedFloat.INSTANCE, PDouble.INSTANCE, PUnsignedDouble.INSTANCE, PDate.INSTANCE, - PUnsignedDate.INSTANCE, PTime.INSTANCE, PUnsignedTime.INSTANCE)) { - return actualType.getCodec().decodeLong(b, o, sortOrder); - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); - return bd.longValueExact(); - } - throwConstraintViolationException(actualType, this); - return null; + public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { + return decodeLong(b, o, sortOrder); } @Override - public boolean isCoercibleTo(PDataType 
targetType) { - // In general, don't allow conversion of LONG to INTEGER. There are times when - // we check isComparableTo for a more relaxed check and then throw a runtime - // exception if we overflow - return equalsAny(targetType, this, PDecimal.INSTANCE, PVarbinary.INSTANCE, PBinary.INSTANCE, PDouble.INSTANCE); + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { + return decodeLong(b, o, sortOrder); } @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - - // Decrease size of TIMESTAMP to size of LONG and continue coerce - if (ptr.getLength() > getByteSize() && actualType.isCoercibleTo(PTimestamp.INSTANCE)) { - ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); + public long decodeLong(byte[] bytes, int o, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + checkForSufficientLength(bytes, o, Bytes.SIZEOF_LONG); + long v; + byte b = bytes[o]; + if (sortOrder == SortOrder.ASC) { + v = b ^ 0x80; // Flip sign bit back + for (int i = 1; i < Bytes.SIZEOF_LONG; i++) { + b = bytes[o + i]; + v = (v << 8) + (b & 0xff); } - super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, - desiredScale, expectedModifier); + } else { + b = (byte) (b ^ 0xff); + v = b ^ 0x80; // Flip sign bit back + for (int i = 1; i < Bytes.SIZEOF_LONG; i++) { + b = bytes[o + i]; + b ^= 0xff; + v = (v << 8) + (b & 0xff); + } + } + return v; } @Override - public boolean isComparableTo(PDataType targetType) { - return PDecimal.INSTANCE.isComparableTo(targetType); + public int decodeInt(byte[] b, int o, SortOrder sortOrder) { + long v = decodeLong(b, o, sortOrder); + if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be cast to Integer without changing its value"); + } + return (int) v; } @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value != null) { - long l; - if (equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, - PUnsignedLong.INSTANCE)) { - l = (Long) value; - return l >= 0; - } else if (targetType.equals(PUnsignedInt.INSTANCE)) { - l = (Long) value; - return (l >= 0 && l <= Integer.MAX_VALUE); - } else if (targetType.equals(PInteger.INSTANCE)) { - l = (Long) value; - return (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE); - } else if (targetType.equals(PUnsignedSmallint.INSTANCE)) { - l = (Long) value; - return (l >= 0 && l <= Short.MAX_VALUE); - } else if (targetType.equals(PSmallint.INSTANCE)) { - l = (Long) value; - return (l >= Short.MIN_VALUE && l <= Short.MAX_VALUE); - } else if (targetType.equals(PTinyint.INSTANCE)) { - l = (Long) value; - return (l >= Byte.MIN_VALUE && l <= Byte.MAX_VALUE); - } else if (targetType.equals(PUnsignedTinyint.INSTANCE)) { - l = (Long) value; - return (l >= 0 && l <= Byte.MAX_VALUE); - } - } - return super.isCoercibleTo(targetType, value); + public int encodeFloat(float v, byte[] b, int o) { + if (v < Long.MIN_VALUE || v > Long.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Long without changing its value"); + } + return encodeLong((long) v, b, o); } @Override - public boolean isCastableTo(PDataType targetType) { - return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE); + public int encodeDouble(double v, byte[] b, int o) { + if 
(v < Long.MIN_VALUE || v > Long.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Long without changing its value"); + } + return encodeLong((long) v, b, o); } @Override - public boolean isFixedWidth() { - return true; + public int encodeLong(long v, byte[] b, int o) { + checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); + b[o + 0] = (byte) ((v >> 56) ^ 0x80); // Flip sign bit so that INTEGER is binary comparable + b[o + 1] = (byte) (v >> 48); + b[o + 2] = (byte) (v >> 40); + b[o + 3] = (byte) (v >> 32); + b[o + 4] = (byte) (v >> 24); + b[o + 5] = (byte) (v >> 16); + b[o + 6] = (byte) (v >> 8); + b[o + 7] = (byte) v; + return Bytes.SIZEOF_LONG; } @Override - public Integer getByteSize() { - return Bytes.SIZEOF_LONG; + public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { + long v = decodeLong(b, o, sortOrder); + if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be cast to Byte without changing its value"); + } + return (byte) v; } @Override - public Integer getMaxLength(Object o) { - return LONG_PRECISION; + public short decodeShort(byte[] b, int o, SortOrder sortOrder) { + long v = decodeLong(b, o, sortOrder); + if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be cast to Short without changing its value"); + } + return (short) v; } @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - if (rhsType == PDecimal.INSTANCE) { - return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).longValue())); - } else if (equalsAny(rhsType, PDouble.INSTANCE, PFloat.INSTANCE, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE)) { - return Doubles.compare(((Number) lhs).doubleValue(), ((Number) rhs).doubleValue()); - } - return Longs.compare(((Number) lhs).longValue(), ((Number) rhs).longValue()); + public int encodeByte(byte v, byte[] b, int o) { + return encodeLong(v, b, o); } @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - try { - return Long.parseLong(value); - } catch (NumberFormatException e) { - throw newIllegalDataException(e); - } + public int encodeShort(short v, byte[] b, int o) { + return encodeLong(v, b, o); } @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - long val = RANDOM.get().nextLong(); - if (val == Long.MIN_VALUE) { - return Long.MAX_VALUE; - } - return Math.abs(val); - } - - static class LongCodec extends BaseCodec { - - @Override - public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { - return decodeLong(b, o, sortOrder); - } - - @Override - public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { - return decodeLong(b, o, sortOrder); - } - - @Override - public long decodeLong(byte[] bytes, int o, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - checkForSufficientLength(bytes, o, Bytes.SIZEOF_LONG); - long v; - byte b = bytes[o]; - if (sortOrder == SortOrder.ASC) { - v = b ^ 0x80; // Flip sign bit back - for (int i = 1; i < Bytes.SIZEOF_LONG; i++) { - b = bytes[o + i]; - v = (v << 8) + (b & 0xff); - } - } else { - b = (byte) (b ^ 0xff); - v = b ^ 0x80; // Flip sign bit back - for (int i = 1; i < Bytes.SIZEOF_LONG; i++) { - b = bytes[o + i]; - b ^= 0xff; - v = (v << 8) + (b & 0xff); - } - } - return v; - } - - @Override - public int 
decodeInt(byte[] b, int o, SortOrder sortOrder) { - long v = decodeLong(b, o, sortOrder); - if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) { - throw newIllegalDataException( - "Value " + v + " cannot be cast to Integer without changing its value"); - } - return (int) v; - } - - @Override - public int encodeFloat(float v, byte[] b, int o) { - if (v < Long.MIN_VALUE || v > Long.MAX_VALUE) { - throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Long without changing its value"); - } - return encodeLong((long) v, b, o); - } - - @Override - public int encodeDouble(double v, byte[] b, int o) { - if (v < Long.MIN_VALUE || v > Long.MAX_VALUE) { - throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Long without changing its value"); - } - return encodeLong((long) v, b, o); - } - - @Override - public int encodeLong(long v, byte[] b, int o) { - checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); - b[o + 0] = (byte) ((v >> 56) ^ 0x80); // Flip sign bit so that INTEGER is binary comparable - b[o + 1] = (byte) (v >> 48); - b[o + 2] = (byte) (v >> 40); - b[o + 3] = (byte) (v >> 32); - b[o + 4] = (byte) (v >> 24); - b[o + 5] = (byte) (v >> 16); - b[o + 6] = (byte) (v >> 8); - b[o + 7] = (byte) v; - return Bytes.SIZEOF_LONG; - } - - @Override - public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { - long v = decodeLong(b, o, sortOrder); - if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { - throw newIllegalDataException( - "Value " + v + " cannot be cast to Byte without changing its value"); - } - return (byte) v; - } - - @Override - public short decodeShort(byte[] b, int o, SortOrder sortOrder) { - long v = decodeLong(b, o, sortOrder); - if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { - throw newIllegalDataException( - "Value " + v + " cannot be cast to Short without changing its value"); - } - return (short) v; - } - - @Override - public int encodeByte(byte v, byte[] b, int o) { - return encodeLong(v, b, o); - } - - @Override - public int encodeShort(short v, byte[] b, int o) { - return encodeLong(v, b, o); - } - + public PhoenixArrayFactory getPhoenixArrayFactory() { + return new PhoenixArrayFactory() { @Override - public PhoenixArrayFactory getPhoenixArrayFactory() { - return new PhoenixArrayFactory() { - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray.PrimitiveLongPhoenixArray(type, elements); - } - }; + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray.PrimitiveLongPhoenixArray(type, elements); } + }; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLongArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLongArray.java index aef6437d1f3..5a3ae8c16c3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLongArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PLongArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PLongArray extends PArrayDataType { - public static final PLongArray INSTANCE = new PLongArray(); + public static final PLongArray INSTANCE = new PLongArray(); - private PLongArray() { - super("BIGINT ARRAY", PDataType.ARRAY_TYPE_BASE + PLong.INSTANCE.getSqlType(), - PhoenixArray.class, null, 30); - } + private PLongArray() { + super("BIGINT ARRAY", PDataType.ARRAY_TYPE_BASE + PLong.INSTANCE.getSqlType(), + PhoenixArray.class, null, 30); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PLong.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PLong.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PLong.INSTANCE, sortOrder, maxLength, scale, - PLong.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PLong.INSTANCE, sortOrder, maxLength, scale, + PLong.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveLongPhoenixArray pArr = (PhoenixArray.PrimitiveLongPhoenixArray) value; - long[] longArr = (long[]) pArr.array; - for (long i : longArr) { - if (!super.isCoercibleTo(PLong.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PLong.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveLongPhoenixArray pArr = (PhoenixArray.PrimitiveLongPhoenixArray) value; + long[] longArr = (long[]) pArr.array; + for (long i : longArr) { + if (!super.isCoercibleTo(PLong.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, 
Integer arrayLength) { + return getSampleValue(PLong.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PNumericType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PNumericType.java index 3e18d2d7617..9b48c3c6ff3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PNumericType.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PNumericType.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,39 +28,38 @@ */ public abstract class PNumericType extends PDataType { - protected PNumericType(String sqlTypeName, int sqlType, Class clazz, - org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); - } + protected PNumericType(String sqlTypeName, int sqlType, Class clazz, + org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } - public final int signum(byte[] bytes, int offset, int length, SortOrder sortOrder) { - return signum(bytes, offset, length, sortOrder, null, null); - } + public final int signum(byte[] bytes, int offset, int length, SortOrder sortOrder) { + return signum(bytes, offset, length, sortOrder, null, null); + } - public final int signum(ImmutableBytesWritable ptr, SortOrder sortOrder) { - return signum(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); - } + public final int signum(ImmutableBytesWritable ptr, SortOrder sortOrder) { + return signum(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); + } - abstract public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, - Integer maxLength, Integer scale); + abstract public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, + Integer maxLength, Integer scale); - abstract public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, - ImmutableBytesWritable outPtr); + abstract public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, + ImmutableBytesWritable outPtr); - public final void abs(ImmutableBytesWritable ptr, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - abs(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, outPtr); - } + public final void abs(ImmutableBytesWritable ptr, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + abs(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, outPtr); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - PDataType pType = PDataTypeFactory.getInstance().instanceFromJavaClass(jdbcType, this); - if (pType == null || !PNumericType.class.isAssignableFrom(pType.getClass())) { - throw newMismatchException(actualType, jdbcType); - } else { - return pType.toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + PDataType 
pType = PDataTypeFactory.getInstance().instanceFromJavaClass(jdbcType, this); + if (pType == null || !PNumericType.class.isAssignableFrom(pType.getClass())) { + throw newMismatchException(actualType, jdbcType); + } else { + return pType.toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java index 4cab4338ab2..ca7413bd962 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,25 +23,25 @@ public abstract class PRealNumber extends PNumericType { - protected PRealNumber(String sqlTypeName, int sqlType, Class clazz, - org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); - } + protected PRealNumber(String sqlTypeName, int sqlType, Class clazz, + org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } - @Override - public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, - Integer scale) { - double d = getCodec().decodeDouble(bytes, offset, sortOrder); - if (Double.isNaN(d)) { - throw new IllegalDataException(); - } - return (d > 0) ? 1 : ((d < 0) ? -1 : 0); + @Override + public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, + Integer scale) { + double d = getCodec().decodeDouble(bytes, offset, sortOrder); + if (Double.isNaN(d)) { + throw new IllegalDataException(); } + return (d > 0) ? 1 : ((d < 0) ? -1 : 0); + } - @Override - public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - double d = getCodec().decodeDouble(bytes, offset, sortOrder); - getCodec().encodeDouble(Math.abs(d), outPtr); - } + @Override + public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + double d = getCodec().decodeDouble(bytes, offset, sortOrder); + getCodec().encodeDouble(Math.abs(d), outPtr); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallint.java index c0d1d9ecfd5..89895194ec6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PSmallint extends PWholeNumber { @@ -34,227 +33,237 @@ private PSmallint() { } @Override - public Integer getScale(Object o) { - return ZERO; + public Integer getScale(Object o) { + return ZERO; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return PLong.INSTANCE.compareTo(lhs, rhs, rhsType); + } + + @Override + public boolean isComparableTo(PDataType targetType) { + return PDecimal.INSTANCE.isComparableTo(targetType); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return Bytes.SIZEOF_SHORT; + } + + @Override + public Integer getMaxLength(Object o) { + return SHORT_PRECISION; + } + + @Override + public byte[] toBytes(Object object) { + byte[] b = new byte[Bytes.SIZEOF_SHORT]; + toBytes(object, b, 0); + return b; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); } + return this.getCodec().encodeShort(((Number) object).shortValue(), bytes, offset); + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return PLong.INSTANCE.compareTo(lhs, rhs, rhsType); + @Override + public Object toObject(Object object, PDataType actualType) { + Object o = PLong.INSTANCE.toObject(object, actualType); + if (!(o instanceof Long) || o == null) { + return o; + } + long l = (Long) o; + if (l < Short.MIN_VALUE || l > Short.MAX_VALUE) { + throw newIllegalDataException( + actualType + " value " + l + " cannot be cast to Short without changing its value"); } + short s = (short) l; + return s; + } - @Override - public boolean isComparableTo(PDataType targetType) { - return PDecimal.INSTANCE.isComparableTo(targetType); + @Override + public Short toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + if (l == 0) { + return null; + } + if ( + equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, + PUnsignedTinyint.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, + PUnsignedInt.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, + PUnsignedDouble.INSTANCE) + ) { + return actualType.getCodec().decodeShort(b, o, sortOrder); + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); + return bd.shortValueExact(); } + throwConstraintViolationException(actualType, this); + return null; + } - @Override - public boolean isFixedWidth() { - return true; + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; } + try { + return Short.parseShort(value); + } catch (NumberFormatException e) { + throw newIllegalDataException(e); + } + } - @Override - public Integer getByteSize() { - return Bytes.SIZEOF_SHORT; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value != null) { + short i; + if ( + equalsAny(targetType, 
PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, + PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE, PUnsignedSmallint.INSTANCE) + ) { + i = (Short) value; + return i >= 0; + } else if (targetType == PUnsignedTinyint.INSTANCE) { + i = (Short) value; + return (i >= 0 && i <= Byte.MAX_VALUE); + } else if (targetType == PTinyint.INSTANCE) { + i = (Short) value; + return (i >= Byte.MIN_VALUE && i <= Byte.MAX_VALUE); + } } + return super.isCoercibleTo(targetType, value); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return this.equals(targetType) || PInteger.INSTANCE.isCoercibleTo(targetType); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return ((Integer) PInteger.INSTANCE.getSampleValue(maxLength, arrayLength)).shortValue(); + } + + static class ShortCodec extends BaseCodec { @Override - public Integer getMaxLength(Object o) { - return SHORT_PRECISION; + public long decodeLong(byte[] b, int o, SortOrder sortOrder) { + return decodeShort(b, o, sortOrder); } @Override - public byte[] toBytes(Object object) { - byte[] b = new byte[Bytes.SIZEOF_SHORT]; - toBytes(object, b, 0); - return b; + public int decodeInt(byte[] b, int o, SortOrder sortOrder) { + return decodeShort(b, o, sortOrder); } @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); + public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { + short v = decodeShort(b, o, sortOrder); + if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be cast to Byte without changing its value"); } - return this.getCodec().encodeShort(((Number)object).shortValue(), bytes, offset); + return (byte) v; } @Override - public Object toObject(Object object, PDataType actualType) { - Object o = PLong.INSTANCE.toObject(object, actualType); - if (!(o instanceof Long) || o == null) { - return o; - } - long l = (Long)o; - if (l < Short.MIN_VALUE || l > Short.MAX_VALUE) { - throw newIllegalDataException(actualType + " value " + l + " cannot be cast to Short without changing its value"); + public short decodeShort(byte[] b, int o, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + checkForSufficientLength(b, o, Bytes.SIZEOF_SHORT); + int v; + if (sortOrder == SortOrder.ASC) { + v = b[o] ^ 0x80; // Flip sign bit back + for (int i = 1; i < Bytes.SIZEOF_SHORT; i++) { + v = (v << 8) + (b[o + i] & 0xff); + } + } else { + v = b[o] ^ 0xff ^ 0x80; // Flip sign bit back + for (int i = 1; i < Bytes.SIZEOF_SHORT; i++) { + v = (v << 8) + ((b[o + i] ^ 0xff) & 0xff); + } } - short s = (short)l; - return s; + return (short) v; } @Override - public Short toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, Integer maxLength, Integer scale) { - if (l == 0) { - return null; - } - if (equalsAny(actualType, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, - PUnsignedTinyint.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, - PUnsignedInt.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PDouble.INSTANCE, - PUnsignedDouble.INSTANCE)) { - return actualType.getCodec().decodeShort(b, o, sortOrder); - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal bd = (BigDecimal)actualType.toObject(b, o, l, actualType, sortOrder); - return bd.shortValueExact(); - } - throwConstraintViolationException(actualType,this); - return null; + public int encodeShort(short v, byte[] b, int o) 
{ + checkForSufficientLength(b, o, Bytes.SIZEOF_SHORT); + b[o + 0] = (byte) ((v >> 8) ^ 0x80); // Flip sign bit so that Short is binary comparable + b[o + 1] = (byte) v; + return Bytes.SIZEOF_SHORT; } @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - try { - return Short.parseShort(value); - } catch (NumberFormatException e) { - throw newIllegalDataException(e); + public int encodeLong(long v, byte[] b, int o) { + if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Short without changing its value"); } + return encodeShort((short) v, b, o); } @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value != null) { - short i; - if (equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, - PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE, PUnsignedSmallint.INSTANCE)) { - i = (Short) value; - return i >= 0; - } else if (targetType == PUnsignedTinyint.INSTANCE) { - i = (Short) value; - return (i >= 0 && i <= Byte.MAX_VALUE); - } else if (targetType == PTinyint.INSTANCE) { - i = (Short) value; - return (i >= Byte.MIN_VALUE && i <= Byte.MAX_VALUE); - } + public int encodeInt(int v, byte[] b, int o) { + if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Short without changing its value"); } - return super.isCoercibleTo(targetType, value); + return encodeShort((short) v, b, o); } @Override - public boolean isCoercibleTo(PDataType targetType) { - return this.equals(targetType) || PInteger.INSTANCE.isCoercibleTo(targetType); + public int encodeByte(byte v, byte[] b, int o) { + return encodeShort(v, b, o); } @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return ((Integer) PInteger.INSTANCE.getSampleValue(maxLength, arrayLength)).shortValue(); + public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { + return decodeShort(b, o, sortOrder); } - static class ShortCodec extends BaseCodec { - - @Override - public long decodeLong(byte[] b, int o, SortOrder sortOrder) { - return decodeShort(b, o, sortOrder); - } - - @Override - public int decodeInt(byte[] b, int o, SortOrder sortOrder) { - return decodeShort(b, o, sortOrder); - } - - @Override - public byte decodeByte(byte[] b, int o, SortOrder sortOrder) { - short v = decodeShort(b, o, sortOrder); - if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { - throw newIllegalDataException("Value " + v + " cannot be cast to Byte without changing its value"); - } - return (byte)v; - } - - @Override - public short decodeShort(byte[] b, int o, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - checkForSufficientLength(b, o, Bytes.SIZEOF_SHORT); - int v; - if (sortOrder == SortOrder.ASC) { - v = b[o] ^ 0x80; // Flip sign bit back - for (int i = 1; i < Bytes.SIZEOF_SHORT; i++) { - v = (v << 8) + (b[o + i] & 0xff); - } - } else { - v = b[o] ^ 0xff ^ 0x80; // Flip sign bit back - for (int i = 1; i < Bytes.SIZEOF_SHORT; i++) { - v = (v << 8) + ((b[o + i] ^ 0xff) & 0xff); - } - } - return (short)v; - } + @Override + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { + return decodeShort(b, o, sortOrder); + } - @Override - public int encodeShort(short v, byte[] b, int o) { - checkForSufficientLength(b, o, Bytes.SIZEOF_SHORT); - b[o + 0] = (byte) ((v >> 8) ^ 0x80); // Flip sign bit so that Short is binary comparable - b[o + 1] = (byte) v; - return 
Bytes.SIZEOF_SHORT; + @Override + public int encodeDouble(double v, byte[] b, int o) { + if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Short without changing its value"); } + return encodeShort((short) v, b, o); + } - @Override - public int encodeLong(long v, byte[] b, int o) { - if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { - throw newIllegalDataException("Value " + v + " cannot be encoded as an Short without changing its value"); - } - return encodeShort((short)v,b,o); + @Override + public int encodeFloat(float v, byte[] b, int o) { + if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { + throw newIllegalDataException( + "Value " + v + " cannot be encoded as an Short without changing its value"); } + return encodeShort((short) v, b, o); + } - @Override - public int encodeInt(int v, byte[] b, int o) { - if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { - throw newIllegalDataException("Value " + v + " cannot be encoded as an Short without changing its value"); + @Override + public PhoenixArrayFactory getPhoenixArrayFactory() { + return new PhoenixArrayFactory() { + @Override + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray.PrimitiveShortPhoenixArray(type, elements); } - return encodeShort((short)v,b,o); - } - - @Override - public int encodeByte(byte v, byte[] b, int o) { - return encodeShort(v,b,o); - } - - @Override - public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { - return decodeShort(b, o, sortOrder); - } - - @Override - public double decodeDouble(byte[] b, int o, - SortOrder sortOrder) { - return decodeShort(b, o, sortOrder); - } - - @Override - public int encodeDouble(double v, byte[] b, int o) { - if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { - throw newIllegalDataException("Value " + v + " cannot be encoded as an Short without changing its value"); - } - return encodeShort((short)v,b,o); - } - - @Override - public int encodeFloat(float v, byte[] b, int o) { - if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) { - throw newIllegalDataException("Value " + v + " cannot be encoded as an Short without changing its value"); - } - return encodeShort((short)v,b,o); - } - - @Override - public PhoenixArrayFactory getPhoenixArrayFactory() { - return new PhoenixArrayFactory() { - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray.PrimitiveShortPhoenixArray(type, elements); - } - }; - } + }; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallintArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallintArray.java index aea4384db45..28de19c419c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallintArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PSmallintArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PSmallintArray extends PArrayDataType { - public static final PSmallintArray INSTANCE = new PSmallintArray(); + public static final PSmallintArray INSTANCE = new PSmallintArray(); - private PSmallintArray() { - super("SMALLINT ARRAY", PDataType.ARRAY_TYPE_BASE + PSmallint.INSTANCE.getSqlType(), - PhoenixArray.class, null, 31); - } + private PSmallintArray() { + super("SMALLINT ARRAY", PDataType.ARRAY_TYPE_BASE + PSmallint.INSTANCE.getSqlType(), + PhoenixArray.class, null, 31); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PSmallint.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PSmallint.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PSmallint.INSTANCE, sortOrder, maxLength, scale, - PSmallint.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PSmallint.INSTANCE, sortOrder, maxLength, scale, + PSmallint.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveShortPhoenixArray pArr = (PhoenixArray.PrimitiveShortPhoenixArray) value; - short[] shortArr = (short[]) pArr.array; - for (short i : shortArr) { - if (!super.isCoercibleTo(PSmallint.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PSmallint.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveShortPhoenixArray pArr = (PhoenixArray.PrimitiveShortPhoenixArray) value; + short[] shortArr = (short[]) pArr.array; + for (short i : shortArr) { + if (!super.isCoercibleTo(PSmallint.INSTANCE, i)) { + return false; + 
} } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PSmallint.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTime.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTime.java index e0bcd9232ac..409300dd18a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTime.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTime.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,13 +48,15 @@ public int toBytes(Object object, byte[] bytes, int offset) { @Override public java.sql.Time toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { if (l == 0) { return null; } - if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE, PDate.INSTANCE, + if ( + equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, PUnsignedTime.INSTANCE, PLong.INSTANCE, - PUnsignedLong.INSTANCE)) { + PUnsignedLong.INSTANCE) + ) { return new java.sql.Time(DateUtil.getCodecFor(actualType).decodeLong(b, o, sortOrder)); } else if (actualType == PDecimal.INSTANCE) { BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); @@ -87,41 +89,38 @@ public Object toObject(Object object, PDataType actualType) { @Override public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - java.sql.Time sqlTime = - toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - return timeToClass(sqlTime, actualType, jdbcType); + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + java.sql.Time sqlTime = + toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + return timeToClass(sqlTime, actualType, jdbcType); } Object timeToClass(java.sql.Time sqlTime, PDataType actualType, Class jdbcType) - throws SQLException { - if (jdbcType == java.time.LocalTime.class) { - // FIXME this does a lot of unnecessary computation. - return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), ZoneOffset.UTC) - .toLocalTime(); - } else if (jdbcType == java.time.LocalDateTime.class) { - // This is NOT JDBC compliant - // We cannot use toInstant(), as that nulls the time fields. - return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), ZoneOffset.UTC); - } else if (jdbcType == java.time.LocalDate.class) { - // This is NOT JDBC compliant - // FIXME this does a lot of unnecessary computation. 
- return java.time.LocalDateTime - .ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), ZoneOffset.UTC) - .toLocalDate(); - } else if (jdbcType == java.sql.Time.class) { - return sqlTime; - } else if (jdbcType == java.sql.Date.class) { - return new java.sql.Date(sqlTime.getTime()); - } else if (jdbcType == java.sql.Timestamp.class) { - return new java.sql.Timestamp(sqlTime.getTime()); - } else if (jdbcType == java.util.Date.class) { - return new java.util.Date(sqlTime.getTime()); - } - throw newMismatchException(actualType, jdbcType); + throws SQLException { + if (jdbcType == java.time.LocalTime.class) { + // FIXME this does a lot of unnecessary computation. + return java.time.LocalDateTime + .ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), ZoneOffset.UTC).toLocalTime(); + } else if (jdbcType == java.time.LocalDateTime.class) { + // This is NOT JDBC compliant + // We cannot use toInstant(), as that nulls the time fields. + return java.time.LocalDateTime.ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), + ZoneOffset.UTC); + } else if (jdbcType == java.time.LocalDate.class) { + // This is NOT JDBC compliant + // FIXME this does a lot of unnecessary computation. + return java.time.LocalDateTime + .ofInstant(java.time.Instant.ofEpochMilli(sqlTime.getTime()), ZoneOffset.UTC).toLocalDate(); + } else if (jdbcType == java.sql.Time.class) { + return sqlTime; + } else if (jdbcType == java.sql.Date.class) { + return new java.sql.Date(sqlTime.getTime()); + } else if (jdbcType == java.sql.Timestamp.class) { + return new java.sql.Timestamp(sqlTime.getTime()); + } else if (jdbcType == java.util.Date.class) { + return new java.util.Date(sqlTime.getTime()); + } + throw newMismatchException(actualType, jdbcType); } @Override @@ -164,15 +163,16 @@ public Object toObject(String value) { @Override public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType == PDate.INSTANCE || otherType == PTimestamp.INSTANCE || otherType == PLong.INSTANCE; + return super.isBytesComparableWith(otherType) || otherType == PDate.INSTANCE + || otherType == PTimestamp.INSTANCE || otherType == PLong.INSTANCE; } @Override public String toStringLiteral(Object o, Format formatter) { - if (formatter == null) { - formatter = DateUtil.DEFAULT_TIME_FORMATTER; - } - return "'" + super.toStringLiteral(o, formatter) + "'"; + if (formatter == null) { + formatter = DateUtil.DEFAULT_TIME_FORMATTER; + } + return "'" + super.toStringLiteral(o, formatter) + "'"; } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimeArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimeArray.java index c19fd71af07..ad6b29783e2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimeArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimeArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,73 +23,72 @@ public class PTimeArray extends PArrayDataType { - public static final PTimeArray INSTANCE = new PTimeArray(); + public static final PTimeArray INSTANCE = new PTimeArray(); - private PTimeArray() { - super("TIME ARRAY", PDataType.ARRAY_TYPE_BASE + PTime.INSTANCE.getSqlType(), PhoenixArray.class, - null, 38); - } + private PTimeArray() { + super("TIME ARRAY", PDataType.ARRAY_TYPE_BASE + PTime.INSTANCE.getSqlType(), PhoenixArray.class, + null, 38); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PTime.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PTime.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PTime.INSTANCE, sortOrder, maxLength, scale, - PTime.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PTime.INSTANCE, sortOrder, maxLength, scale, + PTime.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] timeArr = (Object[]) pArr.array; - for (Object i : timeArr) { - if (!super.isCoercibleTo(PTime.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PTime.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] timeArr = (Object[]) pArr.array; + for (Object i : timeArr) { + if (!super.isCoercibleTo(PTime.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PTime.INSTANCE, arrayLength, maxLength); + } } diff 
--git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java index 53149ec22b9..d7fa7696b05 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,317 +35,321 @@ import org.apache.phoenix.util.DateUtil; public class PTimestamp extends PDataType { - public static final int MAX_NANOS_VALUE_EXCLUSIVE = 1000000; - public static final PTimestamp INSTANCE = new PTimestamp(); + public static final int MAX_NANOS_VALUE_EXCLUSIVE = 1000000; + public static final PTimestamp INSTANCE = new PTimestamp(); - protected PTimestamp(String sqlTypeName, int sqlType, int ordinal) { - super(sqlTypeName, sqlType, java.sql.Timestamp.class, null, ordinal); - } + protected PTimestamp(String sqlTypeName, int sqlType, int ordinal) { + super(sqlTypeName, sqlType, java.sql.Timestamp.class, null, ordinal); + } - private PTimestamp() { - super("TIMESTAMP", Types.TIMESTAMP, java.sql.Timestamp.class, - null, 9); - } + private PTimestamp() { + super("TIMESTAMP", Types.TIMESTAMP, java.sql.Timestamp.class, null, 9); + } - @Override - public byte[] toBytes(Object object) { - byte[] bytes = new byte[getByteSize()]; - toBytes(object, bytes, 0); - return bytes; - } + @Override + public byte[] toBytes(Object object) { + byte[] bytes = new byte[getByteSize()]; + toBytes(object, bytes, 0); + return bytes; + } - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, Integer actualMaxLength, - Integer actualScale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - Preconditions.checkNotNull(actualModifier); - Preconditions.checkNotNull(expectedModifier); - if (ptr.getLength() == 0) { return; } - if (this.isBytesComparableWith(actualType)) { // No coerce necessary - if (actualModifier != expectedModifier || (actualType.isFixedWidth() && actualType.getByteSize() < this.getByteSize())) { - byte[] b = new byte[this.getByteSize()]; - System.arraycopy(ptr.get(), ptr.getOffset(), b, 0, actualType.getByteSize()); - ptr.set(b); - - if (actualModifier != expectedModifier) { - SortOrder.invert(b, 0, b, 0, b.length); - } - } - return; - } - super.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, desiredScale, expectedModifier); + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, + Integer actualMaxLength, Integer actualScale, SortOrder actualModifier, + Integer desiredMaxLength, Integer desiredScale, SortOrder expectedModifier) { + Preconditions.checkNotNull(actualModifier); + Preconditions.checkNotNull(expectedModifier); + if (ptr.getLength() == 0) { + return; } + if (this.isBytesComparableWith(actualType)) { // No coerce necessary + if ( + actualModifier != expectedModifier + || (actualType.isFixedWidth() && actualType.getByteSize() < this.getByteSize()) + ) { + byte[] b = new byte[this.getByteSize()]; + System.arraycopy(ptr.get(), ptr.getOffset(), 
b, 0, actualType.getByteSize()); + ptr.set(b); - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - // Create the byte[] of size MAX_TIMESTAMP_BYTES - if(bytes.length != getByteSize()) { - bytes = Bytes.padTail(bytes, (getByteSize() - bytes.length)); - } - PDate.INSTANCE.getCodec().encodeLong(0l, bytes, offset); - Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0); - return getByteSize(); + if (actualModifier != expectedModifier) { + SortOrder.invert(b, 0, b, 0, b.length); } - java.sql.Timestamp value = (java.sql.Timestamp) object; - // For Timestamp, the getTime() method includes milliseconds that may - // be stored in the nanos part as well. - DateUtil.getCodecFor(this).encodeLong(value.getTime(), bytes, offset); - - /* - * By not getting the stuff that got spilled over from the millis part, - * it leaves the timestamp's byte representation saner - 8 bytes of millis | 4 bytes of nanos. - * Also, it enables timestamp bytes to be directly compared with date/time bytes. - */ - Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, value.getNanos() % MAX_NANOS_VALUE_EXCLUSIVE); - return getByteSize(); + } + return; } + super.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, + desiredMaxLength, desiredScale, expectedModifier); + } - @Override - public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType == PTime.INSTANCE || otherType == PDate.INSTANCE || otherType == PLong.INSTANCE; + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + // Create the byte[] of size MAX_TIMESTAMP_BYTES + if (bytes.length != getByteSize()) { + bytes = Bytes.padTail(bytes, (getByteSize() - bytes.length)); + } + PDate.INSTANCE.getCodec().encodeLong(0l, bytes, offset); + Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0); + return getByteSize(); } + java.sql.Timestamp value = (java.sql.Timestamp) object; + // For Timestamp, the getTime() method includes milliseconds that may + // be stored in the nanos part as well. + DateUtil.getCodecFor(this).encodeLong(value.getTime(), bytes, offset); - @Override - public Object toObject(Object object, PDataType actualType) { - if (object == null) { - return null; - } - if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, - PUnsignedTime.INSTANCE)) { - return new java.sql.Timestamp(((java.util.Date) object).getTime()); - } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { - return object; - } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { - return new java.sql.Timestamp((Long) object); - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal bd = (BigDecimal) object; - long ms = bd.longValue(); - int nanos = - (bd.remainder(BigDecimal.ONE).multiply(QueryConstants.BD_MILLIS_NANOS_CONVERSION)) - .intValue(); - return DateUtil.getTimestamp(ms, nanos); - } else if (actualType == PVarchar.INSTANCE) { - return DateUtil.parseTimestamp((String) object); - } - return throwConstraintViolationException(actualType, this); - } + /* + * By not getting the stuff that got spilled over from the millis part, it leaves the + * timestamp's byte representation saner - 8 bytes of millis | 4 bytes of nanos. Also, it + * enables timestamp bytes to be directly compared with date/time bytes. 
+ */ + Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, value.getNanos() % MAX_NANOS_VALUE_EXCLUSIVE); + return getByteSize(); + } - @Override - public java.sql.Timestamp toObject(byte[] b, int o, int l, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - if (actualType == null || l == 0) { - return null; - } - java.sql.Timestamp v; - if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { - long millisDeserialized = - DateUtil.getCodecFor(actualType).decodeLong(b, o, sortOrder); - v = new java.sql.Timestamp(millisDeserialized); - int nanosDeserialized = - PUnsignedInt.INSTANCE.getCodec().decodeInt(b, o + Bytes.SIZEOF_LONG, sortOrder); - /* - * There was a bug in serialization of timestamps which was causing the sub-second millis part - * of time stamp to be present both in the LONG and INT bytes. Having the <100000 check - * makes this serialization fix backward compatible. - */ - v.setNanos( - nanosDeserialized < MAX_NANOS_VALUE_EXCLUSIVE ? v.getNanos() + nanosDeserialized : nanosDeserialized); - return v; - } else if (equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, - PUnsignedTime.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { - return new java.sql.Timestamp(actualType.getCodec().decodeLong(b, o, sortOrder)); - } else if (actualType == PDecimal.INSTANCE) { - BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); - long ms = bd.longValue(); - int nanos = (bd.remainder(BigDecimal.ONE).multiply(QueryConstants.BD_MILLIS_NANOS_CONVERSION)) - .intValue(); - v = DateUtil.getTimestamp(ms, nanos); - return v; - } - throwConstraintViolationException(actualType, this); - return null; - } + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return super.isBytesComparableWith(otherType) || otherType == PTime.INSTANCE + || otherType == PDate.INSTANCE || otherType == PLong.INSTANCE; + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - java.sql.Timestamp sqlTs = - toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - return dateToClass(sqlTs, actualType, jdbcType); + @Override + public Object toObject(Object object, PDataType actualType) { + if (object == null) { + return null; } - - Object dateToClass(java.sql.Timestamp sqlTs, PDataType actualType, Class jdbcType) - throws SQLException { - // FIXME java.time.Local conversions use ISO chronology, unlike the rest of Phoenix. 
- if (jdbcType == java.time.LocalDateTime.class) { - return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC); - } else if (jdbcType == java.time.LocalTime.class) { - // This is NOT JDBC compliant - // This preserves nanos - return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC) - .toLocalTime(); - } else if (jdbcType == java.time.LocalDate.class) { - // This is NOT JDBC compliant - return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC) - .toLocalDate(); - } else if (jdbcType == java.sql.Timestamp.class) { - return sqlTs; - } else if (jdbcType == java.sql.Date.class) { - return new java.sql.Date(sqlTs.getTime()); - } else if (jdbcType == java.util.Date.class) { - return new java.util.Date(sqlTs.getTime()); - } else if (jdbcType == java.sql.Time.class) { - return new java.sql.Time(sqlTs.getTime()); - } - throw newMismatchException(actualType, jdbcType); + if ( + equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, + PUnsignedTime.INSTANCE) + ) { + return new java.sql.Timestamp(((java.util.Date) object).getTime()); + } else if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + return object; + } else if (equalsAny(actualType, PLong.INSTANCE, PUnsignedLong.INSTANCE)) { + return new java.sql.Timestamp((Long) object); + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) object; + long ms = bd.longValue(); + int nanos = (bd.remainder(BigDecimal.ONE).multiply(QueryConstants.BD_MILLIS_NANOS_CONVERSION)) + .intValue(); + return DateUtil.getTimestamp(ms, nanos); + } else if (actualType == PVarchar.INSTANCE) { + return DateUtil.parseTimestamp((String) object); } + return throwConstraintViolationException(actualType, this); + } - @Override - public boolean isCastableTo(PDataType targetType) { - return PDate.INSTANCE.isCastableTo(targetType); + @Override + public java.sql.Timestamp toObject(byte[] b, int o, int l, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (actualType == null || l == 0) { + return null; } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PVarbinary.INSTANCE, PBinary.INSTANCE); + java.sql.Timestamp v; + if (equalsAny(actualType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + long millisDeserialized = DateUtil.getCodecFor(actualType).decodeLong(b, o, sortOrder); + v = new java.sql.Timestamp(millisDeserialized); + int nanosDeserialized = + PUnsignedInt.INSTANCE.getCodec().decodeInt(b, o + Bytes.SIZEOF_LONG, sortOrder); + /* + * There was a bug in serialization of timestamps which was causing the sub-second millis part + * of time stamp to be present both in the LONG and INT bytes. Having the <100000 check makes + * this serialization fix backward compatible. + */ + v.setNanos(nanosDeserialized < MAX_NANOS_VALUE_EXCLUSIVE + ? 
v.getNanos() + nanosDeserialized + : nanosDeserialized); + return v; + } else if ( + equalsAny(actualType, PDate.INSTANCE, PUnsignedDate.INSTANCE, PTime.INSTANCE, + PUnsignedTime.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE) + ) { + return new java.sql.Timestamp(actualType.getCodec().decodeLong(b, o, sortOrder)); + } else if (actualType == PDecimal.INSTANCE) { + BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); + long ms = bd.longValue(); + int nanos = (bd.remainder(BigDecimal.ONE).multiply(QueryConstants.BD_MILLIS_NANOS_CONVERSION)) + .intValue(); + v = DateUtil.getTimestamp(ms, nanos); + return v; } + throwConstraintViolationException(actualType, this); + return null; + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value != null) { - if (targetType.equals(PUnsignedTimestamp.INSTANCE)) { - return ((java.util.Date) value).getTime() >= 0; - } else if (equalsAny(targetType, PUnsignedDate.INSTANCE, PUnsignedTime.INSTANCE)) { - return ((java.util.Date) value).getTime() >= 0 - && ((java.sql.Timestamp) value).getNanos() == 0; - } else if (equalsAny(targetType, PDate.INSTANCE, PTime.INSTANCE)) { - return ((java.sql.Timestamp) value).getNanos() == 0; - } - } - return super.isCoercibleTo(targetType, value); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + java.sql.Timestamp sqlTs = + toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + return dateToClass(sqlTs, actualType, jdbcType); + } - @Override - public boolean isFixedWidth() { - return true; + Object dateToClass(java.sql.Timestamp sqlTs, PDataType actualType, Class jdbcType) + throws SQLException { + // FIXME java.time.Local conversions use ISO chronology, unlike the rest of Phoenix. 
+ if (jdbcType == java.time.LocalDateTime.class) { + return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC); + } else if (jdbcType == java.time.LocalTime.class) { + // This is NOT JDBC compliant + // This preserves nanos + return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC).toLocalTime(); + } else if (jdbcType == java.time.LocalDate.class) { + // This is NOT JDBC compliant + return java.time.LocalDateTime.ofInstant(sqlTs.toInstant(), ZoneOffset.UTC).toLocalDate(); + } else if (jdbcType == java.sql.Timestamp.class) { + return sqlTs; + } else if (jdbcType == java.sql.Date.class) { + return new java.sql.Date(sqlTs.getTime()); + } else if (jdbcType == java.util.Date.class) { + return new java.util.Date(sqlTs.getTime()); + } else if (jdbcType == java.sql.Time.class) { + return new java.sql.Time(sqlTs.getTime()); } + throw newMismatchException(actualType, jdbcType); + } - @Override - public Integer getByteSize() { - return MAX_TIMESTAMP_BYTES; - } + @Override + public boolean isCastableTo(PDataType targetType) { + return PDate.INSTANCE.isCastableTo(targetType); + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - if (equalsAny(rhsType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { - return ((java.sql.Timestamp) lhs).compareTo((java.sql.Timestamp) rhs); - } - int c = ((java.util.Date) lhs).compareTo((java.util.Date) rhs); - if (c != 0) return c; - return ((java.sql.Timestamp) lhs).getNanos(); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PVarbinary.INSTANCE, PBinary.INSTANCE); + } - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - return DateUtil.parseTimestamp(value); + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value != null) { + if (targetType.equals(PUnsignedTimestamp.INSTANCE)) { + return ((java.util.Date) value).getTime() >= 0; + } else if (equalsAny(targetType, PUnsignedDate.INSTANCE, PUnsignedTime.INSTANCE)) { + return ((java.util.Date) value).getTime() >= 0 + && ((java.sql.Timestamp) value).getNanos() == 0; + } else if (equalsAny(targetType, PDate.INSTANCE, PTime.INSTANCE)) { + return ((java.sql.Timestamp) value).getNanos() == 0; + } } + return super.isCoercibleTo(targetType, value); + } - @Override - public String toStringLiteral(Object o, Format formatter) { - if (formatter == null) { - formatter = DateUtil.DEFAULT_TIMESTAMP_FORMATTER; - } - return "'" + super.toStringLiteral(o, formatter) + "'"; - } + @Override + public boolean isFixedWidth() { + return true; + } + @Override + public Integer getByteSize() { + return MAX_TIMESTAMP_BYTES; + } - @Override - public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { - int nanos = PUnsignedInt.INSTANCE.getCodec() - .decodeInt(ptr.get(), ptr.getOffset() + PLong.INSTANCE.getByteSize(), sortOrder); - return nanos; + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + if (equalsAny(rhsType, PTimestamp.INSTANCE, PUnsignedTimestamp.INSTANCE)) { + return ((java.sql.Timestamp) lhs).compareTo((java.sql.Timestamp) rhs); } + int c = ((java.util.Date) lhs).compareTo((java.util.Date) rhs); + if (c != 0) return c; + return 
((java.sql.Timestamp) lhs).getNanos(); + } - @Override - public long getMillis(ImmutableBytesWritable ptr, SortOrder sortOrder) { - long millis = DateUtil.getCodecFor(this).decodeLong(ptr.get(), ptr.getOffset(), sortOrder); - return millis; + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; } + return DateUtil.parseTimestamp(value); + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new java.sql.Timestamp( - (Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + @Override + public String toStringLiteral(Object o, Format formatter) { + if (formatter == null) { + formatter = DateUtil.DEFAULT_TIMESTAMP_FORMATTER; } + return "'" + super.toStringLiteral(o, formatter) + "'"; + } - /** - * With timestamp, because our last 4 bytes store a value from [0 - 1000000), we need to detect - * when the boundary is crossed if we increment to the nextKey. + @Override + public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { + int nanos = PUnsignedInt.INSTANCE.getCodec().decodeInt(ptr.get(), + ptr.getOffset() + PLong.INSTANCE.getByteSize(), sortOrder); + return nanos; + } + + @Override + public long getMillis(ImmutableBytesWritable ptr, SortOrder sortOrder) { + long millis = DateUtil.getCodecFor(this).decodeLong(ptr.get(), ptr.getOffset(), sortOrder); + return millis; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return new java.sql.Timestamp((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + /** + * With timestamp, because our last 4 bytes store a value from [0 - 1000000), we need to detect + * when the boundary is crossed if we increment to the nextKey. + */ + @Override + public KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, + boolean upperInclusive, SortOrder sortOrder) { + /* + * Force lower bound to be inclusive for fixed width keys because it makes comparisons less + * expensive when you can count on one bound or the other being inclusive. Comparing two fixed + * width exclusive bounds against each other is inherently more expensive, because you need to + * take into account if the bigger key is equal to the next key after the smaller key. For + * example: (A-B] compared against [A-B) An exclusive lower bound A is bigger than an exclusive + * upper bound B. Forcing a fixed width exclusive lower bound key to be inclusive prevents us + * from having to do this extra logic in the compare function. */ - @Override - public KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, - boolean upperInclusive, SortOrder sortOrder) { - /* - * Force lower bound to be inclusive for fixed width keys because it makes comparisons less - * expensive when you can count on one bound or the other being inclusive. Comparing two - * fixed width exclusive bounds against each other is inherently more expensive, because you - * need to take into account if the bigger key is equal to the next key after the smaller - * key. For example: (A-B] compared against [A-B) An exclusive lower bound A is bigger than - * an exclusive upper bound B. Forcing a fixed width exclusive lower bound key to be - * inclusive prevents us from having to do this extra logic in the compare function. 
- */ - if (lowerRange != KeyRange.UNBOUND && !lowerInclusive && isFixedWidth()) { - if (lowerRange.length != MAX_TIMESTAMP_BYTES) { - throw new IllegalDataException( - "Unexpected size of " + lowerRange.length + " for " + this); - } + if (lowerRange != KeyRange.UNBOUND && !lowerInclusive && isFixedWidth()) { + if (lowerRange.length != MAX_TIMESTAMP_BYTES) { + throw new IllegalDataException("Unexpected size of " + lowerRange.length + " for " + this); + } - int nanos = - PUnsignedInt.INSTANCE.getCodec().decodeInt(lowerRange, Bytes.SIZEOF_LONG, - sortOrder); - if ((sortOrder == SortOrder.DESC && nanos == 0) - || (sortOrder == SortOrder.ASC && nanos == MAX_NANOS_VALUE_EXCLUSIVE - 1)) { - // With timestamp, because our last 4 bytes store a value from [0 - 1000000), we - // need - // to detect when the boundary is crossed with our nextKey - byte[] newLowerRange = new byte[MAX_TIMESTAMP_BYTES]; - if (sortOrder == SortOrder.DESC) { - // Set nanos part as inverted 999999 as it needs to be the max nano value - // The millisecond part is moving to the previous value below - System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG); - PUnsignedInt.INSTANCE.getCodec().encodeInt(MAX_NANOS_VALUE_EXCLUSIVE - 1, - newLowerRange, Bytes.SIZEOF_LONG); - SortOrder.invert(newLowerRange, Bytes.SIZEOF_LONG, newLowerRange, - Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT); - } else { - // Leave nanos part as zero as the millisecond part is rolling over to the next - // value - System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG); - } - // Increment millisecond part, but leave nanos alone - if (ByteUtil.nextKey(newLowerRange, Bytes.SIZEOF_LONG)) { - lowerRange = newLowerRange; - } else { - lowerRange = KeyRange.UNBOUND; - } - return KeyRange.getKeyRange(lowerRange, true, upperRange, upperInclusive); - } + int nanos = + PUnsignedInt.INSTANCE.getCodec().decodeInt(lowerRange, Bytes.SIZEOF_LONG, sortOrder); + if ( + (sortOrder == SortOrder.DESC && nanos == 0) + || (sortOrder == SortOrder.ASC && nanos == MAX_NANOS_VALUE_EXCLUSIVE - 1) + ) { + // With timestamp, because our last 4 bytes store a value from [0 - 1000000), we + // need + // to detect when the boundary is crossed with our nextKey + byte[] newLowerRange = new byte[MAX_TIMESTAMP_BYTES]; + if (sortOrder == SortOrder.DESC) { + // Set nanos part as inverted 999999 as it needs to be the max nano value + // The millisecond part is moving to the previous value below + System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG); + PUnsignedInt.INSTANCE.getCodec().encodeInt(MAX_NANOS_VALUE_EXCLUSIVE - 1, newLowerRange, + Bytes.SIZEOF_LONG); + SortOrder.invert(newLowerRange, Bytes.SIZEOF_LONG, newLowerRange, Bytes.SIZEOF_LONG, + Bytes.SIZEOF_INT); + } else { + // Leave nanos part as zero as the millisecond part is rolling over to the next + // value + System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG); + } + // Increment millisecond part, but leave nanos alone + if (ByteUtil.nextKey(newLowerRange, Bytes.SIZEOF_LONG)) { + lowerRange = newLowerRange; + } else { + lowerRange = KeyRange.UNBOUND; } - return super.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, sortOrder); + return KeyRange.getKeyRange(lowerRange, true, upperRange, upperInclusive); + } } + return super.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, sortOrder); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestampArray.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestampArray.java index 562f139fddd..bcf34a0c4d9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestampArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTimestampArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,73 +23,72 @@ public class PTimestampArray extends PArrayDataType { - public static final PTimestampArray INSTANCE = new PTimestampArray(); + public static final PTimestampArray INSTANCE = new PTimestampArray(); - private PTimestampArray() { - super("TIMESTAMP ARRAY", PDataType.ARRAY_TYPE_BASE + PTimestamp.INSTANCE.getSqlType(), - PhoenixArray.class, null, 36); - } + private PTimestampArray() { + super("TIMESTAMP ARRAY", PDataType.ARRAY_TYPE_BASE + PTimestamp.INSTANCE.getSqlType(), + PhoenixArray.class, null, 36); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PTimestamp.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PTimestamp.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PTimestamp.INSTANCE, sortOrder, maxLength, scale, - PTimestamp.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PTimestamp.INSTANCE, sortOrder, maxLength, scale, + PTimestamp.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] timeStampArr = (Object[]) pArr.array; - for (Object i : timeStampArr) { - if (!super.isCoercibleTo(PTimestamp.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - 
@Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PTimestamp.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] timeStampArr = (Object[]) pArr.array; + for (Object i : timeStampArr) { + if (!super.isCoercibleTo(PTimestamp.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PTimestamp.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyint.java index bf2fa599166..6a99a632857 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PTinyint extends PWholeNumber { @@ -100,21 +99,23 @@ public Object toObject(Object object, PDataType actualType) { long l = (Long) o; if (l < Byte.MIN_VALUE || l > Byte.MAX_VALUE) { throw newIllegalDataException( - actualType + " value " + l + " cannot be cast to Byte without changing its value"); + actualType + " value " + l + " cannot be cast to Byte without changing its value"); } return (byte) l; } @Override public Byte toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { if (l == 0) { return null; } - if (equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE, PFloat.INSTANCE, + if ( + equalsAny(actualType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE, PFloat.INSTANCE, PUnsignedFloat.INSTANCE, PLong.INSTANCE, PUnsignedLong.INSTANCE, PInteger.INSTANCE, PUnsignedInt.INSTANCE, PSmallint.INSTANCE, PUnsignedSmallint.INSTANCE, PTinyint.INSTANCE, - PUnsignedTinyint.INSTANCE)) { + PUnsignedTinyint.INSTANCE) + ) { return actualType.getCodec().decodeByte(b, o, sortOrder); } else if (actualType == PDecimal.INSTANCE) { BigDecimal bd = (BigDecimal) actualType.toObject(b, o, l, actualType, sortOrder); @@ -127,9 +128,11 @@ public Byte toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sor @Override public boolean isCoercibleTo(PDataType targetType, Object value) { if (value != null) { - if (equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, + if ( + equalsAny(targetType, PUnsignedDouble.INSTANCE, PUnsignedFloat.INSTANCE, PUnsignedLong.INSTANCE, PUnsignedInt.INSTANCE, PUnsignedSmallint.INSTANCE, - PUnsignedTinyint.INSTANCE)) { + PUnsignedTinyint.INSTANCE) + ) { byte i = (Byte) value; return i >= 0; } @@ -144,8 +147,7 @@ public boolean isCoercibleTo(PDataType targetType) { @Override public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return ((Integer) PInteger.INSTANCE.getSampleValue(maxLength, arrayLength)) - .byteValue(); + return ((Integer) 
PInteger.INSTANCE.getSampleValue(maxLength, arrayLength)).byteValue(); } static class ByteCodec extends BaseCodec { @@ -183,7 +185,7 @@ public int encodeShort(short v, byte[] b, int o) { checkForSufficientLength(b, o, Bytes.SIZEOF_BYTE); if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Byte without changing its value"); + "Value " + v + " cannot be encoded as an Byte without changing its value"); } return encodeByte((byte) v, b, o); } @@ -192,7 +194,7 @@ public int encodeShort(short v, byte[] b, int o) { public int encodeLong(long v, byte[] b, int o) { if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Byte without changing its value"); + "Value " + v + " cannot be encoded as an Byte without changing its value"); } return encodeByte((byte) v, b, o); } @@ -201,7 +203,7 @@ public int encodeLong(long v, byte[] b, int o) { public int encodeInt(int v, byte[] b, int o) { if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Byte without changing its value"); + "Value " + v + " cannot be encoded as an Byte without changing its value"); } return encodeByte((byte) v, b, o); } @@ -227,7 +229,7 @@ public float decodeFloat(byte[] b, int o, SortOrder sortOrder) { public int encodeFloat(float v, byte[] b, int o) { if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Byte without changing its value"); + "Value " + v + " cannot be encoded as an Byte without changing its value"); } return encodeByte((byte) v, b, o); } @@ -236,7 +238,7 @@ public int encodeFloat(float v, byte[] b, int o) { public int encodeDouble(double v, byte[] b, int o) { if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) { throw newIllegalDataException( - "Value " + v + " cannot be encoded as an Byte without changing its value"); + "Value " + v + " cannot be encoded as an Byte without changing its value"); } return encodeByte((byte) v, b, o); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyintArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyintArray.java index 7ffaeb10e50..791b4ac09a3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyintArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PTinyintArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PTinyintArray extends PArrayDataType { - public static final PTinyintArray INSTANCE = new PTinyintArray(); + public static final PTinyintArray INSTANCE = new PTinyintArray(); - private PTinyintArray() { - super("TINYINT ARRAY", PDataType.ARRAY_TYPE_BASE + PTinyint.INSTANCE.getSqlType(), - PhoenixArray.class, null, 32); - } + private PTinyintArray() { + super("TINYINT ARRAY", PDataType.ARRAY_TYPE_BASE + PTinyint.INSTANCE.getSqlType(), + PhoenixArray.class, null, 32); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PTinyint.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PTinyint.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PTinyint.INSTANCE, sortOrder, maxLength, scale, - PTinyint.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PTinyint.INSTANCE, sortOrder, maxLength, scale, + PTinyint.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveBytePhoenixArray pArr = (PhoenixArray.PrimitiveBytePhoenixArray) value; - byte[] byteArr = (byte[]) pArr.array; - for (byte i : byteArr) { - if (!super.isCoercibleTo(PTinyint.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PTinyint.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveBytePhoenixArray pArr = (PhoenixArray.PrimitiveBytePhoenixArray) value; + byte[] byteArr = (byte[]) pArr.array; + for (byte i : byteArr) { + if (!super.isCoercibleTo(PTinyint.INSTANCE, i)) { + return false; + } } + return true; + } + + 
@Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PTinyint.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java index ee21fa832ef..84ff383503a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDate.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,147 +28,148 @@ public class PUnsignedDate extends PDataType { - public static final PUnsignedDate INSTANCE = new PUnsignedDate(); - - private PUnsignedDate() { - super("UNSIGNED_DATE", 19, Date.class, - new UnsignedDateCodec(), 14); // After TIMESTAMP and DATE to ensure toLiteral finds those first - } - - @Override - public byte[] toBytes(Object object) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - byte[] bytes = new byte[getByteSize()]; - toBytes(object, bytes, 0); - return bytes; - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - getCodec().encodeLong(((java.util.Date) object).getTime(), bytes, offset); - return this.getByteSize(); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - Date d = (Date) PDate.INSTANCE.toObject(object, actualType); - throwIfNonNegativeDate(d); - return d; - } - - @Override - public Date toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - Date d = (Date) PDate.INSTANCE.toObject(b, o, l, actualType, sortOrder); - throwIfNonNegativeDate(d); - return d; - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - java.sql.Date sqlDate = - toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - return PDate.INSTANCE.dateToClass(sqlDate, actualType, jdbcType); - } - - @Override - public boolean isCastableTo(PDataType targetType) { - return PDate.INSTANCE.isCastableTo(targetType); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PUnsignedTime.INSTANCE, PUnsignedTimestamp.INSTANCE) - || PDate.INSTANCE.isCoercibleTo(targetType); - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PDate.INSTANCE.isCoercibleTo(targetType, value); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public Integer getByteSize() { - return PDate.INSTANCE.getByteSize(); - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return PDate.INSTANCE.compareTo(lhs, rhs, rhsType); - } - - @Override - public Object toObject(String value) { - return PDate.INSTANCE.toObject(value); - } - - @Override - public boolean isBytesComparableWith(PDataType 
otherType) { - return super.isBytesComparableWith(otherType) || otherType == PUnsignedTime.INSTANCE || otherType == PUnsignedTimestamp.INSTANCE || otherType == PUnsignedLong.INSTANCE; - } - - @Override - public String toStringLiteral(Object o, Format formatter) { - // Can't delegate, as the super.toStringLiteral calls this.toBytes - if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) { - // If default formatter has not been overridden, - // use one that displays milliseconds. - formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER; - } - return "'" + super.toStringLiteral(o, formatter) + "'"; - } - - // TODO: derive PUnsignedDate from PDate to avoid copy/paste - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifier, - Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - if (ptr.getLength() > getByteSize()) { - ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); - } - super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, - desiredScale, expectedModifier); - } - - @Override - public int getResultSetSqlType() { - return Types.DATE; - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new Date((Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); - } - - static class UnsignedDateCodec extends PUnsignedLong.UnsignedLongCodec { - - @Override - public int decodeInt(byte[] b, int o, SortOrder sortOrder) { - throw new UnsupportedOperationException(); - } + public static final PUnsignedDate INSTANCE = new PUnsignedDate(); + + private PUnsignedDate() { + super("UNSIGNED_DATE", 19, Date.class, new UnsignedDateCodec(), 14); // After TIMESTAMP and DATE + // to ensure toLiteral + // finds those first + } + + @Override + public byte[] toBytes(Object object) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + byte[] bytes = new byte[getByteSize()]; + toBytes(object, bytes, 0); + return bytes; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + getCodec().encodeLong(((java.util.Date) object).getTime(), bytes, offset); + return this.getByteSize(); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + Date d = (Date) PDate.INSTANCE.toObject(object, actualType); + throwIfNonNegativeDate(d); + return d; + } + + @Override + public Date toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + Date d = (Date) PDate.INSTANCE.toObject(b, o, l, actualType, sortOrder); + throwIfNonNegativeDate(d); + return d; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + java.sql.Date sqlDate = + toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + return PDate.INSTANCE.dateToClass(sqlDate, actualType, jdbcType); + } + + @Override + public boolean isCastableTo(PDataType targetType) { + return PDate.INSTANCE.isCastableTo(targetType); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PUnsignedTime.INSTANCE, PUnsignedTimestamp.INSTANCE) + || PDate.INSTANCE.isCoercibleTo(targetType); + } + + @Override + public 
boolean isCoercibleTo(PDataType targetType, Object value) { + return super.isCoercibleTo(targetType, value) + || PDate.INSTANCE.isCoercibleTo(targetType, value); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return PDate.INSTANCE.getByteSize(); + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return PDate.INSTANCE.compareTo(lhs, rhs, rhsType); + } + + @Override + public Object toObject(String value) { + return PDate.INSTANCE.toObject(value); + } + + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return super.isBytesComparableWith(otherType) || otherType == PUnsignedTime.INSTANCE + || otherType == PUnsignedTimestamp.INSTANCE || otherType == PUnsignedLong.INSTANCE; + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + // Can't delegate, as the super.toStringLiteral calls this.toBytes + if (formatter == null || formatter == DateUtil.DEFAULT_DATE_FORMATTER) { + // If default formatter has not been overridden, + // use one that displays milliseconds. + formatter = DateUtil.DEFAULT_MS_DATE_FORMATTER; + } + return "'" + super.toStringLiteral(o, formatter) + "'"; + } + + // TODO: derive PUnsignedDate from PDate to avoid copy/paste + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, + Integer desiredScale, SortOrder expectedModifier) { + if (ptr.getLength() > getByteSize()) { + ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); + } + super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + } + + @Override + public int getResultSetSqlType() { + return Types.DATE; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return new Date((Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + static class UnsignedDateCodec extends PUnsignedLong.UnsignedLongCodec { + + @Override + public int decodeInt(byte[] b, int o, SortOrder sortOrder) { + throw new UnsupportedOperationException(); + } + + @Override + public PhoenixArrayFactory getPhoenixArrayFactory() { + return new PhoenixArrayFactory() { @Override - public PhoenixArrayFactory getPhoenixArrayFactory() { - return new PhoenixArrayFactory() { - - @Override - public PhoenixArray newArray(PDataType type, Object[] elements) { - return new PhoenixArray(type, elements); - } - }; + public PhoenixArray newArray(PDataType type, Object[] elements) { + return new PhoenixArray(type, elements); } + }; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDateArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDateArray.java index b2e173a05fe..49983bfcfab 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDateArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDateArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,68 +23,72 @@ public class PUnsignedDateArray extends PArrayDataType { - public static final PUnsignedDateArray INSTANCE = new PUnsignedDateArray(); + public static final PUnsignedDateArray INSTANCE = new PUnsignedDateArray(); - private PUnsignedDateArray() { - super("UNSIGNED_DATE ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedDate.INSTANCE.getSqlType(), - PhoenixArray.class, null, 41); - } + private PUnsignedDateArray() { + super("UNSIGNED_DATE ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedDate.INSTANCE.getSqlType(), + PhoenixArray.class, null, 41); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedDate.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedDate.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - return toObject(bytes, offset, length, PUnsignedDate.INSTANCE, sortOrder, maxLength, scale, - PUnsignedDate.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedDate.INSTANCE, sortOrder, maxLength, scale, + PUnsignedDate.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { return true; } - PhoenixArray pArr = (PhoenixArray)value; - Object[] dateArr = (Object[])pArr.array; - for (Object i : dateArr) { - if (!super.isCoercibleTo(PUnsignedDate.INSTANCE, i)) { return false; } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedDate.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] dateArr = (Object[]) pArr.array; + for (Object i : dateArr) { + if (!super.isCoercibleTo(PUnsignedDate.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override 
+ public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedDate.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDouble.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDouble.java index 5479c32dcf4..efd3b880e2d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDouble.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDouble.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,149 +21,146 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.primitives.Doubles; public class PUnsignedDouble extends PRealNumber { - public static final PUnsignedDouble INSTANCE = new PUnsignedDouble(); - - private PUnsignedDouble() { - super("UNSIGNED_DOUBLE", 15, Double.class, new UnsignedDoubleCodec(), 20); - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } - if (rhsType == PDecimal.INSTANCE) { - return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).doubleValue())); - } - return Doubles.compare(((Number) lhs).doubleValue(), ((Number) rhs).doubleValue()); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public Integer getByteSize() { - return Bytes.SIZEOF_DOUBLE; - } - - @Override - public Integer getScale(Object o) { - return PDouble.INSTANCE.getScale(o); - } - - @Override - public Integer getMaxLength(Object o) { - return PDouble.INSTANCE.getMaxLength(o); - } - - @Override - public byte[] toBytes(Object object) { - byte[] b = new byte[Bytes.SIZEOF_DOUBLE]; - toBytes(object, b, 0); - return b; - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); - } - return this.getCodec().encodeDouble(((Number) object).doubleValue(), - bytes, offset); - } - - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - try { - Double d = Double.parseDouble(value); - if (d.doubleValue() < 0) { - throw newIllegalDataException("Value may not be negative(" - + d + ")"); - } - return d; - } catch (NumberFormatException e) { - throw newIllegalDataException(e); - } - } + public static final PUnsignedDouble INSTANCE = new PUnsignedDouble(); + + private PUnsignedDouble() { + super("UNSIGNED_DOUBLE", 15, Double.class, new UnsignedDoubleCodec(), 20); + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + if (rhsType == PDecimal.INSTANCE) { + return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).doubleValue())); + } + return Doubles.compare(((Number) lhs).doubleValue(), ((Number) 
rhs).doubleValue()); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return Bytes.SIZEOF_DOUBLE; + } + + @Override + public Integer getScale(Object o) { + return PDouble.INSTANCE.getScale(o); + } + + @Override + public Integer getMaxLength(Object o) { + return PDouble.INSTANCE.getMaxLength(o); + } + + @Override + public byte[] toBytes(Object object) { + byte[] b = new byte[Bytes.SIZEOF_DOUBLE]; + toBytes(object, b, 0); + return b; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + return this.getCodec().encodeDouble(((Number) object).doubleValue(), bytes, offset); + } + + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + try { + Double d = Double.parseDouble(value); + if (d.doubleValue() < 0) { + throw newIllegalDataException("Value may not be negative(" + d + ")"); + } + return d; + } catch (NumberFormatException e) { + throw newIllegalDataException(e); + } + } + + @Override + public Object toObject(Object object, PDataType actualType) { + Double v = (Double) PDouble.INSTANCE.toObject(object, actualType); + throwIfNonNegativeNumber(v); + return v; + } + + @Override + public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + Double v = (Double) PDouble.INSTANCE.toObject(b, o, l, actualType, sortOrder); + throwIfNonNegativeNumber(v); + return v; + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + return super.isCoercibleTo(targetType, value) + || PDouble.INSTANCE.isCoercibleTo(targetType, value); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return this.equals(targetType) || PDouble.INSTANCE.isCoercibleTo(targetType); + } + + @Override + public int getResultSetSqlType() { + return PDouble.INSTANCE.getResultSetSqlType(); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return Math.abs((Double) PDouble.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + static class UnsignedDoubleCodec extends PDouble.DoubleCodec { @Override - public Object toObject(Object object, PDataType actualType) { - Double v = (Double) PDouble.INSTANCE.toObject(object, actualType); - throwIfNonNegativeNumber(v); - return v; + public int encodeDouble(double v, byte[] b, int o) { + checkForSufficientLength(b, o, Bytes.SIZEOF_DOUBLE); + if (v < 0) { + throw newIllegalDataException(); + } + Bytes.putDouble(b, o, v); + return Bytes.SIZEOF_DOUBLE; } @Override - public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - Double v = (Double) PDouble.INSTANCE.toObject(b, o, l, actualType, sortOrder); - throwIfNonNegativeNumber(v); - return v; - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PDouble.INSTANCE - .isCoercibleTo(targetType, value); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return this.equals(targetType) || PDouble.INSTANCE.isCoercibleTo(targetType); - } - - @Override - public int getResultSetSqlType() { - return PDouble.INSTANCE.getResultSetSqlType(); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return Math.abs((Double) 
PDouble.INSTANCE.getSampleValue(maxLength, arrayLength)); - } - - static class UnsignedDoubleCodec extends PDouble.DoubleCodec { - - @Override - public int encodeDouble(double v, byte[] b, int o) { - checkForSufficientLength(b, o, Bytes.SIZEOF_DOUBLE); - if (v < 0) { - throw newIllegalDataException(); - } - Bytes.putDouble(b, o, v); - return Bytes.SIZEOF_DOUBLE; - } - - @Override - public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - checkForSufficientLength(b, o, Bytes.SIZEOF_DOUBLE); - if (sortOrder == SortOrder.DESC) { - b = SortOrder.invert(b, o, new byte[Bytes.SIZEOF_DOUBLE], 0, Bytes.SIZEOF_DOUBLE); - o = 0; - } - double v = Bytes.toDouble(b, o); - if (v < 0) { - throw newIllegalDataException(); - } - return v; - } - } + public double decodeDouble(byte[] b, int o, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + checkForSufficientLength(b, o, Bytes.SIZEOF_DOUBLE); + if (sortOrder == SortOrder.DESC) { + b = SortOrder.invert(b, o, new byte[Bytes.SIZEOF_DOUBLE], 0, Bytes.SIZEOF_DOUBLE); + o = 0; + } + double v = Bytes.toDouble(b, o); + if (v < 0) { + throw newIllegalDataException(); + } + return v; + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDoubleArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDoubleArray.java index ef3ef0672df..d7dfc5eb17a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDoubleArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedDoubleArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,76 +21,79 @@ public class PUnsignedDoubleArray extends PArrayDataType { - public static final PUnsignedDoubleArray INSTANCE = new PUnsignedDoubleArray(); + public static final PUnsignedDoubleArray INSTANCE = new PUnsignedDoubleArray(); - private PUnsignedDoubleArray() { - super("UNSIGNED_DOUBLE ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedDouble.INSTANCE.getSqlType(), - PhoenixArray.class, null, 47); - } + private PUnsignedDoubleArray() { + super("UNSIGNED_DOUBLE ARRAY", + PDataType.ARRAY_TYPE_BASE + PUnsignedDouble.INSTANCE.getSqlType(), PhoenixArray.class, null, + 47); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedDouble.INSTANCE, sortOrder); - } + 
@Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedDouble.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedDouble.INSTANCE, sortOrder, maxLength, - scale, PUnsignedDouble.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedDouble.INSTANCE, sortOrder, maxLength, scale, + PUnsignedDouble.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveDoublePhoenixArray pArr = (PhoenixArray.PrimitiveDoublePhoenixArray) value; - double[] doubleArr = (double[]) pArr.array; - for (Object i : doubleArr) { - if (!super.isCoercibleTo(PUnsignedDouble.INSTANCE, i) && (!super.isCoercibleTo( - PUnsignedTimestamp.INSTANCE, i)) - && (!super.isCoercibleTo(PUnsignedTime.INSTANCE, i)) && (!super - .isCoercibleTo(PUnsignedDate.INSTANCE, i))) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedDouble.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveDoublePhoenixArray pArr = + (PhoenixArray.PrimitiveDoublePhoenixArray) value; + double[] doubleArr = (double[]) pArr.array; + for (Object i : doubleArr) { + if ( + !super.isCoercibleTo(PUnsignedDouble.INSTANCE, i) + && (!super.isCoercibleTo(PUnsignedTimestamp.INSTANCE, i)) + && (!super.isCoercibleTo(PUnsignedTime.INSTANCE, i)) + && (!super.isCoercibleTo(PUnsignedDate.INSTANCE, i)) + ) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedDouble.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloat.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloat.java index 1707cb3a8aa..d74227c51b2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloat.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PUnsignedFloat extends PRealNumber { @@ -67,8 +66,7 @@ public int toBytes(Object object, byte[] bytes, int offset) { if (object == null) { throw newIllegalDataException(this + " may not be null"); } - return this.getCodec().encodeFloat(((Number) object).floatValue(), - bytes, offset); + return this.getCodec().encodeFloat(((Number) object).floatValue(), bytes, offset); } @Override @@ -79,8 +77,7 @@ public Object toObject(String value) { try { Float f = Float.parseFloat(value); if (f.floatValue() < 0) { - throw newIllegalDataException("Value may not be negative(" - + f + ")"); + throw newIllegalDataException("Value may not be negative(" + f + ")"); } return f; } catch (NumberFormatException e) { @@ -97,7 +94,7 @@ public Object toObject(Object object, PDataType actualType) { @Override public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { Float v = (Float) PFloat.INSTANCE.toObject(b, o, l, actualType, sortOrder); throwIfNonNegativeNumber(v); return v; @@ -110,8 +107,8 @@ public boolean isCoercibleTo(PDataType targetType, Object value) { @Override public boolean isCoercibleTo(PDataType targetType) { - return this.equals(targetType) || PUnsignedDouble.INSTANCE.isCoercibleTo(targetType) || PFloat.INSTANCE - .isCoercibleTo(targetType); + return this.equals(targetType) || PUnsignedDouble.INSTANCE.isCoercibleTo(targetType) + || PFloat.INSTANCE.isCoercibleTo(targetType); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloatArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloatArray.java index 4cdb8ffb3c8..f5dc3398225 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloatArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedFloatArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PUnsignedFloatArray extends PArrayDataType { - public static final PUnsignedFloatArray INSTANCE = new PUnsignedFloatArray(); + public static final PUnsignedFloatArray INSTANCE = new PUnsignedFloatArray(); - private PUnsignedFloatArray() { - super("UNSIGNED_FLOAT ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedFloat.INSTANCE.getSqlType(), - PhoenixArray.class, null, 46); - } + private PUnsignedFloatArray() { + super("UNSIGNED_FLOAT ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedFloat.INSTANCE.getSqlType(), + PhoenixArray.class, null, 46); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedFloat.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedFloat.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedFloat.INSTANCE, sortOrder, maxLength, - scale, PUnsignedFloat.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedFloat.INSTANCE, sortOrder, maxLength, scale, + PUnsignedFloat.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveFloatPhoenixArray pArr = (PhoenixArray.PrimitiveFloatPhoenixArray) value; - float[] floatArr = (float[]) pArr.array; - for (Object i : floatArr) { - if (!super.isCoercibleTo(PUnsignedFloat.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedFloat.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveFloatPhoenixArray pArr = (PhoenixArray.PrimitiveFloatPhoenixArray) value; + float[] floatArr = (float[]) pArr.array; + 
for (Object i : floatArr) { + if (!super.isCoercibleTo(PUnsignedFloat.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedFloat.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedInt.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedInt.java index b4405d78e02..32660ba6da2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedInt.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedInt.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,16 +20,14 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * Unsigned integer type that restricts values to be from 0 to {@link Integer#MAX_VALUE} - * inclusive. May be used to map to existing HTable values created through - * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(int)} - * as long as all values are non negative (the leading sign bit of negative numbers would cause - * them to sort ahead of positive numbers when they're used as part of the row key when using the - * HBase utility methods). + * Unsigned integer type that restricts values to be from 0 to {@link Integer#MAX_VALUE} inclusive. + * May be used to map to existing HTable values created through + * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(int)} as long as all values are non negative + * (the leading sign bit of negative numbers would cause them to sort ahead of positive numbers when + * they're used as part of the row key when using the HBase utility methods). 
*/ public class PUnsignedInt extends PWholeNumber { @@ -37,7 +35,7 @@ public class PUnsignedInt extends PWholeNumber { private PUnsignedInt() { super("UNSIGNED_INT", 9 /* no constant available in Types */, Integer.class, - new UnsignedIntCodec(), 16); + new UnsignedIntCodec(), 16); } @Override @@ -84,9 +82,8 @@ public Object toObject(Object object, PDataType actualType) { @Override public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - Integer v = - (Integer) PInteger.INSTANCE.toObject(b, o, l, actualType, sortOrder); + Integer maxLength, Integer scale) { + Integer v = (Integer) PInteger.INSTANCE.toObject(b, o, l, actualType, sortOrder); throwIfNonNegativeNumber(v); return v; } @@ -94,14 +91,14 @@ public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder s @Override public boolean isCoercibleTo(PDataType targetType) { return targetType.equals(this) || targetType.equals(PUnsignedFloat.INSTANCE) - || PUnsignedLong.INSTANCE.isCoercibleTo(targetType) - || PInteger.INSTANCE.isCoercibleTo(targetType); + || PUnsignedLong.INSTANCE.isCoercibleTo(targetType) + || PInteger.INSTANCE.isCoercibleTo(targetType); } @Override public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PInteger.INSTANCE - .isCoercibleTo(targetType, value); + return super.isCoercibleTo(targetType, value) + || PInteger.INSTANCE.isCoercibleTo(targetType, value); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedIntArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedIntArray.java index bc0c39b7473..3285bf75ac0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedIntArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedIntArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PUnsignedIntArray extends PArrayDataType { - public static final PUnsignedIntArray INSTANCE = new PUnsignedIntArray(); + public static final PUnsignedIntArray INSTANCE = new PUnsignedIntArray(); - private PUnsignedIntArray() { - super("UNSIGNED_INT ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedInt.INSTANCE.getSqlType(), - PhoenixArray.class, null, 43); - } + private PUnsignedIntArray() { + super("UNSIGNED_INT ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedInt.INSTANCE.getSqlType(), + PhoenixArray.class, null, 43); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedInt.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedInt.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedInt.INSTANCE, sortOrder, maxLength, - scale, PUnsignedInt.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedInt.INSTANCE, sortOrder, maxLength, scale, + PUnsignedInt.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveIntPhoenixArray pArr = (PhoenixArray.PrimitiveIntPhoenixArray) value; - int[] intArr = (int[]) pArr.array; - for (Object i : intArr) { - if (!super.isCoercibleTo(PUnsignedInt.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedInt.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveIntPhoenixArray pArr = (PhoenixArray.PrimitiveIntPhoenixArray) value; + int[] intArr = (int[]) pArr.array; + for (Object i : intArr) { + if 
(!super.isCoercibleTo(PUnsignedInt.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedInt.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java index 11532f74793..bfaeaf3e790 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,199 +23,200 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.primitives.Doubles; import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; /** - * Unsigned long type that restricts values to be from 0 to {@link Long#MAX_VALUE} - * inclusive. May be used to map to existing HTable values created through - * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(long)} - * as long as all values are non negative (the leading sign bit of negative numbers would cause - * them to sort ahead of positive numbers when they're used as part of the row key when using the - * HBase utility methods). + * Unsigned long type that restricts values to be from 0 to {@link Long#MAX_VALUE} inclusive. May be + * used to map to existing HTable values created through + * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(long)} as long as all values are non negative + * (the leading sign bit of negative numbers would cause them to sort ahead of positive numbers when + * they're used as part of the row key when using the HBase utility methods). 
*/ public class PUnsignedLong extends PWholeNumber { - public static final PUnsignedLong INSTANCE = new PUnsignedLong(); - - private PUnsignedLong() { - super("UNSIGNED_LONG", 10 /* no constant available in Types */, Long.class, - new UnsignedLongCodec(), 15); - } - - @Override - public boolean isOrderPreserving() { - return true; - } - - @Override - public Order getOrder() { - return Order.ASCENDING; - } - - @Override - public boolean isSkippable() { - return true; - } - - @Override - public Integer getScale(Object o) { - return ZERO; - } - - @Override - public byte[] toBytes(Object object) { - byte[] b = new byte[Bytes.SIZEOF_LONG]; - toBytes(object, b, 0); - return b; - } - - @Override - public int toBytes(Object object, byte[] b, int o) { - if (object == null) { - throw newIllegalDataException(this + " may not be null"); + public static final PUnsignedLong INSTANCE = new PUnsignedLong(); + + private PUnsignedLong() { + super("UNSIGNED_LONG", 10 /* no constant available in Types */, Long.class, + new UnsignedLongCodec(), 15); + } + + @Override + public boolean isOrderPreserving() { + return true; + } + + @Override + public Order getOrder() { + return Order.ASCENDING; + } + + @Override + public boolean isSkippable() { + return true; + } + + @Override + public Integer getScale(Object o) { + return ZERO; + } + + @Override + public byte[] toBytes(Object object) { + byte[] b = new byte[Bytes.SIZEOF_LONG]; + toBytes(object, b, 0); + return b; + } + + @Override + public int toBytes(Object object, byte[] b, int o) { + if (object == null) { + throw newIllegalDataException(this + " may not be null"); + } + return this.getCodec().encodeLong(((Number) object).longValue(), b, o); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + Long v = (Long) PLong.INSTANCE.toObject(object, actualType); + throwIfNonNegativeNumber(v); + return v; + } + + @Override + public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, + Integer maxLength, Integer scale) { + Long v = (Long) PLong.INSTANCE.toObject(b, o, l, actualType, sortOrder); + throwIfNonNegativeNumber(v); + return v; + } + + @Override + public boolean isCastableTo(PDataType targetType) { + return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return targetType == this || targetType == PUnsignedDouble.INSTANCE + || PLong.INSTANCE.isCoercibleTo(targetType); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + return super.isCoercibleTo(targetType, value) + || PLong.INSTANCE.isCoercibleTo(targetType, value); + } + + @Override + public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, + Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, + Integer desiredScale, SortOrder expectedModifier) { + // Decrease size of TIMESTAMP to size of LONG and continue coerce + if (ptr.getLength() > getByteSize()) { + ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); + } + super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, + desiredScale, expectedModifier); + } + + @Override + public boolean isFixedWidth() { + return true; + } + + @Override + public Integer getByteSize() { + return Bytes.SIZEOF_LONG; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if 
(rhs == null) { + return 1; + } + if (rhsType == PDecimal.INSTANCE) { + return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).longValue())); + } else if ( + equalsAny(rhsType, PDouble.INSTANCE, PFloat.INSTANCE, PUnsignedDouble.INSTANCE, + PUnsignedFloat.INSTANCE) + ) { + return Doubles.compare(((Number) lhs).doubleValue(), ((Number) rhs).doubleValue()); + } + return Longs.compare(((Number) lhs).longValue(), ((Number) rhs).longValue()); + } + + @Override + public boolean isComparableTo(PDataType targetType) { + return PDecimal.INSTANCE.isComparableTo(targetType); + } + + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + try { + Long l = Long.parseLong(value); + if (l.longValue() < 0) { + throw newIllegalDataException("Value may not be negative(" + l + ")"); + } + return l; + } catch (NumberFormatException e) { + throw newIllegalDataException(e); + } + } + + @Override + public int getResultSetSqlType() { + return PLong.INSTANCE.getResultSetSqlType(); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return Math.abs((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } + + static class UnsignedLongCodec extends PLong.LongCodec { + + @Override + public long decodeLong(byte[] b, int o, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); + long v = 0; + if (sortOrder == SortOrder.ASC) { + for (int i = o; i < o + Bytes.SIZEOF_LONG; i++) { + v <<= 8; + v ^= b[i] & 0xFF; } - return this.getCodec().encodeLong(((Number) object).longValue(), b, o); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - Long v = (Long) PLong.INSTANCE.toObject(object, actualType); - throwIfNonNegativeNumber(v); - return v; - } - - @Override - public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { - Long v = (Long) PLong.INSTANCE.toObject(b, o, l, actualType, sortOrder); - throwIfNonNegativeNumber(v); - return v; - } - - @Override - public boolean isCastableTo(PDataType targetType) { - return super.isCastableTo(targetType) || targetType.isCoercibleTo(PTimestamp.INSTANCE); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return targetType == this || targetType == PUnsignedDouble.INSTANCE || PLong.INSTANCE - .isCoercibleTo(targetType); - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PLong.INSTANCE.isCoercibleTo(targetType, value); - } - - @Override - public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType, - Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale, - SortOrder expectedModifier) { - // Decrease size of TIMESTAMP to size of LONG and continue coerce - if (ptr.getLength() > getByteSize()) { - ptr.set(ptr.get(), ptr.getOffset(), getByteSize()); - } - super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength, - desiredScale, expectedModifier); - } - - @Override - public boolean isFixedWidth() { - return true; - } - - @Override - public Integer getByteSize() { - return Bytes.SIZEOF_LONG; - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; - } 
- if (rhsType == PDecimal.INSTANCE) { - return -((BigDecimal) rhs).compareTo(BigDecimal.valueOf(((Number) lhs).longValue())); - } else if (equalsAny(rhsType, PDouble.INSTANCE, PFloat.INSTANCE, PUnsignedDouble.INSTANCE, - PUnsignedFloat.INSTANCE)) { - return Doubles.compare(((Number) lhs).doubleValue(), ((Number) rhs).doubleValue()); + } else { + for (int i = o; i < o + Bytes.SIZEOF_LONG; i++) { + v <<= 8; + v ^= (b[i] & 0xFF) ^ 0xFF; } - return Longs.compare(((Number) lhs).longValue(), ((Number) rhs).longValue()); + } + if (v < 0) { + throw newIllegalDataException(); + } + return v; } @Override - public boolean isComparableTo(PDataType targetType) { - return PDecimal.INSTANCE.isComparableTo(targetType); - } - - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - try { - Long l = Long.parseLong(value); - if (l.longValue() < 0) { - throw newIllegalDataException("Value may not be negative(" + l + ")"); - } - return l; - } catch (NumberFormatException e) { - throw newIllegalDataException(e); - } - } - - @Override - public int getResultSetSqlType() { - return PLong.INSTANCE.getResultSetSqlType(); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return Math.abs((Long) PLong.INSTANCE.getSampleValue(maxLength, arrayLength)); - } - - static class UnsignedLongCodec extends PLong.LongCodec { - - @Override - public long decodeLong(byte[] b, int o, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); - long v = 0; - if (sortOrder == SortOrder.ASC) { - for (int i = o; i < o + Bytes.SIZEOF_LONG; i++) { - v <<= 8; - v ^= b[i] & 0xFF; - } - } else { - for (int i = o; i < o + Bytes.SIZEOF_LONG; i++) { - v <<= 8; - v ^= (b[i] & 0xFF) ^ 0xFF; - } - } - if (v < 0) { - throw newIllegalDataException(); - } - return v; - } - - @Override - public int encodeLong(long v, byte[] b, int o) { - checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); - if (v < 0) { - throw newIllegalDataException(); - } - Bytes.putLong(b, o, v); - return Bytes.SIZEOF_LONG; - } + public int encodeLong(long v, byte[] b, int o) { + checkForSufficientLength(b, o, Bytes.SIZEOF_LONG); + if (v < 0) { + throw newIllegalDataException(); + } + Bytes.putLong(b, o, v); + return Bytes.SIZEOF_LONG; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLongArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLongArray.java index 20f693b0c50..4eb57cc30ca 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLongArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedLongArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PUnsignedLongArray extends PArrayDataType { - public static final PUnsignedLongArray INSTANCE = new PUnsignedLongArray(); + public static final PUnsignedLongArray INSTANCE = new PUnsignedLongArray(); - private PUnsignedLongArray() { - super("UNSIGNED_LONG ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedLong.INSTANCE.getSqlType(), - PhoenixArray.class, null, 42); - } + private PUnsignedLongArray() { + super("UNSIGNED_LONG ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedLong.INSTANCE.getSqlType(), + PhoenixArray.class, null, 42); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedLong.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedLong.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedLong.INSTANCE, sortOrder, maxLength, - scale, PUnsignedLong.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedLong.INSTANCE, sortOrder, maxLength, scale, + PUnsignedLong.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveLongPhoenixArray pArr = (PhoenixArray.PrimitiveLongPhoenixArray) value; - long[] longArr = (long[]) pArr.array; - for (Object i : longArr) { - if (!super.isCoercibleTo(PUnsignedLong.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedLong.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveLongPhoenixArray pArr = (PhoenixArray.PrimitiveLongPhoenixArray) value; + long[] longArr = (long[]) pArr.array; + for (Object i : longArr) { + 
if (!super.isCoercibleTo(PUnsignedLong.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedLong.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallint.java index c50af782686..3a3999a2bb6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PUnsignedSmallint extends PWholeNumber { @@ -98,7 +97,7 @@ public Object toObject(Object object, PDataType actualType) { @Override public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { Short v = (Short) PSmallint.INSTANCE.toObject(b, o, l, actualType, sortOrder); throwIfNonNegativeNumber(v); return v; @@ -111,14 +110,14 @@ public boolean isComparableTo(PDataType targetType) { @Override public boolean isCoercibleTo(PDataType targetType) { - return targetType.equals(this) || PUnsignedInt.INSTANCE.isCoercibleTo(targetType) || PSmallint.INSTANCE - .isCoercibleTo(targetType); + return targetType.equals(this) || PUnsignedInt.INSTANCE.isCoercibleTo(targetType) + || PSmallint.INSTANCE.isCoercibleTo(targetType); } @Override public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PSmallint.INSTANCE - .isCoercibleTo(targetType, value); + return super.isCoercibleTo(targetType, value) + || PSmallint.INSTANCE.isCoercibleTo(targetType, value); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallintArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallintArray.java index d4e67e0da5a..bb1f9ea6dc2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallintArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedSmallintArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,74 +21,73 @@ public class PUnsignedSmallintArray extends PArrayDataType { - public static final PUnsignedSmallintArray INSTANCE = new PUnsignedSmallintArray(); + public static final PUnsignedSmallintArray INSTANCE = new PUnsignedSmallintArray(); - private PUnsignedSmallintArray() { - super("UNSIGNED_SMALLINT ARRAY", - PDataType.ARRAY_TYPE_BASE + PUnsignedSmallint.INSTANCE.getSqlType(), PhoenixArray.class, - null, 44); - } + private PUnsignedSmallintArray() { + super("UNSIGNED_SMALLINT ARRAY", + PDataType.ARRAY_TYPE_BASE + PUnsignedSmallint.INSTANCE.getSqlType(), PhoenixArray.class, null, + 44); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedSmallint.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedSmallint.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedSmallint.INSTANCE, sortOrder, maxLength, - scale, PUnsignedSmallint.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedSmallint.INSTANCE, sortOrder, maxLength, scale, + PUnsignedSmallint.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveShortPhoenixArray pArr = (PhoenixArray.PrimitiveShortPhoenixArray) value; - short[] shortArr = (short[]) pArr.array; - for (Object i : shortArr) { - if (!super.isCoercibleTo(PUnsignedSmallint.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedSmallint.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveShortPhoenixArray pArr = 
(PhoenixArray.PrimitiveShortPhoenixArray) value; + short[] shortArr = (short[]) pArr.array; + for (Object i : shortArr) { + if (!super.isCoercibleTo(PUnsignedSmallint.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedSmallint.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java index fd07ec485e0..c38d5927b55 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTime.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,7 +45,7 @@ public int toBytes(Object object, byte[] bytes, int offset) { @Override public Time toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { java.sql.Time t = (java.sql.Time) PTime.INSTANCE.toObject(b, o, l, actualType, sortOrder); throwIfNonNegativeDate(t); return t; @@ -53,11 +53,10 @@ public Time toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sor @Override public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - java.sql.Time sqlTime = - toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - return PTime.INSTANCE.timeToClass(sqlTime, actualType, jdbcType); + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + java.sql.Time sqlTime = + toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + return PTime.INSTANCE.timeToClass(sqlTime, actualType, jdbcType); } @Override @@ -79,7 +78,8 @@ public boolean isCoercibleTo(PDataType targetType) { @Override public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PTime.INSTANCE.isCoercibleTo(targetType, value); + return super.isCoercibleTo(targetType, value) + || PTime.INSTANCE.isCoercibleTo(targetType, value); } @Override @@ -104,7 +104,8 @@ public Object toObject(String value) { @Override public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType == PUnsignedDate.INSTANCE || otherType == PUnsignedTimestamp.INSTANCE; + return super.isBytesComparableWith(otherType) || otherType == PUnsignedDate.INSTANCE + || otherType == PUnsignedTimestamp.INSTANCE; } @Override @@ -124,7 +125,6 @@ public int getResultSetSqlType() { @Override public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new java.sql.Time( - (Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + return new java.sql.Time((Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimeArray.java 
b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimeArray.java index 0851d48a45f..e91602c6c1f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimeArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimeArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,73 +23,72 @@ public class PUnsignedTimeArray extends PArrayDataType { - public static final PUnsignedTimeArray INSTANCE = new PUnsignedTimeArray(); + public static final PUnsignedTimeArray INSTANCE = new PUnsignedTimeArray(); - private PUnsignedTimeArray() { - super("UNSIGNED_TIME ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedTime.INSTANCE.getSqlType(), - PhoenixArray.class, null, 39); - } + private PUnsignedTimeArray() { + super("UNSIGNED_TIME ARRAY", PDataType.ARRAY_TYPE_BASE + PUnsignedTime.INSTANCE.getSqlType(), + PhoenixArray.class, null, 39); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedTime.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedTime.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedTime.INSTANCE, sortOrder, maxLength, - scale, PUnsignedTime.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedTime.INSTANCE, sortOrder, maxLength, scale, + PUnsignedTime.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] timeArr = (Object[]) pArr.array; - for (Object i : timeArr) { - if (!super.isCoercibleTo(PUnsignedTime.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) 
{ + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedTime.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] timeArr = (Object[]) pArr.array; + for (Object i : timeArr) { + if (!super.isCoercibleTo(PUnsignedTime.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedTime.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java index 08b28c032bd..ccf2f048cb9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestamp.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,55 +22,56 @@ public class PUnsignedTimestamp extends PTimestamp { - public static final PUnsignedTimestamp INSTANCE = new PUnsignedTimestamp(); + public static final PUnsignedTimestamp INSTANCE = new PUnsignedTimestamp(); - private PUnsignedTimestamp() { - super("UNSIGNED_TIMESTAMP", 20, 12); - } + private PUnsignedTimestamp() { + super("UNSIGNED_TIMESTAMP", 20, 12); + } - @Override - public boolean isBytesComparableWith(PDataType otherType) { - return equalsAny(this, otherType, PVarbinary.INSTANCE, PBinary.INSTANCE, PUnsignedTime.INSTANCE, PUnsignedDate.INSTANCE, PUnsignedLong.INSTANCE); - } + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return equalsAny(this, otherType, PVarbinary.INSTANCE, PBinary.INSTANCE, PUnsignedTime.INSTANCE, + PUnsignedDate.INSTANCE, PUnsignedLong.INSTANCE); + } - @Override - public Object toObject(Object object, PDataType actualType) { - java.sql.Timestamp ts = (java.sql.Timestamp) super.toObject(object, actualType); - throwIfNonNegativeDate(ts); - return ts; - } + @Override + public Object toObject(Object object, PDataType actualType) { + java.sql.Timestamp ts = (java.sql.Timestamp) super.toObject(object, actualType); + throwIfNonNegativeDate(ts); + return ts; + } - @Override - public boolean isCastableTo(PDataType targetType) { - return PUnsignedDate.INSTANCE.isCastableTo(targetType); - } + @Override + public boolean isCastableTo(PDataType targetType) { + return PUnsignedDate.INSTANCE.isCastableTo(targetType); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return targetType.equals(this) || PUnsignedDate.INSTANCE.isCoercibleTo(targetType); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return targetType.equals(this) || PUnsignedDate.INSTANCE.isCoercibleTo(targetType); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PTimestamp.INSTANCE - .isCoercibleTo(targetType, value); - } + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + return super.isCoercibleTo(targetType, value) + || 
PTimestamp.INSTANCE.isCoercibleTo(targetType, value); + } - @Override - public int getResultSetSqlType() { - return PTimestamp.INSTANCE.getResultSetSqlType(); - } + @Override + public int getResultSetSqlType() { + return PTimestamp.INSTANCE.getResultSetSqlType(); + } - @Override - public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { - int nanos = PUnsignedInt.INSTANCE.getCodec() - .decodeInt(ptr.get(), ptr.getOffset() + PLong.INSTANCE.getByteSize(), sortOrder); - return nanos; - } + @Override + public int getNanos(ImmutableBytesWritable ptr, SortOrder sortOrder) { + int nanos = PUnsignedInt.INSTANCE.getCodec().decodeInt(ptr.get(), + ptr.getOffset() + PLong.INSTANCE.getByteSize(), sortOrder); + return nanos; + } - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return new java.sql.Timestamp( - (Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); - } + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return new java.sql.Timestamp( + (Long) PUnsignedLong.INSTANCE.getSampleValue(maxLength, arrayLength)); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java index 3407310fbae..35df3df7c15 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,74 +23,73 @@ public class PUnsignedTimestampArray extends PArrayDataType { - public static final PUnsignedTimestampArray INSTANCE = new PUnsignedTimestampArray(); + public static final PUnsignedTimestampArray INSTANCE = new PUnsignedTimestampArray(); - private PUnsignedTimestampArray() { - super("UNSIGNED_TIMESTAMP ARRAY", - PDataType.ARRAY_TYPE_BASE + PUnsignedTimestamp.INSTANCE.getSqlType(), PhoenixArray.class, - null, 37); - } + private PUnsignedTimestampArray() { + super("UNSIGNED_TIMESTAMP ARRAY", + PDataType.ARRAY_TYPE_BASE + PUnsignedTimestamp.INSTANCE.getSqlType(), PhoenixArray.class, + null, 37); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedTimestamp.INSTANCE, sortOrder); - } + @Override + public 
byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedTimestamp.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedTimestamp.INSTANCE, sortOrder, - maxLength, scale, PUnsignedTimestamp.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedTimestamp.INSTANCE, sortOrder, maxLength, scale, + PUnsignedTimestamp.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] timeStampArr = (Object[]) pArr.array; - for (Object i : timeStampArr) { - if (!super.isCoercibleTo(PUnsignedTimestamp.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedTimestamp.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] timeStampArr = (Object[]) pArr.array; + for (Object i : timeStampArr) { + if (!super.isCoercibleTo(PUnsignedTimestamp.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedTimestamp.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyint.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyint.java index b772f5ee69d..aa3cf7a57cc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyint.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class PUnsignedTinyint extends PWholeNumber { @@ -95,7 +94,7 @@ public Object toObject(Object object, PDataType actualType) { @Override public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder sortOrder, - Integer maxLength, Integer scale) { + Integer maxLength, Integer scale) { Byte v = (Byte) PTinyint.INSTANCE.toObject(b, o, l, actualType, sortOrder); throwIfNonNegativeNumber(v); return v; @@ -104,13 +103,13 @@ public Object toObject(byte[] b, int o, int l, PDataType actualType, SortOrder s @Override public boolean isCoercibleTo(PDataType targetType) { return targetType.equals(this) || PUnsignedSmallint.INSTANCE.isCoercibleTo(targetType) - || PTinyint.INSTANCE.isCoercibleTo(targetType); + || PTinyint.INSTANCE.isCoercibleTo(targetType); } @Override public boolean isCoercibleTo(PDataType targetType, Object value) { - return super.isCoercibleTo(targetType, value) || PTinyint.INSTANCE - .isCoercibleTo(targetType, value); + return super.isCoercibleTo(targetType, value) + || PTinyint.INSTANCE.isCoercibleTo(targetType, value); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java index 6e622349aa3..5d1f0ae40b1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,74 +21,73 @@ public class PUnsignedTinyintArray extends PArrayDataType { - public static final PUnsignedTinyintArray INSTANCE = new PUnsignedTinyintArray(); + public static final PUnsignedTinyintArray INSTANCE = new PUnsignedTinyintArray(); - private PUnsignedTinyintArray() { - super("UNSIGNED_TINYINT ARRAY", - PDataType.ARRAY_TYPE_BASE + PUnsignedTinyint.INSTANCE.getSqlType(), PhoenixArray.class, - null, 45); - } + private PUnsignedTinyintArray() { + super("UNSIGNED_TINYINT ARRAY", + PDataType.ARRAY_TYPE_BASE + PUnsignedTinyint.INSTANCE.getSqlType(), PhoenixArray.class, null, + 45); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PUnsignedTinyint.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PUnsignedTinyint.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PUnsignedTinyint.INSTANCE, sortOrder, maxLength, - scale, PUnsignedTinyint.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PUnsignedTinyint.INSTANCE, sortOrder, maxLength, scale, + PUnsignedTinyint.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray.PrimitiveBytePhoenixArray pArr = (PhoenixArray.PrimitiveBytePhoenixArray) value; - byte[] byteArr = (byte[]) pArr.array; - for (Object i : byteArr) { - if (!super.isCoercibleTo(PUnsignedTinyint.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PUnsignedTinyint.INSTANCE, arrayLength, maxLength); + PhoenixArray.PrimitiveBytePhoenixArray pArr = (PhoenixArray.PrimitiveBytePhoenixArray) value; + byte[] 
byteArr = (byte[]) pArr.array; + for (Object i : byteArr) { + if (!super.isCoercibleTo(PUnsignedTinyint.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PUnsignedTinyint.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java index 17167e96a90..5c2f58c6dcf 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinary.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,141 +27,142 @@ public class PVarbinary extends PBinaryBase { - public static final PVarbinary INSTANCE = new PVarbinary(); - - private PVarbinary() { - super("VARBINARY", Types.VARBINARY, byte[].class, null, 22); - } - - PVarbinary(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); - } - - @Override - public byte[] toBytes(Object object) { - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - return (byte[]) object; - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - byte[] o = (byte[]) object; - // assumes there's enough room - System.arraycopy(bytes, offset, o, 0, o.length); - return o.length; - } - - /** - * Override because we must always create a new byte array - */ - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - byte[] bytes = toBytes(object); - // Override because we need to allocate a new buffer in this case - if (sortOrder == SortOrder.DESC) { - return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); - } - return bytes; - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - if (offset == 0 && bytes.length == length && sortOrder == SortOrder.ASC) { - return bytes; - } - byte[] bytesCopy = new byte[length]; - System.arraycopy(bytes, offset, bytesCopy, 0, length); - if (sortOrder == SortOrder.DESC) { - bytesCopy = SortOrder.invert(bytes, offset, bytesCopy, 0, length); - offset = 0; - } - return bytesCopy; - } - - @Override - public Object toObject(Object object, PDataType actualType) { - return actualType.toBytes(object); - } - - @Override - public boolean isFixedWidth() { - return false; - } - - @Override - public int estimateByteSize(Object o) { - byte[] value = (byte[]) o; - return value == null ? 
1 : value.length; - } - - @Override - public Integer getByteSize() { - return null; - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinaryEncoded.INSTANCE); - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == null && rhs == null) { - return 0; - } else if (lhs == null) { - return -1; - } else if (rhs == null) { - return 1; - } - if (equalsAny(rhsType, this, PBinary.INSTANCE)) { - return Bytes.compareTo((byte[]) lhs, (byte[]) rhs); - } else { - byte[] rhsBytes = rhsType.toBytes(rhs); - return Bytes.compareTo((byte[]) lhs, rhsBytes); - } - } - - @Override - public Object toObject(String value) { - if (value == null || value.length() == 0) { - return null; - } - Object object = Base64.getDecoder().decode(value); - if (object == null) { throw newIllegalDataException( - "Input: [" + value + "] is not base64 encoded"); } - return object; - } - - @Override - public String toStringLiteral(byte[] b, int o, int length, Format formatter) { - StringBuilder buf = new StringBuilder(); - buf.append("X'"); - if (length > 0) { - buf.append(Bytes.toHex(b, o, length)); - } - buf.append("'"); - return buf.toString(); - } - - @Override - public String toStringLiteral(Object o, Format formatter) { - return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter); - } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - int length = maxLength != null && maxLength > 0 ? maxLength : 1; - byte[] b = new byte[length]; - RANDOM.get().nextBytes(b); - return b; - } + public static final PVarbinary INSTANCE = new PVarbinary(); + + private PVarbinary() { + super("VARBINARY", Types.VARBINARY, byte[].class, null, 22); + } + + PVarbinary(String sqlTypeName, int sqlType, Class clazz, PDataCodec codec, int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } + + @Override + public byte[] toBytes(Object object) { + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; + } + return (byte[]) object; + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; + } + byte[] o = (byte[]) object; + // assumes there's enough room + System.arraycopy(bytes, offset, o, 0, o.length); + return o.length; + } + + /** + * Override because we must always create a new byte array + */ + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + byte[] bytes = toBytes(object); + // Override because we need to allocate a new buffer in this case + if (sortOrder == SortOrder.DESC) { + return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); + } + return bytes; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (length == 0) { + return null; + } + if (offset == 0 && bytes.length == length && sortOrder == SortOrder.ASC) { + return bytes; + } + byte[] bytesCopy = new byte[length]; + System.arraycopy(bytes, offset, bytesCopy, 0, length); + if (sortOrder == SortOrder.DESC) { + bytesCopy = SortOrder.invert(bytes, offset, bytesCopy, 0, length); + offset = 0; + } + return bytesCopy; + } + + @Override + public Object toObject(Object object, PDataType actualType) { + return actualType.toBytes(object); + } + + @Override + public boolean isFixedWidth() { + return false; + } + + @Override + public int estimateByteSize(Object o) { + byte[] value = (byte[]) o; + return 
value == null ? 1 : value.length; + } + + @Override + public Integer getByteSize() { + return null; + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinaryEncoded.INSTANCE); + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == null && rhs == null) { + return 0; + } else if (lhs == null) { + return -1; + } else if (rhs == null) { + return 1; + } + if (equalsAny(rhsType, this, PBinary.INSTANCE)) { + return Bytes.compareTo((byte[]) lhs, (byte[]) rhs); + } else { + byte[] rhsBytes = rhsType.toBytes(rhs); + return Bytes.compareTo((byte[]) lhs, rhsBytes); + } + } + + @Override + public Object toObject(String value) { + if (value == null || value.length() == 0) { + return null; + } + Object object = Base64.getDecoder().decode(value); + if (object == null) { + throw newIllegalDataException("Input: [" + value + "] is not base64 encoded"); + } + return object; + } + + @Override + public String toStringLiteral(byte[] b, int o, int length, Format formatter) { + StringBuilder buf = new StringBuilder(); + buf.append("X'"); + if (length > 0) { + buf.append(Bytes.toHex(b, o, length)); + } + buf.append("'"); + return buf.toString(); + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + return toStringLiteral((byte[]) o, 0, ((byte[]) o).length, formatter); + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + int length = maxLength != null && maxLength > 0 ? maxLength : 1; + byte[] b = new byte[length]; + RANDOM.get().nextBytes(b); + return b; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java index 01b866743b3..a39ed9fd539 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PVarbinaryArray extends PArrayDataType { - public static final PVarbinaryArray INSTANCE = new PVarbinaryArray(); + public static final PVarbinaryArray INSTANCE = new PVarbinaryArray(); - private PVarbinaryArray() { - super("VARBINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PVarbinary.INSTANCE.getSqlType(), - PhoenixArray.class, null, 27); - } + private PVarbinaryArray() { + super("VARBINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PVarbinary.INSTANCE.getSqlType(), + PhoenixArray.class, null, 27); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PVarbinary.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PVarbinary.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PVarbinary.INSTANCE, sortOrder, maxLength, scale, - PVarbinary.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PVarbinary.INSTANCE, sortOrder, maxLength, scale, + PVarbinary.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] charArr = (Object[]) pArr.array; - for (Object i : charArr) { - if (!super.isCoercibleTo(PVarbinary.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PVarbinary.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] charArr = (Object[]) pArr.array; + for (Object i : charArr) { + if (!super.isCoercibleTo(PVarbinary.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, 
Integer arrayLength) { + return getSampleValue(PVarbinary.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryEncoded.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryEncoded.java index 84fcf6b4c73..9513c9e7101 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryEncoded.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarbinaryEncoded.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,165 +22,167 @@ public class PVarbinaryEncoded extends PVarbinary { - public static final PVarbinaryEncoded INSTANCE = new PVarbinaryEncoded(); + public static final PVarbinaryEncoded INSTANCE = new PVarbinaryEncoded(); - private PVarbinaryEncoded() { - super("VARBINARY_ENCODED", PDataType.VARBINARY_ENCODED_TYPE, byte[].class, null, 50); - } + private PVarbinaryEncoded() { + super("VARBINARY_ENCODED", PDataType.VARBINARY_ENCODED_TYPE, byte[].class, null, 50); + } - private byte[] encodeBytesAscOrder(byte[] bytes) { - int countZeros = 0; - for (byte b : bytes) { - if (b == (byte) 0x00) { - countZeros++; - } - } - if (countZeros == 0) { - return bytes; - } - byte[] encodedBytes = new byte[bytes.length + countZeros]; - int pos = 0; - for (byte b : bytes) { - if (b != (byte) 0x00) { - encodedBytes[pos++] = b; - } else { - encodedBytes[pos++] = (byte) 0x00; - encodedBytes[pos++] = (byte) 0xFF; - } - } - return encodedBytes; + private byte[] encodeBytesAscOrder(byte[] bytes) { + int countZeros = 0; + for (byte b : bytes) { + if (b == (byte) 0x00) { + countZeros++; + } } - - private byte[] decodeBytesAscOrder(byte[] bytes) { - int countZeros = 0; - for (int i = 0; i < (bytes.length - 1); i++) { - if (bytes[i] == (byte) 0x00 && bytes[i + 1] == (byte) 0xFF) { - countZeros++; - } - } - if (countZeros == 0) { - return bytes; - } - byte[] decodedBytes = new byte[bytes.length - countZeros]; - int pos = 0; - int i = 0; - for (; i < (bytes.length - 1); i++) { - if (bytes[i] == (byte) 0x00 && bytes[i + 1] == (byte) 0xFF) { - decodedBytes[pos++] = (byte) 0x00; - i++; - } else { - decodedBytes[pos++] = bytes[i]; - } - } - if (i == (bytes.length - 1)) { - decodedBytes[pos] = bytes[bytes.length - 1]; - } - return decodedBytes; + if (countZeros == 0) { + return bytes; } - - private byte[] encodeBytesDescOrder(byte[] bytes) { - int countZeros = 0; - for (byte b : bytes) { - if (b == SortOrder.invert((byte) 0x00)) { - countZeros++; - } - } - if (countZeros == 0) { - return bytes; - } - byte[] encodedBytes = new byte[bytes.length + countZeros]; - int pos = 0; - for (byte b : bytes) { - if (b != SortOrder.invert((byte) 0x00)) { - encodedBytes[pos++] = b; - } else { - encodedBytes[pos++] = SortOrder.invert((byte) 0x00); - encodedBytes[pos++] = SortOrder.invert((byte) 0xFF); - } - } - return encodedBytes; + byte[] encodedBytes = new byte[bytes.length + countZeros]; + int pos = 0; + for (byte b : bytes) { + if (b != (byte) 0x00) { + encodedBytes[pos++] = b; + } else { + encodedBytes[pos++] = (byte) 0x00; + encodedBytes[pos++] = (byte) 0xFF; + } } + return encodedBytes; + } - private byte[] decodeBytesDescOrder(byte[] bytes) { - int 
countZeros = 0; - for (int i = 0; i < (bytes.length - 1); i++) { - if (bytes[i] == SortOrder.invert((byte) 0x00) - && bytes[i + 1] == SortOrder.invert((byte) 0xFF)) { - countZeros++; - } - } - if (countZeros == 0) { - return bytes; - } - byte[] decodedBytes = new byte[bytes.length - countZeros]; - int pos = 0; - int i = 0; - for (; i < (bytes.length - 1); i++) { - if (bytes[i] == SortOrder.invert((byte) 0x00) - && bytes[i + 1] == SortOrder.invert((byte) 0xFF)) { - decodedBytes[pos++] = SortOrder.invert((byte) 0x00); - i++; - } else { - decodedBytes[pos++] = bytes[i]; - } - } - if (i == (bytes.length - 1)) { - decodedBytes[pos] = bytes[bytes.length - 1]; - } - return decodedBytes; + private byte[] decodeBytesAscOrder(byte[] bytes) { + int countZeros = 0; + for (int i = 0; i < (bytes.length - 1); i++) { + if (bytes[i] == (byte) 0x00 && bytes[i + 1] == (byte) 0xFF) { + countZeros++; + } + } + if (countZeros == 0) { + return bytes; } + byte[] decodedBytes = new byte[bytes.length - countZeros]; + int pos = 0; + int i = 0; + for (; i < (bytes.length - 1); i++) { + if (bytes[i] == (byte) 0x00 && bytes[i + 1] == (byte) 0xFF) { + decodedBytes[pos++] = (byte) 0x00; + i++; + } else { + decodedBytes[pos++] = bytes[i]; + } + } + if (i == (bytes.length - 1)) { + decodedBytes[pos] = bytes[bytes.length - 1]; + } + return decodedBytes; + } - @Override - public byte[] toBytes(Object object) { - return encodeBytesAscOrder(super.toBytes(object)); + private byte[] encodeBytesDescOrder(byte[] bytes) { + int countZeros = 0; + for (byte b : bytes) { + if (b == SortOrder.invert((byte) 0x00)) { + countZeros++; + } + } + if (countZeros == 0) { + return bytes; + } + byte[] encodedBytes = new byte[bytes.length + countZeros]; + int pos = 0; + for (byte b : bytes) { + if (b != SortOrder.invert((byte) 0x00)) { + encodedBytes[pos++] = b; + } else { + encodedBytes[pos++] = SortOrder.invert((byte) 0x00); + encodedBytes[pos++] = SortOrder.invert((byte) 0xFF); + } } + return encodedBytes; + } - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - byte[] o = (byte[]) object; - System.arraycopy(bytes, offset, o, 0, o.length); - byte[] result = encodeBytesAscOrder(o); - return result.length; + private byte[] decodeBytesDescOrder(byte[] bytes) { + int countZeros = 0; + for (int i = 0; i < (bytes.length - 1); i++) { + if ( + bytes[i] == SortOrder.invert((byte) 0x00) && bytes[i + 1] == SortOrder.invert((byte) 0xFF) + ) { + countZeros++; + } } + if (countZeros == 0) { + return bytes; + } + byte[] decodedBytes = new byte[bytes.length - countZeros]; + int pos = 0; + int i = 0; + for (; i < (bytes.length - 1); i++) { + if ( + bytes[i] == SortOrder.invert((byte) 0x00) && bytes[i + 1] == SortOrder.invert((byte) 0xFF) + ) { + decodedBytes[pos++] = SortOrder.invert((byte) 0x00); + i++; + } else { + decodedBytes[pos++] = bytes[i]; + } + } + if (i == (bytes.length - 1)) { + decodedBytes[pos] = bytes[bytes.length - 1]; + } + return decodedBytes; + } + + @Override + public byte[] toBytes(Object object) { + return encodeBytesAscOrder(super.toBytes(object)); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - byte[] bytes; - if (object == null) { - bytes = ByteUtil.EMPTY_BYTE_ARRAY; - } else { - bytes = (byte[]) object; - } - if (sortOrder == SortOrder.DESC) { - byte[] result = SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); - return encodeBytesDescOrder(result); - } - return encodeBytesAscOrder(bytes); + @Override + public int 
toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; } + byte[] o = (byte[]) object; + System.arraycopy(bytes, offset, o, 0, o.length); + byte[] result = encodeBytesAscOrder(o); + return result.length; + } - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - if (offset == 0 && bytes.length == length && sortOrder == SortOrder.ASC) { - return decodeBytesAscOrder(bytes); - } - byte[] bytesCopy = new byte[length]; - System.arraycopy(bytes, offset, bytesCopy, 0, length); - if (sortOrder == SortOrder.DESC) { - bytesCopy = SortOrder.invert(bytes, offset, bytesCopy, 0, length); - } - return decodeBytesAscOrder(bytesCopy); + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + byte[] bytes; + if (object == null) { + bytes = ByteUtil.EMPTY_BYTE_ARRAY; + } else { + bytes = (byte[]) object; } + if (sortOrder == SortOrder.DESC) { + byte[] result = SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length); + return encodeBytesDescOrder(result); + } + return encodeBytesAscOrder(bytes); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (length == 0) { + return null; + } + if (offset == 0 && bytes.length == length && sortOrder == SortOrder.ASC) { + return decodeBytesAscOrder(bytes); } + byte[] bytesCopy = new byte[length]; + System.arraycopy(bytes, offset, bytesCopy, 0, length); + if (sortOrder == SortOrder.DESC) { + bytesCopy = SortOrder.invert(bytes, offset, bytesCopy, 0, length); + } + return decodeBytesAscOrder(bytesCopy); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PBinary.INSTANCE, PVarbinary.INSTANCE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarchar.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarchar.java index 911edf3f4e1..9b161aa7c87 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarchar.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarchar.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,177 +24,177 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class PVarchar extends PDataType { - public static final PVarchar INSTANCE = new PVarchar(); - - private PVarchar() { - super("VARCHAR", Types.VARCHAR, String.class, null, 0); - } - - @Override - public byte[] toBytes(Object object) { - // TODO: consider using avro UTF8 object instead of String - // so that we get get the size easily - if (object == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - return Bytes.toBytes((String) object); - } - - @Override - public int toBytes(Object object, byte[] bytes, int offset) { - if (object == null) { - return 0; - } - byte[] b = toBytes(object); // TODO: no byte[] allocation: use CharsetEncoder - System.arraycopy(b, 0, bytes, offset, b.length); - return b.length; - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale) { - if (length == 0) { - return null; - } - if (!actualType.isCoercibleTo(this)) { - throwConstraintViolationException(actualType, this); - } - if (sortOrder == SortOrder.DESC) { - bytes = SortOrder.invert(bytes, offset, length); - offset = 0; - } - return Bytes.toString(bytes, offset, length); - } - - @Override - public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, - SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) - throws SQLException { - if (String.class.isAssignableFrom(jdbcType)) { - //We don't actually get here, we shortcut the String case in ResultSet - return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); - } - throw newMismatchException(actualType, jdbcType); - } - - @Override - public Object toObject(Object object, PDataType actualType) { - if (equalsAny(actualType, this, PChar.INSTANCE)) { - String s = (String) object; - return s == null || s.length() > 0 ? 
s : null; - } else if (equalsAny(actualType, PVarchar.INSTANCE)) { - if (object == null) { - return null; - } - return object.toString(); - } - return throwConstraintViolationException(actualType, this); - } - - @Override - public boolean isCoercibleTo(PDataType targetType) { - return equalsAny(targetType, this, PChar.INSTANCE, PVarbinary.INSTANCE, PBinary.INSTANCE, - PVarbinaryEncoded.INSTANCE); - } - - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (isCoercibleTo(targetType)) { - if (targetType.equals(PChar.INSTANCE)) { - return value != null; - } - return true; - } - return false; - } - - @Override - public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, - SortOrder sortOrder, Integer maxLength, Integer scale, - Integer desiredMaxLength, Integer desiredScale) { - if (ptr.getLength() != 0 && desiredMaxLength != null) { - if (maxLength == null || maxLength > desiredMaxLength) { - if (value != null) { // Use value if provided - maxLength = value.toString().length(); - } else { - coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, - desiredScale, sortOrder, true); - maxLength = StringUtil - .calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); - } - return maxLength <= desiredMaxLength; - } - } - return true; - } - - @Override - public boolean isFixedWidth() { - return false; - } - - @Override - public int estimateByteSize(Object o) { - String value = (String) o; - return value == null ? 1 : value.length(); - } - - @Override - public Integer getByteSize() { + public static final PVarchar INSTANCE = new PVarchar(); + + private PVarchar() { + super("VARCHAR", Types.VARCHAR, String.class, null, 0); + } + + @Override + public byte[] toBytes(Object object) { + // TODO: consider using avro UTF8 object instead of String + // so that we get get the size easily + if (object == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; + } + return Bytes.toBytes((String) object); + } + + @Override + public int toBytes(Object object, byte[] bytes, int offset) { + if (object == null) { + return 0; + } + byte[] b = toBytes(object); // TODO: no byte[] allocation: use CharsetEncoder + System.arraycopy(b, 0, bytes, offset, b.length); + return b.length; + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + if (length == 0) { + return null; + } + if (!actualType.isCoercibleTo(this)) { + throwConstraintViolationException(actualType, this); + } + if (sortOrder == SortOrder.DESC) { + bytes = SortOrder.invert(bytes, offset, length); + offset = 0; + } + return Bytes.toString(bytes, offset, length); + } + + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale, Class jdbcType) throws SQLException { + if (String.class.isAssignableFrom(jdbcType)) { + // We don't actually get here, we shortcut the String case in ResultSet + return toObject(bytes, offset, length, actualType, sortOrder, maxLength, scale); + } + throw newMismatchException(actualType, jdbcType); + } + + @Override + public Object toObject(Object object, PDataType actualType) { + if (equalsAny(actualType, this, PChar.INSTANCE)) { + String s = (String) object; + return s == null || s.length() > 0 ? 
s : null; + } else if (equalsAny(actualType, PVarchar.INSTANCE)) { + if (object == null) { return null; - } - - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - if (lhs == rhs) { - return 0; - } - if (lhs == null) { - return -1; - } - if (rhs == null) { - return 1; + } + return object.toString(); + } + return throwConstraintViolationException(actualType, this); + } + + @Override + public boolean isCoercibleTo(PDataType targetType) { + return equalsAny(targetType, this, PChar.INSTANCE, PVarbinary.INSTANCE, PBinary.INSTANCE, + PVarbinaryEncoded.INSTANCE); + } + + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (isCoercibleTo(targetType)) { + if (targetType.equals(PChar.INSTANCE)) { + return value != null; + } + return true; + } + return false; + } + + @Override + public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, + SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, + Integer desiredScale) { + if (ptr.getLength() != 0 && desiredMaxLength != null) { + if (maxLength == null || maxLength > desiredMaxLength) { + if (value != null) { // Use value if provided + maxLength = value.toString().length(); + } else { + coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, + desiredScale, sortOrder, true); + maxLength = + StringUtil.calculateUTF8Length(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder); } - return ((String) lhs).compareTo((String) rhs); - } - - @Override - public Object toObject(String value) { - return value; - } - - @Override - public boolean isBytesComparableWith(PDataType otherType) { - return super.isBytesComparableWith(otherType) || otherType == PChar.INSTANCE; - } - - @Override - public String toStringLiteral(Object o, Format formatter) { - if (formatter != null) { - return "'" + formatter.format(o) + "'"; - } - return null == o ? String.valueOf(o) : "'" + StringUtil.escapeStringConstant(o.toString()) + "'"; - } - - private char[] sampleChars = new char[1]; - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - Preconditions.checkArgument(maxLength == null || maxLength >= 0); - int length = maxLength != null ? maxLength : 1; - if (length != sampleChars.length) { - sampleChars = new char[length]; - } - for (int i = 0; i < length; i++) { - sampleChars[i] = (char) (RANDOM.get().nextInt(Byte.MAX_VALUE-2) + 1); - } - return new String(sampleChars); - } + return maxLength <= desiredMaxLength; + } + } + return true; + } + + @Override + public boolean isFixedWidth() { + return false; + } + + @Override + public int estimateByteSize(Object o) { + String value = (String) o; + return value == null ? 1 : value.length(); + } + + @Override + public Integer getByteSize() { + return null; + } + + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + if (lhs == rhs) { + return 0; + } + if (lhs == null) { + return -1; + } + if (rhs == null) { + return 1; + } + return ((String) lhs).compareTo((String) rhs); + } + + @Override + public Object toObject(String value) { + return value; + } + + @Override + public boolean isBytesComparableWith(PDataType otherType) { + return super.isBytesComparableWith(otherType) || otherType == PChar.INSTANCE; + } + + @Override + public String toStringLiteral(Object o, Format formatter) { + if (formatter != null) { + return "'" + formatter.format(o) + "'"; + } + return null == o + ? 
String.valueOf(o) + : "'" + StringUtil.escapeStringConstant(o.toString()) + "'"; + } + + private char[] sampleChars = new char[1]; + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + Preconditions.checkArgument(maxLength == null || maxLength >= 0); + int length = maxLength != null ? maxLength : 1; + if (length != sampleChars.length) { + sampleChars = new char[length]; + } + for (int i = 0; i < length; i++) { + sampleChars[i] = (char) (RANDOM.get().nextInt(Byte.MAX_VALUE - 2) + 1); + } + return new String(sampleChars); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java index 6edaf805d3e..9322ec6ea62 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,73 +21,72 @@ public class PVarcharArray extends PArrayDataType { - public static final PVarcharArray INSTANCE = new PVarcharArray(); + public static final PVarcharArray INSTANCE = new PVarcharArray(); - private PVarcharArray() { - super("VARCHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PVarchar.INSTANCE.getSqlType(), - PhoenixArray.class, null, 26); - } + private PVarcharArray() { + super("VARCHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PVarchar.INSTANCE.getSqlType(), + PhoenixArray.class, null, 26); + } - @Override - public boolean isArrayType() { - return true; - } + @Override + public boolean isArrayType() { + return true; + } - @Override - public boolean isFixedWidth() { - return false; - } + @Override + public boolean isFixedWidth() { + return false; + } - @Override - public int compareTo(Object lhs, Object rhs, PDataType rhsType) { - return compareTo(lhs, rhs); - } + @Override + public int compareTo(Object lhs, Object rhs, PDataType rhsType) { + return compareTo(lhs, rhs); + } - @Override - public Integer getByteSize() { - return null; - } + @Override + public Integer getByteSize() { + return null; + } - @Override - public byte[] toBytes(Object object) { - return toBytes(object, SortOrder.ASC); - } + @Override + public byte[] toBytes(Object object) { + return toBytes(object, SortOrder.ASC); + } - @Override - public byte[] toBytes(Object object, SortOrder sortOrder) { - return toBytes(object, PVarchar.INSTANCE, sortOrder); - } + @Override + public byte[] toBytes(Object object, SortOrder sortOrder) { + return toBytes(object, PVarchar.INSTANCE, sortOrder); + } - @Override - public Object toObject(byte[] bytes, int offset, int length, - PDataType actualType, SortOrder sortOrder, Integer maxLength, - Integer scale) { - return toObject(bytes, offset, length, PVarchar.INSTANCE, sortOrder, maxLength, scale, - PVarchar.INSTANCE); - } + @Override + public Object toObject(byte[] bytes, int offset, int length, PDataType actualType, + SortOrder sortOrder, Integer maxLength, Integer scale) { + return toObject(bytes, offset, length, PVarchar.INSTANCE, sortOrder, maxLength, scale, + PVarchar.INSTANCE); + } - @Override - public boolean isCoercibleTo(PDataType targetType) { - return 
isCoercibleTo(targetType, this); - } + @Override + public boolean isCoercibleTo(PDataType targetType) { + return isCoercibleTo(targetType, this); + } - @Override - public boolean isCoercibleTo(PDataType targetType, Object value) { - if (value == null) { - return true; - } - PhoenixArray pArr = (PhoenixArray) value; - Object[] charArr = (Object[]) pArr.array; - for (Object i : charArr) { - if (!super.isCoercibleTo(PVarchar.INSTANCE, i)) { - return false; - } - } - return true; + @Override + public boolean isCoercibleTo(PDataType targetType, Object value) { + if (value == null) { + return true; } - - @Override - public Object getSampleValue(Integer maxLength, Integer arrayLength) { - return getSampleValue(PVarchar.INSTANCE, arrayLength, maxLength); + PhoenixArray pArr = (PhoenixArray) value; + Object[] charArr = (Object[]) pArr.array; + for (Object i : charArr) { + if (!super.isCoercibleTo(PVarchar.INSTANCE, i)) { + return false; + } } + return true; + } + + @Override + public Object getSampleValue(Integer maxLength, Integer arrayLength) { + return getSampleValue(PVarchar.INSTANCE, arrayLength, maxLength); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java index a3a1d13d6b6..d6b418a0be5 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,22 +22,22 @@ public abstract class PWholeNumber extends PNumericType { - protected PWholeNumber(String sqlTypeName, int sqlType, Class clazz, - org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { - super(sqlTypeName, sqlType, clazz, codec, ordinal); - } + protected PWholeNumber(String sqlTypeName, int sqlType, Class clazz, + org.apache.phoenix.schema.types.PDataType.PDataCodec codec, int ordinal) { + super(sqlTypeName, sqlType, clazz, codec, ordinal); + } - @Override - public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, - Integer scale) { - long l = getCodec().decodeLong(bytes, offset, sortOrder); - return Long.signum(l); - } + @Override + public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder, Integer maxLength, + Integer scale) { + long l = getCodec().decodeLong(bytes, offset, sortOrder); + return Long.signum(l); + } - @Override - public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, - ImmutableBytesWritable outPtr) { - long l = getCodec().decodeLong(bytes, offset, sortOrder); - getCodec().encodeLong(Math.abs(l), outPtr); - } + @Override + public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder, + ImmutableBytesWritable outPtr) { + long l = getCodec().decodeLong(bytes, offset, sortOrder); + getCodec().encodeLong(Math.abs(l), outPtr); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java index 73f3c67de0c..9f7d4ec1814 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/types/PhoenixArray.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,200 +23,200 @@ import java.util.Arrays; import java.util.Map; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.SQLCloseable; /** * java.sql.Array implementation for Phoenix */ -public class PhoenixArray implements Array,SQLCloseable { - private static final String TO_STRING_SEPARATOR = ", "; - private static final String TO_STRING_END = "]"; - private static final String TO_STRING_BEGIN = "["; - - PDataType baseType; - Object array; - int numElements; - Integer maxLength; - protected int hashCode = Integer.MIN_VALUE; - - public PhoenixArray() { - // empty constructor - } - - public Integer getMaxLength() { - return maxLength; - } +public class PhoenixArray implements Array, SQLCloseable { + private static final String TO_STRING_SEPARATOR = ", "; + private static final String TO_STRING_END = "]"; + private static final String TO_STRING_BEGIN = "["; + + PDataType baseType; + Object array; + int numElements; + Integer maxLength; + protected int hashCode = Integer.MIN_VALUE; + + public PhoenixArray() { + // empty constructor + } - public boolean isPrimitiveType() { - return this.baseType.getCodec() != null; - } - - private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) { - Object[] resizedElements = new Object[elements.length]; - for (int i = 0; i < elements.length; i++) { - Integer length = baseType.getMaxLength(elements[i]); - if (length != null) { - if (length == maxLength) { - resizedElements[i] = elements[i]; - } else { - resizedElements[i] = baseType.pad(elements[i], maxLength); - } - } else { - resizedElements[i] = baseType.pad(elements[i], maxLength); - } + public Integer getMaxLength() { + return maxLength; + } + + public boolean isPrimitiveType() { + return this.baseType.getCodec() != null; + } + + private static Object[] coerceToNewLength(PDataType baseType, Object[] elements, int maxLength) { + Object[] resizedElements = new Object[elements.length]; + for (int i = 0; i < elements.length; i++) { + Integer length = baseType.getMaxLength(elements[i]); + if (length != null) { + if (length == maxLength) { + resizedElements[i] = elements[i]; + } else { + resizedElements[i] = baseType.pad(elements[i], maxLength); } - return resizedElements; - } - private static Object[] coerceToEqualLength(PDataType baseType, Object[] elements) { - if (elements == null || elements.length == 0) { - return elements; - } - int maxLength = 0; - boolean resizeElements = false; - for (int i = 0; i < elements.length; i++) { - Integer length = baseType.getMaxLength(elements[i]); - if (length != null) { - if (maxLength == 0){ - maxLength = length; - continue; - } - if (length > maxLength) { - maxLength = length; - resizeElements = true; - } else if (length < maxLength) { - resizeElements = true; - } - } else { - resizeElements = true; - } + } else { + resizedElements[i] = baseType.pad(elements[i], maxLength); + } + } + return resizedElements; + } + + private static Object[] 
coerceToEqualLength(PDataType baseType, Object[] elements) { + if (elements == null || elements.length == 0) { + return elements; + } + int maxLength = 0; + boolean resizeElements = false; + for (int i = 0; i < elements.length; i++) { + Integer length = baseType.getMaxLength(elements[i]); + if (length != null) { + if (maxLength == 0) { + maxLength = length; + continue; } - if (!resizeElements) { - return elements; - } - return coerceToNewLength(baseType, elements, maxLength); - } - - public PhoenixArray(PDataType baseType, Object[] elements) { - // As we are dealing with primitive types and only the Boxed objects - this.baseType = baseType; - if (baseType.isFixedWidth()) { - if (baseType.getByteSize() == null) { - elements = coerceToEqualLength(baseType, elements); - if (elements != null && elements.length > 0) { - for(int i = 0; i < elements.length; i++) { - if(elements[i] != null) { - maxLength = baseType.getMaxLength(elements[i]); - break; - } - } - } - } - } - this.array = convertObjectArrayToPrimitiveArray(elements); - this.numElements = elements.length; - } - - public PhoenixArray(PhoenixArray pArr, Integer desiredMaxLength) { - this.baseType = pArr.baseType; - Object[] elements = (Object[])pArr.array; - if (baseType.isFixedWidth()) { - if (baseType.getByteSize() == null) { - elements = coerceToNewLength(baseType, (Object[])pArr.array, desiredMaxLength); - maxLength = desiredMaxLength; + if (length > maxLength) { + maxLength = length; + resizeElements = true; + } else if (length < maxLength) { + resizeElements = true; + } + } else { + resizeElements = true; + } + } + if (!resizeElements) { + return elements; + } + return coerceToNewLength(baseType, elements, maxLength); + } + + public PhoenixArray(PDataType baseType, Object[] elements) { + // As we are dealing with primitive types and only the Boxed objects + this.baseType = baseType; + if (baseType.isFixedWidth()) { + if (baseType.getByteSize() == null) { + elements = coerceToEqualLength(baseType, elements); + if (elements != null && elements.length > 0) { + for (int i = 0; i < elements.length; i++) { + if (elements[i] != null) { + maxLength = baseType.getMaxLength(elements[i]); + break; } + } } - this.array = convertObjectArrayToPrimitiveArray(elements); - this.numElements = elements.length; + } + } + this.array = convertObjectArrayToPrimitiveArray(elements); + this.numElements = elements.length; + } + + public PhoenixArray(PhoenixArray pArr, Integer desiredMaxLength) { + this.baseType = pArr.baseType; + Object[] elements = (Object[]) pArr.array; + if (baseType.isFixedWidth()) { + if (baseType.getByteSize() == null) { + elements = coerceToNewLength(baseType, (Object[]) pArr.array, desiredMaxLength); + maxLength = desiredMaxLength; + } } + this.array = convertObjectArrayToPrimitiveArray(elements); + this.numElements = elements.length; + } public Object convertObjectArrayToPrimitiveArray(Object[] elements) { return elements; - } - - @Override - public void free() throws SQLException { - } - - @Override - public Object getArray() throws SQLException { - return array; - } - - @Override - public void close() throws SQLException { - this.array = null; - } - - @Override - public Object getArray(Map> map) throws SQLException { - throw new UnsupportedOperationException("Currently not supported"); - } - - @Override - public Object getArray(long index, int count) throws SQLException { - if(index < 1) { - throw new IllegalArgumentException("Index cannot be less than 1"); - } - // Get the set of elements from the given index to the specified 
count - Object[] intArr = (Object[]) array; - boundaryCheck(index, count, intArr); - Object[] newArr = new Object[count]; - // Add checks() here. - int i = 0; - for (int j = (int) index; j < count; j++) { - newArr[i] = intArr[j]; - i++; - } - return newArr; - } - - private void boundaryCheck(long index, int count, Object[] arr) { - if (index - 1 + count > arr.length) { - throw new IllegalArgumentException("The array index is out of range of the total number of elements in the array " + arr.length); - } - } - - @Override - public Object getArray(long index, int count, Map> map) - throws SQLException { - if(map != null && !map.isEmpty()) { - throw new UnsupportedOperationException("Currently not supported"); - } - return null; - } - - @Override - public int getBaseType() throws SQLException { - return baseType.getSqlType(); - } - - @Override - public String getBaseTypeName() throws SQLException { - return baseType.getSqlTypeName(); - } - - @Override - public ResultSet getResultSet() throws SQLException { - throw new UnsupportedOperationException("Currently not supported"); - } - - @Override - public ResultSet getResultSet(Map> arg0) - throws SQLException { - throw new UnsupportedOperationException("Currently not supported"); - } - - @Override - public ResultSet getResultSet(long arg0, int arg1) throws SQLException { - throw new UnsupportedOperationException("Currently not supported"); - } - - @Override - public ResultSet getResultSet(long arg0, int arg1, - Map> arg2) throws SQLException { - throw new UnsupportedOperationException("Currently not supported"); - } + } + + @Override + public void free() throws SQLException { + } + + @Override + public Object getArray() throws SQLException { + return array; + } + + @Override + public void close() throws SQLException { + this.array = null; + } + + @Override + public Object getArray(Map> map) throws SQLException { + throw new UnsupportedOperationException("Currently not supported"); + } + + @Override + public Object getArray(long index, int count) throws SQLException { + if (index < 1) { + throw new IllegalArgumentException("Index cannot be less than 1"); + } + // Get the set of elements from the given index to the specified count + Object[] intArr = (Object[]) array; + boundaryCheck(index, count, intArr); + Object[] newArr = new Object[count]; + // Add checks() here. 
+ int i = 0; + for (int j = (int) index; j < count; j++) { + newArr[i] = intArr[j]; + i++; + } + return newArr; + } + + private void boundaryCheck(long index, int count, Object[] arr) { + if (index - 1 + count > arr.length) { + throw new IllegalArgumentException( + "The array index is out of range of the total number of elements in the array " + + arr.length); + } + } + + @Override + public Object getArray(long index, int count, Map> map) throws SQLException { + if (map != null && !map.isEmpty()) { + throw new UnsupportedOperationException("Currently not supported"); + } + return null; + } + + @Override + public int getBaseType() throws SQLException { + return baseType.getSqlType(); + } + + @Override + public String getBaseTypeName() throws SQLException { + return baseType.getSqlTypeName(); + } + + @Override + public ResultSet getResultSet() throws SQLException { + throw new UnsupportedOperationException("Currently not supported"); + } + + @Override + public ResultSet getResultSet(Map> arg0) throws SQLException { + throw new UnsupportedOperationException("Currently not supported"); + } + + @Override + public ResultSet getResultSet(long arg0, int arg1) throws SQLException { + throw new UnsupportedOperationException("Currently not supported"); + } + + @Override + public ResultSet getResultSet(long arg0, int arg1, Map> arg2) + throws SQLException { + throw new UnsupportedOperationException("Currently not supported"); + } /** * Return the value in position {@code index} from the underlying array. Used to work around @@ -226,52 +226,52 @@ public Object getElement(int index) { return ((Object[]) array)[index]; } - public int getDimensions() { - return this.numElements; - } - - public int estimateByteSize(int pos) { - if(((Object[])array)[pos] == null) { - return 0; - } - return this.baseType.estimateByteSize(((Object[])array)[pos]); - } - - public Integer getMaxLength(int pos) { - return this.baseType.getMaxLength(((Object[])array)[pos]); - } - - public byte[] toBytes(int pos) { - return this.baseType.toBytes(((Object[])array)[pos]); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(TO_STRING_BEGIN); - boolean isFirst = true; - for (int i = 0; i < getDimensions(); i++) { - Object o = getElement(i); - if (isFirst) { - isFirst = false; - } else { - sb.append(TO_STRING_SEPARATOR); - } - sb.append(this.baseType.toStringLiteral(o)); - } - sb.append(TO_STRING_END); - return sb.toString(); - } - - public boolean isNull(int pos) { - if(this.baseType.toBytes(((Object[])array)[pos]).length == 0) { - return true; - } else { - return false; - } - } - - @Override - public boolean equals(Object obj) { + public int getDimensions() { + return this.numElements; + } + + public int estimateByteSize(int pos) { + if (((Object[]) array)[pos] == null) { + return 0; + } + return this.baseType.estimateByteSize(((Object[]) array)[pos]); + } + + public Integer getMaxLength(int pos) { + return this.baseType.getMaxLength(((Object[]) array)[pos]); + } + + public byte[] toBytes(int pos) { + return this.baseType.toBytes(((Object[]) array)[pos]); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(TO_STRING_BEGIN); + boolean isFirst = true; + for (int i = 0; i < getDimensions(); i++) { + Object o = getElement(i); + if (isFirst) { + isFirst = false; + } else { + sb.append(TO_STRING_SEPARATOR); + } + sb.append(this.baseType.toStringLiteral(o)); + } + sb.append(TO_STRING_END); + return sb.toString(); + } + + public boolean isNull(int pos) { + if 
(this.baseType.toBytes(((Object[]) array)[pos]).length == 0) { + return true; + } else { + return false; + } + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (!(obj instanceof PhoenixArray)) return false; @@ -279,10 +279,10 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.deepEquals((Object[]) array, (Object[]) oArray.array); - } + } - @Override - public int hashCode() { + @Override + public int hashCode() { // implementation based on commons.lang.HashCodeBuilder, except the hashcode is cached and // reused for a given instance. if (hashCode != Integer.MIN_VALUE) return hashCode; @@ -291,39 +291,40 @@ public int hashCode() { hashCode = hashCode * 37 + baseType.getSqlType(); hashCode = hashCode * 37 + Arrays.deepHashCode((Object[]) array); return hashCode; - } - - public static class PrimitiveIntPhoenixArray extends PhoenixArray { - private int[] intArr; - public PrimitiveIntPhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - intArr = new int[elements.length]; - int i = 0; - for (Object o : elements) { - if (o != null) { - intArr[i] = (Integer) o; - } - i++; - } - return intArr; + } + + public static class PrimitiveIntPhoenixArray extends PhoenixArray { + private int[] intArr; + + public PrimitiveIntPhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + intArr = new int[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + intArr[i] = (Integer) o; } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(intArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(intArr[pos]); - } - - @Override - public boolean equals(Object obj) { + i++; + } + return intArr; + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(intArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(intArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -331,7 +332,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((int[]) array, (int[]) oArray.array); - } + } @Override public int hashCode() { @@ -347,38 +348,40 @@ public int hashCode() { public Object getElement(int index) { return ((int[]) array)[index]; } - } - - public static class PrimitiveShortPhoenixArray extends PhoenixArray { - private short[] shortArr; - public PrimitiveShortPhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - shortArr = new short[elements.length]; - int i = 0; - for(Object o : elements) { - if (o != null) { - shortArr[i] = (Short)o; - } - i++; - } + } + + public static class PrimitiveShortPhoenixArray extends PhoenixArray { + private short[] shortArr; + + public PrimitiveShortPhoenixArray(PDataType dataType, Object[] elements) { + 
super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + shortArr = new short[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + shortArr[i] = (Short) o; + } + i++; + } return shortArr; - } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(shortArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(shortArr[pos]); - } - - @Override - public boolean equals(Object obj) { + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(shortArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(shortArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -386,7 +389,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((short[]) array, (short[]) oArray.array); - } + } @Override public int hashCode() { @@ -403,36 +406,39 @@ public Object getElement(int index) { return ((short[]) array)[index]; } } - - public static class PrimitiveLongPhoenixArray extends PhoenixArray { - private long[] longArr; - public PrimitiveLongPhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - longArr = new long[elements.length]; - int i = 0; - for(Object o : elements) { - if (o != null) { - longArr[i] = (Long)o; - } - i++; - } + + public static class PrimitiveLongPhoenixArray extends PhoenixArray { + private long[] longArr; + + public PrimitiveLongPhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + longArr = new long[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + longArr[i] = (Long) o; + } + i++; + } return longArr; - } - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(longArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(longArr[pos]); - } - - @Override - public boolean equals(Object obj) { + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(longArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(longArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -440,7 +446,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((long[]) array, (long[]) oArray.array); - } + } @Override public int hashCode() { @@ -456,38 +462,40 @@ public int hashCode() { public Object getElement(int index) { return ((long[]) array)[index]; } - } - - public static class PrimitiveDoublePhoenixArray extends PhoenixArray { - private double[] doubleArr; - public PrimitiveDoublePhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] 
elements) { - doubleArr = new double[elements.length]; - int i = 0; - for (Object o : elements) { - if (o != null) { - doubleArr[i] = (Double) o; - } - i++; - } + } + + public static class PrimitiveDoublePhoenixArray extends PhoenixArray { + private double[] doubleArr; + + public PrimitiveDoublePhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + doubleArr = new double[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + doubleArr[i] = (Double) o; + } + i++; + } return doubleArr; - } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(doubleArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(doubleArr[pos]); - } - - @Override - public boolean equals(Object obj) { + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(doubleArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(doubleArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -495,7 +503,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((double[]) array, (double[]) oArray.array); - } + } @Override public int hashCode() { @@ -511,38 +519,40 @@ public int hashCode() { public Object getElement(int index) { return ((double[]) array)[index]; } - } - - public static class PrimitiveFloatPhoenixArray extends PhoenixArray { - private float[] floatArr; - public PrimitiveFloatPhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - floatArr = new float[elements.length]; - int i = 0; - for(Object o : elements) { - if (o != null) { - floatArr[i] = (Float)o; - } - i++; - } + } + + public static class PrimitiveFloatPhoenixArray extends PhoenixArray { + private float[] floatArr; + + public PrimitiveFloatPhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + floatArr = new float[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + floatArr[i] = (Float) o; + } + i++; + } return floatArr; - } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(floatArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(floatArr[pos]); - } - - @Override - public boolean equals(Object obj) { + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(floatArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(floatArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -550,7 +560,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((float[]) array, (float[]) oArray.array); - } + } @Override public int hashCode() 
{ @@ -566,38 +576,40 @@ public int hashCode() { public Object getElement(int index) { return ((float[]) array)[index]; } - } - - public static class PrimitiveBytePhoenixArray extends PhoenixArray { - private byte[] byteArr; - public PrimitiveBytePhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - byteArr = new byte[elements.length]; - int i = 0; - for(Object o : elements) { - if (o != null) { - byteArr[i] = (Byte)o; - } - i++; - } + } + + public static class PrimitiveBytePhoenixArray extends PhoenixArray { + private byte[] byteArr; + + public PrimitiveBytePhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + byteArr = new byte[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + byteArr[i] = (Byte) o; + } + i++; + } return byteArr; - } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(byteArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(byteArr[pos]); - } - - @Override - public boolean equals(Object obj) { + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(byteArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(byteArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -605,7 +617,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((byte[]) array, (byte[]) oArray.array); - } + } @Override public int hashCode() { @@ -621,45 +633,46 @@ public int hashCode() { public Object getElement(int index) { return ((byte[]) array)[index]; } - } - - public static class PrimitiveBooleanPhoenixArray extends PhoenixArray { - private boolean[] booleanArr; - public PrimitiveBooleanPhoenixArray(PDataType dataType, Object[] elements) { - super(dataType, elements); - } - - @Override - public boolean isPrimitiveType() { - // boolean is primitive although PBoolean has no codec - return true; - } - - @Override - public Object convertObjectArrayToPrimitiveArray(Object[] elements) { - booleanArr = new boolean[elements.length]; - int i = 0; - for (Object o : elements) { - if (o != null) { - booleanArr[i] = (Boolean) o; - } - i++; - } - return booleanArr; + } + + public static class PrimitiveBooleanPhoenixArray extends PhoenixArray { + private boolean[] booleanArr; + + public PrimitiveBooleanPhoenixArray(PDataType dataType, Object[] elements) { + super(dataType, elements); + } + + @Override + public boolean isPrimitiveType() { + // boolean is primitive although PBoolean has no codec + return true; + } + + @Override + public Object convertObjectArrayToPrimitiveArray(Object[] elements) { + booleanArr = new boolean[elements.length]; + int i = 0; + for (Object o : elements) { + if (o != null) { + booleanArr[i] = (Boolean) o; } - - @Override - public int estimateByteSize(int pos) { - return this.baseType.estimateByteSize(booleanArr[pos]); - } - - @Override - public byte[] toBytes(int pos) { - return this.baseType.toBytes(booleanArr[pos]); - } - - @Override - public boolean equals(Object obj) { + i++; + } + return 
booleanArr; + } + + @Override + public int estimateByteSize(int pos) { + return this.baseType.estimateByteSize(booleanArr[pos]); + } + + @Override + public byte[] toBytes(int pos) { + return this.baseType.toBytes(booleanArr[pos]); + } + + @Override + public boolean equals(Object obj) { if (obj == null) return false; if (this == obj) return true; if (this.getClass() != obj.getClass()) return false; @@ -667,7 +680,7 @@ public boolean equals(Object obj) { if (numElements != oArray.numElements) return false; if (baseType.getSqlType() != oArray.baseType.getSqlType()) return false; return Arrays.equals((boolean[]) array, (boolean[]) oArray.array); - } + } @Override public int hashCode() { @@ -683,5 +696,5 @@ public int hashCode() { public Object getElement(int index) { return ((boolean[]) array)[index]; } - } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java index 9f8bacb51ba..d1f8ca09d54 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,11 @@ */ package org.apache.phoenix.trace; -import org.apache.phoenix.thirdparty.com.google.common.base.MoreObjects; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.*; + import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.MoreObjects; +import org.apache.phoenix.thirdparty.com.google.common.base.Objects; /** * Making implementing metric info a little easier @@ -35,30 +36,33 @@ public class MetricsInfoImpl implements MetricsInfo { this.description = checkNotNull(description, "description"); } - @Override public String name() { + @Override + public String name() { return name; } - @Override public String description() { + @Override + public String description() { return description; } - @Override public boolean equals(Object obj) { + @Override + public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && - Objects.equal(description, other.description()); + return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } - @Override public int hashCode() { + @Override + public int hashCode() { return Objects.hashCode(name, description); } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name).add("description", description) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("description", description) + .toString(); } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java index cc672a0bcc0..a1f91846a51 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache 
Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,281 +50,260 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; - /** - * Write the metrics to a phoenix table. - * Generally, this class is instantiated via hadoop-metrics2 property files. - * Specifically, you would create this class by adding the following to - * by - * This would actually be set as: + * Write the metrics to a phoenix table. Generally, this class is instantiated via hadoop-metrics2 + * property files. Specifically, you would create this class by adding the following to by This + * would actually be set as: * [prefix].sink.[some instance name].class=org.apache.phoenix.trace.PhoenixMetricsSink * , where prefix is either: *
 * <ol>
 * <li>"phoenix", for the client</li>
 * <li>"hbase", for the server</li>
 * </ol>
- * and - * some instance name is just any unique name, so properties can be differentiated if + * and some instance name is just any unique name, so properties can be differentiated if * there are multiple sinks of the same type created */ public class PhoenixMetricsSink implements MetricsSink { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class); - private static final String VARIABLE_VALUE = "?"; + private static final String VARIABLE_VALUE = "?"; - private static final Joiner COLUMN_JOIN = Joiner.on("."); - static final String TAG_FAMILY = "tags"; - /** - * Count of the number of tags we are storing for this row - */ - static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); + private static final Joiner COLUMN_JOIN = Joiner.on("."); + static final String TAG_FAMILY = "tags"; + /** + * Count of the number of tags we are storing for this row + */ + static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); - static final String ANNOTATION_FAMILY = "annotations"; - static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); + static final String ANNOTATION_FAMILY = "annotations"; + static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); - /** - * Join strings on a comma - */ - private static final Joiner COMMAS = Joiner.on(','); + /** + * Join strings on a comma + */ + private static final Joiner COMMAS = Joiner.on(','); - private Connection conn; + private Connection conn; - private String table; - - public PhoenixMetricsSink() { - LOGGER.info("Writing tracing metrics to phoenix table"); + private String table; - } + public PhoenixMetricsSink() { + LOGGER.info("Writing tracing metrics to phoenix table"); + + } + + @Override + public void init(SubsetConfiguration config) { + Metrics.markSinkInitialized(); + LOGGER.info("Phoenix tracing writer started"); + } - @Override - public void init(SubsetConfiguration config) { - Metrics.markSinkInitialized(); - LOGGER.info("Phoenix tracing writer started"); + /** + * Initialize this only when we need it + */ + private void lazyInitialize() { + synchronized (this) { + if (this.conn != null) { + return; + } + try { + // create the phoenix connection + Properties props = new Properties(); + props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, Tracing.Frequency.NEVER.getKey()); + org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create(); + Connection conn = QueryUtil.getConnectionOnServer(props, conf); + // enable bulk loading when we have enough data + conn.setAutoCommit(true); + + String tableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); + + initializeInternal(conn, tableName); + } catch (Exception e) { + throw new RuntimeException(e); + } } + } - /** - * Initialize this only when we need it - */ - private void lazyInitialize() { - synchronized (this) { - if (this.conn != null) { - return; - } - try { - // create the phoenix connection - Properties props = new Properties(); - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, - Tracing.Frequency.NEVER.getKey()); - org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create(); - Connection conn = QueryUtil.getConnectionOnServer(props, conf); - // enable bulk loading when we have enough data - conn.setAutoCommit(true); - - String tableName = - conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, 
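As the class Javadoc above describes, the sink is wired up entirely through hadoop-metrics2 properties rather than in code. A minimal sketch of such a properties entry, assuming the client-side "phoenix" prefix and an arbitrary instance name of "tracing" (both the instance name and the surrounding properties file are illustrative):

  phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink

A server-side configuration would use the "hbase" prefix in place of "phoenix".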
- QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); - - initializeInternal(conn, tableName); - } catch (Exception e) { - throw new RuntimeException(e); - } - } + private void initializeInternal(Connection conn, String tableName) throws SQLException { + this.conn = conn; + // ensure that the target table already exists + if (!traceTableExists(conn, tableName)) { + createTable(conn, tableName); } - - private void initializeInternal(Connection conn, String tableName) throws SQLException { - this.conn = conn; - // ensure that the target table already exists - if (!traceTableExists(conn, tableName)) { - createTable(conn, tableName); - } - this.table = tableName; + this.table = tableName; + } + + private boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { + try { + conn.unwrap(PhoenixConnection.class).getTable(traceTableName); + return true; + } catch (TableNotFoundException e) { + return false; } - - private boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { - try { - conn.unwrap(PhoenixConnection.class).getTable(traceTableName); - return true; - } catch (TableNotFoundException e) { - return false; - } + } + + /** + * Used for TESTING ONLY Initialize the connection and setup the table to use the + * {@link org.apache.phoenix.query.QueryServicesOptions#DEFAULT_TRACING_STATS_TABLE_NAME} + * @param conn to store for upserts and to create the table (if necessary) + * @param tableName TODO + * @throws SQLException if any phoenix operation fails + */ + @VisibleForTesting + public void initForTesting(Connection conn, String tableName) throws SQLException { + initializeInternal(conn, tableName); + } + + /** + * Create a stats table with the given name. Stores the name for use later when creating upsert + * statements + * @param conn connection to use when creating the table + * @param table name of the table to create + * @throws SQLException if any phoenix operations fails + */ + private void createTable(Connection conn, String table) throws SQLException { + // only primary-key columns can be marked non-null + String ddl = "create table if not exists " + table + "( " + TRACE.columnName + + " bigint not null, " + PARENT.columnName + " bigint not null, " + SPAN.columnName + + " bigint not null, " + DESCRIPTION.columnName + " varchar, " + START.columnName + + " bigint, " + END.columnName + " bigint, " + HOSTNAME.columnName + " varchar, " + TAG_COUNT + + " smallint, " + ANNOTATION_COUNT + " smallint" + " CONSTRAINT pk PRIMARY KEY (" + + TRACE.columnName + ", " + PARENT.columnName + ", " + SPAN.columnName + "))\n" + + // We have a config parameter that can be set so that tables are + // transactional by default. 
If that's set, we still don't want these system + // tables created as transactional tables, make these table non + // transactional + PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; + try (PreparedStatement stmt = conn.prepareStatement(ddl)) { + stmt.execute(); } - - /** - * Used for TESTING ONLY - * Initialize the connection and setup the table to use the - * {@link org.apache.phoenix.query.QueryServicesOptions#DEFAULT_TRACING_STATS_TABLE_NAME} - * - * @param conn to store for upserts and to create the table (if necessary) - * @param tableName TODO - * @throws SQLException if any phoenix operation fails - */ - @VisibleForTesting - public void initForTesting(Connection conn, String tableName) throws SQLException { - initializeInternal(conn, tableName); + } + + @Override + public void flush() { + try { + this.conn.commit(); + } catch (SQLException e) { + LOGGER.error("Failed to commit changes to table", e); } - - /** - * Create a stats table with the given name. Stores the name for use later when creating upsert - * statements - * - * @param conn connection to use when creating the table - * @param table name of the table to create - * @throws SQLException if any phoenix operations fails - */ - private void createTable(Connection conn, String table) throws SQLException { - // only primary-key columns can be marked non-null - String ddl = - "create table if not exists " + table + "( " + - TRACE.columnName + " bigint not null, " + - PARENT.columnName + " bigint not null, " + - SPAN.columnName + " bigint not null, " + - DESCRIPTION.columnName + " varchar, " + - START.columnName + " bigint, " + - END.columnName + " bigint, " + - HOSTNAME.columnName + " varchar, " + - TAG_COUNT + " smallint, " + - ANNOTATION_COUNT + " smallint" + - " CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", " - + PARENT.columnName + ", " + SPAN.columnName + "))\n" + - // We have a config parameter that can be set so that tables are - // transactional by default. If that's set, we still don't want these system - // tables created as transactional tables, make these table non - // transactional - PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; - try (PreparedStatement stmt = conn.prepareStatement(ddl)) { - stmt.execute(); - } + } + + /** + * Add a new metric record to be written. + */ + @Override + public void putMetrics(MetricsRecord record) { + // its not a tracing record, we are done. This could also be handled by filters, but safer + // to do it here, in case it gets misconfigured + if (!record.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { + return; } - @Override - public void flush() { - try { - this.conn.commit(); - } catch (SQLException e) { - LOGGER.error("Failed to commit changes to table", e); - } + // don't initialize until we actually have something to write + lazyInitialize(); + + String stmt = "UPSERT INTO " + table + " ("; + // drop it into the queue of things that should be written + List keys = new ArrayList(); + List values = new ArrayList(); + // we need to keep variable values in a separate set since they may have spaces, which + // causes the parser to barf. 
Instead, we need to add them after the statement is prepared + List variableValues = new ArrayList(record.tags().size()); + keys.add(TRACE.columnName); + values.add(Long.parseLong(record.name().substring(TracingUtils.METRIC_SOURCE_KEY.length()))); + + keys.add(DESCRIPTION.columnName); + values.add(VARIABLE_VALUE); + variableValues.add(record.description()); + + // add each of the metrics + for (AbstractMetric metric : record.metrics()) { + // name of the metric is also the column name to which we write + keys.add(MetricInfo.getColumnName(metric.name())); + values.add(metric.value()); } - /** - * Add a new metric record to be written. - * - * @param record - */ - @Override - public void putMetrics(MetricsRecord record) { - // its not a tracing record, we are done. This could also be handled by filters, but safer - // to do it here, in case it gets misconfigured - if (!record.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { - return; - } - - // don't initialize until we actually have something to write - lazyInitialize(); - - String stmt = "UPSERT INTO " + table + " ("; - // drop it into the queue of things that should be written - List keys = new ArrayList(); - List values = new ArrayList(); - // we need to keep variable values in a separate set since they may have spaces, which - // causes the parser to barf. Instead, we need to add them after the statement is prepared - List variableValues = new ArrayList(record.tags().size()); - keys.add(TRACE.columnName); - values.add( - Long.parseLong(record.name().substring(TracingUtils.METRIC_SOURCE_KEY.length()))); - - keys.add(DESCRIPTION.columnName); + // get the tags out so we can set them later (otherwise, need to be a single value) + int annotationCount = 0; + int tagCount = 0; + for (MetricsTag tag : record.tags()) { + if (tag.name().equals(ANNOTATION.traceName)) { + addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION, + annotationCount); + annotationCount++; + } else if (tag.name().equals(TAG.traceName)) { + addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount); + tagCount++; + } else if (tag.name().equals(HOSTNAME.traceName)) { + keys.add(HOSTNAME.columnName); values.add(VARIABLE_VALUE); - variableValues.add(record.description()); - - // add each of the metrics - for (AbstractMetric metric : record.metrics()) { - // name of the metric is also the column name to which we write - keys.add(MetricInfo.getColumnName(metric.name())); - values.add(metric.value()); - } - - // get the tags out so we can set them later (otherwise, need to be a single value) - int annotationCount = 0; - int tagCount = 0; - for (MetricsTag tag : record.tags()) { - if (tag.name().equals(ANNOTATION.traceName)) { - addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION, - annotationCount); - annotationCount++; - } else if (tag.name().equals(TAG.traceName)) { - addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount); - tagCount++; - } else if (tag.name().equals(HOSTNAME.traceName)) { - keys.add(HOSTNAME.columnName); - values.add(VARIABLE_VALUE); - variableValues.add(tag.value()); - } else if (tag.name().equals("Context")) { - // ignored - } else { - LOGGER.error("Got an unexpected tag: " + tag); - } - } - - // add the tag count, now that we know it - keys.add(TAG_COUNT); - // ignore the hostname in the tags, if we know it - values.add(tagCount); - - keys.add(ANNOTATION_COUNT); - values.add(annotationCount); - - // compile the statement together - stmt += 
COMMAS.join(keys); - stmt += ") VALUES (" + COMMAS.join(values) + ")"; - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Logging metrics to phoenix table via: " + stmt); - LOGGER.trace("With tags: " + variableValues); - } - try (PreparedStatement ps = conn.prepareStatement(stmt)) { - // add everything that wouldn't/may not parse - int index = 1; - for (String tag : variableValues) { - ps.setString(index++, tag); - } - // Not going through the standard route of using statement.execute() as that code path - // is blocked if the metadata hasn't been been upgraded to the new minor release. - MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); - MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); - MutationState newState = plan.execute(); - state.join(newState); - } catch (SQLException e) { - LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, - e); - } + variableValues.add(tag.value()); + } else if (tag.name().equals("Context")) { + // ignored + } else { + LOGGER.error("Got an unexpected tag: " + tag); + } } - public static String getDynamicColumnName(String family, String column, int count) { - return COLUMN_JOIN.join(family, column) + count; - } + // add the tag count, now that we know it + keys.add(TAG_COUNT); + // ignore the hostname in the tags, if we know it + values.add(tagCount); - private void addDynamicEntry(List keys, List values, - List variableValues, String family, MetricsTag tag, - MetricInfo metric, int count) { - // <.dynColumn> - keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); + keys.add(ANNOTATION_COUNT); + values.add(annotationCount); - // build the annotation value - String val = tag.description() + " - " + tag.value(); - values.add(VARIABLE_VALUE); - variableValues.add(val); - } + // compile the statement together + stmt += COMMAS.join(keys); + stmt += ") VALUES (" + COMMAS.join(values) + ")"; - @VisibleForTesting - public void clearForTesting() throws SQLException { - this.conn.rollback(); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Logging metrics to phoenix table via: " + stmt); + LOGGER.trace("With tags: " + variableValues); + } + try (PreparedStatement ps = conn.prepareStatement(stmt)) { + // add everything that wouldn't/may not parse + int index = 1; + for (String tag : variableValues) { + ps.setString(index++, tag); + } + // Not going through the standard route of using statement.execute() as that code path + // is blocked if the metadata hasn't been been upgraded to the new minor release. 
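For orientation, the keys and values assembled in putMetrics() above yield an UPSERT of roughly the following shape; the tag and annotation columns are dynamic columns declared inline with a VARCHAR type, and their string values are bound afterwards through the prepared statement. The table and per-metric column names here are illustrative only (the real ones come from MetricInfo and the configured tracing table); tags.count and annotations.count are the count columns defined above:

  UPSERT INTO TRACING_STATS (trace_id, description, tags.t0 VARCHAR, tags.count, annotations.count) VALUES (1234, ?, ?, 1, 0)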
+ MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); + MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); + MutationState newState = plan.execute(); + state.join(newState); + } catch (SQLException e) { + LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, e); } -} \ No newline at end of file + } + + public static String getDynamicColumnName(String family, String column, int count) { + return COLUMN_JOIN.join(family, column) + count; + } + + private void addDynamicEntry(List keys, List values, List variableValues, + String family, MetricsTag tag, MetricInfo metric, int count) { + // <.dynColumn> + keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); + + // build the annotation value + String val = tag.description() + " - " + tag.value(); + values.add(VARIABLE_VALUE); + variableValues.add(val); + } + + @VisibleForTesting + public void clearForTesting() throws SQLException { + this.conn.rollback(); + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java index 1b93000588c..f2b414d2fe4 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,348 +34,341 @@ import org.apache.phoenix.metrics.MetricInfo; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; import org.apache.phoenix.util.LogUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; - /** * Read the traces written to phoenix tables by the {@link TraceWriter}. */ public class TraceReader { - private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class); - private final Joiner comma = Joiner.on(','); - private String knownColumns; - { - // the order here dictates the order we pull out the values below. For now, just keep them - // in sync - so we can be efficient pulling them off the results. - knownColumns = - comma.join(MetricInfo.TRACE.columnName, MetricInfo.PARENT.columnName, - MetricInfo.SPAN.columnName, MetricInfo.DESCRIPTION.columnName, - MetricInfo.START.columnName, MetricInfo.END.columnName, - MetricInfo.HOSTNAME.columnName, TraceWriter.TAG_COUNT, - TraceWriter.ANNOTATION_COUNT); - } - - private Connection conn; - private String table; - private int pageSize; - - public TraceReader(Connection conn, String tracingTableName) throws SQLException { - this.conn = conn; - this.table = tracingTableName; - String ps = conn.getClientInfo(QueryServices.TRACING_PAGE_SIZE_ATTRIB); - this.pageSize = ps == null ? QueryServicesOptions.DEFAULT_TRACING_PAGE_SIZE : Integer.parseInt(ps); - } + private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class); + private final Joiner comma = Joiner.on(','); + private String knownColumns; + { + // the order here dictates the order we pull out the values below. 
For now, just keep them + // in sync - so we can be efficient pulling them off the results. + knownColumns = comma.join(MetricInfo.TRACE.columnName, MetricInfo.PARENT.columnName, + MetricInfo.SPAN.columnName, MetricInfo.DESCRIPTION.columnName, MetricInfo.START.columnName, + MetricInfo.END.columnName, MetricInfo.HOSTNAME.columnName, TraceWriter.TAG_COUNT, + TraceWriter.ANNOTATION_COUNT); + } + + private Connection conn; + private String table; + private int pageSize; + + public TraceReader(Connection conn, String tracingTableName) throws SQLException { + this.conn = conn; + this.table = tracingTableName; + String ps = conn.getClientInfo(QueryServices.TRACING_PAGE_SIZE_ATTRIB); + this.pageSize = + ps == null ? QueryServicesOptions.DEFAULT_TRACING_PAGE_SIZE : Integer.parseInt(ps); + } + + /** + * Read all the currently stored traces. + *
+ * Be Careful! This could cause an OOME if there are a lot of traces. + * @param limit max number of traces to return. If -1, returns all known traces. + * @return the found traces + */ + public Collection readAll(int limit) throws SQLException { + Set traces = new HashSet(); + // read all the known columns from the table, sorting first by trace column (so the same + // trace + // goes together), and then by start time (so parent spans always appear before child spans) + String query = + "SELECT " + knownColumns + " FROM " + table + " ORDER BY " + MetricInfo.TRACE.columnName + + " DESC, " + MetricInfo.START.columnName + " ASC" + " LIMIT " + pageSize; + int resultCount = 0; + try (PreparedStatement stmt = conn.prepareStatement(query); + ResultSet results = stmt.executeQuery()) { + TraceHolder trace = null; + // the spans that are not the root span, but haven't seen their parent yet + List orphans = null; + while (results.next()) { + int index = 1; + long traceid = results.getLong(index++); + long parent = results.getLong(index++); + long span = results.getLong(index++); + String desc = results.getString(index++); + long start = results.getLong(index++); + long end = results.getLong(index++); + String host = results.getString(index++); + int tagCount = results.getInt(index++); + int annotationCount = results.getInt(index++); + // we have a new trace + if (trace == null || traceid != trace.traceid) { + // only increment if we are on a new trace, to ensure we get at least one + if (trace != null) { + resultCount++; + } + // we beyond the limit, so we stop + if (resultCount >= limit) { + break; + } + trace = new TraceHolder(); + // add the orphans, so we can track them later + orphans = new ArrayList(); + trace.orphans = orphans; + trace.traceid = traceid; + traces.add(trace); + } - /** - * Read all the currently stored traces. - *
- * Be Careful! This could cause an OOME if there are a lot of traces. - * @param limit max number of traces to return. If -1, returns all known traces. - * @return the found traces - * @throws SQLException - */ - public Collection readAll(int limit) throws SQLException { - Set traces = new HashSet(); - // read all the known columns from the table, sorting first by trace column (so the same - // trace - // goes together), and then by start time (so parent spans always appear before child spans) - String query = - "SELECT " + knownColumns + " FROM " + table - + " ORDER BY " + MetricInfo.TRACE.columnName + " DESC, " - + MetricInfo.START.columnName + " ASC" + " LIMIT " + pageSize; - int resultCount = 0; - try (PreparedStatement stmt = conn.prepareStatement(query); - ResultSet results = stmt.executeQuery()) { - TraceHolder trace = null; - // the spans that are not the root span, but haven't seen their parent yet - List orphans = null; - while (results.next()) { - int index = 1; - long traceid = results.getLong(index++); - long parent = results.getLong(index++); - long span = results.getLong(index++); - String desc = results.getString(index++); - long start = results.getLong(index++); - long end = results.getLong(index++); - String host = results.getString(index++); - int tagCount = results.getInt(index++); - int annotationCount = results.getInt(index++); - // we have a new trace - if (trace == null || traceid != trace.traceid) { - // only increment if we are on a new trace, to ensure we get at least one - if (trace != null) { - resultCount++; - } - // we beyond the limit, so we stop - if (resultCount >= limit) { - break; - } - trace = new TraceHolder(); - // add the orphans, so we can track them later - orphans = new ArrayList(); - trace.orphans = orphans; - trace.traceid = traceid; - traces.add(trace); - } - - // search the spans to determine the if we have a known parent - SpanInfo parentSpan = null; - if (parent != Span.ROOT_SPAN_ID) { - // find the parent - for (SpanInfo p : trace.spans) { - if (p.id == parent) { - parentSpan = p; - break; - } - } - } - SpanInfo spanInfo = - new SpanInfo(parentSpan, parent, span, desc, start, end, host, tagCount, - annotationCount); - // search the orphans to see if this is the parent id - - for (int i = 0; i < orphans.size(); i++) { - SpanInfo orphan = orphans.get(i); - // we found the parent for the orphan - if (orphan.parentId == span) { - // update the bi-directional relationship - orphan.parent = spanInfo; - spanInfo.children.add(orphan); - // / its no longer an orphan - LOGGER.trace(addCustomAnnotations("Found parent for span: " + span)); - orphans.remove(i--); - } - } - - if (parentSpan != null) { - // add this as a child to the parent span - parentSpan.children.add(spanInfo); - } else if (parent != Span.ROOT_SPAN_ID) { - // add the span to the orphan pile to check for the remaining spans we see - LOGGER.info(addCustomAnnotations("No parent span found for span: " - + span + " (root span id: " + Span.ROOT_SPAN_ID + ")")); - orphans.add(spanInfo); - } - - // add the span to the full known list - trace.spans.add(spanInfo); - - // go back and find the tags for the row - spanInfo.tags.addAll(getTags(traceid, parent, span, tagCount)); - - spanInfo.annotations.addAll(getAnnotations(traceid, parent, span, annotationCount)); + // search the spans to determine the if we have a known parent + SpanInfo parentSpan = null; + if (parent != Span.ROOT_SPAN_ID) { + // find the parent + for (SpanInfo p : trace.spans) { + if (p.id == parent) { + parentSpan = p; + 
break; } + } + } + SpanInfo spanInfo = + new SpanInfo(parentSpan, parent, span, desc, start, end, host, tagCount, annotationCount); + // search the orphans to see if this is the parent id + + for (int i = 0; i < orphans.size(); i++) { + SpanInfo orphan = orphans.get(i); + // we found the parent for the orphan + if (orphan.parentId == span) { + // update the bi-directional relationship + orphan.parent = spanInfo; + spanInfo.children.add(orphan); + // / its no longer an orphan + LOGGER.trace(addCustomAnnotations("Found parent for span: " + span)); + orphans.remove(i--); + } } - return traces; - } + if (parentSpan != null) { + // add this as a child to the parent span + parentSpan.children.add(spanInfo); + } else if (parent != Span.ROOT_SPAN_ID) { + // add the span to the orphan pile to check for the remaining spans we see + LOGGER.info(addCustomAnnotations("No parent span found for span: " + span + + " (root span id: " + Span.ROOT_SPAN_ID + ")")); + orphans.add(spanInfo); + } - private Collection getTags(long traceid, long parent, long span, int count) - throws SQLException { - return getDynamicCountColumns(traceid, parent, span, count, - TraceWriter.TAG_FAMILY, MetricInfo.TAG.columnName); - } + // add the span to the full known list + trace.spans.add(spanInfo); + + // go back and find the tags for the row + spanInfo.tags.addAll(getTags(traceid, parent, span, tagCount)); - private Collection getAnnotations(long traceid, long parent, long span, - int count) throws SQLException { - return getDynamicCountColumns(traceid, parent, span, count, - TraceWriter.ANNOTATION_FAMILY, MetricInfo.ANNOTATION.columnName); + spanInfo.annotations.addAll(getAnnotations(traceid, parent, span, annotationCount)); + } } - private Collection getDynamicCountColumns(long traceid, long parent, - long span, int count, String family, String columnName) throws SQLException { - if (count == 0) { - return Collections.emptyList(); - } + return traces; + } + + private Collection getTags(long traceid, long parent, long span, int count) + throws SQLException { + return getDynamicCountColumns(traceid, parent, span, count, TraceWriter.TAG_FAMILY, + MetricInfo.TAG.columnName); + } + + private Collection getAnnotations(long traceid, long parent, long span, + int count) throws SQLException { + return getDynamicCountColumns(traceid, parent, span, count, TraceWriter.ANNOTATION_FAMILY, + MetricInfo.ANNOTATION.columnName); + } + + private Collection getDynamicCountColumns(long traceid, long parent, long span, + int count, String family, String columnName) throws SQLException { + if (count == 0) { + return Collections.emptyList(); + } - // build the column strings, family.column - String[] parts = new String[count]; - for (int i = 0; i < count; i++) { - parts[i] = TraceWriter.getDynamicColumnName(family, columnName, i); - } - // join the columns together - String columns = comma.join(parts); + // build the column strings, family.column + String[] parts = new String[count]; + for (int i = 0; i < count; i++) { + parts[i] = TraceWriter.getDynamicColumnName(family, columnName, i); + } + // join the columns together + String columns = comma.join(parts); - // redo them and add "VARCHAR to the end, so we can specify the columns - for (int i = 0; i < count; i++) { - parts[i] = parts[i] + " VARCHAR"; - } + // redo them and add "VARCHAR to the end, so we can specify the columns + for (int i = 0; i < count; i++) { + parts[i] = parts[i] + " VARCHAR"; + } - String dynamicColumns = comma.join(parts); - String request = - "SELECT " + columns + " from " 
+ table + "(" + dynamicColumns + ") WHERE " - + MetricInfo.TRACE.columnName + "=" + traceid + " AND " - + MetricInfo.PARENT.columnName + "=" + parent + " AND " - + MetricInfo.SPAN.columnName + "=" + span; - LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request)); - ResultSet results = conn.createStatement().executeQuery(request); - List cols = new ArrayList(); - while (results.next()) { - for (int index = 1; index <= count; index++) { - cols.add(results.getString(index)); - } - } - if (cols.size() < count) { - LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + - ", but only got " + cols.size() + " tags from rquest " + request)); - } - return cols; + String dynamicColumns = comma.join(parts); + String request = "SELECT " + columns + " from " + table + "(" + dynamicColumns + ") WHERE " + + MetricInfo.TRACE.columnName + "=" + traceid + " AND " + MetricInfo.PARENT.columnName + "=" + + parent + " AND " + MetricInfo.SPAN.columnName + "=" + span; + LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request)); + ResultSet results = conn.createStatement().executeQuery(request); + List cols = new ArrayList(); + while (results.next()) { + for (int index = 1; index <= count; index++) { + cols.add(results.getString(index)); + } } - - private String addCustomAnnotations(String logLine) throws SQLException { - if (conn.isWrapperFor(PhoenixConnection.class)) { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - logLine = LogUtil.addCustomAnnotations(logLine, phxConn); - } - return logLine; + if (cols.size() < count) { + LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + + cols.size() + " tags from rquest " + request)); } - - /** - * Holds information about a trace - */ - public static class TraceHolder { - public List orphans; - public long traceid; - public TreeSet spans = new TreeSet(); - - @Override - public int hashCode() { - return new Long(traceid).hashCode(); - } + return cols; + } - @Override - public boolean equals(Object o) { - if (o instanceof TraceHolder) { - return traceid == ((TraceHolder) o).traceid; - } - return false; - } + private String addCustomAnnotations(String logLine) throws SQLException { + if (conn.isWrapperFor(PhoenixConnection.class)) { + PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); + logLine = LogUtil.addCustomAnnotations(logLine, phxConn); + } + return logLine; + } + + /** + * Holds information about a trace + */ + public static class TraceHolder { + public List orphans; + public long traceid; + public TreeSet spans = new TreeSet(); + + @Override + public int hashCode() { + return new Long(traceid).hashCode(); + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder("Trace: " + traceid + "\n"); - // get the first span, which is always going to be the root span - SpanInfo root = spans.iterator().next(); - if (root.parent != null) { - sb.append("Root span not present! 
Just printing found spans\n"); - for (SpanInfo span : spans) { - sb.append(span.toString() + "\n"); - } - } else { - // print the tree of spans - List toPrint = new ArrayList(); - toPrint.add(root); - while (!toPrint.isEmpty()) { - SpanInfo span = toPrint.remove(0); - sb.append(span.toString() + "\n"); - toPrint.addAll(span.children); - } - } - if (orphans.size() > 0) { - sb.append("Found orphan spans:\n" + orphans); - } - return sb.toString(); - } + @Override + public boolean equals(Object o) { + if (o instanceof TraceHolder) { + return traceid == ((TraceHolder) o).traceid; + } + return false; } - public static class SpanInfo implements Comparable { - public SpanInfo parent; - public List children = new ArrayList(); - public String description; - public long id; - public long start; - public long end; - public String hostname; - public int tagCount; - public List tags = new ArrayList(); - public int annotationCount; - public List annotations = new ArrayList(); - private long parentId; - - public SpanInfo(SpanInfo parent, long parentid, long span, String desc, long start, - long end, String host, int tagCount, int annotationCount) { - this.parent = parent; - this.parentId = parentid; - this.id = span; - this.description = desc; - this.start = start; - this.end = end; - this.hostname = host; - this.tagCount = tagCount; - this.annotationCount = annotationCount; + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Trace: " + traceid + "\n"); + // get the first span, which is always going to be the root span + SpanInfo root = spans.iterator().next(); + if (root.parent != null) { + sb.append("Root span not present! Just printing found spans\n"); + for (SpanInfo span : spans) { + sb.append(span.toString() + "\n"); } - - @Override - public int hashCode() { - return new Long(id).hashCode(); + } else { + // print the tree of spans + List toPrint = new ArrayList(); + toPrint.add(root); + while (!toPrint.isEmpty()) { + SpanInfo span = toPrint.remove(0); + sb.append(span.toString() + "\n"); + toPrint.addAll(span.children); } + } + if (orphans.size() > 0) { + sb.append("Found orphan spans:\n" + orphans); + } + return sb.toString(); + } + } + + public static class SpanInfo implements Comparable { + public SpanInfo parent; + public List children = new ArrayList(); + public String description; + public long id; + public long start; + public long end; + public String hostname; + public int tagCount; + public List tags = new ArrayList(); + public int annotationCount; + public List annotations = new ArrayList(); + private long parentId; + + public SpanInfo(SpanInfo parent, long parentid, long span, String desc, long start, long end, + String host, int tagCount, int annotationCount) { + this.parent = parent; + this.parentId = parentid; + this.id = span; + this.description = desc; + this.start = start; + this.end = end; + this.hostname = host; + this.tagCount = tagCount; + this.annotationCount = annotationCount; + } - @Override - public boolean equals(Object o) { - if (o instanceof SpanInfo) { - return id == ((SpanInfo) o).id; - } - return false; - } + @Override + public int hashCode() { + return new Long(id).hashCode(); + } - /** - * Do the same sorting that we would get from reading the table with a {@link TraceReader}, - * specifically, by trace and then by start/end. However, these are only every stored in a - * single trace, so we can just sort on start/end times. 
- */ - @Override - public int compareTo(SpanInfo o) { - // root span always comes first - if (this.parentId == Span.ROOT_SPAN_ID) { - return -1; - } else if (o.parentId == Span.ROOT_SPAN_ID) { - return 1; - } + @Override + public boolean equals(Object o) { + if (o instanceof SpanInfo) { + return id == ((SpanInfo) o).id; + } + return false; + } - int compare = Longs.compare(start, o.start); - if (compare == 0) { - compare = Longs.compare(end, o.end); - if (compare == 0) { - return Longs.compare(id, o.id); - } - } - return compare; + /** + * Do the same sorting that we would get from reading the table with a {@link TraceReader}, + * specifically, by trace and then by start/end. However, these are only every stored in a + * single trace, so we can just sort on start/end times. + */ + @Override + public int compareTo(SpanInfo o) { + // root span always comes first + if (this.parentId == Span.ROOT_SPAN_ID) { + return -1; + } else if (o.parentId == Span.ROOT_SPAN_ID) { + return 1; + } + + int compare = Longs.compare(start, o.start); + if (compare == 0) { + compare = Longs.compare(end, o.end); + if (compare == 0) { + return Longs.compare(id, o.id); } + } + return compare; + } - @Override - public String toString() { - StringBuilder sb = new StringBuilder("Span: " + id + "\n"); - sb.append("\tdescription=" + description); - sb.append("\n"); - sb.append("\tparent=" - + (parent == null ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: " - + parentId + "]") : parent.id)); - sb.append("\n"); - sb.append("\tstart,end=" + start + "," + end); - sb.append("\n"); - sb.append("\telapsed=" + (end - start)); - sb.append("\n"); - sb.append("\thostname=" + hostname); - sb.append("\n"); - sb.append("\ttags=(" + tagCount + ") " + tags); - sb.append("\n"); - sb.append("\tannotations=(" + annotationCount + ") " + annotations); - sb.append("\n"); - sb.append("\tchildren="); - for (SpanInfo child : children) { - sb.append(child.id + ", "); - } - sb.append("\n"); - return sb.toString(); - } + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Span: " + id + "\n"); + sb.append("\tdescription=" + description); + sb.append("\n"); + sb.append("\tparent=" + (parent == null + ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: " + parentId + "]") + : parent.id)); + sb.append("\n"); + sb.append("\tstart,end=" + start + "," + end); + sb.append("\n"); + sb.append("\telapsed=" + (end - start)); + sb.append("\n"); + sb.append("\thostname=" + hostname); + sb.append("\n"); + sb.append("\ttags=(" + tagCount + ") " + tags); + sb.append("\n"); + sb.append("\tannotations=(" + annotationCount + ") " + annotations); + sb.append("\n"); + sb.append("\tchildren="); + for (SpanInfo child : children) { + sb.append(child.id + ", "); + } + sb.append("\n"); + return sb.toString(); + } - public long getParentIdForTesting() { - return parentId; - } + public long getParentIdForTesting() { + return parentId; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java index 9440da030c6..b5d9a5e1854 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -56,47 +56,48 @@ * This allows us to make the updates in batches. We might have spans that finish before other spans * (for instance in the same parent). By batching the updates we can lessen the overhead on the * client, which is also busy doing 'real' work.
- * This class is custom implementation of metrics queue and handles batch writes to the Phoenix Table - * via another thread. Batch size and number of threads are configurable. + * This class is custom implementation of metrics queue and handles batch writes to the Phoenix + * Table via another thread. Batch size and number of threads are configurable. *
*/ public class TraceSpanReceiver implements SpanReceiver { - private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class); + private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class); - private static final int CAPACITY = QueryServicesOptions.withDefaults().getTracingTraceBufferSize(); + private static final int CAPACITY = + QueryServicesOptions.withDefaults().getTracingTraceBufferSize(); - private BlockingQueue spanQueue = null; + private BlockingQueue spanQueue = null; - public TraceSpanReceiver() { - this.spanQueue = new ArrayBlockingQueue(CAPACITY); - } + public TraceSpanReceiver() { + this.spanQueue = new ArrayBlockingQueue(CAPACITY); + } - @Override - public void receiveSpan(Span span) { - if (span.getTraceId() != 0 && spanQueue.offer(span)) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Span buffered to queue " + span.toJson()); - } - } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) { - LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson()); - } + @Override + public void receiveSpan(Span span) { + if (span.getTraceId() != 0 && spanQueue.offer(span)) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Span buffered to queue " + span.toJson()); + } + } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) { + LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson()); } + } - @Override - public void close() throws IOException { - // noop - } + @Override + public void close() throws IOException { + // noop + } - boolean isSpanAvailable() { - return spanQueue.isEmpty(); - } + boolean isSpanAvailable() { + return spanQueue.isEmpty(); + } - Span getSpan() { - return spanQueue.poll(); - } + Span getSpan() { + return spanQueue.poll(); + } - int getNumSpans() { - return spanQueue.size(); - } + int getNumSpans() { + return spanQueue.size(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java index 1d2b75f1887..057e905d836 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,15 +51,14 @@ import org.apache.phoenix.metrics.MetricInfo; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** * Sink for the trace spans pushed into the queue by {@link TraceSpanReceiver}. The class * instantiates a thread pool of configurable size, which will pull the data from queue and write to @@ -67,265 +66,256 @@ * batch commit size. */ public class TraceWriter { - private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class); + private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class); - private static final String VARIABLE_VALUE = "?"; + private static final String VARIABLE_VALUE = "?"; - private static final Joiner COLUMN_JOIN = Joiner.on("."); - static final String TAG_FAMILY = "tags"; - /** - * Count of the number of tags we are storing for this row - */ - static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); + private static final Joiner COLUMN_JOIN = Joiner.on("."); + static final String TAG_FAMILY = "tags"; + /** + * Count of the number of tags we are storing for this row + */ + static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); - static final String ANNOTATION_FAMILY = "annotations"; - static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); + static final String ANNOTATION_FAMILY = "annotations"; + static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); - /** - * Join strings on a comma - */ - private static final Joiner COMMAS = Joiner.on(','); + /** + * Join strings on a comma + */ + private static final Joiner COMMAS = Joiner.on(','); - private String tableName; - private int batchSize; - private int numThreads; - private TraceSpanReceiver traceSpanReceiver; + private String tableName; + private int batchSize; + private int numThreads; + private TraceSpanReceiver traceSpanReceiver; - protected ScheduledExecutorService executor; + protected ScheduledExecutorService executor; - public TraceWriter(String tableName, int numThreads, int batchSize) { + public TraceWriter(String tableName, int numThreads, int batchSize) { - this.batchSize = batchSize; - this.numThreads = numThreads; - this.tableName = tableName; - } - - public void start() { - - traceSpanReceiver = getTraceSpanReceiver(); - if (traceSpanReceiver == null) { - LOGGER.warn( - "No receiver has been initialized for TraceWriter. 
Traces will not be written."); - LOGGER.warn("Restart Phoenix to try again."); - return; - } + this.batchSize = batchSize; + this.numThreads = numThreads; + this.tableName = tableName; + } - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setDaemon(true).setNameFormat("PHOENIX-METRICS-WRITER"); - executor = Executors.newScheduledThreadPool(this.numThreads, builder.build()); + public void start() { - for (int i = 0; i < this.numThreads; i++) { - executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS); - } - - LOGGER.info("Writing tracing metrics to phoenix table"); + traceSpanReceiver = getTraceSpanReceiver(); + if (traceSpanReceiver == null) { + LOGGER.warn("No receiver has been initialized for TraceWriter. Traces will not be written."); + LOGGER.warn("Restart Phoenix to try again."); + return; } - @VisibleForTesting - protected TraceSpanReceiver getTraceSpanReceiver() { - return Tracing.getTraceSpanReceiver(); - } + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setDaemon(true).setNameFormat("PHOENIX-METRICS-WRITER"); + executor = Executors.newScheduledThreadPool(this.numThreads, builder.build()); - public class FlushMetrics implements Runnable { - - private Connection conn; - private int counter = 0; + for (int i = 0; i < this.numThreads; i++) { + executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS); + } - public FlushMetrics() { - conn = getConnection(tableName); - } + LOGGER.info("Writing tracing metrics to phoenix table"); + } - @Override - public void run() { - if (conn == null) return; - while (!traceSpanReceiver.isSpanAvailable()) { - Span span = traceSpanReceiver.getSpan(); - if (null == span) break; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Span received: " + span.toJson()); - } - addToBatch(span); - counter++; - if (counter >= batchSize) { - commitBatch(conn); - counter = 0; - } - } - } - - private void addToBatch(Span span) { - - String stmt = "UPSERT INTO " + tableName + " ("; - // drop it into the queue of things that should be written - List keys = new ArrayList(); - List values = new ArrayList(); - // we need to keep variable values in a separate set since they may have spaces, which - // causes the parser to barf. Instead, we need to add them after the statement is - // prepared - List variableValues = new ArrayList(); - keys.add(TRACE.columnName); - values.add(span.getTraceId()); - - keys.add(DESCRIPTION.columnName); - values.add(VARIABLE_VALUE); - variableValues.add(span.getDescription()); - - keys.add(SPAN.traceName); - values.add(span.getSpanId()); - - keys.add(PARENT.traceName); - values.add(span.getParentId()); - - keys.add(START.traceName); - values.add(span.getStartTimeMillis()); - - keys.add(END.traceName); - values.add(span.getStopTimeMillis()); - - int annotationCount = 0; - int tagCount = 0; - - // add the tags to the span. They were written in order received so we mark them as such - for (TimelineAnnotation ta : span.getTimelineAnnotations()) { - addDynamicEntry(keys, values, variableValues, TAG_FAMILY, - Long.toString(ta.getTime()), ta.getMessage(), TAG, tagCount); - tagCount++; - } - - // add the annotations. 
We assume they are serialized as strings and integers, but that - // can - // change in the future - Map annotations = span.getKVAnnotations(); - for (Map.Entry annotation : annotations.entrySet()) { - Pair val = - TracingUtils.readAnnotation(annotation.getKey(), annotation.getValue()); - addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, val.getFirst(), - val.getSecond(), ANNOTATION, annotationCount); - annotationCount++; - } - - // add the tag count, now that we know it - keys.add(TAG_COUNT); - // ignore the hostname in the tags, if we know it - values.add(tagCount); - - keys.add(ANNOTATION_COUNT); - values.add(annotationCount); - - // compile the statement together - stmt += COMMAS.join(keys); - stmt += ") VALUES (" + COMMAS.join(values) + ")"; - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Logging metrics to phoenix table via: " + stmt); - LOGGER.trace("With tags: " + variableValues); - } - try (PreparedStatement ps = conn.prepareStatement(stmt)) { - // add everything that wouldn't/may not parse - int index = 1; - for (String tag : variableValues) { - ps.setString(index++, tag); - } - - // Not going through the standard route of using statement.execute() as that code - // path - // is blocked if the metadata hasn't been been upgraded to the new minor release. - MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); - MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); - MutationState newState = plan.execute(); - state.join(newState); - } catch (SQLException e) { - LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt, - e); - } - } - } + @VisibleForTesting + protected TraceSpanReceiver getTraceSpanReceiver() { + return Tracing.getTraceSpanReceiver(); + } - public static String getDynamicColumnName(String family, String column, int count) { - return COLUMN_JOIN.join(family, column) + count; - } + public class FlushMetrics implements Runnable { - private void addDynamicEntry(List keys, List values, - List variableValues, String family, String desc, String value, - MetricInfo metric, int count) { - // <.dynColumn> - keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); + private Connection conn; + private int counter = 0; - // build the annotation value - String val = desc + " - " + value; - values.add(VARIABLE_VALUE); - variableValues.add(val); + public FlushMetrics() { + conn = getConnection(tableName); } - protected Connection getConnection(String tableName) { - - try { - // create the phoenix connection - Properties props = new Properties(); - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, Tracing.Frequency.NEVER.getKey()); - Configuration conf = HBaseConfiguration.create(); - Connection conn = QueryUtil.getConnectionOnServer(props, conf); - - if (!traceTableExists(conn, tableName)) { - createTable(conn, tableName); - } - - LOGGER.info( - "Created new connection for tracing " + conn.toString() + " Table: " + tableName); - return conn; - } catch (Exception e) { - LOGGER.error("Tracing will NOT be pursued. 
New connection failed for tracing Table: " - + tableName, - e); - LOGGER.error("Restart Phoenix to retry."); - return null; + @Override + public void run() { + if (conn == null) return; + while (!traceSpanReceiver.isSpanAvailable()) { + Span span = traceSpanReceiver.getSpan(); + if (null == span) break; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Span received: " + span.toJson()); + } + addToBatch(span); + counter++; + if (counter >= batchSize) { + commitBatch(conn); + counter = 0; } + } } - protected boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { - try { - conn.unwrap(PhoenixConnection.class).getTable(traceTableName); - return true; - } catch (TableNotFoundException e) { - return false; + private void addToBatch(Span span) { + + String stmt = "UPSERT INTO " + tableName + " ("; + // drop it into the queue of things that should be written + List keys = new ArrayList(); + List values = new ArrayList(); + // we need to keep variable values in a separate set since they may have spaces, which + // causes the parser to barf. Instead, we need to add them after the statement is + // prepared + List variableValues = new ArrayList(); + keys.add(TRACE.columnName); + values.add(span.getTraceId()); + + keys.add(DESCRIPTION.columnName); + values.add(VARIABLE_VALUE); + variableValues.add(span.getDescription()); + + keys.add(SPAN.traceName); + values.add(span.getSpanId()); + + keys.add(PARENT.traceName); + values.add(span.getParentId()); + + keys.add(START.traceName); + values.add(span.getStartTimeMillis()); + + keys.add(END.traceName); + values.add(span.getStopTimeMillis()); + + int annotationCount = 0; + int tagCount = 0; + + // add the tags to the span. They were written in order received so we mark them as such + for (TimelineAnnotation ta : span.getTimelineAnnotations()) { + addDynamicEntry(keys, values, variableValues, TAG_FAMILY, Long.toString(ta.getTime()), + ta.getMessage(), TAG, tagCount); + tagCount++; + } + + // add the annotations. We assume they are serialized as strings and integers, but that + // can + // change in the future + Map annotations = span.getKVAnnotations(); + for (Map.Entry annotation : annotations.entrySet()) { + Pair val = + TracingUtils.readAnnotation(annotation.getKey(), annotation.getValue()); + addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, val.getFirst(), + val.getSecond(), ANNOTATION, annotationCount); + annotationCount++; + } + + // add the tag count, now that we know it + keys.add(TAG_COUNT); + // ignore the hostname in the tags, if we know it + values.add(tagCount); + + keys.add(ANNOTATION_COUNT); + values.add(annotationCount); + + // compile the statement together + stmt += COMMAS.join(keys); + stmt += ") VALUES (" + COMMAS.join(values) + ")"; + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Logging metrics to phoenix table via: " + stmt); + LOGGER.trace("With tags: " + variableValues); + } + try (PreparedStatement ps = conn.prepareStatement(stmt)) { + // add everything that wouldn't/may not parse + int index = 1; + for (String tag : variableValues) { + ps.setString(index++, tag); } - } - /** - * Create a stats table with the given name. 
Stores the name for use later when creating upsert - * statements - * @param conn connection to use when creating the table - * @param table name of the table to create - * @throws SQLException if any phoenix operations fails - */ - protected void createTable(Connection conn, String table) throws SQLException { - // only primary-key columns can be marked non-null - String ddl = - "create table if not exists " + table + "( " + TRACE.columnName - + " bigint not null, " + PARENT.columnName + " bigint not null, " - + SPAN.columnName + " bigint not null, " + DESCRIPTION.columnName - + " varchar, " + START.columnName + " bigint, " + END.columnName - + " bigint, " + HOSTNAME.columnName + " varchar, " + TAG_COUNT - + " smallint, " + ANNOTATION_COUNT + " smallint" - + " CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", " - + PARENT.columnName + ", " + SPAN.columnName + "))\n" + - // We have a config parameter that can be set so that tables are - // transactional by default. If that's set, we still don't want these system - // tables created as transactional tables, make these table non - // transactional - PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; - PreparedStatement stmt = conn.prepareStatement(ddl); - stmt.execute(); + // Not going through the standard route of using statement.execute() as that code + // path + // is blocked if the metadata hasn't been been upgraded to the new minor release. + MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); + MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); + MutationState newState = plan.execute(); + state.join(newState); + } catch (SQLException e) { + LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt, e); + } } - - protected void commitBatch(Connection conn) { - try { - conn.commit(); - } catch (SQLException e) { - LOGGER.error( - "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName, - e); - } + } + + public static String getDynamicColumnName(String family, String column, int count) { + return COLUMN_JOIN.join(family, column) + count; + } + + private void addDynamicEntry(List keys, List values, List variableValues, + String family, String desc, String value, MetricInfo metric, int count) { + // <.dynColumn> + keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); + + // build the annotation value + String val = desc + " - " + value; + values.add(VARIABLE_VALUE); + variableValues.add(val); + } + + protected Connection getConnection(String tableName) { + + try { + // create the phoenix connection + Properties props = new Properties(); + props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, Tracing.Frequency.NEVER.getKey()); + Configuration conf = HBaseConfiguration.create(); + Connection conn = QueryUtil.getConnectionOnServer(props, conf); + + if (!traceTableExists(conn, tableName)) { + createTable(conn, tableName); + } + + LOGGER.info("Created new connection for tracing " + conn.toString() + " Table: " + tableName); + return conn; + } catch (Exception e) { + LOGGER.error( + "Tracing will NOT be pursued. 
New connection failed for tracing Table: " + tableName, e); + LOGGER.error("Restart Phoenix to retry."); + return null; + } + } + + protected boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { + try { + conn.unwrap(PhoenixConnection.class).getTable(traceTableName); + return true; + } catch (TableNotFoundException e) { + return false; + } + } + + /** + * Create a stats table with the given name. Stores the name for use later when creating upsert + * statements + * @param conn connection to use when creating the table + * @param table name of the table to create + * @throws SQLException if any phoenix operations fails + */ + protected void createTable(Connection conn, String table) throws SQLException { + // only primary-key columns can be marked non-null + String ddl = "create table if not exists " + table + "( " + TRACE.columnName + + " bigint not null, " + PARENT.columnName + " bigint not null, " + SPAN.columnName + + " bigint not null, " + DESCRIPTION.columnName + " varchar, " + START.columnName + + " bigint, " + END.columnName + " bigint, " + HOSTNAME.columnName + " varchar, " + TAG_COUNT + + " smallint, " + ANNOTATION_COUNT + " smallint" + " CONSTRAINT pk PRIMARY KEY (" + + TRACE.columnName + ", " + PARENT.columnName + ", " + SPAN.columnName + "))\n" + + // We have a config parameter that can be set so that tables are + // transactional by default. If that's set, we still don't want these system + // tables created as transactional tables, make these table non + // transactional + PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; + PreparedStatement stmt = conn.prepareStatement(ddl); + stmt.execute(); + } + + protected void commitBatch(Connection conn) { + try { + conn.commit(); + } catch (SQLException e) { + LOGGER.error( + "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName, e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java index 4808f258a30..227d358607b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,45 +19,45 @@ import java.sql.SQLException; +import org.apache.htrace.TraceScope; import org.apache.phoenix.iterate.DelegateResultIterator; import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.schema.tuple.Tuple; -import org.apache.htrace.TraceScope; /** * A simple iterator that closes the trace scope when the iterator is closed. 
*/ public class TracingIterator extends DelegateResultIterator { - private TraceScope scope; - private boolean started; - - /** - * @param scope a scope with a non-null span - * @param iterator delegate - */ - public TracingIterator(TraceScope scope, ResultIterator iterator) { - super(iterator); - this.scope = scope; + private TraceScope scope; + private boolean started; + + /** + * @param scope a scope with a non-null span + * @param iterator delegate + */ + public TracingIterator(TraceScope scope, ResultIterator iterator) { + super(iterator); + this.scope = scope; + } + + @Override + public void close() throws SQLException { + scope.close(); + super.close(); + } + + @Override + public Tuple next() throws SQLException { + if (!started) { + scope.getSpan().addTimelineAnnotation("First request completed"); + started = true; } - - @Override - public void close() throws SQLException { - scope.close(); - super.close(); - } - - @Override - public Tuple next() throws SQLException { - if (!started) { - scope.getSpan().addTimelineAnnotation("First request completed"); - started = true; - } - return super.next(); - } - - @Override - public String toString() { - return "TracingIterator [scope=" + scope + ", started=" + started + "]"; - } -} \ No newline at end of file + return super.next(); + } + + @Override + public String toString() { + return "TracingIterator [scope=" + scope + ", started=" + started + "]"; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java index 47409e00439..bbc9c1b1c19 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,36 +27,35 @@ * Utilities for tracing */ public class TracingUtils { - public static final String METRIC_SOURCE_KEY = "phoenix."; - - /** Set context to enable filtering */ - public static final String METRICS_CONTEXT = "tracing"; - - /** Marker metric to ensure that we register the tracing mbeans */ - public static final String METRICS_MARKER_CONTEXT = "marker"; - - public static void addAnnotation(Span span, String message, int value) { - span.addKVAnnotation(message.getBytes(StandardCharsets.UTF_8), - Bytes.toBytes(Integer.toString(value))); - } - - public static Pair readAnnotation(byte[] key, byte[] value) { - return new Pair(new String(key, StandardCharsets.UTF_8), - Bytes.toString(value)); - } - - /** - * @see #getTraceMetricName(String) - */ - public static final String getTraceMetricName(long traceId) { - return getTraceMetricName(Long.toString(traceId)); - } - - /** - * @param traceId unique id of the trace - * @return the name of the metric record that should be generated for a given trace - */ - public static final String getTraceMetricName(String traceId) { - return METRIC_SOURCE_KEY + traceId; - } + public static final String METRIC_SOURCE_KEY = "phoenix."; + + /** Set context to enable filtering */ + public static final String METRICS_CONTEXT = "tracing"; + + /** Marker metric to ensure that we register the tracing mbeans */ + public static final String METRICS_MARKER_CONTEXT = "marker"; + + public static void addAnnotation(Span span, String message, int value) { + span.addKVAnnotation(message.getBytes(StandardCharsets.UTF_8), + Bytes.toBytes(Integer.toString(value))); + } + + public static Pair readAnnotation(byte[] key, byte[] value) { + return new Pair(new String(key, StandardCharsets.UTF_8), Bytes.toString(value)); + } + + /** + * @see #getTraceMetricName(String) + */ + public static final String getTraceMetricName(long traceId) { + return getTraceMetricName(Long.toString(traceId)); + } + + /** + * @param traceId unique id of the trace + * @return the name of the metric record that should be generated for a given trace + */ + public static final String getTraceMetricName(String traceId) { + return METRIC_SOURCE_KEY + traceId; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java index cde7f9e4720..0646feeeefa 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,4 +52,4 @@ public String get(String key, String defaultValue) { return conf.get(key, defaultValue); } } -} \ No newline at end of file +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java index afde49297dd..f4d31827264 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java index 2edff890fe6..4f7a1d312e0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,13 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.htrace.HTraceConfiguration; -import org.apache.phoenix.call.CallRunner; -import org.apache.phoenix.call.CallWrapper; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.parse.TraceStatement; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.trace.TraceSpanReceiver; import org.apache.htrace.Sampler; import org.apache.htrace.Span; import org.apache.htrace.Trace; @@ -43,13 +36,19 @@ import org.apache.htrace.impl.ProbabilitySampler; import org.apache.htrace.wrappers.TraceCallable; import org.apache.htrace.wrappers.TraceRunnable; +import org.apache.phoenix.call.CallRunner; +import org.apache.phoenix.call.CallWrapper; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.parse.TraceStatement; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.trace.TraceSpanReceiver; import org.apache.phoenix.trace.TraceWriter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - import edu.umd.cs.findbugs.annotations.NonNull; /** @@ -57,255 +56,260 @@ */ public class Tracing { - private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class); - - private static final String SEPARATOR = "."; - // Constants for tracing across the wire - public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid"; - public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid"; + private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class); - // Constants for passing into the metrics system - private static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance"; + private static final String SEPARATOR = "."; + // Constants for tracing across the wire + public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid"; + public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid"; - /** - * Manage the types of frequencies that we support. By default, we never turn on tracing. - */ - public static enum Frequency { - NEVER("never", CREATE_NEVER), // default - ALWAYS("always", CREATE_ALWAYS), PROBABILITY("probability", CREATE_PROBABILITY); + // Constants for passing into the metrics system + private static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance"; - String key; - Function> builder; + /** + * Manage the types of frequencies that we support. By default, we never turn on tracing. 
+ */ + public static enum Frequency { + NEVER("never", CREATE_NEVER), // default + ALWAYS("always", CREATE_ALWAYS), + PROBABILITY("probability", CREATE_PROBABILITY); - private Frequency(String key, Function> builder) { - this.key = key; - this.builder = builder; - } - - public String getKey() { - return key; - } + String key; + Function> builder; - static Frequency getSampler(String key) { - for (Frequency type : Frequency.values()) { - if (type.key.equals(key)) { - return type; - } - } - return NEVER; - } + private Frequency(String key, Function> builder) { + this.key = key; + this.builder = builder; } - private static Function> CREATE_ALWAYS = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter arg0) { - return Sampler.ALWAYS; - } - }; - - private static Function> CREATE_NEVER = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter arg0) { - return Sampler.NEVER; - } - }; - - private static Function> CREATE_PROBABILITY = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter conf) { - // get the connection properties for the probability information - Map items = new HashMap(); - items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, - conf.get(QueryServices.TRACING_PROBABILITY_THRESHOLD_ATTRIB, Double.toString(QueryServicesOptions.DEFAULT_TRACING_PROBABILITY_THRESHOLD))); - return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); - } - }; - - public static Sampler getConfiguredSampler(PhoenixConnection connection) { - String tracelevel = connection.getQueryServices().getProps().get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); - return getSampler(tracelevel, new ConfigurationAdapter.ConnectionConfigurationAdapter( - connection)); + public String getKey() { + return key; } - public static Sampler getConfiguredSampler(Configuration conf) { - String tracelevel = conf.get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); - return getSampler(tracelevel, new ConfigurationAdapter.HadoopConfigConfigurationAdapter( - conf)); - } - - public static Sampler getConfiguredSampler(TraceStatement traceStatement) { - double samplingRate = traceStatement.getSamplingRate(); - if (samplingRate >= 1.0) { - return Sampler.ALWAYS; - } else if (samplingRate < 1.0 && samplingRate > 0.0) { - Map items = new HashMap(); - items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate)); - return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); - } else { - return Sampler.NEVER; + static Frequency getSampler(String key) { + for (Frequency type : Frequency.values()) { + if (type.key.equals(key)) { + return type; + } } + return NEVER; } + } - private static Sampler getSampler(String traceLevel, ConfigurationAdapter conf) { - return Frequency.getSampler(traceLevel).builder.apply(conf); - } - - public static void setSampling(Properties props, Frequency freq) { - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, freq.key); - } + private static Function> CREATE_ALWAYS = + new Function>() { + @Override + public Sampler apply(ConfigurationAdapter arg0) { + return Sampler.ALWAYS; + } + }; - /** - * Start a span with the currently configured sampling frequency. Creates a new 'current' span - * on this thread - the previous 'current' span will be replaced with this newly created span. - *
- * Hands back the direct span as you shouldn't be detaching the span - use {@link TraceRunnable} - * instead to detach a span from this operation. - * @param connection from which to read parameters - * @param string description of the span to start - * @return the underlying span. - */ - public static TraceScope startNewSpan(PhoenixConnection connection, String string) { - Sampler sampler = connection.getSampler(); - TraceScope scope = Trace.startSpan(string, sampler); - addCustomAnnotationsToSpan(scope.getSpan(), connection); - return scope; + private static Function> CREATE_NEVER = + new Function>() { + @Override + public Sampler apply(ConfigurationAdapter arg0) { + return Sampler.NEVER; + } + }; + + private static Function> CREATE_PROBABILITY = + new Function>() { + @Override + public Sampler apply(ConfigurationAdapter conf) { + // get the connection properties for the probability information + Map items = new HashMap(); + items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, + conf.get(QueryServices.TRACING_PROBABILITY_THRESHOLD_ATTRIB, + Double.toString(QueryServicesOptions.DEFAULT_TRACING_PROBABILITY_THRESHOLD))); + return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); + } + }; + + public static Sampler getConfiguredSampler(PhoenixConnection connection) { + String tracelevel = connection.getQueryServices().getProps() + .get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); + return getSampler(tracelevel, + new ConfigurationAdapter.ConnectionConfigurationAdapter(connection)); + } + + public static Sampler getConfiguredSampler(Configuration conf) { + String tracelevel = + conf.get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); + return getSampler(tracelevel, new ConfigurationAdapter.HadoopConfigConfigurationAdapter(conf)); + } + + public static Sampler getConfiguredSampler(TraceStatement traceStatement) { + double samplingRate = traceStatement.getSamplingRate(); + if (samplingRate >= 1.0) { + return Sampler.ALWAYS; + } else if (samplingRate < 1.0 && samplingRate > 0.0) { + Map items = new HashMap(); + items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate)); + return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); + } else { + return Sampler.NEVER; } - - public static String getSpanName(Span span) { - return Tracing.TRACE_METRIC_PREFIX + span.getTraceId() + SEPARATOR + span.getParentId() - + SEPARATOR + span.getSpanId(); + } + + private static Sampler getSampler(String traceLevel, ConfigurationAdapter conf) { + return Frequency.getSampler(traceLevel).builder.apply(conf); + } + + public static void setSampling(Properties props, Frequency freq) { + props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, freq.key); + } + + /** + * Start a span with the currently configured sampling frequency. Creates a new 'current' span on + * this thread - the previous 'current' span will be replaced with this newly created span. + *
<p>
+ * Hands back the direct span as you shouldn't be detaching the span - use {@link TraceRunnable} + * instead to detach a span from this operation. + * @param connection from which to read parameters + * @param string description of the span to start + * @return the underlying span. + */ + public static TraceScope startNewSpan(PhoenixConnection connection, String string) { + Sampler sampler = connection.getSampler(); + TraceScope scope = Trace.startSpan(string, sampler); + addCustomAnnotationsToSpan(scope.getSpan(), connection); + return scope; + } + + public static String getSpanName(Span span) { + return Tracing.TRACE_METRIC_PREFIX + span.getTraceId() + SEPARATOR + span.getParentId() + + SEPARATOR + span.getSpanId(); + } + + public static Span child(Span s, String d) { + if (s == null) { + return NullSpan.INSTANCE; } - - public static Span child(Span s, String d) { - if (s == null) { - return NullSpan.INSTANCE; - } - return s.child(d); + return s.child(d); + } + + /** + * Wrap the callable in a TraceCallable, if tracing. + * @param callable to call + * @param description description of the operation being run. If null uses the current + * thread name + * @return The callable provided, wrapped if tracing, 'callable' if not. + */ + public static Callable wrap(Callable callable, String description) { + if (Trace.isTracing()) { + return new TraceCallable(Trace.currentSpan(), callable, description); } - - /** - * Wrap the callable in a TraceCallable, if tracing. - * @param callable to call - * @param description description of the operation being run. If null uses the current - * thread name - * @return The callable provided, wrapped if tracing, 'callable' if not. - */ - public static Callable wrap(Callable callable, String description) { - if (Trace.isTracing()) { - return new TraceCallable(Trace.currentSpan(), callable, description); - } - return callable; + return callable; + } + + /** + * Helper to automatically start and complete tracing on the given method, used in conjuction with + * {@link CallRunner#run}. + *
<p>
+ * This will always attempt start a new span (which will always start, unless the {@link Sampler}
+ * says it shouldn't be traced). If you are just looking for flexible tracing that only turns on
+ * if the current thread/query is already tracing, use {@link #wrap(Callable, String)} or
+ * {@link Trace#wrap(Callable)}.
+ * <p>
+ * Ensures that the trace is closed, even if there is an exception from the
+ * {@link org.apache.phoenix.call.CallRunner.CallableThrowable}.
+ * <p>
+ * Generally, this should wrap a long-running operation. + * @param conn connection from which to determine if we are tracing, ala + * {@link #startNewSpan(PhoenixConnection, String)} + * @param desc description of the operation being run + * @return the value returned from the call + */ + public static CallWrapper withTracing(PhoenixConnection conn, String desc) { + return new TracingWrapper(conn, desc); + } + + private static void addCustomAnnotationsToSpan(@Nullable Span span, + @NonNull PhoenixConnection conn) { + Preconditions.checkNotNull(conn); + + if (span == null) { + return; } - - /** - * Helper to automatically start and complete tracing on the given method, used in conjuction - * with {@link CallRunner#run}. - *
<p>
- * This will always attempt start a new span (which will always start, unless the
- * {@link Sampler} says it shouldn't be traced). If you are just looking for flexible tracing
- * that only turns on if the current thread/query is already tracing, use
- * {@link #wrap(Callable, String)} or {@link Trace#wrap(Callable)}.
- * <p>
- * Ensures that the trace is closed, even if there is an exception from the
- * {@link org.apache.phoenix.call.CallRunner.CallableThrowable}.
- * <p>
- * Generally, this should wrap a long-running operation. - * @param conn connection from which to determine if we are tracing, ala - * {@link #startNewSpan(PhoenixConnection, String)} - * @param desc description of the operation being run - * @return the value returned from the call - */ - public static CallWrapper withTracing(PhoenixConnection conn, String desc) { - return new TracingWrapper(conn, desc); + Map annotations = conn.getCustomTracingAnnotations(); + // copy over the annotations as bytes + for (Map.Entry annotation : annotations.entrySet()) { + span.addKVAnnotation(toBytes(annotation.getKey()), toBytes(annotation.getValue())); } + } - private static void addCustomAnnotationsToSpan(@Nullable Span span, @NonNull PhoenixConnection conn) { - Preconditions.checkNotNull(conn); + private static class TracingWrapper implements CallWrapper { + private TraceScope scope; + private final PhoenixConnection conn; + private final String desc; - if (span == null) { - return; - } - Map annotations = conn.getCustomTracingAnnotations(); - // copy over the annotations as bytes - for (Map.Entry annotation : annotations.entrySet()) { - span.addKVAnnotation(toBytes(annotation.getKey()), toBytes(annotation.getValue())); - } + public TracingWrapper(PhoenixConnection conn, String desc) { + this.conn = conn; + this.desc = desc; } - private static class TracingWrapper implements CallWrapper { - private TraceScope scope; - private final PhoenixConnection conn; - private final String desc; - - public TracingWrapper(PhoenixConnection conn, String desc){ - this.conn = conn; - this.desc = desc; - } - - @Override - public void before() { - scope = Tracing.startNewSpan(conn, "Executing " + desc); - } - - @Override - public void after() { - scope.close(); - } + @Override + public void before() { + scope = Tracing.startNewSpan(conn, "Executing " + desc); } - /** - * Track if the tracing system has been initialized for phoenix - */ - private static boolean initialized = false; - private static TraceSpanReceiver traceSpanReceiver = null; - - /** - * Add the phoenix span receiver so we can log the traces. We have a single trace source for the - * whole JVM - */ - public synchronized static void addTraceMetricsSource() { - try { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - if (!initialized && options.isTracingEnabled()) { - traceSpanReceiver = new TraceSpanReceiver(); - Trace.addReceiver(traceSpanReceiver); - TraceWriter traceWriter = new TraceWriter(options.getTableName(), options.getTracingThreadPoolSize(), options.getTracingBatchSize()); - traceWriter.start(); - } - } catch (RuntimeException e) { - LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No " - + "TraceMetricsSink found on the classpath", e); - } catch (IllegalAccessError e) { - // This is an issue when we have a class incompatibility error, such as when running - // within SquirrelSQL which uses an older incompatible version of commons-collections. - // Seeing as this only results in disabling tracing, we swallow this exception and just - // continue on without tracing. 
- LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e); - } - initialized = true; + @Override + public void after() { + scope.close(); } - - public static TraceSpanReceiver getTraceSpanReceiver() { - return traceSpanReceiver; - } - - public static boolean isTraceOn(String traceOption) { - Preconditions.checkArgument(traceOption != null); - if(traceOption.equalsIgnoreCase("ON")) return true; - if(traceOption.equalsIgnoreCase("OFF")) return false; - else { - throw new IllegalArgumentException("Unknown tracing option: " + traceOption); - } + } + + /** + * Track if the tracing system has been initialized for phoenix + */ + private static boolean initialized = false; + private static TraceSpanReceiver traceSpanReceiver = null; + + /** + * Add the phoenix span receiver so we can log the traces. We have a single trace source for the + * whole JVM + */ + public synchronized static void addTraceMetricsSource() { + try { + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + if (!initialized && options.isTracingEnabled()) { + traceSpanReceiver = new TraceSpanReceiver(); + Trace.addReceiver(traceSpanReceiver); + TraceWriter traceWriter = new TraceWriter(options.getTableName(), + options.getTracingThreadPoolSize(), options.getTracingBatchSize()); + traceWriter.start(); + } + } catch (RuntimeException e) { + LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No " + + "TraceMetricsSink found on the classpath", e); + } catch (IllegalAccessError e) { + // This is an issue when we have a class incompatibility error, such as when running + // within SquirrelSQL which uses an older incompatible version of commons-collections. + // Seeing as this only results in disabling tracing, we swallow this exception and just + // continue on without tracing. + LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e); } - - /** - * Check whether tracing is generally enabled. - * @return true If tracing is enabled, false otherwise - */ - public static boolean isTracing() { - return Trace.isTracing(); + initialized = true; + } + + public static TraceSpanReceiver getTraceSpanReceiver() { + return traceSpanReceiver; + } + + public static boolean isTraceOn(String traceOption) { + Preconditions.checkArgument(traceOption != null); + if (traceOption.equalsIgnoreCase("ON")) return true; + if (traceOption.equalsIgnoreCase("OFF")) return false; + else { + throw new IllegalArgumentException("Unknown tracing option: " + traceOption); } + } + + /** + * Check whether tracing is generally enabled. + * @return true If tracing is enabled, false otherwise + */ + public static boolean isTracing() { + return Trace.isTracing(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/NotAvailableTransactionProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/NotAvailableTransactionProvider.java index cde98bcde66..efca3757970 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/NotAvailableTransactionProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/NotAvailableTransactionProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,62 +27,66 @@ import org.apache.phoenix.transaction.TransactionFactory.Provider; public class NotAvailableTransactionProvider implements PhoenixTransactionProvider { - private static final NotAvailableTransactionProvider INSTANCE = new NotAvailableTransactionProvider(); + private static final NotAvailableTransactionProvider INSTANCE = + new NotAvailableTransactionProvider(); - private static final String message = "Phoenix no longer supports the Tephra transaction processor."; + private static final String message = + "Phoenix no longer supports the Tephra transaction processor."; - public static final NotAvailableTransactionProvider getInstance() { - return INSTANCE; - } + public static final NotAvailableTransactionProvider getInstance() { + return INSTANCE; + } - private NotAvailableTransactionProvider() { - } + private NotAvailableTransactionProvider() { + } - @Override - public String toString() { - throw new UnsupportedOperationException(message); - } + @Override + public String toString() { + throw new UnsupportedOperationException(message); + } - @Override - public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException { - throw new UnsupportedOperationException(message); - } + @Override + public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException { + throw new UnsupportedOperationException(message); + } - @Override - public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) throws SQLException { - throw new UnsupportedOperationException(message); - } + @Override + public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) + throws SQLException { + throw new UnsupportedOperationException(message); + } - @Override - public PhoenixTransactionClient getTransactionClient(Configuration config, ConnectionInfo connectionInfo) { - throw new UnsupportedOperationException(message); - } + @Override + public PhoenixTransactionClient getTransactionClient(Configuration config, + ConnectionInfo connectionInfo) { + throw new UnsupportedOperationException(message); + } - @Override - public Provider getProvider() { - return TransactionFactory.Provider.NOTAVAILABLE; - } + @Override + public Provider getProvider() { + return TransactionFactory.Provider.NOTAVAILABLE; + } - @Override - public String getCoprocessorClassName() { - // No coprocessor is required - return null; - } + @Override + public String getCoprocessorClassName() { + // No coprocessor is required + return null; + } - @Override - public String getGCCoprocessorClassName() { - // No GC coprocessor is required - return null; - } + @Override + public String getGCCoprocessorClassName() { + // No GC coprocessor is required + return null; + } - @Override - public boolean isUnsupported(Feature feature) { - // All features are unsupported - return true; - } + @Override + public boolean isUnsupported(Feature feature) { + // All features are unsupported + return true; + } - @Override - public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp) { - throw new UnsupportedOperationException(message); - } + @Override + public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp) { + throw new UnsupportedOperationException(message); 
+ } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java index 414a519bcc1..8caf5e86a05 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,285 +42,271 @@ import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.InvalidProtocolBufferException; - public class OmidTransactionContext implements PhoenixTransactionContext { - private static final Logger LOGGER = LoggerFactory.getLogger(OmidTransactionContext.class); + private static final Logger LOGGER = LoggerFactory.getLogger(OmidTransactionContext.class); + + private HBaseTransactionManager tm; + private HBaseTransaction tx; + + public OmidTransactionContext() { + this.tx = null; + this.tm = null; + } + + public OmidTransactionContext(PhoenixConnection connection) throws SQLException { + PhoenixTransactionClient client = + connection.getQueryServices().initTransactionClient(getProvider()); + assert (client instanceof OmidTransactionProvider.OmidTransactionClient); + this.tm = ((OmidTransactionProvider.OmidTransactionClient) client).getTransactionClient(); + this.tx = null; + } + + public OmidTransactionContext(byte[] txnBytes) throws InvalidProtocolBufferException { + this(); + if (txnBytes != null && txnBytes.length > 0) { + TSOProto.Transaction transaction = TSOProto.Transaction.parseFrom(txnBytes); + tx = new HBaseTransaction(transaction.getTimestamp(), transaction.getEpoch(), + new HashSet(), new HashSet(), null, tm.isLowLatency()); + } else { + tx = null; + } + } - private HBaseTransactionManager tm; - private HBaseTransaction tx; + public OmidTransactionContext(PhoenixTransactionContext ctx, boolean subTask) { + assert (ctx instanceof OmidTransactionContext); + OmidTransactionContext omidTransactionContext = (OmidTransactionContext) ctx; - public OmidTransactionContext() { - this.tx = null; - this.tm = null; - } + this.tm = omidTransactionContext.tm; - public OmidTransactionContext(PhoenixConnection connection) throws SQLException { - PhoenixTransactionClient client = connection.getQueryServices().initTransactionClient(getProvider()); - assert (client instanceof OmidTransactionProvider.OmidTransactionClient); - this.tm = ((OmidTransactionProvider.OmidTransactionClient)client).getTransactionClient(); + if (subTask) { + if (omidTransactionContext.isTransactionRunning()) { + Transaction transaction = omidTransactionContext.getTransaction(); + this.tx = new HBaseTransaction(transaction.getTransactionId(), transaction.getEpoch(), + new HashSet(), new HashSet(), this.tm, + transaction.getReadTimestamp(), transaction.getWriteTimestamp(), tm.isLowLatency()); + } else { this.tx = null; - } + } - public OmidTransactionContext(byte[] txnBytes) throws InvalidProtocolBufferException { - this(); - if (txnBytes != null && txnBytes.length > 0) { - TSOProto.Transaction transaction = TSOProto.Transaction.parseFrom(txnBytes); - tx = new 
HBaseTransaction(transaction.getTimestamp(), transaction.getEpoch(), new HashSet(), - new HashSet(), null, tm.isLowLatency()); - } else { - tx = null; - } + this.tm = null; + } else { + this.tx = omidTransactionContext.getTransaction(); } + } - public OmidTransactionContext(PhoenixTransactionContext ctx, boolean subTask) { - assert (ctx instanceof OmidTransactionContext); - OmidTransactionContext omidTransactionContext = (OmidTransactionContext) ctx; - - this.tm = omidTransactionContext.tm; - - if (subTask) { - if (omidTransactionContext.isTransactionRunning()) { - Transaction transaction = omidTransactionContext.getTransaction(); - this.tx = new HBaseTransaction(transaction.getTransactionId(), transaction.getEpoch(), - new HashSet(), new HashSet(), this.tm, - transaction.getReadTimestamp(), transaction.getWriteTimestamp(), tm.isLowLatency()); - } else { - this.tx = null; - } - - this.tm = null; - } else { - this.tx = omidTransactionContext.getTransaction(); - } + @Override + public void begin() throws SQLException { + if (tm == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build() + .buildException(); } - @Override - public void begin() throws SQLException { - if (tm == null) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.NULL_TRANSACTION_CONTEXT).build() - .buildException(); - } - - - try { - tx = (HBaseTransaction) tm.begin(); - } catch (TransactionException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } + try { + tx = (HBaseTransaction) tm.begin(); + } catch (TransactionException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); } - - @Override - public void commit() throws SQLException { - if (tx == null || tm == null) - return; - - try { - tm.commit(tx); - } catch (TransactionException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } catch (RollbackException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } + } + + @Override + public void commit() throws SQLException { + if (tx == null || tm == null) return; + + try { + tm.commit(tx); + } catch (TransactionException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); + } catch (RollbackException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); } + } - @Override - public void abort() throws SQLException { - if (tx == null || tm == null || tx.getStatus() != Status.RUNNING) { - return; - } - - try { - tm.rollback(tx); - } catch (TransactionException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } + @Override + public void abort() throws SQLException { + if (tx == null || tm == null || tx.getStatus() != Status.RUNNING) { + return; } - @Override - public void checkpoint(boolean hasUncommittedData) throws SQLException { - try { - tx.checkpoint(); - } catch (TransactionException e) { - throw new SQLException(e); - } - 
tx.setVisibilityLevel(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT); + try { + tm.rollback(tx); + } catch (TransactionException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); } - - @Override - public void commitDDLFence(PTable dataTable) throws SQLException { - - try { - tx = (HBaseTransaction) tm.fence(dataTable.getName().getBytes()); - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Added write fence at ~" - + tx.getReadTimestamp()); - } - } catch (TransactionException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE) - .setSchemaName(dataTable.getSchemaName().getString()) - .setTableName(dataTable.getTableName().getString()).build() - .buildException(); - } + } + + @Override + public void checkpoint(boolean hasUncommittedData) throws SQLException { + try { + tx.checkpoint(); + } catch (TransactionException e) { + throw new SQLException(e); } - - @Override - public void join(PhoenixTransactionContext ctx) { - - if (ctx == PhoenixTransactionContext.NULL_CONTEXT) { - return; - } - - assert (ctx instanceof OmidTransactionContext); - OmidTransactionContext omidContext = (OmidTransactionContext) ctx; - - HBaseTransaction transaction = omidContext.getTransaction(); - if (transaction == null || tx == null) return; - - Set writeSet = transaction.getWriteSet(); - - for (HBaseCellId cell : writeSet) { - tx.addWriteSetElement(cell); - } + tx.setVisibilityLevel(VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT); + } + + @Override + public void commitDDLFence(PTable dataTable) throws SQLException { + + try { + tx = (HBaseTransaction) tm.fence(dataTable.getName().getBytes()); + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Added write fence at ~" + tx.getReadTimestamp()); + } + } catch (TransactionException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_UNABLE_TO_GET_WRITE_FENCE) + .setSchemaName(dataTable.getSchemaName().getString()) + .setTableName(dataTable.getTableName().getString()).build().buildException(); } + } - @Override - public boolean isTransactionRunning() { - return (tx != null); - } + @Override + public void join(PhoenixTransactionContext ctx) { - @Override - public void reset() { - tx = null; + if (ctx == PhoenixTransactionContext.NULL_CONTEXT) { + return; } - @Override - public long getTransactionId() { - return tx.getTransactionId(); - } + assert (ctx instanceof OmidTransactionContext); + OmidTransactionContext omidContext = (OmidTransactionContext) ctx; - @Override - public long getReadPointer() { - return tx.getReadTimestamp(); - } + HBaseTransaction transaction = omidContext.getTransaction(); + if (transaction == null || tx == null) return; - @Override - public long getWritePointer() { - return tx.getWriteTimestamp(); - } + Set writeSet = transaction.getWriteSet(); - @Override - public PhoenixVisibilityLevel getVisibilityLevel() { - VisibilityLevel visibilityLevel = null; - - assert(tx != null); - visibilityLevel = tx.getVisibilityLevel(); - - PhoenixVisibilityLevel phoenixVisibilityLevel; - switch (visibilityLevel) { - case SNAPSHOT: - phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT; - break; - case SNAPSHOT_EXCLUDE_CURRENT: - phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT; - break; - case SNAPSHOT_ALL: - phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL; - default: - phoenixVisibilityLevel = null; - } - - return phoenixVisibilityLevel; + for (HBaseCellId cell : writeSet) { + 
tx.addWriteSetElement(cell); } - - @Override - public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) { - - VisibilityLevel omidVisibilityLevel = null; - - switch (visibilityLevel) { - case SNAPSHOT: - omidVisibilityLevel = VisibilityLevel.SNAPSHOT; - break; - case SNAPSHOT_EXCLUDE_CURRENT: - omidVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT; - break; - case SNAPSHOT_ALL: - omidVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL; - break; - default: - assert (false); - } - - assert(tx != null); - tx.setVisibilityLevel(omidVisibilityLevel); - - } - - @Override - public byte[] encodeTransaction() throws SQLException { - assert(tx != null); - - TSOProto.Transaction.Builder transactionBuilder = TSOProto.Transaction.newBuilder(); - - transactionBuilder.setTimestamp(tx.getTransactionId()); - transactionBuilder.setEpoch(tx.getEpoch()); - - byte[] encodedTxBytes = transactionBuilder.build().toByteArray(); - // Add code of TransactionProvider at end of byte array - encodedTxBytes = Arrays.copyOf(encodedTxBytes, encodedTxBytes.length + 1); - encodedTxBytes[encodedTxBytes.length - 1] = getProvider().getCode(); - return encodedTxBytes; - } - - @Override - public Provider getProvider() { - return TransactionFactory.Provider.OMID; + } + + @Override + public boolean isTransactionRunning() { + return (tx != null); + } + + @Override + public void reset() { + tx = null; + } + + @Override + public long getTransactionId() { + return tx.getTransactionId(); + } + + @Override + public long getReadPointer() { + return tx.getReadTimestamp(); + } + + @Override + public long getWritePointer() { + return tx.getWriteTimestamp(); + } + + @Override + public PhoenixVisibilityLevel getVisibilityLevel() { + VisibilityLevel visibilityLevel = null; + + assert (tx != null); + visibilityLevel = tx.getVisibilityLevel(); + + PhoenixVisibilityLevel phoenixVisibilityLevel; + switch (visibilityLevel) { + case SNAPSHOT: + phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT; + break; + case SNAPSHOT_EXCLUDE_CURRENT: + phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT; + break; + case SNAPSHOT_ALL: + phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL; + default: + phoenixVisibilityLevel = null; } - @Override - public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext context, boolean subTask) { - return new OmidTransactionContext(context, subTask); + return phoenixVisibilityLevel; + } + + @Override + public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) { + + VisibilityLevel omidVisibilityLevel = null; + + switch (visibilityLevel) { + case SNAPSHOT: + omidVisibilityLevel = VisibilityLevel.SNAPSHOT; + break; + case SNAPSHOT_EXCLUDE_CURRENT: + omidVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT; + break; + case SNAPSHOT_ALL: + omidVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL; + break; + default: + assert (false); } - @Override - public void markDMLFence(PTable dataTable) { - } - - /** - * OmidTransactionContext specific functions - */ - - public HBaseTransaction getTransaction() { - return tx; - } - - - @Override - public Table getTransactionalTable(Table htable, boolean isConflictFree) throws SQLException { - return new OmidTransactionTable(this, htable, isConflictFree); - } - - @Override - public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, boolean isIndex) throws SQLException { - // When we're getting a table for writing, if the table being written to is an index, - // write 
the shadow cells immediately since the only time we write to an index is - // when we initially populate it synchronously. - return new OmidTransactionTable(this, htable, table.isImmutableRows() || isIndex, isIndex); - } + assert (tx != null); + tx.setVisibilityLevel(omidVisibilityLevel); + + } + + @Override + public byte[] encodeTransaction() throws SQLException { + assert (tx != null); + + TSOProto.Transaction.Builder transactionBuilder = TSOProto.Transaction.newBuilder(); + + transactionBuilder.setTimestamp(tx.getTransactionId()); + transactionBuilder.setEpoch(tx.getEpoch()); + + byte[] encodedTxBytes = transactionBuilder.build().toByteArray(); + // Add code of TransactionProvider at end of byte array + encodedTxBytes = Arrays.copyOf(encodedTxBytes, encodedTxBytes.length + 1); + encodedTxBytes[encodedTxBytes.length - 1] = getProvider().getCode(); + return encodedTxBytes; + } + + @Override + public Provider getProvider() { + return TransactionFactory.Provider.OMID; + } + + @Override + public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext context, + boolean subTask) { + return new OmidTransactionContext(context, subTask); + } + + @Override + public void markDMLFence(PTable dataTable) { + } + + /** + * OmidTransactionContext specific functions + */ + + public HBaseTransaction getTransaction() { + return tx; + } + + @Override + public Table getTransactionalTable(Table htable, boolean isConflictFree) throws SQLException { + return new OmidTransactionTable(this, htable, isConflictFree); + } + + @Override + public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, + boolean isIndex) throws SQLException { + // When we're getting a table for writing, if the table being written to is an index, + // write the shadow cells immediately since the only time we write to an index is + // when we initially populate it synchronously. + return new OmidTransactionTable(this, htable, table.isImmutableRows() || isIndex, isIndex); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionProvider.java index b8049bd89dd..75eff1f2e98 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,100 +35,103 @@ import org.apache.phoenix.transaction.TransactionFactory.Provider; public class OmidTransactionProvider implements PhoenixTransactionProvider { - private static final OmidTransactionProvider INSTANCE = new OmidTransactionProvider(); - - private HBaseTransactionManager transactionManager = null; - private volatile CommitTable.Client commitTableClient = null; - - public static final OmidTransactionProvider getInstance() { - return INSTANCE; - } - - private OmidTransactionProvider() { - } - - @Override - public String toString() { - return getProvider().toString(); - } - - @Override - public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException { - // Remove last byte (which is used to identify transaction provider) - return new OmidTransactionContext(Arrays.copyOf(txnBytes,txnBytes.length-1)); - } - - @Override - public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) throws SQLException { - return new OmidTransactionContext(connection); - } - - @Override - public PhoenixTransactionClient getTransactionClient(Configuration config, ConnectionInfo connectionInfo) throws SQLException{ - if (transactionManager == null) { - try { - HBaseOmidClientConfiguration clientConf = new HBaseOmidClientConfiguration(); - clientConf.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); - transactionManager = (HBaseTransactionManager) HBaseTransactionManager.newInstance(clientConf); - } catch (IOException | InterruptedException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } - } - - return new OmidTransactionClient(transactionManager); + private static final OmidTransactionProvider INSTANCE = new OmidTransactionProvider(); + + private HBaseTransactionManager transactionManager = null; + private volatile CommitTable.Client commitTableClient = null; + + public static final OmidTransactionProvider getInstance() { + return INSTANCE; + } + + private OmidTransactionProvider() { + } + + @Override + public String toString() { + return getProvider().toString(); + } + + @Override + public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException { + // Remove last byte (which is used to identify transaction provider) + return new OmidTransactionContext(Arrays.copyOf(txnBytes, txnBytes.length - 1)); + } + + @Override + public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) + throws SQLException { + return new OmidTransactionContext(connection); + } + + @Override + public PhoenixTransactionClient getTransactionClient(Configuration config, + ConnectionInfo connectionInfo) throws SQLException { + if (transactionManager == null) { + try { + HBaseOmidClientConfiguration clientConf = new HBaseOmidClientConfiguration(); + clientConf.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); + transactionManager = + (HBaseTransactionManager) HBaseTransactionManager.newInstance(clientConf); + } catch (IOException | InterruptedException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + 
.setMessage(e.getMessage()).setRootCause(e).build().buildException(); + } } - static class OmidTransactionClient implements PhoenixTransactionClient { - private final HBaseTransactionManager transactionManager; + return new OmidTransactionClient(transactionManager); + } - public OmidTransactionClient(HBaseTransactionManager transactionManager) { - this.transactionManager = transactionManager; - } + static class OmidTransactionClient implements PhoenixTransactionClient { + private final HBaseTransactionManager transactionManager; - public HBaseTransactionManager getTransactionClient() { - return transactionManager; - } - - @Override - public void close() throws IOException {} - } - - // For testing only - public CommitTable.Client getCommitTableClient() { - return commitTableClient; - } - - // For testing only - public void injectTestService(HBaseTransactionManager transactionManager, CommitTable.Client commitTableClient) { - this.transactionManager = transactionManager; - this.commitTableClient = commitTableClient; - } - - @Override - public String getCoprocessorClassName() { - return "org.apache.phoenix.coprocessor.OmidTransactionalProcessor"; + public OmidTransactionClient(HBaseTransactionManager transactionManager) { + this.transactionManager = transactionManager; } - @Override - public String getGCCoprocessorClassName() { - return "org.apache.phoenix.coprocessor.OmidGCProcessor"; - } - - @Override - public Provider getProvider() { - return TransactionFactory.Provider.OMID; - } - - @Override - public boolean isUnsupported(Feature feature) { - return true; + public HBaseTransactionManager getTransactionClient() { + return transactionManager; } @Override - public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp) { - return TTable.markPutAsCommitted(put, timestamp, timestamp); + public void close() throws IOException { } + } + + // For testing only + public CommitTable.Client getCommitTableClient() { + return commitTableClient; + } + + // For testing only + public void injectTestService(HBaseTransactionManager transactionManager, + CommitTable.Client commitTableClient) { + this.transactionManager = transactionManager; + this.commitTableClient = commitTableClient; + } + + @Override + public String getCoprocessorClassName() { + return "org.apache.phoenix.coprocessor.OmidTransactionalProcessor"; + } + + @Override + public String getGCCoprocessorClassName() { + return "org.apache.phoenix.coprocessor.OmidGCProcessor"; + } + + @Override + public Provider getProvider() { + return TransactionFactory.Provider.OMID; + } + + @Override + public boolean isUnsupported(Feature feature) { + return true; + } + + @Override + public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp) { + return TTable.markPutAsCommitted(put, timestamp, timestamp); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java index 483a52b7257..86e3eca666b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; @@ -57,295 +56,288 @@ import com.google.protobuf.ServiceException; public class OmidTransactionTable extends CompatOmidTransactionTable implements Table { - // Copied from HBase ProtobufUtil since it's not accessible - final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); - - private TTable tTable; - private Transaction tx; - private final boolean addShadowCells; - - public OmidTransactionTable() throws SQLException { - super(null); - this.tTable = null; - this.tx = null; - this.addShadowCells = false; - } - - public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable) throws SQLException { - this(ctx, hTable, false); - } - - public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable, boolean isConflictFree) throws SQLException { - this(ctx, hTable, isConflictFree, false); - } - - public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable, boolean isConflictFree, boolean addShadowCells) throws SQLException { - super(hTable); - assert(ctx instanceof OmidTransactionContext); - - OmidTransactionContext omidTransactionContext = (OmidTransactionContext) ctx; - this.addShadowCells = addShadowCells; - try { - tTable = new TTable(hTable, true, isConflictFree); - } catch (IOException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build() - .buildException(); - } - - this.tx = omidTransactionContext.getTransaction(); - } - - @Override - public Result get(Get get) throws IOException { - return tTable.get(tx, get); - } - - @Override - public void put(Put put) throws IOException { - tTable.put(tx, put, addShadowCells); - } - - @Override - public void delete(Delete delete) throws IOException { - tTable.delete(tx, delete); - } - - @Override - public ResultScanner getScanner(Scan scan) throws IOException { - scan.setTimeRange(0, Long.MAX_VALUE); - return tTable.getScanner(tx, scan); - } - - @Override - public Configuration getConfiguration() { - return tTable.getConfiguration(); - } - - @Override - public boolean exists(Get get) throws IOException { - return tTable.exists(tx, get); - } - - @Override - public Result[] get(List gets) throws IOException { - return tTable.get(tx, gets); - } - - @Override - public ResultScanner getScanner(byte[] family) throws IOException { - return tTable.getScanner(tx, family); - } - - @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) - throws IOException { - return tTable.getScanner(tx, family, qualifier); - } - - @Override - public void put(List puts) throws IOException { - tTable.put(tx, puts, addShadowCells); - } - - @Override - public void delete(List deletes) throws IOException { - tTable.delete(tx, deletes); - } - - @Override - public void close() throws IOException { - tTable.close(); - } - - @Override - public TableName getName() { - byte[] name = tTable.getTableName(); - return TableName.valueOf(name); - } - - @Override - public boolean[] 
existsAll(List gets) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void batch(List actions, Object[] results) - throws IOException, InterruptedException { - tTable.batch(tx, actions, addShadowCells); - if (results != null) { - Arrays.fill(results, EMPTY_RESULT_EXISTS_TRUE); - } - } - - @Override - public void batchCallback(List actions, - Object[] results, Callback callback) throws IOException, - InterruptedException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public Result append(Append append) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public Result increment(Increment increment) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, Durability durability) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public CoprocessorRpcChannel coprocessorService(byte[] row) { - throw new UnsupportedOperationException(); - } - - @Override - public Map coprocessorService( - Class service, byte[] startKey, byte[] endKey, - Call callable) throws ServiceException, Throwable { - throw new UnsupportedOperationException(); - } - - @Override - public void coprocessorService(Class service, - byte[] startKey, byte[] endKey, Call callable, - Callback callback) throws ServiceException, Throwable { - throw new UnsupportedOperationException(); - } - - @Override - public Map batchCoprocessorService( - MethodDescriptor methodDescriptor, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) - throws ServiceException, Throwable { - throw new UnsupportedOperationException(); - } - - @Override - public void batchCoprocessorService( - MethodDescriptor methodDescriptor, Message request, - byte[] startKey, byte[] endKey, R responsePrototype, - Callback callback) throws ServiceException, Throwable { - throw new UnsupportedOperationException(); - } - - @Override - public int getOperationTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - public int getRpcTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - public void setOperationTimeout(int arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setRpcTimeout(int arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public int getWriteRpcTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - public void setWriteRpcTimeout(int writeRpcTimeout) { - throw new UnsupportedOperationException(); - } - - @Override - public int getReadRpcTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - public void setReadRpcTimeout(int readRpcTimeout) { - throw new UnsupportedOperationException(); - } - - @Override - public TableDescriptor getDescriptor() throws IOException { - throw new UnsupportedOperationException(); - 
} - - @Override - public boolean[] exists(List gets) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, - Delete delete) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, - RowMutations mutation) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long getRpcTimeout(TimeUnit unit) { - throw new UnsupportedOperationException(); - } - - @Override - public long getReadRpcTimeout(TimeUnit unit) { - throw new UnsupportedOperationException(); - } - - @Override - public long getWriteRpcTimeout(TimeUnit unit) { - throw new UnsupportedOperationException(); - } - - @Override - public long getOperationTimeout(TimeUnit unit) { - throw new UnsupportedOperationException(); - } - - @Override - public RegionLocator getRegionLocator() throws IOException { - throw new UnsupportedOperationException(); - } + // Copied from HBase ProtobufUtil since it's not accessible + final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); + + private TTable tTable; + private Transaction tx; + private final boolean addShadowCells; + + public OmidTransactionTable() throws SQLException { + super(null); + this.tTable = null; + this.tx = null; + this.addShadowCells = false; + } + + public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable) throws SQLException { + this(ctx, hTable, false); + } + + public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable, boolean isConflictFree) + throws SQLException { + this(ctx, hTable, isConflictFree, false); + } + + public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable, boolean isConflictFree, + boolean addShadowCells) throws SQLException { + super(hTable); + assert (ctx instanceof OmidTransactionContext); + + OmidTransactionContext omidTransactionContext = (OmidTransactionContext) ctx; + this.addShadowCells = addShadowCells; + try { + tTable = new TTable(hTable, true, isConflictFree); + } catch (IOException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); + } + + this.tx = omidTransactionContext.getTransaction(); + } + + @Override + public Result get(Get get) throws IOException { + return tTable.get(tx, get); + } + + @Override + public void put(Put put) throws IOException { + tTable.put(tx, put, addShadowCells); + } + + @Override + public void delete(Delete delete) throws IOException { + tTable.delete(tx, delete); + } + + @Override + public ResultScanner getScanner(Scan scan) throws IOException { + scan.setTimeRange(0, Long.MAX_VALUE); + return tTable.getScanner(tx, scan); + } + + @Override + public Configuration getConfiguration() { + return tTable.getConfiguration(); + } + + @Override + public boolean exists(Get get) throws IOException { + return tTable.exists(tx, get); + } + + @Override + public Result[] get(List gets) throws IOException { + return tTable.get(tx, gets); + } + + @Override + public ResultScanner getScanner(byte[] family) throws IOException { + return tTable.getScanner(tx, family); + } + + 
@Override + public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { + return tTable.getScanner(tx, family, qualifier); + } + + @Override + public void put(List puts) throws IOException { + tTable.put(tx, puts, addShadowCells); + } + + @Override + public void delete(List deletes) throws IOException { + tTable.delete(tx, deletes); + } + + @Override + public void close() throws IOException { + tTable.close(); + } + + @Override + public TableName getName() { + byte[] name = tTable.getTableName(); + return TableName.valueOf(name); + } + + @Override + public boolean[] existsAll(List gets) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void batch(List actions, Object[] results) + throws IOException, InterruptedException { + tTable.batch(tx, actions, addShadowCells); + if (results != null) { + Arrays.fill(results, EMPTY_RESULT_EXISTS_TRUE); + } + } + + @Override + public void batchCallback(List actions, Object[] results, Callback callback) + throws IOException, InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) + throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Result append(Append append) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Result increment(Increment increment) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) + throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public CoprocessorRpcChannel coprocessorService(byte[] row) { + throw new UnsupportedOperationException(); + } + + @Override + public Map coprocessorService(Class service, byte[] startKey, + byte[] endKey, Call callable) throws ServiceException, Throwable { + throw new UnsupportedOperationException(); + } + + @Override + public void coprocessorService(Class service, byte[] startKey, + byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { + throw new UnsupportedOperationException(); + } + + @Override + public Map batchCoprocessorService( + MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { + throw new UnsupportedOperationException(); + } + + @Override + public void batchCoprocessorService(MethodDescriptor methodDescriptor, + Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) + throws ServiceException, Throwable { + throw new UnsupportedOperationException(); + } + + @Override + public int getOperationTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public int getRpcTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setOperationTimeout(int arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setRpcTimeout(int arg0) { + throw new 
UnsupportedOperationException(); + } + + @Override + public int getWriteRpcTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setWriteRpcTimeout(int writeRpcTimeout) { + throw new UnsupportedOperationException(); + } + + @Override + public int getReadRpcTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public void setReadRpcTimeout(int readRpcTimeout) { + throw new UnsupportedOperationException(); + } + + @Override + public TableDescriptor getDescriptor() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean[] exists(List gets) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, Put put) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + byte[] value, RowMutations mutation) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long getRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getReadRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getWriteRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getOperationTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public RegionLocator getRegionLocator() throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionClient.java index f12f8183c55..318f184060a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionClient.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java index 2d219bc12f8..2e453eed367 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,6 @@ package org.apache.phoenix.transaction; import java.sql.SQLException; -import java.util.concurrent.TimeoutException; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; @@ -27,210 +26,201 @@ import org.apache.phoenix.transaction.TransactionFactory.Provider; public interface PhoenixTransactionContext { - public static PhoenixTransactionContext NULL_CONTEXT = new PhoenixTransactionContext() { - - @Override - public void begin() throws SQLException { - } - - @Override - public void commit() throws SQLException { - } - - @Override - public void abort() throws SQLException { - } - - @Override - public void checkpoint(boolean hasUncommittedData) throws SQLException { - } - - @Override - public void commitDDLFence(PTable dataTable) throws SQLException { - } - - @Override - public void join(PhoenixTransactionContext ctx) { - } - - @Override - public boolean isTransactionRunning() { - return false; - } - - @Override - public void reset() { - } - - @Override - public long getTransactionId() { - return 0; - } - - @Override - public long getReadPointer() { - return 0; - } - - @Override - public long getWritePointer() { - return 0; - } - - @Override - public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) { - } - - @Override - public PhoenixVisibilityLevel getVisibilityLevel() { - return null; - } - - @Override - public byte[] encodeTransaction() throws SQLException { - return null; - } - - @Override - public Provider getProvider() { - return null; - } - - @Override - public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext contex, boolean subTask) { - return NULL_CONTEXT; - } - - @Override - public void markDMLFence(PTable dataTable) { - - } - - @Override - public Table getTransactionalTable(Table htable, boolean isConflictFree) { - return null; - } - - @Override - public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, boolean isIndex) { - return null; - } - }; - /** - * - * Visibility levels needed for checkpointing and - * - */ - public enum PhoenixVisibilityLevel { - SNAPSHOT, - SNAPSHOT_EXCLUDE_CURRENT, - SNAPSHOT_ALL - } - - public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; //"phoenix.tx.rollback"; - - // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? - public static final String PROPERTY_TTL = "dataset.table.ttl"; - public static final byte[] PROPERTY_TTL_BYTES = Bytes.toBytes(PROPERTY_TTL); - - public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing"; - - /** - * Starts a transaction - * - * @throws SQLException - */ - public void begin() throws SQLException; - - /** - * Commits a transaction - * - * @throws SQLException - */ - public void commit() throws SQLException; - - /** - * Rollback a transaction - * - * @throws SQLException - */ - public void abort() throws SQLException; - - /** - * Create a checkpoint in a transaction. - * @throws SQLException - */ - public void checkpoint(boolean hasUncommittedData) throws SQLException; - - /** - * Commit DDL to guarantee that no transaction started before create index - * and committed afterwards, as explained in [PHOENIX-2478] and [OMID-56]. 
- * - * @param dataTable the table that the DDL command works on - * @throws SQLException - */ - public void commitDDLFence(PTable dataTable) - throws SQLException; - - - /** - * Mark the start of DML go ensure that updates to indexed rows are not - * missed. - * @param dataTable the table on which DML command is working - */ - public void markDMLFence(PTable dataTable); - - /** - * Augment the current context with ctx modified keys - * - * @param ctx - */ - public void join(PhoenixTransactionContext ctx); - - /** - * Is there a transaction in flight? - */ - public boolean isTransactionRunning(); - - /** - * Reset transaction state - */ - public void reset(); - - /** - * Returns transaction unique identifier which is also - * assumed to be the earliest write pointer. - */ - public long getTransactionId(); - - /** - * Returns transaction snapshot id - */ - public long getReadPointer(); - - /** - * Returns transaction write pointer. After checkpoint the write pointer is different than the initial one - */ - public long getWritePointer(); - - /** - * Set visibility level - */ - public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel); - - /** - * Returns visibility level - */ - public PhoenixVisibilityLevel getVisibilityLevel(); - - /** - * Encode transaction - */ - public byte[] encodeTransaction() throws SQLException; - - public Provider getProvider(); - public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext contex, boolean subTask); - - public Table getTransactionalTable(Table htable, boolean isConflictFree) throws SQLException; - public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, boolean isIndex) throws SQLException; + public static PhoenixTransactionContext NULL_CONTEXT = new PhoenixTransactionContext() { + + @Override + public void begin() throws SQLException { + } + + @Override + public void commit() throws SQLException { + } + + @Override + public void abort() throws SQLException { + } + + @Override + public void checkpoint(boolean hasUncommittedData) throws SQLException { + } + + @Override + public void commitDDLFence(PTable dataTable) throws SQLException { + } + + @Override + public void join(PhoenixTransactionContext ctx) { + } + + @Override + public boolean isTransactionRunning() { + return false; + } + + @Override + public void reset() { + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public long getReadPointer() { + return 0; + } + + @Override + public long getWritePointer() { + return 0; + } + + @Override + public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) { + } + + @Override + public PhoenixVisibilityLevel getVisibilityLevel() { + return null; + } + + @Override + public byte[] encodeTransaction() throws SQLException { + return null; + } + + @Override + public Provider getProvider() { + return null; + } + + @Override + public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext contex, + boolean subTask) { + return NULL_CONTEXT; + } + + @Override + public void markDMLFence(PTable dataTable) { + + } + + @Override + public Table getTransactionalTable(Table htable, boolean isConflictFree) { + return null; + } + + @Override + public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, + Table htable, boolean isIndex) { + return null; + } + }; + + /** + * Visibility levels needed for checkpointing and + */ + public enum PhoenixVisibilityLevel { + SNAPSHOT, + SNAPSHOT_EXCLUDE_CURRENT, + 
SNAPSHOT_ALL + } + + public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; // "phoenix.tx.rollback"; + + // Note: After PHOENIX-6627, is PhoenixTransactionContext.PROPERTY_TTL still useful? + public static final String PROPERTY_TTL = "dataset.table.ttl"; + public static final byte[] PROPERTY_TTL_BYTES = Bytes.toBytes(PROPERTY_TTL); + + public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing"; + + /** + * Starts a transaction + */ + public void begin() throws SQLException; + + /** + * Commits a transaction + */ + public void commit() throws SQLException; + + /** + * Rollback a transaction + */ + public void abort() throws SQLException; + + /** + * Create a checkpoint in a transaction. + */ + public void checkpoint(boolean hasUncommittedData) throws SQLException; + + /** + * Commit DDL to guarantee that no transaction started before create index and committed + * afterwards, as explained in [PHOENIX-2478] and [OMID-56]. + * @param dataTable the table that the DDL command works on + */ + public void commitDDLFence(PTable dataTable) throws SQLException; + + /** + * Mark the start of DML go ensure that updates to indexed rows are not missed. + * @param dataTable the table on which DML command is working + */ + public void markDMLFence(PTable dataTable); + + /** + * Augment the current context with ctx modified keys + */ + public void join(PhoenixTransactionContext ctx); + + /** + * Is there a transaction in flight? + */ + public boolean isTransactionRunning(); + + /** + * Reset transaction state + */ + public void reset(); + + /** + * Returns transaction unique identifier which is also assumed to be the earliest write pointer. + */ + public long getTransactionId(); + + /** + * Returns transaction snapshot id + */ + public long getReadPointer(); + + /** + * Returns transaction write pointer. After checkpoint the write pointer is different than the + * initial one + */ + public long getWritePointer(); + + /** + * Set visibility level + */ + public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel); + + /** + * Returns visibility level + */ + public PhoenixVisibilityLevel getVisibilityLevel(); + + /** + * Encode transaction + */ + public byte[] encodeTransaction() throws SQLException; + + public Provider getProvider(); + + public PhoenixTransactionContext newTransactionContext(PhoenixTransactionContext contex, + boolean subTask); + + public Table getTransactionalTable(Table htable, boolean isConflictFree) throws SQLException; + + public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, + boolean isIndex) throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java index dd07c225287..e3f3226aa6d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,40 +27,46 @@ import org.apache.phoenix.jdbc.PhoenixConnection; public interface PhoenixTransactionProvider { - public enum Feature { - ALTER_NONTX_TO_TX(SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL), - COLUMN_ENCODING(SQLExceptionCode.UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER), - MAINTAIN_LOCAL_INDEX_ON_SERVER(null), - SET_TTL(SQLExceptionCode.TTL_UNSUPPORTED_FOR_TXN_TABLE), - ALLOW_LOCAL_INDEX(SQLExceptionCode.CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE) - ; - - private final SQLExceptionCode code; - - Feature(SQLExceptionCode code) { - this.code = code; - } - - public SQLExceptionCode getCode() { - return code; - } + public enum Feature { + ALTER_NONTX_TO_TX(SQLExceptionCode.CANNOT_ALTER_TABLE_FROM_NON_TXN_TO_TXNL), + COLUMN_ENCODING(SQLExceptionCode.UNSUPPORTED_COLUMN_ENCODING_FOR_TXN_PROVIDER), + MAINTAIN_LOCAL_INDEX_ON_SERVER(null), + SET_TTL(SQLExceptionCode.TTL_UNSUPPORTED_FOR_TXN_TABLE), + ALLOW_LOCAL_INDEX(SQLExceptionCode.CANNOT_CREATE_LOCAL_INDEX_FOR_TXN_TABLE); + + private final SQLExceptionCode code; + + Feature(SQLExceptionCode code) { + this.code = code; } - public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException; - public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) throws SQLException; - public PhoenixTransactionClient getTransactionClient(Configuration config, ConnectionInfo connectionInfo) throws SQLException; - public String getCoprocessorClassName(); - public String getGCCoprocessorClassName(); + public SQLExceptionCode getCode() { + return code; + } + } + + public PhoenixTransactionContext getTransactionContext(byte[] txnBytes) throws IOException; + + public PhoenixTransactionContext getTransactionContext(PhoenixConnection connection) + throws SQLException; + + public PhoenixTransactionClient getTransactionClient(Configuration config, + ConnectionInfo connectionInfo) throws SQLException; + + public String getCoprocessorClassName(); + + public String getGCCoprocessorClassName(); + + public TransactionFactory.Provider getProvider(); - public TransactionFactory.Provider getProvider(); - public boolean isUnsupported(Feature feature); + public boolean isUnsupported(Feature feature); - /** - * Converts put operation to autocommit operation - * @param put put operation - * @param timestamp - start timestamp - * @param commitTimestamp - commit timestamp - * @return put operation with metadata - */ - public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp); + /** + * Converts put operation to autocommit operation + * @param put put operation + * @param timestamp - start timestamp + * @param commitTimestamp - commit timestamp + * @return put operation with metadata + */ + public Put markPutAsCommitted(Put put, long timestamp, long commitTimestamp); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java index 754045723e8..d93f3c898ac 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java @@ -7,7 +7,7 @@ * "License"); you may 
not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,69 +18,72 @@ package org.apache.phoenix.transaction; import java.io.IOException; + import org.apache.phoenix.coprocessorclient.MetaDataProtocol; public class TransactionFactory { - public enum Provider { - // The provider formerly known as TEPHRA, deliberately renamed to warn downstreams - // that Tephra support no longer exists, while preserving the ordinal for backwards - // compatible use in system schema. - NOTAVAILABLE((byte)1, NotAvailableTransactionProvider.getInstance()), - // The OMID provider. - OMID((byte)2, OmidTransactionProvider.getInstance()); - - private final byte code; - private final PhoenixTransactionProvider provider; + public enum Provider { + // The provider formerly known as TEPHRA, deliberately renamed to warn downstreams + // that Tephra support no longer exists, while preserving the ordinal for backwards + // compatible use in system schema. + NOTAVAILABLE((byte) 1, NotAvailableTransactionProvider.getInstance()), + // The OMID provider. + OMID((byte) 2, OmidTransactionProvider.getInstance()); - Provider(byte code, PhoenixTransactionProvider provider) { - this.code = code; - this.provider = provider; - } + private final byte code; + private final PhoenixTransactionProvider provider; - public static Provider[] available() { - return new Provider[] { OMID }; - } + Provider(byte code, PhoenixTransactionProvider provider) { + this.code = code; + this.provider = provider; + } - public byte getCode() { - return this.code; - } + public static Provider[] available() { + return new Provider[] { OMID }; + } - public static Provider fromCode(int code) { - if (code < 1 || code > Provider.values().length) { - throw new IllegalArgumentException("Invalid TransactionFactory.Provider " + code); - } - return Provider.values()[code-1]; - } + public byte getCode() { + return this.code; + } - public static Provider getDefault() { - return OMID; - } + public static Provider fromCode(int code) { + if (code < 1 || code > Provider.values().length) { + throw new IllegalArgumentException("Invalid TransactionFactory.Provider " + code); + } + return Provider.values()[code - 1]; + } - public PhoenixTransactionProvider getTransactionProvider() { - return provider; - } + public static Provider getDefault() { + return OMID; } - public static PhoenixTransactionProvider getTransactionProvider(Provider provider) { - return provider.getTransactionProvider(); + public PhoenixTransactionProvider getTransactionProvider() { + return provider; } + } + + public static PhoenixTransactionProvider getTransactionProvider(Provider provider) { + return provider.getTransactionProvider(); + } - public static PhoenixTransactionProvider getTransactionProvider(byte[] txState, int clientVersion) { - if (txState == null || txState.length == 0) { - return null; - } - Provider provider = (clientVersion < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0) - ? 
Provider.NOTAVAILABLE - : Provider.fromCode(txState[txState.length-1]); - return provider.getTransactionProvider(); + public static PhoenixTransactionProvider getTransactionProvider(byte[] txState, + int clientVersion) { + if (txState == null || txState.length == 0) { + return null; } + Provider provider = (clientVersion < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_14_0) + ? Provider.NOTAVAILABLE + : Provider.fromCode(txState[txState.length - 1]); + return provider.getTransactionProvider(); + } - public static PhoenixTransactionContext getTransactionContext(byte[] txState, int clientVersion) throws IOException { - PhoenixTransactionProvider provider = getTransactionProvider(txState, clientVersion); - if (provider == null) { - return null; - } - return provider.getTransactionContext(txState); + public static PhoenixTransactionContext getTransactionContext(byte[] txState, int clientVersion) + throws IOException { + PhoenixTransactionProvider provider = getTransactionProvider(txState, clientVersion); + if (provider == null) { + return null; } + return provider.getTransactionContext(txState); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/Base62Encoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/Base62Encoder.java index b3c38f372ec..b156cfec614 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/Base62Encoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/Base62Encoder.java @@ -1,130 +1,120 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.util; /** - * Utility for converting a base 10 number to string that represents a base 62 number + * Utility for converting a base 10 number to string that represents a base 62 number */ public class Base62Encoder { - // All possible chars for representing a number as a base 62 encoded String - public static final char[] digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz".toCharArray(); + // All possible chars for representing a number as a base 62 encoded String + public static final char[] digits = + "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz".toCharArray(); - private static final char[] DigitTens = new char[3844]; - private static final char[] DigitOnes = new char[3844]; + private static final char[] DigitTens = new char[3844]; + private static final char[] DigitOnes = new char[3844]; - static { - for (byte i = 0; i < 62; ++i) { - for (byte j = 0; j < 62; ++j) { - DigitTens[i * 62 + j] = digits[i]; - DigitOnes[i * 62 + j] = digits[j]; - } - } + static { + for (byte i = 0; i < 62; ++i) { + for (byte j = 0; j < 62; ++j) { + DigitTens[i * 62 + j] = digits[i]; + DigitOnes[i * 62 + j] = digits[j]; + } } + } - final static long[] pow62 = { 62, 3844, 238328, 14776336, 916132832, 56800235584L, 3521614606208L, - 218340105584896L, 13537086546263552L, 839299365868340224L }; + final static long[] pow62 = { 62, 3844, 238328, 14776336, 916132832, 56800235584L, 3521614606208L, + 218340105584896L, 13537086546263552L, 839299365868340224L }; - /** - * Returns the length of the base 62 encoded string required to represent num - * - * @param num - * must be a positive number - */ - static int stringSize(long num) { - for (int i = 0; i < 10; i++) { - if (num < pow62[i]) - return i + 1; - } - return 11; + /** + * Returns the length of the base 62 encoded string required to represent num must be a positive + * number + */ + static int stringSize(long num) { + for (int i = 0; i < 10; i++) { + if (num < pow62[i]) return i + 1; } + return 11; + } - /** - * Fills the given buffer with a string representing the given number in base 62. The characters are placed into the - * buffer backwards starting with the least significant digit and working backwards from there. - * - * @param num - * number to convert, should be > Long.MIN_VALUE - * @param size - * size of the buffer - * @param buf - * buffer to place encoded string - */ - static void getChars(long num, int size, char[] buf) { - long q; - int r; - int charPos = size; - char sign = 0; - - if (num < 0) { - sign = '-'; - num = -num; - } + /** + * Fills the given buffer with a string representing the given number in base 62. The characters + * are placed into the buffer backwards starting with the least significant digit and working + * backwards from there. 
number to convert, should be > Long.MIN_VALUE size of the buffer buffer + * to place encoded string + */ + static void getChars(long num, int size, char[] buf) { + long q; + int r; + int charPos = size; + char sign = 0; - // Get 2 digits per iteration using longs until quotient fits into an int - while (num > Integer.MAX_VALUE) { - q = num / 3844; - r = (int) (num - (q * 3844)); - num = q; - buf[--charPos] = DigitOnes[r]; - buf[--charPos] = DigitTens[r]; - } + if (num < 0) { + sign = '-'; + num = -num; + } - // Get 2 digits per iteration using ints - int q2; - int i2 = (int) num; - while (i2 >= 65536) { - q2 = i2 / 3844; - r = i2 - (q2 * 3844); - i2 = q2; - buf[--charPos] = DigitOnes[r]; - buf[--charPos] = DigitTens[r]; - } + // Get 2 digits per iteration using longs until quotient fits into an int + while (num > Integer.MAX_VALUE) { + q = num / 3844; + r = (int) (num - (q * 3844)); + num = q; + buf[--charPos] = DigitOnes[r]; + buf[--charPos] = DigitTens[r]; + } - // Fall through to fast mode for smaller numbers - // assert(i2 <= 65536, i2); - for (;;) { - // this evaluates to i2/62 - // see "How to optimize for the Pentium family of microprocessors", Agner Fog, section 18.7 - q2 = ((i2 + 1) * 33825) >>> (16 + 5); - r = i2 - (q2 * 62); - buf[--charPos] = digits[r]; - i2 = q2; - if (i2 == 0) - break; - } - if (sign != 0) { - buf[--charPos] = sign; - } + // Get 2 digits per iteration using ints + int q2; + int i2 = (int) num; + while (i2 >= 65536) { + q2 = i2 / 3844; + r = i2 - (q2 * 3844); + i2 = q2; + buf[--charPos] = DigitOnes[r]; + buf[--charPos] = DigitTens[r]; } - /** - * Returns a String object representing the specified long encoded in base 62. - * - * @param num - * number to be converted - * @return a string representation of the number encoded in base 62 - */ - public static String toString(long num) { - if (num == Long.MIN_VALUE) - return "-AzL8n0Y58m8"; - int size = (num < 0) ? stringSize(-num) + 1 : stringSize(num); - char[] buf = new char[size]; - getChars(num, size, buf); - return new String(buf); + // Fall through to fast mode for smaller numbers + // assert(i2 <= 65536, i2); + for (;;) { + // this evaluates to i2/62 + // see "How to optimize for the Pentium family of microprocessors", Agner Fog, section 18.7 + q2 = ((i2 + 1) * 33825) >>> (16 + 5); + r = i2 - (q2 * 62); + buf[--charPos] = digits[r]; + i2 = q2; + if (i2 == 0) break; + } + if (sign != 0) { + buf[--charPos] = sign; } + } + + /** + * Returns a String object representing the specified long encoded in base 62. number to be + * converted + * @return a string representation of the number encoded in base 62 + */ + public static String toString(long num) { + if (num == Long.MIN_VALUE) return "-AzL8n0Y58m8"; + int size = (num < 0) ? stringSize(-num) + 1 : stringSize(num); + char[] buf = new char[size]; + getChars(num, size, buf); + return new String(buf); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java index db2f1bfc6cd..146e2fac182 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/BigDecimalUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,49 +20,54 @@ import org.apache.hadoop.hbase.util.Pair; /** - * - * * @since 1.2.1 */ public class BigDecimalUtil { - /** - * Calculates the precision and scale for BigDecimal arithmetic operation results. It uses the algorithm mentioned - * here - * @param lp precision of the left operand - * @param ls scale of the left operand - * @param rp precision of the right operand - * @param rs scale of the right operand - * @param op The operation type - * @return {@link Pair} comprising of the precision and scale. - */ - public static Pair getResultPrecisionScale(int lp, int ls, int rp, int rs, Operation op) { - int resultPrec = 0, resultScale = 0; - switch (op) { - case MULTIPLY: - resultPrec = lp + rp; - resultScale = ls + rs; - break; - case DIVIDE: - resultPrec = lp - ls + rp + Math.max(ls + rp - rs + 1, 4); - resultScale = 31 - lp + ls - rs; - break; - case ADD: - resultPrec = 2 * (lp - ls) + ls; // Is this correct? The page says addition -> 2 * (p - s) + s. - resultScale = Math.max(ls, rs); - break; - case AVG: - resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs); - resultScale = Math.max(Math.max(ls, rs), 4); - break; - case OTHERS: - resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs); - resultScale = Math.max(ls, rs); - } - return new Pair(resultPrec, resultScale); + /** + * Calculates the precision and scale for BigDecimal arithmetic operation results. It uses the + * algorithm mentioned here + * @param lp precision of the left operand + * @param ls scale of the left operand + * @param rp precision of the right operand + * @param rs scale of the right operand + * @param op The operation type + * @return {@link Pair} comprising of the precision and scale. + */ + public static Pair getResultPrecisionScale(int lp, int ls, int rp, int rs, + Operation op) { + int resultPrec = 0, resultScale = 0; + switch (op) { + case MULTIPLY: + resultPrec = lp + rp; + resultScale = ls + rs; + break; + case DIVIDE: + resultPrec = lp - ls + rp + Math.max(ls + rp - rs + 1, 4); + resultScale = 31 - lp + ls - rs; + break; + case ADD: + resultPrec = 2 * (lp - ls) + ls; // Is this correct? The page says addition -> 2 * (p - s) + + // s. + resultScale = Math.max(ls, rs); + break; + case AVG: + resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs); + resultScale = Math.max(Math.max(ls, rs), 4); + break; + case OTHERS: + resultPrec = Math.max(lp - ls, rp - rs) + 1 + Math.max(ls, rs); + resultScale = Math.max(ls, rs); } - - public static enum Operation { - MULTIPLY, DIVIDE, ADD, AVG, OTHERS; - } -} \ No newline at end of file + return new Pair(resultPrec, resultScale); + } + + public static enum Operation { + MULTIPLY, + DIVIDE, + ADD, + AVG, + OTHERS; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/BitSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/BitSet.java index 658608ba045..26582cf1718 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/BitSet.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/BitSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import java.io.*; @@ -23,102 +22,98 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * - * BitSet that can be initialized with primitive types, which - * is only available in Java 7 or above. - * - * + * BitSet that can be initialized with primitive types, which is only available in Java 7 or above. * @since 2.1.0 */ public class BitSet { - public static final int BITS_PER_LONG = 64; - public static final int BITS_PER_INT = 32; - public static final int BITS_PER_SHORT = 16; - public static final int BITS_PER_BYTE = 8; - private final long[] bits; - - public static int getByteSize(int capacity) { - if (capacity <= BitSet.BITS_PER_BYTE) { - return Bytes.SIZEOF_BYTE; - } else if (capacity <= BitSet.BITS_PER_SHORT) { - return Bytes.SIZEOF_SHORT; - } else if (capacity <= BitSet.BITS_PER_INT) { - return Bytes.SIZEOF_INT; - } else if (capacity <= BitSet.BITS_PER_LONG) { - return Bytes.SIZEOF_LONG; - } else { - int nLongs = (capacity-1) / BitSet.BITS_PER_LONG + 1; - return nLongs * Bytes.SIZEOF_LONG; - } - } + public static final int BITS_PER_LONG = 64; + public static final int BITS_PER_INT = 32; + public static final int BITS_PER_SHORT = 16; + public static final int BITS_PER_BYTE = 8; + private final long[] bits; - public static BitSet read(DataInput input, int capacity) throws IOException { - if (capacity <= BitSet.BITS_PER_BYTE) { - return fromPrimitive(input.readByte()); - } else if (capacity <= BitSet.BITS_PER_SHORT) { - return fromPrimitive(input.readShort()); - } else if (capacity <= BitSet.BITS_PER_INT) { - return fromPrimitive(input.readInt()); - } else if (capacity <= BitSet.BITS_PER_LONG) { - return fromPrimitive(input.readLong()); - } else { - int nLongs = (capacity-1) / BitSet.BITS_PER_LONG + 1; - return fromArray(ByteUtil.readFixedLengthLongArray(input, nLongs)); - } - } - - public static void write(DataOutput output, BitSet bitSet, int capacity) throws IOException { - if (capacity <= BitSet.BITS_PER_BYTE) { - output.writeByte((byte)bitSet.bits[0]); - } else if (capacity <= BitSet.BITS_PER_SHORT) { - output.writeShort((short)bitSet.bits[0]); - } else if (capacity <= BitSet.BITS_PER_INT) { - output.writeInt((int)bitSet.bits[0]); - } else if (capacity <= BitSet.BITS_PER_LONG) { - output.writeLong(bitSet.bits[0]); - } else { - ByteUtil.writeFixedLengthLongArray(output, bitSet.bits); - } - } - - public static BitSet fromPrimitive(byte bits) { - return new BitSet(new long[] { bits }); + public static int getByteSize(int capacity) { + if (capacity <= BitSet.BITS_PER_BYTE) { + return Bytes.SIZEOF_BYTE; + } else if (capacity <= BitSet.BITS_PER_SHORT) { + return Bytes.SIZEOF_SHORT; + } else if (capacity <= BitSet.BITS_PER_INT) { + return Bytes.SIZEOF_INT; + } else if (capacity <= BitSet.BITS_PER_LONG) { + return Bytes.SIZEOF_LONG; + } else { + int nLongs = (capacity - 1) / BitSet.BITS_PER_LONG + 1; + return nLongs * Bytes.SIZEOF_LONG; } + } - public static BitSet fromPrimitive(short bits) { - return new BitSet(new long[] { bits }); + public static BitSet read(DataInput input, int capacity) throws IOException { + if (capacity <= BitSet.BITS_PER_BYTE) { + return 
fromPrimitive(input.readByte()); + } else if (capacity <= BitSet.BITS_PER_SHORT) { + return fromPrimitive(input.readShort()); + } else if (capacity <= BitSet.BITS_PER_INT) { + return fromPrimitive(input.readInt()); + } else if (capacity <= BitSet.BITS_PER_LONG) { + return fromPrimitive(input.readLong()); + } else { + int nLongs = (capacity - 1) / BitSet.BITS_PER_LONG + 1; + return fromArray(ByteUtil.readFixedLengthLongArray(input, nLongs)); } + } - public static BitSet fromPrimitive(int bits) { - return new BitSet(new long[] { bits }); + public static void write(DataOutput output, BitSet bitSet, int capacity) throws IOException { + if (capacity <= BitSet.BITS_PER_BYTE) { + output.writeByte((byte) bitSet.bits[0]); + } else if (capacity <= BitSet.BITS_PER_SHORT) { + output.writeShort((short) bitSet.bits[0]); + } else if (capacity <= BitSet.BITS_PER_INT) { + output.writeInt((int) bitSet.bits[0]); + } else if (capacity <= BitSet.BITS_PER_LONG) { + output.writeLong(bitSet.bits[0]); + } else { + ByteUtil.writeFixedLengthLongArray(output, bitSet.bits); } + } - public static BitSet fromPrimitive(long bits) { - return new BitSet(new long[] { bits }); - } + public static BitSet fromPrimitive(byte bits) { + return new BitSet(new long[] { bits }); + } - public static BitSet fromArray(long[] bits) { - return new BitSet(bits); - } + public static BitSet fromPrimitive(short bits) { + return new BitSet(new long[] { bits }); + } - public static BitSet withCapacity(int maxBits) { - int size = Math.max(1,(maxBits + BITS_PER_LONG -1) / BITS_PER_LONG); - return new BitSet(new long[size]); - } + public static BitSet fromPrimitive(int bits) { + return new BitSet(new long[] { bits }); + } - public BitSet(long[] bits) { - this.bits = bits; - } + public static BitSet fromPrimitive(long bits) { + return new BitSet(new long[] { bits }); + } - public boolean get(int nBit) { - int lIndex = nBit / BITS_PER_LONG; - int bIndex = nBit % BITS_PER_LONG; - return (bits[lIndex] & (1L << bIndex)) != 0; - } - - public void set(int nBit) { - int lIndex = nBit / BITS_PER_LONG; - int bIndex = nBit % BITS_PER_LONG; - bits[lIndex] |= (1L << bIndex); - } + public static BitSet fromArray(long[] bits) { + return new BitSet(bits); + } + + public static BitSet withCapacity(int maxBits) { + int size = Math.max(1, (maxBits + BITS_PER_LONG - 1) / BITS_PER_LONG); + return new BitSet(new long[size]); + } + + public BitSet(long[] bits) { + this.bits = bits; + } + + public boolean get(int nBit) { + int lIndex = nBit / BITS_PER_LONG; + int bIndex = nBit % BITS_PER_LONG; + return (bits[lIndex] & (1L << bIndex)) != 0; + } + + public void set(int nBit) { + int lIndex = nBit / BITS_PER_LONG; + int bIndex = nBit % BITS_PER_LONG; + bits[lIndex] |= (1L << bIndex); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ByteUtil.java index 9788fdbc674..928a5015a70 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ByteUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ByteUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,804 +39,796 @@ import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * * Byte utilities - * - * * @since 0.1 */ public class ByteUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(ByteUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ByteUtil.class); - public static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; - public static final ImmutableBytesPtr EMPTY_BYTE_ARRAY_PTR = new ImmutableBytesPtr( - EMPTY_BYTE_ARRAY); - public static final ImmutableBytesWritable EMPTY_IMMUTABLE_BYTE_ARRAY = new ImmutableBytesWritable( - EMPTY_BYTE_ARRAY); + public static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + public static final ImmutableBytesPtr EMPTY_BYTE_ARRAY_PTR = + new ImmutableBytesPtr(EMPTY_BYTE_ARRAY); + public static final ImmutableBytesWritable EMPTY_IMMUTABLE_BYTE_ARRAY = + new ImmutableBytesWritable(EMPTY_BYTE_ARRAY); + /** Mask for bit 0 of a byte. */ + private static final int BIT_0 = 0x01; - /** Mask for bit 0 of a byte. */ - private static final int BIT_0 = 0x01; + /** Mask for bit 1 of a byte. */ + private static final int BIT_1 = 0x02; - /** Mask for bit 1 of a byte. */ - private static final int BIT_1 = 0x02; + /** Mask for bit 2 of a byte. */ + private static final int BIT_2 = 0x04; - /** Mask for bit 2 of a byte. */ - private static final int BIT_2 = 0x04; + /** Mask for bit 3 of a byte. */ + private static final int BIT_3 = 0x08; - /** Mask for bit 3 of a byte. */ - private static final int BIT_3 = 0x08; + /** Mask for bit 4 of a byte. */ + private static final int BIT_4 = 0x10; - /** Mask for bit 4 of a byte. */ - private static final int BIT_4 = 0x10; + /** Mask for bit 5 of a byte. */ + private static final int BIT_5 = 0x20; - /** Mask for bit 5 of a byte. */ - private static final int BIT_5 = 0x20; + /** Mask for bit 6 of a byte. */ + private static final int BIT_6 = 0x40; - /** Mask for bit 6 of a byte. */ - private static final int BIT_6 = 0x40; + /** Mask for bit 7 of a byte. */ + private static final int BIT_7 = 0x80; - /** Mask for bit 7 of a byte. */ - private static final int BIT_7 = 0x80; + private static final int[] BITS = { BIT_7, BIT_6, BIT_5, BIT_4, BIT_3, BIT_2, BIT_1, BIT_0 }; - private static final int[] BITS = {BIT_7, BIT_6, BIT_5, BIT_4, BIT_3, BIT_2, BIT_1, BIT_0}; + public static final byte[] ZERO_BYTE = Bytes.toBytesBinary("\\x00"); - public static final byte[] ZERO_BYTE = Bytes.toBytesBinary("\\x00"); + public static final Comparator BYTES_PTR_COMPARATOR = + new Comparator() { - public static final Comparator BYTES_PTR_COMPARATOR = new Comparator() { + @Override + public int compare(ImmutableBytesPtr o1, ImmutableBytesPtr o2) { + return Bytes.compareTo(o1.get(), o1.getOffset(), o1.getLength(), o2.get(), o2.getOffset(), + o2.getLength()); + } - @Override - public int compare(ImmutableBytesPtr o1, ImmutableBytesPtr o2) { - return Bytes.compareTo(o1.get(), o1.getOffset(), o1.getLength(), o2.get(), o2.getOffset(), o2.getLength()); - } - }; - /** - * Serialize an array of byte arrays into a single byte array. 
Used - * to pass through a set of bytes arrays as an attribute of a Scan. - * Use {@link #toByteArrays(byte[], int)} to convert the serialized - * byte array back to the array of byte arrays. - * @param byteArrays the array of byte arrays to serialize - * @return the byte array - */ - public static byte[] toBytes(byte[][] byteArrays) { - int size = 0; - for (byte[] b : byteArrays) { - if (b == null) { - size++; - } else { - size += b.length; - size += WritableUtils.getVIntSize(b.length); - } - } - TrustedByteArrayOutputStream bytesOut = new TrustedByteArrayOutputStream(size); - DataOutputStream out = new DataOutputStream(bytesOut); - try { - for (byte[] b : byteArrays) { - if (b == null) { - WritableUtils.writeVInt(out, 0); - } else { - WritableUtils.writeVInt(out, b.length); - out.write(b); - } - } - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } finally { - try { - out.close(); - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } - } - return bytesOut.getBuffer(); - } - - /** - * Deserialize a byte array into a set of byte arrays. Used in - * coprocessor to reconstruct byte arrays from attribute value - * passed through the Scan. - * @param b byte array containing serialized byte arrays (created by {@link #toBytes(byte[][])}). - * @param length number of byte arrays that were serialized - * @return array of now deserialized byte arrays - * @throws IllegalStateException if there are more than length number of byte arrays that were serialized - */ - public static byte[][] toByteArrays(byte[] b, int length) { - return toByteArrays(b, 0, length); - } - - public static byte[][] toByteArrays(byte[] b, int offset, int length) { - ByteArrayInputStream bytesIn = new ByteArrayInputStream(b, offset, b.length - offset); - DataInputStream in = new DataInputStream(bytesIn); - byte[][] byteArrays = new byte[length][]; - try { - for (int i = 0; i < length; i++) { - int bLength = WritableUtils.readVInt(in); - if (bLength == 0) { - byteArrays[i] = null; - } else { - byteArrays[i] = new byte[bLength]; - int rLength = in.read(byteArrays[i], 0, bLength); - assert (rLength == bLength); // For find bugs - } - } - if (in.read() != -1) { - throw new IllegalStateException("Expected only " + length + " byte arrays, but found more"); - } - return byteArrays; - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } finally { - try { - in.close(); - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } - } - } - - public static byte[] serializeVIntArray(int[] intArray) { - return serializeVIntArray(intArray,intArray.length); - } - - public static byte[] serializeVIntArray(int[] intArray, int encodedLength) { - int size = WritableUtils.getVIntSize(encodedLength); - for (int i = 0; i < intArray.length; i++) { - size += WritableUtils.getVIntSize(intArray[i]); - } - int offset = 0; - byte[] out = new byte[size]; - offset += ByteUtil.vintToBytes(out, offset, size); - for (int i = 0; i < intArray.length; i++) { - offset += ByteUtil.vintToBytes(out, offset, intArray[i]); - } - return out; - } - - public static void serializeVIntArray(DataOutput output, int[] intArray) throws IOException { - serializeVIntArray(output, intArray, intArray.length); - } - - /** - * Allows additional stuff to be encoded in length - * @param output - * @param intArray - * @param encodedLength - * @throws IOException - */ - public static void serializeVIntArray(DataOutput output, int[] intArray, int encodedLength) throws IOException { - 
WritableUtils.writeVInt(output, encodedLength); - for (int i = 0; i < intArray.length; i++) { - WritableUtils.writeVInt(output, intArray[i]); - } - } - - public static long[] readFixedLengthLongArray(DataInput input, int length) throws IOException { - long[] longArray = new long[length]; - for (int i = 0; i < length; i++) { - longArray[i] = input.readLong(); - } - return longArray; - } - - public static void writeFixedLengthLongArray(DataOutput output, long[] longArray) throws IOException { - for (int i = 0; i < longArray.length; i++) { - output.writeLong(longArray[i]); - } - } - - /** - * Deserialize a byte array into a int array. - * @param b byte array storing serialized vints - * @return int array - */ - public static int[] deserializeVIntArray(byte[] b) { - ByteArrayInputStream bytesIn = new ByteArrayInputStream(b); - DataInputStream in = new DataInputStream(bytesIn); - try { - int length = WritableUtils.readVInt(in); - return deserializeVIntArray(in, length); - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } finally { - try { - in.close(); - } catch (IOException e) { - throw new RuntimeException(e); // not possible - } - } - } - - public static int[] deserializeVIntArray(DataInput in) throws IOException { - return deserializeVIntArray(in, WritableUtils.readVInt(in)); - } - - public static int[] deserializeVIntArray(DataInput in, int length) throws IOException { - int i = 0; - int[] intArray = new int[length]; - while (i < length) { - intArray[i++] = WritableUtils.readVInt(in); - } - return intArray; - } - - /** - * Deserialize a byte array into a int array. - * @param b byte array storing serialized vints - * @param length number of serialized vints - * @return int array - */ - public static int[] deserializeVIntArray(byte[] b, int length) { - ByteArrayInputStream bytesIn = new ByteArrayInputStream(b); - DataInputStream in = new DataInputStream(bytesIn); - try { - return deserializeVIntArray(in,length); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** - * Concatenate together one or more byte arrays - * @param first first byte array - * @param rest rest of byte arrays - * @return newly allocated byte array that is a concatenation of all the byte arrays passed in - */ - public static byte[] concat(byte[] first, byte[]... rest) { - int totalLength = first.length; - for (byte[] array : rest) { - totalLength += array.length; - } - byte[] result = Arrays.copyOf(first, totalLength); - int offset = first.length; - for (byte[] array : rest) { - System.arraycopy(array, 0, result, offset, array.length); - offset += array.length; - } - return result; + /** + * Serialize an array of byte arrays into a single byte array. Used to pass through a set of bytes + * arrays as an attribute of a Scan. Use {@link #toByteArrays(byte[], int)} to convert the + * serialized byte array back to the array of byte arrays. + * @param byteArrays the array of byte arrays to serialize + * @return the byte array + */ + public static byte[] toBytes(byte[][] byteArrays) { + int size = 0; + for (byte[] b : byteArrays) { + if (b == null) { + size++; + } else { + size += b.length; + size += WritableUtils.getVIntSize(b.length); + } } - - public static T[] concat(T[] first, T[]... 
rest) { - int totalLength = first.length; - for (T[] array : rest) { - totalLength += array.length; + TrustedByteArrayOutputStream bytesOut = new TrustedByteArrayOutputStream(size); + DataOutputStream out = new DataOutputStream(bytesOut); + try { + for (byte[] b : byteArrays) { + if (b == null) { + WritableUtils.writeVInt(out, 0); + } else { + WritableUtils.writeVInt(out, b.length); + out.write(b); } - T[] result = Arrays.copyOf(first, totalLength); - int offset = first.length; - for (T[] array : rest) { - System.arraycopy(array, 0, result, offset, array.length); - offset += array.length; - } - return result; + } + } catch (IOException e) { + throw new RuntimeException(e); // not possible + } finally { + try { + out.close(); + } catch (IOException e) { + throw new RuntimeException(e); // not possible + } } - - public static byte[] concat(SortOrder sortOrder, ImmutableBytesWritable... writables) { - Preconditions.checkNotNull(sortOrder); - int totalLength = 0; - for (ImmutableBytesWritable writable : writables) { - totalLength += writable.getLength(); + return bytesOut.getBuffer(); + } + + /** + * Deserialize a byte array into a set of byte arrays. Used in coprocessor to reconstruct byte + * arrays from attribute value passed through the Scan. + * @param b byte array containing serialized byte arrays (created by + * {@link #toBytes(byte[][])}). + * @param length number of byte arrays that were serialized + * @return array of now deserialized byte arrays + * @throws IllegalStateException if there are more than length number of byte arrays that were + * serialized + */ + public static byte[][] toByteArrays(byte[] b, int length) { + return toByteArrays(b, 0, length); + } + + public static byte[][] toByteArrays(byte[] b, int offset, int length) { + ByteArrayInputStream bytesIn = new ByteArrayInputStream(b, offset, b.length - offset); + DataInputStream in = new DataInputStream(bytesIn); + byte[][] byteArrays = new byte[length][]; + try { + for (int i = 0; i < length; i++) { + int bLength = WritableUtils.readVInt(in); + if (bLength == 0) { + byteArrays[i] = null; + } else { + byteArrays[i] = new byte[bLength]; + int rLength = in.read(byteArrays[i], 0, bLength); + assert (rLength == bLength); // For find bugs } - byte[] result = new byte[totalLength]; - int totalOffset = 0; - for (ImmutableBytesWritable array : writables) { - byte[] bytes = array.get(); - int offset = array.getOffset(); - if (sortOrder == SortOrder.DESC) { - bytes = SortOrder.invert(bytes, offset, new byte[array.getLength()], 0, array.getLength()); - offset = 0; - } - System.arraycopy(bytes, offset, result, totalOffset, array.getLength()); - totalOffset += array.getLength(); - } - return result; - } - - public static int vintFromBytes(byte[] buffer, int offset) { - return (int)Bytes.readAsVLong(buffer, offset); - } - - /** - * Decode a vint from the buffer pointed at to by ptr and - * increment the offset of the ptr by the length of the - * vint. - * @param ptr a pointer to a byte array buffer - * @return the decoded vint value as an int - */ - public static int vintFromBytes(ImmutableBytesWritable ptr) { - return (int) vlongFromBytes(ptr); - } - - /** - * Decode a vint from the buffer pointed at to by ptr and - * increment the offset of the ptr by the length of the - * vint. 
- * @param ptr a pointer to a byte array buffer - * @return the decoded vint value as a long - */ - public static long vlongFromBytes(ImmutableBytesWritable ptr) { - final byte [] buffer = ptr.get(); - final int offset = ptr.getOffset(); - byte firstByte = buffer[offset]; - int len = WritableUtils.decodeVIntSize(firstByte); - if (len == 1) { - ptr.set(buffer, offset+1, ptr.getLength()); - return firstByte; - } - long i = 0; - for (int idx = 0; idx < len-1; idx++) { - byte b = buffer[offset + 1 + idx]; - i = i << 8; - i = i | (b & 0xFF); - } - ptr.set(buffer, offset+len, ptr.getLength()); - return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); - } - - - /** - * Put long as variable length encoded number at the offset in the result byte array - * @param vint Integer to make a vint of. - * @param result buffer to put vint into - * @return Vint length in bytes of vint - */ - public static int vintToBytes(byte[] result, int offset, final long vint) { - long i = vint; - if (i >= -112 && i <= 127) { - result[offset] = (byte) i; - return 1; } - - int len = -112; - if (i < 0) { - i ^= -1L; // take one's complement' - len = -120; + if (in.read() != -1) { + throw new IllegalStateException("Expected only " + length + " byte arrays, but found more"); } - - long tmp = i; - while (tmp != 0) { - tmp = tmp >> 8; - len--; - } - - result[offset++] = (byte) len; - - len = (len < -120) ? -(len + 120) : -(len + 112); - - for (int idx = len; idx != 0; idx--) { - int shiftbits = (idx - 1) * 8; - long mask = 0xFFL << shiftbits; - result[offset++] = (byte)((i & mask) >> shiftbits); + return byteArrays; + } catch (IOException e) { + throw new RuntimeException(e); // not possible + } finally { + try { + in.close(); + } catch (IOException e) { + throw new RuntimeException(e); // not possible } - return len + 1; - } - - /** - * Increment the key to the next key - * @param key the key to increment - * @return a new byte array with the next key or null - * if the key could not be incremented because it's - * already at its max value. - */ - public static byte[] nextKey(byte[] key) { - byte[] nextStartRow = new byte[key.length]; - System.arraycopy(key, 0, nextStartRow, 0, key.length); - if (!nextKey(nextStartRow, nextStartRow.length)) { - return null; - } - return nextStartRow; - } - - /** - * Increment the key in-place to the next key - * @param key the key to increment - * @param length the length of the key - * @return true if the key can be incremented and - * false otherwise if the key is at its max - * value. - */ - public static boolean nextKey(byte[] key, int length) { - return nextKey(key, 0, length); - } - - public static boolean nextKey(byte[] key, int offset, int length) { - if (length == 0) { - return false; - } - int i = offset + length - 1; - while (key[i] == -1) { - key[i] = 0; - i--; - if (i < offset) { - // Change bytes back to the way they were - do { - key[++i] = -1; - } while (i < offset + length - 1); - return false; - } - } - key[i] = (byte)(key[i] + 1); - return true; - } - - public static byte[] previousKey(byte[] key) { - byte[] previousKey = new byte[key.length]; - System.arraycopy(key, 0, previousKey, 0, key.length); - if (!previousKey(previousKey, previousKey.length)) { - return null; - } - return previousKey; - } - - /** - * Best attempt to generate largest rowkey smaller than endKey i.e. largest rowkey in the - * range of [startKey, endKey). If startKey and endKey are empty, the empty key is returned. - * This function is used to return valid rowkey for some ungrouped aggregation e.g. 
while - * returning count value after scanning the rows. If any error or validation issues (e.g. - * startKey > endKey) are encountered, null value is returned. - * - * @param startKey start rowkey for the range. - * @param endKey end rowkey for the range. - * @return best attempt of largest rowkey in the range of [startKey, endKey). - */ - public static byte[] getLargestPossibleRowKeyInRange(byte[] startKey, byte[] endKey) { - if (startKey.length == 0 && endKey.length == 0) { - return HConstants.EMPTY_END_ROW; - } - byte[] rowKey; - try { - if (startKey.length > 0 && endKey.length > 0) { - int commonBytesIdx = 0; - while (commonBytesIdx < startKey.length && commonBytesIdx < endKey.length) { - if (startKey[commonBytesIdx] == endKey[commonBytesIdx]) { - commonBytesIdx++; - } else { - break; - } - } - if (commonBytesIdx == 0) { - rowKey = ByteUtil.previousKeyWithLength(ByteUtil.concat(endKey, - new byte[startKey.length + 1]), - Math.max(endKey.length, startKey.length) + 1); - } else { - byte[] newStartKey; - byte[] newEndKey; - if (commonBytesIdx < startKey.length) { - newStartKey = new byte[startKey.length - commonBytesIdx]; - System.arraycopy(startKey, commonBytesIdx, newStartKey, 0, - newStartKey.length); - } else { - newStartKey = startKey; - } - if (commonBytesIdx < endKey.length) { - newEndKey = new byte[endKey.length - commonBytesIdx]; - System.arraycopy(endKey, commonBytesIdx, newEndKey, 0, newEndKey.length); - } else { - newEndKey = endKey; - } - byte[] commonBytes = new byte[commonBytesIdx]; - System.arraycopy(startKey, 0, commonBytes, 0, commonBytesIdx); - byte[] tmpRowKey = ByteUtil.previousKeyWithLength(ByteUtil.concat(newEndKey, - new byte[newStartKey.length + 1]), - Math.max(newEndKey.length, newStartKey.length) + 1); - // tmpRowKey can be null if newEndKey has only \x00 bytes - if (tmpRowKey == null) { - tmpRowKey = new byte[newEndKey.length - 1]; - System.arraycopy(newEndKey, 0, tmpRowKey, 0, tmpRowKey.length); - rowKey = ByteUtil.concat(commonBytes, tmpRowKey); - } else { - rowKey = ByteUtil.concat(commonBytes, tmpRowKey); - } - } - } else if (endKey.length > 0) { - rowKey = ByteUtil.previousKeyWithLength(ByteUtil.concat(endKey, - new byte[1]), endKey.length + 1); - } else { - rowKey = ByteUtil.nextKeyWithLength(ByteUtil.concat(startKey, - new byte[1]), startKey.length + 1); - } - if (rowKey == null) { - LOGGER.error("Unexpected result while retrieving rowkey in range ({} , {})", - Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey)); - return null; - } - if (Bytes.compareTo(startKey, rowKey) >= 0 - || Bytes.compareTo(rowKey, endKey) >= 0) { - LOGGER.error("Unexpected result while comparing result rowkey in range " - + "({} , {}) , rowKey: {}", - Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey), - Bytes.toStringBinary(rowKey)); - return null; - } - } catch (Exception e) { - LOGGER.error("Error while retrieving rowkey in range ({} , {})", - Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey)); - return null; - } - return rowKey; - } - - public static byte[] previousKeyWithLength(byte[] key, int length) { - Preconditions.checkArgument(key.length >= length, "Key length " + key.length + " is " - + "less than least expected length " + length); - byte[] previousKey = new byte[length]; - System.arraycopy(key, 0, previousKey, 0, length); - if (!previousKey(previousKey, length)) { - return null; - } - return previousKey; - } - - public static byte[] nextKeyWithLength(byte[] key, int length) { - Preconditions.checkArgument(key.length >= length, "Key 
length " + key.length + " is " - + "less than least expected length " + length); - byte[] nextStartRow = new byte[length]; - System.arraycopy(key, 0, nextStartRow, 0, length); - if (!nextKey(nextStartRow, length)) { - return null; - } - return nextStartRow; - } - - public static boolean previousKey(byte[] key, int length) { - return previousKey(key, 0, length); - } - - public static boolean previousKey(byte[] key, int offset, int length) { - if (length == 0) { - return false; - } - int i = offset + length - 1; - while (key[i] == 0) { - key[i] = -1; - i--; - if (i < offset) { - // Change bytes back to the way they were - do { - key[++i] = 0; - } while (i < offset + length - 1); - return false; - } - } - key[i] = (byte)(key[i] - 1); - return true; } - - /** - * Expand the key to length bytes using a null byte. - */ - public static byte[] fillKey(byte[] key, int length) { - if(key.length > length) { - throw new IllegalStateException(); - } - if (key.length == length) { - return key; - } - byte[] newBound = new byte[length]; - System.arraycopy(key, 0, newBound, 0, key.length); - return newBound; + } + + public static byte[] serializeVIntArray(int[] intArray) { + return serializeVIntArray(intArray, intArray.length); + } + + public static byte[] serializeVIntArray(int[] intArray, int encodedLength) { + int size = WritableUtils.getVIntSize(encodedLength); + for (int i = 0; i < intArray.length; i++) { + size += WritableUtils.getVIntSize(intArray[i]); + } + int offset = 0; + byte[] out = new byte[size]; + offset += ByteUtil.vintToBytes(out, offset, size); + for (int i = 0; i < intArray.length; i++) { + offset += ByteUtil.vintToBytes(out, offset, intArray[i]); + } + return out; + } + + public static void serializeVIntArray(DataOutput output, int[] intArray) throws IOException { + serializeVIntArray(output, intArray, intArray.length); + } + + /** + * Allows additional stuff to be encoded in length + */ + public static void serializeVIntArray(DataOutput output, int[] intArray, int encodedLength) + throws IOException { + WritableUtils.writeVInt(output, encodedLength); + for (int i = 0; i < intArray.length; i++) { + WritableUtils.writeVInt(output, intArray[i]); + } + } + + public static long[] readFixedLengthLongArray(DataInput input, int length) throws IOException { + long[] longArray = new long[length]; + for (int i = 0; i < length; i++) { + longArray[i] = input.readLong(); + } + return longArray; + } + + public static void writeFixedLengthLongArray(DataOutput output, long[] longArray) + throws IOException { + for (int i = 0; i < longArray.length; i++) { + output.writeLong(longArray[i]); + } + } + + /** + * Deserialize a byte array into a int array. 
+ * @param b byte array storing serialized vints + * @return int array + */ + public static int[] deserializeVIntArray(byte[] b) { + ByteArrayInputStream bytesIn = new ByteArrayInputStream(b); + DataInputStream in = new DataInputStream(bytesIn); + try { + int length = WritableUtils.readVInt(in); + return deserializeVIntArray(in, length); + } catch (IOException e) { + throw new RuntimeException(e); // not possible + } finally { + try { + in.close(); + } catch (IOException e) { + throw new RuntimeException(e); // not possible + } } - - public static boolean isEmptyOrNull(byte[] b, int offset, int length) { - if ((b == null) || (Bytes.compareTo(b, HConstants.EMPTY_BYTE_ARRAY) == 0 )) { - return true; - } - if (b.length < offset + length) { - throw new IllegalStateException(); - } - boolean result = true; - for (int i = (offset + length) - 1; i >= offset; i--) { - if (b[i] != 0x0) { - result = false; - break; - } - } - return result; + } + + public static int[] deserializeVIntArray(DataInput in) throws IOException { + return deserializeVIntArray(in, WritableUtils.readVInt(in)); + } + + public static int[] deserializeVIntArray(DataInput in, int length) throws IOException { + int i = 0; + int[] intArray = new int[length]; + while (i < length) { + intArray[i++] = WritableUtils.readVInt(in); + } + return intArray; + } + + /** + * Deserialize a byte array into a int array. + * @param b byte array storing serialized vints + * @param length number of serialized vints + * @return int array + */ + public static int[] deserializeVIntArray(byte[] b, int length) { + ByteArrayInputStream bytesIn = new ByteArrayInputStream(b); + DataInputStream in = new DataInputStream(bytesIn); + try { + return deserializeVIntArray(in, length); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Concatenate together one or more byte arrays + * @param first first byte array + * @param rest rest of byte arrays + * @return newly allocated byte array that is a concatenation of all the byte arrays passed in + */ + public static byte[] concat(byte[] first, byte[]... rest) { + int totalLength = first.length; + for (byte[] array : rest) { + totalLength += array.length; + } + byte[] result = Arrays.copyOf(first, totalLength); + int offset = first.length; + for (byte[] array : rest) { + System.arraycopy(array, 0, result, offset, array.length); + offset += array.length; + } + return result; + } + + public static T[] concat(T[] first, T[]... rest) { + int totalLength = first.length; + for (T[] array : rest) { + totalLength += array.length; + } + T[] result = Arrays.copyOf(first, totalLength); + int offset = first.length; + for (T[] array : rest) { + System.arraycopy(array, 0, result, offset, array.length); + offset += array.length; + } + return result; + } + + public static byte[] concat(SortOrder sortOrder, ImmutableBytesWritable... 
writables) { + Preconditions.checkNotNull(sortOrder); + int totalLength = 0; + for (ImmutableBytesWritable writable : writables) { + totalLength += writable.getLength(); + } + byte[] result = new byte[totalLength]; + int totalOffset = 0; + for (ImmutableBytesWritable array : writables) { + byte[] bytes = array.get(); + int offset = array.getOffset(); + if (sortOrder == SortOrder.DESC) { + bytes = SortOrder.invert(bytes, offset, new byte[array.getLength()], 0, array.getLength()); + offset = 0; + } + System.arraycopy(bytes, offset, result, totalOffset, array.getLength()); + totalOffset += array.getLength(); + } + return result; + } + + public static int vintFromBytes(byte[] buffer, int offset) { + return (int) Bytes.readAsVLong(buffer, offset); + } + + /** + * Decode a vint from the buffer pointed at to by ptr and increment the offset of the ptr by the + * length of the vint. + * @param ptr a pointer to a byte array buffer + * @return the decoded vint value as an int + */ + public static int vintFromBytes(ImmutableBytesWritable ptr) { + return (int) vlongFromBytes(ptr); + } + + /** + * Decode a vint from the buffer pointed at to by ptr and increment the offset of the ptr by the + * length of the vint. + * @param ptr a pointer to a byte array buffer + * @return the decoded vint value as a long + */ + public static long vlongFromBytes(ImmutableBytesWritable ptr) { + final byte[] buffer = ptr.get(); + final int offset = ptr.getOffset(); + byte firstByte = buffer[offset]; + int len = WritableUtils.decodeVIntSize(firstByte); + if (len == 1) { + ptr.set(buffer, offset + 1, ptr.getLength()); + return firstByte; + } + long i = 0; + for (int idx = 0; idx < len - 1; idx++) { + byte b = buffer[offset + 1 + idx]; + i = i << 8; + i = i | (b & 0xFF); + } + ptr.set(buffer, offset + len, ptr.getLength()); + return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); + } + + /** + * Put long as variable length encoded number at the offset in the result byte array + * @param vint Integer to make a vint of. + * @param result buffer to put vint into + * @return Vint length in bytes of vint + */ + public static int vintToBytes(byte[] result, int offset, final long vint) { + long i = vint; + if (i >= -112 && i <= 127) { + result[offset] = (byte) i; + return 1; + } + + int len = -112; + if (i < 0) { + i ^= -1L; // take one's complement' + len = -120; + } + + long tmp = i; + while (tmp != 0) { + tmp = tmp >> 8; + len--; + } + + result[offset++] = (byte) len; + + len = (len < -120) ? -(len + 120) : -(len + 112); + + for (int idx = len; idx != 0; idx--) { + int shiftbits = (idx - 1) * 8; + long mask = 0xFFL << shiftbits; + result[offset++] = (byte) ((i & mask) >> shiftbits); + } + return len + 1; + } + + /** + * Increment the key to the next key + * @param key the key to increment + * @return a new byte array with the next key or null if the key could not be incremented because + * it's already at its max value. + */ + public static byte[] nextKey(byte[] key) { + byte[] nextStartRow = new byte[key.length]; + System.arraycopy(key, 0, nextStartRow, 0, key.length); + if (!nextKey(nextStartRow, nextStartRow.length)) { + return null; + } + return nextStartRow; + } + + /** + * Increment the key in-place to the next key + * @param key the key to increment + * @param length the length of the key + * @return true if the key can be incremented and false otherwise if the key is at its max value. 
+ */ + public static boolean nextKey(byte[] key, int length) { + return nextKey(key, 0, length); + } + + public static boolean nextKey(byte[] key, int offset, int length) { + if (length == 0) { + return false; + } + int i = offset + length - 1; + while (key[i] == -1) { + key[i] = 0; + i--; + if (i < offset) { + // Change bytes back to the way they were + do { + key[++i] = -1; + } while (i < offset + length - 1); + return false; + } } - - /** - * Expand the key to length bytes using the fillByte to fill the - * bytes beyond the current key length. - */ - public static void nullPad(ImmutableBytesWritable ptr, int length) { - if(ptr.getLength() > length) { - throw new IllegalStateException(); - } - if (ptr.getLength() == length) { - return; - } - byte[] newBound = new byte[length]; - System.arraycopy(ptr.get(), ptr.getOffset(), newBound, 0, ptr.getLength()); - ptr.set(newBound); - } - - /** - * Get the size in bytes of the UTF-8 encoded CharSequence - * @param sequence the CharSequence - */ - public static int getSize(CharSequence sequence) { - int count = 0; - for (int i = 0, len = sequence.length(); i < len; i++) { - char ch = sequence.charAt(i); - if (ch <= 0x7F) { - count++; - } else if (ch <= 0x7FF) { - count += 2; - } else if (Character.isHighSurrogate(ch)) { - count += 4; - ++i; + key[i] = (byte) (key[i] + 1); + return true; + } + + public static byte[] previousKey(byte[] key) { + byte[] previousKey = new byte[key.length]; + System.arraycopy(key, 0, previousKey, 0, key.length); + if (!previousKey(previousKey, previousKey.length)) { + return null; + } + return previousKey; + } + + /** + * Best attempt to generate largest rowkey smaller than endKey i.e. largest rowkey in the range of + * [startKey, endKey). If startKey and endKey are empty, the empty key is returned. This function + * is used to return valid rowkey for some ungrouped aggregation e.g. while returning count value + * after scanning the rows. If any error or validation issues (e.g. startKey > endKey) are + * encountered, null value is returned. + * @param startKey start rowkey for the range. + * @param endKey end rowkey for the range. + * @return best attempt of largest rowkey in the range of [startKey, endKey). 
+ */ + public static byte[] getLargestPossibleRowKeyInRange(byte[] startKey, byte[] endKey) { + if (startKey.length == 0 && endKey.length == 0) { + return HConstants.EMPTY_END_ROW; + } + byte[] rowKey; + try { + if (startKey.length > 0 && endKey.length > 0) { + int commonBytesIdx = 0; + while (commonBytesIdx < startKey.length && commonBytesIdx < endKey.length) { + if (startKey[commonBytesIdx] == endKey[commonBytesIdx]) { + commonBytesIdx++; } else { - count += 3; + break; } } - return count; - } - - public static boolean isInclusive(CompareOperator op) { - switch (op) { - case LESS: - case GREATER: - return false; - case EQUAL: - case NOT_EQUAL: - case LESS_OR_EQUAL: - case GREATER_OR_EQUAL: - return true; - default: - throw new RuntimeException("Unknown Compare op " + op.name()); + if (commonBytesIdx == 0) { + rowKey = + ByteUtil.previousKeyWithLength(ByteUtil.concat(endKey, new byte[startKey.length + 1]), + Math.max(endKey.length, startKey.length) + 1); + } else { + byte[] newStartKey; + byte[] newEndKey; + if (commonBytesIdx < startKey.length) { + newStartKey = new byte[startKey.length - commonBytesIdx]; + System.arraycopy(startKey, commonBytesIdx, newStartKey, 0, newStartKey.length); + } else { + newStartKey = startKey; + } + if (commonBytesIdx < endKey.length) { + newEndKey = new byte[endKey.length - commonBytesIdx]; + System.arraycopy(endKey, commonBytesIdx, newEndKey, 0, newEndKey.length); + } else { + newEndKey = endKey; + } + byte[] commonBytes = new byte[commonBytesIdx]; + System.arraycopy(startKey, 0, commonBytes, 0, commonBytesIdx); + byte[] tmpRowKey = ByteUtil.previousKeyWithLength( + ByteUtil.concat(newEndKey, new byte[newStartKey.length + 1]), + Math.max(newEndKey.length, newStartKey.length) + 1); + // tmpRowKey can be null if newEndKey has only \x00 bytes + if (tmpRowKey == null) { + tmpRowKey = new byte[newEndKey.length - 1]; + System.arraycopy(newEndKey, 0, tmpRowKey, 0, tmpRowKey.length); + rowKey = ByteUtil.concat(commonBytes, tmpRowKey); + } else { + rowKey = ByteUtil.concat(commonBytes, tmpRowKey); + } } + } else if (endKey.length > 0) { + rowKey = + ByteUtil.previousKeyWithLength(ByteUtil.concat(endKey, new byte[1]), endKey.length + 1); + } else { + rowKey = + ByteUtil.nextKeyWithLength(ByteUtil.concat(startKey, new byte[1]), startKey.length + 1); + } + if (rowKey == null) { + LOGGER.error("Unexpected result while retrieving rowkey in range ({} , {})", + Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey)); + return null; + } + if (Bytes.compareTo(startKey, rowKey) >= 0 || Bytes.compareTo(rowKey, endKey) >= 0) { + LOGGER.error( + "Unexpected result while comparing result rowkey in range " + "({} , {}) , rowKey: {}", + Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey), + Bytes.toStringBinary(rowKey)); + return null; + } + } catch (Exception e) { + LOGGER.error("Error while retrieving rowkey in range ({} , {})", + Bytes.toStringBinary(startKey), Bytes.toStringBinary(endKey)); + return null; + } + return rowKey; + } + + public static byte[] previousKeyWithLength(byte[] key, int length) { + Preconditions.checkArgument(key.length >= length, + "Key length " + key.length + " is " + "less than least expected length " + length); + byte[] previousKey = new byte[length]; + System.arraycopy(key, 0, previousKey, 0, length); + if (!previousKey(previousKey, length)) { + return null; + } + return previousKey; + } + + public static byte[] nextKeyWithLength(byte[] key, int length) { + Preconditions.checkArgument(key.length >= length, + "Key length " + key.length + 
" is " + "less than least expected length " + length); + byte[] nextStartRow = new byte[length]; + System.arraycopy(key, 0, nextStartRow, 0, length); + if (!nextKey(nextStartRow, length)) { + return null; + } + return nextStartRow; + } + + public static boolean previousKey(byte[] key, int length) { + return previousKey(key, 0, length); + } + + public static boolean previousKey(byte[] key, int offset, int length) { + if (length == 0) { + return false; + } + int i = offset + length - 1; + while (key[i] == 0) { + key[i] = -1; + i--; + if (i < offset) { + // Change bytes back to the way they were + do { + key[++i] = 0; + } while (i < offset + length - 1); + return false; + } } - public static boolean compare(CompareOperator op, int compareResult) { - switch (op) { - case LESS: - return compareResult < 0; - case LESS_OR_EQUAL: - return compareResult <= 0; - case EQUAL: - return compareResult == 0; - case NOT_EQUAL: - return compareResult != 0; - case GREATER_OR_EQUAL: - return compareResult >= 0; - case GREATER: - return compareResult > 0; - default: - throw new RuntimeException("Unknown Compare op " + op.name()); - } + key[i] = (byte) (key[i] - 1); + return true; + } + + /** + * Expand the key to length bytes using a null byte. + */ + public static byte[] fillKey(byte[] key, int length) { + if (key.length > length) { + throw new IllegalStateException(); + } + if (key.length == length) { + return key; + } + byte[] newBound = new byte[length]; + System.arraycopy(key, 0, newBound, 0, key.length); + return newBound; + } + + public static boolean isEmptyOrNull(byte[] b, int offset, int length) { + if ((b == null) || (Bytes.compareTo(b, HConstants.EMPTY_BYTE_ARRAY) == 0)) { + return true; + } + if (b.length < offset + length) { + throw new IllegalStateException(); + } + boolean result = true; + for (int i = (offset + length) - 1; i >= offset; i--) { + if (b[i] != 0x0) { + result = false; + break; + } } - - /** - * Given an ImmutableBytesWritable, returns the payload part of the argument as an byte array. - */ - public static byte[] copyKeyBytesIfNecessary(ImmutableBytesWritable ptr) { - if (ptr.getOffset() == 0 && ptr.getLength() == ptr.get().length) { - return ptr.get(); - } - return ptr.copyBytes(); - } - - public static KeyRange getKeyRange(byte[] key, SortOrder order, CompareOperator op, - PDataType type) { - op = order.transform(op); - switch (op) { - case EQUAL: - return type.getKeyRange(key, true, key, true, order); - case GREATER: - return type.getKeyRange(key, false, KeyRange.UNBOUND, false, order); - case GREATER_OR_EQUAL: - return type.getKeyRange(key, true, KeyRange.UNBOUND, false, order); - case LESS: - return type.getKeyRange(KeyRange.UNBOUND, false, key, false, order); - case LESS_OR_EQUAL: - return type.getKeyRange(KeyRange.UNBOUND, false, key, true, order); - default: - throw new IllegalArgumentException("Unknown operator " + op); - } + return result; + } + + /** + * Expand the key to length bytes using the fillByte to fill the bytes beyond the current key + * length. 
+ */ + public static void nullPad(ImmutableBytesWritable ptr, int length) { + if (ptr.getLength() > length) { + throw new IllegalStateException(); + } + if (ptr.getLength() == length) { + return; + } + byte[] newBound = new byte[length]; + System.arraycopy(ptr.get(), ptr.getOffset(), newBound, 0, ptr.getLength()); + ptr.set(newBound); + } + + /** + * Get the size in bytes of the UTF-8 encoded CharSequence + * @param sequence the CharSequence + */ + public static int getSize(CharSequence sequence) { + int count = 0; + for (int i = 0, len = sequence.length(); i < len; i++) { + char ch = sequence.charAt(i); + if (ch <= 0x7F) { + count++; + } else if (ch <= 0x7FF) { + count += 2; + } else if (Character.isHighSurrogate(ch)) { + count += 4; + ++i; + } else { + count += 3; + } } + return count; + } - public static boolean contains(Collection keys, byte[] key) { - for (byte[] k : keys) { - if (Arrays.equals(k, key)) { return true; } - } + public static boolean isInclusive(CompareOperator op) { + switch (op) { + case LESS: + case GREATER: return false; + case EQUAL: + case NOT_EQUAL: + case LESS_OR_EQUAL: + case GREATER_OR_EQUAL: + return true; + default: + throw new RuntimeException("Unknown Compare op " + op.name()); + } + } + + public static boolean compare(CompareOperator op, int compareResult) { + switch (op) { + case LESS: + return compareResult < 0; + case LESS_OR_EQUAL: + return compareResult <= 0; + case EQUAL: + return compareResult == 0; + case NOT_EQUAL: + return compareResult != 0; + case GREATER_OR_EQUAL: + return compareResult >= 0; + case GREATER: + return compareResult > 0; + default: + throw new RuntimeException("Unknown Compare op " + op.name()); + } + } + + /** + * Given an ImmutableBytesWritable, returns the payload part of the argument as an byte array. 
+ */ + public static byte[] copyKeyBytesIfNecessary(ImmutableBytesWritable ptr) { + if (ptr.getOffset() == 0 && ptr.getLength() == ptr.get().length) { + return ptr.get(); + } + return ptr.copyBytes(); + } + + public static KeyRange getKeyRange(byte[] key, SortOrder order, CompareOperator op, + PDataType type) { + op = order.transform(op); + switch (op) { + case EQUAL: + return type.getKeyRange(key, true, key, true, order); + case GREATER: + return type.getKeyRange(key, false, KeyRange.UNBOUND, false, order); + case GREATER_OR_EQUAL: + return type.getKeyRange(key, true, KeyRange.UNBOUND, false, order); + case LESS: + return type.getKeyRange(KeyRange.UNBOUND, false, key, false, order); + case LESS_OR_EQUAL: + return type.getKeyRange(KeyRange.UNBOUND, false, key, true, order); + default: + throw new IllegalArgumentException("Unknown operator " + op); + } + } + + public static boolean contains(Collection keys, byte[] key) { + for (byte[] k : keys) { + if (Arrays.equals(k, key)) { + return true; + } } + return false; + } - public static boolean contains(List keys, ImmutableBytesPtr key) { - for (ImmutableBytesPtr k : keys) { - if (key.compareTo(k) == 0) { return true; } - } - return false; + public static boolean contains(List keys, ImmutableBytesPtr key) { + for (ImmutableBytesPtr k : keys) { + if (key.compareTo(k) == 0) { + return true; + } } + return false; + } - public static boolean match(Set keys, Set keys2) { - if (keys == keys2) return true; - if (keys == null || keys2 == null) return false; + public static boolean match(Set keys, Set keys2) { + if (keys == keys2) return true; + if (keys == null || keys2 == null) return false; - int size = keys.size(); - if (keys2.size() != size) return false; - for (byte[] k : keys) { - if (!contains(keys2, k)) { return false; } - } - return true; + int size = keys.size(); + if (keys2.size() != size) return false; + for (byte[] k : keys) { + if (!contains(keys2, k)) { + return false; + } } + return true; + } - public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) { - // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually. - // Search for the place where the trailing 0xFFs start - int offset = rowKeyPrefix.length; - while (offset > 0) { - if (rowKeyPrefix[offset - 1] != (byte) 0xFF) { - break; - } - offset--; - } - if (offset == 0) { - // We got an 0xFFFF... (only FFs) stopRow value which is - // the last possible prefix before the end of the table. 
- // So set it to stop at the 'end of the table' - return HConstants.EMPTY_END_ROW; - } - // Copy the right length of the original - byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset); - // And increment the last one - newStopRow[newStopRow.length - 1]++; - return newStopRow; - } - - public static byte[][] splitArrayBySeparator(byte[] src, byte separator){ - List separatorLocations = new ArrayList(); - for (int k = 0; k < src.length; k++){ - if (src[k] == separator){ - separatorLocations.add(k); - } - } - byte[][] dst = new byte[separatorLocations.size() +1][]; - int previousSepartor = -1; - for (int j = 0; j < separatorLocations.size(); j++){ - int separatorLocation = separatorLocations.get(j); - dst[j] = Bytes.copy(src, previousSepartor +1, separatorLocation- previousSepartor -1); - previousSepartor = separatorLocation; - } - if (previousSepartor < src.length){ - dst[separatorLocations.size()] = Bytes.copy(src, - previousSepartor +1, src.length - previousSepartor -1); - } - return dst; + public static byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) { + // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually. + // Search for the place where the trailing 0xFFs start + int offset = rowKeyPrefix.length; + while (offset > 0) { + if (rowKeyPrefix[offset - 1] != (byte) 0xFF) { + break; + } + offset--; + } + if (offset == 0) { + // We got an 0xFFFF... (only FFs) stopRow value which is + // the last possible prefix before the end of the table. + // So set it to stop at the 'end of the table' + return HConstants.EMPTY_END_ROW; + } + // Copy the right length of the original + byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset); + // And increment the last one + newStopRow[newStopRow.length - 1]++; + return newStopRow; + } + + public static byte[][] splitArrayBySeparator(byte[] src, byte separator) { + List separatorLocations = new ArrayList(); + for (int k = 0; k < src.length; k++) { + if (src[k] == separator) { + separatorLocations.add(k); + } } - - // Adapted from the Commons Codec BinaryCodec, but treat the input as a byte sequence, without - // the endinanness reversion in the original code - public static byte[] fromAscii(final char[] ascii) { - if (ascii == null || ascii.length == 0) { - return EMPTY_BYTE_ARRAY; + byte[][] dst = new byte[separatorLocations.size() + 1][]; + int previousSepartor = -1; + for (int j = 0; j < separatorLocations.size(); j++) { + int separatorLocation = separatorLocations.get(j); + dst[j] = Bytes.copy(src, previousSepartor + 1, separatorLocation - previousSepartor - 1); + previousSepartor = separatorLocation; + } + if (previousSepartor < src.length) { + dst[separatorLocations.size()] = + Bytes.copy(src, previousSepartor + 1, src.length - previousSepartor - 1); + } + return dst; + } + + // Adapted from the Commons Codec BinaryCodec, but treat the input as a byte sequence, without + // the endinanness reversion in the original code + public static byte[] fromAscii(final char[] ascii) { + if (ascii == null || ascii.length == 0) { + return EMPTY_BYTE_ARRAY; + } + final int asciiLength = ascii.length; + // get length/8 times bytes with 3 bit shifts to the right of the length + final byte[] l_raw = new byte[asciiLength >> 3]; + // We incr index jj by 8 as we go along to not recompute indices using multiplication every + // time inside the loop. 
+ for (int ii = 0, jj = 0; ii < l_raw.length; ii++, jj += 8) { + for (int bits = 0; bits < BITS.length; ++bits) { + if (ascii[jj + bits] == '1') { + l_raw[ii] |= BITS[bits]; } - final int asciiLength = ascii.length; - // get length/8 times bytes with 3 bit shifts to the right of the length - final byte[] l_raw = new byte[asciiLength >> 3]; - // We incr index jj by 8 as we go along to not recompute indices using multiplication every - // time inside the loop. - for (int ii = 0, jj = 0; ii < l_raw.length; ii++, jj += 8) { - for (int bits = 0; bits < BITS.length; ++bits) { - if (ascii[jj + bits] == '1') { - l_raw[ii] |= BITS[bits]; - } - } - } - return l_raw; + } } + return l_raw; + } - /** - * Create the closest row after the specified row - */ - public static byte[] closestPossibleRowAfter(byte[] row) { - return Arrays.copyOf(row, row.length + 1); - } + /** + * Create the closest row after the specified row + */ + public static byte[] closestPossibleRowAfter(byte[] row) { + return Arrays.copyOf(row, row.length + 1); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCChangeBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCChangeBuilder.java index 4bd2567ddf4..916917fc248 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCChangeBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCChangeBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util; -import org.apache.hadoop.hbase.Cell; -import org.apache.phoenix.index.CDCTableInfo; -import org.apache.phoenix.schema.PTable; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - import static org.apache.phoenix.query.QueryConstants.CDC_CHANGE_IMAGE; import static org.apache.phoenix.query.QueryConstants.CDC_DELETE_EVENT_TYPE; import static org.apache.phoenix.query.QueryConstants.CDC_EVENT_TYPE; @@ -33,119 +24,125 @@ import static org.apache.phoenix.query.QueryConstants.CDC_PRE_IMAGE; import static org.apache.phoenix.query.QueryConstants.CDC_UPSERT_EVENT_TYPE; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.hbase.Cell; +import org.apache.phoenix.index.CDCTableInfo; +import org.apache.phoenix.schema.PTable; + public class CDCChangeBuilder { - private final boolean isChangeImageInScope; - private final boolean isPreImageInScope; - private final boolean isPostImageInScope; - private final CDCTableInfo cdcDataTableInfo; - private String changeType; - private long lastDeletedTimestamp; - private long changeTimestamp; - private Map preImage = null; - private Map changeImage = null; - - public CDCChangeBuilder(CDCTableInfo cdcDataTableInfo) { - this.cdcDataTableInfo = cdcDataTableInfo; - Set changeScopes = cdcDataTableInfo.getIncludeScopes(); - isChangeImageInScope = changeScopes.contains(PTable.CDCChangeScope.CHANGE); - isPreImageInScope = changeScopes.contains(PTable.CDCChangeScope.PRE); - isPostImageInScope = changeScopes.contains(PTable.CDCChangeScope.POST); + private final boolean isChangeImageInScope; + private final boolean isPreImageInScope; + private final boolean isPostImageInScope; + private final CDCTableInfo cdcDataTableInfo; + private String changeType; + private long lastDeletedTimestamp; + private long changeTimestamp; + private Map preImage = null; + private Map changeImage = null; + + public CDCChangeBuilder(CDCTableInfo cdcDataTableInfo) { + this.cdcDataTableInfo = cdcDataTableInfo; + Set changeScopes = cdcDataTableInfo.getIncludeScopes(); + isChangeImageInScope = changeScopes.contains(PTable.CDCChangeScope.CHANGE); + isPreImageInScope = changeScopes.contains(PTable.CDCChangeScope.PRE); + isPostImageInScope = changeScopes.contains(PTable.CDCChangeScope.POST); + } + + public void initChange(long ts) { + changeTimestamp = ts; + changeType = null; + lastDeletedTimestamp = 0L; + if (isPreImageInScope || isPostImageInScope) { + preImage = new HashMap<>(); } - - public void initChange(long ts) { - changeTimestamp = ts; - changeType = null; - lastDeletedTimestamp = 0L; - if (isPreImageInScope || isPostImageInScope) { - preImage = new HashMap<>(); - } - if (isChangeImageInScope || isPostImageInScope) { - changeImage = new HashMap<>(); - } + if (isChangeImageInScope || isPostImageInScope) { + changeImage = new HashMap<>(); } + } - public long getChangeTimestamp() { - return changeTimestamp; - } + public long getChangeTimestamp() { + return changeTimestamp; + } - public boolean isDeletionEvent() { - return changeType == CDC_DELETE_EVENT_TYPE; - } + public boolean isDeletionEvent() { + return changeType == CDC_DELETE_EVENT_TYPE; + } - public boolean isNonEmptyEvent() { - return changeType != null; - } + public boolean isNonEmptyEvent() { + return changeType != null; + } - public void markAsDeletionEvent() { - changeType = CDC_DELETE_EVENT_TYPE; - } + public void markAsDeletionEvent() { + changeType = CDC_DELETE_EVENT_TYPE; + } - public long getLastDeletedTimestamp() { - 
return lastDeletedTimestamp; - } + public long getLastDeletedTimestamp() { + return lastDeletedTimestamp; + } - public void setLastDeletedTimestamp(long lastDeletedTimestamp) { - this.lastDeletedTimestamp = lastDeletedTimestamp; - } + public void setLastDeletedTimestamp(long lastDeletedTimestamp) { + this.lastDeletedTimestamp = lastDeletedTimestamp; + } - public boolean isChangeRelevant(Cell cell) { - if (cell.getTimestamp() > changeTimestamp) { - return false; - } - if (cell.getType() != Cell.Type.DeleteFamily && !isOlderThanChange(cell) && - isDeletionEvent()) { - // We don't need to build the change image in this case. - return false; - } - return true; + public boolean isChangeRelevant(Cell cell) { + if (cell.getTimestamp() > changeTimestamp) { + return false; } - - public void registerChange(Cell cell, int columnNum, Object value) { - if (!isChangeRelevant(cell)) { - return; - } - CDCTableInfo.CDCColumnInfo columnInfo = - cdcDataTableInfo.getColumnInfoList().get(columnNum); - String cdcColumnName = columnInfo.getColumnDisplayName(cdcDataTableInfo); - if (isOlderThanChange(cell)) { - if ((isPreImageInScope || isPostImageInScope) && - !preImage.containsKey(cdcColumnName)) { - preImage.put(cdcColumnName, value); - } - } else if (cell.getTimestamp() == changeTimestamp) { - assert !isDeletionEvent() : "Not expected to find a change for delete event"; - changeType = CDC_UPSERT_EVENT_TYPE; - if (isChangeImageInScope || isPostImageInScope) { - changeImage.put(cdcColumnName, value); - } - } + if (cell.getType() != Cell.Type.DeleteFamily && !isOlderThanChange(cell) && isDeletionEvent()) { + // We don't need to build the change image in this case. + return false; } + return true; + } - public Map buildCDCEvent() { - assert (changeType != null) : "Not expected when no event was detected"; - Map cdcChange = new HashMap<>(); - if (isPreImageInScope) { - cdcChange.put(CDC_PRE_IMAGE, preImage); - } - if (changeType == CDC_UPSERT_EVENT_TYPE) { - if (isChangeImageInScope) { - cdcChange.put(CDC_CHANGE_IMAGE, changeImage); - } - if (isPostImageInScope) { - Map postImage = new HashMap<>(); - if (!isDeletionEvent()) { - postImage.putAll(preImage); - postImage.putAll(changeImage); - } - cdcChange.put(CDC_POST_IMAGE, postImage); - } - } - cdcChange.put(CDC_EVENT_TYPE, changeType); - return cdcChange; + public void registerChange(Cell cell, int columnNum, Object value) { + if (!isChangeRelevant(cell)) { + return; + } + CDCTableInfo.CDCColumnInfo columnInfo = cdcDataTableInfo.getColumnInfoList().get(columnNum); + String cdcColumnName = columnInfo.getColumnDisplayName(cdcDataTableInfo); + if (isOlderThanChange(cell)) { + if ((isPreImageInScope || isPostImageInScope) && !preImage.containsKey(cdcColumnName)) { + preImage.put(cdcColumnName, value); + } + } else if (cell.getTimestamp() == changeTimestamp) { + assert !isDeletionEvent() : "Not expected to find a change for delete event"; + changeType = CDC_UPSERT_EVENT_TYPE; + if (isChangeImageInScope || isPostImageInScope) { + changeImage.put(cdcColumnName, value); + } } + } - public boolean isOlderThanChange(Cell cell) { - return (cell.getTimestamp() < changeTimestamp && - cell.getTimestamp() > lastDeletedTimestamp) ? 
true : false; + public Map buildCDCEvent() { + assert (changeType != null) : "Not expected when no event was detected"; + Map cdcChange = new HashMap<>(); + if (isPreImageInScope) { + cdcChange.put(CDC_PRE_IMAGE, preImage); + } + if (changeType == CDC_UPSERT_EVENT_TYPE) { + if (isChangeImageInScope) { + cdcChange.put(CDC_CHANGE_IMAGE, changeImage); + } + if (isPostImageInScope) { + Map postImage = new HashMap<>(); + if (!isDeletionEvent()) { + postImage.putAll(preImage); + postImage.putAll(changeImage); + } + cdcChange.put(CDC_POST_IMAGE, postImage); + } } + cdcChange.put(CDC_EVENT_TYPE, changeType); + return cdcChange; + } + + public boolean isOlderThanChange(Cell cell) { + return (cell.getTimestamp() < changeTimestamp && cell.getTimestamp() > lastDeletedTimestamp) + ? true + : false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCUtil.java index 6e87121ef9f..852cf9ed2df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CDCUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import java.sql.SQLException; @@ -38,116 +37,107 @@ import org.bson.RawBsonDocument; public class CDCUtil { - public static final String CDC_INDEX_PREFIX = "PHOENIX_CDC_INDEX"; + public static final String CDC_INDEX_PREFIX = "PHOENIX_CDC_INDEX"; - /** - * Make a set of CDC change scope enums from the given string containing comma separated scope - * names. - * - * @param includeScopes Comma-separated scope names. - * @return the set of enums, which can be empty if the string is empty or has no valid names. - */ - public static Set makeChangeScopeEnumsFromString(String includeScopes) - throws SQLException { - Set cdcChangeScopes = new HashSet<>(); - if (includeScopes != null) { - StringTokenizer st = new StringTokenizer(includeScopes, ","); - while (st.hasMoreTokens()) { - String tok = st.nextToken(); - try { - cdcChangeScopes.add(PTable.CDCChangeScope.valueOf(tok.trim().toUpperCase())); - } - catch (IllegalArgumentException e) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.UNKNOWN_INCLUDE_CHANGE_SCOPE).setCdcChangeScope( - tok).build().buildException(); - } - } + /** + * Make a set of CDC change scope enums from the given string containing comma separated scope + * names. + * @param includeScopes Comma-separated scope names. + * @return the set of enums, which can be empty if the string is empty or has no valid names. 
+ */ + public static Set makeChangeScopeEnumsFromString(String includeScopes) + throws SQLException { + Set cdcChangeScopes = new HashSet<>(); + if (includeScopes != null) { + StringTokenizer st = new StringTokenizer(includeScopes, ","); + while (st.hasMoreTokens()) { + String tok = st.nextToken(); + try { + cdcChangeScopes.add(PTable.CDCChangeScope.valueOf(tok.trim().toUpperCase())); + } catch (IllegalArgumentException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_INCLUDE_CHANGE_SCOPE) + .setCdcChangeScope(tok).build().buildException(); } - return cdcChangeScopes; + } } + return cdcChangeScopes; + } - /** - * Make a string of comma-separated scope names from the specified set of enums. - * - * @param includeScopes Set of scope enums - * @return the comma-separated string of scopes, which can be an empty string in case the set is empty. - */ - public static String makeChangeScopeStringFromEnums(Set includeScopes) { - String cdcChangeScopes = null; - if (includeScopes != null) { - Iterable tmpStream = () -> includeScopes.stream().sorted() - .map(s -> s.name()).iterator(); - cdcChangeScopes = StringUtils.join(",", tmpStream); - } - return cdcChangeScopes; + /** + * Make a string of comma-separated scope names from the specified set of enums. + * @param includeScopes Set of scope enums + * @return the comma-separated string of scopes, which can be an empty string in case the set is + * empty. + */ + public static String makeChangeScopeStringFromEnums(Set includeScopes) { + String cdcChangeScopes = null; + if (includeScopes != null) { + Iterable tmpStream = + () -> includeScopes.stream().sorted().map(s -> s.name()).iterator(); + cdcChangeScopes = StringUtils.join(",", tmpStream); } + return cdcChangeScopes; + } - public static String getCDCIndexName(String cdcName) { - return CDC_INDEX_PREFIX + SchemaUtil.getTableNameFromFullName(cdcName.toUpperCase()); - } + public static String getCDCIndexName(String cdcName) { + return CDC_INDEX_PREFIX + SchemaUtil.getTableNameFromFullName(cdcName.toUpperCase()); + } - public static boolean isCDCIndex(String indexName) { - return indexName.startsWith(CDC_INDEX_PREFIX); - } + public static boolean isCDCIndex(String indexName) { + return indexName.startsWith(CDC_INDEX_PREFIX); + } - public static boolean isCDCIndex(PTable indexTable) { - return isCDCIndex(indexTable.getTableName().getString()); - } + public static boolean isCDCIndex(PTable indexTable) { + return isCDCIndex(indexTable.getTableName().getString()); + } - public static Scan setupScanForCDC(Scan scan) { - scan.setRaw(true); - scan.readAllVersions(); - scan.setCacheBlocks(false); - Map> familyMap = scan.getFamilyMap(); - if (! 
familyMap.isEmpty()) { - familyMap.clear(); - } - return scan; + public static Scan setupScanForCDC(Scan scan) { + scan.setRaw(true); + scan.readAllVersions(); + scan.setCacheBlocks(false); + Map> familyMap = scan.getFamilyMap(); + if (!familyMap.isEmpty()) { + familyMap.clear(); } + return scan; + } - public static int compareCellFamilyAndQualifier(byte[] columnFamily1, - byte[] columnQual1, - byte[] columnFamily2, - byte[] columnQual2) { - int familyNameComparison = DescVarLengthFastByteComparisons.compareTo(columnFamily1, - 0, columnFamily1.length, columnFamily2, 0, columnFamily2.length); - if (familyNameComparison != 0) { - return familyNameComparison; - } - return DescVarLengthFastByteComparisons.compareTo(columnQual1, - 0, columnQual1.length, columnQual2, 0, columnQual2.length); + public static int compareCellFamilyAndQualifier(byte[] columnFamily1, byte[] columnQual1, + byte[] columnFamily2, byte[] columnQual2) { + int familyNameComparison = DescVarLengthFastByteComparisons.compareTo(columnFamily1, 0, + columnFamily1.length, columnFamily2, 0, columnFamily2.length); + if (familyNameComparison != 0) { + return familyNameComparison; } + return DescVarLengthFastByteComparisons.compareTo(columnQual1, 0, columnQual1.length, + columnQual2, 0, columnQual2.length); + } - public static Object getColumnEncodedValue(Object value, PDataType dataType) { - if (value != null) { - if (dataType.getSqlType() == PDataType.BSON_TYPE) { - value = Bytes.toBytes(((RawBsonDocument) value).getByteBuffer().asNIO()); - } else if (isBinaryType(dataType)) { - // Unfortunately, Base64.Encoder has no option to specify offset and length so can't - // avoid copying bytes. - value = Base64.getEncoder().encodeToString((byte[]) value); - } else { - int sqlType = dataType.getSqlType(); - if (sqlType == Types.DATE - || sqlType == Types.TIMESTAMP - || sqlType == Types.TIME - || sqlType == Types.TIME_WITH_TIMEZONE - || dataType.isArrayType() - || sqlType == PDataType.JSON_TYPE - || sqlType == Types.TIMESTAMP_WITH_TIMEZONE) { - value = value.toString(); - } - } + public static Object getColumnEncodedValue(Object value, PDataType dataType) { + if (value != null) { + if (dataType.getSqlType() == PDataType.BSON_TYPE) { + value = Bytes.toBytes(((RawBsonDocument) value).getByteBuffer().asNIO()); + } else if (isBinaryType(dataType)) { + // Unfortunately, Base64.Encoder has no option to specify offset and length so can't + // avoid copying bytes. 
+ value = Base64.getEncoder().encodeToString((byte[]) value); + } else { + int sqlType = dataType.getSqlType(); + if ( + sqlType == Types.DATE || sqlType == Types.TIMESTAMP || sqlType == Types.TIME + || sqlType == Types.TIME_WITH_TIMEZONE || dataType.isArrayType() + || sqlType == PDataType.JSON_TYPE || sqlType == Types.TIMESTAMP_WITH_TIMEZONE + ) { + value = value.toString(); } - return value; + } } + return value; + } - public static boolean isBinaryType(PDataType dataType) { - int sqlType = dataType.getSqlType(); - return (sqlType == Types.BINARY - || sqlType == Types.VARBINARY - || sqlType == Types.LONGVARBINARY - || dataType.getSqlType() == PDataType.VARBINARY_ENCODED_TYPE); - } + public static boolean isBinaryType(PDataType dataType) { + int sqlType = dataType.getSqlType(); + return (sqlType == Types.BINARY || sqlType == Types.VARBINARY || sqlType == Types.LONGVARBINARY + || dataType.getSqlType() == PDataType.VARBINARY_ENCODED_TYPE); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java index b52092ae602..4dc2f4e6807 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CSVCommonsLoader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,275 +28,240 @@ import org.apache.commons.csv.CSVParser; import org.apache.commons.csv.CSVRecord; import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.util.csv.CsvUpsertExecutor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Charsets; import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.phoenix.util.csv.CsvUpsertExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /*** * Upserts CSV data using Phoenix JDBC connection */ public class CSVCommonsLoader { - private static final Logger LOGGER = LoggerFactory.getLogger(CSVCommonsLoader.class); - - public static final String DEFAULT_ARRAY_ELEMENT_SEPARATOR = ":"; - - private static final Map CTRL_CHARACTER_TABLE = - ImmutableMap.builder() - .put('1', '\u0001') - .put('2', '\u0002') - .put('3', '\u0003') - .put('4', '\u0004') - .put('5', '\u0005') - .put('6', '\u0006') - .put('7', '\u0007') - .put('8', '\u0008') - .put('9', '\u0009') - .build(); - - private final PhoenixConnection conn; - private final String tableName; - private final List columns; - private final boolean isStrict; - private final char fieldDelimiter; - private final char quoteCharacter; - private final Character escapeCharacter; - private PhoenixHeaderSource headerSource = PhoenixHeaderSource.FROM_TABLE; - private final CSVFormat format; - - - private final String arrayElementSeparator; - - public enum PhoenixHeaderSource { - FROM_TABLE, - IN_LINE, - SUPPLIED_BY_USER - } - - public CSVCommonsLoader(PhoenixConnection conn, String tableName, - List columns, boolean isStrict) { - this(conn, 
tableName, columns, isStrict, ',', '"', null, DEFAULT_ARRAY_ELEMENT_SEPARATOR); - } - - public CSVCommonsLoader(PhoenixConnection conn, String tableName, - List columns, boolean isStrict, char fieldDelimiter, char quoteCharacter, - Character escapeCharacter, String arrayElementSeparator) { - this.conn = conn; - this.tableName = tableName; - this.columns = columns; - this.isStrict = isStrict; - this.fieldDelimiter = fieldDelimiter; - this.quoteCharacter = quoteCharacter; - this.escapeCharacter = escapeCharacter; - - // implicit in the columns value. - if (columns !=null && !columns.isEmpty()) { - headerSource = PhoenixHeaderSource.SUPPLIED_BY_USER; - } - else if (columns != null && columns.isEmpty()) { - headerSource = PhoenixHeaderSource.IN_LINE; - } - - this.arrayElementSeparator = arrayElementSeparator; - this.format = buildFormat(); + private static final Logger LOGGER = LoggerFactory.getLogger(CSVCommonsLoader.class); + + public static final String DEFAULT_ARRAY_ELEMENT_SEPARATOR = ":"; + + private static final Map CTRL_CHARACTER_TABLE = + ImmutableMap. builder().put('1', '\u0001').put('2', '\u0002') + .put('3', '\u0003').put('4', '\u0004').put('5', '\u0005').put('6', '\u0006') + .put('7', '\u0007').put('8', '\u0008').put('9', '\u0009').build(); + + private final PhoenixConnection conn; + private final String tableName; + private final List columns; + private final boolean isStrict; + private final char fieldDelimiter; + private final char quoteCharacter; + private final Character escapeCharacter; + private PhoenixHeaderSource headerSource = PhoenixHeaderSource.FROM_TABLE; + private final CSVFormat format; + + private final String arrayElementSeparator; + + public enum PhoenixHeaderSource { + FROM_TABLE, + IN_LINE, + SUPPLIED_BY_USER + } + + public CSVCommonsLoader(PhoenixConnection conn, String tableName, List columns, + boolean isStrict) { + this(conn, tableName, columns, isStrict, ',', '"', null, DEFAULT_ARRAY_ELEMENT_SEPARATOR); + } + + public CSVCommonsLoader(PhoenixConnection conn, String tableName, List columns, + boolean isStrict, char fieldDelimiter, char quoteCharacter, Character escapeCharacter, + String arrayElementSeparator) { + this.conn = conn; + this.tableName = tableName; + this.columns = columns; + this.isStrict = isStrict; + this.fieldDelimiter = fieldDelimiter; + this.quoteCharacter = quoteCharacter; + this.escapeCharacter = escapeCharacter; + + // implicit in the columns value. + if (columns != null && !columns.isEmpty()) { + headerSource = PhoenixHeaderSource.SUPPLIED_BY_USER; + } else if (columns != null && columns.isEmpty()) { + headerSource = PhoenixHeaderSource.IN_LINE; } - public CSVFormat getFormat() { - return format; + this.arrayElementSeparator = arrayElementSeparator; + this.format = buildFormat(); + } + + public CSVFormat getFormat() { + return format; + } + + /** + * default settings delimiter = ',' quoteChar = '"', escape = null recordSeparator = CRLF, CR, or + * LF ignore empty lines allows the last data line to have a recordSeparator + * @return CSVFormat based on constructor settings. 
+ */ + private CSVFormat buildFormat() { + CSVFormat format = + CSVFormat.DEFAULT.withIgnoreEmptyLines(true).withDelimiter(asControlCharacter(fieldDelimiter)) + .withQuote(asControlCharacter(quoteCharacter)); + + if (escapeCharacter != null) { + format = format.withEscape(asControlCharacter(escapeCharacter)); } - /** - * default settings - * delimiter = ',' - * quoteChar = '"', - * escape = null - * recordSeparator = CRLF, CR, or LF - * ignore empty lines allows the last data line to have a recordSeparator - * - * @return CSVFormat based on constructor settings. - */ - private CSVFormat buildFormat() { - CSVFormat format = CSVFormat.DEFAULT - .withIgnoreEmptyLines(true) - .withDelimiter(asControlCharacter(fieldDelimiter)) - .withQuote(asControlCharacter(quoteCharacter)); - - if (escapeCharacter != null) { - format = format.withEscape(asControlCharacter(escapeCharacter)); - } + switch (headerSource) { + case FROM_TABLE: + // obtain headers from table, so format should not expect a header. + break; + case IN_LINE: + // an empty string array triggers csv loader to grab the first line as the header + format = format.withHeader(new String[0]); + break; + case SUPPLIED_BY_USER: + // a populated string array supplied by the user + format = format.withHeader(columns.toArray(new String[columns.size()])); + break; + default: + throw new RuntimeException("Header source was unable to be inferred."); - switch(headerSource) { - case FROM_TABLE: - // obtain headers from table, so format should not expect a header. - break; - case IN_LINE: - // an empty string array triggers csv loader to grab the first line as the header - format = format.withHeader(new String[0]); - break; - case SUPPLIED_BY_USER: - // a populated string array supplied by the user - format = format.withHeader(columns.toArray(new String[columns.size()])); - break; - default: - throw new RuntimeException("Header source was unable to be inferred."); - - } - return format; } - - - /** - * Translate a field separator, escape character, or phrase delimiter into a control character - * if it is a single digit other than 0. - * - * @param delimiter - * @return - */ - public static char asControlCharacter(char delimiter) { - return CTRL_CHARACTER_TABLE.getOrDefault(delimiter, delimiter); + return format; + } + + /** + * Translate a field separator, escape character, or phrase delimiter into a control character if + * it is a single digit other than 0. + */ + public static char asControlCharacter(char delimiter) { + return CTRL_CHARACTER_TABLE.getOrDefault(delimiter, delimiter); + } + + /** + * Upserts data from CSV file. Data is batched up based on connection batch size. Column PDataType + * is read from metadata and is used to convert column value to correct type before upsert. The + * constructor determines the format for the CSV files. + */ + public void upsert(String fileName) throws Exception { + CSVParser parser = CSVParser.parse(new File(fileName), Charsets.UTF_8, format); + upsert(parser); + } + + public void upsert(Reader reader) throws Exception { + CSVParser parser = new CSVParser(reader, format); + upsert(parser); + } + + private static String buildStringFromList(List list) { + return Joiner.on(", ").useForNull("null").join(list); + } + + /** + * Data is batched up based on connection batch size. Column PDataType is read from metadata and + * is used to convert column value to correct type before upsert. The format is determined by the + * supplied csvParser. 
CSVParser instance + */ + public void upsert(CSVParser csvParser) throws Exception { + List columnInfoList = buildColumnInfoList(csvParser); + + boolean wasAutoCommit = conn.getAutoCommit(); + try { + conn.setAutoCommit(false); + long start = EnvironmentEdgeManager.currentTimeMillis(); + CsvUpsertListener upsertListener = + new CsvUpsertListener(conn, conn.getMutateBatchSize(), isStrict); + CsvUpsertExecutor csvUpsertExecutor = + new CsvUpsertExecutor(conn, SchemaUtil.getEscapedFullTableName(tableName), columnInfoList, + upsertListener, arrayElementSeparator); + + csvUpsertExecutor.execute(csvParser); + csvUpsertExecutor.close(); + + conn.commit(); + double elapsedDuration = ((EnvironmentEdgeManager.currentTimeMillis() - start) / 1000.0); + System.out + .println("CSV Upsert complete. " + upsertListener.getTotalUpsertCount() + " rows upserted"); + System.out.println("Time: " + elapsedDuration + " sec(s)\n"); + + } finally { + + // release reader resources. + if (csvParser != null) { + csvParser.close(); + } + if (wasAutoCommit) { + conn.setAutoCommit(true); + } } - - /** - * Upserts data from CSV file. - * - * Data is batched up based on connection batch size. - * Column PDataType is read from metadata and is used to convert - * column value to correct type before upsert. - * - * The constructor determines the format for the CSV files. - * - * @param fileName - * @throws Exception - */ - public void upsert(String fileName) throws Exception { - CSVParser parser = CSVParser.parse(new File(fileName), Charsets.UTF_8, format); - upsert(parser); + } + + private List buildColumnInfoList(CSVParser parser) throws SQLException { + List columns = this.columns; + switch (headerSource) { + case FROM_TABLE: + System.out.println(String.format("csv columns from database.")); + break; + case IN_LINE: + columns = new ArrayList<>(parser.getHeaderMap().keySet()); + System.out.println(String.format("csv columns from header line. length=%s, %s", + columns.size(), buildStringFromList(columns))); + break; + case SUPPLIED_BY_USER: + System.out.println(String.format("csv columns from user. length=%s, %s", columns.size(), + buildStringFromList(columns))); + break; + default: + throw new IllegalStateException("parser has unknown column source."); } + return SchemaUtil.generateColumnInfo(conn, tableName, columns, isStrict); + } - public void upsert(Reader reader) throws Exception { - CSVParser parser = new CSVParser(reader,format); - upsert(parser); - } + static class CsvUpsertListener implements UpsertExecutor.UpsertListener { - private static String buildStringFromList(List list) { - return Joiner.on(", ").useForNull("null").join(list); + private final PhoenixConnection conn; + private final int upsertBatchSize; + private long totalUpserts = 0L; + private final boolean strict; + + CsvUpsertListener(PhoenixConnection conn, int upsertBatchSize, boolean strict) { + this.conn = conn; + this.upsertBatchSize = upsertBatchSize; + this.strict = strict; } - /** - * Data is batched up based on connection batch size. - * Column PDataType is read from metadata and is used to convert - * column value to correct type before upsert. - * - * The format is determined by the supplied csvParser. 
- - * @param csvParser - * CSVParser instance - * @throws Exception - */ - public void upsert(CSVParser csvParser) throws Exception { - List columnInfoList = buildColumnInfoList(csvParser); - - boolean wasAutoCommit = conn.getAutoCommit(); + @Override + public void upsertDone(long upsertCount) { + totalUpserts = upsertCount; + if (upsertCount % upsertBatchSize == 0) { + if (upsertCount % 1000 == 0) { + LOGGER.info("Processed upsert #{}", upsertCount); + } try { - conn.setAutoCommit(false); - long start = EnvironmentEdgeManager.currentTimeMillis(); - CsvUpsertListener upsertListener = new CsvUpsertListener(conn, - conn.getMutateBatchSize(), isStrict); - CsvUpsertExecutor csvUpsertExecutor = new CsvUpsertExecutor(conn, - SchemaUtil.getEscapedFullTableName(tableName), - columnInfoList, upsertListener, arrayElementSeparator); - - csvUpsertExecutor.execute(csvParser); - csvUpsertExecutor.close(); - - conn.commit(); - double elapsedDuration = ((EnvironmentEdgeManager.currentTimeMillis() - start) / 1000.0); - System.out.println("CSV Upsert complete. " + upsertListener.getTotalUpsertCount() - + " rows upserted"); - System.out.println("Time: " + elapsedDuration + " sec(s)\n"); - - } finally { - - // release reader resources. - if (csvParser != null) { - csvParser.close(); - } - if (wasAutoCommit) { - conn.setAutoCommit(true); - } + LOGGER.info("Committing after {} records", upsertCount); + conn.commit(); + } catch (SQLException e) { + throw new RuntimeException(e); } + } } - private List buildColumnInfoList(CSVParser parser) throws SQLException { - List columns = this.columns; - switch (headerSource) { - case FROM_TABLE: - System.out.println(String.format("csv columns from database.")); - break; - case IN_LINE: - columns = new ArrayList<>(parser.getHeaderMap().keySet()); - System.out.println(String.format("csv columns from header line. length=%s, %s", - columns.size(), buildStringFromList(columns))); - break; - case SUPPLIED_BY_USER: - System.out.println(String.format("csv columns from user. 
length=%s, %s", - columns.size(), buildStringFromList(columns))); - break; - default: - throw new IllegalStateException("parser has unknown column source."); - } - return SchemaUtil.generateColumnInfo(conn, tableName, columns, isStrict); + @Override + public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) { + LOGGER.error("Error upserting record " + csvRecord, throwable.getMessage()); + if (strict) { + Throwables.propagate(throwable); + } } - static class CsvUpsertListener implements UpsertExecutor.UpsertListener { - - private final PhoenixConnection conn; - private final int upsertBatchSize; - private long totalUpserts = 0L; - private final boolean strict; - - CsvUpsertListener(PhoenixConnection conn, int upsertBatchSize, boolean strict) { - this.conn = conn; - this.upsertBatchSize = upsertBatchSize; - this.strict = strict; - } - - @Override - public void upsertDone(long upsertCount) { - totalUpserts = upsertCount; - if (upsertCount % upsertBatchSize == 0) { - if (upsertCount % 1000 == 0) { - LOGGER.info("Processed upsert #{}", upsertCount); - } - try { - LOGGER.info("Committing after {} records", upsertCount); - conn.commit(); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - } - - @Override - public void errorOnRecord(CSVRecord csvRecord, Throwable throwable) { - LOGGER.error("Error upserting record " + csvRecord, throwable.getMessage()); - if (strict) { - Throwables.propagate(throwable); - } - } - - /** - * Get the total number of upserts that this listener has been notified about up until now. - * - * @return the total count of upserts - */ - public long getTotalUpsertCount() { - return totalUpserts; - } + /** + * Get the total number of upserts that this listener has been notified about up until now. + * @return the total count of upserts + */ + public long getTotalUpsertCount() { + return totalUpserts; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ClientUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ClientUtil.java index 2afd4640b3f..97838545373 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ClientUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ClientUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; @@ -37,173 +36,182 @@ import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; -public class ClientUtil { - private static final String FORMAT = "ERROR %d (%s): %s"; - private static final Pattern PATTERN = Pattern.compile("ERROR (\\d+) \\((\\w+)\\): (.*)"); - private static final Pattern HASH_JOIN_EXCEPTION_PATTERN = Pattern.compile("joinId: (-?\\d+)"); - private static final Pattern PATTERN_FOR_TS = Pattern.compile(",serverTimestamp=(\\d+),"); - private static final Map, SQLExceptionCode> errorcodeMap - = new HashMap, SQLExceptionCode>(); - static { - // Map a normal exception into a corresponding SQLException. - errorcodeMap.put(ArithmeticException.class, SQLExceptionCode.SERVER_ARITHMETIC_ERROR); - } - - - private static String constructSQLErrorMessage(SQLExceptionCode code, Throwable e, String message) { - return constructSQLErrorMessage(code.getErrorCode(), code.getSQLState(), code.getMessage() + " " + e.getMessage() + " " + message); - } - - private static String constructSQLErrorMessage(SQLException e, String message) { - return constructSQLErrorMessage(e.getErrorCode(), e.getSQLState(), e.getMessage() + " " + message); - } - - private static String constructSQLErrorMessage(int errorCode, String SQLState, String message) { - return String.format(FORMAT, errorCode, SQLState, message); - } - - public static SQLException parseServerException(Throwable t) { - SQLException e = parseServerExceptionOrNull(t); - if (e != null) { - return e; - } - return new PhoenixIOException(t); - } +import com.google.protobuf.ServiceException; - public static SQLException parseServerExceptionOrNull(Throwable t) { - while (t.getCause() != null) { - if (t instanceof NotServingRegionException) { - return parseRemoteException(new StaleRegionBoundaryCacheException()); - } - t = t.getCause(); - } - return parseRemoteException(t); +public class ClientUtil { + private static final String FORMAT = "ERROR %d (%s): %s"; + private static final Pattern PATTERN = Pattern.compile("ERROR (\\d+) \\((\\w+)\\): (.*)"); + private static final Pattern HASH_JOIN_EXCEPTION_PATTERN = Pattern.compile("joinId: (-?\\d+)"); + private static final Pattern PATTERN_FOR_TS = Pattern.compile(",serverTimestamp=(\\d+),"); + private static final Map, SQLExceptionCode> errorcodeMap = + new HashMap, SQLExceptionCode>(); + static { + // Map a normal exception into a corresponding SQLException. 
+ errorcodeMap.put(ArithmeticException.class, SQLExceptionCode.SERVER_ARITHMETIC_ERROR); + } + + private static String constructSQLErrorMessage(SQLExceptionCode code, Throwable e, + String message) { + return constructSQLErrorMessage(code.getErrorCode(), code.getSQLState(), + code.getMessage() + " " + e.getMessage() + " " + message); + } + + private static String constructSQLErrorMessage(SQLException e, String message) { + return constructSQLErrorMessage(e.getErrorCode(), e.getSQLState(), + e.getMessage() + " " + message); + } + + private static String constructSQLErrorMessage(int errorCode, String SQLState, String message) { + return String.format(FORMAT, errorCode, SQLState, message); + } + + public static SQLException parseServerException(Throwable t) { + SQLException e = parseServerExceptionOrNull(t); + if (e != null) { + return e; } - - /** - * Return the first SQLException in the exception chain, otherwise parse it. - * When we're receiving an exception locally, there's no need to string parse, - * as the SQLException will already be part of the chain. - * @param t - * @return the SQLException, or null if none found - */ - public static SQLException parseLocalOrRemoteServerException(Throwable t) { - while (t.getCause() != null) { - if (t instanceof NotServingRegionException) { - return parseRemoteException(new StaleRegionBoundaryCacheException()); - } else if (t instanceof SQLException) { - return (SQLException) t; - } - t = t.getCause(); - } - return parseRemoteException(t); + return new PhoenixIOException(t); + } + + public static SQLException parseServerExceptionOrNull(Throwable t) { + while (t.getCause() != null) { + if (t instanceof NotServingRegionException) { + return parseRemoteException(new StaleRegionBoundaryCacheException()); + } + t = t.getCause(); } - - public static SQLException parseRemoteException(Throwable t) { - - String message = t.getLocalizedMessage(); - if (message != null) { - // If the message matches the standard pattern, recover the SQLException and throw it. - Matcher matcher = PATTERN.matcher(t.getLocalizedMessage()); - if (matcher.find()) { - int statusCode = Integer.parseInt(matcher.group(1)); - SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); - if(code.equals(SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND)){ - Matcher m = HASH_JOIN_EXCEPTION_PATTERN.matcher(t.getLocalizedMessage()); - if (m.find()) { return new HashJoinCacheNotFoundException(Long.parseLong(m.group(1))); } - } - return new SQLExceptionInfo.Builder(code).setMessage(matcher.group()).setRootCause(t).build().buildException(); - } - } - return null; + return parseRemoteException(t); + } + + /** + * Return the first SQLException in the exception chain, otherwise parse it. When we're receiving + * an exception locally, there's no need to string parse, as the SQLException will already be part + * of the chain. 
+ * @return the SQLException, or null if none found + */ + public static SQLException parseLocalOrRemoteServerException(Throwable t) { + while (t.getCause() != null) { + if (t instanceof NotServingRegionException) { + return parseRemoteException(new StaleRegionBoundaryCacheException()); + } else if (t instanceof SQLException) { + return (SQLException) t; + } + t = t.getCause(); } - - public static long parseServerTimestamp(Throwable t) { - while (t.getCause() != null) { - t = t.getCause(); + return parseRemoteException(t); + } + + public static SQLException parseRemoteException(Throwable t) { + + String message = t.getLocalizedMessage(); + if (message != null) { + // If the message matches the standard pattern, recover the SQLException and throw it. + Matcher matcher = PATTERN.matcher(t.getLocalizedMessage()); + if (matcher.find()) { + int statusCode = Integer.parseInt(matcher.group(1)); + SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode); + if (code.equals(SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND)) { + Matcher m = HASH_JOIN_EXCEPTION_PATTERN.matcher(t.getLocalizedMessage()); + if (m.find()) { + return new HashJoinCacheNotFoundException(Long.parseLong(m.group(1))); + } } - return parseTimestampFromRemoteException(t); + return new SQLExceptionInfo.Builder(code).setMessage(matcher.group()).setRootCause(t) + .build().buildException(); + } } + return null; + } - public static long parseTimestampFromRemoteException(Throwable t) { - String message = t.getLocalizedMessage(); - if (message != null) { - // If the message matches the standard pattern, recover the SQLException and throw it. - Matcher matcher = PATTERN_FOR_TS.matcher(t.getLocalizedMessage()); - if (matcher.find()) { - String tsString = matcher.group(1); - if (tsString != null) { - return Long.parseLong(tsString); - } - } - } - return HConstants.LATEST_TIMESTAMP; + public static long parseServerTimestamp(Throwable t) { + while (t.getCause() != null) { + t = t.getCause(); } - - public static IOException createIOException(String msg, Throwable t) { - // First unwrap SQLExceptions if it's root cause is an IOException. - if (t instanceof SQLException) { - Throwable cause = t.getCause(); - if (cause instanceof IOException) { - t = cause; - } - } - // Throw immediately if DoNotRetryIOException - if (t instanceof DoNotRetryIOException) { - return (DoNotRetryIOException) t; - } else if (t instanceof IOException) { - // If the IOException does not wrap any exception, then bubble it up. - Throwable cause = t.getCause(); - if (cause instanceof RetriesExhaustedWithDetailsException) - return new DoNotRetryIOException(t.getMessage(), cause); - else if (cause == null || cause instanceof IOException) { - return (IOException) t; - } - // Else assume it's been wrapped, so throw as DoNotRetryIOException to prevent client hanging while retrying - return new DoNotRetryIOException(t.getMessage(), cause); - } else if (t instanceof SQLException) { - // If it's already an SQLException, construct an error message so we can parse and reconstruct on the client side. - return new DoNotRetryIOException(constructSQLErrorMessage((SQLException) t, msg), t); - } else { - // Not a DoNotRetryIOException, IOException or SQLException. Map the exception type to a general SQLException - // and construct the error message so it can be reconstruct on the client side. - // - // If no mapping exists, rethrow it as a generic exception. 
- SQLExceptionCode code = errorcodeMap.get(t.getClass()); - if (code == null) { - return new DoNotRetryIOException(msg + ": " + t.getMessage(), t); - } else { - return new DoNotRetryIOException(constructSQLErrorMessage(code, t, msg), t); - } + return parseTimestampFromRemoteException(t); + } + + public static long parseTimestampFromRemoteException(Throwable t) { + String message = t.getLocalizedMessage(); + if (message != null) { + // If the message matches the standard pattern, recover the SQLException and throw it. + Matcher matcher = PATTERN_FOR_TS.matcher(t.getLocalizedMessage()); + if (matcher.find()) { + String tsString = matcher.group(1); + if (tsString != null) { + return Long.parseLong(tsString); } + } } - - public static void throwIOException(String msg, Throwable t) throws IOException { - throw createIOException(msg, t); + return HConstants.LATEST_TIMESTAMP; + } + + public static IOException createIOException(String msg, Throwable t) { + // First unwrap SQLExceptions if it's root cause is an IOException. + if (t instanceof SQLException) { + Throwable cause = t.getCause(); + if (cause instanceof IOException) { + t = cause; + } } - - /** - * Returns true if HBase namespace exists, else returns false - * @param admin HbaseAdmin Object - * @param schemaName Phoenix schema name for which we check existence of the HBase namespace - * @return true if the HBase namespace exists, else returns false - * @throws IOException If there is an exception checking the HBase namespace - */ - public static boolean isHBaseNamespaceAvailable(Admin admin, String schemaName) throws IOException { - String[] hbaseNamespaces = admin.listNamespaces(); - return Arrays.asList(hbaseNamespaces).contains(schemaName); + // Throw immediately if DoNotRetryIOException + if (t instanceof DoNotRetryIOException) { + return (DoNotRetryIOException) t; + } else if (t instanceof IOException) { + // If the IOException does not wrap any exception, then bubble it up. + Throwable cause = t.getCause(); + if (cause instanceof RetriesExhaustedWithDetailsException) + return new DoNotRetryIOException(t.getMessage(), cause); + else if (cause == null || cause instanceof IOException) { + return (IOException) t; + } + // Else assume it's been wrapped, so throw as DoNotRetryIOException to prevent client hanging + // while retrying + return new DoNotRetryIOException(t.getMessage(), cause); + } else if (t instanceof SQLException) { + // If it's already an SQLException, construct an error message so we can parse and reconstruct + // on the client side. + return new DoNotRetryIOException(constructSQLErrorMessage((SQLException) t, msg), t); + } else { + // Not a DoNotRetryIOException, IOException or SQLException. Map the exception type to a + // general SQLException + // and construct the error message so it can be reconstruct on the client side. + // + // If no mapping exists, rethrow it as a generic exception. 
+ SQLExceptionCode code = errorcodeMap.get(t.getClass()); + if (code == null) { + return new DoNotRetryIOException(msg + ": " + t.getMessage(), t); + } else { + return new DoNotRetryIOException(constructSQLErrorMessage(code, t, msg), t); + } } - - /** - * Convert ServiceException into an IOException - * @param se ServiceException - * @return IOException - */ - public static IOException parseServiceException(ServiceException se) { - Throwable cause = se.getCause(); - if (cause != null && cause instanceof IOException) { - return (IOException) cause; - } - return new IOException(se); + } + + public static void throwIOException(String msg, Throwable t) throws IOException { + throw createIOException(msg, t); + } + + /** + * Returns true if HBase namespace exists, else returns false + * @param admin HbaseAdmin Object + * @param schemaName Phoenix schema name for which we check existence of the HBase namespace + * @return true if the HBase namespace exists, else returns false + * @throws IOException If there is an exception checking the HBase namespace + */ + public static boolean isHBaseNamespaceAvailable(Admin admin, String schemaName) + throws IOException { + String[] hbaseNamespaces = admin.listNamespaces(); + return Arrays.asList(hbaseNamespaces).contains(schemaName); + } + + /** + * Convert ServiceException into an IOException + * @param se ServiceException + */ + public static IOException parseServiceException(ServiceException se) { + Throwable cause = se.getCause(); + if (cause != null && cause instanceof IOException) { + return (IOException) cause; } + return new IOException(se); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/Closeables.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/Closeables.java index 09c4acb9a7d..1892fa1b403 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/Closeables.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/Closeables.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,6 @@ */ package org.apache.phoenix.util; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; @@ -29,125 +24,126 @@ import java.util.Collections; import java.util.LinkedList; +import javax.annotation.Nullable; + +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utilities for operating on {@link Closeable}s. - * */ public class Closeables { - private static final Logger LOGGER = LoggerFactory.getLogger(Closeables.class); + private static final Logger LOGGER = LoggerFactory.getLogger(Closeables.class); - /** Not constructed */ - private Closeables() { } - - /** - * Close a {@code Closeable}, returning an {@code IOException} if it occurs while closing - * instead of throwing it. This is nearly a clone of the Guava Closeables.closeQuietly method - * which has long since been removed from Guava. 
- * - * Use of this method should be avoided -- quietly swallowing IOExceptions (particularly on - * Closeables that are being written to) is a code smell. Use of the equivalent method in - * Guava was done for this reason. - * - * @param closeable the Closeable to be closed, can be null - * @return the IOException if one was thrown, otherwise {@code null} - */ - public static IOException closeQuietly(@Nullable Closeable closeable) { - if (closeable == null) { - return null; - } - try { - closeable.close(); - return null; - } catch (IOException e) { - LOGGER.error("Error closing " + closeable, e); - return e; - } + /** Not constructed */ + private Closeables() { + } + + /** + * Close a {@code Closeable}, returning an {@code IOException} if it occurs while closing instead + * of throwing it. This is nearly a clone of the Guava Closeables.closeQuietly method which has + * long since been removed from Guava. Use of this method should be avoided -- quietly swallowing + * IOExceptions (particularly on Closeables that are being written to) is a code smell. Use of the + * equivalent method in Guava was done for this reason. + * @param closeable the Closeable to be closed, can be null + * @return the IOException if one was thrown, otherwise {@code null} + */ + public static IOException closeQuietly(@Nullable Closeable closeable) { + if (closeable == null) { + return null; + } + try { + closeable.close(); + return null; + } catch (IOException e) { + LOGGER.error("Error closing " + closeable, e); + return e; + } + } + + /** + * Allows you to close as many of the {@link Closeable}s as possible. If any of the close's fail + * with an IOException, those exception(s) will be thrown after attempting to close all of the + * inputs. + */ + public static void closeAll(Iterable iterable) throws IOException { + IOException ex = closeAllQuietly(iterable); + if (ex != null) throw ex; + } + + public static IOException closeAllQuietly(Iterable iterable) { + if (iterable == null) return null; + + LinkedList exceptions = null; + for (Closeable closeable : iterable) { + IOException ioe = closeQuietly(closeable); + if (ioe != null) { + if (exceptions == null) exceptions = new LinkedList(); + exceptions.add(ioe); + } } + IOException ex = MultipleCausesIOException.fromIOExceptions(exceptions); + return ex; + } + + static private class MultipleCausesIOException extends IOException { + private static final long serialVersionUID = 1L; + + static IOException fromIOExceptions(Collection exceptions) { + if (exceptions == null || exceptions.isEmpty()) return null; + if (exceptions.size() == 1) return Iterables.getOnlyElement(exceptions); + + return new MultipleCausesIOException(exceptions); + } + + private final Collection exceptions; + private boolean hasSetStackTrace; + /** - * Allows you to close as many of the {@link Closeable}s as possible. - * - * If any of the close's fail with an IOException, those exception(s) will - * be thrown after attempting to close all of the inputs. + * Use the {@link #fromIOExceptions(Collection) factory}. 
*/ - public static void closeAll(Iterable iterable) throws IOException { - IOException ex = closeAllQuietly(iterable); - if (ex != null) throw ex; + private MultipleCausesIOException(Collection exceptions) { + this.exceptions = exceptions; } - - public static IOException closeAllQuietly(Iterable iterable) { - if (iterable == null) return null; - - LinkedList exceptions = null; - for (Closeable closeable : iterable) { - IOException ioe = closeQuietly(closeable); - if (ioe != null) { - if (exceptions == null) exceptions = new LinkedList(); - exceptions.add(ioe); - } - } - - IOException ex = MultipleCausesIOException.fromIOExceptions(exceptions); - return ex; + + @Override + public String getMessage() { + StringBuilder sb = new StringBuilder(this.exceptions.size() * 50); + int exceptionNum = 0; + for (IOException ex : this.exceptions) { + sb.append("Cause Number " + exceptionNum + ": " + ex.getMessage() + "\n"); + exceptionNum++; + } + return sb.toString(); } - static private class MultipleCausesIOException extends IOException { - private static final long serialVersionUID = 1L; + @Override + public StackTraceElement[] getStackTrace() { + if (!this.hasSetStackTrace) { + ArrayList frames = + new ArrayList(this.exceptions.size() * 20); - static IOException fromIOExceptions(Collection exceptions) { - if (exceptions == null || exceptions.isEmpty()) return null; - if (exceptions.size() == 1) return Iterables.getOnlyElement(exceptions); - - return new MultipleCausesIOException(exceptions); - } - - private final Collection exceptions; - private boolean hasSetStackTrace; - - /** - * Use the {@link #fromIOExceptions(Collection) factory}. - */ - private MultipleCausesIOException(Collection exceptions) { - this.exceptions = exceptions; - } + int exceptionNum = 0; + for (IOException exception : this.exceptions) { + StackTraceElement header = + new StackTraceElement(MultipleCausesIOException.class.getName(), + "Exception Number " + exceptionNum, "", 0); - @Override - public String getMessage() { - StringBuilder sb = new StringBuilder(this.exceptions.size() * 50); - int exceptionNum = 0; - for (IOException ex : this.exceptions) { - sb.append("Cause Number " + exceptionNum + ": " + ex.getMessage() + "\n"); - exceptionNum++; - } - return sb.toString(); - } - - @Override - public StackTraceElement[] getStackTrace() { - if (!this.hasSetStackTrace) { - ArrayList frames = new ArrayList(this.exceptions.size() * 20); - - int exceptionNum = 0; - for (IOException exception : this.exceptions) { - StackTraceElement header = new StackTraceElement(MultipleCausesIOException.class.getName(), - "Exception Number " + exceptionNum, - "", - 0); - - frames.add(header); - Collections.addAll(frames, exception.getStackTrace()); - exceptionNum++; - } - - setStackTrace(frames.toArray(new StackTraceElement[frames.size()])); - this.hasSetStackTrace = true; - } - - return super.getStackTrace(); + frames.add(header); + Collections.addAll(frames, exception.getStackTrace()); + exceptionNum++; } + setStackTrace(frames.toArray(new StackTraceElement[frames.size()])); + this.hasSetStackTrace = true; + } + + return super.getStackTrace(); } - + + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ColumnInfo.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ColumnInfo.java index eea838d571d..474c5e8bc29 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ColumnInfo.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ColumnInfo.java @@ -1,16 +1,22 @@ /* - * Licensed to 
the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.util; -import java.sql.Types; import java.util.List; import java.util.Objects; import java.util.regex.Matcher; @@ -18,7 +24,6 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.types.*; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; @@ -27,160 +32,157 @@ */ public class ColumnInfo { - /** Separator used for the toString representation */ - private static final String STR_SEPARATOR = ":"; - - private final String columnName; - private final int sqlType; - - private final Integer precision; - private final Integer scale; - - public static ColumnInfo create(String columnName, int sqlType, Integer maxLength, Integer scale) { - if(scale != null) { - assert(maxLength != null); // If we have a scale, we should always have a maxLength - scale = Math.min(maxLength, scale); - return new ColumnInfo(columnName, sqlType, maxLength, scale); - } - if (maxLength != null) { - return new ColumnInfo(columnName, sqlType, maxLength); - } - return new ColumnInfo(columnName, sqlType); - } - - public ColumnInfo(String columnName, int sqlType) { - this(columnName, sqlType, null); - } - - public ColumnInfo(String columnName, int sqlType, Integer maxLength) { - this(columnName, sqlType, maxLength, null); - } - - public ColumnInfo(String columnName, int sqlType, Integer precision, Integer scale) { - Preconditions.checkNotNull(columnName, "columnName cannot be null"); - Preconditions.checkArgument(!columnName.isEmpty(), "columnName cannot be empty"); - if(!columnName.startsWith(SchemaUtil.ESCAPE_CHARACTER)) { - columnName = SchemaUtil.getEscapedFullColumnName(columnName); - } - this.columnName = columnName; - this.sqlType = sqlType; - this.precision = precision; - this.scale = scale; - } - - public String getColumnName() { - return columnName; - } - - public int getSqlType() { - return sqlType; - } - - public PDataType 
getPDataType() { - return PDataType.fromTypeId(sqlType); - } - - /** - * Returns the column name without the associated Column Family. - * @return - */ - public String getDisplayName() { - final String unescapedColumnName = SchemaUtil.getUnEscapedFullColumnName(columnName); - int index = unescapedColumnName.indexOf(QueryConstants.NAME_SEPARATOR); - if (index < 0) { - return unescapedColumnName; - } - return unescapedColumnName.substring(index+1).trim(); - } - - // Return the proper SQL type string, taking into account possible array, length and scale parameters - public String toTypeString() { - return PhoenixRuntime.getSqlTypeName(getPDataType(), getMaxLength(), getScale()); - } - - @Override - public String toString() { - return toTypeString() + STR_SEPARATOR + columnName ; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ColumnInfo that = (ColumnInfo) o; - - if (sqlType != that.sqlType) return false; - if (!Objects.equals(precision, that.precision)) return false; - if (!Objects.equals(scale, that.scale)) return false; - if (!columnName.equals(that.columnName)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = columnName.hashCode(); - result = 31 * result + (precision << 2) + (scale << 1) + sqlType; - return result; - } - - /** - * Instantiate a {@code ColumnInfo} from a string representation created by the {@link - * #toString()} method. - * - * @param stringRepresentation string representation of a ColumnInfo - * @return the corresponding ColumnInfo - * @throws java.lang.IllegalArgumentException if the given string representation cannot be - * parsed - */ - public static ColumnInfo fromString(String stringRepresentation) { - List components = - Lists.newArrayList(stringRepresentation.split(":", 2)); - - if (components.size() != 2) { - throw new IllegalArgumentException("Unparseable string: " + stringRepresentation); - } - - String[] typeParts = components.get(0).split(" "); - String columnName = components.get(1); - - Integer maxLength = null; - Integer scale = null; - if (typeParts[0].contains("(")) { - Matcher matcher = Pattern.compile("([^\\(]+)\\((\\d+)(?:,(\\d+))?\\)").matcher(typeParts[0]); - if (!matcher.matches() || matcher.groupCount() > 3) { - throw new IllegalArgumentException("Unparseable type string: " + typeParts[0]); - } - maxLength = Integer.valueOf(matcher.group(2)); - if (matcher.group(3) != null) { - scale = Integer.valueOf(matcher.group(3)); - } - // Drop the (N) or (N,N) from the original type - typeParts[0] = matcher.group(1); - } - - // Create the PDataType from the sql type name, including the second 'ARRAY' part if present - PDataType dataType; - if(typeParts.length < 2) { - dataType = PDataType.fromSqlTypeName(typeParts[0]); - } - else { - dataType = PDataType.fromSqlTypeName(typeParts[0] + " " + typeParts[1]); - } - - return ColumnInfo.create(columnName, dataType.getSqlType(), maxLength, scale); - } - - public Integer getMaxLength() { - return precision; - } - - public Integer getPrecision() { - return precision; - } - - public Integer getScale() { - return scale; - } + /** Separator used for the toString representation */ + private static final String STR_SEPARATOR = ":"; + + private final String columnName; + private final int sqlType; + + private final Integer precision; + private final Integer scale; + + public static ColumnInfo create(String columnName, int sqlType, Integer maxLength, + Integer scale) { + if (scale 
!= null) { + assert (maxLength != null); // If we have a scale, we should always have a maxLength + scale = Math.min(maxLength, scale); + return new ColumnInfo(columnName, sqlType, maxLength, scale); + } + if (maxLength != null) { + return new ColumnInfo(columnName, sqlType, maxLength); + } + return new ColumnInfo(columnName, sqlType); + } + + public ColumnInfo(String columnName, int sqlType) { + this(columnName, sqlType, null); + } + + public ColumnInfo(String columnName, int sqlType, Integer maxLength) { + this(columnName, sqlType, maxLength, null); + } + + public ColumnInfo(String columnName, int sqlType, Integer precision, Integer scale) { + Preconditions.checkNotNull(columnName, "columnName cannot be null"); + Preconditions.checkArgument(!columnName.isEmpty(), "columnName cannot be empty"); + if (!columnName.startsWith(SchemaUtil.ESCAPE_CHARACTER)) { + columnName = SchemaUtil.getEscapedFullColumnName(columnName); + } + this.columnName = columnName; + this.sqlType = sqlType; + this.precision = precision; + this.scale = scale; + } + + public String getColumnName() { + return columnName; + } + + public int getSqlType() { + return sqlType; + } + + public PDataType getPDataType() { + return PDataType.fromTypeId(sqlType); + } + + /** + * Returns the column name without the associated Column Family. + */ + public String getDisplayName() { + final String unescapedColumnName = SchemaUtil.getUnEscapedFullColumnName(columnName); + int index = unescapedColumnName.indexOf(QueryConstants.NAME_SEPARATOR); + if (index < 0) { + return unescapedColumnName; + } + return unescapedColumnName.substring(index + 1).trim(); + } + + // Return the proper SQL type string, taking into account possible array, length and scale + // parameters + public String toTypeString() { + return PhoenixRuntime.getSqlTypeName(getPDataType(), getMaxLength(), getScale()); + } + + @Override + public String toString() { + return toTypeString() + STR_SEPARATOR + columnName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ColumnInfo that = (ColumnInfo) o; + + if (sqlType != that.sqlType) return false; + if (!Objects.equals(precision, that.precision)) return false; + if (!Objects.equals(scale, that.scale)) return false; + if (!columnName.equals(that.columnName)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = columnName.hashCode(); + result = 31 * result + (precision << 2) + (scale << 1) + sqlType; + return result; + } + + /** + * Instantiate a {@code ColumnInfo} from a string representation created by the + * {@link #toString()} method. 
+ * @param stringRepresentation string representation of a ColumnInfo + * @return the corresponding ColumnInfo + * @throws java.lang.IllegalArgumentException if the given string representation cannot be parsed + */ + public static ColumnInfo fromString(String stringRepresentation) { + List components = Lists.newArrayList(stringRepresentation.split(":", 2)); + + if (components.size() != 2) { + throw new IllegalArgumentException("Unparseable string: " + stringRepresentation); + } + + String[] typeParts = components.get(0).split(" "); + String columnName = components.get(1); + + Integer maxLength = null; + Integer scale = null; + if (typeParts[0].contains("(")) { + Matcher matcher = Pattern.compile("([^\\(]+)\\((\\d+)(?:,(\\d+))?\\)").matcher(typeParts[0]); + if (!matcher.matches() || matcher.groupCount() > 3) { + throw new IllegalArgumentException("Unparseable type string: " + typeParts[0]); + } + maxLength = Integer.valueOf(matcher.group(2)); + if (matcher.group(3) != null) { + scale = Integer.valueOf(matcher.group(3)); + } + // Drop the (N) or (N,N) from the original type + typeParts[0] = matcher.group(1); + } + + // Create the PDataType from the sql type name, including the second 'ARRAY' part if present + PDataType dataType; + if (typeParts.length < 2) { + dataType = PDataType.fromSqlTypeName(typeParts[0]); + } else { + dataType = PDataType.fromSqlTypeName(typeParts[0] + " " + typeParts[1]); + } + + return ColumnInfo.create(columnName, dataType.getSqlType(), maxLength, scale); + } + + public Integer getMaxLength() { + return precision; + } + + public Integer getPrecision() { + return precision; + } + + public Integer getScale() { + return scale; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ConfigUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ConfigUtil.java index 80bdce41b4e..fb6e0507bf3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ConfigUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ConfigUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +26,6 @@ public class ConfigUtil { /** * This function set missed replication configuration settings. It should only be used in testing * env. - * @param conf */ public static void setReplicationConfigIfAbsent(Configuration conf) { // set replication required parameter diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CostUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CostUtil.java index db2b5fff440..8a9a7f5b57a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CostUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CostUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,70 +22,69 @@ import org.apache.phoenix.query.QueryServices; /** - * Utilities for computing costs. 
- * - * Some of the methods here should eventually be replaced by a metadata framework which - * estimates output metrics for each QueryPlan or operation, e.g. row count, byte count, - * etc. + * Utilities for computing costs. Some of the methods here should eventually be replaced by a + * metadata framework which estimates output metrics for each QueryPlan or operation, e.g. row + * count, byte count, etc. */ public class CostUtil { - /** - * Estimate the cost of an aggregate. - * @param inputBytes the number of input bytes - * @param outputBytes the number of output bytes - * @param groupBy the compiled GroupBy object - * @param parallelLevel number of parallel workers or threads - * @return the cost - */ - public static Cost estimateAggregateCost( - double inputBytes, double outputBytes, GroupBy groupBy, int parallelLevel) { - double hashMapOverhead = groupBy.isOrderPreserving() || groupBy.isUngroupedAggregate() ? 1 : (outputBytes < 1 ? 1 : outputBytes); - return new Cost(0, 0, (outputBytes + hashMapOverhead * Math.log(inputBytes)) / parallelLevel); - } + /** + * Estimate the cost of an aggregate. + * @param inputBytes the number of input bytes + * @param outputBytes the number of output bytes + * @param groupBy the compiled GroupBy object + * @param parallelLevel number of parallel workers or threads + * @return the cost + */ + public static Cost estimateAggregateCost(double inputBytes, double outputBytes, GroupBy groupBy, + int parallelLevel) { + double hashMapOverhead = groupBy.isOrderPreserving() || groupBy.isUngroupedAggregate() + ? 1 + : (outputBytes < 1 ? 1 : outputBytes); + return new Cost(0, 0, (outputBytes + hashMapOverhead * Math.log(inputBytes)) / parallelLevel); + } - /** - * Estimate the cost of an order-by - * @param inputBytes the number of input bytes - * @param outputBytes the number of output bytes, which may be different from inputBytes - * depending on whether there is a LIMIT - * @param parallelLevel number of parallel workers or threads - * @return the cost - */ - public static Cost estimateOrderByCost(double inputBytes, double outputBytes, int parallelLevel) { - if (inputBytes < 1) { - inputBytes = 1; - } - return new Cost(0, 0, - (outputBytes + outputBytes * Math.log(inputBytes)) / parallelLevel); + /** + * Estimate the cost of an order-by + * @param inputBytes the number of input bytes + * @param outputBytes the number of output bytes, which may be different from inputBytes + * depending on whether there is a LIMIT + * @param parallelLevel number of parallel workers or threads + * @return the cost + */ + public static Cost estimateOrderByCost(double inputBytes, double outputBytes, int parallelLevel) { + if (inputBytes < 1) { + inputBytes = 1; } + return new Cost(0, 0, (outputBytes + outputBytes * Math.log(inputBytes)) / parallelLevel); + } - /** - * Estimate the cost of a hash-join - * @param lhsBytes the number of left input bytes - * @param rhsBytes the number of right input bytes - * @param outputBytes the number of output bytes - * @param parallelLevel number of parallel workers or threads - * @return the cost - */ - public static Cost estimateHashJoinCost( - double lhsBytes, double rhsBytes, double outputBytes, - boolean hasKeyRangeExpression, int parallelLevel) { - if (rhsBytes < 1) { - rhsBytes = 1; - } - return new Cost(0, 0, - (rhsBytes * Math.log(rhsBytes) + (hasKeyRangeExpression ? 
0 : lhsBytes)) / parallelLevel + outputBytes); + /** + * Estimate the cost of a hash-join + * @param lhsBytes the number of left input bytes + * @param rhsBytes the number of right input bytes + * @param outputBytes the number of output bytes + * @param parallelLevel number of parallel workers or threads + * @return the cost + */ + public static Cost estimateHashJoinCost(double lhsBytes, double rhsBytes, double outputBytes, + boolean hasKeyRangeExpression, int parallelLevel) { + if (rhsBytes < 1) { + rhsBytes = 1; } + return new Cost(0, 0, + (rhsBytes * Math.log(rhsBytes) + (hasKeyRangeExpression ? 0 : lhsBytes)) / parallelLevel + + outputBytes); + } - /** - * Estimate the parallel level of an operation - * @param runningOnServer if the operation will be running on server side - * @param services the QueryServices object - * @return the parallel level - */ - public static int estimateParallelLevel(boolean runningOnServer, QueryServices services) { - // TODO currently return constants for simplicity, should derive from cluster config. - return runningOnServer ? 10 : 1; - } + /** + * Estimate the parallel level of an operation + * @param runningOnServer if the operation will be running on server side + * @param services the QueryServices object + * @return the parallel level + */ + public static int estimateParallelLevel(boolean runningOnServer, QueryServices services) { + // TODO currently return constants for simplicity, should derive from cluster config. + return runningOnServer ? 10 : 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CursorUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CursorUtil.java index c0034812a66..ae063c8f999 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/CursorUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/CursorUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util; import java.sql.Connection; @@ -25,8 +24,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.execute.CursorFetchPlan; import org.apache.phoenix.iterate.CursorResultIterator; import org.apache.phoenix.parse.CloseStatement; @@ -36,154 +35,157 @@ public final class CursorUtil { - private static class CursorWrapper { - private final String cursorName; - private final String selectSQL; - private boolean isOpen = false; - QueryPlan queryPlan; - ImmutableBytesWritable row; - ImmutableBytesWritable previousRow; - private Scan scan; - private boolean moreValues=true; - private boolean isReversed; - private boolean islastCallNext; - private CursorFetchPlan fetchPlan; - private int offset = -1; - private boolean isAggregate; - - private CursorWrapper(String cursorName, String selectSQL, QueryPlan queryPlan){ - this.cursorName = cursorName; - this.selectSQL = selectSQL; - this.queryPlan = queryPlan; - this.islastCallNext = true; - this.fetchPlan = new CursorFetchPlan(queryPlan,cursorName); - isAggregate = fetchPlan.isAggregate(); - } - - private synchronized void openCursor(Connection conn) throws SQLException { - if(isOpen){ - return; - } - this.scan = this.queryPlan.getContext().getScan(); - isReversed=OrderBy.REV_ROW_KEY_ORDER_BY.equals(this.queryPlan.getOrderBy()); - isOpen = true; - } - - private void closeCursor() throws SQLException { - isOpen = false; - ((CursorResultIterator) fetchPlan.iterator()).closeCursor(); - //TODO: Determine if the cursor should be removed from the HashMap at this point. - //Semantically it makes sense that something which is 'Closed' one should be able to 'Open' again. - mapCursorIDQuery.remove(this.cursorName); - } - - private QueryPlan getFetchPlan(boolean isNext, int fetchSize) throws SQLException { - if (!isOpen) - throw new SQLException("Fetch call on closed cursor '" + this.cursorName + "'!"); - ((CursorResultIterator)fetchPlan.iterator()).setFetchSize(fetchSize); - if (!isAggregate) { - if (row!=null){ - scan.withStartRow(row.get()); - } - } - return this.fetchPlan; - } - - public void updateLastScanRow(Tuple rowValues,Tuple nextRowValues) { - - this.moreValues = !isReversed ? 
nextRowValues != null : rowValues != null; - if(!moreValues()){ - return; - } - if (row == null) { - row = new ImmutableBytesWritable(); - } - if (previousRow == null) { - previousRow = new ImmutableBytesWritable(); - } - if (nextRowValues != null) { - nextRowValues.getKey(row); - } - if (rowValues != null) { - rowValues.getKey(previousRow); - } - offset++; - } - - public boolean moreValues() { - return moreValues; - } - - public String getFetchSQL() throws SQLException { - if (!isOpen) - throw new SQLException("Fetch call on closed cursor '" + this.cursorName + "'!"); - return selectSQL; - } + private static class CursorWrapper { + private final String cursorName; + private final String selectSQL; + private boolean isOpen = false; + QueryPlan queryPlan; + ImmutableBytesWritable row; + ImmutableBytesWritable previousRow; + private Scan scan; + private boolean moreValues = true; + private boolean isReversed; + private boolean islastCallNext; + private CursorFetchPlan fetchPlan; + private int offset = -1; + private boolean isAggregate; + + private CursorWrapper(String cursorName, String selectSQL, QueryPlan queryPlan) { + this.cursorName = cursorName; + this.selectSQL = selectSQL; + this.queryPlan = queryPlan; + this.islastCallNext = true; + this.fetchPlan = new CursorFetchPlan(queryPlan, cursorName); + isAggregate = fetchPlan.isAggregate(); } - private static Map mapCursorIDQuery = new HashMap(); + private synchronized void openCursor(Connection conn) throws SQLException { + if (isOpen) { + return; + } + this.scan = this.queryPlan.getContext().getScan(); + isReversed = OrderBy.REV_ROW_KEY_ORDER_BY.equals(this.queryPlan.getOrderBy()); + isOpen = true; + } - /** - * Private constructor - */ - private CursorUtil() { + private void closeCursor() throws SQLException { + isOpen = false; + ((CursorResultIterator) fetchPlan.iterator()).closeCursor(); + // TODO: Determine if the cursor should be removed from the HashMap at this point. + // Semantically it makes sense that something which is 'Closed' one should be able to 'Open' + // again. + mapCursorIDQuery.remove(this.cursorName); } - /** - * - * @param stmt DeclareCursorStatement instance intending to declare a new cursor. - * @return Returns true if the new cursor was successfully declared. False if a cursor with the same - * identifier already exists. 
- */ - public static boolean declareCursor(DeclareCursorStatement stmt, QueryPlan queryPlan) throws SQLException { - if(mapCursorIDQuery.containsKey(stmt.getCursorName())){ - throw new SQLException("Can't declare cursor " + stmt.getCursorName() + ", cursor identifier already in use."); - } else { - mapCursorIDQuery.put(stmt.getCursorName(), new CursorWrapper(stmt.getCursorName(), stmt.getQuerySQL(), queryPlan)); - return true; + private QueryPlan getFetchPlan(boolean isNext, int fetchSize) throws SQLException { + if (!isOpen) throw new SQLException("Fetch call on closed cursor '" + this.cursorName + "'!"); + ((CursorResultIterator) fetchPlan.iterator()).setFetchSize(fetchSize); + if (!isAggregate) { + if (row != null) { + scan.withStartRow(row.get()); } + } + return this.fetchPlan; } - public static boolean openCursor(OpenStatement stmt, Connection conn) throws SQLException { - if(mapCursorIDQuery.containsKey(stmt.getCursorName())){ - mapCursorIDQuery.get(stmt.getCursorName()).openCursor(conn); - return true; - } else{ - throw new SQLException("Cursor " + stmt.getCursorName() + " not declared."); - } + public void updateLastScanRow(Tuple rowValues, Tuple nextRowValues) { + + this.moreValues = !isReversed ? nextRowValues != null : rowValues != null; + if (!moreValues()) { + return; + } + if (row == null) { + row = new ImmutableBytesWritable(); + } + if (previousRow == null) { + previousRow = new ImmutableBytesWritable(); + } + if (nextRowValues != null) { + nextRowValues.getKey(row); + } + if (rowValues != null) { + rowValues.getKey(previousRow); + } + offset++; } - public static void closeCursor(CloseStatement stmt) throws SQLException { - if(mapCursorIDQuery.containsKey(stmt.getCursorName())){ - mapCursorIDQuery.get(stmt.getCursorName()).closeCursor(); - } + public boolean moreValues() { + return moreValues; } - public static QueryPlan getFetchPlan(String cursorName, boolean isNext, int fetchSize) throws SQLException { - if(mapCursorIDQuery.containsKey(cursorName)){ - return mapCursorIDQuery.get(cursorName).getFetchPlan(isNext, fetchSize); - } else { - throw new SQLException("Cursor " + cursorName + " not declared."); - } + public String getFetchSQL() throws SQLException { + if (!isOpen) throw new SQLException("Fetch call on closed cursor '" + this.cursorName + "'!"); + return selectSQL; } - - public static String getFetchSQL(String cursorName) throws SQLException { - if (mapCursorIDQuery.containsKey(cursorName)) { - return mapCursorIDQuery.get(cursorName).getFetchSQL(); - } else { - throw new SQLException("Cursor " + cursorName + " not declared."); - } + } + + private static Map mapCursorIDQuery = new HashMap(); + + /** + * Private constructor + */ + private CursorUtil() { + } + + /** + * @param stmt DeclareCursorStatement instance intending to declare a new cursor. + * @return Returns true if the new cursor was successfully declared. False if a cursor with the + * same identifier already exists. 
+ */ + public static boolean declareCursor(DeclareCursorStatement stmt, QueryPlan queryPlan) + throws SQLException { + if (mapCursorIDQuery.containsKey(stmt.getCursorName())) { + throw new SQLException( + "Can't declare cursor " + stmt.getCursorName() + ", cursor identifier already in use."); + } else { + mapCursorIDQuery.put(stmt.getCursorName(), + new CursorWrapper(stmt.getCursorName(), stmt.getQuerySQL(), queryPlan)); + return true; } - - public static void updateCursor(String cursorName, Tuple rowValues, Tuple nextRowValues) throws SQLException { - mapCursorIDQuery.get(cursorName).updateLastScanRow(rowValues,nextRowValues); + } + + public static boolean openCursor(OpenStatement stmt, Connection conn) throws SQLException { + if (mapCursorIDQuery.containsKey(stmt.getCursorName())) { + mapCursorIDQuery.get(stmt.getCursorName()).openCursor(conn); + return true; + } else { + throw new SQLException("Cursor " + stmt.getCursorName() + " not declared."); } + } - public static boolean cursorDeclared(String cursorName){ - return mapCursorIDQuery.containsKey(cursorName); + public static void closeCursor(CloseStatement stmt) throws SQLException { + if (mapCursorIDQuery.containsKey(stmt.getCursorName())) { + mapCursorIDQuery.get(stmt.getCursorName()).closeCursor(); + } + } + + public static QueryPlan getFetchPlan(String cursorName, boolean isNext, int fetchSize) + throws SQLException { + if (mapCursorIDQuery.containsKey(cursorName)) { + return mapCursorIDQuery.get(cursorName).getFetchPlan(isNext, fetchSize); + } else { + throw new SQLException("Cursor " + cursorName + " not declared."); } + } - public static boolean moreValues(String cursorName) { - return mapCursorIDQuery.get(cursorName).moreValues(); + public static String getFetchSQL(String cursorName) throws SQLException { + if (mapCursorIDQuery.containsKey(cursorName)) { + return mapCursorIDQuery.get(cursorName).getFetchSQL(); + } else { + throw new SQLException("Cursor " + cursorName + " not declared."); } + } + + public static void updateCursor(String cursorName, Tuple rowValues, Tuple nextRowValues) + throws SQLException { + mapCursorIDQuery.get(cursorName).updateLastScanRow(rowValues, nextRowValues); + } + + public static boolean cursorDeclared(String cursorName) { + return mapCursorIDQuery.containsKey(cursorName); + } + + public static boolean moreValues(String cursorName) { + return mapCursorIDQuery.get(cursorName).moreValues(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DateUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DateUtil.java index 5bd7027202d..4b1b83ccabd 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DateUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DateUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,462 +51,466 @@ import edu.umd.cs.findbugs.annotations.NonNull; - @SuppressWarnings({ "serial", "deprecation" }) public class DateUtil { - public static final String DEFAULT_TIME_ZONE_ID = "GMT"; - public static final String LOCAL_TIME_ZONE_ID = "LOCAL"; - private static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID); - - public static final String DEFAULT_MS_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS"; - public static final Format DEFAULT_MS_DATE_FORMATTER = FastDateFormat.getInstance( - DEFAULT_MS_DATE_FORMAT, TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID)); - - public static final String DEFAULT_DATE_FORMAT = DEFAULT_MS_DATE_FORMAT; - public static final Format DEFAULT_DATE_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - - public static final String DEFAULT_TIME_FORMAT = DEFAULT_MS_DATE_FORMAT; - public static final Format DEFAULT_TIME_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - - public static final String DEFAULT_TIMESTAMP_FORMAT = DEFAULT_MS_DATE_FORMAT; - public static final Format DEFAULT_TIMESTAMP_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - - public static final java.time.LocalDate LD_EPOCH = java.time.LocalDate.of(1970, 1, 1); - - private static final DateTimeFormatter JULIAN_DATE_TIME_FORMATTER = new DateTimeFormatterBuilder() - .append(ISODateTimeFormat.dateParser()) - .appendOptional(new DateTimeFormatterBuilder() - .appendLiteral(' ').toParser()) - .appendOptional(new DateTimeFormatterBuilder() - .append(ISODateTimeFormat.timeParser()).toParser()) - .toFormatter().withChronology(GJChronology.getInstanceUTC()); - - private DateUtil() { - } + public static final String DEFAULT_TIME_ZONE_ID = "GMT"; + public static final String LOCAL_TIME_ZONE_ID = "LOCAL"; + private static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID); - @NonNull - // FIXME why don't we just set these codecs in the Types ? 
- public static PDataCodec getCodecFor(PDataType type) { - PDataCodec codec = type.getCodec(); - if (codec != null) { - return codec; - } - if (type == PTimestamp.INSTANCE) { - return PDate.INSTANCE.getCodec(); - } else if (type == PUnsignedTimestamp.INSTANCE) { - return PUnsignedDate.INSTANCE.getCodec(); - } else { - throw new RuntimeException(TypeMismatchException.newException(PTimestamp.INSTANCE, type)); - } - } - - public static TimeZone getTimeZone(String timeZoneId) { - TimeZone parserTimeZone; - if (timeZoneId == null || timeZoneId.equals(DateUtil.DEFAULT_TIME_ZONE_ID)) { - parserTimeZone = DateUtil.DEFAULT_TIME_ZONE; - } else if (LOCAL_TIME_ZONE_ID.equalsIgnoreCase(timeZoneId)) { - parserTimeZone = TimeZone.getDefault(); - } else { - parserTimeZone = TimeZone.getTimeZone(timeZoneId); - } - return parserTimeZone; - } - - private static String[] defaultPattern; - static { - int maxOrdinal = Integer.MIN_VALUE; - List timeDataTypes = Lists.newArrayListWithExpectedSize(6); - for (PDataType type : PDataType.values()) { - if (java.util.Date.class.isAssignableFrom(type.getJavaClass())) { - timeDataTypes.add(type); - if (type.ordinal() > maxOrdinal) { - maxOrdinal = type.ordinal(); - } - } - } - defaultPattern = new String[maxOrdinal+1]; - for (PDataType type : timeDataTypes) { - switch (type.getResultSetSqlType()) { - case Types.TIMESTAMP: - defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIMESTAMP_FORMAT; - break; - case Types.TIME: - defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIME_FORMAT; - break; - case Types.DATE: - defaultPattern[type.ordinal()] = DateUtil.DEFAULT_DATE_FORMAT; - break; - } - } - } - - private static String getDefaultFormat(PDataType type) { - int ordinal = type.ordinal(); - if (ordinal >= 0 || ordinal < defaultPattern.length) { - String format = defaultPattern[ordinal]; - if (format != null) { - return format; - } - } - throw new IllegalArgumentException("Expected a date/time type, but got " + type); - } + public static final String DEFAULT_MS_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS"; + public static final Format DEFAULT_MS_DATE_FORMATTER = + FastDateFormat.getInstance(DEFAULT_MS_DATE_FORMAT, TimeZone.getTimeZone(DEFAULT_TIME_ZONE_ID)); - public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType, String timeZoneId) { - TimeZone timeZone = getTimeZone(timeZoneId); - String defaultPattern = getDefaultFormat(pDataType); - if (pattern == null || pattern.length() == 0) { - pattern = defaultPattern; - } - if (defaultPattern.equals(pattern)) { - return JulianDateFormatParserFactory.getParser(timeZone); - } else { - return new SimpleDateFormatParser(pattern, timeZone); - } - } + public static final String DEFAULT_DATE_FORMAT = DEFAULT_MS_DATE_FORMAT; + public static final Format DEFAULT_DATE_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType) { - return getDateTimeParser(pattern, pDataType, null); - } + public static final String DEFAULT_TIME_FORMAT = DEFAULT_MS_DATE_FORMAT; + public static final Format DEFAULT_TIME_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - public static Format getDateFormatter(String pattern) { - return getDateFormatter(pattern, DateUtil.DEFAULT_TIME_ZONE_ID); - } + public static final String DEFAULT_TIMESTAMP_FORMAT = DEFAULT_MS_DATE_FORMAT; + public static final Format DEFAULT_TIMESTAMP_FORMATTER = DEFAULT_MS_DATE_FORMATTER; - public static Format getDateFormatter(String pattern, String timeZoneID) { - return DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) 
&& DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) - ? DateUtil.DEFAULT_DATE_FORMATTER - : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); - } + public static final java.time.LocalDate LD_EPOCH = java.time.LocalDate.of(1970, 1, 1); - public static Format getTimeFormatter(String pattern, String timeZoneID) { - return DateUtil.DEFAULT_TIME_FORMAT.equals(pattern) && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) - ? DateUtil.DEFAULT_TIME_FORMATTER - : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); - } + private static final DateTimeFormatter JULIAN_DATE_TIME_FORMATTER = + new DateTimeFormatterBuilder().append(ISODateTimeFormat.dateParser()) + .appendOptional(new DateTimeFormatterBuilder().appendLiteral(' ').toParser()) + .appendOptional( + new DateTimeFormatterBuilder().append(ISODateTimeFormat.timeParser()).toParser()) + .toFormatter().withChronology(GJChronology.getInstanceUTC()); - public static Format getTimestampFormatter(String pattern, String timeZoneID) { - return DateUtil.DEFAULT_TIMESTAMP_FORMAT.equals(pattern) && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) - ? DateUtil.DEFAULT_TIMESTAMP_FORMATTER - : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); - } + private DateUtil() { + } - /** - * Parses a datetime string in the UTC time zone. - * - * @param dateValue datetime string in UTC - * @return epoch ms - */ - private static long parseDateTime(String dateTimeValue) { - return JulianDateFormatParser.getInstance().parseDateTime(dateTimeValue); + @NonNull + // FIXME why don't we just set these codecs in the Types ? + public static PDataCodec getCodecFor(PDataType type) { + PDataCodec codec = type.getCodec(); + if (codec != null) { + return codec; } - - /** - * Parses a date string in the UTC time zone. - * - * @param dateValue date string in UTC - * @return epoch ms - */ - public static Date parseDate(String dateValue) { - return new Date(parseDateTime(dateValue)); + if (type == PTimestamp.INSTANCE) { + return PDate.INSTANCE.getCodec(); + } else if (type == PUnsignedTimestamp.INSTANCE) { + return PUnsignedDate.INSTANCE.getCodec(); + } else { + throw new RuntimeException(TypeMismatchException.newException(PTimestamp.INSTANCE, type)); } - - /** - * Parses a time string in the UTC time zone. - * - * @param dateValue time string in UTC - * @return epoch ms - */ - public static Time parseTime(String timeValue) { - return new Time(parseDateTime(timeValue)); + } + + public static TimeZone getTimeZone(String timeZoneId) { + TimeZone parserTimeZone; + if (timeZoneId == null || timeZoneId.equals(DateUtil.DEFAULT_TIME_ZONE_ID)) { + parserTimeZone = DateUtil.DEFAULT_TIME_ZONE; + } else if (LOCAL_TIME_ZONE_ID.equalsIgnoreCase(timeZoneId)) { + parserTimeZone = TimeZone.getDefault(); + } else { + parserTimeZone = TimeZone.getTimeZone(timeZoneId); } - - /** - * Parses the timestsamp string in the UTC time zone. 
- * - * @param timestampValue timestamp string in UTC - * @return Timestamp parsed in UTC - */ - public static Timestamp parseTimestamp(String timestampValue) { - Timestamp timestamp = new Timestamp(parseDateTime(timestampValue)); - int period = timestampValue.indexOf('.'); - if (period > 0) { - String nanosStr = timestampValue.substring(period + 1); - if (nanosStr.length() > 9) - throw new IllegalDataException("nanos > 999999999 or < 0"); - if (nanosStr.length() > 3) { - int nanos = Integer.parseInt(nanosStr); - for (int i = 0; i < 9 - nanosStr.length(); i++) { - nanos *= 10; - } - timestamp.setNanos(nanos); - } + return parserTimeZone; + } + + private static String[] defaultPattern; + static { + int maxOrdinal = Integer.MIN_VALUE; + List timeDataTypes = Lists.newArrayListWithExpectedSize(6); + for (PDataType type : PDataType.values()) { + if (java.util.Date.class.isAssignableFrom(type.getJavaClass())) { + timeDataTypes.add(type); + if (type.ordinal() > maxOrdinal) { + maxOrdinal = type.ordinal(); } - return timestamp; + } } - - /** - * Utility function to work around the weirdness of the {@link Timestamp} constructor. - * This method takes the milli-seconds that spills over to the nanos part as part of - * constructing the {@link Timestamp} object. - * If we just set the nanos part of timestamp to the nanos passed in param, we - * end up losing the sub-second part of timestamp. - */ - public static Timestamp getTimestamp(long millis, int nanos) { - if (nanos > MAX_ALLOWED_NANOS || nanos < 0) { - throw new IllegalArgumentException("nanos > " + MAX_ALLOWED_NANOS + " or < 0"); - } - Timestamp ts = new Timestamp(millis); - if (ts.getNanos() + nanos > MAX_ALLOWED_NANOS) { - int millisToNanosConvertor = BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR).intValue(); - int overFlowMs = (ts.getNanos() + nanos) / millisToNanosConvertor; - int overFlowNanos = (ts.getNanos() + nanos) - (overFlowMs * millisToNanosConvertor); - ts = new Timestamp(millis + overFlowMs); - ts.setNanos(ts.getNanos() + overFlowNanos); - } else { - ts.setNanos(ts.getNanos() + nanos); - } - return ts; + defaultPattern = new String[maxOrdinal + 1]; + for (PDataType type : timeDataTypes) { + switch (type.getResultSetSqlType()) { + case Types.TIMESTAMP: + defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIMESTAMP_FORMAT; + break; + case Types.TIME: + defaultPattern[type.ordinal()] = DateUtil.DEFAULT_TIME_FORMAT; + break; + case Types.DATE: + defaultPattern[type.ordinal()] = DateUtil.DEFAULT_DATE_FORMAT; + break; + } } - - /** - * Utility function to convert a {@link BigDecimal} value to {@link Timestamp}. 
- */ - public static Timestamp getTimestamp(BigDecimal bd) { - return DateUtil.getTimestamp(bd.longValue(), ((bd.remainder(BigDecimal.ONE).multiply(BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR))).intValue())); + } + + private static String getDefaultFormat(PDataType type) { + int ordinal = type.ordinal(); + if (ordinal >= 0 || ordinal < defaultPattern.length) { + String format = defaultPattern[ordinal]; + if (format != null) { + return format; + } } - - public static interface DateTimeParser { - public long parseDateTime(String dateTimeString) throws IllegalDataException; - public TimeZone getTimeZone(); + throw new IllegalArgumentException("Expected a date/time type, but got " + type); + } + + public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType, + String timeZoneId) { + TimeZone timeZone = getTimeZone(timeZoneId); + String defaultPattern = getDefaultFormat(pDataType); + if (pattern == null || pattern.length() == 0) { + pattern = defaultPattern; } - - /** - * This class is used when a user explicitly provides phoenix.query.dateFormat in configuration - */ - private static class SimpleDateFormatParser implements DateTimeParser { - private String datePattern; - private SimpleDateFormat parser; - - public SimpleDateFormatParser(String pattern, TimeZone timeZone) { - datePattern = pattern; - parser = new SimpleDateFormat(pattern) { - @Override - public java.util.Date parseObject(String source) throws ParseException { - java.util.Date date = super.parse(source); - return new java.sql.Date(date.getTime()); - } - }; - parser.setTimeZone(timeZone); - } - - @Override - public long parseDateTime(String dateTimeString) throws IllegalDataException { - try { - java.util.Date date =parser.parse(dateTimeString); - return date.getTime(); - } catch (ParseException e) { - throw new IllegalDataException("Unable to parse date/time '" + dateTimeString + "' using format string of '" + datePattern + "'."); - } + if (defaultPattern.equals(pattern)) { + return JulianDateFormatParserFactory.getParser(timeZone); + } else { + return new SimpleDateFormatParser(pattern, timeZone); + } + } + + public static DateTimeParser getDateTimeParser(String pattern, PDataType pDataType) { + return getDateTimeParser(pattern, pDataType, null); + } + + public static Format getDateFormatter(String pattern) { + return getDateFormatter(pattern, DateUtil.DEFAULT_TIME_ZONE_ID); + } + + public static Format getDateFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_DATE_FORMAT.equals(pattern) + && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) + ? DateUtil.DEFAULT_DATE_FORMATTER + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); + } + + public static Format getTimeFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_TIME_FORMAT.equals(pattern) + && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) + ? DateUtil.DEFAULT_TIME_FORMATTER + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); + } + + public static Format getTimestampFormatter(String pattern, String timeZoneID) { + return DateUtil.DEFAULT_TIMESTAMP_FORMAT.equals(pattern) + && DateUtil.DEFAULT_TIME_ZONE_ID.equals(timeZoneID) + ? DateUtil.DEFAULT_TIMESTAMP_FORMATTER + : FastDateFormat.getInstance(pattern, getTimeZone(timeZoneID)); + } + + /** + * Parses a datetime string in the UTC time zone. 
+ * @param dateValue datetime string in UTC + * @return epoch ms + */ + private static long parseDateTime(String dateTimeValue) { + return JulianDateFormatParser.getInstance().parseDateTime(dateTimeValue); + } + + /** + * Parses a date string in the UTC time zone. + * @param dateValue date string in UTC + * @return epoch ms + */ + public static Date parseDate(String dateValue) { + return new Date(parseDateTime(dateValue)); + } + + /** + * Parses a time string in the UTC time zone. + * @param dateValue time string in UTC + * @return epoch ms + */ + public static Time parseTime(String timeValue) { + return new Time(parseDateTime(timeValue)); + } + + /** + * Parses the timestsamp string in the UTC time zone. + * @param timestampValue timestamp string in UTC + * @return Timestamp parsed in UTC + */ + public static Timestamp parseTimestamp(String timestampValue) { + Timestamp timestamp = new Timestamp(parseDateTime(timestampValue)); + int period = timestampValue.indexOf('.'); + if (period > 0) { + String nanosStr = timestampValue.substring(period + 1); + if (nanosStr.length() > 9) throw new IllegalDataException("nanos > 999999999 or < 0"); + if (nanosStr.length() > 3) { + int nanos = Integer.parseInt(nanosStr); + for (int i = 0; i < 9 - nanosStr.length(); i++) { + nanos *= 10; } - + timestamp.setNanos(nanos); + } + } + return timestamp; + } + + /** + * Utility function to work around the weirdness of the {@link Timestamp} constructor. This method + * takes the milli-seconds that spills over to the nanos part as part of constructing the + * {@link Timestamp} object. If we just set the nanos part of timestamp to the nanos passed in + * param, we end up losing the sub-second part of timestamp. + */ + public static Timestamp getTimestamp(long millis, int nanos) { + if (nanos > MAX_ALLOWED_NANOS || nanos < 0) { + throw new IllegalArgumentException("nanos > " + MAX_ALLOWED_NANOS + " or < 0"); + } + Timestamp ts = new Timestamp(millis); + if (ts.getNanos() + nanos > MAX_ALLOWED_NANOS) { + int millisToNanosConvertor = BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR).intValue(); + int overFlowMs = (ts.getNanos() + nanos) / millisToNanosConvertor; + int overFlowNanos = (ts.getNanos() + nanos) - (overFlowMs * millisToNanosConvertor); + ts = new Timestamp(millis + overFlowMs); + ts.setNanos(ts.getNanos() + overFlowNanos); + } else { + ts.setNanos(ts.getNanos() + nanos); + } + return ts; + } + + /** + * Utility function to convert a {@link BigDecimal} value to {@link Timestamp}. 
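[Illustrative note, not part of this patch: the parseTimestamp logic above re-reads the fractional part of the literal and pads it to nanosecond precision. A minimal sketch of that behaviour, with an example literal only:]

    // Hypothetical usage of org.apache.phoenix.util.DateUtil.parseTimestamp (not in this change).
    java.sql.Timestamp ts = DateUtil.parseTimestamp("2013-11-04 09:47:21.123456");
    // Per the code above, the six-digit fraction "123456" is padded to nanos,
    // so ts.getNanos() == 123456000, while the epoch millis keep the .123 part.
    int nanos = ts.getNanos();
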
+ */ + public static Timestamp getTimestamp(BigDecimal bd) { + return DateUtil.getTimestamp(bd.longValue(), + ((bd.remainder(BigDecimal.ONE).multiply(BigDecimal.valueOf(MILLIS_TO_NANOS_CONVERTOR))) + .intValue())); + } + + public static interface DateTimeParser { + public long parseDateTime(String dateTimeString) throws IllegalDataException; + + public TimeZone getTimeZone(); + } + + /** + * This class is used when a user explicitly provides phoenix.query.dateFormat in configuration + */ + private static class SimpleDateFormatParser implements DateTimeParser { + private String datePattern; + private SimpleDateFormat parser; + + public SimpleDateFormatParser(String pattern, TimeZone timeZone) { + datePattern = pattern; + parser = new SimpleDateFormat(pattern) { @Override - public TimeZone getTimeZone() { - return parser.getTimeZone(); + public java.util.Date parseObject(String source) throws ParseException { + java.util.Date date = super.parse(source); + return new java.sql.Date(date.getTime()); } + }; + parser.setTimeZone(timeZone); } - private static class JulianDateFormatParserFactory { - private JulianDateFormatParserFactory() {} - - public static DateTimeParser getParser(final TimeZone timeZone) { - // If timeZone matches default, get singleton DateTimeParser - if (timeZone.equals(DEFAULT_TIME_ZONE)) { - return JulianDateFormatParser.getInstance(); - } - // Otherwise, create new DateTimeParser - return new DateTimeParser() { - private final DateTimeFormatter formatter = JULIAN_DATE_TIME_FORMATTER - .withZone(DateTimeZone.forTimeZone(timeZone)); - - @Override - public long parseDateTime(String dateTimeString) throws IllegalDataException { - try { - return formatter.parseDateTime(dateTimeString).getMillis(); - } catch(IllegalArgumentException ex) { - throw new IllegalDataException(ex); - } - } - - @Override - public TimeZone getTimeZone() { - return timeZone; - } - }; - } + @Override + public long parseDateTime(String dateTimeString) throws IllegalDataException { + try { + java.util.Date date = parser.parse(dateTimeString); + return date.getTime(); + } catch (ParseException e) { + throw new IllegalDataException("Unable to parse date/time '" + dateTimeString + + "' using format string of '" + datePattern + "'."); + } } - /** - * This class is our default DateTime string parser - */ - private static class JulianDateFormatParser implements DateTimeParser { - private static final JulianDateFormatParser INSTANCE = new JulianDateFormatParser(); - - public static JulianDateFormatParser getInstance() { - return INSTANCE; - } + @Override + public TimeZone getTimeZone() { + return parser.getTimeZone(); + } + } - private final DateTimeFormatter formatter = JULIAN_DATE_TIME_FORMATTER.withZone(DateTimeZone.UTC); + private static class JulianDateFormatParserFactory { + private JulianDateFormatParserFactory() { + } - private JulianDateFormatParser() {} + public static DateTimeParser getParser(final TimeZone timeZone) { + // If timeZone matches default, get singleton DateTimeParser + if (timeZone.equals(DEFAULT_TIME_ZONE)) { + return JulianDateFormatParser.getInstance(); + } + // Otherwise, create new DateTimeParser + return new DateTimeParser() { + private final DateTimeFormatter formatter = + JULIAN_DATE_TIME_FORMATTER.withZone(DateTimeZone.forTimeZone(timeZone)); @Override public long parseDateTime(String dateTimeString) throws IllegalDataException { - try { - return formatter.parseDateTime(dateTimeString).getMillis(); - } catch(IllegalArgumentException ex) { - throw new IllegalDataException(ex); - } 
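[Illustrative note, not part of this patch: a hedged sketch of how the parser selection above behaves; the pattern, time zone and literals are example values only.]

    // Default pattern: getDateTimeParser returns the shared Julian/ISO parser (UTC).
    DateUtil.DateTimeParser defaultParser =
        DateUtil.getDateTimeParser(null, PTimestamp.INSTANCE);
    long epochMs = defaultParser.parseDateTime("2011-01-01 12:00:00.123");

    // Custom phoenix.query.dateFormat: a SimpleDateFormatParser in the requested zone.
    DateUtil.DateTimeParser customParser =
        DateUtil.getDateTimeParser("yyyy/MM/dd", PDate.INSTANCE, "America/New_York");
    long dayMs = customParser.parseDateTime("2011/01/01");
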
+ try { + return formatter.parseDateTime(dateTimeString).getMillis(); + } catch (IllegalArgumentException ex) { + throw new IllegalDataException(ex); + } } @Override public TimeZone getTimeZone() { - return formatter.getZone().toTimeZone(); - } - } - - public static long rangeJodaHalfEven(DateTime roundedDT, DateTime otherDT, - DateTimeFieldType type) { - // It's OK if this is slow, as it's only called O(1) times per query - // - // We need to reverse engineer what roundHalfEvenCopy() does - // and return the lower/upper (inclusive) range here - // Joda simply works on milliseconds between the floor and ceil values. - // We could avoid the period call for units less than a day, but this is not a perf - // critical function. - long roundedMs = roundedDT.getMillis(); - long otherMs = otherDT.getMillis(); - long midMs = (roundedMs + otherMs) / 2; - long remainder = (roundedMs + otherMs) % 2; - if (remainder == 0) { - int roundedUnits = roundedDT.get(type); - if (otherMs > roundedMs) { - // Upper range, other is bigger. - if ((roundedUnits & 1) == 0) { - // This unit is even, the next second is odd, so we get the mid point - return midMs; - } else { - // This unit is odd, the next second is even and takes the midpoint. - return midMs - 1; - } - } else { - // Lower range, other is smaller. - if ((roundedUnits & 1) == 0) { - // This unit is even, the next second is odd, so we get the mid point - return midMs; - } else { - // This unit is odd, the next second is even and takes the midpoint. - return midMs + 1; - } - } - } else { - // probably never happens - if (otherMs > roundedMs) { - // Upper range, return the rounded down value - return midMs; - } else { - // Lower range, the mid value belongs to the previous unit. - return midMs + 1; - } + return timeZone; } + }; } + } - // These implementations favour speed over historical correctness, and use - // java.util.TimeZone#getOffset(epoch millis) and inherit its limitations. - - // When we switch to java.time, we might want to revisit this, and add an option for - // slower but more correct conversions. - // However, any conversion for TZs with DST is best effort anyway. - - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the UTC time zone as the Input in the specified time zone. - * @param jdbc Date interpreted in timeZone - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Date applyInputDisplacement(java.sql.Date jdbc, TimeZone timeZone) { - long epoch = jdbc.getTime(); - return new java.sql.Date(epoch + timeZone.getOffset(epoch)); - } + /** + * This class is our default DateTime string parser + */ + private static class JulianDateFormatParser implements DateTimeParser { + private static final JulianDateFormatParser INSTANCE = new JulianDateFormatParser(); - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the UTC time zone as the Input in the specified time zone. 
- * @param jdbc Time interpreted in timeZone - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Time applyInputDisplacement(java.sql.Time jdbc, TimeZone timeZone) { - long epoch = jdbc.getTime(); - return new java.sql.Time(epoch + timeZone.getOffset(epoch)); + public static JulianDateFormatParser getInstance() { + return INSTANCE; } - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the UTC time zone as the Input in the specified time zone. - * @param jdbc Timestamp interpreted in timeZone - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Timestamp applyInputDisplacement(java.sql.Timestamp jdbc, TimeZone timeZone) { - long epoch = jdbc.getTime(); - java.sql.Timestamp ts = new java.sql.Timestamp(epoch + timeZone.getOffset(epoch)); - ts.setNanos(jdbc.getNanos()); - return ts; - } + private final DateTimeFormatter formatter = + JULIAN_DATE_TIME_FORMATTER.withZone(DateTimeZone.UTC); - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the specified time zone as the Input in the UTC time zone. - * @param internal Date as UTC epoch - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Date applyOutputDisplacement(java.sql.Date internal, TimeZone timeZone) { - long epoch = internal.getTime(); - return new java.sql.Date(epoch - getReverseOffset(epoch, timeZone)); + private JulianDateFormatParser() { } - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the specified time zone as the Input in the UTC time zone. - * @param internal Date as UTC epoch - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Time applyOutputDisplacement(java.sql.Time internal, TimeZone timeZone) { - long epoch = internal.getTime(); - return new java.sql.Time(epoch - getReverseOffset(epoch, timeZone)); + @Override + public long parseDateTime(String dateTimeString) throws IllegalDataException { + try { + return formatter.parseDateTime(dateTimeString).getMillis(); + } catch (IllegalArgumentException ex) { + throw new IllegalDataException(ex); + } } - /** - * Apply the time zone displacement to the input, so that the output represents the same - * LocalDateTime in the specified time zone as the Input in the UTC time zone. 
- * @param internal Timestamp as UTC epoch - * @param timeZone for displacement calculation - * @return input with the TZ displacement applied - */ - public static java.sql.Timestamp applyOutputDisplacement(java.sql.Timestamp internal, TimeZone timeZone) { - long epoch = internal.getTime(); - java.sql.Timestamp ts = new java.sql.Timestamp(epoch - getReverseOffset(epoch, timeZone)); - ts.setNanos(internal.getNanos()); - return ts; + @Override + public TimeZone getTimeZone() { + return formatter.getZone().toTimeZone(); } - - private static int getReverseOffset(long epoch, TimeZone tz) { - return tz.getOffset( - epoch - tz.getRawOffset() - tz.getDSTSavings()); + } + + public static long rangeJodaHalfEven(DateTime roundedDT, DateTime otherDT, + DateTimeFieldType type) { + // It's OK if this is slow, as it's only called O(1) times per query + // + // We need to reverse engineer what roundHalfEvenCopy() does + // and return the lower/upper (inclusive) range here + // Joda simply works on milliseconds between the floor and ceil values. + // We could avoid the period call for units less than a day, but this is not a perf + // critical function. + long roundedMs = roundedDT.getMillis(); + long otherMs = otherDT.getMillis(); + long midMs = (roundedMs + otherMs) / 2; + long remainder = (roundedMs + otherMs) % 2; + if (remainder == 0) { + int roundedUnits = roundedDT.get(type); + if (otherMs > roundedMs) { + // Upper range, other is bigger. + if ((roundedUnits & 1) == 0) { + // This unit is even, the next second is odd, so we get the mid point + return midMs; + } else { + // This unit is odd, the next second is even and takes the midpoint. + return midMs - 1; + } + } else { + // Lower range, other is smaller. + if ((roundedUnits & 1) == 0) { + // This unit is even, the next second is odd, so we get the mid point + return midMs; + } else { + // This unit is odd, the next second is even and takes the midpoint. + return midMs + 1; + } + } + } else { + // probably never happens + if (otherMs > roundedMs) { + // Upper range, return the rounded down value + return midMs; + } else { + // Lower range, the mid value belongs to the previous unit. + return midMs + 1; + } } + } + + // These implementations favour speed over historical correctness, and use + // java.util.TimeZone#getOffset(epoch millis) and inherit its limitations. + + // When we switch to java.time, we might want to revisit this, and add an option for + // slower but more correct conversions. + // However, any conversion for TZs with DST is best effort anyway. + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the UTC time zone as the Input in the specified time zone. + * @param jdbc Date interpreted in timeZone + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Date applyInputDisplacement(java.sql.Date jdbc, TimeZone timeZone) { + long epoch = jdbc.getTime(); + return new java.sql.Date(epoch + timeZone.getOffset(epoch)); + } + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the UTC time zone as the Input in the specified time zone. 
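[Illustrative note, not part of this patch: a rough round-trip sketch of the displacement helpers described above, using an arbitrary example time zone; as the comments note, behaviour near DST transitions is best effort.]

    TimeZone tz = TimeZone.getTimeZone("America/Los_Angeles");
    java.sql.Timestamp client = java.sql.Timestamp.valueOf("2020-06-01 10:00:00");
    // Write path: keep the client's wall-clock time when the value is treated as UTC internally.
    java.sql.Timestamp internal = DateUtil.applyInputDisplacement(client, tz);
    // Read path: shift the internal UTC-epoch value back into the client's time zone.
    java.sql.Timestamp back = DateUtil.applyOutputDisplacement(internal, tz);
    // Away from DST transitions, 'back' shows the same wall-clock time as 'client'.
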
+ * @param jdbc Time interpreted in timeZone + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Time applyInputDisplacement(java.sql.Time jdbc, TimeZone timeZone) { + long epoch = jdbc.getTime(); + return new java.sql.Time(epoch + timeZone.getOffset(epoch)); + } + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the UTC time zone as the Input in the specified time zone. + * @param jdbc Timestamp interpreted in timeZone + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Timestamp applyInputDisplacement(java.sql.Timestamp jdbc, + TimeZone timeZone) { + long epoch = jdbc.getTime(); + java.sql.Timestamp ts = new java.sql.Timestamp(epoch + timeZone.getOffset(epoch)); + ts.setNanos(jdbc.getNanos()); + return ts; + } + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the specified time zone as the Input in the UTC time zone. + * @param internal Date as UTC epoch + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Date applyOutputDisplacement(java.sql.Date internal, TimeZone timeZone) { + long epoch = internal.getTime(); + return new java.sql.Date(epoch - getReverseOffset(epoch, timeZone)); + } + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the specified time zone as the Input in the UTC time zone. + * @param internal Date as UTC epoch + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Time applyOutputDisplacement(java.sql.Time internal, TimeZone timeZone) { + long epoch = internal.getTime(); + return new java.sql.Time(epoch - getReverseOffset(epoch, timeZone)); + } + + /** + * Apply the time zone displacement to the input, so that the output represents the same + * LocalDateTime in the specified time zone as the Input in the UTC time zone. + * @param internal Timestamp as UTC epoch + * @param timeZone for displacement calculation + * @return input with the TZ displacement applied + */ + public static java.sql.Timestamp applyOutputDisplacement(java.sql.Timestamp internal, + TimeZone timeZone) { + long epoch = internal.getTime(); + java.sql.Timestamp ts = new java.sql.Timestamp(epoch - getReverseOffset(epoch, timeZone)); + ts.setNanos(internal.getNanos()); + return ts; + } + + private static int getReverseOffset(long epoch, TimeZone tz) { + return tz.getOffset(epoch - tz.getRawOffset() - tz.getDSTSavings()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java index 6a1e0755846..a75503bdc6b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DefaultEnvironmentEdge.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,8 +22,7 @@ */ public class DefaultEnvironmentEdge extends EnvironmentEdge { /** - * {@inheritDoc} - * This implementation returns {@link System#currentTimeMillis()} + * {@inheritDoc} This implementation returns {@link System#currentTimeMillis()} */ @Override public long currentTime() { diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DeferredStringBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DeferredStringBuilder.java index 45dec5c22ed..c18f1ce2e34 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/DeferredStringBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/DeferredStringBuilder.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,118 +19,116 @@ /** * This utility class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * This class implements a StringBuilder that is incrementally copied from a source String. - * Actual creation the new buffer is deferred until a character differs from a character at - * the same position in the source String. This class is useful for reducing garbage creation - * when doing operations like escaping a String, when most Strings are not expected to contain - * any escapable characters. In that case, no additional memory is used (as the original - * String is not actually copied). + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 This class implements a + * StringBuilder that is incrementally copied from a source String. Actual creation the new buffer + * is deferred until a character differs from a character at the same position in the source String. + * This class is useful for reducing garbage creation when doing operations like escaping a String, + * when most Strings are not expected to contain any escapable characters. In that case, no + * additional memory is used (as the original String is not actually copied). 
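[Illustrative note, not part of this patch: a small hypothetical helper showing the deferred-copy behaviour described above; escapeQuotes is an invented name for the escaping use case the javadoc mentions.]

    static String escapeQuotes(String s) {
      DeferredStringBuilder buf = new DeferredStringBuilder(s);
      for (int i = 0; i < s.length(); i++) {
        char c = s.charAt(i);
        if (c == '\'') {
          buf.append("''");   // first mismatching char triggers the real StringBuilder copy
        } else {
          buf.append(c);      // matching chars just advance the cursor, no allocation
        }
      }
      // If nothing needed escaping, toString() hands back the original String without copying.
      return buf.toString();
    }
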
*/ public final class DeferredStringBuilder implements Appendable, CharSequence { - private StringBuilder buf; - private int pos; - private final CharSequence source; + private StringBuilder buf; + private int pos; + private final CharSequence source; - public DeferredStringBuilder(CharSequence source) { - if (source == null) { - this.buf = new StringBuilder(16); - } - this.source = source; + public DeferredStringBuilder(CharSequence source) { + if (source == null) { + this.buf = new StringBuilder(16); } + this.source = source; + } - public DeferredStringBuilder append(char c) { - if (this.buf == null) { - if (this.pos < this.source.length() && c == this.source.charAt(this.pos)) { - // characters match - just move ahead - ++this.pos; - } else { - // doh - character mismatch - now we need to allocate a real StringBuilder - this.buf = new StringBuilder(this.source.length() + 16); - this.buf.append(this.source.subSequence(0, this.pos)); - this.buf.append(c); - } - } else { - // we've already got the buf - just add this character - this.buf.append(c); - } - return this; - } - - public DeferredStringBuilder append(CharSequence csq) { - if (csq == null) { - return this; - } - return append(csq, 0, csq.length()); + public DeferredStringBuilder append(char c) { + if (this.buf == null) { + if (this.pos < this.source.length() && c == this.source.charAt(this.pos)) { + // characters match - just move ahead + ++this.pos; + } else { + // doh - character mismatch - now we need to allocate a real StringBuilder + this.buf = new StringBuilder(this.source.length() + 16); + this.buf.append(this.source.subSequence(0, this.pos)); + this.buf.append(c); + } + } else { + // we've already got the buf - just add this character + this.buf.append(c); } + return this; + } - public DeferredStringBuilder append(CharSequence csq, int start, int end) { - if (csq != null) { - if (buf == null) { - int chars = end - start; - // For small strings or overflow, do it char by char. - if (chars < 10 || (this.pos + chars > this.source.length())) { - for (int i = start; i < end; ++i) { - append(csq.charAt(i)); - } - } else { - CharSequence subSeq = csq.subSequence(start, end); - //String.equals seems to get optimized a lot quicker than the - // chartA + length + loop method. I don't think this will matter at all, - // but between this and OptimizedURLEncoder, this made these classes - // disappear from my profiler - if (this.source.subSequence(this.pos, this.pos + chars).equals(subSeq)) { - this.pos += chars; - } else { - this.buf = new StringBuilder(this.source.length() + 16); - this.buf.append(this.source.subSequence(0, this.pos)); - this.buf.append(subSeq); - } - } - } else { - // We know it's different, so just append the whole string. - buf.append(csq, start, end); - } - } - return this; + public DeferredStringBuilder append(CharSequence csq) { + if (csq == null) { + return this; } + return append(csq, 0, csq.length()); + } - public char charAt(int index) { - if (this.buf != null) { - return this.buf.charAt(index); - } else if (index < pos) { - return this.source.charAt(index); + public DeferredStringBuilder append(CharSequence csq, int start, int end) { + if (csq != null) { + if (buf == null) { + int chars = end - start; + // For small strings or overflow, do it char by char. 
+ if (chars < 10 || (this.pos + chars > this.source.length())) { + for (int i = start; i < end; ++i) { + append(csq.charAt(i)); + } } else { - throw new StringIndexOutOfBoundsException(index); + CharSequence subSeq = csq.subSequence(start, end); + // String.equals seems to get optimized a lot quicker than the + // chartA + length + loop method. I don't think this will matter at all, + // but between this and OptimizedURLEncoder, this made these classes + // disappear from my profiler + if (this.source.subSequence(this.pos, this.pos + chars).equals(subSeq)) { + this.pos += chars; + } else { + this.buf = new StringBuilder(this.source.length() + 16); + this.buf.append(this.source.subSequence(0, this.pos)); + this.buf.append(subSeq); + } } + } else { + // We know it's different, so just append the whole string. + buf.append(csq, start, end); + } } + return this; + } - public CharSequence subSequence(int start, int end) { - if (this.buf != null) { - return this.buf.subSequence(start, end); - } else if (end <= pos) { - return this.source.subSequence(start, end); - } else { - throw new StringIndexOutOfBoundsException(end); - } + public char charAt(int index) { + if (this.buf != null) { + return this.buf.charAt(index); + } else if (index < pos) { + return this.source.charAt(index); + } else { + throw new StringIndexOutOfBoundsException(index); } + } - @Override - public String toString() { - if (this.buf != null) { - return this.buf.toString(); - } - if (this.pos == this.source.length()) { - return this.source.toString(); - } - return this.source.subSequence(0, this.pos).toString(); + public CharSequence subSequence(int start, int end) { + if (this.buf != null) { + return this.buf.subSequence(start, end); + } else if (end <= pos) { + return this.source.subSequence(start, end); + } else { + throw new StringIndexOutOfBoundsException(end); } + } - public int length() { - return this.buf != null ? this.buf.length() : this.pos; + @Override + public String toString() { + if (this.buf != null) { + return this.buf.toString(); + } + if (this.pos == this.source.length()) { + return this.source.toString(); } + return this.source.subSequence(0, this.pos).toString(); + } + + public int length() { + return this.buf != null ? this.buf.length() : this.pos; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java index 00d6a73dbf2..75ef69f915e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,8 @@ */ package org.apache.phoenix.util; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; import java.util.Arrays; import java.util.Collection; @@ -42,162 +42,179 @@ public class EncodedColumnsUtil { - public static boolean usesEncodedColumnNames(PTable table) { - return usesEncodedColumnNames(table.getEncodingScheme()); - } - - public static boolean usesEncodedColumnNames(QualifierEncodingScheme encodingScheme) { - return encodingScheme != null && encodingScheme != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; - } - - public static void setColumns(PColumn column, PTable table, Scan scan) { - if (table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - // if a table storage scheme is COLUMNS_STORED_IN_SINGLE_CELL set then all columns of a column family are stored in a single cell - // (with the qualifier name being same as the family name), just project the column family here - // so that we can calculate estimatedByteSize correctly in ProjectionCompiler - scan.addFamily(column.getFamilyName().getBytes()); - } - else { - if (column.getColumnQualifierBytes() != null) { - scan.addColumn(column.getFamilyName().getBytes(), column.getColumnQualifierBytes()); - } - } - } - - public static boolean useNewValueColumnQualifier(Scan s) { - // null check for backward compatibility - return s.getAttribute(BaseScannerRegionObserverConstants.USE_NEW_VALUE_COLUMN_QUALIFIER) != null; - } - - public static QualifierEncodingScheme getQualifierEncodingScheme(Scan s) { - // null check for backward compatibility - return s.getAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME) == null ? 
QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : QualifierEncodingScheme.fromSerializedValue(s.getAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME)[0]); + public static boolean usesEncodedColumnNames(PTable table) { + return usesEncodedColumnNames(table.getEncodingScheme()); + } + + public static boolean usesEncodedColumnNames(QualifierEncodingScheme encodingScheme) { + return encodingScheme != null + && encodingScheme != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS; + } + + public static void setColumns(PColumn column, PTable table, Scan scan) { + if ( + table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ) { + // if a table storage scheme is COLUMNS_STORED_IN_SINGLE_CELL set then all columns of a column + // family are stored in a single cell + // (with the qualifier name being same as the family name), just project the column family + // here + // so that we can calculate estimatedByteSize correctly in ProjectionCompiler + scan.addFamily(column.getFamilyName().getBytes()); + } else { + if (column.getColumnQualifierBytes() != null) { + scan.addColumn(column.getFamilyName().getBytes(), column.getColumnQualifierBytes()); + } + } + } + + public static boolean useNewValueColumnQualifier(Scan s) { + // null check for backward compatibility + return s.getAttribute(BaseScannerRegionObserverConstants.USE_NEW_VALUE_COLUMN_QUALIFIER) + != null; + } + + public static QualifierEncodingScheme getQualifierEncodingScheme(Scan s) { + // null check for backward compatibility + return s.getAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME) == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : QualifierEncodingScheme.fromSerializedValue( + s.getAttribute(BaseScannerRegionObserverConstants.QUALIFIER_ENCODING_SCHEME)[0]); + } + + public static ImmutableStorageScheme getImmutableStorageScheme(Scan s) { + // null check for backward compatibility + return s + .getAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME) == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : ImmutableStorageScheme.fromSerializedValue( + s.getAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME)[0]); + } + + /** + * @return pair of byte arrays. The first part of the pair is the empty key value's column + * qualifier, and the second part is the value to use for it. + */ + public static Pair getEmptyKeyValueInfo(PTable table) { + return getEmptyKeyValueInfo(usesEncodedColumnNames(table)); + } + + /** + * @return pair of byte arrays. The first part of the pair is the empty key value's column + * qualifier, and the second part is the value to use for it. + */ + public static Pair getEmptyKeyValueInfo(boolean usesEncodedColumnNames) { + return usesEncodedColumnNames + ? new Pair<>(QueryConstants.ENCODED_EMPTY_COLUMN_BYTES, + QueryConstants.ENCODED_EMPTY_COLUMN_VALUE_BYTES) + : new Pair<>(QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + } + + /** + * @return pair of byte arrays. The first part of the pair is the empty key value's column + * qualifier, and the second part is the value to use for it. 
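[Illustrative note, not part of this patch: a hedged sketch of using getEmptyKeyValueInfo; 'dataTable' (a PTable), 'familyBytes' and 'put' are hypothetical variables.]

    // First element: the empty column qualifier (encoded or plain); second: the value to write.
    Pair<byte[], byte[]> emptyKv = EncodedColumnsUtil.getEmptyKeyValueInfo(dataTable);
    put.addColumn(familyBytes, emptyKv.getFirst(), emptyKv.getSecond());
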
+ */ + public static Pair getEmptyKeyValueInfo(QualifierEncodingScheme encodingScheme) { + return getEmptyKeyValueInfo(usesEncodedColumnNames(encodingScheme)); + } + + public static Pair getMinMaxQualifiersFromScan(Scan scan) { + Integer minQ = null, maxQ = null; + byte[] minQualifier = scan.getAttribute(BaseScannerRegionObserverConstants.MIN_QUALIFIER); + if (minQualifier != null) { + minQ = Bytes.toInt(minQualifier); } - - public static ImmutableStorageScheme getImmutableStorageScheme(Scan s) { - // null check for backward compatibility - return s.getAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME) == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ImmutableStorageScheme.fromSerializedValue(s.getAttribute(BaseScannerRegionObserverConstants.IMMUTABLE_STORAGE_ENCODING_SCHEME)[0]); + byte[] maxQualifier = scan.getAttribute(BaseScannerRegionObserverConstants.MAX_QUALIFIER); + if (maxQualifier != null) { + maxQ = Bytes.toInt(maxQualifier); } - - /** - * @return pair of byte arrays. The first part of the pair is the empty key value's column qualifier, and the second - * part is the value to use for it. - */ - public static Pair getEmptyKeyValueInfo(PTable table) { - return getEmptyKeyValueInfo(usesEncodedColumnNames(table)); + if (minQualifier == null) { + return null; } + return new Pair<>(minQ, maxQ); + } - /** - * @return pair of byte arrays. The first part of the pair is the empty key value's column qualifier, and the second - * part is the value to use for it. + public static boolean useEncodedQualifierListOptimization(PTable table, Scan scan) { + /* + * HBase doesn't allow raw scans to have columns set. And we need columns to be set explicitly + * on the scan to use this optimization. Disabling this optimization for tables with more than + * one column family. See PHOENIX-3890. */ - public static Pair getEmptyKeyValueInfo(boolean usesEncodedColumnNames) { - return usesEncodedColumnNames ? new Pair<>(QueryConstants.ENCODED_EMPTY_COLUMN_BYTES, - QueryConstants.ENCODED_EMPTY_COLUMN_VALUE_BYTES) : new Pair<>(QueryConstants.EMPTY_COLUMN_BYTES, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - } - - /** - * @return pair of byte arrays. The first part of the pair is the empty key value's column qualifier, and the second - * part is the value to use for it. 
- */ - public static Pair getEmptyKeyValueInfo(QualifierEncodingScheme encodingScheme) { - return getEmptyKeyValueInfo(usesEncodedColumnNames(encodingScheme)); - } + return !scan.isRaw() && table.getColumnFamilies().size() == 1 + && table.getImmutableStorageScheme() != null + && table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN + && usesEncodedColumnNames(table) && !table.isTransactional() + && !ScanUtil.hasDynamicColumns(table); + } - public static Pair getMinMaxQualifiersFromScan(Scan scan) { - Integer minQ = null, maxQ = null; - byte[] minQualifier = scan.getAttribute(BaseScannerRegionObserverConstants.MIN_QUALIFIER); - if (minQualifier != null) { - minQ = Bytes.toInt(minQualifier); - } - byte[] maxQualifier = scan.getAttribute(BaseScannerRegionObserverConstants.MAX_QUALIFIER); - if (maxQualifier != null) { - maxQ = Bytes.toInt(maxQualifier); - } - if (minQualifier == null) { - return null; - } - return new Pair<>(minQ, maxQ); - } + public static boolean useQualifierAsIndex(Pair minMaxQualifiers) { + return minMaxQualifiers != null; + } - public static boolean useEncodedQualifierListOptimization(PTable table, Scan scan) { - /* - * HBase doesn't allow raw scans to have columns set. And we need columns to be set - * explicitly on the scan to use this optimization. - * - * Disabling this optimization for tables with more than one column family. - * See PHOENIX-3890. - */ - return !scan.isRaw() && table.getColumnFamilies().size() == 1 && table.getImmutableStorageScheme() != null - && table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN - && usesEncodedColumnNames(table) && !table.isTransactional() - && !ScanUtil.hasDynamicColumns(table); - } + public static Pair setQualifiersForColumnsInFamily(PTable table, String cf, + NavigableSet qualifierSet) throws ColumnFamilyNotFoundException { + QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); + checkArgument(encodingScheme != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS); + Collection columns = table.getColumnFamily(cf).getColumns(); + if (columns.size() > 0) { + int[] qualifiers = new int[columns.size()]; + int i = 0; + for (PColumn col : columns) { + qualifierSet.add(col.getColumnQualifierBytes()); + qualifiers[i++] = encodingScheme.decode(col.getColumnQualifierBytes()); + } + Arrays.sort(qualifiers); + return new Pair<>(qualifiers[0], qualifiers[qualifiers.length - 1]); + } + return null; + } - public static boolean useQualifierAsIndex(Pair minMaxQualifiers) { - return minMaxQualifiers != null; - } + public static byte[] getColumnQualifierBytes(String columnName, Integer numberBasedQualifier, + PTable table, boolean isPk) { + QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); + return getColumnQualifierBytes(columnName, numberBasedQualifier, encodingScheme, isPk); + } - public static Pair setQualifiersForColumnsInFamily(PTable table, String cf, NavigableSet qualifierSet) - throws ColumnFamilyNotFoundException { - QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); - checkArgument(encodingScheme != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS); - Collection columns = table.getColumnFamily(cf).getColumns(); - if (columns.size() > 0) { - int[] qualifiers = new int[columns.size()]; - int i = 0; - for (PColumn col : columns) { - qualifierSet.add(col.getColumnQualifierBytes()); - qualifiers[i++] = encodingScheme.decode(col.getColumnQualifierBytes()); - } - Arrays.sort(qualifiers); - return new Pair<>(qualifiers[0], 
qualifiers[qualifiers.length - 1]); - } - return null; - } - - public static byte[] getColumnQualifierBytes(String columnName, Integer numberBasedQualifier, PTable table, boolean isPk) { - QualifierEncodingScheme encodingScheme = table.getEncodingScheme(); - return getColumnQualifierBytes(columnName, numberBasedQualifier, encodingScheme, isPk); - } - - public static byte[] getColumnQualifierBytes(String columnName, Integer numberBasedQualifier, QualifierEncodingScheme encodingScheme, boolean isPk) { - if (isPk) { - return null; - } - if (encodingScheme == null || encodingScheme == NON_ENCODED_QUALIFIERS) { - return Bytes.toBytes(columnName); - } - return encodingScheme.encode(numberBasedQualifier); + public static byte[] getColumnQualifierBytes(String columnName, Integer numberBasedQualifier, + QualifierEncodingScheme encodingScheme, boolean isPk) { + if (isPk) { + return null; } - - public static Expression[] createColumnExpressionArray(int maxEncodedColumnQualifier) { - // reserve the first position and offset maxEncodedColumnQualifier by ENCODED_CQ_COUNTER_INITIAL_VALUE (which is the minimum encoded column qualifier) - int numElements = maxEncodedColumnQualifier - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 2; - Expression[] colValues = new Expression[numElements]; - Arrays.fill(colValues, new DelegateExpression(LiteralExpression.newConstant(null)) { - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return false; - } - }); - // 0 is a reserved position, set it to a non-null value so that we can represent absence of a value using a negative offset - colValues[0]=LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - return colValues; + if (encodingScheme == null || encodingScheme == NON_ENCODED_QUALIFIERS) { + return Bytes.toBytes(columnName); } + return encodingScheme.encode(numberBasedQualifier); + } - public static boolean isReservedColumnQualifier(int number) { - if (number < 0) { - throw new IllegalArgumentException("Negative column qualifier" + number + " not allowed "); - } - return number < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; - } - - public static boolean isPossibleToUseEncodedCQFilter(QualifierEncodingScheme encodingScheme, - ImmutableStorageScheme storageScheme) { - return EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) - && storageScheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN; + public static Expression[] createColumnExpressionArray(int maxEncodedColumnQualifier) { + // reserve the first position and offset maxEncodedColumnQualifier by + // ENCODED_CQ_COUNTER_INITIAL_VALUE (which is the minimum encoded column qualifier) + int numElements = + maxEncodedColumnQualifier - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 2; + Expression[] colValues = new Expression[numElements]; + Arrays.fill(colValues, new DelegateExpression(LiteralExpression.newConstant(null)) { + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return false; + } + }); + // 0 is a reserved position, set it to a non-null value so that we can represent absence of a + // value using a negative offset + colValues[0] = LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + return colValues; + } + + public static boolean isReservedColumnQualifier(int number) { + if (number < 0) { + throw new IllegalArgumentException("Negative column qualifier" + number + " not allowed "); } + return number < QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; + } + + public static boolean 
isPossibleToUseEncodedCQFilter(QualifierEncodingScheme encodingScheme, + ImmutableStorageScheme storageScheme) { + return EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) + && storageScheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java index 31e8a4c0d6a..fc029e51d6b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdge.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +18,13 @@ package org.apache.phoenix.util; /** - * Has some basic interaction with the environment. Alternate implementations - * can be used where required (eg in tests). - * + * Has some basic interaction with the environment. Alternate implementations can be used where + * required (eg in tests). * @see EnvironmentEdgeManager */ public abstract class EnvironmentEdge implements org.apache.hadoop.hbase.util.EnvironmentEdge { /** * Returns the currentTime. - * * @return Current time. */ @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java index c5e1a63476c..ba7b9319087 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EnvironmentEdgeManager.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,8 @@ package org.apache.phoenix.util; /** - * Manages a singleton instance of the environment edge. This class shall - * implement static versions of the interface {@link EnvironmentEdge}, then - * defer to the delegate on invocation. + * Manages a singleton instance of the environment edge. This class shall implement static versions + * of the interface {@link EnvironmentEdge}, then defer to the delegate on invocation. */ public class EnvironmentEdgeManager { private static volatile EnvironmentEdge delegate = new DefaultEnvironmentEdge(); @@ -31,9 +29,7 @@ private EnvironmentEdgeManager() { } /** - * Retrieves the singleton instance of the {@link EnvironmentEdge} that is - * being managed. - * + * Retrieves the singleton instance of the {@link EnvironmentEdge} that is being managed. * @return the edge. */ public static EnvironmentEdge getDelegate() { @@ -41,20 +37,17 @@ public static EnvironmentEdge getDelegate() { } /** - * Resets the managed instance to the default instance: {@link - * DefaultEnvironmentEdge}. + * Resets the managed instance to the default instance: {@link DefaultEnvironmentEdge}. */ public static void reset() { injectEdge(new DefaultEnvironmentEdge()); } /** - * Injects the given edge such that it becomes the managed entity. If null is - * passed to this method, the default type is assigned to the delegate. - * - * Note: This is JVM global. Make sure to call reset() after the test. - * See org.apache.hadoop.hbase.util.EnvironmentEdgeManager for other caveats - * + * Injects the given edge such that it becomes the managed entity. 
If null is passed to this + * method, the default type is assigned to the delegate. Note: This is JVM global. Make sure to + * call reset() after the test. See org.apache.hadoop.hbase.util.EnvironmentEdgeManager for + * other caveats * @param edge the new edge. */ public static void injectEdge(EnvironmentEdge edge) { @@ -67,9 +60,7 @@ public static void injectEdge(EnvironmentEdge edge) { } /** - * Defers to the delegate and calls the - * {@link EnvironmentEdge#currentTime()} method. - * + * Defers to the delegate and calls the {@link EnvironmentEdge#currentTime()} method. * @return current time in millis according to the delegate. */ public static long currentTimeMillis() { diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java index 2dd41f32059..8e84b333c52 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,431 +24,422 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * Equi-Depth histogram based on http://web.cs.ucla.edu/~zaniolo/papers/Histogram-EDBT2011-CamReady.pdf, - * but without the sliding window - we assume a single window over the entire data set. - * - * Used to generate the bucket boundaries of a histogram where each bucket has the same # of items. - * This is useful, for example, for pre-splitting an index table, by feeding in data from the indexed column. - * Works on streaming data - the histogram is dynamically updated for each new value. - * - * Add values by calling addValue(), then at the end computeBuckets() can be called to get - * the buckets with their bounds. - * - * Average time complexity: O(log(B x p) + (B x p)/T) = nearly constant - * B = number of buckets, p = expansion factor constant, T = # of values - * - * Space complexity: different from paper since here we keep the blocked bars but don't have expiration, - * comes out to basically O(log(T)) + * Equi-Depth histogram based on + * http://web.cs.ucla.edu/~zaniolo/papers/Histogram-EDBT2011-CamReady.pdf, but without the sliding + * window - we assume a single window over the entire data set. Used to generate the bucket + * boundaries of a histogram where each bucket has the same # of items. This is useful, for example, + * for pre-splitting an index table, by feeding in data from the indexed column. Works on streaming + * data - the histogram is dynamically updated for each new value. Add values by calling addValue(), + * then at the end computeBuckets() can be called to get the buckets with their bounds. 
Average time + * complexity: O(log(B x p) + (B x p)/T) = nearly constant B = number of buckets, p = expansion + * factor constant, T = # of values Space complexity: different from paper since here we keep the + * blocked bars but don't have expiration, comes out to basically O(log(T)) */ public class EquiDepthStreamHistogram { - private static final Logger LOGGER = LoggerFactory.getLogger(EquiDepthStreamHistogram.class); - - // used in maxSize calculation for each bar - private static final double MAX_COEF = 1.7; - // higher expansion factor = better accuracy and worse performance - private static final short DEFAULT_EXPANSION_FACTOR = 7; - private int numBuckets; - private int maxBars; - @VisibleForTesting - long totalCount; // number of values - i.e. count across all bars - @VisibleForTesting - List bars; + private static final Logger LOGGER = LoggerFactory.getLogger(EquiDepthStreamHistogram.class); + + // used in maxSize calculation for each bar + private static final double MAX_COEF = 1.7; + // higher expansion factor = better accuracy and worse performance + private static final short DEFAULT_EXPANSION_FACTOR = 7; + private int numBuckets; + private int maxBars; + @VisibleForTesting + long totalCount; // number of values - i.e. count across all bars + @VisibleForTesting + List bars; + + /** + * Create a new histogram + * @param numBuckets number of buckets, which can be used to get the splits + */ + public EquiDepthStreamHistogram(int numBuckets) { + this(numBuckets, DEFAULT_EXPANSION_FACTOR); + } + + /** + * @param numBuckets number of buckets + * @param expansionFactor number of bars = expansionFactor * numBuckets The more bars, the better + * the accuracy, at the cost of worse performance + */ + public EquiDepthStreamHistogram(int numBuckets, int expansionFactor) { + this.numBuckets = numBuckets; + this.maxBars = numBuckets * expansionFactor; + this.bars = new ArrayList<>(maxBars); + } + + /** + * Add a new value to the histogram, updating the count for the appropriate bucket + */ + public void addValue(byte[] value) { + Bar bar = getBar(value); + bar.incrementCount(); + totalCount++; + // split the bar if necessary + if (bar.getSize() > getMaxBarSize()) { + splitBar(bar); + } + } + + /** + * Compute the buckets, which have the boundaries and estimated counts. Note that the right bound + * for the very last bucket is inclusive. The left and right bounds can be equivalent, for single + * value buckets. 
+ */ + public List computeBuckets() { + Preconditions.checkState(bars.size() >= numBuckets, + "Not enough data points to compute buckets"); + List buckets = new ArrayList<>(); + long idealBuckSize = (long) Math.ceil(totalCount / (double) numBuckets); + long currCount = 0; + int barsIdx = 0; + byte[] prevBound = bars.get(0).leftBoundInclusive; + Bar currBar = null; + for (int i = 0; i < numBuckets; i++) { + while (currCount <= idealBuckSize && barsIdx < bars.size()) { + currBar = bars.get(barsIdx++); + currCount += currBar.getSize(); + } + long surplus = Math.max(currCount - idealBuckSize, 0); + // deviate a bit from the paper here + // to estimate the bound, we split the range into 8 splits for a total of 10 including + // start/end + // then we calculate the % of the currBar's count we've used, and round down to the closest + // split + int closestSplitIdx = (int) ((1 - ((double) surplus / currBar.getSize())) * 9); + byte[][] splits = Bytes.split(currBar.leftBoundInclusive, currBar.rightBoundExclusive, 8); + Bucket bucket = new Bucket(prevBound, splits[closestSplitIdx]); + bucket.incrementCountEstimate(currCount - surplus); + prevBound = splits[closestSplitIdx]; + buckets.add(bucket); + currCount = surplus; + } + return buckets; + } + + /** Returns total number of values added to this histogram */ + public long getTotalCount() { + return totalCount; + } + + // attempts to split the given bar into two new bars + @VisibleForTesting + void splitBar(Bar origBar) { + // short circuit - don't split a bar of length 1 + if (Bytes.compareTo(origBar.leftBoundInclusive, origBar.rightBoundExclusive) == 0) { + return; + } + if (bars.size() == maxBars) { // max bars hit, need to merge two existing bars first + boolean mergeSuccessful = mergeBars(); + if (!mergeSuccessful) return; // don't split if we couldn't merge + } + byte[] mid = + Bytes.split(origBar.getLeftBoundInclusive(), origBar.getRightBoundExclusive(), 1)[1]; + Bar newLeft = new Bar(origBar.getLeftBoundInclusive(), mid); + Bar newRight = new Bar(mid, origBar.getRightBoundExclusive()); + // distribute blocked bars between the new bars + long leftSize = 0; + long bbAggCount = origBar.getBlockedBarsSize(); + for (Bar bb : origBar.getBlockedBars()) { + long bbSize = bb.getSize(); + if (leftSize + bbSize < bbAggCount / 2) { + leftSize += bbSize; + newLeft.addBlockedBar(bb); + } else { + newRight.addBlockedBar(bb); + } + } + // at this point the two new bars may have different counts, + // distribute the rest of origBar's count to make them as close as possible + long countToDistribute = origBar.getSize() - bbAggCount; + long rightSize = newRight.getSize(); + long sizeDiff = Math.abs(leftSize - rightSize); + Bar smallerBar = leftSize <= rightSize ? newLeft : newRight; + if (sizeDiff <= countToDistribute) { + smallerBar.incrementCount(sizeDiff); + countToDistribute -= sizeDiff; + long halfDistrib = countToDistribute / 2; + newLeft.incrementCount(halfDistrib); + newRight.incrementCount(countToDistribute - halfDistrib); + } else { + smallerBar.incrementCount(countToDistribute); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight)); + } + bars.remove(origBar); + bars.add(newLeft); + bars.add(newRight); + // technically don't need to sort here, as we can get the index from getBar, + // and put the new bars in the same index. 
But we'd have to handle merge as well, + // doable but not worth the more complicated code since bars.size is fixed and generally small + Collections.sort(bars); + } + + // Merges the two adjacent bars with the lowest summed count + @VisibleForTesting + boolean mergeBars() { + Preconditions.checkState(bars.size() > 1, "Need at least two bars to merge"); + // pairwise search for the two bars with the smallest summed count + int currIdx = 0; + Bar currBar = bars.get(currIdx); + Bar nextBar = bars.get(currIdx + 1); + long currMinSum = Long.MAX_VALUE; + int currMinIdx = currIdx; // keep this for fast removal from ArrayList later + Pair minBars = new Pair<>(currBar, nextBar); + while (nextBar != null) { + long sum = currBar.getSize() + nextBar.getSize(); + if (sum < currMinSum) { + currMinSum = sum; + minBars = new Pair<>(currBar, nextBar); + currMinIdx = currIdx; + } + currBar = nextBar; + nextBar = ++currIdx < bars.size() - 1 ? bars.get(currIdx + 1) : null; + } + // don't want to merge bars into one that will just need an immediate split again + if (currMinSum >= getMaxBarSize()) { + return false; + } + // do the merge + Bar leftBar = minBars.getFirst(); + Bar rightBar = minBars.getSecond(); + Bar newBar = new Bar(leftBar.getLeftBoundInclusive(), rightBar.getRightBoundExclusive()); + if (leftBar.getSize() >= rightBar.getSize()) { + newBar.incrementCount(rightBar.getCount()); // count of rightBar without its blocked bars + // this just adds the leftBar without its blocked bars, as we don't want nested blocked bars + // the leftBar's blocked bars are added later below + newBar.addBlockedBar(new Bar(leftBar)); + } else { + newBar.incrementCount(leftBar.getCount()); + newBar.addBlockedBar(new Bar(rightBar)); + } + newBar.addBlockedBars(leftBar.getBlockedBars()); + newBar.addBlockedBars(rightBar.getBlockedBars()); + bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars + bars.add(newBar); + Collections.sort(bars); + if (LOGGER.isTraceEnabled()) { + LOGGER + .trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar)); + } + return true; + } + + /** + * Get the appropriate bar for the value, extending existing bar bounds to accommodate if + * necessary + * @param value value to add + * @return the bar for the value + */ + @VisibleForTesting + Bar getBar(byte[] value) { + Bar searchKey = new Bar(value, value); + int searchIdx = Collections.binarySearch(this.bars, searchKey); + if (searchIdx < 0) { + // copy value so later changes by caller don't affect histogram results + byte[] newBound = Bytes.copy(value); + if (this.bars.size() == 0) { + Bar firstBar = new Bar(newBound, newBound); + bars.add(firstBar); + return firstBar; + } + int expectedIndex = Math.abs(searchIdx + 1); // jdk binary search index + if (expectedIndex == bars.size()) { // no bars >= value, need to extend rightBound of last bar + Bar lastBar = bars.get(expectedIndex - 1); + lastBar.setRightBoundExclusive(newBound); // actually inclusive for last bar + return lastBar; + } else { // extend leftBound of next greatest bar + Bar nextBar = bars.get(expectedIndex); + nextBar.setLeftBoundInclusive(newBound); + return nextBar; + } + } else { + return bars.get(searchIdx); + } + } + + private long getMaxBarSize() { + // from the paper, 1.7 has been "determined empirically" + // interpretation: We don't want a given bar to deviate more than 70% from its ideal target size + return (long) (MAX_COEF * (totalCount / maxBars)); + } + + public static class Bucket { + protected long count = 0; + protected 
byte[] leftBoundInclusive; + protected byte[] rightBoundExclusive; + + public Bucket(byte[] leftBoundInclusive, byte[] rightBoundExclusive) { + this.leftBoundInclusive = leftBoundInclusive; + this.rightBoundExclusive = rightBoundExclusive; + } - /** - * Create a new histogram - * @param numBuckets number of buckets, which can be used to get the splits - */ - public EquiDepthStreamHistogram(int numBuckets) { - this(numBuckets, DEFAULT_EXPANSION_FACTOR); + public byte[] getLeftBoundInclusive() { + return leftBoundInclusive; } - /** - * @param numBuckets number of buckets - * @param expansionFactor number of bars = expansionFactor * numBuckets - * The more bars, the better the accuracy, at the cost of worse performance - */ - public EquiDepthStreamHistogram(int numBuckets, int expansionFactor) { - this.numBuckets = numBuckets; - this.maxBars = numBuckets * expansionFactor; - this.bars = new ArrayList<>(maxBars); + public void setLeftBoundInclusive(byte[] leftBoundInclusive) { + this.leftBoundInclusive = leftBoundInclusive; } - /** - * Add a new value to the histogram, updating the count for the appropriate bucket - * @param value - */ - public void addValue(byte[] value) { - Bar bar = getBar(value); - bar.incrementCount(); - totalCount++; - // split the bar if necessary - if (bar.getSize() > getMaxBarSize()) { - splitBar(bar); - } + public byte[] getRightBoundExclusive() { + return rightBoundExclusive; } - /** - * Compute the buckets, which have the boundaries and estimated counts. - * Note that the right bound for the very last bucket is inclusive. - * The left and right bounds can be equivalent, for single value buckets. - * @return - */ - public List computeBuckets() { - Preconditions.checkState(bars.size() >= numBuckets, "Not enough data points to compute buckets"); - List buckets = new ArrayList<>(); - long idealBuckSize = (long) Math.ceil(totalCount / (double) numBuckets); - long currCount = 0; - int barsIdx = 0; - byte[] prevBound = bars.get(0).leftBoundInclusive; - Bar currBar = null; - for (int i = 0; i < numBuckets; i++) { - while (currCount <= idealBuckSize && barsIdx < bars.size()) { - currBar = bars.get(barsIdx++); - currCount += currBar.getSize(); - } - long surplus = Math.max(currCount - idealBuckSize, 0); - // deviate a bit from the paper here - // to estimate the bound, we split the range into 8 splits for a total of 10 including start/end - // then we calculate the % of the currBar's count we've used, and round down to the closest split - int closestSplitIdx = (int) ((1 - ((double) surplus / currBar.getSize())) * 9); - byte[][] splits = Bytes.split(currBar.leftBoundInclusive, currBar.rightBoundExclusive, 8); - Bucket bucket = new Bucket(prevBound, splits[closestSplitIdx]); - bucket.incrementCountEstimate(currCount - surplus); - prevBound = splits[closestSplitIdx]; - buckets.add(bucket); - currCount = surplus; - } - return buckets; + public void setRightBoundExclusive(byte[] rightBoundExclusive) { + this.rightBoundExclusive = rightBoundExclusive; + } + + public long getCountEstimate() { + return count; } + public void incrementCountEstimate(long count) { + this.count += count; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(leftBoundInclusive); + result = prime * result + Arrays.hashCode(rightBoundExclusive); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + 
Bucket other = (Bucket) obj; + if (!Arrays.equals(leftBoundInclusive, other.leftBoundInclusive)) return false; + if (!Arrays.equals(rightBoundExclusive, other.rightBoundExclusive)) return false; + return true; + } + + @Override + public String toString() { + return "Bucket [count=" + count + ", leftBoundInclusive=" + Bytes.toString(leftBoundInclusive) + + ", rightBoundExclusive=" + Bytes.toString(rightBoundExclusive) + "]"; + } + } + + // Used internally to further subdivide each bucket + @VisibleForTesting + static class Bar extends Bucket implements Comparable { + private List blockedBars = new ArrayList<>(); // populated through a merge + /** - * @return total number of values added to this histogram + * Create a new bar. Single value buckets can have leftBound = rightBound */ - public long getTotalCount() { - return totalCount; - } - - // attempts to split the given bar into two new bars - @VisibleForTesting - void splitBar(Bar origBar) { - // short circuit - don't split a bar of length 1 - if (Bytes.compareTo(origBar.leftBoundInclusive, origBar.rightBoundExclusive) == 0) { - return; - } - if (bars.size() == maxBars) { // max bars hit, need to merge two existing bars first - boolean mergeSuccessful = mergeBars(); - if (!mergeSuccessful) return; // don't split if we couldn't merge - } - byte[] mid = Bytes.split(origBar.getLeftBoundInclusive(), origBar.getRightBoundExclusive(), 1)[1]; - Bar newLeft = new Bar(origBar.getLeftBoundInclusive(), mid); - Bar newRight = new Bar(mid, origBar.getRightBoundExclusive()); - // distribute blocked bars between the new bars - long leftSize = 0; - long bbAggCount = origBar.getBlockedBarsSize(); - for (Bar bb : origBar.getBlockedBars()) { - long bbSize = bb.getSize(); - if (leftSize + bbSize < bbAggCount/2) { - leftSize += bbSize; - newLeft.addBlockedBar(bb); - } else { - newRight.addBlockedBar(bb); - } - } - // at this point the two new bars may have different counts, - // distribute the rest of origBar's count to make them as close as possible - long countToDistribute = origBar.getSize() - bbAggCount; - long rightSize = newRight.getSize(); - long sizeDiff = Math.abs(leftSize - rightSize); - Bar smallerBar = leftSize <= rightSize ? newLeft : newRight; - if (sizeDiff <= countToDistribute) { - smallerBar.incrementCount(sizeDiff); - countToDistribute -= sizeDiff; - long halfDistrib = countToDistribute / 2; - newLeft.incrementCount(halfDistrib); - newRight.incrementCount(countToDistribute - halfDistrib); - } else { - smallerBar.incrementCount(countToDistribute); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", - origBar, newLeft, newRight)); - } - bars.remove(origBar); - bars.add(newLeft); - bars.add(newRight); - // technically don't need to sort here, as we can get the index from getBar, - // and put the new bars in the same index. 
But we'd have to handle merge as well, - // doable but not worth the more complicated code since bars.size is fixed and generally small - Collections.sort(bars); - } - - //Merges the two adjacent bars with the lowest summed count - @VisibleForTesting - boolean mergeBars() { - Preconditions.checkState(bars.size() > 1, "Need at least two bars to merge"); - // pairwise search for the two bars with the smallest summed count - int currIdx = 0; - Bar currBar = bars.get(currIdx); - Bar nextBar = bars.get(currIdx + 1); - long currMinSum = Long.MAX_VALUE; - int currMinIdx = currIdx; // keep this for fast removal from ArrayList later - Pair minBars = new Pair<>(currBar, nextBar); - while (nextBar != null) { - long sum = currBar.getSize() + nextBar.getSize(); - if (sum < currMinSum) { - currMinSum = sum; - minBars = new Pair<>(currBar, nextBar); - currMinIdx = currIdx; - } - currBar = nextBar; - nextBar = ++currIdx < bars.size() - 1 ? bars.get(currIdx+1) : null; - } - // don't want to merge bars into one that will just need an immediate split again - if (currMinSum >= getMaxBarSize()) { - return false; - } - // do the merge - Bar leftBar = minBars.getFirst(); - Bar rightBar = minBars.getSecond(); - Bar newBar = new Bar(leftBar.getLeftBoundInclusive(), rightBar.getRightBoundExclusive()); - if (leftBar.getSize() >= rightBar.getSize()) { - newBar.incrementCount(rightBar.getCount()); // count of rightBar without its blocked bars - // this just adds the leftBar without its blocked bars, as we don't want nested blocked bars - // the leftBar's blocked bars are added later below - newBar.addBlockedBar(new Bar(leftBar)); - } else { - newBar.incrementCount(leftBar.getCount()); - newBar.addBlockedBar(new Bar(rightBar)); - } - newBar.addBlockedBars(leftBar.getBlockedBars()); - newBar.addBlockedBars(rightBar.getBlockedBars()); - bars.subList(currMinIdx, currMinIdx + 2).clear(); // remove minBars - bars.add(newBar); - Collections.sort(bars); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Merged left=%s , right=%s , newBar=%s", leftBar, rightBar, newBar)); - } - return true; + public Bar(byte[] leftBoundInclusive, byte[] rightBoundExclusive) { + super(leftBoundInclusive, rightBoundExclusive); } /** - * Get the appropriate bar for the value, extending existing bar bounds to accommodate if necessary - * @param value value to add - * @return the bar for the value + * Creates a copy of the passed in bar, but without any blocked bars */ - @VisibleForTesting - Bar getBar(byte[] value) { - Bar searchKey = new Bar(value, value); - int searchIdx = Collections.binarySearch(this.bars, searchKey); - if (searchIdx < 0) { - // copy value so later changes by caller don't affect histogram results - byte[] newBound = Bytes.copy(value); - if (this.bars.size() == 0) { - Bar firstBar = new Bar(newBound, newBound); - bars.add(firstBar); - return firstBar; - } - int expectedIndex = Math.abs(searchIdx + 1); // jdk binary search index - if (expectedIndex == bars.size()) { // no bars >= value, need to extend rightBound of last bar - Bar lastBar = bars.get(expectedIndex - 1); - lastBar.setRightBoundExclusive(newBound); // actually inclusive for last bar - return lastBar; - } else { // extend leftBound of next greatest bar - Bar nextBar = bars.get(expectedIndex); - nextBar.setLeftBoundInclusive(newBound); - return nextBar; - } - } else { - return bars.get(searchIdx); - } - } - - private long getMaxBarSize() { - // from the paper, 1.7 has been "determined empirically" - // interpretation: We don't want a given bar to deviate 
more than 70% from its ideal target size - return (long) (MAX_COEF * (totalCount / maxBars)); - } - - public static class Bucket { - protected long count = 0; - protected byte[] leftBoundInclusive; - protected byte[] rightBoundExclusive; - - public Bucket(byte[] leftBoundInclusive, byte[] rightBoundExclusive) { - this.leftBoundInclusive = leftBoundInclusive; - this.rightBoundExclusive = rightBoundExclusive; - } - - public byte[] getLeftBoundInclusive() { - return leftBoundInclusive; - } - - public void setLeftBoundInclusive(byte[] leftBoundInclusive) { - this.leftBoundInclusive = leftBoundInclusive; - } - - public byte[] getRightBoundExclusive() { - return rightBoundExclusive; - } - - public void setRightBoundExclusive(byte[] rightBoundExclusive) { - this.rightBoundExclusive = rightBoundExclusive; - } - - public long getCountEstimate() { - return count; - } - - public void incrementCountEstimate(long count) { - this.count += count; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + Arrays.hashCode(leftBoundInclusive); - result = prime * result + Arrays.hashCode(rightBoundExclusive); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - Bucket other = (Bucket) obj; - if (!Arrays.equals(leftBoundInclusive, other.leftBoundInclusive)) return false; - if (!Arrays.equals(rightBoundExclusive, other.rightBoundExclusive)) return false; - return true; - } - - @Override - public String toString() { - return "Bucket [count=" + count + ", leftBoundInclusive=" - + Bytes.toString(leftBoundInclusive) + ", rightBoundExclusive=" - + Bytes.toString(rightBoundExclusive) + "]"; - } - } - - // Used internally to further subdivide each bucket - @VisibleForTesting - static class Bar extends Bucket implements Comparable { - private List blockedBars = new ArrayList<>(); // populated through a merge - - /** - * Create a new bar. Single value buckets can have leftBound = rightBound - * @param leftBoundInclusive - * @param rightBoundExclusive - */ - public Bar(byte[] leftBoundInclusive, byte[] rightBoundExclusive) { - super(leftBoundInclusive, rightBoundExclusive); - } - - /** - * Creates a copy of the passed in bar, but without any blocked bars - * @param bar - */ - public Bar(Bar bar) { - super(bar.leftBoundInclusive, bar.rightBoundExclusive); - this.count = bar.count; - } - - // Used to keep the bars sorted by bounds - @Override - public int compareTo(Bar other) { - // if one bar fully contains the other, they are considered the same. 
For binary search - int leftComp = Bytes.compareTo(this.leftBoundInclusive, other.leftBoundInclusive); - int rightComp = Bytes.compareTo(this.rightBoundExclusive, other.rightBoundExclusive); - if ((leftComp >= 0 && rightComp < 0) || (leftComp <= 0 && rightComp > 0) - || (leftComp == 0 && rightComp == 0)) { - return 0; - } - if (Bytes.compareTo(this.leftBoundInclusive, other.rightBoundExclusive) >= 0) { - return 1; - } - if (Bytes.compareTo(this.rightBoundExclusive, other.leftBoundInclusive) <= 0) { - return -1; - } - throw new AssertionError("Cannot not have overlapping bars"); - } - - /** - * @return The aggregate count of this bar and its blocked bars' counts - */ - public long getSize() { - long blockedBarSum = getBlockedBarsSize(); - return count + blockedBarSum; - } - - /** - * @return The sum of the counts of all the blocked bars - */ - public long getBlockedBarsSize() { - long blockedBarSum = 0; - for (Bar bb : blockedBars) { - blockedBarSum += bb.getSize(); - } - return blockedBarSum; - } - - public void addBlockedBar(Bar bar) { - blockedBars.add(bar); - } - - public void addBlockedBars(List bars) { - blockedBars.addAll(bars); - } - - public List getBlockedBars() { - return blockedBars; - } - - public long getCount() { - return this.count; - } - - public void incrementCount() { - count++; - } - - public void incrementCount(long increment) { - count += increment; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((blockedBars == null) ? 0 : blockedBars.hashCode()); - result = prime * result + (int) (count ^ (count >>> 32)); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!super.equals(obj)) return false; - if (getClass() != obj.getClass()) return false; - Bar other = (Bar) obj; - if (blockedBars == null) { - if (other.blockedBars != null) return false; - } else if (!blockedBars.equals(other.blockedBars)) return false; - if (count != other.count) return false; - return true; - } - - @Override - public String toString() { - return "Bar[count=" + count + ", blockedBars=" + blockedBars + ", leftBoundInclusive=" - + Bytes.toString(leftBoundInclusive) + ", rightBoundExclusive=" - + Bytes.toString(rightBoundExclusive) + "]"; - } + public Bar(Bar bar) { + super(bar.leftBoundInclusive, bar.rightBoundExclusive); + this.count = bar.count; + } + + // Used to keep the bars sorted by bounds + @Override + public int compareTo(Bar other) { + // if one bar fully contains the other, they are considered the same. 
For binary search + int leftComp = Bytes.compareTo(this.leftBoundInclusive, other.leftBoundInclusive); + int rightComp = Bytes.compareTo(this.rightBoundExclusive, other.rightBoundExclusive); + if ( + (leftComp >= 0 && rightComp < 0) || (leftComp <= 0 && rightComp > 0) + || (leftComp == 0 && rightComp == 0) + ) { + return 0; + } + if (Bytes.compareTo(this.leftBoundInclusive, other.rightBoundExclusive) >= 0) { + return 1; + } + if (Bytes.compareTo(this.rightBoundExclusive, other.leftBoundInclusive) <= 0) { + return -1; + } + throw new AssertionError("Cannot not have overlapping bars"); + } + + /** Returns The aggregate count of this bar and its blocked bars' counts */ + public long getSize() { + long blockedBarSum = getBlockedBarsSize(); + return count + blockedBarSum; + } + + /** Returns The sum of the counts of all the blocked bars */ + public long getBlockedBarsSize() { + long blockedBarSum = 0; + for (Bar bb : blockedBars) { + blockedBarSum += bb.getSize(); + } + return blockedBarSum; + } + + public void addBlockedBar(Bar bar) { + blockedBars.add(bar); + } + + public void addBlockedBars(List bars) { + blockedBars.addAll(bars); + } + + public List getBlockedBars() { + return blockedBars; + } + + public long getCount() { + return this.count; + } + + public void incrementCount() { + count++; + } + + public void incrementCount(long increment) { + count += increment; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((blockedBars == null) ? 0 : blockedBars.hashCode()); + result = prime * result + (int) (count ^ (count >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!super.equals(obj)) return false; + if (getClass() != obj.getClass()) return false; + Bar other = (Bar) obj; + if (blockedBars == null) { + if (other.blockedBars != null) return false; + } else if (!blockedBars.equals(other.blockedBars)) return false; + if (count != other.count) return false; + return true; + } + + @Override + public String toString() { + return "Bar[count=" + count + ", blockedBars=" + blockedBars + ", leftBoundInclusive=" + + Bytes.toString(leftBoundInclusive) + ", rightBoundExclusive=" + + Bytes.toString(rightBoundExclusive) + "]"; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ExpressionUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ExpressionUtil.java index bf84a4c4133..76b013e2cc1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ExpressionUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ExpressionUtil.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.util; @@ -15,16 +23,16 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; -import java.util.TreeMap; import java.util.Map.Entry; +import java.util.TreeMap; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.ExpressionCompiler; -import org.apache.phoenix.compile.OrderPreservingTracker.Info; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; +import org.apache.phoenix.compile.OrderPreservingTracker.Info; import org.apache.phoenix.expression.AndExpression; import org.apache.phoenix.expression.ColumnExpression; import org.apache.phoenix.expression.ComparisonExpression; @@ -48,474 +56,455 @@ import org.apache.phoenix.schema.types.PDataType; public class ExpressionUtil { - private ExpressionUtil() { - } - - public static boolean isConstant(Expression expression) { - return (expression.isStateless() && isContantForStatement(expression)); - } - - /** - * this method determines if expression is constant if all children of it are constants. - * @param expression - * @return - */ - public static boolean isContantForStatement(Expression expression) { - return (expression.getDeterminism() == Determinism.ALWAYS - || expression.getDeterminism() == Determinism.PER_STATEMENT); - } - - public static LiteralExpression getConstantExpression(Expression expression, ImmutableBytesWritable ptr) - throws SQLException { - Object value = null; - PDataType type = expression.getDataType(); - if (expression.evaluate(null, ptr) && ptr.getLength() != 0) { - value = type.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), type, expression.getSortOrder(), expression.getMaxLength(), expression.getScale()); - } - return LiteralExpression.newConstant(value, type, expression.getDeterminism()); + private ExpressionUtil() { + } + + public static boolean isConstant(Expression expression) { + return (expression.isStateless() && isContantForStatement(expression)); + } + + /** + * this method determines if expression is constant if all children of it are constants. 
+ */ + public static boolean isContantForStatement(Expression expression) { + return (expression.getDeterminism() == Determinism.ALWAYS + || expression.getDeterminism() == Determinism.PER_STATEMENT); + } + + public static LiteralExpression getConstantExpression(Expression expression, + ImmutableBytesWritable ptr) throws SQLException { + Object value = null; + PDataType type = expression.getDataType(); + if (expression.evaluate(null, ptr) && ptr.getLength() != 0) { + value = type.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), type, + expression.getSortOrder(), expression.getMaxLength(), expression.getScale()); } - - public static boolean isNull(Expression expression, ImmutableBytesWritable ptr) { - return isConstant(expression) && (!expression.evaluate(null, ptr) || ptr.getLength() == 0); + return LiteralExpression.newConstant(value, type, expression.getDeterminism()); + } + + public static boolean isNull(Expression expression, ImmutableBytesWritable ptr) { + return isConstant(expression) && (!expression.evaluate(null, ptr) || ptr.getLength() == 0); + } + + public static LiteralExpression getNullExpression(Expression expression) throws SQLException { + return LiteralExpression.newConstant(null, expression.getDataType(), + expression.getDeterminism()); + } + + public static boolean evaluatesToTrue(Expression expression) { + if (isConstant(expression)) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + expression.evaluate(null, ptr); + return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); } - - public static LiteralExpression getNullExpression(Expression expression) throws SQLException { - return LiteralExpression.newConstant(null, expression.getDataType(), expression.getDeterminism()); + return false; + } + + public static boolean isPkPositionChanging(TableRef tableRef, + List projectedExpressions) throws SQLException { + for (int i = 0; i < tableRef.getTable().getPKColumns().size(); i++) { + PColumn column = tableRef.getTable().getPKColumns().get(i); + Expression source = projectedExpressions.get(i); + if ( + source == null + || !source.equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression()) + ) { + return true; + } } - - public static boolean evaluatesToTrue(Expression expression) { - if (isConstant(expression)) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - expression.evaluate(null, ptr); - return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); - } - return false; + return false; + } + + /** + * check the whereExpression to see if the columnExpression is constant. eg. for + * {@code "where a = 3 and b > 9" }, a is constant,but b is not. 
+ */ + public static boolean isColumnExpressionConstant(ColumnExpression columnExpression, + Expression whereExpression) { + if (whereExpression == null) { + return false; } - - public static boolean isPkPositionChanging(TableRef tableRef, List projectedExpressions) throws SQLException { - for (int i = 0; i < tableRef.getTable().getPKColumns().size(); i++) { - PColumn column = tableRef.getTable().getPKColumns().get(i); - Expression source = projectedExpressions.get(i); - if (source == null || !source - .equals(new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) { return true; } - } - return false; + IsColumnConstantExpressionVisitor isColumnConstantExpressionVisitor = + new IsColumnConstantExpressionVisitor(columnExpression); + whereExpression.accept(isColumnConstantExpressionVisitor); + return isColumnConstantExpressionVisitor.isConstant(); + } + + private static class IsColumnConstantExpressionVisitor + extends StatelessTraverseNoExpressionVisitor { + private final Expression columnExpression; + private Expression firstRhsConstantExpression = null; + private int rhsConstantCount = 0; + private boolean isNullExpressionVisited = false; + + public IsColumnConstantExpressionVisitor(Expression columnExpression) { + this.columnExpression = columnExpression; } /** - * check the whereExpression to see if the columnExpression is constant. - * eg. for {@code "where a = 3 and b > 9" }, a is constant,but b is not. - * @param columnExpression - * @param whereExpression - * @return + * only consider and,for "where a = 3 or b = 9", neither a or b is constant. */ - public static boolean isColumnExpressionConstant(ColumnExpression columnExpression, Expression whereExpression) { - if(whereExpression == null) { - return false; - } - IsColumnConstantExpressionVisitor isColumnConstantExpressionVisitor = - new IsColumnConstantExpressionVisitor(columnExpression); - whereExpression.accept(isColumnConstantExpressionVisitor); - return isColumnConstantExpressionVisitor.isConstant(); - } - - private static class IsColumnConstantExpressionVisitor extends StatelessTraverseNoExpressionVisitor { - private final Expression columnExpression ; - private Expression firstRhsConstantExpression = null; - private int rhsConstantCount = 0; - private boolean isNullExpressionVisited = false; - - public IsColumnConstantExpressionVisitor(Expression columnExpression) { - this.columnExpression = columnExpression; - } - /** - * only consider and,for "where a = 3 or b = 9", neither a or b is constant. - */ - @Override - public Iterator visitEnter(AndExpression andExpression) { - if(rhsConstantCount > 1) { - return null; - } - return andExpression.getChildren().iterator(); - } - /** - *

-         * We just consider {@link ComparisonExpression} because:
-         * 1.for {@link InListExpression} as "a in ('2')", the {@link InListExpression} is rewritten to
-         *  {@link ComparisonExpression} in {@link InListExpression#create}
-         * 2.for {@link RowValueConstructorExpression} as "(a,b)=(1,2)",{@link RowValueConstructorExpression}
-         *   is rewritten to {@link ComparisonExpression} in {@link ComparisonExpression#create}
-         * 3.not consider {@link CoerceExpression}, because for "where cast(a as integer)=2", when a is double,
-         *   a is not constant.
-         * 
- */ - @Override - public Iterator visitEnter(ComparisonExpression comparisonExpression) { - if(rhsConstantCount > 1) { - return null; - } - if(comparisonExpression.getFilterOp() != CompareOperator.EQUAL) { - return null; - } - Expression lhsExpresssion = comparisonExpression.getChildren().get(0); - if(!this.columnExpression.equals(lhsExpresssion)) { - return null; - } - Expression rhsExpression = comparisonExpression.getChildren().get(1); - if(rhsExpression == null) { - return null; - } - Boolean isConstant = rhsExpression.accept(new IsCompositeLiteralExpressionVisitor()); - if(isConstant != null && isConstant.booleanValue()) { - checkConstantValue(rhsExpression); - } - return null; - } - - public boolean isConstant() { - return this.rhsConstantCount == 1; - } - - @Override - public Iterator visitEnter(IsNullExpression isNullExpression) { - if(rhsConstantCount > 1) { - return null; - } - if(isNullExpression.isNegate()) { - return null; - } - Expression lhsExpresssion = isNullExpression.getChildren().get(0); - if(!this.columnExpression.equals(lhsExpresssion)) { - return null; - } - this.checkConstantValue(null); - return null; - } - - private void checkConstantValue(Expression rhsExpression) { - if(!this.isNullExpressionVisited && this.firstRhsConstantExpression == null) { - this.firstRhsConstantExpression = rhsExpression; - rhsConstantCount++; - if(rhsExpression == null) { - this.isNullExpressionVisited = true; - } - return; - } - - if(!isExpressionEquals(this.isNullExpressionVisited ? null : this.firstRhsConstantExpression, rhsExpression)) { - rhsConstantCount++; - return; - } - } - - private static boolean isExpressionEquals(Expression oldExpression,Expression newExpression) { - if(oldExpression == null) { - if(newExpression == null) { - return true; - } - return ExpressionUtil.isNull(newExpression, new ImmutableBytesWritable()); - } - if(newExpression == null) { - return ExpressionUtil.isNull(oldExpression, new ImmutableBytesWritable()); - } - return oldExpression.equals(newExpression); - } - } - - private static class IsCompositeLiteralExpressionVisitor extends StatelessTraverseAllExpressionVisitor { - @Override - public Boolean defaultReturn(Expression expression, List childResultValues) { - if (!ExpressionUtil.isContantForStatement(expression) || - childResultValues.size() < expression.getChildren().size()) { - return Boolean.FALSE; - } - for (Boolean childResultValue : childResultValues) { - if (!childResultValue) { - return Boolean.FALSE; - } - } - return Boolean.TRUE; - } - @Override - public Boolean visit(LiteralExpression literalExpression) { - return Boolean.TRUE; - } + @Override + public Iterator visitEnter(AndExpression andExpression) { + if (rhsConstantCount > 1) { + return null; + } + return andExpression.getChildren().iterator(); } /** *
-     * Infer OrderBys from the rowkey columns of {@link PTable}, for projected table may be no rowkey columns,
-     * so we should move forward to inspect {@link ProjectedColumn} by {@link #getOrderByFromProjectedTable}.
-     * The second part of the return pair is the rowkey column offset we must skip when we create OrderBys, because for table with salted/multiTenant/viewIndexId,
-     * some leading rowkey columns should be skipped.
+     * We just consider {@link ComparisonExpression} because:
+     * 1.for {@link InListExpression} as "a in ('2')", the {@link InListExpression} is rewritten to
+     *  {@link ComparisonExpression} in {@link InListExpression#create}
+     * 2.for {@link RowValueConstructorExpression} as "(a,b)=(1,2)",{@link RowValueConstructorExpression}
+     *   is rewritten to {@link ComparisonExpression} in {@link ComparisonExpression#create}
+     * 3.not consider {@link CoerceExpression}, because for "where cast(a as integer)=2", when a is double,
+     *   a is not constant.
      * 
- * @param tableRef - * @param phoenixConnection - * @param orderByReverse - * @return - * @throws SQLException */ - public static Pair getOrderByFromTable( - TableRef tableRef, - PhoenixConnection phoenixConnection, - boolean orderByReverse) throws SQLException { - - PTable table = tableRef.getTable(); - Pair orderByAndRowKeyColumnOffset = - getOrderByFromTableByRowKeyColumn(table, phoenixConnection, orderByReverse); - if(orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) { - return orderByAndRowKeyColumnOffset; - } - if(table.getType() == PTableType.PROJECTED) { - orderByAndRowKeyColumnOffset = - getOrderByFromProjectedTable(tableRef, phoenixConnection, orderByReverse); - if(orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) { - return orderByAndRowKeyColumnOffset; - } - } - return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + @Override + public Iterator visitEnter(ComparisonExpression comparisonExpression) { + if (rhsConstantCount > 1) { + return null; + } + if (comparisonExpression.getFilterOp() != CompareOperator.EQUAL) { + return null; + } + Expression lhsExpresssion = comparisonExpression.getChildren().get(0); + if (!this.columnExpression.equals(lhsExpresssion)) { + return null; + } + Expression rhsExpression = comparisonExpression.getChildren().get(1); + if (rhsExpression == null) { + return null; + } + Boolean isConstant = rhsExpression.accept(new IsCompositeLiteralExpressionVisitor()); + if (isConstant != null && isConstant.booleanValue()) { + checkConstantValue(rhsExpression); + } + return null; } - /** - * Infer OrderBys from the rowkey columns of {@link PTable}. - * The second part of the return pair is the rowkey column offset we must skip when we create OrderBys, because for table with salted/multiTenant/viewIndexId, - * some leading rowkey columns should be skipped. - * @param table - * @param phoenixConnection - * @param orderByReverse - * @return - */ - public static Pair getOrderByFromTableByRowKeyColumn( - PTable table, - PhoenixConnection phoenixConnection, - boolean orderByReverse) { - Pair,Integer> rowKeyColumnExpressionsAndRowKeyColumnOffset = - ExpressionUtil.getRowKeyColumnExpressionsFromTable(table, phoenixConnection); - List rowKeyColumnExpressions = rowKeyColumnExpressionsAndRowKeyColumnOffset.getFirst(); - int rowKeyColumnOffset = rowKeyColumnExpressionsAndRowKeyColumnOffset.getSecond(); - if(rowKeyColumnExpressions.isEmpty()) { - return new Pair(OrderBy.EMPTY_ORDER_BY,0); - } - return new Pair( - convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, orderByReverse), - rowKeyColumnOffset); + public boolean isConstant() { + return this.rhsConstantCount == 1; } - /** - * For projected table may be no rowkey columns, - * so we should move forward to inspect {@link ProjectedColumn} to check if the source column is rowkey column. - * The second part of the return pair is the rowkey column offset we must skip when we create OrderBys, because for table with salted/multiTenant/viewIndexId, - * some leading rowkey columns should be skipped. 
- * @param projectedTableRef - * @param phoenixConnection - * @param orderByReverse - * @return - * @throws SQLException - */ - public static Pair getOrderByFromProjectedTable( - TableRef projectedTableRef, - PhoenixConnection phoenixConnection, - boolean orderByReverse) throws SQLException { - - PTable projectedTable = projectedTableRef.getTable(); - assert projectedTable.getType() == PTableType.PROJECTED; - TableRef sourceTableRef = null; - TreeMap sourceRowKeyColumnIndexToProjectedColumnRef = - new TreeMap(); - - for(PColumn column : projectedTable.getColumns()) { - if(!(column instanceof ProjectedColumn)) { - continue; - } - ProjectedColumn projectedColumn = (ProjectedColumn)column; - ColumnRef sourceColumnRef = projectedColumn.getSourceColumnRef(); - TableRef currentSourceTableRef = sourceColumnRef.getTableRef(); - if(sourceTableRef == null) { - sourceTableRef = currentSourceTableRef; - } - else if(!sourceTableRef.equals(currentSourceTableRef)) { - return new Pair(OrderBy.EMPTY_ORDER_BY, 0); - } - int sourceRowKeyColumnIndex = sourceColumnRef.getPKSlotPosition(); - if(sourceRowKeyColumnIndex >= 0) { - ColumnRef projectedColumnRef = - new ColumnRef(projectedTableRef, projectedColumn.getPosition()); - sourceRowKeyColumnIndexToProjectedColumnRef.put( - Integer.valueOf(sourceRowKeyColumnIndex), projectedColumnRef); - } - } + @Override + public Iterator visitEnter(IsNullExpression isNullExpression) { + if (rhsConstantCount > 1) { + return null; + } + if (isNullExpression.isNegate()) { + return null; + } + Expression lhsExpresssion = isNullExpression.getChildren().get(0); + if (!this.columnExpression.equals(lhsExpresssion)) { + return null; + } + this.checkConstantValue(null); + return null; + } - if(sourceTableRef == null) { - return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + private void checkConstantValue(Expression rhsExpression) { + if (!this.isNullExpressionVisited && this.firstRhsConstantExpression == null) { + this.firstRhsConstantExpression = rhsExpression; + rhsConstantCount++; + if (rhsExpression == null) { + this.isNullExpressionVisited = true; } + return; + } + + if ( + !isExpressionEquals(this.isNullExpressionVisited ? 
null : this.firstRhsConstantExpression, + rhsExpression) + ) { + rhsConstantCount++; + return; + } + } - final int sourceRowKeyColumnOffset = getRowKeyColumnOffset(sourceTableRef.getTable(), phoenixConnection); - List orderByExpressions = new LinkedList(); - int matchedSourceRowKeyColumnOffset = sourceRowKeyColumnOffset; - for(Entry entry : sourceRowKeyColumnIndexToProjectedColumnRef.entrySet()) { - int currentRowKeyColumnOffset = entry.getKey(); - if(currentRowKeyColumnOffset < matchedSourceRowKeyColumnOffset) { - continue; - } - else if(currentRowKeyColumnOffset == matchedSourceRowKeyColumnOffset) { - matchedSourceRowKeyColumnOffset++; - } - else { - break; - } - - ColumnRef projectedColumnRef = entry.getValue(); - Expression projectedValueColumnExpression = projectedColumnRef.newColumnExpression(); - OrderByExpression orderByExpression = - OrderByExpression.convertExpressionToOrderByExpression(projectedValueColumnExpression, orderByReverse); - orderByExpressions.add(orderByExpression); + private static boolean isExpressionEquals(Expression oldExpression, Expression newExpression) { + if (oldExpression == null) { + if (newExpression == null) { + return true; } - - if(orderByExpressions.isEmpty()) { - return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + return ExpressionUtil.isNull(newExpression, new ImmutableBytesWritable()); + } + if (newExpression == null) { + return ExpressionUtil.isNull(oldExpression, new ImmutableBytesWritable()); + } + return oldExpression.equals(newExpression); + } + } + + private static class IsCompositeLiteralExpressionVisitor + extends StatelessTraverseAllExpressionVisitor { + @Override + public Boolean defaultReturn(Expression expression, List childResultValues) { + if ( + !ExpressionUtil.isContantForStatement(expression) + || childResultValues.size() < expression.getChildren().size() + ) { + return Boolean.FALSE; + } + for (Boolean childResultValue : childResultValues) { + if (!childResultValue) { + return Boolean.FALSE; } - return new Pair(new OrderBy(orderByExpressions), sourceRowKeyColumnOffset); + } + return Boolean.TRUE; } - /** - * For table with salted/multiTenant/viewIndexId,some leading rowkey columns should be skipped. - * @param table - * @param phoenixConnection - * @return - */ - public static int getRowKeyColumnOffset(PTable table, PhoenixConnection phoenixConnection) { - boolean isSalted = table.getBucketNum() != null; - boolean isMultiTenant = phoenixConnection.getTenantId() != null && table.isMultiTenant(); - boolean isSharedViewIndex = table.getViewIndexId() != null; - return (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); + @Override + public Boolean visit(LiteralExpression literalExpression) { + return Boolean.TRUE; } - - /** - * Create {@link RowKeyColumnExpression} from {@link PTable}. - * The second part of the return pair is the rowkey column offset we must skip when we create OrderBys, because for table with salted/multiTenant/viewIndexId, - * some leading rowkey columns should be skipped. - * @param table - * @param phoenixConnection - * @return - */ - public static Pair,Integer> getRowKeyColumnExpressionsFromTable(PTable table, PhoenixConnection phoenixConnection) { - int pkPositionOffset = getRowKeyColumnOffset(table, phoenixConnection); - List pkColumns = table.getPKColumns(); - if(pkPositionOffset >= pkColumns.size()) { - return new Pair,Integer>(Collections. 
emptyList(), 0); - } - List rowKeyColumnExpressions = new ArrayList(pkColumns.size() - pkPositionOffset); - for(int index = pkPositionOffset; index < pkColumns.size(); index++) { - RowKeyColumnExpression rowKeyColumnExpression = - new RowKeyColumnExpression(pkColumns.get(index), new RowKeyValueAccessor(pkColumns, index)); - rowKeyColumnExpressions.add(rowKeyColumnExpression); - } - return new Pair,Integer>(rowKeyColumnExpressions, pkPositionOffset); + } + + /** + *
+   * Infer OrderBys from the rowkey columns of {@link PTable}, for projected table may be no rowkey columns,
+   * so we should move forward to inspect {@link ProjectedColumn} by {@link #getOrderByFromProjectedTable}.
+   * The second part of the return pair is the rowkey column offset we must skip when we create OrderBys, because for table with salted/multiTenant/viewIndexId,
+   * some leading rowkey columns should be skipped.
+   * 
+ */ + public static Pair getOrderByFromTable(TableRef tableRef, + PhoenixConnection phoenixConnection, boolean orderByReverse) throws SQLException { + + PTable table = tableRef.getTable(); + Pair orderByAndRowKeyColumnOffset = + getOrderByFromTableByRowKeyColumn(table, phoenixConnection, orderByReverse); + if (orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) { + return orderByAndRowKeyColumnOffset; } - - /** - * Create OrderByExpression by RowKeyColumnExpression,isNullsLast is the default value "false",isAscending is based on {@link Expression#getSortOrder()}. - * If orderByReverse is true, reverse the isNullsLast and isAscending. - * @param rowKeyColumnExpressions - * @param orderByReverse - * @return - */ - public static OrderBy convertRowKeyColumnExpressionsToOrderBy(List rowKeyColumnExpressions, boolean orderByReverse) { - return convertRowKeyColumnExpressionsToOrderBy( - rowKeyColumnExpressions, Collections. emptyList(), orderByReverse); + if (table.getType() == PTableType.PROJECTED) { + orderByAndRowKeyColumnOffset = + getOrderByFromProjectedTable(tableRef, phoenixConnection, orderByReverse); + if (orderByAndRowKeyColumnOffset.getFirst() != OrderBy.EMPTY_ORDER_BY) { + return orderByAndRowKeyColumnOffset; + } } - - /** - * Create OrderByExpression by RowKeyColumnExpression, if the orderPreservingTrackInfos is not null, use isNullsLast and isAscending from orderPreservingTrackInfos. - * If orderByReverse is true, reverse the isNullsLast and isAscending. - * @param rowKeyColumnExpressions - * @param orderPreservingTrackInfos - * @param orderByReverse - * @return - */ - public static OrderBy convertRowKeyColumnExpressionsToOrderBy( - List rowKeyColumnExpressions, - List orderPreservingTrackInfos, - boolean orderByReverse) { - if(rowKeyColumnExpressions.isEmpty()) { - return OrderBy.EMPTY_ORDER_BY; - } - List orderByExpressions = new ArrayList(rowKeyColumnExpressions.size()); - Iterator orderPreservingTrackInfosIter = null; - if(orderPreservingTrackInfos != null && orderPreservingTrackInfos.size() > 0) { - if(orderPreservingTrackInfos.size() != rowKeyColumnExpressions.size()) { - throw new IllegalStateException( - "orderPreservingTrackInfos.size():[" + orderPreservingTrackInfos.size() + - "] should equals rowKeyColumnExpressions.size():[" + rowKeyColumnExpressions.size()+"]!"); - } - orderPreservingTrackInfosIter = orderPreservingTrackInfos.iterator(); - } - for(RowKeyColumnExpression rowKeyColumnExpression : rowKeyColumnExpressions) { - Info orderPreservingTrackInfo = null; - if(orderPreservingTrackInfosIter != null) { - assert orderPreservingTrackInfosIter.hasNext(); - orderPreservingTrackInfo = orderPreservingTrackInfosIter.next(); - } - OrderByExpression orderByExpression = - OrderByExpression.convertExpressionToOrderByExpression(rowKeyColumnExpression, orderPreservingTrackInfo, orderByReverse); - orderByExpressions.add(orderByExpression); - } - return new OrderBy(orderByExpressions); + return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + } + + /** + * Infer OrderBys from the rowkey columns of {@link PTable}. The second part of the return pair is + * the rowkey column offset we must skip when we create OrderBys, because for table with + * salted/multiTenant/viewIndexId, some leading rowkey columns should be skipped. 
+ */ + public static Pair getOrderByFromTableByRowKeyColumn(PTable table, + PhoenixConnection phoenixConnection, boolean orderByReverse) { + Pair, Integer> rowKeyColumnExpressionsAndRowKeyColumnOffset = + ExpressionUtil.getRowKeyColumnExpressionsFromTable(table, phoenixConnection); + List rowKeyColumnExpressions = + rowKeyColumnExpressionsAndRowKeyColumnOffset.getFirst(); + int rowKeyColumnOffset = rowKeyColumnExpressionsAndRowKeyColumnOffset.getSecond(); + if (rowKeyColumnExpressions.isEmpty()) { + return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + } + return new Pair( + convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, orderByReverse), + rowKeyColumnOffset); + } + + /** + * For projected table may be no rowkey columns, so we should move forward to inspect + * {@link ProjectedColumn} to check if the source column is rowkey column. The second part of the + * return pair is the rowkey column offset we must skip when we create OrderBys, because for table + * with salted/multiTenant/viewIndexId, some leading rowkey columns should be skipped. + */ + public static Pair getOrderByFromProjectedTable(TableRef projectedTableRef, + PhoenixConnection phoenixConnection, boolean orderByReverse) throws SQLException { + + PTable projectedTable = projectedTableRef.getTable(); + assert projectedTable.getType() == PTableType.PROJECTED; + TableRef sourceTableRef = null; + TreeMap sourceRowKeyColumnIndexToProjectedColumnRef = + new TreeMap(); + + for (PColumn column : projectedTable.getColumns()) { + if (!(column instanceof ProjectedColumn)) { + continue; + } + ProjectedColumn projectedColumn = (ProjectedColumn) column; + ColumnRef sourceColumnRef = projectedColumn.getSourceColumnRef(); + TableRef currentSourceTableRef = sourceColumnRef.getTableRef(); + if (sourceTableRef == null) { + sourceTableRef = currentSourceTableRef; + } else if (!sourceTableRef.equals(currentSourceTableRef)) { + return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + } + int sourceRowKeyColumnIndex = sourceColumnRef.getPKSlotPosition(); + if (sourceRowKeyColumnIndex >= 0) { + ColumnRef projectedColumnRef = + new ColumnRef(projectedTableRef, projectedColumn.getPosition()); + sourceRowKeyColumnIndexToProjectedColumnRef.put(Integer.valueOf(sourceRowKeyColumnIndex), + projectedColumnRef); + } } - /** - * Convert the GroupBy to OrderBy, expressions in GroupBy should be converted to {@link RowKeyColumnExpression}. - * @param groupBy - * @param orderByReverse - * @return - */ - public static OrderBy convertGroupByToOrderBy(GroupBy groupBy, boolean orderByReverse) { - if(groupBy.isEmpty()) { - return OrderBy.EMPTY_ORDER_BY; - } - List rowKeyColumnExpressions = convertGroupByToRowKeyColumnExpressions(groupBy); - List orderPreservingTrackInfos = Collections. emptyList(); - if(groupBy.isOrderPreserving()) { - orderPreservingTrackInfos = groupBy.getOrderPreservingTrackInfos(); - } - return convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, orderPreservingTrackInfos, orderByReverse); + if (sourceTableRef == null) { + return new Pair(OrderBy.EMPTY_ORDER_BY, 0); } - /** - * Convert the expressions in GroupBy to {@link RowKeyColumnExpression}, the convert logic is same as {@link ExpressionCompiler#wrapGroupByExpression}. - * @param groupBy - * @return - */ - public static List convertGroupByToRowKeyColumnExpressions(GroupBy groupBy) { - if(groupBy.isEmpty()) { - return Collections. 
emptyList(); - } - List groupByExpressions = groupBy.getExpressions(); - List rowKeyColumnExpressions = new ArrayList(groupByExpressions.size()); - int columnIndex = 0; - for(Expression groupByExpression : groupByExpressions) { - RowKeyColumnExpression rowKeyColumnExpression = - convertGroupByExpressionToRowKeyColumnExpression(groupBy, groupByExpression, columnIndex++); - rowKeyColumnExpressions.add(rowKeyColumnExpression); - } - return rowKeyColumnExpressions; + final int sourceRowKeyColumnOffset = + getRowKeyColumnOffset(sourceTableRef.getTable(), phoenixConnection); + List orderByExpressions = new LinkedList(); + int matchedSourceRowKeyColumnOffset = sourceRowKeyColumnOffset; + for (Entry entry : sourceRowKeyColumnIndexToProjectedColumnRef.entrySet()) { + int currentRowKeyColumnOffset = entry.getKey(); + if (currentRowKeyColumnOffset < matchedSourceRowKeyColumnOffset) { + continue; + } else if (currentRowKeyColumnOffset == matchedSourceRowKeyColumnOffset) { + matchedSourceRowKeyColumnOffset++; + } else { + break; + } + + ColumnRef projectedColumnRef = entry.getValue(); + Expression projectedValueColumnExpression = projectedColumnRef.newColumnExpression(); + OrderByExpression orderByExpression = OrderByExpression + .convertExpressionToOrderByExpression(projectedValueColumnExpression, orderByReverse); + orderByExpressions.add(orderByExpression); } - /** - * Convert the expressions in GroupBy to {@link RowKeyColumnExpression}, a typical case is in {@link ExpressionCompiler#wrapGroupByExpression}. - * @param groupBy - * @param originalExpression - * @param groupByColumnIndex - * @return - */ - public static RowKeyColumnExpression convertGroupByExpressionToRowKeyColumnExpression( - GroupBy groupBy, - Expression originalExpression, - int groupByColumnIndex) { - RowKeyValueAccessor rowKeyValueAccessor = new RowKeyValueAccessor(groupBy.getKeyExpressions(), groupByColumnIndex); - return new RowKeyColumnExpression( - originalExpression, - rowKeyValueAccessor, - groupBy.getKeyExpressions().get(groupByColumnIndex).getDataType()); + if (orderByExpressions.isEmpty()) { + return new Pair(OrderBy.EMPTY_ORDER_BY, 0); + } + return new Pair(new OrderBy(orderByExpressions), sourceRowKeyColumnOffset); + } + + /** + * For table with salted/multiTenant/viewIndexId,some leading rowkey columns should be skipped. + */ + public static int getRowKeyColumnOffset(PTable table, PhoenixConnection phoenixConnection) { + boolean isSalted = table.getBucketNum() != null; + boolean isMultiTenant = phoenixConnection.getTenantId() != null && table.isMultiTenant(); + boolean isSharedViewIndex = table.getViewIndexId() != null; + return (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); + } + + /** + * Create {@link RowKeyColumnExpression} from {@link PTable}. The second part of the return pair + * is the rowkey column offset we must skip when we create OrderBys, because for table with + * salted/multiTenant/viewIndexId, some leading rowkey columns should be skipped. + */ + public static Pair, Integer> + getRowKeyColumnExpressionsFromTable(PTable table, PhoenixConnection phoenixConnection) { + int pkPositionOffset = getRowKeyColumnOffset(table, phoenixConnection); + List pkColumns = table.getPKColumns(); + if (pkPositionOffset >= pkColumns.size()) { + return new Pair, Integer>( + Collections. 
emptyList(), 0); + } + List rowKeyColumnExpressions = + new ArrayList(pkColumns.size() - pkPositionOffset); + for (int index = pkPositionOffset; index < pkColumns.size(); index++) { + RowKeyColumnExpression rowKeyColumnExpression = + new RowKeyColumnExpression(pkColumns.get(index), new RowKeyValueAccessor(pkColumns, index)); + rowKeyColumnExpressions.add(rowKeyColumnExpression); + } + return new Pair, Integer>(rowKeyColumnExpressions, + pkPositionOffset); + } + + /** + * Create OrderByExpression by RowKeyColumnExpression,isNullsLast is the default value + * "false",isAscending is based on {@link Expression#getSortOrder()}. If orderByReverse is true, + * reverse the isNullsLast and isAscending. + */ + public static OrderBy convertRowKeyColumnExpressionsToOrderBy( + List rowKeyColumnExpressions, boolean orderByReverse) { + return convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, + Collections. emptyList(), orderByReverse); + } + + /** + * Create OrderByExpression by RowKeyColumnExpression, if the orderPreservingTrackInfos is not + * null, use isNullsLast and isAscending from orderPreservingTrackInfos. If orderByReverse is + * true, reverse the isNullsLast and isAscending. + */ + public static OrderBy convertRowKeyColumnExpressionsToOrderBy( + List rowKeyColumnExpressions, List orderPreservingTrackInfos, + boolean orderByReverse) { + if (rowKeyColumnExpressions.isEmpty()) { + return OrderBy.EMPTY_ORDER_BY; + } + List orderByExpressions = + new ArrayList(rowKeyColumnExpressions.size()); + Iterator orderPreservingTrackInfosIter = null; + if (orderPreservingTrackInfos != null && orderPreservingTrackInfos.size() > 0) { + if (orderPreservingTrackInfos.size() != rowKeyColumnExpressions.size()) { + throw new IllegalStateException("orderPreservingTrackInfos.size():[" + + orderPreservingTrackInfos.size() + "] should equals rowKeyColumnExpressions.size():[" + + rowKeyColumnExpressions.size() + "]!"); + } + orderPreservingTrackInfosIter = orderPreservingTrackInfos.iterator(); + } + for (RowKeyColumnExpression rowKeyColumnExpression : rowKeyColumnExpressions) { + Info orderPreservingTrackInfo = null; + if (orderPreservingTrackInfosIter != null) { + assert orderPreservingTrackInfosIter.hasNext(); + orderPreservingTrackInfo = orderPreservingTrackInfosIter.next(); + } + OrderByExpression orderByExpression = OrderByExpression.convertExpressionToOrderByExpression( + rowKeyColumnExpression, orderPreservingTrackInfo, orderByReverse); + orderByExpressions.add(orderByExpression); + } + return new OrderBy(orderByExpressions); + } + + /** + * Convert the GroupBy to OrderBy, expressions in GroupBy should be converted to + * {@link RowKeyColumnExpression}. + */ + public static OrderBy convertGroupByToOrderBy(GroupBy groupBy, boolean orderByReverse) { + if (groupBy.isEmpty()) { + return OrderBy.EMPTY_ORDER_BY; + } + List rowKeyColumnExpressions = + convertGroupByToRowKeyColumnExpressions(groupBy); + List orderPreservingTrackInfos = Collections. emptyList(); + if (groupBy.isOrderPreserving()) { + orderPreservingTrackInfos = groupBy.getOrderPreservingTrackInfos(); + } + return convertRowKeyColumnExpressionsToOrderBy(rowKeyColumnExpressions, + orderPreservingTrackInfos, orderByReverse); + } + + /** + * Convert the expressions in GroupBy to {@link RowKeyColumnExpression}, the convert logic is same + * as {@link ExpressionCompiler#wrapGroupByExpression}. + */ + public static List + convertGroupByToRowKeyColumnExpressions(GroupBy groupBy) { + if (groupBy.isEmpty()) { + return Collections. 
emptyList(); + } + List groupByExpressions = groupBy.getExpressions(); + List rowKeyColumnExpressions = + new ArrayList(groupByExpressions.size()); + int columnIndex = 0; + for (Expression groupByExpression : groupByExpressions) { + RowKeyColumnExpression rowKeyColumnExpression = + convertGroupByExpressionToRowKeyColumnExpression(groupBy, groupByExpression, columnIndex++); + rowKeyColumnExpressions.add(rowKeyColumnExpression); } + return rowKeyColumnExpressions; + } + + /** + * Convert the expressions in GroupBy to {@link RowKeyColumnExpression}, a typical case is in + * {@link ExpressionCompiler#wrapGroupByExpression}. + */ + public static RowKeyColumnExpression convertGroupByExpressionToRowKeyColumnExpression( + GroupBy groupBy, Expression originalExpression, int groupByColumnIndex) { + RowKeyValueAccessor rowKeyValueAccessor = + new RowKeyValueAccessor(groupBy.getKeyExpressions(), groupByColumnIndex); + return new RowKeyColumnExpression(originalExpression, rowKeyValueAccessor, + groupBy.getKeyExpressions().get(groupByColumnIndex).getDataType()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/FirstLastNthValueDataContainer.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/FirstLastNthValueDataContainer.java index 9b02103602d..f7d33e0e9cc 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/FirstLastNthValueDataContainer.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/FirstLastNthValueDataContainer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,157 +28,146 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Container for data transfer between server and client aggregation (FIRST|LAST|NTH)_VALUE functions - * + * Container for data transfer between server and client aggregation (FIRST|LAST|NTH)_VALUE + * functions */ public class FirstLastNthValueDataContainer { - protected boolean isAscending = false; - protected int offset; - protected TreeMap> data; - protected boolean isOrderValuesFixedLength = false; - protected boolean isDataValuesFixedLength = false; + protected boolean isAscending = false; + protected int offset; + protected TreeMap> data; + protected boolean isOrderValuesFixedLength = false; + protected boolean isDataValuesFixedLength = false; - public void setIsAscending(boolean ascending) { - isAscending = ascending; - } + public void setIsAscending(boolean ascending) { + isAscending = ascending; + } - public void setData(TreeMap> topValues) { - data = topValues; - } + public void setData(TreeMap> topValues) { + data = topValues; + } - public void setFixedWidthOrderValues(boolean fixedSize) { - isOrderValuesFixedLength = fixedSize; - } + public void setFixedWidthOrderValues(boolean fixedSize) { + isOrderValuesFixedLength = fixedSize; + } - public void setFixedWidthDataValues(boolean fixedSize) { - isDataValuesFixedLength = fixedSize; - } + public void setFixedWidthDataValues(boolean fixedSize) { + isDataValuesFixedLength = fixedSize; + } - public void setOffset(int offset) { - this.offset = offset; - } - - public void setPayload(byte[] payload) { - if (payload[0] == (byte) 1) { - isAscending = true; - } - - int lengthOfOrderValues = Bytes.toInt(payload, 1); - int 
lengthOfDataValues = Bytes.toInt(payload, 5); - int sizeOfMap = Bytes.toInt(payload, 9); - - data = new TreeMap>(new Bytes.ByteArrayComparator()); - - int payloadOffset = 13; - - for (; sizeOfMap != 0; sizeOfMap--) { - byte[] key; - byte[] value; - - if (lengthOfOrderValues != 0) { - key = Bytes.copy(payload, payloadOffset, lengthOfOrderValues); - payloadOffset += lengthOfOrderValues; - } else { - int l = Bytes.toInt(payload, payloadOffset); - payloadOffset += 4; - key = Bytes.copy(payload, payloadOffset, l); - payloadOffset += l; - } - - if (lengthOfDataValues != 0) { - value = Bytes.copy(payload, payloadOffset, lengthOfDataValues); - payloadOffset += lengthOfDataValues; - } else { - int l = Bytes.toInt(payload, payloadOffset); - payloadOffset += 4; - value = Bytes.copy(payload, payloadOffset, l); - payloadOffset += l; - } - - if(!data.containsKey(key)) { - data.put(key, new LinkedList()); - } - data.get(key).add(value); - } + public void setOffset(int offset) { + this.offset = offset; + } + public void setPayload(byte[] payload) { + if (payload[0] == (byte) 1) { + isAscending = true; } - public byte[] getPayload() throws IOException { - /* - PAYLOAD STUCTURE - - what | size (bytes) | info - is ascending | 1 | 1 = asc, 0 = desc - length of order by vals | 4 | 0 if dynamic length, size otherwise - length of values | 4 | 0 if dynamic length, size otherwise - [ length of first order | 4 | set if order is var length (optional) ] - first order value | n | order by val - [ length of first value | 4 | set if value is var length (optional) ] - first value | n | data val - ... and so on, repeat order by values and data values - - - example with fixed length for data and order by values - 0 | 0000 0004 | 0000 0004 | 0000 0001 | 0000 000FF | ... - is ascendig | length order vals | length data vals | first order val | first value | ... more values + int lengthOfOrderValues = Bytes.toInt(payload, 1); + int lengthOfDataValues = Bytes.toInt(payload, 5); + int sizeOfMap = Bytes.toInt(payload, 9); + + data = new TreeMap>(new Bytes.ByteArrayComparator()); + + int payloadOffset = 13; + + for (; sizeOfMap != 0; sizeOfMap--) { + byte[] key; + byte[] value; + + if (lengthOfOrderValues != 0) { + key = Bytes.copy(payload, payloadOffset, lengthOfOrderValues); + payloadOffset += lengthOfOrderValues; + } else { + int l = Bytes.toInt(payload, payloadOffset); + payloadOffset += 4; + key = Bytes.copy(payload, payloadOffset, l); + payloadOffset += l; + } + + if (lengthOfDataValues != 0) { + value = Bytes.copy(payload, payloadOffset, lengthOfDataValues); + payloadOffset += lengthOfDataValues; + } else { + int l = Bytes.toInt(payload, payloadOffset); + payloadOffset += 4; + value = Bytes.copy(payload, payloadOffset, l); + payloadOffset += l; + } + + if (!data.containsKey(key)) { + data.put(key, new LinkedList()); + } + data.get(key).add(value); + } - example with dynamic length for data (length will be zeros) - 0 | 0000 0000 | 0000 0000 | 0000 0004 | 0000 000FF | ... - is ascendig | length order vals | length data vals | first order length | first order value | ... 
more values + } + + public byte[] getPayload() throws IOException { + /* + * PAYLOAD STUCTURE what | size (bytes) | info is ascending | 1 | 1 = asc, 0 = desc length of + * order by vals | 4 | 0 if dynamic length, size otherwise length of values | 4 | 0 if dynamic + * length, size otherwise [ length of first order | 4 | set if order is var length (optional) ] + * first order value | n | order by val [ length of first value | 4 | set if value is var length + * (optional) ] first value | n | data val ... and so on, repeat order by values and data values + * example with fixed length for data and order by values 0 | 0000 0004 | 0000 0004 | 0000 0001 + * | 0000 000FF | ... is ascendig | length order vals | length data vals | first order val | + * first value | ... more values example with dynamic length for data (length will be zeros) 0 | + * 0000 0000 | 0000 0000 | 0000 0004 | 0000 000FF | ... is ascendig | length order vals | length + * data vals | first order length | first order value | ... more values + */ + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + + bos.write(isAscending ? (byte) 1 : (byte) 0); + + Entry> firstEntry = data.firstEntry(); + if (isOrderValuesFixedLength) { + bos.write(Bytes.toBytes(firstEntry.getKey().length)); + } else { + bos.write(Bytes.toBytes(0)); + } - */ + if (isDataValuesFixedLength) { + bos.write(Bytes.toBytes(firstEntry.getValue().getFirst().length)); + } else { + bos.write(Bytes.toBytes(0)); + } - ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int offsetForDataLength = bos.size(); + bos.write(new byte[4]); // space for number of elements + int valuesCount = 0; - bos.write(isAscending ? (byte) 1 : (byte) 0); + for (Map.Entry> entry : data.entrySet()) { + ListIterator it = entry.getValue().listIterator(); + while (it.hasNext()) { + valuesCount++; + byte[] itemValue = it.next(); - Entry> firstEntry = data.firstEntry(); - if (isOrderValuesFixedLength) { - bos.write(Bytes.toBytes(firstEntry.getKey().length)); - } else { - bos.write(Bytes.toBytes(0)); + if (!isOrderValuesFixedLength) { + bos.write(Bytes.toBytes(entry.getKey().length)); } + bos.write(entry.getKey()); - if (isDataValuesFixedLength) { - bos.write(Bytes.toBytes(firstEntry.getValue().getFirst().length)); - } else { - bos.write(Bytes.toBytes(0)); + if (!isDataValuesFixedLength) { + bos.write(Bytes.toBytes(itemValue.length)); } - - int offsetForDataLength = bos.size(); - bos.write(new byte[4]); //space for number of elements - int valuesCount = 0; - - for (Map.Entry> entry : data.entrySet()) { - ListIterator it = entry.getValue().listIterator(); - while(it.hasNext()) { - valuesCount++; - byte[] itemValue = it.next(); - - if (!isOrderValuesFixedLength) { - bos.write(Bytes.toBytes(entry.getKey().length)); - } - bos.write(entry.getKey()); - - if (!isDataValuesFixedLength) { - bos.write(Bytes.toBytes(itemValue.length)); - } - bos.write(itemValue); - } - } - - byte[] outputArray = bos.toByteArray(); - //write number of elements - System.arraycopy(Bytes.toBytes(valuesCount), 0, outputArray, offsetForDataLength, 4); - return outputArray; + bos.write(itemValue); + } } - public boolean getIsAscending() { - return isAscending; - } + byte[] outputArray = bos.toByteArray(); + // write number of elements + System.arraycopy(Bytes.toBytes(valuesCount), 0, outputArray, offsetForDataLength, 4); + return outputArray; + } - public TreeMap> getData() { - return data; - } + public boolean getIsAscending() { + return isAscending; + } + + public TreeMap> getData() { + return data; + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/IndexUtil.java index 8aa2c33e929..191e76b4218 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/IndexUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/IndexUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,12 +49,6 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.index.PhoenixIndexCodec; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; -import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; @@ -63,6 +57,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback; @@ -78,6 +73,7 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MetaDataMutationResult; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; import org.apache.phoenix.exception.SQLExceptionCode; @@ -96,6 +92,7 @@ import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.hbase.index.util.VersionUtil; import org.apache.phoenix.index.IndexMaintainer; +import org.apache.phoenix.index.PhoenixIndexCodec; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.jdbc.PhoenixStatement; @@ -104,6 +101,7 @@ import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.protobuf.ProtobufUtil; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.ColumnFamilyNotFoundException; import org.apache.phoenix.schema.ColumnNotFoundException; @@ -129,872 +127,915 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature; - +import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; +import 
org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature; public class IndexUtil { - public static final String INDEX_COLUMN_NAME_SEP = ":"; - public static final byte[] INDEX_COLUMN_NAME_SEP_BYTES = Bytes.toBytes(INDEX_COLUMN_NAME_SEP); - - public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class"; - public static final String PHOENIX_INDEX_BUILDER_CLASSNAME = "org.apache.phoenix.index.PhoenixIndexBuilder"; - - /** Configuration key for the {@link org.apache.phoenix.hbase.index.builder.IndexBuilder} to use */ - public static final String INDEX_BUILDER_CONF_KEY = "index.builder"; - - private final static Cache indexNameGlobalIndexCheckerEnabledMap = - CacheBuilder.newBuilder() - .expireAfterWrite(QueryServicesOptions.GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN, - TimeUnit.MINUTES) - .build(); - - private IndexUtil() { + public static final String INDEX_COLUMN_NAME_SEP = ":"; + public static final byte[] INDEX_COLUMN_NAME_SEP_BYTES = Bytes.toBytes(INDEX_COLUMN_NAME_SEP); + + public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class"; + public static final String PHOENIX_INDEX_BUILDER_CLASSNAME = + "org.apache.phoenix.index.PhoenixIndexBuilder"; + + /** + * Configuration key for the {@link org.apache.phoenix.hbase.index.builder.IndexBuilder} to use + */ + public static final String INDEX_BUILDER_CONF_KEY = "index.builder"; + + private final static Cache indexNameGlobalIndexCheckerEnabledMap = CacheBuilder.newBuilder() + .expireAfterWrite(QueryServicesOptions.GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN, + TimeUnit.MINUTES) + .build(); + + private IndexUtil() { + } + + // Since we cannot have nullable fixed length in a row key + // we need to translate to variable length. + public static PDataType getIndexColumnDataType(PColumn dataColumn) throws SQLException { + PDataType type = getIndexColumnDataType(dataColumn.isNullable(), dataColumn.getDataType()); + if (type == null) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_INDEX_COLUMN_ON_TYPE) + .setColumnName(dataColumn.getName().getString()) + .setMessage("Type=" + dataColumn.getDataType()).build().buildException(); } - - // Since we cannot have nullable fixed length in a row key - // we need to translate to variable length. - public static PDataType getIndexColumnDataType(PColumn dataColumn) throws SQLException { - PDataType type = getIndexColumnDataType(dataColumn.isNullable(),dataColumn.getDataType()); - if (type == null) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_INDEX_COLUMN_ON_TYPE).setColumnName(dataColumn.getName().getString()) - .setMessage("Type="+dataColumn.getDataType()).build().buildException(); - } - return type; - } - - // Since we cannot have nullable fixed length in a row key - // we need to translate to variable length. The verification that we have a valid index - // row key was already done, so here we just need to convert from one built-in type to - // another. 
- public static PDataType getIndexColumnDataType(boolean isNullable, PDataType dataType) { - if (dataType == null || !isNullable || !dataType.isFixedWidth()) { - return dataType; - } - // for fixed length numeric types and boolean - if (dataType.isCastableTo(PDecimal.INSTANCE)) { - return PDecimal.INSTANCE; - } - // for CHAR - if (dataType.isCoercibleTo(PVarchar.INSTANCE)) { - return PVarchar.INSTANCE; - } - - if (PBinary.INSTANCE.equals(dataType)) { - return PVarbinary.INSTANCE; - } - throw new IllegalArgumentException("Unsupported non nullable type " + dataType); + return type; + } + + // Since we cannot have nullable fixed length in a row key + // we need to translate to variable length. The verification that we have a valid index + // row key was already done, so here we just need to convert from one built-in type to + // another. + public static PDataType getIndexColumnDataType(boolean isNullable, PDataType dataType) { + if (dataType == null || !isNullable || !dataType.isFixedWidth()) { + return dataType; } - - - public static String getDataColumnName(String name) { - return name.substring(name.indexOf(INDEX_COLUMN_NAME_SEP) + 1); + // for fixed length numeric types and boolean + if (dataType.isCastableTo(PDecimal.INSTANCE)) { + return PDecimal.INSTANCE; } - - public static String getDataColumnFamilyName(String name) { - return name.substring(0,name.indexOf(INDEX_COLUMN_NAME_SEP)); + // for CHAR + if (dataType.isCoercibleTo(PVarchar.INSTANCE)) { + return PVarchar.INSTANCE; } - public static String getActualColumnFamilyName(String name) { - if(name.startsWith(LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - return name.substring(LOCAL_INDEX_COLUMN_FAMILY_PREFIX.length()); - } - return name; + if (PBinary.INSTANCE.equals(dataType)) { + return PVarbinary.INSTANCE; } + throw new IllegalArgumentException("Unsupported non nullable type " + dataType); + } - public static String getCaseSensitiveDataColumnFullName(String name) { - int index = name.indexOf(INDEX_COLUMN_NAME_SEP) ; - return SchemaUtil.getCaseSensitiveColumnDisplayName(getDataColumnFamilyName(name), name.substring(index+1)); - } + public static String getDataColumnName(String name) { + return name.substring(name.indexOf(INDEX_COLUMN_NAME_SEP) + 1); + } - public static String getIndexColumnName(String dataColumnFamilyName, String dataColumnName) { - return (dataColumnFamilyName == null ? "" : dataColumnFamilyName) + INDEX_COLUMN_NAME_SEP - + dataColumnName; + public static String getDataColumnFamilyName(String name) { + return name.substring(0, name.indexOf(INDEX_COLUMN_NAME_SEP)); + } + + public static String getActualColumnFamilyName(String name) { + if (name.startsWith(LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + return name.substring(LOCAL_INDEX_COLUMN_FAMILY_PREFIX.length()); } - - public static byte[] getIndexColumnName(byte[] dataColumnFamilyName, byte[] dataColumnName) { - return ByteUtil.concat(dataColumnFamilyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : dataColumnFamilyName, INDEX_COLUMN_NAME_SEP_BYTES, dataColumnName); + return name; + } + + public static String getCaseSensitiveDataColumnFullName(String name) { + int index = name.indexOf(INDEX_COLUMN_NAME_SEP); + return SchemaUtil.getCaseSensitiveColumnDisplayName(getDataColumnFamilyName(name), + name.substring(index + 1)); + } + + public static String getIndexColumnName(String dataColumnFamilyName, String dataColumnName) { + return (dataColumnFamilyName == null ? 
"" : dataColumnFamilyName) + INDEX_COLUMN_NAME_SEP + + dataColumnName; + } + + public static byte[] getIndexColumnName(byte[] dataColumnFamilyName, byte[] dataColumnName) { + return ByteUtil.concat( + dataColumnFamilyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : dataColumnFamilyName, + INDEX_COLUMN_NAME_SEP_BYTES, dataColumnName); + } + + public static String getIndexColumnName(PColumn dataColumn) { + String dataColumnFamilyName = + SchemaUtil.isPKColumn(dataColumn) ? null : dataColumn.getFamilyName().getString(); + return getIndexColumnName(dataColumnFamilyName, dataColumn.getName().getString()); + } + + public static PColumn getIndexPKColumn(int position, PColumn dataColumn) { + assert (SchemaUtil.isPKColumn(dataColumn)); + PName indexColumnName = + PNameFactory.newName(getIndexColumnName(null, dataColumn.getName().getString())); + PColumn column = new PColumnImpl(indexColumnName, null, dataColumn.getDataType(), + dataColumn.getMaxLength(), dataColumn.getScale(), dataColumn.isNullable(), position, + dataColumn.getSortOrder(), dataColumn.getArraySize(), null, false, + dataColumn.getExpressionStr(), dataColumn.isRowTimestamp(), false, + // TODO set the columnQualifierBytes correctly + /* columnQualifierBytes */null, HConstants.LATEST_TIMESTAMP); + return column; + } + + public static String getLocalIndexColumnFamily(String dataColumnFamilyName) { + return dataColumnFamilyName == null + ? null + : QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + dataColumnFamilyName; + } + + public static byte[] getLocalIndexColumnFamily(byte[] dataColumnFamilyBytes) { + String dataCF = Bytes.toString(dataColumnFamilyBytes); + return getLocalIndexColumnFamily(dataCF).getBytes(StandardCharsets.UTF_8); + } + + public static PColumn getDataColumn(PTable dataTable, String indexColumnName) { + PColumn column = getDataColumnOrNull(dataTable, indexColumnName); + if (column == null) { + throw new IllegalArgumentException("Could not find column \"" + SchemaUtil + .getColumnName(getDataColumnFamilyName(indexColumnName), getDataColumnName(indexColumnName)) + + " in " + dataTable); } + return column; + } - public static String getIndexColumnName(PColumn dataColumn) { - String dataColumnFamilyName = SchemaUtil.isPKColumn(dataColumn) ? null : dataColumn.getFamilyName().getString(); - return getIndexColumnName(dataColumnFamilyName, dataColumn.getName().getString()); + public static PColumn getDataColumnOrNull(PTable dataTable, String indexColumnName) { + int pos = indexColumnName.indexOf(INDEX_COLUMN_NAME_SEP); + if (pos < 0) { + return null; } - - public static PColumn getIndexPKColumn(int position, PColumn dataColumn) { - assert (SchemaUtil.isPKColumn(dataColumn)); - PName indexColumnName = PNameFactory.newName(getIndexColumnName(null, dataColumn.getName().getString())); - PColumn column = new PColumnImpl(indexColumnName, null, dataColumn.getDataType(), dataColumn.getMaxLength(), - dataColumn.getScale(), dataColumn.isNullable(), position, dataColumn.getSortOrder(), - dataColumn.getArraySize(), null, false, dataColumn.getExpressionStr(), dataColumn.isRowTimestamp(), false, - // TODO set the columnQualifierBytes correctly - /*columnQualifierBytes*/null, HConstants.LATEST_TIMESTAMP); - return column; - } - - public static String getLocalIndexColumnFamily(String dataColumnFamilyName) { - return dataColumnFamilyName == null ? 
null - : QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + dataColumnFamilyName; + if (pos == 0) { + try { + return dataTable.getPKColumn(indexColumnName.substring(1)); + } catch (ColumnNotFoundException e) { + return null; + } } - - public static byte[] getLocalIndexColumnFamily(byte[] dataColumnFamilyBytes) { - String dataCF = Bytes.toString(dataColumnFamilyBytes); - return getLocalIndexColumnFamily(dataCF).getBytes(StandardCharsets.UTF_8); + PColumnFamily family; + try { + family = dataTable.getColumnFamily(getDataColumnFamilyName(indexColumnName)); + } catch (ColumnFamilyNotFoundException e) { + return null; } - - public static PColumn getDataColumn(PTable dataTable, String indexColumnName) { - PColumn column = getDataColumnOrNull(dataTable, indexColumnName); - if (column == null) { - throw new IllegalArgumentException("Could not find column \"" + SchemaUtil.getColumnName(getDataColumnFamilyName(indexColumnName), getDataColumnName(indexColumnName)) + " in " + dataTable); - } - return column; + try { + return family.getPColumnForColumnName(indexColumnName.substring(pos + 1)); + } catch (ColumnNotFoundException e) { + return null; } - - public static PColumn getDataColumnOrNull(PTable dataTable, String indexColumnName) { - int pos = indexColumnName.indexOf(INDEX_COLUMN_NAME_SEP); - if (pos < 0) { - return null; - } - if (pos == 0) { - try { - return dataTable.getPKColumn(indexColumnName.substring(1)); - } catch (ColumnNotFoundException e) { - return null; - } - } - PColumnFamily family; - try { - family = dataTable.getColumnFamily(getDataColumnFamilyName(indexColumnName)); - } catch (ColumnFamilyNotFoundException e) { - return null; - } - try { - return family.getPColumnForColumnName(indexColumnName.substring(pos+1)); - } catch (ColumnNotFoundException e) { - return null; - } + } + + /** + * Return a list of {@code PColumn} for the associated data columns given the corresponding index + * columns. For a tenant specific view, the connection needs to be tenant specific too. + * @throws TableNotFoundException if table cannot be found in the connection's metdata cache + */ + public static List getDataColumns(String dataTableName, List indexColumns, + PhoenixConnection conn) throws SQLException { + PTable dataTable = conn.getTable(dataTableName); + List dataColumns = new ArrayList(indexColumns.size()); + for (PColumn indexColumn : indexColumns) { + dataColumns.add(getDataColumn(dataTable, indexColumn.getName().getString())); } - - /** - * Return a list of {@code PColumn} for the associated data columns given the corresponding index columns. For a tenant - * specific view, the connection needs to be tenant specific too. 
- * @param dataTableName - * @param indexColumns - * @param conn - * @return - * @throws TableNotFoundException if table cannot be found in the connection's metdata cache - */ - public static List getDataColumns(String dataTableName, List indexColumns, PhoenixConnection conn) throws SQLException { - PTable dataTable = conn.getTable(dataTableName); - List dataColumns = new ArrayList(indexColumns.size()); - for (PColumn indexColumn : indexColumns) { - dataColumns.add(getDataColumn(dataTable, indexColumn.getName().getString())); - } - return dataColumns; - } - - - public static boolean isEmptyKeyValue(PTable table, ColumnReference ref) { - byte[] emptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyKeyValueQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); - return (Bytes.compareTo(emptyKeyValueCF, 0, emptyKeyValueCF.length, ref.getFamilyWritable() - .get(), ref.getFamilyWritable().getOffset(), ref.getFamilyWritable().getLength()) == 0 && Bytes - .compareTo(emptyKeyValueQualifier, 0, - emptyKeyValueQualifier.length, ref.getQualifierWritable().get(), ref - .getQualifierWritable().getOffset(), ref.getQualifierWritable() - .getLength()) == 0); + return dataColumns; + } + + public static boolean isEmptyKeyValue(PTable table, ColumnReference ref) { + byte[] emptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyKeyValueQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); + return (Bytes.compareTo(emptyKeyValueCF, 0, emptyKeyValueCF.length, + ref.getFamilyWritable().get(), ref.getFamilyWritable().getOffset(), + ref.getFamilyWritable().getLength()) == 0 + && Bytes.compareTo(emptyKeyValueQualifier, 0, emptyKeyValueQualifier.length, + ref.getQualifierWritable().get(), ref.getQualifierWritable().getOffset(), + ref.getQualifierWritable().getLength()) == 0); + } + + public static boolean isGlobalIndexCheckerEnabled(PhoenixConnection connection, PName index) + throws SQLException { + String indexName = index.getString(); + Boolean entry = indexNameGlobalIndexCheckerEnabledMap.getIfPresent(indexName); + if (entry != null) { + return entry; } + boolean result = false; + try { + TableDescriptor desc = connection.getQueryServices().getTableDescriptor(index.getBytes()); - public static boolean isGlobalIndexCheckerEnabled(PhoenixConnection connection, PName index) - throws SQLException { - String indexName = index.getString(); - Boolean entry = indexNameGlobalIndexCheckerEnabledMap.getIfPresent(indexName); - if (entry != null){ - return entry; + if (desc != null) { + if (desc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME)) { + result = true; } + } + indexNameGlobalIndexCheckerEnabledMap.put(indexName, result); + } catch (TableNotFoundException ex) { + // We can swallow this because some indexes don't have separate tables like local indexes + } - boolean result = false; - try { - TableDescriptor desc = connection.getQueryServices().getTableDescriptor(index.getBytes()); + return result; + } + + public static List generateIndexData(final PTable table, PTable index, + final MultiRowMutationState multiRowMutationState, List dataMutations, + final KeyValueBuilder kvBuilder, PhoenixConnection connection) + throws SQLException, IOException { + try { + final ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); + List indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size()); + for (final Mutation dataMutation : dataMutations) { + long ts = 
MetaDataUtil.getClientTimeStamp(dataMutation); + ptr.set(dataMutation.getRow()); + /* + * We only need to generate the additional mutations for a Put for immutable indexes. + * Deletes of rows are handled by running a re-written query against the index table, and + * Deletes of column values should never be necessary, as you should never be updating an + * existing row. + */ + if (dataMutation instanceof Put) { + ValueGetter valueGetter = new AbstractValueGetter() { - if (desc != null) { - if (desc.hasCoprocessor(QueryConstants.GLOBAL_INDEX_CHECKER_CLASSNAME)) { - result = true; - } + @Override + public byte[] getRowKey() { + return dataMutation.getRow(); } - indexNameGlobalIndexCheckerEnabledMap.put(indexName, result); - } catch (TableNotFoundException ex) { - // We can swallow this because some indexes don't have separate tables like local indexes - } - return result; - } - - public static List generateIndexData(final PTable table, PTable index, - final MultiRowMutationState multiRowMutationState, List dataMutations, final KeyValueBuilder kvBuilder, PhoenixConnection connection) - throws SQLException, IOException { - try { - final ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - IndexMaintainer maintainer = index.getIndexMaintainer(table, connection); - List indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size()); - for (final Mutation dataMutation : dataMutations) { - long ts = MetaDataUtil.getClientTimeStamp(dataMutation); - ptr.set(dataMutation.getRow()); - /* - * We only need to generate the additional mutations for a Put for immutable indexes. - * Deletes of rows are handled by running a re-written query against the index table, - * and Deletes of column values should never be necessary, as you should never be - * updating an existing row. - */ - if (dataMutation instanceof Put) { - ValueGetter valueGetter = new AbstractValueGetter() { - - @Override - public byte[] getRowKey() { - return dataMutation.getRow(); - } - - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { - // Always return null for our empty key value, as this will cause the index - // maintainer to always treat this Put as a new row. 
- if (isEmptyKeyValue(table, ref)) { - return null; - } - byte[] family = ref.getFamily(); - byte[] qualifier = ref.getQualifier(); - Map> familyMap = dataMutation.getFamilyCellMap(); - List kvs = familyMap.get(family); - if (kvs == null) { - return null; - } - for (Cell kv : kvs) { - if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 && - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) { - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - kvBuilder.getValueAsPtr(kv, ptr); - return ptr; - } - } - return null; - } - - }; - byte[] regionStartKey = null; - byte[] regionEndkey = null; - if(maintainer.isLocalIndex()) { - HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow()); - regionStartKey = tableRegionLocation.getRegion().getStartKey(); - regionEndkey = tableRegionLocation.getRegion().getEndKey(); - } - indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey, false)); + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { + // Always return null for our empty key value, as this will cause the index + // maintainer to always treat this Put as a new row. + if (isEmptyKeyValue(table, ref)) { + return null; + } + byte[] family = ref.getFamily(); + byte[] qualifier = ref.getQualifier(); + Map> familyMap = dataMutation.getFamilyCellMap(); + List kvs = familyMap.get(family); + if (kvs == null) { + return null; + } + for (Cell kv : kvs) { + if ( + Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + family, 0, family.length) == 0 + && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0 + ) { + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + kvBuilder.getValueAsPtr(kv, ptr); + return ptr; } + } + return null; } - return indexMutations; - } catch (IOException e) { - throw new SQLException(e); + + }; + byte[] regionStartKey = null; + byte[] regionEndkey = null; + if (maintainer.isLocalIndex()) { + HRegionLocation tableRegionLocation = connection.getQueryServices() + .getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow()); + regionStartKey = tableRegionLocation.getRegion().getStartKey(); + regionEndkey = tableRegionLocation.getRegion().getEndKey(); + } + indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, + regionStartKey, regionEndkey, false)); } + } + return indexMutations; + } catch (IOException e) { + throw new SQLException(e); } + } + + public static boolean isDataPKColumn(PColumn column) { + return column.getName().getString().startsWith(INDEX_COLUMN_NAME_SEP); + } - public static boolean isDataPKColumn(PColumn column) { - return column.getName().getString().startsWith(INDEX_COLUMN_NAME_SEP); + public static boolean isIndexColumn(String name) { + return name.contains(INDEX_COLUMN_NAME_SEP); + } + + public static boolean getViewConstantValue(PColumn column, ImmutableBytesWritable ptr) { + byte[] value = column.getViewConstant(); + if (value != null) { + ptr.set(value, 0, value.length - 1); + return true; } - - public static boolean isIndexColumn(String name) { - return name.contains(INDEX_COLUMN_NAME_SEP); + return false; + } + + /** + * Traverse the expression tree and set the offset of every 
RowKeyColumnExpression to the offset + * provided. This is used for local indexing on the server-side to skip over the region start key + * that prefixes index rows. + * @param rootExpression the root expression from which to begin traversal + * @param offset the offset to set on each RowKeyColumnExpression + */ + public static void setRowKeyExpressionOffset(Expression rootExpression, final int offset) { + rootExpression.accept(new RowKeyExpressionVisitor() { + + @Override + public Void visit(RowKeyColumnExpression node) { + node.setOffset(offset); + return null; + } + + }); + } + + public static ColumnReference[] deserializeDataTableColumnsToJoin(Scan scan) { + byte[] columnsBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.DATA_TABLE_COLUMNS_TO_JOIN); + if (columnsBytes == null) return null; + ByteArrayInputStream stream = new ByteArrayInputStream(columnsBytes); // TODO: size? + try { + DataInputStream input = new DataInputStream(stream); + int numColumns = WritableUtils.readVInt(input); + ColumnReference[] dataColumns = new ColumnReference[numColumns]; + for (int i = 0; i < numColumns; i++) { + dataColumns[i] = + new ColumnReference(Bytes.readByteArray(input), Bytes.readByteArray(input)); + } + return dataColumns; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } - - public static boolean getViewConstantValue(PColumn column, ImmutableBytesWritable ptr) { - byte[] value = column.getViewConstant(); - if (value != null) { - ptr.set(value, 0, value.length-1); - return true; - } - return false; + } + + public static List deSerializeIndexMaintainersFromScan(Scan scan) { + boolean useProto = false; + byte[] indexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO); + useProto = indexBytes != null; + if (indexBytes == null) { + indexBytes = scan.getAttribute(LOCAL_INDEX_BUILD); } - - /** - * Traverse the expression tree and set the offset of every RowKeyColumnExpression - * to the offset provided. This is used for local indexing on the server-side to - * skip over the region start key that prefixes index rows. - * @param rootExpression the root expression from which to begin traversal - * @param offset the offset to set on each RowKeyColumnExpression - */ - public static void setRowKeyExpressionOffset(Expression rootExpression, final int offset) { - rootExpression.accept(new RowKeyExpressionVisitor() { - - @Override - public Void visit(RowKeyColumnExpression node) { - node.setOffset(offset); - return null; - } - - }); + if (indexBytes == null) { + indexBytes = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); + useProto = indexBytes != null; } - - public static ColumnReference[] deserializeDataTableColumnsToJoin(Scan scan) { - byte[] columnsBytes = scan.getAttribute(BaseScannerRegionObserverConstants.DATA_TABLE_COLUMNS_TO_JOIN); - if (columnsBytes == null) return null; - ByteArrayInputStream stream = new ByteArrayInputStream(columnsBytes); // TODO: size? 
- try { - DataInputStream input = new DataInputStream(stream); - int numColumns = WritableUtils.readVInt(input); - ColumnReference[] dataColumns = new ColumnReference[numColumns]; - for (int i = 0; i < numColumns; i++) { - dataColumns[i] = new ColumnReference(Bytes.readByteArray(input), Bytes.readByteArray(input)); - } - return dataColumns; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + if (indexBytes == null) { + indexBytes = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); } - - public static List deSerializeIndexMaintainersFromScan(Scan scan) { - boolean useProto = false; - byte[] indexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO); - useProto = indexBytes != null; - if (indexBytes == null) { - indexBytes = scan.getAttribute(LOCAL_INDEX_BUILD); - } - if (indexBytes == null) { - indexBytes = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); - useProto = indexBytes != null; - } - if (indexBytes == null) { - indexBytes = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); + List indexMaintainers = + indexBytes == null ? null : IndexMaintainer.deserialize(indexBytes, useProto); + return indexMaintainers; + } + + public static byte[][] deserializeViewConstantsFromScan(Scan scan) { + byte[] bytes = scan.getAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS); + if (bytes == null) return null; + ByteArrayInputStream stream = new ByteArrayInputStream(bytes); // TODO: size? + try { + DataInputStream input = new DataInputStream(stream); + int numConstants = WritableUtils.readVInt(input); + byte[][] viewConstants = new byte[numConstants][]; + for (int i = 0; i < numConstants; i++) { + viewConstants[i] = Bytes.readByteArray(input); + } + return viewConstants; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + public static KeyValueSchema deserializeLocalIndexJoinSchemaFromScan(final Scan scan) { + byte[] schemaBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_JOIN_SCHEMA); + if (schemaBytes == null) return null; + ByteArrayInputStream stream = new ByteArrayInputStream(schemaBytes); // TODO: size? + try { + DataInputStream input = new DataInputStream(stream); + KeyValueSchema schema = new KeyValueSchema(); + schema.readFields(input); + return schema; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + public static TupleProjector getTupleProjector(Scan scan, ColumnReference[] dataColumns) { + if (dataColumns != null && dataColumns.length != 0) { + KeyValueSchema keyValueSchema = deserializeLocalIndexJoinSchemaFromScan(scan); + boolean storeColsInSingleCell = + scan.getAttribute(BaseScannerRegionObserverConstants.COLUMNS_STORED_IN_SINGLE_CELL) != null; + QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); + ImmutableStorageScheme immutableStorageScheme = + EncodedColumnsUtil.getImmutableStorageScheme(scan); + Expression[] colExpressions = storeColsInSingleCell + ? 
new SingleCellColumnExpression[dataColumns.length] + : new KeyValueColumnExpression[dataColumns.length]; + for (int i = 0; i < dataColumns.length; i++) { + byte[] family = dataColumns[i].getFamily(); + byte[] qualifier = dataColumns[i].getQualifier(); + Field field = keyValueSchema.getField(i); + Expression dataColumnExpr = storeColsInSingleCell + ? new SingleCellColumnExpression(field, family, qualifier, encodingScheme, + immutableStorageScheme) + : new KeyValueColumnExpression(field, family, qualifier); + colExpressions[i] = dataColumnExpr; + } + return new TupleProjector(keyValueSchema, colExpressions); + } + return null; + } + + /** + * Rewrite a view statement to be valid against an index + */ + public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, + String viewStatement) throws SQLException { + if (viewStatement == null) { + return null; + } + SelectStatement select = new SQLParser(viewStatement).parseQuery(); + ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table)); + SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver); + ParseNode whereNode = translatedSelect.getWhere(); + PhoenixStatement statement = new PhoenixStatement(conn); + TableRef indexTableRef = new TableRef(index) { + @Override + public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, + boolean colNameCaseSensitive) { + return '"' + ref.getColumn().getName().getString() + '"'; + } + }; + ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef); + StatementContext context = new StatementContext(statement, indexResolver); + // Compile to ensure validity + WhereCompiler.compile(context, whereNode); + StringBuilder buf = new StringBuilder(); + whereNode.toSQL(indexResolver, buf); + return QueryUtil.getViewStatement(index.getSchemaName().getString(), + index.getTableName().getString(), buf.toString()); + } + + public static void addTupleAsOneCell(List result, Tuple tuple, + TupleProjector tupleProjector, ImmutableBytesWritable ptr) { + // This will create a byte[] that captures all of the values from the data table + byte[] value = tupleProjector.getSchema().toBytes(tuple, tupleProjector.getExpressions(), + tupleProjector.getValueBitSet(), ptr); + Cell firstCell = result.get(0); + Cell keyValue = PhoenixKeyValueUtil.newKeyValue(firstCell.getRowArray(), // FIXME: This does + // DEEP_COPY of cell, + // do we need that? + firstCell.getRowOffset(), firstCell.getRowLength(), VALUE_COLUMN_FAMILY, + VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length); + result.add(keyValue); + } + + public static String getIndexColumnExpressionStr(PColumn col) { + return col.getExpressionStr() == null + ? IndexUtil.getCaseSensitiveDataColumnFullName(col.getName().getString()) + : col.getExpressionStr(); + } + + public static byte[][] getViewConstants(PTable dataTable) { + if (dataTable.getType() != PTableType.VIEW && dataTable.getType() != PTableType.PROJECTED) + return null; + int dataPosOffset = + (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 
1 : 0); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + List viewConstants = new ArrayList(); + List dataPkColumns = dataTable.getPKColumns(); + for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { + PColumn dataPKColumn = dataPkColumns.get(i); + if (dataPKColumn.getViewConstant() != null) { + if (IndexUtil.getViewConstantValue(dataPKColumn, ptr)) { + viewConstants.add(ByteUtil.copyKeyBytesIfNecessary(ptr)); + } else { + throw new IllegalStateException(); } - List indexMaintainers = - indexBytes == null ? null : IndexMaintainer.deserialize(indexBytes, useProto); - return indexMaintainers; + } } - - public static byte[][] deserializeViewConstantsFromScan(Scan scan) { - byte[] bytes = scan.getAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS); - if (bytes == null) return null; - ByteArrayInputStream stream = new ByteArrayInputStream(bytes); // TODO: size? - try { - DataInputStream input = new DataInputStream(stream); - int numConstants = WritableUtils.readVInt(input); - byte[][] viewConstants = new byte[numConstants][]; - for (int i = 0; i < numConstants; i++) { - viewConstants[i] = Bytes.readByteArray(input); + return viewConstants.isEmpty() ? null : viewConstants.toArray(new byte[viewConstants.size()][]); + } + + public static MetaDataMutationResult updateIndexState(String indexTableName, long minTimeStamp, + Table metaTable, PIndexState newState) throws Throwable { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); + return updateIndexState(indexTableKey, minTimeStamp, metaTable, newState); + } + + public static MetaDataMutationResult updateIndexState(byte[] indexTableKey, long minTimeStamp, + Table metaTable, PIndexState newState) throws Throwable { + // Mimic the Put that gets generated by the client on an update of the index state + Put put = new Put(indexTableKey); + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes()); + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(minTimeStamp)); + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0)); + final List tableMetadata = Collections. 
singletonList(put); + + final Map results = + metaTable.coprocessorService(MetaDataService.class, indexTableKey, indexTableKey, + new Batch.Call() { + @Override + public MetaDataResponse call(MetaDataService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>(); + UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder(); + for (Mutation m : tableMetadata) { + MutationProto mp = ProtobufUtil.toProto(m); + builder.addTableMetadataMutations(mp.toByteString()); } - return viewConstants; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); + builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, + PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); + instance.updateIndexState(controller, builder.build(), rpcCallback); + if (controller.getFailedOn() != null) { + throw controller.getFailedOn(); } + return rpcCallback.get(); + } + }); + if (results.isEmpty()) { + throw new IOException("Didn't get expected result size"); + } + MetaDataResponse tmpResponse = results.values().iterator().next(); + return MetaDataMutationResult.constructFromProto(tmpResponse); + } + + public static boolean matchingSplitKeys(byte[][] splitKeys1, byte[][] splitKeys2) { + if (splitKeys1 != null && splitKeys2 != null && splitKeys1.length == splitKeys2.length) { + for (int i = 0; i < splitKeys1.length; i++) { + if (Bytes.compareTo(splitKeys1[i], splitKeys2[i]) != 0) { + return false; } + } + } else { + return false; } - - public static KeyValueSchema deserializeLocalIndexJoinSchemaFromScan(final Scan scan) { - byte[] schemaBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_JOIN_SCHEMA); - if (schemaBytes == null) return null; - ByteArrayInputStream stream = new ByteArrayInputStream(schemaBytes); // TODO: size? 
+ return true; + } + + public static PTable getPDataTable(PhoenixConnection conn, TableDescriptor tableDesc) + throws SQLException { + String dataTableName = + Bytes.toString(tableDesc.getValue(MetaDataUtil.DATA_TABLE_NAME_PROP_BYTES)); + String physicalTableName = tableDesc.getTableName().getNameAsString(); + PTable pDataTable = null; + if (dataTableName == null) { + if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { try { - DataInputStream input = new DataInputStream(stream); - KeyValueSchema schema = new KeyValueSchema(); - schema.readFields(input); - return schema; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } + pDataTable = conn.getTable(physicalTableName.replace(QueryConstants.NAMESPACE_SEPARATOR, + QueryConstants.NAME_SEPARATOR)); + } catch (TableNotFoundException e) { + // could be a table mapped to external table + pDataTable = conn.getTable(physicalTableName); } + } else { + pDataTable = conn.getTable(physicalTableName); + } + } else { + pDataTable = conn.getTable(dataTableName); } - - public static TupleProjector getTupleProjector(Scan scan, ColumnReference[] dataColumns) { - if (dataColumns != null && dataColumns.length != 0) { - KeyValueSchema keyValueSchema = deserializeLocalIndexJoinSchemaFromScan(scan); - boolean storeColsInSingleCell = scan.getAttribute(BaseScannerRegionObserverConstants.COLUMNS_STORED_IN_SINGLE_CELL) != null; - QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); - ImmutableStorageScheme immutableStorageScheme = EncodedColumnsUtil.getImmutableStorageScheme(scan); - Expression[] colExpressions = storeColsInSingleCell ? new SingleCellColumnExpression[dataColumns.length] : new KeyValueColumnExpression[dataColumns.length]; - for (int i = 0; i < dataColumns.length; i++) { - byte[] family = dataColumns[i].getFamily(); - byte[] qualifier = dataColumns[i].getQualifier(); - Field field = keyValueSchema.getField(i); - Expression dataColumnExpr = - storeColsInSingleCell ? 
new SingleCellColumnExpression(field, family, qualifier, encodingScheme, immutableStorageScheme) - : new KeyValueColumnExpression(field, family, qualifier); - colExpressions[i] = dataColumnExpr; - } - return new TupleProjector(keyValueSchema, colExpressions); - } - return null; + return pDataTable; + } + + public static boolean isLocalIndexFamily(String family) { + return family.indexOf(LOCAL_INDEX_COLUMN_FAMILY_PREFIX) != -1; + } + + public static void updateIndexState(PhoenixConnection conn, String indexTableName, + PIndexState newState, Long indexDisableTimestamp) throws SQLException { + updateIndexState(conn, indexTableName, newState, indexDisableTimestamp, + HConstants.LATEST_TIMESTAMP); + } + + public static void updateIndexState(PhoenixConnection conn, String indexTableName, + PIndexState newState, Long indexDisableTimestamp, Long expectedMaxTimestamp) + throws SQLException { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); + String schemaName = SchemaUtil.getSchemaNameFromFullName(indexTableName); + String indexName = SchemaUtil.getTableNameFromFullName(indexTableName); + // Mimic the Put that gets generated by the client on an update of the + // index state + Put put = new Put(indexTableKey); + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_STATE_BYTES, expectedMaxTimestamp, + newState.getSerializedBytes()); + if (indexDisableTimestamp != null) { + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, expectedMaxTimestamp, + PLong.INSTANCE.toBytes(indexDisableTimestamp)); } - - /** - * Rewrite a view statement to be valid against an index - * @param conn - * @param index - * @param table - * @return - * @throws SQLException - */ - public static String rewriteViewStatement(PhoenixConnection conn, PTable index, PTable table, String viewStatement) throws SQLException { - if (viewStatement == null) { - return null; - } - SelectStatement select = new SQLParser(viewStatement).parseQuery(); - ColumnResolver resolver = FromCompiler.getResolver(new TableRef(table)); - SelectStatement translatedSelect = IndexStatementRewriter.translate(select, resolver); - ParseNode whereNode = translatedSelect.getWhere(); - PhoenixStatement statement = new PhoenixStatement(conn); - TableRef indexTableRef = new TableRef(index) { - @Override - public String getColumnDisplayName(ColumnRef ref, boolean schemaNameCaseSensitive, boolean colNameCaseSensitive) { - return '"' + ref.getColumn().getName().getString() + '"'; - } - }; - ColumnResolver indexResolver = FromCompiler.getResolver(indexTableRef); - StatementContext context = new StatementContext(statement, indexResolver); - // Compile to ensure validity - WhereCompiler.compile(context, whereNode); - StringBuilder buf = new StringBuilder(); - whereNode.toSQL(indexResolver, buf); - return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), buf.toString()); - } - - public static void addTupleAsOneCell(List result, - Tuple tuple, - TupleProjector tupleProjector, - ImmutableBytesWritable ptr) { - // This will create a byte[] that captures all of the values from the data table - byte[] value = - tupleProjector.getSchema().toBytes(tuple, tupleProjector.getExpressions(), - tupleProjector.getValueBitSet(), ptr); - Cell firstCell = result.get(0); - Cell keyValue = - PhoenixKeyValueUtil.newKeyValue(firstCell.getRowArray(), // FIXME: This does DEEP_COPY of cell, do we need that? 
- firstCell.getRowOffset(),firstCell.getRowLength(), VALUE_COLUMN_FAMILY, - VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length); - result.add(keyValue); - } - - public static String getIndexColumnExpressionStr(PColumn col) { - return col.getExpressionStr() == null ? IndexUtil.getCaseSensitiveDataColumnFullName(col.getName().getString()) - : col.getExpressionStr(); - } - - public static byte[][] getViewConstants(PTable dataTable) { - if (dataTable.getType() != PTableType.VIEW && dataTable.getType() != PTableType.PROJECTED) return null; - int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - List viewConstants = new ArrayList(); - List dataPkColumns = dataTable.getPKColumns(); - for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { - PColumn dataPKColumn = dataPkColumns.get(i); - if (dataPKColumn.getViewConstant() != null) { - if (IndexUtil.getViewConstantValue(dataPKColumn, ptr)) { - viewConstants.add(ByteUtil.copyKeyBytesIfNecessary(ptr)); - } else { - throw new IllegalStateException(); - } - } - } - return viewConstants.isEmpty() ? null : viewConstants - .toArray(new byte[viewConstants.size()][]); - } - - public static MetaDataMutationResult updateIndexState(String indexTableName, long minTimeStamp, - Table metaTable, PIndexState newState) throws Throwable { - byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); - return updateIndexState(indexTableKey, minTimeStamp, metaTable, newState); - } - - public static MetaDataMutationResult updateIndexState(byte[] indexTableKey, long minTimeStamp, - Table metaTable, PIndexState newState) throws Throwable { - // Mimic the Put that gets generated by the client on an update of the index state - Put put = new Put(indexTableKey); - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, - newState.getSerializedBytes()); - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, - PLong.INSTANCE.toBytes(minTimeStamp)); - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, - PLong.INSTANCE.toBytes(0)); - final List tableMetadata = Collections. 
singletonList(put); - - final Map results = metaTable.coprocessorService(MetaDataService.class, indexTableKey, - indexTableKey, new Batch.Call() { - @Override - public MetaDataResponse call(MetaDataService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>(); - UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder(); - for (Mutation m : tableMetadata) { - MutationProto mp = ProtobufUtil.toProto(m); - builder.addTableMetadataMutations(mp.toByteString()); - } - builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER)); - instance.updateIndexState(controller, builder.build(), rpcCallback); - if (controller.getFailedOn() != null) { throw controller.getFailedOn(); } - return rpcCallback.get(); - } - }); - if (results.isEmpty()) { throw new IOException("Didn't get expected result size"); } - MetaDataResponse tmpResponse = results.values().iterator().next(); - return MetaDataMutationResult.constructFromProto(tmpResponse); - } - - public static boolean matchingSplitKeys(byte[][] splitKeys1, byte[][] splitKeys2) { - if (splitKeys1 != null && splitKeys2 != null && splitKeys1.length == splitKeys2.length) { - for (int i = 0; i < splitKeys1.length; i++) { - if (Bytes.compareTo(splitKeys1[i], splitKeys2[i]) != 0) { return false; } - } - } else { - return false; - } - return true; - } - - public static PTable getPDataTable(PhoenixConnection conn, TableDescriptor tableDesc) - throws SQLException { - String dataTableName = Bytes.toString(tableDesc.getValue(MetaDataUtil.DATA_TABLE_NAME_PROP_BYTES)); - String physicalTableName = tableDesc.getTableName().getNameAsString(); - PTable pDataTable = null; - if (dataTableName == null) { - if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - try { - pDataTable = conn.getTable(physicalTableName - .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)); - } catch (TableNotFoundException e) { - // could be a table mapped to external table - pDataTable = conn.getTable(physicalTableName); - } - }else{ - pDataTable = conn.getTable(physicalTableName); - } - } else { - pDataTable = conn.getTable(dataTableName); - } - return pDataTable; - } - - public static boolean isLocalIndexFamily(String family) { - return family.indexOf(LOCAL_INDEX_COLUMN_FAMILY_PREFIX) != -1; - } - - public static void updateIndexState(PhoenixConnection conn, String indexTableName, - PIndexState newState, Long indexDisableTimestamp) throws SQLException { - updateIndexState(conn, indexTableName, newState, indexDisableTimestamp, HConstants.LATEST_TIMESTAMP); - } - - public static void updateIndexState(PhoenixConnection conn, String indexTableName, - PIndexState newState, Long indexDisableTimestamp, Long expectedMaxTimestamp) throws SQLException { - byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); - String schemaName = SchemaUtil.getSchemaNameFromFullName(indexTableName); - String indexName = SchemaUtil.getTableNameFromFullName(indexTableName); - // Mimic the Put that gets generated by the client on an update of the - // index state - Put put = new Put(indexTableKey); - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, - expectedMaxTimestamp, - newState.getSerializedBytes()); - if (indexDisableTimestamp != null) { - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - 
PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, - expectedMaxTimestamp, - PLong.INSTANCE.toBytes(indexDisableTimestamp)); - } - if (newState == PIndexState.ACTIVE) { - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0)); - } - final List tableMetadata = Collections. singletonList(put); - MetaDataMutationResult result = conn.getQueryServices().updateIndexState(tableMetadata, null); - MutationCode code = result.getMutationCode(); - if (code == MutationCode.TABLE_NOT_FOUND) { - throw new TableNotFoundException(schemaName, indexName); - } - if (code == MutationCode.UNALLOWED_TABLE_MUTATION) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION) - .setMessage("indexState=" + newState).setSchemaName(schemaName) - .setTableName(indexName).build().buildException(); - } - } - - public static List getClientMaintainedIndexes(PTable table) { - Iterator indexIterator = // Only maintain tables with immutable rows through this client-side mechanism - (table.isTransactional() && table.getTransactionProvider().getTransactionProvider().isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER)) ? - IndexMaintainer.maintainedIndexes(table.getIndexes().iterator()) : - (table.isImmutableRows() || table.isTransactional()) ? - // If the data table has a different storage scheme than index table, don't maintain this on the client - // For example, if the index is single cell but the data table is one_cell, if there is a partial update on the data table, index can't be built on the client. - IndexMaintainer.maintainedGlobalIndexesWithMatchingStorageScheme(table, table.getIndexes().iterator()) : - Collections.emptyIterator(); - return Lists.newArrayList(indexIterator); - } - - public static Result incrementCounterForIndex(PhoenixConnection conn, String failedIndexTable,long amount) throws IOException { - byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(failedIndexTable); - Increment incr = new Increment(indexTableKey); - incr.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, amount); - try (Table table = conn.getQueryServices().getTable( - SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, - conn.getQueryServices().getProps()).getName())) { - return table.increment(incr); - } catch (SQLException e) { - throw new IOException(e); - } + if (newState == PIndexState.ACTIVE) { + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0)); } - - public static long getIndexPendingDisableCount(PhoenixConnection conn, String failedIndexTable) throws IOException { - byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(failedIndexTable); - Get get = new Get(indexTableKey); - get.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); - try (Table table = conn.getQueryServices().getTable( - SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, - conn.getQueryServices().getProps()).getName())) { - Result result = table.get(get); - return Bytes.toLong(result.getValue(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES)); - } catch (SQLException e) { - throw new IOException(e); - } + final List tableMetadata = Collections. 
singletonList(put); + MetaDataMutationResult result = conn.getQueryServices().updateIndexState(tableMetadata, null); + MutationCode code = result.getMutationCode(); + if (code == MutationCode.TABLE_NOT_FOUND) { + throw new TableNotFoundException(schemaName, indexName); } - - public static long getIndexPendingDisableCountLastUpdatedTimestamp( - PhoenixConnection conn, String failedIndexTable) - throws IOException { - byte[] indexTableKey = - SchemaUtil.getTableKeyFromFullName(failedIndexTable); - Get get = new Get(indexTableKey); - get.addColumn(TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); - byte[] systemCatalog = SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, - conn.getQueryServices().getProps()).getName(); - try (Table table = conn.getQueryServices().getTable(systemCatalog)) { - Result result = table.get(get); - Cell cell = result.listCells().get(0); - return cell.getTimestamp(); - } catch (SQLException e) { - throw new IOException(e); + if (code == MutationCode.UNALLOWED_TABLE_MUTATION) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION) + .setMessage("indexState=" + newState).setSchemaName(schemaName).setTableName(indexName) + .build().buildException(); + } + } + + public static List getClientMaintainedIndexes(PTable table) { + Iterator indexIterator = // Only maintain tables with immutable rows through this + // client-side mechanism + (table.isTransactional() && table.getTransactionProvider().getTransactionProvider() + .isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER)) + ? IndexMaintainer.maintainedIndexes(table.getIndexes().iterator()) + : (table.isImmutableRows() || table.isTransactional()) ? + // If the data table has a different storage scheme than index table, don't maintain this on + // the client + // For example, if the index is single cell but the data table is one_cell, if there is a + // partial update on the data table, index can't be built on the client. + IndexMaintainer.maintainedGlobalIndexesWithMatchingStorageScheme(table, + table.getIndexes().iterator()) + : Collections. 
emptyIterator(); + return Lists.newArrayList(indexIterator); + } + + public static Result incrementCounterForIndex(PhoenixConnection conn, String failedIndexTable, + long amount) throws IOException { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(failedIndexTable); + Increment incr = new Increment(indexTableKey); + incr.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, amount); + try (Table table = conn.getQueryServices() + .getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, + conn.getQueryServices().getProps()).getName())) { + return table.increment(incr); + } catch (SQLException e) { + throw new IOException(e); + } + } + + public static long getIndexPendingDisableCount(PhoenixConnection conn, String failedIndexTable) + throws IOException { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(failedIndexTable); + Get get = new Get(indexTableKey); + get.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); + try (Table table = conn.getQueryServices() + .getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, + conn.getQueryServices().getProps()).getName())) { + Result result = table.get(get); + return Bytes.toLong( + result.getValue(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES)); + } catch (SQLException e) { + throw new IOException(e); + } + } + + public static long getIndexPendingDisableCountLastUpdatedTimestamp(PhoenixConnection conn, + String failedIndexTable) throws IOException { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(failedIndexTable); + Get get = new Get(indexTableKey); + get.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); + byte[] systemCatalog = + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, + conn.getQueryServices().getProps()).getName(); + try (Table table = conn.getQueryServices().getTable(systemCatalog)) { + Result result = table.get(get); + Cell cell = result.listCells().get(0); + return cell.getTimestamp(); + } catch (SQLException e) { + throw new IOException(e); + } + } + + public static boolean isCoveredGlobalIndex(final PTable table) { + return table.getIndexType() == PTable.IndexType.GLOBAL; + } + + public static boolean isGlobalIndex(final PTable table) { + return table.getIndexType() == PTable.IndexType.GLOBAL + || table.getIndexType() == PTable.IndexType.UNCOVERED_GLOBAL; + } + + public static boolean shouldIndexBeUsedForUncoveredQuery(final TableRef tableRef) { + PTable table = tableRef.getTable(); + return table.getType() == PTableType.INDEX && (table.getIndexType() == PTable.IndexType.LOCAL + || table.getIndexType() == PTable.IndexType.UNCOVERED_GLOBAL || tableRef.isHinted()); + } + + public static long getMaxTimestamp(Mutation m) { + long ts = 0; + for (List cells : m.getFamilyCellMap().values()) { + if (cells == null) { + continue; + } + for (Cell cell : cells) { + if (ts < cell.getTimestamp()) { + ts = cell.getTimestamp(); } + } } - - public static boolean isCoveredGlobalIndex(final PTable table) { - return table.getIndexType() == PTable.IndexType.GLOBAL; + return ts; + } + + public static int getIndexPriority(Configuration conf) { + return conf.getInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_PRIORITY); + } + + public static int getMetadataPriority(Configuration conf) { + return conf.getInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, + 
QueryServicesOptions.DEFAULT_METADATA_PRIORITY); + } + + public static int getServerSidePriority(Configuration conf) { + return conf.getInt(QueryServices.SERVER_SIDE_PRIOIRTY_ATTRIB, + QueryServicesOptions.DEFAULT_SERVER_SIDE_PRIORITY); + } + + public static int getInvalidateMetadataCachePriority(Configuration conf) { + return conf.getInt(QueryServices.INVALIDATE_METADATA_CACHE_PRIORITY_ATTRIB, + QueryServicesOptions.DEFAULT_INVALIDATE_METADATA_CACHE_PRIORITY); + } + + public static void removeEmptyColumn(Mutation m, byte[] emptyCF, byte[] emptyCQ) { + List cellList = m.getFamilyCellMap().get(emptyCF); + if (cellList == null) { + return; } - public static boolean isGlobalIndex(final PTable table) { - return table.getIndexType() == PTable.IndexType.GLOBAL || table.getIndexType() == PTable.IndexType.UNCOVERED_GLOBAL; + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + Cell cell = cellIterator.next(); + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), emptyCQ, 0, emptyCQ.length) == 0 + ) { + cellIterator.remove(); + return; + } } - - public static boolean shouldIndexBeUsedForUncoveredQuery(final TableRef tableRef) { - PTable table = tableRef.getTable(); - return table.getType() == PTableType.INDEX - && (table.getIndexType() == PTable.IndexType.LOCAL - || table.getIndexType() == PTable.IndexType.UNCOVERED_GLOBAL - || tableRef.isHinted()); + } + + /** + * Enable indexing on the given table + * @param descBuilder {@link TableDescriptor} for the table on which indexing should be + * enabled + * @param indexBuilderClassName class name to use when building the index for this table + * @param properties map of custom configuration options to make available to your + * {@link org.apache.phoenix.hbase.index.builder.IndexBuilder} on the + * server-side + * @param priority TODO + * @throws IOException the Indexer coprocessor cannot be added + */ + public static void enableIndexing(TableDescriptorBuilder descBuilder, + String indexBuilderClassName, Map properties, int priority, + String coprocessorClassName) throws IOException { + if (properties == null) { + properties = new HashMap(); + } + properties.put(IndexUtil.INDEX_BUILDER_CONF_KEY, indexBuilderClassName); + descBuilder.setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(coprocessorClassName) + .setPriority(priority).setProperties(properties).build()); + } + + public static List readColumnsFromRow(Put row, Set cols) { + if (row == null) { + return Collections.EMPTY_LIST; } - public static long getMaxTimestamp(Mutation m) { - long ts = 0; - for (List cells : m.getFamilyCellMap().values()) { - if (cells == null) { - continue; - } - for (Cell cell : cells) { - if (ts < cell.getTimestamp()) { - ts = cell.getTimestamp(); - } - } + List columns = Lists.newArrayList(); + + if (cols.isEmpty()) { + // just return any cell FirstKeyOnlyFilter + for (List cells : row.getFamilyCellMap().values()) { + if (cells == null || cells.isEmpty()) { + continue; } - return ts; + columns.add(cells.get(0)); + break; + } + return columns; } - public static int getIndexPriority(Configuration conf) { - return conf.getInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_PRIORITY); + IndexUtil.SimpleValueGetter valueGetter = new IndexUtil.SimpleValueGetter(row); + for (ColumnReference colRef : cols) { + Cell cell = valueGetter.getLatestCell(colRef, HConstants.LATEST_TIMESTAMP); + if (cell != null) { + columns.add(cell); + } } + return columns; + } - public static 
int getMetadataPriority(Configuration conf) { - return conf.getInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_METADATA_PRIORITY); - } + public static class SimpleValueGetter implements ValueGetter { + final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); + final Put put; - public static int getServerSidePriority(Configuration conf) { - return conf.getInt(QueryServices.SERVER_SIDE_PRIOIRTY_ATTRIB, QueryServicesOptions.DEFAULT_SERVER_SIDE_PRIORITY); + public SimpleValueGetter(final Put put) { + this.put = put; } - public static int getInvalidateMetadataCachePriority(Configuration conf) { - return conf.getInt(QueryServices.INVALIDATE_METADATA_CACHE_PRIORITY_ATTRIB, - QueryServicesOptions.DEFAULT_INVALIDATE_METADATA_CACHE_PRIORITY); + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { + Cell cell = getLatestCell(ref, ts); + if (cell == null) { + return null; + } + valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return valuePtr; } - public static void removeEmptyColumn(Mutation m, byte[] emptyCF, byte[] emptyCQ) { - List cellList = m.getFamilyCellMap().get(emptyCF); - if (cellList == null) { - return; - } - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - Cell cell = cellIterator.next(); - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - emptyCQ, 0, emptyCQ.length) == 0) { - cellIterator.remove(); - return; - } - } + public Cell getLatestCell(ColumnReference ref, long ts) { + List cellList = put.get(ref.getFamily(), ref.getQualifier()); + if (cellList == null || cellList.isEmpty()) { + return null; + } + return cellList.get(0); } - /** - * Enable indexing on the given table - * @param descBuilder {@link TableDescriptor} for the table on which indexing should be enabled - * @param indexBuilderClassName class name to use when building the index for this table - * @param properties map of custom configuration options to make available to your - * {@link org.apache.phoenix.hbase.index.builder.IndexBuilder} on the server-side - * @param priority TODO - * @throws IOException the Indexer coprocessor cannot be added - */ - public static void enableIndexing(TableDescriptorBuilder descBuilder, String indexBuilderClassName, - Map properties, int priority, String coprocessorClassName) throws IOException { - if (properties == null) { - properties = new HashMap(); - } - properties.put(IndexUtil.INDEX_BUILDER_CONF_KEY, indexBuilderClassName); - descBuilder.setCoprocessor( - CoprocessorDescriptorBuilder - .newBuilder(coprocessorClassName) - .setPriority(priority) - .setProperties(properties) - .build()); - } - - public static List readColumnsFromRow(Put row, Set cols) { - if (row == null) { - return Collections.EMPTY_LIST; - } + @Override + public KeyValue getLatestKeyValue(ColumnReference ref, long ts) { + Cell cell = getLatestCell(ref, ts); + KeyValue kv = cell == null + ? 
null + : new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), + cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + cell.getTimestamp(), KeyValue.Type.codeToType(cell.getType().getCode()), + cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + return kv; + } - List columns = Lists.newArrayList(); + @Override + public byte[] getRowKey() { + return put.getRow(); + } - if (cols.isEmpty()) { - // just return any cell FirstKeyOnlyFilter - for (List cells : row.getFamilyCellMap().values()) { - if (cells == null || cells.isEmpty()) { - continue; - } - columns.add(cells.get(0)); - break; - } - return columns; - } + } - IndexUtil.SimpleValueGetter valueGetter = new IndexUtil.SimpleValueGetter(row); - for (ColumnReference colRef : cols) { - Cell cell = valueGetter.getLatestCell(colRef, HConstants.LATEST_TIMESTAMP); - if (cell != null) { - columns.add(cell); - } - } - return columns; - } + /** + * Updates the EMPTY cell value to VERIFIED for global index table rows. + */ + public static class IndexStatusUpdater { - public static class SimpleValueGetter implements ValueGetter { - final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); - final Put put; - public SimpleValueGetter (final Put put) { - this.put = put; - } - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { - Cell cell = getLatestCell(ref, ts); - if (cell == null) { - return null; - } - valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return valuePtr; - } - public Cell getLatestCell(ColumnReference ref, long ts) { - List cellList = put.get(ref.getFamily(), ref.getQualifier()); - if (cellList == null || cellList.isEmpty()) { - return null; - } - return cellList.get(0); - } - @Override - public KeyValue getLatestKeyValue(ColumnReference ref, long ts) { - Cell cell = getLatestCell(ref, ts); - KeyValue kv = cell == null ? null : - new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - cell.getTimestamp(), KeyValue.Type.codeToType(cell.getType().getCode()), - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - return kv; - } - @Override - public byte[] getRowKey() { - return put.getRow(); - } + private final byte[] emptyKeyValueCF; + private final int emptyKeyValueCFLength; + private final byte[] emptyKeyValueQualifier; + private final int emptyKeyValueQualifierLength; + public IndexStatusUpdater(final byte[] emptyKeyValueCF, final byte[] emptyKeyValueQualifier) { + this.emptyKeyValueCF = emptyKeyValueCF; + this.emptyKeyValueQualifier = emptyKeyValueQualifier; + this.emptyKeyValueCFLength = emptyKeyValueCF.length; + this.emptyKeyValueQualifierLength = emptyKeyValueQualifier.length; } + /** - * Updates the EMPTY cell value to VERIFIED for global index table rows. 
+ * Update the Empty cell values to VERIFIED in the passed keyValues list + * @param keyValues will be modified */ - public static class IndexStatusUpdater { - - private final byte[] emptyKeyValueCF; - private final int emptyKeyValueCFLength; - private final byte[] emptyKeyValueQualifier; - private final int emptyKeyValueQualifierLength; - - public IndexStatusUpdater(final byte[] emptyKeyValueCF, final byte[] emptyKeyValueQualifier) { - this.emptyKeyValueCF = emptyKeyValueCF; - this.emptyKeyValueQualifier = emptyKeyValueQualifier; - this.emptyKeyValueCFLength = emptyKeyValueCF.length; - this.emptyKeyValueQualifierLength = emptyKeyValueQualifier.length; - } - - /** - * Update the Empty cell values to VERIFIED in the passed keyValues list - * - * @param keyValues will be modified - */ - public void setVerified(List keyValues) { - for (int i = 0; i < keyValues.size(); i++) { - updateVerified(keyValues.get(i)); - } - } + public void setVerified(List keyValues) { + for (int i = 0; i < keyValues.size(); i++) { + updateVerified(keyValues.get(i)); + } + } - /** - * Update the Empty cell values to VERIFIED in the passed keyValues list - * - * @param cellScanner contents will be modified - * @throws IOException - */ - public void setVerified(CellScanner cellScanner) throws IOException { - while (cellScanner.advance()) { - updateVerified(cellScanner.current()); - } - } + /** + * Update the Empty cell values to VERIFIED in the passed keyValues list + * @param cellScanner contents will be modified + */ + public void setVerified(CellScanner cellScanner) throws IOException { + while (cellScanner.advance()) { + updateVerified(cellScanner.current()); + } + } - private void updateVerified(Cell cell) { - if (CellUtil.compareFamilies(cell, emptyKeyValueCF, 0, emptyKeyValueCFLength) == 0 - && CellUtil.compareQualifiers(cell, emptyKeyValueQualifier, - 0, emptyKeyValueQualifierLength) == 0) { - if (cell.getValueLength() != 1) { - //This should never happen. Fail fast if it does. - throw new IllegalArgumentException("Empty cell value length is not 1"); - } - //We are directly overwriting the value for performance - cell.getValueArray()[cell.getValueOffset()] = QueryConstants.VERIFIED_BYTE; - } + private void updateVerified(Cell cell) { + if ( + CellUtil.compareFamilies(cell, emptyKeyValueCF, 0, emptyKeyValueCFLength) == 0 && CellUtil + .compareQualifiers(cell, emptyKeyValueQualifier, 0, emptyKeyValueQualifierLength) == 0 + ) { + if (cell.getValueLength() != 1) { + // This should never happen. Fail fast if it does. + throw new IllegalArgumentException("Empty cell value length is not 1"); } + // We are directly overwriting the value for performance + cell.getValueArray()[cell.getValueOffset()] = QueryConstants.VERIFIED_BYTE; + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/InstanceResolver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/InstanceResolver.java index 698c33072b2..4cb6fb90bde 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/InstanceResolver.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/InstanceResolver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,79 +17,79 @@ */ package org.apache.phoenix.util; -import org.apache.commons.collections.IteratorUtils; - -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.util.Iterator; import java.util.List; import java.util.ServiceLoader; import java.util.concurrent.ConcurrentHashMap; +import org.apache.commons.collections.IteratorUtils; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; + /** * Resolves object instances registered using the JDK 6+ {@link java.util.ServiceLoader}. - * - * * @since 2.0 */ public class InstanceResolver { - private static final ConcurrentHashMap RESOLVED_SINGLETONS = new ConcurrentHashMap(); + private static final ConcurrentHashMap RESOLVED_SINGLETONS = + new ConcurrentHashMap(); - private InstanceResolver() {/* not allowed */} + private InstanceResolver() { + /* not allowed */} - /** - * Resolves an instance of the specified class if it has not already been resolved. - * @param clazz The type of instance to resolve - * @param defaultInstance The instance to use if a custom instance has not been registered - * @return The resolved instance or the default instance provided. - * {@code null} if an instance is not registered and a default is not provided. - */ - @SuppressWarnings("unchecked") - public static T getSingleton(Class clazz, T defaultInstance) { - Object obj = RESOLVED_SINGLETONS.get(clazz); - if(obj != null) { - return (T)obj; - } - if (defaultInstance != null && !clazz.isInstance(defaultInstance)) throw new IllegalArgumentException("defaultInstance is not of type " + clazz.getName()); - final Object o = resolveSingleton(clazz, defaultInstance); - obj = RESOLVED_SINGLETONS.putIfAbsent(clazz, o); - if(obj == null) { - obj = o; - } - return (T)obj; + /** + * Resolves an instance of the specified class if it has not already been resolved. + * @param clazz The type of instance to resolve + * @param defaultInstance The instance to use if a custom instance has not been registered + * @return The resolved instance or the default instance provided. {@code null} if an instance is + * not registered and a default is not provided. + */ + @SuppressWarnings("unchecked") + public static T getSingleton(Class clazz, T defaultInstance) { + Object obj = RESOLVED_SINGLETONS.get(clazz); + if (obj != null) { + return (T) obj; } - - /** - * Resolves all instances of a specified class and add it to the list of default implementations - * @param clazz Type of the instance to resolve - * @param defaultInstances {@link List} of instances that match the type clazz - * @param Type of class passed - * @return {@link List} of instance of the specified class. 
Newly found instances will be added - * to the existing contents of defaultInstances - */ - @SuppressWarnings("unchecked") - public static List get(Class clazz, List defaultInstances) { - Iterator iterator = ServiceLoader.load(clazz).iterator(); - if (defaultInstances != null) { - defaultInstances.addAll(IteratorUtils.toList(iterator)); - } else { - defaultInstances = IteratorUtils.toList(iterator); - } - - return defaultInstances; + if (defaultInstance != null && !clazz.isInstance(defaultInstance)) + throw new IllegalArgumentException("defaultInstance is not of type " + clazz.getName()); + final Object o = resolveSingleton(clazz, defaultInstance); + obj = RESOLVED_SINGLETONS.putIfAbsent(clazz, o); + if (obj == null) { + obj = o; } + return (T) obj; + } - private synchronized static T resolveSingleton(Class clazz, T defaultInstance) { - ServiceLoader loader = ServiceLoader.load(clazz); - // returns the first registered instance found - for (T singleton : loader) { - return singleton; - } - return defaultInstance; + /** + * Resolves all instances of a specified class and add it to the list of default implementations + * @param clazz Type of the instance to resolve + * @param defaultInstances {@link List} of instances that match the type clazz + * @param Type of class passed + * @return {@link List} of instance of the specified class. Newly found instances will be added to + * the existing contents of defaultInstances + */ + @SuppressWarnings("unchecked") + public static List get(Class clazz, List defaultInstances) { + Iterator iterator = ServiceLoader.load(clazz).iterator(); + if (defaultInstances != null) { + defaultInstances.addAll(IteratorUtils.toList(iterator)); + } else { + defaultInstances = IteratorUtils.toList(iterator); } - @VisibleForTesting - public static void clearSingletons() { - RESOLVED_SINGLETONS.clear(); + return defaultInstances; + } + + private synchronized static T resolveSingleton(Class clazz, T defaultInstance) { + ServiceLoader loader = ServiceLoader.load(clazz); + // returns the first registered instance found + for (T singleton : loader) { + return singleton; } + return defaultInstance; + } + + @VisibleForTesting + public static void clearSingletons() { + RESOLVED_SINGLETONS.clear(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/JDBCUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/JDBCUtil.java index 6e72fc1940c..73716e935b2 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/JDBCUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/JDBCUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,207 +33,212 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PNameFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import edu.umd.cs.findbugs.annotations.NonNull; /** * Utilities for JDBC - * */ public class JDBCUtil { - - private JDBCUtil() { - } - /** - * Find the propName by first looking in the url string and if not found, - * next in the info properties. If not found, null is returned. 
- * @param url JDBC connection URL - * @param info JDBC connection properties - * @param propName the name of the property to find - * @return the property value or null if not found - */ - public static String findProperty(String url, Properties info, String propName) { - String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "="; - String upperCaseURL = url.toUpperCase(); - String propValue = info.getProperty(propName); - if (propValue == null) { - int begIndex = upperCaseURL.indexOf(urlPropName); - if (begIndex >= 0) { - int endIndex = - upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, begIndex - + urlPropName.length()); - if (endIndex < 0) { - endIndex = url.length(); - } - propValue = url.substring(begIndex + urlPropName.length(), endIndex); - } + private JDBCUtil() { + } + + /** + * Find the propName by first looking in the url string and if not found, next in the info + * properties. If not found, null is returned. + * @param url JDBC connection URL + * @param info JDBC connection properties + * @param propName the name of the property to find + * @return the property value or null if not found + */ + public static String findProperty(String url, Properties info, String propName) { + String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "="; + String upperCaseURL = url.toUpperCase(); + String propValue = info.getProperty(propName); + if (propValue == null) { + int begIndex = upperCaseURL.indexOf(urlPropName); + if (begIndex >= 0) { + int endIndex = upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, + begIndex + urlPropName.length()); + if (endIndex < 0) { + endIndex = url.length(); } - return propValue; + propValue = url.substring(begIndex + urlPropName.length(), endIndex); + } } - - public static String removeProperty(String url, String propName) { - String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "="; - String upperCaseURL = url.toUpperCase(); - int begIndex = upperCaseURL.indexOf(urlPropName); - if (begIndex >= 0) { - int endIndex = - upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, begIndex - + urlPropName.length()); - if (endIndex < 0) { - endIndex = url.length(); - } - String prefix = url.substring(0, begIndex); - String suffix = url.substring(endIndex, url.length()); - return prefix + suffix; - } else { - return url; - } + return propValue; + } + + public static String removeProperty(String url, String propName) { + String urlPropName = PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + propName.toUpperCase() + "="; + String upperCaseURL = url.toUpperCase(); + int begIndex = upperCaseURL.indexOf(urlPropName); + if (begIndex >= 0) { + int endIndex = upperCaseURL.indexOf(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR, + begIndex + urlPropName.length()); + if (endIndex < 0) { + endIndex = url.length(); + } + String prefix = url.substring(0, begIndex); + String suffix = url.substring(endIndex, url.length()); + return prefix + suffix; + } else { + return url; } - - /** - * Returns a map that contains connection properties from both info and url. 
- */ - private static Map getCombinedConnectionProperties(String url, Properties info) { - Map result = newHashMapWithExpectedSize(info.size()); - for (String propName : info.stringPropertyNames()) { - result.put(propName, info.getProperty(propName)); - } - String[] urlPropNameValues = url.split(Character.toString(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR)); - if (urlPropNameValues.length > 1) { - for (int i = 1; i < urlPropNameValues.length; i++) { - String[] urlPropNameValue = urlPropNameValues[i].split("="); - if (urlPropNameValue.length == 2) { - result.put(urlPropNameValue[0], urlPropNameValue[1]); - } - } - } - - return result; - } - - public static Map getAnnotations(@NonNull String url, @NonNull Properties info) { - Preconditions.checkNotNull(url); - Preconditions.checkNotNull(info); - - Map combinedProperties = getCombinedConnectionProperties(url, info); - Map result = newHashMapWithExpectedSize(combinedProperties.size()); - for (Map.Entry prop : combinedProperties.entrySet()) { - if (prop.getKey().startsWith(ANNOTATION_ATTRIB_PREFIX) && - prop.getKey().length() > ANNOTATION_ATTRIB_PREFIX.length()) { - result.put(prop.getKey().substring(ANNOTATION_ATTRIB_PREFIX.length()), prop.getValue()); - } - } - return result; - } - - public static Long getCurrentSCN(String url, Properties info) throws SQLException { - String scnStr = findProperty(url, info, PhoenixRuntime.CURRENT_SCN_ATTRIB); - return (scnStr == null ? null : Long.parseLong(scnStr)); - } - - public static Long getBuildIndexSCN(String url, Properties info) throws SQLException { - String scnStr = findProperty(url, info, PhoenixRuntime.BUILD_INDEX_AT_ATTRIB); - return (scnStr == null ? null : Long.parseLong(scnStr)); + } + + /** + * Returns a map that contains connection properties from both info and + * url. + */ + private static Map getCombinedConnectionProperties(String url, Properties info) { + Map result = newHashMapWithExpectedSize(info.size()); + for (String propName : info.stringPropertyNames()) { + result.put(propName, info.getProperty(propName)); } - - public static int getMutateBatchSize(String url, Properties info, ReadOnlyProps props) throws SQLException { - String batchSizeStr = findProperty(url, info, PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB); - return (batchSizeStr == null ? props.getInt(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE) : Integer.parseInt(batchSizeStr)); - } - - public static long getMutateBatchSizeBytes(String url, Properties info, ReadOnlyProps props) throws SQLException { - String batchSizeStr = findProperty(url, info, PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB); - return batchSizeStr == null ? - props.getLongBytes(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES) - : Long.parseLong(batchSizeStr); - } - - public static @Nullable PName getTenantId(String url, Properties info) throws SQLException { - String tenantId = findProperty(url, info, PhoenixRuntime.TENANT_ID_ATTRIB); - return (tenantId == null ? null : PNameFactory.newName(tenantId)); - } - - /** - * Retrieve the value of the optional auto-commit setting from JDBC url or connection - * properties. 
- * - * @param url JDBC url used for connecting to Phoenix - * @param info connection properties - * @param defaultValue default to return if the auto-commit property is not set in the url - * or connection properties - * @return the boolean value supplied for the AutoCommit in the connection URL or properties, - * or the supplied default value if no AutoCommit attribute was provided - */ - public static boolean getAutoCommit(String url, Properties info, boolean defaultValue) { - String autoCommit = findProperty(url, info, PhoenixRuntime.AUTO_COMMIT_ATTRIB); - if (autoCommit == null) { - return defaultValue; + String[] urlPropNameValues = + url.split(Character.toString(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR)); + if (urlPropNameValues.length > 1) { + for (int i = 1; i < urlPropNameValues.length; i++) { + String[] urlPropNameValue = urlPropNameValues[i].split("="); + if (urlPropNameValue.length == 2) { + result.put(urlPropNameValue[0], urlPropNameValue[1]); } - return Boolean.valueOf(autoCommit); + } } - /** - * Retrieve the value of the optional consistency read setting from JDBC url or connection - * properties. - * - * @param url JDBC url used for connecting to Phoenix - * @param info connection properties - * @param defaultValue default to return if ReadConsistency property is not set in the url - * or connection properties - * @return the boolean value supplied for the AutoCommit in the connection URL or properties, - * or the supplied default value if no AutoCommit attribute was provided - */ - public static Consistency getConsistencyLevel(String url, Properties info, String defaultValue) { - String consistency = findProperty(url, info, PhoenixRuntime.CONSISTENCY_ATTRIB); - - if (consistency != null && consistency.equalsIgnoreCase(Consistency.TIMELINE.toString())){ - return Consistency.TIMELINE; - } - return Consistency.STRONG; + return result; + } + + public static Map getAnnotations(@NonNull String url, @NonNull Properties info) { + Preconditions.checkNotNull(url); + Preconditions.checkNotNull(info); + + Map combinedProperties = getCombinedConnectionProperties(url, info); + Map result = newHashMapWithExpectedSize(combinedProperties.size()); + for (Map.Entry prop : combinedProperties.entrySet()) { + if ( + prop.getKey().startsWith(ANNOTATION_ATTRIB_PREFIX) + && prop.getKey().length() > ANNOTATION_ATTRIB_PREFIX.length() + ) { + result.put(prop.getKey().substring(ANNOTATION_ATTRIB_PREFIX.length()), prop.getValue()); + } } - - public static boolean isCollectingRequestLevelMetricsEnabled(String url, Properties overrideProps, ReadOnlyProps queryServicesProps) throws SQLException { - String batchSizeStr = findProperty(url, overrideProps, PhoenixRuntime.REQUEST_METRIC_ATTRIB); - return (batchSizeStr == null ? queryServicesProps.getBoolean(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, QueryServicesOptions.DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) : Boolean.parseBoolean(batchSizeStr)); + return result; + } + + public static Long getCurrentSCN(String url, Properties info) throws SQLException { + String scnStr = findProperty(url, info, PhoenixRuntime.CURRENT_SCN_ATTRIB); + return (scnStr == null ? null : Long.parseLong(scnStr)); + } + + public static Long getBuildIndexSCN(String url, Properties info) throws SQLException { + String scnStr = findProperty(url, info, PhoenixRuntime.BUILD_INDEX_AT_ATTRIB); + return (scnStr == null ? 
null : Long.parseLong(scnStr)); + } + + public static int getMutateBatchSize(String url, Properties info, ReadOnlyProps props) + throws SQLException { + String batchSizeStr = findProperty(url, info, PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB); + return (batchSizeStr == null + ? props.getInt(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE) + : Integer.parseInt(batchSizeStr)); + } + + public static long getMutateBatchSizeBytes(String url, Properties info, ReadOnlyProps props) + throws SQLException { + String batchSizeStr = findProperty(url, info, PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB); + return batchSizeStr == null + ? props.getLongBytes(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES) + : Long.parseLong(batchSizeStr); + } + + public static @Nullable PName getTenantId(String url, Properties info) throws SQLException { + String tenantId = findProperty(url, info, PhoenixRuntime.TENANT_ID_ATTRIB); + return (tenantId == null ? null : PNameFactory.newName(tenantId)); + } + + /** + * Retrieve the value of the optional auto-commit setting from JDBC url or connection properties. + * @param url JDBC url used for connecting to Phoenix + * @param info connection properties + * @param defaultValue default to return if the auto-commit property is not set in the url or + * connection properties + * @return the boolean value supplied for the AutoCommit in the connection URL or properties, or + * the supplied default value if no AutoCommit attribute was provided + */ + public static boolean getAutoCommit(String url, Properties info, boolean defaultValue) { + String autoCommit = findProperty(url, info, PhoenixRuntime.AUTO_COMMIT_ATTRIB); + if (autoCommit == null) { + return defaultValue; } - - public static String getSchema(String url, Properties info, String defaultValue) { - String schema = findProperty(url, info, PhoenixRuntime.SCHEMA_ATTRIB); - return (schema == null || schema.equals("")) ? defaultValue : schema; + return Boolean.valueOf(autoCommit); + } + + /** + * Retrieve the value of the optional consistency read setting from JDBC url or connection + * properties. + * @param url JDBC url used for connecting to Phoenix + * @param info connection properties + * @param defaultValue default to return if ReadConsistency property is not set in the url or + * connection properties + * @return the boolean value supplied for the AutoCommit in the connection URL or properties, or + * the supplied default value if no AutoCommit attribute was provided + */ + public static Consistency getConsistencyLevel(String url, Properties info, String defaultValue) { + String consistency = findProperty(url, info, PhoenixRuntime.CONSISTENCY_ATTRIB); + + if (consistency != null && consistency.equalsIgnoreCase(Consistency.TIMELINE.toString())) { + return Consistency.TIMELINE; } - /** - * Get the ZK quorom and root and node part of the URL, which is used by the HA code internally - * to identify the clusters. - * As we interpret a missing protocol as ZK, this is mostly idempotent for zk quorum strings. - * - * @param jdbcUrl JDBC URL - * @return part of the URL determining the ZK quorum and node - * @throws RuntimeException if the URL is invalid, or does not resolve to a ZK Registry - * connection - */ - public static String formatZookeeperUrl(String jdbcUrl) { - ConnectionInfo connInfo; - try { - connInfo = ConnectionInfo.create(jdbcUrl, null, null); - // TODO in theory we could support non-ZK registries for HA. 
- // However, as HA already relies on ZK, this wouldn't be particularly useful, - // and would require significant changes. - if (!(connInfo instanceof ZKConnectionInfo)) { - throw new SQLException("HA connections must use ZooKeeper registry. " + jdbcUrl - + " is not a Zookeeper HBase connection"); - } - ZKConnectionInfo zkInfo = (ZKConnectionInfo) connInfo; - StringBuilder sb = new StringBuilder(); - sb.append(zkInfo.getZkHosts().replaceAll(":", "\\\\:")).append("::") - .append(zkInfo.getZkRootNode()); - return sb.toString(); - } catch (SQLException e) { - throw new RuntimeException(e); - } + return Consistency.STRONG; + } + + public static boolean isCollectingRequestLevelMetricsEnabled(String url, Properties overrideProps, + ReadOnlyProps queryServicesProps) throws SQLException { + String batchSizeStr = findProperty(url, overrideProps, PhoenixRuntime.REQUEST_METRIC_ATTRIB); + return (batchSizeStr == null + ? queryServicesProps.getBoolean(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, + QueryServicesOptions.DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) + : Boolean.parseBoolean(batchSizeStr)); + } + + public static String getSchema(String url, Properties info, String defaultValue) { + String schema = findProperty(url, info, PhoenixRuntime.SCHEMA_ATTRIB); + return (schema == null || schema.equals("")) ? defaultValue : schema; + } + + /** + * Get the ZK quorom and root and node part of the URL, which is used by the HA code internally to + * identify the clusters. As we interpret a missing protocol as ZK, this is mostly idempotent for + * zk quorum strings. + * @param jdbcUrl JDBC URL + * @return part of the URL determining the ZK quorum and node + * @throws RuntimeException if the URL is invalid, or does not resolve to a ZK Registry connection + */ + public static String formatZookeeperUrl(String jdbcUrl) { + ConnectionInfo connInfo; + try { + connInfo = ConnectionInfo.create(jdbcUrl, null, null); + // TODO in theory we could support non-ZK registries for HA. + // However, as HA already relies on ZK, this wouldn't be particularly useful, + // and would require significant changes. + if (!(connInfo instanceof ZKConnectionInfo)) { + throw new SQLException("HA connections must use ZooKeeper registry. " + jdbcUrl + + " is not a Zookeeper HBase connection"); + } + ZKConnectionInfo zkInfo = (ZKConnectionInfo) connInfo; + StringBuilder sb = new StringBuilder(); + sb.append(zkInfo.getZkHosts().replaceAll(":", "\\\\:")).append("::") + .append(zkInfo.getZkRootNode()); + return sb.toString(); + } catch (SQLException e) { + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/JacksonUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/JacksonUtil.java index 33c45187816..4f849c636ff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/JacksonUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/JacksonUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,28 +22,28 @@ import com.fasterxml.jackson.databind.ObjectWriter; public final class JacksonUtil { - private static final ObjectMapper mapper = new ObjectMapper(); + private static final ObjectMapper mapper = new ObjectMapper(); - private JacksonUtil() { - } + private JacksonUtil() { + } - public static ObjectReader getObjectReader(Class clazz) { - return mapper.readerFor(clazz); - } + public static ObjectReader getObjectReader(Class clazz) { + return mapper.readerFor(clazz); + } - public static ObjectReader getObjectReader(){ - return mapper.reader(); - } + public static ObjectReader getObjectReader() { + return mapper.reader(); + } - public static ObjectWriter getObjectWriter(Class clazz) { - return mapper.writerFor(clazz); - } + public static ObjectWriter getObjectWriter(Class clazz) { + return mapper.writerFor(clazz); + } - public static ObjectWriter getObjectWriter(){ - return mapper.writer(); - } + public static ObjectWriter getObjectWriter() { + return mapper.writer(); + } - public static ObjectWriter getObjectWriterPretty() { - return mapper.writerWithDefaultPrettyPrinter(); - } + public static ObjectWriter getObjectWriterPretty() { + return mapper.writerWithDefaultPrettyPrinter(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/LogUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/LogUtil.java index 7ebdbb41c00..a01d9ca26ff 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/LogUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/LogUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,40 +24,48 @@ public class LogUtil { - private LogUtil() { - } + private LogUtil() { + } - public static String addCustomAnnotations(@Nullable String logLine, @Nullable PhoenixConnection con) { - if (con == null || con.getCustomTracingAnnotations() == null || con.getCustomTracingAnnotations().isEmpty()) { - return logLine; - } else { - return customAnnotationsToString(con) + ' ' + logLine; - } + public static String addCustomAnnotations(@Nullable String logLine, + @Nullable PhoenixConnection con) { + if ( + con == null || con.getCustomTracingAnnotations() == null + || con.getCustomTracingAnnotations().isEmpty() + ) { + return logLine; + } else { + return customAnnotationsToString(con) + ' ' + logLine; } - - public static String addCustomAnnotations(@Nullable String logLine, @Nullable byte[] annotations) { - if (annotations == null) { - return logLine; - } else { - return Bytes.toString(annotations) + ' ' + logLine; - } + } + + public static String addCustomAnnotations(@Nullable String logLine, + @Nullable byte[] annotations) { + if (annotations == null) { + return logLine; + } else { + return Bytes.toString(annotations) + ' ' + logLine; } - - public static String customAnnotationsToString(@Nullable PhoenixConnection con) { - if (con == null || con.getCustomTracingAnnotations() == null || con.getCustomTracingAnnotations().isEmpty()) { - return null; - } else { - return con.getCustomTracingAnnotations().toString(); - } + } + + public static String customAnnotationsToString(@Nullable PhoenixConnection con) { + if ( + con == null || con.getCustomTracingAnnotations() == null + || con.getCustomTracingAnnotations().isEmpty() + ) { + return null; + } else { + return con.getCustomTracingAnnotations().toString(); } + } - public static String getCallerStackTrace() { - StackTraceElement[] st = Thread.currentThread().getStackTrace(); - StringBuilder sb = new StringBuilder(); - for (StackTraceElement element : st) { - sb.append(element.toString()); - sb.append("\n"); - } - return sb.toString(); + public static String getCallerStackTrace() { + StackTraceElement[] st = Thread.currentThread().getStackTrace(); + StringBuilder sb = new StringBuilder(); + for (StackTraceElement element : st) { + sb.append(element.toString()); + sb.append("\n"); } + return sb.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MajorMinorVersion.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MajorMinorVersion.java index 977ddf9978e..9cfcd3cc468 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MajorMinorVersion.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MajorMinorVersion.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util; /** @@ -23,41 +22,41 @@ */ class MajorMinorVersion { - private final int majorVersion; - private final int minorVersion; + private final int majorVersion; + private final int minorVersion; - public MajorMinorVersion(int majorVersion, int minorVersion) { - this.majorVersion = majorVersion; - this.minorVersion = minorVersion; - } + public MajorMinorVersion(int majorVersion, int minorVersion) { + this.majorVersion = majorVersion; + this.minorVersion = minorVersion; + } - public int getMajorVersion() { - return majorVersion; - } + public int getMajorVersion() { + return majorVersion; + } - public int getMinorVersion() { - return minorVersion; - } + public int getMinorVersion() { + return minorVersion; + } - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - MajorMinorVersion version = (MajorMinorVersion) o; - if (majorVersion != version.majorVersion) { - return false; - } - return minorVersion == version.minorVersion; + @Override + public boolean equals(Object o) { + if (this == o) { + return true; } - - @Override - public int hashCode() { - int result = majorVersion; - result = 31 * result + minorVersion; - return result; + if (o == null || getClass() != o.getClass()) { + return false; + } + MajorMinorVersion version = (MajorMinorVersion) o; + if (majorVersion != version.majorVersion) { + return false; } + return minorVersion == version.minorVersion; + } + + @Override + public int hashCode() { + int result = majorVersion; + result = 31 * result + minorVersion; + return result; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ManualEnvironmentEdge.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ManualEnvironmentEdge.java index 169e3131caa..dccbd1e31d3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ManualEnvironmentEdge.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ManualEnvironmentEdge.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,19 +18,19 @@ package org.apache.phoenix.util; public class ManualEnvironmentEdge extends EnvironmentEdge { - // Sometimes 0 ts might have a special value, so lets start with 1 - protected long value = 1L; + // Sometimes 0 ts might have a special value, so lets start with 1 + protected long value = 1L; - public void setValue(long newValue) { - value = newValue; - } + public void setValue(long newValue) { + value = newValue; + } - public void incrementValue(long addedValue) { - value += addedValue; - } + public void incrementValue(long addedValue) { + value += addedValue; + } - @Override - public long currentTime() { - return this.value; - } -} \ No newline at end of file + @Override + public long currentTime() { + return this.value; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java index ca13846d9e3..827d74ed621 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/MetaDataUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,22 +25,6 @@ import java.sql.SQLException; import java.util.*; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.schema.ColumnFamilyNotFoundException; -import org.apache.phoenix.schema.ColumnNotFoundException; -import org.apache.phoenix.schema.PColumn; -import org.apache.phoenix.schema.PColumnFamily; -import org.apache.phoenix.schema.PName; -import org.apache.phoenix.schema.PNameFactory; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTableRef; -import org.apache.phoenix.schema.PTableType; -import org.apache.phoenix.schema.SequenceKey; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.TableNotFoundException; -import org.apache.phoenix.schema.TableProperty; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -58,6 +42,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder; @@ -68,439 +53,492 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.ColumnFamilyNotFoundException; +import 
org.apache.phoenix.schema.ColumnNotFoundException; +import org.apache.phoenix.schema.PColumn; +import org.apache.phoenix.schema.PColumnFamily; +import org.apache.phoenix.schema.PName; +import org.apache.phoenix.schema.PNameFactory; +import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTable.LinkType; - +import org.apache.phoenix.schema.PTableRef; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.schema.SequenceKey; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.schema.TableProperty; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PSmallint; import org.apache.phoenix.schema.types.PUnsignedTinyint; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class MetaDataUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataUtil.class); - - public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_"; - public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_"; - public static final String VIEW_INDEX_SEQUENCE_PREFIX = "_SEQ_"; - public static final String VIEW_INDEX_SEQUENCE_NAME_PREFIX = "_ID_"; - public static final byte[] VIEW_INDEX_SEQUENCE_PREFIX_BYTES = Bytes.toBytes(VIEW_INDEX_SEQUENCE_PREFIX); - public static final String VIEW_INDEX_ID_COLUMN_NAME = "_INDEX_ID"; - public static final String PARENT_TABLE_KEY = "PARENT_TABLE"; - public static final String IS_VIEW_INDEX_TABLE_PROP_NAME = "IS_VIEW_INDEX_TABLE"; - public static final byte[] IS_VIEW_INDEX_TABLE_PROP_BYTES = Bytes.toBytes(IS_VIEW_INDEX_TABLE_PROP_NAME); - - public static final String IS_LOCAL_INDEX_TABLE_PROP_NAME = "IS_LOCAL_INDEX_TABLE"; - public static final byte[] IS_LOCAL_INDEX_TABLE_PROP_BYTES = Bytes.toBytes(IS_LOCAL_INDEX_TABLE_PROP_NAME); - - public static final String DATA_TABLE_NAME_PROP_NAME = "DATA_TABLE_NAME"; - - public static final byte[] DATA_TABLE_NAME_PROP_BYTES = Bytes.toBytes(DATA_TABLE_NAME_PROP_NAME); - - private static final Map ALLOWED_SERVER_CLIENT_MAJOR_VERSION = - ImmutableMap.of( - new MajorMinorVersion(5, 1), new MajorMinorVersion(4, 16) - ); - - // See PHOENIX-3955 - public static final List SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES = ImmutableList.of( - ColumnFamilyDescriptorBuilder.TTL, - ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS, - ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE); - - public static Put getLastDDLTimestampUpdate(byte[] tableHeaderRowKey, - long clientTimestamp, - long lastDDLTimestamp) { - //use client timestamp as the timestamp of the Cell, to match 
the other Cells that might - // be created by this DDL. But the actual value will be a _server_ timestamp - Put p = new Put(tableHeaderRowKey, clientTimestamp); - byte[] lastDDLTimestampBytes = PLong.INSTANCE.toBytes(lastDDLTimestamp); - p.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP_BYTES, lastDDLTimestampBytes); - return p; - } - - public static Put getExternalSchemaIdUpdate(byte[] tableHeaderRowKey, - String externalSchemaId) { - //use client timestamp as the timestamp of the Cell, to match the other Cells that might - // be created by this DDL. But the actual value will be a _server_ timestamp - Put p = new Put(tableHeaderRowKey); - byte[] externalSchemaIdBytes = PVarchar.INSTANCE.toBytes(externalSchemaId); - p.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.EXTERNAL_SCHEMA_ID_BYTES, externalSchemaIdBytes); - return p; - } - - /** - * Checks if a table is meant to be queried directly (and hence is relevant to external - * systems tracking Phoenix schema) - * @param tableType - * @return True if a table or view, false otherwise (such as for an index, system table, or - * subquery) - */ - public static boolean isTableDirectlyQueried(PTableType tableType) { - return tableType.equals(PTableType.TABLE) || tableType.equals(PTableType.VIEW); - } - - public static class ClientServerCompatibility { - - private int errorCode; - private boolean isCompatible; - - ClientServerCompatibility() { - this.errorCode = 0; - } - - public int getErrorCode() { - return this.errorCode; - } - - void setErrorCode(int errorCode) { - this.errorCode = errorCode; - } - - public boolean getIsCompatible() { - return this.isCompatible; - } + private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataUtil.class); + + public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_"; + public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_"; + public static final String VIEW_INDEX_SEQUENCE_PREFIX = "_SEQ_"; + public static final String VIEW_INDEX_SEQUENCE_NAME_PREFIX = "_ID_"; + public static final byte[] VIEW_INDEX_SEQUENCE_PREFIX_BYTES = + Bytes.toBytes(VIEW_INDEX_SEQUENCE_PREFIX); + public static final String VIEW_INDEX_ID_COLUMN_NAME = "_INDEX_ID"; + public static final String PARENT_TABLE_KEY = "PARENT_TABLE"; + public static final String IS_VIEW_INDEX_TABLE_PROP_NAME = "IS_VIEW_INDEX_TABLE"; + public static final byte[] IS_VIEW_INDEX_TABLE_PROP_BYTES = + Bytes.toBytes(IS_VIEW_INDEX_TABLE_PROP_NAME); + + public static final String IS_LOCAL_INDEX_TABLE_PROP_NAME = "IS_LOCAL_INDEX_TABLE"; + public static final byte[] IS_LOCAL_INDEX_TABLE_PROP_BYTES = + Bytes.toBytes(IS_LOCAL_INDEX_TABLE_PROP_NAME); + + public static final String DATA_TABLE_NAME_PROP_NAME = "DATA_TABLE_NAME"; + + public static final byte[] DATA_TABLE_NAME_PROP_BYTES = Bytes.toBytes(DATA_TABLE_NAME_PROP_NAME); + + private static final Map ALLOWED_SERVER_CLIENT_MAJOR_VERSION = + ImmutableMap.of(new MajorMinorVersion(5, 1), new MajorMinorVersion(4, 16)); + + // See PHOENIX-3955 + public static final List SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES = ImmutableList + .of(ColumnFamilyDescriptorBuilder.TTL, ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS, + ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE); + + public static Put getLastDDLTimestampUpdate(byte[] tableHeaderRowKey, long clientTimestamp, + long lastDDLTimestamp) { + // use client timestamp as the timestamp of the Cell, to match the other Cells that might + // be created by this DDL. 
But the actual value will be a _server_ timestamp + Put p = new Put(tableHeaderRowKey, clientTimestamp); + byte[] lastDDLTimestampBytes = PLong.INSTANCE.toBytes(lastDDLTimestamp); + p.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP_BYTES, lastDDLTimestampBytes); + return p; + } + + public static Put getExternalSchemaIdUpdate(byte[] tableHeaderRowKey, String externalSchemaId) { + // use client timestamp as the timestamp of the Cell, to match the other Cells that might + // be created by this DDL. But the actual value will be a _server_ timestamp + Put p = new Put(tableHeaderRowKey); + byte[] externalSchemaIdBytes = PVarchar.INSTANCE.toBytes(externalSchemaId); + p.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.EXTERNAL_SCHEMA_ID_BYTES, externalSchemaIdBytes); + return p; + } - void setCompatible(boolean isCompatible) { - this.isCompatible = isCompatible; - } - } - - public static ClientServerCompatibility areClientAndServerCompatible(long serverHBaseAndPhoenixVersion) { - // As of 3.0, we allow a client and server to differ for the minor version. - // Care has to be taken to upgrade the server before the client, as otherwise - // the client may call expressions that don't yet exist on the server. - // Differing by the patch version has always been allowed. - // Only differing by the major version is not allowed. - return areClientAndServerCompatible(MetaDataUtil.decodePhoenixVersion(serverHBaseAndPhoenixVersion), MetaDataProtocol.PHOENIX_MAJOR_VERSION, MetaDataProtocol.PHOENIX_MINOR_VERSION); - } - - // Default scope for testing - @VisibleForTesting - static ClientServerCompatibility areClientAndServerCompatible(int serverVersion, int clientMajorVersion, int clientMinorVersion) { - // A server and client with the same major and minor version number must be compatible. - // So it's important that we roll the PHOENIX_MAJOR_VERSION or PHOENIX_MINOR_VERSION - // when we make an incompatible change. 
- ClientServerCompatibility compatibility = new ClientServerCompatibility(); - if (VersionUtil.encodeMinPatchVersion(clientMajorVersion, clientMinorVersion) > serverVersion) { // Client major and minor cannot be ahead of server - compatibility.setErrorCode(SQLExceptionCode.OUTDATED_JARS.getErrorCode()); - compatibility.setCompatible(false); - return compatibility; - } else if (VersionUtil.encodeMaxMinorVersion(clientMajorVersion) < serverVersion) { // Client major version must at least be up to server major version - MajorMinorVersion serverMajorMinorVersion = new MajorMinorVersion( - VersionUtil.decodeMajorVersion(serverVersion), - VersionUtil.decodeMinorVersion(serverVersion)); - MajorMinorVersion clientMajorMinorVersion = - new MajorMinorVersion(clientMajorVersion, clientMinorVersion); - if (!clientMajorMinorVersion.equals( - ALLOWED_SERVER_CLIENT_MAJOR_VERSION.get(serverMajorMinorVersion))) { - // Incompatible if not whitelisted by ALLOWED_SERVER_CLIENT_MAJOR_VERSION - compatibility.setErrorCode(SQLExceptionCode - .INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()); - compatibility.setCompatible(false); - return compatibility; - } - } - compatibility.setCompatible(true); + /** + * Checks if a table is meant to be queried directly (and hence is relevant to external systems + * tracking Phoenix schema) + * @return True if a table or view, false otherwise (such as for an index, system table, or + * subquery) + */ + public static boolean isTableDirectlyQueried(PTableType tableType) { + return tableType.equals(PTableType.TABLE) || tableType.equals(PTableType.VIEW); + } + + public static class ClientServerCompatibility { + + private int errorCode; + private boolean isCompatible; + + ClientServerCompatibility() { + this.errorCode = 0; + } + + public int getErrorCode() { + return this.errorCode; + } + + void setErrorCode(int errorCode) { + this.errorCode = errorCode; + } + + public boolean getIsCompatible() { + return this.isCompatible; + } + + void setCompatible(boolean isCompatible) { + this.isCompatible = isCompatible; + } + } + + public static ClientServerCompatibility + areClientAndServerCompatible(long serverHBaseAndPhoenixVersion) { + // As of 3.0, we allow a client and server to differ for the minor version. + // Care has to be taken to upgrade the server before the client, as otherwise + // the client may call expressions that don't yet exist on the server. + // Differing by the patch version has always been allowed. + // Only differing by the major version is not allowed. + return areClientAndServerCompatible( + MetaDataUtil.decodePhoenixVersion(serverHBaseAndPhoenixVersion), + MetaDataProtocol.PHOENIX_MAJOR_VERSION, MetaDataProtocol.PHOENIX_MINOR_VERSION); + } + + // Default scope for testing + @VisibleForTesting + static ClientServerCompatibility areClientAndServerCompatible(int serverVersion, + int clientMajorVersion, int clientMinorVersion) { + // A server and client with the same major and minor version number must be compatible. + // So it's important that we roll the PHOENIX_MAJOR_VERSION or PHOENIX_MINOR_VERSION + // when we make an incompatible change. 
+ ClientServerCompatibility compatibility = new ClientServerCompatibility(); + if (VersionUtil.encodeMinPatchVersion(clientMajorVersion, clientMinorVersion) > serverVersion) { // Client + // major + // and + // minor + // cannot + // be + // ahead + // of + // server + compatibility.setErrorCode(SQLExceptionCode.OUTDATED_JARS.getErrorCode()); + compatibility.setCompatible(false); + return compatibility; + } else if (VersionUtil.encodeMaxMinorVersion(clientMajorVersion) < serverVersion) { // Client + // major + // version + // must at + // least be + // up to + // server + // major + // version + MajorMinorVersion serverMajorMinorVersion = + new MajorMinorVersion(VersionUtil.decodeMajorVersion(serverVersion), + VersionUtil.decodeMinorVersion(serverVersion)); + MajorMinorVersion clientMajorMinorVersion = + new MajorMinorVersion(clientMajorVersion, clientMinorVersion); + if ( + !clientMajorMinorVersion + .equals(ALLOWED_SERVER_CLIENT_MAJOR_VERSION.get(serverMajorMinorVersion)) + ) { + // Incompatible if not whitelisted by ALLOWED_SERVER_CLIENT_MAJOR_VERSION + compatibility.setErrorCode(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()); + compatibility.setCompatible(false); return compatibility; - } - - // Given the encoded integer representing the phoenix version in the encoded version value. - // The second byte in int would be the major version, 3rd byte minor version, and 4th byte - // patch version. - public static int decodePhoenixVersion(long version) { - return (int) ((version << Byte.SIZE * 4) >>> Byte.SIZE * 5); - } - - // TODO: generalize this to use two bytes to return a SQL error code instead - public static long encodeHasIndexWALCodec(long version, boolean isValid) { - if (!isValid) { - return version | 1; - } - return version; - } - - public static boolean decodeHasIndexWALCodec(long version) { - return (version & 0xF) == 0; - } - - // Given the encoded integer representing the client hbase version in the encoded version value. - // The second byte in int would be the major version, 3rd byte minor version, and 4th byte - // patch version. - public static int decodeHBaseVersion(long version) { - return (int) (version >>> Byte.SIZE * 5); - } - - public static String decodeHBaseVersionAsString(int version) { - int major = (version >>> Byte.SIZE * 2) & 0xFF; - int minor = (version >>> Byte.SIZE * 1) & 0xFF; - int patch = version & 0xFF; - return major + "." + minor + "." + patch; - } - - // Given the encoded integer representing the phoenix version in the encoded version value. - // The second byte in int would be the major version, 3rd byte minor version, and 4th byte - // patch version. - public static boolean decodeTableNamespaceMappingEnabled(long version) { - return ((int)((version << Byte.SIZE * 3) >>> Byte.SIZE * 7) & 0x1) != 0; - } - - // The first three bytes of the long encode the HBase version as major.minor.patch. - // The fourth byte is isTableNamespaceMappingEnabled - // The fifth to seventh bytes of the value encode the Phoenix version as major.minor.patch. - // The eights byte encodes whether the WAL codec is correctly installed - /** - * Encode HBase and Phoenix version along with some server-side config information such as whether WAL codec is - * installed (necessary for non transactional, mutable secondar indexing), and whether systemNamespace mapping is enabled. - * - * @param hbaseVersionStr - * @param config - * @return long value sent back during initialization of a cluster connection. 
- */ - public static long encodeVersion(String hbaseVersionStr, Configuration config) { - long hbaseVersion = VersionUtil.encodeVersion(hbaseVersionStr); - long isTableNamespaceMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, - new ReadOnlyProps(config.iterator())) ? 1 : 0; - long phoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, - MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); - long walCodec = IndexManagementUtil.isWALEditCodecSet(config) ? 0 : 1; - long version = - // Encode HBase major, minor, patch version - (hbaseVersion << (Byte.SIZE * 5)) - // Encode if table namespace mapping is enabled on the server side - // Note that we DO NOT return information on whether system tables are mapped - // on the server side - | (isTableNamespaceMappingEnabled << (Byte.SIZE * 4)) - // Encode Phoenix major, minor, patch version - | (phoenixVersion << (Byte.SIZE * 1)) - // Encode whether or not non transactional, mutable secondary indexing was configured properly. - | walCodec; - return version; - } - - public static byte[] getTenantIdAndSchemaAndTableName(Mutation someRow) { - byte[][] rowKeyMetaData = new byte[3][]; - getVarChars(someRow.getRow(), 3, rowKeyMetaData); - return ByteUtil.concat(rowKeyMetaData[0], rowKeyMetaData[1], rowKeyMetaData[2]); - } - - public static byte[] getTenantIdAndSchemaAndTableName(Result result) { - byte[][] rowKeyMetaData = new byte[3][]; - getVarChars(result.getRow(), 3, rowKeyMetaData); - return ByteUtil.concat(rowKeyMetaData[0], rowKeyMetaData[1], rowKeyMetaData[2]); - } - - public static void getTenantIdAndSchemaAndTableName(List tableMetadata, byte[][] rowKeyMetaData) { - Mutation m = getTableHeaderRow(tableMetadata); - getVarChars(m.getRow(), 3, rowKeyMetaData); - } - - public static int getBaseColumnCount(List tableMetadata) { - int result = -1; - for (Mutation mutation : tableMetadata) { - for (List cells : mutation.getFamilyCellMap().values()) { - for (Cell cell : cells) { - // compare using offsets - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, 0, - PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES.length) == 0) - if (Bytes.contains(cell.getQualifierArray(), PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES)) { - result = PInteger.INSTANCE.getCodec() - .decodeInt(cell.getValueArray(), cell.getValueOffset(), SortOrder.ASC); - } - } - } - } - return result; - } - - public static void mutatePutValue(Put somePut, byte[] family, byte[] qualifier, byte[] newValue) { - NavigableMap> familyCellMap = somePut.getFamilyCellMap(); - List cells = familyCellMap.get(family); - List newCells = Lists.newArrayList(); - if (cells != null) { - for (Cell cell : cells) { - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - qualifier, 0, qualifier.length) == 0) { - Cell replacementCell = new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), - KeyValue.Type.codeToType(cell.getType().getCode()), newValue, 0, newValue.length); - newCells.add(replacementCell); - } else { - newCells.add(cell); - } - } - familyCellMap.put(family, newCells); - } - } - - /** - * Iterates over the cells that are mutated by the put operation for the given column family and - * 
column qualifier and conditionally modifies those cells to add a tags list. We only add tags - * if the cell value does not match the passed valueArray. If we always want to add tags to - * these cells, we can pass in a null valueArray - * @param somePut Put operation - * @param family column family of the cells - * @param qualifier column qualifier of the cells - * @param cellBuilder ExtendedCellBuilder object - * @param valueArray byte array of values or null - * @param tagArray byte array of tags to add to the cells - */ - public static void conditionallyAddTagsToPutCells(Put somePut, byte[] family, byte[] qualifier, - ExtendedCellBuilder cellBuilder, byte[] valueArray, byte[] tagArray) { - NavigableMap> familyCellMap = somePut.getFamilyCellMap(); - List cells = familyCellMap.get(family); - List newCells = Lists.newArrayList(); - if (cells != null) { - for (Cell cell : cells) { - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), qualifier, 0, qualifier.length) == 0 && - (valueArray == null || !CellUtil.matchingValue(cell, valueArray))) { - ExtendedCell extendedCell = cellBuilder - .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) - .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()) - .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - .setValue(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()) - .setTimestamp(cell.getTimestamp()) - .setType(cell.getType()) - .setTags(TagUtil.concatTags(tagArray, cell)) - .build(); - // Replace existing cell with a cell that has the custom tags list - newCells.add(extendedCell); - } else { - // Add cell as is - newCells.add(cell); - } - } - familyCellMap.put(family, newCells); - } - } - - public static Put cloneDeleteToPutAndAddColumn(Delete delete, byte[] family, byte[] qualifier, byte[] value) { - NavigableMap> familyCellMap = delete.getFamilyCellMap(); - List cells = familyCellMap.get(family); - Cell cell = Iterables.getFirst(cells, null); - if (cell == null) { - throw new RuntimeException("Empty cells for delete for family: " + Bytes.toStringBinary(family)); - } - byte[] rowArray = new byte[cell.getRowLength()]; - System.arraycopy(cell.getRowArray(), cell.getRowOffset(), rowArray, 0, cell.getRowLength()); - Put put = new Put(rowArray, delete.getTimestamp()); - put.addColumn(family, qualifier, delete.getTimestamp(), value); - return put; - } - - - public static void getTenantIdAndFunctionName(List functionMetadata, byte[][] rowKeyMetaData) { - Mutation m = getTableHeaderRow(functionMetadata); - getVarChars(m.getRow(), 2, rowKeyMetaData); - } - - /** - * Only return the parent table name if it has the same tenant id and schema name as the current - * table (this is only used to lock the parent table of indexes) - */ - public static byte[] getParentTableName(List tableMetadata) { - if (tableMetadata.size() == 1) { - return null; - } - byte[][] rowKeyMetaData = new byte[3][]; - // get the tenantId, schema name and table name for the current table - getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - // get the tenantId, schema name and table name for the parent table - Mutation m = getParentTableHeaderRow(tableMetadata); - 
getVarChars(m.getRow(), 3, rowKeyMetaData); - if (Bytes.compareTo(tenantId, rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]) == 0 - && Bytes.compareTo(schemaName, - rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, - rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]) == 0) { - return null; - } - return rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - } - - public static long getSequenceNumber(Mutation tableMutation) { - List kvs = tableMutation.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); - if (kvs != null) { - for (Cell kv : kvs) { // list is not ordered, so search. TODO: we could potentially assume the position - if (isSequenceNumber(kv)) { - return PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault()); - } - } - } - throw new IllegalStateException(); - } - - public static long getSequenceNumber(List tableMetaData) { - return getSequenceNumber(getPutOnlyTableHeaderRow(tableMetaData)); - } - - public static boolean isSequenceNumber(Mutation m) { - boolean foundSequenceNumber = false; - for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { - if (isSequenceNumber(kv)) { - foundSequenceNumber = true; - break; - } + } + } + compatibility.setCompatible(true); + return compatibility; + } + + // Given the encoded integer representing the phoenix version in the encoded version value. + // The second byte in int would be the major version, 3rd byte minor version, and 4th byte + // patch version. + public static int decodePhoenixVersion(long version) { + return (int) ((version << Byte.SIZE * 4) >>> Byte.SIZE * 5); + } + + // TODO: generalize this to use two bytes to return a SQL error code instead + public static long encodeHasIndexWALCodec(long version, boolean isValid) { + if (!isValid) { + return version | 1; + } + return version; + } + + public static boolean decodeHasIndexWALCodec(long version) { + return (version & 0xF) == 0; + } + + // Given the encoded integer representing the client hbase version in the encoded version value. + // The second byte in int would be the major version, 3rd byte minor version, and 4th byte + // patch version. + public static int decodeHBaseVersion(long version) { + return (int) (version >>> Byte.SIZE * 5); + } + + public static String decodeHBaseVersionAsString(int version) { + int major = (version >>> Byte.SIZE * 2) & 0xFF; + int minor = (version >>> Byte.SIZE * 1) & 0xFF; + int patch = version & 0xFF; + return major + "." + minor + "." + patch; + } + + // Given the encoded integer representing the phoenix version in the encoded version value. + // The second byte in int would be the major version, 3rd byte minor version, and 4th byte + // patch version. + public static boolean decodeTableNamespaceMappingEnabled(long version) { + return ((int) ((version << Byte.SIZE * 3) >>> Byte.SIZE * 7) & 0x1) != 0; + } + + // The first three bytes of the long encode the HBase version as major.minor.patch. + // The fourth byte is isTableNamespaceMappingEnabled + // The fifth to seventh bytes of the value encode the Phoenix version as major.minor.patch. + // The eights byte encodes whether the WAL codec is correctly installed + /** + * Encode HBase and Phoenix version along with some server-side config information such as whether + * WAL codec is installed (necessary for non transactional, mutable secondar indexing), and + * whether systemNamespace mapping is enabled. 
+ * @return long value sent back during initialization of a cluster connection. + */ + public static long encodeVersion(String hbaseVersionStr, Configuration config) { + long hbaseVersion = VersionUtil.encodeVersion(hbaseVersionStr); + long isTableNamespaceMappingEnabled = + SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, new ReadOnlyProps(config.iterator())) + ? 1 + : 0; + long phoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, + MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); + long walCodec = IndexManagementUtil.isWALEditCodecSet(config) ? 0 : 1; + long version = + // Encode HBase major, minor, patch version + (hbaseVersion << (Byte.SIZE * 5)) + // Encode if table namespace mapping is enabled on the server side + // Note that we DO NOT return information on whether system tables are mapped + // on the server side + | (isTableNamespaceMappingEnabled << (Byte.SIZE * 4)) + // Encode Phoenix major, minor, patch version + | (phoenixVersion << (Byte.SIZE * 1)) + // Encode whether or not non transactional, mutable secondary indexing was configured + // properly. + | walCodec; + return version; + } + + public static byte[] getTenantIdAndSchemaAndTableName(Mutation someRow) { + byte[][] rowKeyMetaData = new byte[3][]; + getVarChars(someRow.getRow(), 3, rowKeyMetaData); + return ByteUtil.concat(rowKeyMetaData[0], rowKeyMetaData[1], rowKeyMetaData[2]); + } + + public static byte[] getTenantIdAndSchemaAndTableName(Result result) { + byte[][] rowKeyMetaData = new byte[3][]; + getVarChars(result.getRow(), 3, rowKeyMetaData); + return ByteUtil.concat(rowKeyMetaData[0], rowKeyMetaData[1], rowKeyMetaData[2]); + } + + public static void getTenantIdAndSchemaAndTableName(List tableMetadata, + byte[][] rowKeyMetaData) { + Mutation m = getTableHeaderRow(tableMetadata); + getVarChars(m.getRow(), 3, rowKeyMetaData); + } + + public static int getBaseColumnCount(List tableMetadata) { + int result = -1; + for (Mutation mutation : tableMetadata) { + for (List cells : mutation.getFamilyCellMap().values()) { + for (Cell cell : cells) { + // compare using offsets + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, 0, + PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES.length) == 0 + ) if ( + Bytes.contains(cell.getQualifierArray(), + PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES) + ) { + result = PInteger.INSTANCE.getCodec().decodeInt(cell.getValueArray(), + cell.getValueOffset(), SortOrder.ASC); + } } - return foundSequenceNumber; - } - public static boolean isSequenceNumber(Cell kv) { - return CellUtil.matchingQualifier(kv, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES); - } - - public static PTableType getTableType(List tableMetaData, KeyValueBuilder builder, - ImmutableBytesWritable value) { - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), - PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, builder, value)) { - return PTableType.fromSerializedValue(value.get()[value.getOffset()]); + } + } + return result; + } + + public static void mutatePutValue(Put somePut, byte[] family, byte[] qualifier, byte[] newValue) { + NavigableMap> familyCellMap = somePut.getFamilyCellMap(); + List cells = familyCellMap.get(family); + List newCells = Lists.newArrayList(); + if (cells != null) { + for (Cell cell : cells) { + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), qualifier, 0, qualifier.length) 
== 0 + ) { + Cell replacementCell = new KeyValue(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), cell.getTimestamp(), + KeyValue.Type.codeToType(cell.getType().getCode()), newValue, 0, newValue.length); + newCells.add(replacementCell); + } else { + newCells.add(cell); } - return null; + } + familyCellMap.put(family, newCells); } + } - public static boolean isNameSpaceMapped(List tableMetaData, KeyValueBuilder builder, - ImmutableBytesWritable value) { - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), - PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value)) { - return (boolean)PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value)); + /** + * Iterates over the cells that are mutated by the put operation for the given column family and + * column qualifier and conditionally modifies those cells to add a tags list. We only add tags if + * the cell value does not match the passed valueArray. If we always want to add tags to these + * cells, we can pass in a null valueArray + * @param somePut Put operation + * @param family column family of the cells + * @param qualifier column qualifier of the cells + * @param cellBuilder ExtendedCellBuilder object + * @param valueArray byte array of values or null + * @param tagArray byte array of tags to add to the cells + */ + public static void conditionallyAddTagsToPutCells(Put somePut, byte[] family, byte[] qualifier, + ExtendedCellBuilder cellBuilder, byte[] valueArray, byte[] tagArray) { + NavigableMap> familyCellMap = somePut.getFamilyCellMap(); + List cells = familyCellMap.get(family); + List newCells = Lists.newArrayList(); + if (cells != null) { + for (Cell cell : cells) { + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), qualifier, 0, qualifier.length) == 0 + && (valueArray == null || !CellUtil.matchingValue(cell, valueArray)) + ) { + ExtendedCell extendedCell = + cellBuilder.setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) + .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()) + .setTimestamp(cell.getTimestamp()).setType(cell.getType()) + .setTags(TagUtil.concatTags(tagArray, cell)).build(); + // Replace existing cell with a cell that has the custom tags list + newCells.add(extendedCell); + } else { + // Add cell as is + newCells.add(cell); } - return false; - } + } + familyCellMap.put(family, newCells); + } + } + + public static Put cloneDeleteToPutAndAddColumn(Delete delete, byte[] family, byte[] qualifier, + byte[] value) { + NavigableMap> familyCellMap = delete.getFamilyCellMap(); + List cells = familyCellMap.get(family); + Cell cell = Iterables.getFirst(cells, null); + if (cell == null) { + throw new RuntimeException( + "Empty cells for delete for family: " + Bytes.toStringBinary(family)); + } + byte[] rowArray = new byte[cell.getRowLength()]; + System.arraycopy(cell.getRowArray(), cell.getRowOffset(), rowArray, 0, cell.getRowLength()); + Put put = new Put(rowArray, delete.getTimestamp()); + put.addColumn(family, qualifier, delete.getTimestamp(), value); + return put; + } + + public static void getTenantIdAndFunctionName(List functionMetadata, + byte[][] 
rowKeyMetaData) { + Mutation m = getTableHeaderRow(functionMetadata); + getVarChars(m.getRow(), 2, rowKeyMetaData); + } - public static int getSaltBuckets(List tableMetaData, KeyValueBuilder builder, - ImmutableBytesWritable value) { - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), - PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, builder, value)) { - return PInteger.INSTANCE.getCodec().decodeInt(value, SortOrder.getDefault()); + /** + * Only return the parent table name if it has the same tenant id and schema name as the current + * table (this is only used to lock the parent table of indexes) + */ + public static byte[] getParentTableName(List tableMetadata) { + if (tableMetadata.size() == 1) { + return null; + } + byte[][] rowKeyMetaData = new byte[3][]; + // get the tenantId, schema name and table name for the current table + getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + // get the tenantId, schema name and table name for the parent table + Mutation m = getParentTableHeaderRow(tableMetadata); + getVarChars(m.getRow(), 3, rowKeyMetaData); + if ( + Bytes.compareTo(tenantId, rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]) == 0 + && Bytes.compareTo(schemaName, rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]) + == 0 + && Bytes.compareTo(tableName, rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]) == 0 + ) { + return null; + } + return rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + } + + public static long getSequenceNumber(Mutation tableMutation) { + List kvs = + tableMutation.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); + if (kvs != null) { + for (Cell kv : kvs) { // list is not ordered, so search. 
TODO: we could potentially assume the + // position + if (isSequenceNumber(kv)) { + return PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), + SortOrder.getDefault()); } - return 0; - } - - public static long getParentSequenceNumber(List tableMetaData) { - return getSequenceNumber(getParentTableHeaderRow(tableMetaData)); - } - - public static Mutation getTableHeaderRow(List tableMetaData) { - return tableMetaData.get(0); - } + } + } + throw new IllegalStateException(); + } + + public static long getSequenceNumber(List tableMetaData) { + return getSequenceNumber(getPutOnlyTableHeaderRow(tableMetaData)); + } + + public static boolean isSequenceNumber(Mutation m) { + boolean foundSequenceNumber = false; + for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { + if (isSequenceNumber(kv)) { + foundSequenceNumber = true; + break; + } + } + return foundSequenceNumber; + } + + public static boolean isSequenceNumber(Cell kv) { + return CellUtil.matchingQualifier(kv, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES); + } + + public static PTableType getTableType(List tableMetaData, KeyValueBuilder builder, + ImmutableBytesWritable value) { + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, builder, value) + ) { + return PTableType.fromSerializedValue(value.get()[value.getOffset()]); + } + return null; + } + + public static boolean isNameSpaceMapped(List tableMetaData, KeyValueBuilder builder, + ImmutableBytesWritable value) { + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value) + ) { + return (boolean) PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value)); + } + return false; + } + + public static int getSaltBuckets(List tableMetaData, KeyValueBuilder builder, + ImmutableBytesWritable value) { + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, builder, value) + ) { + return PInteger.INSTANCE.getCodec().decodeInt(value, SortOrder.getDefault()); + } + return 0; + } + + public static long getParentSequenceNumber(List tableMetaData) { + return getSequenceNumber(getParentTableHeaderRow(tableMetaData)); + } + + public static Mutation getTableHeaderRow(List tableMetaData) { + return tableMetaData.get(0); + } /** * Get the mutation who's qualifier matches the passed key @@ -509,699 +547,768 @@ public static Mutation getTableHeaderRow(List tableMetaData) { * when dealing with a regular {@link KeyValue} vs. 
a custom KeyValue as the latter may not * support things like {@link KeyValue#getBuffer()} * @param headerRow mutation to check - * @param key to check - * @param builder that created the {@link KeyValue KeyValues} in the {@link Mutation} - * @param ptr to point to the KeyValue's value if found + * @param key to check + * @param builder that created the {@link KeyValue KeyValues} in the {@link Mutation} + * @param ptr to point to the KeyValue's value if found * @return true if the KeyValue was found and false otherwise */ - public static boolean getMutationValue(Mutation headerRow, byte[] key, - KeyValueBuilder builder, ImmutableBytesWritable ptr) { - List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); - if (kvs != null) { - for (Cell cell : kvs) { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell); - if (builder.compareQualifier(kv, key, 0, key.length) ==0) { - builder.getValueAsPtr(kv, ptr); - return true; - } - } - } - return false; - } - - public static KeyValue getMutationValue(Mutation headerRow, byte[] key, - KeyValueBuilder builder) { - List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); - if (kvs != null) { - for (Cell cell : kvs) { - KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell); - if (builder.compareQualifier(kv, key, 0, key.length) ==0) { - return kv; - } - } - } - return null; - } - - public static boolean setMutationValue(Mutation headerRow, byte[] key, - KeyValueBuilder builder, KeyValue keyValue) { - List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); - if (kvs != null) { - for (Cell cell : kvs) { - KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell); - if (builder.compareQualifier(kv, key, 0, key.length) ==0) { - KeyValueBuilder.addQuietly(headerRow, keyValue); - return true; - } - } - } - return false; - } - - public static List getTableCellsFromMutations(List tableMetaData) { - List tableCells = Lists.newArrayList(); - byte[] tableKey = tableMetaData.get(0).getRow(); - for (int k = 0; k < tableMetaData.size(); k++) { - Mutation m = tableMetaData.get(k); - if (Bytes.equals(m.getRow(), tableKey)) { - tableCells.addAll(getCellList(m)); - } - } - return tableCells; - } - - public static List> getColumnAndLinkCellsFromMutations(List tableMetaData) { - //skip the first mutation because it's the table header row with table-specific information - //all the rest of the mutations are either from linking rows or column definition rows - List> allColumnsCellList = Lists.newArrayList(); - byte[] tableKey = tableMetaData.get(0).getRow(); - for (int k = 1; k < tableMetaData.size(); k++) { - Mutation m = tableMetaData.get(k); - //filter out mutations for the table header row and TABLE_SEQ_NUM and parent table - //rows such as a view's column qualifier count - if (!Bytes.equals(m.getRow(), tableKey) && !(!isLinkType(m) && isSequenceNumber(m) - && !isParentTableColumnQualifierCounter(m, tableKey))) { - List listToAdd = getCellList(m); - if (listToAdd != null && listToAdd.size() > 0) { - allColumnsCellList.add(listToAdd); - } - } - } - return allColumnsCellList; - } - - private static List getCellList(Mutation m) { - List cellList = Lists.newArrayList(); - for (Cell c : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { - //Mutations will mark NULL columns as deletes, whereas when we read - //from HBase we just won't get Cells for those columns. 
To use Mutation cells - //with code expecting Cells read from HBase results, we have to purge those - //Delete mutations - if (c != null && !CellUtil.isDelete(c)) { - cellList.add(c); - } - } - return cellList; - } - - /** - * Returns the first Put element in tableMetaData. There could be leading Delete elements before the - * table header row - */ - public static Put getPutOnlyTableHeaderRow(List tableMetaData) { - for (Mutation m : tableMetaData) { - if (m instanceof Put) { return (Put) m; } - } - throw new IllegalStateException("No table header row found in table metadata"); - } - - public static Put getPutOnlyAutoPartitionColumn(PTable parentTable, List tableMetaData) { - int autoPartitionPutIndex = parentTable.isMultiTenant() ? 2: 1; - int i=0; - for (Mutation m : tableMetaData) { - if (m instanceof Put && i++==autoPartitionPutIndex) { return (Put) m; } - } - throw new IllegalStateException("No auto partition column row found in table metadata"); - } - - public static Mutation getParentTableHeaderRow(List tableMetaData) { - return tableMetaData.get(tableMetaData.size()-1); - } - - public static long getClientTimeStamp(List tableMetadata) { - Mutation m = tableMetadata.get(0); - return getClientTimeStamp(m); - } - - public static long getClientTimeStamp(Mutation m) { - Collection> kvs = m.getFamilyCellMap().values(); - // Empty if Mutation is a Delete - // TODO: confirm that Delete timestamp is reset like Put - return kvs.isEmpty() ? m.getTimestamp() : kvs.iterator().next().get(0).getTimestamp(); - } - - public static byte[] getParentLinkKey(String tenantId, String schemaName, String tableName, String indexName) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(indexName)); - } - - public static byte[] getParentLinkKey(byte[] tenantId, byte[] schemaName, byte[] tableName, byte[] indexName) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName, QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, indexName); - } - - public static byte[] getChildLinkKey(PName parentTenantId, PName parentSchemaName, PName parentTableName, PName viewTenantId, PName viewName) { - return ByteUtil.concat(parentTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, - parentSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, - parentTableName.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, - viewTenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : viewTenantId.getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, - viewName.getBytes()); - } - - public static Cell getCell(List cells, byte[] cq) { - for (Cell cell : cells) { - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cq, 0, cq.length) == 0) { - return cell; - } - } - return null; - } - - public static boolean isMultiTenant(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) { - if (getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, builder, ptr)) { - return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); - } - return false; - } - - public static boolean isTransactional(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) { - if (getMutationValue(m, PhoenixDatabaseMetaData.TRANSACTIONAL_BYTES, builder, ptr)) { - return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); - } - return false; - } - - public static boolean isSalted(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) { - return MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, builder, ptr); - } - - public static byte[] getViewIndexPhysicalName(byte[] physicalTableName) { - return getIndexPhysicalName(physicalTableName, VIEW_INDEX_TABLE_PREFIX); - } - - public static String getViewIndexPhysicalName(String physicalTableName) { - return getIndexPhysicalName(physicalTableName, VIEW_INDEX_TABLE_PREFIX); - } - - public static String getNamespaceMappedName(PName tableName, boolean isNamespaceMapped) { - String logicalName = tableName.getString(); - if (isNamespaceMapped) { - logicalName = logicalName.replace(QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR); - } - return logicalName; - } - - public static String getViewIndexPhysicalName(PName logicalTableName, boolean isNamespaceMapped) { - String logicalName = getNamespaceMappedName(logicalTableName, isNamespaceMapped); - return getIndexPhysicalName(logicalName, VIEW_INDEX_TABLE_PREFIX); - } - - private static byte[] getIndexPhysicalName(byte[] physicalTableName, String indexPrefix) { - return Bytes.toBytes(getIndexPhysicalName(Bytes.toString(physicalTableName), indexPrefix)); - } - - private static String getIndexPhysicalName(String physicalTableName, String indexPrefix) { - if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(physicalTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String tableName = SchemaUtil.getTableNameFromFullName(physicalTableName, - QueryConstants.NAMESPACE_SEPARATOR); - return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexPrefix + tableName); + public static boolean getMutationValue(Mutation headerRow, byte[] key, KeyValueBuilder builder, + ImmutableBytesWritable ptr) { + List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); + if (kvs != null) { + for (Cell cell : kvs) { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell); + if (builder.compareQualifier(kv, key, 0, key.length) == 0) { + builder.getValueAsPtr(kv, ptr); + return true; } - return indexPrefix + physicalTableName; - } - - public static byte[] getLocalIndexPhysicalName(byte[] physicalTableName) { - return getIndexPhysicalName(physicalTableName, LOCAL_INDEX_TABLE_PREFIX); - } - - public static String getLocalIndexTableName(String tableName) { - return LOCAL_INDEX_TABLE_PREFIX + tableName; - } - - public static String getLocalIndexSchemaName(String schemaName) { - return schemaName; - } 
- - public static String getLocalIndexUserTableName(String localIndexTableName) { - if (localIndexTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String tableName = SchemaUtil.getTableNameFromFullName(localIndexTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String userTableName = tableName.substring(LOCAL_INDEX_TABLE_PREFIX.length()); - return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + userTableName); - } else { - String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName); - if (!schemaName.isEmpty()) schemaName = schemaName.substring(LOCAL_INDEX_TABLE_PREFIX.length()); - String tableName = localIndexTableName.substring( - (schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length())) - + LOCAL_INDEX_TABLE_PREFIX.length()); - return SchemaUtil.getTableName(schemaName, tableName); - } - } - - public static String getViewIndexUserTableName(String viewIndexTableName) { - if (viewIndexTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String tableName = SchemaUtil.getTableNameFromFullName(viewIndexTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String userTableName = tableName.substring(VIEW_INDEX_TABLE_PREFIX.length()); - return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + userTableName); - } else { - String schemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexTableName); - if (!schemaName.isEmpty()) schemaName = schemaName.substring(VIEW_INDEX_TABLE_PREFIX.length()); - String tableName = viewIndexTableName.substring( - (schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length())) - + VIEW_INDEX_TABLE_PREFIX.length()); - return SchemaUtil.getTableName(schemaName, tableName); + } + } + return false; + } + + public static KeyValue getMutationValue(Mutation headerRow, byte[] key, KeyValueBuilder builder) { + List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); + if (kvs != null) { + for (Cell cell : kvs) { + KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell); + if (builder.compareQualifier(kv, key, 0, key.length) == 0) { + return kv; } - } - - public static String getOldViewIndexSequenceSchemaName(PName physicalName, boolean isNamespaceMapped) { - if (!isNamespaceMapped) { return VIEW_INDEX_SEQUENCE_PREFIX + physicalName.getString(); } - return SchemaUtil.getSchemaNameFromFullName(physicalName.toString()); - } - - public static String getOldViewIndexSequenceName(PName physicalName, PName tenantId, boolean isNamespaceMapped) { - if (!isNamespaceMapped) { return VIEW_INDEX_SEQUENCE_NAME_PREFIX + (tenantId == null ? "" : tenantId); } - return SchemaUtil.getTableNameFromFullName(physicalName.toString()) + VIEW_INDEX_SEQUENCE_NAME_PREFIX; - } - - public static SequenceKey getOldViewIndexSequenceKey(String tenantId, PName physicalName, int nSaltBuckets, - boolean isNamespaceMapped) { - // Create global sequence of the form: - // rather than tenant-specific sequence, as it makes it much easier - // to cleanup when the physical table is dropped, as we can delete - // all global sequences leading with + physical name. 
- String schemaName = getOldViewIndexSequenceSchemaName(physicalName, isNamespaceMapped); - String tableName = getOldViewIndexSequenceName(physicalName, PNameFactory.newName(tenantId), isNamespaceMapped); - return new SequenceKey(isNamespaceMapped ? tenantId : null, schemaName, tableName, nSaltBuckets); - } - - public static String getViewIndexSequenceSchemaName(PName logicalBaseTableName, boolean isNamespaceMapped) { - if (!isNamespaceMapped) { - String baseTableName = SchemaUtil.getParentTableNameFromIndexTable(logicalBaseTableName.getString(), - MetaDataUtil.VIEW_INDEX_TABLE_PREFIX); - return SchemaUtil.getSchemaNameFromFullName(baseTableName); - } else { - return SchemaUtil.getSchemaNameFromFullName(logicalBaseTableName.toString()); + } + } + return null; + } + + public static boolean setMutationValue(Mutation headerRow, byte[] key, KeyValueBuilder builder, + KeyValue keyValue) { + List kvs = headerRow.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES); + if (kvs != null) { + for (Cell cell : kvs) { + KeyValue kv = org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell); + if (builder.compareQualifier(kv, key, 0, key.length) == 0) { + KeyValueBuilder.addQuietly(headerRow, keyValue); + return true; } - - } - - public static String getViewIndexSequenceName(PName physicalName, PName tenantId, boolean isNamespaceMapped) { - return SchemaUtil.getTableNameFromFullName(physicalName.toString()) + VIEW_INDEX_SEQUENCE_NAME_PREFIX; - } - - /** - * - * @param tenantId No longer used, but kept in signature for backwards compatibility - * @param physicalName Name of physical view index table - * @param nSaltBuckets Number of salt buckets - * @param isNamespaceMapped Is namespace mapping enabled - * @return SequenceKey for the ViewIndexId - */ - public static SequenceKey getViewIndexSequenceKey(String tenantId, PName physicalName, int nSaltBuckets, - boolean isNamespaceMapped) { - // Create global sequence of the form: . - // We can't use a tenant-owned or escaped sequence because of collisions, - // with other view indexes that may be global or owned by other tenants that - // also use this same physical view index table. It's also much easier - // to cleanup when the physical table is dropped, as we can delete - // all global sequences leading with + physical name. 
- String schemaName = getViewIndexSequenceSchemaName(physicalName, isNamespaceMapped); - String tableName = getViewIndexSequenceName(physicalName, null, isNamespaceMapped); - return new SequenceKey(null, schemaName, tableName, nSaltBuckets); - } - - public static PDataType getViewIndexIdDataType() { - return PLong.INSTANCE; - } - - public static PDataType getLegacyViewIndexIdDataType() { - return PSmallint.INSTANCE; - } - - public static String getViewIndexIdColumnName() { - return VIEW_INDEX_ID_COLUMN_NAME; - } - - public static boolean hasViewIndexTable(PhoenixConnection connection, PName physicalName) throws SQLException { - return hasViewIndexTable(connection, physicalName.getBytes()); - } - - public static boolean hasViewIndexTable(PhoenixConnection connection, byte[] physicalTableName) - throws SQLException { - byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); - try { - TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName); - return desc != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(IS_VIEW_INDEX_TABLE_PROP_BYTES))); - } catch (TableNotFoundException e) { - return false; + } + } + return false; + } + + public static List getTableCellsFromMutations(List tableMetaData) { + List tableCells = Lists.newArrayList(); + byte[] tableKey = tableMetaData.get(0).getRow(); + for (int k = 0; k < tableMetaData.size(); k++) { + Mutation m = tableMetaData.get(k); + if (Bytes.equals(m.getRow(), tableKey)) { + tableCells.addAll(getCellList(m)); + } + } + return tableCells; + } + + public static List> getColumnAndLinkCellsFromMutations(List tableMetaData) { + // skip the first mutation because it's the table header row with table-specific information + // all the rest of the mutations are either from linking rows or column definition rows + List> allColumnsCellList = Lists.newArrayList(); + byte[] tableKey = tableMetaData.get(0).getRow(); + for (int k = 1; k < tableMetaData.size(); k++) { + Mutation m = tableMetaData.get(k); + // filter out mutations for the table header row and TABLE_SEQ_NUM and parent table + // rows such as a view's column qualifier count + if ( + !Bytes.equals(m.getRow(), tableKey) && !(!isLinkType(m) && isSequenceNumber(m) + && !isParentTableColumnQualifierCounter(m, tableKey)) + ) { + List listToAdd = getCellList(m); + if (listToAdd != null && listToAdd.size() > 0) { + allColumnsCellList.add(listToAdd); } + } } + return allColumnsCellList; + } - public static boolean hasLocalIndexTable(PhoenixConnection connection, PName physicalName) throws SQLException { - return hasLocalIndexTable(connection, physicalName.getBytes()); + private static List getCellList(Mutation m) { + List cellList = Lists.newArrayList(); + for (Cell c : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { + // Mutations will mark NULL columns as deletes, whereas when we read + // from HBase we just won't get Cells for those columns. 
To use Mutation cells + // with code expecting Cells read from HBase results, we have to purge those + // Delete mutations + if (c != null && !CellUtil.isDelete(c)) { + cellList.add(c); + } } + return cellList; + } - public static boolean hasLocalIndexTable(PhoenixConnection connection, byte[] physicalTableName) throws SQLException { - try { - TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName); - if (desc == null ) { - return false; - } - return hasLocalIndexColumnFamily(desc); - } catch (TableNotFoundException e) { - return false; - } - } - - public static boolean hasLocalIndexColumnFamily(TableDescriptor desc) { - for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { - if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - return true; - } - } - return false; - } - - public static List getNonLocalIndexColumnFamilies(TableDescriptor desc) { - List families = new ArrayList(desc.getColumnFamilies().length); - for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { - if (!cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - families.add(cf.getName()); - } - } - return families; - } - - public static List getLocalIndexColumnFamilies(PhoenixConnection conn, byte[] physicalTableName) throws SQLException { - TableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName); - if (desc == null ) { - return Collections.emptyList(); - } - List families = new ArrayList(desc.getColumnFamilies().length / 2); - for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { - if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - families.add(cf.getName()); - } - } - return families; - } - - public static void deleteViewIndexSequences(PhoenixConnection connection, PName name, boolean isNamespaceMapped) - throws SQLException { - String schemaName = getViewIndexSequenceSchemaName(name, isNamespaceMapped); - String sequenceName = getViewIndexSequenceName(name, null, isNamespaceMapped); - String delQuery = String.format(" DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE - + " WHERE " + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " %s AND " - + PhoenixDatabaseMetaData.SEQUENCE_NAME + " = ? ", - schemaName.length() > 0 ? "= ? 
" : " IS NULL"); - try (PreparedStatement delSeqStmt = connection.prepareStatement(delQuery)) { - if (schemaName.length() > 0) { - delSeqStmt.setString(1, schemaName); - delSeqStmt.setString(2, sequenceName); - } else { - delSeqStmt.setString(1, sequenceName); - } - delSeqStmt.executeUpdate(); - } - } - - public static boolean propertyNotAllowedToBeOutOfSync(String colFamProp) { - return SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.contains(colFamProp); - } - - public static Map getSyncedProps(ColumnFamilyDescriptor defaultCFDesc) { - Map syncedProps = new HashMap<>(); - if (defaultCFDesc != null) { - for (String propToKeepInSync: SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES) { - syncedProps.put(propToKeepInSync, Bytes.toString( - defaultCFDesc.getValue(Bytes.toBytes(propToKeepInSync)))); - } - } - return syncedProps; - } - - public static Scan newTableRowsScan(byte[] key, long startTimeStamp, long stopTimeStamp){ - return newTableRowsScan(key, null, startTimeStamp, stopTimeStamp); - } - - public static Scan newTableRowsScan(byte[] startKey, byte[] stopKey, long startTimeStamp, long stopTimeStamp) { - Scan scan = new Scan(); - ScanUtil.setTimeRange(scan, startTimeStamp, stopTimeStamp); - scan.withStartRow(startKey); - if (stopKey == null) { - stopKey = ByteUtil.concat(startKey, QueryConstants.SEPARATOR_BYTE_ARRAY); - ByteUtil.nextKey(stopKey, stopKey.length); - } - scan.withStopRow(stopKey); - return scan; - } - - public static LinkType getLinkType(Mutation tableMutation) { - return getLinkType(tableMutation.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES)); - } - - public static LinkType getLinkType(Collection kvs) { - if (kvs != null) { - for (Cell kv : kvs) { - if (isLinkType(kv)) { - return LinkType.fromSerializedValue(PUnsignedTinyint.INSTANCE.getCodec(). - decodeByte(kv.getValueArray(), kv.getValueOffset(), - SortOrder.getDefault())); } - } - } - return null; - } + /** + * Returns the first Put element in tableMetaData. There could be leading Delete + * elements before the table header row + */ + public static Put getPutOnlyTableHeaderRow(List tableMetaData) { + for (Mutation m : tableMetaData) { + if (m instanceof Put) { + return (Put) m; + } + } + throw new IllegalStateException("No table header row found in table metadata"); + } + + public static Put getPutOnlyAutoPartitionColumn(PTable parentTable, + List tableMetaData) { + int autoPartitionPutIndex = parentTable.isMultiTenant() ? 2 : 1; + int i = 0; + for (Mutation m : tableMetaData) { + if (m instanceof Put && i++ == autoPartitionPutIndex) { + return (Put) m; + } + } + throw new IllegalStateException("No auto partition column row found in table metadata"); + } + + public static Mutation getParentTableHeaderRow(List tableMetaData) { + return tableMetaData.get(tableMetaData.size() - 1); + } + + public static long getClientTimeStamp(List tableMetadata) { + Mutation m = tableMetadata.get(0); + return getClientTimeStamp(m); + } + + public static long getClientTimeStamp(Mutation m) { + Collection> kvs = m.getFamilyCellMap().values(); + // Empty if Mutation is a Delete + // TODO: confirm that Delete timestamp is reset like Put + return kvs.isEmpty() ? m.getTimestamp() : kvs.iterator().next().get(0).getTimestamp(); + } + + public static byte[] getParentLinkKey(String tenantId, String schemaName, String tableName, + String indexName) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + QueryConstants.SEPARATOR_BYTE_ARRAY, + schemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), + QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(indexName)); + } + + public static byte[] getParentLinkKey(byte[] tenantId, byte[] schemaName, byte[] tableName, + byte[] indexName) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, + QueryConstants.SEPARATOR_BYTE_ARRAY, + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, + QueryConstants.SEPARATOR_BYTE_ARRAY, tableName, QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY, indexName); + } + + public static byte[] getChildLinkKey(PName parentTenantId, PName parentSchemaName, + PName parentTableName, PName viewTenantId, PName viewName) { + return ByteUtil.concat( + parentTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentTenantId.getBytes(), + QueryConstants.SEPARATOR_BYTE_ARRAY, + parentSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaName.getBytes(), + QueryConstants.SEPARATOR_BYTE_ARRAY, parentTableName.getBytes(), + QueryConstants.SEPARATOR_BYTE_ARRAY, + viewTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : viewTenantId.getBytes(), + QueryConstants.SEPARATOR_BYTE_ARRAY, viewName.getBytes()); + } + + public static Cell getCell(List cells, byte[] cq) { + for (Cell cell : cells) { + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), cq, 0, cq.length) == 0 + ) { + return cell; + } + } + return null; + } + + public static boolean isMultiTenant(Mutation m, KeyValueBuilder builder, + ImmutableBytesWritable ptr) { + if (getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, builder, ptr)) { + return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); + } + return false; + } + + public static boolean isTransactional(Mutation m, KeyValueBuilder builder, + ImmutableBytesWritable ptr) { + if (getMutationValue(m, PhoenixDatabaseMetaData.TRANSACTIONAL_BYTES, builder, ptr)) { + return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr)); + } + return false; + } + + public static boolean isSalted(Mutation m, KeyValueBuilder builder, ImmutableBytesWritable ptr) { + return MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, builder, + ptr); + } + + public static byte[] getViewIndexPhysicalName(byte[] physicalTableName) { + return getIndexPhysicalName(physicalTableName, VIEW_INDEX_TABLE_PREFIX); + } + + public static String getViewIndexPhysicalName(String physicalTableName) { + return getIndexPhysicalName(physicalTableName, VIEW_INDEX_TABLE_PREFIX); + } + + public static String getNamespaceMappedName(PName tableName, boolean isNamespaceMapped) { + String logicalName = tableName.getString(); + if (isNamespaceMapped) { + logicalName = + logicalName.replace(QueryConstants.NAME_SEPARATOR, QueryConstants.NAMESPACE_SEPARATOR); + } + return logicalName; + } + + public static String getViewIndexPhysicalName(PName logicalTableName, boolean isNamespaceMapped) { + String logicalName = getNamespaceMappedName(logicalTableName, isNamespaceMapped); + return getIndexPhysicalName(logicalName, VIEW_INDEX_TABLE_PREFIX); + } + + private static byte[] getIndexPhysicalName(byte[] physicalTableName, String indexPrefix) { + return Bytes.toBytes(getIndexPhysicalName(Bytes.toString(physicalTableName), indexPrefix)); + } + + private static String getIndexPhysicalName(String physicalTableName, String indexPrefix) { + if 
(physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + String schemaName = + SchemaUtil.getSchemaNameFromFullName(physicalTableName, QueryConstants.NAMESPACE_SEPARATOR); + String tableName = + SchemaUtil.getTableNameFromFullName(physicalTableName, QueryConstants.NAMESPACE_SEPARATOR); + return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexPrefix + tableName); + } + return indexPrefix + physicalTableName; + } + + public static byte[] getLocalIndexPhysicalName(byte[] physicalTableName) { + return getIndexPhysicalName(physicalTableName, LOCAL_INDEX_TABLE_PREFIX); + } + + public static String getLocalIndexTableName(String tableName) { + return LOCAL_INDEX_TABLE_PREFIX + tableName; + } + + public static String getLocalIndexSchemaName(String schemaName) { + return schemaName; + } + + public static String getLocalIndexUserTableName(String localIndexTableName) { + if (localIndexTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName, + QueryConstants.NAMESPACE_SEPARATOR); + String tableName = SchemaUtil.getTableNameFromFullName(localIndexTableName, + QueryConstants.NAMESPACE_SEPARATOR); + String userTableName = tableName.substring(LOCAL_INDEX_TABLE_PREFIX.length()); + return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + userTableName); + } else { + String schemaName = SchemaUtil.getSchemaNameFromFullName(localIndexTableName); + if (!schemaName.isEmpty()) + schemaName = schemaName.substring(LOCAL_INDEX_TABLE_PREFIX.length()); + String tableName = localIndexTableName.substring( + (schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length())) + + LOCAL_INDEX_TABLE_PREFIX.length()); + return SchemaUtil.getTableName(schemaName, tableName); + } + } + + public static String getViewIndexUserTableName(String viewIndexTableName) { + if (viewIndexTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexTableName, + QueryConstants.NAMESPACE_SEPARATOR); + String tableName = + SchemaUtil.getTableNameFromFullName(viewIndexTableName, QueryConstants.NAMESPACE_SEPARATOR); + String userTableName = tableName.substring(VIEW_INDEX_TABLE_PREFIX.length()); + return (schemaName + QueryConstants.NAMESPACE_SEPARATOR + userTableName); + } else { + String schemaName = SchemaUtil.getSchemaNameFromFullName(viewIndexTableName); + if (!schemaName.isEmpty()) + schemaName = schemaName.substring(VIEW_INDEX_TABLE_PREFIX.length()); + String tableName = viewIndexTableName.substring( + (schemaName.isEmpty() ? 0 : (schemaName.length() + QueryConstants.NAME_SEPARATOR.length())) + + VIEW_INDEX_TABLE_PREFIX.length()); + return SchemaUtil.getTableName(schemaName, tableName); + } + } + + public static String getOldViewIndexSequenceSchemaName(PName physicalName, + boolean isNamespaceMapped) { + if (!isNamespaceMapped) { + return VIEW_INDEX_SEQUENCE_PREFIX + physicalName.getString(); + } + return SchemaUtil.getSchemaNameFromFullName(physicalName.toString()); + } + + public static String getOldViewIndexSequenceName(PName physicalName, PName tenantId, + boolean isNamespaceMapped) { + if (!isNamespaceMapped) { + return VIEW_INDEX_SEQUENCE_NAME_PREFIX + (tenantId == null ? 
"" : tenantId); + } + return SchemaUtil.getTableNameFromFullName(physicalName.toString()) + + VIEW_INDEX_SEQUENCE_NAME_PREFIX; + } + + public static SequenceKey getOldViewIndexSequenceKey(String tenantId, PName physicalName, + int nSaltBuckets, boolean isNamespaceMapped) { + // Create global sequence of the form: + // rather than tenant-specific sequence, as it makes it much easier + // to cleanup when the physical table is dropped, as we can delete + // all global sequences leading with + physical name. + String schemaName = getOldViewIndexSequenceSchemaName(physicalName, isNamespaceMapped); + String tableName = + getOldViewIndexSequenceName(physicalName, PNameFactory.newName(tenantId), isNamespaceMapped); + return new SequenceKey(isNamespaceMapped ? tenantId : null, schemaName, tableName, + nSaltBuckets); + } + + public static String getViewIndexSequenceSchemaName(PName logicalBaseTableName, + boolean isNamespaceMapped) { + if (!isNamespaceMapped) { + String baseTableName = SchemaUtil.getParentTableNameFromIndexTable( + logicalBaseTableName.getString(), MetaDataUtil.VIEW_INDEX_TABLE_PREFIX); + return SchemaUtil.getSchemaNameFromFullName(baseTableName); + } else { + return SchemaUtil.getSchemaNameFromFullName(logicalBaseTableName.toString()); + } + + } + + public static String getViewIndexSequenceName(PName physicalName, PName tenantId, + boolean isNamespaceMapped) { + return SchemaUtil.getTableNameFromFullName(physicalName.toString()) + + VIEW_INDEX_SEQUENCE_NAME_PREFIX; + } - public static boolean isLocalIndex(String physicalName) { - if (physicalName.contains(LOCAL_INDEX_TABLE_PREFIX)) { return true; } + /** + * @param tenantId No longer used, but kept in signature for backwards compatibility + * @param physicalName Name of physical view index table + * @param nSaltBuckets Number of salt buckets + * @param isNamespaceMapped Is namespace mapping enabled + * @return SequenceKey for the ViewIndexId + */ + public static SequenceKey getViewIndexSequenceKey(String tenantId, PName physicalName, + int nSaltBuckets, boolean isNamespaceMapped) { + // Create global sequence of the form: . + // We can't use a tenant-owned or escaped sequence because of collisions, + // with other view indexes that may be global or owned by other tenants that + // also use this same physical view index table. It's also much easier + // to cleanup when the physical table is dropped, as we can delete + // all global sequences leading with + physical name. 
+ String schemaName = getViewIndexSequenceSchemaName(physicalName, isNamespaceMapped); + String tableName = getViewIndexSequenceName(physicalName, null, isNamespaceMapped); + return new SequenceKey(null, schemaName, tableName, nSaltBuckets); + } + + public static PDataType getViewIndexIdDataType() { + return PLong.INSTANCE; + } + + public static PDataType getLegacyViewIndexIdDataType() { + return PSmallint.INSTANCE; + } + + public static String getViewIndexIdColumnName() { + return VIEW_INDEX_ID_COLUMN_NAME; + } + + public static boolean hasViewIndexTable(PhoenixConnection connection, PName physicalName) + throws SQLException { + return hasViewIndexTable(connection, physicalName.getBytes()); + } + + public static boolean hasViewIndexTable(PhoenixConnection connection, byte[] physicalTableName) + throws SQLException { + byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); + try { + TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName); + return desc != null && Boolean.TRUE + .equals(PBoolean.INSTANCE.toObject(desc.getValue(IS_VIEW_INDEX_TABLE_PROP_BYTES))); + } catch (TableNotFoundException e) { + return false; + } + } + + public static boolean hasLocalIndexTable(PhoenixConnection connection, PName physicalName) + throws SQLException { + return hasLocalIndexTable(connection, physicalName.getBytes()); + } + + public static boolean hasLocalIndexTable(PhoenixConnection connection, byte[] physicalTableName) + throws SQLException { + try { + TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName); + if (desc == null) { return false; - } - - public static boolean isLinkType(Cell kv) { - return CellUtil.matchingQualifier(kv, PhoenixDatabaseMetaData.LINK_TYPE_BYTES); - } - - public static boolean isLinkType(Mutation m) { - boolean foundLinkType = false; - for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { - if (isLinkType(kv)) { - foundLinkType = true; - break; - } + } + return hasLocalIndexColumnFamily(desc); + } catch (TableNotFoundException e) { + return false; + } + } + + public static boolean hasLocalIndexColumnFamily(TableDescriptor desc) { + for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { + if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + return true; + } + } + return false; + } + + public static List getNonLocalIndexColumnFamilies(TableDescriptor desc) { + List families = new ArrayList(desc.getColumnFamilies().length); + for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { + if (!cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + families.add(cf.getName()); + } + } + return families; + } + + public static List getLocalIndexColumnFamilies(PhoenixConnection conn, + byte[] physicalTableName) throws SQLException { + TableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName); + if (desc == null) { + return Collections.emptyList(); + } + List families = new ArrayList(desc.getColumnFamilies().length / 2); + for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) { + if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + families.add(cf.getName()); + } + } + return families; + } + + public static void deleteViewIndexSequences(PhoenixConnection connection, PName name, + boolean isNamespaceMapped) throws SQLException { + String schemaName = getViewIndexSequenceSchemaName(name, isNamespaceMapped); + String sequenceName = 
getViewIndexSequenceName(name, null, isNamespaceMapped); + String delQuery = String.format( + " DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " + + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " %s AND " + + PhoenixDatabaseMetaData.SEQUENCE_NAME + " = ? ", + schemaName.length() > 0 ? "= ? " : " IS NULL"); + try (PreparedStatement delSeqStmt = connection.prepareStatement(delQuery)) { + if (schemaName.length() > 0) { + delSeqStmt.setString(1, schemaName); + delSeqStmt.setString(2, sequenceName); + } else { + delSeqStmt.setString(1, sequenceName); + } + delSeqStmt.executeUpdate(); + } + } + + public static boolean propertyNotAllowedToBeOutOfSync(String colFamProp) { + return SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES.contains(colFamProp); + } + + public static Map getSyncedProps(ColumnFamilyDescriptor defaultCFDesc) { + Map syncedProps = new HashMap<>(); + if (defaultCFDesc != null) { + for (String propToKeepInSync : SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES) { + syncedProps.put(propToKeepInSync, + Bytes.toString(defaultCFDesc.getValue(Bytes.toBytes(propToKeepInSync)))); + } + } + return syncedProps; + } + + public static Scan newTableRowsScan(byte[] key, long startTimeStamp, long stopTimeStamp) { + return newTableRowsScan(key, null, startTimeStamp, stopTimeStamp); + } + + public static Scan newTableRowsScan(byte[] startKey, byte[] stopKey, long startTimeStamp, + long stopTimeStamp) { + Scan scan = new Scan(); + ScanUtil.setTimeRange(scan, startTimeStamp, stopTimeStamp); + scan.withStartRow(startKey); + if (stopKey == null) { + stopKey = ByteUtil.concat(startKey, QueryConstants.SEPARATOR_BYTE_ARRAY); + ByteUtil.nextKey(stopKey, stopKey.length); + } + scan.withStopRow(stopKey); + return scan; + } + + public static LinkType getLinkType(Mutation tableMutation) { + return getLinkType( + tableMutation.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES)); + } + + public static LinkType getLinkType(Collection kvs) { + if (kvs != null) { + for (Cell kv : kvs) { + if (isLinkType(kv)) { + return LinkType.fromSerializedValue(PUnsignedTinyint.INSTANCE.getCodec() + .decodeByte(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault())); } - return foundLinkType; - } + } + } + return null; + } + + public static boolean isLocalIndex(String physicalName) { + if (physicalName.contains(LOCAL_INDEX_TABLE_PREFIX)) { + return true; + } + return false; + } + + public static boolean isLinkType(Cell kv) { + return CellUtil.matchingQualifier(kv, PhoenixDatabaseMetaData.LINK_TYPE_BYTES); + } + + public static boolean isLinkType(Mutation m) { + boolean foundLinkType = false; + for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { + if (isLinkType(kv)) { + foundLinkType = true; + break; + } + } + return foundLinkType; + } + + public static boolean isParentTableColumnQualifierCounter(Mutation m, byte[] tableRow) { + boolean foundCQCounter = false; + for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { + if (isParentTableColumnQualifierCounter(kv, tableRow)) { + foundCQCounter = true; + break; + } + } + return foundCQCounter; + } + + public static boolean isParentTableColumnQualifierCounter(Cell kv, byte[] tableRow) { + byte[][] tableRowKeyMetaData = new byte[5][]; + getVarChars(tableRow, tableRowKeyMetaData); + byte[] tableName = tableRowKeyMetaData[TABLE_NAME_INDEX]; + + byte[][] columnRowKeyMetaData = new byte[5][]; + int nColumns = + getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, columnRowKeyMetaData); + if (nColumns == 5) { + 
byte[] columnTableName = columnRowKeyMetaData[TABLE_NAME_INDEX]; + if (!Bytes.equals(tableName, columnTableName)) { + return CellUtil.matchingQualifier(kv, COLUMN_QUALIFIER_BYTES); + } + } + return false; + } + + public static boolean isViewIndex(String physicalName) { + if (physicalName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + return SchemaUtil.getTableNameFromFullName(physicalName).startsWith(VIEW_INDEX_TABLE_PREFIX); + } else { + return physicalName.startsWith(VIEW_INDEX_TABLE_PREFIX); + } + } + + public static String getAutoPartitionColumnName(PTable parentTable) { + List parentTableColumns = parentTable.getPKColumns(); + PColumn column = parentTableColumns.get(getAutoPartitionColIndex(parentTable)); + return column.getName().getString(); + } + + // this method should only be called on the parent table (since it has the _SALT column) + public static int getAutoPartitionColIndex(PTable parentTable) { + boolean isMultiTenant = parentTable.isMultiTenant(); + boolean isSalted = parentTable.getBucketNum() != null; + return (isMultiTenant && isSalted) ? 2 : (isMultiTenant || isSalted) ? 1 : 0; + } + + public static boolean isHColumnProperty(String propName) { + return ColumnFamilyDescriptorBuilder.getDefaultValues().containsKey(propName); + } + + public static boolean isHTableProperty(String propName) { + return !isHColumnProperty(propName) && !TableProperty.isPhoenixTableProperty(propName); + } + + public static boolean isLocalIndexFamily(ImmutableBytesPtr cfPtr) { + return cfPtr.getLength() >= QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length + && Bytes.compareTo(cfPtr.get(), cfPtr.getOffset(), + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length, + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES, 0, + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length) == 0; + } + + public static boolean isLocalIndexFamily(byte[] cf) { + return Bytes.startsWith(cf, QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES); + } + + public static final byte[] getPhysicalTableRowForView(PTable view) { + byte[] physicalTableSchemaName = + Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(view.getPhysicalName().getString())); + byte[] physicalTableName = + Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString())); + return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, + physicalTableName); + } - public static boolean isParentTableColumnQualifierCounter(Mutation m, byte[] tableRow) { - boolean foundCQCounter = false; - for (Cell kv : m.getFamilyCellMap().get(TABLE_FAMILY_BYTES)) { - if (isParentTableColumnQualifierCounter(kv, tableRow)) { - foundCQCounter = true; - break; - } - } - return foundCQCounter; - } - public static boolean isParentTableColumnQualifierCounter(Cell kv, byte[] tableRow) { - byte[][] tableRowKeyMetaData = new byte[5][]; - getVarChars(tableRow, tableRowKeyMetaData); - byte[] tableName = tableRowKeyMetaData[TABLE_NAME_INDEX]; - - byte[][] columnRowKeyMetaData = new byte[5][]; - int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - 0, columnRowKeyMetaData); - if (nColumns == 5) { - byte[] columnTableName = columnRowKeyMetaData[TABLE_NAME_INDEX]; - if (!Bytes.equals(tableName, columnTableName)) { - return CellUtil.matchingQualifier(kv, COLUMN_QUALIFIER_BYTES); - } - } - return false; - } - - public static boolean isViewIndex(String physicalName) { - if (physicalName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - return 
SchemaUtil.getTableNameFromFullName(physicalName).startsWith(VIEW_INDEX_TABLE_PREFIX); - } else { - return physicalName.startsWith(VIEW_INDEX_TABLE_PREFIX); + /** + * Extract mutations of link type {@link PTable.LinkType#CHILD_TABLE} from the list of mutations. + * The child link mutations will be sent to SYSTEM.CHILD_LINK and other mutations to + * SYSTEM.CATALOG + * @param metadataMutations total list of mutations + * @return list of mutations pertaining to parent-child links + */ + public static List removeChildLinkMutations(List metadataMutations) { + List childLinkMutations = Lists.newArrayList(); + Iterator iter = metadataMutations.iterator(); + while (iter.hasNext()) { + Mutation m = iter.next(); + for (Cell kv : m.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES)) { + // remove mutations of link type LinkType.CHILD_TABLE + if ( + (Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), + PhoenixDatabaseMetaData.LINK_TYPE_BYTES, 0, + PhoenixDatabaseMetaData.LINK_TYPE_BYTES.length) == 0) + && ((Bytes.compareTo(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(), + LinkType.CHILD_TABLE.getSerializedValueAsByteArray(), 0, + LinkType.CHILD_TABLE.getSerializedValueAsByteArray().length) == 0)) + ) { + childLinkMutations.add(m); + iter.remove(); } + } } + return childLinkMutations; + } - public static String getAutoPartitionColumnName(PTable parentTable) { - List parentTableColumns = parentTable.getPKColumns(); - PColumn column = parentTableColumns.get(getAutoPartitionColIndex(parentTable)); - return column.getName().getString(); + public static IndexType getIndexType(List tableMetaData, KeyValueBuilder builder, + ImmutableBytesWritable value) { + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.INDEX_TYPE_BYTES, builder, value) + ) { + return IndexType.fromSerializedValue(value.get()[value.getOffset()]); } + return null; + } - // this method should only be called on the parent table (since it has the _SALT column) - public static int getAutoPartitionColIndex(PTable parentTable) { - boolean isMultiTenant = parentTable.isMultiTenant(); - boolean isSalted = parentTable.getBucketNum()!=null; - return (isMultiTenant && isSalted) ? 2 : (isMultiTenant || isSalted) ? 
1 : 0; - } - - public static boolean isHColumnProperty(String propName) { - return ColumnFamilyDescriptorBuilder.getDefaultValues().containsKey(propName); - } - - public static boolean isHTableProperty(String propName) { - return !isHColumnProperty(propName) && !TableProperty.isPhoenixTableProperty(propName); - } - - public static boolean isLocalIndexFamily(ImmutableBytesPtr cfPtr) { - return cfPtr.getLength() >= QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length && - Bytes.compareTo(cfPtr.get(), cfPtr.getOffset(), QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length, QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES, 0, QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES.length) == 0; - } - - public static boolean isLocalIndexFamily(byte[] cf) { - return Bytes.startsWith(cf, QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES); - } - - public static final byte[] getPhysicalTableRowForView(PTable view) { - byte[] physicalTableSchemaName = Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(view.getPhysicalName().getString())); - byte[] physicalTableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString())); - return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, physicalTableName); - } - - /** - * Extract mutations of link type {@link PTable.LinkType#CHILD_TABLE} from the list of mutations. - * The child link mutations will be sent to SYSTEM.CHILD_LINK and other mutations to SYSTEM.CATALOG - * @param metadataMutations total list of mutations - * @return list of mutations pertaining to parent-child links - */ - public static List removeChildLinkMutations(List metadataMutations) { - List childLinkMutations = Lists.newArrayList(); - Iterator iter = metadataMutations.iterator(); - while (iter.hasNext()) { - Mutation m = iter.next(); - for (Cell kv : m.getFamilyCellMap().get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES)) { - // remove mutations of link type LinkType.CHILD_TABLE - if ((Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), - PhoenixDatabaseMetaData.LINK_TYPE_BYTES, 0, - PhoenixDatabaseMetaData.LINK_TYPE_BYTES.length) == 0) - && ((Bytes.compareTo(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(), - LinkType.CHILD_TABLE.getSerializedValueAsByteArray(), 0, - LinkType.CHILD_TABLE.getSerializedValueAsByteArray().length) == 0))) { - childLinkMutations.add(m); - iter.remove(); - } - } - } - return childLinkMutations; - } - - public static IndexType getIndexType(List tableMetaData, KeyValueBuilder builder, - ImmutableBytesWritable value) { - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.INDEX_TYPE_BYTES, builder, - value)) { return IndexType.fromSerializedValue(value.get()[value.getOffset()]); } - return null; - } - - /** - * Retrieve the viewIndexId datatype from create request. 
- * - * @see MetaDataEndpointImpl#createTable(com.google.protobuf.RpcController, - * org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest, - * com.google.protobuf.RpcCallback) - */ - public static PDataType getIndexDataType(List tableMetaData, - KeyValueBuilder builder, ImmutableBytesWritable value) { - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), - PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES, builder, value)) { - return PDataType.fromTypeId( - PInteger.INSTANCE.getCodec().decodeInt(value, SortOrder.getDefault())); - } - return getLegacyViewIndexIdDataType(); - } - - public static boolean getChangeDetectionEnabled(List tableMetaData) { - KeyValueBuilder builder = GenericKeyValueBuilder.INSTANCE; - ImmutableBytesWritable value = new ImmutableBytesWritable(); - if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), - PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED_BYTES, builder, value)) { - return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(value.get(), - value.getOffset(), - value.getLength())); + /** + * Retrieve the viewIndexId datatype from create request. + * @see MetaDataEndpointImpl#createTable(com.google.protobuf.RpcController, + * org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest, + * com.google.protobuf.RpcCallback) + */ + public static PDataType getIndexDataType(List tableMetaData, KeyValueBuilder builder, + ImmutableBytesWritable value) { + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES, builder, value) + ) { + return PDataType + .fromTypeId(PInteger.INSTANCE.getCodec().decodeInt(value, SortOrder.getDefault())); + } + return getLegacyViewIndexIdDataType(); + } + + public static boolean getChangeDetectionEnabled(List tableMetaData) { + KeyValueBuilder builder = GenericKeyValueBuilder.INSTANCE; + ImmutableBytesWritable value = new ImmutableBytesWritable(); + if ( + getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), + PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED_BYTES, builder, value) + ) { + return Boolean.TRUE + .equals(PBoolean.INSTANCE.toObject(value.get(), value.getOffset(), value.getLength())); + } else { + return false; + } + } + + public static PColumn getColumn(int pkCount, byte[][] rowKeyMetaData, PTable table) + throws ColumnFamilyNotFoundException, ColumnNotFoundException { + PColumn col = null; + if ( + pkCount > FAMILY_NAME_INDEX + && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 + ) { + PColumnFamily family = + table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); + col = family + .getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]); + } else if ( + pkCount > COLUMN_NAME_INDEX + && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0 + ) { + col = table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX], + StandardCharsets.UTF_8)); + } + return col; + } + + public static void deleteFromStatsTable(PhoenixConnection connection, PTable table, + List physicalTableNames, List sharedTableStates) + throws SQLException { + boolean isAutoCommit = connection.getAutoCommit(); + try { + connection.setAutoCommit(true); + Set physicalTablesSet = new HashSet<>(); + physicalTablesSet.add(table.getPhysicalName().getString()); + for (byte[] physicalTableName : physicalTableNames) { + physicalTablesSet.add(Bytes.toString(physicalTableName)); + } + for (MetaDataProtocol.SharedTableState s 
: sharedTableStates) { + physicalTablesSet.add(s.getPhysicalNames().get(0).getString()); + } + StringBuilder buf = new StringBuilder("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME IN ("); + for (int i = 0; i < physicalTablesSet.size(); i++) { + buf.append(" ?,"); + } + buf.setCharAt(buf.length() - 1, ')'); + if (table.getIndexType() == IndexType.LOCAL) { + buf.append(" AND COLUMN_FAMILY IN("); + if (table.getColumnFamilies().isEmpty()) { + buf.append("'" + QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY + "',"); } else { - return false; + buf.append(QueryUtil.generateInListParams(table.getColumnFamilies().size())); } - } - - public static PColumn getColumn(int pkCount, byte[][] rowKeyMetaData, PTable table) throws ColumnFamilyNotFoundException, ColumnNotFoundException { - PColumn col = null; - if (pkCount > FAMILY_NAME_INDEX - && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) { - PColumnFamily family = - table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); - col = - family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]); - } else if (pkCount > COLUMN_NAME_INDEX - && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) { - col = table.getPKColumn(new String( - rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX], StandardCharsets.UTF_8)); + buf.setCharAt(buf.length() - 1, ')'); + } + try (PreparedStatement delStatsStmt = connection.prepareStatement(buf.toString())) { + int param = 0; + Iterator itr = physicalTablesSet.iterator(); + while (itr.hasNext()) { + delStatsStmt.setString(++param, itr.next().toString()); } - return col; - } - - public static void deleteFromStatsTable(PhoenixConnection connection, - PTable table, List physicalTableNames, - List sharedTableStates) - throws SQLException { - boolean isAutoCommit = connection.getAutoCommit(); - try { - connection.setAutoCommit(true); - Set physicalTablesSet = new HashSet<>(); - physicalTablesSet.add(table.getPhysicalName().getString()); - for (byte[] physicalTableName:physicalTableNames) { - physicalTablesSet.add(Bytes.toString(physicalTableName)); - } - for (MetaDataProtocol.SharedTableState s: sharedTableStates) { - physicalTablesSet.add(s.getPhysicalNames().get(0).getString()); - } - StringBuilder buf = new StringBuilder("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME IN ("); - for (int i = 0; i < physicalTablesSet.size(); i++) { - buf.append(" ?,"); - } - buf.setCharAt(buf.length() - 1, ')'); - if (table.getIndexType()==IndexType.LOCAL) { - buf.append(" AND COLUMN_FAMILY IN("); - if (table.getColumnFamilies().isEmpty()) { - buf.append("'" + QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY + "',"); - } else { - buf.append(QueryUtil.generateInListParams(table - .getColumnFamilies().size())); - } - buf.setCharAt(buf.length() - 1, ')'); - } - try (PreparedStatement delStatsStmt = connection.prepareStatement(buf.toString())) { - int param = 0; - Iterator itr = physicalTablesSet.iterator(); - while (itr.hasNext()) { - delStatsStmt.setString(++param, itr.next().toString()); - } - if (table.getIndexType() == IndexType.LOCAL - && !table.getColumnFamilies().isEmpty()) { - for (PColumnFamily cf : table.getColumnFamilies()) { - delStatsStmt.setString(++param, cf.getName().getString()); - } - } - delStatsStmt.execute(); - } - } finally { - connection.setAutoCommit(isAutoCommit); + if (table.getIndexType() == IndexType.LOCAL && !table.getColumnFamilies().isEmpty()) { + for (PColumnFamily cf : table.getColumnFamilies()) { + 
delStatsStmt.setString(++param, cf.getName().getString()); + } } + delStatsStmt.execute(); + } + } finally { + connection.setAutoCommit(isAutoCommit); } + } - /** - * @param conf Cluster configuration - * @param maxLookbackAge Input max lookback age - * @return Input max lookback age, if not null. If null, fallback to cluster level - * max lookback age. Will always return non-null long value. - */ - public static long getMaxLookbackAge(Configuration conf, Long maxLookbackAge) { - Preconditions.checkNotNull(conf); - return maxLookbackAge != null ? maxLookbackAge : - BaseScannerRegionObserverConstants.getMaxLookbackInMillis(conf); - } - - public static boolean avoidMetadataRPC(PhoenixConnection connection, PTable table, - PTableRef tableRef, long effectiveUpdateCacheFreq) { - return table.getRowTimestampColPos() == -1 && - connection.getMetaDataCache().getAge(tableRef) < - effectiveUpdateCacheFreq; - } + /** + * @param conf Cluster configuration + * @param maxLookbackAge Input max lookback age + * @return Input max lookback age, if not null. If null, fallback to cluster level max lookback + * age. Will always return non-null long value. + */ + public static long getMaxLookbackAge(Configuration conf, Long maxLookbackAge) { + Preconditions.checkNotNull(conf); + return maxLookbackAge != null + ? maxLookbackAge + : BaseScannerRegionObserverConstants.getMaxLookbackInMillis(conf); + } + + public static boolean avoidMetadataRPC(PhoenixConnection connection, PTable table, + PTableRef tableRef, long effectiveUpdateCacheFreq) { + return table.getRowTimestampColPos() == -1 + && connection.getMetaDataCache().getAge(tableRef) < effectiveUpdateCacheFreq; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/NumberUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/NumberUtil.java index 646f5024095..6a4cc5d0b92 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/NumberUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/NumberUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,66 +23,65 @@ /** * Utility methods for numbers like decimal, long, etc. - * - * * @since 0.1 */ public class NumberUtil { - - public static final String DEFAULT_NUMBER_FORMAT = "#,##0.###"; - /** - * Strip all trailing zeros to ensure that no digit will be zero and - * round using our default context to ensure precision doesn't exceed max allowed. - * @return new {@link BigDecimal} instance - */ - public static BigDecimal normalize(BigDecimal bigDecimal) { - return bigDecimal.round(PDataType.DEFAULT_MATH_CONTEXT).stripTrailingZeros(); - } + public static final String DEFAULT_NUMBER_FORMAT = "#,##0.###"; - public static BigDecimal setDecimalWidthAndScale(BigDecimal decimal, Integer precisionOrNull, Integer scaleOrNull) { - int precision = precisionOrNull == null ? PDataType.MAX_PRECISION : precisionOrNull; - int scale = scaleOrNull == null ? 0 : scaleOrNull; - // If we could not fit all the digits before decimal point into the new desired precision and - // scale, return null and the caller method should handle the error. 
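An editor's aside, not part of the patch: the guard commented above only checks that the target precision leaves enough digits left of the decimal point. The DECIMAL(6,2) figures below are made up to show the arithmetic with plain java.math.BigDecimal:

import java.math.BigDecimal;

public class DecimalWidthCheckExample {
  public static void main(String[] args) {
    BigDecimal value = new BigDecimal("12345.678"); // precision 8, scale 3, so 5 integer digits
    int targetPrecision = 6;
    int targetScale = 2;                            // DECIMAL(6,2) leaves room for only 4 integer digits
    boolean fits = (targetPrecision - targetScale) >= (value.precision() - value.scale());
    System.out.println(fits);                       // false, so setDecimalWidthAndScale would return null
  }
}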
- if (((precision - scale) < (decimal.precision() - decimal.scale()))){ - return null; - } - if (scaleOrNull != null) { - decimal = decimal.setScale(scale, BigDecimal.ROUND_DOWN); // FIXME: should this be ROUND_UP? - } - return decimal; + /** + * Strip all trailing zeros to ensure that no digit will be zero and round using our default + * context to ensure precision doesn't exceed max allowed. + * @return new {@link BigDecimal} instance + */ + public static BigDecimal normalize(BigDecimal bigDecimal) { + return bigDecimal.round(PDataType.DEFAULT_MATH_CONTEXT).stripTrailingZeros(); + } + + public static BigDecimal setDecimalWidthAndScale(BigDecimal decimal, Integer precisionOrNull, + Integer scaleOrNull) { + int precision = precisionOrNull == null ? PDataType.MAX_PRECISION : precisionOrNull; + int scale = scaleOrNull == null ? 0 : scaleOrNull; + // If we could not fit all the digits before decimal point into the new desired precision and + // scale, return null and the caller method should handle the error. + if (((precision - scale) < (decimal.precision() - decimal.scale()))) { + return null; + } + if (scaleOrNull != null) { + decimal = decimal.setScale(scale, BigDecimal.ROUND_DOWN); // FIXME: should this be ROUND_UP? } + return decimal; + } - public static Long add(Long num1, Long num2) { - if (num1 == null) { - if (num2 == null) { - return null; - } - return num2; - } else { - if (num2 == null) { - return num1; - } - return num1 + num2; - } + public static Long add(Long num1, Long num2) { + if (num1 == null) { + if (num2 == null) { + return null; + } + return num2; + } else { + if (num2 == null) { + return num1; + } + return num1 + num2; } + } - /** - * @return If both are null, then return null. If one is null, return the other. Else, return - * minimum of the two. - */ - public static Long getMin(Long num1, Long num2) { - if (num1 == null) { - if (num2 == null) { - return null; - } - return num2; - } else { - if (num2 == null) { - return num1; - } - return Math.min(num1, num2); - } + /** + * @return If both are null, then return null. If one is null, return the other. Else, return + * minimum of the two. + */ + public static Long getMin(Long num1, Long num2) { + if (num1 == null) { + if (num2 == null) { + return null; + } + return num2; + } else { + if (num2 == null) { + return num1; + } + return Math.min(num1, num2); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ParseNodeUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ParseNodeUtil.java index 2e81af0dbc1..72a0d5ff72d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ParseNodeUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ParseNodeUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,167 +21,159 @@ import java.util.HashSet; import java.util.Set; -import org.apache.phoenix.parse.ColumnParseNode; -import org.apache.phoenix.parse.FamilyWildcardParseNode; -import org.apache.phoenix.parse.OrderByNode; -import org.apache.phoenix.parse.ParseNodeVisitor; -import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.FromCompiler; +import org.apache.phoenix.compile.QueryCompiler; import org.apache.phoenix.compile.StatementNormalizer; import org.apache.phoenix.compile.SubqueryRewriter; import org.apache.phoenix.compile.SubselectRewriter; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.parse.AliasedNode; +import org.apache.phoenix.parse.ColumnParseNode; +import org.apache.phoenix.parse.FamilyWildcardParseNode; +import org.apache.phoenix.parse.OrderByNode; import org.apache.phoenix.parse.ParseNode; +import org.apache.phoenix.parse.ParseNodeVisitor; +import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor; import org.apache.phoenix.parse.TableWildcardParseNode; import org.apache.phoenix.parse.WildcardParseNode; -import org.apache.phoenix.compile.QueryCompiler; public class ParseNodeUtil { - /** - * Apply the {@link ParseNodeVisitor} to every part of the {@link SelectStatement}. - * @param selectStatement - * @param parseNodeVisitor - * @throws SQLException - */ - public static void applyParseNodeVisitor(SelectStatement selectStatement, ParseNodeVisitor parseNodeVisitor) throws SQLException { - applyParseNodeVisitor(selectStatement, parseNodeVisitor, true); - } - - /** - * Apply the {@link ParseNodeVisitor} to every part of the {@link SelectStatement}. - * @param selectStatement - * @param parseNodeVisitor - * @param applyWhere - * @throws SQLException - */ - public static void applyParseNodeVisitor( - SelectStatement selectStatement, - ParseNodeVisitor parseNodeVisitor, - boolean applyWhere) throws SQLException { - - for (AliasedNode selectAliasedNode : selectStatement.getSelect()) { - selectAliasedNode.getNode().accept(parseNodeVisitor); - } - - if (selectStatement.getGroupBy() != null) { - for (ParseNode groupByParseNode : selectStatement.getGroupBy()) { - groupByParseNode.accept(parseNodeVisitor); - } - } - - if (selectStatement.getHaving() != null) { - selectStatement.getHaving().accept(parseNodeVisitor); - } - - if (selectStatement.getOrderBy() != null) { - for (OrderByNode orderByNode : selectStatement.getOrderBy()) { - orderByNode.getNode().accept(parseNodeVisitor); - } - } - - if(applyWhere && selectStatement.getWhere() != null) { - selectStatement.getWhere().accept(parseNodeVisitor); - } - } - - /** - * Collect referenced columnNames in selectStatement, the selectStatement is a single table query, not a join. 
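Editor's illustration, outside the patch itself: collectReferencedColumnNamesForSingleTable visits every clause of a single-table SELECT and returns the normalized column names it touches, or null as soon as any wildcard shows up. A hedged usage sketch:

import java.sql.SQLException;
import java.util.Set;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.util.ParseNodeUtil;

public class CollectColumnsExample {
  public static void main(String[] args) throws SQLException {
    SelectStatement select =
      new SQLParser("SELECT col1, col2 FROM t WHERE col3 > 5 ORDER BY col4").parseQuery();
    // Expected to hold the normalized names COL1, COL2, COL3 and COL4;
    // a query such as SELECT * FROM t would return null instead.
    Set<String> referenced = ParseNodeUtil.collectReferencedColumnNamesForSingleTable(select);
    System.out.println(referenced);
  }
}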
- * @param selectStatement - * @return - * @throws SQLException - */ - public static Set collectReferencedColumnNamesForSingleTable(SelectStatement selectStatement) throws SQLException{ - SingleTableCollectColumnNameParseNodeVisitor collectColumnNameParseNodeVisitor = - new SingleTableCollectColumnNameParseNodeVisitor(); - applyParseNodeVisitor(selectStatement, collectColumnNameParseNodeVisitor); - boolean isWildcard = collectColumnNameParseNodeVisitor.isWildcard(); - if(isWildcard) { - return null; - } - return collectColumnNameParseNodeVisitor.getReferenceColumnNames(); - } - - private static class SingleTableCollectColumnNameParseNodeVisitor extends StatelessTraverseAllParseNodeVisitor { - private final Set referenceColumnNames; - private boolean wildcard = false; - - public SingleTableCollectColumnNameParseNodeVisitor() { - this.referenceColumnNames = new HashSet(); - } - - public Set getReferenceColumnNames() { - return this.referenceColumnNames; - } - - public boolean isWildcard() { - return wildcard; - } - - @Override - public Void visit(ColumnParseNode columnParseNode) throws SQLException { - String normalizedColumnName = SchemaUtil.getNormalizedColumnName(columnParseNode); - referenceColumnNames.add(normalizedColumnName); - return null; - } - - @Override - public Void visit(WildcardParseNode node) throws SQLException { - this.wildcard = true; - return null; - } - - @Override - public Void visit(TableWildcardParseNode node) throws SQLException { - this.wildcard = true; - return null; - } - - @Override - public Void visit(FamilyWildcardParseNode node) throws SQLException { - this.wildcard = true; - return null; - } - } - - public static class RewriteResult { - private SelectStatement rewrittenSelectStatement; - private ColumnResolver columnResolver; - public RewriteResult(SelectStatement rewrittenSelectStatement, ColumnResolver columnResolver) { - this.rewrittenSelectStatement = rewrittenSelectStatement; - this.columnResolver = columnResolver; - } - public SelectStatement getRewrittenSelectStatement() { - return rewrittenSelectStatement; - } - public ColumnResolver getColumnResolver() { - return columnResolver; - } - } - - /** - * Optimize rewriting {@link SelectStatement} by {@link SubselectRewriter} and {@link SubqueryRewriter} before - * {@link QueryCompiler#compile}. - * @param selectStatement - * @param phoenixConnection - * @return - * @throws SQLException - */ - public static RewriteResult rewrite(SelectStatement selectStatement, PhoenixConnection phoenixConnection) throws SQLException { - SelectStatement selectStatementToUse = - SubselectRewriter.flatten(selectStatement, phoenixConnection); - ColumnResolver columnResolver = - FromCompiler.getResolverForQuery(selectStatementToUse, phoenixConnection); - selectStatementToUse = StatementNormalizer.normalize(selectStatementToUse, columnResolver); - SelectStatement transformedSubquery = - SubqueryRewriter.transform(selectStatementToUse, columnResolver, phoenixConnection); - if (transformedSubquery != selectStatementToUse) { - columnResolver = FromCompiler.getResolverForQuery(transformedSubquery, phoenixConnection); - transformedSubquery = StatementNormalizer.normalize(transformedSubquery, columnResolver); - } - return new RewriteResult(transformedSubquery, columnResolver); + /** + * Apply the {@link ParseNodeVisitor} to every part of the {@link SelectStatement}. 
+ */ + public static void applyParseNodeVisitor(SelectStatement selectStatement, + ParseNodeVisitor parseNodeVisitor) throws SQLException { + applyParseNodeVisitor(selectStatement, parseNodeVisitor, true); + } + + /** + * Apply the {@link ParseNodeVisitor} to every part of the {@link SelectStatement}. + */ + public static void applyParseNodeVisitor(SelectStatement selectStatement, + ParseNodeVisitor parseNodeVisitor, boolean applyWhere) throws SQLException { + + for (AliasedNode selectAliasedNode : selectStatement.getSelect()) { + selectAliasedNode.getNode().accept(parseNodeVisitor); + } + + if (selectStatement.getGroupBy() != null) { + for (ParseNode groupByParseNode : selectStatement.getGroupBy()) { + groupByParseNode.accept(parseNodeVisitor); + } + } + + if (selectStatement.getHaving() != null) { + selectStatement.getHaving().accept(parseNodeVisitor); + } + + if (selectStatement.getOrderBy() != null) { + for (OrderByNode orderByNode : selectStatement.getOrderBy()) { + orderByNode.getNode().accept(parseNodeVisitor); + } + } + + if (applyWhere && selectStatement.getWhere() != null) { + selectStatement.getWhere().accept(parseNodeVisitor); + } + } + + /** + * Collect referenced columnNames in selectStatement, the selectStatement is a single table query, + * not a join. + */ + public static Set collectReferencedColumnNamesForSingleTable( + SelectStatement selectStatement) throws SQLException { + SingleTableCollectColumnNameParseNodeVisitor collectColumnNameParseNodeVisitor = + new SingleTableCollectColumnNameParseNodeVisitor(); + applyParseNodeVisitor(selectStatement, collectColumnNameParseNodeVisitor); + boolean isWildcard = collectColumnNameParseNodeVisitor.isWildcard(); + if (isWildcard) { + return null; + } + return collectColumnNameParseNodeVisitor.getReferenceColumnNames(); + } + + private static class SingleTableCollectColumnNameParseNodeVisitor + extends StatelessTraverseAllParseNodeVisitor { + private final Set referenceColumnNames; + private boolean wildcard = false; + + public SingleTableCollectColumnNameParseNodeVisitor() { + this.referenceColumnNames = new HashSet(); + } + + public Set getReferenceColumnNames() { + return this.referenceColumnNames; + } + + public boolean isWildcard() { + return wildcard; + } + + @Override + public Void visit(ColumnParseNode columnParseNode) throws SQLException { + String normalizedColumnName = SchemaUtil.getNormalizedColumnName(columnParseNode); + referenceColumnNames.add(normalizedColumnName); + return null; + } + + @Override + public Void visit(WildcardParseNode node) throws SQLException { + this.wildcard = true; + return null; + } + + @Override + public Void visit(TableWildcardParseNode node) throws SQLException { + this.wildcard = true; + return null; + } + + @Override + public Void visit(FamilyWildcardParseNode node) throws SQLException { + this.wildcard = true; + return null; + } + } + + public static class RewriteResult { + private SelectStatement rewrittenSelectStatement; + private ColumnResolver columnResolver; + + public RewriteResult(SelectStatement rewrittenSelectStatement, ColumnResolver columnResolver) { + this.rewrittenSelectStatement = rewrittenSelectStatement; + this.columnResolver = columnResolver; + } + + public SelectStatement getRewrittenSelectStatement() { + return rewrittenSelectStatement; + } + + public ColumnResolver getColumnResolver() { + return columnResolver; + } + } + + /** + * Optimize rewriting {@link SelectStatement} by {@link SubselectRewriter} and + * {@link SubqueryRewriter} before {@link 
QueryCompiler#compile}. + */ + public static RewriteResult rewrite(SelectStatement selectStatement, + PhoenixConnection phoenixConnection) throws SQLException { + SelectStatement selectStatementToUse = + SubselectRewriter.flatten(selectStatement, phoenixConnection); + ColumnResolver columnResolver = + FromCompiler.getResolverForQuery(selectStatementToUse, phoenixConnection); + selectStatementToUse = StatementNormalizer.normalize(selectStatementToUse, columnResolver); + SelectStatement transformedSubquery = + SubqueryRewriter.transform(selectStatementToUse, columnResolver, phoenixConnection); + if (transformedSubquery != selectStatementToUse) { + columnResolver = FromCompiler.getResolverForQuery(transformedSubquery, phoenixConnection); + transformedSubquery = StatementNormalizer.normalize(transformedSubquery, columnResolver); } + return new RewriteResult(transformedSubquery, columnResolver); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java index 34c9828f333..9b47248e7c3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixContextExecutor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,76 +23,71 @@ /** * Executes {@code Callable}s using a context classloader that is set up to load classes from - * Phoenix. - * - * Loading HBase configuration settings and endpoint coprocessor classes is done via the context - * classloader of the calling thread. When Phoenix is being run via a JDBC-enabled GUI, the - * driver is often loaded dynamically and executed via multiple threads, which makes it difficult - * or impossible to predict the state of the classloader hierarchy in the current thread. This - * class is intended to get around that, to ensure that the same classloader used to load Phoenix - * classes is set as the context classloader for specific calls. + * Phoenix. Loading HBase configuration settings and endpoint coprocessor classes is done via the + * context classloader of the calling thread. When Phoenix is being run via a JDBC-enabled GUI, the + * driver is often loaded dynamically and executed via multiple threads, which makes it difficult or + * impossible to predict the state of the classloader hierarchy in the current thread. This class is + * intended to get around that, to ensure that the same classloader used to load Phoenix classes is + * set as the context classloader for specific calls. 
*/ public class PhoenixContextExecutor { - // We cache the class loader because calls to Class.getClassLoader are relatively expensive - private static final ClassLoader CACHED_CLASSLOADER = PhoenixContextExecutor.class.getClassLoader(); + // We cache the class loader because calls to Class.getClassLoader are relatively expensive + private static final ClassLoader CACHED_CLASSLOADER = + PhoenixContextExecutor.class.getClassLoader(); - private static class CurrentContextWrapper implements CallWrapper { - private ClassLoader saveCcl; + private static class CurrentContextWrapper implements CallWrapper { + private ClassLoader saveCcl; - @Override - public void before() { - saveCcl = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(CACHED_CLASSLOADER); - } + @Override + public void before() { + saveCcl = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(CACHED_CLASSLOADER); + } - @Override - public void after() { - Thread.currentThread().setContextClassLoader(saveCcl); + @Override + public void after() { + Thread.currentThread().setContextClassLoader(saveCcl); - }; - } + }; + } - public static CallWrapper inContext() { - return new CurrentContextWrapper(); - } + public static CallWrapper inContext() { + return new CurrentContextWrapper(); + } - /** - * Execute an operation (synchronously) using the context classloader used to load this class, - * instead of the currently-set context classloader of the current thread. This allows loading - * dynamically-loaded classes and configuration files using the same classloader used to - * load the rest of the JDBC driver. - * - * The context classloader of the current thread is reset to its original value after the - * callable has been executed. - * - * @param target the callable to be executed - * @return the return value from the callable - */ - public static T call(Callable target) throws Exception { - ClassLoader saveCcl = Thread.currentThread().getContextClassLoader(); - try { - Thread.currentThread().setContextClassLoader(CACHED_CLASSLOADER); - return target.call(); - } finally { - Thread.currentThread().setContextClassLoader(saveCcl); - } + /** + * Execute an operation (synchronously) using the context classloader used to load this class, + * instead of the currently-set context classloader of the current thread. This allows loading + * dynamically-loaded classes and configuration files using the same classloader used to load the + * rest of the JDBC driver. The context classloader of the current thread is reset to its original + * value after the callable has been executed. + * @param target the callable to be executed + * @return the return value from the callable + */ + public static T call(Callable target) throws Exception { + ClassLoader saveCcl = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(CACHED_CLASSLOADER); + return target.call(); + } finally { + Thread.currentThread().setContextClassLoader(saveCcl); } + } - /** - * Same as {@link #call(java.util.concurrent.Callable)}, but doesn't throw checked exceptions. 
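A hedged usage sketch of the executor above: loading HBase configuration through Phoenix's own classloader, which is the scenario the class javadoc describes. Configuration and HBaseConfiguration are the standard org.apache.hadoop classes and Callable is java.util.concurrent.Callable; imports are assumed.

    // Illustrative sketch, not part of the reformatted source.
    Configuration conf = PhoenixContextExecutor.callWithoutPropagation(
      new Callable<Configuration>() {
        @Override
        public Configuration call() {
          // runs with Phoenix's cached classloader installed as the thread context classloader
          return HBaseConfiguration.create();
        }
      });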
- * - * @param target the callable to be executed - * @return the return value from the callable - */ - public static T callWithoutPropagation(Callable target) { - try { - return call(target); - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } + /** + * Same as {@link #call(java.util.concurrent.Callable)}, but doesn't throw checked exceptions. + * @param target the callable to be executed + * @return the return value from the callable + */ + public static T callWithoutPropagation(Callable target) { + try { + return call(target); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java index a2bced47ee9..b9ab613c879 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hbase.thirdparty.com.google.common.base.Function; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.compat.hbase.CompatUtil; import org.apache.phoenix.execute.MutationState.MultiRowMutationState; import org.apache.phoenix.execute.MutationState.RowMutationState; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; @@ -47,109 +46,96 @@ import org.apache.phoenix.schema.TableRef; /** - * - * Utilities for KeyValue. Where there's duplication with KeyValue methods, - * these avoid creating new objects when not necessary (primary preventing - * byte array copying). - * - * + * Utilities for KeyValue. Where there's duplication with KeyValue methods, these avoid creating new + * objects when not necessary (primary preventing byte array copying). 
* @since 0.1 */ public class PhoenixKeyValueUtil { - private PhoenixKeyValueUtil() { - } + private PhoenixKeyValueUtil() { + } - public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(key).setFamily(cf) - .setQualifier(cq).setTimestamp(ts).setType(Type.Put) - .setValue(value, valueOffset, valueLength).build(); - } + public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value, + int valueOffset, int valueLength) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(key).setFamily(cf) + .setQualifier(cq).setTimestamp(ts).setType(Type.Put).setValue(value, valueOffset, valueLength) + .build(); + } - public static Cell newKeyValue(ImmutableBytesWritable key, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(key.get(), key.getOffset(), key.getLength()).setFamily(cf).setQualifier(cq) - .setTimestamp(ts).setType(Type.Put).setValue(value, valueOffset, valueLength) - .build(); - } + public static Cell newKeyValue(ImmutableBytesWritable key, byte[] cf, byte[] cq, long ts, + byte[] value, int valueOffset, int valueLength) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) + .setRow(key.get(), key.getOffset(), key.getLength()).setFamily(cf).setQualifier(cq) + .setTimestamp(ts).setType(Type.Put).setValue(value, valueOffset, valueLength).build(); + } - public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, byte[] cq, long ts, byte[] value, int valueOffset, int valueLength) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(key, keyOffset, keyLength).setFamily(cf).setQualifier(cq).setTimestamp(ts) - .setType(Type.Put).setValue(value, valueOffset, valueLength).build(); - } - - public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, - int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, - int valueOffset, int valueLength,Type type) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(key, keyOffset, keyLength).setFamily(cf, cfOffset, cfLength) - .setQualifier(cq, cqOffset, cqLength).setTimestamp(ts) - .setValue(value, valueOffset, valueLength).setType(type).build(); - } - - public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) { - return newKeyValue(key, cf, cq, ts, value, 0, value.length); - } + public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, byte[] cq, + long ts, byte[] value, int valueOffset, int valueLength) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(key, keyOffset, keyLength) + .setFamily(cf).setQualifier(cq).setTimestamp(ts).setType(Type.Put) + .setValue(value, valueOffset, valueLength).build(); + } - /** - * Binary search for latest column value without allocating memory in the process - * @param kvBuilder TODO - * @param kvs - * @param family - * @param qualifier - */ - public static Cell getColumnLatest(KeyValueBuilder kvBuilder, Listkvs, byte[] family, byte[] qualifier) { - if (kvs.size() == 0) { - return null; - } - assert CellUtil.matchingRows(kvs.get(0), kvs.get(kvs.size()-1)); + public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf, int cfOffset, + int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value, int valueOffset, + 
int valueLength, Type type) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(key, keyOffset, keyLength) + .setFamily(cf, cfOffset, cfLength).setQualifier(cq, cqOffset, cqLength).setTimestamp(ts) + .setValue(value, valueOffset, valueLength).setType(type).build(); + } - Comparator comp = new SearchComparator(kvBuilder, family, qualifier); - int pos = Collections.binarySearch(kvs, null, comp); - if (pos < 0 || pos == kvs.size()) { - return null; // doesn't exist - } - - return kvs.get(pos); + public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) { + return newKeyValue(key, cf, cq, ts, value, 0, value.length); + } + + /** + * Binary search for latest column value without allocating memory in the process + * @param kvBuilder TODO + */ + public static Cell getColumnLatest(KeyValueBuilder kvBuilder, List kvs, byte[] family, + byte[] qualifier) { + if (kvs.size() == 0) { + return null; } + assert CellUtil.matchingRows(kvs.get(0), kvs.get(kvs.size() - 1)); + Comparator comp = new SearchComparator(kvBuilder, family, qualifier); + int pos = Collections.binarySearch(kvs, null, comp); + if (pos < 0 || pos == kvs.size()) { + return null; // doesn't exist + } - /** - * Binary search for latest column value without allocating memory in the process - * @param kvBuilder TODO - * @param kvs - * @param family - * @param qualifier - */ - public static Cell getColumnLatest(KeyValueBuilder kvBuilder, Cell[] kvs, byte[] family, byte[] qualifier) { - if (kvs.length == 0) { - return null; - } - assert CellUtil.matchingRows(kvs[0], kvs[kvs.length-1]); + return kvs.get(pos); + } - Comparator comp = new SearchComparator(kvBuilder, family, qualifier); - int pos = Arrays.binarySearch(kvs, null, comp); - if (pos < 0 || pos == kvs.length) { - return null; // doesn't exist - } - - return kvs[pos]; + /** + * Binary search for latest column value without allocating memory in the process + * @param kvBuilder TODO + */ + public static Cell getColumnLatest(KeyValueBuilder kvBuilder, Cell[] kvs, byte[] family, + byte[] qualifier) { + if (kvs.length == 0) { + return null; } + assert CellUtil.matchingRows(kvs[0], kvs[kvs.length - 1]); - /* - * Special comparator, *only* works for binary search. - * - * We make the following assumption: - * 1. All KVs compared have the same row key - * 2. For each (rowkey, family, qualifier) there is at most one version - * 3. Current JDKs only uses the search term on the right side - * - * #1 allows us to avoid row key comparisons altogether. - * #2 allows for exact matches - * #3 lets us save instanceof checks, and allows to inline the search term in the comparator - */ - private static class SearchComparator implements Comparator { - private final KeyValueBuilder kvBuilder; + Comparator comp = new SearchComparator(kvBuilder, family, qualifier); + int pos = Arrays.binarySearch(kvs, null, comp); + if (pos < 0 || pos == kvs.length) { + return null; // doesn't exist + } + + return kvs[pos]; + } + + /* + * Special comparator, *only* works for binary search. We make the following assumption: 1. All + * KVs compared have the same row key 2. For each (rowkey, family, qualifier) there is at most one + * version 3. Current JDKs only uses the search term on the right side #1 allows us to avoid row + * key comparisons altogether. 
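A short illustrative sketch of the two helpers above. The row, family and qualifier literals are made up; kvBuilder stands for an existing KeyValueBuilder and cells for a list of Cells that all belong to one row, as returned by a single-row lookup.

    // Illustrative sketch, not part of the reformatted source.
    Cell put = PhoenixKeyValueUtil.newKeyValue(Bytes.toBytes("row1"), Bytes.toBytes("0"),
      Bytes.toBytes("COL1"), System.currentTimeMillis(), Bytes.toBytes("value1"));

    // Binary search for the newest COL1 cell in the row; returns null when the column is absent.
    Cell latest = PhoenixKeyValueUtil.getColumnLatest(kvBuilder, cells,
      Bytes.toBytes("0"), Bytes.toBytes("COL1"));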
#2 allows for exact matches #3 lets us save instanceof checks, and + * allows to inline the search term in the comparator + */ + private static class SearchComparator implements Comparator { + private final KeyValueBuilder kvBuilder; private final byte[] family; private final byte[] qualifier; @@ -161,156 +147,151 @@ public SearchComparator(KeyValueBuilder kvBuilder, byte[] f, byte[] q) { @Override public int compare(final Cell l, final Cell ignored) { - assert ignored == null; - // family - int val = kvBuilder.compareFamily(l, family, 0, family.length); - if (val != 0) { - return val; - } - // qualifier - return kvBuilder.compareQualifier(l, qualifier, 0, qualifier.length); - } - } - - /** - * Calculate the size a mutation will likely take when stored in HBase - * @param m The Mutation - * @return the disk size of the passed mutation - */ - public static long calculateMutationDiskSize(Mutation m) { - long size = 0; - for (Entry> entry : m.getFamilyCellMap().entrySet()) { - for (Cell c : entry.getValue()) { - size += c.getSerializedSize(); - } - } - return size; + assert ignored == null; + // family + int val = kvBuilder.compareFamily(l, family, 0, family.length); + if (val != 0) { + return val; + } + // qualifier + return kvBuilder.compareQualifier(l, qualifier, 0, qualifier.length); } + } - /** - * Estimates the storage size of a row - * @param tableMutationMap map from table to row to RowMutationState - * @return estimated row size - */ - public static long getEstimatedRowMutationSize( - Map tableMutationMap) { - long size = 0; - // iterate over table - for (Entry tableEntry : tableMutationMap.entrySet()) { - size += calculateMultiRowMutationSize(tableEntry.getValue()); - } - return size; + /** + * Calculate the size a mutation will likely take when stored in HBase + * @param m The Mutation + * @return the disk size of the passed mutation + */ + public static long calculateMutationDiskSize(Mutation m) { + long size = 0; + for (Entry> entry : m.getFamilyCellMap().entrySet()) { + for (Cell c : entry.getValue()) { + size += c.getSerializedSize(); + } } + return size; + } - public static long getEstimatedRowMutationSizeWithBatch(Map> tableMutationMap) { - long size = 0; - // iterate over table - for (Entry> tableEntry : tableMutationMap.entrySet()) { - for (MultiRowMutationState batch : tableEntry.getValue()) { - size += calculateMultiRowMutationSize(batch); - } - } - return size; + /** + * Estimates the storage size of a row + * @param tableMutationMap map from table to row to RowMutationState + * @return estimated row size + */ + public static long + getEstimatedRowMutationSize(Map tableMutationMap) { + long size = 0; + // iterate over table + for (Entry tableEntry : tableMutationMap.entrySet()) { + size += calculateMultiRowMutationSize(tableEntry.getValue()); } + return size; + } - /** - * If c is not a KeyValue, cast it to KeyValue and return it. - * If c is a KeyValue, just return it - * - * @param c cell - * @return either c case to ExtendedCell, or its copy as a KeyValue - */ - public static KeyValue ensureKeyValue(Cell c) { - // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something - // that will likely be removed at some point in time. - if (c == null) return null; - // TODO Do we really want to return only KeyValues, or would it be enough to - // copy ByteBufferExtendedCells to heap ? - // i.e can we avoid copying on-heap cells like BufferedDataBlockEncoder.OnheapDecodedCell ? 
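A small illustrative sketch of the size-estimation helper above; the Put contents are hypothetical and Put is org.apache.hadoop.hbase.client.Put.

    // Illustrative sketch, not part of the reformatted source.
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("0"), Bytes.toBytes("COL1"), Bytes.toBytes("value1"));
    // Sums the serialized size of every cell in the mutation's family map.
    long approxDiskBytes = PhoenixKeyValueUtil.calculateMutationDiskSize(put);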
- if (c instanceof KeyValue) { - return (KeyValue) c; - } else { - return KeyValueUtil.copyToNewKeyValue(c); - } + public static long getEstimatedRowMutationSizeWithBatch( + Map> tableMutationMap) { + long size = 0; + // iterate over table + for (Entry> tableEntry : tableMutationMap.entrySet()) { + for (MultiRowMutationState batch : tableEntry.getValue()) { + size += calculateMultiRowMutationSize(batch); + } } + return size; + } - /** - * Replace non-KeyValue cells with their KeyValue copies - * - * This ensures that all Cells are copied on-heap, but will do - * extra work for any non-KeyValue on-heap cells - * - * @param cells modified, its elements are replaced - * @return the modified input cells object, for convenience - */ - public static List ensureKeyValues(List cells) { - List lazyList = Lists.transform(cells, new Function() { - @Override - public KeyValue apply(Cell arg0) { - return ensureKeyValue(arg0); - } - }); - return new ArrayList<>(lazyList); + /** + * If c is not a KeyValue, cast it to KeyValue and return it. If c is a KeyValue, just return it + * @param c cell + * @return either c case to ExtendedCell, or its copy as a KeyValue + */ + public static KeyValue ensureKeyValue(Cell c) { + // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something + // that will likely be removed at some point in time. + if (c == null) return null; + // TODO Do we really want to return only KeyValues, or would it be enough to + // copy ByteBufferExtendedCells to heap ? + // i.e can we avoid copying on-heap cells like BufferedDataBlockEncoder.OnheapDecodedCell ? + if (c instanceof KeyValue) { + return (KeyValue) c; + } else { + return KeyValueUtil.copyToNewKeyValue(c); } + } - public static KeyValue maybeCopyCell(Cell c) { - // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something - // that will likely be removed at some point in time. - if (c == null) { - return null; - } - // TODO Do we really want to return only KeyValues, or would it be enough to - // copy ByteBufferExtendedCells to heap ? - // i.e can we avoid copying on-heap cells like BufferedDataBlockEncoder.OnheapDecodedCell ? - if (c instanceof KeyValue) { - return (KeyValue) c; - } - return KeyValueUtil.copyToNewKeyValue(c); - } + /** + * Replace non-KeyValue cells with their KeyValue copies This ensures that all Cells are copied + * on-heap, but will do extra work for any non-KeyValue on-heap cells + * @param cells modified, its elements are replaced + * @return the modified input cells object, for convenience + */ + public static List ensureKeyValues(List cells) { + List lazyList = Lists.transform(cells, new Function() { + @Override + public KeyValue apply(Cell arg0) { + return ensureKeyValue(arg0); + } + }); + return new ArrayList<>(lazyList); + } - /** - * Copy all Off-Heap cells to KeyValues - * The input list is modified. - * - * @param cells is modified in place - * @return the modified list (optional, input list is modified in place) - */ - public static List maybeCopyCellList(List cells) { - ListIterator cellsIt = cells.listIterator(); - while (cellsIt.hasNext()) { - Cell c = cellsIt.next(); - //FIXME this does not catch all off-heap cells - if (c instanceof ByteBufferExtendedCell) { - cellsIt.set(KeyValueUtil.copyToNewKeyValue(c)); - } - } - return cells; + public static KeyValue maybeCopyCell(Cell c) { + // Same as KeyValueUtil, but HBase has deprecated this method. Avoid depending on something + // that will likely be removed at some point in time. 
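An illustrative sketch of the copy helpers above; scannedCells stands for a List<Cell> obtained from a scan and may contain off-heap (ByteBuffer-backed) cells.

    // Illustrative sketch, not part of the reformatted source.
    // Every element is now a heap-resident KeyValue and safe to retain after the scanner closes.
    List<KeyValue> retained = PhoenixKeyValueUtil.ensureKeyValues(scannedCells);
    // Single-cell variant: copies only when the cell is not already a KeyValue.
    KeyValue one = PhoenixKeyValueUtil.maybeCopyCell(scannedCells.get(0));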
+ if (c == null) { + return null; + } + // TODO Do we really want to return only KeyValues, or would it be enough to + // copy ByteBufferExtendedCells to heap ? + // i.e can we avoid copying on-heap cells like BufferedDataBlockEncoder.OnheapDecodedCell ? + if (c instanceof KeyValue) { + return (KeyValue) c; } + return KeyValueUtil.copyToNewKeyValue(c); + } - private static long calculateMultiRowMutationSize(MultiRowMutationState mutations) { - long size = 0; - // iterate over rows - for (Entry rowEntry : mutations.entrySet()) { - size += calculateRowMutationSize(rowEntry); - } - return size; + /** + * Copy all Off-Heap cells to KeyValues The input list is modified. + * @param cells is modified in place + * @return the modified list (optional, input list is modified in place) + */ + public static List maybeCopyCellList(List cells) { + ListIterator cellsIt = cells.listIterator(); + while (cellsIt.hasNext()) { + Cell c = cellsIt.next(); + // FIXME this does not catch all off-heap cells + if (c instanceof ByteBufferExtendedCell) { + cellsIt.set(KeyValueUtil.copyToNewKeyValue(c)); + } } + return cells; + } - private static long calculateRowMutationSize(Entry rowEntry) { - int rowLength = rowEntry.getKey().getLength(); - long colValuesLength = rowEntry.getValue().calculateEstimatedSize(); - return (rowLength + colValuesLength); + private static long calculateMultiRowMutationSize(MultiRowMutationState mutations) { + long size = 0; + // iterate over rows + for (Entry rowEntry : mutations.entrySet()) { + size += calculateRowMutationSize(rowEntry); } + return size; + } + + private static long + calculateRowMutationSize(Entry rowEntry) { + int rowLength = rowEntry.getKey().getLength(); + long colValuesLength = rowEntry.getValue().calculateEstimatedSize(); + return (rowLength + colValuesLength); + } - public static void setTimestamp(Mutation m, long timestamp) { - byte[] tsBytes = Bytes.toBytes(timestamp); - for (List family : m.getFamilyCellMap().values()) { - // TODO Do we really need to copy everything to the HEAP here ? - List familyKVs = ensureKeyValues(family); - for (KeyValue kv : familyKVs) { - int tsOffset = kv.getTimestampOffset(); - System.arraycopy(tsBytes, 0, kv.getBuffer(), tsOffset, Bytes.SIZEOF_LONG); - } - } + public static void setTimestamp(Mutation m, long timestamp) { + byte[] tsBytes = Bytes.toBytes(timestamp); + for (List family : m.getFamilyCellMap().values()) { + // TODO Do we really need to copy everything to the HEAP here ? + List familyKVs = ensureKeyValues(family); + for (KeyValue kv : familyKVs) { + int tsOffset = kv.getTimestampOffset(); + System.arraycopy(tsBytes, 0, kv.getBuffer(), tsOffset, Bytes.SIZEOF_LONG); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java index 88ad2e36eff..4060288bee0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.compile.QueryPlan; -import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.expression.OrderByExpression; @@ -72,7 +71,6 @@ import org.apache.phoenix.monitoring.PhoenixTableMetric; import org.apache.phoenix.monitoring.TableMetricsManager; import org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesMetricsManager; -import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.AmbiguousColumnException; @@ -102,1562 +100,1562 @@ import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; /** - * * Collection of non JDBC compliant utility methods - * - * * @since 0.1 */ public class PhoenixRuntime { - //TODO use strings, char needs a lot of error-prone conversions - public static final char JDBC_PROTOCOL_TERMINATOR = ';'; - public static final char JDBC_PROTOCOL_SEPARATOR = ':'; - - /** - * JDBC URL jdbc protocol identifier - */ - public static final String JDBC_PROTOCOL_IDENTIFIER = "jdbc"; - - /** - * JDBC URL phoenix protocol identifier (protocol determined from Configuration) - */ - public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER = "phoenix"; - - /** - * JDBC URL phoenix protocol identifier for ZK HBase connection - */ - public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK = "phoenix+zk"; - - /** - * JDBC URL phoenix protocol identifier for the deprecated Master based HBase connection - */ - public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER = "phoenix+master"; - - /** - * JDBC URL phoenix protocol identifier for RPC based HBase connection - */ - public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC = "phoenix+rpc"; - - /** - * JDBC URL phoenix protocol identifier - */ - public static final String JDBC_PHOENIX_THIN_IDENTIFIER = "thin"; - - /** - * Root for the generic JDBC URL that the Phoenix accepts. - */ - public static final String JDBC_PROTOCOL = - JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER; - - /** - * Root for the explicit ZK JDBC URL that the Phoenix accepts. - */ - public static final String JDBC_PROTOCOL_ZK = - JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR - + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK; - - /** - * Root for the explicit Master (HRPC) JDBC URL that the Phoenix accepts. - */ - public static final String JDBC_PROTOCOL_MASTER = - JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR - + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER; - - /** - * Root for the explicit Master (HRPC) JDBC URL that the Phoenix accepts. - */ - public static final String JDBC_PROTOCOL_RPC = - JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR - + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC; - - /** - * Root for the JDBC URL used by the thin driver. Duplicated here to avoid dependencies between - * modules. 
- */ - public static final String JDBC_THIN_PROTOCOL = - JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_THIN_IDENTIFIER; - - @Deprecated - public static final String EMBEDDED_JDBC_PROTOCOL = - PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - - /** - * Use this connection property to control HBase timestamps - * by specifying your own long timestamp value at connection time. - * Specifying this property will force the connection to be read - * only - no DML or DDL will be allowed. - */ - public static final String CURRENT_SCN_ATTRIB = "CurrentSCN"; - - /** - * Internal connection property to force an index to be built at a - * given time stamp. - */ - public static final String BUILD_INDEX_AT_ATTRIB = "BuildIndexAt"; - - /** - * Use this connection property to help with fairness of resource allocation - * for the client and server. The value of the attribute determines the - * bucket used to rollup resource usage for a particular tenant/organization. Each tenant - * may only use a percentage of total resources, governed by the {@link org.apache.phoenix.query.QueryServices} - * configuration properties - */ - public static final String TENANT_ID_ATTRIB = "TenantId"; - - /** - * Use this connection property to prevent an upgrade from occurring when - * connecting to a new server version. - */ - public static final String NO_UPGRADE_ATTRIB = "NoUpgrade"; - /** - * Use this connection property to control the number of rows that are - * batched together on an UPSERT INTO table1... SELECT ... FROM table2. - * It's only used when autoCommit is true and your source table is - * different than your target table or your SELECT statement has a - * GROUP BY clause. - */ - public final static String UPSERT_BATCH_SIZE_ATTRIB = "UpsertBatchSize"; - - /** - * Use this connection property to control the number of bytes that are - * batched together on an UPSERT INTO table1... SELECT ... FROM table2. - * It's only used when autoCommit is true and your source table is - * different than your target table or your SELECT statement has a - * GROUP BY clause. Overrides the value of UpsertBatchSize. - */ - public final static String UPSERT_BATCH_SIZE_BYTES_ATTRIB = "UpsertBatchSizeBytes"; - - - /** - * Use this connection property to explicitly enable or disable auto-commit on a new connection. - */ - public static final String AUTO_COMMIT_ATTRIB = "AutoCommit"; - - /** - * Use this connection property to explicitly set read consistency level on a new connection. - */ - public static final String CONSISTENCY_ATTRIB = "Consistency"; - - /** - * Use this connection property to explicitly enable or disable request level metric collection. - */ - public static final String REQUEST_METRIC_ATTRIB = "RequestMetric"; - - /** - * Use this column name on the row returned by explain plan result set to get estimate of number - * of bytes read. - */ - public static final String EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN = - PhoenixStatement.EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS; - - /** - * Use this column name on the row returned by explain plan result set to get estimate of number - * of rows read. 
- */ - public static final String EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN = - PhoenixStatement.EXPLAIN_PLAN_ROWS_COLUMN_ALIAS; - - /** - * Use this column name on the row returned by explain plan result set to get timestamp at which - * the estimate of number or bytes/rows was collected - */ - public static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN = - PhoenixStatement.EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS; - - /** - * All Phoenix specific connection properties - * TODO: use enum instead - */ - private final static String[] CONNECTION_PROPERTIES = { - CURRENT_SCN_ATTRIB, - TENANT_ID_ATTRIB, - UPSERT_BATCH_SIZE_ATTRIB, - AUTO_COMMIT_ATTRIB, - CONSISTENCY_ATTRIB, - REQUEST_METRIC_ATTRIB, - }; - - /** - * Use this as the zookeeper quorum name to have a connection-less connection. This enables - * Phoenix-compatible HFiles to be created in a map/reduce job by creating tables, - * upserting data into them, and getting the uncommitted state through {@link #getUncommittedData(Connection)} - */ - public final static String CONNECTIONLESS = "none"; - - /** - * Use this connection property prefix for annotations that you want to show up in traces and log lines emitted by Phoenix. - * This is useful for annotating connections with information available on the client (e.g. user or session identifier) and - * having these annotation automatically passed into log lines and traces by Phoenix. - */ - public static final String ANNOTATION_ATTRIB_PREFIX = "phoenix.annotation."; - - private static final String HEADER_IN_LINE = "in-line"; - private static final String SQL_FILE_EXT = ".sql"; - private static final String CSV_FILE_EXT = ".csv"; + // TODO use strings, char needs a lot of error-prone conversions + public static final char JDBC_PROTOCOL_TERMINATOR = ';'; + public static final char JDBC_PROTOCOL_SEPARATOR = ':'; + + /** + * JDBC URL jdbc protocol identifier + */ + public static final String JDBC_PROTOCOL_IDENTIFIER = "jdbc"; + + /** + * JDBC URL phoenix protocol identifier (protocol determined from Configuration) + */ + public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER = "phoenix"; + + /** + * JDBC URL phoenix protocol identifier for ZK HBase connection + */ + public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK = "phoenix+zk"; + + /** + * JDBC URL phoenix protocol identifier for the deprecated Master based HBase connection + */ + public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER = "phoenix+master"; + + /** + * JDBC URL phoenix protocol identifier for RPC based HBase connection + */ + public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC = "phoenix+rpc"; + + /** + * JDBC URL phoenix protocol identifier + */ + public static final String JDBC_PHOENIX_THIN_IDENTIFIER = "thin"; + + /** + * Root for the generic JDBC URL that the Phoenix accepts. + */ + public static final String JDBC_PROTOCOL = + JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER; + + /** + * Root for the explicit ZK JDBC URL that the Phoenix accepts. + */ + public static final String JDBC_PROTOCOL_ZK = + JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK; + + /** + * Root for the explicit Master (HRPC) JDBC URL that the Phoenix accepts. + */ + public static final String JDBC_PROTOCOL_MASTER = + JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER; + + /** + * Root for the explicit Master (HRPC) JDBC URL that the Phoenix accepts. 
+ */ + public static final String JDBC_PROTOCOL_RPC = + JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC; + + /** + * Root for the JDBC URL used by the thin driver. Duplicated here to avoid dependencies between + * modules. + */ + public static final String JDBC_THIN_PROTOCOL = + JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_THIN_IDENTIFIER; + + @Deprecated + public static final String EMBEDDED_JDBC_PROTOCOL = + PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + + /** + * Use this connection property to control HBase timestamps by specifying your own long timestamp + * value at connection time. Specifying this property will force the connection to be read only - + * no DML or DDL will be allowed. + */ + public static final String CURRENT_SCN_ATTRIB = "CurrentSCN"; + + /** + * Internal connection property to force an index to be built at a given time stamp. + */ + public static final String BUILD_INDEX_AT_ATTRIB = "BuildIndexAt"; + + /** + * Use this connection property to help with fairness of resource allocation for the client and + * server. The value of the attribute determines the bucket used to rollup resource usage for a + * particular tenant/organization. Each tenant may only use a percentage of total resources, + * governed by the {@link org.apache.phoenix.query.QueryServices} configuration properties + */ + public static final String TENANT_ID_ATTRIB = "TenantId"; + + /** + * Use this connection property to prevent an upgrade from occurring when connecting to a new + * server version. + */ + public static final String NO_UPGRADE_ATTRIB = "NoUpgrade"; + /** + * Use this connection property to control the number of rows that are batched together on an + * UPSERT INTO table1... SELECT ... FROM table2. It's only used when autoCommit is true and your + * source table is different than your target table or your SELECT statement has a GROUP BY + * clause. + */ + public final static String UPSERT_BATCH_SIZE_ATTRIB = "UpsertBatchSize"; + + /** + * Use this connection property to control the number of bytes that are batched together on an + * UPSERT INTO table1... SELECT ... FROM table2. It's only used when autoCommit is true and your + * source table is different than your target table or your SELECT statement has a GROUP BY + * clause. Overrides the value of UpsertBatchSize. + */ + public final static String UPSERT_BATCH_SIZE_BYTES_ATTRIB = "UpsertBatchSizeBytes"; + + /** + * Use this connection property to explicitly enable or disable auto-commit on a new connection. + */ + public static final String AUTO_COMMIT_ATTRIB = "AutoCommit"; + + /** + * Use this connection property to explicitly set read consistency level on a new connection. + */ + public static final String CONSISTENCY_ATTRIB = "Consistency"; + + /** + * Use this connection property to explicitly enable or disable request level metric collection. + */ + public static final String REQUEST_METRIC_ATTRIB = "RequestMetric"; + + /** + * Use this column name on the row returned by explain plan result set to get estimate of number + * of bytes read. + */ + public static final String EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN = + PhoenixStatement.EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS; + + /** + * Use this column name on the row returned by explain plan result set to get estimate of number + * of rows read. 
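A hedged sketch of how the connection properties documented above are typically supplied; the quorum, tenant id and SCN value are illustrative only.

    // Illustrative sketch, not part of the reformatted source.
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "acme");
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, "1700000000000"); // forces a read-only connection
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk1,zk2,zk3:2181", props)) {
      // statements on conn are scoped to the tenant and see data as of the supplied timestamp
    }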
+ */ + public static final String EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN = + PhoenixStatement.EXPLAIN_PLAN_ROWS_COLUMN_ALIAS; + + /** + * Use this column name on the row returned by explain plan result set to get timestamp at which + * the estimate of number or bytes/rows was collected + */ + public static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN = + PhoenixStatement.EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS; + + /** + * All Phoenix specific connection properties TODO: use enum instead + */ + private final static String[] CONNECTION_PROPERTIES = { CURRENT_SCN_ATTRIB, TENANT_ID_ATTRIB, + UPSERT_BATCH_SIZE_ATTRIB, AUTO_COMMIT_ATTRIB, CONSISTENCY_ATTRIB, REQUEST_METRIC_ATTRIB, }; + + /** + * Use this as the zookeeper quorum name to have a connection-less connection. This enables + * Phoenix-compatible HFiles to be created in a map/reduce job by creating tables, upserting data + * into them, and getting the uncommitted state through {@link #getUncommittedData(Connection)} + */ + public final static String CONNECTIONLESS = "none"; + + /** + * Use this connection property prefix for annotations that you want to show up in traces and log + * lines emitted by Phoenix. This is useful for annotating connections with information available + * on the client (e.g. user or session identifier) and having these annotation automatically + * passed into log lines and traces by Phoenix. + */ + public static final String ANNOTATION_ATTRIB_PREFIX = "phoenix.annotation."; + + private static final String HEADER_IN_LINE = "in-line"; + private static final String SQL_FILE_EXT = ".sql"; + private static final String CSV_FILE_EXT = ".csv"; + + /** + * Provides a mechanism to run SQL scripts against, where the arguments are: 1) connection URL + * string 2) one or more paths to either SQL scripts or CSV files If a CurrentSCN property is set + * on the connection URL, then it is incremented between processing, with each file being + * processed by a new connection at the increment timestamp value. + */ + public static void main(String[] args) { + + ExecutionCommand execCmd = ExecutionCommand.parseArgs(args); + String jdbcUrl; + if (execCmd.getConnectionString().startsWith(JDBC_PROTOCOL)) { + jdbcUrl = execCmd.getConnectionString(); + } else { + jdbcUrl = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + execCmd.getConnectionString(); + } - /** - * Provides a mechanism to run SQL scripts against, where the arguments are: - * 1) connection URL string - * 2) one or more paths to either SQL scripts or CSV files - * If a CurrentSCN property is set on the connection URL, then it is incremented - * between processing, with each file being processed by a new connection at the - * increment timestamp value. 
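An illustrative invocation of the entry point described above; the connection string and file names are hypothetical, and note that main() finishes by calling System.exit().

    // Illustrative sketch, not part of the reformatted source.
    // Runs schema DDL first, then bulk-loads the CSV into a table named after the file.
    PhoenixRuntime.main(new String[] { "zk1,zk2,zk3:2181", "create_schema.sql", "data.csv" });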
- */ - public static void main(String [] args) { + int exitStatus = 0; - ExecutionCommand execCmd = ExecutionCommand.parseArgs(args); - String jdbcUrl; - if (execCmd.getConnectionString().startsWith(JDBC_PROTOCOL)) { - jdbcUrl = execCmd.getConnectionString(); + PhoenixConnection conn = null; + try { + Properties props = new Properties(); + if (execCmd.isLocalIndexUpgrade()) { + props.setProperty(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, "false"); + } + if (execCmd.binaryEncoding != null) { + props.setProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, execCmd.binaryEncoding); + } + conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class); + conn.setRunningUpgrade(true); + if (execCmd.isMapNamespace()) { + String srcTable = execCmd.getSrcTable(); + System.out.println( + "Starting upgrading table:" + srcTable + "... please don't kill it in between!!"); + UpgradeUtil.upgradeTable(conn, srcTable); + } else if (execCmd.isUpgrade()) { + if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) { + throw new SQLException("May not specify the CURRENT_SCN property when upgrading"); + } + if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) { + throw new SQLException("May not specify the TENANT_ID_ATTRIB property when upgrading"); + } + if (execCmd.getInputFiles().isEmpty()) { + List tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn); + if (tablesNeedingUpgrade.isEmpty()) { + String msg = + "No tables are required to be upgraded due to incorrect row key order (PHOENIX-2067 and PHOENIX-2120)"; + System.out.println(msg); + } else { + String msg = + "The following tables require upgrade due to a bug causing the row key to be incorrectly ordered (PHOENIX-2067 and PHOENIX-2120):\n" + + Joiner.on(' ').join(tablesNeedingUpgrade); + System.out.println("WARNING: " + msg); + } + List unsupportedTables = + UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn); + if (!unsupportedTables.isEmpty()) { + String msg = + "The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n" + + Joiner.on(' ').join(unsupportedTables); + System.out.println("WARNING: " + msg); + } } else { - jdbcUrl = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + execCmd.getConnectionString(); + UpgradeUtil.upgradeDescVarLengthRowKeys(conn, execCmd.getInputFiles(), + execCmd.isBypassUpgrade()); } - - int exitStatus = 0; - - PhoenixConnection conn = null; - try { - Properties props = new Properties(); - if (execCmd.isLocalIndexUpgrade()) { - props.setProperty(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, "false"); - } - if (execCmd.binaryEncoding != null) { - props.setProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, execCmd.binaryEncoding); - } - conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class); - conn.setRunningUpgrade(true); - if (execCmd.isMapNamespace()) { - String srcTable = execCmd.getSrcTable(); - System.out.println("Starting upgrading table:" + srcTable + "... 
please don't kill it in between!!"); - UpgradeUtil.upgradeTable(conn, srcTable); - } else if (execCmd.isUpgrade()) { - if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) { throw new SQLException( - "May not specify the CURRENT_SCN property when upgrading"); } - if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) { throw new SQLException( - "May not specify the TENANT_ID_ATTRIB property when upgrading"); } - if (execCmd.getInputFiles().isEmpty()) { - List tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn); - if (tablesNeedingUpgrade.isEmpty()) { - String msg = "No tables are required to be upgraded due to incorrect row key order (PHOENIX-2067 and PHOENIX-2120)"; - System.out.println(msg); - } else { - String msg = "The following tables require upgrade due to a bug causing the row key to be incorrectly ordered (PHOENIX-2067 and PHOENIX-2120):\n" - + Joiner.on(' ').join(tablesNeedingUpgrade); - System.out.println("WARNING: " + msg); - } - List unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn); - if (!unsupportedTables.isEmpty()) { - String msg = "The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n" - + Joiner.on(' ').join(unsupportedTables); - System.out.println("WARNING: " + msg); - } - } else { - UpgradeUtil.upgradeDescVarLengthRowKeys(conn, execCmd.getInputFiles(), execCmd.isBypassUpgrade()); - } - } else if(execCmd.isLocalIndexUpgrade()) { - UpgradeUtil.upgradeLocalIndexes(conn); - } else { - for (String inputFile : execCmd.getInputFiles()) { - if (inputFile.endsWith(SQL_FILE_EXT)) { - PhoenixRuntime.executeStatements(conn, new InputStreamReader( - new FileInputStream(inputFile), StandardCharsets.UTF_8), - Collections.emptyList()); - } else if (inputFile.endsWith(CSV_FILE_EXT)) { - - String tableName = execCmd.getTableName(); - if (tableName == null) { - tableName = SchemaUtil.normalizeIdentifier( - inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1, - inputFile.length() - CSV_FILE_EXT.length())); - } - CSVCommonsLoader csvLoader = new CSVCommonsLoader(conn, tableName, execCmd.getColumns(), - execCmd.isStrict(), execCmd.getFieldDelimiter(), execCmd.getQuoteCharacter(), - execCmd.getEscapeCharacter(), execCmd.getArrayElementSeparator()); - csvLoader.upsert(inputFile); - } - } + } else if (execCmd.isLocalIndexUpgrade()) { + UpgradeUtil.upgradeLocalIndexes(conn); + } else { + for (String inputFile : execCmd.getInputFiles()) { + if (inputFile.endsWith(SQL_FILE_EXT)) { + PhoenixRuntime.executeStatements(conn, + new InputStreamReader(new FileInputStream(inputFile), StandardCharsets.UTF_8), + Collections.emptyList()); + } else if (inputFile.endsWith(CSV_FILE_EXT)) { + + String tableName = execCmd.getTableName(); + if (tableName == null) { + tableName = SchemaUtil.normalizeIdentifier( + inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1, + inputFile.length() - CSV_FILE_EXT.length())); } - } catch (Throwable t) { - t.printStackTrace(); - exitStatus = 1; - } finally { - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - //going to shut jvm down anyway. So might as well feast on it. 
- } - } - System.exit(exitStatus); + CSVCommonsLoader csvLoader = new CSVCommonsLoader(conn, tableName, execCmd.getColumns(), + execCmd.isStrict(), execCmd.getFieldDelimiter(), execCmd.getQuoteCharacter(), + execCmd.getEscapeCharacter(), execCmd.getArrayElementSeparator()); + csvLoader.upsert(inputFile); + } } - } - - public static final String PHOENIX_TEST_DRIVER_URL_PARAM = "test=true"; - public static final String SCHEMA_ATTRIB = "schema"; - - private PhoenixRuntime() { - } - - public static final String[] getConnectionProperties() { - return Arrays.copyOf(CONNECTION_PROPERTIES, CONNECTION_PROPERTIES.length); - } - - /** - * Runs a series of semicolon-terminated SQL statements using the connection provided, returning - * the number of SQL statements executed. Note that if the connection has specified an SCN through - * the {@link org.apache.phoenix.util.PhoenixRuntime#CURRENT_SCN_ATTRIB} connection property, then the timestamp - * is bumped up by one after each statement execution. - * @param conn an open JDBC connection - * @param reader a reader for semicolumn separated SQL statements - * @param binds the binds for all statements - * @return the number of SQL statements that were executed - * @throws IOException - * @throws SQLException - */ - public static int executeStatements(Connection conn, Reader reader, List binds) throws IOException,SQLException { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - // Turn auto commit to true when running scripts in case there's DML - pconn.setAutoCommit(true); - return pconn.executeStatements(reader, binds, System.out); - } - - /** - * Get the list of uncommitted KeyValues for the connection. Currently used to write an - * Phoenix-compliant HFile from a map/reduce job. - * @param conn an open JDBC connection - * @return the list of HBase mutations for uncommitted data - * @throws SQLException - */ - @Deprecated - public static List getUncommittedData(Connection conn) throws SQLException { - Iterator>> iterator = getUncommittedDataIterator(conn); - if (iterator.hasNext()) { - return iterator.next().getSecond(); + } + } catch (Throwable t) { + t.printStackTrace(); + exitStatus = 1; + } finally { + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + // going to shut jvm down anyway. So might as well feast on it. } - return Collections.emptyList(); + } + System.exit(exitStatus); } - - /** - * Get the list of uncommitted KeyValues for the connection. Currently used to write an - * Phoenix-compliant HFile from a map/reduce job. - * @param conn an open JDBC connection - * @return the list of HBase mutations for uncommitted data - * @throws SQLException - */ - public static Iterator>> getUncommittedDataIterator(Connection conn) throws SQLException { - return getUncommittedDataIterator(conn, false); + } + + public static final String PHOENIX_TEST_DRIVER_URL_PARAM = "test=true"; + public static final String SCHEMA_ATTRIB = "schema"; + + private PhoenixRuntime() { + } + + public static final String[] getConnectionProperties() { + return Arrays.copyOf(CONNECTION_PROPERTIES, CONNECTION_PROPERTIES.length); + } + + /** + * Runs a series of semicolon-terminated SQL statements using the connection provided, returning + * the number of SQL statements executed. Note that if the connection has specified an SCN through + * the {@link org.apache.phoenix.util.PhoenixRuntime#CURRENT_SCN_ATTRIB} connection property, then + * the timestamp is bumped up by one after each statement execution. 
+ * @param conn an open JDBC connection + * @param reader a reader for semicolumn separated SQL statements + * @param binds the binds for all statements + * @return the number of SQL statements that were executed + */ + public static int executeStatements(Connection conn, Reader reader, List binds) + throws IOException, SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + // Turn auto commit to true when running scripts in case there's DML + pconn.setAutoCommit(true); + return pconn.executeStatements(reader, binds, System.out); + } + + /** + * Get the list of uncommitted KeyValues for the connection. Currently used to write an + * Phoenix-compliant HFile from a map/reduce job. + * @param conn an open JDBC connection + * @return the list of HBase mutations for uncommitted data + */ + @Deprecated + public static List getUncommittedData(Connection conn) throws SQLException { + Iterator>> iterator = getUncommittedDataIterator(conn); + if (iterator.hasNext()) { + return iterator.next().getSecond(); } + return Collections.emptyList(); + } + + /** + * Get the list of uncommitted KeyValues for the connection. Currently used to write an + * Phoenix-compliant HFile from a map/reduce job. + * @param conn an open JDBC connection + * @return the list of HBase mutations for uncommitted data + */ + public static Iterator>> getUncommittedDataIterator(Connection conn) + throws SQLException { + return getUncommittedDataIterator(conn, false); + } + + /** + * Get the list of uncommitted KeyValues for the connection. Currently used to write an + * Phoenix-compliant HFile from a map/reduce job. + * @param conn an open JDBC connection + * @return the list of HBase mutations for uncommitted data + */ + public static Iterator>> getUncommittedDataIterator(Connection conn, + boolean includeMutableIndexes) throws SQLException { + final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + final Iterator>> iterator = + pconn.getMutationState().toMutations(includeMutableIndexes); + return new Iterator>>() { + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } - /** - * Get the list of uncommitted KeyValues for the connection. Currently used to write an - * Phoenix-compliant HFile from a map/reduce job. 
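A hedged sketch of executeStatements in use; the URL and file name are illustrative. The reader supplies semicolon-separated statements and the return value is the number executed.

    // Illustrative sketch, not part of the reformatted source.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:zk1,zk2,zk3:2181");
         Reader reader = new InputStreamReader(new FileInputStream("schema.sql"), StandardCharsets.UTF_8)) {
      int executed = PhoenixRuntime.executeStatements(conn, reader, Collections.emptyList());
    }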
- * @param conn an open JDBC connection - * @return the list of HBase mutations for uncommitted data - * @throws SQLException - */ - public static Iterator>> getUncommittedDataIterator(Connection conn, boolean includeMutableIndexes) throws SQLException { - final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - final Iterator>> iterator = pconn.getMutationState().toMutations(includeMutableIndexes); - return new Iterator>>() { - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Pair> next() { - Pair> pair = iterator.next(); - List keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row - for (Mutation mutation : pair.getSecond()) { - for (List keyValueList : mutation.getFamilyCellMap().values()) { - for (Cell keyValue : keyValueList) { - keyValues.add(PhoenixKeyValueUtil.maybeCopyCell(keyValue)); - } - } - } - Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator()); - return new Pair>(pair.getFirst(),keyValues); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); + @Override + public Pair> next() { + Pair> pair = iterator.next(); + List keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate + // 5 + // key + // values + // per + // row + for (Mutation mutation : pair.getSecond()) { + for (List keyValueList : mutation.getFamilyCellMap().values()) { + for (Cell keyValue : keyValueList) { + keyValues.add(PhoenixKeyValueUtil.maybeCopyCell(keyValue)); } + } + } + Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator()); + return new Pair>(pair.getFirst(), keyValues); + } - }; - } + @Override + public void remove() { + throw new UnsupportedOperationException(); + } - public static PTable getTableNoCache(Connection conn, String name) throws SQLException { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - return pconn.getTableNoCache(name); + }; + } + + public static PTable getTableNoCache(Connection conn, String name) throws SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + return pconn.getTableNoCache(name); + } + + /** + * Returns the table if it is found in the connection metadata cache. If the metadata of this + * table has changed since it was put in the cache these changes will not necessarily be reflected + * in the returned table. If the table is not found, makes a call to the server to fetch the + * latest metadata of the table. This is different than how a table is resolved when it is + * referenced from a query (a call is made to the server to fetch the latest metadata of the table + * depending on the UPDATE_CACHE_FREQUENCY property) See + * https://issues.apache.org/jira/browse/PHOENIX-4475 + * @param name requires a pre-normalized table name or a pre-normalized schema and table name + */ + public static PTable getTable(Connection conn, String name) throws SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + return pconn.getTable(name); + } + + /** + * Similar to {@link #getTable(Connection, String, String, Long)} but returns the most recent + * PTable + */ + public static PTable getTable(Connection conn, String tenantId, String fullTableName) + throws SQLException { + return getTable(conn, tenantId, fullTableName, HConstants.LATEST_TIMESTAMP); + } + + /** + * Returns the PTable as of the timestamp provided. 
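An illustrative sketch of draining uncommitted data the way an HFile-producing map/reduce job would; conn is assumed to be a Phoenix connection that currently holds pending, uncommitted upserts.

    // Illustrative sketch, not part of the reformatted source.
    Iterator<Pair<byte[], List<KeyValue>>> it = PhoenixRuntime.getUncommittedDataIterator(conn);
    while (it.hasNext()) {
      Pair<byte[], List<KeyValue>> tableData = it.next();
      byte[] physicalTableName = tableData.getFirst();
      List<KeyValue> sortedKeyValues = tableData.getSecond(); // sorted with the connection's KeyValue comparator
      // write sortedKeyValues to an HFile for physicalTableName
    }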
This method can be used to fetch tenant + * specific PTable through a global connection. A null timestamp would result in the client side + * metadata cache being used (ie. in case table metadata is already present it'll be returned). To + * get the latest metadata use {@link #getTable(Connection, String, String)} + * @throws NullPointerException if conn or fullTableName is null + * @throws IllegalArgumentException if timestamp is negative + */ + public static PTable getTable(Connection conn, @Nullable String tenantId, String fullTableName, + @Nullable Long timestamp) throws SQLException { + checkNotNull(conn); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + return pconn.getTable(tenantId, fullTableName, timestamp); + } + + /** + * Get list of ColumnInfos that contain Column Name and its associated PDataType for an import. + * The supplied list of columns can be null -- if it is non-null, it represents a user-supplied + * list of columns to be imported. + * @param conn Phoenix connection from which metadata will be read + * @param tableName Phoenix table name whose columns are to be checked. Can include a schema name + * @param columns user-supplied list of import columns, can be null + */ + public static List generateColumnInfo(Connection conn, String tableName, + List columns) throws SQLException { + PTable table = PhoenixRuntime.getTable(conn, SchemaUtil.normalizeFullTableName(tableName)); + List columnInfoList = Lists.newArrayList(); + Set unresolvedColumnNames = new TreeSet(); + if (columns == null || columns.isEmpty()) { + // use all columns in the table + int offset = 0; + if (table.getBucketNum() != null) { + offset++; + } + if (table.getTenantId() != null) { + offset++; + } + for (int i = offset; i < table.getColumns().size(); i++) { + PColumn pColumn = table.getColumns().get(i); + columnInfoList.add(PhoenixRuntime.getColumnInfo(pColumn)); + } + } else { + // Leave "null" as indication to skip b/c it doesn't exist + for (int i = 0; i < columns.size(); i++) { + String columnName = columns.get(i); + try { + ColumnInfo columnInfo = PhoenixRuntime.getColumnInfo(table, columnName); + columnInfoList.add(columnInfo); + } catch (ColumnNotFoundException cnfe) { + unresolvedColumnNames.add(columnName); + } catch (AmbiguousColumnException ace) { + unresolvedColumnNames.add(columnName); + } + } } - - /** - * Returns the table if it is found in the connection metadata cache. If the metadata of this - * table has changed since it was put in the cache these changes will not necessarily be - * reflected in the returned table. If the table is not found, makes a call to the server to - * fetch the latest metadata of the table. This is different than how a table is resolved when - * it is referenced from a query (a call is made to the server to fetch the latest metadata of the table - * depending on the UPDATE_CACHE_FREQUENCY property) - * See https://issues.apache.org/jira/browse/PHOENIX-4475 - * @param conn - * @param name requires a pre-normalized table name or a pre-normalized schema and table name - * @return - * @throws SQLException - */ - public static PTable getTable(Connection conn, String name) throws SQLException { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - return pconn.getTable(name); + // if there exists columns that cannot be resolved, error out. 
+ if (unresolvedColumnNames.size() > 0) { + StringBuilder exceptionMessage = new StringBuilder(); + boolean first = true; + exceptionMessage.append("Unable to resolve these column names:\n"); + for (String col : unresolvedColumnNames) { + if (first) first = false; + else exceptionMessage.append(","); + exceptionMessage.append(col); + } + exceptionMessage.append("\nAvailable columns with column families:\n"); + first = true; + for (PColumn pColumn : table.getColumns()) { + if (first) first = false; + else exceptionMessage.append(","); + exceptionMessage.append(pColumn.toString()); + } + throw new SQLException(exceptionMessage.toString()); } - - /** - * Similar to {@link #getTable(Connection, String, String, Long)} but returns the most recent - * PTable - */ - public static PTable getTable(Connection conn, String tenantId, String fullTableName) - throws SQLException { - return getTable(conn, tenantId, fullTableName, HConstants.LATEST_TIMESTAMP); + return columnInfoList; + } + + /** + * Returns the column info for the given column for the given table. + * @param columnName User-specified column name. May be family-qualified or bare. + * @return columnInfo associated with the column in the table + * @throws SQLException if parameters are null or if column is not found or if column is + * ambiguous. + */ + public static ColumnInfo getColumnInfo(PTable table, String columnName) throws SQLException { + if (table == null) { + throw new SQLException("Table must not be null."); } - - /** - * Returns the PTable as of the timestamp provided. This method can be used to fetch tenant - * specific PTable through a global connection. A null timestamp would result in the client side - * metadata cache being used (ie. in case table metadata is already present it'll be returned). - * To get the latest metadata use {@link #getTable(Connection, String, String)} - * @param conn - * @param tenantId - * @param fullTableName - * @param timestamp - * @return PTable - * @throws SQLException - * @throws NullPointerException if conn or fullTableName is null - * @throws IllegalArgumentException if timestamp is negative - */ - public static PTable getTable(Connection conn, @Nullable String tenantId, String fullTableName, - @Nullable Long timestamp) throws SQLException { - checkNotNull(conn); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - return pconn.getTable(tenantId, fullTableName, timestamp); + if (columnName == null) { + throw new SQLException("columnName must not be null."); } - - /** - * Get list of ColumnInfos that contain Column Name and its associated PDataType for an import. - * The supplied list of columns can be null -- if it is non-null, it represents a user-supplied - * list of columns to be imported. - * @param conn Phoenix connection from which metadata will be read - * @param tableName Phoenix table name whose columns are to be checked. 
Can include a schema - * name - * @param columns user-supplied list of import columns, can be null - */ - public static List generateColumnInfo(Connection conn, - String tableName, List columns) - throws SQLException { - PTable table = PhoenixRuntime.getTable(conn, SchemaUtil.normalizeFullTableName(tableName)); - List columnInfoList = Lists.newArrayList(); - Set unresolvedColumnNames = new TreeSet(); - if (columns == null || columns.isEmpty()) { - // use all columns in the table - int offset = 0; - if (table.getBucketNum() != null) { - offset++; - } - if (table.getTenantId() != null) { - offset++; - } - for (int i = offset; i < table.getColumns().size(); i++) { - PColumn pColumn = table.getColumns().get(i); - columnInfoList.add(PhoenixRuntime.getColumnInfo(pColumn)); - } - } else { - // Leave "null" as indication to skip b/c it doesn't exist - for (int i = 0; i < columns.size(); i++) { - String columnName = columns.get(i); - try { - ColumnInfo columnInfo = PhoenixRuntime.getColumnInfo(table, columnName); - columnInfoList.add(columnInfo); - } catch (ColumnNotFoundException cnfe) { - unresolvedColumnNames.add(columnName); - } catch (AmbiguousColumnException ace) { - unresolvedColumnNames.add(columnName); - } - } - } - // if there exists columns that cannot be resolved, error out. - if (unresolvedColumnNames.size()>0) { - StringBuilder exceptionMessage = new StringBuilder(); - boolean first = true; - exceptionMessage.append("Unable to resolve these column names:\n"); - for (String col : unresolvedColumnNames) { - if (first) first = false; - else exceptionMessage.append(","); - exceptionMessage.append(col); - } - exceptionMessage.append("\nAvailable columns with column families:\n"); - first = true; - for (PColumn pColumn : table.getColumns()) { - if (first) first = false; - else exceptionMessage.append(","); - exceptionMessage.append(pColumn.toString()); - } - throw new SQLException(exceptionMessage.toString()); + PColumn pColumn = null; + if (columnName.contains(QueryConstants.NAME_SEPARATOR)) { + String[] tokens = columnName.split(QueryConstants.NAME_SEPARATOR_REGEX); + if (tokens.length != 2) { + throw new SQLException(String + .format("Unable to process column %s, expected family-qualified name.", columnName)); } - return columnInfoList; + String familyName = tokens[0]; + String familyColumn = tokens[1]; + PColumnFamily family = table.getColumnFamily(familyName); + pColumn = family.getPColumnForColumnName(familyColumn); + } else { + pColumn = table.getColumnForColumnName(columnName); } - - /** - * Returns the column info for the given column for the given table. - * - * @param table - * @param columnName User-specified column name. May be family-qualified or bare. - * @return columnInfo associated with the column in the table - * @throws SQLException if parameters are null or if column is not found or if column is ambiguous. 
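[Illustrative aside, not part of the patch] A small sketch driving the import-column resolution described above; the table and column names are assumptions, and a null or empty column list would resolve every column of the table instead:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import java.util.List;

import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.PhoenixRuntime;

public class ResolveImportColumns {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // "CF.NAME" is a family-qualified column; unresolvable names raise a SQLException
      // listing the available columns, as implemented above.
      List<ColumnInfo> columns = PhoenixRuntime.generateColumnInfo(
        conn, "MY_SCHEMA.MY_TABLE", Arrays.asList("ID", "CF.NAME"));
      for (ColumnInfo column : columns) {
        System.out.println(column); // column name plus its SQL type
      }
    }
  }
}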
- */ - public static ColumnInfo getColumnInfo(PTable table, String columnName) throws SQLException { - if (table==null) { - throw new SQLException("Table must not be null."); - } - if (columnName==null) { - throw new SQLException("columnName must not be null."); - } - PColumn pColumn = null; - if (columnName.contains(QueryConstants.NAME_SEPARATOR)) { - String[] tokens = columnName.split(QueryConstants.NAME_SEPARATOR_REGEX); - if (tokens.length!=2) { - throw new SQLException(String.format("Unable to process column %s, expected family-qualified name.",columnName)); - } - String familyName = tokens[0]; - String familyColumn = tokens[1]; - PColumnFamily family = table.getColumnFamily(familyName); - pColumn = family.getPColumnForColumnName(familyColumn); - } else { - pColumn = table.getColumnForColumnName(columnName); - } - return getColumnInfo(pColumn); + return getColumnInfo(pColumn); + } + + /** + * Constructs a column info for the supplied pColumn + * @throws SQLException if the parameter is null. + */ + public static ColumnInfo getColumnInfo(PColumn pColumn) throws SQLException { + if (pColumn == null) { + throw new SQLException("pColumn must not be null."); } + return ColumnInfo.create(pColumn.toString(), pColumn.getDataType().getSqlType(), + pColumn.getMaxLength(), pColumn.getScale()); + } + + /** + * Represents the parsed commandline parameters definining the command or commands to be executed. + */ + static class ExecutionCommand { + private String connectionString; + private List columns; + private String tableName; + private char fieldDelimiter; + private char quoteCharacter; + private Character escapeCharacter; + private String arrayElementSeparator; + private boolean strict; + private List inputFiles; + private boolean isUpgrade; + private boolean isBypassUpgrade; + private boolean mapNamespace; + private String srcTable; + private boolean localIndexUpgrade; + private String binaryEncoding; /** - * Constructs a column info for the supplied pColumn - * @param pColumn - * @return columnInfo - * @throws SQLException if the parameter is null. + * Factory method to build up an {@code ExecutionCommand} based on supplied parameters. */ - public static ColumnInfo getColumnInfo(PColumn pColumn) throws SQLException { - if (pColumn == null) { - throw new SQLException("pColumn must not be null."); - } - return ColumnInfo.create(pColumn.toString(), pColumn.getDataType().getSqlType(), - pColumn.getMaxLength(), pColumn.getScale()); - } - - /** - * Represents the parsed commandline parameters definining the command or commands to be - * executed. - */ - static class ExecutionCommand { - private String connectionString; - private List columns; - private String tableName; - private char fieldDelimiter; - private char quoteCharacter; - private Character escapeCharacter; - private String arrayElementSeparator; - private boolean strict; - private List inputFiles; - private boolean isUpgrade; - private boolean isBypassUpgrade; - private boolean mapNamespace; - private String srcTable; - private boolean localIndexUpgrade; - private String binaryEncoding; - - /** - * Factory method to build up an {@code ExecutionCommand} based on supplied parameters. 
- */ - public static ExecutionCommand parseArgs(String[] args) { - Option tableOption = new Option("t", "table", true, - "Overrides the table into which the CSV data is loaded and is case sensitive"); - Option binaryEncodingOption = new Option("b", "binaryEncoding", true, - "Specifies binary encoding"); - Option headerOption = new Option("h", "header", true, "Overrides the column names to" + - " which the CSV data maps and is case sensitive. A special value of " + - "in-line indicating that the first line of the CSV file determines the " + - "column to which the data maps"); - Option strictOption = new Option("s", "strict", false, "Use strict mode by throwing " + - "an exception if a column name doesn't match during CSV loading"); - Option delimiterOption = new Option("d", "delimiter", true, - "Field delimiter for CSV loader. A digit is interpreted as " + - "1 -> ctrl A, 2 -> ctrl B ... 9 -> ctrl I."); - Option quoteCharacterOption = new Option("q", "quote-character", true, - "Quote character for CSV loader. A digit is interpreted as a control " + - "character"); - Option escapeCharacterOption = new Option("e", "escape-character", true, - "Escape character for CSV loader. A digit is interpreted as a control " + - "character"); - Option arrayValueSeparatorOption = new Option("a", "array-separator", true, - "Define the array element separator, defaults to ':'"); - Option upgradeOption = new Option("u", "upgrade", false, "Upgrades tables specified as arguments " + - "by rewriting them with the correct row key for descending columns. If no arguments are " + - "specified, then tables that need to be upgraded will be displayed without being upgraded. " + - "Use the -b option to bypass the rewrite if you know that your data does not need to be upgrade. " + - "This would only be the case if you have not relied on auto padding for BINARY and CHAR data, " + - "but instead have always provided data up to the full max length of the column. See PHOENIX-2067 " + - "and PHOENIX-2120 for more information. " + - "Note that " + QueryServices.THREAD_TIMEOUT_MS_ATTRIB + " and hbase.regionserver.lease.period " + - "parameters must be set very high to prevent timeouts when upgrading."); - Option bypassUpgradeOption = new Option("b", "bypass-upgrade", false, - "Used in conjunction with the -u option to bypass the rewrite during upgrade if you know that your data does not need to be upgrade. " + - "This would only be the case if you have not relied on auto padding for BINARY and CHAR data, " + - "but instead have always provided data up to the full max length of the column. See PHOENIX-2067 " + - "and PHOENIX-2120 for more information. 
"); - Option mapNamespaceOption = new Option("m", "map-namespace", true, - "Used to map table to a namespace matching with schema, require "+ QueryServices.IS_NAMESPACE_MAPPING_ENABLED + - " to be enabled"); - Option localIndexUpgradeOption = new Option("l", "local-index-upgrade", false, - "Used to upgrade local index data by moving index data from separate table to " - + "separate column families in the same table."); - Options options = new Options(); - options.addOption(tableOption); - options.addOption(headerOption); - options.addOption(strictOption); - options.addOption(delimiterOption); - options.addOption(quoteCharacterOption); - options.addOption(escapeCharacterOption); - options.addOption(arrayValueSeparatorOption); - options.addOption(upgradeOption); - options.addOption(bypassUpgradeOption); - options.addOption(mapNamespaceOption); - options.addOption(localIndexUpgradeOption); - options.addOption(binaryEncodingOption); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - usageError(options); - } - - ExecutionCommand execCmd = new ExecutionCommand(); - execCmd.connectionString = ""; - if(cmdLine.hasOption(mapNamespaceOption.getOpt())){ - execCmd.mapNamespace = true; - execCmd.srcTable = validateTableName(cmdLine.getOptionValue(mapNamespaceOption.getOpt())); - } - if (cmdLine.hasOption(tableOption.getOpt())) { - execCmd.tableName = cmdLine.getOptionValue(tableOption.getOpt()); - } - - if (cmdLine.hasOption(binaryEncodingOption.getOpt())) { - execCmd.binaryEncoding = cmdLine.getOptionValue(binaryEncodingOption.getOpt()); - } - - if (cmdLine.hasOption(headerOption.getOpt())) { - String columnString = cmdLine.getOptionValue(headerOption.getOpt()); - if (HEADER_IN_LINE.equals(columnString)) { - execCmd.columns = ImmutableList.of(); - } else { - execCmd.columns = ImmutableList.copyOf( - Splitter.on(",").trimResults().split(columnString)); - } - } - - execCmd.strict = cmdLine.hasOption(strictOption.getOpt()); - execCmd.fieldDelimiter = getCharacter( - cmdLine.getOptionValue(delimiterOption.getOpt(), ",")); - execCmd.quoteCharacter = getCharacter( - cmdLine.getOptionValue(quoteCharacterOption.getOpt(), "\"")); - - if (cmdLine.hasOption(escapeCharacterOption.getOpt())) { - execCmd.escapeCharacter = getCharacter( - cmdLine.getOptionValue(escapeCharacterOption.getOpt(), "\\")); - } - - execCmd.arrayElementSeparator = cmdLine.getOptionValue( - arrayValueSeparatorOption.getOpt(), - CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR); - - if (cmdLine.hasOption(upgradeOption.getOpt())) { - execCmd.isUpgrade = true; - } - - if (cmdLine.hasOption(bypassUpgradeOption.getOpt())) { - if (!execCmd.isUpgrade()) { - usageError("The bypass-upgrade option may only be used in conjunction with the -u option", options); - } - execCmd.isBypassUpgrade = true; - } - if(cmdLine.hasOption(localIndexUpgradeOption.getOpt())) { - execCmd.localIndexUpgrade = true; - } - - List argList = Lists.newArrayList(cmdLine.getArgList()); - if (argList.isEmpty()) { - usageError("At least one input file must be supplied", options); - } - List inputFiles = Lists.newArrayList(); - int i = 0; - for (String arg : argList) { - if (execCmd.isUpgrade || arg.endsWith(CSV_FILE_EXT) || arg.endsWith(SQL_FILE_EXT)) { - inputFiles.add(arg); - } else { - if (i == 0) { - execCmd.connectionString = arg; - } else { - usageError("Don't know how to 
interpret argument '" + arg + "'", options); - } - } - i++; - } - - if (inputFiles.isEmpty() && !execCmd.isUpgrade && !execCmd.isMapNamespace() && !execCmd.isLocalIndexUpgrade()) { - usageError("At least one input file must be supplied", options); - } - - execCmd.inputFiles = inputFiles; - - return execCmd; - } - - private static String validateTableName(String tableName) { - if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - throw new IllegalArgumentException( - "tablename:" + tableName + " cannot have '" + QueryConstants.NAMESPACE_SEPARATOR + "' "); - } else { - return tableName; - } - - } - - private static char getCharacter(String s) { - String unescaped = StringEscapeUtils.unescapeJava(s); - if (unescaped.length() > 1) { - throw new IllegalArgumentException("Invalid single character: '" + unescaped + "'"); - } - return unescaped.charAt(0); - } - - private static void usageError(String errorMsg, Options options) { - System.out.println(errorMsg); - usageError(options); - } - - private static void usageError(Options options) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp( - "psql [-t table-name] [-h comma-separated-column-names | in-line] [-d " + - "field-delimiter-char quote-char escape-char] " + - "...", - options); - System.out.println("Examples:\n" + - " psql my_ddl.sql\n" + - " psql localhost my_ddl.sql\n" + - " psql localhost my_ddl.sql my_table.csv\n" + - " psql -t MY_TABLE my_cluster:1825 my_table2012-Q3.csv\n" + - " psql -t MY_TABLE -h COL1,COL2,COL3 my_cluster:1825 my_table2012-Q3.csv\n" + - " psql -t MY_TABLE -h COL1,COL2,COL3 -d : my_cluster:1825 my_table2012-Q3.csv"); - System.exit(-1); - } + public static ExecutionCommand parseArgs(String[] args) { + Option tableOption = new Option("t", "table", true, + "Overrides the table into which the CSV data is loaded and is case sensitive"); + Option binaryEncodingOption = + new Option("b", "binaryEncoding", true, "Specifies binary encoding"); + Option headerOption = new Option("h", "header", true, + "Overrides the column names to" + + " which the CSV data maps and is case sensitive. A special value of " + + "in-line indicating that the first line of the CSV file determines the " + + "column to which the data maps"); + Option strictOption = new Option("s", "strict", false, "Use strict mode by throwing " + + "an exception if a column name doesn't match during CSV loading"); + Option delimiterOption = new Option("d", "delimiter", true, + "Field delimiter for CSV loader. A digit is interpreted as " + + "1 -> ctrl A, 2 -> ctrl B ... 9 -> ctrl I."); + Option quoteCharacterOption = new Option("q", "quote-character", true, + "Quote character for CSV loader. A digit is interpreted as a control " + "character"); + Option escapeCharacterOption = new Option("e", "escape-character", true, + "Escape character for CSV loader. A digit is interpreted as a control " + "character"); + Option arrayValueSeparatorOption = new Option("a", "array-separator", true, + "Define the array element separator, defaults to ':'"); + Option upgradeOption = new Option("u", "upgrade", false, + "Upgrades tables specified as arguments " + + "by rewriting them with the correct row key for descending columns. If no arguments are " + + "specified, then tables that need to be upgraded will be displayed without being upgraded. " + + "Use the -b option to bypass the rewrite if you know that your data does not need to be upgrade. 
" + + "This would only be the case if you have not relied on auto padding for BINARY and CHAR data, " + + "but instead have always provided data up to the full max length of the column. See PHOENIX-2067 " + + "and PHOENIX-2120 for more information. " + "Note that " + + QueryServices.THREAD_TIMEOUT_MS_ATTRIB + " and hbase.regionserver.lease.period " + + "parameters must be set very high to prevent timeouts when upgrading."); + Option bypassUpgradeOption = new Option("b", "bypass-upgrade", false, + "Used in conjunction with the -u option to bypass the rewrite during upgrade if you know that your data does not need to be upgrade. " + + "This would only be the case if you have not relied on auto padding for BINARY and CHAR data, " + + "but instead have always provided data up to the full max length of the column. See PHOENIX-2067 " + + "and PHOENIX-2120 for more information. "); + Option mapNamespaceOption = new Option("m", "map-namespace", true, + "Used to map table to a namespace matching with schema, require " + + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " to be enabled"); + Option localIndexUpgradeOption = new Option("l", "local-index-upgrade", false, + "Used to upgrade local index data by moving index data from separate table to " + + "separate column families in the same table."); + Options options = new Options(); + options.addOption(tableOption); + options.addOption(headerOption); + options.addOption(strictOption); + options.addOption(delimiterOption); + options.addOption(quoteCharacterOption); + options.addOption(escapeCharacterOption); + options.addOption(arrayValueSeparatorOption); + options.addOption(upgradeOption); + options.addOption(bypassUpgradeOption); + options.addOption(mapNamespaceOption); + options.addOption(localIndexUpgradeOption); + options.addOption(binaryEncodingOption); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + usageError(options); + } - public String getConnectionString() { - return connectionString; - } + ExecutionCommand execCmd = new ExecutionCommand(); + execCmd.connectionString = ""; + if (cmdLine.hasOption(mapNamespaceOption.getOpt())) { + execCmd.mapNamespace = true; + execCmd.srcTable = validateTableName(cmdLine.getOptionValue(mapNamespaceOption.getOpt())); + } + if (cmdLine.hasOption(tableOption.getOpt())) { + execCmd.tableName = cmdLine.getOptionValue(tableOption.getOpt()); + } - public List getColumns() { - return columns; - } + if (cmdLine.hasOption(binaryEncodingOption.getOpt())) { + execCmd.binaryEncoding = cmdLine.getOptionValue(binaryEncodingOption.getOpt()); + } - public String getTableName() { - return tableName; + if (cmdLine.hasOption(headerOption.getOpt())) { + String columnString = cmdLine.getOptionValue(headerOption.getOpt()); + if (HEADER_IN_LINE.equals(columnString)) { + execCmd.columns = ImmutableList.of(); + } else { + execCmd.columns = + ImmutableList.copyOf(Splitter.on(",").trimResults().split(columnString)); } + } - public char getFieldDelimiter() { - return fieldDelimiter; - } + execCmd.strict = cmdLine.hasOption(strictOption.getOpt()); + execCmd.fieldDelimiter = getCharacter(cmdLine.getOptionValue(delimiterOption.getOpt(), ",")); + execCmd.quoteCharacter = + getCharacter(cmdLine.getOptionValue(quoteCharacterOption.getOpt(), "\"")); - public char getQuoteCharacter() { - return quoteCharacter; - } + if 
(cmdLine.hasOption(escapeCharacterOption.getOpt())) { + execCmd.escapeCharacter = + getCharacter(cmdLine.getOptionValue(escapeCharacterOption.getOpt(), "\\")); + } - public Character getEscapeCharacter() { - return escapeCharacter; - } + execCmd.arrayElementSeparator = cmdLine.getOptionValue(arrayValueSeparatorOption.getOpt(), + CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR); - public String getArrayElementSeparator() { - return arrayElementSeparator; - } + if (cmdLine.hasOption(upgradeOption.getOpt())) { + execCmd.isUpgrade = true; + } - public List getInputFiles() { - return inputFiles; + if (cmdLine.hasOption(bypassUpgradeOption.getOpt())) { + if (!execCmd.isUpgrade()) { + usageError("The bypass-upgrade option may only be used in conjunction with the -u option", + options); } + execCmd.isBypassUpgrade = true; + } + if (cmdLine.hasOption(localIndexUpgradeOption.getOpt())) { + execCmd.localIndexUpgrade = true; + } - public boolean isStrict() { - return strict; + List argList = Lists.newArrayList(cmdLine.getArgList()); + if (argList.isEmpty()) { + usageError("At least one input file must be supplied", options); + } + List inputFiles = Lists.newArrayList(); + int i = 0; + for (String arg : argList) { + if (execCmd.isUpgrade || arg.endsWith(CSV_FILE_EXT) || arg.endsWith(SQL_FILE_EXT)) { + inputFiles.add(arg); + } else { + if (i == 0) { + execCmd.connectionString = arg; + } else { + usageError("Don't know how to interpret argument '" + arg + "'", options); + } } + i++; + } - public boolean isUpgrade() { - return isUpgrade; - } + if ( + inputFiles.isEmpty() && !execCmd.isUpgrade && !execCmd.isMapNamespace() + && !execCmd.isLocalIndexUpgrade() + ) { + usageError("At least one input file must be supplied", options); + } - public boolean isBypassUpgrade() { - return isBypassUpgrade; - } + execCmd.inputFiles = inputFiles; - public boolean isMapNamespace() { - return mapNamespace; - } + return execCmd; + } - public String getSrcTable() { - return srcTable; - } + private static String validateTableName(String tableName) { + if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + throw new IllegalArgumentException( + "tablename:" + tableName + " cannot have '" + QueryConstants.NAMESPACE_SEPARATOR + "' "); + } else { + return tableName; + } - public boolean isLocalIndexUpgrade() { - return localIndexUpgrade; - } - } - - /** - * Returns the opitmized query plan used by phoenix for executing the sql. - * @param stmt to return the plan for - * @throws SQLException - */ - public static QueryPlan getOptimizedQueryPlan(PreparedStatement stmt) throws SQLException { - checkNotNull(stmt); - QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); - return plan; - } - - /** - * Whether or not the query plan has any order by expressions. - * @param plan - * @return - */ - public static boolean hasOrderBy(QueryPlan plan) { - checkNotNull(plan); - List orderBys = plan.getOrderBy().getOrderByExpressions(); - return orderBys != null && !orderBys.isEmpty(); - } - - public static int getLimit(QueryPlan plan) { - checkNotNull(plan); - return plan.getLimit() == null ? 0 : plan.getLimit(); - } - - private static String addQuotes(String str) { - return str == null ? str : "\"" + str + "\""; - } - - /** - * Get the column family, column name pairs that make up the row key of the table that will be queried. - * @param conn - connection used to generate the query plan. Caller should take care of closing the connection appropriately. - * @param plan - query plan to get info for. 
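[Illustrative aside, not part of the patch] A minimal sketch combining getOptimizedQueryPlan, hasOrderBy and getPkColsForSql as documented here; the connection URL and query are assumptions, and the generic element types are inferred since the diff above elides them:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.List;

import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.util.PhoenixRuntime;

public class InspectQueryPlan {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         PreparedStatement stmt =
           conn.prepareStatement("SELECT * FROM MY_SCHEMA.MY_TABLE WHERE ID = ?")) {
      QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt);
      System.out.println("has ORDER BY: " + PhoenixRuntime.hasOrderBy(plan));

      // Family name (first element) may be null; both parts come back double-quoted.
      List<Pair<String, String>> pkCols = PhoenixRuntime.getPkColsForSql(conn, plan);
      for (Pair<String, String> pkCol : pkCols) {
        System.out.println(pkCol.getFirst() + " / " + pkCol.getSecond());
      }
    }
  }
}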
- * @return the pairs of column family name and column name columns in the data table that make up the row key for - * the table used in the query plan. Column family names are optional and hence the first part of the pair is nullable. - * Column names and family names are enclosed in double quotes to allow for case sensitivity and for presence of - * special characters. Salting column and view index id column are not included. If the connection is tenant specific - * and the table used by the query plan is multi-tenant, then the tenant id column is not included as well. - * @throws SQLException - */ - public static List> getPkColsForSql(Connection conn, QueryPlan plan) throws SQLException { - checkNotNull(plan); - checkNotNull(conn); - List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn); - List> columns = Lists.newArrayListWithExpectedSize(pkColumns.size()); - String columnName; - String familyName; - for (PColumn pCol : pkColumns ) { - columnName = addQuotes(pCol.getName().getString()); - familyName = pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null; - columns.add(new Pair(familyName, columnName)); - } - return columns; } - /** - * - * @param columns - Initialized empty list to be filled with the pairs of column family name and column name for columns that are used - * as row key for the query plan. Column family names are optional and hence the first part of the pair is nullable. - * Column names and family names are enclosed in double quotes to allow for case sensitivity and for presence of - * special characters. Salting column and view index id column are not included. If the connection is tenant specific - * and the table used by the query plan is multi-tenant, then the tenant id column is not included as well. - * @param plan - query plan to get info for. - * @param conn - connection used to generate the query plan. Caller should take care of closing the connection appropriately. - * @param forDataTable - if true, then family names and column names correspond to the data table even if the query plan uses - * the secondary index table. If false, and if the query plan uses the secondary index table, then the family names and column - * names correspond to the index table. - * @throws SQLException - */ - @Deprecated - public static void getPkColsForSql(List> columns, QueryPlan plan, Connection conn, boolean forDataTable) throws SQLException { - checkNotNull(columns); - checkNotNull(plan); - checkNotNull(conn); - List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn, forDataTable); - String columnName; - String familyName; - for (PColumn pCol : pkColumns ) { - columnName = addQuotes(pCol.getName().getString()); - familyName = pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null; - columns.add(new Pair(familyName, columnName)); - } + private static char getCharacter(String s) { + String unescaped = StringEscapeUtils.unescapeJava(s); + if (unescaped.length() > 1) { + throw new IllegalArgumentException("Invalid single character: '" + unescaped + "'"); + } + return unescaped.charAt(0); } - /** - * @param columns - Initialized empty list to be filled with the pairs of column family name and column name for columns that are used - * as row key for the query plan. Column family names are optional and hence the first part of the pair is nullable. - * Column names and family names are enclosed in double quotes to allow for case sensitivity and for presence of - * special characters. 
Salting column and view index id column are not included. If the connection is tenant specific - * and the table used by the query plan is multi-tenant, then the tenant id column is not included as well. - * @param dataTypes - Initialized empty list to be filled with the corresponding data type for the columns in @param columns. - * @param plan - query plan to get info for - * @param conn - phoenix connection used to generate the query plan. Caller should take care of closing the connection appropriately. - * @param forDataTable - if true, then column names and data types correspond to the data table even if the query plan uses - * the secondary index table. If false, and if the query plan uses the secondary index table, then the column names and data - * types correspond to the index table. - * @throws SQLException - */ - @Deprecated - public static void getPkColsDataTypesForSql(List> columns, List dataTypes, QueryPlan plan, Connection conn, boolean forDataTable) throws SQLException { - checkNotNull(columns); - checkNotNull(dataTypes); - checkNotNull(plan); - checkNotNull(conn); - List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn, forDataTable); - String columnName; - String familyName; - for (PColumn pCol : pkColumns) { - String sqlTypeName = getSqlTypeName(pCol); - dataTypes.add(sqlTypeName); - columnName = addQuotes(pCol.getName().getString()); - familyName = pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null; - columns.add(new Pair(familyName, columnName)); - } - } - - /** - * - * @param pCol - * @return sql type name that could be used in DDL statements, dynamic column types etc. - */ - public static String getSqlTypeName(PColumn pCol) { - PDataType dataType = pCol.getDataType(); - Integer maxLength = pCol.getMaxLength(); - Integer scale = pCol.getScale(); - return getSqlTypeName(dataType, maxLength, scale); + private static void usageError(String errorMsg, Options options) { + System.out.println(errorMsg); + usageError(options); } - public static String getSqlTypeName(PDataType dataType, Integer maxLength, Integer scale) { - return dataType.isArrayType() ? 
getArraySqlTypeName(maxLength, scale, dataType) : appendMaxLengthAndScale(maxLength, scale, dataType.getSqlTypeName()); + private static void usageError(Options options) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("psql [-t table-name] [-h comma-separated-column-names | in-line] [-d " + + "field-delimiter-char quote-char escape-char] " + + "...", options); + System.out.println("Examples:\n" + " psql my_ddl.sql\n" + " psql localhost my_ddl.sql\n" + + " psql localhost my_ddl.sql my_table.csv\n" + + " psql -t MY_TABLE my_cluster:1825 my_table2012-Q3.csv\n" + + " psql -t MY_TABLE -h COL1,COL2,COL3 my_cluster:1825 my_table2012-Q3.csv\n" + + " psql -t MY_TABLE -h COL1,COL2,COL3 -d : my_cluster:1825 my_table2012-Q3.csv"); + System.exit(-1); } - - public static String getArraySqlTypeName(@Nullable Integer maxLength, @Nullable Integer scale, PDataType arrayType) { - String baseTypeSqlName = PDataType.arrayBaseType(arrayType).getSqlTypeName(); - return appendMaxLengthAndScale(maxLength, scale, baseTypeSqlName) + " " + ARRAY_TYPE_SUFFIX; // for ex - decimal(10,2) ARRAY + + public String getConnectionString() { + return connectionString; } - private static String appendMaxLengthAndScale(@Nullable Integer maxLength, @Nullable Integer scale, String sqlTypeName) { - if (maxLength != null) { - sqlTypeName = sqlTypeName + "(" + maxLength; - if (scale != null) { - sqlTypeName = sqlTypeName + "," + scale; // has both max length and scale. For ex- decimal(10,2) - } - sqlTypeName = sqlTypeName + ")"; - } - return sqlTypeName; + public List getColumns() { + return columns; } - - @Deprecated - private static List getPkColumns(PTable ptable, Connection conn, boolean forDataTable) throws SQLException { - PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); - List pkColumns = ptable.getPKColumns(); - - // Skip the salting column and the view index id column if present. - // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant - int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1); - - // get a sublist of pkColumns by skipping the offset columns. - pkColumns = pkColumns.subList(offset, pkColumns.size()); - - if (ptable.getType() == PTableType.INDEX && forDataTable) { - // index tables have the same schema name as their parent/data tables. - String fullDataTableName = ptable.getParentName().getString(); - - // Get the corresponding columns of the data table. - List dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); - pkColumns = dataColumns; - } - return pkColumns; + + public String getTableName() { + return tableName; } - - private static List getPkColumns(PTable ptable, Connection conn) throws SQLException { - PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); - List pkColumns = ptable.getPKColumns(); - - // Skip the salting column and the view index id column if present. - // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant - int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1); - - // get a sublist of pkColumns by skipping the offset columns. 
- pkColumns = pkColumns.subList(offset, pkColumns.size()); - - if (ptable.getType() == PTableType.INDEX) { - // index tables have the same schema name as their parent/data tables. - String fullDataTableName = ptable.getParentName().getString(); - - // Get the corresponding columns of the data table. - List dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); - pkColumns = dataColumns; - } - return pkColumns; + + public char getFieldDelimiter() { + return fieldDelimiter; } - /** - * - * @param conn connection that was used for reading/generating value. - * @param fullTableName fully qualified table name - * @param values values of the columns - * @param columns list of pair of column that includes column family as first part and column name as the second part. - * Column family is optional and hence nullable. Columns in the list have to be in the same order as the order of occurence - * of their values in the object array. - * @return values encoded in a byte array - * @throws SQLException - * @see decodeValues(Connection, String, byte[], List) - */ - @Deprecated - public static byte[] encodeValues(Connection conn, String fullTableName, Object[] values, List> columns) throws SQLException { - PTable table = getTable(conn, fullTableName); - List pColumns = getPColumns(table, columns); - List expressions = new ArrayList(pColumns.size()); - int i = 0; - for (PColumn col : pColumns) { - Object value = values[i]; - // for purposes of encoding, sort order of the columns doesn't matter. - Expression expr = LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale()); - expressions.add(expr); - i++; - } - KeyValueSchema kvSchema = buildKeyValueSchema(pColumns); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); - return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr); + public char getQuoteCharacter() { + return quoteCharacter; } - - - /** - * - * @param conn connection that was used for reading/generating value. - * @param fullTableName fully qualified table name - * @param value byte value of the columns concatenated as a single byte array. @see {@link #encodeColumnValues(Connection, String, Object[], List)} - * @param columns list of column names for the columns that have their respective values - * present in the byte array. The column names should be in the same order as their values are in the byte array. - * The column name includes both family name, if present, and column name. 
- * @return decoded values for each column - * @throws SQLException - * - */ - @Deprecated - public static Object[] decodeValues(Connection conn, String fullTableName, byte[] value, List> columns) throws SQLException { - PTable table = getTable(conn, fullTableName); - KeyValueSchema kvSchema = buildKeyValueSchema(getPColumns(table, columns)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(value); - ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); - valueSet.clear(); - valueSet.or(ptr); - int maxOffset = ptr.getOffset() + ptr.getLength(); - Boolean hasValue; - kvSchema.iterator(ptr); - int i = 0; - List values = new ArrayList(); - while(hasValue = kvSchema.next(ptr, i, maxOffset, valueSet) != null) { - if(hasValue) { - values.add(kvSchema.getField(i).getDataType().toObject(ptr)); - } - i++; - } - return values.toArray(); + + public Character getEscapeCharacter() { + return escapeCharacter; } - - /** - * - * @param conn connection that was used for reading/generating value. - * @param fullTableName fully qualified table name - * @param values values of the columns - * @param columns list of pair of column that includes column family as first part and column name as the second part. - * Column family is optional and hence nullable. Columns in the list have to be in the same order as the order of occurence - * of their values in the object array. - * @return values encoded in a byte array - * @throws SQLException - * @see decodeValues(Connection, String, byte[], List) - */ - public static byte[] encodeColumnValues(Connection conn, String fullTableName, Object[] values, List> columns) throws SQLException { - PTable table = getTable(conn, fullTableName); - List pColumns = getColumns(table, columns); - List expressions = new ArrayList(pColumns.size()); - int i = 0; - for (PColumn col : pColumns) { - Object value = values[i]; - // for purposes of encoding, sort order of the columns doesn't matter. - Expression expr = LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale()); - expressions.add(expr); - i++; - } - KeyValueSchema kvSchema = buildKeyValueSchema(pColumns); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); - return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr); + + public String getArrayElementSeparator() { + return arrayElementSeparator; } - - - /** - * - * @param conn connection that was used for reading/generating value. - * @param fullTableName fully qualified table name - * @param value byte value of the columns concatenated as a single byte array. @see {@link #encodeColumnValues(Connection, String, Object[], List)} - * @param columns list of column names for the columns that have their respective values - * present in the byte array. The column names should be in the same order as their values are in the byte array. - * The column name includes both family name, if present, and column name. 
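[Illustrative aside, not part of the patch] To make the encode/decode pair concrete, a hedged round-trip sketch; the table, column family, column names and value types (ID as BIGINT, CF.NAME as VARCHAR) are assumptions:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

public class EncodeDecodeRoundTrip {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // Each pair is (family or null, column name); order must match the values array.
      List<Pair<String, String>> cols = Arrays.asList(
        new Pair<String, String>(null, "ID"),
        new Pair<String, String>("CF", "NAME"));
      Object[] values = new Object[] { 42L, "example" };

      // Values are packed into a single byte[] using the table's KeyValue schema...
      byte[] encoded = PhoenixRuntime.encodeColumnValues(conn, "MY_SCHEMA.MY_TABLE", values, cols);
      // ...and decoded back in the same column order.
      Object[] decoded = PhoenixRuntime.decodeColumnValues(conn, "MY_SCHEMA.MY_TABLE", encoded, cols);
      System.out.println(Arrays.toString(decoded));
    }
  }
}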
- * @return decoded values for each column - * @throws SQLException - * - */ - public static Object[] decodeColumnValues(Connection conn, String fullTableName, byte[] value, List> columns) throws SQLException { - PTable table = getTable(conn, fullTableName); - KeyValueSchema kvSchema = buildKeyValueSchema(getColumns(table, columns)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(value); - ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); - valueSet.clear(); - valueSet.or(ptr); - int maxOffset = ptr.getOffset() + ptr.getLength(); - Boolean hasValue; - kvSchema.iterator(ptr); - int i = 0; - List values = new ArrayList(); - while(hasValue = kvSchema.next(ptr, i, maxOffset, valueSet) != null) { - if(hasValue) { - values.add(kvSchema.getField(i).getDataType().toObject(ptr)); - } - i++; - } - return values.toArray(); + + public List getInputFiles() { + return inputFiles; } - - public static KeyValueSchema buildKeyValueSchema(List columns) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(getMinNullableIndex(columns)); - for (PColumn col : columns) { - builder.addField(col); - } - return builder.build(); + + public boolean isStrict() { + return strict; } - - private static int getMinNullableIndex(List columns) { - int minNullableIndex = columns.size(); - for (int i = 0; i < columns.size(); i++) { - if (columns.get(i).isNullable()) { - minNullableIndex = i; - break; - } - } - return minNullableIndex; + + public boolean isUpgrade() { + return isUpgrade; } - - /** - * @param table table to get the {@code PColumn} for - * @param columns list of pair of column that includes column family as first part and column name as the second part. - * Column family is optional and hence nullable. - * @return list of {@code PColumn} for fullyQualifiedColumnNames - * @throws SQLException - */ - @Deprecated - private static List getPColumns(PTable table, List> columns) throws SQLException { - List pColumns = new ArrayList(columns.size()); - for (Pair column : columns) { - pColumns.add(getPColumn(table, column.getFirst(), column.getSecond())); - } - return pColumns; + + public boolean isBypassUpgrade() { + return isBypassUpgrade; } - - @Deprecated - private static PColumn getPColumn(PTable table, @Nullable String familyName, String columnName) throws SQLException { - if (table==null) { - throw new SQLException("Table must not be null."); - } - if (columnName==null) { - throw new SQLException("columnName must not be null."); - } - // normalize and remove quotes from family and column names before looking up. - familyName = SchemaUtil.normalizeIdentifier(familyName); - columnName = SchemaUtil.normalizeIdentifier(columnName); - PColumn pColumn = null; - if (familyName != null) { - PColumnFamily family = table.getColumnFamily(familyName); - pColumn = family.getPColumnForColumnName(columnName); - } else { - pColumn = table.getColumnForColumnName(columnName); - } - return pColumn; + + public boolean isMapNamespace() { + return mapNamespace; } - - /** - * @param table table to get the {@code PColumn} for - * @param columns list of pair of column that includes column family as first part and column name as the second part. - * Column family is optional and hence nullable. 
- * @return list of {@code PColumn} for fullyQualifiedColumnNames - * @throws SQLException - */ - private static List getColumns(PTable table, List> columns) throws SQLException { - List pColumns = new ArrayList(columns.size()); - for (Pair column : columns) { - pColumns.add(getColumn(table, column.getFirst(), column.getSecond())); - } - return pColumns; + + public String getSrcTable() { + return srcTable; } - private static PColumn getColumn(PTable table, @Nullable String familyName, String columnName) throws SQLException { - if (table==null) { - throw new SQLException("Table must not be null."); - } - if (columnName==null) { - throw new SQLException("columnName must not be null."); - } - // normalize and remove quotes from family and column names before looking up. - familyName = SchemaUtil.normalizeIdentifier(familyName); - columnName = SchemaUtil.normalizeIdentifier(columnName); - // Column names are always for the data table, so we must translate them if - // we're dealing with an index table. - if (table.getType() == PTableType.INDEX) { - columnName = IndexUtil.getIndexColumnName(familyName, columnName); - } - PColumn pColumn = null; - if (familyName != null) { - PColumnFamily family = table.getColumnFamily(familyName); - pColumn = family.getPColumnForColumnName(columnName); - } else { - pColumn = table.getColumnForColumnName(columnName); - } - return pColumn; + public boolean isLocalIndexUpgrade() { + return localIndexUpgrade; } - - /** - * Get expression that may be used to evaluate the tenant ID of a given row in a - * multi-tenant table. Both the SYSTEM.CATALOG table and the SYSTEM.SEQUENCE - * table are considered multi-tenant. - * @param conn open Phoenix connection - * @param fullTableName full table name - * @return An expression that may be evaluated for a row in the provided table or - * null if the table is not a multi-tenant table. - * @throws SQLException if the table name is not found, a TableNotFoundException - * is thrown. If a multi-tenant local index is supplied a SQLFeatureNotSupportedException - * is thrown. - */ - public static Expression getTenantIdExpression(Connection conn, String fullTableName) throws SQLException { - PTable table = getTable(conn, fullTableName); - // TODO: consider setting MULTI_TENANT = true for SYSTEM.CATALOG and SYSTEM.SEQUENCE - if (!SchemaUtil.isMetaTable(table) && !SchemaUtil.isSequenceTable(table) && !table.isMultiTenant()) { - return null; - } - return getFirstPKColumnExpression(table); + } + + /** + * Returns the opitmized query plan used by phoenix for executing the sql. + * @param stmt to return the plan for + */ + public static QueryPlan getOptimizedQueryPlan(PreparedStatement stmt) throws SQLException { + checkNotNull(stmt); + QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); + return plan; + } + + /** + * Whether or not the query plan has any order by expressions. + */ + public static boolean hasOrderBy(QueryPlan plan) { + checkNotNull(plan); + List orderBys = plan.getOrderBy().getOrderByExpressions(); + return orderBys != null && !orderBys.isEmpty(); + } + + public static int getLimit(QueryPlan plan) { + checkNotNull(plan); + return plan.getLimit() == null ? 0 : plan.getLimit(); + } + + private static String addQuotes(String str) { + return str == null ? str : "\"" + str + "\""; + } + + /** + * Get the column family, column name pairs that make up the row key of the table that will be + * queried. + * @param conn - connection used to generate the query plan. 
Caller should take care of closing + * the connection appropriately. + * @param plan - query plan to get info for. + * @return the pairs of column family name and column name columns in the data table that make up + * the row key for the table used in the query plan. Column family names are optional and + * hence the first part of the pair is nullable. Column names and family names are + * enclosed in double quotes to allow for case sensitivity and for presence of special + * characters. Salting column and view index id column are not included. If the connection + * is tenant specific and the table used by the query plan is multi-tenant, then the + * tenant id column is not included as well. + */ + public static List> getPkColsForSql(Connection conn, QueryPlan plan) + throws SQLException { + checkNotNull(plan); + checkNotNull(conn); + List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn); + List> columns = Lists.newArrayListWithExpectedSize(pkColumns.size()); + String columnName; + String familyName; + for (PColumn pCol : pkColumns) { + columnName = addQuotes(pCol.getName().getString()); + familyName = + pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null; + columns.add(new Pair(familyName, columnName)); } - - /** - * Get expression that may be used to evaluate to the value of the first - * column of a given row in a Phoenix table. - * @param conn open Phoenix connection - * @param fullTableName full table name - * @return An expression that may be evaluated for a row in the provided table. - * @throws SQLException if the table name is not found, a TableNotFoundException - * is thrown. If a local index is supplied a SQLFeatureNotSupportedException - * is thrown. - */ - public static Expression getFirstPKColumnExpression(Connection conn, String fullTableName) throws SQLException { - PTable table = getTable(conn, fullTableName); - return getFirstPKColumnExpression(table); + return columns; + } + + /** + * @param columns - Initialized empty list to be filled with the pairs of column family name + * and column name for columns that are used as row key for the query plan. + * Column family names are optional and hence the first part of the pair is + * nullable. Column names and family names are enclosed in double quotes to + * allow for case sensitivity and for presence of special characters. Salting + * column and view index id column are not included. If the connection is + * tenant specific and the table used by the query plan is multi-tenant, then + * the tenant id column is not included as well. + * @param plan - query plan to get info for. + * @param conn - connection used to generate the query plan. Caller should take care of + * closing the connection appropriately. + * @param forDataTable - if true, then family names and column names correspond to the data table + * even if the query plan uses the secondary index table. If false, and if the + * query plan uses the secondary index table, then the family names and column + * names correspond to the index table. + */ + @Deprecated + public static void getPkColsForSql(List> columns, QueryPlan plan, + Connection conn, boolean forDataTable) throws SQLException { + checkNotNull(columns); + checkNotNull(plan); + checkNotNull(conn); + List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn, forDataTable); + String columnName; + String familyName; + for (PColumn pCol : pkColumns) { + columnName = addQuotes(pCol.getName().getString()); + familyName = + pCol.getFamilyName() != null ? 
addQuotes(pCol.getFamilyName().getString()) : null; + columns.add(new Pair(familyName, columnName)); } - - private static Expression getFirstPKColumnExpression(PTable table) throws SQLException { - if (table.getIndexType() == IndexType.LOCAL) { - /* - * With some hackery, we could deduce the tenant ID from a multi-tenant local index, - * however it's not clear that we'd want to maintain the same prefixing of the region - * start key, as the region boundaries may end up being different on a cluster being - * replicated/backed-up to (which is the use case driving the method). - */ - throw new SQLFeatureNotSupportedException(); - } - - // skip salt and viewIndexId columns. - int pkPosition = (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1); - List pkColumns = table.getPKColumns(); - return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition)); + } + + /** + * @param columns - Initialized empty list to be filled with the pairs of column family name + * and column name for columns that are used as row key for the query plan. + * Column family names are optional and hence the first part of the pair is + * nullable. Column names and family names are enclosed in double quotes to + * allow for case sensitivity and for presence of special characters. Salting + * column and view index id column are not included. If the connection is + * tenant specific and the table used by the query plan is multi-tenant, then + * the tenant id column is not included as well. + * @param dataTypes - Initialized empty list to be filled with the corresponding data type for + * the columns in @param columns. + * @param plan - query plan to get info for + * @param conn - phoenix connection used to generate the query plan. Caller should take + * care of closing the connection appropriately. + * @param forDataTable - if true, then column names and data types correspond to the data table + * even if the query plan uses the secondary index table. If false, and if the + * query plan uses the secondary index table, then the column names and data + * types correspond to the index table. + */ + @Deprecated + public static void getPkColsDataTypesForSql(List> columns, + List dataTypes, QueryPlan plan, Connection conn, boolean forDataTable) + throws SQLException { + checkNotNull(columns); + checkNotNull(dataTypes); + checkNotNull(plan); + checkNotNull(conn); + List pkColumns = getPkColumns(plan.getTableRef().getTable(), conn, forDataTable); + String columnName; + String familyName; + for (PColumn pCol : pkColumns) { + String sqlTypeName = getSqlTypeName(pCol); + dataTypes.add(sqlTypeName); + columnName = addQuotes(pCol.getName().getString()); + familyName = + pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null; + columns.add(new Pair(familyName, columnName)); } - - /** - * Exposes the various internal phoenix metrics collected at the client JVM level. - */ - public static Collection getGlobalPhoenixClientMetrics() { - return GlobalClientMetrics.getMetrics(); + } + + /** Returns sql type name that could be used in DDL statements, dynamic column types etc. */ + public static String getSqlTypeName(PColumn pCol) { + PDataType dataType = pCol.getDataType(); + Integer maxLength = pCol.getMaxLength(); + Integer scale = pCol.getScale(); + return getSqlTypeName(dataType, maxLength, scale); + } + + public static String getSqlTypeName(PDataType dataType, Integer maxLength, Integer scale) { + return dataType.isArrayType() + ? 
getArraySqlTypeName(maxLength, scale, dataType) + : appendMaxLengthAndScale(maxLength, scale, dataType.getSqlTypeName()); + } + + public static String getArraySqlTypeName(@Nullable Integer maxLength, @Nullable Integer scale, + PDataType arrayType) { + String baseTypeSqlName = PDataType.arrayBaseType(arrayType).getSqlTypeName(); + return appendMaxLengthAndScale(maxLength, scale, baseTypeSqlName) + " " + ARRAY_TYPE_SUFFIX; // for + // ex + // - + // decimal(10,2) + // ARRAY + } + + private static String appendMaxLengthAndScale(@Nullable Integer maxLength, + @Nullable Integer scale, String sqlTypeName) { + if (maxLength != null) { + sqlTypeName = sqlTypeName + "(" + maxLength; + if (scale != null) { + sqlTypeName = sqlTypeName + "," + scale; // has both max length and scale. For ex- + // decimal(10,2) + } + sqlTypeName = sqlTypeName + ")"; } - - /** - * This function will be called mainly in Metric Publisher methods. - * Its the only way Metric publisher will be able to access the metrics in phoenix system. - * @return map of TableName to List of GlobalMetric's. - */ - public static Map> getPhoenixTableClientMetrics() { - return TableMetricsManager.getTableMetricsMethod(); + return sqlTypeName; + } + + @Deprecated + private static List getPkColumns(PTable ptable, Connection conn, boolean forDataTable) + throws SQLException { + PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); + List pkColumns = ptable.getPKColumns(); + + // Skip the salting column and the view index id column if present. + // Skip the tenant id column too if the connection is tenant specific and the table used by the + // query plan is multi-tenant + int offset = (ptable.getBucketNum() == null ? 0 : 1) + + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + + (ptable.getViewIndexId() == null ? 0 : 1); + + // get a sublist of pkColumns by skipping the offset columns. + pkColumns = pkColumns.subList(offset, pkColumns.size()); + + if (ptable.getType() == PTableType.INDEX && forDataTable) { + // index tables have the same schema name as their parent/data tables. + String fullDataTableName = ptable.getParentName().getString(); + + // Get the corresponding columns of the data table. + List dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); + pkColumns = dataColumns; } - - public static Map> getLatencyHistograms() { - return TableMetricsManager.getLatencyHistogramsForAllTables(); + return pkColumns; + } + + private static List getPkColumns(PTable ptable, Connection conn) throws SQLException { + PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); + List pkColumns = ptable.getPKColumns(); + + // Skip the salting column and the view index id column if present. + // Skip the tenant id column too if the connection is tenant specific and the table used by the + // query plan is multi-tenant + int offset = (ptable.getBucketNum() == null ? 0 : 1) + + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + + (ptable.getViewIndexId() == null ? 0 : 1); + + // get a sublist of pkColumns by skipping the offset columns. + pkColumns = pkColumns.subList(offset, pkColumns.size()); + + if (ptable.getType() == PTableType.INDEX) { + // index tables have the same schema name as their parent/data tables. + String fullDataTableName = ptable.getParentName().getString(); + + // Get the corresponding columns of the data table. 
+ List dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); + pkColumns = dataColumns; } - - public static Map> getSizeHistograms() { - return TableMetricsManager.getSizeHistogramsForAllTables(); + return pkColumns; + } + + /** + * @param conn connection that was used for reading/generating value. + * @param fullTableName fully qualified table name + * @param values values of the columns + * @param columns list of pair of column that includes column family as first part and + * column name as the second part. Column family is optional and hence + * nullable. Columns in the list have to be in the same order as the order of + * occurence of their values in the object array. + * @return values encoded in a byte array + * @see decodeValues(Connection, String, byte[], List) + */ + @Deprecated + public static byte[] encodeValues(Connection conn, String fullTableName, Object[] values, + List> columns) throws SQLException { + PTable table = getTable(conn, fullTableName); + List pColumns = getPColumns(table, columns); + List expressions = new ArrayList(pColumns.size()); + int i = 0; + for (PColumn col : pColumns) { + Object value = values[i]; + // for purposes of encoding, sort order of the columns doesn't matter. + Expression expr = + LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale()); + expressions.add(expr); + i++; } - - public static Map> getAllConnectionQueryServicesHistograms() { - return ConnectionQueryServicesMetricsManager.getHistogramsForAllConnectionQueryServices(); + KeyValueSchema kvSchema = buildKeyValueSchema(pColumns); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); + return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr); + } + + /** + * @param conn connection that was used for reading/generating value. + * @param fullTableName fully qualified table name + * @param value byte value of the columns concatenated as a single byte array. @see + * {@link #encodeColumnValues(Connection, String, Object[], List)} + * @param columns list of column names for the columns that have their respective values + * present in the byte array. The column names should be in the same order as + * their values are in the byte array. The column name includes both family + * name, if present, and column name. + * @return decoded values for each column + */ + @Deprecated + public static Object[] decodeValues(Connection conn, String fullTableName, byte[] value, + List> columns) throws SQLException { + PTable table = getTable(conn, fullTableName); + KeyValueSchema kvSchema = buildKeyValueSchema(getPColumns(table, columns)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(value); + ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); + valueSet.clear(); + valueSet.or(ptr); + int maxOffset = ptr.getOffset() + ptr.getLength(); + Boolean hasValue; + kvSchema.iterator(ptr); + int i = 0; + List values = new ArrayList(); + while (hasValue = kvSchema.next(ptr, i, maxOffset, valueSet) != null) { + if (hasValue) { + values.add(kvSchema.getField(i).getDataType().toObject(ptr)); + } + i++; } - - public static Map> getAllConnectionQueryServicesCounters() { - return ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); + return values.toArray(); + } + + /** + * @param conn connection that was used for reading/generating value. 
+ * @param fullTableName fully qualified table name + * @param values values of the columns + * @param columns list of pair of column that includes column family as first part and + * column name as the second part. Column family is optional and hence + * nullable. Columns in the list have to be in the same order as the order of + * occurence of their values in the object array. + * @return values encoded in a byte array + * @see decodeValues(Connection, String, byte[], List) + */ + public static byte[] encodeColumnValues(Connection conn, String fullTableName, Object[] values, + List> columns) throws SQLException { + PTable table = getTable(conn, fullTableName); + List pColumns = getColumns(table, columns); + List expressions = new ArrayList(pColumns.size()); + int i = 0; + for (PColumn col : pColumns) { + Object value = values[i]; + // for purposes of encoding, sort order of the columns doesn't matter. + Expression expr = + LiteralExpression.newConstant(value, col.getDataType(), col.getMaxLength(), col.getScale()); + expressions.add(expr); + i++; } - - /** - * This is only used in testcases to reset the connection query services Metrics data - */ - @VisibleForTesting - public static void clearAllConnectionQueryServiceMetrics() { - ConnectionQueryServicesMetricsManager.clearAllConnectionQueryServiceMetrics(); + KeyValueSchema kvSchema = buildKeyValueSchema(pColumns); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); + return kvSchema.toBytes(expressions.toArray(new Expression[0]), valueSet, ptr); + } + + /** + * @param conn connection that was used for reading/generating value. + * @param fullTableName fully qualified table name + * @param value byte value of the columns concatenated as a single byte array. @see + * {@link #encodeColumnValues(Connection, String, Object[], List)} + * @param columns list of column names for the columns that have their respective values + * present in the byte array. The column names should be in the same order as + * their values are in the byte array. The column name includes both family + * name, if present, and column name. 
+ * @return decoded values for each column + */ + public static Object[] decodeColumnValues(Connection conn, String fullTableName, byte[] value, + List> columns) throws SQLException { + PTable table = getTable(conn, fullTableName); + KeyValueSchema kvSchema = buildKeyValueSchema(getColumns(table, columns)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(value); + ValueBitSet valueSet = ValueBitSet.newInstance(kvSchema); + valueSet.clear(); + valueSet.or(ptr); + int maxOffset = ptr.getOffset() + ptr.getLength(); + Boolean hasValue; + kvSchema.iterator(ptr); + int i = 0; + List values = new ArrayList(); + while (hasValue = kvSchema.next(ptr, i, maxOffset, valueSet) != null) { + if (hasValue) { + values.add(kvSchema.getField(i).getDataType().toObject(ptr)); + } + i++; } + return values.toArray(); + } - /** - * This is only used in testcases to reset the tableLevel Metrics data - */ - @VisibleForTesting - public static void clearTableLevelMetrics(){ - TableMetricsManager.clearTableLevelMetricsMethod(); - } - - /** - * - * @return whether or not the global client metrics are being collected - */ - public static boolean areGlobalClientMetricsBeingCollected() { - return GlobalClientMetrics.isMetricsEnabled(); + public static KeyValueSchema buildKeyValueSchema(List columns) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(getMinNullableIndex(columns)); + for (PColumn col : columns) { + builder.addField(col); } - - private static Map createMetricMap(Map metricInfoMap) { - Map metricMap = Maps.newHashMapWithExpectedSize(metricInfoMap.size()); - for (Entry entry : metricInfoMap.entrySet()) { - metricMap.put(entry.getKey().shortName(), entry.getValue()); - } - return metricMap; - } - - private static Map> transformMetrics(Map> metricMap) { - Function, Map> func = new Function, Map>() { - @Override - public Map apply(Map map) { - return createMetricMap(map); - } - }; - return Maps.transformValues(metricMap, func); - } - - /** - * Method to expose the metrics associated with performing reads using the passed result set. A typical pattern is: - * - *
-     * {@code
-     * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
-     * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
-     * try (ResultSet rs = stmt.executeQuery()) {
-     *    while(rs.next()) {
-     *      .....
-     *    }
-     *    overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
-     *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
-     *    PhoenixRuntime.resetMetrics(rs);
-     * }
-     * }
-     * </pre>
- * - * @param rs - * result set to get the metrics for - * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } - * @throws SQLException - */ - public static Map> getRequestReadMetricInfo(ResultSet rs) throws SQLException { - PhoenixMonitoredResultSet - resultSet = rs.unwrap(PhoenixMonitoredResultSet - .class); - return resultSet.getReadMetrics(); + return builder.build(); + } + + private static int getMinNullableIndex(List columns) { + int minNullableIndex = columns.size(); + for (int i = 0; i < columns.size(); i++) { + if (columns.get(i).isNullable()) { + minNullableIndex = i; + break; + } } - - @Deprecated - // use getRequestReadMetricInfo - public static Map> getRequestReadMetrics(ResultSet rs) throws SQLException { - return transformMetrics(getRequestReadMetricInfo(rs)); + return minNullableIndex; + } + + /** + * @param table table to get the {@code PColumn} for + * @param columns list of pair of column that includes column family as first part and column name + * as the second part. Column family is optional and hence nullable. + * @return list of {@code PColumn} for fullyQualifiedColumnNames + */ + @Deprecated + private static List getPColumns(PTable table, List> columns) + throws SQLException { + List pColumns = new ArrayList(columns.size()); + for (Pair column : columns) { + pColumns.add(getPColumn(table, column.getFirst(), column.getSecond())); } - - /** - * Method to expose the overall metrics associated with executing a query via phoenix. A typical pattern of - * accessing request level read metrics and overall read query metrics is: - * - *
-     * {@code
-     * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
-     * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
-     * try (ResultSet rs = stmt.executeQuery()) {
-     *    while(rs.next()) {
-     *      .....
-     *    }
-     *    overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
-     *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
-     *    PhoenixRuntime.resetMetrics(rs);
-     * }
-     * }
-     * </pre>
- * - * @param rs - * result set to get the metrics for - * @return a map of {@code metric name -> metric value } - * @throws SQLException - */ - public static Map getOverAllReadRequestMetricInfo(ResultSet rs) throws SQLException { - PhoenixMonitoredResultSet - resultSet = rs.unwrap(PhoenixMonitoredResultSet.class); - return resultSet.getOverAllRequestReadMetrics(); + return pColumns; + } + + @Deprecated + private static PColumn getPColumn(PTable table, @Nullable String familyName, String columnName) + throws SQLException { + if (table == null) { + throw new SQLException("Table must not be null."); } - - @Deprecated - // use getOverAllReadRequestMetricInfo - public static Map getOverAllReadRequestMetrics(ResultSet rs) throws SQLException { - return createMetricMap(getOverAllReadRequestMetricInfo(rs)); + if (columnName == null) { + throw new SQLException("columnName must not be null."); } - - /** - * Method to expose the metrics associated with sending over mutations to HBase. These metrics are updated when - * commit is called on the passed connection. Mutation metrics are accumulated for the connection till - * {@link #resetMetrics(Connection)} is called or the connection is closed. Example usage: - * - *
-     * {@code
-     * Map<String, Map<MetricType, Long>> mutationWriteMetrics = null;
-     * Map<String, Map<MetricType, Long>> mutationReadMetrics = null;
-     * try (Connection conn = DriverManager.getConnection(url)) {
-     *    conn.createStatement.executeUpdate(dml1);
-     *    ....
-     *    conn.createStatement.executeUpdate(dml2);
-     *    ...
-     *    conn.createStatement.executeUpdate(dml3);
-     *    ...
-     *    conn.commit();
-     *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
-     *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
-     *    PhoenixRuntime.resetMetrics(rs);
-     * }
-     * }
-     * </pre>
- * - * @param conn - * connection to get the metrics for - * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } - * @throws SQLException - */ - public static Map> getWriteMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException { - PhoenixMonitoredConnection - pConn = conn.unwrap(PhoenixMonitoredConnection.class); - return pConn.getMutationMetrics(); + // normalize and remove quotes from family and column names before looking up. + familyName = SchemaUtil.normalizeIdentifier(familyName); + columnName = SchemaUtil.normalizeIdentifier(columnName); + PColumn pColumn = null; + if (familyName != null) { + PColumnFamily family = table.getColumnFamily(familyName); + pColumn = family.getPColumnForColumnName(columnName); + } else { + pColumn = table.getColumnForColumnName(columnName); } - - @Deprecated - // use getWriteMetricInfoForMutationsSinceLastReset - public static Map> getWriteMetricsForMutationsSinceLastReset(Connection conn) throws SQLException { - return transformMetrics(getWriteMetricInfoForMutationsSinceLastReset(conn)); + return pColumn; + } + + /** + * @param table table to get the {@code PColumn} for + * @param columns list of pair of column that includes column family as first part and column name + * as the second part. Column family is optional and hence nullable. + * @return list of {@code PColumn} for fullyQualifiedColumnNames + */ + private static List getColumns(PTable table, List> columns) + throws SQLException { + List pColumns = new ArrayList(columns.size()); + for (Pair column : columns) { + pColumns.add(getColumn(table, column.getFirst(), column.getSecond())); } + return pColumns; + } - /** - * Method to expose the read metrics associated with executing a dml statement. These metrics are updated when - * commit is called on the passed connection. Read metrics are accumulated till {@link #resetMetrics(Connection)} is - * called or the connection is closed. Example usage: - * - *
-     * {@code
-     * Map<String, Map<MetricType, Long>> mutationWriteMetrics = null;
-     * Map<String, Map<MetricType, Long>> mutationReadMetrics = null;
-     * try (Connection conn = DriverManager.getConnection(url)) {
-     *    conn.createStatement.executeUpdate(dml1);
-     *    ....
-     *    conn.createStatement.executeUpdate(dml2);
-     *    ...
-     *    conn.createStatement.executeUpdate(dml3);
-     *    ...
-     *    conn.commit();
-     *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
-     *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
-     *    PhoenixRuntime.resetMetrics(rs);
-     * }
-     * }
-     * </pre>
- * @param conn - * connection to get the metrics for - * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } - * @throws SQLException - */ - public static Map> getReadMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException { - PhoenixMonitoredConnection pConn = conn.unwrap(PhoenixMonitoredConnection.class); - return pConn.getReadMetrics(); + private static PColumn getColumn(PTable table, @Nullable String familyName, String columnName) + throws SQLException { + if (table == null) { + throw new SQLException("Table must not be null."); } - - @Deprecated - // use getReadMetricInfoForMutationsSinceLastReset - public static Map> getReadMetricsForMutationsSinceLastReset(Connection conn) throws SQLException { - return transformMetrics(getReadMetricInfoForMutationsSinceLastReset(conn)); + if (columnName == null) { + throw new SQLException("columnName must not be null."); } - - /** - * Reset the read metrics collected in the result set. - * - * @see #getRequestReadMetrics(ResultSet) #getOverAllReadRequestMetrics(ResultSet) - * @param rs - * @throws SQLException - */ - public static void resetMetrics(ResultSet rs) throws SQLException { - PhoenixMonitoredResultSet - prs = rs.unwrap(PhoenixMonitoredResultSet.class); - prs.resetMetrics(); + // normalize and remove quotes from family and column names before looking up. + familyName = SchemaUtil.normalizeIdentifier(familyName); + columnName = SchemaUtil.normalizeIdentifier(columnName); + // Column names are always for the data table, so we must translate them if + // we're dealing with an index table. + if (table.getType() == PTableType.INDEX) { + columnName = IndexUtil.getIndexColumnName(familyName, columnName); } - - /** - * Reset the mutation and reads-for-mutations metrics collected in the connection. - * - * @see #getReadMetricsForMutationsSinceLastReset(Connection) #getWriteMetricsForMutationsSinceLastReset(Connection) - * @param conn - * @throws SQLException - */ - public static void resetMetrics(Connection conn) throws SQLException { - PhoenixMonitoredConnection - pConn = conn.unwrap(PhoenixMonitoredConnection.class); - pConn.clearMetrics(); + PColumn pColumn = null; + if (familyName != null) { + PColumnFamily family = table.getColumnFamily(familyName); + pColumn = family.getPColumnForColumnName(columnName); + } else { + pColumn = table.getColumnForColumnName(columnName); } - - /** - * Use this utility function to ensure that a timestamp is in milliseconds across transactional and - * non transactional tables. This expects that the Cell timestamp is based either on wall clock - * time or transaction manager nanos wall clock time. - * @param tsOfCell Cell time stamp to be converted. - * @return wall clock time in milliseconds (i.e. Epoch time) of a given Cell time stamp. - */ - public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) { - return TransactionUtil.isTransactionalTimestamp(tsOfCell) ? TransactionUtil.convertToMilliseconds(tsOfCell) : tsOfCell; + return pColumn; + } + + /** + * Get expression that may be used to evaluate the tenant ID of a given row in a multi-tenant + * table. Both the SYSTEM.CATALOG table and the SYSTEM.SEQUENCE table are considered multi-tenant. + * @param conn open Phoenix connection + * @param fullTableName full table name + * @return An expression that may be evaluated for a row in the provided table or null if the + * table is not a multi-tenant table. + * @throws SQLException if the table name is not found, a TableNotFoundException is thrown. 
If a + * multi-tenant local index is supplied a SQLFeatureNotSupportedException is + * thrown. + */ + public static Expression getTenantIdExpression(Connection conn, String fullTableName) + throws SQLException { + PTable table = getTable(conn, fullTableName); + // TODO: consider setting MULTI_TENANT = true for SYSTEM.CATALOG and SYSTEM.SEQUENCE + if ( + !SchemaUtil.isMetaTable(table) && !SchemaUtil.isSequenceTable(table) && !table.isMultiTenant() + ) { + return null; + } + return getFirstPKColumnExpression(table); + } + + /** + * Get expression that may be used to evaluate to the value of the first column of a given row in + * a Phoenix table. + * @param conn open Phoenix connection + * @param fullTableName full table name + * @return An expression that may be evaluated for a row in the provided table. + * @throws SQLException if the table name is not found, a TableNotFoundException is thrown. If a + * local index is supplied a SQLFeatureNotSupportedException is thrown. + */ + public static Expression getFirstPKColumnExpression(Connection conn, String fullTableName) + throws SQLException { + PTable table = getTable(conn, fullTableName); + return getFirstPKColumnExpression(table); + } + + private static Expression getFirstPKColumnExpression(PTable table) throws SQLException { + if (table.getIndexType() == IndexType.LOCAL) { + /* + * With some hackery, we could deduce the tenant ID from a multi-tenant local index, however + * it's not clear that we'd want to maintain the same prefixing of the region start key, as + * the region boundaries may end up being different on a cluster being replicated/backed-up to + * (which is the use case driving the method). + */ + throw new SQLFeatureNotSupportedException(); } - public static long getCurrentScn(ReadOnlyProps props) { - String scn = props.get(CURRENT_SCN_ATTRIB); - return scn != null ? Long.parseLong(scn) : HConstants.LATEST_TIMESTAMP; + // skip salt and viewIndexId columns. + int pkPosition = + (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1); + List pkColumns = table.getPKColumns(); + return new RowKeyColumnExpression(pkColumns.get(pkPosition), + new RowKeyValueAccessor(pkColumns, pkPosition)); + } + + /** + * Exposes the various internal phoenix metrics collected at the client JVM level. + */ + public static Collection getGlobalPhoenixClientMetrics() { + return GlobalClientMetrics.getMetrics(); + } + + /** + * This function will be called mainly in Metric Publisher methods. Its the only way Metric + * publisher will be able to access the metrics in phoenix system. + * @return map of TableName to List of GlobalMetric's. 
+ */ + public static Map> getPhoenixTableClientMetrics() { + return TableMetricsManager.getTableMetricsMethod(); + } + + public static Map> getLatencyHistograms() { + return TableMetricsManager.getLatencyHistogramsForAllTables(); + } + + public static Map> getSizeHistograms() { + return TableMetricsManager.getSizeHistogramsForAllTables(); + } + + public static Map> getAllConnectionQueryServicesHistograms() { + return ConnectionQueryServicesMetricsManager.getHistogramsForAllConnectionQueryServices(); + } + + public static Map> + getAllConnectionQueryServicesCounters() { + return ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); + } + + /** + * This is only used in testcases to reset the connection query services Metrics data + */ + @VisibleForTesting + public static void clearAllConnectionQueryServiceMetrics() { + ConnectionQueryServicesMetricsManager.clearAllConnectionQueryServiceMetrics(); + } + + /** + * This is only used in testcases to reset the tableLevel Metrics data + */ + @VisibleForTesting + public static void clearTableLevelMetrics() { + TableMetricsManager.clearTableLevelMetricsMethod(); + } + + /** Returns whether or not the global client metrics are being collected */ + public static boolean areGlobalClientMetricsBeingCollected() { + return GlobalClientMetrics.isMetricsEnabled(); + } + + private static Map createMetricMap(Map metricInfoMap) { + Map metricMap = Maps.newHashMapWithExpectedSize(metricInfoMap.size()); + for (Entry entry : metricInfoMap.entrySet()) { + metricMap.put(entry.getKey().shortName(), entry.getValue()); } - } + return metricMap; + } + + private static Map> + transformMetrics(Map> metricMap) { + Function, Map> func = + new Function, Map>() { + @Override + public Map apply(Map map) { + return createMetricMap(map); + } + }; + return Maps.transformValues(metricMap, func); + } + + /** + * Method to expose the metrics associated with performing reads using the passed result set. A + * typical pattern is: + * + *
+   * {@code
+   * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
+   * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
+   * try (ResultSet rs = stmt.executeQuery()) {
+   *    while(rs.next()) {
+   *      .....
+   *    }
+   *    overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
+   *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
+   *    PhoenixRuntime.resetMetrics(rs);
+   * }
+   * }
+   * </pre>
+ * + * result set to get the metrics for + * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } + */ + public static Map> getRequestReadMetricInfo(ResultSet rs) + throws SQLException { + PhoenixMonitoredResultSet resultSet = rs.unwrap(PhoenixMonitoredResultSet.class); + return resultSet.getReadMetrics(); + } + + @Deprecated + // use getRequestReadMetricInfo + public static Map> getRequestReadMetrics(ResultSet rs) + throws SQLException { + return transformMetrics(getRequestReadMetricInfo(rs)); + } + + /** + * Method to expose the overall metrics associated with executing a query via phoenix. A typical + * pattern of accessing request level read metrics and overall read query metrics is: + * + *
+   * {@code
+   * Map<String, Map<MetricType, Long>> overAllQueryMetrics = null;
+   * Map<String, Map<MetricType, Long>> requestReadMetrics = null;
+   * try (ResultSet rs = stmt.executeQuery()) {
+   *    while(rs.next()) {
+   *      .....
+   *    }
+   *    overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
+   *    requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
+   *    PhoenixRuntime.resetMetrics(rs);
+   * }
+   * }
+   * </pre>
+ * + * result set to get the metrics for + * @return a map of {@code metric name -> metric value } + */ + public static Map getOverAllReadRequestMetricInfo(ResultSet rs) + throws SQLException { + PhoenixMonitoredResultSet resultSet = rs.unwrap(PhoenixMonitoredResultSet.class); + return resultSet.getOverAllRequestReadMetrics(); + } + + @Deprecated + // use getOverAllReadRequestMetricInfo + public static Map getOverAllReadRequestMetrics(ResultSet rs) throws SQLException { + return createMetricMap(getOverAllReadRequestMetricInfo(rs)); + } + + /** + * Method to expose the metrics associated with sending over mutations to HBase. These metrics are + * updated when commit is called on the passed connection. Mutation metrics are accumulated for + * the connection till {@link #resetMetrics(Connection)} is called or the connection is closed. + * Example usage: + * + *
+   * {@code
+   * Map<String, Map<MetricType, Long>> mutationWriteMetrics = null;
+   * Map<String, Map<MetricType, Long>> mutationReadMetrics = null;
+   * try (Connection conn = DriverManager.getConnection(url)) {
+   *    conn.createStatement().executeUpdate(dml1);
+   *    ....
+   *    conn.createStatement().executeUpdate(dml2);
+   *    ...
+   *    conn.createStatement().executeUpdate(dml3);
+   *    ...
+   *    conn.commit();
+   *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+   *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
+   *    PhoenixRuntime.resetMetrics(conn);
+   * }
+   * }
+   * </pre>
+ * + * connection to get the metrics for + * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } + */ + public static Map> + getWriteMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException { + PhoenixMonitoredConnection pConn = conn.unwrap(PhoenixMonitoredConnection.class); + return pConn.getMutationMetrics(); + } + + @Deprecated + // use getWriteMetricInfoForMutationsSinceLastReset + public static Map> + getWriteMetricsForMutationsSinceLastReset(Connection conn) throws SQLException { + return transformMetrics(getWriteMetricInfoForMutationsSinceLastReset(conn)); + } + + /** + * Method to expose the read metrics associated with executing a dml statement. These metrics are + * updated when commit is called on the passed connection. Read metrics are accumulated till + * {@link #resetMetrics(Connection)} is called or the connection is closed. Example usage: + * + *
+   * {@code
+   * Map<String, Map<MetricType, Long>> mutationWriteMetrics = null;
+   * Map<String, Map<MetricType, Long>> mutationReadMetrics = null;
+   * try (Connection conn = DriverManager.getConnection(url)) {
+   *    conn.createStatement().executeUpdate(dml1);
+   *    ....
+   *    conn.createStatement().executeUpdate(dml2);
+   *    ...
+   *    conn.createStatement().executeUpdate(dml3);
+   *    ...
+   *    conn.commit();
+   *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+   *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
+   *    PhoenixRuntime.resetMetrics(conn);
+   * }
+   * }
+   * </pre>
+ * + * connection to get the metrics for + * @return a map of {@code (table name) -> (map of (metric name) -> (metric value)) } + */ + public static Map> + getReadMetricInfoForMutationsSinceLastReset(Connection conn) throws SQLException { + PhoenixMonitoredConnection pConn = conn.unwrap(PhoenixMonitoredConnection.class); + return pConn.getReadMetrics(); + } + + @Deprecated + // use getReadMetricInfoForMutationsSinceLastReset + public static Map> + getReadMetricsForMutationsSinceLastReset(Connection conn) throws SQLException { + return transformMetrics(getReadMetricInfoForMutationsSinceLastReset(conn)); + } + + /** + * Reset the read metrics collected in the result set. + * @see #getRequestReadMetrics(ResultSet) #getOverAllReadRequestMetrics(ResultSet) + */ + public static void resetMetrics(ResultSet rs) throws SQLException { + PhoenixMonitoredResultSet prs = rs.unwrap(PhoenixMonitoredResultSet.class); + prs.resetMetrics(); + } + + /** + * Reset the mutation and reads-for-mutations metrics collected in the connection. + * @see #getReadMetricsForMutationsSinceLastReset(Connection) + * #getWriteMetricsForMutationsSinceLastReset(Connection) + */ + public static void resetMetrics(Connection conn) throws SQLException { + PhoenixMonitoredConnection pConn = conn.unwrap(PhoenixMonitoredConnection.class); + pConn.clearMetrics(); + } + + /** + * Use this utility function to ensure that a timestamp is in milliseconds across transactional + * and non transactional tables. This expects that the Cell timestamp is based either on wall + * clock time or transaction manager nanos wall clock time. + * @param tsOfCell Cell time stamp to be converted. + * @return wall clock time in milliseconds (i.e. Epoch time) of a given Cell time stamp. + */ + public static long getWallClockTimeFromCellTimeStamp(long tsOfCell) { + return TransactionUtil.isTransactionalTimestamp(tsOfCell) + ? TransactionUtil.convertToMilliseconds(tsOfCell) + : tsOfCell; + } + + public static long getCurrentScn(ReadOnlyProps props) { + String scn = props.get(CURRENT_SCN_ATTRIB); + return scn != null ? Long.parseLong(scn) : HConstants.LATEST_TIMESTAMP; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixStopWatch.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixStopWatch.java index 291a19ca93e..7baabbfcc75 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixStopWatch.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PhoenixStopWatch.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,64 +18,64 @@ package org.apache.phoenix.util; /** - * Bare-bones implementation of a stop watch that only measures time in milliseconds. If you want to be fancy then - * please use the guava Stopwatch. However, be warned that the Guava's Stopwatch is a beta class and is subject to - * incompatible changes and removal. So save the future upgrade pain and use this class instead. + * Bare-bones implementation of a stop watch that only measures time in milliseconds. If you want to + * be fancy then please use the guava Stopwatch. 
However, be warned that the Guava's Stopwatch is a + * beta class and is subject to incompatible changes and removal. So save the future upgrade pain + * and use this class instead. */ public class PhoenixStopWatch { - private boolean isRunning; - private long startTime; - private long elapsedTimeMs; + private boolean isRunning; + private long startTime; + private long elapsedTimeMs; - /** - * Creates a new stop watch without starting it. - */ - public PhoenixStopWatch() {} + /** + * Creates a new stop watch without starting it. + */ + public PhoenixStopWatch() { + } - /** - * Starts the stopwatch. - * - * @return this {@code PhoenixStopWatch} instance - * @throws IllegalStateException - * if the stopwatch is already running. - */ - public PhoenixStopWatch start() { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - if (isRunning) { throw new IllegalStateException("Watch is already running"); } - startTime = currentTime; - isRunning = true; - return this; + /** + * Starts the stopwatch. + * @return this {@code PhoenixStopWatch} instance if the stopwatch is already running. + */ + public PhoenixStopWatch start() { + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + if (isRunning) { + throw new IllegalStateException("Watch is already running"); } + startTime = currentTime; + isRunning = true; + return this; + } - /** - * Stops the stopwatch. Future calls to {@link #elapsedMillis()} will return the fixed duration that had elapsed up - * to this point. - * - * @return this {@code PhoenixStopWatch} instance - * @throws IllegalStateException - * if the stopwatch is already stopped. - */ - public PhoenixStopWatch stop() { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - if (!isRunning) { throw new IllegalStateException("Watch wasn't started"); } - elapsedTimeMs = currentTime - startTime; - startTime = 0; - isRunning = false; - return this; + /** + * Stops the stopwatch. Future calls to {@link #elapsedMillis()} will return the fixed duration + * that had elapsed up to this point. + * @return this {@code PhoenixStopWatch} instance if the stopwatch is already stopped. + */ + public PhoenixStopWatch stop() { + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + if (!isRunning) { + throw new IllegalStateException("Watch wasn't started"); } + elapsedTimeMs = currentTime - startTime; + startTime = 0; + isRunning = false; + return this; + } - /** - * Returns the current elapsed time shown on this stopwatch, expressed in milliseconds. - */ - public long elapsedMillis() { - return elapsedTimeMs; - } + /** + * Returns the current elapsed time shown on this stopwatch, expressed in milliseconds. + */ + public long elapsedMillis() { + return elapsedTimeMs; + } - /** - * Returns {@code true} if {@link #start()} has been called on this stopwatch, and {@link #stop()} has not been - * called since the last call to {@code start()}. - */ - public boolean isRunning() { - return isRunning; - } + /** + * Returns {@code true} if {@link #start()} has been called on this stopwatch, and {@link #stop()} + * has not been called since the last call to {@code start()}. 
+ */ + public boolean isRunning() { + return isRunning; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteCodec.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteCodec.java index 18d2c88940a..f3148b9850b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteCodec.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteCodec.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,57 +27,61 @@ import java.util.List; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class PrefixByteCodec { - - public static List decodeBytes(ImmutableBytesWritable encodedBytes, int maxLength) throws IOException { - ByteArrayInputStream stream = new ByteArrayInputStream(encodedBytes.get(), encodedBytes.getOffset(), encodedBytes.getLength()); - DataInput input = new DataInputStream(stream); - PrefixByteDecoder decoder = new PrefixByteDecoder(maxLength); - List listOfBytes = Lists.newArrayList(); - try { - while (true) { - ImmutableBytesWritable ptr = decoder.decode(input); - // For this test, copy the bytes, but we wouldn't do this unless - // necessary for non testing - listOfBytes.add(ptr.copyBytes()); - } - } catch (EOFException e) { // Ignore as this signifies we're done - - } - return listOfBytes; + + public static List decodeBytes(ImmutableBytesWritable encodedBytes, int maxLength) + throws IOException { + ByteArrayInputStream stream = new ByteArrayInputStream(encodedBytes.get(), + encodedBytes.getOffset(), encodedBytes.getLength()); + DataInput input = new DataInputStream(stream); + PrefixByteDecoder decoder = new PrefixByteDecoder(maxLength); + List listOfBytes = Lists.newArrayList(); + try { + while (true) { + ImmutableBytesWritable ptr = decoder.decode(input); + // For this test, copy the bytes, but we wouldn't do this unless + // necessary for non testing + listOfBytes.add(ptr.copyBytes()); + } + } catch (EOFException e) { // Ignore as this signifies we're done + } - - public static int encodeBytes(List listOfBytes, ImmutableBytesWritable ptr) throws IOException { - try (TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(calculateSize(listOfBytes))) { - DataOutput output = new DataOutputStream(stream); - PrefixByteEncoder encoder = new PrefixByteEncoder(); - for (byte[] bytes : listOfBytes) { - encoder.encode(output, bytes, 0, bytes.length); - } - ptr.set(stream.getBuffer(), 0, stream.size()); - return encoder.getMaxLength(); - } + return listOfBytes; + } + + public static int encodeBytes(List listOfBytes, ImmutableBytesWritable ptr) + throws IOException { + try (TrustedByteArrayOutputStream stream = + new TrustedByteArrayOutputStream(calculateSize(listOfBytes))) { + DataOutput output = new DataOutputStream(stream); + PrefixByteEncoder encoder = new PrefixByteEncoder(); + for (byte[] bytes : listOfBytes) { + encoder.encode(output, bytes, 0, bytes.length); + } + ptr.set(stream.getBuffer(), 0, stream.size()); + return encoder.getMaxLength(); } - - public static int calculateSize(List listOfBytes) { - int size = 0; - for (byte[] bytes : listOfBytes) { - size += 
bytes.length; - } - return size; + } + + public static int calculateSize(List listOfBytes) { + int size = 0; + for (byte[] bytes : listOfBytes) { + size += bytes.length; } + return size; + } - public static ImmutableBytesWritable decode(PrefixByteDecoder decoder, DataInput input) throws EOFException { - try { - ImmutableBytesWritable val= decoder.decode(input); - return val; - } catch(EOFException eof){ - throw eof; - }catch (IOException e) { - throw new RuntimeException(e); - } + public static ImmutableBytesWritable decode(PrefixByteDecoder decoder, DataInput input) + throws EOFException { + try { + ImmutableBytesWritable val = decoder.decode(input); + return val; + } catch (EOFException eof) { + throw eof; + } catch (IOException e) { + throw new RuntimeException(e); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteDecoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteDecoder.java index c34bda86f18..72adef4d802 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteDecoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteDecoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,67 +24,62 @@ import org.apache.hadoop.io.WritableUtils; /** - * * Prefix decoder for byte arrays. For encoding, see {@link PrefixByteEncoder}. - * */ public class PrefixByteDecoder { - private final int maxLength; - private final ImmutableBytesWritable previous; - - /** - * Used when the maximum length of encoded byte array is not known. Will - * cause a new byte array to be allocated for each call to {@link #decode(DataInput)}. - */ - public PrefixByteDecoder() { - maxLength = -1; - previous = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - } + private final int maxLength; + private final ImmutableBytesWritable previous; - /** - * Used when the maximum length of encoded byte array is known in advance. - * Will not allocate new byte array with each call to {@link #decode(DataInput)}. - * @param maxLength maximum length needed for any call to {@link #decode(DataInput)}. - */ - public PrefixByteDecoder(int maxLength) { - if (maxLength > 0) { - this.maxLength = maxLength; - this.previous = new ImmutableBytesWritable(new byte[maxLength], 0, 0); - } else { - this.maxLength = -1; - previous = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - } - } - - /** - * Resets state of decoder if it will be used to decode bytes from a - * different DataInput. - */ - public void reset() { - previous.set(previous.get(),0,0); + /** + * Used when the maximum length of encoded byte array is not known. Will cause a new byte array to + * be allocated for each call to {@link #decode(DataInput)}. + */ + public PrefixByteDecoder() { + maxLength = -1; + previous = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + } + + /** + * Used when the maximum length of encoded byte array is known in advance. Will not allocate new + * byte array with each call to {@link #decode(DataInput)}. + * @param maxLength maximum length needed for any call to {@link #decode(DataInput)}. 
+ */ + public PrefixByteDecoder(int maxLength) { + if (maxLength > 0) { + this.maxLength = maxLength; + this.previous = new ImmutableBytesWritable(new byte[maxLength], 0, 0); + } else { + this.maxLength = -1; + previous = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); } - - /** - * Decodes bytes encoded with {@link PrefixByteEncoder}. - * @param in Input from which bytes are read. - * @return Pointer containing bytes that were decoded. Note that the - * same pointer will be returned with each call, so it must be consumed - * prior to calling decode again. - * @throws IOException - */ - public ImmutableBytesWritable decode(DataInput in) throws IOException { - int prefixLen = WritableUtils.readVInt(in); - int suffixLen = WritableUtils.readVInt(in); - int length = prefixLen + suffixLen; - byte[] b; - if (maxLength == -1) { // Allocate new byte array each time - b = new byte[length]; - System.arraycopy(previous.get(), previous.getOffset(), b, 0, prefixLen); - } else { // Reuse same buffer each time - b = previous.get(); - } - in.readFully(b, prefixLen, suffixLen); - previous.set(b, 0, length); - return previous; + } + + /** + * Resets state of decoder if it will be used to decode bytes from a different DataInput. + */ + public void reset() { + previous.set(previous.get(), 0, 0); + } + + /** + * Decodes bytes encoded with {@link PrefixByteEncoder}. + * @param in Input from which bytes are read. + * @return Pointer containing bytes that were decoded. Note that the same pointer will be returned + * with each call, so it must be consumed prior to calling decode again. + */ + public ImmutableBytesWritable decode(DataInput in) throws IOException { + int prefixLen = WritableUtils.readVInt(in); + int suffixLen = WritableUtils.readVInt(in); + int length = prefixLen + suffixLen; + byte[] b; + if (maxLength == -1) { // Allocate new byte array each time + b = new byte[length]; + System.arraycopy(previous.get(), previous.getOffset(), b, 0, prefixLen); + } else { // Reuse same buffer each time + b = previous.get(); } -} \ No newline at end of file + in.readFully(b, prefixLen, suffixLen); + previous.set(b, 0, length); + return previous; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteEncoder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteEncoder.java index bf92be5e23b..36ff767fc9e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteEncoder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PrefixByteEncoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,75 +25,69 @@ import org.apache.hadoop.io.WritableUtils; /** - * * Prefix encoder for byte arrays. For decoding, see {@link PrefixByteDecoder}. - * */ public class PrefixByteEncoder { - private int maxLength; - private final ImmutableBytesWritable previous = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - - public PrefixByteEncoder() { - } + private int maxLength; + private final ImmutableBytesWritable previous = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - /** - * Resets the state of the encoder to its initial state (i.e. 
forgetting - * the previous byte array that may have been encoded). - */ - public void reset() { - previous.set(ByteUtil.EMPTY_BYTE_ARRAY); - } - - /** - * @return the maximum length byte array encountered while encoding - */ - public int getMaxLength() { - return maxLength; - } - - /** - * Prefix encodes the byte array pointed to into the output stream - * @param out output stream to encode into - * @param ptr pointer to byte array to encode. - * @throws IOException - */ - public void encode(DataOutput out, ImmutableBytesWritable ptr) throws IOException { - encode(out, ptr.get(), ptr.getOffset(), ptr.getLength()); - } - - /** - * Prefix encodes the byte array into the output stream - * @param out output stream to encode into - * @param b byte array to encode - * @throws IOException - */ - public void encode(DataOutput out, byte[] b) throws IOException { - encode(out, b, 0, b.length); - } + public PrefixByteEncoder() { + } + + /** + * Resets the state of the encoder to its initial state (i.e. forgetting the previous byte array + * that may have been encoded). + */ + public void reset() { + previous.set(ByteUtil.EMPTY_BYTE_ARRAY); + } + + /** Returns the maximum length byte array encountered while encoding */ + public int getMaxLength() { + return maxLength; + } + + /** + * Prefix encodes the byte array pointed to into the output stream + * @param out output stream to encode into + * @param ptr pointer to byte array to encode. + */ + public void encode(DataOutput out, ImmutableBytesWritable ptr) throws IOException { + encode(out, ptr.get(), ptr.getOffset(), ptr.getLength()); + } + + /** + * Prefix encodes the byte array into the output stream + * @param out output stream to encode into + * @param b byte array to encode + */ + public void encode(DataOutput out, byte[] b) throws IOException { + encode(out, b, 0, b.length); + } - /** - * Prefix encodes the byte array from offset to length into output stream. - * Instead of writing the entire byte array, only the portion of the byte array - * that differs from the beginning of the previous byte array written is written. - * - * @param out output stream to encode into - * @param b byte array buffer - * @param offset offset into byte array to start encoding - * @param length length of byte array to encode - * @throws IOException - */ - public void encode(DataOutput out, byte[] b, int offset, int length) throws IOException { - int i = 0; - int prevOffset = previous.getOffset(); - byte[] prevBytes = previous.get(); - int prevLength = previous.getLength(); - int minLength = prevLength < b.length ? prevLength : b.length; - for(i = 0; (i < minLength) && (prevBytes[prevOffset + i] == b[offset + i]); i++); - WritableUtils.writeVInt(out, i); - Bytes.writeByteArray(out, b, offset + i, length - i); - previous.set(b, offset, length); - if (length > maxLength) { - maxLength = length; - } + /** + * Prefix encodes the byte array from offset to length into output stream. Instead of writing the + * entire byte array, only the portion of the byte array that differs from the beginning of the + * previous byte array written is written. 
+ * @param out output stream to encode into + * @param b byte array buffer + * @param offset offset into byte array to start encoding + * @param length length of byte array to encode + */ + public void encode(DataOutput out, byte[] b, int offset, int length) throws IOException { + int i = 0; + int prevOffset = previous.getOffset(); + byte[] prevBytes = previous.get(); + int prevLength = previous.getLength(); + int minLength = prevLength < b.length ? prevLength : b.length; + for (i = 0; (i < minLength) && (prevBytes[prevOffset + i] == b[offset + i]); i++) + ; + WritableUtils.writeVInt(out, i); + Bytes.writeByteArray(out, b, offset + i, length - i); + previous.set(b, offset, length); + if (length > maxLength) { + maxLength = length; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PropertiesUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PropertiesUtil.java index f415f0b9687..5290ac89f72 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/PropertiesUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/PropertiesUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,6 @@ package org.apache.phoenix.util; import java.util.Collections; -import java.util.Enumeration; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; @@ -26,70 +25,70 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver; public class PropertiesUtil { - private PropertiesUtil() { - } - - /** - * Use this to deep copy properties. The copy constructor in {@link java.util.Properties} does not do a deep copy. - * @param properties - * @return new mutable instance of Properties populated with values from the passed in Properties. - */ - public static Properties deepCopy(Properties properties) { - Properties newProperties = new Properties(); - for (String pName : properties.stringPropertyNames()) { - newProperties.setProperty(pName, properties.getProperty(pName)); - } - return newProperties; - } - - /** - * Add properties from the given Configuration to the provided Properties. Note that only those - * configuration properties will be added to the provided properties whose values are already - * not set. The method doesn't modify the passed in properties instead makes a clone of them - * before combining. - * @return properties object that is a combination of properties contained in props and - * properties contained in conf - */ - public static Properties combineProperties(Properties props, final Configuration conf) { - return combineProperties(props, conf, Collections.emptySet()); + private PropertiesUtil() { + } + + /** + * Use this to deep copy properties. The copy constructor in {@link java.util.Properties} does not + * do a deep copy. + * @return new mutable instance of Properties populated with values from the passed in Properties. 
+ */ + public static Properties deepCopy(Properties properties) { + Properties newProperties = new Properties(); + for (String pName : properties.stringPropertyNames()) { + newProperties.setProperty(pName, properties.getProperty(pName)); } + return newProperties; + } + + /** + * Add properties from the given Configuration to the provided Properties. Note that only those + * configuration properties will be added to the provided properties whose values are already not + * set. The method doesn't modify the passed in properties instead makes a clone of them before + * combining. + * @return properties object that is a combination of properties contained in props and properties + * contained in conf + */ + public static Properties combineProperties(Properties props, final Configuration conf) { + return combineProperties(props, conf, Collections. emptySet()); + } - public static Properties combineProperties(Properties props, final Configuration conf, Set withoutTheseProps) { - Iterator> iterator = conf.iterator(); - Properties copy = deepCopy(props); - if (iterator != null) { - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - // set the property from config only if props doesn't have it already - if (copy.getProperty(entry.getKey()) == null && !withoutTheseProps.contains(entry.getKey())) { - copy.setProperty(entry.getKey(), entry.getValue()); - } - } + public static Properties combineProperties(Properties props, final Configuration conf, + Set withoutTheseProps) { + Iterator> iterator = conf.iterator(); + Properties copy = deepCopy(props); + if (iterator != null) { + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + // set the property from config only if props doesn't have it already + if ( + copy.getProperty(entry.getKey()) == null && !withoutTheseProps.contains(entry.getKey()) + ) { + copy.setProperty(entry.getKey(), entry.getValue()); } - return copy; + } } + return copy; + } - /** - * Utility to work around the limitation of the copy constructor - * {@link Configuration#Configuration(Configuration)} provided by the {@link Configuration} - * class. See https://issues.apache.org/jira/browse/HBASE-18378. - * The copy constructor doesn't copy all the config settings, so we need to resort to - * iterating through all the settings and setting it on the cloned config. - * @param toCopy configuration to copy - * @return - */ - public static Configuration cloneConfig(Configuration toCopy) { - Configuration clone = new Configuration(); - Iterator> iterator = toCopy.iterator(); - while (iterator.hasNext()) { - Entry entry = iterator.next(); - clone.set(entry.getKey(), entry.getValue()); - } - return clone; + /** + * Utility to work around the limitation of the copy constructor + * {@link Configuration#Configuration(Configuration)} provided by the {@link Configuration} class. + * See https://issues.apache.org/jira/browse/HBASE-18378. The copy constructor doesn't copy all + * the config settings, so we need to resort to iterating through all the settings and setting it + * on the cloned config. 
+ * @param toCopy configuration to copy + */ + public static Configuration cloneConfig(Configuration toCopy) { + Configuration clone = new Configuration(); + Iterator> iterator = toCopy.iterator(); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + clone.set(entry.getKey(), entry.getValue()); } + return clone; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryBuilder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryBuilder.java index 92ad7ab385f..3584f3f7eae 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryBuilder.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,195 +17,194 @@ */ package org.apache.phoenix.util; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.phoenix.parse.HintNode; +import static org.apache.phoenix.util.SchemaUtil.getEscapedFullColumnName; import java.util.Collections; import java.util.List; -import static org.apache.phoenix.util.SchemaUtil.getEscapedFullColumnName; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.phoenix.parse.HintNode; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class QueryBuilder { - private String fullTableName; - // regular columns that are in the select clause - private List selectColumns = Collections.emptyList(); - - // columns that are required for expressions in the select clause - private List selectExpressionColumns = Collections.emptyList(); - // expression string in the select clause (for eg COL1 || COL2) - private String selectExpression; - private String whereClause; - private String orderByClause; - private String groupByClause; - private String havingClause; - private HintNode.Hint hint; - private boolean escapeCols; - private boolean distinct; - private int limit; - - public String getFullTableName() { - return fullTableName; - } - - /** - * @return column names required to evaluate this select statement - */ - public List getRequiredColumns() { - List allColumns = Lists.newArrayList(selectColumns); - if (!CollectionUtils.isEmpty(selectExpressionColumns)) { - allColumns.addAll(selectExpressionColumns); + private String fullTableName; + // regular columns that are in the select clause + private List selectColumns = Collections.emptyList(); + + // columns that are required for expressions in the select clause + private List selectExpressionColumns = Collections.emptyList(); + // expression string in the select clause (for eg COL1 || COL2) + private String selectExpression; + private String whereClause; + private String orderByClause; + private String groupByClause; + private String havingClause; + private HintNode.Hint hint; + private boolean escapeCols; + private boolean distinct; + private int limit; + + public String getFullTableName() { 
+ return fullTableName; + } + + /** Returns column names required to evaluate this select statement */ + public List getRequiredColumns() { + List allColumns = Lists.newArrayList(selectColumns); + if (!CollectionUtils.isEmpty(selectExpressionColumns)) { + allColumns.addAll(selectExpressionColumns); + } + return allColumns; + } + + public String getWhereClause() { + return whereClause; + } + + public HintNode.Hint getHint() { + return hint; + } + + public String getOrderByClause() { + return orderByClause; + } + + public String getGroupByClause() { + return groupByClause; + } + + public QueryBuilder setOrderByClause(String orderByClause) { + this.orderByClause = orderByClause; + return this; + } + + public QueryBuilder setFullTableName(String fullTableName) { + this.fullTableName = fullTableName; + return this; + } + + public QueryBuilder setSelectColumns(List columns) { + this.selectColumns = columns; + return this; + } + + public QueryBuilder setWhereClause(String whereClause) { + this.whereClause = whereClause; + return this; + } + + public QueryBuilder setHint(HintNode.Hint hint) { + this.hint = hint; + return this; + } + + public QueryBuilder setEscapeCols(boolean escapeCols) { + this.escapeCols = escapeCols; + return this; + } + + public QueryBuilder setGroupByClause(String groupByClause) { + this.groupByClause = groupByClause; + return this; + } + + public QueryBuilder setHavingClause(String havingClause) { + this.havingClause = havingClause; + return this; + } + + public List getSelectExpressionColumns() { + return selectExpressionColumns; + } + + public QueryBuilder setSelectExpressionColumns(List selectExpressionColumns) { + this.selectExpressionColumns = selectExpressionColumns; + return this; + } + + public String getSelectExpression() { + return selectExpression; + } + + public QueryBuilder setSelectExpression(String selectExpression) { + this.selectExpression = selectExpression; + return this; + } + + public QueryBuilder setDistinct(boolean distinct) { + this.distinct = distinct; + return this; + } + + public QueryBuilder setLimit(int limit) { + this.limit = limit; + return this; + } + + public String build() { + Preconditions.checkNotNull(fullTableName, "Table name cannot be null"); + if (CollectionUtils.isEmpty(selectColumns) && StringUtils.isBlank(selectExpression)) { + throw new IllegalArgumentException( + "At least one column or select expression must be provided"); + } + StringBuilder query = new StringBuilder(); + query.append("SELECT "); + + if (distinct) { + query.append(" DISTINCT "); + } + + if (hint != null) { + final HintNode node = new HintNode(hint.name()); + String hintStr = node.toString(); + query.append(hintStr); + } + + StringBuilder selectClauseBuilder = new StringBuilder(); + if (StringUtils.isNotBlank(selectExpression)) { + if (selectClauseBuilder.length() != 0) { + selectClauseBuilder.append(" , "); + } + selectClauseBuilder.append(selectExpression); + } + + boolean first = true; + for (String col : selectColumns) { + if (StringUtils.isNotBlank(col)) { + if ((first && selectClauseBuilder.length() != 0) || !first) { + selectClauseBuilder.append(" , "); } - return allColumns; - } - - public String getWhereClause() { - return whereClause; - } - - public HintNode.Hint getHint() { - return hint; - } - - public String getOrderByClause() { - return orderByClause; - } - - public String getGroupByClause() { - return groupByClause; - } - - public QueryBuilder setOrderByClause(String orderByClause) { - this.orderByClause = orderByClause; - return this; - } - - 
public QueryBuilder setFullTableName(String fullTableName) { - this.fullTableName = fullTableName; - return this; - } - - public QueryBuilder setSelectColumns(List columns) { - this.selectColumns = columns; - return this; - } - - public QueryBuilder setWhereClause(String whereClause) { - this.whereClause = whereClause; - return this; - } - - public QueryBuilder setHint(HintNode.Hint hint) { - this.hint = hint; - return this; - } - - public QueryBuilder setEscapeCols(boolean escapeCols) { - this.escapeCols = escapeCols; - return this; - } - - public QueryBuilder setGroupByClause(String groupByClause) { - this.groupByClause = groupByClause; - return this; - } - - public QueryBuilder setHavingClause(String havingClause) { - this.havingClause = havingClause; - return this; - } - - public List getSelectExpressionColumns() { - return selectExpressionColumns; - } - - public QueryBuilder setSelectExpressionColumns(List selectExpressionColumns) { - this.selectExpressionColumns = selectExpressionColumns; - return this; + String fullColumnName = col; + if (escapeCols) { + fullColumnName = getEscapedFullColumnName(col); + } + selectClauseBuilder.append(fullColumnName); + first = false; + } } - public String getSelectExpression() { - return selectExpression; + query.append(selectClauseBuilder); + query.append(" FROM "); + query.append(fullTableName); + if (StringUtils.isNotBlank(whereClause)) { + query.append(" WHERE (").append(whereClause).append(")"); } - - public QueryBuilder setSelectExpression(String selectExpression) { - this.selectExpression = selectExpression; - return this; + if (StringUtils.isNotBlank(groupByClause)) { + query.append(" GROUP BY ").append(groupByClause); } - - public QueryBuilder setDistinct(boolean distinct) { - this.distinct = distinct; - return this; + if (StringUtils.isNotBlank(havingClause)) { + query.append(" HAVING ").append(havingClause); } - - public QueryBuilder setLimit(int limit) { - this.limit = limit; - return this; + if (StringUtils.isNotBlank(orderByClause)) { + query.append(" ORDER BY ").append(orderByClause); } - - public String build() { - Preconditions.checkNotNull(fullTableName, "Table name cannot be null"); - if (CollectionUtils.isEmpty(selectColumns) && StringUtils.isBlank(selectExpression)) { - throw new IllegalArgumentException("At least one column or select expression must be provided"); - } - StringBuilder query = new StringBuilder(); - query.append("SELECT "); - - if (distinct) { - query.append(" DISTINCT "); - } - - if (hint != null) { - final HintNode node = new HintNode(hint.name()); - String hintStr = node.toString(); - query.append(hintStr); - } - - StringBuilder selectClauseBuilder = new StringBuilder(); - if (StringUtils.isNotBlank(selectExpression)) { - if (selectClauseBuilder.length()!=0) { - selectClauseBuilder.append(" , "); - } - selectClauseBuilder.append(selectExpression); - } - - boolean first = true; - for (String col : selectColumns) { - if (StringUtils.isNotBlank(col)) { - if ((first && selectClauseBuilder.length()!=0) || !first) { - selectClauseBuilder.append(" , "); - } - String fullColumnName = col; - if (escapeCols) { - fullColumnName = getEscapedFullColumnName(col); - } - selectClauseBuilder.append(fullColumnName); - first = false; - } - } - - query.append(selectClauseBuilder); - query.append(" FROM "); - query.append(fullTableName); - if (StringUtils.isNotBlank(whereClause)) { - query.append(" WHERE (").append(whereClause).append(")"); - } - if (StringUtils.isNotBlank(groupByClause)) { - query.append(" GROUP BY 
").append(groupByClause); - } - if (StringUtils.isNotBlank(havingClause)) { - query.append(" HAVING ").append(havingClause); - } - if (StringUtils.isNotBlank(orderByClause)) { - query.append(" ORDER BY ").append(orderByClause); - } - if (limit > 0) { - query.append(" LIMIT ").append(limit); - } - return query.toString(); + if (limit > 0) { + query.append(" LIMIT ").append(limit); } + return query.toString(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryUtil.java index 6edb67af11d..8ead2a3aa42 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/QueryUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; @@ -112,752 +111,708 @@ public final class QueryUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(QueryUtil.class); - - /** - * Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)} - */ - public static final int COLUMN_FAMILY_POSITION = 25; - - /** - * Column name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)} - */ - public static final int COLUMN_NAME_POSITION = 4; - - /** - * Data type index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)} - */ - public static final int DATA_TYPE_POSITION = 5; - - /** - * Index of the column containing the datatype name within ResultSet resulting from {@link - * DatabaseMetaData#getColumns(String, String, String, String)}. - */ - public static final int DATA_TYPE_NAME_POSITION = 6; - - public static final String IS_SERVER_CONNECTION = "IS_SERVER_CONNECTION"; - private static final String SELECT = "SELECT"; - private static final String FROM = "FROM"; - private static final String WHERE = "WHERE"; - private static final String AND = "AND"; - private static final String[] CompareOpString = new String[CompareOperator.values().length]; - - static { - CompareOpString[CompareOperator.EQUAL.ordinal()] = "="; - CompareOpString[CompareOperator.NOT_EQUAL.ordinal()] = "!="; - CompareOpString[CompareOperator.GREATER.ordinal()] = ">"; - CompareOpString[CompareOperator.LESS.ordinal()] = "<"; - CompareOpString[CompareOperator.GREATER_OR_EQUAL.ordinal()] = ">="; - CompareOpString[CompareOperator.LESS_OR_EQUAL.ordinal()] = "<="; - } - - public static String toSQL(CompareOperator op) { - return CompareOpString[op.ordinal()]; - } - - /** - * Private constructor - */ - private QueryUtil() { - } - /** - * Generate an upsert statement based on a list of {@code ColumnInfo}s with parameter markers. The list of - * {@code ColumnInfo}s must contain at least one element. 
- * - * @param tableName name of the table for which the upsert statement is to be created - * @param columnInfos list of column to be included in the upsert statement - * @return the created {@code UPSERT} statement - */ - public static String constructUpsertStatement(String tableName, List columnInfos) { - - if (columnInfos.isEmpty()) { - throw new IllegalArgumentException("At least one column must be provided for upserts"); - } - - final List columnNames = Lists.transform(columnInfos, new Function() { - @Override - public String apply(ColumnInfo columnInfo) { - return columnInfo.getColumnName(); - } - }); - return constructUpsertStatement(tableName, columnNames, null); - - } - - /** - * Generate an upsert statement based on a list of {@code ColumnInfo}s with parameter markers. The list of - * {@code ColumnInfo}s must contain at least one element. - * - * @param tableName name of the table for which the upsert statement is to be created - * @param columns list of columns to be included in the upsert statement - * @param hint hint to be added to the UPSERT statement. - * @return the created {@code UPSERT} statement - */ - public static String constructUpsertStatement(String tableName, List columns, Hint hint) { - - if (columns.isEmpty()) { - throw new IllegalArgumentException("At least one column must be provided for upserts"); - } - - String hintStr = ""; - if(hint != null) { - final HintNode node = new HintNode(hint.name()); - hintStr = node.toString(); - } - - List parameterList = Lists.newArrayList(); - for (int i = 0; i < columns.size(); i++) { - parameterList.add("?"); - } - return String.format( - "UPSERT %s INTO %s (%s) VALUES (%s)", - hintStr, - tableName, - Joiner.on(", ").join( - Iterables.transform( - columns, - new Function() { - @Nullable - @Override - public String apply(String columnName) { - return getEscapedFullColumnName(columnName); - } - })), - Joiner.on(", ").join(parameterList)); - - } - - /** - * Generate a generic upsert statement based on a number of columns. The created upsert statement will not include - * any named columns, but will include parameter markers for the given number of columns. The number of columns - * must be greater than zero. - * - * @param tableName name of the table for which the upsert statement is to be created - * @param numColumns number of columns to be included in the upsert statement - * @return the created {@code UPSERT} statement - */ - public static String constructGenericUpsertStatement(String tableName, int numColumns) { - - - if (numColumns == 0) { - throw new IllegalArgumentException("At least one column must be provided for upserts"); - } - - List parameterList = Lists.newArrayListWithCapacity(numColumns); - for (int i = 0; i < numColumns; i++) { - parameterList.add("?"); - } - return String.format("UPSERT INTO %s VALUES (%s)", tableName, Joiner.on(", ").join(parameterList)); - } - - /** - * - * @param fullTableName name of the table for which the select statement needs to be created. - * @param columnInfos list of columns to be projected in the select statement. 
- * @param conditions The condition clause to be added to the WHERE condition - * @return Select Query - */ - public static String constructSelectStatement(String fullTableName, List columnInfos,final String conditions) { - List columns = Lists.transform(columnInfos, new Function(){ - @Override - public String apply(ColumnInfo input) { - return input.getColumnName(); - }}); - return constructSelectStatement(fullTableName, columns , conditions, null, false); - } - - /** - * - * @param fullTableName name of the table for which the select statement needs to be created. - * @param columns list of columns to be projected in the select statement. - * @param whereClause The condition clause to be added to the WHERE condition - * @param hint hint to use - * @param escapeCols whether to escape the projected columns - * @return Select Query - */ - public static String constructSelectStatement(String fullTableName, List columns, - final String whereClause, Hint hint, boolean escapeCols) { - return new QueryBuilder().setFullTableName(fullTableName).setSelectColumns(columns) - .setWhereClause(whereClause).setHint(hint).setEscapeCols(escapeCols).build(); - } - - /** - * Constructs parameterized filter for an IN clause e.g. passing in numWhereCols=2, numBatches=3 - * results in ((?,?),(?,?),(?,?)) - * @param numWhereCols number of WHERE columns - * @param numBatches number of column batches - * @return paramterized IN filter - */ - public static String constructParameterizedInClause(int numWhereCols, int numBatches) { - Preconditions.checkArgument(numWhereCols > 0); - Preconditions.checkArgument(numBatches > 0); - String batch = "(" + StringUtils.repeat("?", ",", numWhereCols) + ")"; - return "(" + StringUtils.repeat(batch, ",", numBatches) + ")"; - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum) { - return getUrlInternal(zkQuorum, null, null, null); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum, int clientPort) { - return getUrlInternal(zkQuorum, clientPort, null, null); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum, String znodeParent) { - return getUrlInternal(zkQuorum, null, znodeParent, null); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum, int port, String znodeParent, String principal) { - return getUrlInternal(zkQuorum, port, znodeParent, principal); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum, int port, String znodeParent) { - return getUrlInternal(zkQuorum, port, znodeParent, null); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. - */ - @Deprecated - public static String getUrl(String zkQuorum, Integer port, String znodeParent) { - return getUrlInternal(zkQuorum, port, znodeParent, null); - } - - /** - * Create the Phoenix JDBC connection URL from the provided cluster connection details. 
- */ - @Deprecated - public static String getUrl(String zkQuorum, Integer port, String znodeParent, String principal) { - return getUrlInternal(zkQuorum, port, znodeParent, principal); - } - - @Deprecated - private static String getUrlInternal(String zkQuorum, Integer port, String znodeParent, String principal) { - return String.join(":", PhoenixRuntime.JDBC_PROTOCOL, zkQuorum, port == null ? "" : port.toString(), znodeParent == null ? "" : znodeParent, principal == null ? "" : principal) - + Character.toString(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR); - } - - public static String getExplainPlan(ResultSet rs) throws SQLException { - StringBuilder buf = new StringBuilder(); - while (rs.next()) { - buf.append(rs.getString(1)); - buf.append('\n'); - } - if (buf.length() > 0) { - buf.setLength(buf.length()-1); - } - return buf.toString(); - } - - public static String getExplainPlan(ResultIterator iterator) throws SQLException { - List steps = Lists.newArrayList(); - iterator.explain(steps); - StringBuilder buf = new StringBuilder(); - for (String step : steps) { - buf.append(step); - buf.append('\n'); - } - if (buf.length() > 0) { - buf.setLength(buf.length()-1); - } - return buf.toString(); - } - - /** - * @return {@link PhoenixConnection} with {@value UpgradeUtil#DO_NOT_UPGRADE} set so that we - * don't initiate metadata upgrade - */ - public static Connection getConnectionOnServer(Configuration conf) throws SQLException { - return getConnectionOnServer(new Properties(), conf); - } - - public static void setServerConnection(Properties props){ - UpgradeUtil.doNotUpgradeOnFirstConnection(props); - props.setProperty(IS_SERVER_CONNECTION, Boolean.TRUE.toString()); - } - - public static boolean isServerConnection(ReadOnlyProps props) { - return props.getBoolean(IS_SERVER_CONNECTION, false); - } - - /** - * @return {@link PhoenixConnection} with {@value UpgradeUtil#DO_NOT_UPGRADE} set - * and with the upgrade-required flag cleared so that we don't initiate metadata upgrade. - */ - public static Connection getConnectionOnServer(Properties props, Configuration conf) - throws SQLException { - setServerConnection(props); - Connection conn = getConnection(props, conf); - conn.unwrap(PhoenixConnection.class).getQueryServices().clearUpgradeRequired(); - return conn; - } - - public static Connection getConnectionOnServerWithCustomUrl(Properties props, String principal) - throws SQLException { - setServerConnection(props); - String url = getConnectionUrl(props, null, principal); - LOGGER.info("Creating connection with the jdbc url: " + url); - return DriverManager.getConnection(url, props); - } - - public static Connection getConnection(Configuration conf) throws SQLException { - return getConnection(new Properties(), conf); - } - - public static Connection getConnection(Properties props, Configuration conf) - throws SQLException { - String url = getConnectionUrl(props, conf); - LOGGER.info(String.format("Creating connection with the jdbc url: %s, isServerSide = %s", - url, props.getProperty(IS_SERVER_CONNECTION))); - props = PropertiesUtil.combineProperties(props, conf); - return DriverManager.getConnection(url, props); - } - - public static String getConnectionUrl(Properties props, Configuration conf) - throws SQLException { - return getConnectionUrl(props, conf, null); - } - /** - * @return connection url using the various properties set in props and conf. 
- */ - public static String getConnectionUrl(Properties props, Configuration conf, String principal) - throws SQLException { - ReadOnlyProps propsWithPrincipal; - if (principal != null) { - Map principalProp = new HashMap<>(); - principalProp.put(QueryServices.HBASE_CLIENT_PRINCIPAL, principal); - propsWithPrincipal = new ReadOnlyProps(principalProp.entrySet().iterator()); - } else { - propsWithPrincipal = ReadOnlyProps.EMPTY_PROPS; - } - ConnectionInfo info = - ConnectionInfo.createNoLogin(PhoenixRuntime.JDBC_PROTOCOL, conf, propsWithPrincipal, - props); - String url = info.toUrl(); - if (url.endsWith(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + "")) { - url = url.substring(0, url.length() - 1); - } - // Mainly for testing to tack on the test=true part to ensure driver is found on server - String defaultExtraArgs = - conf != null - ? conf.get(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, - QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS) - : QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS; - // If props doesn't have a default for extra args then use the extra args in conf as default - String extraArgs = - props.getProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, defaultExtraArgs); - if (extraArgs.length() > 0) { - url += - PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + extraArgs - + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; - } else { - url += PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; - } - return url; - } - - private static int getInt(String key, int defaultValue, Properties props, Configuration conf) { - if (conf == null) { - Preconditions.checkNotNull(props); - return Integer.parseInt(props.getProperty(key, String.valueOf(defaultValue))); - } - return conf.getInt(key, defaultValue); - } - - private static String getString(String key, String defaultValue, Properties props, Configuration conf) { - if (conf == null) { - Preconditions.checkNotNull(props); - return props.getProperty(key, defaultValue); - } - return conf.get(key, defaultValue); - } - - public static String getViewStatement(String schemaName, String tableName, String where) { - // Only form we currently support for VIEWs: SELECT * FROM t WHERE ... - return SELECT + " " + WildcardParseNode.NAME + " " + FROM + " " + - (schemaName == null || schemaName.length() == 0 ? 
"" : ("\"" + schemaName + "\".")) + - ("\"" + tableName + "\" ") + - (WHERE + " " + where); - } - - public static Integer getOffsetLimit(Integer limit, Integer offset) { - if (limit == null) { - return null; - } else if (offset == null) { - return limit; - } else { - return limit + offset; - } - - } - - public static Integer getRemainingOffset(Tuple offsetTuple) { - if (offsetTuple != null) { - Cell cell = offsetTuple.getValue(QueryConstants.OFFSET_FAMILY, - QueryConstants.OFFSET_COLUMN); - if (cell != null) { - return PInteger.INSTANCE.toObject( - cell.getValueArray(), - cell.getValueOffset(), - cell.getValueLength(), - PInteger.INSTANCE, - SortOrder.ASC, - null, - null); - } - } - return null; - } - - public static String getViewPartitionClause(String partitionColumnName, long autoPartitionNum) { - return partitionColumnName + " " + toSQL(CompareOperator.EQUAL) + " " + autoPartitionNum; - } - - public static Connection getConnectionForQueryLog(Configuration config) throws SQLException { - //we don't need this connection to upgrade anything or start dispatcher - return getConnectionOnServer(config); - } - - public static PreparedStatement getCatalogsStmt(PhoenixConnection connection) throws SQLException { - List parameterValues = new ArrayList(4); - StringBuilder buf = new StringBuilder("select \n" + - " DISTINCT " + TENANT_ID + " " + TABLE_CAT + - " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + - " where " + COLUMN_NAME + " is null" + - " and " + COLUMN_FAMILY + " is null" + - " and " + TENANT_ID + " is not null"); - addTenantIdFilter(connection, buf, null, parameterValues); - buf.append(" order by " + TENANT_ID); - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - for(int i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - return stmt; - } - - /** - * Util that generates a PreparedStatement against syscat to fetch schema listings. - */ - public static PreparedStatement getSchemasStmt( - PhoenixConnection connection, String catalog, String schemaPattern) throws SQLException { - List parameterValues = new ArrayList(4); - StringBuilder buf = new StringBuilder("select distinct \n" + - TABLE_SCHEM + "," + - TENANT_ID + " " + TABLE_CATALOG + - " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + - " where " + COLUMN_NAME + " is null"); - addTenantIdFilter(connection, buf, catalog, parameterValues); - if (schemaPattern != null) { - buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?")); - if(schemaPattern.length() > 0) { - parameterValues.add(schemaPattern); - } - } - if (SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) { - buf.append(" and " + TABLE_NAME + " = '" + MetaDataClient.EMPTY_TABLE + "'"); - } - - // TODO: we should union this with SYSTEM.SEQUENCE too, but we only have support for - // UNION ALL and we really need UNION so that it dedups. 
- - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - for(int i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - return stmt; - } - - public static PreparedStatement getSuperTablesStmt(PhoenixConnection connection, - String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - List parameterValues = new ArrayList(4); - StringBuilder buf = new StringBuilder("select \n" + - TENANT_ID + " " + TABLE_CAT + "," + // Use tenantId for catalog - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + " " + SUPERTABLE_NAME + - " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + - " where " + COLUMN_NAME + " is null" + - " and " + LINK_TYPE + " = " + PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()); - addTenantIdFilter(connection, buf, catalog, parameterValues); - if (schemaPattern != null) { - buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?" )); - if(schemaPattern.length() > 0) { - parameterValues.add(schemaPattern); - } - } - if (tableNamePattern != null) { - buf.append(" and " + TABLE_NAME + " like ?" ); - parameterValues.add(tableNamePattern); - } - buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," +TABLE_NAME + "," + SUPERTABLE_NAME); - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - for(int i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - return stmt; - } - - public static PreparedStatement getIndexInfoStmt(PhoenixConnection connection, - String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { - if (unique) { // No unique indexes - return null; - } - List parameterValues = new ArrayList(4); - StringBuilder buf = new StringBuilder("select \n" + - TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for column family name - TABLE_SCHEM + ",\n" + - DATA_TABLE_NAME + " " + TABLE_NAME + ",\n" + - "true NON_UNIQUE,\n" + - "null INDEX_QUALIFIER,\n" + - TABLE_NAME + " INDEX_NAME,\n" + - DatabaseMetaData.tableIndexOther + " TYPE,\n" + - ORDINAL_POSITION + ",\n" + - COLUMN_NAME + ",\n" + - "CASE WHEN " + COLUMN_FAMILY + " IS NOT NULL THEN null WHEN " + SORT_ORDER + " = " + (SortOrder.DESC.getSystemValue()) + " THEN 'D' ELSE 'A' END ASC_OR_DESC,\n" + - "null CARDINALITY,\n" + - "null PAGES,\n" + - "null FILTER_CONDITION,\n" + - // Include data type info, though not in spec - ExternalSqlTypeIdFunction.NAME + "(" + DATA_TYPE + ") AS " + DATA_TYPE + ",\n" + - SqlTypeNameFunction.NAME + "(" + DATA_TYPE + ") AS " + TYPE_NAME + ",\n" + - DATA_TYPE + " " + TYPE_ID + ",\n" + - COLUMN_FAMILY + ",\n" + - COLUMN_SIZE + ",\n" + - ARRAY_SIZE + - "\nfrom " + SYSTEM_CATALOG + - "\nwhere "); - buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = ?" )); - if(schema != null && schema.length() > 0) { - parameterValues.add(schema); - } - buf.append("\nand " + DATA_TABLE_NAME + " = ?" ); - parameterValues.add(table); - buf.append("\nand " + COLUMN_NAME + " is not null" ); - addTenantIdFilter(connection, buf, catalog, parameterValues); - buf.append("\norder by INDEX_NAME," + ORDINAL_POSITION); - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - for(int i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - return stmt; - } - - /** - * Util that generates a PreparedStatement against syscat to get the table listing in a given schema. 
- */ - public static PreparedStatement getTablesStmt(PhoenixConnection connection, String catalog, String schemaPattern, - String tableNamePattern, String[] types) throws SQLException { - boolean isSequence = false; - boolean hasTableTypes = types != null && types.length > 0; - StringBuilder typeClauseBuf = new StringBuilder(); - List parameterValues = new ArrayList(4); - if (hasTableTypes) { - List tableTypes = Lists.newArrayList(types); - isSequence = tableTypes.remove(SEQUENCE_TABLE_TYPE); - StringBuilder typeBuf = new StringBuilder(); - for (String type : tableTypes) { - try { - PTableType tableType = PTableType.fromValue(type); - typeBuf.append('\''); - typeBuf.append(tableType.getSerializedValue()); - typeBuf.append('\''); - typeBuf.append(','); - } catch (IllegalArgumentException e) { - // Ignore and continue - } - } - if (typeBuf.length() > 0) { - typeClauseBuf.append(" and " + TABLE_TYPE + " IN ("); - typeClauseBuf.append(typeBuf); - typeClauseBuf.setCharAt(typeClauseBuf.length()-1, ')'); - } - } - StringBuilder buf = new StringBuilder("select \n"); - // If there were table types specified and they were all filtered out - // and we're not querying for sequences, return an empty result set. - if (hasTableTypes && typeClauseBuf.length() == 0 && !isSequence) { - return null; - } - if (typeClauseBuf.length() > 0 || !isSequence) { - buf.append( - TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog - TABLE_SCHEM + "," + - TABLE_NAME + " ," + - SQLTableTypeFunction.NAME + "(" + TABLE_TYPE + ") AS " + TABLE_TYPE + "," + - REMARKS + " ," + - TYPE_NAME + "," + - SELF_REFERENCING_COL_NAME + "," + - REF_GENERATION + "," + - IndexStateNameFunction.NAME + "(" + INDEX_STATE + ") AS " + INDEX_STATE + "," + - IMMUTABLE_ROWS + "," + - SALT_BUCKETS + "," + - MULTI_TENANT + "," + - VIEW_STATEMENT + "," + - SQLViewTypeFunction.NAME + "(" + VIEW_TYPE + ") AS " + VIEW_TYPE + "," + - SQLIndexTypeFunction.NAME + "(" + INDEX_TYPE + ") AS " + INDEX_TYPE + "," + - TRANSACTION_PROVIDER + " IS NOT NULL AS " + TRANSACTIONAL + "," + - IS_NAMESPACE_MAPPED + "," + - GUIDE_POSTS_WIDTH + "," + - TransactionProviderNameFunction.NAME + "(" + TRANSACTION_PROVIDER + ") AS TRANSACTION_PROVIDER" + - " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + - " where " + COLUMN_NAME + " is null" + - " and " + COLUMN_FAMILY + " is null" + - " and " + TABLE_NAME + " != '" + MetaDataClient.EMPTY_TABLE + "'"); - addTenantIdFilter(connection, buf, catalog, parameterValues); - if (schemaPattern != null) { - buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?" )); - if (schemaPattern.length() > 0) { - parameterValues.add(schemaPattern); - } - } - if (tableNamePattern != null) { - buf.append(" and " + TABLE_NAME + " like ?" 
); - parameterValues.add(tableNamePattern); - } - if (typeClauseBuf.length() > 0) { - buf.append(typeClauseBuf); - } - } - if (isSequence) { - // Union the SYSTEM.CATALOG entries with the SYSTEM.SEQUENCE entries - if (typeClauseBuf.length() > 0) { - buf.append(" UNION ALL\n"); - buf.append(" select\n"); - } - buf.append( - TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog - SEQUENCE_SCHEMA + " " + TABLE_SCHEM + "," + - SEQUENCE_NAME + " " + TABLE_NAME + " ," + - "'" + SEQUENCE_TABLE_TYPE + "' " + TABLE_TYPE + "," + - "'' " + REMARKS + " ," + - "'' " + TYPE_NAME + "," + - "'' " + SELF_REFERENCING_COL_NAME + "," + - "'' " + REF_GENERATION + "," + - "CAST(null AS CHAR(1)) " + INDEX_STATE + "," + - "CAST(null AS BOOLEAN) " + IMMUTABLE_ROWS + "," + - "CAST(null AS INTEGER) " + SALT_BUCKETS + "," + - "CAST(null AS BOOLEAN) " + MULTI_TENANT + "," + - "'' " + VIEW_STATEMENT + "," + - "'' " + VIEW_TYPE + "," + - "'' " + INDEX_TYPE + "," + - "CAST(null AS BOOLEAN) " + TRANSACTIONAL + "," + - "CAST(null AS BOOLEAN) " + IS_NAMESPACE_MAPPED + "," + - "CAST(null AS BIGINT) " + GUIDE_POSTS_WIDTH + "," + - "CAST(null AS VARCHAR) " + TRANSACTION_PROVIDER + "\n"); - buf.append(" from " + SYSTEM_SEQUENCE + "\n"); - StringBuilder whereClause = new StringBuilder(); - addTenantIdFilter(connection, whereClause, catalog, parameterValues); - if (schemaPattern != null) { - appendConjunction(whereClause); - whereClause.append(SEQUENCE_SCHEMA + (schemaPattern.length() == 0 ? " is null" : " like ?\n" )); - if (schemaPattern.length() > 0) { - parameterValues.add(schemaPattern); - } - } - if (tableNamePattern != null) { - appendConjunction(whereClause); - whereClause.append(SEQUENCE_NAME + " like ?\n" ); - parameterValues.add(tableNamePattern); - } - if (whereClause.length() > 0) { - buf.append(" where\n"); - buf.append(whereClause); - } - } - buf.append(" order by 4, 1, 2, 3\n"); - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - for (int i = 0; i < parameterValues.size(); i++) { - stmt.setString(i+1, parameterValues.get(i)); - } - return stmt; - } - - /** - * Util that generates a PreparedStatement against syscat to get the table listing in a given schema. - */ - public static PreparedStatement getShowCreateTableStmt(PhoenixConnection connection, String catalog, TableName tn) throws SQLException { - - String output; - SchemaProcessor processor = new SchemaExtractionProcessor(null, - connection.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration(), - tn.getSchemaName() == null ? null : "\"" + tn.getSchemaName()+ "\"", - "\"" + tn.getTableName() + "\""); + private static final Logger LOGGER = LoggerFactory.getLogger(QueryUtil.class); + + /** + * Column family name index within ResultSet resulting from + * {@link DatabaseMetaData#getColumns(String, String, String, String)} + */ + public static final int COLUMN_FAMILY_POSITION = 25; + + /** + * Column name index within ResultSet resulting from + * {@link DatabaseMetaData#getColumns(String, String, String, String)} + */ + public static final int COLUMN_NAME_POSITION = 4; + + /** + * Data type index within ResultSet resulting from + * {@link DatabaseMetaData#getColumns(String, String, String, String)} + */ + public static final int DATA_TYPE_POSITION = 5; + + /** + * Index of the column containing the datatype name within ResultSet resulting from + * {@link DatabaseMetaData#getColumns(String, String, String, String)}. 
+ */ + public static final int DATA_TYPE_NAME_POSITION = 6; + + public static final String IS_SERVER_CONNECTION = "IS_SERVER_CONNECTION"; + private static final String SELECT = "SELECT"; + private static final String FROM = "FROM"; + private static final String WHERE = "WHERE"; + private static final String AND = "AND"; + private static final String[] CompareOpString = new String[CompareOperator.values().length]; + + static { + CompareOpString[CompareOperator.EQUAL.ordinal()] = "="; + CompareOpString[CompareOperator.NOT_EQUAL.ordinal()] = "!="; + CompareOpString[CompareOperator.GREATER.ordinal()] = ">"; + CompareOpString[CompareOperator.LESS.ordinal()] = "<"; + CompareOpString[CompareOperator.GREATER_OR_EQUAL.ordinal()] = ">="; + CompareOpString[CompareOperator.LESS_OR_EQUAL.ordinal()] = "<="; + } + + public static String toSQL(CompareOperator op) { + return CompareOpString[op.ordinal()]; + } + + /** + * Private constructor + */ + private QueryUtil() { + } + + /** + * Generate an upsert statement based on a list of {@code ColumnInfo}s with parameter markers. The + * list of {@code ColumnInfo}s must contain at least one element. + * @param tableName name of the table for which the upsert statement is to be created + * @param columnInfos list of column to be included in the upsert statement + * @return the created {@code UPSERT} statement + */ + public static String constructUpsertStatement(String tableName, List columnInfos) { + + if (columnInfos.isEmpty()) { + throw new IllegalArgumentException("At least one column must be provided for upserts"); + } + + final List columnNames = + Lists.transform(columnInfos, new Function() { + @Override + public String apply(ColumnInfo columnInfo) { + return columnInfo.getColumnName(); + } + }); + return constructUpsertStatement(tableName, columnNames, null); + + } + + /** + * Generate an upsert statement based on a list of {@code ColumnInfo}s with parameter markers. The + * list of {@code ColumnInfo}s must contain at least one element. + * @param tableName name of the table for which the upsert statement is to be created + * @param columns list of columns to be included in the upsert statement + * @param hint hint to be added to the UPSERT statement. + * @return the created {@code UPSERT} statement + */ + public static String constructUpsertStatement(String tableName, List columns, Hint hint) { + + if (columns.isEmpty()) { + throw new IllegalArgumentException("At least one column must be provided for upserts"); + } + + String hintStr = ""; + if (hint != null) { + final HintNode node = new HintNode(hint.name()); + hintStr = node.toString(); + } + + List parameterList = Lists.newArrayList(); + for (int i = 0; i < columns.size(); i++) { + parameterList.add("?"); + } + return String.format("UPSERT %s INTO %s (%s) VALUES (%s)", hintStr, tableName, + Joiner.on(", ").join(Iterables.transform(columns, new Function() { + @Nullable + @Override + public String apply(String columnName) { + return getEscapedFullColumnName(columnName); + } + })), Joiner.on(", ").join(parameterList)); + + } + + /** + * Generate a generic upsert statement based on a number of columns. The created upsert statement + * will not include any named columns, but will include parameter markers for the given number of + * columns. The number of columns must be greater than zero. 
+ * @param tableName name of the table for which the upsert statement is to be created + * @param numColumns number of columns to be included in the upsert statement + * @return the created {@code UPSERT} statement + */ + public static String constructGenericUpsertStatement(String tableName, int numColumns) { + + if (numColumns == 0) { + throw new IllegalArgumentException("At least one column must be provided for upserts"); + } + + List parameterList = Lists.newArrayListWithCapacity(numColumns); + for (int i = 0; i < numColumns; i++) { + parameterList.add("?"); + } + return String.format("UPSERT INTO %s VALUES (%s)", tableName, + Joiner.on(", ").join(parameterList)); + } + + /** + * @param fullTableName name of the table for which the select statement needs to be created. + * @param columnInfos list of columns to be projected in the select statement. + * @param conditions The condition clause to be added to the WHERE condition + * @return Select Query + */ + public static String constructSelectStatement(String fullTableName, List columnInfos, + final String conditions) { + List columns = Lists.transform(columnInfos, new Function() { + @Override + public String apply(ColumnInfo input) { + return input.getColumnName(); + } + }); + return constructSelectStatement(fullTableName, columns, conditions, null, false); + } + + /** + * @param fullTableName name of the table for which the select statement needs to be created. + * @param columns list of columns to be projected in the select statement. + * @param whereClause The condition clause to be added to the WHERE condition + * @param hint hint to use + * @param escapeCols whether to escape the projected columns + * @return Select Query + */ + public static String constructSelectStatement(String fullTableName, List columns, + final String whereClause, Hint hint, boolean escapeCols) { + return new QueryBuilder().setFullTableName(fullTableName).setSelectColumns(columns) + .setWhereClause(whereClause).setHint(hint).setEscapeCols(escapeCols).build(); + } + + /** + * Constructs parameterized filter for an IN clause e.g. passing in numWhereCols=2, numBatches=3 + * results in ((?,?),(?,?),(?,?)) + * @param numWhereCols number of WHERE columns + * @param numBatches number of column batches + * @return paramterized IN filter + */ + public static String constructParameterizedInClause(int numWhereCols, int numBatches) { + Preconditions.checkArgument(numWhereCols > 0); + Preconditions.checkArgument(numBatches > 0); + String batch = "(" + StringUtils.repeat("?", ",", numWhereCols) + ")"; + return "(" + StringUtils.repeat(batch, ",", numBatches) + ")"; + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum) { + return getUrlInternal(zkQuorum, null, null, null); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum, int clientPort) { + return getUrlInternal(zkQuorum, clientPort, null, null); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum, String znodeParent) { + return getUrlInternal(zkQuorum, null, znodeParent, null); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. 
+ */ + @Deprecated + public static String getUrl(String zkQuorum, int port, String znodeParent, String principal) { + return getUrlInternal(zkQuorum, port, znodeParent, principal); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum, int port, String znodeParent) { + return getUrlInternal(zkQuorum, port, znodeParent, null); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum, Integer port, String znodeParent) { + return getUrlInternal(zkQuorum, port, znodeParent, null); + } + + /** + * Create the Phoenix JDBC connection URL from the provided cluster connection details. + */ + @Deprecated + public static String getUrl(String zkQuorum, Integer port, String znodeParent, String principal) { + return getUrlInternal(zkQuorum, port, znodeParent, principal); + } + + @Deprecated + private static String getUrlInternal(String zkQuorum, Integer port, String znodeParent, + String principal) { + return String.join(":", PhoenixRuntime.JDBC_PROTOCOL, zkQuorum, + port == null ? "" : port.toString(), znodeParent == null ? "" : znodeParent, + principal == null ? "" : principal) + + Character.toString(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR); + } + + public static String getExplainPlan(ResultSet rs) throws SQLException { + StringBuilder buf = new StringBuilder(); + while (rs.next()) { + buf.append(rs.getString(1)); + buf.append('\n'); + } + if (buf.length() > 0) { + buf.setLength(buf.length() - 1); + } + return buf.toString(); + } + + public static String getExplainPlan(ResultIterator iterator) throws SQLException { + List steps = Lists.newArrayList(); + iterator.explain(steps); + StringBuilder buf = new StringBuilder(); + for (String step : steps) { + buf.append(step); + buf.append('\n'); + } + if (buf.length() > 0) { + buf.setLength(buf.length() - 1); + } + return buf.toString(); + } + + /** + * @return {@link PhoenixConnection} with {@value UpgradeUtil#DO_NOT_UPGRADE} set so that we don't + * initiate metadata upgrade + */ + public static Connection getConnectionOnServer(Configuration conf) throws SQLException { + return getConnectionOnServer(new Properties(), conf); + } + + public static void setServerConnection(Properties props) { + UpgradeUtil.doNotUpgradeOnFirstConnection(props); + props.setProperty(IS_SERVER_CONNECTION, Boolean.TRUE.toString()); + } + + public static boolean isServerConnection(ReadOnlyProps props) { + return props.getBoolean(IS_SERVER_CONNECTION, false); + } + + /** + * @return {@link PhoenixConnection} with {@value UpgradeUtil#DO_NOT_UPGRADE} set and with the + * upgrade-required flag cleared so that we don't initiate metadata upgrade. 
+ */ + public static Connection getConnectionOnServer(Properties props, Configuration conf) + throws SQLException { + setServerConnection(props); + Connection conn = getConnection(props, conf); + conn.unwrap(PhoenixConnection.class).getQueryServices().clearUpgradeRequired(); + return conn; + } + + public static Connection getConnectionOnServerWithCustomUrl(Properties props, String principal) + throws SQLException { + setServerConnection(props); + String url = getConnectionUrl(props, null, principal); + LOGGER.info("Creating connection with the jdbc url: " + url); + return DriverManager.getConnection(url, props); + } + + public static Connection getConnection(Configuration conf) throws SQLException { + return getConnection(new Properties(), conf); + } + + public static Connection getConnection(Properties props, Configuration conf) throws SQLException { + String url = getConnectionUrl(props, conf); + LOGGER.info(String.format("Creating connection with the jdbc url: %s, isServerSide = %s", url, + props.getProperty(IS_SERVER_CONNECTION))); + props = PropertiesUtil.combineProperties(props, conf); + return DriverManager.getConnection(url, props); + } + + public static String getConnectionUrl(Properties props, Configuration conf) throws SQLException { + return getConnectionUrl(props, conf, null); + } + + /** Returns connection url using the various properties set in props and conf. */ + public static String getConnectionUrl(Properties props, Configuration conf, String principal) + throws SQLException { + ReadOnlyProps propsWithPrincipal; + if (principal != null) { + Map principalProp = new HashMap<>(); + principalProp.put(QueryServices.HBASE_CLIENT_PRINCIPAL, principal); + propsWithPrincipal = new ReadOnlyProps(principalProp.entrySet().iterator()); + } else { + propsWithPrincipal = ReadOnlyProps.EMPTY_PROPS; + } + ConnectionInfo info = + ConnectionInfo.createNoLogin(PhoenixRuntime.JDBC_PROTOCOL, conf, propsWithPrincipal, props); + String url = info.toUrl(); + if (url.endsWith(PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + "")) { + url = url.substring(0, url.length() - 1); + } + // Mainly for testing to tack on the test=true part to ensure driver is found on server + String defaultExtraArgs = conf != null + ? conf.get(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, + QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS) + : QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS; + // If props doesn't have a default for extra args then use the extra args in conf as default + String extraArgs = + props.getProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, defaultExtraArgs); + if (extraArgs.length() > 0) { + url += PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR + extraArgs + + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; + } else { + url += PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; + } + return url; + } + + private static int getInt(String key, int defaultValue, Properties props, Configuration conf) { + if (conf == null) { + Preconditions.checkNotNull(props); + return Integer.parseInt(props.getProperty(key, String.valueOf(defaultValue))); + } + return conf.getInt(key, defaultValue); + } + + private static String getString(String key, String defaultValue, Properties props, + Configuration conf) { + if (conf == null) { + Preconditions.checkNotNull(props); + return props.getProperty(key, defaultValue); + } + return conf.get(key, defaultValue); + } + + public static String getViewStatement(String schemaName, String tableName, String where) { + // Only form we currently support for VIEWs: SELECT * FROM t WHERE ... 
+ return SELECT + " " + WildcardParseNode.NAME + " " + FROM + " " + + (schemaName == null || schemaName.length() == 0 ? "" : ("\"" + schemaName + "\".")) + + ("\"" + tableName + "\" ") + (WHERE + " " + where); + } + + public static Integer getOffsetLimit(Integer limit, Integer offset) { + if (limit == null) { + return null; + } else if (offset == null) { + return limit; + } else { + return limit + offset; + } + + } + + public static Integer getRemainingOffset(Tuple offsetTuple) { + if (offsetTuple != null) { + Cell cell = offsetTuple.getValue(QueryConstants.OFFSET_FAMILY, QueryConstants.OFFSET_COLUMN); + if (cell != null) { + return PInteger.INSTANCE.toObject(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength(), PInteger.INSTANCE, SortOrder.ASC, null, null); + } + } + return null; + } + + public static String getViewPartitionClause(String partitionColumnName, long autoPartitionNum) { + return partitionColumnName + " " + toSQL(CompareOperator.EQUAL) + " " + autoPartitionNum; + } + + public static Connection getConnectionForQueryLog(Configuration config) throws SQLException { + // we don't need this connection to upgrade anything or start dispatcher + return getConnectionOnServer(config); + } + + public static PreparedStatement getCatalogsStmt(PhoenixConnection connection) + throws SQLException { + List parameterValues = new ArrayList(4); + StringBuilder buf = new StringBuilder("select \n" + " DISTINCT " + TENANT_ID + " " + TABLE_CAT + + " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + " where " + COLUMN_NAME + + " is null" + " and " + COLUMN_FAMILY + " is null" + " and " + TENANT_ID + " is not null"); + addTenantIdFilter(connection, buf, null, parameterValues); + buf.append(" order by " + TENANT_ID); + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + for (int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + return stmt; + } + + /** + * Util that generates a PreparedStatement against syscat to fetch schema listings. + */ + public static PreparedStatement getSchemasStmt(PhoenixConnection connection, String catalog, + String schemaPattern) throws SQLException { + List parameterValues = new ArrayList(4); + StringBuilder buf = new StringBuilder( + "select distinct \n" + TABLE_SCHEM + "," + TENANT_ID + " " + TABLE_CATALOG + " from " + + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + " where " + COLUMN_NAME + " is null"); + addTenantIdFilter(connection, buf, catalog, parameterValues); + if (schemaPattern != null) { + buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?")); + if (schemaPattern.length() > 0) { + parameterValues.add(schemaPattern); + } + } + if (SchemaUtil.isNamespaceMappingEnabled(null, connection.getQueryServices().getProps())) { + buf.append(" and " + TABLE_NAME + " = '" + MetaDataClient.EMPTY_TABLE + "'"); + } + + // TODO: we should union this with SYSTEM.SEQUENCE too, but we only have support for + // UNION ALL and we really need UNION so that it dedups. 
+ + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + for (int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + return stmt; + } + + public static PreparedStatement getSuperTablesStmt(PhoenixConnection connection, String catalog, + String schemaPattern, String tableNamePattern) throws SQLException { + List parameterValues = new ArrayList(4); + StringBuilder buf = new StringBuilder("select \n" + TENANT_ID + " " + TABLE_CAT + "," + // Use + // tenantId + // for + // catalog + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_FAMILY + " " + SUPERTABLE_NAME + " from " + + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + " where " + COLUMN_NAME + " is null" + " and " + + LINK_TYPE + " = " + PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()); + addTenantIdFilter(connection, buf, catalog, parameterValues); + if (schemaPattern != null) { + buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?")); + if (schemaPattern.length() > 0) { + parameterValues.add(schemaPattern); + } + } + if (tableNamePattern != null) { + buf.append(" and " + TABLE_NAME + " like ?"); + parameterValues.add(tableNamePattern); + } + buf.append( + " order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SUPERTABLE_NAME); + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + for (int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + return stmt; + } + + public static PreparedStatement getIndexInfoStmt(PhoenixConnection connection, String catalog, + String schema, String table, boolean unique, boolean approximate) throws SQLException { + if (unique) { // No unique indexes + return null; + } + List parameterValues = new ArrayList(4); + StringBuilder buf = new StringBuilder("select \n" + TENANT_ID + " " + TABLE_CAT + ",\n" + // use + // this + // column + // for + // column + // family + // name + TABLE_SCHEM + ",\n" + DATA_TABLE_NAME + " " + TABLE_NAME + ",\n" + "true NON_UNIQUE,\n" + + "null INDEX_QUALIFIER,\n" + TABLE_NAME + " INDEX_NAME,\n" + DatabaseMetaData.tableIndexOther + + " TYPE,\n" + ORDINAL_POSITION + ",\n" + COLUMN_NAME + ",\n" + "CASE WHEN " + COLUMN_FAMILY + + " IS NOT NULL THEN null WHEN " + SORT_ORDER + " = " + (SortOrder.DESC.getSystemValue()) + + " THEN 'D' ELSE 'A' END ASC_OR_DESC,\n" + "null CARDINALITY,\n" + "null PAGES,\n" + + "null FILTER_CONDITION,\n" + + // Include data type info, though not in spec + ExternalSqlTypeIdFunction.NAME + "(" + DATA_TYPE + ") AS " + DATA_TYPE + ",\n" + + SqlTypeNameFunction.NAME + "(" + DATA_TYPE + ") AS " + TYPE_NAME + ",\n" + DATA_TYPE + " " + + TYPE_ID + ",\n" + COLUMN_FAMILY + ",\n" + COLUMN_SIZE + ",\n" + ARRAY_SIZE + "\nfrom " + + SYSTEM_CATALOG + "\nwhere "); + buf.append(TABLE_SCHEM + (schema == null || schema.length() == 0 ? " is null" : " = ?")); + if (schema != null && schema.length() > 0) { + parameterValues.add(schema); + } + buf.append("\nand " + DATA_TABLE_NAME + " = ?"); + parameterValues.add(table); + buf.append("\nand " + COLUMN_NAME + " is not null"); + addTenantIdFilter(connection, buf, catalog, parameterValues); + buf.append("\norder by INDEX_NAME," + ORDINAL_POSITION); + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + for (int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + return stmt; + } + + /** + * Util that generates a PreparedStatement against syscat to get the table listing in a given + * schema. 
+ */ + public static PreparedStatement getTablesStmt(PhoenixConnection connection, String catalog, + String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + boolean isSequence = false; + boolean hasTableTypes = types != null && types.length > 0; + StringBuilder typeClauseBuf = new StringBuilder(); + List parameterValues = new ArrayList(4); + if (hasTableTypes) { + List tableTypes = Lists.newArrayList(types); + isSequence = tableTypes.remove(SEQUENCE_TABLE_TYPE); + StringBuilder typeBuf = new StringBuilder(); + for (String type : tableTypes) { try { - output = processor.process(); - } catch (Exception e) { - LOGGER.error(e.getStackTrace().toString()); - throw new SQLException(e.getMessage()); - } - - StringBuilder buf = new StringBuilder("select \n" + - " ? as \"CREATE STATEMENT\""); - PreparedStatement stmt = connection.prepareStatement(buf.toString()); - - stmt.setString(1, output); - - return stmt; - } - - public static void addTenantIdFilter(PhoenixConnection connection, StringBuilder buf, String tenantIdPattern, - List parameterValues) { - PName tenantId = connection.getTenantId(); - if (tenantIdPattern == null) { - if (tenantId != null) { - appendConjunction(buf); - buf.append(" (" + TENANT_ID + " IS NULL " + - " OR " + TENANT_ID + " = ?) "); - parameterValues.add(tenantId.getString()); - } - } else if (tenantIdPattern.length() == 0) { - appendConjunction(buf); - buf.append(TENANT_ID + " IS NULL "); - } else { - appendConjunction(buf); - buf.append(" TENANT_ID LIKE ? "); - parameterValues.add(tenantIdPattern); - if (tenantId != null) { - buf.append(" and TENANT_ID = ? "); - parameterValues.add(tenantId.getString()); - } - } - } - - private static void appendConjunction(StringBuilder buf) { - buf.append(buf.length() == 0 ? "" : " and "); - } - - public static String generateInListParams(int nParams) { - List paramList = Lists.newArrayList(); - for (int i = 0; i < nParams; i++) { - paramList.add("?"); - } - return Joiner.on(", ").join(paramList); - } - - public static void setQuoteInListElements(PreparedStatement ps, List unQuotedString, - int index) throws SQLException { - for (int i = 0; i < unQuotedString.size(); i++) { - ps.setString(++index, "'" + unQuotedString + "'"); - } - } -} \ No newline at end of file + PTableType tableType = PTableType.fromValue(type); + typeBuf.append('\''); + typeBuf.append(tableType.getSerializedValue()); + typeBuf.append('\''); + typeBuf.append(','); + } catch (IllegalArgumentException e) { + // Ignore and continue + } + } + if (typeBuf.length() > 0) { + typeClauseBuf.append(" and " + TABLE_TYPE + " IN ("); + typeClauseBuf.append(typeBuf); + typeClauseBuf.setCharAt(typeClauseBuf.length() - 1, ')'); + } + } + StringBuilder buf = new StringBuilder("select \n"); + // If there were table types specified and they were all filtered out + // and we're not querying for sequences, return an empty result set. 
+ if (hasTableTypes && typeClauseBuf.length() == 0 && !isSequence) { + return null; + } + if (typeClauseBuf.length() > 0 || !isSequence) { + buf.append(TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog + TABLE_SCHEM + "," + TABLE_NAME + " ," + SQLTableTypeFunction.NAME + "(" + TABLE_TYPE + + ") AS " + TABLE_TYPE + "," + REMARKS + " ," + TYPE_NAME + "," + SELF_REFERENCING_COL_NAME + + "," + REF_GENERATION + "," + IndexStateNameFunction.NAME + "(" + INDEX_STATE + ") AS " + + INDEX_STATE + "," + IMMUTABLE_ROWS + "," + SALT_BUCKETS + "," + MULTI_TENANT + "," + + VIEW_STATEMENT + "," + SQLViewTypeFunction.NAME + "(" + VIEW_TYPE + ") AS " + VIEW_TYPE + + "," + SQLIndexTypeFunction.NAME + "(" + INDEX_TYPE + ") AS " + INDEX_TYPE + "," + + TRANSACTION_PROVIDER + " IS NOT NULL AS " + TRANSACTIONAL + "," + IS_NAMESPACE_MAPPED + + "," + GUIDE_POSTS_WIDTH + "," + TransactionProviderNameFunction.NAME + "(" + + TRANSACTION_PROVIDER + ") AS TRANSACTION_PROVIDER" + " from " + SYSTEM_CATALOG + " " + + SYSTEM_CATALOG_ALIAS + " where " + COLUMN_NAME + " is null" + " and " + COLUMN_FAMILY + + " is null" + " and " + TABLE_NAME + " != '" + MetaDataClient.EMPTY_TABLE + "'"); + addTenantIdFilter(connection, buf, catalog, parameterValues); + if (schemaPattern != null) { + buf.append(" and " + TABLE_SCHEM + (schemaPattern.length() == 0 ? " is null" : " like ?")); + if (schemaPattern.length() > 0) { + parameterValues.add(schemaPattern); + } + } + if (tableNamePattern != null) { + buf.append(" and " + TABLE_NAME + " like ?"); + parameterValues.add(tableNamePattern); + } + if (typeClauseBuf.length() > 0) { + buf.append(typeClauseBuf); + } + } + if (isSequence) { + // Union the SYSTEM.CATALOG entries with the SYSTEM.SEQUENCE entries + if (typeClauseBuf.length() > 0) { + buf.append(" UNION ALL\n"); + buf.append(" select\n"); + } + buf.append(TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog + SEQUENCE_SCHEMA + " " + TABLE_SCHEM + "," + SEQUENCE_NAME + " " + TABLE_NAME + " ," + "'" + + SEQUENCE_TABLE_TYPE + "' " + TABLE_TYPE + "," + "'' " + REMARKS + " ," + "'' " + TYPE_NAME + + "," + "'' " + SELF_REFERENCING_COL_NAME + "," + "'' " + REF_GENERATION + "," + + "CAST(null AS CHAR(1)) " + INDEX_STATE + "," + "CAST(null AS BOOLEAN) " + IMMUTABLE_ROWS + + "," + "CAST(null AS INTEGER) " + SALT_BUCKETS + "," + "CAST(null AS BOOLEAN) " + + MULTI_TENANT + "," + "'' " + VIEW_STATEMENT + "," + "'' " + VIEW_TYPE + "," + "'' " + + INDEX_TYPE + "," + "CAST(null AS BOOLEAN) " + TRANSACTIONAL + "," + + "CAST(null AS BOOLEAN) " + IS_NAMESPACE_MAPPED + "," + "CAST(null AS BIGINT) " + + GUIDE_POSTS_WIDTH + "," + "CAST(null AS VARCHAR) " + TRANSACTION_PROVIDER + "\n"); + buf.append(" from " + SYSTEM_SEQUENCE + "\n"); + StringBuilder whereClause = new StringBuilder(); + addTenantIdFilter(connection, whereClause, catalog, parameterValues); + if (schemaPattern != null) { + appendConjunction(whereClause); + whereClause + .append(SEQUENCE_SCHEMA + (schemaPattern.length() == 0 ? 
" is null" : " like ?\n")); + if (schemaPattern.length() > 0) { + parameterValues.add(schemaPattern); + } + } + if (tableNamePattern != null) { + appendConjunction(whereClause); + whereClause.append(SEQUENCE_NAME + " like ?\n"); + parameterValues.add(tableNamePattern); + } + if (whereClause.length() > 0) { + buf.append(" where\n"); + buf.append(whereClause); + } + } + buf.append(" order by 4, 1, 2, 3\n"); + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + for (int i = 0; i < parameterValues.size(); i++) { + stmt.setString(i + 1, parameterValues.get(i)); + } + return stmt; + } + + /** + * Util that generates a PreparedStatement against syscat to get the table listing in a given + * schema. + */ + public static PreparedStatement getShowCreateTableStmt(PhoenixConnection connection, + String catalog, TableName tn) throws SQLException { + + String output; + SchemaProcessor processor = new SchemaExtractionProcessor(null, + connection.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration(), + tn.getSchemaName() == null ? null : "\"" + tn.getSchemaName() + "\"", + "\"" + tn.getTableName() + "\""); + try { + output = processor.process(); + } catch (Exception e) { + LOGGER.error(e.getStackTrace().toString()); + throw new SQLException(e.getMessage()); + } + + StringBuilder buf = new StringBuilder("select \n" + " ? as \"CREATE STATEMENT\""); + PreparedStatement stmt = connection.prepareStatement(buf.toString()); + + stmt.setString(1, output); + + return stmt; + } + + public static void addTenantIdFilter(PhoenixConnection connection, StringBuilder buf, + String tenantIdPattern, List parameterValues) { + PName tenantId = connection.getTenantId(); + if (tenantIdPattern == null) { + if (tenantId != null) { + appendConjunction(buf); + buf.append(" (" + TENANT_ID + " IS NULL " + " OR " + TENANT_ID + " = ?) "); + parameterValues.add(tenantId.getString()); + } + } else if (tenantIdPattern.length() == 0) { + appendConjunction(buf); + buf.append(TENANT_ID + " IS NULL "); + } else { + appendConjunction(buf); + buf.append(" TENANT_ID LIKE ? "); + parameterValues.add(tenantIdPattern); + if (tenantId != null) { + buf.append(" and TENANT_ID = ? "); + parameterValues.add(tenantId.getString()); + } + } + } + + private static void appendConjunction(StringBuilder buf) { + buf.append(buf.length() == 0 ? "" : " and "); + } + + public static String generateInListParams(int nParams) { + List paramList = Lists.newArrayList(); + for (int i = 0; i < nParams; i++) { + paramList.add("?"); + } + return Joiner.on(", ").join(paramList); + } + + public static void setQuoteInListElements(PreparedStatement ps, List unQuotedString, + int index) throws SQLException { + for (int i = 0; i < unQuotedString.size(); i++) { + ps.setString(++index, "'" + unQuotedString + "'"); + } + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java index 9b9de68bfd8..ce5357057b9 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import java.util.HashMap; @@ -29,317 +28,287 @@ import javax.annotation.Nonnull; import org.apache.hadoop.util.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Objects; import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap.Builder; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * - * Read-only properties that avoids unnecessary synchronization in - * java.util.Properties. - * + * Read-only properties that avoids unnecessary synchronization in java.util.Properties. */ public class ReadOnlyProps implements Iterable> { - private static final Logger LOGGER = LoggerFactory.getLogger(ReadOnlyProps.class); - public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps(); - @Nonnull - private final Map props; - @Nonnull - private final Map overrideProps; - - public ReadOnlyProps(ReadOnlyProps defaultProps, Iterator> iterator) { - Map map = new HashMap(defaultProps.asMap()); - while (iterator.hasNext()) { - Entry entry = iterator.next(); - map.put(entry.getKey(), entry.getValue()); - } - this.props = ImmutableMap.copyOf(map); - this.overrideProps = ImmutableMap.of(); - } - - public ReadOnlyProps(Iterator> iterator) { - this(EMPTY_PROPS, iterator); - } + private static final Logger LOGGER = LoggerFactory.getLogger(ReadOnlyProps.class); + public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps(); + @Nonnull + private final Map props; + @Nonnull + private final Map overrideProps; - private ReadOnlyProps() { - this.props = ImmutableMap.of(); - this.overrideProps = ImmutableMap.of(); + public ReadOnlyProps(ReadOnlyProps defaultProps, Iterator> iterator) { + Map map = new HashMap(defaultProps.asMap()); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + map.put(entry.getKey(), entry.getValue()); } + this.props = ImmutableMap.copyOf(map); + this.overrideProps = ImmutableMap.of(); + } - public ReadOnlyProps(Map props) { - this.props = ImmutableMap.copyOf(props); - this.overrideProps = ImmutableMap.of(); - } + public ReadOnlyProps(Iterator> iterator) { + this(EMPTY_PROPS, iterator); + } - private ReadOnlyProps(ReadOnlyProps defaultProps, Properties overridesArg) { - this.props = defaultProps.props; - if (overridesArg == null || overridesArg.isEmpty()) { - this.overrideProps = defaultProps.overrideProps; - } else { - Map combinedOverrides = - Maps.newHashMapWithExpectedSize(defaultProps.overrideProps.size() - + overridesArg.size()); - if (!defaultProps.overrideProps.isEmpty()) { - combinedOverrides.putAll(defaultProps.overrideProps); - } - for (Entry entry : overridesArg.entrySet()) { - combinedOverrides.put(entry.getKey().toString(), entry.getValue().toString()); - } - this.overrideProps = ImmutableMap.copyOf(combinedOverrides); - } - } + private ReadOnlyProps() { + this.props = ImmutableMap.of(); + this.overrideProps = ImmutableMap.of(); + } - private static Pattern varPat = 
Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}"); - private static int MAX_SUBST = 20; + public ReadOnlyProps(Map props) { + this.props = ImmutableMap.copyOf(props); + this.overrideProps = ImmutableMap.of(); + } - private String substituteVars(String expr) { - if (expr == null) { - return null; - } - Matcher match = varPat.matcher(""); - String eval = expr; - for(int s=0; s combinedOverrides = + Maps.newHashMapWithExpectedSize(defaultProps.overrideProps.size() + overridesArg.size()); + if (!defaultProps.overrideProps.isEmpty()) { + combinedOverrides.putAll(defaultProps.overrideProps); } - - /** - * Get the value of the name property, without doing - * variable expansion. - * - * @param name the property name. - * @return the value of the name property, - * or null if no such property exists. - */ - public String getRaw(String name) { - String overridenValue = overrideProps.get(name); - return overridenValue == null ? props.get(name) : overridenValue; + for (Entry entry : overridesArg.entrySet()) { + combinedOverrides.put(entry.getKey().toString(), entry.getValue().toString()); + } + this.overrideProps = ImmutableMap.copyOf(combinedOverrides); } + } - public String getRaw(String name, String defaultValue) { - String value = getRaw(name); - if (value == null) { - return defaultValue; - } - return value; - } + private static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}"); + private static int MAX_SUBST = 20; - /** - * Get the value of the name property. If no such property - * exists, then defaultValue is returned. - * - * @param name property name. - * @param defaultValue default value. - * @return property value, or defaultValue if the property - * doesn't exist. - */ - public String get(String name, String defaultValue) { - return substituteVars(getRaw(name, defaultValue)); + private String substituteVars(String expr) { + if (expr == null) { + return null; } - - /** - * Get the value of the name property, null if - * no such property exists. - * - * Values are processed for variable expansion - * before being returned. - * - * @param name the property name. - * @return the value of the name property, - * or null if no such property exists. - */ - public String get(String name) { - return substituteVars(getRaw(name)); + Matcher match = varPat.matcher(""); + String eval = expr; + for (int s = 0; s < MAX_SUBST; s++) { + match.reset(eval); + if (!match.find()) { + return eval; + } + String var = match.group(); + var = var.substring(2, var.length() - 1); // remove ${ .. } + String val = null; + try { + val = System.getProperty(var); + } catch (SecurityException se) { + } + if (val == null) { + val = getRaw(var); + } + if (val == null) { + return eval; // return literal ${var}: var is unbound + } + // substitute + eval = eval.substring(0, match.start()) + val + eval.substring(match.end()); } + throw new IllegalStateException( + "Variable substitution depth too large: " + MAX_SUBST + " " + expr); + } - private String getHexDigits(String value) { - boolean negative = false; - String str = value; - String hexString = null; - if (value.startsWith("-")) { - negative = true; - str = value.substring(1); - } - if (str.startsWith("0x") || str.startsWith("0X")) { - hexString = str.substring(2); - if (negative) { - hexString = "-" + hexString; - } - return hexString; - } - return null; - } - - /** - * Get the value of the name property as a boolean. - * If no such property is specified, or if the specified value is not a valid - * boolean, then defaultValue is returned. 
- * - * @param name property name. - * @param defaultValue default value. - * @return property value as a boolean, - * or defaultValue. - */ - public boolean getBoolean(String name, boolean defaultValue) { - String valueString = get(name); - if ("true".equals(valueString)) - return true; - else if ("false".equals(valueString)) - return false; - else return defaultValue; + /** + * Get the value of the name property, without doing + * variable expansion. + * @param name the property name. + * @return the value of the name property, or null if no such property exists. + */ + public String getRaw(String name) { + String overridenValue = overrideProps.get(name); + return overridenValue == null ? props.get(name) : overridenValue; + } + + public String getRaw(String name, String defaultValue) { + String value = getRaw(name); + if (value == null) { + return defaultValue; } + return value; + } - /** - * Get the value of the name property as an int. - * - * If no such property exists, or if the specified value is not a valid - * int, then defaultValue is returned. - * - * @param name property name. - * @param defaultValue default value. - * @return property value as an int, - * or defaultValue. - */ - public int getInt(String name, int defaultValue) { - String valueString = get(name); - if (valueString == null) - return defaultValue; - try { - String hexString = getHexDigits(valueString); - if (hexString != null) { - return Integer.parseInt(hexString, 16); - } - return Integer.parseInt(valueString); - } catch (NumberFormatException e) { - return defaultValue; - } + /** + * Get the value of the name property. If no such property exists, then + * defaultValue is returned. + * @param name property name. + * @param defaultValue default value. + * @return property value, or defaultValue if the property doesn't exist. + */ + public String get(String name, String defaultValue) { + return substituteVars(getRaw(name, defaultValue)); + } + + /** + * Get the value of the name property, null if no such property exists. + * Values are processed for variable expansion before being + * returned. + * @param name the property name. + * @return the value of the name property, or null if no such property exists. + */ + public String get(String name) { + return substituteVars(getRaw(name)); + } + + private String getHexDigits(String value) { + boolean negative = false; + String str = value; + String hexString = null; + if (value.startsWith("-")) { + negative = true; + str = value.substring(1); } - - /** - * Get the value of the name property as a long. - * If no such property is specified, or if the specified value is not a valid - * long, then defaultValue is returned. - * - * @param name property name. - * @param defaultValue default value. - * @return property value as a long, - * or defaultValue. - */ - public long getLong(String name, long defaultValue) { - String valueString = get(name); - if (valueString == null) - return defaultValue; - try { - String hexString = getHexDigits(valueString); - if (hexString != null) { - return Long.parseLong(hexString, 16); - } - return Long.parseLong(valueString); - } catch (NumberFormatException e) { - return defaultValue; + if (str.startsWith("0x") || str.startsWith("0X")) { + hexString = str.substring(2); + if (negative) { + hexString = "-" + hexString; } + return hexString; } + return null; + } - /** - * Get the value of the name property as a long or - * human-readable format. 
If no such property exists, the provided default value - * is returned, or if the specified value is not a valid long or - * human-readable format, then an error is thrown. You can use the following - * suffix (case insensitive): k(kilo), m(mega), g(giga), t(tera), p(peta), e(exa) - * - * @param name property name. - * @param defaultValue default value. - * @return property value as a long, - * or defaultValue. - * @throws NumberFormatException - when the value is invalid - */ - public long getLongBytes(String name, long defaultValue) { - String valueString = get(name); - if (valueString == null) { - return defaultValue; - } - return StringUtils.TraditionalBinaryPrefix.string2long(valueString); - } + /** + * Get the value of the name property as a boolean. If no such property + * is specified, or if the specified value is not a valid boolean, then + * defaultValue is returned. + * @param name property name. + * @param defaultValue default value. + * @return property value as a boolean, or defaultValue. + */ + public boolean getBoolean(String name, boolean defaultValue) { + String valueString = get(name); + if ("true".equals(valueString)) return true; + else if ("false".equals(valueString)) return false; + else return defaultValue; + } - /** - * Get the value of the name property as a float. - * If no such property is specified, or if the specified value is not a valid - * float, then defaultValue is returned. - * - * @param name property name. - * @param defaultValue default value. - * @return property value as a float, - * or defaultValue. - */ - public float getFloat(String name, float defaultValue) { - String valueString = get(name); - if (valueString == null) - return defaultValue; - try { - return Float.parseFloat(valueString); - } catch (NumberFormatException e) { - return defaultValue; + /** + * Get the value of the name property as an int. If no such property + * exists, or if the specified value is not a valid int, then + * defaultValue is returned. + * @param name property name. + * @param defaultValue default value. + * @return property value as an int, or defaultValue. + */ + public int getInt(String name, int defaultValue) { + String valueString = get(name); + if (valueString == null) return defaultValue; + try { + String hexString = getHexDigits(valueString); + if (hexString != null) { + return Integer.parseInt(hexString, 16); } + return Integer.parseInt(valueString); + } catch (NumberFormatException e) { + return defaultValue; } + } - /** - * Get the properties as a {@code Map} - * - * @return {@code Map} - */ - public Map asMap() { - return props; + /** + * Get the value of the name property as a long. If no such property is + * specified, or if the specified value is not a valid long, then + * defaultValue is returned. + * @param name property name. + * @param defaultValue default value. + * @return property value as a long, or defaultValue. + */ + public long getLong(String name, long defaultValue) { + String valueString = get(name); + if (valueString == null) return defaultValue; + try { + String hexString = getHexDigits(valueString); + if (hexString != null) { + return Long.parseLong(hexString, 16); + } + return Long.parseLong(valueString); + } catch (NumberFormatException e) { + return defaultValue; } - - @Override - public Iterator> iterator() { - return props.entrySet().iterator(); + } + + /** + * Get the value of the name property as a long or human-readable + * format. 
If no such property exists, the provided default value is returned, or if the specified + * value is not a valid long or human-readable format, then an error is thrown. You + * can use the following suffix (case insensitive): k(kilo), m(mega), g(giga), t(tera), p(peta), + * e(exa) + * @param name property name. + * @param defaultValue default value. + * @return property value as a long, or defaultValue. + * @throws NumberFormatException - when the value is invalid + */ + public long getLongBytes(String name, long defaultValue) { + String valueString = get(name); + if (valueString == null) { + return defaultValue; } - - public boolean isEmpty() { - return props.isEmpty(); + return StringUtils.TraditionalBinaryPrefix.string2long(valueString); + } + + /** + * Get the value of the name property as a float. If no such property is + * specified, or if the specified value is not a valid float, then + * defaultValue is returned. + * @param name property name. + * @param defaultValue default value. + * @return property value as a float, or defaultValue. + */ + public float getFloat(String name, float defaultValue) { + String valueString = get(name); + if (valueString == null) return defaultValue; + try { + return Float.parseFloat(valueString); + } catch (NumberFormatException e) { + return defaultValue; } + } - /** - * Constructs new map only if necessary for adding the override properties. - * @param overrides Map of properties to override current properties. - * @return new ReadOnlyProps if in applying the overrides there are - * modifications to the current underlying Map, otherwise returns this. - */ - public ReadOnlyProps addAll(Properties overrides) { - for (Entry entry : overrides.entrySet()) { - String key = entry.getKey().toString(); - String value = entry.getValue().toString(); - String oldValue = props.get(key); - if (!Objects.equal(oldValue, value)) { - if (LOGGER.isDebugEnabled()) LOGGER.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value); - return new ReadOnlyProps(this, overrides); - } - } - return this; + /** + * Get the properties as a {@code Map} + * @return {@code Map} + */ + public Map asMap() { + return props; + } + + @Override + public Iterator> iterator() { + return props.entrySet().iterator(); + } + + public boolean isEmpty() { + return props.isEmpty(); + } + + /** + * Constructs new map only if necessary for adding the override properties. + * @param overrides Map of properties to override current properties. + * @return new ReadOnlyProps if in applying the overrides there are modifications to the current + * underlying Map, otherwise returns this. 
+ */ + public ReadOnlyProps addAll(Properties overrides) { + for (Entry entry : overrides.entrySet()) { + String key = entry.getKey().toString(); + String value = entry.getValue().toString(); + String oldValue = props.get(key); + if (!Objects.equal(oldValue, value)) { + if (LOGGER.isDebugEnabled()) LOGGER + .debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value); + return new ReadOnlyProps(this, overrides); + } } + return this; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ResultUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ResultUtil.java index 967f38db546..19859e9063a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ResultUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ResultUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,102 +28,104 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Static class for various methods that would be nice to have added to {@link org.apache.hadoop.hbase.client.Result}. - * These methods work off of the raw bytes preventing the explosion of Result into object form. - * - * + * Static class for various methods that would be nice to have added to + * {@link org.apache.hadoop.hbase.client.Result}. These methods work off of the raw bytes preventing + * the explosion of Result into object form. * @since 0.1 */ public class ResultUtil { - public static final Result EMPTY_RESULT = new Result() { - @Override - public final boolean isEmpty() { return true; } - }; - - private ResultUtil() { - } - - public static Result toResult(ImmutableBytesWritable bytes) { - byte [] buf = bytes.get(); - int offset = bytes.getOffset(); - int finalOffset = bytes.getLength() + offset; - List kvs = new ArrayList(); - while(offset < finalOffset) { - int keyLength = Bytes.toInt(buf, offset); - offset += Bytes.SIZEOF_INT; - kvs.add(new KeyValue(buf, offset, keyLength)); - offset += keyLength; - } - return Result.create(kvs); - } - - /** - * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result. - * @param r - */ - public static ImmutableBytesWritable getKey(Result r) { - return getKey(r, 0); - } - - public static void getKey(Result r, ImmutableBytesWritable key) { - key.set(r.getRow()); - //key.set(getRawBytes(r), getKeyOffset(r), getKeyLength(r)); - } - - public static void getKey(Cell value, ImmutableBytesWritable key) { - key.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); - } - - /** - * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result. - * Use offset to return a subset of the key bytes, for example to skip the organization ID embedded - * in all of our keys. 
- * @param r - * @param offset offset added to start of key and subtracted from key length (to select subset of key bytes) - */ - public static ImmutableBytesWritable getKey(Result r, int offset) { - return new ImmutableBytesWritable(getRawBytes(r), getKeyOffset(r) + offset, getKeyLength(r) - offset); + public static final Result EMPTY_RESULT = new Result() { + @Override + public final boolean isEmpty() { + return true; } + }; - public static void getKey(Result r, int offset, int length, ImmutableBytesWritable key) { - key.set(getRawBytes(r), getKeyOffset(r) + offset, length); - } + private ResultUtil() { + } - /** - * Comparator for comparing the keys from two Results in-place, without allocating new byte arrays - */ - public static final Comparator KEY_COMPARATOR = new Comparator() { - - @Override - public int compare(Result r1, Result r2) { - byte[] r1Bytes = getRawBytes(r1); - byte[] r2Bytes = getRawBytes(r2); - return Bytes.compareTo(r1Bytes, getKeyOffset(r1), getKeyLength(r1), r2Bytes, getKeyOffset(r2), getKeyLength(r2)); - } - - }; - - /** - * Get the offset into the Result byte array to the key. - * @param r - */ - static int getKeyOffset(Result r) { - KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]); - return firstKV.getOffset(); - } - - static int getKeyLength(Result r) { - // Key length stored right before key as a short - return Bytes.toShort(getRawBytes(r), getKeyOffset(r) - Bytes.SIZEOF_SHORT); - } - - static byte[] getRawBytes(Result r) { - KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]); - return firstKV.getBuffer(); + public static Result toResult(ImmutableBytesWritable bytes) { + byte[] buf = bytes.get(); + int offset = bytes.getOffset(); + int finalOffset = bytes.getLength() + offset; + List kvs = new ArrayList(); + while (offset < finalOffset) { + int keyLength = Bytes.toInt(buf, offset); + offset += Bytes.SIZEOF_INT; + kvs.add(new KeyValue(buf, offset, keyLength)); + offset += keyLength; } + return Result.create(kvs); + } + + /** + * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result. + */ + public static ImmutableBytesWritable getKey(Result r) { + return getKey(r, 0); + } + + public static void getKey(Result r, ImmutableBytesWritable key) { + key.set(r.getRow()); + // key.set(getRawBytes(r), getKeyOffset(r), getKeyLength(r)); + } - public static int compareKeys(Result r1, Result r2) { - return Bytes.compareTo(getRawBytes(r1), getKeyOffset(r1), getKeyLength(r1), getRawBytes(r2), getKeyOffset(r2), getKeyLength(r2)); + public static void getKey(Cell value, ImmutableBytesWritable key) { + key.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } + + /** + * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result. + * Use offset to return a subset of the key bytes, for example to skip the organization ID + * embedded in all of our keys. 
+ * @param offset offset added to start of key and subtracted from key length (to select subset of + * key bytes) + */ + public static ImmutableBytesWritable getKey(Result r, int offset) { + return new ImmutableBytesWritable(getRawBytes(r), getKeyOffset(r) + offset, + getKeyLength(r) - offset); + } + + public static void getKey(Result r, int offset, int length, ImmutableBytesWritable key) { + key.set(getRawBytes(r), getKeyOffset(r) + offset, length); + } + + /** + * Comparator for comparing the keys from two Results in-place, without allocating new byte arrays + */ + public static final Comparator KEY_COMPARATOR = new Comparator() { + + @Override + public int compare(Result r1, Result r2) { + byte[] r1Bytes = getRawBytes(r1); + byte[] r2Bytes = getRawBytes(r2); + return Bytes.compareTo(r1Bytes, getKeyOffset(r1), getKeyLength(r1), r2Bytes, getKeyOffset(r2), + getKeyLength(r2)); } + }; + + /** + * Get the offset into the Result byte array to the key. + */ + static int getKeyOffset(Result r) { + KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]); + return firstKV.getOffset(); + } + + static int getKeyLength(Result r) { + // Key length stored right before key as a short + return Bytes.toShort(getRawBytes(r), getKeyOffset(r) - Bytes.SIZEOF_SHORT); + } + + static byte[] getRawBytes(Result r) { + KeyValue firstKV = PhoenixKeyValueUtil.maybeCopyCell(r.rawCells()[0]); + return firstKV.getBuffer(); + } + + public static int compareKeys(Result r1, Result r2) { + return Bytes.compareTo(getRawBytes(r1), getKeyOffset(r1), getKeyLength(r1), getRawBytes(r2), + getKeyOffset(r2), getKeyLength(r2)); + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseable.java index d9a43e5e16c..219afa70ede 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,13 +20,9 @@ import java.sql.SQLException; /** - * - * Interface for a SQL resource that should be closed - * after it is no longer in use. - * - * + * Interface for a SQL resource that should be closed after it is no longer in use. * @since 0.1 */ public interface SQLCloseable { - void close() throws SQLException; + void close() throws SQLException; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseables.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseables.java index f1efb8408fe..6c7c0cdcd3c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseables.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SQLCloseables.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,111 +25,105 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; - /** * Utilities for operating on {@link SQLCloseable}s. - * - * * @since 0.1 */ public class SQLCloseables { - /** Not constructed */ - private SQLCloseables() { } - - /** - * Allows you to close as many of the {@link SQLCloseable}s as possible. - * - * If any of the close's fail with an IOException, those exception(s) will - * be thrown after attempting to close all of the inputs. - */ - public static void closeAll(Iterable iterable) throws SQLException { - SQLException ex = closeAllQuietly(iterable); - if (ex != null) throw ex; - } - - public static SQLException closeAllQuietly(Iterable iterable) { - if (iterable == null) return null; - - LinkedList exceptions = null; - for (SQLCloseable closeable : iterable) { - try { - if (closeable == null) { - continue; - } - closeable.close(); - } catch (SQLException x) { - if (exceptions == null) exceptions = new LinkedList(); - exceptions.add(x); - } + /** Not constructed */ + private SQLCloseables() { + } + + /** + * Allows you to close as many of the {@link SQLCloseable}s as possible. If any of the close's + * fail with an IOException, those exception(s) will be thrown after attempting to close all of + * the inputs. + */ + public static void closeAll(Iterable iterable) throws SQLException { + SQLException ex = closeAllQuietly(iterable); + if (ex != null) throw ex; + } + + public static SQLException closeAllQuietly(Iterable iterable) { + if (iterable == null) return null; + + LinkedList exceptions = null; + for (SQLCloseable closeable : iterable) { + try { + if (closeable == null) { + continue; } - - SQLException ex = MultipleCausesSQLException.fromSQLExceptions(exceptions); - return ex; + closeable.close(); + } catch (SQLException x) { + if (exceptions == null) exceptions = new LinkedList(); + exceptions.add(x); + } + } + + SQLException ex = MultipleCausesSQLException.fromSQLExceptions(exceptions); + return ex; + } + + /** + * A subclass of {@link SQLException} that allows you to chain multiple causes together. + * @since 0.1 + * @see SQLCloseables + */ + static private class MultipleCausesSQLException extends SQLException { + private static final long serialVersionUID = 1L; + + static SQLException fromSQLExceptions(Collection exceptions) { + if (exceptions == null || exceptions.isEmpty()) return null; + if (exceptions.size() == 1) return Iterables.getOnlyElement(exceptions); + + return new MultipleCausesSQLException(exceptions); } + private final Collection exceptions; + private boolean hasSetStackTrace; + /** - * A subclass of {@link SQLException} that allows you to chain multiple - * causes together. - * - * - * @since 0.1 - * @see SQLCloseables + * Use the {@link #fromSQLExceptions(Collection) factory}. 
*/ - static private class MultipleCausesSQLException extends SQLException { - private static final long serialVersionUID = 1L; - - static SQLException fromSQLExceptions(Collection exceptions) { - if (exceptions == null || exceptions.isEmpty()) return null; - if (exceptions.size() == 1) return Iterables.getOnlyElement(exceptions); - - return new MultipleCausesSQLException(exceptions); - } - - private final Collection exceptions; - private boolean hasSetStackTrace; - - /** - * Use the {@link #fromSQLExceptions(Collection) factory}. - */ - private MultipleCausesSQLException(Collection exceptions) { - this.exceptions = exceptions; - } + private MultipleCausesSQLException(Collection exceptions) { + this.exceptions = exceptions; + } - @Override - public String getMessage() { - StringBuilder sb = new StringBuilder(this.exceptions.size() * 50); - int exceptionNum = 0; - for (SQLException ex : this.exceptions) { - sb.append("Cause Number " + exceptionNum + ": " + ex.getMessage() + "\n"); - exceptionNum++; - } - return sb.toString(); - } - - @Override - public StackTraceElement[] getStackTrace() { - if (!this.hasSetStackTrace) { - ArrayList frames = new ArrayList(this.exceptions.size() * 20); - - int exceptionNum = 0; - for (SQLException exception : this.exceptions) { - StackTraceElement header = new StackTraceElement(MultipleCausesSQLException.class.getName(), - "Exception Number " + exceptionNum, - "", - 0); - - frames.add(header); - Collections.addAll(frames, exception.getStackTrace()); - exceptionNum++; - } - - setStackTrace(frames.toArray(new StackTraceElement[frames.size()])); - this.hasSetStackTrace = true; - } - - return super.getStackTrace(); + @Override + public String getMessage() { + StringBuilder sb = new StringBuilder(this.exceptions.size() * 50); + int exceptionNum = 0; + for (SQLException ex : this.exceptions) { + sb.append("Cause Number " + exceptionNum + ": " + ex.getMessage() + "\n"); + exceptionNum++; + } + return sb.toString(); + } + + @Override + public StackTraceElement[] getStackTrace() { + if (!this.hasSetStackTrace) { + ArrayList frames = + new ArrayList(this.exceptions.size() * 20); + + int exceptionNum = 0; + for (SQLException exception : this.exceptions) { + StackTraceElement header = + new StackTraceElement(MultipleCausesSQLException.class.getName(), + "Exception Number " + exceptionNum, "", 0); + + frames.add(header); + Collections.addAll(frames, exception.getStackTrace()); + exceptionNum++; } + setStackTrace(frames.toArray(new StackTraceElement[frames.size()])); + this.hasSetStackTrace = true; + } + + return super.getStackTrace(); } - + + } + } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java index 7f50bd6d72d..ac6c9f6f7eb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,11 +19,11 @@ import static org.apache.phoenix.compile.OrderByCompiler.OrderBy.FWD_ROW_KEY_ORDER_BY; import static org.apache.phoenix.compile.OrderByCompiler.OrderBy.REV_ROW_KEY_ORDER_BY; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CUSTOM_ANNOTATIONS; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SCAN_START_ROW_SUFFIX; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SCAN_STOP_ROW_SUFFIX; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL; import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME; import static org.apache.phoenix.query.QueryServices.USE_STATS_FOR_PARALLELIZATION; @@ -104,1819 +104,1837 @@ import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.ValueSchema.Field; import org.apache.phoenix.schema.transform.SystemTransformRecord; -import org.apache.phoenix.schema.transform.TransformMaintainer; import org.apache.phoenix.schema.transform.TransformClient; +import org.apache.phoenix.schema.transform.TransformMaintainer; import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.schema.types.PVarbinaryEncoded; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterators; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * * Various utilities for scans - * - * * @since 0.1 */ public class ScanUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(ScanUtil.class); - public static final int[] SINGLE_COLUMN_SLOT_SPAN = new int[1]; - public static final int UNKNOWN_CLIENT_VERSION = VersionUtil.encodeVersion(4, 4, 0); - - private static final byte[] ZERO_BYTE_ARRAY = new byte[1024]; - private static final String RESULT_IS_OUT_OF_SCAN_START_KEY = - "Row key of the result is out of scan start key range"; - private static final String RESULT_IS_OUT_OF_SCAN_STOP_KEY = - "Row key of the result is out of scan stop key range"; - - private ScanUtil() { - } - - public static void setTenantId(Scan scan, byte[] tenantId) { - scan.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - - public static void setLocalIndex(Scan scan) { - scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX, PDataType.TRUE_BYTES); - } - - public static void setUncoveredGlobalIndex(Scan scan) { - scan.setAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX, 
PDataType.TRUE_BYTES); - } - - public static boolean isLocalIndex(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX) != null; - } - public static boolean isUncoveredGlobalIndex(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX) != null; - } - - public static boolean isLocalOrUncoveredGlobalIndex(Scan scan) { - return isLocalIndex(scan) || isUncoveredGlobalIndex(scan); - } - - public static boolean isNonAggregateScan(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.NON_AGGREGATE_QUERY) != null; - } - - // Designates a "simple scan", i.e. a scan that does not need to be scoped - // to a single region. - public static boolean isSimpleScan(Scan scan) { - return ScanUtil.isNonAggregateScan(scan) && - scan.getAttribute(BaseScannerRegionObserverConstants.TOPN) == null && - scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET) == null; - } - - // Use getTenantId and pass in column name to match against - // in as PSchema attribute. If column name matches in - // KeyExpressions, set on scan as attribute - public static ImmutableBytesPtr getTenantId(Scan scan) { - // Create Scan with special aggregation column over which to aggregate - byte[] tenantId = scan.getAttribute(PhoenixRuntime.TENANT_ID_ATTRIB); - if (tenantId == null) { - return null; - } - return new ImmutableBytesPtr(tenantId); - } - - public static void setCustomAnnotations(Scan scan, byte[] annotations) { - scan.setAttribute(CUSTOM_ANNOTATIONS, annotations); - } - - public static byte[] getCustomAnnotations(Scan scan) { - return scan.getAttribute(CUSTOM_ANNOTATIONS); - } - - public static Scan newScan(Scan scan) { - try { - Scan newScan = new Scan(scan); - // Clone the underlying family map instead of sharing it between - // the existing and cloned Scan (which is the retarded default - // behavior). - TreeMap> existingMap = (TreeMap>)scan.getFamilyMap(); - Map> clonedMap = new TreeMap>(existingMap); - newScan.setFamilyMap(clonedMap); - // Carry over the reversed attribute - newScan.setReversed(scan.isReversed()); - if (scan.getReadType() == Scan.ReadType.PREAD) { - // HBASE-25644 : Only if Scan#setSmall(boolean) is called with - // true, readType should be set PREAD. For non-small scan, - // setting setSmall(false) is redundant and degrades perf - // without HBASE-25644 fix. - newScan.setReadType(Scan.ReadType.PREAD); - } - return newScan; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** - * Intersects the scan start/stop row with the startKey and stopKey - * @param scan - * @param startKey - * @param stopKey - * @return false if the Scan cannot possibly return rows and true otherwise - */ - public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey) { - return intersectScanRange(scan, startKey, stopKey, false); - } - - public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey, boolean useSkipScan) { - boolean mayHaveRows = false; - int offset = 0; - if (ScanUtil.isLocalIndex(scan)) { - offset = startKey.length != 0 ? 
startKey.length : stopKey.length; - } - byte[] existingStartKey = scan.getStartRow(); - byte[] existingStopKey = scan.getStopRow(); - if (existingStartKey.length > 0) { - if (startKey.length == 0 || Bytes.compareTo(existingStartKey, startKey) > 0) { - startKey = existingStartKey; + private static final Logger LOGGER = LoggerFactory.getLogger(ScanUtil.class); + public static final int[] SINGLE_COLUMN_SLOT_SPAN = new int[1]; + public static final int UNKNOWN_CLIENT_VERSION = VersionUtil.encodeVersion(4, 4, 0); + + private static final byte[] ZERO_BYTE_ARRAY = new byte[1024]; + private static final String RESULT_IS_OUT_OF_SCAN_START_KEY = + "Row key of the result is out of scan start key range"; + private static final String RESULT_IS_OUT_OF_SCAN_STOP_KEY = + "Row key of the result is out of scan stop key range"; + + private ScanUtil() { + } + + public static void setTenantId(Scan scan, byte[] tenantId) { + scan.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } + + public static void setLocalIndex(Scan scan) { + scan.setAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX, PDataType.TRUE_BYTES); + } + + public static void setUncoveredGlobalIndex(Scan scan) { + scan.setAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX, + PDataType.TRUE_BYTES); + } + + public static boolean isLocalIndex(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX) != null; + } + + public static boolean isUncoveredGlobalIndex(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX) != null; + } + + public static boolean isLocalOrUncoveredGlobalIndex(Scan scan) { + return isLocalIndex(scan) || isUncoveredGlobalIndex(scan); + } + + public static boolean isNonAggregateScan(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.NON_AGGREGATE_QUERY) != null; + } + + // Designates a "simple scan", i.e. a scan that does not need to be scoped + // to a single region. + public static boolean isSimpleScan(Scan scan) { + return ScanUtil.isNonAggregateScan(scan) + && scan.getAttribute(BaseScannerRegionObserverConstants.TOPN) == null + && scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET) == null; + } + + // Use getTenantId and pass in column name to match against + // in as PSchema attribute. If column name matches in + // KeyExpressions, set on scan as attribute + public static ImmutableBytesPtr getTenantId(Scan scan) { + // Create Scan with special aggregation column over which to aggregate + byte[] tenantId = scan.getAttribute(PhoenixRuntime.TENANT_ID_ATTRIB); + if (tenantId == null) { + return null; + } + return new ImmutableBytesPtr(tenantId); + } + + public static void setCustomAnnotations(Scan scan, byte[] annotations) { + scan.setAttribute(CUSTOM_ANNOTATIONS, annotations); + } + + public static byte[] getCustomAnnotations(Scan scan) { + return scan.getAttribute(CUSTOM_ANNOTATIONS); + } + + public static Scan newScan(Scan scan) { + try { + Scan newScan = new Scan(scan); + // Clone the underlying family map instead of sharing it between + // the existing and cloned Scan (which is the retarded default + // behavior). 
+ TreeMap> existingMap = + (TreeMap>) scan.getFamilyMap(); + Map> clonedMap = + new TreeMap>(existingMap); + newScan.setFamilyMap(clonedMap); + // Carry over the reversed attribute + newScan.setReversed(scan.isReversed()); + if (scan.getReadType() == Scan.ReadType.PREAD) { + // HBASE-25644 : Only if Scan#setSmall(boolean) is called with + // true, readType should be set PREAD. For non-small scan, + // setting setSmall(false) is redundant and degrades perf + // without HBASE-25644 fix. + newScan.setReadType(Scan.ReadType.PREAD); + } + return newScan; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Intersects the scan start/stop row with the startKey and stopKey + * @return false if the Scan cannot possibly return rows and true otherwise + */ + public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey) { + return intersectScanRange(scan, startKey, stopKey, false); + } + + public static boolean intersectScanRange(Scan scan, byte[] startKey, byte[] stopKey, + boolean useSkipScan) { + boolean mayHaveRows = false; + int offset = 0; + if (ScanUtil.isLocalIndex(scan)) { + offset = startKey.length != 0 ? startKey.length : stopKey.length; + } + byte[] existingStartKey = scan.getStartRow(); + byte[] existingStopKey = scan.getStopRow(); + if (existingStartKey.length > 0) { + if (startKey.length == 0 || Bytes.compareTo(existingStartKey, startKey) > 0) { + startKey = existingStartKey; + } + } else { + mayHaveRows = true; + } + if (existingStopKey.length > 0) { + if (stopKey.length == 0 || Bytes.compareTo(existingStopKey, stopKey) < 0) { + stopKey = existingStopKey; + } + } else { + mayHaveRows = true; + } + scan.withStartRow(startKey); + scan.withStopRow(stopKey); + if (offset > 0 && useSkipScan) { + byte[] temp = null; + if (startKey.length != 0) { + temp = new byte[startKey.length - offset]; + System.arraycopy(startKey, offset, temp, 0, startKey.length - offset); + startKey = temp; + } + if (stopKey.length != 0) { + temp = new byte[stopKey.length - offset]; + System.arraycopy(stopKey, offset, temp, 0, stopKey.length - offset); + stopKey = temp; + } + } + mayHaveRows = mayHaveRows || Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0; + + // If the scan is using skip scan filter, intersect and replace the filter. 
+ if (mayHaveRows && useSkipScan) { + Filter filter = scan.getFilter(); + if (filter instanceof SkipScanFilter) { + SkipScanFilter oldFilter = (SkipScanFilter) filter; + SkipScanFilter newFilter = oldFilter.intersect(startKey, stopKey); + if (newFilter == null) { + return false; + } + // Intersect found: replace skip scan with intersected one + scan.setFilter(newFilter); + } else if (filter instanceof FilterList) { + FilterList oldList = (FilterList) filter; + FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL); + for (Filter f : oldList.getFilters()) { + if (f instanceof SkipScanFilter) { + SkipScanFilter newFilter = ((SkipScanFilter) f).intersect(startKey, stopKey); + if (newFilter == null) { + return false; } + newList.addFilter(newFilter); + } else { + newList.addFilter(f); + } + } + scan.setFilter(newList); + } + } + return mayHaveRows; + } + + public static void andFilterAtBeginning(Scan scan, Filter andWithFilter) { + if (andWithFilter == null) { + return; + } + Filter filter = scan.getFilter(); + if (filter == null) { + scan.setFilter(andWithFilter); + } else if ( + filter instanceof FilterList + && ((FilterList) filter).getOperator() == FilterList.Operator.MUST_PASS_ALL + ) { + FilterList filterList = (FilterList) filter; + List allFilters = new ArrayList(filterList.getFilters().size() + 1); + allFilters.add(andWithFilter); + allFilters.addAll(filterList.getFilters()); + scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, allFilters)); + } else { + scan.setFilter( + new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.asList(andWithFilter, filter))); + } + } + + public static void andFilterAtEnd(Scan scan, Filter andWithFilter) { + if (andWithFilter == null) { + return; + } + Filter filter = scan.getFilter(); + if (filter == null) { + scan.setFilter(andWithFilter); + } else if ( + filter instanceof FilterList + && ((FilterList) filter).getOperator() == FilterList.Operator.MUST_PASS_ALL + ) { + FilterList filterList = (FilterList) filter; + List allFilters = new ArrayList(filterList.getFilters().size() + 1); + allFilters.addAll(filterList.getFilters()); + allFilters.add(andWithFilter); + scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, allFilters)); + } else { + scan.setFilter( + new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.asList(filter, andWithFilter))); + } + } + + public static void setQualifierRangesOnFilter(Scan scan, + Pair minMaxQualifiers) { + Filter filter = scan.getFilter(); + if (filter != null) { + if (filter instanceof FilterList) { + for (Filter f : ((FilterList) filter).getFilters()) { + if (f instanceof MultiEncodedCQKeyValueComparisonFilter) { + ((MultiEncodedCQKeyValueComparisonFilter) f).setMinMaxQualifierRange(minMaxQualifiers); + } + } + } else if (filter instanceof MultiEncodedCQKeyValueComparisonFilter) { + ((MultiEncodedCQKeyValueComparisonFilter) filter).setMinMaxQualifierRange(minMaxQualifiers); + } + } + } + + public static void setTimeRange(Scan scan, long ts) { + try { + scan.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, ts); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static void setTimeRange(Scan scan, TimeRange range) { + try { + scan.setTimeRange(range.getMin(), range.getMax()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static void setTimeRange(Scan scan, long minStamp, long maxStamp) { + try { + scan.setTimeRange(minStamp, maxStamp); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + 
public static byte[] getMinKey(RowKeySchema schema, List> slots, int[] slotSpan) { + return getKey(schema, slots, slotSpan, Bound.LOWER); + } + + public static byte[] getMaxKey(RowKeySchema schema, List> slots, int[] slotSpan) { + return getKey(schema, slots, slotSpan, Bound.UPPER); + } + + private static byte[] getKey(RowKeySchema schema, List> slots, int[] slotSpan, + Bound bound) { + if (slots.isEmpty()) { + return KeyRange.UNBOUND; + } + int[] position = new int[slots.size()]; + int maxLength = 0; + int slotEndingFieldPos = -1; + for (int i = 0; i < position.length; i++) { + position[i] = bound == Bound.LOWER ? 0 : slots.get(i).size() - 1; + KeyRange range = slots.get(i).get(position[i]); + slotEndingFieldPos = slotEndingFieldPos + slotSpan[i] + 1; + Field field = schema.getField(slotEndingFieldPos); + int keyLength = range.getRange(bound).length; + if (!field.getDataType().isFixedWidth()) { + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + keyLength++; + if ( + range.isUnbound(bound) && !range.isInclusive(bound) + && field.getSortOrder() == SortOrder.DESC + ) { + keyLength++; + } } else { - mayHaveRows = true; - } - if (existingStopKey.length > 0) { - if (stopKey.length == 0 || Bytes.compareTo(existingStopKey, stopKey) < 0) { - stopKey = existingStopKey; - } + keyLength += 2; + if ( + range.isUnbound(bound) && !range.isInclusive(bound) + && field.getSortOrder() == SortOrder.DESC + ) { + keyLength += 2; + } + } + } + maxLength += keyLength; + } + byte[] key = new byte[maxLength]; + int length = setKey(schema, slots, slotSpan, position, bound, key, 0, 0, position.length); + if (length == 0) { + return KeyRange.UNBOUND; + } + if (length == maxLength) { + return key; + } + byte[] keyCopy = new byte[length]; + System.arraycopy(key, 0, keyCopy, 0, length); + return keyCopy; + } + + /* + * Set the key by appending the keyRanges inside slots at positions as specified by the position + * array. We need to increment part of the key range, or increment the whole key at the end, + * depending on the bound we are setting and whether the key range is inclusive or exclusive. The + * logic for determining whether to increment or not is: range/single boundary bound increment + * range inclusive lower no range inclusive upper yes, at the end if occurs at any slots. range + * exclusive lower yes range exclusive upper no single inclusive lower no single inclusive upper + * yes, at the end if it is the last slots. 
+ */ + public static int setKey(RowKeySchema schema, List> slots, int[] slotSpan, + int[] position, Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex) { + return setKey(schema, slots, slotSpan, position, bound, key, byteOffset, slotStartIndex, + slotEndIndex, slotStartIndex); + } + + public static int setKey(RowKeySchema schema, List> slots, int[] slotSpan, + int[] position, Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex, + int schemaStartIndex) { + int offset = byteOffset; + boolean lastInclusiveUpperSingleKey = false; + boolean anyInclusiveUpperRangeKey = false; + boolean lastUnboundUpper = false; + // The index used for slots should be incremented by 1, + // but the index for the field it represents in the schema + // should be incremented by 1 + value in the current slotSpan index + // slotSpan stores the number of columns beyond one that the range spans + Field field = null; + int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex); + for (i = slotStartIndex; i < slotEndIndex; i++) { + // Build up the key by appending the bound of each key range + // from the current position of each slot. + KeyRange range = slots.get(i).get(position[i]); + // Use last slot in a multi-span column to determine if fixed width + field = schema.getField(fieldIndex + slotSpan[i]); + boolean isFixedWidth = field.getDataType().isFixedWidth(); + /* + * If the current slot is unbound then stop if: 1) setting the upper bound. There's no value + * in continuing because nothing will be filtered. 2) setting the lower bound when the type is + * fixed length for the same reason. However, if the type is variable width continue building + * the key because null values will be filtered since our separator byte will be appended and + * incremented. + */ + lastUnboundUpper = false; + if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth)) { + lastUnboundUpper = (bound == Bound.UPPER); + break; + } + byte[] bytes = range.getRange(bound); + System.arraycopy(bytes, 0, key, offset, bytes.length); + offset += bytes.length; + + /* + * We must add a terminator to a variable length key even for the last PK column if the lower + * key is non inclusive or the upper key is inclusive. Otherwise, we'd be incrementing the key + * value itself, and thus bumping it up too much. + */ + boolean inclusiveUpper = range.isUpperInclusive() && bound == Bound.UPPER; + boolean exclusiveLower = + !range.isLowerInclusive() && bound == Bound.LOWER && range != KeyRange.EVERYTHING_RANGE; + boolean exclusiveUpper = !range.isUpperInclusive() && bound == Bound.UPPER; + // If we are setting the upper bound of using inclusive single key, we remember + // to increment the key if we exit the loop after this iteration. + // + // We remember to increment the last slot if we are setting the upper bound with an + // inclusive range key. + // + // We cannot combine the two flags together in case for single-inclusive key followed + // by the range-exclusive key. In that case, we do not need to increment the end at the + // end. But if we combine the two flag, the single inclusive key in the middle of the + // key slots would cause the flag to become true. 
+ lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper; + anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper; + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + // A null or empty byte array is always represented as a zero byte + byte sepByte = + SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field); + + if ( + !isFixedWidth && (sepByte == QueryConstants.DESC_SEPARATOR_BYTE || (!exclusiveUpper + && (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower))) + ) { + key[offset++] = sepByte; + // Set lastInclusiveUpperSingleKey back to false if this is the last pk column + // as we don't want to increment the QueryConstants.SEPARATOR_BYTE byte in this case. + // To test if this is the last pk column we need to consider the span of this slot + // and the field index to see if this slot considers the last column. + // But if last field of rowKey is variable length and also DESC, the trailing 0xFF + // is not removed when stored in HBASE, so for such case, we should not set + // lastInclusiveUpperSingleKey back to false. + if (sepByte != QueryConstants.DESC_SEPARATOR_BYTE) { + lastInclusiveUpperSingleKey &= (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1; + } + } + } else { + byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded( + schema.rowKeyOrderOptimizable(), bytes.length == 0, field.getSortOrder()); + if ( + !isFixedWidth && (sepBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES + || (!exclusiveUpper + && (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower))) + ) { + key[offset++] = sepBytes[0]; + key[offset++] = sepBytes[1]; + if (sepBytes != QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { + lastInclusiveUpperSingleKey &= (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1; + } + } + } + if (exclusiveUpper) { + // Cannot include anything else on the key, as otherwise + // keys that match the upper range will be included. For example WHERE k1 < 2 and k2 = 3 + // would match k1 = 2, k2 = 3 which is wrong. + break; + } + // If we are setting the lower bound with an exclusive range key, we need to bump the + // slot up for each key part. For an upper bound, we bump up an inclusive key, but + // only after the last key part. + if (exclusiveLower) { + if (!ByteUtil.nextKey(key, offset)) { + // Special case for not being able to increment. + // In this case we return a negative byteOffset to + // remove this part from the key being formed. Since the + // key has overflowed, this means that we should not + // have an end key specified. + return -byteOffset; + } + // We're filtering on values being non null here, but we still need the 0xFF + // terminator, since DESC keys ignore the last byte as it's expected to be + // the terminator. Without this, we'd ignore the separator byte that was + // just added and incremented. 
+ if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + if ( + !isFixedWidth && bytes.length == 0 + && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field) + == QueryConstants.DESC_SEPARATOR_BYTE + ) { + key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE; + } } else { - mayHaveRows = true; - } - scan.withStartRow(startKey); - scan.withStopRow(stopKey); - if (offset > 0 && useSkipScan) { - byte[] temp = null; - if (startKey.length != 0) { - temp =new byte[startKey.length - offset]; - System.arraycopy(startKey, offset, temp, 0, startKey.length - offset); - startKey = temp; - } - if (stopKey.length != 0) { - temp = new byte[stopKey.length - offset]; - System.arraycopy(stopKey, offset, temp, 0, stopKey.length - offset); - stopKey = temp; - } - } - mayHaveRows = mayHaveRows || Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) < 0; - - // If the scan is using skip scan filter, intersect and replace the filter. - if (mayHaveRows && useSkipScan) { - Filter filter = scan.getFilter(); - if (filter instanceof SkipScanFilter) { - SkipScanFilter oldFilter = (SkipScanFilter)filter; - SkipScanFilter newFilter = oldFilter.intersect(startKey, stopKey); - if (newFilter == null) { - return false; - } - // Intersect found: replace skip scan with intersected one - scan.setFilter(newFilter); - } else if (filter instanceof FilterList) { - FilterList oldList = (FilterList)filter; - FilterList newList = new FilterList(FilterList.Operator.MUST_PASS_ALL); - for (Filter f : oldList.getFilters()) { - if (f instanceof SkipScanFilter) { - SkipScanFilter newFilter = ((SkipScanFilter)f).intersect(startKey, stopKey); - if (newFilter == null) { - return false; - } - newList.addFilter(newFilter); - } else { - newList.addFilter(f); - } - } - scan.setFilter(newList); - } - } - return mayHaveRows; - } - - public static void andFilterAtBeginning(Scan scan, Filter andWithFilter) { - if (andWithFilter == null) { - return; - } - Filter filter = scan.getFilter(); - if (filter == null) { - scan.setFilter(andWithFilter); - } else if (filter instanceof FilterList && ((FilterList)filter).getOperator() == FilterList.Operator.MUST_PASS_ALL) { - FilterList filterList = (FilterList)filter; - List allFilters = new ArrayList(filterList.getFilters().size() + 1); - allFilters.add(andWithFilter); - allFilters.addAll(filterList.getFilters()); - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,allFilters)); - } else { - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,Arrays.asList(andWithFilter, filter))); - } - } - - public static void andFilterAtEnd(Scan scan, Filter andWithFilter) { - if (andWithFilter == null) { - return; - } - Filter filter = scan.getFilter(); - if (filter == null) { - scan.setFilter(andWithFilter); - } else if (filter instanceof FilterList && ((FilterList)filter).getOperator() == FilterList.Operator.MUST_PASS_ALL) { - FilterList filterList = (FilterList)filter; - List allFilters = new ArrayList(filterList.getFilters().size() + 1); - allFilters.addAll(filterList.getFilters()); - allFilters.add(andWithFilter); - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,allFilters)); + if ( + !isFixedWidth && bytes.length == 0 + && SchemaUtil.getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(), + false, field.getSortOrder()) + == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES + ) { + key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[0]; + key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[1]; + } + 
} + } + + fieldIndex += slotSpan[i] + 1; + } + if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey || lastUnboundUpper) { + if (!ByteUtil.nextKey(key, offset)) { + // Special case for not being able to increment. + // In this case we return a negative byteOffset to + // remove this part from the key being formed. Since the + // key has overflowed, this means that we should not + // have an end key specified. + return -byteOffset; + } + } + // Remove trailing separator bytes, since the columns may have been added + // after the table has data, in which case there won't be a separator + // byte. + if (bound == Bound.LOWER) { + while ( + --i >= schemaStartIndex && offset > byteOffset + && !(field = schema.getField(--fieldIndex)).getDataType().isFixedWidth() + && field.getSortOrder() == SortOrder.ASC && hasSeparatorBytes(key, field, offset) + ) { + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + offset--; + fieldIndex -= slotSpan[i]; } else { - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,Arrays.asList(filter, andWithFilter))); - } - } - - public static void setQualifierRangesOnFilter(Scan scan, Pair minMaxQualifiers) { - Filter filter = scan.getFilter(); - if (filter != null) { - if (filter instanceof FilterList) { - for (Filter f : ((FilterList)filter).getFilters()) { - if (f instanceof MultiEncodedCQKeyValueComparisonFilter) { - ((MultiEncodedCQKeyValueComparisonFilter)f).setMinMaxQualifierRange(minMaxQualifiers); - } - } - } else if (filter instanceof MultiEncodedCQKeyValueComparisonFilter) { - ((MultiEncodedCQKeyValueComparisonFilter)filter).setMinMaxQualifierRange(minMaxQualifiers); - } - } - } - - public static void setTimeRange(Scan scan, long ts) { - try { - scan.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, ts); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - public static void setTimeRange(Scan scan, TimeRange range) { - try { - scan.setTimeRange(range.getMin(), range.getMax()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - public static void setTimeRange(Scan scan, long minStamp, long maxStamp) { - try { - scan.setTimeRange(minStamp, maxStamp); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - public static byte[] getMinKey(RowKeySchema schema, List> slots, int[] slotSpan) { - return getKey(schema, slots, slotSpan, Bound.LOWER); - } - - public static byte[] getMaxKey(RowKeySchema schema, List> slots, int[] slotSpan) { - return getKey(schema, slots, slotSpan, Bound.UPPER); - } - - private static byte[] getKey(RowKeySchema schema, List> slots, int[] slotSpan, Bound bound) { - if (slots.isEmpty()) { - return KeyRange.UNBOUND; - } - int[] position = new int[slots.size()]; - int maxLength = 0; - int slotEndingFieldPos = -1; - for (int i = 0; i < position.length; i++) { - position[i] = bound == Bound.LOWER ? 
0 : slots.get(i).size()-1; - KeyRange range = slots.get(i).get(position[i]); - slotEndingFieldPos = slotEndingFieldPos + slotSpan[i] + 1; - Field field = schema.getField(slotEndingFieldPos); - int keyLength = range.getRange(bound).length; - if (!field.getDataType().isFixedWidth()) { - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - keyLength++; - if (range.isUnbound(bound) && !range.isInclusive(bound) - && field.getSortOrder() == SortOrder.DESC) { - keyLength++; - } - } else { - keyLength += 2; - if (range.isUnbound(bound) && !range.isInclusive(bound) - && field.getSortOrder() == SortOrder.DESC) { - keyLength += 2; - } - } - } - maxLength += keyLength; - } - byte[] key = new byte[maxLength]; - int length = setKey(schema, slots, slotSpan, position, bound, key, 0, 0, position.length); - if (length == 0) { - return KeyRange.UNBOUND; - } - if (length == maxLength) { - return key; - } - byte[] keyCopy = new byte[length]; - System.arraycopy(key, 0, keyCopy, 0, length); - return keyCopy; - } - + offset -= 2; + fieldIndex -= slotSpan[i]; + } + } + } + return offset - byteOffset; + } + + private static boolean hasSeparatorBytes(byte[] key, Field field, int offset) { + return (field.getDataType() != PVarbinaryEncoded.INSTANCE + && key[offset - 1] == QueryConstants.SEPARATOR_BYTE) + || (field.getDataType() == PVarbinaryEncoded.INSTANCE && offset >= 2 + && key[offset - 1] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[1] + && key[offset - 2] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[0]); + } + + public static boolean adjustScanFilterForGlobalIndexRegionScanner(Scan scan) { + // For rebuilds we use count (*) as query for regular tables which ends up setting the + // FirstKeyOnlyFilter on scan + // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col + // For rebuilds we need all columns and all versions + + Filter filter = scan.getFilter(); + if (filter instanceof PagingFilter) { + PagingFilter pageFilter = (PagingFilter) filter; + Filter delegateFilter = pageFilter.getDelegateFilter(); + if (delegateFilter instanceof EmptyColumnOnlyFilter) { + pageFilter.setDelegateFilter(null); + } else if (delegateFilter instanceof FirstKeyOnlyFilter) { + scan.setFilter(null); + return true; + } else if (delegateFilter != null) { + // Override the filter so that we get all versions + pageFilter.setDelegateFilter(new AllVersionsIndexRebuildFilter(delegateFilter)); + } + } else if (filter instanceof EmptyColumnOnlyFilter) { + scan.setFilter(null); + return true; + } else if (filter instanceof FirstKeyOnlyFilter) { + scan.setFilter(null); + return true; + } else if (filter != null) { + // Override the filter so that we get all versions + scan.setFilter(new AllVersionsIndexRebuildFilter(filter)); + } + return false; + } + + public static interface BytesComparator { + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2); + }; + + private static final BytesComparator DESC_VAR_WIDTH_COMPARATOR = new BytesComparator() { + + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + return DescVarLengthFastByteComparisons.compareTo(b1, s1, l1, b2, s2, l2); + } + + }; + + private static final BytesComparator ASC_FIXED_WIDTH_COMPARATOR = new BytesComparator() { + + @Override + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2); + } + + }; + + public static BytesComparator getComparator(boolean isFixedWidth, SortOrder 
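Again for review context only, a toy sketch (not part of this patch) of the key assembly that the reformatted getKey/setKey above perform: concatenate the chosen bound of each slot, terminate variable width columns with a zero separator byte, and increment the assembled key when the upper bound is inclusive, mirroring the "inclusive upper, increment at the end" rule in the comment. It ignores DESC sort order, multi-span slots, and the two byte separators used for PVarbinaryEncoded; the class name is illustrative.

import java.io.ByteArrayOutputStream;

public class KeyBuildSketch {
  static final byte SEPARATOR = 0x00;

  static byte[] buildKey(byte[][] slotValues, boolean[] fixedWidth, boolean inclusiveUpper) {
    ByteArrayOutputStream key = new ByteArrayOutputStream();
    for (int i = 0; i < slotValues.length; i++) {
      key.write(slotValues[i], 0, slotValues[i].length);
      if (!fixedWidth[i]) {
        key.write(SEPARATOR); // terminate variable-width columns
      }
    }
    byte[] bytes = key.toByteArray();
    if (inclusiveUpper) {
      nextKey(bytes); // smallest key strictly greater than the assembled prefix
    }
    return bytes;
  }

  // Increment the key as an unsigned big-endian number (simplified ByteUtil.nextKey).
  static boolean nextKey(byte[] key) {
    for (int i = key.length - 1; i >= 0; i--) {
      if (++key[i] != 0) {
        return true;
      }
    }
    return false; // overflowed: no upper bound can be formed
  }
}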
sortOrder) { + return isFixedWidth || sortOrder == SortOrder.ASC + ? ASC_FIXED_WIDTH_COMPARATOR + : DESC_VAR_WIDTH_COMPARATOR; + } + + public static BytesComparator getComparator(Field field) { + return getComparator(field.getDataType().isFixedWidth(), field.getSortOrder()); + } + + /** + * Perform a binary lookup on the list of KeyRange for the tightest slot such that the slotBound + * of the current slot is higher or equal than the slotBound of our range. + * @return the index of the slot whose slot bound equals or are the tightest one that is smaller + * than rangeBound of range, or slots.length if no bound can be found. + */ + public static int searchClosestKeyRangeWithUpperHigherThanPtr(List slots, + ImmutableBytesWritable ptr, int lower, Field field) { + int upper = slots.size() - 1; + int mid; + BytesComparator comparator = + ScanUtil.getComparator(field.getDataType().isFixedWidth(), field.getSortOrder()); + while (lower <= upper) { + mid = (lower + upper) / 2; + int cmp = slots.get(mid).compareUpperToLowerBound(ptr, true, comparator); + if (cmp < 0) { + lower = mid + 1; + } else if (cmp > 0) { + upper = mid - 1; + } else { + return mid; + } + } + mid = (lower + upper) / 2; + if (mid == 0 && slots.get(mid).compareUpperToLowerBound(ptr, true, comparator) > 0) { + return mid; + } else { + return ++mid; + } + } + + public static ScanRanges newScanRanges(List mutations) throws SQLException { + List keys = Lists.newArrayListWithExpectedSize(mutations.size()); + for (Mutation m : mutations) { + keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow(), SortOrder.ASC)); + } + ScanRanges keyRanges = ScanRanges.createPointLookup(keys); + return keyRanges; + } + + /** + * Converts a partially qualified KeyRange into a KeyRange with a inclusive lower bound and an + * exclusive upper bound, widening as necessary. + */ + public static KeyRange convertToInclusiveExclusiveRange(KeyRange partialRange, + RowKeySchema schema, ImmutableBytesWritable ptr) { + // Ensure minMaxRange is lower inclusive and upper exclusive, as that's + // what we need to intersect against for the HBase scan. 
+ byte[] lowerRange = partialRange.getLowerRange(); + if (!partialRange.lowerUnbound()) { + if (!partialRange.isLowerInclusive()) { + lowerRange = ScanUtil.nextKey(lowerRange, schema, ptr); + } + } + + byte[] upperRange = partialRange.getUpperRange(); + if (!partialRange.upperUnbound()) { + if (partialRange.isUpperInclusive()) { + upperRange = ScanUtil.nextKey(upperRange, schema, ptr); + } + } + if (partialRange.getLowerRange() != lowerRange || partialRange.getUpperRange() != upperRange) { + partialRange = KeyRange.getKeyRange(lowerRange, upperRange); + } + return partialRange; + } + + private static byte[] nextKey(byte[] key, RowKeySchema schema, ImmutableBytesWritable ptr) { + int pos = 0; + int maxOffset = schema.iterator(key, ptr); + while (schema.next(ptr, pos, maxOffset) != null) { + pos++; + } + Field field = schema.getField(pos - 1); + if (!field.getDataType().isFixedWidth()) { + if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { + byte[] newLowerRange = new byte[key.length + 1]; + System.arraycopy(key, 0, newLowerRange, 0, key.length); + newLowerRange[key.length] = + SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), key.length == 0, field); + key = newLowerRange; + } else { + byte[] newLowerRange = new byte[key.length + 2]; + System.arraycopy(key, 0, newLowerRange, 0, key.length); + byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded( + schema.rowKeyOrderOptimizable(), key.length == 0, field.getSortOrder()); + newLowerRange[key.length] = sepBytes[0]; + newLowerRange[key.length + 1] = sepBytes[1]; + key = newLowerRange; + } + } else { + key = Arrays.copyOf(key, key.length); + } + ByteUtil.nextKey(key, key.length); + return key; + } + + public static boolean isReversed(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN) != null; + } + + public static void setReversed(Scan scan) { + scan.setAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN, PDataType.TRUE_BYTES); + scan.setLoadColumnFamiliesOnDemand(false); + } + + public static void unsetReversed(Scan scan) { + scan.setAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN, PDataType.FALSE_BYTES); + scan.setLoadColumnFamiliesOnDemand(true); + } + + // Start/stop row must be swapped if scan is being done in reverse + public static void setupReverseScan(Scan scan) { + if (isReversed(scan) && !scan.isReversed()) { + byte[] tmpStartRow = scan.getStartRow(); + boolean tmpIncludeStartRow = scan.includeStartRow(); + scan.withStartRow(scan.getStopRow(), scan.includeStopRow()); + scan.withStopRow(tmpStartRow, tmpIncludeStartRow); + scan.setReversed(true); + } + } + + /** + * prefix region start key to the start row/stop row suffix and set as scan boundaries. + */ + public static void setupLocalIndexScan(Scan scan) { + byte[] prefix = + scan.getStartRow().length == 0 ? new byte[scan.getStopRow().length] : scan.getStartRow(); + int prefixLength = + scan.getStartRow().length == 0 ? scan.getStopRow().length : scan.getStartRow().length; + if (scan.getAttribute(SCAN_START_ROW_SUFFIX) != null) { + scan.withStartRow( + ScanRanges.prefixKey(scan.getAttribute(SCAN_START_ROW_SUFFIX), 0, prefix, prefixLength)); + } + if (scan.getAttribute(SCAN_STOP_ROW_SUFFIX) != null) { + scan.withStopRow( + ScanRanges.prefixKey(scan.getAttribute(SCAN_STOP_ROW_SUFFIX), 0, prefix, prefixLength)); + } + } + + public static byte[] getActualStartRow(Scan localIndexScan, RegionInfo regionInfo) { + return localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX) == null + ? 
localIndexScan.getStartRow() + : ScanRanges.prefixKey(localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX), 0, + regionInfo.getStartKey().length == 0 + ? new byte[regionInfo.getEndKey().length] + : regionInfo.getStartKey(), + regionInfo.getStartKey().length == 0 + ? regionInfo.getEndKey().length + : regionInfo.getStartKey().length); + } + + /** + * Set all attributes required and boundaries for local index scan. + */ + public static void setLocalIndexAttributes(Scan newScan, int keyOffset, byte[] regionStartKey, + byte[] regionEndKey, byte[] startRowSuffix, byte[] stopRowSuffix) { + if (ScanUtil.isLocalIndex(newScan)) { + newScan.setAttribute(SCAN_ACTUAL_START_ROW, regionStartKey); + newScan.withStartRow(regionStartKey); + newScan.withStopRow(regionEndKey); + if (keyOffset > 0) { + newScan.setAttribute(SCAN_START_ROW_SUFFIX, + ScanRanges.stripPrefix(startRowSuffix, keyOffset)); + } else { + newScan.setAttribute(SCAN_START_ROW_SUFFIX, startRowSuffix); + } + if (keyOffset > 0) { + newScan.setAttribute(SCAN_STOP_ROW_SUFFIX, + ScanRanges.stripPrefix(stopRowSuffix, keyOffset)); + } else { + newScan.setAttribute(SCAN_STOP_ROW_SUFFIX, stopRowSuffix); + } + } + } + + public static boolean isContextScan(Scan scan, StatementContext context) { + return Bytes.compareTo(context.getScan().getStartRow(), scan.getStartRow()) == 0 + && Bytes.compareTo(context.getScan().getStopRow(), scan.getStopRow()) == 0; + } + + public static int getRowKeyOffset(byte[] regionStartKey, byte[] regionEndKey) { + return regionStartKey.length > 0 ? regionStartKey.length : regionEndKey.length; + } + + private static void setRowKeyOffset(Filter filter, int offset) { + if (filter instanceof BooleanExpressionFilter) { + BooleanExpressionFilter boolFilter = (BooleanExpressionFilter) filter; + IndexUtil.setRowKeyExpressionOffset(boolFilter.getExpression(), offset); + } else if (filter instanceof SkipScanFilter) { + SkipScanFilter skipScanFilter = (SkipScanFilter) filter; + skipScanFilter.setOffset(offset); + } else if (filter instanceof DistinctPrefixFilter) { + DistinctPrefixFilter prefixFilter = (DistinctPrefixFilter) filter; + prefixFilter.setOffset(offset); + } + } + + public static void setRowKeyOffset(Scan scan, int offset) { + Filter filter = scan.getFilter(); + if (filter == null) { + return; + } + if (filter instanceof PagingFilter) { + filter = ((PagingFilter) filter).getDelegateFilter(); + if (filter == null) { + return; + } + } + if (filter instanceof FilterList) { + FilterList filterList = (FilterList) filter; + for (Filter childFilter : filterList.getFilters()) { + setRowKeyOffset(childFilter, offset); + } + } else { + setRowKeyOffset(filter, offset); + } + } + + public static int[] getDefaultSlotSpans(int nSlots) { + return new int[nSlots]; + } + + /** + * Finds the position in the row key schema for a given position in the scan slots. For example, + * with a slotSpan of {0, 1, 0}, the slot at index 1 spans an extra column in the row key. This + * means that the slot at index 2 has a slot index of 2 but a row key index of 3. To calculate the + * "adjusted position" index, we simply add up the number of extra slots spanned and offset the + * slotPosition by that much. + * @param slotSpan the extra span per skip scan slot. corresponds to + * {@link ScanRanges#getSlotSpans()} + * @param slotPosition the index of a slot in the SkipScan slots list. 
+ * @return the equivalent row key position in the RowKeySchema + */ + public static int getRowKeyPosition(int[] slotSpan, int slotPosition) { + int offset = 0; + + for (int i = 0; i < slotPosition; i++) { + offset += slotSpan[i]; + } + + return offset + slotPosition; + } + + public static boolean isAnalyzeTable(Scan scan) { + return scan.getAttribute((BaseScannerRegionObserverConstants.ANALYZE_TABLE)) != null; + } + + public static boolean crossesPrefixBoundary(byte[] key, byte[] prefixBytes, int prefixLength) { + if (key.length < prefixLength) { + return true; + } + if (prefixBytes.length >= prefixLength) { + return Bytes.compareTo(prefixBytes, 0, prefixLength, key, 0, prefixLength) != 0; + } + return hasNonZeroLeadingBytes(key, prefixLength); + } + + public static byte[] getPrefix(byte[] startKey, int prefixLength) { + // If startKey is at beginning, then our prefix will be a null padded byte array + return startKey.length >= prefixLength ? startKey : EMPTY_BYTE_ARRAY; + } + + private static boolean hasNonZeroLeadingBytes(byte[] key, int nBytesToCheck) { + if (nBytesToCheck > ZERO_BYTE_ARRAY.length) { + do { + if ( + Bytes.compareTo(key, nBytesToCheck - ZERO_BYTE_ARRAY.length, ZERO_BYTE_ARRAY.length, + ScanUtil.ZERO_BYTE_ARRAY, 0, ScanUtil.ZERO_BYTE_ARRAY.length) != 0 + ) { + return true; + } + nBytesToCheck -= ZERO_BYTE_ARRAY.length; + } while (nBytesToCheck > ZERO_BYTE_ARRAY.length); + } + return Bytes.compareTo(key, 0, nBytesToCheck, ZERO_BYTE_ARRAY, 0, nBytesToCheck) != 0; + } + + public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, + boolean isMultiTenantTable, boolean isSharedIndex) throws SQLException { + return isMultiTenantTable + ? getTenantIdBytes(schema, isSalted, tenantId, isSharedIndex) + : tenantId.getBytes(); + } + + public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, + boolean isSharedIndex) throws SQLException { + int pkPos = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0); + Field field = schema.getField(pkPos); + PDataType dataType = field.getDataType(); + byte[] convertedValue; + try { + Object value = dataType.toObject(tenantId.getString()); + convertedValue = dataType.toBytes(value); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(convertedValue); + dataType.pad(ptr, field.getMaxLength(), field.getSortOrder()); + convertedValue = ByteUtil.copyKeyBytesIfNecessary(ptr); + } catch (IllegalDataException ex) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE).build() + .buildException(); + } + return convertedValue; + } + + public static Iterator getFilterIterator(Scan scan) { + Iterator filterIterator; + Filter topLevelFilter = scan.getFilter(); + if (topLevelFilter == null) { + filterIterator = Collections.emptyIterator(); + } else if (topLevelFilter instanceof FilterList) { + filterIterator = ((FilterList) topLevelFilter).getFilters().iterator(); + } else { + filterIterator = Iterators.singletonIterator(topLevelFilter); + } + return filterIterator; + } + + /** + * Selecting underlying scanners in a round-robin fashion is possible if there is no ordering of + * rows needed, not even row key order. Also no point doing round robin of scanners if fetch size + * is 1. 
+ */ + public static boolean isRoundRobinPossible(OrderBy orderBy, StatementContext context) + throws SQLException { + int fetchSize = context.getStatement().getFetchSize(); + return fetchSize > 1 && !shouldRowsBeInRowKeyOrder(orderBy, context) + && orderBy.getOrderByExpressions().isEmpty(); + } + + public static boolean forceRowKeyOrder(StatementContext context) { + return context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER); + } + + public static boolean shouldRowsBeInRowKeyOrder(OrderBy orderBy, StatementContext context) { + return forceRowKeyOrder(context) || orderBy == FWD_ROW_KEY_ORDER_BY + || orderBy == REV_ROW_KEY_ORDER_BY; + } + + public static TimeRange intersectTimeRange(TimeRange rowTimestampColRange, + TimeRange scanTimeRange, Long scn) throws IOException, SQLException { + long scnToUse = scn == null ? HConstants.LATEST_TIMESTAMP : scn; + long lowerRangeToBe = 0; + long upperRangeToBe = scnToUse; + if (rowTimestampColRange != null) { + long minRowTimestamp = rowTimestampColRange.getMin(); + long maxRowTimestamp = rowTimestampColRange.getMax(); + if ((lowerRangeToBe > maxRowTimestamp) || (upperRangeToBe < minRowTimestamp)) { + return null; // degenerate + } else { + // there is an overlap of ranges + lowerRangeToBe = Math.max(lowerRangeToBe, minRowTimestamp); + upperRangeToBe = Math.min(upperRangeToBe, maxRowTimestamp); + } + } + if (scanTimeRange != null) { + long minScanTimeRange = scanTimeRange.getMin(); + long maxScanTimeRange = scanTimeRange.getMax(); + if ((lowerRangeToBe > maxScanTimeRange) || (upperRangeToBe < lowerRangeToBe)) { + return null; // degenerate + } else { + // there is an overlap of ranges + lowerRangeToBe = Math.max(lowerRangeToBe, minScanTimeRange); + upperRangeToBe = Math.min(upperRangeToBe, maxScanTimeRange); + } + } + return TimeRange.between(lowerRangeToBe, upperRangeToBe); + } + + public static boolean isDefaultTimeRange(TimeRange range) { + return range.getMin() == 0 && range.getMax() == Long.MAX_VALUE; + } + + /** + * @return true if scanners could be left open and records retrieved by simply advancing them on + * the server side. To make sure HBase doesn't cancel the leases and close the open + * scanners, we need to periodically renew leases. To look at the earliest HBase version + * that supports renewing leases, see {@link MetaDataProtocol#MIN_RENEW_LEASE_VERSION} + */ + public static boolean isPacingScannersPossible(StatementContext context) { + return context.getConnection().getQueryServices().isRenewingLeasesEnabled(); + } + + public static void addOffsetAttribute(Scan scan, Integer offset) { + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET, Bytes.toBytes(offset)); + } + + public static final boolean canQueryBeExecutedSerially(PTable table, OrderBy orderBy, + StatementContext context) { /* - * Set the key by appending the keyRanges inside slots at positions as specified by the position array. - * - * We need to increment part of the key range, or increment the whole key at the end, depending on the - * bound we are setting and whether the key range is inclusive or exclusive. The logic for determining - * whether to increment or not is: - * range/single boundary bound increment - * range inclusive lower no - * range inclusive upper yes, at the end if occurs at any slots. 
- * range exclusive lower yes - * range exclusive upper no - * single inclusive lower no - * single inclusive upper yes, at the end if it is the last slots. - */ - public static int setKey(RowKeySchema schema, List> slots, int[] slotSpan, int[] position, - Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex) { - return setKey(schema, slots, slotSpan, position, bound, key, byteOffset, slotStartIndex, slotEndIndex, slotStartIndex); - } - - public static int setKey(RowKeySchema schema, List> slots, int[] slotSpan, int[] position, - Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex, int schemaStartIndex) { - int offset = byteOffset; - boolean lastInclusiveUpperSingleKey = false; - boolean anyInclusiveUpperRangeKey = false; - boolean lastUnboundUpper = false; - // The index used for slots should be incremented by 1, - // but the index for the field it represents in the schema - // should be incremented by 1 + value in the current slotSpan index - // slotSpan stores the number of columns beyond one that the range spans - Field field = null; - int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex); - for (i = slotStartIndex; i < slotEndIndex; i++) { - // Build up the key by appending the bound of each key range - // from the current position of each slot. - KeyRange range = slots.get(i).get(position[i]); - // Use last slot in a multi-span column to determine if fixed width - field = schema.getField(fieldIndex + slotSpan[i]); - boolean isFixedWidth = field.getDataType().isFixedWidth(); - /* - * If the current slot is unbound then stop if: - * 1) setting the upper bound. There's no value in - * continuing because nothing will be filtered. - * 2) setting the lower bound when the type is fixed length - * for the same reason. However, if the type is variable width - * continue building the key because null values will be filtered - * since our separator byte will be appended and incremented. - */ - lastUnboundUpper = false; - if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth)) { - lastUnboundUpper = (bound == Bound.UPPER); - break; - } - byte[] bytes = range.getRange(bound); - System.arraycopy(bytes, 0, key, offset, bytes.length); - offset += bytes.length; - - /* - * We must add a terminator to a variable length key even for the last PK column if - * the lower key is non inclusive or the upper key is inclusive. Otherwise, we'd be - * incrementing the key value itself, and thus bumping it up too much. - */ - boolean inclusiveUpper = range.isUpperInclusive() && bound == Bound.UPPER; - boolean exclusiveLower = !range.isLowerInclusive() && bound == Bound.LOWER && range != KeyRange.EVERYTHING_RANGE; - boolean exclusiveUpper = !range.isUpperInclusive() && bound == Bound.UPPER; - // If we are setting the upper bound of using inclusive single key, we remember - // to increment the key if we exit the loop after this iteration. - // - // We remember to increment the last slot if we are setting the upper bound with an - // inclusive range key. - // - // We cannot combine the two flags together in case for single-inclusive key followed - // by the range-exclusive key. In that case, we do not need to increment the end at the - // end. But if we combine the two flag, the single inclusive key in the middle of the - // key slots would cause the flag to become true. 
- lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper; - anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper; - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - // A null or empty byte array is always represented as a zero byte - byte sepByte = - SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, - field); - - if (!isFixedWidth && (sepByte == QueryConstants.DESC_SEPARATOR_BYTE || ( - !exclusiveUpper && (fieldIndex < schema.getMaxFields() || inclusiveUpper - || exclusiveLower)))) { - key[offset++] = sepByte; - // Set lastInclusiveUpperSingleKey back to false if this is the last pk column - // as we don't want to increment the QueryConstants.SEPARATOR_BYTE byte in this case. - // To test if this is the last pk column we need to consider the span of this slot - // and the field index to see if this slot considers the last column. - // But if last field of rowKey is variable length and also DESC, the trailing 0xFF - // is not removed when stored in HBASE, so for such case, we should not set - // lastInclusiveUpperSingleKey back to false. - if (sepByte != QueryConstants.DESC_SEPARATOR_BYTE) { - lastInclusiveUpperSingleKey &= - (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1; - } - } - } else { - byte[] sepBytes = - SchemaUtil.getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(), - bytes.length == 0, field.getSortOrder()); - if (!isFixedWidth && ( - sepBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES || ( - !exclusiveUpper && (fieldIndex < schema.getMaxFields() || inclusiveUpper - || exclusiveLower)))) { - key[offset++] = sepBytes[0]; - key[offset++] = sepBytes[1]; - if (sepBytes != QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { - lastInclusiveUpperSingleKey &= - (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1; - } - } - } - if (exclusiveUpper) { - // Cannot include anything else on the key, as otherwise - // keys that match the upper range will be included. For example WHERE k1 < 2 and k2 = 3 - // would match k1 = 2, k2 = 3 which is wrong. - break; - } - // If we are setting the lower bound with an exclusive range key, we need to bump the - // slot up for each key part. For an upper bound, we bump up an inclusive key, but - // only after the last key part. - if (exclusiveLower) { - if (!ByteUtil.nextKey(key, offset)) { - // Special case for not being able to increment. - // In this case we return a negative byteOffset to - // remove this part from the key being formed. Since the - // key has overflowed, this means that we should not - // have an end key specified. - return -byteOffset; - } - // We're filtering on values being non null here, but we still need the 0xFF - // terminator, since DESC keys ignore the last byte as it's expected to be - // the terminator. Without this, we'd ignore the separator byte that was - // just added and incremented. 
- if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - if (!isFixedWidth && bytes.length == 0 && - SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field) - == QueryConstants.DESC_SEPARATOR_BYTE) { - key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE; - } - } else { - if (!isFixedWidth && bytes.length == 0 && - SchemaUtil.getSeparatorBytesForVarBinaryEncoded( - schema.rowKeyOrderOptimizable(), false, field.getSortOrder()) - == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { - key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[0]; - key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[1]; - } - } - } - - fieldIndex += slotSpan[i] + 1; - } - if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey || lastUnboundUpper) { - if (!ByteUtil.nextKey(key, offset)) { - // Special case for not being able to increment. - // In this case we return a negative byteOffset to - // remove this part from the key being formed. Since the - // key has overflowed, this means that we should not - // have an end key specified. - return -byteOffset; - } - } - // Remove trailing separator bytes, since the columns may have been added - // after the table has data, in which case there won't be a separator - // byte. - if (bound == Bound.LOWER) { - while (--i >= schemaStartIndex && offset > byteOffset && !(field = - schema.getField(--fieldIndex)).getDataType().isFixedWidth() - && field.getSortOrder() == SortOrder.ASC && hasSeparatorBytes(key, field, offset)) { - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - offset--; - fieldIndex -= slotSpan[i]; - } else { - offset -= 2; - fieldIndex -= slotSpan[i]; - } - } - } - return offset - byteOffset; - } - - private static boolean hasSeparatorBytes(byte[] key, Field field, int offset) { - return (field.getDataType() != PVarbinaryEncoded.INSTANCE - && key[offset - 1] == QueryConstants.SEPARATOR_BYTE) || ( - field.getDataType() == PVarbinaryEncoded.INSTANCE && offset >= 2 - && key[offset - 1] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[1] - && key[offset - 2] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[0]); - } - - public static boolean adjustScanFilterForGlobalIndexRegionScanner(Scan scan) { - // For rebuilds we use count (*) as query for regular tables which ends up setting the FirstKeyOnlyFilter on scan - // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col - // For rebuilds we need all columns and all versions - - Filter filter = scan.getFilter(); - if (filter instanceof PagingFilter) { - PagingFilter pageFilter = (PagingFilter) filter; - Filter delegateFilter = pageFilter.getDelegateFilter(); - if (delegateFilter instanceof EmptyColumnOnlyFilter) { - pageFilter.setDelegateFilter(null); - } else if (delegateFilter instanceof FirstKeyOnlyFilter) { - scan.setFilter(null); - return true; - } else if (delegateFilter != null) { - // Override the filter so that we get all versions - pageFilter.setDelegateFilter(new AllVersionsIndexRebuildFilter(delegateFilter)); - } - } else if (filter instanceof EmptyColumnOnlyFilter) { - scan.setFilter(null); - return true; - } else if (filter instanceof FirstKeyOnlyFilter) { - scan.setFilter(null); - return true; - } else if (filter != null) { - // Override the filter so that we get all versions - scan.setFilter(new AllVersionsIndexRebuildFilter(filter)); - } - return false; - } - - public static interface BytesComparator { - public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2); - }; 
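A worked example (not part of this patch) of the slot span arithmetic described in the getRowKeyPosition javadoc above, using the {0, 1, 0} case from that comment; only the wrapper class name is invented.

public class SlotSpanSketch {
  static int getRowKeyPosition(int[] slotSpan, int slotPosition) {
    int offset = 0;
    for (int i = 0; i < slotPosition; i++) {
      offset += slotSpan[i]; // extra row key columns spanned by earlier slots
    }
    return offset + slotPosition;
  }

  public static void main(String[] args) {
    int[] slotSpan = { 0, 1, 0 };
    // Slot 1 spans one extra column, so slot index 2 maps to row key position 3.
    System.out.println(getRowKeyPosition(slotSpan, 2)); // prints 3
  }
}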
- - private static final BytesComparator DESC_VAR_WIDTH_COMPARATOR = new BytesComparator() { - - @Override - public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - return DescVarLengthFastByteComparisons.compareTo(b1, s1, l1, b2, s2, l2); - } - - }; - - private static final BytesComparator ASC_FIXED_WIDTH_COMPARATOR = new BytesComparator() { - - @Override - public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2); - } - - }; - public static BytesComparator getComparator(boolean isFixedWidth, SortOrder sortOrder) { - return isFixedWidth || sortOrder == SortOrder.ASC ? ASC_FIXED_WIDTH_COMPARATOR : DESC_VAR_WIDTH_COMPARATOR; - } - public static BytesComparator getComparator(Field field) { - return getComparator(field.getDataType().isFixedWidth(),field.getSortOrder()); - } - /** - * Perform a binary lookup on the list of KeyRange for the tightest slot such that the slotBound - * of the current slot is higher or equal than the slotBound of our range. - * @return the index of the slot whose slot bound equals or are the tightest one that is - * smaller than rangeBound of range, or slots.length if no bound can be found. + * If ordering by columns not on the PK axis, we can't execute a query serially because we need + * to do a merge sort across all the scans which isn't possible with SerialIterators. Similar + * reasoning follows for salted and local index tables when ordering rows in a row key order. + * Serial execution is OK in other cases since SerialIterators will execute scans in the correct + * order. */ - public static int searchClosestKeyRangeWithUpperHigherThanPtr(List slots, ImmutableBytesWritable ptr, int lower, Field field) { - int upper = slots.size() - 1; - int mid; - BytesComparator comparator = ScanUtil.getComparator(field.getDataType().isFixedWidth(), field.getSortOrder()); - while (lower <= upper) { - mid = (lower + upper) / 2; - int cmp = slots.get(mid).compareUpperToLowerBound(ptr, true, comparator); - if (cmp < 0) { - lower = mid + 1; - } else if (cmp > 0) { - upper = mid - 1; - } else { - return mid; - } - } - mid = (lower + upper) / 2; - if (mid == 0 && slots.get(mid).compareUpperToLowerBound(ptr, true, comparator) > 0) { - return mid; - } else { - return ++mid; - } - } - - public static ScanRanges newScanRanges(List mutations) throws SQLException { - List keys = Lists.newArrayListWithExpectedSize(mutations.size()); - for (Mutation m : mutations) { - keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow(), SortOrder.ASC)); - } - ScanRanges keyRanges = ScanRanges.createPointLookup(keys); - return keyRanges; - } - - /** - * Converts a partially qualified KeyRange into a KeyRange with a - * inclusive lower bound and an exclusive upper bound, widening - * as necessary. - */ - public static KeyRange convertToInclusiveExclusiveRange (KeyRange partialRange, RowKeySchema schema, ImmutableBytesWritable ptr) { - // Ensure minMaxRange is lower inclusive and upper exclusive, as that's - // what we need to intersect against for the HBase scan. 
- byte[] lowerRange = partialRange.getLowerRange(); - if (!partialRange.lowerUnbound()) { - if (!partialRange.isLowerInclusive()) { - lowerRange = ScanUtil.nextKey(lowerRange, schema, ptr); - } - } - - byte[] upperRange = partialRange.getUpperRange(); - if (!partialRange.upperUnbound()) { - if (partialRange.isUpperInclusive()) { - upperRange = ScanUtil.nextKey(upperRange, schema, ptr); - } - } - if (partialRange.getLowerRange() != lowerRange || partialRange.getUpperRange() != upperRange) { - partialRange = KeyRange.getKeyRange(lowerRange, upperRange); - } - return partialRange; - } - - private static byte[] nextKey(byte[] key, RowKeySchema schema, ImmutableBytesWritable ptr) { - int pos = 0; - int maxOffset = schema.iterator(key, ptr); - while (schema.next(ptr, pos, maxOffset) != null) { - pos++; - } - Field field = schema.getField(pos - 1); - if (!field.getDataType().isFixedWidth()) { - if (field.getDataType() != PVarbinaryEncoded.INSTANCE) { - byte[] newLowerRange = new byte[key.length + 1]; - System.arraycopy(key, 0, newLowerRange, 0, key.length); - newLowerRange[key.length] = - SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), key.length == 0, - field); - key = newLowerRange; - } else { - byte[] newLowerRange = new byte[key.length + 2]; - System.arraycopy(key, 0, newLowerRange, 0, key.length); - byte[] sepBytes = - SchemaUtil.getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(), - key.length == 0, field.getSortOrder()); - newLowerRange[key.length] = sepBytes[0]; - newLowerRange[key.length + 1] = sepBytes[1]; - key = newLowerRange; - } - } else { - key = Arrays.copyOf(key, key.length); - } - ByteUtil.nextKey(key, key.length); - return key; - } - - public static boolean isReversed(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN) != null; - } - - public static void setReversed(Scan scan) { - scan.setAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN, PDataType.TRUE_BYTES); - scan.setLoadColumnFamiliesOnDemand(false); - } - - public static void unsetReversed(Scan scan) { - scan.setAttribute(BaseScannerRegionObserverConstants.REVERSE_SCAN, PDataType.FALSE_BYTES); - scan.setLoadColumnFamiliesOnDemand(true); - } - - // Start/stop row must be swapped if scan is being done in reverse - public static void setupReverseScan(Scan scan) { - if (isReversed(scan) && !scan.isReversed()) { - byte[] tmpStartRow = scan.getStartRow(); - boolean tmpIncludeStartRow = scan.includeStartRow(); - scan.withStartRow(scan.getStopRow(), scan.includeStopRow()); - scan.withStopRow(tmpStartRow, tmpIncludeStartRow); - scan.setReversed(true); - } - } - - /** - * prefix region start key to the start row/stop row suffix and set as scan boundaries. - * @param scan - */ - public static void setupLocalIndexScan(Scan scan) { - byte[] prefix = scan.getStartRow().length == 0 ? new byte[scan.getStopRow().length]: scan.getStartRow(); - int prefixLength = scan.getStartRow().length == 0? scan.getStopRow().length: scan.getStartRow().length; - if (scan.getAttribute(SCAN_START_ROW_SUFFIX) != null) { - scan.withStartRow(ScanRanges.prefixKey(scan.getAttribute(SCAN_START_ROW_SUFFIX), 0, prefix, prefixLength)); - } - if (scan.getAttribute(SCAN_STOP_ROW_SUFFIX) != null) { - scan.withStopRow(ScanRanges.prefixKey(scan.getAttribute(SCAN_STOP_ROW_SUFFIX), 0, prefix, prefixLength)); - } - } - - public static byte[] getActualStartRow(Scan localIndexScan, RegionInfo regionInfo) { - return localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX) == null ? 
localIndexScan - .getStartRow() : ScanRanges.prefixKey(localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX), 0 , - regionInfo.getStartKey().length == 0 ? new byte[regionInfo.getEndKey().length] - : regionInfo.getStartKey(), - regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo - .getStartKey().length); - } - - /** - * Set all attributes required and boundaries for local index scan. - * @param keyOffset - * @param regionStartKey - * @param regionEndKey - * @param newScan - */ - public static void setLocalIndexAttributes(Scan newScan, int keyOffset, byte[] regionStartKey, byte[] regionEndKey, byte[] startRowSuffix, byte[] stopRowSuffix) { - if (ScanUtil.isLocalIndex(newScan)) { - newScan.setAttribute(SCAN_ACTUAL_START_ROW, regionStartKey); - newScan.withStartRow(regionStartKey); - newScan.withStopRow(regionEndKey); - if (keyOffset > 0 ) { - newScan.setAttribute(SCAN_START_ROW_SUFFIX, ScanRanges.stripPrefix(startRowSuffix, keyOffset)); - } else { - newScan.setAttribute(SCAN_START_ROW_SUFFIX, startRowSuffix); - } - if (keyOffset > 0) { - newScan.setAttribute(SCAN_STOP_ROW_SUFFIX, ScanRanges.stripPrefix(stopRowSuffix, keyOffset)); - } else { - newScan.setAttribute(SCAN_STOP_ROW_SUFFIX, stopRowSuffix); - } - } - } - - public static boolean isContextScan(Scan scan, StatementContext context) { - return Bytes.compareTo(context.getScan().getStartRow(), scan.getStartRow()) == 0 && Bytes - .compareTo(context.getScan().getStopRow(), scan.getStopRow()) == 0; - } - public static int getRowKeyOffset(byte[] regionStartKey, byte[] regionEndKey) { - return regionStartKey.length > 0 ? regionStartKey.length : regionEndKey.length; - } - - private static void setRowKeyOffset(Filter filter, int offset) { - if (filter instanceof BooleanExpressionFilter) { - BooleanExpressionFilter boolFilter = (BooleanExpressionFilter)filter; - IndexUtil.setRowKeyExpressionOffset(boolFilter.getExpression(), offset); - } else if (filter instanceof SkipScanFilter) { - SkipScanFilter skipScanFilter = (SkipScanFilter)filter; - skipScanFilter.setOffset(offset); - } else if (filter instanceof DistinctPrefixFilter) { - DistinctPrefixFilter prefixFilter = (DistinctPrefixFilter) filter; - prefixFilter.setOffset(offset); - } - } - - public static void setRowKeyOffset(Scan scan, int offset) { - Filter filter = scan.getFilter(); - if (filter == null) { - return; - } - if (filter instanceof PagingFilter) { - filter = ((PagingFilter) filter).getDelegateFilter(); - if (filter == null) { - return; - } - } - if (filter instanceof FilterList) { - FilterList filterList = (FilterList)filter; - for (Filter childFilter : filterList.getFilters()) { - setRowKeyOffset(childFilter, offset); - } - } else { - setRowKeyOffset(filter, offset); - } - } - - public static int[] getDefaultSlotSpans(int nSlots) { - return new int[nSlots]; - } - - /** - * Finds the position in the row key schema for a given position in the scan slots. - * For example, with a slotSpan of {0, 1, 0}, the slot at index 1 spans an extra column in the row key. This means - * that the slot at index 2 has a slot index of 2 but a row key index of 3. - * To calculate the "adjusted position" index, we simply add up the number of extra slots spanned and offset - * the slotPosition by that much. - * @param slotSpan the extra span per skip scan slot. corresponds to {@link ScanRanges#getSlotSpans()} - * @param slotPosition the index of a slot in the SkipScan slots list. 
- * @return the equivalent row key position in the RowKeySchema - */ - public static int getRowKeyPosition(int[] slotSpan, int slotPosition) { - int offset = 0; - - for (int i = 0; i < slotPosition; i++) { - offset += slotSpan[i]; - } - - return offset + slotPosition; - } - - public static boolean isAnalyzeTable(Scan scan) { - return scan.getAttribute((BaseScannerRegionObserverConstants.ANALYZE_TABLE)) != null; - } - - public static boolean crossesPrefixBoundary(byte[] key, byte[] prefixBytes, int prefixLength) { - if (key.length < prefixLength) { - return true; - } - if (prefixBytes.length >= prefixLength) { - return Bytes.compareTo(prefixBytes, 0, prefixLength, key, 0, prefixLength) != 0; - } - return hasNonZeroLeadingBytes(key, prefixLength); - } - - public static byte[] getPrefix(byte[] startKey, int prefixLength) { - // If startKey is at beginning, then our prefix will be a null padded byte array - return startKey.length >= prefixLength ? startKey : EMPTY_BYTE_ARRAY; - } - - private static boolean hasNonZeroLeadingBytes(byte[] key, int nBytesToCheck) { - if (nBytesToCheck > ZERO_BYTE_ARRAY.length) { - do { - if (Bytes.compareTo(key, nBytesToCheck - ZERO_BYTE_ARRAY.length, ZERO_BYTE_ARRAY.length, ScanUtil.ZERO_BYTE_ARRAY, 0, ScanUtil.ZERO_BYTE_ARRAY.length) != 0) { - return true; - } - nBytesToCheck -= ZERO_BYTE_ARRAY.length; - } while (nBytesToCheck > ZERO_BYTE_ARRAY.length); - } - return Bytes.compareTo(key, 0, nBytesToCheck, ZERO_BYTE_ARRAY, 0, nBytesToCheck) != 0; - } - - public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, boolean isMultiTenantTable, boolean isSharedIndex) - throws SQLException { - return isMultiTenantTable ? - getTenantIdBytes(schema, isSalted, tenantId, isSharedIndex) - : tenantId.getBytes(); - } - - public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, boolean isSharedIndex) - throws SQLException { - int pkPos = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0); - Field field = schema.getField(pkPos); - PDataType dataType = field.getDataType(); - byte[] convertedValue; - try { - Object value = dataType.toObject(tenantId.getString()); - convertedValue = dataType.toBytes(value); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(convertedValue); - dataType.pad(ptr, field.getMaxLength(), field.getSortOrder()); - convertedValue = ByteUtil.copyKeyBytesIfNecessary(ptr); - } catch(IllegalDataException ex) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE) - .build().buildException(); - } - return convertedValue; - } - - public static Iterator getFilterIterator(Scan scan) { - Iterator filterIterator; - Filter topLevelFilter = scan.getFilter(); - if (topLevelFilter == null) { - filterIterator = Collections.emptyIterator(); - } else if (topLevelFilter instanceof FilterList) { - filterIterator = ((FilterList) topLevelFilter).getFilters().iterator(); - } else { - filterIterator = Iterators.singletonIterator(topLevelFilter); - } - return filterIterator; - } - - /** - * Selecting underlying scanners in a round-robin fashion is possible if there is no ordering of - * rows needed, not even row key order. Also no point doing round robin of scanners if fetch - * size is 1. 
- */ - public static boolean isRoundRobinPossible(OrderBy orderBy, StatementContext context) - throws SQLException { - int fetchSize = context.getStatement().getFetchSize(); - return fetchSize > 1 && !shouldRowsBeInRowKeyOrder(orderBy, context) - && orderBy.getOrderByExpressions().isEmpty(); - } - - public static boolean forceRowKeyOrder(StatementContext context) { - return context.getConnection().getQueryServices().getProps() - .getBoolean(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER); - } - - public static boolean shouldRowsBeInRowKeyOrder(OrderBy orderBy, StatementContext context) { - return forceRowKeyOrder(context) || orderBy == FWD_ROW_KEY_ORDER_BY || orderBy == REV_ROW_KEY_ORDER_BY; - } - - public static TimeRange intersectTimeRange(TimeRange rowTimestampColRange, TimeRange scanTimeRange, Long scn) throws IOException, SQLException { - long scnToUse = scn == null ? HConstants.LATEST_TIMESTAMP : scn; - long lowerRangeToBe = 0; - long upperRangeToBe = scnToUse; - if (rowTimestampColRange != null) { - long minRowTimestamp = rowTimestampColRange.getMin(); - long maxRowTimestamp = rowTimestampColRange.getMax(); - if ((lowerRangeToBe > maxRowTimestamp) || (upperRangeToBe < minRowTimestamp)) { - return null; // degenerate - } else { - // there is an overlap of ranges - lowerRangeToBe = Math.max(lowerRangeToBe, minRowTimestamp); - upperRangeToBe = Math.min(upperRangeToBe, maxRowTimestamp); - } - } - if (scanTimeRange != null) { - long minScanTimeRange = scanTimeRange.getMin(); - long maxScanTimeRange = scanTimeRange.getMax(); - if ((lowerRangeToBe > maxScanTimeRange) || (upperRangeToBe < lowerRangeToBe)) { - return null; // degenerate - } else { - // there is an overlap of ranges - lowerRangeToBe = Math.max(lowerRangeToBe, minScanTimeRange); - upperRangeToBe = Math.min(upperRangeToBe, maxScanTimeRange); - } - } - return TimeRange.between(lowerRangeToBe, upperRangeToBe); - } - - public static boolean isDefaultTimeRange(TimeRange range) { - return range.getMin() == 0 && range.getMax() == Long.MAX_VALUE; - } - - /** - * @return true if scanners could be left open and records retrieved by simply advancing them on - * the server side. To make sure HBase doesn't cancel the leases and close the open - * scanners, we need to periodically renew leases. To look at the earliest HBase version - * that supports renewing leases, see - * {@link MetaDataProtocol#MIN_RENEW_LEASE_VERSION} - */ - public static boolean isPacingScannersPossible(StatementContext context) { - return context.getConnection().getQueryServices().isRenewingLeasesEnabled(); - } - - public static void addOffsetAttribute(Scan scan, Integer offset) { - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET, Bytes.toBytes(offset)); - } - - public static final boolean canQueryBeExecutedSerially(PTable table, OrderBy orderBy, StatementContext context) { - /* - * If ordering by columns not on the PK axis, we can't execute a query serially because we - * need to do a merge sort across all the scans which isn't possible with SerialIterators. - * Similar reasoning follows for salted and local index tables when ordering rows in a row - * key order. Serial execution is OK in other cases since SerialIterators will execute scans - * in the correct order. 
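 * Two illustrative (hypothetical) cases: SELECT ... ORDER BY non_pk_col LIMIT 10 needs a merge
 * sort across scans and so cannot run serially, whereas an un-ordered SELECT ... LIMIT 10 on a
 * non-salted, non-local-index table can, since SerialIterators already walk the scans in order.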
- */ - if (!orderBy.getOrderByExpressions().isEmpty() - || ((table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL) && shouldRowsBeInRowKeyOrder( - orderBy, context))) { - return false; - } + if ( + !orderBy.getOrderByExpressions().isEmpty() + || ((table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL) + && shouldRowsBeInRowKeyOrder(orderBy, context)) + ) { + return false; + } + return true; + } + + public static boolean hasDynamicColumns(PTable table) { + for (PColumn col : table.getColumns()) { + if (col.isDynamic()) { return true; + } + } + return false; + } + + public static boolean isIndexRebuild(Scan scan) { + return scan.getAttribute((BaseScannerRegionObserverConstants.REBUILD_INDEXES)) != null; + } + + public static int getClientVersion(Scan scan) { + int clientVersion = UNKNOWN_CLIENT_VERSION; + byte[] clientVersionBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); + if (clientVersionBytes != null) { + clientVersion = Bytes.toInt(clientVersionBytes); + } else { + LOGGER.warn("Scan attribute {} not found. Scan attributes: {}", + BaseScannerRegionObserverConstants.CLIENT_VERSION, scan.getAttributesMap()); + } + return clientVersion; + } + + public static void setClientVersion(Scan scan, int version) { + scan.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, Bytes.toBytes(version)); + } + + public static boolean isServerSideMaskingEnabled(PhoenixConnection phoenixConnection) { + String isServerSideMaskingSet = + phoenixConnection.getClientInfo(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED); + return (phoenixConnection.getQueryServices().getConfiguration().getBoolean( + QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, + QueryServicesOptions.DEFAULT_SERVER_SIDE_MASKING_ENABLED) + || ((isServerSideMaskingSet != null) && (Boolean.parseBoolean(isServerSideMaskingSet)))); + } + + public static boolean getStatsForParallelizationProp(PhoenixConnection conn, PTable table) + throws SQLException { + Boolean useStats = table.useStatsForParallelization(); + if (useStats != null) { + return useStats; } - - public static boolean hasDynamicColumns(PTable table) { - for (PColumn col : table.getColumns()) { - if (col.isDynamic()) { - return true; - } - } - return false; - } - - public static boolean isIndexRebuild(Scan scan) { - return scan.getAttribute((BaseScannerRegionObserverConstants.REBUILD_INDEXES)) != null; - } - - public static int getClientVersion(Scan scan) { - int clientVersion = UNKNOWN_CLIENT_VERSION; - byte[] clientVersionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); - if (clientVersionBytes != null) { - clientVersion = Bytes.toInt(clientVersionBytes); - } else { - LOGGER.warn("Scan attribute {} not found. 
Scan attributes: {}", - BaseScannerRegionObserverConstants.CLIENT_VERSION, scan.getAttributesMap()); - } - return clientVersion; - } - - public static void setClientVersion(Scan scan, int version) { - scan.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, Bytes.toBytes(version)); - } - - public static boolean isServerSideMaskingEnabled(PhoenixConnection phoenixConnection) { - String isServerSideMaskingSet = phoenixConnection.getClientInfo( - QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED); - return (phoenixConnection.getQueryServices() - .getConfiguration().getBoolean( - QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, - QueryServicesOptions.DEFAULT_SERVER_SIDE_MASKING_ENABLED) || - ((isServerSideMaskingSet != null) && (Boolean.parseBoolean(isServerSideMaskingSet)))); - } - - public static boolean getStatsForParallelizationProp(PhoenixConnection conn, PTable table) - throws SQLException { - Boolean useStats = table.useStatsForParallelization(); - if (useStats != null) { - return useStats; - } - /* - * For a view index, we use the property set on view. For indexes on base table, whether - * global or local, we use the property set on the base table. Null check needed when - * dropping local indexes. - */ - PName tenantId = conn.getTenantId(); - int retryCount = 0; - while (retryCount++<2) { - if (table.getType() == PTableType.INDEX && table.getParentName() != null) { - String parentTableName = table.getParentName().getString(); - try { - PTable parentTable = - conn.getTable(new PTableKey(tenantId, parentTableName)); - useStats = parentTable.useStatsForParallelization(); - if (useStats != null) { - return useStats; - } - } catch (TableNotFoundException e) { - // try looking up the table without the tenant id (for - // global tables) - if (tenantId != null) { - tenantId = null; - } else { - LOGGER.warn("Unable to find parent table \"" + parentTableName + "\" of table \"" - + table.getName().getString() + "\" to determine USE_STATS_FOR_PARALLELIZATION",e); - } - } - } - } - return conn.getQueryServices().getConfiguration() - .getBoolean(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION); - } - - - public static int getTTL(Scan scan) { - byte[] phoenixTTL = scan.getAttribute(BaseScannerRegionObserverConstants.TTL); - if (phoenixTTL == null) { - return DEFAULT_TTL; - } - return Bytes.readAsInt(phoenixTTL, 0, phoenixTTL.length); - } - - public static boolean isPhoenixTableTTLEnabled(Configuration conf) { - return conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); - } - - public static boolean isMaskTTLExpiredRows(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED) != null && - (Bytes.compareTo(scan.getAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED), - PDataType.TRUE_BYTES) == 0) - && scan.getAttribute(BaseScannerRegionObserverConstants.TTL) != null; - } - - public static boolean isDeleteTTLExpiredRows(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED) != null && ( - Bytes.compareTo(scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED), - PDataType.TRUE_BYTES) == 0) - && scan.getAttribute(BaseScannerRegionObserverConstants.TTL) != null; - } - - public static boolean isEmptyColumn(Cell cell, byte[] emptyCF, byte[] emptyCQ) { - return CellUtil.matchingFamily(cell, emptyCF, 0, emptyCF.length) && - CellUtil.matchingQualifier(cell, emptyCQ, 0, 
emptyCQ.length); - } - - public static long getMaxTimestamp(List cellList) { - long maxTs = 0; - long ts = 0; - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - Cell cell = cellIterator.next(); - ts = cell.getTimestamp(); - if (ts > maxTs) { - maxTs = ts; - } - } - return maxTs; - } - - public static boolean isTTLExpired(Cell cell, Scan scan, long nowTS) { - long ts = cell.getTimestamp(); - int ttl = ScanUtil.getTTL(scan); - return ts + ttl < nowTS; - } - - /** - * This determines if we need to add the empty column to the scan. The empty column is - * added only when the scan includes another column family but not the empty column family or - * the empty column family includes at least one column. + /* + * For a view index, we use the property set on view. For indexes on base table, whether global + * or local, we use the property set on the base table. Null check needed when dropping local + * indexes. */ - private static boolean shouldAddEmptyColumn(Scan scan, byte[] emptyCF) { - Map> familyMap = scan.getFamilyMap(); - if (familyMap == null || familyMap.isEmpty()) { - // This means that scan includes all columns. Nothing more to do. - return false; - } - for (Map.Entry> entry : familyMap.entrySet()) { - byte[] cf = entry.getKey(); - if (java.util.Arrays.equals(cf, emptyCF)) { - NavigableSet family = entry.getValue(); - if (family != null && !family.isEmpty()) { - // Found the empty column family, and it is not empty. The empty column - // may be already included but no need to check as adding a new one will replace - // the old one - return true; - } - return false; - } - } - // The colum family is not found and there is another column family in the scan. In this - // we need to add the empty column - return true; - } - - private static void addEmptyColumnToFilter(Filter filter, byte[] emptyCF, byte[] emptyCQ) { - if (filter instanceof EncodedQualifiersColumnProjectionFilter) { - ((EncodedQualifiersColumnProjectionFilter) filter). - addTrackedColumn(ENCODED_EMPTY_COLUMN_NAME); - } else if (filter instanceof ColumnProjectionFilter) { - ((ColumnProjectionFilter) filter).addTrackedColumn(new ImmutableBytesPtr(emptyCF), - new ImmutableBytesPtr(emptyCQ)); - } else if (filter instanceof MultiEncodedCQKeyValueComparisonFilter) { - ((MultiEncodedCQKeyValueComparisonFilter) filter). 
- setMinQualifier(ENCODED_EMPTY_COLUMN_NAME); - } - } - - private static void addEmptyColumnToFilterList(FilterList filterList, - byte[] emptyCF, byte[] emptyCQ) { - Iterator filterIterator = filterList.getFilters().iterator(); - while (filterIterator.hasNext()) { - Filter filter = filterIterator.next(); - if (filter instanceof FilterList) { - addEmptyColumnToFilterList((FilterList) filter, emptyCF, emptyCQ); - } else { - addEmptyColumnToFilter(filter, emptyCF, emptyCQ); - } - } - } - - public static void addEmptyColumnToScan(Scan scan, byte[] emptyCF, byte[] emptyCQ) { - Filter filter = scan.getFilter(); - if (filter != null) { - if (filter instanceof FilterList) { - addEmptyColumnToFilterList((FilterList) filter, emptyCF, emptyCQ); - } else { - addEmptyColumnToFilter(filter, emptyCF, emptyCQ); - } - } - if (shouldAddEmptyColumn(scan, emptyCF)) { - scan.addColumn(emptyCF, emptyCQ); - } - } - - public static PTable getDataTable(PTable index, PhoenixConnection conn) throws SQLException { - String schemaName = index.getParentSchemaName().getString(); - String tableName = index.getParentTableName().getString(); - PTable dataTable; + PName tenantId = conn.getTenantId(); + int retryCount = 0; + while (retryCount++ < 2) { + if (table.getType() == PTableType.INDEX && table.getParentName() != null) { + String parentTableName = table.getParentName().getString(); try { - dataTable = conn.getTable(SchemaUtil.getTableName(schemaName, tableName)); - return dataTable; + PTable parentTable = conn.getTable(new PTableKey(tenantId, parentTableName)); + useStats = parentTable.useStatsForParallelization(); + if (useStats != null) { + return useStats; + } } catch (TableNotFoundException e) { - // This index table must be being deleted - return null; - } - } - - public static void setScanAttributesForIndexReadRepair(Scan scan, PTable table, - PhoenixConnection phoenixConnection, StatementContext context) throws SQLException { - boolean isTransforming = (table.getTransformingNewTable() != null); - PTable indexTable = table; - // Transforming index table can be repaired in regular path via globalindexchecker coproc on it. 
- // phoenixConnection is closed when it is called from mappers - if (!phoenixConnection.isClosed() && table.getType() == PTableType.TABLE && isTransforming) { - SystemTransformRecord systemTransformRecord = TransformClient.getTransformRecord(indexTable.getSchemaName(), indexTable.getTableName(), - null, phoenixConnection.getTenantId(), phoenixConnection); - if (systemTransformRecord == null) { - return; - } - // Old table is still active, cutover didn't happen yet, so, no need to read repair - if (!systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { - return; - } - byte[] oldTableBytes = systemTransformRecord.getOldMetadata(); - if (oldTableBytes == null || oldTableBytes.length == 0) { - return; - } - PTable oldTable = null; - try { - oldTable = PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(oldTableBytes)); - } catch (IOException e) { - LOGGER.error("Cannot parse old table info for read repair for table " + table.getName()); - return; - } - TransformMaintainer indexMaintainer = indexTable.getTransformMaintainer(oldTable, phoenixConnection); - scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, - indexTable.getTableName().getBytes()); - ScanUtil.annotateScanWithMetadataAttributes(oldTable, scan); - // This is the path where we are reading from the newly transformed table - if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - TransformMaintainer.serialize(oldTable, ptr, indexTable, phoenixConnection); - scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); - } - scan.setAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN, TRUE_BYTES); - scan.setAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME, oldTable.getPhysicalName().getBytes()); - byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); - byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); - scan.setAttribute(BaseScannerRegionObserverConstants.READ_REPAIR_TRANSFORMING_TABLE, TRUE_BYTES); - } else { - if (table.getType() != PTableType.INDEX || !IndexUtil.isGlobalIndex(indexTable)) { - return; - } - if (table.isTransactional() && table.getIndexType() == IndexType.UNCOVERED_GLOBAL) { - return; - } - PTable dataTable = context.getCDCDataTableRef() != null ? - context.getCDCDataTableRef().getTable() : - ScanUtil.getDataTable(indexTable, phoenixConnection); - if (dataTable == null) { - // This index table must be being deleted. No need to set the scan attributes - return; - } - // MetaDataClient modifies the index table name for view indexes if the parent view of an index has a child - // view. 
This, we need to recreate a PTable object with the correct table name for the rest of this code to work - if (indexTable.getViewIndexId() != null && indexTable.getName().getString().contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - int lastIndexOf = indexTable.getName().getString().lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - String indexName = indexTable.getName().getString().substring(lastIndexOf + 1); - indexTable = phoenixConnection.getTable(indexName); - } - if (!dataTable.getIndexes().contains(indexTable)) { - return; - } - - scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, - indexTable.getTableName().getBytes()); - ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); - if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(indexTable), phoenixConnection); - scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); - } - if (IndexUtil.isCoveredGlobalIndex(indexTable)) { - if (!isIndexRebuild(scan)) { - scan.setAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN, TRUE_BYTES); - } - } else { - scan.setAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX, TRUE_BYTES); - } - scan.setAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME, dataTable.getPhysicalName().getBytes()); - IndexMaintainer indexMaintainer = indexTable.getIndexMaintainer(dataTable, phoenixConnection); - byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); - byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); - if (scan.getAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS) == null) { - BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); - } - } - } - - public static void setScanAttributesForPhoenixTTL(Scan scan, PTable table, - PhoenixConnection phoenixConnection) throws SQLException { - - //If entity is a view and phoenix.view.ttl.enabled is false then don't set TTL scan attribute. - if ((table.getType() == PTableType.VIEW) && !phoenixConnection.getQueryServices().getConfiguration().getBoolean( - QueryServices.PHOENIX_VIEW_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED - )) { - return; - } - - // If Phoenix level TTL is not enabled OR is a system table then return. 
- if (!isPhoenixTableTTLEnabled(phoenixConnection.getQueryServices().getConfiguration())) { - if (SchemaUtil.isSystemTable( - SchemaUtil.getTableNameAsBytes(table.getSchemaName().getString(), - table.getTableName().getString()))) { - scan.setAttribute(BaseScannerRegionObserverConstants.IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM, - Bytes.toBytes(true)); - } - return; - } - - PTable dataTable = table; - String tableName = table.getTableName().getString(); - if ((table.getType() == PTableType.INDEX) && (table.getParentName() != null)) { - String parentSchemaName = table.getParentSchemaName().getString(); - String parentTableName = table.getParentTableName().getString(); - // Look up the parent view as we could have inherited this index from an ancestor - // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) - // the view index name will be V2#V1#VIndex - // Since we store PHOENIX_TTL at every level, all children have the same value. - // So looking at the child view is sufficient. - if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - String parentViewName = - SchemaUtil.getSchemaNameFromFullName(tableName, - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - parentSchemaName = SchemaUtil.getSchemaNameFromFullName(parentViewName); - parentTableName = SchemaUtil.getTableNameFromFullName(parentViewName); - } - try { - dataTable = phoenixConnection.getTable(SchemaUtil.getTableName(parentSchemaName, - parentTableName)); - } catch (TableNotFoundException e) { - // This data table does not exists anymore. No need to set the scan attributes - return; - } - } - if (dataTable.getTTL() != 0) { - byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyColumnName = - table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - scan.setAttribute(BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME, - Bytes.toBytes(tableName)); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyColumnFamilyName); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyColumnName); - scan.setAttribute(BaseScannerRegionObserverConstants.TTL, - Bytes.toBytes(Integer.valueOf(dataTable.getTTL()))); - if (!ScanUtil.isDeleteTTLExpiredRows(scan)) { - scan.setAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED, PDataType.TRUE_BYTES); - } - if (ScanUtil.isLocalIndex(scan)) { - byte[] actualStartRow = scan.getAttribute(SCAN_ACTUAL_START_ROW) != null ? 
- scan.getAttribute(SCAN_ACTUAL_START_ROW) : - HConstants.EMPTY_BYTE_ARRAY; - ScanUtil.setLocalIndexAttributes(scan, 0, - actualStartRow, - HConstants.EMPTY_BYTE_ARRAY, - scan.getStartRow(), scan.getStopRow()); - } - } - } - - public static void setScanAttributesForClient(Scan scan, PTable table, - StatementContext context) throws SQLException { - PhoenixConnection phoenixConnection = context.getConnection(); - setScanAttributesForIndexReadRepair(scan, table, phoenixConnection, context); - setScanAttributesForPhoenixTTL(scan, table, phoenixConnection); - byte[] emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); - byte[] emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); - if (emptyCF != null && emptyCQ != null) { - addEmptyColumnToScan(scan, emptyCF, emptyCQ); - } else if (!isAnalyzeTable(scan)) { - emptyCF = SchemaUtil.getEmptyColumnFamily(table); - emptyCQ = table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); - scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); - addEmptyColumnToScan(scan, emptyCF, emptyCQ); - } - - setScanAttributeForPaging(scan, phoenixConnection); - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_SERVER_RETURN_VALID_ROW_KEY, - Bytes.toBytes(true)); - - if (context.getCDCTableRef() != null) { - scan.setAttribute(CDC_DATA_TABLE_DEF, CDCTableInfo.toProto(context).toByteArray()); - CDCUtil.setupScanForCDC(scan); - adjustScanFilterForGlobalIndexRegionScanner(scan); - } - } - - public static void setScanAttributeForPaging(Scan scan, PhoenixConnection phoenixConnection) { - if (phoenixConnection.getQueryServices().getProps().getBoolean( - QueryServices.PHOENIX_SERVER_PAGING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_PHOENIX_SERVER_PAGING_ENABLED)) { - long pageSizeMs = phoenixConnection.getQueryServices().getProps() - .getInt(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, -1); - if (pageSizeMs == -1) { - // Use the half of the HBase RPC timeout value as the server page size to make sure - // that the HBase region server will be able to send a heartbeat message to the - // client before the client times out. 
- pageSizeMs = (long) (phoenixConnection.getQueryServices().getProps() - .getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) * 0.5); - } - - scan.setAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS, Bytes.toBytes(Long.valueOf(pageSizeMs))); - } - - } - - public static void getDummyResult(byte[] rowKey, List result) { - Cell keyValue = - PhoenixKeyValueUtil.newKeyValue(rowKey, 0, - rowKey.length, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY, - 0, EMPTY_BYTE_ARRAY, 0, EMPTY_BYTE_ARRAY.length); - result.add(keyValue); - } - - public static Tuple getDummyTuple(byte[] rowKey) { - List result = new ArrayList(1); - getDummyResult(rowKey, result); - return new ResultTuple(Result.create(result)); - } - - public static Tuple getDummyTuple(Tuple tuple) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - tuple.getKey(ptr); - return getDummyTuple(ptr.copyBytes()); - } - - public static boolean isDummy(Cell cell) { - return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY); - } - - public static boolean isDummy(Result result) { - if (result.rawCells().length != 1) { - return false; - } - Cell cell = result.rawCells()[0]; - return isDummy(cell); - } - - public static boolean isDummy(List result) { - if (result.size() != 1) { - return false; - } - Cell cell = result.get(0); - return isDummy(cell); - } - - public static boolean isDummy(Tuple tuple) { - if (tuple instanceof ResultTuple) { - return isDummy(((ResultTuple) tuple).getResult()); + // try looking up the table without the tenant id (for + // global tables) + if (tenantId != null) { + tenantId = null; + } else { + LOGGER.warn("Unable to find parent table \"" + parentTableName + "\" of table \"" + + table.getName().getString() + "\" to determine USE_STATS_FOR_PARALLELIZATION", e); + } + } + } + } + return conn.getQueryServices().getConfiguration().getBoolean(USE_STATS_FOR_PARALLELIZATION, + DEFAULT_USE_STATS_FOR_PARALLELIZATION); + } + + public static int getTTL(Scan scan) { + byte[] phoenixTTL = scan.getAttribute(BaseScannerRegionObserverConstants.TTL); + if (phoenixTTL == null) { + return DEFAULT_TTL; + } + return Bytes.readAsInt(phoenixTTL, 0, phoenixTTL.length); + } + + public static boolean isPhoenixTableTTLEnabled(Configuration conf) { + return conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } + + public static boolean isMaskTTLExpiredRows(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED) != null + && (Bytes.compareTo( + scan.getAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED), + PDataType.TRUE_BYTES) == 0) + && scan.getAttribute(BaseScannerRegionObserverConstants.TTL) != null; + } + + public static boolean isDeleteTTLExpiredRows(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED) != null + && (Bytes.compareTo( + scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED), + PDataType.TRUE_BYTES) == 0) + && scan.getAttribute(BaseScannerRegionObserverConstants.TTL) != null; + } + + public static boolean isEmptyColumn(Cell cell, byte[] emptyCF, byte[] emptyCQ) { + return CellUtil.matchingFamily(cell, emptyCF, 0, emptyCF.length) + && CellUtil.matchingQualifier(cell, emptyCQ, 0, emptyCQ.length); + } + + public static long getMaxTimestamp(List cellList) { + long maxTs = 0; + long ts = 0; + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + Cell 
cell = cellIterator.next(); + ts = cell.getTimestamp(); + if (ts > maxTs) { + maxTs = ts; + } + } + return maxTs; + } + + public static boolean isTTLExpired(Cell cell, Scan scan, long nowTS) { + long ts = cell.getTimestamp(); + int ttl = ScanUtil.getTTL(scan); + return ts + ttl < nowTS; + } + + /** + * This determines if we need to add the empty column to the scan. The empty column is added only + * when the scan includes another column family but not the empty column family or the empty + * column family includes at least one column. + */ + private static boolean shouldAddEmptyColumn(Scan scan, byte[] emptyCF) { + Map> familyMap = scan.getFamilyMap(); + if (familyMap == null || familyMap.isEmpty()) { + // This means that scan includes all columns. Nothing more to do. + return false; + } + for (Map.Entry> entry : familyMap.entrySet()) { + byte[] cf = entry.getKey(); + if (java.util.Arrays.equals(cf, emptyCF)) { + NavigableSet family = entry.getValue(); + if (family != null && !family.isEmpty()) { + // Found the empty column family, and it is not empty. The empty column + // may be already included but no need to check as adding a new one will replace + // the old one + return true; } return false; - } - - public static PagingFilter getPhoenixPagingFilter(Scan scan) { - Filter filter = scan.getFilter(); - if (filter != null && filter instanceof PagingFilter) { - PagingFilter pageFilter = (PagingFilter) filter; - return pageFilter; - } - return null; - } - - /** - * - * The server page size expressed in ms is the maximum time we want the Phoenix server code to - * spend for each iteration of ResultScanner. For each ResultScanner#next() can be translated - * into one or more HBase RegionScanner#next() calls by a Phoenix RegionScanner object in - * a loop. To ensure that the total time spent by the Phoenix server code will not exceed - * the configured page size value, SERVER_PAGE_SIZE_MS, the loop time in a Phoenix region - * scanner is limited by 0.6 * SERVER_PAGE_SIZE_MS and each HBase RegionScanner#next() time - * which is controlled by PagingFilter is set to 0.3 * SERVER_PAGE_SIZE_MS. - * - */ - private static long getPageSizeMs(Scan scan, double factor) { - long pageSizeMs = Long.MAX_VALUE; - byte[] pageSizeMsBytes = scan.getAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS); - if (pageSizeMsBytes != null) { - pageSizeMs = Bytes.toLong(pageSizeMsBytes); - pageSizeMs = (long) (pageSizeMs * factor); - } - return pageSizeMs; - } - - public static long getPageSizeMsForRegionScanner(Scan scan) { return getPageSizeMs(scan, 0.6); } - - public static long getPageSizeMsForFilter(Scan scan) { - return getPageSizeMs(scan, 0.3); - } - - /** - * Put the attributes we want to annotate the WALs with (such as logical table name, - * tenant, DDL timestamp, etc) on the Scan object so that on the - * Ungrouped/GroupedAggregateCoprocessor side, we - * annotate the mutations with them, and then they get written into the WAL as part of - * the RegionObserver's doWALAppend hook. 
- * @param table Table metadata for the target table/view of the write - * @param scan Scan to trigger the server-side coproc - */ - public static void annotateScanWithMetadataAttributes(PTable table, Scan scan) { - if (table.getTenantId() != null) { - scan.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), - table.getTenantId().getBytes()); - } - scan.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), - table.getSchemaName().getBytes()); - scan.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), - table.getTableName().getBytes()); - scan.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), - table.getType().getValue().getBytes()); - if (table.getLastDDLTimestamp() != null) { - scan.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), - Bytes.toBytes(table.getLastDDLTimestamp())); - } - - if (table.isChangeDetectionEnabled()) { - if (table.getExternalSchemaId() != null) { - scan.setAttribute(MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString(), - Bytes.toBytes(table.getExternalSchemaId())); - } - } - } - - /** - * Annotate Mutation with required metadata attributes (tenant id, schema name, logical table - * name, table type, last ddl timestamp) from the client side. - * - * @param tenantId tenant id. - * @param schemaName schema name. - * @param logicalTableName logical table name. - * @param tableType table type. - * @param timestamp last ddl timestamp. - * @param mutation mutation object to attach attributes. - */ - public static void annotateMutationWithMetadataAttributes(byte[] tenantId, - byte[] schemaName, - byte[] logicalTableName, - byte[] tableType, - byte[] timestamp, - Mutation mutation) { - if (tenantId != null) { - mutation.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), - tenantId); - } - mutation.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), - schemaName); - mutation.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), - logicalTableName); - mutation.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), - tableType); - if (timestamp != null) { - mutation.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), - timestamp); - } - } - - /** - * Annotate Scan with required metadata attributes (tenant id, schema name, logical table - * name, table type, last ddl timestamp), from old scan object to new scan object. - * - * @param oldScan old scan object. - * @param newScan new scan object. 
- */ - public static void annotateScanWithMetadataAttributes(Scan oldScan, Scan newScan) { - byte[] tenantId = - oldScan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); - byte[] schemaName = - oldScan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); - byte[] logicalTableName = oldScan.getAttribute( - MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); - byte[] tableType = - oldScan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); - byte[] timestamp = - oldScan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString()); - if (tenantId != null) { - newScan.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), tenantId); - } - if (schemaName != null) { - newScan.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), - schemaName); - } - if (logicalTableName != null) { - newScan.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), - logicalTableName); - } - if (tableType != null) { - newScan.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), - tableType); - } - if (timestamp != null) { - newScan.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), - timestamp); - } - } - - /** - * Annotate Mutation with required metadata attributes (tenant id, schema name, logical table - * name, table type, last ddl timestamp), derived from the given PTable object. - * - * @param table table object to derive metadata attributes from. - * @param mutation mutation object. - */ - public static void annotateMutationWithMetadataAttributes(PTable table, Mutation mutation) { - if (table.getTenantId() != null) { - mutation.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), - table.getTenantId().getBytes()); - } - mutation.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), - table.getSchemaName().getBytes()); - mutation.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), - table.getTableName().getBytes()); - mutation.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), - table.getType().getValue().getBytes()); - if (table.getLastDDLTimestamp() != null) { - mutation.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), - Bytes.toBytes(table.getLastDDLTimestamp())); - } - } - - public static PageFilter removePageFilterFromFilterList(FilterList filterList) { - Iterator filterIterator = filterList.getFilters().iterator(); - while (filterIterator.hasNext()) { - Filter filter = filterIterator.next(); - if (filter instanceof PageFilter) { - filterIterator.remove(); - return (PageFilter) filter; - } else if (filter instanceof FilterList) { - PageFilter pageFilter = removePageFilterFromFilterList((FilterList) filter); - if (pageFilter != null) { - return pageFilter; - } - } - } - return null; - } - - /** - * Determine if the client is incompatible and therefore will not be able to parse the - * valid rowkey that server returns. - * - * @param scan Scan object. - * @return true if the client is incompatible and therefore will not be able to parse the - * valid rowkey that server returns. 
- */ - public static boolean isIncompatibleClientForServerReturnValidRowKey(Scan scan) { - return scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_SERVER_RETURN_VALID_ROW_KEY) == null; - } - - // This method assumes that there is at most one instance of PageFilter in a scan - public static PageFilter removePageFilter(Scan scan) { - Filter filter = scan.getFilter(); - if (filter != null) { - PagingFilter pagingFilter = null; - if (filter instanceof PagingFilter) { - pagingFilter = (PagingFilter) filter; - filter = pagingFilter.getDelegateFilter(); - if (filter == null) { - return null; - } - } - if (filter instanceof PageFilter) { - if (pagingFilter != null) { - pagingFilter.setDelegateFilter(null); - scan.setFilter(pagingFilter); - } else { - scan.setFilter(null); - } - return (PageFilter) filter; - } else if (filter instanceof FilterList) { - return removePageFilterFromFilterList((FilterList) filter); - } - } - return null; - } - - public static SkipScanFilter removeSkipScanFilterFromFilterList(FilterList filterList) { - Iterator filterIterator = filterList.getFilters().iterator(); - while (filterIterator.hasNext()) { - Filter filter = filterIterator.next(); - if (filter instanceof SkipScanFilter - && ((SkipScanFilter) filter).isMultiKeyPointLookup()) { - filterIterator.remove(); - return (SkipScanFilter) filter; - } else if (filter instanceof FilterList) { - SkipScanFilter skipScanFilter = removeSkipScanFilterFromFilterList((FilterList) filter); - if (skipScanFilter != null) { - return skipScanFilter; - } - } - } - return null; - } - public static SkipScanFilter removeSkipScanFilter(Scan scan) { - Filter filter = scan.getFilter(); - if (filter != null) { - PagingFilter pagingFilter = null; - if (filter instanceof PagingFilter) { - pagingFilter = (PagingFilter) filter; - filter = pagingFilter.getDelegateFilter(); - if (filter == null) { - return null; - } - } - if (filter instanceof SkipScanFilter - && ((SkipScanFilter) filter).isMultiKeyPointLookup()) { - if (pagingFilter != null) { - pagingFilter.setDelegateFilter(null); - scan.setFilter(pagingFilter); - } else { - scan.setFilter(null); - } - return (SkipScanFilter) filter; - } else if (filter instanceof FilterList) { - return removeSkipScanFilterFromFilterList((FilterList) filter); - } - } - return null; - } - - public static void setScanAttributeForMaxLookbackAge(Scan scan, Long maxLookbackAge) { - Preconditions.checkNotNull(scan); - if (maxLookbackAge != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE, - Bytes.toBytes(maxLookbackAge)); - } - } - - public static Long getMaxLookbackAgeFromScanAttribute(Scan scan) { - Preconditions.checkNotNull(scan); - byte[] maxLookbackAge = scan.getAttribute(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE); - return maxLookbackAge != null ? Bytes.toLong(maxLookbackAge) : null; - } - - /** - * Verify whether the given row key is in the scan boundaries i.e. scan start and end keys. - * - * @param ptr row key. - * @param scan scan object used to retrieve the result set. - * @throws ResultSetOutOfScanRangeException if the row key is out of scan range. 
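 * An illustrative case: on a forward scan whose inclusive start row is "b", a returned row key
 * of "a" sorts before the start row and the check throws with RESULT_IS_OUT_OF_SCAN_START_KEY,
 * while a row key of "b" or anything later passes the start-row check.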
- */ - public static void verifyKeyInScanRange(ImmutableBytesWritable ptr, - Scan scan) - throws ResultSetOutOfScanRangeException { - try { - if (scan.isReversed()) { - verifyScanRangesForReverseScan(ptr, scan, scan.getStartRow(), scan.getStopRow()); - } else { - verifyScanRanges(ptr, scan, scan.getStartRow(), scan.getStopRow()); - } - } catch (ResultSetOutOfScanRangeException e) { - if (isLocalIndex(scan)) { - verifyScanRanges(ptr, scan, scan.getAttribute(SCAN_START_ROW_SUFFIX), - scan.getAttribute(SCAN_STOP_ROW_SUFFIX)); - return; - } - if (scan.getAttribute(SCAN_ACTUAL_START_ROW) == null) { - throw e; - } - verifyScanRanges(ptr, scan, scan.getAttribute(SCAN_ACTUAL_START_ROW), - scan.getStopRow()); - } - } - - private static void verifyScanRanges(ImmutableBytesWritable ptr, - Scan scan, - byte[] startRow, - byte[] stopRow) - throws ResultSetOutOfScanRangeException { - if (startRow != null - && Bytes.compareTo(startRow, HConstants.EMPTY_START_ROW) != 0) { - if (scan.includeStartRow()) { - if (Bytes.compareTo(startRow, ptr.get()) > 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); - } - } else { - if (Bytes.compareTo(startRow, ptr.get()) >= 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); - } - } - } - if (stopRow != null - && Bytes.compareTo(stopRow, HConstants.EMPTY_END_ROW) != 0) { - if (scan.includeStopRow()) { - if (Bytes.compareTo(stopRow, ptr.get()) < 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); - } - } else { - if (Bytes.compareTo(stopRow, ptr.get()) <= 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); - } - } - } - } - - private static void verifyScanRangesForReverseScan(ImmutableBytesWritable ptr, - Scan scan, - byte[] startRow, - byte[] stopRow) - throws ResultSetOutOfScanRangeException { - if (stopRow != null - && Bytes.compareTo(stopRow, HConstants.EMPTY_START_ROW) != 0) { - if (scan.includeStopRow()) { - if (Bytes.compareTo(stopRow, ptr.get()) > 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); - } - } else { - if (Bytes.compareTo(stopRow, ptr.get()) >= 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); - } - } + } + } + // The colum family is not found and there is another column family in the scan. 
In this + // we need to add the empty column + return true; + } + + private static void addEmptyColumnToFilter(Filter filter, byte[] emptyCF, byte[] emptyCQ) { + if (filter instanceof EncodedQualifiersColumnProjectionFilter) { + ((EncodedQualifiersColumnProjectionFilter) filter) + .addTrackedColumn(ENCODED_EMPTY_COLUMN_NAME); + } else if (filter instanceof ColumnProjectionFilter) { + ((ColumnProjectionFilter) filter).addTrackedColumn(new ImmutableBytesPtr(emptyCF), + new ImmutableBytesPtr(emptyCQ)); + } else if (filter instanceof MultiEncodedCQKeyValueComparisonFilter) { + ((MultiEncodedCQKeyValueComparisonFilter) filter).setMinQualifier(ENCODED_EMPTY_COLUMN_NAME); + } + } + + private static void addEmptyColumnToFilterList(FilterList filterList, byte[] emptyCF, + byte[] emptyCQ) { + Iterator filterIterator = filterList.getFilters().iterator(); + while (filterIterator.hasNext()) { + Filter filter = filterIterator.next(); + if (filter instanceof FilterList) { + addEmptyColumnToFilterList((FilterList) filter, emptyCF, emptyCQ); + } else { + addEmptyColumnToFilter(filter, emptyCF, emptyCQ); + } + } + } + + public static void addEmptyColumnToScan(Scan scan, byte[] emptyCF, byte[] emptyCQ) { + Filter filter = scan.getFilter(); + if (filter != null) { + if (filter instanceof FilterList) { + addEmptyColumnToFilterList((FilterList) filter, emptyCF, emptyCQ); + } else { + addEmptyColumnToFilter(filter, emptyCF, emptyCQ); + } + } + if (shouldAddEmptyColumn(scan, emptyCF)) { + scan.addColumn(emptyCF, emptyCQ); + } + } + + public static PTable getDataTable(PTable index, PhoenixConnection conn) throws SQLException { + String schemaName = index.getParentSchemaName().getString(); + String tableName = index.getParentTableName().getString(); + PTable dataTable; + try { + dataTable = conn.getTable(SchemaUtil.getTableName(schemaName, tableName)); + return dataTable; + } catch (TableNotFoundException e) { + // This index table must be being deleted + return null; + } + } + + public static void setScanAttributesForIndexReadRepair(Scan scan, PTable table, + PhoenixConnection phoenixConnection, StatementContext context) throws SQLException { + boolean isTransforming = (table.getTransformingNewTable() != null); + PTable indexTable = table; + // Transforming index table can be repaired in regular path via globalindexchecker coproc on it. 
+ // phoenixConnection is closed when it is called from mappers + if (!phoenixConnection.isClosed() && table.getType() == PTableType.TABLE && isTransforming) { + SystemTransformRecord systemTransformRecord = + TransformClient.getTransformRecord(indexTable.getSchemaName(), indexTable.getTableName(), + null, phoenixConnection.getTenantId(), phoenixConnection); + if (systemTransformRecord == null) { + return; + } + // Old table is still active, cutover didn't happen yet, so, no need to read repair + if ( + !systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name()) + ) { + return; + } + byte[] oldTableBytes = systemTransformRecord.getOldMetadata(); + if (oldTableBytes == null || oldTableBytes.length == 0) { + return; + } + PTable oldTable = null; + try { + oldTable = PTableImpl.createFromProto(PTableProtos.PTable.parseFrom(oldTableBytes)); + } catch (IOException e) { + LOGGER.error("Cannot parse old table info for read repair for table " + table.getName()); + return; + } + TransformMaintainer indexMaintainer = + indexTable.getTransformMaintainer(oldTable, phoenixConnection); + scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, + indexTable.getTableName().getBytes()); + ScanUtil.annotateScanWithMetadataAttributes(oldTable, scan); + // This is the path where we are reading from the newly transformed table + if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + TransformMaintainer.serialize(oldTable, ptr, indexTable, phoenixConnection); + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); + } + scan.setAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN, TRUE_BYTES); + scan.setAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME, + oldTable.getPhysicalName().getBytes()); + byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); + byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); + scan.setAttribute(BaseScannerRegionObserverConstants.READ_REPAIR_TRANSFORMING_TABLE, + TRUE_BYTES); + } else { + if (table.getType() != PTableType.INDEX || !IndexUtil.isGlobalIndex(indexTable)) { + return; + } + if (table.isTransactional() && table.getIndexType() == IndexType.UNCOVERED_GLOBAL) { + return; + } + PTable dataTable = context.getCDCDataTableRef() != null + ? context.getCDCDataTableRef().getTable() + : ScanUtil.getDataTable(indexTable, phoenixConnection); + if (dataTable == null) { + // This index table must be being deleted. No need to set the scan attributes + return; + } + // MetaDataClient modifies the index table name for view indexes if the parent view of an + // index has a child + // view. 
This, we need to recreate a PTable object with the correct table name for the rest of + // this code to work + if ( + indexTable.getViewIndexId() != null && indexTable.getName().getString() + .contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR) + ) { + int lastIndexOf = indexTable.getName().getString() + .lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + String indexName = indexTable.getName().getString().substring(lastIndexOf + 1); + indexTable = phoenixConnection.getTable(indexName); + } + if (!dataTable.getIndexes().contains(indexTable)) { + return; + } + + scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, + indexTable.getTableName().getBytes()); + ScanUtil.annotateScanWithMetadataAttributes(dataTable, scan); + if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(indexTable), + phoenixConnection); + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); + } + if (IndexUtil.isCoveredGlobalIndex(indexTable)) { + if (!isIndexRebuild(scan)) { + scan.setAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN, TRUE_BYTES); + } + } else { + scan.setAttribute(BaseScannerRegionObserverConstants.UNCOVERED_GLOBAL_INDEX, TRUE_BYTES); + } + scan.setAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME, + dataTable.getPhysicalName().getBytes()); + IndexMaintainer indexMaintainer = indexTable.getIndexMaintainer(dataTable, phoenixConnection); + byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); + byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); + if (scan.getAttribute(BaseScannerRegionObserverConstants.VIEW_CONSTANTS) == null) { + BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable); + } + } + } + + public static void setScanAttributesForPhoenixTTL(Scan scan, PTable table, + PhoenixConnection phoenixConnection) throws SQLException { + + // If entity is a view and phoenix.view.ttl.enabled is false then don't set TTL scan attribute. + if ( + (table.getType() == PTableType.VIEW) && !phoenixConnection.getQueryServices() + .getConfiguration().getBoolean(QueryServices.PHOENIX_VIEW_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED) + ) { + return; + } + + // If Phoenix level TTL is not enabled OR is a system table then return. 
+ if (!isPhoenixTableTTLEnabled(phoenixConnection.getQueryServices().getConfiguration())) { + if ( + SchemaUtil.isSystemTable(SchemaUtil.getTableNameAsBytes(table.getSchemaName().getString(), + table.getTableName().getString())) + ) { + scan.setAttribute(BaseScannerRegionObserverConstants.IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM, + Bytes.toBytes(true)); + } + return; + } + + PTable dataTable = table; + String tableName = table.getTableName().getString(); + if ((table.getType() == PTableType.INDEX) && (table.getParentName() != null)) { + String parentSchemaName = table.getParentSchemaName().getString(); + String parentTableName = table.getParentTableName().getString(); + // Look up the parent view as we could have inherited this index from an ancestor + // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) + // the view index name will be V2#V1#VIndex + // Since we store PHOENIX_TTL at every level, all children have the same value. + // So looking at the child view is sufficient. + if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { + String parentViewName = SchemaUtil.getSchemaNameFromFullName(tableName, + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + parentSchemaName = SchemaUtil.getSchemaNameFromFullName(parentViewName); + parentTableName = SchemaUtil.getTableNameFromFullName(parentViewName); + } + try { + dataTable = + phoenixConnection.getTable(SchemaUtil.getTableName(parentSchemaName, parentTableName)); + } catch (TableNotFoundException e) { + // This data table does not exists anymore. No need to set the scan attributes + return; + } + } + if (dataTable.getTTL() != 0) { + byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyColumnName = + table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + scan.setAttribute(BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME, + Bytes.toBytes(tableName)); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, + emptyColumnFamilyName); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, + emptyColumnName); + scan.setAttribute(BaseScannerRegionObserverConstants.TTL, + Bytes.toBytes(Integer.valueOf(dataTable.getTTL()))); + if (!ScanUtil.isDeleteTTLExpiredRows(scan)) { + scan.setAttribute(BaseScannerRegionObserverConstants.MASK_PHOENIX_TTL_EXPIRED, + PDataType.TRUE_BYTES); + } + if (ScanUtil.isLocalIndex(scan)) { + byte[] actualStartRow = scan.getAttribute(SCAN_ACTUAL_START_ROW) != null + ? 
scan.getAttribute(SCAN_ACTUAL_START_ROW) + : HConstants.EMPTY_BYTE_ARRAY; + ScanUtil.setLocalIndexAttributes(scan, 0, actualStartRow, HConstants.EMPTY_BYTE_ARRAY, + scan.getStartRow(), scan.getStopRow()); + } + } + } + + public static void setScanAttributesForClient(Scan scan, PTable table, StatementContext context) + throws SQLException { + PhoenixConnection phoenixConnection = context.getConnection(); + setScanAttributesForIndexReadRepair(scan, table, phoenixConnection, context); + setScanAttributesForPhoenixTTL(scan, table, phoenixConnection); + byte[] emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); + byte[] emptyCQ = + scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); + if (emptyCF != null && emptyCQ != null) { + addEmptyColumnToScan(scan, emptyCF, emptyCQ); + } else if (!isAnalyzeTable(scan)) { + emptyCF = SchemaUtil.getEmptyColumnFamily(table); + emptyCQ = table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyCF); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); + addEmptyColumnToScan(scan, emptyCF, emptyCQ); + } + + setScanAttributeForPaging(scan, phoenixConnection); + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_SERVER_RETURN_VALID_ROW_KEY, + Bytes.toBytes(true)); + + if (context.getCDCTableRef() != null) { + scan.setAttribute(CDC_DATA_TABLE_DEF, CDCTableInfo.toProto(context).toByteArray()); + CDCUtil.setupScanForCDC(scan); + adjustScanFilterForGlobalIndexRegionScanner(scan); + } + } + + public static void setScanAttributeForPaging(Scan scan, PhoenixConnection phoenixConnection) { + if ( + phoenixConnection.getQueryServices().getProps().getBoolean( + QueryServices.PHOENIX_SERVER_PAGING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_PHOENIX_SERVER_PAGING_ENABLED) + ) { + long pageSizeMs = phoenixConnection.getQueryServices().getProps() + .getInt(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, -1); + if (pageSizeMs == -1) { + // Use the half of the HBase RPC timeout value as the server page size to make sure + // that the HBase region server will be able to send a heartbeat message to the + // client before the client times out. 
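        // Worked example (assuming stock HBase defaults): with hbase.rpc.timeout left at
        // HConstants.DEFAULT_HBASE_RPC_TIMEOUT (60000 ms), the page size computed below is
        // 60000 * 0.5 = 30000 ms.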
+ pageSizeMs = (long) (phoenixConnection.getQueryServices().getProps() + .getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) * 0.5); + } + + scan.setAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS, + Bytes.toBytes(Long.valueOf(pageSizeMs))); + } + + } + + public static void getDummyResult(byte[] rowKey, List result) { + Cell keyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, 0, rowKey.length, EMPTY_BYTE_ARRAY, + EMPTY_BYTE_ARRAY, 0, EMPTY_BYTE_ARRAY, 0, EMPTY_BYTE_ARRAY.length); + result.add(keyValue); + } + + public static Tuple getDummyTuple(byte[] rowKey) { + List result = new ArrayList(1); + getDummyResult(rowKey, result); + return new ResultTuple(Result.create(result)); + } + + public static Tuple getDummyTuple(Tuple tuple) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + tuple.getKey(ptr); + return getDummyTuple(ptr.copyBytes()); + } + + public static boolean isDummy(Cell cell) { + return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY); + } + + public static boolean isDummy(Result result) { + if (result.rawCells().length != 1) { + return false; + } + Cell cell = result.rawCells()[0]; + return isDummy(cell); + } + + public static boolean isDummy(List result) { + if (result.size() != 1) { + return false; + } + Cell cell = result.get(0); + return isDummy(cell); + } + + public static boolean isDummy(Tuple tuple) { + if (tuple instanceof ResultTuple) { + return isDummy(((ResultTuple) tuple).getResult()); + } + return false; + } + + public static PagingFilter getPhoenixPagingFilter(Scan scan) { + Filter filter = scan.getFilter(); + if (filter != null && filter instanceof PagingFilter) { + PagingFilter pageFilter = (PagingFilter) filter; + return pageFilter; + } + return null; + } + + /** + * The server page size expressed in ms is the maximum time we want the Phoenix server code to + * spend for each iteration of ResultScanner. For each ResultScanner#next() can be translated into + * one or more HBase RegionScanner#next() calls by a Phoenix RegionScanner object in a loop. To + * ensure that the total time spent by the Phoenix server code will not exceed the configured page + * size value, SERVER_PAGE_SIZE_MS, the loop time in a Phoenix region scanner is limited by 0.6 * + * SERVER_PAGE_SIZE_MS and each HBase RegionScanner#next() time which is controlled by + * PagingFilter is set to 0.3 * SERVER_PAGE_SIZE_MS. + */ + private static long getPageSizeMs(Scan scan, double factor) { + long pageSizeMs = Long.MAX_VALUE; + byte[] pageSizeMsBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS); + if (pageSizeMsBytes != null) { + pageSizeMs = Bytes.toLong(pageSizeMsBytes); + pageSizeMs = (long) (pageSizeMs * factor); + } + return pageSizeMs; + } + + public static long getPageSizeMsForRegionScanner(Scan scan) { + return getPageSizeMs(scan, 0.6); + } + + public static long getPageSizeMsForFilter(Scan scan) { + return getPageSizeMs(scan, 0.3); + } + + /** + * Put the attributes we want to annotate the WALs with (such as logical table name, tenant, DDL + * timestamp, etc) on the Scan object so that on the Ungrouped/GroupedAggregateCoprocessor side, + * we annotate the mutations with them, and then they get written into the WAL as part of the + * RegionObserver's doWALAppend hook. 
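 * For instance (hypothetical tenant case): a scan issued on behalf of tenant "acme" against a
 * view would carry TENANT_ID, SCHEMA_NAME, LOGICAL_TABLE_NAME, TABLE_TYPE and, when available,
 * the last DDL TIMESTAMP as attributes, which the coprocessor then copies onto the mutations it
 * writes.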
+ * @param table Table metadata for the target table/view of the write + * @param scan Scan to trigger the server-side coproc + */ + public static void annotateScanWithMetadataAttributes(PTable table, Scan scan) { + if (table.getTenantId() != null) { + scan.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), + table.getTenantId().getBytes()); + } + scan.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), + table.getSchemaName().getBytes()); + scan.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), + table.getTableName().getBytes()); + scan.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), + table.getType().getValue().getBytes()); + if (table.getLastDDLTimestamp() != null) { + scan.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), + Bytes.toBytes(table.getLastDDLTimestamp())); + } + + if (table.isChangeDetectionEnabled()) { + if (table.getExternalSchemaId() != null) { + scan.setAttribute(MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString(), + Bytes.toBytes(table.getExternalSchemaId())); + } + } + } + + /** + * Annotate Mutation with required metadata attributes (tenant id, schema name, logical table + * name, table type, last ddl timestamp) from the client side. + * @param tenantId tenant id. + * @param schemaName schema name. + * @param logicalTableName logical table name. + * @param tableType table type. + * @param timestamp last ddl timestamp. + * @param mutation mutation object to attach attributes. + */ + public static void annotateMutationWithMetadataAttributes(byte[] tenantId, byte[] schemaName, + byte[] logicalTableName, byte[] tableType, byte[] timestamp, Mutation mutation) { + if (tenantId != null) { + mutation.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), tenantId); + } + mutation.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), schemaName); + mutation.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), + logicalTableName); + mutation.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), tableType); + if (timestamp != null) { + mutation.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), timestamp); + } + } + + /** + * Annotate Scan with required metadata attributes (tenant id, schema name, logical table name, + * table type, last ddl timestamp), from old scan object to new scan object. + * @param oldScan old scan object. + * @param newScan new scan object. 
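A hypothetical client-side call to the byte[] overload of annotateMutationWithMetadataAttributes shown above; the row key, names, type code and timestamp are invented, and the helper is referenced by its simple name because its enclosing class is not visible in this hunk.

    // Hypothetical usage sketch, not part of this patch.
    Put put = new Put(Bytes.toBytes("row-1"));
    annotateMutationWithMetadataAttributes(
      null,                               // no tenant id, so the TENANT_ID attribute is skipped
      Bytes.toBytes("MY_SCHEMA"),
      Bytes.toBytes("MY_TABLE"),
      Bytes.toBytes("u"),                 // serialized table type; "u" for a user table is an assumption
      Bytes.toBytes(1700000000000L),      // invented last DDL timestamp, in epoch millis
      put);
    // put now carries the SCHEMA_NAME, LOGICAL_TABLE_NAME, TABLE_TYPE and TIMESTAMP attributes.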
+ */ + public static void annotateScanWithMetadataAttributes(Scan oldScan, Scan newScan) { + byte[] tenantId = oldScan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); + byte[] schemaName = + oldScan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); + byte[] logicalTableName = + oldScan.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); + byte[] tableType = + oldScan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); + byte[] timestamp = + oldScan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString()); + if (tenantId != null) { + newScan.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), tenantId); + } + if (schemaName != null) { + newScan.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), schemaName); + } + if (logicalTableName != null) { + newScan.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), + logicalTableName); + } + if (tableType != null) { + newScan.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), tableType); + } + if (timestamp != null) { + newScan.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), timestamp); + } + } + + /** + * Annotate Mutation with required metadata attributes (tenant id, schema name, logical table + * name, table type, last ddl timestamp), derived from the given PTable object. + * @param table table object to derive metadata attributes from. + * @param mutation mutation object. + */ + public static void annotateMutationWithMetadataAttributes(PTable table, Mutation mutation) { + if (table.getTenantId() != null) { + mutation.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), + table.getTenantId().getBytes()); + } + mutation.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), + table.getSchemaName().getBytes()); + mutation.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), + table.getTableName().getBytes()); + mutation.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), + table.getType().getValue().getBytes()); + if (table.getLastDDLTimestamp() != null) { + mutation.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), + Bytes.toBytes(table.getLastDDLTimestamp())); + } + } + + public static PageFilter removePageFilterFromFilterList(FilterList filterList) { + Iterator filterIterator = filterList.getFilters().iterator(); + while (filterIterator.hasNext()) { + Filter filter = filterIterator.next(); + if (filter instanceof PageFilter) { + filterIterator.remove(); + return (PageFilter) filter; + } else if (filter instanceof FilterList) { + PageFilter pageFilter = removePageFilterFromFilterList((FilterList) filter); + if (pageFilter != null) { + return pageFilter; + } + } + } + return null; + } + + /** + * Determine if the client is incompatible and therefore will not be able to parse the valid + * rowkey that server returns. + * @param scan Scan object. + * @return true if the client is incompatible and therefore will not be able to parse the valid + * rowkey that server returns. 
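A small sketch of the nested FilterList case that removePageFilterFromFilterList handles above; the filter choices and the page size are arbitrary.

    // Sketch only: the helper walks nested FilterLists, detaches the first PageFilter it
    // finds through the iterator, and hands it back to the caller.
    FilterList inner = new FilterList(new PageFilter(100L));
    FilterList outer = new FilterList(new FirstKeyOnlyFilter(), inner);
    PageFilter removed = removePageFilterFromFilterList(outer);
    // removed != null && removed.getPageSize() == 100L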
+ */ + public static boolean isIncompatibleClientForServerReturnValidRowKey(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_SERVER_RETURN_VALID_ROW_KEY) + == null; + } + + // This method assumes that there is at most one instance of PageFilter in a scan + public static PageFilter removePageFilter(Scan scan) { + Filter filter = scan.getFilter(); + if (filter != null) { + PagingFilter pagingFilter = null; + if (filter instanceof PagingFilter) { + pagingFilter = (PagingFilter) filter; + filter = pagingFilter.getDelegateFilter(); + if (filter == null) { + return null; } - if (startRow != null - && Bytes.compareTo(startRow, HConstants.EMPTY_END_ROW) != 0) { - if (scan.includeStartRow()) { - if (Bytes.compareTo(startRow, ptr.get()) < 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); - } - } else { - if (Bytes.compareTo(startRow, ptr.get()) <= 0) { - throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); - } - } + } + if (filter instanceof PageFilter) { + if (pagingFilter != null) { + pagingFilter.setDelegateFilter(null); + scan.setFilter(pagingFilter); + } else { + scan.setFilter(null); + } + return (PageFilter) filter; + } else if (filter instanceof FilterList) { + return removePageFilterFromFilterList((FilterList) filter); + } + } + return null; + } + + public static SkipScanFilter removeSkipScanFilterFromFilterList(FilterList filterList) { + Iterator filterIterator = filterList.getFilters().iterator(); + while (filterIterator.hasNext()) { + Filter filter = filterIterator.next(); + if (filter instanceof SkipScanFilter && ((SkipScanFilter) filter).isMultiKeyPointLookup()) { + filterIterator.remove(); + return (SkipScanFilter) filter; + } else if (filter instanceof FilterList) { + SkipScanFilter skipScanFilter = removeSkipScanFilterFromFilterList((FilterList) filter); + if (skipScanFilter != null) { + return skipScanFilter; + } + } + } + return null; + } + + public static SkipScanFilter removeSkipScanFilter(Scan scan) { + Filter filter = scan.getFilter(); + if (filter != null) { + PagingFilter pagingFilter = null; + if (filter instanceof PagingFilter) { + pagingFilter = (PagingFilter) filter; + filter = pagingFilter.getDelegateFilter(); + if (filter == null) { + return null; } - } + } + if (filter instanceof SkipScanFilter && ((SkipScanFilter) filter).isMultiKeyPointLookup()) { + if (pagingFilter != null) { + pagingFilter.setDelegateFilter(null); + scan.setFilter(pagingFilter); + } else { + scan.setFilter(null); + } + return (SkipScanFilter) filter; + } else if (filter instanceof FilterList) { + return removeSkipScanFilterFromFilterList((FilterList) filter); + } + } + return null; + } + + public static void setScanAttributeForMaxLookbackAge(Scan scan, Long maxLookbackAge) { + Preconditions.checkNotNull(scan); + if (maxLookbackAge != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE, + Bytes.toBytes(maxLookbackAge)); + } + } + + public static Long getMaxLookbackAgeFromScanAttribute(Scan scan) { + Preconditions.checkNotNull(scan); + byte[] maxLookbackAge = scan.getAttribute(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE); + return maxLookbackAge != null ? Bytes.toLong(maxLookbackAge) : null; + } + + /** + * Verify whether the given row key is in the scan boundaries i.e. scan start and end keys. + * @param ptr row key. + * @param scan scan object used to retrieve the result set. + * @throws ResultSetOutOfScanRangeException if the row key is out of scan range. 
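A minimal round trip through the max-lookback helpers above; the one-day value is purely illustrative.

    // Sketch only: a null age leaves the attribute unset; a non-null age round-trips as a long.
    Scan scan = new Scan();
    assert getMaxLookbackAgeFromScanAttribute(scan) == null;  // nothing set yet
    setScanAttributeForMaxLookbackAge(scan, 86_400_000L);     // e.g. one day, in milliseconds
    assert getMaxLookbackAgeFromScanAttribute(scan) == 86_400_000L;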
+ */ + public static void verifyKeyInScanRange(ImmutableBytesWritable ptr, Scan scan) + throws ResultSetOutOfScanRangeException { + try { + if (scan.isReversed()) { + verifyScanRangesForReverseScan(ptr, scan, scan.getStartRow(), scan.getStopRow()); + } else { + verifyScanRanges(ptr, scan, scan.getStartRow(), scan.getStopRow()); + } + } catch (ResultSetOutOfScanRangeException e) { + if (isLocalIndex(scan)) { + verifyScanRanges(ptr, scan, scan.getAttribute(SCAN_START_ROW_SUFFIX), + scan.getAttribute(SCAN_STOP_ROW_SUFFIX)); + return; + } + if (scan.getAttribute(SCAN_ACTUAL_START_ROW) == null) { + throw e; + } + verifyScanRanges(ptr, scan, scan.getAttribute(SCAN_ACTUAL_START_ROW), scan.getStopRow()); + } + } + + private static void verifyScanRanges(ImmutableBytesWritable ptr, Scan scan, byte[] startRow, + byte[] stopRow) throws ResultSetOutOfScanRangeException { + if (startRow != null && Bytes.compareTo(startRow, HConstants.EMPTY_START_ROW) != 0) { + if (scan.includeStartRow()) { + if (Bytes.compareTo(startRow, ptr.get()) > 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); + } + } else { + if (Bytes.compareTo(startRow, ptr.get()) >= 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); + } + } + } + if (stopRow != null && Bytes.compareTo(stopRow, HConstants.EMPTY_END_ROW) != 0) { + if (scan.includeStopRow()) { + if (Bytes.compareTo(stopRow, ptr.get()) < 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); + } + } else { + if (Bytes.compareTo(stopRow, ptr.get()) <= 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); + } + } + } + } + + private static void verifyScanRangesForReverseScan(ImmutableBytesWritable ptr, Scan scan, + byte[] startRow, byte[] stopRow) throws ResultSetOutOfScanRangeException { + if (stopRow != null && Bytes.compareTo(stopRow, HConstants.EMPTY_START_ROW) != 0) { + if (scan.includeStopRow()) { + if (Bytes.compareTo(stopRow, ptr.get()) > 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); + } + } else { + if (Bytes.compareTo(stopRow, ptr.get()) >= 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_START_KEY); + } + } + } + if (startRow != null && Bytes.compareTo(startRow, HConstants.EMPTY_END_ROW) != 0) { + if (scan.includeStartRow()) { + if (Bytes.compareTo(startRow, ptr.get()) < 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); + } + } else { + if (Bytes.compareTo(startRow, ptr.get()) <= 0) { + throw new ResultSetOutOfScanRangeException(RESULT_IS_OUT_OF_SCAN_STOP_KEY); + } + } + } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SchemaUtil.java index 61b22d594ac..118d794a9ee 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SchemaUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SchemaUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,6 @@ */ package org.apache.phoenix.util; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; -import static org.apache.phoenix.thirdparty.com.google.common.base.Strings.isNullOrEmpty; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; @@ -28,6 +25,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE; import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE_ARRAY; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.phoenix.thirdparty.com.google.common.base.Strings.isNullOrEmpty; import java.nio.charset.StandardCharsets; import java.sql.Connection; @@ -93,1349 +93,1422 @@ import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarbinaryEncoded; import org.apache.phoenix.schema.types.PVarchar; - import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; /** - * * Static class for various schema-related utilities - * - * * @since 0.1 */ public class SchemaUtil { - private static final int VAR_LENGTH_ESTIMATE = 10; - private static final int VAR_KV_LENGTH_ESTIMATE = 50; - public static final String ESCAPE_CHARACTER = "\""; - public static final DataBlockEncoding DEFAULT_DATA_BLOCK_ENCODING = DataBlockEncoding.FAST_DIFF; - - public static final PDatum VAR_BINARY_DATUM = new PDatum() { - - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - }; - public static final RowKeySchema VAR_BINARY_SCHEMA = new RowKeySchemaBuilder(1).addField(VAR_BINARY_DATUM, false, SortOrder.getDefault()).build(); - // See PHOENIX-4424 - public static final String SCHEMA_FOR_DEFAULT_NAMESPACE = "default"; - public static final String HBASE_NAMESPACE = "hbase"; - public static final List NOT_ALLOWED_SCHEMA_LIST = Collections.unmodifiableList( - Arrays.asList(SCHEMA_FOR_DEFAULT_NAMESPACE, HBASE_NAMESPACE)); - - /** - * May not be instantiated - */ - private SchemaUtil() { - } - - public static boolean isPKColumn(PColumn column) { - 
return column.getFamilyName() == null; - } - - /** - * Imperfect estimate of row size given a PTable - * TODO: keep row count in stats table and use total size / row count instead - * @param table - * @return estimate of size in bytes of a row - */ - public static long estimateRowSize(PTable table) { - int keyLength = estimateKeyLength(table); - long rowSize = 0; - for (PColumn column : table.getColumns()) { - if (!SchemaUtil.isPKColumn(column)) { - PDataType type = column.getDataType(); - Integer maxLength = column.getMaxLength(); - int valueLength = !type.isFixedWidth() ? VAR_KV_LENGTH_ESTIMATE : maxLength == null ? type.getByteSize() : maxLength; - rowSize += KeyValue.getKeyValueDataStructureSize(keyLength, column.getFamilyName().getBytes().length, column.getName().getBytes().length, valueLength); - } - } - byte[] emptyKeyValueKV = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); - // Empty key value - rowSize += KeyValue.getKeyValueDataStructureSize(keyLength, getEmptyColumnFamily(table).length, emptyKeyValueKV.length, 0); - return rowSize; - } - - /** - * Estimate the max key length in bytes of the PK for a given table - * @param table the table - * @return the max PK length - */ - public static int estimateKeyLength(PTable table) { - int maxKeyLength = 0; - // Calculate the max length of a key (each part must currently be of a fixed width) - int i = 0; - List columns = table.getPKColumns(); - while (i < columns.size()) { - PColumn keyColumn = columns.get(i++); - PDataType type = keyColumn.getDataType(); - Integer maxLength = keyColumn.getMaxLength(); - maxKeyLength += !type.isFixedWidth() ? VAR_LENGTH_ESTIMATE : maxLength == null ? type.getByteSize() : maxLength; - } - return maxKeyLength; - } - - /** - * Normalize an identifier. If name is surrounded by double quotes, - * the double quotes are stripped and the rest is used as-is, - * otherwise the name is upper caased. - * @param name the parsed identifier - * @return the normalized identifier - */ - public static String normalizeIdentifier(String name) { - if (name == null) { - return name; - } - if (isCaseSensitive(name)) { - // Don't upper case if in quotes - return name.substring(1, name.length()-1); - } - return name.toUpperCase(); - } - - /** - * Normalize a Literal. If literal is surrounded by single quotes, - * the quotes are trimmed, else full string is returned - * @param literal the parsed LiteralParseNode - * @return the normalized literal string - */ - public static String normalizeLiteral(LiteralParseNode literal) { - if (literal == null) { - return null; - } - String literalString = literal.toString(); - if (isEnclosedInSingleQuotes(literalString)) { - // Trim the single quotes - return literalString.substring(1, literalString.length()-1); - } - return literalString; - } - - /** - * Normalizes the fulltableName . 
Uses {@linkplain #normalizeIdentifier} - * @param fullTableName - * @return - */ - public static String normalizeFullTableName(String fullTableName) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); - String tableName = SchemaUtil.getTableNameFromFullName(fullTableName); - String normalizedTableName = StringUtil.EMPTY_STRING; - if (!schemaName.isEmpty()) { - normalizedTableName = normalizeIdentifier(schemaName) + QueryConstants.NAME_SEPARATOR; - } - return normalizedTableName + normalizeIdentifier(tableName); - } - - public static boolean isEnclosedInSingleQuotes(String name) { - return name!=null && name.length() > 0 && name.charAt(0)=='\''; - } - - public static boolean isCaseSensitive(String name) { - return name!=null && name.length() > 0 && name.charAt(0)=='"'; - } - - private static boolean isExistingTableMappedToPhoenixName(String name) { - return name != null && name.length() > 0 && name.charAt(0) == '"' && name.indexOf("\"", 1) == name.length() - 1; - } - - public static Set concat(Set l1, Set l2) { - int size1 = l1.size(); - if (size1 == 0) { - return l2; - } - int size2 = l2.size(); - if (size2 == 0) { - return l1; - } - Set l3 = new LinkedHashSet(size1 + size2); - l3.addAll(l1); - l3.addAll(l2); - return l3; - } - - public static byte[] getTableKey(PTable dataTable) { - PName tenantId = dataTable.getTenantId(); - PName schemaName = dataTable.getSchemaName(); - return getTableKey(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(), schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName.getBytes(), dataTable.getTableName().getBytes()); - } - - /** - * Get the key used in the Phoenix metadata row for a table definition - * @param schemaName - * @param tableName - */ - public static byte[] getTableKey(byte[] tenantId, byte[] schemaName, byte[] tableName) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, SEPARATOR_BYTE_ARRAY, - schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, SEPARATOR_BYTE_ARRAY, tableName); - } - - /** - * Get the key used in the Phoenix function data row for a function definition - * @param tenantId - * @param functionName - */ - public static byte[] getFunctionKey(byte[] tenantId, byte[] functionName) { - return ByteUtil.concat(tenantId, SEPARATOR_BYTE_ARRAY, functionName); - } - - public static byte[] getKeyForSchema(String tenantId, String schemaName) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), - SEPARATOR_BYTE_ARRAY, - schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName)); - } - - public static byte[] getTableKey(String tenantId, String schemaName, String tableName) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName)); - } - - public static byte[] getColumnKey(String tenantId, String schemaName, String tableName, String columnName, String familyName) { - Preconditions.checkNotNull(columnName,"Column name cannot be null"); - if (familyName == null) { - return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), - SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), - SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), - SEPARATOR_BYTE_ARRAY, Bytes.toBytes(columnName)); - } - return ByteUtil.concat(tenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), - SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), - SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), - SEPARATOR_BYTE_ARRAY, Bytes.toBytes(columnName), - SEPARATOR_BYTE_ARRAY, Bytes.toBytes(familyName)); - } - - public static PName getTableName(PName schemaName, PName tableName) { - return PNameFactory.newName(getName(schemaName==null? null : schemaName.getString(), tableName.getString(), false)); - } - - public static String getTableName(String schemaName, String tableName) { - return getName(schemaName,tableName, false); - } - - private static String getName(String optionalQualifier, String name, boolean caseSensitive) { - String cq = caseSensitive ? "\"" + name + "\"" : name; - if (optionalQualifier == null || optionalQualifier.isEmpty()) { - return cq; - } - String cf = caseSensitive ? "\"" + optionalQualifier + "\"" : optionalQualifier; - return cf + QueryConstants.NAME_SEPARATOR + cq; - } - - private static String getName(String name, boolean caseSensitive) { - String cq = caseSensitive ? "\"" + name + "\"" : name; - return cq; - } - - public static String getTableName(byte[] schemaName, byte[] tableName) { - return getName(schemaName, tableName); - } - - public static String getColumnDisplayName(byte[] cf, byte[] cq) { - return getName(cf == null || cf.length == 0 ? ByteUtil.EMPTY_BYTE_ARRAY : cf, cq); - } - - public static String getColumnDisplayName(String cf, String cq) { - return getName(cf == null || cf.isEmpty() ? null : cf, cq, false); - } - - public static String getColumnDisplayName(PColumn column) { - PName columnName = column.getFamilyName(); - String cf = columnName == null ? null : columnName.getString(); - return getName(cf == null || cf.isEmpty() ? null : cf, column.getName().getString(), false); - } - - public static String getCaseSensitiveColumnDisplayName(String cf, String cq) { - return getName(cf == null || cf.isEmpty() ? 
null : cf, cq, true); - } - - public static String getMetaDataEntityName(String schemaName, String tableName, String familyName, String columnName) { - if ((schemaName == null || schemaName.isEmpty()) && (tableName == null || tableName.isEmpty())) { - if (columnName == null || columnName.isEmpty()) { - return familyName; - } - return getName(familyName, columnName, false); - } - if ((familyName == null || familyName.isEmpty()) && (columnName == null || columnName.isEmpty()) - && (tableName == null || tableName.equals(MetaDataClient.EMPTY_TABLE))) { return getName(schemaName, - false); } - if ((familyName == null || familyName.isEmpty()) && (columnName == null || columnName.isEmpty())) { - return getName(schemaName, tableName, false); - } - - return getName(getName(schemaName, tableName, false), getName(familyName, columnName, false), false); - } - - public static String getColumnName(String familyName, String columnName) { - return getName(familyName, columnName, false); - } - - public static List getColumnNames(List pCols) { - return Lists.transform(pCols, new Function() { - @Override - public String apply(PColumn input) { - return input.getName().getString(); - } - }); - } - - public static byte[] getTableNameAsBytes(String schemaName, String tableName) { - if (schemaName == null || schemaName.length() == 0) { - return StringUtil.toBytes(tableName); - } - return getTableNameAsBytes(StringUtil.toBytes(schemaName),StringUtil.toBytes(tableName)); - } - - public static byte[] getTableNameAsBytes(byte[] schemaName, byte[] tableName) { - return getNameAsBytes(schemaName, tableName); - } - - private static byte[] getNameAsBytes(byte[] nameOne, byte[] nameTwo) { - if (nameOne == null || nameOne.length == 0) { - return nameTwo; - } else if ((nameTwo == null || nameTwo.length == 0)) { - return nameOne; - } else { - return ByteUtil.concat(nameOne, QueryConstants.NAME_SEPARATOR_BYTES, nameTwo); - } - } - - public static String getName(byte[] nameOne, byte[] nameTwo) { - return Bytes.toString(getNameAsBytes(nameOne,nameTwo)); - } - - public static int getVarCharLength(byte[] buf, int keyOffset, int maxLength) { - return getVarCharLength(buf, keyOffset, maxLength, 1); - } - - public static int getVarCharLength(byte[] buf, int keyOffset, int maxLength, int skipCount) { - int length = 0; - for (int i=0; i 0; i++) { - int length = getVarCharLength(rowKey, offset, keyLength); - byte[] b = new byte[length]; - System.arraycopy(rowKey, offset, b, 0, length); - offset += length + 1; - keyLength -= length + 1; - colMetaData[i] = b; - } - return i; - } - - public static String findExistingColumn(PTable table, List columns) { - for (PColumn column : columns) { - PName familyName = column.getFamilyName(); - if (familyName == null) { - try { - return table.getPKColumn(column.getName().getString()).getName().getString(); - } catch (ColumnNotFoundException e) { - continue; - } - } else { - try { - return table.getColumnFamily(familyName.getString()).getPColumnForColumnName(column.getName().getString()).getName().getString(); - } catch (ColumnFamilyNotFoundException e) { - continue; // Shouldn't happen - } catch (ColumnNotFoundException e) { - continue; - } - } - } - return null; - } - - public static String toString(byte[][] values) { - if (values == null) { - return "null"; - } - StringBuilder buf = new StringBuilder("["); - for (byte[] value : values) { - buf.append(Bytes.toStringBinary(value)); - buf.append(','); - } - buf.setCharAt(buf.length()-1, ']'); - return buf.toString(); - } - - public static String 
toString(PDataType type, byte[] value) { - return toString(type, value, 0, value.length); - } - - public static String toString(PDataType type, ImmutableBytesWritable value) { - return toString(type, value.get(), value.getOffset(), value.getLength()); - } - - public static String toString(PDataType type, byte[] value, int offset, int length) { - boolean isString = type.isCoercibleTo(PVarchar.INSTANCE); - return isString ? ("'" + type.toObject(value).toString() + "'") : type.toObject(value, offset, length).toString(); - } - - public static byte[] getEmptyColumnFamily(PName defaultColumnFamily, List families, boolean isLocalIndex) { - return families.isEmpty() ? defaultColumnFamily == null ? (isLocalIndex ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) : defaultColumnFamily.getBytes() : families.get(0).getName().getBytes(); - } - - public static byte[] getEmptyColumnFamily(PTable table) { - List families = table.getColumnFamilies(); - return families.isEmpty() ? table.getDefaultFamilyName() == null ? (table.getIndexType() == IndexType.LOCAL ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) : table.getDefaultFamilyName().getBytes() : families.get(0).getName().getBytes(); - } - - public static byte[] getEmptyColumnQualifier(PTable table) { - return table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - } - - public static String getEmptyColumnFamilyAsString(PTable table) { - List families = table.getColumnFamilies(); - return families.isEmpty() ? table.getDefaultFamilyName() == null ? (table.getIndexType() == IndexType.LOCAL ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY : QueryConstants.DEFAULT_COLUMN_FAMILY) : table.getDefaultFamilyName().getString() : families.get(0).getName().getString(); - } - - public static ImmutableBytesPtr getEmptyColumnFamilyPtr(PTable table) { - List families = table.getColumnFamilies(); - return families.isEmpty() ? table.getDefaultFamilyName() == null ? (table.getIndexType() == IndexType.LOCAL ? 
QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR) : table.getDefaultFamilyName().getBytesPtr() : families.get(0) - .getName().getBytesPtr(); - } - - public static boolean isMetaTable(byte[] tableName) { - return Bytes.compareTo(tableName, SYSTEM_CATALOG_NAME_BYTES) == 0 || Bytes.compareTo(tableName, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isFunctionTable(byte[] tableName) { - return Bytes.compareTo(tableName, SYSTEM_FUNCTION_NAME_BYTES) == 0 || Bytes.compareTo(tableName, - SchemaUtil.getPhysicalTableName(SYSTEM_FUNCTION_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isStatsTable(byte[] tableName) { - return Bytes.compareTo(tableName, SYSTEM_STATS_NAME_BYTES) == 0 || Bytes.compareTo(tableName, - SchemaUtil.getPhysicalTableName(SYSTEM_STATS_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isSequenceTable(byte[] tableName) { - return Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES) == 0 - || Bytes.compareTo(tableName, SchemaUtil - .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isTaskTable(byte[] tableName) { - return Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES) == 0 - || Bytes.compareTo(tableName, SchemaUtil - .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isChildLinkTable(byte[] tableName) { - return Bytes.compareTo(tableName, SYSTEM_CHILD_LINK_NAME_BYTES) == 0 || Bytes.compareTo(tableName, - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, true).getName()) == 0; - } - - public static boolean isSequenceTable(PTable table) { - return PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME.equals(table.getName().getString()); - } - - public static boolean isTaskTable(PTable table) { - return PhoenixDatabaseMetaData.SYSTEM_TASK_NAME.equals(table.getName().getString()); - } - - public static boolean isMetaTable(PTable table) { - return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(table.getSchemaName().getString()) && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(table.getTableName().getString()); - } - - public static boolean isMetaTable(byte[] schemaName, byte[] tableName) { - return Bytes.compareTo(schemaName, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES) == 0 && Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES) == 0; - } - - public static boolean isMetaTable(String schemaName, String tableName) { - return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName) && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(tableName); - } - - public static boolean isSystemTable(byte[] fullTableName) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); - if (QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) return true; - return false; - } - - // Given the splits and the rowKeySchema, find out the keys that - public static byte[][] processSplits(byte[][] splits, Collection pkColumns, - Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException { - // FIXME: shouldn't this return if splits.length == 0? - if (splits == null) return null; - // We do not accept user specified splits if the table is salted and we specify defaultRowKeyOrder. In this case, - // throw an exception. 
- if (splits.length > 0 && saltBucketNum != null && defaultRowKeyOrder) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_SPLITS_ON_SALTED_TABLE).build().buildException(); - } - // If the splits are not specified and table is salted, pre-split the table. - if (splits.length == 0 && saltBucketNum != null) { - splits = SaltingUtil.getSalteByteSplitPoints(saltBucketNum); - } - byte[][] newSplits = new byte[splits.length][]; - for (int i=0; i pkColumns) { - int pos = 0, offset = 0, maxOffset = split.length; - Iterator iterator = pkColumns.iterator(); - while (pos < pkColumns.size()) { - PColumn column = iterator.next(); - if (column.getDataType().isFixedWidth()) { // Fixed width - int length = SchemaUtil.getFixedByteSize(column); - if (maxOffset - offset < length) { - // The split truncates the field. Fill in the rest of the part and any fields that - // are missing after this field. - int fillInLength = length - (maxOffset - offset); - fillInLength += estimatePartLength(pos + 1, iterator); - return ByteUtil.fillKey(split, split.length + fillInLength); - } - // Account for this field, move to next position; - offset += length; - pos++; - } else { // Variable length - // If we are the last slot, then we are done. Nothing needs to be filled in. - if (pos == pkColumns.size() - 1) { - break; - } - while (offset < maxOffset && split[offset] != SEPARATOR_BYTE) { - offset++; - } - if (offset == maxOffset) { - // The var-length field does not end with a separator and it's not the last field. - int fillInLength = 1; // SEPARATOR byte for the current var-length slot. - fillInLength += estimatePartLength(pos + 1, iterator); - return ByteUtil.fillKey(split, split.length + fillInLength); - } - // Move to the next position; - offset += 1; // skip separator; - pos++; - } - } - return split; - } - - // Estimate the key length after pos slot for schema. - private static int estimatePartLength(int pos, Iterator iterator) { - int length = 0; - while (iterator.hasNext()) { - PColumn column = iterator.next(); - if (column.getDataType().isFixedWidth()) { - length += SchemaUtil.getFixedByteSize(column); - } else { - length += 1; // SEPARATOR byte. 
- } - } - return length; - } - - public static String getEscapedTableName(String schemaName, String tableName) { - if (schemaName == null || schemaName.length() == 0) { - return "\"" + tableName + "\""; - } - return "\"" + schemaName + "\"" + QueryConstants.NAME_SEPARATOR + "\"" + tableName + "\""; - } - - protected static PhoenixConnection addMetaDataColumn(PhoenixConnection conn, long scn, String columnDef) throws SQLException { - PhoenixConnection metaConnection = null; - Statement stmt = null; + private static final int VAR_LENGTH_ESTIMATE = 10; + private static final int VAR_KV_LENGTH_ESTIMATE = 50; + public static final String ESCAPE_CHARACTER = "\""; + public static final DataBlockEncoding DEFAULT_DATA_BLOCK_ENCODING = DataBlockEncoding.FAST_DIFF; + + public static final PDatum VAR_BINARY_DATUM = new PDatum() { + + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PVarbinary.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + }; + public static final RowKeySchema VAR_BINARY_SCHEMA = + new RowKeySchemaBuilder(1).addField(VAR_BINARY_DATUM, false, SortOrder.getDefault()).build(); + // See PHOENIX-4424 + public static final String SCHEMA_FOR_DEFAULT_NAMESPACE = "default"; + public static final String HBASE_NAMESPACE = "hbase"; + public static final List NOT_ALLOWED_SCHEMA_LIST = + Collections.unmodifiableList(Arrays.asList(SCHEMA_FOR_DEFAULT_NAMESPACE, HBASE_NAMESPACE)); + + /** + * May not be instantiated + */ + private SchemaUtil() { + } + + public static boolean isPKColumn(PColumn column) { + return column.getFamilyName() == null; + } + + /** + * Imperfect estimate of row size given a PTable TODO: keep row count in stats table and use total + * size / row count instead + * @return estimate of size in bytes of a row + */ + public static long estimateRowSize(PTable table) { + int keyLength = estimateKeyLength(table); + long rowSize = 0; + for (PColumn column : table.getColumns()) { + if (!SchemaUtil.isPKColumn(column)) { + PDataType type = column.getDataType(); + Integer maxLength = column.getMaxLength(); + int valueLength = !type.isFixedWidth() ? VAR_KV_LENGTH_ESTIMATE + : maxLength == null ? type.getByteSize() + : maxLength; + rowSize += + KeyValue.getKeyValueDataStructureSize(keyLength, column.getFamilyName().getBytes().length, + column.getName().getBytes().length, valueLength); + } + } + byte[] emptyKeyValueKV = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); + // Empty key value + rowSize += KeyValue.getKeyValueDataStructureSize(keyLength, getEmptyColumnFamily(table).length, + emptyKeyValueKV.length, 0); + return rowSize; + } + + /** + * Estimate the max key length in bytes of the PK for a given table + * @param table the table + * @return the max PK length + */ + public static int estimateKeyLength(PTable table) { + int maxKeyLength = 0; + // Calculate the max length of a key (each part must currently be of a fixed width) + int i = 0; + List columns = table.getPKColumns(); + while (i < columns.size()) { + PColumn keyColumn = columns.get(i++); + PDataType type = keyColumn.getDataType(); + Integer maxLength = keyColumn.getMaxLength(); + maxKeyLength += !type.isFixedWidth() ? VAR_LENGTH_ESTIMATE + : maxLength == null ? type.getByteSize() + : maxLength; + } + return maxKeyLength; + } + + /** + * Normalize an identifier. 
If name is surrounded by double quotes, the double quotes are stripped + * and the rest is used as-is, otherwise the name is upper caased. + * @param name the parsed identifier + * @return the normalized identifier + */ + public static String normalizeIdentifier(String name) { + if (name == null) { + return name; + } + if (isCaseSensitive(name)) { + // Don't upper case if in quotes + return name.substring(1, name.length() - 1); + } + return name.toUpperCase(); + } + + /** + * Normalize a Literal. If literal is surrounded by single quotes, the quotes are trimmed, else + * full string is returned + * @param literal the parsed LiteralParseNode + * @return the normalized literal string + */ + public static String normalizeLiteral(LiteralParseNode literal) { + if (literal == null) { + return null; + } + String literalString = literal.toString(); + if (isEnclosedInSingleQuotes(literalString)) { + // Trim the single quotes + return literalString.substring(1, literalString.length() - 1); + } + return literalString; + } + + /** + * Normalizes the fulltableName . Uses {@linkplain #normalizeIdentifier} + */ + public static String normalizeFullTableName(String fullTableName) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); + String tableName = SchemaUtil.getTableNameFromFullName(fullTableName); + String normalizedTableName = StringUtil.EMPTY_STRING; + if (!schemaName.isEmpty()) { + normalizedTableName = normalizeIdentifier(schemaName) + QueryConstants.NAME_SEPARATOR; + } + return normalizedTableName + normalizeIdentifier(tableName); + } + + public static boolean isEnclosedInSingleQuotes(String name) { + return name != null && name.length() > 0 && name.charAt(0) == '\''; + } + + public static boolean isCaseSensitive(String name) { + return name != null && name.length() > 0 && name.charAt(0) == '"'; + } + + private static boolean isExistingTableMappedToPhoenixName(String name) { + return name != null && name.length() > 0 && name.charAt(0) == '"' + && name.indexOf("\"", 1) == name.length() - 1; + } + + public static Set concat(Set l1, Set l2) { + int size1 = l1.size(); + if (size1 == 0) { + return l2; + } + int size2 = l2.size(); + if (size2 == 0) { + return l1; + } + Set l3 = new LinkedHashSet(size1 + size2); + l3.addAll(l1); + l3.addAll(l2); + return l3; + } + + public static byte[] getTableKey(PTable dataTable) { + PName tenantId = dataTable.getTenantId(); + PName schemaName = dataTable.getSchemaName(); + return getTableKey(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(), + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName.getBytes(), + dataTable.getTableName().getBytes()); + } + + /** + * Get the key used in the Phoenix metadata row for a table definition + */ + public static byte[] getTableKey(byte[] tenantId, byte[] schemaName, byte[] tableName) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId, + SEPARATOR_BYTE_ARRAY, schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName, + SEPARATOR_BYTE_ARRAY, tableName); + } + + /** + * Get the key used in the Phoenix function data row for a function definition + */ + public static byte[] getFunctionKey(byte[] tenantId, byte[] functionName) { + return ByteUtil.concat(tenantId, SEPARATOR_BYTE_ARRAY, functionName); + } + + public static byte[] getKeyForSchema(String tenantId, String schemaName) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + SEPARATOR_BYTE_ARRAY, + schemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName)); + } + + public static byte[] getTableKey(String tenantId, String schemaName, String tableName) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + SEPARATOR_BYTE_ARRAY, + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), + SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName)); + } + + public static byte[] getColumnKey(String tenantId, String schemaName, String tableName, + String columnName, String familyName) { + Preconditions.checkNotNull(columnName, "Column name cannot be null"); + if (familyName == null) { + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + SEPARATOR_BYTE_ARRAY, + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), + SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(columnName)); + } + return ByteUtil.concat(tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(tenantId), + SEPARATOR_BYTE_ARRAY, + schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), + SEPARATOR_BYTE_ARRAY, Bytes.toBytes(tableName), SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(columnName), SEPARATOR_BYTE_ARRAY, Bytes.toBytes(familyName)); + } + + public static PName getTableName(PName schemaName, PName tableName) { + return PNameFactory.newName( + getName(schemaName == null ? null : schemaName.getString(), tableName.getString(), false)); + } + + public static String getTableName(String schemaName, String tableName) { + return getName(schemaName, tableName, false); + } + + private static String getName(String optionalQualifier, String name, boolean caseSensitive) { + String cq = caseSensitive ? "\"" + name + "\"" : name; + if (optionalQualifier == null || optionalQualifier.isEmpty()) { + return cq; + } + String cf = caseSensitive ? "\"" + optionalQualifier + "\"" : optionalQualifier; + return cf + QueryConstants.NAME_SEPARATOR + cq; + } + + private static String getName(String name, boolean caseSensitive) { + String cq = caseSensitive ? "\"" + name + "\"" : name; + return cq; + } + + public static String getTableName(byte[] schemaName, byte[] tableName) { + return getName(schemaName, tableName); + } + + public static String getColumnDisplayName(byte[] cf, byte[] cq) { + return getName(cf == null || cf.length == 0 ? ByteUtil.EMPTY_BYTE_ARRAY : cf, cq); + } + + public static String getColumnDisplayName(String cf, String cq) { + return getName(cf == null || cf.isEmpty() ? null : cf, cq, false); + } + + public static String getColumnDisplayName(PColumn column) { + PName columnName = column.getFamilyName(); + String cf = columnName == null ? null : columnName.getString(); + return getName(cf == null || cf.isEmpty() ? null : cf, column.getName().getString(), false); + } + + public static String getCaseSensitiveColumnDisplayName(String cf, String cq) { + return getName(cf == null || cf.isEmpty() ? 
null : cf, cq, true); + } + + public static String getMetaDataEntityName(String schemaName, String tableName, String familyName, + String columnName) { + if ( + (schemaName == null || schemaName.isEmpty()) && (tableName == null || tableName.isEmpty()) + ) { + if (columnName == null || columnName.isEmpty()) { + return familyName; + } + return getName(familyName, columnName, false); + } + if ( + (familyName == null || familyName.isEmpty()) && (columnName == null || columnName.isEmpty()) + && (tableName == null || tableName.equals(MetaDataClient.EMPTY_TABLE)) + ) { + return getName(schemaName, false); + } + if ( + (familyName == null || familyName.isEmpty()) && (columnName == null || columnName.isEmpty()) + ) { + return getName(schemaName, tableName, false); + } + + return getName(getName(schemaName, tableName, false), getName(familyName, columnName, false), + false); + } + + public static String getColumnName(String familyName, String columnName) { + return getName(familyName, columnName, false); + } + + public static List getColumnNames(List pCols) { + return Lists.transform(pCols, new Function() { + @Override + public String apply(PColumn input) { + return input.getName().getString(); + } + }); + } + + public static byte[] getTableNameAsBytes(String schemaName, String tableName) { + if (schemaName == null || schemaName.length() == 0) { + return StringUtil.toBytes(tableName); + } + return getTableNameAsBytes(StringUtil.toBytes(schemaName), StringUtil.toBytes(tableName)); + } + + public static byte[] getTableNameAsBytes(byte[] schemaName, byte[] tableName) { + return getNameAsBytes(schemaName, tableName); + } + + private static byte[] getNameAsBytes(byte[] nameOne, byte[] nameTwo) { + if (nameOne == null || nameOne.length == 0) { + return nameTwo; + } else if ((nameTwo == null || nameTwo.length == 0)) { + return nameOne; + } else { + return ByteUtil.concat(nameOne, QueryConstants.NAME_SEPARATOR_BYTES, nameTwo); + } + } + + public static String getName(byte[] nameOne, byte[] nameTwo) { + return Bytes.toString(getNameAsBytes(nameOne, nameTwo)); + } + + public static int getVarCharLength(byte[] buf, int keyOffset, int maxLength) { + return getVarCharLength(buf, keyOffset, maxLength, 1); + } + + public static int getVarCharLength(byte[] buf, int keyOffset, int maxLength, int skipCount) { + int length = 0; + for (int i = 0; i < skipCount; i++) { + while (length < maxLength && buf[keyOffset + length] != QueryConstants.SEPARATOR_BYTE) { + length++; + } + if (i != skipCount - 1) { // skip over the separator if it's not the last one. 
+ length++; + } + } + return length; + } + + public static int getVarChars(byte[] rowKey, byte[][] rowKeyMetadata) { + return getVarChars(rowKey, 0, rowKey.length, 0, rowKeyMetadata); + } + + public static int getVarChars(byte[] rowKey, int colMetaDataLength, byte[][] colMetaData) { + return getVarChars(rowKey, 0, rowKey.length, 0, colMetaDataLength, colMetaData); + } + + public static int getVarChars(byte[] rowKey, int keyOffset, int keyLength, int colMetaDataOffset, + byte[][] colMetaData) { + return getVarChars(rowKey, keyOffset, keyLength, colMetaDataOffset, colMetaData.length, + colMetaData); + } + + public static int getVarChars(byte[] rowKey, int keyOffset, int keyLength, int colMetaDataOffset, + int colMetaDataLength, byte[][] colMetaData) { + int i, offset = keyOffset; + for (i = colMetaDataOffset; i < colMetaDataLength && keyLength > 0; i++) { + int length = getVarCharLength(rowKey, offset, keyLength); + byte[] b = new byte[length]; + System.arraycopy(rowKey, offset, b, 0, length); + offset += length + 1; + keyLength -= length + 1; + colMetaData[i] = b; + } + return i; + } + + public static String findExistingColumn(PTable table, List columns) { + for (PColumn column : columns) { + PName familyName = column.getFamilyName(); + if (familyName == null) { try { - metaConnection = new PhoenixConnection(conn, scn); - try { - stmt = metaConnection.createStatement(); - stmt.executeUpdate("ALTER TABLE SYSTEM.\"TABLE\" ADD IF NOT EXISTS " + columnDef); - return metaConnection; - } finally { - if (stmt != null) { - stmt.close(); - } - } - } finally { - if (metaConnection != null) { - metaConnection.close(); - } - } - } - - public static boolean columnExists(PTable table, String columnName) { - try { - table.getColumnForColumnName(columnName); - return true; + return table.getPKColumn(column.getName().getString()).getName().getString(); } catch (ColumnNotFoundException e) { - return false; - } catch (AmbiguousColumnException e) { - return true; - } - } - - public static String getSchemaNameFromFullName(String tableName) { - if (isExistingTableMappedToPhoenixName(tableName)) { - return StringUtil.EMPTY_STRING; - } - if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - return getSchemaNameFromFullName(tableName, QueryConstants.NAMESPACE_SEPARATOR); - } - return getSchemaNameFromFullName(tableName, QueryConstants.NAME_SEPARATOR); - } - - public static String getSchemaNameFromFullName(String tableName, String separator) { - int index = tableName.indexOf(separator); - if (index < 0) { - return StringUtil.EMPTY_STRING; - } - return tableName.substring(0, index); - } - - private static int indexOf (byte[] bytes, byte b) { - for (int i = 0; i < bytes.length; i++) { - if (bytes[i] == b) { - return i; - } - } - return -1; - } - - public static String getSchemaNameFromFullName(byte[] tableName) { - if (tableName == null) { - return null; - } - if (isExistingTableMappedToPhoenixName(Bytes.toString(tableName))) { return StringUtil.EMPTY_STRING; } - int index = indexOf(tableName, QueryConstants.NAME_SEPARATOR_BYTE); - if (index < 0) { - index = indexOf(tableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE); - if (index < 0) { return StringUtil.EMPTY_STRING; } - } - return Bytes.toString(tableName, 0, index); - } - - public static String getTableNameFromFullName(byte[] tableName) { - if (tableName == null) { - return null; - } - if (isExistingTableMappedToPhoenixName(Bytes.toString(tableName))) { return Bytes.toString(tableName); } - int index = indexOf(tableName, 
QueryConstants.NAME_SEPARATOR_BYTE); - if (index < 0) { - index = indexOf(tableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE); - if (index < 0) { return Bytes.toString(tableName); } - } - return Bytes.toString(tableName, index+1, tableName.length - index - 1); - } - - public static String getTableNameFromFullName(String tableName) { - if (isExistingTableMappedToPhoenixName(tableName)) { return tableName; } - if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { return getTableNameFromFullName(tableName, - QueryConstants.NAMESPACE_SEPARATOR); } - return getTableNameFromFullName(tableName, QueryConstants.NAME_SEPARATOR); - } - - public static String getTableNameFromFullName(String tableName, String separator) { - int index = tableName.indexOf(separator); - if (index < 0) { - return tableName; - } - return tableName.substring(index+1, tableName.length()); - } - - public static byte[] getTableKeyFromFullName(String fullTableName) { - int index = fullTableName.indexOf(QueryConstants.NAME_SEPARATOR); - if (index < 0) { - index = fullTableName.indexOf(QueryConstants.NAMESPACE_SEPARATOR); - if (index < 0) { return getTableKey(null, null, fullTableName); } + continue; } - String schemaName = fullTableName.substring(0, index); - String tableName = fullTableName.substring(index+1); - return getTableKey(null, schemaName, tableName); - } - - private static int getTerminatorCount(RowKeySchema schema) { - int nTerminators = 0; - for (int i = 0; i < schema.getFieldCount(); i++) { - Field field = schema.getField(i); - // We won't have a terminator on the last PK column - // unless it is variable length and exclusive, but - // having the extra byte irregardless won't hurt anything - if (!field.getDataType().isFixedWidth()) { - nTerminators++; - } - } - return nTerminators; - } - - public static int getMaxKeyLength(RowKeySchema schema, List> slots) { - int maxKeyLength = getTerminatorCount(schema) * 2; - for (List slot : slots) { - int maxSlotLength = 0; - for (KeyRange range : slot) { - int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length); - if (maxSlotLength < maxRangeLength) { - maxSlotLength = maxRangeLength; - } - } - maxKeyLength += maxSlotLength; - } - return maxKeyLength; - } - - public static int getFixedByteSize(PDatum e) { - assert(e.getDataType().isFixedWidth()); - Integer maxLength = e.getMaxLength(); - return maxLength == null ? 
e.getDataType().getByteSize() : maxLength; - } - - public static short getMaxKeySeq(PTable table) { - int offset = 0; - if (table.getBucketNum() != null) { - offset++; - } - // TODO: for tenant-specific table on tenant-specific connection, - // we should subtract one for tenant column and another one for - // index ID - return (short)(table.getPKColumns().size() - offset); - } - - public static int getPKPosition(PTable table, PColumn column) { - // TODO: when PColumn has getPKPosition, use that instead - return table.getPKColumns().indexOf(column); - } - - public static String getEscapedFullColumnName(String fullColumnName) { - if (fullColumnName.startsWith(ESCAPE_CHARACTER)) { - return fullColumnName; - } - int index = fullColumnName.indexOf(QueryConstants.NAME_SEPARATOR); - if (index < 0) { - return getEscapedArgument(fullColumnName); - } - String columnFamily = fullColumnName.substring(0,index); - String columnName = fullColumnName.substring(index+1); - return getEscapedArgument(columnFamily) + QueryConstants.NAME_SEPARATOR + getEscapedArgument(columnName) ; - } - - public static List getEscapedFullColumnNames(List fullColumnNames) { - return Lists - .newArrayList(Iterables.transform(fullColumnNames, new Function() { - @Override - public String apply(String col) { - return getEscapedFullColumnName(col); - } - })); - } - - public static String getEscapedFullTableName(String fullTableName) { - final String schemaName = getSchemaNameFromFullName(fullTableName); - final String tableName = getTableNameFromFullName(fullTableName); - return getEscapedTableName(schemaName, tableName); - } - - /** - * Escapes the given argument with {@value #ESCAPE_CHARACTER} - * @param argument any non null value. - * @return - */ - public static String getEscapedArgument(String argument) { - Preconditions.checkNotNull(argument,"Argument passed cannot be null"); - return ESCAPE_CHARACTER + argument + ESCAPE_CHARACTER; - } - - /** - * - * @return a fully qualified column name in the format: "CFNAME"."COLNAME" or "COLNAME" depending on whether or not - * there is a column family name present. - */ - public static String getQuotedFullColumnName(PColumn pCol) { - checkNotNull(pCol); - String columnName = pCol.getName().getString(); - String columnFamilyName = pCol.getFamilyName() != null ? pCol.getFamilyName().getString() : null; - return getQuotedFullColumnName(columnFamilyName, columnName); - } - - /** - * - * @return a fully qualified column name in the format: "CFNAME"."COLNAME" or "COLNAME" depending on whether or not - * there is a column family name present. - */ - public static String getQuotedFullColumnName(@Nullable String columnFamilyName, String columnName) { - checkArgument(!isNullOrEmpty(columnName), "Column name cannot be null or empty"); - return columnFamilyName == null ? ("\"" + columnName + "\"") : ("\"" + columnFamilyName + "\"" + QueryConstants.NAME_SEPARATOR + "\"" + columnName + "\""); - } - - public static boolean hasHTableDescriptorProps(Map tableProps) { - int pTablePropCount = 0; - for (String prop : tableProps.keySet()) { - if (TableProperty.isPhoenixTableProperty(prop) || prop.equals(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME)) { - pTablePropCount++; - } - } - return tableProps.size() - pTablePropCount > 0; - } - - /** - * Replaces all occurrences of {@link #ESCAPE_CHARACTER} with an empty character. 
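Two hypothetical inputs for getQuotedFullColumnName above, to make the "CFNAME"."COLNAME" format concrete; the names are invented.

    // Illustrative values only, not part of this patch.
    getQuotedFullColumnName(null, "NAME");   // -> "NAME"         (no family, just the quoted column)
    getQuotedFullColumnName("CF1", "NAME");  // -> "CF1"."NAME"   (family and column, both quoted)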
- * @param fullColumnName - * @return - */ - public static String getUnEscapedFullColumnName(String fullColumnName) { - checkArgument(!isNullOrEmpty(fullColumnName), "Column name cannot be null or empty"); - fullColumnName = fullColumnName.replaceAll(ESCAPE_CHARACTER, ""); - return fullColumnName.trim(); - } - - /** - * Return the separator byte to use based on: - * @param rowKeyOrderOptimizable whether or not the table may optimize descending row keys. If the - * table has no descending row keys, this will be true. Also, if the table has been upgraded (using - * a new -u option for psql.py), then it'll be true - * @param isNullValue whether or not the value is null. We use a null byte still if the value is null - * regardless of sort order since nulls will always sort first this way. - * @param sortOrder whether the value sorts ascending or descending. - * @return the byte to use as the separator - */ - public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, SortOrder sortOrder) { - return !rowKeyOrderOptimizable || isNullValue || sortOrder == SortOrder.ASC ? SEPARATOR_BYTE : QueryConstants.DESC_SEPARATOR_BYTE; - } - - /** - * Get separator bytes for Variable length encoded data type (e.g. VARBINARY_ENCODED). - * - * @param rowKeyOrderOptimizable Whether the table may optimize descending row keys. - * @param isNullValue Whether the value is null. - * @param sortOrder Whether the value sorts ascending or descending. - * @return The separator byte array. - */ - public static byte[] getSeparatorBytesForVarBinaryEncoded(boolean rowKeyOrderOptimizable, - boolean isNullValue, SortOrder sortOrder) { - return !rowKeyOrderOptimizable || isNullValue || sortOrder == SortOrder.ASC ? - QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES : - QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES; - } - - /** - * Return separator bytes depending on the data type. - * - * @param pDataType Data type used. - * @param rowKeyOrderOptimizable Whether the table may optimize descending row keys. - * @param isNullValue Whether the value is null. - * @param sortOrder Whether the value sorts ascending or descending. - * @return The separator byte array. - */ - public static byte[] getSeparatorBytes(final PDataType pDataType, - final boolean rowKeyOrderOptimizable, - final boolean isNullValue, - final SortOrder sortOrder) { - if (pDataType == PVarbinaryEncoded.INSTANCE) { - return getSeparatorBytesForVarBinaryEncoded(rowKeyOrderOptimizable, isNullValue, - sortOrder); - } else { - return new byte[] {getSeparatorByte(rowKeyOrderOptimizable, isNullValue, sortOrder)}; - } - } - - - public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, Field f) { - return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, f.getSortOrder()); - } - - public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, Expression e) { - return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, e.getSortOrder()); - } - - /** - * Get list of ColumnInfos that contain Column Name and its associated - * PDataType for an import. The supplied list of columns can be null -- if it is non-null, - * it represents a user-supplied list of columns to be imported. - * - * @param conn Phoenix connection from which metadata will be read - * @param tableName Phoenix table name whose columns are to be checked. 
Can include a schema - * name - * @param columns user-supplied list of import columns, can be null - * @param strict if true, an exception will be thrown if unknown columns are supplied - */ - public static List generateColumnInfo(Connection conn, - String tableName, List columns, boolean strict) - throws SQLException { - Map columnNameToTypeMap = Maps.newLinkedHashMap(); - Set ambiguousColumnNames = new HashSet(); - Map fullColumnNameToTypeMap = Maps.newLinkedHashMap(); - DatabaseMetaData dbmd = conn.getMetaData(); - int unfoundColumnCount = 0; - // TODO: escape wildcard characters here because we don't want that - // behavior here - String escapedTableName = StringUtil.escapeLike(tableName); - String[] schemaAndTable = escapedTableName.split("\\."); - ResultSet rs = null; + } else { try { - rs = dbmd.getColumns(null, (schemaAndTable.length == 1 ? "" - : schemaAndTable[0]), - (schemaAndTable.length == 1 ? escapedTableName - : schemaAndTable[1]), null); - while (rs.next()) { - String colName = rs.getString(QueryUtil.COLUMN_NAME_POSITION); - String colFam = rs.getString(QueryUtil.COLUMN_FAMILY_POSITION); - - // use family qualifier, if available, otherwise, use column name - String fullColumn = (colFam==null?colName:String.format("%s.%s",colFam,colName)); - String sqlTypeName = rs.getString(QueryUtil.DATA_TYPE_NAME_POSITION); - - // allow for both bare and family qualified names. - if (columnNameToTypeMap.keySet().contains(colName)) { - ambiguousColumnNames.add(colName); - } - columnNameToTypeMap.put( - colName, - PDataType.fromSqlTypeName(sqlTypeName).getSqlType()); - fullColumnNameToTypeMap.put( - fullColumn, - PDataType.fromSqlTypeName(sqlTypeName).getSqlType()); - } - if (columnNameToTypeMap.isEmpty()) { - throw new IllegalArgumentException("Table " + tableName + " not found"); - } - } finally { - if (rs != null) { - rs.close(); - } - } - List columnInfoList = Lists.newArrayList(); - Set unresolvedColumnNames = new TreeSet(); - if (columns == null) { - // use family qualified names by default, if no columns are specified. - for (Map.Entry entry : fullColumnNameToTypeMap - .entrySet()) { - columnInfoList.add(new ColumnInfo(entry.getKey(), entry.getValue())); - } - } else { - // Leave "null" as indication to skip b/c it doesn't exist - for (int i = 0; i < columns.size(); i++) { - String columnName = columns.get(i).trim(); - Integer sqlType = null; - if (fullColumnNameToTypeMap.containsKey(columnName)) { - sqlType = fullColumnNameToTypeMap.get(columnName); - } else if (columnNameToTypeMap.containsKey(columnName)) { - if (ambiguousColumnNames.contains(columnName)) { - unresolvedColumnNames.add(columnName); - } - // fall back to bare column name. 
- sqlType = columnNameToTypeMap.get(columnName); - } - if (unresolvedColumnNames.size()>0) { - StringBuilder exceptionMessage = new StringBuilder(); - boolean first = true; - exceptionMessage.append("Unable to resolve these column names to a single column family:\n"); - for (String col : unresolvedColumnNames) { - if (first) first = false; - else exceptionMessage.append(","); - exceptionMessage.append(col); - } - exceptionMessage.append("\nAvailable columns with column families:\n"); - first = true; - for (String col : fullColumnNameToTypeMap.keySet()) { - if (first) first = false; - else exceptionMessage.append(","); - exceptionMessage.append(col); - } - throw new SQLException(exceptionMessage.toString()); - } - - if (sqlType == null) { - if (strict) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.COLUMN_NOT_FOUND) - .setColumnName(columnName) - .setTableName(tableName).build() - .buildException(); - } - unfoundColumnCount++; - } else { - columnInfoList.add(new ColumnInfo(columnName, sqlType)); - } - } - if (unfoundColumnCount == columns.size()) { - throw new SQLExceptionInfo.Builder( - SQLExceptionCode.COLUMN_NOT_FOUND) - .setColumnName( - Arrays.toString(columns.toArray(new String[0]))) - .setTableName(tableName).build().buildException(); - } - } - return columnInfoList; - } - - public static boolean hasRowTimestampColumn(PTable table) { - return table.getRowTimestampColPos()>0; - } - - public static byte[] getSchemaKey(String schemaName) { - return SchemaUtil.getTableKey(null, schemaName, MetaDataClient.EMPTY_TABLE); - } - - public static PName getPhysicalHBaseTableName(PName schemaName, PName tableName, boolean isNamespaceMapped) { - return getPhysicalHBaseTableName( - schemaName == null ? null : schemaName.toString(), tableName.toString(), isNamespaceMapped); - } - - public static PName getPhysicalHBaseTableName(byte[] schemaName, byte[] tableName, boolean isNamespaceMapped) { - return getPhysicalHBaseTableName(Bytes.toString(schemaName), Bytes.toString(tableName), isNamespaceMapped); - } - - /** - * Note: the following 4 methods (getPhysicalTableName, getPhysicalName) return an unexpected value - * when fullTableName is in default schema and fullTableName contains a dot. For example, - * if fullTableName is in default schema and fullTableName is "AAA.BBB", the expected hbase table - * name is "AAA.BBB" but these methods return "AAA:BBB". - */ - public static TableName getPhysicalTableName(String fullTableName, ReadOnlyProps readOnlyProps) { - return getPhysicalName(Bytes.toBytes(fullTableName), readOnlyProps); - } - - public static TableName getPhysicalTableName(byte[] fullTableName, Configuration conf) { - return getPhysicalTableName(fullTableName, isNamespaceMappingEnabled( - isSystemTable(fullTableName) ? PTableType.SYSTEM : null, conf)); - } - - public static TableName getPhysicalName(byte[] fullTableName, ReadOnlyProps readOnlyProps) { - return getPhysicalTableName(fullTableName, - isNamespaceMappingEnabled(isSystemTable(fullTableName) ? 
PTableType.SYSTEM : null, readOnlyProps)); - } - - public static TableName getPhysicalTableName(byte[] fullTableName, boolean isNamespaceMappingEnabled) { - if (indexOf(fullTableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE) > 0 - || !isNamespaceMappingEnabled) { return TableName.valueOf(fullTableName); } - String tableName = getTableNameFromFullName(fullTableName); - String schemaName = getSchemaNameFromFullName(fullTableName); - return TableName.valueOf(schemaName, tableName); - } - - public static PName getPhysicalHBaseTableName(byte[] fullTableName, boolean isNamespaceMappingEnabled) { - String tableName = getTableNameFromFullName(fullTableName); - String schemaName = getSchemaNameFromFullName(fullTableName); - return getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMappingEnabled); - } - - public static PName getPhysicalHBaseTableName(String schemaName, String tableName, boolean isNamespaceMapped) { - if (!isNamespaceMapped) { return PNameFactory.newName(getTableNameAsBytes(schemaName, tableName)); } - if (schemaName == null || schemaName.isEmpty()) { return PNameFactory.newName(tableName); } - return PNameFactory.newName(schemaName + QueryConstants.NAMESPACE_SEPARATOR + tableName); - } - - public static String replaceNamespaceSeparator(PName name) { - return name.getString().replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); - } - - public static boolean isSchemaCheckRequired(PTableType tableType, ReadOnlyProps props) { - return (PTableType.TABLE.equals(tableType) || PTableType.VIEW.equals(tableType)) - && isNamespaceMappingEnabled(tableType, props); - } - - public static boolean isNamespaceMappingEnabled(PTableType type, Configuration conf) { - return conf.getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, - QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) - && (type == null || !PTableType.SYSTEM.equals(type) - || conf.getBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, - QueryServicesOptions.DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE)); - } - - public static boolean isNamespaceMappingEnabled(PTableType type, ReadOnlyProps readOnlyProps) { - return readOnlyProps.getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, - QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) - && (type == null || !PTableType.SYSTEM.equals(type) - || readOnlyProps.getBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, - QueryServicesOptions.DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE)); - } - - public static byte[] getParentTableNameFromIndexTable(byte[] physicalTableName, String indexPrefix) { - String tableName = Bytes.toString(physicalTableName); - return getParentTableNameFromIndexTable(tableName, indexPrefix) - .getBytes(StandardCharsets.UTF_8); - } - - public static String getParentTableNameFromIndexTable(String physicalTableName, String indexPrefix) { - if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - String schemaNameFromFullName = getSchemaNameFromFullName(physicalTableName, - QueryConstants.NAMESPACE_SEPARATOR); - String tableNameFromFullName = getTableNameFromFullName(physicalTableName, - QueryConstants.NAMESPACE_SEPARATOR); - return schemaNameFromFullName + QueryConstants.NAMESPACE_SEPARATOR - + getStrippedName(tableNameFromFullName, indexPrefix); - } - return getStrippedName(physicalTableName, indexPrefix); - } - - private static String getStrippedName(String physicalTableName, String indexPrefix) { - return physicalTableName.indexOf(indexPrefix) == 0 ? 
physicalTableName.substring(indexPrefix.length()) - : physicalTableName; - } - - /** - * Calculate the HBase HTable name. - * - * @param schemaName import schema name, can be null - * @param tableName import table name - * @return the byte representation of the HTable - */ - public static String getQualifiedTableName(String schemaName, String tableName) { - if (schemaName != null && !schemaName.isEmpty()) { - return String.format("%s.%s", normalizeIdentifier(schemaName), - normalizeIdentifier(tableName)); + return table.getColumnFamily(familyName.getString()) + .getPColumnForColumnName(column.getName().getString()).getName().getString(); + } catch (ColumnFamilyNotFoundException e) { + continue; // Shouldn't happen + } catch (ColumnNotFoundException e) { + continue; + } + } + } + return null; + } + + public static String toString(byte[][] values) { + if (values == null) { + return "null"; + } + StringBuilder buf = new StringBuilder("["); + for (byte[] value : values) { + buf.append(Bytes.toStringBinary(value)); + buf.append(','); + } + buf.setCharAt(buf.length() - 1, ']'); + return buf.toString(); + } + + public static String toString(PDataType type, byte[] value) { + return toString(type, value, 0, value.length); + } + + public static String toString(PDataType type, ImmutableBytesWritable value) { + return toString(type, value.get(), value.getOffset(), value.getLength()); + } + + public static String toString(PDataType type, byte[] value, int offset, int length) { + boolean isString = type.isCoercibleTo(PVarchar.INSTANCE); + return isString + ? ("'" + type.toObject(value).toString() + "'") + : type.toObject(value, offset, length).toString(); + } + + public static byte[] getEmptyColumnFamily(PName defaultColumnFamily, List families, + boolean isLocalIndex) { + return families.isEmpty() + ? defaultColumnFamily == null + ? (isLocalIndex + ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES + : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) + : defaultColumnFamily.getBytes() + : families.get(0).getName().getBytes(); + } + + public static byte[] getEmptyColumnFamily(PTable table) { + List families = table.getColumnFamilies(); + return families.isEmpty() + ? table.getDefaultFamilyName() == null + ? (table.getIndexType() == IndexType.LOCAL + ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES + : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) + : table.getDefaultFamilyName().getBytes() + : families.get(0).getName().getBytes(); + } + + public static byte[] getEmptyColumnQualifier(PTable table) { + return table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + } + + public static String getEmptyColumnFamilyAsString(PTable table) { + List families = table.getColumnFamilies(); + return families.isEmpty() + ? table.getDefaultFamilyName() == null + ? (table.getIndexType() == IndexType.LOCAL + ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY + : QueryConstants.DEFAULT_COLUMN_FAMILY) + : table.getDefaultFamilyName().getString() + : families.get(0).getName().getString(); + } + + public static ImmutableBytesPtr getEmptyColumnFamilyPtr(PTable table) { + List families = table.getColumnFamilies(); + return families.isEmpty() + ? table.getDefaultFamilyName() == null + ? (table.getIndexType() == IndexType.LOCAL + ? 
QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR + : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR) + : table.getDefaultFamilyName().getBytesPtr() + : families.get(0).getName().getBytesPtr(); + } + + public static boolean isMetaTable(byte[] tableName) { + return Bytes.compareTo(tableName, SYSTEM_CATALOG_NAME_BYTES) == 0 || Bytes.compareTo(tableName, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true).getName()) == 0; + } + + public static boolean isFunctionTable(byte[] tableName) { + return Bytes.compareTo(tableName, SYSTEM_FUNCTION_NAME_BYTES) == 0 || Bytes.compareTo(tableName, + SchemaUtil.getPhysicalTableName(SYSTEM_FUNCTION_NAME_BYTES, true).getName()) == 0; + } + + public static boolean isStatsTable(byte[] tableName) { + return Bytes.compareTo(tableName, SYSTEM_STATS_NAME_BYTES) == 0 || Bytes.compareTo(tableName, + SchemaUtil.getPhysicalTableName(SYSTEM_STATS_NAME_BYTES, true).getName()) == 0; + } + + public static boolean isSequenceTable(byte[] tableName) { + return Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES) == 0 + || Bytes + .compareTo(tableName, SchemaUtil + .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, true).getName()) + == 0; + } + + public static boolean isTaskTable(byte[] tableName) { + return Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES) == 0 + || Bytes + .compareTo(tableName, SchemaUtil + .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME_BYTES, true).getName()) + == 0; + } + + public static boolean isChildLinkTable(byte[] tableName) { + return Bytes.compareTo(tableName, SYSTEM_CHILD_LINK_NAME_BYTES) == 0 + || Bytes.compareTo(tableName, + SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, true).getName()) == 0; + } + + public static boolean isSequenceTable(PTable table) { + return PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME.equals(table.getName().getString()); + } + + public static boolean isTaskTable(PTable table) { + return PhoenixDatabaseMetaData.SYSTEM_TASK_NAME.equals(table.getName().getString()); + } + + public static boolean isMetaTable(PTable table) { + return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(table.getSchemaName().getString()) + && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(table.getTableName().getString()); + } + + public static boolean isMetaTable(byte[] schemaName, byte[] tableName) { + return Bytes.compareTo(schemaName, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES) == 0 + && Bytes.compareTo(tableName, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES) == 0; + } + + public static boolean isMetaTable(String schemaName, String tableName) { + return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName) + && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(tableName); + } + + public static boolean isSystemTable(byte[] fullTableName) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); + if (QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) return true; + return false; + } + + // Given the splits and the rowKeySchema, find out the keys that + public static byte[][] processSplits(byte[][] splits, Collection pkColumns, + Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException { + // FIXME: shouldn't this return if splits.length == 0? + if (splits == null) return null; + // We do not accept user specified splits if the table is salted and we specify + // defaultRowKeyOrder. In this case, + // throw an exception. 
+ if (splits.length > 0 && saltBucketNum != null && defaultRowKeyOrder) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_SPLITS_ON_SALTED_TABLE).build() + .buildException(); + } + // If the splits are not specified and table is salted, pre-split the table. + if (splits.length == 0 && saltBucketNum != null) { + splits = SaltingUtil.getSalteByteSplitPoints(saltBucketNum); + } + byte[][] newSplits = new byte[splits.length][]; + for (int i = 0; i < splits.length; i++) { + // Salted tables don't need this processing, but the Split policy uses it for all + // tables, so it makes sense to apply to those to be consistent + newSplits[i] = processSplit(splits[i], pkColumns); + } + return newSplits; + } + + // Go through each slot in the schema and try match it with the split byte array. If the split + // does not confer to the schema, extends its length to match the schema. + public static byte[] processSplit(byte[] split, Collection pkColumns) { + int pos = 0, offset = 0, maxOffset = split.length; + Iterator iterator = pkColumns.iterator(); + while (pos < pkColumns.size()) { + PColumn column = iterator.next(); + if (column.getDataType().isFixedWidth()) { // Fixed width + int length = SchemaUtil.getFixedByteSize(column); + if (maxOffset - offset < length) { + // The split truncates the field. Fill in the rest of the part and any fields that + // are missing after this field. + int fillInLength = length - (maxOffset - offset); + fillInLength += estimatePartLength(pos + 1, iterator); + return ByteUtil.fillKey(split, split.length + fillInLength); + } + // Account for this field, move to next position; + offset += length; + pos++; + } else { // Variable length + // If we are the last slot, then we are done. Nothing needs to be filled in. + if (pos == pkColumns.size() - 1) { + break; + } + while (offset < maxOffset && split[offset] != SEPARATOR_BYTE) { + offset++; + } + if (offset == maxOffset) { + // The var-length field does not end with a separator and it's not the last field. + int fillInLength = 1; // SEPARATOR byte for the current var-length slot. + fillInLength += estimatePartLength(pos + 1, iterator); + return ByteUtil.fillKey(split, split.length + fillInLength); + } + // Move to the next position; + offset += 1; // skip separator; + pos++; + } + } + return split; + } + + // Estimate the key length after pos slot for schema. + private static int estimatePartLength(int pos, Iterator iterator) { + int length = 0; + while (iterator.hasNext()) { + PColumn column = iterator.next(); + if (column.getDataType().isFixedWidth()) { + length += SchemaUtil.getFixedByteSize(column); + } else { + length += 1; // SEPARATOR byte. 
+ } + } + return length; + } + + public static String getEscapedTableName(String schemaName, String tableName) { + if (schemaName == null || schemaName.length() == 0) { + return "\"" + tableName + "\""; + } + return "\"" + schemaName + "\"" + QueryConstants.NAME_SEPARATOR + "\"" + tableName + "\""; + } + + protected static PhoenixConnection addMetaDataColumn(PhoenixConnection conn, long scn, + String columnDef) throws SQLException { + PhoenixConnection metaConnection = null; + Statement stmt = null; + try { + metaConnection = new PhoenixConnection(conn, scn); + try { + stmt = metaConnection.createStatement(); + stmt.executeUpdate("ALTER TABLE SYSTEM.\"TABLE\" ADD IF NOT EXISTS " + columnDef); + return metaConnection; + } finally { + if (stmt != null) { + stmt.close(); + } + } + } finally { + if (metaConnection != null) { + metaConnection.close(); + } + } + } + + public static boolean columnExists(PTable table, String columnName) { + try { + table.getColumnForColumnName(columnName); + return true; + } catch (ColumnNotFoundException e) { + return false; + } catch (AmbiguousColumnException e) { + return true; + } + } + + public static String getSchemaNameFromFullName(String tableName) { + if (isExistingTableMappedToPhoenixName(tableName)) { + return StringUtil.EMPTY_STRING; + } + if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + return getSchemaNameFromFullName(tableName, QueryConstants.NAMESPACE_SEPARATOR); + } + return getSchemaNameFromFullName(tableName, QueryConstants.NAME_SEPARATOR); + } + + public static String getSchemaNameFromFullName(String tableName, String separator) { + int index = tableName.indexOf(separator); + if (index < 0) { + return StringUtil.EMPTY_STRING; + } + return tableName.substring(0, index); + } + + private static int indexOf(byte[] bytes, byte b) { + for (int i = 0; i < bytes.length; i++) { + if (bytes[i] == b) { + return i; + } + } + return -1; + } + + public static String getSchemaNameFromFullName(byte[] tableName) { + if (tableName == null) { + return null; + } + if (isExistingTableMappedToPhoenixName(Bytes.toString(tableName))) { + return StringUtil.EMPTY_STRING; + } + int index = indexOf(tableName, QueryConstants.NAME_SEPARATOR_BYTE); + if (index < 0) { + index = indexOf(tableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE); + if (index < 0) { + return StringUtil.EMPTY_STRING; + } + } + return Bytes.toString(tableName, 0, index); + } + + public static String getTableNameFromFullName(byte[] tableName) { + if (tableName == null) { + return null; + } + if (isExistingTableMappedToPhoenixName(Bytes.toString(tableName))) { + return Bytes.toString(tableName); + } + int index = indexOf(tableName, QueryConstants.NAME_SEPARATOR_BYTE); + if (index < 0) { + index = indexOf(tableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE); + if (index < 0) { + return Bytes.toString(tableName); + } + } + return Bytes.toString(tableName, index + 1, tableName.length - index - 1); + } + + public static String getTableNameFromFullName(String tableName) { + if (isExistingTableMappedToPhoenixName(tableName)) { + return tableName; + } + if (tableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + return getTableNameFromFullName(tableName, QueryConstants.NAMESPACE_SEPARATOR); + } + return getTableNameFromFullName(tableName, QueryConstants.NAME_SEPARATOR); + } + + public static String getTableNameFromFullName(String tableName, String separator) { + int index = tableName.indexOf(separator); + if (index < 0) { + return tableName; + } + return tableName.substring(index + 1, 
tableName.length()); + } + + public static byte[] getTableKeyFromFullName(String fullTableName) { + int index = fullTableName.indexOf(QueryConstants.NAME_SEPARATOR); + if (index < 0) { + index = fullTableName.indexOf(QueryConstants.NAMESPACE_SEPARATOR); + if (index < 0) { + return getTableKey(null, null, fullTableName); + } + } + String schemaName = fullTableName.substring(0, index); + String tableName = fullTableName.substring(index + 1); + return getTableKey(null, schemaName, tableName); + } + + private static int getTerminatorCount(RowKeySchema schema) { + int nTerminators = 0; + for (int i = 0; i < schema.getFieldCount(); i++) { + Field field = schema.getField(i); + // We won't have a terminator on the last PK column + // unless it is variable length and exclusive, but + // having the extra byte irregardless won't hurt anything + if (!field.getDataType().isFixedWidth()) { + nTerminators++; + } + } + return nTerminators; + } + + public static int getMaxKeyLength(RowKeySchema schema, List> slots) { + int maxKeyLength = getTerminatorCount(schema) * 2; + for (List slot : slots) { + int maxSlotLength = 0; + for (KeyRange range : slot) { + int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length); + if (maxSlotLength < maxRangeLength) { + maxSlotLength = maxRangeLength; + } + } + maxKeyLength += maxSlotLength; + } + return maxKeyLength; + } + + public static int getFixedByteSize(PDatum e) { + assert (e.getDataType().isFixedWidth()); + Integer maxLength = e.getMaxLength(); + return maxLength == null ? e.getDataType().getByteSize() : maxLength; + } + + public static short getMaxKeySeq(PTable table) { + int offset = 0; + if (table.getBucketNum() != null) { + offset++; + } + // TODO: for tenant-specific table on tenant-specific connection, + // we should subtract one for tenant column and another one for + // index ID + return (short) (table.getPKColumns().size() - offset); + } + + public static int getPKPosition(PTable table, PColumn column) { + // TODO: when PColumn has getPKPosition, use that instead + return table.getPKColumns().indexOf(column); + } + + public static String getEscapedFullColumnName(String fullColumnName) { + if (fullColumnName.startsWith(ESCAPE_CHARACTER)) { + return fullColumnName; + } + int index = fullColumnName.indexOf(QueryConstants.NAME_SEPARATOR); + if (index < 0) { + return getEscapedArgument(fullColumnName); + } + String columnFamily = fullColumnName.substring(0, index); + String columnName = fullColumnName.substring(index + 1); + return getEscapedArgument(columnFamily) + QueryConstants.NAME_SEPARATOR + + getEscapedArgument(columnName); + } + + public static List getEscapedFullColumnNames(List fullColumnNames) { + return Lists.newArrayList(Iterables.transform(fullColumnNames, new Function() { + @Override + public String apply(String col) { + return getEscapedFullColumnName(col); + } + })); + } + + public static String getEscapedFullTableName(String fullTableName) { + final String schemaName = getSchemaNameFromFullName(fullTableName); + final String tableName = getTableNameFromFullName(fullTableName); + return getEscapedTableName(schemaName, tableName); + } + + /** + * Escapes the given argument with {@value #ESCAPE_CHARACTER} + * @param argument any non null value. 
+ */ + public static String getEscapedArgument(String argument) { + Preconditions.checkNotNull(argument, "Argument passed cannot be null"); + return ESCAPE_CHARACTER + argument + ESCAPE_CHARACTER; + } + + /** + * @return a fully qualified column name in the format: "CFNAME"."COLNAME" or "COLNAME" depending + * on whether or not there is a column family name present. + */ + public static String getQuotedFullColumnName(PColumn pCol) { + checkNotNull(pCol); + String columnName = pCol.getName().getString(); + String columnFamilyName = + pCol.getFamilyName() != null ? pCol.getFamilyName().getString() : null; + return getQuotedFullColumnName(columnFamilyName, columnName); + } + + /** + * @return a fully qualified column name in the format: "CFNAME"."COLNAME" or "COLNAME" depending + * on whether or not there is a column family name present. + */ + public static String getQuotedFullColumnName(@Nullable String columnFamilyName, + String columnName) { + checkArgument(!isNullOrEmpty(columnName), "Column name cannot be null or empty"); + return columnFamilyName == null + ? ("\"" + columnName + "\"") + : ("\"" + columnFamilyName + "\"" + QueryConstants.NAME_SEPARATOR + "\"" + columnName + "\""); + } + + public static boolean hasHTableDescriptorProps(Map tableProps) { + int pTablePropCount = 0; + for (String prop : tableProps.keySet()) { + if ( + TableProperty.isPhoenixTableProperty(prop) + || prop.equals(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME) + ) { + pTablePropCount++; + } + } + return tableProps.size() - pTablePropCount > 0; + } + + /** + * Replaces all occurrences of {@link #ESCAPE_CHARACTER} with an empty character. + */ + public static String getUnEscapedFullColumnName(String fullColumnName) { + checkArgument(!isNullOrEmpty(fullColumnName), "Column name cannot be null or empty"); + fullColumnName = fullColumnName.replaceAll(ESCAPE_CHARACTER, ""); + return fullColumnName.trim(); + } + + /** + * Return the separator byte to use based on: + * @param rowKeyOrderOptimizable whether or not the table may optimize descending row keys. If the + * table has no descending row keys, this will be true. Also, if the + * table has been upgraded (using a new -u option for psql.py), then + * it'll be true + * @param isNullValue whether or not the value is null. We use a null byte still if the + * value is null regardless of sort order since nulls will always + * sort first this way. + * @param sortOrder whether the value sorts ascending or descending. + * @return the byte to use as the separator + */ + public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, + SortOrder sortOrder) { + return !rowKeyOrderOptimizable || isNullValue || sortOrder == SortOrder.ASC + ? SEPARATOR_BYTE + : QueryConstants.DESC_SEPARATOR_BYTE; + } + + /** + * Get separator bytes for Variable length encoded data type (e.g. VARBINARY_ENCODED). + * @param rowKeyOrderOptimizable Whether the table may optimize descending row keys. + * @param isNullValue Whether the value is null. + * @param sortOrder Whether the value sorts ascending or descending. + * @return The separator byte array. + */ + public static byte[] getSeparatorBytesForVarBinaryEncoded(boolean rowKeyOrderOptimizable, + boolean isNullValue, SortOrder sortOrder) { + return !rowKeyOrderOptimizable || isNullValue || sortOrder == SortOrder.ASC + ? QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES + : QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES; + } + + /** + * Return separator bytes depending on the data type. 
+ * @param pDataType Data type used. + * @param rowKeyOrderOptimizable Whether the table may optimize descending row keys. + * @param isNullValue Whether the value is null. + * @param sortOrder Whether the value sorts ascending or descending. + * @return The separator byte array. + */ + public static byte[] getSeparatorBytes(final PDataType pDataType, + final boolean rowKeyOrderOptimizable, final boolean isNullValue, final SortOrder sortOrder) { + if (pDataType == PVarbinaryEncoded.INSTANCE) { + return getSeparatorBytesForVarBinaryEncoded(rowKeyOrderOptimizable, isNullValue, sortOrder); + } else { + return new byte[] { getSeparatorByte(rowKeyOrderOptimizable, isNullValue, sortOrder) }; + } + } + + public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, + Field f) { + return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, f.getSortOrder()); + } + + public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, + Expression e) { + return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, e.getSortOrder()); + } + + /** + * Get list of ColumnInfos that contain Column Name and its associated PDataType for an import. + * The supplied list of columns can be null -- if it is non-null, it represents a user-supplied + * list of columns to be imported. + * @param conn Phoenix connection from which metadata will be read + * @param tableName Phoenix table name whose columns are to be checked. Can include a schema name + * @param columns user-supplied list of import columns, can be null + * @param strict if true, an exception will be thrown if unknown columns are supplied + */ + public static List generateColumnInfo(Connection conn, String tableName, + List columns, boolean strict) throws SQLException { + Map columnNameToTypeMap = Maps.newLinkedHashMap(); + Set ambiguousColumnNames = new HashSet(); + Map fullColumnNameToTypeMap = Maps.newLinkedHashMap(); + DatabaseMetaData dbmd = conn.getMetaData(); + int unfoundColumnCount = 0; + // TODO: escape wildcard characters here because we don't want that + // behavior here + String escapedTableName = StringUtil.escapeLike(tableName); + String[] schemaAndTable = escapedTableName.split("\\."); + ResultSet rs = null; + try { + rs = dbmd.getColumns(null, (schemaAndTable.length == 1 ? "" : schemaAndTable[0]), + (schemaAndTable.length == 1 ? escapedTableName : schemaAndTable[1]), null); + while (rs.next()) { + String colName = rs.getString(QueryUtil.COLUMN_NAME_POSITION); + String colFam = rs.getString(QueryUtil.COLUMN_FAMILY_POSITION); + + // use family qualifier, if available, otherwise, use column name + String fullColumn = (colFam == null ? colName : String.format("%s.%s", colFam, colName)); + String sqlTypeName = rs.getString(QueryUtil.DATA_TYPE_NAME_POSITION); + + // allow for both bare and family qualified names. + if (columnNameToTypeMap.keySet().contains(colName)) { + ambiguousColumnNames.add(colName); + } + columnNameToTypeMap.put(colName, PDataType.fromSqlTypeName(sqlTypeName).getSqlType()); + fullColumnNameToTypeMap.put(fullColumn, + PDataType.fromSqlTypeName(sqlTypeName).getSqlType()); + } + if (columnNameToTypeMap.isEmpty()) { + throw new IllegalArgumentException("Table " + tableName + " not found"); + } + } finally { + if (rs != null) { + rs.close(); + } + } + List columnInfoList = Lists.newArrayList(); + Set unresolvedColumnNames = new TreeSet(); + if (columns == null) { + // use family qualified names by default, if no columns are specified. 
+ for (Map.Entry entry : fullColumnNameToTypeMap.entrySet()) { + columnInfoList.add(new ColumnInfo(entry.getKey(), entry.getValue())); + } + } else { + // Leave "null" as indication to skip b/c it doesn't exist + for (int i = 0; i < columns.size(); i++) { + String columnName = columns.get(i).trim(); + Integer sqlType = null; + if (fullColumnNameToTypeMap.containsKey(columnName)) { + sqlType = fullColumnNameToTypeMap.get(columnName); + } else if (columnNameToTypeMap.containsKey(columnName)) { + if (ambiguousColumnNames.contains(columnName)) { + unresolvedColumnNames.add(columnName); + } + // fall back to bare column name. + sqlType = columnNameToTypeMap.get(columnName); + } + if (unresolvedColumnNames.size() > 0) { + StringBuilder exceptionMessage = new StringBuilder(); + boolean first = true; + exceptionMessage + .append("Unable to resolve these column names to a single column family:\n"); + for (String col : unresolvedColumnNames) { + if (first) first = false; + else exceptionMessage.append(","); + exceptionMessage.append(col); + } + exceptionMessage.append("\nAvailable columns with column families:\n"); + first = true; + for (String col : fullColumnNameToTypeMap.keySet()) { + if (first) first = false; + else exceptionMessage.append(","); + exceptionMessage.append(col); + } + throw new SQLException(exceptionMessage.toString()); + } + + if (sqlType == null) { + if (strict) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_NOT_FOUND) + .setColumnName(columnName).setTableName(tableName).build().buildException(); + } + unfoundColumnCount++; } else { - return normalizeIdentifier(tableName); - } - } + columnInfoList.add(new ColumnInfo(columnName, sqlType)); + } + } + if (unfoundColumnCount == columns.size()) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_NOT_FOUND) + .setColumnName(Arrays.toString(columns.toArray(new String[0]))).setTableName(tableName) + .build().buildException(); + } + } + return columnInfoList; + } + + public static boolean hasRowTimestampColumn(PTable table) { + return table.getRowTimestampColPos() > 0; + } + + public static byte[] getSchemaKey(String schemaName) { + return SchemaUtil.getTableKey(null, schemaName, MetaDataClient.EMPTY_TABLE); + } + + public static PName getPhysicalHBaseTableName(PName schemaName, PName tableName, + boolean isNamespaceMapped) { + return getPhysicalHBaseTableName(schemaName == null ? null : schemaName.toString(), + tableName.toString(), isNamespaceMapped); + } + + public static PName getPhysicalHBaseTableName(byte[] schemaName, byte[] tableName, + boolean isNamespaceMapped) { + return getPhysicalHBaseTableName(Bytes.toString(schemaName), Bytes.toString(tableName), + isNamespaceMapped); + } + + /** + * Note: the following 4 methods (getPhysicalTableName, getPhysicalName) return an unexpected + * value when fullTableName is in default schema and fullTableName contains a dot. For example, if + * fullTableName is in default schema and fullTableName is "AAA.BBB", the expected hbase table + * name is "AAA.BBB" but these methods return "AAA:BBB". + */ + public static TableName getPhysicalTableName(String fullTableName, ReadOnlyProps readOnlyProps) { + return getPhysicalName(Bytes.toBytes(fullTableName), readOnlyProps); + } + + public static TableName getPhysicalTableName(byte[] fullTableName, Configuration conf) { + return getPhysicalTableName(fullTableName, + isNamespaceMappingEnabled(isSystemTable(fullTableName) ? 
PTableType.SYSTEM : null, conf)); + } + + public static TableName getPhysicalName(byte[] fullTableName, ReadOnlyProps readOnlyProps) { + return getPhysicalTableName(fullTableName, isNamespaceMappingEnabled( + isSystemTable(fullTableName) ? PTableType.SYSTEM : null, readOnlyProps)); + } + + public static TableName getPhysicalTableName(byte[] fullTableName, + boolean isNamespaceMappingEnabled) { + if ( + indexOf(fullTableName, QueryConstants.NAMESPACE_SEPARATOR_BYTE) > 0 + || !isNamespaceMappingEnabled + ) { + return TableName.valueOf(fullTableName); + } + String tableName = getTableNameFromFullName(fullTableName); + String schemaName = getSchemaNameFromFullName(fullTableName); + return TableName.valueOf(schemaName, tableName); + } + + public static PName getPhysicalHBaseTableName(byte[] fullTableName, + boolean isNamespaceMappingEnabled) { + String tableName = getTableNameFromFullName(fullTableName); + String schemaName = getSchemaNameFromFullName(fullTableName); + return getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMappingEnabled); + } + + public static PName getPhysicalHBaseTableName(String schemaName, String tableName, + boolean isNamespaceMapped) { + if (!isNamespaceMapped) { + return PNameFactory.newName(getTableNameAsBytes(schemaName, tableName)); + } + if (schemaName == null || schemaName.isEmpty()) { + return PNameFactory.newName(tableName); + } + return PNameFactory.newName(schemaName + QueryConstants.NAMESPACE_SEPARATOR + tableName); + } + + public static String replaceNamespaceSeparator(PName name) { + return name.getString().replace(QueryConstants.NAMESPACE_SEPARATOR, + QueryConstants.NAME_SEPARATOR); + } + + public static boolean isSchemaCheckRequired(PTableType tableType, ReadOnlyProps props) { + return (PTableType.TABLE.equals(tableType) || PTableType.VIEW.equals(tableType)) + && isNamespaceMappingEnabled(tableType, props); + } + + public static boolean isNamespaceMappingEnabled(PTableType type, Configuration conf) { + return conf.getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, + QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) + && (type == null || !PTableType.SYSTEM.equals(type) + || conf.getBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, + QueryServicesOptions.DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE)); + } + + public static boolean isNamespaceMappingEnabled(PTableType type, ReadOnlyProps readOnlyProps) { + return readOnlyProps.getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, + QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED) + && (type == null || !PTableType.SYSTEM.equals(type) + || readOnlyProps.getBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, + QueryServicesOptions.DEFAULT_IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE)); + } + + public static byte[] getParentTableNameFromIndexTable(byte[] physicalTableName, + String indexPrefix) { + String tableName = Bytes.toString(physicalTableName); + return getParentTableNameFromIndexTable(tableName, indexPrefix) + .getBytes(StandardCharsets.UTF_8); + } + + public static String getParentTableNameFromIndexTable(String physicalTableName, + String indexPrefix) { + if (physicalTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + String schemaNameFromFullName = + getSchemaNameFromFullName(physicalTableName, QueryConstants.NAMESPACE_SEPARATOR); + String tableNameFromFullName = + getTableNameFromFullName(physicalTableName, QueryConstants.NAMESPACE_SEPARATOR); + return schemaNameFromFullName + QueryConstants.NAMESPACE_SEPARATOR + + getStrippedName(tableNameFromFullName, 
indexPrefix); + } + return getStrippedName(physicalTableName, indexPrefix); + } + + private static String getStrippedName(String physicalTableName, String indexPrefix) { + return physicalTableName.indexOf(indexPrefix) == 0 + ? physicalTableName.substring(indexPrefix.length()) + : physicalTableName; + } + + /** + * Calculate the HBase HTable name. + * @param schemaName import schema name, can be null + * @param tableName import table name + * @return the byte representation of the HTable + */ + public static String getQualifiedTableName(String schemaName, String tableName) { + if (schemaName != null && !schemaName.isEmpty()) { + return String.format("%s.%s", normalizeIdentifier(schemaName), + normalizeIdentifier(tableName)); + } else { + return normalizeIdentifier(tableName); + } + } + + /** + * Calculate the Phoenix Table name without normalization + * @param schemaName import schema name, can be null + * @param tableName import table name + * @return the qualified Phoenix table name, from the non normalized schema and table + */ + public static String getQualifiedPhoenixTableName(String schemaName, String tableName) { + if (schemaName != null && !schemaName.isEmpty()) { + return String.format("%s.%s", schemaName, tableName); + } else { + return tableName; + } + } + + public static int getIsNullableInt(boolean isNullable) { + return isNullable ? ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls; + } + + public static boolean hasGlobalIndex(PTable table) { + for (PTable index : table.getIndexes()) { + if (IndexUtil.isGlobalIndex(index)) { + return true; + } + } + return false; + } + + public static boolean isNamespaceMapped(Result currentResult) { + Cell isNamespaceMappedCell = + currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES); + return isNamespaceMappedCell != null + && (boolean) PBoolean.INSTANCE.toObject(CellUtil.cloneValue(isNamespaceMappedCell)); + } + + public static boolean isLogTable(String schemaName, String tableName) { + return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName) + && PhoenixDatabaseMetaData.SYSTEM_LOG_TABLE.equals(tableName); + } + + /** + * Return the normalized columnName for {@link ColumnParseNode}, because {@link ColumnParseNode} + * ctor have already called {SchemaUtil#normalizeIdentifier} for + * {@link ColumnParseNode#getName},so just return {@link ColumnParseNode#getName}. + */ + public static String getNormalizedColumnName(ColumnParseNode columnParseNode) { + return columnParseNode.getName(); + } + + public static String getFullTableNameWithQuotes(String schemaName, String tableName, + boolean schemaNameCaseSensitive, boolean tableNameCaseSensitive) { + String fullTableName; - /** - * Calculate the Phoenix Table name without normalization - * - * @param schemaName import schema name, can be null - * @param tableName import table name - * @return the qualified Phoenix table name, from the non normalized schema and table - */ - public static String getQualifiedPhoenixTableName(String schemaName, String tableName) { - if (schemaName != null && !schemaName.isEmpty()) { - return String.format("%s.%s", schemaName, tableName); - } else { - return tableName; - } + if (tableNameCaseSensitive) { + fullTableName = "\"" + tableName + "\""; + } else { + fullTableName = tableName; } - public static int getIsNullableInt(boolean isNullable) { - return isNullable ? 
ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls; - } - - public static boolean hasGlobalIndex(PTable table) { - for (PTable index : table.getIndexes()) { - if (IndexUtil.isGlobalIndex(index)) { - return true; - } - } - return false; + if (schemaName != null && schemaName.length() != 0) { + if (schemaNameCaseSensitive) { + fullTableName = "\"" + schemaName + "\"" + QueryConstants.NAME_SEPARATOR + fullTableName; + } else { + fullTableName = schemaName + QueryConstants.NAME_SEPARATOR + fullTableName; + } } + return fullTableName; + } - public static boolean isNamespaceMapped(Result currentResult) { - Cell isNamespaceMappedCell = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES); - return isNamespaceMappedCell!=null && (boolean) PBoolean.INSTANCE.toObject(CellUtil.cloneValue(isNamespaceMappedCell)); - } + public static String getFullTableNameWithQuotes(String schemaName, String tableName) { + return getFullTableNameWithQuotes(schemaName, tableName, quotesNeededForSchema(schemaName), + quotesNeededForTable(tableName)); + } - public static boolean isLogTable(String schemaName, String tableName) { - return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName) && PhoenixDatabaseMetaData.SYSTEM_LOG_TABLE.equals(tableName); + private static boolean quotesNeededForSchema(String name) { + if (Strings.isNullOrEmpty(name) || name.equals(QueryConstants.DEFAULT_COLUMN_FAMILY)) { + return false; } + return quotesNeededForTable(name); + } - /** - * Return the normalized columnName for {@link ColumnParseNode}, - * because {@link ColumnParseNode} ctor have already called {SchemaUtil#normalizeIdentifier} - * for {@link ColumnParseNode#getName},so just return {@link ColumnParseNode#getName}. - * @param columnParseNode - * @return - */ - public static String getNormalizedColumnName(ColumnParseNode columnParseNode) { - return columnParseNode.getName(); + private static boolean quotesNeededForColumn(String name) { + if (!name.equals("_INDEX_ID") && name.startsWith("_")) { + return true; } + return isQuotesNeeded(name) || containsLowerCase(name); + } - public static String getFullTableNameWithQuotes(String schemaName, String tableName, - boolean schemaNameCaseSensitive, boolean tableNameCaseSensitive) { - String fullTableName; - - if (tableNameCaseSensitive) { - fullTableName = "\"" + tableName + "\""; - } else { - fullTableName = tableName; - } - - if (schemaName != null && schemaName.length() != 0) { - if (schemaNameCaseSensitive) { - fullTableName = "\"" + schemaName + "\"" + QueryConstants.NAME_SEPARATOR + fullTableName; - } else { - fullTableName = schemaName + QueryConstants.NAME_SEPARATOR + fullTableName; - } - } - return fullTableName; - } - - public static String getFullTableNameWithQuotes(String schemaName, String tableName) { - return getFullTableNameWithQuotes(schemaName, tableName, - quotesNeededForSchema(schemaName), quotesNeededForTable(tableName)); - } - - private static boolean quotesNeededForSchema(String name) { - if (Strings.isNullOrEmpty(name) || name.equals(QueryConstants.DEFAULT_COLUMN_FAMILY)) { - return false; - } - return quotesNeededForTable(name); - } - - private static boolean quotesNeededForColumn(String name) { - if (!name.equals("_INDEX_ID") && name.startsWith("_")) { - return true; - } - return isQuotesNeeded(name) || containsLowerCase(name); + private static boolean quotesNeededForTable(String name) { + if (name.startsWith("_")) { + return true; } + return isQuotesNeeded(name) || containsLowerCase(name); + } - private 
static boolean quotesNeededForTable(String name) { - if (name.startsWith("_")) { - return true; - } - return isQuotesNeeded(name) || containsLowerCase(name); + public static String formatSchemaName(String name) { + if (quotesNeededForSchema(name)) { + name = "\"" + name + "\""; } + return name; + } - public static String formatSchemaName(String name) { - if (quotesNeededForSchema(name)) { - name = "\"" + name + "\""; - } - return name; + public static String formatColumnName(String name) { + if (quotesNeededForColumn(name)) { + name = "\"" + name + "\""; } + return name; + } - public static String formatColumnName(String name) { + public static String formatIndexColumnName(String name) { + String[] splitName = name.split("\\."); + if (splitName.length < 2) { + if (!name.contains("\"")) { if (quotesNeededForColumn(name)) { - name = "\"" + name + "\""; + name = "\"" + name + "\""; } return name; - } - - public static String formatIndexColumnName(String name) { - String[] splitName = name.split("\\."); - if (splitName.length < 2) { - if (!name.contains("\"")) { - if (quotesNeededForColumn(name)) { - name = "\"" + name + "\""; - } - return name; - } else if (name.startsWith("\"") && name.endsWith("\"")) { - if (quotesNeededForColumn(name.substring(1, name.length() - 1))) { - return name; - } else { - return name.substring(1, name.length() - 1); - } - } else { - return name; - } + } else if (name.startsWith("\"") && name.endsWith("\"")) { + if (quotesNeededForColumn(name.substring(1, name.length() - 1))) { + return name; } else { - return String.format("%s.%s", formatIndexColumnName(splitName[0]), - formatIndexColumnName(splitName[1])); - } - } - - public static boolean containsLowerCase(String name) { - if (Strings.isNullOrEmpty(name)) { - return false; - } - for (int i=0; i= (bytes.length - 1)) { - return false; - } - if (sortOrder == SortOrder.ASC || sortOrder == null) { - return bytes[offset] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[0] - && bytes[offset + 1] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[1]; - } else if (sortOrder == SortOrder.DESC) { - return bytes[offset] == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[0] - && bytes[offset + 1] == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[1]; - } - return false; - } + } else { + return name; + } + } else { + return String.format("%s.%s", formatIndexColumnName(splitName[0]), + formatIndexColumnName(splitName[1])); + } + } + + public static boolean containsLowerCase(String name) { + if (Strings.isNullOrEmpty(name)) { + return false; + } + for (int i = 0; i < name.toCharArray().length; i++) { + char charAtI = name.charAt(i); + if (Character.isLowerCase(charAtI)) { + return true; + } + } + return false; + } + + /** + * This function is needed so that SchemaExtractionTool returns a valid DDL with correct + * table/schema name that can be parsed + * @return quoted string if schema or table name has non-alphabetic characters in it. 
+ */ + public static String getPTableFullNameWithQuotes(String pSchemaName, String pTableName) { + String pTableFullName = getQualifiedTableName(pSchemaName, pTableName); + boolean tableNameNeedsQuotes = isQuotesNeeded(pTableName); + boolean schemaNameNeedsQuotes = isQuotesNeeded(pSchemaName); + + if (schemaNameNeedsQuotes) { + pSchemaName = "\"" + pSchemaName + "\""; + } + if (tableNameNeedsQuotes) { + pTableName = "\"" + pTableName + "\""; + } + if (tableNameNeedsQuotes || schemaNameNeedsQuotes) { + if (!Strings.isNullOrEmpty(pSchemaName)) { + return String.format("%s.%s", pSchemaName, pTableName); + } else { + return pTableName; + } + } + return pTableFullName; + } + + private static boolean isQuotesNeeded(String name) { + // first char numeric or non-underscore + if (Strings.isNullOrEmpty(name)) { + return false; + } + if (!Character.isAlphabetic(name.charAt(0)) && name.charAt(0) != '_') { + return true; + } + // for all other chars + // ex. name like z@@ will need quotes whereas t0001 will not need quotes + for (int i = 1; i < name.toCharArray().length; i++) { + char charAtI = name.charAt(i); + if (!(Character.isAlphabetic(charAtI)) && !Character.isDigit(charAtI) && charAtI != '_') { + return true; + } + } + return false; + } + + public static boolean areSeparatorBytesForVarBinaryEncoded(byte[] bytes, int offset, + SortOrder sortOrder) { + if (offset >= (bytes.length - 1)) { + return false; + } + if (sortOrder == SortOrder.ASC || sortOrder == null) { + return bytes[offset] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[0] + && bytes[offset + 1] == QueryConstants.VARBINARY_ENCODED_SEPARATOR_BYTES[1]; + } else if (sortOrder == SortOrder.DESC) { + return bytes[offset] == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[0] + && bytes[offset + 1] == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[1]; + } + return false; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SequenceUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SequenceUtil.java index b17246cc5ca..8f90ad72b6b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SequenceUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SequenceUtil.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.util; @@ -15,7 +22,6 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.schema.SequenceInfo; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.math.LongMath; @@ -24,77 +30,77 @@ */ public class SequenceUtil { - public static final long DEFAULT_NUM_SLOTS_TO_ALLOCATE = 1L; - - /** - * @return true if we limit of a sequence has been reached. - */ - public static boolean checkIfLimitReached(long currentValue, long minValue, long maxValue, - long incrementBy, long cacheSize, long numToAllocate) { - long nextValue = 0; - boolean increasingSeq = incrementBy > 0 ? true : false; - // advance currentValue while checking for overflow - try { - long incrementValue; - if (isBulkAllocation(numToAllocate)) { - // For bulk allocation we increment independent of cache size - incrementValue = LongMath.checkedMultiply(incrementBy, numToAllocate); - } else { - incrementValue = LongMath.checkedMultiply(incrementBy, cacheSize); - } - nextValue = LongMath.checkedAdd(currentValue, incrementValue); - } catch (ArithmeticException e) { - return true; - } + public static final long DEFAULT_NUM_SLOTS_TO_ALLOCATE = 1L; - // check if limit was reached - if ((increasingSeq && nextValue > maxValue) - || (!increasingSeq && nextValue < minValue)) { - return true; - } - return false; + /** Returns true if we limit of a sequence has been reached. */ + public static boolean checkIfLimitReached(long currentValue, long minValue, long maxValue, + long incrementBy, long cacheSize, long numToAllocate) { + long nextValue = 0; + boolean increasingSeq = incrementBy > 0 ? true : false; + // advance currentValue while checking for overflow + try { + long incrementValue; + if (isBulkAllocation(numToAllocate)) { + // For bulk allocation we increment independent of cache size + incrementValue = LongMath.checkedMultiply(incrementBy, numToAllocate); + } else { + incrementValue = LongMath.checkedMultiply(incrementBy, cacheSize); + } + nextValue = LongMath.checkedAdd(currentValue, incrementValue); + } catch (ArithmeticException e) { + return true; } - public static boolean checkIfLimitReached(long currentValue, long minValue, long maxValue, - long incrementBy, long cacheSize) throws SQLException { - return checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, DEFAULT_NUM_SLOTS_TO_ALLOCATE); - } - - public static boolean checkIfLimitReached(SequenceInfo info) throws SQLException { - return checkIfLimitReached(info.sequenceValue, info.minValue, info.maxValue, info.incrementBy, info.cacheSize, DEFAULT_NUM_SLOTS_TO_ALLOCATE); - } - - /** - * Returns true if the value of numToAllocate signals that a bulk allocation of sequence slots - * was requested. Prevents proliferation of same comparison in many places throughout the code. 
- */ - public static boolean isBulkAllocation(long numToAllocate) { - Preconditions.checkArgument(numToAllocate > 0); - return numToAllocate > DEFAULT_NUM_SLOTS_TO_ALLOCATE; - } - - public static boolean isCycleAllowed(long numToAllocate) { - return !isBulkAllocation(numToAllocate); - - } - - /** - * Helper function that returns a {@link SQLException} - */ - public static SQLException getException(String schemaName, String tableName, - SQLExceptionCode code) { - return new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) - .build().buildException(); - } - - /** - * Returns the correct instance of SQLExceptionCode when we detect a limit has been reached, - * depending upon whether a min or max value caused the limit to be exceeded. - */ - public static SQLExceptionCode getLimitReachedErrorCode(boolean increasingSeq) { - SQLExceptionCode code = increasingSeq ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE - : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; - return code; + // check if limit was reached + if ((increasingSeq && nextValue > maxValue) || (!increasingSeq && nextValue < minValue)) { + return true; } + return false; + } + + public static boolean checkIfLimitReached(long currentValue, long minValue, long maxValue, + long incrementBy, long cacheSize) throws SQLException { + return checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, + DEFAULT_NUM_SLOTS_TO_ALLOCATE); + } + + public static boolean checkIfLimitReached(SequenceInfo info) throws SQLException { + return checkIfLimitReached(info.sequenceValue, info.minValue, info.maxValue, info.incrementBy, + info.cacheSize, DEFAULT_NUM_SLOTS_TO_ALLOCATE); + } + + /** + * Returns true if the value of numToAllocate signals that a bulk allocation of sequence slots was + * requested. Prevents proliferation of same comparison in many places throughout the code. + */ + public static boolean isBulkAllocation(long numToAllocate) { + Preconditions.checkArgument(numToAllocate > 0); + return numToAllocate > DEFAULT_NUM_SLOTS_TO_ALLOCATE; + } + + public static boolean isCycleAllowed(long numToAllocate) { + return !isBulkAllocation(numToAllocate); + + } + + /** + * Helper function that returns a {@link SQLException} + */ + public static SQLException getException(String schemaName, String tableName, + SQLExceptionCode code) { + return new SQLExceptionInfo.Builder(code).setSchemaName(schemaName).setTableName(tableName) + .build().buildException(); + } + + /** + * Returns the correct instance of SQLExceptionCode when we detect a limit has been reached, + * depending upon whether a min or max value caused the limit to be exceeded. + */ + public static SQLExceptionCode getLimitReachedErrorCode(boolean increasingSeq) { + SQLExceptionCode code = increasingSeq + ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE + : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE; + return code; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SizedUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SizedUtil.java index d67ed7f7fc5..a9da31fc5a3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/SizedUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/SizedUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,58 +17,63 @@ */ package org.apache.phoenix.util; - /** - * Utilities for computing an object's size. All size measurements are in bytes. - * Note, all of the sizes here, but especially OBJECT_SIZE and ARRAY_SIZE are estimates and will - * depend on the JVM itself (and which JVM, 64bit vs. 32bit, etc). - * The current values are based on: - * Java HotSpot(TM) 64-Bit Server VM/14.2-b01 - * - * (Uncomment out and run the main w/ appropriate object to test) - * Also, see this link: + * Utilities for computing an object's size. All size measurements are in bytes. Note, all of the + * sizes here, but especially OBJECT_SIZE and ARRAY_SIZE are estimates and will depend on the JVM + * itself (and which JVM, 64bit vs. 32bit, etc). The current values are based on: Java HotSpot(TM) + * 64-Bit Server VM/14.2-b01 (Uncomment out and run the main w/ appropriate object to test) Also, + * see this link: * https://sites.google.com/a/salesforce.com/development/Home/old-wiki-home-page/i-wish-i-knew#TOC-How-to-measure-the-size-of-a-Java-O * For another way to measure. */ public class SizedUtil { - public static final int POINTER_SIZE = 8; // 64 bit jvm. - public static final int OBJECT_SIZE = 16; // measured, see class comment. - public static final int ARRAY_SIZE = 24; // measured, see class comment. - public static final int CHAR_SIZE = 2; - public static final int INT_SIZE = 4; - public static final int LONG_SIZE = 8; - - public static final int TREE_MAP_SIZE = OBJECT_SIZE + INT_SIZE * 2 + POINTER_SIZE * 2; - public static final int MAP_ENTRY_SIZE = OBJECT_SIZE + 3 * POINTER_SIZE + INT_SIZE; - public static final int IMMUTABLE_BYTES_WRITABLE_SIZE = OBJECT_SIZE + INT_SIZE * 2 + ARRAY_SIZE; - public static final int IMMUTABLE_BYTES_PTR_SIZE = IMMUTABLE_BYTES_WRITABLE_SIZE + INT_SIZE;// Extra is an int field which caches hashcode. - public static final int KEY_VALUE_SIZE = 2 * INT_SIZE + LONG_SIZE + 2 * ARRAY_SIZE; - public static final int RESULT_SIZE = OBJECT_SIZE + 3 * POINTER_SIZE + IMMUTABLE_BYTES_WRITABLE_SIZE; - public static final int INT_OBJECT_SIZE = INT_SIZE + OBJECT_SIZE; - public static final int LONG_OBJECT_SIZE = LONG_SIZE + OBJECT_SIZE; - public static final int BIG_DECIMAL_SIZE = - OBJECT_SIZE + 2 * INT_SIZE + LONG_SIZE + 2 * POINTER_SIZE + - OBJECT_SIZE /* BigInteger */ + 5 * INT_SIZE + ARRAY_SIZE /*mag[]*/ + 2 * INT_SIZE /* est mag[2] */; + public static final int POINTER_SIZE = 8; // 64 bit jvm. + public static final int OBJECT_SIZE = 16; // measured, see class comment. + public static final int ARRAY_SIZE = 24; // measured, see class comment. + public static final int CHAR_SIZE = 2; + public static final int INT_SIZE = 4; + public static final int LONG_SIZE = 8; + + public static final int TREE_MAP_SIZE = OBJECT_SIZE + INT_SIZE * 2 + POINTER_SIZE * 2; + public static final int MAP_ENTRY_SIZE = OBJECT_SIZE + 3 * POINTER_SIZE + INT_SIZE; + public static final int IMMUTABLE_BYTES_WRITABLE_SIZE = OBJECT_SIZE + INT_SIZE * 2 + ARRAY_SIZE; + public static final int IMMUTABLE_BYTES_PTR_SIZE = IMMUTABLE_BYTES_WRITABLE_SIZE + INT_SIZE;// Extra + // is + // an + // int + // field + // which + // caches + // hashcode. 
+ public static final int KEY_VALUE_SIZE = 2 * INT_SIZE + LONG_SIZE + 2 * ARRAY_SIZE; + public static final int RESULT_SIZE = + OBJECT_SIZE + 3 * POINTER_SIZE + IMMUTABLE_BYTES_WRITABLE_SIZE; + public static final int INT_OBJECT_SIZE = INT_SIZE + OBJECT_SIZE; + public static final int LONG_OBJECT_SIZE = LONG_SIZE + OBJECT_SIZE; + public static final int BIG_DECIMAL_SIZE = + OBJECT_SIZE + 2 * INT_SIZE + LONG_SIZE + 2 * POINTER_SIZE + OBJECT_SIZE /* BigInteger */ + + 5 * INT_SIZE + ARRAY_SIZE /* mag[] */ + 2 * INT_SIZE /* est mag[2] */; + + private SizedUtil() { + } + + public static int sizeOfTreeMap(int size) { + return TREE_MAP_SIZE + (OBJECT_SIZE + INT_SIZE + POINTER_SIZE * 5) * size; + } + + public static int sizeOfArrayList(int capacity) { + return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE + + SizedUtil.ARRAY_SIZE + SizedUtil.POINTER_SIZE * capacity; + } + + public static long sizeOfMap(int nRows) { + return sizeOfMap(nRows, SizedUtil.POINTER_SIZE, SizedUtil.POINTER_SIZE); + } - private SizedUtil() { - } - - public static int sizeOfTreeMap(int size) { - return TREE_MAP_SIZE + (OBJECT_SIZE + INT_SIZE + POINTER_SIZE * 5) * size; - } - - public static int sizeOfArrayList(int capacity) { - return SizedUtil.OBJECT_SIZE + SizedUtil.POINTER_SIZE + SizedUtil.INT_SIZE + SizedUtil.ARRAY_SIZE + SizedUtil.POINTER_SIZE * capacity; - } - - public static long sizeOfMap(int nRows) { - return sizeOfMap(nRows, SizedUtil.POINTER_SIZE, SizedUtil.POINTER_SIZE); - } - - public static long sizeOfMap(int nRows, int keySize, int valueSize) { - return SizedUtil.OBJECT_SIZE * 4 + sizeOfArrayList(nRows) /* key set */ + nRows * ( - SizedUtil.MAP_ENTRY_SIZE * 1L + /* entry set */ - keySize + // key size - valueSize); // value size - } + public static long sizeOfMap(int nRows, int keySize, int valueSize) { + return SizedUtil.OBJECT_SIZE * 4 + sizeOfArrayList(nRows) /* key set */ + + nRows * (SizedUtil.MAP_ENTRY_SIZE * 1L + /* entry set */ + keySize + // key size + valueSize); // value size + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/StringUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/StringUtil.java index 5d1cad2b581..505851c0331 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/StringUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/StringUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,387 +22,356 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.exception.UndecodableByteException; import org.apache.phoenix.schema.SortOrder; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - public class StringUtil { - public static final String EMPTY_STRING = ""; - // Masks to determine how many bytes are in each character - // From http://tools.ietf.org/html/rfc3629#section-3 - public static final byte SPACE_UTF8 = 0x20; - private static final int BYTES_1_MASK = 0xFF << 7; // 0xxxxxxx is a single byte char - private static final int BYTES_2_MASK = 0xFF << 5; // 110xxxxx is a double byte char - private static final int BYTES_3_MASK = 0xFF << 4; // 1110xxxx is a triple byte char - private static final int BYTES_4_MASK = 0xFF << 3; // 11110xxx is a quadruple byte char - - public static final byte INVERTED_SPACE_UTF8 = SortOrder.invert(new byte[] {SPACE_UTF8}, 0, new byte[1], 0, 1)[0]; - public final static char SINGLE_CHAR_WILDCARD = '?'; - public final static char SINGLE_CHAR_LIKE = '_'; - public final static char MULTI_CHAR_WILDCARD = '*'; - public final static char MULTI_CHAR_LIKE = '%'; - private final static String[] LIKE_ESCAPE_SEQS = new String[]{"\\"+SINGLE_CHAR_LIKE, "\\"+MULTI_CHAR_LIKE}; - private final static String[] LIKE_UNESCAPED_SEQS = new String[]{""+SINGLE_CHAR_LIKE, ""+MULTI_CHAR_LIKE}; - - - private StringUtil() { - } + public static final String EMPTY_STRING = ""; + // Masks to determine how many bytes are in each character + // From http://tools.ietf.org/html/rfc3629#section-3 + public static final byte SPACE_UTF8 = 0x20; + private static final int BYTES_1_MASK = 0xFF << 7; // 0xxxxxxx is a single byte char + private static final int BYTES_2_MASK = 0xFF << 5; // 110xxxxx is a double byte char + private static final int BYTES_3_MASK = 0xFF << 4; // 1110xxxx is a triple byte char + private static final int BYTES_4_MASK = 0xFF << 3; // 11110xxx is a quadruple byte char - public static final String[] getLikeEscapeSeqs() { - return Arrays.copyOf(LIKE_ESCAPE_SEQS, LIKE_ESCAPE_SEQS.length); - } + public static final byte INVERTED_SPACE_UTF8 = + SortOrder.invert(new byte[] { SPACE_UTF8 }, 0, new byte[1], 0, 1)[0]; + public final static char SINGLE_CHAR_WILDCARD = '?'; + public final static char SINGLE_CHAR_LIKE = '_'; + public final static char MULTI_CHAR_WILDCARD = '*'; + public final static char MULTI_CHAR_LIKE = '%'; + private final static String[] LIKE_ESCAPE_SEQS = + new String[] { "\\" + SINGLE_CHAR_LIKE, "\\" + MULTI_CHAR_LIKE }; + private final static String[] LIKE_UNESCAPED_SEQS = + new String[] { "" + SINGLE_CHAR_LIKE, "" + MULTI_CHAR_LIKE }; - public static final String[] getLikeUnescapedSeqs() { - return Arrays.copyOf(LIKE_UNESCAPED_SEQS, LIKE_UNESCAPED_SEQS.length); - } + private StringUtil() { + } - /** Replace instances of character ch in String value with String replacement */ - public static String replaceChar(String value, char ch, CharSequence replacement) { - if (value == null) - return null; - int i = value.indexOf(ch); - if (i == -1) - return value; // nothing to do - - // we've got at least one character to replace - StringBuilder buf = new StringBuilder(value.length() + 16); // some extra space - int j = 0; - while (i != -1) 
{ - buf.append(value, j, i).append(replacement); - j = i + 1; - i = value.indexOf(ch, j); - } - if (j < value.length()) - buf.append(value, j, value.length()); - return buf.toString(); + public static final String[] getLikeEscapeSeqs() { + return Arrays.copyOf(LIKE_ESCAPE_SEQS, LIKE_ESCAPE_SEQS.length); + } + + public static final String[] getLikeUnescapedSeqs() { + return Arrays.copyOf(LIKE_UNESCAPED_SEQS, LIKE_UNESCAPED_SEQS.length); + } + + /** Replace instances of character ch in String value with String replacement */ + public static String replaceChar(String value, char ch, CharSequence replacement) { + if (value == null) return null; + int i = value.indexOf(ch); + if (i == -1) return value; // nothing to do + + // we've got at least one character to replace + StringBuilder buf = new StringBuilder(value.length() + 16); // some extra space + int j = 0; + while (i != -1) { + buf.append(value, j, i).append(replacement); + j = i + 1; + i = value.indexOf(ch, j); } + if (j < value.length()) buf.append(value, j, value.length()); + return buf.toString(); + } - /** - * @return the replacement of all occurrences of src[i] with target[i] in s. Src and target are not regex's so this - * uses simple searching with indexOf() - */ - public static String replace(String s, String[] src, String[] target) { - assert src != null && target != null && src.length > 0 && src.length == target.length; - if (src.length == 1 && src[0].length() == 1) { - return replaceChar(s, src[0].charAt(0), target[0]); - } - if (s == null) - return null; - StringBuilder sb = new StringBuilder(s.length()); - int pos = 0; - int limit = s.length(); - int lastMatch = 0; - while (pos < limit) { - boolean matched = false; - for (int i = 0; i < src.length; i++) { - if (s.startsWith(src[i], pos) && src[i].length() > 0) { - // we found a matching pattern - append the acculumation plus the replacement - sb.append(s.substring(lastMatch, pos)).append(target[i]); - pos += src[i].length(); - lastMatch = pos; - matched = true; - break; - } - } - if (!matched) { - // we didn't match any patterns, so move forward 1 character - pos++; - } - } - // see if we found any matches - if (lastMatch == 0) { - // we didn't match anything, so return the source string - return s; + /** + * @return the replacement of all occurrences of src[i] with target[i] in s. 
Src and target are + * not regex's so this uses simple searching with indexOf() + */ + public static String replace(String s, String[] src, String[] target) { + assert src != null && target != null && src.length > 0 && src.length == target.length; + if (src.length == 1 && src[0].length() == 1) { + return replaceChar(s, src[0].charAt(0), target[0]); + } + if (s == null) return null; + StringBuilder sb = new StringBuilder(s.length()); + int pos = 0; + int limit = s.length(); + int lastMatch = 0; + while (pos < limit) { + boolean matched = false; + for (int i = 0; i < src.length; i++) { + if (s.startsWith(src[i], pos) && src[i].length() > 0) { + // we found a matching pattern - append the acculumation plus the replacement + sb.append(s.substring(lastMatch, pos)).append(target[i]); + pos += src[i].length(); + lastMatch = pos; + matched = true; + break; } - - // apppend the trailing portion - sb.append(s.substring(lastMatch)); - - return sb.toString(); + } + if (!matched) { + // we didn't match any patterns, so move forward 1 character + pos++; + } } + // see if we found any matches + if (lastMatch == 0) { + // we didn't match anything, so return the source string + return s; + } + + // apppend the trailing portion + sb.append(s.substring(lastMatch)); + + return sb.toString(); + } - public static int getBytesInChar(byte b, SortOrder sortOrder) { - int ret = getBytesInCharNoException(b, sortOrder); - if (ret == -1) throw new UndecodableByteException(b); - return ret; + public static int getBytesInChar(byte b, SortOrder sortOrder) { + int ret = getBytesInCharNoException(b, sortOrder); + if (ret == -1) throw new UndecodableByteException(b); + return ret; + } + + private static int getBytesInCharNoException(byte b, SortOrder sortOrder) { + Preconditions.checkNotNull(sortOrder); + if (sortOrder == SortOrder.DESC) { + b = SortOrder.invert(b); } + int c = b & 0xff; + if ((c & BYTES_1_MASK) == 0) return 1; + if ((c & BYTES_2_MASK) == 0xC0) return 2; + if ((c & BYTES_3_MASK) == 0xE0) return 3; + if ((c & BYTES_4_MASK) == 0xF0) return 4; + return -1; + } - private static int getBytesInCharNoException(byte b, SortOrder sortOrder) { - Preconditions.checkNotNull(sortOrder); - if (sortOrder == SortOrder.DESC) { - b = SortOrder.invert(b); - } - int c = b & 0xff; - if ((c & BYTES_1_MASK) == 0) - return 1; - if ((c & BYTES_2_MASK) == 0xC0) - return 2; - if ((c & BYTES_3_MASK) == 0xE0) - return 3; - if ((c & BYTES_4_MASK) == 0xF0) - return 4; - return -1; + public static int calculateUTF8Length(byte[] bytes, int offset, int length, SortOrder sortOrder) { + int i = offset, endOffset = offset + length; + length = 0; + while (i < endOffset) { + int charLength = getBytesInChar(bytes[i], sortOrder); + i += charLength; + length++; } + return length; + } - public static int calculateUTF8Length(byte[] bytes, int offset, int length, SortOrder sortOrder) { - int i = offset, endOffset = offset + length; - length = 0; - while (i < endOffset) { - int charLength = getBytesInChar(bytes[i], sortOrder); - i += charLength; - length++; - } - return length; + // given an array of bytes containing utf-8 encoded strings, starting from curPos, ending before + // range, and return the next character offset, -1 if no next character available or + // UndecodableByteException + private static int calculateNextCharOffset(byte[] bytes, int curPos, int range, + SortOrder sortOrder) { + int ret = getBytesInCharNoException(bytes[curPos], sortOrder); + if (ret == -1) return -1; + ret += curPos; + if (ret >= range) return -1; + return ret; + } + 
+ // given an array of bytes containing utf-8 encoded strings, starting from offset, and return + // the previous character offset , -1 if UndecodableByteException. curPos points to current + // character starting offset. + private static int calculatePreCharOffset(byte[] bytes, int curPos, int offset, + SortOrder sortOrder) { + --curPos; + for (int i = 1, pos = curPos - i + 1; i <= 4 && offset <= pos; ++i, --pos) { + int ret = getBytesInCharNoException(bytes[pos], sortOrder); + if (ret == i) return pos; } + return -1; + } - // given an array of bytes containing utf-8 encoded strings, starting from curPos, ending before - // range, and return the next character offset, -1 if no next character available or - // UndecodableByteException - private static int calculateNextCharOffset(byte[] bytes, int curPos, int range, - SortOrder sortOrder) { - int ret = getBytesInCharNoException(bytes[curPos], sortOrder); + // return actural offsetInBytes corresponding to offsetInStr in utf-8 encoded strings bytes + // containing + // @param bytes an array of bytes containing utf-8 encoded strings + // @param offset + // @param length + // @param sortOrder + // @param offsetInStr offset for utf-8 encoded strings bytes array containing. Can be negative + // meaning counting from the end of encoded strings + // @return actural offsetInBytes corresponding to offsetInStr. -1 if offsetInStr is out of index + public static int calculateUTF8Offset(byte[] bytes, int offset, int length, SortOrder sortOrder, + int offsetInStr) { + if (offsetInStr == 0) return offset; + int ret, range = offset + length; + if (offsetInStr > 0) { + ret = offset; + while (offsetInStr > 0) { + ret = calculateNextCharOffset(bytes, ret, range, sortOrder); if (ret == -1) return -1; - ret += curPos; - if (ret >= range) return -1; - return ret; + --offsetInStr; + } + } else { + ret = offset + length; + while (offsetInStr < 0) { + ret = calculatePreCharOffset(bytes, ret, offset, sortOrder); + // if calculateCurCharOffset returns -1, ret must be smaller than offset + if (ret < offset) return -1; + ++offsetInStr; + } } + return ret; + } - // given an array of bytes containing utf-8 encoded strings, starting from offset, and return - // the previous character offset , -1 if UndecodableByteException. curPos points to current - // character starting offset. - private static int calculatePreCharOffset(byte[] bytes, int curPos, int offset, - SortOrder sortOrder) { - --curPos; - for (int i = 1, pos = curPos - i + 1; i <= 4 && offset <= pos; ++i, --pos) { - int ret = getBytesInCharNoException(bytes[pos], sortOrder); - if (ret == i) return pos; - } - return -1; + // Given an array of bytes containing encoding utf-8 encoded strings, the offset and a length + // parameter, return the actual index into the byte array which would represent a substring + // of starting from the character at . We assume the is the start + // byte of an UTF-8 character. 
+ public static int getByteLengthForUtf8SubStr(byte[] bytes, int offset, int length, + SortOrder sortOrder) { + int byteLength = 0; + while (length > 0 && offset + byteLength < bytes.length) { + int charLength = getBytesInChar(bytes[offset + byteLength], sortOrder); + byteLength += charLength; + length--; } + return byteLength; + } - // return actural offsetInBytes corresponding to offsetInStr in utf-8 encoded strings bytes - // containing - // @param bytes an array of bytes containing utf-8 encoded strings - // @param offset - // @param length - // @param sortOrder - // @param offsetInStr offset for utf-8 encoded strings bytes array containing. Can be negative - // meaning counting from the end of encoded strings - // @return actural offsetInBytes corresponding to offsetInStr. -1 if offsetInStr is out of index - public static int calculateUTF8Offset(byte[] bytes, int offset, int length, - SortOrder sortOrder, int offsetInStr) { - if (offsetInStr == 0) return offset; - int ret, range = offset + length; - if (offsetInStr > 0) { - ret = offset; - while (offsetInStr > 0) { - ret = calculateNextCharOffset(bytes, ret, range, sortOrder); - if (ret == -1) return -1; - --offsetInStr; - } - } else { - ret = offset + length; - while (offsetInStr < 0) { - ret = calculatePreCharOffset(bytes, ret, offset, sortOrder); - // if calculateCurCharOffset returns -1, ret must be smaller than offset - if (ret < offset) return -1; - ++offsetInStr; - } - } - return ret; + public static boolean hasMultiByteChars(String s) { + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + if (c > 0x007F) { + return true; + } } + return false; + } - // Given an array of bytes containing encoding utf-8 encoded strings, the offset and a length - // parameter, return the actual index into the byte array which would represent a substring - // of starting from the character at . We assume the is the start - // byte of an UTF-8 character. - public static int getByteLengthForUtf8SubStr(byte[] bytes, int offset, int length, SortOrder sortOrder) { - int byteLength = 0; - while(length > 0 && offset + byteLength < bytes.length) { - int charLength = getBytesInChar(bytes[offset + byteLength], sortOrder); - byteLength += charLength; - length--; - } - return byteLength; + public static int getFirstNonBlankCharIdxFromStart(byte[] string, int offset, int length, + SortOrder sortOrder) { + int i = offset; + byte space = sortOrder == SortOrder.ASC ? SPACE_UTF8 : INVERTED_SPACE_UTF8; + for (; i < offset + length; i++) { + if (string[i] != space) { + break; + } } + return i; + } - public static boolean hasMultiByteChars(String s) { - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - if (c > 0x007F) { - return true; - } - } - return false; + public static int getFirstNonBlankCharIdxFromEnd(byte[] string, int offset, int length, + SortOrder sortOrder) { + int i = offset + length - 1; + byte space = sortOrder == SortOrder.ASC ? SPACE_UTF8 : INVERTED_SPACE_UTF8; + for (; i >= offset; i--) { + if (string[i] != space) { + break; + } } + return i; + } - public static int getFirstNonBlankCharIdxFromStart(byte[] string, int offset, int length, SortOrder sortOrder) { - int i = offset; - byte space = sortOrder == SortOrder.ASC ? SPACE_UTF8 : INVERTED_SPACE_UTF8; - for ( ; i < offset + length; i++) { - if (string[i] != space) { - break; - } - } - return i; + // A toBytes function backed up HBase's utility function, but would accept null input, in which + // case it returns an empty byte array. 
+ public static byte[] toBytes(String input) { + if (input == null) { + return ByteUtil.EMPTY_BYTE_ARRAY; } + return Bytes.toBytes(input); + } - public static int getFirstNonBlankCharIdxFromEnd(byte[] string, int offset, int length, SortOrder sortOrder) { - int i = offset + length - 1; - byte space = sortOrder == SortOrder.ASC ? SPACE_UTF8 : INVERTED_SPACE_UTF8; - for ( ; i >= offset; i--) { - if (string[i] != space) { - break; - } - } - return i; - } + public static String escapeLike(String s) { + return replace(s, LIKE_UNESCAPED_SEQS, LIKE_ESCAPE_SEQS); + } - // A toBytes function backed up HBase's utility function, but would accept null input, in which - // case it returns an empty byte array. - public static byte[] toBytes(String input) { - if (input == null) { - return ByteUtil.EMPTY_BYTE_ARRAY; - } - return Bytes.toBytes(input); - } + public static int getUnpaddedCharLength(byte[] b, int offset, int length, SortOrder sortOrder) { + return getFirstNonBlankCharIdxFromEnd(b, offset, length, sortOrder) - offset + 1; + } - public static String escapeLike(String s) { - return replace(s, LIKE_UNESCAPED_SEQS, LIKE_ESCAPE_SEQS); + public static byte[] padChar(byte[] value, Integer byteSize) { + byte[] newValue = Arrays.copyOf(value, byteSize); + if (newValue.length > value.length) { + Arrays.fill(newValue, value.length, newValue.length, SPACE_UTF8); } + return newValue; + } - public static int getUnpaddedCharLength(byte[] b, int offset, int length, SortOrder sortOrder) { - return getFirstNonBlankCharIdxFromEnd(b, offset, length, sortOrder) - offset + 1; + /** + * Lame - StringBuilder.equals is retarded. + * @return whether or not the two builders consist the same sequence of characters + */ + public static boolean equals(StringBuilder b1, StringBuilder b2) { + if (b1.length() != b2.length()) { + return false; } - - public static byte[] padChar(byte[] value, Integer byteSize) { - byte[] newValue = Arrays.copyOf(value, byteSize); - if (newValue.length > value.length) { - Arrays.fill(newValue, value.length, newValue.length, SPACE_UTF8); - } - return newValue; + for (int i = 0; i < b1.length(); i++) { + if (b1.charAt(i) != b2.charAt(i)) { + return false; + } } - - /** - * Lame - StringBuilder.equals is retarded. 
- * @param b1 - * @param b2 - * @return whether or not the two builders consist the same sequence of characters - */ - public static boolean equals(StringBuilder b1, StringBuilder b2) { - if (b1.length() != b2.length()) { - return false; - } - for (int i = 0; i < b1.length(); i++) { - if (b1.charAt(i) != b2.charAt(i)) { - return false; - } + return true; + } + + /** + * LPAD implementation array containing string to be left padded byte offset of string byte length + * of string array containing fill values byte offset of fill byte length of fill if true inverts + * the bits in fill before filling the array length of the string that is returned with fill + * values left padded + * @return byte[] containing left padded string + */ + public static byte[] lpad(byte[] str, int strOffset, int strLength, byte[] fill, int fillOffset, + int fillLength, boolean invertFill, int strWithPaddingLen) { + byte[] paddedStr = new byte[strWithPaddingLen]; + int fillStopIdx = strWithPaddingLen - strLength; + // copy fill into the start of paddedStr + fill(paddedStr, 0, fillStopIdx, fill, fillOffset, fillOffset + fillLength, invertFill); + // fill remaining characters with original string + System.arraycopy(str, strOffset, paddedStr, fillStopIdx, strLength); + return paddedStr; + } + + /** + * Assigns the specified byte values to elements of the specified range of the specified array of + * bytes. The range to be filled extends from index fromIndex, inclusive, to index toIndex, + * exclusive. (If fromIndex==toIndex, the range to be filled is empty.) the array to be filled the + * index of the first element (inclusive) to be filled with the fill values the index of the last + * element (exclusive) to be filled with the fill values the values to be stored in all elements + * of the array the index of the first element (inclusive) to be used as fill values the index of + * the last element (exclusive) to be used as fill value if true inverts the bits in fill before + * filling the array + */ + public static void fill(byte[] str, int strFromIdx, int strToIdx, byte[] fillArray, + int fillFromIdx, int fillToIdx, boolean invertFill) { + rangeCheck(str.length, strFromIdx, strToIdx); + rangeCheck(fillArray.length, fillFromIdx, fillToIdx); + int strIdx = strFromIdx; + byte[] fill = fillArray; + int fillLen = fillToIdx - fillFromIdx; + if (invertFill) fill = SortOrder.invert(fillArray, fillFromIdx, fillLen); + while (strIdx < strToIdx) { + int fillIdx = fillFromIdx; + while (fillIdx < fillToIdx && strIdx < strToIdx) { + if (strIdx + fillLen < fillToIdx) { + System.arraycopy(fill, fillFromIdx, str, strIdx, fillLen); + } else { + str[strIdx++] = fill[fillIdx++]; } - return true; + } } - - /** - * LPAD implementation - * - * @param str - * array containing string to be left padded - * @param strOffset - * byte offset of string - * @param strLength - * byte length of string - * @param fill - * array containing fill values - * @param fillOffset - * byte offset of fill - * @param fillLength - * byte length of fill - * @param invertFill - * if true inverts the bits in fill before filling the array - * @param strWithPaddingLen - * length of the string that is returned with fill values left padded - * @return byte[] containing left padded string - */ - public static byte[] lpad(byte[] str, int strOffset, int strLength, byte[] fill, int fillOffset, int fillLength, - boolean invertFill, int strWithPaddingLen) { - byte[] paddedStr = new byte[strWithPaddingLen]; - int fillStopIdx = strWithPaddingLen - strLength; - // copy fill into 
the start of paddedStr - fill(paddedStr, 0, fillStopIdx, fill, fillOffset, fillOffset + fillLength, invertFill); - // fill remaining characters with original string - System.arraycopy(str, strOffset, paddedStr, fillStopIdx, strLength); - return paddedStr; + } + + /** + * Checks that fromIndex and toIndex are in the range and throws an appropriate exception, if they + * are not + */ + private static void rangeCheck(int length, int fromIndex, int toIndex) { + if (fromIndex > toIndex) { + throw new IllegalArgumentException("fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")"); } - - /** - * Assigns the specified byte values to elements of the specified range of the specified array of bytes. The range - * to be filled extends from index fromIndex, inclusive, to index toIndex, exclusive. (If - * fromIndex==toIndex, the range to be filled is empty.) - * - * @param str - * the array to be filled - * @param strFromIdx - * the index of the first element (inclusive) to be filled with the fill values - * @param strToIdx - * the index of the last element (exclusive) to be filled with the fill values - * @param fillArray - * the values to be stored in all elements of the array - * @param fillFromIdx - * the index of the first element (inclusive) to be used as fill values - * @param fillToIdx - * the index of the last element (exclusive) to be used as fill value - * @param invertFill - * if true inverts the bits in fill before filling the array - */ - public static void fill(byte[] str, int strFromIdx, int strToIdx, byte[] fillArray, int fillFromIdx, int fillToIdx, - boolean invertFill) { - rangeCheck(str.length, strFromIdx, strToIdx); - rangeCheck(fillArray.length, fillFromIdx, fillToIdx); - int strIdx = strFromIdx; - byte[] fill = fillArray; - int fillLen = fillToIdx - fillFromIdx; - if (invertFill) - fill = SortOrder.invert(fillArray, fillFromIdx, fillLen); - while (strIdx < strToIdx) { - int fillIdx = fillFromIdx; - while (fillIdx < fillToIdx && strIdx < strToIdx) { - if (strIdx + fillLen < fillToIdx) { - System.arraycopy(fill, fillFromIdx, str, strIdx, fillLen); - } else { - str[strIdx++] = fill[fillIdx++]; - } - } - } + if (fromIndex < 0) { + throw new ArrayIndexOutOfBoundsException(fromIndex); } - - /** - * Checks that fromIndex and toIndex are in the range and throws an appropriate exception, if they - * are not - */ - private static void rangeCheck(int length, int fromIndex, int toIndex) { - if (fromIndex > toIndex) { - throw new IllegalArgumentException("fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")"); - } - if (fromIndex < 0) { - throw new ArrayIndexOutOfBoundsException(fromIndex); - } - if (toIndex > length) { - throw new ArrayIndexOutOfBoundsException(toIndex); - } + if (toIndex > length) { + throw new ArrayIndexOutOfBoundsException(toIndex); } + } - public static String escapeStringConstant(String pattern) { - // commons-lang3 dropped StringEscapeUtils.escapeSql because it was - // extremely naive in its implementation. Copying that implementation - // here as a stop-gap. 
- // https://stackoverflow.com/questions/32096614/migrating-stringescapeutils-escapesql-from-commons-lang - if (pattern == null) { - return null; - } - return org.apache.commons.lang3.StringUtils.replace(pattern, "'", "''"); - } - - public static String escapeBackslash(String input) { - // see http://stackoverflow.com/questions/4653831/regex-how-to-escape-backslashes-and-special-characters - return input.replaceAll("\\\\","\\\\\\\\"); + public static String escapeStringConstant(String pattern) { + // commons-lang3 dropped StringEscapeUtils.escapeSql because it was + // extremely naive in its implementation. Copying that implementation + // here as a stop-gap. + // https://stackoverflow.com/questions/32096614/migrating-stringescapeutils-escapesql-from-commons-lang + if (pattern == null) { + return null; } + return org.apache.commons.lang3.StringUtils.replace(pattern, "'", "''"); + } + + public static String escapeBackslash(String input) { + // see + // http://stackoverflow.com/questions/4653831/regex-how-to-escape-backslashes-and-special-characters + return input.replaceAll("\\\\", "\\\\\\\\"); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TableViewFinderResult.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TableViewFinderResult.java index 9afc7abbd18..cd356c1b36a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TableViewFinderResult.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TableViewFinderResult.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,33 +19,33 @@ import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.coprocessorclient.TableInfo; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * This class wraps the results of a scanning SYSTEM.CATALOG or SYSTEM.CHILD_LINK - * for child related tables or views. + * This class wraps the results of a scanning SYSTEM.CATALOG or SYSTEM.CHILD_LINK for child related + * tables or views. 
*/ public class TableViewFinderResult { - private List tableViewInfoList = Lists.newArrayList(); + private List tableViewInfoList = Lists.newArrayList(); - public TableViewFinderResult() { - } + public TableViewFinderResult() { + } - public TableViewFinderResult(List results) { - this.tableViewInfoList = results; - } + public TableViewFinderResult(List results) { + this.tableViewInfoList = results; + } - public boolean hasLinks() { - return !tableViewInfoList.isEmpty(); - } + public boolean hasLinks() { + return !tableViewInfoList.isEmpty(); + } - public List getLinks() { - return tableViewInfoList; - } + public List getLinks() { + return tableViewInfoList; + } - void addResult(TableViewFinderResult result) { - this.tableViewInfoList.addAll(result.getLinks()); - } + void addResult(TableViewFinderResult result) { + this.tableViewInfoList.addAll(result.getLinks()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TaskMetaDataServiceCallBack.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TaskMetaDataServiceCallBack.java index b788cbd7b20..52cb14fd5b1 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TaskMetaDataServiceCallBack.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TaskMetaDataServiceCallBack.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util; +import java.io.IOException; +import java.util.List; + import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; @@ -25,43 +27,37 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse; import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos; -import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos - .TaskMetaDataService; +import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos.TaskMetaDataService; import org.apache.phoenix.protobuf.ProtobufUtil; -import java.io.IOException; -import java.util.List; - /** - * Callable implementation for coprocessor endpoint associated with - * SYSTEM.TASK + * Callable implementation for coprocessor endpoint associated with SYSTEM.TASK */ public class TaskMetaDataServiceCallBack - implements Batch.Call { + implements Batch.Call { - private final List taskMutations; + private final List taskMutations; - public TaskMetaDataServiceCallBack(List taskMutations) { - this.taskMutations = taskMutations; - } + public TaskMetaDataServiceCallBack(List taskMutations) { + this.taskMutations = taskMutations; + } - @Override - public MetaDataResponse call(TaskMetaDataService instance) - throws IOException { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - TaskMetaDataProtos.TaskMutateRequest.Builder builder = - TaskMetaDataProtos.TaskMutateRequest.newBuilder(); - for (Mutation mutation : taskMutations) { - ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); - builder.addTableMetadataMutations(mp.toByteString()); - } - TaskMetaDataProtos.TaskMutateRequest build = builder.build(); - instance.upsertTaskDetails(controller, build, rpcCallback); - if (controller.getFailedOn() != null) { - throw controller.getFailedOn(); - } - return rpcCallback.get(); + @Override + public MetaDataResponse call(TaskMetaDataService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + TaskMetaDataProtos.TaskMutateRequest.Builder builder = + TaskMetaDataProtos.TaskMutateRequest.newBuilder(); + for (Mutation mutation : taskMutations) { + ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); + builder.addTableMetadataMutations(mp.toByteString()); + } + TaskMetaDataProtos.TaskMutateRequest build = builder.build(); + instance.upsertTaskDetails(controller, build, rpcCallback); + if (controller.getFailedOn() != null) { + throw controller.getFailedOn(); } + return rpcCallback.get(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TimeKeeper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TimeKeeper.java index c4fe6eaa457..af3584e5529 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TimeKeeper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TimeKeeper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ package org.apache.phoenix.util; public interface TimeKeeper { - static final TimeKeeper SYSTEM = new TimeKeeper() { - @Override - public long getCurrentTime() { - return EnvironmentEdgeManager.currentTimeMillis(); - } - }; - - long getCurrentTime(); -} \ No newline at end of file + static final TimeKeeper SYSTEM = new TimeKeeper() { + @Override + public long getCurrentTime() { + return EnvironmentEdgeManager.currentTimeMillis(); + } + }; + + long getCurrentTime(); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TransactionUtil.java index 2e98ae2165f..fd83809b963 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TransactionUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TransactionUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +23,9 @@ import java.util.Map; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; @@ -37,135 +37,131 @@ import org.apache.phoenix.transaction.TransactionFactory; public class TransactionUtil { - // All transaction providers must use an empty byte array as the family delete marker - // (see TxConstants.FAMILY_DELETE_QUALIFIER) - public static final byte[] FAMILY_DELETE_MARKER = HConstants.EMPTY_BYTE_ARRAY; - // All transaction providers must multiply timestamps by this constant. 
- // (see TxConstants.MAX_TX_PER_MS) - public static final int MAX_TRANSACTIONS_PER_MILLISECOND = 1000000; - // Constant used to empirically determine if a timestamp is a transactional or - // non transactional timestamp (see TxUtils.MAX_NON_TX_TIMESTAMP) - private static final long MAX_NON_TX_TIMESTAMP = - (long) (EnvironmentEdgeManager.currentTimeMillis() * 1.1); - - private TransactionUtil() { - - } - - public static boolean isTransactionalTimestamp(long ts) { - return ts >= MAX_NON_TX_TIMESTAMP; - } - - public static boolean isDelete(Cell cell) { - return CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY); - } - - public static boolean isDeleteFamily(Cell cell) { - return CellUtil.matchingQualifier(cell, FAMILY_DELETE_MARKER) && CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY); - } - - private static Cell newDeleteFamilyMarker(byte[] row, byte[] family, long timestamp) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(FAMILY_DELETE_MARKER) - .setTimestamp(timestamp) - .setType(Cell.Type.Put) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); - } - - private static Cell newDeleteColumnMarker(byte[] row, byte[] family, byte[] qualifier, long timestamp) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(timestamp) - .setType(Cell.Type.Put) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); - } + // All transaction providers must use an empty byte array as the family delete marker + // (see TxConstants.FAMILY_DELETE_QUALIFIER) + public static final byte[] FAMILY_DELETE_MARKER = HConstants.EMPTY_BYTE_ARRAY; + // All transaction providers must multiply timestamps by this constant. 
+ // (see TxConstants.MAX_TX_PER_MS) + public static final int MAX_TRANSACTIONS_PER_MILLISECOND = 1000000; + // Constant used to empirically determine if a timestamp is a transactional or + // non transactional timestamp (see TxUtils.MAX_NON_TX_TIMESTAMP) + private static final long MAX_NON_TX_TIMESTAMP = + (long) (EnvironmentEdgeManager.currentTimeMillis() * 1.1); + + private TransactionUtil() { + + } + + public static boolean isTransactionalTimestamp(long ts) { + return ts >= MAX_NON_TX_TIMESTAMP; + } + + public static boolean isDelete(Cell cell) { + return CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY); + } - public static long convertToNanoseconds(long serverTimeStamp) { - return serverTimeStamp * MAX_TRANSACTIONS_PER_MILLISECOND; + public static boolean isDeleteFamily(Cell cell) { + return CellUtil.matchingQualifier(cell, FAMILY_DELETE_MARKER) + && CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY); + } + + private static Cell newDeleteFamilyMarker(byte[] row, byte[] family, long timestamp) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) + .setQualifier(FAMILY_DELETE_MARKER).setTimestamp(timestamp).setType(Cell.Type.Put) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); + } + + private static Cell newDeleteColumnMarker(byte[] row, byte[] family, byte[] qualifier, + long timestamp) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier).setTimestamp(timestamp).setType(Cell.Type.Put) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); + } + + public static long convertToNanoseconds(long serverTimeStamp) { + return serverTimeStamp * MAX_TRANSACTIONS_PER_MILLISECOND; + } + + public static long convertToMilliseconds(long serverTimeStamp) { + return serverTimeStamp / MAX_TRANSACTIONS_PER_MILLISECOND; + } + + // we resolve transactional tables at the txn read pointer + public static long getResolvedTimestamp(PhoenixConnection connection, boolean isTransactional, + long defaultResolvedTimestamp) { + MutationState mutationState = connection.getMutationState(); + Long scn = connection.getSCN(); + return scn != null ? scn + : (isTransactional && mutationState.isTransactionStarted()) + ? convertToMilliseconds(mutationState.getInitialWritePointer()) + : defaultResolvedTimestamp; + } + + public static long getResolvedTime(PhoenixConnection connection, MetaDataMutationResult result) { + PTable table = result.getTable(); + boolean isTransactional = table != null && table.isTransactional(); + return getResolvedTimestamp(connection, isTransactional, result.getMutationTime()); + } + + public static long getResolvedTimestamp(PhoenixConnection connection, + MetaDataMutationResult result) { + PTable table = result.getTable(); + MutationState mutationState = connection.getMutationState(); + boolean txInProgress = + table != null && table.isTransactional() && mutationState.isTransactionStarted(); + return txInProgress + ? 
convertToMilliseconds(mutationState.getInitialWritePointer()) + : result.getMutationTime(); + } + + public static Long getTableTimestamp(PhoenixConnection connection, boolean transactional, + TransactionFactory.Provider provider) throws SQLException { + Long timestamp = null; + if (!transactional) { + return timestamp; } - - public static long convertToMilliseconds(long serverTimeStamp) { - return serverTimeStamp / MAX_TRANSACTIONS_PER_MILLISECOND; + MutationState mutationState = connection.getMutationState(); + if (!mutationState.isTransactionStarted()) { + mutationState.startTransaction(provider); } - - // we resolve transactional tables at the txn read pointer - public static long getResolvedTimestamp(PhoenixConnection connection, boolean isTransactional, long defaultResolvedTimestamp) { - MutationState mutationState = connection.getMutationState(); - Long scn = connection.getSCN(); - return scn != null ? scn : (isTransactional && mutationState.isTransactionStarted()) ? convertToMilliseconds(mutationState.getInitialWritePointer()) : defaultResolvedTimestamp; - } - - public static long getResolvedTime(PhoenixConnection connection, MetaDataMutationResult result) { - PTable table = result.getTable(); - boolean isTransactional = table!=null && table.isTransactional(); - return getResolvedTimestamp(connection, isTransactional, result.getMutationTime()); - } - - public static long getResolvedTimestamp(PhoenixConnection connection, MetaDataMutationResult result) { - PTable table = result.getTable(); - MutationState mutationState = connection.getMutationState(); - boolean txInProgress = table != null && table.isTransactional() && mutationState.isTransactionStarted(); - return txInProgress ? convertToMilliseconds(mutationState.getInitialWritePointer()) : result.getMutationTime(); - } - - public static Long getTableTimestamp(PhoenixConnection connection, boolean transactional, TransactionFactory.Provider provider) throws SQLException { - Long timestamp = null; - if (!transactional) { - return timestamp; - } - MutationState mutationState = connection.getMutationState(); - if (!mutationState.isTransactionStarted()) { - mutationState.startTransaction(provider); - } - timestamp = convertToMilliseconds(mutationState.getInitialWritePointer()); - return timestamp; - } - - // Convert HBase Delete into Put so that it can be undone if transaction is rolled back - public static Mutation convertIfDelete(Mutation mutation) throws IOException { - if (mutation instanceof Delete) { - Put deleteMarker = null; - for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { - byte[] family = entry.getKey(); - List familyCells = entry.getValue(); - if (familyCells.size() == 1) { - if (familyCells.get(0).getType() == Cell.Type.DeleteFamily) { - if (deleteMarker == null) { - deleteMarker = new Put(mutation.getRow()); - } - deleteMarker.add(newDeleteFamilyMarker( - deleteMarker.getRow(), - family, - familyCells.get(0).getTimestamp())); - } - } else { - for (Cell cell : familyCells) { - if (cell.getType() == Cell.Type.DeleteColumn) { - if (deleteMarker == null) { - deleteMarker = new Put(mutation.getRow()); - } - deleteMarker.add(newDeleteColumnMarker( - deleteMarker.getRow(), - family, - CellUtil.cloneQualifier(cell), - cell.getTimestamp())); - } - } - } + timestamp = convertToMilliseconds(mutationState.getInitialWritePointer()); + return timestamp; + } + + // Convert HBase Delete into Put so that it can be undone if transaction is rolled back + public static Mutation convertIfDelete(Mutation mutation) throws 
IOException { + if (mutation instanceof Delete) { + Put deleteMarker = null; + for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { + byte[] family = entry.getKey(); + List familyCells = entry.getValue(); + if (familyCells.size() == 1) { + if (familyCells.get(0).getType() == Cell.Type.DeleteFamily) { + if (deleteMarker == null) { + deleteMarker = new Put(mutation.getRow()); } - if (deleteMarker != null) { - for (Map.Entry entry : mutation.getAttributesMap().entrySet()) { - deleteMarker.setAttribute(entry.getKey(), entry.getValue()); - } - mutation = deleteMarker; + deleteMarker.add(newDeleteFamilyMarker(deleteMarker.getRow(), family, + familyCells.get(0).getTimestamp())); + } + } else { + for (Cell cell : familyCells) { + if (cell.getType() == Cell.Type.DeleteColumn) { + if (deleteMarker == null) { + deleteMarker = new Put(mutation.getRow()); + } + deleteMarker.add(newDeleteColumnMarker(deleteMarker.getRow(), family, + CellUtil.cloneQualifier(cell), cell.getTimestamp())); } + } } - return mutation; - } + } + if (deleteMarker != null) { + for (Map.Entry entry : mutation.getAttributesMap().entrySet()) { + deleteMarker.setAttribute(entry.getKey(), entry.getValue()); + } + mutation = deleteMarker; + } + } + return mutation; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TrustedByteArrayOutputStream.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TrustedByteArrayOutputStream.java index ee923a0dffd..59dfa46ec94 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TrustedByteArrayOutputStream.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TrustedByteArrayOutputStream.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,34 +21,33 @@ import java.io.IOException; /** - * - * Derived version of {@link java.io.ByteArrayOutputStream} that provides access - * to underlying byte array buffer so that it doesn't have to be copied - * - * + * Derived version of {@link java.io.ByteArrayOutputStream} that provides access to underlying byte + * array buffer so that it doesn't have to be copied * @since 0.1 */ public class TrustedByteArrayOutputStream extends ByteArrayOutputStream { - public TrustedByteArrayOutputStream(int initialSize) { - super(initialSize); - } - public byte[] getBuffer() { - return buf; - } - @Override - public byte[] toByteArray() { - if (buf.length == size()) { - return buf; - } - return super.toByteArray(); + public TrustedByteArrayOutputStream(int initialSize) { + super(initialSize); + } + + public byte[] getBuffer() { + return buf; + } + + @Override + public byte[] toByteArray() { + if (buf.length == size()) { + return buf; } - @Override - public void write(byte[] b) { - try { - super.write(b); - } catch (IOException e) { - throw new RuntimeException(e); // Impossible - } + return super.toByteArray(); + } + + @Override + public void write(byte[] b) { + try { + super.write(b); + } catch (IOException e) { + throw new RuntimeException(e); // Impossible } + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TupleUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TupleUtil.java index 048f1095aee..45f29d8eb78 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/TupleUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/TupleUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,170 +44,171 @@ import org.apache.phoenix.schema.types.PVarbinaryEncoded; /** - * * Utilities for Tuple - * - * * @since 0.1 */ public class TupleUtil { - private TupleUtil() { - } - - public static boolean equals(Tuple t1, Tuple t2, ImmutableBytesWritable ptr) { - t1.getKey(ptr); - byte[] buf = ptr.get(); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - t2.getKey(ptr); - return Bytes.compareTo(buf, offset, length, ptr.get(), ptr.getOffset(), ptr.getLength()) == 0; - } - - public static int compare(Tuple t1, Tuple t2, ImmutableBytesWritable ptr) { - return compare(t1, t2, ptr, 0); + private TupleUtil() { + } + + public static boolean equals(Tuple t1, Tuple t2, ImmutableBytesWritable ptr) { + t1.getKey(ptr); + byte[] buf = ptr.get(); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + t2.getKey(ptr); + return Bytes.compareTo(buf, offset, length, ptr.get(), ptr.getOffset(), ptr.getLength()) == 0; + } + + public static int compare(Tuple t1, Tuple t2, ImmutableBytesWritable ptr) { + return compare(t1, t2, ptr, 0); + } + + public static int compare(Tuple t1, Tuple t2, ImmutableBytesWritable ptr, int keyOffset) { + t1.getKey(ptr); + byte[] buf = ptr.get(); + int offset = ptr.getOffset() + keyOffset; + int length = ptr.getLength() - keyOffset; + t2.getKey(ptr); + return Bytes.compareTo(buf, offset, length, ptr.get(), ptr.getOffset() + keyOffset, + ptr.getLength() - keyOffset); + } + + /** + * Set ptr to point to the value contained in the first KeyValue without exploding Result into + * KeyValue array. + */ + public static void getAggregateValue(Tuple r, ImmutableBytesWritable ptr) { + if (r.size() == 1) { + Cell kv = r.getValue(0); // Just one KV for aggregation + if ( + Bytes.compareTo(SINGLE_COLUMN_FAMILY, 0, SINGLE_COLUMN_FAMILY.length, kv.getFamilyArray(), + kv.getFamilyOffset(), kv.getFamilyLength()) == 0 + ) { + if ( + Bytes.compareTo(SINGLE_COLUMN, 0, SINGLE_COLUMN.length, kv.getQualifierArray(), + kv.getQualifierOffset(), kv.getQualifierLength()) == 0 + ) { + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return; + } + } } - - public static int compare(Tuple t1, Tuple t2, ImmutableBytesWritable ptr, int keyOffset) { - t1.getKey(ptr); - byte[] buf = ptr.get(); - int offset = ptr.getOffset() + keyOffset; - int length = ptr.getLength() - keyOffset; - t2.getKey(ptr); - return Bytes.compareTo(buf, offset, length, ptr.get(), ptr.getOffset() + keyOffset, ptr.getLength() - keyOffset); + throw new IllegalStateException( + "Expected single, aggregated KeyValue from coprocessor, but instead received " + r + + ". Ensure aggregating coprocessors are loaded correctly on server"); + } + + public static Tuple getAggregateGroupTuple(Tuple tuple) { + if (tuple == null) { + return null; } - - /** - * Set ptr to point to the value contained in the first KeyValue without - * exploding Result into KeyValue array. 
- * @param r - * @param ptr - */ - public static void getAggregateValue(Tuple r, ImmutableBytesWritable ptr) { - if (r.size() == 1) { - Cell kv = r.getValue(0); // Just one KV for aggregation - if (Bytes.compareTo(SINGLE_COLUMN_FAMILY, 0, SINGLE_COLUMN_FAMILY.length, kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) == 0) { - if (Bytes.compareTo(SINGLE_COLUMN, 0, SINGLE_COLUMN.length, kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0) { - ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - return; - } - } + if (tuple.size() == 1) { + Cell kv = tuple.getValue(0); + if ( + Bytes.compareTo(GROUPED_AGGREGATOR_VALUE_BYTES, 0, GROUPED_AGGREGATOR_VALUE_BYTES.length, + kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) == 0 + ) { + if ( + Bytes.compareTo(GROUPED_AGGREGATOR_VALUE_BYTES, 0, GROUPED_AGGREGATOR_VALUE_BYTES.length, + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) == 0 + ) { + byte[] kvValue = new byte[kv.getValueLength()]; + System.arraycopy(kv.getValueArray(), kv.getValueOffset(), kvValue, 0, kvValue.length); + int sizeOfAggregateGroupValue = + PInteger.INSTANCE.getCodec().decodeInt(kvValue, 0, SortOrder.ASC); + Cell result = PhoenixKeyValueUtil.newKeyValue(kvValue, Bytes.SIZEOF_INT, + sizeOfAggregateGroupValue, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, kvValue, + Bytes.SIZEOF_INT + sizeOfAggregateGroupValue, + kvValue.length - Bytes.SIZEOF_INT - sizeOfAggregateGroupValue); + return new ResultTuple(Result.create(Collections.singletonList(result))); } - throw new IllegalStateException("Expected single, aggregated KeyValue from coprocessor, but instead received " + r + ". Ensure aggregating coprocessors are loaded correctly on server"); + } } + return tuple; + } + + /** + * Concatenate results evaluated against a list of expressions + * @param result the tuple for expression evaluation + * @return the concatenated byte array as ImmutableBytesWritable + */ + public static ImmutableBytesPtr getConcatenatedValue(Tuple result, List expressions) + throws IOException { + ImmutableBytesPtr value = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY); + Expression expression = expressions.get(0); + boolean evaluated = expression.evaluate(result, value); - public static Tuple getAggregateGroupTuple(Tuple tuple) { - if (tuple == null) { - return null; + if (expressions.size() == 1) { + if (!evaluated) { + value.set(ByteUtil.EMPTY_BYTE_ARRAY); + } + return value; + } else { + TrustedByteArrayOutputStream output = + new TrustedByteArrayOutputStream(value.getLength() * expressions.size()); + try { + if (evaluated) { + output.write(value.get(), value.getOffset(), value.getLength()); } - if (tuple.size() == 1) { - Cell kv = tuple.getValue(0); - if (Bytes.compareTo(GROUPED_AGGREGATOR_VALUE_BYTES, 0, - GROUPED_AGGREGATOR_VALUE_BYTES.length, - kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) == 0) { - if (Bytes.compareTo(GROUPED_AGGREGATOR_VALUE_BYTES, 0, - GROUPED_AGGREGATOR_VALUE_BYTES.length, kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength()) == 0) { - byte[] kvValue = new byte[kv.getValueLength()]; - System.arraycopy(kv.getValueArray(), kv.getValueOffset(), kvValue, 0, - kvValue.length); - int sizeOfAggregateGroupValue = - PInteger.INSTANCE.getCodec().decodeInt(kvValue, 0, - SortOrder.ASC); - Cell result = PhoenixKeyValueUtil.newKeyValue( - kvValue, - Bytes.SIZEOF_INT, - sizeOfAggregateGroupValue, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - 
AGG_TIMESTAMP, - kvValue, - Bytes.SIZEOF_INT + sizeOfAggregateGroupValue, - kvValue.length - Bytes.SIZEOF_INT - sizeOfAggregateGroupValue); - return new ResultTuple(Result.create(Collections.singletonList(result))); - } - } + for (int i = 1; i < expressions.size(); i++) { + if (!expression.getDataType().isFixedWidth()) { + output.write(SchemaUtil.getSeparatorBytes(expression.getDataType(), true, + value.getLength() == 0, expression.getSortOrder())); + } + expression = expressions.get(i); + if (expression.evaluate(result, value)) { + output.write(value.get(), value.getOffset(), value.getLength()); + } else if (i < expressions.size() - 1 && expression.getDataType().isFixedWidth()) { + // This should never happen, because any non terminating nullable fixed width type (i.e. + // INT or LONG) is + // converted to a variable length type (i.e. DECIMAL) to allow an empty byte array to + // represent null. + throw new DoNotRetryIOException( + "Non terminating null value found for fixed width expression (" + expression + + ") in row: " + result); + } } - return tuple; - } - - /** Concatenate results evaluated against a list of expressions - * - * @param result the tuple for expression evaluation - * @param expressions - * @return the concatenated byte array as ImmutableBytesWritable - * @throws IOException - */ - public static ImmutableBytesPtr getConcatenatedValue(Tuple result, List expressions) throws IOException { - ImmutableBytesPtr value = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY); - Expression expression = expressions.get(0); - boolean evaluated = expression.evaluate(result, value); - - if (expressions.size() == 1) { - if (!evaluated) { - value.set(ByteUtil.EMPTY_BYTE_ARRAY); + // Write trailing separator if last expression was variable length and descending + if (!expression.getDataType().isFixedWidth()) { + if (expression.getDataType() != PVarbinaryEncoded.INSTANCE) { + if ( + SchemaUtil.getSeparatorByte(true, value.getLength() == 0, expression) + == QueryConstants.DESC_SEPARATOR_BYTE + ) { + output.write(QueryConstants.DESC_SEPARATOR_BYTE); } - return value; - } else { - TrustedByteArrayOutputStream output = new TrustedByteArrayOutputStream(value.getLength() * expressions.size()); - try { - if (evaluated) { - output.write(value.get(), value.getOffset(), value.getLength()); - } - for (int i = 1; i < expressions.size(); i++) { - if (!expression.getDataType().isFixedWidth()) { - output.write(SchemaUtil.getSeparatorBytes( - expression.getDataType(), - true, - value.getLength() == 0, - expression.getSortOrder())); - } - expression = expressions.get(i); - if (expression.evaluate(result, value)) { - output.write(value.get(), value.getOffset(), value.getLength()); - } else if (i < expressions.size()-1 && expression.getDataType().isFixedWidth()) { - // This should never happen, because any non terminating nullable fixed width type (i.e. INT or LONG) is - // converted to a variable length type (i.e. DECIMAL) to allow an empty byte array to represent null. 
- throw new DoNotRetryIOException("Non terminating null value found for fixed width expression (" + expression + ") in row: " + result); - } - } - // Write trailing separator if last expression was variable length and descending - if (!expression.getDataType().isFixedWidth()) { - if (expression.getDataType() != PVarbinaryEncoded.INSTANCE) { - if (SchemaUtil.getSeparatorByte(true, value.getLength() == 0, expression) - == QueryConstants.DESC_SEPARATOR_BYTE) { - output.write(QueryConstants.DESC_SEPARATOR_BYTE); - } - } else { - byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded(true, - value.getLength() == 0, expression.getSortOrder()); - if (sepBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { - output.write(QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES); - } - } - } - byte[] outputBytes = output.getBuffer(); - value.set(outputBytes, 0, output.size()); - return value; - } finally { - output.close(); + } else { + byte[] sepBytes = SchemaUtil.getSeparatorBytesForVarBinaryEncoded(true, + value.getLength() == 0, expression.getSortOrder()); + if (sepBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) { + output.write(QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES); } + } } + byte[] outputBytes = output.getBuffer(); + value.set(outputBytes, 0, output.size()); + return value; + } finally { + output.close(); + } } - - public static int write(Tuple result, DataOutput out) throws IOException { - int size = 0; - for(int i = 0; i < result.size(); i++) { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i)); - size += kv.getLength(); - size += Bytes.SIZEOF_INT; // kv.getLength - } + } - WritableUtils.writeVInt(out, size); - for(int i = 0; i < result.size(); i++) { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i)); - out.writeInt(kv.getLength()); - out.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); - } - return size; + public static int write(Tuple result, DataOutput out) throws IOException { + int size = 0; + for (int i = 0; i < result.size(); i++) { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i)); + size += kv.getLength(); + size += Bytes.SIZEOF_INT; // kv.getLength + } + + WritableUtils.writeVInt(out, size); + for (int i = 0; i < result.size(); i++) { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(result.getValue(i)); + out.writeInt(kv.getLength()); + out.write(kv.getBuffer(), kv.getOffset(), kv.getLength()); } + return size; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java index 899f40da168..684cd74b540 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpgradeUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +17,6 @@ */ package org.apache.phoenix.util; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHOENIX_TTL_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES; -import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED; -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.CURRENT_CLIENT_VERSION; import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.getVersion; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE; @@ -42,9 +33,11 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHOENIX_TTL_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_WITH; @@ -57,20 +50,26 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT; import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; import static 
org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; import static org.apache.phoenix.query.QueryConstants.EMPTY_COLUMN_BYTES; import static org.apache.phoenix.query.QueryConstants.EMPTY_COLUMN_VALUE_BYTES; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_TIMEOUT_DURING_UPGRADE_MS; +import static org.apache.phoenix.query.QueryConstants.SYSTEM_SCHEMA_NAME_BYTES; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED; import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_SCAN_PAGE_SIZE; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_TIMEOUT_DURING_UPGRADE_MS; +import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.io.IOException; import java.math.BigInteger; @@ -106,12 +105,10 @@ import javax.annotation.Nullable; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.phoenix.coprocessorclient.MetaDataEndpointImplConstants; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; @@ -120,10 +117,10 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.CheckAndMutate; -import org.apache.hadoop.hbase.client.CoprocessorDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.CoprocessorDescriptor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; @@ -139,6 +136,7 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.coprocessorclient.MetaDataEndpointImplConstants; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MetaDataMutationResult; import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; @@ -173,2978 +171,2951 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Objects; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressWarnings; @SuppressWarnings(value = "SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING", - justification="Not possible to avoid") + justification = "Not possible to avoid") public class UpgradeUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeUtil.class); - private static final byte[] SEQ_PREFIX_BYTES = 
ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_")); - public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7"); - private static final byte[] LINK_ROW = new byte[]{PTable.LinkType.CHILD_TABLE.getSerializedValue()}; - - /** - * Attribute for Phoenix's internal purposes only. When this attribute is set on a phoenix connection, then - * the upgrade code for upgrading the cluster to the new minor release is not triggered. Note that presence - * of this attribute overrides a true value for {@value QueryServices#AUTO_UPGRADE_ENABLED}. - */ - private static final String DO_NOT_UPGRADE = "DoNotUpgrade"; - public static final String UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW = "UPSERT " - + "INTO SYSTEM.CATALOG " - + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, BASE_COLUMN_COUNT) " - + "VALUES (?, ?, ?, ?, ?, ?) "; - - public static final String UPSERT_UPDATE_CACHE_FREQUENCY = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - UPDATE_CACHE_FREQUENCY + - ") VALUES (?, ?, ?, ?)"; - - public static final String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW = "SELECT " - + "BASE_COLUMN_COUNT " - + "FROM \"SYSTEM\".CATALOG " - + "WHERE " - + "COLUMN_NAME IS NULL " - + "AND " - + "COLUMN_FAMILY IS NULL " - + "AND " - + "TENANT_ID %s " - + "AND " - + "TABLE_SCHEM %s " - + "AND " - + "TABLE_NAME = ? " - ; - - private static final String UPDATE_LINK = - "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_FAMILY + "," + - LINK_TYPE + "," + - TABLE_SEQ_NUM +"," + - TABLE_TYPE + - ") SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + ",'%s' AS " - + COLUMN_FAMILY + " ," + LINK_TYPE + "," + TABLE_SEQ_NUM + "," + TABLE_TYPE +" FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" - + SYSTEM_CATALOG_TABLE + "\" WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = " - + LinkType.PHYSICAL_TABLE.getSerializedValue(); - - private static final String DELETE_LINK = "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE - + " WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); - - private UpgradeUtil() { - } - - private static byte[] getSequenceSnapshotName() { - return Bytes.toBytes("_BAK_" + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME); - } - - private static void createSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException { - byte[] tableName = getSequenceSnapshotName(); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES)) - .build(); + private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeUtil.class); + private static final byte[] SEQ_PREFIX_BYTES = + ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_")); + public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7"); + private static final byte[] LINK_ROW = + new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() }; + + /** + * Attribute for Phoenix's internal purposes only. 
When this attribute is set on a phoenix + * connection, then the upgrade code for upgrading the cluster to the new minor release is not + * triggered. Note that presence of this attribute overrides a true value for + * {@value QueryServices#AUTO_UPGRADE_ENABLED}. + */ + private static final String DO_NOT_UPGRADE = "DoNotUpgrade"; + public static final String UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW = + "UPSERT " + "INTO SYSTEM.CATALOG " + + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, BASE_COLUMN_COUNT) " + + "VALUES (?, ?, ?, ?, ?, ?) "; + + public static final String UPSERT_UPDATE_CACHE_FREQUENCY = + "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + + TABLE_SCHEM + "," + TABLE_NAME + "," + UPDATE_CACHE_FREQUENCY + ") VALUES (?, ?, ?, ?)"; + + public static final String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW = + "SELECT " + "BASE_COLUMN_COUNT " + "FROM \"SYSTEM\".CATALOG " + "WHERE " + + "COLUMN_NAME IS NULL " + "AND " + "COLUMN_FAMILY IS NULL " + "AND " + "TENANT_ID %s " + + "AND " + "TABLE_SCHEM %s " + "AND " + "TABLE_NAME = ? "; + + private static final String UPDATE_LINK = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_FAMILY + "," + LINK_TYPE + "," + TABLE_SEQ_NUM + "," + TABLE_TYPE + ") SELECT " + + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + ",'%s' AS " + COLUMN_FAMILY + " ," + + LINK_TYPE + "," + TABLE_SEQ_NUM + "," + TABLE_TYPE + " FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + + SYSTEM_CATALOG_TABLE + "\" WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? AND " + + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); + + private static final String DELETE_LINK = "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + "." + + SYSTEM_CATALOG_TABLE + " WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? 
AND " + + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); + + private UpgradeUtil() { + } + + private static byte[] getSequenceSnapshotName() { + return Bytes.toBytes("_BAK_" + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME); + } + + private static void createSequenceSnapshot(Admin admin, PhoenixConnection conn) + throws SQLException { + byte[] tableName = getSequenceSnapshotName(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.of(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES)) + .build(); + try { + admin.createTable(desc); + copyTable(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, tableName); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + private static void restoreSequenceSnapshot(Admin admin, PhoenixConnection conn) + throws SQLException { + byte[] tableName = getSequenceSnapshotName(); + copyTable(conn, tableName, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES); + } + + private static void deleteSequenceSnapshot(Admin admin) throws SQLException { + TableName tableName = TableName.valueOf(getSequenceSnapshotName()); + try { + admin.disableTable(tableName); + admin.deleteTable(tableName); + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } + } + + @SuppressWarnings("deprecation") + private static void copyTable(PhoenixConnection conn, byte[] sourceName, byte[] targetName) + throws SQLException { + int batchSizeBytes = 100 * 1024; // 100K chunks + int sizeBytes = 0; + List mutations = Lists.newArrayListWithExpectedSize(10000); + + Scan scan = new Scan(); + scan.setRaw(true); + scan.readAllVersions(); + ResultScanner scanner = null; + Table source = null; + Table target = null; + try { + source = conn.getQueryServices().getTable(sourceName); + target = conn.getQueryServices().getTable(targetName); + scanner = source.getScanner(scan); + Result result; + while ((result = scanner.next()) != null) { + for (Cell keyValue : result.rawCells()) { + sizeBytes += PrivateCellUtil.estimatedSerializedSizeOf(keyValue); + if (keyValue.getType() == Cell.Type.Put) { + // Put new value + Put put = + new Put(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); + put.add(keyValue); + mutations.add(put); + } else if (keyValue.getType() == Cell.Type.Delete) { + // Copy delete marker using new key so that it continues + // to delete the key value preceding it that will be updated + // as well. 
+ Delete delete = + new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); + delete.add(keyValue); + mutations.add(delete); + } + } + if (sizeBytes >= batchSizeBytes) { + LOGGER.info("Committing bactch of temp rows"); + target.batch(mutations, null); + mutations.clear(); + sizeBytes = 0; + } + } + if (!mutations.isEmpty()) { + LOGGER.info("Committing last bactch of temp rows"); + target.batch(mutations, null); + } + LOGGER.info("Successfully completed copy"); + } catch (SQLException e) { + throw e; + } catch (Exception e) { + throw ClientUtil.parseServerException(e); + } finally { + try { + if (scanner != null) scanner.close(); + } finally { try { - admin.createTable(desc); - copyTable(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, tableName); + if (source != null) source.close(); } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } - } - - private static void restoreSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException { - byte[] tableName = getSequenceSnapshotName(); - copyTable(conn, tableName, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES); - } - - private static void deleteSequenceSnapshot(Admin admin) throws SQLException { - TableName tableName = TableName.valueOf(getSequenceSnapshotName()); + LOGGER.warn("Exception during close of source table", e); + } finally { + try { + if (target != null) target.close(); + } catch (IOException e) { + LOGGER.warn("Exception during close of target table", e); + } + } + } + } + } + + private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBuckets) + throws SQLException { + Admin admin = conn.getQueryServices().getAdmin(); + boolean snapshotCreated = false; + boolean success = false; + try { + if (nSaltBuckets <= 0) { + return; + } + LOGGER.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + + "-ways. 
This may take some time - please do not close window."); + TableDescriptor desc = + admin.getDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES)); + createSequenceSnapshot(admin, conn); + snapshotCreated = true; + admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); + admin.deleteTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); + byte[][] splitPoints = SaltingUtil.getSalteByteSplitPoints(nSaltBuckets); + admin.createTable(desc, splitPoints); + restoreSequenceSnapshot(admin, conn); + success = true; + LOGGER.warn("Completed pre-splitting SYSTEM.SEQUENCE table"); + } catch (IOException e) { + throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e); + } finally { + try { + if (snapshotCreated && success) { + try { + deleteSequenceSnapshot(admin); + } catch (SQLException e) { + LOGGER.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e); + } + } + } finally { try { - admin.disableTable(tableName); - admin.deleteTable(tableName); + admin.close(); } catch (IOException e) { - throw ClientUtil.parseServerException(e); + LOGGER.warn("Exception while closing admin during pre-split", e); + } + } + } + } + + /** + * Utility method to get a HBaseConnection with overridden properties + */ + private static org.apache.hadoop.hbase.client.Connection getHBaseConnection(Configuration config, + Map options) throws IOException { + Configuration conf = HBaseConfiguration.create(config); + + conf.set(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + conf.set(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + conf.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); + if (options != null) { + for (Map.Entry entry : options.entrySet()) { + String k = entry.getKey(); + String v = entry.getValue(); + switch (k) { + case HConstants.HBASE_RPC_TIMEOUT_KEY: + conf.set(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, v); + conf.set(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, v); + break; + default: + conf.set(k, v); + break; + } + ; + } + ; + } + LOGGER.info(String.format("Creating HBase cluster connection to ==> %s", + conf.get(HConstants.ZOOKEEPER_QUORUM))); + return ConnectionFactory.createConnection(conf); + } + + public static PhoenixConnection upgradeLocalIndexes(PhoenixConnection metaConnection) + throws SQLException, IOException, org.apache.hadoop.hbase.TableNotFoundException { + Properties props = PropertiesUtil.deepCopy(metaConnection.getClientInfo()); + Long originalScn = null; + String str = props.getProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB); + if (str != null) { + originalScn = Long.valueOf(str); + } + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(HConstants.LATEST_TIMESTAMP)); + PhoenixConnection globalConnection = null; + PhoenixConnection toReturn = null; + globalConnection = + new PhoenixConnection(metaConnection, metaConnection.getQueryServices(), props); + SQLException sqlEx = null; + try (Admin admin = globalConnection.getQueryServices().getAdmin()) { + ResultSet rs = globalConnection.createStatement().executeQuery( + "SELECT TABLE_SCHEM, TABLE_NAME, DATA_TABLE_NAME, TENANT_ID, MULTI_TENANT, SALT_BUCKETS FROM SYSTEM.CATALOG " + + " WHERE COLUMN_NAME IS NULL" + " AND COLUMN_FAMILY IS NULL" + + " AND INDEX_TYPE=" + IndexType.LOCAL.getSerializedValue()); + boolean droppedLocalIndexes = false; + while (rs.next()) { + if 
(!droppedLocalIndexes) { + List localIndexTables = admin + .listTableDescriptors(Pattern.compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*")); + String localIndexSplitter = QueryConstants.LOCAL_INDEX_SPLITTER_CLASSNAME; + for (TableDescriptor table : localIndexTables) { + TableDescriptor dataTableDesc = admin.getDescriptor(TableName.valueOf( + MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString()))); + TableDescriptorBuilder dataTableDescBuilder = + TableDescriptorBuilder.newBuilder(dataTableDesc); + ColumnFamilyDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies(); + boolean modifyTable = false; + for (ColumnFamilyDescriptor cf : columnFamilies) { + String localIndexCf = + QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + cf.getNameAsString(); + if (dataTableDesc.getColumnFamily(Bytes.toBytes(localIndexCf)) == null) { + ColumnFamilyDescriptorBuilder colDefBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(localIndexCf)); + for (Entry keyValue : cf.getValues().entrySet()) { + colDefBuilder.setValue(keyValue.getKey().copyBytes(), + keyValue.getValue().copyBytes()); + } + dataTableDescBuilder.setColumnFamily(colDefBuilder.build()); + modifyTable = true; + } + } + Collection coprocessors = + dataTableDesc.getCoprocessorDescriptors(); + for (CoprocessorDescriptor coprocessor : coprocessors) { + if (coprocessor.getClassName().equals(localIndexSplitter)) { + dataTableDescBuilder.removeCoprocessor(localIndexSplitter); + modifyTable = true; + } + } + if (modifyTable) { + admin.modifyTable(dataTableDescBuilder.build()); + } + } + Pattern pattern = Pattern.compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*"); + for (TableDescriptor tableDescriptor : admin.listTableDescriptors(pattern)) { + admin.disableTable(tableDescriptor.getTableName()); + admin.deleteTable(tableDescriptor.getTableName()); + } + droppedLocalIndexes = true; + } + String schemaName = rs.getString(1); + String indexTableName = rs.getString(2); + String dataTableName = rs.getString(3); + String tenantId = rs.getString(4); + boolean multiTenantTable = rs.getBoolean(5); + int numColumnsToSkip = 1 + (multiTenantTable ? 1 : 0); + String getColumns = + "SELECT COLUMN_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE TABLE_SCHEM " + + (schemaName == null ? "IS NULL " : "='" + schemaName + "'") + " AND TENANT_ID " + + (tenantId == null ? "IS NULL " : "='" + tenantId + "'") + " and TABLE_NAME='" + + indexTableName + "' AND COLUMN_NAME IS NOT NULL AND KEY_SEQ > " + numColumnsToSkip + + " ORDER BY KEY_SEQ"; + ResultSet getColumnsRs = globalConnection.createStatement().executeQuery(getColumns); + List indexedColumns = new ArrayList(1); + List coveredColumns = new ArrayList(1); + + while (getColumnsRs.next()) { + String column = getColumnsRs.getString(1); + String columnName = IndexUtil.getDataColumnName(column); + String columnFamily = IndexUtil.getDataColumnFamilyName(column); + if (getColumnsRs.getString(2) == null) { + if (columnFamily != null && !columnFamily.isEmpty()) { + if ( + SchemaUtil.normalizeIdentifier(columnFamily) + .equals(QueryConstants.DEFAULT_COLUMN_FAMILY) + ) { + indexedColumns.add(columnName); + } else { + indexedColumns + .add(SchemaUtil.getCaseSensitiveColumnDisplayName(columnFamily, columnName)); + } + } else { + indexedColumns.add(columnName); + } + } else { + coveredColumns.add(SchemaUtil.normalizeIdentifier(columnFamily) + .equals(QueryConstants.DEFAULT_COLUMN_FAMILY) + ? 
columnName + : SchemaUtil.getCaseSensitiveColumnDisplayName(columnFamily, columnName)); + } + } + StringBuilder createIndex = new StringBuilder("CREATE LOCAL INDEX "); + createIndex.append(indexTableName); + createIndex.append(" ON "); + createIndex.append(SchemaUtil.getTableName(schemaName, dataTableName)); + createIndex.append("("); + for (int i = 0; i < indexedColumns.size(); i++) { + createIndex.append(indexedColumns.get(i)); + if (i < indexedColumns.size() - 1) { + createIndex.append(","); + } + } + createIndex.append(")"); + + if (!coveredColumns.isEmpty()) { + createIndex.append(" INCLUDE("); + for (int i = 0; i < coveredColumns.size(); i++) { + createIndex.append(coveredColumns.get(i)); + if (i < coveredColumns.size() - 1) { + createIndex.append(","); + } + } + createIndex.append(")"); + } + createIndex.append(" ASYNC"); + LOGGER.info("Index creation query is : " + createIndex.toString()); + LOGGER.info("Dropping the index " + indexTableName + + " to clean up the index details from SYSTEM.CATALOG."); + PhoenixConnection localConnection = null; + if (tenantId != null) { + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + localConnection = + new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); } - } + try { + (localConnection == null ? globalConnection : localConnection).createStatement() + .execute("DROP INDEX IF EXISTS " + indexTableName + " ON " + + SchemaUtil.getTableName(schemaName, dataTableName)); + LOGGER.info("Recreating the index " + indexTableName); + (localConnection == null ? globalConnection : localConnection).createStatement() + .execute(createIndex.toString()); + LOGGER.info("Created the index " + indexTableName); + } finally { + props.remove(PhoenixRuntime.TENANT_ID_ATTRIB); + if (localConnection != null) { + sqlEx = closeConnection(localConnection, sqlEx); + if (sqlEx != null) { + throw sqlEx; + } + } + } + } + globalConnection.createStatement() + .execute("DELETE FROM SYSTEM.CATALOG WHERE SUBSTR(TABLE_NAME,0,11)='" + + MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + "'"); + if (originalScn != null) { + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(originalScn)); + } + toReturn = + new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); + } catch (SQLException e) { + sqlEx = e; + } finally { + sqlEx = closeConnection(metaConnection, sqlEx); + sqlEx = closeConnection(globalConnection, sqlEx); + if (sqlEx != null) { + throw sqlEx; + } + } + return toReturn; + } + + public static PhoenixConnection disableViewIndexes(PhoenixConnection connParam) + throws SQLException, IOException, InterruptedException, TimeoutException { + Properties props = PropertiesUtil.deepCopy(connParam.getClientInfo()); + Long originalScn = null; + String str = props.getProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB); + if (str != null) { + originalScn = Long.valueOf(str); + } + // don't use the passed timestamp as scn because we want to query all view indexes up to now. 
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(HConstants.LATEST_TIMESTAMP)); + Set physicalTables = new HashSet<>(); + SQLException sqlEx = null; + PhoenixConnection globalConnection = null; + PhoenixConnection toReturn = null; + try { + globalConnection = new PhoenixConnection(connParam, connParam.getQueryServices(), props); + String tenantId = null; + try (Admin admin = globalConnection.getQueryServices().getAdmin()) { + String fetchViewIndexes = + "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME + ", " + DATA_TABLE_NAME + + " FROM " + SYSTEM_CATALOG_NAME + " WHERE " + VIEW_INDEX_ID + " IS NOT NULL"; + String disableIndexDDL = "ALTER INDEX %s ON %s DISABLE"; + try (ResultSet rs = globalConnection.createStatement().executeQuery(fetchViewIndexes)) { + while (rs.next()) { + tenantId = rs.getString(1); + String indexSchema = rs.getString(2); + String indexName = rs.getString(3); + String viewName = rs.getString(4); + String fullIndexName = SchemaUtil.getTableName(indexSchema, indexName); + String fullViewName = SchemaUtil.getTableName(indexSchema, viewName); + PTable viewPTable = null; + // Disable the view index and truncate the underlying hbase table. + // Users would need to rebuild the view indexes. + if (tenantId != null && !tenantId.isEmpty()) { + Properties newProps = PropertiesUtil.deepCopy(globalConnection.getClientInfo()); + newProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + PTable indexPTable = null; + try (PhoenixConnection tenantConnection = new PhoenixConnection(globalConnection, + globalConnection.getQueryServices(), newProps)) { + viewPTable = tenantConnection.getTable(fullViewName); + tenantConnection.createStatement() + .execute(String.format(disableIndexDDL, indexName, fullViewName)); + indexPTable = tenantConnection.getTable(fullIndexName); + } + + int offset = indexPTable.getBucketNum() != null ? 1 : 0; + int existingTenantIdPosition = ++offset; // positions are stored 1 based + int existingViewIdxIdPosition = ++offset; + int newTenantIdPosition = existingViewIdxIdPosition; + int newViewIdxPosition = existingTenantIdPosition; + String tenantIdColumn = + indexPTable.getColumns().get(existingTenantIdPosition - 1).getName().getString(); + int index = 0; + String updatePosition = + "UPSERT INTO " + SYSTEM_CATALOG_NAME + " ( " + TENANT_ID + "," + TABLE_SCHEM + "," + + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "," + ORDINAL_POSITION + + ") SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + + COLUMN_NAME + "," + COLUMN_FAMILY + "," + "?" + " FROM " + SYSTEM_CATALOG_NAME + + " WHERE " + TENANT_ID + " = ? " + " AND " + TABLE_NAME + " = ? " + " AND " + + (indexSchema == null ? TABLE_SCHEM + " IS NULL" : TABLE_SCHEM + " = ? ") + + " AND " + COLUMN_NAME + " = ? 
"; + // update view index position + try (PreparedStatement s = globalConnection.prepareStatement(updatePosition)) { + index = 0; + s.setInt(++index, newViewIdxPosition); + s.setString(++index, tenantId); + s.setString(++index, indexName); + if (indexSchema != null) { + s.setString(++index, indexSchema); + } + s.setString(++index, MetaDataUtil.getViewIndexIdColumnName()); + s.executeUpdate(); + } + // update tenant id position + try (PreparedStatement s = globalConnection.prepareStatement(updatePosition)) { + index = 0; + s.setInt(++index, newTenantIdPosition); + s.setString(++index, tenantId); + s.setString(++index, indexName); + if (indexSchema != null) { + s.setString(++index, indexSchema); + } + s.setString(++index, tenantIdColumn); + s.executeUpdate(); + } + globalConnection.commit(); + } else { + viewPTable = globalConnection.getTable(fullViewName); + globalConnection.createStatement() + .execute(String.format(disableIndexDDL, indexName, fullViewName)); + } + String indexPhysicalTableName = + MetaDataUtil.getViewIndexPhysicalName(viewPTable.getPhysicalName().getString()); + if (physicalTables.add(indexPhysicalTableName)) { + final TableName tableName = TableName.valueOf(indexPhysicalTableName); + if (admin.tableExists(tableName)) { + admin.disableTable(tableName); + admin.truncateTable(tableName, false); + } + } + } + } + } + if (originalScn != null) { + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(originalScn)); + } + toReturn = + new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); + } catch (SQLException e) { + sqlEx = e; + } finally { + sqlEx = closeConnection(connParam, sqlEx); + sqlEx = closeConnection(globalConnection, sqlEx); + if (sqlEx != null) { + throw sqlEx; + } + } + return toReturn; + } + + public static SQLException closeConnection(PhoenixConnection conn, SQLException sqlEx) { + SQLException toReturn = sqlEx; + try { + conn.close(); + } catch (SQLException e) { + if (toReturn != null) { + toReturn.setNextException(e); + } else { + toReturn = e; + } + } + return toReturn; + } + + @SuppressWarnings("deprecation") + public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, + PTable oldTable) throws SQLException { + LOGGER.info("Upgrading SYSTEM.SEQUENCE table"); + + byte[] seqTableKey = + SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE); + Table sysTable = conn.getQueryServices().getTable(SYSTEM_CATALOG_NAME_BYTES); + try { + LOGGER + .info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM); + Cell saltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(nSaltBuckets)); + Put saltPut = new Put(seqTableKey); + saltPut.add(saltKV); + // Prevent multiple clients from doing this upgrade + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(seqTableKey) + .ifNotExists(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES) + .build(saltPut); + + if (!sysTable.checkAndMutate(checkAndMutate).isSuccess()) { + if (oldTable == null) { // Unexpected, but to be safe just run pre-split code + preSplitSequenceTable(conn, nSaltBuckets); + return true; + } + // If upgrading from 4.2.0, then we need this special case of pre-splitting the table. 
+ // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401 + if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) { + byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber()); + Cell seqNumKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, + PLong.INSTANCE.toBytes(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); + Put seqNumPut = new Put(seqTableKey); + seqNumPut.add(seqNumKV); + // Increment TABLE_SEQ_NUM in checkAndPut as semaphore so that only single client + // pre-splits the sequence table. + checkAndMutate = CheckAndMutate.newBuilder(seqTableKey) + .ifEquals(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, oldSeqNum) + .build(seqNumPut); + + if (sysTable.checkAndMutate(checkAndMutate).isSuccess()) { + preSplitSequenceTable(conn, nSaltBuckets); + return true; + } + } + LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded"); + return false; + } - @SuppressWarnings("deprecation") - private static void copyTable(PhoenixConnection conn, byte[] sourceName, byte[] targetName) throws SQLException { + // if the SYSTEM.SEQUENCE table is at 4.1.0 or before then we need to salt the table + // and pre-split it. + if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { int batchSizeBytes = 100 * 1024; // 100K chunks int sizeBytes = 0; - List mutations = Lists.newArrayListWithExpectedSize(10000); + List mutations = Lists.newArrayListWithExpectedSize(10000); + boolean success = false; Scan scan = new Scan(); scan.setRaw(true); scan.readAllVersions(); - ResultScanner scanner = null; - Table source = null; - Table target = null; + Table seqTable = + conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES); try { - source = conn.getQueryServices().getTable(sourceName); - target = conn.getQueryServices().getTable(targetName); - scanner = source.getScanner(scan); + boolean committed = false; + LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows"); + ResultScanner scanner = seqTable.getScanner(scan); + try { Result result; - while ((result = scanner.next()) != null) { - for (Cell keyValue : result.rawCells()) { - sizeBytes += PrivateCellUtil.estimatedSerializedSizeOf(keyValue); - if (keyValue.getType() == Cell.Type.Put) { - // Put new value - Put put = new Put(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); - put.add(keyValue); - mutations.add(put); - } else if (keyValue.getType() == Cell.Type.Delete){ - // Copy delete marker using new key so that it continues - // to delete the key value preceding it that will be updated - // as well. 
- Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); - delete.add(keyValue); - mutations.add(delete); - } + while ((result = scanner.next()) != null) { + for (Cell keyValue : result.rawCells()) { + KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets); + if (newKeyValue != null) { + sizeBytes += newKeyValue.getLength(); + if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Put) { + // Delete old value + byte[] buf = keyValue.getRowArray(); + Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), + keyValue.getRowLength()); + KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), + keyValue.getRowLength(), buf, keyValue.getFamilyOffset(), + keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), + keyValue.getQualifierLength(), keyValue.getTimestamp(), KeyValue.Type.Delete, + ByteUtil.EMPTY_BYTE_ARRAY, 0, 0); + delete.add(deleteKeyValue); + mutations.add(delete); + sizeBytes += deleteKeyValue.getLength(); + // Put new value + Put put = new Put(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), + newKeyValue.getRowLength()); + put.add(newKeyValue); + mutations.add(put); + } else if ( + KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Delete + ) { + // Copy delete marker using new key so that it continues + // to delete the key value preceding it that will be updated + // as well. + Delete delete = new Delete(newKeyValue.getRowArray(), + newKeyValue.getRowOffset(), newKeyValue.getRowLength()); + delete.add(newKeyValue); + mutations.add(delete); + } } if (sizeBytes >= batchSizeBytes) { - LOGGER.info("Committing bactch of temp rows"); - target.batch(mutations, null); - mutations.clear(); - sizeBytes = 0; + LOGGER.info("Committing bactch of SYSTEM.SEQUENCE rows"); + seqTable.batch(mutations, null); + mutations.clear(); + sizeBytes = 0; + committed = true; } + } } if (!mutations.isEmpty()) { - LOGGER.info("Committing last bactch of temp rows"); - target.batch(mutations, null); - } - LOGGER.info("Successfully completed copy"); - } catch (SQLException e) { - throw e; - } catch (Exception e) { - throw ClientUtil.parseServerException(e); - } finally { - try { - if (scanner != null) scanner.close(); - } finally { - try { - if (source != null) source.close(); - } catch (IOException e) { - LOGGER.warn("Exception during close of source table",e); - } finally { - try { - if (target != null) target.close(); - } catch (IOException e) { - LOGGER.warn("Exception during close of target table",e); - } - } + LOGGER.info("Committing last bactch of SYSTEM.SEQUENCE rows"); + seqTable.batch(mutations, null); } - } - } - - private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBuckets) throws SQLException { - Admin admin = conn.getQueryServices().getAdmin(); - boolean snapshotCreated = false; - boolean success = false; - try { - if (nSaltBuckets <= 0) { - return; - } - LOGGER.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. 
This may take some time - please do not close window."); - TableDescriptor desc = admin.getDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES)); - createSequenceSnapshot(admin, conn); - snapshotCreated = true; - admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); - admin.deleteTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME)); - byte[][] splitPoints = SaltingUtil.getSalteByteSplitPoints(nSaltBuckets); - admin.createTable(desc, splitPoints); - restoreSequenceSnapshot(admin, conn); + preSplitSequenceTable(conn, nSaltBuckets); + LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE"); success = true; - LOGGER.warn("Completed pre-splitting SYSTEM.SEQUENCE table"); - } catch (IOException e) { - throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e); - } finally { + return true; + } catch (InterruptedException e) { + throw ClientUtil.parseServerException(e); + } finally { try { - if (snapshotCreated && success) { - try { - deleteSequenceSnapshot(admin); - } catch (SQLException e) { - LOGGER.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e); - } - } + scanner.close(); } finally { - try { - admin.close(); - } catch (IOException e) { - LOGGER.warn("Exception while closing admin during pre-split", e); - } - } - } - } - - /** - * Utility method to get a HBaseConnection with overridden properties - * @param options - * @return - * @throws IOException - */ - private static org.apache.hadoop.hbase.client.Connection getHBaseConnection(Configuration config, Map options) - throws IOException { - Configuration conf = HBaseConfiguration.create(config); - - conf.set(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - conf.set(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - conf.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Integer.toString(DEFAULT_TIMEOUT_DURING_UPGRADE_MS)); - if (options != null) { - for (Map.Entry entry : options.entrySet()) { - String k = entry.getKey(); - String v = entry.getValue(); - switch (k) { - case HConstants.HBASE_RPC_TIMEOUT_KEY: - conf.set(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, v); - conf.set(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, v); - break; - default: - conf.set(k, v); - break; - }; - }; - } - LOGGER.info(String.format("Creating HBase cluster connection to ==> %s", conf.get(HConstants.ZOOKEEPER_QUORUM))); - return ConnectionFactory.createConnection(conf); - } - - - public static PhoenixConnection upgradeLocalIndexes(PhoenixConnection metaConnection) - throws SQLException, IOException, org.apache.hadoop.hbase.TableNotFoundException { - Properties props = PropertiesUtil.deepCopy(metaConnection.getClientInfo()); - Long originalScn = null; - String str = props.getProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB); - if (str != null) { - originalScn = Long.valueOf(str); - } - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP)); - PhoenixConnection globalConnection = null; - PhoenixConnection toReturn = null; - globalConnection = new PhoenixConnection(metaConnection, metaConnection.getQueryServices(), props); - SQLException sqlEx = null; - try (Admin admin = globalConnection.getQueryServices().getAdmin()) { - ResultSet rs = globalConnection.createStatement().executeQuery("SELECT TABLE_SCHEM, TABLE_NAME, DATA_TABLE_NAME, TENANT_ID, MULTI_TENANT, SALT_BUCKETS FROM SYSTEM.CATALOG " - + " WHERE COLUMN_NAME IS NULL" - 
+ " AND COLUMN_FAMILY IS NULL" - + " AND INDEX_TYPE=" + IndexType.LOCAL.getSerializedValue()); - boolean droppedLocalIndexes = false; - while (rs.next()) { - if (!droppedLocalIndexes) { - List localIndexTables = admin.listTableDescriptors( - Pattern.compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*")); - String localIndexSplitter = QueryConstants.LOCAL_INDEX_SPLITTER_CLASSNAME; - for (TableDescriptor table : localIndexTables) { - TableDescriptor dataTableDesc = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString()))); - TableDescriptorBuilder dataTableDescBuilder = TableDescriptorBuilder.newBuilder(dataTableDesc); - ColumnFamilyDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies(); - boolean modifyTable = false; - for(ColumnFamilyDescriptor cf : columnFamilies) { - String localIndexCf = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX+cf.getNameAsString(); - if (dataTableDesc.getColumnFamily(Bytes.toBytes(localIndexCf))==null){ - ColumnFamilyDescriptorBuilder colDefBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(localIndexCf)); - for (Entry keyValue: cf.getValues().entrySet()){ - colDefBuilder.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes()); - } - dataTableDescBuilder.setColumnFamily(colDefBuilder.build()); - modifyTable = true; - } - } - Collection coprocessors = dataTableDesc.getCoprocessorDescriptors(); - for (CoprocessorDescriptor coprocessor: coprocessors) { - if (coprocessor.getClassName().equals(localIndexSplitter)) { - dataTableDescBuilder.removeCoprocessor(localIndexSplitter); - modifyTable = true; - } - } - if (modifyTable) { - admin.modifyTable(dataTableDescBuilder.build()); - } - } - Pattern pattern = Pattern.compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*"); - for(TableDescriptor tableDescriptor : admin.listTableDescriptors(pattern)){ - admin.disableTable(tableDescriptor.getTableName()); - admin.deleteTable(tableDescriptor.getTableName()); - } - droppedLocalIndexes = true; - } - String schemaName = rs.getString(1); - String indexTableName = rs.getString(2); - String dataTableName = rs.getString(3); - String tenantId = rs.getString(4); - boolean multiTenantTable = rs.getBoolean(5); - int numColumnsToSkip = 1 + (multiTenantTable ? 1 : 0); - String getColumns = - "SELECT COLUMN_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE TABLE_SCHEM " - + (schemaName == null ? "IS NULL " : "='" + schemaName+ "'") - + " AND TENANT_ID "+(tenantId == null ? 
"IS NULL " : "='" + tenantId + "'") - + " and TABLE_NAME='" + indexTableName - + "' AND COLUMN_NAME IS NOT NULL AND KEY_SEQ > "+ numColumnsToSkip +" ORDER BY KEY_SEQ"; - ResultSet getColumnsRs = globalConnection.createStatement().executeQuery(getColumns); - List indexedColumns = new ArrayList(1); - List coveredColumns = new ArrayList(1); - - while (getColumnsRs.next()) { - String column = getColumnsRs.getString(1); - String columnName = IndexUtil.getDataColumnName(column); - String columnFamily = IndexUtil.getDataColumnFamilyName(column); - if (getColumnsRs.getString(2) == null) { - if (columnFamily != null && !columnFamily.isEmpty()) { - if (SchemaUtil.normalizeIdentifier(columnFamily).equals(QueryConstants.DEFAULT_COLUMN_FAMILY)) { - indexedColumns.add(columnName); - } else { - indexedColumns.add(SchemaUtil.getCaseSensitiveColumnDisplayName( - columnFamily, columnName)); - } - } else { - indexedColumns.add(columnName); - } - } else { - coveredColumns.add(SchemaUtil.normalizeIdentifier(columnFamily) - .equals(QueryConstants.DEFAULT_COLUMN_FAMILY) ? columnName - : SchemaUtil.getCaseSensitiveColumnDisplayName( - columnFamily, columnName)); - } - } - StringBuilder createIndex = new StringBuilder("CREATE LOCAL INDEX "); - createIndex.append(indexTableName); - createIndex.append(" ON "); - createIndex.append(SchemaUtil.getTableName(schemaName, dataTableName)); - createIndex.append("("); - for (int i = 0; i < indexedColumns.size(); i++) { - createIndex.append(indexedColumns.get(i)); - if (i < indexedColumns.size() - 1) { - createIndex.append(","); - } - } - createIndex.append(")"); - - if (!coveredColumns.isEmpty()) { - createIndex.append(" INCLUDE("); - for (int i = 0; i < coveredColumns.size(); i++) { - createIndex.append(coveredColumns.get(i)); - if (i < coveredColumns.size() - 1) { - createIndex.append(","); - } - } - createIndex.append(")"); - } - createIndex.append(" ASYNC"); - LOGGER.info("Index creation query is : " + createIndex.toString()); - LOGGER.info("Dropping the index " + indexTableName - + " to clean up the index details from SYSTEM.CATALOG."); - PhoenixConnection localConnection = null; - if (tenantId != null) { - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - localConnection = new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); - } - try { - (localConnection == null ? globalConnection : localConnection).createStatement().execute( - "DROP INDEX IF EXISTS " + indexTableName + " ON " - + SchemaUtil.getTableName(schemaName, dataTableName)); - LOGGER.info("Recreating the index " + indexTableName); - (localConnection == null ? 
globalConnection : localConnection).createStatement().execute(createIndex.toString()); - LOGGER.info("Created the index " + indexTableName); - } finally { - props.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - if (localConnection != null) { - sqlEx = closeConnection(localConnection, sqlEx); - if (sqlEx != null) { - throw sqlEx; - } - } - } - } - globalConnection.createStatement().execute("DELETE FROM SYSTEM.CATALOG WHERE SUBSTR(TABLE_NAME,0,11)='"+MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+"'"); - if (originalScn != null) { - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(originalScn)); - } - toReturn = new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); - } catch (SQLException e) { - sqlEx = e; - } finally { - sqlEx = closeConnection(metaConnection, sqlEx); - sqlEx = closeConnection(globalConnection, sqlEx); - if (sqlEx != null) { - throw sqlEx; - } - } - return toReturn; - } - - public static PhoenixConnection disableViewIndexes(PhoenixConnection connParam) throws SQLException, IOException, InterruptedException, TimeoutException { - Properties props = PropertiesUtil.deepCopy(connParam.getClientInfo()); - Long originalScn = null; - String str = props.getProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB); - if (str != null) { - originalScn = Long.valueOf(str); - } - // don't use the passed timestamp as scn because we want to query all view indexes up to now. - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP)); - Set physicalTables = new HashSet<>(); - SQLException sqlEx = null; - PhoenixConnection globalConnection = null; - PhoenixConnection toReturn = null; - try { - globalConnection = new PhoenixConnection(connParam, connParam.getQueryServices(), props); - String tenantId = null; - try (Admin admin = globalConnection.getQueryServices().getAdmin()) { - String fetchViewIndexes = "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME + - ", " + DATA_TABLE_NAME + " FROM " + SYSTEM_CATALOG_NAME + " WHERE " + VIEW_INDEX_ID - + " IS NOT NULL"; - String disableIndexDDL = "ALTER INDEX %s ON %s DISABLE"; - try (ResultSet rs = globalConnection.createStatement().executeQuery(fetchViewIndexes)) { - while (rs.next()) { - tenantId = rs.getString(1); - String indexSchema = rs.getString(2); - String indexName = rs.getString(3); - String viewName = rs.getString(4); - String fullIndexName = SchemaUtil.getTableName(indexSchema, indexName); - String fullViewName = SchemaUtil.getTableName(indexSchema, viewName); - PTable viewPTable = null; - // Disable the view index and truncate the underlying hbase table. - // Users would need to rebuild the view indexes. - if (tenantId != null && !tenantId.isEmpty()) { - Properties newProps = PropertiesUtil.deepCopy(globalConnection.getClientInfo()); - newProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - PTable indexPTable = null; - try (PhoenixConnection tenantConnection = new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), newProps)) { - viewPTable = tenantConnection.getTable(fullViewName); - tenantConnection.createStatement().execute(String.format(disableIndexDDL, indexName, fullViewName)); - indexPTable = tenantConnection.getTable(fullIndexName); - } - - int offset = indexPTable.getBucketNum() != null ? 
1 : 0; - int existingTenantIdPosition = ++offset; // positions are stored 1 based - int existingViewIdxIdPosition = ++offset; - int newTenantIdPosition = existingViewIdxIdPosition; - int newViewIdxPosition = existingTenantIdPosition; - String tenantIdColumn = indexPTable.getColumns().get(existingTenantIdPosition - 1).getName().getString(); - int index = 0; - String updatePosition = - "UPSERT INTO " - + SYSTEM_CATALOG_NAME - + " ( " - + TENANT_ID - + "," - + TABLE_SCHEM - + "," - + TABLE_NAME - + "," - + COLUMN_NAME - + "," - + COLUMN_FAMILY - + "," - + ORDINAL_POSITION - + ") SELECT " - + TENANT_ID - + "," - + TABLE_SCHEM - + "," - + TABLE_NAME - + "," - + COLUMN_NAME - + "," - + COLUMN_FAMILY - + "," - + "?" - + " FROM " - + SYSTEM_CATALOG_NAME - + " WHERE " - + TENANT_ID - + " = ? " - + " AND " - + TABLE_NAME - + " = ? " - + " AND " - + (indexSchema == null ? TABLE_SCHEM + " IS NULL" : TABLE_SCHEM + " = ? ") - + " AND " - + COLUMN_NAME - + " = ? "; - // update view index position - try (PreparedStatement s = globalConnection.prepareStatement(updatePosition)) { - index = 0; - s.setInt(++index, newViewIdxPosition); - s.setString(++index, tenantId); - s.setString(++index, indexName); - if (indexSchema != null) { - s.setString(++index, indexSchema); - } - s.setString(++index, MetaDataUtil.getViewIndexIdColumnName()); - s.executeUpdate(); - } - // update tenant id position - try (PreparedStatement s = globalConnection.prepareStatement(updatePosition)) { - index = 0; - s.setInt(++index, newTenantIdPosition); - s.setString(++index, tenantId); - s.setString(++index, indexName); - if (indexSchema != null) { - s.setString(++index, indexSchema); - } - s.setString(++index, tenantIdColumn); - s.executeUpdate(); - } - globalConnection.commit(); - } else { - viewPTable = globalConnection.getTable(fullViewName); - globalConnection.createStatement().execute(String.format(disableIndexDDL, indexName, fullViewName)); - } - String indexPhysicalTableName = MetaDataUtil.getViewIndexPhysicalName(viewPTable.getPhysicalName().getString()); - if (physicalTables.add(indexPhysicalTableName)) { - final TableName tableName = TableName.valueOf(indexPhysicalTableName); - if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.truncateTable(tableName, false); - } - } - } - } - } - if (originalScn != null) { - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(originalScn)); - } - toReturn = new PhoenixConnection(globalConnection, globalConnection.getQueryServices(), props); - } catch (SQLException e) { - sqlEx = e; - } finally { - sqlEx = closeConnection(connParam, sqlEx); - sqlEx = closeConnection(globalConnection, sqlEx); - if (sqlEx != null) { - throw sqlEx; - } - } - return toReturn; - } - - - public static SQLException closeConnection(PhoenixConnection conn, SQLException sqlEx) { - SQLException toReturn = sqlEx; - try { - conn.close(); - } catch (SQLException e) { - if (toReturn != null) { - toReturn.setNextException(e); - } else { - toReturn = e; - } - } - return toReturn; - } - @SuppressWarnings("deprecation") - public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException { - LOGGER.info("Upgrading SYSTEM.SEQUENCE table"); - - byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE); - Table sysTable = conn.getQueryServices().getTable(SYSTEM_CATALOG_NAME_BYTES); - try { - LOGGER.info("Setting SALT_BUCKETS property of 
SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM); - Cell saltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, + if (!success) { + if (!committed) { // Try to recover by setting salting back to off, as we haven't + // successfully committed anything + // Don't use Delete here as we'd never be able to change it again at this + // timestamp. + Cell unsaltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(nSaltBuckets)); - Put saltPut = new Put(seqTableKey); - saltPut.add(saltKV); - // Prevent multiple clients from doing this upgrade - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(seqTableKey) - .ifNotExists(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES) - .build(saltPut); - - if (!sysTable.checkAndMutate(checkAndMutate).isSuccess()) { - if (oldTable == null) { // Unexpected, but to be safe just run pre-split code - preSplitSequenceTable(conn, nSaltBuckets); - return true; - } - // If upgrading from 4.2.0, then we need this special case of pre-splitting the table. - // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401 - if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) { - byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber()); - Cell seqNumKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, - PLong.INSTANCE.toBytes(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP)); - Put seqNumPut = new Put(seqTableKey); - seqNumPut.add(seqNumKV); - // Increment TABLE_SEQ_NUM in checkAndPut as semaphore so that only single client - // pre-splits the sequence table. - checkAndMutate = CheckAndMutate.newBuilder(seqTableKey) - .ifEquals(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES,oldSeqNum) - .build(seqNumPut); - - if (sysTable.checkAndMutate(checkAndMutate).isSuccess()) { - preSplitSequenceTable(conn, nSaltBuckets); - return true; - } - } - LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded"); - return false; - } - - // if the SYSTEM.SEQUENCE table is at 4.1.0 or before then we need to salt the table - // and pre-split it. 
- if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) { - int batchSizeBytes = 100 * 1024; // 100K chunks - int sizeBytes = 0; - List mutations = Lists.newArrayListWithExpectedSize(10000); - - boolean success = false; - Scan scan = new Scan(); - scan.setRaw(true); - scan.readAllVersions(); - Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES); - try { - boolean committed = false; - LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows"); - ResultScanner scanner = seqTable.getScanner(scan); - try { - Result result; - while ((result = scanner.next()) != null) { - for (Cell keyValue : result.rawCells()) { - KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets); - if (newKeyValue != null) { - sizeBytes += newKeyValue.getLength(); - if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Put) { - // Delete old value - byte[] buf = keyValue.getRowArray(); - Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); - KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(), - buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), - buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), - keyValue.getTimestamp(), KeyValue.Type.Delete, - ByteUtil.EMPTY_BYTE_ARRAY,0,0); - delete.add(deleteKeyValue); - mutations.add(delete); - sizeBytes += deleteKeyValue.getLength(); - // Put new value - Put put = new Put(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength()); - put.add(newKeyValue); - mutations.add(put); - } else if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Delete){ - // Copy delete marker using new key so that it continues - // to delete the key value preceding it that will be updated - // as well. - Delete delete = new Delete(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength()); - delete.add(newKeyValue); - mutations.add(delete); - } - } - if (sizeBytes >= batchSizeBytes) { - LOGGER.info("Committing bactch of SYSTEM.SEQUENCE rows"); - seqTable.batch(mutations, null); - mutations.clear(); - sizeBytes = 0; - committed = true; - } - } - } - if (!mutations.isEmpty()) { - LOGGER.info("Committing last bactch of SYSTEM.SEQUENCE rows"); - seqTable.batch(mutations, null); - } - preSplitSequenceTable(conn, nSaltBuckets); - LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE"); - success = true; - return true; - } catch (InterruptedException e) { - throw ClientUtil.parseServerException(e); - } finally { - try { - scanner.close(); - } finally { - if (!success) { - if (!committed) { // Try to recover by setting salting back to off, as we haven't successfully committed anything - // Don't use Delete here as we'd never be able to change it again at this timestamp. - Cell unsaltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(0)); - Put unsaltPut = new Put(seqTableKey); - unsaltPut.add(unsaltKV); - try { - sysTable.put(unsaltPut); - success = true; - } finally { - if (!success) LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE"); - } - } else { // We're screwed b/c we've already committed some salted sequences... 
- LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE"); - } - } - } - } - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } finally { - try { - seqTable.close(); - } catch (IOException e) { - LOGGER.warn("Exception during close",e); - } + MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(0)); + Put unsaltPut = new Put(seqTableKey); + unsaltPut.add(unsaltKV); + try { + sysTable.put(unsaltPut); + success = true; + } finally { + if (!success) LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE"); + } + } else { // We're screwed b/c we've already committed some salted sequences... + LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE"); } + } } - return false; + } } catch (IOException e) { - throw ClientUtil.parseServerException(e); + throw ClientUtil.parseServerException(e); } finally { - try { - sysTable.close(); - } catch (IOException e) { - LOGGER.warn("Exception during close",e); - } - } - - } - - @SuppressWarnings("deprecation") - private static KeyValue addSaltByte(Cell keyValue, int nSaltBuckets) { - byte[] buf = keyValue.getRowArray(); - int length = keyValue.getRowLength(); - int offset = keyValue.getRowOffset(); - boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0; - if (!isViewSeq && nSaltBuckets == 0) { - return null; - } - byte[] newBuf; - if (isViewSeq) { // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it - if (buf[length-1] == 0) { // Global indexes on views have trailing null byte - length--; + try { + seqTable.close(); + } catch (IOException e) { + LOGGER.warn("Exception during close", e); + } + } + } + return false; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } finally { + try { + sysTable.close(); + } catch (IOException e) { + LOGGER.warn("Exception during close", e); + } + } + + } + + @SuppressWarnings("deprecation") + private static KeyValue addSaltByte(Cell keyValue, int nSaltBuckets) { + byte[] buf = keyValue.getRowArray(); + int length = keyValue.getRowLength(); + int offset = keyValue.getRowOffset(); + boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, + SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0; + if (!isViewSeq && nSaltBuckets == 0) { + return null; + } + byte[] newBuf; + if (isViewSeq) { // We messed up the name for the sequences for view indexes so we'll take this + // opportunity to fix it + if (buf[length - 1] == 0) { // Global indexes on views have trailing null byte + length--; + } + byte[][] rowKeyMetaData = new byte[3][]; + SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData); + byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] unprefixedSchemaName = + new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length]; + System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, + unprefixedSchemaName, 0, unprefixedSchemaName.length); + byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + PName physicalName = PNameFactory.newName(unprefixedSchemaName); + // Reformulate key based on correct data + newBuf = + MetaDataUtil.getViewIndexSequenceKey(tableName == null ? 
null : Bytes.toString(tableName), + physicalName, nSaltBuckets, false).getKey(); + } else { + newBuf = new byte[length + 1]; + System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length); + newBuf[0] = + SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets); + } + return new KeyValue(newBuf, 0, newBuf.length, buf, keyValue.getFamilyOffset(), + keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), + keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType().getCode()), buf, + keyValue.getValueOffset(), keyValue.getValueLength()); + } + + /** + * Upgrade the metadata in the catalog table to enable adding columns to tables with views + * @param oldMetaConnection caller should take care of closing the passed connection appropriately + */ + public static void upgradeTo4_5_0(PhoenixConnection oldMetaConnection) throws SQLException { + PhoenixConnection metaConnection = null; + try { + // Need to use own connection with max time stamp to be able to read all data from + // SYSTEM.CATALOG + metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); + LOGGER.info("Upgrading metadata to support adding columns to tables with views"); + String getBaseTableAndViews = "SELECT " + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, " + + TENANT_ID + ", " + TABLE_SCHEM + " AS VIEW_SCHEMA, " + TABLE_NAME + " AS VIEW_NAME " + + "FROM " + SYSTEM_CATALOG_NAME + " WHERE " + COLUMN_FAMILY + " IS NOT NULL " // column_family + // column + // points to + // the + // physical + // table name. + + " AND " + COLUMN_NAME + " IS NULL " + " AND " + LINK_TYPE + " = ? "; + // Build a map of base table name -> list of views on the table. + Map> parentTableViewsMap = new HashMap<>(); + try (PreparedStatement stmt = metaConnection.prepareStatement(getBaseTableAndViews)) { + // Get back view rows that have links back to the base physical table. This takes care + // of cases when we have a hierarchy of views too. + stmt.setByte(1, LinkType.PHYSICAL_TABLE.getSerializedValue()); + try (ResultSet rs = stmt.executeQuery()) { + while (rs.next()) { + // this is actually SCHEMANAME.TABLENAME + String parentTable = rs.getString("BASE_PHYSICAL_TABLE"); + String tenantId = rs.getString(TENANT_ID); + String viewSchema = rs.getString("VIEW_SCHEMA"); + String viewName = rs.getString("VIEW_NAME"); + List viewKeysList = parentTableViewsMap.get(parentTable); + if (viewKeysList == null) { + viewKeysList = new ArrayList<>(); + parentTableViewsMap.put(parentTable, viewKeysList); } - byte[][] rowKeyMetaData = new byte[3][]; - SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData); - byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length]; - System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length); - byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - PName physicalName = PNameFactory.newName(unprefixedSchemaName); - // Reformulate key based on correct data - newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? 
null : Bytes.toString(tableName), - physicalName, nSaltBuckets, false).getKey(); + viewKeysList.add(new ViewKey(tenantId, viewSchema, viewName)); + } + } + } + boolean clearCache = false; + for (Entry> entry : parentTableViewsMap.entrySet()) { + // Fetch column information for the base physical table + String physicalTable = entry.getKey(); + String baseTableSchemaName = + SchemaUtil.getSchemaNameFromFullName(physicalTable).equals(StringUtil.EMPTY_STRING) + ? null + : SchemaUtil.getSchemaNameFromFullName(physicalTable); + String baseTableName = SchemaUtil.getTableNameFromFullName(physicalTable); + List basePhysicalTableColumns = new ArrayList<>(); + + // Columns fetched in order of ordinal position + String fetchColumnInfoForBasePhysicalTable = "SELECT " + COLUMN_NAME + "," + COLUMN_FAMILY + + "," + DATA_TYPE + "," + COLUMN_SIZE + "," + DECIMAL_DIGITS + "," + ORDINAL_POSITION + + "," + SORT_ORDER + "," + ARRAY_SIZE + " " + "FROM SYSTEM.CATALOG " + "WHERE " + + "TABLE_SCHEM %s " + "AND TABLE_NAME = ? " + "AND COLUMN_NAME IS NOT NULL " + + "AND LINK_TYPE IS NULL " + "ORDER BY " + ORDINAL_POSITION; + + PreparedStatement stmt = null; + if (baseTableSchemaName == null) { + fetchColumnInfoForBasePhysicalTable = + String.format(fetchColumnInfoForBasePhysicalTable, "IS NULL "); + stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable); + stmt.setString(1, baseTableName); } else { - newBuf = new byte[length + 1]; - System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length); - newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets); - } - return new KeyValue(newBuf, 0, newBuf.length, - buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), - buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), - keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType().getCode()), - buf, keyValue.getValueOffset(), keyValue.getValueLength()); - } - - /** - * Upgrade the metadata in the catalog table to enable adding columns to tables with views - * @param oldMetaConnection caller should take care of closing the passed connection appropriately - * @throws SQLException - */ - public static void upgradeTo4_5_0(PhoenixConnection oldMetaConnection) throws SQLException { - PhoenixConnection metaConnection = null; - try { - // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG - metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); - LOGGER.info("Upgrading metadata to support adding columns to tables with views"); - String getBaseTableAndViews = "SELECT " - + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, " - + TENANT_ID + ", " - + TABLE_SCHEM + " AS VIEW_SCHEMA, " - + TABLE_NAME + " AS VIEW_NAME " - + "FROM " + SYSTEM_CATALOG_NAME - + " WHERE " + COLUMN_FAMILY + " IS NOT NULL " // column_family column points to the physical table name. - + " AND " + COLUMN_NAME + " IS NULL " - + " AND " + LINK_TYPE + " = ? "; - // Build a map of base table name -> list of views on the table. - Map> parentTableViewsMap = new HashMap<>(); - try (PreparedStatement stmt = metaConnection.prepareStatement(getBaseTableAndViews)) { - // Get back view rows that have links back to the base physical table. This takes care - // of cases when we have a hierarchy of views too. 
- stmt.setByte(1, LinkType.PHYSICAL_TABLE.getSerializedValue()); - try (ResultSet rs = stmt.executeQuery()) { - while (rs.next()) { - // this is actually SCHEMANAME.TABLENAME - String parentTable = rs.getString("BASE_PHYSICAL_TABLE"); - String tenantId = rs.getString(TENANT_ID); - String viewSchema = rs.getString("VIEW_SCHEMA"); - String viewName = rs.getString("VIEW_NAME"); - List viewKeysList = parentTableViewsMap.get(parentTable); - if (viewKeysList == null) { - viewKeysList = new ArrayList<>(); - parentTableViewsMap.put(parentTable, viewKeysList); - } - viewKeysList.add(new ViewKey(tenantId, viewSchema, viewName)); - } - } + fetchColumnInfoForBasePhysicalTable = + String.format(fetchColumnInfoForBasePhysicalTable, " = ? "); + stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable); + stmt.setString(1, baseTableSchemaName); + stmt.setString(2, baseTableName); + } + + try (ResultSet rs = stmt.executeQuery()) { + while (rs.next()) { + basePhysicalTableColumns + .add(new ColumnDetails(rs.getString(COLUMN_FAMILY), rs.getString(COLUMN_NAME), + rs.getInt(ORDINAL_POSITION), rs.getInt(DATA_TYPE), rs.getInt(COLUMN_SIZE), + rs.getInt(DECIMAL_DIGITS), rs.getInt(SORT_ORDER), rs.getInt(ARRAY_SIZE))); + } + } + + // Fetch column information for all the views on the base physical table ordered by ordinal + // position. + List viewKeys = entry.getValue(); + StringBuilder sb = new StringBuilder(); + sb.append("SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + COLUMN_SIZE + "," + DECIMAL_DIGITS + "," + + ORDINAL_POSITION + "," + SORT_ORDER + "," + ARRAY_SIZE + " " + "FROM SYSTEM.CATALOG " + + "WHERE " + COLUMN_NAME + " IS NOT NULL " + "AND " + ORDINAL_POSITION + " <= ? " + // fetch + // only + // those + // columns + // that + // would + // impact + // setting + // of + // base + // column + // count + "AND " + "(" + TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME + ") IN ("); + + int numViews = viewKeys.size(); + for (int i = 0; i < numViews; i++) { + sb.append(" (?, ?, ?) 
"); + if (i < numViews - 1) { + sb.append(", "); + } + } + sb.append(" ) "); + sb.append( + " GROUP BY " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + + COLUMN_FAMILY + "," + DATA_TYPE + "," + COLUMN_SIZE + "," + DECIMAL_DIGITS + "," + + ORDINAL_POSITION + "," + SORT_ORDER + "," + ARRAY_SIZE + " " + "ORDER BY " + TENANT_ID + + "," + TABLE_SCHEM + ", " + TABLE_NAME + ", " + ORDINAL_POSITION); + String fetchViewColumnsSql = sb.toString(); + stmt = metaConnection.prepareStatement(fetchViewColumnsSql); + int numColsInBaseTable = basePhysicalTableColumns.size(); + stmt.setInt(1, numColsInBaseTable); + int paramIndex = 1; + stmt.setInt(paramIndex++, numColsInBaseTable); + for (ViewKey view : viewKeys) { + stmt.setString(paramIndex++, view.tenantId); + stmt.setString(paramIndex++, view.schema); + stmt.setString(paramIndex++, view.name); + } + String currentTenantId = null; + String currentViewSchema = null; + String currentViewName = null; + try (ResultSet rs = stmt.executeQuery()) { + int numBaseTableColsMatched = 0; + boolean ignore = false; + boolean baseColumnCountUpserted = false; + while (rs.next()) { + String viewTenantId = rs.getString(TENANT_ID); + String viewSchema = rs.getString(TABLE_SCHEM); + String viewName = rs.getString(TABLE_NAME); + if ( + !(Objects.equal(viewTenantId, currentTenantId) + && Objects.equal(viewSchema, currentViewSchema) + && Objects.equal(viewName, currentViewName)) + ) { + // We are about to iterate through columns of a different view. Check whether base + // column count was upserted. + // If it wasn't then it is likely the case that a column inherited from the base table + // was dropped from view. + if ( + currentViewName != null && !baseColumnCountUpserted + && numBaseTableColsMatched < numColsInBaseTable + ) { + upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, + currentViewName, DIVERGED_VIEW_BASE_COLUMN_COUNT); + clearCache = true; + } + // reset the values as we are now going to iterate over columns of a new view. + numBaseTableColsMatched = 0; + currentTenantId = viewTenantId; + currentViewSchema = viewSchema; + currentViewName = viewName; + ignore = false; + baseColumnCountUpserted = false; } - boolean clearCache = false; - for (Entry> entry : parentTableViewsMap.entrySet()) { - // Fetch column information for the base physical table - String physicalTable = entry.getKey(); - String baseTableSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalTable).equals(StringUtil.EMPTY_STRING) ? null : SchemaUtil.getSchemaNameFromFullName(physicalTable); - String baseTableName = SchemaUtil.getTableNameFromFullName(physicalTable); - List basePhysicalTableColumns = new ArrayList<>(); - - // Columns fetched in order of ordinal position - String fetchColumnInfoForBasePhysicalTable = "SELECT " + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - ARRAY_SIZE + " " + - "FROM SYSTEM.CATALOG " + - "WHERE " + - "TABLE_SCHEM %s " + - "AND TABLE_NAME = ? 
" + - "AND COLUMN_NAME IS NOT NULL " + - "AND LINK_TYPE IS NULL " + - "ORDER BY " + - ORDINAL_POSITION; - - PreparedStatement stmt = null; - if (baseTableSchemaName == null) { - fetchColumnInfoForBasePhysicalTable = - String.format(fetchColumnInfoForBasePhysicalTable, "IS NULL "); - stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable); - stmt.setString(1, baseTableName); - } else { - fetchColumnInfoForBasePhysicalTable = - String.format(fetchColumnInfoForBasePhysicalTable, " = ? "); - stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable); - stmt.setString(1, baseTableSchemaName); - stmt.setString(2, baseTableName); + if (!ignore) { + /* + * Iterate over all the columns of the base physical table and the columns of the + * view. Compare the two till one of the following happens: 1) We run into a view + * column which is different from column in the base physical table. This means that + * the view has diverged from the base physical table. In such a case we will set a + * special value for the base column count. That special value will also be used on + * the server side to filter out the diverged view so that meta-data changes on the + * base physical table are not propagated to it. 2) Every physical table column is + * present in the view. In that case we set the base column count as the number of + * columns in the base physical table. At that point we ignore rest of the columns of + * the view. + */ + ColumnDetails baseTableColumn = basePhysicalTableColumns.get(numBaseTableColsMatched); + String columName = rs.getString(COLUMN_NAME); + String columnFamily = rs.getString(COLUMN_FAMILY); + int ordinalPos = rs.getInt(ORDINAL_POSITION); + int dataType = rs.getInt(DATA_TYPE); + int columnSize = rs.getInt(COLUMN_SIZE); + int decimalDigits = rs.getInt(DECIMAL_DIGITS); + int sortOrder = rs.getInt(SORT_ORDER); + int arraySize = rs.getInt(ARRAY_SIZE); + ColumnDetails viewColumn = new ColumnDetails(columnFamily, columName, ordinalPos, + dataType, columnSize, decimalDigits, sortOrder, arraySize); + if (baseTableColumn.equals(viewColumn)) { + numBaseTableColsMatched++; + if (numBaseTableColsMatched == numColsInBaseTable) { + upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, + viewName, numColsInBaseTable); + // No need to ignore the rest of the columns of the view here since the + // query retrieved only those columns that had ordinal position <= + // numColsInBaseTable + baseColumnCountUpserted = true; + clearCache = true; } - - try (ResultSet rs = stmt.executeQuery()) { - while (rs.next()) { - basePhysicalTableColumns.add(new ColumnDetails(rs.getString(COLUMN_FAMILY), rs - .getString(COLUMN_NAME), rs.getInt(ORDINAL_POSITION), rs - .getInt(DATA_TYPE), rs.getInt(COLUMN_SIZE), rs.getInt(DECIMAL_DIGITS), - rs.getInt(SORT_ORDER), rs.getInt(ARRAY_SIZE))); - } - } - - // Fetch column information for all the views on the base physical table ordered by ordinal position. - List viewKeys = entry.getValue(); - StringBuilder sb = new StringBuilder(); - sb.append("SELECT " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - ARRAY_SIZE + " " + - "FROM SYSTEM.CATALOG " + - "WHERE " + - COLUMN_NAME + " IS NOT NULL " + - "AND " + - ORDINAL_POSITION + " <= ? 
" + // fetch only those columns that would impact setting of base column count - "AND " + - "(" + TENANT_ID+ ", " + TABLE_SCHEM + ", " + TABLE_NAME + ") IN ("); - - int numViews = viewKeys.size(); - for (int i = 0; i < numViews; i++) { - sb.append(" (?, ?, ?) "); - if (i < numViews - 1) { - sb.append(", "); - } - } - sb.append(" ) "); - sb.append(" GROUP BY " + - TENANT_ID + "," + - TABLE_SCHEM + "," + - TABLE_NAME + "," + - COLUMN_NAME + "," + - COLUMN_FAMILY + "," + - DATA_TYPE + "," + - COLUMN_SIZE + "," + - DECIMAL_DIGITS + "," + - ORDINAL_POSITION + "," + - SORT_ORDER + "," + - ARRAY_SIZE + " " + - "ORDER BY " + - TENANT_ID + "," + TABLE_SCHEM + ", " + TABLE_NAME + ", " + ORDINAL_POSITION); - String fetchViewColumnsSql = sb.toString(); - stmt = metaConnection.prepareStatement(fetchViewColumnsSql); - int numColsInBaseTable = basePhysicalTableColumns.size(); - stmt.setInt(1, numColsInBaseTable); - int paramIndex = 1; - stmt.setInt(paramIndex++, numColsInBaseTable); - for (ViewKey view : viewKeys) { - stmt.setString(paramIndex++, view.tenantId); - stmt.setString(paramIndex++, view.schema); - stmt.setString(paramIndex++, view.name); - } - String currentTenantId = null; - String currentViewSchema = null; - String currentViewName = null; - try (ResultSet rs = stmt.executeQuery()) { - int numBaseTableColsMatched = 0; - boolean ignore = false; - boolean baseColumnCountUpserted = false; - while (rs.next()) { - String viewTenantId = rs.getString(TENANT_ID); - String viewSchema = rs.getString(TABLE_SCHEM); - String viewName = rs.getString(TABLE_NAME); - if (!(Objects.equal(viewTenantId, currentTenantId) && Objects.equal(viewSchema, currentViewSchema) && Objects.equal(viewName, currentViewName))) { - // We are about to iterate through columns of a different view. Check whether base column count was upserted. - // If it wasn't then it is likely the case that a column inherited from the base table was dropped from view. - if (currentViewName != null && !baseColumnCountUpserted && numBaseTableColsMatched < numColsInBaseTable) { - upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVERGED_VIEW_BASE_COLUMN_COUNT); - clearCache = true; - } - // reset the values as we are now going to iterate over columns of a new view. - numBaseTableColsMatched = 0; - currentTenantId = viewTenantId; - currentViewSchema = viewSchema; - currentViewName = viewName; - ignore = false; - baseColumnCountUpserted = false; - } - if (!ignore) { - /* - * Iterate over all the columns of the base physical table and the columns of the view. Compare the - * two till one of the following happens: - * - * 1) We run into a view column which is different from column in the base physical table. - * This means that the view has diverged from the base physical table. In such a case - * we will set a special value for the base column count. That special value will also be used - * on the server side to filter out the diverged view so that meta-data changes on the base - * physical table are not propagated to it. - * - * 2) Every physical table column is present in the view. In that case we set the base column count - * as the number of columns in the base physical table. At that point we ignore rest of the columns - * of the view. 
- * - */ - ColumnDetails baseTableColumn = basePhysicalTableColumns.get(numBaseTableColsMatched); - String columName = rs.getString(COLUMN_NAME); - String columnFamily = rs.getString(COLUMN_FAMILY); - int ordinalPos = rs.getInt(ORDINAL_POSITION); - int dataType = rs.getInt(DATA_TYPE); - int columnSize = rs.getInt(COLUMN_SIZE); - int decimalDigits = rs.getInt(DECIMAL_DIGITS); - int sortOrder = rs.getInt(SORT_ORDER); - int arraySize = rs.getInt(ARRAY_SIZE); - ColumnDetails viewColumn = new ColumnDetails(columnFamily, columName, ordinalPos, dataType, columnSize, decimalDigits, sortOrder, arraySize); - if (baseTableColumn.equals(viewColumn)) { - numBaseTableColsMatched++; - if (numBaseTableColsMatched == numColsInBaseTable) { - upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, numColsInBaseTable); - // No need to ignore the rest of the columns of the view here since the - // query retrieved only those columns that had ordinal position <= numColsInBaseTable - baseColumnCountUpserted = true; - clearCache = true; - } - } else { - // special value to denote that the view has diverged from the base physical table. - upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVERGED_VIEW_BASE_COLUMN_COUNT); - baseColumnCountUpserted = true; - clearCache = true; - // ignore rest of the rows for the view. - ignore = true; - } - } - } - } - // set base column count for the header row of the base table too. We use this information - // to figure out whether the upgrade is in progress or hasn't started. - upsertBaseColumnCountInHeaderRow(metaConnection, null, baseTableSchemaName, baseTableName, BASE_TABLE_BASE_COLUMN_COUNT); - metaConnection.commit(); - } - // clear metadata cache on region servers to force loading of the latest metadata - if (clearCache) { - metaConnection.getQueryServices().clearCache(); - } - } finally { - if (metaConnection != null) { - metaConnection.close(); - } - } - } - - /** - * Upgrade the metadata in the catalog table to enable adding columns to tables with views - * @param oldMetaConnection caller should take care of closing the passed connection appropriately - * @throws SQLException - */ - public static void addParentToChildLinks(PhoenixConnection oldMetaConnection) throws SQLException { - PhoenixConnection metaConnection = null; - try { - // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG - metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); - LOGGER.info("Upgrading metadata to add parent to child links for views"); - metaConnection.commit(); - // physical table - // | - // child view - // | - // grand child view - // Create parent table to child view CHILD link. As the PARENT link from child view to physical table is not there (it gets overwritten with the PHYSICAL link) use the PHYSICAL link instead. 
- // We need to filter out grand child views PHYSICAL links while running this query - String createChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" + - "SELECT PARENT_TENANT_ID," + - " CASE INSTR(COLUMN_FAMILY,'.')" + - " WHEN 0 THEN NULL" + - " ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + - " END AS PARENT_SCHEMA," + - " CASE INSTR(COLUMN_FAMILY,'.')" + - " WHEN 0 THEN COLUMN_FAMILY" + - " ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + - " END AS PARENT_TABLE," + - " TENANT_ID," + - " CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + - " ELSE TABLE_SCHEM||'.'||TABLE_NAME" + - " END AS VIEW_NAME," + - " 4 AS LINK_TYPE " + - "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + - "WHERE LINK_TYPE = 2 " + - "AND TABLE_TYPE IS NULL " + - "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) NOT IN ( " + - " SELECT TENANT_ID, " + - " TABLE_SCHEM, " + - " TABLE_NAME " + - " FROM SYSTEM.CATALOG " + - " WHERE LINK_TYPE = 3 )"; - metaConnection.createStatement().execute(createChildLink); - metaConnection.commit(); - // Create child view to grand child view CHILD link using grand child view to child view PARENT link. - String createGrandChildLink = "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" + - "SELECT PARENT_TENANT_ID," + - " CASE INSTR(COLUMN_FAMILY,'.')" + - " WHEN 0 THEN NULL" + - " ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + - " END AS PARENT_SCHEMA," + - " CASE INSTR(COLUMN_FAMILY,'.')" + - " WHEN 0 THEN COLUMN_FAMILY" + - " ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + - " END AS PARENT_TABLE," + - " TENANT_ID," + - " CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + - " ELSE TABLE_SCHEM||'.'||TABLE_NAME" + - " END AS VIEW_NAME," + - " 4 AS LINK_TYPE " + - "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + - "WHERE LINK_TYPE = 3 "; - metaConnection.createStatement().execute(createGrandChildLink); - metaConnection.commit(); - metaConnection.getQueryServices().clearCache(); - } finally { - if (metaConnection != null) { - metaConnection.close(); + } else { + // special value to denote that the view has diverged from the base physical table. + upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, + DIVERGED_VIEW_BASE_COLUMN_COUNT); + baseColumnCountUpserted = true; + clearCache = true; + // ignore rest of the rows for the view. 
+ ignore = true; + } } - } - } - - /** - * Move or copy child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK - * @param oldMetaConnection caller should take care of closing the passed connection appropriately - * @throws SQLException - */ - - public static void moveOrCopyChildLinks(PhoenixConnection oldMetaConnection, Map options) throws IOException { - long numberOfCopiedParentChildRows = 0; - long numberOfDeletedParentChildRows = 0; - - boolean moveChildLinksDuringUpgradeEnabled = - oldMetaConnection.getQueryServices().getProps().getBoolean( - QueryServices.MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED, - QueryServicesOptions.DEFAULT_MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED); - - Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); - - ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); - TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); - TableName sysChildLink = SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME, readOnlyProps); - - LOGGER.debug(String.format("SYSTEM CATALOG tabled use for copying child links: %s", sysCat.toString())); - LOGGER.debug(String.format("SYSTEM CHILD LINK table used for copying child links: %s", sysChildLink.toString())); - - - try (org.apache.hadoop.hbase.client.Connection moveChildLinkConnection = getHBaseConnection(conf, options); - Table sysCatalogTable = moveChildLinkConnection.getTable(sysCat)) { - boolean pageMore = false; - byte[] lastRowKey = null; - - do { - Scan scan = new Scan(); - scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); - // Push down the filter to hbase to avoid transfer - SingleColumnValueFilter childLinkFilter = new SingleColumnValueFilter( - DEFAULT_COLUMN_FAMILY_BYTES, - LINK_TYPE_BYTES, CompareOperator.EQUAL, - new byte[]{ PTable.LinkType.CHILD_TABLE.getSerializedValue()}); - - childLinkFilter.setFilterIfMissing(true); - // Limit number of records - PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); - - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, childLinkFilter)); - if (pageMore) { - scan.withStartRow(lastRowKey, false); + } + } + // set base column count for the header row of the base table too. We use this information + // to figure out whether the upgrade is in progress or hasn't started. + upsertBaseColumnCountInHeaderRow(metaConnection, null, baseTableSchemaName, baseTableName, + BASE_TABLE_BASE_COLUMN_COUNT); + metaConnection.commit(); + } + // clear metadata cache on region servers to force loading of the latest metadata + if (clearCache) { + metaConnection.getQueryServices().clearCache(); + } + } finally { + if (metaConnection != null) { + metaConnection.close(); + } + } + } + + /** + * Upgrade the metadata in the catalog table to enable adding columns to tables with views + * @param oldMetaConnection caller should take care of closing the passed connection appropriately + */ + public static void addParentToChildLinks(PhoenixConnection oldMetaConnection) + throws SQLException { + PhoenixConnection metaConnection = null; + try { + // Need to use own connection with max time stamp to be able to read all data from + // SYSTEM.CATALOG + metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); + LOGGER.info("Upgrading metadata to add parent to child links for views"); + metaConnection.commit(); + // physical table + // | + // child view + // | + // grand child view + // Create parent table to child view CHILD link. 
As the PARENT link from child view to + // physical table is not there (it gets overwritten with the PHYSICAL link) use the PHYSICAL + // link instead. + // We need to filter out grand child views PHYSICAL links while running this query + String createChildLink = + "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" + + "SELECT PARENT_TENANT_ID," + " CASE INSTR(COLUMN_FAMILY,'.')" + + " WHEN 0 THEN NULL" + + " ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + + " END AS PARENT_SCHEMA," + " CASE INSTR(COLUMN_FAMILY,'.')" + + " WHEN 0 THEN COLUMN_FAMILY" + + " ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + + " END AS PARENT_TABLE," + " TENANT_ID," + + " CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + + " ELSE TABLE_SCHEM||'.'||TABLE_NAME" + " END AS VIEW_NAME," + + " 4 AS LINK_TYPE " + "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + + "WHERE LINK_TYPE = 2 " + "AND TABLE_TYPE IS NULL " + + "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) NOT IN ( " + " SELECT TENANT_ID, " + + " TABLE_SCHEM, " + " TABLE_NAME " + + " FROM SYSTEM.CATALOG " + " WHERE LINK_TYPE = 3 )"; + metaConnection.createStatement().execute(createChildLink); + metaConnection.commit(); + // Create child view to grand child view CHILD link using grand child view to child view + // PARENT link. + String createGrandChildLink = + "UPSERT INTO SYSTEM.CATALOG(TENANT_ID,TABLE_SCHEM,TABLE_NAME,COLUMN_NAME,COLUMN_FAMILY,LINK_TYPE)" + + "SELECT PARENT_TENANT_ID," + " CASE INSTR(COLUMN_FAMILY,'.')" + + " WHEN 0 THEN NULL" + + " ELSE REGEXP_SUBSTR(COLUMN_FAMILY,'[^\\.]+')" + + " END AS PARENT_SCHEMA," + " CASE INSTR(COLUMN_FAMILY,'.')" + + " WHEN 0 THEN COLUMN_FAMILY" + + " ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1)" + + " END AS PARENT_TABLE," + " TENANT_ID," + + " CASE WHEN TABLE_SCHEM IS NULL THEN TABLE_NAME" + + " ELSE TABLE_SCHEM||'.'||TABLE_NAME" + " END AS VIEW_NAME," + + " 4 AS LINK_TYPE " + "FROM SYSTEM.CATALOG(PARENT_TENANT_ID VARCHAR)" + + "WHERE LINK_TYPE = 3 "; + metaConnection.createStatement().execute(createGrandChildLink); + metaConnection.commit(); + metaConnection.getQueryServices().clearCache(); + } finally { + if (metaConnection != null) { + metaConnection.close(); + } + } + } + + /** + * Move or copy child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK + * @param oldMetaConnection caller should take care of closing the passed connection appropriately + */ + + public static void moveOrCopyChildLinks(PhoenixConnection oldMetaConnection, + Map options) throws IOException { + long numberOfCopiedParentChildRows = 0; + long numberOfDeletedParentChildRows = 0; + + boolean moveChildLinksDuringUpgradeEnabled = oldMetaConnection.getQueryServices().getProps() + .getBoolean(QueryServices.MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED, + QueryServicesOptions.DEFAULT_MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED); + + Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); + + ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); + TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); + TableName sysChildLink = SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME, readOnlyProps); + + LOGGER.debug( + String.format("SYSTEM CATALOG tabled use for copying child links: %s", sysCat.toString())); + LOGGER.debug(String.format("SYSTEM CHILD LINK table used for copying child links: %s", + sysChildLink.toString())); + + try ( + org.apache.hadoop.hbase.client.Connection moveChildLinkConnection = + 
getHBaseConnection(conf, options); + Table sysCatalogTable = moveChildLinkConnection.getTable(sysCat)) { + boolean pageMore = false; + byte[] lastRowKey = null; + + do { + Scan scan = new Scan(); + scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); + // Push down the filter to hbase to avoid transfer + SingleColumnValueFilter childLinkFilter = + new SingleColumnValueFilter(DEFAULT_COLUMN_FAMILY_BYTES, LINK_TYPE_BYTES, + CompareOperator.EQUAL, new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() }); + + childLinkFilter.setFilterIfMissing(true); + // Limit number of records + PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); + + scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, childLinkFilter)); + if (pageMore) { + scan.withStartRow(lastRowKey, false); + } + // Collect the row keys to process them in batch + try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { + int count = 0; + List rowKeys = new ArrayList<>(); + List puts = new ArrayList<>(); + for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { + count++; + lastRowKey = rr.getRow(); + byte[] tmpKey = new byte[lastRowKey.length]; + System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); + long rowTS = rr.rawCells()[0].getTimestamp(); + rowKeys.add(tmpKey); + Put put = new Put(tmpKey); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, + EMPTY_COLUMN_VALUE_BYTES); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, LINK_TYPE_BYTES, rowTS, LINK_ROW); + puts.add(put); + } + + if (puts.size() > 0) { + Object[] putResults = new Object[puts.size()]; + try (Table childLinkTable = moveChildLinkConnection.getTable(sysChildLink)) { + // Process a batch of child links + childLinkTable.batch(puts, putResults); + // if move child links is enabled instead of copy, delete the rows from + // SYSTEM.CATALOG. + if (moveChildLinksDuringUpgradeEnabled) { + List deletes = Lists.newArrayList(); + for (int i = 0; i < putResults.length; i++) { + if (java.util.Objects.nonNull(putResults[i])) { + deletes.add(new Delete(rowKeys.get(i))); + } } - // Collect the row keys to process them in batch - try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { - int count = 0; - List rowKeys = new ArrayList<>(); - List puts = new ArrayList<>(); - for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { - count++; - lastRowKey = rr.getRow(); - byte[] tmpKey = new byte[lastRowKey.length]; - System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); - long rowTS = rr.rawCells()[0].getTimestamp(); - rowKeys.add(tmpKey); - Put put = new Put(tmpKey); - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, - EMPTY_COLUMN_VALUE_BYTES); - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, LINK_TYPE_BYTES, rowTS, - LINK_ROW); - puts.add(put); - } - - if (puts.size() > 0) { - Object[] putResults = new Object[puts.size()]; - try (Table childLinkTable = moveChildLinkConnection.getTable(sysChildLink)) { - // Process a batch of child links - childLinkTable.batch(puts, putResults); - // if move child links is enabled instead of copy, delete the rows from SYSTEM.CATALOG. 
- if (moveChildLinksDuringUpgradeEnabled) { - List deletes = Lists.newArrayList(); - for (int i = 0; i < putResults.length; i++) { - if (java.util.Objects.nonNull(putResults[i])) { - deletes.add(new Delete(rowKeys.get(i))); - } - } - numberOfCopiedParentChildRows += deletes.size(); - - Object[] deleteResults = new Object[deletes.size()]; - sysCatalogTable.batch(deletes, deleteResults); - int numDeletes = 0; - for (int i = 0; i < deleteResults.length; i++) { - if (java.util.Objects.nonNull(deleteResults[i])) { - numDeletes++; - } - } - numberOfDeletedParentChildRows += numDeletes; - - } else { - int numCopied = 0; - for (int i = 0; i < putResults.length; i++) { - if (java.util.Objects.nonNull(putResults[i])) { - numCopied++; - } - } - numberOfCopiedParentChildRows += numCopied; - } - - } catch (Exception e) { - LOGGER.error(String.format( - "Failed adding child link batch from %s to %s with Exception :", - SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), e); - } - } - pageMore = count != 0; - LOGGER.info(String.format("moveOrCopyChildLinks in progress => numberOfCopiedParentChildRows: %d " + - "numberOfDeletedParentChildRows: %d", - numberOfCopiedParentChildRows, - numberOfDeletedParentChildRows)); - + numberOfCopiedParentChildRows += deletes.size(); + + Object[] deleteResults = new Object[deletes.size()]; + sysCatalogTable.batch(deletes, deleteResults); + int numDeletes = 0; + for (int i = 0; i < deleteResults.length; i++) { + if (java.util.Objects.nonNull(deleteResults[i])) { + numDeletes++; + } } - } while (pageMore); - } catch (IOException ioe) { - LOGGER.error(String.format( - "Failed adding child link rows from %s to %s with Exception :", - SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), ioe); - throw ioe; - } - LOGGER.info(String.format("Finished moving/copying child link rows from %s to %s ", - SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME)); - } - - public static void copyTTLValuesFromPhoenixTTLColumnToTTLColumn( - PhoenixConnection oldMetaConnection, Map options) throws IOException { - long numOfCopiedTTLRows = 0; - ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); - TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); - - LOGGER.debug(String.format("SYSTEM CATALOG tabled use for copying TTL values: %s", sysCat.toString())); - Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); - try (org.apache.hadoop.hbase.client.Connection copyTTLConnection = getHBaseConnection(conf, options); - Table sysCatalogTable = copyTTLConnection.getTable(sysCat)) { - boolean pageMore = false; - byte[] lastRowKey = null; - - do { - Scan scan = new Scan(); - scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); - // Push down the filter to hbase to avoid transfer - SingleColumnValueFilter copyTTLFilter = new SingleColumnValueFilter( - DEFAULT_COLUMN_FAMILY_BYTES, - PHOENIX_TTL_BYTES, CompareOperator.NOT_EQUAL, - (byte[]) null); - - copyTTLFilter.setFilterIfMissing(true); - // Limit number of records - PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); - - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, copyTTLFilter)); - if (pageMore) { - scan.withStartRow(lastRowKey, false); + numberOfDeletedParentChildRows += numDeletes; + + } else { + int numCopied = 0; + for (int i = 0; i < putResults.length; i++) { + if (java.util.Objects.nonNull(putResults[i])) { + numCopied++; + } } - // Collect the row keys to process them in batch - try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { - int count = 0; - 
List rowKeys = new ArrayList<>(); - List puts = new ArrayList<>(); - for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { - count++; - lastRowKey = rr.getRow(); - byte[] tmpKey = new byte[lastRowKey.length]; - System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); - long rowTS = rr.rawCells()[0].getTimestamp(); - rowKeys.add(tmpKey); - Put put = new Put(tmpKey); - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, - EMPTY_COLUMN_VALUE_BYTES); - int result = new BigInteger(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, - PHOENIX_TTL_BYTES)).intValue(); - //Check if result is negative (means greater than INT_MAX, - //put result as INT_MAX - if (result < 0) { - result = Integer.MAX_VALUE; - } - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES, rowTS, - PVarchar.INSTANCE.toBytes(String.valueOf(result))); - puts.add(put); - } - - if (puts.size() > 0) { - Object[] putResults = new Object[puts.size()]; - try (Table copyTTLTable = copyTTLConnection.getTable(sysCat)) { - // Process a batch of ttl values - copyTTLTable.batch(puts, putResults); - int numCopied = 0; - for (int i = 0; i < putResults.length; i++) { - if (java.util.Objects.nonNull(putResults[i])) { - numCopied++; - } - } - numOfCopiedTTLRows += numCopied; - } catch (Exception e) { - LOGGER.error(String.format( - "Failed copying ttl value batch from PHOENIX_TTL column to TTL" + - " column on %s with Exception :", - SYSTEM_CATALOG_NAME), e); - } - } - pageMore = count != 0; - LOGGER.info(String.format("copyTTLValues From PHOENIX_TTL to TTL Column is " + - "in progress => numOfCopiedTTLRows: %d", - numOfCopiedTTLRows)); + numberOfCopiedParentChildRows += numCopied; + } - } - } while (pageMore); - } catch (IOException ioe) { - LOGGER.error(String.format( - "Failed copying ttl value batch from PHOENIX_TTL column to TTL" + - " column in %s with Exception :", - SYSTEM_CATALOG_NAME), ioe); - throw ioe; - } - LOGGER.info(String.format("Finished copying ttl values link rows from PHOENIX_TTL column " + - "to TTL column on %s ", - SYSTEM_CATALOG_NAME)); - - } - - public static void moveHBaseLevelTTLToSYSCAT(PhoenixConnection oldMetaConnection, - Map options) throws IOException { - long numOfTableThatHasTTLMoved = 0; - ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); - TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); - - LOGGER.debug(String.format("SYSTEM CATALOG table use for copying TTL values: %s", sysCat.toString())); - Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); - try (org.apache.hadoop.hbase.client.Connection moveTTLConnection = getHBaseConnection(conf, options); - Table sysCatalogTable = moveTTLConnection.getTable(sysCat); - Admin admin = moveTTLConnection.getAdmin()) { - //Scan SYSCAT for all tables... 
- boolean pageMore = false; - byte[] lastRowKey = null; - - do { - Scan scan = new Scan(); - scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); - // Push down the filter to hbase to avoid transfer - SingleColumnValueFilter tableFilter = new SingleColumnValueFilter( - DEFAULT_COLUMN_FAMILY_BYTES, - TABLE_TYPE_BYTES, CompareOperator.EQUAL, - PTableType.TABLE.getSerializedValue().getBytes(StandardCharsets.UTF_8)); - - tableFilter.setFilterIfMissing(true); - // Limit number of records - PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); - - scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, tableFilter)); - if (pageMore) { - scan.withStartRow(lastRowKey, false); - } - // Collect the row keys to process them in batch - try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { - int count = 0; - List rowKeys = new ArrayList<>(); - List puts = new ArrayList<>(); - for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { - count++; - lastRowKey = rr.getRow(); - byte[] tmpKey = new byte[lastRowKey.length]; - System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); - rowKeys.add(tmpKey); - String tableName = SchemaUtil.getTableName(rr.getValue( - DEFAULT_COLUMN_FAMILY_BYTES, TABLE_SCHEM_BYTES), - rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_NAME_BYTES)); - if (tableName == null || Arrays.equals(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, - TABLE_SCHEM_BYTES), SYSTEM_SCHEMA_NAME_BYTES)) { - //We do not support system table ttl through phoenix ttl, and it will be moved to a - //constant value in future commit. - continue; - } - TableDescriptor tableDesc = admin.getDescriptor(SchemaUtil.getPhysicalTableName( - tableName, readOnlyProps)); - int ttl = tableDesc.getColumnFamily(DEFAULT_COLUMN_FAMILY_BYTES). - getTimeToLive(); - if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) { - //As we have ttl defined fot this table create a Put to set TTL. 
- long rowTS = rr.rawCells()[0].getTimestamp(); - Put put = new Put(tmpKey); - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, - EMPTY_COLUMN_VALUE_BYTES); - put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES, rowTS, - PVarchar.INSTANCE.toBytes(String.valueOf(ttl))); - puts.add(put); - - //Set TTL to Default at CF level when Phoenix level ttl is enabled - if (oldMetaConnection.getQueryServices().getConfiguration().getBoolean( - QueryServices.PHOENIX_TABLE_TTL_ENABLED, DEFAULT_PHOENIX_TABLE_TTL_ENABLED)) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(DEFAULT_COLUMN_FAMILY_BYTES).setTimeToLive( - ColumnFamilyDescriptorBuilder.DEFAULT_TTL).build(); - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder( - admin.getDescriptor(SchemaUtil.getPhysicalTableName( - tableName, readOnlyProps))).modifyColumnFamily( - columnFamilyDescriptor).build(); - admin.modifyTable(tableDescriptor); - } - } - } - - if (!puts.isEmpty()) { - Object[] putResults = new Object[puts.size()]; - try (Table moveTTLTable = moveTTLConnection.getTable(sysCat)) { - // Process a batch of ttl values - moveTTLTable.batch(puts, putResults); - int numMoved = 0; - for (Object putResult : putResults) { - if (java.util.Objects.nonNull(putResult)) { - numMoved++; - } - } - numOfTableThatHasTTLMoved += numMoved; - } catch (Exception e) { - LOGGER.error(String.format( - "Failed moving ttl value batch from ColumnDescriptor to TTL" + - " column on %s with Exception :", - SYSTEM_CATALOG_NAME), e); - } - } - - pageMore = count != 0; - LOGGER.info(String.format("moveTTLValues From ColumnDescriptor to TTL Column is " + - "in progress => numOfTableHasTTLMoved: %d", - numOfTableThatHasTTLMoved)); - - } - } while (pageMore); - } catch (IOException ioe) { - LOGGER.error(String.format( - "Failed moving ttl value batch from ColumnDescriptor to TTL" + - " column in %s with Exception :", - SYSTEM_CATALOG_NAME), ioe); - throw ioe; - } - LOGGER.info(String.format("Finished moving ttl value batch from ColumnDescriptor to TTL " + - "column on %s ", - SYSTEM_CATALOG_NAME)); - - } - - public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException { - PhoenixConnection metaConn = null; - boolean isMetaConnUsingQueryConn = true; - // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG - try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); - PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) { - LOGGER.info("Upgrading metadata to add parent links for indexes on views"); - String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = " - + LinkType.INDEX_TABLE.getSerializedValue(); - String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) 
"; - ResultSet rs = queryConn.createStatement().executeQuery(indexQuery); - String prevTenantId = null; - metaConn = queryConn; - Properties props = new Properties(queryConn.getClientInfo()); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, - Long.toString(HConstants.LATEST_TIMESTAMP)); - while (rs.next()) { - String tenantId = rs.getString("TENANT_ID"); - if (!java.util.Objects.equals(prevTenantId, tenantId)) { - prevTenantId = tenantId; - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - // guard again queryConn because we don't want to close - // queryConn if metaConn was assigned queryConn at - // this point - if (!isMetaConnUsingQueryConn) { - metaConn.close(); - } - metaConn = new PhoenixConnection(oldMetaConnection, props); - // now that we have reassigned metaConn, make - // isMetaConnUsingQueryConn false so that we make - // metaConn eligible for closure - isMetaConnUsingQueryConn = false; - } - String schemaName = rs.getString(TABLE_SCHEM); - String parentTableName = rs.getString(TABLE_NAME); - String fullParentTableName = - SchemaUtil.getTableName(schemaName, parentTableName); - String indexName = rs.getString(COLUMN_FAMILY); - PTable table = metaConn.getTable(fullParentTableName); - if (table == null) { - throw new TableNotFoundException(fullParentTableName); - } - if (table.getType().equals(PTableType.VIEW)) { - PreparedStatement prepareStatement = - upsertConn.prepareStatement(createViewIndexLink); - prepareStatement.setString(1, tenantId); - prepareStatement.setString(2, schemaName); - prepareStatement.setString(3, indexName); - prepareStatement.setString(4, parentTableName); - prepareStatement.setByte(5, - LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue()); - prepareStatement.execute(); - upsertConn.commit(); - } + } catch (Exception e) { + LOGGER.error( + String.format("Failed adding child link batch from %s to %s with Exception :", + SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), + e); } - queryConn.getQueryServices().clearCache(); - } finally { - // while iterating through ResultSet, if metaConn was reassigned - // anytime, we need to close the last reassigned tenant connection - if (!isMetaConnUsingQueryConn) { - metaConn.close(); + } + pageMore = count != 0; + LOGGER.info(String.format( + "moveOrCopyChildLinks in progress => numberOfCopiedParentChildRows: %d " + + "numberOfDeletedParentChildRows: %d", + numberOfCopiedParentChildRows, numberOfDeletedParentChildRows)); + + } + } while (pageMore); + } catch (IOException ioe) { + LOGGER.error(String.format("Failed adding child link rows from %s to %s with Exception :", + SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), ioe); + throw ioe; + } + LOGGER.info(String.format("Finished moving/copying child link rows from %s to %s ", + SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME)); + } + + public static void copyTTLValuesFromPhoenixTTLColumnToTTLColumn( + PhoenixConnection oldMetaConnection, Map options) throws IOException { + long numOfCopiedTTLRows = 0; + ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); + TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); + + LOGGER.debug( + String.format("SYSTEM CATALOG tabled use for copying TTL values: %s", sysCat.toString())); + Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); + try ( + org.apache.hadoop.hbase.client.Connection copyTTLConnection = + getHBaseConnection(conf, options); + Table sysCatalogTable = copyTTLConnection.getTable(sysCat)) { + boolean pageMore = false; + 
byte[] lastRowKey = null; + + do { + Scan scan = new Scan(); + scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); + // Push down the filter to hbase to avoid transfer + SingleColumnValueFilter copyTTLFilter = new SingleColumnValueFilter( + DEFAULT_COLUMN_FAMILY_BYTES, PHOENIX_TTL_BYTES, CompareOperator.NOT_EQUAL, (byte[]) null); + + copyTTLFilter.setFilterIfMissing(true); + // Limit number of records + PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); + + scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, copyTTLFilter)); + if (pageMore) { + scan.withStartRow(lastRowKey, false); + } + // Collect the row keys to process them in batch + try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { + int count = 0; + List rowKeys = new ArrayList<>(); + List puts = new ArrayList<>(); + for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { + count++; + lastRowKey = rr.getRow(); + byte[] tmpKey = new byte[lastRowKey.length]; + System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); + long rowTS = rr.rawCells()[0].getTimestamp(); + rowKeys.add(tmpKey); + Put put = new Put(tmpKey); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, + EMPTY_COLUMN_VALUE_BYTES); + int result = new BigInteger(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, PHOENIX_TTL_BYTES)) + .intValue(); + // Check if result is negative (means greater than INT_MAX, + // put result as INT_MAX + if (result < 0) { + result = Integer.MAX_VALUE; } - } - } - - /** - * Synchronize column family properties using the default cf properties for a given table - * @param tableDesc table descriptor of table to modify - * @param defaultColFam default column family used as the baseline for property synchronization - * @param syncedProps Map of properties to be kept in sync as read from the default column family descriptor - * @return modified table descriptor builder - */ - private static TableDescriptorBuilder syncColFamProperties(TableDescriptor tableDesc, ColumnFamilyDescriptor defaultColFam, - Map syncedProps) { - TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(tableDesc); - // Ensure that all column families have necessary properties in sync (including local index cf if present) - for (ColumnFamilyDescriptor currentColFam: tableDesc.getColumnFamilies()) { - if (!currentColFam.equals(defaultColFam)) { - ColumnFamilyDescriptorBuilder colFamDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(currentColFam); - for (String prop: MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES) { - String existingPropVal = Bytes.toString(currentColFam.getValue(Bytes.toBytes(prop))); - String expectedPropVal = syncedProps.get(prop).toString(); - if (existingPropVal == null || !existingPropVal.toLowerCase().equals(expectedPropVal.toLowerCase())) { - // Need to synchronize this property for the current column family descriptor - colFamDescBuilder.setValue(prop, expectedPropVal); - } - } - if (!colFamDescBuilder.equals(ColumnFamilyDescriptorBuilder.newBuilder(currentColFam))) { - tableDescBuilder.modifyColumnFamily(colFamDescBuilder.build()); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES, rowTS, + PVarchar.INSTANCE.toBytes(String.valueOf(result))); + puts.add(put); + } + + if (puts.size() > 0) { + Object[] putResults = new Object[puts.size()]; + try (Table copyTTLTable = copyTTLConnection.getTable(sysCat)) { + // Process a batch of ttl values + copyTTLTable.batch(puts, putResults); + int numCopied = 0; + for (int i = 0; i < putResults.length; i++) { + if 
(java.util.Objects.nonNull(putResults[i])) { + numCopied++; } + } + numOfCopiedTTLRows += numCopied; + } catch (Exception e) { + LOGGER + .error(String.format("Failed copying ttl value batch from PHOENIX_TTL column to TTL" + + " column on %s with Exception :", SYSTEM_CATALOG_NAME), e); } - } - return tableDescBuilder; - } - - /** - * Add the table descriptor to the set of table descriptors to keep in sync, if it has been changed - * @param origTableDesc original table descriptor of the table in question - * @param defaultColFam column family to be used for synchronizing properties - * @param syncedProps Map of properties to be kept in sync as read from the default column family descriptor - * @param tableDescsToSync set of modified table descriptors - * @throws SQLException - */ - private static void addTableDescIfPropsChanged(TableDescriptor origTableDesc, ColumnFamilyDescriptor defaultColFam, - Map syncedProps, Set tableDescsToSync) throws SQLException { - TableDescriptorBuilder tableDescBuilder = syncColFamProperties(origTableDesc, defaultColFam, syncedProps); - if (!origTableDesc.equals(tableDescBuilder.build())) { - tableDescsToSync.add(tableDescBuilder.build()); - } - } - - /** - * Synchronize certain properties across column families of global index tables for a given base table - * @param cqs CQS object to get table descriptor from PTable - * @param baseTable base table - * @param defaultColFam column family to be used for synchronizing properties - * @param syncedProps Map of properties to be kept in sync as read from the default column family descriptor - * @param tableDescsToSync set of modified table descriptors - */ - private static void syncGlobalIndexesForTable(ConnectionQueryServices cqs, PTable baseTable, ColumnFamilyDescriptor defaultColFam, - Map syncedProps, Set tableDescsToSync) throws SQLException { - for (PTable indexTable: baseTable.getIndexes()) { - // We already handle local index property synchronization when considering all column families of the base table - if (IndexUtil.isGlobalIndex(indexTable)) { - addTableDescIfPropsChanged(cqs.getTableDescriptor(indexTable.getPhysicalName().getBytes()), - defaultColFam, syncedProps, tableDescsToSync); - } - } - } - - /** - * Synchronize certain properties across column families of view index tables for a given base table - * @param cqs CQS object to get table descriptor from PTable - * @param baseTable base table - * @param defaultColFam column family to be used for synchronizing properties - * @param syncedProps Map of properties to be kept in sync as read from the default column family descriptor - * @param tableDescsToSync set of modified table descriptors - */ - private static void syncViewIndexTable(ConnectionQueryServices cqs, PTable baseTable, ColumnFamilyDescriptor defaultColFam, - Map syncedProps, Set tableDescsToSync) throws SQLException { - String viewIndexName = MetaDataUtil.getViewIndexPhysicalName(baseTable.getName().getString()); - if (!Strings.isNullOrEmpty(viewIndexName)) { - try { - addTableDescIfPropsChanged(cqs.getTableDescriptor(Bytes.toBytes(viewIndexName)), - defaultColFam, syncedProps, tableDescsToSync); - } catch (TableNotFoundException ignore) { - // Ignore since this means that a view index table does not exist for this table + } + pageMore = count != 0; + LOGGER.info(String.format("copyTTLValues From PHOENIX_TTL to TTL Column is " + + "in progress => numOfCopiedTTLRows: %d", numOfCopiedTTLRows)); + + } + } while (pageMore); + } catch (IOException ioe) { + LOGGER.error(String.format("Failed 
copying ttl value batch from PHOENIX_TTL column to TTL" + + " column in %s with Exception :", SYSTEM_CATALOG_NAME), ioe); + throw ioe; + } + LOGGER.info(String.format( + "Finished copying ttl values link rows from PHOENIX_TTL column " + "to TTL column on %s ", + SYSTEM_CATALOG_NAME)); + + } + + public static void moveHBaseLevelTTLToSYSCAT(PhoenixConnection oldMetaConnection, + Map options) throws IOException { + long numOfTableThatHasTTLMoved = 0; + ReadOnlyProps readOnlyProps = oldMetaConnection.getQueryServices().getProps(); + TableName sysCat = SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME, readOnlyProps); + + LOGGER.debug( + String.format("SYSTEM CATALOG table use for copying TTL values: %s", sysCat.toString())); + Configuration conf = oldMetaConnection.getQueryServices().getConfiguration(); + try ( + org.apache.hadoop.hbase.client.Connection moveTTLConnection = + getHBaseConnection(conf, options); + Table sysCatalogTable = moveTTLConnection.getTable(sysCat); + Admin admin = moveTTLConnection.getAdmin()) { + // Scan SYSCAT for all tables... + boolean pageMore = false; + byte[] lastRowKey = null; + + do { + Scan scan = new Scan(); + scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES); + // Push down the filter to hbase to avoid transfer + SingleColumnValueFilter tableFilter = new SingleColumnValueFilter( + DEFAULT_COLUMN_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOperator.EQUAL, + PTableType.TABLE.getSerializedValue().getBytes(StandardCharsets.UTF_8)); + + tableFilter.setFilterIfMissing(true); + // Limit number of records + PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE); + + scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, tableFilter)); + if (pageMore) { + scan.withStartRow(lastRowKey, false); + } + // Collect the row keys to process them in batch + try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) { + int count = 0; + List rowKeys = new ArrayList<>(); + List puts = new ArrayList<>(); + for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { + count++; + lastRowKey = rr.getRow(); + byte[] tmpKey = new byte[lastRowKey.length]; + System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length); + rowKeys.add(tmpKey); + String tableName = + SchemaUtil.getTableName(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_SCHEM_BYTES), + rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_NAME_BYTES)); + if ( + tableName == null + || Arrays.equals(rr.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TABLE_SCHEM_BYTES), + SYSTEM_SCHEMA_NAME_BYTES) + ) { + // We do not support system table ttl through phoenix ttl, and it will be moved to a + // constant value in future commit. + continue; } - } - } - - private static void syncUpdateCacheFreqForIndexesOfTable(PTable baseTable, - PreparedStatement stmt, String tenantId) throws SQLException { - for (PTable index : baseTable.getIndexes()) { - if (index.getUpdateCacheFrequency() == baseTable.getUpdateCacheFrequency()) { - continue; + TableDescriptor tableDesc = + admin.getDescriptor(SchemaUtil.getPhysicalTableName(tableName, readOnlyProps)); + int ttl = tableDesc.getColumnFamily(DEFAULT_COLUMN_FAMILY_BYTES).getTimeToLive(); + if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) { + // As we have ttl defined fot this table create a Put to set TTL. 
+ long rowTS = rr.rawCells()[0].getTimestamp(); + Put put = new Put(tmpKey); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, + EMPTY_COLUMN_VALUE_BYTES); + put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES, rowTS, + PVarchar.INSTANCE.toBytes(String.valueOf(ttl))); + puts.add(put); + + // Set TTL to Default at CF level when Phoenix level ttl is enabled + if ( + oldMetaConnection.getQueryServices().getConfiguration().getBoolean( + QueryServices.PHOENIX_TABLE_TTL_ENABLED, DEFAULT_PHOENIX_TABLE_TTL_ENABLED) + ) { + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(DEFAULT_COLUMN_FAMILY_BYTES) + .setTimeToLive(ColumnFamilyDescriptorBuilder.DEFAULT_TTL).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder( + admin.getDescriptor(SchemaUtil.getPhysicalTableName(tableName, readOnlyProps))) + .modifyColumnFamily(columnFamilyDescriptor).build(); + admin.modifyTable(tableDescriptor); + } } - stmt.setString(1, tenantId); - stmt.setString(2, index.getSchemaName().getString()); - stmt.setString(3, index.getTableName().getString()); - stmt.setLong(4, baseTable.getUpdateCacheFrequency()); - stmt.addBatch(); - } - } - - /** - * See PHOENIX-4891. We set the UPDATE_CACHE_FREQUENCY of indexes to be same as their parent. - * We do this for both physical base tables as well as views - * @param conn Phoenix Connection object - * @param table PTable corresponding to a physical base table - * @throws SQLException - * @throws IOException - */ - public static void syncUpdateCacheFreqAllIndexes(PhoenixConnection conn, PTable table) - throws SQLException, IOException { - // Use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG - try(PhoenixConnection newConn = new PhoenixConnection(conn, HConstants.LATEST_TIMESTAMP)) { - // Clear the server-side cache so that we get the latest built PTables - newConn.unwrap(PhoenixConnection.class).getQueryServices().clearCache(); - byte[] tenantId = newConn.getTenantId() != null ? - newConn.getTenantId().getBytes() : null; - - PreparedStatement stmt = - newConn.prepareStatement(UPSERT_UPDATE_CACHE_FREQUENCY); - syncUpdateCacheFreqForIndexesOfTable(table, stmt, - Bytes.toString(tenantId)); - - TableViewFinderResult childViewsResult = new TableViewFinderResult(); - for (int i=0; i<2; i++) { - try (Table sysCatOrSysChildLinkTable = newConn.getQueryServices() - .getTable(SchemaUtil.getPhysicalName( - i==0 ? 
SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES, - newConn.getQueryServices().getProps()) - .getName())) { - ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantId, - table.getSchemaName().getBytes(), table.getTableName().getBytes(), - LinkType.CHILD_TABLE, childViewsResult); - - // Iterate over the chain of child views - for (TableInfo tableInfo : childViewsResult.getLinks()) { - getViewAndSyncCacheFreqForIndexes(newConn, stmt, - tableInfo); - } - break; - } catch (TableNotFoundException ex) { - // try again with SYSTEM.CATALOG in case the schema is old - if (i == 1) { - // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow - throw ex; - } + } + + if (!puts.isEmpty()) { + Object[] putResults = new Object[puts.size()]; + try (Table moveTTLTable = moveTTLConnection.getTable(sysCat)) { + // Process a batch of ttl values + moveTTLTable.batch(puts, putResults); + int numMoved = 0; + for (Object putResult : putResults) { + if (java.util.Objects.nonNull(putResult)) { + numMoved++; } + } + numOfTableThatHasTTLMoved += numMoved; + } catch (Exception e) { + LOGGER + .error(String.format("Failed moving ttl value batch from ColumnDescriptor to TTL" + + " column on %s with Exception :", SYSTEM_CATALOG_NAME), e); } - stmt.executeBatch(); - newConn.commit(); - } - } - - private static void getViewAndSyncCacheFreqForIndexes( - final PhoenixConnection newConn, - final PreparedStatement stmt, final TableInfo tableInfo) - throws SQLException { - final String viewName = SchemaUtil.getTableName( - tableInfo.getSchemaName(), tableInfo.getTableName()); - final String viewTenantId = Bytes.toString(tableInfo.getTenantId()); - final Optional view; - if (StringUtils.isNotEmpty(viewTenantId)) { - Properties props = new Properties(newConn.getClientInfo()); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, viewTenantId); - // use tenant connection to resolve tenant views - try (PhoenixConnection tenantConn = - new PhoenixConnection(newConn, props)) { - view = resolveView(viewName, tenantConn); - } - } else { - view = resolveView(viewName, newConn); - } - if (view.isPresent()) { - syncUpdateCacheFreqForIndexesOfTable(view.get(), stmt, - viewTenantId); - } - } - - private static Optional resolveView(final String viewName, - final PhoenixConnection conn) throws SQLException { - PTable view; + } + + pageMore = count != 0; + LOGGER.info(String.format("moveTTLValues From ColumnDescriptor to TTL Column is " + + "in progress => numOfTableHasTTLMoved: %d", numOfTableThatHasTTLMoved)); + + } + } while (pageMore); + } catch (IOException ioe) { + LOGGER.error(String.format("Failed moving ttl value batch from ColumnDescriptor to TTL" + + " column in %s with Exception :", SYSTEM_CATALOG_NAME), ioe); + throw ioe; + } + LOGGER.info(String.format( + "Finished moving ttl value batch from ColumnDescriptor to TTL " + "column on %s ", + SYSTEM_CATALOG_NAME)); + + } + + public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) + throws SQLException { + PhoenixConnection metaConn = null; + boolean isMetaConnUsingQueryConn = true; + // Need to use own connection with max time stamp to be able to read all data from + // SYSTEM.CATALOG + try ( + PhoenixConnection queryConn = + new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); + PhoenixConnection upsertConn = + new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) { + LOGGER.info("Upgrading metadata to add parent links for indexes on views"); + String indexQuery = + "SELECT TENANT_ID, 
TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = " + + LinkType.INDEX_TABLE.getSerializedValue(); + String createViewIndexLink = + "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) "; + ResultSet rs = queryConn.createStatement().executeQuery(indexQuery); + String prevTenantId = null; + metaConn = queryConn; + Properties props = new Properties(queryConn.getClientInfo()); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(HConstants.LATEST_TIMESTAMP)); + while (rs.next()) { + String tenantId = rs.getString("TENANT_ID"); + if (!java.util.Objects.equals(prevTenantId, tenantId)) { + prevTenantId = tenantId; + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + // guard again queryConn because we don't want to close + // queryConn if metaConn was assigned queryConn at + // this point + if (!isMetaConnUsingQueryConn) { + metaConn.close(); + } + metaConn = new PhoenixConnection(oldMetaConnection, props); + // now that we have reassigned metaConn, make + // isMetaConnUsingQueryConn false so that we make + // metaConn eligible for closure + isMetaConnUsingQueryConn = false; + } + String schemaName = rs.getString(TABLE_SCHEM); + String parentTableName = rs.getString(TABLE_NAME); + String fullParentTableName = SchemaUtil.getTableName(schemaName, parentTableName); + String indexName = rs.getString(COLUMN_FAMILY); + PTable table = metaConn.getTable(fullParentTableName); + if (table == null) { + throw new TableNotFoundException(fullParentTableName); + } + if (table.getType().equals(PTableType.VIEW)) { + PreparedStatement prepareStatement = upsertConn.prepareStatement(createViewIndexLink); + prepareStatement.setString(1, tenantId); + prepareStatement.setString(2, schemaName); + prepareStatement.setString(3, indexName); + prepareStatement.setString(4, parentTableName); + prepareStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue()); + prepareStatement.execute(); + upsertConn.commit(); + } + } + queryConn.getQueryServices().clearCache(); + } finally { + // while iterating through ResultSet, if metaConn was reassigned + // anytime, we need to close the last reassigned tenant connection + if (!isMetaConnUsingQueryConn) { + metaConn.close(); + } + } + } + + /** + * Synchronize column family properties using the default cf properties for a given table + * @param tableDesc table descriptor of table to modify + * @param defaultColFam default column family used as the baseline for property synchronization + * @param syncedProps Map of properties to be kept in sync as read from the default column + * family descriptor + * @return modified table descriptor builder + */ + private static TableDescriptorBuilder syncColFamProperties(TableDescriptor tableDesc, + ColumnFamilyDescriptor defaultColFam, Map syncedProps) { + TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(tableDesc); + // Ensure that all column families have necessary properties in sync (including local index cf + // if present) + for (ColumnFamilyDescriptor currentColFam : tableDesc.getColumnFamilies()) { + if (!currentColFam.equals(defaultColFam)) { + ColumnFamilyDescriptorBuilder colFamDescBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(currentColFam); + for (String prop : MetaDataUtil.SYNCED_DATA_TABLE_AND_INDEX_COL_FAM_PROPERTIES) { + String existingPropVal = Bytes.toString(currentColFam.getValue(Bytes.toBytes(prop))); + String expectedPropVal = syncedProps.get(prop).toString(); + if 
( + existingPropVal == null + || !existingPropVal.toLowerCase().equals(expectedPropVal.toLowerCase()) + ) { + // Need to synchronize this property for the current column family descriptor + colFamDescBuilder.setValue(prop, expectedPropVal); + } + } + if (!colFamDescBuilder.equals(ColumnFamilyDescriptorBuilder.newBuilder(currentColFam))) { + tableDescBuilder.modifyColumnFamily(colFamDescBuilder.build()); + } + } + } + return tableDescBuilder; + } + + /** + * Add the table descriptor to the set of table descriptors to keep in sync, if it has been + * changed + * @param origTableDesc original table descriptor of the table in question + * @param defaultColFam column family to be used for synchronizing properties + * @param syncedProps Map of properties to be kept in sync as read from the default column + * family descriptor + * @param tableDescsToSync set of modified table descriptors + */ + private static void addTableDescIfPropsChanged(TableDescriptor origTableDesc, + ColumnFamilyDescriptor defaultColFam, Map syncedProps, + Set tableDescsToSync) throws SQLException { + TableDescriptorBuilder tableDescBuilder = + syncColFamProperties(origTableDesc, defaultColFam, syncedProps); + if (!origTableDesc.equals(tableDescBuilder.build())) { + tableDescsToSync.add(tableDescBuilder.build()); + } + } + + /** + * Synchronize certain properties across column families of global index tables for a given base + * table + * @param cqs CQS object to get table descriptor from PTable + * @param baseTable base table + * @param defaultColFam column family to be used for synchronizing properties + * @param syncedProps Map of properties to be kept in sync as read from the default column + * family descriptor + * @param tableDescsToSync set of modified table descriptors + */ + private static void syncGlobalIndexesForTable(ConnectionQueryServices cqs, PTable baseTable, + ColumnFamilyDescriptor defaultColFam, Map syncedProps, + Set tableDescsToSync) throws SQLException { + for (PTable indexTable : baseTable.getIndexes()) { + // We already handle local index property synchronization when considering all column families + // of the base table + if (IndexUtil.isGlobalIndex(indexTable)) { + addTableDescIfPropsChanged(cqs.getTableDescriptor(indexTable.getPhysicalName().getBytes()), + defaultColFam, syncedProps, tableDescsToSync); + } + } + } + + /** + * Synchronize certain properties across column families of view index tables for a given base + * table + * @param cqs CQS object to get table descriptor from PTable + * @param baseTable base table + * @param defaultColFam column family to be used for synchronizing properties + * @param syncedProps Map of properties to be kept in sync as read from the default column + * family descriptor + * @param tableDescsToSync set of modified table descriptors + */ + private static void syncViewIndexTable(ConnectionQueryServices cqs, PTable baseTable, + ColumnFamilyDescriptor defaultColFam, Map syncedProps, + Set tableDescsToSync) throws SQLException { + String viewIndexName = MetaDataUtil.getViewIndexPhysicalName(baseTable.getName().getString()); + if (!Strings.isNullOrEmpty(viewIndexName)) { + try { + addTableDescIfPropsChanged(cqs.getTableDescriptor(Bytes.toBytes(viewIndexName)), + defaultColFam, syncedProps, tableDescsToSync); + } catch (TableNotFoundException ignore) { + // Ignore since this means that a view index table does not exist for this table + } + } + } + + private static void syncUpdateCacheFreqForIndexesOfTable(PTable baseTable, PreparedStatement stmt, + String tenantId) 
throws SQLException { + for (PTable index : baseTable.getIndexes()) { + if (index.getUpdateCacheFrequency() == baseTable.getUpdateCacheFrequency()) { + continue; + } + stmt.setString(1, tenantId); + stmt.setString(2, index.getSchemaName().getString()); + stmt.setString(3, index.getTableName().getString()); + stmt.setLong(4, baseTable.getUpdateCacheFrequency()); + stmt.addBatch(); + } + } + + /** + * See PHOENIX-4891. We set the UPDATE_CACHE_FREQUENCY of indexes to be same as their parent. We + * do this for both physical base tables as well as views + * @param conn Phoenix Connection object + * @param table PTable corresponding to a physical base table + */ + public static void syncUpdateCacheFreqAllIndexes(PhoenixConnection conn, PTable table) + throws SQLException, IOException { + // Use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG + try (PhoenixConnection newConn = new PhoenixConnection(conn, HConstants.LATEST_TIMESTAMP)) { + // Clear the server-side cache so that we get the latest built PTables + newConn.unwrap(PhoenixConnection.class).getQueryServices().clearCache(); + byte[] tenantId = newConn.getTenantId() != null ? newConn.getTenantId().getBytes() : null; + + PreparedStatement stmt = newConn.prepareStatement(UPSERT_UPDATE_CACHE_FREQUENCY); + syncUpdateCacheFreqForIndexesOfTable(table, stmt, Bytes.toString(tenantId)); + + TableViewFinderResult childViewsResult = new TableViewFinderResult(); + for (int i = 0; i < 2; i++) { + try (Table sysCatOrSysChildLinkTable = newConn.getQueryServices() + .getTable(SchemaUtil + .getPhysicalName(i == 0 ? SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES, + newConn.getQueryServices().getProps()) + .getName())) { + ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantId, + table.getSchemaName().getBytes(), table.getTableName().getBytes(), LinkType.CHILD_TABLE, + childViewsResult); + + // Iterate over the chain of child views + for (TableInfo tableInfo : childViewsResult.getLinks()) { + getViewAndSyncCacheFreqForIndexes(newConn, stmt, tableInfo); + } + break; + } catch (TableNotFoundException ex) { + // try again with SYSTEM.CATALOG in case the schema is old + if (i == 1) { + // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow + throw ex; + } + } + } + stmt.executeBatch(); + newConn.commit(); + } + } + + private static void getViewAndSyncCacheFreqForIndexes(final PhoenixConnection newConn, + final PreparedStatement stmt, final TableInfo tableInfo) throws SQLException { + final String viewName = + SchemaUtil.getTableName(tableInfo.getSchemaName(), tableInfo.getTableName()); + final String viewTenantId = Bytes.toString(tableInfo.getTenantId()); + final Optional view; + if (StringUtils.isNotEmpty(viewTenantId)) { + Properties props = new Properties(newConn.getClientInfo()); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, viewTenantId); + // use tenant connection to resolve tenant views + try (PhoenixConnection tenantConn = new PhoenixConnection(newConn, props)) { + view = resolveView(viewName, tenantConn); + } + } else { + view = resolveView(viewName, newConn); + } + if (view.isPresent()) { + syncUpdateCacheFreqForIndexesOfTable(view.get(), stmt, viewTenantId); + } + } + + private static Optional resolveView(final String viewName, final PhoenixConnection conn) + throws SQLException { + PTable view; + try { + view = conn.getTable(viewName); + } catch (TableNotFoundException e) { + // Ignore + LOGGER.error("Error getting PTable for view: {}", viewName, e); + return 
Optional.empty(); + } + return Optional.of(view); + } + + /** + * Make sure that all tables have necessary column family properties in sync with each other and + * also in sync with all the table's indexes See PHOENIX-3955 + * @param conn Phoenix connection + * @throws SQLException if something goes wrong while retrieving Admin, Table or while calling + * underlying utilities like + * {@link #syncUpdateCacheFreqAllIndexes(PhoenixConnection, PTable)} , + * {@link #addTableDescIfPropsChanged(TableDescriptor, ColumnFamilyDescriptor, Map, Set)} + * , + * {@link #syncGlobalIndexesForTable(ConnectionQueryServices, PTable, ColumnFamilyDescriptor, Map, Set)} + * , + * {@link #syncViewIndexTable(ConnectionQueryServices, PTable, ColumnFamilyDescriptor, Map, Set)} + * @throws IOException if something goes wrong while retrieving Admin, performing admin + * operations or while performing sync of updated cache frequencies for + * indexes using + * {@link #syncUpdateCacheFreqAllIndexes(PhoenixConnection, PTable)} + */ + public static void syncTableAndIndexProperties(PhoenixConnection conn) + throws SQLException, IOException { + try (Admin admin = conn.getQueryServices().getAdmin()) { + Set tableDescriptorsToSynchronize = new HashSet<>(); + for (TableDescriptor origTableDesc : admin.listTableDescriptors()) { + if ( + MetaDataUtil.isViewIndex(origTableDesc.getTableName().getNameWithNamespaceInclAsString()) + ) { + // Ignore physical view index tables since we handle them + // for each base table already + continue; + } + PTable table; + String fullTableName = SchemaUtil + .getPhysicalTableName(origTableDesc.getTableName().getName(), + SchemaUtil.isNamespaceMappingEnabled(null, conn.getQueryServices().getProps())) + .getNameAsString(); try { - view = conn.getTable(viewName); + // Use this getTable API to get the latest PTable + table = conn.getTable(null, fullTableName); } catch (TableNotFoundException e) { - // Ignore - LOGGER.error("Error getting PTable for view: {}", viewName, e); - return Optional.empty(); - } - return Optional.of(view); - } - - /** - * Make sure that all tables have necessary column family properties in sync - * with each other and also in sync with all the table's indexes - * See PHOENIX-3955 - * @param conn Phoenix connection - * @throws SQLException if something goes wrong while retrieving Admin, - * Table or while calling underlying utilities like - * {@link #syncUpdateCacheFreqAllIndexes(PhoenixConnection, PTable)} , - * {@link #addTableDescIfPropsChanged(TableDescriptor, - * ColumnFamilyDescriptor, Map, Set)} , - * {@link #syncGlobalIndexesForTable(ConnectionQueryServices, PTable, - * ColumnFamilyDescriptor, Map, Set)} , - * {@link #syncViewIndexTable(ConnectionQueryServices, PTable, - * ColumnFamilyDescriptor, Map, Set)} - * @throws IOException if something goes wrong while retrieving Admin, - * performing admin operations or while performing sync of updated - * cache frequencies for indexes using - * {@link #syncUpdateCacheFreqAllIndexes(PhoenixConnection, PTable)} - */ - public static void syncTableAndIndexProperties(PhoenixConnection conn) - throws SQLException, IOException { - try (Admin admin = conn.getQueryServices().getAdmin()) { - Set tableDescriptorsToSynchronize = - new HashSet<>(); - for (TableDescriptor origTableDesc : admin.listTableDescriptors()) { - if (MetaDataUtil.isViewIndex(origTableDesc.getTableName() - .getNameWithNamespaceInclAsString())) { - // Ignore physical view index tables since we handle them - // for each base table already - continue; - } - PTable 
table; - String fullTableName = SchemaUtil.getPhysicalTableName( - origTableDesc.getTableName().getName(), - SchemaUtil.isNamespaceMappingEnabled(null, - conn.getQueryServices().getProps())).getNameAsString(); - try { - // Use this getTable API to get the latest PTable - table = conn.getTable(null, fullTableName); - } catch (TableNotFoundException e) { - // Ignore tables not mapped to a Phoenix Table - LOGGER.warn("Error getting PTable for HBase table: {}", - fullTableName); - continue; - } - if (table.getType() == PTableType.INDEX) { - // Ignore global index tables since we handle them for - // each base table already - continue; - } - syncUpdateCacheFreqAllIndexes(conn, table); - ColumnFamilyDescriptor defaultColFam = origTableDesc - .getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)); - Map syncedProps = - MetaDataUtil.getSyncedProps(defaultColFam); - - addTableDescIfPropsChanged(origTableDesc, defaultColFam, - syncedProps, tableDescriptorsToSynchronize); - syncGlobalIndexesForTable(conn.getQueryServices(), table, - defaultColFam, syncedProps, tableDescriptorsToSynchronize); - syncViewIndexTable(conn.getQueryServices(), table, - defaultColFam, syncedProps, tableDescriptorsToSynchronize); - } - for (TableDescriptor t : tableDescriptorsToSynchronize) { - admin.modifyTable(t); - } - } - } - - private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection, - String tenantId, String schemaName, String viewOrTableName, int baseColumnCount) - throws SQLException { - try (PreparedStatement stmt = - metaConnection.prepareStatement(UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW)) { - stmt.setString(1, tenantId); - stmt.setString(2, schemaName); - stmt.setString(3, viewOrTableName); - stmt.setString(4, null); - stmt.setString(5, null); - stmt.setInt(6, baseColumnCount); - stmt.executeUpdate(); - } - } - - private static class ColumnDetails { - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + columnName.hashCode(); - result = prime * result + ((columnFamily == null) ? 
0 : columnFamily.hashCode()); - result = prime * result + arraySize; - result = prime * result + dataType; - result = prime * result + maxLength; - result = prime * result + ordinalValue; - result = prime * result + scale; - result = prime * result + sortOrder; - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ColumnDetails other = (ColumnDetails) obj; - if (!columnName.equals(other.columnName)) return false; - if (columnFamily == null) { - if (other.columnFamily != null) return false; - } else if (!columnFamily.equals(other.columnFamily)) return false; - if (arraySize != other.arraySize) return false; - if (dataType != other.dataType) return false; - if (maxLength != other.maxLength) return false; - if (ordinalValue != other.ordinalValue) return false; - if (scale != other.scale) return false; - if (sortOrder != other.sortOrder) return false; - return true; - } - - @Nullable - private final String columnFamily; - - @Nonnull - private final String columnName; - - private final int ordinalValue; - - private final int dataType; - - private final int maxLength; - - private final int scale; - - private final int sortOrder; - - private final int arraySize; - - ColumnDetails(String columnFamily, String columnName, int ordinalValue, int dataType, - int maxLength, int scale, int sortOrder, int arraySize) { - checkNotNull(columnName); - checkNotNull(ordinalValue); - checkNotNull(dataType); - this.columnFamily = columnFamily; - this.columnName = columnName; - this.ordinalValue = ordinalValue; - this.dataType = dataType; - this.maxLength = maxLength; - this.scale = scale; - this.sortOrder = sortOrder; - this.arraySize = arraySize; - } - - } - - private static class ViewKey { - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode()); - result = prime * result + name.hashCode(); - result = prime * result + ((schema == null) ? 0 : schema.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - ViewKey other = (ViewKey) obj; - if (tenantId == null) { - if (other.tenantId != null) return false; - } else if (!tenantId.equals(other.tenantId)) return false; - if (!name.equals(other.name)) return false; - if (schema == null) { - if (other.schema != null) return false; - } else if (!schema.equals(other.schema)) return false; - return true; - } - - @Nullable - private final String tenantId; - - @Nullable - private final String schema; - - @Nonnull - private final String name; - - private ViewKey(String tenantId, String schema, String viewName) { - this.tenantId = tenantId; - this.schema = schema; - this.name = viewName; - } - } - - private static String getTableRVCWithParam(List tableNames) { - StringBuilder query = new StringBuilder("("); - for (int i = 0; i < tableNames.size(); i += 3) { - String tenantId = tableNames.get(i); - String schemaName = tableNames.get(i + 1); - String tableName = tableNames.get(i + 2); - query.append('('); - query.append(tenantId == null ? "null" : " ? "); - query.append(','); - query.append(schemaName == null ? "null" : " ? "); - query.append(','); - query.append(" ? 
"); - query.append("),"); - } - // Replace trailing , with ) to end IN expression - query.setCharAt(query.length() - 1, ')'); - return query.toString(); - } - - private static String getTableRVC(List tableNames) { - StringBuilder query = new StringBuilder("("); - for (int i = 0; i < tableNames.size(); i+=3) { - String tenantId = tableNames.get(i); - String schemaName = tableNames.get(i+1); - String tableName = tableNames.get(i+2); - query.append('('); - query.append(tenantId == null ? "null" : ("'" + tenantId + "'")); - query.append(','); - query.append(schemaName == null ? "null" : ("'" + schemaName + "'")); - query.append(','); - query.append("'" + tableName + "'"); - query.append("),"); - } - // Replace trailing , with ) to end IN expression - query.setCharAt(query.length()-1, ')'); - return query.toString(); - } - - private static List addPhysicalTables(PhoenixConnection conn, ResultSet rs, PTableType otherType, Set physicalTables) throws SQLException { - List tableNames = Lists.newArrayListWithExpectedSize(1024); + // Ignore tables not mapped to a Phoenix Table + LOGGER.warn("Error getting PTable for HBase table: {}", fullTableName); + continue; + } + if (table.getType() == PTableType.INDEX) { + // Ignore global index tables since we handle them for + // each base table already + continue; + } + syncUpdateCacheFreqAllIndexes(conn, table); + ColumnFamilyDescriptor defaultColFam = + origTableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)); + Map syncedProps = MetaDataUtil.getSyncedProps(defaultColFam); + + addTableDescIfPropsChanged(origTableDesc, defaultColFam, syncedProps, + tableDescriptorsToSynchronize); + syncGlobalIndexesForTable(conn.getQueryServices(), table, defaultColFam, syncedProps, + tableDescriptorsToSynchronize); + syncViewIndexTable(conn.getQueryServices(), table, defaultColFam, syncedProps, + tableDescriptorsToSynchronize); + } + for (TableDescriptor t : tableDescriptorsToSynchronize) { + admin.modifyTable(t); + } + } + } + + private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection, + String tenantId, String schemaName, String viewOrTableName, int baseColumnCount) + throws SQLException { + try (PreparedStatement stmt = + metaConnection.prepareStatement(UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW)) { + stmt.setString(1, tenantId); + stmt.setString(2, schemaName); + stmt.setString(3, viewOrTableName); + stmt.setString(4, null); + stmt.setString(5, null); + stmt.setInt(6, baseColumnCount); + stmt.executeUpdate(); + } + } + + private static class ColumnDetails { + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + columnName.hashCode(); + result = prime * result + ((columnFamily == null) ? 
0 : columnFamily.hashCode()); + result = prime * result + arraySize; + result = prime * result + dataType; + result = prime * result + maxLength; + result = prime * result + ordinalValue; + result = prime * result + scale; + result = prime * result + sortOrder; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ColumnDetails other = (ColumnDetails) obj; + if (!columnName.equals(other.columnName)) return false; + if (columnFamily == null) { + if (other.columnFamily != null) return false; + } else if (!columnFamily.equals(other.columnFamily)) return false; + if (arraySize != other.arraySize) return false; + if (dataType != other.dataType) return false; + if (maxLength != other.maxLength) return false; + if (ordinalValue != other.ordinalValue) return false; + if (scale != other.scale) return false; + if (sortOrder != other.sortOrder) return false; + return true; + } + + @Nullable + private final String columnFamily; + + @Nonnull + private final String columnName; + + private final int ordinalValue; + + private final int dataType; + + private final int maxLength; + + private final int scale; + + private final int sortOrder; + + private final int arraySize; + + ColumnDetails(String columnFamily, String columnName, int ordinalValue, int dataType, + int maxLength, int scale, int sortOrder, int arraySize) { + checkNotNull(columnName); + checkNotNull(ordinalValue); + checkNotNull(dataType); + this.columnFamily = columnFamily; + this.columnName = columnName; + this.ordinalValue = ordinalValue; + this.dataType = dataType; + this.maxLength = maxLength; + this.scale = scale; + this.sortOrder = sortOrder; + this.arraySize = arraySize; + } + + } + + private static class ViewKey { + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode()); + result = prime * result + name.hashCode(); + result = prime * result + ((schema == null) ? 0 : schema.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + if (getClass() != obj.getClass()) return false; + ViewKey other = (ViewKey) obj; + if (tenantId == null) { + if (other.tenantId != null) return false; + } else if (!tenantId.equals(other.tenantId)) return false; + if (!name.equals(other.name)) return false; + if (schema == null) { + if (other.schema != null) return false; + } else if (!schema.equals(other.schema)) return false; + return true; + } + + @Nullable + private final String tenantId; + + @Nullable + private final String schema; + + @Nonnull + private final String name; + + private ViewKey(String tenantId, String schema, String viewName) { + this.tenantId = tenantId; + this.schema = schema; + this.name = viewName; + } + } + + private static String getTableRVCWithParam(List tableNames) { + StringBuilder query = new StringBuilder("("); + for (int i = 0; i < tableNames.size(); i += 3) { + String tenantId = tableNames.get(i); + String schemaName = tableNames.get(i + 1); + String tableName = tableNames.get(i + 2); + query.append('('); + query.append(tenantId == null ? "null" : " ? "); + query.append(','); + query.append(schemaName == null ? "null" : " ? "); + query.append(','); + query.append(" ? 
"); + query.append("),"); + } + // Replace trailing , with ) to end IN expression + query.setCharAt(query.length() - 1, ')'); + return query.toString(); + } + + private static String getTableRVC(List tableNames) { + StringBuilder query = new StringBuilder("("); + for (int i = 0; i < tableNames.size(); i += 3) { + String tenantId = tableNames.get(i); + String schemaName = tableNames.get(i + 1); + String tableName = tableNames.get(i + 2); + query.append('('); + query.append(tenantId == null ? "null" : ("'" + tenantId + "'")); + query.append(','); + query.append(schemaName == null ? "null" : ("'" + schemaName + "'")); + query.append(','); + query.append("'" + tableName + "'"); + query.append("),"); + } + // Replace trailing , with ) to end IN expression + query.setCharAt(query.length() - 1, ')'); + return query.toString(); + } + + private static List addPhysicalTables(PhoenixConnection conn, ResultSet rs, + PTableType otherType, Set physicalTables) throws SQLException { + List tableNames = Lists.newArrayListWithExpectedSize(1024); + while (rs.next()) { + tableNames.add(rs.getString(1)); + tableNames.add(rs.getString(2)); + tableNames.add(rs.getString(3)); + } + if (tableNames.isEmpty()) { + return Collections.emptyList(); + } + + List otherTables = Lists.newArrayListWithExpectedSize(tableNames.size()); + // Find the header rows for tables that have not been upgraded already. + // We don't care about views, as the row key cannot be different than the table. + // We need this query to find physical tables which won't have a link row. + + String query = String.format( + "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME,TABLE_TYPE" + + "FROM SYSTEM.CATALOG (ROW_KEY_ORDER_OPTIMIZABLE BOOLEAN)" + "WHERE COLUMN_NAME IS NULL" + + "AND COLUMN_FAMILY IS NULL" + "AND ROW_KEY_ORDER_OPTIMIZABLE IS NULL" + + "AND TABLE_TYPE IN (%s , %s )" + "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN %s ", + PTableType.TABLE.getSerializedValue(), otherType.getSerializedValue(), + getTableRVCWithParam(tableNames)); + try (PreparedStatement selSysCat = conn.prepareStatement(query)) { + int param = 0; + for (int i = 0; i < tableNames.size(); i += 3) { + String tenantId = tableNames.get(i); + String schemaName = tableNames.get(i + 1); + String tableName = tableNames.get(i + 2); + if (tenantId != null) { + selSysCat.setString(++param, tenantId); + } + if (schemaName != null) { + selSysCat.setString(++param, schemaName); + } + selSysCat.setString(++param, tableName); + } + rs = selSysCat.executeQuery(); + while (rs.next()) { + if (PTableType.TABLE.getSerializedValue().equals(rs.getString(4))) { + physicalTables.add(SchemaUtil.getTableName(rs.getString(2), rs.getString(3))); + } else { + otherTables.add(rs.getString(1)); + otherTables.add(rs.getString(2)); + otherTables.add(rs.getString(3)); + } + } + return otherTables; + } + } + + // Return all types that are descending and either: + // 1) variable length, which includes all array types (PHOENIX-2067) + // 2) fixed length with padding (PHOENIX-2120) + // 3) float and double (PHOENIX-2171) + // We exclude VARBINARY as we no longer support DESC for it. 
+ private static String getAffectedDataTypes() { + StringBuilder buf = + new StringBuilder("(" + PVarchar.INSTANCE.getSqlType() + "," + +PChar.INSTANCE.getSqlType() + + "," + +PBinary.INSTANCE.getSqlType() + "," + +PFloat.INSTANCE.getSqlType() + "," + + +PDouble.INSTANCE.getSqlType() + "," + +PDecimal.INSTANCE.getSqlType() + ","); + for (PDataType type : PDataType.values()) { + if (type.isArrayType()) { + buf.append(type.getSqlType()); + buf.append(','); + } + } + buf.setCharAt(buf.length() - 1, ')'); + return buf.toString(); + } + + /** + * Identify the tables that are DESC VARBINARY as this is no longer supported + */ + public static List getPhysicalTablesWithDescVarbinaryRowKey(PhoenixConnection conn) + throws SQLException { + String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + "FROM SYSTEM.CATALOG cat1\n" + + "WHERE COLUMN_NAME IS NOT NULL\n" + "AND COLUMN_FAMILY IS NULL\n" + "AND SORT_ORDER = " + + SortOrder.DESC.getSystemValue() + "\n" + "AND DATA_TYPE = " + + PVarbinary.INSTANCE.getSqlType() + "\n" + "GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME"; + return getPhysicalTablesWithDescRowKey(query, conn); + } + + /** + * Identify the tables that need to be upgraded due to PHOENIX-2067 and PHOENIX-2120 + */ + public static List getPhysicalTablesWithDescRowKey(PhoenixConnection conn) + throws SQLException { + String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + "FROM SYSTEM.CATALOG cat1\n" + + "WHERE COLUMN_NAME IS NOT NULL\n" + "AND COLUMN_FAMILY IS NULL\n" + "AND ( ( SORT_ORDER = " + + SortOrder.DESC.getSystemValue() + "\n" + " AND DATA_TYPE IN " + + getAffectedDataTypes() + ")\n" + " OR ( SORT_ORDER = " + SortOrder.ASC.getSystemValue() + + "\n" + " AND DATA_TYPE = " + PBinary.INSTANCE.getSqlType() + "\n" + + " AND COLUMN_SIZE > 1 ) )\n" + "GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME"; + return getPhysicalTablesWithDescRowKey(query, conn); + } + + /** + * Identify the tables that need to be upgraded due to PHOENIX-2067 + */ + private static List getPhysicalTablesWithDescRowKey(String query, PhoenixConnection conn) + throws SQLException { + // First query finds column rows of tables that need to be upgraded. + // We cannot tell if the column is from a table, view, or index however. + ResultSet rs = conn.createStatement().executeQuery(query); + Set physicalTables = Sets.newHashSetWithExpectedSize(1024); + List remainingTableNames = + addPhysicalTables(conn, rs, PTableType.INDEX, physicalTables); + if (!remainingTableNames.isEmpty()) { + // Find tables/views for index + String indexLinkQuery = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + "FROM SYSTEM.CATALOG\n" + + "WHERE COLUMN_NAME IS NULL\n" + "AND (TENANT_ID, TABLE_SCHEM, COLUMN_FAMILY) IN " + + getTableRVC(remainingTableNames) + "\n" + "AND LINK_TYPE = " + + LinkType.INDEX_TABLE.getSerializedValue(); + rs = conn.createStatement().executeQuery(indexLinkQuery); + remainingTableNames = addPhysicalTables(conn, rs, PTableType.VIEW, physicalTables); + if (!remainingTableNames.isEmpty()) { + // Find physical table name from views, splitting on '.' 
to get schema name and table name + String physicalLinkQuery = "SELECT null, " + + " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN NULL ELSE SUBSTR(COLUMN_FAMILY,1,INSTR(COLUMN_FAMILY,'.')) END,\n" + + " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN COLUMN_FAMILY ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1) END\n" + + "FROM SYSTEM.CATALOG\n" + "WHERE COLUMN_NAME IS NULL\n" + + "AND COLUMN_FAMILY IS NOT NULL\n" + "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + + getTableRVC(remainingTableNames) + "\n" + "AND LINK_TYPE = " + + LinkType.PHYSICAL_TABLE.getSerializedValue(); + rs = conn.createStatement().executeQuery(physicalLinkQuery); + // Add any tables (which will all be physical tables) which have not already been upgraded. + addPhysicalTables(conn, rs, PTableType.TABLE, physicalTables); + } + } + List sortedPhysicalTables = new ArrayList(physicalTables); + Collections.sort(sortedPhysicalTables); + return sortedPhysicalTables; + } + + private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, + PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable, + boolean bypassUpgrade) throws SQLException { + TableName physicalName = TableName.valueOf(SchemaUtil.getTableName(schemaName, tableName)); + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + String snapshotName = physicalName + "_" + currentTime; + Admin admin = null; + if (isTable && !bypassUpgrade) { + admin = globalConn.getQueryServices().getAdmin(); + } + boolean restoreSnapshot = false; + boolean success = false; + try { + if (isTable && !bypassUpgrade) { + String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade..."; + System.out.println(msg); + LOGGER.info(msg); + admin.disableTable(physicalName); + admin.snapshot(snapshotName, physicalName); + admin.enableTable(physicalName); + restoreSnapshot = true; + } + String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName); + String tenantInfo = ""; + PName tenantId = PName.EMPTY_NAME; + if (upgradeConn.getTenantId() != null) { + tenantId = upgradeConn.getTenantId(); + tenantInfo = " for tenant " + tenantId.getString(); + } + String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "..."; + System.out.println(msg); + LOGGER.info(msg); + ResultSet rs; + if (!bypassUpgrade) { + rs = upgradeConn.createStatement() + .executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName); + rs.next(); // Run query + } + List tableNames = Lists.newArrayListWithExpectedSize(1024); + tableNames.add(tenantId == PName.EMPTY_NAME ? null : tenantId.getString()); + tableNames.add(schemaName); + tableNames.add(tableName); + // Find views to mark as upgraded + if (isTable) { + String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + "FROM SYSTEM.CATALOG\n" + + "WHERE COLUMN_NAME IS NULL\n" + "AND COLUMN_FAMILY = '" + physicalName + "'" + + "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); + rs = globalConn.createStatement().executeQuery(query); while (rs.next()) { - tableNames.add(rs.getString(1)); - tableNames.add(rs.getString(2)); - tableNames.add(rs.getString(3)); - } - if (tableNames.isEmpty()) { - return Collections.emptyList(); - } - - List otherTables = Lists.newArrayListWithExpectedSize(tableNames.size()); - // Find the header rows for tables that have not been upgraded already. - // We don't care about views, as the row key cannot be different than the table. - // We need this query to find physical tables which won't have a link row. 
- - String query = String.format("SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME,TABLE_TYPE" - + "FROM SYSTEM.CATALOG (ROW_KEY_ORDER_OPTIMIZABLE BOOLEAN)" - + "WHERE COLUMN_NAME IS NULL" - + "AND COLUMN_FAMILY IS NULL" - + "AND ROW_KEY_ORDER_OPTIMIZABLE IS NULL" - + "AND TABLE_TYPE IN (%s , %s )" - + "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN %s ", - PTableType.TABLE.getSerializedValue(), otherType.getSerializedValue(), - getTableRVCWithParam(tableNames)); - try (PreparedStatement selSysCat = conn.prepareStatement(query)) { - int param = 0; - for (int i = 0; i < tableNames.size(); i += 3) { - String tenantId = tableNames.get(i); - String schemaName = tableNames.get(i + 1); - String tableName = tableNames.get(i + 2); - if (tenantId != null) { - selSysCat.setString(++param, tenantId); - } - if (schemaName != null) { - selSysCat.setString(++param, schemaName); - } - selSysCat.setString(++param, tableName); - } - rs = selSysCat.executeQuery(); - while (rs.next()) { - if (PTableType.TABLE.getSerializedValue() - .equals(rs.getString(4))) { - physicalTables.add(SchemaUtil - .getTableName(rs.getString(2), rs.getString(3))); - } else { - otherTables.add(rs.getString(1)); - otherTables.add(rs.getString(2)); - otherTables.add(rs.getString(3)); - } - } - return otherTables; - } - } - - // Return all types that are descending and either: - // 1) variable length, which includes all array types (PHOENIX-2067) - // 2) fixed length with padding (PHOENIX-2120) - // 3) float and double (PHOENIX-2171) - // We exclude VARBINARY as we no longer support DESC for it. - private static String getAffectedDataTypes() { - StringBuilder buf = new StringBuilder("(" - + PVarchar.INSTANCE.getSqlType() + "," + - + PChar.INSTANCE.getSqlType() + "," + - + PBinary.INSTANCE.getSqlType() + "," + - + PFloat.INSTANCE.getSqlType() + "," + - + PDouble.INSTANCE.getSqlType() + "," + - + PDecimal.INSTANCE.getSqlType() + "," - ); - for (PDataType type : PDataType.values()) { - if (type.isArrayType()) { - buf.append(type.getSqlType()); - buf.append(','); - } - } - buf.setCharAt(buf.length()-1, ')'); - return buf.toString(); - } - - - /** - * Identify the tables that are DESC VARBINARY as this is no longer supported - */ - public static List getPhysicalTablesWithDescVarbinaryRowKey(PhoenixConnection conn) throws SQLException { - String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + - "FROM SYSTEM.CATALOG cat1\n" + - "WHERE COLUMN_NAME IS NOT NULL\n" + - "AND COLUMN_FAMILY IS NULL\n" + - "AND SORT_ORDER = " + SortOrder.DESC.getSystemValue() + "\n" + - "AND DATA_TYPE = " + PVarbinary.INSTANCE.getSqlType() + "\n" + - "GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME"; - return getPhysicalTablesWithDescRowKey(query, conn); - } - - /** - * Identify the tables that need to be upgraded due to PHOENIX-2067 and PHOENIX-2120 - */ - public static List getPhysicalTablesWithDescRowKey(PhoenixConnection conn) throws SQLException { - String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + - "FROM SYSTEM.CATALOG cat1\n" + - "WHERE COLUMN_NAME IS NOT NULL\n" + - "AND COLUMN_FAMILY IS NULL\n" + - "AND ( ( SORT_ORDER = " + SortOrder.DESC.getSystemValue() + "\n" + - " AND DATA_TYPE IN " + getAffectedDataTypes() + ")\n" + - " OR ( SORT_ORDER = " + SortOrder.ASC.getSystemValue() + "\n" + - " AND DATA_TYPE = " + PBinary.INSTANCE.getSqlType() + "\n" + - " AND COLUMN_SIZE > 1 ) )\n" + - "GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME"; - return getPhysicalTablesWithDescRowKey(query, conn); - } - - /** - * Identify the tables that need to be upgraded due to 
PHOENIX-2067 - */ - private static List getPhysicalTablesWithDescRowKey(String query, PhoenixConnection conn) throws SQLException { - // First query finds column rows of tables that need to be upgraded. - // We cannot tell if the column is from a table, view, or index however. - ResultSet rs = conn.createStatement().executeQuery(query); - Set physicalTables = Sets.newHashSetWithExpectedSize(1024); - List remainingTableNames = addPhysicalTables(conn, rs, PTableType.INDEX, physicalTables); - if (!remainingTableNames.isEmpty()) { - // Find tables/views for index - String indexLinkQuery = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + - "FROM SYSTEM.CATALOG\n" + - "WHERE COLUMN_NAME IS NULL\n" + - "AND (TENANT_ID, TABLE_SCHEM, COLUMN_FAMILY) IN " + getTableRVC(remainingTableNames) + "\n" + - "AND LINK_TYPE = " + LinkType.INDEX_TABLE.getSerializedValue(); - rs = conn.createStatement().executeQuery(indexLinkQuery); - remainingTableNames = addPhysicalTables(conn, rs, PTableType.VIEW, physicalTables); - if (!remainingTableNames.isEmpty()) { - // Find physical table name from views, splitting on '.' to get schema name and table name - String physicalLinkQuery = "SELECT null, " + - " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN NULL ELSE SUBSTR(COLUMN_FAMILY,1,INSTR(COLUMN_FAMILY,'.')) END,\n" + - " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN COLUMN_FAMILY ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1) END\n" + - "FROM SYSTEM.CATALOG\n" + - "WHERE COLUMN_NAME IS NULL\n" + - "AND COLUMN_FAMILY IS NOT NULL\n" + - "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + getTableRVC(remainingTableNames) + "\n" + - "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); - rs = conn.createStatement().executeQuery(physicalLinkQuery); - // Add any tables (which will all be physical tables) which have not already been upgraded. 
- addPhysicalTables(conn, rs, PTableType.TABLE, physicalTables); - } - } - List sortedPhysicalTables = new ArrayList(physicalTables); - Collections.sort(sortedPhysicalTables); - return sortedPhysicalTables; - } - - private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable, boolean bypassUpgrade) throws SQLException { - TableName physicalName = TableName.valueOf(SchemaUtil.getTableName(schemaName, tableName)); - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - String snapshotName = physicalName + "_" + currentTime; - Admin admin = null; - if (isTable && !bypassUpgrade) { - admin = globalConn.getQueryServices().getAdmin(); - } - boolean restoreSnapshot = false; - boolean success = false; + tableNames.add(rs.getString(1)); + tableNames.add(rs.getString(2)); + tableNames.add(rs.getString(3)); + } + } + // Mark the table and views as upgraded now + for (int i = 0; i < tableNames.size(); i += 3) { + String theTenantId = tableNames.get(i); + String theSchemaName = tableNames.get(i + 1); + String theTableName = tableNames.get(i + 2); + String upsSyscat = String.format("UPSERT INTO " + + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " (" + PhoenixDatabaseMetaData.TENANT_ID + + "," + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + + "," + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN" + + ") VALUES ( ?, ?, ?, TRUE)"); + try (PreparedStatement upsSyscatStmt = globalConn.prepareStatement(upsSyscat)) { + int param = 0; + if (theTenantId == null) { + upsSyscatStmt.setNull(++param, Types.VARCHAR); + } else { + upsSyscatStmt.setString(++param, theTenantId); + } + if (theSchemaName == null) { + upsSyscatStmt.setNull(++param, Types.VARCHAR); + } else { + upsSyscatStmt.setString(++param, theSchemaName); + } + upsSyscatStmt.setString(++param, theTableName); + upsSyscatStmt.execute(); + } + } + globalConn.commit(); + for (int i = 0; i < tableNames.size(); i += 3) { + String theTenantId = tableNames.get(i); + String theSchemaName = tableNames.get(i + 1); + String theTableName = tableNames.get(i + 2); + globalConn.getQueryServices().clearTableFromCache( + theTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theTenantId), + theSchemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theSchemaName),
+          Bytes.toBytes(theTableName), HConstants.LATEST_TIMESTAMP);
+      }
+      success = true;
+      msg = "Completed upgrade of " + escapedTableName + tenantInfo;
+      System.out.println(msg);
+      LOGGER.info(msg);
+    } catch (Exception e) {
+      LOGGER.error("Exception during upgrade of " + physicalName + ":", e);
+    } finally {
+      boolean restored = false;
+      try {
+        if (!success && restoreSnapshot) {
+          admin.disableTable(physicalName);
+          admin.restoreSnapshot(snapshotName, false);
+          admin.enableTable(physicalName);
+          String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
+          System.out.println(msg);
+          LOGGER.info(msg);
+        }
+        restored = true;
+      } catch (Exception e) {
+        LOGGER.warn("Unable to restore snapshot " + snapshotName + " after failed upgrade", e);
+      } finally {
        try {
-            if (isTable && !bypassUpgrade) {
-                String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
-                System.out.println(msg);
-                LOGGER.info(msg);
-                admin.disableTable(physicalName);
-                admin.snapshot(snapshotName, physicalName);
-                admin.enableTable(physicalName);
-                restoreSnapshot = true;
-            }
-            String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
-            String tenantInfo = "";
-            PName tenantId = PName.EMPTY_NAME;
-            if (upgradeConn.getTenantId() != null) {
-                tenantId = upgradeConn.getTenantId();
-                tenantInfo = " for tenant " + tenantId.getString();
-            }
-            String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
-            System.out.println(msg);
-            LOGGER.info(msg);
-            ResultSet rs;
-            if (!bypassUpgrade) {
-                rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
-                rs.next(); // Run query
-            }
-            List tableNames = Lists.newArrayListWithExpectedSize(1024);
-            tableNames.add(tenantId == PName.EMPTY_NAME ? 
null : tenantId.getString()); - tableNames.add(schemaName); - tableNames.add(tableName); - // Find views to mark as upgraded - if (isTable) { - String query = - "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + - "FROM SYSTEM.CATALOG\n" + - "WHERE COLUMN_NAME IS NULL\n" + - "AND COLUMN_FAMILY = '" + physicalName + "'" + - "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue(); - rs = globalConn.createStatement().executeQuery(query); - while (rs.next()) { - tableNames.add(rs.getString(1)); - tableNames.add(rs.getString(2)); - tableNames.add(rs.getString(3)); - } - } - // Mark the table and views as upgraded now - for (int i = 0; i < tableNames.size(); i += 3) { - String theTenantId = tableNames.get(i); - String theSchemaName = tableNames.get(i+1); - String theTableName = tableNames.get(i+2); - String upsSyscat = String.format("UPSERT INTO " - + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME - + " (" + PhoenixDatabaseMetaData.TENANT_ID + "," - + PhoenixDatabaseMetaData.TABLE_SCHEM + "," - + PhoenixDatabaseMetaData.TABLE_NAME + "," - + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN" - + ") VALUES ( ?, ?, ?, TRUE)"); - try (PreparedStatement upsSyscatStmt = globalConn.prepareStatement(upsSyscat)) { - int param = 0; - if (theTenantId == null) { - upsSyscatStmt.setNull(++param, Types.VARCHAR); - } else { - upsSyscatStmt.setString(++param, theTenantId); - } - if (theSchemaName == null) { - upsSyscatStmt.setNull(++param, Types.VARCHAR); - } else { - upsSyscatStmt.setString(++param, theSchemaName); - } - upsSyscatStmt.setString(++param, theTableName); - upsSyscatStmt.execute(); - } - } - globalConn.commit(); - for (int i = 0; i < tableNames.size(); i += 3) { - String theTenantId = tableNames.get(i); - String theSchemaName = tableNames.get(i+1); - String theTableName = tableNames.get(i+2); - globalConn.getQueryServices().clearTableFromCache( - theTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theTenantId), - theSchemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(schemaName), - Bytes.toBytes(theTableName), HConstants.LATEST_TIMESTAMP); - } - success = true; - msg = "Completed upgrade of " + escapedTableName + tenantInfo; - System.out.println(msg); - LOGGER.info(msg); + if (restoreSnapshot && restored) { + admin.deleteSnapshot(snapshotName); + } } catch (Exception e) { - LOGGER.error("Exception during upgrade of " + physicalName + ":", e); - } finally { - boolean restored = false; - try { - if (!success && restoreSnapshot) { - admin.disableTable(physicalName); - admin.restoreSnapshot(snapshotName, false); - admin.enableTable(physicalName); - String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade"; - System.out.println(msg); - LOGGER.info(msg); - } - restored = true; - } catch (Exception e) { - LOGGER.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e); - } finally { - try { - if (restoreSnapshot && restored) { - admin.deleteSnapshot(snapshotName); - } - } catch (Exception e) { - LOGGER.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e); - } finally { - try { - if (admin != null) { - admin.close(); - } - } catch (IOException e) { - LOGGER.warn("Unable to close admin after upgrade:", e); - } - } - } - } - } - - private static boolean isInvalidTableToUpgrade(PTable table) throws SQLException { - return (table.getType() != PTableType.TABLE || // Must be a table - table.getTenantId() != null || // Must be global - !table.getPhysicalName().equals(table.getName())); // Must be the physical table - } - /** - * Upgrade tables and their indexes due to a bug causing descending row keys to have a row key that - * prevents them from being sorted correctly (PHOENIX-2067). - */ - public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, List tablesToUpgrade, boolean bypassUpgrade) throws SQLException { - if (tablesToUpgrade.isEmpty()) { - return; - } - List tablesNeedingUpgrading = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size()); - List invalidTables = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size()); - for (String fullTableName : tablesToUpgrade) { - PTable table = conn.getTable(fullTableName); - if (isInvalidTableToUpgrade(table)) { - invalidTables.add(fullTableName); - } else { - tablesNeedingUpgrading.add(table); - } - } - if (!invalidTables.isEmpty()) { - StringBuilder buf = new StringBuilder("Only physical tables should be upgraded as their views and indexes will be updated with them: "); - for (String fullTableName : invalidTables) { - buf.append(fullTableName); - buf.append(' '); - } - throw new SQLException(buf.toString()); - } - PhoenixConnection upgradeConn = new PhoenixConnection(conn, true, true); - try { - upgradeConn.setAutoCommit(true); - for (PTable table : tablesNeedingUpgrading) { - boolean wasUpgraded = false; - if (!table.rowKeyOrderOptimizable()) { - wasUpgraded = true; - upgradeDescVarLengthRowKeys(upgradeConn, conn, table.getSchemaName().getString(), table.getTableName().getString(), true, bypassUpgrade); - } - - // Upgrade global indexes - for (PTable index : table.getIndexes()) { - if (!index.rowKeyOrderOptimizable() && index.getIndexType() != IndexType.LOCAL) { - wasUpgraded = true; - upgradeDescVarLengthRowKeys(upgradeConn, conn, index.getSchemaName().getString(), index.getTableName().getString(), false, bypassUpgrade); - } - } - - String sharedViewIndexName = Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(table.getName().getBytes())); - // Upgrade view indexes - wasUpgraded |= 
upgradeSharedIndex(upgradeConn, conn, sharedViewIndexName, bypassUpgrade); - String sharedLocalIndexName = Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(table.getName().getBytes())); - // Upgrade local indexes - wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedLocalIndexName, bypassUpgrade); - - if (!wasUpgraded) { - System.out.println("Upgrade not required for this table or its indexes: " + table.getName().getString()); - } - } + LOGGER.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e); } finally { - upgradeConn.close(); - } - } - - /** - * Upgrade shared indexes by querying for all that are associated with our - * physical table. - * @return true if any upgrades were performed and false otherwise. - */ - private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String physicalName, boolean bypassUpgrade) throws SQLException { - String query = String.format( - "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME" - + "FROM SYSTEM.CATALOG cat1" - + "WHERE COLUMN_NAME IS NULL" - + "AND COLUMN_FAMILY = ? " - + "AND LINK_TYPE = %s " - + "ORDER BY TENANT_ID", LinkType.PHYSICAL_TABLE.getSerializedValue()); - try (PreparedStatement selSysCatstmt = globalConn.prepareStatement(query)) { - selSysCatstmt.setString(1, physicalName); - ResultSet rs = selSysCatstmt.executeQuery(); - String lastTenantId = null; - Connection conn = globalConn; - String url = globalConn.getURL(); - boolean wasUpgraded = false; - while (rs.next()) { - String fullTableName = SchemaUtil.getTableName( - rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), - rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); - String tenantId = rs.getString(1); - if (tenantId != null && !tenantId.equals(lastTenantId)) { - if (lastTenantId != null) { - conn.close(); - } - // Open tenant-specific connection when we find a new one - Properties props = new Properties(globalConn.getClientInfo()); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - conn = DriverManager.getConnection(url, props); - lastTenantId = tenantId; - } - PTable table = conn.unwrap(PhoenixConnection.class).getTable(fullTableName); - String tableTenantId = - table.getTenantId() == null ? null : table.getTenantId().getString(); - if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) { - upgradeDescVarLengthRowKeys(upgradeConn, globalConn, - table.getSchemaName().getString(), table.getTableName().getString(), false, - bypassUpgrade); - wasUpgraded = true; - } + try { + if (admin != null) { + admin.close(); } - rs.close(); - if (lastTenantId != null) { - conn.close(); + } catch (IOException e) { + LOGGER.warn("Unable to close admin after upgrade:", e); + } + } + } + } + } + + private static boolean isInvalidTableToUpgrade(PTable table) throws SQLException { + return (table.getType() != PTableType.TABLE || // Must be a table + table.getTenantId() != null || // Must be global + !table.getPhysicalName().equals(table.getName())); // Must be the physical table + } + + /** + * Upgrade tables and their indexes due to a bug causing descending row keys to have a row key + * that prevents them from being sorted correctly (PHOENIX-2067). 
+ */ + public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, + List tablesToUpgrade, boolean bypassUpgrade) throws SQLException { + if (tablesToUpgrade.isEmpty()) { + return; + } + List tablesNeedingUpgrading = + Lists.newArrayListWithExpectedSize(tablesToUpgrade.size()); + List invalidTables = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size()); + for (String fullTableName : tablesToUpgrade) { + PTable table = conn.getTable(fullTableName); + if (isInvalidTableToUpgrade(table)) { + invalidTables.add(fullTableName); + } else { + tablesNeedingUpgrading.add(table); + } + } + if (!invalidTables.isEmpty()) { + StringBuilder buf = new StringBuilder( + "Only physical tables should be upgraded as their views and indexes will be updated with them: "); + for (String fullTableName : invalidTables) { + buf.append(fullTableName); + buf.append(' '); + } + throw new SQLException(buf.toString()); + } + PhoenixConnection upgradeConn = new PhoenixConnection(conn, true, true); + try { + upgradeConn.setAutoCommit(true); + for (PTable table : tablesNeedingUpgrading) { + boolean wasUpgraded = false; + if (!table.rowKeyOrderOptimizable()) { + wasUpgraded = true; + upgradeDescVarLengthRowKeys(upgradeConn, conn, table.getSchemaName().getString(), + table.getTableName().getString(), true, bypassUpgrade); + } + + // Upgrade global indexes + for (PTable index : table.getIndexes()) { + if (!index.rowKeyOrderOptimizable() && index.getIndexType() != IndexType.LOCAL) { + wasUpgraded = true; + upgradeDescVarLengthRowKeys(upgradeConn, conn, index.getSchemaName().getString(), + index.getTableName().getString(), false, bypassUpgrade); + } + } + + String sharedViewIndexName = + Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(table.getName().getBytes())); + // Upgrade view indexes + wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedViewIndexName, bypassUpgrade); + String sharedLocalIndexName = + Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(table.getName().getBytes())); + // Upgrade local indexes + wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedLocalIndexName, bypassUpgrade); + + if (!wasUpgraded) { + System.out.println( + "Upgrade not required for this table or its indexes: " + table.getName().getString()); + } + } + } finally { + upgradeConn.close(); + } + } + + /** + * Upgrade shared indexes by querying for all that are associated with our physical table. + * @return true if any upgrades were performed and false otherwise. + */ + private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, + PhoenixConnection globalConn, String physicalName, boolean bypassUpgrade) throws SQLException { + String query = String.format("SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME" + + "FROM SYSTEM.CATALOG cat1" + "WHERE COLUMN_NAME IS NULL" + "AND COLUMN_FAMILY = ? 
" + + "AND LINK_TYPE = %s " + "ORDER BY TENANT_ID", LinkType.PHYSICAL_TABLE.getSerializedValue()); + try (PreparedStatement selSysCatstmt = globalConn.prepareStatement(query)) { + selSysCatstmt.setString(1, physicalName); + ResultSet rs = selSysCatstmt.executeQuery(); + String lastTenantId = null; + Connection conn = globalConn; + String url = globalConn.getURL(); + boolean wasUpgraded = false; + while (rs.next()) { + String fullTableName = + SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), + rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); + String tenantId = rs.getString(1); + if (tenantId != null && !tenantId.equals(lastTenantId)) { + if (lastTenantId != null) { + conn.close(); + } + // Open tenant-specific connection when we find a new one + Properties props = new Properties(globalConn.getClientInfo()); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + conn = DriverManager.getConnection(url, props); + lastTenantId = tenantId; + } + PTable table = conn.unwrap(PhoenixConnection.class).getTable(fullTableName); + String tableTenantId = table.getTenantId() == null ? null : table.getTenantId().getString(); + if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) { + upgradeDescVarLengthRowKeys(upgradeConn, globalConn, table.getSchemaName().getString(), + table.getTableName().getString(), false, bypassUpgrade); + wasUpgraded = true; + } + } + rs.close(); + if (lastTenantId != null) { + conn.close(); + } + return wasUpgraded; + } + } + + public static void addRowKeyOrderOptimizableCell(List tableMetadata, + byte[] tableHeaderRowKey, long clientTimeStamp) { + Put put = new Put(tableHeaderRowKey, clientTimeStamp); + put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES, + PBoolean.INSTANCE.toBytes(true)); + tableMetadata.add(put); + } + + public static boolean truncateStats(Table metaTable, Table statsTable) + throws IOException, InterruptedException { + byte[] statsTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME, + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE); + List columnCells = metaTable.get(new Get(statsTableKey)).getColumnCells( + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); + long timestamp; + if ( + !columnCells.isEmpty() && (timestamp = columnCells.get(0).getTimestamp()) + < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0 + ) { + + Cell upgradeKV = + PhoenixKeyValueUtil.newKeyValue(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + UPGRADE_TO_4_7_COLUMN_NAME, timestamp, PBoolean.INSTANCE.toBytes(true)); + Put upgradePut = new Put(statsTableKey); + upgradePut.add(upgradeKV); + + // check for null in UPGRADE_TO_4_7_COLUMN_NAME in checkAndPut so that only single client + // drop the rows of SYSTEM.STATS + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(statsTableKey) + .ifNotExists(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, UPGRADE_TO_4_7_COLUMN_NAME) + .build(upgradePut); + if (metaTable.checkAndMutate(checkAndMutate).isSuccess()) { + List mutations = Lists.newArrayListWithExpectedSize(1000); + Scan scan = new Scan(); + scan.setRaw(true); + scan.readAllVersions(); + ResultScanner statsScanner = statsTable.getScanner(scan); + Result r; + mutations.clear(); + int count = 0; + while ((r = statsScanner.next()) != null) { + Delete delete = null; + for (Cell keyValue : r.rawCells()) { + if (keyValue.getType() == Cell.Type.Put) { + if (delete == null) { + delete = new 
Delete(keyValue.getRowArray(), keyValue.getRowOffset(), + keyValue.getRowLength()); + } + KeyValue deleteKeyValue = + new KeyValue(keyValue.getRowArray(), keyValue.getRowOffset(), + keyValue.getRowLength(), keyValue.getFamilyArray(), keyValue.getFamilyOffset(), + keyValue.getFamilyLength(), keyValue.getQualifierArray(), + keyValue.getQualifierOffset(), keyValue.getQualifierLength(), + keyValue.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0); + delete.add(deleteKeyValue); } - return wasUpgraded; - } - } - - public static void addRowKeyOrderOptimizableCell(List tableMetadata, byte[] tableHeaderRowKey, long clientTimeStamp) { - Put put = new Put(tableHeaderRowKey, clientTimeStamp); - put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES, PBoolean.INSTANCE.toBytes(true)); - tableMetadata.add(put); - } - - public static boolean truncateStats(Table metaTable, Table statsTable) - throws IOException, InterruptedException { - byte[] statsTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME, - PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE); - List columnCells = metaTable.get(new Get(statsTableKey)) - .getColumnCells(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); - long timestamp; - if (!columnCells.isEmpty() && (timestamp = columnCells.get(0) - .getTimestamp()) < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) { - - Cell upgradeKV = PhoenixKeyValueUtil.newKeyValue(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - UPGRADE_TO_4_7_COLUMN_NAME, timestamp, PBoolean.INSTANCE.toBytes(true)); - Put upgradePut = new Put(statsTableKey); - upgradePut.add(upgradeKV); - - // check for null in UPGRADE_TO_4_7_COLUMN_NAME in checkAndPut so that only single client - // drop the rows of SYSTEM.STATS - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(statsTableKey) - .ifNotExists(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, UPGRADE_TO_4_7_COLUMN_NAME) - .build(upgradePut); - if (metaTable.checkAndMutate(checkAndMutate).isSuccess()) { - List mutations = Lists.newArrayListWithExpectedSize(1000); - Scan scan = new Scan(); - scan.setRaw(true); - scan.readAllVersions(); - ResultScanner statsScanner = statsTable.getScanner(scan); - Result r; - mutations.clear(); - int count = 0; - while ((r = statsScanner.next()) != null) { - Delete delete = null; - for (Cell keyValue : r.rawCells()) { - if (keyValue.getType() == Cell.Type.Put) { - if (delete == null) { - delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength()); - } - KeyValue deleteKeyValue = new KeyValue(keyValue.getRowArray(), keyValue.getRowOffset(), - keyValue.getRowLength(), keyValue.getFamilyArray(), keyValue.getFamilyOffset(), - keyValue.getFamilyLength(), keyValue.getQualifierArray(), - keyValue.getQualifierOffset(), keyValue.getQualifierLength(), - keyValue.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0); - delete.add(deleteKeyValue); - } - } - if (delete != null) { - mutations.add(delete); - if (count > 10) { - statsTable.batch(mutations, null); - mutations.clear(); - count = 0; - } - count++; - } - } - if (!mutations.isEmpty()) { - statsTable.batch(mutations, null); - } - return true; + } + if (delete != null) { + mutations.add(delete); + if (count > 10) { + statsTable.batch(mutations, null); + mutations.clear(); + count = 0; } + count++; + } + } + if (!mutations.isEmpty()) { + statsTable.batch(mutations, null); + } + return true; + 
} + } + return false; + } + + private static void mapTableToNamespace(Admin admin, Table metatable, String srcTableName, + String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, + PTableType pTableType, PName tenantId) throws SnapshotCreationException, + IllegalArgumentException, IOException, InterruptedException, SQLException { + if (!SchemaUtil.isNamespaceMappingEnabled(pTableType, props)) { + throw new IllegalArgumentException( + SchemaUtil.isSystemTable(srcTableName.getBytes(StandardCharsets.UTF_8)) + ? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE + + " also needs to be enabled along with " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + : QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled"); + } + mapTableToNamespace(admin, srcTableName, destTableName, pTableType); + + byte[] tableKey = SchemaUtil.getTableKey(tenantId != null ? tenantId.getString() : null, + SchemaUtil.getSchemaNameFromFullName(phoenixTableName), + SchemaUtil.getTableNameFromFullName(phoenixTableName)); + List columnCells = metatable.get(new Get(tableKey)).getColumnCells( + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); + if (ts == null) { + if (!columnCells.isEmpty()) { + ts = columnCells.get(0).getTimestamp(); + } else if (PTableType.SYSTEM != pTableType) { + throw new IllegalArgumentException( + "Timestamp passed is null and cannot derive timestamp for " + tableKey + + " from meta table!!"); + } + } + if (ts != null) { + // Update flag to represent table is mapped to namespace + LOGGER.info(String.format( + "Updating meta information of phoenix table '%s' to map to namespace..", phoenixTableName)); + Put put = new Put(tableKey, ts); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, PBoolean.INSTANCE.toBytes(Boolean.TRUE)); + metatable.put(put); + } + } + + public static void mapTableToNamespace(Admin admin, String srcTableName, String destTableName, + PTableType pTableType) throws IOException { + TableName srcTable = TableName.valueOf(SchemaUtil.normalizeIdentifier(srcTableName)); + TableName dstTable = TableName.valueOf(destTableName); + boolean srcTableExists = admin.tableExists(srcTable); + // we need to move physical table in actual namespace for TABLE and Index + if ( + srcTableExists && (PTableType.TABLE.equals(pTableType) || PTableType.INDEX.equals(pTableType) + || PTableType.SYSTEM.equals(pTableType)) + ) { + boolean destTableExists = admin.tableExists(dstTable); + if (!destTableExists) { + String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName; + LOGGER.info("Disabling table " + srcTableName + " .."); + admin.disableTable(srcTable); + LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName)); + admin.snapshot(snapshotName, srcTable); + LOGGER.info(String.format("Restoring snapshot %s in destination table %s..", snapshotName, + destTableName)); + admin.cloneSnapshot(snapshotName, dstTable); + LOGGER.info(String.format("deleting old table %s..", srcTableName)); + admin.deleteTable(srcTable); + LOGGER.info(String.format("deleting snapshot %s..", snapshotName)); + admin.deleteSnapshot(snapshotName); + } else { + LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", + destTableName)); + } + } + } + + /* + * Method to map existing phoenix table to a namespace. 
Should not be used if the table has views or
+   * indexes; instead use the map table utility in psql.py
+   */
+  public static void mapTableToNamespace(Admin admin, Table metatable, String tableName,
+    ReadOnlyProps props, Long ts, PTableType pTableType, PName tenantId)
+    throws IllegalArgumentException, IOException, InterruptedException, SQLException {
+    String destTablename = SchemaUtil
+      .normalizeIdentifier(SchemaUtil.getPhysicalTableName(tableName, props).getNameAsString());
+    mapTableToNamespace(admin, metatable, tableName, destTablename, props, ts, tableName,
+      pTableType, tenantId);
+  }
+
+  public static void upgradeTable(PhoenixConnection conn, String srcTable)
+    throws SQLException, IllegalArgumentException, IOException, InterruptedException {
+    ReadOnlyProps readOnlyProps = conn.getQueryServices().getProps();
+    if (conn.getSchema() != null) {
+      throw new IllegalArgumentException("Schema should not be set for connection!!");
+    }
+    if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, readOnlyProps)) {
+      throw new IllegalArgumentException(
+        QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!");
+    }
+    try (Admin admin = conn.getQueryServices().getAdmin(); Table metatable = conn.getQueryServices()
+      .getTable(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, readOnlyProps).getName())) {
+      String fullTableName = SchemaUtil.normalizeIdentifier(srcTable);
+      String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
+      String tableName = SchemaUtil.getTableNameFromFullName(fullTableName);
+      // Confirm table is not already upgraded
+      PTable table = conn.getTable(fullTableName);
+
+      // Upgrade is not required if schemaName is not present.
+      if (schemaName.equals("") && !PTableType.VIEW.equals(table.getType())) {
+        throw new IllegalArgumentException("Table doesn't have schema name");
+      }
+      if (table.isNamespaceMapped()) {
+        throw new IllegalArgumentException("Table is already upgraded");
+      }
+      if (!schemaName.equals("")) {
+        LOGGER.info(String.format("Creating schema %s..", schemaName));
+        conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
+      }
+      String oldPhysicalName = table.getPhysicalName().getString();
+      String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
+        SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
+      LOGGER.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
+      LOGGER.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName,
+        newPhysicalTablename));
+      LOGGER.info(String.format("tenantId %s..", conn.getTenantId()));
+
+      TableViewFinderResult childViewsResult = new TableViewFinderResult();
+
+      for (int i = 0; i < 2; i++) {
+        try (Table sysCatOrSysChildLinkTable = conn.getQueryServices()
+          .getTable(SchemaUtil
+            .getPhysicalName(i == 0 ? SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES,
+              readOnlyProps)
+            .getName())) {
+          byte[] tenantId = conn.getTenantId() != null ? 
conn.getTenantId().getBytes() : null; + ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantId, + schemaName.getBytes(StandardCharsets.UTF_8), tableName.getBytes(StandardCharsets.UTF_8), + LinkType.CHILD_TABLE, childViewsResult); + break; + } catch (TableNotFoundException ex) { + // try again with SYSTEM.CATALOG in case the schema is old + if (i == 1) { + // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow + throw ex; + } + } + } + + // Upgrade the data or main table + mapTableToNamespace(admin, metatable, fullTableName, newPhysicalTablename, readOnlyProps, + PhoenixRuntime.getCurrentScn(readOnlyProps), fullTableName, table.getType(), + conn.getTenantId()); + // clear the cache and get new table + MetaDataMutationResult result = clearCacheAndGetNewTable(conn, conn.getTenantId(), + table.getSchemaName() == null ? null : table.getSchemaName().getString(), + table.getTableName().getString(), + table.getParentName() == null ? null : table.getParentName().getString(), + table.getTimeStamp()); + + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + throw new TableNotFoundException(schemaName, fullTableName); + } + table = result.getTable(); + byte[] tenantIdBytes = + conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(); + // check whether table is properly upgraded before upgrading indexes + if (table.isNamespaceMapped()) { + for (PTable index : table.getIndexes()) { + String srcTableName = index.getPhysicalName().getString(); + String destTableName = null; + String phoenixTableName = index.getName().getString(); + boolean updateLink = true; + if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { + // Skip already migrated + LOGGER.info(String.format("skipping as it seems index '%s' is already upgraded..", + index.getName())); + continue; + } + if (MetaDataUtil.isLocalIndex(srcTableName)) { + LOGGER.info(String.format("local index '%s' found with physical hbase table name ''..", + index.getName(), srcTableName)); + destTableName = Bytes.toString(MetaDataUtil + .getLocalIndexPhysicalName(newPhysicalTablename.getBytes(StandardCharsets.UTF_8))); + // update parent_table property in local index table descriptor + conn.createStatement().execute( + String.format("ALTER TABLE %s set " + MetaDataUtil.PARENT_TABLE_KEY + "='%s'", + phoenixTableName, table.getPhysicalName())); + } else if (MetaDataUtil.isViewIndex(srcTableName)) { + LOGGER.info(String.format("View index '%s' found with physical hbase table name ''..", + index.getName(), srcTableName)); + destTableName = Bytes.toString(MetaDataUtil + .getViewIndexPhysicalName(newPhysicalTablename.getBytes(StandardCharsets.UTF_8))); + } else { + LOGGER.info(String.format("Global index '%s' found with physical hbase table name ''..", + index.getName(), srcTableName)); + destTableName = + SchemaUtil.getPhysicalTableName(index.getPhysicalName().getString(), readOnlyProps) + .getNameAsString(); + } + LOGGER.info(String.format("Upgrading index %s..", index.getName())); + if ( + !(table.getType() == PTableType.VIEW && !MetaDataUtil.isViewIndex(srcTableName) + && IndexType.LOCAL != index.getIndexType()) + ) { + mapTableToNamespace(admin, metatable, srcTableName, destTableName, readOnlyProps, + PhoenixRuntime.getCurrentScn(readOnlyProps), phoenixTableName, index.getType(), + conn.getTenantId()); + } + if (updateLink) { + LOGGER + .info(String.format("Updating link information for index '%s' ..", index.getName())); + updateLink(conn, srcTableName, destTableName, 
index.getSchemaName(),
+              index.getTableName());
+            conn.commit();
+          }
+
+          conn.getQueryServices().clearTableFromCache(tenantIdBytes,
+            index.getSchemaName().getBytes(), index.getTableName().getBytes(),
+            PhoenixRuntime.getCurrentScn(readOnlyProps));
+        }
+        updateIndexesSequenceIfPresent(conn, table);
+        conn.commit();
+      } else {
+        throw new RuntimeException(
+          "Error: problem occurred during upgrade. " + "Table is not upgraded successfully");
+      }
+      if (table.getType() == PTableType.VIEW) {
+        LOGGER
+          .info(String.format("Updating link information for view '%s' ..", table.getTableName()));
+        updateLink(conn, oldPhysicalName, newPhysicalTablename, table.getSchemaName(),
+          table.getTableName());
+        conn.commit();
+
+        // if the view is a first level child, then we need to create the PARENT_TABLE link
+        // that was overwritten by the PHYSICAL_TABLE link
+        if (table.getParentName().equals(table.getPhysicalName())) {
+          LOGGER.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
+          // Add row linking view to its parent
+          PreparedStatement linkStatement = conn.prepareStatement(MetaDataClient.CREATE_VIEW_LINK);
+          linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes));
+          linkStatement.setString(2, table.getSchemaName().getString());
+          linkStatement.setString(3, table.getTableName().getString());
+          linkStatement.setString(4, table.getParentName().getString());
+          linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
+          linkStatement.setString(6, null);
+          linkStatement.execute();
+          conn.commit();
+        }
+
+        conn.getQueryServices().clearTableFromCache(tenantIdBytes, table.getSchemaName().getBytes(),
+          table.getTableName().getBytes(), PhoenixRuntime.getCurrentScn(readOnlyProps));
+      }
+      // Upgrade all child views
+      if (table.getType() == PTableType.TABLE) {
+        mapChildViewsToNamespace(conn.getURL(), conn.getClientInfo(), childViewsResult.getLinks());
+      }
+    }
+  }
+
+  public static MetaDataMutationResult clearCacheAndGetNewTable(PhoenixConnection conn,
+    PName tenantId, String schemaName, String tableName, String parentName, long timestamp)
+    throws SQLException {
+    clearCache(conn, tenantId, schemaName, tableName, parentName, timestamp);
+    MetaDataMutationResult result =
+      new MetaDataClient(conn).updateCache(tenantId, schemaName, tableName, true);
+    return result;
+  }
+
+  public static void clearCache(PhoenixConnection conn, PName tenantId, String schemaName,
+    String tableName, String parentName, long timestamp) throws SQLException {
+    conn.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentName,
+      timestamp);
+    byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
+    byte[] schemaBytes = schemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : schemaName.getBytes(); + conn.getQueryServices().clearTableFromCache(tenantIdBytes, schemaBytes, tableName.getBytes(), + PhoenixRuntime.getCurrentScn(conn.getQueryServices().getProps())); + } + + private static void updateIndexesSequenceIfPresent(PhoenixConnection connection, PTable dataTable) + throws SQLException { + PName tenantId = connection.getTenantId(); + PName physicalName = dataTable.getName(); + PName oldPhysicalName = PNameFactory.newName(physicalName.toString() + .replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)); + String oldSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(oldPhysicalName, false); + String newSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(physicalName, true); + String newSequenceName = MetaDataUtil.getViewIndexSequenceName(physicalName, tenantId, true); + // create new entry with new schema format + String upsert = + "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " SELECT NULL, ?, ?, " + START_WITH + + "," + CURRENT_VALUE + "," + INCREMENT_BY + "," + CACHE_SIZE + "," + MIN_VALUE + "," + + MAX_VALUE + "," + CYCLE_FLAG + "," + LIMIT_REACHED_FLAG + " FROM " + + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " + PhoenixDatabaseMetaData.TENANT_ID + + " IS NULL AND " + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = ?"; + try (PreparedStatement upsertSeqStmt = connection.prepareStatement(upsert)) { + upsertSeqStmt.setString(1, newSchemaName); + upsertSeqStmt.setString(2, newSequenceName); + upsertSeqStmt.setString(3, oldSchemaName); + upsertSeqStmt.executeUpdate(); + } + } + + private static void updateLink(PhoenixConnection conn, String srcTableName, String destTableName, + PName schemaName, PName tableName) throws SQLException { + String updateLinkSql = String.format(UPDATE_LINK, destTableName); + boolean hasTenantId = conn.getTenantId() != null && conn.getTenantId().getBytes().length != 0; + if (hasTenantId) { + updateLinkSql += " AND TENANT_ID = ? "; + } + PreparedStatement updateLinkStatment = conn.prepareStatement(updateLinkSql); + updateLinkStatment.setString(1, schemaName.getString()); + updateLinkStatment.setString(2, schemaName.getString()); + updateLinkStatment.setString(3, tableName.getString()); + updateLinkStatment.setString(4, srcTableName); + if (hasTenantId) { + updateLinkStatment.setString(5, conn.getTenantId().getString()); + } + updateLinkStatment.execute(); + String deleteLinkSql = DELETE_LINK; + if (hasTenantId) { + deleteLinkSql += (" AND TENANT_ID = ? "); + } + PreparedStatement deleteLinkStatment = conn.prepareStatement(deleteLinkSql); + deleteLinkStatment.setString(1, schemaName.getString()); + deleteLinkStatment.setString(2, schemaName.getString()); + deleteLinkStatment.setString(3, tableName.getString()); + deleteLinkStatment.setString(4, srcTableName); + if (hasTenantId) { + deleteLinkStatment.setString(5, conn.getTenantId().getString()); + } + deleteLinkStatment.execute(); + } + + private static void mapChildViewsToNamespace(String connUrl, Properties props, + List viewInfoList) + throws SQLException, IllegalArgumentException, IOException, InterruptedException { + String tenantId; + String prevTenantId = null; + PhoenixConnection conn = null; + for (TableInfo viewInfo : viewInfoList) { + tenantId = viewInfo.getTenantId() != null ? 
Bytes.toString(viewInfo.getTenantId()) : null; + String viewName = SchemaUtil.getTableName(viewInfo.getSchemaName(), viewInfo.getTableName()); + if (!java.util.Objects.equals(prevTenantId, tenantId)) { + if (tenantId != null) { + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } else { + props.remove(PhoenixRuntime.TENANT_ID_ATTRIB); } - return false; - } - - private static void mapTableToNamespace(Admin admin, Table metatable, String srcTableName, - String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType,PName tenantId) - throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException, - SQLException { - if (!SchemaUtil.isNamespaceMappingEnabled(pTableType, props)) { - throw new IllegalArgumentException( - SchemaUtil.isSystemTable(srcTableName.getBytes(StandardCharsets.UTF_8)) - ? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE - + " also needs to be enabled along with " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED - : QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled"); } - mapTableToNamespace(admin, srcTableName, destTableName, pTableType); - - byte[] tableKey = SchemaUtil.getTableKey(tenantId != null ? tenantId.getString() : null, - SchemaUtil.getSchemaNameFromFullName(phoenixTableName), - SchemaUtil.getTableNameFromFullName(phoenixTableName)); - List columnCells = metatable.get(new Get(tableKey)) - .getColumnCells(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); - if (ts == null) { - if (!columnCells.isEmpty()) { - ts = columnCells.get(0).getTimestamp(); - } else if (PTableType.SYSTEM != pTableType) { throw new IllegalArgumentException( - "Timestamp passed is null and cannot derive timestamp for " + tableKey + " from meta table!!"); } - } - if (ts != null) { - // Update flag to represent table is mapped to namespace - LOGGER.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..", - phoenixTableName)); - Put put = new Put(tableKey, ts); - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, - PBoolean.INSTANCE.toBytes(Boolean.TRUE)); - metatable.put(put); + if (conn != null) { + conn.close(); } - } - - public static void mapTableToNamespace(Admin admin, String srcTableName, String destTableName, PTableType pTableType) throws IOException { - TableName srcTable = TableName.valueOf(SchemaUtil.normalizeIdentifier(srcTableName)); - TableName dstTable = TableName.valueOf(destTableName); - boolean srcTableExists=admin.tableExists(srcTable); - // we need to move physical table in actual namespace for TABLE and Index - if (srcTableExists && (PTableType.TABLE.equals(pTableType) - || PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) { - boolean destTableExists=admin.tableExists(dstTable); - if (!destTableExists) { - String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName; - LOGGER.info("Disabling table " + srcTableName + " .."); - admin.disableTable(srcTable); - LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName)); - admin.snapshot(snapshotName, srcTable); - LOGGER.info( - String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName)); - admin.cloneSnapshot(snapshotName, dstTable); - LOGGER.info(String.format("deleting old table %s..", srcTableName)); - admin.deleteTable(srcTable); - LOGGER.info(String.format("deleting snapshot 
%s..", snapshotName)); - admin.deleteSnapshot(snapshotName); - } else { - LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", destTableName)); - } + conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class); + } + LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName, tenantId)); + if (conn != null) { + try { + UpgradeUtil.upgradeTable(conn, viewName); + } catch (TableNotFoundException e) { + // Ignore + LOGGER.error("Error getting PTable for view: " + viewInfo, e); } + } + prevTenantId = tenantId; } + } + public static void mergeViewIndexIdSequences(PhoenixConnection olderMetaConnection) + throws SQLException { /* - * Method to map existing phoenix table to a namespace. Should not be use if tables has views and indexes ,instead - * use map table utility in psql.py + * before PHOENIX-5132, there was a per-tenant sequence to generate view index ids, which could + * cause problems if global and tenant-owned view indexes were mixed for the same physical base + * table. Now there's just one sequence for all view indexes of the same physical table, but we + * have to check to see if there are any legacy sequences, and merge them into a single sequence + * equal to max + 101 (for a safety margin) of the largest legacy sequence to avoid collisons. */ - public static void mapTableToNamespace(Admin admin, Table metatable, String tableName, - ReadOnlyProps props, Long ts, PTableType pTableType, PName tenantId) - throws IllegalArgumentException, IOException, InterruptedException, SQLException { - String destTablename = SchemaUtil - .normalizeIdentifier(SchemaUtil.getPhysicalTableName(tableName, props).getNameAsString()); - mapTableToNamespace(admin, metatable, tableName, destTablename, props, ts, tableName, pTableType, tenantId); - } - - public static void upgradeTable(PhoenixConnection conn, String srcTable) throws SQLException, - IllegalArgumentException, IOException, InterruptedException { - ReadOnlyProps readOnlyProps = conn.getQueryServices().getProps(); - if (conn.getSchema() != null) { throw new IllegalArgumentException( - "Schema should not be set for connection!!"); } - if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, - readOnlyProps)) { throw new IllegalArgumentException( - QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!"); } - try (Admin admin = conn.getQueryServices().getAdmin(); - Table metatable = conn.getQueryServices().getTable(SchemaUtil.getPhysicalName( - SYSTEM_CATALOG_NAME_BYTES, readOnlyProps).getName())) { - String fullTableName = SchemaUtil.normalizeIdentifier(srcTable); - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); - String tableName = SchemaUtil.getTableNameFromFullName(fullTableName); - // Confirm table is not already upgraded - PTable table = conn.getTable(fullTableName); - - // Upgrade is not required if schemaName is not present. 
- if (schemaName.equals("") && !PTableType.VIEW - .equals(table.getType())) { - throw new IllegalArgumentException("Table doesn't have schema name"); - } - if (table.isNamespaceMapped()) { - throw new IllegalArgumentException("Table is already upgraded"); - } - if (!schemaName.equals("")) { - LOGGER.info(String.format("Creating schema %s..", schemaName)); - conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); - } - String oldPhysicalName = table.getPhysicalName().getString(); - String newPhysicalTablename = SchemaUtil.normalizeIdentifier( - SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps) - .getNameAsString()); - LOGGER.info(String.format("Upgrading %s %s..", table.getType(), fullTableName)); - LOGGER.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", - oldPhysicalName, newPhysicalTablename)); - LOGGER.info(String.format("teanantId %s..", conn.getTenantId())); - - TableViewFinderResult childViewsResult = new TableViewFinderResult(); - - for (int i=0; i<2; i++) { - try (Table sysCatOrSysChildLinkTable = conn.getQueryServices() - .getTable(SchemaUtil.getPhysicalName( - i==0 ? SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES, - readOnlyProps).getName())) { - byte[] tenantId = conn.getTenantId() != null ? - conn.getTenantId().getBytes() : null; - ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantId, - schemaName.getBytes(StandardCharsets.UTF_8), - tableName.getBytes(StandardCharsets.UTF_8), LinkType.CHILD_TABLE, - childViewsResult); - break; - } catch (TableNotFoundException ex) { - // try again with SYSTEM.CATALOG in case the schema is old - if (i == 1) { - // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow - throw ex; - } - } - } - - // Upgrade the data or main table - mapTableToNamespace(admin, metatable, fullTableName, newPhysicalTablename, - readOnlyProps, PhoenixRuntime.getCurrentScn(readOnlyProps), fullTableName, - table.getType(),conn.getTenantId()); - // clear the cache and get new table - MetaDataMutationResult result = clearCacheAndGetNewTable(conn, - conn.getTenantId(), - table.getSchemaName()==null?null:table.getSchemaName().getString(), - table.getTableName().getString(), - table.getParentName()==null?null:table.getParentName().getString(), - table.getTimeStamp()); - - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - throw new TableNotFoundException(schemaName, fullTableName); - } - table = result.getTable(); - byte[] tenantIdBytes = conn.getTenantId() == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : - conn.getTenantId().getBytes(); - // check whether table is properly upgraded before upgrading indexes - if (table.isNamespaceMapped()) { - for (PTable index : table.getIndexes()) { - String srcTableName = index.getPhysicalName().getString(); - String destTableName = null; - String phoenixTableName = index.getName().getString(); - boolean updateLink = true; - if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) { - // Skip already migrated - LOGGER.info(String.format("skipping as it seems index '%s' is already upgraded..", - index.getName())); - continue; - } - if (MetaDataUtil.isLocalIndex(srcTableName)) { - LOGGER.info(String.format("local index '%s' found with physical hbase table name ''..", - index.getName(), srcTableName)); - destTableName = Bytes - .toString(MetaDataUtil.getLocalIndexPhysicalName( - newPhysicalTablename.getBytes(StandardCharsets.UTF_8))); - // update parent_table property in local index table descriptor - conn.createStatement() - .execute(String.format("ALTER TABLE %s set " + - MetaDataUtil.PARENT_TABLE_KEY + "='%s'", - phoenixTableName, table.getPhysicalName())); - } else if (MetaDataUtil.isViewIndex(srcTableName)) { - LOGGER.info(String.format("View index '%s' found with physical hbase table name ''..", - index.getName(), srcTableName)); - destTableName = Bytes - .toString(MetaDataUtil.getViewIndexPhysicalName( - newPhysicalTablename.getBytes(StandardCharsets.UTF_8))); - } else { - LOGGER.info(String.format("Global index '%s' found with physical hbase table name ''..", - index.getName(), srcTableName)); - destTableName = SchemaUtil - .getPhysicalTableName(index.getPhysicalName().getString(), - readOnlyProps).getNameAsString(); - } - LOGGER.info(String.format("Upgrading index %s..", index.getName())); - if (!(table.getType() == PTableType.VIEW && !MetaDataUtil.isViewIndex( - srcTableName) - && IndexType.LOCAL != index.getIndexType())) { - mapTableToNamespace(admin, metatable, srcTableName, destTableName, - readOnlyProps, PhoenixRuntime.getCurrentScn(readOnlyProps), - phoenixTableName, index.getType(), conn.getTenantId()); - } - if (updateLink) { - LOGGER.info(String.format("Updating link information for index '%s' ..", - index.getName())); - updateLink(conn, srcTableName, destTableName,index.getSchemaName(), - index.getTableName()); - conn.commit(); - } - - conn.getQueryServices().clearTableFromCache( - tenantIdBytes, - index.getSchemaName().getBytes(), index.getTableName().getBytes(), - PhoenixRuntime.getCurrentScn(readOnlyProps)); - } - updateIndexesSequenceIfPresent(conn, table); - conn.commit(); - } else { - throw new RuntimeException("Error: problem occured during upgrade. 
" - + "Table is not upgraded successfully"); - } - if (table.getType() == PTableType.VIEW) { - LOGGER.info(String.format("Updating link information for view '%s' ..", - table.getTableName())); - updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(), - table.getTableName()); - conn.commit(); - - // if the view is a first level child, then we need to create the PARENT_TABLE link - // that was overwritten by the PHYSICAL_TABLE link - if (table.getParentName().equals(table.getPhysicalName())) { - LOGGER.info(String.format("Creating PARENT link for view '%s' ..", - table.getTableName())); - // Add row linking view to its parent - PreparedStatement linkStatement = conn.prepareStatement( - MetaDataClient.CREATE_VIEW_LINK); - linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes)); - linkStatement.setString(2, table.getSchemaName().getString()); - linkStatement.setString(3, table.getTableName().getString()); - linkStatement.setString(4, table.getParentName().getString()); - linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue()); - linkStatement.setString(6, null); - linkStatement.execute(); - conn.commit(); - } - - conn.getQueryServices().clearTableFromCache( - tenantIdBytes, - table.getSchemaName().getBytes(), table.getTableName().getBytes(), - PhoenixRuntime.getCurrentScn(readOnlyProps)); - } - // Upgrade all child views - if (table.getType() == PTableType.TABLE) { - mapChildViewsToNamespace(conn.getURL(), conn.getClientInfo(), - childViewsResult.getLinks()); - } - } - } - - public static MetaDataMutationResult clearCacheAndGetNewTable(PhoenixConnection conn, PName tenantId, - String schemaName, String tableName, String parentName, long timestamp) - throws SQLException { - clearCache(conn, tenantId, schemaName, tableName, parentName, timestamp); - MetaDataMutationResult result = - new MetaDataClient(conn).updateCache(tenantId, schemaName, tableName, - true); - return result; - } - - public static void clearCache(PhoenixConnection conn, PName tenantId, - String schemaName, String tableName, String parentName, long timestamp) - throws SQLException { - conn.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), - parentName, timestamp); - byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : - tenantId.getBytes(); - byte[] schemaBytes = schemaName == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : - schemaName.getBytes(); - conn.getQueryServices().clearTableFromCache( - tenantIdBytes, - schemaBytes, tableName.getBytes(), - PhoenixRuntime.getCurrentScn(conn.getQueryServices().getProps())); - } - - private static void updateIndexesSequenceIfPresent(PhoenixConnection connection, PTable dataTable) - throws SQLException { - PName tenantId = connection.getTenantId(); - PName physicalName = dataTable.getName(); - PName oldPhysicalName = PNameFactory.newName( - physicalName.toString().replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)); - String oldSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(oldPhysicalName, false); - String newSchemaName = MetaDataUtil.getViewIndexSequenceSchemaName(physicalName, true); - String newSequenceName = MetaDataUtil.getViewIndexSequenceName(physicalName, tenantId, true); - // create new entry with new schema format - String upsert = "UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE - + " SELECT NULL, ?, ?, " + START_WITH + "," + CURRENT_VALUE + "," + INCREMENT_BY - + "," + CACHE_SIZE + "," + MIN_VALUE + "," + MAX_VALUE + "," + CYCLE_FLAG + "," - + LIMIT_REACHED_FLAG + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " - + PhoenixDatabaseMetaData.TENANT_ID + " IS NULL AND " - + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = ?"; - try (PreparedStatement upsertSeqStmt = connection.prepareStatement(upsert)) { - upsertSeqStmt.setString(1, newSchemaName); - upsertSeqStmt.setString(2, newSequenceName); - upsertSeqStmt.setString(3, oldSchemaName); - upsertSeqStmt.executeUpdate(); - } - } - - private static void updateLink(PhoenixConnection conn, String srcTableName, - String destTableName, PName schemaName, PName tableName) throws SQLException { - String updateLinkSql = String.format(UPDATE_LINK, destTableName); - boolean hasTenantId = conn.getTenantId() != null && conn.getTenantId().getBytes().length!=0; - if (hasTenantId) { - updateLinkSql += " AND TENANT_ID = ? "; - } - PreparedStatement updateLinkStatment = conn.prepareStatement(updateLinkSql); - updateLinkStatment.setString(1, schemaName.getString()); - updateLinkStatment.setString(2, schemaName.getString()); - updateLinkStatment.setString(3, tableName.getString()); - updateLinkStatment.setString(4, srcTableName); - if (hasTenantId) { - updateLinkStatment.setString(5, conn.getTenantId().getString()); - } - updateLinkStatment.execute(); - String deleteLinkSql = DELETE_LINK; - if (hasTenantId) { - deleteLinkSql += (" AND TENANT_ID = ? "); - } - PreparedStatement deleteLinkStatment = conn.prepareStatement(deleteLinkSql); - deleteLinkStatment.setString(1, schemaName.getString()); - deleteLinkStatment.setString(2, schemaName.getString()); - deleteLinkStatment.setString(3, tableName.getString()); - deleteLinkStatment.setString(4, srcTableName); - if (hasTenantId) { - deleteLinkStatment.setString(5, conn.getTenantId().getString()); - } - deleteLinkStatment.execute(); - } - - private static void mapChildViewsToNamespace(String connUrl, Properties props, - List viewInfoList) throws SQLException, IllegalArgumentException, - IOException, InterruptedException { - String tenantId; - String prevTenantId = null; - PhoenixConnection conn = null; - for (TableInfo viewInfo : viewInfoList) { - tenantId = viewInfo.getTenantId()!=null ? 
Bytes.toString(viewInfo.getTenantId()) : null; - String viewName = SchemaUtil.getTableName(viewInfo.getSchemaName(), - viewInfo.getTableName()); - if (!java.util.Objects.equals(prevTenantId, tenantId)) { - if (tenantId != null) { - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } else { - props.remove(PhoenixRuntime.TENANT_ID_ATTRIB); - } - if (conn!=null) { - conn.close(); - } - conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class); - } - LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId)); - if (conn != null) { - try { - UpgradeUtil.upgradeTable(conn, viewName); - } catch (TableNotFoundException e) { - // Ignore - LOGGER.error("Error getting PTable for view: " + viewInfo, e); - } - } - prevTenantId = tenantId; - } - } - - public static void mergeViewIndexIdSequences(PhoenixConnection olderMetaConnection) - throws SQLException{ - /* before PHOENIX-5132, there was a per-tenant sequence to generate view index ids, - which could cause problems if global and tenant-owned view indexes were mixed for the - same physical base table. Now there's just one sequence for all view indexes of the same - physical table, but we have to check to see if there are any legacy sequences, and - merge them into a single sequence equal to max + 101 (for a safety margin) - of the largest legacy sequence to avoid collisons. - */ - //map of physical table names to view index sequences - Map> sequenceTableMap = new HashMap<>(); - try (PhoenixConnection metaConnection = new PhoenixConnection( - olderMetaConnection, HConstants.LATEST_TIMESTAMP)) { - DatabaseMetaData metaData = metaConnection.getMetaData(); - ConnectionQueryServices cqs = metaConnection.getQueryServices(); - try (ResultSet sequenceRS = metaData.getTables(null, null, - "%" + MetaDataUtil.VIEW_INDEX_SEQUENCE_NAME_PREFIX + "%", - new String[]{PhoenixDatabaseMetaData.SEQUENCE_TABLE_TYPE})) { - while (sequenceRS.next()) { - String tenantId = sequenceRS.getString(TABLE_CAT); - String schemaName = sequenceRS.getString(TABLE_SCHEM); - String sequenceName = sequenceRS.getString(TABLE_NAME); - int numBuckets = sequenceRS.getInt(SALT_BUCKETS); - SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, numBuckets); - String baseTableName; - //under the old naming convention, view index sequences - // of non-namespace mapped tables stored their physical table name in the sequence schema for - //some reason. Namespace-mapped tables stored it in the sequence name itself. 
- //Note the difference between VIEW_INDEX_SEQUENCE_PREFIX (_SEQ_) - //and VIEW_INDEX_SEQUENCE_NAME_PREFIX (_ID_) - if (schemaName != null && schemaName.contains(MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX)) { - baseTableName = schemaName.replace(MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX, ""); - } else { - baseTableName = SchemaUtil.getTableName(schemaName, - sequenceName.replace(MetaDataUtil.VIEW_INDEX_SEQUENCE_NAME_PREFIX, "")); - } - if (!sequenceTableMap.containsKey(baseTableName)) { - sequenceTableMap.put(baseTableName, new ArrayList()); - } - sequenceTableMap.get(baseTableName).add(key); - } - } - for (String baseTableName : sequenceTableMap.keySet()) { - Map currentSequenceValues = new HashMap(); - long maxViewIndexId = Long.MIN_VALUE; - PName name = PNameFactory.newName(baseTableName); - boolean hasNamespaceMapping = - SchemaUtil.isNamespaceMappingEnabled(null, cqs.getConfiguration()) || - cqs.getProps().getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, false); - List existingSequenceKeys = sequenceTableMap.get(baseTableName); - for (SequenceKey sequenceKey : existingSequenceKeys) { - long[] currentValueArray = new long[1]; - SQLException[] sqlExceptions = new SQLException[1]; - cqs.incrementSequences( - Lists.newArrayList(new SequenceAllocation(sequenceKey, 1L)), - EnvironmentEdgeManager.currentTimeMillis(), - currentValueArray, new SQLException[1]); - - if (sqlExceptions[0] != null) { - LOGGER.error("Unable to convert view index sequence because of error. " + - "It will need to be converted manually, " + - " or there's a risk that two view indexes of the same base table " + - "will have colliding view index ids.", sqlExceptions[0]); - continue; - } - if (currentValueArray[0] > maxViewIndexId) { - maxViewIndexId = currentValueArray[0]; - } - currentSequenceValues.put(sequenceKey, currentValueArray[0]); - } - //just in case someone is creating a view index RIGHT NOW, increment maxViewIndexId - //by 100 to make very sure there are no collisions - maxViewIndexId += 100; - try { - //In one case (namespaced-mapped base table, global view index), the new sequence - //is the same as the old sequence, so rather than create it we just increment it - //to the right value. - SequenceKey newSequenceKey = new SequenceKey(null, MetaDataUtil.getViewIndexSequenceSchemaName(name, hasNamespaceMapping), - MetaDataUtil.getViewIndexSequenceName(name, null, hasNamespaceMapping), cqs.getSequenceSaltBuckets()); - if (currentSequenceValues.containsKey(newSequenceKey)) { - long incrementValue = maxViewIndexId - currentSequenceValues.get(newSequenceKey); - SQLException[] incrementExceptions = new SQLException[1]; - List incrementAllocations = Lists.newArrayList(new SequenceAllocation(newSequenceKey, incrementValue)); - cqs.incrementSequences(incrementAllocations, EnvironmentEdgeManager.currentTimeMillis(), - new long[1], incrementExceptions); - if (incrementExceptions[0] != null) { - throw incrementExceptions[0]; - } - } else { - cqs.createSequence(null, newSequenceKey.getSchemaName(), - newSequenceKey.getSequenceName(), maxViewIndexId, 1, 1, - Long.MIN_VALUE, Long.MAX_VALUE, - false, EnvironmentEdgeManager.currentTimeMillis()); - } - } catch (SequenceAlreadyExistsException sae) { - LOGGER.info("Tried to create view index sequence " - + SchemaUtil.getTableName(sae.getSchemaName(), sae.getSequenceName()) + - " during upgrade but it already existed. 
This is probably fine."); - } + // map of physical table names to view index sequences + Map> sequenceTableMap = new HashMap<>(); + try (PhoenixConnection metaConnection = + new PhoenixConnection(olderMetaConnection, HConstants.LATEST_TIMESTAMP)) { + DatabaseMetaData metaData = metaConnection.getMetaData(); + ConnectionQueryServices cqs = metaConnection.getQueryServices(); + try (ResultSet sequenceRS = + metaData.getTables(null, null, "%" + MetaDataUtil.VIEW_INDEX_SEQUENCE_NAME_PREFIX + "%", + new String[] { PhoenixDatabaseMetaData.SEQUENCE_TABLE_TYPE })) { + while (sequenceRS.next()) { + String tenantId = sequenceRS.getString(TABLE_CAT); + String schemaName = sequenceRS.getString(TABLE_SCHEM); + String sequenceName = sequenceRS.getString(TABLE_NAME); + int numBuckets = sequenceRS.getInt(SALT_BUCKETS); + SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, numBuckets); + String baseTableName; + // under the old naming convention, view index sequences + // of non-namespace mapped tables stored their physical table name in the sequence schema + // for + // some reason. Namespace-mapped tables stored it in the sequence name itself. + // Note the difference between VIEW_INDEX_SEQUENCE_PREFIX (_SEQ_) + // and VIEW_INDEX_SEQUENCE_NAME_PREFIX (_ID_) + if (schemaName != null && schemaName.contains(MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX)) { + baseTableName = schemaName.replace(MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX, ""); + } else { + baseTableName = SchemaUtil.getTableName(schemaName, + sequenceName.replace(MetaDataUtil.VIEW_INDEX_SEQUENCE_NAME_PREFIX, "")); + } + if (!sequenceTableMap.containsKey(baseTableName)) { + sequenceTableMap.put(baseTableName, new ArrayList()); + } + sequenceTableMap.get(baseTableName).add(key); + } + } + for (String baseTableName : sequenceTableMap.keySet()) { + Map currentSequenceValues = new HashMap(); + long maxViewIndexId = Long.MIN_VALUE; + PName name = PNameFactory.newName(baseTableName); + boolean hasNamespaceMapping = + SchemaUtil.isNamespaceMappingEnabled(null, cqs.getConfiguration()) + || cqs.getProps().getBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, false); + List existingSequenceKeys = sequenceTableMap.get(baseTableName); + for (SequenceKey sequenceKey : existingSequenceKeys) { + long[] currentValueArray = new long[1]; + SQLException[] sqlExceptions = new SQLException[1]; + cqs.incrementSequences(Lists.newArrayList(new SequenceAllocation(sequenceKey, 1L)), + EnvironmentEdgeManager.currentTimeMillis(), currentValueArray, new SQLException[1]); + + if (sqlExceptions[0] != null) { + LOGGER.error("Unable to convert view index sequence because of error. " + + "It will need to be converted manually, " + + " or there's a risk that two view indexes of the same base table " + + "will have colliding view index ids.", sqlExceptions[0]); + continue; + } + if (currentValueArray[0] > maxViewIndexId) { + maxViewIndexId = currentValueArray[0]; + } + currentSequenceValues.put(sequenceKey, currentValueArray[0]); + } + // just in case someone is creating a view index RIGHT NOW, increment maxViewIndexId + // by 100 to make very sure there are no collisions + maxViewIndexId += 100; + try { + // In one case (namespaced-mapped base table, global view index), the new sequence + // is the same as the old sequence, so rather than create it we just increment it + // to the right value. 
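A condensed model of the increment-or-create decision made below, using a plain map in place of the Phoenix sequence APIs (the method and parameter names here are illustrative only):

    // currentValues: sequence name -> value read while scanning the legacy sequences
    // targetValue:   largest legacy view index id plus the 100-id safety margin computed above
    static void mergeTo(java.util.Map<String, Long> currentValues, String newSequenceName,
        long targetValue) {
      Long existing = currentValues.get(newSequenceName);
      if (existing != null) {
        // the new-convention sequence already exists, so advance it by the difference
        long incrementBy = targetValue - existing;
        currentValues.put(newSequenceName, existing + incrementBy);
      } else {
        // otherwise a fresh sequence is started at the merged value
        currentValues.put(newSequenceName, targetValue);
      }
    }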
+ SequenceKey newSequenceKey = new SequenceKey(null, + MetaDataUtil.getViewIndexSequenceSchemaName(name, hasNamespaceMapping), + MetaDataUtil.getViewIndexSequenceName(name, null, hasNamespaceMapping), + cqs.getSequenceSaltBuckets()); + if (currentSequenceValues.containsKey(newSequenceKey)) { + long incrementValue = maxViewIndexId - currentSequenceValues.get(newSequenceKey); + SQLException[] incrementExceptions = new SQLException[1]; + List incrementAllocations = + Lists.newArrayList(new SequenceAllocation(newSequenceKey, incrementValue)); + cqs.incrementSequences(incrementAllocations, EnvironmentEdgeManager.currentTimeMillis(), + new long[1], incrementExceptions); + if (incrementExceptions[0] != null) { + throw incrementExceptions[0]; } - } - } - - public static String getSysTableSnapshotName( - long currentSystemTableTimestamp, String tableName) { - Format formatter = new SimpleDateFormat("yyyyMMddHHmmss"); - String date = formatter.format(new Date(EnvironmentEdgeManager.currentTimeMillis())); - String upgradingFrom = getVersion(currentSystemTableTimestamp); - return String.format("SNAPSHOT_%s_%s_TO_%s_%s", tableName, - upgradingFrom, CURRENT_CLIENT_VERSION, date); - } - - public static boolean isNoUpgradeSet(Properties props) { - return Boolean.compare(true, Boolean.valueOf(props.getProperty(DO_NOT_UPGRADE))) == 0; - } - - public static void doNotUpgradeOnFirstConnection(Properties props) { - props.setProperty(DO_NOT_UPGRADE, String.valueOf(true)); - } - - public static boolean isUpdateViewIndexIdColumnDataTypeFromShortToLongNeeded( - PhoenixConnection metaConnection, byte[] rowKey, byte[] syscatBytes) { - try (Table sysTable = metaConnection.getQueryServices().getTable(syscatBytes)) { - Scan s = new Scan(); - s.setRowPrefixFilter(rowKey); - s.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.DATA_TYPE_BYTES); - ResultScanner scanner = sysTable.getScanner(s); - Result result = scanner.next(); - Cell cell = result.getColumnLatestCell( - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.DATA_TYPE_BYTES); - return Bytes.compareTo(CellUtil.cloneValue(cell), - PInteger.INSTANCE.toBytes(Types.SMALLINT)) == 0 ? true : false; - } catch (Exception e) { - LOGGER.error(String.format( - "Checking VIEW_INDEX_ID data type for upgrade failed: %s. ", e.getMessage())); - } - return false; - } - - public static void updateViewIndexIdColumnDataTypeFromShortToLong( - PhoenixConnection metaConnection, byte[] rowKey, byte[] syscatBytes) { - try(Table sysTable = metaConnection.getQueryServices().getTable(syscatBytes)) { - KeyValue viewIndexIdKV = new KeyValue(rowKey, - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.DATA_TYPE_BYTES, - MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, - PInteger.INSTANCE.toBytes(Types.BIGINT)); - Put viewIndexIdPut = new Put(rowKey); - viewIndexIdPut.add(viewIndexIdKV); - sysTable.put(viewIndexIdPut); - LOGGER.info("Updated VIEW_INDEX_ID data type from SMALLINT TO BIGINT."); - } catch (Exception e) { - LOGGER.error(String.format( - "Upgrade/change VIEW_INDEX_ID data type failed: %s. ",e.getMessage())); - } - } - - //When upgrading to Phoenix 4.16 or 5.1, make each existing table's/view's DDL timestamp equal - //to its last updated row timestamp. 
- public static void bootstrapLastDDLTimestampForTablesAndViews(Connection metaConnection) - throws SQLException { - bootstrapLastDDLTimestamp(metaConnection, - new String[]{PTableType.TABLE.getSerializedValue(), - PTableType.VIEW.getSerializedValue()}); - } - - //When upgrading to Phoenix 5.2, make each existing index's DDL timestamp equal to its - //last updated row timestamp. - public static void bootstrapLastDDLTimestampForIndexes(Connection metaConnection) - throws SQLException { - bootstrapLastDDLTimestamp(metaConnection, - new String[]{PTableType.INDEX.getSerializedValue()}); - } - - /** - * Sets the LAST_DDL_TIMESTAMP for the metadata header rows for all objects of the given table - * types to their PHOENIX_ROW_TIMESTAMP() - * @param metaConnection a {@link PhoenixConnection} - * @param tableTypes array of serialized {@link PTableType} values - */ - private static void bootstrapLastDDLTimestamp(Connection metaConnection, String[] tableTypes) - throws SQLException { - String tableTypesString = Stream.of(tableTypes).collect( - Collectors.joining("','", "'", "'")).toString(); - String pkCols = TENANT_ID + ", " + TABLE_SCHEM + - ", " + TABLE_NAME + ", " + COLUMN_NAME + ", " + COLUMN_FAMILY; - final String upsertSql = - "UPSERT INTO " + SYSTEM_CATALOG_NAME + " (" + pkCols + ", " + LAST_DDL_TIMESTAMP + ")" - + " " + "SELECT " + pkCols + ", PHOENIX_ROW_TIMESTAMP() FROM " + SYSTEM_CATALOG_NAME - + " " + "WHERE " + TABLE_TYPE + " " + " in " + "(" + tableTypesString + ")"; - LOGGER.info("Setting DDL timestamps for table_type={} to row timestamps", tableTypesString); - try (PreparedStatement stmt = metaConnection.prepareStatement(upsertSql)) { - stmt.execute(); - metaConnection.commit(); - } - LOGGER.info("Setting DDL timestamps for table_type={} is complete", tableTypesString); - } - - public static boolean tableHasKeepDeleted(PhoenixConnection conn, String pTableName) - throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { - ConnectionQueryServices cqs = conn.getQueryServices(); - Admin admin = cqs.getAdmin(); - PTable table = conn.getTable(pTableName); - TableDescriptor tableDesc = admin.getDescriptor(SchemaUtil.getPhysicalTableName( - pTableName, cqs.getProps())); - return KeepDeletedCells.TRUE.equals(tableDesc.getColumnFamily( - SchemaUtil.getEmptyColumnFamily(table)).getKeepDeletedCells()); - } - - public static boolean tableHasMaxVersions(PhoenixConnection conn, String pTableName) - throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { - ConnectionQueryServices cqs = conn.getQueryServices(); - Admin admin = cqs.getAdmin(); - PTable table = conn.getTable(pTableName); - TableDescriptor tableDesc = admin.getDescriptor(SchemaUtil.getPhysicalTableName( - pTableName, cqs.getProps())); - return tableDesc.getColumnFamily( - SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions() > 1; - } + } else { + cqs.createSequence(null, newSequenceKey.getSchemaName(), + newSequenceKey.getSequenceName(), maxViewIndexId, 1, 1, Long.MIN_VALUE, + Long.MAX_VALUE, false, EnvironmentEdgeManager.currentTimeMillis()); + } + } catch (SequenceAlreadyExistsException sae) { + LOGGER.info("Tried to create view index sequence " + + SchemaUtil.getTableName(sae.getSchemaName(), sae.getSequenceName()) + + " during upgrade but it already existed. 
This is probably fine."); + } + } + } + } + + public static String getSysTableSnapshotName(long currentSystemTableTimestamp, String tableName) { + Format formatter = new SimpleDateFormat("yyyyMMddHHmmss"); + String date = formatter.format(new Date(EnvironmentEdgeManager.currentTimeMillis())); + String upgradingFrom = getVersion(currentSystemTableTimestamp); + return String.format("SNAPSHOT_%s_%s_TO_%s_%s", tableName, upgradingFrom, + CURRENT_CLIENT_VERSION, date); + } + + public static boolean isNoUpgradeSet(Properties props) { + return Boolean.compare(true, Boolean.valueOf(props.getProperty(DO_NOT_UPGRADE))) == 0; + } + + public static void doNotUpgradeOnFirstConnection(Properties props) { + props.setProperty(DO_NOT_UPGRADE, String.valueOf(true)); + } + + public static boolean isUpdateViewIndexIdColumnDataTypeFromShortToLongNeeded( + PhoenixConnection metaConnection, byte[] rowKey, byte[] syscatBytes) { + try (Table sysTable = metaConnection.getQueryServices().getTable(syscatBytes)) { + Scan s = new Scan(); + s.setRowPrefixFilter(rowKey); + s.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.DATA_TYPE_BYTES); + ResultScanner scanner = sysTable.getScanner(s); + Result result = scanner.next(); + Cell cell = result.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.DATA_TYPE_BYTES); + return Bytes.compareTo(CellUtil.cloneValue(cell), PInteger.INSTANCE.toBytes(Types.SMALLINT)) + == 0 ? true : false; + } catch (Exception e) { + LOGGER.error( + String.format("Checking VIEW_INDEX_ID data type for upgrade failed: %s. ", e.getMessage())); + } + return false; + } + + public static void updateViewIndexIdColumnDataTypeFromShortToLong( + PhoenixConnection metaConnection, byte[] rowKey, byte[] syscatBytes) { + try (Table sysTable = metaConnection.getQueryServices().getTable(syscatBytes)) { + KeyValue viewIndexIdKV = new KeyValue(rowKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.DATA_TYPE_BYTES, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, + PInteger.INSTANCE.toBytes(Types.BIGINT)); + Put viewIndexIdPut = new Put(rowKey); + viewIndexIdPut.add(viewIndexIdKV); + sysTable.put(viewIndexIdPut); + LOGGER.info("Updated VIEW_INDEX_ID data type from SMALLINT TO BIGINT."); + } catch (Exception e) { + LOGGER.error( + String.format("Upgrade/change VIEW_INDEX_ID data type failed: %s. ", e.getMessage())); + } + } + + // When upgrading to Phoenix 4.16 or 5.1, make each existing table's/view's DDL timestamp equal + // to its last updated row timestamp. + public static void bootstrapLastDDLTimestampForTablesAndViews(Connection metaConnection) + throws SQLException { + bootstrapLastDDLTimestamp(metaConnection, + new String[] { PTableType.TABLE.getSerializedValue(), PTableType.VIEW.getSerializedValue() }); + } + + // When upgrading to Phoenix 5.2, make each existing index's DDL timestamp equal to its + // last updated row timestamp. 
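Rendered out, the statement that the shared bootstrap helper below assembles for this case has roughly the following shape; the catalog name, column names, and the serialized type value 'i' are assumptions inferred from the constants used here, not captured output:

    // Illustrative only: the UPSERT built by bootstrapLastDDLTimestamp for PTableType.INDEX
    String renderedSql =
        "UPSERT INTO SYSTEM.CATALOG "
            + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LAST_DDL_TIMESTAMP) "
            + "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, PHOENIX_ROW_TIMESTAMP() "
            + "FROM SYSTEM.CATALOG WHERE TABLE_TYPE in ('i')";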
+ public static void bootstrapLastDDLTimestampForIndexes(Connection metaConnection) + throws SQLException { + bootstrapLastDDLTimestamp(metaConnection, + new String[] { PTableType.INDEX.getSerializedValue() }); + } + + /** + * Sets the LAST_DDL_TIMESTAMP for the metadata header rows for all objects of the given table + * types to their PHOENIX_ROW_TIMESTAMP() + * @param metaConnection a {@link PhoenixConnection} + * @param tableTypes array of serialized {@link PTableType} values + */ + private static void bootstrapLastDDLTimestamp(Connection metaConnection, String[] tableTypes) + throws SQLException { + String tableTypesString = + Stream.of(tableTypes).collect(Collectors.joining("','", "'", "'")).toString(); + String pkCols = TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME + ", " + COLUMN_NAME + ", " + + COLUMN_FAMILY; + final String upsertSql = + "UPSERT INTO " + SYSTEM_CATALOG_NAME + " (" + pkCols + ", " + LAST_DDL_TIMESTAMP + ")" + " " + + "SELECT " + pkCols + ", PHOENIX_ROW_TIMESTAMP() FROM " + SYSTEM_CATALOG_NAME + " " + + "WHERE " + TABLE_TYPE + " " + " in " + "(" + tableTypesString + ")"; + LOGGER.info("Setting DDL timestamps for table_type={} to row timestamps", tableTypesString); + try (PreparedStatement stmt = metaConnection.prepareStatement(upsertSql)) { + stmt.execute(); + metaConnection.commit(); + } + LOGGER.info("Setting DDL timestamps for table_type={} is complete", tableTypesString); + } + + public static boolean tableHasKeepDeleted(PhoenixConnection conn, String pTableName) + throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { + ConnectionQueryServices cqs = conn.getQueryServices(); + Admin admin = cqs.getAdmin(); + PTable table = conn.getTable(pTableName); + TableDescriptor tableDesc = + admin.getDescriptor(SchemaUtil.getPhysicalTableName(pTableName, cqs.getProps())); + return KeepDeletedCells.TRUE.equals( + tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getKeepDeletedCells()); + } + + public static boolean tableHasMaxVersions(PhoenixConnection conn, String pTableName) + throws SQLException, org.apache.hadoop.hbase.TableNotFoundException, IOException { + ConnectionQueryServices cqs = conn.getQueryServices(); + Admin admin = cqs.getAdmin(); + PTable table = conn.getTable(pTableName); + TableDescriptor tableDesc = + admin.getDescriptor(SchemaUtil.getPhysicalTableName(pTableName, cqs.getProps())); + return tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions() > 1; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpsertExecutor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpsertExecutor.java index 55dc0d81571..0eb452da358 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpsertExecutor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/UpsertExecutor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,132 +25,127 @@ import java.util.List; import org.apache.phoenix.schema.types.PDataType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Executes upsert statements on a provided {@code PreparedStatement} based on incoming - * {@code RECORDS}. An {@link UpsertListener} is notified each time the prepared statement - * is executed. + * {@code RECORDS}. An {@link UpsertListener} is notified each time the prepared statement is + * executed. */ public abstract class UpsertExecutor implements Closeable { + /** + * A listener that is called for events based on incoming JSON data. + */ + public interface UpsertListener { + /** - * A listener that is called for events based on incoming JSON data. + * Called when an upsert has been sucessfully completed. The given upsertCount is the total + * number of upserts completed on the caller up to this point. + * @param upsertCount total number of upserts that have been completed */ - public interface UpsertListener { - - /** - * Called when an upsert has been sucessfully completed. The given upsertCount is the total number of upserts - * completed on the caller up to this point. - * - * @param upsertCount total number of upserts that have been completed - */ - void upsertDone(long upsertCount); - - - /** - * Called when executing a prepared statement has failed on a given record. - * - * @param record the JSON record that was being upserted when the error occurred - */ - void errorOnRecord(RECORD record, Throwable throwable); - } - - private static final Logger LOGGER = LoggerFactory.getLogger(UpsertExecutor.class); - - protected final Connection conn; - protected final List columnInfos; - protected final List dataTypes; - protected final List> conversionFunctions; - protected final PreparedStatement preparedStatement; - protected final UpsertListener upsertListener; - protected long upsertCount = 0L; - protected boolean initFinished = false; // allow subclasses to finish initialization - - private static PreparedStatement createStatement(Connection conn, String tableName, - List columnInfoList) { - PreparedStatement preparedStatement; - try { - String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList); - LOGGER.info("Upserting SQL data with {}", upsertSql); - preparedStatement = conn.prepareStatement(upsertSql); - } catch (SQLException e) { - throw new RuntimeException(e); - } - return preparedStatement; - } + void upsertDone(long upsertCount); /** - * Construct with the definition of incoming columns, and the statement upon which upsert - * statements are to be performed. + * Called when executing a prepared statement has failed on a given record. 
+ * @param record the JSON record that was being upserted when the error occurred */ - public UpsertExecutor(Connection conn, String tableName, - List columnInfoList, UpsertListener upsertListener) { - this(conn, columnInfoList, createStatement(conn, tableName, columnInfoList), upsertListener); + void errorOnRecord(RECORD record, Throwable throwable); + } + + private static final Logger LOGGER = LoggerFactory.getLogger(UpsertExecutor.class); + + protected final Connection conn; + protected final List columnInfos; + protected final List dataTypes; + protected final List> conversionFunctions; + protected final PreparedStatement preparedStatement; + protected final UpsertListener upsertListener; + protected long upsertCount = 0L; + protected boolean initFinished = false; // allow subclasses to finish initialization + + private static PreparedStatement createStatement(Connection conn, String tableName, + List columnInfoList) { + PreparedStatement preparedStatement; + try { + String upsertSql = QueryUtil.constructUpsertStatement(tableName, columnInfoList); + LOGGER.info("Upserting SQL data with {}", upsertSql); + preparedStatement = conn.prepareStatement(upsertSql); + } catch (SQLException e) { + throw new RuntimeException(e); } - - /** Testing constructor. Do not use in prod. */ - @VisibleForTesting - protected UpsertExecutor(Connection conn, List columnInfoList, - PreparedStatement preparedStatement, UpsertListener upsertListener) { - this.conn = conn; - this.upsertListener = upsertListener; - this.columnInfos = columnInfoList; - this.preparedStatement = preparedStatement; - this.dataTypes = Lists.newArrayList(); - this.conversionFunctions = Lists.newArrayList(); + return preparedStatement; + } + + /** + * Construct with the definition of incoming columns, and the statement upon which upsert + * statements are to be performed. + */ + public UpsertExecutor(Connection conn, String tableName, List columnInfoList, + UpsertListener upsertListener) { + this(conn, columnInfoList, createStatement(conn, tableName, columnInfoList), upsertListener); + } + + /** Testing constructor. Do not use in prod. */ + @VisibleForTesting + protected UpsertExecutor(Connection conn, List columnInfoList, + PreparedStatement preparedStatement, UpsertListener upsertListener) { + this.conn = conn; + this.upsertListener = upsertListener; + this.columnInfos = columnInfoList; + this.preparedStatement = preparedStatement; + this.dataTypes = Lists.newArrayList(); + this.conversionFunctions = Lists.newArrayList(); + } + + /** + * Awkward protocol allows subclass constructors to finish initializing context before proceeding + * to record processing. + */ + protected void finishInit() { + for (ColumnInfo columnInfo : columnInfos) { + PDataType dataType = PDataType.fromTypeId(columnInfo.getSqlType()); + dataTypes.add(dataType); + conversionFunctions.add(createConversionFunction(dataType)); } - - /** - * Awkward protocol allows subclass constructors to finish initializing context before - * proceeding to record processing. - */ - protected void finishInit() { - for (ColumnInfo columnInfo : columnInfos) { - PDataType dataType = PDataType.fromTypeId(columnInfo.getSqlType()); - dataTypes.add(dataType); - conversionFunctions.add(createConversionFunction(dataType)); - } - this.initFinished = true; + this.initFinished = true; + } + + /** + * Execute upserts for each JSON record contained in the given iterable, notifying this instance's + * {@code UpsertListener} for each completed upsert. 
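A caller-side sketch of the listener that execute(...) notifies; the anonymous implementation and the String[] record type are illustrative assumptions, with String[] merely standing in for a concrete record type:

    UpsertExecutor.UpsertListener<String[]> listener = new UpsertExecutor.UpsertListener<String[]>() {
      @Override
      public void upsertDone(long upsertCount) {
        // invoked after each successful upsert with the running total
        if (upsertCount % 1000 == 0) {
          System.out.println("upserted " + upsertCount + " records");
        }
      }

      @Override
      public void errorOnRecord(String[] record, Throwable throwable) {
        // invoked when the prepared statement fails for a single record
        System.err.println("failed record " + java.util.Arrays.toString(record)
            + ": " + throwable.getMessage());
      }
    };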
+ * @param records iterable of JSON records to be upserted + */ + public void execute(Iterable records) { + if (!initFinished) { + finishInit(); } - - /** - * Execute upserts for each JSON record contained in the given iterable, notifying this instance's - * {@code UpsertListener} for each completed upsert. - * - * @param records iterable of JSON records to be upserted - */ - public void execute(Iterable records) { - if (!initFinished) { - finishInit(); - } - for (RECORD record : records) { - execute(record); - } + for (RECORD record : records) { + execute(record); } - - /** - * Upsert a single record. - * - * @param record JSON record containing the data to be upserted - */ - protected abstract void execute(RECORD record); - - @Override - public void close() throws IOException { - try { - preparedStatement.close(); - } catch (SQLException e) { - // An exception while closing the prepared statement is most likely a sign of a real problem, so we don't - // want to hide it with closeQuietly or something similar - throw new RuntimeException(e); - } + } + + /** + * Upsert a single record. + * @param record JSON record containing the data to be upserted + */ + protected abstract void execute(RECORD record); + + @Override + public void close() throws IOException { + try { + preparedStatement.close(); + } catch (SQLException e) { + // An exception while closing the prepared statement is most likely a sign of a real problem, + // so we don't + // want to hide it with closeQuietly or something similar + throw new RuntimeException(e); } + } - protected abstract Function createConversionFunction(PDataType dataType); + protected abstract Function createConversionFunction(PDataType dataType); } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ValidateLastDDLTimestampUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ValidateLastDDLTimestampUtil.java index 95b1d1537a3..3a9e436809b 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ValidateLastDDLTimestampUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ValidateLastDDLTimestampUtil.java @@ -1,13 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,208 +51,190 @@ */ public class ValidateLastDDLTimestampUtil { - private ValidateLastDDLTimestampUtil() {} + private ValidateLastDDLTimestampUtil() { + } - private static final Logger LOGGER = LoggerFactory - .getLogger(ValidateLastDDLTimestampUtil.class); - private static final List ALLOWED_PTABLE_TYPES = Arrays.asList(new PTableType[] - {PTableType.TABLE, PTableType.VIEW, PTableType.INDEX, PTableType.SYSTEM}); + private static final Logger LOGGER = LoggerFactory.getLogger(ValidateLastDDLTimestampUtil.class); + private static final List ALLOWED_PTABLE_TYPES = Arrays.asList( + new PTableType[] { PTableType.TABLE, PTableType.VIEW, PTableType.INDEX, PTableType.SYSTEM }); - public static String getInfoString(PName tenantId, List tableRefs) { - StringBuilder sb = new StringBuilder(); - sb.append(String.format("Tenant: %s, ", tenantId)); - for (TableRef tableRef : tableRefs) { - sb.append(String.format("{Schema: %s, Table: %s},", - tableRef.getTable().getSchemaName(), - tableRef.getTable().getTableName())); - } - return sb.toString(); + public static String getInfoString(PName tenantId, List tableRefs) { + StringBuilder sb = new StringBuilder(); + sb.append(String.format("Tenant: %s, ", tenantId)); + for (TableRef tableRef : tableRefs) { + sb.append(String.format("{Schema: %s, Table: %s},", tableRef.getTable().getSchemaName(), + tableRef.getTable().getTableName())); } - - /** - * Get whether last ddl timestamp validation is enabled on the connection - * @param connection - * @return true if it is enabled, false otherwise - */ - public static boolean getValidateLastDdlTimestampEnabled(PhoenixConnection connection) { - return connection.getQueryServices().getProps() - .getBoolean(QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, - QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED); - } - - /** - * Get whether last ddl timestamp validation is enabled in the Configuration - * @param config - * @return true if it is enabled, false otherwise - */ - public static boolean getValidateLastDdlTimestampEnabled(Configuration config) { - return config.getBoolean( - QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, - QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED); - } - - /** - * Verifies that table metadata for given tables is up-to-date in client cache with server. - * A random live region server is picked for invoking the RPC to validate LastDDLTimestamp. - * Retry once if there was an error performing the RPC, otherwise throw the Exception. 
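The retry-once behaviour described here follows a common pattern; a generic, simplified sketch (the real method additionally refreshes the live region server list before retrying and rethrows StaleMetadataCacheException without a retry):

    // Simplified retry-once helper; <T> is the RPC response type
    static <T> T callWithOneRetry(java.util.concurrent.Callable<T> rpc) throws Exception {
      try {
        return rpc.call();
      } catch (Exception first) {
        try {
          return rpc.call();            // retry exactly once
        } catch (Exception second) {
          second.addSuppressed(first);  // keep the original failure visible
          throw second;
        }
      }
    }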
- * - * @param allTableRefs - * @param doRetry - * @throws SQLException - */ - public static void validateLastDDLTimestamp(PhoenixConnection conn, - List allTableRefs, - boolean doRetry) throws SQLException { - List tableRefs = filterTableRefs(conn, allTableRefs); - if (tableRefs.isEmpty()) { - return; - } - String infoString = getInfoString(conn.getTenantId(), tableRefs); - try (Admin admin = conn.getQueryServices().getAdmin()) { - // get all live region servers - List regionServers - = conn.getQueryServices().getLiveRegionServers(); - // pick one at random - ServerName regionServer - = regionServers.get(ThreadLocalRandom.current().nextInt(regionServers.size())); - - // RPC - RegionServerEndpointProtos.RegionServerEndpointService.BlockingInterface - service = RegionServerEndpointProtos.RegionServerEndpointService - .newBlockingStub(admin.coprocessorService(regionServer)); - RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request - = getValidateDDLTimestampRequest(tableRefs); - service.validateLastDDLTimestamp(null, request); - } catch (Exception e) { - SQLException parsedException = ClientUtil.parseServerException(e); - if (parsedException instanceof StaleMetadataCacheException) { - throw parsedException; - } - //retry once for any exceptions other than StaleMetadataCacheException - LOGGER.error("Error in validating DDL timestamp for {}", infoString, parsedException); - if (doRetry) { - // update the list of live region servers - conn.getQueryServices().refreshLiveRegionServers(); - validateLastDDLTimestamp(conn, tableRefs, false); - return; - } - throw parsedException; - } + return sb.toString(); + } + + /** + * Get whether last ddl timestamp validation is enabled on the connection + * @return true if it is enabled, false otherwise + */ + public static boolean getValidateLastDdlTimestampEnabled(PhoenixConnection connection) { + return connection.getQueryServices().getProps().getBoolean( + QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, + QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED); + } + + /** + * Get whether last ddl timestamp validation is enabled in the Configuration + * @return true if it is enabled, false otherwise + */ + public static boolean getValidateLastDdlTimestampEnabled(Configuration config) { + return config.getBoolean(QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, + QueryServicesOptions.DEFAULT_LAST_DDL_TIMESTAMP_VALIDATION_ENABLED); + } + + /** + * Verifies that table metadata for given tables is up-to-date in client cache with server. A + * random live region server is picked for invoking the RPC to validate LastDDLTimestamp. Retry + * once if there was an error performing the RPC, otherwise throw the Exception. + */ + public static void validateLastDDLTimestamp(PhoenixConnection conn, List allTableRefs, + boolean doRetry) throws SQLException { + List tableRefs = filterTableRefs(conn, allTableRefs); + if (tableRefs.isEmpty()) { + return; } - - /** - * Build a request for the validateLastDDLTimestamp RPC for the given tables. - * 1. For a view, we need to add all its ancestors to the request - * in case something changed in the hierarchy. - * 2. For an index, we need to add its parent table to the request - * in case the index was dropped. - * 3. Add all indexes of a table/view in case index state was changed. 
- * - * @param tableRefs - * @return ValidateLastDDLTimestampRequest for the table in tableRef - */ - private static RegionServerEndpointProtos.ValidateLastDDLTimestampRequest - getValidateDDLTimestampRequest(List tableRefs) { - - RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.Builder requestBuilder - = RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.newBuilder(); - RegionServerEndpointProtos.LastDDLTimestampRequest.Builder innerBuilder; - - for (TableRef tableRef : tableRefs) { - - // validate all ancestors of this PTable if any - // index -> base table - // view -> parent view and its ancestors - // view index -> view and its ancestors - for (Map.Entry entry - : tableRef.getTable().getAncestorLastDDLTimestampMap().entrySet()) { - innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); - PTableKey ancestorKey = entry.getKey(); - setLastDDLTimestampRequestParameters(innerBuilder, ancestorKey, entry.getValue()); - requestBuilder.addLastDDLTimestampRequests(innerBuilder); - } - - // add the current table to the request - PTable ptable = tableRef.getTable(); - innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); - setLastDDLTimestampRequestParameters(innerBuilder, ptable.getKey(), - ptable.getLastDDLTimestamp()); - requestBuilder.addLastDDLTimestampRequests(innerBuilder); - - // add all indexes of the current table - for (PTable idxPTable : tableRef.getTable().getIndexes()) { - innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); - setLastDDLTimestampRequestParameters(innerBuilder, idxPTable.getKey(), - idxPTable.getLastDDLTimestamp()); - requestBuilder.addLastDDLTimestampRequests(innerBuilder); - } - } - return requestBuilder.build(); + String infoString = getInfoString(conn.getTenantId(), tableRefs); + try (Admin admin = conn.getQueryServices().getAdmin()) { + // get all live region servers + List regionServers = conn.getQueryServices().getLiveRegionServers(); + // pick one at random + ServerName regionServer = + regionServers.get(ThreadLocalRandom.current().nextInt(regionServers.size())); + + // RPC + RegionServerEndpointProtos.RegionServerEndpointService.BlockingInterface service = + RegionServerEndpointProtos.RegionServerEndpointService + .newBlockingStub(admin.coprocessorService(regionServer)); + RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request = + getValidateDDLTimestampRequest(tableRefs); + service.validateLastDDLTimestamp(null, request); + } catch (Exception e) { + SQLException parsedException = ClientUtil.parseServerException(e); + if (parsedException instanceof StaleMetadataCacheException) { + throw parsedException; + } + // retry once for any exceptions other than StaleMetadataCacheException + LOGGER.error("Error in validating DDL timestamp for {}", infoString, parsedException); + if (doRetry) { + // update the list of live region servers + conn.getQueryServices().refreshLiveRegionServers(); + validateLastDDLTimestamp(conn, tableRefs, false); + return; + } + throw parsedException; } - - /** - * For the given PTable, set the attributes on the LastDDLTimestampRequest. 
- */ - private static void setLastDDLTimestampRequestParameters( - RegionServerEndpointProtos.LastDDLTimestampRequest.Builder builder, - PTableKey key, long lastDDLTimestamp) { - String tableName = key.getTableName(); - String schemaName = key.getSchemaName(); - - // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) - // inherited view index is of the form V2#V1#VIndex, it does not exist in syscat - if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - int lastIndexOf = tableName.lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - String indexFullName = tableName.substring(lastIndexOf + 1); - tableName = SchemaUtil.getTableNameFromFullName(indexFullName); - schemaName = SchemaUtil.getSchemaNameFromFullName(indexFullName); - } - - byte[] tenantIDBytes = key.getTenantId() == null - ? HConstants.EMPTY_BYTE_ARRAY - : key.getTenantId().getBytes(); - byte[] schemaBytes = (schemaName == null || schemaName.isEmpty()) - ? HConstants.EMPTY_BYTE_ARRAY - : key.getSchemaName().getBytes(); - builder.setTenantId(ByteStringer.wrap(tenantIDBytes)); - builder.setSchemaName(ByteStringer.wrap(schemaBytes)); - builder.setTableName(ByteStringer.wrap(tableName.getBytes())); - builder.setLastDDLTimestamp(lastDDLTimestamp); + } + + /** + * Build a request for the validateLastDDLTimestamp RPC for the given tables. 1. For a view, we + * need to add all its ancestors to the request in case something changed in the hierarchy. 2. For + * an index, we need to add its parent table to the request in case the index was dropped. 3. Add + * all indexes of a table/view in case index state was changed. + * @return ValidateLastDDLTimestampRequest for the table in tableRef + */ + private static RegionServerEndpointProtos.ValidateLastDDLTimestampRequest + getValidateDDLTimestampRequest(List tableRefs) { + + RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.Builder requestBuilder = + RegionServerEndpointProtos.ValidateLastDDLTimestampRequest.newBuilder(); + RegionServerEndpointProtos.LastDDLTimestampRequest.Builder innerBuilder; + + for (TableRef tableRef : tableRefs) { + + // validate all ancestors of this PTable if any + // index -> base table + // view -> parent view and its ancestors + // view index -> view and its ancestors + for (Map.Entry entry : tableRef.getTable().getAncestorLastDDLTimestampMap() + .entrySet()) { + innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); + PTableKey ancestorKey = entry.getKey(); + setLastDDLTimestampRequestParameters(innerBuilder, ancestorKey, entry.getValue()); + requestBuilder.addLastDDLTimestampRequests(innerBuilder); + } + + // add the current table to the request + PTable ptable = tableRef.getTable(); + innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); + setLastDDLTimestampRequestParameters(innerBuilder, ptable.getKey(), + ptable.getLastDDLTimestamp()); + requestBuilder.addLastDDLTimestampRequests(innerBuilder); + + // add all indexes of the current table + for (PTable idxPTable : tableRef.getTable().getIndexes()) { + innerBuilder = RegionServerEndpointProtos.LastDDLTimestampRequest.newBuilder(); + setLastDDLTimestampRequestParameters(innerBuilder, idxPTable.getKey(), + idxPTable.getLastDDLTimestamp()); + requestBuilder.addLastDDLTimestampRequests(innerBuilder); + } } - - /** - * Filter out TableRefs for sending to server to validate last_ddl_timestamp. - * 1. table type is in ALLOWED_PTABLE_TYPES - * 2. 
table schema has a non-zero UPDATE_CACHE_FREQUENCY and cache entry is old. - * @param tableRefs - * @return - */ - private static List filterTableRefs(PhoenixConnection conn, - List tableRefs) { - List filteredTableRefs = tableRefs.stream() - .filter(tableRef -> ALLOWED_PTABLE_TYPES.contains(tableRef.getTable().getType()) - && !avoidRpc(conn, tableRef.getTable())) - .collect(Collectors.toList()); - return filteredTableRefs; + return requestBuilder.build(); + } + + /** + * For the given PTable, set the attributes on the LastDDLTimestampRequest. + */ + private static void setLastDDLTimestampRequestParameters( + RegionServerEndpointProtos.LastDDLTimestampRequest.Builder builder, PTableKey key, + long lastDDLTimestamp) { + String tableName = key.getTableName(); + String schemaName = key.getSchemaName(); + + // view(V) with Index (VIndex) -> child view (V1) -> grand child view (V2) + // inherited view index is of the form V2#V1#VIndex, it does not exist in syscat + if (tableName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { + int lastIndexOf = tableName.lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + String indexFullName = tableName.substring(lastIndexOf + 1); + tableName = SchemaUtil.getTableNameFromFullName(indexFullName); + schemaName = SchemaUtil.getSchemaNameFromFullName(indexFullName); } - /** - * Decide whether we should avoid the validate timestamp RPC for this table. If the schema of - * the table had specified a positive UCF to begin with, clients for this table should not see - * a regression when metadata caching re-design is enabled i.e. any server RPC should be - * skipped for them within the UCF window. - */ - private static boolean avoidRpc(PhoenixConnection conn, PTable table) { - try { - PTableRef ptr = conn.getTableRef(table.getKey()); - long tableUCF = table.getUpdateCacheFrequency(); - return tableUCF > (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS") - && tableUCF < (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("NEVER") - && MetaDataUtil.avoidMetadataRPC(conn, table, ptr, tableUCF); - } catch (TableNotFoundException e) { - //should not happen since this is called after query compilation and optimizer - //so the table would be in the cache - return false; - } + byte[] tenantIDBytes = + key.getTenantId() == null ? HConstants.EMPTY_BYTE_ARRAY : key.getTenantId().getBytes(); + byte[] schemaBytes = (schemaName == null || schemaName.isEmpty()) + ? HConstants.EMPTY_BYTE_ARRAY + : key.getSchemaName().getBytes(); + builder.setTenantId(ByteStringer.wrap(tenantIDBytes)); + builder.setSchemaName(ByteStringer.wrap(schemaBytes)); + builder.setTableName(ByteStringer.wrap(tableName.getBytes())); + builder.setLastDDLTimestamp(lastDDLTimestamp); + } + + /** + * Filter out TableRefs for sending to server to validate last_ddl_timestamp. 1. table type is in + * ALLOWED_PTABLE_TYPES 2. table schema has a non-zero UPDATE_CACHE_FREQUENCY and cache entry is + * old. + */ + private static List filterTableRefs(PhoenixConnection conn, List tableRefs) { + List filteredTableRefs = tableRefs.stream() + .filter(tableRef -> ALLOWED_PTABLE_TYPES.contains(tableRef.getTable().getType()) + && !avoidRpc(conn, tableRef.getTable())) + .collect(Collectors.toList()); + return filteredTableRefs; + } + + /** + * Decide whether we should avoid the validate timestamp RPC for this table. 
If the schema of the + * table had specified a positive UCF to begin with, clients for this table should not see a + * regression when metadata caching re-design is enabled i.e. any server RPC should be skipped for + * them within the UCF window. + */ + private static boolean avoidRpc(PhoenixConnection conn, PTable table) { + try { + PTableRef ptr = conn.getTableRef(table.getKey()); + long tableUCF = table.getUpdateCacheFrequency(); + return tableUCF > (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("ALWAYS") + && tableUCF < (Long) ConnectionProperty.UPDATE_CACHE_FREQUENCY.getValue("NEVER") + && MetaDataUtil.avoidMetadataRPC(conn, table, ptr, tableUCF); + } catch (TableNotFoundException e) { + // should not happen since this is called after query compilation and optimizer + // so the table would be in the cache + return false; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java index 7f0d030b642..3bdcfa7f792 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,29 +24,28 @@ import org.apache.commons.codec.binary.Hex; /** - * A formatter that formats a byte array to a hexadecimal string - * (with each byte converted to a 2-digit hex sequence) - * + * A formatter that formats a byte array to a hexadecimal string (with each byte converted to a + * 2-digit hex sequence) * @author snakhoda-sfdc */ public class VarBinaryFormatter extends Format { - private static final long serialVersionUID = -7940880118392024750L; + private static final long serialVersionUID = -7940880118392024750L; - public static final VarBinaryFormatter INSTANCE = new VarBinaryFormatter(); + public static final VarBinaryFormatter INSTANCE = new VarBinaryFormatter(); - @Override - public StringBuffer format(Object obj, StringBuffer toAppendTo, FieldPosition pos) { - if (!(obj instanceof byte[])) { - throw new IllegalArgumentException("VarBinaryFormatter can only format byte arrays"); - } - String hexString = Hex.encodeHexString((byte[]) obj); - toAppendTo.append(hexString); - return toAppendTo; - } + @Override + public StringBuffer format(Object obj, StringBuffer toAppendTo, FieldPosition pos) { + if (!(obj instanceof byte[])) { + throw new IllegalArgumentException("VarBinaryFormatter can only format byte arrays"); + } + String hexString = Hex.encodeHexString((byte[]) obj); + toAppendTo.append(hexString); + return toAppendTo; + } - @Override - public Object parseObject(String source, ParsePosition pos) { - return new UnsupportedOperationException(); - } + @Override + public Object parseObject(String source, ParsePosition pos) { + return new UnsupportedOperationException(); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java index d5b0feafa1a..ff80ce381e8 100644 --- 
a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewIndexIdRetrieveUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,42 +25,39 @@ import org.apache.phoenix.schema.types.PSmallint; public final class ViewIndexIdRetrieveUtil { - public static final int VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN = 9; - public static final int VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN = 3; - public static final int NULL_DATA_TYPE_VALUE = 0; + public static final int VIEW_INDEX_ID_BIGINT_TYPE_PTR_LEN = 9; + public static final int VIEW_INDEX_ID_SMALLINT_TYPE_VALUE_LEN = 3; + public static final int NULL_DATA_TYPE_VALUE = 0; - private ViewIndexIdRetrieveUtil() { + private ViewIndexIdRetrieveUtil() { - } + } - public static Cell buildNewCell(Cell viewIndexIdCell, byte[] newVal) { - KeyValue keyValue = new KeyValue( - viewIndexIdCell.getRowArray(), viewIndexIdCell.getRowOffset(), - viewIndexIdCell.getRowLength(), - viewIndexIdCell.getFamilyArray(), viewIndexIdCell.getFamilyOffset(), - viewIndexIdCell.getFamilyLength(), - viewIndexIdCell.getQualifierArray(), viewIndexIdCell.getQualifierOffset(), - viewIndexIdCell.getQualifierLength(), - viewIndexIdCell.getTimestamp(),KeyValue.Type.Put, - newVal, 0,newVal.length); - keyValue.setSequenceId(viewIndexIdCell.getSequenceId()); - return keyValue; - } + public static Cell buildNewCell(Cell viewIndexIdCell, byte[] newVal) { + KeyValue keyValue = new KeyValue(viewIndexIdCell.getRowArray(), viewIndexIdCell.getRowOffset(), + viewIndexIdCell.getRowLength(), viewIndexIdCell.getFamilyArray(), + viewIndexIdCell.getFamilyOffset(), viewIndexIdCell.getFamilyLength(), + viewIndexIdCell.getQualifierArray(), viewIndexIdCell.getQualifierOffset(), + viewIndexIdCell.getQualifierLength(), viewIndexIdCell.getTimestamp(), KeyValue.Type.Put, + newVal, 0, newVal.length); + keyValue.setSequenceId(viewIndexIdCell.getSequenceId()); + return keyValue; + } - public static Cell getRetrievedViewIndexIdCell(Cell viewIndexIdCell, boolean isShortToLong) { + public static Cell getRetrievedViewIndexIdCell(Cell viewIndexIdCell, boolean isShortToLong) { - ImmutableBytesWritable columnValue = - new ImmutableBytesWritable(CellUtil.cloneValue(viewIndexIdCell)); - ImmutableBytesWritable newValue = new ImmutableBytesWritable(); + ImmutableBytesWritable columnValue = + new ImmutableBytesWritable(CellUtil.cloneValue(viewIndexIdCell)); + ImmutableBytesWritable newValue = new ImmutableBytesWritable(); - byte[] newBytes; + byte[] newBytes; - if (isShortToLong) { - newBytes = PLong.INSTANCE.toBytes(PSmallint.INSTANCE.toObject(columnValue.get())); - } else { - newBytes = PSmallint.INSTANCE.toBytes(PLong.INSTANCE.toObject(columnValue.get())); - } - newValue.set(newBytes); - return buildNewCell(viewIndexIdCell, newValue.get()); + if (isShortToLong) { + newBytes = PLong.INSTANCE.toBytes(PSmallint.INSTANCE.toObject(columnValue.get())); + } else { + newBytes = PSmallint.INSTANCE.toBytes(PLong.INSTANCE.toObject(columnValue.get())); } -} \ No newline at end of file + newValue.set(newBytes); + return buildNewCell(viewIndexIdCell, newValue.get()); + } +} diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewUtil.java index 1f52bfa4bb9..70db95669df 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/ViewUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,14 +17,36 @@ */ package org.apache.phoenix.util; -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.phoenix.coprocessorclient.MetaDataEndpointImplConstants; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES; +import static org.apache.phoenix.schema.PTableImpl.getColumnsToClone; +import static org.apache.phoenix.util.PhoenixRuntime.CURRENT_SCN_ATTRIB; +import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; +import static org.apache.phoenix.util.SchemaUtil.getVarChars; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import javax.annotation.Nullable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -36,6 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.ColumnNameTrackingExpressionCompiler; +import org.apache.phoenix.coprocessorclient.MetaDataEndpointImplConstants; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import 
org.apache.phoenix.coprocessorclient.TableInfo; import org.apache.phoenix.coprocessorclient.WhereConstantParser; @@ -58,939 +83,886 @@ import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Objects; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_SPLITTABLE_SYSTEM_CATALOG; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES; -import static org.apache.phoenix.schema.PTableImpl.getColumnsToClone; -import static org.apache.phoenix.util.PhoenixRuntime.CURRENT_SCN_ATTRIB; -import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; -import static org.apache.phoenix.util.SchemaUtil.getVarChars; - public class ViewUtil { - private static final Logger logger = LoggerFactory.getLogger(ViewUtil.class); - - private static final byte[] LINK_ROW = new byte[]{PTable.LinkType.CHILD_TABLE.getSerializedValue()}; - - /** - * Find all the descendant views of a given table or view in a depth-first fashion. - * Note that apart from scanning the {@code parent->child } links, we also validate each view - * by trying to resolve it. - * Use {@link ViewUtil#findAllRelatives(Table, byte[], byte[], byte[], LinkType, - * TableViewFinderResult)} if you want to find other links and don't care about orphan results. - * - * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or SYSTEM.CHILD_LINK - * @param serverSideConfig server-side configuration - * @param tenantId tenantId of the view (null if it is a table or global view) - * @param schemaName schema name of the table/view - * @param tableOrViewName name of the table/view - * @param clientTimeStamp client timestamp - * @param findJustOneLegitimateChildView if true, we are only interested in knowing if there is - * at least one legitimate child view, so we return early. - * If false, we want to find all legitimate child views - * and all orphan views (views that no longer exist) - * stemming from this table/view and all of its legitimate - * child views. 
- * - * @return a Pair where the first element is a list of all legitimate child views (or just 1 - * child view in case findJustOneLegitimateChildView is true) and where the second element is - * a list of all orphan views stemming from this table/view and all of its legitimate child - * views (in case findJustOneLegitimateChildView is true, this list will be incomplete since we - * are not interested in it anyhow) - * - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - * @throws SQLException thrown if there is an error getting a connection to the server or an - * error retrieving the PTable for a child view - */ - public static Pair, List> findAllDescendantViews( - Table sysCatOrsysChildLink, Configuration serverSideConfig, byte[] tenantId, - byte[] schemaName, byte[] tableOrViewName, long clientTimeStamp, - boolean findJustOneLegitimateChildView) - throws IOException, SQLException { - List legitimateChildViews = new ArrayList<>(); - List orphanChildViews = new ArrayList<>(); - - return findAllDescendantViews(sysCatOrsysChildLink, serverSideConfig, tenantId, schemaName, - tableOrViewName, clientTimeStamp, legitimateChildViews, orphanChildViews, - findJustOneLegitimateChildView); - } - - public static Pair, List> findAllDescendantViews(Table sysCatOrsysChildLink, - Configuration serverSideConfig, byte[] parentTenantId, byte[] parentSchemaName, - byte[] parentTableOrViewName, long clientTimeStamp, List legitimateChildViews, - List orphanChildViews, boolean findJustOneLegitimateChildView) - throws IOException, SQLException{ - - return findAllDescendantViews(sysCatOrsysChildLink, null, serverSideConfig, - parentTenantId, parentSchemaName, parentTableOrViewName, clientTimeStamp, - legitimateChildViews, orphanChildViews, findJustOneLegitimateChildView, - new Pair<>(false, false)); - - } - - /** - * Find all the descendant views of a given table or view in a depth-first fashion. - * Note that apart from scanning the {@code parent->child } links, we also validate each view - * by trying to resolve it. - * Use {@link ViewUtil#findAllRelatives(Table, byte[], byte[], byte[], LinkType, - * TableViewFinderResult)} if you want to find other links and don't care about orphan results. - * - * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or SYSTEM.CHILD_LINK - * @param sysCat Table corresponding to SYSTEM.CATALOG especially for checking if TTL is defined - * at any of the descendant view. This can be null then we are not scanning it for - * checking if TTL is defined or not. - * @param serverSideConfig server-side configuration - * @param parentTenantId tenantId of the view (null if it is a table or global view) - * @param parentSchemaName schema name of the table/view - * @param parentTableOrViewName name of the table/view - * @param clientTimeStamp client timestamp - * @param legitimateChildViews List to be returned as first element of Pair containing - * legitimate child views - * @param orphanChildViews list to be returned as second element of Pair containing orphan views - * @param findJustOneLegitimateChildView if true, we are only interested in knowing if there is - * at least one legitimate child view, so we return early. - * If false, we want to find all legitimate child views - * and all orphan views (views that no longer exist) - * stemming from this table/view and all of its legitimate - * child views. 
- * @param scanSysCatForTTLDefinedOnAnyChildPair Boolean pair, where first element is used in - * {@link ViewUtil#findImmediateRelatedViews(Table, - * Table, byte[], byte[], byte[], LinkType, long, - * Pair)} to determine if we have to scan the - * sysCat or not for checking if TTL is defined. - * Second element is used to store the result if - * we found atleast one children in hierarchy where - * TTL is defined or not. - * - * @return a Pair where the first element is a list of all legitimate child views (or just 1 - * child view in case findJustOneLegitimateChildView is true) and where the second element is - * a list of all orphan views stemming from this table/view and all of its legitimate child - * views (in case findJustOneLegitimateChildView is true, this list will be incomplete since we - * are not interested in it anyhow) - * - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - * @throws SQLException thrown if there is an error getting a connection to the server or an - * error retrieving the PTable for a child view - */ - - - public static Pair, List> findAllDescendantViews( - Table sysCatOrsysChildLink, Table sysCat, Configuration serverSideConfig, - byte[] parentTenantId, byte[] parentSchemaName, byte[] parentTableOrViewName, - long clientTimeStamp, List legitimateChildViews, - List orphanChildViews, boolean findJustOneLegitimateChildView, - Pair scanSysCatForTTLDefinedOnAnyChildPair) - throws IOException, SQLException { - TableViewFinderResult currentResult = - findImmediateRelatedViews(sysCatOrsysChildLink, sysCat, parentTenantId, - parentSchemaName, parentTableOrViewName, LinkType.CHILD_TABLE, - clientTimeStamp, scanSysCatForTTLDefinedOnAnyChildPair); - for (TableInfo viewInfo : currentResult.getLinks()) { - byte[] viewTenantId = viewInfo.getTenantId(); - byte[] viewSchemaName = viewInfo.getSchemaName(); - byte[] viewName = viewInfo.getTableName(); - PTable view; - Properties props = new Properties(); - if (viewTenantId != null) { - props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(viewTenantId)); - } - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { - props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(clientTimeStamp)); - } - try (PhoenixConnection connection = - QueryUtil.getConnectionOnServer(props, serverSideConfig) - .unwrap(PhoenixConnection.class)) { - try { - view = connection.getTableNoCache( - SchemaUtil.getTableName(viewSchemaName, viewName)); - } catch (TableNotFoundException ex) { - logger.error("Found an orphan parent->child link keyed by this parent." - + " Parent Tenant Id: '" + Bytes.toString(parentTenantId) - + "'. Parent Schema Name: '" + Bytes.toString(parentSchemaName) - + "'. Parent Table/View Name: '" + Bytes.toString(parentTableOrViewName) - + "'. 
The child view which could not be resolved has ViewInfo: '" - + viewInfo + "'.", ex); - orphanChildViews.add(viewInfo); - // Prune orphan branches - continue; - } - - if (isLegitimateChildView(view, parentSchemaName, parentTableOrViewName)) { - legitimateChildViews.add(view); - // return early since we're only interested in knowing if there is at least one - // valid child view - if (findJustOneLegitimateChildView) { - break; - } - // Note that we only explore this branch if the current view is a legitimate - // child view, else we ignore it and move on to the next potential child view - findAllDescendantViews(sysCatOrsysChildLink, sysCat, serverSideConfig, - viewInfo.getTenantId(), viewInfo.getSchemaName(), - viewInfo.getTableName(), clientTimeStamp, legitimateChildViews, - orphanChildViews, findJustOneLegitimateChildView, - scanSysCatForTTLDefinedOnAnyChildPair); - } else { - logger.error("Found an orphan parent->child link keyed by this parent." - + " Parent Tenant Id: '" + Bytes.toString(parentTenantId) - + "'. Parent Schema Name: '" + Bytes.toString(parentSchemaName) - + "'. Parent Table/View Name: '" + Bytes.toString(parentTableOrViewName) - + "'. There currently exists a legitimate view of the same name which" - + " is not a descendant of this table/view. View Info: '" + viewInfo - + "'. Ignoring this view and not counting it as a child view."); - // Prune unrelated view branches left around due to orphan parent->child links - } - } + private static final Logger logger = LoggerFactory.getLogger(ViewUtil.class); + + private static final byte[] LINK_ROW = + new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() }; + + /** + * Find all the descendant views of a given table or view in a depth-first fashion. Note that + * apart from scanning the {@code parent->child } links, we also validate each view by trying to + * resolve it. Use + * {@link ViewUtil#findAllRelatives(Table, byte[], byte[], byte[], LinkType, TableViewFinderResult)} + * if you want to find other links and don't care about orphan results. + * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or + * SYSTEM.CHILD_LINK + * @param serverSideConfig server-side configuration + * @param tenantId tenantId of the view (null if it is a table or global + * view) + * @param schemaName schema name of the table/view + * @param tableOrViewName name of the table/view + * @param clientTimeStamp client timestamp + * @param findJustOneLegitimateChildView if true, we are only interested in knowing if there is at + * least one legitimate child view, so we return early. If + * false, we want to find all legitimate child views and all + * orphan views (views that no longer exist) stemming from + * this table/view and all of its legitimate child views. 
+ * @return a Pair where the first element is a list of all legitimate child views (or just 1 child + * view in case findJustOneLegitimateChildView is true) and where the second element is a + * list of all orphan views stemming from this table/view and all of its legitimate child + * views (in case findJustOneLegitimateChildView is true, this list will be incomplete + * since we are not interested in it anyhow) + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + * @throws SQLException thrown if there is an error getting a connection to the server or an error + * retrieving the PTable for a child view + */ + public static Pair, List> findAllDescendantViews( + Table sysCatOrsysChildLink, Configuration serverSideConfig, byte[] tenantId, byte[] schemaName, + byte[] tableOrViewName, long clientTimeStamp, boolean findJustOneLegitimateChildView) + throws IOException, SQLException { + List legitimateChildViews = new ArrayList<>(); + List orphanChildViews = new ArrayList<>(); + + return findAllDescendantViews(sysCatOrsysChildLink, serverSideConfig, tenantId, schemaName, + tableOrViewName, clientTimeStamp, legitimateChildViews, orphanChildViews, + findJustOneLegitimateChildView); + } + + public static Pair, List> findAllDescendantViews( + Table sysCatOrsysChildLink, Configuration serverSideConfig, byte[] parentTenantId, + byte[] parentSchemaName, byte[] parentTableOrViewName, long clientTimeStamp, + List legitimateChildViews, List orphanChildViews, + boolean findJustOneLegitimateChildView) throws IOException, SQLException { + + return findAllDescendantViews(sysCatOrsysChildLink, null, serverSideConfig, parentTenantId, + parentSchemaName, parentTableOrViewName, clientTimeStamp, legitimateChildViews, + orphanChildViews, findJustOneLegitimateChildView, new Pair<>(false, false)); + + } + + /** + * Find all the descendant views of a given table or view in a depth-first fashion. Note that + * apart from scanning the {@code parent->child } links, we also validate each view by trying to + * resolve it. Use + * {@link ViewUtil#findAllRelatives(Table, byte[], byte[], byte[], LinkType, TableViewFinderResult)} + * if you want to find other links and don't care about orphan results. + * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or + * SYSTEM.CHILD_LINK + * @param sysCat Table corresponding to SYSTEM.CATALOG especially + * for checking if TTL is defined at any of the + * descendant view. This can be null then we are not + * scanning it for checking if TTL is defined or not. + * @param serverSideConfig server-side configuration + * @param parentTenantId tenantId of the view (null if it is a table or + * global view) + * @param parentSchemaName schema name of the table/view + * @param parentTableOrViewName name of the table/view + * @param clientTimeStamp client timestamp + * @param legitimateChildViews List to be returned as first element of Pair + * containing legitimate child views + * @param orphanChildViews list to be returned as second element of Pair + * containing orphan views + * @param findJustOneLegitimateChildView if true, we are only interested in knowing if + * there is at least one legitimate child view, so we + * return early. If false, we want to find all + * legitimate child views and all orphan views (views + * that no longer exist) stemming from this + * table/view and all of its legitimate child views. 
+ * @param scanSysCatForTTLDefinedOnAnyChildPair Boolean pair, where first element is used in + * {@link ViewUtil#findImmediateRelatedViews(Table, Table, byte[], byte[], byte[], LinkType, long, Pair)} + * to determine if we have to scan the sysCat or not + * for checking if TTL is defined. Second element is + * used to store the result if we found atleast one + * children in hierarchy where TTL is defined or not. + * @return a Pair where the first element is a list of all legitimate child views (or just 1 child + * view in case findJustOneLegitimateChildView is true) and where the second element is a + * list of all orphan views stemming from this table/view and all of its legitimate child + * views (in case findJustOneLegitimateChildView is true, this list will be incomplete + * since we are not interested in it anyhow) + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + * @throws SQLException thrown if there is an error getting a connection to the server or an error + * retrieving the PTable for a child view + */ + + public static Pair, List> findAllDescendantViews( + Table sysCatOrsysChildLink, Table sysCat, Configuration serverSideConfig, byte[] parentTenantId, + byte[] parentSchemaName, byte[] parentTableOrViewName, long clientTimeStamp, + List legitimateChildViews, List orphanChildViews, + boolean findJustOneLegitimateChildView, + Pair scanSysCatForTTLDefinedOnAnyChildPair) throws IOException, SQLException { + TableViewFinderResult currentResult = findImmediateRelatedViews(sysCatOrsysChildLink, sysCat, + parentTenantId, parentSchemaName, parentTableOrViewName, LinkType.CHILD_TABLE, + clientTimeStamp, scanSysCatForTTLDefinedOnAnyChildPair); + for (TableInfo viewInfo : currentResult.getLinks()) { + byte[] viewTenantId = viewInfo.getTenantId(); + byte[] viewSchemaName = viewInfo.getSchemaName(); + byte[] viewName = viewInfo.getTableName(); + PTable view; + Properties props = new Properties(); + if (viewTenantId != null) { + props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(viewTenantId)); + } + if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { + props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(clientTimeStamp)); + } + try (PhoenixConnection connection = + QueryUtil.getConnectionOnServer(props, serverSideConfig).unwrap(PhoenixConnection.class)) { + try { + view = connection.getTableNoCache(SchemaUtil.getTableName(viewSchemaName, viewName)); + } catch (TableNotFoundException ex) { + logger.error( + "Found an orphan parent->child link keyed by this parent." + " Parent Tenant Id: '" + + Bytes.toString(parentTenantId) + "'. Parent Schema Name: '" + + Bytes.toString(parentSchemaName) + "'. Parent Table/View Name: '" + + Bytes.toString(parentTableOrViewName) + + "'. 
The child view which could not be resolved has ViewInfo: '" + viewInfo + "'.", + ex); + orphanChildViews.add(viewInfo); + // Prune orphan branches + continue; } - return new Pair<>(legitimateChildViews, orphanChildViews); - } - private static boolean isLegitimateChildView(PTable view, byte[] parentSchemaName, - byte[] parentTableOrViewName) { - return view != null && view.getParentSchemaName() != null && - view.getParentTableName() != null && - (Arrays.equals(view.getParentSchemaName().getBytes(), parentSchemaName) && - Arrays.equals(view.getParentTableName().getBytes(), parentTableOrViewName)); + if (isLegitimateChildView(view, parentSchemaName, parentTableOrViewName)) { + legitimateChildViews.add(view); + // return early since we're only interested in knowing if there is at least one + // valid child view + if (findJustOneLegitimateChildView) { + break; + } + // Note that we only explore this branch if the current view is a legitimate + // child view, else we ignore it and move on to the next potential child view + findAllDescendantViews(sysCatOrsysChildLink, sysCat, serverSideConfig, + viewInfo.getTenantId(), viewInfo.getSchemaName(), viewInfo.getTableName(), + clientTimeStamp, legitimateChildViews, orphanChildViews, findJustOneLegitimateChildView, + scanSysCatForTTLDefinedOnAnyChildPair); + } else { + logger.error("Found an orphan parent->child link keyed by this parent." + + " Parent Tenant Id: '" + Bytes.toString(parentTenantId) + "'. Parent Schema Name: '" + + Bytes.toString(parentSchemaName) + "'. Parent Table/View Name: '" + + Bytes.toString(parentTableOrViewName) + + "'. There currently exists a legitimate view of the same name which" + + " is not a descendant of this table/view. View Info: '" + viewInfo + + "'. Ignoring this view and not counting it as a child view."); + // Prune unrelated view branches left around due to orphan parent->child links + } + } } - - /** - * Returns relatives in a breadth-first fashion. Note that this is not resilient to orphan - * linking rows and we also do not try to resolve any of the views to ensure they are valid. - * Use {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], - * long, boolean)} if you are only interested in {@link LinkType#CHILD_TABLE} and need to be - * resilient to orphan linking rows. 
- * - * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or SYSTEM.CHILD_LINK - * @param tenantId tenantId of the key (null if it is a table or global view) - * @param schema schema name to use in the key - * @param table table/view name to use in the key - * @param linkType link type - * @param result containing all linked entities - * - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - */ - public static void findAllRelatives(Table sysCatOrsysChildLink, byte[] tenantId, byte[] schema, - byte[] table, PTable.LinkType linkType, TableViewFinderResult result) - throws IOException { - findAllRelatives(sysCatOrsysChildLink, tenantId, schema, table, linkType, - HConstants.LATEST_TIMESTAMP, result); + return new Pair<>(legitimateChildViews, orphanChildViews); + } + + private static boolean isLegitimateChildView(PTable view, byte[] parentSchemaName, + byte[] parentTableOrViewName) { + return view != null && view.getParentSchemaName() != null && view.getParentTableName() != null + && (Arrays.equals(view.getParentSchemaName().getBytes(), parentSchemaName) + && Arrays.equals(view.getParentTableName().getBytes(), parentTableOrViewName)); + } + + /** + * Returns relatives in a breadth-first fashion. Note that this is not resilient to orphan linking + * rows and we also do not try to resolve any of the views to ensure they are valid. Use + * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], long, boolean)} + * if you are only interested in {@link LinkType#CHILD_TABLE} and need to be resilient to orphan + * linking rows. + * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or SYSTEM.CHILD_LINK + * @param tenantId tenantId of the key (null if it is a table or global view) + * @param schema schema name to use in the key + * @param table table/view name to use in the key + * @param linkType link type + * @param result containing all linked entities + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + */ + public static void findAllRelatives(Table sysCatOrsysChildLink, byte[] tenantId, byte[] schema, + byte[] table, PTable.LinkType linkType, TableViewFinderResult result) throws IOException { + findAllRelatives(sysCatOrsysChildLink, tenantId, schema, table, linkType, + HConstants.LATEST_TIMESTAMP, result); + } + + private static void findAllRelatives(Table sysCatOrsysChildLink, byte[] tenantId, byte[] schema, + byte[] table, PTable.LinkType linkType, long timestamp, TableViewFinderResult result) + throws IOException { + TableViewFinderResult currentResult = + findImmediateRelatedViews(sysCatOrsysChildLink, tenantId, schema, table, linkType, timestamp); + result.addResult(currentResult); + for (TableInfo viewInfo : currentResult.getLinks()) { + findAllRelatives(sysCatOrsysChildLink, viewInfo.getTenantId(), viewInfo.getSchemaName(), + viewInfo.getTableName(), linkType, timestamp, result); } - - private static void findAllRelatives(Table sysCatOrsysChildLink, byte[] tenantId, byte[] schema, - byte[] table, PTable.LinkType linkType, long timestamp, TableViewFinderResult result) - throws IOException { - TableViewFinderResult currentResult = findImmediateRelatedViews(sysCatOrsysChildLink, - tenantId, schema, table, linkType, timestamp); - result.addResult(currentResult); - for (TableInfo viewInfo : currentResult.getLinks()) { - findAllRelatives(sysCatOrsysChildLink, viewInfo.getTenantId(), viewInfo.getSchemaName(), - viewInfo.getTableName(), 
linkType, timestamp, result); - } + } + + static TableViewFinderResult findImmediateRelatedViews(Table sysCatOrsysChildLink, + byte[] tenantId, byte[] schema, byte[] table, PTable.LinkType linkType, long timestamp) + throws IOException { + + return findImmediateRelatedViews(sysCatOrsysChildLink, null, tenantId, schema, table, linkType, + timestamp, new Pair<>(false, false)); + } + + /** + * Runs a scan on SYSTEM.CATALOG or SYSTEM.CHILD_LINK to get the immediate related tables/views. + * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or + * SYSTEM.CHILD_LINK + * @param sysCat Table corresponding to SYSTEM.CATALOG especially + * for checking if TTL is defined at immediate + * related view. This can be null then we are not + * scanning it for checking if TTL is defined or not. + * @param tenantId tenantId of the key (null if it is a table or + * global view) + * @param schema schema name to use in the key + * @param table table/view name to use in the key + * @param linkType link type + * @param timestamp client timestamp + * @param scanSysCatForTTLDefinedOnAnyChildPair Boolean pair, where first element is used to + * determine if we have to scan the sysCat or not for + * checking if TTL is defined. Second element is used + * to store the result if we found atleast one + * children in hierarchy where TTL is defined or not. + * @return TableViewFinderResult of the scan to get immediate related table/views. + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + */ + private static TableViewFinderResult findImmediateRelatedViews(Table sysCatOrsysChildLink, + @Nullable Table sysCat, byte[] tenantId, byte[] schema, byte[] table, PTable.LinkType linkType, + long timestamp, Pair scanSysCatForTTLDefinedOnAnyChildPair) + throws IOException { + if (linkType == PTable.LinkType.INDEX_TABLE || linkType == PTable.LinkType.EXCLUDED_COLUMN) { + throw new IllegalArgumentException("findAllRelatives does not support link type " + linkType); } - - static TableViewFinderResult findImmediateRelatedViews(Table sysCatOrsysChildLink, - byte[] tenantId, byte[] schema, byte[] table, PTable.LinkType linkType, long timestamp) - throws IOException { - - return findImmediateRelatedViews(sysCatOrsysChildLink, null, tenantId, schema, table, - linkType, timestamp, new Pair<>(false, false)); + if (sysCat == null) { + // Means no scan is need on SYSCAT for TTL Values of each child view. + scanSysCatForTTLDefinedOnAnyChildPair.setFirst(false); } - /** - * Runs a scan on SYSTEM.CATALOG or SYSTEM.CHILD_LINK to get the immediate related tables/views. - * @param sysCatOrsysChildLink Table corresponding to either SYSTEM.CATALOG or SYSTEM.CHILD_LINK - * @param sysCat Table corresponding to SYSTEM.CATALOG especially for checking if TTL is defined - * at immediate related view. This can be null then we are not scanning it for - * checking if TTL is defined or not. - * @param tenantId tenantId of the key (null if it is a table or global view) - * @param schema schema name to use in the key - * @param table table/view name to use in the key - * @param linkType link type - * @param timestamp client timestamp - * @param scanSysCatForTTLDefinedOnAnyChildPair Boolean pair, where first element is used to - * determine if we have to scan the - * sysCat or not for checking if TTL is defined. - * Second element is used to store the result if - * we found atleast one children in hierarchy where - * TTL is defined or not. 
- * @return TableViewFinderResult of the scan to get immediate related table/views. - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - */ - private static TableViewFinderResult findImmediateRelatedViews(Table sysCatOrsysChildLink, - @Nullable Table sysCat, byte[] tenantId, byte[] schema, byte[] table, - PTable.LinkType linkType, long timestamp, - Pair scanSysCatForTTLDefinedOnAnyChildPair) - throws IOException { - if (linkType==PTable.LinkType.INDEX_TABLE || linkType==PTable.LinkType.EXCLUDED_COLUMN) { - throw new IllegalArgumentException("findAllRelatives does not support link type " - + linkType); - } - if (sysCat == null) { - //Means no scan is need on SYSCAT for TTL Values of each child view. - scanSysCatForTTLDefinedOnAnyChildPair.setFirst(false); + byte[] key = SchemaUtil.getTableKey(tenantId, schema, table); + Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); + SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, + LINK_TYPE_BYTES, CompareOperator.EQUAL, linkType.getSerializedValueAsByteArray()); + linkFilter.setFilterIfMissing(true); + scan.setFilter(linkFilter); + scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); + if (linkType == PTable.LinkType.PARENT_TABLE) + scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES); + if (linkType == PTable.LinkType.PHYSICAL_TABLE) + scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES); + List tableInfoList = Lists.newArrayList(); + try (ResultScanner scanner = sysCatOrsysChildLink.getScanner(scan)) { + for (Result result = scanner.next(); (result != null); result = scanner.next()) { + byte[][] rowKeyMetaData = new byte[5][]; + byte[] viewTenantId = null; + getVarChars(result.getRow(), 5, rowKeyMetaData); + if (linkType == PTable.LinkType.PARENT_TABLE) { + viewTenantId = result.getValue(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES); + } else if (linkType == PTable.LinkType.CHILD_TABLE) { + viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; + } else if (linkType == PTable.LinkType.VIEW_INDEX_PARENT_TABLE) { + viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + } else if ( + linkType == PTable.LinkType.PHYSICAL_TABLE + && result.getValue(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES) != null + ) { + // do not links from indexes to their physical table + continue; } - - byte[] key = SchemaUtil.getTableKey(tenantId, schema, table); - Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, - timestamp); - SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, - LINK_TYPE_BYTES, CompareOperator.EQUAL, - linkType.getSerializedValueAsByteArray()); - linkFilter.setFilterIfMissing(true); - scan.setFilter(linkFilter); - scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); - if (linkType==PTable.LinkType.PARENT_TABLE) - scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES); - if (linkType==PTable.LinkType.PHYSICAL_TABLE) - scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES); - List tableInfoList = Lists.newArrayList(); - try (ResultScanner scanner = sysCatOrsysChildLink.getScanner(scan)) { - for (Result result = scanner.next(); (result != null); result = scanner.next()) { - byte[][] rowKeyMetaData = new byte[5][]; - byte[] viewTenantId = null; - getVarChars(result.getRow(), 5, rowKeyMetaData); - if (linkType==PTable.LinkType.PARENT_TABLE) { - viewTenantId = result.getValue(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES); - } else if 
(linkType==PTable.LinkType.CHILD_TABLE) { - viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; - } else if (linkType==PTable.LinkType.VIEW_INDEX_PARENT_TABLE) { - viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - } - else if (linkType==PTable.LinkType.PHYSICAL_TABLE && - result.getValue(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES)!=null) { - // do not links from indexes to their physical table - continue; - } - byte[] viewSchemaName = SchemaUtil.getSchemaNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) - .getBytes(StandardCharsets.UTF_8); - byte[] viewName = SchemaUtil.getTableNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) - .getBytes(StandardCharsets.UTF_8); - tableInfoList.add(new TableInfo(viewTenantId, viewSchemaName, viewName)); - if (scanSysCatForTTLDefinedOnAnyChildPair.getFirst()) { - byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName); - Scan ttlScan = MetaDataUtil.newTableRowsScan(viewKey, - MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); - Result ttlResult = sysCat.getScanner(ttlScan).next(); - if (ttlResult != null) { - if (ttlResult.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { - scanSysCatForTTLDefinedOnAnyChildPair.setSecond(true); - scanSysCatForTTLDefinedOnAnyChildPair.setFirst(false); - } - } - } - } - return new TableViewFinderResult(tableInfoList); - } - } - - public static TableViewFinderResult findChildViews(PhoenixConnection connection, String tenantId, String schema, - String tableName) throws IOException, SQLException { - // Find child views - TableViewFinderResult childViewsResult = new TableViewFinderResult(); - ReadOnlyProps readOnlyProps = connection.getQueryServices().getProps(); - for (int i=0; i<2; i++) { - try (Table sysCatOrSysChildLinkTable = connection.getQueryServices() - .getTable(SchemaUtil.getPhysicalName( - i==0 ? SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES, - readOnlyProps).getName())) { - byte[] tenantIdBytes = tenantId != null ? 
tenantId.getBytes() : null; - ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantIdBytes, - schema == null?null:schema.getBytes(), - tableName.getBytes(), PTable.LinkType.CHILD_TABLE, childViewsResult); - break; - } catch (TableNotFoundException ex) { - // try again with SYSTEM.CATALOG in case the schema is old - if (i == 1) { - // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow - throw ex; - } + byte[] viewSchemaName = SchemaUtil + .getSchemaNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) + .getBytes(StandardCharsets.UTF_8); + byte[] viewName = SchemaUtil + .getTableNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) + .getBytes(StandardCharsets.UTF_8); + tableInfoList.add(new TableInfo(viewTenantId, viewSchemaName, viewName)); + if (scanSysCatForTTLDefinedOnAnyChildPair.getFirst()) { + byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName); + Scan ttlScan = + MetaDataUtil.newTableRowsScan(viewKey, MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); + Result ttlResult = sysCat.getScanner(ttlScan).next(); + if (ttlResult != null) { + if (ttlResult.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { + scanSysCatForTTLDefinedOnAnyChildPair.setSecond(true); + scanSysCatForTTLDefinedOnAnyChildPair.setFirst(false); } + } } - return childViewsResult; + } + return new TableViewFinderResult(tableInfoList); } - - /** - * Check metadata to find if a given table/view has any immediate child views. Note that this - * is not resilient to orphan {@code parent->child } links. - * @param sysCatOrsysChildLink For older (pre-4.15.0) clients, we look for child links inside - * SYSTEM.CATALOG, otherwise we look for them inside - * SYSTEM.CHILD_LINK - * @param tenantId tenantId - * @param schemaName table schema name - * @param tableName table name - * @param timestamp passed client-side timestamp - * @return true if the given table has at least one child view - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - */ - public static boolean hasChildViews(Table sysCatOrsysChildLink, byte[] tenantId, - byte[] schemaName, byte[] tableName, long timestamp) throws IOException { - byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); - Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, - timestamp); - SingleColumnValueFilter linkFilter = - new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, - CompareOperator.EQUAL, - LinkType.CHILD_TABLE.getSerializedValueAsByteArray()) { - // if we found a row with the CHILD_TABLE link type we are done and can - // terminate the scan - @Override - public boolean filterAllRemaining() { - return matchedColumn; - } - }; - linkFilter.setFilterIfMissing(true); - scan.setFilter(linkFilter); - scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); - try (ResultScanner scanner = sysCatOrsysChildLink.getScanner(scan)) { - Result result = scanner.next(); - return result!=null; + } + + public static TableViewFinderResult findChildViews(PhoenixConnection connection, String tenantId, + String schema, String tableName) throws IOException, SQLException { + // Find child views + TableViewFinderResult childViewsResult = new TableViewFinderResult(); + ReadOnlyProps readOnlyProps = connection.getQueryServices().getProps(); + for (int i = 0; i < 2; i++) { + try (Table sysCatOrSysChildLinkTable = connection.getQueryServices() + .getTable(SchemaUtil + .getPhysicalName(i == 0 ? 
SYSTEM_CHILD_LINK_NAME_BYTES : SYSTEM_CATALOG_TABLE_BYTES, + readOnlyProps) + .getName())) { + byte[] tenantIdBytes = tenantId != null ? tenantId.getBytes() : null; + ViewUtil.findAllRelatives(sysCatOrSysChildLinkTable, tenantIdBytes, + schema == null ? null : schema.getBytes(), tableName.getBytes(), + PTable.LinkType.CHILD_TABLE, childViewsResult); + break; + } catch (TableNotFoundException ex) { + // try again with SYSTEM.CATALOG in case the schema is old + if (i == 1) { + // This means even SYSTEM.CATALOG was not found, so this is bad, rethrow + throw ex; } + } } - - /** - * Determines whether we should use SYSTEM.CATALOG or SYSTEM.CHILD_LINK to find - * {@code parent->child } links i.e. {@link LinkType#CHILD_TABLE}. - * If the client is older than 4.15.0 and the SYSTEM.CHILD_LINK table does not exist, we use - * the SYSTEM.CATALOG table. In all other cases, we use the SYSTEM.CHILD_LINK table. - * This is required for backwards compatibility. - * @param clientVersion client version - * @param conf server-side configuration - * @return name of the system table to be used - * @throws SQLException thrown if there is an error connecting to the server - */ - public static TableName getSystemTableForChildLinks(int clientVersion, - Configuration conf) throws SQLException, IOException { - byte[] fullTableName = SYSTEM_CHILD_LINK_NAME_BYTES; - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { - - try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(conf). - unwrap(PhoenixConnection.class)) { - connection.getTableNoCache(SYSTEM_CHILD_LINK_NAME); - } catch (TableNotFoundException e) { - // If this is an old client and the CHILD_LINK table doesn't exist i.e. metadata - // hasn't been updated since there was never a connection from a 4.15 client - fullTableName = SYSTEM_CATALOG_NAME_BYTES; - } catch (SQLException e) { - logger.error("Error getting a connection on the server : " + e); - throw e; - } + return childViewsResult; + } + + /** + * Check metadata to find if a given table/view has any immediate child views. Note that this is + * not resilient to orphan {@code parent->child } links. 
+ * @param sysCatOrsysChildLink For older (pre-4.15.0) clients, we look for child links inside + * SYSTEM.CATALOG, otherwise we look for them inside SYSTEM.CHILD_LINK + * @param tenantId tenantId + * @param schemaName table schema name + * @param tableName table name + * @param timestamp passed client-side timestamp + * @return true if the given table has at least one child view + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + */ + public static boolean hasChildViews(Table sysCatOrsysChildLink, byte[] tenantId, + byte[] schemaName, byte[] tableName, long timestamp) throws IOException { + byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); + SingleColumnValueFilter linkFilter = + new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOperator.EQUAL, + LinkType.CHILD_TABLE.getSerializedValueAsByteArray()) { + // if we found a row with the CHILD_TABLE link type we are done and can + // terminate the scan + @Override + public boolean filterAllRemaining() { + return matchedColumn; } - return SchemaUtil.getPhysicalTableName(fullTableName, conf); + }; + linkFilter.setFilterIfMissing(true); + scan.setFilter(linkFilter); + scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); + try (ResultScanner scanner = sysCatOrsysChildLink.getScanner(scan)) { + Result result = scanner.next(); + return result != null; } - public static boolean isDivergedView(PTable view) { - return view.getBaseColumnCount() == QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; - } - - public static boolean isViewDiverging(PColumn columnToDelete, PTable view, - long clientVersion) { - // If we are dropping a column from a pre-4.15 client, the only way to know if the - // view is diverging is by comparing the base column count - return !isDivergedView(view) && (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG ? - columnToDelete.getPosition() < view.getBaseColumnCount() : - columnToDelete.isDerived()); + } + + /** + * Determines whether we should use SYSTEM.CATALOG or SYSTEM.CHILD_LINK to find + * {@code parent->child } links i.e. {@link LinkType#CHILD_TABLE}. If the client is older than + * 4.15.0 and the SYSTEM.CHILD_LINK table does not exist, we use the SYSTEM.CATALOG table. In all + * other cases, we use the SYSTEM.CHILD_LINK table. This is required for backwards compatibility. + * @param clientVersion client version + * @param conf server-side configuration + * @return name of the system table to be used + * @throws SQLException thrown if there is an error connecting to the server + */ + public static TableName getSystemTableForChildLinks(int clientVersion, Configuration conf) + throws SQLException, IOException { + byte[] fullTableName = SYSTEM_CHILD_LINK_NAME_BYTES; + if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { + + try (PhoenixConnection connection = + QueryUtil.getConnectionOnServer(conf).unwrap(PhoenixConnection.class)) { + connection.getTableNoCache(SYSTEM_CHILD_LINK_NAME); + } catch (TableNotFoundException e) { + // If this is an old client and the CHILD_LINK table doesn't exist i.e. 
metadata + // hasn't been updated since there was never a connection from a 4.15 client + fullTableName = SYSTEM_CATALOG_NAME_BYTES; + } catch (SQLException e) { + logger.error("Error getting a connection on the server : " + e); + throw e; + } } - - /** - * Adds indexes of the parent table to inheritedIndexes if the index contains all required - * columns - */ - public static void addIndexesFromParent(PhoenixConnection connection, PTable view, - PTable parentTable, List inheritedIndexes) throws SQLException { - List parentTableIndexes = parentTable.getIndexes(); - for (PTable index : parentTableIndexes) { - boolean containsAllReqdCols = true; - // Ensure that all columns required to create index exist in the view too, - // since view columns may be removed. - IndexMaintainer indexMaintainer = index.getIndexMaintainer(parentTable, connection); - // Check that the columns required for the index pk are present in the view - Set> indexedColInfos = indexMaintainer.getIndexedColumnInfo(); - for (Pair colInfo : indexedColInfos) { - try { - String colFamily = colInfo.getFirst(); - String colName = colInfo.getSecond(); - if (colFamily == null) { - view.getColumnForColumnName(colName); - } else { - view.getColumnFamily(colFamily).getPColumnForColumnName(colName); - } - } catch (ColumnNotFoundException e) { - containsAllReqdCols = false; - break; - } - } - - // Ensure that constant columns (i.e. columns matched in the view WHERE clause) - // all exist in the index on the parent table. - for (PColumn col : view.getColumns()) { - if (col.isViewReferenced() || col.getViewConstant() != null) { - try { - // It'd be possible to use a local index that doesn't have all view - // constants, but the WHERE clause for the view statement (which is added to - // the index below) would fail to compile. - String indexColumnName = IndexUtil.getIndexColumnName(col); - index.getColumnForColumnName(indexColumnName); - } catch (ColumnNotFoundException e1) { - PColumn indexCol = null; - try { - String cf = col.getFamilyName()!=null ? 
- col.getFamilyName().getString() : null; - String colName = col.getName().getString(); - if (cf != null) { - indexCol = parentTable.getColumnFamily(cf) - .getPColumnForColumnName(colName); - } - else { - indexCol = parentTable.getColumnForColumnName(colName); - } - } catch (ColumnNotFoundException e2) { - // Ignore this index and continue with others - containsAllReqdCols = false; - break; - } - if (indexCol.getViewConstant()==null || Bytes.compareTo( - indexCol.getViewConstant(), col.getViewConstant())!=0) { - containsAllReqdCols = false; - break; - } - } - } - } - if (containsAllReqdCols) { - // Tack on view statement to index to get proper filtering for view - String viewStatement = IndexUtil.rewriteViewStatement(connection, index, - parentTable, view.getViewStatement()); - PName modifiedIndexName = PNameFactory.newName(view.getName().getString() - + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR + - index.getName().getString()); - // add the index table with a new name so that it does not conflict with the - // existing index table and set update cache frequency to that of the view - if (Objects.equal(viewStatement, index.getViewStatement())) { - inheritedIndexes.add(index); - } else { - inheritedIndexes.add(PTableImpl.builderWithColumns(index, - getColumnsToClone(index)) - .setTableName(modifiedIndexName) - .setViewStatement(viewStatement) - .setUpdateCacheFrequency(view.getUpdateCacheFrequency()) - //retain the tenantId from the index being inherited - .setTenantId(index.getTenantId()) - .setPhysicalNames(Collections.singletonList(index.getPhysicalName())) - .build()); - } - } + return SchemaUtil.getPhysicalTableName(fullTableName, conf); + } + + public static boolean isDivergedView(PTable view) { + return view.getBaseColumnCount() == QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; + } + + public static boolean isViewDiverging(PColumn columnToDelete, PTable view, long clientVersion) { + // If we are dropping a column from a pre-4.15 client, the only way to know if the + // view is diverging is by comparing the base column count + return !isDivergedView(view) && (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG + ? columnToDelete.getPosition() < view.getBaseColumnCount() + : columnToDelete.isDerived()); + } + + /** + * Adds indexes of the parent table to inheritedIndexes if the index contains all required columns + */ + public static void addIndexesFromParent(PhoenixConnection connection, PTable view, + PTable parentTable, List inheritedIndexes) throws SQLException { + List parentTableIndexes = parentTable.getIndexes(); + for (PTable index : parentTableIndexes) { + boolean containsAllReqdCols = true; + // Ensure that all columns required to create index exist in the view too, + // since view columns may be removed. 
+ IndexMaintainer indexMaintainer = index.getIndexMaintainer(parentTable, connection); + // Check that the columns required for the index pk are present in the view + Set> indexedColInfos = indexMaintainer.getIndexedColumnInfo(); + for (Pair colInfo : indexedColInfos) { + try { + String colFamily = colInfo.getFirst(); + String colName = colInfo.getSecond(); + if (colFamily == null) { + view.getColumnForColumnName(colName); + } else { + view.getColumnFamily(colFamily).getPColumnForColumnName(colName); + } + } catch (ColumnNotFoundException e) { + containsAllReqdCols = false; + break; } - } - - public static PTable addDerivedColumnsAndIndexesFromAncestors(PhoenixConnection connection, - PTable table) throws SQLException { - List ancestorList = Lists.newArrayList(table); - //First generate a list of tables from child to base table. First element will be the - //ultimate descendant, last element will be the base table. - PName parentName = table.getParentName(); - while (parentName != null && parentName.getString().length() > 0) { - PTable currentTable = ancestorList.get(ancestorList.size() -1); - String parentTableName = SchemaUtil.getTableName(currentTable.getParentSchemaName(), - currentTable.getParentTableName()).getString(); - PTable parentTable; + } + + // Ensure that constant columns (i.e. columns matched in the view WHERE clause) + // all exist in the index on the parent table. + for (PColumn col : view.getColumns()) { + if (col.isViewReferenced() || col.getViewConstant() != null) { + try { + // It'd be possible to use a local index that doesn't have all view + // constants, but the WHERE clause for the view statement (which is added to + // the index below) would fail to compile. + String indexColumnName = IndexUtil.getIndexColumnName(col); + index.getColumnForColumnName(indexColumnName); + } catch (ColumnNotFoundException e1) { + PColumn indexCol = null; try { - parentTable = connection.getTable(parentTableName); - } catch (TableNotFoundException tnfe) { - //check to see if there's a tenant-owned parent - parentTable = connection.getTable(table.getTenantId().getString(), parentTableName); + String cf = col.getFamilyName() != null ? 
col.getFamilyName().getString() : null; + String colName = col.getName().getString(); + if (cf != null) { + indexCol = parentTable.getColumnFamily(cf).getPColumnForColumnName(colName); + } else { + indexCol = parentTable.getColumnForColumnName(colName); + } + } catch (ColumnNotFoundException e2) { + // Ignore this index and continue with others + containsAllReqdCols = false; + break; } - ancestorList.add(parentTable); - parentName = parentTable.getParentName(); - } - //now add the columns from all ancestors up from the base table to the top-most view - if (ancestorList.size() > 1) { - for (int k = ancestorList.size() -2; k >= 0; k--) { - ancestorList.set(k, addDerivedColumnsAndIndexesFromParent(connection, - ancestorList.get(k), ancestorList.get(k +1))); + if ( + indexCol.getViewConstant() == null + || Bytes.compareTo(indexCol.getViewConstant(), col.getViewConstant()) != 0 + ) { + containsAllReqdCols = false; + break; } - return ancestorList.get(0); + } + } + } + if (containsAllReqdCols) { + // Tack on view statement to index to get proper filtering for view + String viewStatement = + IndexUtil.rewriteViewStatement(connection, index, parentTable, view.getViewStatement()); + PName modifiedIndexName = PNameFactory.newName(view.getName().getString() + + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR + index.getName().getString()); + // add the index table with a new name so that it does not conflict with the + // existing index table and set update cache frequency to that of the view + if (Objects.equal(viewStatement, index.getViewStatement())) { + inheritedIndexes.add(index); } else { - return table; + inheritedIndexes.add(PTableImpl.builderWithColumns(index, getColumnsToClone(index)) + .setTableName(modifiedIndexName).setViewStatement(viewStatement) + .setUpdateCacheFrequency(view.getUpdateCacheFrequency()) + // retain the tenantId from the index being inherited + .setTenantId(index.getTenantId()) + .setPhysicalNames(Collections.singletonList(index.getPhysicalName())).build()); } + } } - /** - * Inherit all indexes and columns from the parent - * @return table with inherited columns and indexes - */ - public static PTable addDerivedColumnsAndIndexesFromParent(PhoenixConnection connection, - PTable table, PTable parentTable) throws SQLException { - PTable pTable = addDerivedColumnsFromParent(connection, table, parentTable); - boolean hasIndexId = table.getViewIndexId() != null; - // For views : - if (!hasIndexId) { - // 1. need to resolve the views's own indexes so that any columns added by ancestors - // are included - List allIndexes = Lists.newArrayList(); - if (pTable !=null && pTable.getIndexes() !=null && !pTable.getIndexes().isEmpty()) { - for (PTable viewIndex : pTable.getIndexes()) { - PTable resolvedViewIndex = ViewUtil.addDerivedColumnsAndIndexesFromParent( - connection, viewIndex, pTable); - if (resolvedViewIndex!=null) - allIndexes.add(resolvedViewIndex); - } - } - - // 2. 
include any indexes from ancestors that can be used by this view - List inheritedIndexes = Lists.newArrayList(); - addIndexesFromParent(connection, pTable, parentTable, inheritedIndexes); - allIndexes.addAll(inheritedIndexes); - if (!allIndexes.isEmpty()) { - pTable = PTableImpl.builderWithColumns(pTable, getColumnsToClone(pTable)) - .setIndexes(allIndexes).build(); - } - } - return pTable; + } + + public static PTable addDerivedColumnsAndIndexesFromAncestors(PhoenixConnection connection, + PTable table) throws SQLException { + List ancestorList = Lists.newArrayList(table); + // First generate a list of tables from child to base table. First element will be the + // ultimate descendant, last element will be the base table. + PName parentName = table.getParentName(); + while (parentName != null && parentName.getString().length() > 0) { + PTable currentTable = ancestorList.get(ancestorList.size() - 1); + String parentTableName = SchemaUtil + .getTableName(currentTable.getParentSchemaName(), currentTable.getParentTableName()) + .getString(); + PTable parentTable; + try { + parentTable = connection.getTable(parentTableName); + } catch (TableNotFoundException tnfe) { + // check to see if there's a tenant-owned parent + parentTable = connection.getTable(table.getTenantId().getString(), parentTableName); + } + ancestorList.add(parentTable); + parentName = parentTable.getParentName(); } - - /** - * Inherit all columns from the parent unless it's an excluded column. - * If the same column is present in the parent and child (for table metadata created before - * PHOENIX-3534) we choose the child column over the parent column - * @return table with inherited columns - */ - public static PTable addDerivedColumnsFromParent(PhoenixConnection connection, - PTable view, PTable parentTable) - throws SQLException { - return addDerivedColumnsFromParent(connection, view, parentTable, true); + // now add the columns from all ancestors up from the base table to the top-most view + if (ancestorList.size() > 1) { + for (int k = ancestorList.size() - 2; k >= 0; k--) { + ancestorList.set(k, addDerivedColumnsAndIndexesFromParent(connection, ancestorList.get(k), + ancestorList.get(k + 1))); + } + return ancestorList.get(0); + } else { + return table; } - /** - * Inherit all columns from the parent unless it's an excluded column. - * If the same column is present in the parent and child (for table metadata created before - * PHOENIX-3534) we choose the child column over the parent column - * @return table with inherited columns - */ - public static PTable addDerivedColumnsFromParent(PhoenixConnection connection, - PTable view, PTable parentTable, - boolean recalculateBaseColumnCount) - throws SQLException { - // combine columns for view and view indexes - boolean hasIndexId = view.getViewIndexId() != null; - boolean isSalted = view.getBucketNum() != null; - boolean isDiverged = isDivergedView(view); - boolean isDivergedViewCreatedPre4_15 = isDiverged; - List allColumns = Lists.newArrayList(); - List excludedColumns = Lists.newArrayList(); - // add my own columns first in reverse order - List myColumns = view.getColumns(); - // skip salted column as it will be created automatically - myColumns = myColumns.subList(isSalted ? 
1 : 0, myColumns.size()); - for (int i = myColumns.size() - 1; i >= 0; i--) { - PColumn pColumn = myColumns.get(i); - if (pColumn.isExcluded()) { - // Diverged views created pre-4.15 will not have EXCLUDED_COLUMN linking rows - isDivergedViewCreatedPre4_15 = false; - excludedColumns.add(pColumn); - } - allColumns.add(pColumn); + } + + /** + * Inherit all indexes and columns from the parent + * @return table with inherited columns and indexes + */ + public static PTable addDerivedColumnsAndIndexesFromParent(PhoenixConnection connection, + PTable table, PTable parentTable) throws SQLException { + PTable pTable = addDerivedColumnsFromParent(connection, table, parentTable); + boolean hasIndexId = table.getViewIndexId() != null; + // For views : + if (!hasIndexId) { + // 1. need to resolve the views's own indexes so that any columns added by ancestors + // are included + List allIndexes = Lists.newArrayList(); + if (pTable != null && pTable.getIndexes() != null && !pTable.getIndexes().isEmpty()) { + for (PTable viewIndex : pTable.getIndexes()) { + PTable resolvedViewIndex = + ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, viewIndex, pTable); + if (resolvedViewIndex != null) allIndexes.add(resolvedViewIndex); } + } + + // 2. include any indexes from ancestors that can be used by this view + List inheritedIndexes = Lists.newArrayList(); + addIndexesFromParent(connection, pTable, parentTable, inheritedIndexes); + allIndexes.addAll(inheritedIndexes); + if (!allIndexes.isEmpty()) { + pTable = PTableImpl.builderWithColumns(pTable, getColumnsToClone(pTable)) + .setIndexes(allIndexes).build(); + } + } + return pTable; + } + + /** + * Inherit all columns from the parent unless it's an excluded column. If the same column is + * present in the parent and child (for table metadata created before PHOENIX-3534) we choose the + * child column over the parent column + * @return table with inherited columns + */ + public static PTable addDerivedColumnsFromParent(PhoenixConnection connection, PTable view, + PTable parentTable) throws SQLException { + return addDerivedColumnsFromParent(connection, view, parentTable, true); + } + + /** + * Inherit all columns from the parent unless it's an excluded column. If the same column is + * present in the parent and child (for table metadata created before PHOENIX-3534) we choose the + * child column over the parent column + * @return table with inherited columns + */ + public static PTable addDerivedColumnsFromParent(PhoenixConnection connection, PTable view, + PTable parentTable, boolean recalculateBaseColumnCount) throws SQLException { + // combine columns for view and view indexes + boolean hasIndexId = view.getViewIndexId() != null; + boolean isSalted = view.getBucketNum() != null; + boolean isDiverged = isDivergedView(view); + boolean isDivergedViewCreatedPre4_15 = isDiverged; + List allColumns = Lists.newArrayList(); + List excludedColumns = Lists.newArrayList(); + // add my own columns first in reverse order + List myColumns = view.getColumns(); + // skip salted column as it will be created automatically + myColumns = myColumns.subList(isSalted ? 
1 : 0, myColumns.size()); + for (int i = myColumns.size() - 1; i >= 0; i--) { + PColumn pColumn = myColumns.get(i); + if (pColumn.isExcluded()) { + // Diverged views created pre-4.15 will not have EXCLUDED_COLUMN linking rows + isDivergedViewCreatedPre4_15 = false; + excludedColumns.add(pColumn); + } + allColumns.add(pColumn); + } - // initialize map from with indexed expression to list of required data columns - // then remove the data columns that have not been dropped, so that we get the columns that - // have been dropped - Map> indexRequiredDroppedDataColMap = - Maps.newHashMapWithExpectedSize(view.getColumns().size()); - if (hasIndexId) { - int indexPosOffset = (isSalted ? 1 : 0) + (view.isMultiTenant() ? 1 : 0) + 1; - ColumnNameTrackingExpressionCompiler expressionCompiler = - new ColumnNameTrackingExpressionCompiler(); - for (int i = indexPosOffset; i < view.getPKColumns().size(); i++) { - PColumn indexColumn = view.getPKColumns().get(i); - try { - expressionCompiler.reset(); - String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn); - ParseNode parseNode = SQLParser.parseCondition(expressionStr); - parseNode.accept(expressionCompiler); - indexRequiredDroppedDataColMap.put(indexColumn, - Lists.newArrayList(expressionCompiler.getDataColumnNames())); - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } - } + // initialize map from with indexed expression to list of required data columns + // then remove the data columns that have not been dropped, so that we get the columns that + // have been dropped + Map> indexRequiredDroppedDataColMap = + Maps.newHashMapWithExpectedSize(view.getColumns().size()); + if (hasIndexId) { + int indexPosOffset = (isSalted ? 1 : 0) + (view.isMultiTenant() ? 1 : 0) + 1; + ColumnNameTrackingExpressionCompiler expressionCompiler = + new ColumnNameTrackingExpressionCompiler(); + for (int i = indexPosOffset; i < view.getPKColumns().size(); i++) { + PColumn indexColumn = view.getPKColumns().get(i); + try { + expressionCompiler.reset(); + String expressionStr = IndexUtil.getIndexColumnExpressionStr(indexColumn); + ParseNode parseNode = SQLParser.parseCondition(expressionStr); + parseNode.accept(expressionCompiler); + indexRequiredDroppedDataColMap.put(indexColumn, + Lists.newArrayList(expressionCompiler.getDataColumnNames())); + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible } + } + } - long maxTableTimestamp = view.getTimeStamp(); - long maxDDLTimestamp = view.getLastDDLTimestamp() != null ? view.getLastDDLTimestamp() : 0L; - int numPKCols = view.getPKColumns().size(); - // set the final table timestamp and DDL timestamp as the respective max timestamps of the - // view/view index or its ancestors - maxTableTimestamp = Math.max(maxTableTimestamp, parentTable.getTimeStamp()); - //Diverged views no longer inherit ddl timestamps from their ancestors because they don't - // inherit column changes - maxDDLTimestamp = Math.max(maxDDLTimestamp, - parentTable.getLastDDLTimestamp() != null ? parentTable.getLastDDLTimestamp() : 0L); - - if (hasIndexId) { - // add all pk columns of parent tables to indexes - // skip salted column as it will be added from the base table columns - int startIndex = parentTable.getBucketNum() != null ? 
1 : 0; - for (int index=startIndex; index> entry : indexRequiredDroppedDataColMap - .entrySet()) { - entry.getValue().remove(dataColumnName); - } - } - } else if (!isDivergedViewCreatedPre4_15) { - // For diverged views created by a pre-4.15 client, we don't need to inherit columns - // from its ancestors - inheritColumnsFromParent(view, parentTable, isDiverged, excludedColumns, allColumns); + long maxTableTimestamp = view.getTimeStamp(); + long maxDDLTimestamp = view.getLastDDLTimestamp() != null ? view.getLastDDLTimestamp() : 0L; + int numPKCols = view.getPKColumns().size(); + // set the final table timestamp and DDL timestamp as the respective max timestamps of the + // view/view index or its ancestors + maxTableTimestamp = Math.max(maxTableTimestamp, parentTable.getTimeStamp()); + // Diverged views no longer inherit ddl timestamps from their ancestors because they don't + // inherit column changes + maxDDLTimestamp = Math.max(maxDDLTimestamp, + parentTable.getLastDDLTimestamp() != null ? parentTable.getLastDDLTimestamp() : 0L); + + if (hasIndexId) { + // add all pk columns of parent tables to indexes + // skip salted column as it will be added from the base table columns + int startIndex = parentTable.getBucketNum() != null ? 1 : 0; + for (int index = startIndex; index < parentTable.getPKColumns().size(); index++) { + PColumn pkColumn = parentTable.getPKColumns().get(index); + // don't add the salt column of ancestor tables for view indexes, or deleted columns + // or constant columns from the view where statement + if ( + pkColumn.equals(SaltingUtil.SALTING_COLUMN) || pkColumn.isExcluded() + || pkColumn.getViewConstant() != null + ) { + continue; } - // at this point indexRequiredDroppedDataColMap only contain the columns required by a view - // index that have dropped - for (Map.Entry> entry : indexRequiredDroppedDataColMap.entrySet()) { - if (!entry.getValue().isEmpty()) { - PColumn indexColumnToBeDropped = entry.getKey(); - if (SchemaUtil.isPKColumn(indexColumnToBeDropped)) { - // if an indexed column was dropped in an ancestor then we - // cannot use this index an more - // TODO figure out a way to actually drop this view index - return null; - } else { - allColumns.remove(indexColumnToBeDropped); - } - } + pkColumn = IndexUtil.getIndexPKColumn(++numPKCols, pkColumn); + int existingColumnIndex = allColumns.indexOf(pkColumn); + if (existingColumnIndex == -1) { + allColumns.add(0, pkColumn); } - - List columnsToAdd = Lists.newArrayList(); - int position = isSalted ? 1 : 0; - // allColumns contains the columns in the reverse order - for (int i = allColumns.size() - 1; i >= 0; i--) { - PColumn column = allColumns.get(i); - if (view.getColumns().contains(column)) { - // for views this column is not derived from an ancestor - columnsToAdd.add(new PColumnImpl(column, position++)); - } else { - columnsToAdd.add(new PColumnImpl(column, true, position++)); - } + } + for (int j = 0; j < parentTable.getColumns().size(); j++) { + PColumn tableColumn = parentTable.getColumns().get(j); + if (tableColumn.isExcluded()) { + continue; } - // we need to include the salt column when setting the base table column count in order to - // maintain b/w compatibility - int baseTableColumnCount = view.getBaseColumnCount(); - if (recalculateBaseColumnCount) { - baseTableColumnCount = isDiverged ? - QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT : - columnsToAdd.size() - myColumns.size() + (isSalted ? 
1 : 0); + String dataColumnName = tableColumn.getName().getString(); + // remove from list of columns since it has not been dropped + for (Map.Entry> entry : indexRequiredDroppedDataColMap.entrySet()) { + entry.getValue().remove(dataColumnName); } - // Inherit view-modifiable properties from the parent table/view if the current view has - // not previously modified this property - long updateCacheFreq = (view.getType() != PTableType.VIEW || - view.hasViewModifiedUpdateCacheFrequency()) ? - view.getUpdateCacheFrequency() : parentTable.getUpdateCacheFrequency(); - Boolean useStatsForParallelization = (view.getType() != PTableType.VIEW || - view.hasViewModifiedUseStatsForParallelization()) ? - view.useStatsForParallelization() : parentTable.useStatsForParallelization(); - - // When creating a PTable for views or view indexes, use the baseTable PTable for attributes - // inherited from the physical base table. - // if a TableProperty is not valid on a view we set it to the base table value - // if a TableProperty is valid on a view and is not mutable on a view we set it to the base - // table value - // if a TableProperty is valid on a view and is mutable on a view, we use the value set - // on the view if the view had previously modified the property, otherwise we propagate the - // value from the base table (see PHOENIX-4763) - PTable pTable = PTableImpl.builderWithColumns(view, columnsToAdd) - .setImmutableRows(parentTable.isImmutableRows()) - .setDisableWAL(parentTable.isWALDisabled()) - .setMultiTenant(parentTable.isMultiTenant()) - .setStoreNulls(parentTable.getStoreNulls()) - .setTransactionProvider(parentTable.getTransactionProvider()) - .setAutoPartitionSeqName(parentTable.getAutoPartitionSeqName()) - .setAppendOnlySchema(parentTable.isAppendOnlySchema()) - .setBaseColumnCount(baseTableColumnCount) - .setBaseTableLogicalName(parentTable.getBaseTableLogicalName()) - .setTimeStamp(maxTableTimestamp) - .setExcludedColumns(ImmutableList.copyOf(excludedColumns)) - .setUpdateCacheFrequency(updateCacheFreq) - .setUseStatsForParallelization(useStatsForParallelization) - .setLastDDLTimestamp(maxDDLTimestamp) - .build(); - pTable = WhereConstantParser.addViewInfoToPColumnsIfNeeded(pTable); - - return pTable; + } + } else if (!isDivergedViewCreatedPre4_15) { + // For diverged views created by a pre-4.15 client, we don't need to inherit columns + // from its ancestors + inheritColumnsFromParent(view, parentTable, isDiverged, excludedColumns, allColumns); } - - /** - * Inherit all columns from the parent unless it's an excluded column. - * If the same column is present in the parent and child - * (for table metadata created before PHOENIX-3534 or when - * {@link org.apache.phoenix.query.QueryServices#ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK} is - * enabled) we choose the latest column. - * Note that we don't need to call this method for views created before 4.15 since they - * already contain all the columns from their ancestors. - * @param view PTable of the view - * @param parentTable PTable of the view's parent - * @param isDiverged true if it is a diverged view - * @param excludedColumns list of excluded columns - * @param allColumns list of all columns. Initially this contains just the columns in the view. 
- * We will populate inherited columns by adding them to this list - */ - static void inheritColumnsFromParent(PTable view, PTable parentTable, - boolean isDiverged, List excludedColumns, List allColumns) { - List currAncestorTableCols = PTableImpl.getColumnsToClone(parentTable); - if (currAncestorTableCols != null) { - // add the ancestor columns in reverse order so that the final column list - // (reversed outside of this method invocation) - // contains ancestor columns and then the view columns in the right order - for (int j = currAncestorTableCols.size() - 1; j >= 0; j--) { - PColumn ancestorColumn = currAncestorTableCols.get(j); - // for diverged views we always include pk columns of the base table. We - // have to include these pk columns to be able to support adding pk - // columns to the diverged view. - // We only include regular columns that were created before the view - // diverged. - if (isDiverged && ancestorColumn.getFamilyName() != null - && ancestorColumn.getTimestamp() > view.getTimeStamp()) { - // If this is a diverged view, the ancestor column is not a PK and the ancestor - // column was added after the view diverged, ignore this ancestor column. - continue; - } - // need to check if this ancestor column is in the list of excluded (dropped) - // columns of the view - int existingExcludedIndex = excludedColumns.indexOf(ancestorColumn); - if (existingExcludedIndex != -1) { - // if it is, only exclude the ancestor column if it was created before the - // column was dropped in the view in order to handle the case where - // a base table column is dropped in a view, then dropped in the - // base table and then added back to the base table - if (ancestorColumn.getTimestamp() <= excludedColumns.get(existingExcludedIndex) - .getTimestamp()) { - continue; - } - } - // A diverged view from a pre-4.15 client won't ever go in this case since - // isExcluded was introduced in 4.15. If this is a 4.15+ client, excluded columns - // will be identifiable via PColumn#isExcluded() - if (ancestorColumn.isExcluded()) { - excludedColumns.add(ancestorColumn); - } else { - int existingColumnIndex = allColumns.indexOf(ancestorColumn); - if (existingColumnIndex != -1) { - // For non-diverged views, if the same column exists in a parent and child, - // we keep the latest column. 
- PColumn existingColumn = allColumns.get(existingColumnIndex); - if (!isDiverged && ancestorColumn.getTimestamp() > - existingColumn.getTimestamp()) { - allColumns.remove(existingColumnIndex); - // Remove the existing column and add the ancestor - // column at the end and make sure to mark it as - // derived - allColumns.add(new PColumnImpl(ancestorColumn, true, - ancestorColumn.getPosition())); - } else { - // Since this is a column from the ancestor, - // mark it as derived - allColumns.set(existingColumnIndex, - new PColumnImpl(existingColumn, true, - existingColumn.getPosition())); - } - } else { - // Since this is a column from the ancestor, - // mark it as derived - allColumns.add(new PColumnImpl(ancestorColumn, true, - ancestorColumn.getPosition())); - } - } - } - } - // remove the excluded columns if the timestamp of the excludedColumn is newer - for (PColumn excludedColumn : excludedColumns) { - int index = allColumns.indexOf(excludedColumn); - if (index != -1) { - if (allColumns.get(index).getTimestamp() <= excludedColumn.getTimestamp()) { - allColumns.remove(excludedColumn); - } - } + // at this point indexRequiredDroppedDataColMap only contain the columns required by a view + // index that have dropped + for (Map.Entry> entry : indexRequiredDroppedDataColMap.entrySet()) { + if (!entry.getValue().isEmpty()) { + PColumn indexColumnToBeDropped = entry.getKey(); + if (SchemaUtil.isPKColumn(indexColumnToBeDropped)) { + // if an indexed column was dropped in an ancestor then we + // cannot use this index an more + // TODO figure out a way to actually drop this view index + return null; + } else { + allColumns.remove(indexColumnToBeDropped); } + } } - /** - * See PHOENIX-4763. If we are modifying any table-level properties that are mutable on a view, - * we mark these cells in SYSTEM.CATALOG with tags to indicate that this view property should - * not be kept in-sync with the base table and so we shouldn't propagate the base table's - * property value when resolving the view - * @param tableMetaData list of mutations on the view - * @param parent PTable of the parent or null - */ - public static void addTagsToPutsForViewAlteredProperties(List tableMetaData, - PTable parent, ExtendedCellBuilder extendedCellBuilder) { - byte[] parentUpdateCacheFreqBytes = null; - byte[] parentUseStatsForParallelizationBytes = null; - if (parent != null) { - parentUpdateCacheFreqBytes = new byte[PLong.INSTANCE.getByteSize()]; - PLong.INSTANCE.getCodec().encodeLong(parent.getUpdateCacheFrequency(), - parentUpdateCacheFreqBytes, 0); - if (parent.useStatsForParallelization() != null) { - parentUseStatsForParallelizationBytes = - PBoolean.INSTANCE.toBytes(parent.useStatsForParallelization()); - } + List columnsToAdd = Lists.newArrayList(); + int position = isSalted ? 1 : 0; + // allColumns contains the columns in the reverse order + for (int i = allColumns.size() - 1; i >= 0; i--) { + PColumn column = allColumns.get(i); + if (view.getColumns().contains(column)) { + // for views this column is not derived from an ancestor + columnsToAdd.add(new PColumnImpl(column, position++)); + } else { + columnsToAdd.add(new PColumnImpl(column, true, position++)); + } + } + // we need to include the salt column when setting the base table column count in order to + // maintain b/w compatibility + int baseTableColumnCount = view.getBaseColumnCount(); + if (recalculateBaseColumnCount) { + baseTableColumnCount = isDiverged + ? 
QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT + : columnsToAdd.size() - myColumns.size() + (isSalted ? 1 : 0); + } + // Inherit view-modifiable properties from the parent table/view if the current view has + // not previously modified this property + long updateCacheFreq = + (view.getType() != PTableType.VIEW || view.hasViewModifiedUpdateCacheFrequency()) + ? view.getUpdateCacheFrequency() + : parentTable.getUpdateCacheFrequency(); + Boolean useStatsForParallelization = + (view.getType() != PTableType.VIEW || view.hasViewModifiedUseStatsForParallelization()) + ? view.useStatsForParallelization() + : parentTable.useStatsForParallelization(); + + // When creating a PTable for views or view indexes, use the baseTable PTable for attributes + // inherited from the physical base table. + // if a TableProperty is not valid on a view we set it to the base table value + // if a TableProperty is valid on a view and is not mutable on a view we set it to the base + // table value + // if a TableProperty is valid on a view and is mutable on a view, we use the value set + // on the view if the view had previously modified the property, otherwise we propagate the + // value from the base table (see PHOENIX-4763) + PTable pTable = PTableImpl.builderWithColumns(view, columnsToAdd) + .setImmutableRows(parentTable.isImmutableRows()).setDisableWAL(parentTable.isWALDisabled()) + .setMultiTenant(parentTable.isMultiTenant()).setStoreNulls(parentTable.getStoreNulls()) + .setTransactionProvider(parentTable.getTransactionProvider()) + .setAutoPartitionSeqName(parentTable.getAutoPartitionSeqName()) + .setAppendOnlySchema(parentTable.isAppendOnlySchema()) + .setBaseColumnCount(baseTableColumnCount) + .setBaseTableLogicalName(parentTable.getBaseTableLogicalName()) + .setTimeStamp(maxTableTimestamp).setExcludedColumns(ImmutableList.copyOf(excludedColumns)) + .setUpdateCacheFrequency(updateCacheFreq) + .setUseStatsForParallelization(useStatsForParallelization) + .setLastDDLTimestamp(maxDDLTimestamp).build(); + pTable = WhereConstantParser.addViewInfoToPColumnsIfNeeded(pTable); + + return pTable; + } + + /** + * Inherit all columns from the parent unless it's an excluded column. If the same column is + * present in the parent and child (for table metadata created before PHOENIX-3534 or when + * {@link org.apache.phoenix.query.QueryServices#ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK} is + * enabled) we choose the latest column. Note that we don't need to call this method for views + * created before 4.15 since they already contain all the columns from their ancestors. + * @param view PTable of the view + * @param parentTable PTable of the view's parent + * @param isDiverged true if it is a diverged view + * @param excludedColumns list of excluded columns + * @param allColumns list of all columns. Initially this contains just the columns in the + * view. 
We will populate inherited columns by adding them to this list + */ + static void inheritColumnsFromParent(PTable view, PTable parentTable, boolean isDiverged, + List excludedColumns, List allColumns) { + List currAncestorTableCols = PTableImpl.getColumnsToClone(parentTable); + if (currAncestorTableCols != null) { + // add the ancestor columns in reverse order so that the final column list + // (reversed outside of this method invocation) + // contains ancestor columns and then the view columns in the right order + for (int j = currAncestorTableCols.size() - 1; j >= 0; j--) { + PColumn ancestorColumn = currAncestorTableCols.get(j); + // for diverged views we always include pk columns of the base table. We + // have to include these pk columns to be able to support adding pk + // columns to the diverged view. + // We only include regular columns that were created before the view + // diverged. + if ( + isDiverged && ancestorColumn.getFamilyName() != null + && ancestorColumn.getTimestamp() > view.getTimeStamp() + ) { + // If this is a diverged view, the ancestor column is not a PK and the ancestor + // column was added after the view diverged, ignore this ancestor column. + continue; + } + // need to check if this ancestor column is in the list of excluded (dropped) + // columns of the view + int existingExcludedIndex = excludedColumns.indexOf(ancestorColumn); + if (existingExcludedIndex != -1) { + // if it is, only exclude the ancestor column if it was created before the + // column was dropped in the view in order to handle the case where + // a base table column is dropped in a view, then dropped in the + // base table and then added back to the base table + if ( + ancestorColumn.getTimestamp() + <= excludedColumns.get(existingExcludedIndex).getTimestamp() + ) { + continue; + } } - for (Mutation m: tableMetaData) { - if (m instanceof Put) { - MetaDataUtil.conditionallyAddTagsToPutCells((Put)m, - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY_BYTES, - extendedCellBuilder, - parentUpdateCacheFreqBytes, - MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); - MetaDataUtil.conditionallyAddTagsToPutCells((Put)m, - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION_BYTES, - extendedCellBuilder, - parentUseStatsForParallelizationBytes, - MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); - //MetaDataUtil.conditionallyAddTagsToPutCells((Put)m, - // PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - // PhoenixDatabaseMetaData.PHOENIX_TTL_BYTES, - // extendedCellBuilder, - // parentPhoenixTTLBytes, - // MetaDataEndpointImpl.VIEW_MODIFIED_PROPERTY_BYTES); + // A diverged view from a pre-4.15 client won't ever go in this case since + // isExcluded was introduced in 4.15. If this is a 4.15+ client, excluded columns + // will be identifiable via PColumn#isExcluded() + if (ancestorColumn.isExcluded()) { + excludedColumns.add(ancestorColumn); + } else { + int existingColumnIndex = allColumns.indexOf(ancestorColumn); + if (existingColumnIndex != -1) { + // For non-diverged views, if the same column exists in a parent and child, + // we keep the latest column. 
+ PColumn existingColumn = allColumns.get(existingColumnIndex); + if (!isDiverged && ancestorColumn.getTimestamp() > existingColumn.getTimestamp()) { + allColumns.remove(existingColumnIndex); + // Remove the existing column and add the ancestor + // column at the end and make sure to mark it as + // derived + allColumns.add(new PColumnImpl(ancestorColumn, true, ancestorColumn.getPosition())); + } else { + // Since this is a column from the ancestor, + // mark it as derived + allColumns.set(existingColumnIndex, + new PColumnImpl(existingColumn, true, existingColumn.getPosition())); } + } else { + // Since this is a column from the ancestor, + // mark it as derived + allColumns.add(new PColumnImpl(ancestorColumn, true, ancestorColumn.getPosition())); + } } + } + } + // remove the excluded columns if the timestamp of the excludedColumn is newer + for (PColumn excludedColumn : excludedColumns) { + int index = allColumns.indexOf(excludedColumn); + if (index != -1) { + if (allColumns.get(index).getTimestamp() <= excludedColumn.getTimestamp()) { + allColumns.remove(excludedColumn); + } + } + } + } + + /** + * See PHOENIX-4763. If we are modifying any table-level properties that are mutable on a view, we + * mark these cells in SYSTEM.CATALOG with tags to indicate that this view property should not be + * kept in-sync with the base table and so we shouldn't propagate the base table's property value + * when resolving the view + * @param tableMetaData list of mutations on the view + * @param parent PTable of the parent or null + */ + public static void addTagsToPutsForViewAlteredProperties(List tableMetaData, + PTable parent, ExtendedCellBuilder extendedCellBuilder) { + byte[] parentUpdateCacheFreqBytes = null; + byte[] parentUseStatsForParallelizationBytes = null; + if (parent != null) { + parentUpdateCacheFreqBytes = new byte[PLong.INSTANCE.getByteSize()]; + PLong.INSTANCE.getCodec().encodeLong(parent.getUpdateCacheFrequency(), + parentUpdateCacheFreqBytes, 0); + if (parent.useStatsForParallelization() != null) { + parentUseStatsForParallelizationBytes = + PBoolean.INSTANCE.toBytes(parent.useStatsForParallelization()); + } + } + for (Mutation m : tableMetaData) { + if (m instanceof Put) { + MetaDataUtil.conditionallyAddTagsToPutCells((Put) m, + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY_BYTES, extendedCellBuilder, + parentUpdateCacheFreqBytes, MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); + MetaDataUtil.conditionallyAddTagsToPutCells((Put) m, + PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION_BYTES, extendedCellBuilder, + parentUseStatsForParallelizationBytes, + MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); + // MetaDataUtil.conditionallyAddTagsToPutCells((Put)m, + // PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + // PhoenixDatabaseMetaData.PHOENIX_TTL_BYTES, + // extendedCellBuilder, + // parentPhoenixTTLBytes, + // MetaDataEndpointImpl.VIEW_MODIFIED_PROPERTY_BYTES); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java index 69afe0eb721..0c8b434f60e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file 
except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,39 +22,39 @@ import org.apache.phoenix.execute.MutationState; /** - * Utility functions shared between IndexRegionObserver and MutationState for annotating the - * HBase WAL with Phoenix-level metadata about mutations. + * Utility functions shared between IndexRegionObserver and MutationState for annotating the HBase + * WAL with Phoenix-level metadata about mutations. */ public class WALAnnotationUtil { - /** - * Add metadata about a mutation onto the attributes of the mutation. This will be written as - * an annotation into the HBase write ahead log (WAL) when the mutation is processed - * server-side, usually in IndexRegionObserver - * @param m Mutation - * @param externalSchemaId Byte array of a lookup id to an external schema registry - */ - public static void annotateMutation(Mutation m, byte[] externalSchemaId) { - if (!m.getDurability().equals(Durability.SKIP_WAL)) { - if (externalSchemaId != null) { - m.setAttribute(MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString(), - externalSchemaId); - } - } + /** + * Add metadata about a mutation onto the attributes of the mutation. This will be written as an + * annotation into the HBase write ahead log (WAL) when the mutation is processed server-side, + * usually in IndexRegionObserver + * @param m Mutation + * @param externalSchemaId Byte array of a lookup id to an external schema registry + */ + public static void annotateMutation(Mutation m, byte[] externalSchemaId) { + if (!m.getDurability().equals(Durability.SKIP_WAL)) { + if (externalSchemaId != null) { + m.setAttribute(MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString(), + externalSchemaId); + } } + } - public static void annotateMutation(Mutation m, byte[] tenantId, byte[] schemaName, - byte[] logicalTableName, byte[] tableType, byte[] ddlTimestamp) { - if (!m.getDurability().equals(Durability.SKIP_WAL)) { - if (tenantId != null) { - m.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), tenantId); - } - m.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), schemaName); - m.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), - logicalTableName); - m.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), tableType); - m.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), ddlTimestamp); - } + public static void annotateMutation(Mutation m, byte[] tenantId, byte[] schemaName, + byte[] logicalTableName, byte[] tableType, byte[] ddlTimestamp) { + if (!m.getDurability().equals(Durability.SKIP_WAL)) { + if (tenantId != null) { + m.setAttribute(MutationState.MutationMetadataType.TENANT_ID.toString(), tenantId); + } + m.setAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString(), schemaName); + m.setAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString(), + logicalTableName); + m.setAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString(), tableType); + m.setAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString(), ddlTimestamp); } + } } diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java index 2d5033f0e64..206e3395277 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,11 +20,9 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; -import java.sql.Timestamp; import java.sql.Types; import java.util.Base64; import java.util.List; -import java.util.Properties; import javax.annotation.Nullable; @@ -41,6 +39,8 @@ import org.apache.phoenix.schema.types.PDataType.PDataCodec; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.ReadOnlyProps; @@ -48,189 +48,184 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; - /** {@link UpsertExecutor} over {@link CSVRecord}s. */ public class CsvUpsertExecutor extends UpsertExecutor { - private static final Logger LOGGER = LoggerFactory.getLogger(CsvUpsertExecutor.class); - - protected final String arrayElementSeparator; - - /** Testing constructor. Do not use in prod. 
*/ - @VisibleForTesting - protected CsvUpsertExecutor(Connection conn, List columnInfoList, - PreparedStatement stmt, UpsertListener upsertListener, - String arrayElementSeparator) { - super(conn, columnInfoList, stmt, upsertListener); - this.arrayElementSeparator = arrayElementSeparator; - finishInit(); - } - - public CsvUpsertExecutor(Connection conn, String tableName, - List columnInfoList, UpsertListener upsertListener, - String arrayElementSeparator) { - super(conn, tableName, columnInfoList, upsertListener); - this.arrayElementSeparator = arrayElementSeparator; - finishInit(); - } - - @Override - protected void execute(CSVRecord csvRecord) { - try { - if (csvRecord.size() < conversionFunctions.size()) { - String message = String.format("CSV record does not have enough values (has %d, but needs %d)", - csvRecord.size(), conversionFunctions.size()); - throw new IllegalArgumentException(message); - } - for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { - Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex)); - if (sqlValue != null) { - preparedStatement.setObject(fieldIndex + 1, sqlValue); - } else { - preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); - } - } - preparedStatement.execute(); - upsertListener.upsertDone(++upsertCount); - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - // Even though this is an error we only log it with debug logging because we're notifying the - // listener, and it can do its own logging if needed - LOGGER.debug("Error on CSVRecord " + csvRecord, e); - } - upsertListener.errorOnRecord(csvRecord, e); + private static final Logger LOGGER = LoggerFactory.getLogger(CsvUpsertExecutor.class); + + protected final String arrayElementSeparator; + + /** Testing constructor. Do not use in prod. 
*/ + @VisibleForTesting + protected CsvUpsertExecutor(Connection conn, List columnInfoList, + PreparedStatement stmt, UpsertListener upsertListener, + String arrayElementSeparator) { + super(conn, columnInfoList, stmt, upsertListener); + this.arrayElementSeparator = arrayElementSeparator; + finishInit(); + } + + public CsvUpsertExecutor(Connection conn, String tableName, List columnInfoList, + UpsertListener upsertListener, String arrayElementSeparator) { + super(conn, tableName, columnInfoList, upsertListener); + this.arrayElementSeparator = arrayElementSeparator; + finishInit(); + } + + @Override + protected void execute(CSVRecord csvRecord) { + try { + if (csvRecord.size() < conversionFunctions.size()) { + String message = + String.format("CSV record does not have enough values (has %d, but needs %d)", + csvRecord.size(), conversionFunctions.size()); + throw new IllegalArgumentException(message); + } + for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { + Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex)); + if (sqlValue != null) { + preparedStatement.setObject(fieldIndex + 1, sqlValue); + } else { + preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); } + } + preparedStatement.execute(); + upsertListener.upsertDone(++upsertCount); + } catch (Exception e) { + if (LOGGER.isDebugEnabled()) { + // Even though this is an error we only log it with debug logging because we're notifying + // the + // listener, and it can do its own logging if needed + LOGGER.debug("Error on CSVRecord " + csvRecord, e); + } + upsertListener.errorOnRecord(csvRecord, e); } - - @Override - protected Function createConversionFunction(PDataType dataType) { - if (dataType.isArrayType()) { - return new ArrayDatatypeConversionFunction( - new StringToArrayConverter( - conn, - arrayElementSeparator, - PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE))); + } + + @Override + protected Function createConversionFunction(PDataType dataType) { + if (dataType.isArrayType()) { + return new ArrayDatatypeConversionFunction( + new StringToArrayConverter(conn, arrayElementSeparator, + PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE))); + } else { + return new SimpleDatatypeConversionFunction(dataType, this.conn); + } + } + + /** + * Performs typed conversion from String values to a given column value type. 
+ */ + static class SimpleDatatypeConversionFunction implements Function { + + private final PDataType dataType; + private final PDataCodec codec; + private final DateUtil.DateTimeParser dateTimeParser; + private final String binaryEncoding; + + SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) { + ReadOnlyProps props; + try { + props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps(); + } catch (SQLException e) { + throw new RuntimeException(e); + } + this.dataType = dataType; + PDataCodec codec = dataType.getCodec(); + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + codec = DateUtil.getCodecFor(dataType); + // TODO: move to DateUtil + String dateFormat; + int dateSqlType = dataType.getResultSetSqlType(); + if (dateSqlType == Types.DATE) { + dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT); + } else if (dateSqlType == Types.TIME) { + dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT); } else { - return new SimpleDatatypeConversionFunction(dataType, this.conn); + dateFormat = + props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT); } + String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, + QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE); + this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId); + } else { + this.dateTimeParser = null; + } + this.codec = codec; + this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, + QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING); } - /** - * Performs typed conversion from String values to a given column value type. - */ - static class SimpleDatatypeConversionFunction implements Function { - - private final PDataType dataType; - private final PDataCodec codec; - private final DateUtil.DateTimeParser dateTimeParser; - private final String binaryEncoding; - - SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) { - ReadOnlyProps props; - try { - props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps(); - } catch (SQLException e) { - throw new RuntimeException(e); - } - this.dataType = dataType; - PDataCodec codec = dataType.getCodec(); - if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - codec = DateUtil.getCodecFor(dataType); - // TODO: move to DateUtil - String dateFormat; - int dateSqlType = dataType.getResultSetSqlType(); - if (dateSqlType == Types.DATE) { - dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB, - DateUtil.DEFAULT_DATE_FORMAT); - } else if (dateSqlType == Types.TIME) { - dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB, - DateUtil.DEFAULT_TIME_FORMAT); - } else { - dateFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, - DateUtil.DEFAULT_TIMESTAMP_FORMAT); - } - String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, - QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE); - this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId); - } else { - this.dateTimeParser = null; - } - this.codec = codec; - this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, - QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING); + @Nullable + @Override + public Object apply(@Nullable String input) { + if (input == null || input.isEmpty()) { + return null; + } + if (dataType == PTimestamp.INSTANCE) { + return DateUtil.parseTimestamp(input); + } + if (dateTimeParser != null) { + long epochTime = 
dateTimeParser.parseDateTime(input); + byte[] byteValue = new byte[dataType.getByteSize()]; + codec.encodeLong(epochTime, byteValue, 0); + return dataType.toObject(byteValue); + } else if (dataType == PBoolean.INSTANCE) { + switch (input.toLowerCase()) { + case "true": + case "t": + case "1": + return Boolean.TRUE; + case "false": + case "f": + case "0": + return Boolean.FALSE; + default: + throw new RuntimeException("Invalid boolean value: '" + input + + "', must be one of ['true','t','1','false','f','0']"); } - - @Nullable - @Override - public Object apply(@Nullable String input) { - if (input == null || input.isEmpty()) { - return null; + } else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE) { + EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase()); + Object object = null; + switch (format) { + case BASE64: + object = Base64.getDecoder().decode(input); + if (object == null) { + throw new IllegalDataException("Input: [" + input + "] is not base64 encoded"); } - if (dataType == PTimestamp.INSTANCE) { - return DateUtil.parseTimestamp(input); - } - if (dateTimeParser != null) { - long epochTime = dateTimeParser.parseDateTime(input); - byte[] byteValue = new byte[dataType.getByteSize()]; - codec.encodeLong(epochTime, byteValue, 0); - return dataType.toObject(byteValue); - } else if (dataType == PBoolean.INSTANCE) { - switch (input.toLowerCase()) { - case "true": - case "t": - case "1": - return Boolean.TRUE; - case "false": - case "f": - case "0": - return Boolean.FALSE; - default: - throw new RuntimeException("Invalid boolean value: '" + input - + "', must be one of ['true','t','1','false','f','0']"); - } - }else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE){ - EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase()); - Object object = null; - switch (format) { - case BASE64: - object = Base64.getDecoder().decode(input); - if (object == null) { throw new IllegalDataException( - "Input: [" + input + "] is not base64 encoded"); } - break; - case ASCII: - object = Bytes.toBytes(input); - break; - default: - throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\""); - } - return object; - } - return dataType.toObject(input); + break; + case ASCII: + object = Bytes.toBytes(input); + break; + default: + throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\""); } + return object; + } + return dataType.toObject(input); } + } - /** - * Converts string representations of arrays into Phoenix arrays of the correct type. - */ - private static class ArrayDatatypeConversionFunction implements Function { + /** + * Converts string representations of arrays into Phoenix arrays of the correct type. 
+ */ + private static class ArrayDatatypeConversionFunction implements Function { - private final StringToArrayConverter arrayConverter; + private final StringToArrayConverter arrayConverter; - private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) { - this.arrayConverter = arrayConverter; - } + private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) { + this.arrayConverter = arrayConverter; + } - @Nullable - @Override - public Object apply(@Nullable String input) { - try { - return arrayConverter.toArray(input); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } + @Nullable + @Override + public Object apply(@Nullable String input) { + try { + return arrayConverter.toArray(input); + } catch (SQLException e) { + throw new RuntimeException(e); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java index 005b1203c1c..893d1d883bb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/csv/StringToArrayConverter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,58 +20,47 @@ import java.sql.Array; import java.sql.Connection; import java.sql.SQLException; -import java.util.Properties; - -import javax.annotation.Nullable; import org.apache.phoenix.schema.types.PDataType; - -import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.util.DateUtil; /** * Converts strings with delimited values into Phoenix arrays. */ class StringToArrayConverter { - private final Splitter splitter; - private final Connection conn; - private final PDataType elementDataType; - private final CsvUpsertExecutor.SimpleDatatypeConversionFunction elementConvertFunction; + private final Splitter splitter; + private final Connection conn; + private final PDataType elementDataType; + private final CsvUpsertExecutor.SimpleDatatypeConversionFunction elementConvertFunction; - /** - * Instantiate with the array value separator and data type. - * - * @param conn Phoenix connection to target database - * @param separatorString string used to separate incoming array values in strings - * @param elementDataType datatype of the elements of arrays to be created - */ - public StringToArrayConverter(Connection conn, String separatorString, - PDataType elementDataType) { - this.conn = conn; - this.splitter = Splitter.on(separatorString); - this.elementDataType = elementDataType; - this.elementConvertFunction = new CsvUpsertExecutor.SimpleDatatypeConversionFunction(elementDataType, this.conn); - } + /** + * Instantiate with the array value separator and data type. 
+ * @param conn Phoenix connection to target database + * @param separatorString string used to separate incoming array values in strings + * @param elementDataType datatype of the elements of arrays to be created + */ + public StringToArrayConverter(Connection conn, String separatorString, + PDataType elementDataType) { + this.conn = conn; + this.splitter = Splitter.on(separatorString); + this.elementDataType = elementDataType; + this.elementConvertFunction = + new CsvUpsertExecutor.SimpleDatatypeConversionFunction(elementDataType, this.conn); + } - /** - * Convert an input delimited string into a phoenix array of the configured type. - * - * @param input string containing delimited array values - * @return the array containing the values represented in the input string - */ - public Array toArray(String input) throws SQLException { - if (input == null || input.isEmpty()) { - return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); - } - return conn.createArrayOf( - elementDataType.getSqlTypeName(), - Lists.newArrayList( - Iterables.transform( - splitter.split(input), - elementConvertFunction)).toArray()); + /** + * Convert an input delimited string into a phoenix array of the configured type. + * @param input string containing delimited array values + * @return the array containing the values represented in the input string + */ + public Array toArray(String input) throws SQLException { + if (input == null || input.isEmpty()) { + return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); } + return conn.createArrayOf(elementDataType.getSqlTypeName(), Lists + .newArrayList(Iterables.transform(splitter.split(input), elementConvertFunction)).toArray()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LinguisticSort.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LinguisticSort.java index c1881c6440b..e40c46b33e7 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LinguisticSort.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LinguisticSort.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,1139 +35,1093 @@ import edu.umd.cs.findbugs.annotations.SuppressWarnings; - /** * This utility class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. 
- * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * Contains all the information about linguistic sorting. - * The intent of this is to provide the SQL changes to the RDBMS to ensure - * that the sorting uses the locale provided in Java, and to make sure that - * the collation in Java will correspond as much as possible to what is in the - * DB. - * - * Rolodex is a feature in alphabetic/syllabary languages to restrict the set - * of rows in a list to those that start with a certain letter. In SQL - * this is usually LIKE 'A%', which will include different letters. - * - * To get the list of valid nls_sorts, run this in oracle - * select value from v$nls_valid_values where parameter='SORT'; + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 Contains all the information about + * linguistic sorting. The intent of this is to provide the SQL changes to the RDBMS to ensure that + * the sorting uses the locale provided in Java, and to make sure that the collation in Java will + * correspond as much as possible to what is in the DB. Rolodex is a feature in alphabetic/syllabary + * languages to restrict the set of rows in a list to those that start with a certain letter. In SQL + * this is usually LIKE 'A%', which will include different letters. To get the list of valid + * nls_sorts, run this in oracle select value from v$nls_valid_values where parameter='SORT'; */ public enum LinguisticSort { - // English: - // Using oracle's upper() function to sort; digits come before letters, - // '[' is the lowest character after 'Z'. // balance-] - ENGLISH(Locale.ENGLISH, "[", false, false, LinguisticSort.Alphabets.STRING), // balance-] - - // German: - // Using oracle's nlssort() function to sort; digits come right after letters. - GERMAN(new Locale("de"), LinguisticSort.Alphabets.GERMAN, "0", true, false, - "nlssort({0}, ''nls_sort=xgerman'')"), - - // French: - // Using oracle's nlssort() function to sort; digits come right after letters. - FRENCH(new Locale("fr"), "0", false, false, "nlssort({0}, ''nls_sort=xfrench'')"), - - // Italian: - // Using oracle's nlssort() function to sort; digits come right after letters. - ITALIAN(new Locale("it"), "0", false, false, "nlssort({0}, ''nls_sort=italian'')"), - - // Spanish: - // Using oracle's nlssort() function to sort; digits come right after letters. - // Alphabet consists of A-Z plus N-tilde. However, CH and LL are not considered - // letters, so do not use Oracle's xspanish nlssort. - SPANISH(new Locale("es"), "0", false, false, "nlssort({0}, ''nls_sort=spanish'')"), - - // Catalan: - // Using oracle's nlssort() function to sort; digits come before letters, - // nothing sorts after the last legal catalan character. - CATALAN(new Locale("ca"), LinguisticSort.Alphabets.CATALAN, "0", true, false, - "nlssort({0}, ''nls_sort=catalan'')"), - - // Dutch: - // Using oracle's nlssort() function to sort; digits come right after letters. - DUTCH(new Locale("nl"), "0", false, false, "nlssort({0}, ''nls_sort=dutch'')"), - - // Portuguese: - // Using oracle's nlssort() function to sort; digits come right after letters. 
- PORTUGUESE(new Locale("pt"), "0", false, false, "nlssort({0}, ''nls_sort=west_european'')"), - - // Danish: - // Alphabet consists of A-Z followed by AE, O-stroke, and A-ring. - // Using oracle's nlssort() function to sort; digits come right after letters. - DANISH(new Locale("da"), "0", false, false, "nlssort({0}, ''nls_sort=danish'')"), - - // Norwegian: - // Alphabet consists of A-Z followed by AE, O-stroke, and A-ring. - // Using oracle's nlssort() function to sort; digits come right after letters. - NORWEGIAN(new Locale("no"), "0", false, false, - "nlssort({0}, ''nls_sort=norwegian'')"), - - // Swedish: - // Alphabet consists of A-Z followed by A-ring, A-diaeresis, and O-diaeresis. - // Using oracle's nlssort() function to sort; digits come before letters, - // nothing sorts after the last legal swedish character. - SWEDISH(new Locale("sv"), null, false, false, - "nlssort({0}, ''nls_sort=swedish'')"), - - // Finnish: - // Alphabet consists of A-Z, minus W, followed by A-ring, A-diaeresis, and O-diaeresis. - // We leave out W so that V's show up properly (bug #151961/W-513969) - // Using oracle's nlssort() function to sort; digits come right after letters. - FINNISH(new Locale("fi"), - new String[] { - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", - "Q", "R", "S", "T", "U", "V", "X", "Y", "Z", "\u00C5", "\u00C4", "\u00D6" }, - "0", false, false, "nlssort({0}, ''nls_sort=finnish'')"), - - // Czech: - // Alphabet consists of many Czech letters but not all english letters. - // Using oracle's nlssort() function to sort; digits come right after letters. - CZECH(new Locale("cs"), "0", true, false, - "nlssort({0}, ''nls_sort=xczech'')"), - - // Polish: - // Alphabet consists of many Polish letters but not all english letters. - // Using oracle's nlssort() function to sort. - POLISH(new Locale("pl"), "\u00DF", false, false, - "nlssort({0}, ''nls_sort=polish'')"), - - // Turkish: - // Use Turkish alphabet, which also indicates special handling in getUpperCaseValue(). - // Using oracle's nlssort() function to sort. - TURKISH(new Locale("tr"), LinguisticSort.Alphabets.TURKISH, null, false, false, - "nlssort({0}, ''nls_sort=xturkish'')"), - - // Traditional chinese: - // Use English alphabet. Using oracle's nlssort() function to sort by stroke. - CHINESE_HK(new Locale("zh", "HK"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, - "nlssort({0}, ''nls_sort=tchinese_radical_m'')"), - CHINESE_HK_STROKE(new Locale("zh", "HK", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", - true, true, "nlssort({0}, ''nls_sort=tchinese_stroke_m'')"), - - CHINESE_TW(new Locale("zh", "TW"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, - "nlssort({0}, ''nls_sort=tchinese_radical_m'')"), - CHINESE_TW_STROKE(new Locale("zh", "TW", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", - true, true, "nlssort({0}, ''nls_sort=tchinese_stroke_m'')"), - - - // Simplified chinese: - // Use English alphabet. Using oracle's nlssort() function to sort by pinyin. - CHINESE(new Locale("zh"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, - "nlssort({0}, ''nls_sort=schinese_radical_m'')"), - CHINESE_STROKE(new Locale("zh", "", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", - true, true, - "nlssort({0}, ''nls_sort=schinese_stroke_m'')"), - CHINESE_PINYIN(new Locale("zh", "", "PINYIN"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", - true, true, - "nlssort({0}, ''nls_sort=schinese_pinyin_m'')"), - - - // Japanese: - // Japanese alphabet. 
Using oracle's nlssort() function to sort. Special rolodex handling - JAPANESE(new Locale("ja"), LinguisticSort.Alphabets.JAPANESE, null, true, true, - "nlssort({0}, ''nls_sort=japanese_m'')"), - - // Korean: - // Use English alphabet. Using oracle's nlssort() function to sort. - KOREAN(new Locale("ko"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, - "nlssort({0}, ''nls_sort=korean_m'')"), - - // Russian: - // Using oracle's nlssort() function to sort. - RUSSIAN(new Locale("ru"), null, false, false, - "nlssort({0}, ''nls_sort=russian'')"), - - // Bulgarian: - // Using oracle's nlssort() function to sort. - BULGARIAN(new Locale("bg"), LinguisticSort.Alphabets.BULGARIAN, null, true, false, - "nlssort({0}, ''nls_sort=bulgarian'')"), - - // Indonesian - // Using oracle's nlssort() function to sort. - INDONESIAN(new Locale("in"), null, true, false, "nlssort({0}, ''nls_sort=indonesian'')"), - - // Romanian: - // Using oracle's nlssort() function to sort. - ROMANIAN(new Locale("ro"), - new String[] { "A", "\u0102", "\u00c2", "B", "C", "D", "E", "F", "G", "H", "I", - "\u00ce", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "\u015e", "T", - "\u0162", "U", "V", "W", "X", "Y", "Z" }, - null, true, false, "nlssort({0}, ''nls_sort=romanian'')"), - - // Vietnamese - // Using oracle's nlssort() function to sort. - VIETNAMESE(new Locale("vi"), - new String[] { - "A", "\u0102", "\u00c2", "B", "C", "D", "\u0110", "E", "\u00ca", "G", "H", - "I", "K", "L", "M", "N", "O", "\u00d4", "\u01a0", "P", "Q", "R", "S", "T", - "U", "\u01af", "V", "X", "Y" }, - null, false, false, "nlssort({0}, ''nls_sort=vietnamese'')"), - - // Ukrainian: - // Using oracle's nlssort() function to sort. - UKRAINIAN(new Locale("uk"), null, false, false, "nlssort({0}, ''nls_sort=ukrainian'')"), - - // Hungarian: - // Using oracle's nlssort() function to sort. - HUNGARIAN(new Locale("hu"), LinguisticSort.Alphabets.HUNGARIAN, null, false, false, - "nlssort({0}, ''nls_sort=xhungarian'')"), - - // Greek: - // Using oracle's nlssort() function to sort. - GREEK(new Locale("el"), null, false, false, "nlssort({0}, ''nls_sort=greek'')"), - - // Hebrew: - // Using oracle's nlssort() function to sort. - HEBREW(new Locale("iw"), null, true, false, "nlssort({0}, ''nls_sort=hebrew'')"), - - // Slovak: - // Using oracle's nlssort() function to sort. - SLOVAK(new Locale("sk"), LinguisticSort.Alphabets.SLOVAK, null, true, false, - "nlssort({0}, ''nls_sort=slovak'')"), - - // Serbian (cyrillic): - // Using oracle's nlssort() function to sort using it's default - SERBIAN_CYRILLIC(new Locale("sr"), null, false, false, - "nlssort({0}, ''nls_sort=generic_m'')"), - - // Serbian (cyrillic): - // Using oracle's nlssort() function to sort using it's default - SERBIAN_LATIN(new Locale("sh"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, - "nlssort({0}, ''nls_sort=xcroatian'')"), - - // Serbian (cyrillic): - // Using oracle's nlssort() function to sort using it's default - BOSNIAN(new Locale("bs"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, - "nlssort({0}, ''nls_sort=xcroatian'')"), - - - // Georgian: - // Using oracle's nlssort() function to sort, even though we're using binary for this. - GEORGIAN(new Locale("ka"), LinguisticSort.Alphabets.GEORGIAN, null, false, false, - "nlssort({0}, ''nls_sort=binary'')"), - - // BASQUE: - // Using oracle's nlssort() function to sort. 
- BASQUE(new Locale("eu"), LinguisticSort.Alphabets.BASQUE, null, false, false, - "nlssort({0}, ''nls_sort=west_european'')"), - - // MALTESE: - // Using oracle's nlssort() function to sort. - MALTESE(new Locale("mt"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), - - // ROMANSH: - // Using oracle's nlssort() function to sort. - ROMANSH(new Locale("rm"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), - - // LUXEMBOURGISH: - // Using oracle's nlssort() function to sort. - LUXEMBOURGISH(new Locale("lb"), LinguisticSort.Alphabets.LUXEMBOURGISH, null, false, false, - "nlssort({0}, ''nls_sort=west_european'')"), - - // IRISH: - // Using oracle's nlssort() function to sort. - IRISH(new Locale("ga"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), - - // Slovenian: - // Using oracle's nlssort() function to sort. - SLOVENE(new Locale("sl"), LinguisticSort.Alphabets.SLOVENE, null, false, false, - "nlssort({0}, ''nls_sort=xslovenian'')"), - - // Croatian: - // Using oracle's nlssort() function to sort. - CROATIAN(new Locale("hr"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, - "nlssort({0}, ''nls_sort=xcroatian'')"), - - // Malay - // Using oracle's nlssort() function to sort. - // We're assuming people are using the english alphabet, - // and not the arabic one (Bahasa Melayu) - MALAY(new Locale("ms"), null, true, false, "nlssort({0}, ''nls_sort=malay'')"), - - // Arabic: - // Using oracle's nlssort() function to sort. - ARABIC(new Locale("ar"), null, false, false, "nlssort({0}, ''nls_sort=arabic'')"), - - // Estonian: - // Using oracle's nlssort() function to sort. - ESTONIAN(new Locale("et"), LinguisticSort.Alphabets.ESTONIAN, null, true, false, - "nlssort({0}, ''nls_sort=estonian'')"), - - // Icelandic: - // Using oracle's nlssort() function to sort. - ICELANDIC(new Locale("is"), LinguisticSort.Alphabets.ICELANDIC, null, true, false, - "nlssort({0}, ''nls_sort=icelandic'')"), - - // Latvian: - // Using oracle's nlssort() function to sort. - LATVIAN(new Locale("lv"), LinguisticSort.Alphabets.LATVIAN, null, false, false, - "nlssort({0}, ''nls_sort=latvian'')"), - - // Lithuanian: - // Using oracle's nlssort() function to sort. - LITHUANIAN(new Locale("lt"), LinguisticSort.Alphabets.LITHUANIAN, null, false, false, - "nlssort({0}, ''nls_sort=lithuanian'')"), - - - // Languages not supported fully. - KYRGYZ(new Locale("ky"), LinguisticSort.Alphabets.KYRGYZ, null, true, false, - "nlssort({0}, ''nls_sort=binary'')"), - - KAZAKH(new Locale("kk"), LinguisticSort.Alphabets.KAZAKH, null, true, false, - "nlssort({0}, ''nls_sort=binary'')"), - - TAJIK(new Locale("tg"), LinguisticSort.Alphabets.TAJIK, null, true, false, - "nlssort({0}, ''nls_sort=russian'')"), - - BELARUSIAN(new Locale("be"), null, true, false, "nlssort({0}, ''nls_sort=russian'')"), - - TURKMEN(new Locale("tk"), LinguisticSort.Alphabets.TURKISH, null, false, false, - "nlssort({0}, ''nls_sort=xturkish'')"), - - AZERBAIJANI(new Locale("az"), LinguisticSort.Alphabets.AZERBAIJANI, null, false, false, - "nlssort({0}, ''nls_sort=xturkish'')"), - - ARMENIAN(new Locale("hy"), null, true, false, "nlssort({0}, ''nls_sort=binary'')"), - - THAI(new Locale("th"), null, true, false, "nlssort({0}, ''nls_sort=thai_dictionary'')"), - - // Binary? 
really - HINDI(new Locale("hi"), null, true, false, "nlssort({0}, ''nls_sort=binary'')"), - - URDU(new Locale("ur"), LinguisticSort.Alphabets.URDU, null, false, false, - "nlssort({0}, ''nls_sort=arabic'')"), - - // Bengali - BENGALI(new Locale("bn"), LinguisticSort.Alphabets.BENGALI, null, true, false, - "nlssort({0}, ''nls_sort=bengali'')"), - - TAMIL(new Locale("ta"), LinguisticSort.Alphabets.TAMIL, null, true, false, - "nlssort({0}, ''nls_sort=binary'')"), - - // Unused language for testing; Alphabet and sorting defaults to English - ESPERANTO(new Locale("eo"), LinguisticSort.Alphabets.ENGLISH, "[", false, false, - LinguisticSort.Alphabets.STRING); - - private static final Map BY_LOCALE = getByLocaleInfo(); - - /** - * Create the map that will be stuffed into BY_LOCALE. We have to fully create an object - * THEN stuff into a final field in a constructor (as unmodifiableMap does below) in order - * to get a proper guarantee from Java's memory model. - * - * See http://jeremymanson.blogspot.com/2008/07/immutability-in-java-part-2.html - */ - private static Map getByLocaleInfo() { - final Map byLocaleInfo = new HashMap(64); - for (LinguisticSort sort : values()) { - LinguisticSort duplicated = byLocaleInfo.put(sort.getLocale(), sort); - assert duplicated == null : "Two linguistic sorts with the same locale: " - + sort.getLocale(); - } - return Collections.unmodifiableMap(byLocaleInfo); + // English: + // Using oracle's upper() function to sort; digits come before letters, + // '[' is the lowest character after 'Z'. // balance-] + ENGLISH(Locale.ENGLISH, "[", false, false, LinguisticSort.Alphabets.STRING), // balance-] + + // German: + // Using oracle's nlssort() function to sort; digits come right after letters. + GERMAN(new Locale("de"), LinguisticSort.Alphabets.GERMAN, "0", true, false, + "nlssort({0}, ''nls_sort=xgerman'')"), + + // French: + // Using oracle's nlssort() function to sort; digits come right after letters. + FRENCH(new Locale("fr"), "0", false, false, "nlssort({0}, ''nls_sort=xfrench'')"), + + // Italian: + // Using oracle's nlssort() function to sort; digits come right after letters. + ITALIAN(new Locale("it"), "0", false, false, "nlssort({0}, ''nls_sort=italian'')"), + + // Spanish: + // Using oracle's nlssort() function to sort; digits come right after letters. + // Alphabet consists of A-Z plus N-tilde. However, CH and LL are not considered + // letters, so do not use Oracle's xspanish nlssort. + SPANISH(new Locale("es"), "0", false, false, "nlssort({0}, ''nls_sort=spanish'')"), + + // Catalan: + // Using oracle's nlssort() function to sort; digits come before letters, + // nothing sorts after the last legal catalan character. + CATALAN(new Locale("ca"), LinguisticSort.Alphabets.CATALAN, "0", true, false, + "nlssort({0}, ''nls_sort=catalan'')"), + + // Dutch: + // Using oracle's nlssort() function to sort; digits come right after letters. + DUTCH(new Locale("nl"), "0", false, false, "nlssort({0}, ''nls_sort=dutch'')"), + + // Portuguese: + // Using oracle's nlssort() function to sort; digits come right after letters. + PORTUGUESE(new Locale("pt"), "0", false, false, "nlssort({0}, ''nls_sort=west_european'')"), + + // Danish: + // Alphabet consists of A-Z followed by AE, O-stroke, and A-ring. + // Using oracle's nlssort() function to sort; digits come right after letters. + DANISH(new Locale("da"), "0", false, false, "nlssort({0}, ''nls_sort=danish'')"), + + // Norwegian: + // Alphabet consists of A-Z followed by AE, O-stroke, and A-ring. 
+ // Using oracle's nlssort() function to sort; digits come right after letters. + NORWEGIAN(new Locale("no"), "0", false, false, "nlssort({0}, ''nls_sort=norwegian'')"), + + // Swedish: + // Alphabet consists of A-Z followed by A-ring, A-diaeresis, and O-diaeresis. + // Using oracle's nlssort() function to sort; digits come before letters, + // nothing sorts after the last legal swedish character. + SWEDISH(new Locale("sv"), null, false, false, "nlssort({0}, ''nls_sort=swedish'')"), + + // Finnish: + // Alphabet consists of A-Z, minus W, followed by A-ring, A-diaeresis, and O-diaeresis. + // We leave out W so that V's show up properly (bug #151961/W-513969) + // Using oracle's nlssort() function to sort; digits come right after letters. + FINNISH(new Locale("fi"), + new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", + "Q", "R", "S", "T", "U", "V", "X", "Y", "Z", "\u00C5", "\u00C4", "\u00D6" }, + "0", false, false, "nlssort({0}, ''nls_sort=finnish'')"), + + // Czech: + // Alphabet consists of many Czech letters but not all english letters. + // Using oracle's nlssort() function to sort; digits come right after letters. + CZECH(new Locale("cs"), "0", true, false, "nlssort({0}, ''nls_sort=xczech'')"), + + // Polish: + // Alphabet consists of many Polish letters but not all english letters. + // Using oracle's nlssort() function to sort. + POLISH(new Locale("pl"), "\u00DF", false, false, "nlssort({0}, ''nls_sort=polish'')"), + + // Turkish: + // Use Turkish alphabet, which also indicates special handling in getUpperCaseValue(). + // Using oracle's nlssort() function to sort. + TURKISH(new Locale("tr"), LinguisticSort.Alphabets.TURKISH, null, false, false, + "nlssort({0}, ''nls_sort=xturkish'')"), + + // Traditional chinese: + // Use English alphabet. Using oracle's nlssort() function to sort by stroke. + CHINESE_HK(new Locale("zh", "HK"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, + "nlssort({0}, ''nls_sort=tchinese_radical_m'')"), + CHINESE_HK_STROKE(new Locale("zh", "HK", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", + true, true, "nlssort({0}, ''nls_sort=tchinese_stroke_m'')"), + + CHINESE_TW(new Locale("zh", "TW"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, + "nlssort({0}, ''nls_sort=tchinese_radical_m'')"), + CHINESE_TW_STROKE(new Locale("zh", "TW", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", + true, true, "nlssort({0}, ''nls_sort=tchinese_stroke_m'')"), + + // Simplified chinese: + // Use English alphabet. Using oracle's nlssort() function to sort by pinyin. + CHINESE(new Locale("zh"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, + "nlssort({0}, ''nls_sort=schinese_radical_m'')"), + CHINESE_STROKE(new Locale("zh", "", "STROKE"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, + true, "nlssort({0}, ''nls_sort=schinese_stroke_m'')"), + CHINESE_PINYIN(new Locale("zh", "", "PINYIN"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, + true, "nlssort({0}, ''nls_sort=schinese_pinyin_m'')"), + + // Japanese: + // Japanese alphabet. Using oracle's nlssort() function to sort. Special rolodex handling + JAPANESE(new Locale("ja"), LinguisticSort.Alphabets.JAPANESE, null, true, true, + "nlssort({0}, ''nls_sort=japanese_m'')"), + + // Korean: + // Use English alphabet. Using oracle's nlssort() function to sort. + KOREAN(new Locale("ko"), LinguisticSort.Alphabets.ENGLISH, "\u03B1", true, true, + "nlssort({0}, ''nls_sort=korean_m'')"), + + // Russian: + // Using oracle's nlssort() function to sort. 
+ RUSSIAN(new Locale("ru"), null, false, false, "nlssort({0}, ''nls_sort=russian'')"), + + // Bulgarian: + // Using oracle's nlssort() function to sort. + BULGARIAN(new Locale("bg"), LinguisticSort.Alphabets.BULGARIAN, null, true, false, + "nlssort({0}, ''nls_sort=bulgarian'')"), + + // Indonesian + // Using oracle's nlssort() function to sort. + INDONESIAN(new Locale("in"), null, true, false, "nlssort({0}, ''nls_sort=indonesian'')"), + + // Romanian: + // Using oracle's nlssort() function to sort. + ROMANIAN(new Locale("ro"), + new String[] { "A", "\u0102", "\u00c2", "B", "C", "D", "E", "F", "G", "H", "I", "\u00ce", "J", + "K", "L", "M", "N", "O", "P", "Q", "R", "S", "\u015e", "T", "\u0162", "U", "V", "W", "X", "Y", + "Z" }, + null, true, false, "nlssort({0}, ''nls_sort=romanian'')"), + + // Vietnamese + // Using oracle's nlssort() function to sort. + VIETNAMESE(new Locale("vi"), + new String[] { "A", "\u0102", "\u00c2", "B", "C", "D", "\u0110", "E", "\u00ca", "G", "H", "I", + "K", "L", "M", "N", "O", "\u00d4", "\u01a0", "P", "Q", "R", "S", "T", "U", "\u01af", "V", "X", + "Y" }, + null, false, false, "nlssort({0}, ''nls_sort=vietnamese'')"), + + // Ukrainian: + // Using oracle's nlssort() function to sort. + UKRAINIAN(new Locale("uk"), null, false, false, "nlssort({0}, ''nls_sort=ukrainian'')"), + + // Hungarian: + // Using oracle's nlssort() function to sort. + HUNGARIAN(new Locale("hu"), LinguisticSort.Alphabets.HUNGARIAN, null, false, false, + "nlssort({0}, ''nls_sort=xhungarian'')"), + + // Greek: + // Using oracle's nlssort() function to sort. + GREEK(new Locale("el"), null, false, false, "nlssort({0}, ''nls_sort=greek'')"), + + // Hebrew: + // Using oracle's nlssort() function to sort. + HEBREW(new Locale("iw"), null, true, false, "nlssort({0}, ''nls_sort=hebrew'')"), + + // Slovak: + // Using oracle's nlssort() function to sort. + SLOVAK(new Locale("sk"), LinguisticSort.Alphabets.SLOVAK, null, true, false, + "nlssort({0}, ''nls_sort=slovak'')"), + + // Serbian (cyrillic): + // Using oracle's nlssort() function to sort using it's default + SERBIAN_CYRILLIC(new Locale("sr"), null, false, false, "nlssort({0}, ''nls_sort=generic_m'')"), + + // Serbian (cyrillic): + // Using oracle's nlssort() function to sort using it's default + SERBIAN_LATIN(new Locale("sh"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, + "nlssort({0}, ''nls_sort=xcroatian'')"), + + // Serbian (cyrillic): + // Using oracle's nlssort() function to sort using it's default + BOSNIAN(new Locale("bs"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, + "nlssort({0}, ''nls_sort=xcroatian'')"), + + // Georgian: + // Using oracle's nlssort() function to sort, even though we're using binary for this. + GEORGIAN(new Locale("ka"), LinguisticSort.Alphabets.GEORGIAN, null, false, false, + "nlssort({0}, ''nls_sort=binary'')"), + + // BASQUE: + // Using oracle's nlssort() function to sort. + BASQUE(new Locale("eu"), LinguisticSort.Alphabets.BASQUE, null, false, false, + "nlssort({0}, ''nls_sort=west_european'')"), + + // MALTESE: + // Using oracle's nlssort() function to sort. + MALTESE(new Locale("mt"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), + + // ROMANSH: + // Using oracle's nlssort() function to sort. + ROMANSH(new Locale("rm"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), + + // LUXEMBOURGISH: + // Using oracle's nlssort() function to sort. 
+ LUXEMBOURGISH(new Locale("lb"), LinguisticSort.Alphabets.LUXEMBOURGISH, null, false, false, + "nlssort({0}, ''nls_sort=west_european'')"), + + // IRISH: + // Using oracle's nlssort() function to sort. + IRISH(new Locale("ga"), null, false, false, "nlssort({0}, ''nls_sort=west_european'')"), + + // Slovenian: + // Using oracle's nlssort() function to sort. + SLOVENE(new Locale("sl"), LinguisticSort.Alphabets.SLOVENE, null, false, false, + "nlssort({0}, ''nls_sort=xslovenian'')"), + + // Croatian: + // Using oracle's nlssort() function to sort. + CROATIAN(new Locale("hr"), LinguisticSort.Alphabets.SERBIAN_LATIN, null, false, false, + "nlssort({0}, ''nls_sort=xcroatian'')"), + + // Malay + // Using oracle's nlssort() function to sort. + // We're assuming people are using the english alphabet, + // and not the arabic one (Bahasa Melayu) + MALAY(new Locale("ms"), null, true, false, "nlssort({0}, ''nls_sort=malay'')"), + + // Arabic: + // Using oracle's nlssort() function to sort. + ARABIC(new Locale("ar"), null, false, false, "nlssort({0}, ''nls_sort=arabic'')"), + + // Estonian: + // Using oracle's nlssort() function to sort. + ESTONIAN(new Locale("et"), LinguisticSort.Alphabets.ESTONIAN, null, true, false, + "nlssort({0}, ''nls_sort=estonian'')"), + + // Icelandic: + // Using oracle's nlssort() function to sort. + ICELANDIC(new Locale("is"), LinguisticSort.Alphabets.ICELANDIC, null, true, false, + "nlssort({0}, ''nls_sort=icelandic'')"), + + // Latvian: + // Using oracle's nlssort() function to sort. + LATVIAN(new Locale("lv"), LinguisticSort.Alphabets.LATVIAN, null, false, false, + "nlssort({0}, ''nls_sort=latvian'')"), + + // Lithuanian: + // Using oracle's nlssort() function to sort. + LITHUANIAN(new Locale("lt"), LinguisticSort.Alphabets.LITHUANIAN, null, false, false, + "nlssort({0}, ''nls_sort=lithuanian'')"), + + // Languages not supported fully. + KYRGYZ(new Locale("ky"), LinguisticSort.Alphabets.KYRGYZ, null, true, false, + "nlssort({0}, ''nls_sort=binary'')"), + + KAZAKH(new Locale("kk"), LinguisticSort.Alphabets.KAZAKH, null, true, false, + "nlssort({0}, ''nls_sort=binary'')"), + + TAJIK(new Locale("tg"), LinguisticSort.Alphabets.TAJIK, null, true, false, + "nlssort({0}, ''nls_sort=russian'')"), + + BELARUSIAN(new Locale("be"), null, true, false, "nlssort({0}, ''nls_sort=russian'')"), + + TURKMEN(new Locale("tk"), LinguisticSort.Alphabets.TURKISH, null, false, false, + "nlssort({0}, ''nls_sort=xturkish'')"), + + AZERBAIJANI(new Locale("az"), LinguisticSort.Alphabets.AZERBAIJANI, null, false, false, + "nlssort({0}, ''nls_sort=xturkish'')"), + + ARMENIAN(new Locale("hy"), null, true, false, "nlssort({0}, ''nls_sort=binary'')"), + + THAI(new Locale("th"), null, true, false, "nlssort({0}, ''nls_sort=thai_dictionary'')"), + + // Binary? 
really + HINDI(new Locale("hi"), null, true, false, "nlssort({0}, ''nls_sort=binary'')"), + + URDU(new Locale("ur"), LinguisticSort.Alphabets.URDU, null, false, false, + "nlssort({0}, ''nls_sort=arabic'')"), + + // Bengali + BENGALI(new Locale("bn"), LinguisticSort.Alphabets.BENGALI, null, true, false, + "nlssort({0}, ''nls_sort=bengali'')"), + + TAMIL(new Locale("ta"), LinguisticSort.Alphabets.TAMIL, null, true, false, + "nlssort({0}, ''nls_sort=binary'')"), + + // Unused language for testing; Alphabet and sorting defaults to English + ESPERANTO(new Locale("eo"), LinguisticSort.Alphabets.ENGLISH, "[", false, false, + LinguisticSort.Alphabets.STRING); + + private static final Map BY_LOCALE = getByLocaleInfo(); + + /** + * Create the map that will be stuffed into BY_LOCALE. We have to fully create an object THEN + * stuff into a final field in a constructor (as unmodifiableMap does below) in order to get a + * proper guarantee from Java's memory model. See + * http://jeremymanson.blogspot.com/2008/07/immutability-in-java-part-2.html + */ + private static Map getByLocaleInfo() { + final Map byLocaleInfo = new HashMap(64); + for (LinguisticSort sort : values()) { + LinguisticSort duplicated = byLocaleInfo.put(sort.getLocale(), sort); + assert duplicated == null : "Two linguistic sorts with the same locale: " + sort.getLocale(); } - - /** - * Get sorting info for the given locale. - */ - public static LinguisticSort get(Locale locale) { - // For non-UTF8 dbs, we always interpret everything as English. (We did not set - // the page encoding to UTF-8, and thus we may have incorrectly encoded data.) - // On all other instances, look for the language of the user's locale. This should - // succeed because every language we support are listed in data. But just in case, - // default to english also. - if (IS_MULTI_LINGUAL /*|| TestContext.isRunningTests()*/) { - LinguisticSort sort = BY_LOCALE.get(locale); - if (sort != null) { - return sort; - } - if (locale.getVariant().length() > 0) { - if ("zh".equals(locale.getLanguage())) { - // TW and HK are handled above, this handles SG - if (!"".equals(locale.getLanguage())) { - // This means it's standard. - return get(new Locale(locale.getLanguage(), "", locale.getVariant())); - } - } - return get(new Locale(locale.getLanguage(), locale.getLanguage())); - } - if (locale.getCountry().length() > 0) { - sort = BY_LOCALE.get(new Locale(locale.getLanguage())); - if (sort != null) { - return sort; - } - } + return Collections.unmodifiableMap(byLocaleInfo); + } + + /** + * Get sorting info for the given locale. + */ + public static LinguisticSort get(Locale locale) { + // For non-UTF8 dbs, we always interpret everything as English. (We did not set + // the page encoding to UTF-8, and thus we may have incorrectly encoded data.) + // On all other instances, look for the language of the user's locale. This should + // succeed because every language we support are listed in data. But just in case, + // default to english also. + if (IS_MULTI_LINGUAL /* || TestContext.isRunningTests() */) { + LinguisticSort sort = BY_LOCALE.get(locale); + if (sort != null) { + return sort; + } + if (locale.getVariant().length() > 0) { + if ("zh".equals(locale.getLanguage())) { + // TW and HK are handled above, this handles SG + if (!"".equals(locale.getLanguage())) { + // This means it's standard. 
+ return get(new Locale(locale.getLanguage(), "", locale.getVariant())); + } } - return ENGLISH; + return get(new Locale(locale.getLanguage(), locale.getLanguage())); + } + if (locale.getCountry().length() > 0) { + sort = BY_LOCALE.get(new Locale(locale.getLanguage())); + if (sort != null) { + return sort; + } + } } + return ENGLISH; + } + + /** + * The locale for this LinguisticSort instance. + */ + private final Locale locale; + + /** + * Collator for this LinguisticSort instance. This may be different than the default collator for + * its locale. This is to better match Oracle's nls sort ordering. + */ + private final Collator collator; + + /** + * Array of letters (Strings) to show in the rolodex. An empty array for alphabet means that the + * rolodex is not supported for the locale. + */ + private final String[] alphabet; + + /** + * An optional String that sorts higher than all letters in the alphabet. Used when the generating + * rolodex sql for the last letter. + */ + private final String highValue; + + /** + * True normal secondary sorting is reversed, ie, if lower case letters are sorted before upper + * case. + */ + private final boolean reverseSecondary; + + /** + * True if the locale has double width alphabet, number or symbols, So we use Oracle's + * to_single_byte to convert into the half width letter. + */ + private final boolean hasDoubleWidth; + + /** + * A MessageFormat pattern for generating an oracle sql expression returning the collation key for + * sorting a sql expression. Not used by postgres. + */ + private final String collationKeySql; + + /** + * For upper-casing Java values and generating SQL to generate the same. Not used by postgres. + */ + private final OracleUpperTable upper; + + /** + * Constructor only used when building static data, where ICU should be used to derive the value + * for the alphabet + */ + LinguisticSort(Locale locale, String highValue, boolean reverseSecondary, boolean hasDoubleWidth, + String collationKeySql) { + this(locale, getAlphabetFromICU(locale), highValue, reverseSecondary, hasDoubleWidth, + collationKeySql); + } + + /** + * Mapping for locales and ULocale language tags to use for constructing an ICU4J collator. javac + * complains if we attempt to refer to a static defined inside the same class as an enum, so we + * need to use an inner class to have such a constant mapping. + */ + private static final class Icu4jCollatorOverrides { + static final Map OVERRIDES = getIcu4jCollatorOverrides(); /** - * The locale for this LinguisticSort instance. - */ - private final Locale locale; - - /** - * Collator for this LinguisticSort instance. This may be different than the - * default collator for its locale. This is to better match Oracle's nls sort - * ordering. + * ICU4J collator overrides */ - private final Collator collator; - - /** - * Array of letters (Strings) to show in the rolodex. An empty array for - * alphabet means that the rolodex is not supported for the locale. - */ - private final String[] alphabet; - - /** - * An optional String that sorts higher than all letters in the alphabet. - * Used when the generating rolodex sql for the last letter. - */ - private final String highValue; - - /** - * True normal secondary sorting is reversed, ie, if lower case letters - * are sorted before upper case. - */ - private final boolean reverseSecondary; - - /** - * True if the locale has double width alphabet, number or symbols, - * So we use Oracle's to_single_byte to convert into the half width letter. 
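A quick usage sketch of the fallback behaviour implemented in get(Locale) above (assuming LinguisticSort is imported and IS_MULTI_LINGUAL is true): an exact locale match wins, a country-qualified locale falls back to its bare language, and anything unknown falls back to ENGLISH. The sample locales are arbitrary:

import java.util.Locale;

public class LinguisticSortLookupDemo {
  public static void main(String[] args) {
    // Exact match on the registered language.
    System.out.println(LinguisticSort.get(new Locale("fr")));        // FRENCH
    // fr_CA is not registered, so the country is dropped and the language is used.
    System.out.println(LinguisticSort.get(new Locale("fr", "CA")));  // FRENCH
    // A locale with no registered language falls back to ENGLISH.
    System.out.println(LinguisticSort.get(new Locale("xx")));        // ENGLISH
  }
}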
- */ - private final boolean hasDoubleWidth; - - /** - * A MessageFormat pattern for generating an oracle sql expression returning the - * collation key for sorting a sql expression. Not used by postgres. - */ - private final String collationKeySql; - - /** - * For upper-casing Java values and generating SQL to generate the same. Not used by postgres. - */ - private final OracleUpperTable upper; - - /** - * Constructor only used when building static data, where ICU should be used to derive the - * value for the alphabet - */ - LinguisticSort(Locale locale, String highValue, boolean reverseSecondary, - boolean hasDoubleWidth, String collationKeySql) { - this(locale, getAlphabetFromICU(locale), highValue, reverseSecondary, - hasDoubleWidth, collationKeySql); + private static Map getIcu4jCollatorOverrides() { + // Map between a Locale and a BCP47 language tag to use when calling ICU4J's + // Collator.getInstance(ULocale.forLanguageTag()). + Map overrides = new HashMap(7); + + // Built-in JDK collators for Chinese are behind the Unicode standard, so we need to + // override them. See discussion at + // https://stackoverflow.com/questions/33672422 + // /wrong-sorting-with-collator-using-locale-simplified-chinese + // Also see the following JDK collator bugs: + // https://bugs.openjdk.java.net/browse/JDK-6415666 + // https://bugs.openjdk.java.net/browse/JDK-2143916 + // https://bugs.openjdk.java.net/browse/JDK-6411864 + + // CHINESE_HK: + overrides.put(new Locale("zh", "HK"), "zh-HK-u-co-unihan"); + // CHINESE_HK_STROKE: + overrides.put(new Locale("zh", "HK", "STROKE"), "zh-HK-u-co-stroke"); + // CHINESE_TW: + overrides.put(new Locale("zh", "TW"), "zh-TW-u-co-unihan"); + // CHINESE_TW_STROKE: + overrides.put(new Locale("zh", "TW", "STROKE"), "zh-TW-u-co-stroke"); + // CHINESE: + overrides.put(new Locale("zh"), "zh-CN-u-co-unihan"); + // CHINESE_STROKE: + overrides.put(new Locale("zh", "", "STROKE"), "zh-CN-u-co-stroke"); + // CHINESE_PINYIN: + overrides.put(new Locale("zh", "", "PINYIN"), "zh-CN-u-co-pinyin"); + + return Collections.unmodifiableMap(overrides); } - - /** - * Mapping for locales and ULocale language tags to use for constructing an ICU4J collator. - * javac complains if we attempt to refer to a static defined inside the same class as an enum, - * so we need to use an inner class to have such a constant mapping. - */ - private static final class Icu4jCollatorOverrides { - static final Map OVERRIDES = getIcu4jCollatorOverrides(); - - /** - * ICU4J collator overrides - */ - private static Map getIcu4jCollatorOverrides() { - // Map between a Locale and a BCP47 language tag to use when calling ICU4J's - // Collator.getInstance(ULocale.forLanguageTag()). - Map overrides = new HashMap(7); - - // Built-in JDK collators for Chinese are behind the Unicode standard, so we need to - // override them. 
See discussion at - // https://stackoverflow.com/questions/33672422 - // /wrong-sorting-with-collator-using-locale-simplified-chinese - // Also see the following JDK collator bugs: - // https://bugs.openjdk.java.net/browse/JDK-6415666 - // https://bugs.openjdk.java.net/browse/JDK-2143916 - // https://bugs.openjdk.java.net/browse/JDK-6411864 - - // CHINESE_HK: - overrides.put(new Locale("zh", "HK"), "zh-HK-u-co-unihan"); - // CHINESE_HK_STROKE: - overrides.put(new Locale("zh", "HK", "STROKE"), "zh-HK-u-co-stroke"); - // CHINESE_TW: - overrides.put(new Locale("zh", "TW"), "zh-TW-u-co-unihan"); - // CHINESE_TW_STROKE: - overrides.put(new Locale("zh", "TW", "STROKE"), "zh-TW-u-co-stroke"); - // CHINESE: - overrides.put(new Locale("zh"), "zh-CN-u-co-unihan"); - // CHINESE_STROKE: - overrides.put(new Locale("zh", "", "STROKE"), "zh-CN-u-co-stroke"); - // CHINESE_PINYIN: - overrides.put(new Locale("zh", "", "PINYIN"), "zh-CN-u-co-pinyin"); - - return Collections.unmodifiableMap(overrides); - } + } + + /** + * Constructor only used when building static data + */ + LinguisticSort(Locale locale, String[] alphabet, String highValue, boolean reverseSecondary, + boolean hasDoubleWidth, String collationKeySql) { + this.locale = locale; + this.alphabet = alphabet; + this.highValue = highValue; + assert this.highValue == null || this.highValue.length() == 1; + this.reverseSecondary = reverseSecondary; + this.hasDoubleWidth = hasDoubleWidth; + this.collationKeySql = collationKeySql; + // Construct collator for this locale + if (LinguisticSort.Icu4jCollatorOverrides.OVERRIDES.containsKey(this.locale)) { + // Force ICU4J collators for specific locales so they match Oracle sort + this.collator = CollatorICU.wrap(com.ibm.icu.text.Collator.getInstance( + ULocale.forLanguageTag(LinguisticSort.Icu4jCollatorOverrides.OVERRIDES.get(this.locale)))); + } else if (this.locale.getVariant().length() > 0) { + // If there's a variant, use ICU4J to figure it out. + this.collator = + CollatorICU.wrap(com.ibm.icu.text.Collator.getInstance(ULocale.forLocale(this.locale))); + } else { + this.collator = Collator.getInstance(this.locale); } - - /** - * Constructor only used when building static data - */ - LinguisticSort(Locale locale, String[] alphabet, String highValue, boolean reverseSecondary, - boolean hasDoubleWidth, String collationKeySql) { - this.locale = locale; - this.alphabet = alphabet; - this.highValue = highValue; - assert this.highValue == null || this.highValue.length() == 1; - this.reverseSecondary = reverseSecondary; - this.hasDoubleWidth = hasDoubleWidth; - this.collationKeySql = collationKeySql; - // Construct collator for this locale - if (LinguisticSort.Icu4jCollatorOverrides.OVERRIDES.containsKey(this.locale)) { - // Force ICU4J collators for specific locales so they match Oracle sort - this.collator = CollatorICU.wrap(com.ibm.icu.text.Collator.getInstance( - ULocale.forLanguageTag(LinguisticSort - .Icu4jCollatorOverrides.OVERRIDES.get(this.locale)))); - } else if (this.locale.getVariant().length() > 0) { - // If there's a variant, use ICU4J to figure it out. 
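For context on the BCP 47 tags in the override map above: the -u-co- extension selects a collation variant, and ICU4J resolves it via ULocale.forLanguageTag(), exactly as the constructor does. A minimal, self-contained sketch assuming ICU4J is on the classpath; the sample words are arbitrary:

import java.util.Arrays;
import java.util.List;
import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class Icu4jCollationKeywordDemo {
  public static void main(String[] args) {
    // The -u-co-pinyin keyword asks ICU for the pinyin collation variant,
    // as the override map does for CHINESE_PINYIN.
    Collator collator = Collator.getInstance(ULocale.forLanguageTag("zh-CN-u-co-pinyin"));
    List<String> words = Arrays.asList("\u4eba", "\u4e2d", "\u5927"); // 人, 中, 大
    // Sort the sample words using the pinyin-ordered collator.
    words.sort(collator::compare);
    System.out.println(words);
  }
}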
- this.collator = CollatorICU.wrap(com.ibm.icu.text.Collator.getInstance( - ULocale.forLocale(this.locale))); - } else { - this.collator = Collator.getInstance(this.locale); - } - this.collator.setStrength(Collator.SECONDARY); - this.upper = OracleUpperTable.forLinguisticSort(name()); + this.collator.setStrength(Collator.SECONDARY); + this.upper = OracleUpperTable.forLinguisticSort(name()); + } + + /** Returns a new collator for this LinguisticSort instance. */ + public Collator getCollator() { + // Since RuleBasedCollator.compare() is synchronized, it is not nice to return + // this.collator here, because that would mean requests for the same language + // will be waiting for each other. Instead, return a clone. And, cloning + // RuleBasedCollator instances is much more efficient than creating one from + // the rules. + return (Collator) this.collator.clone(); + } + + /** + * @return a new collator for this LinguisticSort instance that is guaranteed to be + * case-insensitive. Danish collation, unfortunately, is a little odd, in that "v" and "w" + * are considered to be the same character. To make up for this, they made "v" and "V" a + * secondary difference, which makes Enum comparisons in FilterItem a little wonky. + * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4174436 + */ + public Collator getGuaranteedCaseInsensitiveCollator() { + Collator collator = getCollator(); + if ("da".equals(this.locale.getLanguage())) { + collator.setStrength(Collator.PRIMARY); } + return collator; + } + + Locale getLocale() { + return this.locale; + } + + /** Returns a new comparator for strings for this LinguisticSort instance. */ + @SuppressWarnings("unchecked") + // Converting from Comparator to Comparator + public Comparator getNonCachingComparator() { + return (Comparator) this.collator.clone(); + } + + /** + * @return a new comparator for strings for this LinguisticSort instance. + * @param size the number of elements to compare (default is 16). + */ + public Comparator getComparator(int size) { + return new LinguisticSort.CollatingComparator(getCollator(), size); + } + + /** + * A String comparator that uses the current collation + */ + static class CollatingComparator implements Comparator { + private final Collator collator; + private final Map cKeyMap; - /** - * @return a new collator for this LinguisticSort instance. - */ - public Collator getCollator() { - // Since RuleBasedCollator.compare() is synchronized, it is not nice to return - // this.collator here, because that would mean requests for the same language - // will be waiting for each other. Instead, return a clone. And, cloning - // RuleBasedCollator instances is much more efficient than creating one from - // the rules. - return (Collator) this.collator.clone(); + CollatingComparator(Collator collator) { + this(collator, 16); } - /** - * @return a new collator for this LinguisticSort instance that is guaranteed to be - * case-insensitive. Danish collation, unfortunately, is a little odd, in that "v" - * and "w" are considered to be the same character. To make up for this, they made - * "v" and "V" a secondary difference, which makes Enum comparisons in FilterItem - * a little wonky. 
http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4174436 - */ - public Collator getGuaranteedCaseInsensitiveCollator() { - Collator collator = getCollator(); - if ("da".equals(this.locale.getLanguage())) { - collator.setStrength(Collator.PRIMARY); - } - return collator; + CollatingComparator(Collator collator, int defaultSize) { + this.collator = collator; + cKeyMap = new HashMap<>(defaultSize); } - Locale getLocale() { - return this.locale; + @SuppressWarnings(value = "ES_COMPARING_PARAMETER_STRING_WITH_EQ", + justification = "Reference comparison used for performance improvement.") + public int compare(String o1, String o2) { + if (o1 == o2) { + return 0; + } else if (o2 == null) { + return 1; + } else if (o1 == null) { + return -1; + } + + return getCollationKey(o1).compareTo(getCollationKey(o2)); } - /** - * @return a new comparator for strings for this LinguisticSort instance. - */ - @SuppressWarnings("unchecked") - // Converting from Comparator to Comparator - public Comparator getNonCachingComparator() { - return (Comparator) this.collator.clone(); + private CollationKey getCollationKey(String comp) { + CollationKey key = cKeyMap.get(comp); + if (key == null) { + key = collator.getCollationKey(comp); + cKeyMap.put(comp, key); + } + return key; } - - /** - * @return a new comparator for strings for this LinguisticSort instance. - * @param size the number of elements to compare (default is 16). - */ - public Comparator getComparator(int size) { - return new LinguisticSort.CollatingComparator(getCollator(), size); + } + + /** + * Returns the number of letters to show in the rolodex. + */ + public int getAlphabetLength() { + return this.alphabet.length; + } + + /** + * Returns the n-th of letter in the rolodex. Note, a 'letter' in a language be composed of more + * than one unicode characters, for example, letter 'ch' in Czech. + */ + public String getAlphabet(int index) { + return this.alphabet[index]; + } + + // Used only for test code + String[] getAlphabet() { + return this.alphabet; + } + + /** + * Return the rolodexIndex for a string. + * @param searchTerm Must be a 1-char string + * @return the rolodexIndex, including Other (i.e. getAlphabetLength) if it doesn't fall into a + * bucket. If this language doesn't have a rolodex (e.g. Arabic, Latvian, etc.) 
return -1 + * @throws IllegalArgumentException if the string is null or not of length 1 + */ + public int getRolodexIndexForChar(String searchTerm) { + if (searchTerm == null || searchTerm.length() != 1) { + throw new IllegalArgumentException("Must be a one-length string"); } - /** - * A String comparator that uses the current collation - */ - static class CollatingComparator implements Comparator { - private final Collator collator; - private final Map cKeyMap; - - CollatingComparator(Collator collator) { - this(collator, 16); - } - - CollatingComparator(Collator collator, int defaultSize) { - this.collator = collator; - cKeyMap = new HashMap<>(defaultSize); - } - - @SuppressWarnings( - value = "ES_COMPARING_PARAMETER_STRING_WITH_EQ", - justification = "Reference comparison used for performance improvement.") - public int compare(String o1, String o2) { - if (o1 == o2) { - return 0; - } else if (o2 == null) { - return 1; - } else if (o1 == null) { - return -1; - } - - return getCollationKey(o1).compareTo(getCollationKey(o2)); - } - - private CollationKey getCollationKey(String comp) { - CollationKey key = cKeyMap.get(comp); - if (key == null) { - key = collator.getCollationKey(comp); - cKeyMap.put(comp, key); - } - return key; - } + if (this.getAlphabetLength() == 0) { + return -1; } - /** - * Returns the number of letters to show in the rolodex. - */ - public int getAlphabetLength() { - return this.alphabet.length; + for (int i = 0; i < this.getAlphabetLength(); i++) { + int comparison = this.collator.compare(searchTerm, this.getAlphabet(i)); + + if (comparison < 0) { + // If it's less than 'a', return Other + // Otherwise, it's less than the current index, but it wasn't 0 on the + // previous comparison, so return the previous rolodex letter. + return i == 0 ? this.getAlphabetLength() : (i - 1); + } else if (comparison == 0) { + return i; + } } - - /** - * Returns the n-th of letter in the rolodex. Note, a 'letter' - * in a language be composed of more than one unicode characters, - * for example, letter 'ch' in Czech. - */ - public String getAlphabet(int index) { - return this.alphabet[index]; + return this.getAlphabetLength(); + } + + /** + * Returns the sql expression to convert the given sql expression to upper case. + */ + public String getUpperCaseSql(String expr, boolean isPostgres) { + if (isPostgres) { + return "icu_upper(" + expr + ",'" + this.locale.toString() + "')"; + } else { + return upper.getSql(expr); } - - // Used only for test code - String[] getAlphabet() { - return this.alphabet; + } + + /** + * @return true if sql UPPER() is used in getUpperCaseSql(). Note that this is always false for + * postgres because postgres always use the icu_upper() function for all languages. + */ + public boolean usesUpperToGetUpperCase(boolean isPostgres) { + return !isPostgres && "upper(x)".equals(upper.getSql("x")); + } + + /** + * Returns the upper case value of the given value, or what would be the result of applying the + * sql expression in getUpperCaseSql() to the given value. + */ + public String getUpperCaseValue(String value, boolean isPostgres) { + String singleWidth = value; + if (this.hasDoubleWidth) { + singleWidth = toSingleWidth(value); } - - /** - * Return the rolodexIndex for a string. - * - * @param searchTerm Must be a 1-char string - * @return the rolodexIndex, including Other (i.e. getAlphabetLength) if it doesn't - * fall into a bucket. If this language doesn't have a rolodex (e.g. Arabic, - * Latvian, etc.) 
return -1 - * @throws IllegalArgumentException if the string is null or not of length 1 - */ - public int getRolodexIndexForChar(String searchTerm) { - if (searchTerm == null || searchTerm.length() != 1) { - throw new IllegalArgumentException("Must be a one-length string"); - } - - if (this.getAlphabetLength() == 0) { - return -1; - } - - for (int i = 0; i < this.getAlphabetLength(); i++) { - int comparison = this.collator.compare(searchTerm, this.getAlphabet(i)); - - if (comparison < 0) { - //If it's less than 'a', return Other - //Otherwise, it's less than the current index, but it wasn't 0 on the - // previous comparison, so return the previous rolodex letter. - return i == 0 ? this.getAlphabetLength() : (i - 1); - } else if (comparison == 0) { - return i; - } - } - return this.getAlphabetLength(); + if (isPostgres) { + return singleWidth.toUpperCase(this.locale); + } else { + return upper.toUpperCase(singleWidth); } - - /** - * Returns the sql expression to convert the given sql expression to upper case. - */ - public String getUpperCaseSql(String expr, boolean isPostgres) { - if (isPostgres) { - return "icu_upper(" + expr + ",'" + this.locale.toString() + "')"; - } else { - return upper.getSql(expr); - } + } + + private static final char[][] DOUBLE_TO_SINGLE = new char[256][]; + static { + DOUBLE_TO_SINGLE[0x20] = new char[256]; + DOUBLE_TO_SINGLE[0x20][0x18] = '`'; + DOUBLE_TO_SINGLE[0x20][0x19] = '\''; + DOUBLE_TO_SINGLE[0x20][0x1D] = '"'; + + DOUBLE_TO_SINGLE[0x22] = new char[256]; + DOUBLE_TO_SINGLE[0x22][0x3C] = '~'; + + DOUBLE_TO_SINGLE[0x30] = new char[256]; + DOUBLE_TO_SINGLE[0x30][0x00] = ' '; + + DOUBLE_TO_SINGLE[0xFE] = new char[256]; + DOUBLE_TO_SINGLE[0xFE][0x3F] = '^'; + + DOUBLE_TO_SINGLE[0xFF] = new char[256]; + DOUBLE_TO_SINGLE[0xFF][0x01] = '!'; + DOUBLE_TO_SINGLE[0xFF][0x03] = '#'; + DOUBLE_TO_SINGLE[0xFF][0x04] = '$'; + DOUBLE_TO_SINGLE[0xFF][0x05] = '%'; + DOUBLE_TO_SINGLE[0xFF][0x06] = '&'; + DOUBLE_TO_SINGLE[0xFF][0x08] = '('; + DOUBLE_TO_SINGLE[0xFF][0x09] = ')'; + DOUBLE_TO_SINGLE[0xFF][0x0A] = '*'; + DOUBLE_TO_SINGLE[0xFF][0x0B] = '+'; + DOUBLE_TO_SINGLE[0xFF][0x0C] = ','; + DOUBLE_TO_SINGLE[0xFF][0x0D] = '-'; + DOUBLE_TO_SINGLE[0xFF][0x0E] = '.'; + DOUBLE_TO_SINGLE[0xFF][0x0F] = '/'; + DOUBLE_TO_SINGLE[0xFF][0x10] = '0'; + DOUBLE_TO_SINGLE[0xFF][0x11] = '1'; + DOUBLE_TO_SINGLE[0xFF][0x12] = '2'; + DOUBLE_TO_SINGLE[0xFF][0x13] = '3'; + DOUBLE_TO_SINGLE[0xFF][0x14] = '4'; + DOUBLE_TO_SINGLE[0xFF][0x15] = '5'; + DOUBLE_TO_SINGLE[0xFF][0x16] = '6'; + DOUBLE_TO_SINGLE[0xFF][0x17] = '7'; + DOUBLE_TO_SINGLE[0xFF][0x18] = '8'; + DOUBLE_TO_SINGLE[0xFF][0x19] = '9'; + DOUBLE_TO_SINGLE[0xFF][0x1A] = ':'; + DOUBLE_TO_SINGLE[0xFF][0x1B] = ';'; + DOUBLE_TO_SINGLE[0xFF][0x1C] = '<'; + DOUBLE_TO_SINGLE[0xFF][0x1D] = '='; + DOUBLE_TO_SINGLE[0xFF][0x1E] = '>'; + DOUBLE_TO_SINGLE[0xFF][0x1F] = '?'; + DOUBLE_TO_SINGLE[0xFF][0x20] = '@'; + DOUBLE_TO_SINGLE[0xFF][0x21] = 'A'; + DOUBLE_TO_SINGLE[0xFF][0x22] = 'B'; + DOUBLE_TO_SINGLE[0xFF][0x23] = 'C'; + DOUBLE_TO_SINGLE[0xFF][0x24] = 'D'; + DOUBLE_TO_SINGLE[0xFF][0x25] = 'E'; + DOUBLE_TO_SINGLE[0xFF][0x26] = 'F'; + DOUBLE_TO_SINGLE[0xFF][0x27] = 'G'; + DOUBLE_TO_SINGLE[0xFF][0x28] = 'H'; + DOUBLE_TO_SINGLE[0xFF][0x29] = 'I'; + DOUBLE_TO_SINGLE[0xFF][0x2A] = 'J'; + DOUBLE_TO_SINGLE[0xFF][0x2B] = 'K'; + DOUBLE_TO_SINGLE[0xFF][0x2C] = 'L'; + DOUBLE_TO_SINGLE[0xFF][0x2D] = 'M'; + DOUBLE_TO_SINGLE[0xFF][0x2E] = 'N'; + DOUBLE_TO_SINGLE[0xFF][0x2F] = 'O'; + DOUBLE_TO_SINGLE[0xFF][0x30] = 'P'; + DOUBLE_TO_SINGLE[0xFF][0x31] = 'Q'; + 
DOUBLE_TO_SINGLE[0xFF][0x32] = 'R'; + DOUBLE_TO_SINGLE[0xFF][0x33] = 'S'; + DOUBLE_TO_SINGLE[0xFF][0x34] = 'T'; + DOUBLE_TO_SINGLE[0xFF][0x35] = 'U'; + DOUBLE_TO_SINGLE[0xFF][0x36] = 'V'; + DOUBLE_TO_SINGLE[0xFF][0x37] = 'W'; + DOUBLE_TO_SINGLE[0xFF][0x38] = 'X'; + DOUBLE_TO_SINGLE[0xFF][0x39] = 'Y'; + DOUBLE_TO_SINGLE[0xFF][0x3A] = 'Z'; + DOUBLE_TO_SINGLE[0xFF][0x3B] = '['; + DOUBLE_TO_SINGLE[0xFF][0x3C] = '\\'; + DOUBLE_TO_SINGLE[0xFF][0x3D] = ']'; + DOUBLE_TO_SINGLE[0xFF][0x3F] = '_'; + DOUBLE_TO_SINGLE[0xFF][0x41] = 'a'; + DOUBLE_TO_SINGLE[0xFF][0x42] = 'b'; + DOUBLE_TO_SINGLE[0xFF][0x43] = 'c'; + DOUBLE_TO_SINGLE[0xFF][0x44] = 'd'; + DOUBLE_TO_SINGLE[0xFF][0x45] = 'e'; + DOUBLE_TO_SINGLE[0xFF][0x46] = 'f'; + DOUBLE_TO_SINGLE[0xFF][0x47] = 'g'; + DOUBLE_TO_SINGLE[0xFF][0x48] = 'h'; + DOUBLE_TO_SINGLE[0xFF][0x49] = 'i'; + DOUBLE_TO_SINGLE[0xFF][0x4A] = 'j'; + DOUBLE_TO_SINGLE[0xFF][0x4B] = 'k'; + DOUBLE_TO_SINGLE[0xFF][0x4C] = 'l'; + DOUBLE_TO_SINGLE[0xFF][0x4D] = 'm'; + DOUBLE_TO_SINGLE[0xFF][0x4E] = 'n'; + DOUBLE_TO_SINGLE[0xFF][0x4F] = 'o'; + DOUBLE_TO_SINGLE[0xFF][0x50] = 'p'; + DOUBLE_TO_SINGLE[0xFF][0x51] = 'q'; + DOUBLE_TO_SINGLE[0xFF][0x52] = 'r'; + DOUBLE_TO_SINGLE[0xFF][0x53] = 's'; + DOUBLE_TO_SINGLE[0xFF][0x54] = 't'; + DOUBLE_TO_SINGLE[0xFF][0x55] = 'u'; + DOUBLE_TO_SINGLE[0xFF][0x56] = 'v'; + DOUBLE_TO_SINGLE[0xFF][0x57] = 'w'; + DOUBLE_TO_SINGLE[0xFF][0x58] = 'x'; + DOUBLE_TO_SINGLE[0xFF][0x59] = 'y'; + DOUBLE_TO_SINGLE[0xFF][0x5A] = 'z'; + DOUBLE_TO_SINGLE[0xFF][0x5B] = '{'; + DOUBLE_TO_SINGLE[0xFF][0x5C] = '|'; + DOUBLE_TO_SINGLE[0xFF][0x5D] = '}'; + } + + public static char toSingleWidth(char c) { + // Mask off high 2 bytes and index into char[][] + char[] cBucket = DOUBLE_TO_SINGLE[c >> 8]; + // If no bucket, then no translation so just use original char + if (cBucket == null) { + return c; } - - /** - * @return true if sql UPPER() is used in getUpperCaseSql(). Note that this is always false - * for postgres because postgres always use the icu_upper() function for all languages. - */ - public boolean usesUpperToGetUpperCase(boolean isPostgres) { - return !isPostgres && "upper(x)".equals(upper.getSql("x")); + // Mask off low 2 bytes and index into char[] + char cSingle = cBucket[c & 0x00ff]; + // If char at that index is zero, then no translation so just use original char + if (cSingle == 0) { + return c; } - - /** - * Returns the upper case value of the given value, or what would be the result - * of applying the sql expression in getUpperCaseSql() to the given value. - */ - public String getUpperCaseValue(String value, boolean isPostgres) { - String singleWidth = value; - if (this.hasDoubleWidth) { - singleWidth = toSingleWidth(value); - } - if (isPostgres) { - return singleWidth.toUpperCase(this.locale); - } else { - return upper.toUpperCase(singleWidth); - } + return cSingle; + } + + /** + * Convert double width ascii characters to single width. This is the equivalent of Oracle's + * to_single_byte(). 
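To make the lookup above concrete: toSingleWidth(char) uses the high byte of the character to pick a 256-entry bucket and the low byte to pick the entry, falling back to the original character when either is missing. A small self-contained sketch of the same idea with only two sample mappings (not the full table):

public class SingleWidthDemo {
  private static final char[][] TABLE = new char[256][];
  static {
    TABLE[0xFF] = new char[256];
    TABLE[0xFF][0x21] = 'A'; // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
    TABLE[0xFF][0x11] = '1'; // U+FF11 FULLWIDTH DIGIT ONE
  }

  static char toSingleWidth(char c) {
    char[] bucket = TABLE[c >> 8];      // high byte selects the bucket
    if (bucket == null) {
      return c;                         // no bucket: no translation
    }
    char mapped = bucket[c & 0x00ff];   // low byte selects the entry
    return mapped == 0 ? c : mapped;    // zero entry means "not mapped"
  }

  public static void main(String[] args) {
    System.out.println(toSingleWidth('\uFF21')); // A
    System.out.println(toSingleWidth('\uFF11')); // 1
    System.out.println(toSingleWidth('x'));      // x, unchanged
  }
}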
+ */ + public static String toSingleWidth(String value) { + int n = value.length(); + DeferredStringBuilder buf = new DeferredStringBuilder(value); + + for (int i = 0; i < n; i++) { + char c = value.charAt(i); + buf.append(toSingleWidth(c)); } - - private static final char[][] DOUBLE_TO_SINGLE = new char[256][]; - static { - DOUBLE_TO_SINGLE[0x20] = new char[256]; - DOUBLE_TO_SINGLE[0x20][0x18] = '`'; - DOUBLE_TO_SINGLE[0x20][0x19] = '\''; - DOUBLE_TO_SINGLE[0x20][0x1D] = '"'; - - DOUBLE_TO_SINGLE[0x22] = new char[256]; - DOUBLE_TO_SINGLE[0x22][0x3C] = '~'; - - DOUBLE_TO_SINGLE[0x30] = new char[256]; - DOUBLE_TO_SINGLE[0x30][0x00] = ' '; - - DOUBLE_TO_SINGLE[0xFE] = new char[256]; - DOUBLE_TO_SINGLE[0xFE][0x3F] = '^'; - - DOUBLE_TO_SINGLE[0xFF] = new char[256]; - DOUBLE_TO_SINGLE[0xFF][0x01] = '!'; - DOUBLE_TO_SINGLE[0xFF][0x03] = '#'; - DOUBLE_TO_SINGLE[0xFF][0x04] = '$'; - DOUBLE_TO_SINGLE[0xFF][0x05] = '%'; - DOUBLE_TO_SINGLE[0xFF][0x06] = '&'; - DOUBLE_TO_SINGLE[0xFF][0x08] = '('; - DOUBLE_TO_SINGLE[0xFF][0x09] = ')'; - DOUBLE_TO_SINGLE[0xFF][0x0A] = '*'; - DOUBLE_TO_SINGLE[0xFF][0x0B] = '+'; - DOUBLE_TO_SINGLE[0xFF][0x0C] = ','; - DOUBLE_TO_SINGLE[0xFF][0x0D] = '-'; - DOUBLE_TO_SINGLE[0xFF][0x0E] = '.'; - DOUBLE_TO_SINGLE[0xFF][0x0F] = '/'; - DOUBLE_TO_SINGLE[0xFF][0x10] = '0'; - DOUBLE_TO_SINGLE[0xFF][0x11] = '1'; - DOUBLE_TO_SINGLE[0xFF][0x12] = '2'; - DOUBLE_TO_SINGLE[0xFF][0x13] = '3'; - DOUBLE_TO_SINGLE[0xFF][0x14] = '4'; - DOUBLE_TO_SINGLE[0xFF][0x15] = '5'; - DOUBLE_TO_SINGLE[0xFF][0x16] = '6'; - DOUBLE_TO_SINGLE[0xFF][0x17] = '7'; - DOUBLE_TO_SINGLE[0xFF][0x18] = '8'; - DOUBLE_TO_SINGLE[0xFF][0x19] = '9'; - DOUBLE_TO_SINGLE[0xFF][0x1A] = ':'; - DOUBLE_TO_SINGLE[0xFF][0x1B] = ';'; - DOUBLE_TO_SINGLE[0xFF][0x1C] = '<'; - DOUBLE_TO_SINGLE[0xFF][0x1D] = '='; - DOUBLE_TO_SINGLE[0xFF][0x1E] = '>'; - DOUBLE_TO_SINGLE[0xFF][0x1F] = '?'; - DOUBLE_TO_SINGLE[0xFF][0x20] = '@'; - DOUBLE_TO_SINGLE[0xFF][0x21] = 'A'; - DOUBLE_TO_SINGLE[0xFF][0x22] = 'B'; - DOUBLE_TO_SINGLE[0xFF][0x23] = 'C'; - DOUBLE_TO_SINGLE[0xFF][0x24] = 'D'; - DOUBLE_TO_SINGLE[0xFF][0x25] = 'E'; - DOUBLE_TO_SINGLE[0xFF][0x26] = 'F'; - DOUBLE_TO_SINGLE[0xFF][0x27] = 'G'; - DOUBLE_TO_SINGLE[0xFF][0x28] = 'H'; - DOUBLE_TO_SINGLE[0xFF][0x29] = 'I'; - DOUBLE_TO_SINGLE[0xFF][0x2A] = 'J'; - DOUBLE_TO_SINGLE[0xFF][0x2B] = 'K'; - DOUBLE_TO_SINGLE[0xFF][0x2C] = 'L'; - DOUBLE_TO_SINGLE[0xFF][0x2D] = 'M'; - DOUBLE_TO_SINGLE[0xFF][0x2E] = 'N'; - DOUBLE_TO_SINGLE[0xFF][0x2F] = 'O'; - DOUBLE_TO_SINGLE[0xFF][0x30] = 'P'; - DOUBLE_TO_SINGLE[0xFF][0x31] = 'Q'; - DOUBLE_TO_SINGLE[0xFF][0x32] = 'R'; - DOUBLE_TO_SINGLE[0xFF][0x33] = 'S'; - DOUBLE_TO_SINGLE[0xFF][0x34] = 'T'; - DOUBLE_TO_SINGLE[0xFF][0x35] = 'U'; - DOUBLE_TO_SINGLE[0xFF][0x36] = 'V'; - DOUBLE_TO_SINGLE[0xFF][0x37] = 'W'; - DOUBLE_TO_SINGLE[0xFF][0x38] = 'X'; - DOUBLE_TO_SINGLE[0xFF][0x39] = 'Y'; - DOUBLE_TO_SINGLE[0xFF][0x3A] = 'Z'; - DOUBLE_TO_SINGLE[0xFF][0x3B] = '['; - DOUBLE_TO_SINGLE[0xFF][0x3C] = '\\'; - DOUBLE_TO_SINGLE[0xFF][0x3D] = ']'; - DOUBLE_TO_SINGLE[0xFF][0x3F] = '_'; - DOUBLE_TO_SINGLE[0xFF][0x41] = 'a'; - DOUBLE_TO_SINGLE[0xFF][0x42] = 'b'; - DOUBLE_TO_SINGLE[0xFF][0x43] = 'c'; - DOUBLE_TO_SINGLE[0xFF][0x44] = 'd'; - DOUBLE_TO_SINGLE[0xFF][0x45] = 'e'; - DOUBLE_TO_SINGLE[0xFF][0x46] = 'f'; - DOUBLE_TO_SINGLE[0xFF][0x47] = 'g'; - DOUBLE_TO_SINGLE[0xFF][0x48] = 'h'; - DOUBLE_TO_SINGLE[0xFF][0x49] = 'i'; - DOUBLE_TO_SINGLE[0xFF][0x4A] = 'j'; - DOUBLE_TO_SINGLE[0xFF][0x4B] = 'k'; - DOUBLE_TO_SINGLE[0xFF][0x4C] = 'l'; - DOUBLE_TO_SINGLE[0xFF][0x4D] = 'm'; - 
DOUBLE_TO_SINGLE[0xFF][0x4E] = 'n'; - DOUBLE_TO_SINGLE[0xFF][0x4F] = 'o'; - DOUBLE_TO_SINGLE[0xFF][0x50] = 'p'; - DOUBLE_TO_SINGLE[0xFF][0x51] = 'q'; - DOUBLE_TO_SINGLE[0xFF][0x52] = 'r'; - DOUBLE_TO_SINGLE[0xFF][0x53] = 's'; - DOUBLE_TO_SINGLE[0xFF][0x54] = 't'; - DOUBLE_TO_SINGLE[0xFF][0x55] = 'u'; - DOUBLE_TO_SINGLE[0xFF][0x56] = 'v'; - DOUBLE_TO_SINGLE[0xFF][0x57] = 'w'; - DOUBLE_TO_SINGLE[0xFF][0x58] = 'x'; - DOUBLE_TO_SINGLE[0xFF][0x59] = 'y'; - DOUBLE_TO_SINGLE[0xFF][0x5A] = 'z'; - DOUBLE_TO_SINGLE[0xFF][0x5B] = '{'; - DOUBLE_TO_SINGLE[0xFF][0x5C] = '|'; - DOUBLE_TO_SINGLE[0xFF][0x5D] = '}'; + return buf.toString(); + } + + /** + * Returns the sql expression to compute the linguistic sort collation key for the given sql + * expression. This supports sorting in the database, where sort order of different upper and + * lower cases are handled linguistically. + */ + public String getCollationKeySql(String expr, boolean isPostgres) { + if (isPostgres) { + return "icu_sortkey(" + expr + ",'" + this.locale.toString() + "')::text"; + } else { + return MessageFormat.format(this.collationKeySql, new Object[] { expr }); } - - public static char toSingleWidth(char c) { - // Mask off high 2 bytes and index into char[][] - char[] cBucket = DOUBLE_TO_SINGLE[c >> 8]; - // If no bucket, then no translation so just use original char - if (cBucket == null) { - return c; - } - // Mask off low 2 bytes and index into char[] - char cSingle = cBucket[c & 0x00ff]; - // If char at that index is zero, then no translation so just use original char - if (cSingle == 0) { - return c; - } - return cSingle; + } + + /** + * Returns the sql expression to compute the linguistic sort collation key for the upper case of + * given sql expression. This supports case-insensitive filtering in the database. + */ + public String getUpperCollationKeySql(String expr, boolean isPostgres) { + if ( + !isPostgres && String.format(upper.getSqlFormatString(), "{0}").equals(this.collationKeySql) + ) { + return getCollationKeySql(expr, false); } + return getCollationKeySql(getUpperCaseSql(expr, isPostgres), isPostgres); + } + + private String formatLetter(String letter, boolean isPostgres) { + return getCollationKeySql('\'' + letter + '\'', isPostgres); + } + + // + // Private Data + // + + // TODO: Make this an environment variable. + private static final boolean IS_MULTI_LINGUAL = + true; /* + * (SfdcEnvProvider.getEnv() == null || + * SfdcEnvProvider.getEnv().getIniFile().getString("Pages", "encoding").length() > 0); + */ + + static String[] getAlphabetFromICU(Locale locale) { + AlphabeticIndex index = new AlphabeticIndex(locale); + List alphabet = index.getBucketLabels(); + if (alphabet.size() > 6) { + // Strip off first and last (which are ...) 
+ List alphabetWithoutEllipses = alphabet.subList(1, alphabet.size() - 1); + return alphabetWithoutEllipses.toArray(new String[alphabetWithoutEllipses.size()]); + } else { + return new String[0]; + } + } + + /** + * You can't refer to a static defined inside the same class as an enum, so you need an inner + * class to have such constants These are the alphabets that cannot be auto-derived from ICU's + * CLDR information + */ + static final class Alphabets { + static final String[] ENGLISH = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", + "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; + static final String[] CATALAN = { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "H", "I", "J", + "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; + static final String[] BASQUE = { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "H", "I", "J", + "K", "L", "M", "N", "\u00D1", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; + static final String[] JAPANESE = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", + "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "\u30A2", "\u30AB", + "\u30B5", "\u30BF", "\u30CA", "\u30CF", "\u30DE", "\u30E4", "\u30E9", "\u30EF" }; + + // A, B, C, Cs, D, E, F, G, Gy, H, I, J, K, L, Ly, M, N, Ny, O, Ö, P, Q, R, S, Sz, T, + // Ty, U, Ü, V, W, X, Y, Z, Zs + static final String[] HUNGARIAN = { "A", "B", "C", "Cs", "D", "E", "F", "G", "Gy", "H", "I", + "J", "K", "L", "Ly", "M", "N", "Ny", "O", "\u00d6", "P", "Q", "R", "S", "Sz", "T", "Ty", "U", + "\u00dc", "V", "W", "X", "Y", "Z", "Zs" }; + + static final String[] TURKISH = + { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "\u011E", "H", "I", "\u0130", "J", "K", "L", + "M", "N", "O", "\u00D6", "P", "R", "S", "\u015E", "T", "U", "\u00DC", "V", "Y", "Z" }; + + // A, B, C, Ç, D, E, Ə, F, G, Ğ, H, X, I, İ, J, K, Q, L, M, N, O, Ö, P, R, S, Ş, T, + // U, Ü, V, Y, Z + static final String[] AZERBAIJANI = { "A", "B", "C", "\u00C7", "D", "E", "\u018F", "F", "G", + "\u011E", "H", "X", "I", "\u0130", "J", "K", "Q", "L", "M", "N", "O", "\u00D6", "P", "R", "S", + "\u015E", "T", "U", "\u00DC", "V", "Y", "Z" }; + + // Russian without Ё, Ы, Э + static final String[] BULGARIAN = { "\u0410", "\u0411", "\u0412", "\u0413", "\u0414", "\u0415", + "\u0416", "\u0417", "\u0418", "\u0419", "\u041a", "\u041b", "\u041c", "\u041d", "\u041e", + "\u041f", "\u0420", "\u0421", "\u0422", "\u0423", "\u0424", "\u0425", "\u0426", "\u0427", + "\u0428", "\u0429", "\u042a", "\u042c", "\u042e", "\u042f" }; + + // A B C Č Ć D Đ Dž E F G H I J K L Lj M N Nj O P R S Š T U V Z Ž + static final String[] SERBIAN_LATIN = { "A", "B", "C", "\u010c", "\u0106", "D", "\u0110", + "D\u017e", "E", "F", "G", "H", "I", "J", "K", "L", "Lj", "M", "N", "Nj", "O", "P", "R", "S", + "\u0160", "T", "U", "V", "Z", "\u017d" }; + + // A Á Ä B C Č D Ď DZ DŽ E É F G H CH I Í J K L Ĺ Ľ M N Ň O Ó Ô P Q R Ŕ S Š T Ť U Ú V W + // X Y Ý Z Ž + static final String[] SLOVAK = { "A", "\u00c1", "\u00c4", "B", "C", "\u010c", "D", "\u010e", + "DZ", "D\u017d", "E", "\u00c9", "F", "G", "H", "CH", "I", "\u00cd", "J", "K", "L", "\u0139", + "\u013d", "M", "N", "\u0147", "O", "\u00d3", "\u00d4", "P", "Q", "R", "\u0154", "S", "\u0160", + "T", "\u0164", "U", "\u00da", "V", "W", "X", "Y", "\u00dd", "Z", "\u017d" }; + + // ა ბ გ დ ე ვ ზ თ ი კ ლ მ ნ ო პ ჟ რ ს ტ უ ფ ქ ღ .ყ შ ჩ ც ძ წ ჭ ხ ჯ ჰ + static final String[] GEORGIAN = { "\u10d0", "\u10d1", "\u10d2", "\u10d3", "\u10d4", "\u10d5", + "\u10d6", "\u10d7", "\u10d8", 
"\u10d9", "\u10da", "\u10db", "\u10dc", "\u10dd", "\u10de", + "\u10df", "\u10e0", "\u10e1", "\u10e2", "\u10e3", "\u10e4", "\u10e5", "\u10e6", "\u10e7", + "\u10e8", "\u10e9", "\u10ea", "\u10eb", "\u10ec", "\u10ed", "\u10ee", "\u10ef", "\u10f0" }; + + // A B C D E F G H I J K L M N O P Q R S Š Z Ž T U V W Õ Ä Ö Ü X Y + static final String[] ESTONIAN = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", + "M", "N", "O", "P", "Q", "R", "S", "\u0160", "Z", "\u017d", "T", "U", "V", "W", "\u00d5", + "\u00c4", "\u00d6", "\u00dc", "X", "Y" }; + + // A Á B D Ð E É F G H I Í J K L M N O Ó P R S T U Ú V X Y Ý Þ Æ Ö + static final String[] ICELANDIC = { "A", "\u00c1", "B", "D", "\u00d0", "E", "\u00c9", "F", "G", + "H", "I", "\u00cd", "J", "K", "L", "M", "N", "O", "\u00d3", "P", "R", "S", "T", "U", "\u00da", + "V", "X", "Y", "\u00dd", "\u00de", "\u00c6", "\u00d6" }; + + // A Ā B C Č D E Ē F G Ģ H I Ī J K Ķ L Ļ M N Ņ O P R S Š T U Ū V Z Ž + static final String[] LATVIAN = { "A", "\u0100", "B", "C", "\u010c", "D", "E", "\u0112", "F", + "G", "\u0122", "H", "I", "\u012a", "J", "K", "\u0136", "L", "\u013b", "M", "N", "\u0145", "O", + "P", "R", "S", "\u0160", "T", "U", "\u016a", "V", "Z", "\u017d" }; + + // A \u0104 B C \u010c D E \u0118 \u0116 F G H I \u012e Y J K L M N O P R S \u0160 T U + // \u0172 \u016a V Z \u017d + static final String[] LUXEMBOURGISH = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", + "L", "M", "N", "O", "P", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "Ä", "Ë", "É" }; + + // Russian with Ң, Ө, Ү + static final String[] KYRGYZ = + { "\u0410", "\u0411", "\u0412", "\u0413", "\u0414", "\u0415", "\u0401", "\u0416", "\u0417", + "\u0418", "\u0419", "\u041a", "\u041b", "\u041c", "\u041d", "\u04a2", "\u041e", "\u04e8", + "\u041f", "\u0420", "\u0421", "\u0422", "\u0423", "\u04ae", "\u0424", "\u0425", "\u0426", + "\u0427", "\u0428", "\u0429", "\u042a", "\u042b", "\u042c", "\u042d", "\u042e", "\u042f" }; + + // Kyrgyz with Ә, Ғ, Ұ, Һ, І (ICU4J doesn't have some of these characters for sorting...) + static final String[] KAZAKH = { "\u0410", "\u04d8", "\u0411", "\u0412", "\u0413", "\u0492", + "\u0414", "\u0415", "\u0401", "\u0416", "\u0417", "\u0418", "\u0419", "\u041a", "\u049a", + "\u041b", "\u041c", "\u041d", "\u04a2", "\u041e", "\u04e8", "\u041f", "\u0420", "\u0421", + "\u0422", "\u0423", "\u04b0", "\u04ae", "\u0424", "\u0425", "\u04ba", "\u0426", "\u0427", + "\u0428", "\u0429", "\u042a", "\u042b", "\u0406", "\u042c", "\u042d", "\u042e", "\u042f" }; + + // Cyrillic Variant + static final String[] TAJIK = + { "\u0410", "\u0411", "\u0412", "\u0413", "\u0492", "\u0414", "\u0415", "\u0401", "\u0416", + "\u0417", "\u0418", "\u04e2", "\u0419", "\u041a", "\u049a", "\u041b", "\u041c", "\u041d", + "\u041e", "\u041f", "\u0420", "\u0421", "\u0422", "\u0423", "\u04ee", "\u0424", "\u0425", + "\u04b2", "\u0427", "\u04b6", "\u0428", "\u042a", "\u042d", "\u042e", "\u042f" }; + + // اآبپتٹثجچحخدڈذرڑزژسشصضطظعغفقکگلمنوەھ۶ىے + static final String[] URDU = new String[] { "\u0627", "\u0622", "\u0628", "\u067e", "\u062a", + "\u0679", "\u062b", "\u062c", "\u0686", "\u062d", "\u062e", "\u062f", "\u0688", "\u0630", + "\u0631", "\u0691", "\u0632", "\u0698", "\u0633", "\u0634", "\u0635", "\u0636", "\u0637", + "\u0638", "\u0639", "\u063a", "\u0641", "\u0642", "\u06a9", "\u06af", "\u0644", "\u0645", + "\u0646", "\u0648", "\u06d5", "\u06be", "\u06f6", "\u0649", "\u06d2" }; + + // W-1308726: removed Ö and Ü; oracle treats them as the same characters as O and U. 
+ // A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, ß, T, U, V, W, X, Y, Z + static final String[] GERMAN = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", + "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; + + // ক,খ,গ,ঘ,ঙ,চ,ছ,জ,ঝ,ঞ,ট,ঠ,ড,ঢ,ণ,ত,দ,ধ,ন,প,ফ,ব,ভ,ম,য,র,ল,শ,ষ,স,হ,য়,ড়,ঢ,অ, + // আ,ই,ঈ,উ,ঊ,ঋ,ৠ,এ,ঐ,ও,ঔ + static final String[] BENGALI = { "\u0995", "\u0996", "\u0997", "\u0998", "\u0999", "\u099a", + "\u099b", "\u099c", "\u099d", "\u099e", "\u099f", "\u09a0", "\u09a1", "\u09a2", "\u09a3", + "\u09a4", "\u09a6", "\u09a7", "\u09a8", "\u09aa", "\u09ab", "\u09ac", "\u09ad", "\u09ae", + "\u09af", "\u09b0", "\u09b2", "\u09b6", "\u09b7", "\u09b8", "\u09b9", "\u09af\u09bc", + "\u09a1\u09bc", "\u09a2", "\u0985", "\u0986", "\u0987", "\u0988", "\u0989", "\u098a", + "\u098b", "\u09e0", "\u098f", "\u0990", "\u0993", "\u0994" }; + + // A, Ą, B, C, Č, D, E, Ę, Ė, F, G, H, I, Į, Y, J, K, L, M, N, O, P, R, S, Š, T, U, Ų, + // Ū, V, Z, Ž + static final String[] LITHUANIAN = { "A", "\u0104", "B", "C", "\u010c", "D", "E", "\u0118", + "\u0116", "F", "G", "H", "I", "\u012e", "Y", "J", "K", "L", "M", "N", "O", "P", "R", "S", + "\u0160", "T", "U", "\u0172", "\u016a", "V", "Z", "\u017d" }; + + // A, B, C, Č, D, E, F, G, H, I, J, K, L, M, N, O, P, R, S, Š, T, U, V, Z, Ž + static final String[] SLOVENE = { "A", "B", "C", "\u010c", "D", "E", "F", "G", "H", "I", "J", + "K", "L", "M", "N", "O", "P", "R", "S", "\u0160", "T", "U", "V", "Z", "\u017d" }; + + // Contains "TAMIL LETTER"s from http://www.unicode.org/charts/PDF/U0B80.pdf + // அ, ஆ, இ, ஈ, உ, ஊ, எ, ஏ, ஐ, ஒ, ஓ, ஔ, க, ங, ச, ஜ, ஞ, + // ட, ண, த, ந, ன, ப, ம, ய, ர, ற, ல, ள, ழ, வ, ஶ, ஷ, ஸ, ஹ + static final String[] TAMIL = + { "\u0B85", "\u0B86", "\u0B87", "\u0B88", "\u0B89", "\u0B8A", "\u0B8E", "\u0B8F", "\u0B90", + "\u0B92", "\u0B93", "\u0B94", "\u0B95", "\u0B99", "\u0B9A", "\u0B9C", "\u0B9E", "\u0B9F", + "\u0BA3", "\u0BA4", "\u0BA8", "\u0BA9", "\u0BAA", "\u0BAE", "\u0BAF", "\u0BB0", "\u0BB1", + "\u0BB2", "\u0BB3", "\u0BB4", "\u0BB5", "\u0BB6", "\u0BB7", "\u0BB8", "\u0BB9" }; + + static final String STRING = "upper({0})"; + + static final String[] JAPANESE_ROLODEX = { + // Notes: unistr('\xxxx') is the Oracle sql expression to get unicode + // character by code point. + // Two backslashes are converted to one backslash by java compiler. + /* 'A' */"unistr('\\3041')", /* 'Ka' */"unistr('\\30F5')", /* 'Sa' */"unistr('\\3055')", + /* 'Ta' */"unistr('\\305F')", /* 'Na' */"unistr('\\306A')", /* 'Ha' */"unistr('\\306F')", + /* 'Ma' */"unistr('\\307E')", /* 'Ya' */"unistr('\\3084')", /* 'Ra' */"unistr('\\3089')", + /* 'Wa' */"unistr('\\308E')", "unistr('\\309D')" }; + + // Notes: unistr('\xxxx') is the Oracle sql expression to get unicode character + // by code point. Two backslashes are converted to one backslash by java compiler. + static final String[] JAPANESE_ROLODEX_JAVA = { /* 'A' */"\u3041", /* 'Ka' */"\u30F5", + /* 'Sa' */"\u3055", /* 'Ta' */"\u305F", /* 'Na" */"\u306A", /* 'Ha' */"\u306F", + /* 'Ma' */"\u307E", /* 'Ya' */"\u3084", /* 'Ra' */"\u3089", /* 'Wa' */"\u308E", "\u3001" // this + // is + // the + // first + // character + // after + // the + // last + // valid + // kana + // in + // java + }; + } + + /** + * Apex and possibly other things collate based on upper case versions of strings. 
Always upper + * casing and then comparing is slow, though, so this method is intended to return a collator that + * is consistent with uppper-case-then-compare while perhaps doing something more efficient + */ + public Collator getUpperCaseCollator(final boolean isPostgres) { + final Collator innerCollator = getCollator(); + + // so far, the best I've been able to do that doesn't break sort order is to special + // case the english locale and scan for non-ascii characters before deciding how to + // proceed. With some work the same basic idea would work in many other locales but + // it would be very nice to find a more general and faster approach. The challenge + // is that upper casing effectively "normalizes" strings in a way that is very hard + // to replicate - for instance, western ligatures tend to get expanded by upper casing + // but Hangul ones don't. Even when that's all sorted out there's the issue that the + // built in collation rules for various locales are fairly narrowly focused. So, for + // instance, the English locale doesn't have rules for sorting Greek. With a case + // insensitive compare in the English locale, lower case Greek letters sort + // differently from upper case Greek letters but the English locale does upper case + // Greek letters. + if (!isPostgres && getLocale() == Locale.ENGLISH) { + innerCollator.setStrength(Collator.SECONDARY); + return new Collator() { + @Override + public int compare(String source, String target) { + // upper case only strings where the SECONDARY strength comparison + // (case insensitive comparison) is possibly different for upper + // cased and non upper cased strings + return innerCollator.compare(getUpperCaseIfNeeded(source), getUpperCaseIfNeeded(target)); + } - /** - * Convert double width ascii characters to single width. - * This is the equivalent of Oracle's to_single_byte(). - */ - public static String toSingleWidth(String value) { - int n = value.length(); - DeferredStringBuilder buf = new DeferredStringBuilder(value); - - for (int i = 0; i < n; i++) { - char c = value.charAt(i); - buf.append(toSingleWidth(c)); + /** + * Upper cases on any non-ascii character + */ + private String getUpperCaseIfNeeded(String string) { + for (int i = 0; i < string.length(); i++) { + final char ch = string.charAt(i); + if (ch > 127) { + // non-ascii character, bail and use the upper case version + return getUpperCaseValue(string, false); + } + } + // no non-ascii characters found, we don't need to upper case + // - sorting with strength SECONDARY is equivalent. + return string; } - return buf.toString(); - } - /** - * Returns the sql expression to compute the linguistic sort collation key for the - * given sql expression. This supports sorting in the database, where sort order - * of different upper and lower cases are handled linguistically. - */ - public String getCollationKeySql(String expr, boolean isPostgres) { - if (isPostgres) { - return "icu_sortkey(" + expr + ",'" + this.locale.toString() + "')::text"; - } else { - return MessageFormat.format(this.collationKeySql, new Object[] { expr }); + @Override + public CollationKey getCollationKey(String source) { + return innerCollator.getCollationKey(getUpperCaseIfNeeded(source)); } - } - /** - * Returns the sql expression to compute the linguistic sort collation key for the - * upper case of given sql expression. This supports case-insensitive filtering - * in the database. 
- */ - public String getUpperCollationKeySql(String expr, boolean isPostgres) { - if (!isPostgres && String.format(upper.getSqlFormatString(), "{0}") - .equals(this.collationKeySql)) { - return getCollationKeySql(expr, false); + @Override + public int hashCode() { + return LinguisticSort.this.hashCode(); } - return getCollationKeySql(getUpperCaseSql(expr, isPostgres), isPostgres); - } - private String formatLetter(String letter, boolean isPostgres) { - return getCollationKeySql('\'' + letter + '\'', isPostgres); - } + @Override + public boolean equals(Object that) { + return super.equals(that); + } + }; + } else { + return new Collator() { + @Override + public int compare(String source, String target) { + return innerCollator.compare(getUpperCaseValue(source, isPostgres), + getUpperCaseValue(target, isPostgres)); + } - // - // Private Data - // - - // TODO: Make this an environment variable. - private static final boolean IS_MULTI_LINGUAL = true; /*(SfdcEnvProvider.getEnv() == null || - SfdcEnvProvider.getEnv().getIniFile().getString("Pages", "encoding").length() > 0);*/ - - static String[] getAlphabetFromICU(Locale locale) { - AlphabeticIndex index = new AlphabeticIndex(locale); - List alphabet = index.getBucketLabels(); - if (alphabet.size() > 6) { - // Strip off first and last (which are ...) - List alphabetWithoutEllipses = alphabet.subList(1, alphabet.size() - 1); - return alphabetWithoutEllipses.toArray(new String[alphabetWithoutEllipses.size()]); - } else { - return new String[0]; + @Override + public CollationKey getCollationKey(String source) { + return innerCollator.getCollationKey(getUpperCaseValue(source, isPostgres)); } - } - /** - * You can't refer to a static defined inside the same class as an enum, so you need an - * inner class to have such constants - * These are the alphabets that cannot be auto-derived from ICU's CLDR information - */ - static final class Alphabets { - static final String[] ENGLISH = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", - "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; - static final String[] CATALAN = { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "H", "I", - "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; - static final String[] BASQUE = { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "H", "I", - "J", "K", "L", "M", "N", "\u00D1", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", - "Y", "Z" }; - static final String[] JAPANESE = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", - "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "\u30A2", - "\u30AB", "\u30B5", "\u30BF", "\u30CA", "\u30CF", "\u30DE", "\u30E4", "\u30E9", - "\u30EF" }; - - // A, B, C, Cs, D, E, F, G, Gy, H, I, J, K, L, Ly, M, N, Ny, O, Ö, P, Q, R, S, Sz, T, - // Ty, U, Ü, V, W, X, Y, Z, Zs - static final String[] HUNGARIAN = { "A", "B", "C", "Cs", "D", "E", "F", "G", "Gy", "H", - "I", "J", "K", "L", "Ly", "M", "N", "Ny", "O", "\u00d6", "P", "Q", "R", "S", "Sz", - "T", "Ty", "U", "\u00dc", "V", "W", "X", "Y", "Z", "Zs" }; - - static final String[] TURKISH = { "A", "B", "C", "\u00C7", "D", "E", "F", "G", "\u011E", - "H", "I", "\u0130", "J", "K", "L", "M", "N", "O", "\u00D6", "P", "R", "S", "\u015E", - "T", "U", "\u00DC", "V", "Y", "Z" }; - - // A, B, C, Ç, D, E, Ə, F, G, Ğ, H, X, I, İ, J, K, Q, L, M, N, O, Ö, P, R, S, Ş, T, - // U, Ü, V, Y, Z - static final String[] AZERBAIJANI = { "A", "B", "C", "\u00C7", "D", "E", "\u018F", "F", - "G", "\u011E", "H", "X", "I", "\u0130", 
"J", "K", "Q", "L", "M", "N", "O", "\u00D6", - "P", "R", "S", "\u015E", "T", "U", "\u00DC", "V", "Y", "Z" }; - - // Russian without Ё, Ы, Э - static final String[] BULGARIAN = { "\u0410", "\u0411", "\u0412", "\u0413", "\u0414", - "\u0415", "\u0416", "\u0417", "\u0418", "\u0419", "\u041a", "\u041b", "\u041c", - "\u041d", "\u041e", "\u041f", "\u0420", "\u0421", "\u0422", "\u0423", "\u0424", - "\u0425", "\u0426", "\u0427", "\u0428", "\u0429", "\u042a", "\u042c", "\u042e", - "\u042f" }; - - // A B C Č Ć D Đ Dž E F G H I J K L Lj M N Nj O P R S Š T U V Z Ž - static final String[] SERBIAN_LATIN = { "A", "B", "C", "\u010c", "\u0106", "D", "\u0110", - "D\u017e", "E", "F", "G", "H", "I", "J", "K", "L", "Lj", "M", "N", "Nj", "O", "P", "R", - "S", "\u0160", "T", "U", "V", "Z", "\u017d" }; - - // A Á Ä B C Č D Ď DZ DŽ E É F G H CH I Í J K L Ĺ Ľ M N Ň O Ó Ô P Q R Ŕ S Š T Ť U Ú V W - // X Y Ý Z Ž - static final String[] SLOVAK = { "A", "\u00c1", "\u00c4", "B", "C", "\u010c", "D", - "\u010e", "DZ", "D\u017d", "E", "\u00c9", "F", "G", "H", "CH", "I", "\u00cd", "J", - "K", "L", "\u0139", "\u013d", "M", "N", "\u0147", "O", "\u00d3", "\u00d4", "P", "Q", - "R", "\u0154", "S", "\u0160", "T", "\u0164", "U", "\u00da", "V", "W", "X", "Y", - "\u00dd", "Z", "\u017d" }; - - // ა ბ გ დ ე ვ ზ თ ი კ ლ მ ნ ო პ ჟ რ ს ტ უ ფ ქ ღ .ყ შ ჩ ც ძ წ ჭ ხ ჯ ჰ - static final String[] GEORGIAN = { "\u10d0", "\u10d1", "\u10d2", "\u10d3", "\u10d4", - "\u10d5", "\u10d6", "\u10d7", "\u10d8", "\u10d9", "\u10da", "\u10db", "\u10dc", - "\u10dd", "\u10de", "\u10df", "\u10e0", "\u10e1", "\u10e2", "\u10e3", "\u10e4", - "\u10e5", "\u10e6", "\u10e7", "\u10e8", "\u10e9", "\u10ea", "\u10eb", "\u10ec", - "\u10ed", "\u10ee", "\u10ef", "\u10f0" }; - - // A B C D E F G H I J K L M N O P Q R S Š Z Ž T U V W Õ Ä Ö Ü X Y - static final String[] ESTONIAN = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", - "L", "M", "N", "O", "P", "Q", "R", "S", "\u0160", "Z", "\u017d", "T", "U", "V", "W", - "\u00d5", "\u00c4", "\u00d6", "\u00dc", "X", "Y" }; - - // A Á B D Ð E É F G H I Í J K L M N O Ó P R S T U Ú V X Y Ý Þ Æ Ö - static final String[] ICELANDIC = { "A", "\u00c1", "B", "D", "\u00d0", "E", "\u00c9", "F", - "G", "H", "I", "\u00cd", "J", "K", "L", "M", "N", "O", "\u00d3", "P", "R", "S", "T", - "U", "\u00da", "V", "X", "Y", "\u00dd", "\u00de", "\u00c6", "\u00d6" }; - - // A Ā B C Č D E Ē F G Ģ H I Ī J K Ķ L Ļ M N Ņ O P R S Š T U Ū V Z Ž - static final String[] LATVIAN = { "A", "\u0100", "B", "C", "\u010c", "D", "E", "\u0112", - "F", "G", "\u0122", "H", "I", "\u012a", "J", "K", "\u0136", "L", "\u013b", "M", "N", - "\u0145", "O", "P", "R", "S", "\u0160", "T", "U", "\u016a", "V", "Z", "\u017d" }; - - // A \u0104 B C \u010c D E \u0118 \u0116 F G H I \u012e Y J K L M N O P R S \u0160 T U - // \u0172 \u016a V Z \u017d - static final String[] LUXEMBOURGISH = { "A", "B", "C", "D", "E", "F", "G", "H", "I", - "J", "K", "L", "M", "N", "O", "P", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "Ä", "Ë", "É" }; - - // Russian with Ң, Ө, Ү - static final String[] KYRGYZ = { "\u0410", "\u0411", "\u0412", "\u0413", "\u0414", - "\u0415", "\u0401", "\u0416", "\u0417", "\u0418", "\u0419", "\u041a", "\u041b", - "\u041c", "\u041d", "\u04a2", "\u041e", "\u04e8", "\u041f", "\u0420", "\u0421", - "\u0422", "\u0423", "\u04ae", "\u0424", "\u0425", "\u0426", "\u0427", "\u0428", - "\u0429", "\u042a", "\u042b", "\u042c", "\u042d", "\u042e", "\u042f" }; - - // Kyrgyz with Ә, Ғ, Ұ, Һ, І (ICU4J doesn't have some of these characters for sorting...) 
- static final String[] KAZAKH = { "\u0410", "\u04d8", "\u0411", "\u0412", "\u0413", - "\u0492", "\u0414", "\u0415", "\u0401", "\u0416", "\u0417", "\u0418", "\u0419", - "\u041a", "\u049a", "\u041b", "\u041c", "\u041d", "\u04a2", "\u041e", "\u04e8", - "\u041f", "\u0420", "\u0421", "\u0422", "\u0423", "\u04b0", "\u04ae", "\u0424", - "\u0425", "\u04ba", "\u0426", "\u0427", "\u0428", "\u0429", "\u042a", "\u042b", - "\u0406", "\u042c", "\u042d", "\u042e", "\u042f" }; - - // Cyrillic Variant - static final String[] TAJIK = { "\u0410", "\u0411", "\u0412", "\u0413", "\u0492", "\u0414", - "\u0415", "\u0401", "\u0416", "\u0417", "\u0418", "\u04e2", "\u0419", "\u041a", - "\u049a", "\u041b", "\u041c", "\u041d", "\u041e", "\u041f", "\u0420", "\u0421", - "\u0422", "\u0423", "\u04ee", "\u0424", "\u0425", "\u04b2", "\u0427", "\u04b6", - "\u0428", "\u042a", "\u042d", "\u042e", "\u042f" }; - - // اآبپتٹثجچحخدڈذرڑزژسشصضطظعغفقکگلمنوەھ۶ىے - static final String[] URDU = new String[] {"\u0627", "\u0622", "\u0628", "\u067e", - "\u062a", "\u0679", "\u062b", "\u062c", "\u0686", "\u062d", "\u062e", "\u062f", - "\u0688", "\u0630", "\u0631", "\u0691", "\u0632", "\u0698", "\u0633", "\u0634", - "\u0635", "\u0636", "\u0637", "\u0638", "\u0639", "\u063a", "\u0641", "\u0642", - "\u06a9", "\u06af", "\u0644", "\u0645", "\u0646", "\u0648", "\u06d5", "\u06be", - "\u06f6", "\u0649", "\u06d2" }; - - // W-1308726: removed Ö and Ü; oracle treats them as the same characters as O and U. - // A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, ß, T, U, V, W, X, Y, Z - static final String[] GERMAN = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", - "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" }; - - // ক,খ,গ,ঘ,ঙ,চ,ছ,জ,ঝ,ঞ,ট,ঠ,ড,ঢ,ণ,ত,দ,ধ,ন,প,ফ,ব,ভ,ম,য,র,ল,শ,ষ,স,হ,য়,ড়,ঢ,অ, - // আ,ই,ঈ,উ,ঊ,ঋ,ৠ,এ,ঐ,ও,ঔ - static final String[] BENGALI = { "\u0995", "\u0996", "\u0997", "\u0998", "\u0999", - "\u099a", "\u099b", "\u099c", "\u099d", "\u099e", "\u099f", "\u09a0", "\u09a1", - "\u09a2", "\u09a3", "\u09a4", "\u09a6", "\u09a7", "\u09a8", "\u09aa", "\u09ab", - "\u09ac", "\u09ad", "\u09ae", "\u09af", "\u09b0", "\u09b2", "\u09b6", "\u09b7", - "\u09b8", "\u09b9", "\u09af\u09bc", "\u09a1\u09bc", "\u09a2", "\u0985", "\u0986", - "\u0987", "\u0988", "\u0989", "\u098a", "\u098b", "\u09e0", "\u098f", "\u0990", - "\u0993", "\u0994" }; - - // A, Ą, B, C, Č, D, E, Ę, Ė, F, G, H, I, Į, Y, J, K, L, M, N, O, P, R, S, Š, T, U, Ų, - // Ū, V, Z, Ž - static final String[] LITHUANIAN = { "A", "\u0104", "B", "C", "\u010c", "D", "E", "\u0118", - "\u0116", "F", "G", "H", "I", "\u012e", "Y", "J", "K", "L", "M", "N", "O", "P", "R", - "S", "\u0160", "T", "U", "\u0172", "\u016a", "V", "Z", "\u017d" }; - - // A, B, C, Č, D, E, F, G, H, I, J, K, L, M, N, O, P, R, S, Š, T, U, V, Z, Ž - static final String[] SLOVENE = { "A", "B", "C", "\u010c", "D", "E", "F", "G", "H", "I", - "J", "K", "L", "M", "N", "O", "P", "R", "S", "\u0160", "T", "U", "V", "Z", "\u017d" }; - - // Contains "TAMIL LETTER"s from http://www.unicode.org/charts/PDF/U0B80.pdf - //அ, ஆ, இ, ஈ, உ, ஊ, எ, ஏ, ஐ, ஒ, ஓ, ஔ, க, ங, ச, ஜ, ஞ, - //ட, ண, த, ந, ன, ப, ம, ய, ர, ற, ல, ள, ழ, வ, ஶ, ஷ, ஸ, ஹ - static final String[] TAMIL = { "\u0B85", "\u0B86", "\u0B87", "\u0B88", "\u0B89", "\u0B8A", - "\u0B8E", "\u0B8F", "\u0B90", "\u0B92", "\u0B93", "\u0B94", "\u0B95", "\u0B99", - "\u0B9A", "\u0B9C", "\u0B9E", "\u0B9F", "\u0BA3", "\u0BA4", "\u0BA8", "\u0BA9", - "\u0BAA", "\u0BAE", "\u0BAF", "\u0BB0", "\u0BB1", "\u0BB2", "\u0BB3", "\u0BB4", - "\u0BB5", "\u0BB6", "\u0BB7", "\u0BB8", "\u0BB9" 
}; - - static final String STRING = "upper({0})"; - - static final String[] JAPANESE_ROLODEX = { - // Notes: unistr('\xxxx') is the Oracle sql expression to get unicode - // character by code point. - // Two backslashes are converted to one backslash by java compiler. - /* 'A' */"unistr('\\3041')", - /* 'Ka' */"unistr('\\30F5')", - /* 'Sa' */"unistr('\\3055')", - /* 'Ta' */"unistr('\\305F')", - /* 'Na' */"unistr('\\306A')", - /* 'Ha' */"unistr('\\306F')", - /* 'Ma' */"unistr('\\307E')", - /* 'Ya' */"unistr('\\3084')", - /* 'Ra' */"unistr('\\3089')", - /* 'Wa' */"unistr('\\308E')", "unistr('\\309D')" }; - - // Notes: unistr('\xxxx') is the Oracle sql expression to get unicode character - // by code point. Two backslashes are converted to one backslash by java compiler. - static final String[] JAPANESE_ROLODEX_JAVA = { - /* 'A' */"\u3041", - /* 'Ka' */"\u30F5", - /* 'Sa' */"\u3055", - /* 'Ta' */"\u305F", - /* 'Na" */"\u306A", - /* 'Ha' */"\u306F", - /* 'Ma' */"\u307E", - /* 'Ya' */"\u3084", - /* 'Ra' */"\u3089", - /* 'Wa' */"\u308E", - "\u3001" // this is the first character after the last valid kana in java - }; - } + @Override + public int hashCode() { + return LinguisticSort.this.hashCode(); + } - /** - * Apex and possibly other things collate based on upper case versions of strings. - * Always upper casing and then comparing is slow, though, so this method is intended - * to return a collator that is consistent with uppper-case-then-compare while perhaps - * doing something more efficient - */ - public Collator getUpperCaseCollator(final boolean isPostgres) { - final Collator innerCollator = getCollator(); - - // so far, the best I've been able to do that doesn't break sort order is to special - // case the english locale and scan for non-ascii characters before deciding how to - // proceed. With some work the same basic idea would work in many other locales but - // it would be very nice to find a more general and faster approach. The challenge - // is that upper casing effectively "normalizes" strings in a way that is very hard - // to replicate - for instance, western ligatures tend to get expanded by upper casing - // but Hangul ones don't. Even when that's all sorted out there's the issue that the - // built in collation rules for various locales are fairly narrowly focused. So, for - // instance, the English locale doesn't have rules for sorting Greek. With a case - // insensitive compare in the English locale, lower case Greek letters sort - // differently from upper case Greek letters but the English locale does upper case - // Greek letters. - if (!isPostgres && getLocale() == Locale.ENGLISH) { - innerCollator.setStrength(Collator.SECONDARY); - return new Collator() { - @Override - public int compare(String source, String target) { - // upper case only strings where the SECONDARY strength comparison - // (case insensitive comparison) is possibly different for upper - // cased and non upper cased strings - return innerCollator.compare(getUpperCaseIfNeeded(source), - getUpperCaseIfNeeded(target)); - } - - /** - * Upper cases on any non-ascii character - */ - private String getUpperCaseIfNeeded(String string) { - for (int i = 0; i < string.length(); i++) { - final char ch = string.charAt(i); - if (ch > 127) { - // non-ascii character, bail and use the upper case version - return getUpperCaseValue(string, false); - } - } - // no non-ascii characters found, we don't need to upper case - // - sorting with strength SECONDARY is equivalent. 
- return string; - } - - @Override - public CollationKey getCollationKey(String source) { - return innerCollator.getCollationKey(getUpperCaseIfNeeded(source)); - } - - @Override - public int hashCode() { - return LinguisticSort.this.hashCode(); - } - - @Override - public boolean equals(Object that) { - return super.equals(that); - } - }; - } else { - return new Collator() { - @Override - public int compare(String source, String target) { - return innerCollator.compare(getUpperCaseValue(source, isPostgres), - getUpperCaseValue(target, isPostgres)); - } - - @Override - public CollationKey getCollationKey(String source) { - return innerCollator.getCollationKey(getUpperCaseValue(source, isPostgres)); - } - - @Override - public int hashCode() { - return LinguisticSort.this.hashCode(); - } - - @Override - public boolean equals(Object that) { - return super.equals(that); - } - }; + @Override + public boolean equals(Object that) { + return super.equals(that); } + }; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LocaleUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LocaleUtils.java index b07e5b66206..cb709d8572a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LocaleUtils.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/LocaleUtils.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,61 +27,59 @@ /** * This utility class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * A collection of utilities for dealing with Locales. + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 A collection of utilities for + * dealing with Locales. */ public enum LocaleUtils { - INSTANCE; + INSTANCE; - public static LocaleUtils get() { - return INSTANCE; - } + public static LocaleUtils get() { + return INSTANCE; + } - // TODO: The number of locales in the system is rather small, - // but we should probably use a ConcurrentLruMap just in case. 
- private static final ConcurrentMap UNIQUE_LOCALE_MAP = - new ConcurrentHashMap<>(64, .75f, 2); + // TODO: The number of locales in the system is rather small, + // but we should probably use a ConcurrentLruMap just in case. + private static final ConcurrentMap UNIQUE_LOCALE_MAP = + new ConcurrentHashMap<>(64, .75f, 2); - /** - * Returns a locale for language-only ("en") or language/country ("en_UK") - * iso codes - */ - public Locale getLocaleByIsoCode(String isoCode) { - if (isoCode == null) { - return null; - } - if (isoCode.length() == 2) { - return uniqueifyLocale(new Locale(isoCode)); - } else if (isoCode.length() == 5) { - String countryIsoCode = isoCode.substring(3, 5); - String langIsoCode = isoCode.substring(0, 2); - return uniqueifyLocale(new Locale(langIsoCode, countryIsoCode)); - } else { - List split = Lists.newArrayList(Splitter.on('_').split(isoCode)); - String language = split.get(0); - String country = split.size() > 1 ? split.get(1) : ""; - String variant = split.size() > 2 ? split.get(2) : ""; - return uniqueifyLocale(new Locale(language, country, variant)); - } + /** + * Returns a locale for language-only ("en") or language/country ("en_UK") iso codes + */ + public Locale getLocaleByIsoCode(String isoCode) { + if (isoCode == null) { + return null; } + if (isoCode.length() == 2) { + return uniqueifyLocale(new Locale(isoCode)); + } else if (isoCode.length() == 5) { + String countryIsoCode = isoCode.substring(3, 5); + String langIsoCode = isoCode.substring(0, 2); + return uniqueifyLocale(new Locale(langIsoCode, countryIsoCode)); + } else { + List split = Lists.newArrayList(Splitter.on('_').split(isoCode)); + String language = split.get(0); + String country = split.size() > 1 ? split.get(1) : ""; + String variant = split.size() > 2 ? split.get(2) : ""; + return uniqueifyLocale(new Locale(language, country, variant)); + } + } - /** - * If you're going to cache a locale, it should call this function so that it caches - * @param value the locale to uniquify - * @return the unique locale - */ - static Locale uniqueifyLocale(Locale value) { - if (value == null) { - return null; - } - Locale oldValue = UNIQUE_LOCALE_MAP.get(value); - if (oldValue != null) { - return oldValue; - } - UNIQUE_LOCALE_MAP.put(value, value); - return value; + /** + * If you're going to cache a locale, it should call this function so that it caches + * @param value the locale to uniquify + * @return the unique locale + */ + static Locale uniqueifyLocale(Locale value) { + if (value == null) { + return null; + } + Locale oldValue = UNIQUE_LOCALE_MAP.get(value); + if (oldValue != null) { + return oldValue; } + UNIQUE_LOCALE_MAP.put(value, value); + return value; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpper.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpper.java index 128990d1803..2ae27ba6ee3 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpper.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpper.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,63 +21,63 @@ /** * This utility class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * OracleUpper is used in combination with OracleUpperTable to generate upper-case output - * consistent particular chosen Oracle expressions. - * + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 OracleUpper is used in combination + * with OracleUpperTable to generate upper-case output consistent particular chosen Oracle + * expressions. * @see OracleUpperTable */ public class OracleUpper { - private OracleUpper() { - // HideUtilityClassConstructor - } + private OracleUpper() { + // HideUtilityClassConstructor + } - /** - * Upper-case {@code value}, using the information in {@code t} to produce a result - * consistent with the PL/SQL expression used to generate t. - */ - public static String toUpperCase(OracleUpperTable t, String value) { - // Oracle's upper or nls_upper are known to disagree with Java on some particulars. - // We search for known exceptional characters and if found take measures to adjust - // Java's String.toUpperCase. In the average case we incur just a single relatively - // fast scan of the string. In typical bad cases we'll incur two extra String copies - // (one copy into the buffer, one out -- this on top of whatever's required by - // toUpperCase). Note that we have to match Oracle even for characters outside the - // language's alphabet since we still want to return records containing those characters. - char[] exceptions = t.getUpperCaseExceptions(); - if (exceptions.length > 0) { - // Prefer to use String.indexOf in the case of a single search char; it's faster by - // virtue of not requiring two loops and being intrinsic. - int nextExceptionIndex = (exceptions.length == 1) - ? value.indexOf(exceptions[0]) : StringUtils.indexOfAny(value, exceptions); + /** + * Upper-case {@code value}, using the information in {@code t} to produce a result consistent + * with the PL/SQL expression used to generate t. + */ + public static String toUpperCase(OracleUpperTable t, String value) { + // Oracle's upper or nls_upper are known to disagree with Java on some particulars. + // We search for known exceptional characters and if found take measures to adjust + // Java's String.toUpperCase. In the average case we incur just a single relatively + // fast scan of the string. 
In typical bad cases we'll incur two extra String copies + // (one copy into the buffer, one out -- this on top of whatever's required by + // toUpperCase). Note that we have to match Oracle even for characters outside the + // language's alphabet since we still want to return records containing those characters. + char[] exceptions = t.getUpperCaseExceptions(); + if (exceptions.length > 0) { + // Prefer to use String.indexOf in the case of a single search char; it's faster by + // virtue of not requiring two loops and being intrinsic. + int nextExceptionIndex = (exceptions.length == 1) + ? value.indexOf(exceptions[0]) + : StringUtils.indexOfAny(value, exceptions); - if (nextExceptionIndex >= 0) { - // Annoying case: we have found a character that we know Oracle handles differently - // than Java and we must adjust appropriately. - StringBuilder result = new StringBuilder(value.length()); - String rem = value; - do { - char nextException = rem.charAt(nextExceptionIndex); + if (nextExceptionIndex >= 0) { + // Annoying case: we have found a character that we know Oracle handles differently + // than Java and we must adjust appropriately. + StringBuilder result = new StringBuilder(value.length()); + String rem = value; + do { + char nextException = rem.charAt(nextExceptionIndex); - result.append(rem.substring(0, nextExceptionIndex).toUpperCase(t.getLocale())); - result.append(t.getUpperCaseExceptionMapping(nextException)); + result.append(rem.substring(0, nextExceptionIndex).toUpperCase(t.getLocale())); + result.append(t.getUpperCaseExceptionMapping(nextException)); - rem = rem.substring(nextExceptionIndex + 1); - nextExceptionIndex = (exceptions.length == 1) - ? rem.indexOf(exceptions[0]) : StringUtils.indexOfAny(rem, exceptions); - } while (nextExceptionIndex >= 0); - result.append(rem.toUpperCase(t.getLocale())); + rem = rem.substring(nextExceptionIndex + 1); + nextExceptionIndex = (exceptions.length == 1) + ? rem.indexOf(exceptions[0]) + : StringUtils.indexOfAny(rem, exceptions); + } while (nextExceptionIndex >= 0); + result.append(rem.toUpperCase(t.getLocale())); - return result.toString(); - } - } - - // Nice case: we know of no reason that Oracle and Java wouldn't agree when converting - // to upper case. - return value.toUpperCase(t.getLocale()); + return result.toString(); + } } + + // Nice case: we know of no reason that Oracle and Java wouldn't agree when converting + // to upper case. + return value.toUpperCase(t.getLocale()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpperTable.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpperTable.java index b453a1bbd5f..1409b29c923 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpperTable.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/i18n/OracleUpperTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,17 +23,16 @@ /** * This utility class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * Generated by i18n.OracleUpperTableGeneratorTest + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 Generated by + * i18n.OracleUpperTableGeneratorTest *

- * An instance of this enum codifies the difference between executing a - * {@link #getSqlFormatString() particular PL/SQL expression} in Oracle and executing - * {@link String#toUpperCase(Locale)} for a {@link #getLocale() particular locale} in Java. These - * differences (also called exceptions) are expressed by the output of - * {@link #getUpperCaseExceptions()} and {@link #getUpperCaseExceptionMapping(char)}. + * An instance of this enum codifies the difference between executing a {@link #getSqlFormatString() + * particular PL/SQL expression} in Oracle and executing {@link String#toUpperCase(Locale)} for a + * {@link #getLocale() particular locale} in Java. These differences (also called exceptions) are + * expressed by the output of {@link #getUpperCaseExceptions()} and + * {@link #getUpperCaseExceptionMapping(char)}. *

* The tables are generated by testing a particular set of characters that are known to contain * exceptions and {@link #toUpperCase(String) may be used} to compensate for exceptions found and @@ -58,280 +58,338 @@ *

 * U+03cd ύ
 * U+03ce ώ
  • * - * * @see OracleUpper */ public enum OracleUpperTable { - ENGLISH("upper(%s)", "en", "ß"), - GERMAN("nls_upper(%s, 'nls_sort=xgerman')", "de", ""), - FRENCH("nls_upper(%s, 'nls_sort=xfrench')", "fr", "ß"), - ITALIAN("nls_upper(%s, 'nls_sort=italian')", "it", "ß"), - SPANISH("nls_upper(%s, 'nls_sort=spanish')", "es", "ß"), - CATALAN("nls_upper(%s, 'nls_sort=catalan')", "ca", "ß"), - DUTCH("nls_upper(%s, 'nls_sort=dutch')", "nl", "ß"), - PORTUGUESE("nls_upper(%s, 'nls_sort=west_european')", "pt", "ß"), - DANISH("nls_upper(%s, 'nls_sort=danish')", "da", "ß"), - NORWEGIAN("nls_upper(%s, 'nls_sort=norwegian')", "no", "ß"), - SWEDISH("nls_upper(%s, 'nls_sort=swedish')", "sv", "ß"), - FINNISH("nls_upper(%s, 'nls_sort=finnish')", "fi", "ß"), - CZECH("nls_upper(%s, 'nls_sort=xczech')", "cs", "ß"), - POLISH("nls_upper(%s, 'nls_sort=polish')", "pl", "ß"), - TURKISH("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tr", "ß"), - CHINESE_HK("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh", ""), - CHINESE_HK_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_stroke_m')", "zh", ""), - CHINESE_TW("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh", ""), - CHINESE_TW_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_stroke_m')", "zh", ""), - CHINESE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_radical_m')", "zh", ""), - CHINESE_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_stroke_m')", "zh", ""), - CHINESE_PINYIN("nls_upper(to_single_byte(%s), 'nls_sort=schinese_pinyin_m')", "zh", ""), - JAPANESE("nls_upper(to_single_byte(%s), 'nls_sort=japanese_m')", "ja", ""), - KOREAN("nls_upper(to_single_byte(%s), 'nls_sort=korean_m')", "ko", ""), - RUSSIAN("nls_upper(%s, 'nls_sort=russian')", "ru", "ß"), - BULGARIAN("nls_upper(%s, 'nls_sort=bulgarian')", "bg", "ß"), - INDONESIAN("nls_upper(%s, 'nls_sort=indonesian')", "in", "ß"), - ROMANIAN("nls_upper(%s, 'nls_sort=romanian')", "ro", "ß"), - VIETNAMESE("nls_upper(%s, 'nls_sort=vietnamese')", "vi", "ß"), - UKRAINIAN("nls_upper(%s, 'nls_sort=ukrainian')", "uk", "ß"), - HUNGARIAN("nls_upper(%s, 'nls_sort=xhungarian')", "hu", ""), - GREEK("nls_upper(%s, 'nls_sort=greek')", "el", "ßΆΈΉΊΌΎΏάέήίόύώ"), - HEBREW("nls_upper(%s, 'nls_sort=hebrew')", "iw", "ß"), - SLOVAK("nls_upper(%s, 'nls_sort=slovak')", "sk", "ß"), - SERBIAN_CYRILLIC("nls_upper(%s, 'nls_sort=generic_m')", "sr", ""), - SERBIAN_LATIN("nls_upper(%s, 'nls_sort=xcroatian')", "sh", "ß"), - BOSNIAN("nls_upper(%s, 'nls_sort=xcroatian')", "bs", "ß"), - GEORGIAN("nls_upper(%s, 'nls_sort=binary')", "ka", "ß"), - BASQUE("nls_upper(%s, 'nls_sort=west_european')", "eu", "ß"), - MALTESE("nls_upper(%s, 'nls_sort=west_european')", "mt", "ß"), - ROMANSH("nls_upper(%s, 'nls_sort=west_european')", "rm", "ß"), - LUXEMBOURGISH("nls_upper(%s, 'nls_sort=west_european')", "lb", "ß"), - IRISH("nls_upper(%s, 'nls_sort=west_european')", "ga", "ß"), - SLOVENE("nls_upper(%s, 'nls_sort=xslovenian')", "sl", "ß"), - CROATIAN("nls_upper(%s, 'nls_sort=xcroatian')", "hr", "ß"), - MALAY("nls_upper(%s, 'nls_sort=malay')", "ms", "ß"), - ARABIC("nls_upper(%s, 'nls_sort=arabic')", "ar", "ß"), - ESTONIAN("nls_upper(%s, 'nls_sort=estonian')", "et", "ß"), - ICELANDIC("nls_upper(%s, 'nls_sort=icelandic')", "is", "ß"), - LATVIAN("nls_upper(%s, 'nls_sort=latvian')", "lv", "ß"), - LITHUANIAN("nls_upper(%s, 'nls_sort=lithuanian')", "lt", "ß"), - KYRGYZ("nls_upper(%s, 'nls_sort=binary')", "ky", "ß"), - KAZAKH("nls_upper(%s, 'nls_sort=binary')", "kk", "ß"), - TAJIK("nls_upper(%s, 
'nls_sort=russian')", "tg", "ß"), - BELARUSIAN("nls_upper(%s, 'nls_sort=russian')", "be", "ß"), - TURKMEN("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tk", "iß"), - AZERBAIJANI("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "az", "ß"), - ARMENIAN("nls_upper(%s, 'nls_sort=binary')", "hy", "ß"), - THAI("nls_upper(%s, 'nls_sort=thai_dictionary')", "th", "ß"), - HINDI("nls_upper(%s, 'nls_sort=binary')", "hi", "ß"), - URDU("nls_upper(%s, 'nls_sort=arabic')", "ur", "ß"), - BENGALI("nls_upper(%s, 'nls_sort=bengali')", "bn", "ß"), - TAMIL("nls_upper(%s, 'nls_sort=binary')", "ta", "ß"), - ESPERANTO("upper(%s)", "eo", ""), - XWEST_EUROPEAN("NLS_UPPER(%s,'NLS_SORT=xwest_european')", "en", ""); + ENGLISH("upper(%s)", "en", "ß"), + GERMAN("nls_upper(%s, 'nls_sort=xgerman')", "de", ""), + FRENCH("nls_upper(%s, 'nls_sort=xfrench')", "fr", "ß"), + ITALIAN("nls_upper(%s, 'nls_sort=italian')", "it", "ß"), + SPANISH("nls_upper(%s, 'nls_sort=spanish')", "es", "ß"), + CATALAN("nls_upper(%s, 'nls_sort=catalan')", "ca", "ß"), + DUTCH("nls_upper(%s, 'nls_sort=dutch')", "nl", "ß"), + PORTUGUESE("nls_upper(%s, 'nls_sort=west_european')", "pt", "ß"), + DANISH("nls_upper(%s, 'nls_sort=danish')", "da", "ß"), + NORWEGIAN("nls_upper(%s, 'nls_sort=norwegian')", "no", "ß"), + SWEDISH("nls_upper(%s, 'nls_sort=swedish')", "sv", "ß"), + FINNISH("nls_upper(%s, 'nls_sort=finnish')", "fi", "ß"), + CZECH("nls_upper(%s, 'nls_sort=xczech')", "cs", "ß"), + POLISH("nls_upper(%s, 'nls_sort=polish')", "pl", "ß"), + TURKISH("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tr", "ß"), + CHINESE_HK("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh", ""), + CHINESE_HK_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_stroke_m')", "zh", ""), + CHINESE_TW("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh", ""), + CHINESE_TW_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_stroke_m')", "zh", ""), + CHINESE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_radical_m')", "zh", ""), + CHINESE_STROKE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_stroke_m')", "zh", ""), + CHINESE_PINYIN("nls_upper(to_single_byte(%s), 'nls_sort=schinese_pinyin_m')", "zh", ""), + JAPANESE("nls_upper(to_single_byte(%s), 'nls_sort=japanese_m')", "ja", ""), + KOREAN("nls_upper(to_single_byte(%s), 'nls_sort=korean_m')", "ko", ""), + RUSSIAN("nls_upper(%s, 'nls_sort=russian')", "ru", "ß"), + BULGARIAN("nls_upper(%s, 'nls_sort=bulgarian')", "bg", "ß"), + INDONESIAN("nls_upper(%s, 'nls_sort=indonesian')", "in", "ß"), + ROMANIAN("nls_upper(%s, 'nls_sort=romanian')", "ro", "ß"), + VIETNAMESE("nls_upper(%s, 'nls_sort=vietnamese')", "vi", "ß"), + UKRAINIAN("nls_upper(%s, 'nls_sort=ukrainian')", "uk", "ß"), + HUNGARIAN("nls_upper(%s, 'nls_sort=xhungarian')", "hu", ""), + GREEK("nls_upper(%s, 'nls_sort=greek')", "el", "ßΆΈΉΊΌΎΏάέήίόύώ"), + HEBREW("nls_upper(%s, 'nls_sort=hebrew')", "iw", "ß"), + SLOVAK("nls_upper(%s, 'nls_sort=slovak')", "sk", "ß"), + SERBIAN_CYRILLIC("nls_upper(%s, 'nls_sort=generic_m')", "sr", ""), + SERBIAN_LATIN("nls_upper(%s, 'nls_sort=xcroatian')", "sh", "ß"), + BOSNIAN("nls_upper(%s, 'nls_sort=xcroatian')", "bs", "ß"), + GEORGIAN("nls_upper(%s, 'nls_sort=binary')", "ka", "ß"), + BASQUE("nls_upper(%s, 'nls_sort=west_european')", "eu", "ß"), + MALTESE("nls_upper(%s, 'nls_sort=west_european')", "mt", "ß"), + ROMANSH("nls_upper(%s, 'nls_sort=west_european')", "rm", "ß"), + LUXEMBOURGISH("nls_upper(%s, 'nls_sort=west_european')", "lb", "ß"), + IRISH("nls_upper(%s, 
'nls_sort=west_european')", "ga", "ß"), + SLOVENE("nls_upper(%s, 'nls_sort=xslovenian')", "sl", "ß"), + CROATIAN("nls_upper(%s, 'nls_sort=xcroatian')", "hr", "ß"), + MALAY("nls_upper(%s, 'nls_sort=malay')", "ms", "ß"), + ARABIC("nls_upper(%s, 'nls_sort=arabic')", "ar", "ß"), + ESTONIAN("nls_upper(%s, 'nls_sort=estonian')", "et", "ß"), + ICELANDIC("nls_upper(%s, 'nls_sort=icelandic')", "is", "ß"), + LATVIAN("nls_upper(%s, 'nls_sort=latvian')", "lv", "ß"), + LITHUANIAN("nls_upper(%s, 'nls_sort=lithuanian')", "lt", "ß"), + KYRGYZ("nls_upper(%s, 'nls_sort=binary')", "ky", "ß"), + KAZAKH("nls_upper(%s, 'nls_sort=binary')", "kk", "ß"), + TAJIK("nls_upper(%s, 'nls_sort=russian')", "tg", "ß"), + BELARUSIAN("nls_upper(%s, 'nls_sort=russian')", "be", "ß"), + TURKMEN("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tk", "iß"), + AZERBAIJANI("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "az", "ß"), + ARMENIAN("nls_upper(%s, 'nls_sort=binary')", "hy", "ß"), + THAI("nls_upper(%s, 'nls_sort=thai_dictionary')", "th", "ß"), + HINDI("nls_upper(%s, 'nls_sort=binary')", "hi", "ß"), + URDU("nls_upper(%s, 'nls_sort=arabic')", "ur", "ß"), + BENGALI("nls_upper(%s, 'nls_sort=bengali')", "bn", "ß"), + TAMIL("nls_upper(%s, 'nls_sort=binary')", "ta", "ß"), + ESPERANTO("upper(%s)", "eo", ""), + XWEST_EUROPEAN("NLS_UPPER(%s,'NLS_SORT=xwest_european')", "en", ""); - private final String sql; - private final Locale locale; - private final char[] exceptionChars; + private final String sql; + private final Locale locale; + private final char[] exceptionChars; - OracleUpperTable(String sql, String lang, String exceptionChars) { - this.sql = sql; - this.locale = new Locale(lang); - this.exceptionChars = exceptionChars.toCharArray(); - } + OracleUpperTable(String sql, String lang, String exceptionChars) { + this.sql = sql; + this.locale = new Locale(lang); + this.exceptionChars = exceptionChars.toCharArray(); + } - /** - * Return an array containing characters for which Java's String.toUpperCase method is known - * to deviate from the result of Oracle evaluating {@link #getSql(String) this expression}. - * - * @return an array containing all exceptional characters. - */ - final char[] getUpperCaseExceptions() { - return exceptionChars; - } + /** + * Return an array containing characters for which Java's String.toUpperCase method is known to + * deviate from the result of Oracle evaluating {@link #getSql(String) this expression}. + * @return an array containing all exceptional characters. + */ + final char[] getUpperCaseExceptions() { + return exceptionChars; + } - /** - * For a character, {@code exception}, contained in the String returned from - * {@link #getUpperCaseExceptions()}, this method returns the anticipated result of - * upper-casing the character in Oracle when evaluating - * {@link #getSql(String) this expression}. - * - * @return the upper case of {@code exception}, according to what Oracle would do. - * @throws IllegalArgumentException - * if the character is not contained in the String returned by - * {@link #getUpperCaseExceptions()}. 
- */ - final String getUpperCaseExceptionMapping(char exception) { - switch (exception) { - case 'i': - switch (this) { - case TURKMEN: return "İ"; // I - default: // fall out - } - break; - case 'ß': - switch (this) { - case ENGLISH: return "ß"; // SS - case FRENCH: return "ß"; // SS - case ITALIAN: return "ß"; // SS - case SPANISH: return "ß"; // SS - case CATALAN: return "ß"; // SS - case DUTCH: return "ß"; // SS - case PORTUGUESE: return "ß"; // SS - case DANISH: return "ß"; // SS - case NORWEGIAN: return "ß"; // SS - case SWEDISH: return "ß"; // SS - case FINNISH: return "ß"; // SS - case CZECH: return "ß"; // SS - case POLISH: return "ß"; // SS - case TURKISH: return "ß"; // SS - case RUSSIAN: return "ß"; // SS - case BULGARIAN: return "ß"; // SS - case INDONESIAN: return "ß"; // SS - case ROMANIAN: return "ß"; // SS - case VIETNAMESE: return "ß"; // SS - case UKRAINIAN: return "ß"; // SS - case GREEK: return "ß"; // SS - case HEBREW: return "ß"; // SS - case SLOVAK: return "ß"; // SS - case SERBIAN_LATIN: return "ß"; // SS - case BOSNIAN: return "ß"; // SS - case GEORGIAN: return "ß"; // SS - case BASQUE: return "ß"; // SS - case MALTESE: return "ß"; // SS - case ROMANSH: return "ß"; // SS - case LUXEMBOURGISH: return "ß"; // SS - case IRISH: return "ß"; // SS - case SLOVENE: return "ß"; // SS - case CROATIAN: return "ß"; // SS - case MALAY: return "ß"; // SS - case ARABIC: return "ß"; // SS - case ESTONIAN: return "ß"; // SS - case ICELANDIC: return "ß"; // SS - case LATVIAN: return "ß"; // SS - case LITHUANIAN: return "ß"; // SS - case KYRGYZ: return "ß"; // SS - case KAZAKH: return "ß"; // SS - case TAJIK: return "ß"; // SS - case BELARUSIAN: return "ß"; // SS - case TURKMEN: return "ß"; // SS - case AZERBAIJANI: return "ß"; // SS - case ARMENIAN: return "ß"; // SS - case THAI: return "ß"; // SS - case HINDI: return "ß"; // SS - case URDU: return "ß"; // SS - case BENGALI: return "ß"; // SS - case TAMIL: return "ß"; // SS - default: // fall out - } - break; - case 'Ά': - switch (this) { - case GREEK: return "Α"; // Ά - default: // fall out - } - break; - case 'Έ': - switch (this) { - case GREEK: return "Ε"; // Έ - default: // fall out - } - break; - case 'Ή': - switch (this) { - case GREEK: return "Η"; // Ή - default: // fall out - } - break; - case 'Ί': - switch (this) { - case GREEK: return "Ι"; // Ί - default: // fall out - } - break; - case 'Ό': - switch (this) { - case GREEK: return "Ο"; // Ό - default: // fall out - } - break; - case 'Ύ': - switch (this) { - case GREEK: return "Υ"; // Ύ - default: // fall out - } - break; - case 'Ώ': - switch (this) { - case GREEK: return "Ω"; // Ώ - default: // fall out - } - break; - case 'ά': - switch (this) { - case GREEK: return "Α"; // Ά - default: // fall out - } - break; - case 'έ': - switch (this) { - case GREEK: return "Ε"; // Έ - default: // fall out - } - break; - case 'ή': - switch (this) { - case GREEK: return "Η"; // Ή - default: // fall out - } - break; - case 'ί': - switch (this) { - case GREEK: return "Ι"; // Ί - default: // fall out - } - break; - case 'ό': - switch (this) { - case GREEK: return "Ο"; // Ό - default: // fall out - } - break; - case 'ύ': - switch (this) { - case GREEK: return "Υ"; // Ύ - default: // fall out - } - break; - case 'ώ': - switch (this) { - case GREEK: return "Ω"; // Ώ - default: // fall out - } - break; + /** + * For a character, {@code exception}, contained in the String returned from + * {@link #getUpperCaseExceptions()}, this method returns the anticipated result of upper-casing + * the 
character in Oracle when evaluating {@link #getSql(String) this expression}. + * @return the upper case of {@code exception}, according to what Oracle would do. if the + * character is not contained in the String returned by {@link #getUpperCaseExceptions()}. + */ + final String getUpperCaseExceptionMapping(char exception) { + switch (exception) { + case 'i': + switch (this) { + case TURKMEN: + return "İ"; // I + default: // fall out + } + break; + case 'ß': + switch (this) { + case ENGLISH: + return "ß"; // SS + case FRENCH: + return "ß"; // SS + case ITALIAN: + return "ß"; // SS + case SPANISH: + return "ß"; // SS + case CATALAN: + return "ß"; // SS + case DUTCH: + return "ß"; // SS + case PORTUGUESE: + return "ß"; // SS + case DANISH: + return "ß"; // SS + case NORWEGIAN: + return "ß"; // SS + case SWEDISH: + return "ß"; // SS + case FINNISH: + return "ß"; // SS + case CZECH: + return "ß"; // SS + case POLISH: + return "ß"; // SS + case TURKISH: + return "ß"; // SS + case RUSSIAN: + return "ß"; // SS + case BULGARIAN: + return "ß"; // SS + case INDONESIAN: + return "ß"; // SS + case ROMANIAN: + return "ß"; // SS + case VIETNAMESE: + return "ß"; // SS + case UKRAINIAN: + return "ß"; // SS + case GREEK: + return "ß"; // SS + case HEBREW: + return "ß"; // SS + case SLOVAK: + return "ß"; // SS + case SERBIAN_LATIN: + return "ß"; // SS + case BOSNIAN: + return "ß"; // SS + case GEORGIAN: + return "ß"; // SS + case BASQUE: + return "ß"; // SS + case MALTESE: + return "ß"; // SS + case ROMANSH: + return "ß"; // SS + case LUXEMBOURGISH: + return "ß"; // SS + case IRISH: + return "ß"; // SS + case SLOVENE: + return "ß"; // SS + case CROATIAN: + return "ß"; // SS + case MALAY: + return "ß"; // SS + case ARABIC: + return "ß"; // SS + case ESTONIAN: + return "ß"; // SS + case ICELANDIC: + return "ß"; // SS + case LATVIAN: + return "ß"; // SS + case LITHUANIAN: + return "ß"; // SS + case KYRGYZ: + return "ß"; // SS + case KAZAKH: + return "ß"; // SS + case TAJIK: + return "ß"; // SS + case BELARUSIAN: + return "ß"; // SS + case TURKMEN: + return "ß"; // SS + case AZERBAIJANI: + return "ß"; // SS + case ARMENIAN: + return "ß"; // SS + case THAI: + return "ß"; // SS + case HINDI: + return "ß"; // SS + case URDU: + return "ß"; // SS + case BENGALI: + return "ß"; // SS + case TAMIL: + return "ß"; // SS + default: // fall out + } + break; + case 'Ά': + switch (this) { + case GREEK: + return "Α"; // Ά + default: // fall out + } + break; + case 'Έ': + switch (this) { + case GREEK: + return "Ε"; // Έ + default: // fall out + } + break; + case 'Ή': + switch (this) { + case GREEK: + return "Η"; // Ή + default: // fall out + } + break; + case 'Ί': + switch (this) { + case GREEK: + return "Ι"; // Ί + default: // fall out + } + break; + case 'Ό': + switch (this) { + case GREEK: + return "Ο"; // Ό + default: // fall out + } + break; + case 'Ύ': + switch (this) { + case GREEK: + return "Υ"; // Ύ + default: // fall out + } + break; + case 'Ώ': + switch (this) { + case GREEK: + return "Ω"; // Ώ + default: // fall out + } + break; + case 'ά': + switch (this) { + case GREEK: + return "Α"; // Ά + default: // fall out } - throw new IllegalArgumentException( - "No upper case mapping for char=" + exception - + " and this=" + this); + break; + case 'έ': + switch (this) { + case GREEK: + return "Ε"; // Έ + default: // fall out + } + break; + case 'ή': + switch (this) { + case GREEK: + return "Η"; // Ή + default: // fall out + } + break; + case 'ί': + switch (this) { + case GREEK: + return "Ι"; // Ί + default: // fall out + 
} + break; + case 'ό': + switch (this) { + case GREEK: + return "Ο"; // Ό + default: // fall out + } + break; + case 'ύ': + switch (this) { + case GREEK: + return "Υ"; // Ύ + default: // fall out + } + break; + case 'ώ': + switch (this) { + case GREEK: + return "Ω"; // Ώ + default: // fall out + } + break; } + throw new IllegalArgumentException( + "No upper case mapping for char=" + exception + " and this=" + this); + } - @SuppressWarnings(value = "EI_EXPOSE_REP", justification = "By design.") - public final Locale getLocale() { - return locale; - } + @SuppressWarnings(value = "EI_EXPOSE_REP", justification = "By design.") + public final Locale getLocale() { + return locale; + } - public String getSqlFormatString() { - return sql; - } + public String getSqlFormatString() { + return sql; + } - public String getSql(String expr) { - return String.format(sql, expr); - } + public String getSql(String expr) { + return String.format(sql, expr); + } - public String toUpperCase(String value) { - return OracleUpper.toUpperCase(this, value); - } + public String toUpperCase(String value) { + return OracleUpper.toUpperCase(this, value); + } - public static OracleUpperTable forLinguisticSort(String sort) { - return Enum.valueOf(OracleUpperTable.class, sort); - } + public static OracleUpperTable forLinguisticSort(String sort) { + return Enum.valueOf(OracleUpperTable.class, sort); + } } - diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonDataFormat.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonDataFormat.java index 46ac53f3870..28ae41bb93c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonDataFormat.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonDataFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,11 @@ */ package org.apache.phoenix.util.json; -import com.jayway.jsonpath.Configuration; -import com.jayway.jsonpath.JsonPath; -import com.jayway.jsonpath.Option; -import com.jayway.jsonpath.PathNotFoundException; +import java.nio.ByteBuffer; +import java.sql.Types; +import java.util.List; +import java.util.stream.Collectors; + import org.apache.hadoop.hbase.util.Bytes; import org.bson.BsonBinaryReader; import org.bson.BsonDocument; @@ -32,171 +33,167 @@ import org.bson.codecs.RawBsonDocumentCodec; import org.bson.io.ByteBufferBsonInput; -import java.nio.ByteBuffer; -import java.sql.Types; -import java.util.List; -import java.util.stream.Collectors; +import com.jayway.jsonpath.Configuration; +import com.jayway.jsonpath.JsonPath; +import com.jayway.jsonpath.Option; +import com.jayway.jsonpath.PathNotFoundException; public class BsonDataFormat implements JsonDataFormat { - @Override - public byte[] toBytes(Object object) { - return Bytes.toBytes(((RawBsonDocument) object).getByteBuffer().asNIO()); - } - - @Override - public Object toObject(String value) { - return RawBsonDocument.parse(value); - } - - @Override - public Object toObject(byte[] bytes, int offset, int length) { - return new RawBsonDocument(bytes, offset, length); - } - - @Override - public int estimateByteSize(Object o) { - RawBsonDocument rawBSON = (RawBsonDocument) o; - return rawBSON.size(); - } - - @Override - public int getValueType(Object obj, String jsonPathExprStr) { - BsonValue value = getBsonValue(jsonPathExprStr, (RawBsonDocument) obj); - return getSqlType(value); - } - - @Override - public Object getValue(Object obj, String jsonPathExprStr) { - BsonValue value = getBsonValue(jsonPathExprStr, (RawBsonDocument) obj); - return getValue(value); - } - - private Object getValue(BsonValue value) { - if (value != null) { - switch (value.getBsonType()) { - case INT32: - return value.asInt32().getValue(); - case INT64: - return value.asInt64().getValue(); - case STRING: - case SYMBOL: - return value.asString().getValue(); - case DECIMAL128: - return value.asDecimal128().doubleValue(); - case DOUBLE: - return value.asDouble().getValue(); - case BOOLEAN: - return value.asBoolean().getValue(); - case BINARY: - return value.asBinary().getData(); - case DATE_TIME: - return value.asDateTime().getValue(); - case DOCUMENT: - return value.asDocument().toJson(); - case ARRAY: - return readArray(value).toString(); - default: - return null; - } - } - return null; - } - - @Override - public ByteBuffer updateValue(Object top, String jsonPathExprStr, String newVal) { - Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); - BsonValue newValue = JsonPath.using(conf).parse(newVal).json(); - BsonDocument root = fromRaw((RawBsonDocument) top); - JsonPath.using(conf).parse(root).set(jsonPathExprStr, newValue); - RawBsonDocument - updated = - new RawBsonDocumentCodec().decode(new BsonDocumentReader(root), - DecoderContext.builder().build()); - return updated.getByteBuffer().asNIO(); - } - - // Ref: https://github.com/json-path/JsonPath/pull/828 - @Override - public boolean isPathValid(Object top, String path) { - try{ - Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); - BsonDocument 
root = fromRaw((RawBsonDocument) top); - JsonPath.using(conf).parse(root).read(path); - return true; - } - catch (PathNotFoundException e){ - return false; - } - } - - private BsonValue getBsonValue(String jsonPathExprStr, RawBsonDocument top) { - Configuration conf = getConfiguration(); - BsonValue value = JsonPath.using(conf).parse(top).read(jsonPathExprStr, BsonValue.class); - return value; - } - - private List readArray(BsonValue value) { - return value.asArray().stream().map(e -> { - // The reason for handling string in a special way is because: - // Given a string array in JSON - ["hello","world"] - // A string array when converted to a string returns - // as [hello, world] - the quotes stripped - // This change allows to retain those quotes. - if (e.isString() || e.isSymbol()) { - return "\"" + getValue(e) + "\""; - } else { - return String.valueOf(getValue(e)); - } - }).collect(Collectors.toList()); - } - - private Configuration getConfiguration() { - Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); - // This options will make us work in lax mode. - conf = conf.addOptions(Option.SUPPRESS_EXCEPTIONS); - return conf; - } - - // Transform to an in memory BsonDocument instance - private BsonDocument fromRaw(RawBsonDocument rawDocument) { - // Transform to an in memory BsonDocument instance - BsonBinaryReader - bsonReader = - new BsonBinaryReader(new ByteBufferBsonInput(rawDocument.getByteBuffer())); - try { - return new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); - } finally { - bsonReader.close(); - } - } - - private int getSqlType(BsonValue value) { - if (value == null) { - return Types.NULL; - } - switch (value.getBsonType()) { + @Override + public byte[] toBytes(Object object) { + return Bytes.toBytes(((RawBsonDocument) object).getByteBuffer().asNIO()); + } + + @Override + public Object toObject(String value) { + return RawBsonDocument.parse(value); + } + + @Override + public Object toObject(byte[] bytes, int offset, int length) { + return new RawBsonDocument(bytes, offset, length); + } + + @Override + public int estimateByteSize(Object o) { + RawBsonDocument rawBSON = (RawBsonDocument) o; + return rawBSON.size(); + } + + @Override + public int getValueType(Object obj, String jsonPathExprStr) { + BsonValue value = getBsonValue(jsonPathExprStr, (RawBsonDocument) obj); + return getSqlType(value); + } + + @Override + public Object getValue(Object obj, String jsonPathExprStr) { + BsonValue value = getBsonValue(jsonPathExprStr, (RawBsonDocument) obj); + return getValue(value); + } + + private Object getValue(BsonValue value) { + if (value != null) { + switch (value.getBsonType()) { case INT32: - return Types.INTEGER; + return value.asInt32().getValue(); case INT64: - return Types.BIGINT; - case DECIMAL128: - case DOUBLE: - return Types.DOUBLE; + return value.asInt64().getValue(); case STRING: case SYMBOL: - return Types.VARCHAR; + return value.asString().getValue(); + case DECIMAL128: + return value.asDecimal128().doubleValue(); + case DOUBLE: + return value.asDouble().getValue(); case BOOLEAN: - return Types.BOOLEAN; + return value.asBoolean().getValue(); case BINARY: - return Types.BINARY; + return value.asBinary().getData(); case DATE_TIME: - return Types.DATE; - case ARRAY: - return Types.ARRAY; + return value.asDateTime().getValue(); case DOCUMENT: - return Types.NVARCHAR; + return value.asDocument().toJson(); + case ARRAY: + return readArray(value).toString(); default: - return Types.OTHER; - } + return null; 
+ } + } + return null; + } + + @Override + public ByteBuffer updateValue(Object top, String jsonPathExprStr, String newVal) { + Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); + BsonValue newValue = JsonPath.using(conf).parse(newVal).json(); + BsonDocument root = fromRaw((RawBsonDocument) top); + JsonPath.using(conf).parse(root).set(jsonPathExprStr, newValue); + RawBsonDocument updated = new RawBsonDocumentCodec().decode(new BsonDocumentReader(root), + DecoderContext.builder().build()); + return updated.getByteBuffer().asNIO(); + } + + // Ref: https://github.com/json-path/JsonPath/pull/828 + @Override + public boolean isPathValid(Object top, String path) { + try { + Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); + BsonDocument root = fromRaw((RawBsonDocument) top); + JsonPath.using(conf).parse(root).read(path); + return true; + } catch (PathNotFoundException e) { + return false; + } + } + + private BsonValue getBsonValue(String jsonPathExprStr, RawBsonDocument top) { + Configuration conf = getConfiguration(); + BsonValue value = JsonPath.using(conf).parse(top).read(jsonPathExprStr, BsonValue.class); + return value; + } + + private List readArray(BsonValue value) { + return value.asArray().stream().map(e -> { + // The reason for handling string in a special way is because: + // Given a string array in JSON - ["hello","world"] + // A string array when converted to a string returns + // as [hello, world] - the quotes stripped + // This change allows to retain those quotes. + if (e.isString() || e.isSymbol()) { + return "\"" + getValue(e) + "\""; + } else { + return String.valueOf(getValue(e)); + } + }).collect(Collectors.toList()); + } + + private Configuration getConfiguration() { + Configuration conf = Configuration.builder().jsonProvider(new BsonJsonProvider()).build(); + // This options will make us work in lax mode. + conf = conf.addOptions(Option.SUPPRESS_EXCEPTIONS); + return conf; + } + + // Transform to an in memory BsonDocument instance + private BsonDocument fromRaw(RawBsonDocument rawDocument) { + // Transform to an in memory BsonDocument instance + BsonBinaryReader bsonReader = + new BsonBinaryReader(new ByteBufferBsonInput(rawDocument.getByteBuffer())); + try { + return new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); + } finally { + bsonReader.close(); + } + } + + private int getSqlType(BsonValue value) { + if (value == null) { + return Types.NULL; + } + switch (value.getBsonType()) { + case INT32: + return Types.INTEGER; + case INT64: + return Types.BIGINT; + case DECIMAL128: + case DOUBLE: + return Types.DOUBLE; + case STRING: + case SYMBOL: + return Types.VARCHAR; + case BOOLEAN: + return Types.BOOLEAN; + case BINARY: + return Types.BINARY; + case DATE_TIME: + return Types.DATE; + case ARRAY: + return Types.ARRAY; + case DOCUMENT: + return Types.NVARCHAR; + default: + return Types.OTHER; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonJsonProvider.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonJsonProvider.java index f9bedf428b1..c22dac0b97a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonJsonProvider.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/BsonJsonProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,11 @@ */ package org.apache.phoenix.util.json; -import com.jayway.jsonpath.InvalidJsonException; -import com.jayway.jsonpath.spi.json.AbstractJsonProvider; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + import org.bson.BsonArray; import org.bson.BsonBinary; import org.bson.BsonBoolean; @@ -34,218 +37,212 @@ import org.bson.json.JsonReader; import org.bson.types.ObjectId; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; +import com.jayway.jsonpath.InvalidJsonException; +import com.jayway.jsonpath.spi.json.AbstractJsonProvider; public class BsonJsonProvider extends AbstractJsonProvider { - @Override - public Object parse(final String json) throws InvalidJsonException { - JsonReader jsonReader = new JsonReader(json); - BsonType bsonType = jsonReader.readBsonType(); - switch (bsonType) { - case ARRAY: - return BsonArray.parse(json); - case DOCUMENT: - return BsonDocument.parse(json); - case STRING: - return new BsonString(jsonReader.readString()); - case INT32: - return new BsonInt32(jsonReader.readInt32()); - default: - throw new InvalidJsonException(String.format("Unsupported bson type %s", bsonType)); - } - } - - @Override - public Object parse(InputStream jsonStream, String charset) throws InvalidJsonException { - return null; - } - - @Override - public String toJson(Object obj) { - return null; - } - - @Override - public Object createArray() { - return new BsonArray(); - } - - @Override - public boolean isArray(final Object obj) { - - return (obj instanceof BsonArray || obj instanceof List); - } - - @Override - public Object getArrayIndex(final Object obj, final int idx) { - - return toBsonArray(obj).get(idx); + @Override + public Object parse(final String json) throws InvalidJsonException { + JsonReader jsonReader = new JsonReader(json); + BsonType bsonType = jsonReader.readBsonType(); + switch (bsonType) { + case ARRAY: + return BsonArray.parse(json); + case DOCUMENT: + return BsonDocument.parse(json); + case STRING: + return new BsonString(jsonReader.readString()); + case INT32: + return new BsonInt32(jsonReader.readInt32()); + default: + throw new InvalidJsonException(String.format("Unsupported bson type %s", bsonType)); + } + } + + @Override + public Object parse(InputStream jsonStream, String charset) throws InvalidJsonException { + return null; + } + + @Override + public String toJson(Object obj) { + return null; + } + + @Override + public Object createArray() { + return new BsonArray(); + } + + @Override + public boolean isArray(final Object obj) { + + return (obj instanceof BsonArray || obj instanceof List); + } + + @Override + public Object getArrayIndex(final Object obj, final int idx) { + + return toBsonArray(obj).get(idx); + } + + @Override + public void setArrayIndex(final Object array, final int index, final Object newValue) { + if (!isArray(array)) { + throw new UnsupportedOperationException(); + } else { + BsonArray arr = toBsonArray(array); + if (index == arr.size()) { + arr.add(toBsonValue(newValue)); + } else { + arr.set(index, toBsonValue(newValue)); + } } + } - @Override - public void setArrayIndex(final Object array, final int index, final Object newValue) { - if 
(!isArray(array)) { - throw new UnsupportedOperationException(); - } else { - BsonArray arr = toBsonArray(array); - if (index == arr.size()) { - arr.add(toBsonValue(newValue)); - } else { - arr.set(index, toBsonValue(newValue)); - } - } - } - - @Override - public Object createMap() { - return new BsonDocument(); - } + @Override + public Object createMap() { + return new BsonDocument(); + } - @Override - public boolean isMap(final Object obj) { - return (obj instanceof BsonDocument); - } + @Override + public boolean isMap(final Object obj) { + return (obj instanceof BsonDocument); + } - @Override - public Object getMapValue(final Object obj, final String key) { - BsonDocument bsonDocument = toBsonDocument(obj); - Object o = bsonDocument.get(key); - if (!bsonDocument.containsKey(key)) { - return UNDEFINED; - } else { - return unwrap(o); - } + @Override + public Object getMapValue(final Object obj, final String key) { + BsonDocument bsonDocument = toBsonDocument(obj); + Object o = bsonDocument.get(key); + if (!bsonDocument.containsKey(key)) { + return UNDEFINED; + } else { + return unwrap(o); } - - @Override - public Iterable toIterable(final Object obj) { - BsonArray arr = toBsonArray(obj); - List values = new ArrayList(arr.size()); - for (Object o : arr) { - values.add(toJavaType(toBsonValue(o))); - } - return values; - } - - @Override - public void setProperty(final Object obj, final Object key, final Object value) { - if (isMap(obj)) { - toBsonDocument(obj).put(key.toString(), toBsonValue(value)); - } else { - BsonArray array = toBsonArray(obj); - int index; - if (key != null) { - index = key instanceof Integer ? (Integer) key : Integer.parseInt(key.toString()); - } else { - index = array.size(); - } - - if (index == array.size()) { - array.add(toBsonValue(value)); - } else { - array.set(index, toBsonValue(value)); - } - } - } - - private static BsonArray toBsonArray(final Object o) { - return (BsonArray) o; - } - - private static BsonDocument toBsonDocument(final Object o) { - return (BsonDocument) o; - } - - /** - * Refer to this link for background on the implementation : - * https://github.com/spring-projects/spring-data-mongodb/blob/main/spring-data-mongodb/src/main/java/org/springframework/data/mongodb/util/BsonUtils.java#L66 - * @param source - * @return - */ - private static BsonValue toBsonValue(Object source) { - - if (source instanceof BsonValue) { - return (BsonValue) source; - } - - if (source instanceof String) { - return new BsonString((String) source); - } - - if (source instanceof ObjectId) { - return new BsonObjectId((ObjectId) source); - } - - if (source instanceof Double) { - return new BsonDouble((Double) source); - } - - if (source instanceof Integer) { - return new BsonInt32((Integer) source); - } - - if (source instanceof Long) { - return new BsonInt64((Long) source); - } - - if (source instanceof byte[]) { - return new BsonBinary((byte[]) source); - } - - if (source instanceof Boolean) { - return new BsonBoolean((Boolean) source); - } - - if (source instanceof Float) { - return new BsonDouble((Float) source); - } - - throw new IllegalArgumentException(String.format("Unable to convert %s (%s) to BsonValue.", source, - source != null ? source.getClass().getName() : "null")); - } - - /** - * Extract the corresponding plain value from {@link BsonValue}. Eg. plain {@link String} from - * {@link org.bson.BsonString}. - * - * @param value must not be {@literal null}. 
- * @return - * @since 2.1 - */ - public static Object toJavaType(BsonValue value) { - - switch (value.getBsonType()) { - case INT32: - return value.asInt32().getValue(); - case INT64: - return value.asInt64().getValue(); - case STRING: - return value.asString().getValue(); - case DECIMAL128: - return value.asDecimal128().doubleValue(); - case DOUBLE: - return value.asDouble().getValue(); - case BOOLEAN: - return value.asBoolean().getValue(); - case OBJECT_ID: - return value.asObjectId().getValue(); - case BINARY: - return value.asBinary().getData(); - case DATE_TIME: - return new Date(value.asDateTime().getValue()); - case SYMBOL: - return value.asSymbol().getSymbol(); - case ARRAY: - return value.asArray().toArray(); - case DOCUMENT: - return Document.parse(value.asDocument().toJson()); - default: - return value; - } + } + + @Override + public Iterable toIterable(final Object obj) { + BsonArray arr = toBsonArray(obj); + List values = new ArrayList(arr.size()); + for (Object o : arr) { + values.add(toJavaType(toBsonValue(o))); + } + return values; + } + + @Override + public void setProperty(final Object obj, final Object key, final Object value) { + if (isMap(obj)) { + toBsonDocument(obj).put(key.toString(), toBsonValue(value)); + } else { + BsonArray array = toBsonArray(obj); + int index; + if (key != null) { + index = key instanceof Integer ? (Integer) key : Integer.parseInt(key.toString()); + } else { + index = array.size(); + } + + if (index == array.size()) { + array.add(toBsonValue(value)); + } else { + array.set(index, toBsonValue(value)); + } + } + } + + private static BsonArray toBsonArray(final Object o) { + return (BsonArray) o; + } + + private static BsonDocument toBsonDocument(final Object o) { + return (BsonDocument) o; + } + + /** + * Refer to this link for background on the implementation : + * https://github.com/spring-projects/spring-data-mongodb/blob/main/spring-data-mongodb/src/main/java/org/springframework/data/mongodb/util/BsonUtils.java#L66 + */ + private static BsonValue toBsonValue(Object source) { + + if (source instanceof BsonValue) { + return (BsonValue) source; + } + + if (source instanceof String) { + return new BsonString((String) source); + } + + if (source instanceof ObjectId) { + return new BsonObjectId((ObjectId) source); + } + + if (source instanceof Double) { + return new BsonDouble((Double) source); + } + + if (source instanceof Integer) { + return new BsonInt32((Integer) source); + } + + if (source instanceof Long) { + return new BsonInt64((Long) source); + } + + if (source instanceof byte[]) { + return new BsonBinary((byte[]) source); + } + + if (source instanceof Boolean) { + return new BsonBoolean((Boolean) source); + } + + if (source instanceof Float) { + return new BsonDouble((Float) source); + } + + throw new IllegalArgumentException(String.format("Unable to convert %s (%s) to BsonValue.", + source, source != null ? source.getClass().getName() : "null")); + } + + /** + * Extract the corresponding plain value from {@link BsonValue}. Eg. plain {@link String} from + * {@link org.bson.BsonString}. + * @param value must not be {@literal null}. 
+ * @since 2.1 + */ + public static Object toJavaType(BsonValue value) { + + switch (value.getBsonType()) { + case INT32: + return value.asInt32().getValue(); + case INT64: + return value.asInt64().getValue(); + case STRING: + return value.asString().getValue(); + case DECIMAL128: + return value.asDecimal128().doubleValue(); + case DOUBLE: + return value.asDouble().getValue(); + case BOOLEAN: + return value.asBoolean().getValue(); + case OBJECT_ID: + return value.asObjectId().getValue(); + case BINARY: + return value.asBinary().getData(); + case DATE_TIME: + return new Date(value.asDateTime().getValue()); + case SYMBOL: + return value.asSymbol().getSymbol(); + case ARRAY: + return value.asArray().toArray(); + case DOCUMENT: + return Document.parse(value.asDocument().toJson()); + default: + return value; } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormat.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormat.java index 0d1189151e9..bc4a207da92 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormat.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,67 +20,44 @@ import java.nio.ByteBuffer; public interface JsonDataFormat { - /** - * Return the byte[] of the Json Object of the underlying format. - * @param object - * @return - */ - byte[] toBytes(Object object); - - /** - * Return the Object corresponding to the Data format in which JSON is stored - * @param value - * @return - */ - Object toObject(String value); - - /** - * Return the Object corresponding to the Data format in which JSON is stored - * @param bytes - * @param offset - * @param length - * @return - */ - Object toObject(byte[] bytes, int offset, int length); - - /** - * Get the estimated size of the object - Json - * @param o - * @return - */ - int estimateByteSize(Object o); - - /** - * Get the type of the value in the Json in the specified path. The type confirms to a - * java.sql.Types - * @param obj - * @param jsonPathExprStr - * @return - */ - int getValueType(Object obj, String jsonPathExprStr); - - /** - * Get the value from Json in the specified path - * @param obj - * @param jsonPathExprStr - * @return - */ - Object getValue(Object obj, String jsonPathExprStr); - - /** - * Update the value in the Json path and return the ByteBuffer - * @param top - * @param jsonPathExprStr - * @param newVal - * @return - */ - ByteBuffer updateValue(Object top, String jsonPathExprStr, String newVal); - - /** - * Checks if the path is valid in a JSON document. - * @param top - * @param path - * @return - */ - boolean isPathValid(Object top, String path); -} \ No newline at end of file + /** + * Return the byte[] of the Json Object of the underlying format. 
+ */ + byte[] toBytes(Object object); + + /** + * Return the Object corresponding to the Data format in which JSON is stored + */ + Object toObject(String value); + + /** + * Return the Object corresponding to the Data format in which JSON is stored + */ + Object toObject(byte[] bytes, int offset, int length); + + /** + * Get the estimated size of the object - Json + */ + int estimateByteSize(Object o); + + /** + * Get the type of the value in the Json in the specified path. The type confirms to a + * java.sql.Types + */ + int getValueType(Object obj, String jsonPathExprStr); + + /** + * Get the value from Json in the specified path + */ + Object getValue(Object obj, String jsonPathExprStr); + + /** + * Update the value in the Json path and return the ByteBuffer + */ + ByteBuffer updateValue(Object top, String jsonPathExprStr, String newVal); + + /** + * Checks if the path is valid in a JSON document. + */ + boolean isPathValid(Object top, String path); +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormatFactory.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormatFactory.java index 7795733e2c5..3ba0888af29 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormatFactory.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonDataFormatFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,13 +18,13 @@ package org.apache.phoenix.util.json; public class JsonDataFormatFactory { - public enum DataFormat { - BSON, - STRING - } - public static JsonDataFormat getJsonDataFormat(DataFormat type) { - if(type == DataFormat.BSON) - return new BsonDataFormat(); - else return null; - } + public enum DataFormat { + BSON, + STRING + } + + public static JsonDataFormat getJsonDataFormat(DataFormat type) { + if (type == DataFormat.BSON) return new BsonDataFormat(); + else return null; + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java index 1882d8aa036..7fa99f4ff73 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,218 +39,221 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.CaseFormat; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.UpsertExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.CaseFormat; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; - /** {@link UpsertExecutor} over {@link Map} objects, as parsed from JSON. */ public class JsonUpsertExecutor extends UpsertExecutor, Object> { - protected static final Logger LOGGER = LoggerFactory.getLogger(JsonUpsertExecutor.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(JsonUpsertExecutor.class); - /** Testing constructor. Do not use in prod. */ - @VisibleForTesting - protected JsonUpsertExecutor(Connection conn, List columnInfoList, - PreparedStatement stmt, UpsertListener> upsertListener) { - super(conn, columnInfoList, stmt, upsertListener); - finishInit(); - } + /** Testing constructor. Do not use in prod. */ + @VisibleForTesting + protected JsonUpsertExecutor(Connection conn, List columnInfoList, + PreparedStatement stmt, UpsertListener> upsertListener) { + super(conn, columnInfoList, stmt, upsertListener); + finishInit(); + } - public JsonUpsertExecutor(Connection conn, String tableName, List columnInfoList, - UpsertExecutor.UpsertListener> upsertListener) { - super(conn, tableName, columnInfoList, upsertListener); - finishInit(); - } + public JsonUpsertExecutor(Connection conn, String tableName, List columnInfoList, + UpsertExecutor.UpsertListener> upsertListener) { + super(conn, tableName, columnInfoList, upsertListener); + finishInit(); + } - @Override - protected void execute(Map record) { - int fieldIndex = 0; - String colName = null; - try { - if (record.size() < conversionFunctions.size()) { - String message = String.format("JSON record does not have enough values (has %d, but needs %d)", - record.size(), conversionFunctions.size()); - throw new IllegalArgumentException(message); - } - for (fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { - colName = CaseFormat.UPPER_UNDERSCORE.to( - CaseFormat.LOWER_UNDERSCORE, columnInfos.get(fieldIndex).getColumnName()); - if (colName.contains(".")) { - StringBuilder sb = new StringBuilder(); - String[] parts = colName.split("\\."); - // assume first part is the column family name; omita - for (int i = 1; i < parts.length; i++) { - sb.append(parts[i]); - if (i != parts.length - 1) { - sb.append("."); - } - } - colName = sb.toString(); - } - if (colName.contains("\"")) { - colName = colName.replace("\"", ""); - } - Object sqlValue = conversionFunctions.get(fieldIndex).apply(record.get(colName)); - if (sqlValue != null) { - preparedStatement.setObject(fieldIndex + 1, sqlValue); - } else { - 
preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); - } - } - preparedStatement.execute(); - upsertListener.upsertDone(++upsertCount); - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - // Even though this is an error we only log it with debug logging because we're notifying the - // listener, and it can do its own logging if needed - LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e); + @Override + protected void execute(Map record) { + int fieldIndex = 0; + String colName = null; + try { + if (record.size() < conversionFunctions.size()) { + String message = + String.format("JSON record does not have enough values (has %d, but needs %d)", + record.size(), conversionFunctions.size()); + throw new IllegalArgumentException(message); + } + for (fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { + colName = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_UNDERSCORE, + columnInfos.get(fieldIndex).getColumnName()); + if (colName.contains(".")) { + StringBuilder sb = new StringBuilder(); + String[] parts = colName.split("\\."); + // assume first part is the column family name; omita + for (int i = 1; i < parts.length; i++) { + sb.append(parts[i]); + if (i != parts.length - 1) { + sb.append("."); } - upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e)); + } + colName = sb.toString(); } + if (colName.contains("\"")) { + colName = colName.replace("\"", ""); + } + Object sqlValue = conversionFunctions.get(fieldIndex).apply(record.get(colName)); + if (sqlValue != null) { + preparedStatement.setObject(fieldIndex + 1, sqlValue); + } else { + preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); + } + } + preparedStatement.execute(); + upsertListener.upsertDone(++upsertCount); + } catch (Exception e) { + if (LOGGER.isDebugEnabled()) { + // Even though this is an error we only log it with debug logging because we're notifying + // the + // listener, and it can do its own logging if needed + LOGGER.debug( + "Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e); + } + upsertListener.errorOnRecord(record, + new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e)); } + } - @Override - public void close() throws IOException { - try { - preparedStatement.close(); - } catch (SQLException e) { - // An exception while closing the prepared statement is most likely a sign of a real problem, so we don't - // want to hide it with closeQuietly or something similar - throw new RuntimeException(e); - } + @Override + public void close() throws IOException { + try { + preparedStatement.close(); + } catch (SQLException e) { + // An exception while closing the prepared statement is most likely a sign of a real problem, + // so we don't + // want to hide it with closeQuietly or something similar + throw new RuntimeException(e); } + } - @Override - protected Function createConversionFunction(PDataType dataType) { - if (dataType.isArrayType()) { - return new ArrayDatatypeConversionFunction( - new ObjectToArrayConverter( - conn, - PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE))); - } else { - return new SimpleDatatypeConversionFunction(dataType, this.conn); - } + @Override + protected Function createConversionFunction(PDataType dataType) { + if (dataType.isArrayType()) { + return new ArrayDatatypeConversionFunction(new ObjectToArrayConverter(conn, + 
PDataType.fromTypeId(dataType.getSqlType() - PDataType.ARRAY_TYPE_BASE))); + } else { + return new SimpleDatatypeConversionFunction(dataType, this.conn); } + } - /** - * Performs typed conversion from String values to a given column value type. - */ - static class SimpleDatatypeConversionFunction implements Function { + /** + * Performs typed conversion from String values to a given column value type. + */ + static class SimpleDatatypeConversionFunction implements Function { - private final PDataType dataType; - private final DateUtil.DateTimeParser dateTimeParser; - private final String binaryEncoding; + private final PDataType dataType; + private final DateUtil.DateTimeParser dateTimeParser; + private final String binaryEncoding; - SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) { - Properties props; - try { - props = conn.getClientInfo(); - } catch (SQLException e) { - throw new RuntimeException(e); - } - this.dataType = dataType; - if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { - // TODO: move to DateUtil - String dateFormat; - int dateSqlType = dataType.getResultSetSqlType(); - if (dateSqlType == Types.DATE) { - dateFormat = props.getProperty(QueryServices.DATE_FORMAT_ATTRIB, - DateUtil.DEFAULT_DATE_FORMAT); - } else if (dateSqlType == Types.TIME) { - dateFormat = props.getProperty(QueryServices.TIME_FORMAT_ATTRIB, - DateUtil.DEFAULT_TIME_FORMAT); - } else { - dateFormat = props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB, - DateUtil.DEFAULT_TIMESTAMP_FORMAT); - } - String timeZoneId = props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, - QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE); - this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId); - } else { - this.dateTimeParser = null; - } - this.binaryEncoding = props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, - QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING); + SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) { + Properties props; + try { + props = conn.getClientInfo(); + } catch (SQLException e) { + throw new RuntimeException(e); + } + this.dataType = dataType; + if (dataType.isCoercibleTo(PTimestamp.INSTANCE)) { + // TODO: move to DateUtil + String dateFormat; + int dateSqlType = dataType.getResultSetSqlType(); + if (dateSqlType == Types.DATE) { + dateFormat = + props.getProperty(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT); + } else if (dateSqlType == Types.TIME) { + dateFormat = + props.getProperty(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT); + } else { + dateFormat = props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB, + DateUtil.DEFAULT_TIMESTAMP_FORMAT); } + String timeZoneId = props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, + QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE); + this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId); + } else { + this.dateTimeParser = null; + } + this.binaryEncoding = props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, + QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING); + } - @Nullable - @Override - public Object apply(@Nullable Object input) { - if (input == null) { - return null; - } - if (dataType == PTimestamp.INSTANCE) { - return DateUtil.parseTimestamp(input.toString()); - } - if (dateTimeParser != null && input instanceof String) { - final String s = (String) input; - long epochTime = dateTimeParser.parseDateTime(s); - byte[] byteValue = new 
byte[dataType.getByteSize()]; - dataType.getCodec().encodeLong(epochTime, byteValue, 0); - return dataType.toObject(byteValue); - }else if (dataType == PBoolean.INSTANCE) { - switch (input.toString()) { - case "true": - case "t": - case "T": - case "1": - return Boolean.TRUE; - case "false": - case "f": - case "F": - case "0": - return Boolean.FALSE; - default: - throw new RuntimeException("Invalid boolean value: '" + input - + "', must be one of ['true','t','1','false','f','0']"); - } - }else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE){ - EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase()); - Object object = null; - switch (format) { - case BASE64: - object = Base64.getDecoder().decode(input.toString()); - if (object == null) { throw new IllegalDataException( - "Input: [" + input + "] is not base64 encoded"); } - break; - case ASCII: - object = Bytes.toBytes(input.toString()); - break; - default: - throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\""); - } - return object; + @Nullable + @Override + public Object apply(@Nullable Object input) { + if (input == null) { + return null; + } + if (dataType == PTimestamp.INSTANCE) { + return DateUtil.parseTimestamp(input.toString()); + } + if (dateTimeParser != null && input instanceof String) { + final String s = (String) input; + long epochTime = dateTimeParser.parseDateTime(s); + byte[] byteValue = new byte[dataType.getByteSize()]; + dataType.getCodec().encodeLong(epochTime, byteValue, 0); + return dataType.toObject(byteValue); + } else if (dataType == PBoolean.INSTANCE) { + switch (input.toString()) { + case "true": + case "t": + case "T": + case "1": + return Boolean.TRUE; + case "false": + case "f": + case "F": + case "0": + return Boolean.FALSE; + default: + throw new RuntimeException("Invalid boolean value: '" + input + + "', must be one of ['true','t','1','false','f','0']"); } - - return dataType.toObject(input, dataType); + } else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE) { + EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase()); + Object object = null; + switch (format) { + case BASE64: + object = Base64.getDecoder().decode(input.toString()); + if (object == null) { + throw new IllegalDataException("Input: [" + input + "] is not base64 encoded"); + } + break; + case ASCII: + object = Bytes.toBytes(input.toString()); + break; + default: + throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\""); } + return object; + } + + return dataType.toObject(input, dataType); } + } - /** - * Converts string representations of arrays into Phoenix arrays of the correct type. - */ - private static class ArrayDatatypeConversionFunction implements Function { + /** + * Converts string representations of arrays into Phoenix arrays of the correct type. 
+ */ + private static class ArrayDatatypeConversionFunction implements Function { - private final ObjectToArrayConverter arrayConverter; + private final ObjectToArrayConverter arrayConverter; - private ArrayDatatypeConversionFunction(ObjectToArrayConverter arrayConverter) { - this.arrayConverter = arrayConverter; - } + private ArrayDatatypeConversionFunction(ObjectToArrayConverter arrayConverter) { + this.arrayConverter = arrayConverter; + } - @Nullable - @Override - public Object apply(@Nullable Object input) { - try { - return arrayConverter.toArray(input); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } + @Nullable + @Override + public Object apply(@Nullable Object input) { + try { + return arrayConverter.toArray(input); + } catch (SQLException e) { + throw new RuntimeException(e); + } } + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/ObjectToArrayConverter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/ObjectToArrayConverter.java index 654143a2ae6..dc915674f4e 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/ObjectToArrayConverter.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/json/ObjectToArrayConverter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,6 @@ import java.util.List; import org.apache.phoenix.schema.types.PDataType; - import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; @@ -32,38 +31,36 @@ */ class ObjectToArrayConverter { - private final Connection conn; - private final PDataType elementDataType; - private final JsonUpsertExecutor.SimpleDatatypeConversionFunction elementConvertFunction; + private final Connection conn; + private final PDataType elementDataType; + private final JsonUpsertExecutor.SimpleDatatypeConversionFunction elementConvertFunction; - /** - * Instantiate with the array value separator and data type. - * - * @param conn Phoenix connection to target database - * @param elementDataType datatype of the elements of arrays to be created - */ - public ObjectToArrayConverter(Connection conn, PDataType elementDataType) { - this.conn = conn; - this.elementDataType = elementDataType; - this.elementConvertFunction = - new JsonUpsertExecutor.SimpleDatatypeConversionFunction(elementDataType, this.conn); - } + /** + * Instantiate with the array value separator and data type. + * @param conn Phoenix connection to target database + * @param elementDataType datatype of the elements of arrays to be created + */ + public ObjectToArrayConverter(Connection conn, PDataType elementDataType) { + this.conn = conn; + this.elementDataType = elementDataType; + this.elementConvertFunction = + new JsonUpsertExecutor.SimpleDatatypeConversionFunction(elementDataType, this.conn); + } - /** - * Convert an input delimited string into a phoenix array of the configured type. 
- * - * @param input string containing delimited array values - * @return the array containing the values represented in the input string - */ - public Array toArray(Object input) throws SQLException { - if (input == null) { - return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); - } - List list = (List) input; - if (list.isEmpty()) { - return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); - } - return conn.createArrayOf(elementDataType.getSqlTypeName(), - Lists.newArrayList(Iterables.transform(list, elementConvertFunction)).toArray()); + /** + * Convert an input delimited string into a phoenix array of the configured type. + * @param input string containing delimited array values + * @return the array containing the values represented in the input string + */ + public Array toArray(Object input) throws SQLException { + if (input == null) { + return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); + } + List list = (List) input; + if (list.isEmpty()) { + return conn.createArrayOf(elementDataType.getSqlTypeName(), new Object[0]); } + return conn.createArrayOf(elementDataType.getSqlTypeName(), + Lists.newArrayList(Iterables.transform(list, elementConvertFunction)).toArray()); + } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java b/phoenix-core-client/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java index d32f9020703..cfd325c8f40 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/util/regex/RegexUpsertExecutor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,59 +22,65 @@ import java.util.List; import java.util.Map; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.UpsertExecutor; import org.apache.phoenix.util.json.JsonUpsertExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; - -/** {@link UpsertExecutor} over {@link Map} objects, convert input record into {@link Map} objects by using regex. */ +/** + * {@link UpsertExecutor} over {@link Map} objects, convert input record into {@link Map} objects by + * using regex. + */ public class RegexUpsertExecutor extends JsonUpsertExecutor { - protected static final Logger LOGGER = LoggerFactory.getLogger(RegexUpsertExecutor.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(RegexUpsertExecutor.class); - /** Testing constructor. Do not use in prod. */ - @VisibleForTesting - protected RegexUpsertExecutor(Connection conn, List columnInfoList, - PreparedStatement stmt, UpsertListener> upsertListener) { - super(conn, columnInfoList, stmt, upsertListener); - } + /** Testing constructor. Do not use in prod. 
*/ + @VisibleForTesting + protected RegexUpsertExecutor(Connection conn, List columnInfoList, + PreparedStatement stmt, UpsertListener> upsertListener) { + super(conn, columnInfoList, stmt, upsertListener); + } - public RegexUpsertExecutor(Connection conn, String tableName, List columnInfoList, - UpsertExecutor.UpsertListener> upsertListener) { - super(conn, tableName, columnInfoList, upsertListener); - } + public RegexUpsertExecutor(Connection conn, String tableName, List columnInfoList, + UpsertExecutor.UpsertListener> upsertListener) { + super(conn, tableName, columnInfoList, upsertListener); + } - @Override - protected void execute(Map record) { - int fieldIndex = 0; - String colName = null; - try { - if (record.size() < conversionFunctions.size()) { - String message = String.format("Input record does not have enough values based on regex (has %d, but needs %d)", - record.size(), conversionFunctions.size()); - throw new IllegalArgumentException(message); - } - for (fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { - colName = columnInfos.get(fieldIndex).getColumnName(); - Object sqlValue = conversionFunctions.get(fieldIndex).apply(record.get(colName)); - if (sqlValue != null) { - preparedStatement.setObject(fieldIndex + 1, sqlValue); - } else { - preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); - } - } - preparedStatement.execute(); - upsertListener.upsertDone(++upsertCount); - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - // Even though this is an error we only log it with debug logging because we're notifying the - // listener, and it can do its own logging if needed - LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e); - } - upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e)); + @Override + protected void execute(Map record) { + int fieldIndex = 0; + String colName = null; + try { + if (record.size() < conversionFunctions.size()) { + String message = String.format( + "Input record does not have enough values based on regex (has %d, but needs %d)", + record.size(), conversionFunctions.size()); + throw new IllegalArgumentException(message); + } + for (fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) { + colName = columnInfos.get(fieldIndex).getColumnName(); + Object sqlValue = conversionFunctions.get(fieldIndex).apply(record.get(colName)); + if (sqlValue != null) { + preparedStatement.setObject(fieldIndex + 1, sqlValue); + } else { + preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType()); } + } + preparedStatement.execute(); + upsertListener.upsertDone(++upsertCount); + } catch (Exception e) { + if (LOGGER.isDebugEnabled()) { + // Even though this is an error we only log it with debug logging because we're notifying + // the + // listener, and it can do its own logging if needed + LOGGER.debug( + "Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e); + } + upsertListener.errorOnRecord(record, + new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e)); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/pom.xml b/phoenix-core-server/pom.xml index bb55875b464..45611f77507 100644 --- a/phoenix-core-server/pom.xml +++ b/phoenix-core-server/pom.xml @@ -15,181 +15,178 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - - - phoenix - org.apache.phoenix - 5.3.0-SNAPSHOT - - 4.0.0 - phoenix-core-server - Phoenix Core Server + + 4.0.0 + + org.apache.phoenix + phoenix + 5.3.0-SNAPSHOT + + phoenix-core-server + Phoenix Core Server - - - org.apache.phoenix - phoenix-core-client - + + + org.apache.phoenix + phoenix-core-client + - - - org.apache.phoenix.thirdparty - phoenix-shaded-guava - + + + org.apache.phoenix.thirdparty + phoenix-shaded-guava + - - - org.apache.phoenix - phoenix-hbase-compat-${hbase.compat.version} - true - + + + org.apache.phoenix + phoenix-hbase-compat-${hbase.compat.version} + true + - - - org.apache.hadoop - hadoop-common - - - org.apache.hadoop - hadoop-yarn-api - - - org.apache.hadoop - hadoop-mapreduce-client-core - + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-yarn-api + + + org.apache.hadoop + hadoop-mapreduce-client-core + - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-mapreduce - - - org.apache.hbase - hbase-metrics - - - org.apache.hbase - hbase-server - - - org.apache.hbase - hbase-zookeeper - - - org.apache.hbase - hbase-metrics-api - - - org.apache.hbase - hbase-client - - - org.apache.hbase - hbase-protocol - - - org.apache.hbase - hbase-protocol-shaded - + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-mapreduce + + + org.apache.hbase + hbase-metrics + + + org.apache.hbase + hbase-server + + + org.apache.hbase + hbase-zookeeper + + + org.apache.hbase + hbase-metrics-api + + + org.apache.hbase + hbase-client + + + org.apache.hbase + hbase-protocol + + + org.apache.hbase + hbase-protocol-shaded + - - - org.apache.zookeeper - zookeeper - - - org.apache.zookeeper - zookeeper-jute - + + + org.apache.zookeeper + zookeeper + + + org.apache.zookeeper + zookeeper-jute + - - - org.apache.omid - omid-hbase-coprocessor - - - org.apache.omid - omid-commit-table - + + + org.apache.omid + omid-hbase-coprocessor + + + org.apache.omid + omid-commit-table + + + + org.antlr + antlr-runtime + + + com.github.stephenc.findbugs + findbugs-annotations + + + org.apache.htrace + htrace-core + + + com.google.protobuf + protobuf-java + + + com.fasterxml.jackson.core + jackson-annotations + + + org.slf4j + slf4j-api + + + joda-time + joda-time + + + org.apache.commons + commons-csv + + + org.apache.phoenix.thirdparty + phoenix-shaded-commons-cli + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + org.apache.commons + commons-lang3 + + + commons-codec + commons-codec + + + com.google.code.findbugs + jsr305 + + - - - org.antlr - antlr-runtime - - - com.github.stephenc.findbugs - findbugs-annotations - - - org.apache.htrace - htrace-core - - - com.google.protobuf - protobuf-java - - - com.fasterxml.jackson.core - jackson-annotations - - - org.slf4j - slf4j-api - - - joda-time - joda-time - - - org.apache.commons - commons-csv - - - org.apache.phoenix.thirdparty - phoenix-shaded-commons-cli - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - org.apache.commons - commons-lang3 - - - commons-codec - commons-codec - - - com.google.code.findbugs - jsr305 - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - maven-dependency-plugin - - true - - - - + + + + org.codehaus.mojo + build-helper-maven-plugin + + + maven-dependency-plugin + + true + + + + diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java 
b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java index 0a04ad47873..3ab54ad7c73 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.ipc; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; @@ -24,237 +26,237 @@ import org.apache.phoenix.compat.hbase.CompatPhoenixRpcScheduler; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT; - /** - * {@link RpcScheduler} that first checks to see if this is an index or metadata update before passing off the - * call to the delegate {@link RpcScheduler}. + * {@link RpcScheduler} that first checks to see if this is an index or metadata update before + * passing off the call to the delegate {@link RpcScheduler}. */ public class PhoenixRpcScheduler extends CompatPhoenixRpcScheduler { + // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4 + private static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = + "ipc.server.callqueue.handler.factor"; + private static final String CALLQUEUE_LENGTH_CONF_KEY = "ipc.server.max.callqueue.length"; + private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; + + private int indexPriority; + private int metadataPriority; + private int serverSidePriority; + private int invalidateMetadataCachePriority; + private RpcExecutor indexCallExecutor; + private RpcExecutor metadataCallExecutor; + private RpcExecutor serverSideCallExecutor; + // Executor for invalidating server side metadata cache RPCs. + private RpcExecutor invalidateMetadataCacheCallExecutor; + private int port; + + public PhoenixRpcScheduler(Configuration conf, RpcScheduler delegate, int indexPriority, + int metadataPriority, int serversidePriority, int invalidateMetadataCachePriority, + PriorityFunction priorityFunction, Abortable abortable) { // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4 - private static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = "ipc.server.callqueue.handler.factor"; - private static final String CALLQUEUE_LENGTH_CONF_KEY = "ipc.server.max.callqueue.length"; - private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; - - private int indexPriority; - private int metadataPriority; - private int serverSidePriority; - private int invalidateMetadataCachePriority; - private RpcExecutor indexCallExecutor; - private RpcExecutor metadataCallExecutor; - private RpcExecutor serverSideCallExecutor; - // Executor for invalidating server side metadata cache RPCs. 
- private RpcExecutor invalidateMetadataCacheCallExecutor; - private int port; - - - public PhoenixRpcScheduler(Configuration conf, RpcScheduler delegate, int indexPriority, - int metadataPriority, int serversidePriority, - int invalidateMetadataCachePriority, - PriorityFunction priorityFunction, Abortable abortable) { - // copied from org.apache.hadoop.hbase.ipc.SimpleRpcScheduler in HBase 0.98.4 - int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT); - int metadataHandlerCount = conf.getInt(QueryServices.METADATA_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_METADATA_HANDLER_COUNT); - int serverSideHandlerCount = conf.getInt(QueryServices.SERVER_SIDE_HANDLER_COUNT_ATTRIB, QueryServicesOptions.DEFAULT_SERVERSIDE_HANDLER_COUNT); - int invalidateMetadataCacheHandlerCount = conf.getInt( - QueryServices.INVALIDATE_CACHE_HANDLER_COUNT_ATTRIB, - DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT); - int maxIndexQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, indexHandlerCount*DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); - int maxMetadataQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, metadataHandlerCount*DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); - int maxServerSideQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, serverSideHandlerCount*DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); - int maxInvalidateMetadataCacheQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, - invalidateMetadataCacheHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); - - - this.indexPriority = indexPriority; - this.metadataPriority = metadataPriority; - this.serverSidePriority = serversidePriority; - this.invalidateMetadataCachePriority = invalidateMetadataCachePriority; - this.delegate = delegate; - this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", indexHandlerCount, maxIndexQueueLength, priorityFunction,conf,abortable); - this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", metadataHandlerCount, maxMetadataQueueLength, priorityFunction,conf,abortable); - this.serverSideCallExecutor = new BalancedQueueRpcExecutor("ServerSide", serverSideHandlerCount, maxServerSideQueueLength, priorityFunction,conf,abortable); - this.invalidateMetadataCacheCallExecutor = new BalancedQueueRpcExecutor( - "InvalidateMetadataCache", invalidateMetadataCacheHandlerCount, - maxInvalidateMetadataCacheQueueLength, priorityFunction, conf, abortable); - } - - @Override - public void init(Context context) { - delegate.init(context); - this.port = context.getListenerAddress().getPort(); - } - - @Override - public void start() { - delegate.start(); - indexCallExecutor.start(port); - metadataCallExecutor.start(port); - serverSideCallExecutor.start(port); - invalidateMetadataCacheCallExecutor.start(port); - } - - @Override - public void stop() { - delegate.stop(); - indexCallExecutor.stop(); - metadataCallExecutor.stop(); - serverSideCallExecutor.stop(); - invalidateMetadataCacheCallExecutor.stop(); - } - - @Override - public boolean compatDispatch(CallRunner callTask) throws IOException, InterruptedException { - RpcCall call = callTask.getRpcCall(); - int priority = call.getHeader().getPriority(); - if (indexPriority == priority) { - return indexCallExecutor.dispatch(callTask); - } else if (metadataPriority == priority) { - return metadataCallExecutor.dispatch(callTask); - } else if (serverSidePriority == priority) { - return serverSideCallExecutor.dispatch(callTask); - } else if (invalidateMetadataCachePriority == priority) { - return 
invalidateMetadataCacheCallExecutor.dispatch(callTask); - } else { - return delegate.dispatch(callTask); - } - } - - @Override - public CallQueueInfo getCallQueueInfo() { - return delegate.getCallQueueInfo(); - } - - @Override - public int getGeneralQueueLength() { - // not the best way to calculate, but don't have a better way to hook - // into metrics at the moment - return this.delegate.getGeneralQueueLength() - + this.indexCallExecutor.getQueueLength() - + this.metadataCallExecutor.getQueueLength() - + this.serverSideCallExecutor.getQueueLength() - + this.invalidateMetadataCacheCallExecutor.getQueueLength(); - } - - @Override - public int getPriorityQueueLength() { - return this.delegate.getPriorityQueueLength(); - } - - @Override - public int getReplicationQueueLength() { - return this.delegate.getReplicationQueueLength(); - } - - @Override - public int getActiveRpcHandlerCount() { - return this.delegate.getActiveRpcHandlerCount() - + this.indexCallExecutor.getActiveHandlerCount() - + this.metadataCallExecutor.getActiveHandlerCount() - + this.serverSideCallExecutor.getActiveHandlerCount() - + this.invalidateMetadataCacheCallExecutor.getActiveHandlerCount(); - } - - @Override - public long getNumGeneralCallsDropped() { - return delegate.getNumGeneralCallsDropped(); - } - - @Override - public long getNumLifoModeSwitches() { - return delegate.getNumLifoModeSwitches(); - } - - @VisibleForTesting - public void setIndexExecutorForTesting(RpcExecutor executor) { - this.indexCallExecutor = executor; - } - - @VisibleForTesting - public void setMetadataExecutorForTesting(RpcExecutor executor) { - this.metadataCallExecutor = executor; - } - - @VisibleForTesting - public void setInvalidateMetadataCacheExecutorForTesting(RpcExecutor executor) { - this.invalidateMetadataCacheCallExecutor = executor; - } - - @VisibleForTesting - public void setServerSideExecutorForTesting(RpcExecutor executor) { - this.serverSideCallExecutor = executor; - } - - @VisibleForTesting - public RpcExecutor getIndexExecutorForTesting() { - return this.indexCallExecutor; - } - - @VisibleForTesting - public RpcExecutor getMetadataExecutorForTesting() { - return this.metadataCallExecutor; - } - - @VisibleForTesting - public RpcExecutor getServerSideExecutorForTesting() { - return this.serverSideCallExecutor; - } - - @Override - public int getWriteQueueLength() { - return delegate.getWriteQueueLength(); - } - - @Override - public int getReadQueueLength() { - return delegate.getReadQueueLength(); - } - - @Override - public int getScanQueueLength() { - return delegate.getScanQueueLength(); - } - - @Override - public int getActiveWriteRpcHandlerCount() { - return delegate.getActiveWriteRpcHandlerCount(); - } - - @Override - public int getActiveReadRpcHandlerCount() { - return delegate.getActiveReadRpcHandlerCount(); - } - - @Override - public int getActiveScanRpcHandlerCount() { - return delegate.getActiveScanRpcHandlerCount(); - } - - @Override - public int getMetaPriorityQueueLength() { - return this.delegate.getMetaPriorityQueueLength(); - } - - @Override - public int getActiveGeneralRpcHandlerCount() { - return this.delegate.getActiveGeneralRpcHandlerCount(); - } - - @Override - public int getActivePriorityRpcHandlerCount() { - return this.delegate.getActivePriorityRpcHandlerCount(); - } - - @Override - public int getActiveMetaPriorityRpcHandlerCount() { - return this.delegate.getActiveMetaPriorityRpcHandlerCount(); - } - - @Override - public int getActiveReplicationRpcHandlerCount() { - return 
this.delegate.getActiveReplicationRpcHandlerCount(); + int indexHandlerCount = conf.getInt(QueryServices.INDEX_HANDLER_COUNT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_HANDLER_COUNT); + int metadataHandlerCount = conf.getInt(QueryServices.METADATA_HANDLER_COUNT_ATTRIB, + QueryServicesOptions.DEFAULT_METADATA_HANDLER_COUNT); + int serverSideHandlerCount = conf.getInt(QueryServices.SERVER_SIDE_HANDLER_COUNT_ATTRIB, + QueryServicesOptions.DEFAULT_SERVERSIDE_HANDLER_COUNT); + int invalidateMetadataCacheHandlerCount = conf.getInt( + QueryServices.INVALIDATE_CACHE_HANDLER_COUNT_ATTRIB, DEFAULT_INVALIDATE_CACHE_HANDLER_COUNT); + int maxIndexQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, + indexHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + int maxMetadataQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, + metadataHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + int maxServerSideQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, + serverSideHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + int maxInvalidateMetadataCacheQueueLength = conf.getInt(CALLQUEUE_LENGTH_CONF_KEY, + invalidateMetadataCacheHandlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + + this.indexPriority = indexPriority; + this.metadataPriority = metadataPriority; + this.serverSidePriority = serversidePriority; + this.invalidateMetadataCachePriority = invalidateMetadataCachePriority; + this.delegate = delegate; + this.indexCallExecutor = new BalancedQueueRpcExecutor("Index", indexHandlerCount, + maxIndexQueueLength, priorityFunction, conf, abortable); + this.metadataCallExecutor = new BalancedQueueRpcExecutor("Metadata", metadataHandlerCount, + maxMetadataQueueLength, priorityFunction, conf, abortable); + this.serverSideCallExecutor = new BalancedQueueRpcExecutor("ServerSide", serverSideHandlerCount, + maxServerSideQueueLength, priorityFunction, conf, abortable); + this.invalidateMetadataCacheCallExecutor = + new BalancedQueueRpcExecutor("InvalidateMetadataCache", invalidateMetadataCacheHandlerCount, + maxInvalidateMetadataCacheQueueLength, priorityFunction, conf, abortable); + } + + @Override + public void init(Context context) { + delegate.init(context); + this.port = context.getListenerAddress().getPort(); + } + + @Override + public void start() { + delegate.start(); + indexCallExecutor.start(port); + metadataCallExecutor.start(port); + serverSideCallExecutor.start(port); + invalidateMetadataCacheCallExecutor.start(port); + } + + @Override + public void stop() { + delegate.stop(); + indexCallExecutor.stop(); + metadataCallExecutor.stop(); + serverSideCallExecutor.stop(); + invalidateMetadataCacheCallExecutor.stop(); + } + + @Override + public boolean compatDispatch(CallRunner callTask) throws IOException, InterruptedException { + RpcCall call = callTask.getRpcCall(); + int priority = call.getHeader().getPriority(); + if (indexPriority == priority) { + return indexCallExecutor.dispatch(callTask); + } else if (metadataPriority == priority) { + return metadataCallExecutor.dispatch(callTask); + } else if (serverSidePriority == priority) { + return serverSideCallExecutor.dispatch(callTask); + } else if (invalidateMetadataCachePriority == priority) { + return invalidateMetadataCacheCallExecutor.dispatch(callTask); + } else { + return delegate.dispatch(callTask); } + } + + @Override + public CallQueueInfo getCallQueueInfo() { + return delegate.getCallQueueInfo(); + } + + @Override + public int getGeneralQueueLength() { + // not the best way to calculate, but don't have a better way to 
hook + // into metrics at the moment + return this.delegate.getGeneralQueueLength() + this.indexCallExecutor.getQueueLength() + + this.metadataCallExecutor.getQueueLength() + this.serverSideCallExecutor.getQueueLength() + + this.invalidateMetadataCacheCallExecutor.getQueueLength(); + } + + @Override + public int getPriorityQueueLength() { + return this.delegate.getPriorityQueueLength(); + } + + @Override + public int getReplicationQueueLength() { + return this.delegate.getReplicationQueueLength(); + } + + @Override + public int getActiveRpcHandlerCount() { + return this.delegate.getActiveRpcHandlerCount() + this.indexCallExecutor.getActiveHandlerCount() + + this.metadataCallExecutor.getActiveHandlerCount() + + this.serverSideCallExecutor.getActiveHandlerCount() + + this.invalidateMetadataCacheCallExecutor.getActiveHandlerCount(); + } + + @Override + public long getNumGeneralCallsDropped() { + return delegate.getNumGeneralCallsDropped(); + } + + @Override + public long getNumLifoModeSwitches() { + return delegate.getNumLifoModeSwitches(); + } + + @VisibleForTesting + public void setIndexExecutorForTesting(RpcExecutor executor) { + this.indexCallExecutor = executor; + } + + @VisibleForTesting + public void setMetadataExecutorForTesting(RpcExecutor executor) { + this.metadataCallExecutor = executor; + } + + @VisibleForTesting + public void setInvalidateMetadataCacheExecutorForTesting(RpcExecutor executor) { + this.invalidateMetadataCacheCallExecutor = executor; + } + + @VisibleForTesting + public void setServerSideExecutorForTesting(RpcExecutor executor) { + this.serverSideCallExecutor = executor; + } + + @VisibleForTesting + public RpcExecutor getIndexExecutorForTesting() { + return this.indexCallExecutor; + } + + @VisibleForTesting + public RpcExecutor getMetadataExecutorForTesting() { + return this.metadataCallExecutor; + } + + @VisibleForTesting + public RpcExecutor getServerSideExecutorForTesting() { + return this.serverSideCallExecutor; + } + + @Override + public int getWriteQueueLength() { + return delegate.getWriteQueueLength(); + } + + @Override + public int getReadQueueLength() { + return delegate.getReadQueueLength(); + } + + @Override + public int getScanQueueLength() { + return delegate.getScanQueueLength(); + } + + @Override + public int getActiveWriteRpcHandlerCount() { + return delegate.getActiveWriteRpcHandlerCount(); + } + + @Override + public int getActiveReadRpcHandlerCount() { + return delegate.getActiveReadRpcHandlerCount(); + } + + @Override + public int getActiveScanRpcHandlerCount() { + return delegate.getActiveScanRpcHandlerCount(); + } + + @Override + public int getMetaPriorityQueueLength() { + return this.delegate.getMetaPriorityQueueLength(); + } + + @Override + public int getActiveGeneralRpcHandlerCount() { + return this.delegate.getActiveGeneralRpcHandlerCount(); + } + + @Override + public int getActivePriorityRpcHandlerCount() { + return this.delegate.getActivePriorityRpcHandlerCount(); + } + + @Override + public int getActiveMetaPriorityRpcHandlerCount() { + return this.delegate.getActiveMetaPriorityRpcHandlerCount(); + } + + @Override + public int getActiveReplicationRpcHandlerCount() { + return this.delegate.getActiveReplicationRpcHandlerCount(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java index 75042a45dba..814d1111cb6 100644 --- 
a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,69 +22,68 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory; import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.IndexUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** * Factory to create a {@link PhoenixRpcScheduler}. In this package so we can access the * {@link SimpleRpcSchedulerFactory}. */ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory { - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class); - private static final String VERSION_TOO_OLD_FOR_INDEX_RPC = - "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled."; + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class); + private static final String VERSION_TOO_OLD_FOR_INDEX_RPC = + "Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled."; - @Override - public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) { - // create the delegate scheduler - RpcScheduler delegate; - try { - // happens in <=0.98.4 where the scheduler factory is not visible - delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable); - } catch (IllegalAccessError e) { - LOGGER.error(VERSION_TOO_OLD_FOR_INDEX_RPC); - throw e; - } + @Override + public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, + Abortable abortable) { + // create the delegate scheduler + RpcScheduler delegate; + try { + // happens in <=0.98.4 where the scheduler factory is not visible + delegate = new SimpleRpcSchedulerFactory().create(conf, priorityFunction, abortable); + } catch (IllegalAccessError e) { + LOGGER.error(VERSION_TOO_OLD_FOR_INDEX_RPC); + throw e; + } - // get the index priority configs - int indexPriority = IndexUtil.getIndexPriority(conf); - validatePriority(indexPriority); - // get the metadata priority configs - int metadataPriority = IndexUtil.getMetadataPriority(conf); - validatePriority(metadataPriority); - // get the server side priority configs - int serverSidePriority = IndexUtil.getServerSidePriority(conf); - validatePriority(serverSidePriority); + // get the index priority configs + int indexPriority = IndexUtil.getIndexPriority(conf); + validatePriority(indexPriority); + // get the metadata priority configs + int metadataPriority = IndexUtil.getMetadataPriority(conf); + validatePriority(metadataPriority); + // get the server side priority configs + int serverSidePriority = IndexUtil.getServerSidePriority(conf); + validatePriority(serverSidePriority); - // validate index and metadata priorities are not the same - 
Preconditions.checkArgument(indexPriority != metadataPriority, - "Index and Metadata priority must not be same " + indexPriority); - LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority " - + indexPriority + " and metadata rpc priority " + metadataPriority); + // validate index and metadata priorities are not the same + Preconditions.checkArgument(indexPriority != metadataPriority, + "Index and Metadata priority must not be same " + indexPriority); + LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + + " and metadata rpc priority " + metadataPriority); - int invalidateCachePriority = IndexUtil.getInvalidateMetadataCachePriority(conf); - validatePriority(invalidateCachePriority); - PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, delegate, indexPriority, - metadataPriority, serverSidePriority, invalidateCachePriority, priorityFunction, - abortable); - return scheduler; - } + int invalidateCachePriority = IndexUtil.getInvalidateMetadataCachePriority(conf); + validatePriority(invalidateCachePriority); + PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, delegate, indexPriority, + metadataPriority, serverSidePriority, invalidateCachePriority, priorityFunction, abortable); + return scheduler; + } - @Override - public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) { - return create(configuration, priorityFunction, null); - } + @Override + public RpcScheduler create(Configuration configuration, PriorityFunction priorityFunction) { + return create(configuration, priorityFunction, null); + } - /** - * Validates that the given priority does not overlap with the HBase priority range - */ - private void validatePriority(int priority) { - Preconditions.checkArgument( priority < HConstants.NORMAL_QOS || priority > HConstants.HIGH_QOS, "priority cannot be within hbase priority range " - + HConstants.NORMAL_QOS +" to " + HConstants.HIGH_QOS ); - } -} \ No newline at end of file + /** + * Validates that the given priority does not overlap with the HBase priority range + */ + private void validatePriority(int priority) { + Preconditions.checkArgument(priority < HConstants.NORMAL_QOS || priority > HConstants.HIGH_QOS, + "priority cannot be within hbase priority range " + HConstants.NORMAL_QOS + " to " + + HConstants.HIGH_QOS); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java index 11806198318..5202b39fced 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.ipc; - public class RpcUtil { - public static RpcCall getRpcContext() { - return RpcServer.CurCall.get(); - } - - public static void setRpcContext(RpcCall c){ - RpcServer.CurCall.set(c); - } + public static RpcCall getRpcContext() { + return RpcServer.CurCall.get(); + } + + public static void setRpcContext(RpcCall c) { + RpcServer.CurCall.set(c); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java index 0a5e8edf4d5..ccaf1ffa18a 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/InterRegionServerMetadataRpcControllerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,36 +26,36 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; /** - * {@link RpcControllerFactory} that should only be used when creating Table for - * making remote RPCs to the region servers hosting Phoenix SYSTEM tables. + * {@link RpcControllerFactory} that should only be used when creating Table for making remote RPCs + * to the region servers hosting Phoenix SYSTEM tables. 
*/ public class InterRegionServerMetadataRpcControllerFactory extends RpcControllerFactory { - public InterRegionServerMetadataRpcControllerFactory(Configuration conf) { - super(conf); - } - - @Override - public HBaseRpcController newController() { - HBaseRpcController delegate = super.newController(); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(CellScanner cellScanner) { - HBaseRpcController delegate = super.newController(cellScanner); - return getController(delegate); - } - - @Override - public HBaseRpcController newController(List cellIterables) { - HBaseRpcController delegate = super.newController(cellIterables); - return getController(delegate); - } - - private HBaseRpcController getController(HBaseRpcController delegate) { - // construct a chain of controllers: metadata and delegate controller - return new MetadataRpcController(delegate, conf); - } + public InterRegionServerMetadataRpcControllerFactory(Configuration conf) { + super(conf); + } + + @Override + public HBaseRpcController newController() { + HBaseRpcController delegate = super.newController(); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(CellScanner cellScanner) { + HBaseRpcController delegate = super.newController(cellScanner); + return getController(delegate); + } + + @Override + public HBaseRpcController newController(List cellIterables) { + HBaseRpcController delegate = super.newController(cellIterables); + return getController(delegate); + } + + private HBaseRpcController getController(HBaseRpcController delegate) { + // construct a chain of controllers: metadata and delegate controller + return new MetadataRpcController(delegate, conf); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java index 283bb6ed45f..78f5ac357ac 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/ipc/controller/ServerRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ */ @Deprecated public class ServerRpcControllerFactory extends InterRegionServerMetadataRpcControllerFactory { - public ServerRpcControllerFactory(Configuration conf) { - super(conf); - } -} \ No newline at end of file + public ServerRpcControllerFactory(Configuration conf) { + super(conf); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java index 2abca2dab29..24769e2f9b9 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -43,92 +42,95 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; import org.apache.phoenix.util.ServerUtil; + /* * Scanner to read data store and regenerate the local index data */ public class DataTableLocalIndexRegionScanner extends DelegateRegionScanner { - MultiKeyValueTuple result = new MultiKeyValueTuple(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - KeyValueBuilder kvBuilder = GenericKeyValueBuilder.INSTANCE; - private List indexMaintainers; - private byte[] startKey; - private byte[] endKey; - private byte[] localIndexFamily; - private Region region; - long maxBatchSizeBytes; - int maxBatchSize; - private MutationList mutationList; - - - /** - * @param scanner Scanner for data table stores - * @param region - * @param indexMaintainers Maintainer of local Indexes which needs to built - * @param localIndexFamily LocalIndex family needs to be built. - * @param conf - * @throws IOException - */ - public DataTableLocalIndexRegionScanner(RegionScanner scanner, Region region, - List indexMaintainers, byte[] localIndexFamily,Configuration conf) throws IOException { - super(scanner); - this.indexMaintainers = indexMaintainers; - this.startKey = region.getRegionInfo().getStartKey(); - this.endKey = region.getRegionInfo().getEndKey(); - this.localIndexFamily = localIndexFamily; - this.region=region; - maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - maxBatchSizeBytes = conf.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); - mutationList=new UngroupedAggregateRegionObserver.MutationList(maxBatchSize); - } + MultiKeyValueTuple result = new MultiKeyValueTuple(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + KeyValueBuilder kvBuilder = GenericKeyValueBuilder.INSTANCE; + private List indexMaintainers; + private byte[] startKey; + private byte[] endKey; + private byte[] localIndexFamily; + private Region region; + long maxBatchSizeBytes; + int maxBatchSize; + private MutationList mutationList; - @Override - public boolean next(List outResult, ScannerContext scannerContext) throws IOException { - return next(outResult); - } + /** + * @param scanner Scanner for data table stores + * @param indexMaintainers Maintainer of local Indexes which needs to built + * @param localIndexFamily LocalIndex family needs to be built. 
+ */ + public DataTableLocalIndexRegionScanner(RegionScanner scanner, Region region, + List indexMaintainers, byte[] localIndexFamily, Configuration conf) + throws IOException { + super(scanner); + this.indexMaintainers = indexMaintainers; + this.startKey = region.getRegionInfo().getStartKey(); + this.endKey = region.getRegionInfo().getEndKey(); + this.localIndexFamily = localIndexFamily; + this.region = region; + maxBatchSize = + conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + maxBatchSizeBytes = conf.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); + mutationList = new UngroupedAggregateRegionObserver.MutationList(maxBatchSize); + } - @Override - public boolean next(List results) throws IOException { - List dataTableResults = new ArrayList(); - boolean next = super.next(dataTableResults); - addMutations(dataTableResults); - if (ServerUtil.readyToCommit(mutationList.size(), mutationList.byteSize(), maxBatchSize, maxBatchSizeBytes)||!next) { - region.batchMutate(mutationList.toArray(new Mutation[mutationList.size()])); - mutationList.clear(); - } - return next; + @Override + public boolean next(List outResult, ScannerContext scannerContext) throws IOException { + return next(outResult); + } + + @Override + public boolean next(List results) throws IOException { + List dataTableResults = new ArrayList(); + boolean next = super.next(dataTableResults); + addMutations(dataTableResults); + if ( + ServerUtil.readyToCommit(mutationList.size(), mutationList.byteSize(), maxBatchSize, + maxBatchSizeBytes) || !next + ) { + region.batchMutate(mutationList.toArray(new Mutation[mutationList.size()])); + mutationList.clear(); } + return next; + } - private void addMutations(List dataTableResults) throws IOException { - if (!dataTableResults.isEmpty()) { - result.setKeyValues(dataTableResults); - for (IndexMaintainer maintainer : indexMaintainers) { - result.getKey(ptr); - ValueGetter valueGetter = maintainer - .createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), dataTableResults); - List list = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, - dataTableResults.get(0).getTimestamp(), startKey, endKey, false) - .getFamilyCellMap().get(localIndexFamily); - Put put = null; - Delete del = null; - for (Cell cell : list) { - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - mutationList.add(put); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - mutationList.add(del); - } - del.add(cell); - } - } + private void addMutations(List dataTableResults) throws IOException { + if (!dataTableResults.isEmpty()) { + result.setKeyValues(dataTableResults); + for (IndexMaintainer maintainer : indexMaintainers) { + result.getKey(ptr); + ValueGetter valueGetter = maintainer + .createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), dataTableResults); + List list = + maintainer + .buildUpdateMutation(kvBuilder, valueGetter, ptr, + dataTableResults.get(0).getTimestamp(), startKey, endKey, false) + .getFamilyCellMap().get(localIndexFamily); + Put put = null; + Delete del = null; + for (Cell cell : list) { + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); + mutationList.add(put); + } + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); + mutationList.add(del); } + del.add(cell); + } } + } } - + } } 
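Note (illustration only, not part of the patch): the DataTableLocalIndexRegionScanner hunk above is a pure Spotless reformat, and the behaviour it preserves is an accumulate-then-flush batching loop -- mutations are buffered until ServerUtil.readyToCommit reports that either the configured row-count or byte-size threshold has been reached, or the delegate scanner is exhausted, and only then is region.batchMutate called and the buffer cleared. The minimal, self-contained Java sketch below illustrates only that pattern; the MutationBatcher class, its field names and its thresholds are hypothetical stand-ins and are not Phoenix or HBase API.

    import java.util.ArrayList;
    import java.util.List;

    /** Hypothetical sketch of the accumulate-then-flush pattern; not Phoenix/HBase code. */
    final class MutationBatcher {
      private final int maxBatchSize;        // analogous to the row-count threshold above
      private final long maxBatchSizeBytes;  // analogous to the byte-size threshold above
      private final List<String> pending = new ArrayList<>(); // stand-in for MutationList
      private long pendingBytes = 0L;

      MutationBatcher(int maxBatchSize, long maxBatchSizeBytes) {
        this.maxBatchSize = maxBatchSize;
        this.maxBatchSizeBytes = maxBatchSizeBytes;
      }

      /** Buffers one mutation and flushes when a threshold is hit or the input is exhausted. */
      void add(String mutation, boolean inputExhausted) {
        pending.add(mutation);
        pendingBytes += mutation.length();
        boolean readyToCommit =
          pending.size() >= maxBatchSize || pendingBytes >= maxBatchSizeBytes;
        if (readyToCommit || inputExhausted) {
          flush();
        }
      }

      private void flush() {
        // The real scanner calls region.batchMutate(...) here; the sketch just reports and clears.
        System.out.println("flushing " + pending.size() + " mutations, " + pendingBytes + " bytes");
        pending.clear();
        pendingBytes = 0L;
      }

      public static void main(String[] args) {
        MutationBatcher batcher = new MutationBatcher(3, 1_000L);
        String[] rows = { "a", "bb", "ccc", "dddd", "e" };
        for (int i = 0; i < rows.length; i++) {
          batcher.add(rows[i], i == rows.length - 1);
        }
      }
    }

As a usage note, the two thresholds mirror the scanner's maxBatchSize and maxBatchSizeBytes fields read from configuration, and the inputExhausted flag plays the role of the "|| !next" term in the reformatted readyToCommit check, ensuring the final partial batch is still written out.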
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java index 901c48815d0..9274d73b35f 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,128 +48,113 @@ * that sort lowest and 'top' is the second half of the file with keys that sort greater than those * of the bottom half. The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * *

    * This type works in tandem with the {@link Reference} type. This class is used reading while - * Reference is used writing. - * - * - * This file is not splitable. Calls to #midkey() return null. + * Reference is used writing. This file is not splitable. Calls to #midkey() return null. */ public class IndexHalfStoreFileReader extends CompatIndexHalfStoreFileReader { - private final boolean top; - // This is the key we split around. Its the first possible entry on a row: - // i.e. empty column and a timestamp of LATEST_TIMESTAMP. - private final byte[] splitkey; - private final byte[] splitRow; - private final Map indexMaintainers; - private final byte[][] viewConstants; - private final int offset; - private final RegionInfo childRegionInfo; - private final byte[] regionStartKeyInHFile; - private final AtomicInteger refCount; - private final RegionInfo currentRegion; - - /** - * @param fs - * @param p - * @param cacheConf - * @param in - * @param size - * @param r - * @param conf - * @param indexMaintainers - * @param viewConstants - * @param regionInfo - * @param regionStartKeyInHFile - * @param splitKey - * @throws IOException - */ - public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf, - final FSDataInputStreamWrapper in, long size, final Reference r, - final Configuration conf, - final Map indexMaintainers, - final byte[][] viewConstants, final RegionInfo regionInfo, - byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile, - AtomicInteger refCount, RegionInfo currentRegion) throws IOException { - super(fs, cacheConf, conf, new ReaderContext(p, in, size, new HFileSystem(fs), - primaryReplicaStoreFile, - ReaderType.STREAM), - new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs), - primaryReplicaStoreFile, ReaderType.STREAM), conf), p); - getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader()); - this.splitkey = splitKey == null ? r.getSplitKey() : splitKey; - // Is it top or bottom half? - this.top = Reference.isTopFileRegion(r.getFileRegion()); - this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey)); - this.indexMaintainers = indexMaintainers; - this.viewConstants = viewConstants; - this.childRegionInfo = regionInfo; - this.regionStartKeyInHFile = regionStartKeyInHFile; - this.offset = regionStartKeyInHFile.length; - this.refCount = refCount; - this.currentRegion = currentRegion; - } + private final boolean top; + // This is the key we split around. Its the first possible entry on a row: + // i.e. empty column and a timestamp of LATEST_TIMESTAMP. 
+ private final byte[] splitkey; + private final byte[] splitRow; + private final Map indexMaintainers; + private final byte[][] viewConstants; + private final int offset; + private final RegionInfo childRegionInfo; + private final byte[] regionStartKeyInHFile; + private final AtomicInteger refCount; + private final RegionInfo currentRegion; - public int getOffset() { - return offset; - } + /** + */ + public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf, + final FSDataInputStreamWrapper in, long size, final Reference r, final Configuration conf, + final Map indexMaintainers, + final byte[][] viewConstants, final RegionInfo regionInfo, byte[] regionStartKeyInHFile, + byte[] splitKey, boolean primaryReplicaStoreFile, AtomicInteger refCount, + RegionInfo currentRegion) throws IOException { + super(fs, cacheConf, conf, + new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile, + ReaderType.STREAM), + new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile, + ReaderType.STREAM), conf), + p); + getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader()); + this.splitkey = splitKey == null ? r.getSplitKey() : splitKey; + // Is it top or bottom half? + this.top = Reference.isTopFileRegion(r.getFileRegion()); + this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey)); + this.indexMaintainers = indexMaintainers; + this.viewConstants = viewConstants; + this.childRegionInfo = regionInfo; + this.regionStartKeyInHFile = regionStartKeyInHFile; + this.offset = regionStartKeyInHFile.length; + this.refCount = refCount; + this.currentRegion = currentRegion; + } - public byte[][] getViewConstants() { - return viewConstants; - } + public int getOffset() { + return offset; + } - public Map getIndexMaintainers() { - return indexMaintainers; - } + public byte[][] getViewConstants() { + return viewConstants; + } - public RegionInfo getRegionInfo() { - return childRegionInfo; - } + public Map getIndexMaintainers() { + return indexMaintainers; + } - public byte[] getRegionStartKeyInHFile() { - return regionStartKeyInHFile; - } + public RegionInfo getRegionInfo() { + return childRegionInfo; + } - public byte[] getSplitkey() { - return splitkey; - } + public byte[] getRegionStartKeyInHFile() { + return regionStartKeyInHFile; + } - public byte[] getSplitRow() { - return splitRow; - } + public byte[] getSplitkey() { + return splitkey; + } - public boolean isTop() { - return top; - } - - @Override - public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - refCount.incrementAndGet(); - return new LocalIndexStoreFileScanner(this, cacheBlocks, pread, isCompaction, readPt, - scannerOrder, canOptimizeForNonNullColumn); - } + public byte[] getSplitRow() { + return splitRow; + } + + public boolean isTop() { + return top; + } - @Override - public boolean passesKeyRangeFilter(Scan scan) { - if (scan.getAttribute(SCAN_START_ROW_SUFFIX) == null) { - // Scan from compaction. - return true; - } - byte[] startKey = currentRegion.getStartKey(); - byte[] endKey = currentRegion.getEndKey(); - // If the region start key is not the prefix of the scan start row then we can return empty - // scanners. This is possible during merge where one of the child region scan should not return any - // results as we go through merged region. 
- int prefixLength = scan.getStartRow().length - scan.getAttribute(SCAN_START_ROW_SUFFIX).length; - if (Bytes.compareTo(scan.getStartRow(), 0, prefixLength, - (startKey.length == 0 ? new byte[endKey.length] : startKey), 0, - (startKey.length == 0 ? endKey.length : startKey.length)) != 0) { - return false; - } - return true; + @Override + public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, + boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { + refCount.incrementAndGet(); + return new LocalIndexStoreFileScanner(this, cacheBlocks, pread, isCompaction, readPt, + scannerOrder, canOptimizeForNonNullColumn); + } + + @Override + public boolean passesKeyRangeFilter(Scan scan) { + if (scan.getAttribute(SCAN_START_ROW_SUFFIX) == null) { + // Scan from compaction. + return true; + } + byte[] startKey = currentRegion.getStartKey(); + byte[] endKey = currentRegion.getEndKey(); + // If the region start key is not the prefix of the scan start row then we can return empty + // scanners. This is possible during merge where one of the child region scan should not return + // any + // results as we go through merged region. + int prefixLength = scan.getStartRow().length - scan.getAttribute(SCAN_START_ROW_SUFFIX).length; + if ( + Bytes.compareTo(scan.getStartRow(), 0, prefixLength, + (startKey.length == 0 ? new byte[endKey.length] : startKey), 0, + (startKey.length == 0 ? endKey.length : startKey.length)) != 0 + ) { + return false; } -} \ No newline at end of file + return true; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java index d216d83d88c..da1bc9b2365 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -60,6 +60,7 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.QueryUtil; @@ -67,244 +68,230 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +public class IndexHalfStoreFileReaderGenerator implements RegionObserver, RegionCoprocessor { -public class IndexHalfStoreFileReaderGenerator implements RegionObserver, RegionCoprocessor{ - - private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair"; - public static final Logger LOGGER = - LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class); + private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair"; + public static final Logger LOGGER = + LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class); - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, - FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, StoreFileReader reader) throws IOException { - TableName tableName = ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(); - Region region = ctx.getEnvironment().getRegion(); - RegionInfo childRegion = region.getRegionInfo(); - byte[] splitKey = null; - if (reader == null && r != null) { - if (!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - return reader; - } - Table metaTable = null; - byte[] regionStartKeyInHFile = null; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } - try (PhoenixConnection conn = - QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()) - .unwrap(PhoenixConnection.class)) { - // This is the CQSI shared Connection. MUST NOT be closed. 
- Connection hbaseConn = conn.getQueryServices().getAdmin().getConnection(); - Scan scan = - MetaTableAccessor.getScanForTableName(hbaseConn.getConfiguration(), - tableName); - SingleColumnValueFilter scvf = null; - if (Reference.isTopFileRegion(r.getFileRegion())) { - scvf = - new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, - HConstants.SPLITB_QUALIFIER, CompareOperator.EQUAL, - RegionInfoUtil.toByteArray(region.getRegionInfo())); - scvf.setFilterIfMissing(true); - } else { - scvf = - new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, - HConstants.SPLITA_QUALIFIER, CompareOperator.EQUAL, - RegionInfoUtil.toByteArray(region.getRegionInfo())); - scvf.setFilterIfMissing(true); - } - if (scvf != null) { - scan.setFilter(scvf); - } - metaTable = hbaseConn.getTable(TableName.META_TABLE_NAME); - Result result = null; - try (ResultScanner scanner = metaTable.getScanner(scan)) { - result = scanner.next(); - } - if (result == null || result.isEmpty()) { - List mergeRegions = - CompatUtil.getMergeRegions(ctx.getEnvironment().getConnection(), - region.getRegionInfo()); - if (mergeRegions == null || mergeRegions.isEmpty()) { - return reader; - } - byte[] splitRow = - CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(r.getSplitKey())); - // We need not change any thing in first region data because first region start - // key - // is equal to merged region start key. So returning same reader. - if (Bytes.compareTo(mergeRegions.get(0).getStartKey(), splitRow) == 0) { - if (mergeRegions.get(0).getStartKey().length == 0 && region.getRegionInfo() - .getEndKey().length != mergeRegions.get(0).getEndKey().length) { - childRegion = mergeRegions.get(0); - regionStartKeyInHFile = - mergeRegions.get(0).getStartKey().length == 0 - ? new byte[mergeRegions.get(0).getEndKey().length] - : mergeRegions.get(0).getStartKey(); - } else { - return reader; - } - } else { - for (RegionInfo mergeRegion : mergeRegions.subList(1, - mergeRegions.size())) { - if (Bytes.compareTo(mergeRegion.getStartKey(), splitRow) == 0) { - childRegion = mergeRegion; - regionStartKeyInHFile = mergeRegion.getStartKey(); - break; - } - } - } - splitKey = - KeyValueUtil.createFirstOnRow( - region.getRegionInfo().getStartKey().length == 0 - ? new byte[region.getRegionInfo().getEndKey().length] - : region.getRegionInfo().getStartKey()) - .getKey(); - } else { - RegionInfo parentRegion = MetaTableAccessor.getRegionInfo(result); - regionStartKeyInHFile = - parentRegion.getStartKey().length == 0 - ? 
new byte[parentRegion.getEndKey().length] - : parentRegion.getStartKey(); - } + @Override + public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFileReader reader) throws IOException { + TableName tableName = ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(); + Region region = ctx.getEnvironment().getRegion(); + RegionInfo childRegion = region.getRegionInfo(); + byte[] splitKey = null; + if (reader == null && r != null) { + if (!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + return reader; + } + Table metaTable = null; + byte[] regionStartKeyInHFile = null; - PTable dataTable = - IndexUtil.getPDataTable(conn, - ctx.getEnvironment().getRegion().getTableDescriptor()); - List indexes = dataTable.getIndexes(); - Map indexMaintainers = - new HashMap(); - for (PTable index : indexes) { - if (index.getIndexType() == IndexType.LOCAL) { - IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, conn); - indexMaintainers.put( - new ImmutableBytesWritable( - index.getviewIndexIdType().toBytes(index.getViewIndexId())), - indexMaintainer); - } - } - if (indexMaintainers.isEmpty()) { - return reader; - } - byte[][] viewConstants = getViewConstants(dataTable); - return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, - ctx.getEnvironment().getConfiguration(), indexMaintainers, viewConstants, - childRegion, regionStartKeyInHFile, splitKey, - childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID, - new AtomicInteger(0), region.getRegionInfo()); - } catch (SQLException e) { - throw new IOException(e); - } finally { - if (metaTable != null) metaTable.close(); + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()) + .unwrap(PhoenixConnection.class)) { + // This is the CQSI shared Connection. MUST NOT be closed. + Connection hbaseConn = conn.getQueryServices().getAdmin().getConnection(); + Scan scan = MetaTableAccessor.getScanForTableName(hbaseConn.getConfiguration(), tableName); + SingleColumnValueFilter scvf = null; + if (Reference.isTopFileRegion(r.getFileRegion())) { + scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, + CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo())); + scvf.setFilterIfMissing(true); + } else { + scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, + CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo())); + scvf.setFilterIfMissing(true); + } + if (scvf != null) { + scan.setFilter(scvf); + } + metaTable = hbaseConn.getTable(TableName.META_TABLE_NAME); + Result result = null; + try (ResultScanner scanner = metaTable.getScanner(scan)) { + result = scanner.next(); + } + if (result == null || result.isEmpty()) { + List mergeRegions = CompatUtil + .getMergeRegions(ctx.getEnvironment().getConnection(), region.getRegionInfo()); + if (mergeRegions == null || mergeRegions.isEmpty()) { + return reader; + } + byte[] splitRow = CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(r.getSplitKey())); + // We need not change any thing in first region data because first region start + // key + // is equal to merged region start key. So returning same reader. 
+ if (Bytes.compareTo(mergeRegions.get(0).getStartKey(), splitRow) == 0) { + if ( + mergeRegions.get(0).getStartKey().length == 0 + && region.getRegionInfo().getEndKey().length + != mergeRegions.get(0).getEndKey().length + ) { + childRegion = mergeRegions.get(0); + regionStartKeyInHFile = mergeRegions.get(0).getStartKey().length == 0 + ? new byte[mergeRegions.get(0).getEndKey().length] + : mergeRegions.get(0).getStartKey(); + } else { + return reader; } + } else { + for (RegionInfo mergeRegion : mergeRegions.subList(1, mergeRegions.size())) { + if (Bytes.compareTo(mergeRegion.getStartKey(), splitRow) == 0) { + childRegion = mergeRegion; + regionStartKeyInHFile = mergeRegion.getStartKey(); + break; + } + } + } + splitKey = KeyValueUtil.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 + ? new byte[region.getRegionInfo().getEndKey().length] + : region.getRegionInfo().getStartKey()).getKey(); + } else { + RegionInfo parentRegion = MetaTableAccessor.getRegionInfo(result); + regionStartKeyInHFile = parentRegion.getStartKey().length == 0 + ? new byte[parentRegion.getEndKey().length] + : parentRegion.getStartKey(); } - return reader; + + PTable dataTable = + IndexUtil.getPDataTable(conn, ctx.getEnvironment().getRegion().getTableDescriptor()); + List indexes = dataTable.getIndexes(); + Map indexMaintainers = + new HashMap(); + for (PTable index : indexes) { + if (index.getIndexType() == IndexType.LOCAL) { + IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, conn); + indexMaintainers.put(new ImmutableBytesWritable( + index.getviewIndexIdType().toBytes(index.getViewIndexId())), indexMaintainer); + } + } + if (indexMaintainers.isEmpty()) { + return reader; + } + byte[][] viewConstants = getViewConstants(dataTable); + return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, + ctx.getEnvironment().getConfiguration(), indexMaintainers, viewConstants, childRegion, + regionStartKeyInHFile, splitKey, + childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID, new AtomicInteger(0), + region.getRegionInfo()); + } catch (SQLException e) { + throw new IOException(e); + } finally { + if (metaTable != null) metaTable.close(); + } } + return reader; + } - @Override - public InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { + @Override + public InternalScanner preCompact(ObserverContext c, Store store, + InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { - if (!isLocalIndexStore(store)) { return s; } - if (!store.hasReferences()) { - InternalScanner repairScanner = null; - if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) { - LOGGER.info("we have found inconsistent data for local index for region:" - + c.getEnvironment().getRegion().getRegionInfo()); - if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) { - LOGGER.info("Starting automatic repair of local Index for region:" - + c.getEnvironment().getRegion().getRegionInfo()); - repairScanner = getRepairScanner(c.getEnvironment(), store); - } - } - if (repairScanner != null) { - if (s!=null) { - s.close(); - } - return repairScanner; - } else { - return s; - } + if (!isLocalIndexStore(store)) { + return s; + } + if (!store.hasReferences()) { + InternalScanner repairScanner = null; + if ( + request.isMajor() + && 
(!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store)) + ) { + LOGGER.info("we have found inconsistent data for local index for region:" + + c.getEnvironment().getRegion().getRegionInfo()); + if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) { + LOGGER.info("Starting automatic repair of local Index for region:" + + c.getEnvironment().getRegion().getRegionInfo()); + repairScanner = getRepairScanner(c.getEnvironment(), store); } + } + if (repairScanner != null) { + if (s != null) { + s.close(); + } + return repairScanner; + } else { return s; + } } + return s; + } - private byte[][] getViewConstants(PTable dataTable) { - int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0); - byte[][] viewConstants = null; - int nViewConstants = 0; - if (dataTable.getType() == PTableType.VIEW) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - List dataPkColumns = dataTable.getPKColumns(); - for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { - PColumn dataPKColumn = dataPkColumns.get(i); - if (dataPKColumn.getViewConstant() != null) { - nViewConstants++; - } - } - if (nViewConstants > 0) { - viewConstants = new byte[nViewConstants][]; - int j = 0; - for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { - PColumn dataPkColumn = dataPkColumns.get(i); - if (dataPkColumn.getViewConstant() != null) { - if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) { - viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr); - } else { - throw new IllegalStateException(); - } - } - } - } + private byte[][] getViewConstants(PTable dataTable) { + int dataPosOffset = + (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0); + byte[][] viewConstants = null; + int nViewConstants = 0; + if (dataTable.getType() == PTableType.VIEW) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + List dataPkColumns = dataTable.getPKColumns(); + for (int i = dataPosOffset; i < dataPkColumns.size(); i++) { + PColumn dataPKColumn = dataPkColumns.get(i); + if (dataPKColumn.getViewConstant() != null) { + nViewConstants++; } - return viewConstants; - } - - /** - * @param env - * @param store Local Index store - * @param scan - * @param scanType - * @param earliestPutTs - * @param request - * @return StoreScanner for new Local Index data for a passed store and Null if repair is not possible - * @throws IOException - */ - private InternalScanner getRepairScanner(RegionCoprocessorEnvironment env, Store store) throws IOException { - //List scannersForStoreFiles = Lists.newArrayListWithExpectedSize(store.getStorefilesCount()); - Scan scan = new Scan(); - scan.readVersions(store.getColumnFamilyDescriptor().getMaxVersions()); - for (Store s : env.getRegion().getStores()) { - if (!isLocalIndexStore(s)) { - scan.addFamily(s.getColumnFamilyDescriptor().getName()); - } - } - try (PhoenixConnection conn = QueryUtil.getConnectionOnServer( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDescriptor()); - final List maintainers = Lists - .newArrayListWithExpectedSize(dataPTable.getIndexes().size()); - for (PTable index : dataPTable.getIndexes()) { - if (index.getIndexType() == IndexType.LOCAL) { - maintainers.add(index.getIndexMaintainer(dataPTable, conn)); - } + } + if (nViewConstants > 0) { + viewConstants = new byte[nViewConstants][]; + int j = 0; + for (int i = dataPosOffset; i < 
dataPkColumns.size(); i++) { + PColumn dataPkColumn = dataPkColumns.get(i); + if (dataPkColumn.getViewConstant() != null) { + if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) { + viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr); + } else { + throw new IllegalStateException(); } - return new DataTableLocalIndexRegionScanner( - env.getRegion().getScanner(scan), env.getRegion(), maintainers, - store.getColumnFamilyDescriptor().getName(), - env.getConfiguration()); - } catch (SQLException e) { - throw new IOException(e); + } } + } } + return viewConstants; + } - private boolean isLocalIndexStore(Store store) { - return store.getColumnFamilyDescriptor().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX); + /** + * @param store Local Index store + * @return StoreScanner for new Local Index data for a passed store and Null if repair is not + * possible + */ + private InternalScanner getRepairScanner(RegionCoprocessorEnvironment env, Store store) + throws IOException { + // List scannersForStoreFiles = + // Lists.newArrayListWithExpectedSize(store.getStorefilesCount()); + Scan scan = new Scan(); + scan.readVersions(store.getColumnFamilyDescriptor().getMaxVersions()); + for (Store s : env.getRegion().getStores()) { + if (!isLocalIndexStore(s)) { + scan.addFamily(s.getColumnFamilyDescriptor().getName()); + } } + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) { + PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDescriptor()); + final List maintainers = + Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size()); + for (PTable index : dataPTable.getIndexes()) { + if (index.getIndexType() == IndexType.LOCAL) { + maintainers.add(index.getIndexMaintainer(dataPTable, conn)); + } + } + return new DataTableLocalIndexRegionScanner(env.getRegion().getScanner(scan), env.getRegion(), + maintainers, store.getColumnFamilyDescriptor().getName(), env.getConfiguration()); + } catch (SQLException e) { + throw new IOException(e); + } + } + + private boolean isLocalIndexStore(Store store) { + return store.getColumnFamilyDescriptor().getNameAsString() + .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java index 4659484e9ed..95cb642dbfd 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,12 +35,11 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet { /** * Create a new {@link IndexKeyValueSkipListSet} based on the passed comparator. * @param comparator to use when comparing keyvalues. It is used both to determine sort order as - * well as object equality in the map. + * well as object equality in the map. 
* @return a map that uses the passed comparator */ public static IndexKeyValueSkipListSet create(CellComparator comparator) { - ConcurrentSkipListMap delegate = - new ConcurrentSkipListMap(comparator); + ConcurrentSkipListMap delegate = new ConcurrentSkipListMap(comparator); IndexKeyValueSkipListSet ret = new IndexKeyValueSkipListSet(delegate); return ret; } @@ -54,23 +53,22 @@ public IndexKeyValueSkipListSet(ConcurrentSkipListMap delegate) { } /** - * Add the passed KeyValue to the set, only if one is not already set. This is equivalent - * to + * Add the passed KeyValue to the set, only if one is not already set. This is equivalent to + * *
        * <pre>
    -   * if (!set.containsKey(key))
    -   *   return set.put(key);
    -   * else
    -   *  return map.set(key);
    +   * if (!set.containsKey(key)) return set.put(key);
    +   * else return map.set(key);
        * </pre>
    + * * except that the action is performed atomically. * @param kv KeyValue to add * @return the previous value associated with the specified key, or null if there was no * previously stored key - * @throws ClassCastException if the specified key cannot be compared with the keys currently in - * the map + * @throws ClassCastException if the specified key cannot be compared with the keys currently in + * the map * @throws NullPointerException if the specified key is null */ public Cell putIfAbsent(Cell kv) { return this.delegate.putIfAbsent(kv, kv); } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java index b68abd93b88..a0065947845 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.KeyValue; - import java.util.Collection; import java.util.Comparator; import java.util.Iterator; @@ -30,19 +25,22 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.KeyValue; + /** * A {@link java.util.Set} of {@link KeyValue}s implemented on top of a - * {@link java.util.concurrent.ConcurrentSkipListMap}. Works like a - * {@link java.util.concurrent.ConcurrentSkipListSet} in all but one regard: - * An add will overwrite if already an entry for the added key. In other words, - * where CSLS does "Adds the specified element to this set if it is not already - * present.", this implementation "Adds the specified element to this set EVEN - * if it is already present overwriting what was there previous". The call to - * add returns true if no value in the backing map or false if there was an - * entry with same key (though value may be different). - *
    Otherwise, - * has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent - * get and set and won't throw ConcurrentModificationException when iterating. + * {@link java.util.concurrent.ConcurrentSkipListMap}. Works like a + * {@link java.util.concurrent.ConcurrentSkipListSet} in all but one regard: An add will overwrite + * if already an entry for the added key. In other words, where CSLS does "Adds the specified + * element to this set if it is not already present.", this implementation "Adds the specified + * element to this set EVEN if it is already present overwriting what was there previous". The call + * to add returns true if no value in the backing map or false if there was an entry with same key + * (though value may be different). + *
    + * Otherwise, has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent get and set + * and won't throw ConcurrentModificationException when iterating. */ public class KeyValueSkipListSet implements NavigableSet { private final ConcurrentNavigableMap delegatee; @@ -75,8 +73,7 @@ public SortedSet headSet(final Cell toElement) { return headSet(toElement, false); } - public NavigableSet headSet(final Cell toElement, - boolean inclusive) { + public NavigableSet headSet(final Cell toElement, boolean inclusive) { return new KeyValueSkipListSet(this.delegatee.headMap(toElement, inclusive)); } @@ -104,8 +101,8 @@ public SortedSet subSet(Cell fromElement, Cell toElement) { throw new UnsupportedOperationException("Not implemented"); } - public NavigableSet subSet(Cell fromElement, - boolean fromInclusive, Cell toElement, boolean toInclusive) { + public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, + boolean toInclusive) { throw new UnsupportedOperationException("Not implemented"); } @@ -142,7 +139,7 @@ public void clear() { } public boolean contains(Object o) { - //noinspection SuspiciousMethodCalls + // noinspection SuspiciousMethodCalls return this.delegatee.containsKey(o); } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java index 38bc8c5207a..019035341ab 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,5 +20,5 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver; public class LocalIndexSplitter implements RegionObserver { - + } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java index 8d10bde8814..228ea038c54 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,259 +17,250 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.KeyValue.ROW_LENGTH_SIZE; + import java.io.IOException; import java.util.Map.Entry; import java.util.Optional; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; -import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.compat.hbase.CompatLocalIndexStoreFileScanner; import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.util.PhoenixKeyValueUtil; -import static org.apache.hadoop.hbase.KeyValue.ROW_LENGTH_SIZE; - public class LocalIndexStoreFileScanner extends CompatLocalIndexStoreFileScanner { - private IndexHalfStoreFileReader reader; - private boolean changeBottomKeys; - private CellComparatorImpl comparator; - @SuppressWarnings("deprecation") - public LocalIndexStoreFileScanner(IndexHalfStoreFileReader reader, boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - super(reader, cacheBlocks, pread, isCompaction, readPt, scannerOrder, - canOptimizeForNonNullColumn); - this.reader = reader; - this.changeBottomKeys = - this.reader.getRegionInfo().getStartKey().length == 0 - && this.reader.getSplitRow().length != this.reader.getOffset(); - this.comparator = (CellComparatorImpl) reader.getComparator(); - } + private IndexHalfStoreFileReader reader; + private boolean changeBottomKeys; + private CellComparatorImpl comparator; - @Override - public Cell next() throws IOException { - Cell next = super.next(); - while(next !=null && !isSatisfiedMidKeyCondition(next)) { - next = super.next(); - } - while(super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { - super.next(); - } - if (next!=null && (reader.isTop() || changeBottomKeys)) { - next = getChangedKey(next, !reader.isTop() && changeBottomKeys); - } - return next; - } + @SuppressWarnings("deprecation") + public LocalIndexStoreFileScanner(IndexHalfStoreFileReader reader, boolean cacheBlocks, + boolean pread, boolean isCompaction, long readPt, long scannerOrder, + boolean canOptimizeForNonNullColumn) { + super(reader, cacheBlocks, pread, isCompaction, readPt, scannerOrder, + canOptimizeForNonNullColumn); + this.reader = reader; + this.changeBottomKeys = this.reader.getRegionInfo().getStartKey().length == 0 + && this.reader.getSplitRow().length != this.reader.getOffset(); + this.comparator = (CellComparatorImpl) reader.getComparator(); + } - @Override - public Cell peek() { - Cell peek = super.peek(); - if (peek != null && (reader.isTop() || changeBottomKeys)) { - peek = getChangedKey(peek, !reader.isTop() && changeBottomKeys); - } - return peek; + @Override + public Cell next() throws IOException { + Cell next = super.next(); + while (next != null && !isSatisfiedMidKeyCondition(next)) { + next = super.next(); } - - private Cell getChangedKey(Cell next, boolean changeBottomKeys) { - // If it is a top store file change the StartKey with SplitKey in Key - //and produce the new value corresponding to the change in key - byte[] changedKey = 
getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys); - KeyValue changedKv = - new KeyValue(changedKey, 0, changedKey.length, next.getFamilyArray(), - next.getFamilyOffset(), next.getFamilyLength(), next.getQualifierArray(), - next.getQualifierOffset(), next.getQualifierLength(), - next.getTimestamp(), Type.codeToType(next.getTypeByte()), - next.getValueArray(), next.getValueOffset(), next.getValueLength(), - next.getTagsArray(), next.getTagsOffset(), next.getTagsLength()); - return changedKv; + while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + super.next(); } - - /** - * Enforce seek all the time for local index store file scanner otherwise some times hbase - * might return fake kvs not in physical files. - */ - @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { - boolean requestSeek = super.requestSeek(kv, forward, useBloom); - if(requestSeek) { - Cell peek = super.peek(); - if (Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - peek.getRowArray(), peek.getRowOffset(), peek.getRowLength()) == 0) { - return forward ? reseek(kv): seek(kv); - } - } - return requestSeek; + if (next != null && (reader.isTop() || changeBottomKeys)) { + next = getChangedKey(next, !reader.isTop() && changeBottomKeys); } + return next; + } - @Override - public boolean seek(Cell key) throws IOException { - return seekOrReseek(key, true); + @Override + public Cell peek() { + Cell peek = super.peek(); + if (peek != null && (reader.isTop() || changeBottomKeys)) { + peek = getChangedKey(peek, !reader.isTop() && changeBottomKeys); } + return peek; + } + + private Cell getChangedKey(Cell next, boolean changeBottomKeys) { + // If it is a top store file change the StartKey with SplitKey in Key + // and produce the new value corresponding to the change in key + byte[] changedKey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys); + KeyValue changedKv = new KeyValue(changedKey, 0, changedKey.length, next.getFamilyArray(), + next.getFamilyOffset(), next.getFamilyLength(), next.getQualifierArray(), + next.getQualifierOffset(), next.getQualifierLength(), next.getTimestamp(), + Type.codeToType(next.getTypeByte()), next.getValueArray(), next.getValueOffset(), + next.getValueLength(), next.getTagsArray(), next.getTagsOffset(), next.getTagsLength()); + return changedKv; + } - @Override - public boolean reseek(Cell key) throws IOException { - return seekOrReseek(key, false); + /** + * Enforce seek all the time for local index store file scanner otherwise some times hbase might + * return fake kvs not in physical files. + */ + @Override + public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { + boolean requestSeek = super.requestSeek(kv, forward, useBloom); + if (requestSeek) { + Cell peek = super.peek(); + if ( + Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), peek.getRowArray(), + peek.getRowOffset(), peek.getRowLength()) == 0 + ) { + return forward ? 
reseek(kv) : seek(kv); + } } + return requestSeek; + } - @Override - public boolean seekToPreviousRow(Cell key) throws IOException { - KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key); - if (reader.isTop()) { - Optional firstKey = reader.getFirstKey(); - // This will be null when the file is empty in which we can not seekBefore to - // any key - if (firstKey.isPresent()) { - return false; - } - if (this.comparator.compare(kv, firstKey.get(), true) <= 0) { - return super.seekToPreviousRow(key); - } - Cell replacedKey = getKeyPresentInHFiles(kv); - boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey); - while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) { - seekToPreviousRow = super.seekToPreviousRow(super.peek()); - } - return seekToPreviousRow; - } else { - // The equals sign isn't strictly necessary just here to be consistent with - // seekTo - KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey()); - if (this.comparator.compare(kv, splitKeyValue, true) >= 0) { - boolean seekToPreviousRow = super.seekToPreviousRow(kv); - while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) { - seekToPreviousRow = super.seekToPreviousRow(super.peek()); - } - return seekToPreviousRow; - } - } + @Override + public boolean seek(Cell key) throws IOException { + return seekOrReseek(key, true); + } + + @Override + public boolean reseek(Cell key) throws IOException { + return seekOrReseek(key, false); + } + + @Override + public boolean seekToPreviousRow(Cell key) throws IOException { + KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key); + if (reader.isTop()) { + Optional firstKey = reader.getFirstKey(); + // This will be null when the file is empty in which we can not seekBefore to + // any key + if (firstKey.isPresent()) { + return false; + } + if (this.comparator.compare(kv, firstKey.get(), true) <= 0) { + return super.seekToPreviousRow(key); + } + Cell replacedKey = getKeyPresentInHFiles(kv); + boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey); + while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + seekToPreviousRow = super.seekToPreviousRow(super.peek()); + } + return seekToPreviousRow; + } else { + // The equals sign isn't strictly necessary just here to be consistent with + // seekTo + KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey()); + if (this.comparator.compare(kv, splitKeyValue, true) >= 0) { boolean seekToPreviousRow = super.seekToPreviousRow(kv); - while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) { - seekToPreviousRow = super.seekToPreviousRow(super.peek()); + while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + seekToPreviousRow = super.seekToPreviousRow(super.peek()); } return seekToPreviousRow; + } } + boolean seekToPreviousRow = super.seekToPreviousRow(kv); + while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + seekToPreviousRow = super.seekToPreviousRow(super.peek()); + } + return seekToPreviousRow; + } - @Override - public boolean seekToLastRow() throws IOException { - boolean seekToLastRow = super.seekToLastRow(); - while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) { - seekToLastRow = super.seekToPreviousRow(super.peek()); - } - return seekToLastRow; + @Override + public boolean seekToLastRow() throws IOException { + boolean seekToLastRow = super.seekToLastRow(); + while (super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + seekToLastRow = 
super.seekToPreviousRow(super.peek()); } + return seekToLastRow; + } - private boolean isSatisfiedMidKeyCondition(Cell kv) { - ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(kv.getRowArray(), kv.getRowOffset() + reader.getOffset(), - kv.getRowLength() - reader.getOffset()); - Entry entry = reader.getIndexMaintainers().entrySet().iterator().next(); - IndexMaintainer indexMaintainer = entry.getValue(); - byte[] viewIndexId = indexMaintainer.getViewIndexIdFromIndexRowKey(rowKey); - IndexMaintainer actualIndexMaintainer = reader.getIndexMaintainers().get(new ImmutableBytesWritable(viewIndexId)); - if(actualIndexMaintainer != null) { - byte[] dataRowKey = actualIndexMaintainer.buildDataRowKey(rowKey, reader.getViewConstants()); + private boolean isSatisfiedMidKeyCondition(Cell kv) { + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(kv.getRowArray(), + kv.getRowOffset() + reader.getOffset(), kv.getRowLength() - reader.getOffset()); + Entry entry = + reader.getIndexMaintainers().entrySet().iterator().next(); + IndexMaintainer indexMaintainer = entry.getValue(); + byte[] viewIndexId = indexMaintainer.getViewIndexIdFromIndexRowKey(rowKey); + IndexMaintainer actualIndexMaintainer = + reader.getIndexMaintainers().get(new ImmutableBytesWritable(viewIndexId)); + if (actualIndexMaintainer != null) { + byte[] dataRowKey = actualIndexMaintainer.buildDataRowKey(rowKey, reader.getViewConstants()); - int compareResult = Bytes.compareTo(dataRowKey, reader.getSplitRow()); - if (reader.isTop()) { - if (compareResult >= 0) { - return true; - } - } else { - if (compareResult < 0) { - return true; - } - } + int compareResult = Bytes.compareTo(dataRowKey, reader.getSplitRow()); + if (reader.isTop()) { + if (compareResult >= 0) { + return true; } - return false; + } else { + if (compareResult < 0) { + return true; + } + } } + return false; + } - /** - * In case of top half store, the passed key will be with the start key of the daughter region. - * But in the actual HFiles, the key will be with the start key of the old parent region. In - * order to make the real seek in the HFiles, we need to build the old key. - * - * The logic here is just replace daughter region start key with parent region start key - * in the key part. - * - * @param key - * - */ - private KeyValue getKeyPresentInHFiles(Cell keyValue) { - int rowLength = keyValue.getRowLength(); - int rowOffset = keyValue.getRowOffset(); + /** + * In case of top half store, the passed key will be with the start key of the daughter region. + * But in the actual HFiles, the key will be with the start key of the old parent region. In order + * to make the real seek in the HFiles, we need to build the old key. The logic here is just + * replace daughter region start key with parent region start key in the key part. 
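The javadoc above describes rewriting a row key so that a seek against the parent region's HFile works: the daughter region's start-key prefix is swapped for the parent region's start key. A minimal, self-contained sketch of that prefix swap, with hypothetical keys and none of the length-prefixed KeyValue layout or offset bookkeeping the real method handles:

import java.nio.charset.StandardCharsets;

public class PrefixSwapSketch {
  // Replace the daughter-region start-key prefix of a row key with the parent-region
  // start key, so the rewritten key matches what the parent HFile actually stores.
  static byte[] swapPrefix(byte[] rowKey, byte[] daughterStartKey, byte[] parentStartKey) {
    byte[] rewritten = new byte[parentStartKey.length + rowKey.length - daughterStartKey.length];
    System.arraycopy(parentStartKey, 0, rewritten, 0, parentStartKey.length);
    System.arraycopy(rowKey, daughterStartKey.length, rewritten, parentStartKey.length,
        rowKey.length - daughterStartKey.length);
    return rewritten;
  }

  public static void main(String[] args) {
    byte[] rowKey = "region-B|row42".getBytes(StandardCharsets.UTF_8);   // key as seen by the daughter region
    byte[] daughterStart = "region-B|".getBytes(StandardCharsets.UTF_8); // daughter start key (the split row)
    byte[] parentStart = "region-A|".getBytes(StandardCharsets.UTF_8);   // start key stored in the parent HFile
    System.out.println(new String(swapPrefix(rowKey, daughterStart, parentStart), StandardCharsets.UTF_8));
    // prints: region-A|row42
  }
}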
+ */ + private KeyValue getKeyPresentInHFiles(Cell keyValue) { + int rowLength = keyValue.getRowLength(); + int rowOffset = keyValue.getRowOffset(); - short length = (short) (rowLength - reader.getSplitRow().length + reader.getOffset()); - byte[] replacedKey = - new byte[length + keyValue.getRowArray().length - (rowOffset + rowLength) + ROW_LENGTH_SIZE]; - System.arraycopy(Bytes.toBytes(length), 0, replacedKey, 0, ROW_LENGTH_SIZE); - System.arraycopy(reader.getRegionStartKeyInHFile(), 0, replacedKey, ROW_LENGTH_SIZE, reader.getOffset()); - System.arraycopy(keyValue.getRowArray(), keyValue.getRowOffset() + reader.getSplitRow().length, - replacedKey, reader.getOffset() + ROW_LENGTH_SIZE, rowLength - - reader.getSplitRow().length); - System.arraycopy(keyValue.getRowArray(), rowOffset + rowLength, replacedKey, - reader.getOffset() + keyValue.getRowLength() - reader.getSplitRow().length - + ROW_LENGTH_SIZE, keyValue.getRowArray().length - (rowOffset + rowLength)); - return new KeyValue.KeyOnlyKeyValue(replacedKey); - } - - /** - * - * @param cell - * @param isSeek pass true for seek, false for reseek. - * @return - * @throws IOException - */ - public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{ - Cell keyToSeek = cell; - KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey()); - if (reader.isTop()) { - if(this.comparator.compare(cell, splitKeyValue, true) < 0){ - if(!isSeek && realSeekDone()) { - return true; - } - return seekOrReseekToProperKey(isSeek, keyToSeek); - } - keyToSeek = getKeyPresentInHFiles(cell); - return seekOrReseekToProperKey(isSeek, keyToSeek); - } else { - if (this.comparator.compare(cell, splitKeyValue, true) >= 0) { - close(); - return false; - } - if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) { - keyToSeek = getKeyPresentInHFiles(cell); - } + short length = (short) (rowLength - reader.getSplitRow().length + reader.getOffset()); + byte[] replacedKey = + new byte[length + keyValue.getRowArray().length - (rowOffset + rowLength) + ROW_LENGTH_SIZE]; + System.arraycopy(Bytes.toBytes(length), 0, replacedKey, 0, ROW_LENGTH_SIZE); + System.arraycopy(reader.getRegionStartKeyInHFile(), 0, replacedKey, ROW_LENGTH_SIZE, + reader.getOffset()); + System.arraycopy(keyValue.getRowArray(), keyValue.getRowOffset() + reader.getSplitRow().length, + replacedKey, reader.getOffset() + ROW_LENGTH_SIZE, rowLength - reader.getSplitRow().length); + System.arraycopy(keyValue.getRowArray(), rowOffset + rowLength, replacedKey, + reader.getOffset() + keyValue.getRowLength() - reader.getSplitRow().length + ROW_LENGTH_SIZE, + keyValue.getRowArray().length - (rowOffset + rowLength)); + return new KeyValue.KeyOnlyKeyValue(replacedKey); + } + + /** + * @param isSeek pass true for seek, false for reseek. 
+ */ + public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException { + Cell keyToSeek = cell; + KeyValue splitKeyValue = new KeyValue.KeyOnlyKeyValue(reader.getSplitkey()); + if (reader.isTop()) { + if (this.comparator.compare(cell, splitKeyValue, true) < 0) { + if (!isSeek && realSeekDone()) { + return true; } return seekOrReseekToProperKey(isSeek, keyToSeek); + } + keyToSeek = getKeyPresentInHFiles(cell); + return seekOrReseekToProperKey(isSeek, keyToSeek); + } else { + if (this.comparator.compare(cell, splitKeyValue, true) >= 0) { + close(); + return false; + } + if ( + !isSeek && reader.getRegionInfo().getStartKey().length == 0 + && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length + ) { + keyToSeek = getKeyPresentInHFiles(cell); + } } + return seekOrReseekToProperKey(isSeek, keyToSeek); + } - private boolean seekOrReseekToProperKey(boolean isSeek, Cell kv) - throws IOException { - boolean seekOrReseek = isSeek ? super.seek(kv) : super.reseek(kv); - while (seekOrReseek && super.peek() != null - && !isSatisfiedMidKeyCondition(super.peek())) { - super.next(); - seekOrReseek = super.peek() != null; - } - return seekOrReseek; + private boolean seekOrReseekToProperKey(boolean isSeek, Cell kv) throws IOException { + boolean seekOrReseek = isSeek ? super.seek(kv) : super.reseek(kv); + while (seekOrReseek && super.peek() != null && !isSatisfiedMidKeyCondition(super.peek())) { + super.next(); + seekOrReseek = super.peek() != null; } + return seekOrReseek; + } - private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(Cell kv, boolean changeBottomKeys) { - int lenOfRemainingKey = kv.getRowLength() - reader.getOffset(); - byte[] keyReplacedStartKey = new byte[lenOfRemainingKey + reader.getSplitRow().length]; - System.arraycopy(changeBottomKeys ? new byte[reader.getSplitRow().length] : reader.getSplitRow(), 0, - keyReplacedStartKey, 0, reader.getSplitRow().length); - System.arraycopy(kv.getRowArray(), kv.getRowOffset() + reader.getOffset(), keyReplacedStartKey, - reader.getSplitRow().length, lenOfRemainingKey); - return keyReplacedStartKey; - } + private byte[] getNewRowkeyByRegionStartKeyReplacedWithSplitKey(Cell kv, + boolean changeBottomKeys) { + int lenOfRemainingKey = kv.getRowLength() - reader.getOffset(); + byte[] keyReplacedStartKey = new byte[lenOfRemainingKey + reader.getSplitRow().length]; + System.arraycopy( + changeBottomKeys ? new byte[reader.getSplitRow().length] : reader.getSplitRow(), 0, + keyReplacedStartKey, 0, reader.getSplitRow().length); + System.arraycopy(kv.getRowArray(), kv.getRowOffset() + reader.getOffset(), keyReplacedStartKey, + reader.getSplitRow().length, lenOfRemainingKey); + return keyReplacedStartKey; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java index 7bdb2c0b8bb..07e15bf19a5 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; - import java.util.List; import java.util.Map; @@ -26,38 +24,37 @@ import org.apache.hadoop.hbase.PrivateCellUtil; /** - * ScannerContext has all methods package visible. To properly update the context progress for our scanners we - * need this helper + * ScannerContext has all methods package visible. To properly update the context progress for our + * scanners we need this helper */ public class ScannerContextUtil { - public static void incrementSizeProgress(ScannerContext sc, List cells) { - for (Cell cell : cells) { - sc.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), - cell.heapSize()); - } - } - - public static void updateMetrics(ScannerContext src, ScannerContext dst) { - if (src != null && dst != null && src.isTrackingMetrics() && dst.isTrackingMetrics()) { - for (Map.Entry entry : src.getMetrics().getMetricsMap().entrySet()) { - dst.metrics.addToCounter(entry.getKey(), entry.getValue()); - } - } - } - - public static ScannerContext copyNoLimitScanner(ScannerContext sc) { - return new ScannerContext(sc.keepProgress, null, sc.isTrackingMetrics()); - } - - public static void updateTimeProgress(ScannerContext sc) { - sc.updateTimeProgress(); + public static void incrementSizeProgress(ScannerContext sc, List cells) { + for (Cell cell : cells) { + sc.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), cell.heapSize()); } + } - /** - * Set returnImmediately on the ScannerContext to true, it will have the same behavior - * as reaching the time limit. Use this to make RSRpcService.scan return immediately. - */ - public static void setReturnImmediately(ScannerContext sc) { - sc.returnImmediately(); + public static void updateMetrics(ScannerContext src, ScannerContext dst) { + if (src != null && dst != null && src.isTrackingMetrics() && dst.isTrackingMetrics()) { + for (Map.Entry entry : src.getMetrics().getMetricsMap().entrySet()) { + dst.metrics.addToCounter(entry.getKey(), entry.getValue()); + } } + } + + public static ScannerContext copyNoLimitScanner(ScannerContext sc) { + return new ScannerContext(sc.keepProgress, null, sc.isTrackingMetrics()); + } + + public static void updateTimeProgress(ScannerContext sc) { + sc.updateTimeProgress(); + } + + /** + * Set returnImmediately on the ScannerContext to true, it will have the same behavior as reaching + * the time limit. Use this to make RSRpcService.scan return immediately. 
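The class comment in this hunk explains why the helper exists at all: ScannerContext keeps its progress-tracking methods package-visible, so Phoenix parks a utility class inside the same HBase package to forward those calls. A minimal sketch of that same-package-helper idea, using hypothetical demo classes rather than the real HBase types:

// file: demo/ScanProgress.java
package demo;

class ScanProgress {
  private long bytes;

  // package-private, invisible to code outside the "demo" package
  void increment(long delta) {
    bytes += delta;
  }

  long current() {
    return bytes;
  }
}

// file: demo/ScanProgressUtil.java -- placed in the same package, so it may call
// the package-private methods and expose them to code everywhere else
package demo;

public final class ScanProgressUtil {
  private ScanProgressUtil() {
  }

  public static void addProgress(ScanProgress progress, long delta) {
    progress.increment(delta);
  }

  public static void main(String[] args) {
    ScanProgress progress = new ScanProgress();
    addProgress(progress, 128);
    System.out.println(progress.current());  // prints: 128
  }
}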
+ */ + public static void setReturnImmediately(ScannerContext sc) { + sc.returnImmediately(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java index e30370f1905..9950222e488 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/BinaryCompatibleBaseDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,10 +30,10 @@ import org.slf4j.LoggerFactory; /** - * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. - * This class is meant to be used in {@link IndexedWALEditCodec} when runtime version of - * HBase is older than 1.1.3. This is needed to handle binary incompatibility introduced by - * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details. + * This class is a copy paste version of org.apache.hadoop.hbase.codec.BaseDecoder class. This class + * is meant to be used in {@link IndexedWALEditCodec} when runtime version of HBase is older than + * 1.1.3. This is needed to handle binary incompatibility introduced by HBASE-14501. See + * PHOENIX-2629 and PHOENIX-2636 for details. */ public abstract class BinaryCompatibleBaseDecoder implements Codec.Decoder { protected static final Logger LOGGER = LoggerFactory.getLogger(BinaryCompatibleBaseDecoder.class); @@ -62,13 +62,14 @@ public boolean advance() throws IOException { if (firstByte == -1) { return false; } else { - ((PBIS)in).unread(firstByte); + ((PBIS) in).unread(firstByte); } try { this.current = parseCell(); } catch (IOException ioEx) { - ((PBIS)in).resetBuf(1); // reset the buffer in case the underlying stream is read from upper layers + ((PBIS) in).resetBuf(1); // reset the buffer in case the underlying stream is read from upper + // layers rethrowEofException(ioEx); } return true; @@ -97,8 +98,7 @@ protected InputStream getInputStream() { /** * Extract a Cell. * @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe - * thrown if EOF is reached prematurely. Does not return null. - * @throws IOException + * thrown if EOF is reached prematurely. Does not return null. */ @Nonnull protected abstract Cell parseCell() throws IOException; @@ -107,4 +107,4 @@ protected InputStream getInputStream() { public Cell current() { return this.current; } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java index 0f6c8a64303..c1c20f72118 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedHLogReader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import org.apache.phoenix.compat.hbase.CompatIndexedHLogReader; - /** - * A WALReader that can also deserialize custom WALEdit s that contain index information. - * - * This is basically a wrapper around a SequenceFileLogReader that has a custom - * SequenceFileLogReader.WALReader#next(Object) method that only replaces the creation of the WALEdit with our own custom - * type + * A WALReader that can also deserialize custom WALEdit s that contain index information. This is + * basically a wrapper around a SequenceFileLogReader that has a custom + * SequenceFileLogReader.WALReader#next(Object) method that only replaces the creation of the + * WALEdit with our own custom type *
    * This is a little bit of a painful way of going about this, but saves the effort of hacking the * HBase source (and deal with getting it reviewed and backported, etc.) and still works. @@ -40,4 +37,4 @@ */ public class IndexedHLogReader extends CompatIndexedHLogReader { -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java index ebd212e8a49..705208cfc9d 100644 --- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java +++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.DataInput; @@ -38,7 +37,6 @@ import org.apache.phoenix.hbase.index.wal.KeyValueCodec; import org.apache.phoenix.util.PhoenixKeyValueUtil; - /** * Support custom indexing {@link KeyValue}s when written to the WAL. *
    @@ -52,43 +50,48 @@ public class IndexedWALEditCodec extends WALCellCodec { // the stream private static final int REGULAR_KEY_VALUE_MARKER = 0; private CompressionContext compression; - private static final int MIN_BINARY_COMPATIBLE_INDEX_CODEC_VERSION = VersionUtil.encodeVersion("1", "1", "3"); + private static final int MIN_BINARY_COMPATIBLE_INDEX_CODEC_VERSION = + VersionUtil.encodeVersion("1", "1", "3"); private final boolean useDefaultDecoder; private static boolean isUseDefaultDecoder() { - String hbaseVersion = VersionInfo.getVersion(); - return VersionUtil.encodeVersion(hbaseVersion) >= MIN_BINARY_COMPATIBLE_INDEX_CODEC_VERSION; + String hbaseVersion = VersionInfo.getVersion(); + return VersionUtil.encodeVersion(hbaseVersion) >= MIN_BINARY_COMPATIBLE_INDEX_CODEC_VERSION; } /* * No-args constructor must be provided for WALSplitter/RPC Codec path */ public IndexedWALEditCodec() { - super(); - this.compression = null; - this.useDefaultDecoder = isUseDefaultDecoder(); + super(); + this.compression = null; + this.useDefaultDecoder = isUseDefaultDecoder(); } /* * Two-args Configuration and CompressionContext codec must be provided for WALCellCodec path */ public IndexedWALEditCodec(Configuration conf, CompressionContext compression) { - super(conf, compression); - this.compression = compression; - this.useDefaultDecoder = isUseDefaultDecoder(); + super(conf, compression); + this.compression = compression; + this.useDefaultDecoder = isUseDefaultDecoder(); } @Override public Decoder getDecoder(InputStream is) { // compression isn't enabled if (this.compression == null) { - return useDefaultDecoder ? new IndexKeyValueDecoder(is) : new BinaryCompatibleIndexKeyValueDecoder(is); + return useDefaultDecoder + ? new IndexKeyValueDecoder(is) + : new BinaryCompatibleIndexKeyValueDecoder(is); } // there is compression, so we get the standard decoder to handle reading those kvs Decoder decoder = super.getDecoder(is); // compression is on, reqturn our custom decoder - return useDefaultDecoder ? new CompressedIndexKeyValueDecoder(is, decoder) : new BinaryCompatibleCompressedIndexKeyValueDecoder(is, decoder); + return useDefaultDecoder + ? new CompressedIndexKeyValueDecoder(is, decoder) + : new BinaryCompatibleCompressedIndexKeyValueDecoder(is, decoder); } @Override @@ -107,22 +110,19 @@ public Encoder getEncoder(OutputStream os) { * Returns a DataInput given an InputStream */ private static DataInput getDataInput(InputStream is) { - return is instanceof DataInput - ? (DataInput) is - : new DataInputStream(is); + return is instanceof DataInput ? (DataInput) is : new DataInputStream(is); } /** * Returns a DataOutput given an OutputStream */ private static DataOutput getDataOutput(OutputStream os) { - return os instanceof DataOutput - ? (DataOutput) os - : new DataOutputStream(os); + return os instanceof DataOutput ? (DataOutput) os : new DataOutputStream(os); } private static abstract class PhoenixBaseDecoder extends BaseDecoder { protected DataInput dataInput; + public PhoenixBaseDecoder(InputStream in) { super(in); dataInput = getDataInput(this.in); @@ -135,16 +135,16 @@ public PhoenixBaseDecoder(InputStream in) { public static class IndexKeyValueDecoder extends PhoenixBaseDecoder { /** - * Create a Decoder on the given input stream with the given Decoder to parse - * generic {@link KeyValue}s. + * Create a Decoder on the given input stream with the given Decoder to parse generic + * {@link KeyValue}s. 
* @param is stream to read from */ - public IndexKeyValueDecoder(InputStream is){ + public IndexKeyValueDecoder(InputStream is) { super(is); } @Override - protected KeyValue parseCell() throws IOException{ + protected KeyValue parseCell() throws IOException { return KeyValueCodec.readKeyValue(this.dataInput); } } @@ -154,11 +154,11 @@ public static class CompressedIndexKeyValueDecoder extends PhoenixBaseDecoder { private Decoder decoder; /** - * Create a Decoder on the given input stream with the given Decoder to parse - * generic {@link KeyValue}s. - * @param is stream to read from + * Create a Decoder on the given input stream with the given Decoder to parse generic + * {@link KeyValue}s. + * @param is stream to read from * @param compressedDecoder decoder for generic {@link KeyValue}s. Should support the expected - * compression. + * compression. */ public CompressedIndexKeyValueDecoder(InputStream is, Decoder compressedDecoder) { super(is); @@ -171,7 +171,7 @@ protected Cell parseCell() throws IOException { int marker = this.in.read(); if (marker < 0) { throw new EOFException( - "Unexepcted end of stream found while reading next (Indexed) KeyValue"); + "Unexepcted end of stream found while reading next (Indexed) KeyValue"); } // do the normal thing, if its a regular kv @@ -189,6 +189,7 @@ protected Cell parseCell() throws IOException { private static abstract class PhoenixBaseEncoder extends BaseEncoder { protected DataOutput dataOutput; + public PhoenixBaseEncoder(OutputStream out) { super(out); dataOutput = getDataOutput(this.out); @@ -240,96 +241,100 @@ public void flush() throws IOException { @Override public void write(Cell cell) throws IOException { - //make sure we are open + // make sure we are open checkFlushed(); - //write the special marker so we can figure out which kind of kv is it + // write the special marker so we can figure out which kind of kv is it int marker = IndexedWALEditCodec.REGULAR_KEY_VALUE_MARKER; if (cell instanceof IndexedKeyValue) { marker = KeyValueCodec.INDEX_TYPE_LENGTH_MARKER; } out.write(marker); - //then serialize based on the marker + // then serialize based on the marker if (marker == IndexedWALEditCodec.REGULAR_KEY_VALUE_MARKER) { this.compressedKvEncoder.write(cell); - } - else{ + } else { KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell)); } } } - - private static abstract class BinaryCompatiblePhoenixBaseDecoder extends BinaryCompatibleBaseDecoder { - protected DataInput dataInput; - public BinaryCompatiblePhoenixBaseDecoder(InputStream in) { - super(in); - dataInput = getDataInput(this.in); - } + + private static abstract class BinaryCompatiblePhoenixBaseDecoder + extends BinaryCompatibleBaseDecoder { + protected DataInput dataInput; + + public BinaryCompatiblePhoenixBaseDecoder(InputStream in) { + super(in); + dataInput = getDataInput(this.in); + } } - + /** - * This class is meant to be used when runtime version of HBase - * HBase is older than 1.1.3. This is needed to handle binary incompatibility introduced by - * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details. + * This class is meant to be used when runtime version of HBase HBase is older than 1.1.3. This is + * needed to handle binary incompatibility introduced by HBASE-14501. See PHOENIX-2629 and + * PHOENIX-2636 for details. */ - private static class BinaryCompatibleIndexKeyValueDecoder extends BinaryCompatiblePhoenixBaseDecoder { - /** - * Create a Decoder on the given input stream with the given Decoder to parse - * generic {@link KeyValue}s. 
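The encoder and decoders in this hunk share one wire convention: each record is prefixed with a single marker byte, and the reader branches on that marker to pick the right parser. A self-contained sketch of that framing idea, with hypothetical marker values and string payloads standing in for the real KeyValue encodings:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class MarkerFramingSketch {
  static final int REGULAR = 0;  // stand-in for the "plain record" marker
  static final int INDEXED = 1;  // stand-in for the "index metadata" marker

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    // Writer: one marker byte, then the payload for that record type.
    out.write(INDEXED);
    out.writeUTF("index-metadata");
    out.write(REGULAR);
    out.writeUTF("regular-cell");

    // Reader: branch on the marker to choose the decoder.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    int marker;
    while ((marker = in.read()) != -1) {
      String payload = in.readUTF();
      System.out.println((marker == INDEXED ? "indexed: " : "regular: ") + payload);
    }
  }
}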
- * @param is stream to read from - */ - public BinaryCompatibleIndexKeyValueDecoder(InputStream is){ - super(is); - } + private static class BinaryCompatibleIndexKeyValueDecoder + extends BinaryCompatiblePhoenixBaseDecoder { + /** + * Create a Decoder on the given input stream with the given Decoder to parse generic + * {@link KeyValue}s. + * @param is stream to read from + */ + public BinaryCompatibleIndexKeyValueDecoder(InputStream is) { + super(is); + } - @Override - protected KeyValue parseCell() throws IOException{ - return KeyValueCodec.readKeyValue(this.dataInput); - } + @Override + protected KeyValue parseCell() throws IOException { + return KeyValueCodec.readKeyValue(this.dataInput); + } } - + /** - * This class is meant to be used when runtime version of HBase - * HBase is older than 1.1.3. This is needed to handle binary incompatibility introduced by - * HBASE-14501. See PHOENIX-2629 and PHOENIX-2636 for details. + * This class is meant to be used when runtime version of HBase HBase is older than 1.1.3. This is + * needed to handle binary incompatibility introduced by HBASE-14501. See PHOENIX-2629 and + * PHOENIX-2636 for details. */ - private static class BinaryCompatibleCompressedIndexKeyValueDecoder extends BinaryCompatiblePhoenixBaseDecoder { - - private Decoder decoder; - - /** - * Create a Decoder on the given input stream with the given Decoder to parse - * generic {@link KeyValue}s. - * @param is stream to read from - * @param compressedDecoder decoder for generic {@link KeyValue}s. Should support the expected - * compression. - */ - public BinaryCompatibleCompressedIndexKeyValueDecoder(InputStream is, Decoder compressedDecoder) { - super(is); - this.decoder = compressedDecoder; - } + private static class BinaryCompatibleCompressedIndexKeyValueDecoder + extends BinaryCompatiblePhoenixBaseDecoder { - @Override - protected Cell parseCell() throws IOException { - // reader the marker - int marker = this.in.read(); - if (marker < 0) { - throw new EOFException( - "Unexepcted end of stream found while reading next (Indexed) KeyValue"); - } + private Decoder decoder; - // do the normal thing, if its a regular kv - if (marker == REGULAR_KEY_VALUE_MARKER) { - if (!this.decoder.advance()) { - throw new IOException("Could not read next key-value from generic KeyValue Decoder!"); - } - return this.decoder.current(); - } + /** + * Create a Decoder on the given input stream with the given Decoder to parse generic + * {@link KeyValue}s. + * @param is stream to read from + * @param compressedDecoder decoder for generic {@link KeyValue}s. Should support the expected + * compression. 
+ */ + public BinaryCompatibleCompressedIndexKeyValueDecoder(InputStream is, + Decoder compressedDecoder) { + super(is); + this.decoder = compressedDecoder; + } - // its an indexedKeyValue, so parse it out specially - return KeyValueCodec.readKeyValue(this.dataInput); + @Override + protected Cell parseCell() throws IOException { + // reader the marker + int marker = this.in.read(); + if (marker < 0) { + throw new EOFException( + "Unexepcted end of stream found while reading next (Indexed) KeyValue"); } + + // do the normal thing, if its a regular kv + if (marker == REGULAR_KEY_VALUE_MARKER) { + if (!this.decoder.advance()) { + throw new IOException("Could not read next key-value from generic KeyValue Decoder!"); + } + return this.decoder.current(); + } + + // its an indexedKeyValue, so parse it out specially + return KeyValueCodec.readKeyValue(this.dataInput); + } } - -} \ No newline at end of file + +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/GlobalCache.java index fb54e8dc25e..45f6f5dbbda 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/GlobalCache.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/GlobalCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,197 +37,188 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PMetaDataEntity; import org.apache.phoenix.schema.metrics.MetricsMetadataSource; -import org.apache.phoenix.util.SizedUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.phoenix.thirdparty.com.google.common.cache.Weigher; - +import org.apache.phoenix.util.SizedUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * - * Global root cache for the server. Each tenant is managed as a child tenant cache of this one. Queries - * not associated with a particular tenant use this as their tenant cache. - * - * + * Global root cache for the server. Each tenant is managed as a child tenant cache of this one. + * Queries not associated with a particular tenant use this as their tenant cache. 
* @since 0.1 */ public class GlobalCache extends TenantCacheImpl { - private static final Logger LOGGER = LoggerFactory.getLogger(GlobalCache.class); - private static volatile GlobalCache INSTANCE; - - private final Configuration config; - // TODO: Use Guava cache with auto removal after lack of access - private final ConcurrentMap perTenantCacheMap = new ConcurrentHashMap(); - // Cache for lastest PTable for a given Phoenix table - private volatile Cache metaDataCache; - private MetricsMetadataSource metricsSource; - - public long clearTenantCache() { - long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory(); - if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) { - LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() - - getMemoryManager().getAvailableMemory()) + - " bytes not freed from global cache"); - } - removeAllServerCache(); - for (Map.Entry entry : perTenantCacheMap.entrySet()) { - TenantCache cache = entry.getValue(); - long unfreedTenantBytes = cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory(); - if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) { - ImmutableBytesWritable cacheId = entry.getKey(); - LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " + - Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(), - cacheId.getLength())); - } - unfreedBytes += unfreedTenantBytes; - cache.removeAllServerCache(); - } - perTenantCacheMap.clear(); - return unfreedBytes; + private static final Logger LOGGER = LoggerFactory.getLogger(GlobalCache.class); + private static volatile GlobalCache INSTANCE; + + private final Configuration config; + // TODO: Use Guava cache with auto removal after lack of access + private final ConcurrentMap perTenantCacheMap = + new ConcurrentHashMap(); + // Cache for lastest PTable for a given Phoenix table + private volatile Cache metaDataCache; + private MetricsMetadataSource metricsSource; + + public long clearTenantCache() { + long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory(); + if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Found " + (getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory()) + + " bytes not freed from global cache"); } - - public Cache getMetaDataCache() { - // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration - // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be - // made at driver initialization time which is too early for some systems. 
- Cache result = metaDataCache; - if (result == null) { - synchronized(this) { - result = metaDataCache; - if(result == null) { - long maxTTL = config.getLong( - QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS); - long maxSize = config.getLongBytes( - QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE); - metaDataCache = result = CacheBuilder.newBuilder() - .maximumWeight(maxSize) - .expireAfterAccess(maxTTL, TimeUnit.MILLISECONDS) - .weigher(new Weigher() { - @Override - public int weigh(ImmutableBytesPtr key, PMetaDataEntity table) { - return SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + key.getLength() + table.getEstimatedSize(); - } - }) - .removalListener(notification -> { - if (this.metricsSource != null) { - if (notification.wasEvicted()) { - metricsSource.incrementMetadataCacheEvictionCount(); - } else { - metricsSource.incrementMetadataCacheRemovalCount(); - } - if (notification.getValue() != null) { - metricsSource.decrementMetadataCacheUsedSize( - notification.getValue().getEstimatedSize()); - } - } - }) - .build(); - } - } - } - return result; + removeAllServerCache(); + for (Map.Entry entry : perTenantCacheMap.entrySet()) { + TenantCache cache = entry.getValue(); + long unfreedTenantBytes = + cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory(); + if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) { + ImmutableBytesWritable cacheId = entry.getKey(); + LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " + + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(), cacheId.getLength())); + } + unfreedBytes += unfreedTenantBytes; + cache.removeAllServerCache(); } + perTenantCacheMap.clear(); + return unfreedBytes; + } - public static GlobalCache getInstance(RegionCoprocessorEnvironment env) { - GlobalCache result = INSTANCE; + public Cache getMetaDataCache() { + // Lazy initialize QueryServices so that we only attempt to create an HBase Configuration + // object upon the first attempt to connect to any cluster. Otherwise, an attempt will be + // made at driver initialization time which is too early for some systems. 
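The comment above gives the classic motivation for lazy, thread-safe initialization: build the expensive object only on first use. The getter that follows relies on the double-checked locking idiom; here is a minimal, hypothetical sketch of that idiom (volatile field, re-check under the lock) outside of any Phoenix types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LazyCacheSketch {
  // volatile so a fully constructed map is visible to all threads
  private volatile Map<String, String> cache;

  Map<String, String> getCache() {
    Map<String, String> result = cache;        // one volatile read on the fast path
    if (result == null) {
      synchronized (this) {
        result = cache;
        if (result == null) {                  // re-check while holding the lock
          cache = result = new ConcurrentHashMap<>();
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    LazyCacheSketch sketch = new LazyCacheSketch();
    sketch.getCache().put("k", "v");
    System.out.println(sketch.getCache().get("k"));  // prints: v
  }
}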
+ Cache result = metaDataCache; + if (result == null) { + synchronized (this) { + result = metaDataCache; if (result == null) { - synchronized(GlobalCache.class) { - result = INSTANCE; - if(result == null) { - INSTANCE = result = new GlobalCache(env.getConfiguration()); + long maxTTL = + config.getLong(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS); + long maxSize = config.getLongBytes(QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE); + metaDataCache = result = CacheBuilder.newBuilder().maximumWeight(maxSize) + .expireAfterAccess(maxTTL, TimeUnit.MILLISECONDS) + .weigher(new Weigher() { + @Override + public int weigh(ImmutableBytesPtr key, PMetaDataEntity table) { + return SizedUtil.IMMUTABLE_BYTES_PTR_SIZE + key.getLength() + + table.getEstimatedSize(); + } + }).removalListener(notification -> { + if (this.metricsSource != null) { + if (notification.wasEvicted()) { + metricsSource.incrementMetadataCacheEvictionCount(); + } else { + metricsSource.incrementMetadataCacheRemovalCount(); + } + if (notification.getValue() != null) { + metricsSource + .decrementMetadataCacheUsedSize(notification.getValue().getEstimatedSize()); } - } + } + }).build(); } - return result; + } } - - /** - * Get the tenant cache associated with the tenantId. If tenantId is not applicable, null may be - * used in which case a global tenant cache is returned. - * @param env the HBase configuration - * @param tenantId the tenant ID or null if not applicable. - * @return TenantCache - */ - public static TenantCache getTenantCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId) { - GlobalCache globalCache = GlobalCache.getInstance(env); - TenantCache tenantCache = tenantId == null ? globalCache : globalCache.getChildTenantCache(tenantId); - return tenantCache; - } - - private static long getMaxMemorySize(Configuration config) { - long maxSize = Runtime.getRuntime().maxMemory() * - config.getInt(MAX_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MEMORY_PERC) / 100; - maxSize = Math.min(maxSize, config.getLongBytes(MAX_MEMORY_SIZE_ATTRIB, Long.MAX_VALUE)); - return maxSize; - } - - private GlobalCache(Configuration config) { - super(new GlobalMemoryManager(getMaxMemorySize(config)), - config.getInt( - QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS), - config.getInt( - QueryServices.MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS)); - this.config = config; - } - - public Configuration getConfig() { - return config; - } - - /** - * Retrieve the tenant cache given an tenantId. 
- * @param tenantId the ID that identifies the tenant - * @return the existing or newly created TenantCache - */ - public TenantCache getChildTenantCache(ImmutableBytesPtr tenantId) { - TenantCache tenantCache = perTenantCacheMap.get(tenantId); - if (tenantCache == null) { - int maxTenantMemoryPerc = config.getInt( - MAX_TENANT_MEMORY_PERC_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_TENANT_MEMORY_PERC); - int maxServerCacheTimeToLive = config.getInt( - QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); - int maxServerCachePersistenceTimeToLive = config.getInt( - QueryServices.MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS); - TenantCacheImpl newTenantCache = new TenantCacheImpl( - new ChildMemoryManager(getMemoryManager(), maxTenantMemoryPerc), - maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive); - tenantCache = perTenantCacheMap.putIfAbsent(tenantId, newTenantCache); - if (tenantCache == null) { - tenantCache = newTenantCache; - } + return result; + } + + public static GlobalCache getInstance(RegionCoprocessorEnvironment env) { + GlobalCache result = INSTANCE; + if (result == null) { + synchronized (GlobalCache.class) { + result = INSTANCE; + if (result == null) { + INSTANCE = result = new GlobalCache(env.getConfiguration()); } - return tenantCache; + } } + return result; + } + + /** + * Get the tenant cache associated with the tenantId. If tenantId is not applicable, null may be + * used in which case a global tenant cache is returned. + * @param env the HBase configuration + * @param tenantId the tenant ID or null if not applicable. + */ + public static TenantCache getTenantCache(RegionCoprocessorEnvironment env, + ImmutableBytesPtr tenantId) { + GlobalCache globalCache = GlobalCache.getInstance(env); + TenantCache tenantCache = + tenantId == null ? globalCache : globalCache.getChildTenantCache(tenantId); + return tenantCache; + } + + private static long getMaxMemorySize(Configuration config) { + long maxSize = Runtime.getRuntime().maxMemory() + * config.getInt(MAX_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MEMORY_PERC) / 100; + maxSize = Math.min(maxSize, config.getLongBytes(MAX_MEMORY_SIZE_ATTRIB, Long.MAX_VALUE)); + return maxSize; + } - public void setMetricsSource(MetricsMetadataSource metricsSource) { - this.metricsSource = metricsSource; + private GlobalCache(Configuration config) { + super(new GlobalMemoryManager(getMaxMemorySize(config)), + config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS), + config.getInt(QueryServices.MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS)); + this.config = config; + } + + public Configuration getConfig() { + return config; + } + + /** + * Retrieve the tenant cache given an tenantId. 
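The javadoc for getChildTenantCache describes returning the existing or a newly created per-tenant cache; the method gets that behavior from ConcurrentMap.putIfAbsent, keeping whichever instance won the race. A small sketch of the idiom with hypothetical per-tenant counters instead of real TenantCache objects:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class PerTenantRegistrySketch {
  static final ConcurrentMap<String, AtomicLong> COUNTERS = new ConcurrentHashMap<>();

  // Return the existing per-tenant counter, or install a fresh one. putIfAbsent
  // returns the previous mapping, so a thread that loses the race reuses it.
  static AtomicLong getOrCreate(String tenantId) {
    AtomicLong counter = COUNTERS.get(tenantId);
    if (counter == null) {
      AtomicLong fresh = new AtomicLong();
      counter = COUNTERS.putIfAbsent(tenantId, fresh);
      if (counter == null) {
        counter = fresh;  // this thread won the race
      }
    }
    return counter;
  }

  public static void main(String[] args) {
    getOrCreate("tenant-1").incrementAndGet();
    System.out.println(getOrCreate("tenant-1").get());  // prints: 1 (same instance both times)
  }
}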
+ * @param tenantId the ID that identifies the tenant + * @return the existing or newly created TenantCache + */ + public TenantCache getChildTenantCache(ImmutableBytesPtr tenantId) { + TenantCache tenantCache = perTenantCacheMap.get(tenantId); + if (tenantCache == null) { + int maxTenantMemoryPerc = config.getInt(MAX_TENANT_MEMORY_PERC_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_TENANT_MEMORY_PERC); + int maxServerCacheTimeToLive = + config.getInt(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS); + int maxServerCachePersistenceTimeToLive = + config.getInt(QueryServices.MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_PERSISTENCE_TIME_TO_LIVE_MS); + TenantCacheImpl newTenantCache = + new TenantCacheImpl(new ChildMemoryManager(getMemoryManager(), maxTenantMemoryPerc), + maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive); + tenantCache = perTenantCacheMap.putIfAbsent(tenantId, newTenantCache); + if (tenantCache == null) { + tenantCache = newTenantCache; + } } + return tenantCache; + } - public static class FunctionBytesPtr extends ImmutableBytesPtr { + public void setMetricsSource(MetricsMetadataSource metricsSource) { + this.metricsSource = metricsSource; + } - public FunctionBytesPtr(byte[] key) { - super(key); - } + public static class FunctionBytesPtr extends ImmutableBytesPtr { - @Override - public boolean equals(Object obj) { - if(obj instanceof FunctionBytesPtr) return super.equals(obj); - return false; - } + public FunctionBytesPtr(byte[] key) { + super(key); + } - @Override - public int hashCode() { - return super.hashCode(); - } + @Override + public boolean equals(Object obj) { + if (obj instanceof FunctionBytesPtr) return super.equals(obj); + return false; + } + + @Override + public int hashCode() { + return super.hashCode(); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java index bd45b5d485e..aa8440b1694 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,146 +15,136 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.cache.aggcache; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.util.Closeables; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.Closeable; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; -import java.nio.MappedByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileChannel.MapMode; import java.util.Map; import java.util.UUID; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.Closeables; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** - * This class abstracts a SpillFile It is a accessible on a per page basis - * For every SpillFile object a single spill file is always created. - * Additional overflow files are dynamically created in case the page index requested is not covered by - * the spillFiles allocated so far + * This class abstracts a SpillFile It is a accessible on a per page basis For every SpillFile + * object a single spill file is always created. Additional overflow files are dynamically created + * in case the page index requested is not covered by the spillFiles allocated so far */ public class SpillFile implements Closeable { - private static final Logger LOGGER = LoggerFactory.getLogger(SpillFile.class); - // Default size for a single spillFile 2GB - private static final int SPILL_FILE_SIZE = Integer.MAX_VALUE; - // Page size for a spill file 4K - static final int DEFAULT_PAGE_SIZE = 4096; - // Map of initial SpillFile at index 0, and overflow spillFiles - private Map tempFiles; - // Custom spill files directory - private File spillFilesDirectory = null; + private static final Logger LOGGER = LoggerFactory.getLogger(SpillFile.class); + // Default size for a single spillFile 2GB + private static final int SPILL_FILE_SIZE = Integer.MAX_VALUE; + // Page size for a spill file 4K + static final int DEFAULT_PAGE_SIZE = 4096; + // Map of initial SpillFile at index 0, and overflow spillFiles + private Map tempFiles; + // Custom spill files directory + private File spillFilesDirectory = null; - // Wrapper class for a TempFile: File + RandomAccessFile - private static class TempFile implements Closeable { - private final RandomAccessFile rndFile; - private final File file; + // Wrapper class for a TempFile: File + RandomAccessFile + private static class TempFile implements Closeable { + private final RandomAccessFile rndFile; + private final File file; - public TempFile(File file, RandomAccessFile rndFile) { - this.file = file; - this.rndFile = rndFile; - } + public TempFile(File file, RandomAccessFile rndFile) { + this.file = file; + this.rndFile = rndFile; + } - @Override - public void close() throws IOException { - Closeables.closeQuietly(rndFile.getChannel()); - Closeables.closeQuietly(rndFile); + @Override + public void close() throws IOException { + Closeables.closeQuietly(rndFile.getChannel()); + Closeables.closeQuietly(rndFile); - if (file != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Deleting tempFile: " + file.getAbsolutePath()); - } - try { - file.delete(); - } catch (SecurityException e) { - LOGGER.warn("IOException thrown while closing Closeable." 
+ e); - } - } + if (file != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Deleting tempFile: " + file.getAbsolutePath()); + } + try { + file.delete(); + } catch (SecurityException e) { + LOGGER.warn("IOException thrown while closing Closeable." + e); } + } } + } - private SpillFile(File spillFilesDirectory) throws IOException { - this.spillFilesDirectory = spillFilesDirectory; - this.tempFiles = Maps.newHashMap(); - // Init the first pre-allocated spillFile - tempFiles.put(0, createTempFile()); - } + private SpillFile(File spillFilesDirectory) throws IOException { + this.spillFilesDirectory = spillFilesDirectory; + this.tempFiles = Maps.newHashMap(); + // Init the first pre-allocated spillFile + tempFiles.put(0, createTempFile()); + } - /** - * Create a new SpillFile using the Java TempFile creation function. SpillFile is access in - * pages. - */ - public static SpillFile createSpillFile(File spillFilesDir) { - try { - return new SpillFile(spillFilesDir); - } catch (IOException ioe) { - throw new RuntimeException("Could not create Spillfile " + ioe); - } + /** + * Create a new SpillFile using the Java TempFile creation function. SpillFile is access in pages. + */ + public static SpillFile createSpillFile(File spillFilesDir) { + try { + return new SpillFile(spillFilesDir); + } catch (IOException ioe) { + throw new RuntimeException("Could not create Spillfile " + ioe); } - - - private TempFile createTempFile() throws IOException { - // Create temp file in temp dir or custom dir if provided - File tempFile = File.createTempFile(UUID.randomUUID().toString(), - null, spillFilesDirectory); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Creating new SpillFile: " + tempFile.getAbsolutePath()); - } - RandomAccessFile file = new RandomAccessFile(tempFile, "rw"); - file.setLength(SPILL_FILE_SIZE); - - return new TempFile(tempFile, file); + } + + private TempFile createTempFile() throws IOException { + // Create temp file in temp dir or custom dir if provided + File tempFile = File.createTempFile(UUID.randomUUID().toString(), null, spillFilesDirectory); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Creating new SpillFile: " + tempFile.getAbsolutePath()); } + RandomAccessFile file = new RandomAccessFile(tempFile, "rw"); + file.setLength(SPILL_FILE_SIZE); - /** - * Random access to a page of the current spill file - * @param index - * @return a file seeked to the correct page - */ - public RandomAccessFile getPage(int index) { - try { - TempFile tempFile = null; - int fileIndex = 0; + return new TempFile(tempFile, file); + } + + /** + * Random access to a page of the current spill file + * @return a file seeked to the correct page + */ + public RandomAccessFile getPage(int index) { + try { + TempFile tempFile = null; + int fileIndex = 0; - long offset = (long) index * (long) DEFAULT_PAGE_SIZE; - if (offset >= SPILL_FILE_SIZE) { - // Offset exceeds the first SpillFile size - // Get the index of the file that should contain the pageID - fileIndex = (int) (offset / SPILL_FILE_SIZE); - if (!tempFiles.containsKey(fileIndex)) { - // Dynamically add new spillFiles if directory grows beyond - // max page ID. 
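// A small, self-contained sketch of the page-addressing arithmetic used by getPage() above:
// a logical page index is turned into a byte offset, and once the offset passes the ~2GB
// limit of a single backing file, an overflow file index is derived from it. Constants mirror
// the surrounding code; the class itself and the in-file offset are illustrative only.
public class PageAddressSketch {
  static final int PAGE_SIZE = 4096;                // DEFAULT_PAGE_SIZE
  static final long FILE_SIZE = Integer.MAX_VALUE;  // SPILL_FILE_SIZE, one temp file

  public static void main(String[] args) {
    int pageIndex = 600_000;                        // a page beyond the first temp file
    long offset = (long) pageIndex * PAGE_SIZE;     // widen before multiplying to avoid int overflow
    int fileIndex = (int) (offset / FILE_SIZE);     // 0 for the initial file, >0 for overflow files
    long offsetInFile = offset % FILE_SIZE;         // hypothetical in-file position for that page
    System.out.println("file=" + fileIndex + " offset=" + offsetInFile);
  }
}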
- tempFile = createTempFile(); - tempFiles.put(fileIndex, tempFile); - } - } - tempFile = tempFiles.get(fileIndex); - RandomAccessFile file = tempFile.rndFile; - file.seek(offset); - return file; - } catch (IOException ioe) { - // Close resource - close(); - throw new RuntimeException("Could not get page at index: " + index); - } catch (IllegalArgumentException iae) { - // Close resource - close(); - throw iae; + long offset = (long) index * (long) DEFAULT_PAGE_SIZE; + if (offset >= SPILL_FILE_SIZE) { + // Offset exceeds the first SpillFile size + // Get the index of the file that should contain the pageID + fileIndex = (int) (offset / SPILL_FILE_SIZE); + if (!tempFiles.containsKey(fileIndex)) { + // Dynamically add new spillFiles if directory grows beyond + // max page ID. + tempFile = createTempFile(); + tempFiles.put(fileIndex, tempFile); } + } + tempFile = tempFiles.get(fileIndex); + RandomAccessFile file = tempFile.rndFile; + file.seek(offset); + return file; + } catch (IOException ioe) { + // Close resource + close(); + throw new RuntimeException("Could not get page at index: " + index); + } catch (IllegalArgumentException iae) { + // Close resource + close(); + throw iae; } + } - @Override - public void close() { - for(TempFile file : tempFiles.values()) { - // Swallow IOExceptions - Closeables.closeQuietly(file); - } + @Override + public void close() { + for (TempFile file : tempFiles.values()) { + // Swallow IOExceptions + Closeables.closeQuietly(file); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java index a4bca6620f0..ebf7a00c189 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.WritableUtils; import org.apache.phoenix.expression.aggregator.Aggregator; @@ -43,13 +42,12 @@ import org.apache.phoenix.schema.ValueBitSet; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.Closeables; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.TupleUtil; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Class servers as an adapter between the in-memory LRU cache and the Spill data structures. 
It * takes care of serializing / deserializing the key/value groupby tuples, and spilling them to the @@ -57,270 +55,257 @@ */ public class SpillManager implements Closeable { - // Wrapper class for DESERIALIZED groupby key/value tuples - public static class CacheEntry implements - Map.Entry { - - protected T key; - protected Aggregator[] aggs; - - public CacheEntry(T key, Aggregator[] aggs) { - this.key = key; - this.aggs = aggs; - } - - public Aggregator[] getValue(Configuration conf) { - return aggs; - } - - public int getKeyLength() { - return key.getLength(); - } - - @Override - public Aggregator[] getValue() { - return aggs; - } - - @Override - public Aggregator[] setValue(Aggregator[] arg0) { - this.aggs = arg0; - return aggs; - } + // Wrapper class for DESERIALIZED groupby key/value tuples + public static class CacheEntry + implements Map.Entry { - @Override - public T getKey() { - return key; - } + protected T key; + protected Aggregator[] aggs; + public CacheEntry(T key, Aggregator[] aggs) { + this.key = key; + this.aggs = aggs; } - private final ArrayList spillMaps; - private final int numSpillFiles; - - private final ServerAggregators aggregators; - private final Configuration conf; - - /** - * SpillManager takes care of spilling and loading tuples from spilled data structs - * @param numSpillFiles - * @param serverAggregators - */ - public SpillManager(int numSpillFiles, ServerAggregators serverAggregators, - Configuration conf, SpillableGroupByCache.QueryCache cache) { - try { - int estValueSize = serverAggregators.getEstimatedByteSize(); - spillMaps = Lists.newArrayList(); - this.numSpillFiles = numSpillFiles; - this.aggregators = serverAggregators; - this.conf = conf; - File spillFilesDir = conf.get(QueryServices.SPOOL_DIRECTORY) != null ? - new File(conf.get(QueryServices.SPOOL_DIRECTORY)) : null; - - // Ensure that a single element fits onto a page!!! 
- Preconditions.checkArgument(SpillFile.DEFAULT_PAGE_SIZE > estValueSize); - - // Create a list of spillFiles - // Each Spillfile only handles up to 2GB data - for (int i = 0; i < numSpillFiles; i++) { - SpillFile file = SpillFile.createSpillFile(spillFilesDir); - spillMaps.add(new SpillMap(file, SpillFile.DEFAULT_PAGE_SIZE, estValueSize, cache)); - } - } catch (IOException ioe) { - throw new RuntimeException("Could not init the SpillManager"); - } + public Aggregator[] getValue(Configuration conf) { + return aggs; } - // serialize a key/value tuple into a byte array - // WARNING: expensive - private byte[] serialize(ImmutableBytesPtr key, Aggregator[] aggs, - ServerAggregators serverAggs) throws IOException { - - DataOutputStream output = null; - ByteArrayOutputStream bai = null; - try { - bai = new ByteArrayOutputStream(); - output = new DataOutputStream(bai); - // key length - WritableUtils.writeVInt(output, key.getLength()); - // key - output.write(key.get(), key.getOffset(), key.getLength()); - byte[] aggsByte = serverAggs.toBytes(aggs); - // aggs length - WritableUtils.writeVInt(output, aggsByte.length); - // aggs - output.write(aggsByte); - return bai.toByteArray(); - } finally { - - if (bai != null) { - bai.close(); - bai = null; - } - if (output != null) { - output.close(); - output = null; - } - } + public int getKeyLength() { + return key.getLength(); } - /** - * Helper method to deserialize the key part from a serialized byte array - * @param data - * @return - * @throws IOException - */ - static ImmutableBytesPtr getKey(byte[] data) throws IOException { - DataInputStream input = null; - try { - input = new DataInputStream(new ByteArrayInputStream(data)); - // key length - int keyLength = WritableUtils.readVInt(input); - int offset = WritableUtils.getVIntSize(keyLength); - // key - return new ImmutableBytesPtr(data, offset, keyLength); - } finally { - if (input != null) { - input.close(); - input = null; - } - } + @Override + public Aggregator[] getValue() { + return aggs; } - - // Instantiate Aggregators from a serialized byte array - private Aggregator[] getAggregators(byte[] data) throws IOException { - DataInputStream input = null; - try { - input = new DataInputStream(new ByteArrayInputStream(data)); - // key length - int keyLength = WritableUtils.readVInt(input); - int vIntKeyLength = WritableUtils.getVIntSize(keyLength); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength); - - // value length - input.skip(keyLength); - int valueLength = WritableUtils.readVInt(input); - int vIntValLength = WritableUtils.getVIntSize(keyLength); - Cell keyValue = - PhoenixKeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(), - QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN, - QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength); - Tuple result = new SingleKeyValueTuple(keyValue); - TupleUtil.getAggregateValue(result, ptr); - KeyValueSchema schema = aggregators.getValueSchema(); - ValueBitSet tempValueSet = ValueBitSet.newInstance(schema); - tempValueSet.clear(); - tempValueSet.or(ptr); - - int i = 0, maxOffset = ptr.getOffset() + ptr.getLength(); - SingleAggregateFunction[] funcArray = aggregators.getFunctions(); - Aggregator[] sAggs = new Aggregator[funcArray.length]; - Boolean hasValue; - schema.iterator(ptr); - while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) { - SingleAggregateFunction func = funcArray[i]; - sAggs[i++] = - hasValue ? 
func.newServerAggregator(conf, ptr) : func - .newServerAggregator(conf); - } - return sAggs; - - } finally { - Closeables.closeQuietly(input); - } + @Override + public Aggregator[] setValue(Aggregator[] arg0) { + this.aggs = arg0; + return aggs; } - /** - * Helper function to deserialize a byte array into a CacheEntry - * @param - * @param bytes - * @throws IOException - */ - @SuppressWarnings("unchecked") - public CacheEntry toCacheEntry(byte[] bytes) - throws IOException { - ImmutableBytesPtr key = SpillManager.getKey(bytes); - Aggregator[] aggs = getAggregators(bytes); - - return new CacheEntry((K) key, aggs); + @Override + public T getKey() { + return key; } - // Determines the partition, i.e. spillFile the tuple should get spilled to. - private int getPartition(ImmutableBytesWritable key) { - // Simple implementation hash mod numFiles - return (int)(Math.abs((long)key.hashCode()) % numSpillFiles); + } + + private final ArrayList spillMaps; + private final int numSpillFiles; + + private final ServerAggregators aggregators; + private final Configuration conf; + + /** + * SpillManager takes care of spilling and loading tuples from spilled data structs + */ + public SpillManager(int numSpillFiles, ServerAggregators serverAggregators, Configuration conf, + SpillableGroupByCache.QueryCache cache) { + try { + int estValueSize = serverAggregators.getEstimatedByteSize(); + spillMaps = Lists.newArrayList(); + this.numSpillFiles = numSpillFiles; + this.aggregators = serverAggregators; + this.conf = conf; + File spillFilesDir = conf.get(QueryServices.SPOOL_DIRECTORY) != null + ? new File(conf.get(QueryServices.SPOOL_DIRECTORY)) + : null; + + // Ensure that a single element fits onto a page!!! + Preconditions.checkArgument(SpillFile.DEFAULT_PAGE_SIZE > estValueSize); + + // Create a list of spillFiles + // Each Spillfile only handles up to 2GB data + for (int i = 0; i < numSpillFiles; i++) { + SpillFile file = SpillFile.createSpillFile(spillFilesDir); + spillMaps.add(new SpillMap(file, SpillFile.DEFAULT_PAGE_SIZE, estValueSize, cache)); + } + } catch (IOException ioe) { + throw new RuntimeException("Could not init the SpillManager"); } - - /** - * Function that spills a key/value groupby tuple into a partition Spilling always triggers a - * serialize call - * @param key - * @param value - * @throws IOException - */ - public void spill(ImmutableBytesWritable key, Aggregator[] value) throws IOException { - SpillMap spillMap = spillMaps.get(getPartition(key)); - ImmutableBytesPtr keyPtr = new ImmutableBytesPtr(key); - byte[] data = serialize(keyPtr, value, aggregators); - spillMap.put(keyPtr, data); + } + + // serialize a key/value tuple into a byte array + // WARNING: expensive + private byte[] serialize(ImmutableBytesPtr key, Aggregator[] aggs, ServerAggregators serverAggs) + throws IOException { + + DataOutputStream output = null; + ByteArrayOutputStream bai = null; + try { + bai = new ByteArrayOutputStream(); + output = new DataOutputStream(bai); + // key length + WritableUtils.writeVInt(output, key.getLength()); + // key + output.write(key.get(), key.getOffset(), key.getLength()); + byte[] aggsByte = serverAggs.toBytes(aggs); + // aggs length + WritableUtils.writeVInt(output, aggsByte.length); + // aggs + output.write(aggsByte); + return bai.toByteArray(); + } finally { + + if (bai != null) { + bai.close(); + bai = null; + } + if (output != null) { + output.close(); + output = null; + } } - - /** - * Function that loads a spilled key/value groupby tuple from one of the spill partitions into - * 
the LRU cache. Loading always involves deserialization - * @throws IOException - */ - public Aggregator[] loadEntry(ImmutableBytesWritable key) throws IOException { - SpillMap spillMap = spillMaps.get(getPartition(key)); - byte[] data = spillMap.get(key); - if (data != null) { - return getAggregators(data); - } - return null; + } + + /** + * Helper method to deserialize the key part from a serialized byte array + */ + static ImmutableBytesPtr getKey(byte[] data) throws IOException { + DataInputStream input = null; + try { + input = new DataInputStream(new ByteArrayInputStream(data)); + // key length + int keyLength = WritableUtils.readVInt(input); + int offset = WritableUtils.getVIntSize(keyLength); + // key + return new ImmutableBytesPtr(data, offset, keyLength); + } finally { + if (input != null) { + input.close(); + input = null; + } } - - /** - * Close the attached spillMap - */ - @Override - public void close() { - for (int i = 0; i < spillMaps.size(); i++) { - Closeables.closeQuietly(spillMaps.get(i).getSpillFile()); - } + } + + // Instantiate Aggregators from a serialized byte array + private Aggregator[] getAggregators(byte[] data) throws IOException { + DataInputStream input = null; + try { + input = new DataInputStream(new ByteArrayInputStream(data)); + // key length + int keyLength = WritableUtils.readVInt(input); + int vIntKeyLength = WritableUtils.getVIntSize(keyLength); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength); + + // value length + input.skip(keyLength); + int valueLength = WritableUtils.readVInt(input); + int vIntValLength = WritableUtils.getVIntSize(keyLength); + Cell keyValue = PhoenixKeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(), + QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN, + QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength); + Tuple result = new SingleKeyValueTuple(keyValue); + TupleUtil.getAggregateValue(result, ptr); + KeyValueSchema schema = aggregators.getValueSchema(); + ValueBitSet tempValueSet = ValueBitSet.newInstance(schema); + tempValueSet.clear(); + tempValueSet.or(ptr); + + int i = 0, maxOffset = ptr.getOffset() + ptr.getLength(); + SingleAggregateFunction[] funcArray = aggregators.getFunctions(); + Aggregator[] sAggs = new Aggregator[funcArray.length]; + Boolean hasValue; + schema.iterator(ptr); + while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) { + SingleAggregateFunction func = funcArray[i]; + sAggs[i++] = + hasValue ? func.newServerAggregator(conf, ptr) : func.newServerAggregator(conf); + } + return sAggs; + + } finally { + Closeables.closeQuietly(input); } - - /** - * Function returns an iterator over all spilled Tuples - */ - public SpillMapIterator newDataIterator() { - return new SpillMapIterator(); + } + + /** + * Helper function to deserialize a byte array into a CacheEntry + * @param + */ + @SuppressWarnings("unchecked") + public CacheEntry toCacheEntry(byte[] bytes) + throws IOException { + ImmutableBytesPtr key = SpillManager.getKey(bytes); + Aggregator[] aggs = getAggregators(bytes); + + return new CacheEntry((K) key, aggs); + } + + // Determines the partition, i.e. spillFile the tuple should get spilled to. 
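// A tiny sketch of the partition choice described by the comment above: the spill file for a
// key is picked by hashing and taking the result modulo the number of spill files. The hash
// is widened to long before Math.abs so Integer.MIN_VALUE cannot yield a negative index.
public class SpillPartitionSketch {
  static int getPartition(int keyHashCode, int numSpillFiles) {
    return (int) (Math.abs((long) keyHashCode) % numSpillFiles);
  }

  public static void main(String[] args) {
    System.out.println(getPartition("someGroupByKey".hashCode(), 4));
    System.out.println(getPartition(Integer.MIN_VALUE, 4)); // still non-negative
  }
}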
+ private int getPartition(ImmutableBytesWritable key) { + // Simple implementation hash mod numFiles + return (int) (Math.abs((long) key.hashCode()) % numSpillFiles); + } + + /** + * Function that spills a key/value groupby tuple into a partition Spilling always triggers a + * serialize call + */ + public void spill(ImmutableBytesWritable key, Aggregator[] value) throws IOException { + SpillMap spillMap = spillMaps.get(getPartition(key)); + ImmutableBytesPtr keyPtr = new ImmutableBytesPtr(key); + byte[] data = serialize(keyPtr, value, aggregators); + spillMap.put(keyPtr, data); + } + + /** + * Function that loads a spilled key/value groupby tuple from one of the spill partitions into the + * LRU cache. Loading always involves deserialization + */ + public Aggregator[] loadEntry(ImmutableBytesWritable key) throws IOException { + SpillMap spillMap = spillMaps.get(getPartition(key)); + byte[] data = spillMap.get(key); + if (data != null) { + return getAggregators(data); + } + return null; + } + + /** + * Close the attached spillMap + */ + @Override + public void close() { + for (int i = 0; i < spillMaps.size(); i++) { + Closeables.closeQuietly(spillMaps.get(i).getSpillFile()); } + } - private final class SpillMapIterator implements Iterator { + /** + * Function returns an iterator over all spilled Tuples + */ + public SpillMapIterator newDataIterator() { + return new SpillMapIterator(); + } - int index = 0; - Iterator spillIter = spillMaps.get(index).iterator(); + private final class SpillMapIterator implements Iterator { - @Override - public boolean hasNext() { - if (!spillIter.hasNext()) { - if (index < (numSpillFiles - 1)) { - // Current spillFile exhausted get iterator over new one - spillIter = spillMaps.get(++index).iterator(); - } - } - return spillIter.hasNext(); - } + int index = 0; + Iterator spillIter = spillMaps.get(index).iterator(); - @Override - public byte[] next() { - return spillIter.next(); + @Override + public boolean hasNext() { + if (!spillIter.hasNext()) { + if (index < (numSpillFiles - 1)) { + // Current spillFile exhausted get iterator over new one + spillIter = spillMaps.get(++index).iterator(); } + } + return spillIter.hasNext(); + } - @Override - public void remove() { - throw new IllegalAccessError("Remove is not supported for this type of iterator"); - } + @Override + public byte[] next() { + return spillIter.next(); + } + + @Override + public void remove() { + throw new IllegalAccessError("Remove is not supported for this type of iterator"); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillMap.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillMap.java index e700569a278..10a627a1183 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillMap.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillMap.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.cache.aggcache; import java.io.IOException; @@ -30,492 +29,489 @@ import java.util.Set; import org.apache.hadoop.hbase.util.Bytes; - +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.thirdparty.com.google.common.hash.BloomFilter; import org.apache.phoenix.thirdparty.com.google.common.hash.Funnels; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; - /** - * Class implements an active spilled partition serialized tuples are first written into an in-memory data structure - * that represents a single page. As the page fills up, it is written to the current spillFile or spill partition For - * fast tuple discovery, the class maintains a per page bloom-filter and never de-serializes elements. The element - * spilling employs an extentible hashing technique. + * Class implements an active spilled partition serialized tuples are first written into an + * in-memory data structure that represents a single page. As the page fills up, it is written to + * the current spillFile or spill partition For fast tuple discovery, the class maintains a per page + * bloom-filter and never de-serializes elements. The element spilling employs an extentible hashing + * technique. */ public class SpillMap extends AbstractMap implements Iterable { - // Threshold is typically the page size - private final int thresholdBytes; - private final int pageInserts; - // Global directory depth - private int globalDepth; - private int curMapBufferIndex; - private SpillFile spillFile; - // Directory of hash buckets --> extendible hashing implementation - private FileMap[] directory; - private final SpillableGroupByCache.QueryCache cache; - - public SpillMap(SpillFile file, int thresholdBytes, int estValueSize, SpillableGroupByCache.QueryCache cache) - throws IOException { - this.thresholdBytes = thresholdBytes - Bytes.SIZEOF_INT; - this.pageInserts = thresholdBytes / estValueSize; - this.spillFile = file; - this.cache = cache; - - // Init the e-hashing directory structure - globalDepth = 1; - directory = new FileMap[(1 << globalDepth)]; - - for (int i = 0; i < directory.length; i++) { - // Create an empty bucket list - directory[i] = new FileMap(i, this.thresholdBytes, pageInserts, file); - directory[i].flushBuffer(); - } - directory[0].pageIn(); - curMapBufferIndex = 0; + // Threshold is typically the page size + private final int thresholdBytes; + private final int pageInserts; + // Global directory depth + private int globalDepth; + private int curMapBufferIndex; + private SpillFile spillFile; + // Directory of hash buckets --> extendible hashing implementation + private FileMap[] directory; + private final SpillableGroupByCache.QueryCache cache; + + public SpillMap(SpillFile file, int thresholdBytes, int estValueSize, + SpillableGroupByCache.QueryCache cache) throws IOException { + this.thresholdBytes = thresholdBytes - Bytes.SIZEOF_INT; + this.pageInserts = thresholdBytes / estValueSize; + this.spillFile = file; + this.cache = cache; + + // Init the e-hashing directory structure + globalDepth = 1; + directory = new FileMap[(1 << globalDepth)]; + + for (int i = 0; i < directory.length; i++) { + // Create an empty bucket list + directory[i] = new FileMap(i, this.thresholdBytes, pageInserts, file); + directory[i].flushBuffer(); } - - 
// Get the directoy index for a specific key - private int getBucketIndex(ImmutableBytesPtr key) { - // Get key hash - int hashCode = key.hashCode(); - - // Mask all but globalDepth low n bits - return hashCode & ((1 << globalDepth) - 1); + directory[0].pageIn(); + curMapBufferIndex = 0; + } + + // Get the directoy index for a specific key + private int getBucketIndex(ImmutableBytesPtr key) { + // Get key hash + int hashCode = key.hashCode(); + + // Mask all but globalDepth low n bits + return hashCode & ((1 << globalDepth) - 1); + } + + // Function redistributes the elements in the current index + // to two new buckets, based on the bit at localDepth + 1 position. + // Optionally this function also doubles the directory to allow + // for bucket splits + private void redistribute(int index, ImmutableBytesPtr keyNew, byte[] valueNew) { + // Get the respective bucket + FileMap byteMap = directory[index]; + + // Get the actual bucket index, that the directory index points to + int mappedIdx = byteMap.pageIndex; + + int localDepth = byteMap.localDepth; + ArrayList buckets = Lists.newArrayList(); + // Get all directory entries that point to the same bucket. + // TODO: can be made faster! + for (int i = 0; i < directory.length; i++) { + if (directory[i].pageIndex == mappedIdx) { + buckets.add(i); + } } - // Function redistributes the elements in the current index - // to two new buckets, based on the bit at localDepth + 1 position. - // Optionally this function also doubles the directory to allow - // for bucket splits - private void redistribute(int index, ImmutableBytesPtr keyNew, byte[] valueNew) { - // Get the respective bucket - FileMap byteMap = directory[index]; - - // Get the actual bucket index, that the directory index points to - int mappedIdx = byteMap.pageIndex; - - int localDepth = byteMap.localDepth; - ArrayList buckets = Lists.newArrayList(); - // Get all directory entries that point to the same bucket. - // TODO: can be made faster! - for (int i = 0; i < directory.length; i++) { - if (directory[i].pageIndex == mappedIdx) { - buckets.add(i); - } - } - - // Assuming no directory doubling for now - // compute the two new bucket Ids for splitting - // SpillFile adds new files dynamically in case the directory points to pageIDs - // that exceed the size limit of a single file. - - // TODO verify if some sort of de-fragmentation might be helpful - int tmpIndex = index ^ ((1 << localDepth)); - int b1Index = Math.min(index, tmpIndex); - int b2Index = Math.max(index, tmpIndex); - - // Create two new split buckets - FileMap b1 = new FileMap(b1Index, thresholdBytes, pageInserts, spillFile); - FileMap b2 = new FileMap(b2Index, thresholdBytes, pageInserts, spillFile); - - // redistribute old elements into b1 and b2 - for (Entry element : byteMap.pageMap.entrySet()) { - ImmutableBytesPtr key = element.getKey(); - byte[] value = element.getValue(); - // Only add key during redistribution if its not in the cache - // Otherwise this is an good point to reduce the number of spilled elements - if (!cache.isKeyContained(key)) { - // Re-distribute element onto the new 2 split buckets - if ((key.hashCode() & ((1 << localDepth))) != 0) { - b2.addElement(null, key, value); - } else { - b1.addElement(null, key, value); - } - } + // Assuming no directory doubling for now + // compute the two new bucket Ids for splitting + // SpillFile adds new files dynamically in case the directory points to pageIDs + // that exceed the size limit of a single file. 
+ + // TODO verify if some sort of de-fragmentation might be helpful + int tmpIndex = index ^ ((1 << localDepth)); + int b1Index = Math.min(index, tmpIndex); + int b2Index = Math.max(index, tmpIndex); + + // Create two new split buckets + FileMap b1 = new FileMap(b1Index, thresholdBytes, pageInserts, spillFile); + FileMap b2 = new FileMap(b2Index, thresholdBytes, pageInserts, spillFile); + + // redistribute old elements into b1 and b2 + for (Entry element : byteMap.pageMap.entrySet()) { + ImmutableBytesPtr key = element.getKey(); + byte[] value = element.getValue(); + // Only add key during redistribution if its not in the cache + // Otherwise this is an good point to reduce the number of spilled elements + if (!cache.isKeyContained(key)) { + // Re-distribute element onto the new 2 split buckets + if ((key.hashCode() & ((1 << localDepth))) != 0) { + b2.addElement(null, key, value); + } else { + b1.addElement(null, key, value); } + } + } - // Clear and GC the old now redistributed bucket - byteMap.pageMap.clear(); - byteMap = null; + // Clear and GC the old now redistributed bucket + byteMap.pageMap.clear(); + byteMap = null; - // Increase local bucket depths - b1.localDepth = localDepth + 1; - b2.localDepth = localDepth + 1; - boolean doubleDir = false; + // Increase local bucket depths + b1.localDepth = localDepth + 1; + b2.localDepth = localDepth + 1; + boolean doubleDir = false; - if (globalDepth < (localDepth + 1)) { - // Double directory structure and re-adjust pointers - doubleDir = true; + if (globalDepth < (localDepth + 1)) { + // Double directory structure and re-adjust pointers + doubleDir = true; - b2Index = doubleDirectory(b2Index, keyNew); - } + b2Index = doubleDirectory(b2Index, keyNew); + } - if (!doubleDir) { - // This is a bit more tricky, we have to cover scenarios where - // globalDepth - localDepth > 1 - // Here even after bucket splitting, multiple directory entries point to - // the new buckets - for (int i = 0; i < buckets.size(); i++) { - if ((buckets.get(i) & (1 << (localDepth))) != 0) { - directory[buckets.get(i)] = b2; - } else { - directory[buckets.get(i)] = b1; - } - } + if (!doubleDir) { + // This is a bit more tricky, we have to cover scenarios where + // globalDepth - localDepth > 1 + // Here even after bucket splitting, multiple directory entries point to + // the new buckets + for (int i = 0; i < buckets.size(); i++) { + if ((buckets.get(i) & (1 << (localDepth))) != 0) { + directory[buckets.get(i)] = b2; } else { - // Update the directory indexes in case of directory doubling - directory[b1Index] = b1; - directory[b2Index] = b2; + directory[buckets.get(i)] = b1; } + } + } else { + // Update the directory indexes in case of directory doubling + directory[b1Index] = b1; + directory[b2Index] = b2; } + } - // Doubles the directory and readjusts pointers. - private int doubleDirectory(int b2Index, ImmutableBytesPtr keyNew) { - // Double the directory in size, second half points to original first half - int newDirSize = 1 << (globalDepth + 1); + // Doubles the directory and readjusts pointers. + private int doubleDirectory(int b2Index, ImmutableBytesPtr keyNew) { + // Double the directory in size, second half points to original first half + int newDirSize = 1 << (globalDepth + 1); - // Ensure that the new directory size does not exceed size limits - Preconditions.checkArgument(newDirSize < Integer.MAX_VALUE); + // Ensure that the new directory size does not exceed size limits + Preconditions.checkArgument(newDirSize < Integer.MAX_VALUE); - // Double it! 
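// A compact sketch of the extendible-hashing bookkeeping used by SpillMap: a bucket is chosen
// by masking the low globalDepth bits of the key hash, and doubling the directory appends a
// mirrored copy of the existing entries before globalDepth is incremented. Buckets are plain
// ints here instead of FileMap pages.
import java.util.Arrays;

public class ExtendibleHashSketch {
  int globalDepth = 1;
  int[] directory = { 0, 1 };

  int bucketIndex(int keyHash) {
    // keep only the low globalDepth bits of the hash
    return keyHash & ((1 << globalDepth) - 1);
  }

  void doubleDirectory() {
    // the second half initially points at the same buckets as the first half
    int[] doubled = Arrays.copyOf(directory, directory.length * 2);
    System.arraycopy(directory, 0, doubled, directory.length, directory.length);
    directory = doubled;
    globalDepth++;
  }

  public static void main(String[] args) {
    ExtendibleHashSketch d = new ExtendibleHashSketch();
    System.out.println(d.bucketIndex(0b1011));            // 1: only the lowest bit is used
    d.doubleDirectory();
    System.out.println(d.bucketIndex(0b1011));            // 3: now the two lowest bits are used
    System.out.println(Arrays.toString(d.directory));     // [0, 1, 0, 1]
  }
}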
- FileMap[] newDirectory = new FileMap[newDirSize]; - for (int i = 0; i < directory.length; i++) { - newDirectory[i] = directory[i]; - newDirectory[i + directory.length] = directory[i]; + // Double it! + FileMap[] newDirectory = new FileMap[newDirSize]; + for (int i = 0; i < directory.length; i++) { + newDirectory[i] = directory[i]; + newDirectory[i + directory.length] = directory[i]; + } + directory = newDirectory; + newDirectory = null; + + // Adjust the index for new split bucket, according to the directory double + b2Index = (keyNew.hashCode() & ((1 << globalDepth) - 1)) | (1 << globalDepth); + + // Increment global depth + globalDepth++; + + return b2Index; + } + + /** + * Get a key from the spillable data structures. page is determined via hash partitioning, and a + * bloomFilter check is used to determine if its worth paging in the data. + */ + @Override + public byte[] get(Object key) { + if (!(key instanceof ImmutableBytesPtr)) { + // TODO ... work on type safety + } + ImmutableBytesPtr ikey = (ImmutableBytesPtr) key; + byte[] value = null; + + int bucketIndex = getBucketIndex(ikey); + FileMap byteMap = directory[bucketIndex]; + + // Decision based on bucket ID, not the directory ID due to the n:1 relationship + if (directory[curMapBufferIndex].pageIndex != byteMap.pageIndex) { + // map not paged in + FileMap curByteMap = directory[curMapBufferIndex]; + + // Use bloomFilter to check if key was spilled before + if (byteMap.containsKey(ikey.copyBytesIfNecessary())) { + // ensure consistency and flush current memory page to disk + // fflush current buffer + curByteMap.flushBuffer(); + // page in new buffer + byteMap.pageIn(); + // update index + curMapBufferIndex = bucketIndex; + } + } + // get KV from current map + value = byteMap.getPagedInElement(ikey); + return value; + } + + // Similar as get(Object key) function, however + // always pages in page a key is spilled to, no bloom filter decision + private byte[] getAlways(ImmutableBytesPtr key) { + byte[] value = null; + int bucketIndex = getBucketIndex(key); + FileMap byteMap = directory[bucketIndex]; + + if (directory[curMapBufferIndex].pageIndex != byteMap.pageIndex) { + FileMap curByteMap = directory[curMapBufferIndex]; + // ensure consistency and flush current memory page to disk + curByteMap.flushBuffer(); + + byteMap.pageIn(); + curMapBufferIndex = bucketIndex; + } + // get KV from current queue + value = byteMap.getPagedInElement(key); + return value; + } + + /** + * Spill a key First we discover if the key has been spilled before and load it into memory: #ref + * get() if it was loaded before just replace the old value in the memory page if it was not + * loaded before try to store it in the current page alternatively if not enough memory available, + * request new page. + */ + @Override + public byte[] put(ImmutableBytesPtr key, byte[] value) { + boolean redistributed = false; + // page in element and replace if present + byte[] spilledValue = getAlways(key); + + FileMap byteMap = directory[curMapBufferIndex]; + int index = curMapBufferIndex; + + // TODO: We split buckets until the new element fits onto a + // one of the new buckets. Might consider the use of an overflow + // bucket, especially in case the directory runs out of page IDs. + while (!byteMap.canFit(spilledValue, value)) { + // Element does not fit... Split the bucket! 
+ redistribute(index, key, value); + redistributed = true; + + index = getBucketIndex(key); + byteMap = directory[index]; + } + // Ensure that all pages that were paged in during redistribution are flushed back out + // to disk to keep memory footprint small. + if (redistributed) { + for (int i = 0; i < directory.length; i++) { + if (directory[i].pageIndex != byteMap.pageIndex) { + directory[i].flushBuffer(); } - directory = newDirectory; - newDirectory = null; - - // Adjust the index for new split bucket, according to the directory double - b2Index = (keyNew.hashCode() & ((1 << globalDepth) - 1)) | (1 << globalDepth); + } + // Ensure the page that receives the new key is in memory + spilledValue = getAlways(key); + } + byteMap.addElement(spilledValue, key, value); + + return value; + } + + /** + * Function returns the current spill file + */ + public SpillFile getSpillFile() { + return spillFile; + } + + /** + * This inner class represents the currently mapped file region. It uses a Map to represent the + * current in memory page for easy get() and update() calls on an individual key The class keeps + * track of the current size of the in memory page and handles flushing and paging in respectively + */ + private static class FileMap { + private final SpillFile spillFile; + private final int pageIndex; + private final int thresholdBytes; + private long totalResultSize; + private boolean pagedIn; + private int localDepth; + // dirtyPage flag tracks if a paged in page was modified + // if not, no need to flush it back out to disk + private boolean dirtyPage; + // Use a map for in memory page representation + Map pageMap = Maps.newHashMap(); + // Used to determine is an element was written to this page before or not + BloomFilter bFilter; + + public FileMap(int id, int thresholdBytes, int pageInserts, SpillFile spillFile) { + this.spillFile = spillFile; + // size threshold of a page + this.thresholdBytes = thresholdBytes; + this.pageIndex = id; + pageMap.clear(); + bFilter = BloomFilter.create(Funnels.byteArrayFunnel(), pageInserts); + pagedIn = true; + totalResultSize = 0; + localDepth = 1; + dirtyPage = true; + } - // Increment global depth - globalDepth++; + private boolean containsKey(byte[] key) { + return bFilter.mightContain(key); + } - return b2Index; + private boolean canFit(byte[] curValue, byte[] newValue) { + if (thresholdBytes < newValue.length) { + // TODO resize page size if single element is too big, + // Can this ever happen? + throw new RuntimeException("page size too small to store a single KV element"); + } + + int resultSize = newValue.length + Bytes.SIZEOF_INT; + if (curValue != null) { + // Key existed before + // Ensure to compensate for potential larger byte[] for agg + resultSize = Math.max(0, resultSize - (curValue.length + Bytes.SIZEOF_INT)); + } + + if ((thresholdBytes - totalResultSize) <= (resultSize)) { + // KV does not fit + return false; + } + // KV fits + return true; } - /** - * Get a key from the spillable data structures. page is determined via hash partitioning, and a bloomFilter check - * is used to determine if its worth paging in the data. - */ - @Override - public byte[] get(Object key) { - if (!(key instanceof ImmutableBytesPtr)) { - // TODO ... 
work on type safety - } - ImmutableBytesPtr ikey = (ImmutableBytesPtr)key; - byte[] value = null; - - int bucketIndex = getBucketIndex(ikey); - FileMap byteMap = directory[bucketIndex]; - - // Decision based on bucket ID, not the directory ID due to the n:1 relationship - if (directory[curMapBufferIndex].pageIndex != byteMap.pageIndex) { - // map not paged in - FileMap curByteMap = directory[curMapBufferIndex]; - - // Use bloomFilter to check if key was spilled before - if (byteMap.containsKey(ikey.copyBytesIfNecessary())) { - // ensure consistency and flush current memory page to disk - // fflush current buffer - curByteMap.flushBuffer(); - // page in new buffer - byteMap.pageIn(); - // update index - curMapBufferIndex = bucketIndex; + // Flush the current page to the memory mapped byte buffer + private void flushBuffer() { + if (pagedIn) { + // Only flush if page was changed + if (dirtyPage) { + Collection values = pageMap.values(); + RandomAccessFile file = spillFile.getPage(pageIndex); + // number of elements + try { + file.writeInt(values.size()); + int written = Bytes.SIZEOF_INT; + for (byte[] value : values) { + written += Bytes.SIZEOF_INT + value.length; + // safety check + if (written > SpillFile.DEFAULT_PAGE_SIZE) { + throw new BufferOverflowException(); + } + // element length + file.writeInt(value.length); + // element + file.write(value, 0, value.length); } + } catch (IOException ioe) { + // Error during key access on spilled resource + // TODO rework error handling + throw new RuntimeException(ioe); + } } - // get KV from current map - value = byteMap.getPagedInElement(ikey); - return value; + // Reset page stats + pageMap.clear(); + totalResultSize = 0; + } + pagedIn = false; + dirtyPage = false; } - // Similar as get(Object key) function, however - // always pages in page a key is spilled to, no bloom filter decision - private byte[] getAlways(ImmutableBytesPtr key) { - byte[] value = null; - int bucketIndex = getBucketIndex(key); - FileMap byteMap = directory[bucketIndex]; - - if (directory[curMapBufferIndex].pageIndex != byteMap.pageIndex) { - FileMap curByteMap = directory[curMapBufferIndex]; - // ensure consistency and flush current memory page to disk - curByteMap.flushBuffer(); - - byteMap.pageIn(); - curMapBufferIndex = bucketIndex; + // load a page into a map for fast element access + private void pageIn() { + if (!pagedIn) { + RandomAccessFile file = spillFile.getPage(pageIndex); + try { + int numElements = file.readInt(); + for (int i = 0; i < numElements; i++) { + int kvSize = file.readInt(); + byte[] data = new byte[kvSize]; + file.readFully(data); + pageMap.put(SpillManager.getKey(data), data); + totalResultSize += (data.length + Bytes.SIZEOF_INT); + } + } catch (IOException ioe) { + // Error during key access on spilled resource + // TODO rework error handling + throw new RuntimeException(ioe); } - // get KV from current queue - value = byteMap.getPagedInElement(key); - return value; + pagedIn = true; + dirtyPage = false; + } } /** - * Spill a key First we discover if the key has been spilled before and load it into memory: #ref get() if it was - * loaded before just replace the old value in the memory page if it was not loaded before try to store it in the - * current page alternatively if not enough memory available, request new page. 
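// A minimal sketch of the per-page membership test FileMap keeps above: a Bloom filter over
// raw key bytes decides whether paging a spill page back in is worthwhile. Plain Guava
// BloomFilter/Funnels calls are used here; Phoenix routes the same API through its shaded
// thirdparty package, and the expected-insertions estimate is an assumption for illustration.
import java.nio.charset.StandardCharsets;
import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

public class PageBloomSketch {
  public static void main(String[] args) {
    int expectedInsertsPerPage = 4096 / 64;   // page size divided by an estimated value size
    BloomFilter<byte[]> filter =
        BloomFilter.create(Funnels.byteArrayFunnel(), expectedInsertsPerPage);

    byte[] spilledKey = "group-key-42".getBytes(StandardCharsets.UTF_8);
    filter.put(spilledKey);                   // recorded when the key is written to this page

    // mightContain() may return a false positive but never a false negative,
    // so a "false" answer safely skips the page-in.
    System.out.println(filter.mightContain(spilledKey));                                       // true
    System.out.println(filter.mightContain("never-spilled".getBytes(StandardCharsets.UTF_8))); // likely false
  }
}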
+ * Return a cache element currently page into memory Direct access via mapped page map */ - @Override - public byte[] put(ImmutableBytesPtr key, byte[] value) { - boolean redistributed = false; - // page in element and replace if present - byte[] spilledValue = getAlways(key); - - FileMap byteMap = directory[curMapBufferIndex]; - int index = curMapBufferIndex; - - // TODO: We split buckets until the new element fits onto a - // one of the new buckets. Might consider the use of an overflow - // bucket, especially in case the directory runs out of page IDs. - while (!byteMap.canFit(spilledValue, value)) { - // Element does not fit... Split the bucket! - redistribute(index, key, value); - redistributed = true; - - index = getBucketIndex(key); - byteMap = directory[index]; - } - // Ensure that all pages that were paged in during redistribution are flushed back out - // to disk to keep memory footprint small. - if (redistributed) { - for (int i = 0; i < directory.length; i++) { - if (directory[i].pageIndex != byteMap.pageIndex) { - directory[i].flushBuffer(); - } - } - // Ensure the page that receives the new key is in memory - spilledValue = getAlways(key); - } - byteMap.addElement(spilledValue, key, value); - - return value; + public byte[] getPagedInElement(ImmutableBytesPtr key) { + return pageMap.get(key); } /** - * Function returns the current spill file + * Inserts / Replaces cache element in the currently loaded page. Direct access via mapped page + * map */ - public SpillFile getSpillFile() { - return spillFile; + public void addElement(byte[] spilledValue, ImmutableBytesPtr key, byte[] value) { + + // put Element into map + pageMap.put(key, value); + // Update bloom filter + bFilter.put(key.copyBytesIfNecessary()); + // track current Map size to prevent Buffer overflows + if (spilledValue != null) { + // if previous key was present, just add the size difference + totalResultSize += Math.max(0, value.length - (spilledValue.length)); + } else { + // Add new size information + totalResultSize += (value.length + Bytes.SIZEOF_INT); + } + + dirtyPage = true; } /** - * This inner class represents the currently mapped file region. 
It uses a Map to represent the current in memory - * page for easy get() and update() calls on an individual key The class keeps track of the current size of the in - * memory page and handles flushing and paging in respectively + * Returns a value iterator over the pageMap */ - private static class FileMap { - private final SpillFile spillFile; - private final int pageIndex; - private final int thresholdBytes; - private long totalResultSize; - private boolean pagedIn; - private int localDepth; - // dirtyPage flag tracks if a paged in page was modified - // if not, no need to flush it back out to disk - private boolean dirtyPage; - // Use a map for in memory page representation - Map pageMap = Maps.newHashMap(); - // Used to determine is an element was written to this page before or not - BloomFilter bFilter; - - public FileMap(int id, int thresholdBytes, int pageInserts, SpillFile spillFile) { - this.spillFile = spillFile; - // size threshold of a page - this.thresholdBytes = thresholdBytes; - this.pageIndex = id; - pageMap.clear(); - bFilter = BloomFilter.create(Funnels.byteArrayFunnel(), pageInserts); - pagedIn = true; - totalResultSize = 0; - localDepth = 1; - dirtyPage = true; - } - - private boolean containsKey(byte[] key) { - return bFilter.mightContain(key); - } - - private boolean canFit(byte[] curValue, byte[] newValue) { - if (thresholdBytes < newValue.length) { - // TODO resize page size if single element is too big, - // Can this ever happen? - throw new RuntimeException("page size too small to store a single KV element"); - } - - int resultSize = newValue.length + Bytes.SIZEOF_INT; - if (curValue != null) { - // Key existed before - // Ensure to compensate for potential larger byte[] for agg - resultSize = Math.max(0, resultSize - (curValue.length + Bytes.SIZEOF_INT)); - } - - if ((thresholdBytes - totalResultSize) <= (resultSize)) { - // KV does not fit - return false; - } - // KV fits - return true; - } - - // Flush the current page to the memory mapped byte buffer - private void flushBuffer() { - if (pagedIn) { - // Only flush if page was changed - if (dirtyPage) { - Collection values = pageMap.values(); - RandomAccessFile file = spillFile.getPage(pageIndex); - // number of elements - try { - file.writeInt(values.size()); - int written = Bytes.SIZEOF_INT; - for (byte[] value : values) { - written += Bytes.SIZEOF_INT + value.length; - // safety check - if (written > SpillFile.DEFAULT_PAGE_SIZE) { - throw new BufferOverflowException(); - } - // element length - file.writeInt(value.length); - // element - file.write(value, 0, value.length); - } - } catch (IOException ioe) { - // Error during key access on spilled resource - // TODO rework error handling - throw new RuntimeException(ioe); - } - } - // Reset page stats - pageMap.clear(); - totalResultSize = 0; - } - pagedIn = false; - dirtyPage = false; - } - - // load a page into a map for fast element access - private void pageIn() { - if (!pagedIn) { - RandomAccessFile file = spillFile.getPage(pageIndex); - try { - int numElements = file.readInt(); - for (int i = 0; i < numElements; i++) { - int kvSize = file.readInt(); - byte[] data = new byte[kvSize]; - file.readFully(data); - pageMap.put(SpillManager.getKey(data), data); - totalResultSize += (data.length + Bytes.SIZEOF_INT); - } - } catch (IOException ioe) { - // Error during key access on spilled resource - // TODO rework error handling - throw new RuntimeException(ioe); - } - pagedIn = true; - dirtyPage = false; - } - } - - /** - * Return a cache element currently page 
into memory Direct access via mapped page map - * - * @param key - * @return - */ - public byte[] getPagedInElement(ImmutableBytesPtr key) { - return pageMap.get(key); - } - - /** - * Inserts / Replaces cache element in the currently loaded page. Direct access via mapped page map - * - * @param key - * @param value - */ - public void addElement(byte[] spilledValue, ImmutableBytesPtr key, byte[] value) { - - // put Element into map - pageMap.put(key, value); - // Update bloom filter - bFilter.put(key.copyBytesIfNecessary()); - // track current Map size to prevent Buffer overflows - if (spilledValue != null) { - // if previous key was present, just add the size difference - totalResultSize += Math.max(0, value.length - (spilledValue.length)); - } else { - // Add new size information - totalResultSize += (value.length + Bytes.SIZEOF_INT); - } - - dirtyPage = true; - } - - /** - * Returns a value iterator over the pageMap - */ - public Iterator getPageMapEntries() { - pageIn(); - return pageMap.values().iterator(); - } + public Iterator getPageMapEntries() { + pageIn(); + return pageMap.values().iterator(); } - - /** - * Iterate over all spilled elements, including the ones that are currently paged into memory - */ - @Override - public Iterator iterator() { - directory[curMapBufferIndex].flushBuffer(); - - return new Iterator() { - int pageIndex = 0; - Iterator entriesIter = directory[pageIndex].getPageMapEntries(); - HashSet dups = new HashSet(); - - @Override - public boolean hasNext() { - if (!entriesIter.hasNext()) { - boolean found = false; - // Clear in memory map - - while (!found) { - pageIndex++; - if (pageIndex >= directory.length) { return false; } - directory[pageIndex - 1].pageMap.clear(); - // get keys from all spilled pages - if (!dups.contains(directory[pageIndex].pageIndex)) { - dups.add(directory[pageIndex].pageIndex); - entriesIter = directory[pageIndex].getPageMapEntries(); - if (entriesIter.hasNext()) { - found = true; - } - } - } - } - dups.add(directory[pageIndex].pageIndex); - return true; - } - - @Override - public byte[] next() { - // get elements from in memory map first - return entriesIter.next(); + } + + /** + * Iterate over all spilled elements, including the ones that are currently paged into memory + */ + @Override + public Iterator iterator() { + directory[curMapBufferIndex].flushBuffer(); + + return new Iterator() { + int pageIndex = 0; + Iterator entriesIter = directory[pageIndex].getPageMapEntries(); + HashSet dups = new HashSet(); + + @Override + public boolean hasNext() { + if (!entriesIter.hasNext()) { + boolean found = false; + // Clear in memory map + + while (!found) { + pageIndex++; + if (pageIndex >= directory.length) { + return false; } - - @Override - public void remove() { - throw new IllegalAccessError("Iterator does not support removal operation"); + directory[pageIndex - 1].pageMap.clear(); + // get keys from all spilled pages + if (!dups.contains(directory[pageIndex].pageIndex)) { + dups.add(directory[pageIndex].pageIndex); + entriesIter = directory[pageIndex].getPageMapEntries(); + if (entriesIter.hasNext()) { + found = true; + } } - }; - } - - // TODO implement this method to make the SpillMap a true Map implementation - @Override - public Set> entrySet() { - throw new IllegalAccessError("entrySet is not supported for this type of cache"); - } + } + } + dups.add(directory[pageIndex].pageIndex); + return true; + } + + @Override + public byte[] next() { + // get elements from in memory map first + return entriesIter.next(); + } + + @Override 
+ public void remove() { + throw new IllegalAccessError("Iterator does not support removal operation"); + } + }; + } + + // TODO implement this method to make the SpillMap a true Map implementation + @Override + public Set> entrySet() { + throw new IllegalAccessError("entrySet is not supported for this type of cache"); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java index f677b081f23..bd06fea240c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.cache.aggcache; import static org.apache.phoenix.query.QueryConstants.AGG_TIMESTAMP; @@ -62,371 +61,359 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * The main entry point is in GroupedAggregateRegionObserver. It instantiates a SpillableGroupByCache and invokes a - * get() method on it. There is no: {@code "if key not exists -> put into map" } case, since the cache is a Loading cache and - * therefore handles the put under the covers. I tried to implement the final cache element accesses (RegionScanner - * below) streaming, i.e. there is just an iterator on it and removed the existing result materialization. - * SpillableGroupByCache implements a LRU cache using a LinkedHashMap with access order. There is a configurable an - * upper and lower size limit in bytes which are used as follows to compute the initial cache size in number of - * elements: Max(lowerBoundElements, Min(upperBoundElements, estimatedCacheSize)). Once the number of cached elements - * exceeds this number, the cache size is increased by a factor of 1.5. This happens until the additional memory to grow - * the cache cannot be requested. At this point the Cache starts spilling elements. As long as no eviction happens no - * spillable data structures are allocated, this only happens as soon as the first element is evicted from the cache. We - * cannot really make any assumptions on which keys arrive at the map, but assume the LRU would at least cover the cases - * where some keys have a slight skew and they should stay memory resident. Once a key gets evicted, the spillManager is - * instantiated. It basically takes care of spilling an element to disk and does all the SERDE work. It pre-allocates a - * configurable number of SpillFiles (spill partition) which are memory mapped temp files. The SpillManager keeps a list - * of these and hash distributes the keys within this list. Once an element gets spilled, it is serialized and will only - * get deserialized again, when it is requested from the client, i.e. loaded back into the LRU cache. The SpillManager - * holds a single SpillMap object in memory for every spill partition (SpillFile). 
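The addElement hunk above updates a per-page bloom filter on every insert, and the class comment that follows explains why: each directory entry keeps such a filter so a lookup never pays for a page-in when the key was never spilled to that page. A small, self-contained sketch of that guard, assuming Guava's BloomFilter on the classpath; SpillPageSketch, pageMap and pageIn are illustrative stand-ins, not the actual SpillMap types.

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

// Illustrative sketch only (not Phoenix's SpillMap): answer gets from the
// in-memory page when possible, and consult the bloom filter before paying
// for a page-in from the memory-mapped spill file.
final class SpillPageSketch {
  private final Map<ByteBuffer, byte[]> pageMap = new HashMap<>();
  private final BloomFilter<byte[]> bloom =
      BloomFilter.create(Funnels.byteArrayFunnel(), 10_000);

  void add(byte[] key, byte[] value) {
    pageMap.put(ByteBuffer.wrap(key), value);
    bloom.put(key); // remember that this key was ever stored on this page
  }

  byte[] get(byte[] key) {
    byte[] v = pageMap.get(ByteBuffer.wrap(key));
    if (v != null) {
      return v; // currently paged in
    }
    if (!bloom.mightContain(key)) {
      return null; // definitely never spilled here, skip the page-in entirely
    }
    return pageIn(key); // possible hit: deserialize the page and retry
  }

  private byte[] pageIn(byte[] key) {
    // the real code maps the page back in and repopulates pageMap first
    return pageMap.get(ByteBuffer.wrap(key));
  }
}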
The SpillMap is an in memory Map - * representation of a single page of spilled serialized key/value pairs. To achieve fast key lookup the key is hash - * partitioned into random pages of the current spill file. The code implements an extendible hashing approach which - * dynamically adjusts the hash function, in order to adapt to growing number of storage pages and avoiding long chains - * of overflow buckets. For an excellent discussion of the algorithm please refer to the following online resource: - * http://db.inf.uni-tuebingen.de/files/teaching/ws1011/db2/db2-hash-indexes.pdf . For this, each SpillFile keeps a - * directory of pointers to Integer.MAX_VALUE 4K pages in memory, which allows each directory to address more pages than - * a single memory mapped temp file could theoretically store. In case directory doubling, requests a page index that - * exceeds the limits of the initial temp file limits, the implementation dynamically allocates additional temp files to - * the SpillFile. The directory starts with a global depth of 1 and therefore a directory size of 2 buckets. Only during - * bucket split and directory doubling more than one page is temporarily kept in memory until all elements have been - * redistributed. The current implementation conducts bucket splits as long as an element does not fit onto a page. No - * overflow chain is created, which might be an alternative. For get requests, each directory entry maintains a - * bloomFilter to prevent page-in operations in case an element has never been spilled before. The deserialization is - * only triggered when a key a loaded back into the LRU cache. The aggregators are returned from the LRU cache and the - * next value is computed. In case the key is not found on any page, the Loader create new aggregators for it. + * The main entry point is in GroupedAggregateRegionObserver. It instantiates a + * SpillableGroupByCache and invokes a get() method on it. There is no: + * {@code "if key not exists -> put into map" } case, since the cache is a Loading cache and + * therefore handles the put under the covers. I tried to implement the final cache element accesses + * (RegionScanner below) streaming, i.e. there is just an iterator on it and removed the existing + * result materialization. SpillableGroupByCache implements a LRU cache using a LinkedHashMap with + * access order. There is a configurable an upper and lower size limit in bytes which are used as + * follows to compute the initial cache size in number of elements: Max(lowerBoundElements, + * Min(upperBoundElements, estimatedCacheSize)). Once the number of cached elements exceeds this + * number, the cache size is increased by a factor of 1.5. This happens until the additional memory + * to grow the cache cannot be requested. At this point the Cache starts spilling elements. As long + * as no eviction happens no spillable data structures are allocated, this only happens as soon as + * the first element is evicted from the cache. We cannot really make any assumptions on which keys + * arrive at the map, but assume the LRU would at least cover the cases where some keys have a + * slight skew and they should stay memory resident. Once a key gets evicted, the spillManager is + * instantiated. It basically takes care of spilling an element to disk and does all the SERDE work. + * It pre-allocates a configurable number of SpillFiles (spill partition) which are memory mapped + * temp files. 
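The reformatted class comment above gives the initial cache size as Max(lowerBoundElements, Min(upperBoundElements, estimatedCacheSize)) and describes growing the LRU by a factor of 1.5 until additional memory can no longer be requested, at which point eviction starts spilling. A minimal, generic sketch of that policy, assuming hypothetical MemoryBudget and Spiller interfaces in place of the Phoenix memory-chunk and SpillManager plumbing:

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the "grow by 1.5x until memory runs out, then spill" eviction
// policy; everything here is illustrative, not the Phoenix implementation.
final class GrowThenSpillCache<K, V> extends LinkedHashMap<K, V> {
  interface MemoryBudget { boolean tryResize(long newBytes); }  // hypothetical
  interface Spiller<K, V> { void spill(K key, V value); }       // hypothetical

  private final MemoryBudget budget;
  private final Spiller<K, V> spiller;
  private final long bytesPerEntry;
  private int maxEntries;
  private boolean spilling = false;

  GrowThenSpillCache(int initialEntries, long bytesPerEntry,
      MemoryBudget budget, Spiller<K, V> spiller) {
    super(initialEntries, 0.75f, true); // accessOrder=true makes this an LRU
    this.maxEntries = initialEntries;
    this.bytesPerEntry = bytesPerEntry;
    this.budget = budget;
    this.spiller = spiller;
  }

  // Initial size rule from the class comment:
  // Max(lowerBoundElements, Min(upperBoundElements, estimatedCacheSize))
  static int initialCacheSize(long maxCacheBytes, int estValueBytes, int estSizeNum) {
    int upperBoundElements = (int) (maxCacheBytes / estValueBytes);
    int lowerBoundElements = 4096 / estValueBytes; // 4K lower bound
    return Math.max(lowerBoundElements, Math.min(upperBoundElements, estSizeNum));
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    if (!spilling && size() > maxEntries) {
      int grown = (int) (maxEntries * 1.5f);
      if (budget.tryResize((long) grown * bytesPerEntry)) {
        maxEntries = grown;  // keep everything in memory a while longer
      } else {
        spilling = true;     // no more memory: from now on evict and spill
      }
    }
    if (spilling) {
      spiller.spill(eldest.getKey(), eldest.getValue());
      return true;           // drop the eldest entry from the in-memory map
    }
    return false;
  }
}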
The SpillManager keeps a list of these and hash distributes the keys within this + * list. Once an element gets spilled, it is serialized and will only get deserialized again, when + * it is requested from the client, i.e. loaded back into the LRU cache. The SpillManager holds a + * single SpillMap object in memory for every spill partition (SpillFile). The SpillMap is an in + * memory Map representation of a single page of spilled serialized key/value pairs. To achieve fast + * key lookup the key is hash partitioned into random pages of the current spill file. The code + * implements an extendible hashing approach which dynamically adjusts the hash function, in order + * to adapt to growing number of storage pages and avoiding long chains of overflow buckets. For an + * excellent discussion of the algorithm please refer to the following online resource: + * http://db.inf.uni-tuebingen.de/files/teaching/ws1011/db2/db2-hash-indexes.pdf . For this, each + * SpillFile keeps a directory of pointers to Integer.MAX_VALUE 4K pages in memory, which allows + * each directory to address more pages than a single memory mapped temp file could theoretically + * store. In case directory doubling, requests a page index that exceeds the limits of the initial + * temp file limits, the implementation dynamically allocates additional temp files to the + * SpillFile. The directory starts with a global depth of 1 and therefore a directory size of 2 + * buckets. Only during bucket split and directory doubling more than one page is temporarily kept + * in memory until all elements have been redistributed. The current implementation conducts bucket + * splits as long as an element does not fit onto a page. No overflow chain is created, which might + * be an alternative. For get requests, each directory entry maintains a bloomFilter to prevent + * page-in operations in case an element has never been spilled before. The deserialization is only + * triggered when a key a loaded back into the LRU cache. The aggregators are returned from the LRU + * cache and the next value is computed. In case the key is not found on any page, the Loader create + * new aggregators for it. */ public class SpillableGroupByCache implements GroupByCache { - private static final Logger LOGGER = LoggerFactory.getLogger(SpillableGroupByCache.class); - - // Min size of 1st level main memory cache in bytes --> lower bound - private static final int SPGBY_CACHE_MIN_SIZE = 4096; // 4K - - // TODO Generally better to use Collection API with generics instead of - // array types - private final LinkedHashMap cache; - private final ConcurrentMap - aggregateValueToLastScannedRowKeys; - private final boolean isIncompatibleClient; - private SpillManager spillManager = null; - private long totalNumElements; - private final ServerAggregators aggregators; - private final RegionCoprocessorEnvironment env; - private final MemoryChunk chunk; - - /* - * inner class that makes cache queryable for other classes that should not get the full instance. 
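The comment above points to extendible hashing: the directory starts at global depth 1 (two buckets) and doubles whenever a bucket split needs one more addressing bit. A toy illustration of that directory arithmetic, not the Phoenix implementation; local depths and the actual page splitting are omitted.

// Illustrative sketch of extendible-hashing directory addressing: the low
// "globalDepth" bits of the key hash select the directory slot, and doubling
// the directory simply duplicates the existing slot pointers.
final class ExtendibleDirectorySketch {
  private int globalDepth = 1;                     // directory starts with 2 slots
  private int[] pageIndexForSlot = new int[] { 0, 1 };

  int slotFor(int keyHash) {
    return keyHash & ((1 << globalDepth) - 1);     // low-order bits pick the slot
  }

  void doubleDirectory() {
    int[] doubled = new int[pageIndexForSlot.length * 2];
    // every existing slot is duplicated; split pages then diverge one by one
    System.arraycopy(pageIndexForSlot, 0, doubled, 0, pageIndexForSlot.length);
    System.arraycopy(pageIndexForSlot, 0, doubled, pageIndexForSlot.length,
        pageIndexForSlot.length);
    pageIndexForSlot = doubled;
    globalDepth++;
  }
}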
Queryable view of - * the cache - */ - public class QueryCache { - public boolean isKeyContained(ImmutableBytesPtr key) { - return cache.containsKey(key); - } + private static final Logger LOGGER = LoggerFactory.getLogger(SpillableGroupByCache.class); + + // Min size of 1st level main memory cache in bytes --> lower bound + private static final int SPGBY_CACHE_MIN_SIZE = 4096; // 4K + + // TODO Generally better to use Collection API with generics instead of + // array types + private final LinkedHashMap cache; + private final ConcurrentMap aggregateValueToLastScannedRowKeys; + private final boolean isIncompatibleClient; + private SpillManager spillManager = null; + private long totalNumElements; + private final ServerAggregators aggregators; + private final RegionCoprocessorEnvironment env; + private final MemoryChunk chunk; + + /* + * inner class that makes cache queryable for other classes that should not get the full instance. + * Queryable view of the cache + */ + public class QueryCache { + public boolean isKeyContained(ImmutableBytesPtr key) { + return cache.containsKey(key); } - - /** - * Instantiates a Loading LRU Cache that stores key / aggregator[] tuples used for group by queries - * - * @param env - * @param tenantId - * @param aggs - * @param estSizeNum - * @param isIncompatibleClient - */ - public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, - ServerAggregators aggs, final int estSizeNum, - boolean isIncompatibleClient) { - this.isIncompatibleClient = isIncompatibleClient; - totalNumElements = 0; - this.aggregators = aggs; - this.env = env; - - final int estValueSize = aggregators.getEstimatedByteSize(); - final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); - - // Compute Map initial map - final Configuration conf = env.getConfiguration(); - final long maxCacheSizeConf = conf.getLongBytes(GROUPBY_MAX_CACHE_SIZE_ATTRIB, - DEFAULT_GROUPBY_MAX_CACHE_MAX); - final int numSpillFilesConf = conf.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES); - - final int maxSizeNum = (int)(maxCacheSizeConf / estValueSize); - final int minSizeNum = (SPGBY_CACHE_MIN_SIZE / estValueSize); - - // use upper and lower bounds for the cache size - final int maxCacheSize = Math.max(minSizeNum, Math.min(maxSizeNum, estSizeNum)); - final long estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(maxCacheSize, estValueSize); - try { - this.chunk = tenantCache.getMemoryManager().allocate(estSize); - } catch (InsufficientMemoryException ime) { - LOGGER.error("Requested Map size exceeds memory limit, " + - "please decrease max size via config paramter: " - + GROUPBY_MAX_CACHE_SIZE_ATTRIB); - throw ime; - } - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize); - } - - aggregateValueToLastScannedRowKeys = Maps.newConcurrentMap(); - // LRU cache implemented as LinkedHashMap with access order - cache = new LinkedHashMap(maxCacheSize, 0.75f, true) { - boolean spill = false; - int cacheSize = maxCacheSize; - - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - if (!spill && size() > cacheSize) { // increase allocation - cacheSize *= 1.5f; - long estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(cacheSize, estValueSize); - try { - chunk.resize(estSize); - } catch (InsufficientMemoryException im) { - // Cannot extend Map anymore, start spilling - spill = true; - } - } - - if (spill) { - try { - if (spillManager == null) { - 
// Lazy instantiation of spillable data - // structures - // - // Only create spill data structs if LRU - // cache is too small - spillManager = new SpillManager(numSpillFilesConf, aggregators, env.getConfiguration(), - new QueryCache()); - } - spillManager.spill(eldest.getKey(), eldest.getValue()); - } catch (IOException ioe) { - // Ensure that we always close and delete the temp files - try { - throw new RuntimeException(ioe); - } finally { - Closeables.closeQuietly(SpillableGroupByCache.this); - } - } - return true; - } - - return false; - } - }; + } + + /** + * Instantiates a Loading LRU Cache that stores key / aggregator[] tuples used for group by + * queries + */ + public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, + ServerAggregators aggs, final int estSizeNum, boolean isIncompatibleClient) { + this.isIncompatibleClient = isIncompatibleClient; + totalNumElements = 0; + this.aggregators = aggs; + this.env = env; + + final int estValueSize = aggregators.getEstimatedByteSize(); + final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); + + // Compute Map initial map + final Configuration conf = env.getConfiguration(); + final long maxCacheSizeConf = + conf.getLongBytes(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX); + final int numSpillFilesConf = + conf.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES); + + final int maxSizeNum = (int) (maxCacheSizeConf / estValueSize); + final int minSizeNum = (SPGBY_CACHE_MIN_SIZE / estValueSize); + + // use upper and lower bounds for the cache size + final int maxCacheSize = Math.max(minSizeNum, Math.min(maxSizeNum, estSizeNum)); + final long estSize = + GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(maxCacheSize, estValueSize); + try { + this.chunk = tenantCache.getMemoryManager().allocate(estSize); + } catch (InsufficientMemoryException ime) { + LOGGER.error("Requested Map size exceeds memory limit, " + + "please decrease max size via config paramter: " + GROUPBY_MAX_CACHE_SIZE_ATTRIB); + throw ime; } - /** - * Size function returns the current number of cached elements - */ - @Override - public long size() { - return totalNumElements; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize); } - /** - * Extract an element from the Cache If element is not present in in-memory cache / or in spill files cache - * implements an implicit put() of a new key/value tuple and loads it into the cache - */ - @Override - public Aggregator[] cache(ImmutableBytesPtr cacheKey) { - ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); - Aggregator[] rowAggregators = cache.get(key); - if (rowAggregators == null) { - // If Aggregators not found for this distinct - // value, clone our original one (we need one - // per distinct value) - if (spillManager != null) { - // Spill manager present, check if key has been - // spilled before - try { - rowAggregators = spillManager.loadEntry(key); - } catch (IOException ioe) { - // Ensure that we always close and delete the temp files - try { - throw new RuntimeException(ioe); - } finally { - Closeables.closeQuietly(SpillableGroupByCache.this); - } - } - } - if (rowAggregators == null) { - // No, key never spilled before, create a new tuple - rowAggregators = aggregators.newAggregators(env.getConfiguration()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Adding new aggregate bucket for row key " - + Bytes.toStringBinary(key.get(), key.getOffset(), 
key.getLength())); - } - } - if (cache.put(key, rowAggregators) == null) { - totalNumElements++; - } + aggregateValueToLastScannedRowKeys = Maps.newConcurrentMap(); + // LRU cache implemented as LinkedHashMap with access order + cache = new LinkedHashMap(maxCacheSize, 0.75f, true) { + boolean spill = false; + int cacheSize = maxCacheSize; + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + if (!spill && size() > cacheSize) { // increase allocation + cacheSize *= 1.5f; + long estSize = + GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(cacheSize, estValueSize); + try { + chunk.resize(estSize); + } catch (InsufficientMemoryException im) { + // Cannot extend Map anymore, start spilling + spill = true; + } } - return rowAggregators; - } - /** - * Iterator over the cache and the spilled data structures by returning CacheEntries. CacheEntries are either - * extracted from the LRU cache or from the spillable data structures.The key/value tuples are returned in - * non-deterministic order. - */ - private final class EntryIterator implements Iterator> { - final Iterator> cacheIter; - final Iterator spilledCacheIter; - - private EntryIterator() { - cacheIter = cache.entrySet().iterator(); - if (spillManager != null) { - spilledCacheIter = spillManager.newDataIterator(); - } else { - spilledCacheIter = null; + if (spill) { + try { + if (spillManager == null) { + // Lazy instantiation of spillable data + // structures + // + // Only create spill data structs if LRU + // cache is too small + spillManager = new SpillManager(numSpillFilesConf, aggregators, + env.getConfiguration(), new QueryCache()); } - } - - @Override - public boolean hasNext() { - return cacheIter.hasNext(); - } - - @Override - public Map.Entry next() { - if (spilledCacheIter != null && spilledCacheIter.hasNext()) { - try { - byte[] value = spilledCacheIter.next(); - // Deserialize into a CacheEntry - Map.Entry spilledEntry = spillManager.toCacheEntry(value); - - boolean notFound = false; - // check against map and return only if not present - while (cache.containsKey(spilledEntry.getKey())) { - // LRU Cache entries always take precedence, - // since they are more up to date - if (spilledCacheIter.hasNext()) { - value = spilledCacheIter.next(); - spilledEntry = spillManager.toCacheEntry(value); - } else { - notFound = true; - break; - } - } - if (!notFound) { - // Return a spilled entry, this only happens if the - // entry was not - // found in the LRU cache - return spilledEntry; - } - } catch (IOException ioe) { - // TODO rework error handling - throw new RuntimeException(ioe); - } + spillManager.spill(eldest.getKey(), eldest.getValue()); + } catch (IOException ioe) { + // Ensure that we always close and delete the temp files + try { + throw new RuntimeException(ioe); + } finally { + Closeables.closeQuietly(SpillableGroupByCache.this); } - // Spilled elements exhausted - // Finally return all elements from LRU cache - Map.Entry entry = cacheIter.next(); - return new CacheEntry(entry.getKey(), entry.getValue()); + } + return true; } - /** - * Remove??? Denied!!! 
- */ - @Override - public void remove() { - throw new IllegalAccessError("Remove is not supported for this type of iterator"); + return false; + } + }; + } + + /** + * Size function returns the current number of cached elements + */ + @Override + public long size() { + return totalNumElements; + } + + /** + * Extract an element from the Cache If element is not present in in-memory cache / or in spill + * files cache implements an implicit put() of a new key/value tuple and loads it into the cache + */ + @Override + public Aggregator[] cache(ImmutableBytesPtr cacheKey) { + ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); + Aggregator[] rowAggregators = cache.get(key); + if (rowAggregators == null) { + // If Aggregators not found for this distinct + // value, clone our original one (we need one + // per distinct value) + if (spillManager != null) { + // Spill manager present, check if key has been + // spilled before + try { + rowAggregators = spillManager.loadEntry(key); + } catch (IOException ioe) { + // Ensure that we always close and delete the temp files + try { + throw new RuntimeException(ioe); + } finally { + Closeables.closeQuietly(SpillableGroupByCache.this); + } + } + } + if (rowAggregators == null) { + // No, key never spilled before, create a new tuple + rowAggregators = aggregators.newAggregators(env.getConfiguration()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Adding new aggregate bucket for row key " + + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())); } + } + if (cache.put(key, rowAggregators) == null) { + totalNumElements++; + } + } + return rowAggregators; + } + + /** + * Iterator over the cache and the spilled data structures by returning CacheEntries. CacheEntries + * are either extracted from the LRU cache or from the spillable data structures.The key/value + * tuples are returned in non-deterministic order. 
+ */ + private final class EntryIterator + implements Iterator> { + final Iterator> cacheIter; + final Iterator spilledCacheIter; + + private EntryIterator() { + cacheIter = cache.entrySet().iterator(); + if (spillManager != null) { + spilledCacheIter = spillManager.newDataIterator(); + } else { + spilledCacheIter = null; + } } - /** - * Closes cache and releases spill resources - * - * @throws IOException - */ @Override - public void close() throws IOException { - // Close spillable resources - Closeables.closeQuietly(spillManager); - Closeables.closeQuietly(chunk); + public boolean hasNext() { + return cacheIter.hasNext(); } @Override - public RegionScanner getScanner(final RegionScanner s) { - final Iterator> cacheIter = new EntryIterator(); - - // scanner using the spillable implementation - return new BaseRegionScanner(s) { - @Override - public void close() throws IOException { - try { - s.close(); - } finally { - // Always close gbCache and swallow possible Exceptions - Closeables.closeQuietly(SpillableGroupByCache.this); - } - } - - public boolean next(List result, ScannerContext scannerContext) - throws IOException { - return next(result); - } - - @Override - public boolean next(List results) throws IOException { - if (!cacheIter.hasNext()) { - return false; - } - Map.Entry ce = cacheIter.next(); - ImmutableBytesWritable aggregateGroupValPtr = ce.getKey(); - Aggregator[] aggs = ce.getValue(); - byte[] aggregateArrayBytes = aggregators.toBytes(aggs); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Adding new distinct group: " - + Bytes.toStringBinary(aggregateGroupValPtr.get(), - aggregateGroupValPtr.getOffset(), aggregateGroupValPtr.getLength()) - + " with aggregators " + Arrays.toString(aggs) + " value = " - + Bytes.toStringBinary(aggregateArrayBytes)); - } - if (!isIncompatibleClient) { - ImmutableBytesWritable lastScannedRowKey = - aggregateValueToLastScannedRowKeys.get(aggregateGroupValPtr); - byte[] aggregateGroupValueBytes = new byte[aggregateGroupValPtr.getLength()]; - System.arraycopy(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), - aggregateGroupValueBytes, 0, - aggregateGroupValueBytes.length); - byte[] finalValue = - ByteUtil.concat( - PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), - aggregateGroupValueBytes, aggregateArrayBytes); - results.add( - PhoenixKeyValueUtil.newKeyValue( - lastScannedRowKey.get(), - lastScannedRowKey.getOffset(), - lastScannedRowKey.getLength(), - GROUPED_AGGREGATOR_VALUE_BYTES, - GROUPED_AGGREGATOR_VALUE_BYTES, - AGG_TIMESTAMP, - finalValue, - 0, - finalValue.length)); - } else { - results.add(PhoenixKeyValueUtil.newKeyValue( - aggregateGroupValPtr.get(), - aggregateGroupValPtr.getOffset(), - aggregateGroupValPtr.getLength(), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - AGG_TIMESTAMP, - aggregateArrayBytes, 0, - aggregateArrayBytes.length)); - } - return cacheIter.hasNext(); + public Map.Entry next() { + if (spilledCacheIter != null && spilledCacheIter.hasNext()) { + try { + byte[] value = spilledCacheIter.next(); + // Deserialize into a CacheEntry + Map.Entry spilledEntry = + spillManager.toCacheEntry(value); + + boolean notFound = false; + // check against map and return only if not present + while (cache.containsKey(spilledEntry.getKey())) { + // LRU Cache entries always take precedence, + // since they are more up to date + if (spilledCacheIter.hasNext()) { + value = spilledCacheIter.next(); + spilledEntry = spillManager.toCacheEntry(value); + } else { + notFound = true; + break; } - }; + } + if (!notFound) { + // 
Return a spilled entry, this only happens if the + // entry was not + // found in the LRU cache + return spilledEntry; + } + } catch (IOException ioe) { + // TODO rework error handling + throw new RuntimeException(ioe); + } + } + // Spilled elements exhausted + // Finally return all elements from LRU cache + Map.Entry entry = cacheIter.next(); + return new CacheEntry(entry.getKey(), entry.getValue()); } + /** + * Remove??? Denied!!! + */ @Override - public void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey) { - aggregateValueToLastScannedRowKeys.put(value, rowKey); + public void remove() { + throw new IllegalAccessError("Remove is not supported for this type of iterator"); } -} \ No newline at end of file + } + + /** + * Closes cache and releases spill resources + */ + @Override + public void close() throws IOException { + // Close spillable resources + Closeables.closeQuietly(spillManager); + Closeables.closeQuietly(chunk); + } + + @Override + public RegionScanner getScanner(final RegionScanner s) { + final Iterator> cacheIter = new EntryIterator(); + + // scanner using the spillable implementation + return new BaseRegionScanner(s) { + @Override + public void close() throws IOException { + try { + s.close(); + } finally { + // Always close gbCache and swallow possible Exceptions + Closeables.closeQuietly(SpillableGroupByCache.this); + } + } + + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } + + @Override + public boolean next(List results) throws IOException { + if (!cacheIter.hasNext()) { + return false; + } + Map.Entry ce = cacheIter.next(); + ImmutableBytesWritable aggregateGroupValPtr = ce.getKey(); + Aggregator[] aggs = ce.getValue(); + byte[] aggregateArrayBytes = aggregators.toBytes(aggs); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Adding new distinct group: " + + Bytes.toStringBinary(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), + aggregateGroupValPtr.getLength()) + + " with aggregators " + Arrays.toString(aggs) + " value = " + + Bytes.toStringBinary(aggregateArrayBytes)); + } + if (!isIncompatibleClient) { + ImmutableBytesWritable lastScannedRowKey = + aggregateValueToLastScannedRowKeys.get(aggregateGroupValPtr); + byte[] aggregateGroupValueBytes = new byte[aggregateGroupValPtr.getLength()]; + System.arraycopy(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), + aggregateGroupValueBytes, 0, aggregateGroupValueBytes.length); + byte[] finalValue = + ByteUtil.concat(PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), + aggregateGroupValueBytes, aggregateArrayBytes); + results.add( + PhoenixKeyValueUtil.newKeyValue(lastScannedRowKey.get(), lastScannedRowKey.getOffset(), + lastScannedRowKey.getLength(), GROUPED_AGGREGATOR_VALUE_BYTES, + GROUPED_AGGREGATOR_VALUE_BYTES, AGG_TIMESTAMP, finalValue, 0, finalValue.length)); + } else { + results.add(PhoenixKeyValueUtil.newKeyValue(aggregateGroupValPtr.get(), + aggregateGroupValPtr.getOffset(), aggregateGroupValPtr.getLength(), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, aggregateArrayBytes, 0, + aggregateArrayBytes.length)); + } + return cacheIter.hasNext(); + } + }; + } + + @Override + public void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey) { + aggregateValueToLastScannedRowKeys.put(value, rowKey); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/AddColumnMutator.java 
b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/AddColumnMutator.java index 4bb268081ce..7c948c7752d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/AddColumnMutator.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/AddColumnMutator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,24 @@ */ package org.apache.phoenix.coprocessor; -import org.apache.phoenix.query.QueryConstants; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FAMILY_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID_INDEX; +import static org.apache.phoenix.util.SchemaUtil.getVarChars; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.client.Mutation; @@ -30,6 +46,7 @@ import org.apache.phoenix.coprocessorclient.MetaDataProtocol.MutationCode; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; +import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.ColumnFamilyNotFoundException; import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.PColumn; @@ -41,6 +58,7 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PSmallint; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; @@ -51,407 +69,369 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FAMILY_NAME_INDEX; -import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID_INDEX; -import static org.apache.phoenix.util.SchemaUtil.getVarChars; - public class AddColumnMutator implements ColumnMutator { - private static final Logger logger = LoggerFactory.getLogger(AddColumnMutator.class); + private static final Logger logger = LoggerFactory.getLogger(AddColumnMutator.class); - private int getInteger(Put p, byte[] family, byte[] qualifier) { - List cells = p.get(family, qualifier); - if (cells != null && cells.size() > 0) { - Cell cell = cells.get(0); - return (Integer)PInteger.INSTANCE.toObject(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); - } - return 0; + private int getInteger(Put p, byte[] family, byte[] qualifier) { + List cells = p.get(family, qualifier); + if (cells != null && cells.size() > 0) { + Cell cell = cells.get(0); + return (Integer) PInteger.INSTANCE.toObject(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); } + return 0; + } + + @Override + public MutateColumnType getMutateColumnType() { + return MutateColumnType.ADD_COLUMN; + } - @Override - public MutateColumnType getMutateColumnType() { - return MutateColumnType.ADD_COLUMN; + /** + * Validates that we can add the column to the base table by ensuring that if the same column + * already exists in any child view all of the column properties match + */ + @Override + public MetaDataMutationResult validateWithChildViews(PTable table, List childViews, + List tableMetadata, byte[] schemaName, byte[] tableName) throws SQLException { + // Disallow if trying to switch tenancy of a table that has views + if ( + !childViews.isEmpty() + && switchAttribute(table.isMultiTenant(), tableMetadata, MULTI_TENANT_BYTES) + ) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); } - /** - * Validates that we can add the column to the base table by ensuring that if the same column - * already exists in any child view all of the column properties match - */ - @Override - public MetaDataMutationResult validateWithChildViews(PTable table, List childViews, - List tableMetadata, - byte[] schemaName, byte[] tableName) - throws SQLException { - // Disallow if trying to switch tenancy of a table that has views - if (!childViews.isEmpty() && switchAttribute(table.isMultiTenant(), - tableMetadata, MULTI_TENANT_BYTES)) { - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION - , EnvironmentEdgeManager.currentTimeMillis(), null); + List columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size()); + boolean salted = table.getBucketNum() != null; + // Isolate the puts relevant to adding columns + for (Mutation m : tableMetadata) { + if (m instanceof Put) { + byte[][] rkmd = new byte[5][]; + int pkCount = getVarChars(m.getRow(), rkmd); + // check if this put is for adding a column + if ( + pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null + && rkmd[COLUMN_NAME_INDEX].length > 0 + && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 
0 + ) { + columnPutsForBaseTable.add((Put) m); } + } + } + for (PTable view : childViews) { + /* + * Disallow adding columns to a base table with APPEND_ONLY_SCHEMA since this creates a gap in + * the column positions for every view (PHOENIX-4737). + */ + if (!columnPutsForBaseTable.isEmpty() && view.isAppendOnlySchema()) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); + } - List columnPutsForBaseTable = - Lists.newArrayListWithExpectedSize(tableMetadata.size()); - boolean salted = table.getBucketNum()!=null; - // Isolate the puts relevant to adding columns - for (Mutation m : tableMetadata) { - if (m instanceof Put) { - byte[][] rkmd = new byte[5][]; - int pkCount = getVarChars(m.getRow(), rkmd); - // check if this put is for adding a column - if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null - && rkmd[COLUMN_NAME_INDEX].length > 0 - && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) { - columnPutsForBaseTable.add((Put)m); - } - } + // add the new columns to the child view + List viewPkCols = new ArrayList<>(view.getPKColumns()); + // remove salted column + if (salted) { + viewPkCols.remove(0); + } + // remove pk columns that are present in the parent + viewPkCols.removeAll(table.getPKColumns()); + boolean addedPkColumn = false; + for (Put columnToBeAdded : columnPutsForBaseTable) { + PColumn existingViewColumn = null; + byte[][] rkmd = new byte[5][]; + getVarChars(columnToBeAdded.getRow(), rkmd); + String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]); + String columnFamily = + rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]); + try { + existingViewColumn = columnFamily == null + ? view.getColumnForColumnName(columnName) + : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName); + } catch (ColumnFamilyNotFoundException e) { + // ignore since it means that the column family is not present for the column to + // be added. + } catch (ColumnNotFoundException e) { + // ignore since it means the column is not present in the view } - for (PTable view : childViews) { + + boolean isCurrColumnToBeAddPkCol = columnFamily == null; + addedPkColumn |= isCurrColumnToBeAddPkCol; + if (existingViewColumn != null) { + if ( + EncodedColumnsUtil.usesEncodedColumnNames(table) + && !SchemaUtil.isPKColumn(existingViewColumn) + ) { /* - * Disallow adding columns to a base table with APPEND_ONLY_SCHEMA since this - * creates a gap in the column positions for every view (PHOENIX-4737). + * If the column already exists in a view, then we cannot add the column to the base + * table. The reason is subtle and is as follows: consider the case where a table has + * two views where both the views have the same key value column KV. Now, we dole out + * encoded column qualifiers for key value columns in views by using the counters stored + * in the base physical table. So the KV column can have different column qualifiers for + * the two views. For example, 11 for VIEW1 and 12 for VIEW2. This naturally extends to + * rows being inserted using the two views having different column qualifiers for the + * column named KV. Now, when an attempt is made to add column KV to the base table, we + * cannot decide which column qualifier should that column be assigned. 
It cannot be a + * number different than 11 or 12 since a query like SELECT KV FROM BASETABLE would + * return null for KV which is incorrect since column KV is present in rows inserted + * from the two views. We cannot use 11 or 12 either because we will then incorrectly + * return value of KV column inserted using only one view. */ - if (!columnPutsForBaseTable.isEmpty() && view.isAppendOnlySchema()) { - return new MetaDataMutationResult( - MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - - // add the new columns to the child view - List viewPkCols = new ArrayList<>(view.getPKColumns()); - // remove salted column - if (salted) { - viewPkCols.remove(0); - } - // remove pk columns that are present in the parent - viewPkCols.removeAll(table.getPKColumns()); - boolean addedPkColumn = false; - for (Put columnToBeAdded : columnPutsForBaseTable) { - PColumn existingViewColumn = null; - byte[][] rkmd = new byte[5][]; - getVarChars(columnToBeAdded.getRow(), rkmd); - String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]); - String columnFamily = - rkmd[FAMILY_NAME_INDEX] == null ? null - : Bytes.toString(rkmd[FAMILY_NAME_INDEX]); - try { - existingViewColumn = - columnFamily == null ? view.getColumnForColumnName(columnName) - : view.getColumnFamily(columnFamily) - .getPColumnForColumnName(columnName); - } catch (ColumnFamilyNotFoundException e) { - // ignore since it means that the column family is not present for the column to - // be added. - } catch (ColumnNotFoundException e) { - // ignore since it means the column is not present in the view - } + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } + // Validate data type is same + int baseColumnDataType = getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES); + if (baseColumnDataType != existingViewColumn.getDataType().getSqlType()) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } - boolean isCurrColumnToBeAddPkCol = columnFamily == null; - addedPkColumn |= isCurrColumnToBeAddPkCol; - if (existingViewColumn != null) { - if (EncodedColumnsUtil.usesEncodedColumnNames(table) - && !SchemaUtil.isPKColumn(existingViewColumn)) { - /* - * If the column already exists in a view, then we cannot add the column to - * the base table. The reason is subtle and is as follows: consider the case - * where a table has two views where both the views have the same key value - * column KV. Now, we dole out encoded column qualifiers for key value - * columns in views by using the counters stored in the base physical table. - * So the KV column can have different column qualifiers for the two views. - * For example, 11 for VIEW1 and 12 for VIEW2. This naturally extends to - * rows being inserted using the two views having different column - * qualifiers for the column named KV. Now, when an attempt is made to add - * column KV to the base table, we cannot decide which column qualifier - * should that column be assigned. It cannot be a number different than 11 - * or 12 since a query like SELECT KV FROM BASETABLE would return null for - * KV which is incorrect since column KV is present in rows inserted from - * the two views. We cannot use 11 or 12 either because we will then - * incorrectly return value of KV column inserted using only one view. 
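validateWithChildViews in this hunk rejects the ALTER when a column that already exists in a child view differs from the base definition in data type, max length, scale or sort order. The same four checks, compressed into an illustrative helper; ColumnDef is a hypothetical stand-in for the PColumn/Put plumbing, and nulls are treated as zero exactly as the getInteger comparisons do.

import java.util.Objects;

// Illustrative only: the four property checks the mutator performs before
// letting a base-table column "merge" with an existing view column.
final class ColumnCompatSketch {
  static final class ColumnDef {   // hypothetical stand-in for PColumn metadata
    final int sqlType; final Integer maxLength; final Integer scale; final int sortOrder;
    ColumnDef(int sqlType, Integer maxLength, Integer scale, int sortOrder) {
      this.sqlType = sqlType; this.maxLength = maxLength;
      this.scale = scale; this.sortOrder = sortOrder;
    }
  }

  // true when adding the base column would not conflict with the view's copy
  static boolean compatible(ColumnDef base, ColumnDef existingViewColumn) {
    return base.sqlType == existingViewColumn.sqlType
        && Objects.equals(orZero(base.maxLength), orZero(existingViewColumn.maxLength))
        && Objects.equals(orZero(base.scale), orZero(existingViewColumn.scale))
        && base.sortOrder == existingViewColumn.sortOrder;
  }

  private static Integer orZero(Integer v) { return v == null ? 0 : v; }
}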
- */ - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - // Validate data type is same - int baseColumnDataType = - getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES); - if (baseColumnDataType != existingViewColumn.getDataType().getSqlType()) { - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } + // Validate max length is same + int maxLength = getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES); + int existingMaxLength = + existingViewColumn.getMaxLength() == null ? 0 : existingViewColumn.getMaxLength(); + if (maxLength != existingMaxLength) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } - // Validate max length is same - int maxLength = - getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES); - int existingMaxLength = - existingViewColumn.getMaxLength() == null ? 0 - : existingViewColumn.getMaxLength(); - if (maxLength != existingMaxLength) { - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } + // Validate scale is same + int scale = getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES); + int existingScale = + existingViewColumn.getScale() == null ? 0 : existingViewColumn.getScale(); + if (scale != existingScale) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } - // Validate scale is same - int scale = - getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES); - int existingScale = - existingViewColumn.getScale() == null ? 0 - : existingViewColumn.getScale(); - if (scale != existingScale) { - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } + // Validate sort order is same + int sortOrder = getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES); + if (sortOrder != existingViewColumn.getSortOrder().getSystemValue()) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } - // Validate sort order is same - int sortOrder = - getInteger(columnToBeAdded, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES); - if (sortOrder != existingViewColumn.getSortOrder().getSystemValue()) { - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - - // if the column to be added to the base table is a pk column, then we need to - // validate that the key slot position is the same - if (isCurrColumnToBeAddPkCol) { - List keySeqCells = - columnToBeAdded.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.KEY_SEQ_BYTES); - if (keySeqCells != null && keySeqCells.size() > 0) { - Cell cell = keySeqCells.get(0); - int keySeq = - PSmallint.INSTANCE.getCodec().decodeInt(cell.getValueArray(), - cell.getValueOffset(), SortOrder.getDefault()); - // we need to take into account the columns inherited from the base table - // if the table is salted we don't include the salted column (which is - // present in getPKColumns()) - int pkPosition = SchemaUtil.getPKPosition(view, existingViewColumn) - + 1 - (salted ? 
1 : 0); - if (pkPosition != keySeq) { - return new MetaDataMutationResult( - MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), - table); - } - } - } - } - if (existingViewColumn!=null && isCurrColumnToBeAddPkCol) { - viewPkCols.remove(existingViewColumn); - } - } - /* - * Allow adding a pk columns to base table : 1. if all the view pk columns are exactly - * the same as the base table pk columns 2. if we are adding all the existing view pk - * columns to the base table - */ - if (addedPkColumn && !viewPkCols.isEmpty()) { + // if the column to be added to the base table is a pk column, then we need to + // validate that the key slot position is the same + if (isCurrColumnToBeAddPkCol) { + List keySeqCells = columnToBeAdded.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.KEY_SEQ_BYTES); + if (keySeqCells != null && keySeqCells.size() > 0) { + Cell cell = keySeqCells.get(0); + int keySeq = PSmallint.INSTANCE.getCodec().decodeInt(cell.getValueArray(), + cell.getValueOffset(), SortOrder.getDefault()); + // we need to take into account the columns inherited from the base table + // if the table is salted we don't include the salted column (which is + // present in getPKColumns()) + int pkPosition = + SchemaUtil.getPKPosition(view, existingViewColumn) + 1 - (salted ? 1 : 0); + if (pkPosition != keySeq) { return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); + EnvironmentEdgeManager.currentTimeMillis(), table); + } } + } } - return null; + if (existingViewColumn != null && isCurrColumnToBeAddPkCol) { + viewPkCols.remove(existingViewColumn); + } + } + /* + * Allow adding a pk columns to base table : 1. if all the view pk columns are exactly the + * same as the base table pk columns 2. 
if we are adding all the existing view pk columns to + * the base table + */ + if (addedPkColumn && !viewPkCols.isEmpty()) { + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } } + return null; + } - private boolean switchAttribute(boolean currAttribute, List tableMetaData, - byte[] attrQualifier) { - for (Mutation m : tableMetaData) { - if (m instanceof Put) { - Put p = (Put)m; - List cells = p.get(TABLE_FAMILY_BYTES, attrQualifier); - if (cells != null && cells.size() > 0) { - Cell cell = cells.get(0); - boolean newAttribute = (boolean)PBoolean.INSTANCE.toObject(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); - return currAttribute != newAttribute; - } - } + private boolean switchAttribute(boolean currAttribute, List tableMetaData, + byte[] attrQualifier) { + for (Mutation m : tableMetaData) { + if (m instanceof Put) { + Put p = (Put) m; + List cells = p.get(TABLE_FAMILY_BYTES, attrQualifier); + if (cells != null && cells.size() > 0) { + Cell cell = cells.get(0); + boolean newAttribute = (boolean) PBoolean.INSTANCE.toObject(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength()); + return currAttribute != newAttribute; } - return false; + } } + return false; + } - @Override - public MetaDataMutationResult validateAndAddMetadata(PTable table, byte[][] rowKeyMetaData, - List tableMetaData, - Region region, - List invalidateList, - List locks, - long clientTimeStamp, - long clientVersion, - ExtendedCellBuilder extendedCellBuilder, - final boolean isAddingColumns) { - byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX]; - byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX]; - byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX]; - PTableType type = table.getType(); - byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, - schemaName, tableName); - List additionalTableMetadataMutations = - Lists.newArrayListWithExpectedSize(2); + @Override + public MetaDataMutationResult validateAndAddMetadata(PTable table, byte[][] rowKeyMetaData, + List tableMetaData, Region region, List invalidateList, + List locks, long clientTimeStamp, long clientVersion, + ExtendedCellBuilder extendedCellBuilder, final boolean isAddingColumns) { + byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX]; + byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX]; + byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX]; + PTableType type = table.getType(); + byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + List additionalTableMetadataMutations = Lists.newArrayListWithExpectedSize(2); - boolean addingCol = false; - for (Mutation m : tableMetaData) { - byte[] key = m.getRow(); - boolean addingPKColumn = false; - int pkCount = getVarChars(key, rowKeyMetaData); - // this means we have are adding a column - if (pkCount > COLUMN_NAME_INDEX - && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) { - try { - addingCol = true; - byte[] familyName = null; - byte[] colName = null; - if (pkCount > FAMILY_NAME_INDEX) { - familyName = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]; - } - if (pkCount > COLUMN_NAME_INDEX) { - colName = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; - } - if (table.getExcludedColumns().contains( - PColumnImpl.createExcludedColumn(MetaDataEndpointImpl.newPName(familyName), - MetaDataEndpointImpl.newPName(colName), 0l))) { - // if this column was 
previously dropped in a view - // do not allow adding the column back - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - if (familyName!=null && familyName.length > 0) { - MetaDataMutationResult result = - compareWithPkColumns(colName, table, familyName); - if (result != null) { - return result; - } - PColumnFamily family = - table.getColumnFamily(familyName); - family.getPColumnForColumnNameBytes(colName); - } else if (colName!=null && colName.length > 0) { - addingPKColumn = true; - table.getPKColumn(Bytes.toString(colName)); - } else { - continue; - } - return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, - EnvironmentEdgeManager.currentTimeMillis(), table); - } catch (ColumnFamilyNotFoundException e) { - continue; - } catch (ColumnNotFoundException e) { - if (addingPKColumn) { - // We may be adding a DESC column, so if table is already - // able to be rowKeyOptimized, it should continue to be so. - if (table.rowKeyOrderOptimizable()) { - UpgradeUtil.addRowKeyOrderOptimizableCell( - additionalTableMetadataMutations, tableHeaderRowKey, - clientTimeStamp); - } else if (table.getType() == PTableType.VIEW){ - // Don't allow view PK to diverge from table PK as our upgrade code - // does not handle this. - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - // Add all indexes to invalidate list, as they will all be - // adding the same PK column. No need to lock them, as we - // have the parent table lock at this point. - for (PTable index : table.getIndexes()) { - invalidateList.add(new ImmutableBytesPtr(SchemaUtil - .getTableKey(tenantId, index.getSchemaName() - .getBytes(), index.getTableName() - .getBytes()))); - // We may be adding a DESC column, so if index is already - // able to be rowKeyOptimized, it should continue to be so. - if (index.rowKeyOrderOptimizable()) { - byte[] indexHeaderRowKey = - SchemaUtil.getTableKey(index.getTenantId() == null ? - ByteUtil.EMPTY_BYTE_ARRAY : - index.getTenantId().getBytes(), - index.getSchemaName().getBytes(), - index.getTableName().getBytes()); - UpgradeUtil.addRowKeyOrderOptimizableCell( - additionalTableMetadataMutations, indexHeaderRowKey, - clientTimeStamp); - } - } - } - continue; - } - } else if (pkCount == COLUMN_NAME_INDEX && - ! (Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && - Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0 ) ) { - // Invalidate any table with mutations - // TODO: this likely means we don't need the above logic that - // loops through the indexes if adding a PK column, since we'd - // always have header rows for those. 
- invalidateList.add(new ImmutableBytesPtr(SchemaUtil - .getTableKey(tenantId, - rowKeyMetaData[SCHEMA_NAME_INDEX], - rowKeyMetaData[TABLE_NAME_INDEX]))); + boolean addingCol = false; + for (Mutation m : tableMetaData) { + byte[] key = m.getRow(); + boolean addingPKColumn = false; + int pkCount = getVarChars(key, rowKeyMetaData); + // this means we have are adding a column + if ( + pkCount > COLUMN_NAME_INDEX + && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0 + ) { + try { + addingCol = true; + byte[] familyName = null; + byte[] colName = null; + if (pkCount > FAMILY_NAME_INDEX) { + familyName = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]; + } + if (pkCount > COLUMN_NAME_INDEX) { + colName = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; + } + if ( + table.getExcludedColumns() + .contains(PColumnImpl.createExcludedColumn(MetaDataEndpointImpl.newPName(familyName), + MetaDataEndpointImpl.newPName(colName), 0l)) + ) { + // if this column was previously dropped in a view + // do not allow adding the column back + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + if (familyName != null && familyName.length > 0) { + MetaDataMutationResult result = compareWithPkColumns(colName, table, familyName); + if (result != null) { + return result; } - } - - //We're changing the application-facing schema by adding a column or changing properties, so update the DDL - // timestamp - long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - if (MetaDataUtil.isTableDirectlyQueried(table.getType())) { - additionalTableMetadataMutations.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, - clientTimeStamp, serverTimestamp)); - } - //we don't need to update the DDL timestamp for child views, because when we look up - // a PTable, we'll take the max timestamp of a view and all its ancestors. This is true - // whether the view is diverged or not. - tableMetaData.addAll(additionalTableMetadataMutations); - if (type == PTableType.VIEW) { - if ( EncodedColumnsUtil.usesEncodedColumnNames(table) && addingCol && - !table.isAppendOnlySchema()) { - // When adding a column to a view that uses encoded column name - // scheme, we need to modify the CQ counters stored in the view's - // physical table. So to make sure clients get the latest PTable, we - // need to invalidate the cache entry. - // If the table uses APPEND_ONLY_SCHEMA we use the position of the - // column as the encoded column qualifier and so we don't need to - // update the CQ counter in the view physical table (see - // PHOENIX-4737) - invalidateList.add(new ImmutableBytesPtr( - MetaDataUtil.getPhysicalTableRowForView(table))); + PColumnFamily family = table.getColumnFamily(familyName); + family.getPColumnForColumnNameBytes(colName); + } else if (colName != null && colName.length > 0) { + addingPKColumn = true; + table.getPKColumn(Bytes.toString(colName)); + } else { + continue; + } + return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, + EnvironmentEdgeManager.currentTimeMillis(), table); + } catch (ColumnFamilyNotFoundException e) { + continue; + } catch (ColumnNotFoundException e) { + if (addingPKColumn) { + // We may be adding a DESC column, so if table is already + // able to be rowKeyOptimized, it should continue to be so. 
+ if (table.rowKeyOrderOptimizable()) { + UpgradeUtil.addRowKeyOrderOptimizableCell(additionalTableMetadataMutations, + tableHeaderRowKey, clientTimeStamp); + } else if (table.getType() == PTableType.VIEW) { + // Don't allow view PK to diverge from table PK as our upgrade code + // does not handle this. + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); } - // Pass in null as the parent PTable, since we always want to tag the cells - // in this case, irrespective of the property values of the parent - ViewUtil.addTagsToPutsForViewAlteredProperties(tableMetaData, null, - extendedCellBuilder); + // Add all indexes to invalidate list, as they will all be + // adding the same PK column. No need to lock them, as we + // have the parent table lock at this point. + for (PTable index : table.getIndexes()) { + invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, + index.getSchemaName().getBytes(), index.getTableName().getBytes()))); + // We may be adding a DESC column, so if index is already + // able to be rowKeyOptimized, it should continue to be so. + if (index.rowKeyOrderOptimizable()) { + byte[] indexHeaderRowKey = SchemaUtil.getTableKey( + index.getTenantId() == null + ? ByteUtil.EMPTY_BYTE_ARRAY + : index.getTenantId().getBytes(), + index.getSchemaName().getBytes(), index.getTableName().getBytes()); + UpgradeUtil.addRowKeyOrderOptimizableCell(additionalTableMetadataMutations, + indexHeaderRowKey, clientTimeStamp); + } + } + } + continue; } - return null; + } else if ( + pkCount == COLUMN_NAME_INDEX + && !(Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) + ) { + // Invalidate any table with mutations + // TODO: this likely means we don't need the above logic that + // loops through the indexes if adding a PK column, since we'd + // always have header rows for those. + invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, + rowKeyMetaData[SCHEMA_NAME_INDEX], rowKeyMetaData[TABLE_NAME_INDEX]))); + } } - private MetaDataMutationResult compareWithPkColumns(byte[] colName, PTable table, byte[] familyName) { - // check if column is matching with any of pk columns if given - // column belongs to default CF - if (Bytes.compareTo(familyName, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) == 0 - && colName != null && colName.length > 0) { - for (PColumn pColumn : table.getPKColumns()) { - if (Bytes.compareTo( - pColumn.getName().getBytes(), colName) == 0) { - return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - } - } - return null; + // We're changing the application-facing schema by adding a column or changing properties, so + // update the DDL + // timestamp + long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + if (MetaDataUtil.isTableDirectlyQueried(table.getType())) { + additionalTableMetadataMutations.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, + clientTimeStamp, serverTimestamp)); + } + // we don't need to update the DDL timestamp for child views, because when we look up + // a PTable, we'll take the max timestamp of a view and all its ancestors. This is true + // whether the view is diverged or not. 
+ tableMetaData.addAll(additionalTableMetadataMutations); + if (type == PTableType.VIEW) { + if ( + EncodedColumnsUtil.usesEncodedColumnNames(table) && addingCol && !table.isAppendOnlySchema() + ) { + // When adding a column to a view that uses encoded column name + // scheme, we need to modify the CQ counters stored in the view's + // physical table. So to make sure clients get the latest PTable, we + // need to invalidate the cache entry. + // If the table uses APPEND_ONLY_SCHEMA we use the position of the + // column as the encoded column qualifier and so we don't need to + // update the CQ counter in the view physical table (see + // PHOENIX-4737) + invalidateList.add(new ImmutableBytesPtr(MetaDataUtil.getPhysicalTableRowForView(table))); + } + // Pass in null as the parent PTable, since we always want to tag the cells + // in this case, irrespective of the property values of the parent + ViewUtil.addTagsToPutsForViewAlteredProperties(tableMetaData, null, extendedCellBuilder); } + return null; + } - @Override - public List> getTableAndDroppedColumnPairs() { - return Collections.emptyList(); + private MetaDataMutationResult compareWithPkColumns(byte[] colName, PTable table, + byte[] familyName) { + // check if column is matching with any of pk columns if given + // column belongs to default CF + if ( + Bytes.compareTo(familyName, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) == 0 + && colName != null && colName.length > 0 + ) { + for (PColumn pColumn : table.getPKColumns()) { + if (Bytes.compareTo(pColumn.getName().getBytes(), colName) == 0) { + return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, + EnvironmentEdgeManager.currentTimeMillis(), table); + } + } } + return null; + } + + @Override + public List> getTableAndDroppedColumnPairs() { + return Collections.emptyList(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java index cac4c948644..eb169b3384c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,94 +29,98 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; -public class BaseMetaDataEndpointObserver implements MetaDataEndpointObserver, PhoenixCoprocessor{ +public class BaseMetaDataEndpointObserver implements MetaDataEndpointObserver, PhoenixCoprocessor { - @Override - public void start(CoprocessorEnvironment env) throws IOException { + @Override + public void start(CoprocessorEnvironment env) throws IOException { - } + } - @Override - public void stop(CoprocessorEnvironment env) throws IOException { + @Override + public void stop(CoprocessorEnvironment env) throws IOException { - } + } - @Override - public void preGetTable( - org.apache.hadoop.hbase.coprocessor.ObserverContext ctx, - String tenantId, String tableName, TableName physicalTableName) throws IOException { + @Override + public void preGetTable( + org.apache.hadoop.hbase.coprocessor.ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName) throws IOException { - } + } - - @Override - public void preCreateTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, - Set familySet, Set indexes) throws IOException { + @Override + public void preCreateTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType, Set familySet, + Set indexes) throws IOException { - } + } - @Override - public void preDropTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, - List indexes) throws IOException { + @Override + public void preDropTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType, List indexes) + throws IOException { - } + } - @Override - public void preAlterTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType type) throws IOException { + @Override + public void preAlterTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType type) throws IOException { - } + } - @Override - public void preGetSchema(ObserverContext ctx, String schemaName) - throws IOException { + @Override + public void preGetSchema(ObserverContext ctx, + String schemaName) throws IOException { - } + } - @Override - public void preCreateSchema(ObserverContext ctx, String schemaName) - throws IOException { + @Override + public void preCreateSchema(ObserverContext ctx, + String schemaName) throws IOException { - } + } - @Override - public void preDropSchema(ObserverContext ctx, String schemaName) throws IOException { + @Override + public void preDropSchema(ObserverContext ctx, + String schemaName) throws IOException { - } + } - @Override - public void preCreateFunction(ObserverContext ctx, String tenantId, - String functionName) throws IOException { + @Override + public void preCreateFunction(ObserverContext ctx, + String tenantId, String functionName) throws IOException { - } + } - 
@Override - public void preDropFunction(ObserverContext ctx, String tenantId, String functionName) - throws IOException {} + @Override + public void preDropFunction(ObserverContext ctx, + String tenantId, String functionName) throws IOException { + } - @Override - public void preGetFunctions(ObserverContext ctx, String tenantId, String functionName) - throws IOException { + @Override + public void preGetFunctions(ObserverContext ctx, + String tenantId, String functionName) throws IOException { - } + } - @Override - public void preIndexUpdate(ObserverContext ctx, String tenantId, - String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState) - throws IOException { + @Override + public void preIndexUpdate(ObserverContext ctx, + String tenantId, String indexName, TableName physicalTableName, + TableName parentPhysicalTableName, PIndexState newState) throws IOException { - } + } - @Override - public void preCreateViewAddChildLink( - final ObserverContext ctx, - final String tableName) throws IOException {} + @Override + public void preCreateViewAddChildLink( + final ObserverContext ctx, final String tableName) + throws IOException { + } - @Override - public void preUpsertTaskDetails( - final ObserverContext ctx, - final String tableName) throws IOException { - } + @Override + public void preUpsertTaskDetails(final ObserverContext ctx, + final String tableName) throws IOException { + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java index 23a9f075c48..9d3ffdfa76f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,41 +22,38 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScannerContext; public abstract class BaseRegionScanner extends DelegateRegionScanner { - public BaseRegionScanner(RegionScanner delegate) { - super(delegate); - } - - @Override - public boolean isFilterDone() { - return false; - } - - @Override - public abstract boolean next(List results) throws IOException; - - @Override - public abstract boolean next(List result, ScannerContext scannerContext) - throws IOException; - - @Override - public boolean reseek(byte[] row) throws IOException { - throw new DoNotRetryIOException("Unsupported"); - } - - @Override - public boolean nextRaw(List result) throws IOException { - return next(result); - } - - @Override - public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { - return next(result, scannerContext); - } + public BaseRegionScanner(RegionScanner delegate) { + super(delegate); + } + + @Override + public boolean isFilterDone() { + return false; + } + + @Override + public abstract boolean next(List results) throws IOException; + + @Override + public abstract boolean next(List result, ScannerContext scannerContext) throws IOException; + + @Override + public boolean reseek(byte[] row) throws IOException { + throw new DoNotRetryIOException("Unsupported"); + } + + @Override + public boolean nextRaw(List result) throws IOException { + return next(result); + } + + @Override + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + return next(result, scannerContext); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java index aab5c178a3b..c7c8dcf0c4b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForFilter; + import java.io.IOException; import java.sql.SQLException; import java.util.List; @@ -60,508 +62,501 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.schema.PTable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForFilter; - abstract public class BaseScannerRegionObserver implements RegionObserver { - private static final Logger LOGGER = LoggerFactory.getLogger(BaseScannerRegionObserver.class); - - /** - * Used by logger to identify coprocessor - */ - @Override - public String toString() { - return this.getClass().getName(); + private static final Logger LOGGER = LoggerFactory.getLogger(BaseScannerRegionObserver.class); + + /** + * Used by logger to identify coprocessor + */ + @Override + public String toString() { + return this.getClass().getName(); + } + + private static void throwIfScanOutOfRegion(Scan scan, Region region) + throws DoNotRetryIOException { + boolean isLocalIndex = ScanUtil.isLocalIndex(scan); + byte[] lowerInclusiveScanKey = scan.getStartRow(); + byte[] upperExclusiveScanKey = scan.getStopRow(); + byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey(); + byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey(); + boolean isStaleRegionBoundaries; + if (isLocalIndex) { + // For local indexes we have to abort any scan that was open during a split. + // We detect that condition as follows: + // 1. The scanner's stop row has to always match the region's end key. + // 2. Phoenix sets the SCAN_ACTUAL_START_ROW attribute to the scan's original start row + // We cannot directly compare that with the region's start key, but can enforce that + // the original start row still falls within the new region. + byte[] expectedUpperRegionKey = + scan.getAttribute(BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY) == null + ? 
scan.getStopRow() + : scan.getAttribute(BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY); + + byte[] actualStartRow = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); + isStaleRegionBoundaries = (expectedUpperRegionKey != null + && Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0) + || (actualStartRow != null && Bytes.compareTo(actualStartRow, lowerInclusiveRegionKey) < 0); + } else { + if (scan.isReversed()) { + isStaleRegionBoundaries = + Bytes.compareTo(upperExclusiveScanKey, lowerInclusiveRegionKey) < 0 + || (Bytes.compareTo(lowerInclusiveScanKey, upperExclusiveRegionKey) > 0 + && upperExclusiveRegionKey.length != 0) + || (upperExclusiveRegionKey.length != 0 && lowerInclusiveScanKey.length == 0); + } else { + isStaleRegionBoundaries = + Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0 + || (Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > 0 + && upperExclusiveRegionKey.length != 0) + || (upperExclusiveRegionKey.length != 0 && upperExclusiveScanKey.length == 0); + } } - - - private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException { - boolean isLocalIndex = ScanUtil.isLocalIndex(scan); - byte[] lowerInclusiveScanKey = scan.getStartRow(); - byte[] upperExclusiveScanKey = scan.getStopRow(); - byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey(); - byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey(); - boolean isStaleRegionBoundaries; - if (isLocalIndex) { - // For local indexes we have to abort any scan that was open during a split. - // We detect that condition as follows: - // 1. The scanner's stop row has to always match the region's end key. - // 2. Phoenix sets the SCAN_ACTUAL_START_ROW attribute to the scan's original start row - // We cannot directly compare that with the region's start key, but can enforce that - // the original start row still falls within the new region. - byte[] expectedUpperRegionKey = - scan.getAttribute(BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY) == null ? scan.getStopRow() : scan - .getAttribute(BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY); - - byte[] actualStartRow = scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); - isStaleRegionBoundaries = (expectedUpperRegionKey != null && - Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0) || - (actualStartRow != null && Bytes.compareTo(actualStartRow, lowerInclusiveRegionKey) < 0); - } else { - if (scan.isReversed()) { - isStaleRegionBoundaries = - Bytes.compareTo(upperExclusiveScanKey, lowerInclusiveRegionKey) < 0 || - (Bytes.compareTo(lowerInclusiveScanKey, upperExclusiveRegionKey) > - 0 && upperExclusiveRegionKey.length != 0) || - (upperExclusiveRegionKey.length != 0 && - lowerInclusiveScanKey.length == 0); - } else { - isStaleRegionBoundaries = - Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0 || - (Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > - 0 && upperExclusiveRegionKey.length != 0) || - (upperExclusiveRegionKey.length != 0 && - upperExclusiveScanKey.length == 0); - } - } - if (isStaleRegionBoundaries) { - LOGGER.error("Throwing StaleRegionBoundaryCacheException due to mismatched scan " - + "boundaries. 
Region: {} , lowerInclusiveScanKey: {} , " - + "upperExclusiveScanKey: {} , lowerInclusiveRegionKey: {} , " - + "upperExclusiveRegionKey: {} , scan reversed: {}", - region.getRegionInfo().getRegionNameAsString(), - Bytes.toStringBinary(lowerInclusiveScanKey), - Bytes.toStringBinary(upperExclusiveScanKey), - Bytes.toStringBinary(lowerInclusiveRegionKey), - Bytes.toStringBinary(upperExclusiveRegionKey), - scan.isReversed()); - Exception cause = new StaleRegionBoundaryCacheException( - region.getRegionInfo().getTable().getNameAsString()); - throw new DoNotRetryIOException(cause.getMessage(), cause); - } - if (isLocalIndex) { - ScanUtil.setupLocalIndexScan(scan); - } + if (isStaleRegionBoundaries) { + LOGGER.error( + "Throwing StaleRegionBoundaryCacheException due to mismatched scan " + + "boundaries. Region: {} , lowerInclusiveScanKey: {} , " + + "upperExclusiveScanKey: {} , lowerInclusiveRegionKey: {} , " + + "upperExclusiveRegionKey: {} , scan reversed: {}", + region.getRegionInfo().getRegionNameAsString(), Bytes.toStringBinary(lowerInclusiveScanKey), + Bytes.toStringBinary(upperExclusiveScanKey), Bytes.toStringBinary(lowerInclusiveRegionKey), + Bytes.toStringBinary(upperExclusiveRegionKey), scan.isReversed()); + Exception cause = + new StaleRegionBoundaryCacheException(region.getRegionInfo().getTable().getNameAsString()); + throw new DoNotRetryIOException(cause.getMessage(), cause); } - - abstract protected boolean isRegionObserverFor(Scan scan); - abstract protected RegionScanner doPostScannerOpen(ObserverContext c, final Scan scan, final RegionScanner s) throws Throwable; - - protected boolean skipRegionBoundaryCheck(Scan scan) { - byte[] skipCheckBytes = scan.getAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK); - return skipCheckBytes != null && Bytes.toBoolean(skipCheckBytes); + if (isLocalIndex) { + ScanUtil.setupLocalIndexScan(scan); } - - @Override - public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - Scan scan) throws IOException { - byte[] txnScn = scan.getAttribute(BaseScannerRegionObserverConstants.TX_SCN); - if (txnScn!=null) { - TimeRange timeRange = scan.getTimeRange(); - scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn)); - } - if (isRegionObserverFor(scan)) { - // For local indexes, we need to throw if out of region as we'll get inconsistent - // results otherwise while in other cases, it may just mean out client-side data - // on region boundaries is out of date and can safely be ignored. - if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) { - throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion()); - } - // Muck with the start/stop row of the scan and set as reversed at the - // last possible moment. You need to swap the start/stop and make the - // start exclusive and the stop inclusive. - ScanUtil.setupReverseScan(scan); - // Set the paging filter. Make sure that the paging filter is the top level - // filter if paging is enabled, that is pageSizeMsBytes != null. 
- if (!(scan.getFilter() instanceof PagingFilter)) { - byte[] pageSizeMsBytes = - scan.getAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS); - if (pageSizeMsBytes != null) { - scan.setFilter(new PagingFilter(scan.getFilter(), - getPageSizeMsForFilter(scan))); - } - } + } + + abstract protected boolean isRegionObserverFor(Scan scan); + + abstract protected RegionScanner doPostScannerOpen( + ObserverContext c, final Scan scan, final RegionScanner s) + throws Throwable; + + protected boolean skipRegionBoundaryCheck(Scan scan) { + byte[] skipCheckBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK); + return skipCheckBytes != null && Bytes.toBoolean(skipCheckBytes); + } + + @Override + public void preScannerOpen( + org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan) + throws IOException { + byte[] txnScn = scan.getAttribute(BaseScannerRegionObserverConstants.TX_SCN); + if (txnScn != null) { + TimeRange timeRange = scan.getTimeRange(); + scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn)); + } + if (isRegionObserverFor(scan)) { + // For local indexes, we need to throw if out of region as we'll get inconsistent + // results otherwise while in other cases, it may just mean out client-side data + // on region boundaries is out of date and can safely be ignored. + if (!skipRegionBoundaryCheck(scan) || ScanUtil.isLocalIndex(scan)) { + throwIfScanOutOfRegion(scan, c.getEnvironment().getRegion()); + } + // Muck with the start/stop row of the scan and set as reversed at the + // last possible moment. You need to swap the start/stop and make the + // start exclusive and the stop inclusive. + ScanUtil.setupReverseScan(scan); + // Set the paging filter. Make sure that the paging filter is the top level + // filter if paging is enabled, that is pageSizeMsBytes != null. + if (!(scan.getFilter() instanceof PagingFilter)) { + byte[] pageSizeMsBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS); + if (pageSizeMsBytes != null) { + scan.setFilter(new PagingFilter(scan.getFilter(), getPageSizeMsForFilter(scan))); } + } + } + } + + private class RegionScannerHolder extends DelegateRegionScanner { + private final Scan scan; + private final ObserverContext c; + private boolean wasOverriden; + + public RegionScannerHolder(ObserverContext c, Scan scan, + final RegionScanner scanner) { + super(scanner); + this.c = c; + this.scan = scan; } - private class RegionScannerHolder extends DelegateRegionScanner { - private final Scan scan; - private final ObserverContext c; - private boolean wasOverriden; - - public RegionScannerHolder(ObserverContext c, Scan scan, - final RegionScanner scanner) { - super(scanner); - this.c = c; - this.scan = scan; - } - - private void overrideDelegate() throws IOException { - if (wasOverriden) { - return; - } - boolean success = false; - // Save the current span. When done with the child span, reset the span back to - // what it was. Otherwise, this causes the thread local storing the current span - // to not be reset back to null causing catastrophic infinite loops - // and region servers to crash. See https://issues.apache.org/jira/browse/PHOENIX-1596 - // TraceScope can't be used here because closing the scope will end up calling - // currentSpan.stop() and that should happen only when we are closing the scanner. 
- final Span savedSpan = Trace.currentSpan(); - final Span child = Trace.startSpan(BaseScannerRegionObserverConstants.SCANNER_OPENED_TRACE_INFO, savedSpan).getSpan(); - try { - RegionScanner scanner = doPostScannerOpen(c, scan, delegate); - scanner = new DelegateRegionScanner(scanner) { - // This isn't very obvious but close() could be called in a thread - // that is different from the thread that created the scanner. - @Override - public void close() throws IOException { - try { - delegate.close(); - } finally { - if (child != null) { - child.stop(); - } - } - } - }; - this.delegate = scanner; - wasOverriden = true; - success = true; - } catch (Throwable t) { - ClientUtil.throwIOException(c.getEnvironment().getRegionInfo().getRegionNameAsString(), t); - } finally { - try { - if (!success && child != null) { - child.stop(); - } - } finally { - Trace.continueSpan(savedSpan); - } - } - } - - @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - overrideDelegate(); - boolean res = super.next(result, scannerContext); - ScannerContextUtil.incrementSizeProgress(scannerContext, result); - return res; - } - - @Override - public boolean next(List result) throws IOException { - overrideDelegate(); - return super.next(result); - } - - @Override - public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { - overrideDelegate(); - boolean res = super.nextRaw(result, scannerContext); - ScannerContextUtil.incrementSizeProgress(scannerContext, result); - return res; - } - - @Override - public boolean nextRaw(List result) throws IOException { - overrideDelegate(); - return super.nextRaw(result); + private void overrideDelegate() throws IOException { + if (wasOverriden) { + return; + } + boolean success = false; + // Save the current span. When done with the child span, reset the span back to + // what it was. Otherwise, this causes the thread local storing the current span + // to not be reset back to null causing catastrophic infinite loops + // and region servers to crash. See https://issues.apache.org/jira/browse/PHOENIX-1596 + // TraceScope can't be used here because closing the scope will end up calling + // currentSpan.stop() and that should happen only when we are closing the scanner. + final Span savedSpan = Trace.currentSpan(); + final Span child = + Trace.startSpan(BaseScannerRegionObserverConstants.SCANNER_OPENED_TRACE_INFO, savedSpan) + .getSpan(); + try { + RegionScanner scanner = doPostScannerOpen(c, scan, delegate); + scanner = new DelegateRegionScanner(scanner) { + // This isn't very obvious but close() could be called in a thread + // that is different from the thread that created the scanner. + @Override + public void close() throws IOException { + try { + delegate.close(); + } finally { + if (child != null) { + child.stop(); + } } - @Override - public RegionScanner getNewRegionScanner(Scan scan) throws IOException { - try { - return new RegionScannerHolder(c, scan, - ((DelegateRegionScanner) delegate).getNewRegionScanner(scan)); - } catch (ClassCastException e) { - throw new DoNotRetryIOException(e); - } - } - } - - /** - * Wrapper for {@link #postScannerOpen(ObserverContext, Scan, RegionScanner)} that ensures no non IOException is thrown, - * to prevent the coprocessor from becoming blacklisted. 
- * - */ - @Override - public final RegionScanner postScannerOpen( - final ObserverContext c, final Scan scan, - final RegionScanner s) throws IOException { + } + }; + this.delegate = scanner; + wasOverriden = true; + success = true; + } catch (Throwable t) { + ClientUtil.throwIOException(c.getEnvironment().getRegionInfo().getRegionNameAsString(), t); + } finally { try { - if (!isRegionObserverFor(scan)) { - return s; - } - byte[] emptyCF = scan.getAttribute( - BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); - byte[] emptyCQ = scan.getAttribute( - BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); - // Make sure PageRegionScanner wraps only the lowest region scanner, i.e., HBase region - // scanner. We assume here every Phoenix region scanner extends DelegateRegionScanner. - if (s instanceof DelegateRegionScanner) { - return new RegionScannerHolder(c, scan, s); - } else { - // An old client may not set these attributes which are required by TTLRegionScanner - if (emptyCF != null && emptyCQ != null) { - return new RegionScannerHolder(c, scan, - new TTLRegionScanner(c.getEnvironment(), scan, - new PagingRegionScanner(c.getEnvironment().getRegion(), s, - scan))); - } - return new RegionScannerHolder(c, scan, - new PagingRegionScanner(c.getEnvironment().getRegion(), s, scan)); - - } - } catch (Throwable t) { - // If the exception is NotServingRegionException then throw it as - // StaleRegionBoundaryCacheException to handle it by phoenix client other wise hbase - // client may recreate scans with wrong region boundaries. - if(t instanceof NotServingRegionException) { - LOGGER.error("postScannerOpen error for region {} . " - + "Thorwing it as StaleRegionBoundaryCacheException", - s.getRegionInfo().getRegionNameAsString(), t); - Exception cause = new StaleRegionBoundaryCacheException(c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()); - throw new DoNotRetryIOException(cause.getMessage(), cause); - } else { - LOGGER.error("postScannerOpen error for region {}", - s.getRegionInfo().getRegionNameAsString(), t); - } - ClientUtil.throwIOException(c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t); - return null; // impossible + if (!success && child != null) { + child.stop(); + } + } finally { + Trace.continueSpan(savedSpan); } + } } - /** - * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and - * re-throws as DoNotRetryIOException to prevent needless retrying hanging the query - * for 30 seconds. Unfortunately, until HBASE-7481 gets fixed, there's no way to do - * the same from a custom filter. - * @param offset starting position in the rowkey. 
- * @param scan - * @param tupleProjector - * @param dataRegion - * @param indexMaintainer - * @param viewConstants - */ - RegionScanner getWrappedScanner(final ObserverContext c, - final RegionScanner s, final int offset, final Scan scan, - final ColumnReference[] dataColumns, final TupleProjector tupleProjector, - final Region dataRegion, final IndexMaintainer indexMaintainer, - final byte[][] viewConstants, final TupleProjector projector, - final ImmutableBytesWritable ptr, final boolean useQualiferAsListIndex) - throws IOException { - - RegionScannerFactory regionScannerFactory = new NonAggregateRegionScannerFactory(c.getEnvironment()); - - return regionScannerFactory.getWrappedScanner(c.getEnvironment(), s, null, null, offset, scan, dataColumns, tupleProjector, - dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr, useQualiferAsListIndex); - } - - public void setScanOptionsForFlushesAndCompactions(ScanOptions options) { - // We want the store to give us all the deleted cells to StoreCompactionScanner - options.setKeepDeletedCells(KeepDeletedCells.TTL); - options.setTTL(HConstants.FOREVER); - options.setMaxVersions(Integer.MAX_VALUE); - options.setMinVersions(Integer.MAX_VALUE); + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + overrideDelegate(); + boolean res = super.next(result, scannerContext); + ScannerContextUtil.incrementSizeProgress(scannerContext, result); + return res; } @Override - public void preCompactScannerOpen(ObserverContext c, Store store, - ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { - Configuration conf = c.getEnvironment().getConfiguration(); - if (isPhoenixTableTTLEnabled(conf)) { - setScanOptionsForFlushesAndCompactions(options); - return; - } - long maxLookbackAgeInMillis = getMaxLookbackAge(c); - if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { - setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, - scanType, maxLookbackAgeInMillis); - } + public boolean next(List result) throws IOException { + overrideDelegate(); + return super.next(result); } @Override - public void preFlushScannerOpen(ObserverContext c, Store store, - ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { - Configuration conf = c.getEnvironment().getConfiguration(); - - if (isPhoenixTableTTLEnabled(conf)) { - setScanOptionsForFlushesAndCompactions(options); - return; - } - - long maxLookbackAgeInMillis = getMaxLookbackAge(c); - if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { - setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, - ScanType.COMPACT_RETAIN_DELETES, maxLookbackAgeInMillis); - } + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + overrideDelegate(); + boolean res = super.nextRaw(result, scannerContext); + ScannerContextUtil.incrementSizeProgress(scannerContext, result); + return res; } @Override - public void preMemStoreCompactionCompactScannerOpen( - ObserverContext c, Store store, ScanOptions options) - throws IOException { - Configuration conf = c.getEnvironment().getConfiguration(); - if (isPhoenixTableTTLEnabled(conf)) { - setScanOptionsForFlushesAndCompactions(options); - return; - } - long maxLookbackAgeInMillis = getMaxLookbackAge(c); - if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { - MemoryCompactionPolicy inMemPolicy = - 
store.getColumnFamilyDescriptor().getInMemoryCompaction(); - ScanType scanType; - //the eager and adaptive in-memory compaction policies can purge versions; the others - // can't. (Eager always does; adaptive sometimes does) - if (inMemPolicy.equals(MemoryCompactionPolicy.EAGER) || - inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE)) { - scanType = ScanType.COMPACT_DROP_DELETES; - } else { - scanType = ScanType.COMPACT_RETAIN_DELETES; - } - setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, - scanType, maxLookbackAgeInMillis); - } + public boolean nextRaw(List result) throws IOException { + overrideDelegate(); + return super.nextRaw(result); } @Override - public void preStoreScannerOpen(ObserverContext c, Store store, - ScanOptions options) throws IOException { - - Configuration conf = c.getEnvironment().getConfiguration(); - if (isPhoenixTableTTLEnabled(conf)) { - setScanOptionsForFlushesAndCompactions(options); - return; - } - if (!storeFileScanDoesntNeedAlteration(options)) { - //PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide - // mutations that happen before a delete marker. This overrides that behavior. - options.setMinVersions(options.getMinVersions()); - KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE; - if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) { - keepDeletedCells = KeepDeletedCells.TTL; - } - options.setKeepDeletedCells(keepDeletedCells); + public RegionScanner getNewRegionScanner(Scan scan) throws IOException { + try { + return new RegionScannerHolder(c, scan, + ((DelegateRegionScanner) delegate).getNewRegionScanner(scan)); + } catch (ClassCastException e) { + throw new DoNotRetryIOException(e); + } + } + } + + /** + * Wrapper for {@link #postScannerOpen(ObserverContext, Scan, RegionScanner)} that ensures no non + * IOException is thrown, to prevent the coprocessor from becoming blacklisted. + */ + @Override + public final RegionScanner postScannerOpen(final ObserverContext c, + final Scan scan, final RegionScanner s) throws IOException { + try { + if (!isRegionObserverFor(scan)) { + return s; + } + byte[] emptyCF = + scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); + byte[] emptyCQ = + scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); + // Make sure PageRegionScanner wraps only the lowest region scanner, i.e., HBase region + // scanner. We assume here every Phoenix region scanner extends DelegateRegionScanner. + if (s instanceof DelegateRegionScanner) { + return new RegionScannerHolder(c, scan, s); + } else { + // An old client may not set these attributes which are required by TTLRegionScanner + if (emptyCF != null && emptyCQ != null) { + return new RegionScannerHolder(c, scan, new TTLRegionScanner(c.getEnvironment(), scan, + new PagingRegionScanner(c.getEnvironment().getRegion(), s, scan))); } + return new RegionScannerHolder(c, scan, + new PagingRegionScanner(c.getEnvironment().getRegion(), s, scan)); + + } + } catch (Throwable t) { + // If the exception is NotServingRegionException then throw it as + // StaleRegionBoundaryCacheException to handle it by phoenix client other wise hbase + // client may recreate scans with wrong region boundaries. + if (t instanceof NotServingRegionException) { + LOGGER.error( + "postScannerOpen error for region {} . 
" + + "Thorwing it as StaleRegionBoundaryCacheException", + s.getRegionInfo().getRegionNameAsString(), t); + Exception cause = new StaleRegionBoundaryCacheException( + c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()); + throw new DoNotRetryIOException(cause.getMessage(), cause); + } else { + LOGGER.error("postScannerOpen error for region {}", + s.getRegionInfo().getRegionNameAsString(), t); + } + ClientUtil.throwIOException( + c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(), t); + return null; // impossible } - - private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) { - Scan scan = options.getScan(); - boolean isRaw = scan.isRaw(); - //true if keep deleted cells is either TRUE or TTL - boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) || - options.getKeepDeletedCells().equals(KeepDeletedCells.TTL); - boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP; - boolean timestampIsTransactional = - isTransactionalTimestamp(scan.getTimeRange().getMax()); - return isRaw - || keepDeletedCells - || timeRangeIsLatest - || timestampIsTransactional; + } + + /** + * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and re-throws as + * DoNotRetryIOException to prevent needless retrying hanging the query for 30 seconds. + * Unfortunately, until HBASE-7481 gets fixed, there's no way to do the same from a custom filter. + * @param offset starting position in the rowkey. + */ + RegionScanner getWrappedScanner(final ObserverContext c, + final RegionScanner s, final int offset, final Scan scan, final ColumnReference[] dataColumns, + final TupleProjector tupleProjector, final Region dataRegion, + final IndexMaintainer indexMaintainer, final byte[][] viewConstants, + final TupleProjector projector, final ImmutableBytesWritable ptr, + final boolean useQualiferAsListIndex) throws IOException { + + RegionScannerFactory regionScannerFactory = + new NonAggregateRegionScannerFactory(c.getEnvironment()); + + return regionScannerFactory.getWrappedScanner(c.getEnvironment(), s, null, null, offset, scan, + dataColumns, tupleProjector, dataRegion, indexMaintainer, null, viewConstants, null, null, + projector, ptr, useQualiferAsListIndex); + } + + public void setScanOptionsForFlushesAndCompactions(ScanOptions options) { + // We want the store to give us all the deleted cells to StoreCompactionScanner + options.setKeepDeletedCells(KeepDeletedCells.TTL); + options.setTTL(HConstants.FOREVER); + options.setMaxVersions(Integer.MAX_VALUE); + options.setMinVersions(Integer.MAX_VALUE); + } + + @Override + public void preCompactScannerOpen(ObserverContext c, Store store, + ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + Configuration conf = c.getEnvironment().getConfiguration(); + if (isPhoenixTableTTLEnabled(conf)) { + setScanOptionsForFlushesAndCompactions(options); + return; } - - private boolean isTransactionalTimestamp(long ts) { - //have to use the HBase edge manager because the Phoenix one is in phoenix-core - return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1); + long maxLookbackAgeInMillis = getMaxLookbackAge(c); + if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { + setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, scanType, + maxLookbackAgeInMillis); } + } - /* - * If KeepDeletedCells.FALSE, KeepDeletedCells.TTL , - * let 
delete markers age once lookback age is done. - */ - public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) { - //if we're doing a minor compaction or flush, always set keep deleted cells - //to true. Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL, - //where the value of the ttl might be overriden to the max lookback age elsewhere - return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE - || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) ? - KeepDeletedCells.TRUE : KeepDeletedCells.TTL; - } + @Override + public void preFlushScannerOpen(ObserverContext c, Store store, + ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { + Configuration conf = c.getEnvironment().getConfiguration(); - /* - * if the user set a TTL we should leave MIN_VERSIONS at the default (0 in most of the cases). - * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want - * Math.max(maxVersions, minVersions, 1) - */ - public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) { - return cfDescriptor.getTimeToLive() != HConstants.FOREVER ? options.getMinVersions() - : Math.max(Math.max(options.getMinVersions(), - cfDescriptor.getMaxVersions()),1); + if (isPhoenixTableTTLEnabled(conf)) { + setScanOptionsForFlushesAndCompactions(options); + return; } - /** - * - * @param conf HBase Configuration - * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted - * @param options ScanOptions of overrides to the compaction scan - * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age - */ - public long getTimeToLiveForCompactions(Configuration conf, - ColumnFamilyDescriptor columnDescriptor, - ScanOptions options, long maxLookbackTtl) { - long ttlConfigured = columnDescriptor.getTimeToLive(); - long ttlInMillis = ttlConfigured * 1000; - if (isMaxLookbackTimeEnabled(maxLookbackTtl)) { - if (ttlConfigured == HConstants.FOREVER - && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE) { - // If user configured default TTL(FOREVER) and keep deleted cells to false or - // TTL then to remove unwanted delete markers we should change ttl to max lookback age - ttlInMillis = maxLookbackTtl; - } else { - //if there is a TTL, use TTL instead of max lookback age. 
- // Max lookback age should be more recent or equal to TTL - ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl); - } - } - - return ttlInMillis; + long maxLookbackAgeInMillis = getMaxLookbackAge(c); + if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { + setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, + ScanType.COMPACT_RETAIN_DELETES, maxLookbackAgeInMillis); } - - public void setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(Configuration conf, - ScanOptions options, - final Store store, - ScanType type, long maxLookbackAge) { - ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor(); - options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor, - options, maxLookbackAge)); - options.setKeepDeletedCells(getKeepDeletedCells(options, type)); - options.setMaxVersions(Integer.MAX_VALUE); - options.setMinVersions(getMinVersions(options, cfDescriptor)); + } + + @Override + public void preMemStoreCompactionCompactScannerOpen( + ObserverContext c, Store store, ScanOptions options) + throws IOException { + Configuration conf = c.getEnvironment().getConfiguration(); + if (isPhoenixTableTTLEnabled(conf)) { + setScanOptionsForFlushesAndCompactions(options); + return; } - - public static boolean isMaxLookbackTimeEnabled(Configuration conf){ - return isMaxLookbackTimeEnabled(conf.getLong(BaseScannerRegionObserverConstants.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, - BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE)); + long maxLookbackAgeInMillis = getMaxLookbackAge(c); + if (isMaxLookbackTimeEnabled(maxLookbackAgeInMillis)) { + MemoryCompactionPolicy inMemPolicy = + store.getColumnFamilyDescriptor().getInMemoryCompaction(); + ScanType scanType; + // the eager and adaptive in-memory compaction policies can purge versions; the others + // can't. 
(Eager always does; adaptive sometimes does) + if ( + inMemPolicy.equals(MemoryCompactionPolicy.EAGER) + || inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE) + ) { + scanType = ScanType.COMPACT_DROP_DELETES; + } else { + scanType = ScanType.COMPACT_RETAIN_DELETES; + } + setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(conf, options, store, scanType, + maxLookbackAgeInMillis); } + } - public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){ - return maxLookbackTime > 0L; - } + @Override + public void preStoreScannerOpen(ObserverContext c, Store store, + ScanOptions options) throws IOException { - private static long getMaxLookbackAge(ObserverContext c) { - TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); - String fullTableName = tableName.getNameAsString(); - Configuration conf = c.getEnvironment().getConfiguration(); - PTable table; - try(PhoenixConnection conn = QueryUtil.getConnectionOnServer( - conf).unwrap(PhoenixConnection.class)) { - table = conn.getTableNoCache(fullTableName); - } - catch (SQLException e) { - if (e instanceof TableNotFoundException) { - LOGGER.debug("Ignoring HBase table that is not a Phoenix table: {}", fullTableName); - // non-Phoenix HBase tables won't be found, do nothing - } else { - LOGGER.error("Unable to fetch table level max lookback age for {}", fullTableName, e); - } - return MetaDataUtil.getMaxLookbackAge(conf, null); - } - return MetaDataUtil.getMaxLookbackAge(conf, table.getMaxLookbackAge()); + Configuration conf = c.getEnvironment().getConfiguration(); + if (isPhoenixTableTTLEnabled(conf)) { + setScanOptionsForFlushesAndCompactions(options); + return; + } + if (!storeFileScanDoesntNeedAlteration(options)) { + // PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide + // mutations that happen before a delete marker. This overrides that behavior. + options.setMinVersions(options.getMinVersions()); + KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE; + if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) { + keepDeletedCells = KeepDeletedCells.TTL; + } + options.setKeepDeletedCells(keepDeletedCells); + } + } + + private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) { + Scan scan = options.getScan(); + boolean isRaw = scan.isRaw(); + // true if keep deleted cells is either TRUE or TTL + boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) + || options.getKeepDeletedCells().equals(KeepDeletedCells.TTL); + boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP; + boolean timestampIsTransactional = isTransactionalTimestamp(scan.getTimeRange().getMax()); + return isRaw || keepDeletedCells || timeRangeIsLatest || timestampIsTransactional; + } + + private boolean isTransactionalTimestamp(long ts) { + // have to use the HBase edge manager because the Phoenix one is in phoenix-core + return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1); + } + + /* + * If KeepDeletedCells.FALSE, KeepDeletedCells.TTL , let delete markers age once lookback age is + * done. + */ + public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) { + // if we're doing a minor compaction or flush, always set keep deleted cells + // to true. 
Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL, + // where the value of the ttl might be overriden to the max lookback age elsewhere + return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE + || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) + ? KeepDeletedCells.TRUE + : KeepDeletedCells.TTL; + } + + /* + * if the user set a TTL we should leave MIN_VERSIONS at the default (0 in most of the cases). + * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want + * Math.max(maxVersions, minVersions, 1) + */ + public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) { + return cfDescriptor.getTimeToLive() != HConstants.FOREVER + ? options.getMinVersions() + : Math.max(Math.max(options.getMinVersions(), cfDescriptor.getMaxVersions()), 1); + } + + /** + * @param conf HBase Configuration + * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted + * @param options ScanOptions of overrides to the compaction scan + * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age + */ + public long getTimeToLiveForCompactions(Configuration conf, + ColumnFamilyDescriptor columnDescriptor, ScanOptions options, long maxLookbackTtl) { + long ttlConfigured = columnDescriptor.getTimeToLive(); + long ttlInMillis = ttlConfigured * 1000; + if (isMaxLookbackTimeEnabled(maxLookbackTtl)) { + if ( + ttlConfigured == HConstants.FOREVER + && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE + ) { + // If user configured default TTL(FOREVER) and keep deleted cells to false or + // TTL then to remove unwanted delete markers we should change ttl to max lookback age + ttlInMillis = maxLookbackTtl; + } else { + // if there is a TTL, use TTL instead of max lookback age. 
+ // Max lookback age should be more recent or equal to TTL + ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl); + } } - public static boolean isPhoenixTableTTLEnabled(Configuration conf) { - return conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + return ttlInMillis; + } + + public void setScanOptionsForFlushesAndCompactionsWhenPhoenixTTLIsDisabled(Configuration conf, + ScanOptions options, final Store store, ScanType type, long maxLookbackAge) { + ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor(); + options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor, options, maxLookbackAge)); + options.setKeepDeletedCells(getKeepDeletedCells(options, type)); + options.setMaxVersions(Integer.MAX_VALUE); + options.setMinVersions(getMinVersions(options, cfDescriptor)); + } + + public static boolean isMaxLookbackTimeEnabled(Configuration conf) { + return isMaxLookbackTimeEnabled( + conf.getLong(BaseScannerRegionObserverConstants.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, + BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE)); + } + + public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime) { + return maxLookbackTime > 0L; + } + + private static long getMaxLookbackAge(ObserverContext c) { + TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); + String fullTableName = tableName.getNameAsString(); + Configuration conf = c.getEnvironment().getConfiguration(); + PTable table; + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(conf).unwrap(PhoenixConnection.class)) { + table = conn.getTableNoCache(fullTableName); + } catch (SQLException e) { + if (e instanceof TableNotFoundException) { + LOGGER.debug("Ignoring HBase table that is not a Phoenix table: {}", fullTableName); + // non-Phoenix HBase tables won't be found, do nothing + } else { + LOGGER.error("Unable to fetch table level max lookback age for {}", fullTableName, e); + } + return MetaDataUtil.getMaxLookbackAge(conf, null); } + return MetaDataUtil.getMaxLookbackAge(conf, table.getMaxLookbackAge()); + } + + public static boolean isPhoenixTableTTLEnabled(Configuration conf) { + return conf.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CDCGlobalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CDCGlobalIndexRegionScanner.java index f0c20f1ccf8..ba2d02fb4ee 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CDCGlobalIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CDCGlobalIndexRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,15 @@ */ package org.apache.phoenix.coprocessor; -import com.fasterxml.jackson.core.JsonProcessingException; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; +import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderFactory; @@ -45,218 +53,194 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; - -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; -import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; +import com.fasterxml.jackson.core.JsonProcessingException; public class CDCGlobalIndexRegionScanner extends UncoveredGlobalIndexRegionScanner { - private static final Logger LOGGER = - LoggerFactory.getLogger(CDCGlobalIndexRegionScanner.class); - private CDCTableInfo cdcDataTableInfo; - private CDCChangeBuilder changeBuilder; - - public CDCGlobalIndexRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final Scan dataTableScan, - final TupleProjector tupleProjector, - final IndexMaintainer indexMaintainer, - final byte[][] viewConstants, - final ImmutableBytesWritable ptr, - final long pageSizeMs, - final long queryLimit) throws IOException { - super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, - viewConstants, ptr, pageSizeMs, queryLimit); - CDCUtil.setupScanForCDC(dataTableScan); - cdcDataTableInfo = CDCTableInfo.createFromProto(CDCInfoProtos.CDCTableDef - .parseFrom(scan.getAttribute(CDC_DATA_TABLE_DEF))); - changeBuilder = new CDCChangeBuilder(cdcDataTableInfo); - } + private static final Logger LOGGER = LoggerFactory.getLogger(CDCGlobalIndexRegionScanner.class); + private CDCTableInfo cdcDataTableInfo; + private CDCChangeBuilder changeBuilder; - @Override - protected Scan prepareDataTableScan(Collection dataRowKeys) throws IOException { - //TODO: Get Timerange from the start row and end row of the index scan object - // and set it in the datatable scan object. 
-// if (scan.getStartRow().length == 8) { -// startTimeRange = PLong.INSTANCE.getCodec().decodeLong( -// scan.getStartRow(), 0, SortOrder.getDefault()); -// } -// if (scan.getStopRow().length == 8) { -// stopTimeRange = PLong.INSTANCE.getCodec().decodeLong( -// scan.getStopRow(), 0, SortOrder.getDefault()); -// } - return CDCUtil.setupScanForCDC(prepareDataTableScan(dataRowKeys, true)); - } + public CDCGlobalIndexRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, final Scan dataTableScan, + final TupleProjector tupleProjector, final IndexMaintainer indexMaintainer, + final byte[][] viewConstants, final ImmutableBytesWritable ptr, final long pageSizeMs, + final long queryLimit) throws IOException { + super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, + viewConstants, ptr, pageSizeMs, queryLimit); + CDCUtil.setupScanForCDC(dataTableScan); + cdcDataTableInfo = CDCTableInfo + .createFromProto(CDCInfoProtos.CDCTableDef.parseFrom(scan.getAttribute(CDC_DATA_TABLE_DEF))); + changeBuilder = new CDCChangeBuilder(cdcDataTableInfo); + } - protected boolean getNextCoveredIndexRow(List result) throws IOException { - if (indexRowIterator.hasNext()) { - List indexRow = indexRowIterator.next(); - // firstCell: Picking the earliest cell in the index row so that - // timestamp of the cell and the row will be same. - Cell firstIndexCell = indexRow.get(indexRow.size() - 1); - byte[] indexRowKey = ImmutableBytesPtr.cloneCellRowIfNecessary(firstIndexCell); - ImmutableBytesPtr dataRowKey = new ImmutableBytesPtr( - indexToDataRowKeyMap.get(indexRowKey)); - Result dataRow = dataRows.get(dataRowKey); - Long changeTS = firstIndexCell.getTimestamp(); - TupleProjector dataTableProjector = cdcDataTableInfo.getDataTableProjector(); - Expression[] expressions = dataTableProjector != null ? - dataTableProjector.getExpressions() : null; - boolean isSingleCell = dataTableProjector != null; - byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo( - cdcDataTableInfo.getQualifierEncodingScheme()).getFirst(); - changeBuilder.initChange(changeTS); - try { - if (dataRow != null) { - int curColumnNum = 0; - List cdcColumnInfoList = - this.cdcDataTableInfo.getColumnInfoList(); - cellLoop: - for (Cell cell : dataRow.rawCells()) { - if (! changeBuilder.isChangeRelevant(cell)) { - continue; - } - byte[] cellFam = ImmutableBytesPtr.cloneCellFamilyIfNecessary(cell); - byte[] cellQual = ImmutableBytesPtr.cloneCellQualifierIfNecessary(cell); - if (cell.getType() == Cell.Type.DeleteFamily) { - if (changeTS == cell.getTimestamp()) { - changeBuilder.markAsDeletionEvent(); - } else if (changeTS > cell.getTimestamp() - && changeBuilder.getLastDeletedTimestamp() == 0L) { - // Cells with timestamp less than the lowerBoundTsForPreImage - // can not be part of the PreImage as there is a Delete Family - // marker after that. - changeBuilder.setLastDeletedTimestamp(cell.getTimestamp()); - } - } else if ((cell.getType() == Cell.Type.DeleteColumn - || cell.getType() == Cell.Type.Put) - && !Arrays.equals(cellQual, emptyCQ)) { - if (! changeBuilder.isChangeRelevant(cell)) { - // We don't need to build the change image, just skip it. - continue; - } - // In this case, cell is the row, meaning we loop over rows.. 
- if (isSingleCell) { - while (curColumnNum < cdcColumnInfoList.size()) { - boolean hasValue = dataTableProjector.getSchema().extractValue( - cell, (SingleCellColumnExpression) - expressions[curColumnNum], ptr); - if (hasValue) { - Object cellValue = getColumnValue(ptr.get(), - ptr.getOffset(), ptr.getLength(), - cdcColumnInfoList.get(curColumnNum).getColumnType()); - changeBuilder.registerChange(cell, curColumnNum, cellValue); - } - ++curColumnNum; - } - break cellLoop; - } - while (true) { - CDCTableInfo.CDCColumnInfo currentColumnInfo = - cdcColumnInfoList.get(curColumnNum); - int columnComparisonResult = CDCUtil.compareCellFamilyAndQualifier( - cellFam, cellQual, - currentColumnInfo.getColumnFamily(), - currentColumnInfo.getColumnQualifier()); - if (columnComparisonResult > 0) { - if (++curColumnNum >= cdcColumnInfoList.size()) { - // Have no more column definitions, so the rest of the cells - // must be for dropped columns and so can be ignored. - break cellLoop; - } - // Continue looking for the right column definition - // for this cell. - continue; - } else if (columnComparisonResult < 0) { - // We didn't find a column definition for this cell, ignore the - // current cell but continue working on the rest of the cells. - continue cellLoop; - } + @Override + protected Scan prepareDataTableScan(Collection dataRowKeys) throws IOException { + // TODO: Get Timerange from the start row and end row of the index scan object + // and set it in the datatable scan object. + // if (scan.getStartRow().length == 8) { + // startTimeRange = PLong.INSTANCE.getCodec().decodeLong( + // scan.getStartRow(), 0, SortOrder.getDefault()); + // } + // if (scan.getStopRow().length == 8) { + // stopTimeRange = PLong.INSTANCE.getCodec().decodeLong( + // scan.getStopRow(), 0, SortOrder.getDefault()); + // } + return CDCUtil.setupScanForCDC(prepareDataTableScan(dataRowKeys, true)); + } - // else, found the column definition. - Object cellValue = cell.getType() == Cell.Type.DeleteColumn ? null - : getColumnValue(cell, cdcColumnInfoList.get(curColumnNum) - .getColumnType()); - changeBuilder.registerChange(cell, curColumnNum, cellValue); - // Done processing the current cell, check the next cell. - break; - } - } - } - if (changeBuilder.isNonEmptyEvent()) { - Result cdcRow = getCDCImage(indexRowKey, firstIndexCell); - if (cdcRow != null && tupleProjector != null) { - if (firstIndexCell.getType() == Cell.Type.DeleteFamily) { - // result is of type EncodedColumnQualiferCellsList for queries with - // Order by clause. It fails when Delete Family cell is added to it - // as it expects column qualifier bytes which is not available. - // Adding empty PUT cell as a placeholder. - result.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(indexRowKey) - .setFamily(ImmutableBytesPtr.cloneCellFamilyIfNecessary( - firstIndexCell)) - .setQualifier(indexMaintainer.getEmptyKeyValueQualifier()) - .setTimestamp(firstIndexCell.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(EMPTY_BYTE_ARRAY).build()); - } else { - result.add(firstIndexCell); - } - IndexUtil.addTupleAsOneCell(result, new ResultTuple(cdcRow), - tupleProjector, ptr); - } else { - result.clear(); - } - } else { - result.clear(); - } - } else { - result.clear(); + protected boolean getNextCoveredIndexRow(List result) throws IOException { + if (indexRowIterator.hasNext()) { + List indexRow = indexRowIterator.next(); + // firstCell: Picking the earliest cell in the index row so that + // timestamp of the cell and the row will be same. 
+ Cell firstIndexCell = indexRow.get(indexRow.size() - 1); + byte[] indexRowKey = ImmutableBytesPtr.cloneCellRowIfNecessary(firstIndexCell); + ImmutableBytesPtr dataRowKey = new ImmutableBytesPtr(indexToDataRowKeyMap.get(indexRowKey)); + Result dataRow = dataRows.get(dataRowKey); + Long changeTS = firstIndexCell.getTimestamp(); + TupleProjector dataTableProjector = cdcDataTableInfo.getDataTableProjector(); + Expression[] expressions = + dataTableProjector != null ? dataTableProjector.getExpressions() : null; + boolean isSingleCell = dataTableProjector != null; + byte[] emptyCQ = EncodedColumnsUtil + .getEmptyKeyValueInfo(cdcDataTableInfo.getQualifierEncodingScheme()).getFirst(); + changeBuilder.initChange(changeTS); + try { + if (dataRow != null) { + int curColumnNum = 0; + List cdcColumnInfoList = + this.cdcDataTableInfo.getColumnInfoList(); + cellLoop: for (Cell cell : dataRow.rawCells()) { + if (!changeBuilder.isChangeRelevant(cell)) { + continue; + } + byte[] cellFam = ImmutableBytesPtr.cloneCellFamilyIfNecessary(cell); + byte[] cellQual = ImmutableBytesPtr.cloneCellQualifierIfNecessary(cell); + if (cell.getType() == Cell.Type.DeleteFamily) { + if (changeTS == cell.getTimestamp()) { + changeBuilder.markAsDeletionEvent(); + } else if ( + changeTS > cell.getTimestamp() && changeBuilder.getLastDeletedTimestamp() == 0L + ) { + // Cells with timestamp less than the lowerBoundTsForPreImage + // can not be part of the PreImage as there is a Delete Family + // marker after that. + changeBuilder.setLastDeletedTimestamp(cell.getTimestamp()); + } + } else if ( + (cell.getType() == Cell.Type.DeleteColumn || cell.getType() == Cell.Type.Put) + && !Arrays.equals(cellQual, emptyCQ) + ) { + if (!changeBuilder.isChangeRelevant(cell)) { + // We don't need to build the change image, just skip it. + continue; + } + // In this case, cell is the row, meaning we loop over rows.. + if (isSingleCell) { + while (curColumnNum < cdcColumnInfoList.size()) { + boolean hasValue = dataTableProjector.getSchema().extractValue(cell, + (SingleCellColumnExpression) expressions[curColumnNum], ptr); + if (hasValue) { + Object cellValue = getColumnValue(ptr.get(), ptr.getOffset(), ptr.getLength(), + cdcColumnInfoList.get(curColumnNum).getColumnType()); + changeBuilder.registerChange(cell, curColumnNum, cellValue); + } + ++curColumnNum; + } + break cellLoop; + } + while (true) { + CDCTableInfo.CDCColumnInfo currentColumnInfo = cdcColumnInfoList.get(curColumnNum); + int columnComparisonResult = + CDCUtil.compareCellFamilyAndQualifier(cellFam, cellQual, + currentColumnInfo.getColumnFamily(), currentColumnInfo.getColumnQualifier()); + if (columnComparisonResult > 0) { + if (++curColumnNum >= cdcColumnInfoList.size()) { + // Have no more column definitions, so the rest of the cells + // must be for dropped columns and so can be ignored. + break cellLoop; + } + // Continue looking for the right column definition + // for this cell. + continue; + } else if (columnComparisonResult < 0) { + // We didn't find a column definition for this cell, ignore the + // current cell but continue working on the rest of the cells. + continue cellLoop; } - return true; - } catch (Throwable e) { - LOGGER.error("Exception in UncoveredIndexRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - throw e; + // else, found the column definition. + Object cellValue = cell.getType() == Cell.Type.DeleteColumn + ? 
null + : getColumnValue(cell, cdcColumnInfoList.get(curColumnNum).getColumnType()); + changeBuilder.registerChange(cell, curColumnNum, cellValue); + // Done processing the current cell, check the next cell. + break; + } } + } + if (changeBuilder.isNonEmptyEvent()) { + Result cdcRow = getCDCImage(indexRowKey, firstIndexCell); + if (cdcRow != null && tupleProjector != null) { + if (firstIndexCell.getType() == Cell.Type.DeleteFamily) { + // result is of type EncodedColumnQualiferCellsList for queries with + // Order by clause. It fails when Delete Family cell is added to it + // as it expects column qualifier bytes which is not available. + // Adding empty PUT cell as a placeholder. + result + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(indexRowKey) + .setFamily(ImmutableBytesPtr.cloneCellFamilyIfNecessary(firstIndexCell)) + .setQualifier(indexMaintainer.getEmptyKeyValueQualifier()) + .setTimestamp(firstIndexCell.getTimestamp()).setType(Cell.Type.Put) + .setValue(EMPTY_BYTE_ARRAY).build()); + } else { + result.add(firstIndexCell); + } + IndexUtil.addTupleAsOneCell(result, new ResultTuple(cdcRow), tupleProjector, ptr); + } else { + result.clear(); + } + } else { + result.clear(); + } + } else { + result.clear(); } - return false; - } - private Result getCDCImage(byte[] indexRowKey, Cell firstCell) throws JsonProcessingException { - byte[] value = JacksonUtil.getObjectWriter(HashMap.class).writeValueAsBytes( - changeBuilder.buildCDCEvent()); - CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - Result cdcRow = Result.create(Arrays.asList(builder - .setRow(indexRowKey) - .setFamily(ImmutableBytesPtr.cloneCellFamilyIfNecessary(firstCell)) - .setQualifier(cdcDataTableInfo.getCdcJsonColQualBytes()) - .setTimestamp(changeBuilder.getChangeTimestamp()) - .setValue(value) - .setType(Cell.Type.Put) - .build())); - return cdcRow; + return true; + } catch (Throwable e) { + LOGGER.error("Exception in UncoveredIndexRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + throw e; + } } + return false; + } - private Object getColumnValue(Cell cell, PDataType dataType) { - return getColumnValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - dataType); - } + private Result getCDCImage(byte[] indexRowKey, Cell firstCell) throws JsonProcessingException { + byte[] value = + JacksonUtil.getObjectWriter(HashMap.class).writeValueAsBytes(changeBuilder.buildCDCEvent()); + CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + Result cdcRow = Result.create(Arrays.asList( + builder.setRow(indexRowKey).setFamily(ImmutableBytesPtr.cloneCellFamilyIfNecessary(firstCell)) + .setQualifier(cdcDataTableInfo.getCdcJsonColQualBytes()) + .setTimestamp(changeBuilder.getChangeTimestamp()).setValue(value).setType(Cell.Type.Put) + .build())); + return cdcRow; + } - private Object getColumnValue(byte[] cellValue, int offset, int length, PDataType dataType) { - Object value; - if (CDCUtil.isBinaryType(dataType)) { - value = ImmutableBytesPtr.copyBytesIfNecessary(cellValue, offset, length); - } else { - value = dataType.toObject(cellValue, offset, length); - } - return CDCUtil.getColumnEncodedValue(value, dataType); + private Object getColumnValue(Cell cell, PDataType dataType) { + return getColumnValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + dataType); + } + + private Object getColumnValue(byte[] cellValue, int offset, int length, PDataType dataType) { + Object value; + if 
(CDCUtil.isBinaryType(dataType)) { + value = ImmutableBytesPtr.copyBytesIfNecessary(cellValue, offset, length); + } else { + value = dataType.toObject(cellValue, offset, length); } + return CDCUtil.getColumnEncodedValue(value, dataType); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java index 90f516c170d..b04f4d0bf56 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ChildLinkMetaDataEndpoint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,17 +17,20 @@ */ package org.apache.phoenix.coprocessor; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; +import static org.apache.phoenix.coprocessor.MetaDataEndpointImpl.mutateRowsWithLocks; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.CreateViewAddChildLinkRequest; import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.ChildLinkMetaDataService; +import org.apache.phoenix.coprocessor.generated.ChildLinkMetaDataProtos.CreateViewAddChildLinkRequest; import org.apache.phoenix.coprocessor.generated.MetaDataProtos; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; @@ -40,75 +43,74 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import static org.apache.phoenix.coprocessor.MetaDataEndpointImpl.mutateRowsWithLocks; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; /** - * Endpoint co-processor through which Phoenix metadata mutations for SYSTEM.CHILD_LINK flow. - * The {@code parent->child } links ({@link org.apache.phoenix.schema.PTable.LinkType#CHILD_TABLE}) - * are stored in the SYSTEM.CHILD_LINK table. + * Endpoint co-processor through which Phoenix metadata mutations for SYSTEM.CHILD_LINK flow. The + * {@code parent->child } links ({@link org.apache.phoenix.schema.PTable.LinkType#CHILD_TABLE}) are + * stored in the SYSTEM.CHILD_LINK table. 
*/ -public class ChildLinkMetaDataEndpoint extends ChildLinkMetaDataService implements RegionCoprocessor { +public class ChildLinkMetaDataEndpoint extends ChildLinkMetaDataService + implements RegionCoprocessor { - private static final Logger LOGGER = LoggerFactory.getLogger(ChildLinkMetaDataEndpoint.class); - private RegionCoprocessorEnvironment env; - private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; - private boolean accessCheckEnabled; + private static final Logger LOGGER = LoggerFactory.getLogger(ChildLinkMetaDataEndpoint.class); + private RegionCoprocessorEnvironment env; + private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; + private boolean accessCheckEnabled; - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment) env; - } else { - throw new CoprocessorException("Must be loaded on a table region!"); - } - this.phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env); - this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment) env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); } + this.phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env); + this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + } - @Override - public Iterable getServices() { - return Collections.singleton(this); - } + @Override + public Iterable getServices() { + return Collections.singleton(this); + } - @Override - public void createViewAddChildLink(RpcController controller, - CreateViewAddChildLinkRequest request, RpcCallback done) { + @Override + public void createViewAddChildLink(RpcController controller, + CreateViewAddChildLinkRequest request, RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - try { - List childLinkMutations = ProtobufUtil.getMutations(request); - if (childLinkMutations.isEmpty()) { - done.run(builder.build()); - return; - } - byte[][] rowKeyMetaData = new byte[3][]; - MetaDataUtil.getTenantIdAndSchemaAndTableName(childLinkMutations, rowKeyMetaData); - byte[] parentSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] parentTableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - String fullparentTableName = SchemaUtil.getTableName(parentSchemaName, parentTableName); + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + try { + List childLinkMutations = ProtobufUtil.getMutations(request); + if (childLinkMutations.isEmpty()) { + done.run(builder.build()); + return; + } + byte[][] rowKeyMetaData = new byte[3][]; + MetaDataUtil.getTenantIdAndSchemaAndTableName(childLinkMutations, rowKeyMetaData); + byte[] parentSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] parentTableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + String fullparentTableName = SchemaUtil.getTableName(parentSchemaName, parentTableName); - getCoprocessorHost().preCreateViewAddChildLink(fullparentTableName); + getCoprocessorHost().preCreateViewAddChildLink(fullparentTableName); - // From 4.15 
the parent->child links are stored in a separate table SYSTEM.CHILD_LINK - mutateRowsWithLocks(this.accessCheckEnabled, this.env.getRegion(), childLinkMutations, - Collections.emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + // From 4.15 the parent->child links are stored in a separate table SYSTEM.CHILD_LINK + mutateRowsWithLocks(this.accessCheckEnabled, this.env.getRegion(), childLinkMutations, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); - } catch (Throwable t) { - LOGGER.error("Unable to write mutations to " + - PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME, t); - builder.setReturnCode(MetaDataProtos.MutationCode.UNABLE_TO_CREATE_CHILD_LINK); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - } - } + } catch (Throwable t) { + LOGGER.error("Unable to write mutations to " + PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME, + t); + builder.setReturnCode(MetaDataProtos.MutationCode.UNABLE_TO_CREATE_CHILD_LINK); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + } + } - private PhoenixMetaDataCoprocessorHost getCoprocessorHost() { - return phoenixAccessCoprocessorHost; - } + private PhoenixMetaDataCoprocessorHost getCoprocessorHost() { + return phoenixAccessCoprocessorHost; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ColumnMutator.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ColumnMutator.java index 9dacff345e4..edda0bd53b3 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ColumnMutator.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ColumnMutator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,10 @@ */ package org.apache.phoenix.coprocessor; +import java.io.IOException; +import java.sql.SQLException; +import java.util.List; + import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.regionserver.Region; @@ -26,42 +30,35 @@ import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PTable; -import java.io.IOException; -import java.sql.SQLException; -import java.util.List; - public interface ColumnMutator { - enum MutateColumnType { - ADD_COLUMN, DROP_COLUMN - } + enum MutateColumnType { + ADD_COLUMN, + DROP_COLUMN + } - /** - * Validates the column to be added or dropped against all child views - */ - MetaDataProtocol.MetaDataMutationResult validateWithChildViews(PTable table, List childViews, - List tableMetadata, - byte[] schemaName, byte[] tableName) - throws IOException, SQLException; + /** + * Validates the column to be added or dropped against all child views + */ + MetaDataProtocol.MetaDataMutationResult validateWithChildViews(PTable table, + List childViews, List tableMetadata, byte[] schemaName, byte[] tableName) + throws IOException, SQLException; - /** - * Validates that the column being added or dropped against the table or view itself - * Adds to the list of mutations required to add or drop columns - */ - MetaDataProtocol.MetaDataMutationResult validateAndAddMetadata(PTable table, byte[][] rowKeyMetaData, - List tableMetadata, Region region, - List invalidateList, - List locks, - long clientTimeStamp, - long clientVersion, - ExtendedCellBuilder extendedCellBuilder, - boolean isAddingOrDroppingColumns) - throws IOException, SQLException; + /** + * Validates that the column being added or dropped against the table or view itself Adds to the + * list of mutations required to add or drop columns + */ + MetaDataProtocol.MetaDataMutationResult validateAndAddMetadata(PTable table, + byte[][] rowKeyMetaData, List tableMetadata, Region region, + List invalidateList, List locks, long clientTimeStamp, + long clientVersion, ExtendedCellBuilder extendedCellBuilder, boolean isAddingOrDroppingColumns) + throws IOException, SQLException; - /** - * @return list of pair of table and column being dropped, used to drop any indexes that require the column - */ - List> getTableAndDroppedColumnPairs(); + /** + * Returns list of pair of table and column being dropped, used to drop any indexes that require + * the column + */ + List> getTableAndDroppedColumnPairs(); - MutateColumnType getMutateColumnType(); + MutateColumnType getMutateColumnType(); } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java index 241ea00f36b..d86d29237a2 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/CompactionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,20 +17,37 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.GLOBAL_INDEXES; +import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.GLOBAL_VIEWS; +import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.TENANT_INDEXES; +import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.TENANT_VIEWS; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAMESPACE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; +import static org.apache.phoenix.query.QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX; +import static org.apache.phoenix.query.QueryServices.PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT; +import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; + import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; -import java.util.Iterator; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedList; +import java.util.List; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -51,7 +68,10 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.coprocessorclient.RowKeyMatcher; import org.apache.phoenix.coprocessorclient.TableInfo; +import org.apache.phoenix.coprocessorclient.TableTTLInfo; +import org.apache.phoenix.coprocessorclient.TableTTLInfoCache; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.apache.phoenix.expression.RowKeyColumnExpression; @@ -61,8 +81,12 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.IllegalDataException; +import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PNameFactory; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.schema.RowKeyValueAccessor; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; @@ -71,14 +95,9 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.CDCUtil; -import org.apache.phoenix.util.PhoenixRuntime; -import org.apache.phoenix.coprocessorclient.RowKeyMatcher; -import 
org.apache.phoenix.coprocessorclient.TableTTLInfoCache; -import org.apache.phoenix.coprocessorclient.TableTTLInfo; -import org.apache.phoenix.schema.PTableType; -import org.apache.phoenix.schema.RowKeyValueAccessor; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.MetaDataUtil; +import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.SchemaUtil; @@ -86,2560 +105,2347 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.GLOBAL_INDEXES; -import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.GLOBAL_VIEWS; -import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.TENANT_INDEXES; -import static org.apache.phoenix.coprocessor.CompactionScanner.MatcherType.TENANT_VIEWS; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAMESPACE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; -import static org.apache.phoenix.query.QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX; -import static org.apache.phoenix.query.QueryServices.PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT; -import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.phoenix.schema.PColumn; -import org.apache.phoenix.schema.PTable; - /** * The store scanner that implements compaction for Phoenix. Phoenix coproc overrides the scan * options so that HBase store scanner retains all cells during compaction and flushes. Then this * store scanner decides which cells to retain. This is required to ensure rows do not expire - * partially and to preserve all cells within Phoenix max lookback window. - * - * The compaction process is optimized for Phoenix. This optimization assumes that - * . A given delete family or delete family version marker is inserted to all column families - * . A given delete family version marker always delete a full version of a row. Please note - * delete family version markers are used only on index tables where mutations are always - * full row mutations. - * - * During major compaction, minor compaction and memstore flush, all cells (and delete markers) - * that are visible through the max lookback window are retained. Outside the max lookback window, - * (1) extra put cell versions, (2) delete markers and deleted cells that are not supposed to be - * kept (by the KeepDeletedCell option), and (3) expired cells are removed during major compaction. - * During flushes and minor compaction, expired cells and delete markers are not removed however - * deleted cells that are not supposed to be kept (by the KeepDeletedCell option) and extra put - * cell versions are removed. - * + * partially and to preserve all cells within Phoenix max lookback window. The compaction process is + * optimized for Phoenix. This optimization assumes that . A given delete family or delete family + * version marker is inserted to all column families . 
A given delete family version marker always + * delete a full version of a row. Please note delete family version markers are used only on index + * tables where mutations are always full row mutations. During major compaction, minor compaction + * and memstore flush, all cells (and delete markers) that are visible through the max lookback + * window are retained. Outside the max lookback window, (1) extra put cell versions, (2) delete + * markers and deleted cells that are not supposed to be kept (by the KeepDeletedCell option), and + * (3) expired cells are removed during major compaction. During flushes and minor compaction, + * expired cells and delete markers are not removed however deleted cells that are not supposed to + * be kept (by the KeepDeletedCell option) and extra put cell versions are removed. */ public class CompactionScanner implements InternalScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(CompactionScanner.class); - public static final String SEPARATOR = ":"; - private final InternalScanner storeScanner; - private final Region region; - private final Store store; - private final RegionCoprocessorEnvironment env; - private long maxLookbackWindowStart; - private final long maxLookbackInMillis; - private int minVersion; - private int maxVersion; - private final boolean emptyCFStore; - private final boolean localIndex; - private final int familyCount; - private KeepDeletedCells keepDeletedCells; - private long compactionTime; - private final byte[] emptyCF; - private final byte[] emptyCQ; - private final byte[] storeColumnFamily; - private final String tableName; - private final String columnFamilyName; - private static Map maxLookbackMap = new ConcurrentHashMap<>(); - private PhoenixLevelRowCompactor phoenixLevelRowCompactor; - private HBaseLevelRowCompactor hBaseLevelRowCompactor; - private boolean major; - private long inputCellCount = 0; - private long outputCellCount = 0; - private boolean phoenixLevelOnly = false; - private boolean isCDCIndex; - - // Only for forcing minor compaction while testing - private static boolean forceMinorCompaction = false; - - public CompactionScanner(RegionCoprocessorEnvironment env, - Store store, - InternalScanner storeScanner, - long maxLookbackAgeInMillis, - boolean major, - boolean keepDeleted, - PTable table) throws IOException { - this.storeScanner = storeScanner; - this.region = env.getRegion(); - this.store = store; - this.env = env; - // Empty column family and qualifier are always needed to compute which all empty cells to retain - // even during minor compactions. If required empty cells are not retained during - // minor compactions then we can run into the risk of partial row expiry on next major compaction. - this.emptyCF = SchemaUtil.getEmptyColumnFamily(table); - this.emptyCQ = SchemaUtil.getEmptyColumnQualifier(table); - compactionTime = EnvironmentEdgeManager.currentTimeMillis(); - columnFamilyName = store.getColumnFamilyName(); - storeColumnFamily = columnFamilyName.getBytes(); - tableName = region.getRegionInfo().getTable().getNameAsString(); - String dataTableName = table.getName().toString(); - Long overriddenMaxLookback = maxLookbackMap.get(tableName + SEPARATOR + columnFamilyName); - this.maxLookbackInMillis = overriddenMaxLookback == null ? - maxLookbackAgeInMillis : Math.max(maxLookbackAgeInMillis, overriddenMaxLookback); - // The oldest scn is current time - maxLookbackInMillis. Phoenix sets the scan time range - // for scn queries [0, scn). 
This means that the maxlookback size should be - // maxLookbackInMillis + 1 so that the oldest scn does not return empty row - this.maxLookbackWindowStart = this.maxLookbackInMillis == 0 ? compactionTime : compactionTime - (this.maxLookbackInMillis + 1); - ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor(); - this.major = major && ! forceMinorCompaction; - this.minVersion = cfd.getMinVersions(); - this.maxVersion = cfd.getMaxVersions(); - this.keepDeletedCells = keepDeleted ? KeepDeletedCells.TTL : cfd.getKeepDeletedCells(); - familyCount = region.getTableDescriptor().getColumnFamilies().length; - localIndex = columnFamilyName.startsWith(LOCAL_INDEX_COLUMN_FAMILY_PREFIX); - emptyCFStore = familyCount == 1 || columnFamilyName.equals(Bytes.toString(emptyCF)) - || localIndex; - - isCDCIndex = table != null ? CDCUtil.isCDCIndex(table) : false; - // Initialize the tracker that computes the TTL for the compacting table. - // The TTL tracker can be - // simple (one single TTL for the table) when the compacting table is not Partitioned - // complex when the TTL can vary per row when the compacting table is Partitioned. - TTLTracker - ttlTracker = - this.major ? - createTTLTrackerFor(env, store, table): - new TableTTLTrackerForFlushesAndMinor(tableName); - - phoenixLevelRowCompactor = new PhoenixLevelRowCompactor(ttlTracker); - hBaseLevelRowCompactor = new HBaseLevelRowCompactor(ttlTracker); - - LOGGER.info("Starting CompactionScanner for table " + tableName + " store " - + columnFamilyName + (this.major ? " major " : " not major ") + "compaction ttl " - + ttlTracker.getRowContext().getTTL() + "ms " + "max lookback " + this.maxLookbackInMillis + "ms"); - LOGGER.info(String.format("CompactionScanner params:- (" + - "physical-data-tablename = %s, compaction-tablename = %s, region = %s, " + - "start-key = %s, end-key = %s, " + - "emptyCF = %s, emptyCQ = %s, " + - "minVersion = %d, maxVersion = %d, keepDeletedCells = %s, " + - "familyCount = %d, localIndex = %s, emptyCFStore = %s, " + - "compactionTime = %d, maxLookbackWindowStart = %d, maxLookbackInMillis = %d, major = %s)", - dataTableName, tableName, region.getRegionInfo().getEncodedName(), - Bytes.toStringBinary(region.getRegionInfo().getStartKey()), - Bytes.toStringBinary(region.getRegionInfo().getEndKey()), - Bytes.toString(this.emptyCF), Bytes.toString(emptyCQ), - this.minVersion, this.maxVersion, this.keepDeletedCells.name(), - this.familyCount, this.localIndex, this.emptyCFStore, - compactionTime, maxLookbackWindowStart, maxLookbackInMillis, this.major)); - - } - - @VisibleForTesting - public static void setForceMinorCompaction(boolean doMinorCompaction) { - forceMinorCompaction = doMinorCompaction; - } - - @VisibleForTesting - public static boolean getForceMinorCompaction() { - return forceMinorCompaction; + private static final Logger LOGGER = LoggerFactory.getLogger(CompactionScanner.class); + public static final String SEPARATOR = ":"; + private final InternalScanner storeScanner; + private final Region region; + private final Store store; + private final RegionCoprocessorEnvironment env; + private long maxLookbackWindowStart; + private final long maxLookbackInMillis; + private int minVersion; + private int maxVersion; + private final boolean emptyCFStore; + private final boolean localIndex; + private final int familyCount; + private KeepDeletedCells keepDeletedCells; + private long compactionTime; + private final byte[] emptyCF; + private final byte[] emptyCQ; + private final byte[] storeColumnFamily; + private final String 
tableName; + private final String columnFamilyName; + private static Map maxLookbackMap = new ConcurrentHashMap<>(); + private PhoenixLevelRowCompactor phoenixLevelRowCompactor; + private HBaseLevelRowCompactor hBaseLevelRowCompactor; + private boolean major; + private long inputCellCount = 0; + private long outputCellCount = 0; + private boolean phoenixLevelOnly = false; + private boolean isCDCIndex; + + // Only for forcing minor compaction while testing + private static boolean forceMinorCompaction = false; + + public CompactionScanner(RegionCoprocessorEnvironment env, Store store, + InternalScanner storeScanner, long maxLookbackAgeInMillis, boolean major, boolean keepDeleted, + PTable table) throws IOException { + this.storeScanner = storeScanner; + this.region = env.getRegion(); + this.store = store; + this.env = env; + // Empty column family and qualifier are always needed to compute which all empty cells to + // retain + // even during minor compactions. If required empty cells are not retained during + // minor compactions then we can run into the risk of partial row expiry on next major + // compaction. + this.emptyCF = SchemaUtil.getEmptyColumnFamily(table); + this.emptyCQ = SchemaUtil.getEmptyColumnQualifier(table); + compactionTime = EnvironmentEdgeManager.currentTimeMillis(); + columnFamilyName = store.getColumnFamilyName(); + storeColumnFamily = columnFamilyName.getBytes(); + tableName = region.getRegionInfo().getTable().getNameAsString(); + String dataTableName = table.getName().toString(); + Long overriddenMaxLookback = maxLookbackMap.get(tableName + SEPARATOR + columnFamilyName); + this.maxLookbackInMillis = overriddenMaxLookback == null + ? maxLookbackAgeInMillis + : Math.max(maxLookbackAgeInMillis, overriddenMaxLookback); + // The oldest scn is current time - maxLookbackInMillis. Phoenix sets the scan time range + // for scn queries [0, scn). This means that the maxlookback size should be + // maxLookbackInMillis + 1 so that the oldest scn does not return empty row + this.maxLookbackWindowStart = this.maxLookbackInMillis == 0 + ? compactionTime + : compactionTime - (this.maxLookbackInMillis + 1); + ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor(); + this.major = major && !forceMinorCompaction; + this.minVersion = cfd.getMinVersions(); + this.maxVersion = cfd.getMaxVersions(); + this.keepDeletedCells = keepDeleted ? KeepDeletedCells.TTL : cfd.getKeepDeletedCells(); + familyCount = region.getTableDescriptor().getColumnFamilies().length; + localIndex = columnFamilyName.startsWith(LOCAL_INDEX_COLUMN_FAMILY_PREFIX); + emptyCFStore = + familyCount == 1 || columnFamilyName.equals(Bytes.toString(emptyCF)) || localIndex; + + isCDCIndex = table != null ? CDCUtil.isCDCIndex(table) : false; + // Initialize the tracker that computes the TTL for the compacting table. + // The TTL tracker can be + // simple (one single TTL for the table) when the compacting table is not Partitioned + // complex when the TTL can vary per row when the compacting table is Partitioned. + TTLTracker ttlTracker = this.major + ? createTTLTrackerFor(env, store, table) + : new TableTTLTrackerForFlushesAndMinor(tableName); + + phoenixLevelRowCompactor = new PhoenixLevelRowCompactor(ttlTracker); + hBaseLevelRowCompactor = new HBaseLevelRowCompactor(ttlTracker); + + LOGGER.info("Starting CompactionScanner for table " + tableName + " store " + columnFamilyName + + (this.major ? 
" major " : " not major ") + "compaction ttl " + + ttlTracker.getRowContext().getTTL() + "ms " + "max lookback " + this.maxLookbackInMillis + + "ms"); + LOGGER.info(String.format( + "CompactionScanner params:- (" + + "physical-data-tablename = %s, compaction-tablename = %s, region = %s, " + + "start-key = %s, end-key = %s, " + "emptyCF = %s, emptyCQ = %s, " + + "minVersion = %d, maxVersion = %d, keepDeletedCells = %s, " + + "familyCount = %d, localIndex = %s, emptyCFStore = %s, " + + "compactionTime = %d, maxLookbackWindowStart = %d, maxLookbackInMillis = %d, major = %s)", + dataTableName, tableName, region.getRegionInfo().getEncodedName(), + Bytes.toStringBinary(region.getRegionInfo().getStartKey()), + Bytes.toStringBinary(region.getRegionInfo().getEndKey()), Bytes.toString(this.emptyCF), + Bytes.toString(emptyCQ), this.minVersion, this.maxVersion, this.keepDeletedCells.name(), + this.familyCount, this.localIndex, this.emptyCFStore, compactionTime, maxLookbackWindowStart, + maxLookbackInMillis, this.major)); + + } + + @VisibleForTesting + public static void setForceMinorCompaction(boolean doMinorCompaction) { + forceMinorCompaction = doMinorCompaction; + } + + @VisibleForTesting + public static boolean getForceMinorCompaction() { + return forceMinorCompaction; + } + + /** + * Helper method to create TTL tracker for various phoenix data model objects i.e views, view + * indexes ... + */ + private TTLTracker createTTLTrackerFor(RegionCoprocessorEnvironment env, Store store, + PTable baseTable) throws IOException { + + boolean isViewTTLEnabled = + env.getConfiguration().getBoolean(QueryServices.PHOENIX_VIEW_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED); + boolean isLongViewIndexEnabled = + env.getConfiguration().getBoolean(QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED); + + int viewTTLTenantViewsPerScanLimit = -1; + if (isViewTTLEnabled) { + // if view ttl enabled then we need to limit the number of rows scanned + // when querying syscat for views with TTL enabled/set + viewTTLTenantViewsPerScanLimit = + env.getConfiguration().getInt(PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT, + DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT); + } + // If VIEW TTL is not enabled then return TTL tracker for base HBase tables. + // since TTL can be set only at the table level. + if (!isViewTTLEnabled) { + return new NonPartitionedTableTTLTracker(baseTable, store); } - /** - * Helper method to create TTL tracker for various phoenix data model objects - * i.e views, view indexes ... 
- * @param env - * @param store - * @param baseTable - * @return - */ - private TTLTracker createTTLTrackerFor(RegionCoprocessorEnvironment env, - Store store, PTable baseTable) throws IOException { - - boolean isViewTTLEnabled = - env.getConfiguration().getBoolean(QueryServices.PHOENIX_VIEW_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_VIEW_TTL_ENABLED); - boolean isLongViewIndexEnabled = - env.getConfiguration().getBoolean(QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED); - - int viewTTLTenantViewsPerScanLimit = -1; - if (isViewTTLEnabled) { - // if view ttl enabled then we need to limit the number of rows scanned - // when querying syscat for views with TTL enabled/set - viewTTLTenantViewsPerScanLimit = env.getConfiguration().getInt( - PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT, - DEFAULT_PHOENIX_VIEW_TTL_TENANT_VIEWS_PER_SCAN_LIMIT); - } - // If VIEW TTL is not enabled then return TTL tracker for base HBase tables. - // since TTL can be set only at the table level. - if (!isViewTTLEnabled) { - return new NonPartitionedTableTTLTracker(baseTable, store); - } - - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - String compactionTableName = env.getRegion().getRegionInfo().getTable().getNameAsString(); - String schemaName = SchemaUtil.getSchemaNameFromFullName(baseTable.getName().toString()); - String tableName = SchemaUtil.getTableNameFromFullName(baseTable.getName().toString()); - - boolean isSharedIndex = false; - if (compactionTableName.startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) { - isSharedIndex = true; - } + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + String compactionTableName = env.getRegion().getRegionInfo().getTable().getNameAsString(); + String schemaName = SchemaUtil.getSchemaNameFromFullName(baseTable.getName().toString()); + String tableName = SchemaUtil.getTableNameFromFullName(baseTable.getName().toString()); - // NonPartitioned: Salt bucket property can be separately set for base tables and indexes. - // Partitioned: Salt bucket property can be set only for the base table. - // Global views, Tenant views, view indexes inherit the salt bucket property from their - // base table. - boolean isSalted = baseTable.getBucketNum() != null; - try (PhoenixConnection serverConnection = QueryUtil.getConnectionOnServer(new Properties(), - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - - byte[] childLinkTableNameBytes = SchemaUtil.isNamespaceMappingEnabled( - PTableType.SYSTEM, env.getConfiguration()) ? - SYSTEM_CHILD_LINK_NAMESPACE_BYTES : - SYSTEM_CHILD_LINK_NAME_BYTES; - Table childLinkHTable = serverConnection.getQueryServices().getTable(childLinkTableNameBytes); - // If there is at least one child view for this table then it is a partitioned table. - boolean isPartitioned = ViewUtil.hasChildViews( - childLinkHTable, - EMPTY_BYTE_ARRAY, - Bytes.toBytes(schemaName), - Bytes.toBytes(tableName), - currentTime); - - return isPartitioned ? - new PartitionedTableTTLTracker(baseTable, isSalted, isSharedIndex, - isLongViewIndexEnabled, viewTTLTenantViewsPerScanLimit) : - new NonPartitionedTableTTLTracker(baseTable, store); - - } catch (SQLException e) { - throw new IOException(e); - } + boolean isSharedIndex = false; + if (compactionTableName.startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) { + isSharedIndex = true; } - /** - * Any coprocessors within a JVM can extend the max lookback window for a column family - * by calling this static method. 
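// Illustrative usage only -- a hypothetical caller, not part of this patch. CompactionScanner
// keeps per-(table, column family) overrides in a static map keyed as
// "tableName:columnFamilyName", which is what overrideMaxLookback() and getMaxLookbackInMillis()
// read and write. A coprocessor running in the same JVM could extend the window roughly like this
// (the table and column family names below are made up):
CompactionScanner.overrideMaxLookback("MY_SCHEMA.MY_TABLE", "0", 7L * 24 * 60 * 60 * 1000);
// Readers then resolve the effective value, falling back to the supplied default when no
// override has been registered for that table/family pair:
long effectiveMaxLookback = CompactionScanner.getMaxLookbackInMillis("MY_SCHEMA.MY_TABLE", "0", 0L);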
- */ - public static void overrideMaxLookback(String tableName, String columnFamilyName, - long maxLookbackInMillis) { - if (tableName == null || columnFamilyName == null) { - return; - } - Long old = maxLookbackMap.putIfAbsent(tableName + SEPARATOR + columnFamilyName, - maxLookbackInMillis); - if (old != null) { - maxLookbackMap.put(tableName + SEPARATOR + columnFamilyName, maxLookbackInMillis); - } + // NonPartitioned: Salt bucket property can be separately set for base tables and indexes. + // Partitioned: Salt bucket property can be set only for the base table. + // Global views, Tenant views, view indexes inherit the salt bucket property from their + // base table. + boolean isSalted = baseTable.getBucketNum() != null; + try (PhoenixConnection serverConnection = + QueryUtil.getConnectionOnServer(new Properties(), env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + + byte[] childLinkTableNameBytes = + SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, env.getConfiguration()) + ? SYSTEM_CHILD_LINK_NAMESPACE_BYTES + : SYSTEM_CHILD_LINK_NAME_BYTES; + Table childLinkHTable = serverConnection.getQueryServices().getTable(childLinkTableNameBytes); + // If there is at least one child view for this table then it is a partitioned table. + boolean isPartitioned = ViewUtil.hasChildViews(childLinkHTable, EMPTY_BYTE_ARRAY, + Bytes.toBytes(schemaName), Bytes.toBytes(tableName), currentTime); + + return isPartitioned + ? new PartitionedTableTTLTracker(baseTable, isSalted, isSharedIndex, isLongViewIndexEnabled, + viewTTLTenantViewsPerScanLimit) + : new NonPartitionedTableTTLTracker(baseTable, store); + + } catch (SQLException e) { + throw new IOException(e); } - - public static long getMaxLookbackInMillis(String tableName, String columnFamilyName, - long maxLookbackInMillis) { - if (tableName == null || columnFamilyName == null) { - return maxLookbackInMillis; - } - Long value = maxLookbackMap.get(tableName + CompactionScanner.SEPARATOR + columnFamilyName); - return value == null - ? maxLookbackInMillis - : maxLookbackMap.get(tableName + CompactionScanner.SEPARATOR + columnFamilyName); - } - static class CellTimeComparator implements Comparator { - public static final CellTimeComparator COMPARATOR = new CellTimeComparator(); - @Override public int compare(Cell o1, Cell o2) { - long ts1 = o1.getTimestamp(); - long ts2 = o2.getTimestamp(); - if (ts1 == ts2) return 0; - if (ts1 > ts2) return -1; - return 1; - } - - @Override public boolean equals(Object obj) { - return false; - } + } + + /** + * Any coprocessors within a JVM can extend the max lookback window for a column family by calling + * this static method. + */ + public static void overrideMaxLookback(String tableName, String columnFamilyName, + long maxLookbackInMillis) { + if (tableName == null || columnFamilyName == null) { + return; } - /* - private void printRow(List result, String title, boolean sort) { - List row; - if (sort) { - row = new ArrayList<>(result); - Collections.sort(row, CellTimeComparator.COMPARATOR); - } else { - row = result; - } - System.out.println("---- " + title + " ----"); - System.out.println((major ? 
"Major " : "Not major ") - + "compaction time: " + compactionTime); - System.out.println("Max lookback window start time: " + maxLookbackWindowStart); - System.out.println("Max lookback in ms: " + maxLookbackInMillis); - System.out.println("TTL in ms: " + ttlInMillis); - boolean maxLookbackLine = false; - boolean ttlLine = false; - for (Cell cell : row) { - if (!maxLookbackLine && cell.getTimestamp() < maxLookbackWindowStart) { - System.out.println("-----> Max lookback window start time: " + maxLookbackWindowStart); - maxLookbackLine = true; - } else if (!ttlLine && cell.getTimestamp() < ttlWindowStart) { - System.out.println("-----> TTL window start time: " + ttlWindowStart); - ttlLine = true; - } - System.out.println(cell); - } + Long old = + maxLookbackMap.putIfAbsent(tableName + SEPARATOR + columnFamilyName, maxLookbackInMillis); + if (old != null) { + maxLookbackMap.put(tableName + SEPARATOR + columnFamilyName, maxLookbackInMillis); } - */ + } - @Override - public boolean next(List result) throws IOException { - boolean hasMore = storeScanner.next(result); - inputCellCount += result.size(); - if (!result.isEmpty()) { - // printRow(result, "Input for " + tableName + " " + columnFamilyName, true); // This is for debugging - phoenixLevelRowCompactor.compact(result, false); - outputCellCount += result.size(); - // printRow(result, "Output for " + tableName + " " + columnFamilyName, true); // This is for debugging - } - return hasMore; + public static long getMaxLookbackInMillis(String tableName, String columnFamilyName, + long maxLookbackInMillis) { + if (tableName == null || columnFamilyName == null) { + return maxLookbackInMillis; } + Long value = maxLookbackMap.get(tableName + CompactionScanner.SEPARATOR + columnFamilyName); + return value == null + ? maxLookbackInMillis + : maxLookbackMap.get(tableName + CompactionScanner.SEPARATOR + columnFamilyName); + } + + static class CellTimeComparator implements Comparator { + public static final CellTimeComparator COMPARATOR = new CellTimeComparator(); @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); + public int compare(Cell o1, Cell o2) { + long ts1 = o1.getTimestamp(); + long ts2 = o2.getTimestamp(); + if (ts1 == ts2) return 0; + if (ts1 > ts2) return -1; + return 1; } @Override - public void close() throws IOException { - LOGGER.info("Closing CompactionScanner for table " + tableName + " store " - + columnFamilyName + (major ? " major " : " not major ") + "compaction retained " - + outputCellCount + " of " + inputCellCount + " cells" - + (phoenixLevelOnly ? " phoenix level only" : "")); - if (forceMinorCompaction) { - forceMinorCompaction = false; - } - storeScanner.close(); + public boolean equals(Object obj) { + return false; } - - enum MatcherType { - GLOBAL_VIEWS, GLOBAL_INDEXES, TENANT_VIEWS, TENANT_INDEXES + } + /* + * private void printRow(List result, String title, boolean sort) { List row; if + * (sort) { row = new ArrayList<>(result); Collections.sort(row, CellTimeComparator.COMPARATOR); } + * else { row = result; } System.out.println("---- " + title + " ----"); System.out.println((major + * ? 
"Major " : "Not major ") + "compaction time: " + compactionTime); + * System.out.println("Max lookback window start time: " + maxLookbackWindowStart); + * System.out.println("Max lookback in ms: " + maxLookbackInMillis); + * System.out.println("TTL in ms: " + ttlInMillis); boolean maxLookbackLine = false; boolean + * ttlLine = false; for (Cell cell : row) { if (!maxLookbackLine && cell.getTimestamp() < + * maxLookbackWindowStart) { System.out.println("-----> Max lookback window start time: " + + * maxLookbackWindowStart); maxLookbackLine = true; } else if (!ttlLine && cell.getTimestamp() < + * ttlWindowStart) { System.out.println("-----> TTL window start time: " + ttlWindowStart); + * ttlLine = true; } System.out.println(cell); } } + */ + + @Override + public boolean next(List result) throws IOException { + boolean hasMore = storeScanner.next(result); + inputCellCount += result.size(); + if (!result.isEmpty()) { + // printRow(result, "Input for " + tableName + " " + columnFamilyName, true); // This is for + // debugging + phoenixLevelRowCompactor.compact(result, false); + outputCellCount += result.size(); + // printRow(result, "Output for " + tableName + " " + columnFamilyName, true); // This is for + // debugging + } + return hasMore; + } + + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } + + @Override + public void close() throws IOException { + LOGGER.info("Closing CompactionScanner for table " + tableName + " store " + columnFamilyName + + (major ? " major " : " not major ") + "compaction retained " + outputCellCount + " of " + + inputCellCount + " cells" + (phoenixLevelOnly ? " phoenix level only" : "")); + if (forceMinorCompaction) { + forceMinorCompaction = false; + } + storeScanner.close(); + } + + enum MatcherType { + GLOBAL_VIEWS, + GLOBAL_INDEXES, + TENANT_VIEWS, + TENANT_INDEXES + } + + /** + * Helper class for managing various RowKeyMatchers and TTLCaches For matcher types => + * GLOBAL_VIEWS: GLOBAL_INDEXES: Assumption is that the number of Global views in a system is + * bounded.(can fit in memory) Reads the ROW_KEY_MATCHER and TTL attribute from SYSCAT for all + * global views. SYSCAT query is not batched. TENANT_VIEWS: Reads the ROW_KEY_MATCHER and TTL + * attribute from SYSCAT in a batched fashion. TENANT_ID from the region startKey is used to + * further filter the SYSCAT query. The batch size is controlled by viewTTLTenantViewsPerScanLimit + * attribute. Special case: when the data type of the TENANT_ID is not a VARCHAR/CHAR, then SYSCAT + * queries cannot be bounded or batched using the TENANT_ID, since the TENANT_ID attribute in + * SYSCAT is a VARCHAR. TENANT_INDEXES: Reads the ROW_KEY_MATCHER and TTL attribute from SYSCAT in + * a batched fashion. Since TENANT_ID is not the leading part of the row key and thus not in a + * lexicographic order for a given region, the TENANT_ID cannot be used to query SYSCAT in a batch + * of more than one. The batch size is configured to one => uses the TENANT_ID of the current row. + */ + /// TODO : Needs to convert debug to trace logging. 
+ private class PartitionedTableRowKeyMatcher { + + private static final int NO_BATCH = -1; + + private boolean isSharedIndex = false; + private boolean isMultiTenant = false; + private boolean isSalted = false; + private boolean shouldBatchCatalogAccess = true; + private RowKeyParser rowKeyParser; + private PTable baseTable; + private RowKeyMatcher globalViewMatcher; + private RowKeyMatcher tenantViewMatcher; + private RowKeyMatcher globalIndexMatcher; + private RowKeyMatcher tenantIndexMatcher; + private TableTTLInfoCache globalViewTTLCache; + private TableTTLInfoCache tenantViewTTLCache; + private TableTTLInfoCache globalIndexTTLCache; + private TableTTLInfoCache tenantIndexTTLCache; + private String startTenantId = ""; + private String endTenantId = ""; + private String lastTenantId = ""; + private String currentTenantId = ""; + private int viewTTLTenantViewsPerScanLimit; + + public PartitionedTableRowKeyMatcher(PTable table, boolean isSalted, boolean isSharedIndex, + boolean isLongViewIndexEnabled, int viewTTLTenantViewsPerScanLimit) throws SQLException { + this.baseTable = table; + this.globalViewTTLCache = new TableTTLInfoCache(); + this.globalIndexTTLCache = new TableTTLInfoCache(); + this.tenantViewTTLCache = new TableTTLInfoCache(); + this.tenantIndexTTLCache = new TableTTLInfoCache(); + this.rowKeyParser = new RowKeyParser(baseTable, isLongViewIndexEnabled); + PDataType tenantIdType = this.rowKeyParser.getTenantIdDataType(); + this.shouldBatchCatalogAccess = + (tenantIdType.getSqlType() == Types.VARCHAR || tenantIdType.getSqlType() == Types.CHAR); + this.isSharedIndex = isSharedIndex || localIndex; + this.isSalted = isSalted; + this.isMultiTenant = table.isMultiTenant(); + this.viewTTLTenantViewsPerScanLimit = viewTTLTenantViewsPerScanLimit; + initializeMatchers(); } /** - * Helper class for managing various RowKeyMatchers and TTLCaches - * For matcher types => - * GLOBAL_VIEWS: - * GLOBAL_INDEXES: - * Assumption is that the number of Global views in a system is bounded.(can fit in memory) - * Reads the ROW_KEY_MATCHER and TTL attribute from SYSCAT for all global views. - * SYSCAT query is not batched. - * - * TENANT_VIEWS: - * Reads the ROW_KEY_MATCHER and TTL attribute from SYSCAT in a batched fashion. - * TENANT_ID from the region startKey is used to further filter the SYSCAT query. - * The batch size is controlled by viewTTLTenantViewsPerScanLimit attribute. - * Special case: when the data type of the TENANT_ID is not a VARCHAR/CHAR, - * then SYSCAT queries cannot be bounded or batched using the TENANT_ID, - * since the TENANT_ID attribute in SYSCAT is a VARCHAR. - * - * TENANT_INDEXES: - * Reads the ROW_KEY_MATCHER and TTL attribute from SYSCAT in a batched fashion. - * Since TENANT_ID is not the leading part of the row key and thus not in a - * lexicographic order for a given region, - * the TENANT_ID cannot be used to query SYSCAT in a batch of more than one. - * The batch size is configured to one => uses the TENANT_ID of the current row. - * + * Initialize the various matchers Case : multi-tenant */ - /// TODO : Needs to convert debug to trace logging. 
- private class PartitionedTableRowKeyMatcher { - - private static final int NO_BATCH = -1; - - private boolean isSharedIndex = false; - private boolean isMultiTenant = false; - private boolean isSalted = false; - private boolean shouldBatchCatalogAccess = true; - private RowKeyParser rowKeyParser; - private PTable baseTable; - private RowKeyMatcher globalViewMatcher; - private RowKeyMatcher tenantViewMatcher; - private RowKeyMatcher globalIndexMatcher; - private RowKeyMatcher tenantIndexMatcher; - private TableTTLInfoCache globalViewTTLCache; - private TableTTLInfoCache tenantViewTTLCache; - private TableTTLInfoCache globalIndexTTLCache; - private TableTTLInfoCache tenantIndexTTLCache; - private String startTenantId = ""; - private String endTenantId = ""; - private String lastTenantId = ""; - private String currentTenantId = ""; - private int viewTTLTenantViewsPerScanLimit; - - public PartitionedTableRowKeyMatcher( - PTable table, - boolean isSalted, - boolean isSharedIndex, - boolean isLongViewIndexEnabled, - int viewTTLTenantViewsPerScanLimit) throws SQLException { - this.baseTable = table; - this.globalViewTTLCache = new TableTTLInfoCache(); - this.globalIndexTTLCache = new TableTTLInfoCache(); - this.tenantViewTTLCache = new TableTTLInfoCache(); - this.tenantIndexTTLCache = new TableTTLInfoCache(); - this.rowKeyParser = new RowKeyParser(baseTable, isLongViewIndexEnabled); - PDataType tenantIdType = this.rowKeyParser.getTenantIdDataType(); - this.shouldBatchCatalogAccess = (tenantIdType.getSqlType() == Types.VARCHAR || - tenantIdType.getSqlType() == Types.CHAR); - this.isSharedIndex = isSharedIndex || localIndex ; - this.isSalted = isSalted; - this.isMultiTenant = table.isMultiTenant(); - this.viewTTLTenantViewsPerScanLimit = viewTTLTenantViewsPerScanLimit; - initializeMatchers(); - } - - - /** - * Initialize the various matchers - * Case : multi-tenant - * @throws SQLException - */ - - private void initializeMatchers() throws SQLException { - - if (this.isSharedIndex) { - this.globalIndexMatcher = initializeMatcher(GLOBAL_INDEXES); - // Matcher for TENANT_INDEXES will be created/refreshed when processing the rows. - } else if (this.isMultiTenant) { - this.globalViewMatcher = initializeMatcher(GLOBAL_VIEWS); - this.tenantViewMatcher = initializeMatcher(TENANT_VIEWS); - } else { - this.globalViewMatcher = initializeMatcher(GLOBAL_VIEWS); - } - } - // Queries SYSCAT to find the ROW_KEY_MATCHER and TTL attributes for various matcher types. - // The attributes are populated/initialized into local cache objects. - // TTL => TableTTLInfoCache - // ROW_KEY_MATCHER => RowKeyMatcher (TrieIndex) - private RowKeyMatcher initializeMatcher(MatcherType type) throws SQLException { - List tableList = null; - RowKeyMatcher matcher = new RowKeyMatcher(); - String regionName = region.getRegionInfo().getEncodedName(); + private void initializeMatchers() throws SQLException { + + if (this.isSharedIndex) { + this.globalIndexMatcher = initializeMatcher(GLOBAL_INDEXES); + // Matcher for TENANT_INDEXES will be created/refreshed when processing the rows. + } else if (this.isMultiTenant) { + this.globalViewMatcher = initializeMatcher(GLOBAL_VIEWS); + this.tenantViewMatcher = initializeMatcher(TENANT_VIEWS); + } else { + this.globalViewMatcher = initializeMatcher(GLOBAL_VIEWS); + } + } + // Queries SYSCAT to find the ROW_KEY_MATCHER and TTL attributes for various matcher types. + // The attributes are populated/initialized into local cache objects. 
+ // TTL => TableTTLInfoCache + // ROW_KEY_MATCHER => RowKeyMatcher (TrieIndex) + private RowKeyMatcher initializeMatcher(MatcherType type) throws SQLException { + List tableList = null; + RowKeyMatcher matcher = new RowKeyMatcher(); + String regionName = region.getRegionInfo().getEncodedName(); + + switch (type) { + case GLOBAL_INDEXES: + tableList = getMatchPatternsForGlobalPartitions(this.baseTable.getName().getString(), + env.getConfiguration(), false, true); + break; + case TENANT_INDEXES: + try { + startTenantId = + rowKeyParser.getTenantIdFromRowKey(region.getRegionInfo().getStartKey()); + endTenantId = rowKeyParser.getTenantIdFromRowKey(region.getRegionInfo().getEndKey()); + } catch (SQLException sqle) { + LOGGER.error(sqle.getMessage()); + throw sqle; + } + if (startTenantId != null && !startTenantId.isEmpty()) { + tableList = getMatchPatternsForTenant(this.baseTable.getName().getString(), + env.getConfiguration(), true, false, regionName, startTenantId); + } + break; + case GLOBAL_VIEWS: + tableList = getMatchPatternsForGlobalPartitions(this.baseTable.getName().getString(), + env.getConfiguration(), true, false); + break; + case TENANT_VIEWS: + try { + startTenantId = + rowKeyParser.getTenantIdFromRowKey(region.getRegionInfo().getStartKey()); + endTenantId = rowKeyParser.getTenantIdFromRowKey(region.getRegionInfo().getEndKey()); + } catch (SQLException sqle) { + LOGGER.error(sqle.getMessage()); + throw sqle; + } + + if (shouldBatchCatalogAccess) { + tableList = getMatchPatternsForTenantBatch(this.baseTable.getName().getString(), + env.getConfiguration(), regionName, startTenantId, viewTTLTenantViewsPerScanLimit); + + } else if (startTenantId != null && !startTenantId.isEmpty()) { + tableList = getMatchPatternsForTenant(this.baseTable.getName().getString(), + env.getConfiguration(), true, false, regionName, startTenantId); + } + break; + default: + tableList = new ArrayList<>(); + break; + } + + if (tableList != null && !tableList.isEmpty()) { + tableList.forEach(m -> { + if (m.getTTL() != TTL_NOT_DEFINED) { + // add the ttlInfo to the cache. + // each new/unique ttlInfo object added returns a unique tableId. 
+ int tableId = -1; switch (type) { - case GLOBAL_INDEXES: - tableList = getMatchPatternsForGlobalPartitions( - this.baseTable.getName().getString(), - env.getConfiguration(), - false, true); - break; - case TENANT_INDEXES: - try { - startTenantId = rowKeyParser.getTenantIdFromRowKey( - region.getRegionInfo().getStartKey()); - endTenantId = rowKeyParser.getTenantIdFromRowKey( - region.getRegionInfo().getEndKey() - ); - } catch (SQLException sqle) { - LOGGER.error(sqle.getMessage()); - throw sqle; - } - if (startTenantId != null && !startTenantId.isEmpty()) { - tableList = getMatchPatternsForTenant( - this.baseTable.getName().getString(), - env.getConfiguration(), true, false, - regionName, startTenantId); - } + case GLOBAL_INDEXES: + tableId = globalIndexTTLCache.addTable(m); break; - case GLOBAL_VIEWS: - tableList = getMatchPatternsForGlobalPartitions( - this.baseTable.getName().getString(), - env.getConfiguration(), - true, false); + case TENANT_INDEXES: + tableId = tenantIndexTTLCache.addTable(m); break; - case TENANT_VIEWS: - try { - startTenantId = rowKeyParser.getTenantIdFromRowKey( - region.getRegionInfo().getStartKey()); - endTenantId = rowKeyParser.getTenantIdFromRowKey( - region.getRegionInfo().getEndKey() - ); - } catch (SQLException sqle) { - LOGGER.error(sqle.getMessage()); - throw sqle; - } - - if (shouldBatchCatalogAccess) { - tableList = getMatchPatternsForTenantBatch( - this.baseTable.getName().getString(), - env.getConfiguration(), - regionName, startTenantId, viewTTLTenantViewsPerScanLimit); - - } else if (startTenantId != null && !startTenantId.isEmpty()) { - tableList = getMatchPatternsForTenant( - this.baseTable.getName().getString(), - env.getConfiguration(), true, false, - regionName, startTenantId); - } + case GLOBAL_VIEWS: + tableId = globalViewTTLCache.addTable(m); break; - default: - tableList = new ArrayList<>(); + case TENANT_VIEWS: + tableId = tenantViewTTLCache.addTable(m); break; } - if (tableList != null && !tableList.isEmpty()) { - tableList.forEach(m -> { - if (m.getTTL() != TTL_NOT_DEFINED) { - // add the ttlInfo to the cache. - // each new/unique ttlInfo object added returns a unique tableId. - int tableId = -1; - switch (type) { - case GLOBAL_INDEXES: - tableId = globalIndexTTLCache.addTable(m); - break; - case TENANT_INDEXES: - tableId = tenantIndexTTLCache.addTable(m); - break; - case GLOBAL_VIEWS: - tableId = globalViewTTLCache.addTable(m); - break; - case TENANT_VIEWS: - tableId = tenantViewTTLCache.addTable(m); - break; - } - - // map the match pattern to the tableId using matcher index. - matcher.put(m.getMatchPattern(), tableId); - LOGGER.debug("Matcher updated (init) {} : {}", type.toString(), m); - } - }); - } + // map the match pattern to the tableId using matcher index. + matcher.put(m.getMatchPattern(), tableId); + LOGGER.debug("Matcher updated (init) {} : {}", type.toString(), m); + } + }); + } - LOGGER.debug(String.format("Initialized matcher for type r=%s, t=%s :- " + - "s=%s, e=%s, c=%s, l=%s", - regionName, - type, - startTenantId, - endTenantId, - currentTenantId, - lastTenantId)); + LOGGER.debug( + String.format("Initialized matcher for type r=%s, t=%s :- " + "s=%s, e=%s, c=%s, l=%s", + regionName, type, startTenantId, endTenantId, currentTenantId, lastTenantId)); - return matcher; - } + return matcher; + } - // The tenant views/indexes that have TTL set are queried in batches. - // Refresh the tenant view/index matcher with the next batch of tenant views - // that have ttl set. 
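    // Sketch of the refresh flow (hedged; see match() and getNextTenantViews() below): the
    // tenant-level matchers are rebuilt from match() whenever the tenant id parsed from the
    // current row key moves past (batched case) or differs from (unbatched case) the last
    // cached tenant id. For TENANT_VIEWS the next batch is fetched from SYSCAT with a query of
    // roughly this shape:
    //
    //   ... AND COLUMN_FAMILY = '<physical table>' AND TENANT_ID >= ? LIMIT <batchSize>
    //
    // whereas TENANT_INDEXES are refreshed one tenant at a time (TENANT_ID = ?).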
- private void refreshMatcher(MatcherType type) throws SQLException { - List tableList = null; - String regionName = region.getRegionInfo().getEncodedName(); - int catalogAccessBatchSize = NO_BATCH; + // The tenant views/indexes that have TTL set are queried in batches. + // Refresh the tenant view/index matcher with the next batch of tenant views + // that have ttl set. + private void refreshMatcher(MatcherType type) throws SQLException { + List tableList = null; + String regionName = region.getRegionInfo().getEncodedName(); + int catalogAccessBatchSize = NO_BATCH; + switch (type) { + case TENANT_INDEXES: + this.tenantIndexMatcher = new RowKeyMatcher(); + this.tenantIndexTTLCache = new TableTTLInfoCache(); + if (currentTenantId != null && !currentTenantId.isEmpty()) { + tableList = getMatchPatternsForTenant(this.baseTable.getName().getString(), + env.getConfiguration(), false, true, regionName, currentTenantId); + } + break; + case TENANT_VIEWS: + this.tenantViewMatcher = new RowKeyMatcher(); + this.tenantViewTTLCache = new TableTTLInfoCache(); + + if (shouldBatchCatalogAccess) { + tableList = getMatchPatternsForTenantBatch(this.baseTable.getName().getString(), + env.getConfiguration(), regionName, currentTenantId, viewTTLTenantViewsPerScanLimit); + + } else if (currentTenantId != null && !currentTenantId.isEmpty()) { + tableList = getMatchPatternsForTenant(this.baseTable.getName().getString(), + env.getConfiguration(), true, false, regionName, currentTenantId); + } + + break; + default: + throw new SQLException("Refresh for type " + type.toString() + " is not supported"); + } + + if (tableList != null && !tableList.isEmpty()) { + tableList.forEach(m -> { + if (m.getTTL() != TTL_NOT_DEFINED) { + // add the ttlInfo to the cache. + // each new/unique ttlInfo object added returns a unique tableId. + int tableId = -1; switch (type) { - case TENANT_INDEXES: - this.tenantIndexMatcher = new RowKeyMatcher(); - this.tenantIndexTTLCache = new TableTTLInfoCache(); - if (currentTenantId != null && !currentTenantId.isEmpty()) { - tableList = getMatchPatternsForTenant( - this.baseTable.getName().getString(), - env.getConfiguration(), false, true, - regionName, currentTenantId); - } + case TENANT_INDEXES: + tableId = tenantIndexTTLCache.addTable(m); + // map the match pattern to the tableId using matcher index. + this.tenantIndexMatcher.put(m.getMatchPattern(), tableId); break; - case TENANT_VIEWS: - this.tenantViewMatcher = new RowKeyMatcher(); - this.tenantViewTTLCache = new TableTTLInfoCache(); - - if (shouldBatchCatalogAccess) { - tableList = getMatchPatternsForTenantBatch( - this.baseTable.getName().getString(), - env.getConfiguration(), - regionName, currentTenantId, viewTTLTenantViewsPerScanLimit); - - } else if (currentTenantId != null && !currentTenantId.isEmpty()) { - tableList = getMatchPatternsForTenant( - this.baseTable.getName().getString(), - env.getConfiguration(), true, false, - regionName, currentTenantId); - } - + case TENANT_VIEWS: + tableId = tenantViewTTLCache.addTable(m); + // map the match pattern to the tableId using matcher index. + this.tenantViewMatcher.put(m.getMatchPattern(), tableId); break; - default: - throw new SQLException("Refresh for type " + type.toString() + " is not supported"); } - - if (tableList != null && !tableList.isEmpty()) { - tableList.forEach(m -> { - if (m.getTTL() != TTL_NOT_DEFINED) { - // add the ttlInfo to the cache. - // each new/unique ttlInfo object added returns a unique tableId. 
- int tableId = -1; - switch (type) { - case TENANT_INDEXES: - tableId = tenantIndexTTLCache.addTable(m); - // map the match pattern to the tableId using matcher index. - this.tenantIndexMatcher.put(m.getMatchPattern(), tableId); - break; - case TENANT_VIEWS: - tableId = tenantViewTTLCache.addTable(m); - // map the match pattern to the tableId using matcher index. - this.tenantViewMatcher.put(m.getMatchPattern(), tableId); - break; - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Refreshed matcher for type (updated) {}, {} : {}", - regionName, type.toString(), m); - } - - } - }); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Refreshed matcher for type r={}, t={}:- " + - "rs={}, re={}, s={}, e={}, c={}, l={}", - regionName, - type, - Bytes.toStringBinary(region.getRegionInfo().getStartKey()), - Bytes.toStringBinary(region.getRegionInfo().getEndKey()), - startTenantId, - endTenantId, - currentTenantId, - lastTenantId); - } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Refreshed matcher for type (updated) {}, {} : {}", regionName, + type.toString(), m); } - } - - // Match row key using the appropriate matcher - private TableTTLInfo match(byte[] rowkey, int offset, MatcherType matcherType) - throws SQLException { - Integer tableId = null; - TableTTLInfoCache tableTTLInfoCache = null; - RowKeyMatcher matcher = null; - - if (this.isSharedIndex && matcherType.compareTo(TENANT_INDEXES) == 0) { - currentTenantId = rowKeyParser.getTenantIdFromRowKey(rowkey, true); - if (Bytes.BYTES_COMPARATOR.compare( - Bytes.toBytes(currentTenantId), - Bytes.toBytes(lastTenantId)) != 0) { - refreshMatcher(TENANT_INDEXES); - } - matcher = this.tenantIndexMatcher; - tableTTLInfoCache = this.tenantIndexTTLCache; - } else if (this.isSharedIndex && - (matcherType.compareTo(GLOBAL_INDEXES) == 0)) { - matcher = this.globalIndexMatcher; - tableTTLInfoCache = this.globalIndexTTLCache; - } else if (this.isMultiTenant && - (matcherType.compareTo(TENANT_VIEWS) == 0)) { - // Check whether we need to retrieve the next batch of tenants - // If the current tenant from the row is greater than the last tenant row - // in the tenantViewTTLCache/tenantViewMatcher then refresh the cache. - currentTenantId = rowKeyParser.getTenantIdFromRowKey(rowkey); - if (((shouldBatchCatalogAccess) && (Bytes.BYTES_COMPARATOR.compare( - Bytes.toBytes(currentTenantId), - Bytes.toBytes(lastTenantId)) > 0)) - || ((!shouldBatchCatalogAccess) && (Bytes.BYTES_COMPARATOR.compare( - Bytes.toBytes(currentTenantId), - Bytes.toBytes(lastTenantId)) != 0))) { - refreshMatcher(TENANT_VIEWS); - } + } + }); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + "Refreshed matcher for type r={}, t={}:- " + "rs={}, re={}, s={}, e={}, c={}, l={}", + regionName, type, Bytes.toStringBinary(region.getRegionInfo().getStartKey()), + Bytes.toStringBinary(region.getRegionInfo().getEndKey()), startTenantId, endTenantId, + currentTenantId, lastTenantId); + } + } + } - matcher = this.tenantViewMatcher; - tableTTLInfoCache = this.tenantViewTTLCache; - } else if (matcherType.compareTo(GLOBAL_VIEWS) == 0) { - matcher = this.globalViewMatcher; - tableTTLInfoCache = this.globalViewTTLCache; - } else { - matcher = null; - tableTTLInfoCache = null; - } - tableId = matcher != null ? matcher.match(rowkey, offset) : null; - TableTTLInfo tableTTLInfo = tableTTLInfoCache != null ? 
- tableTTLInfoCache.getTableById(tableId) : null; + // Match row key using the appropriate matcher + private TableTTLInfo match(byte[] rowkey, int offset, MatcherType matcherType) + throws SQLException { + Integer tableId = null; + TableTTLInfoCache tableTTLInfoCache = null; + RowKeyMatcher matcher = null; + + if (this.isSharedIndex && matcherType.compareTo(TENANT_INDEXES) == 0) { + currentTenantId = rowKeyParser.getTenantIdFromRowKey(rowkey, true); + if ( + Bytes.BYTES_COMPARATOR.compare(Bytes.toBytes(currentTenantId), + Bytes.toBytes(lastTenantId)) != 0 + ) { + refreshMatcher(TENANT_INDEXES); + } + matcher = this.tenantIndexMatcher; + tableTTLInfoCache = this.tenantIndexTTLCache; + } else if (this.isSharedIndex && (matcherType.compareTo(GLOBAL_INDEXES) == 0)) { + matcher = this.globalIndexMatcher; + tableTTLInfoCache = this.globalIndexTTLCache; + } else if (this.isMultiTenant && (matcherType.compareTo(TENANT_VIEWS) == 0)) { + // Check whether we need to retrieve the next batch of tenants + // If the current tenant from the row is greater than the last tenant row + // in the tenantViewTTLCache/tenantViewMatcher then refresh the cache. + currentTenantId = rowKeyParser.getTenantIdFromRowKey(rowkey); + if ( + ((shouldBatchCatalogAccess) && (Bytes.BYTES_COMPARATOR + .compare(Bytes.toBytes(currentTenantId), Bytes.toBytes(lastTenantId)) > 0)) + || ((!shouldBatchCatalogAccess) && (Bytes.BYTES_COMPARATOR + .compare(Bytes.toBytes(currentTenantId), Bytes.toBytes(lastTenantId)) != 0)) + ) { + refreshMatcher(TENANT_VIEWS); + } + + matcher = this.tenantViewMatcher; + tableTTLInfoCache = this.tenantViewTTLCache; + } else if (matcherType.compareTo(GLOBAL_VIEWS) == 0) { + matcher = this.globalViewMatcher; + tableTTLInfoCache = this.globalViewTTLCache; + } else { + matcher = null; + tableTTLInfoCache = null; + } + tableId = matcher != null ? matcher.match(rowkey, offset) : null; + TableTTLInfo tableTTLInfo = + tableTTLInfoCache != null ? 
tableTTLInfoCache.getTableById(tableId) : null; + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace( + String.format("Matched matcher for type r=%s, t=%s, r=%s:- " + "s=%s, e=%s, c=%s, l=%s", + region.getRegionInfo().getEncodedName(), matcherType, Bytes.toStringBinary(rowkey), + startTenantId, endTenantId, currentTenantId, lastTenantId)); + } + + return tableTTLInfo; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Matched matcher for type r=%s, t=%s, r=%s:- " + - "s=%s, e=%s, c=%s, l=%s", - region.getRegionInfo().getEncodedName(), - matcherType, - Bytes.toStringBinary(rowkey), - startTenantId, - endTenantId, - currentTenantId, - lastTenantId)); - } + } - return tableTTLInfo; + private List getMatchPatternsForGlobalPartitions(String physicalTableName, + Configuration configuration, boolean globalViews, boolean globalIndexes) throws SQLException { + List tableTTLInfoList = Lists.newArrayList(); + if (globalViews || globalIndexes) { + Set globalViewSet = getGlobalViews(physicalTableName, configuration); + if (globalViewSet.size() > 0) { + getTTLInfo(physicalTableName, globalViewSet, configuration, globalIndexes, + tableTTLInfoList); } + } + return tableTTLInfoList; + } - private List getMatchPatternsForGlobalPartitions(String physicalTableName, - Configuration configuration, boolean globalViews, boolean globalIndexes) - throws SQLException { + private List getMatchPatternsForTenantBatch(String physicalTableName, + Configuration configuration, String regionName, String startTenantId, int batchSize) + throws SQLException { - List tableTTLInfoList = Lists.newArrayList(); - if (globalViews || globalIndexes) { - Set globalViewSet = getGlobalViews(physicalTableName, configuration); - if (globalViewSet.size() > 0) { - getTTLInfo(physicalTableName, globalViewSet, configuration, globalIndexes, - tableTTLInfoList); - } - } - return tableTTLInfoList; - } + List tableTTLInfoList = Lists.newArrayList(); - private List getMatchPatternsForTenantBatch(String physicalTableName, - Configuration configuration, String regionName, - String startTenantId, int batchSize) throws SQLException { + // Batching is enabled only for TENANT_VIEWS. + Set tenantViewSet = + getNextTenantViews(physicalTableName, configuration, regionName, startTenantId, batchSize); + if (tenantViewSet.size() > 0) { + getTTLInfo(physicalTableName, tenantViewSet, configuration, false, tableTTLInfoList); + } + return tableTTLInfoList; + } - List tableTTLInfoList = Lists.newArrayList(); + private List getMatchPatternsForTenant(String physicalTableName, + Configuration configuration, boolean tenantViews, boolean tenantIndexes, String regionName, + String tenantId) throws SQLException { - // Batching is enabled only for TENANT_VIEWS. - Set tenantViewSet = getNextTenantViews(physicalTableName, configuration, - regionName, startTenantId, batchSize); - if (tenantViewSet.size() > 0) { - getTTLInfo(physicalTableName, tenantViewSet, configuration, - false, tableTTLInfoList); - } - return tableTTLInfoList; + List tableTTLInfoList = Lists.newArrayList(); + if (tenantViews || tenantIndexes) { + // Get all TENANT_VIEWS AND TENANT_INDEXES. 
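      // Note: passing NO_BATCH here makes getNextTenantViews() filter on an exact tenant id
      // (TENANT_ID = ?) instead of the TENANT_ID >= ? ... LIMIT range form used when batching.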
+ Set tenantViewSet = + getNextTenantViews(physicalTableName, configuration, regionName, tenantId, NO_BATCH); + if (tenantViewSet.size() > 0) { + getTTLInfo(physicalTableName, tenantViewSet, configuration, tenantIndexes, + tableTTLInfoList); } + } - private List getMatchPatternsForTenant(String physicalTableName, - Configuration configuration, boolean tenantViews, boolean tenantIndexes, - String regionName, String tenantId) throws SQLException { - - List tableTTLInfoList = Lists.newArrayList(); - if (tenantViews || tenantIndexes) { - // Get all TENANT_VIEWS AND TENANT_INDEXES. - Set tenantViewSet = getNextTenantViews(physicalTableName, configuration, - regionName, tenantId, NO_BATCH); - if (tenantViewSet.size() > 0) { - getTTLInfo(physicalTableName, tenantViewSet, configuration, - tenantIndexes, tableTTLInfoList); - } - } - - return tableTTLInfoList; - } + return tableTTLInfoList; + } - /** - * Get the ROW_KEY_MATCHER AND TTL field values for various view related entities - - * GLOBAL_VIEWS AND GLOBAL_INDEXES for a given HBase table (Physical Phoenix table) - * - * @param physicalTableName - * @param configuration - * @return - * @throws SQLException - */ - private Set getGlobalViews(String physicalTableName, Configuration configuration) - throws SQLException { - - Set globalViewSet = new HashSet<>(); - try (Connection serverConnection = QueryUtil.getConnectionOnServer(new Properties(), - configuration)) { - String - globalViewsSQLFormat = - "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, " + - "COLUMN_NAME AS PHYSICAL_TABLE_TENANT_ID, " + - "COLUMN_FAMILY AS PHYSICAL_TABLE_FULL_NAME " + - "FROM SYSTEM.CATALOG " + - "WHERE " + "LINK_TYPE = 2 " + - "AND TABLE_TYPE IS NULL " + - "AND COLUMN_FAMILY = '%s' " + - "AND TENANT_ID IS NULL"; - String globalViewSQL = String.format(globalViewsSQLFormat, physicalTableName); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("globalViewSQL: {}", globalViewSQL); - } + /** + * Get the ROW_KEY_MATCHER AND TTL field values for various view related entities - GLOBAL_VIEWS + * AND GLOBAL_INDEXES for a given HBase table (Physical Phoenix table) + */ + private Set getGlobalViews(String physicalTableName, Configuration configuration) + throws SQLException { + + Set globalViewSet = new HashSet<>(); + try (Connection serverConnection = + QueryUtil.getConnectionOnServer(new Properties(), configuration)) { + String globalViewsSQLFormat = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, " + + "COLUMN_NAME AS PHYSICAL_TABLE_TENANT_ID, " + + "COLUMN_FAMILY AS PHYSICAL_TABLE_FULL_NAME " + "FROM SYSTEM.CATALOG " + "WHERE " + + "LINK_TYPE = 2 " + "AND TABLE_TYPE IS NULL " + "AND COLUMN_FAMILY = '%s' " + + "AND TENANT_ID IS NULL"; + String globalViewSQL = String.format(globalViewsSQLFormat, physicalTableName); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("globalViewSQL: {}", globalViewSQL); + } + + try (PhoenixPreparedStatement globalViewStmt = + serverConnection.prepareStatement(globalViewSQL).unwrap(PhoenixPreparedStatement.class)) { + try (ResultSet globalViewRS = globalViewStmt.executeQuery()) { + while (globalViewRS.next()) { + String tid = globalViewRS.getString("TENANT_ID"); + String schem = globalViewRS.getString("TABLE_SCHEM"); + String tName = globalViewRS.getString("TABLE_NAME"); + String tenantId = tid == null || tid.isEmpty() ? "NULL" : "'" + tid + "'"; + String schemCol = schem == null || schem.isEmpty() ? 
"NULL" : "'" + schem + "'"; + TableInfo tableInfo = + new TableInfo(tenantId.getBytes(), schemCol.getBytes(), tName.getBytes()); + globalViewSet.add(tableInfo); + } + } + } + } + return globalViewSet; + } - try (PhoenixPreparedStatement globalViewStmt = serverConnection.prepareStatement( - globalViewSQL).unwrap(PhoenixPreparedStatement.class)) { - try (ResultSet globalViewRS = globalViewStmt.executeQuery()) { - while (globalViewRS.next()) { - String tid = globalViewRS.getString("TENANT_ID"); - String schem = globalViewRS.getString("TABLE_SCHEM"); - String tName = globalViewRS.getString("TABLE_NAME"); - String tenantId = tid == null || tid.isEmpty() ? "NULL" : "'" + tid + "'"; - String - schemCol = - schem == null || schem.isEmpty() ? "NULL" : "'" + schem + "'"; - TableInfo - tableInfo = - new TableInfo(tenantId.getBytes(), schemCol.getBytes(), - tName.getBytes()); - globalViewSet.add(tableInfo); - } - } - } - } - return globalViewSet; - } + /** + * Get the ROW_KEY_MATCHER AND TTL field values for various view related entities - TENANT_VIEWS + * AND TENANT_INDEXES for a given HBase table (Physical Phoenix table) when batch <= 0 Get all + * the tenant views defined for a given tenant when batch > 0 Get the tenant views defined for + * tenants starting from passed in tenantId in a query more/batch style + */ + private Set getNextTenantViews(String physicalTableName, Configuration configuration, + String regionName, String fromTenantId, int batchSize) throws SQLException { + + Set tenantViewSet = new HashSet<>(); + try (Connection serverConnection = + QueryUtil.getConnectionOnServer(new Properties(), configuration)) { + String tenantViewsSQLFormat = + "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME," + "COLUMN_NAME AS PHYSICAL_TABLE_TENANT_ID, " + + "COLUMN_FAMILY AS PHYSICAL_TABLE_FULL_NAME " + "FROM SYSTEM.CATALOG " + + "WHERE LINK_TYPE = 2 " + "AND COLUMN_FAMILY = '%s' " + "AND TENANT_ID IS NOT NULL "; + if (batchSize <= 0) { + tenantViewsSQLFormat += + ((fromTenantId != null && fromTenantId.length() > 0) ? "AND TENANT_ID = ? " : ""); - /** - * Get the ROW_KEY_MATCHER AND TTL field values for various view related entities - - * TENANT_VIEWS AND TENANT_INDEXES for a given HBase table (Physical Phoenix table) - * - * when batch <= 0 - * Get all the tenant views defined for a given tenant - * when batch > 0 - * Get the tenant views defined for tenants starting from passed in tenantId - * in a query more/batch style - * - * @param physicalTableName - * @param configuration - * @param regionName - * @param fromTenantId - * @param batchSize - * @return - * @throws SQLException - */ - private Set getNextTenantViews( - String physicalTableName, - Configuration configuration, - String regionName, - String fromTenantId, - int batchSize - ) throws SQLException { - - Set tenantViewSet = new HashSet<>(); - try (Connection serverConnection = QueryUtil.getConnectionOnServer(new Properties(), - configuration)) { - String - tenantViewsSQLFormat = - "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME," + - "COLUMN_NAME AS PHYSICAL_TABLE_TENANT_ID, " + - "COLUMN_FAMILY AS PHYSICAL_TABLE_FULL_NAME " + - "FROM SYSTEM.CATALOG " + - "WHERE LINK_TYPE = 2 " + - "AND COLUMN_FAMILY = '%s' " + - "AND TENANT_ID IS NOT NULL "; - if (batchSize <= 0) { - tenantViewsSQLFormat += - ((fromTenantId != null && fromTenantId.length() > 0) - ? "AND TENANT_ID = ? " - : ""); - - } else { - tenantViewsSQLFormat += - ((fromTenantId != null && fromTenantId.length() > 0) - ? "AND TENANT_ID >= ? 
" + "LIMIT " + batchSize - : ""); - } - - String tenantViewSQL = String.format(tenantViewsSQLFormat, physicalTableName); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("tenantViewSQL " + - "region-name = %s, " + - "start-tenant-id = %s, " + - "batch = %d, " + - "sql = %s ", - regionName, - fromTenantId, - batchSize, - tenantViewSQL)); - } + } else { + tenantViewsSQLFormat += ((fromTenantId != null && fromTenantId.length() > 0) + ? "AND TENANT_ID >= ? " + "LIMIT " + batchSize + : ""); + } + + String tenantViewSQL = String.format(tenantViewsSQLFormat, physicalTableName); + if (LOGGER.isTraceEnabled()) { + LOGGER + .trace(String.format("tenantViewSQL " + "region-name = %s, " + "start-tenant-id = %s, " + + "batch = %d, " + "sql = %s ", regionName, fromTenantId, batchSize, tenantViewSQL)); + } + + try (PhoenixPreparedStatement tenantViewStmt = + serverConnection.prepareStatement(tenantViewSQL).unwrap(PhoenixPreparedStatement.class)) { + int paramPos = 1; + if (fromTenantId != null && fromTenantId.length() > 0) { + tenantViewStmt.setString(paramPos, fromTenantId); + } + try (ResultSet tenantViewRS = tenantViewStmt.executeQuery()) { + while (tenantViewRS.next()) { + String tid = tenantViewRS.getString("TENANT_ID"); + String schem = tenantViewRS.getString("TABLE_SCHEM"); + String tName = tenantViewRS.getString("TABLE_NAME"); + String tenantId = tid == null || tid.isEmpty() ? "NULL" : "'" + tid + "'"; + String schemCol = schem == null || schem.isEmpty() ? "NULL" : "'" + schem + "'"; + TableInfo tableInfo = + new TableInfo(tenantId.getBytes(), schemCol.getBytes(), tName.getBytes()); + lastTenantId = tid == null || tid.isEmpty() ? "" : tid; + tenantViewSet.add(tableInfo); + } + } + } + } + return tenantViewSet; + } - try (PhoenixPreparedStatement tenantViewStmt = serverConnection.prepareStatement( - tenantViewSQL).unwrap(PhoenixPreparedStatement.class)) { - int paramPos = 1; - if (fromTenantId != null && fromTenantId.length() > 0) { - tenantViewStmt.setString(paramPos, fromTenantId); + /** + * Get the view/shared-index details (TTL, ROW_KEY_MATCHER) for a given set of views + */ + private void getTTLInfo(String physicalTableName, Set viewSet, + Configuration configuration, boolean isSharedIndex, List tableTTLInfoList) + throws SQLException { + + if (viewSet.size() == 0) { + return; + } + String viewsClause = new StringBuilder(viewSet.stream() + .map((v) -> String.format("(%s, %s,'%s')", Bytes.toString(v.getTenantId()), + Bytes.toString(v.getSchemaName()), Bytes.toString(v.getTableName()))) + .collect(Collectors.joining(","))).toString(); + String viewsWithTTLSQL = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, " + + "TTL, ROW_KEY_MATCHER " + "FROM SYSTEM.CATALOG " + "WHERE TABLE_TYPE = 'v' AND " + + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + "(" + viewsClause.toString() + ")"; + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("ViewsWithTTLSQL : %s", viewsWithTTLSQL)); + } + + try (Connection serverConnection = + QueryUtil.getConnectionOnServer(new Properties(), configuration)) { + + try (PhoenixPreparedStatement viewTTLStmt = serverConnection + .prepareStatement(viewsWithTTLSQL).unwrap(PhoenixPreparedStatement.class)) { + + try (ResultSet viewTTLRS = viewTTLStmt.executeQuery()) { + while (viewTTLRS.next()) { + String tid = viewTTLRS.getString("TENANT_ID"); + String schem = viewTTLRS.getString("TABLE_SCHEM"); + String tName = viewTTLRS.getString("TABLE_NAME"); + String viewTTLStr = viewTTLRS.getString("TTL"); + int viewTTL = viewTTLStr == null || viewTTLStr.isEmpty() + ? 
TTL_NOT_DEFINED + : Integer.valueOf(viewTTLStr); + byte[] rowKeyMatcher = viewTTLRS.getBytes("ROW_KEY_MATCHER"); + byte[] tenantIdBytes = + tid == null || tid.isEmpty() ? EMPTY_BYTE_ARRAY : tid.getBytes(); + + String fullTableName = SchemaUtil.getTableName(schem, tName); + Properties tenantProps = new Properties(); + if (tid != null) { + tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tid); + } + + if (isSharedIndex) { + try (Connection tableConnection = + QueryUtil.getConnectionOnServer(tenantProps, configuration)) { + + PTable pTable = PhoenixRuntime.getTableNoCache(tableConnection, fullTableName); + for (PTable index : pTable.getIndexes()) { + // Handling the case when it is a table level index. + // In those cases view-index-id = null + if (index.getViewIndexId() == null) { + continue; } - try (ResultSet tenantViewRS = tenantViewStmt.executeQuery()) { - while (tenantViewRS.next()) { - String tid = tenantViewRS.getString("TENANT_ID"); - String schem = tenantViewRS.getString("TABLE_SCHEM"); - String tName = tenantViewRS.getString("TABLE_NAME"); - String tenantId = tid == null || tid.isEmpty() ? "NULL" : "'" + tid + "'"; - String schemCol = schem == null || schem.isEmpty() - ? "NULL" : "'" + schem + "'"; - TableInfo - tableInfo = - new TableInfo(tenantId.getBytes(), schemCol.getBytes(), - tName.getBytes()); - lastTenantId = tid == null || tid.isEmpty() ? "" : tid; - tenantViewSet.add(tableInfo); - } + PDataType viewIndexIdType = index.getviewIndexIdType(); + byte[] viewIndexIdBytes = PSmallint.INSTANCE.toBytes(index.getViewIndexId()); + if (viewIndexIdType.compareTo(PLong.INSTANCE) == 0) { + viewIndexIdBytes = PLong.INSTANCE.toBytes(index.getViewIndexId()); } + tableTTLInfoList + .add(new TableTTLInfo(pTable.getPhysicalName().getBytes(), tenantIdBytes, + index.getTableName().getBytes(), viewIndexIdBytes, index.getTTL())); + } + } + } else { + tableTTLInfoList.add(new TableTTLInfo(physicalTableName.getBytes(), tenantIdBytes, + fullTableName.getBytes(), rowKeyMatcher, viewTTL)); + } } - return tenantViewSet; + } } + } + } - /** - * Get the view/shared-index details (TTL, ROW_KEY_MATCHER) for a given set of views - * @param physicalTableName - * @param viewSet - * @param configuration - * @param isSharedIndex - * @param tableTTLInfoList - * @throws SQLException - */ - private void getTTLInfo(String physicalTableName, - Set viewSet, Configuration configuration, - boolean isSharedIndex, List tableTTLInfoList) - throws SQLException { - - if (viewSet.size() == 0) { - return; - } - String - viewsClause = - new StringBuilder(viewSet.stream() - .map((v) -> String.format("(%s, %s,'%s')", - Bytes.toString(v.getTenantId()), - Bytes.toString(v.getSchemaName()), - Bytes.toString(v.getTableName()))) - .collect(Collectors.joining(","))).toString(); - String - viewsWithTTLSQL = - "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, " + - "TTL, ROW_KEY_MATCHER " + - "FROM SYSTEM.CATALOG " + - "WHERE TABLE_TYPE = 'v' AND " + - "(TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + - "(" + viewsClause.toString() + ")"; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace( - String.format("ViewsWithTTLSQL : %s", viewsWithTTLSQL)); - } + public boolean isSharedIndex() { + return isSharedIndex; + } - try (Connection serverConnection = QueryUtil.getConnectionOnServer(new Properties(), - configuration)) { - - try ( - PhoenixPreparedStatement - viewTTLStmt = - serverConnection.prepareStatement(viewsWithTTLSQL) - .unwrap(PhoenixPreparedStatement.class)) { - - try (ResultSet viewTTLRS = viewTTLStmt.executeQuery()) { - while 
(viewTTLRS.next()) { - String tid = viewTTLRS.getString("TENANT_ID"); - String schem = viewTTLRS.getString("TABLE_SCHEM"); - String tName = viewTTLRS.getString("TABLE_NAME"); - String viewTTLStr = viewTTLRS.getString("TTL"); - int viewTTL = viewTTLStr == null || viewTTLStr.isEmpty() ? - TTL_NOT_DEFINED : Integer.valueOf(viewTTLStr); - byte[] rowKeyMatcher = viewTTLRS.getBytes("ROW_KEY_MATCHER"); - byte[] - tenantIdBytes = - tid == null || tid.isEmpty() ? EMPTY_BYTE_ARRAY : tid.getBytes(); - - String fullTableName = SchemaUtil.getTableName(schem, tName); - Properties tenantProps = new Properties(); - if (tid != null) { - tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tid); - } - - if (isSharedIndex) { - try (Connection - tableConnection = - QueryUtil.getConnectionOnServer(tenantProps, configuration)) { - - PTable - pTable = - PhoenixRuntime.getTableNoCache( - tableConnection, fullTableName); - for (PTable index : pTable.getIndexes()) { - // Handling the case when it is a table level index. - // In those cases view-index-id = null - if (index.getViewIndexId() == null) { - continue; - } - PDataType viewIndexIdType = index.getviewIndexIdType(); - byte[] - viewIndexIdBytes = - PSmallint.INSTANCE.toBytes(index.getViewIndexId()); - if (viewIndexIdType.compareTo(PLong.INSTANCE) == 0) { - viewIndexIdBytes = - PLong.INSTANCE.toBytes(index.getViewIndexId()); - } - tableTTLInfoList.add( - new TableTTLInfo(pTable.getPhysicalName().getBytes(), - tenantIdBytes, index.getTableName().getBytes(), - viewIndexIdBytes, index.getTTL())); - } - - } - } else { - tableTTLInfoList.add( - new TableTTLInfo(physicalTableName.getBytes(), - tenantIdBytes, fullTableName.getBytes(), - rowKeyMatcher, viewTTL)); - } - } - } - } - } - } + public boolean isMultiTenant() { + return isMultiTenant; + } + public boolean isSalted() { + return isSalted; + } - public boolean isSharedIndex() { - return isSharedIndex; - } + public RowKeyParser getRowKeyParser() { + return rowKeyParser; + } - public boolean isMultiTenant() { - return isMultiTenant; - } + public PTable getBaseTable() { + return baseTable; + } - public boolean isSalted() { - return isSalted; - } + public RowKeyMatcher getGlobalViewMatcher() { + return globalViewMatcher; + } - public RowKeyParser getRowKeyParser() { - return rowKeyParser; - } + public RowKeyMatcher getTenantViewMatcher() { + return tenantViewMatcher; + } - public PTable getBaseTable() { - return baseTable; - } + public RowKeyMatcher getGlobalIndexMatcher() { + return globalIndexMatcher; + } - public RowKeyMatcher getGlobalViewMatcher() { - return globalViewMatcher; - } + public RowKeyMatcher getTenantIndexMatcher() { + return tenantIndexMatcher; + } - public RowKeyMatcher getTenantViewMatcher() { - return tenantViewMatcher; - } + public TableTTLInfoCache getGlobalViewTTLCache() { + return globalViewTTLCache; + } - public RowKeyMatcher getGlobalIndexMatcher() { - return globalIndexMatcher; - } + public TableTTLInfoCache getTenantViewTTLCache() { + return tenantViewTTLCache; + } - public RowKeyMatcher getTenantIndexMatcher() { - return tenantIndexMatcher; - } + public TableTTLInfoCache getGlobalIndexTTLCache() { + return globalIndexTTLCache; + } + public TableTTLInfoCache getTenantIndexTTLCache() { + return tenantIndexTTLCache; + } - public TableTTLInfoCache getGlobalViewTTLCache() { - return globalViewTTLCache; - } + public int getNumGlobalEntries() { + return globalViewMatcher == null ? 
0 : globalViewMatcher.getNumEntries(); + } - public TableTTLInfoCache getTenantViewTTLCache() { - return tenantViewTTLCache; - } + public int getNumTenantEntries() { + return tenantViewMatcher == null ? 0 : tenantViewMatcher.getNumEntries(); + } - public TableTTLInfoCache getGlobalIndexTTLCache() { - return globalIndexTTLCache; - } + public int getNumGlobalIndexEntries() { + return globalIndexMatcher == null ? 0 : globalIndexMatcher.getNumEntries(); + } - public TableTTLInfoCache getTenantIndexTTLCache() { - return tenantIndexTTLCache; - } + public int getNumTenantIndexEntries() { + return tenantIndexMatcher == null ? 0 : tenantIndexMatcher.getNumEntries(); + } - public int getNumGlobalEntries() { - return globalViewMatcher == null ? 0 : globalViewMatcher.getNumEntries(); - } + public int getNumTablesInGlobalCache() { + return globalViewTTLCache == null ? 0 : globalViewTTLCache.getNumTablesInCache(); + } - public int getNumTenantEntries() { - return tenantViewMatcher == null ? 0 : tenantViewMatcher.getNumEntries(); - } + public int getNumTablesInTenantCache() { + return tenantViewTTLCache == null ? 0 : tenantViewTTLCache.getNumTablesInCache(); + } - public int getNumGlobalIndexEntries() { - return globalIndexMatcher == null ? 0 : globalIndexMatcher.getNumEntries(); - } + public int getNumTablesInGlobalIndexCache() { + return globalIndexTTLCache == null ? 0 : globalIndexTTLCache.getNumTablesInCache(); + } - public int getNumTenantIndexEntries() { - return tenantIndexMatcher == null ? 0 : tenantIndexMatcher.getNumEntries(); - } + public int getNumTablesInTenantIndexCache() { + return tenantIndexTTLCache == null ? 0 : tenantIndexTTLCache.getNumTablesInCache(); + } - public int getNumTablesInGlobalCache() { - return globalViewTTLCache == null ? 0 : globalViewTTLCache.getNumTablesInCache(); - } + public int getNumTablesInCache() { + int totalNumTables = 0; + totalNumTables += globalViewTTLCache == null ? 0 : globalViewTTLCache.getNumTablesInCache(); + totalNumTables += tenantViewTTLCache == null ? 0 : tenantViewTTLCache.getNumTablesInCache(); + totalNumTables += globalIndexTTLCache == null ? 0 : globalIndexTTLCache.getNumTablesInCache(); + totalNumTables += tenantIndexTTLCache == null ? 0 : tenantIndexTTLCache.getNumTablesInCache(); + return totalNumTables; + } - public int getNumTablesInTenantCache() { - return tenantViewTTLCache == null ? 0 : tenantViewTTLCache.getNumTablesInCache(); - } + } - public int getNumTablesInGlobalIndexCache() { - return globalIndexTTLCache == null ? 0 : globalIndexTTLCache.getNumTablesInCache(); - } + /** + * The implementation classes will track TTL for various Phoenix Objects. Tables - Partitioned + * (HBase Tables with Views and View-Indexes) and Non-Partitioned (Simple HBase Tables And + * Indexes) For Flushes and Minor compaction we do not need to track the TTL. + */ + private interface TTLTracker { + // Set the TTL for the given row in the row-context being tracked. + void setTTL(Cell firstCell) throws IOException; - public int getNumTablesInTenantIndexCache() { - return tenantIndexTTLCache == null ? 0 : tenantIndexTTLCache.getNumTablesInCache(); - } + // get the row context for the current row. + RowContext getRowContext(); - public int getNumTablesInCache() { - int totalNumTables = 0; - totalNumTables += - globalViewTTLCache == null ? 0 : globalViewTTLCache.getNumTablesInCache(); - totalNumTables += - tenantViewTTLCache == null ? 0 : tenantViewTTLCache.getNumTablesInCache(); - totalNumTables += - globalIndexTTLCache == null ? 
0 : globalIndexTTLCache.getNumTablesInCache(); - totalNumTables += - tenantIndexTTLCache == null ? 0 : tenantIndexTTLCache.getNumTablesInCache(); - return totalNumTables; - } + // set the row context for the current row. + void setRowContext(RowContext rowContext); + } - } + /** + * This tracker will be used for memstore flushes and minor compaction where we do not need to + * track the TTL. + */ + private class TableTTLTrackerForFlushesAndMinor implements TTLTracker { - /** - * The implementation classes will track TTL for various Phoenix Objects. - * Tables - Partitioned (HBase Tables with Views and View-Indexes) - * and Non-Partitioned (Simple HBase Tables And Indexes) - * For Flushes and Minor compaction we do not need to track the TTL. - */ - private interface TTLTracker { - // Set the TTL for the given row in the row-context being tracked. - void setTTL(Cell firstCell) throws IOException; - // get the row context for the current row. - RowContext getRowContext(); - // set the row context for the current row. - void setRowContext(RowContext rowContext); + private long ttl; + private RowContext rowContext; + + public TableTTLTrackerForFlushesAndMinor(String tableName) { + + ttl = DEFAULT_TTL; + LOGGER.info( + String.format("TableTTLTrackerForFlushesAndMinor params:- " + "(table-name=%s, ttl=%d)", + tableName, ttl * 1000)); } - /** - * This tracker will be used for memstore flushes and minor compaction where we do not need to - * track the TTL. - */ - private class TableTTLTrackerForFlushesAndMinor implements TTLTracker { + @Override + public void setTTL(Cell firstCell) { + if (this.rowContext == null) { + this.rowContext = new RowContext(); + } + this.rowContext.setTTL(ttl); - private long ttl; - private RowContext rowContext; + } - public TableTTLTrackerForFlushesAndMinor(String tableName) { + @Override + public RowContext getRowContext() { + if (this.rowContext == null) { + this.rowContext = new RowContext(); + this.rowContext.setTTL(ttl); + } + return rowContext; + } - ttl = DEFAULT_TTL; - LOGGER.info(String.format( - "TableTTLTrackerForFlushesAndMinor params:- " + - "(table-name=%s, ttl=%d)", - tableName, ttl*1000)); - } + @Override + public void setRowContext(RowContext rowContext) { + this.rowContext = rowContext; + this.rowContext.setTTL(ttl); + } + } - @Override - public void setTTL(Cell firstCell) { - if (this.rowContext == null) { - this.rowContext = new RowContext(); - } - this.rowContext.setTTL(ttl); + private class NonPartitionedTableTTLTracker implements TTLTracker { - } + private long ttl; + private RowContext rowContext; - @Override - public RowContext getRowContext() { - if (this.rowContext == null) { - this.rowContext = new RowContext(); - this.rowContext.setTTL(ttl); - } - return rowContext; - } + public NonPartitionedTableTTLTracker(PTable pTable, Store store) { - @Override - public void setRowContext(RowContext rowContext) { - this.rowContext = rowContext; - this.rowContext.setTTL(ttl); - } + boolean isSystemTable = pTable.getType() == PTableType.SYSTEM; + if (isSystemTable) { + ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor(); + ttl = cfd.getTimeToLive(); + } else { + ttl = pTable.getTTL() != TTL_NOT_DEFINED ? 
pTable.getTTL() : DEFAULT_TTL; + } + LOGGER.info(String.format( + "NonPartitionedTableTTLTracker params:- " + "(physical-name=%s, ttl=%d, isSystemTable=%s)", + pTable.getName().toString(), ttl * 1000, isSystemTable)); } - private class NonPartitionedTableTTLTracker implements TTLTracker { + @Override + public void setTTL(Cell firstCell) { + if (this.rowContext == null) { + this.rowContext = new RowContext(); + } + this.rowContext.setTTL(ttl); - private long ttl; - private RowContext rowContext; + } - public NonPartitionedTableTTLTracker( - PTable pTable, - Store store) { + @Override + public RowContext getRowContext() { + if (this.rowContext == null) { + this.rowContext = new RowContext(); + this.rowContext.setTTL(ttl); + } + return rowContext; + } - boolean isSystemTable = pTable.getType() == PTableType.SYSTEM; - if (isSystemTable) { - ColumnFamilyDescriptor cfd = store.getColumnFamilyDescriptor(); - ttl = cfd.getTimeToLive(); - } else { - ttl = pTable.getTTL() != TTL_NOT_DEFINED ? pTable.getTTL() : DEFAULT_TTL; - } - LOGGER.info(String.format( - "NonPartitionedTableTTLTracker params:- " + - "(physical-name=%s, ttl=%d, isSystemTable=%s)", - pTable.getName().toString(), ttl*1000, isSystemTable)); + @Override + public void setRowContext(RowContext rowContext) { + this.rowContext = rowContext; + this.rowContext.setTTL(ttl); + } + } + + private class PartitionedTableTTLTracker implements TTLTracker { + private final Logger LOGGER = LoggerFactory.getLogger(PartitionedTableTTLTracker.class); + + // Default or Table-Level TTL + private long ttl; + private RowContext rowContext; + + private boolean isSharedIndex = false; + private boolean isMultiTenant = false; + private boolean isSalted = false; + private boolean isLongViewIndexEnabled = false; + private int startingPKPosition; + private PartitionedTableRowKeyMatcher tableRowKeyMatcher; + + public PartitionedTableTTLTracker(PTable table, boolean isSalted, boolean isSharedIndex, + boolean isLongViewIndexEnabled, int viewTTLTenantViewsPerScanLimit) throws IOException { + + try { + // Initialize the various matcher indexes + this.tableRowKeyMatcher = new PartitionedTableRowKeyMatcher(table, isSalted, isSharedIndex, + isLongViewIndexEnabled, viewTTLTenantViewsPerScanLimit); + this.ttl = table.getTTL() != TTL_NOT_DEFINED ? 
table.getTTL() : DEFAULT_TTL; + this.isSharedIndex = isSharedIndex || localIndex; + this.isLongViewIndexEnabled = isLongViewIndexEnabled; + this.isSalted = isSalted; + this.isMultiTenant = table.isMultiTenant(); + + this.startingPKPosition = getStartingPKPosition(); + ; + LOGGER.info(String.format( + "PartitionedTableTTLTracker params:- " + "region-name = %s, table-name = %s, " + + "multi-tenant = %s, shared-index = %s, salted = %s, " + + "default-ttl = %d, startingPKPosition = %d", + region.getRegionInfo().getEncodedName(), + region.getRegionInfo().getTable().getNameAsString(), this.isMultiTenant, + this.isSharedIndex, this.isSalted, this.ttl, this.startingPKPosition)); + + } catch (SQLException e) { + LOGGER.error(String.format("Failed to read from catalog: " + e.getMessage())); + throw new IOException(e); + } finally { + if (tableRowKeyMatcher != null) { + LOGGER.info(String.format( + "PartitionedTableTTLTracker stats " + "(index-entries, table-entries) for region = %s:-" + + "global-views = %d, %d, " + "tenant-views = %d, %d, " + "global-indexes = %d, %d " + + "tenant-indexes = %d, %d ", + region.getRegionInfo().getEncodedName(), tableRowKeyMatcher.getNumGlobalEntries(), + tableRowKeyMatcher.getNumTablesInGlobalCache(), + tableRowKeyMatcher.getNumTenantEntries(), + tableRowKeyMatcher.getNumTablesInTenantCache(), + tableRowKeyMatcher.getNumGlobalIndexEntries(), + tableRowKeyMatcher.getNumTablesInGlobalIndexCache(), + tableRowKeyMatcher.getNumTenantIndexEntries(), + tableRowKeyMatcher.getNumTablesInTenantIndexCache())); + } else { + LOGGER.error(String.format("Failed to initialize: tableRowKeyMatcher is null")); } + } + } - @Override - public void setTTL(Cell firstCell) { - if (this.rowContext == null) { - this.rowContext = new RowContext(); - } - this.rowContext.setTTL(ttl); + private int getStartingPKPosition() { + int startingPKPosition = 0; + if (this.isMultiTenant && this.isSalted && this.isSharedIndex) { + // case multi-tenanted, salted, is a shared-index => + // startingPKPosition = 1 skip the salt-byte and starting at the viewIndexId + startingPKPosition = 1; + } else if (this.isMultiTenant && this.isSalted && !this.isSharedIndex) { + // case multi-tenanted, salted, not a shared-index => + // startingPKPosition = 2 skip salt byte + tenant-id to search the global space + // if above search returned no results + // then search using the following start position + // startingPKPosition = 1 skip salt-byte to search the tenant space + startingPKPosition = 2; + } else if (this.isMultiTenant && !this.isSalted && this.isSharedIndex) { + // case multi-tenanted, not-salted, is a shared-index => + // startingPKPosition = 0, the first key will the viewIndexId + startingPKPosition = 0; + } else if (this.isMultiTenant && !this.isSalted && !this.isSharedIndex) { + // case multi-tenanted, not-salted, not a shared-index => + // startingPKPosition = 1 skip tenant-id to search the global space + // if above search returned no results + // then search using the following start position + // startingPKPosition = 0 to search the tenant space + startingPKPosition = 1; + } else if (!this.isMultiTenant && this.isSalted && this.isSharedIndex) { + // case non-multi-tenanted, salted, shared-index => + // startingPKPosition = 1 skip salt-byte search using the viewIndexId + startingPKPosition = 1; + } else if (!this.isMultiTenant && this.isSalted && !this.isSharedIndex) { + // case non-multi-tenanted, salted, not a shared-index => + // start at the global pk position after skipping the salt byte + // 
startingPKPosition = 1 skip salt-byte + startingPKPosition = 1; + } else if (!this.isMultiTenant && !this.isSalted && this.isSharedIndex) { + // case non-multi-tenanted, not-salted, is a shared-index => + // startingPKPosition = 0 the first key will the viewIndexId + startingPKPosition = 0; + } else { + // case non-multi-tenanted, not-salted, not a view-index-table => + // startingPKPosition = 0 + startingPKPosition = 0; + } + return startingPKPosition; + } - } + @Override + public void setTTL(Cell firstCell) throws IOException { + + boolean matched = false; + TableTTLInfo tableTTLInfo = null; + List pkPositions = null; + long rowTTLInSecs = ttl; + long matchedOffset = -1; + int pkPosition = startingPKPosition; + MatcherType matchedType = null; + try { + // pkPositions holds the byte offsets for the PKs of the base table + // for the current row + pkPositions = isSharedIndex + ? (isSalted ? Arrays.asList(0, 1) : Arrays.asList(0)) + : tableRowKeyMatcher.getRowKeyParser().parsePKPositions(firstCell); + // The startingPKPosition was initialized in the following manner => + // see getStartingPKPosition() + // case multi-tenant, salted, is-shared-index => startingPKPosition = 1 + // case multi-tenant, salted, not-shared-index => startingPKPosition = 2 + // case multi-tenant, not-salted, is-shared-index => startingPKPosition = 0 + // case multi-tenant, not-salted, not-shared-index => startingPKPosition = 1 + // case non-multi-tenant, salted, is-shared-index => startingPKPosition = 1 + // case non-multi-tenant, salted, not-shared-index => startingPKPosition = 1 + // case non-multi-tenant, not-salted, is-shared-index => startingPKPosition = 0 + // case non-multi-tenant, not-salted, not-shared-index => startingPKPosition = 0 + int offset = pkPositions.get(pkPosition); + byte[] rowKey = CellUtil.cloneRow(firstCell); + Integer tableId = null; + // Search using the starting offset (startingPKPosition offset) + if (isSharedIndex) { + // case index table, first check the global indexes + matchedType = GLOBAL_INDEXES; + tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_INDEXES); + if (tableTTLInfo == null) { + matchedType = TENANT_INDEXES; + tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, TENANT_INDEXES); + } + + } else if (isMultiTenant) { + // case multi-tenant, non-index tables, global space + matchedType = GLOBAL_VIEWS; + tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_VIEWS); + if (tableTTLInfo == null) { + // search returned no results, determine the new pkPosition(offset) to use + // Search using the new offset + pkPosition = this.isSalted ? 1 : 0; + offset = pkPositions.get(pkPosition); + // case multi-tenant, non-index tables, tenant space + matchedType = TENANT_VIEWS; + tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, TENANT_VIEWS); + } + } else { + // case non-multi-tenant and non-index tables, global space + matchedType = GLOBAL_VIEWS; + tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_VIEWS); + } + matched = tableTTLInfo != null; + matchedOffset = matched ? offset : -1; + rowTTLInSecs = matched ? 
tableTTLInfo.getTTL() : ttl; /* in secs */ + if (this.rowContext == null) { + this.rowContext = new RowContext(); + } + this.rowContext.setTTL(rowTTLInSecs); + } catch (SQLException e) { + LOGGER.error(String.format("Exception when visiting table: " + e.getMessage())); + throw new IOException(e); + } finally { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format( + "visiting row-key = %s, region = %s, " + "table-ttl-info=%s, " + + "matched = %s, matched-type = %s, match-pattern = %s, " + + "ttl = %d, matched-offset = %d, " + "pk-pos = %d, pk-pos-list = %s", + CellUtil.getCellKeyAsString(firstCell), + CompactionScanner.this.store.getRegionInfo().getEncodedName(), + matched ? tableTTLInfo : "NULL", matched, matchedType, + matched ? Bytes.toStringBinary(tableTTLInfo.getMatchPattern()) : "NULL", rowTTLInSecs, + matchedOffset, pkPosition, + pkPositions != null + ? pkPositions.stream().map((p) -> String.valueOf(p)).collect(Collectors.joining(",")) + : "")); + } + } - @Override - public RowContext getRowContext() { - if (this.rowContext == null) { - this.rowContext = new RowContext(); - this.rowContext.setTTL(ttl); - } - return rowContext; - } + } - @Override - public void setRowContext(RowContext rowContext) { - this.rowContext = rowContext; - this.rowContext.setTTL(ttl); - } + @Override + public RowContext getRowContext() { + if (this.rowContext == null) { + this.rowContext = new RowContext(); + this.rowContext.setTTL(ttl); + } + return rowContext; } - private class PartitionedTableTTLTracker implements TTLTracker { - private final Logger LOGGER = LoggerFactory.getLogger( - PartitionedTableTTLTracker.class); - - // Default or Table-Level TTL - private long ttl; - private RowContext rowContext; - - private boolean isSharedIndex = false; - private boolean isMultiTenant = false; - private boolean isSalted = false; - private boolean isLongViewIndexEnabled = false; - private int startingPKPosition; - private PartitionedTableRowKeyMatcher tableRowKeyMatcher; - - public PartitionedTableTTLTracker( - PTable table, - boolean isSalted, - boolean isSharedIndex, - boolean isLongViewIndexEnabled, - int viewTTLTenantViewsPerScanLimit - ) throws IOException { - - try { - // Initialize the various matcher indexes - this.tableRowKeyMatcher = - new PartitionedTableRowKeyMatcher(table, isSalted, isSharedIndex, - isLongViewIndexEnabled, viewTTLTenantViewsPerScanLimit); - this.ttl = table.getTTL() != TTL_NOT_DEFINED ? 
table.getTTL() : DEFAULT_TTL; - this.isSharedIndex = isSharedIndex || localIndex; - this.isLongViewIndexEnabled = isLongViewIndexEnabled; - this.isSalted = isSalted; - this.isMultiTenant = table.isMultiTenant(); - - this.startingPKPosition = getStartingPKPosition();; - LOGGER.info(String.format( - "PartitionedTableTTLTracker params:- " + - "region-name = %s, table-name = %s, " + - "multi-tenant = %s, shared-index = %s, salted = %s, " + - "default-ttl = %d, startingPKPosition = %d", - region.getRegionInfo().getEncodedName(), - region.getRegionInfo().getTable().getNameAsString(), this.isMultiTenant, - this.isSharedIndex, this.isSalted, this.ttl, this.startingPKPosition)); - - } catch (SQLException e) { - LOGGER.error(String.format("Failed to read from catalog: " + e.getMessage())); - throw new IOException(e); - } finally { - if (tableRowKeyMatcher != null) { - LOGGER.info(String.format( - "PartitionedTableTTLTracker stats " + - "(index-entries, table-entries) for region = %s:-" + - "global-views = %d, %d, " + - "tenant-views = %d, %d, " + - "global-indexes = %d, %d " + - "tenant-indexes = %d, %d ", - region.getRegionInfo().getEncodedName(), - tableRowKeyMatcher.getNumGlobalEntries(), - tableRowKeyMatcher.getNumTablesInGlobalCache(), - tableRowKeyMatcher.getNumTenantEntries(), - tableRowKeyMatcher.getNumTablesInTenantCache(), - tableRowKeyMatcher.getNumGlobalIndexEntries(), - tableRowKeyMatcher.getNumTablesInGlobalIndexCache(), - tableRowKeyMatcher.getNumTenantIndexEntries(), - tableRowKeyMatcher.getNumTablesInTenantIndexCache())); - } else { - LOGGER.error(String.format("Failed to initialize: tableRowKeyMatcher is null")); - } - } - } + @Override + public void setRowContext(RowContext rowContext) { + this.rowContext = rowContext; + this.rowContext.setTTL(ttl); + } + } + + public class RowKeyParser { + private final RowKeyColumnExpression[] baseTableColExprs; + private final List baseTablePKColumns; + private final PColumn[] sharedIndexPKColumns; + private final boolean isSalted; + private final boolean isLongViewIndexEnabled; + private final boolean isMultiTenant; + private final PDataType tenantDataType; + + public RowKeyParser(PTable table, boolean isLongViewIndexEnabled) { + this.isLongViewIndexEnabled = isLongViewIndexEnabled; + isSalted = table.getBucketNum() != null; + isMultiTenant = table.isMultiTenant(); + + // Get the TENANT_ID data type, this will be used to determine if the queries to + // SYSCAT will be batched. + tenantDataType = table.getRowKeySchema().getField(isSalted ? 1 : 0).getDataType(); + + // Initialize the ColumnExpressions for the base table PK Columns + baseTablePKColumns = table.getPKColumns(); + baseTableColExprs = new RowKeyColumnExpression[baseTablePKColumns.size()]; + int saltPos = isSalted ? 0 : -1; + for (int i = 0; i < baseTablePKColumns.size(); i++) { + PColumn column = baseTablePKColumns.get(i); + baseTableColExprs[i] = + new RowKeyColumnExpression(column, new RowKeyValueAccessor(baseTablePKColumns, i)); + } + + // Initialize the shared index PK columns to be used in getTenantIdFromRowKey() + // to create a RowKeyColumnExpression for tenantId parsing. + // position 0 : salt byte if salted else index_id + // position 1 : index_id if salted else tenant_id + // position 2 : tenant_id if salted and multi-tenanted else empty + sharedIndexPKColumns = new PColumn[3]; + if (saltPos == 0) { + sharedIndexPKColumns[saltPos] = baseTablePKColumns.get(saltPos); + } + final int tenantPos = isMultiTenant ? 
(saltPos + 1) : -1; + if ((tenantPos == 0) || (tenantPos == 1)) { + sharedIndexPKColumns[tenantPos] = new PColumn() { + + @Override + public PName getName() { + return PNameFactory.newName("_INDEX_ID"); + } + + @Override + public PName getFamilyName() { + return null; + } + + @Override + public int getPosition() { + return tenantPos; + } + + @Override + public Integer getArraySize() { + return 0; + } + + @Override + public byte[] getViewConstant() { + return new byte[0]; + } + + @Override + public boolean isViewReferenced() { + return false; + } - private int getStartingPKPosition() { - int startingPKPosition = 0; - if (this.isMultiTenant && this.isSalted && this.isSharedIndex) { - // case multi-tenanted, salted, is a shared-index => - // startingPKPosition = 1 skip the salt-byte and starting at the viewIndexId - startingPKPosition = 1; - } else if (this.isMultiTenant && this.isSalted && !this.isSharedIndex) { - // case multi-tenanted, salted, not a shared-index => - // startingPKPosition = 2 skip salt byte + tenant-id to search the global space - // if above search returned no results - // then search using the following start position - // startingPKPosition = 1 skip salt-byte to search the tenant space - startingPKPosition = 2; - } else if (this.isMultiTenant && !this.isSalted && this.isSharedIndex) { - // case multi-tenanted, not-salted, is a shared-index => - // startingPKPosition = 0, the first key will the viewIndexId - startingPKPosition = 0; - } else if (this.isMultiTenant && !this.isSalted && !this.isSharedIndex) { - // case multi-tenanted, not-salted, not a shared-index => - // startingPKPosition = 1 skip tenant-id to search the global space - // if above search returned no results - // then search using the following start position - // startingPKPosition = 0 to search the tenant space - startingPKPosition = 1; - } else if (!this.isMultiTenant && this.isSalted && this.isSharedIndex) { - // case non-multi-tenanted, salted, shared-index => - // startingPKPosition = 1 skip salt-byte search using the viewIndexId - startingPKPosition = 1; - } else if (!this.isMultiTenant && this.isSalted && !this.isSharedIndex) { - // case non-multi-tenanted, salted, not a shared-index => - // start at the global pk position after skipping the salt byte - // startingPKPosition = 1 skip salt-byte - startingPKPosition = 1; - } else if (!this.isMultiTenant && !this.isSalted && this.isSharedIndex) { - // case non-multi-tenanted, not-salted, is a shared-index => - // startingPKPosition = 0 the first key will the viewIndexId - startingPKPosition = 0; - } else { - // case non-multi-tenanted, not-salted, not a view-index-table => - // startingPKPosition = 0 - startingPKPosition = 0; - } - return startingPKPosition; - } + @Override + public int getEstimatedSize() { + return 0; + } - @Override - public void setTTL(Cell firstCell) throws IOException { - - boolean matched = false; - TableTTLInfo tableTTLInfo = null; - List pkPositions = null; - long rowTTLInSecs = ttl; - long matchedOffset = -1; - int pkPosition = startingPKPosition; - MatcherType matchedType = null; - try { - // pkPositions holds the byte offsets for the PKs of the base table - // for the current row - pkPositions = isSharedIndex ? - (isSalted ? 
- Arrays.asList(0, 1) : - Arrays.asList(0)) : - tableRowKeyMatcher.getRowKeyParser().parsePKPositions(firstCell); - // The startingPKPosition was initialized in the following manner => - // see getStartingPKPosition() - // case multi-tenant, salted, is-shared-index => startingPKPosition = 1 - // case multi-tenant, salted, not-shared-index => startingPKPosition = 2 - // case multi-tenant, not-salted, is-shared-index => startingPKPosition = 0 - // case multi-tenant, not-salted, not-shared-index => startingPKPosition = 1 - // case non-multi-tenant, salted, is-shared-index => startingPKPosition = 1 - // case non-multi-tenant, salted, not-shared-index => startingPKPosition = 1 - // case non-multi-tenant, not-salted, is-shared-index => startingPKPosition = 0 - // case non-multi-tenant, not-salted, not-shared-index => startingPKPosition = 0 - int offset = pkPositions.get(pkPosition); - byte[] rowKey = CellUtil.cloneRow(firstCell); - Integer tableId = null; - // Search using the starting offset (startingPKPosition offset) - if (isSharedIndex) { - // case index table, first check the global indexes - matchedType = GLOBAL_INDEXES; - tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_INDEXES); - if (tableTTLInfo == null) { - matchedType = TENANT_INDEXES; - tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, TENANT_INDEXES); - } - - } else if (isMultiTenant) { - // case multi-tenant, non-index tables, global space - matchedType = GLOBAL_VIEWS; - tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_VIEWS); - if (tableTTLInfo == null) { - // search returned no results, determine the new pkPosition(offset) to use - // Search using the new offset - pkPosition = this.isSalted ? 1 : 0; - offset = pkPositions.get(pkPosition); - // case multi-tenant, non-index tables, tenant space - matchedType = TENANT_VIEWS; - tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, TENANT_VIEWS); - } - } else { - // case non-multi-tenant and non-index tables, global space - matchedType = GLOBAL_VIEWS; - tableTTLInfo = tableRowKeyMatcher.match(rowKey, offset, GLOBAL_VIEWS); - } - matched = tableTTLInfo != null; - matchedOffset = matched ? offset : -1; - rowTTLInSecs = matched ? tableTTLInfo.getTTL() : ttl; /* in secs */ - if (this.rowContext == null) { - this.rowContext = new RowContext(); - } - this.rowContext.setTTL(rowTTLInSecs); - } catch (SQLException e) { - LOGGER.error(String.format("Exception when visiting table: " + e.getMessage())); - throw new IOException(e); - } finally { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("visiting row-key = %s, region = %s, " + - "table-ttl-info=%s, " + - "matched = %s, matched-type = %s, match-pattern = %s, " + - "ttl = %d, matched-offset = %d, " + - "pk-pos = %d, pk-pos-list = %s", - CellUtil.getCellKeyAsString(firstCell), - CompactionScanner.this.store.getRegionInfo().getEncodedName(), - matched ? tableTTLInfo : "NULL", - matched, - matchedType, - matched ? Bytes.toStringBinary(tableTTLInfo.getMatchPattern()) : "NULL", - rowTTLInSecs, - matchedOffset, - pkPosition, - pkPositions != null ? 
pkPositions.stream() - .map((p) -> String.valueOf(p)) - .collect(Collectors.joining(",")) : "")); - } - } - - } - - @Override - public RowContext getRowContext() { - if (this.rowContext == null) { - this.rowContext = new RowContext(); - this.rowContext.setTTL(ttl); - } - return rowContext; - } - - @Override - public void setRowContext(RowContext rowContext) { - this.rowContext = rowContext; - this.rowContext.setTTL(ttl); - } - } - - public class RowKeyParser { - private final RowKeyColumnExpression[] baseTableColExprs; - private final List baseTablePKColumns; - private final PColumn[] sharedIndexPKColumns; - private final boolean isSalted; - private final boolean isLongViewIndexEnabled; - private final boolean isMultiTenant; - private final PDataType tenantDataType; - - public RowKeyParser(PTable table, boolean isLongViewIndexEnabled) { - this.isLongViewIndexEnabled = isLongViewIndexEnabled; - isSalted = table.getBucketNum() != null; - isMultiTenant = table.isMultiTenant(); - - // Get the TENANT_ID data type, this will be used to determine if the queries to - // SYSCAT will be batched. - tenantDataType = table.getRowKeySchema().getField(isSalted ? 1 : 0).getDataType(); - - // Initialize the ColumnExpressions for the base table PK Columns - baseTablePKColumns = table.getPKColumns(); - baseTableColExprs = new RowKeyColumnExpression[baseTablePKColumns.size()]; - int saltPos = isSalted ? 0 : -1; - for (int i = 0; i < baseTablePKColumns.size(); i++) { - PColumn column = baseTablePKColumns.get(i); - baseTableColExprs[i] = new RowKeyColumnExpression( - column, - new RowKeyValueAccessor(baseTablePKColumns, i)); - } - - // Initialize the shared index PK columns to be used in getTenantIdFromRowKey() - // to create a RowKeyColumnExpression for tenantId parsing. - // position 0 : salt byte if salted else index_id - // position 1 : index_id if salted else tenant_id - // position 2 : tenant_id if salted and multi-tenanted else empty - sharedIndexPKColumns = new PColumn[3]; - if (saltPos == 0) { - sharedIndexPKColumns[saltPos] = baseTablePKColumns.get(saltPos); - } - final int tenantPos = isMultiTenant ? 
(saltPos + 1) : -1; - if ((tenantPos == 0) || (tenantPos == 1)) { - sharedIndexPKColumns[tenantPos] = new PColumn() { - - @Override - public PName getName() { - return PNameFactory.newName("_INDEX_ID"); - } - - @Override - public PName getFamilyName() { - return null; - } - - @Override - public int getPosition() { - return tenantPos; - } - - @Override - public Integer getArraySize() { - return 0; - } + @Override + public String getExpressionStr() { + return ""; + } - @Override - public byte[] getViewConstant() { - return new byte[0]; - } - - @Override - public boolean isViewReferenced() { - return false; - } - - @Override - public int getEstimatedSize() { - return 0; - } + @Override + public long getTimestamp() { + return 0; + } - @Override - public String getExpressionStr() { - return ""; - } + @Override + public boolean isDerived() { + return false; + } - @Override - public long getTimestamp() { - return 0; - } + @Override + public boolean isExcluded() { + return false; + } - @Override - public boolean isDerived() { - return false; - } + @Override + public boolean isRowTimestamp() { + return false; + } - @Override - public boolean isExcluded() { - return false; - } + @Override + public boolean isDynamic() { + return false; + } - @Override - public boolean isRowTimestamp() { - return false; - } + @Override + public byte[] getColumnQualifierBytes() { + return new byte[0]; + } - @Override - public boolean isDynamic() { - return false; - } + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return RowKeyParser.this.isLongViewIndexEnabled ? PLong.INSTANCE : PSmallint.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return 0; + } + + @Override + public Integer getScale() { + return 0; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.ASC; + } + }; + sharedIndexPKColumns[tenantPos + 1] = baseTablePKColumns.get(tenantPos); + } - @Override - public byte[] getColumnQualifierBytes() { - return new byte[0]; - } + } - @Override - public boolean isNullable() { - return false; - } + // accessor method for tenantDataType + public PDataType getTenantIdDataType() { + return tenantDataType; + } - @Override - public PDataType getDataType() { - return RowKeyParser.this.isLongViewIndexEnabled ? - PLong.INSTANCE : PSmallint.INSTANCE; - } + // Parse the row key cell to find the PK position boundaries + public List parsePKPositions(Cell inputCell) { + RowKeyTuple inputTuple = new RowKeyTuple(); + inputTuple.setKey(inputCell.getRowArray(), inputCell.getRowOffset(), + inputCell.getRowLength()); + + int lastPos = 0; + List pkPositions = new ArrayList<>(); + pkPositions.add(lastPos); + // Use the RowKeyColumnExpression to parse the PK positions + for (int i = 0; i < baseTableColExprs.length; i++) { + RowKeyColumnExpression expr = baseTableColExprs[i]; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + expr.evaluate(inputTuple, ptr); + int separatorLength = baseTablePKColumns.get(i).getDataType().isFixedWidth() ? 
0 : 1; + int endPos = lastPos + ptr.getLength() + separatorLength; + pkPositions.add(endPos); + lastPos = endPos; + } + return pkPositions; + } - @Override - public Integer getMaxLength() { - return 0; - } + // Parse the row key to extract the TENANT_ID as a String + private String getTenantIdFromRowKey(byte[] rowKey) throws SQLException { + return getTenantIdFromRowKey(rowKey, false); + } - @Override - public Integer getScale() { - return 0; - } + // Parse the row key to extract the TENANT_ID as a String + private String getTenantIdFromRowKey(byte[] rowKey, boolean isSharedIndex) throws SQLException { + // case: when it is the start of the first region or end of the last region + if ( + (rowKey != null && ByteUtil.isEmptyOrNull(rowKey, 0, rowKey.length)) || (!isMultiTenant) + ) { + return ""; + } + // Construct a cell from the rowKey for us evaluate the tenantId + Cell rowKeyCell = + CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey).setFamily(emptyCF) + .setQualifier(emptyCQ).setTimestamp(EnvironmentEdgeManager.currentTimeMillis()) + .setType(Cell.Type.Put).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); + // Evaluating and converting a byte ptr to tenantId + // Sometimes the underlying byte ptr is padded with null bytes (0x0) + // in case of salted regions. + int tenantIdPosition = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0); + RowKeyColumnExpression expr; + PDataType dataType; + RowKeyTuple inputTuple = new RowKeyTuple(); + if (isSharedIndex) { + expr = new RowKeyColumnExpression(sharedIndexPKColumns[tenantIdPosition], + new RowKeyValueAccessor(Arrays.asList(sharedIndexPKColumns), tenantIdPosition)); + dataType = sharedIndexPKColumns[tenantIdPosition].getDataType(); + + // Constructing a RowKeyTuple for expression evaluation + inputTuple.setKey(rowKeyCell.getRowArray(), rowKeyCell.getRowOffset(), + rowKeyCell.getRowLength()); + } else { + expr = baseTableColExprs[tenantIdPosition]; + dataType = baseTablePKColumns.get(tenantIdPosition).getDataType(); + inputTuple.setKey(rowKeyCell.getRowArray(), rowKeyCell.getRowOffset(), + rowKeyCell.getRowLength()); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + String tenantId = ""; + try { + expr.evaluate(inputTuple, ptr); + dataType.pad(ptr, expr.getMaxLength(), expr.getSortOrder()); + tenantId = ByteUtil.isEmptyOrNull(ptr.get(), ptr.getOffset(), ptr.getLength()) + ? "" + : dataType.toObject(ptr).toString(); + } catch (IllegalDataException ex) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE).build() + .buildException(); + } finally { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("TenantId in getTenantIdFromRowKey: {}, {}", + CompactionScanner.this.store.getRegionInfo().getEncodedName(), tenantId); + } + } + return tenantId; + } + } + + /** + * The context for a given row during compaction. A row may have multiple compaction row versions. + * CompactionScanner uses the same row context for these versions. 
+ */ + class RowContext { + Cell familyDeleteMarker = null; + Cell familyVersionDeleteMarker = null; + List columnDeleteMarkers = new ArrayList<>(); + int version = 0; + long maxTimestamp; + long minTimestamp; + long ttl; + long ttlWindowStart; + long maxLookbackWindowStartForRow; + + private void init() { + familyDeleteMarker = null; + familyVersionDeleteMarker = null; + columnDeleteMarkers.clear(); + version = 0; + } - @Override - public SortOrder getSortOrder() { - return SortOrder.ASC; - } - }; - sharedIndexPKColumns[tenantPos+1] = baseTablePKColumns.get(tenantPos); - } + public void setTTL(long ttlInSecs) { + this.ttl = ttlInSecs * 1000; + this.ttlWindowStart = ttlInSecs == HConstants.FOREVER ? 1 : compactionTime - ttl; + this.maxLookbackWindowStartForRow = Math.max(ttlWindowStart, maxLookbackWindowStart); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("RowContext:- (ttlWindowStart=%d, maxLookbackWindowStart=%d)", + ttlWindowStart, maxLookbackWindowStart)); + } - } + } - // accessor method for tenantDataType - public PDataType getTenantIdDataType() { - return tenantDataType; - } + public long getTTL() { + return ttl; + } - // Parse the row key cell to find the PK position boundaries - public List parsePKPositions(Cell inputCell) { - RowKeyTuple inputTuple = new RowKeyTuple(); - inputTuple.setKey(inputCell.getRowArray(), - inputCell.getRowOffset(), - inputCell.getRowLength()); - - int lastPos = 0; - List pkPositions = new ArrayList<>(); - pkPositions.add(lastPos); - // Use the RowKeyColumnExpression to parse the PK positions - for (int i = 0; i < baseTableColExprs.length; i++) { - RowKeyColumnExpression expr = baseTableColExprs[i]; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - expr.evaluate(inputTuple, ptr); - int separatorLength = baseTablePKColumns.get(i).getDataType().isFixedWidth() ? 0 : 1; - int endPos = lastPos + ptr.getLength() + separatorLength; - pkPositions.add(endPos); - lastPos = endPos; - } - return pkPositions; - } + public long getTtlWindowStart() { + return ttlWindowStart; + } - // Parse the row key to extract the TENANT_ID as a String - private String getTenantIdFromRowKey(byte[] rowKey) throws SQLException { - return getTenantIdFromRowKey(rowKey, false); - } + public long getMaxLookbackWindowStart() { + return maxLookbackWindowStartForRow; + } - // Parse the row key to extract the TENANT_ID as a String - private String getTenantIdFromRowKey(byte[] rowKey, boolean isSharedIndex) throws SQLException { - // case: when it is the start of the first region or end of the last region - if ((rowKey != null && ByteUtil.isEmptyOrNull(rowKey, 0, rowKey.length)) || (!isMultiTenant)) { - return ""; - } - // Construct a cell from the rowKey for us evaluate the tenantId - Cell rowKeyCell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowKey) - .setFamily(emptyCF) - .setQualifier(emptyCQ) - .setTimestamp(EnvironmentEdgeManager.currentTimeMillis()) - .setType(Cell.Type.Put) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); - // Evaluating and converting a byte ptr to tenantId - // Sometimes the underlying byte ptr is padded with null bytes (0x0) - // in case of salted regions. - int tenantIdPosition = (isSalted ? 1 : 0) + (isSharedIndex ? 
1 : 0); - RowKeyColumnExpression expr; - PDataType dataType; - RowKeyTuple inputTuple = new RowKeyTuple(); - if (isSharedIndex) { - expr = new RowKeyColumnExpression( - sharedIndexPKColumns[tenantIdPosition], - new RowKeyValueAccessor(Arrays.asList(sharedIndexPKColumns), tenantIdPosition)); - dataType = sharedIndexPKColumns[tenantIdPosition].getDataType(); - - // Constructing a RowKeyTuple for expression evaluation - inputTuple.setKey(rowKeyCell.getRowArray(), - rowKeyCell.getRowOffset(), - rowKeyCell.getRowLength()); - } else { - expr = baseTableColExprs[tenantIdPosition]; - dataType = baseTablePKColumns.get(tenantIdPosition).getDataType(); - inputTuple.setKey(rowKeyCell.getRowArray(), - rowKeyCell.getRowOffset(), - rowKeyCell.getRowLength()); - } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - String tenantId = ""; - try { - expr.evaluate(inputTuple, ptr); - dataType.pad(ptr, expr.getMaxLength(), expr.getSortOrder()); - tenantId = ByteUtil.isEmptyOrNull(ptr.get(), ptr.getOffset(), - ptr.getLength()) ? "" : dataType.toObject(ptr).toString(); - } catch(IllegalDataException ex) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE) - .build().buildException(); - } - finally { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("TenantId in getTenantIdFromRowKey: {}, {}", CompactionScanner.this.store.getRegionInfo().getEncodedName(), tenantId); - } - } - return tenantId; - } + private void addColumnDeleteMarker(Cell deleteMarker) { + if (columnDeleteMarkers.isEmpty()) { + columnDeleteMarkers.add(deleteMarker); + return; + } + int i = 0; + // Replace the existing delete marker for the same column + for (Cell cell : columnDeleteMarkers) { + if ( + cell.getType() == deleteMarker.getType() && CellUtil.matchingColumn(cell, deleteMarker) + ) { + columnDeleteMarkers.remove(i); + break; + } + i++; + } + columnDeleteMarkers.add(deleteMarker); } + private void retainFamilyDeleteMarker(List retainedCells) { + if (familyVersionDeleteMarker != null) { + retainedCells.add(familyVersionDeleteMarker); + // Set it to null so it will be used once + familyVersionDeleteMarker = null; + } else { + // The same delete family marker may be retained multiple times. Duplicates will be + // removed later + retainedCells.add(familyDeleteMarker); + } + } /** - * The context for a given row during compaction. A row may have multiple compaction row - * versions. CompactionScanner uses the same row context for these versions. + * Based on the column delete markers decide if the cells should be retained. If a deleted cell + * is retained, the delete marker is also retained. */ - class RowContext { - Cell familyDeleteMarker = null; - Cell familyVersionDeleteMarker = null; - List columnDeleteMarkers = new ArrayList<>(); - int version = 0; - long maxTimestamp; - long minTimestamp; - long ttl; - long ttlWindowStart; - long maxLookbackWindowStartForRow; - - private void init() { - familyDeleteMarker = null; - familyVersionDeleteMarker = null; - columnDeleteMarkers.clear(); - version = 0; - } - - public void setTTL(long ttlInSecs) { - this.ttl = ttlInSecs*1000; - this.ttlWindowStart = ttlInSecs == HConstants.FOREVER ? 
1 : compactionTime - ttl ; - this.maxLookbackWindowStartForRow = Math.max(ttlWindowStart, maxLookbackWindowStart); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("RowContext:- (ttlWindowStart=%d, maxLookbackWindowStart=%d)", - ttlWindowStart, maxLookbackWindowStart)); - } - - } - public long getTTL() { - return ttl; - } - public long getTtlWindowStart() { - return ttlWindowStart; - } - public long getMaxLookbackWindowStart() { - return maxLookbackWindowStartForRow; - } - - - private void addColumnDeleteMarker(Cell deleteMarker) { - if (columnDeleteMarkers.isEmpty()) { - columnDeleteMarkers.add(deleteMarker); - return; - } - int i = 0; - // Replace the existing delete marker for the same column - for (Cell cell : columnDeleteMarkers) { - if (cell.getType() == deleteMarker.getType() && - CellUtil.matchingColumn(cell, deleteMarker)) { - columnDeleteMarkers.remove(i); - break; - } - i++; - } - columnDeleteMarkers.add(deleteMarker); - } - - private void retainFamilyDeleteMarker(List retainedCells) { - if (familyVersionDeleteMarker != null) { - retainedCells.add(familyVersionDeleteMarker); - // Set it to null so it will be used once - familyVersionDeleteMarker = null; + private void retainCell(Cell cell, List retainedCells, KeepDeletedCells keepDeletedCells, + long ttlWindowStart) { + int i = 0; + for (Cell dm : columnDeleteMarkers) { + if (cell.getTimestamp() > dm.getTimestamp()) { + continue; + } + if ((CellUtil.matchingFamily(cell, dm)) && CellUtil.matchingQualifier(cell, dm)) { + if (dm.getType() == Cell.Type.Delete) { + if (cell.getTimestamp() == dm.getTimestamp()) { + // Delete is for deleting a specific cell version. Thus, it can be used + // to delete only one cell. + columnDeleteMarkers.remove(i); } else { - // The same delete family marker may be retained multiple times. Duplicates will be - // removed later - retainedCells.add(familyDeleteMarker); - } - } - /** - * Based on the column delete markers decide if the cells should be retained. If a - * deleted cell is retained, the delete marker is also retained. - */ - private void retainCell(Cell cell, List retainedCells, - KeepDeletedCells keepDeletedCells, long ttlWindowStart) { - int i = 0; - for (Cell dm : columnDeleteMarkers) { - if (cell.getTimestamp() > dm.getTimestamp()) { - continue; - } - if ((CellUtil.matchingFamily(cell, dm)) && - CellUtil.matchingQualifier(cell, dm)) { - if (dm.getType() == Cell.Type.Delete) { - if (cell.getTimestamp() == dm.getTimestamp()) { - // Delete is for deleting a specific cell version. Thus, it can be used - // to delete only one cell. - columnDeleteMarkers.remove(i); - } else { - continue; - } - } - if (maxTimestamp >= ttlWindowStart) { - // Inside the TTL window - if (keepDeletedCells != KeepDeletedCells.FALSE ) { - retainedCells.add(cell); - retainedCells.add(dm); - } - } else if (keepDeletedCells == KeepDeletedCells.TTL && - dm.getTimestamp() >= ttlWindowStart) { - retainedCells.add(cell); - retainedCells.add(dm); - } - return; - } - i++; - } - // No delete marker for this cell - retainedCells.add(cell); - } - /** - * This method finds out the maximum and minimum timestamp of the cells of the next row - * version. Cells are organized into columns based on the pair of family name and column - * qualifier. This means that the delete family markers for a column family will have their - * own column. However, the delete column markers will be packed with the put cells. The cells - * within a column are ordered in descending timestamps. 
- */ - private void getNextRowVersionTimestamps(LinkedList> columns, - byte[] columnFamily) { - maxTimestamp = 0; - minTimestamp = Long.MAX_VALUE; - Cell firstCell; - LinkedList deleteColumn = null; - long ts; - // The next row version is formed by the first cell of each column. Similarly, the min - // max timestamp of the cells of a row version is determined by looking at just first - // cell of the columns - for (LinkedList column : columns) { - firstCell = column.getFirst(); - ts = firstCell.getTimestamp(); - if ((firstCell.getType() == Cell.Type.DeleteFamily || - firstCell.getType() == Cell.Type.DeleteFamilyVersion) && - CellUtil.matchingFamily(firstCell, columnFamily)) { - deleteColumn = column; - } - if (maxTimestamp < ts) { - maxTimestamp = ts; - } - if (minTimestamp > ts) { - minTimestamp = ts; - } - } - if (deleteColumn != null) { - // A row version cannot cross a family delete marker by definition. This means - // min timestamp cannot be lower than the delete markers timestamp - for (Cell cell : deleteColumn) { - ts = cell.getTimestamp(); - if (ts < maxTimestamp) { - minTimestamp = ts + 1; - break; - } - } - } - } - - /** - * This is used for Phoenix level compaction - */ - private void getNextRowVersionTimestamps(List row, byte[] columnFamily) { - maxTimestamp = 0; - minTimestamp = Long.MAX_VALUE; - Cell deleteFamily = null; - long ts; - // The next row version is formed by the first cell of each column. Similarly, the min - // max timestamp of the cells of a row version is determined by looking at just first - // cell of the columns - for (Cell cell : row) { - ts = cell.getTimestamp(); - if ((cell.getType() == Cell.Type.DeleteFamily || - cell.getType() == Cell.Type.DeleteFamilyVersion) && - CellUtil.matchingFamily(cell, columnFamily)) { - deleteFamily = cell; - } - if (maxTimestamp < ts) { - maxTimestamp = ts; - } - if (minTimestamp > ts) { - minTimestamp = ts; - } - } - if (deleteFamily != null) { - // A row version cannot cross a family delete marker by definition. This means - // min timestamp cannot be lower than the delete markers timestamp - ts = deleteFamily.getTimestamp(); - if (ts < maxTimestamp) { - minTimestamp = ts + 1; - } - } - } + continue; + } + } + if (maxTimestamp >= ttlWindowStart) { + // Inside the TTL window + if (keepDeletedCells != KeepDeletedCells.FALSE) { + retainedCells.add(cell); + retainedCells.add(dm); + } + } else + if (keepDeletedCells == KeepDeletedCells.TTL && dm.getTimestamp() >= ttlWindowStart) { + retainedCells.add(cell); + retainedCells.add(dm); + } + return; + } + i++; + } + // No delete marker for this cell + retainedCells.add(cell); } /** - * HBaseLevelRowCompactor ensures that the cells of a given row are retained according to the - * HBase data retention rules. - * + * This method finds out the maximum and minimum timestamp of the cells of the next row version. + * Cells are organized into columns based on the pair of family name and column qualifier. This + * means that the delete family markers for a column family will have their own column. However, + * the delete column markers will be packed with the put cells. The cells within a column are + * ordered in descending timestamps. 
*/ - class HBaseLevelRowCompactor { - private RowContext rowContext = new RowContext(); - private CompactionRowVersion rowVersion = new CompactionRowVersion(); - private TTLTracker rowTracker; - - HBaseLevelRowCompactor(TTLTracker rowTracker) { - this.rowTracker = rowTracker; - } + private void getNextRowVersionTimestamps(LinkedList> columns, + byte[] columnFamily) { + maxTimestamp = 0; + minTimestamp = Long.MAX_VALUE; + Cell firstCell; + LinkedList deleteColumn = null; + long ts; + // The next row version is formed by the first cell of each column. Similarly, the min + // max timestamp of the cells of a row version is determined by looking at just first + // cell of the columns + for (LinkedList column : columns) { + firstCell = column.getFirst(); + ts = firstCell.getTimestamp(); + if ( + (firstCell.getType() == Cell.Type.DeleteFamily + || firstCell.getType() == Cell.Type.DeleteFamilyVersion) + && CellUtil.matchingFamily(firstCell, columnFamily) + ) { + deleteColumn = column; + } + if (maxTimestamp < ts) { + maxTimestamp = ts; + } + if (minTimestamp > ts) { + minTimestamp = ts; + } + } + if (deleteColumn != null) { + // A row version cannot cross a family delete marker by definition. This means + // min timestamp cannot be lower than the delete markers timestamp + for (Cell cell : deleteColumn) { + ts = cell.getTimestamp(); + if (ts < maxTimestamp) { + minTimestamp = ts + 1; + break; + } + } + } + } - /** - * A compaction row version includes the latest put cell versions from each column such that - * the cell versions do not cross delete family markers. In other words, the compaction row - * versions are built from cell versions that are all either before or after the next delete - * family or delete family version maker if family delete markers exist. Also, when the cell - * timestamps are ordered for a given row version, the difference between two subsequent - * timestamps has to be less than the ttl value. This is taken care before calling - * HBaseLevelRowCompactor#compact(). - * - * Compaction row versions are disjoint sets. A compaction row version does not share a cell - * version with the next compaction row version. A compaction row version includes at most - * one cell version from a column. - * - * After creating the first compaction row version, we form the next compaction row version - * from the remaining cell versions. - * - * Compaction row versions are used for compaction purposes to efficiently determine which - * cell versions to retain based on the HBase data retention parameters. - */ - class CompactionRowVersion { - // Cells included in the row version - List cells = new ArrayList<>(); - // The timestamp of the row version - long ts = 0; - // The version of a row version. It is the minimum of the versions of the cells included - // in the row version - int version = 0; - - private void init() { - cells.clear(); - } - @Override - public String toString() { - StringBuilder output = new StringBuilder(); - output.append("Cell count: " + cells.size() + "\n"); - for (Cell cell : cells) { - output.append(cell + "\n"); - } - output.append("ts:" + ts + " v:" + version); - return output.toString(); - } - } + /** + * This is used for Phoenix level compaction + */ + private void getNextRowVersionTimestamps(List row, byte[] columnFamily) { + maxTimestamp = 0; + minTimestamp = Long.MAX_VALUE; + Cell deleteFamily = null; + long ts; + // The next row version is formed by the first cell of each column. 
Similarly, the min + // max timestamp of the cells of a row version is determined by looking at just first + // cell of the columns + for (Cell cell : row) { + ts = cell.getTimestamp(); + if ( + (cell.getType() == Cell.Type.DeleteFamily + || cell.getType() == Cell.Type.DeleteFamilyVersion) + && CellUtil.matchingFamily(cell, columnFamily) + ) { + deleteFamily = cell; + } + if (maxTimestamp < ts) { + maxTimestamp = ts; + } + if (minTimestamp > ts) { + minTimestamp = ts; + } + } + if (deleteFamily != null) { + // A row version cannot cross a family delete marker by definition. This means + // min timestamp cannot be lower than the delete markers timestamp + ts = deleteFamily.getTimestamp(); + if (ts < maxTimestamp) { + minTimestamp = ts + 1; + } + } + } + } + + /** + * HBaseLevelRowCompactor ensures that the cells of a given row are retained according to the + * HBase data retention rules. + */ + class HBaseLevelRowCompactor { + private RowContext rowContext = new RowContext(); + private CompactionRowVersion rowVersion = new CompactionRowVersion(); + private TTLTracker rowTracker; + + HBaseLevelRowCompactor(TTLTracker rowTracker) { + this.rowTracker = rowTracker; + } - /** - * Decide if compaction row versions inside the TTL window should be retained. The - * versions are retained if one of the following conditions holds - * 1. The compaction row version is alive and its version is less than VERSIONS - * 2. The compaction row version is deleted and KeepDeletedCells is not FALSE - * - */ - private void retainInsideTTLWindow(CompactionRowVersion rowVersion, RowContext rowContext, - List retainedCells) { - if (rowContext.familyDeleteMarker == null - && rowContext.familyVersionDeleteMarker == null) { - // The compaction row version is alive - if (rowVersion.version < maxVersion) { - // Rule 1 - retainCells(rowVersion, rowContext, retainedCells); - } - } else { - // Deleted - if (rowVersion.version < maxVersion && keepDeletedCells != KeepDeletedCells.FALSE) { - // Retain based on rule 2 - retainCells(rowVersion, rowContext, retainedCells); - rowContext.retainFamilyDeleteMarker(retainedCells); - } - } - } + /** + * A compaction row version includes the latest put cell versions from each column such that the + * cell versions do not cross delete family markers. In other words, the compaction row versions + * are built from cell versions that are all either before or after the next delete family or + * delete family version maker if family delete markers exist. Also, when the cell timestamps + * are ordered for a given row version, the difference between two subsequent timestamps has to + * be less than the ttl value. This is taken care before calling + * HBaseLevelRowCompactor#compact(). Compaction row versions are disjoint sets. A compaction row + * version does not share a cell version with the next compaction row version. A compaction row + * version includes at most one cell version from a column. After creating the first compaction + * row version, we form the next compaction row version from the remaining cell versions. + * Compaction row versions are used for compaction purposes to efficiently determine which cell + * versions to retain based on the HBase data retention parameters. + */ + class CompactionRowVersion { + // Cells included in the row version + List cells = new ArrayList<>(); + // The timestamp of the row version + long ts = 0; + // The version of a row version. 
It is the minimum of the versions of the cells included + // in the row version + int version = 0; + + private void init() { + cells.clear(); + } + + @Override + public String toString() { + StringBuilder output = new StringBuilder(); + output.append("Cell count: " + cells.size() + "\n"); + for (Cell cell : cells) { + output.append(cell + "\n"); + } + output.append("ts:" + ts + " v:" + version); + return output.toString(); + } + } - /** - * Decide if compaction row versions outside the TTL window should be retained. The - * versions are retained if one of the following conditions holds - * - * 1. Live row versions less than MIN_VERSIONS are retained - * 2. Delete row versions whose delete markers are inside the TTL window and - * KeepDeletedCells is TTL are retained - */ - private void retainOutsideTTLWindow(CompactionRowVersion rowVersion, RowContext rowContext, - List retainedCells) { - if (rowContext.familyDeleteMarker == null - && rowContext.familyVersionDeleteMarker == null) { - // Live compaction row version - if (rowVersion.version < minVersion) { - // Rule 1 - retainCells(rowVersion, rowContext, retainedCells); - } - } else { - // Deleted compaction row version - if (keepDeletedCells == KeepDeletedCells.TTL - && rowContext.familyDeleteMarker != null - && rowContext.familyDeleteMarker.getTimestamp() > rowContext.getTtlWindowStart()) { - // Rule 2 - retainCells(rowVersion, rowContext, retainedCells); - rowContext.retainFamilyDeleteMarker(retainedCells); - } - } - } + /** + * Decide if compaction row versions inside the TTL window should be retained. The versions are + * retained if one of the following conditions holds 1. The compaction row version is alive and + * its version is less than VERSIONS 2. The compaction row version is deleted and + * KeepDeletedCells is not FALSE + */ + private void retainInsideTTLWindow(CompactionRowVersion rowVersion, RowContext rowContext, + List retainedCells) { + if (rowContext.familyDeleteMarker == null && rowContext.familyVersionDeleteMarker == null) { + // The compaction row version is alive + if (rowVersion.version < maxVersion) { + // Rule 1 + retainCells(rowVersion, rowContext, retainedCells); + } + } else { + // Deleted + if (rowVersion.version < maxVersion && keepDeletedCells != KeepDeletedCells.FALSE) { + // Retain based on rule 2 + retainCells(rowVersion, rowContext, retainedCells); + rowContext.retainFamilyDeleteMarker(retainedCells); + } + } + } - private void retainCells(CompactionRowVersion rowVersion, RowContext rowContext, - List retainedCells) { - if (rowContext.columnDeleteMarkers == null) { - retainedCells.addAll(rowVersion.cells); - return; - } - for (Cell cell : rowVersion.cells) { - rowContext.retainCell(cell, retainedCells, keepDeletedCells, rowContext.getTtlWindowStart()); - } - } + /** + * Decide if compaction row versions outside the TTL window should be retained. The versions are + * retained if one of the following conditions holds 1. Live row versions less than MIN_VERSIONS + * are retained 2. 
Delete row versions whose delete markers are inside the TTL window and + * KeepDeletedCells is TTL are retained + */ + private void retainOutsideTTLWindow(CompactionRowVersion rowVersion, RowContext rowContext, + List retainedCells) { + if (rowContext.familyDeleteMarker == null && rowContext.familyVersionDeleteMarker == null) { + // Live compaction row version + if (rowVersion.version < minVersion) { + // Rule 1 + retainCells(rowVersion, rowContext, retainedCells); + } + } else { + // Deleted compaction row version + if ( + keepDeletedCells == KeepDeletedCells.TTL && rowContext.familyDeleteMarker != null + && rowContext.familyDeleteMarker.getTimestamp() > rowContext.getTtlWindowStart() + ) { + // Rule 2 + retainCells(rowVersion, rowContext, retainedCells); + rowContext.retainFamilyDeleteMarker(retainedCells); + } + } + } - /** - * Form the next compaction row version by picking (removing) the first cell from each - * column. Put cells are used to form the next compaction row version. Delete markers - * are added to the row context which are processed to decide which row versions - * or cell version to delete. - */ - private void formNextCompactionRowVersion(LinkedList> columns, - RowContext rowContext, List retainedCells) { - rowVersion.init(); - rowContext.getNextRowVersionTimestamps(columns, storeColumnFamily); - rowVersion.ts = rowContext.maxTimestamp; - for (LinkedList column : columns) { - Cell cell = column.getFirst(); - if (column.getFirst().getTimestamp() < rowContext.minTimestamp) { - continue; - } - if (cell.getType() == Cell.Type.DeleteFamily) { - if (cell.getTimestamp() >= rowContext.maxTimestamp) { - rowContext.familyDeleteMarker = cell; - column.removeFirst(); - break; - } - continue; - } - else if (cell.getType() == Cell.Type.DeleteFamilyVersion) { - if (cell.getTimestamp() == rowVersion.ts) { - rowContext.familyVersionDeleteMarker = cell; - column.removeFirst(); - break; - } - continue; - } - column.removeFirst(); - if (cell.getType() == Cell.Type.DeleteColumn || - cell.getType() == Cell.Type.Delete) { - rowContext.addColumnDeleteMarker(cell); - continue; - } - rowVersion.cells.add(cell); - } - if (rowVersion.cells.isEmpty()) { - return; - } - rowVersion.version = rowContext.version++; - if (rowVersion.ts >= rowContext.getTtlWindowStart()) { - retainInsideTTLWindow(rowVersion, rowContext, retainedCells); - } else { - retainOutsideTTLWindow(rowVersion, rowContext, retainedCells); - } - } + private void retainCells(CompactionRowVersion rowVersion, RowContext rowContext, + List retainedCells) { + if (rowContext.columnDeleteMarkers == null) { + retainedCells.addAll(rowVersion.cells); + return; + } + for (Cell cell : rowVersion.cells) { + rowContext.retainCell(cell, retainedCells, keepDeletedCells, + rowContext.getTtlWindowStart()); + } + } - private void formCompactionRowVersions(LinkedList> columns, - List result) { - rowContext.init(); - rowTracker.setRowContext(rowContext); - while (!columns.isEmpty()) { - formNextCompactionRowVersion(columns, rowContext, result); - // Remove the columns that are empty - Iterator> iterator = columns.iterator(); - while (iterator.hasNext()) { - LinkedList column = iterator.next(); - if (column.isEmpty()) { - iterator.remove(); - } - } - } - } + /** + * Form the next compaction row version by picking (removing) the first cell from each column. + * Put cells are used to form the next compaction row version. Delete markers are added to the + * row context which are processed to decide which row versions or cell version to delete. 
+ */ + private void formNextCompactionRowVersion(LinkedList> columns, + RowContext rowContext, List retainedCells) { + rowVersion.init(); + rowContext.getNextRowVersionTimestamps(columns, storeColumnFamily); + rowVersion.ts = rowContext.maxTimestamp; + for (LinkedList column : columns) { + Cell cell = column.getFirst(); + if (column.getFirst().getTimestamp() < rowContext.minTimestamp) { + continue; + } + if (cell.getType() == Cell.Type.DeleteFamily) { + if (cell.getTimestamp() >= rowContext.maxTimestamp) { + rowContext.familyDeleteMarker = cell; + column.removeFirst(); + break; + } + continue; + } else if (cell.getType() == Cell.Type.DeleteFamilyVersion) { + if (cell.getTimestamp() == rowVersion.ts) { + rowContext.familyVersionDeleteMarker = cell; + column.removeFirst(); + break; + } + continue; + } + column.removeFirst(); + if (cell.getType() == Cell.Type.DeleteColumn || cell.getType() == Cell.Type.Delete) { + rowContext.addColumnDeleteMarker(cell); + continue; + } + rowVersion.cells.add(cell); + } + if (rowVersion.cells.isEmpty()) { + return; + } + rowVersion.version = rowContext.version++; + if (rowVersion.ts >= rowContext.getTtlWindowStart()) { + retainInsideTTLWindow(rowVersion, rowContext, retainedCells); + } else { + retainOutsideTTLWindow(rowVersion, rowContext, retainedCells); + } + } - /** - * Group the cells that are ordered lexicographically into columns based on - * the pair of family name and column qualifier. While doing that also add the delete - * markers to a separate list. - */ - private void formColumns(List result, LinkedList> columns) { - Cell currentColumnCell = null; - LinkedList currentColumn = null; - for (Cell cell : result) { - if (currentColumnCell == null) { - currentColumn = new LinkedList<>(); - currentColumnCell = cell; - currentColumn.add(cell); - } else if (!CellUtil.matchingColumn(cell, currentColumnCell)) { - columns.add(currentColumn); - currentColumn = new LinkedList<>(); - currentColumnCell = cell; - currentColumn.add(cell); - } else { - currentColumn.add(cell); - } - } - if (currentColumn != null) { - columns.add(currentColumn); - } - } + private void formCompactionRowVersions(LinkedList> columns, + List result) { + rowContext.init(); + rowTracker.setRowContext(rowContext); + while (!columns.isEmpty()) { + formNextCompactionRowVersion(columns, rowContext, result); + // Remove the columns that are empty + Iterator> iterator = columns.iterator(); + while (iterator.hasNext()) { + LinkedList column = iterator.next(); + if (column.isEmpty()) { + iterator.remove(); + } + } + } + } - /** - * Compacts a single row at the HBase level. The result parameter is the input row and - * modified to be the output of the compaction. - */ - private void compact(List result) { - if (result.isEmpty()) { - return; - } - LinkedList> columns = new LinkedList<>(); - formColumns(result, columns); - result.clear(); - formCompactionRowVersions(columns, result); + /** + * Group the cells that are ordered lexicographically into columns based on the pair of family + * name and column qualifier. While doing that also add the delete markers to a separate list. 
+ */ + private void formColumns(List result, LinkedList> columns) { + Cell currentColumnCell = null; + LinkedList currentColumn = null; + for (Cell cell : result) { + if (currentColumnCell == null) { + currentColumn = new LinkedList<>(); + currentColumnCell = cell; + currentColumn.add(cell); + } else if (!CellUtil.matchingColumn(cell, currentColumnCell)) { + columns.add(currentColumn); + currentColumn = new LinkedList<>(); + currentColumnCell = cell; + currentColumn.add(cell); + } else { + currentColumn.add(cell); } + } + if (currentColumn != null) { + columns.add(currentColumn); + } } /** - * PhoenixLevelRowCompactor ensures that the cells of the latest row version and the - * row versions that are visible through the max lookback window are retained including delete - * markers placed after these cells. This is the complete set of cells that Phoenix - * needs for its queries. Beyond these cells, HBase retention rules may require more - * cells to be retained. These cells are identified by the HBase level compaction implemented - * by HBaseLevelRowCompactor. - * + * Compacts a single row at the HBase level. The result parameter is the input row and modified + * to be the output of the compaction. */ - class PhoenixLevelRowCompactor { - private RowContext rowContext = new RowContext(); - List lastRowVersion = new ArrayList<>(); - List emptyColumn = new ArrayList<>(); - List phoenixResult = new ArrayList<>(); - List trimmedRow = new ArrayList<>(); - List trimmedEmptyColumn = new ArrayList<>(); - private TTLTracker rowTracker; - - PhoenixLevelRowCompactor(TTLTracker rowTracker) { - this.rowTracker = rowTracker; - } + private void compact(List result) { + if (result.isEmpty()) { + return; + } + LinkedList> columns = new LinkedList<>(); + formColumns(result, columns); + result.clear(); + formCompactionRowVersions(columns, result); + } + } + + /** + * PhoenixLevelRowCompactor ensures that the cells of the latest row version and the row versions + * that are visible through the max lookback window are retained including delete markers placed + * after these cells. This is the complete set of cells that Phoenix needs for its queries. Beyond + * these cells, HBase retention rules may require more cells to be retained. These cells are + * identified by the HBase level compaction implemented by HBaseLevelRowCompactor. + */ + class PhoenixLevelRowCompactor { + private RowContext rowContext = new RowContext(); + List lastRowVersion = new ArrayList<>(); + List emptyColumn = new ArrayList<>(); + List phoenixResult = new ArrayList<>(); + List trimmedRow = new ArrayList<>(); + List trimmedEmptyColumn = new ArrayList<>(); + private TTLTracker rowTracker; + + PhoenixLevelRowCompactor(TTLTracker rowTracker) { + this.rowTracker = rowTracker; + } - /** - * The cells of the row (i.e., result) read from HBase store are lexicographically ordered - * for tables using the key part of the cells which includes row, family, qualifier, - * timestamp and type. The cells belong of a column are ordered from the latest to - * the oldest. The method leverages this ordering and groups the cells into their columns - * based on the pair of family name and column qualifier. - * - * The cells within the max lookback window except the once at the lower edge of the - * max lookback window (the last row of the max lookback window) are retained immediately. 
- * - * This method also returned the remaining cells (outside the max lookback window) of - * the empty colum - */ - private void getLastRowVersionInMaxLookbackWindow(List result, - List lastRowVersion, List retainedCells, List emptyColumn) { - Cell currentColumnCell = null; - boolean isEmptyColumn = false; - for (Cell cell : result) { - long maxLookbackWindowStart = rowTracker.getRowContext().getMaxLookbackWindowStart(); - if (cell.getTimestamp() > maxLookbackWindowStart) { - retainedCells.add(cell); - continue; - } - if (!major && cell.getType() != Cell.Type.Put) { - retainedCells.add(cell); - } - if (currentColumnCell == null || - !CellUtil.matchingColumn(cell, currentColumnCell)) { - currentColumnCell = cell; - isEmptyColumn = ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ); - if ((cell.getType() != Cell.Type.Delete - && cell.getType() != Cell.Type.DeleteColumn) - || cell.getTimestamp() == maxLookbackWindowStart) { - // Include only delete family markers and put cells - // The last row version can also be the cells with timestamp - // same as timestamp of start of max lookback window - lastRowVersion.add(cell); - } - } else if (isEmptyColumn) { - // We only need to keep one cell for every column for the last row version. - // So here we just form the empty column beyond the last row version. - // Empty column needs to be collected during minor compactions also - // else we will see partial row expiry. - emptyColumn.add(cell); - } - } - } + /** + * The cells of the row (i.e., result) read from HBase store are lexicographically ordered for + * tables using the key part of the cells which includes row, family, qualifier, timestamp and + * type. The cells belong of a column are ordered from the latest to the oldest. The method + * leverages this ordering and groups the cells into their columns based on the pair of family + * name and column qualifier. The cells within the max lookback window except the once at the + * lower edge of the max lookback window (the last row of the max lookback window) are retained + * immediately. This method also returned the remaining cells (outside the max lookback window) + * of the empty colum + */ + private void getLastRowVersionInMaxLookbackWindow(List result, List lastRowVersion, + List retainedCells, List emptyColumn) { + Cell currentColumnCell = null; + boolean isEmptyColumn = false; + for (Cell cell : result) { + long maxLookbackWindowStart = rowTracker.getRowContext().getMaxLookbackWindowStart(); + if (cell.getTimestamp() > maxLookbackWindowStart) { + retainedCells.add(cell); + continue; + } + if (!major && cell.getType() != Cell.Type.Put) { + retainedCells.add(cell); + } + if (currentColumnCell == null || !CellUtil.matchingColumn(cell, currentColumnCell)) { + currentColumnCell = cell; + isEmptyColumn = ScanUtil.isEmptyColumn(cell, emptyCF, emptyCQ); + if ( + (cell.getType() != Cell.Type.Delete && cell.getType() != Cell.Type.DeleteColumn) + || cell.getTimestamp() == maxLookbackWindowStart + ) { + // Include only delete family markers and put cells + // The last row version can also be the cells with timestamp + // same as timestamp of start of max lookback window + lastRowVersion.add(cell); + } + } else if (isEmptyColumn) { + // We only need to keep one cell for every column for the last row version. + // So here we just form the empty column beyond the last row version. + // Empty column needs to be collected during minor compactions also + // else we will see partial row expiry. 
+ emptyColumn.add(cell); + } + } + } - /** - * Close the gap between the two timestamps, max and min, with the minimum number of cells - * from the input list such that the timestamp difference between two cells should - * not more than ttl. The cells that are used to close the gap are added to the output - * list. The input list is a list of empty cells in decreasing order of timestamp. - */ - private void closeGap(long max, long min, long ttl, List input, List output) { - int previous = -1; - long ts; - for (Cell cell : input) { - ts = cell.getTimestamp(); - if (ts >= max) { - previous++; - continue; - } - if (previous == -1 && max - ts > ttl) { - // Means even the first empty cells in the input list which is closest to - // max timestamp can't close the gap. So, gap can't be closed by empty cells at all. - break; - } - if (max - ts > ttl) { - max = input.get(previous).getTimestamp(); - output.add(input.remove(previous)); - if (max - min > ttl) { - closeGap(max, min, ttl, input, output); - } - return; - } - previous++; - } - if (previous > -1 && max - min > ttl) { - // This covers the case we need to retain the last empty cell in the input list. The close gap - // algorithm is such that if we need to retain the i th empty cell in the input list then we - // will get to know that once we are iterating on i+1 th empty cell. So, to retain last empty cell - // in input list we need to check the min timestamp. - output.add(input.remove(previous)); - } - } + /** + * Close the gap between the two timestamps, max and min, with the minimum number of cells from + * the input list such that the timestamp difference between two cells should not more than ttl. + * The cells that are used to close the gap are added to the output list. The input list is a + * list of empty cells in decreasing order of timestamp. + */ + private void closeGap(long max, long min, long ttl, List input, List output) { + int previous = -1; + long ts; + for (Cell cell : input) { + ts = cell.getTimestamp(); + if (ts >= max) { + previous++; + continue; + } + if (previous == -1 && max - ts > ttl) { + // Means even the first empty cells in the input list which is closest to + // max timestamp can't close the gap. So, gap can't be closed by empty cells at all. + break; + } + if (max - ts > ttl) { + max = input.get(previous).getTimestamp(); + output.add(input.remove(previous)); + if (max - min > ttl) { + closeGap(max, min, ttl, input, output); + } + return; + } + previous++; + } + if (previous > -1 && max - min > ttl) { + // This covers the case we need to retain the last empty cell in the input list. The close + // gap + // algorithm is such that if we need to retain the i th empty cell in the input list then we + // will get to know that once we are iterating on i+1 th empty cell. So, to retain last + // empty cell + // in input list we need to check the min timestamp. + output.add(input.remove(previous)); + } + } - /** - * Retains minimum empty cells needed during minor compaction to not loose data/partial row expiry - * on next major compaction. - * @param emptyColumn Empty column cells in decreasing order of timestamp. - * @param retainedCells Cells to be retained. - */ - private void retainEmptyCellsInMinorCompaction(List emptyColumn, List retainedCells) { - if (emptyColumn.isEmpty()) { - return; - } - else if (familyCount == 1 || localIndex) { - // We are compacting empty column family store and its single column family so - // just need to retain empty cells till min timestamp of last row version. 
Can't - // minimize the retained empty cells further as we don't know actual TTL during - // minor compactions. - long minRowTimestamp = rowContext.minTimestamp; - for (Cell emptyCell: emptyColumn) { - if (emptyCell.getTimestamp() > minRowTimestamp) { - retainedCells.add(emptyCell); - } - } - return; - } - // For multi-column family, w/o doing region level scan we can't put a bound on timestamp - // till which we should retain the empty cells. The empty cells can be needed to close the gap - // b/w empty column family cell and non-empty column family cell. - retainedCells.addAll(emptyColumn); - } + /** + * Retains minimum empty cells needed during minor compaction to not loose data/partial row + * expiry on next major compaction. + * @param emptyColumn Empty column cells in decreasing order of timestamp. + * @param retainedCells Cells to be retained. + */ + private void retainEmptyCellsInMinorCompaction(List emptyColumn, + List retainedCells) { + if (emptyColumn.isEmpty()) { + return; + } else if (familyCount == 1 || localIndex) { + // We are compacting empty column family store and its single column family so + // just need to retain empty cells till min timestamp of last row version. Can't + // minimize the retained empty cells further as we don't know actual TTL during + // minor compactions. + long minRowTimestamp = rowContext.minTimestamp; + for (Cell emptyCell : emptyColumn) { + if (emptyCell.getTimestamp() > minRowTimestamp) { + retainedCells.add(emptyCell); + } + } + return; + } + // For multi-column family, w/o doing region level scan we can't put a bound on timestamp + // till which we should retain the empty cells. The empty cells can be needed to close the gap + // b/w empty column family cell and non-empty column family cell. + retainedCells.addAll(emptyColumn); + } - /** - * Retain the last row version visible through the max lookback window - */ - private void retainCellsOfLastRowVersion(List lastRow, - List emptyColumn, List retainedCells) { - if (lastRow.isEmpty()) { - return; - } - rowContext.init(); - rowTracker.setRowContext(rowContext); - long ttl = rowContext.getTTL(); - rowContext.getNextRowVersionTimestamps(lastRow, storeColumnFamily); - Cell firstCell = lastRow.get(0); - if (firstCell.getType() == Cell.Type.DeleteFamily || - firstCell.getType() == Cell.Type.DeleteFamilyVersion) { - if (firstCell.getTimestamp() >= rowContext.maxTimestamp) { - // This means that the row version outside the max lookback window is - // deleted and thus should not be visible to the scn queries - return; - } - } + /** + * Retain the last row version visible through the max lookback window + */ + private void retainCellsOfLastRowVersion(List lastRow, List emptyColumn, + List retainedCells) { + if (lastRow.isEmpty()) { + return; + } + rowContext.init(); + rowTracker.setRowContext(rowContext); + long ttl = rowContext.getTTL(); + rowContext.getNextRowVersionTimestamps(lastRow, storeColumnFamily); + Cell firstCell = lastRow.get(0); + if ( + firstCell.getType() == Cell.Type.DeleteFamily + || firstCell.getType() == Cell.Type.DeleteFamilyVersion + ) { + if (firstCell.getTimestamp() >= rowContext.maxTimestamp) { + // This means that the row version outside the max lookback window is + // deleted and thus should not be visible to the scn queries + return; + } + } + + if (major && compactionTime - rowContext.maxTimestamp > maxLookbackInMillis + ttl) { + // Only do this check for major compaction as for minor compactions we don't expire cells. 
+ // The row version should not be visible via the max lookback window. Nothing to do + return; + } + retainedCells.addAll(lastRow); + // If the gap between two back to back mutations is more than ttl then the older + // mutation will be considered expired and masked. If the length of the time range of + // a row version is not more than ttl, then we know the cells covered by the row + // version are not apart from each other more than ttl and will not be masked. + if (major && rowContext.maxTimestamp - rowContext.minTimestamp <= ttl) { + // Skip this check for minor compactions as we don't compute actual TTL for + // minor compactions and don't expire cells. + return; + } + // The quick time range check did not pass. We need get at least one empty cell to cover + // the gap so that the row version will not be masked by PhoenixTTLRegionScanner. + if (emptyColumn.isEmpty()) { + return; + } else if (!major) { + retainEmptyCellsInMinorCompaction(emptyColumn, retainedCells); + return; + } + int size = lastRow.size(); + long tsArray[] = new long[size]; + int i = 0; + for (Cell cell : lastRow) { + tsArray[i++] = cell.getTimestamp(); + } + Arrays.sort(tsArray); + for (i = size - 1; i > 0; i--) { + if (tsArray[i] - tsArray[i - 1] > ttl) { + closeGap(tsArray[i], tsArray[i - 1], ttl, emptyColumn, retainedCells); + } + } + } - if (major && compactionTime - rowContext.maxTimestamp > maxLookbackInMillis + ttl) { - // Only do this check for major compaction as for minor compactions we don't expire cells. - // The row version should not be visible via the max lookback window. Nothing to do - return; - } - retainedCells.addAll(lastRow); - // If the gap between two back to back mutations is more than ttl then the older - // mutation will be considered expired and masked. If the length of the time range of - // a row version is not more than ttl, then we know the cells covered by the row - // version are not apart from each other more than ttl and will not be masked. - if (major && rowContext.maxTimestamp - rowContext.minTimestamp <= ttl) { - // Skip this check for minor compactions as we don't compute actual TTL for - // minor compactions and don't expire cells. - return; - } - // The quick time range check did not pass. We need get at least one empty cell to cover - // the gap so that the row version will not be masked by PhoenixTTLRegionScanner. - if (emptyColumn.isEmpty()) { - return; - } - else if (! 
major) { - retainEmptyCellsInMinorCompaction(emptyColumn, retainedCells); - return; - } - int size = lastRow.size(); - long tsArray[] = new long[size]; - int i = 0; - for (Cell cell : lastRow) { - tsArray[i++] = cell.getTimestamp(); - } - Arrays.sort(tsArray); - for (i = size - 1; i > 0; i--) { - if (tsArray[i] - tsArray[i - 1] > ttl) { - closeGap(tsArray[i], tsArray[i - 1], ttl, emptyColumn, retainedCells); - } - } + /** + * For a CDC index, we retain all cells within the max lookback window as opposed to retaining + * all row versions visible through max lookback window we do for other tables + */ + private boolean retainCellsForCDCIndex(List result, List retainedCells) { + for (Cell cell : result) { + if (cell.getTimestamp() >= rowTracker.getRowContext().getMaxLookbackWindowStart()) { + retainedCells.add(cell); } + } + return true; + } - /** - * For a CDC index, we retain all cells within the max lookback window as opposed to - * retaining all row versions visible through max lookback window we do for other tables - */ - private boolean retainCellsForCDCIndex(List result, List retainedCells) { - for (Cell cell : result) { - if (cell.getTimestamp() >= rowTracker.getRowContext().getMaxLookbackWindowStart()) { - retainedCells.add(cell); - } - } - return true; - } + /** + * The retained cells includes the cells that are visible through the max lookback window and + * the additional empty column cells that are needed to reduce large time between the cells of + * the last row version. + */ + private boolean retainCellsForMaxLookback(List result, boolean regionLevel, + List retainedCells) { + + lastRowVersion.clear(); + emptyColumn.clear(); + if (isCDCIndex) { + return retainCellsForCDCIndex(result, retainedCells); + } + getLastRowVersionInMaxLookbackWindow(result, lastRowVersion, retainedCells, emptyColumn); + if (lastRowVersion.isEmpty()) { + return true; + } + if (!major) { + // We do not expire cells for minor compaction and memstore flushes + retainCellsOfLastRowVersion(lastRowVersion, emptyColumn, retainedCells); + return true; + } + long ttl = rowTracker.getRowContext().getTTL(); + long maxTimestamp = 0; + long minTimestamp = Long.MAX_VALUE; + long ts; + for (Cell cell : lastRowVersion) { + ts = cell.getTimestamp(); + if (ts > maxTimestamp) { + maxTimestamp = ts; + } + ts = cell.getTimestamp(); + if (ts < minTimestamp) { + minTimestamp = ts; + } + } + if (compactionTime - maxTimestamp > maxLookbackInMillis + ttl) { + if (!emptyCFStore && !regionLevel) { + // The row version is more than maxLookbackInMillis + ttl old. We cannot decide + // if we should retain it with the store level compaction when the current + // store is not the empty column family store. + return false; + } + return true; + } + // If the time gap between two back to back mutations is more than ttl then we know + // that the row is expired within the time gap. + if (maxTimestamp - minTimestamp > ttl) { + if ((familyCount > 1 && !regionLevel && !localIndex)) { + // When there are more than one column family for a given table and a row + // version constructed at the store level covers a time span larger than ttl, + // we need region level compaction to see if the other stores have more cells + // for any of these large time gaps. A store level compaction may incorrectly + // remove some cells due to a large time gap which may not there at the region + // level. + return false; + } + // We either have one column family or are doing region level compaction. 
In both + // case, we can safely trim the cells beyond the first time gap larger ttl. + // Here we are interested in the gaps between the cells of the last row version + // amd thus we need to examine the gaps between these cells and the empty column. + // Please note that empty column is always updated for every mutation and so we + // just need empty column cells for the gap analysis. + int size = lastRowVersion.size(); + size += emptyColumn.size(); + long tsArray[] = new long[size]; + int i = 0; + for (Cell cell : lastRowVersion) { + tsArray[i++] = cell.getTimestamp(); + } + for (Cell cell : emptyColumn) { + tsArray[i++] = cell.getTimestamp(); + } + Arrays.sort(tsArray); + boolean gapFound = false; + // Since timestamps are sorted in ascending order, traverse them in reverse order + for (i = size - 1; i > 0; i--) { + if (tsArray[i] - tsArray[i - 1] > ttl) { + minTimestamp = tsArray[i]; + gapFound = true; + break; + } + } + if (gapFound) { + trimmedRow.clear(); + for (Cell cell : lastRowVersion) { + if (cell.getTimestamp() >= minTimestamp) { + trimmedRow.add(cell); + } + } + lastRowVersion.clear(); + lastRowVersion.addAll(trimmedRow); + trimmedEmptyColumn.clear(); + ; + for (Cell cell : emptyColumn) { + if (cell.getTimestamp() >= minTimestamp) { + trimmedEmptyColumn.add(cell); + } + } + emptyColumn = trimmedEmptyColumn; + } + } + retainCellsOfLastRowVersion(lastRowVersion, emptyColumn, retainedCells); + return true; + } - /** - * The retained cells includes the cells that are visible through the max lookback - * window and the additional empty column cells that are needed to reduce large time - * between the cells of the last row version. - */ - private boolean retainCellsForMaxLookback(List result, boolean regionLevel, - List retainedCells) { - - lastRowVersion.clear(); - emptyColumn.clear(); - if (isCDCIndex) { - return retainCellsForCDCIndex(result, retainedCells); - } - getLastRowVersionInMaxLookbackWindow(result, lastRowVersion, retainedCells, - emptyColumn); - if (lastRowVersion.isEmpty()) { - return true; - } - if (!major) { - // We do not expire cells for minor compaction and memstore flushes - retainCellsOfLastRowVersion(lastRowVersion, emptyColumn, retainedCells); - return true; - } - long ttl = rowTracker.getRowContext().getTTL(); - long maxTimestamp = 0; - long minTimestamp = Long.MAX_VALUE; - long ts; - for (Cell cell : lastRowVersion) { - ts =cell.getTimestamp(); - if (ts > maxTimestamp) { - maxTimestamp = ts; - } - ts = cell.getTimestamp(); - if (ts < minTimestamp) { - minTimestamp = ts; - } - } - if (compactionTime - maxTimestamp > maxLookbackInMillis + ttl) { - if (!emptyCFStore && !regionLevel) { - // The row version is more than maxLookbackInMillis + ttl old. We cannot decide - // if we should retain it with the store level compaction when the current - // store is not the empty column family store. - return false; - } - return true; - } - // If the time gap between two back to back mutations is more than ttl then we know - // that the row is expired within the time gap. - if (maxTimestamp - minTimestamp > ttl) { - if ((familyCount > 1 && !regionLevel && !localIndex)) { - // When there are more than one column family for a given table and a row - // version constructed at the store level covers a time span larger than ttl, - // we need region level compaction to see if the other stores have more cells - // for any of these large time gaps. 
A store level compaction may incorrectly - // remove some cells due to a large time gap which may not there at the region - // level. - return false; - } - // We either have one column family or are doing region level compaction. In both - // case, we can safely trim the cells beyond the first time gap larger ttl. - // Here we are interested in the gaps between the cells of the last row version - // amd thus we need to examine the gaps between these cells and the empty column. - // Please note that empty column is always updated for every mutation and so we - // just need empty column cells for the gap analysis. - int size = lastRowVersion.size(); - size += emptyColumn.size(); - long tsArray[] = new long[size]; - int i = 0; - for (Cell cell : lastRowVersion) { - tsArray[i++] = cell.getTimestamp(); - } - for (Cell cell : emptyColumn) { - tsArray[i++] = cell.getTimestamp(); - } - Arrays.sort(tsArray); - boolean gapFound = false; - // Since timestamps are sorted in ascending order, traverse them in reverse order - for (i = size - 1; i > 0; i--) { - if (tsArray[i] - tsArray[i - 1] > ttl) { - minTimestamp = tsArray[i]; - gapFound = true; - break; - } - } - if (gapFound) { - trimmedRow.clear(); - for (Cell cell : lastRowVersion) { - if (cell.getTimestamp() >= minTimestamp) { - trimmedRow.add(cell); - } - } - lastRowVersion.clear(); - lastRowVersion.addAll(trimmedRow); - trimmedEmptyColumn.clear();; - for (Cell cell : emptyColumn) { - if (cell.getTimestamp() >= minTimestamp) { - trimmedEmptyColumn.add(cell); - } - } - emptyColumn = trimmedEmptyColumn; - } - } - retainCellsOfLastRowVersion(lastRowVersion, emptyColumn, retainedCells); - return true; - } - private void removeDuplicates(List input, List output) { - Cell previousCell = null; - for (Cell cell : input) { - if (previousCell == null || - cell.getTimestamp() != previousCell.getTimestamp() || - cell.getType() != previousCell.getType() || - !CellUtil.matchingColumn(cell, previousCell)) { - output.add(cell); - } - previousCell = cell; - } - } - /** - * Compacts a single row at the Phoenix level. The result parameter is the input row and - * modified to be the output of the compaction process. - */ - private void compact(List result, boolean regionLevel) throws IOException { - if (result.isEmpty()) { - return; - } - phoenixResult.clear(); - rowTracker.setTTL(result.get(0)); - // For multi-CF case, always do region level scan for empty CF store during major compaction else - // we could end-up removing some empty cells which are needed to close the gap b/w empty CF cell and - // non-empty CF cell to prevent partial row expiry. This can happen when last row version of non-empty - // CF cell outside max lookback window is older than last row version of empty CF cell. - if (major && familyCount > 1 && ! localIndex && emptyCFStore && ! regionLevel) { - compactRegionLevel(result, phoenixResult); - } - else if (!retainCellsForMaxLookback(result, regionLevel, phoenixResult)) { - if (familyCount == 1 || regionLevel) { - throw new RuntimeException("UNEXPECTED"); - } - phoenixResult.clear(); - compactRegionLevel(result, phoenixResult); - } - if (maxVersion == 1 - && (!major - || (minVersion == 0 && keepDeletedCells == KeepDeletedCells.FALSE))) { - // We need Phoenix level compaction only - Collections.sort(phoenixResult, CellComparator.getInstance()); - result.clear(); - removeDuplicates(phoenixResult, result); - phoenixLevelOnly = true; - return; - } - // We may need to retain more cells, and so we need to run HBase level compaction - // too. 
The result of two compactions will be merged and duplicate cells are removed. - int phoenixResultSize = phoenixResult.size(); - List hbaseResult = new ArrayList<>(result); - hBaseLevelRowCompactor.compact(hbaseResult); - phoenixResult.addAll(hbaseResult); - Collections.sort(phoenixResult, CellComparator.getInstance()); - result.clear(); - removeDuplicates(phoenixResult, result); - if (result.size() > phoenixResultSize) { - LOGGER.debug("HBase level compaction retained " + - (result.size() - phoenixResultSize) + " more cells"); - } - } + private void removeDuplicates(List input, List output) { + Cell previousCell = null; + for (Cell cell : input) { + if ( + previousCell == null || cell.getTimestamp() != previousCell.getTimestamp() + || cell.getType() != previousCell.getType() + || !CellUtil.matchingColumn(cell, previousCell) + ) { + output.add(cell); + } + previousCell = cell; + } + } - private int compareTypes(Cell a, Cell b) { - Cell.Type aType = a.getType(); - Cell.Type bType = b.getType(); + /** + * Compacts a single row at the Phoenix level. The result parameter is the input row and + * modified to be the output of the compaction process. + */ + private void compact(List result, boolean regionLevel) throws IOException { + if (result.isEmpty()) { + return; + } + phoenixResult.clear(); + rowTracker.setTTL(result.get(0)); + // For multi-CF case, always do region level scan for empty CF store during major compaction + // else + // we could end-up removing some empty cells which are needed to close the gap b/w empty CF + // cell and + // non-empty CF cell to prevent partial row expiry. This can happen when last row version of + // non-empty + // CF cell outside max lookback window is older than last row version of empty CF cell. + if (major && familyCount > 1 && !localIndex && emptyCFStore && !regionLevel) { + compactRegionLevel(result, phoenixResult); + } else if (!retainCellsForMaxLookback(result, regionLevel, phoenixResult)) { + if (familyCount == 1 || regionLevel) { + throw new RuntimeException("UNEXPECTED"); + } + phoenixResult.clear(); + compactRegionLevel(result, phoenixResult); + } + if ( + maxVersion == 1 + && (!major || (minVersion == 0 && keepDeletedCells == KeepDeletedCells.FALSE)) + ) { + // We need Phoenix level compaction only + Collections.sort(phoenixResult, CellComparator.getInstance()); + result.clear(); + removeDuplicates(phoenixResult, result); + phoenixLevelOnly = true; + return; + } + // We may need to retain more cells, and so we need to run HBase level compaction + // too. The result of two compactions will be merged and duplicate cells are removed. 
+ int phoenixResultSize = phoenixResult.size(); + List hbaseResult = new ArrayList<>(result); + hBaseLevelRowCompactor.compact(hbaseResult); + phoenixResult.addAll(hbaseResult); + Collections.sort(phoenixResult, CellComparator.getInstance()); + result.clear(); + removeDuplicates(phoenixResult, result); + if (result.size() > phoenixResultSize) { + LOGGER.debug( + "HBase level compaction retained " + (result.size() - phoenixResultSize) + " more cells"); + } + } - if (aType == bType) { - return 0; - } - if (aType == Cell.Type.DeleteFamily) { - return -1; - } - if (bType == Cell.Type.DeleteFamily) { - return 1; - } - if (aType == Cell.Type.DeleteFamilyVersion) { - return -1; - } - if (bType == Cell.Type.DeleteFamilyVersion) { - return 1; - } - if (aType == Cell.Type.DeleteColumn) { - return -1; - } - return 1; - } + private int compareTypes(Cell a, Cell b) { + Cell.Type aType = a.getType(); + Cell.Type bType = b.getType(); + + if (aType == bType) { + return 0; + } + if (aType == Cell.Type.DeleteFamily) { + return -1; + } + if (bType == Cell.Type.DeleteFamily) { + return 1; + } + if (aType == Cell.Type.DeleteFamilyVersion) { + return -1; + } + if (bType == Cell.Type.DeleteFamilyVersion) { + return 1; + } + if (aType == Cell.Type.DeleteColumn) { + return -1; + } + return 1; + } - private int compare(Cell a, Cell b) { - int result; - result = Bytes.compareTo(a.getFamilyArray(), a.getFamilyOffset(), - a.getFamilyLength(), - b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); - if (result != 0) { - return result; - } - result = Bytes.compareTo(a.getQualifierArray(), a.getQualifierOffset(), - a.getQualifierLength(), - b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); - if (result != 0) { - return result; - } - if (a.getTimestamp() > b.getTimestamp()) { - return -1; - } - if (a.getTimestamp() < b.getTimestamp()) { - return 1; - } - return compareTypes(a, b); - } + private int compare(Cell a, Cell b) { + int result; + result = Bytes.compareTo(a.getFamilyArray(), a.getFamilyOffset(), a.getFamilyLength(), + b.getFamilyArray(), b.getFamilyOffset(), b.getFamilyLength()); + if (result != 0) { + return result; + } + result = + Bytes.compareTo(a.getQualifierArray(), a.getQualifierOffset(), a.getQualifierLength(), + b.getQualifierArray(), b.getQualifierOffset(), b.getQualifierLength()); + if (result != 0) { + return result; + } + if (a.getTimestamp() > b.getTimestamp()) { + return -1; + } + if (a.getTimestamp() < b.getTimestamp()) { + return 1; + } + return compareTypes(a, b); + } - /** - * The generates the intersection of regionResult and input. The result is the resulting - * intersection. - */ - private void trimRegionResult(List regionResult, List input, - List result) { - if (regionResult.isEmpty()) { - return; - } - int index = 0; - int size = regionResult.size(); - int compare; - for (Cell originalCell : input) { - Cell regionCell = regionResult.get(index); - compare = compare(originalCell, regionCell); - while (compare > 0) { - index++; - if (index == size) { - break; - } - regionCell = regionResult.get(index); - compare = compare(originalCell, regionCell); - } - if (compare == 0) { - result.add(originalCell); - index++; - } - if (index == size) { - break; - } - } - } + /** + * The generates the intersection of regionResult and input. The result is the resulting + * intersection. 
+ */ + private void trimRegionResult(List regionResult, List input, List result) { + if (regionResult.isEmpty()) { + return; + } + int index = 0; + int size = regionResult.size(); + int compare; + for (Cell originalCell : input) { + Cell regionCell = regionResult.get(index); + compare = compare(originalCell, regionCell); + while (compare > 0) { + index++; + if (index == size) { + break; + } + regionCell = regionResult.get(index); + compare = compare(originalCell, regionCell); + } + if (compare == 0) { + result.add(originalCell); + index++; + } + if (index == size) { + break; + } + } + } - /** - * This is used only when the Phoenix level compaction cannot be done at the store level. - */ - private void compactRegionLevel(List input, List result) throws IOException { - byte[] rowKey = CellUtil.cloneRow(input.get(0)); - Scan scan = new Scan(); - scan.setRaw(true); - scan.readAllVersions(); - // compaction + 1 because the upper limit of the time range is not inclusive - scan.setTimeRange(0, compactionTime + 1); - scan.withStartRow(rowKey, true); - scan.withStopRow(rowKey, true); - RegionScanner scanner = region.getScanner(scan); - - List regionResult = new ArrayList<>(result.size()); - scanner.next(regionResult); - scanner.close(); - Collections.sort(regionResult, CellComparator.getInstance()); - compact(regionResult, true); - result.clear(); - trimRegionResult(regionResult, input, result); - } + /** + * This is used only when the Phoenix level compaction cannot be done at the store level. + */ + private void compactRegionLevel(List input, List result) throws IOException { + byte[] rowKey = CellUtil.cloneRow(input.get(0)); + Scan scan = new Scan(); + scan.setRaw(true); + scan.readAllVersions(); + // compaction + 1 because the upper limit of the time range is not inclusive + scan.setTimeRange(0, compactionTime + 1); + scan.withStartRow(rowKey, true); + scan.withStopRow(rowKey, true); + RegionScanner scanner = region.getScanner(scan); + + List regionResult = new ArrayList<>(result.size()); + scanner.next(regionResult); + scanner.close(); + Collections.sort(regionResult, CellComparator.getInstance()); + compact(regionResult, true); + result.clear(); + trimRegionResult(regionResult, input, result); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java index d8c7a5d1134..41a2ea830e5 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,95 +40,96 @@ */ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEnvironment { - private final Configuration config; - private RegionCoprocessorEnvironment delegate; - private ConnectionType connectionType; - - public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, ConnectionType connectionType) { - this.delegate = delegate; - this.connectionType = connectionType; - this.config = ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); - } - - @Override - public int getVersion() { - return delegate.getVersion(); - } - - @Override - public String getHBaseVersion() { - return delegate.getHBaseVersion(); - } - - @Override - public int getPriority() { - return delegate.getPriority(); - } - - @Override - public int getLoadSequence() { - return delegate.getLoadSequence(); - } - - @Override - public Configuration getConfiguration() { - return config; - } - - @Override - public ClassLoader getClassLoader() { - return delegate.getClassLoader(); - } - - @Override - public Region getRegion() { - return delegate.getRegion(); - } - - @Override - public RegionInfo getRegionInfo() { - return delegate.getRegionInfo(); - } - - @Override - public ConcurrentMap getSharedData() { - return delegate.getSharedData(); - } - - @Override - public RegionCoprocessor getInstance() { - return delegate.getInstance(); - } - - @Override - public OnlineRegions getOnlineRegions() { - return delegate.getOnlineRegions(); - } - - @Override - public ServerName getServerName() { - return delegate.getServerName(); - } - - @Override - public Connection getConnection() { - return ConnectionFactory.getConnection(connectionType, delegate); - } - - @Override - public MetricRegistry getMetricRegistryForRegionServer() { - return delegate.getMetricRegistryForRegionServer(); - } - - @Override - public Connection createConnection(Configuration conf) throws IOException { - return delegate.createConnection(conf); - } - - @Override - public RawCellBuilder getCellBuilder() { - return delegate.getCellBuilder(); - } - - + private final Configuration config; + private RegionCoprocessorEnvironment delegate; + private ConnectionType connectionType; + + public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment delegate, + ConnectionType connectionType) { + this.delegate = delegate; + this.connectionType = connectionType; + this.config = + ConnectionFactory.getTypeSpecificConfiguration(connectionType, delegate.getConfiguration()); + } + + @Override + public int getVersion() { + return delegate.getVersion(); + } + + @Override + public String getHBaseVersion() { + return delegate.getHBaseVersion(); + } + + @Override + public int getPriority() { + return delegate.getPriority(); + } + + @Override + public int getLoadSequence() { + return delegate.getLoadSequence(); + } + + @Override + public Configuration getConfiguration() { + return config; + } + + @Override + public ClassLoader getClassLoader() { + return delegate.getClassLoader(); + } + + @Override + public Region getRegion() { + return delegate.getRegion(); + } + + @Override + public RegionInfo getRegionInfo() { + return delegate.getRegionInfo(); + } + + @Override + public ConcurrentMap getSharedData() { + return 
delegate.getSharedData(); + } + + @Override + public RegionCoprocessor getInstance() { + return delegate.getInstance(); + } + + @Override + public OnlineRegions getOnlineRegions() { + return delegate.getOnlineRegions(); + } + + @Override + public ServerName getServerName() { + return delegate.getServerName(); + } + + @Override + public Connection getConnection() { + return ConnectionFactory.getConnection(connectionType, delegate); + } + + @Override + public MetricRegistry getMetricRegistryForRegionServer() { + return delegate.getMetricRegistryForRegionServer(); + } + + @Override + public Connection createConnection(Configuration conf) throws IOException { + return delegate.createConnection(conf); + } + + @Override + public RawCellBuilder getCellBuilder() { + return delegate.getCellBuilder(); + } + } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java index 62ee95f7832..2c7d30d0e62 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,281 +49,271 @@ public class DelegateRegionObserver implements RegionObserver { - protected final RegionObserver delegate; - - public DelegateRegionObserver(RegionObserver delegate) { - this.delegate = delegate; - } - - @Override - public void preOpen(ObserverContext c) throws IOException { - delegate.preOpen(c); - } - - @Override - public void postOpen(ObserverContext c) { - delegate.postOpen(c); - } - - @Override - public void preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { - delegate.preFlush(c, tracker); - ; - } - - @Override - public InternalScanner preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - Store store, InternalScanner scanner, org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) - throws IOException { - return delegate.preFlush(c, store, scanner, tracker); - } - - @Override - public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { - delegate.postFlush(c, tracker); - } - - - @Override - public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - Store store, StoreFile resultFile, org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) - throws IOException { - delegate.postFlush(c, store, resultFile, tracker); - } - - - - @Override - public void preClose(ObserverContext c, boolean abortRequested) - throws IOException { - delegate.preClose(c, abortRequested); - } - - @Override - public void postClose(ObserverContext c, boolean abortRequested) { - delegate.postClose(c, abortRequested); - } - - @Override - public void - preGetOp(ObserverContext c, Get get, List result) - throws IOException { - delegate.preGetOp(c, get, result); - } - - @Override - public void postGetOp(ObserverContext c, Get get, - List 
result) throws IOException { - delegate.postGetOp(c, get, result); - } - - @Override - public boolean preExists(ObserverContext c, Get get, - boolean exists) throws IOException { - return delegate.preExists(c, get, exists); - } - - @Override - public boolean postExists(ObserverContext c, Get get, - boolean exists) throws IOException { - return delegate.postExists(c, get, exists); - } - - @Override - public void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException { - delegate.prePut(c, put, edit, durability); - } - - @Override - public void postPut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException { - delegate.postPut(c, put, edit, durability); - } - - @Override - public void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException { - delegate.preDelete(c, delete, edit, durability); - } - - @Override - public void prePrepareTimeStampForDeleteVersion( - ObserverContext c, Mutation mutation, Cell cell, - byte[] byteNow, Get get) throws IOException { - delegate.prePrepareTimeStampForDeleteVersion(c, mutation, cell, byteNow, get); - } - - @Override - public void postDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException { - delegate.postDelete(c, delete, edit, durability); - } - - @Override - public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - delegate.preBatchMutate(c, miniBatchOp); - } - - @Override - public void postBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - delegate.postBatchMutate(c, miniBatchOp); - } - - @Override - public void postStartRegionOperation(ObserverContext ctx, - Operation operation) throws IOException { - delegate.postStartRegionOperation(ctx, operation); - } - - @Override - public void postCloseRegionOperation(ObserverContext ctx, - Operation operation) throws IOException { - delegate.postCloseRegionOperation(ctx, operation); - } - - @Override - public void postBatchMutateIndispensably(ObserverContext ctx, - MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException { - delegate.postBatchMutateIndispensably(ctx, miniBatchOp, success); - } - - @Override - public Result preAppend(ObserverContext c, Append append) - throws IOException { - return delegate.preAppend(c, append); - } - - @Override - public Result preAppendAfterRowLock(ObserverContext c, - Append append) throws IOException { - return delegate.preAppendAfterRowLock(c, append); - } - - @Override - public Result postAppend(ObserverContext c, Append append, - Result result) throws IOException { - return delegate.postAppend(c, append, result); - } - - @Override - public Result - preIncrement(ObserverContext c, Increment increment) - throws IOException { - return delegate.preIncrement(c, increment); - } - - @Override - public Result preIncrementAfterRowLock(ObserverContext c, - Increment increment) throws IOException { - return delegate.preIncrementAfterRowLock(c, increment); - } - - @Override - public Result postIncrement(ObserverContext c, - Increment increment, Result result) throws IOException { - return delegate.postIncrement(c, increment, result); - } - - @Override - public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c, - Scan scan) throws IOException { - delegate.preScannerOpen(c, scan); - } - - @Override - public RegionScanner postScannerOpen(ObserverContext c, - Scan scan, RegionScanner s) 
throws IOException { - return delegate.postScannerOpen(c, scan, s); - } - - @Override - public boolean preScannerNext(ObserverContext c, - InternalScanner s, List result, int limit, boolean hasNext) throws IOException { - return delegate.preScannerNext(c, s, result, limit, hasNext); - } - - @Override - public boolean postScannerNext(ObserverContext c, - InternalScanner s, List result, int limit, boolean hasNext) throws IOException { - return delegate.postScannerNext(c, s, result, limit, hasNext); - } - - - - @Override - public void preScannerClose(ObserverContext c, InternalScanner s) - throws IOException { - delegate.preScannerClose(c, s); - } - - @Override - public void - postScannerClose(ObserverContext c, InternalScanner s) - throws IOException { - delegate.postScannerClose(c, s); - } - - @Override - public void preWALRestore(ObserverContext ctx, RegionInfo info, - WALKey logKey, WALEdit logEdit) throws IOException { - delegate.preWALRestore(ctx, info, logKey, logEdit); - } - - - @Override - public void postWALRestore(ObserverContext ctx, RegionInfo info, - WALKey logKey, WALEdit logEdit) throws IOException { - delegate.postWALRestore(ctx, info, logKey, logEdit); - } - - - - @Override - public void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException { - delegate.preBulkLoadHFile(ctx, familyPaths); - } - - - @Override - public Cell postMutationBeforeWAL(ObserverContext ctx, - MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { - return delegate.postMutationBeforeWAL(ctx, opType, mutation, oldCell, newCell); - } - - @Override - public DeleteTracker postInstantiateDeleteTracker( - ObserverContext ctx, DeleteTracker delTracker) - throws IOException { - return delegate.postInstantiateDeleteTracker(ctx, delTracker); - } - - @Override - public void preCommitStoreFile(ObserverContext ctx, byte[] family, - List> pairs) throws IOException { - delegate.preCommitStoreFile(ctx, family, pairs); - - } - - @Override - public void postCommitStoreFile(ObserverContext ctx, byte[] family, Path srcPath, - Path dstPath) throws IOException { - delegate.postCommitStoreFile(ctx, family, srcPath, dstPath); - - } - - @Override - public void postBulkLoadHFile(ObserverContext ctx, - List> stagingFamilyPaths, Map> finalPaths) - throws IOException { - delegate.postBulkLoadHFile(ctx, stagingFamilyPaths, finalPaths); - } - - - + protected final RegionObserver delegate; + + public DelegateRegionObserver(RegionObserver delegate) { + this.delegate = delegate; + } + + @Override + public void preOpen(ObserverContext c) throws IOException { + delegate.preOpen(c); + } + + @Override + public void postOpen(ObserverContext c) { + delegate.postOpen(c); + } + + @Override + public void preFlush( + org.apache.hadoop.hbase.coprocessor.ObserverContext c, + org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { + delegate.preFlush(c, tracker); + ; + } + + @Override + public InternalScanner preFlush( + org.apache.hadoop.hbase.coprocessor.ObserverContext c, + Store store, InternalScanner scanner, + org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { + return delegate.preFlush(c, store, scanner, tracker); + } + + @Override + public void postFlush( + org.apache.hadoop.hbase.coprocessor.ObserverContext c, + org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { + delegate.postFlush(c, tracker); + } + + @Override + public void postFlush( + 
org.apache.hadoop.hbase.coprocessor.ObserverContext c, + Store store, StoreFile resultFile, + org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException { + delegate.postFlush(c, store, resultFile, tracker); + } + + @Override + public void preClose(ObserverContext c, boolean abortRequested) + throws IOException { + delegate.preClose(c, abortRequested); + } + + @Override + public void postClose(ObserverContext c, boolean abortRequested) { + delegate.postClose(c, abortRequested); + } + + @Override + public void preGetOp(ObserverContext c, Get get, List result) + throws IOException { + delegate.preGetOp(c, get, result); + } + + @Override + public void postGetOp(ObserverContext c, Get get, List result) + throws IOException { + delegate.postGetOp(c, get, result); + } + + @Override + public boolean preExists(ObserverContext c, Get get, boolean exists) + throws IOException { + return delegate.preExists(c, get, exists); + } + + @Override + public boolean postExists(ObserverContext c, Get get, + boolean exists) throws IOException { + return delegate.postExists(c, get, exists); + } + + @Override + public void prePut(ObserverContext c, Put put, WALEdit edit, + Durability durability) throws IOException { + delegate.prePut(c, put, edit, durability); + } + + @Override + public void postPut(ObserverContext c, Put put, WALEdit edit, + Durability durability) throws IOException { + delegate.postPut(c, put, edit, durability); + } + + @Override + public void preDelete(ObserverContext c, Delete delete, + WALEdit edit, Durability durability) throws IOException { + delegate.preDelete(c, delete, edit, durability); + } + + @Override + public void prePrepareTimeStampForDeleteVersion(ObserverContext c, + Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException { + delegate.prePrepareTimeStampForDeleteVersion(c, mutation, cell, byteNow, get); + } + + @Override + public void postDelete(ObserverContext c, Delete delete, + WALEdit edit, Durability durability) throws IOException { + delegate.postDelete(c, delete, edit, durability); + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + delegate.preBatchMutate(c, miniBatchOp); + } + + @Override + public void postBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + delegate.postBatchMutate(c, miniBatchOp); + } + + @Override + public void postStartRegionOperation(ObserverContext ctx, + Operation operation) throws IOException { + delegate.postStartRegionOperation(ctx, operation); + } + + @Override + public void postCloseRegionOperation(ObserverContext ctx, + Operation operation) throws IOException { + delegate.postCloseRegionOperation(ctx, operation); + } + + @Override + public void postBatchMutateIndispensably(ObserverContext ctx, + MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException { + delegate.postBatchMutateIndispensably(ctx, miniBatchOp, success); + } + + @Override + public Result preAppend(ObserverContext c, Append append) + throws IOException { + return delegate.preAppend(c, append); + } + + @Override + public Result preAppendAfterRowLock(ObserverContext c, + Append append) throws IOException { + return delegate.preAppendAfterRowLock(c, append); + } + + @Override + public Result postAppend(ObserverContext c, Append append, + Result result) throws IOException { + return delegate.postAppend(c, append, result); + } + + @Override + public Result preIncrement(ObserverContext c, 
Increment increment) + throws IOException { + return delegate.preIncrement(c, increment); + } + + @Override + public Result preIncrementAfterRowLock(ObserverContext c, + Increment increment) throws IOException { + return delegate.preIncrementAfterRowLock(c, increment); + } + + @Override + public Result postIncrement(ObserverContext c, Increment increment, + Result result) throws IOException { + return delegate.postIncrement(c, increment, result); + } + + @Override + public void preScannerOpen( + org.apache.hadoop.hbase.coprocessor.ObserverContext c, Scan scan) + throws IOException { + delegate.preScannerOpen(c, scan); + } + + @Override + public RegionScanner postScannerOpen(ObserverContext c, Scan scan, + RegionScanner s) throws IOException { + return delegate.postScannerOpen(c, scan, s); + } + + @Override + public boolean preScannerNext(ObserverContext c, InternalScanner s, + List result, int limit, boolean hasNext) throws IOException { + return delegate.preScannerNext(c, s, result, limit, hasNext); + } + + @Override + public boolean postScannerNext(ObserverContext c, InternalScanner s, + List result, int limit, boolean hasNext) throws IOException { + return delegate.postScannerNext(c, s, result, limit, hasNext); + } + + @Override + public void preScannerClose(ObserverContext c, InternalScanner s) + throws IOException { + delegate.preScannerClose(c, s); + } + + @Override + public void postScannerClose(ObserverContext c, InternalScanner s) + throws IOException { + delegate.postScannerClose(c, s); + } + + @Override + public void preWALRestore(ObserverContext ctx, + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + delegate.preWALRestore(ctx, info, logKey, logEdit); + } + + @Override + public void postWALRestore(ObserverContext ctx, + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + delegate.postWALRestore(ctx, info, logKey, logEdit); + } + + @Override + public void preBulkLoadHFile(ObserverContext ctx, + List> familyPaths) throws IOException { + delegate.preBulkLoadHFile(ctx, familyPaths); + } + + @Override + public Cell postMutationBeforeWAL(ObserverContext ctx, + MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException { + return delegate.postMutationBeforeWAL(ctx, opType, mutation, oldCell, newCell); + } + + @Override + public DeleteTracker postInstantiateDeleteTracker( + ObserverContext ctx, DeleteTracker delTracker) + throws IOException { + return delegate.postInstantiateDeleteTracker(ctx, delTracker); + } + + @Override + public void preCommitStoreFile(ObserverContext ctx, byte[] family, + List> pairs) throws IOException { + delegate.preCommitStoreFile(ctx, family, pairs); + + } + + @Override + public void postCommitStoreFile(ObserverContext ctx, byte[] family, + Path srcPath, Path dstPath) throws IOException { + delegate.postCommitStoreFile(ctx, family, srcPath, dstPath); + + } + + @Override + public void postBulkLoadHFile(ObserverContext ctx, + List> stagingFamilyPaths, Map> finalPaths) + throws IOException { + delegate.postBulkLoadHFile(ctx, stagingFamilyPaths, finalPaths); + } + } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java index 3d562d57619..eef955e38fa 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java @@ 
-1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,8 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.util.ScanUtil.isDummy; + import java.io.IOException; import java.util.List; @@ -28,96 +30,91 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.ScannerContextUtil; -import static org.apache.phoenix.util.ScanUtil.isDummy; - public class DelegateRegionScanner implements RegionScanner { - protected RegionScanner delegate; - - public DelegateRegionScanner(RegionScanner scanner) { - this.delegate = scanner; - } - - @Override - public boolean isFilterDone() throws IOException { - return delegate.isFilterDone(); - } - - @Override - public boolean reseek(byte[] row) throws IOException { - return delegate.reseek(row); - } - - @Override - public long getMvccReadPoint() { - return delegate.getMvccReadPoint(); - } - - @Override - public void close() throws IOException { - delegate.close(); - } - - @Override - public long getMaxResultSize() { - return delegate.getMaxResultSize(); + protected RegionScanner delegate; + + public DelegateRegionScanner(RegionScanner scanner) { + this.delegate = scanner; + } + + @Override + public boolean isFilterDone() throws IOException { + return delegate.isFilterDone(); + } + + @Override + public boolean reseek(byte[] row) throws IOException { + return delegate.reseek(row); + } + + @Override + public long getMvccReadPoint() { + return delegate.getMvccReadPoint(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public long getMaxResultSize() { + return delegate.getMaxResultSize(); + } + + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result, false, scannerContext); + } + + @Override + public boolean next(List result) throws IOException { + return next(result, false, null); + } + + @Override + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + return next(result, true, scannerContext); + } + + @Override + public boolean nextRaw(List result) throws IOException { + return next(result, true, null); + } + + @Override + public int getBatch() { + return delegate.getBatch(); + } + + @Override + public RegionInfo getRegionInfo() { + return delegate.getRegionInfo(); + } + + public RegionScanner getNewRegionScanner(Scan scan) throws IOException { + try { + return ((DelegateRegionScanner) delegate).getNewRegionScanner(scan); + } catch (ClassCastException e) { + throw new DoNotRetryIOException(e); } - - @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result, false, scannerContext); - } - - @Override - public boolean next(List result) throws IOException { - return next(result, false, null); - } - - @Override - public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { - return next(result, true, scannerContext); - } - - @Override - public boolean nextRaw(List result) throws IOException { - return next(result, true, null); - } - - @Override - public int getBatch() { - return delegate.getBatch(); - } - - - @Override - public RegionInfo getRegionInfo() { - return delegate.getRegionInfo(); - } - - public RegionScanner getNewRegionScanner(Scan scan) throws IOException { - try { - return 
((DelegateRegionScanner) delegate).getNewRegionScanner(scan); - } catch (ClassCastException e) { - throw new DoNotRetryIOException(e); - } - } - - private boolean next(List result, boolean raw, ScannerContext scannerContext) - throws IOException { - if (scannerContext != null) { - ScannerContext noLimitContext = ScannerContextUtil - .copyNoLimitScanner(scannerContext); - boolean hasMore = raw - ? delegate.nextRaw(result, noLimitContext) - : delegate.next(result, noLimitContext); - if (isDummy(result)) { - // when a dummy row is returned by a lower layer, set returnImmediately - // on the ScannerContext to force HBase to return a response to the client - ScannerContextUtil.setReturnImmediately(scannerContext); - } - ScannerContextUtil.updateMetrics(noLimitContext, scannerContext); - return hasMore; - } - return raw ? delegate.nextRaw(result) : delegate.next(result); + } + + private boolean next(List result, boolean raw, ScannerContext scannerContext) + throws IOException { + if (scannerContext != null) { + ScannerContext noLimitContext = ScannerContextUtil.copyNoLimitScanner(scannerContext); + boolean hasMore = + raw ? delegate.nextRaw(result, noLimitContext) : delegate.next(result, noLimitContext); + if (isDummy(result)) { + // when a dummy row is returned by a lower layer, set returnImmediately + // on the ScannerContext to force HBase to return a response to the client + ScannerContextUtil.setReturnImmediately(scannerContext); + } + ScannerContextUtil.updateMetrics(noLimitContext, scannerContext); + return hasMore; } -} \ No newline at end of file + return raw ? delegate.nextRaw(result) : delegate.next(result); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DropColumnMutator.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DropColumnMutator.java index 3e77cac9cc1..f53ee61c5b6 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DropColumnMutator.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DropColumnMutator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,22 @@ */ package org.apache.phoenix.coprocessor; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FAMILY_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_INDEX; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID_INDEX; +import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; +import static org.apache.phoenix.util.SchemaUtil.getVarChars; +import static org.apache.phoenix.util.ViewUtil.isViewDiverging; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.List; +import java.util.ListIterator; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.client.Delete; @@ -48,6 +63,7 @@ import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.QueryUtil; @@ -55,252 +71,220 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.SQLException; -import java.util.List; -import java.util.ListIterator; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FAMILY_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME_INDEX; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID_INDEX; -import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT; -import static org.apache.phoenix.util.SchemaUtil.getVarChars; -import static org.apache.phoenix.util.ViewUtil.isViewDiverging; - public class DropColumnMutator implements ColumnMutator { - private List> tableAndDroppedColPairs; - private Configuration conf; + private List> tableAndDroppedColPairs; + private Configuration conf; - private static final Logger logger = LoggerFactory.getLogger(DropColumnMutator.class); + private static final Logger logger = LoggerFactory.getLogger(DropColumnMutator.class); - public DropColumnMutator(Configuration conf) { - this.tableAndDroppedColPairs = Lists.newArrayList(); - this.conf = conf; - } + public DropColumnMutator(Configuration conf) { + this.tableAndDroppedColPairs = Lists.newArrayList(); + this.conf = conf; + } - @Override - public MutateColumnType getMutateColumnType() { - return MutateColumnType.DROP_COLUMN; - } + 
@Override + public MutateColumnType getMutateColumnType() { + return MutateColumnType.DROP_COLUMN; + } - /** - * Checks to see if the column being dropped is required by a child view - */ - @Override - public MetaDataMutationResult validateWithChildViews(PTable table, List childViews, - List tableMetadata, - byte[] schemaName, byte[] tableName) - throws IOException, SQLException { - List columnDeletesForBaseTable = Lists.newArrayListWithExpectedSize(5); - for (Mutation m : tableMetadata) { - if (m instanceof Delete) { - byte[][] rkmd = new byte[5][]; - int pkCount = getVarChars(m.getRow(), rkmd); - if (pkCount > COLUMN_NAME_INDEX - && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) { - columnDeletesForBaseTable.add((Delete) m); - } - } + /** + * Checks to see if the column being dropped is required by a child view + */ + @Override + public MetaDataMutationResult validateWithChildViews(PTable table, List childViews, + List tableMetadata, byte[] schemaName, byte[] tableName) + throws IOException, SQLException { + List columnDeletesForBaseTable = Lists.newArrayListWithExpectedSize(5); + for (Mutation m : tableMetadata) { + if (m instanceof Delete) { + byte[][] rkmd = new byte[5][]; + int pkCount = getVarChars(m.getRow(), rkmd); + if ( + pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0 + ) { + columnDeletesForBaseTable.add((Delete) m); + } + } + } + for (PTable view : childViews) { + for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) { + PColumn existingViewColumn = null; + byte[][] rkmd = new byte[5][]; + getVarChars(columnDeleteForBaseTable.getRow(), rkmd); + String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]); + String columnFamily = + rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]); + try { + existingViewColumn = columnFamily == null + ? view.getColumnForColumnName(columnName) + : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName); + } catch (ColumnFamilyNotFoundException e) { + // ignore since it means that the column family is not present for the column to + // be added. + } catch (ColumnNotFoundException e) { + // ignore since it means the column is not present in the view } - for (PTable view : childViews) { - for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) { - PColumn existingViewColumn = null; - byte[][] rkmd = new byte[5][]; - getVarChars(columnDeleteForBaseTable.getRow(), rkmd); - String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]); - String columnFamily = - rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes - .toString(rkmd[FAMILY_NAME_INDEX]); - try { - existingViewColumn = columnFamily == null ? - view.getColumnForColumnName(columnName) : - view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName); - } catch (ColumnFamilyNotFoundException e) { - // ignore since it means that the column family is not present for the column to - // be added. 
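The rkmd/pkCount handling in validateWithChildViews leans on the SYSTEM.CATALOG row-key layout: a column row concatenates the null-separated tenant id, schema, table, column and family names. A small illustrative sketch of that decomposition (rowKey stands in for m.getRow() from a catalog mutation; it is not a variable introduced by this patch):

  // Illustrative decomposition of a SYSTEM.CATALOG column-row key.
  byte[][] rkmd = new byte[5][];
  int pkCount = getVarChars(rowKey, rkmd);          // number of null-separated parts present
  if (pkCount > COLUMN_NAME_INDEX) {                // i.e. this row describes a column
    String schema = Bytes.toString(rkmd[SCHEMA_NAME_INDEX]);
    String table  = Bytes.toString(rkmd[TABLE_NAME_INDEX]);
    String column = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
    // rkmd[FAMILY_NAME_INDEX] stays null for PK columns, hence the null checks in the code above.
  }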
- } catch (ColumnNotFoundException e) { - // ignore since it means the column is not present in the view - } - - // check if the view where expression contains the column being dropped and prevent - // it - if (existingViewColumn != null && view.getViewStatement() != null) { - ParseNode viewWhere = - new SQLParser(view.getViewStatement()).parseQuery().getWhere(); - try (PhoenixConnection conn = - QueryUtil.getConnectionOnServer(conf) - .unwrap(PhoenixConnection.class)) { - PhoenixStatement statement = new PhoenixStatement(conn); - TableRef baseTableRef = new TableRef(view); - ColumnResolver columnResolver = - FromCompiler.getResolver(baseTableRef); - StatementContext context = - new StatementContext(statement, columnResolver); - Expression whereExpression = - WhereCompiler.compile(context, viewWhere); - Expression colExpression = new ColumnRef(baseTableRef, - existingViewColumn.getPosition()).newColumnExpression(); - MetaDataEndpointImpl.ColumnFinder columnFinder = - new MetaDataEndpointImpl.ColumnFinder(colExpression); - whereExpression.accept(columnFinder); - if (columnFinder.getColumnFound()) { - return new MetaDataMutationResult( - MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - } - } - if (existingViewColumn != null) { - tableAndDroppedColPairs.add(new Pair(view, existingViewColumn)); - } + // check if the view where expression contains the column being dropped and prevent + // it + if (existingViewColumn != null && view.getViewStatement() != null) { + ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere(); + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(conf).unwrap(PhoenixConnection.class)) { + PhoenixStatement statement = new PhoenixStatement(conn); + TableRef baseTableRef = new TableRef(view); + ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef); + StatementContext context = new StatementContext(statement, columnResolver); + Expression whereExpression = WhereCompiler.compile(context, viewWhere); + Expression colExpression = + new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression(); + MetaDataEndpointImpl.ColumnFinder columnFinder = + new MetaDataEndpointImpl.ColumnFinder(colExpression); + whereExpression.accept(columnFinder); + if (columnFinder.getColumnFound()) { + return new MetaDataMutationResult( + MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); } + } + } + if (existingViewColumn != null) { + tableAndDroppedColPairs.add(new Pair(view, existingViewColumn)); } - return null; - } + } - @Override - public MetaDataMutationResult validateAndAddMetadata(PTable table, - byte[][] rowKeyMetaData, - List tableMetaData, - Region region, - List invalidateList, - List locks, - long clientTimeStamp, - long clientVersion, - ExtendedCellBuilder extendedCellBuilder, - final boolean isDroppingColumns) - throws SQLException { - byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX]; - byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX]; - byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX]; - boolean isView = table.getType() == PTableType.VIEW; - boolean deletePKColumn = false; + } + return null; + } - byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, - schemaName, tableName); - List additionalTableMetaData = Lists.newArrayList(); - ListIterator iterator = tableMetaData.listIterator(); - while (iterator.hasNext()) { - Mutation mutation = iterator.next(); - byte[] key 
= mutation.getRow(); - int pkCount = getVarChars(key, rowKeyMetaData); - if (isView && mutation instanceof Put) { - PColumn column = null; - // checking put from the view or index - if (Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) { - column = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, table); - } else { - for (int i = 0; i < table.getIndexes().size(); i++) { - PTableImpl indexTable = (PTableImpl) table.getIndexes().get(i); - byte[] indexTableName = indexTable.getTableName().getBytes(); - byte[] indexSchema = indexTable.getSchemaName().getBytes(); - if (Bytes.compareTo(indexSchema, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(indexTableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) { - column = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, indexTable); - break; - } - } - } - if (column == null) - continue; - // ignore any puts that modify the ordinal positions of columns - iterator.remove(); - } else if (mutation instanceof Delete) { - if (pkCount > COLUMN_NAME_INDEX - && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 - && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) { - PColumn columnToDelete = null; - try { - columnToDelete = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, table); - if (columnToDelete == null) - continue; - deletePKColumn = columnToDelete.getFamilyName() == null; - if (isView) { - // if we are dropping a derived column add it to the excluded - // column list. Note that this is only done for 4.15+ clients - // since old clients do not have the isDerived field - if (columnToDelete.isDerived()) { - mutation = MetaDataUtil.cloneDeleteToPutAndAddColumn((Delete) - mutation, TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, - PTable.LinkType.EXCLUDED_COLUMN. - getSerializedValueAsByteArray()); - iterator.set(mutation); - } + @Override + public MetaDataMutationResult validateAndAddMetadata(PTable table, byte[][] rowKeyMetaData, + List tableMetaData, Region region, List invalidateList, + List locks, long clientTimeStamp, long clientVersion, + ExtendedCellBuilder extendedCellBuilder, final boolean isDroppingColumns) throws SQLException { + byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX]; + byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX]; + byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX]; + boolean isView = table.getType() == PTableType.VIEW; + boolean deletePKColumn = false; - if (isViewDiverging(columnToDelete, table, clientVersion)) { - // If the column being dropped is inherited from the base table, - // then the view is about to diverge itself from the base table. - // The consequence of this divergence is that that any further - // meta-data changes made to the base table will not be - // propagated to the hierarchy of views where this view is the root. 
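The derived-column branch above swaps the incoming Delete for a Put that records the column as excluded rather than physically removing it, so 4.15+ clients resolving the view skip the column while older metadata stays readable. A rough sketch of what that conversion produces; the authoritative logic lives in MetaDataUtil.cloneDeleteToPutAndAddColumn and may differ in detail (columnDelete is a hypothetical stand-in for the Delete being rewritten):

  // Sketch only: same catalog row key, but an EXCLUDED_COLUMN link instead of a delete.
  Put excluded = new Put(columnDelete.getRow(), clientTimeStamp);
  excluded.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, clientTimeStamp,
      PTable.LinkType.EXCLUDED_COLUMN.getSerializedValueAsByteArray());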
- byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, - tableName); - Put updateBaseColumnCountPut = new Put(viewKey); - byte[] baseColumnCountPtr = - new byte[PInteger.INSTANCE.getByteSize()]; - PInteger.INSTANCE.getCodec().encodeInt( - DIVERGED_VIEW_BASE_COLUMN_COUNT, baseColumnCountPtr, 0); - updateBaseColumnCountPut.addColumn( - PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, - clientTimeStamp, - baseColumnCountPtr); - additionalTableMetaData.add(updateBaseColumnCountPut); - } - } - if (columnToDelete.isViewReferenced()) { - // Disallow deletion of column referenced in WHERE clause of view - return new MetaDataProtocol.MetaDataMutationResult( - MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table, - columnToDelete); - } - // drop any indexes that need the column that is going to be dropped - tableAndDroppedColPairs.add(new Pair<>(table, columnToDelete)); - } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) { - return new MetaDataProtocol.MetaDataMutationResult( - MetaDataProtocol.MutationCode.COLUMN_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete); - } - } + byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + List additionalTableMetaData = Lists.newArrayList(); + ListIterator iterator = tableMetaData.listIterator(); + while (iterator.hasNext()) { + Mutation mutation = iterator.next(); + byte[] key = mutation.getRow(); + int pkCount = getVarChars(key, rowKeyMetaData); + if (isView && mutation instanceof Put) { + PColumn column = null; + // checking put from the view or index + if ( + Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0 + ) { + column = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, table); + } else { + for (int i = 0; i < table.getIndexes().size(); i++) { + PTableImpl indexTable = (PTableImpl) table.getIndexes().get(i); + byte[] indexTableName = indexTable.getTableName().getBytes(); + byte[] indexSchema = indexTable.getSchemaName().getBytes(); + if ( + Bytes.compareTo(indexSchema, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(indexTableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0 + ) { + column = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, indexTable); + break; } - - } - //We're changing the application-facing schema by dropping a column, so update the DDL - // timestamp to current _server_ timestamp - if (MetaDataUtil.isTableDirectlyQueried(table.getType())) { - long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - additionalTableMetaData.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, - clientTimeStamp, serverTimestamp)); + } } - //we don't need to update the DDL timestamp for any child views we may have, because - // when we look up a PTable for any of those child views, we'll take the max timestamp - // of the view and all its ancestors. This is true - // whether the view is diverged or not. 
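On the last-DDL-timestamp comments above: only the altered table's header row receives a new LAST_DDL_TIMESTAMP cell, and a client resolving one of its child views is expected to take the maximum across the view and its ancestors. A hypothetical resolution sketch, assuming a PTable accessor along the lines of getLastDDLTimestamp() (that accessor is an assumption here, not something this patch adds):

  // Hypothetical sketch: the freshest DDL timestamp along the ancestry chain wins,
  // whether or not the view has diverged from its base table.
  static long effectiveDdlTimestamp(PTable view, PTable baseTable) {
    long viewTs = view.getLastDDLTimestamp() == null ? 0L : view.getLastDDLTimestamp();
    long baseTs = baseTable.getLastDDLTimestamp() == null ? 0L : baseTable.getLastDDLTimestamp();
    return Math.max(viewTs, baseTs);
  }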
- tableMetaData.addAll(additionalTableMetaData); - if (deletePKColumn) { - if (table.getPKColumns().size() == 1) { - return new MetaDataProtocol.MetaDataMutationResult( - MetaDataProtocol.MutationCode.NO_PK_COLUMNS, - EnvironmentEdgeManager.currentTimeMillis(), null); + if (column == null) continue; + // ignore any puts that modify the ordinal positions of columns + iterator.remove(); + } else if (mutation instanceof Delete) { + if ( + pkCount > COLUMN_NAME_INDEX + && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 + && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0 + ) { + PColumn columnToDelete = null; + try { + columnToDelete = MetaDataUtil.getColumn(pkCount, rowKeyMetaData, table); + if (columnToDelete == null) continue; + deletePKColumn = columnToDelete.getFamilyName() == null; + if (isView) { + // if we are dropping a derived column add it to the excluded + // column list. Note that this is only done for 4.15+ clients + // since old clients do not have the isDerived field + if (columnToDelete.isDerived()) { + mutation = MetaDataUtil.cloneDeleteToPutAndAddColumn((Delete) mutation, + TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, + PTable.LinkType.EXCLUDED_COLUMN.getSerializedValueAsByteArray()); + iterator.set(mutation); + } + + if (isViewDiverging(columnToDelete, table, clientVersion)) { + // If the column being dropped is inherited from the base table, + // then the view is about to diverge itself from the base table. + // The consequence of this divergence is that that any further + // meta-data changes made to the base table will not be + // propagated to the hierarchy of views where this view is the root. + byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + Put updateBaseColumnCountPut = new Put(viewKey); + byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()]; + PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT, + baseColumnCountPtr, 0); + updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, + baseColumnCountPtr); + additionalTableMetaData.add(updateBaseColumnCountPut); + } + } + if (columnToDelete.isViewReferenced()) { + // Disallow deletion of column referenced in WHERE clause of view + return new MetaDataProtocol.MetaDataMutationResult( + MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete); } + // drop any indexes that need the column that is going to be dropped + tableAndDroppedColPairs.add(new Pair<>(table, columnToDelete)); + } catch (ColumnFamilyNotFoundException | ColumnNotFoundException e) { + return new MetaDataProtocol.MetaDataMutationResult( + MetaDataProtocol.MutationCode.COLUMN_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete); + } } - long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData); + } + + } + // We're changing the application-facing schema by dropping a column, so update the DDL + // timestamp to current _server_ timestamp + if (MetaDataUtil.isTableDirectlyQueried(table.getType())) { + long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + additionalTableMetaData.add(MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, + clientTimeStamp, serverTimestamp)); + } + // we don't need to update the DDL timestamp for any child views we may have, because + // when we look up a PTable for any of those child views, we'll take the max timestamp + // of 
the view and all its ancestors. This is true + // whether the view is diverged or not. + tableMetaData.addAll(additionalTableMetaData); + if (deletePKColumn) { + if (table.getPKColumns().size() == 1) { return new MetaDataProtocol.MetaDataMutationResult( - MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS, currentTime, null); + MetaDataProtocol.MutationCode.NO_PK_COLUMNS, EnvironmentEdgeManager.currentTimeMillis(), + null); + } } + long currentTime = MetaDataUtil.getClientTimeStamp(tableMetaData); + return new MetaDataProtocol.MetaDataMutationResult( + MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS, currentTime, null); + } - @Override - public List> getTableAndDroppedColumnPairs() { - return tableAndDroppedColPairs; - } + @Override + public List> getTableAndDroppedColumnPairs() { + return tableAndDroppedColPairs; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java index f5ef7a87dc2..5b2766953bc 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +17,38 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.hbase.index.write.AbstractParallelWriterIndexCommitter.INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_INVALID; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_MISSING; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.EXTRA_CELLS; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.EXTRA_ROW; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.INVALID_ROW; +import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.MISSING_ROW; +import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; +import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; +import static org.apache.phoenix.query.QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS; +import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB; +import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB; +import static org.apache.phoenix.util.ScanUtil.isDummy; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import 
org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; - import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -36,12 +62,12 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.execute.MutationState; -import org.apache.phoenix.hbase.index.IndexRegionObserver; -import org.apache.phoenix.hbase.index.ValueGetter; import org.apache.phoenix.compile.ScanRanges; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; +import org.apache.phoenix.execute.MutationState; import org.apache.phoenix.filter.SkipScanFilter; +import org.apache.phoenix.hbase.index.IndexRegionObserver; +import org.apache.phoenix.hbase.index.ValueGetter; import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure; import org.apache.phoenix.hbase.index.parallel.TaskBatch; import org.apache.phoenix.hbase.index.parallel.TaskRunner; @@ -51,8 +77,8 @@ import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.hbase.index.util.IndexManagementUtil; +import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.index.PhoenixIndexCodec; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -63,1456 +89,1483 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.transform.TransformMaintainer; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.ClientUtil; -import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.IndexUtil; -import org.apache.phoenix.util.QueryUtil; -import org.apache.phoenix.util.ServerUtil; import org.apache.phoenix.util.MetaDataUtil; +import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.ScanUtil; +import org.apache.phoenix.util.ServerUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - -import static org.apache.phoenix.hbase.index.write.AbstractParallelWriterIndexCommitter.INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY; -import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_INVALID; -import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_MISSING; -import static 
org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.EXTRA_CELLS; -import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.EXTRA_ROW; -import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.INVALID_ROW; -import static org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType.MISSING_ROW; -import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; -import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; -import static org.apache.phoenix.query.QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS; -import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB; -import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB; -import static org.apache.phoenix.util.ScanUtil.isDummy; - /** - * This is an abstract region scanner which is used to scan index or data table rows locally. From the data table rows, - * expected index table mutations are generated. These expected index mutations are used for both repairing and - * rebuilding index table rows and also verifying them. + * This is an abstract region scanner which is used to scan index or data table rows locally. From + * the data table rows, expected index table mutations are generated. These expected index mutations + * are used for both repairing and rebuilding index table rows and also verifying them. */ public abstract class GlobalIndexRegionScanner extends BaseRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(GlobalIndexRegionScanner.class); - - public static final String NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY = "phoenix.index.verify.threads.max"; - public static final int DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS = 16; - public static final String INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY = "phoenix.index.verify.row.count.per.task"; - public static final int DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK = 2048; - public static final String NO_EXPECTED_MUTATION = "No expected mutation"; - public static final String ACTUAL_MUTATION_IS_NULL_OR_EMPTY = "actualMutationList is null or empty"; - public static final String ERROR_MESSAGE_MISSING_INDEX_ROW_BEYOND_MAX_LOOKBACK = "Missing index row beyond maxLookBack"; - public static final String ERROR_MESSAGE_MISSING_INDEX_ROW = "Missing index row"; - public static final String ERROR_MESSAGE_EXTRA_INDEX_ROW = "Extra index row"; - public static final String PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS = - "phoenix.index.mr.log.beyond.max.lookback.errors"; - public static final boolean DEFAULT_PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS = false; - - protected final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver; - - protected IndexTool.IndexDisableLoggingType disableLoggingVerifyType = IndexTool.IndexDisableLoggingType.NONE; - protected byte[][] viewConstants; - protected IndexVerificationOutputRepository verificationOutputRepository = null; - protected boolean skipped = false; - protected boolean shouldRetry = false; - protected boolean shouldVerifyCheckDone = false; - final protected RegionCoprocessorEnvironment env; - protected byte[][] regionEndKeys; - protected byte[] nextStartKey; - protected boolean hasMoreIncr = false; - protected long minTimestamp = 0; - protected byte[] indexRowKeyforReadRepair; - protected Table dataHTable = null; - protected long pageSizeInRows = Long.MAX_VALUE; - 
protected int rowCountPerTask; - protected boolean hasMore; - protected int maxBatchSize; - protected final long maxBatchSizeBytes; - protected final long blockingMemstoreSize; - protected final byte[] clientVersionBytes; - protected boolean useProto = true; - protected byte[] indexMetaData; - protected Scan scan; - protected RegionScanner innerScanner; - protected Region region; - protected IndexMaintainer indexMaintainer; - protected Table indexHTable = null; - protected TaskRunner pool; - protected String exceptionMessage; - protected HTableFactory hTableFactory; - protected int indexTableTTL; - protected long maxLookBackInMills; - protected IndexToolVerificationResult verificationResult = null; - protected IndexVerificationResultRepository verificationResultRepository = null; - protected Map> familyMap; - protected IndexTool.IndexVerifyType verifyType = IndexTool.IndexVerifyType.NONE; - protected boolean verify = false; - protected byte[] tenantId; - protected byte[] schemaName; - protected byte[] logicalTableName; - protected byte[] tableType; - protected byte[] lastDdlTimestamp; - - // This relies on Hadoop Configuration to handle warning about deprecated configs and - // to set the correct non-deprecated configs when an old one shows up. - static { - Configuration.addDeprecation("index.verify.threads.max", NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY); - Configuration.addDeprecation("index.verify.row.count.per.task", INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY); - } - - public GlobalIndexRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) - throws IOException { - super(innerScanner); - final Configuration config = env.getConfiguration(); - if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING) != null) { - byte[] pageSizeFromScan = - scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS); - if (pageSizeFromScan != null) { - pageSizeInRows = Bytes.toLong(pageSizeFromScan); - } else { - pageSizeInRows = - config.getLong(INDEX_REBUILD_PAGE_SIZE_IN_ROWS, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS); - } - } - maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - maxBatchSizeBytes = config.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); - blockingMemstoreSize = UngroupedAggregateRegionObserver.getBlockingMemstoreSize(region, config); - clientVersionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); - familyMap = scan.getFamilyMap(); - if (familyMap.isEmpty()) { - familyMap = null; - } - indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); - if (indexMetaData == null) { - indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); - } - tenantId = scan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); - schemaName = scan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); - logicalTableName = scan.getAttribute( - MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); - tableType = scan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); - lastDdlTimestamp = scan.getAttribute( - MutationState.MutationMetadataType.TIMESTAMP.toString()); - byte[] transforming = scan.getAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING); - List maintainers = 
null; - if (transforming == null) { - maintainers = IndexMaintainer.deserialize(indexMetaData, true); - } else { - maintainers = TransformMaintainer.deserialize(indexMetaData); - } - indexMaintainer = maintainers.get(0); - this.scan = scan; - this.innerScanner = innerScanner; - this.region = region; - this.env = env; - this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver; - byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE); - if (valueBytes != null) { - verifyType = IndexTool.IndexVerifyType.fromValue(valueBytes); - if (verifyType != IndexTool.IndexVerifyType.NONE) { - verify = true; - } - } - // Create the following objects only for rebuilds by IndexTool - hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env); - maxLookBackInMills = MetaDataUtil.getMaxLookbackAge(config, ScanUtil.getMaxLookbackAgeFromScanAttribute(scan)); - rowCountPerTask = config.getInt(INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY, - DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK); - - pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor( - new ThreadPoolBuilder("IndexVerify", - env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY, - DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS).setCoreTimeout( - INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); - - if (verify) { - boolean shouldLogBeyondMaxLookbackInvalidRows; - byte[] scanParamShouldLogBeyondMaxLookbackInvalidRows = - scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE); - if (scanParamShouldLogBeyondMaxLookbackInvalidRows != null) { - shouldLogBeyondMaxLookbackInvalidRows = - Boolean.parseBoolean(Bytes.toString(scanParamShouldLogBeyondMaxLookbackInvalidRows)); - } else { - shouldLogBeyondMaxLookbackInvalidRows = - env.getConfiguration().getBoolean(PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS, - DEFAULT_PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS); - } - viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); - byte[] disableLoggingValueBytes = - scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE); - if (disableLoggingValueBytes != null) { - disableLoggingVerifyType = - IndexTool.IndexDisableLoggingType.fromValue(disableLoggingValueBytes); - } - verificationOutputRepository = - new IndexVerificationOutputRepository(indexMaintainer.getIndexTableName() - , hTableFactory, disableLoggingVerifyType); - verificationOutputRepository.setShouldLogBeyondMaxLookback(shouldLogBeyondMaxLookbackInvalidRows); - verificationResult = new IndexToolVerificationResult(scan); - verificationResultRepository = - new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory); - nextStartKey = null; - } - computeMinTimestamp(config); - } - - /** - * For CDC indexes we do not need to consider rows outside max lookback window or before - * the index create time. 
minTimestamp needs to be computed and used for CDC indexes always - * even when it is not set on the scan - */ - private void computeMinTimestamp(Configuration config) throws IOException { - minTimestamp = scan.getTimeRange().getMin(); - if (indexMaintainer.isCDCIndex()) { - minTimestamp = EnvironmentEdgeManager.currentTimeMillis() - maxLookBackInMills; - try (PhoenixConnection conn = - QueryUtil.getConnectionOnServer(config).unwrap(PhoenixConnection.class)) { - PTable indexTable = conn.getTableNoCache(indexMaintainer.getLogicalIndexName()); - minTimestamp = Math.max(indexTable.getTimeStamp() + 1, minTimestamp); - } catch (SQLException e) { - LOGGER.error( - "Unable to get the PTable for the index table " - + indexMaintainer.getLogicalIndexName() + " " + e); - throw new IOException(e); - } - } + private static final Logger LOGGER = LoggerFactory.getLogger(GlobalIndexRegionScanner.class); + + public static final String NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY = + "phoenix.index.verify.threads.max"; + public static final int DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS = 16; + public static final String INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY = + "phoenix.index.verify.row.count.per.task"; + public static final int DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK = 2048; + public static final String NO_EXPECTED_MUTATION = "No expected mutation"; + public static final String ACTUAL_MUTATION_IS_NULL_OR_EMPTY = + "actualMutationList is null or empty"; + public static final String ERROR_MESSAGE_MISSING_INDEX_ROW_BEYOND_MAX_LOOKBACK = + "Missing index row beyond maxLookBack"; + public static final String ERROR_MESSAGE_MISSING_INDEX_ROW = "Missing index row"; + public static final String ERROR_MESSAGE_EXTRA_INDEX_ROW = "Extra index row"; + public static final String PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS = + "phoenix.index.mr.log.beyond.max.lookback.errors"; + public static final boolean DEFAULT_PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS = false; + + protected final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver; + + protected IndexTool.IndexDisableLoggingType disableLoggingVerifyType = + IndexTool.IndexDisableLoggingType.NONE; + protected byte[][] viewConstants; + protected IndexVerificationOutputRepository verificationOutputRepository = null; + protected boolean skipped = false; + protected boolean shouldRetry = false; + protected boolean shouldVerifyCheckDone = false; + final protected RegionCoprocessorEnvironment env; + protected byte[][] regionEndKeys; + protected byte[] nextStartKey; + protected boolean hasMoreIncr = false; + protected long minTimestamp = 0; + protected byte[] indexRowKeyforReadRepair; + protected Table dataHTable = null; + protected long pageSizeInRows = Long.MAX_VALUE; + protected int rowCountPerTask; + protected boolean hasMore; + protected int maxBatchSize; + protected final long maxBatchSizeBytes; + protected final long blockingMemstoreSize; + protected final byte[] clientVersionBytes; + protected boolean useProto = true; + protected byte[] indexMetaData; + protected Scan scan; + protected RegionScanner innerScanner; + protected Region region; + protected IndexMaintainer indexMaintainer; + protected Table indexHTable = null; + protected TaskRunner pool; + protected String exceptionMessage; + protected HTableFactory hTableFactory; + protected int indexTableTTL; + protected long maxLookBackInMills; + protected IndexToolVerificationResult verificationResult = null; + protected IndexVerificationResultRepository verificationResultRepository = null; + 
protected Map> familyMap; + protected IndexTool.IndexVerifyType verifyType = IndexTool.IndexVerifyType.NONE; + protected boolean verify = false; + protected byte[] tenantId; + protected byte[] schemaName; + protected byte[] logicalTableName; + protected byte[] tableType; + protected byte[] lastDdlTimestamp; + + // This relies on Hadoop Configuration to handle warning about deprecated configs and + // to set the correct non-deprecated configs when an old one shows up. + static { + Configuration.addDeprecation("index.verify.threads.max", + NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY); + Configuration.addDeprecation("index.verify.row.count.per.task", + INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY); + } + + public GlobalIndexRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, + final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException { + super(innerScanner); + final Configuration config = env.getConfiguration(); + if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING) != null) { + byte[] pageSizeFromScan = + scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS); + if (pageSizeFromScan != null) { + pageSizeInRows = Bytes.toLong(pageSizeFromScan); + } else { + pageSizeInRows = config.getLong(INDEX_REBUILD_PAGE_SIZE_IN_ROWS, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS); + } } - public static long getTimestamp(Mutation m) { - for (List cells : m.getFamilyCellMap().values()) { - for (Cell cell : cells) { - return cell.getTimestamp(); - } - } - throw new IllegalStateException("No cell found"); + maxBatchSize = + config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + maxBatchSizeBytes = config.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); + blockingMemstoreSize = UngroupedAggregateRegionObserver.getBlockingMemstoreSize(region, config); + clientVersionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); + familyMap = scan.getFamilyMap(); + if (familyMap.isEmpty()) { + familyMap = null; } - - protected static boolean isTimestampBeforeTTL(int tableTTL, long currentTime, long tsToCheck) { - if (tableTTL == HConstants.FOREVER) { - return false; - } - return tsToCheck < (currentTime - tableTTL * 1000L); + indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); + if (indexMetaData == null) { + indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); } - - protected static boolean isTimestampBeyondMaxLookBack(long maxLookBackInMills, - long currentTime, long tsToCheck) { - if (!BaseScannerRegionObserver.isMaxLookbackTimeEnabled(maxLookBackInMills)) { - // By definition, if the max lookback feature is not enabled, then delete markers and rows - // version can be removed by compaction any time, and thus there is no window in which these mutations are - // preserved, i.e., the max lookback window size is zero. This means all the mutations are effectively - // beyond the zero size max lookback window. 
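The addDeprecation static block above is what lets clusters that still set the legacy index.verify.* keys keep working: Hadoop's Configuration resolves a deprecated key to its replacement at read time and logs a warning. A small self-contained example of that behaviour (the key names are copied from the patch; the value 8 is arbitrary):

import org.apache.hadoop.conf.Configuration;

public class DeprecationExample {
  static {
    // Map the legacy key to the key the code actually reads.
    Configuration.addDeprecation("index.verify.threads.max",
        "phoenix.index.verify.threads.max");
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("index.verify.threads.max", "8");                            // old-style setting
    // The new key resolves to the same value; a deprecation warning is logged.
    System.out.println(conf.get("phoenix.index.verify.threads.max", "16")); // prints 8
  }
}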
- return true; - } - return tsToCheck < (currentTime - maxLookBackInMills); + tenantId = scan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); + schemaName = scan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); + logicalTableName = + scan.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); + tableType = scan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); + lastDdlTimestamp = scan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString()); + byte[] transforming = scan.getAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING); + List maintainers = null; + if (transforming == null) { + maintainers = IndexMaintainer.deserialize(indexMetaData, true); + } else { + maintainers = TransformMaintainer.deserialize(indexMetaData); } - - protected boolean isColumnIncluded(Cell cell) { - byte[] family = CellUtil.cloneFamily(cell); - if (!familyMap.containsKey(family)) { - return false; - } - NavigableSet set = familyMap.get(family); - if (set == null || set.isEmpty()) { - return true; - } - byte[] qualifier = CellUtil.cloneQualifier(cell); - return set.contains(qualifier); - } - - @VisibleForTesting - public boolean shouldVerify(IndexTool.IndexVerifyType verifyType, - byte[] indexRowKey, Scan scan, Region region, IndexMaintainer indexMaintainer, - IndexVerificationResultRepository verificationResultRepository, boolean shouldVerifyCheckDone) throws IOException { - this.verifyType = verifyType; - this.indexRowKeyforReadRepair = indexRowKey; - this.scan = scan; - this.region = region; - this.indexMaintainer = indexMaintainer; - this.verificationResultRepository = verificationResultRepository; - this.shouldVerifyCheckDone = shouldVerifyCheckDone; - return shouldVerify(); - } - - public boolean shouldVerify() throws IOException { - // In case of read repair, proceed with rebuild - // All other types of rebuilds/verification should be incrementally performed if appropriate param is passed - byte[] lastVerifyTimeValue = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY); - Long lastVerifyTime = lastVerifyTimeValue == null ? 0 : Bytes.toLong(lastVerifyTimeValue); - if(indexRowKeyforReadRepair != null || lastVerifyTime == 0 || shouldVerifyCheckDone) { - return true; - } - - IndexToolVerificationResult verificationResultTemp = verificationResultRepository - .getVerificationResult(lastVerifyTime, scan, region, indexMaintainer.getIndexTableName()); - if(verificationResultTemp != null) { - verificationResult = verificationResultTemp; - } - - shouldVerifyCheckDone = true; - if (verificationResult != null && verificationResult.getShouldRetry()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("ShouldRetry is true. 
" + region.getRegionInfo().getRegionNameAsString()); - } - return true; - } - return verificationResultTemp == null; + indexMaintainer = maintainers.get(0); + this.scan = scan; + this.innerScanner = innerScanner; + this.region = region; + this.env = env; + this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver; + byte[] valueBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE); + if (valueBytes != null) { + verifyType = IndexTool.IndexVerifyType.fromValue(valueBytes); + if (verifyType != IndexTool.IndexVerifyType.NONE) { + verify = true; + } } - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); + // Create the following objects only for rebuilds by IndexTool + hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env); + maxLookBackInMills = + MetaDataUtil.getMaxLookbackAge(config, ScanUtil.getMaxLookbackAgeFromScanAttribute(scan)); + rowCountPerTask = config.getInt(INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY, + DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK); + + pool = new WaitForCompletionTaskRunner( + ThreadPoolManager.getExecutor(new ThreadPoolBuilder("IndexVerify", env.getConfiguration()) + .setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY, + DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS) + .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); + + if (verify) { + boolean shouldLogBeyondMaxLookbackInvalidRows; + byte[] scanParamShouldLogBeyondMaxLookbackInvalidRows = scan.getAttribute( + BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE); + if (scanParamShouldLogBeyondMaxLookbackInvalidRows != null) { + shouldLogBeyondMaxLookbackInvalidRows = + Boolean.parseBoolean(Bytes.toString(scanParamShouldLogBeyondMaxLookbackInvalidRows)); + } else { + shouldLogBeyondMaxLookbackInvalidRows = + env.getConfiguration().getBoolean(PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS, + DEFAULT_PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS); + } + viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); + byte[] disableLoggingValueBytes = scan + .getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE); + if (disableLoggingValueBytes != null) { + disableLoggingVerifyType = + IndexTool.IndexDisableLoggingType.fromValue(disableLoggingValueBytes); + } + verificationOutputRepository = new IndexVerificationOutputRepository( + indexMaintainer.getIndexTableName(), hTableFactory, disableLoggingVerifyType); + verificationOutputRepository + .setShouldLogBeyondMaxLookback(shouldLogBeyondMaxLookbackInvalidRows); + verificationResult = new IndexToolVerificationResult(scan); + verificationResultRepository = + new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory); + nextStartKey = null; } - - @Override - public boolean isFilterDone() { - return false; + computeMinTimestamp(config); + } + + /** + * For CDC indexes we do not need to consider rows outside max lookback window or before the index + * create time. 
minTimestamp needs to be computed and used for CDC indexes always even when it is + * not set on the scan + */ + private void computeMinTimestamp(Configuration config) throws IOException { + minTimestamp = scan.getTimeRange().getMin(); + if (indexMaintainer.isCDCIndex()) { + minTimestamp = EnvironmentEdgeManager.currentTimeMillis() - maxLookBackInMills; + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(config).unwrap(PhoenixConnection.class)) { + PTable indexTable = conn.getTableNoCache(indexMaintainer.getLogicalIndexName()); + minTimestamp = Math.max(indexTable.getTimeStamp() + 1, minTimestamp); + } catch (SQLException e) { + LOGGER.error("Unable to get the PTable for the index table " + + indexMaintainer.getLogicalIndexName() + " " + e); + throw new IOException(e); + } } + } - private void closeTables() throws IOException { - hTableFactory.shutdown(); - if (indexHTable != null) { - indexHTable.close(); - } - if (dataHTable != null) { - dataHTable.close(); - } + public static long getTimestamp(Mutation m) { + for (List cells : m.getFamilyCellMap().values()) { + for (Cell cell : cells) { + return cell.getTimestamp(); + } } + throw new IllegalStateException("No cell found"); + } - @Override - public void close() throws IOException { - innerScanner.close(); - if (indexRowKeyforReadRepair != null) { - closeTables(); - return; - } - if (verify) { - try { - if (verificationResultRepository != null) { - verificationResultRepository.logToIndexToolResultTable(verificationResult, - verifyType, region.getRegionInfo().getRegionName(), skipped, shouldRetry); - } - } finally { - this.pool.stop("IndexRegionObserverRegionScanner is closing"); - closeTables(); - if (verificationResultRepository != null) { - verificationResultRepository.close(); - } - if (verificationOutputRepository != null) { - verificationOutputRepository.close(); - } - } - } - else { - this.pool.stop("GlobalIndexRegionScanner is closing"); - closeTables(); - } + protected static boolean isTimestampBeforeTTL(int tableTTL, long currentTime, long tsToCheck) { + if (tableTTL == HConstants.FOREVER) { + return false; } - - @VisibleForTesting - public int setIndexTableTTL(int ttl) { - indexTableTTL = ttl; - return 0; + return tsToCheck < (currentTime - tableTTL * 1000L); + } + + protected static boolean isTimestampBeyondMaxLookBack(long maxLookBackInMills, long currentTime, + long tsToCheck) { + if (!BaseScannerRegionObserver.isMaxLookbackTimeEnabled(maxLookBackInMills)) { + // By definition, if the max lookback feature is not enabled, then delete markers and rows + // version can be removed by compaction any time, and thus there is no window in which these + // mutations are + // preserved, i.e., the max lookback window size is zero. This means all the mutations are + // effectively + // beyond the zero size max lookback window. 
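To make the window arithmetic above concrete, here is a small worked example against the same predicate, tsToCheck < currentTime - maxLookBackInMills; the numbers are chosen purely for illustration:

  // Worked example of the beyond-max-lookback check.
  long maxLookBackInMills = 600_000L;                 // a 10 minute window
  long currentTime = 1_700_000_000_000L;              // "now" in epoch millis
  long freshTs = currentTime - 30_000L;               // 30 seconds old
  long staleTs = currentTime - 900_000L;              // 15 minutes old
  boolean freshBeyond = freshTs < (currentTime - maxLookBackInMills);  // false: still inside
  boolean staleBeyond = staleTs < (currentTime - maxLookBackInMills);  // true: past the window
  // When max lookback is disabled the window size is treated as zero, so the method above
  // short-circuits to true for every timestamp.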
+ return true; } + return tsToCheck < (currentTime - maxLookBackInMills); + } - @VisibleForTesting - public int setIndexMaintainer(IndexMaintainer indexMaintainer) { - this.indexMaintainer = indexMaintainer; - return 0; + protected boolean isColumnIncluded(Cell cell) { + byte[] family = CellUtil.cloneFamily(cell); + if (!familyMap.containsKey(family)) { + return false; } - - @VisibleForTesting - public long setMaxLookBackInMills(long maxLookBackInMills) { - this.maxLookBackInMills = maxLookBackInMills; - return 0; + NavigableSet set = familyMap.get(family); + if (set == null || set.isEmpty()) { + return true; } - - public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, long indexRowTs, - String errorMsg, boolean isBeforeRebuild, - IndexVerificationOutputRepository.IndexVerificationErrorType errorType) throws IOException { - logToIndexToolOutputTable(dataRowKey, indexRowKey, dataRowTs, indexRowTs, errorMsg, null, - null, isBeforeRebuild, errorType); + byte[] qualifier = CellUtil.cloneQualifier(cell); + return set.contains(qualifier); + } + + @VisibleForTesting + public boolean shouldVerify(IndexTool.IndexVerifyType verifyType, byte[] indexRowKey, Scan scan, + Region region, IndexMaintainer indexMaintainer, + IndexVerificationResultRepository verificationResultRepository, boolean shouldVerifyCheckDone) + throws IOException { + this.verifyType = verifyType; + this.indexRowKeyforReadRepair = indexRowKey; + this.scan = scan; + this.region = region; + this.indexMaintainer = indexMaintainer; + this.verificationResultRepository = verificationResultRepository; + this.shouldVerifyCheckDone = shouldVerifyCheckDone; + return shouldVerify(); + } + + public boolean shouldVerify() throws IOException { + // In case of read repair, proceed with rebuild + // All other types of rebuilds/verification should be incrementally performed if appropriate + // param is passed + byte[] lastVerifyTimeValue = + scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY); + Long lastVerifyTime = lastVerifyTimeValue == null ? 0 : Bytes.toLong(lastVerifyTimeValue); + if (indexRowKeyforReadRepair != null || lastVerifyTime == 0 || shouldVerifyCheckDone) { + return true; } - protected byte[] getDataTableName() { - return region.getRegionInfo().getTable().getName(); + IndexToolVerificationResult verificationResultTemp = verificationResultRepository + .getVerificationResult(lastVerifyTime, scan, region, indexMaintainer.getIndexTableName()); + if (verificationResultTemp != null) { + verificationResult = verificationResultTemp; } - @VisibleForTesting - public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, long indexRowTs, - String errorMsg, byte[] expectedVaue, byte[] actualValue, boolean isBeforeRebuild, - IndexVerificationOutputRepository.IndexVerificationErrorType errorType) throws IOException { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - byte[] dataTableName = getDataTableName(); - verificationOutputRepository.logToIndexToolOutputTable(dataRowKey, indexRowKey, dataRowTs, indexRowTs, - errorMsg, expectedVaue, actualValue, scan.getTimeRange().getMax(), - dataTableName, isBeforeRebuild, errorType); + shouldVerifyCheckDone = true; + if (verificationResult != null && verificationResult.getShouldRetry()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("ShouldRetry is true. 
" + region.getRegionInfo().getRegionNameAsString()); + } + return true; } - - private static Cell getCell(Mutation m, byte[] family, byte[] qualifier) { - List cellList = m.getFamilyCellMap().get(family); - if (cellList == null) { - return null; - } - for (Cell cell : cellList) { - if (CellUtil.matchingQualifier(cell, qualifier)) { - return cell; - } - } - return null; + return verificationResultTemp == null; + } + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + @Override + public boolean isFilterDone() { + return false; + } + + private void closeTables() throws IOException { + hTableFactory.shutdown(); + if (indexHTable != null) { + indexHTable.close(); } - - private void logMismatch(Mutation expected, Mutation actual, int iteration, IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) throws IOException { - if (getTimestamp(expected) != getTimestamp(actual)) { - String errorMsg = "Not matching timestamp"; - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); - logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), getTimestamp(actual), - errorMsg, null, null, isBeforeRebuild, INVALID_ROW); - return; - } - int expectedCellCount = 0; - for (List cells : expected.getFamilyCellMap().values()) { - if (cells == null) { - continue; - } - for (Cell expectedCell : cells) { - expectedCellCount++; - byte[] family = CellUtil.cloneFamily(expectedCell); - byte[] qualifier = CellUtil.cloneQualifier(expectedCell); - Cell actualCell = getCell(actual, family, qualifier); - if (actualCell == null || expectedCell.getType() != actualCell.getType()) { - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); - String errorMsg = "Missing cell (in iteration " + iteration + ") " + Bytes.toString(family) + ":" + Bytes.toString(qualifier); - logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), - getTimestamp(actual), errorMsg, isBeforeRebuild, INVALID_ROW); - verificationPhaseResult.setIndexHasMissingCellsCount(verificationPhaseResult.getIndexHasMissingCellsCount() + 1); - return; - } - if (!CellUtil.matchingValue(actualCell, expectedCell)) { - String errorMsg = "Not matching value (in iteration " + iteration + ") for " + Bytes.toString(family) + ":" + Bytes.toString(qualifier); - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); - logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), getTimestamp(actual), - errorMsg, CellUtil.cloneValue(expectedCell), - CellUtil.cloneValue(actualCell), isBeforeRebuild, INVALID_ROW); - return; - } - } - } - int actualCellCount = 0; - for (List cells : actual.getFamilyCellMap().values()) { - if (cells == null) { - continue; - } - actualCellCount += cells.size(); - } - if (expectedCellCount != actualCellCount) { - String errorMsg = "Index has extra cells (in iteration " + iteration + ")"; - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); - logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), getTimestamp(actual), - errorMsg, isBeforeRebuild, EXTRA_CELLS); - verificationPhaseResult.setIndexHasExtraCellsCount(verificationPhaseResult.getIndexHasExtraCellsCount() + 1); - } + if (dataHTable != null) { + dataHTable.close(); } - - private boolean isMatchingMutation(Mutation 
expected, Mutation actual) { - if (getTimestamp(expected) != getTimestamp(actual)) { - return false; - } - int expectedCellCount = 0; - for (List cells : expected.getFamilyCellMap().values()) { - if (cells == null) { - continue; - } - for (Cell expectedCell : cells) { - expectedCellCount++; - byte[] family = CellUtil.cloneFamily(expectedCell); - byte[] qualifier = CellUtil.cloneQualifier(expectedCell); - Cell actualCell = getCell(actual, family, qualifier); - if (actualCell == null || - expectedCell.getType() != actualCell.getType()) { - return false; - } - if (!CellUtil.matchingValue(actualCell, expectedCell)) { - return false; - } - } - } - int actualCellCount = 0; - for (List cells : actual.getFamilyCellMap().values()) { - if (cells == null) { - continue; - } - actualCellCount += cells.size(); - } - if (expectedCellCount != actualCellCount) { - return false; - } - return true; + } + + @Override + public void close() throws IOException { + innerScanner.close(); + if (indexRowKeyforReadRepair != null) { + closeTables(); + return; + } + if (verify) { + try { + if (verificationResultRepository != null) { + verificationResultRepository.logToIndexToolResultTable(verificationResult, verifyType, + region.getRegionInfo().getRegionName(), skipped, shouldRetry); + } + } finally { + this.pool.stop("IndexRegionObserverRegionScanner is closing"); + closeTables(); + if (verificationResultRepository != null) { + verificationResultRepository.close(); + } + if (verificationOutputRepository != null) { + verificationOutputRepository.close(); + } + } + } else { + this.pool.stop("GlobalIndexRegionScanner is closing"); + closeTables(); + } + } + + @VisibleForTesting + public int setIndexTableTTL(int ttl) { + indexTableTTL = ttl; + return 0; + } + + @VisibleForTesting + public int setIndexMaintainer(IndexMaintainer indexMaintainer) { + this.indexMaintainer = indexMaintainer; + return 0; + } + + @VisibleForTesting + public long setMaxLookBackInMills(long maxLookBackInMills) { + this.maxLookBackInMills = maxLookBackInMills; + return 0; + } + + public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, + long indexRowTs, String errorMsg, boolean isBeforeRebuild, + IndexVerificationOutputRepository.IndexVerificationErrorType errorType) throws IOException { + logToIndexToolOutputTable(dataRowKey, indexRowKey, dataRowTs, indexRowTs, errorMsg, null, null, + isBeforeRebuild, errorType); + } + + protected byte[] getDataTableName() { + return region.getRegionInfo().getTable().getName(); + } + + @VisibleForTesting + public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, + long indexRowTs, String errorMsg, byte[] expectedVaue, byte[] actualValue, + boolean isBeforeRebuild, IndexVerificationOutputRepository.IndexVerificationErrorType errorType) + throws IOException { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + byte[] dataTableName = getDataTableName(); + verificationOutputRepository.logToIndexToolOutputTable(dataRowKey, indexRowKey, dataRowTs, + indexRowTs, errorMsg, expectedVaue, actualValue, scan.getTimeRange().getMax(), dataTableName, + isBeforeRebuild, errorType); + } + + private static Cell getCell(Mutation m, byte[] family, byte[] qualifier) { + List cellList = m.getFamilyCellMap().get(family); + if (cellList == null) { + return null; + } + for (Cell cell : cellList) { + if (CellUtil.matchingQualifier(cell, qualifier)) { + return cell; + } + } + return null; + } + + private void logMismatch(Mutation expected, Mutation 
actual, int iteration, + IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) + throws IOException { + if (getTimestamp(expected) != getTimestamp(actual)) { + String errorMsg = "Not matching timestamp"; + byte[] dataKey = indexMaintainer + .buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); + logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), + getTimestamp(actual), errorMsg, null, null, isBeforeRebuild, INVALID_ROW); + return; + } + int expectedCellCount = 0; + for (List cells : expected.getFamilyCellMap().values()) { + if (cells == null) { + continue; + } + for (Cell expectedCell : cells) { + expectedCellCount++; + byte[] family = CellUtil.cloneFamily(expectedCell); + byte[] qualifier = CellUtil.cloneQualifier(expectedCell); + Cell actualCell = getCell(actual, family, qualifier); + if (actualCell == null || expectedCell.getType() != actualCell.getType()) { + byte[] dataKey = indexMaintainer + .buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); + String errorMsg = "Missing cell (in iteration " + iteration + ") " + + Bytes.toString(family) + ":" + Bytes.toString(qualifier); + logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), + getTimestamp(actual), errorMsg, isBeforeRebuild, INVALID_ROW); + verificationPhaseResult.setIndexHasMissingCellsCount( + verificationPhaseResult.getIndexHasMissingCellsCount() + 1); + return; + } + if (!CellUtil.matchingValue(actualCell, expectedCell)) { + String errorMsg = "Not matching value (in iteration " + iteration + ") for " + + Bytes.toString(family) + ":" + Bytes.toString(qualifier); + byte[] dataKey = indexMaintainer + .buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); + logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), + getTimestamp(actual), errorMsg, CellUtil.cloneValue(expectedCell), + CellUtil.cloneValue(actualCell), isBeforeRebuild, INVALID_ROW); + return; + } + } + } + int actualCellCount = 0; + for (List cells : actual.getFamilyCellMap().values()) { + if (cells == null) { + continue; + } + actualCellCount += cells.size(); + } + if (expectedCellCount != actualCellCount) { + String errorMsg = "Index has extra cells (in iteration " + iteration + ")"; + byte[] dataKey = indexMaintainer + .buildDataRowKey(new ImmutableBytesWritable(expected.getRow()), viewConstants); + logToIndexToolOutputTable(dataKey, expected.getRow(), getTimestamp(expected), + getTimestamp(actual), errorMsg, isBeforeRebuild, EXTRA_CELLS); + verificationPhaseResult + .setIndexHasExtraCellsCount(verificationPhaseResult.getIndexHasExtraCellsCount() + 1); } + } - private boolean isVerified(Put mutation) throws IOException { - List cellList = mutation.get(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier()); - Cell cell = (cellList != null && !cellList.isEmpty()) ? 
cellList.get(0) : null; - if (cell == null) { - return false; - } - if (Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0) { - return true; - } - return false; - } - - /** - * This is to reorder the mutations in descending order by the tuple of timestamp and mutation type where - * delete comes before put - */ - public static final Comparator MUTATION_TS_DESC_COMPARATOR = new Comparator() { - @Override - public int compare(Mutation o1, Mutation o2) { - long ts1 = getTimestamp(o1); - long ts2 = getTimestamp(o2); - if (ts1 > ts2) { - return -1; - } - if (ts1 < ts2) { - return 1; - } - if (o1 instanceof Delete && o2 instanceof Put) { - return -1; - } - if (o1 instanceof Put && o2 instanceof Delete) { - return 1; - } - return 0; + private boolean isMatchingMutation(Mutation expected, Mutation actual) { + if (getTimestamp(expected) != getTimestamp(actual)) { + return false; + } + int expectedCellCount = 0; + for (List cells : expected.getFamilyCellMap().values()) { + if (cells == null) { + continue; + } + for (Cell expectedCell : cells) { + expectedCellCount++; + byte[] family = CellUtil.cloneFamily(expectedCell); + byte[] qualifier = CellUtil.cloneQualifier(expectedCell); + Cell actualCell = getCell(actual, family, qualifier); + if (actualCell == null || expectedCell.getType() != actualCell.getType()) { + return false; + } + if (!CellUtil.matchingValue(actualCell, expectedCell)) { + return false; + } + } + } + int actualCellCount = 0; + for (List cells : actual.getFamilyCellMap().values()) { + if (cells == null) { + continue; + } + actualCellCount += cells.size(); + } + if (expectedCellCount != actualCellCount) { + return false; + } + return true; + } + + private boolean isVerified(Put mutation) throws IOException { + List cellList = + mutation.get(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier()); + Cell cell = (cellList != null && !cellList.isEmpty()) ? cellList.get(0) : null; + if (cell == null) { + return false; + } + if ( + Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0 + ) { + return true; + } + return false; + } + + /** + * This is to reorder the mutations in descending order by the tuple of timestamp and mutation + * type where delete comes before put + */ + public static final Comparator MUTATION_TS_DESC_COMPARATOR = + new Comparator() { + @Override + public int compare(Mutation o1, Mutation o2) { + long ts1 = getTimestamp(o1); + long ts2 = getTimestamp(o2); + if (ts1 > ts2) { + return -1; + } + if (ts1 < ts2) { + return 1; + } + if (o1 instanceof Delete && o2 instanceof Put) { + return -1; + } + if (o1 instanceof Put && o2 instanceof Delete) { + return 1; } + return 0; + } }; - private boolean isDeleteFamily(Mutation mutation) { - for (List cells : mutation.getFamilyCellMap().values()) { - for (Cell cell : cells) { - if (cell.getType() == Cell.Type.DeleteFamily) { - return true; - } - } - } - return false; - } - - private void updateUnverifiedIndexRowCounters(Put actual, long expectedTs, List indexRowsToBeDeleted, - IndexToolVerificationResult.PhaseResult verificationPhaseResult) { - // Get the empty column of the given index row - List cellList = actual.get(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier()); - Cell cell = (cellList != null && !cellList.isEmpty()) ? 
cellList.get(0) : null; - if (cell == null) { - // There is no empty column on the given index row. We do not know if this is a row generated by the new - // or the old design - verificationPhaseResult.setUnknownIndexRowCount(verificationPhaseResult.getUnknownIndexRowCount() + 1); - return; - } - if (Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0) { - // This is a verified index row, so nothing to do here - return; - } else if (Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - UNVERIFIED_BYTES, 0, UNVERIFIED_BYTES.length) == 0) { - verificationPhaseResult.setUnverifiedIndexRowCount(verificationPhaseResult.getUnverifiedIndexRowCount() + 1); - return; - } - // The empty column value is neither "verified" or "unverified". This must be a row from the old design - verificationPhaseResult.setOldIndexRowCount(verificationPhaseResult.getOldIndexRowCount() + 1); - if (verifyType == IndexTool.IndexVerifyType.BEFORE || verifyType == IndexTool.IndexVerifyType.BOTH) { - long actualTs = getTimestamp(actual); - if (actualTs > expectedTs) { - indexRowsToBeDeleted.add(indexMaintainer.buildRowDeleteMutation(actual.getRow(), - IndexMaintainer.DeleteType.SINGLE_VERSION, actualTs)); - } + private boolean isDeleteFamily(Mutation mutation) { + for (List cells : mutation.getFamilyCellMap().values()) { + for (Cell cell : cells) { + if (cell.getType() == Cell.Type.DeleteFamily) { + return true; } + } } - - /** - * actualIndexMutationList is the list of all the mutations of a single extra index row (i.e. not referenced by data row) - * ordered by decreasing order of timestamps with Deletes before Puts - */ - private void logExtraIndexRowAndUpdateCounters(List actualIndexMutationList, - IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) throws IOException { - for (Mutation m : actualIndexMutationList) { - // this extra row in the index table has already been deleted - if ((m instanceof Delete)) { - return; - } - - // check the empty column status of latest (most recent) put mutation - if (isVerified((Put) m)) { - verificationPhaseResult.setExtraVerifiedIndexRowCount( - verificationPhaseResult.getExtraVerifiedIndexRowCount() + 1); - } else { - verificationPhaseResult.setExtraUnverifiedIndexRowCount( - verificationPhaseResult.getExtraUnverifiedIndexRowCount() + 1); + return false; + } + + private void updateUnverifiedIndexRowCounters(Put actual, long expectedTs, + List indexRowsToBeDeleted, + IndexToolVerificationResult.PhaseResult verificationPhaseResult) { + // Get the empty column of the given index row + List cellList = + actual.get(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier()); + Cell cell = (cellList != null && !cellList.isEmpty()) ? cellList.get(0) : null; + if (cell == null) { + // There is no empty column on the given index row. 
We do not know if this is a row generated + // by the new + // or the old design + verificationPhaseResult + .setUnknownIndexRowCount(verificationPhaseResult.getUnknownIndexRowCount() + 1); + return; + } + if ( + Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + VERIFIED_BYTES, 0, VERIFIED_BYTES.length) == 0 + ) { + // This is a verified index row, so nothing to do here + return; + } else if ( + Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + UNVERIFIED_BYTES, 0, UNVERIFIED_BYTES.length) == 0 + ) { + verificationPhaseResult + .setUnverifiedIndexRowCount(verificationPhaseResult.getUnverifiedIndexRowCount() + 1); + return; + } + // The empty column value is neither "verified" or "unverified". This must be a row from the old + // design + verificationPhaseResult.setOldIndexRowCount(verificationPhaseResult.getOldIndexRowCount() + 1); + if ( + verifyType == IndexTool.IndexVerifyType.BEFORE || verifyType == IndexTool.IndexVerifyType.BOTH + ) { + long actualTs = getTimestamp(actual); + if (actualTs > expectedTs) { + indexRowsToBeDeleted.add(indexMaintainer.buildRowDeleteMutation(actual.getRow(), + IndexMaintainer.DeleteType.SINGLE_VERSION, actualTs)); + } + } + } + + /** + * actualIndexMutationList is the list of all the mutations of a single extra index row (i.e. not + * referenced by data row) ordered by decreasing order of timestamps with Deletes before Puts + */ + private void logExtraIndexRowAndUpdateCounters(List actualIndexMutationList, + IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) + throws IOException { + for (Mutation m : actualIndexMutationList) { + // this extra row in the index table has already been deleted + if ((m instanceof Delete)) { + return; + } + + // check the empty column status of latest (most recent) put mutation + if (isVerified((Put) m)) { + verificationPhaseResult.setExtraVerifiedIndexRowCount( + verificationPhaseResult.getExtraVerifiedIndexRowCount() + 1); + } else { + verificationPhaseResult.setExtraUnverifiedIndexRowCount( + verificationPhaseResult.getExtraUnverifiedIndexRowCount() + 1); + } + + byte[] indexKey = m.getRow(); + byte[] dataKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexKey), viewConstants); + String errorMsg = ERROR_MESSAGE_EXTRA_INDEX_ROW; + IndexVerificationOutputRepository.IndexVerificationErrorType errorType = EXTRA_ROW; + logToIndexToolOutputTable(dataKey, indexKey, 0, getTimestamp(m), errorMsg, isBeforeRebuild, + errorType); + break; + } + } + + /** + * In this method, the actual list is repaired in memory using the expected list which is actually + * the output of rebuilding the index table row. The result of this repair is used only for + * verification. 
+ */ + private void repairActualMutationList(List actualMutationList, + List expectedMutationList) throws IOException { + // Find the first (latest) actual unverified put mutation + List repairedMutationList = new ArrayList<>(expectedMutationList.size()); + for (Mutation actual : actualMutationList) { + if (actual instanceof Put && !isVerified((Put) actual)) { + long ts = getTimestamp(actual); + int expectedIndex; + int expectedListSize = expectedMutationList.size(); + for (expectedIndex = 0; expectedIndex < expectedListSize; expectedIndex++) { + if (getTimestamp(expectedMutationList.get(expectedIndex)) <= ts) { + if (expectedIndex > 0) { + expectedIndex--; } - - byte[] indexKey = m.getRow(); - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexKey), viewConstants); - String errorMsg = ERROR_MESSAGE_EXTRA_INDEX_ROW; - IndexVerificationOutputRepository.IndexVerificationErrorType errorType = EXTRA_ROW; - logToIndexToolOutputTable(dataKey, indexKey, 0, getTimestamp(m), errorMsg, - isBeforeRebuild, errorType); break; - } + } + } + if (expectedIndex == expectedListSize) { + continue; + } + for (; expectedIndex < expectedListSize; expectedIndex++) { + Mutation mutation = expectedMutationList.get(expectedIndex); + if (mutation instanceof Put) { + mutation = new Put((Put) mutation); + } else { + mutation = new Delete((Delete) mutation); + } + repairedMutationList.add(mutation); + } + // Since we repair the entire history, there is no need to more than once + break; + } } - - /** - * In this method, the actual list is repaired in memory using the expected list which is actually the output of - * rebuilding the index table row. The result of this repair is used only for verification. - */ - private void repairActualMutationList(List actualMutationList, List expectedMutationList) - throws IOException { - // Find the first (latest) actual unverified put mutation - List repairedMutationList = new ArrayList<>(expectedMutationList.size()); - for (Mutation actual : actualMutationList) { - if (actual instanceof Put && !isVerified((Put) actual)) { - long ts = getTimestamp(actual); - int expectedIndex; - int expectedListSize = expectedMutationList.size(); - for (expectedIndex = 0; expectedIndex < expectedListSize; expectedIndex++) { - if (getTimestamp(expectedMutationList.get(expectedIndex)) <= ts) { - if (expectedIndex > 0) { - expectedIndex--; - } - break; - } - } - if (expectedIndex == expectedListSize) { - continue; - } - for (; expectedIndex < expectedListSize; expectedIndex++) { - Mutation mutation = expectedMutationList.get(expectedIndex); - if (mutation instanceof Put) { - mutation = new Put((Put) mutation); - } else { - mutation = new Delete((Delete) mutation); - } - repairedMutationList.add(mutation); - } - // Since we repair the entire history, there is no need to more than once - break; - } - } - if (repairedMutationList.isEmpty()) { - return; - } - actualMutationList.addAll(repairedMutationList); - Collections.sort(actualMutationList, MUTATION_TS_DESC_COMPARATOR); - } - - private void cleanUpActualMutationList(List actualMutationList) - throws IOException { - Iterator iterator = actualMutationList.iterator(); - Mutation previous = null; - while (iterator.hasNext()) { - Mutation mutation = iterator.next(); - if ((mutation instanceof Put && !isVerified((Put) mutation)) || - (mutation instanceof Delete && !isDeleteFamily(mutation))) { - iterator.remove(); - } else { - if (((previous instanceof Put && mutation instanceof Put) || - previous instanceof Delete && mutation 
instanceof Delete) && - isMatchingMutation(previous, mutation)) { - iterator.remove(); - } else { - previous = mutation; - } - } - } + if (repairedMutationList.isEmpty()) { + return; } - - /** - * There are two types of verification: without repair and with repair. Without-repair verification is done before - * or after index rebuild. It is done before index rebuild to identify the rows to be rebuilt. It is done after - * index rebuild to verify the rows that have been rebuilt. With-repair verification can be done anytime using - * the “-v ONLY” option to check the consistency of the index table. Note that with-repair verification simulates - * read repair in-memory for the purpose of verification, but does not actually repair the data in the index. - * - * Unverified Rows - * - * For each mutable data table mutation during regular data table updates, two operations are done on the data table. - * One is to read the existing row state, and the second is to update the data table for this row. The processing of - * concurrent data mutations are serialized once for reading the existing row states, and then serialized again - * for updating the data table. In other words, they go through locking twice, i.e., [lock, read, unlock] and - * [lock, write, unlock]. Because of this two phase locking, for a pair of concurrent mutations (for the same row), - * the same row state can be read from the data table. This means the same existing index row can be made unverified - * twice with different timestamps, one for each concurrent mutation. These unverified mutations can be repaired - * from the data table later during HBase scans using the index read repair process. This is one of the reasons - * for having extra unverified rows in the index table. The other reason is the data table write failures. - * When a data table write fails, it leaves an unverified index row behind. These rows are never returned to clients, - * instead they are repaired, which means either they are rebuilt from their data table rows or they are deleted if - * their data table rows do not exist. - * - * Delete Family Version Markers - * - * The family version delete markers are generated by the read repair to remove extra unverified rows. They only - * show up in the actual mutation list since they are not generated for regular table updates or index rebuilds. - * For the verification purpose, these delete markers can be treated as extra unverified rows and can be safely - * skipped. - * - * Delete Family Markers - * Delete family markers are generated during read repair, regular table updates and index rebuilds to delete index - * table rows. The read repair generates them to delete extra unverified rows. During regular table updates or - * index rebuilds, the delete family markers are used to delete index rows due to data table row deletes or - * data table row overwrites. - * - * Verification Algorithm - * - * IndexTool verification generates an expected list of index mutations from the data table rows and uses this list - * to check if index table rows are consistent with the data table. - * - * The expect list is generated using the index rebuild algorithm. 
This mean for a given row, the list can include - * a number of put and delete mutations such that the followings hold: - * - * Every mutation will include a set of cells with the same timestamp - * Every mutation has a different timestamp - * A delete mutation will include only delete family cells and it is for deleting the entire row and its versions - * Every put mutation is verified - * - * For both verification types, after the expected list of index mutations is constructed for a given data table, - * another list called the actual list of index mutations is constructed by reading the index table row using HBase - * raw scan and all versions of the cells of the row are retrieved. - * - * As in the construction for the expected list, the cells are grouped into a put and a delete set. The put and - * delete sets for a given row are further grouped based on their timestamps into put and delete mutations such that - * all the cells in a mutation have the timestamps. The put and delete mutations are then sorted within a single - * list. Mutations in this list are sorted in ascending order of their timestamp. This list is the actual list. - * - * For the without-repair verification, unverified mutations and family version delete markers are removed from - * the actual list and then the list is compared with the expected list. - * - * In case of the with-repair verification, the actual list is first repaired, then unverified mutations and family - * version delete markers are removed from the actual list and finally the list is compared with the expected list. - * - * The actual list is repaired as follows: Every unverified mutation is repaired using the method read repair uses. - * However, instead of going through actual repair implementation, the expected mutations are used for repair. - */ - - @VisibleForTesting - public boolean verifySingleIndexRow(byte[] indexRowKey, List actualMutationList, List expectedMutationList, - Set mostRecentIndexRowKeys, List indexRowsToBeDeleted, - IndexToolVerificationResult.PhaseResult verificationPhaseResult, - boolean isBeforeRebuild) - throws IOException { - if (expectedMutationList == null) { - throw new DoNotRetryIOException(NO_EXPECTED_MUTATION); - } - if (actualMutationList == null || actualMutationList.isEmpty()) { - throw new DoNotRetryIOException(ACTUAL_MUTATION_IS_NULL_OR_EMPTY); + actualMutationList.addAll(repairedMutationList); + Collections.sort(actualMutationList, MUTATION_TS_DESC_COMPARATOR); + } + + private void cleanUpActualMutationList(List actualMutationList) throws IOException { + Iterator iterator = actualMutationList.iterator(); + Mutation previous = null; + while (iterator.hasNext()) { + Mutation mutation = iterator.next(); + if ( + (mutation instanceof Put && !isVerified((Put) mutation)) + || (mutation instanceof Delete && !isDeleteFamily(mutation)) + ) { + iterator.remove(); + } else { + if ( + ((previous instanceof Put && mutation instanceof Put) + || previous instanceof Delete && mutation instanceof Delete) + && isMatchingMutation(previous, mutation) + ) { + iterator.remove(); + } else { + previous = mutation; } + } + } + } + + /** + * There are two types of verification: without repair and with repair. Without-repair + * verification is done before or after index rebuild. It is done before index rebuild to identify + * the rows to be rebuilt. It is done after index rebuild to verify the rows that have been + * rebuilt. 
With-repair verification can be done anytime using the “-v ONLY” option to check the + * consistency of the index table. Note that with-repair verification simulates read repair + * in-memory for the purpose of verification, but does not actually repair the data in the index. + * Unverified Rows For each mutable data table mutation during regular data table updates, two + * operations are done on the data table. One is to read the existing row state, and the second is + * to update the data table for this row. The processing of concurrent data mutations are + * serialized once for reading the existing row states, and then serialized again for updating the + * data table. In other words, they go through locking twice, i.e., [lock, read, unlock] and + * [lock, write, unlock]. Because of this two phase locking, for a pair of concurrent mutations + * (for the same row), the same row state can be read from the data table. This means the same + * existing index row can be made unverified twice with different timestamps, one for each + * concurrent mutation. These unverified mutations can be repaired from the data table later + * during HBase scans using the index read repair process. This is one of the reasons for having + * extra unverified rows in the index table. The other reason is the data table write failures. + * When a data table write fails, it leaves an unverified index row behind. These rows are never + * returned to clients, instead they are repaired, which means either they are rebuilt from their + * data table rows or they are deleted if their data table rows do not exist. Delete Family + * Version Markers The family version delete markers are generated by the read repair to remove + * extra unverified rows. They only show up in the actual mutation list since they are not + * generated for regular table updates or index rebuilds. For the verification purpose, these + * delete markers can be treated as extra unverified rows and can be safely skipped. Delete Family + * Markers Delete family markers are generated during read repair, regular table updates and index + * rebuilds to delete index table rows. The read repair generates them to delete extra unverified + * rows. During regular table updates or index rebuilds, the delete family markers are used to + * delete index rows due to data table row deletes or data table row overwrites. Verification + * Algorithm IndexTool verification generates an expected list of index mutations from the data + * table rows and uses this list to check if index table rows are consistent with the data table. + * The expect list is generated using the index rebuild algorithm. This mean for a given row, the + * list can include a number of put and delete mutations such that the followings hold: Every + * mutation will include a set of cells with the same timestamp Every mutation has a different + * timestamp A delete mutation will include only delete family cells and it is for deleting the + * entire row and its versions Every put mutation is verified For both verification types, after + * the expected list of index mutations is constructed for a given data table, another list called + * the actual list of index mutations is constructed by reading the index table row using HBase + * raw scan and all versions of the cells of the row are retrieved. As in the construction for the + * expected list, the cells are grouped into a put and a delete set. 
The put and delete sets for a + * given row are further grouped based on their timestamps into put and delete mutations such that + * all the cells in a mutation have the timestamps. The put and delete mutations are then sorted + * within a single list. Mutations in this list are sorted in ascending order of their timestamp. + * This list is the actual list. For the without-repair verification, unverified mutations and + * family version delete markers are removed from the actual list and then the list is compared + * with the expected list. In case of the with-repair verification, the actual list is first + * repaired, then unverified mutations and family version delete markers are removed from the + * actual list and finally the list is compared with the expected list. The actual list is + * repaired as follows: Every unverified mutation is repaired using the method read repair uses. + * However, instead of going through actual repair implementation, the expected mutations are used + * for repair. + */ + + @VisibleForTesting + public boolean verifySingleIndexRow(byte[] indexRowKey, List actualMutationList, + List expectedMutationList, Set mostRecentIndexRowKeys, + List indexRowsToBeDeleted, + IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) + throws IOException { + if (expectedMutationList == null) { + throw new DoNotRetryIOException(NO_EXPECTED_MUTATION); + } + if (actualMutationList == null || actualMutationList.isEmpty()) { + throw new DoNotRetryIOException(ACTUAL_MUTATION_IS_NULL_OR_EMPTY); + } - if (isBeforeRebuild) { - Mutation m = actualMutationList.get(0); - if (m instanceof Put && (mostRecentIndexRowKeys.isEmpty() || mostRecentIndexRowKeys.contains(m.getRow()))) { - // We do check here only the latest version as older versions will always be unverified before - // newer versions are inserted. - updateUnverifiedIndexRowCounters((Put) m, getTimestamp(expectedMutationList.get(0)), indexRowsToBeDeleted, verificationPhaseResult); - } - } - if (verifyType == IndexTool.IndexVerifyType.ONLY) { - repairActualMutationList(actualMutationList, expectedMutationList); - } - // actualMutationList can be empty after returning from this function - cleanUpActualMutationList(actualMutationList); - - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - int actualIndex = 0; - int expectedIndex = 0; - int expectedSize = expectedMutationList.size(); - int actualSize = actualMutationList.size(); - Mutation expected = null; - Mutation previousExpected; - Mutation actual = null; - while (expectedIndex < expectedSize && actualIndex = getTimestamp(expected) && actual instanceof Delete) { + actualIndex++; + if (actualIndex == actualSize) { + break; } actual = actualMutationList.get(actualIndex); - if (expected instanceof Put) { - if (previousExpected instanceof Delete) { - // Between an expected delete and put, there can be one or more deletes due to - // concurrent mutations or data table write failures. Skip all of them if any - // There cannot be any actual delete mutation between two expected put mutations. 
- while (getTimestamp(actual) >= getTimestamp(expected) && actual instanceof Delete) { - actualIndex++; - if (actualIndex == actualSize) { - break; - } - actual = actualMutationList.get(actualIndex); - } - if (actualIndex == actualSize) { - break; - } - } - if (actual instanceof Delete) { - break; - } - if (isMatchingMutation(expected, actual)) { - expectedIndex++; - actualIndex++; - continue; - } - } else { // expected instanceof Delete - // Between put and delete, delete and delete, or before the first delete, there can be other deletes. - // Skip all of them if any - while (getTimestamp(actual) > getTimestamp(expected) && actual instanceof Delete) { - actualIndex++; - if (actualIndex == actualSize) { - break; - } - actual = actualMutationList.get(actualIndex); - } - if (actualIndex == actualSize) { - break; - } - if (getTimestamp(actual) == getTimestamp(expected) && - (actual instanceof Delete && isDeleteFamily(actual))) { - expectedIndex++; - actualIndex++; - continue; - } - } + } + if (actualIndex == actualSize) { break; - } - - if (expectedIndex == expectedSize ){ - // every expected mutation has its matching one in the actual list. - verificationPhaseResult.setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount() + 1); - return true; - } - - if (isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, getTimestamp(expectedMutationList.get(expectedIndex)))){ - if (expectedIndex > 0) { - // if current expected index mutation is beyond max look back window, we only need to make sure its latest - // mutation is a matching one, as an SCN query is required. - verificationPhaseResult. - setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount() + 1); - return true; - } - - // All expected mutations are beyond the maxLookBack window, none of them can find its matching one in actual list - // It may be caused by real bug or compaction on the index table. - // We report it as a failure, so "before" option can trigger the index rebuild for this row. - // This repair is required, when there is only one index row for a given data table row and the timestamp of that row - // can be beyond maxLookBack. - verificationPhaseResult. - setBeyondMaxLookBackInvalidIndexRowCount(verificationPhaseResult.getBeyondMaxLookBackInvalidIndexRowCount() + 1); - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); - String errorMsg = String.format("Expect %1$s mutations but got %2$s (beyond maxLookBack)", - expectedSize, - actualSize); - logToIndexToolOutputTable(dataKey, indexRowKey, - getTimestamp(expectedMutationList.get(expectedIndex)), - 0, errorMsg, isBeforeRebuild, BEYOND_MAX_LOOKBACK_INVALID); - return false; - } - else { - if (actualIndex < actualSize && actual instanceof Put && expected instanceof Put){ - logMismatch(expected, actual, expectedIndex, verificationPhaseResult, isBeforeRebuild); - } - else { - if (expected == null) { - // Happens when the actualMutationList becomes empty after returning from - // the cleanUpActualMutationList function. - expected = expectedMutationList.get(0); - } - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); - String errorMsg = String.format("Not matching index row. expectedIndex=%d. expectedMutationSize=%d. actualIndex=%d. actualMutationSize=%d. expectedType=%s. actualType=%s", - expectedIndex, expectedSize, actualIndex, actualSize, expected.getClass().getName(), (actualIndex < actualSize ? 
actual.getClass().getName() : "null")); - logToIndexToolOutputTable(dataKey, indexRowKey, - getTimestamp(expected), (actualIndex < actualSize ? getTimestamp(actual): 0L), errorMsg, isBeforeRebuild, - INVALID_ROW); - } - verificationPhaseResult.setInvalidIndexRowCount(verificationPhaseResult.getInvalidIndexRowCount() + 1); - return false; - } + } + } + if (actual instanceof Delete) { + break; + } + if (isMatchingMutation(expected, actual)) { + expectedIndex++; + actualIndex++; + continue; + } + } else { // expected instanceof Delete + // Between put and delete, delete and delete, or before the first delete, there can be other + // deletes. + // Skip all of them if any + while (getTimestamp(actual) > getTimestamp(expected) && actual instanceof Delete) { + actualIndex++; + if (actualIndex == actualSize) { + break; + } + actual = actualMutationList.get(actualIndex); + } + if (actualIndex == actualSize) { + break; + } + if ( + getTimestamp(actual) == getTimestamp(expected) + && (actual instanceof Delete && isDeleteFamily(actual)) + ) { + expectedIndex++; + actualIndex++; + continue; + } + } + break; } - protected void verifyIndexRows(Map> actualIndexMutationMap, - Map> expectedIndexMutationMap, - Set mostRecentIndexRowKeys, - List indexRowsToBeDeleted, - IndexToolVerificationResult.PhaseResult verificationPhaseResult, - boolean isBeforeRebuild) throws IOException { - Map> invalidIndexRows = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - try { - for (Map.Entry> entry : actualIndexMutationMap.entrySet()) { - byte[] indexRowKey = entry.getKey(); - List expectedMutationList = expectedIndexMutationMap.get(indexRowKey); - if (expectedMutationList != null) { - if (!verifySingleIndexRow(entry.getKey(), entry.getValue(), expectedMutationList, mostRecentIndexRowKeys, - indexRowsToBeDeleted, verificationPhaseResult, isBeforeRebuild)) { - invalidIndexRows.put(indexRowKey, expectedMutationList); - } - expectedIndexMutationMap.remove(indexRowKey); - } else { - logExtraIndexRowAndUpdateCounters(entry.getValue(), verificationPhaseResult, isBeforeRebuild); - indexRowsToBeDeleted.add(indexMaintainer.buildRowDeleteMutation(indexRowKey, - IndexMaintainer.DeleteType.ALL_VERSIONS, getTimestamp(entry.getValue().get(0)))); - } - } - } catch (Throwable t) { - if (indexHTable != null) { - ClientUtil.throwIOException(indexHTable.getName().toString(), t); - } else { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - } - } - List expiredIndexRows = new ArrayList<>(); - // Check if any expected rows from index(which we didn't get) are already expired due to TTL - // TODO: metrics for expired rows - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - for (Map.Entry> entry: expectedIndexMutationMap.entrySet()) { - List mutationList = entry.getValue(); - if (isTimestampBeforeTTL(indexTableTTL, currentTime, getTimestamp(mutationList.get(mutationList.size() - 1)))) { - verificationPhaseResult.setExpiredIndexRowCount(verificationPhaseResult.getExpiredIndexRowCount() + 1); - expiredIndexRows.add(entry.getKey()); - } - } - // Remove the expired rows from indexMutationMap - for (byte[] indexKey : expiredIndexRows) { - expectedIndexMutationMap.remove(indexKey); - } - // Count and log missing rows - for (Map.Entry> entry: expectedIndexMutationMap.entrySet()) { - byte[] indexKey = entry.getKey(); - List mutationList = entry.getValue(); - Mutation mutation = mutationList.get(mutationList.size() - 1); - if (mutation instanceof Delete) { - continue; - } - currentTime = 
EnvironmentEdgeManager.currentTimeMillis(); - String errorMsg; - IndexVerificationOutputRepository.IndexVerificationErrorType errorType; - if (isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, getTimestamp(mutation))){ - errorMsg = ERROR_MESSAGE_MISSING_INDEX_ROW_BEYOND_MAX_LOOKBACK; - errorType = BEYOND_MAX_LOOKBACK_MISSING; - verificationPhaseResult. - setBeyondMaxLookBackMissingIndexRowCount(verificationPhaseResult.getBeyondMaxLookBackMissingIndexRowCount() + 1); - } - else { - errorMsg = ERROR_MESSAGE_MISSING_INDEX_ROW; - errorType = MISSING_ROW; - verificationPhaseResult.setMissingIndexRowCount(verificationPhaseResult.getMissingIndexRowCount() + 1); - } - byte[] dataKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexKey), viewConstants); - logToIndexToolOutputTable(dataKey, indexKey, getTimestamp(mutation), 0, errorMsg, - isBeforeRebuild, errorType); - } - - // Leave the invalid and missing rows in indexMutationMap - expectedIndexMutationMap.putAll(invalidIndexRows); + if (expectedIndex == expectedSize) { + // every expected mutation has its matching one in the actual list. + verificationPhaseResult + .setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount() + 1); + return true; } - // After IndexerRegionScanner is removed, this method should become abstract - protected void commitBatch(List indexUpdates) throws IOException, InterruptedException{ - throw new DoNotRetryIOException("This method has not been implement by the sub class"); + if ( + isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, + getTimestamp(expectedMutationList.get(expectedIndex))) + ) { + if (expectedIndex > 0) { + // if current expected index mutation is beyond max look back window, we only need to make + // sure its latest + // mutation is a matching one, as an SCN query is required. + verificationPhaseResult + .setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount() + 1); + return true; + } + + // All expected mutations are beyond the maxLookBack window, none of them can find its + // matching one in actual list + // It may be caused by real bug or compaction on the index table. + // We report it as a failure, so "before" option can trigger the index rebuild for this row. + // This repair is required, when there is only one index row for a given data table row and + // the timestamp of that row + // can be beyond maxLookBack. + verificationPhaseResult.setBeyondMaxLookBackInvalidIndexRowCount( + verificationPhaseResult.getBeyondMaxLookBackInvalidIndexRowCount() + 1); + byte[] dataKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); + String errorMsg = String.format("Expect %1$s mutations but got %2$s (beyond maxLookBack)", + expectedSize, actualSize); + logToIndexToolOutputTable(dataKey, indexRowKey, + getTimestamp(expectedMutationList.get(expectedIndex)), 0, errorMsg, isBeforeRebuild, + BEYOND_MAX_LOOKBACK_INVALID); + return false; + } else { + if (actualIndex < actualSize && actual instanceof Put && expected instanceof Put) { + logMismatch(expected, actual, expectedIndex, verificationPhaseResult, isBeforeRebuild); + } else { + if (expected == null) { + // Happens when the actualMutationList becomes empty after returning from + // the cleanUpActualMutationList function. + expected = expectedMutationList.get(0); + } + byte[] dataKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); + String errorMsg = String.format( + "Not matching index row. expectedIndex=%d. 
expectedMutationSize=%d. actualIndex=%d. actualMutationSize=%d. expectedType=%s. actualType=%s", + expectedIndex, expectedSize, actualIndex, actualSize, expected.getClass().getName(), + (actualIndex < actualSize ? actual.getClass().getName() : "null")); + logToIndexToolOutputTable(dataKey, indexRowKey, getTimestamp(expected), + (actualIndex < actualSize ? getTimestamp(actual) : 0L), errorMsg, isBeforeRebuild, + INVALID_ROW); + } + verificationPhaseResult + .setInvalidIndexRowCount(verificationPhaseResult.getInvalidIndexRowCount() + 1); + return false; } - - protected void updateIndexRows(Map> indexMutationMap, - List indexRowsToBeDeleted, - IndexToolVerificationResult verificationResult) throws IOException { - try { - int batchSize = 0; - List indexUpdates = new ArrayList(maxBatchSize); - for (List mutationList : indexMutationMap.values()) { - indexUpdates.addAll(mutationList); - batchSize += mutationList.size(); - if (batchSize >= maxBatchSize) { - commitBatch(indexUpdates); - batchSize = 0; - indexUpdates = new ArrayList(maxBatchSize); - } - } - if (batchSize > 0) { - commitBatch(indexUpdates); - } - batchSize = 0; - indexUpdates = new ArrayList(maxBatchSize); - for (Mutation mutation : indexRowsToBeDeleted) { - indexUpdates.add(mutation); - batchSize ++; - if (batchSize >= maxBatchSize) { - commitBatch(indexUpdates); - batchSize = 0; - indexUpdates = new ArrayList(maxBatchSize); - } - } - if (batchSize > 0) { - commitBatch(indexUpdates); - } - if (verify) { - verificationResult.setRebuiltIndexRowCount(verificationResult.getRebuiltIndexRowCount() + indexMutationMap.size()); - } - } catch (Throwable t) { - if (indexHTable != null) { - ClientUtil.throwIOException(indexHTable.getName().toString(), t); - } else { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - } - } + } + + protected void verifyIndexRows(Map> actualIndexMutationMap, + Map> expectedIndexMutationMap, Set mostRecentIndexRowKeys, + List indexRowsToBeDeleted, + IndexToolVerificationResult.PhaseResult verificationPhaseResult, boolean isBeforeRebuild) + throws IOException { + Map> invalidIndexRows = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + try { + for (Map.Entry> entry : actualIndexMutationMap.entrySet()) { + byte[] indexRowKey = entry.getKey(); + List expectedMutationList = expectedIndexMutationMap.get(indexRowKey); + if (expectedMutationList != null) { + if ( + !verifySingleIndexRow(entry.getKey(), entry.getValue(), expectedMutationList, + mostRecentIndexRowKeys, indexRowsToBeDeleted, verificationPhaseResult, + isBeforeRebuild) + ) { + invalidIndexRows.put(indexRowKey, expectedMutationList); + } + expectedIndexMutationMap.remove(indexRowKey); + } else { + logExtraIndexRowAndUpdateCounters(entry.getValue(), verificationPhaseResult, + isBeforeRebuild); + indexRowsToBeDeleted.add(indexMaintainer.buildRowDeleteMutation(indexRowKey, + IndexMaintainer.DeleteType.ALL_VERSIONS, getTimestamp(entry.getValue().get(0)))); + } + } + } catch (Throwable t) { + if (indexHTable != null) { + ClientUtil.throwIOException(indexHTable.getName().toString(), t); + } else { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + } } - - @VisibleForTesting - public List prepareActualIndexMutations(Result indexRow) throws IOException { - Put put = null; - Delete del = null; - for (Cell cell : indexRow.rawCells()) { - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - } - put.add(cell); - } else { - if (del == null) { - del = new 
Delete(CellUtil.cloneRow(cell)); - } - del.add(cell); - } - } - return getMutationsWithSameTS(put, del, MUTATION_TS_DESC_COMPARATOR); + List expiredIndexRows = new ArrayList<>(); + // Check if any expected rows from index(which we didn't get) are already expired due to TTL + // TODO: metrics for expired rows + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + for (Map.Entry> entry : expectedIndexMutationMap.entrySet()) { + List mutationList = entry.getValue(); + if ( + isTimestampBeforeTTL(indexTableTTL, currentTime, + getTimestamp(mutationList.get(mutationList.size() - 1))) + ) { + verificationPhaseResult + .setExpiredIndexRowCount(verificationPhaseResult.getExpiredIndexRowCount() + 1); + expiredIndexRows.add(entry.getKey()); + } } - - protected Scan prepareIndexScan(Map> indexMutationMap) throws IOException { - List keys = new ArrayList<>(indexMutationMap.size()); - for (byte[] indexKey : indexMutationMap.keySet()) { - keys.add(PVarbinary.INSTANCE.getKeyRange(indexKey, SortOrder.ASC)); - } - - ScanRanges scanRanges = ScanRanges.createPointLookup(keys); - Scan indexScan = new Scan(); - indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - scanRanges.initializeScan(indexScan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - indexScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); - indexScan.setRaw(true); - indexScan.readAllVersions(); - indexScan.setCacheBlocks(false); - return indexScan; - } - - protected void submitTasks(TaskBatch tasks) throws IOException{ - Pair, List>> resultsAndFutures = null; - try { - LOGGER.debug("Waiting on index verify and/or rebuild tasks to complete..."); - resultsAndFutures = this.pool.submitUninterruptible(tasks); - } catch (ExecutionException e) { - throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e); - } catch (EarlyExitFailure e) { - throw new RuntimeException("Stopped while waiting for batch, quitting!", e); - } - int index = 0; - for (Boolean result : resultsAndFutures.getFirst()) { - if (result == null) { - Throwable cause = ServerUtil.getExceptionFromFailedFuture(resultsAndFutures.getSecond().get(index)); - // there was a failure - throw new IOException(exceptionMessage, cause); - } - index++; - } + // Remove the expired rows from indexMutationMap + for (byte[] indexKey : expiredIndexRows) { + expectedIndexMutationMap.remove(indexKey); } - - - - /** - * This is to reorder the mutations in ascending order by the tuple of timestamp and mutation type where - * put comes before delete - */ - public static final Comparator MUTATION_TS_COMPARATOR = new Comparator() { - @Override - public int compare(Mutation o1, Mutation o2) { - long ts1 = getTimestamp(o1); - long ts2 = getTimestamp(o2); - if (ts1 < ts2) { - return -1; - } - if (ts1 > ts2) { - return 1; - } - if (o1 instanceof Put && o2 instanceof Delete) { - return -1; - } - if (o1 instanceof Delete && o2 instanceof Put) { - return 1; - } - return 0; - } - }; - - public static List getMutationsWithSameTS(Put put, Delete del) { - // Reorder the mutations on the same row so that delete comes before put when they have the same timestamp - return getMutationsWithSameTS(put, del, MUTATION_TS_COMPARATOR); + // Count and log missing rows + for (Map.Entry> entry : expectedIndexMutationMap.entrySet()) { + byte[] indexKey = entry.getKey(); + List mutationList = entry.getValue(); + Mutation mutation = mutationList.get(mutationList.size() - 1); + if (mutation instanceof Delete) { + 
continue; + } + currentTime = EnvironmentEdgeManager.currentTimeMillis(); + String errorMsg; + IndexVerificationOutputRepository.IndexVerificationErrorType errorType; + if (isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, getTimestamp(mutation))) { + errorMsg = ERROR_MESSAGE_MISSING_INDEX_ROW_BEYOND_MAX_LOOKBACK; + errorType = BEYOND_MAX_LOOKBACK_MISSING; + verificationPhaseResult.setBeyondMaxLookBackMissingIndexRowCount( + verificationPhaseResult.getBeyondMaxLookBackMissingIndexRowCount() + 1); + } else { + errorMsg = ERROR_MESSAGE_MISSING_INDEX_ROW; + errorType = MISSING_ROW; + verificationPhaseResult + .setMissingIndexRowCount(verificationPhaseResult.getMissingIndexRowCount() + 1); + } + byte[] dataKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexKey), viewConstants); + logToIndexToolOutputTable(dataKey, indexKey, getTimestamp(mutation), 0, errorMsg, + isBeforeRebuild, errorType); } - public static List getMutationsWithSameTS(Put put, Delete del, Comparator comparator) { - List mutationList = Lists.newArrayListWithExpectedSize(2); - if (put != null) { - mutationList.add(put); - } - if (del != null) { - mutationList.add(del); - } - // Group the cells within a mutation based on their timestamps and create a separate mutation for each group - mutationList = (List) IndexManagementUtil.flattenMutationsByTimestamp(mutationList); - // Reorder the mutations - Collections.sort(mutationList, comparator); - return mutationList; - } - - private static Put prepareIndexPutForRebuild(IndexMaintainer indexMaintainer, - ImmutableBytesPtr rowKeyPtr, ValueGetter mergedRowVG, long ts) throws IOException { - Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, - mergedRowVG, rowKeyPtr, ts, null, null, false); - if (indexPut == null) { - // No covered column. 
Just prepare an index row with the empty column - byte[] indexRowKey = indexMaintainer.buildRowKey(mergedRowVG, rowKeyPtr, - null, null, ts); - indexPut = new Put(indexRowKey); - } else { - IndexUtil.removeEmptyColumn(indexPut, indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier()); - } - indexPut.addColumn(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier(), ts, VERIFIED_BYTES); - return indexPut; + // Leave the invalid and missing rows in indexMutationMap + expectedIndexMutationMap.putAll(invalidIndexRows); + } + + // After IndexerRegionScanner is removed, this method should become abstract + protected void commitBatch(List indexUpdates) throws IOException, InterruptedException { + throw new DoNotRetryIOException("This method has not been implement by the sub class"); + } + + protected void updateIndexRows(Map> indexMutationMap, + List indexRowsToBeDeleted, IndexToolVerificationResult verificationResult) + throws IOException { + try { + int batchSize = 0; + List indexUpdates = new ArrayList(maxBatchSize); + for (List mutationList : indexMutationMap.values()) { + indexUpdates.addAll(mutationList); + batchSize += mutationList.size(); + if (batchSize >= maxBatchSize) { + commitBatch(indexUpdates); + batchSize = 0; + indexUpdates = new ArrayList(maxBatchSize); + } + } + if (batchSize > 0) { + commitBatch(indexUpdates); + } + batchSize = 0; + indexUpdates = new ArrayList(maxBatchSize); + for (Mutation mutation : indexRowsToBeDeleted) { + indexUpdates.add(mutation); + batchSize++; + if (batchSize >= maxBatchSize) { + commitBatch(indexUpdates); + batchSize = 0; + indexUpdates = new ArrayList(maxBatchSize); + } + } + if (batchSize > 0) { + commitBatch(indexUpdates); + } + if (verify) { + verificationResult.setRebuiltIndexRowCount( + verificationResult.getRebuiltIndexRowCount() + indexMutationMap.size()); + } + } catch (Throwable t) { + if (indexHTable != null) { + ClientUtil.throwIOException(indexHTable.getName().toString(), t); + } else { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + } } - - public static void removeColumn(Put put, Cell deleteCell) { - byte[] family = CellUtil.cloneFamily(deleteCell); - List cellList = put.getFamilyCellMap().get(family); - if (cellList == null) { - return; - } - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - Cell cell = cellIterator.next(); - if (CellUtil.matchingQualifier(cell, deleteCell)) { - cellIterator.remove(); - if (cellList.isEmpty()) { - put.getFamilyCellMap().remove(family); - } - return; - } - } + } + + @VisibleForTesting + public List prepareActualIndexMutations(Result indexRow) throws IOException { + Put put = null; + Delete del = null; + for (Cell cell : indexRow.rawCells()) { + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); + } + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); + } + del.add(cell); + } } + return getMutationsWithSameTS(put, del, MUTATION_TS_DESC_COMPARATOR); + } - public static void apply(Put destination, Put source) throws IOException { - for (List cells : source.getFamilyCellMap().values()) { - for (Cell cell : cells) { - if (!destination.has(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell))) { - destination.add(cell); - } - } - } + protected Scan prepareIndexScan(Map> indexMutationMap) throws IOException { + List keys = new 
ArrayList<>(indexMutationMap.size()); + for (byte[] indexKey : indexMutationMap.keySet()) { + keys.add(PVarbinary.INSTANCE.getKeyRange(indexKey, SortOrder.ASC)); } - public static Put applyNew(Put destination, Put source) throws IOException { - Put next = new Put(destination); - apply(next, source); - return next; - } - - private static void applyDeleteOnPut(Delete del, Put put) throws IOException { - for (List cells : del.getFamilyCellMap().values()) { - for (Cell cell : cells) { - switch (cell.getType()) { - case DeleteFamily: - put.getFamilyCellMap().remove(CellUtil.cloneFamily(cell)); - break; - case DeleteColumn: - removeColumn(put, cell); - break; - default: - // We do not expect this can happen - throw new DoNotRetryIOException("Single version delete marker in data mutation " + - del); - } - } - } + ScanRanges scanRanges = ScanRanges.createPointLookup(keys); + Scan indexScan = new Scan(); + indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + scanRanges.initializeScan(indexScan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + indexScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); + indexScan.setRaw(true); + indexScan.readAllVersions(); + indexScan.setCacheBlocks(false); + return indexScan; + } + + protected void submitTasks(TaskBatch tasks) throws IOException { + Pair, List>> resultsAndFutures = null; + try { + LOGGER.debug("Waiting on index verify and/or rebuild tasks to complete..."); + resultsAndFutures = this.pool.submitUninterruptible(tasks); + } catch (ExecutionException e) { + throw new RuntimeException( + "Should not fail on the results while using a WaitForCompletionTaskRunner", e); + } catch (EarlyExitFailure e) { + throw new RuntimeException("Stopped while waiting for batch, quitting!", e); } + int index = 0; + for (Boolean result : resultsAndFutures.getFirst()) { + if (result == null) { + Throwable cause = + ServerUtil.getExceptionFromFailedFuture(resultsAndFutures.getSecond().get(index)); + // there was a failure + throw new IOException(exceptionMessage, cause); + } + index++; + } + } - /** - * Generate the index update for a data row from the mutation that are obtained by merging - * the previous data row state with the pending row mutation for index rebuild. This method is - * called only for global indexes including covered full, covered partial, uncovered full, and - * uncovered partial indexes. - * pendingMutations is a sorted list of data table mutations that are used to replay index - * table mutations. This list is sorted in ascending order by the tuple of row key, timestamp - * and mutation type where delete comes after put. - */ - public static List prepareIndexMutationsForRebuild(IndexMaintainer indexMaintainer, - Put dataPut, Delete dataDel) throws IOException { - List dataMutations = getMutationsWithSameTS(dataPut, dataDel); - List indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size()); - // The row key ptr of the data table row for which we will build index rows here - ImmutableBytesPtr rowKeyPtr = (dataPut != null) ? 
new ImmutableBytesPtr(dataPut.getRow()) : - new ImmutableBytesPtr(dataDel.getRow()); - // Start with empty data table row - Put currentDataRowState = null; - // The index row key corresponding to the current data row - byte[] indexRowKeyForCurrentDataRow = null; - int dataMutationListSize = dataMutations.size(); - for (int i = 0; i < dataMutationListSize; i++) { - Mutation mutation = dataMutations.get(i); - long ts = getTimestamp(mutation); - Delete deleteToApply = null; - if (mutation instanceof Put) { - if (i < dataMutationListSize - 1) { - // If there is a pair of a put and delete mutation with the same timestamp then apply the delete - // mutation on the put. If the delete mutation deletes all the cells in the put mutation, the family - // cell map of the put mutation becomes empty and the mutation is ignored later - Mutation nextMutation = dataMutations.get(i + 1); - if (getTimestamp(nextMutation) == ts && nextMutation instanceof Delete) { - applyDeleteOnPut((Delete) nextMutation, (Put) mutation); - if(mutation.getFamilyCellMap().size() != 0) { - // Apply the delete mutation on the current data row state too - if (currentDataRowState != null) { - applyDeleteOnPut((Delete) nextMutation, currentDataRowState); - if (currentDataRowState.getFamilyCellMap().size() == 0) { - currentDataRowState = null; - indexRowKeyForCurrentDataRow = null; - } - } - } else { - /** - * When the mutation is empty after the delete mutation is applied, we should - * reuse the logical of applying a delete mutation on currentDataRowState. - */ - deleteToApply = (Delete)nextMutation; - } - // This increment is to skip the next (delete) mutation as we have already processed it - i++; - } - } - - if (mutation.getFamilyCellMap().size() != 0) { - // Add this put on top of the current data row state to get the next data row state - Put nextDataRow = (currentDataRowState == null) ? 
new Put((Put) mutation) : - applyNew((Put) mutation, currentDataRowState); - if (!indexMaintainer.shouldPrepareIndexMutations(nextDataRow)) { - currentDataRowState = nextDataRow; - if (indexRowKeyForCurrentDataRow != null) { - Mutation del = indexMaintainer.buildRowDeleteMutation( - indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - indexMutations.add(del); - } - indexRowKeyForCurrentDataRow = null; - continue; - } - ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRow); - Put indexPut = prepareIndexPutForRebuild(indexMaintainer, rowKeyPtr, - nextDataRowVG, ts); - indexMutations.add(indexPut); - // Delete the current index row if the new index key is different than the current one - if (indexRowKeyForCurrentDataRow != null) { - if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { - Mutation del = indexMaintainer.buildRowDeleteMutation( - indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - indexMutations.add(del); - } - } - // For the next iteration of the for loop - currentDataRowState = nextDataRow; - indexRowKeyForCurrentDataRow = indexPut.getRow(); - continue; - } - } else if(mutation instanceof Delete) { - deleteToApply = (Delete)mutation; - } + /** + * This is to reorder the mutations in ascending order by the tuple of timestamp and mutation type + * where put comes before delete + */ + public static final Comparator MUTATION_TS_COMPARATOR = new Comparator() { + @Override + public int compare(Mutation o1, Mutation o2) { + long ts1 = getTimestamp(o1); + long ts2 = getTimestamp(o2); + if (ts1 < ts2) { + return -1; + } + if (ts1 > ts2) { + return 1; + } + if (o1 instanceof Put && o2 instanceof Delete) { + return -1; + } + if (o1 instanceof Delete && o2 instanceof Put) { + return 1; + } + return 0; + } + }; + + public static List getMutationsWithSameTS(Put put, Delete del) { + // Reorder the mutations on the same row so that delete comes before put when they have the same + // timestamp + return getMutationsWithSameTS(put, del, MUTATION_TS_COMPARATOR); + } + + public static List getMutationsWithSameTS(Put put, Delete del, + Comparator comparator) { + List mutationList = Lists.newArrayListWithExpectedSize(2); + if (put != null) { + mutationList.add(put); + } + if (del != null) { + mutationList.add(del); + } + // Group the cells within a mutation based on their timestamps and create a separate mutation + // for each group + mutationList = (List) IndexManagementUtil.flattenMutationsByTimestamp(mutationList); + // Reorder the mutations + Collections.sort(mutationList, comparator); + return mutationList; + } + + private static Put prepareIndexPutForRebuild(IndexMaintainer indexMaintainer, + ImmutableBytesPtr rowKeyPtr, ValueGetter mergedRowVG, long ts) throws IOException { + Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, mergedRowVG, + rowKeyPtr, ts, null, null, false); + if (indexPut == null) { + // No covered column. 
Just prepare an index row with the empty column + byte[] indexRowKey = indexMaintainer.buildRowKey(mergedRowVG, rowKeyPtr, null, null, ts); + indexPut = new Put(indexRowKey); + } else { + IndexUtil.removeEmptyColumn(indexPut, + indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier()); + } + indexPut.addColumn(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier(), ts, VERIFIED_BYTES); + return indexPut; + } + + public static void removeColumn(Put put, Cell deleteCell) { + byte[] family = CellUtil.cloneFamily(deleteCell); + List cellList = put.getFamilyCellMap().get(family); + if (cellList == null) { + return; + } + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + Cell cell = cellIterator.next(); + if (CellUtil.matchingQualifier(cell, deleteCell)) { + cellIterator.remove(); + if (cellList.isEmpty()) { + put.getFamilyCellMap().remove(family); + } + return; + } + } + } - if (deleteToApply != null && currentDataRowState != null) { - // We apply delete mutations only on the current data row state to obtain the next data row state. - // For the index table, we are only interested in if the index row should be deleted or not. - // There is no need to apply column deletes to index rows since index rows are always full rows - // and all the cells in an index row have the same timestamp value. Because of this index rows - // versions do not share cells. - applyDeleteOnPut(deleteToApply, currentDataRowState); - Put nextDataRowState = currentDataRowState; - if (nextDataRowState.getFamilyCellMap().size() == 0) { - if (indexRowKeyForCurrentDataRow != null) { - Mutation - del = - indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - indexMutations.add(del); - if (indexMaintainer.isCDCIndex()) { - // CDC Index needs two delete markers one for deleting the index row, - // and the other for referencing the data table delete mutation with - // the right index row key, that is, the index row key starting with ts - indexMutations.add(IndexRegionObserver.getDeleteIndexMutation( - currentDataRowState, indexMaintainer, ts, rowKeyPtr)); - } - } - currentDataRowState = null; - indexRowKeyForCurrentDataRow = null; - } else if (indexRowKeyForCurrentDataRow != null) { - if (!indexMaintainer.shouldPrepareIndexMutations(nextDataRowState)) { - currentDataRowState = nextDataRowState; - Mutation del = indexMaintainer.buildRowDeleteMutation( - indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - indexMutations.add(del); - indexRowKeyForCurrentDataRow = null; - continue; - } - ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRowState); - Put indexPut = prepareIndexPutForRebuild(indexMaintainer, rowKeyPtr, - nextDataRowVG, ts); - indexMutations.add(indexPut); - // Delete the current index row if the new index key is different than the current one - if (indexRowKeyForCurrentDataRow != null) { - if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { - Mutation del = indexMaintainer.buildRowDeleteMutation( - indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - indexMutations.add(del); - } - } - indexRowKeyForCurrentDataRow = indexPut.getRow(); - } else { - currentDataRowState = nextDataRowState; - indexRowKeyForCurrentDataRow = null; - } - } + public static void apply(Put destination, Put source) throws IOException { + for (List 
cells : source.getFamilyCellMap().values()) { + for (Cell cell : cells) { + if (!destination.has(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell))) { + destination.add(cell); } - return indexMutations; + } } - - @VisibleForTesting - public int prepareIndexMutations(Put put, Delete del, Map> indexMutationMap, - Set mostRecentIndexRowKeys) throws IOException { - List indexMutations; - - indexMutations = prepareIndexMutationsForRebuild(indexMaintainer, put, del); - Collections.reverse(indexMutations); - - boolean mostRecentDone = false; - // Do not populate mostRecentIndexRowKeys when verifyType is NONE or AFTER - if (verifyType == IndexTool.IndexVerifyType.NONE || verifyType == IndexTool.IndexVerifyType.AFTER) { - mostRecentDone = true; + } + + public static Put applyNew(Put destination, Put source) throws IOException { + Put next = new Put(destination); + apply(next, source); + return next; + } + + private static void applyDeleteOnPut(Delete del, Put put) throws IOException { + for (List cells : del.getFamilyCellMap().values()) { + for (Cell cell : cells) { + switch (cell.getType()) { + case DeleteFamily: + put.getFamilyCellMap().remove(CellUtil.cloneFamily(cell)); + break; + case DeleteColumn: + removeColumn(put, cell); + break; + default: + // We do not expect this can happen + throw new DoNotRetryIOException("Single version delete marker in data mutation " + del); } - for (Mutation mutation : indexMutations) { - byte[] indexRowKey = mutation.getRow(); - List mutationList = indexMutationMap.get(indexRowKey); - if (mutationList == null) { - if (!mostRecentDone) { - if (mutation instanceof Put) { - mostRecentIndexRowKeys.add(indexRowKey); - mostRecentDone = true; - } + } + } + } + + /** + * Generate the index update for a data row from the mutation that are obtained by merging the + * previous data row state with the pending row mutation for index rebuild. This method is called + * only for global indexes including covered full, covered partial, uncovered full, and uncovered + * partial indexes. pendingMutations is a sorted list of data table mutations that are used to + * replay index table mutations. This list is sorted in ascending order by the tuple of row key, + * timestamp and mutation type where delete comes after put. + */ + public static List prepareIndexMutationsForRebuild(IndexMaintainer indexMaintainer, + Put dataPut, Delete dataDel) throws IOException { + List dataMutations = getMutationsWithSameTS(dataPut, dataDel); + List indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size()); + // The row key ptr of the data table row for which we will build index rows here + ImmutableBytesPtr rowKeyPtr = (dataPut != null) + ? new ImmutableBytesPtr(dataPut.getRow()) + : new ImmutableBytesPtr(dataDel.getRow()); + // Start with empty data table row + Put currentDataRowState = null; + // The index row key corresponding to the current data row + byte[] indexRowKeyForCurrentDataRow = null; + int dataMutationListSize = dataMutations.size(); + for (int i = 0; i < dataMutationListSize; i++) { + Mutation mutation = dataMutations.get(i); + long ts = getTimestamp(mutation); + Delete deleteToApply = null; + if (mutation instanceof Put) { + if (i < dataMutationListSize - 1) { + // If there is a pair of a put and delete mutation with the same timestamp then apply the + // delete + // mutation on the put. 
If the delete mutation deletes all the cells in the put mutation, + // the family + // cell map of the put mutation becomes empty and the mutation is ignored later + Mutation nextMutation = dataMutations.get(i + 1); + if (getTimestamp(nextMutation) == ts && nextMutation instanceof Delete) { + applyDeleteOnPut((Delete) nextMutation, (Put) mutation); + if (mutation.getFamilyCellMap().size() != 0) { + // Apply the delete mutation on the current data row state too + if (currentDataRowState != null) { + applyDeleteOnPut((Delete) nextMutation, currentDataRowState); + if (currentDataRowState.getFamilyCellMap().size() == 0) { + currentDataRowState = null; + indexRowKeyForCurrentDataRow = null; } - mutationList = new ArrayList<>(); - mutationList.add(mutation); - indexMutationMap.put(indexRowKey, mutationList); + } } else { - mutationList.add(mutation); - } - } - return indexMutations.size(); - } - - protected RegionScanner getLocalScanner() throws IOException { - // override the filter to skip scan and open new scanner - // when lower bound of timerange is passed or newStartKey was populated - // from previous call to next() - if(minTimestamp!= 0) { - Scan incrScan = new Scan(scan); - incrScan.setTimeRange(minTimestamp, scan.getTimeRange().getMax()); - incrScan.setRaw(true); - incrScan.readAllVersions(); - incrScan.getFamilyMap().clear(); - incrScan.setCacheBlocks(false); - for (byte[] family : scan.getFamilyMap().keySet()) { - incrScan.addFamily(family); + /** + * When the mutation is empty after the delete mutation is applied, we should reuse + * the logical of applying a delete mutation on currentDataRowState. + */ + deleteToApply = (Delete) nextMutation; } - ScanUtil.adjustScanFilterForGlobalIndexRegionScanner(incrScan); - if(nextStartKey != null) { - incrScan.withStartRow(nextStartKey); + // This increment is to skip the next (delete) mutation as we have already processed it + i++; + } + } + + if (mutation.getFamilyCellMap().size() != 0) { + // Add this put on top of the current data row state to get the next data row state + Put nextDataRow = (currentDataRowState == null) + ? 
new Put((Put) mutation) + : applyNew((Put) mutation, currentDataRowState); + if (!indexMaintainer.shouldPrepareIndexMutations(nextDataRow)) { + currentDataRowState = nextDataRow; + if (indexRowKeyForCurrentDataRow != null) { + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + indexMutations.add(del); } - List keys = new ArrayList<>(); - - try (RegionScanner scanner = new TTLRegionScanner(env, incrScan, - new PagingRegionScanner(region, region.getScanner(incrScan), incrScan))) { - List row = new ArrayList<>(); - int rowCount = 0; - // collect row keys that have been modified in the given time-range - // up to the size of page to build skip scan filter - do { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - hasMoreIncr = scanner.nextRaw(row); - if (!row.isEmpty()) { - if (isDummy(row)) { - row.clear(); - continue; - } - keys.add(PVarbinary.INSTANCE.getKeyRange(CellUtil.cloneRow(row.get(0)), - SortOrder.ASC)); - rowCount++; - } - row.clear(); - } while (hasMoreIncr && rowCount < pageSizeInRows); + indexRowKeyForCurrentDataRow = null; + continue; + } + ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRow); + Put indexPut = prepareIndexPutForRebuild(indexMaintainer, rowKeyPtr, nextDataRowVG, ts); + indexMutations.add(indexPut); + // Delete the current index row if the new index key is different than the current one + if (indexRowKeyForCurrentDataRow != null) { + if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + indexMutations.add(del); } - if (!hasMoreIncr && keys.isEmpty()) { - return null; + } + // For the next iteration of the for loop + currentDataRowState = nextDataRow; + indexRowKeyForCurrentDataRow = indexPut.getRow(); + continue; + } + } else if (mutation instanceof Delete) { + deleteToApply = (Delete) mutation; + } + + if (deleteToApply != null && currentDataRowState != null) { + // We apply delete mutations only on the current data row state to obtain the next data row + // state. + // For the index table, we are only interested in if the index row should be deleted or not. + // There is no need to apply column deletes to index rows since index rows are always full + // rows + // and all the cells in an index row have the same timestamp value. Because of this index + // rows + // versions do not share cells. 
+ applyDeleteOnPut(deleteToApply, currentDataRowState); + Put nextDataRowState = currentDataRowState; + if (nextDataRowState.getFamilyCellMap().size() == 0) { + if (indexRowKeyForCurrentDataRow != null) { + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + indexMutations.add(del); + if (indexMaintainer.isCDCIndex()) { + // CDC Index needs two delete markers one for deleting the index row, + // and the other for referencing the data table delete mutation with + // the right index row key, that is, the index row key starting with ts + indexMutations.add(IndexRegionObserver.getDeleteIndexMutation(currentDataRowState, + indexMaintainer, ts, rowKeyPtr)); } - if (keys.isEmpty()) { - return innerScanner; + } + currentDataRowState = null; + indexRowKeyForCurrentDataRow = null; + } else if (indexRowKeyForCurrentDataRow != null) { + if (!indexMaintainer.shouldPrepareIndexMutations(nextDataRowState)) { + currentDataRowState = nextDataRowState; + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + indexMutations.add(del); + indexRowKeyForCurrentDataRow = null; + continue; + } + ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRowState); + Put indexPut = prepareIndexPutForRebuild(indexMaintainer, rowKeyPtr, nextDataRowVG, ts); + indexMutations.add(indexPut); + // Delete the current index row if the new index key is different than the current one + if (indexRowKeyForCurrentDataRow != null) { + if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + indexMutations.add(del); } - ScanRanges scanRanges = ScanRanges.createPointLookup(keys); - scanRanges.initializeScan(incrScan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - incrScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); - //putting back the min time to 0 for index and data reads - incrScan.setTimeRange(0, scan.getTimeRange().getMax()); - scan.setTimeRange(0, scan.getTimeRange().getMax()); - return region.getScanner(incrScan); + } + indexRowKeyForCurrentDataRow = indexPut.getRow(); + } else { + currentDataRowState = nextDataRowState; + indexRowKeyForCurrentDataRow = null; } + } + } + return indexMutations; + } + + @VisibleForTesting + public int prepareIndexMutations(Put put, Delete del, + Map> indexMutationMap, Set mostRecentIndexRowKeys) + throws IOException { + List indexMutations; + + indexMutations = prepareIndexMutationsForRebuild(indexMaintainer, put, del); + Collections.reverse(indexMutations); + + boolean mostRecentDone = false; + // Do not populate mostRecentIndexRowKeys when verifyType is NONE or AFTER + if ( + verifyType == IndexTool.IndexVerifyType.NONE || verifyType == IndexTool.IndexVerifyType.AFTER + ) { + mostRecentDone = true; + } + for (Mutation mutation : indexMutations) { + byte[] indexRowKey = mutation.getRow(); + List mutationList = indexMutationMap.get(indexRowKey); + if (mutationList == null) { + if (!mostRecentDone) { + if (mutation instanceof Put) { + mostRecentIndexRowKeys.add(indexRowKey); + mostRecentDone = true; + } + } + mutationList = new ArrayList<>(); + mutationList.add(mutation); + indexMutationMap.put(indexRowKey, mutationList); + } else { + mutationList.add(mutation); + } + } + return indexMutations.size(); + } + + protected RegionScanner 
getLocalScanner() throws IOException { + // override the filter to skip scan and open new scanner + // when lower bound of timerange is passed or newStartKey was populated + // from previous call to next() + if (minTimestamp != 0) { + Scan incrScan = new Scan(scan); + incrScan.setTimeRange(minTimestamp, scan.getTimeRange().getMax()); + incrScan.setRaw(true); + incrScan.readAllVersions(); + incrScan.getFamilyMap().clear(); + incrScan.setCacheBlocks(false); + for (byte[] family : scan.getFamilyMap().keySet()) { + incrScan.addFamily(family); + } + ScanUtil.adjustScanFilterForGlobalIndexRegionScanner(incrScan); + if (nextStartKey != null) { + incrScan.withStartRow(nextStartKey); + } + List keys = new ArrayList<>(); + + try (RegionScanner scanner = new TTLRegionScanner(env, incrScan, + new PagingRegionScanner(region, region.getScanner(incrScan), incrScan))) { + List row = new ArrayList<>(); + int rowCount = 0; + // collect row keys that have been modified in the given time-range + // up to the size of page to build skip scan filter + do { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + hasMoreIncr = scanner.nextRaw(row); + if (!row.isEmpty()) { + if (isDummy(row)) { + row.clear(); + continue; + } + keys.add(PVarbinary.INSTANCE.getKeyRange(CellUtil.cloneRow(row.get(0)), SortOrder.ASC)); + rowCount++; + } + row.clear(); + } while (hasMoreIncr && rowCount < pageSizeInRows); + } + if (!hasMoreIncr && keys.isEmpty()) { + return null; + } + if (keys.isEmpty()) { return innerScanner; + } + ScanRanges scanRanges = ScanRanges.createPointLookup(keys); + scanRanges.initializeScan(incrScan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + incrScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); + // putting back the min time to 0 for index and data reads + incrScan.setTimeRange(0, scan.getTimeRange().getMax()); + scan.setTimeRange(0, scan.getTimeRange().getMax()); + return region.getScanner(incrScan); } + return innerScanner; + } - @Override - public long getMaxResultSize() { - return scan.getMaxResultSize(); - } + @Override + public long getMaxResultSize() { + return scan.getMaxResultSize(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java index 0633989d1a9..a0497380d72 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupByCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,16 +24,15 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; /** - * - * Interface to abstract the way in which distinct group by - * elements are cached - * - * + * Interface to abstract the way in which distinct group by elements are cached * @since 3.0.0 */ public interface GroupByCache extends Closeable { - long size(); - Aggregator[] cache(ImmutableBytesPtr key); - RegionScanner getScanner(RegionScanner s); - void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey); + long size(); + + Aggregator[] cache(ImmutableBytesPtr key); + + RegionScanner getScanner(RegionScanner s); + + void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey); } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java index f36412f03f4..b62dc4c12f1 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -79,6 +79,7 @@ import org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.Closeables; import org.apache.phoenix.util.EncodedColumnsUtil; @@ -93,933 +94,885 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - /** * Region observer that aggregates grouped rows (i.e. SQL query with GROUP BY clause) - * * @since 0.1 */ -public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor { - private static final Logger LOGGER = LoggerFactory - .getLogger(GroupedAggregateRegionObserver.class); - public static final int MIN_DISTINCT_VALUES = 100; - - @Override - public Optional getRegionObserver() { - return Optional.of(this); +public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver + implements RegionCoprocessor { + private static final Logger LOGGER = + LoggerFactory.getLogger(GroupedAggregateRegionObserver.class); + public static final int MIN_DISTINCT_VALUES = 100; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + /** + * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list of + * expressions from the scan and returns the aggregated rows of each group. 
For example, given the + * following original rows in the RegionScanner: KEY COL1 row1 a row2 b row3 a row4 a the + * following rows will be returned for COUNT(*): KEY COUNT a 3 b 1 The client is required to do a + * sort and a final aggregation, since multiple rows with the same key may be returned from + * different regions. + */ + @Override + protected RegionScanner doPostScannerOpen(ObserverContext c, + Scan scan, RegionScanner s) throws IOException { + boolean keyOrdered = false; + byte[] expressionBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS); + + if (expressionBytes == null) { + expressionBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS); + keyOrdered = true; } - - - /** - * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list - * of expressions from the scan and returns the aggregated rows of each group. For example, - * given the following original rows in the RegionScanner: KEY COL1 row1 a row2 b row3 a row4 a - * the following rows will be returned for COUNT(*): KEY COUNT a 3 b 1 The client is required to - * do a sort and a final aggregation, since multiple rows with the same key may be returned from - * different regions. - */ - @Override - protected RegionScanner doPostScannerOpen(ObserverContext c, - Scan scan, RegionScanner s) throws IOException { - boolean keyOrdered = false; - byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS); - - if (expressionBytes == null) { - expressionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS); - keyOrdered = true; - } - int offset = 0; - boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); - if (ScanUtil.isLocalIndex(scan)) { - /* - * For local indexes, we need to set an offset on row key expressions to skip - * the region start key. - */ - Region region = c.getEnvironment().getRegion(); - offset = region.getRegionInfo().getStartKey().length != 0 ? 
region.getRegionInfo().getStartKey().length : - region.getRegionInfo().getEndKey().length; - ScanUtil.setRowKeyOffset(scan, offset); - } - - List expressions = deserializeGroupByExpressions(expressionBytes, 0); - final TenantCache tenantCache = GlobalCache.getTenantCache(c.getEnvironment(), ScanUtil.getTenantId(scan)); - try (MemoryChunk em = tenantCache.getMemoryManager().allocate(0)) { - ServerAggregators aggregators = - ServerAggregators.deserialize(scan - .getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), c - .getEnvironment().getConfiguration(), em); - - RegionScanner innerScanner = s; - List indexMaintainers = - IndexUtil.deSerializeIndexMaintainersFromScan(scan); - TupleProjector tupleProjector = null; - byte[][] viewConstants = null; - ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); - - final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); - final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); - boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); - if (ScanUtil.isLocalOrUncoveredGlobalIndex(scan) - || (j == null && p != null)) { - if (dataColumns != null) { - tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); - viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); - } - ImmutableBytesPtr tempPtr = new ImmutableBytesPtr(); - innerScanner = - getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, - c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex); - } - - if (j != null) { - innerScanner = - new HashJoinRegionScanner(innerScanner, scan, p, j, ScanUtil.getTenantId(scan), - c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier); - } - - long limit = Long.MAX_VALUE; - byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT); - if (limitBytes != null) { - limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault()); - } - long pageSizeMs = getPageSizeMsForRegionScanner(scan); - if (keyOrdered) { // Optimize by taking advantage that the rows are - // already in the required group by key order - return new OrderedGroupByRegionScanner(c, scan, innerScanner, expressions, aggregators, limit, pageSizeMs); - } else { // Otherwse, collect them all up in an in memory map - return new UnorderedGroupByRegionScanner(c, scan, innerScanner, expressions, aggregators, limit, pageSizeMs); - } - } + int offset = 0; + boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); + if (ScanUtil.isLocalIndex(scan)) { + /* + * For local indexes, we need to set an offset on row key expressions to skip the region start + * key. + */ + Region region = c.getEnvironment().getRegion(); + offset = region.getRegionInfo().getStartKey().length != 0 + ? 
region.getRegionInfo().getStartKey().length + : region.getRegionInfo().getEndKey().length; + ScanUtil.setRowKeyOffset(scan, offset); } - public static long sizeOfUnorderedGroupByMap(int nRows, int valueSize) { - return SizedUtil.sizeOfMap(nRows, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, valueSize); + List expressions = deserializeGroupByExpressions(expressionBytes, 0); + final TenantCache tenantCache = + GlobalCache.getTenantCache(c.getEnvironment(), ScanUtil.getTenantId(scan)); + try (MemoryChunk em = tenantCache.getMemoryManager().allocate(0)) { + ServerAggregators aggregators = ServerAggregators.deserialize( + scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), + c.getEnvironment().getConfiguration(), em); + + RegionScanner innerScanner = s; + List indexMaintainers = IndexUtil.deSerializeIndexMaintainersFromScan(scan); + TupleProjector tupleProjector = null; + byte[][] viewConstants = null; + ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); + + final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); + final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); + boolean useQualifierAsIndex = EncodedColumnsUtil + .useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); + if (ScanUtil.isLocalOrUncoveredGlobalIndex(scan) || (j == null && p != null)) { + if (dataColumns != null) { + tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); + viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); + } + ImmutableBytesPtr tempPtr = new ImmutableBytesPtr(); + innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, + c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), + viewConstants, p, tempPtr, useQualifierAsIndex); + } + + if (j != null) { + innerScanner = + new HashJoinRegionScanner(innerScanner, scan, p, j, ScanUtil.getTenantId(scan), + c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier); + } + + long limit = Long.MAX_VALUE; + byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.GROUP_BY_LIMIT); + if (limitBytes != null) { + limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault()); + } + long pageSizeMs = getPageSizeMsForRegionScanner(scan); + if (keyOrdered) { // Optimize by taking advantage that the rows are + // already in the required group by key order + return new OrderedGroupByRegionScanner(c, scan, innerScanner, expressions, aggregators, + limit, pageSizeMs); + } else { // Otherwse, collect them all up in an in memory map + return new UnorderedGroupByRegionScanner(c, scan, innerScanner, expressions, aggregators, + limit, pageSizeMs); + } } - - private List deserializeGroupByExpressions(byte[] expressionBytes, int offset) - throws IOException { - List expressions = new ArrayList(3); - ByteArrayInputStream stream = new ByteArrayInputStream(expressionBytes); + } + + public static long sizeOfUnorderedGroupByMap(int nRows, int valueSize) { + return SizedUtil.sizeOfMap(nRows, SizedUtil.IMMUTABLE_BYTES_WRITABLE_SIZE, valueSize); + } + + private List deserializeGroupByExpressions(byte[] expressionBytes, int offset) + throws IOException { + List expressions = new ArrayList(3); + ByteArrayInputStream stream = new ByteArrayInputStream(expressionBytes); + try { + DataInputStream input = new DataInputStream(stream); + while (true) { try { - DataInputStream input = new DataInputStream(stream); - while (true) { - try { - int expressionOrdinal = 
WritableUtils.readVInt(input); - Expression expression = - ExpressionType.values()[expressionOrdinal].newInstance(); - expression.readFields(input); - if (offset != 0) { - IndexUtil.setRowKeyExpressionOffset(expression, offset); - } - expressions.add(expression); - } catch (EOFException e) { - break; - } - } - } finally { - stream.close(); + int expressionOrdinal = WritableUtils.readVInt(input); + Expression expression = ExpressionType.values()[expressionOrdinal].newInstance(); + expression.readFields(input); + if (offset != 0) { + IndexUtil.setRowKeyExpressionOffset(expression, offset); + } + expressions.add(expression); + } catch (EOFException e) { + break; } - return expressions; + } + } finally { + stream.close(); + } + return expressions; + } + + /** + * Cache for distinct values and their aggregations which is completely in-memory (as opposed to + * spilling to disk). Used when GROUPBY_SPILLABLE_ATTRIB is set to false. The memory usage is + * tracked at a coursed grain and will throw and abort if too much is used. + * @since 3.0.0 + */ + private static final class InMemoryGroupByCache implements GroupByCache { + private final MemoryChunk chunk; + private final Map aggregateMap; + private final ServerAggregators aggregators; + private final RegionCoprocessorEnvironment env; + private final byte[] customAnnotations; + private final ConcurrentMap aggregateValueToLastScannedRowKeys; + private final boolean isIncompatibleClient; + + private int estDistVals; + + InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, + byte[] customAnnotations, ServerAggregators aggregators, int estDistVals, + boolean isIncompatibleClient) { + this.isIncompatibleClient = isIncompatibleClient; + int estValueSize = aggregators.getEstimatedByteSize(); + long estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize); + TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); + this.env = env; + this.estDistVals = estDistVals; + this.aggregators = aggregators; + this.aggregateMap = Maps.newHashMapWithExpectedSize(estDistVals); + this.chunk = tenantCache.getMemoryManager().allocate(estSize); + this.customAnnotations = customAnnotations; + aggregateValueToLastScannedRowKeys = Maps.newConcurrentMap(); } - /** - * - * Cache for distinct values and their aggregations which is completely - * in-memory (as opposed to spilling to disk). Used when GROUPBY_SPILLABLE_ATTRIB - * is set to false. The memory usage is tracked at a coursed grain and will - * throw and abort if too much is used. 
- * - * - * @since 3.0.0 - */ - private static final class InMemoryGroupByCache implements GroupByCache { - private final MemoryChunk chunk; - private final Map aggregateMap; - private final ServerAggregators aggregators; - private final RegionCoprocessorEnvironment env; - private final byte[] customAnnotations; - private final ConcurrentMap - aggregateValueToLastScannedRowKeys; - private final boolean isIncompatibleClient; - - private int estDistVals; + @Override + public void close() throws IOException { + this.chunk.close(); + } - InMemoryGroupByCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, - byte[] customAnnotations, ServerAggregators aggregators, - int estDistVals, - boolean isIncompatibleClient) { - this.isIncompatibleClient = isIncompatibleClient; - int estValueSize = aggregators.getEstimatedByteSize(); - long estSize = sizeOfUnorderedGroupByMap(estDistVals, estValueSize); - TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); - this.env = env; - this.estDistVals = estDistVals; - this.aggregators = aggregators; - this.aggregateMap = Maps.newHashMapWithExpectedSize(estDistVals); - this.chunk = tenantCache.getMemoryManager().allocate(estSize); - this.customAnnotations = customAnnotations; - aggregateValueToLastScannedRowKeys = Maps.newConcurrentMap(); + @Override + public Aggregator[] cache(ImmutableBytesPtr cacheKey) { + ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); + Aggregator[] rowAggregators = aggregateMap.get(key); + if (rowAggregators == null) { + // If Aggregators not found for this distinct + // value, clone our original one (we need one + // per distinct value) + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Adding new aggregate bucket for row key " + + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()), + customAnnotations)); } + rowAggregators = aggregators.newAggregators(env.getConfiguration()); + aggregateMap.put(key, rowAggregators); - @Override - public void close() throws IOException { - this.chunk.close(); + if (aggregateMap.size() > estDistVals) { // increase allocation + estDistVals *= 1.5f; + long estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getEstimatedByteSize()); + chunk.resize(estSize); } + } + return rowAggregators; + } - @Override - public Aggregator[] cache(ImmutableBytesPtr cacheKey) { - ImmutableBytesPtr key = new ImmutableBytesPtr(cacheKey); - Aggregator[] rowAggregators = aggregateMap.get(key); - if (rowAggregators == null) { - // If Aggregators not found for this distinct - // value, clone our original one (we need one - // per distinct value) - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key " - + Bytes.toStringBinary(key.get(), key.getOffset(), - key.getLength()), customAnnotations)); - } - rowAggregators = - aggregators.newAggregators(env.getConfiguration()); - aggregateMap.put(key, rowAggregators); - - if (aggregateMap.size() > estDistVals) { // increase allocation - estDistVals *= 1.5f; - long estSize = sizeOfUnorderedGroupByMap(estDistVals, aggregators.getEstimatedByteSize()); - chunk.resize(estSize); - } - } - return rowAggregators; + @Override + public RegionScanner getScanner(final RegionScanner s) { + // Compute final allocation + long estSize = + sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize()); + chunk.resize(estSize); + + final List aggResults = new ArrayList(aggregateMap.size()); + + for (Map.Entry entry : 
aggregateMap.entrySet()) { + ImmutableBytesWritable aggregateGroupValPtr = entry.getKey(); + Aggregator[] rowAggregators = entry.getValue(); + // Generate byte array of Aggregators and set as value of row + byte[] aggregateArrayBytes = aggregators.toBytes(rowAggregators); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Adding new distinct group: " + + Bytes.toStringBinary(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), + aggregateGroupValPtr.getLength()) + + " with aggregators " + Arrays.asList(rowAggregators) + " value = " + + Bytes.toStringBinary(aggregateArrayBytes), customAnnotations)); } + if (!isIncompatibleClient) { + ImmutableBytesWritable lastScannedRowKey = + aggregateValueToLastScannedRowKeys.get(aggregateGroupValPtr); + byte[] aggregateGroupValueBytes = new byte[aggregateGroupValPtr.getLength()]; + System.arraycopy(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), + aggregateGroupValueBytes, 0, aggregateGroupValueBytes.length); + byte[] finalValue = + ByteUtil.concat(PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), + aggregateGroupValueBytes, aggregateArrayBytes); + aggResults.add( + PhoenixKeyValueUtil.newKeyValue(lastScannedRowKey.get(), lastScannedRowKey.getOffset(), + lastScannedRowKey.getLength(), GROUPED_AGGREGATOR_VALUE_BYTES, + GROUPED_AGGREGATOR_VALUE_BYTES, AGG_TIMESTAMP, finalValue, 0, finalValue.length)); + } else { + aggResults.add(PhoenixKeyValueUtil.newKeyValue(aggregateGroupValPtr.get(), + aggregateGroupValPtr.getOffset(), aggregateGroupValPtr.getLength(), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, aggregateArrayBytes, 0, + aggregateArrayBytes.length)); + } + } + // scanner using the non spillable, memory-only implementation + return new BaseRegionScanner(s) { + private int index = 0; - @Override - public RegionScanner getScanner(final RegionScanner s) { - // Compute final allocation - long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize()); - chunk.resize(estSize); - - final List aggResults = new ArrayList(aggregateMap.size()); - - for (Map.Entry entry : aggregateMap.entrySet()) { - ImmutableBytesWritable aggregateGroupValPtr = entry.getKey(); - Aggregator[] rowAggregators = entry.getValue(); - // Generate byte array of Aggregators and set as value of row - byte[] aggregateArrayBytes = aggregators.toBytes(rowAggregators); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Adding new distinct group: " - + Bytes.toStringBinary(aggregateGroupValPtr.get(), - aggregateGroupValPtr.getOffset(), aggregateGroupValPtr.getLength()) - + " with aggregators " + Arrays.asList(rowAggregators) + " value = " - + Bytes.toStringBinary(aggregateArrayBytes), customAnnotations)); - } - if (!isIncompatibleClient) { - ImmutableBytesWritable lastScannedRowKey = - aggregateValueToLastScannedRowKeys.get(aggregateGroupValPtr); - byte[] aggregateGroupValueBytes = new byte[aggregateGroupValPtr.getLength()]; - System.arraycopy(aggregateGroupValPtr.get(), aggregateGroupValPtr.getOffset(), - aggregateGroupValueBytes, 0, - aggregateGroupValueBytes.length); - byte[] finalValue = - ByteUtil.concat( - PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), - aggregateGroupValueBytes, aggregateArrayBytes); - aggResults.add( - PhoenixKeyValueUtil.newKeyValue( - lastScannedRowKey.get(), - lastScannedRowKey.getOffset(), - lastScannedRowKey.getLength(), - GROUPED_AGGREGATOR_VALUE_BYTES, - GROUPED_AGGREGATOR_VALUE_BYTES, - AGG_TIMESTAMP, - finalValue, - 0, 
- finalValue.length)); - } else { - aggResults.add( - PhoenixKeyValueUtil.newKeyValue( - aggregateGroupValPtr.get(), - aggregateGroupValPtr.getOffset(), - aggregateGroupValPtr.getLength(), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - AGG_TIMESTAMP, - aggregateArrayBytes, - 0, - aggregateArrayBytes.length)); - } - } - // scanner using the non spillable, memory-only implementation - return new BaseRegionScanner(s) { - private int index = 0; - - public boolean next(List result, ScannerContext scannerContext) - throws IOException { - return next(result); - } - - @Override - public void close() throws IOException { - try { - s.close(); - } finally { - InMemoryGroupByCache.this.close(); - } - } - - @Override - public boolean next(List results) throws IOException { - if (index >= aggResults.size()) { - return false; - } - results.add(aggResults.get(index)); - index++; - return index < aggResults.size(); - } - }; + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); } @Override - public void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey) { - aggregateValueToLastScannedRowKeys.put(value, rowKey); + public void close() throws IOException { + try { + s.close(); + } finally { + InMemoryGroupByCache.this.close(); + } } @Override - public long size() { - return aggregateMap.size(); + public boolean next(List results) throws IOException { + if (index >= aggResults.size()) { + return false; + } + results.add(aggResults.get(index)); + index++; + return index < aggResults.size(); } + }; + } + @Override + public void cacheAggregateRowKey(ImmutableBytesPtr value, ImmutableBytesPtr rowKey) { + aggregateValueToLastScannedRowKeys.put(value, rowKey); } - private static final class GroupByCacheFactory { - public static final GroupByCacheFactory INSTANCE = new GroupByCacheFactory(); + @Override + public long size() { + return aggregateMap.size(); + } - private GroupByCacheFactory() { - } + } - GroupByCache newCache(RegionCoprocessorEnvironment env, - ImmutableBytesPtr tenantId, - byte[] customAnnotations, - ServerAggregators aggregators, - int estDistVals, - boolean isIncompatibleClient) { - Configuration conf = env.getConfiguration(); - boolean spillableEnabled = - conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE); - if (spillableEnabled) { - return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals, - isIncompatibleClient); - } - return new InMemoryGroupByCache(env, tenantId, customAnnotations, aggregators, - estDistVals, isIncompatibleClient); - } - } + private static final class GroupByCacheFactory { + public static final GroupByCacheFactory INSTANCE = new GroupByCacheFactory(); - /** - * Used for an aggregate query in which the key order does not necessarily match the group by - * key order. In this case, we must collect all distinct groups within a region into a map, - * aggregating as we go. 
- */ - private static class UnorderedGroupByRegionScanner extends BaseRegionScanner { - private final Region region; - private final Pair minMaxQualifiers; - private final boolean useQualifierAsIndex; - private final PTable.QualifierEncodingScheme encodingScheme; - private final ServerAggregators aggregators; - private final long limit; - private final List expressions; - private final long pageSizeMs; - private RegionScanner regionScanner = null; - private final GroupByCache groupByCache; - private final Scan scan; - private final byte[] scanStartRowKey; - private final boolean includeStartRowKey; - private final byte[] actualScanStartRowKey; - private final boolean actualScanIncludeStartRowKey; - private boolean firstScan = true; - private boolean skipValidRowsSent = false; - private byte[] lastReturnedRowKey = null; + private GroupByCacheFactory() { + } - private UnorderedGroupByRegionScanner(final ObserverContext c, - final Scan scan, final RegionScanner scanner, final List expressions, - final ServerAggregators aggregators, final long limit, final long pageSizeMs) { - super(scanner); - this.region = c.getEnvironment().getRegion(); - this.scan = scan; - scanStartRowKey = - ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, region); - includeStartRowKey = scan.includeStartRow(); - // Retrieve start rowkey of the previous scan. This would be different than - // current scan start rowkey if the region has recently moved or split or merged. - this.actualScanStartRowKey = - scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); - this.actualScanIncludeStartRowKey = true; - this.aggregators = aggregators; - this.limit = limit; - this.pageSizeMs = pageSizeMs; - this.expressions = expressions; - RegionCoprocessorEnvironment env = c.getEnvironment(); - Configuration conf = env.getConfiguration(); - int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES); - byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserverConstants.ESTIMATED_DISTINCT_VALUES); - if (estDistValsBytes != null) { - // Allocate 1.5x estimation - estDistVals = Math.max(MIN_DISTINCT_VALUES, - (int) (Bytes.toInt(estDistValsBytes) * 1.5f)); - } + GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, + byte[] customAnnotations, ServerAggregators aggregators, int estDistVals, + boolean isIncompatibleClient) { + Configuration conf = env.getConfiguration(); + boolean spillableEnabled = + conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE); + if (spillableEnabled) { + return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals, + isIncompatibleClient); + } + return new InMemoryGroupByCache(env, tenantId, customAnnotations, aggregators, estDistVals, + isIncompatibleClient); + } + } + + /** + * Used for an aggregate query in which the key order does not necessarily match the group by key + * order. In this case, we must collect all distinct groups within a region into a map, + * aggregating as we go. 
+ */ + private static class UnorderedGroupByRegionScanner extends BaseRegionScanner { + private final Region region; + private final Pair minMaxQualifiers; + private final boolean useQualifierAsIndex; + private final PTable.QualifierEncodingScheme encodingScheme; + private final ServerAggregators aggregators; + private final long limit; + private final List expressions; + private final long pageSizeMs; + private RegionScanner regionScanner = null; + private final GroupByCache groupByCache; + private final Scan scan; + private final byte[] scanStartRowKey; + private final boolean includeStartRowKey; + private final byte[] actualScanStartRowKey; + private final boolean actualScanIncludeStartRowKey; + private boolean firstScan = true; + private boolean skipValidRowsSent = false; + private byte[] lastReturnedRowKey = null; + + private UnorderedGroupByRegionScanner(final ObserverContext c, + final Scan scan, final RegionScanner scanner, final List expressions, + final ServerAggregators aggregators, final long limit, final long pageSizeMs) { + super(scanner); + this.region = c.getEnvironment().getRegion(); + this.scan = scan; + scanStartRowKey = ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, region); + includeStartRowKey = scan.includeStartRow(); + // Retrieve start rowkey of the previous scan. This would be different than + // current scan start rowkey if the region has recently moved or split or merged. + this.actualScanStartRowKey = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); + this.actualScanIncludeStartRowKey = true; + this.aggregators = aggregators; + this.limit = limit; + this.pageSizeMs = pageSizeMs; + this.expressions = expressions; + RegionCoprocessorEnvironment env = c.getEnvironment(); + Configuration conf = env.getConfiguration(); + int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, + DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES); + byte[] estDistValsBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.ESTIMATED_DISTINCT_VALUES); + if (estDistValsBytes != null) { + // Allocate 1.5x estimation + estDistVals = Math.max(MIN_DISTINCT_VALUES, (int) (Bytes.toInt(estDistValsBytes) * 1.5f)); + } + + minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); + useQualifierAsIndex = EncodedColumnsUtil + .useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); + final boolean spillableEnabled = + conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE); + encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + groupByCache = GroupByCacheFactory.INSTANCE.newCache(env, ScanUtil.getTenantId(scan), + ScanUtil.getCustomAnnotations(scan), aggregators, estDistVals, isIncompatibleClient); + if (LOGGER.isDebugEnabled()) { + LOGGER + .debug( + LogUtil.addCustomAnnotations( + "Grouped aggregation over unordered rows with scan " + scan + ", group by " + + expressions + ", aggregators " + aggregators, + ScanUtil.getCustomAnnotations(scan))); + LOGGER.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, + ScanUtil.getCustomAnnotations(scan))); + } + } - minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); - useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); - final boolean spillableEnabled = conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, 
DEFAULT_GROUPBY_SPILLABLE); - encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - groupByCache = GroupByCacheFactory.INSTANCE.newCache( - env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan), - aggregators, estDistVals, isIncompatibleClient); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Grouped aggregation over unordered rows with scan " + scan - + ", group by " + expressions + ", aggregators " + aggregators, - ScanUtil.getCustomAnnotations(scan))); - LOGGER.debug(LogUtil.addCustomAnnotations( - "Spillable groupby enabled: " + spillableEnabled, - ScanUtil.getCustomAnnotations(scan))); - } - } + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); - } + @Override + public boolean next(List resultsToReturn) throws IOException { + return next(resultsToReturn, null); + } - @Override - public boolean next(List resultsToReturn) throws IOException { - return next(resultsToReturn, null); + @Override + public boolean next(List resultsToReturn, ScannerContext scannerContext) + throws IOException { + if (firstScan && actualScanStartRowKey != null) { + if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { + if (hasRegionMoved()) { + LOGGER.info( + "Region has moved.. Actual scan start rowkey {} is not same " + + "as current scan start rowkey {}", + Bytes.toStringBinary(actualScanStartRowKey), Bytes.toStringBinary(scanStartRowKey)); + // If region has moved in the middle of the scan operation, after resetting + // the scanner, hbase client uses (latest received rowkey + \x00) as new + // start rowkey for resuming the scan operation on the new scanner. + if ( + Bytes.compareTo(ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE), + scanStartRowKey) == 0 + ) { + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, + actualScanStartRowKey); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, + Bytes.toBytes(actualScanIncludeStartRowKey)); + } else { + // This happens when the server side scanner has already sent some + // rows back to the client and region has moved, so now we need to + // use skipValidRowsSent flag and also reset the scanner + // at paging region scanner level to re-read the previously sent + // values in order to re-compute the aggregation and then return + // only the next rowkey that was not yet sent back to the client. + skipValidRowsSent = true; + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, + actualScanStartRowKey); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, + Bytes.toBytes(actualScanIncludeStartRowKey)); + } + } } - - @Override - public boolean next(List resultsToReturn, ScannerContext scannerContext) - throws IOException { - if (firstScan && actualScanStartRowKey != null) { - if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { - if (hasRegionMoved()) { - LOGGER.info("Region has moved.. 
Actual scan start rowkey {} is not same " - + "as current scan start rowkey {}", - Bytes.toStringBinary(actualScanStartRowKey), - Bytes.toStringBinary(scanStartRowKey)); - // If region has moved in the middle of the scan operation, after resetting - // the scanner, hbase client uses (latest received rowkey + \x00) as new - // start rowkey for resuming the scan operation on the new scanner. - if (Bytes.compareTo( - ByteUtil.concat(actualScanStartRowKey, ByteUtil.ZERO_BYTE), - scanStartRowKey) == 0) { - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, - actualScanStartRowKey); - scan.setAttribute( - QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, - Bytes.toBytes(actualScanIncludeStartRowKey)); - } else { - // This happens when the server side scanner has already sent some - // rows back to the client and region has moved, so now we need to - // use skipValidRowsSent flag and also reset the scanner - // at paging region scanner level to re-read the previously sent - // values in order to re-compute the aggregation and then return - // only the next rowkey that was not yet sent back to the client. - skipValidRowsSent = true; - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, - actualScanStartRowKey); - scan.setAttribute( - QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, - Bytes.toBytes(actualScanIncludeStartRowKey)); - } - } - } + } + if (firstScan) { + firstScan = false; + } + boolean moreRows = nextInternal(resultsToReturn, scannerContext); + if (ScanUtil.isDummy(resultsToReturn)) { + return true; + } + if (skipValidRowsSent) { + while (true) { + if (!moreRows) { + skipValidRowsSent = false; + if (resultsToReturn.size() > 0) { + lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); } - if (firstScan) { - firstScan = false; + return moreRows; + } + Cell firstCell = resultsToReturn.get(0); + byte[] resultRowKey = new byte[firstCell.getRowLength()]; + System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), resultRowKey, 0, + resultRowKey.length); + // In case of regular scans, if the region moves and scanner is reset, + // hbase client checks the last returned row by the server, gets the + // rowkey and appends "\x00" byte, before resuming the scan. With this, + // scan includeStartRowKey is set to true. + // However, same is not the case with reverse scans. For the reverse scan, + // hbase client checks the last returned row by the server, gets the + // rowkey and treats it as startRowKey for resuming the scan. With this, + // scan includeStartRowKey is set to false. + // Hence, we need to cover both cases here. + if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) { + // This can be true for reverse scan case. + skipValidRowsSent = false; + if (includeStartRowKey) { + if (resultsToReturn.size() > 0) { + lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); + } + return moreRows; } - boolean moreRows = nextInternal(resultsToReturn, scannerContext); + // If includeStartRowKey is false and the current rowkey is matching + // with scanStartRowKey, return the next row result. 
+ resultsToReturn.clear(); + moreRows = nextInternal(resultsToReturn, scannerContext); if (ScanUtil.isDummy(resultsToReturn)) { - return true; - } - if (skipValidRowsSent) { - while (true) { - if (!moreRows) { - skipValidRowsSent = false; - if (resultsToReturn.size() > 0) { - lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); - } - return moreRows; - } - Cell firstCell = resultsToReturn.get(0); - byte[] resultRowKey = new byte[firstCell.getRowLength()]; - System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), - resultRowKey, 0, resultRowKey.length); - // In case of regular scans, if the region moves and scanner is reset, - // hbase client checks the last returned row by the server, gets the - // rowkey and appends "\x00" byte, before resuming the scan. With this, - // scan includeStartRowKey is set to true. - // However, same is not the case with reverse scans. For the reverse scan, - // hbase client checks the last returned row by the server, gets the - // rowkey and treats it as startRowKey for resuming the scan. With this, - // scan includeStartRowKey is set to false. - // Hence, we need to cover both cases here. - if (Bytes.compareTo(resultRowKey, scanStartRowKey) == 0) { - // This can be true for reverse scan case. - skipValidRowsSent = false; - if (includeStartRowKey) { - if (resultsToReturn.size() > 0) { - lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); - } - return moreRows; - } - // If includeStartRowKey is false and the current rowkey is matching - // with scanStartRowKey, return the next row result. - resultsToReturn.clear(); - moreRows = nextInternal(resultsToReturn, scannerContext); - if (ScanUtil.isDummy(resultsToReturn)) { - return true; - } - if (resultsToReturn.size() > 0) { - lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); - } - return moreRows; - } else if ( - Bytes.compareTo( - ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), - scanStartRowKey) == 0) { - // This can be true for regular scan case. - skipValidRowsSent = false; - if (includeStartRowKey) { - // If includeStartRowKey is true and the (current rowkey + "\0xx") is - // matching with scanStartRowKey, return the next row result. - resultsToReturn.clear(); - moreRows = nextInternal(resultsToReturn, scannerContext); - if (ScanUtil.isDummy(resultsToReturn)) { - return true; - } - if (resultsToReturn.size() > 0) { - lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); - } - return moreRows; - } - } - // In the loop, keep iterating through rows. - resultsToReturn.clear(); - moreRows = nextInternal(resultsToReturn, scannerContext); - if (ScanUtil.isDummy(resultsToReturn)) { - return true; - } - } + return true; } if (resultsToReturn.size() > 0) { - lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); + lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); } return moreRows; - } - - /** - * Perform the next operation to grab the next row's worth of values. - * - * @param resultsToReturn output list of cells that are read as part of this operation. - * @return true if more rows exist after this one, false if scanner is done. - * @throws IOException if something goes wrong. - */ - private boolean nextInternal(List resultsToReturn, ScannerContext scannerContext) - throws IOException { - boolean hasMore; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - long now; - Tuple result = useQualifierAsIndex ? 
new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); - boolean acquiredLock = false; - try { - region.startRegionOperation(); - acquiredLock = true; - synchronized (delegate) { - if (regionScanner != null) { - return regionScanner.next(resultsToReturn, scannerContext); - } - do { - List results = useQualifierAsIndex ? - new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), - minMaxQualifiers.getSecond(), encodingScheme) : - new ArrayList(); - // Results are potentially returned even when the return - // value of s.next is false - // since this is an indication of whether or not there are - // more values after the - // ones returned - hasMore = (scannerContext == null) - ? delegate.nextRaw(results) - : delegate.nextRaw(results, scannerContext); - if (!results.isEmpty()) { - if (isDummy(results)) { - return getDummyResult(resultsToReturn); - } - result.setKeyValues(results); - ImmutableBytesPtr key = - TupleUtil.getConcatenatedValue(result, expressions); - ImmutableBytesPtr originalRowKey = new ImmutableBytesPtr(); - result.getKey(originalRowKey); - Aggregator[] rowAggregators = groupByCache.cache(key); - groupByCache.cacheAggregateRowKey(key, originalRowKey); - // Aggregate values here - aggregators.aggregate(rowAggregators, result); - } - now = EnvironmentEdgeManager.currentTimeMillis(); - if (hasMore && groupByCache.size() < limit - && (now - startTime) >= pageSizeMs) { - return getDummyResult(resultsToReturn); - } - } while (hasMore && groupByCache.size() < limit); - regionScanner = groupByCache.getScanner(delegate); - // Do not sort here, but sort back on the client instead - // The reason is that if the scan ever extends beyond a region - // (which can happen if we're basing our parallelization split - // points on old metadata), we'll get incorrect query results. - return regionScanner.next(resultsToReturn); - } - } catch (Exception e) { - LOGGER.error("Unordered group-by scanner next encountered error for region {}", - region.getRegionInfo().getRegionNameAsString(), e); - if (e instanceof IOException) { - throw e; - } else { - throw new IOException(e); - } - } finally { - if (acquiredLock) region.closeRegionOperation(); + } else if ( + Bytes.compareTo(ByteUtil.concat(resultRowKey, ByteUtil.ZERO_BYTE), scanStartRowKey) == 0 + ) { + // This can be true for regular scan case. + skipValidRowsSent = false; + if (includeStartRowKey) { + // If includeStartRowKey is true and the (current rowkey + "\0xx") is + // matching with scanStartRowKey, return the next row result. + resultsToReturn.clear(); + moreRows = nextInternal(resultsToReturn, scannerContext); + if (ScanUtil.isDummy(resultsToReturn)) { + return true; + } + if (resultsToReturn.size() > 0) { + lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); + } + return moreRows; } + } + // In the loop, keep iterating through rows. + resultsToReturn.clear(); + moreRows = nextInternal(resultsToReturn, scannerContext); + if (ScanUtil.isDummy(resultsToReturn)) { + return true; + } } + } + if (resultsToReturn.size() > 0) { + lastReturnedRowKey = CellUtil.cloneRow(resultsToReturn.get(0)); + } + return moreRows; + } - /** - * Retrieve dummy rowkey and return to the client. - * - * @param resultsToReturn dummy cell. - * @return always true, because some rows are likely to exist as we are returning - * dummy result to the client. 
- */ - private boolean getDummyResult(List resultsToReturn) { - if (lastReturnedRowKey != null) { - ScanUtil.getDummyResult(lastReturnedRowKey, resultsToReturn); - return true; + /** + * Perform the next operation to grab the next row's worth of values. + * @param resultsToReturn output list of cells that are read as part of this operation. + * @return true if more rows exist after this one, false if scanner is done. + * @throws IOException if something goes wrong. + */ + private boolean nextInternal(List resultsToReturn, ScannerContext scannerContext) + throws IOException { + boolean hasMore; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long now; + Tuple result = + useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); + boolean acquiredLock = false; + try { + region.startRegionOperation(); + acquiredLock = true; + synchronized (delegate) { + if (regionScanner != null) { + return regionScanner.next(resultsToReturn, scannerContext); + } + do { + List results = useQualifierAsIndex + ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), + minMaxQualifiers.getSecond(), encodingScheme) + : new ArrayList(); + // Results are potentially returned even when the return + // value of s.next is false + // since this is an indication of whether or not there are + // more values after the + // ones returned + hasMore = (scannerContext == null) + ? delegate.nextRaw(results) + : delegate.nextRaw(results, scannerContext); + if (!results.isEmpty()) { + if (isDummy(results)) { + return getDummyResult(resultsToReturn); + } + result.setKeyValues(results); + ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, expressions); + ImmutableBytesPtr originalRowKey = new ImmutableBytesPtr(); + result.getKey(originalRowKey); + Aggregator[] rowAggregators = groupByCache.cache(key); + groupByCache.cacheAggregateRowKey(key, originalRowKey); + // Aggregate values here + aggregators.aggregate(rowAggregators, result); } - if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { - if (hasRegionMoved()) { - byte[] lastByte = - new byte[]{scanStartRowKey[scanStartRowKey.length - 1]}; - if (scanStartRowKey.length > 1 && Bytes.compareTo(lastByte, - ByteUtil.ZERO_BYTE) == 0) { - byte[] prevKey = new byte[scanStartRowKey.length - 1]; - System.arraycopy(scanStartRowKey, 0, prevKey, 0, - prevKey.length); - ScanUtil.getDummyResult(prevKey, resultsToReturn); - } else { - ScanUtil.getDummyResult(scanStartRowKey, - resultsToReturn); - } - } else { - ScanUtil.getDummyResult(scanStartRowKey, resultsToReturn); - } - } else { - ScanUtil.getDummyResult(scanStartRowKey, resultsToReturn); + now = EnvironmentEdgeManager.currentTimeMillis(); + if (hasMore && groupByCache.size() < limit && (now - startTime) >= pageSizeMs) { + return getDummyResult(resultsToReturn); } - return true; + } while (hasMore && groupByCache.size() < limit); + regionScanner = groupByCache.getScanner(delegate); + // Do not sort here, but sort back on the client instead + // The reason is that if the scan ever extends beyond a region + // (which can happen if we're basing our parallelization split + // points on old metadata), we'll get incorrect query results. + return regionScanner.next(resultsToReturn); } - - /** - * Return true if the region has moved in the middle of an ongoing scan operation, - * resulting in scanner reset. 
Based on the return value of this function, we need to - * either scan the region as if we are scanning for the first time or we need to scan - * the region considering that we have already returned some rows back to client and - * we need to resume from the last row that we returned to the client. - * - * @return true if the region has moved in the middle of an ongoing scan operation. - */ - private boolean hasRegionMoved() { - return Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 - || actualScanIncludeStartRowKey != includeStartRowKey; + } catch (Exception e) { + LOGGER.error("Unordered group-by scanner next encountered error for region {}", + region.getRegionInfo().getRegionNameAsString(), e); + if (e instanceof IOException) { + throw e; + } else { + throw new IOException(e); } + } finally { + if (acquiredLock) region.closeRegionOperation(); + } + } - @Override - public void close() throws IOException { - if (regionScanner != null) { - regionScanner.close(); - } else { - Closeables.closeQuietly(groupByCache); - } + /** + * Retrieve dummy rowkey and return to the client. + * @param resultsToReturn dummy cell. + * @return always true, because some rows are likely to exist as we are returning dummy result + * to the client. + */ + private boolean getDummyResult(List resultsToReturn) { + if (lastReturnedRowKey != null) { + ScanUtil.getDummyResult(lastReturnedRowKey, resultsToReturn); + return true; + } + if (scanStartRowKey.length > 0 && !ScanUtil.isLocalIndex(scan)) { + if (hasRegionMoved()) { + byte[] lastByte = new byte[] { scanStartRowKey[scanStartRowKey.length - 1] }; + if (scanStartRowKey.length > 1 && Bytes.compareTo(lastByte, ByteUtil.ZERO_BYTE) == 0) { + byte[] prevKey = new byte[scanStartRowKey.length - 1]; + System.arraycopy(scanStartRowKey, 0, prevKey, 0, prevKey.length); + ScanUtil.getDummyResult(prevKey, resultsToReturn); + } else { + ScanUtil.getDummyResult(scanStartRowKey, resultsToReturn); + } + } else { + ScanUtil.getDummyResult(scanStartRowKey, resultsToReturn); } + } else { + ScanUtil.getDummyResult(scanStartRowKey, resultsToReturn); + } + return true; } /** - * Used for an aggregate query in which the key order match the group by key order. In this - * case, we can do the aggregation as we scan, by detecting when the group by key changes. + * Return true if the region has moved in the middle of an ongoing scan operation, resulting in + * scanner reset. Based on the return value of this function, we need to either scan the region + * as if we are scanning for the first time or we need to scan the region considering that we + * have already returned some rows back to client and we need to resume from the last row that + * we returned to the client. + * @return true if the region has moved in the middle of an ongoing scan operation. 
*/ - private static class OrderedGroupByRegionScanner extends BaseRegionScanner { - private final Scan scan; - private final Region region; - private final Pair minMaxQualifiers; - private final boolean useQualifierAsIndex; - private final PTable.QualifierEncodingScheme encodingScheme; - private final ServerAggregators aggregators; - private final long limit; - private final List expressions; - private final long pageSizeMs; - private long rowCount = 0; - private ImmutableBytesPtr currentKey = null; - private final ImmutableBytesPtr currentKeyRowKey = new ImmutableBytesPtr(); - private final boolean isIncompatibleClient; - private final byte[] initStartRowKey; - private final boolean includeInitStartRowKey; - private byte[] previousResultRowKey; + private boolean hasRegionMoved() { + return Bytes.compareTo(actualScanStartRowKey, scanStartRowKey) != 0 + || actualScanIncludeStartRowKey != includeStartRowKey; + } - private OrderedGroupByRegionScanner(final ObserverContext c, - final Scan scan, final RegionScanner scanner, final List expressions, - final ServerAggregators aggregators, final long limit, final long pageSizeMs) { - super(scanner); - this.scan = scan; - isIncompatibleClient = ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - this.aggregators = aggregators; - this.limit = limit; - this.pageSizeMs = pageSizeMs; - this.expressions = expressions; - region = c.getEnvironment().getRegion(); - minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); - useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers); - encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); - initStartRowKey = ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, - region); - includeInitStartRowKey = scan.includeStartRow(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Grouped aggregation over ordered rows with scan " + scan + ", group by " - + expressions + ", aggregators " + aggregators, - ScanUtil.getCustomAnnotations(scan))); - } - } + @Override + public void close() throws IOException { + if (regionScanner != null) { + regionScanner.close(); + } else { + Closeables.closeQuietly(groupByCache); + } + } + } + + /** + * Used for an aggregate query in which the key order match the group by key order. In this case, + * we can do the aggregation as we scan, by detecting when the group by key changes. 
+ */ + private static class OrderedGroupByRegionScanner extends BaseRegionScanner { + private final Scan scan; + private final Region region; + private final Pair minMaxQualifiers; + private final boolean useQualifierAsIndex; + private final PTable.QualifierEncodingScheme encodingScheme; + private final ServerAggregators aggregators; + private final long limit; + private final List expressions; + private final long pageSizeMs; + private long rowCount = 0; + private ImmutableBytesPtr currentKey = null; + private final ImmutableBytesPtr currentKeyRowKey = new ImmutableBytesPtr(); + private final boolean isIncompatibleClient; + private final byte[] initStartRowKey; + private final boolean includeInitStartRowKey; + private byte[] previousResultRowKey; + + private OrderedGroupByRegionScanner(final ObserverContext c, + final Scan scan, final RegionScanner scanner, final List expressions, + final ServerAggregators aggregators, final long limit, final long pageSizeMs) { + super(scanner); + this.scan = scan; + isIncompatibleClient = ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + this.aggregators = aggregators; + this.limit = limit; + this.pageSizeMs = pageSizeMs; + this.expressions = expressions; + region = c.getEnvironment().getRegion(); + minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); + useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers); + encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); + initStartRowKey = ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, region); + includeInitStartRowKey = scan.includeStartRow(); + if (LOGGER.isDebugEnabled()) { + LOGGER + .debug( + LogUtil.addCustomAnnotations( + "Grouped aggregation over ordered rows with scan " + scan + ", group by " + + expressions + ", aggregators " + aggregators, + ScanUtil.getCustomAnnotations(scan))); + } + } - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); - } + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } - @Override - public boolean next(List results) throws IOException { - return next(results, null); - } - @Override - public boolean next(List results, ScannerContext scannerContext) throws IOException { - boolean hasMore; - boolean atLimit; - boolean aggBoundary = false; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - long now; - Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); - ImmutableBytesPtr key = null; - Aggregator[] rowAggregators = aggregators.getAggregators(); - // If we're calculating no aggregate functions, we can exit at the - // start of a new row. Otherwise, we have to wait until an agg - int countOffset = rowAggregators.length == 0 ? 1 : 0; - boolean acquiredLock = false; - try { - region.startRegionOperation(); - acquiredLock = true; - synchronized (delegate) { - do { - List kvs = useQualifierAsIndex ? - new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), - minMaxQualifiers.getSecond(), encodingScheme) : - new ArrayList(); - // Results are potentially returned even when the return - // value of s.next is false - // since this is an indication of whether or not there - // are more values after the - // ones returned - hasMore = (scannerContext == null) - ? 
delegate.nextRaw(kvs) - : delegate.nextRaw(kvs, scannerContext); - if (!kvs.isEmpty()) { - if (isDummy(kvs)) { - updateDummyWithPrevRowKey(results, initStartRowKey, - includeInitStartRowKey, scan); - return true; - } - result.setKeyValues(kvs); - key = TupleUtil.getConcatenatedValue(result, expressions); - aggBoundary = currentKey != null && currentKey.compareTo(key) != 0; - if (!aggBoundary) { - aggregators.aggregate(rowAggregators, result); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations( - "Row passed filters: " + kvs - + ", aggregated values: " - + Arrays.asList(rowAggregators), - ScanUtil.getCustomAnnotations(scan))); - } - currentKey = key; - if (result.size() > 0) { - result.getKey(currentKeyRowKey); - } - } - } - atLimit = rowCount + countOffset >= limit; - // Do rowCount + 1 b/c we don't have to wait for a complete - // row in the case of a DISTINCT with a LIMIT - now = EnvironmentEdgeManager.currentTimeMillis(); - } while (hasMore && !aggBoundary && !atLimit && (now - startTime) < pageSizeMs); + @Override + public boolean next(List results) throws IOException { + return next(results, null); + } + + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { + boolean hasMore; + boolean atLimit; + boolean aggBoundary = false; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long now; + Tuple result = + useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); + ImmutableBytesPtr key = null; + Aggregator[] rowAggregators = aggregators.getAggregators(); + // If we're calculating no aggregate functions, we can exit at the + // start of a new row. Otherwise, we have to wait until an agg + int countOffset = rowAggregators.length == 0 ? 1 : 0; + boolean acquiredLock = false; + try { + region.startRegionOperation(); + acquiredLock = true; + synchronized (delegate) { + do { + List kvs = useQualifierAsIndex + ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), + minMaxQualifiers.getSecond(), encodingScheme) + : new ArrayList(); + // Results are potentially returned even when the return + // value of s.next is false + // since this is an indication of whether or not there + // are more values after the + // ones returned + hasMore = (scannerContext == null) + ? 
delegate.nextRaw(kvs) + : delegate.nextRaw(kvs, scannerContext); + if (!kvs.isEmpty()) { + if (isDummy(kvs)) { + updateDummyWithPrevRowKey(results, initStartRowKey, includeInitStartRowKey, scan); + return true; + } + result.setKeyValues(kvs); + key = TupleUtil.getConcatenatedValue(result, expressions); + aggBoundary = currentKey != null && currentKey.compareTo(key) != 0; + if (!aggBoundary) { + aggregators.aggregate(rowAggregators, result); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations("Row passed filters: " + kvs + + ", aggregated values: " + Arrays.asList(rowAggregators), + ScanUtil.getCustomAnnotations(scan))); } - } catch (Exception e) { - LOGGER.error("Ordered group-by scanner next encountered error for region {}", - region.getRegionInfo().getRegionNameAsString(), e); - if (e instanceof IOException) { - throw e; - } else { - throw new IOException(e); + currentKey = key; + if (result.size() > 0) { + result.getKey(currentKeyRowKey); } - } finally { - if (acquiredLock) region.closeRegionOperation(); + } } - try { - if (hasMore && !aggBoundary && !atLimit && (now - startTime) >= pageSizeMs) { - updateDummyWithPrevRowKey(results, initStartRowKey, - includeInitStartRowKey, scan); - return true; - } - if (currentKey != null) { - if (!isIncompatibleClient) { - byte[] aggregateArrayBytes = aggregators.toBytes(rowAggregators); - byte[] aggregateGroupValueBytes = new byte[currentKey.getLength()]; - System.arraycopy(currentKey.get(), currentKey.getOffset(), - aggregateGroupValueBytes, 0, - aggregateGroupValueBytes.length); - byte[] finalValue = - ByteUtil.concat( - PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), - aggregateGroupValueBytes, aggregateArrayBytes); - Cell keyValue = - PhoenixKeyValueUtil.newKeyValue( - currentKeyRowKey.get(), - currentKeyRowKey.getOffset(), - currentKeyRowKey.getLength(), - GROUPED_AGGREGATOR_VALUE_BYTES, - GROUPED_AGGREGATOR_VALUE_BYTES, - AGG_TIMESTAMP, - finalValue, - 0, - finalValue.length); - results.add(keyValue); - } else { - byte[] value = aggregators.toBytes(rowAggregators); - Cell keyValue = - PhoenixKeyValueUtil.newKeyValue( - currentKey.get(), - currentKey.getOffset(), - currentKey.getLength(), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - AGG_TIMESTAMP, - value, - 0, - value.length); - results.add(keyValue); - } - // If we're at an aggregation boundary, reset the - // aggregators and - // aggregate with the current result (which is not a part of - // the returned result). 
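          // Illustrative sketch only, not part of this patch: the streaming GROUP BY idea used
          // by OrderedGroupByRegionScanner. Because rows arrive already ordered by the GROUP BY
          // key, a group can be emitted as soon as the concatenated key changes, so only one
          // group's aggregators are held in memory at a time. orderedRows and
          // emitAggregatedRow(...) are hypothetical stand-ins; the Phoenix utilities are the
          // same ones used in the surrounding code.
          // private void streamOrderedGroupBy(Iterable<Tuple> orderedRows) throws IOException {
          //   ImmutableBytesPtr currentKey = null;
          //   Aggregator[] rowAggregators = aggregators.getAggregators();
          //   for (Tuple row : orderedRows) {
          //     ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(row, expressions);
          //     boolean aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
          //     if (aggBoundary) {
          //       emitAggregatedRow(currentKey, rowAggregators); // hypothetical sink
          //       aggregators.reset(rowAggregators);
          //     }
          //     aggregators.aggregate(rowAggregators, row);
          //     currentKey = key;
          //   }
          //   if (currentKey != null) {
          //     emitAggregatedRow(currentKey, rowAggregators); // flush the final group
          //   }
          // }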
- if (aggBoundary) { - aggregators.reset(rowAggregators); - aggregators.aggregate(rowAggregators, result); - currentKey = key; - if (result.size() > 0) { - result.getKey(currentKeyRowKey); - } - rowCount++; - atLimit |= rowCount >= limit; - } - } - // Continue if there are more - if (!atLimit && (hasMore || aggBoundary)) { - if (!results.isEmpty()) { - previousResultRowKey = CellUtil.cloneRow(results.get(results.size() - 1)); - } - return true; - } - currentKey = null; - return false; - } catch (Exception e) { - LOGGER.error("Ordered group-by scanner next encountered some issue for" - + " region {}", region.getRegionInfo().getRegionNameAsString(), e); - if (e instanceof IOException) { - throw e; - } else { - throw new IOException(e); - } + atLimit = rowCount + countOffset >= limit; + // Do rowCount + 1 b/c we don't have to wait for a complete + // row in the case of a DISTINCT with a LIMIT + now = EnvironmentEdgeManager.currentTimeMillis(); + } while (hasMore && !aggBoundary && !atLimit && (now - startTime) < pageSizeMs); + } + } catch (Exception e) { + LOGGER.error("Ordered group-by scanner next encountered error for region {}", + region.getRegionInfo().getRegionNameAsString(), e); + if (e instanceof IOException) { + throw e; + } else { + throw new IOException(e); + } + } finally { + if (acquiredLock) region.closeRegionOperation(); + } + try { + if (hasMore && !aggBoundary && !atLimit && (now - startTime) >= pageSizeMs) { + updateDummyWithPrevRowKey(results, initStartRowKey, includeInitStartRowKey, scan); + return true; + } + if (currentKey != null) { + if (!isIncompatibleClient) { + byte[] aggregateArrayBytes = aggregators.toBytes(rowAggregators); + byte[] aggregateGroupValueBytes = new byte[currentKey.getLength()]; + System.arraycopy(currentKey.get(), currentKey.getOffset(), aggregateGroupValueBytes, 0, + aggregateGroupValueBytes.length); + byte[] finalValue = + ByteUtil.concat(PInteger.INSTANCE.toBytes(aggregateGroupValueBytes.length), + aggregateGroupValueBytes, aggregateArrayBytes); + Cell keyValue = + PhoenixKeyValueUtil.newKeyValue(currentKeyRowKey.get(), currentKeyRowKey.getOffset(), + currentKeyRowKey.getLength(), GROUPED_AGGREGATOR_VALUE_BYTES, + GROUPED_AGGREGATOR_VALUE_BYTES, AGG_TIMESTAMP, finalValue, 0, finalValue.length); + results.add(keyValue); + } else { + byte[] value = aggregators.toBytes(rowAggregators); + Cell keyValue = PhoenixKeyValueUtil.newKeyValue(currentKey.get(), + currentKey.getOffset(), currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, + AGG_TIMESTAMP, value, 0, value.length); + results.add(keyValue); + } + // If we're at an aggregation boundary, reset the + // aggregators and + // aggregate with the current result (which is not a part of + // the returned result). 
+ if (aggBoundary) { + aggregators.reset(rowAggregators); + aggregators.aggregate(rowAggregators, result); + currentKey = key; + if (result.size() > 0) { + result.getKey(currentKeyRowKey); } + rowCount++; + atLimit |= rowCount >= limit; + } + } + // Continue if there are more + if (!atLimit && (hasMore || aggBoundary)) { + if (!results.isEmpty()) { + previousResultRowKey = CellUtil.cloneRow(results.get(results.size() - 1)); + } + return true; } + currentKey = null; + return false; + } catch (Exception e) { + LOGGER.error("Ordered group-by scanner next encountered some issue for" + " region {}", + region.getRegionInfo().getRegionNameAsString(), e); + if (e instanceof IOException) { + throw e; + } else { + throw new IOException(e); + } + } + } - /** - * Add dummy cell to the result list based on either the previous rowkey returned to the - * client or the start rowkey and start rowkey include params. - * - * @param result result to add the dummy cell to. - * @param initStartRowKey scan start rowkey. - * @param includeInitStartRowKey scan start rowkey included. - * @param scan scan object. - */ - private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey, - boolean includeInitStartRowKey, Scan scan) { - result.clear(); - if (previousResultRowKey != null) { - getDummyResult(previousResultRowKey, result); + /** + * Add dummy cell to the result list based on either the previous rowkey returned to the client + * or the start rowkey and start rowkey include params. + * @param result result to add the dummy cell to. + * @param initStartRowKey scan start rowkey. + * @param includeInitStartRowKey scan start rowkey included. + * @param scan scan object. + */ + private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey, + boolean includeInitStartRowKey, Scan scan) { + result.clear(); + if (previousResultRowKey != null) { + getDummyResult(previousResultRowKey, result); + } else { + if (includeInitStartRowKey && initStartRowKey.length > 0) { + byte[] prevKey; + // In order to generate largest possible rowkey that is less than + // initStartRowKey, we need to check size of the region name that can be + // used by hbase client for meta lookup, in case meta cache is expired at + // client. + // Once we know regionLookupInMetaLen, use it to generate largest possible + // rowkey that is lower than initStartRowKey by using + // ByteUtil#previousKeyWithLength function, which appends "\\xFF" bytes to + // prev rowkey upto the length provided. e.g. for the given key + // "\\x01\\xC1\\x06", the previous key with length 5 would be + // "\\x01\\xC1\\x05\\xFF\\xFF" by padding 2 bytes "\\xFF". + // The length of the largest scan start rowkey should not exceed + // HConstants#MAX_ROW_LENGTH. + int regionLookupInMetaLen = + RegionInfo.createRegionName(region.getTableDescriptor().getTableName(), new byte[1], + HConstants.NINES, false).length; + if ( + Bytes.compareTo(initStartRowKey, initStartRowKey.length - 1, 1, ByteUtil.ZERO_BYTE, 0, + 1) == 0 + ) { + // If initStartRowKey has last byte as "\\x00", we can discard the last + // byte and send the key as dummy rowkey. 
+ prevKey = new byte[initStartRowKey.length - 1]; + System.arraycopy(initStartRowKey, 0, prevKey, 0, prevKey.length); + } else + if (initStartRowKey.length < (HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen)) { + prevKey = ByteUtil.previousKeyWithLength( + ByteUtil.concat(initStartRowKey, + new byte[HConstants.MAX_ROW_LENGTH - initStartRowKey.length - 1 + - regionLookupInMetaLen]), + HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen); } else { - if (includeInitStartRowKey && initStartRowKey.length > 0) { - byte[] prevKey; - // In order to generate largest possible rowkey that is less than - // initStartRowKey, we need to check size of the region name that can be - // used by hbase client for meta lookup, in case meta cache is expired at - // client. - // Once we know regionLookupInMetaLen, use it to generate largest possible - // rowkey that is lower than initStartRowKey by using - // ByteUtil#previousKeyWithLength function, which appends "\\xFF" bytes to - // prev rowkey upto the length provided. e.g. for the given key - // "\\x01\\xC1\\x06", the previous key with length 5 would be - // "\\x01\\xC1\\x05\\xFF\\xFF" by padding 2 bytes "\\xFF". - // The length of the largest scan start rowkey should not exceed - // HConstants#MAX_ROW_LENGTH. - int regionLookupInMetaLen = - RegionInfo.createRegionName(region.getTableDescriptor().getTableName(), - new byte[1], HConstants.NINES, false).length; - if (Bytes.compareTo(initStartRowKey, initStartRowKey.length - 1, - 1, ByteUtil.ZERO_BYTE, 0, 1) == 0) { - // If initStartRowKey has last byte as "\\x00", we can discard the last - // byte and send the key as dummy rowkey. - prevKey = new byte[initStartRowKey.length - 1]; - System.arraycopy(initStartRowKey, 0, prevKey, 0, prevKey.length); - } else if (initStartRowKey.length < (HConstants.MAX_ROW_LENGTH - 1 - - regionLookupInMetaLen)) { - prevKey = ByteUtil.previousKeyWithLength(ByteUtil.concat(initStartRowKey, - new byte[HConstants.MAX_ROW_LENGTH - initStartRowKey.length - - 1 - regionLookupInMetaLen]), - HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen); - } else { - prevKey = initStartRowKey; - } - getDummyResult(prevKey, result); - } else { - getDummyResult(initStartRowKey, result); - } + prevKey = initStartRowKey; } + getDummyResult(prevKey, result); + } else { + getDummyResult(initStartRowKey, result); } - + } } - @Override - protected boolean isRegionObserverFor(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS) != null || - scan.getAttribute(BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS) != null; - } + } + + @Override + protected boolean isRegionObserverFor(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS) + != null + || scan.getAttribute(BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS) + != null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java index 6a2b3459da6..590e953b4db 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,10 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.util.ScanUtil.getDummyResult; +import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForRegionScanner; +import static org.apache.phoenix.util.ScanUtil.isDummy; + import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; @@ -43,8 +47,8 @@ import org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.KeyValueColumnExpression; -import org.apache.phoenix.iterate.RegionScannerFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.iterate.RegionScannerFactory; import org.apache.phoenix.join.HashJoinInfo; import org.apache.phoenix.parse.JoinTableNode.JoinType; import org.apache.phoenix.schema.IllegalDataException; @@ -59,366 +63,355 @@ import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.TupleUtil; -import static org.apache.phoenix.util.ScanUtil.getDummyResult; -import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForRegionScanner; -import static org.apache.phoenix.util.ScanUtil.isDummy; - public class HashJoinRegionScanner implements RegionScanner { - private final RegionScanner scanner; - private final TupleProjector projector; - private final HashJoinInfo joinInfo; - private final RegionCoprocessorEnvironment env; - private Queue resultQueue; - private boolean hasMore; - private long count; - private long limit; - private HashCache[] hashCaches; - private List[] tempTuples; - private ValueBitSet tempDestBitSet; - private ValueBitSet[] tempSrcBitSet; - private final boolean useQualifierAsListIndex; - private final boolean useNewValueColumnQualifier; - private final boolean addArrayCell; - private final long pageSizeMs; - - @SuppressWarnings("unchecked") - public HashJoinRegionScanner(RegionScanner scanner, Scan scan, TupleProjector projector, - HashJoinInfo joinInfo, ImmutableBytesPtr tenantId, - RegionCoprocessorEnvironment env, boolean useQualifierAsIndex, - boolean useNewValueColumnQualifier) - throws IOException { - - this(env, scanner, scan, null, null, projector, joinInfo, - tenantId, useQualifierAsIndex, useNewValueColumnQualifier); + private final RegionScanner scanner; + private final TupleProjector projector; + private final HashJoinInfo joinInfo; + private final RegionCoprocessorEnvironment env; + private Queue resultQueue; + private boolean hasMore; + private long count; + private long limit; + private HashCache[] hashCaches; + private List[] tempTuples; + private ValueBitSet tempDestBitSet; + private ValueBitSet[] tempSrcBitSet; + private final boolean useQualifierAsListIndex; + private final boolean useNewValueColumnQualifier; + private final boolean addArrayCell; + private final long pageSizeMs; + + @SuppressWarnings("unchecked") + public HashJoinRegionScanner(RegionScanner scanner, Scan scan, TupleProjector projector, + HashJoinInfo joinInfo, ImmutableBytesPtr tenantId, RegionCoprocessorEnvironment env, + boolean useQualifierAsIndex, boolean useNewValueColumnQualifier) throws IOException { + + this(env, scanner, scan, null, null, projector, joinInfo, tenantId, useQualifierAsIndex, + 
useNewValueColumnQualifier); + } + + @SuppressWarnings("unchecked") + public HashJoinRegionScanner(RegionCoprocessorEnvironment env, RegionScanner scanner, Scan scan, + final Set arrayKVRefs, final Expression[] arrayFuncRefs, + TupleProjector projector, HashJoinInfo joinInfo, ImmutableBytesPtr tenantId, + boolean useQualifierAsIndex, boolean useNewValueColumnQualifier) throws IOException { + + this.env = env; + this.scanner = scanner; + this.projector = projector; + this.joinInfo = joinInfo; + this.resultQueue = new LinkedList(); + this.hasMore = true; + this.count = 0; + this.limit = Long.MAX_VALUE; + for (JoinType type : joinInfo.getJoinTypes()) { + if ( + type != JoinType.Inner && type != JoinType.Left && type != JoinType.Semi + && type != JoinType.Anti + ) throw new DoNotRetryIOException( + "Got join type '" + type + "'. Expect only INNER or LEFT with hash-joins."); + } + if (joinInfo.getLimit() != null) { + this.limit = joinInfo.getLimit(); + } + int count = joinInfo.getJoinIds().length; + this.tempTuples = new List[count]; + this.hashCaches = new HashCache[count]; + this.tempSrcBitSet = new ValueBitSet[count]; + TenantCache cache = GlobalCache.getTenantCache(env, tenantId); + for (int i = 0; i < count; i++) { + ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i]; + if (joinId.getLength() == 0) { // semi-join optimized into skip-scan + hashCaches[i] = null; + tempSrcBitSet[i] = null; + tempTuples[i] = null; + continue; + } + HashCache hashCache = (HashCache) cache.getServerCache(joinId); + if (hashCache == null) { + Exception cause = new HashJoinCacheNotFoundException(Bytes.toLong(joinId.get())); + throw new DoNotRetryIOException(cause.getMessage(), cause); + } + + hashCaches[i] = hashCache; + tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]); + } + if (this.projector != null) { + this.tempDestBitSet = ValueBitSet.newInstance(joinInfo.getJoinedSchema()); + this.projector.setValueBitSet(tempDestBitSet); + } + this.useQualifierAsListIndex = useQualifierAsIndex; + this.useNewValueColumnQualifier = useNewValueColumnQualifier; + this.addArrayCell = (arrayFuncRefs != null && arrayFuncRefs.length > 0 && arrayKVRefs != null + && arrayKVRefs.size() > 0); + this.pageSizeMs = getPageSizeMsForRegionScanner(scan); + } + + private void processResults(List result, boolean hasBatchLimit) throws IOException { + if (result.isEmpty()) return; + Tuple tuple = useQualifierAsListIndex + ? new PositionBasedResultTuple(result) + : new ResultTuple(Result.create(result)); + boolean projected = false; + + // For backward compatibility. In new versions, HashJoinInfo.forceProjection() + // always returns true. 
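    // Illustrative sketch only, not part of this patch: the per-join decision that
    // processResults() applies below, with the early-evaluation and skip-scan-optimized cache
    // checks omitted for brevity. For each join id, the probe-side key is looked up in the
    // server-side hash cache; INNER and SEMI joins require a match, ANTI joins require a miss,
    // and LEFT joins pass the row through either way (matching build-side tuples are merged in
    // afterwards). matches is typed List<Tuple> here for illustration.
    // boolean keepRow = true;
    // for (int i = 0; i < joinInfo.getJoinIds().length; i++) {
    //   ImmutableBytesPtr key =
    //     TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
    //   List<Tuple> matches = hashCaches[i].get(key);
    //   JoinType type = joinInfo.getJoinTypes()[i];
    //   if ((type == JoinType.Inner || type == JoinType.Semi) && matches == null) {
    //     keepRow = false; // no build-side row: inner/semi join drops this row
    //     break;
    //   }
    //   if (type == JoinType.Anti && matches != null) {
    //     keepRow = false; // build-side row exists: anti join drops this row
    //     break;
    //   }
    // }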
+ if (joinInfo.forceProjection()) { + tuple = projector.projectResults(tuple, useNewValueColumnQualifier); + projected = true; } - @SuppressWarnings("unchecked") - public HashJoinRegionScanner(RegionCoprocessorEnvironment env, RegionScanner scanner, Scan scan, - final Set arrayKVRefs, - final Expression[] arrayFuncRefs, TupleProjector projector, - HashJoinInfo joinInfo, ImmutableBytesPtr tenantId, - boolean useQualifierAsIndex, boolean useNewValueColumnQualifier) - throws IOException { - - this.env = env; - this.scanner = scanner; - this.projector = projector; - this.joinInfo = joinInfo; - this.resultQueue = new LinkedList(); - this.hasMore = true; - this.count = 0; - this.limit = Long.MAX_VALUE; - for (JoinType type : joinInfo.getJoinTypes()) { - if (type != JoinType.Inner && type != JoinType.Left && type != JoinType.Semi && type != JoinType.Anti) - throw new DoNotRetryIOException("Got join type '" + type + "'. Expect only INNER or LEFT with hash-joins."); - } - if (joinInfo.getLimit() != null) { - this.limit = joinInfo.getLimit(); - } - int count = joinInfo.getJoinIds().length; - this.tempTuples = new List[count]; - this.hashCaches = new HashCache[count]; - this.tempSrcBitSet = new ValueBitSet[count]; - TenantCache cache = GlobalCache.getTenantCache(env, tenantId); + // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well. + if (hasBatchLimit) + throw new UnsupportedOperationException("Cannot support join operations in scans with limit"); + + int count = joinInfo.getJoinIds().length; + boolean cont = true; + for (int i = 0; i < count; i++) { + if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null) continue; + ImmutableBytesPtr key = + TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]); + tempTuples[i] = hashCaches[i].get(key); + JoinType type = joinInfo.getJoinTypes()[i]; + if ( + ((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) + || (type == JoinType.Anti && tempTuples[i] != null) + ) { + cont = false; + break; + } + } + if (cont) { + if (projector == null) { + int dup = 1; for (int i = 0; i < count; i++) { - ImmutableBytesPtr joinId = joinInfo.getJoinIds()[i]; - if (joinId.getLength() == 0) { // semi-join optimized into skip-scan - hashCaches[i] = null; - tempSrcBitSet[i] = null; - tempTuples[i] = null; - continue; - } - HashCache hashCache = (HashCache)cache.getServerCache(joinId); - if (hashCache == null) { - Exception cause = new HashJoinCacheNotFoundException(Bytes.toLong(joinId.get())); - throw new DoNotRetryIOException(cause.getMessage(), cause); - } - - hashCaches[i] = hashCache; - tempSrcBitSet[i] = ValueBitSet.newInstance(joinInfo.getSchemas()[i]); + dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size()); } - if (this.projector != null) { - this.tempDestBitSet = ValueBitSet.newInstance(joinInfo.getJoinedSchema()); - this.projector.setValueBitSet(tempDestBitSet); + for (int i = 0; i < dup; i++) { + offerResult(tuple, projected, result); } - this.useQualifierAsListIndex = useQualifierAsIndex; - this.useNewValueColumnQualifier = useNewValueColumnQualifier; - this.addArrayCell = (arrayFuncRefs != null && arrayFuncRefs.length > 0 && - arrayKVRefs != null && arrayKVRefs.size() > 0); - this.pageSizeMs = getPageSizeMsForRegionScanner(scan); - } - - private void processResults(List result, boolean hasBatchLimit) throws IOException { - if (result.isEmpty()) - return; - Tuple tuple = useQualifierAsListIndex ? 
new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result)); - boolean projected = false; - - // For backward compatibility. In new versions, HashJoinInfo.forceProjection() - // always returns true. - if (joinInfo.forceProjection()) { - tuple = projector.projectResults(tuple, useNewValueColumnQualifier); - projected = true; + } else { + KeyValueSchema schema = joinInfo.getJoinedSchema(); + if (!joinInfo.forceProjection()) { // backward compatibility + tuple = projector.projectResults(tuple, useNewValueColumnQualifier); + projected = true; } - - // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well. - if (hasBatchLimit) - throw new UnsupportedOperationException("Cannot support join operations in scans with limit"); - - int count = joinInfo.getJoinIds().length; - boolean cont = true; + offerResult(tuple, projected, result); for (int i = 0; i < count; i++) { - if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null) - continue; - ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]); - tempTuples[i] = hashCaches[i].get(key); - JoinType type = joinInfo.getJoinTypes()[i]; - if (((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) - || (type == JoinType.Anti && tempTuples[i] != null)) { - cont = false; - break; - } - } - if (cont) { - if (projector == null) { - int dup = 1; - for (int i = 0; i < count; i++) { - dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size()); - } - for (int i = 0; i < dup; i++) { - offerResult(tuple, projected, result); - } - } else { - KeyValueSchema schema = joinInfo.getJoinedSchema(); - if (!joinInfo.forceProjection()) { // backward compatibility - tuple = projector.projectResults(tuple, useNewValueColumnQualifier); - projected = true; - } - offerResult(tuple, projected, result); - for (int i = 0; i < count; i++) { - boolean earlyEvaluation = joinInfo.earlyEvaluation()[i]; - JoinType type = joinInfo.getJoinTypes()[i]; - if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti)) - continue; - int j = resultQueue.size(); - while (j-- > 0) { - Tuple lhs = resultQueue.poll(); - if (!earlyEvaluation) { - ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]); - tempTuples[i] = hashCaches[i].get(key); - if (tempTuples[i] == null) { - if (type == JoinType.Inner || type == JoinType.Semi) { - continue; - } else if (type == JoinType.Anti) { - offerResult(lhs, projected, result); - continue; - } - } - } - if (tempTuples[i] == null) { - Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? - lhs : mergeProjectedValue( - lhs, schema, tempDestBitSet, null, - joinInfo.getSchemas()[i], tempSrcBitSet[i], - joinInfo.getFieldPositions()[i]); - offerResult(joined, projected, result); - continue; - } - for (Tuple t : tempTuples[i]) { - Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? 
- lhs : mergeProjectedValue( - lhs, schema, tempDestBitSet, t, - joinInfo.getSchemas()[i], tempSrcBitSet[i], - joinInfo.getFieldPositions()[i]); - offerResult(joined, projected, result); - } - } + boolean earlyEvaluation = joinInfo.earlyEvaluation()[i]; + JoinType type = joinInfo.getJoinTypes()[i]; + if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti)) continue; + int j = resultQueue.size(); + while (j-- > 0) { + Tuple lhs = resultQueue.poll(); + if (!earlyEvaluation) { + ImmutableBytesPtr key = + TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]); + tempTuples[i] = hashCaches[i].get(key); + if (tempTuples[i] == null) { + if (type == JoinType.Inner || type == JoinType.Semi) { + continue; + } else if (type == JoinType.Anti) { + offerResult(lhs, projected, result); + continue; } + } } - // apply post-join filter - Expression postFilter = joinInfo.getPostJoinFilterExpression(); - if (postFilter != null) { - for (Iterator iter = resultQueue.iterator(); iter.hasNext();) { - Tuple t = iter.next(); - postFilter.reset(); - ImmutableBytesPtr tempPtr = new ImmutableBytesPtr(); - try { - if (!postFilter.evaluate(t, tempPtr) || tempPtr.getLength() == 0) { - iter.remove(); - continue; - } - } catch (IllegalDataException e) { - iter.remove(); - continue; - } - Boolean b = (Boolean)postFilter.getDataType().toObject(tempPtr); - if (!Boolean.TRUE.equals(b)) { - iter.remove(); - } - } + if (tempTuples[i] == null) { + Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET + ? lhs + : mergeProjectedValue(lhs, schema, tempDestBitSet, null, joinInfo.getSchemas()[i], + tempSrcBitSet[i], joinInfo.getFieldPositions()[i]); + offerResult(joined, projected, result); + continue; } + for (Tuple t : tempTuples[i]) { + Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET + ? lhs + : mergeProjectedValue(lhs, schema, tempDestBitSet, t, joinInfo.getSchemas()[i], + tempSrcBitSet[i], joinInfo.getFieldPositions()[i]); + offerResult(joined, projected, result); + } + } } - } - - private boolean shouldAdvance() { - if (!resultQueue.isEmpty()) - return false; - - return hasMore; - } - - private boolean nextInQueue(List results) { - if (resultQueue.isEmpty()) { - return false; - } - - Tuple tuple = resultQueue.poll(); - for (int i = 0; i < tuple.size(); i++) { - results.add(tuple.getValue(i)); + } + // apply post-join filter + Expression postFilter = joinInfo.getPostJoinFilterExpression(); + if (postFilter != null) { + for (Iterator iter = resultQueue.iterator(); iter.hasNext();) { + Tuple t = iter.next(); + postFilter.reset(); + ImmutableBytesPtr tempPtr = new ImmutableBytesPtr(); + try { + if (!postFilter.evaluate(t, tempPtr) || tempPtr.getLength() == 0) { + iter.remove(); + continue; + } + } catch (IllegalDataException e) { + iter.remove(); + continue; + } + Boolean b = (Boolean) postFilter.getDataType().toObject(tempPtr); + if (!Boolean.TRUE.equals(b)) { + iter.remove(); + } } - return (count++ < limit) && (resultQueue.isEmpty() ? 
hasMore : true); + } } + } - @Override - public long getMvccReadPoint() { - return scanner.getMvccReadPoint(); - } + private boolean shouldAdvance() { + if (!resultQueue.isEmpty()) return false; - @Override - public RegionInfo getRegionInfo() { - return scanner.getRegionInfo(); - } - - @Override - public boolean isFilterDone() throws IOException { - return scanner.isFilterDone() && resultQueue.isEmpty(); - } + return hasMore; + } - @Override - public boolean nextRaw(List result) throws IOException { - return next(result, true, null); + private boolean nextInQueue(List results) { + if (resultQueue.isEmpty()) { + return false; } - @Override - public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { - return next(result, true, scannerContext); + Tuple tuple = resultQueue.poll(); + for (int i = 0; i < tuple.size(); i++) { + results.add(tuple.getValue(i)); } - - - private boolean next(List result, boolean raw, ScannerContext scannerContext) throws IOException { - try { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (shouldAdvance()) { - if (scannerContext != null) { - hasMore = raw - ? scanner.nextRaw(result, scannerContext) - : scanner.next(result, scannerContext); - } else { - hasMore = raw ? scanner.nextRaw(result) : scanner.next(result); - } - if (isDummy(result)) { - return true; - } - if (result.isEmpty()) { - return hasMore; - } - Cell cell = result.get(0); - processResults(result, false); - if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { - byte[] rowKey = CellUtil.cloneRow(cell); - result.clear(); - getDummyResult(rowKey, result); - return true; - } - result.clear(); - } - - return nextInQueue(result); - } catch (Throwable t) { - ClientUtil.throwIOException(env.getRegion().getRegionInfo().getRegionNameAsString(), t); - return false; // impossible + return (count++ < limit) && (resultQueue.isEmpty() ? hasMore : true); + } + + @Override + public long getMvccReadPoint() { + return scanner.getMvccReadPoint(); + } + + @Override + public RegionInfo getRegionInfo() { + return scanner.getRegionInfo(); + } + + @Override + public boolean isFilterDone() throws IOException { + return scanner.isFilterDone() && resultQueue.isEmpty(); + } + + @Override + public boolean nextRaw(List result) throws IOException { + return next(result, true, null); + } + + @Override + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + return next(result, true, scannerContext); + } + + private boolean next(List result, boolean raw, ScannerContext scannerContext) + throws IOException { + try { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + while (shouldAdvance()) { + if (scannerContext != null) { + hasMore = + raw ? scanner.nextRaw(result, scannerContext) : scanner.next(result, scannerContext); + } else { + hasMore = raw ? 
scanner.nextRaw(result) : scanner.next(result); } - } - - @Override - public boolean reseek(byte[] row) throws IOException { - return scanner.reseek(row); - } + if (isDummy(result)) { + return true; + } + if (result.isEmpty()) { + return hasMore; + } + Cell cell = result.get(0); + processResults(result, false); + if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { + byte[] rowKey = CellUtil.cloneRow(cell); + result.clear(); + getDummyResult(rowKey, result); + return true; + } + result.clear(); + } - @Override - public void close() throws IOException { - scanner.close(); + return nextInQueue(result); + } catch (Throwable t) { + ClientUtil.throwIOException(env.getRegion().getRegionInfo().getRegionNameAsString(), t); + return false; // impossible } - - @Override - public boolean next(List result) throws IOException { - return next(result, false, null); + } + + @Override + public boolean reseek(byte[] row) throws IOException { + return scanner.reseek(row); + } + + @Override + public void close() throws IOException { + scanner.close(); + } + + @Override + public boolean next(List result) throws IOException { + return next(result, false, null); + } + + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result, false, scannerContext); + } + + @Override + public long getMaxResultSize() { + return this.scanner.getMaxResultSize(); + } + + @Override + public int getBatch() { + return this.scanner.getBatch(); + } + + // PHOENIX-4791 Propagate array element cell through hash join + private void offerResult(Tuple tuple, boolean projected, List result) { + if (!projected || !addArrayCell) { + resultQueue.offer(tuple); + return; } - @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result, false, scannerContext); + Cell projectedCell = tuple.getValue(0); + int arrayCellPosition = RegionScannerFactory.getArrayCellPosition(result); + Cell arrayCell = result.get(arrayCellPosition); + + List cells = new ArrayList(2); + cells.add(projectedCell); + cells.add(arrayCell); + MultiKeyValueTuple multi = new MultiKeyValueTuple(cells); + resultQueue.offer(multi); + } + + // PHOENIX-4917 Merge array element cell through hash join. + // Merge into first cell, then reattach array cell. 
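  // Illustrative sketch only, not part of this patch: the paging contract used throughout
  // these scanners, including in next() above. If a call has spent more than pageSizeMs
  // without finishing a real row, the scanner clears the output, adds a single "dummy" cell
  // keyed at the row it was working on and returns true; the calling layer detects it with
  // ScanUtil.isDummy(...) and simply calls next() again, keeping the scanner alive instead of
  // letting the RPC time out. hasMoreWorkToDo() and processOneRow() are hypothetical helpers.
  // private boolean nextWithPaging(List<Cell> results) throws IOException {
  //   long startTime = EnvironmentEdgeManager.currentTimeMillis();
  //   while (hasMoreWorkToDo()) {
  //     byte[] currentRowKey = processOneRow(results);
  //     if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) {
  //       results.clear();
  //       ScanUtil.getDummyResult(currentRowKey, results); // same utility used above
  //       return true; // "more rows": the caller resumes on the next call
  //     }
  //   }
  //   return false;
  // }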
+ private Tuple mergeProjectedValue(Tuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet, + Tuple src, KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset) throws IOException { + + if (dest instanceof ProjectedValueTuple) { + return TupleProjector.mergeProjectedValue((ProjectedValueTuple) dest, destBitSet, src, + srcBitSet, offset, useNewValueColumnQualifier); } - @Override - public long getMaxResultSize() { - return this.scanner.getMaxResultSize(); - } + ProjectedValueTuple first = projector.projectResults(new SingleKeyValueTuple(dest.getValue(0))); + ProjectedValueTuple merged = TupleProjector.mergeProjectedValue(first, destBitSet, src, + srcBitSet, offset, useNewValueColumnQualifier); - @Override - public int getBatch() { - return this.scanner.getBatch(); + int size = dest.size(); + if (size == 1) { + return merged; } - // PHOENIX-4791 Propagate array element cell through hash join - private void offerResult(Tuple tuple, boolean projected, List result) { - if (!projected || !addArrayCell) { - resultQueue.offer(tuple); - return; - } - - Cell projectedCell = tuple.getValue(0); - int arrayCellPosition = RegionScannerFactory.getArrayCellPosition(result); - Cell arrayCell = result.get(arrayCellPosition); - - List cells = new ArrayList(2); - cells.add(projectedCell); - cells.add(arrayCell); - MultiKeyValueTuple multi = new MultiKeyValueTuple(cells); - resultQueue.offer(multi); - } - - // PHOENIX-4917 Merge array element cell through hash join. - // Merge into first cell, then reattach array cell. - private Tuple mergeProjectedValue( - Tuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet, Tuple src, - KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset) - throws IOException { - - if (dest instanceof ProjectedValueTuple) { - return TupleProjector.mergeProjectedValue( - (ProjectedValueTuple) dest, destBitSet, src, - srcBitSet, offset, useNewValueColumnQualifier); - } - - ProjectedValueTuple first = projector.projectResults( - new SingleKeyValueTuple(dest.getValue(0))); - ProjectedValueTuple merged = TupleProjector.mergeProjectedValue( - first, destBitSet, src, - srcBitSet, offset, useNewValueColumnQualifier); - - int size = dest.size(); - if (size == 1) { - return merged; - } - - List cells = new ArrayList(size); - cells.add(merged.getValue(0)); - for (int i = 1; i < size; i++) { - cells.add(dest.getValue(i)); - } - MultiKeyValueTuple multi = new MultiKeyValueTuple(cells); - return multi; + List cells = new ArrayList(size); + cells.add(merged.getValue(0)); + for (int i = 1; i < size; i++) { + cells.add(dest.getValue(i)); } + MultiKeyValueTuple multi = new MultiKeyValueTuple(cells); + return multi; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java index 4fd416d5054..c48d168a143 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,19 +26,15 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; - import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.regionserver.ScannerContext; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -48,399 +44,418 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.query.HBaseFactoryProvider; -import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.hbase.index.parallel.Task; import org.apache.phoenix.hbase.index.parallel.TaskBatch; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.index.GlobalIndexChecker; import org.apache.phoenix.mapreduce.index.IndexTool; +import org.apache.phoenix.query.HBaseFactoryProvider; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.util.ServerUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This is a data table region scanner which scans data table rows locally. From the data table rows, expected - * index table mutations are generated. These expected index mutations are used for both rebuilding index table - * rows and also verifying them. The HBase client available to region servers are used to update or verify index - * table rows. + * This is a data table region scanner which scans data table rows locally. From the data table + * rows, expected index table mutations are generated. These expected index mutations are used for + * both rebuilding index table rows and also verifying them. The HBase client available to region + * servers are used to update or verify index table rows. 
*/ public class IndexRebuildRegionScanner extends GlobalIndexRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildRegionScanner.class); - private static boolean ignoreIndexRebuildForTesting = false; - private static boolean throwExceptionForRebuild = false; - public static void setIgnoreIndexRebuildForTesting(boolean ignore) { ignoreIndexRebuildForTesting = ignore; } - public static void setThrowExceptionForRebuild(boolean throwException) { throwExceptionForRebuild = throwException; } - private int singleRowRebuildReturnCode; + private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildRegionScanner.class); + private static boolean ignoreIndexRebuildForTesting = false; + private static boolean throwExceptionForRebuild = false; + public static void setIgnoreIndexRebuildForTesting(boolean ignore) { + ignoreIndexRebuildForTesting = ignore; + } - @VisibleForTesting - public IndexRebuildRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) - throws IOException { - super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); + public static void setThrowExceptionForRebuild(boolean throwException) { + throwExceptionForRebuild = throwException; + } - indexHTable = hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); - if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { - indexTableTTL = ScanUtil.getTTL(scan); - } else { - indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); - } - indexRowKeyforReadRepair = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY); - if (indexRowKeyforReadRepair != null) { - setReturnCodeForSingleRowRebuild(); - pageSizeInRows = 1; - return; - } - try (org.apache.hadoop.hbase.client.Connection connection = - HBaseFactoryProvider.getHConnectionFactory().createConnection(env.getConfiguration())) { - regionEndKeys = connection.getRegionLocator(indexHTable.getName()).getEndKeys(); - } - } + private int singleRowRebuildReturnCode; + @VisibleForTesting + public IndexRebuildRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, + final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException { + super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); - - private void setReturnCodeForSingleRowRebuild() throws IOException { - try (RegionScanner scanner = region.getScanner(scan)) { - List row = new ArrayList<>(); - scanner.next(row); - // Check if the data table row we have just scanned matches with the index row key. - // If not, there is no need to build the index row from this data table row, - // and just return zero row count. 
- if (row.isEmpty()) { - singleRowRebuildReturnCode = GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue(); - } else { - Put put = new Put(CellUtil.cloneRow(row.get(0))); - for (Cell cell : row) { - put.add(cell); - } - if (indexMaintainer.checkIndexRow(indexRowKeyforReadRepair, put)) { - singleRowRebuildReturnCode = GlobalIndexChecker.RebuildReturnCode.INDEX_ROW_EXISTS.getValue(); - } else { - singleRowRebuildReturnCode = GlobalIndexChecker.RebuildReturnCode.NO_INDEX_ROW.getValue(); - } - } - } + indexHTable = + hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); + if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { + indexTableTTL = ScanUtil.getTTL(scan); + } else { + indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); } - - - protected void commitBatch(List indexUpdates) throws IOException, InterruptedException { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - indexHTable.batch(indexUpdates, null); + indexRowKeyforReadRepair = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY); + if (indexRowKeyforReadRepair != null) { + setReturnCodeForSingleRowRebuild(); + pageSizeInRows = 1; + return; } + try (org.apache.hadoop.hbase.client.Connection connection = + HBaseFactoryProvider.getHConnectionFactory().createConnection(env.getConfiguration())) { + regionEndKeys = connection.getRegionLocator(indexHTable.getName()).getEndKeys(); + } + } - protected void rebuildIndexRows(Map> indexMutationMap, - List indexRowsToBeDeleted, - IndexToolVerificationResult verificationResult) throws IOException { - if (ignoreIndexRebuildForTesting) { - return; + private void setReturnCodeForSingleRowRebuild() throws IOException { + try (RegionScanner scanner = region.getScanner(scan)) { + List row = new ArrayList<>(); + scanner.next(row); + // Check if the data table row we have just scanned matches with the index row key. + // If not, there is no need to build the index row from this data table row, + // and just return zero row count. + if (row.isEmpty()) { + singleRowRebuildReturnCode = GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue(); + } else { + Put put = new Put(CellUtil.cloneRow(row.get(0))); + for (Cell cell : row) { + put.add(cell); } - if (throwExceptionForRebuild) { - throw new IOException("Exception for testing. 
Something happened"); + if (indexMaintainer.checkIndexRow(indexRowKeyforReadRepair, put)) { + singleRowRebuildReturnCode = + GlobalIndexChecker.RebuildReturnCode.INDEX_ROW_EXISTS.getValue(); + } else { + singleRowRebuildReturnCode = GlobalIndexChecker.RebuildReturnCode.NO_INDEX_ROW.getValue(); } - updateIndexRows(indexMutationMap, indexRowsToBeDeleted, verificationResult); + } } + } + protected void commitBatch(List indexUpdates) throws IOException, InterruptedException { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + indexHTable.batch(indexUpdates, null); + } - private Map> populateActualIndexMutationMap(Map> expectedIndexMutationMap) throws IOException { - Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - Scan indexScan = prepareIndexScan(expectedIndexMutationMap); - try (ResultScanner resultScanner = indexHTable.getScanner(indexScan)) { - for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List mutationList = prepareActualIndexMutations(result); - actualIndexMutationMap.put(result.getRow(), mutationList); - } - } catch (Throwable t) { - ClientUtil.throwIOException(indexHTable.getName().toString(), t); - } - return actualIndexMutationMap; + protected void rebuildIndexRows(Map> indexMutationMap, + List indexRowsToBeDeleted, IndexToolVerificationResult verificationResult) + throws IOException { + if (ignoreIndexRebuildForTesting) { + return; } + if (throwExceptionForRebuild) { + throw new IOException("Exception for testing. Something happened"); + } + updateIndexRows(indexMutationMap, indexRowsToBeDeleted, verificationResult); + } - - private void rebuildAndOrVerifyIndexRows(Map> expectedIndexMutationMap, - Set mostRecentIndexRowKeys, - IndexToolVerificationResult verificationResult) throws IOException { - List indexRowsToBeDeleted = new ArrayList<>(); - if (verifyType == IndexTool.IndexVerifyType.NONE) { - rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - return; - } - if (verifyType == IndexTool.IndexVerifyType.ONLY) { - Map> actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, Collections.EMPTY_LIST, verificationResult.getBefore(), true); - return; - } - if (verifyType == IndexTool.IndexVerifyType.BEFORE) { - Map> actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, indexRowsToBeDeleted, verificationResult.getBefore(), true); - if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { - rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - } - return; - } - if (verifyType == IndexTool.IndexVerifyType.AFTER) { - rebuildIndexRows(expectedIndexMutationMap, Collections.EMPTY_LIST, verificationResult); - Map> actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, Collections.EMPTY_LIST, verificationResult.getAfter(), false); - return; - } - if (verifyType == IndexTool.IndexVerifyType.BOTH) { - Map> actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap,expectedIndexMutationMap, mostRecentIndexRowKeys, 
indexRowsToBeDeleted, verificationResult.getBefore(), true); - if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { - rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - } - if (!expectedIndexMutationMap.isEmpty()) { - actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, Collections.EMPTY_LIST, verificationResult.getAfter(), false); - } - } + private Map> populateActualIndexMutationMap( + Map> expectedIndexMutationMap) throws IOException { + Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + Scan indexScan = prepareIndexScan(expectedIndexMutationMap); + try (ResultScanner resultScanner = indexHTable.getScanner(indexScan)) { + for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List mutationList = prepareActualIndexMutations(result); + actualIndexMutationMap.put(result.getRow(), mutationList); + } + } catch (Throwable t) { + ClientUtil.throwIOException(indexHTable.getName().toString(), t); } + return actualIndexMutationMap; + } - private void addRebuildAndOrVerifyTask(TaskBatch tasks, - final Map> indexMutationMap, - final Set mostRecentIndexRowKeys, - final IndexToolVerificationResult verificationResult) { - tasks.add(new Task() { - @Override - public Boolean call() throws Exception { - try { - //in HBase 1.x we could check if the coproc environment was closed or aborted, - //but in HBase 2.x the coproc environment can't check region server services - if (Thread.currentThread().isInterrupted()) { - exceptionMessage = "Pool closed, not attempting to rebuild and/or verify index rows! 
" + indexHTable.getName(); - throw new IOException(exceptionMessage); - } - rebuildAndOrVerifyIndexRows(indexMutationMap, mostRecentIndexRowKeys, verificationResult); - } catch (Exception e) { - throw e; - } - return Boolean.TRUE; - } - }); + private void rebuildAndOrVerifyIndexRows(Map> expectedIndexMutationMap, + Set mostRecentIndexRowKeys, IndexToolVerificationResult verificationResult) + throws IOException { + List indexRowsToBeDeleted = new ArrayList<>(); + if (verifyType == IndexTool.IndexVerifyType.NONE) { + rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + return; + } + if (verifyType == IndexTool.IndexVerifyType.ONLY) { + Map> actualIndexMutationMap = + populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, + Collections.EMPTY_LIST, verificationResult.getBefore(), true); + return; + } + if (verifyType == IndexTool.IndexVerifyType.BEFORE) { + Map> actualIndexMutationMap = + populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, + indexRowsToBeDeleted, verificationResult.getBefore(), true); + if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { + rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + } + return; + } + if (verifyType == IndexTool.IndexVerifyType.AFTER) { + rebuildIndexRows(expectedIndexMutationMap, Collections.EMPTY_LIST, verificationResult); + Map> actualIndexMutationMap = + populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, + Collections.EMPTY_LIST, verificationResult.getAfter(), false); + return; + } + if (verifyType == IndexTool.IndexVerifyType.BOTH) { + Map> actualIndexMutationMap = + populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, + indexRowsToBeDeleted, verificationResult.getBefore(), true); + if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { + rebuildIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + } + if (!expectedIndexMutationMap.isEmpty()) { + actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, mostRecentIndexRowKeys, + Collections.EMPTY_LIST, verificationResult.getAfter(), false); + } } + } - public static List>> getPerTaskIndexMutationMaps( - TreeMap> indexMutationMap, byte[][] endKeys, int maxMapSize) { - List>> mapList = new ArrayList<>(); - int regionCount = endKeys.length; - int regionIndex = 0; - byte[] indexKey = indexMutationMap.firstKey(); - Map> perTaskIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - mapList.add(perTaskIndexMutationMap); - // Find the region including the first index key - while (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) { - regionIndex++; + private void addRebuildAndOrVerifyTask(TaskBatch tasks, + final Map> indexMutationMap, final Set mostRecentIndexRowKeys, + final IndexToolVerificationResult verificationResult) { + tasks.add(new Task() { + @Override + public Boolean call() throws Exception { + try { + // in HBase 1.x we could check if the coproc environment was closed or aborted, + // but in HBase 2.x the coproc 
environment can't check region server services + if (Thread.currentThread().isInterrupted()) { + exceptionMessage = "Pool closed, not attempting to rebuild and/or verify index rows! " + + indexHTable.getName(); + throw new IOException(exceptionMessage); + } + rebuildAndOrVerifyIndexRows(indexMutationMap, mostRecentIndexRowKeys, verificationResult); + } catch (Exception e) { + throw e; } - for (Map.Entry> entry: indexMutationMap.entrySet()) { - indexKey = entry.getKey(); - if (perTaskIndexMutationMap.size() == maxMapSize || - (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0)) { - perTaskIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - mapList.add(perTaskIndexMutationMap); - // Find the region including indexKey - while (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) { - regionIndex++; - } - } - perTaskIndexMutationMap.put(indexKey, entry.getValue()); + return Boolean.TRUE; + } + }); + } + + public static List>> getPerTaskIndexMutationMaps( + TreeMap> indexMutationMap, byte[][] endKeys, int maxMapSize) { + List>> mapList = new ArrayList<>(); + int regionCount = endKeys.length; + int regionIndex = 0; + byte[] indexKey = indexMutationMap.firstKey(); + Map> perTaskIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + mapList.add(perTaskIndexMutationMap); + // Find the region including the first index key + while ( + regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0 + ) { + regionIndex++; + } + for (Map.Entry> entry : indexMutationMap.entrySet()) { + indexKey = entry.getKey(); + if ( + perTaskIndexMutationMap.size() == maxMapSize || (regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) + ) { + perTaskIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + mapList.add(perTaskIndexMutationMap); + // Find the region including indexKey + while ( + regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0 + ) { + regionIndex++; } - return mapList; + } + perTaskIndexMutationMap.put(indexKey, entry.getValue()); } + return mapList; + } - private void verifyAndOrRebuildIndex(Map> indexMutationMap, - Set mostRecentIndexRowKeys) throws IOException { - if (indexMutationMap.size() == 0) { - return; - } - List>> mapList = getPerTaskIndexMutationMaps((TreeMap)indexMutationMap, - regionEndKeys, rowCountPerTask); - int taskCount = mapList.size(); - TaskBatch tasks = new TaskBatch<>(taskCount); - List verificationResultList = new ArrayList<>(taskCount); - for (int i = 0; i < taskCount; i++) { - IndexToolVerificationResult perTaskVerificationResult = new IndexToolVerificationResult(scan); - verificationResultList.add(perTaskVerificationResult); - addRebuildAndOrVerifyTask(tasks, mapList.get(i), mostRecentIndexRowKeys, perTaskVerificationResult); - } - submitTasks(tasks); - if (verify) { - for (IndexToolVerificationResult result : verificationResultList) { - verificationResult.add(result); - } - } + private void verifyAndOrRebuildIndex(Map> indexMutationMap, + Set mostRecentIndexRowKeys) throws IOException { + if (indexMutationMap.size() == 0) { + return; + } + List>> mapList = + getPerTaskIndexMutationMaps((TreeMap) indexMutationMap, regionEndKeys, rowCountPerTask); + int taskCount = mapList.size(); + TaskBatch tasks = new TaskBatch<>(taskCount); + List verificationResultList = new ArrayList<>(taskCount); + for (int i = 0; i < taskCount; 
i++) { + IndexToolVerificationResult perTaskVerificationResult = new IndexToolVerificationResult(scan); + verificationResultList.add(perTaskVerificationResult); + addRebuildAndOrVerifyTask(tasks, mapList.get(i), mostRecentIndexRowKeys, + perTaskVerificationResult); + } + submitTasks(tasks); + if (verify) { + for (IndexToolVerificationResult result : verificationResultList) { + verificationResult.add(result); + } } + } - @Override - public boolean next(List results) throws IOException { - if (indexRowKeyforReadRepair != null && - singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) { - byte[] rowCountBytes = - PLong.INSTANCE.toBytes((long) singleRowRebuildReturnCode); - byte[] rowKey; - byte[] startKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - region.getRegionInfo().getStartKey(); - byte[] endKey = scan.getStopRow().length > 0 ? scan.getStopRow() : - region.getRegionInfo().getEndKey(); - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - if (!isIncompatibleClient) { - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.includeStartRow()) { - rowKey = startKey; - } else if (scan.includeStopRow()) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } - } else { - rowKey = UNGROUPED_AGG_ROW_KEY; - } - final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - results.add(aggKeyValue); - return false; + @Override + public boolean next(List results) throws IOException { + if ( + indexRowKeyforReadRepair != null + && singleRowRebuildReturnCode == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue() + ) { + byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) singleRowRebuildReturnCode); + byte[] rowKey; + byte[] startKey = + scan.getStartRow().length > 0 ? scan.getStartRow() : region.getRegionInfo().getStartKey(); + byte[] endKey = + scan.getStopRow().length > 0 ? 
scan.getStopRow() : region.getRegionInfo().getEndKey(); + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + if (!isIncompatibleClient) { + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if (scan.includeStartRow()) { + rowKey = startKey; + } else if (scan.includeStopRow()) { + rowKey = endKey; + } else { + rowKey = HConstants.EMPTY_END_ROW; + } } - Map> indexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - Set mostRecentIndexRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - Cell lastCell = null; - int dataRowCount = 0; - int indexMutationCount = 0; - region.startRegionOperation(); - RegionScanner localScanner = null; - try { - localScanner = getLocalScanner(); - if (localScanner == null) { - return false; + } else { + rowKey = UNGROUPED_AGG_ROW_KEY; + } + final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, + SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + results.add(aggKeyValue); + return false; + } + Map> indexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + Set mostRecentIndexRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + Cell lastCell = null; + int dataRowCount = 0; + int indexMutationCount = 0; + region.startRegionOperation(); + RegionScanner localScanner = null; + try { + localScanner = getLocalScanner(); + if (localScanner == null) { + return false; + } + synchronized (localScanner) { + if (!shouldVerify()) { + skipped = true; + return false; + } + do { + /* + * If region is closing and there are large number of rows being verified/rebuilt with + * IndexTool, not having this check will impact/delay the region closing -- affecting the + * availability as this method holds the read lock on the region. + */ + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List row = new ArrayList<>(); + hasMore = localScanner.nextRaw(row); + if (!row.isEmpty()) { + lastCell = row.get(0); // lastCell is any cell from the last visited row + if (isDummy(row)) { + break; } - synchronized (localScanner) { - if (!shouldVerify()) { - skipped = true; - return false; + Put put = null; + Delete del = null; + for (Cell cell : row) { + if (cell.getTimestamp() < minTimestamp && indexMaintainer.isCDCIndex()) { + continue; + } + if (cell.getType().equals(Cell.Type.Put)) { + if (familyMap != null && !isColumnIncluded(cell)) { + continue; } - do { - /* - If region is closing and there are large number of rows being verified/rebuilt with IndexTool, - not having this check will impact/delay the region closing -- affecting the availability - as this method holds the read lock on the region. 
- * */ - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List row = new ArrayList<>(); - hasMore = localScanner.nextRaw(row); - if (!row.isEmpty()) { - lastCell = row.get(0); // lastCell is any cell from the last visited row - if (isDummy(row)) { - break; - } - Put put = null; - Delete del = null; - for (Cell cell : row) { - if (cell.getTimestamp() < minTimestamp - && indexMaintainer.isCDCIndex()) { - continue; - } - if (cell.getType().equals(Cell.Type.Put)) { - if (familyMap != null && !isColumnIncluded(cell)) { - continue; - } - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - } - del.add(cell); - } - } - if (put == null && del == null) { - continue; - } - indexMutationCount += prepareIndexMutations(put, del, indexMutationMap, mostRecentIndexRowKeys); - dataRowCount++; - } - } while (hasMore && indexMutationCount < pageSizeInRows - && dataRowCount < pageSizeInRows); - if (!indexMutationMap.isEmpty()) { - if (indexRowKeyforReadRepair != null) { - rebuildIndexRows(indexMutationMap, Collections.EMPTY_LIST, verificationResult); - } else { - verifyAndOrRebuildIndex(indexMutationMap, mostRecentIndexRowKeys); - } + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); } - if (verify) { - verificationResult.setScannedDataRowCount(verificationResult.getScannedDataRowCount() + dataRowCount); + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); } + del.add(cell); + } } - } catch (Throwable e) { - LOGGER.error("Exception in IndexRebuildRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - this.shouldRetry = true; - throw e; - } finally { - region.closeRegionOperation(); - if (localScanner!=null && localScanner!=innerScanner) { - localScanner.close(); + if (put == null && del == null) { + continue; } + indexMutationCount += + prepareIndexMutations(put, del, indexMutationMap, mostRecentIndexRowKeys); + dataRowCount++; + } + } while (hasMore && indexMutationCount < pageSizeInRows && dataRowCount < pageSizeInRows); + if (!indexMutationMap.isEmpty()) { + if (indexRowKeyforReadRepair != null) { + rebuildIndexRows(indexMutationMap, Collections.EMPTY_LIST, verificationResult); + } else { + verifyAndOrRebuildIndex(indexMutationMap, mostRecentIndexRowKeys); + } } - if (indexRowKeyforReadRepair != null) { - dataRowCount = singleRowRebuildReturnCode; - } - if (minTimestamp != 0) { - nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell)); + if (verify) { + verificationResult + .setScannedDataRowCount(verificationResult.getScannedDataRowCount() + dataRowCount); } - byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) dataRowCount); - final Cell aggKeyValue; - if (lastCell == null) { - byte[] rowKey; - byte[] startKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - region.getRegionInfo().getStartKey(); - byte[] endKey = scan.getStopRow().length > 0 ? 
scan.getStopRow() : - region.getRegionInfo().getEndKey(); - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - if (!isIncompatibleClient) { - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.includeStartRow()) { - rowKey = startKey; - } else if (scan.includeStopRow()) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } - } else { - rowKey = UNGROUPED_AGG_ROW_KEY; - } - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - } else { - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + } + } catch (Throwable e) { + LOGGER.error("Exception in IndexRebuildRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + this.shouldRetry = true; + throw e; + } finally { + region.closeRegionOperation(); + if (localScanner != null && localScanner != innerScanner) { + localScanner.close(); + } + } + if (indexRowKeyforReadRepair != null) { + dataRowCount = singleRowRebuildReturnCode; + } + if (minTimestamp != 0) { + nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell)); + } + byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) dataRowCount); + final Cell aggKeyValue; + if (lastCell == null) { + byte[] rowKey; + byte[] startKey = + scan.getStartRow().length > 0 ? scan.getStartRow() : region.getRegionInfo().getStartKey(); + byte[] endKey = + scan.getStopRow().length > 0 ? scan.getStopRow() : region.getRegionInfo().getEndKey(); + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + if (!isIncompatibleClient) { + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if (scan.includeStartRow()) { + rowKey = startKey; + } else if (scan.includeStopRow()) { + rowKey = endKey; + } else { + rowKey = HConstants.EMPTY_END_ROW; + } } - results.add(aggKeyValue); - return hasMore || hasMoreIncr; + } else { + rowKey = UNGROUPED_AGG_ROW_KEY; + } + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, + AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + } else { + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); } + results.add(aggKeyValue); + return hasMore || hasMoreIncr; + } - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); - } + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java index ef444c1c8d6..87680061bcf 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,11 +33,9 @@ import java.util.TreeMap; import java.util.TreeSet; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.regionserver.ScannerContext; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -48,445 +46,465 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.query.HBaseFactoryProvider; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.compile.ScanRanges; import org.apache.phoenix.filter.SkipScanFilter; import org.apache.phoenix.hbase.index.parallel.Task; import org.apache.phoenix.hbase.index.parallel.TaskBatch; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.mapreduce.index.IndexTool; +import org.apache.phoenix.query.HBaseFactoryProvider; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.ScanUtil; -import org.apache.phoenix.util.ServerUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This is an index table region scanner which scans index table rows locally and then extracts data table row keys - * from them. Using the data table row keys, the data table rows are scanned using the HBase client available to - * region servers. From the data table rows, expected index table mutations are generated. These expected - * index mutations are used for both repairing the index table rows and verifying them. + * This is an index table region scanner which scans index table rows locally and then extracts data + * table row keys from them. Using the data table row keys, the data table rows are scanned using + * the HBase client available to region servers. From the data table rows, expected index table + * mutations are generated. These expected index mutations are used for both repairing the index + * table rows and verifying them. 
*/ public class IndexRepairRegionScanner extends GlobalIndexRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexRepairRegionScanner.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IndexRepairRegionScanner.class); - public IndexRepairRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) - throws IOException { - super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); + public IndexRepairRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, + final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException { + super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); - byte[] dataTableName = scan.getAttribute(PHYSICAL_DATA_TABLE_NAME); - dataHTable = hTableFactory.getTable(new ImmutableBytesPtr(dataTableName)); - if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { - indexTableTTL = ScanUtil.getTTL(scan); - } else { - indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); - } - try (org.apache.hadoop.hbase.client.Connection connection = - HBaseFactoryProvider.getHConnectionFactory().createConnection(env.getConfiguration())) { - regionEndKeys = connection.getRegionLocator(dataHTable.getName()).getEndKeys(); - } + byte[] dataTableName = scan.getAttribute(PHYSICAL_DATA_TABLE_NAME); + dataHTable = hTableFactory.getTable(new ImmutableBytesPtr(dataTableName)); + if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { + indexTableTTL = ScanUtil.getTTL(scan); + } else { + indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); } - - @Override - public byte[] getDataTableName() { - return dataHTable.getName().toBytes(); + try (org.apache.hadoop.hbase.client.Connection connection = + HBaseFactoryProvider.getHConnectionFactory().createConnection(env.getConfiguration())) { + regionEndKeys = connection.getRegionLocator(dataHTable.getName()).getEndKeys(); } + } - public void prepareExpectedIndexMutations(Result dataRow, Map> expectedIndexMutationMap) throws IOException { - Put put = null; - Delete del = null; - for (Cell cell : dataRow.rawCells()) { - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - } - del.add(cell); - } + @Override + public byte[] getDataTableName() { + return dataHTable.getName().toBytes(); + } + + public void prepareExpectedIndexMutations(Result dataRow, + Map> expectedIndexMutationMap) throws IOException { + Put put = null; + Delete del = null; + for (Cell cell : dataRow.rawCells()) { + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); } - List indexMutations = prepareIndexMutationsForRebuild(indexMaintainer, put, del); - Collections.reverse(indexMutations); - for (Mutation mutation : indexMutations) { - byte[] indexRowKey = mutation.getRow(); - List mutationList = expectedIndexMutationMap.get(indexRowKey); - if (mutationList == null) { - mutationList = new ArrayList<>(); - mutationList.add(mutation); - expectedIndexMutationMap.put(indexRowKey, mutationList); - } else { - mutationList.add(mutation); - } + put.add(cell); + } else { + if 
(del == null) { + del = new Delete(CellUtil.cloneRow(cell)); } + del.add(cell); + } } - - protected void commitBatch(List indexUpdates) throws IOException, InterruptedException { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()])); + List indexMutations = prepareIndexMutationsForRebuild(indexMaintainer, put, del); + Collections.reverse(indexMutations); + for (Mutation mutation : indexMutations) { + byte[] indexRowKey = mutation.getRow(); + List mutationList = expectedIndexMutationMap.get(indexRowKey); + if (mutationList == null) { + mutationList = new ArrayList<>(); + mutationList.add(mutation); + expectedIndexMutationMap.put(indexRowKey, mutationList); + } else { + mutationList.add(mutation); + } } + } + + protected void commitBatch(List indexUpdates) throws IOException, InterruptedException { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + region.batchMutate(indexUpdates.toArray(new Mutation[indexUpdates.size()])); + } - protected void repairIndexRows(Map> indexMutationMap, - List indexRowsToBeDeleted, - IndexToolVerificationResult verificationResult) throws IOException { - updateIndexRows(indexMutationMap, indexRowsToBeDeleted, verificationResult); + protected void repairIndexRows(Map> indexMutationMap, + List indexRowsToBeDeleted, IndexToolVerificationResult verificationResult) + throws IOException { + updateIndexRows(indexMutationMap, indexRowsToBeDeleted, verificationResult); + } + + private Map> populateExpectedIndexMutationMap(Set dataRowKeys) + throws IOException { + Map> expectedIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + List keys = new ArrayList<>(dataRowKeys.size()); + for (byte[] indexKey : dataRowKeys) { + keys.add(PVarbinary.INSTANCE.getKeyRange(indexKey, SortOrder.ASC)); } + ScanRanges scanRanges = ScanRanges.createPointLookup(keys); + Scan dataScan = new Scan(); + dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + scanRanges.initializeScan(dataScan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + dataScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); + dataScan.setRaw(true); + dataScan.readAllVersions(); + dataScan.setCacheBlocks(false); + try (ResultScanner resultScanner = dataHTable.getScanner(dataScan)) { + for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + prepareExpectedIndexMutations(result, expectedIndexMutationMap); + } + } catch (Throwable t) { + ClientUtil.throwIOException(dataHTable.getName().toString(), t); + } + return expectedIndexMutationMap; + } - private Map> populateExpectedIndexMutationMap(Set dataRowKeys) throws IOException { - Map> expectedIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - List keys = new ArrayList<>(dataRowKeys.size()); - for (byte[] indexKey: dataRowKeys) { - keys.add(PVarbinary.INSTANCE.getKeyRange(indexKey, SortOrder.ASC)); - } - ScanRanges scanRanges = ScanRanges.createPointLookup(keys); - Scan dataScan = new Scan(); - dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - scanRanges.initializeScan(dataScan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - dataScan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); - dataScan.setRaw(true); - dataScan.readAllVersions(); - dataScan.setCacheBlocks(false); - try (ResultScanner resultScanner = 
dataHTable.getScanner(dataScan)) { - for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - prepareExpectedIndexMutations(result, expectedIndexMutationMap); - } - } catch (Throwable t) { - ClientUtil.throwIOException(dataHTable.getName().toString(), t); + private Map> populateActualIndexMutationMap( + Map> expectedIndexMutationMap) throws IOException { + Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + Scan indexScan = prepareIndexScan(expectedIndexMutationMap); + try (RegionScanner regionScanner = region.getScanner(indexScan)) { + do { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List row = new ArrayList(); + hasMore = regionScanner.nextRaw(row); + if (!row.isEmpty()) { + populateIndexMutationFromIndexRow(row, actualIndexMutationMap); } - return expectedIndexMutationMap; + } while (hasMore); + } catch (Throwable t) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); } + return actualIndexMutationMap; + } - private Map> populateActualIndexMutationMap(Map> expectedIndexMutationMap) throws IOException { - Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - Scan indexScan = prepareIndexScan(expectedIndexMutationMap); - try (RegionScanner regionScanner = region.getScanner(indexScan)) { - do { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List row = new ArrayList(); - hasMore = regionScanner.nextRaw(row); - if (!row.isEmpty()) { - populateIndexMutationFromIndexRow(row, actualIndexMutationMap); - } - } while (hasMore); - } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + private Map> populateActualIndexMutationMap() throws IOException { + Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + Scan indexScan = new Scan(); + indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + indexScan.setRaw(true); + indexScan.readAllVersions(); + indexScan.setCacheBlocks(false); + try (RegionScanner regionScanner = region.getScanner(indexScan)) { + do { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List row = new ArrayList(); + hasMore = regionScanner.nextRaw(row); + if (!row.isEmpty()) { + populateIndexMutationFromIndexRow(row, actualIndexMutationMap); } - return actualIndexMutationMap; + } while (hasMore); + } catch (Throwable t) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); } + return actualIndexMutationMap; + } - private Map> populateActualIndexMutationMap() throws IOException { - Map> actualIndexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - Scan indexScan = new Scan(); - indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - indexScan.setRaw(true); - indexScan.readAllVersions(); - indexScan.setCacheBlocks(false); - try (RegionScanner regionScanner = region.getScanner(indexScan)) { - do { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List row = new ArrayList(); - hasMore = regionScanner.nextRaw(row); - if (!row.isEmpty()) { - populateIndexMutationFromIndexRow(row, actualIndexMutationMap); - } - } while (hasMore); - } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - } - return actualIndexMutationMap; + private void repairAndOrVerifyIndexRows(Set dataRowKeys, + Map> 
actualIndexMutationMap, + IndexToolVerificationResult verificationResult) throws IOException { + List indexRowsToBeDeleted = new ArrayList<>(); + Map> expectedIndexMutationMap = + populateExpectedIndexMutationMap(dataRowKeys); + if (verifyType == IndexTool.IndexVerifyType.NONE) { + repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + return; + } + if (verifyType == IndexTool.IndexVerifyType.ONLY) { + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, + indexRowsToBeDeleted, verificationResult.getBefore(), true); + return; + } + if (verifyType == IndexTool.IndexVerifyType.BEFORE) { + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, + indexRowsToBeDeleted, verificationResult.getBefore(), true); + if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { + repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + } + return; } + if (verifyType == IndexTool.IndexVerifyType.AFTER) { + repairIndexRows(expectedIndexMutationMap, Collections.EMPTY_LIST, verificationResult); + actualIndexMutationMap = populateActualIndexMutationMap(); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, + indexRowsToBeDeleted, verificationResult.getAfter(), false); + return; + } + if (verifyType == IndexTool.IndexVerifyType.BOTH) { + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, + indexRowsToBeDeleted, verificationResult.getBefore(), true); + if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { + repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); + } + if (!expectedIndexMutationMap.isEmpty()) { + actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); + verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, + Collections.EMPTY_LIST, verificationResult.getAfter(), false); + } + } + } - private void repairAndOrVerifyIndexRows(Set dataRowKeys, - Map> actualIndexMutationMap, - IndexToolVerificationResult verificationResult) throws IOException { - List indexRowsToBeDeleted = new ArrayList<>(); - Map> expectedIndexMutationMap = populateExpectedIndexMutationMap(dataRowKeys); - if (verifyType == IndexTool.IndexVerifyType.NONE) { - repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - return; - } - if (verifyType == IndexTool.IndexVerifyType.ONLY) { - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, indexRowsToBeDeleted, verificationResult.getBefore(), true); - return; - } - if (verifyType == IndexTool.IndexVerifyType.BEFORE) { - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, indexRowsToBeDeleted, verificationResult.getBefore(), true); - if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { - repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - } - return; - } - if (verifyType == IndexTool.IndexVerifyType.AFTER) { - repairIndexRows(expectedIndexMutationMap, Collections.EMPTY_LIST, verificationResult); - actualIndexMutationMap = populateActualIndexMutationMap(); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, indexRowsToBeDeleted, verificationResult.getAfter(), false); - return; + private void addRepairAndOrVerifyTask(TaskBatch tasks, final Set dataRowKeys, + 
final Map> actualIndexMutationMap, + final IndexToolVerificationResult verificationResult) { + tasks.add(new Task() { + @Override + public Boolean call() throws Exception { + try { + // in HBase 1.x we could check if the coproc environment was closed or aborted, + // but in HBase 2.x the coproc environment can't check region server services + if (Thread.currentThread().isInterrupted()) { + exceptionMessage = "Pool closed, not attempting to rebuild and/or verify index rows! " + + indexHTable.getName(); + throw new IOException(exceptionMessage); + } + repairAndOrVerifyIndexRows(dataRowKeys, actualIndexMutationMap, verificationResult); + } catch (Exception e) { + throw e; } - if (verifyType == IndexTool.IndexVerifyType.BOTH) { - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, indexRowsToBeDeleted, verificationResult.getBefore(), true); - if (!expectedIndexMutationMap.isEmpty() || !indexRowsToBeDeleted.isEmpty()) { - repairIndexRows(expectedIndexMutationMap, indexRowsToBeDeleted, verificationResult); - } - if (!expectedIndexMutationMap.isEmpty()) { - actualIndexMutationMap = populateActualIndexMutationMap(expectedIndexMutationMap); - verifyIndexRows(actualIndexMutationMap, expectedIndexMutationMap, Collections.EMPTY_SET, Collections.EMPTY_LIST, verificationResult.getAfter(), false); - } + return Boolean.TRUE; + } + }); + } + + public static List> getPerTaskDataRowKeys(TreeSet dataRowKeys, + byte[][] endKeys, int maxSetSize) { + List> setList = new ArrayList<>(); + int regionCount = endKeys.length; + int regionIndex = 0; + byte[] indexKey = dataRowKeys.first(); + Set perTaskDataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + setList.add(perTaskDataRowKeys); + // Find the region including the first data row key + while ( + regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0 + ) { + regionIndex++; + } + for (byte[] dataRowKey : dataRowKeys) { + indexKey = dataRowKey; + if ( + perTaskDataRowKeys.size() == maxSetSize || (regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) + ) { + perTaskDataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + setList.add(perTaskDataRowKeys); + // Find the region including indexKey + while ( + regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0 + ) { + regionIndex++; } + } + perTaskDataRowKeys.add(dataRowKey); } + return setList; + } - private void addRepairAndOrVerifyTask(TaskBatch tasks, - final Set dataRowKeys, - final Map> actualIndexMutationMap, - final IndexToolVerificationResult verificationResult) { - tasks.add(new Task() { - @Override - public Boolean call() throws Exception { - try { - //in HBase 1.x we could check if the coproc environment was closed or aborted, - //but in HBase 2.x the coproc environment can't check region server services - if (Thread.currentThread().isInterrupted()) { - exceptionMessage = "Pool closed, not attempting to rebuild and/or verify index rows! 
" + indexHTable.getName(); - throw new IOException(exceptionMessage); - } - repairAndOrVerifyIndexRows(dataRowKeys, actualIndexMutationMap, verificationResult); - } catch (Exception e) { - throw e; - } - return Boolean.TRUE; - } - }); + private Set getDataRowKeys(Map> indexMutationMap) { + Set dataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + for (byte[] indexRowKey : indexMutationMap.keySet()) { + byte[] dataRowKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); + dataRowKeys.add(dataRowKey); } + return dataRowKeys; + } - public static List> getPerTaskDataRowKeys(TreeSet dataRowKeys, - byte[][] endKeys, int maxSetSize) { - List> setList = new ArrayList<>(); - int regionCount = endKeys.length; - int regionIndex = 0; - byte[] indexKey = dataRowKeys.first(); - Set perTaskDataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - setList.add(perTaskDataRowKeys); - // Find the region including the first data row key - while (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) { - regionIndex++; - } - for (byte[] dataRowKey: dataRowKeys) { - indexKey = dataRowKey; - if (perTaskDataRowKeys.size() == maxSetSize || - (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0)) { - perTaskDataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - setList.add(perTaskDataRowKeys); - // Find the region including indexKey - while (regionIndex < regionCount - 1 && Bytes.BYTES_COMPARATOR.compare(indexKey, endKeys[regionIndex]) > 0) { - regionIndex++; - } - } - perTaskDataRowKeys.add(dataRowKey); + /** + * @param indexMutationMap actual index mutations for a page + * @param dataRowKeysSetList List of per-task data row keys + * @return For each set of data row keys, split the acutal index mutation map into a per-task + * index mutation map and return the list of all index mutation maps. 
+ */ + private List>> getPerTaskIndexMutationMap( + Map> indexMutationMap, List> dataRowKeysSetList) { + List>> mapList = + Lists.newArrayListWithExpectedSize(dataRowKeysSetList.size()); + for (int i = 0; i < dataRowKeysSetList.size(); ++i) { + Map> perTaskIndexMutationMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + mapList.add(perTaskIndexMutationMap); + } + for (Map.Entry> entry : indexMutationMap.entrySet()) { + byte[] indexRowKey = entry.getKey(); + List actualMutationList = entry.getValue(); + byte[] dataRowKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); + for (int i = 0; i < dataRowKeysSetList.size(); ++i) { + if (dataRowKeysSetList.get(i).contains(dataRowKey)) { + mapList.get(i).put(indexRowKey, actualMutationList); + break; } - return setList; + } } + return mapList; + } - private Set getDataRowKeys(Map> indexMutationMap) { - Set dataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - for (byte[] indexRowKey: indexMutationMap.keySet()) { - byte[] dataRowKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); - dataRowKeys.add(dataRowKey); - } - return dataRowKeys; + private void verifyAndOrRepairIndex(Map> actualIndexMutationMap) + throws IOException { + if (actualIndexMutationMap.size() == 0) { + return; + } + Set dataRowKeys = getDataRowKeys(actualIndexMutationMap); + List> setList = + getPerTaskDataRowKeys((TreeSet) dataRowKeys, regionEndKeys, rowCountPerTask); + List>> indexMutationMapList = + getPerTaskIndexMutationMap(actualIndexMutationMap, setList); + int taskCount = setList.size(); + TaskBatch tasks = new TaskBatch<>(taskCount); + List verificationResultList = new ArrayList<>(taskCount); + for (int i = 0; i < taskCount; i++) { + IndexToolVerificationResult perTaskVerificationResult = new IndexToolVerificationResult(scan); + verificationResultList.add(perTaskVerificationResult); + addRepairAndOrVerifyTask(tasks, setList.get(i), indexMutationMapList.get(i), + perTaskVerificationResult); } + submitTasks(tasks); + if (verify) { + for (IndexToolVerificationResult result : verificationResultList) { + verificationResult.add(result); + } + } + } - /** - * @param indexMutationMap actual index mutations for a page - * @param dataRowKeysSetList List of per-task data row keys - * @return For each set of data row keys, split the acutal index mutation map into - * a per-task index mutation map and return the list of all index mutation maps. 
- */ - private List>> getPerTaskIndexMutationMap( - Map> indexMutationMap, List> dataRowKeysSetList) { - List>> mapList = Lists.newArrayListWithExpectedSize(dataRowKeysSetList.size()); - for (int i = 0; i < dataRowKeysSetList.size(); ++i) { - Map> perTaskIndexMutationMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - mapList.add(perTaskIndexMutationMap); + private int populateIndexMutationFromIndexRow(List row, + Map> indexMutationMap) throws IOException { + Put put = null; + Delete del = null; + for (Cell cell : row) { + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); } - for (Map.Entry> entry : indexMutationMap.entrySet()) { - byte[] indexRowKey = entry.getKey(); - List actualMutationList = entry.getValue(); - byte[] dataRowKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); - for (int i = 0; i < dataRowKeysSetList.size(); ++i) { - if (dataRowKeysSetList.get(i).contains(dataRowKey)) { - mapList.get(i).put(indexRowKey, actualMutationList); - break; - } - } + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); } - return mapList; + del.add(cell); + } } + byte[] indexRowKey; + if (put != null) { + indexRowKey = put.getRow(); + } else if (del != null) { + indexRowKey = del.getRow(); + } else { + return 0; + } + List mutationList = getMutationsWithSameTS(put, del, MUTATION_TS_DESC_COMPARATOR); + indexMutationMap.put(indexRowKey, mutationList); + return mutationList.size(); + } - private void verifyAndOrRepairIndex(Map> actualIndexMutationMap) throws IOException { - if (actualIndexMutationMap.size() == 0) { - return; + @Override + public boolean next(List results) throws IOException { + Map> indexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + Cell lastCell = null; + int rowCount = 0; + int indexMutationCount = 0; + region.startRegionOperation(); + RegionScanner localScanner = null; + try { + localScanner = getLocalScanner(); + if (localScanner == null) { + return false; + } + synchronized (localScanner) { + if (!shouldVerify()) { + skipped = true; + return false; } - Set dataRowKeys = getDataRowKeys(actualIndexMutationMap); - List> setList = getPerTaskDataRowKeys((TreeSet) dataRowKeys, - regionEndKeys, rowCountPerTask); - List>> indexMutationMapList = getPerTaskIndexMutationMap(actualIndexMutationMap, setList); - int taskCount = setList.size(); - TaskBatch tasks = new TaskBatch<>(taskCount); - List verificationResultList = new ArrayList<>(taskCount); - for (int i = 0; i < taskCount; i++) { - IndexToolVerificationResult perTaskVerificationResult = new IndexToolVerificationResult(scan); - verificationResultList.add(perTaskVerificationResult); - addRepairAndOrVerifyTask(tasks, setList.get(i), indexMutationMapList.get(i), perTaskVerificationResult); + do { + /* + * If region is closing and there are large number of rows being verified/rebuilt with + * IndexTool, not having this check will impact/delay the region closing -- affecting the + * availability as this method holds the read lock on the region. 
+ */ + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List row = new ArrayList(); + hasMore = localScanner.nextRaw(row); + if (!row.isEmpty()) { + lastCell = row.get(0); // lastCell is any cell from the last visited row + if (isDummy(row)) { + break; + } + indexMutationCount += populateIndexMutationFromIndexRow(row, indexMutationMap); + rowCount++; + } + } while (hasMore && indexMutationCount < pageSizeInRows); + if (!indexMutationMap.isEmpty()) { + verifyAndOrRepairIndex(indexMutationMap); } - submitTasks(tasks); if (verify) { - for (IndexToolVerificationResult result : verificationResultList) { - verificationResult.add(result); - } + verificationResult + .setScannedDataRowCount(verificationResult.getScannedDataRowCount() + rowCount); } + } + } catch (Throwable e) { + LOGGER.error("Exception in IndexRepairRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + throw e; + } finally { + region.closeRegionOperation(); + if (localScanner != null && localScanner != innerScanner) { + localScanner.close(); + } } - private int populateIndexMutationFromIndexRow(List row, Map> indexMutationMap) - throws IOException { - Put put = null; - Delete del = null; - for (Cell cell : row) { - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - } - del.add(cell); - } - } - byte[] indexRowKey; - if (put != null) { - indexRowKey = put.getRow(); - } else if (del != null) { - indexRowKey = del.getRow(); - } - else { - return 0; - } - List mutationList = getMutationsWithSameTS(put, del, MUTATION_TS_DESC_COMPARATOR); - indexMutationMap.put(indexRowKey, mutationList); - return mutationList.size(); + if (minTimestamp != 0) { + nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell)); } - - @Override - public boolean next(List results) throws IOException { - Map> indexMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - Cell lastCell = null; - int rowCount = 0; - int indexMutationCount = 0; - region.startRegionOperation(); - RegionScanner localScanner = null; - try { - localScanner = getLocalScanner(); - if (localScanner == null) { - return false; - } - synchronized (localScanner) { - if (!shouldVerify()) { - skipped = true; - return false; - } - do { - /* - If region is closing and there are large number of rows being verified/rebuilt with IndexTool, - not having this check will impact/delay the region closing -- affecting the availability - as this method holds the read lock on the region. 
- */ - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List row = new ArrayList(); - hasMore = localScanner.nextRaw(row); - if (!row.isEmpty()) { - lastCell = row.get(0); // lastCell is any cell from the last visited row - if (isDummy(row)) { - break; - } - indexMutationCount += populateIndexMutationFromIndexRow(row, indexMutationMap); - rowCount++; - } - } while (hasMore && indexMutationCount < pageSizeInRows); - if (!indexMutationMap.isEmpty()) { - verifyAndOrRepairIndex(indexMutationMap); - } - if (verify) { - verificationResult.setScannedDataRowCount(verificationResult.getScannedDataRowCount() + rowCount); - } - } - } catch (Throwable e) { - LOGGER.error("Exception in IndexRepairRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - throw e; - } finally { - region.closeRegionOperation(); - if (localScanner!=null && localScanner!=innerScanner) { - localScanner.close(); - } - } - - if (minTimestamp != 0) { - nextStartKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(CellUtil.cloneRow(lastCell)); - } - byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) rowCount); - final Cell aggKeyValue; - if (lastCell == null) { - byte[] rowKey; - byte[] startKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - region.getRegionInfo().getStartKey(); - byte[] endKey = scan.getStopRow().length > 0 ? scan.getStopRow() : - region.getRegionInfo().getEndKey(); - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - if (!isIncompatibleClient) { - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.includeStartRow()) { - rowKey = startKey; - } else if (scan.includeStopRow()) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } - } else { - rowKey = UNGROUPED_AGG_ROW_KEY; - } - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - } else { - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) rowCount); + final Cell aggKeyValue; + if (lastCell == null) { + byte[] rowKey; + byte[] startKey = + scan.getStartRow().length > 0 ? scan.getStartRow() : region.getRegionInfo().getStartKey(); + byte[] endKey = + scan.getStopRow().length > 0 ? 
scan.getStopRow() : region.getRegionInfo().getEndKey(); + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + if (!isIncompatibleClient) { + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if (scan.includeStartRow()) { + rowKey = startKey; + } else if (scan.includeStopRow()) { + rowKey = endKey; + } else { + rowKey = HConstants.EMPTY_END_ROW; + } } - results.add(aggKeyValue); - return hasMore || hasMoreIncr; + } else { + rowKey = UNGROUPED_AGG_ROW_KEY; + } + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, + AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + } else { + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); } + results.add(aggKeyValue); + return hasMore || hasMoreIncr; + } - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); - } + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexToolVerificationResult.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexToolVerificationResult.java index 8c8fb3bbcf1..6877bdbb20b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexToolVerificationResult.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexToolVerificationResult.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +17,6 @@ */ package org.apache.phoenix.coprocessor; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.schema.types.PBoolean; - import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES; import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES; import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES; @@ -51,599 +45,704 @@ import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.SCANNED_DATA_ROW_COUNT_BYTES; import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.SHOULD_RETRY_BYTES; -public class IndexToolVerificationResult { - - public void setScannedDataRowCount(long scannedDataRowCount) { - this.scannedDataRowCount = scannedDataRowCount; - } - - public void setShouldRetry(boolean shouldRetry) { - this.shouldRetry = shouldRetry; - } - - public void setRebuiltIndexRowCount(long rebuiltIndexRowCount) { - this.rebuiltIndexRowCount = rebuiltIndexRowCount; - } - - public PhaseResult getBefore() { - return before; - } - - public void setBefore(PhaseResult before) { - this.before = before; - } - - public PhaseResult getAfter() { - return after; - } - - public void 
setAfter(PhaseResult after) { - this.after = after; - } - - public byte[] getStartRow() { - return startRow; - } - - public byte[] getStopRow() { - return stopRow; - } - - public long getScanMaxTs() { - return scanMaxTs; - } - - public IndexToolVerificationResult(long scanMaxTs) { - this.scanMaxTs = scanMaxTs; - } +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.schema.types.PBoolean; - public IndexToolVerificationResult(byte[] startRow, byte[] stopRow, long scanMaxTs) { - this.setStartRow(startRow); - this.setStopRow(stopRow); - this.scanMaxTs = scanMaxTs; - } +public class IndexToolVerificationResult { - public IndexToolVerificationResult(Scan scan) { - this.setStartRow(scan.getStartRow()); - this.setStopRow(scan.getStopRow()); - this.scanMaxTs = scan.getTimeRange().getMax(); + public void setScannedDataRowCount(long scannedDataRowCount) { + this.scannedDataRowCount = scannedDataRowCount; + } + + public void setShouldRetry(boolean shouldRetry) { + this.shouldRetry = shouldRetry; + } + + public void setRebuiltIndexRowCount(long rebuiltIndexRowCount) { + this.rebuiltIndexRowCount = rebuiltIndexRowCount; + } + + public PhaseResult getBefore() { + return before; + } + + public void setBefore(PhaseResult before) { + this.before = before; + } + + public PhaseResult getAfter() { + return after; + } + + public void setAfter(PhaseResult after) { + this.after = after; + } + + public byte[] getStartRow() { + return startRow; + } + + public byte[] getStopRow() { + return stopRow; + } + + public long getScanMaxTs() { + return scanMaxTs; + } + + public IndexToolVerificationResult(long scanMaxTs) { + this.scanMaxTs = scanMaxTs; + } + + public IndexToolVerificationResult(byte[] startRow, byte[] stopRow, long scanMaxTs) { + this.setStartRow(startRow); + this.setStopRow(stopRow); + this.scanMaxTs = scanMaxTs; + } + + public IndexToolVerificationResult(Scan scan) { + this.setStartRow(scan.getStartRow()); + this.setStopRow(scan.getStopRow()); + this.scanMaxTs = scan.getTimeRange().getMax(); + } + + public byte[] getRegion() { + return region; + } + + public void setStartRow(byte[] startRow) { + this.startRow = startRow; + } + + public void setStopRow(byte[] stopRow) { + this.stopRow = stopRow; + } + + public static class PhaseResult { + private long validIndexRowCount = 0; + private long expiredIndexRowCount = 0; + private long missingIndexRowCount = 0; + private long invalidIndexRowCount = 0; + private long beyondMaxLookBackMissingIndexRowCount = 0; + private long beyondMaxLookBackInvalidIndexRowCount = 0; + private long indexHasExtraCellsCount = 0; + private long indexHasMissingCellsCount = 0; + private long unverifiedIndexRowCount = 0; + private long oldIndexRowCount = 0; + private long unknownIndexRowCount = 0; + private long extraVerifiedIndexRowCount = 0; + private long extraUnverifiedIndexRowCount = 0; + + public void add(PhaseResult phaseResult) { + setBeyondMaxLookBackMissingIndexRowCount(getBeyondMaxLookBackMissingIndexRowCount() + + phaseResult.getBeyondMaxLookBackMissingIndexRowCount()); + setBeyondMaxLookBackInvalidIndexRowCount(getBeyondMaxLookBackInvalidIndexRowCount() + + phaseResult.getBeyondMaxLookBackInvalidIndexRowCount()); + setValidIndexRowCount(getValidIndexRowCount() + phaseResult.getValidIndexRowCount()); + setExpiredIndexRowCount(getExpiredIndexRowCount() + phaseResult.getExpiredIndexRowCount()); + 
setMissingIndexRowCount(getMissingIndexRowCount() + phaseResult.getMissingIndexRowCount()); + setInvalidIndexRowCount(getInvalidIndexRowCount() + phaseResult.getInvalidIndexRowCount()); + setIndexHasExtraCellsCount( + getIndexHasExtraCellsCount() + phaseResult.getIndexHasExtraCellsCount()); + setIndexHasMissingCellsCount( + getIndexHasMissingCellsCount() + phaseResult.getIndexHasMissingCellsCount()); + setUnverifiedIndexRowCount( + getUnverifiedIndexRowCount() + phaseResult.getUnverifiedIndexRowCount()); + setUnknownIndexRowCount(getUnknownIndexRowCount() + phaseResult.getUnknownIndexRowCount()); + setOldIndexRowCount(getOldIndexRowCount() + phaseResult.getOldIndexRowCount()); + setExtraVerifiedIndexRowCount( + getExtraVerifiedIndexRowCount() + phaseResult.getExtraVerifiedIndexRowCount()); + setExtraUnverifiedIndexRowCount( + getExtraUnverifiedIndexRowCount() + phaseResult.getExtraUnverifiedIndexRowCount()); + } + + public PhaseResult() { + } + + public PhaseResult(long validIndexRowCount, long expiredIndexRowCount, + long missingIndexRowCount, long invalidIndexRowCount, + long beyondMaxLookBackMissingIndexRowCount, long beyondMaxLookBackInvalidIndexRowCount, + long indexHasExtraCellsCount, long indexHasMissingCellsCount, long extraVerifiedIndexRowCount, + long extraUnverifiedIndexRowCount) { + this.setValidIndexRowCount(validIndexRowCount); + this.setExpiredIndexRowCount(expiredIndexRowCount); + this.setMissingIndexRowCount(missingIndexRowCount); + this.setInvalidIndexRowCount(invalidIndexRowCount); + this.setBeyondMaxLookBackInvalidIndexRowCount(beyondMaxLookBackInvalidIndexRowCount); + this.setBeyondMaxLookBackMissingIndexRowCount(beyondMaxLookBackMissingIndexRowCount); + this.setIndexHasExtraCellsCount(indexHasExtraCellsCount); + this.setIndexHasMissingCellsCount(indexHasMissingCellsCount); + this.setExtraVerifiedIndexRowCount(extraVerifiedIndexRowCount); + this.setExtraUnverifiedIndexRowCount(extraUnverifiedIndexRowCount); + } + + public long getTotalCount() { + return getValidIndexRowCount() + getExpiredIndexRowCount() + getMissingIndexRowCount() + + getInvalidIndexRowCount() + getBeyondMaxLookBackMissingIndexRowCount() + + getBeyondMaxLookBackInvalidIndexRowCount(); + } + + public long getIndexHasExtraCellsCount() { + return indexHasExtraCellsCount; + } + + public long getIndexHasMissingCellsCount() { + return indexHasMissingCellsCount; + } + + public long getTotalExtraIndexRowsCount() { + return getExtraVerifiedIndexRowCount() + getExtraUnverifiedIndexRowCount(); } - public byte[] getRegion() { - return region; - } + @Override + public String toString() { + return "PhaseResult{" + - public void setStartRow(byte[] startRow) { - this.startRow = startRow; + "validIndexRowCount=" + validIndexRowCount + ", expiredIndexRowCount=" + + expiredIndexRowCount + ", missingIndexRowCount=" + missingIndexRowCount + + ", invalidIndexRowCount=" + invalidIndexRowCount + + ", beyondMaxLookBackMissingIndexRowCount=" + getBeyondMaxLookBackMissingIndexRowCount() + + ", beyondMaxLookBackInvalidIndexRowCount=" + getBeyondMaxLookBackInvalidIndexRowCount() + + ", extraCellsOnIndexCount=" + indexHasExtraCellsCount + ", missingCellsOnIndexCount=" + + indexHasMissingCellsCount + ", unverifiedIndexRowCount=" + unverifiedIndexRowCount + + ", oldIndexRowCount=" + oldIndexRowCount + ", unknownIndexRowCount=" + + unknownIndexRowCount + ", extraVerifiedIndexRowCount=" + extraVerifiedIndexRowCount + + ", extraUnverifiedIndexRowCount=" + extraUnverifiedIndexRowCount + '}'; } - public void setStopRow(byte[] stopRow) { 
- this.stopRow = stopRow; + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (!(o instanceof PhaseResult)) { + return false; + } + PhaseResult pr = (PhaseResult) o; + + return this.expiredIndexRowCount == pr.expiredIndexRowCount + && this.validIndexRowCount == pr.validIndexRowCount + && this.invalidIndexRowCount == pr.invalidIndexRowCount + && this.missingIndexRowCount == pr.missingIndexRowCount + && this.beyondMaxLookBackInvalidIndexRowCount == pr.beyondMaxLookBackInvalidIndexRowCount + && this.beyondMaxLookBackMissingIndexRowCount == pr.beyondMaxLookBackMissingIndexRowCount + && this.indexHasMissingCellsCount == pr.indexHasMissingCellsCount + && this.indexHasExtraCellsCount == pr.indexHasExtraCellsCount + && this.oldIndexRowCount == pr.oldIndexRowCount + && this.unknownIndexRowCount == pr.unknownIndexRowCount + && this.extraVerifiedIndexRowCount == pr.extraVerifiedIndexRowCount + && this.extraUnverifiedIndexRowCount == pr.extraUnverifiedIndexRowCount; } - public static class PhaseResult { - private long validIndexRowCount = 0; - private long expiredIndexRowCount = 0; - private long missingIndexRowCount = 0; - private long invalidIndexRowCount = 0; - private long beyondMaxLookBackMissingIndexRowCount = 0; - private long beyondMaxLookBackInvalidIndexRowCount = 0; - private long indexHasExtraCellsCount = 0; - private long indexHasMissingCellsCount = 0; - private long unverifiedIndexRowCount = 0; - private long oldIndexRowCount = 0; - private long unknownIndexRowCount = 0; - private long extraVerifiedIndexRowCount = 0; - private long extraUnverifiedIndexRowCount = 0; - - public void add(PhaseResult phaseResult) { - setBeyondMaxLookBackMissingIndexRowCount(getBeyondMaxLookBackMissingIndexRowCount() + - phaseResult.getBeyondMaxLookBackMissingIndexRowCount()); - setBeyondMaxLookBackInvalidIndexRowCount(getBeyondMaxLookBackInvalidIndexRowCount() + - phaseResult.getBeyondMaxLookBackInvalidIndexRowCount()); - setValidIndexRowCount(getValidIndexRowCount() + phaseResult.getValidIndexRowCount()); - setExpiredIndexRowCount(getExpiredIndexRowCount() + phaseResult.getExpiredIndexRowCount()); - setMissingIndexRowCount(getMissingIndexRowCount() + phaseResult.getMissingIndexRowCount()); - setInvalidIndexRowCount(getInvalidIndexRowCount() + phaseResult.getInvalidIndexRowCount()); - setIndexHasExtraCellsCount(getIndexHasExtraCellsCount() + phaseResult.getIndexHasExtraCellsCount()); - setIndexHasMissingCellsCount(getIndexHasMissingCellsCount() + phaseResult.getIndexHasMissingCellsCount()); - setUnverifiedIndexRowCount(getUnverifiedIndexRowCount() + phaseResult.getUnverifiedIndexRowCount()); - setUnknownIndexRowCount(getUnknownIndexRowCount() + phaseResult.getUnknownIndexRowCount()); - setOldIndexRowCount(getOldIndexRowCount() + phaseResult.getOldIndexRowCount()); - setExtraVerifiedIndexRowCount(getExtraVerifiedIndexRowCount() + - phaseResult.getExtraVerifiedIndexRowCount()); - setExtraUnverifiedIndexRowCount(getExtraUnverifiedIndexRowCount() + - phaseResult.getExtraUnverifiedIndexRowCount()); - } - - public PhaseResult() { - } - - public PhaseResult(long validIndexRowCount, long expiredIndexRowCount, - long missingIndexRowCount, long invalidIndexRowCount, - long beyondMaxLookBackMissingIndexRowCount, - long beyondMaxLookBackInvalidIndexRowCount, - long indexHasExtraCellsCount, long indexHasMissingCellsCount, - long extraVerifiedIndexRowCount, long extraUnverifiedIndexRowCount) { - this.setValidIndexRowCount(validIndexRowCount); - 
this.setExpiredIndexRowCount(expiredIndexRowCount); - this.setMissingIndexRowCount(missingIndexRowCount); - this.setInvalidIndexRowCount(invalidIndexRowCount); - this.setBeyondMaxLookBackInvalidIndexRowCount(beyondMaxLookBackInvalidIndexRowCount); - this.setBeyondMaxLookBackMissingIndexRowCount(beyondMaxLookBackMissingIndexRowCount); - this.setIndexHasExtraCellsCount(indexHasExtraCellsCount); - this.setIndexHasMissingCellsCount(indexHasMissingCellsCount); - this.setExtraVerifiedIndexRowCount(extraVerifiedIndexRowCount); - this.setExtraUnverifiedIndexRowCount(extraUnverifiedIndexRowCount); - } - - - public long getTotalCount() { - return getValidIndexRowCount() + getExpiredIndexRowCount() + getMissingIndexRowCount() + getInvalidIndexRowCount() - + getBeyondMaxLookBackMissingIndexRowCount() + getBeyondMaxLookBackInvalidIndexRowCount(); - } - - public long getIndexHasExtraCellsCount() { - return indexHasExtraCellsCount; - } - - public long getIndexHasMissingCellsCount() { - return indexHasMissingCellsCount; - } - - public long getTotalExtraIndexRowsCount() { - return getExtraVerifiedIndexRowCount() + getExtraUnverifiedIndexRowCount() ; - } - - @Override - public String toString() { - return "PhaseResult{" + - - "validIndexRowCount=" + validIndexRowCount + - ", expiredIndexRowCount=" + expiredIndexRowCount + - ", missingIndexRowCount=" + missingIndexRowCount + - ", invalidIndexRowCount=" + invalidIndexRowCount + - ", beyondMaxLookBackMissingIndexRowCount=" + getBeyondMaxLookBackMissingIndexRowCount() + - ", beyondMaxLookBackInvalidIndexRowCount=" + getBeyondMaxLookBackInvalidIndexRowCount() + - ", extraCellsOnIndexCount=" + indexHasExtraCellsCount + - ", missingCellsOnIndexCount=" + indexHasMissingCellsCount + - ", unverifiedIndexRowCount=" + unverifiedIndexRowCount + - ", oldIndexRowCount=" + oldIndexRowCount + - ", unknownIndexRowCount=" + unknownIndexRowCount + - ", extraVerifiedIndexRowCount=" + extraVerifiedIndexRowCount + - ", extraUnverifiedIndexRowCount=" + extraUnverifiedIndexRowCount + - '}'; - } - - @Override - public boolean equals(Object o) { - if (o == null) { - return false; - } - if (!(o instanceof PhaseResult)) { - return false; - } - PhaseResult pr = (PhaseResult) o; - - return this.expiredIndexRowCount == pr.expiredIndexRowCount - && this.validIndexRowCount == pr.validIndexRowCount - && this.invalidIndexRowCount == pr.invalidIndexRowCount - && this.missingIndexRowCount == pr.missingIndexRowCount - && this.beyondMaxLookBackInvalidIndexRowCount == pr.beyondMaxLookBackInvalidIndexRowCount - && this.beyondMaxLookBackMissingIndexRowCount== pr.beyondMaxLookBackMissingIndexRowCount - && this.indexHasMissingCellsCount == pr.indexHasMissingCellsCount - && this.indexHasExtraCellsCount == pr.indexHasExtraCellsCount - && this.oldIndexRowCount == pr.oldIndexRowCount - && this.unknownIndexRowCount == pr.unknownIndexRowCount - && this.extraVerifiedIndexRowCount == pr.extraVerifiedIndexRowCount - && this.extraUnverifiedIndexRowCount == pr.extraUnverifiedIndexRowCount; - } - - @Override - public int hashCode() { - long result = 17; - result = 31 * result + getExpiredIndexRowCount(); - result = 31 * result + getValidIndexRowCount(); - result = 31 * result + getMissingIndexRowCount(); - result = 31 * result + getInvalidIndexRowCount(); - result = 31 * result + getBeyondMaxLookBackMissingIndexRowCount(); - result = 31 * result + getBeyondMaxLookBackInvalidIndexRowCount(); - result = 31 * result + getIndexHasMissingCellsCount(); - result = 31 * result + getIndexHasExtraCellsCount(); - result = 
31 * result + getUnverifiedIndexRowCount(); - result = 31 * result + getOldIndexRowCount(); - result = 31 * result + getUnknownIndexRowCount(); - result = 31 * result + getExtraVerifiedIndexRowCount(); - result = 31 * result + getExtraUnverifiedIndexRowCount(); - return (int) result; - } - - public long getValidIndexRowCount() { - return validIndexRowCount; - } - - public void setValidIndexRowCount(long validIndexRowCount) { - this.validIndexRowCount = validIndexRowCount; - } - - public long getExpiredIndexRowCount() { - return expiredIndexRowCount; - } - - public void setExpiredIndexRowCount(long expiredIndexRowCount) { - this.expiredIndexRowCount = expiredIndexRowCount; - } - - public long getMissingIndexRowCount() { - return missingIndexRowCount; - } - - public void setMissingIndexRowCount(long missingIndexRowCount) { - this.missingIndexRowCount = missingIndexRowCount; - } - - public long getInvalidIndexRowCount() { - return invalidIndexRowCount; - } - - public void setInvalidIndexRowCount(long invalidIndexRowCount) { - this.invalidIndexRowCount = invalidIndexRowCount; - } - - public long getBeyondMaxLookBackMissingIndexRowCount() { - return beyondMaxLookBackMissingIndexRowCount; - } - - public void setBeyondMaxLookBackMissingIndexRowCount(long beyondMaxLookBackMissingIndexRowCount) { - this.beyondMaxLookBackMissingIndexRowCount = beyondMaxLookBackMissingIndexRowCount; - } - - public long getBeyondMaxLookBackInvalidIndexRowCount() { - return beyondMaxLookBackInvalidIndexRowCount; - } - - public void setBeyondMaxLookBackInvalidIndexRowCount(long beyondMaxLookBackInvalidIndexRowCount) { - this.beyondMaxLookBackInvalidIndexRowCount = beyondMaxLookBackInvalidIndexRowCount; - } - - public void setIndexHasMissingCellsCount(long indexHasMissingCellsCount) { - this.indexHasMissingCellsCount = indexHasMissingCellsCount; - } - - public void setIndexHasExtraCellsCount(long indexHasExtraCellsCount) { - this.indexHasExtraCellsCount = indexHasExtraCellsCount; - } - - public long getUnverifiedIndexRowCount() { - return unverifiedIndexRowCount; - } - - public void setUnverifiedIndexRowCount(long unverifiedIndexRowCount) { - this.unverifiedIndexRowCount = unverifiedIndexRowCount; - } - - public long getOldIndexRowCount() { - return oldIndexRowCount; - } - - public void setOldIndexRowCount(long oldIndexRowCount) { - this.oldIndexRowCount = oldIndexRowCount; - } - - public long getUnknownIndexRowCount() { - return unknownIndexRowCount; - } - - public void setUnknownIndexRowCount(long unknownIndexRowCount) { - this.unknownIndexRowCount = unknownIndexRowCount; - } - - public long getExtraVerifiedIndexRowCount() { return extraVerifiedIndexRowCount; } - - public void setExtraVerifiedIndexRowCount(long extraVerifiedIndexRowCount) { - this.extraVerifiedIndexRowCount = extraVerifiedIndexRowCount; - } - - public long getExtraUnverifiedIndexRowCount() { return extraUnverifiedIndexRowCount; } - - public void setExtraUnverifiedIndexRowCount(long extraUnverifiedIndexRowCount) { - this.extraUnverifiedIndexRowCount = extraUnverifiedIndexRowCount; - } - } - - private long scannedDataRowCount = 0; - private long rebuiltIndexRowCount = 0; - private byte[] startRow; - private byte[] stopRow; - private long scanMaxTs; - private byte[] region; - private boolean shouldRetry = false; - private PhaseResult before = new PhaseResult(); - private PhaseResult after = new PhaseResult(); - @Override - public String toString() { - return "VerificationResult{" + - "scannedDataRowCount=" + getScannedDataRowCount() + - ", 
rebuiltIndexRowCount=" + getRebuiltIndexRowCount() + - ", before=" + getBefore() + - ", after=" + getAfter() + - '}'; + public int hashCode() { + long result = 17; + result = 31 * result + getExpiredIndexRowCount(); + result = 31 * result + getValidIndexRowCount(); + result = 31 * result + getMissingIndexRowCount(); + result = 31 * result + getInvalidIndexRowCount(); + result = 31 * result + getBeyondMaxLookBackMissingIndexRowCount(); + result = 31 * result + getBeyondMaxLookBackInvalidIndexRowCount(); + result = 31 * result + getIndexHasMissingCellsCount(); + result = 31 * result + getIndexHasExtraCellsCount(); + result = 31 * result + getUnverifiedIndexRowCount(); + result = 31 * result + getOldIndexRowCount(); + result = 31 * result + getUnknownIndexRowCount(); + result = 31 * result + getExtraVerifiedIndexRowCount(); + result = 31 * result + getExtraUnverifiedIndexRowCount(); + return (int) result; } - public long getScannedDataRowCount() { - return scannedDataRowCount; + public long getValidIndexRowCount() { + return validIndexRowCount; } - public boolean getShouldRetry() { - return shouldRetry; + public void setValidIndexRowCount(long validIndexRowCount) { + this.validIndexRowCount = validIndexRowCount; } - public long getRebuiltIndexRowCount() { - return rebuiltIndexRowCount; + public long getExpiredIndexRowCount() { + return expiredIndexRowCount; } - public long getBeforeRebuildValidIndexRowCount() { - return getBefore().getValidIndexRowCount(); + public void setExpiredIndexRowCount(long expiredIndexRowCount) { + this.expiredIndexRowCount = expiredIndexRowCount; } - public long getBeforeRebuildExpiredIndexRowCount() { - return getBefore().getExpiredIndexRowCount(); + public long getMissingIndexRowCount() { + return missingIndexRowCount; } - public long getBeforeRebuildInvalidIndexRowCount() { - return getBefore().getInvalidIndexRowCount(); + public void setMissingIndexRowCount(long missingIndexRowCount) { + this.missingIndexRowCount = missingIndexRowCount; } - public long getBeforeRebuildUnverifiedIndexRowCount() { - return getBefore().getUnverifiedIndexRowCount(); + public long getInvalidIndexRowCount() { + return invalidIndexRowCount; } - public long getBeforeRebuildOldIndexRowCount() { - return getBefore().getOldIndexRowCount(); + public void setInvalidIndexRowCount(long invalidIndexRowCount) { + this.invalidIndexRowCount = invalidIndexRowCount; } - public long getBeforeRebuildUnknownIndexRowCount() { - return getBefore().getUnknownIndexRowCount(); + public long getBeyondMaxLookBackMissingIndexRowCount() { + return beyondMaxLookBackMissingIndexRowCount; } - public long getBeforeRebuildBeyondMaxLookBackMissingIndexRowCount() { - return before.getBeyondMaxLookBackMissingIndexRowCount(); + public void + setBeyondMaxLookBackMissingIndexRowCount(long beyondMaxLookBackMissingIndexRowCount) { + this.beyondMaxLookBackMissingIndexRowCount = beyondMaxLookBackMissingIndexRowCount; } - public long getBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount() { - return before.getBeyondMaxLookBackInvalidIndexRowCount(); + public long getBeyondMaxLookBackInvalidIndexRowCount() { + return beyondMaxLookBackInvalidIndexRowCount; } - public long getBeforeRebuildMissingIndexRowCount() { - return getBefore().getMissingIndexRowCount(); + public void + setBeyondMaxLookBackInvalidIndexRowCount(long beyondMaxLookBackInvalidIndexRowCount) { + this.beyondMaxLookBackInvalidIndexRowCount = beyondMaxLookBackInvalidIndexRowCount; } - public long getBeforeIndexHasMissingCellsCount() {return 
getBefore().getIndexHasMissingCellsCount(); } - - public long getBeforeIndexHasExtraCellsCount() {return getBefore().getIndexHasExtraCellsCount(); } - - public long getBeforeRepairExtraVerifiedIndexRowCount() { return getBefore().getExtraVerifiedIndexRowCount(); } - - public long getBeforeRepairExtraUnverifiedIndexRowCount() { return getBefore().getExtraUnverifiedIndexRowCount(); } - - public long getAfterRebuildValidIndexRowCount() { - return getAfter().getValidIndexRowCount(); + public void setIndexHasMissingCellsCount(long indexHasMissingCellsCount) { + this.indexHasMissingCellsCount = indexHasMissingCellsCount; } - public long getAfterRebuildExpiredIndexRowCount() { - return getAfter().getExpiredIndexRowCount(); + public void setIndexHasExtraCellsCount(long indexHasExtraCellsCount) { + this.indexHasExtraCellsCount = indexHasExtraCellsCount; } - public long getAfterRebuildInvalidIndexRowCount() { - return getAfter().getInvalidIndexRowCount(); + public long getUnverifiedIndexRowCount() { + return unverifiedIndexRowCount; } - public long getAfterRebuildMissingIndexRowCount() { - return getAfter().getMissingIndexRowCount(); + public void setUnverifiedIndexRowCount(long unverifiedIndexRowCount) { + this.unverifiedIndexRowCount = unverifiedIndexRowCount; } - public long getAfterRebuildBeyondMaxLookBackMissingIndexRowCount() { - return after.getBeyondMaxLookBackMissingIndexRowCount(); + public long getOldIndexRowCount() { + return oldIndexRowCount; } - public long getAfterRebuildBeyondMaxLookBackInvalidIndexRowCount() { - return after.getBeyondMaxLookBackInvalidIndexRowCount(); + public void setOldIndexRowCount(long oldIndexRowCount) { + this.oldIndexRowCount = oldIndexRowCount; } - public long getAfterIndexHasMissingCellsCount() { return getAfter().getIndexHasMissingCellsCount(); } - - public long getAfterIndexHasExtraCellsCount() { return getAfter().getIndexHasExtraCellsCount(); } - - public long getAfterRepairExtraVerifiedIndexRowCount() { return getAfter().getExtraVerifiedIndexRowCount(); } - - public long getAfterRepairExtraUnverifiedIndexRowCount() { return getAfter().getExtraUnverifiedIndexRowCount(); } - - private void addScannedDataRowCount(long count) { - this.setScannedDataRowCount(this.getScannedDataRowCount() + count); + public long getUnknownIndexRowCount() { + return unknownIndexRowCount; } - private void addRebuiltIndexRowCount(long count) { - this.setRebuiltIndexRowCount(this.getRebuiltIndexRowCount() + count); + public void setUnknownIndexRowCount(long unknownIndexRowCount) { + this.unknownIndexRowCount = unknownIndexRowCount; } - private void addBeforeRebuildValidIndexRowCount(long count) { - getBefore().setValidIndexRowCount(getBefore().getValidIndexRowCount() + count); + public long getExtraVerifiedIndexRowCount() { + return extraVerifiedIndexRowCount; } - private void addBeforeRebuildExpiredIndexRowCount(long count) { - getBefore().setExpiredIndexRowCount(getBefore().getExpiredIndexRowCount() + count); + public void setExtraVerifiedIndexRowCount(long extraVerifiedIndexRowCount) { + this.extraVerifiedIndexRowCount = extraVerifiedIndexRowCount; } - private void addBeforeRebuildMissingIndexRowCount(long count) { - getBefore().setMissingIndexRowCount(getBefore().getMissingIndexRowCount() + count); + public long getExtraUnverifiedIndexRowCount() { + return extraUnverifiedIndexRowCount; } - private void addBeforeRebuildInvalidIndexRowCount(long count) { - getBefore().setInvalidIndexRowCount(getBefore().getInvalidIndexRowCount() + count); + public void 
setExtraUnverifiedIndexRowCount(long extraUnverifiedIndexRowCount) { + this.extraUnverifiedIndexRowCount = extraUnverifiedIndexRowCount; } + } - private void addBeforeRebuildBeyondMaxLookBackMissingIndexRowCount(long count) { - before.setBeyondMaxLookBackMissingIndexRowCount(before.getBeyondMaxLookBackMissingIndexRowCount() + count); - } + private long scannedDataRowCount = 0; + private long rebuiltIndexRowCount = 0; + private byte[] startRow; + private byte[] stopRow; + private long scanMaxTs; + private byte[] region; + private boolean shouldRetry = false; + private PhaseResult before = new PhaseResult(); + private PhaseResult after = new PhaseResult(); - private void addBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount(long count) { - before.setBeyondMaxLookBackInvalidIndexRowCount(before.getBeyondMaxLookBackInvalidIndexRowCount() + count); - } - public void addBeforeIndexHasMissingCellsCount(long count) { - getBefore().setIndexHasMissingCellsCount(getBefore().getIndexHasMissingCellsCount() + count); - } + @Override + public String toString() { + return "VerificationResult{" + "scannedDataRowCount=" + getScannedDataRowCount() + + ", rebuiltIndexRowCount=" + getRebuiltIndexRowCount() + ", before=" + getBefore() + + ", after=" + getAfter() + '}'; + } - public void addBeforeIndexHasExtraCellsCount(long count) { - getBefore().setIndexHasExtraCellsCount(getBefore().getIndexHasExtraCellsCount() + count); - } + public long getScannedDataRowCount() { + return scannedDataRowCount; + } - public void addBeforeUnverifiedIndexRowCount(long count) { - getBefore().setUnverifiedIndexRowCount(getBefore().getUnverifiedIndexRowCount() + count); - } + public boolean getShouldRetry() { + return shouldRetry; + } - public void addBeforeOldIndexRowCount(long count) { - getBefore().setOldIndexRowCount(getBefore().getOldIndexRowCount() + count); - } + public long getRebuiltIndexRowCount() { + return rebuiltIndexRowCount; + } - public void addBeforeUnknownIndexRowCount(long count) { - getBefore().setUnknownIndexRowCount(getBefore().getUnknownIndexRowCount() + count); - } + public long getBeforeRebuildValidIndexRowCount() { + return getBefore().getValidIndexRowCount(); + } - public void addBeforeRepairExtraVerifiedIndexRowCount(long count) { - getBefore().setExtraVerifiedIndexRowCount(getBefore().getExtraVerifiedIndexRowCount() + count); - } + public long getBeforeRebuildExpiredIndexRowCount() { + return getBefore().getExpiredIndexRowCount(); + } - public void addBeforeRepairExtraUnverifiedIndexRowCount(long count) { - getBefore().setExtraUnverifiedIndexRowCount(getBefore().getExtraUnverifiedIndexRowCount() + count); - } + public long getBeforeRebuildInvalidIndexRowCount() { + return getBefore().getInvalidIndexRowCount(); + } - private void addAfterRebuildValidIndexRowCount(long count) { - getAfter().setValidIndexRowCount(getAfter().getValidIndexRowCount() + count); - } + public long getBeforeRebuildUnverifiedIndexRowCount() { + return getBefore().getUnverifiedIndexRowCount(); + } - private void addAfterRebuildExpiredIndexRowCount(long count) { - getAfter().setExpiredIndexRowCount(getAfter().getExpiredIndexRowCount() + count); - } + public long getBeforeRebuildOldIndexRowCount() { + return getBefore().getOldIndexRowCount(); + } - private void addAfterRebuildMissingIndexRowCount(long count) { - getAfter().setMissingIndexRowCount(getAfter().getMissingIndexRowCount() + count); - } + public long getBeforeRebuildUnknownIndexRowCount() { + return getBefore().getUnknownIndexRowCount(); + } - private void 
addAfterRebuildInvalidIndexRowCount(long count) { - getAfter().setInvalidIndexRowCount(getAfter().getInvalidIndexRowCount() + count); - } + public long getBeforeRebuildBeyondMaxLookBackMissingIndexRowCount() { + return before.getBeyondMaxLookBackMissingIndexRowCount(); + } - private void addAfterRebuildBeyondMaxLookBackMissingIndexRowCount(long count) { - after.setBeyondMaxLookBackMissingIndexRowCount(after.getBeyondMaxLookBackMissingIndexRowCount() + count); - } + public long getBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount() { + return before.getBeyondMaxLookBackInvalidIndexRowCount(); + } - private void addAfterRebuildBeyondMaxLookBackInvalidIndexRowCount(long count) { - after.setBeyondMaxLookBackInvalidIndexRowCount(after.getBeyondMaxLookBackInvalidIndexRowCount() + count); - } + public long getBeforeRebuildMissingIndexRowCount() { + return getBefore().getMissingIndexRowCount(); + } - public void addAfterIndexHasMissingCellsCount(long count) { - getAfter().setIndexHasMissingCellsCount(getAfter().getIndexHasMissingCellsCount() + count); - } + public long getBeforeIndexHasMissingCellsCount() { + return getBefore().getIndexHasMissingCellsCount(); + } - public void addAfterIndexHasExtraCellsCount(long count) { - getAfter().setIndexHasExtraCellsCount(getAfter().getIndexHasExtraCellsCount() + count); - } + public long getBeforeIndexHasExtraCellsCount() { + return getBefore().getIndexHasExtraCellsCount(); + } - public void addAfterRepairExtraVerifiedIndexRowCount(long count) { - getAfter().setExtraVerifiedIndexRowCount(getAfter().getExtraVerifiedIndexRowCount() + count); - } + public long getBeforeRepairExtraVerifiedIndexRowCount() { + return getBefore().getExtraVerifiedIndexRowCount(); + } - public void addAfterRepairExtraUnverifiedIndexRowCount(long count) { - getAfter().setExtraUnverifiedIndexRowCount(getAfter().getExtraUnverifiedIndexRowCount() + count); - } + public long getBeforeRepairExtraUnverifiedIndexRowCount() { + return getBefore().getExtraUnverifiedIndexRowCount(); + } - private static boolean isAfterRebuildInvalidIndexRowCount(Cell cell) { - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, 0, - AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES.length) == 0) { - return true; - } - return false; - } + public long getAfterRebuildValidIndexRowCount() { + return getAfter().getValidIndexRowCount(); + } - private long getValue(Cell cell) { - return Long.parseLong(Bytes.toString(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength())); - } - - public void update(Cell cell) { - if (CellUtil - .matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, SCANNED_DATA_ROW_COUNT_BYTES)) { - addScannedDataRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, REBUILT_INDEX_ROW_COUNT_BYTES)) { - addRebuiltIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, SHOULD_RETRY_BYTES)) { - setShouldRetry((boolean) PBoolean.INSTANCE.toObject(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength())); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildValidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildExpiredIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, 
RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildMissingIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildInvalidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildBeyondMaxLookBackMissingIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES)) { - addBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES)) { - addBeforeIndexHasExtraCellsCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES)) { - addBeforeIndexHasMissingCellsCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES)) { - addBeforeUnverifiedIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES)) { - addBeforeOldIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES)) { - addBeforeUnknownIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES)) { - addBeforeRepairExtraVerifiedIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES)) { - addBeforeRepairExtraUnverifiedIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildValidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildExpiredIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildMissingIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildInvalidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildBeyondMaxLookBackMissingIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES)) { - addAfterRebuildBeyondMaxLookBackInvalidIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES)) { - addAfterIndexHasExtraCellsCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES)) { - addAfterIndexHasMissingCellsCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, 
AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES)) { - addAfterRepairExtraVerifiedIndexRowCount(getValue(cell)); - } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES)) { - addAfterRepairExtraUnverifiedIndexRowCount(getValue(cell)); - } - } - - public boolean isVerificationFailed() { - //we don't want to count max look back failures alone as failing an index rebuild job - //so we omit them from the below calculation. - if (getAfter().getInvalidIndexRowCount() + getAfter().getMissingIndexRowCount() - + getAfter().getExtraVerifiedIndexRowCount() > 0) { - return true; - } - return false; - } + public long getAfterRebuildExpiredIndexRowCount() { + return getAfter().getExpiredIndexRowCount(); + } - public void add(IndexToolVerificationResult verificationResult) { - setScannedDataRowCount(getScannedDataRowCount() + verificationResult.getScannedDataRowCount()); - setRebuiltIndexRowCount(getRebuiltIndexRowCount() + verificationResult.getRebuiltIndexRowCount()); - getBefore().add(verificationResult.getBefore()); - getAfter().add(verificationResult.getAfter()); - } + public long getAfterRebuildInvalidIndexRowCount() { + return getAfter().getInvalidIndexRowCount(); + } + + public long getAfterRebuildMissingIndexRowCount() { + return getAfter().getMissingIndexRowCount(); + } + + public long getAfterRebuildBeyondMaxLookBackMissingIndexRowCount() { + return after.getBeyondMaxLookBackMissingIndexRowCount(); + } + + public long getAfterRebuildBeyondMaxLookBackInvalidIndexRowCount() { + return after.getBeyondMaxLookBackInvalidIndexRowCount(); + } + + public long getAfterIndexHasMissingCellsCount() { + return getAfter().getIndexHasMissingCellsCount(); + } + + public long getAfterIndexHasExtraCellsCount() { + return getAfter().getIndexHasExtraCellsCount(); + } + + public long getAfterRepairExtraVerifiedIndexRowCount() { + return getAfter().getExtraVerifiedIndexRowCount(); + } + + public long getAfterRepairExtraUnverifiedIndexRowCount() { + return getAfter().getExtraUnverifiedIndexRowCount(); + } + + private void addScannedDataRowCount(long count) { + this.setScannedDataRowCount(this.getScannedDataRowCount() + count); + } + + private void addRebuiltIndexRowCount(long count) { + this.setRebuiltIndexRowCount(this.getRebuiltIndexRowCount() + count); + } + + private void addBeforeRebuildValidIndexRowCount(long count) { + getBefore().setValidIndexRowCount(getBefore().getValidIndexRowCount() + count); + } + + private void addBeforeRebuildExpiredIndexRowCount(long count) { + getBefore().setExpiredIndexRowCount(getBefore().getExpiredIndexRowCount() + count); + } + + private void addBeforeRebuildMissingIndexRowCount(long count) { + getBefore().setMissingIndexRowCount(getBefore().getMissingIndexRowCount() + count); + } + + private void addBeforeRebuildInvalidIndexRowCount(long count) { + getBefore().setInvalidIndexRowCount(getBefore().getInvalidIndexRowCount() + count); + } + + private void addBeforeRebuildBeyondMaxLookBackMissingIndexRowCount(long count) { + before.setBeyondMaxLookBackMissingIndexRowCount( + before.getBeyondMaxLookBackMissingIndexRowCount() + count); + } + + private void addBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount(long count) { + before.setBeyondMaxLookBackInvalidIndexRowCount( + before.getBeyondMaxLookBackInvalidIndexRowCount() + count); + } + + public void addBeforeIndexHasMissingCellsCount(long count) { + getBefore().setIndexHasMissingCellsCount(getBefore().getIndexHasMissingCellsCount() + count); + } + + public void 
addBeforeIndexHasExtraCellsCount(long count) { + getBefore().setIndexHasExtraCellsCount(getBefore().getIndexHasExtraCellsCount() + count); + } + + public void addBeforeUnverifiedIndexRowCount(long count) { + getBefore().setUnverifiedIndexRowCount(getBefore().getUnverifiedIndexRowCount() + count); + } + + public void addBeforeOldIndexRowCount(long count) { + getBefore().setOldIndexRowCount(getBefore().getOldIndexRowCount() + count); + } + + public void addBeforeUnknownIndexRowCount(long count) { + getBefore().setUnknownIndexRowCount(getBefore().getUnknownIndexRowCount() + count); + } + + public void addBeforeRepairExtraVerifiedIndexRowCount(long count) { + getBefore().setExtraVerifiedIndexRowCount(getBefore().getExtraVerifiedIndexRowCount() + count); + } + + public void addBeforeRepairExtraUnverifiedIndexRowCount(long count) { + getBefore() + .setExtraUnverifiedIndexRowCount(getBefore().getExtraUnverifiedIndexRowCount() + count); + } + + private void addAfterRebuildValidIndexRowCount(long count) { + getAfter().setValidIndexRowCount(getAfter().getValidIndexRowCount() + count); + } + + private void addAfterRebuildExpiredIndexRowCount(long count) { + getAfter().setExpiredIndexRowCount(getAfter().getExpiredIndexRowCount() + count); + } + + private void addAfterRebuildMissingIndexRowCount(long count) { + getAfter().setMissingIndexRowCount(getAfter().getMissingIndexRowCount() + count); + } + + private void addAfterRebuildInvalidIndexRowCount(long count) { + getAfter().setInvalidIndexRowCount(getAfter().getInvalidIndexRowCount() + count); + } + + private void addAfterRebuildBeyondMaxLookBackMissingIndexRowCount(long count) { + after.setBeyondMaxLookBackMissingIndexRowCount( + after.getBeyondMaxLookBackMissingIndexRowCount() + count); + } + + private void addAfterRebuildBeyondMaxLookBackInvalidIndexRowCount(long count) { + after.setBeyondMaxLookBackInvalidIndexRowCount( + after.getBeyondMaxLookBackInvalidIndexRowCount() + count); + } + + public void addAfterIndexHasMissingCellsCount(long count) { + getAfter().setIndexHasMissingCellsCount(getAfter().getIndexHasMissingCellsCount() + count); + } + + public void addAfterIndexHasExtraCellsCount(long count) { + getAfter().setIndexHasExtraCellsCount(getAfter().getIndexHasExtraCellsCount() + count); + } + + public void addAfterRepairExtraVerifiedIndexRowCount(long count) { + getAfter().setExtraVerifiedIndexRowCount(getAfter().getExtraVerifiedIndexRowCount() + count); + } + + public void addAfterRepairExtraUnverifiedIndexRowCount(long count) { + getAfter() + .setExtraUnverifiedIndexRowCount(getAfter().getExtraUnverifiedIndexRowCount() + count); + } + + private static boolean isAfterRebuildInvalidIndexRowCount(Cell cell) { + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, 0, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES.length) == 0 + ) { + return true; + } + return false; + } + + private long getValue(Cell cell) { + return Long.parseLong( + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + } + + public void update(Cell cell) { + if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, SCANNED_DATA_ROW_COUNT_BYTES)) { + addScannedDataRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, REBUILT_INDEX_ROW_COUNT_BYTES) + ) { + addRebuiltIndexRowCount(getValue(cell)); + } else if (CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, SHOULD_RETRY_BYTES)) { + 
setShouldRetry((boolean) PBoolean.INSTANCE.toObject(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildValidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildExpiredIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildMissingIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildInvalidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildBeyondMaxLookBackMissingIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES) + ) { + addBeforeIndexHasExtraCellsCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES) + ) { + addBeforeIndexHasMissingCellsCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeUnverifiedIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeOldIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeUnknownIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRepairExtraVerifiedIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES) + ) { + addBeforeRepairExtraUnverifiedIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRebuildValidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRebuildExpiredIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRebuildMissingIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRebuildInvalidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES) + ) { + 
addAfterRebuildBeyondMaxLookBackMissingIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRebuildBeyondMaxLookBackInvalidIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES) + ) { + addAfterIndexHasExtraCellsCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES) + ) { + addAfterIndexHasMissingCellsCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRepairExtraVerifiedIndexRowCount(getValue(cell)); + } else if ( + CellUtil.matchingColumn(cell, RESULT_TABLE_COLUMN_FAMILY, + AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES) + ) { + addAfterRepairExtraUnverifiedIndexRowCount(getValue(cell)); + } + } + + public boolean isVerificationFailed() { + // we don't want to count max look back failures alone as failing an index rebuild job + // so we omit them from the below calculation. + if ( + getAfter().getInvalidIndexRowCount() + getAfter().getMissingIndexRowCount() + + getAfter().getExtraVerifiedIndexRowCount() > 0 + ) { + return true; + } + return false; + } + + public void add(IndexToolVerificationResult verificationResult) { + setScannedDataRowCount(getScannedDataRowCount() + verificationResult.getScannedDataRowCount()); + setRebuiltIndexRowCount( + getRebuiltIndexRowCount() + verificationResult.getRebuiltIndexRowCount()); + getBefore().add(verificationResult.getBefore()); + getAfter().add(verificationResult.getAfter()); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java index d0b56b35b54..2a11b764e05 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/IndexerRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; @@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.cache.ServerCacheClient; import org.apache.phoenix.compile.ScanRanges; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.filter.SkipScanFilter; import org.apache.phoenix.hbase.index.ValueGetter; import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure; @@ -65,8 +65,6 @@ import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager; import org.apache.phoenix.hbase.index.parallel.WaitForCompletionTaskRunner; import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder; - -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.index.PhoenixIndexCodec; import org.apache.phoenix.mapreduce.index.IndexTool; @@ -75,6 +73,8 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.IndexUtil; @@ -84,406 +84,423 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - public class IndexerRegionScanner extends GlobalIndexRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexerRegionScanner.class); - protected Map indexKeyToDataPutMap; - protected UngroupedAggregateRegionObserver.MutationList mutations; - private boolean partialRebuild = false; - - IndexerRegionScanner (final RegionScanner innerScanner, final Region region, final Scan scan, - final RegionCoprocessorEnvironment env, - UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException { - super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); - indexHTable = hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); - if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { - indexTableTTL = ScanUtil.getTTL(scan); - } else { - indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); - } - pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor( - new ThreadPoolBuilder("IndexVerify", - env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY, - DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS).setCoreTimeout( - INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); - if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING) == null) { - partialRebuild = true; - } - if (verify) { - indexKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - verificationResult = new IndexToolVerificationResult(scan); 
- verificationResultRepository = - new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory); - } else { - mutations = new UngroupedAggregateRegionObserver.MutationList(maxBatchSize); - } + private static final Logger LOGGER = LoggerFactory.getLogger(IndexerRegionScanner.class); + protected Map indexKeyToDataPutMap; + protected UngroupedAggregateRegionObserver.MutationList mutations; + private boolean partialRebuild = false; + + IndexerRegionScanner(final RegionScanner innerScanner, final Region region, final Scan scan, + final RegionCoprocessorEnvironment env, + UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException { + super(innerScanner, region, scan, env, ungroupedAggregateRegionObserver); + indexHTable = + hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); + if (BaseScannerRegionObserver.isPhoenixTableTTLEnabled(env.getConfiguration())) { + indexTableTTL = ScanUtil.getTTL(scan); + } else { + indexTableTTL = indexHTable.getDescriptor().getColumnFamilies()[0].getTimeToLive(); } - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); + pool = new WaitForCompletionTaskRunner( + ThreadPoolManager.getExecutor(new ThreadPoolBuilder("IndexVerify", env.getConfiguration()) + .setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY, + DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS) + .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); + if (scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING) == null) { + partialRebuild = true; } - - @Override - public boolean isFilterDone() { return false; } - - @Override - public void close() throws IOException { - innerScanner.close(); - try { - if (verify) { - verificationResultRepository.logToIndexToolResultTable(verificationResult, - IndexTool.IndexVerifyType.ONLY, region.getRegionInfo().getRegionName()); - } - } finally { - this.pool.stop("IndexerRegionScanner is closing"); - if (verify) { - verificationResultRepository.close(); - } - super.close(); - } + if (verify) { + indexKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + verificationResult = new IndexToolVerificationResult(scan); + verificationResultRepository = + new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory); + } else { + mutations = new UngroupedAggregateRegionObserver.MutationList(maxBatchSize); } - - private boolean verifySingleIndexRow(Result indexRow, final Put dataRow, - IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { - ValueGetter valueGetter = new IndexUtil.SimpleValueGetter(dataRow); - long ts = IndexUtil.getMaxTimestamp(dataRow); - Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, - valueGetter, new ImmutableBytesWritable(dataRow.getRow()), ts, null, null, false); - - if (indexPut == null) { - // This means the data row does not have any covered column values - indexPut = new Put(indexRow.getRow()); - } - // Add the empty column - indexPut.addColumn(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier(), ts, EMPTY_COLUMN_VALUE_BYTES); - - int cellCount = 0; - long currentTime = EnvironmentEdgeManager.currentTime(); - for (List cells : indexPut.getFamilyCellMap().values()) { - if (cells == null) { - break; - } - for (Cell expectedCell : cells) { - byte[] family = CellUtil.cloneFamily(expectedCell); - byte[] qualifier = CellUtil.cloneQualifier(expectedCell); - 
Cell actualCell = indexRow.getColumnLatestCell(family, qualifier); - if (actualCell == null) { - // Check if cell expired as per the current server's time and data table ttl - // Index table should have the same ttl as the data table, hence we might not - // get a value back from index if it has already expired between our rebuild and - // verify - - // or if cell timestamp is beyond maxlookback - if (isTimestampBeforeTTL(indexTableTTL, currentTime, expectedCell.getTimestamp())) { - continue; - } - - return false; - } - if (actualCell.getTimestamp() < ts) { - // Skip older cells since a Phoenix index row is composed of cells with the same timestamp - continue; - } - // Check all columns - if (!CellUtil.matchingValue(actualCell, expectedCell) || actualCell.getTimestamp() != ts) { - if(isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, actualCell.getTimestamp())) { - verificationPhaseResult - .setBeyondMaxLookBackInvalidIndexRowCount(verificationPhaseResult - .getBeyondMaxLookBackInvalidIndexRowCount()+1); - continue; - } - return false; - } - cellCount++; - } - } - return cellCount == indexRow.rawCells().length; + } + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + @Override + public boolean isFilterDone() { + return false; + } + + @Override + public void close() throws IOException { + innerScanner.close(); + try { + if (verify) { + verificationResultRepository.logToIndexToolResultTable(verificationResult, + IndexTool.IndexVerifyType.ONLY, region.getRegionInfo().getRegionName()); + } + } finally { + this.pool.stop("IndexerRegionScanner is closing"); + if (verify) { + verificationResultRepository.close(); + } + super.close(); } - - private void verifyIndexRows(List keys, Map perTaskDataKeyToDataPutMap, - IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { - ScanRanges scanRanges = ScanRanges.createPointLookup(keys); - Scan indexScan = new Scan(); - indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - scanRanges.initializeScan(indexScan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - indexScan.setFilter(skipScanFilter); - indexScan.setCacheBlocks(false); - try (ResultScanner resultScanner = indexHTable.getScanner(indexScan)) { - for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { - Put dataPut = indexKeyToDataPutMap.get(result.getRow()); - if (dataPut == null) { - // This should never happen - exceptionMessage = "Index verify failed - Missing data row - " + indexHTable.getName(); - throw new IOException(exceptionMessage); - } - if (verifySingleIndexRow(result, dataPut, verificationPhaseResult)) { - verificationPhaseResult.setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount()+1); - } else { - verificationPhaseResult.setInvalidIndexRowCount(verificationPhaseResult.getInvalidIndexRowCount()+1); - } - perTaskDataKeyToDataPutMap.remove(dataPut.getRow()); - } - } catch (Throwable t) { - ClientUtil.throwIOException(indexHTable.getName().toString(), t); + } + + private boolean verifySingleIndexRow(Result indexRow, final Put dataRow, + IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { + ValueGetter valueGetter = new IndexUtil.SimpleValueGetter(dataRow); + long ts = IndexUtil.getMaxTimestamp(dataRow); + Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, valueGetter, + new ImmutableBytesWritable(dataRow.getRow()), ts, null, null, false); 
+ + if (indexPut == null) { + // This means the data row does not have any covered column values + indexPut = new Put(indexRow.getRow()); + } + // Add the empty column + indexPut.addColumn(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier(), ts, EMPTY_COLUMN_VALUE_BYTES); + + int cellCount = 0; + long currentTime = EnvironmentEdgeManager.currentTime(); + for (List cells : indexPut.getFamilyCellMap().values()) { + if (cells == null) { + break; + } + for (Cell expectedCell : cells) { + byte[] family = CellUtil.cloneFamily(expectedCell); + byte[] qualifier = CellUtil.cloneQualifier(expectedCell); + Cell actualCell = indexRow.getColumnLatestCell(family, qualifier); + if (actualCell == null) { + // Check if cell expired as per the current server's time and data table ttl + // Index table should have the same ttl as the data table, hence we might not + // get a value back from index if it has already expired between our rebuild and + // verify + + // or if cell timestamp is beyond maxlookback + if (isTimestampBeforeTTL(indexTableTTL, currentTime, expectedCell.getTimestamp())) { + continue; + } + + return false; } - // Check if any expected rows from index(which we didn't get) are already expired due to TTL - if (!perTaskDataKeyToDataPutMap.isEmpty()) { - Iterator> itr = perTaskDataKeyToDataPutMap.entrySet().iterator(); - long currentTime = EnvironmentEdgeManager.currentTime(); - while(itr.hasNext()) { - Entry entry = itr.next(); - long ts = IndexUtil.getMaxTimestamp(entry.getValue()); - if (isTimestampBeforeTTL(indexTableTTL, currentTime, ts)) { - itr.remove(); - verificationPhaseResult.setExpiredIndexRowCount(verificationPhaseResult.getExpiredIndexRowCount()+1); - } - } + if (actualCell.getTimestamp() < ts) { + // Skip older cells since a Phoenix index row is composed of cells with the same timestamp + continue; } - // Check if any expected rows from index(which we didn't get) are beyond max look back and have been compacted away - if (!perTaskDataKeyToDataPutMap.isEmpty()) { - for (Entry entry : perTaskDataKeyToDataPutMap.entrySet()) { - Put put = entry.getValue(); - long ts = IndexUtil.getMaxTimestamp(put); - long currentTime = EnvironmentEdgeManager.currentTime(); - if (isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, ts)) { - verificationPhaseResult. - setBeyondMaxLookBackMissingIndexRowCount(verificationPhaseResult.getBeyondMaxLookBackMissingIndexRowCount() + 1); - } else { - verificationPhaseResult.setMissingIndexRowCount( - verificationPhaseResult.getMissingIndexRowCount() + 1); - } - } + // Check all columns + if (!CellUtil.matchingValue(actualCell, expectedCell) || actualCell.getTimestamp() != ts) { + if ( + isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, actualCell.getTimestamp()) + ) { + verificationPhaseResult.setBeyondMaxLookBackInvalidIndexRowCount( + verificationPhaseResult.getBeyondMaxLookBackInvalidIndexRowCount() + 1); + continue; + } + return false; } + cellCount++; + } } - - private void addVerifyTask(TaskBatch tasks, final List keys, final Map perTaskDataKeyToDataPutMap, - final IndexToolVerificationResult.PhaseResult verificationPhaseResult) { - tasks.add(new Task() { - @Override - public Boolean call() throws Exception { - try { - if (Thread.currentThread().isInterrupted()) { - exceptionMessage = "Pool closed, not attempting to verify index rows! 
" + indexHTable.getName(); - throw new IOException(exceptionMessage); - } - verifyIndexRows(keys, perTaskDataKeyToDataPutMap, verificationPhaseResult); - } catch (Exception e) { - throw e; - } - return Boolean.TRUE; - } - }); - } - - private void parallelizeIndexVerify(IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { - int taskCount = (indexKeyToDataPutMap.size() + rowCountPerTask - 1) / rowCountPerTask; - TaskBatch tasks = new TaskBatch<>(taskCount); - - List> dataPutMapList = new ArrayList<>(taskCount); - List verificationPhaseResultList = new ArrayList<>(taskCount); - List keys = new ArrayList<>(rowCountPerTask); - - Map perTaskDataKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - - dataPutMapList.add(perTaskDataKeyToDataPutMap); - - IndexToolVerificationResult.PhaseResult perTaskVerificationPhaseResult = new IndexToolVerificationResult.PhaseResult(); - verificationPhaseResultList.add(perTaskVerificationPhaseResult); - - for (Map.Entry entry: indexKeyToDataPutMap.entrySet()) { - keys.add(PVarbinary.INSTANCE.getKeyRange(entry.getKey(), SortOrder.ASC)); - perTaskDataKeyToDataPutMap.put(entry.getValue().getRow(), entry.getValue()); - if (keys.size() == rowCountPerTask) { - addVerifyTask(tasks, keys, perTaskDataKeyToDataPutMap, perTaskVerificationPhaseResult); - keys = new ArrayList<>(rowCountPerTask); - perTaskDataKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - dataPutMapList.add(perTaskDataKeyToDataPutMap); - perTaskVerificationPhaseResult = new IndexToolVerificationResult.PhaseResult(); - verificationPhaseResultList.add(perTaskVerificationPhaseResult); - } + return cellCount == indexRow.rawCells().length; + } + + private void verifyIndexRows(List keys, Map perTaskDataKeyToDataPutMap, + IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { + ScanRanges scanRanges = ScanRanges.createPointLookup(keys); + Scan indexScan = new Scan(); + indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + scanRanges.initializeScan(indexScan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + indexScan.setFilter(skipScanFilter); + indexScan.setCacheBlocks(false); + try (ResultScanner resultScanner = indexHTable.getScanner(indexScan)) { + for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { + Put dataPut = indexKeyToDataPutMap.get(result.getRow()); + if (dataPut == null) { + // This should never happen + exceptionMessage = "Index verify failed - Missing data row - " + indexHTable.getName(); + throw new IOException(exceptionMessage); } - if (keys.size() > 0) { - addVerifyTask(tasks, keys, perTaskDataKeyToDataPutMap, perTaskVerificationPhaseResult); + if (verifySingleIndexRow(result, dataPut, verificationPhaseResult)) { + verificationPhaseResult + .setValidIndexRowCount(verificationPhaseResult.getValidIndexRowCount() + 1); + } else { + verificationPhaseResult + .setInvalidIndexRowCount(verificationPhaseResult.getInvalidIndexRowCount() + 1); } - Pair, List>> resultsAndFutures = null; - try { - LOGGER.debug("Waiting on index verify tasks to complete..."); - resultsAndFutures = this.pool.submitUninterruptible(tasks); - } catch (ExecutionException e) { - throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e); - } catch (EarlyExitFailure e) { - throw new RuntimeException("Stopped while waiting for batch, quitting!", e); + perTaskDataKeyToDataPutMap.remove(dataPut.getRow()); + } + } 
catch (Throwable t) { + ClientUtil.throwIOException(indexHTable.getName().toString(), t); + } + // Check if any expected rows from index(which we didn't get) are already expired due to TTL + if (!perTaskDataKeyToDataPutMap.isEmpty()) { + Iterator> itr = perTaskDataKeyToDataPutMap.entrySet().iterator(); + long currentTime = EnvironmentEdgeManager.currentTime(); + while (itr.hasNext()) { + Entry entry = itr.next(); + long ts = IndexUtil.getMaxTimestamp(entry.getValue()); + if (isTimestampBeforeTTL(indexTableTTL, currentTime, ts)) { + itr.remove(); + verificationPhaseResult + .setExpiredIndexRowCount(verificationPhaseResult.getExpiredIndexRowCount() + 1); } - int index = 0; - for (Boolean result : resultsAndFutures.getFirst()) { - if (result == null) { - Throwable cause = ServerUtil.getExceptionFromFailedFuture(resultsAndFutures.getSecond().get(index)); - // there was a failure - throw new IOException(exceptionMessage, cause); - } - index++; + } + } + // Check if any expected rows from index(which we didn't get) are beyond max look back and have + // been compacted away + if (!perTaskDataKeyToDataPutMap.isEmpty()) { + for (Entry entry : perTaskDataKeyToDataPutMap.entrySet()) { + Put put = entry.getValue(); + long ts = IndexUtil.getMaxTimestamp(put); + long currentTime = EnvironmentEdgeManager.currentTime(); + if (isTimestampBeyondMaxLookBack(maxLookBackInMills, currentTime, ts)) { + verificationPhaseResult.setBeyondMaxLookBackMissingIndexRowCount( + verificationPhaseResult.getBeyondMaxLookBackMissingIndexRowCount() + 1); + } else { + verificationPhaseResult + .setMissingIndexRowCount(verificationPhaseResult.getMissingIndexRowCount() + 1); } - for (IndexToolVerificationResult.PhaseResult result : verificationPhaseResultList) { - verificationPhaseResult.add(result); + } + } + } + + private void addVerifyTask(TaskBatch tasks, final List keys, + final Map perTaskDataKeyToDataPutMap, + final IndexToolVerificationResult.PhaseResult verificationPhaseResult) { + tasks.add(new Task() { + @Override + public Boolean call() throws Exception { + try { + if (Thread.currentThread().isInterrupted()) { + exceptionMessage = + "Pool closed, not attempting to verify index rows! 
" + indexHTable.getName(); + throw new IOException(exceptionMessage); + } + verifyIndexRows(keys, perTaskDataKeyToDataPutMap, verificationPhaseResult); + } catch (Exception e) { + throw e; } + return Boolean.TRUE; + } + }); + } + + private void parallelizeIndexVerify( + IndexToolVerificationResult.PhaseResult verificationPhaseResult) throws IOException { + int taskCount = (indexKeyToDataPutMap.size() + rowCountPerTask - 1) / rowCountPerTask; + TaskBatch tasks = new TaskBatch<>(taskCount); + + List> dataPutMapList = new ArrayList<>(taskCount); + List verificationPhaseResultList = + new ArrayList<>(taskCount); + List keys = new ArrayList<>(rowCountPerTask); + + Map perTaskDataKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + + dataPutMapList.add(perTaskDataKeyToDataPutMap); + + IndexToolVerificationResult.PhaseResult perTaskVerificationPhaseResult = + new IndexToolVerificationResult.PhaseResult(); + verificationPhaseResultList.add(perTaskVerificationPhaseResult); + + for (Map.Entry entry : indexKeyToDataPutMap.entrySet()) { + keys.add(PVarbinary.INSTANCE.getKeyRange(entry.getKey(), SortOrder.ASC)); + perTaskDataKeyToDataPutMap.put(entry.getValue().getRow(), entry.getValue()); + if (keys.size() == rowCountPerTask) { + addVerifyTask(tasks, keys, perTaskDataKeyToDataPutMap, perTaskVerificationPhaseResult); + keys = new ArrayList<>(rowCountPerTask); + perTaskDataKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + dataPutMapList.add(perTaskDataKeyToDataPutMap); + perTaskVerificationPhaseResult = new IndexToolVerificationResult.PhaseResult(); + verificationPhaseResultList.add(perTaskVerificationPhaseResult); + } } - - private void verifyIndex() throws IOException { - IndexToolVerificationResult nextVerificationResult = new IndexToolVerificationResult(scan); - nextVerificationResult.setScannedDataRowCount(indexKeyToDataPutMap.size()); - IndexToolVerificationResult.PhaseResult verificationPhaseResult = new IndexToolVerificationResult.PhaseResult(); - // For these options we start with verifying index rows - parallelizeIndexVerify(verificationPhaseResult); - nextVerificationResult.getBefore().add(verificationPhaseResult); - indexKeyToDataPutMap.clear(); - verificationResult.add(nextVerificationResult); + if (keys.size() > 0) { + addVerifyTask(tasks, keys, perTaskDataKeyToDataPutMap, perTaskVerificationPhaseResult); } - - private void setMutationAttributes(Mutation m, byte[] uuidValue) { - ScanUtil.annotateMutationWithMetadataAttributes(tenantId, schemaName, - logicalTableName, tableType, lastDdlTimestamp, m); - m.setAttribute(useProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, - indexMetaData); - m.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - m.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, - BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); - m.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersionBytes); - // Since we're replaying existing mutations, it makes no sense to write them to the wal - m.setDurability(Durability.SKIP_WAL); + Pair, List>> resultsAndFutures = null; + try { + LOGGER.debug("Waiting on index verify tasks to complete..."); + resultsAndFutures = this.pool.submitUninterruptible(tasks); + } catch (ExecutionException e) { + throw new RuntimeException( + "Should not fail on the results while using a WaitForCompletionTaskRunner", e); + } catch (EarlyExitFailure e) { + throw new RuntimeException("Stopped while waiting for batch, quitting!", e); } - - private byte[] commitIfReady(byte[] uuidValue, UngroupedAggregateRegionObserver.MutationList mutationList) throws IOException { - if (ServerUtil.readyToCommit(mutationList.size(), mutationList.byteSize(), maxBatchSize, maxBatchSizeBytes)) { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutationList, blockingMemstoreSize); - uuidValue = ServerCacheClient.generateId(); - mutationList.clear(); - } - return uuidValue; + int index = 0; + for (Boolean result : resultsAndFutures.getFirst()) { + if (result == null) { + Throwable cause = + ServerUtil.getExceptionFromFailedFuture(resultsAndFutures.getSecond().get(index)); + // there was a failure + throw new IOException(exceptionMessage, cause); + } + index++; } - - @Override - public boolean next(List results) throws IOException { - Cell lastCell = null; - int rowCount = 0; - region.startRegionOperation(); - try { - synchronized (innerScanner) { - byte[] uuidValue = ServerCacheClient.generateId(); - do { - List row = new ArrayList<>(); - hasMore = innerScanner.nextRaw(row); - if (!row.isEmpty()) { - lastCell = row.get(0); // lastCell is any cell from the last visited row - if (isDummy(row)) { - break; - } - Put put = null; - Delete del = null; - for (Cell cell : row) { - if (cell.getType().equals(Cell.Type.Put)) { - if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) { - continue; - } - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - } - del.add(cell); - } - } - if (put == null && del == null) { - continue; - } - if (!verify) { - if (put != null) { - setMutationAttributes(put, uuidValue); - mutations.add(put); - } - if (del != null) { - setMutationAttributes(del, uuidValue); - mutations.add(del); - } - uuidValue = commitIfReady(uuidValue, mutations); - } else { - indexKeyToDataPutMap - .put(indexMaintainer.getIndexRowKey(put), put); - } - rowCount++; - - } - } while (hasMore && rowCount < pageSizeInRows); - if (verify) { - verifyIndex(); - } else if (!mutations.isEmpty()) { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, blockingMemstoreSize); - mutations.clear(); + for (IndexToolVerificationResult.PhaseResult result : verificationPhaseResultList) { + verificationPhaseResult.add(result); + } + } + + private void verifyIndex() throws IOException { + IndexToolVerificationResult nextVerificationResult = new 
IndexToolVerificationResult(scan); + nextVerificationResult.setScannedDataRowCount(indexKeyToDataPutMap.size()); + IndexToolVerificationResult.PhaseResult verificationPhaseResult = + new IndexToolVerificationResult.PhaseResult(); + // For these options we start with verifying index rows + parallelizeIndexVerify(verificationPhaseResult); + nextVerificationResult.getBefore().add(verificationPhaseResult); + indexKeyToDataPutMap.clear(); + verificationResult.add(nextVerificationResult); + } + + private void setMutationAttributes(Mutation m, byte[] uuidValue) { + ScanUtil.annotateMutationWithMetadataAttributes(tenantId, schemaName, logicalTableName, + tableType, lastDdlTimestamp, m); + m.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, + indexMetaData); + m.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + m.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); + m.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersionBytes); + // Since we're replaying existing mutations, it makes no sense to write them to the wal + m.setDurability(Durability.SKIP_WAL); + } + + private byte[] commitIfReady(byte[] uuidValue, + UngroupedAggregateRegionObserver.MutationList mutationList) throws IOException { + if ( + ServerUtil.readyToCommit(mutationList.size(), mutationList.byteSize(), maxBatchSize, + maxBatchSizeBytes) + ) { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutationList, + blockingMemstoreSize); + uuidValue = ServerCacheClient.generateId(); + mutationList.clear(); + } + return uuidValue; + } + + @Override + public boolean next(List results) throws IOException { + Cell lastCell = null; + int rowCount = 0; + region.startRegionOperation(); + try { + synchronized (innerScanner) { + byte[] uuidValue = ServerCacheClient.generateId(); + do { + List row = new ArrayList<>(); + hasMore = innerScanner.nextRaw(row); + if (!row.isEmpty()) { + lastCell = row.get(0); // lastCell is any cell from the last visited row + if (isDummy(row)) { + break; + } + Put put = null; + Delete del = null; + for (Cell cell : row) { + if (cell.getType().equals(Cell.Type.Put)) { + if (!partialRebuild && familyMap != null && !isColumnIncluded(cell)) { + continue; + } + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); + } + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); } + del.add(cell); + } } - } catch (IOException e) { - LOGGER.error(String.format("IOException during rebuilding: %s", Throwables.getStackTraceAsString(e))); - throw e; - } finally { - region.closeRegionOperation(); - if (verify) { - indexKeyToDataPutMap.clear(); - } else { - mutations.clear(); + if (put == null && del == null) { + continue; } - } - byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) rowCount); - final Cell aggKeyValue; - if (lastCell == null) { - byte[] rowKey; - byte[] startKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - region.getRegionInfo().getStartKey(); - byte[] endKey = scan.getStopRow().length > 0 ? 
scan.getStopRow() : - region.getRegionInfo().getEndKey(); - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - if (!isIncompatibleClient) { - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.includeStartRow()) { - rowKey = startKey; - } else if (scan.includeStopRow()) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } + if (!verify) { + if (put != null) { + setMutationAttributes(put, uuidValue); + mutations.add(put); + } + if (del != null) { + setMutationAttributes(del, uuidValue); + mutations.add(del); + } + uuidValue = commitIfReady(uuidValue, mutations); } else { - rowKey = UNGROUPED_AGG_ROW_KEY; + indexKeyToDataPutMap.put(indexMaintainer.getIndexRowKey(put), put); } - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - } else { - aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), - SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - } - results.add(aggKeyValue); - return hasMore; - } + rowCount++; - @Override - public long getMaxResultSize() { - return scan.getMaxResultSize(); + } + } while (hasMore && rowCount < pageSizeInRows); + if (verify) { + verifyIndex(); + } else if (!mutations.isEmpty()) { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + ungroupedAggregateRegionObserver.commitBatchWithRetries(region, mutations, + blockingMemstoreSize); + mutations.clear(); + } + } + } catch (IOException e) { + LOGGER.error( + String.format("IOException during rebuilding: %s", Throwables.getStackTraceAsString(e))); + throw e; + } finally { + region.closeRegionOperation(); + if (verify) { + indexKeyToDataPutMap.clear(); + } else { + mutations.clear(); + } } - - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); + byte[] rowCountBytes = PLong.INSTANCE.toBytes((long) rowCount); + final Cell aggKeyValue; + if (lastCell == null) { + byte[] rowKey; + byte[] startKey = + scan.getStartRow().length > 0 ? scan.getStartRow() : region.getRegionInfo().getStartKey(); + byte[] endKey = + scan.getStopRow().length > 0 ? 
scan.getStopRow() : region.getRegionInfo().getEndKey(); + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + if (!isIncompatibleClient) { + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if (scan.includeStartRow()) { + rowKey = startKey; + } else if (scan.includeStopRow()) { + rowKey = endKey; + } else { + rowKey = HConstants.EMPTY_END_ROW; + } + } + } else { + rowKey = UNGROUPED_AGG_ROW_KEY; + } + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, + AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + } else { + aggKeyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); } + results.add(aggKeyValue); + return hasMore; + } + + @Override + public long getMaxResultSize() { + return scan.getMaxResultSize(); + } + + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 435c8886f1c..7bb11dbff81 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,6 +22,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CDC_INCLUDE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES; @@ -50,6 +51,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT_BYTES; @@ -57,8 +59,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE_BYTES; @@ -69,6 +69,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORAGE_SCHEME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STREAMING_TOPIC_NAME_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES; @@ -76,6 +77,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION_BYTES; @@ -84,13 +86,11 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA_TYPE_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES; +import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; import static org.apache.phoenix.query.QueryServices.SKIP_SYSTEM_TABLES_EXISTENCE_CHECK; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_LOOKBACK_AGE_BYTES; +import static org.apache.phoenix.schema.PTable.LinkType.PARENT_TABLE; import static org.apache.phoenix.schema.PTable.LinkType.PHYSICAL_TABLE; import static org.apache.phoenix.schema.PTable.LinkType.VIEW_INDEX_PARENT_TABLE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CDC_INCLUDE_BYTES; -import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; -import static org.apache.phoenix.schema.PTable.LinkType.PARENT_TABLE; import static org.apache.phoenix.schema.PTable.ViewType.MAPPED; import static org.apache.phoenix.schema.PTableImpl.getColumnsToClone; import static org.apache.phoenix.schema.PTableType.CDC; @@ -116,11 +116,11 @@ import java.util.Iterator; import java.util.List; import java.util.NavigableMap; +import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.stream.Collectors; -import java.util.Objects; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -260,11 +260,14 @@ import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.transaction.TransactionFactory; import 
org.apache.phoenix.util.ByteUtil; -import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.CDCUtil; +import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.MetaDataUtil; @@ -279,4939 +282,5071 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.cache.Cache; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import com.google.protobuf.ByteString; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; /** - * Endpoint co-processor through which all Phoenix metadata mutations flow. - * Phoenix metadata is stored in SYSTEM.CATALOG. The table specific information - * is stored in a single header row. Column information is stored in a separate - * row per column. Linking information (indexes, views etc) are stored using a - * separate row for each link that uses the {@link LinkType} column value. The - * {@code parent->child } links are stored in a separate SYSTEM.CHILD_LINK table. - * Metadata for all tables/views/indexes in the same schema are stored in a - * single region which is enforced using the {@link MetaDataSplitPolicy}. + * Endpoint co-processor through which all Phoenix metadata mutations flow. Phoenix metadata is + * stored in SYSTEM.CATALOG. The table specific information is stored in a single header row. Column + * information is stored in a separate row per column. Linking information (indexes, views etc) are + * stored using a separate row for each link that uses the {@link LinkType} column value. The + * {@code parent->child } links are stored in a separate SYSTEM.CHILD_LINK table. Metadata for all + * tables/views/indexes in the same schema are stored in a single region which is enforced using the + * {@link MetaDataSplitPolicy}. *

    - * While creating child views we only store columns added by the view. When - * resolving a view we resolve all its parents and add their columns to the - * PTable that is returned. We lock the parent table while creating an index to - * ensure its metadata doesn't change. - * While adding or dropping columns we lock the table or view to ensure that - * concurrent conflicting changes are prevented. We also validate that there are - * no existing conflicting child view columns when we add a column to a parent. - * While dropping a column from a parent we check if there are any child views - * that need the column and throw an exception. If there are view indexes that - * required the column we drop them as well. - * While dropping a table or view that has children using the cascade option, we - * do not drop the child view metadata which will be removed at compaction time. - * If we recreate a table or view that was dropped whose child metadata hasn't - * been removed yet, we delete the child view metadata. When resolving a view, - * we resolve all its parents, if any of them are dropped the child view is + * While creating child views we only store columns added by the view. When resolving a view we + * resolve all its parents and add their columns to the PTable that is returned. We lock the parent + * table while creating an index to ensure its metadata doesn't change. While adding or dropping + * columns we lock the table or view to ensure that concurrent conflicting changes are prevented. We + * also validate that there are no existing conflicting child view columns when we add a column to a + * parent. While dropping a column from a parent we check if there are any child views that need the + * column and throw an exception. If there are view indexes that required the column we drop them as + * well. While dropping a table or view that has children using the cascade option, we do not drop + * the child view metadata which will be removed at compaction time. If we recreate a table or view + * that was dropped whose child metadata hasn't been removed yet, we delete the child view metadata. + * When resolving a view, we resolve all its parents, if any of them are dropped the child view is * considered to be dropped and we throw a TableNotFoundException. *

    - * We only allow mutations to the latest version of a Phoenix table (i.e. the - * timeStamp must be increasing). For adding/dropping columns we use a sequence - * number on the table to ensure that the client has the latest version. - * + * We only allow mutations to the latest version of a Phoenix table (i.e. the timeStamp must be + * increasing). For adding/dropping columns we use a sequence number on the table to ensure that the + * client has the latest version. * @since 0.1 */ @SuppressWarnings("deprecation") @CoreCoprocessor public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCoprocessor { - private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataEndpointImpl.class); - - private static final byte[] CHILD_TABLE_BYTES = new byte[]{PTable.LinkType.CHILD_TABLE.getSerializedValue()}; - private static final byte[] PHYSICAL_TABLE_BYTES = - new byte[]{PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()}; - - private LockManager lockManager; - private long metadataCacheRowLockTimeout; - - // KeyValues for Table - private static final Cell TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES); - private static final Cell TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, -TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES); - private static final Cell COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES); - private static final Cell SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES); - private static final Cell PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES); - private static final Cell DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); - private static final Cell INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); - private static final Cell IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES); - private static final Cell VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES); - private static final Cell DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES); - private static final Cell DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES); - private static final Cell MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES); - private static final Cell VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES); - private static final Cell VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES); - /** - * A designator for choosing the right type for viewIndex (Short vs Long) to be backward compatible. 
- **/ - private static final Cell VIEW_INDEX_ID_DATA_TYPE_BYTES_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_DATA_TYPE_BYTES); - private static final Cell INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES); - private static final Cell INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); - private static final Cell STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES); - private static final Cell EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); - private static final Cell BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES); - private static final Cell ROW_KEY_ORDER_OPTIMIZABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES); - private static final Cell TRANSACTIONAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES); - private static final Cell TRANSACTION_PROVIDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTION_PROVIDER_BYTES); - private static final Cell PHYSICAL_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PHYSICAL_TABLE_NAME_BYTES); - private static final Cell UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES); - private static final Cell IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES); - private static final Cell AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES); - private static final Cell APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES); - private static final Cell STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES); - private static final Cell ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES); - private static final Cell USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES); - private static final Cell LAST_DDL_TIMESTAMP_KV = - createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, LAST_DDL_TIMESTAMP_BYTES); - private static final Cell CHANGE_DETECTION_ENABLED_KV = - createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, - CHANGE_DETECTION_ENABLED_BYTES); - private static final Cell SCHEMA_VERSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, SCHEMA_VERSION_BYTES); - private static final Cell EXTERNAL_SCHEMA_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, EXTERNAL_SCHEMA_ID_BYTES); - private static final Cell STREAMING_TOPIC_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, STREAMING_TOPIC_NAME_BYTES); - private static final Cell CDC_INCLUDE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, CDC_INCLUDE_BYTES); - private static final Cell INDEX_WHERE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, INDEX_WHERE_BYTES); - - private static final Cell 
MAX_LOOKBACK_AGE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_LOOKBACK_AGE_BYTES); - - private static final Cell TTL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, TTL_BYTES); - private static final Cell ROW_KEY_MATCHER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, - TABLE_FAMILY_BYTES, ROW_KEY_MATCHER_BYTES); - - private static final List TABLE_KV_COLUMNS = Lists.newArrayList( - EMPTY_KEYVALUE_KV, - TABLE_TYPE_KV, - TABLE_SEQ_NUM_KV, - COLUMN_COUNT_KV, - SALT_BUCKETS_KV, - PK_NAME_KV, - DATA_TABLE_NAME_KV, - INDEX_STATE_KV, - IMMUTABLE_ROWS_KV, - VIEW_EXPRESSION_KV, - DEFAULT_COLUMN_FAMILY_KV, - DISABLE_WAL_KV, - MULTI_TENANT_KV, - VIEW_TYPE_KV, - VIEW_INDEX_ID_KV, - VIEW_INDEX_ID_DATA_TYPE_BYTES_KV, - INDEX_TYPE_KV, - INDEX_DISABLE_TIMESTAMP_KV, - STORE_NULLS_KV, - BASE_COLUMN_COUNT_KV, - ROW_KEY_ORDER_OPTIMIZABLE_KV, - TRANSACTIONAL_KV, - TRANSACTION_PROVIDER_KV, - PHYSICAL_TABLE_NAME_KV, - UPDATE_CACHE_FREQUENCY_KV, - IS_NAMESPACE_MAPPED_KV, - AUTO_PARTITION_SEQ_KV, - APPEND_ONLY_SCHEMA_KV, - STORAGE_SCHEME_KV, - ENCODING_SCHEME_KV, - USE_STATS_FOR_PARALLELIZATION_KV, - LAST_DDL_TIMESTAMP_KV, - CHANGE_DETECTION_ENABLED_KV, - SCHEMA_VERSION_KV, - EXTERNAL_SCHEMA_ID_KV, - STREAMING_TOPIC_NAME_KV, - INDEX_WHERE_KV, - MAX_LOOKBACK_AGE_KV, - CDC_INCLUDE_KV, - TTL_KV, - ROW_KEY_MATCHER_KV - ); - - static { - Collections.sort(TABLE_KV_COLUMNS, CellComparatorImpl.COMPARATOR); - } - - private static final int TABLE_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_TYPE_KV); - private static final int TABLE_SEQ_NUM_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_SEQ_NUM_KV); - private static final int COLUMN_COUNT_INDEX = TABLE_KV_COLUMNS.indexOf(COLUMN_COUNT_KV); - private static final int SALT_BUCKETS_INDEX = TABLE_KV_COLUMNS.indexOf(SALT_BUCKETS_KV); - private static final int PK_NAME_INDEX = TABLE_KV_COLUMNS.indexOf(PK_NAME_KV); - private static final int DATA_TABLE_NAME_INDEX = TABLE_KV_COLUMNS.indexOf(DATA_TABLE_NAME_KV); - private static final int INDEX_STATE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_STATE_KV); - private static final int IMMUTABLE_ROWS_INDEX = TABLE_KV_COLUMNS.indexOf(IMMUTABLE_ROWS_KV); - private static final int VIEW_STATEMENT_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_EXPRESSION_KV); - private static final int DEFAULT_COLUMN_FAMILY_INDEX = TABLE_KV_COLUMNS.indexOf(DEFAULT_COLUMN_FAMILY_KV); - private static final int DISABLE_WAL_INDEX = TABLE_KV_COLUMNS.indexOf(DISABLE_WAL_KV); - private static final int MULTI_TENANT_INDEX = TABLE_KV_COLUMNS.indexOf(MULTI_TENANT_KV); - private static final int VIEW_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_TYPE_KV); - private static final int VIEW_INDEX_ID_DATA_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_DATA_TYPE_BYTES_KV); - private static final int VIEW_INDEX_ID_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_KV); - private static final int INDEX_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_TYPE_KV); - private static final int STORE_NULLS_INDEX = TABLE_KV_COLUMNS.indexOf(STORE_NULLS_KV); - private static final int BASE_COLUMN_COUNT_INDEX = TABLE_KV_COLUMNS.indexOf(BASE_COLUMN_COUNT_KV); - private static final int ROW_KEY_ORDER_OPTIMIZABLE_INDEX = TABLE_KV_COLUMNS.indexOf(ROW_KEY_ORDER_OPTIMIZABLE_KV); - private static final int TRANSACTIONAL_INDEX = TABLE_KV_COLUMNS.indexOf(TRANSACTIONAL_KV); - private static final int TRANSACTION_PROVIDER_INDEX = TABLE_KV_COLUMNS.indexOf(TRANSACTION_PROVIDER_KV); - private static final int UPDATE_CACHE_FREQUENCY_INDEX = 
TABLE_KV_COLUMNS.indexOf(UPDATE_CACHE_FREQUENCY_KV); - private static final int INDEX_DISABLE_TIMESTAMP = TABLE_KV_COLUMNS.indexOf(INDEX_DISABLE_TIMESTAMP_KV); - private static final int IS_NAMESPACE_MAPPED_INDEX = TABLE_KV_COLUMNS.indexOf(IS_NAMESPACE_MAPPED_KV); - private static final int AUTO_PARTITION_SEQ_INDEX = TABLE_KV_COLUMNS.indexOf(AUTO_PARTITION_SEQ_KV); - private static final int APPEND_ONLY_SCHEMA_INDEX = TABLE_KV_COLUMNS.indexOf(APPEND_ONLY_SCHEMA_KV); - private static final int STORAGE_SCHEME_INDEX = TABLE_KV_COLUMNS.indexOf(STORAGE_SCHEME_KV); - private static final int QUALIFIER_ENCODING_SCHEME_INDEX = TABLE_KV_COLUMNS.indexOf(ENCODING_SCHEME_KV); - private static final int USE_STATS_FOR_PARALLELIZATION_INDEX = TABLE_KV_COLUMNS.indexOf(USE_STATS_FOR_PARALLELIZATION_KV); - private static final int PHYSICAL_TABLE_NAME_INDEX = TABLE_KV_COLUMNS.indexOf(PHYSICAL_TABLE_NAME_KV); - private static final int LAST_DDL_TIMESTAMP_INDEX = - TABLE_KV_COLUMNS.indexOf(LAST_DDL_TIMESTAMP_KV); - private static final int CHANGE_DETECTION_ENABLED_INDEX = - TABLE_KV_COLUMNS.indexOf(CHANGE_DETECTION_ENABLED_KV); - private static final int SCHEMA_VERSION_INDEX = TABLE_KV_COLUMNS.indexOf(SCHEMA_VERSION_KV); - private static final int EXTERNAL_SCHEMA_ID_INDEX = - TABLE_KV_COLUMNS.indexOf(EXTERNAL_SCHEMA_ID_KV); - private static final int STREAMING_TOPIC_NAME_INDEX = - TABLE_KV_COLUMNS.indexOf(STREAMING_TOPIC_NAME_KV); - private static final int CDC_INCLUDE_INDEX = TABLE_KV_COLUMNS.indexOf(CDC_INCLUDE_KV); - private static final int INDEX_WHERE_INDEX = - TABLE_KV_COLUMNS.indexOf(INDEX_WHERE_KV); - - private static final int MAX_LOOKBACK_AGE_INDEX = TABLE_KV_COLUMNS.indexOf(MAX_LOOKBACK_AGE_KV); - private static final int TTL_INDEX = TABLE_KV_COLUMNS.indexOf(TTL_KV); - private static final int ROW_KEY_MATCHER_INDEX = TABLE_KV_COLUMNS.indexOf(ROW_KEY_MATCHER_KV); - // KeyValues for Column - private static final KeyValue DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES); - private static final KeyValue COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES); - private static final KeyValue NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES); - private static final KeyValue DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES); - private static final KeyValue ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES); - private static final KeyValue SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES); - private static final KeyValue ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES); - private static final KeyValue VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES); - private static final KeyValue IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES); - private static final KeyValue COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES); - private static final KeyValue IS_ROW_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES); - private static final KeyValue COLUMN_QUALIFIER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES); - // this 
key value is used to represent a column derived from a parent that was deleted (by - // storing a value of LinkType.EXCLUDED_COLUMN) - private static final KeyValue LINK_TYPE_KV = - createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); - - private static final List COLUMN_KV_COLUMNS = Lists.newArrayList( - DECIMAL_DIGITS_KV, - COLUMN_SIZE_KV, - NULLABLE_KV, - DATA_TYPE_KV, - ORDINAL_POSITION_KV, - SORT_ORDER_KV, - DATA_TABLE_NAME_KV, // included in both column and table row for metadata APIs - ARRAY_SIZE_KV, - VIEW_CONSTANT_KV, - IS_VIEW_REFERENCED_KV, - COLUMN_DEF_KV, - IS_ROW_TIMESTAMP_KV, - COLUMN_QUALIFIER_KV, - LINK_TYPE_KV - ); - - static { - COLUMN_KV_COLUMNS.sort(CellComparator.getInstance()); - } - private static final Cell QUALIFIER_COUNTER_KV = - KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, - COLUMN_QUALIFIER_COUNTER_BYTES); - private static final int DECIMAL_DIGITS_INDEX = COLUMN_KV_COLUMNS.indexOf(DECIMAL_DIGITS_KV); - private static final int COLUMN_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_SIZE_KV); - private static final int NULLABLE_INDEX = COLUMN_KV_COLUMNS.indexOf(NULLABLE_KV); - private static final int DATA_TYPE_INDEX = COLUMN_KV_COLUMNS.indexOf(DATA_TYPE_KV); - private static final int ORDINAL_POSITION_INDEX = COLUMN_KV_COLUMNS.indexOf(ORDINAL_POSITION_KV); - private static final int SORT_ORDER_INDEX = COLUMN_KV_COLUMNS.indexOf(SORT_ORDER_KV); - private static final int ARRAY_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(ARRAY_SIZE_KV); - private static final int VIEW_CONSTANT_INDEX = COLUMN_KV_COLUMNS.indexOf(VIEW_CONSTANT_KV); - private static final int IS_VIEW_REFERENCED_INDEX = COLUMN_KV_COLUMNS.indexOf(IS_VIEW_REFERENCED_KV); - private static final int COLUMN_DEF_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_DEF_KV); - private static final int IS_ROW_TIMESTAMP_INDEX = COLUMN_KV_COLUMNS.indexOf(IS_ROW_TIMESTAMP_KV); - private static final int COLUMN_QUALIFIER_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_QUALIFIER_KV); - // the index of the key value is used to represent a column derived from a parent that was - // deleted (by storing a value of LinkType.EXCLUDED_COLUMN) - private static final int EXCLUDED_COLUMN_LINK_TYPE_KV_INDEX = - COLUMN_KV_COLUMNS.indexOf(LINK_TYPE_KV); - - // index for link type key value that is used to store linking rows - private static final int LINK_TYPE_INDEX = 0; - private static final Cell CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES); - private static final Cell JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES); - private static final Cell RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES); - private static final Cell NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES); - private static final Cell TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES); - private static final Cell IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES); - private static final Cell DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES); - private static final Cell MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES); - private static final Cell MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES); - private static 
final Cell IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES); - - private static final List FUNCTION_KV_COLUMNS = Arrays.asList( - EMPTY_KEYVALUE_KV, - CLASS_NAME_KV, - JAR_PATH_KV, - RETURN_TYPE_KV, - NUM_ARGS_KV - ); - static { - Collections.sort(FUNCTION_KV_COLUMNS, CellComparatorImpl.COMPARATOR); - } - - private static final int CLASS_NAME_INDEX = FUNCTION_KV_COLUMNS.indexOf(CLASS_NAME_KV); - private static final int JAR_PATH_INDEX = FUNCTION_KV_COLUMNS.indexOf(JAR_PATH_KV); - private static final int RETURN_TYPE_INDEX = FUNCTION_KV_COLUMNS.indexOf(RETURN_TYPE_KV); - private static final int NUM_ARGS_INDEX = FUNCTION_KV_COLUMNS.indexOf(NUM_ARGS_KV); - - private static final List FUNCTION_ARG_KV_COLUMNS = Arrays.asList( - TYPE_KV, - IS_ARRAY_KV, - IS_CONSTANT_KV, - DEFAULT_VALUE_KV, - MIN_VALUE_KV, - MAX_VALUE_KV - ); - static { - Collections.sort(FUNCTION_ARG_KV_COLUMNS, CellComparatorImpl.COMPARATOR); - } - - private static final int IS_ARRAY_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_ARRAY_KV); - private static final int IS_CONSTANT_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_CONSTANT_KV); - private static final int DEFAULT_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(DEFAULT_VALUE_KV); - private static final int MIN_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MIN_VALUE_KV); - private static final int MAX_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MAX_VALUE_KV); - - public static PName newPName(byte[] buffer) { - return buffer == null ? null : newPName(buffer, 0, buffer.length); - } - - public static PName newPName(byte[] keyBuffer, int keyOffset, int keyLength) { - if (keyLength <= 0) { - return null; - } - int length = getVarCharLength(keyBuffer, keyOffset, keyLength); - return PNameFactory.newName(keyBuffer, keyOffset, length); + private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataEndpointImpl.class); + + private static final byte[] CHILD_TABLE_BYTES = + new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() }; + private static final byte[] PHYSICAL_TABLE_BYTES = + new byte[] { PTable.LinkType.PHYSICAL_TABLE.getSerializedValue() }; + + private LockManager lockManager; + private long metadataCacheRowLockTimeout; + + // KeyValues for Table + private static final Cell TABLE_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES); + private static final Cell TABLE_SEQ_NUM_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES); + private static final Cell COLUMN_COUNT_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES); + private static final Cell SALT_BUCKETS_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES); + private static final Cell PK_NAME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES); + private static final Cell DATA_TABLE_NAME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); + private static final Cell INDEX_STATE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); + private static final Cell IMMUTABLE_ROWS_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES); + private static final Cell VIEW_EXPRESSION_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES); + private static final Cell DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, 
+ TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES); + private static final Cell DISABLE_WAL_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES); + private static final Cell MULTI_TENANT_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES); + private static final Cell VIEW_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES); + private static final Cell VIEW_INDEX_ID_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES); + /** + * A designator for choosing the right type for viewIndex (Short vs Long) to be backward + * compatible. + **/ + private static final Cell VIEW_INDEX_ID_DATA_TYPE_BYTES_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_DATA_TYPE_BYTES); + private static final Cell INDEX_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES); + private static final Cell INDEX_DISABLE_TIMESTAMP_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); + private static final Cell STORE_NULLS_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES); + private static final Cell EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, + TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); + private static final Cell BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, + TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES); + private static final Cell ROW_KEY_ORDER_OPTIMIZABLE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES); + private static final Cell TRANSACTIONAL_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES); + private static final Cell TRANSACTION_PROVIDER_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTION_PROVIDER_BYTES); + private static final Cell PHYSICAL_TABLE_NAME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PHYSICAL_TABLE_NAME_BYTES); + private static final Cell UPDATE_CACHE_FREQUENCY_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES); + private static final Cell IS_NAMESPACE_MAPPED_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES); + private static final Cell AUTO_PARTITION_SEQ_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES); + private static final Cell APPEND_ONLY_SCHEMA_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES); + private static final Cell STORAGE_SCHEME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES); + private static final Cell ENCODING_SCHEME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES); + private static final Cell USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow( + ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES); + private static final Cell LAST_DDL_TIMESTAMP_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, LAST_DDL_TIMESTAMP_BYTES); + private static final Cell CHANGE_DETECTION_ENABLED_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CHANGE_DETECTION_ENABLED_BYTES); + 
private static final Cell SCHEMA_VERSION_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SCHEMA_VERSION_BYTES); + private static final Cell EXTERNAL_SCHEMA_ID_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, EXTERNAL_SCHEMA_ID_BYTES); + private static final Cell STREAMING_TOPIC_NAME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STREAMING_TOPIC_NAME_BYTES); + private static final Cell CDC_INCLUDE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CDC_INCLUDE_BYTES); + private static final Cell INDEX_WHERE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_WHERE_BYTES); + + private static final Cell MAX_LOOKBACK_AGE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_LOOKBACK_AGE_BYTES); + + private static final Cell TTL_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TTL_BYTES); + private static final Cell ROW_KEY_MATCHER_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ROW_KEY_MATCHER_BYTES); + + private static final List TABLE_KV_COLUMNS = Lists.newArrayList(EMPTY_KEYVALUE_KV, + TABLE_TYPE_KV, TABLE_SEQ_NUM_KV, COLUMN_COUNT_KV, SALT_BUCKETS_KV, PK_NAME_KV, + DATA_TABLE_NAME_KV, INDEX_STATE_KV, IMMUTABLE_ROWS_KV, VIEW_EXPRESSION_KV, + DEFAULT_COLUMN_FAMILY_KV, DISABLE_WAL_KV, MULTI_TENANT_KV, VIEW_TYPE_KV, VIEW_INDEX_ID_KV, + VIEW_INDEX_ID_DATA_TYPE_BYTES_KV, INDEX_TYPE_KV, INDEX_DISABLE_TIMESTAMP_KV, STORE_NULLS_KV, + BASE_COLUMN_COUNT_KV, ROW_KEY_ORDER_OPTIMIZABLE_KV, TRANSACTIONAL_KV, TRANSACTION_PROVIDER_KV, + PHYSICAL_TABLE_NAME_KV, UPDATE_CACHE_FREQUENCY_KV, IS_NAMESPACE_MAPPED_KV, + AUTO_PARTITION_SEQ_KV, APPEND_ONLY_SCHEMA_KV, STORAGE_SCHEME_KV, ENCODING_SCHEME_KV, + USE_STATS_FOR_PARALLELIZATION_KV, LAST_DDL_TIMESTAMP_KV, CHANGE_DETECTION_ENABLED_KV, + SCHEMA_VERSION_KV, EXTERNAL_SCHEMA_ID_KV, STREAMING_TOPIC_NAME_KV, INDEX_WHERE_KV, + MAX_LOOKBACK_AGE_KV, CDC_INCLUDE_KV, TTL_KV, ROW_KEY_MATCHER_KV); + + static { + Collections.sort(TABLE_KV_COLUMNS, CellComparatorImpl.COMPARATOR); + } + + private static final int TABLE_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_TYPE_KV); + private static final int TABLE_SEQ_NUM_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_SEQ_NUM_KV); + private static final int COLUMN_COUNT_INDEX = TABLE_KV_COLUMNS.indexOf(COLUMN_COUNT_KV); + private static final int SALT_BUCKETS_INDEX = TABLE_KV_COLUMNS.indexOf(SALT_BUCKETS_KV); + private static final int PK_NAME_INDEX = TABLE_KV_COLUMNS.indexOf(PK_NAME_KV); + private static final int DATA_TABLE_NAME_INDEX = TABLE_KV_COLUMNS.indexOf(DATA_TABLE_NAME_KV); + private static final int INDEX_STATE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_STATE_KV); + private static final int IMMUTABLE_ROWS_INDEX = TABLE_KV_COLUMNS.indexOf(IMMUTABLE_ROWS_KV); + private static final int VIEW_STATEMENT_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_EXPRESSION_KV); + private static final int DEFAULT_COLUMN_FAMILY_INDEX = + TABLE_KV_COLUMNS.indexOf(DEFAULT_COLUMN_FAMILY_KV); + private static final int DISABLE_WAL_INDEX = TABLE_KV_COLUMNS.indexOf(DISABLE_WAL_KV); + private static final int MULTI_TENANT_INDEX = TABLE_KV_COLUMNS.indexOf(MULTI_TENANT_KV); + private static final int VIEW_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_TYPE_KV); + private static final int VIEW_INDEX_ID_DATA_TYPE_INDEX = + TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_DATA_TYPE_BYTES_KV); + private static final int VIEW_INDEX_ID_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_KV); + private static final int 
INDEX_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_TYPE_KV); + private static final int STORE_NULLS_INDEX = TABLE_KV_COLUMNS.indexOf(STORE_NULLS_KV); + private static final int BASE_COLUMN_COUNT_INDEX = TABLE_KV_COLUMNS.indexOf(BASE_COLUMN_COUNT_KV); + private static final int ROW_KEY_ORDER_OPTIMIZABLE_INDEX = + TABLE_KV_COLUMNS.indexOf(ROW_KEY_ORDER_OPTIMIZABLE_KV); + private static final int TRANSACTIONAL_INDEX = TABLE_KV_COLUMNS.indexOf(TRANSACTIONAL_KV); + private static final int TRANSACTION_PROVIDER_INDEX = + TABLE_KV_COLUMNS.indexOf(TRANSACTION_PROVIDER_KV); + private static final int UPDATE_CACHE_FREQUENCY_INDEX = + TABLE_KV_COLUMNS.indexOf(UPDATE_CACHE_FREQUENCY_KV); + private static final int INDEX_DISABLE_TIMESTAMP = + TABLE_KV_COLUMNS.indexOf(INDEX_DISABLE_TIMESTAMP_KV); + private static final int IS_NAMESPACE_MAPPED_INDEX = + TABLE_KV_COLUMNS.indexOf(IS_NAMESPACE_MAPPED_KV); + private static final int AUTO_PARTITION_SEQ_INDEX = + TABLE_KV_COLUMNS.indexOf(AUTO_PARTITION_SEQ_KV); + private static final int APPEND_ONLY_SCHEMA_INDEX = + TABLE_KV_COLUMNS.indexOf(APPEND_ONLY_SCHEMA_KV); + private static final int STORAGE_SCHEME_INDEX = TABLE_KV_COLUMNS.indexOf(STORAGE_SCHEME_KV); + private static final int QUALIFIER_ENCODING_SCHEME_INDEX = + TABLE_KV_COLUMNS.indexOf(ENCODING_SCHEME_KV); + private static final int USE_STATS_FOR_PARALLELIZATION_INDEX = + TABLE_KV_COLUMNS.indexOf(USE_STATS_FOR_PARALLELIZATION_KV); + private static final int PHYSICAL_TABLE_NAME_INDEX = + TABLE_KV_COLUMNS.indexOf(PHYSICAL_TABLE_NAME_KV); + private static final int LAST_DDL_TIMESTAMP_INDEX = + TABLE_KV_COLUMNS.indexOf(LAST_DDL_TIMESTAMP_KV); + private static final int CHANGE_DETECTION_ENABLED_INDEX = + TABLE_KV_COLUMNS.indexOf(CHANGE_DETECTION_ENABLED_KV); + private static final int SCHEMA_VERSION_INDEX = TABLE_KV_COLUMNS.indexOf(SCHEMA_VERSION_KV); + private static final int EXTERNAL_SCHEMA_ID_INDEX = + TABLE_KV_COLUMNS.indexOf(EXTERNAL_SCHEMA_ID_KV); + private static final int STREAMING_TOPIC_NAME_INDEX = + TABLE_KV_COLUMNS.indexOf(STREAMING_TOPIC_NAME_KV); + private static final int CDC_INCLUDE_INDEX = TABLE_KV_COLUMNS.indexOf(CDC_INCLUDE_KV); + private static final int INDEX_WHERE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_WHERE_KV); + + private static final int MAX_LOOKBACK_AGE_INDEX = TABLE_KV_COLUMNS.indexOf(MAX_LOOKBACK_AGE_KV); + private static final int TTL_INDEX = TABLE_KV_COLUMNS.indexOf(TTL_KV); + private static final int ROW_KEY_MATCHER_INDEX = TABLE_KV_COLUMNS.indexOf(ROW_KEY_MATCHER_KV); + // KeyValues for Column + private static final KeyValue DECIMAL_DIGITS_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES); + private static final KeyValue COLUMN_SIZE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES); + private static final KeyValue NULLABLE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES); + private static final KeyValue DATA_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES); + private static final KeyValue ORDINAL_POSITION_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES); + private static final KeyValue SORT_ORDER_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES); + private static final KeyValue ARRAY_SIZE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES); + private static final KeyValue VIEW_CONSTANT_KV = + 
createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES); + private static final KeyValue IS_VIEW_REFERENCED_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES); + private static final KeyValue COLUMN_DEF_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES); + private static final KeyValue IS_ROW_TIMESTAMP_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES); + private static final KeyValue COLUMN_QUALIFIER_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES); + // this key value is used to represent a column derived from a parent that was deleted (by + // storing a value of LinkType.EXCLUDED_COLUMN) + private static final KeyValue LINK_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); + + private static final List COLUMN_KV_COLUMNS = + Lists.newArrayList(DECIMAL_DIGITS_KV, COLUMN_SIZE_KV, NULLABLE_KV, DATA_TYPE_KV, + ORDINAL_POSITION_KV, SORT_ORDER_KV, DATA_TABLE_NAME_KV, // included in both column and table + // row for metadata APIs + ARRAY_SIZE_KV, VIEW_CONSTANT_KV, IS_VIEW_REFERENCED_KV, COLUMN_DEF_KV, IS_ROW_TIMESTAMP_KV, + COLUMN_QUALIFIER_KV, LINK_TYPE_KV); + + static { + COLUMN_KV_COLUMNS.sort(CellComparator.getInstance()); + } + private static final Cell QUALIFIER_COUNTER_KV = KeyValueUtil.createFirstOnRow( + ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_COUNTER_BYTES); + private static final int DECIMAL_DIGITS_INDEX = COLUMN_KV_COLUMNS.indexOf(DECIMAL_DIGITS_KV); + private static final int COLUMN_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_SIZE_KV); + private static final int NULLABLE_INDEX = COLUMN_KV_COLUMNS.indexOf(NULLABLE_KV); + private static final int DATA_TYPE_INDEX = COLUMN_KV_COLUMNS.indexOf(DATA_TYPE_KV); + private static final int ORDINAL_POSITION_INDEX = COLUMN_KV_COLUMNS.indexOf(ORDINAL_POSITION_KV); + private static final int SORT_ORDER_INDEX = COLUMN_KV_COLUMNS.indexOf(SORT_ORDER_KV); + private static final int ARRAY_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(ARRAY_SIZE_KV); + private static final int VIEW_CONSTANT_INDEX = COLUMN_KV_COLUMNS.indexOf(VIEW_CONSTANT_KV); + private static final int IS_VIEW_REFERENCED_INDEX = + COLUMN_KV_COLUMNS.indexOf(IS_VIEW_REFERENCED_KV); + private static final int COLUMN_DEF_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_DEF_KV); + private static final int IS_ROW_TIMESTAMP_INDEX = COLUMN_KV_COLUMNS.indexOf(IS_ROW_TIMESTAMP_KV); + private static final int COLUMN_QUALIFIER_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_QUALIFIER_KV); + // the index of the key value is used to represent a column derived from a parent that was + // deleted (by storing a value of LinkType.EXCLUDED_COLUMN) + private static final int EXCLUDED_COLUMN_LINK_TYPE_KV_INDEX = + COLUMN_KV_COLUMNS.indexOf(LINK_TYPE_KV); + + // index for link type key value that is used to store linking rows + private static final int LINK_TYPE_INDEX = 0; + private static final Cell CLASS_NAME_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES); + private static final Cell JAR_PATH_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES); + private static final Cell RETURN_TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES); + private static final Cell NUM_ARGS_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, 
NUM_ARGS_BYTES); + private static final Cell TYPE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES); + private static final Cell IS_CONSTANT_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES); + private static final Cell DEFAULT_VALUE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES); + private static final Cell MIN_VALUE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES); + private static final Cell MAX_VALUE_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES); + private static final Cell IS_ARRAY_KV = + createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES); + + private static final List FUNCTION_KV_COLUMNS = Arrays. asList(EMPTY_KEYVALUE_KV, + CLASS_NAME_KV, JAR_PATH_KV, RETURN_TYPE_KV, NUM_ARGS_KV); + static { + Collections.sort(FUNCTION_KV_COLUMNS, CellComparatorImpl.COMPARATOR); + } + + private static final int CLASS_NAME_INDEX = FUNCTION_KV_COLUMNS.indexOf(CLASS_NAME_KV); + private static final int JAR_PATH_INDEX = FUNCTION_KV_COLUMNS.indexOf(JAR_PATH_KV); + private static final int RETURN_TYPE_INDEX = FUNCTION_KV_COLUMNS.indexOf(RETURN_TYPE_KV); + private static final int NUM_ARGS_INDEX = FUNCTION_KV_COLUMNS.indexOf(NUM_ARGS_KV); + + private static final List FUNCTION_ARG_KV_COLUMNS = Arrays. asList(TYPE_KV, + IS_ARRAY_KV, IS_CONSTANT_KV, DEFAULT_VALUE_KV, MIN_VALUE_KV, MAX_VALUE_KV); + static { + Collections.sort(FUNCTION_ARG_KV_COLUMNS, CellComparatorImpl.COMPARATOR); + } + + private static final int IS_ARRAY_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_ARRAY_KV); + private static final int IS_CONSTANT_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_CONSTANT_KV); + private static final int DEFAULT_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(DEFAULT_VALUE_KV); + private static final int MIN_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MIN_VALUE_KV); + private static final int MAX_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MAX_VALUE_KV); + + public static PName newPName(byte[] buffer) { + return buffer == null ? 
null : newPName(buffer, 0, buffer.length); + } + + public static PName newPName(byte[] keyBuffer, int keyOffset, int keyLength) { + if (keyLength <= 0) { + return null; + } + int length = getVarCharLength(keyBuffer, keyOffset, keyLength); + return PNameFactory.newName(keyBuffer, keyOffset, length); + } + + private static boolean failConcurrentMutateAddColumnOneTimeForTesting = false; + private RegionCoprocessorEnvironment env; + + private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; + private boolean accessCheckEnabled; + private boolean blockWriteRebuildIndex; + private int maxIndexesPerTable; + private boolean isTablesMappingEnabled; + private boolean invalidateServerCacheEnabled; + + // this flag denotes that we will continue to write parent table column metadata while creating + // a child view and also block metadata changes that were previously propagated to children + // before 4.15, so that we can rollback the upgrade to 4.15 if required + private boolean allowSplittableSystemCatalogRollback; + + private boolean isSystemCatalogSplittable; + + protected boolean getMetadataReadLockEnabled; + + private MetricsMetadataSource metricsSource; + + public static void setFailConcurrentMutateAddColumnOneTimeForTesting(boolean fail) { + failConcurrentMutateAddColumnOneTimeForTesting = fail; + } + + /** + * Stores a reference to the coprocessor environment provided by the + * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of + * {@link RegionCoprocessorEnvironment}. + * @param env the environment provided by the coprocessor host + * @throws IOException if the provided environment is not an instance of + * {@code RegionCoprocessorEnvironment} + */ + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment) env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); } - private static boolean failConcurrentMutateAddColumnOneTimeForTesting = false; - private RegionCoprocessorEnvironment env; - - private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; - private boolean accessCheckEnabled; - private boolean blockWriteRebuildIndex; - private int maxIndexesPerTable; - private boolean isTablesMappingEnabled; - private boolean invalidateServerCacheEnabled; - - // this flag denotes that we will continue to write parent table column metadata while creating - // a child view and also block metadata changes that were previously propagated to children - // before 4.15, so that we can rollback the upgrade to 4.15 if required - private boolean allowSplittableSystemCatalogRollback; - - private boolean isSystemCatalogSplittable; + this.lockManager = new LockManager(); + phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env); + Configuration config = env.getConfiguration(); + this.metadataCacheRowLockTimeout = + config.getLong(QueryServices.PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT, + QueryServices.DEFAULT_PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT); + this.accessCheckEnabled = config.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + this.blockWriteRebuildIndex = config.getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, + 
QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE); + this.maxIndexesPerTable = config.getInt(QueryServices.MAX_INDEXES_PER_TABLE, + QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE); + this.isTablesMappingEnabled = + SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, new ReadOnlyProps(config.iterator())); + this.allowSplittableSystemCatalogRollback = + config.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, + QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); + this.invalidateServerCacheEnabled = + config.getBoolean(QueryServices.PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED); + this.getMetadataReadLockEnabled = + config.getBoolean(QueryServices.PHOENIX_GET_METADATA_READ_LOCK_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_GET_METADATA_READ_LOCK_ENABLED); + this.isSystemCatalogSplittable = MetaDataSplitPolicy.isSystemCatalogSplittable(config); + + LOGGER.info("Starting Tracing-Metrics Systems"); + // Start the phoenix trace collection + Tracing.addTraceMetricsSource(); + Metrics.ensureConfigured(); + metricsSource = MetricsMetadataSourceFactory.getMetadataMetricsSource(); + GlobalCache.getInstance(this.env).setMetricsSource(metricsSource); + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + SchemaRegistryRepositoryFactory.close(); + } + + @Override + public Iterable getServices() { + return Collections.singleton(this); + } + + @Override + public void getTable(RpcController controller, GetTableRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + byte[] tenantId = request.getTenantId().toByteArray(); + byte[] schemaName = request.getSchemaName().toByteArray(); + byte[] tableName = request.getTableName().toByteArray(); + byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + long tableTimeStamp = request.getTableTimestamp(); + try { + // TODO: check that key is within region.getStartKey() and region.getEndKey() + // and return special code to force client to lookup region from meta. 
+ Region region = env.getRegion(); + MetaDataMutationResult result = checkTableKeyInRegion(key, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + PTable table = doGetTable(tenantId, schemaName, tableName, request.getClientTimestamp(), null, + request.getClientVersion()); + if (table == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); + builder.setMutationTime(currentTime); + done.run(builder.build()); + return; + } + getCoprocessorHost().preGetTable(Bytes.toString(tenantId), + SchemaUtil.getTableName(schemaName, tableName), + TableName.valueOf(table.getPhysicalName().getBytes())); + + if ( + request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG + && table.getType() == PTableType.VIEW && table.getViewType() != MAPPED + ) { + try (PhoenixConnection connection = + getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class)) { + PTable pTable = connection.getTableNoCache(table.getParentName().getString()); + table = ViewUtil.addDerivedColumnsFromParent(connection, table, pTable); + } + } + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); + builder.setMutationTime(currentTime); + if (blockWriteRebuildIndex) { + long disableIndexTimestamp = table.getIndexDisableTimestamp(); + long minNonZerodisableIndexTimestamp = + disableIndexTimestamp > 0 ? disableIndexTimestamp : Long.MAX_VALUE; + for (PTable index : table.getIndexes()) { + disableIndexTimestamp = index.getIndexDisableTimestamp(); + if ( + disableIndexTimestamp > 0 + && (index.getIndexState() == PIndexState.ACTIVE + || index.getIndexState() == PIndexState.PENDING_ACTIVE + || index.getIndexState() == PIndexState.PENDING_DISABLE) + && disableIndexTimestamp < minNonZerodisableIndexTimestamp + ) { + minNonZerodisableIndexTimestamp = disableIndexTimestamp; + } + } + // Freeze time for table at min non-zero value of INDEX_DISABLE_TIMESTAMP + // This will keep the table consistent with index as the table has had one more + // batch applied to it. 
+ if (minNonZerodisableIndexTimestamp != Long.MAX_VALUE) { + // Subtract one because we add one due to timestamp granularity in Windows + builder.setMutationTime(minNonZerodisableIndexTimestamp - 1); + } + } + // the PTable of views and indexes on views might get updated because a column is added to one + // of + // their parents (this won't change the timestamp) + if (table.getType() != PTableType.TABLE || table.getTimeStamp() != tableTimeStamp) { + builder.setTable(PTableImpl.toProto(table)); + } + done.run(builder.build()); + } catch (Throwable t) { + LOGGER.error("getTable failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); + } + } + + private PhoenixMetaDataCoprocessorHost getCoprocessorHost() { + return phoenixAccessCoprocessorHost; + } + + private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, + long clientTimeStamp, int clientVersion) throws IOException, SQLException { + Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PTable newTable; + region.startRegionOperation(); + try (RegionScanner scanner = region.getScanner(scan)) { + PTable oldTable = (PTable) metaDataCache.getIfPresent(cacheKey); + if (oldTable == null) { + metricsSource.incrementMetadataCacheMissCount(); + } else { + metricsSource.incrementMetadataCacheHitCount(); + } + long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP - 1 : oldTable.getTimeStamp(); + newTable = getTable(scanner, clientTimeStamp, tableTimeStamp, clientVersion); + if ( + newTable != null && (oldTable == null || tableTimeStamp < newTable.getTimeStamp() + || (blockWriteRebuildIndex && newTable.getIndexDisableTimestamp() > 0)) + ) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Caching table " + + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), cacheKey.getLength()) + + " at seqNum " + newTable.getSequenceNumber() + " with newer timestamp " + + newTable.getTimeStamp() + " versus " + tableTimeStamp); + } + metaDataCache.put(cacheKey, newTable); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(newTable.getEstimatedSize()); + } + } finally { + region.closeRegionOperation(); + } + return newTable; + } + + private List buildFunctions(List keys, Region region, long clientTimeStamp, + boolean isReplace, List deleteMutationsForReplace) throws IOException, SQLException { + List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); + for (byte[] key : keys) { + byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY); + ByteUtil.nextKey(stopKey, stopKey.length); + keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false, SortOrder.ASC)); + } + Scan scan = new Scan(); + scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp); + ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges); + scanRanges.initializeScan(scan); + scan.setFilter(scanRanges.getSkipScanFilter()); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + List functions = new ArrayList(); + PFunction function = null; + try (RegionScanner scanner = region.getScanner(scan)) { + for (int i = 0; i < keys.size(); i++) { + function = null; + function = getFunction(scanner, isReplace, clientTimeStamp, deleteMutationsForReplace); + if (function == null) { + return null; + } + byte[] functionKey = 
SchemaUtil.getFunctionKey(function.getTenantId() == null + ? ByteUtil.EMPTY_BYTE_ARRAY + : function.getTenantId().getBytes(), Bytes.toBytes(function.getFunctionName())); + metaDataCache.put(new FunctionBytesPtr(functionKey), function); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); + functions.add(function); + } + return functions; + } + } + + private List buildSchemas(List keys, Region region, long clientTimeStamp, + ImmutableBytesPtr cacheKey) throws IOException, SQLException { + List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); + for (byte[] key : keys) { + byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY); + ByteUtil.nextKey(stopKey, stopKey.length); + keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false, SortOrder.ASC)); + } + Scan scan = new Scan(); + if ( + clientTimeStamp != HConstants.LATEST_TIMESTAMP + && clientTimeStamp != HConstants.OLDEST_TIMESTAMP + ) { + scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1); + } else { + scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp); + } + ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges); + scanRanges.initializeScan(scan); + scan.setFilter(scanRanges.getSkipScanFilter()); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + List schemas = new ArrayList(); + PSchema schema = null; + try (RegionScanner scanner = region.getScanner(scan)) { + for (int i = 0; i < keys.size(); i++) { + schema = null; + schema = getSchema(scanner, clientTimeStamp); + if (schema == null) { + return null; + } + metaDataCache.put(cacheKey, schema); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); + schemas.add(schema); + } + return schemas; + } + } + + private void addIndexToTable(PName tenantId, PName schemaName, PName indexName, PName tableName, + long clientTimeStamp, List indexes, int clientVersion) + throws IOException, SQLException { + byte[] tenantIdBytes = tenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); + PTable indexTable = doGetTable(tenantIdBytes, schemaName.getBytes(), indexName.getBytes(), + clientTimeStamp, null, clientVersion); + if (indexTable == null) { + ClientUtil.throwIOException("Index not found", + new TableNotFoundException(schemaName.getString(), indexName.getString())); + return; + } + indexes.add(indexTable); + } + + private void addExcludedColumnToTable(List pColumns, PName colName, PName famName, + long timestamp) { + PColumnImpl pColumn = PColumnImpl.createExcludedColumn(famName, colName, timestamp); + pColumns.add(pColumn); + } + + private void addColumnToTable(List results, PName colName, PName famName, + Cell[] colKeyValues, List columns, boolean isSalted, int baseColumnCount, + boolean isRegularView, long timestamp) { + int i = 0; + int j = 0; + while (i < results.size() && j < COLUMN_KV_COLUMNS.size()) { + Cell kv = results.get(i); + Cell searchKv = COLUMN_KV_COLUMNS.get(j); + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), + searchKv.getQualifierLength()); + if (cmp == 0) { + colKeyValues[j++] = kv; + i++; + } else if (cmp > 0) { + colKeyValues[j++] = null; + } else { + i++; // shouldn't happen - means unexpected KV in system table column row + } + } - protected boolean getMetadataReadLockEnabled; + if ( + colKeyValues[DATA_TYPE_INDEX] == null || colKeyValues[NULLABLE_INDEX] == null + || colKeyValues[ORDINAL_POSITION_INDEX] == null + ) { + throw new IllegalStateException( + "Didn't find all required key values in '" + colName.getString() + "' column metadata row"); + } - private MetricsMetadataSource metricsSource; + Cell columnSizeKv = colKeyValues[COLUMN_SIZE_INDEX]; + Integer maxLength = columnSizeKv == null + ? null + : PInteger.INSTANCE.getCodec().decodeInt(columnSizeKv.getValueArray(), + columnSizeKv.getValueOffset(), SortOrder.getDefault()); + Cell decimalDigitKv = colKeyValues[DECIMAL_DIGITS_INDEX]; + Integer scale = decimalDigitKv == null + ? null + : PInteger.INSTANCE.getCodec().decodeInt(decimalDigitKv.getValueArray(), + decimalDigitKv.getValueOffset(), SortOrder.getDefault()); + Cell ordinalPositionKv = colKeyValues[ORDINAL_POSITION_INDEX]; + int position = PInteger.INSTANCE.getCodec().decodeInt(ordinalPositionKv.getValueArray(), + ordinalPositionKv.getValueOffset(), SortOrder.getDefault()) + (isSalted ? 
1 : 0); + ; + + // if this column was inherited from a parent and was dropped then we create an excluded column + // which will be used to exclude the parent column while combining columns from ancestors + Cell excludedColumnKv = colKeyValues[EXCLUDED_COLUMN_LINK_TYPE_KV_INDEX]; + if ( + excludedColumnKv != null + && colKeyValues[DATA_TYPE_INDEX].getTimestamp() <= excludedColumnKv.getTimestamp() + ) { + LinkType linkType = LinkType + .fromSerializedValue(excludedColumnKv.getValueArray()[excludedColumnKv.getValueOffset()]); + if (linkType == LinkType.EXCLUDED_COLUMN) { + addExcludedColumnToTable(columns, colName, famName, excludedColumnKv.getTimestamp()); + } else { + // if we have a column metadata row that has a link type keyvalue it should + // represent an excluded column by containing the LinkType.EXCLUDED_COLUMN + throw new IllegalStateException( + "Link type should be EXCLUDED_COLUMN but found an unxpected link type for key value " + + excludedColumnKv); + } + return; + } - public static void setFailConcurrentMutateAddColumnOneTimeForTesting(boolean fail) { - failConcurrentMutateAddColumnOneTimeForTesting = fail; + Cell nullableKv = colKeyValues[NULLABLE_INDEX]; + boolean isNullable = PInteger.INSTANCE.getCodec().decodeInt(nullableKv.getValueArray(), + nullableKv.getValueOffset(), SortOrder.getDefault()) != ResultSetMetaData.columnNoNulls; + Cell dataTypeKv = colKeyValues[DATA_TYPE_INDEX]; + PDataType dataType = PDataType.fromTypeId(PInteger.INSTANCE.getCodec() + .decodeInt(dataTypeKv.getValueArray(), dataTypeKv.getValueOffset(), SortOrder.getDefault())); + if (maxLength == null && dataType == PBinary.INSTANCE) { + dataType = PVarbinary.INSTANCE; // For + } + // backward + // compatibility. + Cell sortOrderKv = colKeyValues[SORT_ORDER_INDEX]; + SortOrder sortOrder = sortOrderKv == null + ? SortOrder.getDefault() + : SortOrder.fromSystemValue(PInteger.INSTANCE.getCodec().decodeInt( + sortOrderKv.getValueArray(), sortOrderKv.getValueOffset(), SortOrder.getDefault())); + + Cell arraySizeKv = colKeyValues[ARRAY_SIZE_INDEX]; + Integer arraySize = arraySizeKv == null + ? null + : PInteger.INSTANCE.getCodec().decodeInt(arraySizeKv.getValueArray(), + arraySizeKv.getValueOffset(), SortOrder.getDefault()); + + Cell viewConstantKv = colKeyValues[VIEW_CONSTANT_INDEX]; + byte[] viewConstant = viewConstantKv == null + ? null + : new ImmutableBytesPtr(viewConstantKv.getValueArray(), viewConstantKv.getValueOffset(), + viewConstantKv.getValueLength()).copyBytesIfNecessary(); + Cell isViewReferencedKv = colKeyValues[IS_VIEW_REFERENCED_INDEX]; + boolean isViewReferenced = isViewReferencedKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isViewReferencedKv.getValueArray(), + isViewReferencedKv.getValueOffset(), isViewReferencedKv.getValueLength())); + Cell columnDefKv = colKeyValues[COLUMN_DEF_INDEX]; + String expressionStr = columnDefKv == null + ? null + : (String) PVarchar.INSTANCE.toObject(columnDefKv.getValueArray(), + columnDefKv.getValueOffset(), columnDefKv.getValueLength()); + Cell isRowTimestampKV = colKeyValues[IS_ROW_TIMESTAMP_INDEX]; + boolean isRowTimestamp = isRowTimestampKV == null + ? false + : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isRowTimestampKV.getValueArray(), + isRowTimestampKV.getValueOffset(), isRowTimestampKV.getValueLength())); + + boolean isPkColumn = famName == null || famName.getString() == null; + Cell columnQualifierKV = colKeyValues[COLUMN_QUALIFIER_INDEX]; + // Older tables won't have column qualifier metadata present. 
To make things simpler, just set + // the + // column qualifier bytes by using the column name. + byte[] columnQualifierBytes = columnQualifierKV != null + ? Arrays.copyOfRange(columnQualifierKV.getValueArray(), columnQualifierKV.getValueOffset(), + columnQualifierKV.getValueOffset() + columnQualifierKV.getValueLength()) + : (isPkColumn ? null : colName.getBytes()); + PColumn column = new PColumnImpl(colName, famName, dataType, maxLength, scale, isNullable, + position - 1, sortOrder, arraySize, viewConstant, isViewReferenced, expressionStr, + isRowTimestamp, false, columnQualifierBytes, timestamp); + columns.add(column); + } + + private void addArgumentToFunction(List results, PName functionName, PName type, + Cell[] functionKeyValues, List arguments, short argPosition) + throws SQLException { + int i = 0; + int j = 0; + while (i < results.size() && j < FUNCTION_ARG_KV_COLUMNS.size()) { + Cell kv = results.get(i); + Cell searchKv = FUNCTION_ARG_KV_COLUMNS.get(j); + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), + searchKv.getQualifierLength()); + if (cmp == 0) { + functionKeyValues[j++] = kv; + i++; + } else if (cmp > 0) { + functionKeyValues[j++] = null; + } else { + i++; // shouldn't happen - means unexpected KV in system table column row + } } - /** - * Stores a reference to the coprocessor environment provided by the - * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of - * {@link RegionCoprocessorEnvironment}. - * - * @param env the environment provided by the coprocessor host - * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} - */ - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment) env; - } else { - throw new CoprocessorException("Must be loaded on a table region!"); - } + Cell isArrayKv = functionKeyValues[IS_ARRAY_INDEX]; + boolean isArrayType = isArrayKv == null + ? false + : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isArrayKv.getValueArray(), + isArrayKv.getValueOffset(), isArrayKv.getValueLength())); + Cell isConstantKv = functionKeyValues[IS_CONSTANT_INDEX]; + boolean isConstant = isConstantKv == null + ? false + : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isConstantKv.getValueArray(), + isConstantKv.getValueOffset(), isConstantKv.getValueLength())); + Cell defaultValueKv = functionKeyValues[DEFAULT_VALUE_INDEX]; + String defaultValue = defaultValueKv == null + ? null + : (String) PVarchar.INSTANCE.toObject(defaultValueKv.getValueArray(), + defaultValueKv.getValueOffset(), defaultValueKv.getValueLength()); + Cell minValueKv = functionKeyValues[MIN_VALUE_INDEX]; + String minValue = minValueKv == null + ? null + : (String) PVarchar.INSTANCE.toObject(minValueKv.getValueArray(), minValueKv.getValueOffset(), + minValueKv.getValueLength()); + Cell maxValueKv = functionKeyValues[MAX_VALUE_INDEX]; + String maxValue = maxValueKv == null + ? 
null + : (String) PVarchar.INSTANCE.toObject(maxValueKv.getValueArray(), maxValueKv.getValueOffset(), + maxValueKv.getValueLength()); + FunctionArgument arg = new FunctionArgument(type.getString(), isArrayType, isConstant, + defaultValue == null + ? null + : LiteralExpression.newConstant((new LiteralParseNode(defaultValue)).getValue()), + minValue == null + ? null + : LiteralExpression.newConstant((new LiteralParseNode(minValue)).getValue()), + maxValue == null + ? null + : LiteralExpression.newConstant((new LiteralParseNode(maxValue)).getValue()), + argPosition); + arguments.add(arg); + } + + private PName getPhysicalTableName(Region region, byte[] tenantId, byte[] schema, byte[] table, + long timestamp) throws IOException { + byte[] key = SchemaUtil.getTableKey(tenantId, schema, table); + Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); + scan.addColumn(TABLE_FAMILY_BYTES, PHYSICAL_TABLE_NAME_BYTES); + try (RegionScanner scanner = region.getScanner(scan)) { + List results = Lists.newArrayList(); + scanner.next(results); + Cell physicalTableNameKv = null; + if (results.size() > 0) { + physicalTableNameKv = results.get(0); + } + PName physicalTableName = physicalTableNameKv != null + ? newPName(physicalTableNameKv.getValueArray(), physicalTableNameKv.getValueOffset(), + physicalTableNameKv.getValueLength()) + : null; + return physicalTableName; + } + } + + private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp, + int clientVersion) throws IOException, SQLException { + List results = Lists.newArrayList(); + scanner.next(results); + if (results.isEmpty()) { + return null; + } + List tableCellList = results; + results = Lists.newArrayList(); + List> allColumnCellList = Lists.newArrayList(); - this.lockManager = new LockManager(); - phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env); - Configuration config = env.getConfiguration(); - this.metadataCacheRowLockTimeout = - config.getLong(QueryServices.PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT, - QueryServices.DEFAULT_PHOENIX_METADATA_CACHE_UPDATE_ROWLOCK_TIMEOUT); - this.accessCheckEnabled = config.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); - this.blockWriteRebuildIndex = config.getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE); - this.maxIndexesPerTable = config.getInt(QueryServices.MAX_INDEXES_PER_TABLE, - QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE); - this.isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, - new ReadOnlyProps(config.iterator())); - this.allowSplittableSystemCatalogRollback = config.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, - QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); - this.invalidateServerCacheEnabled - = config.getBoolean(QueryServices.PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_METADATA_INVALIDATE_CACHE_ENABLED); - this.getMetadataReadLockEnabled - = config.getBoolean(QueryServices.PHOENIX_GET_METADATA_READ_LOCK_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_GET_METADATA_READ_LOCK_ENABLED); - this.isSystemCatalogSplittable = MetaDataSplitPolicy.isSystemCatalogSplittable(config); - - LOGGER.info("Starting Tracing-Metrics Systems"); - // Start the phoenix trace collection - Tracing.addTraceMetricsSource(); - Metrics.ensureConfigured(); - metricsSource = 
MetricsMetadataSourceFactory.getMetadataMetricsSource(); - GlobalCache.getInstance(this.env).setMetricsSource(metricsSource); + do { + if (results.size() > 0) { + allColumnCellList.add(results); + results = Lists.newArrayList(); + } + } while (scanner.next(results)); + if (results != null && results.size() > 0) { + allColumnCellList.add(results); } - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - SchemaRegistryRepositoryFactory.close(); + return getTableFromCells(tableCellList, allColumnCellList, clientTimeStamp, clientVersion); + } + + private PTable getTableFromCells(List tableCellList, List> allColumnCellList, + long clientTimeStamp, int clientVersion) throws IOException, SQLException { + return getTableFromCells(tableCellList, allColumnCellList, clientTimeStamp, clientVersion, + null); + } + + /** + * Utility method to get a PTable from the HBase Cells either read from SYSTEM.CATALOG or + * generated by a DDL statement. Optionally, an existing PTable can be provided so that its + * properties can be merged with the "new" PTable created from the Cell. This is useful when + * generating an updated PTable following an ALTER DDL statement + * @param tableCellList Cells from the header row containing table level properties + * @param allColumnCellList Cells from column or link rows + * @param clientTimeStamp client-provided timestamp + * @param clientVersion client-provided version + * @param oldTable Optional parameters containing properties for an existing PTable + */ + private PTable getTableFromCells(List tableCellList, List> allColumnCellList, + long clientTimeStamp, int clientVersion, PTable oldTable) throws IOException, SQLException { + Cell[] tableKeyValues = new Cell[TABLE_KV_COLUMNS.size()]; + Cell[] colKeyValues = new Cell[COLUMN_KV_COLUMNS.size()]; + + // Create PTable based on KeyValues from scan + Cell keyValue = tableCellList.get(0); + byte[] keyBuffer = keyValue.getRowArray(); + int keyLength = keyValue.getRowLength(); + int keyOffset = keyValue.getRowOffset(); + PName tenantId = newPName(keyBuffer, keyOffset, keyLength); + int tenantIdLength = (tenantId == null) ? 
0 : tenantId.getBytes().length; + if (tenantIdLength == 0) { + tenantId = null; + } + PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength); + int schemaNameLength = schemaName.getBytes().length; + int tableNameLength = keyLength - schemaNameLength - 1 - tenantIdLength - 1; + byte[] tableNameBytes = new byte[tableNameLength]; + System.arraycopy(keyBuffer, keyOffset + schemaNameLength + 1 + tenantIdLength + 1, + tableNameBytes, 0, tableNameLength); + PName tableName = PNameFactory.newName(tableNameBytes); + + int offset = tenantIdLength + schemaNameLength + tableNameLength + 3; + // This will prevent the client from continually looking for the current + // table when we know that there will never be one since we disallow updates + // unless the table is the latest + + long timeStamp = keyValue.getTimestamp(); + PTableImpl.Builder builder = null; + if (oldTable != null) { + builder = PTableImpl.builderFromExisting(oldTable); + List columns = oldTable.getColumns(); + if (oldTable.getBucketNum() != null && oldTable.getBucketNum() > 0) { + // if it's salted, skip the salt column -- it will get added back during + // the build process + columns = columns.stream().skip(1).collect(Collectors.toList()); + } + builder.setColumns(columns); + } else { + builder = new PTableImpl.Builder(); + } + builder.setTenantId(tenantId); + builder.setSchemaName(schemaName); + builder.setTableName(tableName); + + int i = 0; + int j = 0; + while (i < tableCellList.size() && j < TABLE_KV_COLUMNS.size()) { + Cell kv = tableCellList.get(i); + Cell searchKv = TABLE_KV_COLUMNS.get(j); + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), + searchKv.getQualifierLength()); + if (cmp == 0) { + timeStamp = Math.max(timeStamp, kv.getTimestamp()); // Find max timestamp of table + // header row + tableKeyValues[j++] = kv; + i++; + } else if (cmp > 0) { + timeStamp = Math.max(timeStamp, kv.getTimestamp()); + tableKeyValues[j++] = null; + } else { + i++; // shouldn't happen - means unexpected KV in system table header row + } + } + // TABLE_TYPE, TABLE_SEQ_NUM and COLUMN_COUNT are required. 
+ if ( + tableKeyValues[TABLE_TYPE_INDEX] == null || tableKeyValues[TABLE_SEQ_NUM_INDEX] == null + || tableKeyValues[COLUMN_COUNT_INDEX] == null + ) { + // since we allow SYSTEM.CATALOG to split in certain cases there might be child links or + // other metadata rows that are invalid and should be ignored + Cell cell = tableCellList.get(0); + LOGGER.error("Found invalid metadata rows for rowkey " + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + return null; } - @Override - public Iterable getServices() { - return Collections.singleton(this); + Cell tableTypeKv = tableKeyValues[TABLE_TYPE_INDEX]; + PTableType tableType = + PTableType.fromSerializedValue(tableTypeKv.getValueArray()[tableTypeKv.getValueOffset()]); + builder.setType(tableType); + + Cell tableSeqNumKv = tableKeyValues[TABLE_SEQ_NUM_INDEX]; + long tableSeqNum = PLong.INSTANCE.getCodec().decodeLong(tableSeqNumKv.getValueArray(), + tableSeqNumKv.getValueOffset(), SortOrder.getDefault()); + builder.setSequenceNumber(tableSeqNum); + + Cell columnCountKv = tableKeyValues[COLUMN_COUNT_INDEX]; + int columnCount = PInteger.INSTANCE.getCodec().decodeInt(columnCountKv.getValueArray(), + columnCountKv.getValueOffset(), SortOrder.getDefault()); + + Cell pkNameKv = tableKeyValues[PK_NAME_INDEX]; + PName pkName = pkNameKv != null + ? newPName(pkNameKv.getValueArray(), pkNameKv.getValueOffset(), pkNameKv.getValueLength()) + : null; + builder.setPkName(pkName != null ? pkName : oldTable != null ? oldTable.getPKName() : null); + + Cell saltBucketNumKv = tableKeyValues[SALT_BUCKETS_INDEX]; + Integer saltBucketNum = saltBucketNumKv != null + ? (Integer) PInteger.INSTANCE.getCodec().decodeInt(saltBucketNumKv.getValueArray(), + saltBucketNumKv.getValueOffset(), SortOrder.getDefault()) + : null; + if (saltBucketNum != null && saltBucketNum.intValue() == 0) { + saltBucketNum = null; // Zero salt buckets means not salted + } + builder.setBucketNum( + saltBucketNum != null ? saltBucketNum : oldTable != null ? oldTable.getBucketNum() : null); + + // data table name is used to find the parent table for indexes later + Cell dataTableNameKv = tableKeyValues[DATA_TABLE_NAME_INDEX]; + PName dataTableName = dataTableNameKv != null + ? newPName(dataTableNameKv.getValueArray(), dataTableNameKv.getValueOffset(), + dataTableNameKv.getValueLength()) + : null; + + Cell physicalTableNameKv = tableKeyValues[PHYSICAL_TABLE_NAME_INDEX]; + PName physicalTableName = physicalTableNameKv != null + ? newPName(physicalTableNameKv.getValueArray(), physicalTableNameKv.getValueOffset(), + physicalTableNameKv.getValueLength()) + : null; + builder.setPhysicalTableName(physicalTableName != null ? physicalTableName + : oldTable != null ? oldTable.getPhysicalName(true) + : null); + + Cell indexStateKv = tableKeyValues[INDEX_STATE_INDEX]; + PIndexState indexState = indexStateKv == null + ? null + : PIndexState + .fromSerializedValue(indexStateKv.getValueArray()[indexStateKv.getValueOffset()]); + builder.setState( + indexState != null ? indexState : oldTable != null ? oldTable.getIndexState() : null); + + Cell immutableRowsKv = tableKeyValues[IMMUTABLE_ROWS_INDEX]; + boolean isImmutableRows = immutableRowsKv != null + && (Boolean) PBoolean.INSTANCE.toObject(immutableRowsKv.getValueArray(), + immutableRowsKv.getValueOffset(), immutableRowsKv.getValueLength()); + builder.setImmutableRows( + immutableRowsKv != null ? 
isImmutableRows : oldTable != null && oldTable.isImmutableRows()); + + Cell defaultFamilyNameKv = tableKeyValues[DEFAULT_COLUMN_FAMILY_INDEX]; + PName defaultFamilyName = defaultFamilyNameKv != null + ? newPName(defaultFamilyNameKv.getValueArray(), defaultFamilyNameKv.getValueOffset(), + defaultFamilyNameKv.getValueLength()) + : null; + builder.setDefaultFamilyName(defaultFamilyName != null ? defaultFamilyName + : oldTable != null ? oldTable.getDefaultFamilyName() + : null); + + Cell viewStatementKv = tableKeyValues[VIEW_STATEMENT_INDEX]; + String viewStatement = viewStatementKv != null + ? (String) PVarchar.INSTANCE.toObject(viewStatementKv.getValueArray(), + viewStatementKv.getValueOffset(), viewStatementKv.getValueLength()) + : null; + builder.setViewStatement(viewStatement != null ? viewStatement + : oldTable != null ? oldTable.getViewStatement() + : null); + + Cell disableWALKv = tableKeyValues[DISABLE_WAL_INDEX]; + boolean disableWAL = disableWALKv == null + ? PTable.DEFAULT_DISABLE_WAL + : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(disableWALKv.getValueArray(), + disableWALKv.getValueOffset(), disableWALKv.getValueLength())); + builder.setDisableWAL( + disableWALKv != null ? disableWAL : oldTable != null && oldTable.isWALDisabled()); + + Cell multiTenantKv = tableKeyValues[MULTI_TENANT_INDEX]; + boolean multiTenant = multiTenantKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(multiTenantKv.getValueArray(), + multiTenantKv.getValueOffset(), multiTenantKv.getValueLength())); + builder.setMultiTenant( + multiTenantKv != null ? multiTenant : oldTable != null && oldTable.isMultiTenant()); + + Cell storeNullsKv = tableKeyValues[STORE_NULLS_INDEX]; + boolean storeNulls = storeNullsKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(storeNullsKv.getValueArray(), + storeNullsKv.getValueOffset(), storeNullsKv.getValueLength())); + builder.setStoreNulls( + storeNullsKv != null ? storeNulls : oldTable != null && oldTable.getStoreNulls()); + + Cell transactionalKv = tableKeyValues[TRANSACTIONAL_INDEX]; + Cell transactionProviderKv = tableKeyValues[TRANSACTION_PROVIDER_INDEX]; + TransactionFactory.Provider transactionProvider = null; + if (transactionProviderKv == null) { + if ( + transactionalKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(transactionalKv.getValueArray(), + transactionalKv.getValueOffset(), transactionalKv.getValueLength())) + ) { + // For backward compat, prior to client setting TRANSACTION_PROVIDER + transactionProvider = TransactionFactory.Provider.NOTAVAILABLE; + } + } else { + transactionProvider = TransactionFactory.Provider + .fromCode(PTinyint.INSTANCE.getCodec().decodeByte(transactionProviderKv.getValueArray(), + transactionProviderKv.getValueOffset(), SortOrder.getDefault())); + } + builder.setTransactionProvider( + transactionProviderKv != null || transactionalKv != null ? transactionProvider + : oldTable != null ? oldTable.getTransactionProvider() + : null); + + Cell viewTypeKv = tableKeyValues[VIEW_TYPE_INDEX]; + ViewType viewType = viewTypeKv == null + ? null + : ViewType.fromSerializedValue(viewTypeKv.getValueArray()[viewTypeKv.getValueOffset()]); + builder + .setViewType(viewType != null ? viewType : oldTable != null ? oldTable.getViewType() : null); + + PDataType viewIndexIdType = + oldTable != null ? 
oldTable.getviewIndexIdType() : getViewIndexIdType(tableKeyValues); + builder.setViewIndexIdType(viewIndexIdType); + + Long viewIndexId = getViewIndexId(tableKeyValues, viewIndexIdType); + builder.setViewIndexId( + viewIndexId != null ? viewIndexId : oldTable != null ? oldTable.getViewIndexId() : null); + + Cell indexTypeKv = tableKeyValues[INDEX_TYPE_INDEX]; + IndexType indexType = indexTypeKv == null + ? null + : IndexType.fromSerializedValue(indexTypeKv.getValueArray()[indexTypeKv.getValueOffset()]); + builder.setIndexType( + indexType != null ? indexType : oldTable != null ? oldTable.getIndexType() : null); + + Cell baseColumnCountKv = tableKeyValues[BASE_COLUMN_COUNT_INDEX]; + int baseColumnCount = baseColumnCountKv == null + ? 0 + : PInteger.INSTANCE.getCodec().decodeInt(baseColumnCountKv.getValueArray(), + baseColumnCountKv.getValueOffset(), SortOrder.getDefault()); + builder.setBaseColumnCount(baseColumnCountKv != null ? baseColumnCount + : oldTable != null ? oldTable.getBaseColumnCount() + : 0); + + Cell rowKeyOrderOptimizableKv = tableKeyValues[ROW_KEY_ORDER_OPTIMIZABLE_INDEX]; + boolean rowKeyOrderOptimizable = rowKeyOrderOptimizableKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(rowKeyOrderOptimizableKv.getValueArray(), + rowKeyOrderOptimizableKv.getValueOffset(), rowKeyOrderOptimizableKv.getValueLength())); + builder.setRowKeyOrderOptimizable(rowKeyOrderOptimizableKv != null + ? rowKeyOrderOptimizable + : oldTable != null && oldTable.rowKeyOrderOptimizable()); + + Cell updateCacheFrequencyKv = tableKeyValues[UPDATE_CACHE_FREQUENCY_INDEX]; + long updateCacheFrequency = updateCacheFrequencyKv == null + ? 0 + : PLong.INSTANCE.getCodec().decodeLong(updateCacheFrequencyKv.getValueArray(), + updateCacheFrequencyKv.getValueOffset(), SortOrder.getDefault()); + builder.setUpdateCacheFrequency(updateCacheFrequencyKv != null ? updateCacheFrequency + : oldTable != null ? oldTable.getUpdateCacheFrequency() + : 0); + + // Check the cell tag to see whether the view has modified this property + final byte[] tagUpdateCacheFreq = (updateCacheFrequencyKv == null) + ? HConstants.EMPTY_BYTE_ARRAY + : TagUtil.concatTags(HConstants.EMPTY_BYTE_ARRAY, updateCacheFrequencyKv); + boolean viewModifiedUpdateCacheFrequency = (PTableType.VIEW.equals(tableType)) && Bytes + .contains(tagUpdateCacheFreq, MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); + builder.setViewModifiedUpdateCacheFrequency( + !Bytes.equals(tagUpdateCacheFreq, HConstants.EMPTY_BYTE_ARRAY) + ? viewModifiedUpdateCacheFrequency + : oldTable != null && oldTable.hasViewModifiedUpdateCacheFrequency()); + + Cell indexDisableTimestampKv = tableKeyValues[INDEX_DISABLE_TIMESTAMP]; + long indexDisableTimestamp = indexDisableTimestampKv == null + ? 0L + : PLong.INSTANCE.getCodec().decodeLong(indexDisableTimestampKv.getValueArray(), + indexDisableTimestampKv.getValueOffset(), SortOrder.getDefault()); + builder.setIndexDisableTimestamp(indexDisableTimestampKv != null ? indexDisableTimestamp + : oldTable != null ? oldTable.getIndexDisableTimestamp() + : 0L); + + Cell isNamespaceMappedKv = tableKeyValues[IS_NAMESPACE_MAPPED_INDEX]; + boolean isNamespaceMapped = isNamespaceMappedKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isNamespaceMappedKv.getValueArray(), + isNamespaceMappedKv.getValueOffset(), isNamespaceMappedKv.getValueLength())); + builder.setNamespaceMapped(isNamespaceMappedKv != null + ? 
isNamespaceMapped + : oldTable != null && oldTable.isNamespaceMapped()); + + Cell autoPartitionSeqKv = tableKeyValues[AUTO_PARTITION_SEQ_INDEX]; + String autoPartitionSeq = autoPartitionSeqKv != null + ? (String) PVarchar.INSTANCE.toObject(autoPartitionSeqKv.getValueArray(), + autoPartitionSeqKv.getValueOffset(), autoPartitionSeqKv.getValueLength()) + : null; + builder.setAutoPartitionSeqName(autoPartitionSeq != null ? autoPartitionSeq + : oldTable != null ? oldTable.getAutoPartitionSeqName() + : null); + + Cell isAppendOnlySchemaKv = tableKeyValues[APPEND_ONLY_SCHEMA_INDEX]; + boolean isAppendOnlySchema = isAppendOnlySchemaKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isAppendOnlySchemaKv.getValueArray(), + isAppendOnlySchemaKv.getValueOffset(), isAppendOnlySchemaKv.getValueLength())); + builder.setAppendOnlySchema(isAppendOnlySchemaKv != null + ? isAppendOnlySchema + : oldTable != null && oldTable.isAppendOnlySchema()); + + Cell storageSchemeKv = tableKeyValues[STORAGE_SCHEME_INDEX]; + // TODO: change this once we start having other values for storage schemes + ImmutableStorageScheme storageScheme = storageSchemeKv == null + ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN + : ImmutableStorageScheme + .fromSerializedValue((byte) PTinyint.INSTANCE.toObject(storageSchemeKv.getValueArray(), + storageSchemeKv.getValueOffset(), storageSchemeKv.getValueLength())); + builder.setImmutableStorageScheme(storageSchemeKv != null ? storageScheme + : oldTable != null ? oldTable.getImmutableStorageScheme() + : ImmutableStorageScheme.ONE_CELL_PER_COLUMN); + + Cell encodingSchemeKv = tableKeyValues[QUALIFIER_ENCODING_SCHEME_INDEX]; + QualifierEncodingScheme encodingScheme = encodingSchemeKv == null + ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + : QualifierEncodingScheme + .fromSerializedValue((byte) PTinyint.INSTANCE.toObject(encodingSchemeKv.getValueArray(), + encodingSchemeKv.getValueOffset(), encodingSchemeKv.getValueLength())); + builder.setQualifierEncodingScheme(encodingSchemeKv != null ? encodingScheme + : oldTable != null ? oldTable.getEncodingScheme() + : QualifierEncodingScheme.NON_ENCODED_QUALIFIERS); + + Cell useStatsForParallelizationKv = tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX]; + Boolean useStatsForParallelization = useStatsForParallelizationKv == null + ? null + : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(), + useStatsForParallelizationKv.getValueOffset(), + useStatsForParallelizationKv.getValueLength())); + builder + .setUseStatsForParallelization(useStatsForParallelization != null ? useStatsForParallelization + : oldTable != null ? oldTable.useStatsForParallelization() + : null); + Cell lastDDLTimestampKv = tableKeyValues[LAST_DDL_TIMESTAMP_INDEX]; + Long lastDDLTimestamp = lastDDLTimestampKv == null + ? null + : PLong.INSTANCE.getCodec().decodeLong(lastDDLTimestampKv.getValueArray(), + lastDDLTimestampKv.getValueOffset(), SortOrder.getDefault()); + builder.setLastDDLTimestamp(lastDDLTimestampKv != null ? lastDDLTimestamp + : oldTable != null ? oldTable.getLastDDLTimestamp() + : null); + + Cell changeDetectionEnabledKv = tableKeyValues[CHANGE_DETECTION_ENABLED_INDEX]; + boolean isChangeDetectionEnabled = changeDetectionEnabledKv != null + && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(changeDetectionEnabledKv.getValueArray(), + changeDetectionEnabledKv.getValueOffset(), changeDetectionEnabledKv.getValueLength())); + builder.setIsChangeDetectionEnabled(changeDetectionEnabledKv != null + ? 
isChangeDetectionEnabled + : oldTable != null && oldTable.isChangeDetectionEnabled()); + + Cell schemaVersionKv = tableKeyValues[SCHEMA_VERSION_INDEX]; + String schemaVersion = schemaVersionKv != null + ? (String) PVarchar.INSTANCE.toObject(schemaVersionKv.getValueArray(), + schemaVersionKv.getValueOffset(), schemaVersionKv.getValueLength()) + : null; + builder.setSchemaVersion(schemaVersion != null ? schemaVersion + : oldTable != null ? oldTable.getSchemaVersion() + : null); + + Cell externalSchemaIdKv = tableKeyValues[EXTERNAL_SCHEMA_ID_INDEX]; + String externalSchemaId = externalSchemaIdKv != null + ? (String) PVarchar.INSTANCE.toObject(externalSchemaIdKv.getValueArray(), + externalSchemaIdKv.getValueOffset(), externalSchemaIdKv.getValueLength()) + : null; + builder.setExternalSchemaId(externalSchemaId != null ? externalSchemaId + : oldTable != null ? oldTable.getExternalSchemaId() + : null); + + Cell streamingTopicNameKv = tableKeyValues[STREAMING_TOPIC_NAME_INDEX]; + String streamingTopicName = streamingTopicNameKv != null + ? (String) PVarchar.INSTANCE.toObject(streamingTopicNameKv.getValueArray(), + streamingTopicNameKv.getValueOffset(), streamingTopicNameKv.getValueLength()) + : null; + builder.setStreamingTopicName(streamingTopicName != null ? streamingTopicName + : oldTable != null ? oldTable.getStreamingTopicName() + : null); + + Cell includeSpecKv = tableKeyValues[CDC_INCLUDE_INDEX]; + String includeSpec = includeSpecKv != null + ? (String) PVarchar.INSTANCE.toObject(includeSpecKv.getValueArray(), + includeSpecKv.getValueOffset(), includeSpecKv.getValueLength()) + : null; + builder + .setCDCIncludeScopes(includeSpec != null ? CDCUtil.makeChangeScopeEnumsFromString(includeSpec) + : oldTable != null ? oldTable.getCDCIncludeScopes() + : null); + + Cell indexWhereKv = tableKeyValues[INDEX_WHERE_INDEX]; + String indexWhere = indexWhereKv != null + ? (String) PVarchar.INSTANCE.toObject(indexWhereKv.getValueArray(), + indexWhereKv.getValueOffset(), indexWhereKv.getValueLength()) + : null; + builder.setIndexWhere( + indexWhere != null ? indexWhere : oldTable != null ? oldTable.getIndexWhere() : null); + + Cell maxLookbackAgeKv = tableKeyValues[MAX_LOOKBACK_AGE_INDEX]; + Long maxLookbackAge = maxLookbackAgeKv == null + ? null + : PLong.INSTANCE.getCodec().decodeLong(maxLookbackAgeKv.getValueArray(), + maxLookbackAgeKv.getValueOffset(), SortOrder.getDefault()); + if (tableType == PTableType.VIEW) { + byte[] viewKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), + schemaName == null ? null : schemaName.getBytes(), tableNameBytes); + maxLookbackAge = scanMaxLookbackAgeFromParent(viewKey, clientTimeStamp); + } + Cell ttlKv = tableKeyValues[TTL_INDEX]; + int ttl = TTL_NOT_DEFINED; + if (ttlKv != null) { + String ttlStr = (String) PVarchar.INSTANCE.toObject(ttlKv.getValueArray(), + ttlKv.getValueOffset(), ttlKv.getValueLength()); + ttl = Integer.parseInt(ttlStr); + } + ttl = ttlKv != null ? ttl : oldTable != null ? oldTable.getTTL() : TTL_NOT_DEFINED; + if (tableType == VIEW && viewType != MAPPED && ttl == TTL_NOT_DEFINED) { + // Scan SysCat to get TTL from Parent View/Table + byte[] viewKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), + schemaName == null ? null : schemaName.getBytes(), tableNameBytes); + ttl = getTTLFromHierarchy(viewKey, clientTimeStamp, false); + + // TODO: Need to Update Cache for Alter Commands, can use PHOENIX-6883. 
} - @Override - public void getTable(RpcController controller, GetTableRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - byte[] tenantId = request.getTenantId().toByteArray(); - byte[] schemaName = request.getSchemaName().toByteArray(); - byte[] tableName = request.getTableName().toByteArray(); - byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); - long tableTimeStamp = request.getTableTimestamp(); - try { - // TODO: check that key is within region.getStartKey() and region.getEndKey() - // and return special code to force client to lookup region from meta. - Region region = env.getRegion(); - MetaDataMutationResult result = checkTableKeyInRegion(key, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } + Cell rowKeyMatcherKv = tableKeyValues[ROW_KEY_MATCHER_INDEX]; + byte[] rowKeyMatcher = + rowKeyMatcherKv != null ? CellUtil.cloneValue(rowKeyMatcherKv) : HConstants.EMPTY_BYTE_ARRAY; + builder.setRowKeyMatcher(rowKeyMatcher != null ? rowKeyMatcher + : oldTable != null ? oldTable.getRowKeyMatcher() + : HConstants.EMPTY_BYTE_ARRAY); + + // Check the cell tag to see whether the view has modified this property + final byte[] tagUseStatsForParallelization = (useStatsForParallelizationKv == null) + ? HConstants.EMPTY_BYTE_ARRAY + : TagUtil.concatTags(HConstants.EMPTY_BYTE_ARRAY, useStatsForParallelizationKv); + boolean viewModifiedUseStatsForParallelization = + (PTableType.VIEW.equals(tableType)) && Bytes.contains(tagUseStatsForParallelization, + MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); + builder.setViewModifiedUseStatsForParallelization(viewModifiedUseStatsForParallelization + || (oldTable != null && oldTable.hasViewModifiedUseStatsForParallelization())); + + boolean setPhysicalName = false; + List columns = Lists.newArrayListWithExpectedSize(columnCount); + List indexes = Lists.newArrayList(); + List physicalTables = Lists.newArrayList(); + PName parentTableName = tableType == INDEX || tableType == CDC ? dataTableName : null; + PName parentSchemaName = tableType == INDEX || tableType == CDC ? schemaName : null; + PName parentLogicalName = null; + EncodedCQCounter cqCounter = null; + if (oldTable != null) { + cqCounter = oldTable.getEncodedCQCounter(); + } else { + cqCounter = + (!EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) || tableType == PTableType.VIEW) + ? PTable.EncodedCQCounter.NULL_COUNTER + : new EncodedCQCounter(); + } - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - PTable table = - doGetTable(tenantId, schemaName, tableName, request.getClientTimestamp(), - null, request.getClientVersion()); - if (table == null) { - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); - builder.setMutationTime(currentTime); - done.run(builder.build()); - return; + if (timeStamp == HConstants.LATEST_TIMESTAMP) { + timeStamp = lastDDLTimestamp != null ? 
lastDDLTimestamp : clientTimeStamp; + } + builder.setTimeStamp(timeStamp); + + PTable transformingNewTable = null; + boolean isRegularView = (tableType == PTableType.VIEW && viewType != MAPPED); + boolean isThisAViewIndex = false; + for (List columnCellList : allColumnCellList) { + + Cell colKv = columnCellList.get(LINK_TYPE_INDEX); + int colKeyLength = colKv.getRowLength(); + + PName colName = + newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength - offset); + if (colName == null) { + continue; + } + int colKeyOffset = offset + colName.getBytes().length + 1; + PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, + colKeyLength - colKeyOffset); + + if (isQualifierCounterKV(colKv)) { + if (famName != null) { + Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), + colKv.getValueOffset(), SortOrder.ASC); + cqCounter.setValue(famName.getString(), value); + } + } else if ( + Bytes.compareTo(LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length, colKv.getQualifierArray(), + colKv.getQualifierOffset(), colKv.getQualifierLength()) == 0 + ) { + LinkType linkType = + LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]); + if (linkType == LinkType.INDEX_TABLE) { + addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes, + clientVersion); + } else if (linkType == PHYSICAL_TABLE) { + // famName contains the logical name of the parent table. We need to get the actual + // physical name of the table + PTable parentTable = null; + // call getTable() on famName only if it does not start with _IDX_. + // Table name starting with _IDX_ always must refer to HBase table that is + // shared by all view indexes on the given table/view hierarchy. + // _IDX_ is HBase table that does not have corresponding PTable representation + // in Phoenix, hence there is no point of calling getTable(). + if ( + !famName.getString().startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX) + && indexType != IndexType.LOCAL + ) { + try { + parentTable = doGetTable(null, + SchemaUtil.getSchemaNameFromFullName(famName.getBytes()) + .getBytes(StandardCharsets.UTF_8), + SchemaUtil.getTableNameFromFullName(famName.getBytes()) + .getBytes(StandardCharsets.UTF_8), + clientTimeStamp, clientVersion); + } catch (SQLException e) { + if (e.getErrorCode() != SQLExceptionCode.GET_TABLE_ERROR.getErrorCode()) { + LOGGER.error("Error while retrieving getTable for PHYSICAL_TABLE link to {}", + famName, e); + throw e; + } } - getCoprocessorHost().preGetTable(Bytes.toString(tenantId), SchemaUtil.getTableName(schemaName, tableName), - TableName.valueOf(table.getPhysicalName().getBytes())); - - if (request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG - && table.getType() == PTableType.VIEW - && table.getViewType() != MAPPED) { - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - PTable pTable = connection.getTableNoCache(table.getParentName().getString()); - table = ViewUtil.addDerivedColumnsFromParent(connection, table, pTable); - } + if (isSystemCatalogSplittable && (parentTable == null || isTableDeleted(parentTable))) { + // parentTable is neither in the cache nor in the local region. Since + // famName is only logical name, we need to find the physical table. 
+ // Hence, it is recommended to scan SYSTEM.CATALOG table again using + // separate CQSI connection as SYSTEM.CATALOG is splittable so the + // PTable with famName might be available on different region. + try (PhoenixConnection connection = + getServerConnectionForMetaData(env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + parentTable = connection.getTableNoCache(famName.getString()); + } catch (TableNotFoundException e) { + // It is ok to swallow this exception since this could be a view index and _IDX_ + // table is not there. + } } - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); - builder.setMutationTime(currentTime); - if (blockWriteRebuildIndex) { - long disableIndexTimestamp = table.getIndexDisableTimestamp(); - long minNonZerodisableIndexTimestamp = disableIndexTimestamp > 0 ? disableIndexTimestamp : Long.MAX_VALUE; - for (PTable index : table.getIndexes()) { - disableIndexTimestamp = index.getIndexDisableTimestamp(); - if (disableIndexTimestamp > 0 - && (index.getIndexState() == PIndexState.ACTIVE - || index.getIndexState() == PIndexState.PENDING_ACTIVE - || index.getIndexState() == PIndexState.PENDING_DISABLE) - && disableIndexTimestamp < minNonZerodisableIndexTimestamp) { - minNonZerodisableIndexTimestamp = disableIndexTimestamp; - } - } - // Freeze time for table at min non-zero value of INDEX_DISABLE_TIMESTAMP - // This will keep the table consistent with index as the table has had one more - // batch applied to it. - if (minNonZerodisableIndexTimestamp != Long.MAX_VALUE) { - // Subtract one because we add one due to timestamp granularity in Windows - builder.setMutationTime(minNonZerodisableIndexTimestamp - 1); - } + } + + if (parentTable == null || isTableDeleted(parentTable)) { + if (indexType == IndexType.LOCAL) { + PName tablePhysicalName = getPhysicalTableName(env.getRegion(), null, + SchemaUtil.getSchemaNameFromFullName(famName.getBytes()) + .getBytes(StandardCharsets.UTF_8), + SchemaUtil.getTableNameFromFullName(famName.getBytes()) + .getBytes(StandardCharsets.UTF_8), + clientTimeStamp); + if (tablePhysicalName == null) { + physicalTables.add(famName); + setPhysicalName = true; + } else { + physicalTables.add(SchemaUtil.getPhysicalHBaseTableName(schemaName, + tablePhysicalName, isNamespaceMapped)); + setPhysicalName = true; + } + } else { + physicalTables.add(famName); + setPhysicalName = true; } - // the PTable of views and indexes on views might get updated because a column is added to one of - // their parents (this won't change the timestamp) - if (table.getType() != PTableType.TABLE || table.getTimeStamp() != tableTimeStamp) { - builder.setTable(PTableImpl.toProto(table)); + // If this is a view index, then one of the link is IDX_VW -> _IDX_ PhysicalTable link. + // Since famName is _IDX_ and we can't get this table hence it is null, we need to use + // actual view name + parentLogicalName = (tableType == INDEX + ? 
SchemaUtil.getTableName(parentSchemaName, parentTableName) + : famName); + } else { + String parentPhysicalTableName = parentTable.getPhysicalName().getString(); + physicalTables.add(PNameFactory.newName(parentPhysicalTableName)); + setPhysicalName = true; + parentLogicalName = + SchemaUtil.getTableName(parentTable.getSchemaName(), parentTable.getTableName()); + } + } else if (linkType == PARENT_TABLE) { + parentTableName = + PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes())); + parentSchemaName = + PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes())); + } else if (linkType == LinkType.EXCLUDED_COLUMN) { + // add the excludedColumn + addExcludedColumnToTable(columns, colName, famName, colKv.getTimestamp()); + } else if (linkType == LinkType.TRANSFORMING_NEW_TABLE) { + transformingNewTable = + doGetTable((tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes()), + SchemaUtil.getSchemaNameFromFullName(famName.getBytes()).getBytes(), + SchemaUtil.getTableNameFromFullName(famName.getBytes()).getBytes(), clientTimeStamp, + null, clientVersion); + if (transformingNewTable == null) { + // It could be global + transformingNewTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, + SchemaUtil.getSchemaNameFromFullName(famName.getBytes()).getBytes(), + SchemaUtil.getTableNameFromFullName(famName.getBytes()).getBytes(), clientTimeStamp, + null, clientVersion); + if (transformingNewTable == null) { + ClientUtil.throwIOException("Transforming new table not found", + new TableNotFoundException(schemaName.getString(), famName.getString())); } - done.run(builder.build()); - } catch (Throwable t) { - LOGGER.error("getTable failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); - } + } + } else if (linkType == VIEW_INDEX_PARENT_TABLE) { + byte[] viewKey = getTableKey(tenantId == null ? null : tenantId.getBytes(), + parentSchemaName == null ? null : parentSchemaName.getBytes(), + parentTableName.getBytes()); + // parentViewType should not be Mapped + maxLookbackAge = scanMaxLookbackAgeFromParent(viewKey, clientTimeStamp); + ttl = getTTLFromHierarchy(viewKey, clientTimeStamp, true); + isThisAViewIndex = true; + } + } else { + long columnTimestamp = columnCellList.get(0).getTimestamp() != HConstants.LATEST_TIMESTAMP + ? columnCellList.get(0).getTimestamp() + : timeStamp; + boolean isSalted = saltBucketNum != null + || (oldTable != null && oldTable.getBucketNum() != null && oldTable.getBucketNum() > 0); + addColumnToTable(columnCellList, colName, famName, colKeyValues, columns, isSalted, + baseColumnCount, isRegularView, columnTimestamp); + } } - - private PhoenixMetaDataCoprocessorHost getCoprocessorHost() { - return phoenixAccessCoprocessorHost; + if (tableType == INDEX && !isThisAViewIndex) { + byte[] tableKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), + parentSchemaName == null ? 
null : parentSchemaName.getBytes(), parentTableName.getBytes()); + maxLookbackAge = scanMaxLookbackAgeFromParent(tableKey, clientTimeStamp); } - - private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, - long clientTimeStamp, int clientVersion) - throws IOException, SQLException { - Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - PTable newTable; - region.startRegionOperation(); - try (RegionScanner scanner = region.getScanner(scan)) { - PTable oldTable = (PTable) metaDataCache.getIfPresent(cacheKey); - if (oldTable == null) { - metricsSource.incrementMetadataCacheMissCount(); - } else { - metricsSource.incrementMetadataCacheHitCount(); - } - long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP - 1 : oldTable.getTimeStamp(); - newTable = getTable(scanner, clientTimeStamp, tableTimeStamp, clientVersion); - if (newTable != null - && (oldTable == null || tableTimeStamp < newTable.getTimeStamp() - || (blockWriteRebuildIndex && newTable.getIndexDisableTimestamp() > 0))) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Caching table " - + Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(), - cacheKey.getLength()) - + " at seqNum " + newTable.getSequenceNumber() - + " with newer timestamp " + newTable.getTimeStamp() + " versus " - + tableTimeStamp); - } - metaDataCache.put(cacheKey, newTable); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(newTable.getEstimatedSize()); - } - } finally { - region.closeRegionOperation(); - } - return newTable; + builder.setMaxLookbackAge(maxLookbackAge != null + ? maxLookbackAge + : (oldTable != null ? oldTable.getMaxLookbackAge() : null)); + + if (tableType == INDEX && !isThisAViewIndex && ttl == TTL_NOT_DEFINED) { + // If this is an index on Table get TTL from Table + byte[] tableKey = getTableKey(tenantId == null ? null : tenantId.getBytes(), + parentSchemaName == null ? null : parentSchemaName.getBytes(), parentTableName.getBytes()); + ttl = getTTLForTable(tableKey, clientTimeStamp); } - - private List buildFunctions(List keys, Region region, - long clientTimeStamp, boolean isReplace, List deleteMutationsForReplace) throws IOException, SQLException { - List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); - for (byte[] key : keys) { - byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY); - ByteUtil.nextKey(stopKey, stopKey.length); - keyRanges - .add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false, SortOrder.ASC)); - } - Scan scan = new Scan(); - scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp); - ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges); - scanRanges.initializeScan(scan); - scan.setFilter(scanRanges.getSkipScanFilter()); - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - List functions = new ArrayList(); - PFunction function = null; - try (RegionScanner scanner = region.getScanner(scan)) { - for (int i = 0; i < keys.size(); i++) { - function = null; - function = - getFunction(scanner, isReplace, clientTimeStamp, deleteMutationsForReplace); - if (function == null) { - return null; - } - byte[] functionKey = - SchemaUtil.getFunctionKey( - function.getTenantId() == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : function - .getTenantId().getBytes(), Bytes.toBytes(function - .getFunctionName())); - metaDataCache.put(new FunctionBytesPtr(functionKey), function); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); - functions.add(function); - } - return functions; - } + builder.setTTL(ttl); + builder.setEncodedCQCounter(cqCounter); + + builder.setIndexes(indexes != null ? indexes + : oldTable != null ? oldTable.getIndexes() + : Collections. emptyList()); + + if (physicalTables == null || physicalTables.size() == 0) { + builder.setPhysicalNames( + oldTable != null ? oldTable.getPhysicalNames() : ImmutableList. of()); + } else { + builder.setPhysicalNames(ImmutableList.copyOf(physicalTables)); } - - private List buildSchemas(List keys, Region region, long clientTimeStamp, - ImmutableBytesPtr cacheKey) throws IOException, SQLException { - List keyRanges = Lists.newArrayListWithExpectedSize(keys.size()); - for (byte[] key : keys) { - byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY); - ByteUtil.nextKey(stopKey, stopKey.length); - keyRanges - .add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false, SortOrder.ASC)); - } - Scan scan = new Scan(); - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP - && clientTimeStamp != HConstants.OLDEST_TIMESTAMP) { - scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1); - } else { - scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp); - } - ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges); - scanRanges.initializeScan(scan); - scan.setFilter(scanRanges.getSkipScanFilter()); - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - List schemas = new ArrayList(); - PSchema schema = null; - try (RegionScanner scanner = region.getScanner(scan)) { - for (int i = 0; i < keys.size(); i++) { - schema = null; - schema = getSchema(scanner, clientTimeStamp); - if (schema == null) { - return null; - } - metaDataCache.put(cacheKey, schema); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); - schemas.add(schema); - } - return schemas; - } + if (!setPhysicalName && oldTable != null) { + builder.setPhysicalTableName(oldTable.getPhysicalName(true)); } - - private void addIndexToTable(PName tenantId, PName schemaName, PName indexName, PName tableName, - long clientTimeStamp, List indexes, int clientVersion) - throws IOException, SQLException { - byte[] tenantIdBytes = tenantId == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(); - PTable indexTable = doGetTable(tenantIdBytes, schemaName.getBytes(), indexName.getBytes(), clientTimeStamp, - null, clientVersion); - if (indexTable == null) { - ClientUtil.throwIOException("Index not found", new TableNotFoundException(schemaName.getString(), indexName.getString())); - return; - } - indexes.add(indexTable); - } - - private void addExcludedColumnToTable(List pColumns, PName colName, PName famName, long timestamp) { - PColumnImpl pColumn = PColumnImpl.createExcludedColumn(famName, colName, timestamp); - pColumns.add(pColumn); - } - - private void addColumnToTable(List results, PName colName, PName famName, - Cell[] colKeyValues, List columns, boolean isSalted, int baseColumnCount, - boolean isRegularView, long timestamp) { - int i = 0; - int j = 0; - while (i < results.size() && j < COLUMN_KV_COLUMNS.size()) { - Cell kv = results.get(i); - Cell searchKv = COLUMN_KV_COLUMNS.get(j); - int cmp = - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), searchKv.getQualifierArray(), - searchKv.getQualifierOffset(), searchKv.getQualifierLength()); - if (cmp == 0) { - colKeyValues[j++] = kv; - i++; - } else if (cmp > 0) { - colKeyValues[j++] = null; - } else { - i++; // shouldn't happen - means unexpected KV in system table column row + builder.setTransformingNewTable(transformingNewTable); + + builder.setExcludedColumns(ImmutableList. of()); + builder.setBaseTableLogicalName(parentLogicalName != null ? parentLogicalName + : oldTable != null ? oldTable.getBaseTableLogicalName() + : null); + builder.setParentTableName(parentTableName != null ? parentTableName + : oldTable != null ? oldTable.getParentTableName() + : null); + builder.setParentSchemaName(parentSchemaName != null ? parentSchemaName + : oldTable != null ? oldTable.getParentSchemaName() + : null); + + builder.addOrSetColumns(columns); + // Avoid querying the stats table because we're holding the rowLock here. Issuing an RPC to a + // remote + // server while holding this lock is a bad idea and likely to cause contention. 
+ return builder.build(); + } + + private Long scanMaxLookbackAgeFromParent(byte[] key, long clientTimeStamp) throws IOException { + Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); + try ( + Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration())); + ResultScanner scanner = sysCat.getScanner(scan)) { + Result result = scanner.next(); + boolean startCheckingForLink = false; + byte[] parentTableKey = null; + do { + if (result == null) { + return null; + } else if (startCheckingForLink) { + byte[] linkTypeBytes = result.getValue(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); + if (linkTypeBytes != null) { + LinkType linkType = LinkType.fromSerializedValue(linkTypeBytes[0]); + int rowKeyColMetadataLength = 5; + byte[][] rowKeyMetaData = new byte[rowKeyColMetadataLength][]; + getVarChars(result.getRow(), rowKeyColMetadataLength, rowKeyMetaData); + if (linkType == VIEW_INDEX_PARENT_TABLE) { + parentTableKey = getParentTableKeyFromChildRowKeyMetaData(rowKeyMetaData); + return scanMaxLookbackAgeFromParent(parentTableKey, clientTimeStamp); + } else if (linkType == PHYSICAL_TABLE) { + parentTableKey = getParentTableKeyFromChildRowKeyMetaData(rowKeyMetaData); } - } + } + } else { + byte[] maxLookbackAgeInBytes = + result.getValue(TABLE_FAMILY_BYTES, MAX_LOOKBACK_AGE_BYTES); + if (maxLookbackAgeInBytes != null) { + return PLong.INSTANCE.getCodec().decodeLong(maxLookbackAgeInBytes, 0, + SortOrder.getDefault()); + } + } + result = scanner.next(); + startCheckingForLink = true; + } while (result != null); + return parentTableKey == null + ? null + : scanMaxLookbackAgeFromParent(parentTableKey, clientTimeStamp); + } + } + + private byte[] getParentTableKeyFromChildRowKeyMetaData(byte[][] rowKeyMetaData) { + byte[] parentTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + String parentSchema = SchemaUtil + .getSchemaNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); + byte[] parentSchemaName = + parentSchema != null ? parentSchema.getBytes(StandardCharsets.UTF_8) : null; + byte[] parentTableName = + SchemaUtil.getTableNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) + .getBytes(StandardCharsets.UTF_8); + return SchemaUtil.getTableKey(parentTenantId, parentSchemaName, parentTableName); + } + + /** + * Method to return TTL value defined at current level or up the Hierarchy of the view. + * @param viewKey Key of the view for which we have to find TTL + * @param clientTimeStamp Client TimeStamp + * @return TTL value for a given view, if nothing is defined anywhere then return + * TTL_NOT_DEFINED(0). 
+ */ + + private int getTTLFromHierarchy(byte[] viewKey, long clientTimeStamp, boolean checkForMappedView) + throws IOException, SQLException { + Scan scan = MetaDataUtil.newTableRowsScan(viewKey, MIN_TABLE_TIMESTAMP, clientTimeStamp); + Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration())); + ResultScanner scanner = sysCat.getScanner(scan); + Result result = scanner.next(); + + byte[] tableKey = null; + do { + + if (result == null) { + return TTL_NOT_DEFINED; + } - if (colKeyValues[DATA_TYPE_INDEX] == null || colKeyValues[NULLABLE_INDEX] == null - || colKeyValues[ORDINAL_POSITION_INDEX] == null) { - throw new IllegalStateException("Didn't find all required key values in '" - + colName.getString() + "' column metadata row"); - } + // return TTL_NOT_DEFINED for Index on a Mapped View. + if (checkForMappedView && checkIfViewIsMappedView(result)) { + return TTL_NOT_DEFINED; + } + + byte[] linkTypeBytes = result.getValue(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); + byte[][] rowKeyMetaData = new byte[5][]; + getVarChars(result.getRow(), 5, rowKeyMetaData); + // Check if TTL is defined at the current given level + if (result.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { + String ttlStr = (String) PVarchar.INSTANCE + .toObject(result.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES)); + return Integer.parseInt(ttlStr); + } else if (linkTypeBytes != null) { + String parentSchema = SchemaUtil + .getSchemaNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); + byte[] parentViewSchemaName = + parentSchema != null ? parentSchema.getBytes(StandardCharsets.UTF_8) : null; + byte[] parentViewName = SchemaUtil + .getTableNameFromFullName(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) + .getBytes(StandardCharsets.UTF_8); + // Get TTL from up the hierarchy, Checking for Parent view link and getting TTL from it. + if (LinkType.fromSerializedValue(linkTypeBytes[0]) == PARENT_TABLE) { + byte[] parentViewTenantId = result.getValue(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES); + byte[] parentViewKey = + SchemaUtil.getTableKey(parentViewTenantId, parentViewSchemaName, parentViewName); + return getTTLFromHierarchy(parentViewKey, clientTimeStamp, false); + } + + // Store tableKey to use if we don't find TTL at current level and from + // parent views above the hierarchy + if (LinkType.fromSerializedValue(linkTypeBytes[0]) == PHYSICAL_TABLE) { + tableKey = SchemaUtil.getTableKey(null, parentViewSchemaName, parentViewName); + } + } + + result = scanner.next(); + } while (result != null); + + // Return TTL defined at Table level for the given hierarchy as we didn't find TTL any of the + // views. 
+ return getTTLForTable(tableKey, clientTimeStamp); + + } + + private boolean checkIfViewIsMappedView(Result result) { + byte[] viewTypeBytes = result.getValue(TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES); + if (viewTypeBytes != null && ViewType.fromSerializedValue(viewTypeBytes[0]) == MAPPED) { + return true; + } + return false; + } + + /*** + * Get TTL Value stored in SYSCAT for a given table + * @param tableKey of table for which we are fining TTL + * @param clientTimeStamp client TimeStamp value + * @return TTL defined for a given table if it is null then return TTL_NOT_DEFINED(0) + */ + private int getTTLForTable(byte[] tableKey, long clientTimeStamp) throws IOException { + Scan scan = MetaDataUtil.newTableRowsScan(tableKey, MIN_TABLE_TIMESTAMP, clientTimeStamp); + Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration())); + ResultScanner scanner = sysCat.getScanner(scan); + Result result = scanner.next(); + do { + if (result == null) { + return TTL_NOT_DEFINED; + } + if (result.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { + String ttlStr = (String) PVarchar.INSTANCE + .toObject(result.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES)); + return Integer.parseInt(ttlStr); + } + result = scanner.next(); + } while (result != null); + return TTL_NOT_DEFINED; + } + + private Long getViewIndexId(Cell[] tableKeyValues, PDataType viewIndexIdType) { + Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX]; + return viewIndexIdKv == null ? null : decodeViewIndexId(viewIndexIdKv, viewIndexIdType); + } + + private PTable modifyIndexStateForOldClient(int clientVersion, PTable table) throws SQLException { + if (table == null) { + return table; + } + // PHOENIX-5073 Sets the index state based on the client version in case of old clients. + // If client is not yet up to 4.12, then translate PENDING_ACTIVE to ACTIVE (as would have + // been the value in those versions) since the client won't have this index state in its + // enum. + if ( + table.getIndexState() == PIndexState.PENDING_ACTIVE + && clientVersion < MetaDataProtocol.MIN_PENDING_ACTIVE_INDEX + ) { + table = PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) + .setState(PIndexState.ACTIVE).build(); + } + // If client is not yet up to 4.14, then translate PENDING_DISABLE to DISABLE + // since the client won't have this index state in its enum. + if ( + table.getIndexState() == PIndexState.PENDING_DISABLE + && clientVersion < MetaDataProtocol.MIN_PENDING_DISABLE_INDEX + ) { + // note: for older clients, we have to rely on the rebuilder to transition + // PENDING_DISABLE -> DISABLE + table = PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) + .setState(PIndexState.DISABLE).build(); + } + return table; + } + + /** + * Returns viewIndexId based on its underlying data type + */ + private Long decodeViewIndexId(Cell viewIndexIdKv, PDataType viewIndexIdType) { + return viewIndexIdType.getCodec().decodeLong(viewIndexIdKv.getValueArray(), + viewIndexIdKv.getValueOffset(), SortOrder.getDefault()); + } + + private PDataType getViewIndexIdType(Cell[] tableKeyValues) { + Cell dataTypeKv = tableKeyValues[VIEW_INDEX_ID_DATA_TYPE_INDEX]; + return dataTypeKv == null + ? 
MetaDataUtil.getLegacyViewIndexIdDataType() + : PDataType.fromTypeId(PInteger.INSTANCE.getCodec().decodeInt(dataTypeKv.getValueArray(), + dataTypeKv.getValueOffset(), SortOrder.getDefault())); + } + + private boolean isQualifierCounterKV(Cell kv) { + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), QUALIFIER_COUNTER_KV.getQualifierArray(), + QUALIFIER_COUNTER_KV.getQualifierOffset(), QUALIFIER_COUNTER_KV.getQualifierLength()); + return cmp == 0; + } + + private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) + throws IOException, SQLException { + List results = Lists.newArrayList(); + scanner.next(results); + if (results.isEmpty()) { + return null; + } - Cell columnSizeKv = colKeyValues[COLUMN_SIZE_INDEX]; - Integer maxLength = - columnSizeKv == null ? null : PInteger.INSTANCE.getCodec().decodeInt( - columnSizeKv.getValueArray(), columnSizeKv.getValueOffset(), SortOrder.getDefault()); - Cell decimalDigitKv = colKeyValues[DECIMAL_DIGITS_INDEX]; - Integer scale = - decimalDigitKv == null ? null : PInteger.INSTANCE.getCodec().decodeInt( - decimalDigitKv.getValueArray(), decimalDigitKv.getValueOffset(), SortOrder.getDefault()); - Cell ordinalPositionKv = colKeyValues[ORDINAL_POSITION_INDEX]; - int position = - PInteger.INSTANCE.getCodec().decodeInt(ordinalPositionKv.getValueArray(), - ordinalPositionKv.getValueOffset(), SortOrder.getDefault()) + (isSalted ? 1 : 0); - ; - - // if this column was inherited from a parent and was dropped then we create an excluded column - // which will be used to exclude the parent column while combining columns from ancestors - Cell excludedColumnKv = colKeyValues[EXCLUDED_COLUMN_LINK_TYPE_KV_INDEX]; - if (excludedColumnKv != null && colKeyValues[DATA_TYPE_INDEX] - .getTimestamp() <= excludedColumnKv.getTimestamp()) { - LinkType linkType = - LinkType.fromSerializedValue( - excludedColumnKv.getValueArray()[excludedColumnKv.getValueOffset()]); - if (linkType == LinkType.EXCLUDED_COLUMN) { - addExcludedColumnToTable(columns, colName, famName, excludedColumnKv.getTimestamp()); - } else { - // if we have a column metadata row that has a link type keyvalue it should - // represent an excluded column by containing the LinkType.EXCLUDED_COLUMN - throw new IllegalStateException( - "Link type should be EXCLUDED_COLUMN but found an unxpected link type for key value " - + excludedColumnKv); - } - return; - } + Cell keyValue = results.get(0); + byte[] keyBuffer = keyValue.getRowArray(); + int keyLength = keyValue.getRowLength(); + int keyOffset = keyValue.getRowOffset(); + PName tenantId = newPName(keyBuffer, keyOffset, keyLength); + int tenantIdLength = (tenantId == null) ? 
0 : tenantId.getBytes().length; + if (tenantIdLength == 0) { + tenantId = null; + } + PName schemaName = + newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1); + long timeStamp = keyValue.getTimestamp(); + return new PSchema(schemaName.getString(), timeStamp); + } + + private PFunction getFunction(RegionScanner scanner, final boolean isReplace, + long clientTimeStamp, List deleteMutationsForReplace) + throws IOException, SQLException { + List results = Lists.newArrayList(); + scanner.next(results); + if (results.isEmpty()) { + return null; + } + Cell[] functionKeyValues = new Cell[FUNCTION_KV_COLUMNS.size()]; + Cell[] functionArgKeyValues = new Cell[FUNCTION_ARG_KV_COLUMNS.size()]; + // Create PFunction based on KeyValues from scan + Cell keyValue = results.get(0); + byte[] keyBuffer = keyValue.getRowArray(); + int keyLength = keyValue.getRowLength(); + int keyOffset = keyValue.getRowOffset(); + long currentTimeMillis = EnvironmentEdgeManager.currentTimeMillis(); + if (isReplace) { + long deleteTimeStamp = clientTimeStamp == HConstants.LATEST_TIMESTAMP + ? currentTimeMillis - 1 + : (keyValue.getTimestamp() < clientTimeStamp + ? clientTimeStamp - 1 + : keyValue.getTimestamp()); + deleteMutationsForReplace.add(new Delete(keyBuffer, keyOffset, keyLength, deleteTimeStamp)); + } + PName tenantId = newPName(keyBuffer, keyOffset, keyLength); + int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length; + if (tenantIdLength == 0) { + tenantId = null; + } + PName functionName = + newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1); + int functionNameLength = functionName.getBytes().length + 1; + int offset = tenantIdLength + functionNameLength + 1; + + long timeStamp = keyValue.getTimestamp(); + + int i = 0; + int j = 0; + while (i < results.size() && j < FUNCTION_KV_COLUMNS.size()) { + Cell kv = results.get(i); + Cell searchKv = FUNCTION_KV_COLUMNS.get(j); + int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), + searchKv.getQualifierLength()); + if (cmp == 0) { + timeStamp = Math.max(timeStamp, kv.getTimestamp()); // Find max timestamp of table + // header row + functionKeyValues[j++] = kv; + i++; + } else if (cmp > 0) { + timeStamp = Math.max(timeStamp, kv.getTimestamp()); + functionKeyValues[j++] = null; + } else { + i++; // shouldn't happen - means unexpected KV in system table header row + } + } + // CLASS_NAME,NUM_ARGS and JAR_PATH are required. + if (functionKeyValues[CLASS_NAME_INDEX] == null || functionKeyValues[NUM_ARGS_INDEX] == null) { + throw new IllegalStateException( + "Didn't find expected key values for function row in metadata row"); + } - Cell nullableKv = colKeyValues[NULLABLE_INDEX]; - boolean isNullable = - PInteger.INSTANCE.getCodec().decodeInt(nullableKv.getValueArray(), - nullableKv.getValueOffset(), SortOrder.getDefault()) != ResultSetMetaData.columnNoNulls; - Cell dataTypeKv = colKeyValues[DATA_TYPE_INDEX]; - PDataType dataType = - PDataType.fromTypeId(PInteger.INSTANCE.getCodec().decodeInt( - dataTypeKv.getValueArray(), dataTypeKv.getValueOffset(), SortOrder.getDefault())); - if (maxLength == null && dataType == PBinary.INSTANCE) { - dataType = PVarbinary.INSTANCE; // For - } - // backward - // compatibility. - Cell sortOrderKv = colKeyValues[SORT_ORDER_INDEX]; - SortOrder sortOrder = - sortOrderKv == null ? 
SortOrder.getDefault() : SortOrder.fromSystemValue(PInteger.INSTANCE - .getCodec().decodeInt(sortOrderKv.getValueArray(), - sortOrderKv.getValueOffset(), SortOrder.getDefault())); - - Cell arraySizeKv = colKeyValues[ARRAY_SIZE_INDEX]; - Integer arraySize = arraySizeKv == null ? null : - PInteger.INSTANCE.getCodec().decodeInt(arraySizeKv.getValueArray(), arraySizeKv.getValueOffset(), SortOrder.getDefault()); - - Cell viewConstantKv = colKeyValues[VIEW_CONSTANT_INDEX]; - byte[] viewConstant = - viewConstantKv == null ? null : new ImmutableBytesPtr( - viewConstantKv.getValueArray(), viewConstantKv.getValueOffset(), - viewConstantKv.getValueLength()).copyBytesIfNecessary(); - Cell isViewReferencedKv = colKeyValues[IS_VIEW_REFERENCED_INDEX]; - boolean isViewReferenced = isViewReferencedKv != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isViewReferencedKv.getValueArray(), isViewReferencedKv.getValueOffset(), isViewReferencedKv.getValueLength())); - Cell columnDefKv = colKeyValues[COLUMN_DEF_INDEX]; - String expressionStr = columnDefKv == null ? null : (String) PVarchar.INSTANCE.toObject(columnDefKv.getValueArray(), columnDefKv.getValueOffset(), columnDefKv.getValueLength()); - Cell isRowTimestampKV = colKeyValues[IS_ROW_TIMESTAMP_INDEX]; - boolean isRowTimestamp = - isRowTimestampKV == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject( - isRowTimestampKV.getValueArray(), isRowTimestampKV.getValueOffset(), - isRowTimestampKV.getValueLength())); - - boolean isPkColumn = famName == null || famName.getString() == null; - Cell columnQualifierKV = colKeyValues[COLUMN_QUALIFIER_INDEX]; - // Older tables won't have column qualifier metadata present. To make things simpler, just set the - // column qualifier bytes by using the column name. - byte[] columnQualifierBytes = columnQualifierKV != null ? - Arrays.copyOfRange(columnQualifierKV.getValueArray(), - columnQualifierKV.getValueOffset(), columnQualifierKV.getValueOffset() - + columnQualifierKV.getValueLength()) : (isPkColumn ? 
null : colName.getBytes()); - PColumn column = - new PColumnImpl(colName, famName, dataType, maxLength, scale, isNullable, - position - 1, sortOrder, arraySize, viewConstant, isViewReferenced, - expressionStr, isRowTimestamp, false, columnQualifierBytes, - timestamp); - columns.add(column); - } - - private void addArgumentToFunction(List results, PName functionName, PName type, - Cell[] functionKeyValues, List arguments, short argPosition) throws SQLException { - int i = 0; - int j = 0; - while (i < results.size() && j < FUNCTION_ARG_KV_COLUMNS.size()) { - Cell kv = results.get(i); - Cell searchKv = FUNCTION_ARG_KV_COLUMNS.get(j); - int cmp = - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), searchKv.getQualifierArray(), - searchKv.getQualifierOffset(), searchKv.getQualifierLength()); - if (cmp == 0) { - functionKeyValues[j++] = kv; - i++; - } else if (cmp > 0) { - functionKeyValues[j++] = null; - } else { - i++; // shouldn't happen - means unexpected KV in system table column row - } - } + Cell classNameKv = functionKeyValues[CLASS_NAME_INDEX]; + PName className = newPName(classNameKv.getValueArray(), classNameKv.getValueOffset(), + classNameKv.getValueLength()); + Cell jarPathKv = functionKeyValues[JAR_PATH_INDEX]; + PName jarPath = null; + if (jarPathKv != null) { + jarPath = + newPName(jarPathKv.getValueArray(), jarPathKv.getValueOffset(), jarPathKv.getValueLength()); + } + Cell numArgsKv = functionKeyValues[NUM_ARGS_INDEX]; + int numArgs = PInteger.INSTANCE.getCodec().decodeInt(numArgsKv.getValueArray(), + numArgsKv.getValueOffset(), SortOrder.getDefault()); + Cell returnTypeKv = functionKeyValues[RETURN_TYPE_INDEX]; + PName returnType = returnTypeKv == null + ? null + : newPName(returnTypeKv.getValueArray(), returnTypeKv.getValueOffset(), + returnTypeKv.getValueLength()); + + List arguments = Lists.newArrayListWithExpectedSize(numArgs); + for (int k = 0; k < numArgs; k++) { + results.clear(); + scanner.next(results); + if (results.isEmpty()) { + break; + } + Cell typeKv = results.get(0); + if (isReplace) { + long deleteTimeStamp = clientTimeStamp == HConstants.LATEST_TIMESTAMP + ? currentTimeMillis - 1 + : (typeKv.getTimestamp() < clientTimeStamp ? clientTimeStamp - 1 : typeKv.getTimestamp()); + deleteMutationsForReplace.add(new Delete(typeKv.getRowArray(), typeKv.getRowOffset(), + typeKv.getRowLength(), deleteTimeStamp)); + } + int typeKeyLength = typeKv.getRowLength(); + PName typeName = + newPName(typeKv.getRowArray(), typeKv.getRowOffset() + offset, typeKeyLength - offset - 3); + + int argPositionOffset = offset + typeName.getBytes().length + 1; + short argPosition = Bytes.toShort(typeKv.getRowArray(), + typeKv.getRowOffset() + argPositionOffset, typeKeyLength - argPositionOffset); + addArgumentToFunction(results, functionName, typeName, functionArgKeyValues, arguments, + argPosition); + } + Collections.sort(arguments, new Comparator() { + @Override + public int compare(FunctionArgument o1, FunctionArgument o2) { + return o1.getArgPosition() - o2.getArgPosition(); + } + }); + return new PFunction(tenantId, functionName.getString(), arguments, returnType.getString(), + className.getString(), jarPath == null ? 
null : jarPath.getString(), timeStamp); + } + + private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, + long clientTimeStamp) throws IOException { + if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { + return null; + } - Cell isArrayKv = functionKeyValues[IS_ARRAY_INDEX]; - boolean isArrayType = - isArrayKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject( - isArrayKv.getValueArray(), isArrayKv.getValueOffset(), - isArrayKv.getValueLength())); - Cell isConstantKv = functionKeyValues[IS_CONSTANT_INDEX]; - boolean isConstant = - isConstantKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject( - isConstantKv.getValueArray(), isConstantKv.getValueOffset(), - isConstantKv.getValueLength())); - Cell defaultValueKv = functionKeyValues[DEFAULT_VALUE_INDEX]; - String defaultValue = - defaultValueKv == null ? null : (String) PVarchar.INSTANCE.toObject( - defaultValueKv.getValueArray(), defaultValueKv.getValueOffset(), - defaultValueKv.getValueLength()); - Cell minValueKv = functionKeyValues[MIN_VALUE_INDEX]; - String minValue = - minValueKv == null ? null : (String) PVarchar.INSTANCE.toObject( - minValueKv.getValueArray(), minValueKv.getValueOffset(), - minValueKv.getValueLength()); - Cell maxValueKv = functionKeyValues[MAX_VALUE_INDEX]; - String maxValue = - maxValueKv == null ? null : (String) PVarchar.INSTANCE.toObject( - maxValueKv.getValueArray(), maxValueKv.getValueOffset(), - maxValueKv.getValueLength()); - FunctionArgument arg = - new FunctionArgument(type.getString(), isArrayType, isConstant, - defaultValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(defaultValue)).getValue()), - minValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(minValue)).getValue()), - maxValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(maxValue)).getValue()), - argPosition); - arguments.add(arg); - } - - private PName getPhysicalTableName(Region region, byte[] tenantId, byte[] schema, byte[] table, long timestamp) throws IOException { - byte[] key = SchemaUtil.getTableKey(tenantId, schema, table); - Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, - timestamp); - scan.addColumn(TABLE_FAMILY_BYTES, PHYSICAL_TABLE_NAME_BYTES); - try (RegionScanner scanner = region.getScanner(scan)) { - List results = Lists.newArrayList(); - scanner.next(results); - Cell physicalTableNameKv = null; - if (results.size() > 0) { - physicalTableNameKv = results.get(0); - } - PName physicalTableName = - physicalTableNameKv != null ? newPName(physicalTableNameKv.getValueArray(), - physicalTableNameKv.getValueOffset(), physicalTableNameKv.getValueLength()) : null; - return physicalTableName; - } + Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.setRaw(true); + List results = Lists. 
newArrayList(); + try (RegionScanner scanner = region.getScanner(scan)) { + scanner.next(results); + } + for (Cell kv : results) { + KeyValue.Type type = Type.codeToType(kv.getTypeByte()); + if (type == Type.DeleteFamily) { // Row was deleted + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PTable table = newDeletedTableMarker(kv.getTimestamp()); + metaDataCache.put(cacheKey, table); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(table.getEstimatedSize()); + return table; + } } + return null; + } - private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp, - int clientVersion) - throws IOException, SQLException { - List results = Lists.newArrayList(); - scanner.next(results); - if (results.isEmpty()) { - return null; - } - List tableCellList = results; - results = Lists.newArrayList(); - List> allColumnCellList = Lists.newArrayList(); + private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, Region region, + long clientTimeStamp) throws IOException { + if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { + return null; + } - do { - if (results.size() > 0) { - allColumnCellList.add(results); - results = Lists.newArrayList(); - } - } while (scanner.next(results)); - if (results != null && results.size() > 0) { - allColumnCellList.add(results); - } + Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.setRaw(true); + List results = Lists. newArrayList(); + try (RegionScanner scanner = region.getScanner(scan);) { + scanner.next(results); + } + // HBase ignores the time range on a raw scan (HBASE-7362) + if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) { + Cell kv = results.get(0); + if (kv.getTypeByte() == Type.Delete.getCode()) { + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PFunction function = newDeletedFunctionMarker(kv.getTimestamp()); + metaDataCache.put(cacheKey, function); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); + return function; + } + } + return null; + } - return getTableFromCells(tableCellList, allColumnCellList, clientTimeStamp, clientVersion); + private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, + long clientTimeStamp) throws IOException { + if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { + return null; } - private PTable getTableFromCells(List tableCellList, List> allColumnCellList, - long clientTimeStamp, int clientVersion) - throws IOException, SQLException { - return getTableFromCells(tableCellList, allColumnCellList, clientTimeStamp, clientVersion, null); + Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.setRaw(true); + List results = Lists. 
newArrayList(); + try (RegionScanner scanner = region.getScanner(scan);) { + scanner.next(results); } + // HBase ignores the time range on a raw scan (HBASE-7362) + if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) { + Cell kv = results.get(0); + if (kv.getTypeByte() == Type.Delete.getCode()) { + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PSchema schema = newDeletedSchemaMarker(kv.getTimestamp()); + metaDataCache.put(cacheKey, schema); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); + return schema; + } + } + return null; + } + + private static PTable newDeletedTableMarker(long timestamp) { + try { + return new PTableImpl.Builder().setType(PTableType.TABLE).setTimeStamp(timestamp) + .setPkColumns(Collections.<PColumn> emptyList()) + .setAllColumns(Collections.<PColumn> emptyList()) + .setFamilyAttributes(Collections.<PColumnFamily> emptyList()) + .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA).setIndexes(Collections.<PTable> emptyList()) + .setPhysicalNames(Collections.<PName> emptyList()).build(); + } catch (SQLException e) { + // Should never happen + return null; + } + } + + private static PFunction newDeletedFunctionMarker(long timestamp) { + return new PFunction(timestamp); + } + + private static PSchema newDeletedSchemaMarker(long timestamp) { + return new PSchema(timestamp); + } + + private static boolean isTableDeleted(PTable table) { + return table.getName() == null; + } + + private static boolean isSchemaDeleted(PSchema schema) { + return schema.getSchemaName() == null; + } + + private static boolean isFunctionDeleted(PFunction function) { + return function.getFunctionName() == null; + } + + private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, + long clientTimeStamp, long asOfTimeStamp, int clientVersion) throws IOException, SQLException { + Region region = env.getRegion(); + PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); + // We always cache the latest version - fault in if not in cache + if ( + table != null + || (table = buildTable(key, cacheKey, region, asOfTimeStamp, clientVersion)) != null + ) { + return table; + } + // if not found then check if newer table already exists and add delete marker for timestamp + // found + if ( + table == null && (table = buildDeletedTable(key, cacheKey, region, clientTimeStamp)) != null + ) { + return table; + } + return null; + } + + /** + * Returns a PTable if its found in the cache. 
+ */ + private PTable getTableFromCache(ImmutableBytesPtr cacheKey, long clientTimeStamp, + int clientVersion) { + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PTable table = (PTable) metaDataCache.getIfPresent(cacheKey); + if (table == null) { + metricsSource.incrementMetadataCacheMissCount(); + } else { + metricsSource.incrementMetadataCacheHitCount(); + } + return table; + } + + private PFunction loadFunction(RegionCoprocessorEnvironment env, byte[] key, + ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp, boolean isReplace, + List<Mutation> deleteMutationsForReplace) throws IOException, SQLException { + Region region = env.getRegion(); + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PFunction function = (PFunction) metaDataCache.getIfPresent(cacheKey); + // We always cache the latest version - fault in if not in cache + if (function != null && !isReplace) { + metricsSource.incrementMetadataCacheHitCount(); + return function; + } + metricsSource.incrementMetadataCacheMissCount(); + ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1); + arrayList.add(key); + List<PFunction> functions = + buildFunctions(arrayList, region, asOfTimeStamp, isReplace, deleteMutationsForReplace); + if (functions != null) return functions.get(0); + // if not found then check if newer table already exists and add delete marker for timestamp + // found + if ( + function == null + && (function = buildDeletedFunction(key, cacheKey, region, clientTimeStamp)) != null + ) { + return function; + } + return null; + } + + private PSchema loadSchema(RegionCoprocessorEnvironment env, byte[] key, + ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp) + throws IOException, SQLException { + Region region = env.getRegion(); + Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + PSchema schema = (PSchema) metaDataCache.getIfPresent(cacheKey); + // We always cache the latest version - fault in if not in cache + if (schema != null) { + metricsSource.incrementMetadataCacheHitCount(); + return schema; + } + metricsSource.incrementMetadataCacheMissCount(); + ArrayList<byte[]> arrayList = new ArrayList<byte[]>(1); + arrayList.add(key); + List<PSchema> schemas = buildSchemas(arrayList, region, asOfTimeStamp, cacheKey); + if (schemas != null) return schemas.get(0); + // if not found then check if newer schema already exists and add delete marker for timestamp + // found + if ( + schema == null + && (schema = buildDeletedSchema(key, cacheKey, region, clientTimeStamp)) != null + ) { + return schema; + } + return null; + } + + /** Returns null if the physical table row information is not present. 
*/ + private static void getParentAndPhysicalNames(List tableMetadata, + byte[][] parentTenantSchemaTableNames, byte[][] physicalSchemaTableNames) { + int size = tableMetadata.size(); + byte[][] rowKeyMetaData = new byte[3][]; + MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + Mutation physicalTableRow = null; + Mutation parentTableRow = null; + boolean physicalTableLinkFound = false; + boolean parentTableLinkFound = false; + if (size >= 2) { + int i = size - 1; + while (i >= 1) { + Mutation m = tableMetadata.get(i); + if (m instanceof Put) { + LinkType linkType = MetaDataUtil.getLinkType(m); + if (linkType == PHYSICAL_TABLE) { + physicalTableRow = m; + physicalTableLinkFound = true; + } + if (linkType == PARENT_TABLE) { + parentTableRow = m; + parentTableLinkFound = true; + } + } + if (physicalTableLinkFound && parentTableLinkFound) { + break; + } + i--; + } + } + if (!parentTableLinkFound) { + parentTenantSchemaTableNames[0] = null; + parentTenantSchemaTableNames[1] = null; + parentTenantSchemaTableNames[2] = null; - /** - * Utility method to get a PTable from the HBase Cells either read from SYSTEM.CATALOG or - * generated by a DDL statement. Optionally, an existing PTable can be provided so that its - * properties can be merged with the "new" PTable created from the Cell. This is useful when - * generating an updated PTable following an ALTER DDL statement - * @param tableCellList Cells from the header row containing table level properties - * @param allColumnCellList Cells from column or link rows - * @param clientTimeStamp client-provided timestamp - * @param clientVersion client-provided version - * @param oldTable Optional parameters containing properties for an existing PTable - * @return - * @throws IOException - * @throws SQLException - */ - private PTable getTableFromCells(List tableCellList, List> allColumnCellList, - long clientTimeStamp, int clientVersion, PTable oldTable) - throws IOException, SQLException { - Cell[] tableKeyValues = new Cell[TABLE_KV_COLUMNS.size()]; - Cell[] colKeyValues = new Cell[COLUMN_KV_COLUMNS.size()]; - - // Create PTable based on KeyValues from scan - Cell keyValue = tableCellList.get(0); - byte[] keyBuffer = keyValue.getRowArray(); - int keyLength = keyValue.getRowLength(); - int keyOffset = keyValue.getRowOffset(); - PName tenantId = newPName(keyBuffer, keyOffset, keyLength); - int tenantIdLength = (tenantId == null) ? 
0 : tenantId.getBytes().length; - if (tenantIdLength == 0) { - tenantId = null; - } - PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength); - int schemaNameLength = schemaName.getBytes().length; - int tableNameLength = keyLength - schemaNameLength - 1 - tenantIdLength - 1; - byte[] tableNameBytes = new byte[tableNameLength]; - System.arraycopy(keyBuffer, keyOffset + schemaNameLength + 1 + tenantIdLength + 1, - tableNameBytes, 0, tableNameLength); - PName tableName = PNameFactory.newName(tableNameBytes); - - int offset = tenantIdLength + schemaNameLength + tableNameLength + 3; - // This will prevent the client from continually looking for the current - // table when we know that there will never be one since we disallow updates - // unless the table is the latest - - long timeStamp = keyValue.getTimestamp(); - PTableImpl.Builder builder = null; - if (oldTable != null) { - builder = PTableImpl.builderFromExisting(oldTable); - List columns = oldTable.getColumns(); - if (oldTable.getBucketNum() != null && oldTable.getBucketNum() > 0) { - //if it's salted, skip the salt column -- it will get added back during - //the build process - columns = columns.stream().skip(1).collect(Collectors.toList()); - } - builder.setColumns(columns); + } + if (!physicalTableLinkFound) { + physicalSchemaTableNames[0] = null; + physicalSchemaTableNames[1] = null; + physicalSchemaTableNames[2] = null; + } + if (physicalTableLinkFound) { + getSchemaTableNames(physicalTableRow, physicalSchemaTableNames); + } + if (parentTableLinkFound) { + getSchemaTableNames(parentTableRow, parentTenantSchemaTableNames); + } + } + + private static void getSchemaTableNames(Mutation row, byte[][] schemaTableNames) { + byte[][] rowKeyMetaData = new byte[5][]; + getVarChars(row.getRow(), 5, rowKeyMetaData); + byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] colBytes = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; + byte[] famBytes = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]; + if ((colBytes == null || colBytes.length == 0) && (famBytes != null && famBytes.length > 0)) { + byte[] sName = + SchemaUtil.getSchemaNameFromFullName(famBytes).getBytes(StandardCharsets.UTF_8); + byte[] tName = SchemaUtil.getTableNameFromFullName(famBytes).getBytes(StandardCharsets.UTF_8); + schemaTableNames[0] = tenantId; + schemaTableNames[1] = sName; + schemaTableNames[2] = tName; + } + } + + @Override + public void createTable(RpcController controller, CreateTableRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + byte[][] rowKeyMetaData = new byte[3][]; + byte[] schemaName = null; + byte[] tableName = null; + String fullTableName = null; + try { + int clientVersion = request.getClientVersion(); + List tableMetadata = ProtobufUtil.getMutations(request); + MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + fullTableName = SchemaUtil.getTableName(schemaName, tableName); + boolean isNamespaceMapped = MetaDataUtil.isNameSpaceMapped(tableMetadata, + GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); + final IndexType indexType = MetaDataUtil.getIndexType(tableMetadata, + GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); + byte[] 
parentSchemaName = null; + byte[] parentTableName = null; + PTable parentTable = + request.hasParentTable() ? PTableImpl.createFromProto(request.getParentTable()) : null; + PTableType tableType = MetaDataUtil.getTableType(tableMetadata, + GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); + + // Load table to see if it already exists + byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName); + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); + boolean isChangeDetectionEnabled = MetaDataUtil.getChangeDetectionEnabled(tableMetadata); + + PTable table = null; + // Get as of latest timestamp so we can detect if we have a newer table that already + // exists without making an additional query + table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP, + clientVersion); + if (table != null) { + if (table.getTimeStamp() < clientTimeStamp) { + // If the table is older than the client time stamp and it's deleted, + // continue + if (!isTableDeleted(table)) { + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setTable(PTableImpl.toProto(table)); + done.run(builder.build()); + return; + } } else { - builder = new PTableImpl.Builder(); - } - builder.setTenantId(tenantId); - builder.setSchemaName(schemaName); - builder.setTableName(tableName); - - int i = 0; - int j = 0; - while (i < tableCellList.size() && j < TABLE_KV_COLUMNS.size()) { - Cell kv = tableCellList.get(i); - Cell searchKv = TABLE_KV_COLUMNS.get(j); - int cmp = - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), searchKv.getQualifierArray(), - searchKv.getQualifierOffset(), searchKv.getQualifierLength()); - if (cmp == 0) { - timeStamp = Math.max(timeStamp, kv.getTimestamp()); // Find max timestamp of table - // header row - tableKeyValues[j++] = kv; - i++; - } else if (cmp > 0) { - timeStamp = Math.max(timeStamp, kv.getTimestamp()); - tableKeyValues[j++] = null; - } else { - i++; // shouldn't happen - means unexpected KV in system table header row + builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setTable(PTableImpl.toProto(table)); + done.run(builder.build()); + return; + } + } + + // check if the table was previously dropped, but had child views that have not + // yet been cleaned up. 
+ // Note that for old clients connecting to a 4.15 server whose metadata hasn't been + // upgraded, we disallow dropping a base table that has child views, so in that case + // this is a no-op (See PHOENIX-5544) + if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) { + ServerViewUtil.dropChildViews(env, tenantIdBytes, schemaName, tableName, + getSystemTableForChildLinks(clientVersion, env.getConfiguration()).getName()); + } + + byte[] parentTableKey = null; + Set indexes = new HashSet(); + ; + byte[] cPhysicalName = + SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped).getBytes(); + byte[] cParentPhysicalName = null; + if (tableType == PTableType.VIEW) { + byte[][] parentSchemaTableNames = new byte[3][]; + byte[][] parentPhysicalSchemaTableNames = new byte[3][]; + getParentAndPhysicalNames(tableMetadata, parentSchemaTableNames, + parentPhysicalSchemaTableNames); + if (parentPhysicalSchemaTableNames[2] != null) { + if (parentTable == null) { + // This is needed when we connect with a 4.14 client to + // a 4.15.0+ server. + // In that case we need to resolve the parent table on + // the server. + parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, parentPhysicalSchemaTableNames[1], + parentPhysicalSchemaTableNames[2], clientTimeStamp, clientVersion); + if (parentTable == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; } - } - // TABLE_TYPE, TABLE_SEQ_NUM and COLUMN_COUNT are required. - if (tableKeyValues[TABLE_TYPE_INDEX] == null || tableKeyValues[TABLE_SEQ_NUM_INDEX] == null - || tableKeyValues[COLUMN_COUNT_INDEX] == null) { - // since we allow SYSTEM.CATALOG to split in certain cases there might be child links or - // other metadata rows that are invalid and should be ignored - Cell cell = tableCellList.get(0); - LOGGER.error("Found invalid metadata rows for rowkey " + - Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); - return null; - } - - Cell tableTypeKv = tableKeyValues[TABLE_TYPE_INDEX]; - PTableType tableType = - PTableType - .fromSerializedValue(tableTypeKv.getValueArray()[tableTypeKv.getValueOffset()]); - builder.setType(tableType); - - Cell tableSeqNumKv = tableKeyValues[TABLE_SEQ_NUM_INDEX]; - long tableSeqNum = - PLong.INSTANCE.getCodec().decodeLong(tableSeqNumKv.getValueArray(), - tableSeqNumKv.getValueOffset(), SortOrder.getDefault()); - builder.setSequenceNumber(tableSeqNum); - - Cell columnCountKv = tableKeyValues[COLUMN_COUNT_INDEX]; - int columnCount = - PInteger.INSTANCE.getCodec().decodeInt(columnCountKv.getValueArray(), - columnCountKv.getValueOffset(), SortOrder.getDefault()); - - Cell pkNameKv = tableKeyValues[PK_NAME_INDEX]; - PName pkName = - pkNameKv != null ? newPName(pkNameKv.getValueArray(), pkNameKv.getValueOffset(), - pkNameKv.getValueLength()) : null; - builder.setPkName(pkName != null ? pkName : oldTable != null ? oldTable.getPKName() : null); - - Cell saltBucketNumKv = tableKeyValues[SALT_BUCKETS_INDEX]; - Integer saltBucketNum = - saltBucketNumKv != null ? (Integer) PInteger.INSTANCE.getCodec().decodeInt( - saltBucketNumKv.getValueArray(), saltBucketNumKv.getValueOffset(), SortOrder.getDefault()) : null; - if (saltBucketNum != null && saltBucketNum.intValue() == 0) { - saltBucketNum = null; // Zero salt buckets means not salted - } - builder.setBucketNum(saltBucketNum != null ? saltBucketNum : oldTable != null ? 
oldTable.getBucketNum() : null); - - //data table name is used to find the parent table for indexes later - Cell dataTableNameKv = tableKeyValues[DATA_TABLE_NAME_INDEX]; - PName dataTableName = - dataTableNameKv != null ? newPName(dataTableNameKv.getValueArray(), - dataTableNameKv.getValueOffset(), dataTableNameKv.getValueLength()) : null; - - Cell physicalTableNameKv = tableKeyValues[PHYSICAL_TABLE_NAME_INDEX]; - PName physicalTableName = - physicalTableNameKv != null ? newPName(physicalTableNameKv.getValueArray(), - physicalTableNameKv.getValueOffset(), physicalTableNameKv.getValueLength()) : null; - builder.setPhysicalTableName(physicalTableName != null ? physicalTableName : oldTable != null ? oldTable.getPhysicalName(true) : null); - - Cell indexStateKv = tableKeyValues[INDEX_STATE_INDEX]; - PIndexState indexState = - indexStateKv == null ? null : PIndexState.fromSerializedValue(indexStateKv - .getValueArray()[indexStateKv.getValueOffset()]); - builder.setState(indexState != null ? indexState : oldTable != null ? oldTable.getIndexState() : null); - - Cell immutableRowsKv = tableKeyValues[IMMUTABLE_ROWS_INDEX]; - boolean isImmutableRows = immutableRowsKv != null && (Boolean) PBoolean.INSTANCE.toObject( - immutableRowsKv.getValueArray(), immutableRowsKv.getValueOffset(), - immutableRowsKv.getValueLength()); - builder.setImmutableRows(immutableRowsKv != null ? isImmutableRows : - oldTable != null && oldTable.isImmutableRows()); - - Cell defaultFamilyNameKv = tableKeyValues[DEFAULT_COLUMN_FAMILY_INDEX]; - PName defaultFamilyName = defaultFamilyNameKv != null ? newPName(defaultFamilyNameKv.getValueArray(), defaultFamilyNameKv.getValueOffset(), defaultFamilyNameKv.getValueLength()) : null; - builder.setDefaultFamilyName(defaultFamilyName != null ? defaultFamilyName : oldTable != null ? oldTable.getDefaultFamilyName() : null); - - Cell viewStatementKv = tableKeyValues[VIEW_STATEMENT_INDEX]; - String viewStatement = viewStatementKv != null ? (String) PVarchar.INSTANCE.toObject(viewStatementKv.getValueArray(), viewStatementKv.getValueOffset(), - viewStatementKv.getValueLength()) : null; - builder.setViewStatement(viewStatement != null ? viewStatement : oldTable != null ? oldTable.getViewStatement() : null); - - Cell disableWALKv = tableKeyValues[DISABLE_WAL_INDEX]; - boolean disableWAL = disableWALKv == null ? PTable.DEFAULT_DISABLE_WAL : Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(disableWALKv.getValueArray(), disableWALKv.getValueOffset(), disableWALKv.getValueLength())); - builder.setDisableWAL(disableWALKv != null ? disableWAL : - oldTable != null && oldTable.isWALDisabled()); - - Cell multiTenantKv = tableKeyValues[MULTI_TENANT_INDEX]; - boolean multiTenant = multiTenantKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(multiTenantKv.getValueArray(), - multiTenantKv.getValueOffset(), multiTenantKv.getValueLength())); - builder.setMultiTenant(multiTenantKv != null ? multiTenant : - oldTable != null && oldTable.isMultiTenant()); - - Cell storeNullsKv = tableKeyValues[STORE_NULLS_INDEX]; - boolean storeNulls = storeNullsKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(storeNullsKv.getValueArray(), storeNullsKv.getValueOffset(), - storeNullsKv.getValueLength())); - builder.setStoreNulls(storeNullsKv != null ? 
storeNulls : - oldTable != null && oldTable.getStoreNulls()); - - Cell transactionalKv = tableKeyValues[TRANSACTIONAL_INDEX]; - Cell transactionProviderKv = tableKeyValues[TRANSACTION_PROVIDER_INDEX]; - TransactionFactory.Provider transactionProvider = null; - if (transactionProviderKv == null) { - if (transactionalKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject( - transactionalKv.getValueArray(), - transactionalKv.getValueOffset(), - transactionalKv.getValueLength()))) { - // For backward compat, prior to client setting TRANSACTION_PROVIDER - transactionProvider = TransactionFactory.Provider.NOTAVAILABLE; + if ( + parentSchemaTableNames[2] != null + && Bytes.compareTo(parentSchemaTableNames[2], parentPhysicalSchemaTableNames[2]) + != 0 + ) { + // if view is created on view + byte[] tenantId = parentSchemaTableNames[0] == null + ? ByteUtil.EMPTY_BYTE_ARRAY + : parentSchemaTableNames[0]; + parentTable = doGetTable(tenantId, parentSchemaTableNames[1], + parentSchemaTableNames[2], clientTimeStamp, clientVersion); + if (parentTable == null) { + // it could be a global view + parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaTableNames[1], + parentSchemaTableNames[2], clientTimeStamp, clientVersion); + } } + if (parentTable == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + } + parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, + parentPhysicalSchemaTableNames[1], parentPhysicalSchemaTableNames[2]); + cParentPhysicalName = parentTable.getPhysicalName().getBytes(); + for (PTable index : parentTable.getIndexes()) { + indexes.add(TableName.valueOf(index.getPhysicalName().getBytes())); + } } else { - transactionProvider = TransactionFactory.Provider.fromCode( - PTinyint.INSTANCE.getCodec().decodeByte( - transactionProviderKv.getValueArray(), - transactionProviderKv.getValueOffset(), - SortOrder.getDefault())); - } - builder.setTransactionProvider(transactionProviderKv != null || transactionalKv != null - ? transactionProvider : oldTable != null ? oldTable.getTransactionProvider() : null); - - Cell viewTypeKv = tableKeyValues[VIEW_TYPE_INDEX]; - ViewType viewType = viewTypeKv == null ? null : ViewType.fromSerializedValue(viewTypeKv.getValueArray()[viewTypeKv.getValueOffset()]); - builder.setViewType(viewType != null ? viewType : oldTable != null ? oldTable.getViewType() : null); - - PDataType viewIndexIdType = oldTable != null ? oldTable.getviewIndexIdType() : - getViewIndexIdType(tableKeyValues); - builder.setViewIndexIdType(viewIndexIdType); - - Long viewIndexId = getViewIndexId(tableKeyValues, viewIndexIdType); - builder.setViewIndexId(viewIndexId != null ? viewIndexId : oldTable != null ? oldTable.getViewIndexId() : null); - - Cell indexTypeKv = tableKeyValues[INDEX_TYPE_INDEX]; - IndexType indexType = indexTypeKv == null ? null : IndexType.fromSerializedValue(indexTypeKv.getValueArray()[indexTypeKv.getValueOffset()]); - builder.setIndexType(indexType != null ? indexType : oldTable != null ? oldTable.getIndexType() : null); - - Cell baseColumnCountKv = tableKeyValues[BASE_COLUMN_COUNT_INDEX]; - int baseColumnCount = baseColumnCountKv == null ? 0 : PInteger.INSTANCE.getCodec().decodeInt(baseColumnCountKv.getValueArray(), - baseColumnCountKv.getValueOffset(), SortOrder.getDefault()); - builder.setBaseColumnCount(baseColumnCountKv != null ? baseColumnCount : oldTable != null ? 
oldTable.getBaseColumnCount() : 0); - - Cell rowKeyOrderOptimizableKv = tableKeyValues[ROW_KEY_ORDER_OPTIMIZABLE_INDEX]; - boolean rowKeyOrderOptimizable = rowKeyOrderOptimizableKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(rowKeyOrderOptimizableKv.getValueArray(), - rowKeyOrderOptimizableKv.getValueOffset(), - rowKeyOrderOptimizableKv.getValueLength())); - builder.setRowKeyOrderOptimizable(rowKeyOrderOptimizableKv != null ? rowKeyOrderOptimizable : - oldTable != null && oldTable.rowKeyOrderOptimizable()); - - Cell updateCacheFrequencyKv = tableKeyValues[UPDATE_CACHE_FREQUENCY_INDEX]; - long updateCacheFrequency = updateCacheFrequencyKv == null ? 0 : - PLong.INSTANCE.getCodec().decodeLong(updateCacheFrequencyKv.getValueArray(), - updateCacheFrequencyKv.getValueOffset(), SortOrder.getDefault()); - builder.setUpdateCacheFrequency(updateCacheFrequencyKv != null ? updateCacheFrequency : oldTable != null ? oldTable.getUpdateCacheFrequency() : 0); - - // Check the cell tag to see whether the view has modified this property - final byte[] tagUpdateCacheFreq = (updateCacheFrequencyKv == null) ? - HConstants.EMPTY_BYTE_ARRAY : - TagUtil.concatTags(HConstants.EMPTY_BYTE_ARRAY, updateCacheFrequencyKv); - boolean viewModifiedUpdateCacheFrequency = (PTableType.VIEW.equals(tableType)) && - Bytes.contains(tagUpdateCacheFreq, MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); - builder.setViewModifiedUpdateCacheFrequency(!Bytes.equals(tagUpdateCacheFreq, - HConstants.EMPTY_BYTE_ARRAY) ? viewModifiedUpdateCacheFrequency : - oldTable != null && oldTable.hasViewModifiedUpdateCacheFrequency()); - - Cell indexDisableTimestampKv = tableKeyValues[INDEX_DISABLE_TIMESTAMP]; - long indexDisableTimestamp = indexDisableTimestampKv == null ? 0L : PLong.INSTANCE.getCodec().decodeLong(indexDisableTimestampKv.getValueArray(), - indexDisableTimestampKv.getValueOffset(), SortOrder.getDefault()); - builder.setIndexDisableTimestamp(indexDisableTimestampKv != null ? - indexDisableTimestamp : oldTable != null ? oldTable.getIndexDisableTimestamp() : 0L); - - Cell isNamespaceMappedKv = tableKeyValues[IS_NAMESPACE_MAPPED_INDEX]; - boolean isNamespaceMapped = isNamespaceMappedKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(isNamespaceMappedKv.getValueArray(), - isNamespaceMappedKv.getValueOffset(), isNamespaceMappedKv.getValueLength())); - builder.setNamespaceMapped(isNamespaceMappedKv != null ? isNamespaceMapped : - oldTable != null && oldTable.isNamespaceMapped()); - - Cell autoPartitionSeqKv = tableKeyValues[AUTO_PARTITION_SEQ_INDEX]; - String autoPartitionSeq = autoPartitionSeqKv != null ? (String) PVarchar.INSTANCE.toObject(autoPartitionSeqKv.getValueArray(), autoPartitionSeqKv.getValueOffset(), - autoPartitionSeqKv.getValueLength()) : null; - builder.setAutoPartitionSeqName(autoPartitionSeq != null - ? autoPartitionSeq : oldTable != null ? oldTable.getAutoPartitionSeqName() : null); - - Cell isAppendOnlySchemaKv = tableKeyValues[APPEND_ONLY_SCHEMA_INDEX]; - boolean isAppendOnlySchema = isAppendOnlySchemaKv != null && Boolean.TRUE.equals( - PBoolean.INSTANCE.toObject(isAppendOnlySchemaKv.getValueArray(), - isAppendOnlySchemaKv.getValueOffset(), isAppendOnlySchemaKv.getValueLength())); - builder.setAppendOnlySchema(isAppendOnlySchemaKv != null ? 
isAppendOnlySchema : - oldTable != null && oldTable.isAppendOnlySchema()); - - Cell storageSchemeKv = tableKeyValues[STORAGE_SCHEME_INDEX]; - //TODO: change this once we start having other values for storage schemes - ImmutableStorageScheme storageScheme = storageSchemeKv == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ImmutableStorageScheme - .fromSerializedValue((byte) PTinyint.INSTANCE.toObject(storageSchemeKv.getValueArray(), - storageSchemeKv.getValueOffset(), storageSchemeKv.getValueLength())); - builder.setImmutableStorageScheme(storageSchemeKv != null ? storageScheme : - oldTable != null ? oldTable.getImmutableStorageScheme() : ImmutableStorageScheme.ONE_CELL_PER_COLUMN); - - Cell encodingSchemeKv = tableKeyValues[QUALIFIER_ENCODING_SCHEME_INDEX]; - QualifierEncodingScheme encodingScheme = encodingSchemeKv == null ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : QualifierEncodingScheme - .fromSerializedValue((byte) PTinyint.INSTANCE.toObject(encodingSchemeKv.getValueArray(), - encodingSchemeKv.getValueOffset(), encodingSchemeKv.getValueLength())); - builder.setQualifierEncodingScheme(encodingSchemeKv != null ? encodingScheme : - oldTable != null ? oldTable.getEncodingScheme() : QualifierEncodingScheme.NON_ENCODED_QUALIFIERS); - - Cell useStatsForParallelizationKv = tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX]; - Boolean useStatsForParallelization = useStatsForParallelizationKv == null ? null : - Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(), useStatsForParallelizationKv.getValueOffset(), useStatsForParallelizationKv.getValueLength())); - builder.setUseStatsForParallelization(useStatsForParallelization != null ? - useStatsForParallelization : oldTable != null ? oldTable.useStatsForParallelization() : null); - Cell lastDDLTimestampKv = tableKeyValues[LAST_DDL_TIMESTAMP_INDEX]; - Long lastDDLTimestamp = lastDDLTimestampKv == null ? - null : PLong.INSTANCE.getCodec().decodeLong(lastDDLTimestampKv.getValueArray(), - lastDDLTimestampKv.getValueOffset(), SortOrder.getDefault()); - builder.setLastDDLTimestamp(lastDDLTimestampKv != null ? lastDDLTimestamp : - oldTable != null ? oldTable.getLastDDLTimestamp() : null); - - Cell changeDetectionEnabledKv = tableKeyValues[CHANGE_DETECTION_ENABLED_INDEX]; - boolean isChangeDetectionEnabled = changeDetectionEnabledKv != null - && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(changeDetectionEnabledKv.getValueArray(), - changeDetectionEnabledKv.getValueOffset(), - changeDetectionEnabledKv.getValueLength())); - builder.setIsChangeDetectionEnabled(changeDetectionEnabledKv != null ? - isChangeDetectionEnabled : oldTable != null && oldTable.isChangeDetectionEnabled()); - - Cell schemaVersionKv = tableKeyValues[SCHEMA_VERSION_INDEX]; - String schemaVersion = schemaVersionKv != null ? (String) PVarchar.INSTANCE.toObject( - schemaVersionKv.getValueArray(), schemaVersionKv.getValueOffset(), schemaVersionKv.getValueLength()) - : null; - builder.setSchemaVersion(schemaVersion != null ? - schemaVersion : oldTable != null ? oldTable.getSchemaVersion() : null); - - Cell externalSchemaIdKv = tableKeyValues[EXTERNAL_SCHEMA_ID_INDEX]; - String externalSchemaId = externalSchemaIdKv != null ? - (String) PVarchar.INSTANCE.toObject(externalSchemaIdKv.getValueArray(), - externalSchemaIdKv.getValueOffset(), externalSchemaIdKv.getValueLength()) - : null; - builder.setExternalSchemaId(externalSchemaId != null ? externalSchemaId : - oldTable != null ? 
oldTable.getExternalSchemaId() : null); - - Cell streamingTopicNameKv = tableKeyValues[STREAMING_TOPIC_NAME_INDEX]; - String streamingTopicName = streamingTopicNameKv != null ? - (String) PVarchar.INSTANCE.toObject(streamingTopicNameKv.getValueArray(), - streamingTopicNameKv.getValueOffset(), streamingTopicNameKv.getValueLength()) - : null; - builder.setStreamingTopicName(streamingTopicName != null ? streamingTopicName : - oldTable != null ? oldTable.getStreamingTopicName() : null); - - Cell includeSpecKv = tableKeyValues[CDC_INCLUDE_INDEX]; - String includeSpec = includeSpecKv != null ? - (String) PVarchar.INSTANCE.toObject(includeSpecKv.getValueArray(), - includeSpecKv.getValueOffset(), includeSpecKv.getValueLength()) - : null; - builder.setCDCIncludeScopes(includeSpec != null ? - CDCUtil.makeChangeScopeEnumsFromString(includeSpec) : - oldTable != null ? oldTable.getCDCIncludeScopes() : null); - - Cell indexWhereKv = tableKeyValues[INDEX_WHERE_INDEX]; - String indexWhere = indexWhereKv != null - ? (String) PVarchar.INSTANCE.toObject(indexWhereKv.getValueArray(), - indexWhereKv.getValueOffset(), indexWhereKv.getValueLength()) - : null; - builder.setIndexWhere(indexWhere != null ? indexWhere - : oldTable != null ? oldTable.getIndexWhere() : null); - - Cell maxLookbackAgeKv = tableKeyValues[MAX_LOOKBACK_AGE_INDEX]; - Long maxLookbackAge = maxLookbackAgeKv == null ? null : - PLong.INSTANCE.getCodec().decodeLong(maxLookbackAgeKv.getValueArray(), - maxLookbackAgeKv.getValueOffset(), SortOrder.getDefault()); - if (tableType == PTableType.VIEW) { - byte[] viewKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), - schemaName == null ? null : schemaName.getBytes(), tableNameBytes); - maxLookbackAge = scanMaxLookbackAgeFromParent(viewKey, clientTimeStamp); - } - Cell ttlKv = tableKeyValues[TTL_INDEX]; - int ttl = TTL_NOT_DEFINED; - if (ttlKv != null) { - String ttlStr = (String) PVarchar.INSTANCE.toObject( - ttlKv.getValueArray(), - ttlKv.getValueOffset(), - ttlKv.getValueLength()); - ttl = Integer.parseInt(ttlStr); - } - ttl = ttlKv != null ? ttl : oldTable != null - ? oldTable.getTTL() : TTL_NOT_DEFINED; - if (tableType == VIEW && viewType != MAPPED && ttl == TTL_NOT_DEFINED) { - //Scan SysCat to get TTL from Parent View/Table - byte[] viewKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), - schemaName == null ? null : schemaName.getBytes(), tableNameBytes); - ttl = getTTLFromHierarchy(viewKey, clientTimeStamp, false); - - // TODO: Need to Update Cache for Alter Commands, can use PHOENIX-6883. - } - - Cell rowKeyMatcherKv = tableKeyValues[ROW_KEY_MATCHER_INDEX]; - byte[] rowKeyMatcher = rowKeyMatcherKv != null - ? CellUtil.cloneValue(rowKeyMatcherKv) - : HConstants.EMPTY_BYTE_ARRAY; - builder.setRowKeyMatcher(rowKeyMatcher != null ? rowKeyMatcher - : oldTable != null ? oldTable.getRowKeyMatcher() : HConstants.EMPTY_BYTE_ARRAY); - - - // Check the cell tag to see whether the view has modified this property - final byte[] tagUseStatsForParallelization = (useStatsForParallelizationKv == null) ? 
- HConstants.EMPTY_BYTE_ARRAY : - TagUtil.concatTags(HConstants.EMPTY_BYTE_ARRAY, useStatsForParallelizationKv); - boolean viewModifiedUseStatsForParallelization = (PTableType.VIEW.equals(tableType)) && - Bytes.contains(tagUseStatsForParallelization, MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES); - builder.setViewModifiedUseStatsForParallelization(viewModifiedUseStatsForParallelization || - (oldTable != null && oldTable.hasViewModifiedUseStatsForParallelization())); - - boolean setPhysicalName = false; - List columns = Lists.newArrayListWithExpectedSize(columnCount); - List indexes = Lists.newArrayList(); - List physicalTables = Lists.newArrayList(); - PName parentTableName = tableType == INDEX || tableType == CDC ? dataTableName : null; - PName parentSchemaName = tableType == INDEX || tableType == CDC ? schemaName : null; - PName parentLogicalName = null; - EncodedCQCounter cqCounter = null; - if (oldTable != null) { - cqCounter = oldTable.getEncodedCQCounter(); - } else { - cqCounter = (!EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) || tableType == PTableType.VIEW) ? - PTable.EncodedCQCounter.NULL_COUNTER : - new EncodedCQCounter(); - } + // Mapped View + cParentPhysicalName = SchemaUtil + .getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped).getBytes(); - if (timeStamp == HConstants.LATEST_TIMESTAMP) { - timeStamp = lastDDLTimestamp != null ? lastDDLTimestamp : clientTimeStamp; } - builder.setTimeStamp(timeStamp); - + parentSchemaName = parentPhysicalSchemaTableNames[1]; + parentTableName = parentPhysicalSchemaTableNames[2]; - PTable transformingNewTable = null; - boolean isRegularView = (tableType == PTableType.VIEW && viewType != MAPPED); - boolean isThisAViewIndex = false; - for (List columnCellList : allColumnCellList) { - - Cell colKv = columnCellList.get(LINK_TYPE_INDEX); - int colKeyLength = colKv.getRowLength(); + } else if (tableType == PTableType.INDEX) { + parentSchemaName = schemaName; + /* + * For an index we lock the parent table's row which could be a physical table or a view. If + * the parent table is a physical table, then the tenantIdBytes is empty because we allow + * creating an index with a tenant connection only if the parent table is a view. + */ + parentTableName = MetaDataUtil.getParentTableName(tableMetadata); + parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName); + if (parentTable == null) { + // This is needed when we connect with a 4.14 client to a 4.15.0+ server. + // In that case we need to resolve the parent table on the server. + parentTable = doGetTable(tenantIdBytes, parentSchemaName, parentTableName, + clientTimeStamp, null, request.getClientVersion()); + } + if (IndexType.LOCAL == indexType) { + cPhysicalName = parentTable.getPhysicalName().getBytes(); + cParentPhysicalName = parentTable.getPhysicalName().getBytes(); + } else if (parentTable.getType() == PTableType.VIEW) { + // The view index physical table name is constructed from logical name of base table. + // For example, _IDX_SC.TBL1 is the view index name and SC.TBL1 is the logical name of the + // base table. 
+ String namepaceMappedParentLogicalName = MetaDataUtil + .getNamespaceMappedName(parentTable.getBaseTableLogicalName(), isNamespaceMapped); + cPhysicalName = MetaDataUtil.getViewIndexPhysicalName( + namepaceMappedParentLogicalName.getBytes(StandardCharsets.UTF_8)); + cParentPhysicalName = parentTable.getPhysicalName().getBytes(); + } else { + cParentPhysicalName = SchemaUtil + .getPhysicalHBaseTableName(parentSchemaName, parentTableName, isNamespaceMapped) + .getBytes(); + } + } + + getCoprocessorHost().preCreateTable(Bytes.toString(tenantIdBytes), fullTableName, + (tableType == PTableType.VIEW) ? null : TableName.valueOf(cPhysicalName), + cParentPhysicalName == null ? null : TableName.valueOf(cParentPhysicalName), tableType, + /* TODO: During inital create we may not need the family map */ + Collections. emptySet(), indexes); + + Region region = env.getRegion(); + List locks = Lists.newArrayList(); + // Place a lock using key for the table to be created + try { + acquireLock(region, tableKey, locks, false); + + // If the table key resides outside the region, return without doing anything + MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + + if (parentTableName != null) { + // From 4.15 onwards we only need to lock the parent table : + // 1) when creating an index on a table or a view + // 2) if allowSplittableSystemCatalogRollback is true we try to lock the parent table to + // prevent it + // from changing concurrently while a view is being created + if (tableType == PTableType.INDEX || allowSplittableSystemCatalogRollback) { + result = checkTableKeyInRegion(parentTableKey, region); + if (result != null) { + LOGGER.error("Unable to lock parentTableKey " + Bytes.toStringBinary(parentTableKey)); + // if allowSplittableSystemCatalogRollback is true and we can't lock the + // parentTableKey (because + // SYSTEM.CATALOG already split) return UNALLOWED_TABLE_MUTATION so that the client + // knows the create statement failed + MetaDataProtos.MutationCode code = tableType == PTableType.INDEX + ? MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION + : MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION; + builder.setReturnCode(code); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + acquireLock(region, parentTableKey, locks, false); + } + // make sure we haven't gone over our threshold for indexes on this table. + if (execeededIndexQuota(tableType, parentTable)) { + builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + } + + // Add cell for ROW_KEY_ORDER_OPTIMIZABLE = true, as we know that new tables + // conform the correct row key. The exception is for a VIEW, which the client + // sends over depending on its base physical table. 
+ if (tableType != PTableType.VIEW) { + UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp); + } + // If the parent table of the view has the auto partition sequence name attribute, modify + // the + // tableMetadata and set the view statement and partition column correctly + if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) { + long autoPartitionNum = 1; + try (PhoenixConnection connection = + getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class); + Statement stmt = connection.createStatement()) { + String seqName = parentTable.getAutoPartitionSeqName(); + // Not going through the standard route of using statement.execute() as that code path + // is blocked if the metadata hasn't been been upgraded to the new minor release. + String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName); + PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class); + QueryPlan plan = ps.compileQuery(seqNextValueSql); + ResultIterator resultIterator = plan.iterator(); + PhoenixResultSet rs = + ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext()); + rs.next(); + autoPartitionNum = rs.getLong(1); + } catch (SequenceNotFoundException e) { + builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + PColumn autoPartitionCol = + parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable)); + if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) { + builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + builder.setAutoPartitionNum(autoPartitionNum); + + // set the VIEW STATEMENT column of the header row + Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata); + NavigableMap> familyCellMap = tableHeaderPut.getFamilyCellMap(); + List cells = familyCellMap.get(TABLE_FAMILY_BYTES); + Cell cell = cells.get(0); + String autoPartitionWhere = QueryUtil.getViewPartitionClause( + MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum); + String hbaseVersion = VersionInfo.getVersion(); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion); + MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr); + byte[] value = ptr.copyBytesIfNecessary(); + byte[] viewStatement = null; + // if we have an existing where clause add the auto partition where clause to it + if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) { + viewStatement = + Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere)); + } else { + viewStatement = + Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), + parentTable.getTableName().getString(), autoPartitionWhere)); + } + Cell viewStatementCell = PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), VIEW_STATEMENT_BYTES, 0, VIEW_STATEMENT_BYTES.length, + cell.getTimestamp(), viewStatement, 0, viewStatement.length, cell.getType()); + cells.add(viewStatementCell); + + // set the IS_VIEW_REFERENCED column of the auto partition column row + Put autoPartitionPut = 
+ MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata); + familyCellMap = autoPartitionPut.getFamilyCellMap(); + cells = familyCellMap.get(TABLE_FAMILY_BYTES); + cell = cells.get(0); + PDataType dataType = autoPartitionCol.getDataType(); + Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE); + byte[] bytes = new byte[dataType.getByteSize() + 1]; + dataType.toBytes(val, bytes, 0); + Cell viewConstantCell = PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), VIEW_CONSTANT_BYTES, 0, VIEW_CONSTANT_BYTES.length, + cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); + cells.add(viewConstantCell); + } + Long indexId = null; + if (request.hasAllocateIndexId() && request.getAllocateIndexId()) { + String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes); + try (PhoenixConnection connection = getServerConnectionForMetaData(env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + PName physicalName = parentTable.getPhysicalName(); + long seqValue = getViewIndexSequenceValue(connection, tenantIdStr, parentTable); + Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata); + NavigableMap> familyCellMap = tableHeaderPut.getFamilyCellMap(); + List cells = familyCellMap.get(TABLE_FAMILY_BYTES); + Cell cell = cells.get(0); + PDataType dataType = MetaDataUtil.getIndexDataType(tableMetadata, + GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); + Object val = dataType.toObject(seqValue, PLong.INSTANCE); + byte[] bytes = new byte[dataType.getByteSize() + 1]; + dataType.toBytes(val, bytes, 0); + Cell indexIdCell = + PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), + cell.getFamilyLength(), VIEW_INDEX_ID_BYTES, 0, VIEW_INDEX_ID_BYTES.length, + cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); + cells.add(indexIdCell); + indexId = seqValue; + } + } + + // The mutations to create a table are written in the following order: + // 1. Write the child link as if the next two steps fail we + // ignore missing children while processing a parent + // (this is already done at this point, as a separate client-server RPC + // to the ChildLinkMetaDataEndpoint coprocessor) + // 2. Update the encoded column qualifier for the parent table if its on a + // different region server (for tables that use column qualifier encoding) + // if the next step fails we end up wasting a few col qualifiers + // 3. Finally write the mutations to create the table - PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength - offset); - if (colName == null) { - continue; + if (tableType == PTableType.VIEW) { + // If we are connecting with an old client to a server that has new metadata + // i.e. it was previously connected to by a 4.15 client, then the client will + // also send the parent->child link metadata to SYSTEM.CATALOG rather than using + // the new ChildLinkMetaDataEndpoint coprocessor. In this case, we must continue + // doing the server-server RPC to send these mutations to SYSTEM.CHILD_LINK. 
+ if ( + clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG + && getSystemTableForChildLinks(clientVersion, env.getConfiguration()) + .equals(SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, + env.getConfiguration())) + ) { + List childLinkMutations = + MetaDataUtil.removeChildLinkMutations(tableMetadata); + MetaDataResponse response = + processRemoteRegionMutations(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, + childLinkMutations, UNABLE_TO_CREATE_CHILD_LINK); + if (response != null) { + done.run(response); + return; } - int colKeyOffset = offset + colName.getBytes().length + 1; - PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength - colKeyOffset); - - if (isQualifierCounterKV(colKv)) { - if (famName != null) { - Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC); - cqCounter.setValue(famName.getString(), value); - } - } else if (Bytes.compareTo(LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length, colKv.getQualifierArray(), colKv.getQualifierOffset(), colKv.getQualifierLength()) == 0) { - LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]); - if (linkType == LinkType.INDEX_TABLE) { - addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes, clientVersion); - } else if (linkType == PHYSICAL_TABLE) { - // famName contains the logical name of the parent table. We need to get the actual physical name of the table - PTable parentTable = null; - // call getTable() on famName only if it does not start with _IDX_. - // Table name starting with _IDX_ always must refer to HBase table that is - // shared by all view indexes on the given table/view hierarchy. - // _IDX_ is HBase table that does not have corresponding PTable representation - // in Phoenix, hence there is no point of calling getTable(). - if (!famName.getString().startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX) - && indexType != IndexType.LOCAL) { - try { - parentTable = doGetTable(null, - SchemaUtil.getSchemaNameFromFullName(famName.getBytes()) - .getBytes(StandardCharsets.UTF_8), - SchemaUtil.getTableNameFromFullName(famName.getBytes()) - .getBytes(StandardCharsets.UTF_8), clientTimeStamp, - clientVersion); - } catch (SQLException e) { - if (e.getErrorCode() - != SQLExceptionCode.GET_TABLE_ERROR.getErrorCode()) { - LOGGER.error( - "Error while retrieving getTable for PHYSICAL_TABLE link to {}", - famName, e); - throw e; - } - } - if (isSystemCatalogSplittable - && (parentTable == null || isTableDeleted(parentTable))) { - // parentTable is neither in the cache nor in the local region. Since - // famName is only logical name, we need to find the physical table. - // Hence, it is recommended to scan SYSTEM.CATALOG table again using - // separate CQSI connection as SYSTEM.CATALOG is splittable so the - // PTable with famName might be available on different region. - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - parentTable = connection.getTableNoCache(famName.getString()); - } catch (TableNotFoundException e) { - // It is ok to swallow this exception since this could be a view index and _IDX_ table is not there. 
- } - } - } - - if (parentTable == null || isTableDeleted(parentTable)) { - if (indexType == IndexType.LOCAL) { - PName tablePhysicalName = getPhysicalTableName( - env.getRegion(),null, - SchemaUtil.getSchemaNameFromFullName( - famName.getBytes()).getBytes(StandardCharsets.UTF_8), - SchemaUtil.getTableNameFromFullName( - famName.getBytes()).getBytes(StandardCharsets.UTF_8), - clientTimeStamp); - if (tablePhysicalName == null) { - physicalTables.add(famName); - setPhysicalName = true; - } else { - physicalTables.add(SchemaUtil.getPhysicalHBaseTableName(schemaName, tablePhysicalName, isNamespaceMapped)); - setPhysicalName = true; - } - } else { - physicalTables.add(famName); - setPhysicalName = true; - } - // If this is a view index, then one of the link is IDX_VW -> _IDX_ PhysicalTable link. Since famName is _IDX_ and we can't get this table hence it is null, we need to use actual view name - parentLogicalName = (tableType == INDEX ? SchemaUtil.getTableName(parentSchemaName, parentTableName) : famName); - } else { - String parentPhysicalTableName = parentTable.getPhysicalName().getString(); - physicalTables.add(PNameFactory.newName(parentPhysicalTableName)); - setPhysicalName = true; - parentLogicalName = SchemaUtil.getTableName(parentTable.getSchemaName(), parentTable.getTableName()); - } - } else if (linkType == PARENT_TABLE) { - parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes())); - parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes())); - } else if (linkType == LinkType.EXCLUDED_COLUMN) { - // add the excludedColumn - addExcludedColumnToTable(columns, colName, famName, colKv.getTimestamp()); - } else if (linkType == LinkType.TRANSFORMING_NEW_TABLE) { - transformingNewTable = doGetTable((tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes()) - , SchemaUtil.getSchemaNameFromFullName(famName.getBytes()).getBytes(), SchemaUtil.getTableNameFromFullName(famName.getBytes()).getBytes(), clientTimeStamp, null, clientVersion); - if (transformingNewTable == null) { - // It could be global - transformingNewTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY - , SchemaUtil.getSchemaNameFromFullName(famName.getBytes()).getBytes(), SchemaUtil.getTableNameFromFullName(famName.getBytes()).getBytes(), clientTimeStamp, null, clientVersion); - if (transformingNewTable == null) { - ClientUtil.throwIOException("Transforming new table not found", new TableNotFoundException(schemaName.getString(), famName.getString())); - } - } - } else if (linkType == VIEW_INDEX_PARENT_TABLE) { - byte[] viewKey = getTableKey(tenantId == null ? null : tenantId.getBytes(), - parentSchemaName == null ? null : parentSchemaName.getBytes(), - parentTableName.getBytes()); - //parentViewType should not be Mapped - maxLookbackAge = scanMaxLookbackAgeFromParent(viewKey, clientTimeStamp); - ttl = getTTLFromHierarchy(viewKey, clientTimeStamp, true); - isThisAViewIndex = true; - } - } else { - long columnTimestamp = - columnCellList.get(0).getTimestamp() != HConstants.LATEST_TIMESTAMP ? 
- columnCellList.get(0).getTimestamp() : timeStamp; - boolean isSalted = saltBucketNum != null - || (oldTable != null && - oldTable.getBucketNum() != null && oldTable.getBucketNum() > 0); - addColumnToTable(columnCellList, colName, famName, colKeyValues, columns, - isSalted, baseColumnCount, isRegularView, columnTimestamp); + } + // Pass in the parent's PTable so that we only tag cells corresponding to the + // view's property in case they are different from the parent + ViewUtil.addTagsToPutsForViewAlteredProperties(tableMetadata, parentTable, + (ExtendedCellBuilder) env.getCellBuilder()); + } + // set the last DDL timestamp to the current server time since we're creating the + // table/index/views. + tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(tableKey, clientTimeStamp, + EnvironmentEdgeManager.currentTimeMillis())); + if (tableType == INDEX) { + // Invalidate the cache on each regionserver for parent table/view. + List requests = new ArrayList<>(); + requests.add(new InvalidateServerMetadataCacheRequest(tenantIdBytes, parentSchemaName, + parentTableName)); + invalidateServerMetadataCache(requests); + long currentTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + // If table type is index, then update the last ddl timestamp of the parent + // table or immediate parent view. + tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(parentTableKey, currentTimestamp, + currentTimestamp)); + } + + // and if we're doing change detection on this table or view, notify the + // external schema registry and get its schema id + if (isChangeDetectionEnabled) { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + try { + exportSchema(tableMetadata, tableKey, clientTimeStamp, clientVersion, null); + metricsSource.incrementCreateExportCount(); + metricsSource + .updateCreateExportTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + } catch (IOException ie) { + metricsSource.incrementCreateExportFailureCount(); + metricsSource.updateCreateExportFailureTime( + EnvironmentEdgeManager.currentTimeMillis() - startTime); + // If we fail to write to the schema registry, fail the entire + // CREATE TABLE or VIEW operation so we stay consistent + LOGGER.error("Error writing schema to external schema registry", ie); + builder.setReturnCode(MetaDataProtos.MutationCode.ERROR_WRITING_TO_SCHEMA_REGISTRY); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + } + + // When we drop a view we first drop the view metadata and then drop the parent->child + // linking row + List localMutations = Lists.newArrayListWithExpectedSize(tableMetadata.size()); + List remoteMutations = Lists.newArrayListWithExpectedSize(2); + // check to see if there are any mutations that should not be applied to this region + separateLocalAndRemoteMutations(region, tableMetadata, localMutations, remoteMutations); + if (!remoteMutations.isEmpty()) { + // there should only be remote mutations if we are creating a view that uses + // encoded column qualifiers (the remote mutations are to update the encoded + // column qualifier counter on the parent table) + if ( + parentTable != null && tableType == PTableType.VIEW + && parentTable.getEncodingScheme() != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ) { + // TODO: Avoid doing server-server RPC when we have held row locks + MetaDataResponse response = processRemoteRegionMutations(SYSTEM_CATALOG_NAME_BYTES, + remoteMutations, MetaDataProtos.MutationCode.UNABLE_TO_UPDATE_PARENT_TABLE); + 
clearRemoteTableFromCache(clientTimeStamp, + parentTable.getSchemaName() != null + ? parentTable.getSchemaName().getBytes() + : ByteUtil.EMPTY_BYTE_ARRAY, + parentTable.getTableName().getBytes()); + if (response != null) { + done.run(response); + return; } + } else { + String msg = "Found unexpected mutations while creating " + fullTableName; + LOGGER.error(msg); + for (Mutation m : remoteMutations) { + LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); + LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); + } + throw new IllegalStateException(msg); + } + } + + // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the + // system table. Basically, we get all the locks that we don't already hold for all the + // tableMetadata rows. This ensures we don't have deadlock situations (ensuring + // primary and then index table locks are held, in that order). For now, we just don't + // support + // indexing on the system table. This is an issue because of the way we manage batch + // mutation + // in the Indexer. + mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + + // Invalidate the cache - the next getTable call will add it + // TODO: consider loading the table that was just created here, patching up the parent + // table, and updating the cache + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + if (parentTableKey != null) { + metaDataCache.invalidate(new ImmutableBytesPtr(parentTableKey)); + } + metaDataCache.invalidate(cacheKey); + // Get timeStamp from mutations - the above method sets it if it's unset + long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); + if (indexId != null) { + builder.setViewIndexId(indexId); + builder.setViewIndexIdType(PLong.INSTANCE.getSqlType()); + } + builder.setMutationTime(currentTimeStamp); + // send the newly built table back because we generated the DDL timestamp server + // side and the client doesn't have it. + if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { + // if a client uses a connection with currentSCN=t to create the table, + // the table is created with timestamp 't' but the timestamp range in the scan + // used by buildTable does not include 't' due to how SCN is implemented. + clientTimeStamp += 1; } - if (tableType == INDEX && ! isThisAViewIndex) { - byte[] tableKey = SchemaUtil.getTableKey(tenantId == null ? null : tenantId.getBytes(), - parentSchemaName == null ? null : parentSchemaName.getBytes(), parentTableName.getBytes()); - maxLookbackAge = scanMaxLookbackAgeFromParent(tableKey, clientTimeStamp); - } - builder.setMaxLookbackAge(maxLookbackAge != null ? maxLookbackAge : - (oldTable != null ? oldTable.getMaxLookbackAge() : null)); - - if(tableType == INDEX && !isThisAViewIndex && ttl == TTL_NOT_DEFINED) { - //If this is an index on Table get TTL from Table - byte[] tableKey = getTableKey(tenantId == null ? null : tenantId.getBytes(), - parentSchemaName == null ? null : parentSchemaName.getBytes(), - parentTableName.getBytes()); - ttl = getTTLForTable(tableKey, clientTimeStamp); + PTable newTable = buildTable(tableKey, cacheKey, region, clientTimeStamp, clientVersion); + if (newTable != null) { + builder.setTable(PTableImpl.toProto(newTable)); } - builder.setTTL(ttl); - builder.setEncodedCQCounter(cqCounter); - builder.setIndexes(indexes != null ? 
indexes : oldTable != null - ? oldTable.getIndexes() : Collections.emptyList()); + done.run(builder.build()); - if (physicalTables == null || physicalTables.size() == 0) { - builder.setPhysicalNames(oldTable != null ? oldTable.getPhysicalNames() - : ImmutableList.of()); - } else { - builder.setPhysicalNames(ImmutableList.copyOf(physicalTables)); - } - if (!setPhysicalName && oldTable != null) { - builder.setPhysicalTableName(oldTable.getPhysicalName(true)); - } - builder.setTransformingNewTable(transformingNewTable); - - builder.setExcludedColumns(ImmutableList.of()); - builder.setBaseTableLogicalName(parentLogicalName != null ? - parentLogicalName : oldTable != null ? oldTable.getBaseTableLogicalName() : null); - builder.setParentTableName(parentTableName != null ? - parentTableName : oldTable != null ? oldTable.getParentTableName() : null); - builder.setParentSchemaName(parentSchemaName != null ? parentSchemaName : - oldTable != null ? oldTable.getParentSchemaName() : null); - - builder.addOrSetColumns(columns); - // Avoid querying the stats table because we're holding the rowLock here. Issuing an RPC to a remote - // server while holding this lock is a bad idea and likely to cause contention. - return builder.build(); - } - - private Long scanMaxLookbackAgeFromParent(byte[] key, long clientTimeStamp) throws IOException { - Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); - try(Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration())); - ResultScanner scanner = sysCat.getScanner(scan)) { - Result result = scanner.next(); - boolean startCheckingForLink = false; - byte[] parentTableKey = null; - do { - if (result == null) { - return null; - } - else if (startCheckingForLink) { - byte[] linkTypeBytes = result.getValue(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); - if (linkTypeBytes != null) { - LinkType linkType = LinkType.fromSerializedValue(linkTypeBytes[0]); - int rowKeyColMetadataLength = 5; - byte[][] rowKeyMetaData = new byte[rowKeyColMetadataLength][]; - getVarChars(result.getRow(), rowKeyColMetadataLength, rowKeyMetaData); - if (linkType == VIEW_INDEX_PARENT_TABLE) { - parentTableKey = getParentTableKeyFromChildRowKeyMetaData(rowKeyMetaData); - return scanMaxLookbackAgeFromParent(parentTableKey, clientTimeStamp); - } - else if (linkType == PHYSICAL_TABLE) { - parentTableKey = getParentTableKeyFromChildRowKeyMetaData(rowKeyMetaData); - } - } - } - else { - byte[] maxLookbackAgeInBytes = result.getValue(TABLE_FAMILY_BYTES, MAX_LOOKBACK_AGE_BYTES); - if (maxLookbackAgeInBytes != null) { - return PLong.INSTANCE.getCodec().decodeLong(maxLookbackAgeInBytes, 0, SortOrder.getDefault()); - } - } - result = scanner.next(); - startCheckingForLink = true; - } while (result != null); - return parentTableKey == null ? 
null : scanMaxLookbackAgeFromParent(parentTableKey, clientTimeStamp); - } + updateCreateTableDdlSuccessMetrics(tableType); + LOGGER.info("{} created successfully, tableName: {}", tableType, fullTableName); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + LOGGER.error("createTable failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(fullTableName, t)); + } + } + + private void updateCreateTableDdlSuccessMetrics(PTableType tableType) { + if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) { + metricsSource.incrementCreateTableCount(); + } else if (tableType == PTableType.VIEW) { + metricsSource.incrementCreateViewCount(); + } else if (tableType == PTableType.INDEX) { + metricsSource.incrementCreateIndexCount(); + } + } + + private void exportSchema(List tableMetadata, byte[] tableKey, long clientTimestamp, + int clientVersion, PTable oldTable) throws SQLException, IOException { + List tableCellList = MetaDataUtil.getTableCellsFromMutations(tableMetadata); + + List> allColumnsCellList = + MetaDataUtil.getColumnAndLinkCellsFromMutations(tableMetadata); + // getTableFromCells assumes the Cells are sorted as they would be when reading from HBase + Collections.sort(tableCellList, KeyValue.COMPARATOR); + for (List columnCellList : allColumnsCellList) { + Collections.sort(columnCellList, KeyValue.COMPARATOR); } - private byte[] getParentTableKeyFromChildRowKeyMetaData(byte[][] rowKeyMetaData) { - byte[] parentTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - String - parentSchema = - SchemaUtil.getSchemaNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); - byte[] - parentSchemaName = - parentSchema != null ? parentSchema.getBytes(StandardCharsets.UTF_8) : null; - byte[] - parentTableName = - SchemaUtil.getTableNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) - .getBytes(StandardCharsets.UTF_8); - return SchemaUtil.getTableKey(parentTenantId, parentSchemaName, parentTableName); - } - - /** - * Method to return TTL value defined at current level or up the Hierarchy of the view. - * @param viewKey Key of the view for which we have to find TTL - * @param clientTimeStamp Client TimeStamp - * @return TTL value for a given view, if nothing is defined anywhere then return - * TTL_NOT_DEFINED(0). - * @throws IOException - * @throws SQLException - */ + PTable newTable = getTableFromCells(tableCellList, allColumnsCellList, clientTimestamp, + clientVersion, oldTable); + PTable parentTable = null; + // if this is a view, we need to get the columns from its parent table / view + if (newTable != null && newTable.getType().equals(PTableType.VIEW)) { + // TODO why creating generic connection and not getConnectionOnServer? 
+ try (PhoenixConnection conn = + (PhoenixConnection) ConnectionUtil.getInputConnection(env.getConfiguration())) { + newTable = ViewUtil.addDerivedColumnsAndIndexesFromAncestors(conn, newTable); + } + } + Configuration conf = env.getConfiguration(); + SchemaRegistryRepository exporter = + SchemaRegistryRepositoryFactory.getSchemaRegistryRepository(conf); + if (exporter != null) { + SchemaWriter schemaWriter = SchemaWriterFactory.getSchemaWriter(conf); + // we export to an external schema registry, then put the schema id + // to lookup the schema in the registry into SYSTEM.CATALOG so we + // can look it up later (and use it in WAL annotations) + + // Note that if we succeed here but the write to SYSTEM.CATALOG fails, + // we can have "orphaned" rows in the schema registry because there's + // no way to make this fully atomic. + String externalSchemaId = exporter.exportSchema(schemaWriter, newTable); + tableMetadata.add(MetaDataUtil.getExternalSchemaIdUpdate(tableKey, externalSchemaId)); - private int getTTLFromHierarchy(byte[] viewKey, long clientTimeStamp, boolean checkForMappedView) throws IOException, SQLException { - Scan scan = MetaDataUtil.newTableRowsScan(viewKey, MIN_TABLE_TIMESTAMP, clientTimeStamp); - Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, - env.getConfiguration())); - ResultScanner scanner = sysCat.getScanner(scan); - Result result = scanner.next(); + } + } + + private long getViewIndexSequenceValue(PhoenixConnection connection, String tenantIdStr, + PTable parentTable) throws SQLException { + int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets(); + // parentTable is parent of the view index which is the view. View table name is _IDX_+logical + // name of base table + // Since parent is the view, the parentTable.getBaseTableLogicalName() returns the logical full + // name of the base table + PName parentName = parentTable.getBaseTableLogicalName(); + if (parentName == null) { + parentName = SchemaUtil.getPhysicalHBaseTableName(parentTable.getSchemaName(), + parentTable.getTableName(), parentTable.isNamespaceMapped()); + } + SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, parentName, + nSequenceSaltBuckets, parentTable.isNamespaceMapped()); + // Earlier sequence was created at (SCN-1/LATEST_TIMESTAMP) and incremented at the client + // max(SCN,dataTable.getTimestamp), but it seems we should + // use always LATEST_TIMESTAMP to avoid seeing wrong sequence values by different connection + // having SCN + // or not. + long sequenceTimestamp = HConstants.LATEST_TIMESTAMP; + try { + connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), + key.getSequenceName(), Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, + sequenceTimestamp); + } catch (SequenceAlreadyExistsException e) { + // someone else got here first and created the sequence, or it was pre-existing. Not a + // problem. 
+ } - byte[] tableKey = null; - do { + long[] seqValues = new long[1]; + SQLException[] sqlExceptions = new SQLException[1]; + connection.getQueryServices().incrementSequences( + Collections.singletonList(new SequenceAllocation(key, 1)), HConstants.LATEST_TIMESTAMP, + seqValues, sqlExceptions); + if (sqlExceptions[0] != null) { + throw sqlExceptions[0]; + } + return seqValues[0]; + } + + private boolean execeededIndexQuota(PTableType tableType, PTable parentTable) { + return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= maxIndexesPerTable; + } + + private void separateLocalAndRemoteMutations(Region region, List mutations, + List localMutations, List remoteMutations) { + RegionInfo regionInfo = region.getRegionInfo(); + for (Mutation mutation : mutations) { + if (regionInfo.containsRow(mutation.getRow())) { + localMutations.add(mutation); + } else { + remoteMutations.add(mutation); + } + } + } + + @Override + public void dropTable(RpcController controller, DropTableRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + boolean isCascade = request.getCascade(); + byte[][] rowKeyMetaData = new byte[3][]; + String tableType = request.getTableType(); + byte[] schemaName = null; + byte[] tableOrViewName = null; + boolean dropTableStats = false; + final int clientVersion = request.getClientVersion(); + try { + List tableMetadata = ProtobufUtil.getMutations(request); + List childLinkMutations = Lists.newArrayList(); + MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + tableOrViewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + String fullTableName = SchemaUtil.getTableName(schemaName, tableOrViewName); + PTableType pTableType = PTableType.fromSerializedValue(tableType); + // Disallow deletion of a system table + if (pTableType == PTableType.SYSTEM) { + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + + List tableNamesToDelete = Lists.newArrayList(); + List sharedTablesToDelete = Lists.newArrayList(); + + byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableOrViewName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkTableKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + + byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata); + byte[] parentLockKey = null; + // Only lock parent table for indexes + if (parentTableName != null && pTableType == PTableType.INDEX) { + parentLockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, parentTableName); + result = checkTableKeyInRegion(parentLockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + } - if (result == null) { - return TTL_NOT_DEFINED; + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); + PTable loadedTable = doGetTable(tenantIdBytes, schemaName, tableOrViewName, clientTimeStamp, + null, request.getClientVersion()); + if (loadedTable == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + 
} + getCoprocessorHost().preDropTable(Bytes.toString(tenantIdBytes), + SchemaUtil.getTableName(schemaName, tableOrViewName), + TableName.valueOf(loadedTable.getPhysicalName().getBytes()), + getParentPhysicalTableName(loadedTable), pTableType, loadedTable.getIndexes()); + + if (pTableType == PTableType.TABLE || pTableType == PTableType.VIEW) { + // check to see if the table has any child views + try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env, + getSystemTableForChildLinks(clientVersion, env.getConfiguration()))) { + // This call needs to be done before acquiring the row lock on the header row + // for the table/view being dropped, otherwise the calls to resolve its child + // views via PhoenixRuntime.getTableNoCache() will deadlock since this call + // itself needs to get the parent table which needs to acquire a write lock + // on the same header row + Pair, List> descendantViews = + findAllDescendantViews(hTable, env.getConfiguration(), tenantIdBytes, schemaName, + tableOrViewName, clientTimeStamp, true); + List legitimateChildViews = descendantViews.getFirst(); + List orphanChildViews = descendantViews.getSecond(); + if (!legitimateChildViews.isEmpty()) { + if (!isCascade) { + LOGGER.error( + "DROP without CASCADE on tables or views with child views " + "is not permitted"); + // DROP without CASCADE on tables/views with child views is + // not permitted + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; } - - //return TTL_NOT_DEFINED for Index on a Mapped View. - if (checkForMappedView && checkIfViewIsMappedView(result)) { - return TTL_NOT_DEFINED; + if ( + clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && !SchemaUtil + .getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, env.getConfiguration()) + .equals(hTable.getName()) + ) { + // (See PHOENIX-5544) For an old client connecting to a non-upgraded + // server, we disallow dropping a base table/view that has child views. + LOGGER.error("Dropping a table or view that has child views is " + + "not permitted for old clients connecting to a new server " + + "with old metadata (even if CASCADE is provided). " + + "Please upgrade the client at least to " + + MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION); + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; } - - byte[] linkTypeBytes = result.getValue(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES); - byte[][] rowKeyMetaData = new byte[5][]; - getVarChars(result.getRow(), 5, rowKeyMetaData); - //Check if TTL is defined at the current given level - if (result.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { - String ttlStr = (String) PVarchar.INSTANCE.toObject( - result.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES)); - return Integer.parseInt(ttlStr); - } else if (linkTypeBytes != null ) { - String parentSchema =SchemaUtil.getSchemaNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); - byte[] parentViewSchemaName = parentSchema != null - ? parentSchema.getBytes(StandardCharsets.UTF_8) : null; - byte[] parentViewName = SchemaUtil.getTableNameFromFullName( - rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]) - .getBytes(StandardCharsets.UTF_8); - //Get TTL from up the hierarchy, Checking for Parent view link and getting TTL from it. 
- if (LinkType.fromSerializedValue(linkTypeBytes[0]) == PARENT_TABLE) { - byte[] parentViewTenantId = result.getValue(TABLE_FAMILY_BYTES, - PARENT_TENANT_ID_BYTES); - byte[] parentViewKey = SchemaUtil.getTableKey(parentViewTenantId, - parentViewSchemaName, parentViewName); - return getTTLFromHierarchy(parentViewKey, clientTimeStamp, false); - } - - //Store tableKey to use if we don't find TTL at current level and from - //parent views above the hierarchy - if (LinkType.fromSerializedValue(linkTypeBytes[0]) == PHYSICAL_TABLE) { - tableKey = SchemaUtil.getTableKey(null, parentViewSchemaName, - parentViewName); - } + } + + // If the CASCADE option is provided and we have at least one legitimate/orphan + // view stemming from this parent and the client is 4.15+ (or older but + // connecting to an upgraded server), we use the SYSTEM.TASK table to + // asynchronously drop child views + if ( + isCascade && !(legitimateChildViews.isEmpty() && orphanChildViews.isEmpty()) + && (clientVersion >= MIN_SPLITTABLE_SYSTEM_CATALOG || SchemaUtil + .getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, env.getConfiguration()) + .equals(hTable.getName())) + ) { + try (PhoenixConnection conn = getServerConnectionForMetaData(env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder().setConn(conn) + .setTaskType(PTable.TaskType.DROP_CHILD_VIEWS) + .setTenantId(Bytes.toString(tenantIdBytes)) + .setSchemaName(Bytes.toString(schemaName)) + .setTableName(Bytes.toString(tableOrViewName)) + .setTaskStatus(PTable.TaskStatus.CREATED.toString()).setData(null).setPriority(null) + .setStartTs(null).setEndTs(null).setAccessCheckEnabled(this.accessCheckEnabled) + .build()); + } catch (Throwable t) { + LOGGER.error("Adding a task to drop child views failed!", t); } - - result = scanner.next(); - } while (result != null); - - //Return TTL defined at Table level for the given hierarchy as we didn't find TTL any of the views. - return getTTLForTable(tableKey, clientTimeStamp); - - } - - private boolean checkIfViewIsMappedView(Result result) { - byte[] viewTypeBytes = result.getValue(TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES); - if (viewTypeBytes != null && ViewType.fromSerializedValue(viewTypeBytes[0]) == MAPPED) { - return true; + } + } + } + + List locks = Lists.newArrayList(); + try { + acquireLock(region, lockKey, locks, false); + if (parentLockKey != null) { + acquireLock(region, parentLockKey, locks, false); + } + List requests = new ArrayList<>(); + requests.add( + new InvalidateServerMetadataCacheRequest(tenantIdBytes, schemaName, tableOrViewName)); + if (pTableType == INDEX) { + requests.add( + new InvalidateServerMetadataCacheRequest(tenantIdBytes, schemaName, parentTableName)); + long currentTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + // If table type is index, then update the last ddl timestamp of the parent + // table or immediate parent view. 
+ tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(parentLockKey, currentTimestamp, + currentTimestamp)); + } + invalidateServerMetadataCache(requests); + List invalidateList = new ArrayList<>(); + result = doDropTable(lockKey, tenantIdBytes, schemaName, tableOrViewName, parentTableName, + PTableType.fromSerializedValue(tableType), tableMetadata, childLinkMutations, + invalidateList, tableNamesToDelete, sharedTablesToDelete, request.getClientVersion()); + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + done.run(MetaDataMutationResult.toProto(result)); + return; } - return false; + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + + List localMutations = Lists.newArrayListWithExpectedSize(tableMetadata.size()); + List remoteMutations = Lists.newArrayList(); + separateLocalAndRemoteMutations(region, tableMetadata, localMutations, remoteMutations); + if (!remoteMutations.isEmpty()) { + // while dropping a table all the mutations should be local + String msg = "Found unexpected mutations while dropping table " + + SchemaUtil.getTableName(schemaName, tableOrViewName); + LOGGER.error(msg); + for (Mutation m : remoteMutations) { + LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); + LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); + } + throw new IllegalStateException(msg); + } + + // drop rows from catalog on this region + mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + + long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); + for (ImmutableBytesPtr ckey : invalidateList) { + PTable table = newDeletedTableMarker(currentTime); + metaDataCache.put(ckey, table); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(table.getEstimatedSize()); + } + if (parentLockKey != null) { + ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(parentLockKey); + metaDataCache.invalidate(parentCacheKey); + } + + // after the view metadata is dropped, drop parent->child link + MetaDataResponse response = processRemoteRegionMutations( + getSystemTableForChildLinks(request.getClientVersion(), env.getConfiguration()).getName(), + childLinkMutations, MetaDataProtos.MutationCode.UNABLE_TO_DELETE_CHILD_LINK); + if (response != null) { + done.run(response); + return; + } + + done.run(MetaDataMutationResult.toProto(result)); + dropTableStats = true; + + updateDropTableDdlSuccessMetrics(pTableType); + LOGGER.info("{} dropped successfully, tableName: {}", pTableType, fullTableName); + } finally { + ServerUtil.releaseRowLocks(locks); + if (dropTableStats) { + Thread statsDeleteHandler = new Thread( + new StatsDeleteHandler(env, loadedTable, tableNamesToDelete, sharedTablesToDelete), + "thread-statsdeletehandler"); + statsDeleteHandler.setDaemon(true); + statsDeleteHandler.start(); + } + } + } catch (Throwable t) { + LOGGER.error("dropTable failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableOrViewName), t)); + } + } + + private void updateDropTableDdlSuccessMetrics(PTableType pTableType) { + if (pTableType == PTableType.TABLE || pTableType == PTableType.SYSTEM) { + metricsSource.incrementDropTableCount(); + } else if (pTableType == PTableType.VIEW) { + metricsSource.incrementDropViewCount(); + } else if (pTableType == PTableType.INDEX) { + metricsSource.incrementDropIndexCount(); + } + } + + 
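The dropTable path in the hunk above first runs its catalog deletes through separateLocalAndRemoteMutations to confirm that every mutation targets the current SYSTEM.CATALOG region, applies that local batch with mutateRowsWithLocks, and only afterwards removes the parent->child linking rows in SYSTEM.CHILD_LINK via processRemoteRegionMutations. A minimal standalone sketch of the region-containment split is shown below; it mirrors the separateLocalAndRemoteMutations method from the diff, but the class name MutationSplitSketch is illustrative only and merely assumes hbase-client on the classpath.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.RegionInfo;

    final class MutationSplitSketch {
      /**
       * A mutation whose row key falls inside the current region's key range can be
       * applied under the row locks already held; anything else has to be written
       * through a separate cross-region call.
       */
      static void split(RegionInfo regionInfo, List<Mutation> mutations,
          List<Mutation> localMutations, List<Mutation> remoteMutations) {
        for (Mutation mutation : mutations) {
          if (regionInfo.containsRow(mutation.getRow())) {
            localMutations.add(mutation);   // same SYSTEM.CATALOG region
          } else {
            remoteMutations.add(mutation);  // different region, needs its own RPC
          }
        }
      }
    }

The ordering matches the comments in the surrounding hunks: the table or view metadata rows are deleted first under the held row locks, and the parent->child link rows in SYSTEM.CHILD_LINK are removed afterwards in a separate batch.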
private static class StatsDeleteHandler implements Runnable { + PTable deletedTable; + List physicalTableNames; + List sharedTableStates; + RegionCoprocessorEnvironment env; + + StatsDeleteHandler(RegionCoprocessorEnvironment env, PTable deletedTable, + List physicalTableNames, List sharedTableStates) { + this.deletedTable = deletedTable; + this.physicalTableNames = physicalTableNames; + this.sharedTableStates = sharedTableStates; + this.env = env; } - /*** - * Get TTL Value stored in SYSCAT for a given table - * @param tableKey of table for which we are fining TTL - * @param clientTimeStamp client TimeStamp value - * @return TTL defined for a given table if it is null then return TTL_NOT_DEFINED(0) - * @throws IOException - */ - private int getTTLForTable(byte[] tableKey, long clientTimeStamp) throws IOException { - Scan scan = MetaDataUtil.newTableRowsScan(tableKey, MIN_TABLE_TIMESTAMP, clientTimeStamp); - Table sysCat = ServerUtil.getHTableForCoprocessorScan(this.env, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, - env.getConfiguration())); - ResultScanner scanner = sysCat.getScanner(scan); - Result result = scanner.next(); - do { - if (result == null) { - return TTL_NOT_DEFINED; - } - if (result.getValue(TABLE_FAMILY_BYTES, TTL_BYTES) != null) { - String ttlStr = (String) PVarchar.INSTANCE.toObject( - result.getValue(DEFAULT_COLUMN_FAMILY_BYTES, TTL_BYTES)); - return Integer.parseInt(ttlStr); + @Override + public void run() { + try { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + try ( + PhoenixConnection connection = getServerConnectionForMetaData(env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + try { + MetaDataUtil.deleteFromStatsTable(connection, deletedTable, physicalTableNames, + sharedTableStates); + LOGGER.info("Table stats deleted successfully, tablename is {}.", + deletedTable.getPhysicalName().getString()); + } catch (Throwable t) { + LOGGER.warn("Exception while deleting stats of table " + + deletedTable.getPhysicalName().getString() + + " please check and delete stats manually"); + } } - result = scanner.next(); - } while (result != null); - return TTL_NOT_DEFINED; + return null; + } + }); + } catch (IOException e) { + LOGGER.warn("Exception while deleting stats of table " + + deletedTable.getPhysicalName().getString() + " please check and delete stats manually"); + } } + } - private Long getViewIndexId(Cell[] tableKeyValues, PDataType viewIndexIdType) { - Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX]; - return viewIndexIdKv == null ? null : - decodeViewIndexId(viewIndexIdKv, viewIndexIdType); + protected RowLock acquireLock(Region region, byte[] lockKey, List locks, + boolean readLock) throws IOException { + RowLock rowLock = region.getRowLock(lockKey, this.getMetadataReadLockEnabled && readLock); + if (rowLock == null) { + throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(lockKey)); } - - private PTable modifyIndexStateForOldClient(int clientVersion, PTable table) - throws SQLException { - if (table == null) { - return table; - } - // PHOENIX-5073 Sets the index state based on the client version in case of old clients. - // If client is not yet up to 4.12, then translate PENDING_ACTIVE to ACTIVE (as would have - // been the value in those versions) since the client won't have this index state in its - // enum. 
- if (table.getIndexState() == PIndexState.PENDING_ACTIVE - && clientVersion < MetaDataProtocol.MIN_PENDING_ACTIVE_INDEX) { - table = - PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) - .setState(PIndexState.ACTIVE).build(); - } - // If client is not yet up to 4.14, then translate PENDING_DISABLE to DISABLE - // since the client won't have this index state in its enum. - if (table.getIndexState() == PIndexState.PENDING_DISABLE - && clientVersion < MetaDataProtocol.MIN_PENDING_DISABLE_INDEX) { - // note: for older clients, we have to rely on the rebuilder to transition - // PENDING_DISABLE -> DISABLE - table = - PTableImpl.builderWithColumns(table, PTableImpl.getColumnsToClone(table)) - .setState(PIndexState.DISABLE).build(); - } - return table; + if (locks != null) { + locks.add(rowLock); } + return rowLock; + } - /** - * Returns viewIndexId based on its underlying data type - * - * @param viewIndexIdKv - * @param viewIndexIdType - * @return - */ - private Long decodeViewIndexId(Cell viewIndexIdKv, PDataType viewIndexIdType) { - return viewIndexIdType.getCodec().decodeLong(viewIndexIdKv.getValueArray(), - viewIndexIdKv.getValueOffset(), SortOrder.getDefault()); + private MetaDataResponse processRemoteRegionMutations(byte[] systemTableName, + List remoteMutations, MetaDataProtos.MutationCode mutationCode) throws IOException { + if (remoteMutations.isEmpty()) { + return null; } - - private PDataType getViewIndexIdType(Cell[] tableKeyValues) { - Cell dataTypeKv = tableKeyValues[VIEW_INDEX_ID_DATA_TYPE_INDEX]; - return dataTypeKv == null ? - MetaDataUtil.getLegacyViewIndexIdDataType() : - PDataType.fromTypeId(PInteger.INSTANCE.getCodec() - .decodeInt(dataTypeKv.getValueArray(), dataTypeKv.getValueOffset(), SortOrder.getDefault())); + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env, + SchemaUtil.getPhysicalTableName(systemTableName, env.getConfiguration()))) { + hTable.batch(remoteMutations, null); + } catch (Throwable t) { + LOGGER.error("Unable to write mutations to " + Bytes.toString(systemTableName), t); + builder.setReturnCode(mutationCode); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + return builder.build(); } - - private boolean isQualifierCounterKV(Cell kv) { - int cmp = - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), QUALIFIER_COUNTER_KV.getQualifierArray(), - QUALIFIER_COUNTER_KV.getQualifierOffset(), QUALIFIER_COUNTER_KV.getQualifierLength()); - return cmp == 0; + return null; + } + + private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, + byte[] tableName, byte[] parentTableName, PTableType tableType, List catalogMutations, + List childLinkMutations, List invalidateList, + List tableNamesToDelete, List sharedTablesToDelete, int clientVersion) + throws IOException, SQLException { + + Region region = env.getRegion(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(catalogMutations); + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); + + PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); + + // We always cache the latest version - fault in if not in cache + if ( + table != null + || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion)) + != null + ) { + if (table.getTimeStamp() < clientTimeStamp) { + if (isTableDeleted(table) || tableType != table.getType()) { + return new 
MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + } else { + return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } } - - private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException { - List results = Lists.newArrayList(); + // We didn't find a table at the latest timestamp, so either there is no table or + // there was a table, but it's been deleted. In either case we want to return. + if (table == null) { + if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) { + return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + // Make sure we're not deleting the "wrong" child + if ( + parentTableName != null && table.getParentTableName() != null + && !Arrays.equals(parentTableName, table.getParentTableName().getBytes()) + ) { + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + // Since we don't allow back in time DDL, we know if we have a table it's the one + // we want to delete. FIXME: we shouldn't need a scan here, but should be able to + // use the table to generate the Delete markers. + Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); + List indexNames = Lists.newArrayList(); + List results = Lists.newArrayList(); + try (RegionScanner scanner = region.getScanner(scan);) { + scanner.next(results); + if (results.isEmpty()) { // Should not be possible + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + + // Add to list of HTables to delete, unless it's a view or its a shared index + if (tableType == INDEX && table.getViewIndexId() != null) { + sharedTablesToDelete.add(new SharedTableState(table)); + } else if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { + tableNamesToDelete.add(table.getPhysicalName().getBytes()); + } + invalidateList.add(cacheKey); + byte[][] rowKeyMetaData = new byte[5][]; + do { + Cell kv = results.get(LINK_TYPE_INDEX); + int nColumns = + getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData); + if ( + nColumns == 5 && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 + && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), + kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0 + ) { + LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]); + if ( + rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 + && linkType == LinkType.INDEX_TABLE + ) { + indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); + } else if ( + tableType == PTableType.VIEW && (linkType == PARENT_TABLE || linkType == PHYSICAL_TABLE) + ) { + // Populate the delete mutations for parent->child link for the child view + // in question, which we issue to SYSTEM.CHILD_LINK later + Cell parentTenantIdCell = MetaDataUtil.getCell(results, PARENT_TENANT_ID_BYTES); + PName parentTenantId = parentTenantIdCell != null + ? 
PNameFactory.newName(parentTenantIdCell.getValueArray(), + parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) + : null; + byte[] linkKey = + MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), + table.getParentTableName(), table.getTenantId(), table.getName()); + Delete linkDelete = new Delete(linkKey, clientTimeStamp); + childLinkMutations.add(linkDelete); + } + } + Delete delete = + new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp); + catalogMutations.add(delete); + results.clear(); scanner.next(results); - if (results.isEmpty()) { - return null; - } + } while (!results.isEmpty()); + } - Cell keyValue = results.get(0); - byte[] keyBuffer = keyValue.getRowArray(); - int keyLength = keyValue.getRowLength(); - int keyOffset = keyValue.getRowOffset(); - PName tenantId = newPName(keyBuffer, keyOffset, keyLength); - int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length; - if (tenantIdLength == 0) { - tenantId = null; - } - PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1); - long timeStamp = keyValue.getTimestamp(); - return new PSchema(schemaName.getString(), timeStamp); + // Recursively delete indexes + for (byte[] indexName : indexNames) { + byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName); + // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870). + // FIXME: the version of the Delete constructor without the lock args was introduced + // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version + // of the client. + Delete delete = new Delete(indexKey, clientTimeStamp); + catalogMutations.add(delete); + MetaDataMutationResult result = doDropTable(indexKey, tenantId, schemaName, indexName, + tableName, PTableType.INDEX, catalogMutations, childLinkMutations, invalidateList, + tableNamesToDelete, sharedTablesToDelete, clientVersion); + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } } - private PFunction getFunction(RegionScanner scanner, final boolean isReplace, long clientTimeStamp, List deleteMutationsForReplace) - throws IOException, SQLException { - List results = Lists.newArrayList(); - scanner.next(results); - if (results.isEmpty()) { - return null; - } - Cell[] functionKeyValues = new Cell[FUNCTION_KV_COLUMNS.size()]; - Cell[] functionArgKeyValues = new Cell[FUNCTION_ARG_KV_COLUMNS.size()]; - // Create PFunction based on KeyValues from scan - Cell keyValue = results.get(0); - byte[] keyBuffer = keyValue.getRowArray(); - int keyLength = keyValue.getRowLength(); - int keyOffset = keyValue.getRowOffset(); - long currentTimeMillis = EnvironmentEdgeManager.currentTimeMillis(); - if (isReplace) { - long deleteTimeStamp = - clientTimeStamp == HConstants.LATEST_TIMESTAMP ? currentTimeMillis - 1 - : (keyValue.getTimestamp() < clientTimeStamp ? clientTimeStamp - 1 - : keyValue.getTimestamp()); - deleteMutationsForReplace.add(new Delete(keyBuffer, keyOffset, keyLength, deleteTimeStamp)); - } - PName tenantId = newPName(keyBuffer, keyOffset, keyLength); - int tenantIdLength = (tenantId == null) ? 
0 : tenantId.getBytes().length; - if (tenantIdLength == 0) { - tenantId = null; - } - PName functionName = - newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1); - int functionNameLength = functionName.getBytes().length + 1; - int offset = tenantIdLength + functionNameLength + 1; - - long timeStamp = keyValue.getTimestamp(); - - int i = 0; - int j = 0; - while (i < results.size() && j < FUNCTION_KV_COLUMNS.size()) { - Cell kv = results.get(i); - Cell searchKv = FUNCTION_KV_COLUMNS.get(j); - int cmp = - Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), searchKv.getQualifierArray(), - searchKv.getQualifierOffset(), searchKv.getQualifierLength()); - if (cmp == 0) { - timeStamp = Math.max(timeStamp, kv.getTimestamp()); // Find max timestamp of table - // header row - functionKeyValues[j++] = kv; - i++; - } else if (cmp > 0) { - timeStamp = Math.max(timeStamp, kv.getTimestamp()); - functionKeyValues[j++] = null; - } else { - i++; // shouldn't happen - means unexpected KV in system table header row + if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == PTableType.VIEW) { + try (PhoenixConnection connection = + getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class)) { + PTable pTable = connection.getTableNoCache(table.getParentName().getString()); + table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable); + } + } + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, + EnvironmentEdgeManager.currentTimeMillis(), table, tableNamesToDelete, sharedTablesToDelete); + } + + /** + * Validate if mutation is allowed on a parent table/view based on their child views. If this + * method returns MetaDataMutationResult, mutation is not allowed, and returned object will + * contain returnCode (MutationCode) to indicate the underlying problem (validation failure code). + * @param expectedType expected type of PTable + * @param clientTimeStamp client timestamp, e.g check + * {@link MetaDataUtil#getClientTimeStamp(List)} + * @param tenantId tenant Id + * @param schemaName schema name + * @param tableOrViewName table or view name + * @param childViews child views of table or parent view. Usually this is an empty list + * passed to this method, and this method will add child views retrieved + * using + * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], long, boolean)} + * @param clientVersion client version, used to determine if mutation is allowed. + * @return Optional.empty() if mutation is allowed on parent table/view. If not allowed, returned + * Optional object will contain metaDataMutationResult with MutationCode. 
+ * @throws IOException if something goes wrong while retrieving child views using + * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], long, boolean)} + * @throws SQLException if something goes wrong while retrieving child views using + * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], long, boolean)} + */ + private Optional validateIfMutationAllowedOnParent( + final PTable parentTable, final List tableMetadata, final PTableType expectedType, + final long clientTimeStamp, final byte[] tenantId, final byte[] schemaName, + final byte[] tableOrViewName, final List childViews, final int clientVersion) + throws IOException, SQLException { + boolean isMutationAllowed = true; + boolean isSchemaMutationAllowed = true; + Pair scanSysCatForTTLDefinedOnAnyChildPair = new Pair<>(true, false); + if (expectedType == PTableType.TABLE || expectedType == PTableType.VIEW) { + try ( + Table hTable = ServerUtil.getHTableForCoprocessorScan(env, + getSystemTableForChildLinks(clientVersion, env.getConfiguration())); + Table sysCat = ServerUtil.getHTableForCoprocessorScan(env, + SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) { + List legitimateChildViews = new ArrayList<>(); + + childViews.addAll(findAllDescendantViews(hTable, sysCat, env.getConfiguration(), tenantId, + schemaName, tableOrViewName, clientTimeStamp, new ArrayList<>(), new ArrayList<>(), false, + scanSysCatForTTLDefinedOnAnyChildPair).getFirst()); + } + + if (!childViews.isEmpty()) { + // From 4.15 onwards we allow SYSTEM.CATALOG to split and no longer + // propagate parent metadata changes to child views. + // If the client is on a version older than 4.15 we have to block adding a + // column to a parent as we no longer lock the parent on the + // server side while creating a child view to prevent conflicting changes. + // This is handled on the client side from 4.15 onwards. + // Also if QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK is true, + // we block adding a column to a parent so that we can rollback the + // upgrade if required. + if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { + isMutationAllowed = false; + LOGGER.error("Unable to add or drop a column as the client is older than {}", + MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION); + } else if (allowSplittableSystemCatalogRollback) { + isMutationAllowed = false; + LOGGER.error("Unable to add or drop a column as the {} config is set to true", + QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); + } + } + + if ( + scanSysCatForTTLDefinedOnAnyChildPair.getSecond() + && validateTTLAttributeSettingForEntity(tableMetadata, TTL_BYTES) + ) { + // We got here means There was already TTL defined at one of the child, and we are + // trying to set TTL at current level which should not be allowed as TTL can only + // be defined at one level in hierarchy. 
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.TTL_ALREADY_DEFINED_IN_HIERARCHY) + .setSchemaName(Arrays.toString(schemaName)).setTableName(Arrays.toString(tableOrViewName)) + .build().buildException(); + } + } + if (!isMutationAllowed) { + MetaDataMutationResult metaDataMutationResult = + new MetaDataMutationResult(MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); + return Optional.of(metaDataMutationResult); + } + if (!isSchemaMutationAllowed) { + MetaDataMutationResult metaDataMutationResult = + new MetaDataMutationResult(MetaDataProtocol.MutationCode.UNALLOWED_SCHEMA_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); + return Optional.of(metaDataMutationResult); + } + return Optional.empty(); + } + + private MetaDataMutationResult mutateColumn(final List tableMetadata, + final ColumnMutator mutator, final int clientVersion, final PTable parentTable, + final PTable transformingNewTable, boolean isAddingOrDroppingColumns) throws IOException { + byte[][] rowKeyMetaData = new byte[5][]; + MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableOrViewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableOrViewName); + String fullTableName = SchemaUtil.getTableName(schemaName, tableOrViewName); + // server-side, except for indexing, we always expect the keyvalues to be standard KeyValues + PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, + GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); + List tableNamesToDelete = Lists.newArrayList(); + List sharedTablesToDelete = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); + try { + Region region = env.getRegion(); + MetaDataMutationResult result = checkTableKeyInRegion(key, region); + if (result != null) { + return result; + } + + List locks = Lists.newArrayList(); + try { + List childViews = Lists.newArrayList(); + Optional mutationResult = + validateIfMutationAllowedOnParent(parentTable, tableMetadata, expectedType, + clientTimeStamp, tenantId, schemaName, tableOrViewName, childViews, clientVersion); + // only if mutation is allowed, we should get Optional.empty() here + if (mutationResult.isPresent()) { + return mutationResult.get(); + } + // We take a write row lock for tenantId, schemaName, tableOrViewName + acquireLock(region, key, locks, false); + // Invalidate the cache from all the regionservers. + List requests = new ArrayList<>(); + requests + .add(new InvalidateServerMetadataCacheRequest(tenantId, schemaName, tableOrViewName)); + invalidateServerMetadataCache(requests); + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); + List invalidateList = new ArrayList<>(); + invalidateList.add(cacheKey); + PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); + if (failConcurrentMutateAddColumnOneTimeForTesting) { + failConcurrentMutateAddColumnOneTimeForTesting = false; + return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } + if (LOGGER.isDebugEnabled()) { + if (table == null) { + LOGGER.debug("Table " + Bytes.toStringBinary(key) + + " not found in cache. 
Will build through scan"); + } else { + LOGGER.debug("Table " + Bytes.toStringBinary(key) + " found in cache with timestamp " + + table.getTimeStamp() + " seqNum " + table.getSequenceNumber()); + } + } + // Get client timeStamp from mutations + if ( + table == null && (table = + buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion)) == null + ) { + // if not found then call newerTableExists and add delete marker for timestamp + // found + table = buildDeletedTable(key, cacheKey, region, clientTimeStamp); + if (table != null) { + LOGGER.info("Found newer table deleted as of " + table.getTimeStamp() + + " versus client timestamp of " + clientTimeStamp); + return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + + // if this is a view or view index then we need to include columns and + // indexes derived from its ancestors + if (parentTable != null) { + Properties props = new Properties(); + if (tenantId != null) { + props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(tenantId)); + } + if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { + props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); + } + try (PhoenixConnection connection = + getServerConnectionForMetaData(props, env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, parentTable); + } + } + if (transformingNewTable != null) { + table = PTableImpl.builderWithColumns(table, getColumnsToClone(table)) + .setTransformingNewTable(transformingNewTable).build(); + } + + if (table.getTimeStamp() >= clientTimeStamp) { + LOGGER.info("Found newer table as of " + table.getTimeStamp() + + " versus client timestamp of " + clientTimeStamp); + return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), table); + } else if (isTableDeleted(table)) { + return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + // lookup TABLE_SEQ_NUM in tableMetaData + long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum " + + expectedSeqNum + " and found seqNum " + table.getSequenceNumber() + " with " + + table.getColumns().size() + " columns: " + table.getColumns()); + } + if (expectedSeqNum != table.getSequenceNumber()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("For table " + Bytes.toStringBinary(key) + + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum"); + } + return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), table); + } + + PTableType type = table.getType(); + if (type == PTableType.INDEX) { + // Disallow mutation of an index table + return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, + EnvironmentEdgeManager.currentTimeMillis(), null); + } else { + // We said to drop a table, but found a view or visa versa + if (type != expectedType) { + return new MetaDataProtocol.MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + } + + if (!childViews.isEmpty()) { + // validate the add or drop column mutations + result = mutator.validateWithChildViews(table, 
childViews, tableMetadata, schemaName, + tableOrViewName); + if (result != null) { + return result; + } + } + + getCoprocessorHost().preAlterTable(Bytes.toString(tenantId), + SchemaUtil.getTableName(schemaName, tableOrViewName), + TableName.valueOf(table.getPhysicalName().getBytes()), getParentPhysicalTableName(table), + table.getType()); + + result = mutator.validateAndAddMetadata(table, rowKeyMetaData, tableMetadata, region, + invalidateList, locks, clientTimeStamp, clientVersion, + ((ExtendedCellBuilder) env.getCellBuilder()), isAddingOrDroppingColumns); + // if the update mutation caused tables to be deleted, the mutation code returned + // will be MutationCode.TABLE_ALREADY_EXISTS + if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } + + // drop any indexes on the base table that need the column that is going to be + // dropped + List> tableAndDroppedColumnPairs = + mutator.getTableAndDroppedColumnPairs(); + Iterator> iterator = tableAndDroppedColumnPairs.iterator(); + while (iterator.hasNext()) { + Pair pair = iterator.next(); + // remove the current table and column being dropped from the list and drop any + // indexes that require the column being dropped while holding the row lock + if (table.equals(pair.getFirst())) { + iterator.remove(); + result = dropIndexes(env, pair.getFirst(), invalidateList, locks, clientTimeStamp, + tableMetadata, pair.getSecond(), tableNamesToDelete, sharedTablesToDelete, + clientVersion); + if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; } + } + } + + if ( + table.isChangeDetectionEnabled() || MetaDataUtil.getChangeDetectionEnabled(tableMetadata) + ) { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + try { + exportSchema(tableMetadata, key, clientTimeStamp, clientVersion, table); + metricsSource.incrementAlterExportCount(); + metricsSource + .updateAlterExportTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + } catch (Exception e) { + LOGGER.error("Error writing to schema registry", e); + metricsSource.incrementAlterExportFailureCount(); + metricsSource + .updateAlterExportFailureTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + result = new MetaDataMutationResult(MutationCode.ERROR_WRITING_TO_SCHEMA_REGISTRY, + EnvironmentEdgeManager.currentTimeMillis(), table); + return result; + } } - // CLASS_NAME,NUM_ARGS and JAR_PATH are required. - if (functionKeyValues[CLASS_NAME_INDEX] == null || functionKeyValues[NUM_ARGS_INDEX] == null) { - throw new IllegalStateException( - "Didn't find expected key values for function row in metadata row"); - } - - Cell classNameKv = functionKeyValues[CLASS_NAME_INDEX]; - PName className = newPName(classNameKv.getValueArray(), classNameKv.getValueOffset(), - classNameKv.getValueLength()); - Cell jarPathKv = functionKeyValues[JAR_PATH_INDEX]; - PName jarPath = null; - if (jarPathKv != null) { - jarPath = newPName(jarPathKv.getValueArray(), jarPathKv.getValueOffset(), - jarPathKv.getValueLength()); - } - Cell numArgsKv = functionKeyValues[NUM_ARGS_INDEX]; - int numArgs = - PInteger.INSTANCE.getCodec().decodeInt(numArgsKv.getValueArray(), - numArgsKv.getValueOffset(), SortOrder.getDefault()); - Cell returnTypeKv = functionKeyValues[RETURN_TYPE_INDEX]; - PName returnType = - returnTypeKv == null ? 
null : newPName(returnTypeKv.getValueArray(), - returnTypeKv.getValueOffset(), returnTypeKv.getValueLength()); - - List arguments = Lists.newArrayListWithExpectedSize(numArgs); - for (int k = 0; k < numArgs; k++) { - results.clear(); - scanner.next(results); - if (results.isEmpty()) { - break; + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + + // The mutations to add a column are written in the following order: + // 1. Update the encoded column qualifier for the parent table if its on a + // different region server (for tables that use column qualifier encoding) + // if the next step fails we end up wasting a few col qualifiers + // 2. Write the mutations to add the column + + List localMutations = Lists.newArrayListWithExpectedSize(tableMetadata.size()); + List remoteMutations = Lists.newArrayList(); + separateLocalAndRemoteMutations(region, tableMetadata, localMutations, remoteMutations); + if (!remoteMutations.isEmpty()) { + // there should only be remote mutations if we are adding a column to a view + // that uses encoded column qualifiers (the remote mutations are to update the + // encoded column qualifier counter on the parent table) + if ( + (mutator.getMutateColumnType() == ColumnMutator.MutateColumnType.ADD_COLUMN + && type == PTableType.VIEW + && table.getEncodingScheme() != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) + ) { + processRemoteRegionMutations(SYSTEM_CATALOG_NAME_BYTES, remoteMutations, + MetaDataProtos.MutationCode.UNABLE_TO_UPDATE_PARENT_TABLE); + // if we're a view or index, clear the cache for our parent + if ((type == PTableType.VIEW || type == INDEX) && table.getParentTableName() != null) { + clearRemoteTableFromCache(clientTimeStamp, + table.getParentSchemaName() != null + ? table.getParentSchemaName().getBytes() + : ByteUtil.EMPTY_BYTE_ARRAY, + table.getParentTableName().getBytes()); } - Cell typeKv = results.get(0); - if (isReplace) { - long deleteTimeStamp = - clientTimeStamp == HConstants.LATEST_TIMESTAMP ? currentTimeMillis - 1 - : (typeKv.getTimestamp() < clientTimeStamp ? clientTimeStamp - 1 - : typeKv.getTimestamp()); - deleteMutationsForReplace.add(new Delete(typeKv.getRowArray(), typeKv - .getRowOffset(), typeKv.getRowLength(), deleteTimeStamp)); + } else { + String msg = + "Found unexpected mutations while adding or dropping column " + "to " + fullTableName; + LOGGER.error(msg); + for (Mutation m : remoteMutations) { + LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); + LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); } - int typeKeyLength = typeKv.getRowLength(); - PName typeName = - newPName(typeKv.getRowArray(), typeKv.getRowOffset() + offset, typeKeyLength - - offset - 3); - - int argPositionOffset = offset + typeName.getBytes().length + 1; - short argPosition = Bytes.toShort(typeKv.getRowArray(), typeKv.getRowOffset() + argPositionOffset, typeKeyLength - - argPositionOffset); - addArgumentToFunction(results, functionName, typeName, functionArgKeyValues, arguments, argPosition); - } - Collections.sort(arguments, new Comparator() { - @Override - public int compare(FunctionArgument o1, FunctionArgument o2) { - return o1.getArgPosition() - o2.getArgPosition(); + throw new IllegalStateException(msg); + } + } + mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, + Collections. 
emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + // Invalidate from cache + for (ImmutableBytesPtr invalidateKey : invalidateList) { + metaDataCache.invalidate(invalidateKey); + } + // Get client timeStamp from mutations, since it may get updated by the + // mutateRowsWithLocks call + long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); + // if the update mutation caused tables to be deleted just return the result which + // will contain the table to be deleted + if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } else { + PTable oldTable = table; + table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion); + if (table != null && hasInheritableTablePropertyChanged(table, oldTable)) { + invalidateAllChildTablesAndIndexes(table, childViews); + } + if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type == PTableType.VIEW) { + try ( + PhoenixConnection connection = getServerConnectionForMetaData(env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + PTable pTable = connection.getTableNoCache(table.getParentName().getString()); + table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable); } - }); - return new PFunction(tenantId, functionName.getString(), arguments, returnType.getString(), - className.getString(), jarPath == null ? null : jarPath.getString(), timeStamp); + } + return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table, + tableNamesToDelete, sharedTablesToDelete); + } + } finally { + ServerUtil.releaseRowLocks(locks); + // drop indexes on views that require the column being dropped. These could be on a + // different region server so don't hold row locks while dropping them + for (Pair pair : mutator.getTableAndDroppedColumnPairs()) { + result = dropRemoteIndexes(env, pair.getFirst(), clientTimeStamp, pair.getSecond(), + tableNamesToDelete, sharedTablesToDelete); + if (result != null && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } + } + } + } catch (Throwable t) { + ClientUtil.throwIOException(fullTableName, t); + return null; // impossible } - - private PTable buildDeletedTable(byte[] key, ImmutableBytesPtr cacheKey, Region region, - long clientTimeStamp) throws IOException { - if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { - return null; - } - - Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); - scan.setFilter(new FirstKeyOnlyFilter()); - scan.setRaw(true); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan)) { - scanner.next(results); - } - for (Cell kv : results) { - KeyValue.Type type = Type.codeToType(kv.getTypeByte()); - if (type == Type.DeleteFamily) { // Row was deleted - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - PTable table = newDeletedTableMarker(kv.getTimestamp()); - metaDataCache.put(cacheKey, table); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(table.getEstimatedSize()); - return table; - } - } - return null; + } + + private void invalidateServerMetadataCache(List requests) + throws Throwable { + if (!this.invalidateServerCacheEnabled) { + LOGGER.info("Skip invalidating server metadata cache since conf property" + + " phoenix.metadata.invalidate.cache.enabled is set to false"); + return; } - - - private PFunction buildDeletedFunction(byte[] key, ImmutableBytesPtr cacheKey, 
Region region, - long clientTimeStamp) throws IOException { - if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { - return null; - } - - Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); - scan.setFilter(new FirstKeyOnlyFilter()); - scan.setRaw(true); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan);) { - scanner.next(results); - } - // HBase ignores the time range on a raw scan (HBASE-7362) - if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) { - Cell kv = results.get(0); - if (kv.getTypeByte() == Type.Delete.getCode()) { - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - PFunction function = newDeletedFunctionMarker(kv.getTimestamp()); - metaDataCache.put(cacheKey, function); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); - return function; - } - } - return null; + try (PhoenixConnection connection = + getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class)) { + ConnectionQueryServices queryServices = connection.getQueryServices(); + queryServices.invalidateServerMetadataCache(requests); } - - private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) - throws IOException { - if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { - return null; - } - - Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP); - scan.setFilter(new FirstKeyOnlyFilter()); - scan.setRaw(true); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan);) { - scanner.next(results); - } - // HBase ignores the time range on a raw scan (HBASE-7362) - if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) { - Cell kv = results.get(0); - if (kv.getTypeByte() == Type.Delete.getCode()) { - Cache metaDataCache = GlobalCache.getInstance(this.env) - .getMetaDataCache(); - PSchema schema = newDeletedSchemaMarker(kv.getTimestamp()); - metaDataCache.put(cacheKey, schema); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); - return schema; - } - } - return null; + } + + private boolean hasInheritableTablePropertyChanged(PTable newTable, PTable oldTable) { + return !Objects.equals(newTable.getMaxLookbackAge(), oldTable.getMaxLookbackAge()); + } + + private void invalidateAllChildTablesAndIndexes(PTable table, List childViews) { + List invalidateList = new ArrayList(); + if (table.getIndexes() != null) { + for (PTable index : table.getIndexes()) { + invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(index))); + } } - - private static PTable newDeletedTableMarker(long timestamp) { - try { - return new PTableImpl.Builder() - .setType(PTableType.TABLE) - .setTimeStamp(timestamp) - .setPkColumns(Collections.emptyList()) - .setAllColumns(Collections.emptyList()) - .setFamilyAttributes(Collections.emptyList()) - .setRowKeySchema(RowKeySchema.EMPTY_SCHEMA) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(Collections.emptyList()) - .build(); - } catch (SQLException e) { - // Should never happen - return null; + for (PTable childView : childViews) { + invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(childView))); + if (childView.getIndexes() != null) { + for (PTable viewIndex : childView.getIndexes()) { + invalidateList.add(new 
ImmutableBytesPtr(SchemaUtil.getTableKey(viewIndex))); } + } } - - private static PFunction newDeletedFunctionMarker(long timestamp) { - return new PFunction(timestamp); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + for (ImmutableBytesPtr invalidateKey : invalidateList) { + metaDataCache.invalidate(invalidateKey); } - - private static PSchema newDeletedSchemaMarker(long timestamp) { - return new PSchema(timestamp); + } + + /** + * Removes the table from the server side cache + */ + private void clearRemoteTableFromCache(long clientTimeStamp, byte[] schemaName, byte[] tableName) + throws SQLException { + // remove the parent table from the metadata cache as we just mutated the table + Properties props = new Properties(); + if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { + props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); } - - private static boolean isTableDeleted(PTable table) { - return table.getName() == null; + try ( + PhoenixConnection connection = getServerConnectionForMetaData(props, env.getConfiguration()) + .unwrap(PhoenixConnection.class)) { + ConnectionQueryServices queryServices = connection.getQueryServices(); + queryServices.clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY, schemaName, tableName, + clientTimeStamp); } - - private static boolean isSchemaDeleted(PSchema schema) { - return schema.getSchemaName() == null; + } + + // Checks whether a non-zero TTL value is being set. + private boolean validateTTLAttributeSettingForEntity(final List tableMetadata, + final byte[] ttlBytes) { + for (Mutation m : tableMetadata) { + if (m instanceof Put) { + Put p = (Put) m; + List cells = p.get(TABLE_FAMILY_BYTES, ttlBytes); + if (cells != null && cells.size() > 0) { + Cell cell = cells.get(0); + String newTTLStr = (String) PVarchar.INSTANCE.toObject(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength()); + int newTTL = Integer.parseInt(newTTLStr); + return newTTL != TTL_NOT_DEFINED; + } + } } + return false; + } - private static boolean isFunctionDeleted(PFunction function) { - return function.getFunctionName() == null; - } + public static class ColumnFinder extends StatelessTraverseAllExpressionVisitor { + private boolean columnFound; + private final Expression columnExpression; - private PTable loadTable(RegionCoprocessorEnvironment env, byte[] key, - ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp, int clientVersion) - throws IOException, SQLException { - Region region = env.getRegion(); - PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); - // We always cache the latest version - fault in if not in cache - if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp, clientVersion)) != null) { - return table; - } - // if not found then check if newer table already exists and add delete marker for timestamp - // found - if (table == null - && (table = buildDeletedTable(key, cacheKey, region, clientTimeStamp)) != null) { - return table; - } - return null; + public ColumnFinder(Expression columnExpression) { + this.columnExpression = columnExpression; + columnFound = false; } - /** - * Returns a PTable if its found in the cache. 
- */ - private PTable getTableFromCache(ImmutableBytesPtr cacheKey, long clientTimeStamp, int clientVersion) { - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - PTable table = (PTable) metaDataCache.getIfPresent(cacheKey); - if (table == null) { - metricsSource.incrementMetadataCacheMissCount(); - } else { - metricsSource.incrementMetadataCacheHitCount(); - } - return table; + private Void process(Expression expression) { + if (expression.equals(columnExpression)) { + columnFound = true; + } + return null; } - private PFunction loadFunction(RegionCoprocessorEnvironment env, byte[] key, - ImmutableBytesPtr cacheKey, long clientTimeStamp, long asOfTimeStamp, boolean isReplace, List deleteMutationsForReplace) - throws IOException, SQLException { - Region region = env.getRegion(); - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - PFunction function = (PFunction) metaDataCache.getIfPresent(cacheKey); - // We always cache the latest version - fault in if not in cache - if (function != null && !isReplace) { - metricsSource.incrementMetadataCacheHitCount(); - return function; - } - metricsSource.incrementMetadataCacheMissCount(); - ArrayList arrayList = new ArrayList(1); - arrayList.add(key); - List functions = buildFunctions(arrayList, region, asOfTimeStamp, isReplace, deleteMutationsForReplace); - if (functions != null) return functions.get(0); - // if not found then check if newer table already exists and add delete marker for timestamp - // found - if (function == null - && (function = buildDeletedFunction(key, cacheKey, region, clientTimeStamp)) != null) { - return function; - } - return null; + @Override + public Void visit(KeyValueColumnExpression expression) { + return process(expression); } - private PSchema loadSchema(RegionCoprocessorEnvironment env, byte[] key, ImmutableBytesPtr cacheKey, - long clientTimeStamp, long asOfTimeStamp) throws IOException, SQLException { - Region region = env.getRegion(); - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - PSchema schema = (PSchema) metaDataCache.getIfPresent(cacheKey); - // We always cache the latest version - fault in if not in cache - if (schema != null) { - metricsSource.incrementMetadataCacheHitCount(); - return schema; - } - metricsSource.incrementMetadataCacheMissCount(); - ArrayList arrayList = new ArrayList(1); - arrayList.add(key); - List schemas = buildSchemas(arrayList, region, asOfTimeStamp, cacheKey); - if (schemas != null) return schemas.get(0); - // if not found then check if newer schema already exists and add delete marker for timestamp - // found - if (schema == null - && (schema = buildDeletedSchema(key, cacheKey, region, clientTimeStamp)) != null) { - return schema; - } - return null; + @Override + public Void visit(RowKeyColumnExpression expression) { + return process(expression); } - /** - * @return null if the physical table row information is not present. 
- */ - private static void getParentAndPhysicalNames(List tableMetadata, byte[][] parentTenantSchemaTableNames, byte[][] physicalSchemaTableNames) { - int size = tableMetadata.size(); - byte[][] rowKeyMetaData = new byte[3][]; - MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - Mutation physicalTableRow = null; - Mutation parentTableRow = null; - boolean physicalTableLinkFound = false; - boolean parentTableLinkFound = false; - if (size >= 2) { - int i = size - 1; - while (i >= 1) { - Mutation m = tableMetadata.get(i); - if (m instanceof Put) { - LinkType linkType = MetaDataUtil.getLinkType(m); - if (linkType == PHYSICAL_TABLE) { - physicalTableRow = m; - physicalTableLinkFound = true; - } - if (linkType == PARENT_TABLE) { - parentTableRow = m; - parentTableLinkFound = true; - } - } - if (physicalTableLinkFound && parentTableLinkFound) { - break; - } - i--; - } - } - if (!parentTableLinkFound) { - parentTenantSchemaTableNames[0] = null; - parentTenantSchemaTableNames[1] = null; - parentTenantSchemaTableNames[2] = null; - - } - if (!physicalTableLinkFound) { - physicalSchemaTableNames[0] = null; - physicalSchemaTableNames[1] = null; - physicalSchemaTableNames[2] = null; - } - if (physicalTableLinkFound) { - getSchemaTableNames(physicalTableRow, physicalSchemaTableNames); - } - if (parentTableLinkFound) { - getSchemaTableNames(parentTableRow, parentTenantSchemaTableNames); - } + @Override + public Void visit(ProjectedColumnExpression expression) { + return process(expression); } - private static void getSchemaTableNames(Mutation row, byte[][] schemaTableNames) { - byte[][] rowKeyMetaData = new byte[5][]; - getVarChars(row.getRow(), 5, rowKeyMetaData); - byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] colBytes = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]; - byte[] famBytes = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]; - if ((colBytes == null || colBytes.length == 0) && (famBytes != null && famBytes.length > 0)) { - byte[] sName = - SchemaUtil.getSchemaNameFromFullName(famBytes).getBytes(StandardCharsets.UTF_8); - byte[] tName = - SchemaUtil.getTableNameFromFullName(famBytes).getBytes(StandardCharsets.UTF_8); - schemaTableNames[0] = tenantId; - schemaTableNames[1] = sName; - schemaTableNames[2] = tName; - } + public boolean getColumnFound() { + return columnFound; } - - @Override - public void createTable(RpcController controller, CreateTableRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - byte[][] rowKeyMetaData = new byte[3][]; - byte[] schemaName = null; - byte[] tableName = null; - String fullTableName = null; - try { - int clientVersion = request.getClientVersion(); - List tableMetadata = ProtobufUtil.getMutations(request); - MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - fullTableName = SchemaUtil.getTableName(schemaName, tableName); - boolean isNamespaceMapped = MetaDataUtil.isNameSpaceMapped(tableMetadata, GenericKeyValueBuilder.INSTANCE, - new ImmutableBytesWritable()); - final IndexType indexType = MetaDataUtil.getIndexType(tableMetadata, GenericKeyValueBuilder.INSTANCE, - new ImmutableBytesWritable()); - byte[] parentSchemaName = null; - byte[] parentTableName = null; - 
PTable parentTable = request.hasParentTable() ? PTableImpl.createFromProto(request.getParentTable()) : null; - PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); - - // Load table to see if it already exists - byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName); - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); - boolean isChangeDetectionEnabled = MetaDataUtil.getChangeDetectionEnabled(tableMetadata); - - PTable table = null; - // Get as of latest timestamp so we can detect if we have a newer table that already - // exists without making an additional query - table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP, - clientVersion); - if (table != null) { - if (table.getTimeStamp() < clientTimeStamp) { - // If the table is older than the client time stamp and it's deleted, - // continue - if (!isTableDeleted(table)) { - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setTable(PTableImpl.toProto(table)); - done.run(builder.build()); - return; - } - } else { - builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setTable(PTableImpl.toProto(table)); - done.run(builder.build()); - return; - } - } - - // check if the table was previously dropped, but had child views that have not - // yet been cleaned up. - // Note that for old clients connecting to a 4.15 server whose metadata hasn't been - // upgraded, we disallow dropping a base table that has child views, so in that case - // this is a no-op (See PHOENIX-5544) - if (!Bytes.toString(schemaName).equals(QueryConstants.SYSTEM_SCHEMA_NAME)) { - ServerViewUtil.dropChildViews(env, tenantIdBytes, schemaName, tableName, - getSystemTableForChildLinks(clientVersion, env.getConfiguration()) - .getName()); - } - - byte[] parentTableKey = null; - Set indexes = new HashSet(); - ; - byte[] cPhysicalName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped) - .getBytes(); - byte[] cParentPhysicalName = null; - if (tableType == PTableType.VIEW) { - byte[][] parentSchemaTableNames = new byte[3][]; - byte[][] parentPhysicalSchemaTableNames = new byte[3][]; - getParentAndPhysicalNames(tableMetadata, parentSchemaTableNames, parentPhysicalSchemaTableNames); - if (parentPhysicalSchemaTableNames[2] != null) { - if (parentTable == null) { - // This is needed when we connect with a 4.14 client to - // a 4.15.0+ server. - // In that case we need to resolve the parent table on - // the server. - parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, - parentPhysicalSchemaTableNames[1], - parentPhysicalSchemaTableNames[2], clientTimeStamp, clientVersion); - if (parentTable == null) { - builder.setReturnCode( - MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - if (parentSchemaTableNames[2] != null - && Bytes.compareTo(parentSchemaTableNames[2], - parentPhysicalSchemaTableNames[2]) != 0) { - // if view is created on view - byte[] tenantId = parentSchemaTableNames[0] == null - ? 
ByteUtil.EMPTY_BYTE_ARRAY - : parentSchemaTableNames[0]; - parentTable = doGetTable(tenantId, parentSchemaTableNames[1], - parentSchemaTableNames[2], clientTimeStamp, clientVersion); - if (parentTable == null) { - // it could be a global view - parentTable = doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, - parentSchemaTableNames[1], parentSchemaTableNames[2], - clientTimeStamp, clientVersion); - } - } - if (parentTable == null) { - builder.setReturnCode( - MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - } - parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, - parentPhysicalSchemaTableNames[1], parentPhysicalSchemaTableNames[2]); - cParentPhysicalName = parentTable.getPhysicalName().getBytes(); - for (PTable index : parentTable.getIndexes()) { - indexes.add(TableName.valueOf(index.getPhysicalName().getBytes())); - } - } else { - // Mapped View - cParentPhysicalName = SchemaUtil.getPhysicalHBaseTableName( - schemaName, tableName, isNamespaceMapped).getBytes(); - - } - parentSchemaName = parentPhysicalSchemaTableNames[1]; - parentTableName = parentPhysicalSchemaTableNames[2]; - - } else if (tableType == PTableType.INDEX) { - parentSchemaName = schemaName; - /* - * For an index we lock the parent table's row which could be a physical table or a view. - * If the parent table is a physical table, then the tenantIdBytes is empty because - * we allow creating an index with a tenant connection only if the parent table is a view. - */ - parentTableName = MetaDataUtil.getParentTableName(tableMetadata); - parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName); - if (parentTable == null) { - // This is needed when we connect with a 4.14 client to a 4.15.0+ server. - // In that case we need to resolve the parent table on the server. - parentTable = - doGetTable(tenantIdBytes, parentSchemaName, parentTableName, clientTimeStamp, null, - request.getClientVersion()); - } - if (IndexType.LOCAL == indexType) { - cPhysicalName = parentTable.getPhysicalName().getBytes(); - cParentPhysicalName = parentTable.getPhysicalName().getBytes(); - } else if (parentTable.getType() == PTableType.VIEW) { - // The view index physical table name is constructed from logical name of base table. - // For example, _IDX_SC.TBL1 is the view index name and SC.TBL1 is the logical name of the base table. - String namepaceMappedParentLogicalName = MetaDataUtil.getNamespaceMappedName(parentTable.getBaseTableLogicalName(), isNamespaceMapped); - cPhysicalName = MetaDataUtil.getViewIndexPhysicalName(namepaceMappedParentLogicalName.getBytes(StandardCharsets.UTF_8)); - cParentPhysicalName = parentTable.getPhysicalName().getBytes(); - } else { - cParentPhysicalName = SchemaUtil - .getPhysicalHBaseTableName(parentSchemaName, parentTableName, isNamespaceMapped).getBytes(); - } - } - - getCoprocessorHost().preCreateTable(Bytes.toString(tenantIdBytes), - fullTableName, - (tableType == PTableType.VIEW) ? null : TableName.valueOf(cPhysicalName), - cParentPhysicalName == null ? 
null : TableName.valueOf(cParentPhysicalName), tableType, - /* TODO: During inital create we may not need the family map */ - Collections.emptySet(), indexes); - - Region region = env.getRegion(); - List locks = Lists.newArrayList(); - // Place a lock using key for the table to be created - try { - acquireLock(region, tableKey, locks, false); - - // If the table key resides outside the region, return without doing anything - MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - - if (parentTableName != null) { - // From 4.15 onwards we only need to lock the parent table : - // 1) when creating an index on a table or a view - // 2) if allowSplittableSystemCatalogRollback is true we try to lock the parent table to prevent it - // from changing concurrently while a view is being created - if (tableType == PTableType.INDEX || allowSplittableSystemCatalogRollback) { - result = checkTableKeyInRegion(parentTableKey, region); - if (result != null) { - LOGGER.error("Unable to lock parentTableKey " + Bytes.toStringBinary(parentTableKey)); - // if allowSplittableSystemCatalogRollback is true and we can't lock the parentTableKey (because - // SYSTEM.CATALOG already split) return UNALLOWED_TABLE_MUTATION so that the client - // knows the create statement failed - MetaDataProtos.MutationCode code = tableType == PTableType.INDEX ? - MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION : - MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION; - builder.setReturnCode(code); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - acquireLock(region, parentTableKey, locks, false); - } - // make sure we haven't gone over our threshold for indexes on this table. - if (execeededIndexQuota(tableType, parentTable)) { - builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - } - - // Add cell for ROW_KEY_ORDER_OPTIMIZABLE = true, as we know that new tables - // conform the correct row key. The exception is for a VIEW, which the client - // sends over depending on its base physical table. - if (tableType != PTableType.VIEW) { - UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp); - } - // If the parent table of the view has the auto partition sequence name attribute, modify the - // tableMetadata and set the view statement and partition column correctly - if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) { - long autoPartitionNum = 1; - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class); - Statement stmt = connection.createStatement()) { - String seqName = parentTable.getAutoPartitionSeqName(); - // Not going through the standard route of using statement.execute() as that code path - // is blocked if the metadata hasn't been been upgraded to the new minor release. 
- String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName); - PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class); - QueryPlan plan = ps.compileQuery(seqNextValueSql); - ResultIterator resultIterator = plan.iterator(); - PhoenixResultSet rs = ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext()); - rs.next(); - autoPartitionNum = rs.getLong(1); - } catch (SequenceNotFoundException e) { - builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - PColumn autoPartitionCol = parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable)); - if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) { - builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - builder.setAutoPartitionNum(autoPartitionNum); - - // set the VIEW STATEMENT column of the header row - Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata); - NavigableMap> familyCellMap = tableHeaderPut.getFamilyCellMap(); - List cells = familyCellMap.get(TABLE_FAMILY_BYTES); - Cell cell = cells.get(0); - String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum); - String hbaseVersion = VersionInfo.getVersion(); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion); - MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr); - byte[] value = ptr.copyBytesIfNecessary(); - byte[] viewStatement = null; - // if we have an existing where clause add the auto partition where clause to it - if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) { - viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere)); - } else { - viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere)); - } - Cell viewStatementCell = - PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), - cell.getFamilyOffset(), cell.getFamilyLength(), - VIEW_STATEMENT_BYTES, 0, VIEW_STATEMENT_BYTES.length, - cell.getTimestamp(), viewStatement, 0, viewStatement.length, - cell.getType()); - cells.add(viewStatementCell); - - // set the IS_VIEW_REFERENCED column of the auto partition column row - Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata); - familyCellMap = autoPartitionPut.getFamilyCellMap(); - cells = familyCellMap.get(TABLE_FAMILY_BYTES); - cell = cells.get(0); - PDataType dataType = autoPartitionCol.getDataType(); - Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE); - byte[] bytes = new byte[dataType.getByteSize() + 1]; - dataType.toBytes(val, bytes, 0); - Cell viewConstantCell = - PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), - cell.getFamilyOffset(), cell.getFamilyLength(), - VIEW_CONSTANT_BYTES, 0, VIEW_CONSTANT_BYTES.length, - cell.getTimestamp(), bytes, 0, bytes.length, cell.getType()); - cells.add(viewConstantCell); - } - Long indexId = null; - if (request.hasAllocateIndexId() && 
request.getAllocateIndexId()) { - String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes); - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - PName physicalName = parentTable.getPhysicalName(); - long seqValue = getViewIndexSequenceValue(connection, tenantIdStr, parentTable); - Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata); - NavigableMap> familyCellMap = tableHeaderPut.getFamilyCellMap(); - List cells = familyCellMap.get(TABLE_FAMILY_BYTES); - Cell cell = cells.get(0); - PDataType dataType = MetaDataUtil.getIndexDataType(tableMetadata, - GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); - Object val = dataType.toObject(seqValue, PLong.INSTANCE); - byte[] bytes = new byte[dataType.getByteSize() + 1]; - dataType.toBytes(val, bytes, 0); - Cell indexIdCell = - PhoenixKeyValueUtil.newKeyValue(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength(), - cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength(), VIEW_INDEX_ID_BYTES, 0, - VIEW_INDEX_ID_BYTES.length, cell.getTimestamp(), bytes, 0, - bytes.length, cell.getType()); - cells.add(indexIdCell); - indexId = seqValue; - } - } - - // The mutations to create a table are written in the following order: - // 1. Write the child link as if the next two steps fail we - // ignore missing children while processing a parent - // (this is already done at this point, as a separate client-server RPC - // to the ChildLinkMetaDataEndpoint coprocessor) - // 2. Update the encoded column qualifier for the parent table if its on a - // different region server (for tables that use column qualifier encoding) - // if the next step fails we end up wasting a few col qualifiers - // 3. Finally write the mutations to create the table - - if (tableType == PTableType.VIEW) { - // If we are connecting with an old client to a server that has new metadata - // i.e. it was previously connected to by a 4.15 client, then the client will - // also send the parent->child link metadata to SYSTEM.CATALOG rather than using - // the new ChildLinkMetaDataEndpoint coprocessor. In this case, we must continue - // doing the server-server RPC to send these mutations to SYSTEM.CHILD_LINK. - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && - getSystemTableForChildLinks(clientVersion, env.getConfiguration()).equals( - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, - env.getConfiguration()))) { - List childLinkMutations = - MetaDataUtil.removeChildLinkMutations(tableMetadata); - MetaDataResponse response = - processRemoteRegionMutations( - PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, - childLinkMutations, UNABLE_TO_CREATE_CHILD_LINK); - if (response != null) { - done.run(response); - return; - } - } - // Pass in the parent's PTable so that we only tag cells corresponding to the - // view's property in case they are different from the parent - ViewUtil.addTagsToPutsForViewAlteredProperties(tableMetadata, parentTable, - (ExtendedCellBuilder)env.getCellBuilder()); - } - //set the last DDL timestamp to the current server time since we're creating the - // table/index/views. - tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(tableKey, - clientTimeStamp, EnvironmentEdgeManager.currentTimeMillis())); - if (tableType == INDEX) { - // Invalidate the cache on each regionserver for parent table/view. 
- List requests = new ArrayList<>(); - requests.add(new InvalidateServerMetadataCacheRequest(tenantIdBytes, - parentSchemaName, parentTableName)); - invalidateServerMetadataCache(requests); - long currentTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - // If table type is index, then update the last ddl timestamp of the parent - // table or immediate parent view. - tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(parentTableKey, - currentTimestamp, currentTimestamp)); - } - - //and if we're doing change detection on this table or view, notify the - //external schema registry and get its schema id - if (isChangeDetectionEnabled) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - try { - exportSchema(tableMetadata, tableKey, clientTimeStamp, clientVersion, null); - metricsSource.incrementCreateExportCount(); - metricsSource.updateCreateExportTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); - } catch (IOException ie){ - metricsSource.incrementCreateExportFailureCount(); - metricsSource.updateCreateExportFailureTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); - //If we fail to write to the schema registry, fail the entire - //CREATE TABLE or VIEW operation so we stay consistent - LOGGER.error("Error writing schema to external schema registry", ie); - builder.setReturnCode( - MetaDataProtos.MutationCode.ERROR_WRITING_TO_SCHEMA_REGISTRY); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - } - - // When we drop a view we first drop the view metadata and then drop the parent->child linking row - List localMutations = - Lists.newArrayListWithExpectedSize(tableMetadata.size()); - List remoteMutations = Lists.newArrayListWithExpectedSize(2); - // check to see if there are any mutations that should not be applied to this region - separateLocalAndRemoteMutations(region, tableMetadata, localMutations, remoteMutations); - if (!remoteMutations.isEmpty()) { - // there should only be remote mutations if we are creating a view that uses - // encoded column qualifiers (the remote mutations are to update the encoded - // column qualifier counter on the parent table) - if (parentTable != null && tableType == PTableType.VIEW && parentTable - .getEncodingScheme() != QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) { - // TODO: Avoid doing server-server RPC when we have held row locks - MetaDataResponse response = - processRemoteRegionMutations( - SYSTEM_CATALOG_NAME_BYTES, - remoteMutations, MetaDataProtos.MutationCode.UNABLE_TO_UPDATE_PARENT_TABLE); - clearRemoteTableFromCache(clientTimeStamp, - parentTable.getSchemaName() != null - ? parentTable.getSchemaName().getBytes() - : ByteUtil.EMPTY_BYTE_ARRAY, - parentTable.getTableName().getBytes()); - if (response != null) { - done.run(response); - return; - } - } else { - String msg = "Found unexpected mutations while creating " + fullTableName; - LOGGER.error(msg); - for (Mutation m : remoteMutations) { - LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); - LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); - } - throw new IllegalStateException(msg); - } - } - - // TODO: Switch this to HRegion#batchMutate when we want to support indexes on the - // system table. Basically, we get all the locks that we don't already hold for all the - // tableMetadata rows. This ensures we don't have deadlock situations (ensuring - // primary and then index table locks are held, in that order). 
For now, we just don't support - // indexing on the system table. This is an issue because of the way we manage batch mutation - // in the Indexer. - mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, Collections.emptySet(), - HConstants.NO_NONCE, HConstants.NO_NONCE); - - // Invalidate the cache - the next getTable call will add it - // TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - if (parentTableKey != null) { - metaDataCache.invalidate(new ImmutableBytesPtr(parentTableKey)); - } - metaDataCache.invalidate(cacheKey); - // Get timeStamp from mutations - the above method sets it if it's unset - long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); - if (indexId != null) { - builder.setViewIndexId(indexId); - builder.setViewIndexIdType(PLong.INSTANCE.getSqlType()); - } - builder.setMutationTime(currentTimeStamp); - //send the newly built table back because we generated the DDL timestamp server - // side and the client doesn't have it. - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { - // if a client uses a connection with currentSCN=t to create the table, - // the table is created with timestamp 't' but the timestamp range in the scan - // used by buildTable does not include 't' due to how SCN is implemented. - clientTimeStamp += 1; - } - PTable newTable = buildTable(tableKey, cacheKey, region, - clientTimeStamp, clientVersion); - if (newTable != null) { - builder.setTable(PTableImpl.toProto(newTable)); - } - - done.run(builder.build()); - - updateCreateTableDdlSuccessMetrics(tableType); - LOGGER.info("{} created successfully, tableName: {}", tableType, fullTableName); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - LOGGER.error("createTable failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(fullTableName, t)); - } + } + + @Override + public void addColumn(RpcController controller, final AddColumnRequest request, + RpcCallback done) { + try { + List tableMetaData = ProtobufUtil.getMutations(request); + PTable parentTable = + request.hasParentTable() ? PTableImpl.createFromProto(request.getParentTable()) : null; + PTable transformingNewTable = request.hasTransformingNewTable() + ? 
PTableImpl.createFromProto(request.getTransformingNewTable()) + : null; + boolean addingColumns = request.getAddingColumns(); + MetaDataMutationResult result = mutateColumn(tableMetaData, new AddColumnMutator(), + request.getClientVersion(), parentTable, transformingNewTable, addingColumns); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + + if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { + metricsSource.incrementAlterAddColumnCount(); + LOGGER.info("Column(s) added successfully, tableName: {}", + result.getTable().getTableName()); + } + } + } catch (Throwable e) { + LOGGER.error("Add column failed: ", e); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException("Error when adding column: ", e)); } - - private void updateCreateTableDdlSuccessMetrics(PTableType tableType) { - if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) { - metricsSource.incrementCreateTableCount(); - } else if (tableType == PTableType.VIEW) { - metricsSource.incrementCreateViewCount(); - } else if (tableType == PTableType.INDEX) { - metricsSource.incrementCreateIndexCount(); - } + } + + private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, + long clientTimeStamp, int clientVersion) throws IOException, SQLException { + return doGetTable(tenantId, schemaName, tableName, clientTimeStamp, null, clientVersion); + } + + /** + * Looks up the table locally if its present on this region. + */ + private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, + long clientTimeStamp, RowLock rowLock, int clientVersion) throws IOException, SQLException { + Region region = env.getRegion(); + final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + // if this region doesn't contain the metadata rows then fail + if (!region.getRegionInfo().containsRow(key)) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR) + .setSchemaName(Bytes.toString(schemaName)).setTableName(Bytes.toString(tableName)).build() + .buildException(); } - private void exportSchema(List tableMetadata, byte[] tableKey, long clientTimestamp, - int clientVersion, PTable oldTable) throws SQLException, IOException { - List tableCellList = MetaDataUtil.getTableCellsFromMutations(tableMetadata); - - List> allColumnsCellList = MetaDataUtil.getColumnAndLinkCellsFromMutations(tableMetadata); - //getTableFromCells assumes the Cells are sorted as they would be when reading from HBase - Collections.sort(tableCellList, KeyValue.COMPARATOR); - for (List columnCellList : allColumnsCellList) { - Collections.sort(columnCellList, KeyValue.COMPARATOR); - } - - PTable newTable = getTableFromCells(tableCellList, allColumnsCellList, clientTimestamp, - clientVersion, oldTable); - PTable parentTable = null; - //if this is a view, we need to get the columns from its parent table / view - if (newTable != null && newTable.getType().equals(PTableType.VIEW)) { - // TODO why creating generic connection and not getConnectionOnServer? - try (PhoenixConnection conn = (PhoenixConnection) - ConnectionUtil.getInputConnection(env.getConfiguration())) { - newTable = ViewUtil.addDerivedColumnsAndIndexesFromAncestors(conn, newTable); - } - } - Configuration conf = env.getConfiguration(); - SchemaRegistryRepository exporter = SchemaRegistryRepositoryFactory. 
- getSchemaRegistryRepository(conf); - if (exporter != null) { - SchemaWriter schemaWriter = SchemaWriterFactory.getSchemaWriter(conf); - //we export to an external schema registry, then put the schema id - //to lookup the schema in the registry into SYSTEM.CATALOG so we - //can look it up later (and use it in WAL annotations) - - //Note that if we succeed here but the write to SYSTEM.CATALOG fails, - //we can have "orphaned" rows in the schema registry because there's - //no way to make this fully atomic. - String externalSchemaId = - exporter.exportSchema(schemaWriter, newTable); - tableMetadata.add(MetaDataUtil.getExternalSchemaIdUpdate(tableKey, - externalSchemaId)); - + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); + // Ask Lars about the expense of this call - if we don't take the lock, we still won't get + // partial results + // get the co-processor environment + // TODO: check that key is within region.getStartKey() and region.getEndKey() + // and return special code to force client to lookup region from meta. + /* + * Lock directly on key, though it may be an index table. This will just prevent a table from + * getting rebuilt too often. + */ + final boolean wasLocked = (rowLock != null); + try { + if (!wasLocked) { + rowLock = acquireLock(region, key, null, true); + } + PTable table = + getTableFromCacheWithModifiedIndexState(clientTimeStamp, clientVersion, cacheKey); + // We only cache the latest, so we'll end up building the table with every call if the + // client connection has specified an SCN. + // TODO: If we indicate to the client that we're returning an older version, but there's a + // newer version available, the client + // can safely not call this, since we only allow modifications to the latest. + if (table != null && table.getTimeStamp() < clientTimeStamp) { + // Table on client is up-to-date with table on server, so just return + if (isTableDeleted(table)) { + return null; } + return table; + } + // take Phoenix row level write-lock as we need to protect metadata cache update + // after scanning SYSTEM.CATALOG to retrieve the PTable object + LockManager.RowLock phoenixRowLock = + lockManager.lockRow(key, this.metadataCacheRowLockTimeout); + try { + table = getTableFromCacheWithModifiedIndexState(clientTimeStamp, clientVersion, cacheKey); + if (table != null && table.getTimeStamp() < clientTimeStamp) { + if (isTableDeleted(table)) { + return null; + } + return table; + } + // Query for the latest table first, since it's not cached + table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion); + if ( + (table != null && table.getTimeStamp() <= clientTimeStamp) + || (blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0) + ) { + return table; + } + // Otherwise, query for an older version of the table - it won't be cached + table = buildTable(key, cacheKey, region, clientTimeStamp, clientVersion); + return table; + } finally { + phoenixRowLock.release(); + } + } finally { + if (!wasLocked && rowLock != null) { + rowLock.release(); + } } - - private long getViewIndexSequenceValue(PhoenixConnection connection, String tenantIdStr, PTable parentTable) throws SQLException { - int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets(); - // parentTable is parent of the view index which is the view. 
View table name is _IDX_+logical name of base table - // Since parent is the view, the parentTable.getBaseTableLogicalName() returns the logical full name of the base table - PName parentName = parentTable.getBaseTableLogicalName(); - if (parentName == null) { - parentName = SchemaUtil.getPhysicalHBaseTableName(parentTable.getSchemaName(), parentTable.getTableName(), parentTable.isNamespaceMapped()); + } + + private PTable getTableFromCacheWithModifiedIndexState(long clientTimeStamp, int clientVersion, + ImmutableBytesPtr cacheKey) throws SQLException { + PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); + table = modifyIndexStateForOldClient(clientVersion, table); + return table; + } + + private List doGetFunctions(List keys, long clientTimeStamp) + throws IOException, SQLException { + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + Region region = env.getRegion(); + Collections.sort(keys, new Comparator() { + @Override + public int compare(byte[] o1, byte[] o2) { + return Bytes.compareTo(o1, o2); + } + }); + /* + * Lock directly on key, though it may be an index table. This will just prevent a table from + * getting rebuilt too often. + */ + List rowLocks = new ArrayList(keys.size()); + ; + try { + for (int i = 0; i < keys.size(); i++) { + acquireLock(region, keys.get(i), rowLocks, true); + } + + List functionsAvailable = new ArrayList(keys.size()); + int numFunctions = keys.size(); + Iterator iterator = keys.iterator(); + while (iterator.hasNext()) { + byte[] key = iterator.next(); + PFunction function = (PFunction) metaDataCache.getIfPresent(new FunctionBytesPtr(key)); + if (function == null) { + metricsSource.incrementMetadataCacheMissCount(); + } else { + metricsSource.incrementMetadataCacheHitCount(); } - SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, parentName, - nSequenceSaltBuckets, parentTable.isNamespaceMapped()); - // Earlier sequence was created at (SCN-1/LATEST_TIMESTAMP) and incremented at the client max(SCN,dataTable.getTimestamp), but it seems we should - // use always LATEST_TIMESTAMP to avoid seeing wrong sequence values by different connection having SCN - // or not. - long sequenceTimestamp = HConstants.LATEST_TIMESTAMP; - try { - connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), - Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp); - } catch (SequenceAlreadyExistsException e) { - //someone else got here first and created the sequence, or it was pre-existing. Not a problem. + if (function != null && function.getTimeStamp() < clientTimeStamp) { + if (isFunctionDeleted(function)) { + return null; + } + functionsAvailable.add(function); + iterator.remove(); } + } + if (functionsAvailable.size() == numFunctions) return functionsAvailable; - - long[] seqValues = new long[1]; - SQLException[] sqlExceptions = new SQLException[1]; - connection.getQueryServices().incrementSequences(Collections.singletonList(new SequenceAllocation(key, 1)), - HConstants.LATEST_TIMESTAMP, seqValues, sqlExceptions); - if (sqlExceptions[0] != null) { - throw sqlExceptions[0]; - } - return seqValues[0]; + // Query for the latest table first, since it's not cached + List buildFunctions = + buildFunctions(keys, region, clientTimeStamp, false, Collections. 
emptyList()); + if (buildFunctions == null || buildFunctions.isEmpty()) { + return null; + } + functionsAvailable.addAll(buildFunctions); + if (functionsAvailable.size() == numFunctions) return functionsAvailable; + return null; + } finally { + ServerUtil.releaseRowLocks(rowLocks); } - - private boolean execeededIndexQuota(PTableType tableType, PTable parentTable) { - return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= maxIndexesPerTable; + } + + @Override + public void dropColumn(RpcController controller, final DropColumnRequest request, + RpcCallback done) { + List tableMetaData = null; + try { + tableMetaData = ProtobufUtil.getMutations(request); + PTable parentTable = + request.hasParentTable() ? PTableImpl.createFromProto(request.getParentTable()) : null; + MetaDataMutationResult result = + mutateColumn(tableMetaData, new DropColumnMutator(env.getConfiguration()), + request.getClientVersion(), parentTable, null, true); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + + if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { + metricsSource.incrementAlterDropColumnCount(); + LOGGER.info("Column(s) dropped successfully, tableName: {}", + result.getTable().getTableName()); + } + } + } catch (Throwable e) { + LOGGER.error("Drop column failed: ", e); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException("Error when dropping column: ", e)); } - - private void separateLocalAndRemoteMutations(Region region, List mutations, - List localMutations, List remoteMutations) { - RegionInfo regionInfo = region.getRegionInfo(); - for (Mutation mutation : mutations) { - if (regionInfo.containsRow(mutation.getRow())) { - localMutations.add(mutation); - } else { - remoteMutations.add(mutation); - } - } + } + + private MetaDataMutationResult dropIndexes(RegionCoprocessorEnvironment env, PTable table, + List invalidateList, List locks, long clientTimeStamp, + List tableMetaData, PColumn columnToDelete, List tableNamesToDelete, + List sharedTablesToDelete, int clientVersion) throws Throwable { + // Look for columnToDelete in any indexes. If found as PK column, get lock and drop the + // index and then invalidate it + // Covered columns are deleted from the index by the client + Region region = env.getRegion(); + PhoenixConnection connection = table.getIndexes().isEmpty() + ? null + : getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class); + for (PTable index : table.getIndexes()) { + // ignore any indexes derived from ancestors + if (index.getName().getString().contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { + continue; + } + byte[] tenantId = + index.getTenantId() == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(); + IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); + byte[] indexKey = SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), + index.getTableName().getBytes()); + Pair columnToDeleteInfo = new Pair<>( + columnToDelete.getFamilyName().getString(), columnToDelete.getName().getString()); + ColumnReference colDropRef = new ColumnReference(columnToDelete.getFamilyName().getBytes(), + columnToDelete.getColumnQualifierBytes()); + boolean isColumnIndexed = indexMaintainer.getIndexedColumnInfo().contains(columnToDeleteInfo); + boolean isCoveredColumn = indexMaintainer.getCoveredColumns().contains(colDropRef); + // If index requires this column for its pk, then drop it + if (isColumnIndexed) { + // Drop the index table. The doDropTable will expand + // this to all of the table rows and invalidate the + // index table + Delete delete = new Delete(indexKey, clientTimeStamp); + byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantId, table.getSchemaName().getBytes(), + table.getTableName().getBytes(), index.getTableName().getBytes()); + // Drop the link between the parent table and the + // index table + Delete linkDelete = new Delete(linkKey, clientTimeStamp); + tableMetaData.add(delete); + tableMetaData.add(linkDelete); + // Since we're dropping the index, lock it to ensure + // that a change in index state doesn't + // occur while we're dropping it. + acquireLock(region, indexKey, locks, false); + // invalidate server metadata cache when dropping index + List requests = new ArrayList<>(); + requests.add(new InvalidateServerMetadataCacheRequest(tenantId, + index.getSchemaName().getBytes(), index.getTableName().getBytes())); + + invalidateServerMetadataCache(requests); + List childLinksMutations = Lists.newArrayList(); + MetaDataMutationResult result = doDropTable(indexKey, tenantId, + index.getSchemaName().getBytes(), index.getTableName().getBytes(), + table.getName().getBytes(), index.getType(), tableMetaData, childLinksMutations, + invalidateList, tableNamesToDelete, sharedTablesToDelete, clientVersion); + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } + metricsSource.incrementDropIndexCount(); + LOGGER.info("INDEX dropped successfully, tableName: {}", result.getTable().getTableName()); + + // there should be no child links to delete since we are just dropping an index + if (!childLinksMutations.isEmpty()) { + LOGGER.error( + "Found unexpected child link mutations while dropping an index " + childLinksMutations); + } + invalidateList.add(new ImmutableBytesPtr(indexKey)); + } + // If the dropped column is a covered index column, invalidate the index + else if (isCoveredColumn) { + invalidateList.add(new ImmutableBytesPtr(indexKey)); + } } - - @Override - public void dropTable(RpcController controller, DropTableRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - boolean isCascade = request.getCascade(); - byte[][] rowKeyMetaData = new byte[3][]; - String tableType = request.getTableType(); - byte[] schemaName = null; - byte[] tableOrViewName = null; - boolean dropTableStats = false; - final int clientVersion = request.getClientVersion(); - try { - List tableMetadata = ProtobufUtil.getMutations(request); - List childLinkMutations = Lists.newArrayList(); - MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - byte[] tenantIdBytes = 
rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - tableOrViewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - String fullTableName = SchemaUtil.getTableName(schemaName, tableOrViewName); - PTableType pTableType = PTableType.fromSerializedValue(tableType); - // Disallow deletion of a system table - if (pTableType == PTableType.SYSTEM) { - builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - - List tableNamesToDelete = Lists.newArrayList(); - List sharedTablesToDelete = Lists.newArrayList(); - - byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableOrViewName); - Region region = env.getRegion(); - MetaDataMutationResult result = checkTableKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - - byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata); - byte[] parentLockKey = null; - // Only lock parent table for indexes - if (parentTableName != null && pTableType == PTableType.INDEX) { - parentLockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, parentTableName); - result = checkTableKeyInRegion(parentLockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - } - - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); - PTable loadedTable = doGetTable(tenantIdBytes, schemaName, tableOrViewName, - clientTimeStamp, null, request.getClientVersion()); - if (loadedTable == null) { - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - getCoprocessorHost().preDropTable(Bytes.toString(tenantIdBytes), - SchemaUtil.getTableName(schemaName, tableOrViewName), - TableName.valueOf(loadedTable.getPhysicalName().getBytes()), - getParentPhysicalTableName(loadedTable), pTableType, loadedTable.getIndexes()); - - if (pTableType == PTableType.TABLE || pTableType == PTableType.VIEW) { - // check to see if the table has any child views - try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env, - getSystemTableForChildLinks(clientVersion, env.getConfiguration()))) { - // This call needs to be done before acquiring the row lock on the header row - // for the table/view being dropped, otherwise the calls to resolve its child - // views via PhoenixRuntime.getTableNoCache() will deadlock since this call - // itself needs to get the parent table which needs to acquire a write lock - // on the same header row - Pair, List> descendantViews = - findAllDescendantViews(hTable, env.getConfiguration(), - tenantIdBytes, schemaName, tableOrViewName, clientTimeStamp, - true); - List legitimateChildViews = descendantViews.getFirst(); - List orphanChildViews = descendantViews.getSecond(); - if (!legitimateChildViews.isEmpty()) { - if (!isCascade) { - LOGGER.error("DROP without CASCADE on tables or views with child views " - + "is not permitted"); - // DROP without CASCADE on tables/views with child views is - // not permitted - builder.setReturnCode( - MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && - 
!SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, - env.getConfiguration()).equals(hTable.getName())) { - // (See PHOENIX-5544) For an old client connecting to a non-upgraded - // server, we disallow dropping a base table/view that has child views. - LOGGER.error("Dropping a table or view that has child views is " - + "not permitted for old clients connecting to a new server " - + "with old metadata (even if CASCADE is provided). " - + "Please upgrade the client at least to " - + MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION); - builder.setReturnCode( - MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - } - - // If the CASCADE option is provided and we have at least one legitimate/orphan - // view stemming from this parent and the client is 4.15+ (or older but - // connecting to an upgraded server), we use the SYSTEM.TASK table to - // asynchronously drop child views - if (isCascade && !(legitimateChildViews.isEmpty() && orphanChildViews.isEmpty()) - && (clientVersion >= MIN_SPLITTABLE_SYSTEM_CATALOG || - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, - env.getConfiguration()).equals(hTable.getName()))) { - try (PhoenixConnection conn = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(conn) - .setTaskType(PTable.TaskType.DROP_CHILD_VIEWS) - .setTenantId(Bytes.toString(tenantIdBytes)) - .setSchemaName(Bytes.toString(schemaName)) - .setTableName(Bytes.toString(tableOrViewName)) - .setTaskStatus( - PTable.TaskStatus.CREATED.toString()) - .setData(null) - .setPriority(null) - .setStartTs(null) - .setEndTs(null) - .setAccessCheckEnabled(this.accessCheckEnabled) - .build()); - } catch (Throwable t) { - LOGGER.error("Adding a task to drop child views failed!", t); - } - } - } - } - - List locks = Lists.newArrayList(); - try { - acquireLock(region, lockKey, locks, false); - if (parentLockKey != null) { - acquireLock(region, parentLockKey, locks, false); - } - List requests = new ArrayList<>(); - requests.add(new InvalidateServerMetadataCacheRequest(tenantIdBytes, schemaName, - tableOrViewName)); - if (pTableType == INDEX) { - requests.add(new InvalidateServerMetadataCacheRequest(tenantIdBytes, schemaName, - parentTableName)); - long currentTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - // If table type is index, then update the last ddl timestamp of the parent - // table or immediate parent view. 
- tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate(parentLockKey, - currentTimestamp, currentTimestamp)); - } - invalidateServerMetadataCache(requests); - List invalidateList = new ArrayList<>(); - result = doDropTable(lockKey, tenantIdBytes, schemaName, tableOrViewName, - parentTableName, PTableType.fromSerializedValue(tableType), tableMetadata, - childLinkMutations, invalidateList, tableNamesToDelete, - sharedTablesToDelete, request.getClientVersion()); - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - - List localMutations = - Lists.newArrayListWithExpectedSize(tableMetadata.size()); - List remoteMutations = Lists.newArrayList(); - separateLocalAndRemoteMutations(region, tableMetadata, localMutations, - remoteMutations); - if (!remoteMutations.isEmpty()) { - // while dropping a table all the mutations should be local - String msg = "Found unexpected mutations while dropping table " - + SchemaUtil.getTableName(schemaName, tableOrViewName); - LOGGER.error(msg); - for (Mutation m : remoteMutations) { - LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); - LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); - } - throw new IllegalStateException(msg); - } - - // drop rows from catalog on this region - mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, - Collections.emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); - - long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); - for (ImmutableBytesPtr ckey : invalidateList) { - PTable table = newDeletedTableMarker(currentTime); - metaDataCache.put(ckey, table); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(table.getEstimatedSize()); - } - if (parentLockKey != null) { - ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(parentLockKey); - metaDataCache.invalidate(parentCacheKey); - } - - // after the view metadata is dropped, drop parent->child link - MetaDataResponse response = processRemoteRegionMutations( - getSystemTableForChildLinks(request.getClientVersion(), - env.getConfiguration()).getName(), childLinkMutations, - MetaDataProtos.MutationCode.UNABLE_TO_DELETE_CHILD_LINK); - if (response != null) { - done.run(response); - return; - } - - done.run(MetaDataMutationResult.toProto(result)); - dropTableStats = true; - - updateDropTableDdlSuccessMetrics(pTableType); - LOGGER.info("{} dropped successfully, tableName: {}", pTableType, fullTableName); - } finally { - ServerUtil.releaseRowLocks(locks); - if (dropTableStats) { - Thread statsDeleteHandler = new Thread(new StatsDeleteHandler(env, - loadedTable, tableNamesToDelete, sharedTablesToDelete), - "thread-statsdeletehandler"); - statsDeleteHandler.setDaemon(true); - statsDeleteHandler.start(); - } - } - } catch (Throwable t) { - LOGGER.error("dropTable failed", t); - ProtobufUtil.setControllerException(controller, ClientUtil.createIOException( - SchemaUtil.getTableName(schemaName, tableOrViewName), t)); - } + if (connection != null) { + connection.close(); } - - private void updateDropTableDdlSuccessMetrics(PTableType pTableType) { - if (pTableType == PTableType.TABLE || pTableType == PTableType.SYSTEM) { - metricsSource.incrementDropTableCount(); - } else if (pTableType == PTableType.VIEW) { - metricsSource.incrementDropViewCount(); - } else if (pTableType == PTableType.INDEX) { - 
metricsSource.incrementDropIndexCount(); + return null; + } + + private MetaDataMutationResult dropRemoteIndexes(RegionCoprocessorEnvironment env, PTable table, + long clientTimeStamp, PColumn columnToDelete, List tableNamesToDelete, + List sharedTablesToDelete) throws SQLException { + // Look for columnToDelete in any indexes. If found as PK column, get lock and drop the + // index and then invalidate it + // Covered columns are deleted from the index by the client + PhoenixConnection connection = table.getIndexes().isEmpty() + ? null + : getServerConnectionForMetaData(env.getConfiguration()).unwrap(PhoenixConnection.class); + for (PTable index : table.getIndexes()) { + byte[] tenantId = + index.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(); + IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); + byte[] indexKey = SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), + index.getTableName().getBytes()); + Pair columnToDeleteInfo = new Pair<>( + columnToDelete.getFamilyName().getString(), columnToDelete.getName().getString()); + ColumnReference colDropRef = new ColumnReference(columnToDelete.getFamilyName().getBytes(), + columnToDelete.getColumnQualifierBytes()); + boolean isColumnIndexed = indexMaintainer.getIndexedColumnInfo().contains(columnToDeleteInfo); + boolean isCoveredColumn = indexMaintainer.getCoveredColumns().contains(colDropRef); + // If index requires this column for its pk, then drop it + if (isColumnIndexed) { + // Drop the index table. The doDropTable will expand + // this to all of the table rows and invalidate the + // index table + Delete delete = new Delete(indexKey, clientTimeStamp); + byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantId, table.getSchemaName().getBytes(), + table.getTableName().getBytes(), index.getTableName().getBytes()); + // Drop the link between the parent table and the + // index table + Delete linkDelete = new Delete(linkKey, clientTimeStamp); + List remoteDropMetadata = Lists.newArrayListWithExpectedSize(2); + remoteDropMetadata.add(delete); + remoteDropMetadata.add(linkDelete); + // if the index is not present on the current region make an rpc to drop it + Properties props = new Properties(); + if (tenantId != null) { + props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(tenantId)); } + if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { + props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); + } + ConnectionQueryServices queryServices = connection.getQueryServices(); + MetaDataMutationResult result = + queryServices.dropTable(remoteDropMetadata, PTableType.INDEX, false); + if (result.getTableNamesToDelete() != null && !result.getTableNamesToDelete().isEmpty()) + tableNamesToDelete.addAll(result.getTableNamesToDelete()); + if (result.getSharedTablesToDelete() != null && !result.getSharedTablesToDelete().isEmpty()) + sharedTablesToDelete.addAll(result.getSharedTablesToDelete()); + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + return result; + } + } + // If the dropped column is a covered index column, invalidate the index + else if (isCoveredColumn) { + clearRemoteTableFromCache(clientTimeStamp, + index.getSchemaName() != null + ? 
index.getSchemaName().getBytes() + : ByteUtil.EMPTY_BYTE_ARRAY, + index.getTableName().getBytes()); + } } - - private static class StatsDeleteHandler implements Runnable { - PTable deletedTable; - List physicalTableNames; - List sharedTableStates; - RegionCoprocessorEnvironment env; - - StatsDeleteHandler(RegionCoprocessorEnvironment env, PTable deletedTable, List physicalTableNames, - List sharedTableStates) { - this.deletedTable = deletedTable; - this.physicalTableNames = physicalTableNames; - this.sharedTableStates = sharedTableStates; - this.env = env; - } - - @Override - public void run() { - try { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - try { - MetaDataUtil.deleteFromStatsTable(connection, deletedTable, - physicalTableNames, sharedTableStates); - LOGGER.info("Table stats deleted successfully, tablename is {}." - , deletedTable.getPhysicalName().getString()); - } catch(Throwable t) { - LOGGER.warn("Exception while deleting stats of table " - + deletedTable.getPhysicalName().getString() - + " please check and delete stats manually"); - } - } - return null; - } - }); - } catch (IOException e) { - LOGGER.warn("Exception while deleting stats of table " - + deletedTable.getPhysicalName().getString() - + " please check and delete stats manually"); - } - } + if (connection != null) { + connection.close(); } - - protected RowLock acquireLock(Region region, byte[] lockKey, List locks, boolean readLock) throws IOException { - RowLock rowLock = region.getRowLock(lockKey, this.getMetadataReadLockEnabled && readLock); - if (rowLock == null) { - throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(lockKey)); - } - if (locks != null) { - locks.add(rowLock); - } - return rowLock; - } - - private MetaDataResponse processRemoteRegionMutations(byte[] systemTableName, - List remoteMutations, MetaDataProtos.MutationCode mutationCode) throws IOException { - if (remoteMutations.isEmpty()) { - return null; - } - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - try (Table hTable = - ServerUtil.getHTableForCoprocessorScan(env, - SchemaUtil.getPhysicalTableName(systemTableName, env.getConfiguration()))) { - hTable.batch(remoteMutations, null); - } catch (Throwable t) { - LOGGER.error("Unable to write mutations to " + Bytes.toString(systemTableName), t); - builder.setReturnCode(mutationCode); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - return builder.build(); - } - return null; + return null; + } + + @Override + public void clearCache(RpcController controller, ClearCacheRequest request, + RpcCallback done) { + GlobalCache cache = GlobalCache.getInstance(this.env); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + metaDataCache.invalidateAll(); + long unfreedBytes = cache.clearTenantCache(); + ClearCacheResponse.Builder builder = ClearCacheResponse.newBuilder(); + builder.setUnfreedBytes(unfreedBytes); + done.run(builder.build()); + } + + @Override + public void getVersion(RpcController controller, GetVersionRequest request, + RpcCallback done) { + + GetVersionResponse.Builder builder = GetVersionResponse.newBuilder(); + Configuration config = env.getConfiguration(); + if ( + isTablesMappingEnabled + && MetaDataProtocol.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion() + ) { + LOGGER.error( + "Old 
client is not compatible when" + " system tables are upgraded to map to namespace"); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, + isTablesMappingEnabled).toString(), + new DoNotRetryIOException("Old client is not compatible when" + + " system tables are upgraded to map to namespace"))); } - - private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, - byte[] tableName, byte[] parentTableName, PTableType tableType, - List catalogMutations, List childLinkMutations, - List invalidateList, List tableNamesToDelete, - List sharedTablesToDelete, int clientVersion) - throws IOException, SQLException { - + long version = MetaDataUtil.encodeVersion(env.getHBaseVersion(), config); + + PTable systemCatalog = null; + try { + systemCatalog = + doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES, HConstants.LATEST_TIMESTAMP, null, + request.getClientVersion()); + } catch (Throwable t) { + boolean isErrorSwallowed = false; + if ( + t instanceof SQLException + && ((SQLException) t).getErrorCode() == SQLExceptionCode.GET_TABLE_ERROR.getErrorCode() + ) { Region region = env.getRegion(); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(catalogMutations); - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); - - PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); - - // We always cache the latest version - fault in if not in cache - if (table != null || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, - clientVersion)) != null) { - if (table.getTimeStamp() < clientTimeStamp) { - if (isTableDeleted(table) || tableType != table.getType()) { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - } else { - return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - } - // We didn't find a table at the latest timestamp, so either there is no table or - // there was a table, but it's been deleted. In either case we want to return. - if (table == null) { - if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) { - return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - // Make sure we're not deleting the "wrong" child - if (parentTableName != null && table.getParentTableName() != null && - !Arrays.equals(parentTableName, table.getParentTableName().getBytes())) { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - // Since we don't allow back in time DDL, we know if we have a table it's the one - // we want to delete. FIXME: we shouldn't need a scan here, but should be able to - // use the table to generate the Delete markers. 
- Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp); - List indexNames = Lists.newArrayList(); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan);) { - scanner.next(results); - if (results.isEmpty()) { // Should not be possible - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - - // Add to list of HTables to delete, unless it's a view or its a shared index - if (tableType == INDEX && table.getViewIndexId() != null) { - sharedTablesToDelete.add(new SharedTableState(table)); - } else if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { - tableNamesToDelete.add(table.getPhysicalName().getBytes()); - } - invalidateList.add(cacheKey); - byte[][] rowKeyMetaData = new byte[5][]; - do { - Cell kv = results.get(LINK_TYPE_INDEX); - int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - 0, rowKeyMetaData); - if (nColumns == 5 - && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 - && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength(), - LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) { - LinkType linkType = LinkType.fromSerializedValue( - kv.getValueArray()[kv.getValueOffset()]); - if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && - linkType == LinkType.INDEX_TABLE) { - indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]); - } else if (tableType == PTableType.VIEW && (linkType == PARENT_TABLE || - linkType == PHYSICAL_TABLE)) { - // Populate the delete mutations for parent->child link for the child view - // in question, which we issue to SYSTEM.CHILD_LINK later - Cell parentTenantIdCell = MetaDataUtil.getCell(results, - PARENT_TENANT_ID_BYTES); - PName parentTenantId = parentTenantIdCell != null ? - PNameFactory.newName(parentTenantIdCell.getValueArray(), - parentTenantIdCell.getValueOffset(), - parentTenantIdCell.getValueLength()) : null; - byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, - table.getParentSchemaName(), table.getParentTableName(), - table.getTenantId(), table.getName()); - Delete linkDelete = new Delete(linkKey, clientTimeStamp); - childLinkMutations.add(linkDelete); - } - } - Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - clientTimeStamp); - catalogMutations.add(delete); - results.clear(); - scanner.next(results); - } while (!results.isEmpty()); - } - - // Recursively delete indexes - for (byte[] indexName : indexNames) { - byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName); - // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870). - // FIXME: the version of the Delete constructor without the lock args was introduced - // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version - // of the client. 
- Delete delete = new Delete(indexKey, clientTimeStamp); - catalogMutations.add(delete); - MetaDataMutationResult result = doDropTable(indexKey, tenantId, schemaName, indexName, - tableName, PTableType.INDEX, catalogMutations, childLinkMutations, - invalidateList, tableNamesToDelete, sharedTablesToDelete, clientVersion); - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - } - - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && tableType == PTableType.VIEW) { - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - PTable pTable = connection.getTableNoCache(table.getParentName().getString()); - table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, pTable); - } - } - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, - EnvironmentEdgeManager.currentTimeMillis(), table, tableNamesToDelete, - sharedTablesToDelete); - } - - /** - * Validate if mutation is allowed on a parent table/view based on their child views. - * If this method returns MetaDataMutationResult, mutation is not allowed, and returned object - * will contain returnCode (MutationCode) to indicate the underlying problem - * (validation failure code). - * - * @param expectedType expected type of PTable - * @param clientTimeStamp client timestamp, e.g check - * {@link MetaDataUtil#getClientTimeStamp(List)} - * @param tenantId tenant Id - * @param schemaName schema name - * @param tableOrViewName table or view name - * @param childViews child views of table or parent view. Usually this is an empty list - * passed to this method, and this method will add child views retrieved using - * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], - * long, boolean)} - * @param clientVersion client version, used to determine if mutation is allowed. - * @return Optional.empty() if mutation is allowed on parent table/view. If not allowed, - * returned Optional object will contain metaDataMutationResult with MutationCode. 
- * @throws IOException if something goes wrong while retrieving child views using - * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], - * long, boolean)} - * @throws SQLException if something goes wrong while retrieving child views using - * {@link ViewUtil#findAllDescendantViews(Table, Configuration, byte[], byte[], byte[], - * long, boolean)} - */ - private Optional validateIfMutationAllowedOnParent( - final PTable parentTable, - final List tableMetadata, - final PTableType expectedType, final long clientTimeStamp, - final byte[] tenantId, final byte[] schemaName, - final byte[] tableOrViewName, final List childViews, - final int clientVersion) throws IOException, SQLException { - boolean isMutationAllowed = true; - boolean isSchemaMutationAllowed = true; - Pair scanSysCatForTTLDefinedOnAnyChildPair = new Pair<>(true, false); - if (expectedType == PTableType.TABLE || expectedType == PTableType.VIEW) { - try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env, - getSystemTableForChildLinks(clientVersion, env.getConfiguration())); - Table sysCat = ServerUtil.getHTableForCoprocessorScan(env, - SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, - env.getConfiguration()))) { - List legitimateChildViews = new ArrayList<>(); - - childViews.addAll(findAllDescendantViews(hTable, sysCat, env.getConfiguration(), - tenantId, schemaName, tableOrViewName, clientTimeStamp, new ArrayList<>(), - new ArrayList<>(), false, - scanSysCatForTTLDefinedOnAnyChildPair) - .getFirst()); - } - - if (!childViews.isEmpty()) { - // From 4.15 onwards we allow SYSTEM.CATALOG to split and no longer - // propagate parent metadata changes to child views. - // If the client is on a version older than 4.15 we have to block adding a - // column to a parent as we no longer lock the parent on the - // server side while creating a child view to prevent conflicting changes. - // This is handled on the client side from 4.15 onwards. - // Also if QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK is true, - // we block adding a column to a parent so that we can rollback the - // upgrade if required. - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG) { - isMutationAllowed = false; - LOGGER.error("Unable to add or drop a column as the client is older than {}", - MIN_SPLITTABLE_SYSTEM_CATALOG_VERSION); - } else if (allowSplittableSystemCatalogRollback) { - isMutationAllowed = false; - LOGGER.error("Unable to add or drop a column as the {} config is set to true", - QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); - } - } - - if (scanSysCatForTTLDefinedOnAnyChildPair.getSecond() - && validateTTLAttributeSettingForEntity(tableMetadata, TTL_BYTES)) { - //We got here means There was already TTL defined at one of the child, and we are - //trying to set TTL at current level which should not be allowed as TTL can only - //be defined at one level in hierarchy. - throw new SQLExceptionInfo.Builder(SQLExceptionCode. 
- TTL_ALREADY_DEFINED_IN_HIERARCHY) - .setSchemaName(Arrays.toString(schemaName)) - .setTableName(Arrays.toString(tableOrViewName)) - .build() - .buildException(); - } - } - if (!isMutationAllowed) { - MetaDataMutationResult metaDataMutationResult = - new MetaDataMutationResult( - MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - return Optional.of(metaDataMutationResult); - } - if (!isSchemaMutationAllowed) { - MetaDataMutationResult metaDataMutationResult = - new MetaDataMutationResult( - MetaDataProtocol.MutationCode.UNALLOWED_SCHEMA_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - return Optional.of(metaDataMutationResult); - } - return Optional.empty(); - } - - private MetaDataMutationResult mutateColumn( - final List tableMetadata, - final ColumnMutator mutator, final int clientVersion, - final PTable parentTable, final PTable transformingNewTable, boolean isAddingOrDroppingColumns) throws IOException { - byte[][] rowKeyMetaData = new byte[5][]; - MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableOrViewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableOrViewName); - String fullTableName = SchemaUtil.getTableName(schemaName, tableOrViewName); - // server-side, except for indexing, we always expect the keyvalues to be standard KeyValues - PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, - GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable()); - List tableNamesToDelete = Lists.newArrayList(); - List sharedTablesToDelete = Lists.newArrayList(); + final byte[] key = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES); + if ( + !region.getRegionInfo().containsRow(key) + && request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG + ) { + LOGGER.debug("The pre-4.15 client is trying to get SYSTEM.CATALOG " + + "region that contains head row"); + isErrorSwallowed = true; + } + } + if (!isErrorSwallowed) { + LOGGER.error("loading system catalog table inside getVersion failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException( + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, + isTablesMappingEnabled).toString(), + t)); + } + } + // In case this is the first connection, system catalog does not exist, and so we don't + // set the optional system catalog timestamp. 
+ if (systemCatalog != null) { + builder.setSystemCatalogTimestamp(systemCatalog.getTimeStamp()); + } + builder.setVersion(version); + done.run(builder.build()); + } + + @Override + public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + byte[] schemaName = null; + byte[] tableName = null; + try { + byte[][] rowKeyMetaData = new byte[3][]; + List tableMetadata = ProtobufUtil.getMutations(request); + MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); + byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkTableKeyInRegion(key, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + long timeStamp = HConstants.LATEST_TIMESTAMP; + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); + List newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES); + Cell newKV = null; + int disableTimeStampKVIndex = -1; + int indexStateKVIndex = 0; + int index = 0; + for (Cell cell : newKVs) { + if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0 + ) { + newKV = cell; + indexStateKVIndex = index; + timeStamp = cell.getTimestamp(); + } else if ( + Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), INDEX_DISABLE_TIMESTAMP_BYTES, 0, + INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0 + ) { + disableTimeStampKVIndex = index; + } + index++; + } + PIndexState newState = + PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]); + RowLock rowLock = acquireLock(region, key, null, false); + if (rowLock == null) { + throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key)); + } + + Get get = new Get(key); + get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); + get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); + get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); + get.addColumn(TABLE_FAMILY_BYTES, + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES); + try (RegionScanner scanner = region.getScanner(new Scan(get))) { + List cells = new ArrayList<>(); + scanner.next(cells); + if (cells.isEmpty()) { + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + Result currentResult = Result.create(cells); + Cell dataTableKV = + currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); + Cell currentStateKV = + currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); + Cell currentDisableTimeStamp = + currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); + boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, + MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null; + + // check permission on data table long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); - try { - Region region = env.getRegion(); - MetaDataMutationResult 
result = checkTableKeyInRegion(key, region); - if (result != null) { - return result; + PTable loadedTable = doGetTable(tenantId, schemaName, tableName, clientTimeStamp, null, + request.getClientVersion()); + if (loadedTable == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + List requests = new ArrayList<>(); + requests.add(new InvalidateServerMetadataCacheRequest(tenantId, schemaName, tableName)); + invalidateServerMetadataCache(requests); + getCoprocessorHost().preIndexUpdate(Bytes.toString(tenantId), + SchemaUtil.getTableName(schemaName, tableName), + TableName.valueOf(loadedTable.getPhysicalName().getBytes()), + getParentPhysicalTableName(loadedTable), newState); + + PIndexState currentState = PIndexState + .fromSerializedValue(currentStateKV.getValueArray()[currentStateKV.getValueOffset()]); + // Timestamp of INDEX_STATE gets updated with each call + long actualTimestamp = currentStateKV.getTimestamp(); + long curTimeStampVal = 0; + long newDisableTimeStamp = 0; + if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0)) { + curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), + currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength()); + // new DisableTimeStamp is passed in + if (disableTimeStampKVIndex >= 0) { + Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex); + long expectedTimestamp = newDisableTimeStampCell.getTimestamp(); + // If the index status has been updated after the upper bound of the scan we use + // to partially rebuild the index, then we need to fail the rebuild because an + // index write failed before the rebuild was complete. + if (actualTimestamp > expectedTimestamp) { + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; } - - List locks = Lists.newArrayList(); - try { - List childViews = Lists.newArrayList(); - Optional mutationResult = validateIfMutationAllowedOnParent( - parentTable, tableMetadata, - expectedType, clientTimeStamp, tenantId, schemaName, tableOrViewName, - childViews, clientVersion); - // only if mutation is allowed, we should get Optional.empty() here - if (mutationResult.isPresent()) { - return mutationResult.get(); - } - // We take a write row lock for tenantId, schemaName, tableOrViewName - acquireLock(region, key, locks, false); - // Invalidate the cache from all the regionservers. - List requests = new ArrayList<>(); - requests.add(new InvalidateServerMetadataCacheRequest(tenantId, schemaName, - tableOrViewName)); - invalidateServerMetadataCache(requests); - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); - List invalidateList = new ArrayList<>(); - invalidateList.add(cacheKey); - PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); - if (failConcurrentMutateAddColumnOneTimeForTesting) { - failConcurrentMutateAddColumnOneTimeForTesting = false; - return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - if (LOGGER.isDebugEnabled()) { - if (table == null) { - LOGGER.debug("Table " + Bytes.toStringBinary(key) - + " not found in cache. 
Will build through scan"); - } else { - LOGGER.debug("Table " + Bytes.toStringBinary(key) - + " found in cache with timestamp " + table.getTimeStamp() - + " seqNum " + table.getSequenceNumber()); - } - } - // Get client timeStamp from mutations - if (table == null && (table = buildTable(key, cacheKey, region, - HConstants.LATEST_TIMESTAMP, clientVersion)) == null) { - // if not found then call newerTableExists and add delete marker for timestamp - // found - table = buildDeletedTable(key, cacheKey, region, clientTimeStamp); - if (table != null) { - LOGGER.info("Found newer table deleted as of " + table.getTimeStamp() - + " versus client timestamp of " + clientTimeStamp); - return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - - // if this is a view or view index then we need to include columns and - // indexes derived from its ancestors - if (parentTable != null) { - Properties props = new Properties(); - if (tenantId != null) { - props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(tenantId)); - } - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { - props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); - } - try (PhoenixConnection connection = getServerConnectionForMetaData(props, - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, table, - parentTable); - } - } - if (transformingNewTable !=null) { - table = PTableImpl.builderWithColumns(table, getColumnsToClone(table)) - .setTransformingNewTable(transformingNewTable).build(); - } - - if (table.getTimeStamp() >= clientTimeStamp) { - LOGGER.info("Found newer table as of " + table.getTimeStamp() - + " versus client timestamp of " + clientTimeStamp); - return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), table); - } else if (isTableDeleted(table)) { - return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - // lookup TABLE_SEQ_NUM in tableMetaData - long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum " - + expectedSeqNum + " and found seqNum " + table.getSequenceNumber() - + " with " + table.getColumns().size() + " columns: " - + table.getColumns()); - } - if (expectedSeqNum != table.getSequenceNumber()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("For table " + Bytes.toStringBinary(key) - + " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum"); - } - return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), table); - } - - PTableType type = table.getType(); - if (type == PTableType.INDEX) { - // Disallow mutation of an index table - return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, - EnvironmentEdgeManager.currentTimeMillis(), null); - } else { - // We said to drop a table, but found a view or visa versa - if (type != expectedType) { - return new MetaDataProtocol.MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - } - - if (!childViews.isEmpty()) { - // validate the add or drop column mutations - result = mutator.validateWithChildViews(table, 
childViews, tableMetadata, - schemaName, tableOrViewName); - if (result != null) { - return result; - } - } - - getCoprocessorHost().preAlterTable(Bytes.toString(tenantId), - SchemaUtil.getTableName(schemaName, tableOrViewName), - TableName.valueOf(table.getPhysicalName().getBytes()), - getParentPhysicalTableName(table), table.getType()); - - result = mutator.validateAndAddMetadata(table, rowKeyMetaData, tableMetadata, - region, invalidateList, locks, clientTimeStamp, clientVersion, - ((ExtendedCellBuilder) env.getCellBuilder()), isAddingOrDroppingColumns); - // if the update mutation caused tables to be deleted, the mutation code returned - // will be MutationCode.TABLE_ALREADY_EXISTS - if (result != null - && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - - // drop any indexes on the base table that need the column that is going to be - // dropped - List> tableAndDroppedColumnPairs = - mutator.getTableAndDroppedColumnPairs(); - Iterator> iterator = tableAndDroppedColumnPairs.iterator(); - while (iterator.hasNext()) { - Pair pair = iterator.next(); - // remove the current table and column being dropped from the list and drop any - // indexes that require the column being dropped while holding the row lock - if (table.equals(pair.getFirst())) { - iterator.remove(); - result = dropIndexes(env, pair.getFirst(), invalidateList, locks, - clientTimeStamp, tableMetadata, pair.getSecond(), - tableNamesToDelete, sharedTablesToDelete, clientVersion); - if (result != null - && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - } - } - - if (table.isChangeDetectionEnabled() || MetaDataUtil.getChangeDetectionEnabled(tableMetadata)) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - try { - exportSchema(tableMetadata, key, clientTimeStamp, clientVersion, table); - metricsSource.incrementAlterExportCount(); - metricsSource.updateAlterExportTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); - } catch (Exception e) { - LOGGER.error("Error writing to schema registry", e); - metricsSource.incrementAlterExportFailureCount(); - metricsSource.updateAlterExportFailureTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); - result = new MetaDataMutationResult(MutationCode.ERROR_WRITING_TO_SCHEMA_REGISTRY, - EnvironmentEdgeManager.currentTimeMillis(), table); - return result; - } - } - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - - // The mutations to add a column are written in the following order: - // 1. Update the encoded column qualifier for the parent table if its on a - // different region server (for tables that use column qualifier encoding) - // if the next step fails we end up wasting a few col qualifiers - // 2. 
Write the mutations to add the column - - List localMutations = - Lists.newArrayListWithExpectedSize(tableMetadata.size()); - List remoteMutations = Lists.newArrayList(); - separateLocalAndRemoteMutations(region, tableMetadata, localMutations, - remoteMutations); - if (!remoteMutations.isEmpty()) { - // there should only be remote mutations if we are adding a column to a view - // that uses encoded column qualifiers (the remote mutations are to update the - // encoded column qualifier counter on the parent table) - if (( mutator.getMutateColumnType() == ColumnMutator.MutateColumnType.ADD_COLUMN - && type == PTableType.VIEW - && table.getEncodingScheme() != - QualifierEncodingScheme.NON_ENCODED_QUALIFIERS)) { - processRemoteRegionMutations( - SYSTEM_CATALOG_NAME_BYTES, remoteMutations, - MetaDataProtos.MutationCode.UNABLE_TO_UPDATE_PARENT_TABLE); - //if we're a view or index, clear the cache for our parent - if ((type == PTableType.VIEW || type == INDEX) && table.getParentTableName() != null) { - clearRemoteTableFromCache(clientTimeStamp, - table.getParentSchemaName() != null - ? table.getParentSchemaName().getBytes() - : ByteUtil.EMPTY_BYTE_ARRAY, - table.getParentTableName().getBytes()); - } - } else { - String msg = "Found unexpected mutations while adding or dropping column " - + "to " + fullTableName; - LOGGER.error(msg); - for (Mutation m : remoteMutations) { - LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow())); - LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap()); - } - throw new IllegalStateException(msg); - } - } - mutateRowsWithLocks(this.accessCheckEnabled, region, localMutations, - Collections.emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); - // Invalidate from cache - for (ImmutableBytesPtr invalidateKey : invalidateList) { - metaDataCache.invalidate(invalidateKey); - } - // Get client timeStamp from mutations, since it may get updated by the - // mutateRowsWithLocks call - long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); - // if the update mutation caused tables to be deleted just return the result which - // will contain the table to be deleted - if (result != null - && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } else { - PTable oldTable = table; - table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, - clientVersion); - if (table != null && hasInheritableTablePropertyChanged(table, oldTable)) { - invalidateAllChildTablesAndIndexes(table, childViews); - } - if (clientVersion < MIN_SPLITTABLE_SYSTEM_CATALOG && type == PTableType.VIEW) { - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - PTable pTable = connection.getTableNoCache( - table.getParentName().getString()); - table = ViewUtil.addDerivedColumnsAndIndexesFromParent(connection, - table, pTable); - } - } - return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, - currentTime, table, tableNamesToDelete, sharedTablesToDelete); - } - } finally { - ServerUtil.releaseRowLocks(locks); - // drop indexes on views that require the column being dropped. 
These could be on a - // different region server so don't hold row locks while dropping them - for (Pair pair : mutator.getTableAndDroppedColumnPairs()) { - result = dropRemoteIndexes(env, pair.getFirst(), clientTimeStamp, - pair.getSecond(), tableNamesToDelete, sharedTablesToDelete); - if (result != null - && result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - } + newDisableTimeStamp = + (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), + newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength()); + // We use the sign of the INDEX_DISABLE_TIMESTAMP to differentiate the keep-index-active + // (negative) + // from block-writes-to-data-table case. In either case, we want to keep the oldest + // timestamp to + // drive the partial index rebuild rather than update it with each attempt to update the + // index + // when a new data table write occurs. + // We do legitimately move the INDEX_DISABLE_TIMESTAMP to be newer when we're rebuilding + // the + // index in which case the state will be INACTIVE or PENDING_ACTIVE. + if ( + curTimeStampVal != 0 + && (newState == PIndexState.DISABLE || newState == PIndexState.PENDING_ACTIVE + || newState == PIndexState.PENDING_DISABLE) + && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp) + ) { + // do not reset disable timestamp as we want to keep the min + newKVs.remove(disableTimeStampKVIndex); + disableTimeStampKVIndex = -1; } - } catch (Throwable t) { - ClientUtil.throwIOException(fullTableName, t); - return null; // impossible + } } - } - - private void invalidateServerMetadataCache(List requests) - throws Throwable { - if (!this.invalidateServerCacheEnabled) { - LOGGER.info("Skip invalidating server metadata cache since conf property" - + " phoenix.metadata.invalidate.cache.enabled is set to false"); + // Detect invalid transitions + if (currentState == PIndexState.BUILDING) { + if (newState == PIndexState.USABLE) { + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); return; - } - try (PhoenixConnection connection = getServerConnectionForMetaData( - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - ConnectionQueryServices queryServices = connection.getQueryServices(); - queryServices.invalidateServerMetadataCache(requests); - } - } - - private boolean hasInheritableTablePropertyChanged(PTable newTable, PTable oldTable) { - return ! 
Objects.equals(newTable.getMaxLookbackAge(), oldTable.getMaxLookbackAge()); - } - - private void invalidateAllChildTablesAndIndexes(PTable table, List childViews) { - List invalidateList = new ArrayList(); - if (table.getIndexes() != null) { - for(PTable index: table.getIndexes()) { - invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(index))); + } + } else if (currentState == PIndexState.DISABLE) { + // Index already disabled, so can't revert to PENDING_DISABLE + if (newState == PIndexState.PENDING_DISABLE) { + // returning TABLE_ALREADY_EXISTS here means the client doesn't throw an exception + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + // Can't transition back to INACTIVE if INDEX_DISABLE_TIMESTAMP is 0 + if ( + newState != PIndexState.BUILDING && newState != PIndexState.DISABLE + && (newState != PIndexState.INACTIVE || curTimeStampVal == 0) + ) { + builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); + return; + } + // Done building, but was disable before that, so that in disabled state + if (newState == PIndexState.ACTIVE) { + newState = PIndexState.DISABLE; + } + } + if ( + newState == PIndexState.PENDING_DISABLE && currentState != PIndexState.PENDING_DISABLE + && currentState != PIndexState.INACTIVE + ) { + // reset count for first PENDING_DISABLE + newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); + } + if (currentState == PIndexState.PENDING_DISABLE) { + if (newState == PIndexState.ACTIVE) { + // before making index ACTIVE check if all clients succeed otherwise keep it + // PENDING_DISABLE + byte[] count; + try (RegionScanner countScanner = region.getScanner(new Scan(get))) { + List countCells = new ArrayList<>(); + countScanner.next(countCells); + count = Result.create(countCells).getValue(TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); } - } - for(PTable childView: childViews) { - invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(childView))); - if (childView.getIndexes() != null) { - for(PTable viewIndex: childView.getIndexes()) { - invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(viewIndex))); - } + if (count != null && Bytes.toLong(count) != 0) { + newState = PIndexState.PENDING_DISABLE; + newKVs.remove(disableTimeStampKVIndex); + newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); + } else if (disableTimeStampKVIndex == -1) { // clear disableTimestamp if client didn't + // pass it in + newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, timeStamp, + PLong.INSTANCE.toBytes(0))); + disableTimeStampKVIndex = newKVs.size() - 1; } + } else if (newState == PIndexState.DISABLE) { + // reset the counter for pending disable when transitioning from PENDING_DISABLE to + // DISABLE + newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); + } + + } + + if ( + newState == PIndexState.ACTIVE || newState == PIndexState.PENDING_ACTIVE + || newState == PIndexState.DISABLE + 
) { + newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); + } + + if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) { + timeStamp = currentStateKV.getTimestamp(); + } + if ( + (currentState == PIndexState.ACTIVE || currentState == PIndexState.PENDING_ACTIVE) + && newState == PIndexState.UNUSABLE + ) { + newState = PIndexState.INACTIVE; + newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); + } else if ( + (currentState == PIndexState.INACTIVE || currentState == PIndexState.PENDING_ACTIVE) + && newState == PIndexState.USABLE + ) { + // Don't allow manual state change to USABLE (i.e. ACTIVE) if non zero + // INDEX_DISABLE_TIMESTAMP + if (curTimeStampVal != 0) { + newState = currentState; + } else { + newState = PIndexState.ACTIVE; + } + newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, + INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); + } + + PTable returnTable = null; + if (currentState != newState || disableTimeStampKVIndex != -1) { + // make a copy of tableMetadata so we can add to it + tableMetadata = new ArrayList(tableMetadata); + // Always include the empty column value at latest timestamp so + // that clients pull over update. + Put emptyValue = new Put(key); + emptyValue.addColumn(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, + HConstants.LATEST_TIMESTAMP, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + tableMetadata.add(emptyValue); + byte[] dataTableKey = null; + if (dataTableKV != null) { + dataTableKey = + SchemaUtil.getTableKey(tenantId, schemaName, CellUtil.cloneValue(dataTableKV)); + // insert an empty KV to trigger time stamp update on data table row + Put p = new Put(dataTableKey); + p.addColumn(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, + HConstants.LATEST_TIMESTAMP, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + tableMetadata.add(p); + } + boolean setRowKeyOrderOptimizableCell = + newState == PIndexState.BUILDING && !rowKeyOrderOptimizable; + // We're starting a rebuild of the index, so add our rowKeyOrderOptimizable cell + // so that the row keys get generated using the new row key format + if (setRowKeyOrderOptimizableCell) { + UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp); + } + // We are updating the state of an index, so update the DDL timestamp. + long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + tableMetadata + .add(MetaDataUtil.getLastDDLTimestampUpdate(key, clientTimeStamp, serverTimestamp)); + mutateRowsWithLocks(this.accessCheckEnabled, region, tableMetadata, + Collections. 
emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + // Invalidate from cache + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + metaDataCache.invalidate(cacheKey); + if (dataTableKey != null) { + metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey)); + } + if ( + setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 + || currentState.isDisabled() || newState == PIndexState.BUILDING + ) { + returnTable = doGetTable(tenantId, schemaName, tableName, HConstants.LATEST_TIMESTAMP, + rowLock, request.getClientVersion()); + } + } + // Get client timeStamp from mutations, since it may get updated by the + // mutateRowsWithLocks call + long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); + builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); + builder.setMutationTime(currentTime); + if (returnTable != null) { + builder.setTable(PTableImpl.toProto(returnTable)); } - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - for(ImmutableBytesPtr invalidateKey: invalidateList) { - metaDataCache.invalidate(invalidateKey); - } + done.run(builder.build()); + return; + } finally { + rowLock.release(); + } + } catch (Throwable t) { + LOGGER.error("updateIndexState failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); } - - /** - * Removes the table from the server side cache - */ - private void clearRemoteTableFromCache(long clientTimeStamp, byte[] schemaName, byte[] tableName) throws SQLException { - // remove the parent table from the metadata cache as we just mutated the table - Properties props = new Properties(); - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { - props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); - } - try (PhoenixConnection connection = getServerConnectionForMetaData(props, - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - ConnectionQueryServices queryServices = connection.getQueryServices(); - queryServices.clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY, schemaName, tableName, - clientTimeStamp); - } + } + + private static MetaDataMutationResult checkKeyInRegion(byte[] key, Region region, + MutationCode code) { + return ServerUtil.isKeyInRegion(key, region) + ? null + : new MetaDataMutationResult(code, EnvironmentEdgeManager.currentTimeMillis(), null); + } + + private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) { + MetaDataMutationResult result = checkKeyInRegion(key, region, MutationCode.TABLE_NOT_IN_REGION); + if (result != null) { + LOGGER.error("Table rowkey " + Bytes.toStringBinary(key) + " is not in the current region " + + region.getRegionInfo()); } + return result; - // Checks whether a non-zero TTL value is being set. 
- private boolean validateTTLAttributeSettingForEntity( - final List tableMetadata, - final byte[] ttlBytes) { - for (Mutation m : tableMetadata) { - if (m instanceof Put) { - Put p = (Put)m; - List cells = p.get(TABLE_FAMILY_BYTES, ttlBytes); - if (cells != null && cells.size() > 0) { - Cell cell = cells.get(0); - String newTTLStr = (String) PVarchar.INSTANCE.toObject(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); - int newTTL = Integer.parseInt(newTTLStr); - return newTTL != TTL_NOT_DEFINED; - } - } - } - return false; - } + } - public static class ColumnFinder extends StatelessTraverseAllExpressionVisitor { - private boolean columnFound; - private final Expression columnExpression; + private static MetaDataMutationResult checkFunctionKeyInRegion(byte[] key, Region region) { + return checkKeyInRegion(key, region, MutationCode.FUNCTION_NOT_IN_REGION); + } - public ColumnFinder(Expression columnExpression) { - this.columnExpression = columnExpression; - columnFound = false; - } + private static MetaDataMutationResult checkSchemaKeyInRegion(byte[] key, Region region) { + return checkKeyInRegion(key, region, MutationCode.SCHEMA_NOT_IN_REGION); - private Void process(Expression expression) { - if (expression.equals(columnExpression)) { - columnFound = true; - } - return null; - } + } - @Override - public Void visit(KeyValueColumnExpression expression) { - return process(expression); - } + private static class ViewInfo { + private byte[] tenantId; + private byte[] schemaName; + private byte[] viewName; - @Override - public Void visit(RowKeyColumnExpression expression) { - return process(expression); - } - - @Override - public Void visit(ProjectedColumnExpression expression) { - return process(expression); - } - - public boolean getColumnFound() { - return columnFound; - } + public ViewInfo(byte[] tenantId, byte[] schemaName, byte[] viewName) { + super(); + this.tenantId = tenantId; + this.schemaName = schemaName; + this.viewName = viewName; } - @Override - public void addColumn(RpcController controller, final AddColumnRequest request, - RpcCallback done) { - try { - List tableMetaData = ProtobufUtil.getMutations(request); - PTable parentTable = request.hasParentTable() ? PTableImpl.createFromProto(request.getParentTable()) : null; - PTable transformingNewTable = request.hasTransformingNewTable() ? 
PTableImpl.createFromProto(request.getTransformingNewTable()) : null; - boolean addingColumns = request.getAddingColumns(); - MetaDataMutationResult result = mutateColumn(tableMetaData, new AddColumnMutator(), - request.getClientVersion(), parentTable, transformingNewTable, addingColumns); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - - if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { - metricsSource.incrementAlterAddColumnCount(); - LOGGER.info("Column(s) added successfully, tableName: {}", - result.getTable().getTableName()); - } - } - } catch (Throwable e) { - LOGGER.error("Add column failed: ", e); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException("Error when adding column: ", e)); - } + public byte[] getTenantId() { + return tenantId; } - private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, - long clientTimeStamp, int clientVersion) throws IOException, SQLException { - return doGetTable(tenantId, schemaName, tableName, clientTimeStamp, null, clientVersion); + public byte[] getSchemaName() { + return schemaName; } - /** - * Looks up the table locally if its present on this region. - */ - private PTable doGetTable(byte[] tenantId, byte[] schemaName, byte[] tableName, - long clientTimeStamp, RowLock rowLock, int clientVersion) throws IOException, SQLException { - Region region = env.getRegion(); - final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); - // if this region doesn't contain the metadata rows then fail - if (!region.getRegionInfo().containsRow(key)) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_ERROR) - .setSchemaName(Bytes.toString(schemaName)) - .setTableName(Bytes.toString(tableName)).build().buildException(); - } - - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); - // Ask Lars about the expense of this call - if we don't take the lock, we still won't get - // partial results - // get the co-processor environment - // TODO: check that key is within region.getStartKey() and region.getEndKey() - // and return special code to force client to lookup region from meta. - /* - * Lock directly on key, though it may be an index table. This will just prevent a table - * from getting rebuilt too often. - */ - final boolean wasLocked = (rowLock != null); - try { - if (!wasLocked) { - rowLock = acquireLock(region, key, null, true); - } - PTable table = - getTableFromCacheWithModifiedIndexState(clientTimeStamp, clientVersion, cacheKey); - // We only cache the latest, so we'll end up building the table with every call if the - // client connection has specified an SCN. - // TODO: If we indicate to the client that we're returning an older version, but there's a - // newer version available, the client - // can safely not call this, since we only allow modifications to the latest. 
- if (table != null && table.getTimeStamp() < clientTimeStamp) { - // Table on client is up-to-date with table on server, so just return - if (isTableDeleted(table)) { - return null; - } - return table; - } - // take Phoenix row level write-lock as we need to protect metadata cache update - // after scanning SYSTEM.CATALOG to retrieve the PTable object - LockManager.RowLock phoenixRowLock = - lockManager.lockRow(key, this.metadataCacheRowLockTimeout); - try { - table = getTableFromCacheWithModifiedIndexState(clientTimeStamp, clientVersion, - cacheKey); - if (table != null && table.getTimeStamp() < clientTimeStamp) { - if (isTableDeleted(table)) { - return null; - } - return table; - } - // Query for the latest table first, since it's not cached - table = - buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion); - if ((table != null && table.getTimeStamp() <= clientTimeStamp) || ( - blockWriteRebuildIndex && table.getIndexDisableTimestamp() > 0)) { - return table; - } - // Otherwise, query for an older version of the table - it won't be cached - table = buildTable(key, cacheKey, region, clientTimeStamp, clientVersion); - return table; - } finally { - phoenixRowLock.release(); - } - } finally { - if (!wasLocked && rowLock != null) { - rowLock.release(); - } - } + public byte[] getViewName() { + return viewName; } - - private PTable getTableFromCacheWithModifiedIndexState(long clientTimeStamp, int clientVersion, - ImmutableBytesPtr cacheKey) throws SQLException { - PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion); - table = modifyIndexStateForOldClient(clientVersion, table); - return table; + } + + @Override + public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request, + RpcCallback done) { + byte[] schemaName = request.getSchemaName().toByteArray(); + byte[] tableName = request.getTableName().toByteArray(); + try { + byte[] tenantId = request.getTenantId().toByteArray(); + byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + metaDataCache.invalidate(cacheKey); + } catch (Throwable t) { + LOGGER.error("clearTableFromCache failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); } - - private List doGetFunctions(List keys, long clientTimeStamp) throws IOException, SQLException { - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - Region region = env.getRegion(); - Collections.sort(keys, new Comparator() { - @Override - public int compare(byte[] o1, byte[] o2) { - return Bytes.compareTo(o1, o2); - } - }); - /* - * Lock directly on key, though it may be an index table. This will just prevent a table - * from getting rebuilt too often. 
- */ - List rowLocks = new ArrayList(keys.size()); - ; - try { - for (int i = 0; i < keys.size(); i++) { - acquireLock(region, keys.get(i), rowLocks, true); - } - - List functionsAvailable = new ArrayList(keys.size()); - int numFunctions = keys.size(); - Iterator iterator = keys.iterator(); - while (iterator.hasNext()) { - byte[] key = iterator.next(); - PFunction function = (PFunction) metaDataCache.getIfPresent(new FunctionBytesPtr(key)); - if (function == null) { - metricsSource.incrementMetadataCacheMissCount(); - } else { - metricsSource.incrementMetadataCacheHitCount(); - } - if (function != null && function.getTimeStamp() < clientTimeStamp) { - if (isFunctionDeleted(function)) { - return null; - } - functionsAvailable.add(function); - iterator.remove(); - } - } - if (functionsAvailable.size() == numFunctions) return functionsAvailable; - - // Query for the latest table first, since it's not cached - List buildFunctions = - buildFunctions(keys, region, clientTimeStamp, false, - Collections.emptyList()); - if (buildFunctions == null || buildFunctions.isEmpty()) { - return null; - } - functionsAvailable.addAll(buildFunctions); - if (functionsAvailable.size() == numFunctions) return functionsAvailable; - return null; - } finally { - ServerUtil.releaseRowLocks(rowLocks); - } + } + + @Override + public void getSchema(RpcController controller, GetSchemaRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + Region region = env.getRegion(); + String schemaName = request.getSchemaName(); + byte[] lockKey = SchemaUtil.getSchemaKey(schemaName); + MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; } - - @Override - public void dropColumn(RpcController controller, final DropColumnRequest request, - RpcCallback done) { - List tableMetaData = null; - try { - tableMetaData = ProtobufUtil.getMutations(request); - PTable parentTable = request.hasParentTable() ? 
PTableImpl.createFromProto(request.getParentTable()) : null; - MetaDataMutationResult result = mutateColumn(tableMetaData, new DropColumnMutator(env.getConfiguration()), - request.getClientVersion(), parentTable,null, true); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - - if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) { - metricsSource.incrementAlterDropColumnCount(); - LOGGER.info("Column(s) dropped successfully, tableName: {}", - result.getTable().getTableName()); - } - } - } catch (Throwable e) { - LOGGER.error("Drop column failed: ", e); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException("Error when dropping column: ", e)); - } + long clientTimeStamp = request.getClientTimestamp(); + List locks = Lists.newArrayList(); + try { + getCoprocessorHost().preGetSchema(schemaName); + acquireLock(region, lockKey, locks, false); + // Get as of latest timestamp so we can detect if we have a + // newer schema that already + // exists without making an additional query + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey); + PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp); + if (schema != null) { + if (schema.getTimeStamp() < clientTimeStamp) { + if (!isSchemaDeleted(schema)) { + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); + done.run(builder.build()); + return; + } else { + builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); + done.run(builder.build()); + return; + } + } + } + } catch (Exception e) { + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND); + builder.setMutationTime(currentTime); + done.run(builder.build()); + return; + } finally { + ServerUtil.releaseRowLocks(locks); } - - private MetaDataMutationResult dropIndexes(RegionCoprocessorEnvironment env, PTable table, - List invalidateList, List locks, long clientTimeStamp, - List tableMetaData, PColumn columnToDelete, List tableNamesToDelete, - List sharedTablesToDelete, int clientVersion) - throws Throwable { - // Look for columnToDelete in any indexes. If found as PK column, get lock and drop the - // index and then invalidate it - // Covered columns are deleted from the index by the client - Region region = env.getRegion(); - PhoenixConnection connection = table.getIndexes().isEmpty() ? null : - getServerConnectionForMetaData(env.getConfiguration()).unwrap( - PhoenixConnection.class); - for (PTable index : table.getIndexes()) { - // ignore any indexes derived from ancestors - if (index.getName().getString().contains( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - continue; - } - byte[] tenantId = index.getTenantId() == null ? 
- ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(); - IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); - byte[] indexKey = - SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), index - .getTableName().getBytes()); - Pair columnToDeleteInfo = - new Pair<>(columnToDelete.getFamilyName().getString(), - columnToDelete.getName().getString()); - ColumnReference colDropRef = new ColumnReference( - columnToDelete.getFamilyName().getBytes(), - columnToDelete.getColumnQualifierBytes()); - boolean isColumnIndexed = indexMaintainer.getIndexedColumnInfo().contains( - columnToDeleteInfo); - boolean isCoveredColumn = indexMaintainer.getCoveredColumns().contains(colDropRef); - // If index requires this column for its pk, then drop it - if (isColumnIndexed) { - // Drop the index table. The doDropTable will expand - // this to all of the table rows and invalidate the - // index table - Delete delete = new Delete(indexKey, clientTimeStamp); - byte[] linkKey = - MetaDataUtil.getParentLinkKey(tenantId, table.getSchemaName().getBytes(), - table.getTableName().getBytes(), index.getTableName().getBytes()); - // Drop the link between the parent table and the - // index table - Delete linkDelete = new Delete(linkKey, clientTimeStamp); - tableMetaData.add(delete); - tableMetaData.add(linkDelete); - // Since we're dropping the index, lock it to ensure - // that a change in index state doesn't - // occur while we're dropping it. - acquireLock(region, indexKey, locks, false); - // invalidate server metadata cache when dropping index - List requests = new ArrayList<>(); - requests.add(new InvalidateServerMetadataCacheRequest(tenantId, - index.getSchemaName().getBytes(), - index.getTableName().getBytes())); - - invalidateServerMetadataCache(requests); - List childLinksMutations = Lists.newArrayList(); - MetaDataMutationResult result = doDropTable(indexKey, tenantId, - index.getSchemaName().getBytes(), index.getTableName().getBytes(), - table.getName().getBytes(), index.getType(), tableMetaData, - childLinksMutations, invalidateList, tableNamesToDelete, - sharedTablesToDelete, clientVersion); - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - metricsSource.incrementDropIndexCount(); - LOGGER.info("INDEX dropped successfully, tableName: {}", - result.getTable().getTableName()); - - // there should be no child links to delete since we are just dropping an index - if (!childLinksMutations.isEmpty()) { - LOGGER.error("Found unexpected child link mutations while dropping an index " - + childLinksMutations); - } - invalidateList.add(new ImmutableBytesPtr(indexKey)); - } - // If the dropped column is a covered index column, invalidate the index - else if (isCoveredColumn) { - invalidateList.add(new ImmutableBytesPtr(indexKey)); - } - } - if (connection != null) { - connection.close(); - } - return null; + } + + @Override + public void getFunctions(RpcController controller, GetFunctionsRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + byte[] tenantId = request.getTenantId().toByteArray(); + List functionNames = new ArrayList<>(request.getFunctionNamesCount()); + try { + Region region = env.getRegion(); + List functionNamesList = request.getFunctionNamesList(); + List functionTimestampsList = request.getFunctionTimestampsList(); + List keys = new ArrayList(request.getFunctionNamesCount()); + List> functions = + new ArrayList>(request.getFunctionNamesCount()); + for (int i = 0; 
i < functionNamesList.size(); i++) { + byte[] functionName = functionNamesList.get(i).toByteArray(); + functionNames.add(Bytes.toString(functionName)); + byte[] key = SchemaUtil.getFunctionKey(tenantId, functionName); + MetaDataMutationResult result = checkFunctionKeyInRegion(key, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + functions.add(new Pair(functionName, functionTimestampsList.get(i))); + keys.add(key); + } + + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + List functionsAvailable = doGetFunctions(keys, request.getClientTimestamp()); + if (functionsAvailable == null) { + builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND); + builder.setMutationTime(currentTime); + done.run(builder.build()); + return; + } + builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS); + builder.setMutationTime(currentTime); + + for (PFunction function : functionsAvailable) { + builder.addFunction(PFunction.toProto(function)); + } + done.run(builder.build()); + return; + } catch (Throwable t) { + LOGGER.error("getFunctions failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(functionNames.toString(), t)); } - - private MetaDataMutationResult dropRemoteIndexes(RegionCoprocessorEnvironment env, PTable table, - long clientTimeStamp, PColumn columnToDelete, - List tableNamesToDelete, - List sharedTablesToDelete) - throws SQLException { - // Look for columnToDelete in any indexes. If found as PK column, get lock and drop the - // index and then invalidate it - // Covered columns are deleted from the index by the client - PhoenixConnection connection = table.getIndexes().isEmpty() ? null : - getServerConnectionForMetaData(env.getConfiguration()).unwrap( - PhoenixConnection.class); - for (PTable index : table.getIndexes()) { - byte[] tenantId = index.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(); - IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection); - byte[] indexKey = - SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), index - .getTableName().getBytes()); - Pair columnToDeleteInfo = - new Pair<>(columnToDelete.getFamilyName().getString(), columnToDelete.getName().getString()); - ColumnReference colDropRef = - new ColumnReference(columnToDelete.getFamilyName().getBytes(), - columnToDelete.getColumnQualifierBytes()); - boolean isColumnIndexed = indexMaintainer.getIndexedColumnInfo().contains(columnToDeleteInfo); - boolean isCoveredColumn = indexMaintainer.getCoveredColumns().contains(colDropRef); - // If index requires this column for its pk, then drop it - if (isColumnIndexed) { - // Drop the index table. 
The doDropTable will expand - // this to all of the table rows and invalidate the - // index table - Delete delete = new Delete(indexKey, clientTimeStamp); - byte[] linkKey = - MetaDataUtil.getParentLinkKey(tenantId, table.getSchemaName().getBytes(), - table.getTableName().getBytes(), index.getTableName().getBytes()); - // Drop the link between the parent table and the - // index table - Delete linkDelete = new Delete(linkKey, clientTimeStamp); - List remoteDropMetadata = Lists.newArrayListWithExpectedSize(2); - remoteDropMetadata.add(delete); - remoteDropMetadata.add(linkDelete); - // if the index is not present on the current region make an rpc to drop it - Properties props = new Properties(); - if (tenantId != null) { - props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(tenantId)); - } - if (clientTimeStamp != HConstants.LATEST_TIMESTAMP) { - props.setProperty("CurrentSCN", Long.toString(clientTimeStamp)); - } - ConnectionQueryServices queryServices = connection.getQueryServices(); - MetaDataMutationResult result = - queryServices.dropTable(remoteDropMetadata, PTableType.INDEX, false); - if (result.getTableNamesToDelete() != null && !result.getTableNamesToDelete().isEmpty()) - tableNamesToDelete.addAll(result.getTableNamesToDelete()); - if (result.getSharedTablesToDelete() != null && !result.getSharedTablesToDelete().isEmpty()) - sharedTablesToDelete.addAll(result.getSharedTablesToDelete()); - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - return result; - } - } - // If the dropped column is a covered index column, invalidate the index - else if (isCoveredColumn) { - clearRemoteTableFromCache(clientTimeStamp, index.getSchemaName() != null ? - index.getSchemaName().getBytes() : ByteUtil.EMPTY_BYTE_ARRAY, index.getTableName().getBytes()); + } + + @Override + public void createFunction(RpcController controller, CreateFunctionRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + byte[][] rowKeyMetaData = new byte[2][]; + byte[] functionName = null; + try { + List functionMetaData = ProtobufUtil.getMutations(request); + boolean temporaryFunction = request.getTemporary(); + MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData); + byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; + byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + List locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); + try { + acquireLock(region, lockKey, locks, false); + // Get as of latest timestamp so we can detect if we have a newer function that already + // exists without making an additional query + ImmutableBytesPtr cacheKey = new FunctionBytesPtr(lockKey); + PFunction function = loadFunction(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp, + request.getReplace(), functionMetaData); + if (function != null) { + if (function.getTimeStamp() < clientTimeStamp) { + // If the function is older than the client time stamp and it's deleted, + // continue + if (!isFunctionDeleted(function)) { + builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); 
+ builder.addFunction(PFunction.toProto(function)); + done.run(builder.build()); + if (!request.getReplace()) { + return; + } } + } else { + builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_FUNCTION_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.addFunction(PFunction.toProto(function)); + done.run(builder.build()); + return; + } } - if (connection != null) { - connection.close(); + // Don't store function info for temporary functions. + if (!temporaryFunction) { + mutateRowsWithLocks(this.accessCheckEnabled, region, functionMetaData, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); } - return null; - } - @Override - public void clearCache(RpcController controller, ClearCacheRequest request, - RpcCallback done) { - GlobalCache cache = GlobalCache.getInstance(this.env); + // Invalidate the cache - the next getFunction call will add it + // TODO: consider loading the function that was just created here, patching up the parent + // function, and updating the cache Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - metaDataCache.invalidateAll(); - long unfreedBytes = cache.clearTenantCache(); - ClearCacheResponse.Builder builder = ClearCacheResponse.newBuilder(); - builder.setUnfreedBytes(unfreedBytes); + GlobalCache.getInstance(this.env).getMetaDataCache(); + metaDataCache.invalidate(cacheKey); + // Get timeStamp from mutations - the above method sets it if it's unset + long currentTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); + builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND); + builder.setMutationTime(currentTimeStamp); done.run(builder.build()); - } - @Override - public void getVersion(RpcController controller, GetVersionRequest request, RpcCallback done) { - - GetVersionResponse.Builder builder = GetVersionResponse.newBuilder(); - Configuration config = env.getConfiguration(); - if (isTablesMappingEnabled - && MetaDataProtocol.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) { - LOGGER.error("Old client is not compatible when" + " system tables are upgraded to map to namespace"); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException( - SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, - isTablesMappingEnabled).toString(), - new DoNotRetryIOException( - "Old client is not compatible when" + " system tables are upgraded to map to namespace"))); - } - long version = MetaDataUtil.encodeVersion(env.getHBaseVersion(), config); - - PTable systemCatalog = null; - try { - systemCatalog = - doGetTable(ByteUtil.EMPTY_BYTE_ARRAY, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES, HConstants.LATEST_TIMESTAMP, null, - request.getClientVersion()); - } catch (Throwable t) { - boolean isErrorSwallowed = false; - if (t instanceof SQLException && - ((SQLException) t).getErrorCode() == SQLExceptionCode.GET_TABLE_ERROR.getErrorCode()) { - Region region = env.getRegion(); - final byte[] key = SchemaUtil.getTableKey( - ByteUtil.EMPTY_BYTE_ARRAY, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES); - if (!region.getRegionInfo().containsRow(key) && - request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG) { - LOGGER.debug("The pre-4.15 client is trying to get SYSTEM.CATALOG " + - "region that contains head row"); - isErrorSwallowed = true; - } - } - if (!isErrorSwallowed) { - 
LOGGER.error("loading system catalog table inside getVersion failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException( - SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, - isTablesMappingEnabled).toString(), t)); - } - } - // In case this is the first connection, system catalog does not exist, and so we don't - // set the optional system catalog timestamp. - if (systemCatalog != null) { - builder.setSystemCatalogTimestamp(systemCatalog.getTimeStamp()); - } - builder.setVersion(version); - done.run(builder.build()); - } - - @Override - public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - byte[] schemaName = null; - byte[] tableName = null; - try { - byte[][] rowKeyMetaData = new byte[3][]; - List tableMetadata = ProtobufUtil.getMutations(request); - MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData); - byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); - Region region = env.getRegion(); - MetaDataMutationResult result = checkTableKeyInRegion(key, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - long timeStamp = HConstants.LATEST_TIMESTAMP; - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); - List newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES); - Cell newKV = null; - int disableTimeStampKVIndex = -1; - int indexStateKVIndex = 0; - int index = 0; - for (Cell cell : newKVs) { - if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0) { - newKV = cell; - indexStateKVIndex = index; - timeStamp = cell.getTimestamp(); - } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) { - disableTimeStampKVIndex = index; - } - index++; - } - PIndexState newState = - PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]); - RowLock rowLock = acquireLock(region, key, null, false); - if (rowLock == null) { - throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key)); - } - - Get get = new Get(key); - get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); - get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); - get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); - get.addColumn(TABLE_FAMILY_BYTES, MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES); - try (RegionScanner scanner = region.getScanner(new Scan(get))) { - List cells = new ArrayList<>(); - scanner.next(cells); - if (cells.isEmpty()) { - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - Result currentResult = Result.create(cells); - Cell dataTableKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES); - Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES); - Cell currentDisableTimeStamp = 
currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES); - boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, MetaDataEndpointImplConstants.ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null; - - //check permission on data table - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata); - PTable loadedTable = - doGetTable(tenantId, schemaName, tableName, clientTimeStamp, null, - request.getClientVersion()); - if (loadedTable == null) { - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - List requests = new ArrayList<>(); - requests.add(new InvalidateServerMetadataCacheRequest(tenantId, schemaName, - tableName)); - invalidateServerMetadataCache(requests); - getCoprocessorHost().preIndexUpdate(Bytes.toString(tenantId), - SchemaUtil.getTableName(schemaName, tableName), - TableName.valueOf(loadedTable.getPhysicalName().getBytes()), - getParentPhysicalTableName(loadedTable), - newState); - - PIndexState currentState = - PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV - .getValueOffset()]); - // Timestamp of INDEX_STATE gets updated with each call - long actualTimestamp = currentStateKV.getTimestamp(); - long curTimeStampVal = 0; - long newDisableTimeStamp = 0; - if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0)) { - curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), - currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength()); - // new DisableTimeStamp is passed in - if (disableTimeStampKVIndex >= 0) { - Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex); - long expectedTimestamp = newDisableTimeStampCell.getTimestamp(); - // If the index status has been updated after the upper bound of the scan we use - // to partially rebuild the index, then we need to fail the rebuild because an - // index write failed before the rebuild was complete. - if (actualTimestamp > expectedTimestamp) { - builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), - newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength()); - // We use the sign of the INDEX_DISABLE_TIMESTAMP to differentiate the keep-index-active (negative) - // from block-writes-to-data-table case. In either case, we want to keep the oldest timestamp to - // drive the partial index rebuild rather than update it with each attempt to update the index - // when a new data table write occurs. - // We do legitimately move the INDEX_DISABLE_TIMESTAMP to be newer when we're rebuilding the - // index in which case the state will be INACTIVE or PENDING_ACTIVE. 
- if (curTimeStampVal != 0 - && (newState == PIndexState.DISABLE || newState == PIndexState.PENDING_ACTIVE || newState == PIndexState.PENDING_DISABLE) - && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) { - // do not reset disable timestamp as we want to keep the min - newKVs.remove(disableTimeStampKVIndex); - disableTimeStampKVIndex = -1; - } - } - } - // Detect invalid transitions - if (currentState == PIndexState.BUILDING) { - if (newState == PIndexState.USABLE) { - builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - } else if (currentState == PIndexState.DISABLE) { - // Index already disabled, so can't revert to PENDING_DISABLE - if (newState == PIndexState.PENDING_DISABLE) { - // returning TABLE_ALREADY_EXISTS here means the client doesn't throw an exception - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - // Can't transition back to INACTIVE if INDEX_DISABLE_TIMESTAMP is 0 - if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE && - (newState != PIndexState.INACTIVE || curTimeStampVal == 0)) { - builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - return; - } - // Done building, but was disable before that, so that in disabled state - if (newState == PIndexState.ACTIVE) { - newState = PIndexState.DISABLE; - } - } - if (newState == PIndexState.PENDING_DISABLE && currentState != PIndexState.PENDING_DISABLE - && currentState != PIndexState.INACTIVE) { - // reset count for first PENDING_DISABLE - newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); - } - if (currentState == PIndexState.PENDING_DISABLE) { - if (newState == PIndexState.ACTIVE) { - //before making index ACTIVE check if all clients succeed otherwise keep it PENDING_DISABLE - byte[] count; - try (RegionScanner countScanner = region.getScanner(new Scan(get))) { - List countCells = new ArrayList<>(); - countScanner.next(countCells); - count = Result.create(countCells) - .getValue(TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); - } - if (count != null && Bytes.toLong(count) != 0) { - newState = PIndexState.PENDING_DISABLE; - newKVs.remove(disableTimeStampKVIndex); - newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); - } else if (disableTimeStampKVIndex == -1) { // clear disableTimestamp if client didn't pass it in - newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, timeStamp, PLong.INSTANCE.toBytes(0))); - disableTimeStampKVIndex = newKVs.size() - 1; - } - } else if (newState == PIndexState.DISABLE) { - //reset the counter for pending disable when transitioning from PENDING_DISABLE to DISABLE - newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); - } - - } - - if (newState == PIndexState.ACTIVE || newState == PIndexState.PENDING_ACTIVE || newState == PIndexState.DISABLE) { - 
newKVs.add(PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES, timeStamp, Bytes.toBytes(0L))); - } - - if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) { - timeStamp = currentStateKV.getTimestamp(); - } - if ((currentState == PIndexState.ACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.UNUSABLE) { - newState = PIndexState.INACTIVE; - newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); - } else if ((currentState == PIndexState.INACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.USABLE) { - // Don't allow manual state change to USABLE (i.e. ACTIVE) if non zero INDEX_DISABLE_TIMESTAMP - if (curTimeStampVal != 0) { - newState = currentState; - } else { - newState = PIndexState.ACTIVE; - } - newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, - INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue()))); - } - - PTable returnTable = null; - if (currentState != newState || disableTimeStampKVIndex != -1) { - // make a copy of tableMetadata so we can add to it - tableMetadata = new ArrayList(tableMetadata); - // Always include the empty column value at latest timestamp so - // that clients pull over update. - Put emptyValue = new Put(key); - emptyValue.addColumn(TABLE_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - HConstants.LATEST_TIMESTAMP, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - tableMetadata.add(emptyValue); - byte[] dataTableKey = null; - if (dataTableKV != null) { - dataTableKey = SchemaUtil.getTableKey(tenantId, schemaName, CellUtil.cloneValue(dataTableKV)); - // insert an empty KV to trigger time stamp update on data table row - Put p = new Put(dataTableKey); - p.addColumn(TABLE_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - HConstants.LATEST_TIMESTAMP, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - tableMetadata.add(p); - } - boolean setRowKeyOrderOptimizableCell = newState == PIndexState.BUILDING && !rowKeyOrderOptimizable; - // We're starting a rebuild of the index, so add our rowKeyOrderOptimizable cell - // so that the row keys get generated using the new row key format - if (setRowKeyOrderOptimizableCell) { - UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp); - } - // We are updating the state of an index, so update the DDL timestamp. 
- long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - tableMetadata.add(MetaDataUtil.getLastDDLTimestampUpdate( - key, clientTimeStamp, serverTimestamp)); - mutateRowsWithLocks(this.accessCheckEnabled, region, tableMetadata, Collections.emptySet(), - HConstants.NO_NONCE, HConstants.NO_NONCE); - // Invalidate from cache - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - metaDataCache.invalidate(cacheKey); - if (dataTableKey != null) { - metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey)); - } - if (setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 - || currentState.isDisabled() || newState == PIndexState.BUILDING) { - returnTable = doGetTable(tenantId, schemaName, tableName, - HConstants.LATEST_TIMESTAMP, rowLock, request.getClientVersion()); - } - } - // Get client timeStamp from mutations, since it may get updated by the - // mutateRowsWithLocks call - long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata); - builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS); - builder.setMutationTime(currentTime); - if (returnTable != null) { - builder.setTable(PTableImpl.toProto(returnTable)); - } - done.run(builder.build()); - return; - } finally { - rowLock.release(); - } - } catch (Throwable t) { - LOGGER.error("updateIndexState failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); - } - } - - private static MetaDataMutationResult checkKeyInRegion(byte[] key, Region region, MutationCode code) { - return ServerUtil.isKeyInRegion(key, region) ? null : - new MetaDataMutationResult(code, EnvironmentEdgeManager.currentTimeMillis(), null); + metricsSource.incrementCreateFunctionCount(); + LOGGER.info("FUNCTION created successfully, functionName: {}", + Bytes.toString(functionName)); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + LOGGER.error("createFunction failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(Bytes.toString(functionName), t)); } + } + + @Override + public void dropFunction(RpcController controller, DropFunctionRequest request, + RpcCallback done) { + byte[][] rowKeyMetaData = new byte[2][]; + byte[] functionName = null; + try { + List functionMetaData = ProtobufUtil.getMutations(request); + MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData); + byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; + functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; + byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + List locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); + try { + acquireLock(region, lockKey, locks, false); + List keys = new ArrayList(1); + keys.add(lockKey); + List invalidateList = new ArrayList(); - private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) { - MetaDataMutationResult result = checkKeyInRegion(key, region, MutationCode.TABLE_NOT_IN_REGION); - if (result != null) { - LOGGER.error("Table rowkey " + Bytes.toStringBinary(key) - + " is not in the current region " + region.getRegionInfo()); + result = doDropFunction(clientTimeStamp, 
keys, functionMetaData, invalidateList); + if (result.getMutationCode() != MutationCode.FUNCTION_ALREADY_EXISTS) { + done.run(MetaDataMutationResult.toProto(result)); + return; } - return result; + mutateRowsWithLocks(this.accessCheckEnabled, region, functionMetaData, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData); + for (ImmutableBytesPtr ptr : invalidateList) { + metaDataCache.invalidate(ptr); + PFunction function = newDeletedFunctionMarker(currentTime); + metaDataCache.put(ptr, function); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); + } + + done.run(MetaDataMutationResult.toProto(result)); + + metricsSource.incrementDropFunctionCount(); + LOGGER.info("FUNCTION dropped successfully, functionName: {}", + Bytes.toString(functionName)); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + LOGGER.error("dropFunction failed", t); + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException(Bytes.toString(functionName), t)); } - - private static MetaDataMutationResult checkFunctionKeyInRegion(byte[] key, Region region) { - return checkKeyInRegion(key, region, MutationCode.FUNCTION_NOT_IN_REGION); - } - - private static MetaDataMutationResult checkSchemaKeyInRegion(byte[] key, Region region) { - return checkKeyInRegion(key, region, MutationCode.SCHEMA_NOT_IN_REGION); - + } + + private MetaDataMutationResult doDropFunction(long clientTimeStamp, List keys, + List functionMetaData, List invalidateList) + throws IOException, SQLException { + List keysClone = new ArrayList(keys); + List functions = doGetFunctions(keysClone, clientTimeStamp); + // We didn't find a table at the latest timestamp, so either there is no table or + // there was a table, but it's been deleted. In either case we want to return. 
+ if (functions == null || functions.isEmpty()) { + if ( + buildDeletedFunction(keys.get(0), new FunctionBytesPtr(keys.get(0)), env.getRegion(), + clientTimeStamp) != null + ) { + return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); } - private static class ViewInfo { - private byte[] tenantId; - private byte[] schemaName; - private byte[] viewName; - - public ViewInfo(byte[] tenantId, byte[] schemaName, byte[] viewName) { - super(); - this.tenantId = tenantId; - this.schemaName = schemaName; - this.viewName = viewName; - } - - public byte[] getTenantId() { - return tenantId; + if (functions != null && !functions.isEmpty()) { + if (functions.get(0).getTimeStamp() < clientTimeStamp) { + // If the function is older than the client time stamp and it's deleted, + // continue + if (isFunctionDeleted(functions.get(0))) { + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); } - - public byte[] getSchemaName() { - return schemaName; - } - - public byte[] getViewName() { - return viewName; - } - } - - @Override - public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request, - RpcCallback done) { - byte[] schemaName = request.getSchemaName().toByteArray(); - byte[] tableName = request.getTableName().toByteArray(); - try { - byte[] tenantId = request.getTenantId().toByteArray(); - byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName); - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key); - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - metaDataCache.invalidate(cacheKey); - } catch (Throwable t) { - LOGGER.error("clearTableFromCache failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t)); - } - } - - @Override - public void getSchema(RpcController controller, GetSchemaRequest request, RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + invalidateList.add(new FunctionBytesPtr(keys.get(0))); Region region = env.getRegion(); - String schemaName = request.getSchemaName(); - byte[] lockKey = SchemaUtil.getSchemaKey(schemaName); - MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - long clientTimeStamp = request.getClientTimestamp(); - List locks = Lists.newArrayList(); - try { - getCoprocessorHost().preGetSchema(schemaName); - acquireLock(region, lockKey, locks, false); - // Get as of latest timestamp so we can detect if we have a - // newer schema that already - // exists without making an additional query - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey); - PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp); - if (schema != null) { - if (schema.getTimeStamp() < clientTimeStamp) { - if (!isSchemaDeleted(schema)) { - builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setSchema(PSchema.toProto(schema)); - done.run(builder.build()); - return; - } else { - builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND); - 
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setSchema(PSchema.toProto(schema)); - done.run(builder.build()); - return; - } - } - } - } catch (Exception e) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND); - builder.setMutationTime(currentTime); - done.run(builder.build()); - return; - } finally { - ServerUtil.releaseRowLocks(locks); + Scan scan = + MetaDataUtil.newTableRowsScan(keys.get(0), MIN_TABLE_TIMESTAMP, clientTimeStamp); + List results = Lists.newArrayList(); + try (RegionScanner scanner = region.getScanner(scan)) { + scanner.next(results); + if (results.isEmpty()) { // Should not be possible + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + do { + Cell kv = results.get(0); + Delete delete = + new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp); + functionMetaData.add(delete); + results.clear(); + scanner.next(results); + } while (!results.isEmpty()); } + return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, + EnvironmentEdgeManager.currentTimeMillis(), functions, true); + } } - - @Override - public void getFunctions(RpcController controller, GetFunctionsRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - byte[] tenantId = request.getTenantId().toByteArray(); - List functionNames = new ArrayList<>(request.getFunctionNamesCount()); - try { - Region region = env.getRegion(); - List functionNamesList = request.getFunctionNamesList(); - List functionTimestampsList = request.getFunctionTimestampsList(); - List keys = new ArrayList(request.getFunctionNamesCount()); - List> functions = new ArrayList>(request.getFunctionNamesCount()); - for (int i = 0; i < functionNamesList.size(); i++) { - byte[] functionName = functionNamesList.get(i).toByteArray(); - functionNames.add(Bytes.toString(functionName)); - byte[] key = SchemaUtil.getFunctionKey(tenantId, functionName); - MetaDataMutationResult result = checkFunctionKeyInRegion(key, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - functions.add(new Pair(functionName, functionTimestampsList.get(i))); - keys.add(key); - } - - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - List functionsAvailable = doGetFunctions(keys, request.getClientTimestamp()); - if (functionsAvailable == null) { - builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND); - builder.setMutationTime(currentTime); - done.run(builder.build()); - return; - } - builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS); - builder.setMutationTime(currentTime); - - for (PFunction function : functionsAvailable) { - builder.addFunction(PFunction.toProto(function)); + return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + } + + @Override + public void createSchema(RpcController controller, CreateSchemaRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + String schemaName = null; + try { + List schemaMutations = ProtobufUtil.getMutations(request); + schemaName = request.getSchemaName(); + // don't do the user permission checks for the SYSTEM schema, because an ordinary + // user has to be able to create it if it doesn't already exist when bootstrapping + // the 
system tables + if (!schemaName.equals(QueryConstants.SYSTEM_SCHEMA_NAME)) { + getCoprocessorHost().preCreateSchema(schemaName); + } + Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations); + + byte[] lockKey = m.getRow(); + Region region = env.getRegion(); + MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + List locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); + try { + acquireLock(region, lockKey, locks, false); + // Get as of latest timestamp so we can detect if we have a newer schema that already exists + // without + // making an additional query + ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey); + PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp); + if (schema != null) { + if (schema.getTimeStamp() < clientTimeStamp) { + if (!isSchemaDeleted(schema)) { + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); + done.run(builder.build()); + return; } + } else { + builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + builder.setSchema(PSchema.toProto(schema)); done.run(builder.build()); return; - } catch (Throwable t) { - LOGGER.error("getFunctions failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(functionNames.toString(), t)); + } } - } + mutateRowsWithLocks(this.accessCheckEnabled, region, schemaMutations, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); - @Override - public void createFunction(RpcController controller, CreateFunctionRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - byte[][] rowKeyMetaData = new byte[2][]; - byte[] functionName = null; - try { - List functionMetaData = ProtobufUtil.getMutations(request); - boolean temporaryFunction = request.getTemporary(); - MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData); - byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; - byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName); - Region region = env.getRegion(); - MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - List locks = Lists.newArrayList(); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); - try { - acquireLock(region, lockKey, locks, false); - // Get as of latest timestamp so we can detect if we have a newer function that already - // exists without making an additional query - ImmutableBytesPtr cacheKey = new FunctionBytesPtr(lockKey); - PFunction function = - loadFunction(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp, request.getReplace(), functionMetaData); - if (function != null) { - if (function.getTimeStamp() < clientTimeStamp) { - // If the function is older than the client time stamp and it's deleted, - // continue - if (!isFunctionDeleted(function)) { - builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS); - 
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.addFunction(PFunction.toProto(function)); - done.run(builder.build()); - if (!request.getReplace()) { - return; - } - } - } else { - builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_FUNCTION_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.addFunction(PFunction.toProto(function)); - done.run(builder.build()); - return; - } - } - // Don't store function info for temporary functions. - if (!temporaryFunction) { - mutateRowsWithLocks(this.accessCheckEnabled, region, functionMetaData, - Collections.emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); - } - - // Invalidate the cache - the next getFunction call will add it - // TODO: consider loading the function that was just created here, patching up the parent function, and updating the cache - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - metaDataCache.invalidate(cacheKey); - // Get timeStamp from mutations - the above method sets it if it's unset - long currentTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); - builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND); - builder.setMutationTime(currentTimeStamp); - done.run(builder.build()); - - metricsSource.incrementCreateFunctionCount(); - LOGGER.info("FUNCTION created successfully, functionName: {}", - Bytes.toString(functionName)); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - LOGGER.error("createFunction failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(Bytes.toString(functionName), t)); - } - } - - @Override - public void dropFunction(RpcController controller, DropFunctionRequest request, - RpcCallback done) { - byte[][] rowKeyMetaData = new byte[2][]; - byte[] functionName = null; - try { - List functionMetaData = ProtobufUtil.getMutations(request); - MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData); - byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX]; - functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX]; - byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName); - Region region = env.getRegion(); - MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - List locks = Lists.newArrayList(); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData); - try { - acquireLock(region, lockKey, locks, false); - List keys = new ArrayList(1); - keys.add(lockKey); - List invalidateList = new ArrayList(); - - result = doDropFunction(clientTimeStamp, keys, functionMetaData, invalidateList); - if (result.getMutationCode() != MutationCode.FUNCTION_ALREADY_EXISTS) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - mutateRowsWithLocks(this.accessCheckEnabled, region, functionMetaData, Collections.emptySet(), - HConstants.NO_NONCE, HConstants.NO_NONCE); - - Cache metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache(); - long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData); - for (ImmutableBytesPtr ptr : invalidateList) { - metaDataCache.invalidate(ptr); - PFunction function = newDeletedFunctionMarker(currentTime); - metaDataCache.put(ptr, function); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(function.getEstimatedSize()); - 
} - - done.run(MetaDataMutationResult.toProto(result)); - - metricsSource.incrementDropFunctionCount(); - LOGGER.info("FUNCTION dropped successfully, functionName: {}", - Bytes.toString(functionName)); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - LOGGER.error("dropFunction failed", t); - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException(Bytes.toString(functionName), t)); + // Invalidate the cache - the next getSchema call will add it + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + if (cacheKey != null) { + metaDataCache.invalidate(cacheKey); } - } - private MetaDataMutationResult doDropFunction(long clientTimeStamp, List keys, List functionMetaData, List invalidateList) - throws IOException, SQLException { - List keysClone = new ArrayList(keys); - List functions = doGetFunctions(keysClone, clientTimeStamp); - // We didn't find a table at the latest timestamp, so either there is no table or - // there was a table, but it's been deleted. In either case we want to return. - if (functions == null || functions.isEmpty()) { - if (buildDeletedFunction(keys.get(0), new FunctionBytesPtr(keys.get(0)), env.getRegion(), clientTimeStamp) != null) { - return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), null); - } - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null); - } + // Get timeStamp from mutations - the above method sets it if + // it's unset + long currentTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); + builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND); + builder.setMutationTime(currentTimeStamp); + done.run(builder.build()); - if (functions != null && !functions.isEmpty()) { - if (functions.get(0).getTimeStamp() < clientTimeStamp) { - // If the function is older than the client time stamp and it's deleted, - // continue - if (isFunctionDeleted(functions.get(0))) { - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - invalidateList.add(new FunctionBytesPtr(keys.get(0))); - Region region = env.getRegion(); - Scan scan = MetaDataUtil.newTableRowsScan(keys.get(0), MIN_TABLE_TIMESTAMP, clientTimeStamp); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan)) { - scanner.next(results); - if (results.isEmpty()) { // Should not be possible - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null); - } - do { - Cell kv = results.get(0); - Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp); - functionMetaData.add(delete); - results.clear(); - scanner.next(results); - } while (!results.isEmpty()); - } - return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, - EnvironmentEdgeManager.currentTimeMillis(), functions, true); - } - } - return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); + metricsSource.incrementCreateSchemaCount(); + LOGGER.info("SCHEMA created successfully, schemaName: {}", schemaName); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + LOGGER.error("Creating the schema" + schemaName + "failed", t); + ProtobufUtil.setControllerException(controller, ClientUtil.createIOException(schemaName, t)); } - - 
@Override - public void createSchema(RpcController controller, CreateSchemaRequest request, - RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - String schemaName = null; - try { - List schemaMutations = ProtobufUtil.getMutations(request); - schemaName = request.getSchemaName(); - //don't do the user permission checks for the SYSTEM schema, because an ordinary - //user has to be able to create it if it doesn't already exist when bootstrapping - //the system tables - if (!schemaName.equals(QueryConstants.SYSTEM_SCHEMA_NAME)) { - getCoprocessorHost().preCreateSchema(schemaName); - } - Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(schemaMutations); - - byte[] lockKey = m.getRow(); - Region region = env.getRegion(); - MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - List locks = Lists.newArrayList(); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); - try { - acquireLock(region, lockKey, locks, false); - // Get as of latest timestamp so we can detect if we have a newer schema that already exists without - // making an additional query - ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(lockKey); - PSchema schema = loadSchema(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp); - if (schema != null) { - if (schema.getTimeStamp() < clientTimeStamp) { - if (!isSchemaDeleted(schema)) { - builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_ALREADY_EXISTS); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setSchema(PSchema.toProto(schema)); - done.run(builder.build()); - return; - } - } else { - builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_SCHEMA_FOUND); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - builder.setSchema(PSchema.toProto(schema)); - done.run(builder.build()); - return; - } - } - mutateRowsWithLocks(this.accessCheckEnabled, region, schemaMutations, Collections.emptySet(), - HConstants.NO_NONCE, HConstants.NO_NONCE); - - // Invalidate the cache - the next getSchema call will add it - Cache metaDataCache = - GlobalCache.getInstance(this.env).getMetaDataCache(); - if (cacheKey != null) { - metaDataCache.invalidate(cacheKey); - } - - // Get timeStamp from mutations - the above method sets it if - // it's unset - long currentTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMutations); - builder.setReturnCode(MetaDataProtos.MutationCode.SCHEMA_NOT_FOUND); - builder.setMutationTime(currentTimeStamp); - done.run(builder.build()); - - metricsSource.incrementCreateSchemaCount(); - LOGGER.info("SCHEMA created successfully, schemaName: {}", schemaName); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - LOGGER.error("Creating the schema" + schemaName + "failed", t); - ProtobufUtil.setControllerException(controller, ClientUtil.createIOException(schemaName, t)); - } + } + + @Override + public void dropSchema(RpcController controller, DropSchemaRequest request, + RpcCallback done) { + String schemaName = null; + try { + List schemaMetaData = ProtobufUtil.getMutations(request); + schemaName = request.getSchemaName(); + getCoprocessorHost().preDropSchema(schemaName); + byte[] lockKey = SchemaUtil.getSchemaKey(schemaName); + Region region = env.getRegion(); + MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); + if (result != null) { + done.run(MetaDataMutationResult.toProto(result)); + 
return; + } + List locks = Lists.newArrayList(); + long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMetaData); + try { + acquireLock(region, lockKey, locks, false); + List invalidateList = new ArrayList(1); + result = doDropSchema(clientTimeStamp, schemaName, lockKey, schemaMetaData, invalidateList); + if (result.getMutationCode() != MutationCode.SCHEMA_ALREADY_EXISTS) { + done.run(MetaDataMutationResult.toProto(result)); + return; + } + mutateRowsWithLocks(this.accessCheckEnabled, region, schemaMetaData, + Collections. emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + Cache metaDataCache = + GlobalCache.getInstance(this.env).getMetaDataCache(); + long currentTime = MetaDataUtil.getClientTimeStamp(schemaMetaData); + for (ImmutableBytesPtr ptr : invalidateList) { + metaDataCache.invalidate(ptr); + PSchema schema = newDeletedSchemaMarker(currentTime); + metaDataCache.put(ptr, schema); + metricsSource.incrementMetadataCacheAddCount(); + metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); + } + done.run(MetaDataMutationResult.toProto(result)); + + metricsSource.incrementDropSchemaCount(); + LOGGER.info("SCHEMA dropped successfully, schemaName: {}", schemaName); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + LOGGER.error("drop schema failed:", t); + ProtobufUtil.setControllerException(controller, ClientUtil.createIOException(schemaName, t)); } - - @Override - public void dropSchema(RpcController controller, DropSchemaRequest request, RpcCallback done) { - String schemaName = null; - try { - List schemaMetaData = ProtobufUtil.getMutations(request); - schemaName = request.getSchemaName(); - getCoprocessorHost().preDropSchema(schemaName); - byte[] lockKey = SchemaUtil.getSchemaKey(schemaName); - Region region = env.getRegion(); - MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region); - if (result != null) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - List locks = Lists.newArrayList(); - long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMetaData); - try { - acquireLock(region, lockKey, locks, false); - List invalidateList = new ArrayList(1); - result = doDropSchema(clientTimeStamp, schemaName, lockKey, schemaMetaData, invalidateList); - if (result.getMutationCode() != MutationCode.SCHEMA_ALREADY_EXISTS) { - done.run(MetaDataMutationResult.toProto(result)); - return; - } - mutateRowsWithLocks(this.accessCheckEnabled, region, schemaMetaData, Collections.emptySet(), - HConstants.NO_NONCE, HConstants.NO_NONCE); - Cache metaDataCache = GlobalCache.getInstance(this.env) - .getMetaDataCache(); - long currentTime = MetaDataUtil.getClientTimeStamp(schemaMetaData); - for (ImmutableBytesPtr ptr : invalidateList) { - metaDataCache.invalidate(ptr); - PSchema schema = newDeletedSchemaMarker(currentTime); - metaDataCache.put(ptr, schema); - metricsSource.incrementMetadataCacheAddCount(); - metricsSource.incrementMetadataCacheUsedSize(schema.getEstimatedSize()); - } - done.run(MetaDataMutationResult.toProto(result)); - - metricsSource.incrementDropSchemaCount(); - LOGGER.info("SCHEMA dropped successfully, schemaName: {}", schemaName); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - LOGGER.error("drop schema failed:", t); - ProtobufUtil.setControllerException(controller, ClientUtil.createIOException(schemaName, t)); - } + } + + private MetaDataMutationResult doDropSchema(long clientTimeStamp, String schemaName, byte[] key, + List schemaMutations, List 
invalidateList) + throws IOException, SQLException { + PSchema schema = + loadSchema(env, key, new ImmutableBytesPtr(key), clientTimeStamp, clientTimeStamp); + boolean areTablesExists = false; + if (schema == null) { + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); } - - private MetaDataMutationResult doDropSchema(long clientTimeStamp, String schemaName, byte[] key, - List schemaMutations, List invalidateList) throws IOException, SQLException { - PSchema schema = loadSchema(env, key, new ImmutableBytesPtr(key), clientTimeStamp, clientTimeStamp); - boolean areTablesExists = false; - if (schema == null) { - return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - if (schema.getTimeStamp() < clientTimeStamp) { - Region region = env.getRegion(); - Scan scan = MetaDataUtil.newTableRowsScan(SchemaUtil.getKeyForSchema(null, schemaName), MIN_TABLE_TIMESTAMP, - clientTimeStamp); - List results = Lists.newArrayList(); - try (RegionScanner scanner = region.getScanner(scan);) { - scanner.next(results); - if (results.isEmpty()) { // Should not be possible - return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, - EnvironmentEdgeManager.currentTimeMillis(), null); - } - do { - Cell kv = results.get(0); - if (Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), key, 0, - key.length) != 0) { - areTablesExists = true; - break; - } - results.clear(); - scanner.next(results); - } while (!results.isEmpty()); - } - if (areTablesExists) { - return new MetaDataMutationResult(MutationCode.TABLES_EXIST_ON_SCHEMA, schema, - EnvironmentEdgeManager.currentTimeMillis()); - } - invalidateList.add(new ImmutableBytesPtr(key)); - return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, - EnvironmentEdgeManager.currentTimeMillis()); + if (schema.getTimeStamp() < clientTimeStamp) { + Region region = env.getRegion(); + Scan scan = MetaDataUtil.newTableRowsScan(SchemaUtil.getKeyForSchema(null, schemaName), + MIN_TABLE_TIMESTAMP, clientTimeStamp); + List results = Lists.newArrayList(); + try (RegionScanner scanner = region.getScanner(scan);) { + scanner.next(results); + if (results.isEmpty()) { // Should not be possible + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); } - return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), - null); - + do { + Cell kv = results.get(0); + if ( + Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), key, 0, + key.length) != 0 + ) { + areTablesExists = true; + break; + } + results.clear(); + scanner.next(results); + } while (!results.isEmpty()); + } + if (areTablesExists) { + return new MetaDataMutationResult(MutationCode.TABLES_EXIST_ON_SCHEMA, schema, + EnvironmentEdgeManager.currentTimeMillis()); + } + invalidateList.add(new ImmutableBytesPtr(key)); + return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, + EnvironmentEdgeManager.currentTimeMillis()); } - - /** - * Perform atomic mutations on rows within a region - * - * @param accessCheckEnabled Use the login user to mutate rows if enabled - * @param region Region containing rows to be mutated - * @param mutations List of mutations for rows that must be contained within the region - * @param rowsToLock Rows to lock - * @param nonceGroup Optional nonce group of the operation - * @param nonce 
Optional nonce of the operation - * @throws IOException - */ - static void mutateRowsWithLocks(final boolean accessCheckEnabled, final Region region, - final List mutations, final Set rowsToLock, final long nonceGroup, - final long nonce) throws IOException { - // We need to mutate SYSTEM.CATALOG or SYSTEM.CHILD_LINK with HBase/login user - // if access is enabled. - if (accessCheckEnabled) { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - final RpcCall rpcContext = RpcUtil.getRpcContext(); - // Setting RPC context as null so that user can be resetted - try { - RpcUtil.setRpcContext(null); - region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce); - } catch (Throwable e) { - throw new IOException(e); - } finally { - // Setting RPC context back to original context of the RPC - RpcUtil.setRpcContext(rpcContext); - } - return null; - } - }); - } else { + return new MetaDataMutationResult(MutationCode.SCHEMA_NOT_FOUND, + EnvironmentEdgeManager.currentTimeMillis(), null); + + } + + /** + * Perform atomic mutations on rows within a region + * @param accessCheckEnabled Use the login user to mutate rows if enabled + * @param region Region containing rows to be mutated + * @param mutations List of mutations for rows that must be contained within the region + * @param rowsToLock Rows to lock + * @param nonceGroup Optional nonce group of the operation + * @param nonce Optional nonce of the operation + */ + static void mutateRowsWithLocks(final boolean accessCheckEnabled, final Region region, + final List mutations, final Set rowsToLock, final long nonceGroup, + final long nonce) throws IOException { + // We need to mutate SYSTEM.CATALOG or SYSTEM.CHILD_LINK with HBase/login user + // if access is enabled. + if (accessCheckEnabled) { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + final RpcCall rpcContext = RpcUtil.getRpcContext(); + // Setting RPC context as null so that user can be resetted + try { + RpcUtil.setRpcContext(null); region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce); - } - } - - private TableName getParentPhysicalTableName(PTable table) { - return (table - .getType() == PTableType.VIEW || (table.getType() == INDEX && table.getViewIndexId() != null)) - ? TableName.valueOf(table.getPhysicalName().getBytes()) - : table.getType() == PTableType.INDEX - ? TableName - .valueOf(SchemaUtil - .getPhysicalHBaseTableName(table.getParentSchemaName(), - table.getParentTableName(), table.isNamespaceMapped()) - .getBytes()) - : TableName - .valueOf( - SchemaUtil - .getPhysicalHBaseTableName(table.getSchemaName(), - table.getTableName(), table.isNamespaceMapped()) - .getBytes()); - } - - /** - * Get the server side connection by skipping system table existence check. - * - * @param config The configuration object. - * @return Connection object. - * @throws SQLException If the Connection could not be retrieved. - */ - private static Connection getServerConnectionForMetaData(final Configuration config) - throws SQLException { - Preconditions.checkNotNull(config, "The configs must not be null"); - return getServerConnectionForMetaData(new Properties(), config); - } - - /** - * Get the server side connection by skipping system table existence check. - * - * @param props The properties to be used while retrieving the Connection. Adds skipping of - * the system table existence check to the properties. - * @param config The configuration object. 
- * @return Connection object. - * @throws SQLException If the Connection could not be retrieved. - */ - private static Connection getServerConnectionForMetaData(final Properties props, - final Configuration config) - throws SQLException { - Preconditions.checkNotNull(props, "The properties must not be null"); - Preconditions.checkNotNull(config, "The configs must not be null"); - // No need to check for system table existence as the coproc is already running, - // hence the system tables are already created. - // Similarly, no need to check for client - server version compatibility as - // this is already server hosting SYSTEM.CATALOG region(s). - props.setProperty(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK, Boolean.TRUE.toString()); - return QueryUtil.getConnectionOnServer(props, config); + } catch (Throwable e) { + throw new IOException(e); + } finally { + // Setting RPC context back to original context of the RPC + RpcUtil.setRpcContext(rpcContext); + } + return null; + } + }); + } else { + region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce); } + } + + private TableName getParentPhysicalTableName(PTable table) { + return (table.getType() == PTableType.VIEW + || (table.getType() == INDEX && table.getViewIndexId() != null)) + ? TableName.valueOf(table.getPhysicalName().getBytes()) + : table.getType() == PTableType.INDEX + ? TableName.valueOf(SchemaUtil.getPhysicalHBaseTableName(table.getParentSchemaName(), + table.getParentTableName(), table.isNamespaceMapped()).getBytes()) + : TableName.valueOf(SchemaUtil.getPhysicalHBaseTableName(table.getSchemaName(), + table.getTableName(), table.isNamespaceMapped()).getBytes()); + } + + /** + * Get the server side connection by skipping system table existence check. + * @param config The configuration object. + * @return Connection object. + * @throws SQLException If the Connection could not be retrieved. + */ + private static Connection getServerConnectionForMetaData(final Configuration config) + throws SQLException { + Preconditions.checkNotNull(config, "The configs must not be null"); + return getServerConnectionForMetaData(new Properties(), config); + } + + /** + * Get the server side connection by skipping system table existence check. + * @param props The properties to be used while retrieving the Connection. Adds skipping of the + * system table existence check to the properties. + * @param config The configuration object. + * @return Connection object. + * @throws SQLException If the Connection could not be retrieved. + */ + private static Connection getServerConnectionForMetaData(final Properties props, + final Configuration config) throws SQLException { + Preconditions.checkNotNull(props, "The properties must not be null"); + Preconditions.checkNotNull(config, "The configs must not be null"); + // No need to check for system table existence as the coproc is already running, + // hence the system tables are already created. + // Similarly, no need to check for client - server version compatibility as + // this is already server hosting SYSTEM.CATALOG region(s). 
+ props.setProperty(SKIP_SYSTEM_TABLES_EXISTENCE_CHECK, Boolean.TRUE.toString()); + return QueryUtil.getConnectionOnServer(props, config); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java index d569c294fa0..04e77b278cb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,44 +31,48 @@ public interface MetaDataEndpointObserver extends Coprocessor { - void preGetTable( ObserverContext ctx, String tenantId,String tableName, - TableName physicalTableName) throws IOException; + void preGetTable(ObserverContext ctx, String tenantId, + String tableName, TableName physicalTableName) throws IOException; - void preCreateTable(final ObserverContext ctx, final String tenantId, - String tableName, TableName physicalTableName, final TableName parentPhysicalTableName, - PTableType tableType, final Set familySet, Set indexes) throws IOException; + void preCreateTable(final ObserverContext ctx, + final String tenantId, String tableName, TableName physicalTableName, + final TableName parentPhysicalTableName, PTableType tableType, final Set familySet, + Set indexes) throws IOException; - void preDropTable(final ObserverContext ctx, final String tenantId, - final String tableName,TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, List indexes) throws IOException; + void preDropTable(final ObserverContext ctx, + final String tenantId, final String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType, List indexes) + throws IOException; - void preAlterTable(final ObserverContext ctx, final String tenantId,final String tableName, - final TableName physicalTableName,final TableName parentPhysicalTableName, PTableType type) throws IOException; + void preAlterTable(final ObserverContext ctx, + final String tenantId, final String tableName, final TableName physicalTableName, + final TableName parentPhysicalTableName, PTableType type) throws IOException; - void preGetSchema(final ObserverContext ctx, final String schemaName) - throws IOException; + void preGetSchema(final ObserverContext ctx, + final String schemaName) throws IOException; - void preCreateSchema(final ObserverContext ctx, final String schemaName) - throws IOException; + void preCreateSchema(final ObserverContext ctx, + final String schemaName) throws IOException; - void preDropSchema(final ObserverContext ctx, final String schemaName) - throws IOException; + void preDropSchema(final ObserverContext ctx, + final String schemaName) throws IOException; - void preCreateFunction(final ObserverContext ctx, final String tenantId, - final String functionName) throws IOException; + void preCreateFunction(final ObserverContext ctx, + final String tenantId, final String functionName) throws IOException; - void preDropFunction(final ObserverContext ctx, final String tenantId, - final String 
functionName) throws IOException; + void preDropFunction(final ObserverContext ctx, + final String tenantId, final String functionName) throws IOException; - void preGetFunctions(final ObserverContext ctx, final String tenantId, - final String functionName) throws IOException; + void preGetFunctions(final ObserverContext ctx, + final String tenantId, final String functionName) throws IOException; - void preIndexUpdate(ObserverContext ctx, String tenantId, - String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState) throws IOException; + void preIndexUpdate(ObserverContext ctx, String tenantId, + String indexName, TableName physicalTableName, TableName parentPhysicalTableName, + PIndexState newState) throws IOException; - void preCreateViewAddChildLink(final ObserverContext ctx, - final String tableName) throws IOException; + void preCreateViewAddChildLink(final ObserverContext ctx, + final String tableName) throws IOException; - void preUpsertTaskDetails( - ObserverContext ctx, - String tableName) throws IOException; + void preUpsertTaskDetails(ObserverContext ctx, + String tableName) throws IOException; } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java index 0562dfea816..159214b3042 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -82,6 +82,10 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; @@ -96,610 +100,623 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - - /** - * Coprocessor for metadata related operations. This coprocessor would only be registered - * to SYSTEM.TABLE. + * Coprocessor for metadata related operations. This coprocessor would only be registered to + * SYSTEM.TABLE. 
*/ @SuppressWarnings("deprecation") -public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor { - public static final Logger LOGGER = LoggerFactory.getLogger(MetaDataRegionObserver.class); - public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = "REBUILDINDEX"; - // PHOENIX-5094 To differentiate the increment in PENDING_DISABLE_COUNT made by client or index - // rebuilder, we are using large value for index rebuilder - public static final long PENDING_DISABLE_INACTIVE_STATE_COUNT = 10000L; - private static final byte[] SYSTEM_CATALOG_KEY = SchemaUtil.getTableKey( - ByteUtil.EMPTY_BYTE_ARRAY, - QueryConstants.SYSTEM_SCHEMA_NAME_BYTES, - PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES); - protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1); - private ScheduledThreadPoolExecutor truncateTaskExectuor = new ScheduledThreadPoolExecutor(1, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("task-truncated-%d").build()); - private boolean enableRebuildIndex = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD; - private long rebuildIndexTimeInterval = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL; - private static Map batchExecutedPerTableMap = new HashMap(); - @GuardedBy("MetaDataRegionObserver.class") - private static Properties rebuildIndexConnectionProps; - // Added for test purposes - private long initialRebuildTaskDelay; - private long statsTruncateTaskDelay; - - @Override - public void preClose(final ObserverContext c, - boolean abortRequested) { - executor.shutdownNow(); - GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll(); +public class MetaDataRegionObserver implements RegionObserver, RegionCoprocessor { + public static final Logger LOGGER = LoggerFactory.getLogger(MetaDataRegionObserver.class); + public static final String REBUILD_INDEX_APPEND_TO_URL_STRING = "REBUILDINDEX"; + // PHOENIX-5094 To differentiate the increment in PENDING_DISABLE_COUNT made by client or index + // rebuilder, we are using large value for index rebuilder + public static final long PENDING_DISABLE_INACTIVE_STATE_COUNT = 10000L; + private static final byte[] SYSTEM_CATALOG_KEY = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, + QueryConstants.SYSTEM_SCHEMA_NAME_BYTES, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES); + protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1); + private ScheduledThreadPoolExecutor truncateTaskExectuor = new ScheduledThreadPoolExecutor(1, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("task-truncated-%d").build()); + private boolean enableRebuildIndex = QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD; + private long rebuildIndexTimeInterval = + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL; + private static Map batchExecutedPerTableMap = new HashMap(); + @GuardedBy("MetaDataRegionObserver.class") + private static Properties rebuildIndexConnectionProps; + // Added for test purposes + private long initialRebuildTaskDelay; + private long statsTruncateTaskDelay; + + @Override + public void preClose(final ObserverContext c, + boolean abortRequested) { + executor.shutdownNow(); + GlobalCache.getInstance(c.getEnvironment()).getMetaDataCache().invalidateAll(); + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + // sleep a little bit to compensate time clock 
skew when SYSTEM.CATALOG moves + // among region servers because we relies on server time of RS which is hosting + // SYSTEM.CATALOG + Configuration config = env.getConfiguration(); + long sleepTime = config.getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, + QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL); + try { + if (sleepTime > 0) { + Thread.sleep(sleepTime); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); } - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - @Override - public void start(CoprocessorEnvironment env) throws IOException { - // sleep a little bit to compensate time clock skew when SYSTEM.CATALOG moves - // among region servers because we relies on server time of RS which is hosting - // SYSTEM.CATALOG - Configuration config = env.getConfiguration(); - long sleepTime = config.getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB, - QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL); + enableRebuildIndex = config.getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD); + rebuildIndexTimeInterval = + config.getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL); + initialRebuildTaskDelay = config.getLong(QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY); + statsTruncateTaskDelay = config.getLong(QueryServices.START_TRUNCATE_TASK_DELAY, + QueryServicesOptions.DEFAULT_START_TRUNCATE_TASK_DELAY); + } + + @Override + public void postOpen(ObserverContext e) { + final RegionCoprocessorEnvironment env = e.getEnvironment(); + + Runnable r = new Runnable() { + @Override + public void run() { + Table metaTable = null; + Table statsTable = null; try { - if(sleepTime > 0) { - Thread.sleep(sleepTime); - } - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } - enableRebuildIndex = - config.getBoolean( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD); - rebuildIndexTimeInterval = - config.getLong( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL); - initialRebuildTaskDelay = - config.getLong( - QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY); - statsTruncateTaskDelay = - config.getLong( - QueryServices.START_TRUNCATE_TASK_DELAY, - QueryServicesOptions.DEFAULT_START_TRUNCATE_TASK_DELAY); - } - - @Override - public void postOpen(ObserverContext e) { - final RegionCoprocessorEnvironment env = e.getEnvironment(); - - Runnable r = new Runnable() { + ReadOnlyProps props = new ReadOnlyProps(env.getConfiguration().iterator()); + Thread.sleep(1000); + metaTable = env.getConnection().getTable( + SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props)); + statsTable = env.getConnection().getTable( + SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props)); + final Table mTable = metaTable; + final Table sTable = statsTable; + User.runAsLoginUser(new PrivilegedExceptionAction() { @Override - public void run() { - Table metaTable = null; - Table statsTable = null; - try { - ReadOnlyProps props=new ReadOnlyProps(env.getConfiguration().iterator()); - Thread.sleep(1000); - metaTable = env.getConnection().getTable( - 
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props)); - statsTable = env.getConnection().getTable( - SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props)); - final Table mTable=metaTable; - final Table sTable=statsTable; - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - if (UpgradeUtil.truncateStats(mTable, sTable)) { - LOGGER.info("Stats are successfully truncated for upgrade 4.7!!"); - } - return null; - } - }); - - } catch (Exception exception) { - LOGGER.warn("Exception while truncate stats..," - + " please check and delete stats manually inorder to get proper result with old client!!"); - LOGGER.warn(exception.getStackTrace().toString()); - } finally { - try { - if (metaTable != null) { - metaTable.close(); - } - if (statsTable != null) { - statsTable.close(); - } - } catch (IOException e) {} - } + public Void run() throws Exception { + if (UpgradeUtil.truncateStats(mTable, sTable)) { + LOGGER.info("Stats are successfully truncated for upgrade 4.7!!"); + } + return null; } - }; - - if (env.getConfiguration() - .getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)) { - truncateTaskExectuor.schedule(r, statsTruncateTaskDelay, TimeUnit.MILLISECONDS); - } else { - LOGGER.info("Stats collection is disabled"); - } - - - if (!enableRebuildIndex) { - LOGGER.info("Failure Index Rebuild is skipped by configuration."); - return; - } - // Ensure we only run one of the index rebuilder tasks - if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) { - try { - Class.forName(PhoenixDriver.class.getName()); - initRebuildIndexConnectionProps(e.getEnvironment().getConfiguration()); - // starts index rebuild schedule work - BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment()); - executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval, TimeUnit.MILLISECONDS); - } catch (ClassNotFoundException ex) { - LOGGER.error("BuildIndexScheduleTask cannot start!", ex); + }); + + } catch (Exception exception) { + LOGGER.warn("Exception while truncate stats..," + + " please check and delete stats manually inorder to get proper result with old client!!"); + LOGGER.warn(exception.getStackTrace().toString()); + } finally { + try { + if (metaTable != null) { + metaTable.close(); + } + if (statsTable != null) { + statsTable.close(); } + } catch (IOException e) { + } } + } + }; + + if ( + env.getConfiguration().getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED) + ) { + truncateTaskExectuor.schedule(r, statsTruncateTaskDelay, TimeUnit.MILLISECONDS); + } else { + LOGGER.info("Stats collection is disabled"); } - /** - * Task runs periodically to build indexes whose INDEX_NEED_PARTIALLY_REBUILD is set true - * - */ - public static class BuildIndexScheduleTask extends TimerTask { - RegionCoprocessorEnvironment env; - private final long rebuildIndexBatchSize; - private final long configuredBatches; - private final long indexDisableTimestampThreshold; - private final long pendingDisableThreshold; - private final ReadOnlyProps props; - private final List onlyTheseTables; - - public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) { - this(env,null); - } + if (!enableRebuildIndex) { + LOGGER.info("Failure Index Rebuild is skipped by configuration."); + return; + } + // Ensure we only run one of the index rebuilder tasks + if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, 
e.getEnvironment().getRegion())) { + try { + Class.forName(PhoenixDriver.class.getName()); + initRebuildIndexConnectionProps(e.getEnvironment().getConfiguration()); + // starts index rebuild schedule work + BuildIndexScheduleTask task = new BuildIndexScheduleTask(e.getEnvironment()); + executor.scheduleWithFixedDelay(task, initialRebuildTaskDelay, rebuildIndexTimeInterval, + TimeUnit.MILLISECONDS); + } catch (ClassNotFoundException ex) { + LOGGER.error("BuildIndexScheduleTask cannot start!", ex); + } + } + } + + /** + * Task runs periodically to build indexes whose INDEX_NEED_PARTIALLY_REBUILD is set true + */ + public static class BuildIndexScheduleTask extends TimerTask { + RegionCoprocessorEnvironment env; + private final long rebuildIndexBatchSize; + private final long configuredBatches; + private final long indexDisableTimestampThreshold; + private final long pendingDisableThreshold; + private final ReadOnlyProps props; + private final List onlyTheseTables; + + public BuildIndexScheduleTask(RegionCoprocessorEnvironment env) { + this(env, null); + } - public BuildIndexScheduleTask(RegionCoprocessorEnvironment env, List onlyTheseTables) { - this.onlyTheseTables = onlyTheseTables == null ? null : ImmutableList.copyOf(onlyTheseTables); - this.env = env; - Configuration configuration = env.getConfiguration(); - this.rebuildIndexBatchSize = configuration.getLong( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, HConstants.LATEST_TIMESTAMP); - this.configuredBatches = configuration.getLong( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE, 10); - this.indexDisableTimestampThreshold = - configuration.getLong(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD); - this.pendingDisableThreshold = - configuration.getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD, - QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD); - this.props = new ReadOnlyProps(env.getConfiguration().iterator()); - } + public BuildIndexScheduleTask(RegionCoprocessorEnvironment env, List onlyTheseTables) { + this.onlyTheseTables = onlyTheseTables == null ? 
null : ImmutableList.copyOf(onlyTheseTables); + this.env = env; + Configuration configuration = env.getConfiguration(); + this.rebuildIndexBatchSize = configuration + .getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, HConstants.LATEST_TIMESTAMP); + this.configuredBatches = configuration + .getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE, 10); + this.indexDisableTimestampThreshold = + configuration.getLong(QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD); + this.pendingDisableThreshold = + configuration.getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD, + QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD); + this.props = new ReadOnlyProps(env.getConfiguration().iterator()); + } - public List decrementIndexesPendingDisableCount(PhoenixConnection conn, PTable dataPTable, List indexes){ - List indexesIncremented = new ArrayList<>(); - for(PTable index :indexes) { - try { - String indexName = index.getName().getString(); - IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT); - indexesIncremented.add(index); - }catch(Exception e) { - LOGGER.warn("Decrement of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT - +" for index :" + index.getName().getString() + "of table: " - + dataPTable.getName().getString(), e); - } - } - return indexesIncremented; + public List decrementIndexesPendingDisableCount(PhoenixConnection conn, + PTable dataPTable, List indexes) { + List indexesIncremented = new ArrayList<>(); + for (PTable index : indexes) { + try { + String indexName = index.getName().getString(); + IndexUtil.incrementCounterForIndex(conn, indexName, + -PENDING_DISABLE_INACTIVE_STATE_COUNT); + indexesIncremented.add(index); + } catch (Exception e) { + LOGGER.warn("Decrement of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT + " for index :" + + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e); } + } + return indexesIncremented; + } - @Override - public void run() { - // FIXME: we should replay the data table Put, as doing a partial index build would only add - // the new rows and not delete the previous index value. Also, we should restrict the scan - // to only data within this region (as otherwise *every* region will be running this code - // separately, all updating the same data. - RegionScanner scanner = null; - PhoenixConnection conn = null; + @Override + public void run() { + // FIXME: we should replay the data table Put, as doing a partial index build would only add + // the new rows and not delete the previous index value. Also, we should restrict the scan + // to only data within this region (as otherwise *every* region will be running this code + // separately, all updating the same data. 
+ RegionScanner scanner = null; + PhoenixConnection conn = null; + try { + Scan scan = new Scan(); + SingleColumnValueFilter filter = + new SingleColumnValueFilter(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, + CompareFilter.CompareOp.NOT_EQUAL, PLong.INSTANCE.toBytes(0L)); + filter.setFilterIfMissing(true); + scan.setFilter(filter); + scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.TABLE_NAME_BYTES); + scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES); + scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_STATE_BYTES); + scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES); + + Map>> dataTableToIndexesMap = null; + boolean hasMore = false; + List results = new ArrayList(); + scanner = this.env.getRegion().getScanner(scan); + + do { + results.clear(); + hasMore = scanner.next(results); + if (results.isEmpty()) { + LOGGER.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP"); + break; + } + + Result r = Result.create(results); + byte[] disabledTimeStamp = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES); + Cell indexStateCell = r.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.INDEX_STATE_BYTES); + + if (disabledTimeStamp == null || disabledTimeStamp.length == 0) { + LOGGER.debug("Null or empty INDEX_DISABLE_TIMESTAMP"); + continue; + } + + byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES); + if ((dataTable == null || dataTable.length == 0) || indexStateCell == null) { + // data table name can't be empty + LOGGER.debug("Null or data table name or index state"); + continue; + } + + byte[] indexStateBytes = CellUtil.cloneValue(indexStateCell); + byte[][] rowKeyMetaData = new byte[3][]; + SchemaUtil.getVarChars(r.getRow(), 3, rowKeyMetaData); + byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] indexTable = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + + // validity check + if (indexTable == null || indexTable.length == 0) { + LOGGER.debug("We find IndexTable empty during rebuild scan:" + scan + + "so, Index rebuild has been skipped for row=" + r); + continue; + } + + String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable); + if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) { + LOGGER.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables); + continue; + } + + if (conn == null) { + conn = getRebuildIndexConnection(env.getConfiguration()); + dataTableToIndexesMap = Maps.newHashMap(); + } + PTable dataPTable = conn.getTableNoCache(dataTableFullName); + + String indexTableFullName = SchemaUtil.getTableName(schemaName, indexTable); + PTable indexPTable = conn.getTableNoCache(indexTableFullName); + // Sanity check in case index was removed from table + if (!dataPTable.getIndexes().contains(indexPTable)) { + LOGGER + .debug(dataTableFullName + " does not contain " + indexPTable.getName().getString()); + continue; + } + + PIndexState indexState = PIndexState.fromSerializedValue(indexStateBytes[0]); + long pendingDisableCountLastUpdatedTs = + IndexUtil.getIndexPendingDisableCountLastUpdatedTimestamp(conn, indexTableFullName); + long elapsedSinceDisable = + 
EnvironmentEdgeManager.currentTimeMillis() - pendingDisableCountLastUpdatedTs; + + // on an index write failure, the server side transitions to PENDING_DISABLE, then the + // client + // retries, and after retries are exhausted, disables the index + if (indexState == PIndexState.PENDING_DISABLE) { + if (elapsedSinceDisable > pendingDisableThreshold) { + // too long in PENDING_DISABLE - + // client didn't disable the index because last time when + // PENDING_DISABLE_COUNT was updated is greater than pendingDisableThreshold, + // so we do it here + IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, + pendingDisableCountLastUpdatedTs); + } + continue; + } + + // Only perform relatively expensive check for all regions online when index + // is disabled or pending active since that's the state it's placed into when + // an index write fails. + if ( + (indexState.isDisabled() || indexState == PIndexState.PENDING_ACTIVE) + && !tableRegionsOnline(this.env.getConfiguration(), indexPTable) + ) { + LOGGER.debug("Index rebuild has been skipped because not all regions of" + + " index table=" + indexPTable.getName() + " are online."); + continue; + } + + if (elapsedSinceDisable > indexDisableTimestampThreshold) { + /* + * It has been too long since the index has been disabled and any future attempts to + * reenable it likely will fail. So we are going to mark the index as disabled and set + * the index disable timestamp to 0 so that the rebuild task won't pick up this index + * again for rebuild. + */ try { - Scan scan = new Scan(); - SingleColumnValueFilter filter = new SingleColumnValueFilter(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, - CompareFilter.CompareOp.NOT_EQUAL, PLong.INSTANCE.toBytes(0L)); - filter.setFilterIfMissing(true); - scan.setFilter(filter); - scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.TABLE_NAME_BYTES); - scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES); - scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.INDEX_STATE_BYTES); - scan.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES); - - Map>> dataTableToIndexesMap = null; - boolean hasMore = false; - List results = new ArrayList(); - scanner = this.env.getRegion().getScanner(scan); - - do { - results.clear(); - hasMore = scanner.next(results); - if (results.isEmpty()) { - LOGGER.debug("Found no indexes with non zero INDEX_DISABLE_TIMESTAMP"); - break; - } - - Result r = Result.create(results); - byte[] disabledTimeStamp = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES); - Cell indexStateCell = r.getColumnLatestCell(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES); - - if (disabledTimeStamp == null || disabledTimeStamp.length == 0) { - LOGGER.debug("Null or empty INDEX_DISABLE_TIMESTAMP"); - continue; - } - - byte[] dataTable = r.getValue(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES); - if ((dataTable == null || dataTable.length == 0) || indexStateCell == null) { - // data table name can't be empty - LOGGER.debug("Null or data table name or index state"); - continue; - } - - byte[] indexStateBytes = CellUtil.cloneValue(indexStateCell); - byte[][] rowKeyMetaData = new byte[3][]; - SchemaUtil.getVarChars(r.getRow(), 
3, rowKeyMetaData); - byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] indexTable = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - - // validity check - if (indexTable == null || indexTable.length == 0) { - LOGGER.debug("We find IndexTable empty during rebuild scan:" + scan - + "so, Index rebuild has been skipped for row=" + r); - continue; - } - - String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable); - if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) { - LOGGER.debug("Could not find " + dataTableFullName + - " in " + onlyTheseTables); - continue; - } - - if (conn == null) { - conn = getRebuildIndexConnection(env.getConfiguration()); - dataTableToIndexesMap = Maps.newHashMap(); - } - PTable dataPTable = conn.getTableNoCache(dataTableFullName); - - String indexTableFullName = SchemaUtil.getTableName(schemaName, indexTable); - PTable indexPTable = conn.getTableNoCache(indexTableFullName); - // Sanity check in case index was removed from table - if (!dataPTable.getIndexes().contains(indexPTable)) { - LOGGER.debug(dataTableFullName + " does not contain " + - indexPTable.getName().getString()); - continue; - } - - PIndexState indexState = PIndexState.fromSerializedValue(indexStateBytes[0]); - long pendingDisableCountLastUpdatedTs = - IndexUtil.getIndexPendingDisableCountLastUpdatedTimestamp(conn, indexTableFullName); - long elapsedSinceDisable = - EnvironmentEdgeManager.currentTimeMillis() - pendingDisableCountLastUpdatedTs; - - // on an index write failure, the server side transitions to PENDING_DISABLE, then the client - // retries, and after retries are exhausted, disables the index - if (indexState == PIndexState.PENDING_DISABLE) { - if (elapsedSinceDisable > pendingDisableThreshold) { - // too long in PENDING_DISABLE - - // client didn't disable the index because last time when - // PENDING_DISABLE_COUNT was updated is greater than pendingDisableThreshold, - // so we do it here - IndexUtil.updateIndexState(conn, indexTableFullName, - PIndexState.DISABLE, pendingDisableCountLastUpdatedTs); - } - continue; - } - - // Only perform relatively expensive check for all regions online when index - // is disabled or pending active since that's the state it's placed into when - // an index write fails. - if ((indexState.isDisabled() || indexState == PIndexState.PENDING_ACTIVE) - && !tableRegionsOnline(this.env.getConfiguration(), indexPTable)) { - LOGGER.debug("Index rebuild has been skipped because not all regions of" + - " index table=" + indexPTable.getName() + " are online."); - continue; - } - - if (elapsedSinceDisable > indexDisableTimestampThreshold) { - /* - * It has been too long since the index has been disabled and any future - * attempts to reenable it likely will fail. So we are going to mark the - * index as disabled and set the index disable timestamp to 0 so that the - * rebuild task won't pick up this index again for rebuild. - */ - try { - IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l); - LOGGER.error("Unable to rebuild index " + indexTableFullName - + ". Won't attempt again since index disable timestamp is" + - " older than current time by " + indexDisableTimestampThreshold - + " milliseconds. 
Manual intervention needed to re-build" + - " the index"); - } catch (Throwable ex) { - LOGGER.error( - "Unable to mark index " + indexTableFullName + " as disabled.", ex); - } - continue; // don't attempt another rebuild irrespective of whether - // updateIndexState worked or not - } - // Allow index to begin incremental maintenance as index is back online and we - // cannot transition directly from DISABLED -> ACTIVE - if (indexState == PIndexState.DISABLE) { - if(IndexUtil.getIndexPendingDisableCount(conn, indexTableFullName) < PENDING_DISABLE_INACTIVE_STATE_COUNT){ - // to avoid incrementing again - IndexUtil.incrementCounterForIndex(conn, indexTableFullName, PENDING_DISABLE_INACTIVE_STATE_COUNT); - } - IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.INACTIVE, null); - continue; // Must wait until clients start to do index maintenance again - } else if (indexState == PIndexState.PENDING_ACTIVE) { - IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null); - continue; // Must wait until clients start to do index maintenance again - } else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) { - LOGGER.warn("Unexpected index state of " + indexTableFullName + "=" - + indexState + ". Skipping partial rebuild attempt."); - continue; - } - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - long forwardOverlapDurationMs = env.getConfiguration().getLong( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME); - // Wait until no failures have occurred in at least forwardOverlapDurationMs - if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) { - LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() + - forwardOverlapDurationMs - currentTime) + - " before starting rebuild for " + indexTableFullName); - continue; // Haven't waited long enough yet - } - Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs; - // Pass in upperBoundOfRebuild when setting index state or increasing disable ts - // and fail if index timestamp > upperBoundOfRebuild. - List> indexesToPartiallyRebuild = dataTableToIndexesMap.get(dataPTable); - if (indexesToPartiallyRebuild == null) { - indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size()); - dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild); + IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l); + LOGGER.error("Unable to rebuild index " + indexTableFullName + + ". Won't attempt again since index disable timestamp is" + + " older than current time by " + indexDisableTimestampThreshold + + " milliseconds. 
Manual intervention needed to re-build" + " the index"); + } catch (Throwable ex) { + LOGGER.error("Unable to mark index " + indexTableFullName + " as disabled.", ex); + } + continue; // don't attempt another rebuild irrespective of whether + // updateIndexState worked or not + } + // Allow index to begin incremental maintenance as index is back online and we + // cannot transition directly from DISABLED -> ACTIVE + if (indexState == PIndexState.DISABLE) { + if ( + IndexUtil.getIndexPendingDisableCount(conn, indexTableFullName) + < PENDING_DISABLE_INACTIVE_STATE_COUNT + ) { + // to avoid incrementing again + IndexUtil.incrementCounterForIndex(conn, indexTableFullName, + PENDING_DISABLE_INACTIVE_STATE_COUNT); + } + IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.INACTIVE, null); + continue; // Must wait until clients start to do index maintenance again + } else if (indexState == PIndexState.PENDING_ACTIVE) { + IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null); + continue; // Must wait until clients start to do index maintenance again + } else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) { + LOGGER.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + + ". Skipping partial rebuild attempt."); + continue; + } + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long forwardOverlapDurationMs = env.getConfiguration().getLong( + QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME); + // Wait until no failures have occurred in at least forwardOverlapDurationMs + if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) { + LOGGER.debug("Still must wait " + + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + + " before starting rebuild for " + indexTableFullName); + continue; // Haven't waited long enough yet + } + Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs; + // Pass in upperBoundOfRebuild when setting index state or increasing disable ts + // and fail if index timestamp > upperBoundOfRebuild. 
+ List> indexesToPartiallyRebuild = + dataTableToIndexesMap.get(dataPTable); + if (indexesToPartiallyRebuild == null) { + indexesToPartiallyRebuild = + Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size()); + dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild); + } + LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" + + indexPTable.getName() + " on data table:" + dataPTable.getName() + + " which failed to be updated at " + indexPTable.getIndexDisableTimestamp()); + indexesToPartiallyRebuild.add(new Pair(indexPTable, upperBoundOfRebuild)); + } while (hasMore); + + if (dataTableToIndexesMap != null) { + long backwardOverlapDurationMs = env.getConfiguration().getLong( + QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME_ATTRIB, + env.getConfiguration().getLong( + QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME)); + for (Map.Entry>> entry : dataTableToIndexesMap + .entrySet()) { + PTable dataPTable = entry.getKey(); + List> pairs = entry.getValue(); + List indexesToPartiallyRebuild = + Lists.newArrayListWithExpectedSize(pairs.size()); + try (Table metaTable = env.getConnection().getTable(SchemaUtil + .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props))) { + long earliestDisableTimestamp = Long.MAX_VALUE; + long latestUpperBoundTimestamp = Long.MIN_VALUE; + List maintainers = Lists.newArrayListWithExpectedSize(pairs.size()); + int signOfDisableTimeStamp = 0; + for (Pair pair : pairs) { + // We need a way of differentiating the block writes to data table case from + // the leave index active case. In either case, we need to know the time stamp + // at which writes started failing so we can rebuild from that point. If we + // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, + // then writes to the data table will be blocked (this is client side logic + // and we can't change this in a minor release). So we use the sign of the + // time stamp to differentiate. 
+ PTable index = pair.getFirst(); + Long upperBoundTimestamp = pair.getSecond(); + long disabledTimeStampVal = index.getIndexDisableTimestamp(); + if (disabledTimeStampVal != 0) { + if ( + signOfDisableTimeStamp != 0 + && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal) + ) { + LOGGER + .warn("Found unexpected mix of signs with " + "INDEX_DISABLE_TIMESTAMP for " + + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild); + } + signOfDisableTimeStamp = Long.signum(disabledTimeStampVal); + disabledTimeStampVal = Math.abs(disabledTimeStampVal); + if (disabledTimeStampVal < earliestDisableTimestamp) { + earliestDisableTimestamp = disabledTimeStampVal; + } + + indexesToPartiallyRebuild.add(index); + maintainers.add(index.getIndexMaintainer(dataPTable, conn)); + } + if (upperBoundTimestamp > latestUpperBoundTimestamp) { + latestUpperBoundTimestamp = upperBoundTimestamp; + } + } + // No indexes are disabled, so skip this table + if (earliestDisableTimestamp == Long.MAX_VALUE) { + LOGGER.debug("No indexes are disabled so continuing"); + continue; + } + long scanBeginTime = + Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs); + long scanEndTime = + Math.min(latestUpperBoundTimestamp, getTimestampForBatch(scanBeginTime, + batchExecutedPerTableMap.get(dataPTable.getName()))); + LOGGER + .info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild + + " from timestamp=" + scanBeginTime + " until " + scanEndTime); + + TableRef tableRef = + new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false); + // TODO Need to set high timeout + PostDDLCompiler compiler = new PostDDLCompiler(conn); + MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), null, null, + null, scanEndTime); + Scan dataTableScan = + IndexManagementUtil.newLocalStateScan(plan.getContext().getScan(), maintainers); + + dataTableScan.setTimeRange(scanBeginTime, scanEndTime); + dataTableScan.setCacheBlocks(false); + dataTableScan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, + TRUE_BYTES); + + ImmutableBytesWritable indexMetaDataPtr = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + IndexMaintainer.serializeAdditional(dataPTable, indexMetaDataPtr, + indexesToPartiallyRebuild, conn); + byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr); + // TODO : use array of index names as Scan attribute for only + // specific index maintainer lookup at the server side. + // ScanUtil.setWALAnnotationAttributes(dataPTable, dataTableScan); + dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); + ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION); + LOGGER.info("Starting to partially build indexes:" + indexesToPartiallyRebuild + + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:" + + earliestDisableTimestamp + " till " + + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime)); + MutationState mutationState = plan.execute(); + long rowCount = mutationState.getUpdateCount(); + decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild); + if (scanEndTime == latestUpperBoundTimestamp) { + LOGGER.info("Rebuild completed for all inactive/disabled indexes in data table:" + + dataPTable.getName()); + } + LOGGER.info(" no. 
of datatable rows read in rebuilding process is " + rowCount); + for (PTable indexPTable : indexesToPartiallyRebuild) { + String indexTableFullName = SchemaUtil.getTableName( + indexPTable.getSchemaName().getString(), indexPTable.getTableName().getString()); + try { + if (scanEndTime == latestUpperBoundTimestamp) { + IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L, + latestUpperBoundTimestamp); + batchExecutedPerTableMap.remove(dataPTable.getName()); + LOGGER.info( + "Making Index:" + indexPTable.getTableName() + " active after rebuilding"); + } else { + // Increment timestamp so that client sees updated disable timestamp + IndexUtil.updateIndexState(conn, indexTableFullName, + indexPTable.getIndexState(), scanEndTime * signOfDisableTimeStamp, + latestUpperBoundTimestamp); + Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName()); + if (noOfBatches == null) { + noOfBatches = 0l; } - LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" + - indexPTable.getName() + " on data table:" + dataPTable.getName() + - " which failed to be updated at " - + indexPTable.getIndexDisableTimestamp()); - indexesToPartiallyRebuild.add(new Pair(indexPTable,upperBoundOfRebuild)); - } while (hasMore); - - if (dataTableToIndexesMap != null) { - long backwardOverlapDurationMs = env.getConfiguration().getLong( - QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME_ATTRIB, - env.getConfiguration().getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_BACKWARD_TIME)); - for (Map.Entry>> entry : dataTableToIndexesMap.entrySet()) { - PTable dataPTable = entry.getKey(); - List> pairs = entry.getValue(); - List indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(pairs.size()); - try ( - Table metaTable = env.getConnection().getTable( - SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props))) { - long earliestDisableTimestamp = Long.MAX_VALUE; - long latestUpperBoundTimestamp = Long.MIN_VALUE; - List maintainers = Lists - .newArrayListWithExpectedSize(pairs.size()); - int signOfDisableTimeStamp = 0; - for (Pair pair : pairs) { - // We need a way of differentiating the block writes to data table case from - // the leave index active case. In either case, we need to know the time stamp - // at which writes started failing so we can rebuild from that point. If we - // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, - // then writes to the data table will be blocked (this is client side logic - // and we can't change this in a minor release). So we use the sign of the - // time stamp to differentiate. 
- PTable index = pair.getFirst(); - Long upperBoundTimestamp = pair.getSecond(); - long disabledTimeStampVal = index.getIndexDisableTimestamp(); - if (disabledTimeStampVal != 0) { - if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) { - LOGGER.warn("Found unexpected mix of signs with " + - "INDEX_DISABLE_TIMESTAMP for " + - dataPTable.getName().getString() + " with " + - indexesToPartiallyRebuild); - } - signOfDisableTimeStamp = Long.signum(disabledTimeStampVal); - disabledTimeStampVal = Math.abs(disabledTimeStampVal); - if (disabledTimeStampVal < earliestDisableTimestamp) { - earliestDisableTimestamp = disabledTimeStampVal; - } - - indexesToPartiallyRebuild.add(index); - maintainers.add(index.getIndexMaintainer(dataPTable, conn)); - } - if (upperBoundTimestamp > latestUpperBoundTimestamp) { - latestUpperBoundTimestamp = upperBoundTimestamp; - } - } - // No indexes are disabled, so skip this table - if (earliestDisableTimestamp == Long.MAX_VALUE) { - LOGGER.debug("No indexes are disabled so continuing"); - continue; - } - long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs); - long scanEndTime = Math.min(latestUpperBoundTimestamp, - getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName()))); - LOGGER.info("Starting to build " + dataPTable + " indexes " - + indexesToPartiallyRebuild + " from timestamp=" + - scanBeginTime + " until " + scanEndTime); - - TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false); - // TODO Need to set high timeout - PostDDLCompiler compiler = new PostDDLCompiler(conn); - MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), null, null, null, scanEndTime); - Scan dataTableScan = IndexManagementUtil.newLocalStateScan(plan.getContext().getScan(), maintainers); - - dataTableScan.setTimeRange(scanBeginTime, scanEndTime); - dataTableScan.setCacheBlocks(false); - dataTableScan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - - ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable( - ByteUtil.EMPTY_BYTE_ARRAY); - IndexMaintainer.serializeAdditional(dataPTable, indexMetaDataPtr, indexesToPartiallyRebuild, - conn); - byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr); - // TODO : use array of index names as Scan attribute for only - // specific index maintainer lookup at the server side. - // ScanUtil.setWALAnnotationAttributes(dataPTable, dataTableScan); - dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); - ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION); - LOGGER.info("Starting to partially build indexes:" + indexesToPartiallyRebuild - + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:" - + earliestDisableTimestamp + " till " - + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime)); - MutationState mutationState = plan.execute(); - long rowCount = mutationState.getUpdateCount(); - decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild); - if (scanEndTime == latestUpperBoundTimestamp) { - LOGGER.info("Rebuild completed for all inactive/disabled indexes in data table:" - + dataPTable.getName()); - } - LOGGER.info(" no. 
of datatable rows read in rebuilding process is " + rowCount); - for (PTable indexPTable : indexesToPartiallyRebuild) { - String indexTableFullName = SchemaUtil.getTableName( - indexPTable.getSchemaName().getString(), - indexPTable.getTableName().getString()); - try { - if (scanEndTime == latestUpperBoundTimestamp) { - IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L, - latestUpperBoundTimestamp); - batchExecutedPerTableMap.remove(dataPTable.getName()); - LOGGER.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding"); - } else { - // Increment timestamp so that client sees updated disable timestamp - IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(), - scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp); - Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName()); - if (noOfBatches == null) { - noOfBatches = 0l; - } - batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches); - LOGGER.info( - "During Round-robin build: Successfully updated index disabled timestamp for " - + indexTableFullName + " to " + scanEndTime); - } - } catch (SQLException e) { - LOGGER.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e); - } - } - } catch (Exception e) { - LOGGER.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e); - } - } - } - } catch (Throwable t) { - LOGGER.warn("ScheduledBuildIndexTask failed!", t); - } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (IOException ignored) { - LOGGER.debug("ScheduledBuildIndexTask can't close scanner.", ignored); - } - } - if (conn != null) { - try { - conn.close(); - } catch (SQLException ignored) { - LOGGER.debug("ScheduledBuildIndexTask can't close connection", ignored); - } - } - } + batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches); + LOGGER.info( + "During Round-robin build: Successfully updated index disabled timestamp for " + + indexTableFullName + " to " + scanEndTime); + } + } catch (SQLException e) { + LOGGER.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, + e); + } + } + } catch (Exception e) { + LOGGER.error( + "Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e); + } + } } - - private long getTimestampForBatch(long disabledTimeStamp, Long noOfBatches) { - if (disabledTimeStamp < 0 || rebuildIndexBatchSize > (HConstants.LATEST_TIMESTAMP - - disabledTimeStamp)) { return HConstants.LATEST_TIMESTAMP; } - long timestampForNextBatch = disabledTimeStamp + rebuildIndexBatchSize; - if (timestampForNextBatch < 0 || timestampForNextBatch > EnvironmentEdgeManager.currentTimeMillis() - || (noOfBatches != null && noOfBatches > configuredBatches)) { - // if timestampForNextBatch cross current time , then we should - // build the complete index - timestampForNextBatch = HConstants.LATEST_TIMESTAMP; - } - return timestampForNextBatch; + } catch (Throwable t) { + LOGGER.warn("ScheduledBuildIndexTask failed!", t); + } finally { + if (scanner != null) { + try { + scanner.close(); + } catch (IOException ignored) { + LOGGER.debug("ScheduledBuildIndexTask can't close scanner.", ignored); + } } - } - - @VisibleForTesting - public static synchronized void initRebuildIndexConnectionProps(Configuration config) { - if (rebuildIndexConnectionProps == null) { - Properties props = new Properties(); - long indexRebuildQueryTimeoutMs = - config.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, - 
QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); - long indexRebuildRPCTimeoutMs = - config.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); - long indexRebuildClientScannerTimeOutMs = - config.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); - int indexRebuildRpcRetriesCounter = - config.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); - // Set various phoenix and hbase level timeouts and rpc retries - props.setProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - Long.toString(indexRebuildQueryTimeoutMs)); - props.setProperty(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - Long.toString(indexRebuildClientScannerTimeOutMs)); - props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY, - Long.toString(indexRebuildRPCTimeoutMs)); - props.setProperty(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - Long.toString(indexRebuildRpcRetriesCounter)); - // don't run a second index populations upsert select - props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0"); - rebuildIndexConnectionProps = PropertiesUtil.combineProperties(props, config); + if (conn != null) { + try { + conn.close(); + } catch (SQLException ignored) { + LOGGER.debug("ScheduledBuildIndexTask can't close connection", ignored); + } } + } } - public static PhoenixConnection getRebuildIndexConnection(Configuration config) - throws SQLException { - initRebuildIndexConnectionProps(config); - //return QueryUtil.getConnectionOnServer(rebuildIndexConnectionProps, config).unwrap(PhoenixConnection.class); - return QueryUtil.getConnectionOnServerWithCustomUrl(rebuildIndexConnectionProps, - REBUILD_INDEX_APPEND_TO_URL_STRING).unwrap(PhoenixConnection.class); + private long getTimestampForBatch(long disabledTimeStamp, Long noOfBatches) { + if ( + disabledTimeStamp < 0 + || rebuildIndexBatchSize > (HConstants.LATEST_TIMESTAMP - disabledTimeStamp) + ) { + return HConstants.LATEST_TIMESTAMP; + } + long timestampForNextBatch = disabledTimeStamp + rebuildIndexBatchSize; + if ( + timestampForNextBatch < 0 + || timestampForNextBatch > EnvironmentEdgeManager.currentTimeMillis() + || (noOfBatches != null && noOfBatches > configuredBatches) + ) { + // if timestampForNextBatch cross current time , then we should + // build the complete index + timestampForNextBatch = HConstants.LATEST_TIMESTAMP; + } + return timestampForNextBatch; } - - public static boolean tableRegionsOnline(Configuration conf, PTable table) { - try (ClusterConnection hcon = - (ClusterConnection) ConnectionFactory.createConnection(conf)) { - List locations = hcon.locateRegions( - org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes())); - - for (HRegionLocation loc : locations) { - try { - ServerName sn = loc.getServerName(); - if (sn == null) continue; - - AdminProtos.AdminService.BlockingInterface admin = hcon.getAdmin(sn); - HBaseRpcController controller = hcon.getRpcControllerFactory().newController(); - org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller, - admin, loc.getRegion().getRegionName()); - } catch (RemoteException e) { - LOGGER.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e); - return false; - } - } - } catch (IOException ex) { - LOGGER.warn("tableRegionsOnline failed due to:", ex); - return false; + } + + @VisibleForTesting + public static 
synchronized void initRebuildIndexConnectionProps(Configuration config) { + if (rebuildIndexConnectionProps == null) { + Properties props = new Properties(); + long indexRebuildQueryTimeoutMs = + config.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); + long indexRebuildRPCTimeoutMs = config.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); + long indexRebuildClientScannerTimeOutMs = + config.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); + int indexRebuildRpcRetriesCounter = + config.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); + // Set various phoenix and hbase level timeouts and rpc retries + props.setProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + Long.toString(indexRebuildQueryTimeoutMs)); + props.setProperty(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Long.toString(indexRebuildClientScannerTimeOutMs)); + props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY, Long.toString(indexRebuildRPCTimeoutMs)); + props.setProperty(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + Long.toString(indexRebuildRpcRetriesCounter)); + // don't run a second index populations upsert select + props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0"); + rebuildIndexConnectionProps = PropertiesUtil.combineProperties(props, config); + } + } + + public static PhoenixConnection getRebuildIndexConnection(Configuration config) + throws SQLException { + initRebuildIndexConnectionProps(config); + // return QueryUtil.getConnectionOnServer(rebuildIndexConnectionProps, + // config).unwrap(PhoenixConnection.class); + return QueryUtil.getConnectionOnServerWithCustomUrl(rebuildIndexConnectionProps, + REBUILD_INDEX_APPEND_TO_URL_STRING).unwrap(PhoenixConnection.class); + } + + public static boolean tableRegionsOnline(Configuration conf, PTable table) { + try (ClusterConnection hcon = (ClusterConnection) ConnectionFactory.createConnection(conf)) { + List locations = hcon.locateRegions( + org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes())); + + for (HRegionLocation loc : locations) { + try { + ServerName sn = loc.getServerName(); + if (sn == null) continue; + + AdminProtos.AdminService.BlockingInterface admin = hcon.getAdmin(sn); + HBaseRpcController controller = hcon.getRpcControllerFactory().newController(); + org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller, admin, + loc.getRegion().getRegionName()); + } catch (RemoteException e) { + LOGGER.debug( + "Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e); + return false; } - return true; + } + } catch (IOException ex) { + LOGGER.warn("tableRegionsOnline failed due to:", ex); + return false; } -} \ No newline at end of file + return true; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidGCProcessor.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidGCProcessor.java index 379e90a1aec..333a6a387bb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidGCProcessor.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidGCProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,38 +17,37 @@ */ package org.apache.phoenix.coprocessor; +import java.io.IOException; +import java.util.Optional; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.omid.transaction.OmidCompactor; -import java.io.IOException; -import java.util.Optional; - - public class OmidGCProcessor extends DelegateRegionObserver implements RegionCoprocessor { - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } - public OmidGCProcessor() { - super(new OmidCompactor(true)); - } + public OmidGCProcessor() { + super(new OmidCompactor(true)); + } - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (delegate instanceof RegionCoprocessor) { - ((RegionCoprocessor)delegate).start(env); - } + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (delegate instanceof RegionCoprocessor) { + ((RegionCoprocessor) delegate).start(env); } + } - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - if (delegate instanceof RegionCoprocessor) { - ((RegionCoprocessor)delegate).stop(env); - } + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + if (delegate instanceof RegionCoprocessor) { + ((RegionCoprocessor) delegate).stop(env); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java index c6c250bf9d8..1da216e4677 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/OmidTransactionalProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,39 +17,39 @@ */ package org.apache.phoenix.coprocessor; +import java.io.IOException; +import java.util.Optional; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.omid.transaction.OmidSnapshotFilter; import org.apache.phoenix.transaction.OmidTransactionProvider; -import java.io.IOException; -import java.util.Optional; +public class OmidTransactionalProcessor extends DelegateRegionObserver + implements RegionCoprocessor { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } -public class OmidTransactionalProcessor extends DelegateRegionObserver implements RegionCoprocessor { - - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - public OmidTransactionalProcessor() { - super(new OmidSnapshotFilter(OmidTransactionProvider.getInstance().getCommitTableClient())); - } + public OmidTransactionalProcessor() { + super(new OmidSnapshotFilter(OmidTransactionProvider.getInstance().getCommitTableClient())); + } - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (delegate instanceof RegionCoprocessor) { - ((RegionCoprocessor)delegate).start(env); - } + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (delegate instanceof RegionCoprocessor) { + ((RegionCoprocessor) delegate).start(env); } + } - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - if (delegate instanceof RegionCoprocessor) { - ((RegionCoprocessor)delegate).stop(env); - } + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + if (delegate instanceof RegionCoprocessor) { + ((RegionCoprocessor) delegate).stop(env); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java index 66aa9598a8a..7853c049ed8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PagingRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,272 +39,267 @@ import org.slf4j.LoggerFactory; /** - * PagingRegionScanner works with PagingFilter to make sure that the time between two rows - * returned by the HBase region scanner should not exceed the configured page size in ms - * (on PagingFilter). When the page size is reached (because there are too many cells/rows - * to be filtered out), PagingFilter stops the HBase region scanner and sets its state - * to STOPPED. In this case, the HBase region scanner next() returns false and - * PagingFilter#isStopped() returns true. 
PagingRegionScanner is responsible for detecting - * PagingFilter has stopped the scanner, and returning a dummy result to signal to - * Phoenix client to resume the scan operation by skipping this dummy result and calling - * ResultScanner#next(). - * - * PagingRegionScanner also converts a multi-key point lookup scan into N single point lookup - * scans to allow individual scan to leverage HBase bloom filter. This conversion is done within - * the MultiKeyPointLookup inner class. + * PagingRegionScanner works with PagingFilter to make sure that the time between two rows returned + * by the HBase region scanner should not exceed the configured page size in ms (on PagingFilter). + * When the page size is reached (because there are too many cells/rows to be filtered out), + * PagingFilter stops the HBase region scanner and sets its state to STOPPED. In this case, the + * HBase region scanner next() returns false and PagingFilter#isStopped() returns true. + * PagingRegionScanner is responsible for detecting PagingFilter has stopped the scanner, and + * returning a dummy result to signal to Phoenix client to resume the scan operation by skipping + * this dummy result and calling ResultScanner#next(). PagingRegionScanner also converts a multi-key + * point lookup scan into N single point lookup scans to allow individual scan to leverage HBase + * bloom filter. This conversion is done within the MultiKeyPointLookup inner class. */ public class PagingRegionScanner extends BaseRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(PagingRegionScanner.class); - private Region region; - private Scan scan; - private PagingFilter pagingFilter; - private MultiKeyPointLookup multiKeyPointLookup = null; - private boolean initialized = false; - - private class MultiKeyPointLookup { - private SkipScanFilter skipScanFilter; - private List pointLookupRanges = null; - private int lookupPosition = 0; - private byte[] lookupKeyPrefix = null; - private long pageSizeMs; - - private MultiKeyPointLookup(SkipScanFilter skipScanFilter) throws IOException { - this.skipScanFilter = skipScanFilter; - pageSizeMs = ScanUtil.getPageSizeMsForRegionScanner(scan); - pointLookupRanges = skipScanFilter.getPointLookupKeyRanges(); - lookupPosition = findLookupPosition(scan.getStartRow()); - if (skipScanFilter.getOffset() > 0) { - lookupKeyPrefix = new byte[skipScanFilter.getOffset()]; - System.arraycopy(scan.getStartRow(), 0, lookupKeyPrefix, 0, - skipScanFilter.getOffset()); - } - // A point lookup scan does not need to have a paging filter - if (pagingFilter != null) { - scan.setFilter(pagingFilter.getDelegateFilter()); - } - } - - private int findLookupPosition(byte[] startRowKey) { - for (int i = 0; i < pointLookupRanges.size(); i++) { - byte[] rowKey = pointLookupRanges.get(i).getLowerRange(); - if (Bytes.compareTo(startRowKey, skipScanFilter.getOffset(), - startRowKey.length - skipScanFilter.getOffset(), rowKey, 0, - rowKey.length) <= 0) { - return i; - } - } - return pointLookupRanges.size(); - } + private static final Logger LOGGER = LoggerFactory.getLogger(PagingRegionScanner.class); + private Region region; + private Scan scan; + private PagingFilter pagingFilter; + private MultiKeyPointLookup multiKeyPointLookup = null; + private boolean initialized = false; - private boolean verifyStartRowKey(byte[] startRowKey) { - // The startRowKey may not be one of the point lookup keys. This happens when - // the region moves and the HBase client adjusts the scan start row key. 
- lookupPosition = findLookupPosition(startRowKey); - if (lookupPosition == pointLookupRanges.size()) { - return false; - } - byte[] rowKey = pointLookupRanges.get(lookupPosition++).getLowerRange(); - scan.withStopRow(rowKey, true); - scan.withStopRow(rowKey, true); - return true; - } - - private RegionScanner getNewScanner() throws IOException { - if (lookupPosition >= pointLookupRanges.size()) { - return null; - } - byte[] rowKey = pointLookupRanges.get(lookupPosition++).getLowerRange(); - byte[] adjustedRowKey = rowKey; - if (lookupKeyPrefix != null) { - int len = rowKey.length + lookupKeyPrefix.length; - adjustedRowKey = new byte[len]; - System.arraycopy(lookupKeyPrefix, 0, adjustedRowKey, 0, - lookupKeyPrefix.length); - System.arraycopy(rowKey, 0, adjustedRowKey, lookupKeyPrefix.length, - rowKey.length); - } - scan.withStartRow(adjustedRowKey, true); - scan.withStopRow(adjustedRowKey, true); - return region.getScanner(scan); - } + private class MultiKeyPointLookup { + private SkipScanFilter skipScanFilter; + private List pointLookupRanges = null; + private int lookupPosition = 0; + private byte[] lookupKeyPrefix = null; + private long pageSizeMs; - private boolean hasMore() { - return lookupPosition < pointLookupRanges.size(); - } - private boolean next(List results, boolean raw, RegionScanner scanner, - ScannerContext scannerContext) throws IOException { - try { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (true) { - boolean hasMore; - if (scannerContext != null) { - hasMore = raw - ? scanner.nextRaw(results, scannerContext) - : scanner.next(results, scannerContext); - } else { - hasMore = raw ? scanner.nextRaw(results) : scanner.next(results); - } - if (hasMore) { - // Since each scan is supposed to return only one row (even when the - // start and stop row key are not the same, which happens after region - // moves or when there are delete markers in the table), this should not - // happen - LOGGER.warn("Each scan is supposed to return only one row, scan " + scan - + ", region " + region); - } - if (!results.isEmpty()) { - return hasMore(); - } - // The scanner returned an empty result. This means that one of the rows - // has been deleted. 
- if (!hasMore()) { - return false; - } - - if (EnvironmentEdgeManager.currentTimeMillis() - startTime > pageSizeMs) { - byte[] rowKey = pointLookupRanges.get(lookupPosition - 1).getLowerRange(); - ScanUtil.getDummyResult(rowKey, results); - return true; - } + private MultiKeyPointLookup(SkipScanFilter skipScanFilter) throws IOException { + this.skipScanFilter = skipScanFilter; + pageSizeMs = ScanUtil.getPageSizeMsForRegionScanner(scan); + pointLookupRanges = skipScanFilter.getPointLookupKeyRanges(); + lookupPosition = findLookupPosition(scan.getStartRow()); + if (skipScanFilter.getOffset() > 0) { + lookupKeyPrefix = new byte[skipScanFilter.getOffset()]; + System.arraycopy(scan.getStartRow(), 0, lookupKeyPrefix, 0, skipScanFilter.getOffset()); + } + // A point lookup scan does not need to have a paging filter + if (pagingFilter != null) { + scan.setFilter(pagingFilter.getDelegateFilter()); + } + } - RegionScanner regionScanner = getNewScanner(); - if (regionScanner == null) { - return false; - } - scanner.close(); - scanner = regionScanner; - } - } catch (Exception e) { - lookupPosition--; - throw e; - } finally { - scanner.close(); - } + private int findLookupPosition(byte[] startRowKey) { + for (int i = 0; i < pointLookupRanges.size(); i++) { + byte[] rowKey = pointLookupRanges.get(i).getLowerRange(); + if ( + Bytes.compareTo(startRowKey, skipScanFilter.getOffset(), + startRowKey.length - skipScanFilter.getOffset(), rowKey, 0, rowKey.length) <= 0 + ) { + return i; } + } + return pointLookupRanges.size(); } - public PagingRegionScanner(Region region, RegionScanner scanner, Scan scan) { - super(scanner); - this.region = region; - this.scan = scan; - pagingFilter = ScanUtil.getPhoenixPagingFilter(scan); + private boolean verifyStartRowKey(byte[] startRowKey) { + // The startRowKey may not be one of the point lookup keys. This happens when + // the region moves and the HBase client adjusts the scan start row key. 
+ lookupPosition = findLookupPosition(startRowKey); + if (lookupPosition == pointLookupRanges.size()) { + return false; + } + byte[] rowKey = pointLookupRanges.get(lookupPosition++).getLowerRange(); + scan.withStopRow(rowKey, true); + scan.withStopRow(rowKey, true); + return true; } - void init() throws IOException { - if (initialized) { - return; - } - TableDescriptor tableDescriptor = region.getTableDescriptor(); - BloomType bloomFilterType = tableDescriptor.getColumnFamilies()[0].getBloomFilterType(); - if (bloomFilterType == BloomType.ROW) { - // Check if the scan is a multi-point-lookup scan if so remove it from the scan - SkipScanFilter skipScanFilter = ScanUtil.removeSkipScanFilter(scan); - if (skipScanFilter != null) { - multiKeyPointLookup = new MultiKeyPointLookup(skipScanFilter); - } - } - initialized = true; + private RegionScanner getNewScanner() throws IOException { + if (lookupPosition >= pointLookupRanges.size()) { + return null; + } + byte[] rowKey = pointLookupRanges.get(lookupPosition++).getLowerRange(); + byte[] adjustedRowKey = rowKey; + if (lookupKeyPrefix != null) { + int len = rowKey.length + lookupKeyPrefix.length; + adjustedRowKey = new byte[len]; + System.arraycopy(lookupKeyPrefix, 0, adjustedRowKey, 0, lookupKeyPrefix.length); + System.arraycopy(rowKey, 0, adjustedRowKey, lookupKeyPrefix.length, rowKey.length); + } + scan.withStartRow(adjustedRowKey, true); + scan.withStopRow(adjustedRowKey, true); + return region.getScanner(scan); } - private boolean next(List results, boolean raw, ScannerContext scannerContext) - throws IOException { - init(); - if (pagingFilter != null) { - pagingFilter.init(); - } - byte[] adjustedStartRowKey = - scan.getAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY); - byte[] adjustedStartRowKeyIncludeBytes = - scan.getAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE); - // If scanners at higher level needs to re-scan the data that were already scanned - // earlier, they can provide adjusted new start row key for the scan and whether to - // include it. - // If they are set as the scan attributes, close the scanner, reopen it with - // updated start row key and whether to include it. Update mvcc read point from the - // previous scanner and set it back to the new scanner to maintain the read - // consistency for the given region. - // Once done, continue the scan operation and reset the attributes. 
- if (adjustedStartRowKey != null && adjustedStartRowKeyIncludeBytes != null) { - long mvccReadPoint = delegate.getMvccReadPoint(); - delegate.close(); - scan.withStartRow(adjustedStartRowKey, - Bytes.toBoolean(adjustedStartRowKeyIncludeBytes)); - PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint); - if (multiKeyPointLookup != null - && !multiKeyPointLookup.verifyStartRowKey(adjustedStartRowKey)) { - return false; - } - delegate = region.getScanner(scan); - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, null); - scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, null); - - } else { - if (multiKeyPointLookup != null) { - RegionScanner regionScanner = multiKeyPointLookup.getNewScanner(); - if (regionScanner == null) { - return false; - } - delegate.close(); - delegate = regionScanner; - } - } + private boolean hasMore() { + return lookupPosition < pointLookupRanges.size(); + } - if (multiKeyPointLookup != null) { - return multiKeyPointLookup.next(results, raw, delegate, scannerContext); - } - boolean hasMore; - if (scannerContext != null) { + private boolean next(List results, boolean raw, RegionScanner scanner, + ScannerContext scannerContext) throws IOException { + try { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + while (true) { + boolean hasMore; + if (scannerContext != null) { hasMore = raw - ? delegate.nextRaw(results, scannerContext) - : delegate.next(results, scannerContext); - } else { - hasMore = raw ? delegate.nextRaw(results) : delegate.next(results); - } - if (pagingFilter == null) { - return hasMore; - } - if (!hasMore) { - // There is no more row from the HBase region scanner. We need to check if - // PagingFilter has stopped the region scanner - if (pagingFilter.isStopped()) { - if (results.isEmpty()) { - byte[] rowKey = pagingFilter.getCurrentRowKeyToBeExcluded(); - LOGGER.info("Page filter stopped, generating dummy key {} ", - Bytes.toStringBinary(rowKey)); - ScanUtil.getDummyResult(rowKey, results); - } - return true; - } + ? scanner.nextRaw(results, scannerContext) + : scanner.next(results, scannerContext); + } else { + hasMore = raw ? scanner.nextRaw(results) : scanner.next(results); + } + if (hasMore) { + // Since each scan is supposed to return only one row (even when the + // start and stop row key are not the same, which happens after region + // moves or when there are delete markers in the table), this should not + // happen + LOGGER.warn( + "Each scan is supposed to return only one row, scan " + scan + ", region " + region); + } + if (!results.isEmpty()) { + return hasMore(); + } + // The scanner returned an empty result. This means that one of the rows + // has been deleted. + if (!hasMore()) { return false; - } else { - // We got a row from the HBase scanner within the configured time (i.e., - // the page size). We need to start a new page on the next next() call. 
+ } + + if (EnvironmentEdgeManager.currentTimeMillis() - startTime > pageSizeMs) { + byte[] rowKey = pointLookupRanges.get(lookupPosition - 1).getLowerRange(); + ScanUtil.getDummyResult(rowKey, results); return true; + } + + RegionScanner regionScanner = getNewScanner(); + if (regionScanner == null) { + return false; + } + scanner.close(); + scanner = regionScanner; } + } catch (Exception e) { + lookupPosition--; + throw e; + } finally { + scanner.close(); + } } + } - @Override - public boolean next(List results) throws IOException { - return next(results, false, null); - } + public PagingRegionScanner(Region region, RegionScanner scanner, Scan scan) { + super(scanner); + this.region = region; + this.scan = scan; + pagingFilter = ScanUtil.getPhoenixPagingFilter(scan); + } - @Override - public boolean nextRaw(List results) throws IOException { - return next(results, true, null); + void init() throws IOException { + if (initialized) { + return; } + TableDescriptor tableDescriptor = region.getTableDescriptor(); + BloomType bloomFilterType = tableDescriptor.getColumnFamilies()[0].getBloomFilterType(); + if (bloomFilterType == BloomType.ROW) { + // Check if the scan is a multi-point-lookup scan if so remove it from the scan + SkipScanFilter skipScanFilter = ScanUtil.removeSkipScanFilter(scan); + if (skipScanFilter != null) { + multiKeyPointLookup = new MultiKeyPointLookup(skipScanFilter); + } + } + initialized = true; + } - @Override - public boolean next(List results, ScannerContext scannerContext) throws IOException { - return next(results, false, scannerContext); + private boolean next(List results, boolean raw, ScannerContext scannerContext) + throws IOException { + init(); + if (pagingFilter != null) { + pagingFilter.init(); } + byte[] adjustedStartRowKey = + scan.getAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY); + byte[] adjustedStartRowKeyIncludeBytes = + scan.getAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE); + // If scanners at higher level needs to re-scan the data that were already scanned + // earlier, they can provide adjusted new start row key for the scan and whether to + // include it. + // If they are set as the scan attributes, close the scanner, reopen it with + // updated start row key and whether to include it. Update mvcc read point from the + // previous scanner and set it back to the new scanner to maintain the read + // consistency for the given region. + // Once done, continue the scan operation and reset the attributes. 
+ if (adjustedStartRowKey != null && adjustedStartRowKeyIncludeBytes != null) { + long mvccReadPoint = delegate.getMvccReadPoint(); + delegate.close(); + scan.withStartRow(adjustedStartRowKey, Bytes.toBoolean(adjustedStartRowKeyIncludeBytes)); + PackagePrivateFieldAccessor.setMvccReadPoint(scan, mvccReadPoint); + if ( + multiKeyPointLookup != null && !multiKeyPointLookup.verifyStartRowKey(adjustedStartRowKey) + ) { + return false; + } + delegate = region.getScanner(scan); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY, null); + scan.setAttribute(QueryServices.PHOENIX_PAGING_NEW_SCAN_START_ROWKEY_INCLUDE, null); - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { - return next(results, true, scannerContext); + } else { + if (multiKeyPointLookup != null) { + RegionScanner regionScanner = multiKeyPointLookup.getNewScanner(); + if (regionScanner == null) { + return false; + } + delegate.close(); + delegate = regionScanner; + } } - @Override - public RegionScanner getNewRegionScanner(Scan scan) throws IOException { - return new PagingRegionScanner(region, region.getScanner(scan), scan); + if (multiKeyPointLookup != null) { + return multiKeyPointLookup.next(results, raw, delegate, scannerContext); } + boolean hasMore; + if (scannerContext != null) { + hasMore = + raw ? delegate.nextRaw(results, scannerContext) : delegate.next(results, scannerContext); + } else { + hasMore = raw ? delegate.nextRaw(results) : delegate.next(results); + } + if (pagingFilter == null) { + return hasMore; + } + if (!hasMore) { + // There is no more row from the HBase region scanner. We need to check if + // PagingFilter has stopped the region scanner + if (pagingFilter.isStopped()) { + if (results.isEmpty()) { + byte[] rowKey = pagingFilter.getCurrentRowKeyToBeExcluded(); + LOGGER.info("Page filter stopped, generating dummy key {} ", + Bytes.toStringBinary(rowKey)); + ScanUtil.getDummyResult(rowKey, results); + } + return true; + } + return false; + } else { + // We got a row from the HBase scanner within the configured time (i.e., + // the page size). We need to start a new page on the next next() call. + return true; + } + } + + @Override + public boolean next(List results) throws IOException { + return next(results, false, null); + } + + @Override + public boolean nextRaw(List results) throws IOException { + return next(results, true, null); + } + + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { + return next(results, false, scannerContext); + } + + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, true, scannerContext); + } + + @Override + public RegionScanner getNewRegionScanner(Scan scan) throws IOException { + return new PagingRegionScanner(region, region.getScanner(scan), scan); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java index b3b648a6777..cd605e51f85 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -79,662 +79,714 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver { - private PhoenixMetaDataControllerEnvironment env; - AtomicReference> accessControllers = new AtomicReference<>(); - private boolean hbaseAccessControllerEnabled; - private boolean accessCheckEnabled; - private boolean execPermissionsCheckEnabled; - private UserProvider userProvider; - private AccessChecker accessChecker; - private Connection serverConnection; - public static final Logger LOGGER = LoggerFactory.getLogger(PhoenixAccessController.class); - private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger."+PhoenixAccessController.class.getName()); - - @Override - public Optional getPhoenixObserver() { - return Optional.of(this); - } - - private List getAccessControllers() throws IOException { - ArrayList oldAccessControllers = accessControllers.get(); - if (oldAccessControllers == null) { - oldAccessControllers = new ArrayList<>(); - RegionCoprocessorHost cpHost = this.env.getCoprocessorHost(); - for (RegionCoprocessor cp : cpHost.findCoprocessors(RegionCoprocessor.class)) { - if (cp instanceof AccessControlService.Interface && cp instanceof MasterObserver) { - oldAccessControllers.add((MasterObserver)cp); - if (cp.getClass().getName().equals( - org.apache.hadoop.hbase.security.access.AccessController.class.getName())) { - hbaseAccessControllerEnabled = true; - } - } - } - accessControllers.set(oldAccessControllers); + private PhoenixMetaDataControllerEnvironment env; + AtomicReference> accessControllers = new AtomicReference<>(); + private boolean hbaseAccessControllerEnabled; + private boolean accessCheckEnabled; + private boolean execPermissionsCheckEnabled; + private UserProvider userProvider; + private AccessChecker accessChecker; + private Connection serverConnection; + public static final Logger LOGGER = LoggerFactory.getLogger(PhoenixAccessController.class); + private static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." 
+ PhoenixAccessController.class.getName()); + + @Override + public Optional getPhoenixObserver() { + return Optional.of(this); + } + + private List getAccessControllers() throws IOException { + ArrayList oldAccessControllers = accessControllers.get(); + if (oldAccessControllers == null) { + oldAccessControllers = new ArrayList<>(); + RegionCoprocessorHost cpHost = this.env.getCoprocessorHost(); + for (RegionCoprocessor cp : cpHost.findCoprocessors(RegionCoprocessor.class)) { + if (cp instanceof AccessControlService.Interface && cp instanceof MasterObserver) { + oldAccessControllers.add((MasterObserver) cp); + if ( + cp.getClass().getName() + .equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName()) + ) { + hbaseAccessControllerEnabled = true; + } } - return accessControllers.get(); + } + accessControllers.set(oldAccessControllers); } - - public ObserverContext getMasterObsevrverContext() throws IOException { - return new ObserverContextImpl(getActiveUser()); + return accessControllers.get(); + } + + public ObserverContext getMasterObsevrverContext() + throws IOException { + return new ObserverContextImpl(getActiveUser()); + } + + @Override + public void preGetTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName) throws IOException { + if (!accessCheckEnabled) { + return; } - - @Override - public void preGetTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName) throws IOException { - if (!accessCheckEnabled) { return; } - if (this.execPermissionsCheckEnabled) { - requireAccess("GetTable" + tenantId, physicalTableName, Action.READ, Action.EXEC); - } else { - requireAccess("GetTable" + tenantId, physicalTableName, Action.READ); - } + if (this.execPermissionsCheckEnabled) { + requireAccess("GetTable" + tenantId, physicalTableName, Action.READ, Action.EXEC); + } else { + requireAccess("GetTable" + tenantId, physicalTableName, Action.READ); + } + } + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + Configuration conf = env.getConfiguration(); + this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + if (this.accessCheckEnabled) { + // We cannot use try-with-resources with this, as the connection would get closed, but remain + // in the cache, every invocation after the first would get a closed conn. 
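      // Editorial sketch, not part of the patch: the failure mode the comment above guards
      // against, shown with the same ServerUtil.ConnectionFactory call used below and a
      // hypothetical regionCoprocessorEnv reference. The factory caches one shared connection
      // per ConnectionType, so try-with-resources would close the cached instance and every
      // later caller would receive it already closed.
      // try (Connection shared = ServerUtil.ConnectionFactory.getConnection(
      //     ServerUtil.ConnectionType.DEFAULT_SERVER_CONNECTION, regionCoprocessorEnv)) {
      //   // ... use shared ...
      // } // auto-close also closes the cached instance; the next lookup returns a closed conn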
+ serverConnection = ServerUtil.ConnectionFactory.getConnection( + ServerUtil.ConnectionType.DEFAULT_SERVER_CONNECTION, + ((PhoenixMetaDataControllerEnvironment) env).getRegionCoprocessorEnvironment()); + } else { + LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled."); + } + this.execPermissionsCheckEnabled = + conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, + AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS); + if (env instanceof PhoenixMetaDataControllerEnvironment) { + this.env = (PhoenixMetaDataControllerEnvironment) env; + } else { + throw new IllegalArgumentException( + "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment"); } - @Override - public void start(CoprocessorEnvironment env) throws IOException { - Configuration conf = env.getConfiguration(); - this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); - if (this.accessCheckEnabled) { - //We cannot use try-with-resources with this, as the connection would get closed, but remain - //in the cache, every invocation after the first would get a closed conn. - serverConnection = ServerUtil.ConnectionFactory.getConnection( - ServerUtil.ConnectionType.DEFAULT_SERVER_CONNECTION, - ((PhoenixMetaDataControllerEnvironment) env).getRegionCoprocessorEnvironment()); - } else { - LOGGER.warn( - "PhoenixAccessController has been loaded with authorization checks disabled."); - } - this.execPermissionsCheckEnabled = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, - AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS); - if (env instanceof PhoenixMetaDataControllerEnvironment) { - this.env = (PhoenixMetaDataControllerEnvironment)env; - } else { - throw new IllegalArgumentException( - "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment"); - } - - accessChecker = new AccessChecker(env.getConfiguration()); - // set the user-provider. - this.userProvider = UserProvider.instantiate(env.getConfiguration()); - // init superusers and add the server principal (if using security) - // or process owner as default super user. - Superusers.initialize(env.getConfiguration()); - } - - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - super.stop(env); - } - - @Override - public void preCreateTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, - Set familySet, Set indexes) throws IOException { - if (!accessCheckEnabled) { return; } - - if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { - TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(physicalTableName); - for (byte[] familyName : familySet) { - tableDescBuilder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName).build()); - } - final TableDescriptor htd = tableDescBuilder.build(); - for (MasterObserver observer : getAccessControllers()) { - observer.preCreateTable(getMasterObsevrverContext(), htd, null); - } - } - - // Index, view and CDC require read access on parent physical table. 
- Set physicalTablesChecked = new HashSet(); - if (tableType == PTableType.VIEW || tableType == PTableType.INDEX || tableType == PTableType.CDC) { - physicalTablesChecked.add(parentPhysicalTableName); - if (execPermissionsCheckEnabled) { - requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC); - } else { - requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ); - } - } + accessChecker = new AccessChecker(env.getConfiguration()); + // set the user-provider. + this.userProvider = UserProvider.instantiate(env.getConfiguration()); + // init superusers and add the server principal (if using security) + // or process owner as default super user. + Superusers.initialize(env.getConfiguration()); + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + super.stop(env); + } + + @Override + public void preCreateTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType, Set familySet, + Set indexes) throws IOException { + if (!accessCheckEnabled) { + return; + } - if (tableType == PTableType.VIEW) { + if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { + TableDescriptorBuilder tableDescBuilder = + TableDescriptorBuilder.newBuilder(physicalTableName); + for (byte[] familyName : familySet) { + tableDescBuilder + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName).build()); + } + final TableDescriptor htd = tableDescBuilder.build(); + for (MasterObserver observer : getAccessControllers()) { + observer.preCreateTable(getMasterObsevrverContext(), htd, null); + } + } - Action[] requiredActions = execPermissionsCheckEnabled ? - new Action[]{ Action.READ, Action.EXEC } : new Action[] { Action.READ}; - for (TableName index : indexes) { - if (!physicalTablesChecked.add(index)) { - // skip check for local index as we have already check the ACLs above - // And for same physical table multiple times like view index table - continue; - } + // Index, view and CDC require read access on parent physical table. 
+ Set physicalTablesChecked = new HashSet(); + if ( + tableType == PTableType.VIEW || tableType == PTableType.INDEX || tableType == PTableType.CDC + ) { + physicalTablesChecked.add(parentPhysicalTableName); + if (execPermissionsCheckEnabled) { + requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC); + } else { + requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ); + } + } - User user = getActiveUser(); - List permissionForUser = getPermissionForUser( - getUserPermissions(index), user.getShortName()); - Set requireAccess = new HashSet<>(); - Set accessExists = new HashSet<>(); - if (permissionForUser != null) { - for (UserPermission userPermission : permissionForUser) { - for (Action action : Arrays.asList(requiredActions)) { - if (!userPermission.getPermission().implies(action)) { - requireAccess.add(action); - } - } - } - if (!requireAccess.isEmpty()) { - for (UserPermission userPermission : permissionForUser) { - accessExists.addAll(Arrays.asList( - userPermission.getPermission().getActions())); - } - } - } else { - requireAccess.addAll(Arrays.asList(requiredActions)); - } - if (!requireAccess.isEmpty()) { - byte[] indexPhysicalTable = index.getName(); - handleRequireAccessOnDependentTable("Create" + tableType, user.getName(), - TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists); - } - } + if (tableType == PTableType.VIEW) { + Action[] requiredActions = execPermissionsCheckEnabled + ? new Action[] { Action.READ, Action.EXEC } + : new Action[] { Action.READ }; + for (TableName index : indexes) { + if (!physicalTablesChecked.add(index)) { + // skip check for local index as we have already check the ACLs above + // And for same physical table multiple times like view index table + continue; } - if (tableType == PTableType.INDEX) { - // All the users who have READ access on data table should have access to Index table as well. - // WRITE is needed for the index updates done by the user who has WRITE access on data table. - // CREATE is needed during the drop of the table. - // We are doing this because existing user while querying data table should not see access denied for the - // new indexes. 
- // TODO: confirm whether granting permission from coprocessor is a security leak.(currently it is done if - // automatic grant is enabled explicitly by user in configuration - // skip check for local index - if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName) - && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) { - List actions = new ArrayList<>(Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.ADMIN)); - if (execPermissionsCheckEnabled) { - actions.add(Action.EXEC); - } - authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName, - actions, physicalTableName); + User user = getActiveUser(); + List permissionForUser = + getPermissionForUser(getUserPermissions(index), user.getShortName()); + Set requireAccess = new HashSet<>(); + Set accessExists = new HashSet<>(); + if (permissionForUser != null) { + for (UserPermission userPermission : permissionForUser) { + for (Action action : Arrays.asList(requiredActions)) { + if (!userPermission.getPermission().implies(action)) { + requireAccess.add(action); + } } + } + if (!requireAccess.isEmpty()) { + for (UserPermission userPermission : permissionForUser) { + accessExists.addAll(Arrays.asList(userPermission.getPermission().getActions())); + } + } + } else { + requireAccess.addAll(Arrays.asList(requiredActions)); } - } - - - public void handleRequireAccessOnDependentTable(String request, String userName, TableName dependentTable, - String requestTable, Set requireAccess, Set accessExists) throws IOException { + if (!requireAccess.isEmpty()) { + byte[] indexPhysicalTable = index.getName(); + handleRequireAccessOnDependentTable("Create" + tableType, user.getName(), + TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists); + } + } - Set unionSet = new HashSet(); - unionSet.addAll(requireAccess); - unionSet.addAll(accessExists); - AUDITLOG.info(request + ": Automatically granting access to index table during creation of view:" - + requestTable + authString(userName, dependentTable, requireAccess)); - grantPermissions(userName, dependentTable.getName(), unionSet.toArray(new Action[0])); - } - - private void grantPermissions(final String toUser, final byte[] table, final Action... 
actions) throws IOException { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - try { - AccessControlClient.grant(serverConnection, TableName.valueOf(table), toUser , null, null, - actions); - } catch (Throwable e) { - throw new DoNotRetryIOException(e); - } - return null; - } - }); } - private void authorizeOrGrantAccessToUsers(final String request, final TableName fromTable, - final List requiredActionsOnTable, final TableName toTable) - throws IOException { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws IOException { - List userPermissions = getUserPermissions(fromTable); - List permissionsOnTheTable = getUserPermissions(toTable); - if (userPermissions != null) { - for (UserPermission userPermission : userPermissions) { - Set requireAccess = new HashSet(); - Set accessExists = new HashSet(); - List permsToTable = getPermissionForUser(permissionsOnTheTable, - userPermission.getUser()); - for (Action action : requiredActionsOnTable) { - boolean haveAccess=false; - if (userPermission.getPermission().implies(action)) { - if (permsToTable == null) { - requireAccess.add(action); - } else { - for (UserPermission permToTable : permsToTable) { - if (permToTable.getPermission().implies(action)) { - haveAccess=true; - } - } - if (!haveAccess) { - requireAccess.add(action); - } - } - } - } - if (permsToTable != null) { - // Append access to already existing access for the user - for (UserPermission permToTable : permsToTable) { - accessExists.addAll(Arrays.asList( - permToTable.getPermission().getActions())); - } - } - if (!requireAccess.isEmpty()) { - if (AuthUtil.isGroupPrincipal(userPermission.getUser())){ - AUDITLOG.warn("Users of GROUP:" + userPermission.getUser() - + " will not have following access " + requireAccess - + " to the newly created index " + toTable - + ", Automatic grant is not yet allowed on Groups"); - continue; - } - handleRequireAccessOnDependentTable(request, - userPermission.getUser(), toTable, - toTable.getNameAsString(), requireAccess, accessExists); - } - } - } - return null; - } - }); + if (tableType == PTableType.INDEX) { + // All the users who have READ access on data table should have access to Index table as well. + // WRITE is needed for the index updates done by the user who has WRITE access on data table. + // CREATE is needed during the drop of the table. + // We are doing this because existing user while querying data table should not see access + // denied for the + // new indexes. 
+ // TODO: confirm whether granting permission from coprocessor is a security leak.(currently it + // is done if + // automatic grant is enabled explicitly by user in configuration + // skip check for local index + if ( + physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName) + && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString()) + ) { + List actions = + new ArrayList<>(Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.ADMIN)); + if (execPermissionsCheckEnabled) { + actions.add(Action.EXEC); + } + authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName, actions, + physicalTableName); + } } - - private List getPermissionForUser(List perms, String user) { - if (perms != null) { - // get list of permissions for the user as multiple implementation of AccessControl coprocessors can give - // permissions for same users - List permissions = new ArrayList<>(); - for (UserPermission p : perms) { - if (p.getUser().equals(user)){ - permissions.add(p); - } - } - if (!permissions.isEmpty()){ - return permissions; - } + } + + public void handleRequireAccessOnDependentTable(String request, String userName, + TableName dependentTable, String requestTable, Set requireAccess, + Set accessExists) throws IOException { + + Set unionSet = new HashSet(); + unionSet.addAll(requireAccess); + unionSet.addAll(accessExists); + AUDITLOG + .info(request + ": Automatically granting access to index table during creation of view:" + + requestTable + authString(userName, dependentTable, requireAccess)); + grantPermissions(userName, dependentTable.getName(), unionSet.toArray(new Action[0])); + } + + private void grantPermissions(final String toUser, final byte[] table, final Action... actions) + throws IOException { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try { + AccessControlClient.grant(serverConnection, TableName.valueOf(table), toUser, null, null, + actions); + } catch (Throwable e) { + throw new DoNotRetryIOException(e); } return null; - } - - @Override - public void preDropTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, - List indexes) throws IOException { - if (!accessCheckEnabled) { return; } - - for (MasterObserver observer : getAccessControllers()) { - if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { - observer.preDeleteTable(getMasterObsevrverContext(), physicalTableName); - } - if (indexes != null) { - for (PTable index : indexes) { - observer.preDeleteTable(getMasterObsevrverContext(), - TableName.valueOf(index.getPhysicalName().getBytes())); + } + }); + } + + private void authorizeOrGrantAccessToUsers(final String request, final TableName fromTable, + final List requiredActionsOnTable, final TableName toTable) throws IOException { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Void run() throws IOException { + List userPermissions = getUserPermissions(fromTable); + List permissionsOnTheTable = getUserPermissions(toTable); + if (userPermissions != null) { + for (UserPermission userPermission : userPermissions) { + Set requireAccess = new HashSet(); + Set accessExists = new HashSet(); + List permsToTable = + getPermissionForUser(permissionsOnTheTable, userPermission.getUser()); + for (Action action : requiredActionsOnTable) { + boolean haveAccess = false; + if (userPermission.getPermission().implies(action)) { + if 
(permsToTable == null) { + requireAccess.add(action); + } else { + for (UserPermission permToTable : permsToTable) { + if (permToTable.getPermission().implies(action)) { + haveAccess = true; + } + } + if (!haveAccess) { + requireAccess.add(action); + } } + } } - } - //checking similar permission checked during the create of the view. - if (tableType == PTableType.VIEW || tableType == PTableType.INDEX - || tableType == PTableType.CDC) { - if (execPermissionsCheckEnabled) { - requireAccess("Drop "+tableType, parentPhysicalTableName, Action.READ, Action.EXEC); - } else { - requireAccess("Drop "+tableType, parentPhysicalTableName, Action.READ); + if (permsToTable != null) { + // Append access to already existing access for the user + for (UserPermission permToTable : permsToTable) { + accessExists.addAll(Arrays.asList(permToTable.getPermission().getActions())); + } } - } - } - - @Override - public void preAlterTable(ObserverContext ctx, String tenantId, - String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType) throws IOException { - if (!accessCheckEnabled) { return; } - for (MasterObserver observer : getAccessControllers()) { - if (tableType != PTableType.VIEW) { - observer.preModifyTable(getMasterObsevrverContext(), physicalTableName, null, - TableDescriptorBuilder.newBuilder(physicalTableName).build()); + if (!requireAccess.isEmpty()) { + if (AuthUtil.isGroupPrincipal(userPermission.getUser())) { + AUDITLOG.warn( + "Users of GROUP:" + userPermission.getUser() + " will not have following access " + + requireAccess + " to the newly created index " + toTable + + ", Automatic grant is not yet allowed on Groups"); + continue; + } + handleRequireAccessOnDependentTable(request, userPermission.getUser(), toTable, + toTable.getNameAsString(), requireAccess, accessExists); } + } } - if (tableType == PTableType.VIEW) { - if (execPermissionsCheckEnabled) { - requireAccess("Alter "+tableType, parentPhysicalTableName, Action.READ, Action.EXEC); - } else { - requireAccess("Alter "+tableType, parentPhysicalTableName, Action.READ); - } + return null; + } + }); + } + + private List getPermissionForUser(List perms, String user) { + if (perms != null) { + // get list of permissions for the user as multiple implementation of AccessControl + // coprocessors can give + // permissions for same users + List permissions = new ArrayList<>(); + for (UserPermission p : perms) { + if (p.getUser().equals(user)) { + permissions.add(p); } + } + if (!permissions.isEmpty()) { + return permissions; + } + } + return null; + } + + @Override + public void preDropTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType, List indexes) + throws IOException { + if (!accessCheckEnabled) { + return; } - @Override - public void preGetSchema(ObserverContext ctx, String schemaName) - throws IOException { - if (!accessCheckEnabled) { return; } - for (MasterObserver observer : getAccessControllers()) { - observer.preListNamespaceDescriptors(getMasterObsevrverContext(), - Arrays.asList(NamespaceDescriptor.create(schemaName).build())); + for (MasterObserver observer : getAccessControllers()) { + if (tableType != PTableType.VIEW && tableType != PTableType.CDC) { + observer.preDeleteTable(getMasterObsevrverContext(), physicalTableName); + } + if (indexes != null) { + for (PTable index : indexes) { + observer.preDeleteTable(getMasterObsevrverContext(), + 
TableName.valueOf(index.getPhysicalName().getBytes())); } + } + } + // checking similar permission checked during the create of the view. + if ( + tableType == PTableType.VIEW || tableType == PTableType.INDEX || tableType == PTableType.CDC + ) { + if (execPermissionsCheckEnabled) { + requireAccess("Drop " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC); + } else { + requireAccess("Drop " + tableType, parentPhysicalTableName, Action.READ); + } + } + } + + @Override + public void preAlterTable(ObserverContext ctx, + String tenantId, String tableName, TableName physicalTableName, + TableName parentPhysicalTableName, PTableType tableType) throws IOException { + if (!accessCheckEnabled) { + return; } + for (MasterObserver observer : getAccessControllers()) { + if (tableType != PTableType.VIEW) { + observer.preModifyTable(getMasterObsevrverContext(), physicalTableName, null, + TableDescriptorBuilder.newBuilder(physicalTableName).build()); + } + } + if (tableType == PTableType.VIEW) { + if (execPermissionsCheckEnabled) { + requireAccess("Alter " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC); + } else { + requireAccess("Alter " + tableType, parentPhysicalTableName, Action.READ); + } + } + } - @Override - public void preCreateSchema(ObserverContext ctx, String schemaName) - throws IOException { - if (!accessCheckEnabled) { return; } - for (MasterObserver observer : getAccessControllers()) { - observer.preCreateNamespace(getMasterObsevrverContext(), - NamespaceDescriptor.create(schemaName).build()); - } + @Override + public void preGetSchema(ObserverContext ctx, + String schemaName) throws IOException { + if (!accessCheckEnabled) { + return; } + for (MasterObserver observer : getAccessControllers()) { + observer.preListNamespaceDescriptors(getMasterObsevrverContext(), + Arrays.asList(NamespaceDescriptor.create(schemaName).build())); + } + } - @Override - public void preDropSchema(ObserverContext ctx, String schemaName) - throws IOException { - if (!accessCheckEnabled) { return; } - for (MasterObserver observer : getAccessControllers()) { - observer.preDeleteNamespace(getMasterObsevrverContext(), schemaName); - } + @Override + public void preCreateSchema(ObserverContext ctx, + String schemaName) throws IOException { + if (!accessCheckEnabled) { + return; } + for (MasterObserver observer : getAccessControllers()) { + observer.preCreateNamespace(getMasterObsevrverContext(), + NamespaceDescriptor.create(schemaName).build()); + } + } - @Override - public void preIndexUpdate(ObserverContext ctx, String tenantId, - String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState) - throws IOException { - if (!accessCheckEnabled) { return; } - for (MasterObserver observer : getAccessControllers()) { - observer.preModifyTable(getMasterObsevrverContext(), physicalTableName, null, - TableDescriptorBuilder.newBuilder(physicalTableName).build()); + @Override + public void preDropSchema(ObserverContext ctx, + String schemaName) throws IOException { + if (!accessCheckEnabled) { + return; + } + for (MasterObserver observer : getAccessControllers()) { + observer.preDeleteNamespace(getMasterObsevrverContext(), schemaName); + } + } + + @Override + public void preIndexUpdate(ObserverContext ctx, + String tenantId, String indexName, TableName physicalTableName, + TableName parentPhysicalTableName, PIndexState newState) throws IOException { + if (!accessCheckEnabled) { + return; + } + for (MasterObserver observer : getAccessControllers()) { + 
observer.preModifyTable(getMasterObsevrverContext(), physicalTableName, null, + TableDescriptorBuilder.newBuilder(physicalTableName).build()); + } + // Check for read access in case of rebuild + if (newState == PIndexState.BUILDING) { + if (execPermissionsCheckEnabled) { + requireAccess("Rebuild:", parentPhysicalTableName, Action.READ, Action.EXEC); + } else { + requireAccess("Rebuild:", parentPhysicalTableName, Action.READ); + } + } + } + + /** + * Gets all the permissions for a given tableName for all the users Also, get the permissions at + * table's namespace level and merge all of them + */ + private List getUserPermissions(final TableName tableName) throws IOException { + List userPermissions = + User.runAsLoginUser(new PrivilegedExceptionAction>() { + @Override + public List run() throws Exception { + final List userPermissions = new ArrayList(); + final RpcCall rpcContext = RpcUtil.getRpcContext(); + try { + // Setting RPC context as null so that user can be resetted + RpcUtil.setRpcContext(null); + // Merge permissions from all accessController coprocessors loaded in memory + for (MasterObserver service : getAccessControllers()) { + // Use AccessControlClient API's if the accessController is an instance of + // org.apache.hadoop.hbase.security.access.AccessController + if ( + service.getClass().getName() + .equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName()) + ) { + userPermissions.addAll(AccessControlClient.getUserPermissions(serverConnection, + tableName.getNameWithNamespaceInclAsString())); + userPermissions.addAll(AccessControlClient.getUserPermissions(serverConnection, + AuthUtil.toGroupEntry(tableName.getNamespaceAsString()))); + } + } + } catch (Throwable e) { + if (e instanceof Exception) { + throw (Exception) e; + } else if (e instanceof Error) { + throw (Error) e; + } + throw new Exception(e); + } finally { + // Setting RPC context back to original context of the RPC + RpcUtil.setRpcContext(rpcContext); + } + return userPermissions; } - // Check for read access in case of rebuild - if (newState == PIndexState.BUILDING) { - if (execPermissionsCheckEnabled) { - requireAccess("Rebuild:", parentPhysicalTableName, Action.READ, Action.EXEC); + }); + getUserDefinedPermissions(tableName, userPermissions); + return userPermissions; + } + + private void getUserDefinedPermissions(final TableName tableName, + final List userPermissions) throws IOException { + User.runAsLoginUser(new PrivilegedExceptionAction>() { + @Override + public List run() throws Exception { + final RpcCall rpcContext = RpcUtil.getRpcContext(); + try { + // Setting RPC context as null so that user can be resetted + RpcUtil.setRpcContext(null); + for (MasterObserver service : getAccessControllers()) { + if ( + service.getClass().getName() + .equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName()) + ) { + continue; } else { - requireAccess("Rebuild:", parentPhysicalTableName, Action.READ); + getUserPermsFromUserDefinedAccessController(userPermissions, + (AccessControlService.Interface) service); } + } + } catch (Throwable e) { + if (e instanceof Exception) { + throw (Exception) e; + } else if (e instanceof Error) { + throw (Error) e; + } + throw new Exception(e); + } finally { + // Setting RPC context back to original context of the RPC + RpcUtil.setRpcContext(rpcContext); } - } + return userPermissions; + } - /** - * Gets all the permissions for a given tableName for all the users - * Also, get the permissions at table's namespace level and merge all of them 
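The javadoc above notes that permissions are gathered both for the table and for its namespace and then merged. A self-contained sketch of that merge using the public HBase AccessControlClient API (connection setup omitted; this illustrates the idea only and is not the coprocessor code):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class MergedPermissionsSketch {
  // Collects table-level and namespace-level grants into a single list,
  // mirroring the merge performed before access is evaluated.
  public static List<UserPermission> tableAndNamespacePermissions(Connection conn,
      TableName table) throws Throwable {
    List<UserPermission> merged = new ArrayList<>();
    // Grants made directly on the table.
    merged.addAll(AccessControlClient.getUserPermissions(conn,
        table.getNameWithNamespaceInclAsString()));
    // Grants made on the table's namespace ("@namespace" entries in the ACL table).
    merged.addAll(AccessControlClient.getUserPermissions(conn,
        AuthUtil.toGroupEntry(table.getNamespaceAsString())));
    return merged;
  }
}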
- * @throws IOException - */ - private List getUserPermissions(final TableName tableName) throws IOException { - List userPermissions = - User.runAsLoginUser(new PrivilegedExceptionAction>() { - @Override - public List run() throws Exception { - final List userPermissions = new ArrayList(); - final RpcCall rpcContext = RpcUtil.getRpcContext(); - try { - // Setting RPC context as null so that user can be resetted - RpcUtil.setRpcContext(null); - // Merge permissions from all accessController coprocessors loaded in memory - for (MasterObserver service : getAccessControllers()) { - // Use AccessControlClient API's if the accessController is an instance of org.apache.hadoop.hbase.security.access.AccessController - if (service.getClass().getName().equals( - org.apache.hadoop.hbase.security.access.AccessController.class.getName())) { - userPermissions.addAll(AccessControlClient.getUserPermissions( - serverConnection, tableName.getNameWithNamespaceInclAsString())); - userPermissions.addAll(AccessControlClient.getUserPermissions( - serverConnection, AuthUtil.toGroupEntry(tableName.getNamespaceAsString()))); - } - } - } catch (Throwable e) { - if (e instanceof Exception) { - throw (Exception) e; - } else if (e instanceof Error) { - throw (Error) e; - } - throw new Exception(e); - } finally { - // Setting RPC context back to original context of the RPC - RpcUtil.setRpcContext(rpcContext); - } - return userPermissions; - } - }); - getUserDefinedPermissions(tableName, userPermissions); - return userPermissions; - } - - private void getUserDefinedPermissions(final TableName tableName, - final List userPermissions) throws IOException { - User.runAsLoginUser(new PrivilegedExceptionAction>() { - @Override - public List run() throws Exception { - final RpcCall rpcContext = RpcUtil.getRpcContext(); - try { - // Setting RPC context as null so that user can be resetted - RpcUtil.setRpcContext(null); - for (MasterObserver service : getAccessControllers()) { - if (service.getClass().getName().equals( - org.apache.hadoop.hbase.security.access.AccessController.class - .getName())) { - continue; - } else { - getUserPermsFromUserDefinedAccessController(userPermissions, - (AccessControlService.Interface) service); - } - } - } catch (Throwable e) { - if (e instanceof Exception) { - throw (Exception) e; - } else if (e instanceof Error) { - throw (Error) e; - } - throw new Exception(e); - } finally { - // Setting RPC context back to original context of the RPC - RpcUtil.setRpcContext(rpcContext); - } - return userPermissions; - } - private void getUserPermsFromUserDefinedAccessController(final List userPermissions, AccessControlService.Interface service) { + private void getUserPermsFromUserDefinedAccessController( + final List userPermissions, AccessControlService.Interface service) { - RpcController controller = new ServerRpcController(); + RpcController controller = new ServerRpcController(); - AccessControlProtos.GetUserPermissionsRequest.Builder builderTablePerms = AccessControlProtos.GetUserPermissionsRequest - .newBuilder(); - builderTablePerms.setTableName(ProtobufUtil.toProtoTableName(tableName)); - builderTablePerms.setType(AccessControlProtos.Permission.Type.Table); - AccessControlProtos.GetUserPermissionsRequest requestTablePerms = builderTablePerms.build(); + AccessControlProtos.GetUserPermissionsRequest.Builder builderTablePerms = + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + builderTablePerms.setTableName(ProtobufUtil.toProtoTableName(tableName)); + 
builderTablePerms.setType(AccessControlProtos.Permission.Type.Table); + AccessControlProtos.GetUserPermissionsRequest requestTablePerms = builderTablePerms.build(); - callGetUserPermissionsRequest(userPermissions, service, requestTablePerms, controller); + callGetUserPermissionsRequest(userPermissions, service, requestTablePerms, controller); - AccessControlProtos.GetUserPermissionsRequest.Builder builderNamespacePerms = AccessControlProtos.GetUserPermissionsRequest - .newBuilder(); - builderNamespacePerms.setNamespaceName(ByteString.copyFrom(tableName.getNamespace())); - builderNamespacePerms.setType(AccessControlProtos.Permission.Type.Namespace); - AccessControlProtos.GetUserPermissionsRequest requestNamespacePerms = builderNamespacePerms.build(); + AccessControlProtos.GetUserPermissionsRequest.Builder builderNamespacePerms = + AccessControlProtos.GetUserPermissionsRequest.newBuilder(); + builderNamespacePerms.setNamespaceName(ByteString.copyFrom(tableName.getNamespace())); + builderNamespacePerms.setType(AccessControlProtos.Permission.Type.Namespace); + AccessControlProtos.GetUserPermissionsRequest requestNamespacePerms = + builderNamespacePerms.build(); - callGetUserPermissionsRequest(userPermissions, service, requestNamespacePerms, controller); + callGetUserPermissionsRequest(userPermissions, service, requestNamespacePerms, controller); - } + } - private void callGetUserPermissionsRequest(final List userPermissions, AccessControlService.Interface service - , AccessControlProtos.GetUserPermissionsRequest request, RpcController controller) { - service.getUserPermissions(controller, request, - new RpcCallback() { - @Override - public void run(AccessControlProtos.GetUserPermissionsResponse message) { - if (message != null) { - for (AccessControlProtos.UserPermission perm : message - .getUserPermissionList()) { - userPermissions.add(AccessControlUtil.toUserPermission(perm)); - } - } - } - }); + private void callGetUserPermissionsRequest(final List userPermissions, + AccessControlService.Interface service, + AccessControlProtos.GetUserPermissionsRequest request, RpcController controller) { + service.getUserPermissions(controller, request, + new RpcCallback() { + @Override + public void run(AccessControlProtos.GetUserPermissionsResponse message) { + if (message != null) { + for (AccessControlProtos.UserPermission perm : message.getUserPermissionList()) { + userPermissions.add(AccessControlUtil.toUserPermission(perm)); + } + } } - }); + }); + } + }); + } + + /** + * Authorizes that the current user has all the given permissions for the given table and for the + * hbase namespace of the table + * @param tableName Table requested + * @throws IOException if obtaining the current user fails + * @throws AccessDeniedException if user has no authorization + */ + private void requireAccess(String request, TableName tableName, Action... 
permissions) + throws IOException { + User user = getActiveUser(); + AuthResult result = null; + List requiredAccess = new ArrayList(); + + for (Action permission : permissions) { + if (hasAccess(getUserPermissions(tableName), tableName, permission, user)) { + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); + } else { + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + null, null); + requiredAccess.add(permission); + } + logResult(result); } - - /** - * Authorizes that the current user has all the given permissions for the - * given table and for the hbase namespace of the table - * @param tableName Table requested - * @throws IOException if obtaining the current user fails - * @throws AccessDeniedException if user has no authorization - */ - private void requireAccess(String request, TableName tableName, Action... permissions) throws IOException { - User user = getActiveUser(); - AuthResult result = null; - List requiredAccess = new ArrayList(); - - for (Action permission : permissions) { - if (hasAccess(getUserPermissions(tableName), tableName, permission, user)) { - result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null); - } else { - result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, null, null); - requiredAccess.add(permission); - } - logResult(result); - } - if (!requiredAccess.isEmpty()) { - result = AuthResult.deny(request, "Insufficient permissions", user, requiredAccess.get(0), tableName, null, - null); - } - if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " - + authString(user.getName(), tableName, new HashSet(Arrays.asList(permissions)))); } + if (!requiredAccess.isEmpty()) { + result = AuthResult.deny(request, "Insufficient permissions", user, requiredAccess.get(0), + tableName, null, null); } - - /** - * Checks if the user has access to the table for the specified action. - * @param perms All table and table's namespace permissions - * @param table tablename - * @param action action for access is required - * @return true if the user has access to the table for specified action, false otherwise - */ - private boolean hasAccess(List perms, TableName table, Permission.Action action, User user) { - if (Superusers.isSuperUser(user)){ + if (!result.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + authString(user.getName(), + tableName, new HashSet(Arrays.asList(permissions)))); + } + } + + /** + * Checks if the user has access to the table for the specified action. 
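The hasAccess check that follows evaluates those permissions for the calling user and, failing that, for each of the user's groups. A condensed sketch of the implication test (assumes the permission list was already fetched, for example by a merge like the one above):

import java.util.List;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class ImpliesSketch {
  // True if any permission granted to the user, or to one of the user's
  // groups (stored as "@group" entries), implies the requested action.
  public static boolean hasAccess(List<UserPermission> perms, User user, Action action) {
    if (perms == null) {
      return false;
    }
    for (UserPermission p : perms) {
      boolean matchesUser = p.getUser().equals(user.getShortName());
      boolean matchesGroup = false;
      for (String group : user.getGroupNames()) {
        if (p.getUser().equals(AuthUtil.toGroupEntry(group))) {
          matchesGroup = true;
          break;
        }
      }
      if ((matchesUser || matchesGroup) && p.getPermission().implies(action)) {
        return true;
      }
    }
    return false;
  }
}

The real method also short-circuits for superusers and can delegate to HBase's AuthManager; those branches are omitted here for brevity.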
+ * @param perms All table and table's namespace permissions + * @param table tablename + * @param action action for access is required + * @return true if the user has access to the table for specified action, false otherwise + */ + private boolean hasAccess(List perms, TableName table, Permission.Action action, + User user) { + if (Superusers.isSuperUser(user)) { + return true; + } + if (perms != null) { + if ( + hbaseAccessControllerEnabled + && accessChecker.getAuthManager().authorizeUserTable(user, table, action) + ) { + return true; + } + List permissionsForUser = getPermissionForUser(perms, user.getShortName()); + if (permissionsForUser != null) { + for (UserPermission permissionForUser : permissionsForUser) { + if (permissionForUser.getPermission().implies(action)) { return true; + } } - if (perms != null) { - if (hbaseAccessControllerEnabled - && accessChecker.getAuthManager().authorizeUserTable(user, table, action)) { - return true; - } - List permissionsForUser = - getPermissionForUser(perms, user.getShortName()); - if (permissionsForUser != null) { - for (UserPermission permissionForUser : permissionsForUser) { - if (permissionForUser.getPermission().implies(action)) { return true; } - } - } - String[] groupNames = user.getGroupNames(); - if (groupNames != null) { - for (String group : groupNames) { - List groupPerms = - getPermissionForUser(perms, (AuthUtil.toGroupEntry(group))); - if (groupPerms != null) for (UserPermission permissionForUser : groupPerms) { - if (permissionForUser.getPermission().implies(action)) { return true; } - } - } + } + String[] groupNames = user.getGroupNames(); + if (groupNames != null) { + for (String group : groupNames) { + List groupPerms = + getPermissionForUser(perms, (AuthUtil.toGroupEntry(group))); + if (groupPerms != null) for (UserPermission permissionForUser : groupPerms) { + if (permissionForUser.getPermission().implies(action)) { + return true; } - } else if (LOGGER.isDebugEnabled()) { - LOGGER.debug("No permissions found for table=" + - table + " or namespace=" + table.getNamespaceAsString()); + } } - return false; + } + } else if (LOGGER.isDebugEnabled()) { + LOGGER.debug("No permissions found for table=" + table + " or namespace=" + + table.getNamespaceAsString()); } - - private User getActiveUser() throws IOException { - Optional user = RpcServer.getRequestUser(); - if (!user.isPresent()) { - // for non-rpc handling, fallback to system user - return userProvider.getCurrent(); - } - return user.get(); + return false; + } + + private User getActiveUser() throws IOException { + Optional user = RpcServer.getRequestUser(); + if (!user.isPresent()) { + // for non-rpc handling, fallback to system user + return userProvider.getCurrent(); } - - private void logResult(AuthResult result) { - if (AUDITLOG.isTraceEnabled()) { - Optional remoteAddr = RpcServer.getRemoteAddress(); - AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user " - + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + "; reason: " - + result.getReason() + "; remote address: " + (remoteAddr.isPresent() ? remoteAddr.get() : "") + "; request: " - + result.getRequest() + "; context: " + result.toContextString()); - } + return user.get(); + } + + private void logResult(AuthResult result) { + if (AUDITLOG.isTraceEnabled()) { + Optional remoteAddr = RpcServer.getRemoteAddress(); + AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user " + + (result.getUser() != null ? 
result.getUser().getShortName() : "UNKNOWN") + "; reason: " + + result.getReason() + "; remote address: " + + (remoteAddr.isPresent() ? remoteAddr.get() : "") + "; request: " + result.getRequest() + + "; context: " + result.toContextString()); } + } - private static final class Superusers { - private static final Logger LOGGER = LoggerFactory.getLogger(Superusers.class); + private static final class Superusers { + private static final Logger LOGGER = LoggerFactory.getLogger(Superusers.class); - /** Configuration key for superusers */ - public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // Not getting a name + /** Configuration key for superusers */ + public static final String SUPERUSER_CONF_KEY = + org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // Not getting a name - private static List superUsers; - private static List superGroups; - private static User systemUser; + private static List superUsers; + private static List superGroups; + private static User systemUser; - private Superusers(){} - - /** - * Should be called only once to pre-load list of super users and super - * groups from Configuration. This operation is idempotent. - * @param conf configuration to load users from - * @throws IOException if unable to initialize lists of superusers or super groups - * @throws IllegalStateException if current user is null - */ - public static void initialize(Configuration conf) throws IOException { - superUsers = new ArrayList<>(); - superGroups = new ArrayList<>(); - systemUser = User.getCurrent(); - - if (systemUser == null) { - throw new IllegalStateException("Unable to obtain the current user, " - + "authorization checks for internal operations will not work correctly!"); - } + private Superusers() { + } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Current user name is " + systemUser.getShortName()); - } - String currentUser = systemUser.getShortName(); - String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]); - for (String name : superUserList) { - if (AuthUtil.isGroupPrincipal(name)) { - superGroups.add(AuthUtil.getGroupName(name)); - } else { - superUsers.add(name); - } - } - superUsers.add(currentUser); + /** + * Should be called only once to pre-load list of super users and super groups from + * Configuration. This operation is idempotent. + * @param conf configuration to load users from + * @throws IOException if unable to initialize lists of superusers or super groups + * @throws IllegalStateException if current user is null + */ + public static void initialize(Configuration conf) throws IOException { + superUsers = new ArrayList<>(); + superGroups = new ArrayList<>(); + systemUser = User.getCurrent(); + + if (systemUser == null) { + throw new IllegalStateException("Unable to obtain the current user, " + + "authorization checks for internal operations will not work correctly!"); + } + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Current user name is " + systemUser.getShortName()); + } + String currentUser = systemUser.getShortName(); + String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]); + for (String name : superUserList) { + if (AuthUtil.isGroupPrincipal(name)) { + superGroups.add(AuthUtil.getGroupName(name)); + } else { + superUsers.add(name); } + } + superUsers.add(currentUser); + } - /** - * @return true if current user is a super user (whether as user running process, - * declared as individual superuser or member of supergroup), false otherwise. 
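The private Superusers helper above splits the configured hbase.superuser entries into user names and group names (entries beginning with "@") and always adds the process owner. A small sketch of that parsing, assuming only the HBase AuthUtil and Hadoop Configuration APIs (class and method names here are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;

public final class SuperuserParsingSketch {
  private final List<String> superUsers = new ArrayList<>();
  private final List<String> superGroups = new ArrayList<>();

  // Entries such as "@admins" are group principals; everything else is a user.
  public void load(Configuration conf, String currentUser) {
    for (String name : conf.getStrings("hbase.superuser", new String[0])) {
      if (AuthUtil.isGroupPrincipal(name)) {
        superGroups.add(AuthUtil.getGroupName(name));
      } else {
        superUsers.add(name);
      }
    }
    // The user running the server process is always treated as a superuser.
    superUsers.add(currentUser);
  }
}

The isSuperUser check then only needs to test membership in either list.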
- * @param user to check - * @throws IllegalStateException if lists of superusers/super groups - * haven't been initialized properly - */ - public static boolean isSuperUser(User user) { - if (superUsers == null) { - throw new IllegalStateException("Super users/super groups lists" - + " haven't been initialized properly."); - } - if (superUsers.contains(user.getShortName())) { - return true; - } - - for (String group : user.getGroupNames()) { - if (superGroups.contains(group)) { - return true; - } - } - return false; + /** + * @return true if current user is a super user (whether as user running process, declared as + * individual superuser or member of supergroup), false otherwise. + * @param user to check + * @throws IllegalStateException if lists of superusers/super groups haven't been initialized + * properly + */ + public static boolean isSuperUser(User user) { + if (superUsers == null) { + throw new IllegalStateException( + "Super users/super groups lists" + " haven't been initialized properly."); + } + if (superUsers.contains(user.getShortName())) { + return true; + } + + for (String group : user.getGroupNames()) { + if (superGroups.contains(group)) { + return true; } - + } + return false; } - public String authString(String user, TableName table, Set actions) { - StringBuilder sb = new StringBuilder(); - sb.append(" (user=").append(user != null ? user : "UNKNOWN").append(", "); - sb.append("scope=").append(table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()).append(", "); - sb.append(actions.size() > 1 ? "actions=" : "action=").append(actions.toString()) - .append(")"); - return sb.toString(); - } + } + + public String authString(String user, TableName table, Set actions) { + StringBuilder sb = new StringBuilder(); + sb.append(" (user=").append(user != null ? user : "UNKNOWN").append(", "); + sb.append("scope=").append(table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()) + .append(", "); + sb.append(actions.size() > 1 ? "actions=" : "action=").append(actions.toString()).append(")"); + return sb.toString(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java index d993c694158..1bfcaf5d765 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixCoprocessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.Coprocessor; public interface PhoenixCoprocessor extends Coprocessor { - default Optional getPhoenixObserver() { - return Optional.empty(); - } + default Optional getPhoenixObserver() { + return Optional.empty(); } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java index 5a9be7cef6d..39c30d324a2 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,6 @@ package org.apache.phoenix.coprocessor; import java.io.IOException; -import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.Optional; @@ -40,228 +39,237 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; +public class PhoenixMetaDataCoprocessorHost extends CoprocessorHost { + private RegionCoprocessorEnvironment env; + public static final String PHOENIX_META_DATA_COPROCESSOR_CONF_KEY = + "hbase.coprocessor.phoenix.classes"; + private static final String DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY = + "org.apache.phoenix.coprocessor.PhoenixAccessController"; -public class PhoenixMetaDataCoprocessorHost - extends CoprocessorHost { - private RegionCoprocessorEnvironment env; - public static final String PHOENIX_META_DATA_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.phoenix.classes"; - private static final String DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY = - "org.apache.phoenix.coprocessor.PhoenixAccessController"; - - PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) throws IOException { - super(null); - this.env = env; - this.conf = new Configuration(); - for (Entry entry : env.getConfiguration()) { - conf.set(entry.getKey(), entry.getValue()); - } - boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); - if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && accessCheckEnabled) { - this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY); - } - loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY); + PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) throws IOException { + super(null); + this.env = env; + this.conf = new Configuration(); + for (Entry entry : env.getConfiguration()) { + conf.set(entry.getKey(), entry.getValue()); + } + boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && 
accessCheckEnabled) { + this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, + DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY); } + loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY); + } - private ObserverGetter phoenixObserverGetter = - PhoenixCoprocessor::getPhoenixObserver; - - private abstract class PhoenixObserverOperation extends ObserverOperationWithoutResult { - PhoenixObserverOperation() { - super(phoenixObserverGetter); - } + private ObserverGetter phoenixObserverGetter = + PhoenixCoprocessor::getPhoenixObserver; - public PhoenixObserverOperation(User user) { - super(phoenixObserverGetter, user); - } + private abstract class PhoenixObserverOperation + extends ObserverOperationWithoutResult { + PhoenixObserverOperation() { + super(phoenixObserverGetter); + } - public PhoenixObserverOperation(User user, boolean bypassable) { - super(phoenixObserverGetter, user, bypassable); - } - - void callObserver() throws IOException { - Optional observer = phoenixObserverGetter.apply(getEnvironment().getInstance()); - if (observer.isPresent()) { - call(observer.get()); - } - } + public PhoenixObserverOperation(User user) { + super(phoenixObserverGetter, user); + } - @Override - protected void postEnvCall() {} + public PhoenixObserverOperation(User user, boolean bypassable) { + super(phoenixObserverGetter, user, bypassable); } - private boolean execOperation( - final PhoenixObserverOperation ctx) - throws IOException { - if (ctx == null) return false; - boolean bypass = false; - for (PhoenixMetaDataControllerEnvironment env : coprocEnvironments) { - if (env.getInstance() instanceof MetaDataEndpointObserver) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.callObserver(); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (bypass) { - break; - } - } - ctx.postEnvCall(); - } - return bypass; + void callObserver() throws IOException { + Optional observer = + phoenixObserverGetter.apply(getEnvironment().getInstance()); + if (observer.isPresent()) { + call(observer.get()); + } } - + @Override - protected void handleCoprocessorThrowable(final PhoenixMetaDataControllerEnvironment env, final Throwable e) - throws IOException { - if (e instanceof IOException) { - if (e.getCause() instanceof DoNotRetryIOException) { throw (IOException)e.getCause(); } - } - super.handleCoprocessorThrowable(env, e); + protected void postEnvCall() { } + } - /** - * Encapsulation of the environment of each coprocessor - */ - public static class PhoenixMetaDataControllerEnvironment extends BaseEnvironment - implements CoprocessorEnvironment { - - private RegionCoprocessorEnvironment env; - - PhoenixMetaDataControllerEnvironment(RegionCoprocessorEnvironment env, PhoenixCoprocessor instance, - int priority, int sequence, Configuration conf) { - super(instance, priority, sequence, conf); - this.env = env; - } - - public RegionCoprocessorHost getCoprocessorHost() { - return ((HRegion)env.getRegion()).getCoprocessorHost(); + private boolean execOperation(final PhoenixObserverOperation ctx) throws IOException { + if (ctx == null) return false; + boolean bypass = false; + for (PhoenixMetaDataControllerEnvironment env : coprocEnvironments) { + if (env.getInstance() instanceof MetaDataEndpointObserver) { + ctx.prepare(env); + Thread currentThread = 
Thread.currentThread(); + ClassLoader cl = currentThread.getContextClassLoader(); + try { + currentThread.setContextClassLoader(env.getClassLoader()); + ctx.callObserver(); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } finally { + currentThread.setContextClassLoader(cl); } - - RegionCoprocessorEnvironment getRegionCoprocessorEnvironment() { - return env; + bypass |= ctx.shouldBypass(); + if (bypass) { + break; } + } + ctx.postEnvCall(); } - + return bypass; + } - void preGetTable(final String tenantId, final String tableName, final TableName physicalTableName) - throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preGetTable(this, tenantId, tableName, physicalTableName); - } - }); + @Override + protected void handleCoprocessorThrowable(final PhoenixMetaDataControllerEnvironment env, + final Throwable e) throws IOException { + if (e instanceof IOException) { + if (e.getCause() instanceof DoNotRetryIOException) { + throw (IOException) e.getCause(); + } } + super.handleCoprocessorThrowable(env, e); + } - void preCreateTable(final String tenantId, final String tableName, final TableName physicalTableName, - final TableName parentPhysicalTableName, final PTableType tableType, final Set familySet, final Set indexes) - throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preCreateTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType, - familySet, indexes); - } - }); - } + /** + * Encapsulation of the environment of each coprocessor + */ + public static class PhoenixMetaDataControllerEnvironment extends + BaseEnvironment implements CoprocessorEnvironment { - void preCreateViewAddChildLink(final String tableName) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preCreateViewAddChildLink(this, tableName); - } - }); - } + private RegionCoprocessorEnvironment env; - void preDropTable(final String tenantId, final String tableName, final TableName physicalTableName, - final TableName parentPhysicalTableName, final PTableType tableType, final List indexes) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preDropTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType, indexes); - } - }); + PhoenixMetaDataControllerEnvironment(RegionCoprocessorEnvironment env, + PhoenixCoprocessor instance, int priority, int sequence, Configuration conf) { + super(instance, priority, sequence, conf); + this.env = env; } - void preAlterTable(final String tenantId, final String tableName, final TableName physicalTableName, - final TableName parentPhysicalTableName, final PTableType type) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preAlterTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName, type); - } - }); + public RegionCoprocessorHost getCoprocessorHost() { + return ((HRegion) env.getRegion()).getCoprocessorHost(); } - void preGetSchema(final String schemaName) throws IOException { - execOperation(new PhoenixObserverOperation() { - 
@Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preGetSchema(this, schemaName); - } - }); + RegionCoprocessorEnvironment getRegionCoprocessorEnvironment() { + return env; } + } - public void preCreateSchema(final String schemaName) throws IOException { + void preGetTable(final String tenantId, final String tableName, final TableName physicalTableName) + throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preGetTable(this, tenantId, tableName, physicalTableName); + } + }); + } - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preCreateSchema(this, schemaName); - } - }); - } + void preCreateTable(final String tenantId, final String tableName, + final TableName physicalTableName, final TableName parentPhysicalTableName, + final PTableType tableType, final Set familySet, final Set indexes) + throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preCreateTable(this, tenantId, tableName, physicalTableName, + parentPhysicalTableName, tableType, familySet, indexes); + } + }); + } - void preDropSchema(final String schemaName) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preDropSchema(this, schemaName); - } - }); - } + void preCreateViewAddChildLink(final String tableName) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preCreateViewAddChildLink(this, tableName); + } + }); + } - void preIndexUpdate(final String tenantId, final String indexName, final TableName physicalTableName, - final TableName parentPhysicalTableName, final PIndexState newState) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) throws IOException { - observer.preIndexUpdate(this, tenantId, indexName, physicalTableName, parentPhysicalTableName, newState); - } - }); - } + void preDropTable(final String tenantId, final String tableName, + final TableName physicalTableName, final TableName parentPhysicalTableName, + final PTableType tableType, final List indexes) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preDropTable(this, tenantId, tableName, physicalTableName, parentPhysicalTableName, + tableType, indexes); + } + }); + } - @Override - public PhoenixCoprocessor checkAndGetInstance(Class implClass) - throws InstantiationException, IllegalAccessException { - if (PhoenixCoprocessor.class - .isAssignableFrom(implClass)) { return (PhoenixCoprocessor)implClass.newInstance(); } - return null; - } + void preAlterTable(final String tenantId, final String tableName, + final TableName physicalTableName, final TableName parentPhysicalTableName, + final PTableType type) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preAlterTable(this, tenantId, tableName, physicalTableName, + parentPhysicalTableName, type); + } 
+ }); + } - @Override - public PhoenixMetaDataControllerEnvironment createEnvironment(PhoenixCoprocessor instance, int priority, - int sequence, Configuration conf) { - return new PhoenixMetaDataControllerEnvironment(env, instance, priority, sequence, conf); - } + void preGetSchema(final String schemaName) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preGetSchema(this, schemaName); + } + }); + } + + public void preCreateSchema(final String schemaName) throws IOException { + + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preCreateSchema(this, schemaName); + } + }); + } - void preUpsertTaskDetails(final String tableName) throws IOException { - execOperation(new PhoenixObserverOperation() { - @Override - public void call(MetaDataEndpointObserver observer) - throws IOException { - observer.preUpsertTaskDetails(this, tableName); - } - }); + void preDropSchema(final String schemaName) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preDropSchema(this, schemaName); + } + }); + } + + void preIndexUpdate(final String tenantId, final String indexName, + final TableName physicalTableName, final TableName parentPhysicalTableName, + final PIndexState newState) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preIndexUpdate(this, tenantId, indexName, physicalTableName, + parentPhysicalTableName, newState); + } + }); + } + + @Override + public PhoenixCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if (PhoenixCoprocessor.class.isAssignableFrom(implClass)) { + return (PhoenixCoprocessor) implClass.newInstance(); } + return null; + } + + @Override + public PhoenixMetaDataControllerEnvironment createEnvironment(PhoenixCoprocessor instance, + int priority, int sequence, Configuration conf) { + return new PhoenixMetaDataControllerEnvironment(env, instance, priority, sequence, conf); + } + + void preUpsertTaskDetails(final String tableName) throws IOException { + execOperation(new PhoenixObserverOperation() { + @Override + public void call(MetaDataEndpointObserver observer) throws IOException { + observer.preUpsertTaskDetails(this, tableName); + } + }); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java index 59fd1209db4..360b3ebe188 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,10 +17,6 @@ */ package org.apache.phoenix.coprocessor; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; @@ -39,79 +35,84 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + /** * This is first implementation of RegionServer coprocessor introduced by Phoenix. */ -public class PhoenixRegionServerEndpoint - extends RegionServerEndpointProtos.RegionServerEndpointService - implements RegionServerCoprocessor { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRegionServerEndpoint.class); - private MetricsMetadataCachingSource metricsSource; - protected Configuration conf; +public class PhoenixRegionServerEndpoint extends + RegionServerEndpointProtos.RegionServerEndpointService implements RegionServerCoprocessor { + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRegionServerEndpoint.class); + private MetricsMetadataCachingSource metricsSource; + protected Configuration conf; - @Override - public void start(CoprocessorEnvironment env) throws IOException { - this.conf = env.getConfiguration(); - this.metricsSource = MetricsPhoenixCoprocessorSourceFactory - .getInstance().getMetadataCachingSource(); - } + @Override + public void start(CoprocessorEnvironment env) throws IOException { + this.conf = env.getConfiguration(); + this.metricsSource = + MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource(); + } - @Override - public void validateLastDDLTimestamp(RpcController controller, - RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request, - RpcCallback done) { - metricsSource.incrementValidateTimestampRequestCount(); - ServerMetadataCache cache = getServerMetadataCache(); - for (RegionServerEndpointProtos.LastDDLTimestampRequest lastDDLTimestampRequest - : request.getLastDDLTimestampRequestsList()) { - byte[] tenantID = lastDDLTimestampRequest.getTenantId().toByteArray(); - byte[] schemaName = lastDDLTimestampRequest.getSchemaName().toByteArray(); - byte[] tableName = lastDDLTimestampRequest.getTableName().toByteArray(); - long clientLastDDLTimestamp = lastDDLTimestampRequest.getLastDDLTimestamp(); - String tenantIDStr = Bytes.toString(tenantID); - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - try { - VerifyLastDDLTimestamp.verifyLastDDLTimestamp(cache, tenantID, schemaName, - tableName, clientLastDDLTimestamp); - } catch (Throwable t) { - String errorMsg = String.format("Verifying last ddl timestamp FAILED for " - + "tenantID: %s, fullTableName: %s", tenantIDStr, fullTableName); - LOGGER.error(errorMsg, t); - IOException ioe = ClientUtil.createIOException(errorMsg, t); - ProtobufUtil.setControllerException(controller, ioe); - //If an index was dropped and a client tries to query it, we will validate table - //first and encounter stale metadata, if we don't break the coproc will run into - //table not found error since it will not be able to validate the dropped index. - //this should be fine for views too since we will update the entire hierarchy. 
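The loop above validates each requested table's last-DDL timestamp against the server-side metadata cache and deliberately breaks on the first failure, so a stale index or view does not cascade into spurious table-not-found errors. A hypothetical, much-simplified sketch of that comparison (the Map stands in for Phoenix's ServerMetadataCache, the real check is performed by VerifyLastDDLTimestamp, and the exact comparison rule used here is an assumption):

import java.util.Map;

public final class DdlTimestampCheckSketch {
  // Returns true when the client's cached metadata for the table is at least
  // as new as the server's last known DDL timestamp; false signals that the
  // client should refresh its metadata before retrying.
  public static boolean isClientMetadataCurrent(Map<String, Long> serverLastDdlTs,
      String fullTableName, long clientLastDdlTs) {
    Long serverTs = serverLastDdlTs.get(fullTableName);
    // A table unknown to the server also counts as stale from the client's view.
    return serverTs != null && clientLastDdlTs >= serverTs;
  }
}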
- break; - } - } + @Override + public void validateLastDDLTimestamp(RpcController controller, + RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request, + RpcCallback done) { + metricsSource.incrementValidateTimestampRequestCount(); + ServerMetadataCache cache = getServerMetadataCache(); + for (RegionServerEndpointProtos.LastDDLTimestampRequest lastDDLTimestampRequest : request + .getLastDDLTimestampRequestsList()) { + byte[] tenantID = lastDDLTimestampRequest.getTenantId().toByteArray(); + byte[] schemaName = lastDDLTimestampRequest.getSchemaName().toByteArray(); + byte[] tableName = lastDDLTimestampRequest.getTableName().toByteArray(); + long clientLastDDLTimestamp = lastDDLTimestampRequest.getLastDDLTimestamp(); + String tenantIDStr = Bytes.toString(tenantID); + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + try { + VerifyLastDDLTimestamp.verifyLastDDLTimestamp(cache, tenantID, schemaName, tableName, + clientLastDDLTimestamp); + } catch (Throwable t) { + String errorMsg = String.format( + "Verifying last ddl timestamp FAILED for " + "tenantID: %s, fullTableName: %s", + tenantIDStr, fullTableName); + LOGGER.error(errorMsg, t); + IOException ioe = ClientUtil.createIOException(errorMsg, t); + ProtobufUtil.setControllerException(controller, ioe); + // If an index was dropped and a client tries to query it, we will validate table + // first and encounter stale metadata, if we don't break the coproc will run into + // table not found error since it will not be able to validate the dropped index. + // this should be fine for views too since we will update the entire hierarchy. + break; + } } + } - @Override - public void invalidateServerMetadataCache(RpcController controller, - RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest request, - RpcCallback done) { - for (RegionServerEndpointProtos.InvalidateServerMetadataCache invalidateCacheRequest - : request.getInvalidateServerMetadataCacheRequestsList()) { - byte[] tenantID = invalidateCacheRequest.getTenantId().toByteArray(); - byte[] schemaName = invalidateCacheRequest.getSchemaName().toByteArray(); - byte[] tableName = invalidateCacheRequest.getTableName().toByteArray(); - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - String tenantIDStr = Bytes.toString(tenantID); - LOGGER.info("PhoenixRegionServerEndpoint invalidating the cache for tenantID: {}," - + " tableName: {}", tenantIDStr, fullTableName); - ServerMetadataCache cache = getServerMetadataCache(); - cache.invalidate(tenantID, schemaName, tableName); - } + @Override + public void invalidateServerMetadataCache(RpcController controller, + RegionServerEndpointProtos.InvalidateServerMetadataCacheRequest request, + RpcCallback done) { + for (RegionServerEndpointProtos.InvalidateServerMetadataCache invalidateCacheRequest : request + .getInvalidateServerMetadataCacheRequestsList()) { + byte[] tenantID = invalidateCacheRequest.getTenantId().toByteArray(); + byte[] schemaName = invalidateCacheRequest.getSchemaName().toByteArray(); + byte[] tableName = invalidateCacheRequest.getTableName().toByteArray(); + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + String tenantIDStr = Bytes.toString(tenantID); + LOGGER.info( + "PhoenixRegionServerEndpoint invalidating the cache for tenantID: {}," + " tableName: {}", + tenantIDStr, fullTableName); + ServerMetadataCache cache = getServerMetadataCache(); + cache.invalidate(tenantID, schemaName, tableName); } + } - @Override - public Iterable getServices() { - 
return Collections.singletonList(this); - } + @Override + public Iterable getServices() { + return Collections.singletonList(this); + } - public ServerMetadataCache getServerMetadataCache() { - return ServerMetadataCacheImpl.getInstance(conf); - } -} \ No newline at end of file + public ServerMetadataCache getServerMetadataCache() { + return ServerMetadataCacheImpl.getInstance(conf); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java index 01db8261894..5a36a3418b9 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,16 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.util.ScanUtil.getDummyResult; +import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForRegionScanner; +import static org.apache.phoenix.util.ScanUtil.isDummy; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -44,296 +54,287 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.SQLException; -import java.util.Iterator; -import java.util.List; -import java.util.Optional; - -import static org.apache.phoenix.util.ScanUtil.getDummyResult; -import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForRegionScanner; -import static org.apache.phoenix.util.ScanUtil.isDummy; - /** * Coprocessor that checks whether the row is expired based on the TTL spec. 
*/ -public class PhoenixTTLRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor { - private static final Logger LOG = LoggerFactory.getLogger(PhoenixTTLRegionObserver.class); - private MetricsPhoenixTTLSource metricSource; +public class PhoenixTTLRegionObserver extends BaseScannerRegionObserver + implements RegionCoprocessor { + private static final Logger LOG = LoggerFactory.getLogger(PhoenixTTLRegionObserver.class); + private MetricsPhoenixTTLSource metricSource; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment e) throws IOException { + metricSource = MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource(); + } + + @Override + protected boolean isRegionObserverFor(Scan scan) { + return ScanUtil.isMaskTTLExpiredRows(scan) || ScanUtil.isDeleteTTLExpiredRows(scan); + } + + @Override + protected RegionScanner doPostScannerOpen(final ObserverContext c, + final Scan scan, final RegionScanner s) throws IOException, SQLException { + if (ScanUtil.isMaskTTLExpiredRows(scan) && ScanUtil.isDeleteTTLExpiredRows(scan)) { + throw new IOException("Both mask and delete expired rows property cannot be set"); + } else if (ScanUtil.isMaskTTLExpiredRows(scan)) { + metricSource.incrementMaskExpiredRequestCount(); + scan.setAttribute(PhoenixTTLRegionScanner.MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR, + Bytes.toBytes(String.format("MASK-EXPIRED-%d", metricSource.getMaskExpiredRequestCount()))); + } else if (ScanUtil.isDeleteTTLExpiredRows(scan)) { + metricSource.incrementDeleteExpiredRequestCount(); + scan.setAttribute(PhoenixTTLRegionScanner.MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR, Bytes + .toBytes(String.format("DELETE-EXPIRED-%d", metricSource.getDeleteExpiredRequestCount()))); + } + LOG.trace(String.format( + "********** PHOENIX-TTL: PhoenixTTLRegionObserver::postScannerOpen TTL for table = " + + "[%s], scan = [%s], TTL = %d ***************, " + "numMaskExpiredRequestCount=%d, " + + "numDeleteExpiredRequestCount=%d", + s.getRegionInfo().getTable().getNameAsString(), scan.toJSON(Integer.MAX_VALUE), + ScanUtil.getTTL(scan), metricSource.getMaskExpiredRequestCount(), + metricSource.getDeleteExpiredRequestCount())); + return new PhoenixTTLRegionScanner(c.getEnvironment(), scan, s); + } + + /** + * A region scanner that checks the TTL expiration of rows + */ + private static class PhoenixTTLRegionScanner extends BaseRegionScanner { + private static final String MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR = + "MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID"; + + private final RegionCoprocessorEnvironment env; + private final RegionScanner scanner; + private final Scan scan; + private final byte[] emptyCF; + private final byte[] emptyCQ; + private final Region region; + private final long minTimestamp; + private final long maxTimestamp; + private final long now; + private final boolean deleteIfExpired; + private final boolean maskIfExpired; + private final String requestId; + private final byte[] scanTableName; + private long numRowsExpired; + private long numRowsScanned; + private long numRowsDeleted; + private boolean reported = false; + private long pageSizeMs; + + public PhoenixTTLRegionScanner(RegionCoprocessorEnvironment env, Scan scan, + RegionScanner scanner) throws IOException { + super(scanner); + this.env = env; + this.scan = scan; + this.scanner = scanner; + byte[] requestIdBytes = scan.getAttribute(MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR); + this.requestId = 
Bytes.toString(requestIdBytes); + + deleteIfExpired = ScanUtil.isDeleteTTLExpiredRows(scan); + maskIfExpired = !deleteIfExpired && ScanUtil.isMaskTTLExpiredRows(scan); + + region = env.getRegion(); + emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); + emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); + scanTableName = + scan.getAttribute(BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME); + + byte[] txnScn = scan.getAttribute(BaseScannerRegionObserverConstants.TX_SCN); + if (txnScn != null) { + TimeRange timeRange = scan.getTimeRange(); + scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn)); + } + minTimestamp = scan.getTimeRange().getMin(); + maxTimestamp = scan.getTimeRange().getMax(); + now = maxTimestamp != HConstants.LATEST_TIMESTAMP + ? maxTimestamp + : EnvironmentEdgeManager.currentTimeMillis(); + pageSizeMs = getPageSizeMsForRegionScanner(scan); + } @Override - public Optional getRegionObserver() { - return Optional.of(this); + public int getBatch() { + return scanner.getBatch(); } @Override - public void start(CoprocessorEnvironment e) throws IOException { - metricSource = MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource(); + public long getMaxResultSize() { + return scanner.getMaxResultSize(); } @Override - protected boolean isRegionObserverFor(Scan scan) { - return ScanUtil.isMaskTTLExpiredRows(scan) || ScanUtil.isDeleteTTLExpiredRows(scan); + public boolean next(List result) throws IOException { + return doNext(result, false); } @Override - protected RegionScanner doPostScannerOpen(final ObserverContext c, final Scan scan, - final RegionScanner s) throws IOException, SQLException { - if (ScanUtil.isMaskTTLExpiredRows(scan) && ScanUtil.isDeleteTTLExpiredRows(scan)) { - throw new IOException("Both mask and delete expired rows property cannot be set"); - } else if (ScanUtil.isMaskTTLExpiredRows(scan)) { - metricSource.incrementMaskExpiredRequestCount(); - scan.setAttribute(PhoenixTTLRegionScanner.MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR, - Bytes.toBytes(String.format("MASK-EXPIRED-%d", - metricSource.getMaskExpiredRequestCount()))); - } else if (ScanUtil.isDeleteTTLExpiredRows(scan)) { - metricSource.incrementDeleteExpiredRequestCount(); - scan.setAttribute(PhoenixTTLRegionScanner.MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR, - Bytes.toBytes(String.format("DELETE-EXPIRED-%d", - metricSource.getDeleteExpiredRequestCount()))); - } - LOG.trace(String.format( - "********** PHOENIX-TTL: PhoenixTTLRegionObserver::postScannerOpen TTL for table = " - + "[%s], scan = [%s], TTL = %d ***************, " - + "numMaskExpiredRequestCount=%d, " - + "numDeleteExpiredRequestCount=%d", - s.getRegionInfo().getTable().getNameAsString(), - scan.toJSON(Integer.MAX_VALUE), - ScanUtil.getTTL(scan), - metricSource.getMaskExpiredRequestCount(), - metricSource.getDeleteExpiredRequestCount() - )); - return new PhoenixTTLRegionScanner(c.getEnvironment(), scan, s); + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); } - /** - * A region scanner that checks the TTL expiration of rows - */ - private static class PhoenixTTLRegionScanner extends BaseRegionScanner { - private static final String MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR = - "MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID"; - - private final RegionCoprocessorEnvironment env; - private final RegionScanner scanner; - private final Scan scan; - private final byte[] emptyCF; - private 
final byte[] emptyCQ; - private final Region region; - private final long minTimestamp; - private final long maxTimestamp; - private final long now; - private final boolean deleteIfExpired; - private final boolean maskIfExpired; - private final String requestId; - private final byte[] scanTableName; - private long numRowsExpired; - private long numRowsScanned; - private long numRowsDeleted; - private boolean reported = false; - private long pageSizeMs; - - public PhoenixTTLRegionScanner(RegionCoprocessorEnvironment env, Scan scan, - RegionScanner scanner) throws IOException { - super(scanner); - this.env = env; - this.scan = scan; - this.scanner = scanner; - byte[] requestIdBytes = scan.getAttribute(MASK_PHOENIX_TTL_EXPIRED_REQUEST_ID_ATTR); - this.requestId = Bytes.toString(requestIdBytes); - - deleteIfExpired = ScanUtil.isDeleteTTLExpiredRows(scan); - maskIfExpired = !deleteIfExpired && ScanUtil.isMaskTTLExpiredRows(scan); - - region = env.getRegion(); - emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); - emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); - scanTableName = scan.getAttribute(BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME); - - byte[] txnScn = scan.getAttribute(BaseScannerRegionObserverConstants.TX_SCN); - if (txnScn != null) { - TimeRange timeRange = scan.getTimeRange(); - scan.setTimeRange(timeRange.getMin(), Bytes.toLong(txnScn)); - } - minTimestamp = scan.getTimeRange().getMin(); - maxTimestamp = scan.getTimeRange().getMax(); - now = maxTimestamp != HConstants.LATEST_TIMESTAMP ? - maxTimestamp : - EnvironmentEdgeManager.currentTimeMillis(); - pageSizeMs = getPageSizeMsForRegionScanner(scan); - } - - @Override public int getBatch() { - return scanner.getBatch(); - } - - @Override public long getMaxResultSize() { - return scanner.getMaxResultSize(); - } - - @Override public boolean next(List result) throws IOException { - return doNext(result, false); - } - - @Override public boolean next(List result, ScannerContext scannerContext) - throws IOException { - return next(result); - } - - @Override public boolean nextRaw(List result, ScannerContext scannerContext) - throws IOException { - return nextRaw(result); - } + @Override + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + return nextRaw(result); + } - @Override public void close() throws IOException { - if (!reported) { - LOG.debug(String.format( - "PHOENIX-TTL-SCAN-STATS-ON-CLOSE: " + "request-id:[%s,%s] = [%d, %d, %d]", - this.requestId, Bytes.toString(scanTableName), - this.numRowsScanned, this.numRowsExpired, this.numRowsDeleted)); - reported = true; - } - scanner.close(); - } + @Override + public void close() throws IOException { + if (!reported) { + LOG.debug( + String.format("PHOENIX-TTL-SCAN-STATS-ON-CLOSE: " + "request-id:[%s,%s] = [%d, %d, %d]", + this.requestId, Bytes.toString(scanTableName), this.numRowsScanned, this.numRowsExpired, + this.numRowsDeleted)); + reported = true; + } + scanner.close(); + } - @Override public RegionInfo getRegionInfo() { - return scanner.getRegionInfo(); - } + @Override + public RegionInfo getRegionInfo() { + return scanner.getRegionInfo(); + } - @Override public boolean reseek(byte[] row) throws IOException { - return scanner.reseek(row); - } + @Override + public boolean reseek(byte[] row) throws IOException { + return scanner.reseek(row); + } - @Override public long getMvccReadPoint() { - return scanner.getMvccReadPoint(); - } + @Override + 
public long getMvccReadPoint() { + return scanner.getMvccReadPoint(); + } - @Override public boolean nextRaw(List result) throws IOException { - return doNext(result, true); - } + @Override + public boolean nextRaw(List result) throws IOException { + return doNext(result, true); + } - private boolean doNext(List result, boolean raw) throws IOException { - try { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - boolean hasMore; - do { - hasMore = raw ? scanner.nextRaw(result) : scanner.next(result); - if (result.isEmpty()) { - break; - } - if (isDummy(result)) { - return true; - } - - /** - Note : That both MaskIfExpiredRequest and DeleteIfExpiredRequest cannot be set at the same time. - Case : MaskIfExpiredRequest, If row not expired then return. - */ - numRowsScanned++; - if (maskIfExpired && checkRowNotExpired(result)) { - break; - } - - /** - Case : DeleteIfExpiredRequest, If deleted then return. - So that it will count towards the aggregate deleted count. - */ - if (deleteIfExpired && deleteRowIfExpired(result)) { - numRowsDeleted++; - break; - } - // skip this row - // 1. if the row has expired (checkRowNotExpired returned false) - // 2. if the row was not deleted (deleteRowIfExpired returned false and - // do not want it to count towards the deleted count) - if (maskIfExpired) { - numRowsExpired++; - } - if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { - byte[] rowKey = CellUtil.cloneRow(result.get(0)); - result.clear(); - getDummyResult(rowKey, result); - return true; - } - result.clear(); - } while (hasMore); - return hasMore; - } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - return false; // impossible - } - } + private boolean doNext(List result, boolean raw) throws IOException { + try { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + boolean hasMore; + do { + hasMore = raw ? scanner.nextRaw(result) : scanner.next(result); + if (result.isEmpty()) { + break; + } + if (isDummy(result)) { + return true; + } + + /** + * Note : That both MaskIfExpiredRequest and DeleteIfExpiredRequest cannot be set at the + * same time. Case : MaskIfExpiredRequest, If row not expired then return. + */ + numRowsScanned++; + if (maskIfExpired && checkRowNotExpired(result)) { + break; + } + + /** + * Case : DeleteIfExpiredRequest, If deleted then return. So that it will count towards + * the aggregate deleted count. + */ + if (deleteIfExpired && deleteRowIfExpired(result)) { + numRowsDeleted++; + break; + } + // skip this row + // 1. if the row has expired (checkRowNotExpired returned false) + // 2. 
if the row was not deleted (deleteRowIfExpired returned false and + // do not want it to count towards the deleted count) + if (maskIfExpired) { + numRowsExpired++; + } + if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { + byte[] rowKey = CellUtil.cloneRow(result.get(0)); + result.clear(); + getDummyResult(rowKey, result); + return true; + } + result.clear(); + } while (hasMore); + return hasMore; + } catch (Throwable t) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + return false; // impossible + } + } - /** - * @param cellList is an input and output parameter and will either include a valid row or be an empty list - * @return true if row expired and deleted or empty, otherwise false - * @throws IOException - */ - private boolean deleteRowIfExpired(List cellList) throws IOException { - - long cellListSize = cellList.size(); - if (cellListSize == 0) { - return true; - } - - Iterator cellIterator = cellList.iterator(); - Cell firstCell = cellIterator.next(); - byte[] rowKey = new byte[firstCell.getRowLength()]; - System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), rowKey, 0, - firstCell.getRowLength()); - - boolean isRowExpired = !checkRowNotExpired(cellList); - if (isRowExpired) { - long ttl = ScanUtil.getTTL(this.scan); - long ts = ScanUtil.getMaxTimestamp(cellList); - LOG.trace(String.format( - "PHOENIX-TTL: Deleting row = [%s] belonging to table = %s, " - + "scn = %s, now = %d, delete-ts = %d, max-ts = %d", - Bytes.toString(rowKey), - Bytes.toString(scanTableName), - maxTimestamp != HConstants.LATEST_TIMESTAMP, - now, now - ttl, ts)); - Delete del = new Delete(rowKey, now - ttl); - Mutation[] mutations = new Mutation[] { del }; - region.batchMutate(mutations); - return true; - } - return false; - } + /** + * @param cellList is an input and output parameter and will either include a valid row or be an + * empty list + * @return true if row expired and deleted or empty, otherwise false + */ + private boolean deleteRowIfExpired(List cellList) throws IOException { + + long cellListSize = cellList.size(); + if (cellListSize == 0) { + return true; + } + + Iterator cellIterator = cellList.iterator(); + Cell firstCell = cellIterator.next(); + byte[] rowKey = new byte[firstCell.getRowLength()]; + System.arraycopy(firstCell.getRowArray(), firstCell.getRowOffset(), rowKey, 0, + firstCell.getRowLength()); + + boolean isRowExpired = !checkRowNotExpired(cellList); + if (isRowExpired) { + long ttl = ScanUtil.getTTL(this.scan); + long ts = ScanUtil.getMaxTimestamp(cellList); + LOG.trace(String.format( + "PHOENIX-TTL: Deleting row = [%s] belonging to table = %s, " + + "scn = %s, now = %d, delete-ts = %d, max-ts = %d", + Bytes.toString(rowKey), Bytes.toString(scanTableName), + maxTimestamp != HConstants.LATEST_TIMESTAMP, now, now - ttl, ts)); + Delete del = new Delete(rowKey, now - ttl); + Mutation[] mutations = new Mutation[] { del }; + region.batchMutate(mutations); + return true; + } + return false; + } - /** - * @param cellList is an input and output parameter and will either include a valid row - * or be an empty list - * @return true if row not expired, otherwise false - * @throws IOException - */ - private boolean checkRowNotExpired(List cellList) throws IOException { - long cellListSize = cellList.size(); - Cell cell = null; - if (cellListSize == 0) { - return true; - } - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - cell = cellIterator.next(); - if 
(ScanUtil.isEmptyColumn(cell, this.emptyCF, this.emptyCQ)) { - LOG.trace(String.format("** PHOENIX-TTL: Row expired for [%s], expired = %s **", - cell.toString(), ScanUtil.isTTLExpired(cell, this.scan, this.now))); - return !ScanUtil.isTTLExpired(cell, this.scan, this.now); - } - } - LOG.warn("The empty column does not exist in a row in " + region.getRegionInfo() - .getTable().getNameAsString()); - return true; + /** + * @param cellList is an input and output parameter and will either include a valid row or be an + * empty list + * @return true if row not expired, otherwise false + */ + private boolean checkRowNotExpired(List cellList) throws IOException { + long cellListSize = cellList.size(); + Cell cell = null; + if (cellListSize == 0) { + return true; + } + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + cell = cellIterator.next(); + if (ScanUtil.isEmptyColumn(cell, this.emptyCF, this.emptyCQ)) { + LOG.trace(String.format("** PHOENIX-TTL: Row expired for [%s], expired = %s **", + cell.toString(), ScanUtil.isTTLExpired(cell, this.scan, this.now))); + return !ScanUtil.isTTLExpired(cell, this.scan, this.now); } + } + LOG.warn("The empty column does not exist in a row in " + + region.getRegionInfo().getTable().getNameAsString()); + return true; + } - @Override - public RegionScanner getNewRegionScanner(Scan scan) throws IOException { - try { - return new PhoenixTTLRegionScanner(env, scan, - ((DelegateRegionScanner)delegate).getNewRegionScanner(scan)); - } catch (ClassCastException e) { - throw new DoNotRetryIOException(e); - } - } + @Override + public RegionScanner getNewRegionScanner(Scan scan) throws IOException { + try { + return new PhoenixTTLRegionScanner(env, scan, + ((DelegateRegionScanner) delegate).getNewRegionScanner(scan)); + } catch (ClassCastException e) { + throw new DoNotRetryIOException(e); + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ReplicationSinkEndpoint.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ReplicationSinkEndpoint.java index e84967b6375..cd30c9a83e3 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ReplicationSinkEndpoint.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ReplicationSinkEndpoint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessor; import org.apache.phoenix.compat.hbase.ReplicationSinkCompatEndpoint; diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java index baf4e051478..9bee6b614e9 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.List; @@ -49,140 +51,141 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; - /** - * - * Wraps the scan performing a non aggregate query to prevent needless retries - * if a Phoenix bug is encountered from our custom filter expression evaluation. - * Unfortunately, until HBASE-7481 gets fixed, there's no way to do this from our - * custom filters. - * - * + * Wraps the scan performing a non aggregate query to prevent needless retries if a Phoenix bug is + * encountered from our custom filter expression evaluation. Unfortunately, until HBASE-7481 gets + * fixed, there's no way to do this from our custom filters. * @since 0.1 */ public class ScanRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor { - private static final Logger LOGGER = LoggerFactory.getLogger(ScanRegionObserver.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ScanRegionObserver.class); - private static boolean readRepairTransformingTable = false; - private static GlobalIndexChecker.GlobalIndexScanner globalIndexScanner; - private static GlobalIndexChecker globalIndexChecker = new GlobalIndexChecker(); - private static GlobalIndexCheckerSource metricsSource = MetricsIndexerSourceFactory.getInstance().getGlobalIndexCheckerSource(); + private static boolean readRepairTransformingTable = false; + private static GlobalIndexChecker.GlobalIndexScanner globalIndexScanner; + private static GlobalIndexChecker globalIndexChecker = new GlobalIndexChecker(); + private static GlobalIndexCheckerSource metricsSource = + MetricsIndexerSourceFactory.getInstance().getGlobalIndexCheckerSource(); - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } - @Override - public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - try { - preBatchMutateWithExceptions(miniBatchOp, c.getEnvironment().getRegion() - .getTableDescriptor().getTableName().getNameAsString()); - } catch(Throwable t) { - // Wrap all exceptions in an IOException to prevent region server crashes - throw ClientUtil.createIOException("Unable to Put cells corresponding to dynamic" + - "column metadata for " + - c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(), t); - } + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + try { + preBatchMutateWithExceptions(miniBatchOp, + c.getEnvironment().getRegion().getTableDescriptor().getTableName().getNameAsString()); + } catch (Throwable t) { + // Wrap all exceptions in an IOException to prevent region server crashes + throw ClientUtil + .createIOException("Unable to Put cells corresponding to dynamic" + "column metadata for " + + c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(), t); } + } - /** - * In case we are supporting exposing dynamic columns 
for wildcard queries, which is based on - * the client-side config - * {@link org.apache.phoenix.query.QueryServices#WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB}, - * we previously set attributes on the Put mutations where the key is the column family and - * the value is the serialized list of dynamic columns. - * Here we iterate over all Put mutations and add metadata for the list of dynamic columns for - * each column family in its own cell under reserved qualifiers. See PHOENIX-374 - * @param miniBatchOp batch of mutations getting applied to region - * @param tableName Name of table served by region - * @throws IOException If an I/O error occurs when parsing protobuf - */ - private void preBatchMutateWithExceptions(MiniBatchOperationInProgress miniBatchOp, - String tableName) - throws IOException { - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - // There is at max 1 extra Put (for dynamic column shadow cells) per original Put - Put dynColShadowCellsPut = null; - if (m instanceof Put && Bytes.equals(m.getAttribute( - ScanRegionObserverConstants.DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION), TRUE_BYTES)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Adding dynamic column metadata for table: " + tableName + ". Put :" + - m.toString()); - } - NavigableMap> famCellMap = m.getFamilyCellMap(); - for (byte[] fam : famCellMap.keySet()) { - byte[] serializedDynColsList = m.getAttribute(Bytes.toString(fam)); - if (serializedDynColsList == null) { - // There are no dynamic columns for this column family - continue; - } - List dynColsInThisFam = DynamicColumnMetaDataProtos. - DynamicColumnMetaData.parseFrom(serializedDynColsList) - .getDynamicColumnsList(); - if (dynColsInThisFam.isEmpty()) { - continue; - } - if (dynColShadowCellsPut == null) { - dynColShadowCellsPut = new Put(m.getRow()); - } - for (PTableProtos.PColumn dynColProto : dynColsInThisFam) { - // Add a column for this dynamic column to the metadata Put operation - dynColShadowCellsPut.addColumn(fam, - getQualifierForDynamicColumnMetaDataCell(dynColProto), - dynColProto.toByteArray()); - } - } - } - if (dynColShadowCellsPut != null) { - miniBatchOp.addOperationsFromCP(i, new Mutation[]{dynColShadowCellsPut}); - } + /** + * In case we are supporting exposing dynamic columns for wildcard queries, which is based on the + * client-side config + * {@link org.apache.phoenix.query.QueryServices#WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB}, we + * previously set attributes on the Put mutations where the key is the column family and the value + * is the serialized list of dynamic columns. Here we iterate over all Put mutations and add + * metadata for the list of dynamic columns for each column family in its own cell under reserved + * qualifiers. 
See PHOENIX-374 + * @param miniBatchOp batch of mutations getting applied to region + * @param tableName Name of table served by region + * @throws IOException If an I/O error occurs when parsing protobuf + */ + private void preBatchMutateWithExceptions(MiniBatchOperationInProgress miniBatchOp, + String tableName) throws IOException { + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + // There is at max 1 extra Put (for dynamic column shadow cells) per original Put + Put dynColShadowCellsPut = null; + if ( + m instanceof Put && Bytes.equals( + m.getAttribute(ScanRegionObserverConstants.DYNAMIC_COLUMN_METADATA_STORED_FOR_MUTATION), + TRUE_BYTES) + ) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Adding dynamic column metadata for table: " + tableName + ". Put :" + m.toString()); } + NavigableMap> famCellMap = m.getFamilyCellMap(); + for (byte[] fam : famCellMap.keySet()) { + byte[] serializedDynColsList = m.getAttribute(Bytes.toString(fam)); + if (serializedDynColsList == null) { + // There are no dynamic columns for this column family + continue; + } + List dynColsInThisFam = + DynamicColumnMetaDataProtos.DynamicColumnMetaData.parseFrom(serializedDynColsList) + .getDynamicColumnsList(); + if (dynColsInThisFam.isEmpty()) { + continue; + } + if (dynColShadowCellsPut == null) { + dynColShadowCellsPut = new Put(m.getRow()); + } + for (PTableProtos.PColumn dynColProto : dynColsInThisFam) { + // Add a column for this dynamic column to the metadata Put operation + dynColShadowCellsPut.addColumn(fam, + getQualifierForDynamicColumnMetaDataCell(dynColProto), dynColProto.toByteArray()); + } + } + } + if (dynColShadowCellsPut != null) { + miniBatchOp.addOperationsFromCP(i, new Mutation[] { dynColShadowCellsPut }); + } } + } - /** - * We store the metadata for each dynamic cell in a separate cell in the same column family. - * The column qualifier for this cell is: - * {@link ScanRegionObserverConstants#DYN_COLS_METADATA_CELL_QUALIFIER} concatenated with the - * qualifier of the actual dynamic column - * @param dynColProto Protobuf representation of the dynamic column PColumn - * @return Final qualifier for the metadata cell - * @throws IOException If an I/O error occurs when parsing the byte array output stream - */ - private static byte[] getQualifierForDynamicColumnMetaDataCell(PTableProtos.PColumn dynColProto) + /** + * We store the metadata for each dynamic cell in a separate cell in the same column family. The + * column qualifier for this cell is: + * {@link ScanRegionObserverConstants#DYN_COLS_METADATA_CELL_QUALIFIER} concatenated with the + * qualifier of the actual dynamic column + * @param dynColProto Protobuf representation of the dynamic column PColumn + * @return Final qualifier for the metadata cell + * @throws IOException If an I/O error occurs when parsing the byte array output stream + */ + private static byte[] getQualifierForDynamicColumnMetaDataCell(PTableProtos.PColumn dynColProto) throws IOException { - PColumn dynCol = PColumnImpl.createFromProto(dynColProto); - ByteArrayOutputStream qual = new ByteArrayOutputStream(); - qual.write(ScanRegionObserverConstants.DYN_COLS_METADATA_CELL_QUALIFIER); - qual.write(dynCol.getColumnQualifierBytes()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Storing shadow cell for dynamic column metadata for dynamic column : " + - dynCol.getFamilyName().getString() + "." 
+ dynCol.getName().getString()); - } - return qual.toByteArray(); + PColumn dynCol = PColumnImpl.createFromProto(dynColProto); + ByteArrayOutputStream qual = new ByteArrayOutputStream(); + qual.write(ScanRegionObserverConstants.DYN_COLS_METADATA_CELL_QUALIFIER); + qual.write(dynCol.getColumnQualifierBytes()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Storing shadow cell for dynamic column metadata for dynamic column : " + + dynCol.getFamilyName().getString() + "." + dynCol.getName().getString()); } + return qual.toByteArray(); + } - @Override - protected RegionScanner doPostScannerOpen(final ObserverContext c, final Scan scan, final RegionScanner s) throws Throwable { - NonAggregateRegionScannerFactory nonAggregateROUtil = new NonAggregateRegionScannerFactory(c.getEnvironment()); - if (scan.getAttribute(BaseScannerRegionObserverConstants.READ_REPAIR_TRANSFORMING_TABLE) != null) { - readRepairTransformingTable = true; - globalIndexScanner = globalIndexChecker.new GlobalIndexScanner(c.getEnvironment(), scan, s, metricsSource); - return nonAggregateROUtil.getRegionScanner(scan, globalIndexScanner); - } - return nonAggregateROUtil.getRegionScanner(scan, s); + @Override + protected RegionScanner doPostScannerOpen(final ObserverContext c, + final Scan scan, final RegionScanner s) throws Throwable { + NonAggregateRegionScannerFactory nonAggregateROUtil = + new NonAggregateRegionScannerFactory(c.getEnvironment()); + if ( + scan.getAttribute(BaseScannerRegionObserverConstants.READ_REPAIR_TRANSFORMING_TABLE) != null + ) { + readRepairTransformingTable = true; + globalIndexScanner = + globalIndexChecker.new GlobalIndexScanner(c.getEnvironment(), scan, s, metricsSource); + return nonAggregateROUtil.getRegionScanner(scan, globalIndexScanner); } + return nonAggregateROUtil.getRegionScanner(scan, s); + } - @Override - protected boolean skipRegionBoundaryCheck(Scan scan) { - return super.skipRegionBoundaryCheck(scan) || ScanUtil.isSimpleScan(scan); - } + @Override + protected boolean skipRegionBoundaryCheck(Scan scan) { + return super.skipRegionBoundaryCheck(scan) || ScanUtil.isSimpleScan(scan); + } - @Override - protected boolean isRegionObserverFor(Scan scan) { - return ScanUtil.isNonAggregateScan(scan); - } + @Override + protected boolean isRegionObserverFor(Scan scan) { + return ScanUtil.isNonAggregateScan(scan); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java index e5744bdc1cc..0e077a80cae 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SequenceRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.CURRENT_VALUE_ATTRIB; +import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB; +import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.OPERATION_ATTRIB; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -57,6 +60,7 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; @@ -65,410 +69,417 @@ import org.apache.phoenix.util.SequenceUtil; import org.apache.phoenix.util.ServerUtil; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.CURRENT_VALUE_ATTRIB; -import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.MAX_TIMERANGE_ATTRIB; -import static org.apache.phoenix.coprocessorclient.SequenceRegionObserverConstants.OPERATION_ATTRIB; - /** - * - * Region observer coprocessor for sequence operations: - * 1) For creating a sequence, as checkAndPut does not allow us to scope the - * Get done for the check with a TimeRange. - * 2) For incrementing a sequence, as increment does not a) allow us to set the - * timestamp of the key value being incremented and b) recognize when the key - * value being incremented does not exist - * 3) For deleting a sequence, as checkAndDelete does not allow us to scope - * the Get done for the check with a TimeRange. - * - * + * Region observer coprocessor for sequence operations: 1) For creating a sequence, as checkAndPut + * does not allow us to scope the Get done for the check with a TimeRange. 2) For incrementing a + * sequence, as increment does not a) allow us to set the timestamp of the key value being + * incremented and b) recognize when the key value being incremented does not exist 3) For deleting + * a sequence, as checkAndDelete does not allow us to scope the Get done for the check with a + * TimeRange. * @since 3.0.0 */ public class SequenceRegionObserver implements RegionObserver, RegionCoprocessor { - private static final byte[] SUCCESS_VALUE = PInteger.INSTANCE.toBytes(Integer.valueOf(Sequence.SUCCESS)); + private static final byte[] SUCCESS_VALUE = + PInteger.INSTANCE.toBytes(Integer.valueOf(Sequence.SUCCESS)); - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } - - private static Result getErrorResult(byte[] row, long timestamp, int errorCode) { - byte[] errorCodeBuf = new byte[PInteger.INSTANCE.getByteSize()]; - PInteger.INSTANCE.getCodec().encodeInt(errorCode, errorCodeBuf, 0); - return Result.create(Collections.singletonList( - PhoenixKeyValueUtil.newKeyValue(row, - PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf))); - } - - /** - * Use PreIncrement hook of BaseRegionObserver to overcome deficiencies in Increment - * implementation (HBASE-10254): - * 1) Lack of recognition and identification of when the key value to increment doesn't exist - * 2) Lack of the ability to set the timestamp of the updated key value. 
- * Works the same as existing region.increment(), except assumes there is a single column to - * increment and uses Phoenix LONG encoding. - * - * @since 3.0.0 - */ - @Override - public Result preIncrement( - org.apache.hadoop.hbase.coprocessor.ObserverContext e, - Increment increment) throws IOException { - RegionCoprocessorEnvironment env = e.getEnvironment(); - // We need to set this to prevent region.increment from being called - e.bypass(); - Region region = env.getRegion(); - byte[] row = increment.getRow(); - List locks = Lists.newArrayList(); - TimeRange tr = increment.getTimeRange(); - region.startRegionOperation(); - try { - ServerUtil.acquireLock(region, row, locks); - long maxTimestamp = tr.getMax(); - boolean validateOnly = true; - Get get = new Get(row); - get.setTimeRange(tr.getMin(), tr.getMax()); - for (Map.Entry> entry : increment.getFamilyCellMap().entrySet()) { - byte[] cf = entry.getKey(); - for (Cell cq : entry.getValue()) { - long value = Bytes.toLong(cq.getValueArray(), cq.getValueOffset()); - get.addColumn(cf, CellUtil.cloneQualifier(cq)); - long cellTimestamp = cq.getTimestamp(); - // Workaround HBASE-15698 by using the lowest of the timestamps found - // on the Increment or any of its Cells. - if (cellTimestamp > 0 && cellTimestamp < maxTimestamp) { - maxTimestamp = cellTimestamp; - get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, maxTimestamp); - } - validateOnly &= (Sequence.ValueOp.VALIDATE_SEQUENCE.ordinal() == value); - } - } - try (RegionScanner scanner = region.getScanner(new Scan(get))) { - List currentCells = new ArrayList<>(); - scanner.next(currentCells); - // These cells are returned by this method, and may be backed by ByteBuffers - // that we free when the RegionScanner is closed on return - PhoenixKeyValueUtil.maybeCopyCellList(currentCells); - if (currentCells.isEmpty()) { - return getErrorResult(row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode()); - } - Result result = Result.create(currentCells); - Cell currentValueKV = Sequence.getCurrentValueKV(result); - Cell incrementByKV = Sequence.getIncrementByKV(result); - Cell cacheSizeKV = Sequence.getCacheSizeKV(result); - - long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault()); - long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), incrementByKV.getValueOffset(), SortOrder.getDefault()); - long cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), cacheSizeKV.getValueOffset(), SortOrder.getDefault()); - - // Hold timestamp constant for sequences, so that clients always only see the latest - // value regardless of when they connect. - long timestamp = currentValueKV.getTimestamp(); - Put put = new Put(row, timestamp); - - int numIncrementKVs = increment.getFamilyCellMap().get(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES).size(); - // creates the list of KeyValues used for the Result that will be returned - List cells = Sequence.getCells(result, numIncrementKVs); - - //if client is 3.0/4.0 preserve the old behavior (older clients won't have newer columns present in the increment) - if (numIncrementKVs != Sequence.NUM_SEQUENCE_KEY_VALUES) { - currentValue += incrementBy * cacheSize; - // Hold timestamp constant for sequences, so that clients always only see the latest value - // regardless of when they connect. 
- Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp); - put.add(newCurrentValueKV); - Sequence.replaceCurrentValueKV(cells, newCurrentValueKV); - } - else { - Cell cycleKV = Sequence.getCycleKV(result); - Cell limitReachedKV = Sequence.getLimitReachedKV(result); - Cell minValueKV = Sequence.getMinValueKV(result); - Cell maxValueKV = Sequence.getMaxValueKV(result); - - boolean increasingSeq = incrementBy > 0 ? true : false; - - // if the minValue, maxValue, cycle and limitReached is null this sequence has been upgraded from - // a lower version. Set minValue, maxValue, cycle and limitReached to Long.MIN_VALUE, Long.MAX_VALUE, true and false - // respectively in order to maintain existing behavior and also update the KeyValues on the server - boolean limitReached; - if (limitReachedKV == null) { - limitReached = false; - Cell newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); - put.add(newLimitReachedKV); - Sequence.replaceLimitReachedKV(cells, newLimitReachedKV); - } - else { - limitReached = (Boolean) PBoolean.INSTANCE.toObject(limitReachedKV.getValueArray(), - limitReachedKV.getValueOffset(), limitReachedKV.getValueLength()); - } - long minValue; - if (minValueKV == null) { - minValue = Long.MIN_VALUE; - Cell newMinValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp); - put.add(newMinValueKV); - Sequence.replaceMinValueKV(cells, newMinValueKV); - } - else { - minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), - minValueKV.getValueOffset(), SortOrder.getDefault()); - } - long maxValue; - if (maxValueKV == null) { - maxValue = Long.MAX_VALUE; - Cell newMaxValueKV = createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp); - put.add(newMaxValueKV); - Sequence.replaceMaxValueKV(cells, newMaxValueKV); - } - else { - maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), - maxValueKV.getValueOffset(), SortOrder.getDefault()); - } - boolean cycle; - if (cycleKV == null) { - cycle = false; - Cell newCycleKV = createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp); - put.add(newCycleKV); - Sequence.replaceCycleValueKV(cells, newCycleKV); - } - else { - cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), - cycleKV.getValueOffset(), cycleKV.getValueLength()); - } - - long numSlotsToAllocate = calculateNumSlotsToAllocate(increment); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } - // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true - if (cycle && !SequenceUtil.isCycleAllowed(numSlotsToAllocate)) { - return getErrorResult(row, maxTimestamp, SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED.getErrorCode()); - } - - // Bulk Allocations are expressed by NEXT VALUES FOR - if (SequenceUtil.isBulkAllocation(numSlotsToAllocate)) { - if (SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate)) { - // If we try to allocate more slots than the limit we return an error. - // Allocating sequence values in bulk should be an all or nothing operation. - // If the operation succeeds clients are guaranteed that they have reserved - // all the slots requested. 
- return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode()); - } - } - - if (validateOnly) { - return result; - } - - // return if we have run out of sequence values - if (limitReached) { - if (cycle) { - // reset currentValue of the Sequence row to minValue/maxValue - currentValue = increasingSeq ? minValue : maxValue; - } - else { - return getErrorResult(row, maxTimestamp, SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode()); - } - } - - // check if the limit was reached - limitReached = SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, cacheSize, numSlotsToAllocate); - - // update currentValue - currentValue += incrementBy * (SequenceUtil.isBulkAllocation(numSlotsToAllocate) ? numSlotsToAllocate : cacheSize); - // update the currentValue of the Result row - Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, currentValue, timestamp); - Sequence.replaceCurrentValueKV(cells, newCurrentValueKV); - put.add(newCurrentValueKV); - // set the LIMIT_REACHED column to true, so that no new values can be used - Cell newLimitReachedKV = createKeyValue(row, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); - put.add(newLimitReachedKV); - } - // update the KeyValues on the server - Mutation[] mutations = new Mutation[]{put}; - region.batchMutate(mutations); - // return a Result with the updated KeyValues - return Result.create(cells); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - ClientUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t); - return null; // Impossible - } finally { - region.closeRegionOperation(); + private static Result getErrorResult(byte[] row, long timestamp, int errorCode) { + byte[] errorCodeBuf = new byte[PInteger.INSTANCE.getByteSize()]; + PInteger.INSTANCE.getCodec().encodeInt(errorCode, errorCodeBuf, 0); + return Result.create(Collections.singletonList( + PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, timestamp, errorCodeBuf))); + } + + /** + * Use PreIncrement hook of BaseRegionObserver to overcome deficiencies in Increment + * implementation (HBASE-10254): 1) Lack of recognition and identification of when the key value + * to increment doesn't exist 2) Lack of the ability to set the timestamp of the updated key + * value. Works the same as existing region.increment(), except assumes there is a single column + * to increment and uses Phoenix LONG encoding. 
+ * @since 3.0.0 + */ + @Override + public Result preIncrement( + org.apache.hadoop.hbase.coprocessor.ObserverContext e, + Increment increment) throws IOException { + RegionCoprocessorEnvironment env = e.getEnvironment(); + // We need to set this to prevent region.increment from being called + e.bypass(); + Region region = env.getRegion(); + byte[] row = increment.getRow(); + List locks = Lists.newArrayList(); + TimeRange tr = increment.getTimeRange(); + region.startRegionOperation(); + try { + ServerUtil.acquireLock(region, row, locks); + long maxTimestamp = tr.getMax(); + boolean validateOnly = true; + Get get = new Get(row); + get.setTimeRange(tr.getMin(), tr.getMax()); + for (Map.Entry> entry : increment.getFamilyCellMap().entrySet()) { + byte[] cf = entry.getKey(); + for (Cell cq : entry.getValue()) { + long value = Bytes.toLong(cq.getValueArray(), cq.getValueOffset()); + get.addColumn(cf, CellUtil.cloneQualifier(cq)); + long cellTimestamp = cq.getTimestamp(); + // Workaround HBASE-15698 by using the lowest of the timestamps found + // on the Increment or any of its Cells. + if (cellTimestamp > 0 && cellTimestamp < maxTimestamp) { + maxTimestamp = cellTimestamp; + get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, maxTimestamp); + } + validateOnly &= (Sequence.ValueOp.VALIDATE_SEQUENCE.ordinal() == value); } - } + } + try (RegionScanner scanner = region.getScanner(new Scan(get))) { + List currentCells = new ArrayList<>(); + scanner.next(currentCells); + // These cells are returned by this method, and may be backed by ByteBuffers + // that we free when the RegionScanner is closed on return + PhoenixKeyValueUtil.maybeCopyCellList(currentCells); + if (currentCells.isEmpty()) { + return getErrorResult(row, maxTimestamp, + SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode()); + } + Result result = Result.create(currentCells); + Cell currentValueKV = Sequence.getCurrentValueKV(result); + Cell incrementByKV = Sequence.getIncrementByKV(result); + Cell cacheSizeKV = Sequence.getCacheSizeKV(result); - - /** - * Creates a new KeyValue for a long value - * - * @param key - * key used while creating KeyValue - * @param cqBytes - * column qualifier of KeyValue - * @return return the KeyValue that was created - */ - Cell createKeyValue(byte[] key, byte[] cqBytes, long value, long timestamp) { - byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()]; - PLong.INSTANCE.getCodec().encodeLong(value, valueBuffer, 0); - return PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, timestamp, valueBuffer); - } - - /** - * Creates a new KeyValue for a boolean value and adds it to the given put - * - * @param key - * key used while creating KeyValue - * @param cqBytes - * column qualifier of KeyValue - * @return return the KeyValue that was created - */ - private Cell createKeyValue(byte[] key, byte[] cqBytes, boolean value, long timestamp) throws IOException { - // create new key value for put - return PhoenixKeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, - timestamp, value ? 
PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); - } + long currentValue = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), + currentValueKV.getValueOffset(), SortOrder.getDefault()); + long incrementBy = PLong.INSTANCE.getCodec().decodeLong(incrementByKV.getValueArray(), + incrementByKV.getValueOffset(), SortOrder.getDefault()); + long cacheSize = PLong.INSTANCE.getCodec().decodeLong(cacheSizeKV.getValueArray(), + cacheSizeKV.getValueOffset(), SortOrder.getDefault()); - /** - * Override the preAppend for checkAndPut and checkAndDelete, as we need the ability to - * a) set the TimeRange for the Get being done and - * b) return something back to the client to indicate success/failure - */ - @SuppressWarnings("deprecation") - @Override - public Result preAppend( - org.apache.hadoop.hbase.coprocessor.ObserverContext e, - Append append) throws IOException { - byte[] opBuf = append.getAttribute(OPERATION_ATTRIB); - if (opBuf == null) { - return null; - } - Sequence.MetaOp op = Sequence.MetaOp.values()[opBuf[0]]; - Cell keyValue = append.getFamilyCellMap().values().iterator().next().iterator().next(); + // Hold timestamp constant for sequences, so that clients always only see the latest + // value regardless of when they connect. + long timestamp = currentValueKV.getTimestamp(); + Put put = new Put(row, timestamp); - long clientTimestamp = HConstants.LATEST_TIMESTAMP; - long minGetTimestamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP; - long maxGetTimestamp = HConstants.LATEST_TIMESTAMP; - boolean hadClientTimestamp; - byte[] clientTimestampBuf = null; - if (op == Sequence.MetaOp.RETURN_SEQUENCE) { - // When returning sequences, this allows us to send the expected timestamp - // of the sequence to make sure we don't reset any other sequence - hadClientTimestamp = true; - clientTimestamp = minGetTimestamp = keyValue.getTimestamp(); - maxGetTimestamp = minGetTimestamp + 1; + int numIncrementKVs = increment.getFamilyCellMap() + .get(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES).size(); + // creates the list of KeyValues used for the Result that will be returned + List cells = Sequence.getCells(result, numIncrementKVs); + + // if client is 3.0/4.0 preserve the old behavior (older clients won't have newer columns + // present in the increment) + if (numIncrementKVs != Sequence.NUM_SEQUENCE_KEY_VALUES) { + currentValue += incrementBy * cacheSize; + // Hold timestamp constant for sequences, so that clients always only see the latest value + // regardless of when they connect. + Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, + currentValue, timestamp); + put.add(newCurrentValueKV); + Sequence.replaceCurrentValueKV(cells, newCurrentValueKV); } else { - clientTimestampBuf = append.getAttribute(MAX_TIMERANGE_ATTRIB); - if (clientTimestampBuf != null) { - clientTimestamp = maxGetTimestamp = Bytes.toLong(clientTimestampBuf); + Cell cycleKV = Sequence.getCycleKV(result); + Cell limitReachedKV = Sequence.getLimitReachedKV(result); + Cell minValueKV = Sequence.getMinValueKV(result); + Cell maxValueKV = Sequence.getMaxValueKV(result); + + boolean increasingSeq = incrementBy > 0 ? true : false; + + // if the minValue, maxValue, cycle and limitReached is null this sequence has been + // upgraded from + // a lower version. 
Set minValue, maxValue, cycle and limitReached to Long.MIN_VALUE, + // Long.MAX_VALUE, true and false + // respectively in order to maintain existing behavior and also update the KeyValues on + // the server + boolean limitReached; + if (limitReachedKV == null) { + limitReached = false; + Cell newLimitReachedKV = createKeyValue(row, + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); + put.add(newLimitReachedKV); + Sequence.replaceLimitReachedKV(cells, newLimitReachedKV); + } else { + limitReached = (Boolean) PBoolean.INSTANCE.toObject(limitReachedKV.getValueArray(), + limitReachedKV.getValueOffset(), limitReachedKV.getValueLength()); + } + long minValue; + if (minValueKV == null) { + minValue = Long.MIN_VALUE; + Cell newMinValueKV = + createKeyValue(row, PhoenixDatabaseMetaData.MIN_VALUE_BYTES, minValue, timestamp); + put.add(newMinValueKV); + Sequence.replaceMinValueKV(cells, newMinValueKV); + } else { + minValue = PLong.INSTANCE.getCodec().decodeLong(minValueKV.getValueArray(), + minValueKV.getValueOffset(), SortOrder.getDefault()); + } + long maxValue; + if (maxValueKV == null) { + maxValue = Long.MAX_VALUE; + Cell newMaxValueKV = + createKeyValue(row, PhoenixDatabaseMetaData.MAX_VALUE_BYTES, maxValue, timestamp); + put.add(newMaxValueKV); + Sequence.replaceMaxValueKV(cells, newMaxValueKV); + } else { + maxValue = PLong.INSTANCE.getCodec().decodeLong(maxValueKV.getValueArray(), + maxValueKV.getValueOffset(), SortOrder.getDefault()); + } + boolean cycle; + if (cycleKV == null) { + cycle = false; + Cell newCycleKV = + createKeyValue(row, PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, cycle, timestamp); + put.add(newCycleKV); + Sequence.replaceCycleValueKV(cells, newCycleKV); + } else { + cycle = (Boolean) PBoolean.INSTANCE.toObject(cycleKV.getValueArray(), + cycleKV.getValueOffset(), cycleKV.getValueLength()); + } + + long numSlotsToAllocate = calculateNumSlotsToAllocate(increment); + + // We don't support Bulk Allocations on sequences that have the CYCLE flag set to true + if (cycle && !SequenceUtil.isCycleAllowed(numSlotsToAllocate)) { + return getErrorResult(row, maxTimestamp, + SQLExceptionCode.NUM_SEQ_TO_ALLOCATE_NOT_SUPPORTED.getErrorCode()); + } + + // Bulk Allocations are expressed by NEXT VALUES FOR + if (SequenceUtil.isBulkAllocation(numSlotsToAllocate)) { + if ( + SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, incrementBy, + cacheSize, numSlotsToAllocate) + ) { + // If we try to allocate more slots than the limit we return an error. + // Allocating sequence values in bulk should be an all or nothing operation. + // If the operation succeeds clients are guaranteed that they have reserved + // all the slots requested. + return getErrorResult(row, maxTimestamp, + SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode()); } - hadClientTimestamp = (clientTimestamp != HConstants.LATEST_TIMESTAMP); - if (hadClientTimestamp) { - // Prevent race condition of creating two sequences at the same timestamp - // by looking for a sequence at or after the timestamp at which it'll be - // created. - if (op == Sequence.MetaOp.CREATE_SEQUENCE) { - maxGetTimestamp = clientTimestamp + 1; - } + } + + if (validateOnly) { + return result; + } + + // return if we have run out of sequence values + if (limitReached) { + if (cycle) { + // reset currentValue of the Sequence row to minValue/maxValue + currentValue = increasingSeq ? 
minValue : maxValue; } else { - clientTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - maxGetTimestamp = clientTimestamp + 1; - clientTimestampBuf = Bytes.toBytes(clientTimestamp); + return getErrorResult(row, maxTimestamp, + SequenceUtil.getLimitReachedErrorCode(increasingSeq).getErrorCode()); } + } + + // check if the limit was reached + limitReached = SequenceUtil.checkIfLimitReached(currentValue, minValue, maxValue, + incrementBy, cacheSize, numSlotsToAllocate); + + // update currentValue + currentValue += incrementBy + * (SequenceUtil.isBulkAllocation(numSlotsToAllocate) ? numSlotsToAllocate : cacheSize); + // update the currentValue of the Result row + Cell newCurrentValueKV = createKeyValue(row, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, + currentValue, timestamp); + Sequence.replaceCurrentValueKV(cells, newCurrentValueKV); + put.add(newCurrentValueKV); + // set the LIMIT_REACHED column to true, so that no new values can be used + Cell newLimitReachedKV = createKeyValue(row, + PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, limitReached, timestamp); + put.add(newLimitReachedKV); } + // update the KeyValues on the server + Mutation[] mutations = new Mutation[] { put }; + region.batchMutate(mutations); + // return a Result with the updated KeyValues + return Result.create(cells); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + ClientUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t); + return null; // Impossible + } finally { + region.closeRegionOperation(); + } + } - RegionCoprocessorEnvironment env = e.getEnvironment(); - // We need to set this to prevent region.append from being called - e.bypass(); - Region region = env.getRegion(); - byte[] row = append.getRow(); - List locks = Lists.newArrayList(); - region.startRegionOperation(); - try { - ServerUtil.acquireLock(region, row, locks); + /** + * Creates a new KeyValue for a long value + * @param key key used while creating KeyValue + * @param cqBytes column qualifier of KeyValue + * @return return the KeyValue that was created + */ + Cell createKeyValue(byte[] key, byte[] cqBytes, long value, long timestamp) { + byte[] valueBuffer = new byte[PLong.INSTANCE.getByteSize()]; + PLong.INSTANCE.getCodec().encodeLong(value, valueBuffer, 0); + return PhoenixKeyValueUtil.newKeyValue(key, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, timestamp, valueBuffer); + } - byte[] family = CellUtil.cloneFamily(keyValue); - byte[] qualifier = CellUtil.cloneQualifier(keyValue); + /** + * Creates a new KeyValue for a boolean value and adds it to the given put + * @param key key used while creating KeyValue + * @param cqBytes column qualifier of KeyValue + * @return return the KeyValue that was created + */ + private Cell createKeyValue(byte[] key, byte[] cqBytes, boolean value, long timestamp) + throws IOException { + // create new key value for put + return PhoenixKeyValueUtil.newKeyValue(key, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, cqBytes, timestamp, + value ? 
PDataType.TRUE_BYTES : PDataType.FALSE_BYTES); + } - Get get = new Get(row); - get.setTimeRange(minGetTimestamp, maxGetTimestamp); - get.addColumn(family, qualifier); - try (RegionScanner scanner = region.getScanner(new Scan(get))) { - List cells = new ArrayList<>(); - scanner.next(cells); - if (cells.isEmpty()) { - if (op == Sequence.MetaOp.DROP_SEQUENCE || op == Sequence.MetaOp.RETURN_SEQUENCE) { - return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode()); - } - } else { - if (op == Sequence.MetaOp.CREATE_SEQUENCE) { - return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode()); - } - } - Mutation m = null; - switch (op) { - case RETURN_SEQUENCE: - KeyValue currentValueKV = PhoenixKeyValueUtil.maybeCopyCell(cells.get(0)); - long expectedValue = PLong.INSTANCE.getCodec().decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault()); - long value = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), - currentValueKV.getValueOffset(), SortOrder.getDefault()); - // Timestamp should match exactly, or we may have the wrong sequence - if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) { - return Result.create(Collections.singletonList( - PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), ByteUtil.EMPTY_BYTE_ARRAY))); - } - m = new Put(row, currentValueKV.getTimestamp()); - m.getFamilyCellMap().putAll(append.getFamilyCellMap()); - break; - case DROP_SEQUENCE: - m = new Delete(row, clientTimestamp); - break; - case CREATE_SEQUENCE: - m = new Put(row, clientTimestamp); - m.getFamilyCellMap().putAll(append.getFamilyCellMap()); - break; - } - if (!hadClientTimestamp) { - for (List kvs : m.getFamilyCellMap().values()) { - for (Cell kv : kvs) { - ((ExtendedCell)kv).setTimestamp(clientTimestampBuf); - } - } - } - Mutation[] mutations = new Mutation[]{m}; - region.batchMutate(mutations); - long serverTimestamp = MetaDataUtil.getClientTimeStamp(m); - // Return result with single KeyValue. The only piece of information - // the client cares about is the timestamp, which is the timestamp of - // when the mutation was actually performed (useful in the case of . 
- return Result.create(Collections.singletonList( - PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE))); - } finally { - ServerUtil.releaseRowLocks(locks); - } - } catch (Throwable t) { - ClientUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t); - return null; // Impossible - } finally { - region.closeRegionOperation(); + /** + * Override the preAppend for checkAndPut and checkAndDelete, as we need the ability to a) set the + * TimeRange for the Get being done and b) return something back to the client to indicate + * success/failure + */ + @SuppressWarnings("deprecation") + @Override + public Result preAppend( + org.apache.hadoop.hbase.coprocessor.ObserverContext e, + Append append) throws IOException { + byte[] opBuf = append.getAttribute(OPERATION_ATTRIB); + if (opBuf == null) { + return null; + } + Sequence.MetaOp op = Sequence.MetaOp.values()[opBuf[0]]; + Cell keyValue = append.getFamilyCellMap().values().iterator().next().iterator().next(); + + long clientTimestamp = HConstants.LATEST_TIMESTAMP; + long minGetTimestamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP; + long maxGetTimestamp = HConstants.LATEST_TIMESTAMP; + boolean hadClientTimestamp; + byte[] clientTimestampBuf = null; + if (op == Sequence.MetaOp.RETURN_SEQUENCE) { + // When returning sequences, this allows us to send the expected timestamp + // of the sequence to make sure we don't reset any other sequence + hadClientTimestamp = true; + clientTimestamp = minGetTimestamp = keyValue.getTimestamp(); + maxGetTimestamp = minGetTimestamp + 1; + } else { + clientTimestampBuf = append.getAttribute(MAX_TIMERANGE_ATTRIB); + if (clientTimestampBuf != null) { + clientTimestamp = maxGetTimestamp = Bytes.toLong(clientTimestampBuf); + } + hadClientTimestamp = (clientTimestamp != HConstants.LATEST_TIMESTAMP); + if (hadClientTimestamp) { + // Prevent race condition of creating two sequences at the same timestamp + // by looking for a sequence at or after the timestamp at which it'll be + // created. + if (op == Sequence.MetaOp.CREATE_SEQUENCE) { + maxGetTimestamp = clientTimestamp + 1; } + } else { + clientTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + maxGetTimestamp = clientTimestamp + 1; + clientTimestampBuf = Bytes.toBytes(clientTimestamp); + } } - - /** - * Determines whether a request for incrementing the sequence was a bulk allocation and if so - * what the number of slots to allocate is. This is triggered by the NEXT VALUES FOR expression. - * For backwards compatibility with older clients, we default the value to 1 which preserves - * existing behavior when invoking NEXT VALUE FOR. 
- */ - private long calculateNumSlotsToAllocate(final Increment increment) { - long numToAllocate = 1; - byte[] numToAllocateBytes = increment.getAttribute(SequenceRegionObserverConstants.NUM_TO_ALLOCATE); - if (numToAllocateBytes != null) { - numToAllocate = Bytes.toLong(numToAllocateBytes); + + RegionCoprocessorEnvironment env = e.getEnvironment(); + // We need to set this to prevent region.append from being called + e.bypass(); + Region region = env.getRegion(); + byte[] row = append.getRow(); + List locks = Lists.newArrayList(); + region.startRegionOperation(); + try { + ServerUtil.acquireLock(region, row, locks); + + byte[] family = CellUtil.cloneFamily(keyValue); + byte[] qualifier = CellUtil.cloneQualifier(keyValue); + + Get get = new Get(row); + get.setTimeRange(minGetTimestamp, maxGetTimestamp); + get.addColumn(family, qualifier); + try (RegionScanner scanner = region.getScanner(new Scan(get))) { + List cells = new ArrayList<>(); + scanner.next(cells); + if (cells.isEmpty()) { + if (op == Sequence.MetaOp.DROP_SEQUENCE || op == Sequence.MetaOp.RETURN_SEQUENCE) { + return getErrorResult(row, clientTimestamp, + SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode()); + } + } else { + if (op == Sequence.MetaOp.CREATE_SEQUENCE) { + return getErrorResult(row, clientTimestamp, + SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode()); + } } - return numToAllocate; + Mutation m = null; + switch (op) { + case RETURN_SEQUENCE: + KeyValue currentValueKV = PhoenixKeyValueUtil.maybeCopyCell(cells.get(0)); + long expectedValue = PLong.INSTANCE.getCodec() + .decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault()); + long value = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), + currentValueKV.getValueOffset(), SortOrder.getDefault()); + // Timestamp should match exactly, or we may have the wrong sequence + if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) { + return Result.create(Collections.singletonList(PhoenixKeyValueUtil.newKeyValue(row, + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), + ByteUtil.EMPTY_BYTE_ARRAY))); + } + m = new Put(row, currentValueKV.getTimestamp()); + m.getFamilyCellMap().putAll(append.getFamilyCellMap()); + break; + case DROP_SEQUENCE: + m = new Delete(row, clientTimestamp); + break; + case CREATE_SEQUENCE: + m = new Put(row, clientTimestamp); + m.getFamilyCellMap().putAll(append.getFamilyCellMap()); + break; + } + if (!hadClientTimestamp) { + for (List kvs : m.getFamilyCellMap().values()) { + for (Cell kv : kvs) { + ((ExtendedCell) kv).setTimestamp(clientTimestampBuf); + } + } + } + Mutation[] mutations = new Mutation[] { m }; + region.batchMutate(mutations); + long serverTimestamp = MetaDataUtil.getClientTimeStamp(m); + // Return result with single KeyValue. The only piece of information + // the client cares about is the timestamp, which is the timestamp of + // when the mutation was actually performed (useful in the case of . 
+ return Result.create(Collections.singletonList( + PhoenixKeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE))); + } finally { + ServerUtil.releaseRowLocks(locks); + } + } catch (Throwable t) { + ClientUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t); + return null; // Impossible + } finally { + region.closeRegionOperation(); + } + } + + /** + * Determines whether a request for incrementing the sequence was a bulk allocation and if so what + * the number of slots to allocate is. This is triggered by the NEXT VALUES FOR expression. + * For backwards compatibility with older clients, we default the value to 1 which preserves + * existing behavior when invoking NEXT VALUE FOR. + */ + private long calculateNumSlotsToAllocate(final Increment increment) { + long numToAllocate = 1; + byte[] numToAllocateBytes = + increment.getAttribute(SequenceRegionObserverConstants.NUM_TO_ALLOCATE); + if (numToAllocateBytes != null) { + numToAllocate = Bytes.toLong(numToAllocateBytes); } + return numToAllocate; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ServerCachingEndpointImpl.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ServerCachingEndpointImpl.java index 98e4b498ada..a41f151a7a3 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ServerCachingEndpointImpl.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/ServerCachingEndpointImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,56 +44,54 @@ import com.google.protobuf.Service; /** - * * Server-side implementation of {@link ServerCachingProtocol} - * - * * @since 0.1 */ -public class ServerCachingEndpointImpl extends ServerCachingService implements RegionCoprocessor - { +public class ServerCachingEndpointImpl extends ServerCachingService implements RegionCoprocessor { private RegionCoprocessorEnvironment env; - + @Override public Iterable getServices() { - return Collections.singleton(this); + return Collections.singleton(this); } @Override public void addServerCache(RpcController controller, AddServerCacheRequest request, - RpcCallback done) { - ImmutableBytesPtr tenantId = null; - if (request.hasTenantId()) { - tenantId = new ImmutableBytesPtr(request.getTenantId().toByteArray()); - } - TenantCache tenantCache = GlobalCache.getTenantCache(this.env, tenantId); - ImmutableBytesWritable cachePtr = - org.apache.phoenix.protobuf.ProtobufUtil - .toImmutableBytesWritable(request.getCachePtr()); - byte[] txState = request.hasTxState() ? request.getTxState().toByteArray() : ByteUtil.EMPTY_BYTE_ARRAY; + RpcCallback done) { + ImmutableBytesPtr tenantId = null; + if (request.hasTenantId()) { + tenantId = new ImmutableBytesPtr(request.getTenantId().toByteArray()); + } + TenantCache tenantCache = GlobalCache.getTenantCache(this.env, tenantId); + ImmutableBytesWritable cachePtr = + org.apache.phoenix.protobuf.ProtobufUtil.toImmutableBytesWritable(request.getCachePtr()); + byte[] txState = + request.hasTxState() ? 
request.getTxState().toByteArray() : ByteUtil.EMPTY_BYTE_ARRAY; - try { - @SuppressWarnings("unchecked") - Class serverCacheFactoryClass = - (Class) Class.forName(request.getCacheFactory().getClassName()); - ServerCacheFactory cacheFactory = serverCacheFactoryClass.newInstance(); - tenantCache.addServerCache(new ImmutableBytesPtr(request.getCacheId().toByteArray()), - cachePtr, txState, cacheFactory, request.hasHasProtoBufIndexMaintainer() && request.getHasProtoBufIndexMaintainer(), - request.getUsePersistentCache(), request.hasClientVersion() ? request.getClientVersion() : ScanUtil.UNKNOWN_CLIENT_VERSION); - } catch (Throwable e) { - ProtobufUtil.setControllerException(controller, - ClientUtil.createIOException("Error when adding cache: ", e)); - } - AddServerCacheResponse.Builder responseBuilder = AddServerCacheResponse.newBuilder(); - responseBuilder.setReturn(true); - AddServerCacheResponse result = responseBuilder.build(); - done.run(result); + try { + @SuppressWarnings("unchecked") + Class serverCacheFactoryClass = + (Class) Class.forName(request.getCacheFactory().getClassName()); + ServerCacheFactory cacheFactory = serverCacheFactoryClass.newInstance(); + tenantCache.addServerCache(new ImmutableBytesPtr(request.getCacheId().toByteArray()), + cachePtr, txState, cacheFactory, + request.hasHasProtoBufIndexMaintainer() && request.getHasProtoBufIndexMaintainer(), + request.getUsePersistentCache(), + request.hasClientVersion() ? request.getClientVersion() : ScanUtil.UNKNOWN_CLIENT_VERSION); + } catch (Throwable e) { + ProtobufUtil.setControllerException(controller, + ClientUtil.createIOException("Error when adding cache: ", e)); + } + AddServerCacheResponse.Builder responseBuilder = AddServerCacheResponse.newBuilder(); + responseBuilder.setReturn(true); + AddServerCacheResponse result = responseBuilder.build(); + done.run(result); } @Override public void removeServerCache(RpcController controller, RemoveServerCacheRequest request, - RpcCallback done) { + RpcCallback done) { ImmutableBytesPtr tenantId = null; if (request.hasTenantId()) { tenantId = new ImmutableBytesPtr(request.getTenantId().toByteArray()); diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SuffixFilter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SuffixFilter.java index 57dffbc9f7a..8e6295dbb0d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SuffixFilter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SuffixFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,48 +25,44 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * * Matches rows that end with a given byte array suffix - * - * * @since 3.0 */ public class SuffixFilter extends FilterBase { - protected byte[] suffix = null; + protected byte[] suffix = null; + + public SuffixFilter(final byte[] suffix) { + this.suffix = suffix; + } - public SuffixFilter(final byte[] suffix) { - this.suffix = suffix; - } - - @Override - public byte[] toByteArray() throws IOException { - return suffix; - } + @Override + public byte[] toByteArray() throws IOException { + return suffix; + } - @Override - public ReturnCode filterCell(Cell ignored) throws IOException { - return ReturnCode.INCLUDE; - } + @Override + public ReturnCode filterCell(Cell ignored) throws IOException { + return ReturnCode.INCLUDE; + } - @Override - public ReturnCode filterKeyValue(Cell ignored) throws IOException { - return ReturnCode.INCLUDE; - } + @Override + public ReturnCode filterKeyValue(Cell ignored) throws IOException { + return ReturnCode.INCLUDE; + } - @Override - public boolean filterRowKey(byte[] buffer, int offset, int length) { - if (buffer == null || this.suffix == null) return true; - if (length < suffix.length) return true; - // if they are equal, return false => pass row - // else return true, filter row - // if we are passed the suffix, set flag - int cmp = Bytes.compareTo(buffer, offset + (length - this.suffix.length), - this.suffix.length, this.suffix, 0, this.suffix.length); - return cmp != 0; - } + @Override + public boolean filterRowKey(byte[] buffer, int offset, int length) { + if (buffer == null || this.suffix == null) return true; + if (length < suffix.length) return true; + // if they are equal, return false => pass row + // else return true, filter row + // if we are passed the suffix, set flag + int cmp = Bytes.compareTo(buffer, offset + (length - this.suffix.length), this.suffix.length, + this.suffix, 0, this.suffix.length); + return cmp != 0; + } - - public static SuffixFilter parseFrom(final byte [] pbBytes) throws DeserializationException { - return new SuffixFilter(pbBytes); - } -} \ No newline at end of file + public static SuffixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { + return new SuffixFilter(pbBytes); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java index 848e7ea8511..91286391a2b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/SystemCatalogRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.util.ScanUtil.UNKNOWN_CLIENT_VERSION; + +import java.io.IOException; +import java.util.Optional; + import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; @@ -25,31 +30,26 @@ import org.apache.phoenix.filter.SystemCatalogViewIndexIdFilter; import org.apache.phoenix.util.ScanUtil; -import java.io.IOException; -import java.util.Optional; - -import static org.apache.phoenix.util.ScanUtil.UNKNOWN_CLIENT_VERSION; - /** * Coprocessor that checks whether the VIEW_INDEX_ID needs to retrieve. */ public class SystemCatalogRegionObserver implements RegionObserver, RegionCoprocessor { - @Override - public void preScannerOpen(ObserverContext e, Scan scan) - throws IOException { - int clientVersion = ScanUtil.getClientVersion(scan); - /* - ScanUtil.getClientVersion returns UNKNOWN_CLIENT_VERSION if the phoenix client version - isn't set. We only want to retrieve the data based on the client version, and we don't - want to change the behavior other than Phoenix env. - */ - if (clientVersion != UNKNOWN_CLIENT_VERSION) { - ScanUtil.andFilterAtBeginning(scan, new SystemCatalogViewIndexIdFilter(clientVersion)); - } + @Override + public void preScannerOpen(ObserverContext e, Scan scan) + throws IOException { + int clientVersion = ScanUtil.getClientVersion(scan); + /* + * ScanUtil.getClientVersion returns UNKNOWN_CLIENT_VERSION if the phoenix client version isn't + * set. We only want to retrieve the data based on the client version, and we don't want to + * change the behavior other than Phoenix env. + */ + if (clientVersion != UNKNOWN_CLIENT_VERSION) { + ScanUtil.andFilterAtBeginning(scan, new SystemCatalogViewIndexIdFilter(clientVersion)); } + } - @Override - public Optional getRegionObserver() { - return Optional.of(this); - } -} \ No newline at end of file + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java index 2543128ece8..ee4e577fc2d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TTLRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.isPhoenixTableTTLEnabled; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -33,234 +38,227 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.ScanUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM; -import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.isPhoenixTableTTLEnabled; - /** - * TTLRegionScanner masks expired rows using the empty column cell timestamp + * TTLRegionScanner masks expired rows using the empty column cell timestamp */ public class TTLRegionScanner extends BaseRegionScanner { - private static final Logger LOG = - LoggerFactory.getLogger(TTLRegionScanner.class); - private final boolean isMaskingEnabled; - private final RegionCoprocessorEnvironment env; - private Scan scan; - private long rowCount = 0; - private long maxRowCount = Long.MAX_VALUE; - private long pageSizeMs; - long ttl; - long ttlWindowStart; - byte[] emptyCQ; - byte[] emptyCF; - private boolean initialized = false; + private static final Logger LOG = LoggerFactory.getLogger(TTLRegionScanner.class); + private final boolean isMaskingEnabled; + private final RegionCoprocessorEnvironment env; + private Scan scan; + private long rowCount = 0; + private long maxRowCount = Long.MAX_VALUE; + private long pageSizeMs; + long ttl; + long ttlWindowStart; + byte[] emptyCQ; + byte[] emptyCF; + private boolean initialized = false; - public TTLRegionScanner(final RegionCoprocessorEnvironment env, final Scan scan, - final RegionScanner s) { - super(s); - this.env = env; - this.scan = scan; - this.pageSizeMs = ScanUtil.getPageSizeMsForRegionScanner(scan); - emptyCQ = scan.getAttribute(EMPTY_COLUMN_QUALIFIER_NAME); - emptyCF = scan.getAttribute(EMPTY_COLUMN_FAMILY_NAME); - long currentTime = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP ? 
- EnvironmentEdgeManager.currentTimeMillis() : scan.getTimeRange().getMax(); - byte[] isSystemTable = scan.getAttribute(IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM); - if (isPhoenixTableTTLEnabled(env.getConfiguration()) && (isSystemTable == null - || !Bytes.toBoolean(isSystemTable))) { - ttl = ScanUtil.getTTL(this.scan); - } else { - ttl = env.getRegion().getTableDescriptor().getColumnFamilies()[0].getTimeToLive(); - } - // Regardless if the Phoenix Table TTL feature is disabled cluster wide or the client is - // an older client and does not supply the empty column parameters, the masking should not - // be done here. We also disable masking when TTL is HConstants.FOREVER. - isMaskingEnabled = emptyCF != null && emptyCQ != null && ttl != HConstants.FOREVER - && (isPhoenixTableTTLEnabled(env.getConfiguration()) && (isSystemTable == null - || !Bytes.toBoolean(isSystemTable))); - - ttlWindowStart = ttl == HConstants.FOREVER ? 1 : currentTime - ttl * 1000; - ttl *= 1000; + public TTLRegionScanner(final RegionCoprocessorEnvironment env, final Scan scan, + final RegionScanner s) { + super(s); + this.env = env; + this.scan = scan; + this.pageSizeMs = ScanUtil.getPageSizeMsForRegionScanner(scan); + emptyCQ = scan.getAttribute(EMPTY_COLUMN_QUALIFIER_NAME); + emptyCF = scan.getAttribute(EMPTY_COLUMN_FAMILY_NAME); + long currentTime = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP + ? EnvironmentEdgeManager.currentTimeMillis() + : scan.getTimeRange().getMax(); + byte[] isSystemTable = scan.getAttribute(IS_PHOENIX_TTL_SCAN_TABLE_SYSTEM); + if ( + isPhoenixTableTTLEnabled(env.getConfiguration()) + && (isSystemTable == null || !Bytes.toBoolean(isSystemTable)) + ) { + ttl = ScanUtil.getTTL(this.scan); + } else { + ttl = env.getRegion().getTableDescriptor().getColumnFamilies()[0].getTimeToLive(); } + // Regardless if the Phoenix Table TTL feature is disabled cluster wide or the client is + // an older client and does not supply the empty column parameters, the masking should not + // be done here. We also disable masking when TTL is HConstants.FOREVER. + isMaskingEnabled = emptyCF != null && emptyCQ != null && ttl != HConstants.FOREVER + && (isPhoenixTableTTLEnabled(env.getConfiguration()) + && (isSystemTable == null || !Bytes.toBoolean(isSystemTable))); - private void init() throws IOException { - // HBase PageFilter will also count the expired rows. - // Instead of using PageFilter for counting, we will count returned row here. - PageFilter pageFilter = ScanUtil.removePageFilter(scan); - if (pageFilter != null) { - maxRowCount = pageFilter.getPageSize(); - delegate.close(); - delegate = ((DelegateRegionScanner)delegate).getNewRegionScanner(scan); - } - } + ttlWindowStart = ttl == HConstants.FOREVER ? 
1 : currentTime - ttl * 1000; + ttl *= 1000; + } - private boolean isExpired(List result) throws IOException { - long maxTimestamp = 0; - long minTimestamp = Long.MAX_VALUE; - long ts; - boolean found = false; - for (Cell c : result) { - ts = c.getTimestamp(); - if (!found && ScanUtil.isEmptyColumn(c, emptyCF, emptyCQ)) { - if (ts < ttlWindowStart) { - return true; - } - found = true; - } - if (maxTimestamp < ts) { - maxTimestamp = ts; - } - if (minTimestamp > ts) { - minTimestamp = ts; - } - } - if (!found) { - LOG.warn("No empty column cell " + env.getRegion().getRegionInfo().getTable()); - } - if (maxTimestamp - minTimestamp <= ttl) { - return false; - } - // We need check if the gap between two consecutive cell timestamps is more than ttl - // and if so trim the cells beyond the gap - Scan singleRowScan = new Scan(); - singleRowScan.setRaw(true); - singleRowScan.readAllVersions(); - singleRowScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - byte[] rowKey = CellUtil.cloneRow(result.get(0)); - singleRowScan.withStartRow(rowKey, true); - singleRowScan.withStopRow(rowKey, true); - RegionScanner scanner = ((DelegateRegionScanner)delegate).getNewRegionScanner(singleRowScan); - List row = new ArrayList<>(); - scanner.next(row); - scanner.close(); - if (row.isEmpty()) { - return true; - } - int size = row.size(); - long tsArray[] = new long[size]; - int i = 0; - for (Cell cell : row) { - tsArray[i++] = cell.getTimestamp(); - } - Arrays.sort(tsArray); - for (i = size - 1; i > 0; i--) { - if (tsArray[i] - tsArray[i - 1] > ttl) { - minTimestamp = tsArray[i]; - break; - } - } - Iterator iterator = result.iterator(); - while(iterator.hasNext()) { - if (iterator.next().getTimestamp() < minTimestamp) { - iterator.remove(); - } - } - return false; + private void init() throws IOException { + // HBase PageFilter will also count the expired rows. + // Instead of using PageFilter for counting, we will count returned row here. + PageFilter pageFilter = ScanUtil.removePageFilter(scan); + if (pageFilter != null) { + maxRowCount = pageFilter.getPageSize(); + delegate.close(); + delegate = ((DelegateRegionScanner) delegate).getNewRegionScanner(scan); } + } - private boolean skipExpired(List result, boolean raw, boolean hasMore) throws IOException { - boolean expired = isExpired(result); - if (!expired) { - return hasMore; - } - result.clear(); - if (!hasMore) { - return false; + private boolean isExpired(List result) throws IOException { + long maxTimestamp = 0; + long minTimestamp = Long.MAX_VALUE; + long ts; + boolean found = false; + for (Cell c : result) { + ts = c.getTimestamp(); + if (!found && ScanUtil.isEmptyColumn(c, emptyCF, emptyCQ)) { + if (ts < ttlWindowStart) { + return true; } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - do { - hasMore = raw ? 
delegate.nextRaw(result) : delegate.next(result); - if (result.isEmpty() || ScanUtil.isDummy(result)) { - return hasMore; - } - if (!isExpired(result)) { - return hasMore; - } - Cell cell = result.get(0); - result.clear(); - if (EnvironmentEdgeManager.currentTimeMillis() - startTime > pageSizeMs) { - ScanUtil.getDummyResult(CellUtil.cloneRow(cell), result); - return hasMore; - } - } while (hasMore); - return false; + found = true; + } + if (maxTimestamp < ts) { + maxTimestamp = ts; + } + if (minTimestamp > ts) { + minTimestamp = ts; + } } - - private boolean next(List result, boolean raw, ScannerContext scannerContext) - throws IOException { - boolean hasMore; - if (!isMaskingEnabled) { - if (scannerContext != null) { - hasMore = raw - ? delegate.nextRaw(result, scannerContext) - : delegate.next(result, scannerContext); - } else { - hasMore = raw ? delegate.nextRaw(result) : delegate.next(result); - } - return hasMore; - } - if (!initialized) { - init(); - initialized = true; - } - - if (scannerContext != null) { - hasMore = raw - ? delegate.nextRaw(result, scannerContext) - : delegate.next(result, scannerContext); - } else { - hasMore = raw ? delegate.nextRaw(result) : delegate.next(result); - } - - if (result.isEmpty() || ScanUtil.isDummy(result)) { - return hasMore; - } - hasMore = skipExpired(result, raw, hasMore); - if (result.isEmpty() || ScanUtil.isDummy(result)) { - return hasMore; - } - rowCount++; - if (rowCount >= maxRowCount) { - return false; - } - return hasMore; + if (!found) { + LOG.warn("No empty column cell " + env.getRegion().getRegionInfo().getTable()); + } + if (maxTimestamp - minTimestamp <= ttl) { + return false; + } + // We need check if the gap between two consecutive cell timestamps is more than ttl + // and if so trim the cells beyond the gap + Scan singleRowScan = new Scan(); + singleRowScan.setRaw(true); + singleRowScan.readAllVersions(); + singleRowScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + byte[] rowKey = CellUtil.cloneRow(result.get(0)); + singleRowScan.withStartRow(rowKey, true); + singleRowScan.withStopRow(rowKey, true); + RegionScanner scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(singleRowScan); + List row = new ArrayList<>(); + scanner.next(row); + scanner.close(); + if (row.isEmpty()) { + return true; + } + int size = row.size(); + long tsArray[] = new long[size]; + int i = 0; + for (Cell cell : row) { + tsArray[i++] = cell.getTimestamp(); + } + Arrays.sort(tsArray); + for (i = size - 1; i > 0; i--) { + if (tsArray[i] - tsArray[i - 1] > ttl) { + minTimestamp = tsArray[i]; + break; + } + } + Iterator iterator = result.iterator(); + while (iterator.hasNext()) { + if (iterator.next().getTimestamp() < minTimestamp) { + iterator.remove(); + } } + return false; + } - @Override - public boolean next(List results) throws IOException { - return next(results, false, null); + private boolean skipExpired(List result, boolean raw, boolean hasMore) throws IOException { + boolean expired = isExpired(result); + if (!expired) { + return hasMore; } + result.clear(); + if (!hasMore) { + return false; + } + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + do { + hasMore = raw ? 
delegate.nextRaw(result) : delegate.next(result); + if (result.isEmpty() || ScanUtil.isDummy(result)) { + return hasMore; + } + if (!isExpired(result)) { + return hasMore; + } + Cell cell = result.get(0); + result.clear(); + if (EnvironmentEdgeManager.currentTimeMillis() - startTime > pageSizeMs) { + ScanUtil.getDummyResult(CellUtil.cloneRow(cell), result); + return hasMore; + } + } while (hasMore); + return false; + } - @Override - public boolean nextRaw(List results) throws IOException { - return next(results, true, null); + private boolean next(List result, boolean raw, ScannerContext scannerContext) + throws IOException { + boolean hasMore; + if (!isMaskingEnabled) { + if (scannerContext != null) { + hasMore = + raw ? delegate.nextRaw(result, scannerContext) : delegate.next(result, scannerContext); + } else { + hasMore = raw ? delegate.nextRaw(result) : delegate.next(result); + } + return hasMore; + } + if (!initialized) { + init(); + initialized = true; } - @Override - public boolean next(List results, ScannerContext scannerContext) throws IOException { - return next(results, false, scannerContext); + if (scannerContext != null) { + hasMore = + raw ? delegate.nextRaw(result, scannerContext) : delegate.next(result, scannerContext); + } else { + hasMore = raw ? delegate.nextRaw(result) : delegate.next(result); } - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { - return next(results, true, scannerContext); + if (result.isEmpty() || ScanUtil.isDummy(result)) { + return hasMore; + } + hasMore = skipExpired(result, raw, hasMore); + if (result.isEmpty() || ScanUtil.isDummy(result)) { + return hasMore; } + rowCount++; + if (rowCount >= maxRowCount) { + return false; + } + return hasMore; + } - @Override - public RegionScanner getNewRegionScanner(Scan scan) throws IOException { - try { - return new TTLRegionScanner(env, scan, - ((DelegateRegionScanner)delegate).getNewRegionScanner(scan)); - } catch (ClassCastException e) { - throw new DoNotRetryIOException(e); - } + @Override + public boolean next(List results) throws IOException { + return next(results, false, null); + } + + @Override + public boolean nextRaw(List results) throws IOException { + return next(results, true, null); + } + + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { + return next(results, false, scannerContext); + } + + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, true, scannerContext); + } + + @Override + public RegionScanner getNewRegionScanner(Scan scan) throws IOException { + try { + return new TTLRegionScanner(env, scan, + ((DelegateRegionScanner) delegate).getNewRegionScanner(scan)); + } catch (ClassCastException e) { + throw new DoNotRetryIOException(e); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpoint.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpoint.java index bd6d0bd1c84..756946560bf 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpoint.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpoint.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.coprocessor; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; +import static org.apache.phoenix.coprocessor.MetaDataEndpointImpl.mutateRowsWithLocks; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Mutation; @@ -29,10 +31,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.phoenix.coprocessor.generated.MetaDataProtos; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse; -import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos - .TaskMetaDataService; -import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos - .TaskMutateRequest; +import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos.TaskMetaDataService; +import org.apache.phoenix.coprocessor.generated.TaskMetaDataProtos.TaskMutateRequest; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.protobuf.ProtobufUtil; import org.apache.phoenix.query.QueryServices; @@ -43,78 +43,63 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import static org.apache.phoenix.coprocessor.MetaDataEndpointImpl - .mutateRowsWithLocks; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; /** - * Phoenix metadata mutations for SYSTEM.TASK flows through this co-processor - * Endpoint. + * Phoenix metadata mutations for SYSTEM.TASK flows through this co-processor Endpoint. 
*/ -public class TaskMetaDataEndpoint extends TaskMetaDataService - implements RegionCoprocessor { +public class TaskMetaDataEndpoint extends TaskMetaDataService implements RegionCoprocessor { - private static final Logger LOGGER = - LoggerFactory.getLogger(TaskMetaDataEndpoint.class); + private static final Logger LOGGER = LoggerFactory.getLogger(TaskMetaDataEndpoint.class); - private RegionCoprocessorEnvironment env; - private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; - private boolean accessCheckEnabled; + private RegionCoprocessorEnvironment env; + private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost; + private boolean accessCheckEnabled; - @Override - public void start(CoprocessorEnvironment env) throws IOException { - if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment) env; - } else { - throw new CoprocessorException("Must be loaded on a table region!"); - } - this.phoenixAccessCoprocessorHost = - new PhoenixMetaDataCoprocessorHost(this.env); - this.accessCheckEnabled = env.getConfiguration().getBoolean( - QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment) env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); } + this.phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env); + this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + } - @Override - public Iterable getServices() { - return Collections.singleton(this); - } + @Override + public Iterable getServices() { + return Collections.singleton(this); + } - @Override - public void upsertTaskDetails(RpcController controller, - TaskMutateRequest request, RpcCallback done) { - MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); - try { - List taskMutations = ProtobufUtil.getMutations(request); - if (taskMutations.isEmpty()) { - done.run(builder.build()); - return; - } - byte[][] rowKeyMetaData = new byte[3][]; - MetaDataUtil.getTenantIdAndSchemaAndTableName(taskMutations, - rowKeyMetaData); - byte[] schemaName = - rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; - byte[] tableName = - rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; - String fullTableName = SchemaUtil.getTableName(schemaName, - tableName); + @Override + public void upsertTaskDetails(RpcController controller, TaskMutateRequest request, + RpcCallback done) { + MetaDataResponse.Builder builder = MetaDataResponse.newBuilder(); + try { + List taskMutations = ProtobufUtil.getMutations(request); + if (taskMutations.isEmpty()) { + done.run(builder.build()); + return; + } + byte[][] rowKeyMetaData = new byte[3][]; + MetaDataUtil.getTenantIdAndSchemaAndTableName(taskMutations, rowKeyMetaData); + byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]; + byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]; + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - phoenixAccessCoprocessorHost.preUpsertTaskDetails(fullTableName); + phoenixAccessCoprocessorHost.preUpsertTaskDetails(fullTableName); - mutateRowsWithLocks(this.accessCheckEnabled, this.env.getRegion(), - taskMutations, Collections.emptySet(), HConstants.NO_NONCE, - HConstants.NO_NONCE); - } 
catch (Throwable t) { - LOGGER.error("Unable to write mutations to {}", - PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, t); - builder.setReturnCode( - MetaDataProtos.MutationCode.UNABLE_TO_UPSERT_TASK); - builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); - done.run(builder.build()); - } + mutateRowsWithLocks(this.accessCheckEnabled, this.env.getRegion(), taskMutations, + Collections.emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE); + } catch (Throwable t) { + LOGGER.error("Unable to write mutations to {}", PhoenixDatabaseMetaData.SYSTEM_TASK_NAME, t); + builder.setReturnCode(MetaDataProtos.MutationCode.UNABLE_TO_UPSERT_TASK); + builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis()); + done.run(builder.build()); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java index 207286523f4..03e47ab04a6 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TaskRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +21,6 @@ import java.lang.reflect.Method; import java.sql.SQLException; import java.sql.Timestamp; - import java.util.List; import java.util.Map; import java.util.Optional; @@ -31,12 +30,6 @@ import javax.annotation.concurrent.GuardedBy; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.phoenix.schema.task.ServerTask; -import org.apache.phoenix.schema.task.SystemTaskParams; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -48,242 +41,237 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.TaskType; +import org.apache.phoenix.schema.task.ServerTask; +import org.apache.phoenix.schema.task.SystemTaskParams; import org.apache.phoenix.schema.task.Task; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.JacksonUtil; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; + /** - * Coprocessor for task related operations. This coprocessor would only be registered - * to SYSTEM.TASK table + * Coprocessor for task related operations. 
This coprocessor would only be registered to SYSTEM.TASK + * table */ public class TaskRegionObserver implements RegionObserver, RegionCoprocessor { - public static final Logger LOGGER = LoggerFactory.getLogger(TaskRegionObserver.class); - public static final String TASK_DETAILS = "TaskDetails"; - - protected ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(TaskType.values().length); - private long timeInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS; - private long timeMaxInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS; - @GuardedBy("TaskRegionObserver.class") - private long initialDelay = QueryServicesOptions.DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS; - - private static Map classMap = ImmutableMap.builder() - .put(TaskType.DROP_CHILD_VIEWS, "org.apache.phoenix.coprocessor.tasks.DropChildViewsTask") - .put(TaskType.INDEX_REBUILD, "org.apache.phoenix.coprocessor.tasks.IndexRebuildTask") - .put(TaskType.TRANSFORM_MONITOR, "org.apache.phoenix.coprocessor.tasks.TransformMonitorTask") - .build(); - - public enum TaskResultCode { - SUCCESS, - FAIL, - SKIPPED, + public static final Logger LOGGER = LoggerFactory.getLogger(TaskRegionObserver.class); + public static final String TASK_DETAILS = "TaskDetails"; + + protected ScheduledThreadPoolExecutor executor = + new ScheduledThreadPoolExecutor(TaskType.values().length); + private long timeInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS; + private long timeMaxInterval = QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS; + @GuardedBy("TaskRegionObserver.class") + private long initialDelay = QueryServicesOptions.DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS; + + private static Map classMap = ImmutableMap. builder() + .put(TaskType.DROP_CHILD_VIEWS, "org.apache.phoenix.coprocessor.tasks.DropChildViewsTask") + .put(TaskType.INDEX_REBUILD, "org.apache.phoenix.coprocessor.tasks.IndexRebuildTask") + .put(TaskType.TRANSFORM_MONITOR, "org.apache.phoenix.coprocessor.tasks.TransformMonitorTask") + .build(); + + public enum TaskResultCode { + SUCCESS, + FAIL, + SKIPPED, + } + + public static class TaskResult { + private TaskResultCode resultCode; + private String details; + + public TaskResult(TaskResultCode resultCode, String details) { + this.resultCode = resultCode; + this.details = details; } - public static class TaskResult { - private TaskResultCode resultCode; - private String details; - - public TaskResult(TaskResultCode resultCode, String details) { - this.resultCode = resultCode; - this.details = details; - } - - public TaskResultCode getResultCode() { - return resultCode; - } - - public String getDetails() { - return details; - } - - @Override - public String toString() { - String result = resultCode.name(); - if (!Strings.isNullOrEmpty(details)) { - result = result + " - " + details; - } - return result; - } + public TaskResultCode getResultCode() { + return resultCode; } - @Override - public void preClose(final ObserverContext c, - boolean abortRequested) { - executor.shutdownNow(); + public String getDetails() { + return details; } @Override - public Optional getRegionObserver() { - return Optional.of(this); + public String toString() { + String result = resultCode.name(); + if (!Strings.isNullOrEmpty(details)) { + result = result + " - " + details; + } + return result; } - - @Override - public void start(CoprocessorEnvironment env) throws IOException { - Configuration config = env.getConfiguration(); - timeInterval = - config.getLong( - 
QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, - QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS); - timeMaxInterval = - config.getLong( - QueryServices.TASK_HANDLING_MAX_INTERVAL_MS_ATTRIB, - QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS); - initialDelay = - config.getLong( - QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB, - QueryServicesOptions.DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS); + } + + @Override + public void preClose(final ObserverContext c, + boolean abortRequested) { + executor.shutdownNow(); + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + Configuration config = env.getConfiguration(); + timeInterval = config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, + QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS); + timeMaxInterval = config.getLong(QueryServices.TASK_HANDLING_MAX_INTERVAL_MS_ATTRIB, + QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS); + initialDelay = config.getLong(QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB, + QueryServicesOptions.DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS); + } + + @Override + public void postOpen(ObserverContext e) { + final RegionCoprocessorEnvironment env = e.getEnvironment(); + + SelfHealingTask task = new SelfHealingTask(e.getEnvironment(), timeMaxInterval); + executor.scheduleWithFixedDelay(task, initialDelay, timeInterval, TimeUnit.MILLISECONDS); + } + + public static class SelfHealingTask extends TimerTask { + protected RegionCoprocessorEnvironment env; + protected long timeMaxInterval; + protected boolean accessCheckEnabled; + + public SelfHealingTask(RegionCoprocessorEnvironment env, long timeMaxInterval) { + this.env = env; + this.accessCheckEnabled = env.getConfiguration().getBoolean( + QueryServices.PHOENIX_ACLS_ENABLED, QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); + this.timeMaxInterval = timeMaxInterval; } @Override - public void postOpen(ObserverContext e) { - final RegionCoprocessorEnvironment env = e.getEnvironment(); + public void run() { + PhoenixConnection connForTask = null; + try { + connForTask = + QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class); + String[] excludeStates = new String[] { PTable.TaskStatus.FAILED.toString(), + PTable.TaskStatus.COMPLETED.toString() }; + List taskRecords = Task.queryTaskTable(connForTask, excludeStates); + for (Task.TaskRecord taskRecord : taskRecords) { + try { + TaskType taskType = taskRecord.getTaskType(); + if (!classMap.containsKey(taskType)) { + LOGGER.warn("Don't know how to execute task type: " + taskType.name()); + continue; + } - SelfHealingTask task = new SelfHealingTask(e.getEnvironment(), timeMaxInterval); - executor.scheduleWithFixedDelay(task, initialDelay, timeInterval, TimeUnit.MILLISECONDS); - } + String className = classMap.get(taskType); + + Class concreteClass = Class.forName(className); + + Object obj = concreteClass.newInstance(); + Method runMethod = concreteClass.getDeclaredMethod("run", Task.TaskRecord.class); + Method checkCurretResult = + concreteClass.getDeclaredMethod("checkCurrentResult", Task.TaskRecord.class); + Method initMethod = concreteClass.getSuperclass().getDeclaredMethod("init", + RegionCoprocessorEnvironment.class, Long.class); + initMethod.invoke(obj, env, timeMaxInterval); + + // if current status is already Started, check if we need to re-run. + // Task can be async and already Started before. 
+ TaskResult result = null; + if ( + taskRecord.getStatus() != null + && taskRecord.getStatus().equals(PTable.TaskStatus.STARTED.toString()) + ) { + result = (TaskResult) checkCurretResult.invoke(obj, taskRecord); + } - public static class SelfHealingTask extends TimerTask { - protected RegionCoprocessorEnvironment env; - protected long timeMaxInterval; - protected boolean accessCheckEnabled; + if ( + result == null || taskRecord.getStatus().equals(PTable.TaskStatus.RETRY.toString()) + ) { + // reread task record. There might be async setting of task status + taskRecord = Task + .queryTaskTable(connForTask, taskRecord.getTimeStamp(), taskRecord.getSchemaName(), + taskRecord.getTableName(), taskType, taskRecord.getTenantId(), null) + .get(0); + if ( + taskRecord.getStatus() != null + && !taskRecord.getStatus().equals(PTable.TaskStatus.CREATED.toString()) + && !taskRecord.getStatus().equals(PTable.TaskStatus.RETRY.toString()) + ) { + continue; + } + + // Change task status to STARTED + ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder().setConn(connForTask) + .setTaskType(taskRecord.getTaskType()).setTenantId(taskRecord.getTenantId()) + .setSchemaName(taskRecord.getSchemaName()).setTableName(taskRecord.getTableName()) + .setTaskStatus(PTable.TaskStatus.STARTED.toString()).setData(taskRecord.getData()) + .setPriority(taskRecord.getPriority()).setStartTs(taskRecord.getTimeStamp()) + .setEndTs(null).setAccessCheckEnabled(true).build()); + + // invokes the method at runtime + result = (TaskResult) runMethod.invoke(obj, taskRecord); + } - public SelfHealingTask(RegionCoprocessorEnvironment env, long timeMaxInterval) { - this.env = env; - this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED); - this.timeMaxInterval = timeMaxInterval; - } + if (result != null) { + String taskStatus = PTable.TaskStatus.FAILED.toString(); + if (result.getResultCode() == TaskResultCode.SUCCESS) { + taskStatus = PTable.TaskStatus.COMPLETED.toString(); + } else if (result.getResultCode() == TaskResultCode.SKIPPED) { + // We will pickup this task again + continue; + } - @Override - public void run() { - PhoenixConnection connForTask = null; - try { - connForTask = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class); - String[] excludeStates = new String[] { PTable.TaskStatus.FAILED.toString(), - PTable.TaskStatus.COMPLETED.toString() }; - List taskRecords = Task.queryTaskTable(connForTask, excludeStates); - for (Task.TaskRecord taskRecord : taskRecords){ - try { - TaskType taskType = taskRecord.getTaskType(); - if (!classMap.containsKey(taskType)) { - LOGGER.warn("Don't know how to execute task type: " + taskType.name()); - continue; - } - - String className = classMap.get(taskType); - - Class concreteClass = Class.forName(className); - - Object obj = concreteClass.newInstance(); - Method runMethod = concreteClass.getDeclaredMethod("run", - Task.TaskRecord.class); - Method checkCurretResult = concreteClass.getDeclaredMethod("checkCurrentResult", Task.TaskRecord.class); - Method initMethod = concreteClass.getSuperclass().getDeclaredMethod("init", - RegionCoprocessorEnvironment.class, Long.class); - initMethod.invoke(obj, env, timeMaxInterval); - - // if current status is already Started, check if we need to re-run. - // Task can be async and already Started before. 
- TaskResult result = null; - if (taskRecord.getStatus() != null - && taskRecord.getStatus().equals(PTable.TaskStatus.STARTED.toString())) { - result = (TaskResult) checkCurretResult.invoke(obj, taskRecord); - } - - if (result == null || taskRecord.getStatus().equals(PTable.TaskStatus.RETRY.toString())) { - // reread task record. There might be async setting of task status - taskRecord = - Task.queryTaskTable(connForTask, taskRecord.getTimeStamp(), - taskRecord.getSchemaName(), taskRecord.getTableName(), - taskType, taskRecord.getTenantId(), null).get(0); - if (taskRecord.getStatus() != null - && !taskRecord.getStatus().equals(PTable.TaskStatus.CREATED.toString()) - && !taskRecord.getStatus().equals(PTable.TaskStatus.RETRY.toString())) { - continue; - } - - // Change task status to STARTED - ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(connForTask) - .setTaskType(taskRecord.getTaskType()) - .setTenantId(taskRecord.getTenantId()) - .setSchemaName(taskRecord.getSchemaName()) - .setTableName(taskRecord.getTableName()) - .setTaskStatus(PTable.TaskStatus.STARTED.toString()) - .setData(taskRecord.getData()) - .setPriority(taskRecord.getPriority()) - .setStartTs(taskRecord.getTimeStamp()) - .setEndTs(null) - .setAccessCheckEnabled(true) - .build()); - - // invokes the method at runtime - result = (TaskResult) runMethod.invoke(obj, taskRecord); - } - - if (result != null) { - String taskStatus = PTable.TaskStatus.FAILED.toString(); - if (result.getResultCode() == TaskResultCode.SUCCESS) { - taskStatus = PTable.TaskStatus.COMPLETED.toString(); - } else if (result.getResultCode() == TaskResultCode.SKIPPED) { - // We will pickup this task again - continue; - } - - setEndTaskStatus(connForTask, taskRecord, taskStatus); - } - - } - catch (Throwable t) { - LOGGER.warn("Exception while running self healingtask. " + - "It will be retried in the next system task table scan : " + - " taskType : " + taskRecord.getTaskType().name() + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) + - " and timestamp " + taskRecord.getTimeStamp().toString(), t); - } - } - } catch (Throwable t) { - LOGGER.error("SelfHealingTask failed!", t); - } finally { - if (connForTask != null) { - try { - connForTask.close(); - } catch (SQLException ignored) { - LOGGER.debug("SelfHealingTask can't close connection", ignored); - } - } + setEndTaskStatus(connForTask, taskRecord, taskStatus); } - } - public static void setEndTaskStatus(PhoenixConnection connForTask, Task.TaskRecord taskRecord, String taskStatus) - throws IOException, SQLException { - // update data with details. 
- String data = taskRecord.getData(); - if (Strings.isNullOrEmpty(data)) { - data = "{}"; - } - JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(data); - ((ObjectNode) jsonNode).put(TASK_DETAILS, taskStatus); - data = jsonNode.toString(); - - Timestamp endTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); - ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(connForTask) - .setTaskType(taskRecord.getTaskType()) - .setTenantId(taskRecord.getTenantId()) - .setSchemaName(taskRecord.getSchemaName()) - .setTableName(taskRecord.getTableName()) - .setTaskStatus(taskStatus) - .setData(data) - .setPriority(taskRecord.getPriority()) - .setStartTs(taskRecord.getTimeStamp()) - .setEndTs(endTs) - .setAccessCheckEnabled(false) - .build()); + } catch (Throwable t) { + LOGGER.warn("Exception while running self healingtask. " + + "It will be retried in the next system task table scan : " + " taskType : " + + taskRecord.getTaskType().name() + taskRecord.getSchemaName() + "." + + taskRecord.getTableName() + " with tenant id " + + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) + + " and timestamp " + taskRecord.getTimeStamp().toString(), t); + } + } + } catch (Throwable t) { + LOGGER.error("SelfHealingTask failed!", t); + } finally { + if (connForTask != null) { + try { + connForTask.close(); + } catch (SQLException ignored) { + LOGGER.debug("SelfHealingTask can't close connection", ignored); + } } + } + } + + public static void setEndTaskStatus(PhoenixConnection connForTask, Task.TaskRecord taskRecord, + String taskStatus) throws IOException, SQLException { + // update data with details. + String data = taskRecord.getData(); + if (Strings.isNullOrEmpty(data)) { + data = "{}"; + } + JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(data); + ((ObjectNode) jsonNode).put(TASK_DETAILS, taskStatus); + data = jsonNode.toString(); + + Timestamp endTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis()); + ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder().setConn(connForTask) + .setTaskType(taskRecord.getTaskType()).setTenantId(taskRecord.getTenantId()) + .setSchemaName(taskRecord.getSchemaName()).setTableName(taskRecord.getTableName()) + .setTaskStatus(taskStatus).setData(data).setPriority(taskRecord.getPriority()) + .setStartTs(taskRecord.getTimeStamp()).setEndTs(endTs).setAccessCheckEnabled(false) + .build()); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TephraTransactionalProcessor.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TephraTransactionalProcessor.java index ad27c612419..86fbccfaa55 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TephraTransactionalProcessor.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/TephraTransactionalProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,17 +23,18 @@ import org.slf4j.LoggerFactory; /** - * Tephra support has been removed, see PHOENIX-6627. 
However we preserve a class - * of this name for now with a no-op implementation, in case the user has not - * followed proper upgrade or migration procedure for former Tephra managed transactional - * tables. Although expected but unavailable functionality will be missing, regionservers - * will not crash due to a failure to load a coprocessor of this name. + * Tephra support has been removed, see PHOENIX-6627. However we preserve a class of this name for + * now with a no-op implementation, in case the user has not followed proper upgrade or migration + * procedure for former Tephra managed transactional tables. Although expected but unavailable + * functionality will be missing, regionservers will not crash due to a failure to load a + * coprocessor of this name. */ public class TephraTransactionalProcessor implements RegionObserver, RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(TephraTransactionalProcessor.class); static { - LOG.error("Tephra support has been removed, see https://issues.apache.org/jira/browse/PHOENIX-6627."); + LOG.error( + "Tephra support has been removed, see https://issues.apache.org/jira/browse/PHOENIX-6627."); } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java index ba56124424f..d4c76673dd4 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,6 +33,11 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.execute.TupleProjector; import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure; @@ -43,14 +48,9 @@ import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager; import org.apache.phoenix.hbase.index.parallel.WaitForCompletionTaskRunner; import org.apache.phoenix.hbase.index.table.HTableFactory; +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.hbase.index.write.IndexWriterUtils; import org.apache.phoenix.index.IndexMaintainer; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RegionScanner; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.ScanUtil; @@ -59,176 
+59,167 @@ import org.slf4j.LoggerFactory; /** - * This is an index table region scanner which scans index table rows locally and then extracts - * data table row keys from them. Using the data table row keys, the data table rows are scanned - * using the HBase client available to region servers. + * This is an index table region scanner which scans index table rows locally and then extracts data + * table row keys from them. Using the data table row keys, the data table rows are scanned using + * the HBase client available to region servers. */ public class UncoveredGlobalIndexRegionScanner extends UncoveredIndexRegionScanner { - private static final Logger LOGGER = - LoggerFactory.getLogger(UncoveredGlobalIndexRegionScanner.class); - public static final String NUM_CONCURRENT_INDEX_THREADS_CONF_KEY = "phoenix.index.threads.max"; - public static final int DEFAULT_CONCURRENT_INDEX_THREADS = 16; - public static final String INDEX_ROW_COUNTS_PER_TASK_CONF_KEY = "phoenix.index.row.count.per.task"; - public static final int DEFAULT_INDEX_ROW_COUNTS_PER_TASK = 2048; + private static final Logger LOGGER = + LoggerFactory.getLogger(UncoveredGlobalIndexRegionScanner.class); + public static final String NUM_CONCURRENT_INDEX_THREADS_CONF_KEY = "phoenix.index.threads.max"; + public static final int DEFAULT_CONCURRENT_INDEX_THREADS = 16; + public static final String INDEX_ROW_COUNTS_PER_TASK_CONF_KEY = + "phoenix.index.row.count.per.task"; + public static final int DEFAULT_INDEX_ROW_COUNTS_PER_TASK = 2048; - protected byte[][] regionEndKeys; - protected final Table dataHTable; - protected final int rowCountPerTask; - protected final TaskRunner pool; - protected String exceptionMessage; - protected final HTableFactory hTableFactory; + protected byte[][] regionEndKeys; + protected final Table dataHTable; + protected final int rowCountPerTask; + protected final TaskRunner pool; + protected String exceptionMessage; + protected final HTableFactory hTableFactory; - // This relies on Hadoop Configuration to handle warning about deprecated configs and - // to set the correct non-deprecated configs when an old one shows up. - static { - Configuration.addDeprecation("index.threads.max", NUM_CONCURRENT_INDEX_THREADS_CONF_KEY); - Configuration.addDeprecation("index.row.count.per.task", INDEX_ROW_COUNTS_PER_TASK_CONF_KEY); - } + // This relies on Hadoop Configuration to handle warning about deprecated configs and + // to set the correct non-deprecated configs when an old one shows up. 
+ static { + Configuration.addDeprecation("index.threads.max", NUM_CONCURRENT_INDEX_THREADS_CONF_KEY); + Configuration.addDeprecation("index.row.count.per.task", INDEX_ROW_COUNTS_PER_TASK_CONF_KEY); + } - public UncoveredGlobalIndexRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final Scan dataTableScan, - final TupleProjector tupleProjector, - final IndexMaintainer indexMaintainer, - final byte[][] viewConstants, - final ImmutableBytesWritable ptr, - final long pageSizeMs, - final long queryLimit) - throws IOException { - super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, - viewConstants, ptr, pageSizeMs, queryLimit); - final Configuration config = env.getConfiguration(); - hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env); - rowCountPerTask = config.getInt(INDEX_ROW_COUNTS_PER_TASK_CONF_KEY, - DEFAULT_INDEX_ROW_COUNTS_PER_TASK); + public UncoveredGlobalIndexRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, final Scan dataTableScan, + final TupleProjector tupleProjector, final IndexMaintainer indexMaintainer, + final byte[][] viewConstants, final ImmutableBytesWritable ptr, final long pageSizeMs, + final long queryLimit) throws IOException { + super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, + viewConstants, ptr, pageSizeMs, queryLimit); + final Configuration config = env.getConfiguration(); + hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env); + rowCountPerTask = + config.getInt(INDEX_ROW_COUNTS_PER_TASK_CONF_KEY, DEFAULT_INDEX_ROW_COUNTS_PER_TASK); - pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor( - new ThreadPoolBuilder("Uncovered Global Index", - env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_THREADS_CONF_KEY, - DEFAULT_CONCURRENT_INDEX_THREADS).setCoreTimeout( - INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); - byte[] dataTableName = scan.getAttribute(PHYSICAL_DATA_TABLE_NAME); - dataHTable = hTableFactory.getTable(new ImmutableBytesPtr(dataTableName)); - regionEndKeys = hTableFactory.getConnection().getRegionLocator(dataHTable.getName()).getEndKeys(); - if (indexMaintainer.isUncovered()) { - // Empty column should also be added to the data columns to join for uncovered - // global indexes. This is required to verify the index row against the data table row and repair it - ScanUtil.addEmptyColumnToScan(dataTableScan, indexMaintainer.getDataEmptyKeyValueCF(), - indexMaintainer.getEmptyKeyValueQualifierForDataTable()); - } + pool = new WaitForCompletionTaskRunner(ThreadPoolManager + .getExecutor(new ThreadPoolBuilder("Uncovered Global Index", env.getConfiguration()) + .setMaxThread(NUM_CONCURRENT_INDEX_THREADS_CONF_KEY, DEFAULT_CONCURRENT_INDEX_THREADS) + .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env)); + byte[] dataTableName = scan.getAttribute(PHYSICAL_DATA_TABLE_NAME); + dataHTable = hTableFactory.getTable(new ImmutableBytesPtr(dataTableName)); + regionEndKeys = + hTableFactory.getConnection().getRegionLocator(dataHTable.getName()).getEndKeys(); + if (indexMaintainer.isUncovered()) { + // Empty column should also be added to the data columns to join for uncovered + // global indexes. 
This is required to verify the index row against the data table row and + // repair it + ScanUtil.addEmptyColumnToScan(dataTableScan, indexMaintainer.getDataEmptyKeyValueCF(), + indexMaintainer.getEmptyKeyValueQualifierForDataTable()); } + } - @Override - public void close() throws IOException { - innerScanner.close(); - hTableFactory.shutdown(); - if (dataHTable != null) { - dataHTable.close(); - } - this.pool.stop("UncoveredGlobalIndexRegionScanner is closing"); + @Override + public void close() throws IOException { + innerScanner.close(); + hTableFactory.shutdown(); + if (dataHTable != null) { + dataHTable.close(); } + this.pool.stop("UncoveredGlobalIndexRegionScanner is closing"); + } - protected void scanDataRows(Collection dataRowKeys, long startTime) throws IOException { - Scan dataScan = prepareDataTableScan(dataRowKeys); - if (dataScan == null) { - return; + protected void scanDataRows(Collection dataRowKeys, long startTime) throws IOException { + Scan dataScan = prepareDataTableScan(dataRowKeys); + if (dataScan == null) { + return; + } + try (ResultScanner resultScanner = dataHTable.getScanner(dataScan)) { + for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) { + if (ScanUtil.isDummy(result)) { + state = State.SCANNING_DATA_INTERRUPTED; + break; } - try (ResultScanner resultScanner = dataHTable.getScanner(dataScan)) { - for (Result result = resultScanner.next(); (result != null); - result = resultScanner.next()) { - if (ScanUtil.isDummy(result)) { - state = State.SCANNING_DATA_INTERRUPTED; - break; - } - dataRows.put(new ImmutableBytesPtr(result.getRow()), result); - if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { - state = State.SCANNING_DATA_INTERRUPTED; - break; - } - } - if (state == State.SCANNING_DATA_INTERRUPTED) { - LOGGER.info("One of the scan tasks in UncoveredGlobalIndexRegionScanner" - + " for region " + region.getRegionInfo().getRegionNameAsString() - + " could not complete on time (in " + pageSizeMs + " ms) and" - + " will be resubmitted"); - } - } catch (Throwable t) { - exceptionMessage = "scanDataRows fails for at least one task"; - ClientUtil.throwIOException(dataHTable.getName().toString(), t); + dataRows.put(new ImmutableBytesPtr(result.getRow()), result); + if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { + state = State.SCANNING_DATA_INTERRUPTED; + break; } + } + if (state == State.SCANNING_DATA_INTERRUPTED) { + LOGGER.info("One of the scan tasks in UncoveredGlobalIndexRegionScanner" + " for region " + + region.getRegionInfo().getRegionNameAsString() + " could not complete on time (in " + + pageSizeMs + " ms) and" + " will be resubmitted"); + } + } catch (Throwable t) { + exceptionMessage = "scanDataRows fails for at least one task"; + ClientUtil.throwIOException(dataHTable.getName().toString(), t); } + } - private void addTasksForScanningDataTableRowsInParallel(TaskBatch tasks, - final Set dataRowKeys, - final long startTime) { - tasks.add(new Task() { - @Override - public Boolean call() throws Exception { - try { - //in HBase 1.x we could check if the coproc environment was closed or aborted, - //but in HBase 2.x the coproc environment can't check region server services - if (Thread.currentThread().isInterrupted()) { - exceptionMessage = "Pool closed, not retrieving data table rows for " - + region.getRegionInfo().getRegionNameAsString(); - throw new IOException(exceptionMessage); - } - scanDataRows(dataRowKeys, startTime); - } catch (Exception e) { - throw 
e; - } - return Boolean.TRUE; - } - }); - } - - - protected void submitTasks(TaskBatch tasks) throws IOException { - Pair, List>> resultsAndFutures = null; + private void addTasksForScanningDataTableRowsInParallel(TaskBatch tasks, + final Set dataRowKeys, final long startTime) { + tasks.add(new Task() { + @Override + public Boolean call() throws Exception { try { - LOGGER.debug("Waiting on index tasks to complete..."); - resultsAndFutures = this.pool.submitUninterruptible(tasks); - } catch (ExecutionException e) { - throw new RuntimeException( - "Should not fail on the results while using a WaitForCompletionTaskRunner", e); - } catch (EarlyExitFailure e) { - throw new RuntimeException("Stopped while waiting for batch, quitting!", e); - } - int index = 0; - for (Boolean result : resultsAndFutures.getFirst()) { - if (result == null) { - Throwable cause = ServerUtil.getExceptionFromFailedFuture( - resultsAndFutures.getSecond().get(index)); - // there was a failure - throw new IOException(exceptionMessage == null ? "" : exceptionMessage, cause); - } - index++; + // in HBase 1.x we could check if the coproc environment was closed or aborted, + // but in HBase 2.x the coproc environment can't check region server services + if (Thread.currentThread().isInterrupted()) { + exceptionMessage = "Pool closed, not retrieving data table rows for " + + region.getRegionInfo().getRegionNameAsString(); + throw new IOException(exceptionMessage); + } + scanDataRows(dataRowKeys, startTime); + } catch (Exception e) { + throw e; } + return Boolean.TRUE; + } + }); + } + + protected void submitTasks(TaskBatch tasks) throws IOException { + Pair, List>> resultsAndFutures = null; + try { + LOGGER.debug("Waiting on index tasks to complete..."); + resultsAndFutures = this.pool.submitUninterruptible(tasks); + } catch (ExecutionException e) { + throw new RuntimeException( + "Should not fail on the results while using a WaitForCompletionTaskRunner", e); + } catch (EarlyExitFailure e) { + throw new RuntimeException("Stopped while waiting for batch, quitting!", e); + } + int index = 0; + for (Boolean result : resultsAndFutures.getFirst()) { + if (result == null) { + Throwable cause = + ServerUtil.getExceptionFromFailedFuture(resultsAndFutures.getSecond().get(index)); + // there was a failure + throw new IOException(exceptionMessage == null ? 
"" : exceptionMessage, cause); + } + index++; } + } - @Override - protected void scanDataTableRows(long startTime) throws IOException { - if (indexToDataRowKeyMap.size() == 0) { - state = State.READY; - return; - } - TreeSet dataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - for (byte[] dataRowKey: indexToDataRowKeyMap.values()) { - dataRowKeys.add(dataRowKey); - } - List> setList = IndexRepairRegionScanner.getPerTaskDataRowKeys(dataRowKeys, - regionEndKeys, rowCountPerTask); - int taskCount = setList.size(); - TaskBatch tasks = new TaskBatch<>(taskCount); - for (int i = 0; i < taskCount; i++) { - addTasksForScanningDataTableRowsInParallel(tasks, setList.get(i), startTime); - } - submitTasks(tasks); - if (state == State.SCANNING_DATA_INTERRUPTED) { - state = State.SCANNING_DATA; - } else { - state = State.READY; - } + @Override + protected void scanDataTableRows(long startTime) throws IOException { + if (indexToDataRowKeyMap.size() == 0) { + state = State.READY; + return; + } + TreeSet dataRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + for (byte[] dataRowKey : indexToDataRowKeyMap.values()) { + dataRowKeys.add(dataRowKey); + } + List> setList = + IndexRepairRegionScanner.getPerTaskDataRowKeys(dataRowKeys, regionEndKeys, rowCountPerTask); + int taskCount = setList.size(); + TaskBatch tasks = new TaskBatch<>(taskCount); + for (int i = 0; i < taskCount; i++) { + addTasksForScanningDataTableRowsInParallel(tasks, setList.get(i), startTime); + } + submitTasks(tasks); + if (state == State.SCANNING_DATA_INTERRUPTED) { + state = State.SCANNING_DATA; + } else { + state = State.READY; } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java index d8cab301f6b..f83b85c1b7e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredIndexRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,21 @@ */ package org.apache.phoenix.coprocessor; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.INDEX_PAGE_ROWS; +import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS; +import static org.apache.phoenix.query.QueryServices.INDEX_PAGE_SIZE_IN_ROWS; +import static org.apache.phoenix.util.ScanUtil.getDummyResult; +import static org.apache.phoenix.util.ScanUtil.isDummy; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -54,439 +69,411 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +public abstract class UncoveredIndexRegionScanner extends BaseRegionScanner { + private static final Logger LOGGER = LoggerFactory.getLogger(UncoveredIndexRegionScanner.class); -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.INDEX_PAGE_ROWS; -import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.SERVER_PAGE_SIZE_MS; -import static org.apache.phoenix.query.QueryServices.INDEX_PAGE_SIZE_IN_ROWS; -import static org.apache.phoenix.util.ScanUtil.getDummyResult; -import static org.apache.phoenix.util.ScanUtil.isDummy; + /** + * The states of the processing a page of index rows + */ + protected enum State { + INITIAL, + SCANNING_INDEX, + SCANNING_DATA, + SCANNING_DATA_INTERRUPTED, + READY + } -public abstract class UncoveredIndexRegionScanner extends BaseRegionScanner { - private static final Logger LOGGER = - LoggerFactory.getLogger(UncoveredIndexRegionScanner.class); + protected State state = State.INITIAL; + protected final byte[][] viewConstants; + protected final RegionCoprocessorEnvironment env; + protected long pageSizeInRows; + protected final long ageThreshold; + protected byte[] emptyCF; + protected byte[] emptyCQ; + protected final Scan scan; + protected final Scan dataTableScan; + protected final RegionScanner innerScanner; + protected final Region region; + protected final IndexMaintainer indexMaintainer; + protected final TupleProjector tupleProjector; + protected final ImmutableBytesWritable ptr; + protected List> indexRows = null; + protected Map dataRows = null; + protected Iterator> indexRowIterator = null; + protected Map indexToDataRowKeyMap = null; + protected int indexRowCount = 0; + protected final long pageSizeMs; + protected 
byte[] lastIndexRowKey = null; + private byte[] previousResultRowKey = null; + private final byte[] initStartRowKey; + private final boolean includeInitStartRowKey; - /** - * The states of the processing a page of index rows - */ - protected enum State { - INITIAL, SCANNING_INDEX, SCANNING_DATA, SCANNING_DATA_INTERRUPTED, READY + public UncoveredIndexRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, final Scan dataTableScan, + final TupleProjector tupleProjector, final IndexMaintainer indexMaintainer, + final byte[][] viewConstants, final ImmutableBytesWritable ptr, final long pageSizeMs, + final long queryLimit) { + super(innerScanner); + final Configuration config = env.getConfiguration(); + + byte[] pageSizeFromScan = scan.getAttribute(INDEX_PAGE_ROWS); + if (pageSizeFromScan != null) { + pageSizeInRows = (int) Bytes.toLong(pageSizeFromScan); + } else { + pageSizeInRows = (int) config.getLong(INDEX_PAGE_SIZE_IN_ROWS, + QueryServicesOptions.DEFAULT_INDEX_PAGE_SIZE_IN_ROWS); + } + if (queryLimit != -1) { + pageSizeInRows = Long.min(pageSizeInRows, queryLimit); } - protected State state = State.INITIAL; - protected final byte[][] viewConstants; - protected final RegionCoprocessorEnvironment env; - protected long pageSizeInRows; - protected final long ageThreshold; - protected byte[] emptyCF; - protected byte[] emptyCQ; - protected final Scan scan; - protected final Scan dataTableScan; - protected final RegionScanner innerScanner; - protected final Region region; - protected final IndexMaintainer indexMaintainer; - protected final TupleProjector tupleProjector; - protected final ImmutableBytesWritable ptr; - protected List> indexRows = null; - protected Map dataRows = null; - protected Iterator> indexRowIterator = null; - protected Map indexToDataRowKeyMap = null; - protected int indexRowCount = 0; - protected final long pageSizeMs; - protected byte[] lastIndexRowKey = null; - private byte[] previousResultRowKey = null; - private final byte[] initStartRowKey; - private final boolean includeInitStartRowKey; - public UncoveredIndexRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final Scan dataTableScan, - final TupleProjector tupleProjector, - final IndexMaintainer indexMaintainer, - final byte[][] viewConstants, - final ImmutableBytesWritable ptr, - final long pageSizeMs, - final long queryLimit) { - super(innerScanner); - final Configuration config = env.getConfiguration(); + ageThreshold = env.getConfiguration().getLong( + QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS); + emptyCF = scan.getAttribute(EMPTY_COLUMN_FAMILY_NAME); + emptyCQ = scan.getAttribute(EMPTY_COLUMN_QUALIFIER_NAME); + this.indexMaintainer = indexMaintainer; + this.viewConstants = viewConstants; + this.scan = scan; + this.dataTableScan = dataTableScan; + this.innerScanner = innerScanner; + this.region = region; + this.env = env; + this.ptr = ptr; + this.tupleProjector = tupleProjector; + this.pageSizeMs = pageSizeMs; + // If scan start rowkey is empty, use region boundaries. Reverse region boundaries + // for reverse scan. 
+ this.initStartRowKey = ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, region); + this.includeInitStartRowKey = scan.includeStartRow(); + } - byte[] pageSizeFromScan = - scan.getAttribute(INDEX_PAGE_ROWS); - if (pageSizeFromScan != null) { - pageSizeInRows = (int) Bytes.toLong(pageSizeFromScan); - } else { - pageSizeInRows = (int) - config.getLong(INDEX_PAGE_SIZE_IN_ROWS, - QueryServicesOptions.DEFAULT_INDEX_PAGE_SIZE_IN_ROWS); - } - if (queryLimit != -1) { - pageSizeInRows = Long.min(pageSizeInRows, queryLimit); - } + @Override + public long getMvccReadPoint() { + return innerScanner.getMvccReadPoint(); + } - ageThreshold = env.getConfiguration().getLong( - QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS); - emptyCF = scan.getAttribute(EMPTY_COLUMN_FAMILY_NAME); - emptyCQ = scan.getAttribute(EMPTY_COLUMN_QUALIFIER_NAME); - this.indexMaintainer = indexMaintainer; - this.viewConstants = viewConstants; - this.scan = scan; - this.dataTableScan = dataTableScan; - this.innerScanner = innerScanner; - this.region = region; - this.env = env; - this.ptr = ptr; - this.tupleProjector = tupleProjector; - this.pageSizeMs = pageSizeMs; - // If scan start rowkey is empty, use region boundaries. Reverse region boundaries - // for reverse scan. - this.initStartRowKey = - ServerUtil.getScanStartRowKeyFromScanOrRegionBoundaries(scan, region); - this.includeInitStartRowKey = scan.includeStartRow(); - } + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } - @Override - public long getMvccReadPoint() { - return innerScanner.getMvccReadPoint(); - } - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); - } + @Override + public boolean isFilterDone() { + return false; + } - @Override - public boolean isFilterDone() { - return false; - } + @Override + public void close() throws IOException { + innerScanner.close(); + } - @Override - public void close() throws IOException { - innerScanner.close(); - } + @Override + public long getMaxResultSize() { + return innerScanner.getMaxResultSize(); + } - @Override - public long getMaxResultSize() { - return innerScanner.getMaxResultSize(); - } + @Override + public int getBatch() { + return innerScanner.getBatch(); + } - @Override - public int getBatch() { - return innerScanner.getBatch(); - } + protected abstract void scanDataTableRows(long startTime) throws IOException; - protected abstract void scanDataTableRows(long startTime) throws IOException; + protected Scan prepareDataTableScan(Collection dataRowKeys) throws IOException { + return prepareDataTableScan(dataRowKeys, false); + } - protected Scan prepareDataTableScan(Collection dataRowKeys) throws IOException { - return prepareDataTableScan(dataRowKeys, false); + protected Scan prepareDataTableScan(Collection dataRowKeys, + boolean includeMultipleVersions) throws IOException { + List keys = new ArrayList<>(dataRowKeys.size()); + for (byte[] dataRowKey : dataRowKeys) { + // If the data table scan was interrupted because of paging we retry the scan + // but on retry we should only fetch data table rows which we haven't already + // fetched. 
+ if (!dataRows.containsKey(new ImmutableBytesPtr(dataRowKey))) { + keys.add(PVarbinary.INSTANCE.getKeyRange(dataRowKey, SortOrder.ASC)); + } + } + if (!keys.isEmpty()) { + ScanRanges scanRanges = ScanRanges.createPointLookup(keys); + Scan dataScan = new Scan(dataTableScan); + dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); + scanRanges.initializeScan(dataScan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + dataScan.setFilter(new SkipScanFilter(skipScanFilter, includeMultipleVersions, true)); + dataScan.setAttribute(SERVER_PAGE_SIZE_MS, Bytes.toBytes(Long.valueOf(pageSizeMs))); + return dataScan; + } else { + LOGGER.info("All data rows have already been fetched"); + return null; } + } - protected Scan prepareDataTableScan(Collection dataRowKeys, - boolean includeMultipleVersions) throws IOException { - List keys = new ArrayList<>(dataRowKeys.size()); - for (byte[] dataRowKey : dataRowKeys) { - // If the data table scan was interrupted because of paging we retry the scan - // but on retry we should only fetch data table rows which we haven't already - // fetched. - if (!dataRows.containsKey(new ImmutableBytesPtr(dataRowKey))) { - keys.add(PVarbinary.INSTANCE.getKeyRange(dataRowKey, SortOrder.ASC)); - } + protected boolean scanIndexTableRows(List result, final long startTime, + final byte[] actualStartKey, final int offset) throws IOException { + boolean hasMore = false; + if (actualStartKey != null) { + do { + hasMore = innerScanner.nextRaw(result); + if (result.isEmpty()) { + return hasMore; + } + if (ScanUtil.isDummy(result)) { + return true; } - if (!keys.isEmpty()) { - ScanRanges scanRanges = ScanRanges.createPointLookup(keys); - Scan dataScan = new Scan(dataTableScan); - dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax()); - scanRanges.initializeScan(dataScan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - dataScan.setFilter(new SkipScanFilter(skipScanFilter, includeMultipleVersions, true)); - dataScan.setAttribute(SERVER_PAGE_SIZE_MS, - Bytes.toBytes(Long.valueOf(pageSizeMs))); - return dataScan; + Cell firstCell = result.get(0); + if ( + Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), + firstCell.getRowLength(), actualStartKey, 0, actualStartKey.length) < 0 + ) { + result.clear(); + if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { + byte[] rowKey = CellUtil.cloneRow(firstCell); + ScanUtil.getDummyResult(rowKey, result); + return true; + } } else { - LOGGER.info("All data rows have already been fetched"); - return null; + break; } + } while (hasMore); } - protected boolean scanIndexTableRows(List result, - final long startTime, - final byte[] actualStartKey, - final int offset) throws IOException { - boolean hasMore = false; - if (actualStartKey != null) { - do { - hasMore = innerScanner.nextRaw(result); - if (result.isEmpty()) { - return hasMore; - } - if (ScanUtil.isDummy(result)) { - return true; - } - Cell firstCell = result.get(0); - if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), - firstCell.getRowLength(), actualStartKey, 0, actualStartKey.length) < 0) { - result.clear(); - if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { - byte[] rowKey = CellUtil.cloneRow(firstCell); - ScanUtil.getDummyResult(rowKey, result); - return true; - } - } else { - break; - } - } while (hasMore); + do { + List row = new ArrayList(); + if (result.isEmpty()) { + hasMore = innerScanner.nextRaw(row); + 
} else { + row.addAll(result); + result.clear(); + } + if (!row.isEmpty()) { + if (isDummy(row)) { + result.addAll(row); + // We got a dummy request from lower layers. This means that + // the scan took more than pageSizeMs. Just return true here. + // The client will drop this dummy request and continue to scan. + // Then the lower layer scanner will continue + // wherever it stopped due to this dummy request + return true; + } + Cell firstCell = row.get(0); + lastIndexRowKey = ImmutableBytesPtr.copyBytesIfNecessary(firstCell.getRowArray(), + firstCell.getRowOffset() + offset, firstCell.getRowLength() - offset); + indexToDataRowKeyMap.put(offset == 0 ? lastIndexRowKey : CellUtil.cloneRow(firstCell), + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(lastIndexRowKey), + viewConstants)); + indexRows.add(row); + indexRowCount++; + if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { + getDummyResult(lastIndexRowKey, result); + // We do not need to change the state, State.SCANNING_INDEX + // since we will continue scanning the index table after + // the client drops the dummy request and then calls the next + // method on its ResultScanner within ScanningResultIterator + return true; } + } + } while (hasMore && indexRowCount < pageSizeInRows); + return hasMore; + } - do { - List row = new ArrayList(); - if (result.isEmpty()) { - hasMore = innerScanner.nextRaw(row); - } else { - row.addAll(result); - result.clear(); - } - if (!row.isEmpty()) { - if (isDummy(row)) { - result.addAll(row); - // We got a dummy request from lower layers. This means that - // the scan took more than pageSizeMs. Just return true here. - // The client will drop this dummy request and continue to scan. - // Then the lower layer scanner will continue - // wherever it stopped due to this dummy request - return true; - } - Cell firstCell = row.get(0); - lastIndexRowKey = ImmutableBytesPtr.copyBytesIfNecessary(firstCell.getRowArray(), - firstCell.getRowOffset() + offset, - firstCell.getRowLength() - offset); - indexToDataRowKeyMap.put( - offset == 0 ? 
lastIndexRowKey : CellUtil.cloneRow(firstCell), - indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(lastIndexRowKey), - viewConstants)); - indexRows.add(row); - indexRowCount++; - if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) - >= pageSizeMs) { - getDummyResult(lastIndexRowKey, result); - // We do not need to change the state, State.SCANNING_INDEX - // since we will continue scanning the index table after - // the client drops the dummy request and then calls the next - // method on its ResultScanner within ScanningResultIterator - return true; - } - } - } while (hasMore && indexRowCount < pageSizeInRows); - return hasMore; - } + protected boolean scanIndexTableRows(List result, final long startTime) throws IOException { + return scanIndexTableRows(result, startTime, null, 0); + } - protected boolean scanIndexTableRows(List result, - final long startTime) throws IOException { - return scanIndexTableRows(result, startTime, null, 0); + private boolean verifyIndexRowAndRepairIfNecessary(Result dataRow, byte[] indexRowKey, + long indexTimestamp) throws IOException { + Put put = new Put(dataRow.getRow()); + for (Cell cell : dataRow.rawCells()) { + put.add(cell); } - - private boolean verifyIndexRowAndRepairIfNecessary(Result dataRow, byte[] indexRowKey, - long indexTimestamp) - throws IOException { - Put put = new Put(dataRow.getRow()); - for (Cell cell : dataRow.rawCells()) { - put.add(cell); - } - if (indexMaintainer.checkIndexRow(indexRowKey, put)) { - if (IndexUtil.getMaxTimestamp(put) != indexTimestamp) { - Mutation[] mutations; - Put indexPut = new Put(indexRowKey); - indexPut.addColumn(emptyCF, emptyCQ, indexTimestamp, QueryConstants.VERIFIED_BYTES); - if ((EnvironmentEdgeManager.currentTimeMillis() - indexTimestamp) > ageThreshold) { - Delete indexDelete = indexMaintainer.buildRowDeleteMutation(indexRowKey, - IndexMaintainer.DeleteType.SINGLE_VERSION, indexTimestamp); - mutations = new Mutation[]{indexPut, indexDelete}; - } else { - mutations = new Mutation[]{indexPut}; - } - region.batchMutate(mutations); - } - return true; - } - if (indexMaintainer.isAgedEnough(IndexUtil.getMaxTimestamp(put), ageThreshold)) { - region.delete(indexMaintainer.createDelete(indexRowKey, IndexUtil.getMaxTimestamp(put), false)); + if (indexMaintainer.checkIndexRow(indexRowKey, put)) { + if (IndexUtil.getMaxTimestamp(put) != indexTimestamp) { + Mutation[] mutations; + Put indexPut = new Put(indexRowKey); + indexPut.addColumn(emptyCF, emptyCQ, indexTimestamp, QueryConstants.VERIFIED_BYTES); + if ((EnvironmentEdgeManager.currentTimeMillis() - indexTimestamp) > ageThreshold) { + Delete indexDelete = indexMaintainer.buildRowDeleteMutation(indexRowKey, + IndexMaintainer.DeleteType.SINGLE_VERSION, indexTimestamp); + mutations = new Mutation[] { indexPut, indexDelete }; + } else { + mutations = new Mutation[] { indexPut }; } - return false; + region.batchMutate(mutations); + } + return true; } + if (indexMaintainer.isAgedEnough(IndexUtil.getMaxTimestamp(put), ageThreshold)) { + region + .delete(indexMaintainer.createDelete(indexRowKey, IndexUtil.getMaxTimestamp(put), false)); + } + return false; + } - protected boolean getNextCoveredIndexRow(List result) throws IOException { - if (indexRowIterator.hasNext()) { - List indexRow = indexRowIterator.next(); - result.addAll(indexRow); - try { - byte[] indexRowKey = CellUtil.cloneRow(indexRow.get(0)); - Result dataRow = dataRows.get(new ImmutableBytesPtr( - indexToDataRowKeyMap.get(indexRowKey))); + protected boolean 
getNextCoveredIndexRow(List result) throws IOException { + if (indexRowIterator.hasNext()) { + List indexRow = indexRowIterator.next(); + result.addAll(indexRow); + try { + byte[] indexRowKey = CellUtil.cloneRow(indexRow.get(0)); + Result dataRow = dataRows.get(new ImmutableBytesPtr(indexToDataRowKeyMap.get(indexRowKey))); - if (dataRow != null) { - long ts = indexRow.get(0).getTimestamp(); - if (!indexMaintainer.isUncovered() - || verifyIndexRowAndRepairIfNecessary(dataRow, indexRowKey, ts)) { - if (tupleProjector != null) { - IndexUtil.addTupleAsOneCell(result, new ResultTuple(dataRow), - tupleProjector, ptr); - } - } else { - result.clear(); - } - } else { - if (indexMaintainer.isUncovered()) { - long ts = indexRow.get(0).getTimestamp(); - // Since we also scan the empty column for uncovered global indexes, this mean the data row - // does not exist. Delete the index row if the index is an uncovered global index - if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { - region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); - } - result.clear(); - } else { - // The data row satisfying the scan does not exist. This could be because - // the data row may not include the columns corresponding to the uncovered - // index columns either. Just return the index row. Nothing to do here - } - } - } catch (Throwable e) { - LOGGER.error("Exception in UncoveredIndexRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - throw e; + if (dataRow != null) { + long ts = indexRow.get(0).getTimestamp(); + if ( + !indexMaintainer.isUncovered() + || verifyIndexRowAndRepairIfNecessary(dataRow, indexRowKey, ts) + ) { + if (tupleProjector != null) { + IndexUtil.addTupleAsOneCell(result, new ResultTuple(dataRow), tupleProjector, ptr); } - return true; + } else { + result.clear(); + } } else { - return false; + if (indexMaintainer.isUncovered()) { + long ts = indexRow.get(0).getTimestamp(); + // Since we also scan the empty column for uncovered global indexes, this mean the data + // row + // does not exist. Delete the index row if the index is an uncovered global index + if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { + region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); + } + result.clear(); + } else { + // The data row satisfying the scan does not exist. This could be because + // the data row may not include the columns corresponding to the uncovered + // index columns either. Just return the index row. Nothing to do here + } } + } catch (Throwable e) { + LOGGER.error("Exception in UncoveredIndexRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + throw e; + } + return true; + } else { + return false; } + } - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); - } + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } - /** - * A page of index rows are scanned and then their corresponding data table rows are retrieved - * from the data table regions in parallel. These data rows are then joined with index rows. - * The join is for adding uncovered columns to index rows. - * - * This implementation conforms to server paging such that if the server side operation takes - * more than pageSizeInMs, a dummy result is returned to signal the client that more work - * to do on the server side. This is done to prevent RPC timeouts. 
- * - * @param result - * @return boolean to indicate if there are more rows to scan - * @throws IOException - */ - @Override - public boolean next(List result) throws IOException { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - boolean hasMore; - region.startRegionOperation(); - try { - synchronized (innerScanner) { - if (state == State.READY && !indexRowIterator.hasNext()) { - state = State.INITIAL; - } - if (state == State.INITIAL) { - indexRowCount = 0; - indexRows = new ArrayList<>(); - dataRows = Maps.newConcurrentMap(); - indexToDataRowKeyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - state = State.SCANNING_INDEX; - } - if (state == State.SCANNING_INDEX) { - hasMore = scanIndexTableRows(result, startTime); - if (isDummy(result)) { - updateDummyWithPrevRowKey(result, initStartRowKey, includeInitStartRowKey, - scan); - return hasMore; - } - state = State.SCANNING_DATA; - } - if (state == State.SCANNING_DATA) { - scanDataTableRows(startTime); - indexRowIterator = indexRows.iterator(); - } - if (state == State.READY) { - boolean moreRows = getNextCoveredIndexRow(result); - if (!result.isEmpty()) { - previousResultRowKey = CellUtil.cloneRow(result.get(0)); - } - return moreRows; - } else { - updateDummyWithPrevRowKey(result, initStartRowKey, includeInitStartRowKey, - scan); - return true; - } - } - } catch (Throwable e) { - LOGGER.error("Exception in UncoveredIndexRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - throw e; - } finally { - region.closeRegionOperation(); + /** + * A page of index rows are scanned and then their corresponding data table rows are retrieved + * from the data table regions in parallel. These data rows are then joined with index rows. The + * join is for adding uncovered columns to index rows. This implementation conforms to server + * paging such that if the server side operation takes more than pageSizeInMs, a dummy result is + * returned to signal the client that more work to do on the server side. This is done to prevent + * RPC timeouts. + * @return boolean to indicate if there are more rows to scan + */ + @Override + public boolean next(List result) throws IOException { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + boolean hasMore; + region.startRegionOperation(); + try { + synchronized (innerScanner) { + if (state == State.READY && !indexRowIterator.hasNext()) { + state = State.INITIAL; } - } - - /** - * Add dummy cell to the result list based on either the previous rowkey returned to the - * client or the start rowkey and start rowkey include params. - * - * @param result result to add the dummy cell to. - * @param initStartRowKey scan start rowkey. - * @param includeInitStartRowKey scan start rowkey included. - * @param scan scan object. 
- */ - private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey, - boolean includeInitStartRowKey, Scan scan) { - result.clear(); - if (previousResultRowKey != null) { - getDummyResult(previousResultRowKey, result); + if (state == State.INITIAL) { + indexRowCount = 0; + indexRows = new ArrayList<>(); + dataRows = Maps.newConcurrentMap(); + indexToDataRowKeyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + state = State.SCANNING_INDEX; + } + if (state == State.SCANNING_INDEX) { + hasMore = scanIndexTableRows(result, startTime); + if (isDummy(result)) { + updateDummyWithPrevRowKey(result, initStartRowKey, includeInitStartRowKey, scan); + return hasMore; + } + state = State.SCANNING_DATA; + } + if (state == State.SCANNING_DATA) { + scanDataTableRows(startTime); + indexRowIterator = indexRows.iterator(); + } + if (state == State.READY) { + boolean moreRows = getNextCoveredIndexRow(result); + if (!result.isEmpty()) { + previousResultRowKey = CellUtil.cloneRow(result.get(0)); + } + return moreRows; } else { - if (includeInitStartRowKey && initStartRowKey.length > 0) { - byte[] prevKey; - // In order to generate largest possible rowkey that is less than - // initStartRowKey, we need to check size of the region name that can be - // used by hbase client for meta lookup, in case meta cache is expired at client. - // Once we know regionLookupInMetaLen, use it to generate largest possible - // rowkey that is lower than initStartRowKey by using - // ByteUtil#previousKeyWithLength function, which appends "\\xFF" bytes to - // prev rowkey upto the length provided. e.g. for the given key - // "\\x01\\xC1\\x06", the previous key with length 5 would be - // "\\x01\\xC1\\x05\\xFF\\xFF" by padding 2 bytes "\\xFF". - // The length of the largest scan start rowkey should not exceed - // HConstants#MAX_ROW_LENGTH. - int regionLookupInMetaLen = - RegionInfo.createRegionName(region.getTableDescriptor().getTableName(), - new byte[1], HConstants.NINES, false).length; - if (Bytes.compareTo(initStartRowKey, initStartRowKey.length - 1, - 1, ByteUtil.ZERO_BYTE, 0, 1) == 0) { - // If initStartRowKey has last byte as "\\x00", we can discard the last - // byte and send the key as dummy rowkey. - prevKey = new byte[initStartRowKey.length - 1]; - System.arraycopy(initStartRowKey, 0, prevKey, 0, prevKey.length); - } else if (initStartRowKey.length < - (HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen)) { - prevKey = ByteUtil.previousKeyWithLength(ByteUtil.concat(initStartRowKey, - new byte[HConstants.MAX_ROW_LENGTH - - initStartRowKey.length - 1 - regionLookupInMetaLen]), - HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen); - } else { - prevKey = initStartRowKey; - } - getDummyResult(prevKey, result); - } else { - getDummyResult(initStartRowKey, result); - } + updateDummyWithPrevRowKey(result, initStartRowKey, includeInitStartRowKey, scan); + return true; } + } + } catch (Throwable e) { + LOGGER.error("Exception in UncoveredIndexRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + throw e; + } finally { + region.closeRegionOperation(); + } + } + + /** + * Add dummy cell to the result list based on either the previous rowkey returned to the client or + * the start rowkey and start rowkey include params. + * @param result result to add the dummy cell to. + * @param initStartRowKey scan start rowkey. + * @param includeInitStartRowKey scan start rowkey included. + * @param scan scan object. 
+ */ + private void updateDummyWithPrevRowKey(List result, byte[] initStartRowKey, + boolean includeInitStartRowKey, Scan scan) { + result.clear(); + if (previousResultRowKey != null) { + getDummyResult(previousResultRowKey, result); + } else { + if (includeInitStartRowKey && initStartRowKey.length > 0) { + byte[] prevKey; + // In order to generate largest possible rowkey that is less than + // initStartRowKey, we need to check size of the region name that can be + // used by hbase client for meta lookup, in case meta cache is expired at client. + // Once we know regionLookupInMetaLen, use it to generate largest possible + // rowkey that is lower than initStartRowKey by using + // ByteUtil#previousKeyWithLength function, which appends "\\xFF" bytes to + // prev rowkey upto the length provided. e.g. for the given key + // "\\x01\\xC1\\x06", the previous key with length 5 would be + // "\\x01\\xC1\\x05\\xFF\\xFF" by padding 2 bytes "\\xFF". + // The length of the largest scan start rowkey should not exceed + // HConstants#MAX_ROW_LENGTH. + int regionLookupInMetaLen = + RegionInfo.createRegionName(region.getTableDescriptor().getTableName(), new byte[1], + HConstants.NINES, false).length; + if ( + Bytes.compareTo(initStartRowKey, initStartRowKey.length - 1, 1, ByteUtil.ZERO_BYTE, 0, 1) + == 0 + ) { + // If initStartRowKey has last byte as "\\x00", we can discard the last + // byte and send the key as dummy rowkey. + prevKey = new byte[initStartRowKey.length - 1]; + System.arraycopy(initStartRowKey, 0, prevKey, 0, prevKey.length); + } else + if (initStartRowKey.length < (HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen)) { + prevKey = + ByteUtil.previousKeyWithLength( + ByteUtil.concat(initStartRowKey, + new byte[HConstants.MAX_ROW_LENGTH - initStartRowKey.length - 1 + - regionLookupInMetaLen]), + HConstants.MAX_ROW_LENGTH - 1 - regionLookupInMetaLen); + } else { + prevKey = initStartRowKey; + } + getDummyResult(prevKey, result); + } else { + getDummyResult(initStartRowKey, result); + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java index 39b8733a0ef..a0bef0f1a6f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.coprocessor; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; @@ -35,103 +40,84 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - - public class UncoveredLocalIndexRegionScanner extends UncoveredIndexRegionScanner { - private static final Logger LOGGER = - LoggerFactory.getLogger(UncoveredLocalIndexRegionScanner.class); - final int offset; - final byte[] actualStartKey; + private static final Logger LOGGER = + LoggerFactory.getLogger(UncoveredLocalIndexRegionScanner.class); + final int offset; + final byte[] actualStartKey; - public UncoveredLocalIndexRegionScanner(final RegionScanner innerScanner, - final Region region, - final Scan scan, - final RegionCoprocessorEnvironment env, - final Scan dataTableScan, - final TupleProjector tupleProjector, - final IndexMaintainer indexMaintainer, - final byte[][] viewConstants, - final ImmutableBytesWritable ptr, - final long pageSizeMs, - final int offset, - final byte[] actualStartKey, - final long queryLimit) { - super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, - viewConstants, ptr, pageSizeMs, queryLimit); - this.offset = offset; - this.actualStartKey = actualStartKey; - } + public UncoveredLocalIndexRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, final Scan dataTableScan, + final TupleProjector tupleProjector, final IndexMaintainer indexMaintainer, + final byte[][] viewConstants, final ImmutableBytesWritable ptr, final long pageSizeMs, + final int offset, final byte[] actualStartKey, final long queryLimit) { + super(innerScanner, region, scan, env, dataTableScan, tupleProjector, indexMaintainer, + viewConstants, ptr, pageSizeMs, queryLimit); + this.offset = offset; + this.actualStartKey = actualStartKey; + } - protected void scanDataRows(Collection dataRowKeys, long startTime) throws IOException { - Scan dataScan = prepareDataTableScan(dataRowKeys); - if (dataScan == null) { - return; - } - try (RegionScanner regionScanner = region.getScanner(dataScan)) { - boolean hasMore; - do { - List row = new ArrayList(); - hasMore = regionScanner.nextRaw(row); - - if (!row.isEmpty()) { - if (ScanUtil.isDummy(row)) { - state = State.SCANNING_DATA_INTERRUPTED; - break; - } - Cell firstCell = row.get(0); - dataRows.put(new ImmutableBytesPtr(CellUtil.cloneRow(firstCell)), - Result.create(row)); - if (hasMore && - (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= - pageSizeMs) { - state = State.SCANNING_DATA_INTERRUPTED; - break; - } - } - } while (hasMore); - if (state == State.SCANNING_DATA_INTERRUPTED) { - LOGGER.info("Data table scan is interrupted in " - + "UncoveredLocalIndexRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString() - + " as it could not complete on time (in " + pageSizeMs + " ms), and" - + " it will be resubmitted"); - } - } + protected void scanDataRows(Collection 
dataRowKeys, long startTime) throws IOException { + Scan dataScan = prepareDataTableScan(dataRowKeys); + if (dataScan == null) { + return; } + try (RegionScanner regionScanner = region.getScanner(dataScan)) { + boolean hasMore; + do { + List row = new ArrayList(); + hasMore = regionScanner.nextRaw(row); - @Override - protected void scanDataTableRows(long startTime) - throws IOException { - if (indexToDataRowKeyMap.size() == 0) { - state = State.READY; - return; - } - scanDataRows(indexToDataRowKeyMap.values(), startTime); - if (state == State.SCANNING_DATA_INTERRUPTED) { - state = State.SCANNING_DATA; - } else { - state = State.READY; + if (!row.isEmpty()) { + if (ScanUtil.isDummy(row)) { + state = State.SCANNING_DATA_INTERRUPTED; + break; + } + Cell firstCell = row.get(0); + dataRows.put(new ImmutableBytesPtr(CellUtil.cloneRow(firstCell)), Result.create(row)); + if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { + state = State.SCANNING_DATA_INTERRUPTED; + break; + } } + } while (hasMore); + if (state == State.SCANNING_DATA_INTERRUPTED) { + LOGGER.info( + "Data table scan is interrupted in " + "UncoveredLocalIndexRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString() + + " as it could not complete on time (in " + pageSizeMs + " ms), and" + + " it will be resubmitted"); + } } + } - @Override - protected boolean scanIndexTableRows(List result, - final long startTime) throws IOException { - return scanIndexTableRows(result, startTime, actualStartKey, offset); + @Override + protected void scanDataTableRows(long startTime) throws IOException { + if (indexToDataRowKeyMap.size() == 0) { + state = State.READY; + return; } - - @Override - public boolean next(List result) throws IOException { - boolean hasMore = super.next(result); - ServerIndexUtil.wrapResultUsingOffset(result, offset); - return hasMore; + scanDataRows(indexToDataRowKeyMap.values(), startTime); + if (state == State.SCANNING_DATA_INTERRUPTED) { + state = State.SCANNING_DATA; + } else { + state = State.READY; } + } - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); - } + @Override + protected boolean scanIndexTableRows(List result, final long startTime) throws IOException { + return scanIndexTableRows(result, startTime, actualStartKey, offset); + } + + @Override + public boolean next(List result) throws IOException { + boolean hasMore = super.next(result); + ServerIndexUtil.wrapResultUsingOffset(result, offset); + return hasMore; + } + + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java index c19e1959e2a..b93fff069f6 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,13 @@ */ package org.apache.phoenix.coprocessor; -import static org.apache.phoenix.util.ScanUtil.adjustScanFilterForGlobalIndexRegionScanner; import static org.apache.phoenix.query.QueryConstants.AGG_TIMESTAMP; import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN; import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN_FAMILY; import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY; import static org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker.COMPACTION_UPDATE_STATS_ROW_COUNT; import static org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker.CONCURRENT_UPDATE_STATS_ROW_COUNT; +import static org.apache.phoenix.util.ScanUtil.adjustScanFilterForGlobalIndexRegionScanner; import java.io.ByteArrayInputStream; import java.io.DataInputStream; @@ -32,11 +32,9 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; import javax.annotation.concurrent.GuardedBy; @@ -61,7 +59,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.ipc.controller.InterRegionServerIndexRpcControllerFactory; -import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; @@ -94,7 +91,6 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.join.HashJoinInfo; import org.apache.phoenix.mapreduce.index.IndexTool; -import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.ColumnFamilyNotFoundException; @@ -106,10 +102,11 @@ import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker; import org.apache.phoenix.schema.stats.StatisticsCollector; import org.apache.phoenix.schema.stats.StatisticsCollectorFactory; - import org.apache.phoenix.schema.stats.StatsCollectionDisabledOnServerException; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; @@ -124,909 +121,905 @@ import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; import org.apache.phoenix.util.ServerUtil.ConnectionType; -import org.apache.phoenix.util.MetaDataUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** - * Region observer that aggregates ungrouped rows(i.e. SQL query with aggregation function and no GROUP BY). - * - * + * Region observer that aggregates ungrouped rows(i.e. 
SQL query with aggregation function and no + * GROUP BY). * @since 0.1 */ -public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor { - // TODO: move all constants into a single class - public static final String UNGROUPED_AGG = "UngroupedAgg"; - public static final String DELETE_AGG = "DeleteAgg"; - public static final String DELETE_CQ = "DeleteCQ"; - public static final String DELETE_CF = "DeleteCF"; - public static final String EMPTY_CF = "EmptyCF"; - /** - * This lock used for synchronizing the state of - * {@link UngroupedAggregateRegionObserver#scansReferenceCount}, - * {@link UngroupedAggregateRegionObserver#isRegionClosingOrSplitting} variables used to avoid possible - * dead lock situation in case below steps: - * 1. We get read lock when we start writing local indexes, deletes etc.. - * 2. when memstore reach threshold, flushes happen. Since they use read (shared) lock they - * happen without any problem until someone tries to obtain write lock. - * 3. at one moment we decide to split/bulkload/close and try to acquire write lock. - * 4. Since that moment all attempts to get read lock will be blocked. I.e. no more - * flushes will happen. But we continue to fill memstore with local index batches and - * finally we get RTBE. - * - * The solution to this is to not allow or delay operations acquire the write lock. - * 1) In case of split we just throw IOException so split won't happen but it will not cause any harm. - * 2) In case of bulkload failing it by throwing the exception. - * 3) In case of region close by balancer/move wait before closing the reason and fail the query which - * does write after reading. - * - * See PHOENIX-3111 for more info. +public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver + implements RegionCoprocessor { + // TODO: move all constants into a single class + public static final String UNGROUPED_AGG = "UngroupedAgg"; + public static final String DELETE_AGG = "DeleteAgg"; + public static final String DELETE_CQ = "DeleteCQ"; + public static final String DELETE_CF = "DeleteCF"; + public static final String EMPTY_CF = "EmptyCF"; + /** + * This lock used for synchronizing the state of + * {@link UngroupedAggregateRegionObserver#scansReferenceCount}, + * {@link UngroupedAggregateRegionObserver#isRegionClosingOrSplitting} variables used to avoid + * possible dead lock situation in case below steps: 1. We get read lock when we start writing + * local indexes, deletes etc.. 2. when memstore reach threshold, flushes happen. Since they use + * read (shared) lock they happen without any problem until someone tries to obtain write lock. 3. + * at one moment we decide to split/bulkload/close and try to acquire write lock. 4. Since that + * moment all attempts to get read lock will be blocked. I.e. no more flushes will happen. But we + * continue to fill memstore with local index batches and finally we get RTBE. The solution to + * this is to not allow or delay operations acquire the write lock. 1) In case of split we just + * throw IOException so split won't happen but it will not cause any harm. 2) In case of bulkload + * failing it by throwing the exception. 3) In case of region close by balancer/move wait before + * closing the reason and fail the query which does write after reading. See PHOENIX-3111 for more + * info. 
+ */ + + private final Object lock = new Object(); + /** + * To maintain the number of scans used for create index, delete and upsert select operations + * which reads and writes to same region in coprocessors. + */ + @GuardedBy("lock") + private int scansReferenceCount = 0; + @GuardedBy("lock") + private boolean isRegionClosingOrSplitting = false; + private static final Logger LOGGER = + LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class); + private Configuration upsertSelectConfig; + private Configuration compactionConfig; + private Configuration indexWriteConfig; + private ReadOnlyProps indexWriteProps; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment e) throws IOException { + /* + * We need to create a copy of region's configuration since we don't want any side effect of + * setting the RpcControllerFactory. */ - - private final Object lock = new Object(); - /** - * To maintain the number of scans used for create index, delete and upsert select operations - * which reads and writes to same region in coprocessors. + upsertSelectConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); + /* + * Till PHOENIX-3995 is fixed, we need to use the InterRegionServerIndexRpcControllerFactory. + * Although this would cause remote RPCs to use index handlers on the destination region + * servers, it is better than using the regular priority handlers which could result in a + * deadlock. */ - @GuardedBy("lock") - private int scansReferenceCount = 0; - @GuardedBy("lock") - private boolean isRegionClosingOrSplitting = false; - private static final Logger LOGGER = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class); - private Configuration upsertSelectConfig; - private Configuration compactionConfig; - private Configuration indexWriteConfig; - private ReadOnlyProps indexWriteProps; - - @Override - public Optional getRegionObserver() { - return Optional.of(this); + upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, + InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); + + compactionConfig = ServerUtil.getCompactionConfig(e.getConfiguration()); + + // For retries of index write failures, use the same # of retries as the rebuilder + indexWriteConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); + indexWriteConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + e.getConfiguration().getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER)); + indexWriteProps = new ReadOnlyProps(indexWriteConfig.iterator()); + } + + Configuration getUpsertSelectConfig() { + return upsertSelectConfig; + } + + void incrementScansReferenceCount() throws IOException { + synchronized (lock) { + if (isRegionClosingOrSplitting) { + throw new IOException( + "Temporarily unable to write from scan because region is closing or splitting"); + } + scansReferenceCount++; + lock.notifyAll(); } + } + + void decrementScansReferenceCount() { + synchronized (lock) { + scansReferenceCount--; + if (scansReferenceCount < 0) { + LOGGER.warn( + "Scan reference count went below zero. Something isn't correct. 
Resetting it back to zero"); + scansReferenceCount = 0; + } + lock.notifyAll(); + } + } + + void commitBatchWithRetries(final Region region, final List localRegionMutations, + final long blockingMemstoreSize) throws IOException { + try { + commitBatch(region, localRegionMutations, blockingMemstoreSize); + } catch (IOException e) { + handleIndexWriteException(localRegionMutations, e, new MutateCommand() { + @Override + public void doMutation() throws IOException { + commitBatch(region, localRegionMutations, blockingMemstoreSize); + } - @Override - public void start(CoprocessorEnvironment e) throws IOException { - /* - * We need to create a copy of region's configuration since we don't want any side effect of - * setting the RpcControllerFactory. - */ - upsertSelectConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); - /* - * Till PHOENIX-3995 is fixed, we need to use the - * InterRegionServerIndexRpcControllerFactory. Although this would cause remote RPCs to use - * index handlers on the destination region servers, it is better than using the regular - * priority handlers which could result in a deadlock. - */ - upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, - InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); - - compactionConfig = ServerUtil.getCompactionConfig(e.getConfiguration()); + @Override + public List getMutationList() { + return localRegionMutations; + } + }); + } + } - // For retries of index write failures, use the same # of retries as the rebuilder - indexWriteConfig = PropertiesUtil.cloneConfig(e.getConfiguration()); - indexWriteConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - e.getConfiguration().getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER)); - indexWriteProps = new ReadOnlyProps(indexWriteConfig.iterator()); + void commitBatch(Region region, List mutations, long blockingMemstoreSize) + throws IOException { + if (mutations.isEmpty()) { + return; } - Configuration getUpsertSelectConfig() { - return upsertSelectConfig; + Mutation[] mutationArray = new Mutation[mutations.size()]; + // When memstore size reaches blockingMemstoreSize we are waiting 3 seconds for the + // flush happen which decrease the memstore size and then writes allowed on the region. + for (int i = 0; blockingMemstoreSize > 0 + && region.getMemStoreHeapSize() + region.getMemStoreOffHeapSize() > blockingMemstoreSize + && i < 30; i++) { + try { + checkForRegionClosingOrSplitting(); + Thread.sleep(100); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } } + // TODO: should we use the one that is all or none? + LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + + region.getRegionInfo().getTable().getNameAsString()); + region.batchMutate(mutations.toArray(mutationArray)); + } + + static void setIndexAndTransactionProperties(List mutations, byte[] indexUUID, + byte[] indexMaintainersPtr, byte[] txState, byte[] clientVersionBytes, boolean useIndexProto) { + for (Mutation m : mutations) { + if (indexMaintainersPtr != null) { + m.setAttribute( + useIndexProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, + indexMaintainersPtr); + } + if (indexUUID != null) { + m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID); + } + if (txState != null) { + m.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); + } + if (clientVersionBytes != null) { + m.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersionBytes); + } + } + } - void incrementScansReferenceCount() throws IOException { - synchronized (lock) { - if (isRegionClosingOrSplitting) { - throw new IOException("Temporarily unable to write from scan because region is closing or splitting"); - } - scansReferenceCount++; - lock.notifyAll(); - } + private void commitBatchWithTable(Table table, List mutations) throws IOException { + if (mutations.isEmpty()) { + return; } - void decrementScansReferenceCount() { - synchronized (lock) { - scansReferenceCount--; - if (scansReferenceCount < 0) { - LOGGER.warn( - "Scan reference count went below zero. Something isn't correct. Resetting it back to zero"); - scansReferenceCount = 0; - } - lock.notifyAll(); - } + LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + table); + try { + Object[] results = new Object[mutations.size()]; + table.batch(mutations, results); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + + /** + * There is a chance that region might be closing while running balancer/move/merge. In this case + * if the memstore size reaches blockingMemstoreSize better to fail query because there is a high + * chance that flush might not proceed and memstore won't be freed up. + */ + void checkForRegionClosingOrSplitting() throws IOException { + synchronized (lock) { + if (isRegionClosingOrSplitting) { + lock.notifyAll(); + throw new IOException( + "Region is getting closed. Not allowing to write to avoid possible deadlock."); + } + } + } + + @Override + public void preScannerOpen(ObserverContext e, Scan scan) + throws IOException { + super.preScannerOpen(e, scan); + if (ScanUtil.isAnalyzeTable(scan)) { + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW, + scan.getStartRow()); + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW, + scan.getStopRow()); + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW, + Bytes.toBytes(scan.includeStartRow())); + scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW, + Bytes.toBytes(scan.includeStopRow())); + // We are setting the start row and stop row such that it covers the entire region. As part + // of Phonenix-1263 we are storing the guideposts against the physical table rather than + // individual tenant specific tables. 
+ scan.withStartRow(HConstants.EMPTY_START_ROW); + scan.withStopRow(HConstants.EMPTY_END_ROW); + scan.setFilter(null); } + } - void commitBatchWithRetries(final Region region, final List localRegionMutations, final long blockingMemstoreSize) throws IOException { - try { - commitBatch(region, localRegionMutations, blockingMemstoreSize); - } catch (IOException e) { - handleIndexWriteException(localRegionMutations, e, new MutateCommand() { - @Override - public void doMutation() throws IOException { - commitBatch(region, localRegionMutations, blockingMemstoreSize); - } + public static class MutationList extends ArrayList { + private long byteSize = 0L; - @Override - public List getMutationList() { - return localRegionMutations; - } - }); - } + public MutationList() { + super(); } - void commitBatch(Region region, List mutations, long blockingMemstoreSize) throws IOException { - if (mutations.isEmpty()) { - return; - } + public MutationList(int size) { + super(size); + } - Mutation[] mutationArray = new Mutation[mutations.size()]; - // When memstore size reaches blockingMemstoreSize we are waiting 3 seconds for the - // flush happen which decrease the memstore size and then writes allowed on the region. - for (int i = 0; blockingMemstoreSize > 0 - && region.getMemStoreHeapSize() + region.getMemStoreOffHeapSize() > blockingMemstoreSize - && i < 30; i++) { - try { - checkForRegionClosingOrSplitting(); - Thread.sleep(100); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException(e); - } + @Override + public boolean add(Mutation e) { + boolean r = super.add(e); + if (r) { + this.byteSize += PhoenixKeyValueUtil.calculateMutationDiskSize(e); } - // TODO: should we use the one that is all or none? - LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString()); - region.batchMutate(mutations.toArray(mutationArray)); + return r; } - static void setIndexAndTransactionProperties(List mutations, byte[] indexUUID, - byte[] indexMaintainersPtr, byte[] txState, - byte[] clientVersionBytes, boolean useIndexProto) { - for (Mutation m : mutations) { - if (indexMaintainersPtr != null) { - m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr); - } - if (indexUUID != null) { - m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID); - } - if (txState != null) { - m.setAttribute(BaseScannerRegionObserverConstants.TX_STATE, txState); - } - if (clientVersionBytes != null) { - m.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersionBytes); - } - } + public long byteSize() { + return byteSize; } - private void commitBatchWithTable(Table table, List mutations) throws IOException { - if (mutations.isEmpty()) { - return; - } - - LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + table); - try { - Object[] results = new Object[mutations.size()]; - table.batch(mutations, results); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException(e); - } + @Override + public void clear() { + byteSize = 0l; + super.clear(); } + } - /** - * There is a chance that region might be closing while running balancer/move/merge. In this - * case if the memstore size reaches blockingMemstoreSize better to fail query because there is - * a high chance that flush might not proceed and memstore won't be freed up. 
- * @throws IOException - */ - void checkForRegionClosingOrSplitting() throws IOException { - synchronized (lock) { - if (isRegionClosingOrSplitting) { - lock.notifyAll(); - throw new IOException("Region is getting closed. Not allowing to write to avoid possible deadlock."); - } - } - } + static long getBlockingMemstoreSize(Region region, Configuration conf) { + long flushSize = region.getTableDescriptor().getMemStoreFlushSize(); - @Override - public void preScannerOpen(ObserverContext e, Scan scan) - throws IOException { - super.preScannerOpen(e, scan); - if (ScanUtil.isAnalyzeTable(scan)) { - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW, - scan.getStartRow()); - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW, - scan.getStopRow()); - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW, - Bytes.toBytes(scan.includeStartRow())); - scan.setAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW, - Bytes.toBytes(scan.includeStopRow())); - // We are setting the start row and stop row such that it covers the entire region. As part - // of Phonenix-1263 we are storing the guideposts against the physical table rather than - // individual tenant specific tables. - scan.withStartRow(HConstants.EMPTY_START_ROW); - scan.withStopRow(HConstants.EMPTY_END_ROW); - scan.setFilter(null); + if (flushSize <= 0) { + flushSize = conf.getLongBytes(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, + TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + } + return flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1); + } + + @Override + protected RegionScanner doPostScannerOpen(final ObserverContext c, + final Scan scan, final RegionScanner s) throws IOException, SQLException { + final RegionCoprocessorEnvironment env = c.getEnvironment(); + final Region region = env.getRegion(); + long ts = scan.getTimeRange().getMax(); + boolean localIndexScan = ScanUtil.isLocalIndex(scan); + boolean uncoveredGlobalIndexScan = ScanUtil.isUncoveredGlobalIndex(scan); + if (ScanUtil.isAnalyzeTable(scan)) { + byte[] gp_width_bytes = + scan.getAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_WIDTH_BYTES); + byte[] gp_per_region_bytes = + scan.getAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_PER_REGION); + // Let this throw, as this scan is being done for the sole purpose of collecting stats + StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env, + region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, + gp_per_region_bytes); + if (statsCollector instanceof NoOpStatisticsCollector) { + throw new StatsCollectionDisabledOnServerException(); + } else { + return collectStats(s, statsCollector, region, scan, env.getConfiguration()); + } + } else if (ScanUtil.isIndexRebuild(scan)) { + return User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public RegionScanner run() throws Exception { + return rebuildIndices(s, region, scan, env); } + }); } - public static class MutationList extends ArrayList { - private long byteSize = 0L; + boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); + int offsetToBe = 0; + if (localIndexScan) { + offsetToBe = region.getRegionInfo().getStartKey().length != 0 + ? 
region.getRegionInfo().getStartKey().length + : region.getRegionInfo().getEndKey().length; + } + final int offset = offsetToBe; + byte[] descRowKeyTableBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.UPGRADE_DESC_ROW_KEY); + boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null; + List indexMaintainers = IndexUtil.deSerializeIndexMaintainersFromScan(scan); + RegionScanner theScanner = s; + byte[] upsertSelectTable = + scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE); + boolean isDelete = false; + if (upsertSelectTable == null) { + byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_AGG); + isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0; + } + TupleProjector tupleProjector = null; + byte[][] viewConstants = null; + ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); + final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); + final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); + boolean useQualifierAsIndex = + EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); + if ( + ((localIndexScan || uncoveredGlobalIndexScan) && !isDelete && !isDescRowKeyOrderUpgrade) + || (j == null && p != null) + ) { + if (dataColumns != null) { + tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); + } + viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); + ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); + theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, + region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, + tempPtr, useQualifierAsIndex); + } - public MutationList() { - super(); + if (j != null) { + theScanner = new HashJoinRegionScanner(theScanner, scan, p, j, ScanUtil.getTenantId(scan), + env, useQualifierAsIndex, useNewValueColumnQualifier); + } + return new UngroupedAggregateRegionScanner(c, theScanner, region, scan, env, this); + } + + public static void checkForLocalIndexColumnFamilies(Region region, + List indexMaintainers) throws IOException { + TableDescriptor tableDesc = region.getTableDescriptor(); + String schemaName = tableDesc.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR) + ? SchemaUtil.getSchemaNameFromFullName(tableDesc.getTableName().getNameAsString()) + : tableDesc.getTableName().getNamespaceAsString(); + String tableName = + SchemaUtil.getTableNameFromFullName(tableDesc.getTableName().getNameAsString()); + for (IndexMaintainer indexMaintainer : indexMaintainers) { + Set coveredColumns = indexMaintainer.getCoveredColumns(); + if (coveredColumns.isEmpty()) { + byte[] localIndexCf = indexMaintainer.getEmptyKeyValueFamily().get(); + // When covered columns empty we store index data in default column family so check for it. 
+ if (tableDesc.getColumnFamily(localIndexCf) == null) { + ClientUtil.throwIOException("Column Family Not Found", + new ColumnFamilyNotFoundException(schemaName, tableName, Bytes.toString(localIndexCf))); } - - public MutationList(int size) { - super(size); + } + for (ColumnReference reference : coveredColumns) { + byte[] cf = IndexUtil.getLocalIndexColumnFamily(reference.getFamily()); + ColumnFamilyDescriptor family = region.getTableDescriptor().getColumnFamily(cf); + if (family == null) { + ClientUtil.throwIOException("Column Family Not Found", + new ColumnFamilyNotFoundException(schemaName, tableName, Bytes.toString(cf))); } - + } + } + } + + void commit(final Region region, List mutations, byte[] indexUUID, + final long blockingMemStoreSize, byte[] indexMaintainersPtr, byte[] txState, + final Table targetHTable, boolean useIndexProto, boolean isPKChanging, + byte[] clientVersionBytes) throws IOException { + final List localRegionMutations = Lists.newArrayList(); + final List remoteRegionMutations = Lists.newArrayList(); + setIndexAndTransactionProperties(mutations, indexUUID, indexMaintainersPtr, txState, + clientVersionBytes, useIndexProto); + separateLocalAndRemoteMutations(targetHTable, region, mutations, localRegionMutations, + remoteRegionMutations, isPKChanging); + commitBatchWithRetries(region, localRegionMutations, blockingMemStoreSize); + try { + commitBatchWithTable(targetHTable, remoteRegionMutations); + } catch (IOException e) { + handleIndexWriteException(remoteRegionMutations, e, new MutateCommand() { @Override - public boolean add(Mutation e) { - boolean r = super.add(e); - if (r) { - this.byteSize += PhoenixKeyValueUtil.calculateMutationDiskSize(e); - } - return r; - } - - public long byteSize() { - return byteSize; + public void doMutation() throws IOException { + commitBatchWithTable(targetHTable, remoteRegionMutations); } @Override - public void clear() { - byteSize = 0l; - super.clear(); + public List getMutationList() { + return remoteRegionMutations; } + }); } - - static long getBlockingMemstoreSize(Region region, Configuration conf) { - long flushSize = region.getTableDescriptor().getMemStoreFlushSize(); - - if (flushSize <= 0) { - flushSize = conf.getLongBytes(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); + localRegionMutations.clear(); + remoteRegionMutations.clear(); + } + + private void handleIndexWriteException(final List localRegionMutations, + IOException origIOE, MutateCommand mutateCommand) throws IOException { + long serverTimestamp = ClientUtil.parseTimestampFromRemoteException(origIOE); + SQLException inferredE = ClientUtil.parseLocalOrRemoteServerException(origIOE); + if ( + inferredE != null + && inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode() + ) { + // For an index write failure, the data table write succeeded, + // so when we retry we need to set REPLAY_WRITES + for (Mutation mutation : localRegionMutations) { + if (PhoenixIndexMetaData.isIndexRebuild(mutation.getAttributesMap())) { + mutation.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); + } else { + mutation.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); } - return flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1); + // use the server timestamp for index write retrys + 
PhoenixKeyValueUtil.setTimestamp(mutation, serverTimestamp); + } + IndexWriteException iwe = PhoenixIndexFailurePolicyHelper.getIndexWriteException(inferredE); + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(indexWriteConfig).unwrap(PhoenixConnection.class)) { + PhoenixIndexFailurePolicyHelper.doBatchWithRetries(mutateCommand, iwe, conn, + indexWriteProps); + } catch (Exception e) { + throw new DoNotRetryIOException(e); + } + } else { + throw origIOE; } - - @Override - protected RegionScanner doPostScannerOpen(final ObserverContext c, final Scan scan, - final RegionScanner s) throws IOException, SQLException { - final RegionCoprocessorEnvironment env = c.getEnvironment(); - final Region region = env.getRegion(); - long ts = scan.getTimeRange().getMax(); - boolean localIndexScan = ScanUtil.isLocalIndex(scan); - boolean uncoveredGlobalIndexScan = ScanUtil.isUncoveredGlobalIndex(scan); - if (ScanUtil.isAnalyzeTable(scan)) { - byte[] gp_width_bytes = - scan.getAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_WIDTH_BYTES); - byte[] gp_per_region_bytes = - scan.getAttribute(BaseScannerRegionObserverConstants.GUIDEPOST_PER_REGION); - // Let this throw, as this scan is being done for the sole purpose of collecting stats - StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector( - env, region.getRegionInfo().getTable().getNameAsString(), ts, - gp_width_bytes, gp_per_region_bytes); - if (statsCollector instanceof NoOpStatisticsCollector) { - throw new StatsCollectionDisabledOnServerException(); - } else { - return collectStats(s, statsCollector, region, scan, env.getConfiguration()); - } - } else if (ScanUtil.isIndexRebuild(scan)) { - return User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public RegionScanner run() throws Exception { - return rebuildIndices(s, region, scan, env); - } - }); + } + + private void separateLocalAndRemoteMutations(Table targetHTable, Region region, + List mutations, List localRegionMutations, + List remoteRegionMutations, boolean isPKChanging) { + boolean areMutationsInSameTable = areMutationsInSameTable(targetHTable, region); + // if we're writing to the same table, but the PK can change, that means that some + // mutations might be in our current region, and others in a different one. + if (areMutationsInSameTable && isPKChanging) { + RegionInfo regionInfo = region.getRegionInfo(); + for (Mutation mutation : mutations) { + if (regionInfo.containsRow(mutation.getRow())) { + localRegionMutations.add(mutation); + } else { + remoteRegionMutations.add(mutation); } - - boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); - int offsetToBe = 0; - if (localIndexScan) { - offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? 
region.getRegionInfo().getStartKey().length : - region.getRegionInfo().getEndKey().length; + } + } else if (areMutationsInSameTable && !isPKChanging) { + localRegionMutations.addAll(mutations); + } else { + remoteRegionMutations.addAll(mutations); + } + } + + private boolean areMutationsInSameTable(Table targetHTable, Region region) { + return (targetHTable == null || Bytes.compareTo(targetHTable.getName().getName(), + region.getTableDescriptor().getTableName().getName()) == 0); + } + + @Override + public InternalScanner preCompact(ObserverContext c, Store store, + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { + + final TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); + // Compaction and split upcalls run with the effective user context of the requesting user. + // This will lead to failure of cross cluster RPC if the effective user is not + // the login user. Switch to the login user context to ensure we have the expected + // security context. + return User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public InternalScanner run() throws Exception { + InternalScanner internalScanner = scanner; + boolean keepDeleted = false; + boolean isMultiTenantIndexTable = false; + if (tableName.getNameAsString().startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) { + isMultiTenantIndexTable = true; } - final int offset = offsetToBe; - byte[] descRowKeyTableBytes = scan.getAttribute(BaseScannerRegionObserverConstants.UPGRADE_DESC_ROW_KEY); - boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null; - List indexMaintainers = IndexUtil.deSerializeIndexMaintainersFromScan(scan); - RegionScanner theScanner = s; - byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE); - boolean isDelete = false; - if (upsertSelectTable == null) { - byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_AGG); - isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0; + final String fullTableName = isMultiTenantIndexTable + ? 
SchemaUtil.getParentTableNameFromIndexTable(tableName.getNameAsString(), + MetaDataUtil.VIEW_INDEX_TABLE_PREFIX) + : tableName.getNameAsString(); + PTable table = null; + Long maxLookbackAge = null; + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class)) { + table = conn.getTableNoCache(fullTableName); + maxLookbackAge = table.getMaxLookbackAge(); + } catch (Exception e) { + if (e instanceof TableNotFoundException) { + LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName); + // non-Phoenix HBase tables won't be found, do nothing + } else { + LOGGER.error("Unable to modify compaction scanner to retain deleted " + + "cells for a table with disabled Index; " + fullTableName, e); + } } - TupleProjector tupleProjector = null; - byte[][] viewConstants = null; - ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); - final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); - final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); - boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); - if (((localIndexScan || uncoveredGlobalIndexScan) && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) { - if (dataColumns != null) { - tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); + // The previous indexing design needs to retain delete markers and deleted + // cells to rebuild disabled indexes. Thus, we skip major compaction for + // them. GlobalIndexChecker is the coprocessor introduced by the current + // indexing design. + if ( + table != null && !PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName) + && !ServerUtil.hasCoprocessor(c.getEnvironment(), GlobalIndexChecker.class.getName()) + ) { + List indexes = PTableType.INDEX.equals(table.getType()) + ? Lists.newArrayList(table) + : table.getIndexes(); + // FIXME need to handle views and indexes on views as well + for (PTable index : indexes) { + if (index.getIndexDisableTimestamp() != 0) { + LOGGER.info("Modifying major compaction scanner to retain " + + "deleted cells for a table with disabled index: " + fullTableName); + keepDeleted = true; + break; } - viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); - ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - theScanner = - getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, - region, indexMaintainers == null ? 
null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex); + } } - - if (j != null) { - theScanner = new HashJoinRegionScanner(theScanner, scan, p, j, ScanUtil.getTenantId(scan), env, useQualifierAsIndex, useNewValueColumnQualifier); + if (table != null && isPhoenixTableTTLEnabled(c.getEnvironment().getConfiguration())) { + internalScanner = new CompactionScanner(c.getEnvironment(), store, scanner, + MetaDataUtil.getMaxLookbackAge(c.getEnvironment().getConfiguration(), maxLookbackAge), + request.isMajor() || request.isAllFiles(), keepDeleted, table); + } else if (isPhoenixTableTTLEnabled(c.getEnvironment().getConfiguration())) { + LOGGER.warn("Skipping compaction for table: {} " + "as failed to retrieve PTable object", + fullTableName); } - return new UngroupedAggregateRegionScanner(c, theScanner, region, scan, env, this); - } - - public static void checkForLocalIndexColumnFamilies(Region region, - List indexMaintainers) throws IOException { - TableDescriptor tableDesc = region.getTableDescriptor(); - String schemaName = - tableDesc.getTableName().getNamespaceAsString() - .equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR) ? SchemaUtil - .getSchemaNameFromFullName(tableDesc.getTableName().getNameAsString()) - : tableDesc.getTableName().getNamespaceAsString(); - String tableName = SchemaUtil.getTableNameFromFullName(tableDesc.getTableName().getNameAsString()); - for (IndexMaintainer indexMaintainer : indexMaintainers) { - Set coveredColumns = indexMaintainer.getCoveredColumns(); - if (coveredColumns.isEmpty()) { - byte[] localIndexCf = indexMaintainer.getEmptyKeyValueFamily().get(); - // When covered columns empty we store index data in default column family so check for it. - if (tableDesc.getColumnFamily(localIndexCf) == null) { - ClientUtil.throwIOException("Column Family Not Found", - new ColumnFamilyNotFoundException(schemaName, tableName, Bytes - .toString(localIndexCf))); - } - } - for (ColumnReference reference : coveredColumns) { - byte[] cf = IndexUtil.getLocalIndexColumnFamily(reference.getFamily()); - ColumnFamilyDescriptor family = region.getTableDescriptor().getColumnFamily(cf); - if (family == null) { - ClientUtil.throwIOException("Column Family Not Found", - new ColumnFamilyNotFoundException(schemaName, tableName, Bytes.toString(cf))); - } + if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) { + try { + long clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis(); + DelegateRegionCoprocessorEnvironment compactionConfEnv = + new DelegateRegionCoprocessorEnvironment(c.getEnvironment(), + ConnectionType.COMPACTION_CONNECTION); + StatisticsCollector statisticsCollector = StatisticsCollectorFactory + .createStatisticsCollector(compactionConfEnv, tableName.getNameAsString(), + clientTimeStamp, store.getColumnFamilyDescriptor().getName()); + statisticsCollector.init(); + internalScanner = statisticsCollector.createCompactionScanner(compactionConfEnv, store, + internalScanner); + } catch (Exception e) { + // If we can't reach the stats table, don't interrupt the normal + // compaction operation, just log a warning. 
+ if (LOGGER.isWarnEnabled()) { + LOGGER.warn("Unable to collect stats for " + tableName, e); } + } } + return internalScanner; + } + }); + } + + static PTable deserializeTable(byte[] b) { + try { + PTableProtos.PTable ptableProto = PTableProtos.PTable.parseFrom(b); + return PTableImpl.createFromProto(ptableProto); + } catch (IOException e) { + throw new RuntimeException(e); } - - void commit(final Region region, List mutations, byte[] indexUUID, final long blockingMemStoreSize, - byte[] indexMaintainersPtr, byte[] txState, final Table targetHTable, boolean useIndexProto, - boolean isPKChanging, byte[] clientVersionBytes) - throws IOException { - final List localRegionMutations = Lists.newArrayList(); - final List remoteRegionMutations = Lists.newArrayList(); - setIndexAndTransactionProperties(mutations, indexUUID, indexMaintainersPtr, txState, clientVersionBytes, useIndexProto); - separateLocalAndRemoteMutations(targetHTable, region, mutations, localRegionMutations, remoteRegionMutations, - isPKChanging); - commitBatchWithRetries(region, localRegionMutations, blockingMemStoreSize); - try { - commitBatchWithTable(targetHTable, remoteRegionMutations); - } catch (IOException e) { - handleIndexWriteException(remoteRegionMutations, e, new MutateCommand() { - @Override - public void doMutation() throws IOException { - commitBatchWithTable(targetHTable, remoteRegionMutations); - } - - @Override - public List getMutationList() { - return remoteRegionMutations; - } - }); - } - localRegionMutations.clear(); - remoteRegionMutations.clear(); + } + + private RegionScanner getRegionScanner(final RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env, final boolean oldCoproc) + throws IOException { + if (oldCoproc) { + return new IndexerRegionScanner(innerScanner, region, scan, env, this); + } else { + if (region.getTableDescriptor().hasCoprocessor(GlobalIndexChecker.class.getCanonicalName())) { + return new IndexRepairRegionScanner(innerScanner, region, scan, env, this); + } else { + return new IndexRebuildRegionScanner(innerScanner, region, scan, env, this); + } } - - private void handleIndexWriteException(final List localRegionMutations, IOException origIOE, - MutateCommand mutateCommand) throws IOException { - long serverTimestamp = ClientUtil.parseTimestampFromRemoteException(origIOE); - SQLException inferredE = ClientUtil.parseLocalOrRemoteServerException(origIOE); - if (inferredE != null && inferredE.getErrorCode() == SQLExceptionCode.INDEX_WRITE_FAILURE.getErrorCode()) { - // For an index write failure, the data table write succeeded, - // so when we retry we need to set REPLAY_WRITES - for (Mutation mutation : localRegionMutations) { - if (PhoenixIndexMetaData.isIndexRebuild(mutation.getAttributesMap())) { - mutation.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, - BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); - } else { - mutation.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, - BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); - } - // use the server timestamp for index write retrys - PhoenixKeyValueUtil.setTimestamp(mutation, serverTimestamp); - } - IndexWriteException iwe = PhoenixIndexFailurePolicyHelper.getIndexWriteException(inferredE); - try (PhoenixConnection conn = - QueryUtil.getConnectionOnServer(indexWriteConfig) - .unwrap(PhoenixConnection.class)) { - PhoenixIndexFailurePolicyHelper.doBatchWithRetries(mutateCommand, iwe, conn, - indexWriteProps); - } catch (Exception 
e) { - throw new DoNotRetryIOException(e); - } - } else { - throw origIOE; - } + } + + private RegionScanner rebuildIndices(RegionScanner innerScanner, final Region region, + final Scan scan, final RegionCoprocessorEnvironment env) throws IOException { + boolean oldCoproc = + region.getTableDescriptor().hasCoprocessor(Indexer.class.getCanonicalName()); + byte[] valueBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE); + IndexTool.IndexVerifyType verifyType = (valueBytes != null) + ? IndexTool.IndexVerifyType.fromValue(valueBytes) + : IndexTool.IndexVerifyType.NONE; + if (oldCoproc && verifyType == IndexTool.IndexVerifyType.ONLY) { + return new IndexerRegionScanner(innerScanner, region, scan, env, this); } - - private void separateLocalAndRemoteMutations(Table targetHTable, Region region, List mutations, - List localRegionMutations, List remoteRegionMutations, - boolean isPKChanging) { - boolean areMutationsInSameTable = areMutationsInSameTable(targetHTable, region); - //if we're writing to the same table, but the PK can change, that means that some - //mutations might be in our current region, and others in a different one. - if (areMutationsInSameTable && isPKChanging) { - RegionInfo regionInfo = region.getRegionInfo(); - for (Mutation mutation : mutations) { - if (regionInfo.containsRow(mutation.getRow())) { - localRegionMutations.add(mutation); - } else { - remoteRegionMutations.add(mutation); - } - } - } else if (areMutationsInSameTable && !isPKChanging) { - localRegionMutations.addAll(mutations); - } else { - remoteRegionMutations.addAll(mutations); - } + RegionScanner scanner; + if (!scan.isRaw()) { + Scan rawScan = new Scan(scan); + rawScan.setRaw(true); + rawScan.readAllVersions(); + rawScan.getFamilyMap().clear(); + adjustScanFilterForGlobalIndexRegionScanner(rawScan); + rawScan.setCacheBlocks(false); + for (byte[] family : scan.getFamilyMap().keySet()) { + rawScan.addFamily(family); + } + scanner = ((DelegateRegionScanner) innerScanner).getNewRegionScanner(rawScan); + innerScanner.close(); + } else { + if (adjustScanFilterForGlobalIndexRegionScanner(scan)) { + scanner = ((DelegateRegionScanner) innerScanner).getNewRegionScanner(scan); + innerScanner.close(); + } else { + scanner = innerScanner; + } } - - private boolean areMutationsInSameTable(Table targetHTable, Region region) { - return (targetHTable == null || Bytes.compareTo(targetHTable.getName().getName(), - region.getTableDescriptor().getTableName().getName()) == 0); + return getRegionScanner(scanner, region, scan, env, oldCoproc); + } + + private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats, + final Region region, final Scan scan, Configuration config) throws IOException { + ScannerContext groupScannerContext; + if (scan.isScanMetricsEnabled()) { + groupScannerContext = + ScannerContext.newBuilder().setTrackMetrics(scan.isScanMetricsEnabled()).build(); + } else { + groupScannerContext = null; } - - @Override - public InternalScanner preCompact(ObserverContext c, Store store, - InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { - - final TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable(); - // Compaction and split upcalls run with the effective user context of the requesting user. - // This will lead to failure of cross cluster RPC if the effective user is not - // the login user. 
Switch to the login user context to ensure we have the expected - // security context. - return User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public InternalScanner run() throws Exception { - InternalScanner internalScanner = scanner; - boolean keepDeleted = false; - boolean isMultiTenantIndexTable = false; - if (tableName.getNameAsString().startsWith(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)) { - isMultiTenantIndexTable = true; - } - final String fullTableName = isMultiTenantIndexTable ? - SchemaUtil.getParentTableNameFromIndexTable(tableName.getNameAsString(), - MetaDataUtil.VIEW_INDEX_TABLE_PREFIX) : - tableName.getNameAsString(); - PTable table = null; - Long maxLookbackAge = null; - try (PhoenixConnection conn = QueryUtil.getConnectionOnServer( - compactionConfig).unwrap(PhoenixConnection.class)) { - table = conn.getTableNoCache(fullTableName); - maxLookbackAge = table.getMaxLookbackAge(); - } catch (Exception e) { - if (e instanceof TableNotFoundException) { - LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " - + fullTableName); - // non-Phoenix HBase tables won't be found, do nothing - } else { - LOGGER.error( - "Unable to modify compaction scanner to retain deleted " - + "cells for a table with disabled Index; " - + fullTableName, e); - } - } - // The previous indexing design needs to retain delete markers and deleted - // cells to rebuild disabled indexes. Thus, we skip major compaction for - // them. GlobalIndexChecker is the coprocessor introduced by the current - // indexing design. - if (table != null && - !PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME.equals(fullTableName) && - !ServerUtil.hasCoprocessor(c.getEnvironment(), - GlobalIndexChecker.class.getName())) { - List - indexes = - PTableType.INDEX.equals(table.getType()) ? - Lists.newArrayList(table) : - table.getIndexes(); - // FIXME need to handle views and indexes on views as well - for (PTable index : indexes) { - if (index.getIndexDisableTimestamp() != 0) { - LOGGER.info("Modifying major compaction scanner to retain " - + "deleted cells for a table with disabled index: " - + fullTableName); - keepDeleted = true; - break; - } - } - } - if (table != null - && isPhoenixTableTTLEnabled(c.getEnvironment().getConfiguration())) { - internalScanner = - new CompactionScanner(c.getEnvironment(), store, scanner, - MetaDataUtil.getMaxLookbackAge( - c.getEnvironment().getConfiguration(), - maxLookbackAge), - request.isMajor() || request.isAllFiles(), - keepDeleted, table - ); - } - else if (isPhoenixTableTTLEnabled(c.getEnvironment().getConfiguration())) { - LOGGER.warn("Skipping compaction for table: {} " + - "as failed to retrieve PTable object", fullTableName); - } - if (scanType.equals(ScanType.COMPACT_DROP_DELETES)) { - try { - long clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis(); - DelegateRegionCoprocessorEnvironment - compactionConfEnv = - new DelegateRegionCoprocessorEnvironment(c.getEnvironment(), - ConnectionType.COMPACTION_CONNECTION); - StatisticsCollector - statisticsCollector = - StatisticsCollectorFactory.createStatisticsCollector( - compactionConfEnv, tableName.getNameAsString(), - clientTimeStamp, - store.getColumnFamilyDescriptor().getName()); - statisticsCollector.init(); - internalScanner = - statisticsCollector.createCompactionScanner(compactionConfEnv, - store, internalScanner); - } catch (Exception e) { - // If we can't reach the stats table, don't interrupt the normal - // compaction operation, just log a warning. 
- if (LOGGER.isWarnEnabled()) { - LOGGER.warn("Unable to collect stats for " + tableName, e); - } - } - } - return internalScanner; - } - }); + StatsCollectionCallable callable = + new StatsCollectionCallable(stats, region, innerScanner, config, scan); + byte[] asyncBytes = + scan.getAttribute(BaseScannerRegionObserverConstants.RUN_UPDATE_STATS_ASYNC_ATTRIB); + boolean async = false; + if (asyncBytes != null) { + async = Bytes.toBoolean(asyncBytes); } - - static PTable deserializeTable(byte[] b) { - try { - PTableProtos.PTable ptableProto = PTableProtos.PTable.parseFrom(b); - return PTableImpl.createFromProto(ptableProto); - } catch (IOException e) { - throw new RuntimeException(e); - } + long rowCount = 0; // in case of async, we report 0 as number of rows updated + StatisticsCollectionRunTracker statsRunTracker = + StatisticsCollectionRunTracker.getInstance(config); + final boolean runUpdateStats = statsRunTracker + .addUpdateStatsCommandRegion(region.getRegionInfo(), scan.getFamilyMap().keySet()); + if (runUpdateStats) { + if (!async) { + rowCount = callable.call(); + } else { + statsRunTracker.runTask(callable); + } + } else { + rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT; + LOGGER.info( + "UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region " + + region.getRegionInfo().getRegionNameAsString()); } + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + byte[] rowKey = getRowKeyForCollectStats(region, scan, isIncompatibleClient); + byte[] rowCountBytes = PLong.INSTANCE.toBytes(rowCount); + final Cell aggKeyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, + SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); + RegionScanner scanner = new BaseRegionScanner(innerScanner) { + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } - private RegionScanner getRegionScanner(final RegionScanner innerScanner, final Region region, final Scan scan, - final RegionCoprocessorEnvironment env, final boolean oldCoproc) - throws IOException { - if (oldCoproc) { - return new IndexerRegionScanner(innerScanner, region, scan, env, this); - } else { - if (region.getTableDescriptor().hasCoprocessor(GlobalIndexChecker.class.getCanonicalName())) { - return new IndexRepairRegionScanner(innerScanner, region, scan, env, this); - } else { - return new IndexRebuildRegionScanner(innerScanner, region, scan, env, this); - } - } - } + @Override + public boolean isFilterDone() { + return true; + } - private RegionScanner rebuildIndices(RegionScanner innerScanner, final Region region, final Scan scan, - final RegionCoprocessorEnvironment env) throws IOException { - boolean oldCoproc = region.getTableDescriptor().hasCoprocessor(Indexer.class.getCanonicalName()); - byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE); - IndexTool.IndexVerifyType verifyType = (valueBytes != null) ? 
- IndexTool.IndexVerifyType.fromValue(valueBytes) : IndexTool.IndexVerifyType.NONE; - if (oldCoproc && verifyType == IndexTool.IndexVerifyType.ONLY) { - return new IndexerRegionScanner(innerScanner, region, scan, env, this); - } - RegionScanner scanner; - if (!scan.isRaw()) { - Scan rawScan = new Scan(scan); - rawScan.setRaw(true); - rawScan.readAllVersions(); - rawScan.getFamilyMap().clear(); - adjustScanFilterForGlobalIndexRegionScanner(rawScan); - rawScan.setCacheBlocks(false); - for (byte[] family : scan.getFamilyMap().keySet()) { - rawScan.addFamily(family); - } - scanner = ((DelegateRegionScanner)innerScanner).getNewRegionScanner(rawScan); - innerScanner.close(); - } else { - if (adjustScanFilterForGlobalIndexRegionScanner(scan)) { - scanner = ((DelegateRegionScanner) innerScanner).getNewRegionScanner(scan); - innerScanner.close(); - } else { - scanner = innerScanner; - } + @Override + public void close() throws IOException { + // If we ran/scheduled StatsCollectionCallable the delegate + // scanner is closed there. Otherwise close it here. + if (!runUpdateStats) { + super.close(); } - return getRegionScanner(scanner, region, scan, env, oldCoproc); - } + } - private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats, - final Region region, final Scan scan, Configuration config) throws IOException { - ScannerContext groupScannerContext; - if (scan.isScanMetricsEnabled()) { - groupScannerContext = ScannerContext.newBuilder() - .setTrackMetrics(scan.isScanMetricsEnabled()).build(); - } else { - groupScannerContext = null; - } - StatsCollectionCallable callable = - new StatsCollectionCallable(stats, region, innerScanner, config, scan); - byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserverConstants.RUN_UPDATE_STATS_ASYNC_ATTRIB); - boolean async = false; - if (asyncBytes != null) { - async = Bytes.toBoolean(asyncBytes); - } - long rowCount = 0; // in case of async, we report 0 as number of rows updated - StatisticsCollectionRunTracker statsRunTracker = - StatisticsCollectionRunTracker.getInstance(config); - final boolean runUpdateStats = statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(), scan.getFamilyMap().keySet()); - if (runUpdateStats) { - if (!async) { - rowCount = callable.call(); - } else { - statsRunTracker.runTask(callable); - } - } else { - rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT; - LOGGER.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region " - + region.getRegionInfo().getRegionNameAsString()); + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { + if (groupScannerContext != null && scannerContext != null) { + ScannerContextUtil.updateMetrics(groupScannerContext, scannerContext); } - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - byte[] rowKey = getRowKeyForCollectStats(region, scan, isIncompatibleClient); - byte[] rowCountBytes = PLong.INSTANCE.toBytes(rowCount); - final Cell aggKeyValue = - PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length); - RegionScanner scanner = new BaseRegionScanner(innerScanner) { - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); - } - - @Override - public boolean isFilterDone() { - return true; - } - - @Override - public void close() throws IOException { - // If we ran/scheduled StatsCollectionCallable the 
delegate - // scanner is closed there. Otherwise close it here. - if (!runUpdateStats) { - super.close(); - } - } - - @Override - public boolean next(List results, ScannerContext scannerContext) - throws IOException { - if (groupScannerContext != null && scannerContext != null) { - ScannerContextUtil.updateMetrics(groupScannerContext, scannerContext); - } - return next(results); - } - - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); - } + return next(results); + } - @Override - public boolean next(List results) throws IOException { - results.add(aggKeyValue); - return false; - } + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } - @Override - public long getMaxResultSize() { - return scan.getMaxResultSize(); - } - }; - return scanner; - } + @Override + public boolean next(List results) throws IOException { + results.add(aggKeyValue); + return false; + } - private static byte[] getRowKeyForCollectStats(Region region, Scan scan, - boolean isIncompatibleClient) { - byte[] rowKey; - if (!isIncompatibleClient) { - byte[] startKey = scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW) == null ? - region.getRegionInfo().getStartKey() : scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW); - byte[] endKey = scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW) == null ? - region.getRegionInfo().getEndKey() : scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW); - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW) != - null && Bytes.toBoolean(scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW))) { - rowKey = startKey; - } else if (scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW) != - null && Bytes.toBoolean(scan.getAttribute( - BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW))) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } + @Override + public long getMaxResultSize() { + return scan.getMaxResultSize(); + } + }; + return scanner; + } + + private static byte[] getRowKeyForCollectStats(Region region, Scan scan, + boolean isIncompatibleClient) { + byte[] rowKey; + if (!isIncompatibleClient) { + byte[] startKey = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW) == null + ? region.getRegionInfo().getStartKey() + : scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_START_ROW); + byte[] endKey = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW) == null + ? 
region.getRegionInfo().getEndKey() + : scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_ACTUAL_STOP_ROW); + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if ( + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW) + != null + && Bytes.toBoolean( + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_START_ROW)) + ) { + rowKey = startKey; + } else if ( + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW) + != null + && Bytes.toBoolean( + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ANALYZE_INCLUDE_STOP_ROW)) + ) { + rowKey = endKey; } else { - rowKey = UNGROUPED_AGG_ROW_KEY; + rowKey = HConstants.EMPTY_END_ROW; } - return rowKey; + } + } else { + rowKey = UNGROUPED_AGG_ROW_KEY; + } + return rowKey; + } + + /** + * Callable to encapsulate the collection of stats triggered by UPDATE STATISTICS command. Package + * private for tests. + */ + static class StatsCollectionCallable implements Callable { + private final StatisticsCollector statsCollector; + private final Region region; + private final RegionScanner innerScanner; + private final Configuration config; + private final Scan scan; + + StatsCollectionCallable(StatisticsCollector s, Region r, RegionScanner rs, Configuration config, + Scan scan) { + this.statsCollector = s; + this.region = r; + this.innerScanner = rs; + this.config = config; + this.scan = scan; } - /** - * - * Callable to encapsulate the collection of stats triggered by - * UPDATE STATISTICS command. - * - * Package private for tests. - */ - static class StatsCollectionCallable implements Callable { - private final StatisticsCollector statsCollector; - private final Region region; - private final RegionScanner innerScanner; - private final Configuration config; - private final Scan scan; - - StatsCollectionCallable(StatisticsCollector s, Region r, RegionScanner rs, - Configuration config, Scan scan) { - this.statsCollector = s; - this.region = r; - this.innerScanner = rs; - this.config = config; - this.scan = scan; - } - - @Override - public Long call() throws IOException { - return collectStatsInternal(); - } - - private boolean areStatsBeingCollectedViaCompaction() { - return StatisticsCollectionRunTracker.getInstance(config) - .areStatsBeingCollectedOnCompaction(region.getRegionInfo()); - } + @Override + public Long call() throws IOException { + return collectStatsInternal(); + } - private long collectStatsInternal() throws IOException { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - region.startRegionOperation(); - boolean hasMore = false; - boolean noErrors = false; - boolean compactionRunning = areStatsBeingCollectedViaCompaction(); - long rowCount = 0; - try { - if (!compactionRunning) { - statsCollector.init(); - synchronized (innerScanner) { - do { - List results = new ArrayList<>(); - hasMore = innerScanner.nextRaw(results); - statsCollector.collectStatistics(results); - rowCount++; - compactionRunning = areStatsBeingCollectedViaCompaction(); - } while (hasMore && !compactionRunning); - noErrors = true; - } - } - return compactionRunning ? 
COMPACTION_UPDATE_STATS_ROW_COUNT : rowCount; - } catch (IOException e) { - LOGGER.error("IOException in update stats: " + Throwables.getStackTraceAsString(e)); - throw e; - } finally { - try { - if (noErrors && !compactionRunning) { - statsCollector.updateStatistics(region, scan); - LOGGER.info("UPDATE STATISTICS finished successfully for scanner: " - + innerScanner + ". Number of rows scanned: " + rowCount - + ". Time: " + (EnvironmentEdgeManager.currentTimeMillis() - startTime)); - } - if (compactionRunning) { - LOGGER.info("UPDATE STATISTICS stopped in between because major compaction was running for region " - + region.getRegionInfo().getRegionNameAsString()); - } - } finally { - try { - StatisticsCollectionRunTracker.getInstance(config).removeUpdateStatsCommandRegion(region.getRegionInfo(), scan.getFamilyMap().keySet()); - statsCollector.close(); - } finally { - try { - innerScanner.close(); - } finally { - region.closeRegionOperation(); - } - } - } - } - } + private boolean areStatsBeingCollectedViaCompaction() { + return StatisticsCollectionRunTracker.getInstance(config) + .areStatsBeingCollectedOnCompaction(region.getRegionInfo()); } - static List deserializeExpressions(byte[] b) { - ByteArrayInputStream stream = new ByteArrayInputStream(b); + private long collectStatsInternal() throws IOException { + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + region.startRegionOperation(); + boolean hasMore = false; + boolean noErrors = false; + boolean compactionRunning = areStatsBeingCollectedViaCompaction(); + long rowCount = 0; + try { + if (!compactionRunning) { + statsCollector.init(); + synchronized (innerScanner) { + do { + List results = new ArrayList<>(); + hasMore = innerScanner.nextRaw(results); + statsCollector.collectStatistics(results); + rowCount++; + compactionRunning = areStatsBeingCollectedViaCompaction(); + } while (hasMore && !compactionRunning); + noErrors = true; + } + } + return compactionRunning ? COMPACTION_UPDATE_STATS_ROW_COUNT : rowCount; + } catch (IOException e) { + LOGGER.error("IOException in update stats: " + Throwables.getStackTraceAsString(e)); + throw e; + } finally { try { - DataInputStream input = new DataInputStream(stream); - int size = WritableUtils.readVInt(input); - List selectExpressions = Lists.newArrayListWithExpectedSize(size); - for (int i = 0; i < size; i++) { - ExpressionType type = ExpressionType.values()[WritableUtils.readVInt(input)]; - Expression selectExpression = type.newInstance(); - selectExpression.readFields(input); - selectExpressions.add(selectExpression); - } - return selectExpressions; - } catch (IOException e) { - throw new RuntimeException(e); + if (noErrors && !compactionRunning) { + statsCollector.updateStatistics(region, scan); + LOGGER.info("UPDATE STATISTICS finished successfully for scanner: " + innerScanner + + ". Number of rows scanned: " + rowCount + ". 
Time: " + + (EnvironmentEdgeManager.currentTimeMillis() - startTime)); + } + if (compactionRunning) { + LOGGER.info( + "UPDATE STATISTICS stopped in between because major compaction was running for region " + + region.getRegionInfo().getRegionNameAsString()); + } } finally { + try { + StatisticsCollectionRunTracker.getInstance(config) + .removeUpdateStatsCommandRegion(region.getRegionInfo(), scan.getFamilyMap().keySet()); + statsCollector.close(); + } finally { try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); + innerScanner.close(); + } finally { + region.closeRegionOperation(); } + } } + } } - - // Don't allow splitting/closing if operations need read and write to same region are going on in the - // the coprocessors to avoid dead lock scenario. See PHOENIX-3111. - private void waitForScansToFinish(ObserverContext c) throws IOException { - int maxWaitTime = c.getEnvironment().getConfiguration().getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - long start = EnvironmentEdgeManager.currentTimeMillis(); - synchronized (lock) { - isRegionClosingOrSplitting = true; - while (scansReferenceCount > 0) { - try { - lock.wait(1000); - if (EnvironmentEdgeManager.currentTimeMillis() - start >= maxWaitTime) { - isRegionClosingOrSplitting = false; // must reset in case split is not retried - throw new IOException(String.format( - "Operations like local index building/delete/upsert select" - + " might be going on so not allowing to split/close. scansReferenceCount=%s region=%s", - scansReferenceCount, - c.getEnvironment().getRegionInfo().getRegionNameAsString())); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } + } + + static List deserializeExpressions(byte[] b) { + ByteArrayInputStream stream = new ByteArrayInputStream(b); + try { + DataInputStream input = new DataInputStream(stream); + int size = WritableUtils.readVInt(input); + List selectExpressions = Lists.newArrayListWithExpectedSize(size); + for (int i = 0; i < size; i++) { + ExpressionType type = ExpressionType.values()[WritableUtils.readVInt(input)]; + Expression selectExpression = type.newInstance(); + selectExpression.readFields(input); + selectExpressions.add(selectExpression); + } + return selectExpressions; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } - - @Override - public void preBulkLoadHFile(ObserverContext c, - List> familyPaths) throws IOException { - // Don't allow bulkload if operations need read and write to same region are going on in the - // the coprocessors to avoid dead lock scenario. See PHOENIX-3111. - synchronized (lock) { - if (scansReferenceCount > 0) { - throw new DoNotRetryIOException("Operations like local index building/delete/upsert select" - + " might be going on so not allowing to bulkload."); - } + } + + // Don't allow splitting/closing if operations need read and write to same region are going on in + // the + // the coprocessors to avoid dead lock scenario. See PHOENIX-3111. 
+ private void waitForScansToFinish(ObserverContext c) + throws IOException { + int maxWaitTime = c.getEnvironment().getConfiguration().getInt( + HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + long start = EnvironmentEdgeManager.currentTimeMillis(); + synchronized (lock) { + isRegionClosingOrSplitting = true; + while (scansReferenceCount > 0) { + try { + lock.wait(1000); + if (EnvironmentEdgeManager.currentTimeMillis() - start >= maxWaitTime) { + isRegionClosingOrSplitting = false; // must reset in case split is not retried + throw new IOException(String.format( + "Operations like local index building/delete/upsert select" + + " might be going on so not allowing to split/close. scansReferenceCount=%s region=%s", + scansReferenceCount, c.getEnvironment().getRegionInfo().getRegionNameAsString())); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } + } } + } - @Override - public void preClose(ObserverContext c, boolean abortRequested) - throws IOException { - waitForScansToFinish(c); - } - - @Override - protected boolean isRegionObserverFor(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG) != null; + @Override + public void preBulkLoadHFile(ObserverContext c, + List> familyPaths) throws IOException { + // Don't allow bulkload if operations need read and write to same region are going on in the + // the coprocessors to avoid dead lock scenario. See PHOENIX-3111. + synchronized (lock) { + if (scansReferenceCount > 0) { + throw new DoNotRetryIOException("Operations like local index building/delete/upsert select" + + " might be going on so not allowing to bulkload."); + } } + } + + @Override + public void preClose(ObserverContext c, boolean abortRequested) + throws IOException { + waitForScansToFinish(c); + } + + @Override + protected boolean isRegionObserverFor(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG) != null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java index dc574c015e1..4e58f033f78 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -123,623 +123,644 @@ public class UngroupedAggregateRegionScanner extends BaseRegionScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(UngroupedAggregateRegionScanner.class); - - private long pageSizeMs; - private int maxBatchSize = 0; - private Scan scan; - private RegionScanner innerScanner; - private Region region; - private final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver; - private final RegionCoprocessorEnvironment env; - private final boolean useQualifierAsIndex; - private boolean needToWrite = false; - private final Pair minMaxQualifiers; - private byte[][] values = null; - private final PTable.QualifierEncodingScheme encodingScheme; - private PTable writeToTable = null; - private PTable projectedTable = null; - private final boolean isDescRowKeyOrderUpgrade; - private final int offset; - private final boolean buildLocalIndex; - private final List indexMaintainers; - private boolean isPKChanging = false; - private final long ts; - private PhoenixTransactionProvider txnProvider = null; - private final UngroupedAggregateRegionObserver.MutationList indexMutations; - private boolean isDelete = false; - private final byte[] replayMutations; - private boolean isUpsert = false; - private List selectExpressions = null; - private byte[] deleteCQ = null; - private byte[] deleteCF = null; - private byte[] emptyCF = null; - private byte[] emptyCQ = null; - private final byte[] indexUUID; - private final byte[] txState; - private final byte[] clientVersionBytes; - private final long blockingMemStoreSize; - private long maxBatchSizeBytes = 0L; - private Table targetHTable = null; - private boolean incrScanRefCount = false; - private byte[] indexMaintainersPtr; - private boolean useIndexProto; - - public UngroupedAggregateRegionScanner(final ObserverContext c, - final RegionScanner innerScanner, final Region region, final Scan scan, - final RegionCoprocessorEnvironment env, - final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) - throws IOException, SQLException{ - super(innerScanner); - this.env = env; - this.region = region; - this.scan = scan; - this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver; - this.innerScanner = innerScanner; - Configuration conf = env.getConfiguration(); - pageSizeMs = getPageSizeMsForRegionScanner(scan); - ts = scan.getTimeRange().getMax(); - boolean localIndexScan = ScanUtil.isLocalIndex(scan); - encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); - int offsetToBe = 0; - if (localIndexScan) { - /* - * For local indexes, we need to set an offset on row key expressions to skip - * the region start key. - */ - offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? 
region.getRegionInfo().getStartKey().length : - region.getRegionInfo().getEndKey().length; - ScanUtil.setRowKeyOffset(scan, offsetToBe); - } - offset = offsetToBe; - - byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY); - isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null; - if (isDescRowKeyOrderUpgrade) { - LOGGER.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString()); - projectedTable = deserializeTable(descRowKeyTableBytes); - try { - writeToTable = PTableImpl.builderWithColumns(projectedTable, - getColumnsToClone(projectedTable)) - .setRowKeyOrderOptimizable(true) - .build(); - } catch (SQLException e) { - ClientUtil.throwIOException("Upgrade failed", e); // Impossible - } - values = new byte[projectedTable.getPKColumns().size()][]; - } - boolean useProto = false; - byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO); - useProto = localIndexBytes != null; - if (localIndexBytes == null) { - localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD); - } - indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto); - indexMutations = localIndexBytes == null ? new UngroupedAggregateRegionObserver.MutationList() : new UngroupedAggregateRegionObserver.MutationList(1024); - byte[] transforming = scan.getAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING); - - replayMutations = scan.getAttribute(REPLAY_WRITES); - indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID); - txState = scan.getAttribute(BaseScannerRegionObserverConstants.TX_STATE); - clientVersionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); - if (txState != null) { - int clientVersion = clientVersionBytes == null ? ScanUtil.UNKNOWN_CLIENT_VERSION : Bytes.toInt(clientVersionBytes); - txnProvider = TransactionFactory.getTransactionProvider(txState, clientVersion); - } - byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE); - if (upsertSelectTable != null) { - isUpsert = true; - projectedTable = deserializeTable(upsertSelectTable); - //The Connection is a singleton. It MUST NOT be closed. 
- targetHTable = ServerUtil.ConnectionFactory.getConnection( - ServerUtil.ConnectionType.DEFAULT_SERVER_CONNECTION, - env).getTable(TableName.valueOf(projectedTable.getPhysicalName().getBytes())); - selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_EXPRS)); - values = new byte[projectedTable.getPKColumns().size()][]; - isPKChanging = ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions); - } else { - byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_AGG); - isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0; - if (!isDelete) { - deleteCF = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_CF); - deleteCQ = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_CQ); - } - emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_CF); - emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER); - if (emptyCF != null && emptyCQ == null) { - // In case some old version sets EMPTY_CF but not EMPTY_COLUMN_QUALIFIER - // Not sure if it's really needed, but better safe than sorry - emptyCQ = QueryConstants.EMPTY_COLUMN_BYTES; - } - - } - ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); - useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); - - /** - * Slow down the writes if the memstore size more than - * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size - * bytes. This avoids flush storm to hdfs for cases like index building where reads and - * write happen to all the table regions in the server. - */ - blockingMemStoreSize = getBlockingMemstoreSize(region, conf) ; - - buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan; - if(buildLocalIndex) { - checkForLocalIndexColumnFamilies(region, indexMaintainers); - } - if (isDescRowKeyOrderUpgrade || isDelete || isUpsert - || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) { - needToWrite = true; - if((isUpsert && (targetHTable == null || - !targetHTable.getName().equals(region.getTableDescriptor().getTableName())))) { - needToWrite = false; - } - maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - maxBatchSizeBytes = conf.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); - } - minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan))); - } - useIndexProto = true; - indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); - // for backward compatiblity fall back to look by the old attribute - if (indexMaintainersPtr == null) { - indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); - useIndexProto = false; - } - - if (needToWrite) { - ungroupedAggregateRegionObserver.incrementScansReferenceCount(); - incrScanRefCount = true; - } + private static final Logger LOGGER = + LoggerFactory.getLogger(UngroupedAggregateRegionScanner.class); + + private long pageSizeMs; + private int maxBatchSize = 0; + private Scan scan; + private RegionScanner innerScanner; + private Region region; + private final 
UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver; + private final RegionCoprocessorEnvironment env; + private final boolean useQualifierAsIndex; + private boolean needToWrite = false; + private final Pair minMaxQualifiers; + private byte[][] values = null; + private final PTable.QualifierEncodingScheme encodingScheme; + private PTable writeToTable = null; + private PTable projectedTable = null; + private final boolean isDescRowKeyOrderUpgrade; + private final int offset; + private final boolean buildLocalIndex; + private final List indexMaintainers; + private boolean isPKChanging = false; + private final long ts; + private PhoenixTransactionProvider txnProvider = null; + private final UngroupedAggregateRegionObserver.MutationList indexMutations; + private boolean isDelete = false; + private final byte[] replayMutations; + private boolean isUpsert = false; + private List selectExpressions = null; + private byte[] deleteCQ = null; + private byte[] deleteCF = null; + private byte[] emptyCF = null; + private byte[] emptyCQ = null; + private final byte[] indexUUID; + private final byte[] txState; + private final byte[] clientVersionBytes; + private final long blockingMemStoreSize; + private long maxBatchSizeBytes = 0L; + private Table targetHTable = null; + private boolean incrScanRefCount = false; + private byte[] indexMaintainersPtr; + private boolean useIndexProto; + + public UngroupedAggregateRegionScanner(final ObserverContext c, + final RegionScanner innerScanner, final Region region, final Scan scan, + final RegionCoprocessorEnvironment env, + final UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) + throws IOException, SQLException { + super(innerScanner); + this.env = env; + this.region = region; + this.scan = scan; + this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver; + this.innerScanner = innerScanner; + Configuration conf = env.getConfiguration(); + pageSizeMs = getPageSizeMsForRegionScanner(scan); + ts = scan.getTimeRange().getMax(); + boolean localIndexScan = ScanUtil.isLocalIndex(scan); + encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); + int offsetToBe = 0; + if (localIndexScan) { + /* + * For local indexes, we need to set an offset on row key expressions to skip the region start + * key. + */ + offsetToBe = region.getRegionInfo().getStartKey().length != 0 + ? region.getRegionInfo().getStartKey().length + : region.getRegionInfo().getEndKey().length; + ScanUtil.setRowKeyOffset(scan, offsetToBe); } - - @Override - public RegionInfo getRegionInfo() { - return region.getRegionInfo(); + offset = offsetToBe; + + byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY); + isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null; + if (isDescRowKeyOrderUpgrade) { + LOGGER.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString()); + projectedTable = deserializeTable(descRowKeyTableBytes); + try { + writeToTable = + PTableImpl.builderWithColumns(projectedTable, getColumnsToClone(projectedTable)) + .setRowKeyOrderOptimizable(true).build(); + } catch (SQLException e) { + ClientUtil.throwIOException("Upgrade failed", e); // Impossible + } + values = new byte[projectedTable.getPKColumns().size()][]; + } + boolean useProto = false; + byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO); + useProto = localIndexBytes != null; + if (localIndexBytes == null) { + localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD); } + indexMaintainers = + localIndexBytes == null ? 
null : IndexMaintainer.deserialize(localIndexBytes, useProto); + indexMutations = localIndexBytes == null + ? new UngroupedAggregateRegionObserver.MutationList() + : new UngroupedAggregateRegionObserver.MutationList(1024); + byte[] transforming = scan.getAttribute(BaseScannerRegionObserverConstants.DO_TRANSFORMING); + + replayMutations = scan.getAttribute(REPLAY_WRITES); + indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID); + txState = scan.getAttribute(BaseScannerRegionObserverConstants.TX_STATE); + clientVersionBytes = scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION); + if (txState != null) { + int clientVersion = clientVersionBytes == null + ? ScanUtil.UNKNOWN_CLIENT_VERSION + : Bytes.toInt(clientVersionBytes); + txnProvider = TransactionFactory.getTransactionProvider(txState, clientVersion); + } + byte[] upsertSelectTable = + scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_TABLE); + if (upsertSelectTable != null) { + isUpsert = true; + projectedTable = deserializeTable(upsertSelectTable); + // The Connection is a singleton. It MUST NOT be closed. + targetHTable = ServerUtil.ConnectionFactory + .getConnection(ServerUtil.ConnectionType.DEFAULT_SERVER_CONNECTION, env) + .getTable(TableName.valueOf(projectedTable.getPhysicalName().getBytes())); + selectExpressions = deserializeExpressions( + scan.getAttribute(BaseScannerRegionObserverConstants.UPSERT_SELECT_EXPRS)); + values = new byte[projectedTable.getPKColumns().size()][]; + isPKChanging = + ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions); + } else { + byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_AGG); + isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0; + if (!isDelete) { + deleteCF = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_CF); + deleteCQ = scan.getAttribute(BaseScannerRegionObserverConstants.DELETE_CQ); + } + emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_CF); + emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER); + if (emptyCF != null && emptyCQ == null) { + // In case some old version sets EMPTY_CF but not EMPTY_COLUMN_QUALIFIER + // Not sure if it's really needed, but better safe than sorry + emptyCQ = QueryConstants.EMPTY_COLUMN_BYTES; + } - @Override - public boolean isFilterDone() { - return false; + } + ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); + useQualifierAsIndex = + EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)); + + /** + * Slow down the writes if the memstore size more than (hbase.hregion.memstore.block.multiplier + * - 1) times hbase.hregion.memstore.flush.size bytes. This avoids flush storm to hdfs for cases + * like index building where reads and write happen to all the table regions in the server. 
+ */ + blockingMemStoreSize = getBlockingMemstoreSize(region, conf); + + buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan; + if (buildLocalIndex) { + checkForLocalIndexColumnFamilies(region, indexMaintainers); + } + if ( + isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) + || emptyCF != null || buildLocalIndex + ) { + needToWrite = true; + if ( + (isUpsert && (targetHTable == null + || !targetHTable.getName().equals(region.getTableDescriptor().getTableName()))) + ) { + needToWrite = false; + } + maxBatchSize = + conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + maxBatchSizeBytes = conf.getLongBytes(MUTATE_BATCH_SIZE_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES); + } + minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(LogUtil.addCustomAnnotations( + "Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), + ScanUtil.getCustomAnnotations(scan))); + } + useIndexProto = true; + indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); + // for backward compatiblity fall back to look by the old attribute + if (indexMaintainersPtr == null) { + indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD); + useIndexProto = false; } - @Override - public void close() throws IOException { - if (needToWrite && incrScanRefCount) { - ungroupedAggregateRegionObserver.decrementScansReferenceCount(); - } + if (needToWrite) { + ungroupedAggregateRegionObserver.incrementScansReferenceCount(); + incrScanRefCount = true; + } + } + + @Override + public RegionInfo getRegionInfo() { + return region.getRegionInfo(); + } + + @Override + public boolean isFilterDone() { + return false; + } + + @Override + public void close() throws IOException { + if (needToWrite && incrScanRefCount) { + ungroupedAggregateRegionObserver.decrementScansReferenceCount(); + } + try { + if (targetHTable != null) { try { - if (targetHTable != null) { - try { - targetHTable.close(); - } catch (IOException e) { - LOGGER.error("Closing table: " + targetHTable + " failed: ", e); - } - } - } finally { - innerScanner.close(); + targetHTable.close(); + } catch (IOException e) { + LOGGER.error("Closing table: " + targetHTable + " failed: ", e); } + } + } finally { + innerScanner.close(); } - - boolean descRowKeyOrderUpgrade(List results, ImmutableBytesWritable ptr, - UngroupedAggregateRegionObserver.MutationList mutations) throws IOException { - Arrays.fill(values, null); - Cell firstKV = results.get(0); - RowKeySchema schema = projectedTable.getRowKeySchema(); - int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr); - for (int i = 0; i < schema.getFieldCount(); i++) { - Boolean hasValue = schema.next(ptr, i, maxOffset); - if (hasValue == null) { - break; - } - ValueSchema.Field field = schema.getField(i); - if (field.getSortOrder() == SortOrder.DESC) { - // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case - if (field.getDataType().isArrayType()) { - field.getDataType().coerceBytes(ptr, null, field.getDataType(), - field.getMaxLength(), field.getScale(), field.getSortOrder(), - field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte - } - // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space 
characters - else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) { - int len = ptr.getLength(); - while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) { - len--; - } - ptr.set(ptr.get(), ptr.getOffset(), len); - // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171) - } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) { - byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()); - ptr.set(invertedBytes); - } - } else if (field.getDataType() == PBinary.INSTANCE) { - // Remove trailing space characters so that the setValues call below will replace them - // with the correct zero byte character. Note this is somewhat dangerous as these - // could be legit, but I don't know what the alternative is. - int len = ptr.getLength(); - while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) { - len--; - } - ptr.set(ptr.get(), ptr.getOffset(), len); - } - values[i] = ptr.copyBytes(); - } - writeToTable.newKey(ptr, values); - if (Bytes.compareTo( - firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), - ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) { - return false; + } + + boolean descRowKeyOrderUpgrade(List results, ImmutableBytesWritable ptr, + UngroupedAggregateRegionObserver.MutationList mutations) throws IOException { + Arrays.fill(values, null); + Cell firstKV = results.get(0); + RowKeySchema schema = projectedTable.getRowKeySchema(); + int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, + firstKV.getRowLength(), ptr); + for (int i = 0; i < schema.getFieldCount(); i++) { + Boolean hasValue = schema.next(ptr, i, maxOffset); + if (hasValue == null) { + break; + } + ValueSchema.Field field = schema.getField(i); + if (field.getSortOrder() == SortOrder.DESC) { + // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this + // case + if (field.getDataType().isArrayType()) { + field.getDataType().coerceBytes(ptr, null, field.getDataType(), field.getMaxLength(), + field.getScale(), field.getSortOrder(), field.getMaxLength(), field.getScale(), + field.getSortOrder(), true); // force to use correct separator byte } - byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr); - if (offset > 0) { // for local indexes (prepend region start key) - byte[] newRowWithOffset = new byte[offset + newRow.length]; - System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset); - System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length); - newRow = newRowWithOffset; - } - byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength()); - for (Cell cell : results) { - // Copy existing cell but with new row key - Cell newCell = - CellBuilderFactory.create(CellBuilderType.DEEP_COPY). - setRow(newRow). - setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()). - setQualifier(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength()). - setTimestamp(cell.getTimestamp()). - setType(cell.getType()).setValue(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()).build(); - switch (cell.getType()) { - case Put: - // If Put, point delete old Put - Delete del = new Delete(oldRow); - Cell newDelCell = - CellBuilderFactory.create(CellBuilderType.DEEP_COPY). - setRow(newRow). 
- setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()). - setQualifier(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength()). - setTimestamp(cell.getTimestamp()). - setType(Cell.Type.Delete). - setValue(ByteUtil.EMPTY_BYTE_ARRAY, - 0, 0).build(); - del.add(newDelCell); - mutations.add(del); - - Put put = new Put(newRow); - put.add(newCell); - mutations.add(put); - break; - case Delete: - case DeleteColumn: - case DeleteFamily: - case DeleteFamilyVersion: - Delete delete = new Delete(newRow); - delete.add(newCell); - mutations.add(delete); - break; - } + // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing + // space characters + else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) { + int len = ptr.getLength(); + while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) { + len--; + } + ptr.set(ptr.get(), ptr.getOffset(), len); + // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they + // should be (PHOENIX-2171) + } else + if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) { + byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength()); + ptr.set(invertedBytes); + } + } else if (field.getDataType() == PBinary.INSTANCE) { + // Remove trailing space characters so that the setValues call below will replace them + // with the correct zero byte character. Note this is somewhat dangerous as these + // could be legit, but I don't know what the alternative is. + int len = ptr.getLength(); + while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) { + len--; } - return true; + ptr.set(ptr.get(), ptr.getOffset(), len); + } + values[i] = ptr.copyBytes(); } - - void buildLocalIndex(Tuple result, List results, ImmutableBytesWritable ptr) throws IOException { - for (IndexMaintainer maintainer : indexMaintainers) { - if (!results.isEmpty()) { - result.getKey(ptr); - ValueGetter valueGetter = - maintainer.createGetterFromKeyValues( - ImmutableBytesPtr.copyBytesIfNecessary(ptr), - results); - Put put = maintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, - valueGetter, ptr, results.get(0).getTimestamp(), - env.getRegion().getRegionInfo().getStartKey(), - env.getRegion().getRegionInfo().getEndKey(), - false); - - if (txnProvider != null) { - put = txnProvider.markPutAsCommitted(put, ts, ts); - } - indexMutations.add(put); - } - } - result.setKeyValues(results); + writeToTable.newKey(ptr, values); + if ( + Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset, + firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0 + ) { + return false; } - void deleteRow(List results, UngroupedAggregateRegionObserver.MutationList mutations) { - Cell firstKV = results.get(0); - Delete delete = new Delete(firstKV.getRowArray(), - firstKV.getRowOffset(), firstKV.getRowLength(),ts); - if (replayMutations != null) { - delete.setAttribute(REPLAY_WRITES, replayMutations); - } - byte[] sourceOperationBytes = - scan.getAttribute(SOURCE_OPERATION_ATTRIB); - if (sourceOperationBytes != null) { - delete.setAttribute(SOURCE_OPERATION_ATTRIB, sourceOperationBytes); + byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr); + if (offset > 0) { // for local indexes (prepend region start key) + byte[] newRowWithOffset = new byte[offset + newRow.length]; + System.arraycopy(firstKV.getRowArray(), 
firstKV.getRowOffset(), newRowWithOffset, 0, offset); + System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length); + newRow = newRowWithOffset; + } + byte[] oldRow = + Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength()); + for (Cell cell : results) { + // Copy existing cell but with new row key + Cell newCell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(newRow) + .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) + .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + .setTimestamp(cell.getTimestamp()).setType(cell.getType()) + .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()).build(); + switch (cell.getType()) { + case Put: + // If Put, point delete old Put + Delete del = new Delete(oldRow); + Cell newDelCell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(newRow) + .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) + .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + .setTimestamp(cell.getTimestamp()).setType(Cell.Type.Delete) + .setValue(ByteUtil.EMPTY_BYTE_ARRAY, 0, 0).build(); + del.add(newDelCell); + mutations.add(del); + + Put put = new Put(newRow); + put.add(newCell); + mutations.add(put); + break; + case Delete: + case DeleteColumn: + case DeleteFamily: + case DeleteFamilyVersion: + Delete delete = new Delete(newRow); + delete.add(newCell); + mutations.add(delete); + break; + } + } + return true; + } + + void buildLocalIndex(Tuple result, List results, ImmutableBytesWritable ptr) + throws IOException { + for (IndexMaintainer maintainer : indexMaintainers) { + if (!results.isEmpty()) { + result.getKey(ptr); + ValueGetter valueGetter = maintainer + .createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), results); + Put put = maintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, valueGetter, ptr, + results.get(0).getTimestamp(), env.getRegion().getRegionInfo().getStartKey(), + env.getRegion().getRegionInfo().getEndKey(), false); + + if (txnProvider != null) { + put = txnProvider.markPutAsCommitted(put, ts, ts); } - - mutations.add(delete); + indexMutations.add(put); + } + } + result.setKeyValues(results); + } + + void deleteRow(List results, UngroupedAggregateRegionObserver.MutationList mutations) { + Cell firstKV = results.get(0); + Delete delete = + new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts); + if (replayMutations != null) { + delete.setAttribute(REPLAY_WRITES, replayMutations); + } + byte[] sourceOperationBytes = scan.getAttribute(SOURCE_OPERATION_ATTRIB); + if (sourceOperationBytes != null) { + delete.setAttribute(SOURCE_OPERATION_ATTRIB, sourceOperationBytes); } - void deleteCForQ(Tuple result, List results, UngroupedAggregateRegionObserver.MutationList mutations) { - // No need to search for delete column, since we project only it - // if no empty key value is being set - if (emptyCF == null || - result.getValue(deleteCF, deleteCQ) != null) { - Delete delete = new Delete(results.get(0).getRowArray(), - results.get(0).getRowOffset(), - results.get(0).getRowLength()); - delete.addColumns(deleteCF, deleteCQ, ts); - // TODO: We need to set SOURCE_OPERATION_ATTRIB here also. The control will come here if - // TODO: we drop a column. We also delete metadata from SYSCAT table for the dropped column - // TODO: and delete the column. 
In short, we need to set this attribute for the DM for SYSCAT metadata - // TODO: and for data table rows. - mutations.add(delete); - } + mutations.add(delete); + } + + void deleteCForQ(Tuple result, List results, + UngroupedAggregateRegionObserver.MutationList mutations) { + // No need to search for delete column, since we project only it + // if no empty key value is being set + if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) { + Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), + results.get(0).getRowLength()); + delete.addColumns(deleteCF, deleteCQ, ts); + // TODO: We need to set SOURCE_OPERATION_ATTRIB here also. The control will come here if + // TODO: we drop a column. We also delete metadata from SYSCAT table for the dropped column + // TODO: and delete the column. In short, we need to set this attribute for the DM for SYSCAT + // metadata + // TODO: and for data table rows. + mutations.add(delete); } - void upsert(Tuple result, ImmutableBytesWritable ptr, UngroupedAggregateRegionObserver.MutationList mutations) { - Arrays.fill(values, null); - int bucketNumOffset = 0; - if (projectedTable.getBucketNum() != null) { - values[0] = new byte[] { 0 }; - bucketNumOffset = 1; - } - int i = bucketNumOffset; - List projectedColumns = projectedTable.getColumns(); - for (; i < projectedTable.getPKColumns().size(); i++) { - Expression expression = selectExpressions.get(i - bucketNumOffset); - if (expression.evaluate(result, ptr)) { - values[i] = ptr.copyBytes(); - // If SortOrder from expression in SELECT doesn't match the - // column being projected into then invert the bits. - if (expression.getSortOrder() != - projectedColumns.get(i).getSortOrder()) { - SortOrder.invert(values[i], 0, values[i], 0, - values[i].length); - } - } else { - values[i] = ByteUtil.EMPTY_BYTE_ARRAY; - } - } - projectedTable.newKey(ptr, values); - PRow row = projectedTable.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); - for (; i < projectedColumns.size(); i++) { - Expression expression = selectExpressions.get(i - bucketNumOffset); - if (expression.evaluate(result, ptr)) { - PColumn column = projectedColumns.get(i); - if (!column.getDataType().isSizeCompatible(ptr, null, - expression.getDataType(), expression.getSortOrder(), - expression.getMaxLength(), expression.getScale(), - column.getMaxLength(), column.getScale())) { - throw new DataExceedsCapacityException( - column.getDataType(), - column.getMaxLength(), - column.getScale(), - column.getName().getString()); - } - column.getDataType().coerceBytes(ptr, null, - expression.getDataType(), expression.getMaxLength(), - expression.getScale(), expression.getSortOrder(), - column.getMaxLength(), column.getScale(), - column.getSortOrder(), projectedTable.rowKeyOrderOptimizable()); - byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); - row.setValue(column, bytes); - } - } - for (Mutation mutation : row.toRowMutations()) { - if (replayMutations != null) { - mutation.setAttribute(REPLAY_WRITES, replayMutations); - } else if (txnProvider != null && projectedTable.getType() == PTableType.INDEX) { - mutation = txnProvider.markPutAsCommitted((Put)mutation, ts, ts); - } - mutations.add(mutation); - } - for (i = 0; i < selectExpressions.size(); i++) { - selectExpressions.get(i).reset(); + } + + void upsert(Tuple result, ImmutableBytesWritable ptr, + UngroupedAggregateRegionObserver.MutationList mutations) { + Arrays.fill(values, null); + int bucketNumOffset = 0; + if (projectedTable.getBucketNum() != null) { + 
values[0] = new byte[] { 0 }; + bucketNumOffset = 1; + } + int i = bucketNumOffset; + List projectedColumns = projectedTable.getColumns(); + for (; i < projectedTable.getPKColumns().size(); i++) { + Expression expression = selectExpressions.get(i - bucketNumOffset); + if (expression.evaluate(result, ptr)) { + values[i] = ptr.copyBytes(); + // If SortOrder from expression in SELECT doesn't match the + // column being projected into then invert the bits. + if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) { + SortOrder.invert(values[i], 0, values[i], 0, values[i].length); } + } else { + values[i] = ByteUtil.EMPTY_BYTE_ARRAY; + } } - - void insertEmptyKeyValue(List results, UngroupedAggregateRegionObserver.MutationList mutations) { - Set timeStamps = - Sets.newHashSetWithExpectedSize(results.size()); - for (Cell kv : results) { - long kvts = kv.getTimestamp(); - if (!timeStamps.contains(kvts)) { - Put put = new Put(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength()); - // The value is not dependent on encoding ("x") - put.addColumn(emptyCF, emptyCQ, kvts, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - mutations.add(put); - timeStamps.add(kvts); - } + projectedTable.newKey(ptr, values); + PRow row = projectedTable.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); + for (; i < projectedColumns.size(); i++) { + Expression expression = selectExpressions.get(i - bucketNumOffset); + if (expression.evaluate(result, ptr)) { + PColumn column = projectedColumns.get(i); + if ( + !column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(), + expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), + column.getMaxLength(), column.getScale()) + ) { + throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), + column.getScale(), column.getName().getString()); } + column.getDataType().coerceBytes(ptr, null, expression.getDataType(), + expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), + column.getMaxLength(), column.getScale(), column.getSortOrder(), + projectedTable.rowKeyOrderOptimizable()); + byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); + row.setValue(column, bytes); + } } - - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); + for (Mutation mutation : row.toRowMutations()) { + if (replayMutations != null) { + mutation.setAttribute(REPLAY_WRITES, replayMutations); + } else if (txnProvider != null && projectedTable.getType() == PTableType.INDEX) { + mutation = txnProvider.markPutAsCommitted((Put) mutation, ts, ts); + } + mutations.add(mutation); } - - @Override - public boolean next(List resultsToReturn) throws IOException { - return next(resultsToReturn, null); + for (i = 0; i < selectExpressions.size(); i++) { + selectExpressions.get(i).reset(); } - - @Override - public boolean next(List resultsToReturn, ScannerContext scannerContext) - throws IOException { - boolean hasMore; - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - Configuration conf = env.getConfiguration(); - final TenantCache tenantCache = GlobalCache.getTenantCache(env, ScanUtil.getTenantId(scan)); - try (MemoryManager.MemoryChunk em = tenantCache.getMemoryManager().allocate(0)) { - Aggregators aggregators = ServerAggregators.deserialize( - scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), conf, em); - Aggregator[] rowAggregators = aggregators.getAggregators(); - 
aggregators.reset(rowAggregators); - Cell lastCell = null; - boolean hasAny = false; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); - UngroupedAggregateRegionObserver.MutationList mutations = new UngroupedAggregateRegionObserver.MutationList(); - if (isDescRowKeyOrderUpgrade || isDelete || isUpsert - || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) { - mutations = new UngroupedAggregateRegionObserver.MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10)); - } - region.startRegionOperation(); - try { - synchronized (innerScanner) { - do { - ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); - List results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList(); - // Results are potentially returned even when the return value of s.next is false - // since this is an indication of whether or not there are more values after the - // ones returned - hasMore = (scannerContext == null) - ? innerScanner.nextRaw(results) - : innerScanner.nextRaw(results, scannerContext); - if (isDummy(results)) { - if (!hasAny) { - resultsToReturn.addAll(results); - return true; - } - break; - } - if (!results.isEmpty()) { - lastCell = results.get(0); - result.setKeyValues(results); - if (isDescRowKeyOrderUpgrade) { - if (!descRowKeyOrderUpgrade(results, ptr, mutations)) { - continue; - } - } else if (buildLocalIndex) { - buildLocalIndex(result, results, ptr); - } else if (isDelete) { - deleteRow(results, mutations); - } else if (isUpsert) { - upsert(result, ptr, mutations); - } else if (deleteCF != null && deleteCQ != null) { - deleteCForQ(result, results, mutations); - } - if (emptyCF != null) { - /* - * If we've specified an emptyCF, then we need to insert an empty - * key value "retroactively" for any key value that is visible at - * the timestamp that the DDL was issued. Key values that are not - * visible at this timestamp will not ever be projected up to - * scans past this timestamp, so don't need to be considered. - * We insert one empty key value per row per timestamp. 
- */ - insertEmptyKeyValue(results, mutations); - } - if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) { - annotateAndCommit(mutations); - } - // Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config - - if (ServerUtil.readyToCommit(indexMutations.size(), indexMutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) { - setIndexAndTransactionProperties(indexMutations, indexUUID, indexMaintainersPtr, txState, clientVersionBytes, useIndexProto); - ungroupedAggregateRegionObserver.commitBatch(region, indexMutations, blockingMemStoreSize); - indexMutations.clear(); - } - aggregators.aggregate(rowAggregators, result); - hasAny = true; - } - } while (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) < pageSizeMs); - if (!mutations.isEmpty()) { - annotateAndCommit(mutations); - } - if (!indexMutations.isEmpty()) { - ungroupedAggregateRegionObserver.commitBatch(region, indexMutations, blockingMemStoreSize); - indexMutations.clear(); - } - } - } catch (InsufficientMemoryException e) { - throw new DoNotRetryIOException(e); - } catch (DataExceedsCapacityException e) { - throw new DoNotRetryIOException(e.getMessage(), e); - } catch (Throwable e) { - LOGGER.error("Exception in UngroupedAggregateRegionScanner for region " - + region.getRegionInfo().getRegionNameAsString(), e); - throw e; - } finally { - region.closeRegionOperation(); + } + + void insertEmptyKeyValue(List results, + UngroupedAggregateRegionObserver.MutationList mutations) { + Set timeStamps = Sets.newHashSetWithExpectedSize(results.size()); + for (Cell kv : results) { + long kvts = kv.getTimestamp(); + if (!timeStamps.contains(kvts)) { + Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()); + // The value is not dependent on encoding ("x") + put.addColumn(emptyCF, emptyCQ, kvts, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + mutations.add(put); + timeStamps.add(kvts); + } + } + } + + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } + + @Override + public boolean next(List resultsToReturn) throws IOException { + return next(resultsToReturn, null); + } + + @Override + public boolean next(List resultsToReturn, ScannerContext scannerContext) + throws IOException { + boolean hasMore; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + Configuration conf = env.getConfiguration(); + final TenantCache tenantCache = GlobalCache.getTenantCache(env, ScanUtil.getTenantId(scan)); + try (MemoryManager.MemoryChunk em = tenantCache.getMemoryManager().allocate(0)) { + Aggregators aggregators = ServerAggregators + .deserialize(scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), conf, em); + Aggregator[] rowAggregators = aggregators.getAggregators(); + aggregators.reset(rowAggregators); + Cell lastCell = null; + boolean hasAny = false; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Tuple result = + useQualifierAsIndex ? 
new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); + UngroupedAggregateRegionObserver.MutationList mutations = + new UngroupedAggregateRegionObserver.MutationList(); + if ( + isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) + || emptyCF != null || buildLocalIndex + ) { + mutations = new UngroupedAggregateRegionObserver.MutationList( + Ints.saturatedCast(maxBatchSize + maxBatchSize / 10)); + } + region.startRegionOperation(); + try { + synchronized (innerScanner) { + do { + ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting(); + List results = useQualifierAsIndex + ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), + minMaxQualifiers.getSecond(), encodingScheme) + : new ArrayList(); + // Results are potentially returned even when the return value of s.next is false + // since this is an indication of whether or not there are more values after the + // ones returned + hasMore = (scannerContext == null) + ? innerScanner.nextRaw(results) + : innerScanner.nextRaw(results, scannerContext); + if (isDummy(results)) { + if (!hasAny) { + resultsToReturn.addAll(results); + return true; + } + break; } - Cell keyValue; - if (hasAny) { - byte[] value = aggregators.toBytes(rowAggregators); - if (pageSizeMs == Long.MAX_VALUE) { - byte[] rowKey; - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - if (!isIncompatibleClient) { - rowKey = CellUtil.cloneRow(lastCell); - } else { - // Paging is not set. To be compatible with older clients, do not set the row key - rowKey = UNGROUPED_AGG_ROW_KEY; - } - keyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, - SINGLE_COLUMN, - AGG_TIMESTAMP, value, 0, value.length); - } else { - keyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - AGG_TIMESTAMP, value, 0, value.length); + if (!results.isEmpty()) { + lastCell = results.get(0); + result.setKeyValues(results); + if (isDescRowKeyOrderUpgrade) { + if (!descRowKeyOrderUpgrade(results, ptr, mutations)) { + continue; } - resultsToReturn.add(keyValue); + } else if (buildLocalIndex) { + buildLocalIndex(result, results, ptr); + } else if (isDelete) { + deleteRow(results, mutations); + } else if (isUpsert) { + upsert(result, ptr, mutations); + } else if (deleteCF != null && deleteCQ != null) { + deleteCForQ(result, results, mutations); + } + if (emptyCF != null) { + /* + * If we've specified an emptyCF, then we need to insert an empty key value + * "retroactively" for any key value that is visible at the timestamp that the DDL + * was issued. Key values that are not visible at this timestamp will not ever be + * projected up to scans past this timestamp, so don't need to be considered. We + * insert one empty key value per row per timestamp. 
+ */ + insertEmptyKeyValue(results, mutations); + } + if ( + ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, + maxBatchSizeBytes) + ) { + annotateAndCommit(mutations); + } + // Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config + + if ( + ServerUtil.readyToCommit(indexMutations.size(), indexMutations.byteSize(), + maxBatchSize, maxBatchSizeBytes) + ) { + setIndexAndTransactionProperties(indexMutations, indexUUID, indexMaintainersPtr, + txState, clientVersionBytes, useIndexProto); + ungroupedAggregateRegionObserver.commitBatch(region, indexMutations, + blockingMemStoreSize); + indexMutations.clear(); + } + aggregators.aggregate(rowAggregators, result); + hasAny = true; } - return hasMore; + } while ( + hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) < pageSizeMs + ); + if (!mutations.isEmpty()) { + annotateAndCommit(mutations); + } + if (!indexMutations.isEmpty()) { + ungroupedAggregateRegionObserver.commitBatch(region, indexMutations, + blockingMemStoreSize); + indexMutations.clear(); + } } - } - - private void annotateAndCommit(UngroupedAggregateRegionObserver.MutationList mutations) throws IOException { - annotateDataMutations(mutations, scan); - if (isDelete || isUpsert) { - annotateDataMutationsWithExternalSchemaId(mutations, scan); + } catch (InsufficientMemoryException e) { + throw new DoNotRetryIOException(e); + } catch (DataExceedsCapacityException e) { + throw new DoNotRetryIOException(e.getMessage(), e); + } catch (Throwable e) { + LOGGER.error("Exception in UngroupedAggregateRegionScanner for region " + + region.getRegionInfo().getRegionNameAsString(), e); + throw e; + } finally { + region.closeRegionOperation(); + } + Cell keyValue; + if (hasAny) { + byte[] value = aggregators.toBytes(rowAggregators); + if (pageSizeMs == Long.MAX_VALUE) { + byte[] rowKey; + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + if (!isIncompatibleClient) { + rowKey = CellUtil.cloneRow(lastCell); + } else { + // Paging is not set. 
To be compatible with older clients, do not set the row key + rowKey = UNGROUPED_AGG_ROW_KEY; + } + keyValue = PhoenixKeyValueUtil.newKeyValue(rowKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, + AGG_TIMESTAMP, value, 0, value.length); + } else { + keyValue = PhoenixKeyValueUtil.newKeyValue(CellUtil.cloneRow(lastCell), + SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length); } - ungroupedAggregateRegionObserver.commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, - targetHTable, useIndexProto, isPKChanging, clientVersionBytes); - mutations.clear(); + resultsToReturn.add(keyValue); + } + return hasMore; } + } - @Override - public long getMaxResultSize() { - return scan.getMaxResultSize(); + private void annotateAndCommit(UngroupedAggregateRegionObserver.MutationList mutations) + throws IOException { + annotateDataMutations(mutations, scan); + if (isDelete || isUpsert) { + annotateDataMutationsWithExternalSchemaId(mutations, scan); } - - private void annotateDataMutations(UngroupedAggregateRegionObserver.MutationList mutationsList, - Scan scan) { - - byte[] tenantId = - scan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); - byte[] schemaName = - scan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); - byte[] logicalTableName = - scan.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); - byte[] tableType = - scan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); - byte[] ddlTimestamp = - scan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString()); - - for (Mutation m : mutationsList) { - annotateMutation(m, tenantId, schemaName, logicalTableName, tableType, ddlTimestamp); - } + ungroupedAggregateRegionObserver.commit(region, mutations, indexUUID, blockingMemStoreSize, + indexMaintainersPtr, txState, targetHTable, useIndexProto, isPKChanging, clientVersionBytes); + mutations.clear(); + } + + @Override + public long getMaxResultSize() { + return scan.getMaxResultSize(); + } + + private void annotateDataMutations(UngroupedAggregateRegionObserver.MutationList mutationsList, + Scan scan) { + + byte[] tenantId = scan.getAttribute(MutationState.MutationMetadataType.TENANT_ID.toString()); + byte[] schemaName = + scan.getAttribute(MutationState.MutationMetadataType.SCHEMA_NAME.toString()); + byte[] logicalTableName = + scan.getAttribute(MutationState.MutationMetadataType.LOGICAL_TABLE_NAME.toString()); + byte[] tableType = scan.getAttribute(MutationState.MutationMetadataType.TABLE_TYPE.toString()); + byte[] ddlTimestamp = + scan.getAttribute(MutationState.MutationMetadataType.TIMESTAMP.toString()); + + for (Mutation m : mutationsList) { + annotateMutation(m, tenantId, schemaName, logicalTableName, tableType, ddlTimestamp); } - - private void annotateDataMutationsWithExternalSchemaId( - UngroupedAggregateRegionObserver.MutationList mutationsList, - Scan scan) { - byte[] externalSchemaRegistryId = scan.getAttribute( - MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString()); - for (Mutation m : mutationsList) { - annotateMutation(m, externalSchemaRegistryId); - } + } + + private void annotateDataMutationsWithExternalSchemaId( + UngroupedAggregateRegionObserver.MutationList mutationsList, Scan scan) { + byte[] externalSchemaRegistryId = + scan.getAttribute(MutationState.MutationMetadataType.EXTERNAL_SCHEMA_ID.toString()); + for (Mutation m : mutationsList) { + annotateMutation(m, externalSchemaRegistryId); } + } } diff --git 
a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/VerifyLastDDLTimestamp.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/VerifyLastDDLTimestamp.java index 4c5bf09704a..d55a44caf4e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/VerifyLastDDLTimestamp.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/VerifyLastDDLTimestamp.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,43 +27,41 @@ import org.slf4j.LoggerFactory; /** - * Client provides last DDL timestamp of tables/views/indexes included in read/write operation - * This verifies that client has the latest version of LAST_DDL_TIMESTAMP version. - * If client's provided LAST_DDL_TIMESTAMP is less than what is present in SYSTEM.CATALOG - * then it throws StaleMetadataCacheException. + * Client provides last DDL timestamp of tables/views/indexes included in read/write operation This + * verifies that client has the latest version of LAST_DDL_TIMESTAMP version. If client's provided + * LAST_DDL_TIMESTAMP is less than what is present in SYSTEM.CATALOG then it throws + * StaleMetadataCacheException. */ public class VerifyLastDDLTimestamp { - private static final Logger LOGGER = LoggerFactory.getLogger(VerifyLastDDLTimestamp.class); + private static final Logger LOGGER = LoggerFactory.getLogger(VerifyLastDDLTimestamp.class); - private VerifyLastDDLTimestamp() { - // Not to be instantiated. - } + private VerifyLastDDLTimestamp() { + // Not to be instantiated. + } - /** - * Verify that LAST_DDL_TIMESTAMP provided by the client is up to date. If it is stale it will - * throw StaleMetadataCacheException. - * - * @param tenantID tenant id - * @param schemaName schema name - * @param tableName table name - * @param clientLastDDLTimestamp last ddl timestamp provided by client - * @param cache ServerMetadataCache - * @throws SQLException StaleMetadataCacheException if client provided timestamp - * is stale. - */ - public static void verifyLastDDLTimestamp(ServerMetadataCache cache, byte[] tenantID, - byte[] schemaName, byte[] tableName, long clientLastDDLTimestamp) - throws SQLException { - long lastDDLTimestamp = cache.getLastDDLTimestampForTable(tenantID, schemaName, tableName); - // Is it possible to have client last ddl timestamp greater than server side? - if (clientLastDDLTimestamp < lastDDLTimestamp) { - LOGGER.error("Stale metadata for LAST_DDL_TIMESTAMP for tenantID: {}, schema: {}," - + " table: {}, client provided timestamp: {}, server timestamp: {}", - Bytes.toString(tenantID), Bytes.toString(schemaName), - Bytes.toString(tableName), clientLastDDLTimestamp, lastDDLTimestamp); - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - throw new StaleMetadataCacheException("Stale metadata cache for table name: " - + fullTableName); - } + /** + * Verify that LAST_DDL_TIMESTAMP provided by the client is up to date. If it is stale it will + * throw StaleMetadataCacheException. 
+ * @param tenantID tenant id + * @param schemaName schema name + * @param tableName table name + * @param clientLastDDLTimestamp last ddl timestamp provided by client + * @param cache ServerMetadataCache + * @throws SQLException StaleMetadataCacheException if client provided timestamp is stale. + */ + public static void verifyLastDDLTimestamp(ServerMetadataCache cache, byte[] tenantID, + byte[] schemaName, byte[] tableName, long clientLastDDLTimestamp) throws SQLException { + long lastDDLTimestamp = cache.getLastDDLTimestampForTable(tenantID, schemaName, tableName); + // Is it possible to have client last ddl timestamp greater than server side? + if (clientLastDDLTimestamp < lastDDLTimestamp) { + LOGGER.error( + "Stale metadata for LAST_DDL_TIMESTAMP for tenantID: {}, schema: {}," + + " table: {}, client provided timestamp: {}, server timestamp: {}", + Bytes.toString(tenantID), Bytes.toString(schemaName), Bytes.toString(tableName), + clientLastDDLTimestamp, lastDDLTimestamp); + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + throw new StaleMetadataCacheException( + "Stale metadata cache for table name: " + fullTableName); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/BaseTask.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/BaseTask.java index b0a24aa6e1f..55615b3ad28 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/BaseTask.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/BaseTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,13 +22,16 @@ import org.apache.phoenix.schema.task.Task; public abstract class BaseTask { - protected long timeMaxInterval; - protected RegionCoprocessorEnvironment env; - public void init(RegionCoprocessorEnvironment env, Long interval) { - this.env = env; - this.timeMaxInterval = interval; - } - public abstract TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord); + protected long timeMaxInterval; + protected RegionCoprocessorEnvironment env; - public abstract TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) throws Exception; + public void init(RegionCoprocessorEnvironment env, Long interval) { + this.env = env; + this.timeMaxInterval = interval; + } + + public abstract TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord); + + public abstract TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) + throws Exception; } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java index 8d619e2bb2e..a54052f11d3 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,91 +17,90 @@ */ package org.apache.phoenix.coprocessor.tasks; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; + +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Properties; + import org.apache.phoenix.coprocessor.TaskRegionObserver; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.util.ServerViewUtil; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.schema.MetaDataClient; import org.apache.phoenix.schema.task.Task; +import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; +import org.apache.phoenix.util.SchemaUtil; +import org.apache.phoenix.util.ServerViewUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.Properties; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; - /** * Task runs periodically to clean up task of child views whose parent is dropped - * */ public class DropChildViewsTask extends BaseTask { - public static final Logger LOGGER = LoggerFactory.getLogger(DropChildViewsTask.class); + public static final Logger LOGGER = LoggerFactory.getLogger(DropChildViewsTask.class); - public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { - PhoenixConnection pconn = null; - Timestamp timestamp = taskRecord.getTimeStamp(); - try { - String tenantId = taskRecord.getTenantId(); - if (tenantId != null) { - Properties tenantProps = new Properties(); - tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - pconn = QueryUtil.getConnectionOnServer(tenantProps, env.getConfiguration()) - .unwrap(PhoenixConnection.class); - } - else { - pconn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class); - } + public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { + PhoenixConnection pconn = null; + Timestamp timestamp = taskRecord.getTimeStamp(); + try { + String tenantId = taskRecord.getTenantId(); + if (tenantId != null) { + Properties tenantProps = new Properties(); + tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + pconn = QueryUtil.getConnectionOnServer(tenantProps, env.getConfiguration()) + .unwrap(PhoenixConnection.class); + } else { + pconn = + QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class); + } - MetaDataProtocol.MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(pconn.getTenantId(), - taskRecord.getSchemaName(), taskRecord.getTableName(), true); - if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { - ServerViewUtil.dropChildViews(env, taskRecord.getTenantIdBytes(), - taskRecord.getSchemaNameBytes(), taskRecord.getTableNameBytes(), - SchemaUtil.getPhysicalTableName( - SYSTEM_CHILD_LINK_NAME_BYTES, - env.getConfiguration()).getName()); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, ""); - } else if 
(EnvironmentEdgeManager.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) { - // skip this task as it has not been expired and its parent table has not been dropped yet - LOGGER.info("Skipping a child view drop task. " + - "The parent table has not been dropped yet : " + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) + - " and timestamp " + timestamp.toString()); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, ""); - } - else { - LOGGER.warn(" A drop child view task has expired and will be marked as failed : " + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (tenantId == null ? " IS NULL" : tenantId) + - " and timestamp " + timestamp.toString()); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, "Expired"); - } + MetaDataProtocol.MetaDataMutationResult result = new MetaDataClient(pconn).updateCache( + pconn.getTenantId(), taskRecord.getSchemaName(), taskRecord.getTableName(), true); + if (result.getMutationCode() != MetaDataProtocol.MutationCode.TABLE_ALREADY_EXISTS) { + ServerViewUtil.dropChildViews(env, taskRecord.getTenantIdBytes(), + taskRecord.getSchemaNameBytes(), taskRecord.getTableNameBytes(), SchemaUtil + .getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, env.getConfiguration()).getName()); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, ""); + } else + if (EnvironmentEdgeManager.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) { + // skip this task as it has not been expired and its parent table has not been dropped yet + LOGGER.info( + "Skipping a child view drop task. " + "The parent table has not been dropped yet : " + + taskRecord.getSchemaName() + "." + taskRecord.getTableName() + " with tenant id " + + (tenantId == null ? " IS NULL" : tenantId) + " and timestamp " + + timestamp.toString()); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, ""); + } else { + LOGGER.warn(" A drop child view task has expired and will be marked as failed : " + + taskRecord.getSchemaName() + "." + taskRecord.getTableName() + " with tenant id " + + (tenantId == null ? " IS NULL" : tenantId) + " and timestamp " + + timestamp.toString()); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "Expired"); } - catch (Throwable t) { - LOGGER.error("Exception while dropping a child view task. " + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) + - " and timestamp " + timestamp.toString(), t); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, t.toString()); - } finally { - if (pconn != null) { - try { - pconn.close(); - } catch (SQLException ignored) { - LOGGER.debug("DropChildViewsTask can't close connection", ignored); - } - } + } catch (Throwable t) { + LOGGER.error("Exception while dropping a child view task. " + taskRecord.getSchemaName() + "." + + taskRecord.getTableName() + " with tenant id " + + (taskRecord.getTenantId() == null ? 
" IS NULL" : taskRecord.getTenantId()) + + " and timestamp " + timestamp.toString(), t); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + t.toString()); + } finally { + if (pconn != null) { + try { + pconn.close(); + } catch (SQLException ignored) { + LOGGER.debug("DropChildViewsTask can't close connection", ignored); } + } } + } - public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) throws Exception { - return null; - } + public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) + throws Exception { + return null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java index 7989760cb30..6a0a8a176cd 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,175 +17,169 @@ */ package org.apache.phoenix.coprocessor.tasks; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants; -import org.apache.phoenix.schema.task.ServerTask; -import org.apache.phoenix.schema.task.SystemTaskParams; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.mapreduce.Cluster; import org.apache.hadoop.mapreduce.Job; import org.apache.phoenix.coprocessor.TaskRegionObserver; +import org.apache.phoenix.coprocessorclient.tasks.IndexRebuildTaskConstants; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.task.ServerTask; +import org.apache.phoenix.schema.task.SystemTaskParams; import org.apache.phoenix.schema.task.Task; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.JacksonUtil; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Map; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; /** * Task runs periodically to rebuild indexes for System.Task entries. 
- * */ -public class IndexRebuildTask extends BaseTask { - public static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildTask.class); - - @Override - public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { - Connection conn = null; +public class IndexRebuildTask extends BaseTask { + public static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildTask.class); + + @Override + public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { + Connection conn = null; + + try { + // We have to clone the configuration because env.getConfiguration is readonly. + Configuration conf = HBaseConfiguration.create(env.getConfiguration()); + conn = QueryUtil.getConnectionOnServer(env.getConfiguration()); + + conf.set(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString()); + + String data = taskRecord.getData(); + if (Strings.isNullOrEmpty(taskRecord.getData())) { + data = "{}"; + } + JsonNode jsonNode = JacksonUtil.getObjectReader(JsonNode.class).readValue(data); + String indexName = getIndexName(jsonNode); + + if (Strings.isNullOrEmpty(indexName)) { + String str = "Index name is not found. Index rebuild cannot continue " + "Data : " + data; + LOGGER.warn(str); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); + } + + boolean shouldDisable = false; + if (jsonNode.has(IndexRebuildTaskConstants.DISABLE_BEFORE)) { + String disableBefore = jsonNode.get(IndexRebuildTaskConstants.DISABLE_BEFORE).toString(); + if (!Strings.isNullOrEmpty(disableBefore)) { + shouldDisable = Boolean.valueOf(disableBefore); + } + } - try { - // We have to clone the configuration because env.getConfiguration is readonly. - Configuration conf = HBaseConfiguration.create(env.getConfiguration()); - conn = QueryUtil.getConnectionOnServer(env.getConfiguration()); - - conf.set(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString()); - - String data = taskRecord.getData(); - if (Strings.isNullOrEmpty(taskRecord.getData())) { - data = "{}"; - } - JsonNode jsonNode = JacksonUtil.getObjectReader(JsonNode.class).readValue(data); - String indexName = getIndexName(jsonNode); - - if (Strings.isNullOrEmpty(indexName)) { - String str = "Index name is not found. Index rebuild cannot continue " + - "Data : " + data; - LOGGER.warn(str); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); - } - - boolean shouldDisable = false; - if (jsonNode.has(IndexRebuildTaskConstants.DISABLE_BEFORE)) { - String disableBefore = jsonNode.get(IndexRebuildTaskConstants.DISABLE_BEFORE).toString(); - if (!Strings.isNullOrEmpty(disableBefore)) { - shouldDisable = Boolean.valueOf(disableBefore); - } - } - - boolean rebuildAll = false; - if (jsonNode.has(IndexRebuildTaskConstants.REBUILD_ALL)) { - String rebuildAllStr = jsonNode.get(IndexRebuildTaskConstants.REBUILD_ALL).toString(); - if (!Strings.isNullOrEmpty(rebuildAllStr)) { - rebuildAll = Boolean.valueOf(rebuildAllStr); - } - } - - // Run index tool async. 
- boolean runForeground = false; - Map.Entry indexToolRes = IndexTool - .run(conf, taskRecord.getSchemaName(), taskRecord.getTableName(), indexName, - false, taskRecord.getTenantId(), shouldDisable, rebuildAll, runForeground); - int status = indexToolRes.getKey(); - if (status != 0) { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, "Index tool returned : " + status); - } - - Job job = indexToolRes.getValue(); - - ((ObjectNode) jsonNode).put(IndexRebuildTaskConstants.JOB_ID, job.getJobID().toString()); - ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(conn.unwrap(PhoenixConnection.class)) - .setTaskType(taskRecord.getTaskType()) - .setTenantId(taskRecord.getTenantId()) - .setSchemaName(taskRecord.getSchemaName()) - .setTableName(taskRecord.getTableName()) - .setTaskStatus(PTable.TaskStatus.STARTED.toString()) - .setData(jsonNode.toString()) - .setPriority(taskRecord.getPriority()) - .setStartTs(taskRecord.getTimeStamp()) - .setEndTs(null) - .setAccessCheckEnabled(true) - .build()); - // It will take some time to finish, so we will check the status in a separate task. - return null; + boolean rebuildAll = false; + if (jsonNode.has(IndexRebuildTaskConstants.REBUILD_ALL)) { + String rebuildAllStr = jsonNode.get(IndexRebuildTaskConstants.REBUILD_ALL).toString(); + if (!Strings.isNullOrEmpty(rebuildAllStr)) { + rebuildAll = Boolean.valueOf(rebuildAllStr); } - catch (Throwable t) { - LOGGER.warn("Exception while running index rebuild task. " + - "It will be retried in the next system task table scan : " + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) + - " and data " + taskRecord.getData(), t); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, t.toString()); - } finally { - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - LOGGER.debug("IndexRebuildTask can't close connection"); - } - } + } + + // Run index tool async. + boolean runForeground = false; + Map.Entry indexToolRes = + IndexTool.run(conf, taskRecord.getSchemaName(), taskRecord.getTableName(), indexName, false, + taskRecord.getTenantId(), shouldDisable, rebuildAll, runForeground); + int status = indexToolRes.getKey(); + if (status != 0) { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "Index tool returned : " + status); + } + + Job job = indexToolRes.getValue(); + + ((ObjectNode) jsonNode).put(IndexRebuildTaskConstants.JOB_ID, job.getJobID().toString()); + ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() + .setConn(conn.unwrap(PhoenixConnection.class)).setTaskType(taskRecord.getTaskType()) + .setTenantId(taskRecord.getTenantId()).setSchemaName(taskRecord.getSchemaName()) + .setTableName(taskRecord.getTableName()).setTaskStatus(PTable.TaskStatus.STARTED.toString()) + .setData(jsonNode.toString()).setPriority(taskRecord.getPriority()) + .setStartTs(taskRecord.getTimeStamp()).setEndTs(null).setAccessCheckEnabled(true).build()); + // It will take some time to finish, so we will check the status in a separate task. + return null; + } catch (Throwable t) { + LOGGER.warn("Exception while running index rebuild task. " + + "It will be retried in the next system task table scan : " + taskRecord.getSchemaName() + + "." + taskRecord.getTableName() + " with tenant id " + + (taskRecord.getTenantId() == null ? 
" IS NULL" : taskRecord.getTenantId()) + " and data " + + taskRecord.getData(), t); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + t.toString()); + } finally { + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + LOGGER.debug("IndexRebuildTask can't close connection"); } - + } } - private String getIndexName(JsonNode jsonNode) { - String indexName = null; - // Get index name from data column. - if (jsonNode.has(IndexRebuildTaskConstants.INDEX_NAME)) { - indexName = jsonNode.get(IndexRebuildTaskConstants.INDEX_NAME).toString().replaceAll("\"", ""); - } - return indexName; - } + } - private String getJobID(String data) throws JsonProcessingException { - if (Strings.isNullOrEmpty(data)) { - data = "{}"; - } - JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(data); - String jobId = null; - if (jsonNode.has(IndexRebuildTaskConstants.JOB_ID)) { - jobId = jsonNode.get(IndexRebuildTaskConstants.JOB_ID).textValue().replaceAll("\"", ""); - } - return jobId; + private String getIndexName(JsonNode jsonNode) { + String indexName = null; + // Get index name from data column. + if (jsonNode.has(IndexRebuildTaskConstants.INDEX_NAME)) { + indexName = + jsonNode.get(IndexRebuildTaskConstants.INDEX_NAME).toString().replaceAll("\"", ""); } + return indexName; + } - @Override - public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) - throws Exception { - - String jobID = getJobID(taskRecord.getData()); - if (jobID != null) { - Configuration conf = HBaseConfiguration.create(env.getConfiguration()); - Configuration configuration = HBaseConfiguration.addHbaseResources(conf); - Cluster cluster = new Cluster(configuration); - - Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobID)); - if (job == null) { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, ""); - } - if (job != null && job.isComplete()) { - if (job.isSuccessful()) { - LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " - + taskRecord.getTableName()); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, ""); - } else { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, - "Index is DISABLED"); - } - } - + private String getJobID(String data) throws JsonProcessingException { + if (Strings.isNullOrEmpty(data)) { + data = "{}"; + } + JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(data); + String jobId = null; + if (jsonNode.has(IndexRebuildTaskConstants.JOB_ID)) { + jobId = jsonNode.get(IndexRebuildTaskConstants.JOB_ID).textValue().replaceAll("\"", ""); + } + return jobId; + } + + @Override + public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) + throws Exception { + + String jobID = getJobID(taskRecord.getData()); + if (jobID != null) { + Configuration conf = HBaseConfiguration.create(env.getConfiguration()); + Configuration configuration = HBaseConfiguration.addHbaseResources(conf); + Cluster cluster = new Cluster(configuration); + + Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobID)); + if (job == null) { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, ""); + } + if (job != null && job.isComplete()) { + if (job.isSuccessful()) { + LOGGER.warn( + "IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName()); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, 
""); + } else { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "Index is DISABLED"); } - return null; + } + } + return null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/TransformMonitorTask.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/TransformMonitorTask.java index 392102beb95..2ffe9f2b4cc 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/TransformMonitorTask.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/tasks/TransformMonitorTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,11 @@ */ package org.apache.phoenix.coprocessor.tasks; -import org.apache.phoenix.schema.task.ServerTask; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.DEFAULT_TRANSFORM_RETRY_COUNT; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.TRANSFORM_RETRY_COUNT_VALUE; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.DEFAULT_TRANSFORM_MONITOR_ENABLED; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.TRANSFORM_MONITOR_ENABLED; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.mapreduce.Cluster; @@ -27,167 +30,191 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.mapreduce.transform.TransformTool; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.task.ServerTask; import org.apache.phoenix.schema.task.SystemTaskParams; import org.apache.phoenix.schema.task.Task; import org.apache.phoenix.schema.transform.SystemTransformRecord; import org.apache.phoenix.schema.transform.Transform; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SchemaUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.DEFAULT_TRANSFORM_RETRY_COUNT; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.TRANSFORM_RETRY_COUNT_VALUE; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.DEFAULT_TRANSFORM_MONITOR_ENABLED; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper.TRANSFORM_MONITOR_ENABLED; - /** * Task runs periodically to monitor and orchestrate ongoing transforms in System.Transform table. 
- * */ -public class TransformMonitorTask extends BaseTask { - public static final String DEFAULT = "IndexName"; +public class TransformMonitorTask extends BaseTask { + public static final String DEFAULT = "IndexName"; - public static final Logger LOGGER = LoggerFactory.getLogger(TransformMonitorTask.class); + public static final Logger LOGGER = LoggerFactory.getLogger(TransformMonitorTask.class); - private static boolean isDisabled = false; + private static boolean isDisabled = false; - // Called from testong - @VisibleForTesting - public static void disableTransformMonitorTask(boolean disabled) { - isDisabled = disabled; - } + // Called from testong + @VisibleForTesting + public static void disableTransformMonitorTask(boolean disabled) { + isDisabled = disabled; + } - @Override - public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { - Configuration conf = HBaseConfiguration.create(env.getConfiguration()); - Configuration configuration = HBaseConfiguration.addHbaseResources(conf); - boolean transformMonitorEnabled = configuration.getBoolean(TRANSFORM_MONITOR_ENABLED, DEFAULT_TRANSFORM_MONITOR_ENABLED); - if (!transformMonitorEnabled || isDisabled) { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, "TransformMonitor is disabled"); - } + @Override + public TaskRegionObserver.TaskResult run(Task.TaskRecord taskRecord) { + Configuration conf = HBaseConfiguration.create(env.getConfiguration()); + Configuration configuration = HBaseConfiguration.addHbaseResources(conf); + boolean transformMonitorEnabled = + configuration.getBoolean(TRANSFORM_MONITOR_ENABLED, DEFAULT_TRANSFORM_MONITOR_ENABLED); + if (!transformMonitorEnabled || isDisabled) { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "TransformMonitor is disabled"); + } - try (PhoenixConnection conn = QueryUtil.getConnectionOnServer(conf).unwrap(PhoenixConnection.class)){ - SystemTransformRecord systemTransformRecord = Transform.getTransformRecord(taskRecord.getSchemaName(), - taskRecord.getTableName(), null, taskRecord.getTenantId(), conn); - if (systemTransformRecord == null) { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, - "No transform record is found"); - } - String tableName = SchemaUtil.getTableName(systemTransformRecord.getSchemaName(), - systemTransformRecord.getLogicalTableName()); + try (PhoenixConnection conn = + QueryUtil.getConnectionOnServer(conf).unwrap(PhoenixConnection.class)) { + SystemTransformRecord systemTransformRecord = + Transform.getTransformRecord(taskRecord.getSchemaName(), taskRecord.getTableName(), null, + taskRecord.getTenantId(), conn); + if (systemTransformRecord == null) { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "No transform record is found"); + } + String tableName = SchemaUtil.getTableName(systemTransformRecord.getSchemaName(), + systemTransformRecord.getLogicalTableName()); - if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.CREATED.name())) { - LOGGER.info("Transform is created, starting the TransformTool ", tableName); - // Kick a TransformTool run, it will already update transform record status and job id - TransformTool transformTool = TransformTool.runTransformTool(systemTransformRecord, conf, false, null, null, false, false); - if (transformTool == null) { - // This is not a map/reduce error. There must be some unexpected issue. So, retrying will not solve the underlying issue. 
- return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, "TransformTool run failed. Check the parameters."); - } - } else if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { - LOGGER.info("Transform is completed, TransformMonitor is done ", tableName); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, ""); - } else if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.PENDING_CUTOVER.name()) - && !PTable.TransformType.isPartialTransform(systemTransformRecord.getTransformType())) { - LOGGER.info("Transform is pending cutover ", tableName); - Transform.doCutover(conn, systemTransformRecord); + if ( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.CREATED.name()) + ) { + LOGGER.info("Transform is created, starting the TransformTool ", tableName); + // Kick a TransformTool run, it will already update transform record status and job id + TransformTool transformTool = TransformTool.runTransformTool(systemTransformRecord, conf, + false, null, null, false, false); + if (transformTool == null) { + // This is not a map/reduce error. There must be some unexpected issue. So, retrying will + // not solve the underlying issue. + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + "TransformTool run failed. Check the parameters."); + } + } else if ( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name()) + ) { + LOGGER.info("Transform is completed, TransformMonitor is done ", tableName); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, ""); + } else if ( + systemTransformRecord.getTransformStatus() + .equals(PTable.TransformStatus.PENDING_CUTOVER.name()) + && !PTable.TransformType.isPartialTransform(systemTransformRecord.getTransformType()) + ) { + LOGGER.info("Transform is pending cutover ", tableName); + Transform.doCutover(conn, systemTransformRecord); - PTable.TransformType partialTransform = PTable.TransformType.getPartialTransform(systemTransformRecord.getTransformType()); - if (partialTransform != null) { - // Update transform to be partial - SystemTransformRecord.SystemTransformBuilder builder = new SystemTransformRecord.SystemTransformBuilder(systemTransformRecord); - builder.setTransformType(partialTransform); - // Decrement retry count since TransformTool will increment it. Should we set it to 0? - builder.setTransformRetryCount(systemTransformRecord.getTransformRetryCount()-1); - Transform.upsertTransform(builder.build(), conn); + PTable.TransformType partialTransform = + PTable.TransformType.getPartialTransform(systemTransformRecord.getTransformType()); + if (partialTransform != null) { + // Update transform to be partial + SystemTransformRecord.SystemTransformBuilder builder = + new SystemTransformRecord.SystemTransformBuilder(systemTransformRecord); + builder.setTransformType(partialTransform); + // Decrement retry count since TransformTool will increment it. Should we set it to 0? + builder.setTransformRetryCount(systemTransformRecord.getTransformRetryCount() - 1); + Transform.upsertTransform(builder.build(), conn); - // Fix unverified rows. 
Running partial transform will make the transform status go back to started - long startFromTs = 0; - if (systemTransformRecord.getTransformLastStateTs() != null) { - startFromTs = systemTransformRecord.getTransformLastStateTs().getTime()-1; - } - TransformTool.runTransformTool(systemTransformRecord, conf, true, startFromTs, null, true, false); + // Fix unverified rows. Running partial transform will make the transform status go back + // to started + long startFromTs = 0; + if (systemTransformRecord.getTransformLastStateTs() != null) { + startFromTs = systemTransformRecord.getTransformLastStateTs().getTime() - 1; + } + TransformTool.runTransformTool(systemTransformRecord, conf, true, startFromTs, null, true, + false); - // In the future, if we are changing the PK structure, we need to run indextools as well - } else { - // No partial transform needed so, we update state of the transform - LOGGER.warn("No partial type of the transform is found. Completing the transform ", tableName); - Transform.updateTransformRecord(conn, systemTransformRecord, PTable.TransformStatus.COMPLETED); - } - } else if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.STARTED.name()) || - (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.PENDING_CUTOVER.name()) - && PTable.TransformType.isPartialTransform(systemTransformRecord.getTransformType()))) { - LOGGER.info(systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.STARTED.name()) ? - "Transform is started, we will monitor ": "Partial transform is going on, we will monitor" , tableName); - // Monitor the job of transform tool and decide to retry - String jobId = systemTransformRecord.getTransformJobId(); - if (jobId != null) { - Cluster cluster = new Cluster(configuration); + // In the future, if we are changing the PK structure, we need to run indextools as well + } else { + // No partial transform needed so, we update state of the transform + LOGGER.warn("No partial type of the transform is found. Completing the transform ", + tableName); + Transform.updateTransformRecord(conn, systemTransformRecord, + PTable.TransformStatus.COMPLETED); + } + } else if ( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.STARTED.name()) + || (systemTransformRecord.getTransformStatus() + .equals(PTable.TransformStatus.PENDING_CUTOVER.name()) + && PTable.TransformType.isPartialTransform(systemTransformRecord.getTransformType())) + ) { + LOGGER.info( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.STARTED.name()) + ? "Transform is started, we will monitor " + : "Partial transform is going on, we will monitor", + tableName); + // Monitor the job of transform tool and decide to retry + String jobId = systemTransformRecord.getTransformJobId(); + if (jobId != null) { + Cluster cluster = new Cluster(configuration); - Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobId)); - if (job == null) { - LOGGER.warn(String.format("Transform job with Id=%s is not found", jobId)); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, "The job cannot be found"); - } - if (job != null && job.isComplete()) { - if (job.isSuccessful()) { - LOGGER.warn("TransformTool job is successful. 
Transform should have been in a COMPLETED state " - + taskRecord.getTableName()); - } else { - // Retry TransformTool run - int maxRetryCount = configuration.getInt(TRANSFORM_RETRY_COUNT_VALUE, DEFAULT_TRANSFORM_RETRY_COUNT); - if (systemTransformRecord.getTransformRetryCount() < maxRetryCount) { - // Retry count will be incremented in TransformTool - TransformTool.runTransformTool(systemTransformRecord, conf, false, null, null, false, true); - } - } - } - } - } else if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.FAILED.name())) { - String str = "Transform is marked as failed because either TransformTool is run on the foreground and failed " + - "or it is run as async but there is something wrong with the TransformTool parameters"; - LOGGER.error(str); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); - } else if (systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.PAUSED.name())) { - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, - "Transform is paused. No need to monitor"); + Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobId)); + if (job == null) { + LOGGER.warn(String.format("Transform job with Id=%s is not found", jobId)); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, + "The job cannot be found"); + } + if (job != null && job.isComplete()) { + if (job.isSuccessful()) { + LOGGER.warn( + "TransformTool job is successful. Transform should have been in a COMPLETED state " + + taskRecord.getTableName()); } else { - String str = "Transform status is not known " + systemTransformRecord.getString(); - LOGGER.error(str); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); + // Retry TransformTool run + int maxRetryCount = + configuration.getInt(TRANSFORM_RETRY_COUNT_VALUE, DEFAULT_TRANSFORM_RETRY_COUNT); + if (systemTransformRecord.getTransformRetryCount() < maxRetryCount) { + // Retry count will be incremented in TransformTool + TransformTool.runTransformTool(systemTransformRecord, conf, false, null, null, + false, true); + } } - - // Update task status to RETRY so that it is retried - ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder() - .setConn(conn) - .setTaskType(taskRecord.getTaskType()) - .setTenantId(taskRecord.getTenantId()) - .setSchemaName(taskRecord.getSchemaName()) - .setTableName(taskRecord.getTableName()) - .setTaskStatus(PTable.TaskStatus.RETRY.toString()) - .setData(taskRecord.getData()) - .setPriority(taskRecord.getPriority()) - .setStartTs(taskRecord.getTimeStamp()) - .setEndTs(null) - .setAccessCheckEnabled(true) - .build()); - return null; + } } - catch (Throwable t) { - LOGGER.warn("Exception while running transform monitor task. " + - "It will be retried in the next system task table scan : " + - taskRecord.getSchemaName() + "." + taskRecord.getTableName() + - " with tenant id " + (taskRecord.getTenantId() == null ? 
" IS NULL" : taskRecord.getTenantId()) + - " and data " + taskRecord.getData(), t); - return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, t.toString()); - } - } + } else if ( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.FAILED.name()) + ) { + String str = + "Transform is marked as failed because either TransformTool is run on the foreground and failed " + + "or it is run as async but there is something wrong with the TransformTool parameters"; + LOGGER.error(str); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); + } else if ( + systemTransformRecord.getTransformStatus().equals(PTable.TransformStatus.PAUSED.name()) + ) { + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, + "Transform is paused. No need to monitor"); + } else { + String str = "Transform status is not known " + systemTransformRecord.getString(); + LOGGER.error(str); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, str); + } - @Override - public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) - throws Exception { - // We don't need to check MR job result here since the job itself changes task state. - return null; + // Update task status to RETRY so that it is retried + ServerTask.addTask(new SystemTaskParams.SystemTaskParamsBuilder().setConn(conn) + .setTaskType(taskRecord.getTaskType()).setTenantId(taskRecord.getTenantId()) + .setSchemaName(taskRecord.getSchemaName()).setTableName(taskRecord.getTableName()) + .setTaskStatus(PTable.TaskStatus.RETRY.toString()).setData(taskRecord.getData()) + .setPriority(taskRecord.getPriority()).setStartTs(taskRecord.getTimeStamp()).setEndTs(null) + .setAccessCheckEnabled(true).build()); + return null; + } catch (Throwable t) { + LOGGER.warn("Exception while running transform monitor task. " + + "It will be retried in the next system task table scan : " + taskRecord.getSchemaName() + + "." + taskRecord.getTableName() + " with tenant id " + + (taskRecord.getTenantId() == null ? " IS NULL" : taskRecord.getTenantId()) + " and data " + + taskRecord.getData(), t); + return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, + t.toString()); } + } + + @Override + public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) + throws Exception { + // We don't need to check MR job result here since the job itself changes task state. + return null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/CapturingAbortable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/CapturingAbortable.java index c52e749ce32..58d12d1e9b5 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/CapturingAbortable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/CapturingAbortable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,4 +63,4 @@ public void throwCauseIfAborted() throws Throwable { } throw cause; } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java index d4c977c5cbf..2cc0e762c4f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,15 @@ */ package org.apache.phoenix.hbase.index; - import static org.apache.hadoop.hbase.HConstants.OperationStatusCode.SUCCESS; +import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.applyNew; +import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.removeColumn; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.UPSERT_CF; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.UPSERT_STATUS_CQ; import static org.apache.phoenix.hbase.index.util.IndexManagementUtil.rethrowIndexingException; +import static org.apache.phoenix.index.PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB; +import static org.apache.phoenix.index.PhoenixIndexBuilderHelper.RETURN_RESULT; +import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; import java.io.ByteArrayInputStream; import java.io.DataInputStream; @@ -37,20 +41,12 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.phoenix.execute.MutationState; -import org.apache.phoenix.expression.CaseExpression; -import org.apache.phoenix.index.PhoenixIndexBuilderHelper; -import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; @@ -66,11 +62,13 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.regionserver.OperationStatus; import org.apache.hadoop.hbase.regionserver.Region; @@ -87,6 +85,8 @@ import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment; import org.apache.phoenix.coprocessor.generated.PTableProtos; import org.apache.phoenix.exception.DataExceedsCapacityException; +import org.apache.phoenix.execute.MutationState; +import org.apache.phoenix.expression.CaseExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.ExpressionType; import org.apache.phoenix.expression.KeyValueColumnExpression; @@ -107,6 +107,7 @@ import org.apache.phoenix.hbase.index.write.IndexWriter; import org.apache.phoenix.hbase.index.write.LazyParallelWriterIndexCommitter; import org.apache.phoenix.index.IndexMaintainer; +import org.apache.phoenix.index.PhoenixIndexBuilderHelper; import org.apache.phoenix.index.PhoenixIndexMetaData; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryConstants; @@ -117,9 +118,15 @@ import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; import org.apache.phoenix.schema.transform.TransformMaintainer; +import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; +import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.trace.TracingUtils; import org.apache.phoenix.trace.util.NullSpan; import org.apache.phoenix.util.ByteUtil; @@ -134,209 +141,215 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.applyNew; -import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.removeColumn; -import static org.apache.phoenix.index.PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB; -import static org.apache.phoenix.index.PhoenixIndexBuilderHelper.RETURN_RESULT; -import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; - /** * Do all the work of managing index updates from a single coprocessor. All Puts/Delets are passed - * to an {@link IndexBuilder} to determine the actual updates to make. - * We don't need to implement {@link #postPut(ObserverContext, Put, WALEdit, Durability)} and - * {@link #postDelete(ObserverContext, Delete, WALEdit, Durability)} hooks because - * Phoenix always does batch mutations. + * to an {@link IndexBuilder} to determine the actual updates to make. We don't need to implement + * {@link #postPut(ObserverContext, Put, WALEdit, Durability)} and + * {@link #postDelete(ObserverContext, Delete, WALEdit, Durability)} hooks because Phoenix always + * does batch mutations. *
    */ public class IndexRegionObserver implements RegionCoprocessor, RegionObserver { - private static final Logger LOG = LoggerFactory.getLogger(IndexRegionObserver.class); - private static final OperationStatus IGNORE = new OperationStatus(SUCCESS); - private static final OperationStatus NOWRITE = new OperationStatus(SUCCESS); - public static final String PHOENIX_APPEND_METADATA_TO_WAL = "phoenix.append.metadata.to.wal"; - public static final boolean DEFAULT_PHOENIX_APPEND_METADATA_TO_WAL = false; - /** - * Class to represent pending data table rows - * */ - private class PendingRow { - private int count; - private boolean usable; - private ImmutableBytesPtr rowKey; - private BatchMutateContext lastContext; - - PendingRow(ImmutableBytesPtr rowKey, BatchMutateContext context) { - count = 1; - usable = true; - lastContext = context; - this.rowKey = rowKey; - } + private static final Logger LOG = LoggerFactory.getLogger(IndexRegionObserver.class); + private static final OperationStatus IGNORE = new OperationStatus(SUCCESS); + private static final OperationStatus NOWRITE = new OperationStatus(SUCCESS); + public static final String PHOENIX_APPEND_METADATA_TO_WAL = "phoenix.append.metadata.to.wal"; + public static final boolean DEFAULT_PHOENIX_APPEND_METADATA_TO_WAL = false; - public boolean add(BatchMutateContext context) { - synchronized (this) { - if (usable) { - count++; - lastContext = context; - return true; - } - } - return false; - } + /** + * Class to represent pending data table rows + */ + private class PendingRow { + private int count; + private boolean usable; + private ImmutableBytesPtr rowKey; + private BatchMutateContext lastContext; + + PendingRow(ImmutableBytesPtr rowKey, BatchMutateContext context) { + count = 1; + usable = true; + lastContext = context; + this.rowKey = rowKey; + } - public void remove() { - synchronized (this) { - count--; - if (count == 0) { - pendingRows.remove(rowKey); - usable = false; - } - } + public boolean add(BatchMutateContext context) { + synchronized (this) { + if (usable) { + count++; + lastContext = context; + return true; } - - public int getCount() { - return count; } + return false; + } - public BatchMutateContext getLastContext() { - return lastContext; + public void remove() { + synchronized (this) { + count--; + if (count == 0) { + pendingRows.remove(rowKey); + usable = false; + } } } - private static boolean ignoreIndexRebuildForTesting = false; + public int getCount() { + return count; + } + + public BatchMutateContext getLastContext() { + return lastContext; + } + } + + private static boolean ignoreIndexRebuildForTesting = false; private static boolean failPreIndexUpdatesForTesting = false; private static boolean failPostIndexUpdatesForTesting = false; private static boolean failDataTableUpdatesForTesting = false; - public static void setIgnoreIndexRebuildForTesting(boolean ignore) { ignoreIndexRebuildForTesting = ignore; } + public static void setIgnoreIndexRebuildForTesting(boolean ignore) { + ignoreIndexRebuildForTesting = ignore; + } - public static void setFailPreIndexUpdatesForTesting(boolean fail) { failPreIndexUpdatesForTesting = fail; } + public static void setFailPreIndexUpdatesForTesting(boolean fail) { + failPreIndexUpdatesForTesting = fail; + } - public static void setFailPostIndexUpdatesForTesting(boolean fail) { failPostIndexUpdatesForTesting = fail; } + public static void setFailPostIndexUpdatesForTesting(boolean fail) { + failPostIndexUpdatesForTesting = fail; + } public static void 
setFailDataTableUpdatesForTesting(boolean fail) { - failDataTableUpdatesForTesting = fail; + failDataTableUpdatesForTesting = fail; } public enum BatchMutatePhase { - PRE, POST, FAILED + PRE, + POST, + FAILED } // Hack to get around not being able to save any state between // coprocessor calls. TODO: remove after HBASE-18127 when available /** - * The concurrent batch of mutations is a set such that every pair of batches in this set has at least one common row. - * Since a BatchMutateContext object of a batch is modified only after the row locks for all the rows that are mutated - * by this batch are acquired, there can be only one thread can acquire the locks for its batch and safely access - * all the batch contexts in the set of concurrent batches. Because of this, we do not read atomic variables or - * additional locks to serialize the access to the BatchMutateContext objects. + * The concurrent batch of mutations is a set such that every pair of batches in this set has at + * least one common row. Since a BatchMutateContext object of a batch is modified only after the + * row locks for all the rows that are mutated by this batch are acquired, there can be only one + * thread can acquire the locks for its batch and safely access all the batch contexts in the set + * of concurrent batches. Because of this, we do not read atomic variables or additional locks to + * serialize the access to the BatchMutateContext objects. */ public static class BatchMutateContext { - private volatile BatchMutatePhase currentPhase = BatchMutatePhase.PRE; - // The max of reference counts on the pending rows of this batch at the time this batch arrives - private int maxPendingRowCount = 0; - private final int clientVersion; - // The collection of index mutations that will be applied before the data table mutations. The empty column (i.e., - // the verified column) will have the value false ("unverified") on these mutations - private ListMultimap preIndexUpdates; - // The collection of index mutations that will be applied after the data table mutations. The empty column (i.e., - // the verified column) will have the value true ("verified") on the put mutations - private ListMultimap postIndexUpdates; - // The collection of candidate index mutations that will be applied after the data table mutations - private ListMultimap> indexUpdates; - private List rowLocks = Lists.newArrayListWithExpectedSize(QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - // TreeSet to improve locking efficiency and avoid deadlock (PHOENIX-6871 and HBASE-17924) - private Set rowsToLock = new TreeSet<>(); - // The current and next states of the data rows corresponding to the pending mutations - private HashMap> dataRowStates; - // The previous concurrent batch contexts - private HashMap lastConcurrentBatchContext = null; - // The latches of the threads waiting for this batch to complete - private List waitList = null; - private Map multiMutationMap; - // store current cells into a map where the key is ColumnReference of the column family and - // column qualifier, and value is a pair of cell and a boolean. The value of the boolean - // will be true if the expression is CaseExpression and Else-clause is evaluated to be - // true, will be null if there is no expression on this column, otherwise false - // This is only initialized for single row atomic mutation. - private Map> currColumnCellExprMap; - - //list containing the original mutations from the MiniBatchOperationInProgress. 
Contains - // any annotations we were sent by the client, and can be used in hooks that don't get - // passed MiniBatchOperationInProgress, like preWALAppend() - private List originalMutations; - private boolean hasAtomic; - private boolean hasDelete; - private boolean hasUncoveredIndex; - private boolean hasGlobalIndex; - private boolean hasLocalIndex; - private boolean hasTransform; - private boolean returnResult; - - public BatchMutateContext() { - this.clientVersion = 0; - } - public BatchMutateContext(int clientVersion) { - this.clientVersion = clientVersion; - } - - public void populateOriginalMutations(MiniBatchOperationInProgress miniBatchOp) { - originalMutations = new ArrayList(miniBatchOp.size()); - for (int k = 0; k < miniBatchOp.size(); k++) { - originalMutations.add(miniBatchOp.getOperation(k)); - } - } - public List getOriginalMutations() { - return originalMutations; - } + private volatile BatchMutatePhase currentPhase = BatchMutatePhase.PRE; + // The max of reference counts on the pending rows of this batch at the time this batch arrives + private int maxPendingRowCount = 0; + private final int clientVersion; + // The collection of index mutations that will be applied before the data table mutations. The + // empty column (i.e., + // the verified column) will have the value false ("unverified") on these mutations + private ListMultimap preIndexUpdates; + // The collection of index mutations that will be applied after the data table mutations. The + // empty column (i.e., + // the verified column) will have the value true ("verified") on the put mutations + private ListMultimap postIndexUpdates; + // The collection of candidate index mutations that will be applied after the data table + // mutations + private ListMultimap> indexUpdates; + private List rowLocks = + Lists.newArrayListWithExpectedSize(QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + // TreeSet to improve locking efficiency and avoid deadlock (PHOENIX-6871 and HBASE-17924) + private Set rowsToLock = new TreeSet<>(); + // The current and next states of the data rows corresponding to the pending mutations + private HashMap> dataRowStates; + // The previous concurrent batch contexts + private HashMap lastConcurrentBatchContext = null; + // The latches of the threads waiting for this batch to complete + private List waitList = null; + private Map multiMutationMap; + // store current cells into a map where the key is ColumnReference of the column family and + // column qualifier, and value is a pair of cell and a boolean. The value of the boolean + // will be true if the expression is CaseExpression and Else-clause is evaluated to be + // true, will be null if there is no expression on this column, otherwise false + // This is only initialized for single row atomic mutation. + private Map> currColumnCellExprMap; + + // list containing the original mutations from the MiniBatchOperationInProgress. 
Contains + // any annotations we were sent by the client, and can be used in hooks that don't get + // passed MiniBatchOperationInProgress, like preWALAppend() + private List originalMutations; + private boolean hasAtomic; + private boolean hasDelete; + private boolean hasUncoveredIndex; + private boolean hasGlobalIndex; + private boolean hasLocalIndex; + private boolean hasTransform; + private boolean returnResult; + + public BatchMutateContext() { + this.clientVersion = 0; + } + + public BatchMutateContext(int clientVersion) { + this.clientVersion = clientVersion; + } - public BatchMutatePhase getCurrentPhase() { - return currentPhase; + public void populateOriginalMutations(MiniBatchOperationInProgress miniBatchOp) { + originalMutations = new ArrayList(miniBatchOp.size()); + for (int k = 0; k < miniBatchOp.size(); k++) { + originalMutations.add(miniBatchOp.getOperation(k)); } + } - public Put getNextDataRowState(ImmutableBytesPtr rowKeyPtr) { - Pair rowState = dataRowStates.get(rowKeyPtr); - if (rowState != null) { - return rowState.getSecond(); - } - return null; + public List getOriginalMutations() { + return originalMutations; + } + + public BatchMutatePhase getCurrentPhase() { + return currentPhase; + } + + public Put getNextDataRowState(ImmutableBytesPtr rowKeyPtr) { + Pair rowState = dataRowStates.get(rowKeyPtr); + if (rowState != null) { + return rowState.getSecond(); } + return null; + } - public CountDownLatch getCountDownLatch() { - synchronized (this) { - if (currentPhase != BatchMutatePhase.PRE) { - return null; - } - if (waitList == null) { - waitList = new ArrayList<>(); - } - CountDownLatch countDownLatch = new CountDownLatch(1); - waitList.add(countDownLatch); - return countDownLatch; - } + public CountDownLatch getCountDownLatch() { + synchronized (this) { + if (currentPhase != BatchMutatePhase.PRE) { + return null; + } + if (waitList == null) { + waitList = new ArrayList<>(); + } + CountDownLatch countDownLatch = new CountDownLatch(1); + waitList.add(countDownLatch); + return countDownLatch; } + } - public void countDownAllLatches() { - synchronized (this) { - if (waitList != null) { - for (CountDownLatch countDownLatch : waitList) { - countDownLatch.countDown(); - } - } + public void countDownAllLatches() { + synchronized (this) { + if (waitList != null) { + for (CountDownLatch countDownLatch : waitList) { + countDownLatch.countDown(); } + } } + } - public int getMaxPendingRowCount() { - return maxPendingRowCount; - } + public int getMaxPendingRowCount() { + return maxPendingRowCount; + } } private ThreadLocal batchMutateContext = - new ThreadLocal(); + new ThreadLocal(); /** * Configuration key for if the indexer should check the version of HBase is running. 
Generally, @@ -344,12 +357,15 @@ public int getMaxPendingRowCount() { */ public static final String CHECK_VERSION_CONF_KEY = "com.saleforce.hbase.index.checkversion"; - public static final String INDEX_LAZY_POST_BATCH_WRITE = "org.apache.hadoop.hbase.index.lazy.post_batch.write"; + public static final String INDEX_LAZY_POST_BATCH_WRITE = + "org.apache.hadoop.hbase.index.lazy.post_batch.write"; private static final boolean INDEX_LAZY_POST_BATCH_WRITE_DEFAULT = false; - private static final String INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.post.batch.mutate.threshold"; + private static final String INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.post.batch.mutate.threshold"; private static final long INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT = 3_000; - private static final String INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.pre.increment"; + private static final String INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.pre.increment"; private static final long INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT = 3_000; // Index writers get invoked before and after data table updates @@ -386,67 +402,72 @@ public Optional getRegionObserver() { @Override public void start(CoprocessorEnvironment e) throws IOException { - try { - final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; - String serverName = env.getServerName().getServerName(); - if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) { - // make sure the right version <-> combinations are allowed. - String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration()); - if (errormsg != null) { - throw new FatalIndexBuildingFailureException(errormsg); - } - } - - this.builder = new IndexBuildManager(env); - // Clone the config since it is shared - DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); - // setup the actual index preWriter - this.preWriter = new IndexWriter(indexWriterEnv, serverName + "-index-preWriter", false); - if (env.getConfiguration().getBoolean(INDEX_LAZY_POST_BATCH_WRITE, INDEX_LAZY_POST_BATCH_WRITE_DEFAULT)) { - this.postWriter = new IndexWriter(indexWriterEnv, new LazyParallelWriterIndexCommitter(), serverName + "-index-postWriter", false); - } - else { - this.postWriter = this.preWriter; + try { + final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; + String serverName = env.getServerName().getServerName(); + if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) { + // make sure the right version <-> combinations are allowed. 
+ String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration()); + if (errormsg != null) { + throw new FatalIndexBuildingFailureException(errormsg); } + } - this.rowLockWaitDuration = env.getConfiguration().getInt("hbase.rowlock.wait.duration", - DEFAULT_ROWLOCK_WAIT_DURATION); - this.lockManager = new LockManager(); - this.concurrentMutationWaitDuration = env.getConfiguration().getInt("phoenix.index.concurrent.wait.duration.ms", - DEFAULT_CONCURRENT_MUTATION_WAIT_DURATION_IN_MS); - // Metrics impl for the Indexer -- avoiding unnecessary indirection for hadoop-1/2 compat - this.metricSource = MetricsIndexerSourceFactory.getInstance().getIndexerSource(); - setSlowThresholds(e.getConfiguration()); - this.dataTableName = env.getRegionInfo().getTable().getNameAsString(); - this.shouldWALAppend = env.getConfiguration().getBoolean(PHOENIX_APPEND_METADATA_TO_WAL, - DEFAULT_PHOENIX_APPEND_METADATA_TO_WAL); - this.isNamespaceEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.INDEX, - env.getConfiguration()); - TableDescriptor tableDescriptor = env.getRegion().getTableDescriptor(); - BloomType bloomFilterType = tableDescriptor.getColumnFamilies()[0].getBloomFilterType(); - // when the table descriptor changes, the coproc is reloaded - this.useBloomFilter = bloomFilterType == BloomType.ROW; - } catch (NoSuchMethodError ex) { - disabled = true; - LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex); + this.builder = new IndexBuildManager(env); + // Clone the config since it is shared + DelegateRegionCoprocessorEnvironment indexWriterEnv = + new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); + // setup the actual index preWriter + this.preWriter = new IndexWriter(indexWriterEnv, serverName + "-index-preWriter", false); + if ( + env.getConfiguration().getBoolean(INDEX_LAZY_POST_BATCH_WRITE, + INDEX_LAZY_POST_BATCH_WRITE_DEFAULT) + ) { + this.postWriter = new IndexWriter(indexWriterEnv, new LazyParallelWriterIndexCommitter(), + serverName + "-index-postWriter", false); + } else { + this.postWriter = this.preWriter; } + + this.rowLockWaitDuration = + env.getConfiguration().getInt("hbase.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION); + this.lockManager = new LockManager(); + this.concurrentMutationWaitDuration = + env.getConfiguration().getInt("phoenix.index.concurrent.wait.duration.ms", + DEFAULT_CONCURRENT_MUTATION_WAIT_DURATION_IN_MS); + // Metrics impl for the Indexer -- avoiding unnecessary indirection for hadoop-1/2 compat + this.metricSource = MetricsIndexerSourceFactory.getInstance().getIndexerSource(); + setSlowThresholds(e.getConfiguration()); + this.dataTableName = env.getRegionInfo().getTable().getNameAsString(); + this.shouldWALAppend = env.getConfiguration().getBoolean(PHOENIX_APPEND_METADATA_TO_WAL, + DEFAULT_PHOENIX_APPEND_METADATA_TO_WAL); + this.isNamespaceEnabled = + SchemaUtil.isNamespaceMappingEnabled(PTableType.INDEX, env.getConfiguration()); + TableDescriptor tableDescriptor = env.getRegion().getTableDescriptor(); + BloomType bloomFilterType = tableDescriptor.getColumnFamilies()[0].getBloomFilterType(); + // when the table descriptor changes, the coproc is reloaded + this.useBloomFilter = bloomFilterType == BloomType.ROW; + } catch (NoSuchMethodError ex) { + disabled = true; + LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex); + } } /** * Extracts the slow call threshold values from the configuration. 
*/ private void setSlowThresholds(Configuration c) { - slowIndexPrepareThreshold = c.getLong(INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY, - INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT); - slowPreIncrementThreshold = c.getLong(INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY, - INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT); + slowIndexPrepareThreshold = + c.getLong(INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY, INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT); + slowPreIncrementThreshold = c.getLong(INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY, + INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT); } private String getCallTooSlowMessage(String callName, long duration, long threshold) { - StringBuilder sb = new StringBuilder(64); - sb.append("(callTooSlow) ").append(callName).append(" duration=").append(duration); - sb.append("ms, threshold=").append(threshold).append("ms"); - return sb.toString(); + StringBuilder sb = new StringBuilder(64); + sb.append("(callTooSlow) ").append(callName).append(" duration=").append(duration); + sb.append("ms, threshold=").append(threshold).append("ms"); + return sb.toString(); } @Override @@ -455,8 +476,8 @@ public void stop(CoprocessorEnvironment e) throws IOException { return; } if (this.disabled) { - return; - } + return; + } this.stopped = true; String msg = "Indexer is being stopped"; this.builder.stop(msg); @@ -465,1557 +486,1586 @@ public void stop(CoprocessorEnvironment e) throws IOException { } /** - * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing - * sets up the necessary locks and mvcc to allow an atomic update. The Increment is not a - * real increment, though, it's really more of a Put. We translate the Increment into a - * list of mutations, at most a single Put and Delete that are the changes upon executing - * the list of ON DUPLICATE KEY clauses for this row. + * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing sets up + * the necessary locks and mvcc to allow an atomic update. The Increment is not a real increment, + * though, it's really more of a Put. We translate the Increment into a list of mutations, at most + * a single Put and Delete that are the changes upon executing the list of ON DUPLICATE KEY + * clauses for this row. */ @Override public Result preIncrementAfterRowLock(final ObserverContext e, - final Increment inc) throws IOException { - long start = EnvironmentEdgeManager.currentTimeMillis(); - try { - List mutations = this.builder.executeAtomicOp(inc); - if (mutations == null) { - return null; - } + final Increment inc) throws IOException { + long start = EnvironmentEdgeManager.currentTimeMillis(); + try { + List mutations = this.builder.executeAtomicOp(inc); + if (mutations == null) { + return null; + } - // Causes the Increment to be ignored as we're committing the mutations - // ourselves below. - e.bypass(); - // ON DUPLICATE KEY IGNORE will return empty list if row already exists - // as no action is required in that case. 
- if (!mutations.isEmpty()) { - Region region = e.getEnvironment().getRegion(); - // Otherwise, submit the mutations directly here - region.batchMutate(mutations.toArray(new Mutation[0])); - } - return Result.EMPTY_RESULT; - } catch (Throwable t) { - throw ClientUtil.createIOException( - "Unable to process ON DUPLICATE IGNORE for " + - e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + - "(" + Bytes.toStringBinary(inc.getRow()) + ")", t); - } finally { - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexPrepareThreshold) { - if (LOG.isDebugEnabled()) { - LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold)); - } - metricSource.incrementSlowDuplicateKeyCheckCalls(dataTableName); - } - metricSource.updateDuplicateKeyCheckTime(dataTableName, duration); + // Causes the Increment to be ignored as we're committing the mutations + // ourselves below. + e.bypass(); + // ON DUPLICATE KEY IGNORE will return empty list if row already exists + // as no action is required in that case. + if (!mutations.isEmpty()) { + Region region = e.getEnvironment().getRegion(); + // Otherwise, submit the mutations directly here + region.batchMutate(mutations.toArray(new Mutation[0])); + } + return Result.EMPTY_RESULT; + } catch (Throwable t) { + throw ClientUtil.createIOException("Unable to process ON DUPLICATE IGNORE for " + + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + "(" + + Bytes.toStringBinary(inc.getRow()) + ")", t); + } finally { + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexPrepareThreshold) { + if (LOG.isDebugEnabled()) { + LOG.debug( + getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold)); + } + metricSource.incrementSlowDuplicateKeyCheckCalls(dataTableName); } + metricSource.updateDuplicateKeyCheckTime(dataTableName, duration); + } } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - if (this.disabled) { - return; - } - try { - preBatchMutateWithExceptions(c, miniBatchOp); - return; - } catch (Throwable t) { - rethrowIndexingException(t); - } - throw new RuntimeException( - "Somehow didn't return an index update but also didn't propagate the failure to the client!"); + MiniBatchOperationInProgress miniBatchOp) throws IOException { + if (this.disabled) { + return; + } + try { + preBatchMutateWithExceptions(c, miniBatchOp); + return; + } catch (Throwable t) { + rethrowIndexingException(t); + } + throw new RuntimeException( + "Somehow didn't return an index update but also didn't propagate the failure to the client!"); } private void populateRowsToLock(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) { - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (this.builder.isAtomicOp(m) || context.returnResult || - this.builder.isEnabled(m)) { - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - context.rowsToLock.add(row); - } + BatchMutateContext context) { + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (this.builder.isAtomicOp(m) || context.returnResult || this.builder.isEnabled(m)) { + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + context.rowsToLock.add(row); } + } } - /** - * Add the mutations generated by the ON DUPLICATE KEY UPDATE to the current batch. 
- * MiniBatchOperationInProgress#addOperationsFromCP() allows coprocessors to attach additional mutations - * to the incoming mutation. These additional mutations are only executed if the status of the original - * mutation is set to NOT_RUN. For atomic mutations, we want HBase to ignore the incoming mutation and - * instead execute the mutations generated by the server for that atomic mutation. But we can’t achieve - * this behavior just by setting the status of the original mutation to IGNORE because that will also - * ignore the additional mutations added by the coprocessors. To get around this, we need to do a fixup - * of the original mutation in the batch. Since we always generate one Put mutation from the incoming atomic - * Put mutation, we can transfer the cells from the generated Put mutation to the original atomic Put mutation in the batch. - * The additional mutations (Delete) can then be added to the operationsFromCoprocessors array. - */ + /** + * Add the mutations generated by the ON DUPLICATE KEY UPDATE to the current batch. + * MiniBatchOperationInProgress#addOperationsFromCP() allows coprocessors to attach additional + * mutations to the incoming mutation. These additional mutations are only executed if the status + * of the original mutation is set to NOT_RUN. For atomic mutations, we want HBase to ignore the + * incoming mutation and instead execute the mutations generated by the server for that atomic + * mutation. But we can’t achieve this behavior just by setting the status of the original + * mutation to IGNORE because that will also ignore the additional mutations added by the + * coprocessors. To get around this, we need to do a fixup of the original mutation in the batch. + * Since we always generate one Put mutation from the incoming atomic Put mutation, we can + * transfer the cells from the generated Put mutation to the original atomic Put mutation in the + * batch. The additional mutations (Delete) can then be added to the operationsFromCoprocessors + * array. 
+ */ private void addOnDupMutationsToBatch(MiniBatchOperationInProgress miniBatchOp, - int index, List mutations) { - List deleteMutations = Lists.newArrayListWithExpectedSize(mutations.size()); - for (Mutation m : mutations) { - if (m instanceof Put) { - // fix the incoming atomic mutation - Mutation original = miniBatchOp.getOperation(index); - original.getFamilyCellMap().putAll(m.getFamilyCellMap()); - } else if (m instanceof Delete) { - deleteMutations.add((Delete)m); - } + int index, List mutations) { + List deleteMutations = Lists.newArrayListWithExpectedSize(mutations.size()); + for (Mutation m : mutations) { + if (m instanceof Put) { + // fix the incoming atomic mutation + Mutation original = miniBatchOp.getOperation(index); + original.getFamilyCellMap().putAll(m.getFamilyCellMap()); + } else if (m instanceof Delete) { + deleteMutations.add((Delete) m); } + } + + if (!deleteMutations.isEmpty()) { + miniBatchOp.addOperationsFromCP(index, + deleteMutations.toArray(new Mutation[deleteMutations.size()])); + } + } + + private void addOnDupMutationsToBatch(MiniBatchOperationInProgress miniBatchOp, + BatchMutateContext context) throws IOException { + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if ((this.builder.isAtomicOp(m) || this.builder.returnResult(m)) && m instanceof Put) { + List mutations = generateOnDupMutations(context, (Put) m, miniBatchOp); + if (!mutations.isEmpty()) { + addOnDupMutationsToBatch(miniBatchOp, i, mutations); + } else { + // empty list of generated mutations implies + // 1) ON DUPLICATE KEY IGNORE if row already exists, OR + // 2) ON DUPLICATE KEY UPDATE if CASE expression is specified and in each of + // them the new value is the same as the old value in the ELSE-clause (empty + // cell timestamp will NOT be updated) + byte[] retVal = PInteger.INSTANCE.toBytes(0); + List cells = new ArrayList<>(); + cells.add(PhoenixKeyValueUtil.newKeyValue(m.getRow(), Bytes.toBytes(UPSERT_CF), + Bytes.toBytes(UPSERT_STATUS_CQ), 0, retVal, 0, retVal.length)); - if (!deleteMutations.isEmpty()) { - miniBatchOp.addOperationsFromCP(index, - deleteMutations.toArray(new Mutation[deleteMutations.size()])); - } - } - - private void addOnDupMutationsToBatch(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) throws IOException { - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if ((this.builder.isAtomicOp(m) || this.builder.returnResult(m)) && m instanceof Put) { - List mutations = generateOnDupMutations(context, (Put)m, miniBatchOp); - if (!mutations.isEmpty()) { - addOnDupMutationsToBatch(miniBatchOp, i, mutations); - } else { - // empty list of generated mutations implies - // 1) ON DUPLICATE KEY IGNORE if row already exists, OR - // 2) ON DUPLICATE KEY UPDATE if CASE expression is specified and in each of - // them the new value is the same as the old value in the ELSE-clause (empty - // cell timestamp will NOT be updated) - byte[] retVal = PInteger.INSTANCE.toBytes(0); - List cells = new ArrayList<>(); - cells.add(PhoenixKeyValueUtil.newKeyValue(m.getRow(), Bytes.toBytes(UPSERT_CF), - Bytes.toBytes(UPSERT_STATUS_CQ), 0, retVal, 0, retVal.length)); - - if (context.returnResult) { - context.currColumnCellExprMap.forEach( - (key, value) -> cells.add(value.getFirst())); - cells.sort(CellComparator.getInstance()); - } - - // put Result in OperationStatus for returning update status from conditional - // upserts, where 0 represents the row is not updated - Result result = 
Result.create(cells); - miniBatchOp.setOperationStatus(i, - new OperationStatus(SUCCESS, result)); - } - } else if (context.returnResult) { - Map> currColumnCellExprMap = new HashMap<>(); - byte[] rowKey = m.getRow(); - ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(rowKey); - Pair dataRowState = context.dataRowStates.get(rowKeyPtr); - Put currentDataRowState = dataRowState != null ? dataRowState.getFirst() : null; - if (currentDataRowState != null) { - updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); - context.currColumnCellExprMap = currColumnCellExprMap; - } + if (context.returnResult) { + context.currColumnCellExprMap.forEach((key, value) -> cells.add(value.getFirst())); + cells.sort(CellComparator.getInstance()); } + + // put Result in OperationStatus for returning update status from conditional + // upserts, where 0 represents the row is not updated + Result result = Result.create(cells); + miniBatchOp.setOperationStatus(i, new OperationStatus(SUCCESS, result)); + } + } else if (context.returnResult) { + Map> currColumnCellExprMap = new HashMap<>(); + byte[] rowKey = m.getRow(); + ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(rowKey); + Pair dataRowState = context.dataRowStates.get(rowKeyPtr); + Put currentDataRowState = dataRowState != null ? dataRowState.getFirst() : null; + if (currentDataRowState != null) { + updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); + context.currColumnCellExprMap = currColumnCellExprMap; + } } + } } private void lockRows(BatchMutateContext context) throws IOException { - for (ImmutableBytesPtr rowKey : context.rowsToLock) { - context.rowLocks.add(lockManager.lockRow(rowKey, rowLockWaitDuration)); - } + for (ImmutableBytesPtr rowKey : context.rowsToLock) { + context.rowLocks.add(lockManager.lockRow(rowKey, rowLockWaitDuration)); + } } - private void unlockRows(BatchMutateContext context) throws IOException { - for (RowLock rowLock : context.rowLocks) { - rowLock.release(); - } - context.rowLocks.clear(); - } - - private Collection groupMutations(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) throws IOException { - context.multiMutationMap = new HashMap<>(); - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - // skip this mutation if we aren't enabling indexing - // unfortunately, we really should ask if the raw mutation (rather than the combined mutation) - // should be indexed, which means we need to expose another method on the builder. Such is the - // way optimization go though. 
- if (!isAtomicOperationComplete(miniBatchOp.getOperationStatus(i)) && this.builder.isEnabled(m)) { - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - MultiMutation stored = context.multiMutationMap.get(row); - if (stored == null) { - // we haven't seen this row before, so add it - stored = new MultiMutation(row); - context.multiMutationMap.put(row, stored); - } - stored.addAll(m); - Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); - if (mutationsAddedByCP != null) { - for (Mutation addedMutation : mutationsAddedByCP) { - stored.addAll(addedMutation); - } - } - } - } - return context.multiMutationMap.values(); + private void unlockRows(BatchMutateContext context) throws IOException { + for (RowLock rowLock : context.rowLocks) { + rowLock.release(); } + context.rowLocks.clear(); + } - public static void setTimestamps(MiniBatchOperationInProgress miniBatchOp, - IndexBuildManager builder, long ts) throws IOException { - for (Integer i = 0; i < miniBatchOp.size(); i++) { - if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { - continue; - } - Mutation m = miniBatchOp.getOperation(i); - // skip this mutation if we aren't enabling indexing or not an atomic op - // or if it is an atomic op and its timestamp is already set(not LATEST) - if (!builder.isEnabled(m) && - !((builder.isAtomicOp(m) || builder.returnResult(m)) && - IndexUtil.getMaxTimestamp(m) == HConstants.LATEST_TIMESTAMP)) { - continue; - } - setTimestampOnMutation(m, ts); - - // set the timestamps on any additional mutations added - Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); - if (mutationsAddedByCP != null) { - for (Mutation addedMutation : mutationsAddedByCP) { - setTimestampOnMutation(addedMutation, ts); - } - } + private Collection + groupMutations(MiniBatchOperationInProgress miniBatchOp, BatchMutateContext context) + throws IOException { + context.multiMutationMap = new HashMap<>(); + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + // skip this mutation if we aren't enabling indexing + // unfortunately, we really should ask if the raw mutation (rather than the combined mutation) + // should be indexed, which means we need to expose another method on the builder. Such is the + // way optimization go though. 
+ if ( + !isAtomicOperationComplete(miniBatchOp.getOperationStatus(i)) && this.builder.isEnabled(m) + ) { + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + MultiMutation stored = context.multiMutationMap.get(row); + if (stored == null) { + // we haven't seen this row before, so add it + stored = new MultiMutation(row); + context.multiMutationMap.put(row, stored); + } + stored.addAll(m); + Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); + if (mutationsAddedByCP != null) { + for (Mutation addedMutation : mutationsAddedByCP) { + stored.addAll(addedMutation); + } } + } } + return context.multiMutationMap.values(); + } - private static void setTimestampOnMutation(Mutation m, long ts) throws IOException { - for (List cells : m.getFamilyCellMap().values()) { - for (Cell cell : cells) { - CellUtil.setTimestamp(cell, ts); - } + public static void setTimestamps(MiniBatchOperationInProgress miniBatchOp, + IndexBuildManager builder, long ts) throws IOException { + for (Integer i = 0; i < miniBatchOp.size(); i++) { + if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { + continue; + } + Mutation m = miniBatchOp.getOperation(i); + // skip this mutation if we aren't enabling indexing or not an atomic op + // or if it is an atomic op and its timestamp is already set(not LATEST) + if ( + !builder.isEnabled(m) && !((builder.isAtomicOp(m) || builder.returnResult(m)) + && IndexUtil.getMaxTimestamp(m) == HConstants.LATEST_TIMESTAMP) + ) { + continue; + } + setTimestampOnMutation(m, ts); + + // set the timestamps on any additional mutations added + Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); + if (mutationsAddedByCP != null) { + for (Mutation addedMutation : mutationsAddedByCP) { + setTimestampOnMutation(addedMutation, ts); } + } } + } - /** - * This method applies pending delete mutations on the next row states - */ - private void applyPendingDeleteMutations(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) throws IOException { - for (int i = 0; i < miniBatchOp.size(); i++) { - if (miniBatchOp.getOperationStatus(i) == IGNORE) { - continue; - } - Mutation m = miniBatchOp.getOperation(i); - if (!this.builder.isEnabled(m)) { - continue; - } - if (!(m instanceof Delete)) { - continue; - } - - if (!applyOnePendingDeleteMutation(context, (Delete) m)) { - miniBatchOp.setOperationStatus(i, NOWRITE); - } - } + private static void setTimestampOnMutation(Mutation m, long ts) throws IOException { + for (List cells : m.getFamilyCellMap().values()) { + for (Cell cell : cells) { + CellUtil.setTimestamp(cell, ts); + } } + } - /** - * This method returns true if the pending delete mutation needs to be applied - * and false f the delete mutation can be ignored for example in the case of - * delete on non-existing row. - */ - private boolean applyOnePendingDeleteMutation(BatchMutateContext context, Delete delete) { - ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(delete.getRow()); - Pair dataRowState = context.dataRowStates.get(rowKeyPtr); - if (dataRowState == null) { - dataRowState = new Pair(null, null); - context.dataRowStates.put(rowKeyPtr, dataRowState); - } - Put nextDataRowState = dataRowState.getSecond(); - if (nextDataRowState == null) { - if (dataRowState.getFirst() == null) { - // This is a delete row mutation on a non-existing row. 
There is no need to apply this mutation - // on the data table - return false; - } - } + /** + * This method applies pending delete mutations on the next row states + */ + private void applyPendingDeleteMutations(MiniBatchOperationInProgress miniBatchOp, + BatchMutateContext context) throws IOException { + for (int i = 0; i < miniBatchOp.size(); i++) { + if (miniBatchOp.getOperationStatus(i) == IGNORE) { + continue; + } + Mutation m = miniBatchOp.getOperation(i); + if (!this.builder.isEnabled(m)) { + continue; + } + if (!(m instanceof Delete)) { + continue; + } - for (List cells : delete.getFamilyCellMap().values()) { - for (Cell cell : cells) { - switch (cell.getType()) { - case DeleteFamily: - case DeleteFamilyVersion: - nextDataRowState.getFamilyCellMap().remove(CellUtil.cloneFamily(cell)); - break; - case DeleteColumn: - case Delete: - removeColumn(nextDataRowState, cell); - } - } - } - if (nextDataRowState != null && nextDataRowState.getFamilyCellMap().size() == 0) { - dataRowState.setSecond(null); - } - return true; - } - - /** - * This method applies the pending put mutations on the the next row states. - * Before this method is called, the next row states is set to current row states. - */ - private void applyPendingPutMutations(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context, long now) throws IOException { - for (Integer i = 0; i < miniBatchOp.size(); i++) { - if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { - continue; - } - Mutation m = miniBatchOp.getOperation(i); - // skip this mutation if we aren't enabling indexing - if (!this.builder.isEnabled(m)) { - continue; - } + if (!applyOnePendingDeleteMutation(context, (Delete) m)) { + miniBatchOp.setOperationStatus(i, NOWRITE); + } + } + } - if (!(m instanceof Put)) { - continue; - } + /** + * This method returns true if the pending delete mutation needs to be applied and false f the + * delete mutation can be ignored for example in the case of delete on non-existing row. + */ + private boolean applyOnePendingDeleteMutation(BatchMutateContext context, Delete delete) { + ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(delete.getRow()); + Pair dataRowState = context.dataRowStates.get(rowKeyPtr); + if (dataRowState == null) { + dataRowState = new Pair(null, null); + context.dataRowStates.put(rowKeyPtr, dataRowState); + } + Put nextDataRowState = dataRowState.getSecond(); + if (nextDataRowState == null) { + if (dataRowState.getFirst() == null) { + // This is a delete row mutation on a non-existing row. There is no need to apply this + // mutation + // on the data table + return false; + } + } - ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(m.getRow()); - Pair dataRowState = context.dataRowStates.get(rowKeyPtr); - if (dataRowState == null) { - dataRowState = new Pair(null, null); - context.dataRowStates.put(rowKeyPtr, dataRowState); - } - Put nextDataRowState = dataRowState.getSecond(); - dataRowState.setSecond((nextDataRowState != null) ? 
applyNew((Put) m, nextDataRowState) : new Put((Put) m)); - - Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); - if (mutationsAddedByCP != null) { - // all added mutations are of type delete corresponding to set nulls - for (Mutation addedMutation : mutationsAddedByCP) { - applyOnePendingDeleteMutation(context, (Delete)addedMutation); - } - } + for (List cells : delete.getFamilyCellMap().values()) { + for (Cell cell : cells) { + switch (cell.getType()) { + case DeleteFamily: + case DeleteFamilyVersion: + nextDataRowState.getFamilyCellMap().remove(CellUtil.cloneFamily(cell)); + break; + case DeleteColumn: + case Delete: + removeColumn(nextDataRowState, cell); } + } + } + if (nextDataRowState != null && nextDataRowState.getFamilyCellMap().size() == 0) { + dataRowState.setSecond(null); } + return true; + } - /** - * * Prepares next data row state - */ - private void prepareDataRowStates(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context, - long now) throws IOException { - if (context.rowsToLock.size() == 0) { - return; - } - applyPendingPutMutations(miniBatchOp, context, now); - applyPendingDeleteMutations(miniBatchOp, context); - } - - /** - * The index update generation for local indexes uses the existing index update generation code (i.e., - * the {@link IndexBuilder} implementation). - */ - private void handleLocalIndexUpdates(TableName table, - MiniBatchOperationInProgress miniBatchOp, - Collection pendingMutations, - PhoenixIndexMetaData indexMetaData) throws Throwable { - ListMultimap> indexUpdates = ArrayListMultimap.>create(); - this.builder.getIndexUpdates(indexUpdates, miniBatchOp, pendingMutations, indexMetaData); - byte[] tableName = table.getName(); - HTableInterfaceReference hTableInterfaceReference = - new HTableInterfaceReference(new ImmutableBytesPtr(tableName)); - List> localIndexUpdates = indexUpdates.removeAll(hTableInterfaceReference); - if (localIndexUpdates == null || localIndexUpdates.isEmpty()) { - return; - } - List localUpdates = new ArrayList(); - Iterator> indexUpdatesItr = localIndexUpdates.iterator(); - while (indexUpdatesItr.hasNext()) { - Pair next = indexUpdatesItr.next(); - localUpdates.add(next.getFirst()); - } - if (!localUpdates.isEmpty()) { - Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(0); - if (mutationsAddedByCP != null) { - localUpdates.addAll(Arrays.asList(mutationsAddedByCP)); - } - miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()])); + /** + * This method applies the pending put mutations on the the next row states. Before this method is + * called, the next row states is set to current row states. 
+ */ + private void applyPendingPutMutations(MiniBatchOperationInProgress miniBatchOp, + BatchMutateContext context, long now) throws IOException { + for (Integer i = 0; i < miniBatchOp.size(); i++) { + if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { + continue; + } + Mutation m = miniBatchOp.getOperation(i); + // skip this mutation if we aren't enabling indexing + if (!this.builder.isEnabled(m)) { + continue; + } + + if (!(m instanceof Put)) { + continue; + } + + ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(m.getRow()); + Pair dataRowState = context.dataRowStates.get(rowKeyPtr); + if (dataRowState == null) { + dataRowState = new Pair(null, null); + context.dataRowStates.put(rowKeyPtr, dataRowState); + } + Put nextDataRowState = dataRowState.getSecond(); + dataRowState.setSecond( + (nextDataRowState != null) ? applyNew((Put) m, nextDataRowState) : new Put((Put) m)); + + Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(i); + if (mutationsAddedByCP != null) { + // all added mutations are of type delete corresponding to set nulls + for (Mutation addedMutation : mutationsAddedByCP) { + applyOnePendingDeleteMutation(context, (Delete) addedMutation); } + } } + } - /** - * Determines if any of the data table mutations in the given batch does not include all - * the indexed columns or the where clause columns for partial uncovered indexes. - */ - private boolean isPartialUncoveredIndexMutation(PhoenixIndexMetaData indexMetaData, - MiniBatchOperationInProgress miniBatchOp) { - int indexedColumnCount = 0; - for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { - indexedColumnCount += indexMaintainer.getIndexedColumns().size(); - if (indexMaintainer.getIndexWhereColumns() != null) { - indexedColumnCount += indexMaintainer.getIndexWhereColumns().size(); - } - } - Set columns = new HashSet(indexedColumnCount); - for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { - columns.addAll(indexMaintainer.getIndexedColumns()); - if (indexMaintainer.getIndexWhereColumns() != null) { - columns.addAll(indexMaintainer.getIndexWhereColumns()); - } - } - for (int i = 0; i < miniBatchOp.size(); i++) { - if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { - continue; - } - Mutation m = miniBatchOp.getOperation(i); - if (!this.builder.isEnabled(m)) { - continue; - } - for (ColumnReference column : columns) { - if (m.get(column.getFamily(), column.getQualifier()).isEmpty()) { - // The returned list is empty, which means the indexed column is not - // included. This mutation would result in partial index update (and thus - // index column values should be retrieved from the existing data table row) - return true; - } - } - } - return false; + /** + * * Prepares next data row state + */ + private void prepareDataRowStates(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp, BatchMutateContext context, long now) + throws IOException { + if (context.rowsToLock.size() == 0) { + return; } - /** - * Retrieve the data row state either from memory or disk. The rows are locked by the caller. - */ - private void getCurrentRowStates(ObserverContext c, - BatchMutateContext context) throws IOException { - Set keys = new HashSet(context.rowsToLock.size()); - for (ImmutableBytesPtr rowKeyPtr : context.rowsToLock) { - PendingRow pendingRow = new PendingRow(rowKeyPtr, context); - // Add the data table rows in the mini batch to the per region collection of pending - // rows. 
This will be used to detect concurrent updates - PendingRow existingPendingRow = pendingRows.putIfAbsent(rowKeyPtr, pendingRow); - if (existingPendingRow == null) { - // There was no pending row for this row key. We need to retrieve this row from disk - keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); - } else { - // There is a pending row for this row key. We need to retrieve the row from memory - BatchMutateContext lastContext = existingPendingRow.getLastContext(); - if (existingPendingRow.add(context)) { - BatchMutatePhase phase = lastContext.getCurrentPhase(); - Preconditions.checkArgument(phase != BatchMutatePhase.POST, - "the phase of the last batch cannot be POST"); - if (phase == BatchMutatePhase.PRE) { - if (context.lastConcurrentBatchContext == null) { - context.lastConcurrentBatchContext = new HashMap<>(); - } - context.lastConcurrentBatchContext.put(rowKeyPtr, lastContext); - if (context.maxPendingRowCount < existingPendingRow.getCount()) { - context.maxPendingRowCount = existingPendingRow.getCount(); - } - Put put = lastContext.getNextDataRowState(rowKeyPtr); - if (put != null) { - context.dataRowStates.put(rowKeyPtr, new Pair<>(put, new Put(put))); - } - } else { - // The last batch for this row key failed. We cannot use the memory state. - // So we need to retrieve this row from disk - keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); - } - } else { - // The existing pending row is removed from the map. That means there is no - // pending row for this row key anymore. We need to add the new one to the map - pendingRows.put(rowKeyPtr, pendingRow); - keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); - } - } - } - if (keys.isEmpty()) { - return; - } + applyPendingPutMutations(miniBatchOp, context, now); + applyPendingDeleteMutations(miniBatchOp, context); + } - if (this.useBloomFilter) { - for (KeyRange key : keys) { - // Scan.java usage alters scan instances, safer to create scan instance per usage - Scan scan = new Scan(); - // create a scan with same start/stop row key scan#isGetScan() - // for bloom filters scan should be a get - scan.withStartRow(key.getLowerRange(), true); - scan.withStopRow(key.getLowerRange(), true); - readDataTableRows(c, context, scan); - } - } - else { - Scan scan = new Scan(); - ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList(keys)); - scanRanges.initializeScan(scan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - scan.setFilter(skipScanFilter); - readDataTableRows(c, context, scan); - } + /** + * The index update generation for local indexes uses the existing index update generation code + * (i.e., the {@link IndexBuilder} implementation). 
+ */ + private void handleLocalIndexUpdates(TableName table, + MiniBatchOperationInProgress miniBatchOp, + Collection pendingMutations, PhoenixIndexMetaData indexMetaData) + throws Throwable { + ListMultimap> indexUpdates = + ArrayListMultimap.> create(); + this.builder.getIndexUpdates(indexUpdates, miniBatchOp, pendingMutations, indexMetaData); + byte[] tableName = table.getName(); + HTableInterfaceReference hTableInterfaceReference = + new HTableInterfaceReference(new ImmutableBytesPtr(tableName)); + List> localIndexUpdates = + indexUpdates.removeAll(hTableInterfaceReference); + if (localIndexUpdates == null || localIndexUpdates.isEmpty()) { + return; + } + List localUpdates = new ArrayList(); + Iterator> indexUpdatesItr = localIndexUpdates.iterator(); + while (indexUpdatesItr.hasNext()) { + Pair next = indexUpdatesItr.next(); + localUpdates.add(next.getFirst()); } + if (!localUpdates.isEmpty()) { + Mutation[] mutationsAddedByCP = miniBatchOp.getOperationsFromCoprocessors(0); + if (mutationsAddedByCP != null) { + localUpdates.addAll(Arrays.asList(mutationsAddedByCP)); + } + miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()])); + } + } - private void readDataTableRows(ObserverContext c, - BatchMutateContext context, Scan scan) throws IOException { - try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) { - boolean more = true; - while (more) { - List cells = new ArrayList(); - more = scanner.next(cells); - if (cells.isEmpty()) { - continue; - } - byte[] rowKey = CellUtil.cloneRow(cells.get(0)); - Put put = new Put(rowKey); - for (Cell cell : cells) { - put.add(cell); - } - context.dataRowStates.put(new ImmutableBytesPtr(rowKey), new Pair(put, new Put(put))); - } - } + /** + * Determines if any of the data table mutations in the given batch does not include all the + * indexed columns or the where clause columns for partial uncovered indexes. + */ + private boolean isPartialUncoveredIndexMutation(PhoenixIndexMetaData indexMetaData, + MiniBatchOperationInProgress miniBatchOp) { + int indexedColumnCount = 0; + for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { + indexedColumnCount += indexMaintainer.getIndexedColumns().size(); + if (indexMaintainer.getIndexWhereColumns() != null) { + indexedColumnCount += indexMaintainer.getIndexWhereColumns().size(); + } } - public static Mutation getDeleteIndexMutation(Put dataRowState, IndexMaintainer indexMaintainer, - long ts, ImmutableBytesPtr rowKeyPtr) { - ValueGetter dataRowVG = new IndexUtil.SimpleValueGetter(dataRowState); - byte[] indexRowKey = indexMaintainer.buildRowKey(dataRowVG, rowKeyPtr, null, null, ts); - return indexMaintainer.buildRowDeleteMutation(indexRowKey, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + Set columns = new HashSet(indexedColumnCount); + for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { + columns.addAll(indexMaintainer.getIndexedColumns()); + if (indexMaintainer.getIndexWhereColumns() != null) { + columns.addAll(indexMaintainer.getIndexWhereColumns()); + } + } + for (int i = 0; i < miniBatchOp.size(); i++) { + if (isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { + continue; + } + Mutation m = miniBatchOp.getOperation(i); + if (!this.builder.isEnabled(m)) { + continue; + } + for (ColumnReference column : columns) { + if (m.get(column.getFamily(), column.getQualifier()).isEmpty()) { + // The returned list is empty, which means the indexed column is not + // included. 
This mutation would result in partial index update (and thus + // index column values should be retrieved from the existing data table row) + return true; + } + } } + return false; + } - /** - * Generate the index update for a data row from the mutation that are obtained by merging the previous data row - * state with the pending row mutation. - */ - private void prepareIndexMutations(BatchMutateContext context, List maintainers, long ts) - throws IOException { - List> indexTables = new ArrayList<>(maintainers.size()); - for (IndexMaintainer indexMaintainer : maintainers) { - if (indexMaintainer.isLocalIndex()) { - continue; + /** + * Retrieve the data row state either from memory or disk. The rows are locked by the caller. + */ + private void getCurrentRowStates(ObserverContext c, + BatchMutateContext context) throws IOException { + Set keys = new HashSet(context.rowsToLock.size()); + for (ImmutableBytesPtr rowKeyPtr : context.rowsToLock) { + PendingRow pendingRow = new PendingRow(rowKeyPtr, context); + // Add the data table rows in the mini batch to the per region collection of pending + // rows. This will be used to detect concurrent updates + PendingRow existingPendingRow = pendingRows.putIfAbsent(rowKeyPtr, pendingRow); + if (existingPendingRow == null) { + // There was no pending row for this row key. We need to retrieve this row from disk + keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); + } else { + // There is a pending row for this row key. We need to retrieve the row from memory + BatchMutateContext lastContext = existingPendingRow.getLastContext(); + if (existingPendingRow.add(context)) { + BatchMutatePhase phase = lastContext.getCurrentPhase(); + Preconditions.checkArgument(phase != BatchMutatePhase.POST, + "the phase of the last batch cannot be POST"); + if (phase == BatchMutatePhase.PRE) { + if (context.lastConcurrentBatchContext == null) { + context.lastConcurrentBatchContext = new HashMap<>(); } - HTableInterfaceReference hTableInterfaceReference = - new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); - indexTables.add(new Pair<>(indexMaintainer, hTableInterfaceReference)); - } - for (Map.Entry> entry : context.dataRowStates.entrySet()) { - ImmutableBytesPtr rowKeyPtr = entry.getKey(); - Pair dataRowState = entry.getValue(); - Put currentDataRowState = dataRowState.getFirst(); - Put nextDataRowState = dataRowState.getSecond(); - if (currentDataRowState == null && nextDataRowState == null) { - continue; + context.lastConcurrentBatchContext.put(rowKeyPtr, lastContext); + if (context.maxPendingRowCount < existingPendingRow.getCount()) { + context.maxPendingRowCount = existingPendingRow.getCount(); } - for (Pair pair : indexTables) { - IndexMaintainer indexMaintainer = pair.getFirst(); - HTableInterfaceReference hTableInterfaceReference = pair.getSecond(); - if (nextDataRowState != null - && indexMaintainer.shouldPrepareIndexMutations(nextDataRowState)) { - ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRowState); - Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, - nextDataRowVG, rowKeyPtr, ts, null, null, false); - if (indexPut == null) { - // No covered column. 
Just prepare an index row with the empty column - byte[] indexRowKey = indexMaintainer.buildRowKey(nextDataRowVG, rowKeyPtr, - null, null, ts); - indexPut = new Put(indexRowKey); - } else { - IndexUtil.removeEmptyColumn(indexPut, indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier()); - } - indexPut.addColumn( - indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - indexMaintainer.getEmptyKeyValueQualifier(), ts, - QueryConstants.UNVERIFIED_BYTES); - context.indexUpdates.put(hTableInterfaceReference, - new Pair(indexPut, rowKeyPtr.get())); - // Delete the current index row if the new index key is different than the current one - if (currentDataRowState != null) { - ValueGetter currentDataRowVG = new IndexUtil.SimpleValueGetter(currentDataRowState); - byte[] indexRowKeyForCurrentDataRow = indexMaintainer.buildRowKey(currentDataRowVG, rowKeyPtr, - null, null, ts); - if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { - Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, - IndexMaintainer.DeleteType.ALL_VERSIONS, ts); - context.indexUpdates.put(hTableInterfaceReference, - new Pair(del, rowKeyPtr.get())); - } - } - } else if (currentDataRowState != null - && indexMaintainer.shouldPrepareIndexMutations(currentDataRowState)) { - context.indexUpdates.put(hTableInterfaceReference, - new Pair(getDeleteIndexMutation(currentDataRowState, - indexMaintainer, ts, rowKeyPtr), rowKeyPtr.get())); - if (indexMaintainer.isCDCIndex()) { - // CDC Index needs two delete markers one for deleting the index row, and - // the other for referencing the data table delete mutation with the - // right index row key, that is, the index row key starting with ts - Put cdcDataRowState = new Put(currentDataRowState.getRow()); - cdcDataRowState.addColumn(indexMaintainer.getDataEmptyKeyValueCF(), - indexMaintainer.getEmptyKeyValueQualifierForDataTable(), ts, - ByteUtil.EMPTY_BYTE_ARRAY); - context.indexUpdates.put(hTableInterfaceReference, - new Pair(getDeleteIndexMutation(cdcDataRowState, - indexMaintainer, ts, rowKeyPtr), rowKeyPtr.get())); - } - } + Put put = lastContext.getNextDataRowState(rowKeyPtr); + if (put != null) { + context.dataRowStates.put(rowKeyPtr, new Pair<>(put, new Put(put))); } + } else { + // The last batch for this row key failed. We cannot use the memory state. + // So we need to retrieve this row from disk + keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); + } + } else { + // The existing pending row is removed from the map. That means there is no + // pending row for this row key anymore. We need to add the new one to the map + pendingRows.put(rowKeyPtr, pendingRow); + keys.add(PVarbinary.INSTANCE.getKeyRange(rowKeyPtr.get(), SortOrder.ASC)); } + } } - - /** - * This method prepares unverified index mutations which are applied to index tables before the data table is - * updated. In the three-phase update approach, in phase 1, the status of existing index rows is set to "unverified" - * (these rows will be deleted from the index table in phase 3), and/or new put mutations are added with the - * unverified status. In phase 2, data table mutations are applied. In phase 3, the status for an index table row is - * either set to "verified" or the row is deleted. 
- */ - private void preparePreIndexMutations(BatchMutateContext context, - long batchTimestamp, - PhoenixIndexMetaData indexMetaData) throws Throwable { - List maintainers = indexMetaData.getIndexMaintainers(); - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - current.addTimelineAnnotation("Built index updates, doing preStep"); - // The rest of this method is for handling global index updates - context.indexUpdates = ArrayListMultimap.>create(); - prepareIndexMutations(context, maintainers, batchTimestamp); - - context.preIndexUpdates = ArrayListMultimap.create(); - int updateCount = 0; - for (IndexMaintainer indexMaintainer : maintainers) { - updateCount++; - byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); - byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); - HTableInterfaceReference hTableInterfaceReference = - new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); - List > updates = context.indexUpdates.get(hTableInterfaceReference); - for (Pair update : updates) { - Mutation m = update.getFirst(); - if (m instanceof Put) { - // This will be done before the data table row is updated (i.e., in the first write phase) - context.preIndexUpdates.put(hTableInterfaceReference, m); - } else { - // Set the status of the index row to "unverified" - Put unverifiedPut = new Put(m.getRow()); - unverifiedPut.addColumn( - emptyCF, emptyCQ, batchTimestamp, QueryConstants.UNVERIFIED_BYTES); - // This will be done before the data table row is updated (i.e., in the first write phase) - context.preIndexUpdates.put(hTableInterfaceReference, unverifiedPut); - } - } - } - TracingUtils.addAnnotation(current, "index update count", updateCount); - } + if (keys.isEmpty()) { + return; } - protected PhoenixIndexMetaData getPhoenixIndexMetaData(ObserverContext observerContext, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { - IndexMetaData indexMetaData = this.builder.getIndexMetaData(miniBatchOp); - if (!(indexMetaData instanceof PhoenixIndexMetaData)) { - throw new DoNotRetryIOException( - "preBatchMutateWithExceptions: indexMetaData is not an instance of "+PhoenixIndexMetaData.class.getName() + - ", current table is:" + - observerContext.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()); - } - return (PhoenixIndexMetaData)indexMetaData; - } - - private void preparePostIndexMutations(BatchMutateContext context, - long batchTimestamp, - PhoenixIndexMetaData indexMetaData) { - context.postIndexUpdates = ArrayListMultimap.create(); - List maintainers = indexMetaData.getIndexMaintainers(); - for (IndexMaintainer indexMaintainer : maintainers) { - byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); - byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); - HTableInterfaceReference hTableInterfaceReference = - new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); - List> updates = context.indexUpdates.get(hTableInterfaceReference); - for (Pair update : updates) { - Mutation m = update.getFirst(); - if (m instanceof Put) { - if (!indexMaintainer.isUncovered()) { - Put verifiedPut = new Put(m.getRow()); - // Set the status of the index row to "verified" - verifiedPut.addColumn(emptyCF, emptyCQ, batchTimestamp, - 
QueryConstants.VERIFIED_BYTES); - context.postIndexUpdates.put(hTableInterfaceReference, verifiedPut); - } - } else { - context.postIndexUpdates.put(hTableInterfaceReference, m); - } - } - } - // all cleanup will be done in postBatchMutateIndispensably() + if (this.useBloomFilter) { + for (KeyRange key : keys) { + // Scan.java usage alters scan instances, safer to create scan instance per usage + Scan scan = new Scan(); + // create a scan with same start/stop row key scan#isGetScan() + // for bloom filters scan should be a get + scan.withStartRow(key.getLowerRange(), true); + scan.withStopRow(key.getLowerRange(), true); + readDataTableRows(c, context, scan); + } + } else { + Scan scan = new Scan(); + ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList(keys)); + scanRanges.initializeScan(scan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + scan.setFilter(skipScanFilter); + readDataTableRows(c, context, scan); } + } - private static void identifyIndexMaintainerTypes(PhoenixIndexMetaData indexMetaData, BatchMutateContext context) { - for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { - if (indexMaintainer instanceof TransformMaintainer) { - context.hasTransform = true; - } else if (indexMaintainer.isLocalIndex()) { - context.hasLocalIndex = true; - } else if (indexMaintainer.isUncovered()) { - context.hasUncoveredIndex = true; - } else { - context.hasGlobalIndex = true; - } - } + private void readDataTableRows(ObserverContext c, + BatchMutateContext context, Scan scan) throws IOException { + try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) { + boolean more = true; + while (more) { + List cells = new ArrayList(); + more = scanner.next(cells); + if (cells.isEmpty()) { + continue; + } + byte[] rowKey = CellUtil.cloneRow(cells.get(0)); + Put put = new Put(rowKey); + for (Cell cell : cells) { + put.add(cell); + } + context.dataRowStates.put(new ImmutableBytesPtr(rowKey), + new Pair(put, new Put(put))); + } } + } - private void identifyMutationTypes(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) { - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (this.builder.returnResult(m) && miniBatchOp.size() == 1) { - context.returnResult = true; - } - if (this.builder.isAtomicOp(m) || this.builder.returnResult(m)) { - context.hasAtomic = true; - if (context.hasDelete) { - return; - } - } else if (m instanceof Delete) { - context.hasDelete = true; - } - if (context.hasAtomic || context.returnResult) { - return; + public static Mutation getDeleteIndexMutation(Put dataRowState, IndexMaintainer indexMaintainer, + long ts, ImmutableBytesPtr rowKeyPtr) { + ValueGetter dataRowVG = new IndexUtil.SimpleValueGetter(dataRowState); + byte[] indexRowKey = indexMaintainer.buildRowKey(dataRowVG, rowKeyPtr, null, null, ts); + return indexMaintainer.buildRowDeleteMutation(indexRowKey, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + } + + /** + * Generate the index update for a data row from the mutation that are obtained by merging the + * previous data row state with the pending row mutation. 
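To make the comparison described in this javadoc concrete, the following is a simplified sketch, not the IndexMaintainer API: the next row state yields an unverified index put, and if the index row key derived from the current row state differs, a delete for the old index row is also queued. The column name, key encoding, and string mutation labels are invented.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Sketch of the row-state comparison, under simplified assumptions:
// the "index row key" is just the indexed value concatenated with the data row key.
public class IndexRowDiffSketch {
  static String indexRowKey(Map<String, String> row, String dataKey) {
    return row.get("V1") + "\u0000" + dataKey;
  }

  public static void main(String[] args) {
    String dataKey = "row1";
    Map<String, String> current = Map.of("V1", "old"); // state currently on disk
    Map<String, String> next = Map.of("V1", "new");    // state after the pending put

    List<String> indexUpdates = new ArrayList<>();
    String nextIndexKey = indexRowKey(next, dataKey);
    indexUpdates.add("PUT(unverified) " + nextIndexKey);
    String currentIndexKey = indexRowKey(current, dataKey);
    if (!currentIndexKey.equals(nextIndexKey)) {
      // the indexed value changed, so the old index row must be deleted
      indexUpdates.add("DELETE " + currentIndexKey);
    }
    indexUpdates.forEach(System.out::println);
  }
}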
+ */ + private void prepareIndexMutations(BatchMutateContext context, List maintainers, + long ts) throws IOException { + List> indexTables = + new ArrayList<>(maintainers.size()); + for (IndexMaintainer indexMaintainer : maintainers) { + if (indexMaintainer.isLocalIndex()) { + continue; + } + HTableInterfaceReference hTableInterfaceReference = + new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); + indexTables.add(new Pair<>(indexMaintainer, hTableInterfaceReference)); + } + for (Map.Entry> entry : context.dataRowStates.entrySet()) { + ImmutableBytesPtr rowKeyPtr = entry.getKey(); + Pair dataRowState = entry.getValue(); + Put currentDataRowState = dataRowState.getFirst(); + Put nextDataRowState = dataRowState.getSecond(); + if (currentDataRowState == null && nextDataRowState == null) { + continue; + } + for (Pair pair : indexTables) { + IndexMaintainer indexMaintainer = pair.getFirst(); + HTableInterfaceReference hTableInterfaceReference = pair.getSecond(); + if ( + nextDataRowState != null && indexMaintainer.shouldPrepareIndexMutations(nextDataRowState) + ) { + ValueGetter nextDataRowVG = new IndexUtil.SimpleValueGetter(nextDataRowState); + Put indexPut = indexMaintainer.buildUpdateMutation(GenericKeyValueBuilder.INSTANCE, + nextDataRowVG, rowKeyPtr, ts, null, null, false); + if (indexPut == null) { + // No covered column. Just prepare an index row with the empty column + byte[] indexRowKey = + indexMaintainer.buildRowKey(nextDataRowVG, rowKeyPtr, null, null, ts); + indexPut = new Put(indexRowKey); + } else { + IndexUtil.removeEmptyColumn(indexPut, + indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier()); + } + indexPut.addColumn(indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + indexMaintainer.getEmptyKeyValueQualifier(), ts, QueryConstants.UNVERIFIED_BYTES); + context.indexUpdates.put(hTableInterfaceReference, + new Pair(indexPut, rowKeyPtr.get())); + // Delete the current index row if the new index key is different than the current one + if (currentDataRowState != null) { + ValueGetter currentDataRowVG = new IndexUtil.SimpleValueGetter(currentDataRowState); + byte[] indexRowKeyForCurrentDataRow = + indexMaintainer.buildRowKey(currentDataRowVG, rowKeyPtr, null, null, ts); + if (Bytes.compareTo(indexPut.getRow(), indexRowKeyForCurrentDataRow) != 0) { + Mutation del = indexMaintainer.buildRowDeleteMutation(indexRowKeyForCurrentDataRow, + IndexMaintainer.DeleteType.ALL_VERSIONS, ts); + context.indexUpdates.put(hTableInterfaceReference, + new Pair(del, rowKeyPtr.get())); } + } + } else if ( + currentDataRowState != null + && indexMaintainer.shouldPrepareIndexMutations(currentDataRowState) + ) { + context.indexUpdates.put(hTableInterfaceReference, + new Pair( + getDeleteIndexMutation(currentDataRowState, indexMaintainer, ts, rowKeyPtr), + rowKeyPtr.get())); + if (indexMaintainer.isCDCIndex()) { + // CDC Index needs two delete markers one for deleting the index row, and + // the other for referencing the data table delete mutation with the + // right index row key, that is, the index row key starting with ts + Put cdcDataRowState = new Put(currentDataRowState.getRow()); + cdcDataRowState.addColumn(indexMaintainer.getDataEmptyKeyValueCF(), + indexMaintainer.getEmptyKeyValueQualifierForDataTable(), ts, + ByteUtil.EMPTY_BYTE_ARRAY); + context.indexUpdates.put(hTableInterfaceReference, + new Pair( + getDeleteIndexMutation(cdcDataRowState, indexMaintainer, ts, rowKeyPtr), + 
rowKeyPtr.get())); + } } + } } + } - /** - * Wait for the previous batches to complete. If any of the previous batch fails then this - * batch will fail too and needs to be retried. The rows are locked by the caller. - * @param table - * @param context - * @throws Throwable - */ - private void waitForPreviousConcurrentBatch(TableName table, BatchMutateContext context) - throws Throwable { - for (BatchMutateContext lastContext : context.lastConcurrentBatchContext.values()) { - BatchMutatePhase phase = lastContext.getCurrentPhase(); - if (phase == BatchMutatePhase.FAILED) { - context.currentPhase = BatchMutatePhase.FAILED; - break; - } else if (phase == BatchMutatePhase.PRE) { - CountDownLatch countDownLatch = lastContext.getCountDownLatch(); - if (countDownLatch == null) { - // phase changed from PRE to either FAILED or POST - if (phase == BatchMutatePhase.FAILED) { - context.currentPhase = BatchMutatePhase.FAILED; - break; - } - continue; - } - // Release the locks so that the previous concurrent mutation can go into the post phase - unlockRows(context); - // Wait for at most one concurrentMutationWaitDuration for each level in the dependency tree of batches. - // lastContext.getMaxPendingRowCount() is the depth of the subtree rooted at the batch pointed by lastContext - if (!countDownLatch.await((lastContext.getMaxPendingRowCount() + 1) * concurrentMutationWaitDuration, - TimeUnit.MILLISECONDS)) { - context.currentPhase = BatchMutatePhase.FAILED; - LOG.debug(String.format("latch timeout context %s last %s", context, lastContext)); - break; - } - if (lastContext.getCurrentPhase() == BatchMutatePhase.FAILED) { - context.currentPhase = BatchMutatePhase.FAILED; - break; - } - // Acquire the locks again before letting the region proceed with data table updates - lockRows(context); - LOG.debug(String.format("context %s last %s exit phase %s", context, lastContext, - lastContext.getCurrentPhase())); - } - } - if (context.currentPhase == BatchMutatePhase.FAILED) { - // This batch needs to be retried since one of the previous concurrent batches has not completed yet. - // Throwing an IOException will result in retries of this batch. Removal of reference counts and - // locks for the rows of this batch will be done in postBatchMutateIndispensably() - throw new IOException("One of the previous concurrent mutations has not completed. " + - "The batch needs to be retried " + table.getNameAsString()); + /** + * This method prepares unverified index mutations which are applied to index tables before the + * data table is updated. In the three-phase update approach, in phase 1, the status of existing + * index rows is set to "unverified" (these rows will be deleted from the index table in phase 3), + * and/or new put mutations are added with the unverified status. In phase 2, data table mutations + * are applied. In phase 3, the status for an index table row is either set to "verified" or the + * row is deleted. 
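A compact sketch of the ordering described in this javadoc follows; it only shows the sequence of the three phases and the verification status that the empty column carries, not the actual writer plumbing, and all printed labels are invented.

// Illustrative only: the verification status written to the index row's empty column
// before (UNVERIFIED) and after (VERIFIED) the data table write.
public class ThreePhaseSketch {
  enum IndexRowStatus { UNVERIFIED, VERIFIED }

  public static void main(String[] args) {
    // phase 1: pre-index write marks the affected index rows as unverified
    IndexRowStatus status = IndexRowStatus.UNVERIFIED;
    System.out.println("phase 1: index row written with status " + status);
    // phase 2: the data table write happens here (not shown)
    System.out.println("phase 2: data row written");
    // phase 3: post-index write flips the status, or deletes the stale index row
    status = IndexRowStatus.VERIFIED;
    System.out.println("phase 3: index row status set to " + status);
  }
}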
+ */ + private void preparePreIndexMutations(BatchMutateContext context, long batchTimestamp, + PhoenixIndexMetaData indexMetaData) throws Throwable { + List maintainers = indexMetaData.getIndexMaintainers(); + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; + } + current.addTimelineAnnotation("Built index updates, doing preStep"); + // The rest of this method is for handling global index updates + context.indexUpdates = + ArrayListMultimap.> create(); + prepareIndexMutations(context, maintainers, batchTimestamp); + + context.preIndexUpdates = ArrayListMultimap. create(); + int updateCount = 0; + for (IndexMaintainer indexMaintainer : maintainers) { + updateCount++; + byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); + byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); + HTableInterfaceReference hTableInterfaceReference = + new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); + List> updates = context.indexUpdates.get(hTableInterfaceReference); + for (Pair update : updates) { + Mutation m = update.getFirst(); + if (m instanceof Put) { + // This will be done before the data table row is updated (i.e., in the first write + // phase) + context.preIndexUpdates.put(hTableInterfaceReference, m); + } else { + // Set the status of the index row to "unverified" + Put unverifiedPut = new Put(m.getRow()); + unverifiedPut.addColumn(emptyCF, emptyCQ, batchTimestamp, + QueryConstants.UNVERIFIED_BYTES); + // This will be done before the data table row is updated (i.e., in the first write + // phase) + context.preIndexUpdates.put(hTableInterfaceReference, unverifiedPut); + } } + } + TracingUtils.addAnnotation(current, "index update count", updateCount); } + } - private boolean shouldSleep(BatchMutateContext context) { - for (ImmutableBytesPtr ptr : context.rowsToLock) { - for (Set set : batchesWithLastTimestamp) { - if (set.contains(ptr)) { - return true; - } - } - } - return false; + protected PhoenixIndexMetaData getPhoenixIndexMetaData( + ObserverContext observerContext, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + IndexMetaData indexMetaData = this.builder.getIndexMetaData(miniBatchOp); + if (!(indexMetaData instanceof PhoenixIndexMetaData)) { + throw new DoNotRetryIOException( + "preBatchMutateWithExceptions: indexMetaData is not an instance of " + + PhoenixIndexMetaData.class.getName() + ", current table is:" + observerContext + .getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()); } - private long getBatchTimestamp(BatchMutateContext context, TableName table) - throws InterruptedException { - synchronized (this) { - long ts = EnvironmentEdgeManager.currentTimeMillis(); - if (ts != lastTimestamp) { - // The timestamp for this batch will be different from the last batch processed. - lastTimestamp = ts; - batchesWithLastTimestamp.clear(); - batchesWithLastTimestamp.add(context.rowsToLock); - return ts; - } else { - if (!shouldSleep(context)) { - // There is no need to sleep as the last batches with the same timestamp - // do not have a common row this batch - batchesWithLastTimestamp.add(context.rowsToLock); - return ts; - } - } - } - // Sleep for one millisecond. The sleep is necessary to get different timestamps - // for concurrent batches that share common rows. 
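The idea behind the 1 ms sleep can be shown with a minimal sketch, using System.currentTimeMillis() in place of EnvironmentEdgeManager and ignoring the "only sleep when batches share rows" optimization: if the clock has not moved since the previous batch was stamped, wait until it does so the two batches cannot share a timestamp.

// Illustrative sketch: ensure the timestamp handed to this batch differs from the
// one handed to the previous conflicting batch by sleeping into the next millisecond.
public class DistinctTimestampSketch {
  private static long lastTimestamp = 0L;

  static synchronized long nextBatchTimestamp() throws InterruptedException {
    long ts = System.currentTimeMillis();
    while (ts == lastTimestamp) {
      Thread.sleep(1); // let the wall clock advance
      ts = System.currentTimeMillis();
    }
    lastTimestamp = ts;
    return ts;
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(nextBatchTimestamp());
    System.out.println(nextBatchTimestamp()); // guaranteed to differ from the first value
  }
}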
- Thread.sleep(1); - LOG.debug("slept 1ms for " + table.getNameAsString()); - synchronized (this) { - long ts = EnvironmentEdgeManager.currentTimeMillis(); - if (ts != lastTimestamp) { - // The timestamp for this batch will be different from the last batch processed. - lastTimestamp = ts; - batchesWithLastTimestamp.clear(); - } - // We do not have to check again if we need to sleep again since we got the next - // timestamp while holding the row locks. This mean there cannot be a new - // mutation with the same row attempting get the same timestamp - batchesWithLastTimestamp.add(context.rowsToLock); - return ts; + return (PhoenixIndexMetaData) indexMetaData; + } + + private void preparePostIndexMutations(BatchMutateContext context, long batchTimestamp, + PhoenixIndexMetaData indexMetaData) { + context.postIndexUpdates = ArrayListMultimap. create(); + List maintainers = indexMetaData.getIndexMaintainers(); + for (IndexMaintainer indexMaintainer : maintainers) { + byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(); + byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier(); + HTableInterfaceReference hTableInterfaceReference = + new HTableInterfaceReference(new ImmutableBytesPtr(indexMaintainer.getIndexTableName())); + List> updates = context.indexUpdates.get(hTableInterfaceReference); + for (Pair update : updates) { + Mutation m = update.getFirst(); + if (m instanceof Put) { + if (!indexMaintainer.isUncovered()) { + Put verifiedPut = new Put(m.getRow()); + // Set the status of the index row to "verified" + verifiedPut.addColumn(emptyCF, emptyCQ, batchTimestamp, QueryConstants.VERIFIED_BYTES); + context.postIndexUpdates.put(hTableInterfaceReference, verifiedPut); + } + } else { + context.postIndexUpdates.put(hTableInterfaceReference, m); } + } + } + // all cleanup will be done in postBatchMutateIndispensably() + } + + private static void identifyIndexMaintainerTypes(PhoenixIndexMetaData indexMetaData, + BatchMutateContext context) { + for (IndexMaintainer indexMaintainer : indexMetaData.getIndexMaintainers()) { + if (indexMaintainer instanceof TransformMaintainer) { + context.hasTransform = true; + } else if (indexMaintainer.isLocalIndex()) { + context.hasLocalIndex = true; + } else if (indexMaintainer.isUncovered()) { + context.hasUncoveredIndex = true; + } else { + context.hasGlobalIndex = true; + } } - public void preBatchMutateWithExceptions(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws Throwable { - PhoenixIndexMetaData indexMetaData = getPhoenixIndexMetaData(c, miniBatchOp); - BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion()); - setBatchMutateContext(c, context); - identifyIndexMaintainerTypes(indexMetaData, context); - identifyMutationTypes(miniBatchOp, context); - context.populateOriginalMutations(miniBatchOp); + } + private void identifyMutationTypes(MiniBatchOperationInProgress miniBatchOp, + BatchMutateContext context) { + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (this.builder.returnResult(m) && miniBatchOp.size() == 1) { + context.returnResult = true; + } + if (this.builder.isAtomicOp(m) || this.builder.returnResult(m)) { + context.hasAtomic = true; if (context.hasDelete) { - // Need to add cell tags to Delete Marker before we do any index processing - // since we add tags to tables which doesn't have indexes also. 
- ServerIndexUtil.setDeleteAttributes(miniBatchOp); + return; } + } else if (m instanceof Delete) { + context.hasDelete = true; + } + if (context.hasAtomic || context.returnResult) { + return; + } + } + } - // Exclusively lock all rows to do consistent writes over multiple tables - // (i.e., the data and its index tables) - populateRowsToLock(miniBatchOp, context); - // early exit if it turns out we don't have any update for indexes - if (context.rowsToLock.isEmpty()) { - return; - } + /** + * Wait for the previous batches to complete. If any of the previous batch fails then this batch + * will fail too and needs to be retried. The rows are locked by the caller. + */ + private void waitForPreviousConcurrentBatch(TableName table, BatchMutateContext context) + throws Throwable { + for (BatchMutateContext lastContext : context.lastConcurrentBatchContext.values()) { + BatchMutatePhase phase = lastContext.getCurrentPhase(); + if (phase == BatchMutatePhase.FAILED) { + context.currentPhase = BatchMutatePhase.FAILED; + break; + } else if (phase == BatchMutatePhase.PRE) { + CountDownLatch countDownLatch = lastContext.getCountDownLatch(); + if (countDownLatch == null) { + // phase changed from PRE to either FAILED or POST + if (phase == BatchMutatePhase.FAILED) { + context.currentPhase = BatchMutatePhase.FAILED; + break; + } + continue; + } + // Release the locks so that the previous concurrent mutation can go into the post phase + unlockRows(context); + // Wait for at most one concurrentMutationWaitDuration for each level in the dependency tree + // of batches. + // lastContext.getMaxPendingRowCount() is the depth of the subtree rooted at the batch + // pointed by lastContext + if ( + !countDownLatch.await( + (lastContext.getMaxPendingRowCount() + 1) * concurrentMutationWaitDuration, + TimeUnit.MILLISECONDS) + ) { + context.currentPhase = BatchMutatePhase.FAILED; + LOG.debug(String.format("latch timeout context %s last %s", context, lastContext)); + break; + } + if (lastContext.getCurrentPhase() == BatchMutatePhase.FAILED) { + context.currentPhase = BatchMutatePhase.FAILED; + break; + } + // Acquire the locks again before letting the region proceed with data table updates lockRows(context); - long onDupCheckTime = 0; - - if (context.hasAtomic || context.returnResult || context.hasGlobalIndex || - context.hasUncoveredIndex || context.hasTransform) { - // Retrieve the current row states from the data table while holding the lock. 
- // This is needed for both atomic mutations and global indexes - long start = EnvironmentEdgeManager.currentTimeMillis(); - context.dataRowStates = new HashMap>(context.rowsToLock.size()); - if (context.hasGlobalIndex || context.hasTransform || context.hasAtomic || - context.returnResult || context.hasDelete || (context.hasUncoveredIndex && - isPartialUncoveredIndexMutation(indexMetaData, miniBatchOp))) { - getCurrentRowStates(c, context); - } - onDupCheckTime += (EnvironmentEdgeManager.currentTimeMillis() - start); - } - - if (context.hasAtomic || context.returnResult) { - long start = EnvironmentEdgeManager.currentTimeMillis(); - // add the mutations for conditional updates to the mini batch - addOnDupMutationsToBatch(miniBatchOp, context); - - // release locks for ON DUPLICATE KEY IGNORE since we won't be changing those rows - // this is needed so that we can exit early - releaseLocksForOnDupIgnoreMutations(miniBatchOp, context); - onDupCheckTime += (EnvironmentEdgeManager.currentTimeMillis() - start); - metricSource.updateDuplicateKeyCheckTime(dataTableName, onDupCheckTime); + LOG.debug(String.format("context %s last %s exit phase %s", context, lastContext, + lastContext.getCurrentPhase())); + } + } + if (context.currentPhase == BatchMutatePhase.FAILED) { + // This batch needs to be retried since one of the previous concurrent batches has not + // completed yet. + // Throwing an IOException will result in retries of this batch. Removal of reference counts + // and + // locks for the rows of this batch will be done in postBatchMutateIndispensably() + throw new IOException("One of the previous concurrent mutations has not completed. " + + "The batch needs to be retried " + table.getNameAsString()); + } + } - // early exit if we are not changing any rows - if (context.rowsToLock.isEmpty()) { - return; - } + private boolean shouldSleep(BatchMutateContext context) { + for (ImmutableBytesPtr ptr : context.rowsToLock) { + for (Set set : batchesWithLastTimestamp) { + if (set.contains(ptr)) { + return true; } + } + } + return false; + } - TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); - long batchTimestamp = getBatchTimestamp(context, table); - // Update the timestamps of the data table mutations to prevent overlapping timestamps - // (which prevents index inconsistencies as this case is not handled). 
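Roughly what a helper like setTimestamps has to do for a single Put is sketched below using the public HBase CellBuilder API; the helper name and structure are assumptions for illustration, not the Phoenix implementation. Every cell is rebuilt so that it carries the single batch timestamp.

import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch (assumed helper): copy every cell of a Put onto a new Put that carries
// the chosen batch timestamp, so no two batches overlap on the same cell timestamp.
public class SetTimestampsSketch {
  static Put withTimestamp(Put original, long batchTimestamp) throws IOException {
    Put rewritten = new Put(original.getRow());
    for (Map.Entry<byte[], List<Cell>> family : original.getFamilyCellMap().entrySet()) {
      for (Cell cell : family.getValue()) {
        rewritten.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          .setRow(CellUtil.cloneRow(cell))
          .setFamily(CellUtil.cloneFamily(cell))
          .setQualifier(CellUtil.cloneQualifier(cell))
          .setTimestamp(batchTimestamp) // all cells share the batch timestamp
          .setType(Cell.Type.Put)
          .setValue(CellUtil.cloneValue(cell))
          .build());
      }
    }
    return rewritten;
  }

  public static void main(String[] args) throws IOException {
    Put p = new Put(Bytes.toBytes("row1"))
      .addColumn(Bytes.toBytes("0"), Bytes.toBytes("V1"), Bytes.toBytes("v"));
    System.out.println(withTimestamp(p, 1700000000000L));
  }
}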
- setTimestamps(miniBatchOp, builder, batchTimestamp); - if (context.hasGlobalIndex || context.hasUncoveredIndex || context.hasTransform) { - // Prepare next data rows states for pending mutations (for global indexes) - prepareDataRowStates(c, miniBatchOp, context, batchTimestamp); - // early exit if it turns out we don't have any edits - long start = EnvironmentEdgeManager.currentTimeMillis(); - preparePreIndexMutations(context, batchTimestamp, indexMetaData); - metricSource.updateIndexPrepareTime(dataTableName, - EnvironmentEdgeManager.currentTimeMillis() - start); - // Release the locks before making RPC calls for index updates - unlockRows(context); - // Do the first phase index updates - doPre(context); - // Acquire the locks again before letting the region proceed with data table updates - lockRows(context); - if (context.lastConcurrentBatchContext != null) { - waitForPreviousConcurrentBatch(table, context); - } - preparePostIndexMutations(context, batchTimestamp, indexMetaData); - } - if (context.hasLocalIndex) { - // Group all the updates for a single row into a single update to be processed (for local indexes) - Collection mutations = groupMutations(miniBatchOp, context); - handleLocalIndexUpdates(table, miniBatchOp, mutations, indexMetaData); - } - if (failDataTableUpdatesForTesting) { - throw new DoNotRetryIOException("Simulating the data table write failure"); + private long getBatchTimestamp(BatchMutateContext context, TableName table) + throws InterruptedException { + synchronized (this) { + long ts = EnvironmentEdgeManager.currentTimeMillis(); + if (ts != lastTimestamp) { + // The timestamp for this batch will be different from the last batch processed. + lastTimestamp = ts; + batchesWithLastTimestamp.clear(); + batchesWithLastTimestamp.add(context.rowsToLock); + return ts; + } else { + if (!shouldSleep(context)) { + // There is no need to sleep as the last batches with the same timestamp + // do not have a common row this batch + batchesWithLastTimestamp.add(context.rowsToLock); + return ts; } + } } + // Sleep for one millisecond. The sleep is necessary to get different timestamps + // for concurrent batches that share common rows. + Thread.sleep(1); + LOG.debug("slept 1ms for " + table.getNameAsString()); + synchronized (this) { + long ts = EnvironmentEdgeManager.currentTimeMillis(); + if (ts != lastTimestamp) { + // The timestamp for this batch will be different from the last batch processed. + lastTimestamp = ts; + batchesWithLastTimestamp.clear(); + } + // We do not have to check again if we need to sleep again since we got the next + // timestamp while holding the row locks. This mean there cannot be a new + // mutation with the same row attempting get the same timestamp + batchesWithLastTimestamp.add(context.rowsToLock); + return ts; + } + } - /** - * In case of ON DUPLICATE KEY IGNORE, if the row already exists no mutations will be - * generated so release the row lock. 
- */ - private void releaseLocksForOnDupIgnoreMutations(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context) { - for (int i = 0; i < miniBatchOp.size(); i++) { - if (!isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { - continue; - } - Mutation m = miniBatchOp.getOperation(i); - if (!this.builder.isAtomicOp(m) && !this.builder.returnResult(m)) { - continue; - } - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - Iterator rowLockIterator = context.rowLocks.iterator(); - while(rowLockIterator.hasNext()){ - RowLock rowLock = rowLockIterator.next(); - ImmutableBytesPtr rowKey = rowLock.getRowKey(); - if (row.equals(rowKey)) { - PendingRow pendingRow = pendingRows.get(rowKey); - if (pendingRow != null) { - pendingRow.remove(); - } - rowLock.release(); - rowLockIterator.remove(); - context.rowsToLock.remove(row); - break; - } - } - } + public void preBatchMutateWithExceptions(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws Throwable { + PhoenixIndexMetaData indexMetaData = getPhoenixIndexMetaData(c, miniBatchOp); + BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion()); + setBatchMutateContext(c, context); + identifyIndexMaintainerTypes(indexMetaData, context); + identifyMutationTypes(miniBatchOp, context); + context.populateOriginalMutations(miniBatchOp); + + if (context.hasDelete) { + // Need to add cell tags to Delete Marker before we do any index processing + // since we add tags to tables which doesn't have indexes also. + ServerIndexUtil.setDeleteAttributes(miniBatchOp); } - private void setBatchMutateContext(ObserverContext c, BatchMutateContext context) { - this.batchMutateContext.set(context); + // Exclusively lock all rows to do consistent writes over multiple tables + // (i.e., the data and its index tables) + populateRowsToLock(miniBatchOp, context); + // early exit if it turns out we don't have any update for indexes + if (context.rowsToLock.isEmpty()) { + return; } + lockRows(context); + long onDupCheckTime = 0; + + if ( + context.hasAtomic || context.returnResult || context.hasGlobalIndex + || context.hasUncoveredIndex || context.hasTransform + ) { + // Retrieve the current row states from the data table while holding the lock. 
+ // This is needed for both atomic mutations and global indexes + long start = EnvironmentEdgeManager.currentTimeMillis(); + context.dataRowStates = + new HashMap>(context.rowsToLock.size()); + if ( + context.hasGlobalIndex || context.hasTransform || context.hasAtomic || context.returnResult + || context.hasDelete + || (context.hasUncoveredIndex + && isPartialUncoveredIndexMutation(indexMetaData, miniBatchOp)) + ) { + getCurrentRowStates(c, context); + } + onDupCheckTime += (EnvironmentEdgeManager.currentTimeMillis() - start); + } + + if (context.hasAtomic || context.returnResult) { + long start = EnvironmentEdgeManager.currentTimeMillis(); + // add the mutations for conditional updates to the mini batch + addOnDupMutationsToBatch(miniBatchOp, context); - private BatchMutateContext getBatchMutateContext(ObserverContext c) { - return this.batchMutateContext.get(); + // release locks for ON DUPLICATE KEY IGNORE since we won't be changing those rows + // this is needed so that we can exit early + releaseLocksForOnDupIgnoreMutations(miniBatchOp, context); + onDupCheckTime += (EnvironmentEdgeManager.currentTimeMillis() - start); + metricSource.updateDuplicateKeyCheckTime(dataTableName, onDupCheckTime); + + // early exit if we are not changing any rows + if (context.rowsToLock.isEmpty()) { + return; + } } - private void removeBatchMutateContext(ObserverContext c) { - this.batchMutateContext.remove(); + TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable(); + long batchTimestamp = getBatchTimestamp(context, table); + // Update the timestamps of the data table mutations to prevent overlapping timestamps + // (which prevents index inconsistencies as this case is not handled). + setTimestamps(miniBatchOp, builder, batchTimestamp); + if (context.hasGlobalIndex || context.hasUncoveredIndex || context.hasTransform) { + // Prepare next data rows states for pending mutations (for global indexes) + prepareDataRowStates(c, miniBatchOp, context, batchTimestamp); + // early exit if it turns out we don't have any edits + long start = EnvironmentEdgeManager.currentTimeMillis(); + preparePreIndexMutations(context, batchTimestamp, indexMetaData); + metricSource.updateIndexPrepareTime(dataTableName, + EnvironmentEdgeManager.currentTimeMillis() - start); + // Release the locks before making RPC calls for index updates + unlockRows(context); + // Do the first phase index updates + doPre(context); + // Acquire the locks again before letting the region proceed with data table updates + lockRows(context); + if (context.lastConcurrentBatchContext != null) { + waitForPreviousConcurrentBatch(table, context); + } + preparePostIndexMutations(context, batchTimestamp, indexMetaData); } + if (context.hasLocalIndex) { + // Group all the updates for a single row into a single update to be processed (for local + // indexes) + Collection mutations = groupMutations(miniBatchOp, context); + handleLocalIndexUpdates(table, miniBatchOp, mutations, indexMetaData); + } + if (failDataTableUpdatesForTesting) { + throw new DoNotRetryIOException("Simulating the data table write failure"); + } + } - @Override - public void preWALAppend(ObserverContext c, WALKey key, - WALEdit edit) { - if (shouldWALAppend) { - BatchMutateContext context = getBatchMutateContext(c); - appendMutationAttributesToWALKey(key, context); + /** + * In case of ON DUPLICATE KEY IGNORE, if the row already exists no mutations will be generated so + * release the row lock. 
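For readers unfamiliar with the feature this javadoc refers to, here is a small client-side example of an atomic upsert with ON DUPLICATE KEY IGNORE; the JDBC URL and table name are placeholders. When the row already exists, the server generates no mutation for it, which is why the row lock can be released early on this path.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Client-side view of the case handled above: with ON DUPLICATE KEY IGNORE the
// upsert only takes effect when the row does not exist yet.
public class OnDupKeyIgnoreExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
      try (PreparedStatement ps = conn.prepareStatement(
          "UPSERT INTO MY_TABLE (ID, COUNTER) VALUES (?, ?) ON DUPLICATE KEY IGNORE")) {
        ps.setLong(1, 1L);
        ps.setLong(2, 0L);
        ps.executeUpdate(); // becomes a no-op on the server if ID=1 already exists
      }
      conn.commit();
    }
  }
}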
+ */ + private void releaseLocksForOnDupIgnoreMutations( + MiniBatchOperationInProgress miniBatchOp, BatchMutateContext context) { + for (int i = 0; i < miniBatchOp.size(); i++) { + if (!isAtomicOperationComplete(miniBatchOp.getOperationStatus(i))) { + continue; + } + Mutation m = miniBatchOp.getOperation(i); + if (!this.builder.isAtomicOp(m) && !this.builder.returnResult(m)) { + continue; + } + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + Iterator rowLockIterator = context.rowLocks.iterator(); + while (rowLockIterator.hasNext()) { + RowLock rowLock = rowLockIterator.next(); + ImmutableBytesPtr rowKey = rowLock.getRowKey(); + if (row.equals(rowKey)) { + PendingRow pendingRow = pendingRows.get(rowKey); + if (pendingRow != null) { + pendingRow.remove(); + } + rowLock.release(); + rowLockIterator.remove(); + context.rowsToLock.remove(row); + break; } + } } + } - public void appendMutationAttributesToWALKey(WALKey key, - IndexRegionObserver.BatchMutateContext context) { - if (context != null && context.getOriginalMutations().size() > 0) { - Mutation firstMutation = context.getOriginalMutations().get(0); - Map attrMap = firstMutation.getAttributesMap(); - for (MutationState.MutationMetadataType metadataType : - MutationState.MutationMetadataType.values()) { - String metadataTypeKey = metadataType.toString(); - if (attrMap.containsKey(metadataTypeKey)) { - IndexRegionObserver.appendToWALKey(key, metadataTypeKey, - attrMap.get(metadataTypeKey)); - } - } + private void setBatchMutateContext(ObserverContext c, + BatchMutateContext context) { + this.batchMutateContext.set(context); + } + + private BatchMutateContext + getBatchMutateContext(ObserverContext c) { + return this.batchMutateContext.get(); + } + + private void removeBatchMutateContext(ObserverContext c) { + this.batchMutateContext.remove(); + } + + @Override + public void preWALAppend(ObserverContext c, WALKey key, + WALEdit edit) { + if (shouldWALAppend) { + BatchMutateContext context = getBatchMutateContext(c); + appendMutationAttributesToWALKey(key, context); + } + } + + public void appendMutationAttributesToWALKey(WALKey key, + IndexRegionObserver.BatchMutateContext context) { + if (context != null && context.getOriginalMutations().size() > 0) { + Mutation firstMutation = context.getOriginalMutations().get(0); + Map attrMap = firstMutation.getAttributesMap(); + for (MutationState.MutationMetadataType metadataType : MutationState.MutationMetadataType + .values()) { + String metadataTypeKey = metadataType.toString(); + if (attrMap.containsKey(metadataTypeKey)) { + IndexRegionObserver.appendToWALKey(key, metadataTypeKey, attrMap.get(metadataTypeKey)); } + } } + } - /** - * When this hook is called, all the rows in the batch context are locked if the batch of - * mutations is successful. Because the rows are locked, we can safely make updates to - * pending row states in memory and perform the necessary cleanup in that case. - * - * However, when the batch fails, then some of the rows may not be locked. In that case, - * we remove the pending row states from the concurrent hash map without updating them since - * pending rows states become invalid when a batch fails. - */ + /** + * When this hook is called, all the rows in the batch context are locked if the batch of + * mutations is successful. Because the rows are locked, we can safely make updates to pending row + * states in memory and perform the necessary cleanup in that case. However, when the batch fails, + * then some of the rows may not be locked. 
In that case, we remove the pending row states from + * the concurrent hash map without updating them since pending rows states become invalid when a + * batch fails. + */ @Override public void postBatchMutateIndispensably(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { - if (this.disabled) { - return; + MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { + if (this.disabled) { + return; + } + BatchMutateContext context = getBatchMutateContext(c); + if (context == null) { + return; + } + try { + if (success) { + context.currentPhase = BatchMutatePhase.POST; + if ((context.hasAtomic || context.returnResult) && miniBatchOp.size() == 1) { + if (!isAtomicOperationComplete(miniBatchOp.getOperationStatus(0))) { + byte[] retVal = PInteger.INSTANCE.toBytes(1); + Cell cell = PhoenixKeyValueUtil.newKeyValue(miniBatchOp.getOperation(0).getRow(), + Bytes.toBytes(UPSERT_CF), Bytes.toBytes(UPSERT_STATUS_CQ), 0, retVal, 0, + retVal.length); + List cells = new ArrayList<>(); + cells.add(cell); + + addCellsIfResultReturned(miniBatchOp, context, cells); + + Result result = Result.create(cells); + miniBatchOp.setOperationStatus(0, new OperationStatus(SUCCESS, result)); + } + } + } else { + context.currentPhase = BatchMutatePhase.FAILED; } - BatchMutateContext context = getBatchMutateContext(c); - if (context == null) { - return; + context.countDownAllLatches(); + removePendingRows(context); + if (context.indexUpdates != null) { + context.indexUpdates.clear(); } - try { - if (success) { - context.currentPhase = BatchMutatePhase.POST; - if ((context.hasAtomic || context.returnResult) && miniBatchOp.size() == 1) { - if (!isAtomicOperationComplete(miniBatchOp.getOperationStatus(0))) { - byte[] retVal = PInteger.INSTANCE.toBytes(1); - Cell cell = PhoenixKeyValueUtil.newKeyValue( - miniBatchOp.getOperation(0).getRow(), Bytes.toBytes(UPSERT_CF), - Bytes.toBytes(UPSERT_STATUS_CQ), 0, retVal, 0, retVal.length); - List cells = new ArrayList<>(); - cells.add(cell); - - addCellsIfResultReturned(miniBatchOp, context, cells); - - Result result = Result.create(cells); - miniBatchOp.setOperationStatus(0, - new OperationStatus(SUCCESS, result)); - } - } - } else { - context.currentPhase = BatchMutatePhase.FAILED; - } - context.countDownAllLatches(); - removePendingRows(context); - if (context.indexUpdates != null) { - context.indexUpdates.clear(); - } - unlockRows(context); - this.builder.batchCompleted(miniBatchOp); + unlockRows(context); + this.builder.batchCompleted(miniBatchOp); - if (success) { // The pre-index and data table updates are successful, and now, do post index updates - doPost(c, context); - } - } finally { - removeBatchMutateContext(c); - } - } - - /** - * If the result needs to be returned for the given update operation, identify the updated row - * cells and add the input list of cells. - * - * @param miniBatchOp Batch of mutations getting applied to region. - * @param context The BatchMutateContext object shared during coproc hooks execution as part of - * the batch mutate life cycle. - * @param cells The list of cells to be returned back to the client. 
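A sketch of the Result assembly this javadoc describes, using the public HBase client API: the cells below are constructed ad hoc for the example, whereas the real code collects them from the applied mutations; only the sort-then-wrap step is the point being illustrated.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: gather the cells that describe the post-update row, sort them in
// cell order as the surrounding code does, and wrap them in a Result for the client.
public class ReturnResultSketch {
  public static void main(String[] args) {
    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("0"),
      Bytes.toBytes("V1"), 1700000000000L, Bytes.toBytes("v1")));
    cells.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("0"),
      Bytes.toBytes("COUNTER"), 1700000000000L, Bytes.toBytes(42L)));
    cells.sort(CellComparator.getInstance()); // keep cells in canonical order
    Result result = Result.create(cells);
    System.out.println(result);
  }
}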
- */ - private static void addCellsIfResultReturned(MiniBatchOperationInProgress miniBatchOp, - BatchMutateContext context, List cells) { - if (context.returnResult) { - Map> currColumnCellExprMap = - context.currColumnCellExprMap; - if (currColumnCellExprMap == null) { - return; - } - Mutation mutation = miniBatchOp.getOperation(0); - if (mutation instanceof Put) { - updateColumnCellExprMap(mutation, currColumnCellExprMap); - } - Mutation[] mutations = miniBatchOp.getOperationsFromCoprocessors(0); - if (mutations != null) { - for (Mutation m : mutations) { - updateColumnCellExprMap(m, currColumnCellExprMap); - } - } - for (Pair cellPair : currColumnCellExprMap.values()) { - cells.add(cellPair.getFirst()); - } - cells.sort(CellComparator.getInstance()); + if (success) { // The pre-index and data table updates are successful, and now, do post index + // updates + doPost(c, context); + } + } finally { + removeBatchMutateContext(c); + } + } + + /** + * If the result needs to be returned for the given update operation, identify the updated row + * cells and add the input list of cells. + * @param miniBatchOp Batch of mutations getting applied to region. + * @param context The BatchMutateContext object shared during coproc hooks execution as part + * of the batch mutate life cycle. + * @param cells The list of cells to be returned back to the client. + */ + private static void addCellsIfResultReturned(MiniBatchOperationInProgress miniBatchOp, + BatchMutateContext context, List cells) { + if (context.returnResult) { + Map> currColumnCellExprMap = + context.currColumnCellExprMap; + if (currColumnCellExprMap == null) { + return; + } + Mutation mutation = miniBatchOp.getOperation(0); + if (mutation instanceof Put) { + updateColumnCellExprMap(mutation, currColumnCellExprMap); + } + Mutation[] mutations = miniBatchOp.getOperationsFromCoprocessors(0); + if (mutations != null) { + for (Mutation m : mutations) { + updateColumnCellExprMap(m, currColumnCellExprMap); } + } + for (Pair cellPair : currColumnCellExprMap.values()) { + cells.add(cellPair.getFirst()); + } + cells.sort(CellComparator.getInstance()); } + } - /** - * Update the contents of {@code currColumnCellExprMap} based on the mutation that was - * successfully applied to the row. - * - * @param mutation The Mutation object which is applied to the row. - * @param currColumnCellExprMap The map of column to cell reference. - */ - private static void updateColumnCellExprMap(Mutation mutation, - Map> - currColumnCellExprMap) { - if (mutation != null) { - for (Map.Entry> entry : - mutation.getFamilyCellMap().entrySet()) { - for (Cell entryCell : entry.getValue()) { - byte[] family = CellUtil.cloneFamily(entryCell); - byte[] qualifier = CellUtil.cloneQualifier(entryCell); - ColumnReference colRef = new ColumnReference(family, qualifier); - if (mutation instanceof Put) { - currColumnCellExprMap.put(colRef, new Pair<>(entryCell, null)); - } else if (mutation instanceof Delete) { - currColumnCellExprMap.remove(colRef); - } - } - } + /** + * Update the contents of {@code currColumnCellExprMap} based on the mutation that was + * successfully applied to the row. + * @param mutation The Mutation object which is applied to the row. + * @param currColumnCellExprMap The map of column to cell reference. 
+ */ + private static void updateColumnCellExprMap(Mutation mutation, + Map> currColumnCellExprMap) { + if (mutation != null) { + for (Map.Entry> entry : mutation.getFamilyCellMap().entrySet()) { + for (Cell entryCell : entry.getValue()) { + byte[] family = CellUtil.cloneFamily(entryCell); + byte[] qualifier = CellUtil.cloneQualifier(entryCell); + ColumnReference colRef = new ColumnReference(family, qualifier); + if (mutation instanceof Put) { + currColumnCellExprMap.put(colRef, new Pair<>(entryCell, null)); + } else if (mutation instanceof Delete) { + currColumnCellExprMap.remove(colRef); + } } + } } + } - private void doPost(ObserverContext c, BatchMutateContext context) throws IOException { - long start = EnvironmentEdgeManager.currentTimeMillis(); + private void doPost(ObserverContext c, BatchMutateContext context) + throws IOException { + long start = EnvironmentEdgeManager.currentTimeMillis(); - try { - if (failPostIndexUpdatesForTesting) { - throw new DoNotRetryIOException("Simulating the last (i.e., post) index table write failure"); - } - doIndexWritesWithExceptions(context, true); - metricSource.updatePostIndexUpdateTime(dataTableName, - EnvironmentEdgeManager.currentTimeMillis() - start); - } catch (Throwable e) { - metricSource.updatePostIndexUpdateFailureTime(dataTableName, - EnvironmentEdgeManager.currentTimeMillis() - start); - metricSource.incrementPostIndexUpdateFailures(dataTableName); - // Ignore the failures in the third write phase + try { + if (failPostIndexUpdatesForTesting) { + throw new DoNotRetryIOException( + "Simulating the last (i.e., post) index table write failure"); } + doIndexWritesWithExceptions(context, true); + metricSource.updatePostIndexUpdateTime(dataTableName, + EnvironmentEdgeManager.currentTimeMillis() - start); + } catch (Throwable e) { + metricSource.updatePostIndexUpdateFailureTime(dataTableName, + EnvironmentEdgeManager.currentTimeMillis() - start); + metricSource.incrementPostIndexUpdateFailures(dataTableName); + // Ignore the failures in the third write phase + } } private void doIndexWritesWithExceptions(BatchMutateContext context, boolean post) - throws IOException { - ListMultimap indexUpdates = post ? context.postIndexUpdates : context.preIndexUpdates; - //short circuit, if we don't need to do any work + throws IOException { + ListMultimap indexUpdates = + post ? context.postIndexUpdates : context.preIndexUpdates; + // short circuit, if we don't need to do any work - if (context == null || indexUpdates == null || indexUpdates.isEmpty()) { - return; - } + if (context == null || indexUpdates == null || indexUpdates.isEmpty()) { + return; + } - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Completing " + (post ? "post" : "pre") + " index writes")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - current.addTimelineAnnotation("Actually doing " + (post ? "post" : "pre") + " index update for first time"); - if (post) { - postWriter.write(indexUpdates, false, context.clientVersion); - } else { - preWriter.write(indexUpdates, false, context.clientVersion); - } + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = + Trace.startSpan("Completing " + (post ? "post" : "pre") + " index writes")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; + } + current.addTimelineAnnotation( + "Actually doing " + (post ? 
"post" : "pre") + " index update for first time"); + if (post) { + postWriter.write(indexUpdates, false, context.clientVersion); + } else { + preWriter.write(indexUpdates, false, context.clientVersion); } + } } private void removePendingRows(BatchMutateContext context) { - for (ImmutableBytesPtr rowKey : context.rowsToLock) { - PendingRow pendingRow = pendingRows.get(rowKey); - if (pendingRow != null) { - pendingRow.remove(); - } + for (ImmutableBytesPtr rowKey : context.rowsToLock) { + PendingRow pendingRow = pendingRows.get(rowKey); + if (pendingRow != null) { + pendingRow.remove(); } + } } private void doPre(BatchMutateContext context) throws IOException { - long start = 0; - try { - start = EnvironmentEdgeManager.currentTimeMillis(); - if (failPreIndexUpdatesForTesting) { - throw new DoNotRetryIOException("Simulating the first (i.e., pre) index table write failure"); - } - doIndexWritesWithExceptions(context, false); - metricSource.updatePreIndexUpdateTime(dataTableName, - EnvironmentEdgeManager.currentTimeMillis() - start); - } catch (Throwable e) { - metricSource.updatePreIndexUpdateFailureTime(dataTableName, - EnvironmentEdgeManager.currentTimeMillis() - start); - metricSource.incrementPreIndexUpdateFailures(dataTableName); - // Re-acquire all locks since we released them before making index updates - // Removal of reference counts and locks for the rows of this batch will be - // done in postBatchMutateIndispensably() - lockRows(context); - rethrowIndexingException(e); + long start = 0; + try { + start = EnvironmentEdgeManager.currentTimeMillis(); + if (failPreIndexUpdatesForTesting) { + throw new DoNotRetryIOException( + "Simulating the first (i.e., pre) index table write failure"); } + doIndexWritesWithExceptions(context, false); + metricSource.updatePreIndexUpdateTime(dataTableName, + EnvironmentEdgeManager.currentTimeMillis() - start); + } catch (Throwable e) { + metricSource.updatePreIndexUpdateFailureTime(dataTableName, + EnvironmentEdgeManager.currentTimeMillis() - start); + metricSource.incrementPreIndexUpdateFailures(dataTableName); + // Re-acquire all locks since we released them before making index updates + // Removal of reference counts and locks for the rows of this batch will be + // done in postBatchMutateIndispensably() + lockRows(context); + rethrowIndexingException(e); + } } private void extractExpressionsAndColumns(DataInputStream input, - List>> operations, - final Set colsReadInExpr) throws IOException { - while (true) { - ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - colsReadInExpr.add(new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier())); - return null; - } - }; - try { - int nExpressions = WritableUtils.readVInt(input); - List expressions = Lists.newArrayListWithExpectedSize(nExpressions); - for (int i = 0; i < nExpressions; i++) { - Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); - expressions.add(expression); - expression.accept(visitor); - } - PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input); - PTable table = PTableImpl.createFromProto(tableProto); - operations.add(new Pair<>(table, expressions)); - } catch (EOFException e) { - break; - } + List>> operations, final Set colsReadInExpr) + throws IOException { + while (true) { + ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { + @Override + public 
Void visit(KeyValueColumnExpression expression) { + colsReadInExpr.add( + new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier())); + return null; + } + }; + try { + int nExpressions = WritableUtils.readVInt(input); + List expressions = Lists.newArrayListWithExpectedSize(nExpressions); + for (int i = 0; i < nExpressions; i++) { + Expression expression = + ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + expressions.add(expression); + expression.accept(visitor); + } + PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input); + PTable table = PTableImpl.createFromProto(tableProto); + operations.add(new Pair<>(table, expressions)); + } catch (EOFException e) { + break; } + } } - /** - * This function has been adapted from PhoenixIndexBuilder#executeAtomicOp(). - * The critical difference being that the code in PhoenixIndexBuilder#executeAtomicOp() - * generates the mutations by reading the latest data table row from HBase but in order - * to correctly support concurrent index mutations we need to always read the latest - * data table row from memory. - * It takes in an atomic Put mutation and generates a list of Put and Delete mutations. - * The mutation list will be empty in two cases: - * 1) ON DUPLICATE KEY IGNORE and the row already exists; - * 2) ON DUPLICATE KEY UPDATE if CASE expression is specified and in each of them the new - * value is the same as the old value in the ELSE-clause. - * Otherwise, we will generate one Put mutation and optionally one Delete mutation (with - * DeleteColumn type cells for all columns set to null). - */ - private List generateOnDupMutations(BatchMutateContext context, - Put atomicPut, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { - List mutations = Lists.newArrayListWithExpectedSize(2); - byte[] opBytes = atomicPut.getAttribute(ATOMIC_OP_ATTRIB); - byte[] returnResult = atomicPut.getAttribute(RETURN_RESULT); - if ((opBytes == null && returnResult == null) || - (opBytes == null && miniBatchOp.size() != 1)) { - // Unexpected - // Either mutation should be atomic by providing non-null ON DUPLICATE KEY, or - // if the result needs to be returned, only single row must be updated as part of - // the batch mutation. - return null; - } - Put put = null; - Delete delete = null; - - // mutations returned by this function will have the LATEST timestamp - // later these timestamps will be updated by the IndexRegionObserver#setTimestamps() function - long ts = HConstants.LATEST_TIMESTAMP; - - // store current cells into a map where the key is ColumnReference of the column family and - // column qualifier, and value is a pair of cell and a boolean. The value of the boolean - // will be true if the expression is CaseExpression and Else-clause is evaluated to be - // true, will be null if there is no expression on this column, otherwise false - Map> currColumnCellExprMap = new HashMap<>(); - - byte[] rowKey = atomicPut.getRow(); - ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(rowKey); - // Get the latest data row state - Pair dataRowState = context.dataRowStates.get(rowKeyPtr); - Put currentDataRowState = dataRowState != null ? dataRowState.getFirst() : null; - - // if result needs to be returned but the DML does not have ON DUPLICATE KEY present, - // perform the mutation and return the result. - if (opBytes == null) { - mutations.add(atomicPut); - updateCurrColumnCellExpr(currentDataRowState != null ? 
currentDataRowState : atomicPut, - currColumnCellExprMap); - if (context.returnResult) { - context.currColumnCellExprMap = currColumnCellExprMap; - } - return mutations; - } - - if (PhoenixIndexBuilderHelper.isDupKeyIgnore(opBytes)) { - if (currentDataRowState == null) { - // new row - mutations.add(atomicPut); - updateCurrColumnCellExpr(atomicPut, currColumnCellExprMap); - } else { - updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); - } - if (context.returnResult) { - context.currColumnCellExprMap = currColumnCellExprMap; - } - return mutations; + /** + * This function has been adapted from PhoenixIndexBuilder#executeAtomicOp(). The critical + * difference being that the code in PhoenixIndexBuilder#executeAtomicOp() generates the mutations + * by reading the latest data table row from HBase but in order to correctly support concurrent + * index mutations we need to always read the latest data table row from memory. It takes in an + * atomic Put mutation and generates a list of Put and Delete mutations. The mutation list will be + * empty in two cases: 1) ON DUPLICATE KEY IGNORE and the row already exists; 2) ON DUPLICATE KEY + * UPDATE if CASE expression is specified and in each of them the new value is the same as the old + * value in the ELSE-clause. Otherwise, we will generate one Put mutation and optionally one + * Delete mutation (with DeleteColumn type cells for all columns set to null). + */ + private List generateOnDupMutations(BatchMutateContext context, Put atomicPut, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + List mutations = Lists.newArrayListWithExpectedSize(2); + byte[] opBytes = atomicPut.getAttribute(ATOMIC_OP_ATTRIB); + byte[] returnResult = atomicPut.getAttribute(RETURN_RESULT); + if ((opBytes == null && returnResult == null) || (opBytes == null && miniBatchOp.size() != 1)) { + // Unexpected + // Either mutation should be atomic by providing non-null ON DUPLICATE KEY, or + // if the result needs to be returned, only single row must be updated as part of + // the batch mutation. + return null; + } + Put put = null; + Delete delete = null; + + // mutations returned by this function will have the LATEST timestamp + // later these timestamps will be updated by the IndexRegionObserver#setTimestamps() function + long ts = HConstants.LATEST_TIMESTAMP; + + // store current cells into a map where the key is ColumnReference of the column family and + // column qualifier, and value is a pair of cell and a boolean. The value of the boolean + // will be true if the expression is CaseExpression and Else-clause is evaluated to be + // true, will be null if there is no expression on this column, otherwise false + Map> currColumnCellExprMap = new HashMap<>(); + + byte[] rowKey = atomicPut.getRow(); + ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(rowKey); + // Get the latest data row state + Pair dataRowState = context.dataRowStates.get(rowKeyPtr); + Put currentDataRowState = dataRowState != null ? dataRowState.getFirst() : null; + + // if result needs to be returned but the DML does not have ON DUPLICATE KEY present, + // perform the mutation and return the result. + if (opBytes == null) { + mutations.add(atomicPut); + updateCurrColumnCellExpr(currentDataRowState != null ? 
currentDataRowState : atomicPut, + currColumnCellExprMap); + if (context.returnResult) { + context.currColumnCellExprMap = currColumnCellExprMap; } + return mutations; + } - ByteArrayInputStream stream = new ByteArrayInputStream(opBytes); - DataInputStream input = new DataInputStream(stream); - boolean skipFirstOp = input.readBoolean(); - short repeat = input.readShort(); - - List>> operations = Lists.newArrayListWithExpectedSize(3); - final Set colsReadInExpr = new HashSet<>(); - // deserialize the conditional update expressions and - // extract the columns that are read in the conditional expressions - extractExpressionsAndColumns(input, operations, colsReadInExpr); - int estimatedSize = colsReadInExpr.size(); - - // initialized to either the incoming new row or the current row - // stores the intermediate values as we apply conditional update expressions - List flattenedCells; - // read the column values requested in the get from the current data row - List cells = IndexUtil.readColumnsFromRow(currentDataRowState, colsReadInExpr); - - if (currentDataRowState == null) { // row doesn't exist - updateCurrColumnCellExpr(atomicPut, currColumnCellExprMap); - if (skipFirstOp) { - if (operations.size() <= 1 && repeat <= 1) { - // early exit since there is only one ON DUPLICATE KEY UPDATE - // clause which is ignored because the row doesn't exist so - // simply use the values in UPSERT VALUES - mutations.add(atomicPut); - if (context.returnResult) { - context.currColumnCellExprMap = currColumnCellExprMap; - } - return mutations; - } - // If there are multiple ON DUPLICATE KEY UPDATE on a new row, - // the first one is skipped - repeat--; - } - // Base current state off of new row - flattenedCells = flattenCells(atomicPut); + if (PhoenixIndexBuilderHelper.isDupKeyIgnore(opBytes)) { + if (currentDataRowState == null) { + // new row + mutations.add(atomicPut); + updateCurrColumnCellExpr(atomicPut, currColumnCellExprMap); } else { - // Base current state off of existing row - flattenedCells = cells; - // store all current cells from currentDataRowState - updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); + updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); } + if (context.returnResult) { + context.currColumnCellExprMap = currColumnCellExprMap; + } + return mutations; + } - if (context.returnResult) { + ByteArrayInputStream stream = new ByteArrayInputStream(opBytes); + DataInputStream input = new DataInputStream(stream); + boolean skipFirstOp = input.readBoolean(); + short repeat = input.readShort(); + + List>> operations = Lists.newArrayListWithExpectedSize(3); + final Set colsReadInExpr = new HashSet<>(); + // deserialize the conditional update expressions and + // extract the columns that are read in the conditional expressions + extractExpressionsAndColumns(input, operations, colsReadInExpr); + int estimatedSize = colsReadInExpr.size(); + + // initialized to either the incoming new row or the current row + // stores the intermediate values as we apply conditional update expressions + List flattenedCells; + // read the column values requested in the get from the current data row + List cells = IndexUtil.readColumnsFromRow(currentDataRowState, colsReadInExpr); + + if (currentDataRowState == null) { // row doesn't exist + updateCurrColumnCellExpr(atomicPut, currColumnCellExprMap); + if (skipFirstOp) { + if (operations.size() <= 1 && repeat <= 1) { + // early exit since there is only one ON DUPLICATE KEY UPDATE + // clause which is ignored because the row doesn't 
exist so + // simply use the values in UPSERT VALUES + mutations.add(atomicPut); + if (context.returnResult) { context.currColumnCellExprMap = currColumnCellExprMap; - } - - MultiKeyValueTuple tuple = new MultiKeyValueTuple(flattenedCells); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - // for each conditional upsert in the batch - for (int opIndex = 0; opIndex < operations.size(); opIndex++) { - Pair> operation = operations.get(opIndex); - PTable table = operation.getFirst(); - List expressions = operation.getSecond(); - for (int j = 0; j < repeat; j++) { // repeater loop - ptr.set(rowKey); - // Sort the list of cells (if they've been flattened in which case they're - // not necessarily ordered correctly). - if (flattenedCells != null) { - Collections.sort(flattenedCells, CellComparator.getInstance()); - } - PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); - int adjust = table.getBucketNum() == null ? 1 : 2; - for (int i = 0; i < expressions.size(); i++) { - Expression expression = expressions.get(i); - ptr.set(EMPTY_BYTE_ARRAY); - expression.evaluate(tuple, ptr); - PColumn column = table.getColumns().get(i + adjust); - Object value = expression.getDataType().toObject(ptr, column.getSortOrder()); - // We are guaranteed that the two column will have the same type - if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), - expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), - column.getMaxLength(), column.getScale())) { - throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), - column.getScale(), column.getName().getString()); - } - column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), - expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), - column.getSortOrder(), table.rowKeyOrderOptimizable()); - byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); - row.setValue(column, bytes); - - // If the column exist in currColumnCellExprMap, set the boolean value in the - // map to be true if the expression is CaseExpression and the Else-clause is - // evaluated to be true - ColumnReference colRef = new ColumnReference(column.getFamilyName().getBytes(), - column.getColumnQualifierBytes()); - if (currColumnCellExprMap.containsKey(colRef)) { - Pair valuePair = currColumnCellExprMap.get(colRef); - if (expression instanceof CaseExpression - && ((CaseExpression) expression).evaluateIndexOf(tuple, ptr) - == expression.getChildren().size() - 1) { - valuePair.setSecond(true); - } else { - valuePair.setSecond(false); - } - } - } - List updatedCells = Lists.newArrayListWithExpectedSize(estimatedSize); - List newMutations = row.toRowMutations(); - for (Mutation source : newMutations) { - flattenCells(source, updatedCells); - } - // update the cells to the latest values calculated above - flattenedCells = mergeCells(flattenedCells, updatedCells); - // we need to retrieve empty cell later on which relies on binary search - flattenedCells.sort(CellComparator.getInstance()); - tuple.setKeyValues(flattenedCells); } - // Repeat only applies to first statement - repeat = 1; - } - - put = new Put(rowKey); - delete = new Delete(rowKey); - transferAttributes(atomicPut, put); - transferAttributes(atomicPut, delete); - for (int i = 0; i < tuple.size(); i++) { - Cell cell = tuple.getValue(i); - if (cell.getType() == Cell.Type.Put) { - if (checkCellNeedUpdate(cell, currColumnCellExprMap)) { - put.add(cell); - } - } else { - 
delete.add(cell); - } - } - - if (!put.isEmpty() || !delete.isEmpty()) { - PTable table = operations.get(0).getFirst(); - addEmptyKVCellToPut(put, tuple, table); - } - - if (!put.isEmpty()) { - mutations.add(put); - } - if (!delete.isEmpty()) { - mutations.add(delete); + return mutations; + } + // If there are multiple ON DUPLICATE KEY UPDATE on a new row, + // the first one is skipped + repeat--; } + // Base current state off of new row + flattenedCells = flattenCells(atomicPut); + } else { + // Base current state off of existing row + flattenedCells = cells; + // store all current cells from currentDataRowState + updateCurrColumnCellExpr(currentDataRowState, currColumnCellExprMap); + } - return mutations; - } + if (context.returnResult) { + context.currColumnCellExprMap = currColumnCellExprMap; + } - /** - * Create or Update ColumnRef to Cell map based on the Put mutation. - * - * @param put The Put mutation representing the current or new/updated state of the row. - * @param currColumnCellExprMap ColumnRef to Cell mapping for all the cells involved in the - * given mutation. - */ - private static void updateCurrColumnCellExpr(Put put, - Map> - currColumnCellExprMap) { - for (Map.Entry> entry : - put.getFamilyCellMap().entrySet()) { - for (Cell cell : entry.getValue()) { - byte[] family = CellUtil.cloneFamily(cell); - byte[] qualifier = CellUtil.cloneQualifier(cell); - ColumnReference colRef = new ColumnReference(family, qualifier); - currColumnCellExprMap.put(colRef, new Pair<>(cell, null)); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(flattenedCells); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + // for each conditional upsert in the batch + for (int opIndex = 0; opIndex < operations.size(); opIndex++) { + Pair> operation = operations.get(opIndex); + PTable table = operation.getFirst(); + List expressions = operation.getSecond(); + for (int j = 0; j < repeat; j++) { // repeater loop + ptr.set(rowKey); + // Sort the list of cells (if they've been flattened in which case they're + // not necessarily ordered correctly). + if (flattenedCells != null) { + Collections.sort(flattenedCells, CellComparator.getInstance()); + } + PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); + int adjust = table.getBucketNum() == null ? 
1 : 2; + for (int i = 0; i < expressions.size(); i++) { + Expression expression = expressions.get(i); + ptr.set(EMPTY_BYTE_ARRAY); + expression.evaluate(tuple, ptr); + PColumn column = table.getColumns().get(i + adjust); + Object value = expression.getDataType().toObject(ptr, column.getSortOrder()); + // We are guaranteed that the two column will have the same type + if ( + !column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), + expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), + column.getMaxLength(), column.getScale()) + ) { + throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), + column.getScale(), column.getName().getString()); + } + column.getDataType().coerceBytes(ptr, value, expression.getDataType(), + expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), + column.getMaxLength(), column.getScale(), column.getSortOrder(), + table.rowKeyOrderOptimizable()); + byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); + row.setValue(column, bytes); + + // If the column exist in currColumnCellExprMap, set the boolean value in the + // map to be true if the expression is CaseExpression and the Else-clause is + // evaluated to be true + ColumnReference colRef = new ColumnReference(column.getFamilyName().getBytes(), + column.getColumnQualifierBytes()); + if (currColumnCellExprMap.containsKey(colRef)) { + Pair valuePair = currColumnCellExprMap.get(colRef); + if ( + expression instanceof CaseExpression + && ((CaseExpression) expression).evaluateIndexOf(tuple, ptr) + == expression.getChildren().size() - 1 + ) { + valuePair.setSecond(true); + } else { + valuePair.setSecond(false); } + } } + List updatedCells = Lists.newArrayListWithExpectedSize(estimatedSize); + List newMutations = row.toRowMutations(); + for (Mutation source : newMutations) { + flattenCells(source, updatedCells); + } + // update the cells to the latest values calculated above + flattenedCells = mergeCells(flattenedCells, updatedCells); + // we need to retrieve empty cell later on which relies on binary search + flattenedCells.sort(CellComparator.getInstance()); + tuple.setKeyValues(flattenedCells); + } + // Repeat only applies to first statement + repeat = 1; } - private void addEmptyKVCellToPut(Put put, MultiKeyValueTuple tuple, PTable table) throws IOException { - byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); - Cell emptyKVCell = tuple.getValue(emptyCF, emptyCQ); - if (emptyKVCell != null) { - put.add(emptyKVCell); + put = new Put(rowKey); + delete = new Delete(rowKey); + transferAttributes(atomicPut, put); + transferAttributes(atomicPut, delete); + for (int i = 0; i < tuple.size(); i++) { + Cell cell = tuple.getValue(i); + if (cell.getType() == Cell.Type.Put) { + if (checkCellNeedUpdate(cell, currColumnCellExprMap)) { + put.add(cell); } + } else { + delete.add(cell); + } } - private static List flattenCells(Mutation m) { - List flattenedCells = new ArrayList<>(); - flattenCells(m, flattenedCells); - return flattenedCells; + if (!put.isEmpty() || !delete.isEmpty()) { + PTable table = operations.get(0).getFirst(); + addEmptyKVCellToPut(put, tuple, table); } - private static void flattenCells(Mutation m, List flattenedCells) { - for (List cells : m.getFamilyCellMap().values()) { - flattenedCells.addAll(cells); - } + if (!put.isEmpty()) { + mutations.add(put); + } + if (!delete.isEmpty()) { + mutations.add(delete); } - /** - * This function is to 
check if a cell need to be updated, based on the current cells' values. - * The cell will not be updated only if the column exist in the expression in which CASE is - * specified and the new value is the same as the old value in the ELSE-clause, otherwise it - * should be updated. - * - * @param cell the cell with new value to be checked - * @param colCellExprMap the column reference map with cell current value - * @return true if the cell need update, false otherwise - */ - private boolean checkCellNeedUpdate(Cell cell, - Map> colCellExprMap) { + return mutations; + } + + /** + * Create or Update ColumnRef to Cell map based on the Put mutation. + * @param put The Put mutation representing the current or new/updated state of + * the row. + * @param currColumnCellExprMap ColumnRef to Cell mapping for all the cells involved in the given + * mutation. + */ + private static void updateCurrColumnCellExpr(Put put, + Map> currColumnCellExprMap) { + for (Map.Entry> entry : put.getFamilyCellMap().entrySet()) { + for (Cell cell : entry.getValue()) { byte[] family = CellUtil.cloneFamily(cell); byte[] qualifier = CellUtil.cloneQualifier(cell); ColumnReference colRef = new ColumnReference(family, qualifier); + currColumnCellExprMap.put(colRef, new Pair<>(cell, null)); + } + } + } - // if cell not exist in the map, meaning that they are new and need update - if (colCellExprMap.isEmpty() || !colCellExprMap.containsKey(colRef)) { - return true; - } - - Pair valuePair = colCellExprMap.get(colRef); - Boolean isInCaseExpressionElseClause = valuePair.getSecond(); - if (isInCaseExpressionElseClause == null) { - return false; - } - if (!isInCaseExpressionElseClause) { - return true; - } - Cell oldCell = valuePair.getFirst(); - ImmutableBytesPtr newValuePtr = new ImmutableBytesPtr(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); - ImmutableBytesPtr oldValuePtr = new ImmutableBytesPtr(oldCell.getValueArray(), - oldCell.getValueOffset(), oldCell.getValueLength()); - return !Bytes.equals(oldValuePtr.get(), oldValuePtr.getOffset(), oldValuePtr.getLength(), - newValuePtr.get(), newValuePtr.getOffset(), newValuePtr.getLength()); - } - - /** - * ensure that the generated mutations have all the attributes like schema - */ - private static void transferAttributes(Mutation source, Mutation target) { - for (Map.Entry entry : source.getAttributesMap().entrySet()) { - target.setAttribute(entry.getKey(), entry.getValue()); - } + private void addEmptyKVCellToPut(Put put, MultiKeyValueTuple tuple, PTable table) + throws IOException { + byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); + Cell emptyKVCell = tuple.getValue(emptyCF, emptyCQ); + if (emptyKVCell != null) { + put.add(emptyKVCell); } + } - /** - * First take all the cells that are present in the latest. Then look at current - * and any cell not present in latest is taken. 
- */ - private static List mergeCells(List current, List latest) { - Map latestColVals = Maps.newHashMapWithExpectedSize(latest.size() + current.size()); + private static List flattenCells(Mutation m) { + List flattenedCells = new ArrayList<>(); + flattenCells(m, flattenedCells); + return flattenedCells; + } - // first take everything present in latest - for (Cell cell : latest) { - byte[] family = CellUtil.cloneFamily(cell); - byte[] qualifier = CellUtil.cloneQualifier(cell); - ColumnReference colInfo = new ColumnReference(family, qualifier); - latestColVals.put(colInfo, cell); - } + private static void flattenCells(Mutation m, List flattenedCells) { + for (List cells : m.getFamilyCellMap().values()) { + flattenedCells.addAll(cells); + } + } - // check for any leftovers in current - for (Cell cell : current) { - byte[] family = CellUtil.cloneFamily(cell); - byte[] qualifier = CellUtil.cloneQualifier(cell); - ColumnReference colInfo = new ColumnReference(family, qualifier); - if (!latestColVals.containsKey(colInfo)) { - latestColVals.put(colInfo, cell); - } - } - return Lists.newArrayList(latestColVals.values()); + /** + * This function is to check if a cell need to be updated, based on the current cells' values. The + * cell will not be updated only if the column exist in the expression in which CASE is specified + * and the new value is the same as the old value in the ELSE-clause, otherwise it should be + * updated. + * @param cell the cell with new value to be checked + * @param colCellExprMap the column reference map with cell current value + * @return true if the cell need update, false otherwise + */ + private boolean checkCellNeedUpdate(Cell cell, + Map> colCellExprMap) { + byte[] family = CellUtil.cloneFamily(cell); + byte[] qualifier = CellUtil.cloneQualifier(cell); + ColumnReference colRef = new ColumnReference(family, qualifier); + + // if cell not exist in the map, meaning that they are new and need update + if (colCellExprMap.isEmpty() || !colCellExprMap.containsKey(colRef)) { + return true; } - public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) { - key.addExtendedAttribute(attrKey, attrValue); + Pair valuePair = colCellExprMap.get(colRef); + Boolean isInCaseExpressionElseClause = valuePair.getSecond(); + if (isInCaseExpressionElseClause == null) { + return false; } + if (!isInCaseExpressionElseClause) { + return true; + } + Cell oldCell = valuePair.getFirst(); + ImmutableBytesPtr newValuePtr = + new ImmutableBytesPtr(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + ImmutableBytesPtr oldValuePtr = new ImmutableBytesPtr(oldCell.getValueArray(), + oldCell.getValueOffset(), oldCell.getValueLength()); + return !Bytes.equals(oldValuePtr.get(), oldValuePtr.getOffset(), oldValuePtr.getLength(), + newValuePtr.get(), newValuePtr.getOffset(), newValuePtr.getLength()); + } - public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) { - return key.getExtendedAttribute(attrKey); + /** + * ensure that the generated mutations have all the attributes like schema + */ + private static void transferAttributes(Mutation source, Mutation target) { + for (Map.Entry entry : source.getAttributesMap().entrySet()) { + target.setAttribute(entry.getKey(), entry.getValue()); } + } - public static Map getAttributeValuesFromWALKey(WALKey key) { - return new HashMap(key.getExtendedAttributes()); + /** + * First take all the cells that are present in the latest. Then look at current and any cell not + * present in latest is taken. 
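   [Editorial illustration, not part of this patch] The merge rule described above amounts to: cells from latest always win, and a cell from current survives only when latest has no cell for that column reference. A tiny worked example with hypothetical column qualifiers:

       // current = {cf:A=1, cf:B=2},  latest = {cf:B=9, cf:C=3}
       // mergeCells(current, latest) yields cells for {cf:A=1, cf:B=9, cf:C=3}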
+ */ + private static List mergeCells(List current, List latest) { + Map latestColVals = + Maps.newHashMapWithExpectedSize(latest.size() + current.size()); + + // first take everything present in latest + for (Cell cell : latest) { + byte[] family = CellUtil.cloneFamily(cell); + byte[] qualifier = CellUtil.cloneQualifier(cell); + ColumnReference colInfo = new ColumnReference(family, qualifier); + latestColVals.put(colInfo, cell); } - /** - * Determines whether the atomic operation is complete based on the operation status. - * HBase returns null Result by default for successful Put and Delete mutations, only for - * Increment and Append mutations, non-null Result is returned by default. - * @param status the operation status. - * @return true if the atomic operation is completed, false otherwise. - */ - public static boolean isAtomicOperationComplete(OperationStatus status) { - return status.getOperationStatusCode() == SUCCESS && status.getResult() != null; + // check for any leftovers in current + for (Cell cell : current) { + byte[] family = CellUtil.cloneFamily(cell); + byte[] qualifier = CellUtil.cloneQualifier(cell); + ColumnReference colInfo = new ColumnReference(family, qualifier); + if (!latestColVals.containsKey(colInfo)) { + latestColVals.put(colInfo, cell); + } } + return Lists.newArrayList(latestColVals.values()); + } + + public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) { + key.addExtendedAttribute(attrKey, attrValue); + } + + public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) { + return key.getExtendedAttribute(attrKey); + } + + public static Map getAttributeValuesFromWALKey(WALKey key) { + return new HashMap(key.getExtendedAttributes()); + } + + /** + * Determines whether the atomic operation is complete based on the operation status. HBase + * returns null Result by default for successful Put and Delete mutations, only for Increment and + * Append mutations, non-null Result is returned by default. + * @param status the operation status. + * @return true if the atomic operation is completed, false otherwise. + */ + public static boolean isAtomicOperationComplete(OperationStatus status) { + return status.getOperationStatusCode() == SUCCESS && status.getResult() != null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionSplitPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionSplitPolicy.java index ee94494d740..5ce5736989d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionSplitPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionSplitPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,45 +31,45 @@ */ public class IndexRegionSplitPolicy extends SteppingSplitPolicy { - @Override - protected boolean skipStoreFileRangeCheck(String familyName) { - if (familyName.startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - return true; - } - return false; + @Override + protected boolean skipStoreFileRangeCheck(String familyName) { + if (familyName.startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + return true; } + return false; + } - @Override - protected byte[] getSplitPoint() { - byte[] oldSplitPoint = super.getSplitPoint(); - if (oldSplitPoint == null) return null; - List stores = region.getStores(); - byte[] splitPointFromLargestStore = null; - long largestStoreSize = 0; - boolean isLocalIndexKey = false; - for (HStore s : stores) { - if (s.getColumnFamilyName() - .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - Optional splitPoint = s.getSplitPoint(); - if (oldSplitPoint != null && splitPoint.isPresent() - && Bytes.compareTo(oldSplitPoint, splitPoint.get()) == 0) { - isLocalIndexKey = true; - } - } + @Override + protected byte[] getSplitPoint() { + byte[] oldSplitPoint = super.getSplitPoint(); + if (oldSplitPoint == null) return null; + List stores = region.getStores(); + byte[] splitPointFromLargestStore = null; + long largestStoreSize = 0; + boolean isLocalIndexKey = false; + for (HStore s : stores) { + if (s.getColumnFamilyName().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + Optional splitPoint = s.getSplitPoint(); + if ( + oldSplitPoint != null && splitPoint.isPresent() + && Bytes.compareTo(oldSplitPoint, splitPoint.get()) == 0 + ) { + isLocalIndexKey = true; } - if (!isLocalIndexKey) return oldSplitPoint; + } + } + if (!isLocalIndexKey) return oldSplitPoint; - for (HStore s : stores) { - if (!s.getColumnFamilyName() - .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { - Optional splitPoint = s.getSplitPoint(); - long storeSize = s.getSize(); - if (splitPoint.isPresent() && largestStoreSize < storeSize) { - splitPointFromLargestStore = splitPoint.get(); - largestStoreSize = storeSize; - } - } + for (HStore s : stores) { + if (!s.getColumnFamilyName().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) { + Optional splitPoint = s.getSplitPoint(); + long storeSize = s.getSize(); + if (splitPoint.isPresent() && largestStoreSize < storeSize) { + splitPointFromLargestStore = splitPoint.get(); + largestStoreSize = storeSize; } - return splitPointFromLargestStore; + } } + return splitPointFromLargestStore; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java index cc7b771a47c..03233619ff8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -74,6 +74,8 @@ import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache; import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.trace.TracingUtils; import org.apache.phoenix.trace.util.NullSpan; import org.apache.phoenix.util.ClientUtil; @@ -84,9 +86,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** * Do all the work of managing index updates from a single coprocessor. All Puts/Delets are passed * to an {@link IndexBuilder} to determine the actual updates to make. @@ -104,8 +103,8 @@ * batches. *

    * We don't need to implement {@link #postPut(ObserverContext, Put, WALEdit, Durability)} and - * {@link #postDelete(ObserverContext, Delete, WALEdit, Durability)} hooks because - * Phoenix always does batch mutations. + * {@link #postDelete(ObserverContext, Delete, WALEdit, Durability)} hooks because Phoenix always + * does batch mutations. *

    */ public class Indexer implements RegionObserver, RegionCoprocessor { @@ -113,7 +112,6 @@ public class Indexer implements RegionObserver, RegionCoprocessor { private static final Logger LOGGER = LoggerFactory.getLogger(Indexer.class); private static final OperationStatus IGNORE = new OperationStatus(OperationStatusCode.SUCCESS); private static final OperationStatus NOWRITE = new OperationStatus(OperationStatusCode.SUCCESS); - protected IndexWriter writer; protected IndexBuildManager builder; @@ -122,17 +120,18 @@ public class Indexer implements RegionObserver, RegionCoprocessor { // Hack to get around not being able to save any state between // coprocessor calls. TODO: remove after HBASE-18127 when available private static class BatchMutateContext { - public final int clientVersion; - public Collection> indexUpdates = Collections.emptyList(); - public List rowLocks = Lists.newArrayListWithExpectedSize(QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + public final int clientVersion; + public Collection> indexUpdates = Collections.emptyList(); + public List rowLocks = + Lists.newArrayListWithExpectedSize(QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - public BatchMutateContext(int clientVersion) { - this.clientVersion = clientVersion; - } + public BatchMutateContext(int clientVersion) { + this.clientVersion = clientVersion; + } } - + private ThreadLocal batchMutateContext = - new ThreadLocal(); + new ThreadLocal(); /** * Configuration key for if the indexer should check the version of HBase is running. Generally, @@ -140,17 +139,23 @@ public BatchMutateContext(int clientVersion) { */ public static final String CHECK_VERSION_CONF_KEY = "com.saleforce.hbase.index.checkversion"; - private static final String INDEX_RECOVERY_FAILURE_POLICY_KEY = "org.apache.hadoop.hbase.index.recovery.failurepolicy"; + private static final String INDEX_RECOVERY_FAILURE_POLICY_KEY = + "org.apache.hadoop.hbase.index.recovery.failurepolicy"; - private static final String INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.post.batch.mutate.threshold"; + private static final String INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.post.batch.mutate.threshold"; private static final long INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT = 3_000; - private static final String INDEXER_INDEX_PREPARE_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.pre.batch.mutate.threshold"; + private static final String INDEXER_INDEX_PREPARE_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.pre.batch.mutate.threshold"; private static final long INDEXER_INDEX_PREPARE_SLOW_THREHSOLD_DEFAULT = 3_000; - private static final String INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.pre.wal.restore.threshold"; + private static final String INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.pre.wal.restore.threshold"; private static final long INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_DEFAULT = 3_000; - private static final String INDEXER_POST_OPEN_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.open.threshold"; + private static final String INDEXER_POST_OPEN_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.open.threshold"; private static final long INDEXER_POST_OPEN_SLOW_THRESHOLD_DEFAULT = 3_000; - private static final String INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY = "phoenix.indexer.slow.pre.increment"; + private static final String INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY = + "phoenix.indexer.slow.pre.increment"; private static final long INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT = 3_000; /** @@ 
-177,15 +182,15 @@ public BatchMutateContext(int clientVersion) { private long slowPreIncrementThreshold; private int rowLockWaitDuration; private String dataTableName; - + public static final String RecoveryFailurePolicyKeyForTesting = INDEX_RECOVERY_FAILURE_POLICY_KEY; - public static final int INDEXING_SUPPORTED_MAJOR_VERSION = VersionUtil - .encodeMaxPatchVersion(0, 94); - public static final int INDEXING_SUPPORTED__MIN_MAJOR_VERSION = VersionUtil - .encodeVersion("0.94.0"); - private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = VersionUtil - .encodeVersion("0.94.9"); + public static final int INDEXING_SUPPORTED_MAJOR_VERSION = + VersionUtil.encodeMaxPatchVersion(0, 94); + public static final int INDEXING_SUPPORTED__MIN_MAJOR_VERSION = + VersionUtil.encodeVersion("0.94.0"); + private static final int INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION = + VersionUtil.encodeVersion("0.94.9"); private static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000; @@ -196,83 +201,83 @@ public Optional getRegionObserver() { @Override public void start(CoprocessorEnvironment e) throws IOException { - try { - final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; - String serverName = env.getServerName().getServerName(); - if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) { - // make sure the right version <-> combinations are allowed. - String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration()); - if (errormsg != null) { - throw new FatalIndexBuildingFailureException(errormsg); - } - } - - this.builder = new IndexBuildManager(env); - // Clone the config since it is shared - DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); - // setup the actual index writer - this.writer = new IndexWriter(indexWriterEnv, serverName + "-index-writer"); - - this.rowLockWaitDuration = env.getConfiguration().getInt("hbase.rowlock.wait.duration", - DEFAULT_ROWLOCK_WAIT_DURATION); - this.lockManager = new LockManager(); - - // Metrics impl for the Indexer -- avoiding unnecessary indirection for hadoop-1/2 compat - this.metricSource = MetricsIndexerSourceFactory.getInstance().getIndexerSource(); - setSlowThresholds(e.getConfiguration()); - this.dataTableName = env.getRegionInfo().getTable().getNameAsString(); - try { - // get the specified failure policy. We only ever override it in tests, but we need to do it - // here - Class policyClass = - env.getConfiguration().getClass(INDEX_RECOVERY_FAILURE_POLICY_KEY, - StoreFailuresInCachePolicy.class, IndexFailurePolicy.class); - IndexFailurePolicy policy = - policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits); - LOGGER.debug("Setting up recovery writter with failure policy: " + policy.getClass()); - recoveryWriter = - new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer"); - } catch (Exception ex) { - throw new IOException("Could not instantiate recovery failure policy!", ex); + try { + final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; + String serverName = env.getServerName().getServerName(); + if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) { + // make sure the right version <-> combinations are allowed. 
+ String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration()); + if (errormsg != null) { + throw new FatalIndexBuildingFailureException(errormsg); } - } catch (NoSuchMethodError ex) { - disabled = true; - LOGGER.error("Must be too early a version of HBase. Disabled coprocessor ", ex); } + + this.builder = new IndexBuildManager(env); + // Clone the config since it is shared + DelegateRegionCoprocessorEnvironment indexWriterEnv = + new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); + // setup the actual index writer + this.writer = new IndexWriter(indexWriterEnv, serverName + "-index-writer"); + + this.rowLockWaitDuration = + env.getConfiguration().getInt("hbase.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION); + this.lockManager = new LockManager(); + + // Metrics impl for the Indexer -- avoiding unnecessary indirection for hadoop-1/2 compat + this.metricSource = MetricsIndexerSourceFactory.getInstance().getIndexerSource(); + setSlowThresholds(e.getConfiguration()); + this.dataTableName = env.getRegionInfo().getTable().getNameAsString(); + try { + // get the specified failure policy. We only ever override it in tests, but we need to do it + // here + Class policyClass = + env.getConfiguration().getClass(INDEX_RECOVERY_FAILURE_POLICY_KEY, + StoreFailuresInCachePolicy.class, IndexFailurePolicy.class); + IndexFailurePolicy policy = + policyClass.getConstructor(PerRegionIndexWriteCache.class).newInstance(failedIndexEdits); + LOGGER.debug("Setting up recovery writter with failure policy: " + policy.getClass()); + recoveryWriter = + new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer"); + } catch (Exception ex) { + throw new IOException("Could not instantiate recovery failure policy!", ex); + } + } catch (NoSuchMethodError ex) { + disabled = true; + LOGGER.error("Must be too early a version of HBase. Disabled coprocessor ", ex); + } } /** * Extracts the slow call threshold values from the configuration. 
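   [Editorial note, not part of this patch] The slow-call threshold keys declared above are ordinary region server configuration properties (3,000 ms defaults), so they can be overridden in the server-side configuration (typically hbase-site.xml). A minimal sketch, assuming a Hadoop Configuration object named conf:

       conf.setLong("phoenix.indexer.slow.post.batch.mutate.threshold", 10_000L);
       conf.setLong("phoenix.indexer.slow.pre.batch.mutate.threshold", 10_000L);
       conf.setLong("phoenix.indexer.slow.pre.increment", 10_000L);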
*/ private void setSlowThresholds(Configuration c) { - slowIndexPrepareThreshold = c.getLong(INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY, - INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT); - slowIndexWriteThreshold = c.getLong(INDEXER_INDEX_PREPARE_SLOW_THRESHOLD_KEY, - INDEXER_INDEX_PREPARE_SLOW_THREHSOLD_DEFAULT); - slowPreWALRestoreThreshold = c.getLong(INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_KEY, - INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_DEFAULT); - slowPostOpenThreshold = c.getLong(INDEXER_POST_OPEN_SLOW_THRESHOLD_KEY, - INDEXER_POST_OPEN_SLOW_THRESHOLD_DEFAULT); - slowPreIncrementThreshold = c.getLong(INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY, - INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT); + slowIndexPrepareThreshold = + c.getLong(INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY, INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT); + slowIndexWriteThreshold = c.getLong(INDEXER_INDEX_PREPARE_SLOW_THRESHOLD_KEY, + INDEXER_INDEX_PREPARE_SLOW_THREHSOLD_DEFAULT); + slowPreWALRestoreThreshold = c.getLong(INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_KEY, + INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_DEFAULT); + slowPostOpenThreshold = + c.getLong(INDEXER_POST_OPEN_SLOW_THRESHOLD_KEY, INDEXER_POST_OPEN_SLOW_THRESHOLD_DEFAULT); + slowPreIncrementThreshold = c.getLong(INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY, + INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT); } private String getCallTooSlowMessage(String callName, long duration, long threshold) { - StringBuilder sb = new StringBuilder(64); - sb.append("(callTooSlow) ").append(callName).append(" duration=").append(duration); - sb.append("ms, threshold=").append(threshold).append("ms"); - return sb.toString(); + StringBuilder sb = new StringBuilder(64); + sb.append("(callTooSlow) ").append(callName).append(" duration=").append(duration); + sb.append("ms, threshold=").append(threshold).append("ms"); + return sb.toString(); } - @Override public void stop(CoprocessorEnvironment e) throws IOException { if (this.stopped) { return; } if (this.disabled) { - return; - } + return; + } this.stopped = true; String msg = "Indexer is being stopped"; this.builder.stop(msg); @@ -281,345 +286,344 @@ public void stop(CoprocessorEnvironment e) throws IOException { } /** - * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing - * sets up the necessary locks and mvcc to allow an atomic update. The Increment is not a - * real increment, though, it's really more of a Put. We translate the Increment into a - * list of mutations, at most a single Put and Delete that are the changes upon executing - * the list of ON DUPLICATE KEY clauses for this row. + * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing sets up + * the necessary locks and mvcc to allow an atomic update. The Increment is not a real increment, + * though, it's really more of a Put. We translate the Increment into a list of mutations, at most + * a single Put and Delete that are the changes upon executing the list of ON DUPLICATE KEY + * clauses for this row. 
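   [Editorial illustration, not part of this patch] For a client statement such as UPSERT INTO T (PK, COUNTER) VALUES (?, 0) ON DUPLICATE KEY UPDATE COUNTER = COUNTER + 1, the serialized clause reaches this hook as an Increment, and executeAtomicOp() below evaluates it against the current row state. Roughly, with hypothetical column names and outcomes as described in the javadoc above:

       List<Mutation> mutations = this.builder.executeAtomicOp(inc);
       // row absent:                        [Put{COUNTER=0}]        -- UPSERT VALUES applies
       // row present:                       [Put{COUNTER=old + 1}]  -- ON DUPLICATE KEY clause applies
       // ON DUPLICATE KEY IGNORE, present:  []                      -- nothing to commit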
*/ @Override public Result preIncrementAfterRowLock(final ObserverContext e, - final Increment inc) throws IOException { - long start = EnvironmentEdgeManager.currentTimeMillis(); - try { - List mutations = this.builder.executeAtomicOp(inc); - if (mutations == null) { - return null; - } + final Increment inc) throws IOException { + long start = EnvironmentEdgeManager.currentTimeMillis(); + try { + List mutations = this.builder.executeAtomicOp(inc); + if (mutations == null) { + return null; + } - // Causes the Increment to be ignored as we're committing the mutations - // ourselves below. - e.bypass(); - // ON DUPLICATE KEY IGNORE will return empty list if row already exists - // as no action is required in that case. - if (!mutations.isEmpty()) { - Region region = e.getEnvironment().getRegion(); - // Otherwise, submit the mutations directly here - region.batchMutate(mutations.toArray(new Mutation[0])); - } - return Result.EMPTY_RESULT; - } catch (Throwable t) { - throw ClientUtil.createIOException( - "Unable to process ON DUPLICATE IGNORE for " + - e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + - "(" + Bytes.toStringBinary(inc.getRow()) + ")", t); - } finally { - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexPrepareThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", - duration, slowPreIncrementThreshold)); - } - metricSource.incrementSlowDuplicateKeyCheckCalls(dataTableName); - } - metricSource.updateDuplicateKeyCheckTime(dataTableName, duration); + // Causes the Increment to be ignored as we're committing the mutations + // ourselves below. + e.bypass(); + // ON DUPLICATE KEY IGNORE will return empty list if row already exists + // as no action is required in that case. 
+ if (!mutations.isEmpty()) { + Region region = e.getEnvironment().getRegion(); + // Otherwise, submit the mutations directly here + region.batchMutate(mutations.toArray(new Mutation[0])); } + return Result.EMPTY_RESULT; + } catch (Throwable t) { + throw ClientUtil.createIOException("Unable to process ON DUPLICATE IGNORE for " + + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString() + "(" + + Bytes.toStringBinary(inc.getRow()) + ")", t); + } finally { + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexPrepareThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold)); + } + metricSource.incrementSlowDuplicateKeyCheckCalls(dataTableName); + } + metricSource.updateDuplicateKeyCheckTime(dataTableName, duration); + } } @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - if (this.disabled) { - return; - } - long start = EnvironmentEdgeManager.currentTimeMillis(); - try { - preBatchMutateWithExceptions(c, miniBatchOp); - return; - } catch (Throwable t) { - rethrowIndexingException(t); - } finally { - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexPrepareThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("preBatchMutate", - duration, slowIndexPrepareThreshold)); - } - metricSource.incrementNumSlowIndexPrepareCalls(dataTableName); - } - metricSource.updateIndexPrepareTime(dataTableName, duration); + MiniBatchOperationInProgress miniBatchOp) throws IOException { + if (this.disabled) { + return; + } + long start = EnvironmentEdgeManager.currentTimeMillis(); + try { + preBatchMutateWithExceptions(c, miniBatchOp); + return; + } catch (Throwable t) { + rethrowIndexingException(t); + } finally { + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexPrepareThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER + .debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold)); + } + metricSource.incrementNumSlowIndexPrepareCalls(dataTableName); } - throw new RuntimeException( - "Somehow didn't return an index update but also didn't propagate the failure to the client!"); + metricSource.updateIndexPrepareTime(dataTableName, duration); + } + throw new RuntimeException( + "Somehow didn't return an index update but also didn't propagate the failure to the client!"); } private static void setTimeStamp(KeyValue kv, byte[] tsBytes) { - int tsOffset = kv.getTimestampOffset(); - System.arraycopy(tsBytes, 0, kv.getBuffer(), tsOffset, Bytes.SIZEOF_LONG); + int tsOffset = kv.getTimestampOffset(); + System.arraycopy(tsBytes, 0, kv.getBuffer(), tsOffset, Bytes.SIZEOF_LONG); } public void preBatchMutateWithExceptions(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws Throwable { + MiniBatchOperationInProgress miniBatchOp) throws Throwable { // Need to add cell tags to Delete Marker before we do any index processing // since we add tags to tables which doesn't have indexes also. 
ServerIndexUtil.setDeleteAttributes(miniBatchOp); - // first group all the updates for a single row into a single update to be processed - Map mutationsMap = - new HashMap(); - - Durability defaultDurability = Durability.SYNC_WAL; - if (c.getEnvironment().getRegion() != null) { - defaultDurability = c.getEnvironment().getRegion().getTableDescriptor().getDurability(); - defaultDurability = (defaultDurability == Durability.USE_DEFAULT) ? - Durability.SYNC_WAL : defaultDurability; + // first group all the updates for a single row into a single update to be processed + Map mutationsMap = + new HashMap(); + + Durability defaultDurability = Durability.SYNC_WAL; + if (c.getEnvironment().getRegion() != null) { + defaultDurability = c.getEnvironment().getRegion().getTableDescriptor().getDurability(); + defaultDurability = + (defaultDurability == Durability.USE_DEFAULT) ? Durability.SYNC_WAL : defaultDurability; + } + /* + * Exclusively lock all rows so we get a consistent read while determining the index updates + */ + BatchMutateContext context = + new BatchMutateContext(this.builder.getIndexMetaData(miniBatchOp).getClientVersion()); + setBatchMutateContext(c, context); + Durability durability = Durability.SKIP_WAL; + boolean copyMutations = false; + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (this.builder.isAtomicOp(m)) { + miniBatchOp.setOperationStatus(i, IGNORE); + continue; } - /* - * Exclusively lock all rows so we get a consistent read - * while determining the index updates - */ - BatchMutateContext context = new BatchMutateContext(this.builder.getIndexMetaData(miniBatchOp).getClientVersion()); - setBatchMutateContext(c, context); - Durability durability = Durability.SKIP_WAL; - boolean copyMutations = false; - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (this.builder.isAtomicOp(m)) { - miniBatchOp.setOperationStatus(i, IGNORE); - continue; - } - if (this.builder.isEnabled(m)) { - context.rowLocks.add(lockManager.lockRow(m.getRow(), rowLockWaitDuration)); - Durability effectiveDurablity = (m.getDurability() == Durability.USE_DEFAULT) ? - defaultDurability : m.getDurability(); - if (effectiveDurablity.ordinal() > durability.ordinal()) { - durability = effectiveDurablity; - } - // Track whether or not we need to - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - if (mutationsMap.containsKey(row)) { - copyMutations = true; - } else { - mutationsMap.put(row, null); - } - } + if (this.builder.isEnabled(m)) { + context.rowLocks.add(lockManager.lockRow(m.getRow(), rowLockWaitDuration)); + Durability effectiveDurablity = + (m.getDurability() == Durability.USE_DEFAULT) ? 
defaultDurability : m.getDurability(); + if (effectiveDurablity.ordinal() > durability.ordinal()) { + durability = effectiveDurablity; + } + // Track whether or not we need to + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + if (mutationsMap.containsKey(row)) { + copyMutations = true; + } else { + mutationsMap.put(row, null); + } } + } - // early exit if it turns out we don't have any edits - if (mutationsMap.isEmpty()) { - return; - } + // early exit if it turns out we don't have any edits + if (mutationsMap.isEmpty()) { + return; + } - // If we're copying the mutations - Collection originalMutations; - Collection mutations; - if (copyMutations) { - originalMutations = null; - mutations = mutationsMap.values(); - } else { - originalMutations = Lists.newArrayListWithExpectedSize(mutationsMap.size()); - mutations = originalMutations; - } - - Mutation firstMutation = miniBatchOp.getOperation(0); - ReplayWrite replayWrite = this.builder.getReplayWrite(firstMutation); - boolean resetTimeStamp = replayWrite == null; - long now = EnvironmentEdgeManager.currentTimeMillis(); - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - // skip this mutation if we aren't enabling indexing - // unfortunately, we really should ask if the raw mutation (rather than the combined mutation) - // should be indexed, which means we need to expose another method on the builder. Such is the - // way optimization go though. - if (miniBatchOp.getOperationStatus(i) != IGNORE && this.builder.isEnabled(m)) { - if (resetTimeStamp) { - // Unless we're replaying edits to rebuild the index, we update the time stamp - // of the data table to prevent overlapping time stamps (which prevents index - // inconsistencies as this case isn't handled correctly currently). - for (List cells : m.getFamilyCellMap().values()) { - for (Cell cell : cells) { - CellUtil.setTimestamp(cell, now); - } - } - } - // No need to write the table mutations when we're rebuilding - // the index as they're already written and just being replayed. - if (replayWrite == ReplayWrite.INDEX_ONLY - || replayWrite == ReplayWrite.REBUILD_INDEX_ONLY) { - miniBatchOp.setOperationStatus(i, NOWRITE); - } - - // Only copy mutations if we found duplicate rows - // which only occurs when we're partially rebuilding - // the index (since we'll potentially have both a - // Put and a Delete mutation for the same row). 
- if (copyMutations) { - // Add the mutation to the batch set - - ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); - MultiMutation stored = mutationsMap.get(row); - // we haven't seen this row before, so add it - if (stored == null) { - stored = new MultiMutation(row); - mutationsMap.put(row, stored); - } - stored.addAll(m); - } else { - originalMutations.add(m); - } + // If we're copying the mutations + Collection originalMutations; + Collection mutations; + if (copyMutations) { + originalMutations = null; + mutations = mutationsMap.values(); + } else { + originalMutations = Lists.newArrayListWithExpectedSize(mutationsMap.size()); + mutations = originalMutations; + } + + Mutation firstMutation = miniBatchOp.getOperation(0); + ReplayWrite replayWrite = this.builder.getReplayWrite(firstMutation); + boolean resetTimeStamp = replayWrite == null; + long now = EnvironmentEdgeManager.currentTimeMillis(); + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + // skip this mutation if we aren't enabling indexing + // unfortunately, we really should ask if the raw mutation (rather than the combined mutation) + // should be indexed, which means we need to expose another method on the builder. Such is the + // way optimization go though. + if (miniBatchOp.getOperationStatus(i) != IGNORE && this.builder.isEnabled(m)) { + if (resetTimeStamp) { + // Unless we're replaying edits to rebuild the index, we update the time stamp + // of the data table to prevent overlapping time stamps (which prevents index + // inconsistencies as this case isn't handled correctly currently). + for (List cells : m.getFamilyCellMap().values()) { + for (Cell cell : cells) { + CellUtil.setTimestamp(cell, now); + } } - } - - // dump all the index updates into a single WAL. They will get combined in the end anyways, so - // don't worry which one we get - WALEdit edit = miniBatchOp.getWalEdit(0); - if (edit == null) { - edit = new WALEdit(); - miniBatchOp.setWalEdit(0, edit); - } - - if (copyMutations || replayWrite != null) { - mutations = IndexManagementUtil.flattenMutationsByTimestamp(mutations); - } + } + // No need to write the table mutations when we're rebuilding + // the index as they're already written and just being replayed. + if ( + replayWrite == ReplayWrite.INDEX_ONLY || replayWrite == ReplayWrite.REBUILD_INDEX_ONLY + ) { + miniBatchOp.setOperationStatus(i, NOWRITE); + } - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; + // Only copy mutations if we found duplicate rows + // which only occurs when we're partially rebuilding + // the index (since we'll potentially have both a + // Put and a Delete mutation for the same row). + if (copyMutations) { + // Add the mutation to the batch set + + ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow()); + MultiMutation stored = mutationsMap.get(row); + // we haven't seen this row before, so add it + if (stored == null) { + stored = new MultiMutation(row); + mutationsMap.put(row, stored); } - long start = EnvironmentEdgeManager.currentTimeMillis(); + stored.addAll(m); + } else { + originalMutations.add(m); + } + } + } - // get the index updates for all elements in this batch - Collection> indexUpdates = - this.builder.getIndexUpdate(miniBatchOp, mutations); + // dump all the index updates into a single WAL. 
They will get combined in the end anyways, so + // don't worry which one we get + WALEdit edit = miniBatchOp.getWalEdit(0); + if (edit == null) { + edit = new WALEdit(); + miniBatchOp.setWalEdit(0, edit); + } + if (copyMutations || replayWrite != null) { + mutations = IndexManagementUtil.flattenMutationsByTimestamp(mutations); + } - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexPrepareThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage( - "indexPrepare", duration, slowIndexPrepareThreshold)); - } - metricSource.incrementNumSlowIndexPrepareCalls(dataTableName); - } - metricSource.updateIndexPrepareTime(dataTableName, duration); - current.addTimelineAnnotation("Built index updates, doing preStep"); - TracingUtils.addAnnotation(current, "index update count", indexUpdates.size()); - byte[] tableName = c.getEnvironment().getRegion().getTableDescriptor().getTableName().getName(); - Iterator> indexUpdatesItr = indexUpdates.iterator(); - List localUpdates = new ArrayList(indexUpdates.size()); - while (indexUpdatesItr.hasNext()) { - Pair next = indexUpdatesItr.next(); - if (Bytes.compareTo(next.getSecond(), tableName) == 0) { - localUpdates.add(next.getFirst()); - indexUpdatesItr.remove(); - } - } - if (!localUpdates.isEmpty()) { - miniBatchOp.addOperationsFromCP(0, - localUpdates.toArray(new Mutation[localUpdates.size()])); - } - if (!indexUpdates.isEmpty()) { - context.indexUpdates = indexUpdates; - // write index updates to WAL - if (durability != Durability.SKIP_WAL) { - // we have all the WAL durability, so we just update the WAL entry and move on - for (Pair entry : indexUpdates) { - edit.add(IndexedKeyValue.newIndexedKeyValue(entry.getSecond(), - entry.getFirst())); - } - } - } + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; + } + long start = EnvironmentEdgeManager.currentTimeMillis(); + + // get the index updates for all elements in this batch + Collection> indexUpdates = + this.builder.getIndexUpdate(miniBatchOp, mutations); + + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexPrepareThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold)); + } + metricSource.incrementNumSlowIndexPrepareCalls(dataTableName); + } + metricSource.updateIndexPrepareTime(dataTableName, duration); + current.addTimelineAnnotation("Built index updates, doing preStep"); + TracingUtils.addAnnotation(current, "index update count", indexUpdates.size()); + byte[] tableName = + c.getEnvironment().getRegion().getTableDescriptor().getTableName().getName(); + Iterator> indexUpdatesItr = indexUpdates.iterator(); + List localUpdates = new ArrayList(indexUpdates.size()); + while (indexUpdatesItr.hasNext()) { + Pair next = indexUpdatesItr.next(); + if (Bytes.compareTo(next.getSecond(), tableName) == 0) { + localUpdates.add(next.getFirst()); + indexUpdatesItr.remove(); + } + } + if (!localUpdates.isEmpty()) { + miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()])); + } + if (!indexUpdates.isEmpty()) { + context.indexUpdates = indexUpdates; + // write index updates to WAL + if (durability != Durability.SKIP_WAL) { + // we have all the WAL durability, so we just update the WAL entry and 
move on + for (Pair entry : indexUpdates) { + edit.add(IndexedKeyValue.newIndexedKeyValue(entry.getSecond(), entry.getFirst())); } + } + } + } } - private void setBatchMutateContext(ObserverContext c, BatchMutateContext context) { - this.batchMutateContext.set(context); + private void setBatchMutateContext(ObserverContext c, + BatchMutateContext context) { + this.batchMutateContext.set(context); } - - private BatchMutateContext getBatchMutateContext(ObserverContext c) { - return this.batchMutateContext.get(); + + private BatchMutateContext + getBatchMutateContext(ObserverContext c) { + return this.batchMutateContext.get(); } - + private void removeBatchMutateContext(ObserverContext c) { - this.batchMutateContext.remove(); + this.batchMutateContext.remove(); } @Override public void postBatchMutateIndispensably(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { - if (this.disabled) { - return; + MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { + if (this.disabled) { + return; + } + long start = EnvironmentEdgeManager.currentTimeMillis(); + BatchMutateContext context = getBatchMutateContext(c); + if (context == null) { + return; + } + try { + for (RowLock rowLock : context.rowLocks) { + rowLock.release(); } - long start = EnvironmentEdgeManager.currentTimeMillis(); - BatchMutateContext context = getBatchMutateContext(c); - if (context == null) { - return; + this.builder.batchCompleted(miniBatchOp); + + if (success) { // if miniBatchOp was successfully written, write index updates + doPost(c, context); } - try { - for (RowLock rowLock : context.rowLocks) { - rowLock.release(); - } - this.builder.batchCompleted(miniBatchOp); + } finally { + removeBatchMutateContext(c); + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexWriteThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, + slowIndexWriteThreshold)); + } + metricSource.incrementNumSlowIndexWriteCalls(dataTableName); + } + metricSource.updateIndexWriteTime(dataTableName, duration); + } + } - if (success) { // if miniBatchOp was successfully written, write index updates - doPost(c, context); - } - } finally { - removeBatchMutateContext(c); - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexWriteThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", - duration, slowIndexWriteThreshold)); - } - metricSource.incrementNumSlowIndexWriteCalls(dataTableName); - } - metricSource.updateIndexWriteTime(dataTableName, duration); - } + private void doPost(ObserverContext c, BatchMutateContext context) + throws IOException { + try { + doPostWithExceptions(c, context); + return; + } catch (Throwable e) { + rethrowIndexingException(e); + } + throw new RuntimeException( + "Somehow didn't complete the index update, but didn't return succesfully either!"); } - private void doPost(ObserverContext c, BatchMutateContext context) throws IOException { - try { - doPostWithExceptions(c,context); - return; - } catch (Throwable e) { - rethrowIndexingException(e); - } - throw new RuntimeException( - "Somehow didn't complete the index update, but didn't return succesfully either!"); + private void doPostWithExceptions(ObserverContext c, + BatchMutateContext context) throws IOException { + // short circuit, if we don't need to do any work + if (context == null 
|| context.indexUpdates.isEmpty()) { + return; } - private void doPostWithExceptions(ObserverContext c, BatchMutateContext context) - throws IOException { - //short circuit, if we don't need to do any work - if (context == null || context.indexUpdates.isEmpty()) { - return; + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = Trace.startSpan("Completing index writes")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; } + long start = EnvironmentEdgeManager.currentTimeMillis(); - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Completing index writes")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - long start = EnvironmentEdgeManager.currentTimeMillis(); - - current.addTimelineAnnotation("Actually doing index update for first time"); - writer.writeAndHandleFailure(context.indexUpdates, false, context.clientVersion); - - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowIndexWriteThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("indexWrite", - duration, slowIndexWriteThreshold)); - } - metricSource.incrementNumSlowIndexWriteCalls(dataTableName); - } - metricSource.updateIndexWriteTime(dataTableName, duration); + current.addTimelineAnnotation("Actually doing index update for first time"); + writer.writeAndHandleFailure(context.indexUpdates, false, context.clientVersion); + + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowIndexWriteThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold)); + } + metricSource.incrementNumSlowIndexWriteCalls(dataTableName); } + metricSource.updateIndexWriteTime(dataTableName, duration); + } } /** @@ -628,9 +632,11 @@ private void doPostWithExceptions(ObserverContext * @return the mutations to apply to the index tables */ private Collection> extractIndexUpdate(WALEdit edit) { - // Avoid multiple internal array resizings. Initial size of 64, unless we have fewer cells in the edit + // Avoid multiple internal array resizings. 
Initial size of 64, unless we have fewer cells in + // the edit int initialSize = Math.min(edit.size(), 64); - Collection> indexUpdates = new ArrayList>(initialSize); + Collection> indexUpdates = + new ArrayList>(initialSize); for (Cell kv : edit.getCells()) { if (kv instanceof IndexedKeyValue) { IndexedKeyValue ikv = (IndexedKeyValue) kv; @@ -643,51 +649,52 @@ private Collection> extractIndexUpdate(WALEdit edit) { @Override public void postOpen(final ObserverContext c) { - Multimap updates = failedIndexEdits.getEdits(c.getEnvironment().getRegion()); - + Multimap updates = + failedIndexEdits.getEdits(c.getEnvironment().getRegion()); + if (this.disabled) { - return; + return; } long start = EnvironmentEdgeManager.currentTimeMillis(); try { - //if we have no pending edits to complete, then we are done - if (updates == null || updates.size() == 0) { - return; - } + // if we have no pending edits to complete, then we are done + if (updates == null || updates.size() == 0) { + return; + } - LOGGER.info("Found some outstanding index updates that didn't succeed during" - + " WAL replay - attempting to replay now."); + LOGGER.info("Found some outstanding index updates that didn't succeed during" + + " WAL replay - attempting to replay now."); - // do the usual writer stuff, killing the server again, if we can't manage to make the index - // writes succeed again - try { - writer.writeAndHandleFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); - } catch (IOException e) { - LOGGER.error("During WAL replay of outstanding index updates, " - + "Exception is thrown instead of killing server during index writing", e); - } + // do the usual writer stuff, killing the server again, if we can't manage to make the index + // writes succeed again + try { + writer.writeAndHandleFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); + } catch (IOException e) { + LOGGER.error("During WAL replay of outstanding index updates, " + + "Exception is thrown instead of killing server during index writing", e); + } } finally { - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowPostOpenThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold)); - } - metricSource.incrementNumSlowPostOpenCalls(dataTableName); - } - metricSource.updatePostOpenTime(dataTableName, duration); + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowPostOpenThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold)); + } + metricSource.incrementNumSlowPostOpenCalls(dataTableName); + } + metricSource.updatePostOpenTime(dataTableName, duration); } } @Override - public void preWALRestore( - org.apache.hadoop.hbase.coprocessor.ObserverContext ctx, - org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.hbase.wal.WALKey logKey, WALEdit logEdit) - throws IOException { - - if (this.disabled) { - return; - } + public void preWALRestore( + org.apache.hadoop.hbase.coprocessor.ObserverContext ctx, + org.apache.hadoop.hbase.client.RegionInfo info, org.apache.hadoop.hbase.wal.WALKey logKey, + WALEdit logEdit) throws IOException { + + if (this.disabled) { + return; + } // TODO check the regions in transition. If the server on which the region lives is this one, // then we should rety that write later in postOpen. @@ -695,28 +702,27 @@ public void preWALRestore( // into their own recovered.edits file. 
This then lets us do a straightforward recovery of each // region (and more efficiently as we aren't writing quite as hectically from this one place). - long start = EnvironmentEdgeManager.currentTimeMillis(); - try { - /* - * Basically, we let the index regions recover for a little while long before retrying in the - * hopes they come up before the primary table finishes. - */ - Collection> indexUpdates = extractIndexUpdate(logEdit); - recoveryWriter.writeAndHandleFailure(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); - } finally { - long duration = EnvironmentEdgeManager.currentTimeMillis() - start; - if (duration >= slowPreWALRestoreThreshold) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("preWALRestore", - duration, slowPreWALRestoreThreshold)); - } - metricSource.incrementNumSlowPreWALRestoreCalls(dataTableName); - } - metricSource.updatePreWALRestoreTime(dataTableName, duration); + long start = EnvironmentEdgeManager.currentTimeMillis(); + try { + /* + * Basically, we let the index regions recover for a little while long before retrying in the + * hopes they come up before the primary table finishes. + */ + Collection> indexUpdates = extractIndexUpdate(logEdit); + recoveryWriter.writeAndHandleFailure(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); + } finally { + long duration = EnvironmentEdgeManager.currentTimeMillis() - start; + if (duration >= slowPreWALRestoreThreshold) { + if (LOGGER.isDebugEnabled()) { + LOGGER + .debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold)); + } + metricSource.incrementNumSlowPreWALRestoreCalls(dataTableName); } + metricSource.updatePreWALRestoreTime(dataTableName, duration); + } } - /** * Exposed for testing! * @return the currently instantiated index builder @@ -725,31 +731,31 @@ public IndexBuilder getBuilderForTesting() { return this.builder.getBuilderForTesting(); } - /** - * Validate that the version and configuration parameters are supported - * @param hbaseVersion current version of HBase on which this coprocessor is installed - * @param conf configuration to check for allowed parameters (e.g. WAL Compression only {@code if >= - * 0.94.9) } - * @return null if the version is supported, the error message to display otherwise - */ - public static String validateVersion(String hbaseVersion, Configuration conf) { - int encodedVersion = VersionUtil.encodeVersion(hbaseVersion); - // above 0.94 everything should be supported - if (encodedVersion > INDEXING_SUPPORTED_MAJOR_VERSION) { - return null; - } - // check to see if its at least 0.94 - if (encodedVersion < INDEXING_SUPPORTED__MIN_MAJOR_VERSION) { - return "Indexing not supported for versions older than 0.94.X"; - } - // if less than 0.94.9, we need to check if WAL Compression is enabled - if (encodedVersion < INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION) { - if (conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false)) { - return "Indexing not supported with WAL Compression for versions of HBase older than 0.94.9 - found version:" - + hbaseVersion; - } - } - return null; + /** + * Validate that the version and configuration parameters are supported + * @param hbaseVersion current version of HBase on which this coprocessor is installed + * @param conf configuration to check for allowed parameters (e.g. 
WAL Compression only + * {@code if >= + * 0.94.9) } + * @return null if the version is supported, the error message to display otherwise + */ + public static String validateVersion(String hbaseVersion, Configuration conf) { + int encodedVersion = VersionUtil.encodeVersion(hbaseVersion); + // above 0.94 everything should be supported + if (encodedVersion > INDEXING_SUPPORTED_MAJOR_VERSION) { + return null; } + // check to see if its at least 0.94 + if (encodedVersion < INDEXING_SUPPORTED__MIN_MAJOR_VERSION) { + return "Indexing not supported for versions older than 0.94.X"; + } + // if less than 0.94.9, we need to check if WAL Compression is enabled + if (encodedVersion < INDEX_WAL_COMPRESSION_MINIMUM_SUPPORTED_VERSION) { + if (conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false)) { + return "Indexing not supported with WAL Compression for versions of HBase older than 0.94.9 - found version:" + + hbaseVersion; + } + } + return null; + } } - diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java index ec189d3d0e4..af0123256bb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,175 +32,165 @@ import org.slf4j.LoggerFactory; /** - * - * Manages reentrant row locks based on row keys. Phoenix needs to manage - * its own locking due to secondary indexes needing a consistent snapshot from - * the time the mvcc is acquired until the time it is advanced (PHOENIX-4053). - * + * Manages reentrant row locks based on row keys. Phoenix needs to manage its own locking due to + * secondary indexes needing a consistent snapshot from the time the mvcc is acquired until the time + * it is advanced (PHOENIX-4053). */ public class LockManager { - private static final Logger LOGGER = LoggerFactory.getLogger(LockManager.class); - - private final ConcurrentHashMap lockedRows = - new ConcurrentHashMap<>(); - - public LockManager () { + private static final Logger LOGGER = LoggerFactory.getLogger(LockManager.class); + + private final ConcurrentHashMap lockedRows = + new ConcurrentHashMap<>(); + + public LockManager() { + } + + /** + * Lock the row or throw otherwise + * @return RowLock used to eventually release the lock + * @throws TimeoutIOException if the lock could not be acquired within the allowed + * rowLockWaitDuration and InterruptedException if interrupted while + * waiting to acquire lock. + */ + public RowLock lockRow(ImmutableBytesPtr rowKey, long waitDurationMs) throws IOException { + RowLockImpl rowLock = new RowLockImpl(rowKey); + TraceScope traceScope = null; + + // If we're tracing start a span to show how long this took. 
+ if (Trace.isTracing()) { + traceScope = Trace.startSpan("LockManager.lockRow"); + traceScope.getSpan().addTimelineAnnotation("Getting a row lock"); } - - /** - * Lock the row or throw otherwise - * @param rowKey - * @param waitDurationMs - * @return RowLock used to eventually release the lock - * @throws TimeoutIOException if the lock could not be acquired within the - * allowed rowLockWaitDuration and InterruptedException if interrupted while - * waiting to acquire lock. - */ - public RowLock lockRow(ImmutableBytesPtr rowKey, long waitDurationMs) throws IOException { - RowLockImpl rowLock = new RowLockImpl(rowKey); - TraceScope traceScope = null; - - // If we're tracing start a span to show how long this took. - if (Trace.isTracing()) { - traceScope = Trace.startSpan("LockManager.lockRow"); - traceScope.getSpan().addTimelineAnnotation("Getting a row lock"); + boolean success = false; + try { + while (true) { + RowLockImpl existingRowLock = lockedRows.putIfAbsent(rowKey, rowLock); + if (existingRowLock == null) { + // The row was not locked + success = true; + return rowLock; } - boolean success = false; - try { - while (true) { - RowLockImpl existingRowLock = lockedRows.putIfAbsent(rowKey, rowLock); - if (existingRowLock == null) { - // The row was not locked - success = true; - return rowLock; - } - // The row is already locked by a different thread. Wait for the lock to be released - // for waitDurationMs time - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - RowLockImpl usableRowLock = existingRowLock.lock(waitDurationMs); - if (usableRowLock != null) { - success = true; - return usableRowLock; - } - // The existing lock was released and removed from the hash map before the current - // thread attempt to lock - long now = EnvironmentEdgeManager.currentTimeMillis(); - long timePassed = now - startTime; - if (timePassed > waitDurationMs) { - throw new TimeoutIOException("Timed out waiting for lock for row: " + rowKey); - } - waitDurationMs -= timePassed; - } - } catch (InterruptedException ie) { - LOGGER.warn("Thread interrupted waiting for lock on row: " + rowKey); - InterruptedIOException iie = new InterruptedIOException(); - iie.initCause(ie); - Thread.currentThread().interrupt(); - throw iie; - } finally { - if (traceScope != null) { - if (!success) { - traceScope.getSpan().addTimelineAnnotation("Failed to get row lock"); - } - traceScope.close(); - } + // The row is already locked by a different thread. 
Wait for the lock to be released + // for waitDurationMs time + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + RowLockImpl usableRowLock = existingRowLock.lock(waitDurationMs); + if (usableRowLock != null) { + success = true; + return usableRowLock; } + // The existing lock was released and removed from the hash map before the current + // thread attempt to lock + long now = EnvironmentEdgeManager.currentTimeMillis(); + long timePassed = now - startTime; + if (timePassed > waitDurationMs) { + throw new TimeoutIOException("Timed out waiting for lock for row: " + rowKey); + } + waitDurationMs -= timePassed; + } + } catch (InterruptedException ie) { + LOGGER.warn("Thread interrupted waiting for lock on row: " + rowKey); + InterruptedIOException iie = new InterruptedIOException(); + iie.initCause(ie); + Thread.currentThread().interrupt(); + throw iie; + } finally { + if (traceScope != null) { + if (!success) { + traceScope.getSpan().addTimelineAnnotation("Failed to get row lock"); + } + traceScope.close(); + } } - - public RowLock lockRow(byte[] row, long waitDurationMs) throws IOException { - ImmutableBytesPtr rowKey = new ImmutableBytesPtr(row); - return lockRow(rowKey, waitDurationMs); + } + + public RowLock lockRow(byte[] row, long waitDurationMs) throws IOException { + ImmutableBytesPtr rowKey = new ImmutableBytesPtr(row); + return lockRow(rowKey, waitDurationMs); + } + + /** + * Class used to represent a lock on a row. + */ + public class RowLockImpl implements RowLock { + private final ImmutableBytesPtr rowKey; + private int count = 1; + private boolean usable = true; + private final ReentrantLock lock = new ReentrantLock(true); + private String threadName; + + private RowLockImpl(ImmutableBytesPtr rowKey) { + this.rowKey = rowKey; + lock.lock(); + threadName = Thread.currentThread().getName(); } - /** - * Class used to represent a lock on a row. 
- */ - public class RowLockImpl implements RowLock { - private final ImmutableBytesPtr rowKey; - private int count = 1; - private boolean usable = true; - private final ReentrantLock lock = new ReentrantLock(true); - private String threadName; - - private RowLockImpl(ImmutableBytesPtr rowKey) { - this.rowKey = rowKey; - lock.lock(); - threadName = Thread.currentThread().getName(); + RowLockImpl lock(long waitDuration) throws InterruptedException, TimeoutIOException { + synchronized (this) { + if (!usable) { + return null; } - - RowLockImpl lock(long waitDuration) throws InterruptedException, TimeoutIOException { - synchronized (this) { - if (!usable) { - return null; - } - count++; - } - boolean success = false; - threadName = Thread.currentThread().getName(); - try { - if (!lock.tryLock(waitDuration, TimeUnit.MILLISECONDS)) { - throw new TimeoutIOException("Timed out waiting for lock for row: " + rowKey); - } - success = true; - } finally { - if (!success) { - cleanUp(); - return null; - } - } - return this; + count++; + } + boolean success = false; + threadName = Thread.currentThread().getName(); + try { + if (!lock.tryLock(waitDuration, TimeUnit.MILLISECONDS)) { + throw new TimeoutIOException("Timed out waiting for lock for row: " + rowKey); } - - private void cleanUp() { - synchronized (this) { - count--; - if (count == 0) { - RowLockImpl removed = lockedRows.remove(rowKey); - assert removed == this : "We should never remove a different lock"; - usable = false; - } else { - assert count > 0 : "Reference count should never be less than zero"; - } - } - } - @Override - public void release() { - lock.unlock(); - cleanUp(); + success = true; + } finally { + if (!success) { + cleanUp(); + return null; } + } + return this; + } - @Override - public ImmutableBytesPtr getRowKey() { - return rowKey; + private void cleanUp() { + synchronized (this) { + count--; + if (count == 0) { + RowLockImpl removed = lockedRows.remove(rowKey); + assert removed == this : "We should never remove a different lock"; + usable = false; + } else { + assert count > 0 : "Reference count should never be less than zero"; } + } + } - @Override - public String toString() { - return "RowLockImpl{" + - "row=" + rowKey + - ", count=" + count + - ", threadName=" + threadName + - ", lock=" + lock + - ", usable=" + usable + - "}"; - } + @Override + public void release() { + lock.unlock(); + cleanUp(); } + @Override + public ImmutableBytesPtr getRowKey() { + return rowKey; + } + + @Override + public String toString() { + return "RowLockImpl{" + "row=" + rowKey + ", count=" + count + ", threadName=" + threadName + + ", lock=" + lock + ", usable=" + usable + "}"; + } + } + + /** + * Row lock held by a given thread. One thread may acquire multiple locks on the same row + * simultaneously. The locks must be released by calling release() from the same thread. + */ + public interface RowLock { /** - * Row lock held by a given thread. - * One thread may acquire multiple locks on the same row simultaneously. - * The locks must be released by calling release() from the same thread. + * Release the given lock. If there are no remaining locks held by the current thread then + * unlock the row and allow other threads to acquire the lock. + * @throws IllegalArgumentException if called by a different thread than the lock owning thread */ - public interface RowLock { - /** - * Release the given lock. If there are no remaining locks held by the current thread - * then unlock the row and allow other threads to acquire the lock. 
- * @throws IllegalArgumentException if called by a different thread than the lock owning - * thread - */ - void release(); - - ImmutableBytesPtr getRowKey(); - } + void release(); + + ImmutableBytesPtr getRowKey(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java index 3353f9e0134..299ea1774fc 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/balancer/IndexLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java index bbe94df7a1a..5da933bd92e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexBuilder.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.builder; @@ -31,113 +39,115 @@ /** * Basic implementation of the {@link IndexBuilder} that doesn't do any actual work of indexing. *
<p>
- * You should extend this class, rather than implementing IndexBuilder directly to maintain compatability going forward.
+ * You should extend this class, rather than implementing IndexBuilder directly to maintain
+ * compatability going forward.
 *
<p>
    - * Generally, you should consider using one of the implemented IndexBuilders (e.g {@link NonTxIndexBuilder}) as there is - * a lot of work required to keep an index table up-to-date. + * Generally, you should consider using one of the implemented IndexBuilders (e.g + * {@link NonTxIndexBuilder}) as there is a lot of work required to keep an index table up-to-date. */ public abstract class BaseIndexBuilder implements IndexBuilder { - public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class"; - private static final Logger LOGGER = LoggerFactory.getLogger(BaseIndexBuilder.class); - - protected boolean stopped; - protected RegionCoprocessorEnvironment env; - protected IndexCodec codec; - - @Override - public void extendBaseIndexBuilderInstead() {} - - @Override - public void setup(RegionCoprocessorEnvironment env) throws IOException { - this.env = env; - // setup the phoenix codec. Generally, this will just be in standard one, but abstracting here - // so we can use it later when generalizing covered indexes - Configuration conf = env.getConfiguration(); - Class codecClass = conf.getClass(CODEC_CLASS_NAME_KEY, null, IndexCodec.class); - try { - Constructor meth = codecClass.getDeclaredConstructor(new Class[0]); - meth.setAccessible(true); - this.codec = meth.newInstance(); - this.codec.initialize(conf, env.getRegion().getRegionInfo().getTable().getName()); - } catch (Exception e) { - throw new IOException(e); - } + public static final String CODEC_CLASS_NAME_KEY = "org.apache.hadoop.hbase.index.codec.class"; + private static final Logger LOGGER = LoggerFactory.getLogger(BaseIndexBuilder.class); + + protected boolean stopped; + protected RegionCoprocessorEnvironment env; + protected IndexCodec codec; + + @Override + public void extendBaseIndexBuilderInstead() { + } + + @Override + public void setup(RegionCoprocessorEnvironment env) throws IOException { + this.env = env; + // setup the phoenix codec. Generally, this will just be in standard one, but abstracting here + // so we can use it later when generalizing covered indexes + Configuration conf = env.getConfiguration(); + Class codecClass = + conf.getClass(CODEC_CLASS_NAME_KEY, null, IndexCodec.class); + try { + Constructor meth = codecClass.getDeclaredConstructor(new Class[0]); + meth.setAccessible(true); + this.codec = meth.newInstance(); + this.codec.initialize(conf, env.getRegion().getRegionInfo().getTable().getName()); + } catch (Exception e) { + throw new IOException(e); } - - @Override - public void batchStarted(MiniBatchOperationInProgress miniBatchOp, IndexMetaData context) throws IOException { - // noop - } - - @Override - public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) throws IOException { - return IndexMetaData.NULL_INDEX_META_DATA; - } - - @Override - public void batchCompleted(MiniBatchOperationInProgress miniBatchOp) { - // noop - } - - /** - * By default, we always attempt to index the mutation. Commonly this can be slow (because the framework spends the - * time to do the indexing, only to realize that you don't need it) or not ideal (if you want to turn on/off - * indexing on a table without completely reloading it). 
- */ - @Override - public boolean isEnabled(Mutation m) { - // ask the codec to see if we should even attempt indexing - return this.codec.isEnabled(m); - } - - @Override - public boolean isAtomicOp(Mutation m) { - return false; - } - - @Override - public List executeAtomicOp(Increment inc) throws IOException { - return null; - } - - /** - * Exposed for testing! - * - * @param codec - * codec to use for this instance of the builder - */ - public void setIndexCodecForTesting(IndexCodec codec) { - this.codec = codec; - } - - @Override - public Collection> getIndexUpdateForFilteredRows(Collection filtered, IndexMetaData context) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void stop(String why) { - LOGGER.debug("Stopping because: " + why); - this.stopped = true; - } - - @Override - public boolean isStopped() { - return this.stopped; - } - - @Override - public ReplayWrite getReplayWrite(Mutation m) { - return null; - } - - @Override - public boolean returnResult(Mutation m) { - return false; - } - - public RegionCoprocessorEnvironment getEnv() { - return this.env; - } -} \ No newline at end of file + } + + @Override + public void batchStarted(MiniBatchOperationInProgress miniBatchOp, + IndexMetaData context) throws IOException { + // noop + } + + @Override + public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) + throws IOException { + return IndexMetaData.NULL_INDEX_META_DATA; + } + + @Override + public void batchCompleted(MiniBatchOperationInProgress miniBatchOp) { + // noop + } + + /** + * By default, we always attempt to index the mutation. Commonly this can be slow (because the + * framework spends the time to do the indexing, only to realize that you don't need it) or not + * ideal (if you want to turn on/off indexing on a table without completely reloading it). + */ + @Override + public boolean isEnabled(Mutation m) { + // ask the codec to see if we should even attempt indexing + return this.codec.isEnabled(m); + } + + @Override + public boolean isAtomicOp(Mutation m) { + return false; + } + + @Override + public List executeAtomicOp(Increment inc) throws IOException { + return null; + } + + /** + * Exposed for testing! codec to use for this instance of the builder + */ + public void setIndexCodecForTesting(IndexCodec codec) { + this.codec = codec; + } + + @Override + public Collection> getIndexUpdateForFilteredRows(Collection filtered, + IndexMetaData context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void stop(String why) { + LOGGER.debug("Stopping because: " + why); + this.stopped = true; + } + + @Override + public boolean isStopped() { + return this.stopped; + } + + @Override + public ReplayWrite getReplayWrite(Mutation m) { + return null; + } + + @Override + public boolean returnResult(Mutation m) { + return false; + } + + public RegionCoprocessorEnvironment getEnv() { + return this.env; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java index 68076a4d795..0b50df8cca4 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Stoppable; @@ -34,10 +33,11 @@ import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.ReplayWrite; import org.apache.phoenix.hbase.index.covered.IndexMetaData; +import org.apache.phoenix.hbase.index.covered.data.CachedLocalTable; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.hbase.index.covered.data.CachedLocalTable; import org.apache.phoenix.index.PhoenixIndexMetaData; +import org.apache.phoenix.thirdparty.com.google.common.collect.ListMultimap; import org.apache.phoenix.util.IndexUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +54,7 @@ public class IndexBuildManager implements Stoppable { /** * @param env environment in which this is running. Used to setup the - * {@link IndexBuilder} and executor + * {@link IndexBuilder} and executor * @throws IOException if an {@link IndexBuilder} cannot be correctly steup */ public IndexBuildManager(RegionCoprocessorEnvironment env) throws IOException { @@ -63,68 +63,68 @@ public IndexBuildManager(RegionCoprocessorEnvironment env) throws IOException { this.delegate = getIndexBuilder(env); this.regionCoprocessorEnvironment = env; } - + private static IndexBuilder getIndexBuilder(RegionCoprocessorEnvironment e) throws IOException { Configuration conf = e.getConfiguration(); Class builderClass = - conf.getClass(IndexUtil.INDEX_BUILDER_CONF_KEY, null, IndexBuilder.class); + conf.getClass(IndexUtil.INDEX_BUILDER_CONF_KEY, null, IndexBuilder.class); try { IndexBuilder builder = builderClass.newInstance(); builder.setup(e); return builder; } catch (InstantiationException e1) { - throw new IOException("Couldn't instantiate index builder:" + builderClass - + ", disabling indexing on table " + e.getRegion().getTableDescriptor().getTableName().getNameAsString()); + throw new IOException( + "Couldn't instantiate index builder:" + builderClass + ", disabling indexing on table " + + e.getRegion().getTableDescriptor().getTableName().getNameAsString()); } catch (IllegalAccessException e1) { - throw new IOException("Couldn't instantiate index builder:" + builderClass - + ", disabling indexing on table " + e.getRegion().getTableDescriptor().getTableName().getNameAsString()); + throw new IOException( + "Couldn't instantiate index builder:" + builderClass + ", disabling indexing on table " + + e.getRegion().getTableDescriptor().getTableName().getNameAsString()); } } - public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) throws IOException { - return this.delegate.getIndexMetaData(miniBatchOp); + public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) + throws IOException { + return this.delegate.getIndexMetaData(miniBatchOp); } - public void getIndexUpdates(ListMultimap> indexUpdates, - MiniBatchOperationInProgress miniBatchOp, - Collection mutations, - 
IndexMetaData indexMetaData) throws Throwable { + public void getIndexUpdates( + ListMultimap> indexUpdates, + MiniBatchOperationInProgress miniBatchOp, Collection mutations, + IndexMetaData indexMetaData) throws Throwable { // notify the delegate that we have started processing a batch this.delegate.batchStarted(miniBatchOp, indexMetaData); - CachedLocalTable cachedLocalTable = - CachedLocalTable.build( - mutations, - (PhoenixIndexMetaData)indexMetaData, - this.regionCoprocessorEnvironment.getRegion()); + CachedLocalTable cachedLocalTable = CachedLocalTable.build(mutations, + (PhoenixIndexMetaData) indexMetaData, this.regionCoprocessorEnvironment.getRegion()); // Avoid the Object overhead of the executor when it's not actually parallelizing anything. for (Mutation m : mutations) { - Collection> updates = delegate.getIndexUpdate(m, indexMetaData, cachedLocalTable); + Collection> updates = + delegate.getIndexUpdate(m, indexMetaData, cachedLocalTable); for (Pair update : updates) { - indexUpdates.put(new HTableInterfaceReference(new ImmutableBytesPtr(update.getSecond())), new Pair<>(update.getFirst(), m.getRow())); + indexUpdates.put(new HTableInterfaceReference(new ImmutableBytesPtr(update.getSecond())), + new Pair<>(update.getFirst(), m.getRow())); } } } public Collection> getIndexUpdate( - MiniBatchOperationInProgress miniBatchOp, - Collection mutations) throws Throwable { + MiniBatchOperationInProgress miniBatchOp, Collection mutations) + throws Throwable { // notify the delegate that we have started processing a batch final IndexMetaData indexMetaData = this.delegate.getIndexMetaData(miniBatchOp); this.delegate.batchStarted(miniBatchOp, indexMetaData); - CachedLocalTable cachedLocalTable = - CachedLocalTable.build( - mutations, - (PhoenixIndexMetaData)indexMetaData, - this.regionCoprocessorEnvironment.getRegion()); + CachedLocalTable cachedLocalTable = CachedLocalTable.build(mutations, + (PhoenixIndexMetaData) indexMetaData, this.regionCoprocessorEnvironment.getRegion()); // Avoid the Object overhead of the executor when it's not actually parallelizing anything. 
ArrayList> results = new ArrayList<>(mutations.size()); for (Mutation m : mutations) { - Collection> updates = delegate.getIndexUpdate(m, indexMetaData, cachedLocalTable); + Collection> updates = + delegate.getIndexUpdate(m, indexMetaData, cachedLocalTable); if (PhoenixIndexMetaData.isIndexRebuild(m.getAttributesMap())) { for (Pair update : updates) { update.getFirst().setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, - BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); + BaseScannerRegionObserverConstants.REPLAY_INDEX_REBUILD_WRITES); } } results.addAll(updates); @@ -132,8 +132,8 @@ public Collection> getIndexUpdate( return results; } - public Collection> getIndexUpdateForFilteredRows( - Collection filtered, IndexMetaData indexMetaData) throws IOException { + public Collection> getIndexUpdateForFilteredRows(Collection filtered, + IndexMetaData indexMetaData) throws IOException { // this is run async, so we can take our time here return delegate.getIndexUpdateForFilteredRows(filtered, indexMetaData); } @@ -142,8 +142,8 @@ public void batchCompleted(MiniBatchOperationInProgress miniBatchOp) { delegate.batchCompleted(miniBatchOp); } - public void batchStarted(MiniBatchOperationInProgress miniBatchOp, IndexMetaData indexMetaData) - throws IOException { + public void batchStarted(MiniBatchOperationInProgress miniBatchOp, + IndexMetaData indexMetaData) throws IOException { delegate.batchStarted(miniBatchOp, indexMetaData); } @@ -156,9 +156,9 @@ public boolean isAtomicOp(Mutation m) { } public List executeAtomicOp(Increment inc) throws IOException { - return delegate.executeAtomicOp(inc); + return delegate.executeAtomicOp(inc); } - + @Override public void stop(String why) { if (stopped) { diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java index 23a642ed69f..38599038cd8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -69,36 +69,34 @@ public interface IndexBuilder extends Stoppable { * Implementers must ensure that this method is thread-safe - it could (and probably will) be * called concurrently for different mutations, which may or may not be part of the same batch. * @param mutation update to the primary table to be indexed. - * @param context index meta data for the mutation + * @param context index meta data for the mutation * @return a Map of the mutations to {@code make -> target } index table name * @throws IOException on failure */ - public Collection> getIndexUpdate(Mutation mutation, IndexMetaData context, LocalHBaseState localHBaseState) throws IOException; + public Collection> getIndexUpdate(Mutation mutation, IndexMetaData context, + LocalHBaseState localHBaseState) throws IOException; - /** - * Build an index update to cleanup the index when we remove KeyValue s via the normal flush or compaction - * mechanisms. 
Currently not implemented by any implementors nor called, but left here to be implemented if we - * ever need it. In Jesse's words: - * - * Arguably, this is a correctness piece that should be used, but isn't. Basically, it *could* be that - * if a compaction/flush were to remove a key (too old, too many versions) you might want to cleanup the index table - * as well, if it were to get out of sync with the primary table. For instance, you might get multiple versions of - * the same row, which should eventually age of the oldest version. However, in the index table there would only - * ever be two entries for that row - the first one, matching the original row, and the delete marker for the index - * update, set when we got a newer version of the primary row. So, a basic HBase scan wouldn't show the index update - * b/c its covered by the delete marker, but an older timestamp based read would actually show the index row, even - * after the primary table row is gone due to MAX_VERSIONS requirement. - * - * @param filtered KeyValue s that previously existed, but won't be included - * in further output from HBase. - * @param context TODO - * - * @return a {@link Map} of the mutations to {@code make -> target } index table name - * @throws IOException on failure - */ - public Collection> getIndexUpdateForFilteredRows( - Collection filtered, IndexMetaData context) - throws IOException; + /** + * Build an index update to cleanup the index when we remove KeyValue s via the normal flush or + * compaction mechanisms. Currently not implemented by any implementors nor called, but left here + * to be implemented if we ever need it. In Jesse's words: Arguably, this is a correctness piece + * that should be used, but isn't. Basically, it *could* be that if a compaction/flush were to + * remove a key (too old, too many versions) you might want to cleanup the index table as well, if + * it were to get out of sync with the primary table. For instance, you might get multiple + * versions of the same row, which should eventually age of the oldest version. However, in the + * index table there would only ever be two entries for that row - the first one, matching the + * original row, and the delete marker for the index update, set when we got a newer version of + * the primary row. So, a basic HBase scan wouldn't show the index update b/c its covered by the + * delete marker, but an older timestamp based read would actually show the index row, even after + * the primary table row is gone due to MAX_VERSIONS requirement. + * @param filtered KeyValue s that previously existed, but won't be included in further output + * from HBase. + * @param context TODO + * @return a {@link Map} of the mutations to {@code make -> target } index table name + * @throws IOException on failure + */ + public Collection> getIndexUpdateForFilteredRows(Collection filtered, + IndexMetaData context) throws IOException; /** * Notification that a batch of updates has successfully been written. @@ -113,13 +111,14 @@ public Collection> getIndexUpdateForFilteredRows( * after the {@link #getIndexUpdate} methods. Therefore, you will likely need an attribute * on your {@link Put}/{@link Delete} to indicate it is a batch operation. 
* @param miniBatchOp the full batch operation to be written - * @param context TODO - * @throws IOException + * @param context TODO */ - public void batchStarted(MiniBatchOperationInProgress miniBatchOp, IndexMetaData context) throws IOException; + public void batchStarted(MiniBatchOperationInProgress miniBatchOp, + IndexMetaData context) throws IOException; + + public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) + throws IOException; - public IndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) throws IOException; - /** * This allows the codec to dynamically change whether or not indexing should take place for a * table. If it doesn't take place, we can save a lot of time on the regular Put patch. By making @@ -133,7 +132,7 @@ public Collection> getIndexUpdateForFilteredRows( * basis, as each codec is instantiated per-region. */ public boolean isEnabled(Mutation m); - + /** * True if mutation has an ON DUPLICATE KEY clause * @param m mutation @@ -144,8 +143,8 @@ public Collection> getIndexUpdateForFilteredRows( /** * Calculate the mutations based on the ON DUPLICATE KEY clause * @param inc increment to run against - * @return list of mutations as a result of executing the ON DUPLICATE KEY clause - * or null if Increment does not represent an ON DUPLICATE KEY clause. + * @return list of mutations as a result of executing the ON DUPLICATE KEY clause or null if + * Increment does not represent an ON DUPLICATE KEY clause. */ public List executeAtomicOp(Increment inc) throws IOException; @@ -153,9 +152,8 @@ public Collection> getIndexUpdateForFilteredRows( /** * True if mutation needs to return result. - * * @param m Mutation object. * @return True if mutation needs to return result, False otherwise. */ boolean returnResult(Mutation m); -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/CoveredColumns.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/CoveredColumns.java index c6474e1cebb..91c82bc8f83 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/CoveredColumns.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/CoveredColumns.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,8 +32,8 @@ public class CoveredColumns { Set columns = new HashSet(); - public Collection findNonCoveredColumns( - Collection columns2) { + public Collection + findNonCoveredColumns(Collection columns2) { List uncovered = new ArrayList(); for (ColumnReference column : columns2) { if (!columns.contains(column)) { diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java index 614b7d9acda..90840b1864c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.covered; @@ -34,226 +42,231 @@ /** * Manage the state of the HRegion's view of the table, for the single row. *
- * Currently, this is a single-use object - you need to create a new one for each row that you need to manage. In the
- * future, we could make this object reusable, but for the moment its easier to manage as a throw-away object.
+ * Currently, this is a single-use object - you need to create a new one for each row that you need
+ * to manage. In the future, we could make this object reusable, but for the moment its easier to
+ * manage as a throw-away object.
    - * This class is not thread-safe - it requires external synchronization is access concurrently. + * This class is not thread-safe - it requires external synchronization is access + * concurrently. */ public class LocalTableState implements TableState { - private long ts; - private KeyValueStore memstore; - private LocalHBaseState table; - private Mutation update; - private Set trackedColumns = new HashSet(); - private ScannerBuilder scannerBuilder; - private List kvs = new ArrayList(); - private List hints; - private CoveredColumns columnSet; + private long ts; + private KeyValueStore memstore; + private LocalHBaseState table; + private Mutation update; + private Set trackedColumns = new HashSet(); + private ScannerBuilder scannerBuilder; + private List kvs = new ArrayList(); + private List hints; + private CoveredColumns columnSet; - public LocalTableState(LocalHBaseState table, Mutation update) { - this.table = table; - this.update = update; - this.memstore = new IndexMemStore(); - this.scannerBuilder = new ScannerBuilder(memstore, update); - this.columnSet = new CoveredColumns(); - } + public LocalTableState(LocalHBaseState table, Mutation update) { + this.table = table; + this.update = update; + this.memstore = new IndexMemStore(); + this.scannerBuilder = new ScannerBuilder(memstore, update); + this.columnSet = new CoveredColumns(); + } - public void addPendingUpdates(Cell... kvs) { - if (kvs == null) return; - addPendingUpdates(Arrays.asList(kvs)); - } + public void addPendingUpdates(Cell... kvs) { + if (kvs == null) return; + addPendingUpdates(Arrays.asList(kvs)); + } - public void addPendingUpdates(List kvs) { - if (kvs == null) return; - setPendingUpdates(kvs); - addUpdate(kvs); - } + public void addPendingUpdates(List kvs) { + if (kvs == null) return; + setPendingUpdates(kvs); + addUpdate(kvs); + } - private void addUpdate(List list) { - addUpdate(list, true); - } + private void addUpdate(List list) { + addUpdate(list, true); + } - private void addUpdate(List list, boolean overwrite) { - if (list == null) return; - for (Cell kv : list) { - this.memstore.add(kv, overwrite); - } + private void addUpdate(List list, boolean overwrite) { + if (list == null) return; + for (Cell kv : list) { + this.memstore.add(kv, overwrite); } + } - private void addUpdateCells(List list, boolean overwrite) { - if (list == null) return; - // Avoid a copy of the Cell into a KeyValue if it's already a KeyValue - for (Cell c : list) { - this.memstore.add(c, overwrite); - } + private void addUpdateCells(List list, boolean overwrite) { + if (list == null) return; + // Avoid a copy of the Cell into a KeyValue if it's already a KeyValue + for (Cell c : list) { + this.memstore.add(c, overwrite); } + } - @Override - public long getCurrentTimestamp() { - return this.ts; - } + @Override + public long getCurrentTimestamp() { + return this.ts; + } + + /** + * Set the current timestamp up to which the table should allow access to the underlying table. + * This overrides the timestamp view provided by the indexer - use with care! + * @param timestamp timestamp up to which the table should allow access. + */ + public void setCurrentTimestamp(long timestamp) { + this.ts = timestamp; + } + + public void resetTrackedColumns() { + this.trackedColumns.clear(); + } - /** - * Set the current timestamp up to which the table should allow access to the underlying table. - * This overrides the timestamp view provided by the indexer - use with care! - * @param timestamp timestamp up to which the table should allow access. 
- */ - public void setCurrentTimestamp(long timestamp) { - this.ts = timestamp; + public Set getTrackedColumns() { + return this.trackedColumns; + } + + /** + * Get a scanner on the columns that are needed by the index. + *
+ * The returned scanner is already pre-seeked to the first {@link KeyValue} that matches the given
+ * columns with a timestamp earlier than the timestamp to which the table is currently set (the
+ * current state of the table for which we need to build an update).
+ * If none of the passed columns matches any of the columns in the pending update (as determined
+ * by {@link ColumnReference#matchesFamily(byte[])} and
+ * {@link ColumnReference#matchesQualifier(byte[])}, then an empty scanner will be returned. This
+ * is because it doesn't make sense to build index updates when there is no change in the table
+ * state for any of the columns you are indexing.
+ * NOTE: This method should not be used during
+ * IndexCodec#getIndexDeletes(TableState, BatchState, byte[], byte[]) as the pending update will
+ * not yet have been applied - you are merely attempting to cleanup the current state and
+ * therefore do not need to track the indexed columns.
    + * As a side-effect, we update a timestamp for the next-most-recent timestamp for the columns you + * request - you will never see a column with the timestamp we are tracking, but the next oldest + * timestamp for that column. + * @param indexedColumns the columns to that will be indexed + * @param ignoreNewerMutations ignore mutations newer than m when determining current state. + * Useful when replaying mutation state for partial index rebuild + * where writes succeeded to the data table, but not to the index + * table. + * @param indexMetaData TODO + * @return an iterator over the columns and the {@link IndexUpdate} that should be passed back to + * the builder. Even if no update is necessary for the requested columns, you still need + * to return the {@link IndexUpdate}, just don't set the update for the + * {@link IndexUpdate}. + */ + public Pair getIndexedColumnsTableState( + Collection indexedColumns, boolean ignoreNewerMutations, + boolean isStateForDeletes, IndexMetaData indexMetaData) throws IOException { + // check to see if we haven't initialized any columns yet + Collection toCover = + this.columnSet.findNonCoveredColumns(indexedColumns); + + // add the covered columns to the set + for (ColumnReference ref : toCover) { + this.columnSet.addColumn(ref); } - - public void resetTrackedColumns() { - this.trackedColumns.clear(); + boolean requiresPriorRowState = indexMetaData.requiresPriorRowState(update); + if (!toCover.isEmpty()) { + // no need to perform scan to find prior row values when the indexed columns are immutable, as + // by definition, there won't be any. If we have indexed non row key columns, then we need to + // look up the row so that we can formulate the delete of the index row correctly. We'll + // always + // have our "empty" key value column, so we check if we have more than that as a basis for + // needing to lookup the prior row values. + if (requiresPriorRowState) { + // add the current state of the row. Uses listCells() to avoid a new array creation. + this.addUpdateCells(this.table.getCurrentRowState(update, toCover, ignoreNewerMutations), + false); + } } - public Set getTrackedColumns() { - return this.trackedColumns; + // filter out things with a newer timestamp and track the column references to which it applies + ColumnTracker tracker = new ColumnTracker(indexedColumns); + synchronized (this.trackedColumns) { + // we haven't seen this set of columns before, so we need to create a new tracker + if (!this.trackedColumns.contains(tracker)) { + this.trackedColumns.add(tracker); + } } - /** - * Get a scanner on the columns that are needed by the index. - *
- * The returned scanner is already pre-seeked to the first {@link KeyValue} that matches the given
- * columns with a timestamp earlier than the timestamp to which the table is currently set (the
- * current state of the table for which we need to build an update).
- * If none of the passed columns matches any of the columns in the pending update (as determined
- * by {@link ColumnReference#matchesFamily(byte[])} and
- * {@link ColumnReference#matchesQualifier(byte[])}, then an empty scanner will be returned. This
- * is because it doesn't make sense to build index updates when there is no change in the table
- * state for any of the columns you are indexing.
- * NOTE: This method should not be used during
- * IndexCodec#getIndexDeletes(TableState, BatchState, byte[], byte[]) as the pending update will not yet have been
- * applied - you are merely attempting to cleanup the current state and therefore do not
- * need to track the indexed columns.
    - * As a side-effect, we update a timestamp for the next-most-recent timestamp for the columns you - * request - you will never see a column with the timestamp we are tracking, but the next oldest - * timestamp for that column. - * @param indexedColumns the columns to that will be indexed - * @param ignoreNewerMutations ignore mutations newer than m when determining current state. Useful - * when replaying mutation state for partial index rebuild where writes succeeded to the data - * table, but not to the index table. - * @param indexMetaData TODO - * @return an iterator over the columns and the {@link IndexUpdate} that should be passed back to - * the builder. Even if no update is necessary for the requested columns, you still need - * to return the {@link IndexUpdate}, just don't set the update for the - * {@link IndexUpdate}. - * @throws IOException - */ - public Pair getIndexedColumnsTableState( - Collection indexedColumns, boolean ignoreNewerMutations, boolean isStateForDeletes, IndexMetaData indexMetaData) throws IOException { - // check to see if we haven't initialized any columns yet - Collection toCover = this.columnSet.findNonCoveredColumns(indexedColumns); - - // add the covered columns to the set - for (ColumnReference ref : toCover) { - this.columnSet.addColumn(ref); - } - boolean requiresPriorRowState = indexMetaData.requiresPriorRowState(update); - if (!toCover.isEmpty()) { - // no need to perform scan to find prior row values when the indexed columns are immutable, as - // by definition, there won't be any. If we have indexed non row key columns, then we need to - // look up the row so that we can formulate the delete of the index row correctly. We'll always - // have our "empty" key value column, so we check if we have more than that as a basis for - // needing to lookup the prior row values. - if (requiresPriorRowState) { - // add the current state of the row. Uses listCells() to avoid a new array creation. - this.addUpdateCells(this.table.getCurrentRowState(update, toCover, ignoreNewerMutations), false); - } - } + CoveredDeleteScanner scanner = + this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts, + // If we're determining the index state for deletes and either + // a) we've looked up the prior row state or + // b) we're inserting immutable data + // then allow a null scanner to be returned. 
+ // FIXME: this is crappy code - we need to simplify the global mutable secondary index + // implementation + // TODO: use mutable transactional secondary index implementation instead + // (PhoenixTransactionalIndexer) + isStateForDeletes && (requiresPriorRowState || insertingData(update))); + return new Pair(scanner, new IndexUpdate(tracker)); + } - // filter out things with a newer timestamp and track the column references to which it applies - ColumnTracker tracker = new ColumnTracker(indexedColumns); - synchronized (this.trackedColumns) { - // we haven't seen this set of columns before, so we need to create a new tracker - if (!this.trackedColumns.contains(tracker)) { - this.trackedColumns.add(tracker); - } + private static boolean insertingData(Mutation m) { + for (Collection cells : m.getFamilyCellMap().values()) { + for (Cell cell : cells) { + if (cell.getType() != Cell.Type.Put) { + return false; } - - CoveredDeleteScanner scanner = this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts, - // If we're determining the index state for deletes and either - // a) we've looked up the prior row state or - // b) we're inserting immutable data - // then allow a null scanner to be returned. - // FIXME: this is crappy code - we need to simplify the global mutable secondary index implementation - // TODO: use mutable transactional secondary index implementation instead (PhoenixTransactionalIndexer) - isStateForDeletes && (requiresPriorRowState || insertingData(update))); - return new Pair(scanner, new IndexUpdate(tracker)); + } } + return true; + } - - private static boolean insertingData(Mutation m) { - for (Collection cells : m.getFamilyCellMap().values()) { - for (Cell cell : cells) { - if (cell.getType() != Cell.Type.Put) { - return false; - } - } - } - return true; - } + @Override + public byte[] getCurrentRowKey() { + return this.update.getRow(); + } - @Override - public byte[] getCurrentRowKey() { - return this.update.getRow(); - } + /** + */ + public void setHints(List hints) { + this.hints = hints; + } - /** - * @param hints - */ - public void setHints(List hints) { - this.hints = hints; - } + @Override + public List getIndexColumnHints() { + return this.hints; + } - @Override - public List getIndexColumnHints() { - return this.hints; - } + @Override + public Collection getPendingUpdate() { + return this.kvs; + } - @Override - public Collection getPendingUpdate() { - return this.kvs; - } + /** + * Set the {@link KeyValue}s in the update for which we are currently building an index update, + * but don't actually apply them. pending {@link KeyValue}s + */ + public void setPendingUpdates(Collection update) { + this.kvs.clear(); + this.kvs.addAll(update); + } - /** - * Set the {@link KeyValue}s in the update for which we are currently building an index update, but don't actually - * apply them. - * - * @param update - * pending {@link KeyValue}s - */ - public void setPendingUpdates(Collection update) { - this.kvs.clear(); - this.kvs.addAll(update); - } - - /** - * Apply the {@link KeyValue}s set in {@link #setPendingUpdates(Collection)}. - */ - public void applyPendingUpdates() { - this.addUpdate(kvs); - } + /** + * Apply the {@link KeyValue}s set in {@link #setPendingUpdates(Collection)}. + */ + public void applyPendingUpdates() { + this.addUpdate(kvs); + } - /** - * Rollback all the given values from the underlying state. 
- * - * @param values - */ - public void rollback(Collection values) { - for (KeyValue kv : values) { - this.memstore.rollback(kv); - } + /** + * Rollback all the given values from the underlying state. + */ + public void rollback(Collection values) { + for (KeyValue kv : values) { + this.memstore.rollback(kv); } + } - @Override - public Pair getIndexUpdateState(Collection indexedColumns, boolean ignoreNewerMutations, boolean isStateForDeletes, IndexMetaData indexMetaData) - throws IOException { - Pair pair = getIndexedColumnsTableState(indexedColumns, ignoreNewerMutations, isStateForDeletes, indexMetaData); - ValueGetter valueGetter = IndexManagementUtil.createGetterFromScanner(pair.getFirst(), getCurrentRowKey()); - return new Pair(valueGetter, pair.getSecond()); - } -} \ No newline at end of file + @Override + public Pair getIndexUpdateState( + Collection indexedColumns, boolean ignoreNewerMutations, + boolean isStateForDeletes, IndexMetaData indexMetaData) throws IOException { + Pair pair = getIndexedColumnsTableState(indexedColumns, + ignoreNewerMutations, isStateForDeletes, indexMetaData); + ValueGetter valueGetter = + IndexManagementUtil.createGetterFromScanner(pair.getFirst(), getCurrentRowKey()); + return new Pair(valueGetter, pair.getSecond()); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java index 41d0d6cf235..e1ee48bc974 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.covered; @@ -28,232 +36,219 @@ /** * Build covered indexes for phoenix updates. *
- * Before any call to prePut/preDelete, the row has already been locked. This ensures that we don't need to do any extra
- * synchronization in the IndexBuilder.
+ * Before any call to prePut/preDelete, the row has already been locked. This ensures that we don't
+ * need to do any extra synchronization in the IndexBuilder.
    - * NOTE: This implementation doesn't cleanup the index when we remove a key-value on compaction or flush, leading to a - * bloated index that needs to be cleaned up by a background process. + * NOTE: This implementation doesn't cleanup the index when we remove a key-value on compaction or + * flush, leading to a bloated index that needs to be cleaned up by a background process. */ public class NonTxIndexBuilder extends BaseIndexBuilder { - private static final Logger LOGGER = LoggerFactory.getLogger(NonTxIndexBuilder.class); + private static final Logger LOGGER = LoggerFactory.getLogger(NonTxIndexBuilder.class); - @Override - public void setup(RegionCoprocessorEnvironment env) throws IOException { - super.setup(env); - } - - @Override - public Collection> getIndexUpdate(Mutation mutation, IndexMetaData indexMetaData, LocalHBaseState localHBaseState) throws IOException { - // create a state manager, so we can manage each batch - LocalTableState state = new LocalTableState(localHBaseState, mutation); - // build the index updates for each group - IndexUpdateManager manager = new IndexUpdateManager(indexMetaData); + @Override + public void setup(RegionCoprocessorEnvironment env) throws IOException { + super.setup(env); + } - batchMutationAndAddUpdates(manager, state, mutation, indexMetaData); + @Override + public Collection> getIndexUpdate(Mutation mutation, + IndexMetaData indexMetaData, LocalHBaseState localHBaseState) throws IOException { + // create a state manager, so we can manage each batch + LocalTableState state = new LocalTableState(localHBaseState, mutation); + // build the index updates for each group + IndexUpdateManager manager = new IndexUpdateManager(indexMetaData); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Found index updates for Mutation: " + mutation + "\n" + manager); - } + batchMutationAndAddUpdates(manager, state, mutation, indexMetaData); - return manager.toMap(); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Found index updates for Mutation: " + mutation + "\n" + manager); } - /** - * Split the mutation into batches based on the timestamps of each keyvalue. We need to check each key-value in the - * update to see if it matches the others. Generally, this will be the case, but you can add kvs to a mutation that - * don't all have the timestamp, so we need to manage everything in batches based on timestamp. - *
    - * Adds all the updates in the {@link Mutation} to the state, as a side-effect. - * @param state - * current state of the row for the mutation. - * @param m - * mutation to batch - * @param indexMetaData TODO - * @param updateMap - * index updates into which to add new updates. Modified as a side-effect. - * - * @throws IOException - */ - private void batchMutationAndAddUpdates(IndexUpdateManager manager, LocalTableState state, Mutation m, IndexMetaData indexMetaData) throws IOException { - // The cells of a mutation are broken up into time stamp batches prior to this call (in Indexer). - long ts = m.getFamilyCellMap().values().iterator().next().iterator().next().getTimestamp(); - Batch batch = new Batch(ts); - for (List family : m.getFamilyCellMap().values()) { - for (Cell kv : family) { - batch.add(kv); - if(ts != kv.getTimestamp()) { - throw new IllegalStateException("Time stamps must match for all cells in a batch"); - } - } + return manager.toMap(); + } + + /** + * Split the mutation into batches based on the timestamps of each keyvalue. We need to check each + * key-value in the update to see if it matches the others. Generally, this will be the case, but + * you can add kvs to a mutation that don't all have the timestamp, so we need to manage + * everything in batches based on timestamp. + *
    + * Adds all the updates in the {@link Mutation} to the state, as a side-effect. current state of + * the row for the mutation. mutation to batch + * @param indexMetaData TODO index updates into which to add new updates. Modified as a + * side-effect. + */ + private void batchMutationAndAddUpdates(IndexUpdateManager manager, LocalTableState state, + Mutation m, IndexMetaData indexMetaData) throws IOException { + // The cells of a mutation are broken up into time stamp batches prior to this call (in + // Indexer). + long ts = m.getFamilyCellMap().values().iterator().next().iterator().next().getTimestamp(); + Batch batch = new Batch(ts); + for (List family : m.getFamilyCellMap().values()) { + for (Cell kv : family) { + batch.add(kv); + if (ts != kv.getTimestamp()) { + throw new IllegalStateException("Time stamps must match for all cells in a batch"); } - - addMutationsForBatch(manager, batch, state, indexMetaData); + } } - /** - * For a single batch, get all the index updates and add them to the updateMap - *
- * This method manages cleaning up the entire history of the row from the given timestamp forward for out-of-order
- * (e.g. 'back in time') updates.
- * If things arrive out of order (client is using custom timestamps) we should still see the index in the correct
- * order (assuming we scan after the out-of-order update in finished). Therefore, we when we aren't the most recent
- * update to the index, we need to delete the state at the current timestamp (similar to above), but also issue a
- * delete for the added index updates at the next newest timestamp of any of the columns in the update; we need to
- * cleanup the insert so it looks like it was also deleted at that next newest timestamp. However, its not enough to
- * just update the one in front of us - that column will likely be applied to index entries up the entire history in
- * front of us, which also needs to be fixed up.
    - * However, the current update usually will be the most recent thing to be added. In that case, all we need to is - * issue a delete for the previous index row (the state of the row, without the update applied) at the current - * timestamp. This gets rid of anything currently in the index for the current state of the row (at the timestamp). - * Then we can just follow that by applying the pending update and building the index update based on the new row - * state. - * - * @param updateMap - * map to update with new index elements - * @param batch - * timestamp-based batch of edits - * @param state - * local state to update and pass to the codec - * @param indexMetaData TODO - * @return true if we cleaned up the current state forward (had a back-in-time put), false - * otherwise - * @throws IOException + addMutationsForBatch(manager, batch, state, indexMetaData); + } + + /** + * For a single batch, get all the index updates and add them to the updateMap + *
+ * This method manages cleaning up the entire history of the row from the given timestamp forward
+ * for out-of-order (e.g. 'back in time') updates.
+ * If things arrive out of order (client is using custom timestamps) we should still see the index
+ * in the correct order (assuming we scan after the out-of-order update in finished). Therefore,
+ * we when we aren't the most recent update to the index, we need to delete the state at the
+ * current timestamp (similar to above), but also issue a delete for the added index updates at
+ * the next newest timestamp of any of the columns in the update; we need to cleanup the insert so
+ * it looks like it was also deleted at that next newest timestamp. However, its not enough to
+ * just update the one in front of us - that column will likely be applied to index entries up the
+ * entire history in front of us, which also needs to be fixed up.
    + * However, the current update usually will be the most recent thing to be added. In that case, + * all we need to is issue a delete for the previous index row (the state of the row, without the + * update applied) at the current timestamp. This gets rid of anything currently in the index for + * the current state of the row (at the timestamp). Then we can just follow that by applying the + * pending update and building the index update based on the new row state. map to update with new + * index elements timestamp-based batch of edits local state to update and pass to the codec + * @param indexMetaData TODO + * @return true if we cleaned up the current state forward (had a back-in-time put), + * false otherwise + */ + private boolean addMutationsForBatch(IndexUpdateManager updateMap, Batch batch, + LocalTableState state, IndexMetaData indexMetaData) throws IOException { + + // need a temporary manager for the current batch. It should resolve any conflicts for the + // current batch. Essentially, we can get the case where a batch doesn't change the current + // state of the index (all Puts are covered by deletes), in which case we don't want to add + // anything + // A. Get the correct values for the pending state in the batch + // A.1 start by cleaning up the current state - as long as there are key-values in the batch + // that are indexed, we need to change the current state of the index. Its up to the codec to + // determine if we need to make any cleanup given the pending update. + long batchTs = batch.getTimestamp(); + state.setPendingUpdates(batch.getKvs()); + addCleanupForCurrentBatch(updateMap, batchTs, state, indexMetaData); + + // A.2 do a single pass first for the updates to the current state + state.applyPendingUpdates(); + addUpdateForGivenTimestamp(batchTs, state, updateMap, indexMetaData); + // FIXME: PHOENIX-4057 do not attempt to issue index updates + // for out-of-order mutations since it corrupts the index. + return false; + } + + private long addUpdateForGivenTimestamp(long ts, LocalTableState state, + IndexUpdateManager updateMap, IndexMetaData indexMetaData) throws IOException { + state.setCurrentTimestamp(ts); + ts = addCurrentStateMutationsForBatch(updateMap, state, indexMetaData); + return ts; + } + + private void addCleanupForCurrentBatch(IndexUpdateManager updateMap, long batchTs, + LocalTableState state, IndexMetaData indexMetaData) throws IOException { + // get the cleanup for the current state + state.setCurrentTimestamp(batchTs); + addDeleteUpdatesToMap(updateMap, state, batchTs, indexMetaData); + // ignore any index tracking from the delete + state.resetTrackedColumns(); + } + + /** + * Add the necessary mutations for the pending batch on the local state. Handles rolling up + * through history to determine the index changes after applying the batch (for the case where the + * batch is back in time). to update with index mutations current state of the table + * @param indexMetaData TODO to apply to the current state + * @return the minimum timestamp across all index columns requested. If + * {@link ColumnTracker#isNewestTime(long)} returns true on the returned + * timestamp, we know that this was not a back-in-time update. 
+ */ + private long addCurrentStateMutationsForBatch(IndexUpdateManager updateMap, LocalTableState state, + IndexMetaData indexMetaData) throws IOException { + + // get the index updates for this current batch + Iterable upserts = codec.getIndexUpserts(state, indexMetaData, + env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey(), false); + state.resetTrackedColumns(); + + /* + * go through all the pending updates. If we are sure that all the entries are the latest + * timestamp, we can just add the index updates and move on. However, if there are columns that + * we skip past (based on the timestamp of the batch), we need to roll back up the history. + * Regardless of whether or not they are the latest timestamp, the entries here are going to be + * correct for the current batch timestamp, so we add them to the updates. The only thing we + * really care about it if we need to roll up the history and fix it as we go. */ - private boolean addMutationsForBatch(IndexUpdateManager updateMap, Batch batch, LocalTableState state, - IndexMetaData indexMetaData) throws IOException { - - // need a temporary manager for the current batch. It should resolve any conflicts for the - // current batch. Essentially, we can get the case where a batch doesn't change the current - // state of the index (all Puts are covered by deletes), in which case we don't want to add - // anything - // A. Get the correct values for the pending state in the batch - // A.1 start by cleaning up the current state - as long as there are key-values in the batch - // that are indexed, we need to change the current state of the index. Its up to the codec to - // determine if we need to make any cleanup given the pending update. - long batchTs = batch.getTimestamp(); - state.setPendingUpdates(batch.getKvs()); - addCleanupForCurrentBatch(updateMap, batchTs, state, indexMetaData); - - // A.2 do a single pass first for the updates to the current state - state.applyPendingUpdates(); - addUpdateForGivenTimestamp(batchTs, state, updateMap, indexMetaData); - // FIXME: PHOENIX-4057 do not attempt to issue index updates - // for out-of-order mutations since it corrupts the index. - return false; + // timestamp of the next update we need to track + long minTs = ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP; + for (IndexUpdate update : upserts) { + // this is the one bit where we check the timestamps + final ColumnTracker tracker = update.getIndexedColumns(); + long trackerTs = tracker.getTS(); + // update the next min TS we need to track + if (trackerTs < minTs) { + minTs = tracker.getTS(); + } + + // FIXME: PHOENIX-4057 do not attempt to issue index updates + // for out-of-order mutations since it corrupts the index. 
+ if (tracker.hasNewerTimestamps()) { + continue; + } + + // only make the put if the index update has been setup + if (update.isValid()) { + byte[] table = update.getTableName(); + Mutation mutation = update.getUpdate(); + updateMap.addIndexUpdate(table, mutation); + } } - - private long addUpdateForGivenTimestamp(long ts, LocalTableState state, IndexUpdateManager updateMap, IndexMetaData indexMetaData) - throws IOException { - state.setCurrentTimestamp(ts); - ts = addCurrentStateMutationsForBatch(updateMap, state, indexMetaData); - return ts; - } - - private void addCleanupForCurrentBatch(IndexUpdateManager updateMap, long batchTs, LocalTableState state, IndexMetaData indexMetaData) - throws IOException { - // get the cleanup for the current state - state.setCurrentTimestamp(batchTs); - addDeleteUpdatesToMap(updateMap, state, batchTs, indexMetaData); - // ignore any index tracking from the delete - state.resetTrackedColumns(); - } - - /** - * Add the necessary mutations for the pending batch on the local state. Handles rolling up through history to - * determine the index changes after applying the batch (for the case where the batch is back in time). - * - * @param updateMap - * to update with index mutations - * @param state - * current state of the table - * @param indexMetaData TODO - * @param batch - * to apply to the current state - * @return the minimum timestamp across all index columns requested. If {@link ColumnTracker#isNewestTime(long)} - * returns true on the returned timestamp, we know that this was not a back-in-time update. - * @throws IOException - */ - private long addCurrentStateMutationsForBatch(IndexUpdateManager updateMap, LocalTableState state, IndexMetaData indexMetaData) - throws IOException { - - // get the index updates for this current batch - Iterable upserts = codec.getIndexUpserts( - state, indexMetaData, - env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey(), false); - state.resetTrackedColumns(); - - /* - * go through all the pending updates. If we are sure that all the entries are the latest timestamp, we can just - * add the index updates and move on. However, if there are columns that we skip past (based on the timestamp of - * the batch), we need to roll back up the history. Regardless of whether or not they are the latest timestamp, - * the entries here are going to be correct for the current batch timestamp, so we add them to the updates. The - * only thing we really care about it if we need to roll up the history and fix it as we go. - */ - // timestamp of the next update we need to track - long minTs = ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP; - for (IndexUpdate update : upserts) { - // this is the one bit where we check the timestamps - final ColumnTracker tracker = update.getIndexedColumns(); - long trackerTs = tracker.getTS(); - // update the next min TS we need to track - if (trackerTs < minTs) { - minTs = tracker.getTS(); - } - - // FIXME: PHOENIX-4057 do not attempt to issue index updates - // for out-of-order mutations since it corrupts the index. - if (tracker.hasNewerTimestamps()) { - continue; - } - - // only make the put if the index update has been setup - if (update.isValid()) { - byte[] table = update.getTableName(); - Mutation mutation = update.getUpdate(); - updateMap.addIndexUpdate(table, mutation); - } + return minTs; + } + + /** + * Get the index deletes from the codec + * {@link IndexCodec#getIndexDeletes(TableState, IndexMetaData, byte[], byte[])} and then add them + * to the update map. + *
    + * Expects the {@link LocalTableState} to already be correctly setup (correct timestamp, updates + * applied, etc). + * @param indexMetaData TODO + */ + protected void addDeleteUpdatesToMap(IndexUpdateManager updateMap, LocalTableState state, long ts, + IndexMetaData indexMetaData) throws IOException { + Iterable cleanup = codec.getIndexDeletes(state, indexMetaData, + env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey()); + if (cleanup != null) { + for (IndexUpdate d : cleanup) { + if (!d.isValid()) { + continue; } - return minTs; - } - - /** - * Get the index deletes from the codec {@link IndexCodec#getIndexDeletes(TableState, IndexMetaData, byte[], byte[])} and then add them to the - * update map. - *
    - * Expects the {@link LocalTableState} to already be correctly setup (correct timestamp, updates applied, etc). - * @param indexMetaData TODO - * - * @throws IOException - */ - protected void addDeleteUpdatesToMap(IndexUpdateManager updateMap, LocalTableState state, long ts, IndexMetaData indexMetaData) - throws IOException { - Iterable cleanup = codec.getIndexDeletes(state, indexMetaData, env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey()); - if (cleanup != null) { - for (IndexUpdate d : cleanup) { - if (!d.isValid()) { - continue; - } - // FIXME: PHOENIX-4057 do not attempt to issue index updates - // for out-of-order mutations since it corrupts the index. - final ColumnTracker tracker = d.getIndexedColumns(); - if (tracker.hasNewerTimestamps()) { - continue; - } - - // override the timestamps in the delete to match the current batch. - Delete remove = (Delete)d.getUpdate(); - remove.setTimestamp(ts); - updateMap.addIndexUpdate(d.getTableName(), remove); - } + // FIXME: PHOENIX-4057 do not attempt to issue index updates + // for out-of-order mutations since it corrupts the index. + final ColumnTracker tracker = d.getIndexedColumns(); + if (tracker.hasNewerTimestamps()) { + continue; } - } - @Override - public Collection> getIndexUpdateForFilteredRows(Collection filtered, IndexMetaData indexMetaData) - throws IOException { - // TODO Implement IndexBuilder.getIndexUpdateForFilteredRows - return null; + // override the timestamps in the delete to match the current batch. + Delete remove = (Delete) d.getUpdate(); + remove.setTimestamp(ts); + updateMap.addIndexUpdate(d.getTableName(), remove); + } } -} \ No newline at end of file + } + + @Override + public Collection> getIndexUpdateForFilteredRows(Collection filtered, + IndexMetaData indexMetaData) throws IOException { + // TODO Implement IndexBuilder.getIndexUpdateForFilteredRows + return null; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java index 60b8b5a9366..855c6999553 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/CachedLocalTable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,166 +43,159 @@ import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarbinary; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; public class CachedLocalTable implements LocalHBaseState { - private final Map> rowKeyPtrToCells; - private final Region region; + private final Map> rowKeyPtrToCells; + private final Region region; - private CachedLocalTable(Map> rowKeyPtrToCells, Region region) { - this.rowKeyPtrToCells = rowKeyPtrToCells; - this.region = region; - } + private CachedLocalTable(Map> rowKeyPtrToCells, Region region) { + this.rowKeyPtrToCells = rowKeyPtrToCells; + this.region = region; + } - @Override - public List getCurrentRowState( - Mutation mutation, - Collection columnReferences, - boolean ignoreNewerMutations) throws IOException { + @Override + public List getCurrentRowState(Mutation mutation, + Collection columnReferences, boolean ignoreNewerMutations) + throws IOException { - if(ignoreNewerMutations) { - return doScan(mutation, columnReferences); - } - - byte[] rowKey = mutation.getRow(); - return this.rowKeyPtrToCells.get(new ImmutableBytesPtr(rowKey)); + if (ignoreNewerMutations) { + return doScan(mutation, columnReferences); } - private List doScan(Mutation mutation, Collection columnReferences) throws IOException { - byte[] rowKey = mutation.getRow(); - // need to use a scan here so we can get raw state, which Get doesn't provide. - Scan scan = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columnReferences)); - scan.withStartRow(rowKey); - scan.withStopRow(rowKey, true); - - // Provides a means of client indicating that newer cells should not be considered, - // enabling mutations to be replayed to partially rebuild the index when a write fails. - // When replaying mutations we want the oldest timestamp (as anything newer we be replayed) - //long ts = getOldestTimestamp(m.getFamilyCellMap().values()); - long ts = getMutationTimestampWhenAllCellTimestampIsSame(mutation); - scan.setTimeRange(0,ts); - - try (RegionScanner regionScanner = region.getScanner(scan)) { - List cells = new ArrayList(1); - boolean more = regionScanner.next(cells); - assert !more : "Got more than one result when scanning" - + " a single row in the primary table!"; - - return cells; - } + byte[] rowKey = mutation.getRow(); + return this.rowKeyPtrToCells.get(new ImmutableBytesPtr(rowKey)); + } + + private List doScan(Mutation mutation, + Collection columnReferences) throws IOException { + byte[] rowKey = mutation.getRow(); + // need to use a scan here so we can get raw state, which Get doesn't provide. + Scan scan = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columnReferences)); + scan.withStartRow(rowKey); + scan.withStopRow(rowKey, true); + + // Provides a means of client indicating that newer cells should not be considered, + // enabling mutations to be replayed to partially rebuild the index when a write fails. 
+ // When replaying mutations we want the oldest timestamp (as anything newer we be replayed) + // long ts = getOldestTimestamp(m.getFamilyCellMap().values()); + long ts = getMutationTimestampWhenAllCellTimestampIsSame(mutation); + scan.setTimeRange(0, ts); + + try (RegionScanner regionScanner = region.getScanner(scan)) { + List cells = new ArrayList(1); + boolean more = regionScanner.next(cells); + assert !more + : "Got more than one result when scanning" + " a single row in the primary table!"; + + return cells; } - - @VisibleForTesting - public static CachedLocalTable build(Map> rowKeyPtrToCells) { - return new CachedLocalTable(rowKeyPtrToCells, null); + } + + @VisibleForTesting + public static CachedLocalTable build(Map> rowKeyPtrToCells) { + return new CachedLocalTable(rowKeyPtrToCells, null); + } + + public static CachedLocalTable build( + Collection dataTableMutationsWithSameRowKeyAndTimestamp, + final PhoenixIndexMetaData indexMetaData, Region region) throws IOException { + if (indexMetaData.getReplayWrite() != null) { + return new CachedLocalTable(Collections.emptyMap(), region); } - - public static CachedLocalTable build( - Collection dataTableMutationsWithSameRowKeyAndTimestamp, - final PhoenixIndexMetaData indexMetaData, - Region region) throws IOException { - if(indexMetaData.getReplayWrite() != null) - { - return new CachedLocalTable(Collections.emptyMap(), region); - } - return preScanAllRequiredRows(dataTableMutationsWithSameRowKeyAndTimestamp, indexMetaData, region); + return preScanAllRequiredRows(dataTableMutationsWithSameRowKeyAndTimestamp, indexMetaData, + region); + } + + /** + * Pre-scan all the required rows before we building the indexes for the + * dataTableMutationsWithSameRowKeyAndTimestamp parameter. Note: When we calling this method, for + * single mutation in the dataTableMutationsWithSameRowKeyAndTimestamp parameter, all cells in the + * mutation have the same rowKey and timestamp. + */ + public static CachedLocalTable preScanAllRequiredRows( + Collection dataTableMutationsWithSameRowKeyAndTimestamp, + PhoenixIndexMetaData indexMetaData, Region region) throws IOException { + Set keys = new HashSet(dataTableMutationsWithSameRowKeyAndTimestamp.size()); + for (Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) { + if (indexMetaData.requiresPriorRowState(mutation)) { + keys.add(PVarbinary.INSTANCE.getKeyRange(mutation.getRow(), SortOrder.ASC)); + } + } + if (keys.isEmpty()) { + return new CachedLocalTable(Collections.emptyMap(), region); } - /** - * Pre-scan all the required rows before we building the indexes for the dataTableMutationsWithSameRowKeyAndTimestamp - * parameter. - * Note: When we calling this method, for single mutation in the dataTableMutationsWithSameRowKeyAndTimestamp - * parameter, all cells in the mutation have the same rowKey and timestamp. 
- * @param dataTableMutationsWithSameRowKeyAndTimestamp - * @param indexMetaData - * @param region - * @throws IOException - */ - public static CachedLocalTable preScanAllRequiredRows( - Collection dataTableMutationsWithSameRowKeyAndTimestamp, - PhoenixIndexMetaData indexMetaData, - Region region) throws IOException { - Set keys = - new HashSet(dataTableMutationsWithSameRowKeyAndTimestamp.size()); - for (Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) { - if (indexMetaData.requiresPriorRowState(mutation)) { - keys.add(PVarbinary.INSTANCE.getKeyRange(mutation.getRow(), SortOrder.ASC)); - } - } - if (keys.isEmpty()) { - return new CachedLocalTable(Collections.emptyMap(), region); - } - - List indexTableMaintainers = indexMetaData.getIndexMaintainers(); - Set getterColumnReferences = Sets.newHashSet(); - for (IndexMaintainer indexTableMaintainer : indexTableMaintainers) { - getterColumnReferences.addAll( - indexTableMaintainer.getAllColumns()); - } - - getterColumnReferences.add(new ColumnReference( - indexTableMaintainers.get(0).getDataEmptyKeyValueCF(), - indexTableMaintainers.get(0).getEmptyKeyValueQualifier())); - - Scan scan = IndexManagementUtil.newLocalStateScan( - Collections.singletonList(getterColumnReferences)); - ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList(keys)); - scanRanges.initializeScan(scan); - SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); - - if(indexMetaData.getReplayWrite() != null) { - /** - * Because of previous {@link IndexManagementUtil#flattenMutationsByTimestamp}(which is called - * in {@link IndexRegionObserver#groupMutations} or {@link Indexer#preBatchMutateWithExceptions}), - * for single mutation in the dataTableMutationsWithSameRowKeyAndTimestamp, all cells in the mutation - * have the same rowKey and timestamp. 
- */ - long timestamp = getMaxTimestamp(dataTableMutationsWithSameRowKeyAndTimestamp); - scan.setTimeRange(0, timestamp); - scan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); - } else { - assert scan.isRaw(); - scan.readVersions(1); - scan.setFilter(skipScanFilter); - } - - Map> rowKeyPtrToCells = - new HashMap>(); - try (RegionScanner scanner = region.getScanner(scan)) { - boolean more = true; - while(more) { - List cells = new ArrayList(); - more = scanner.next(cells); - if (cells.isEmpty()) { - continue; - } - Cell cell = cells.get(0); - byte[] rowKey = CellUtil.cloneRow(cell); - rowKeyPtrToCells.put(new ImmutableBytesPtr(rowKey), cells); - } - } + List indexTableMaintainers = indexMetaData.getIndexMaintainers(); + Set getterColumnReferences = Sets.newHashSet(); + for (IndexMaintainer indexTableMaintainer : indexTableMaintainers) { + getterColumnReferences.addAll(indexTableMaintainer.getAllColumns()); + } - return new CachedLocalTable(rowKeyPtrToCells, region); + getterColumnReferences + .add(new ColumnReference(indexTableMaintainers.get(0).getDataEmptyKeyValueCF(), + indexTableMaintainers.get(0).getEmptyKeyValueQualifier())); + + Scan scan = + IndexManagementUtil.newLocalStateScan(Collections.singletonList(getterColumnReferences)); + ScanRanges scanRanges = ScanRanges.createPointLookup(new ArrayList(keys)); + scanRanges.initializeScan(scan); + SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter(); + + if (indexMetaData.getReplayWrite() != null) { + /** + * Because of previous {@link IndexManagementUtil#flattenMutationsByTimestamp}(which is called + * in {@link IndexRegionObserver#groupMutations} or + * {@link Indexer#preBatchMutateWithExceptions}), for single mutation in the + * dataTableMutationsWithSameRowKeyAndTimestamp, all cells in the mutation have the same + * rowKey and timestamp. + */ + long timestamp = getMaxTimestamp(dataTableMutationsWithSameRowKeyAndTimestamp); + scan.setTimeRange(0, timestamp); + scan.setFilter(new SkipScanFilter(skipScanFilter, true, true)); + } else { + assert scan.isRaw(); + scan.readVersions(1); + scan.setFilter(skipScanFilter); } - private static long getMaxTimestamp(Collection dataTableMutationsWithSameRowKeyAndTimestamp) { - long maxTimestamp = Long.MIN_VALUE; - for(Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) { - /** - * all the cells in this mutation have the same timestamp. - */ - long timestamp = getMutationTimestampWhenAllCellTimestampIsSame(mutation); - if(timestamp > maxTimestamp) { - maxTimestamp = timestamp; - } + Map> rowKeyPtrToCells = + new HashMap>(); + try (RegionScanner scanner = region.getScanner(scan)) { + boolean more = true; + while (more) { + List cells = new ArrayList(); + more = scanner.next(cells); + if (cells.isEmpty()) { + continue; } - return maxTimestamp; + Cell cell = cells.get(0); + byte[] rowKey = CellUtil.cloneRow(cell); + rowKeyPtrToCells.put(new ImmutableBytesPtr(rowKey), cells); + } } - private static long getMutationTimestampWhenAllCellTimestampIsSame(Mutation mutation) { - return mutation.getFamilyCellMap().values().iterator().next().get(0).getTimestamp(); + return new CachedLocalTable(rowKeyPtrToCells, region); + } + + private static long + getMaxTimestamp(Collection dataTableMutationsWithSameRowKeyAndTimestamp) { + long maxTimestamp = Long.MIN_VALUE; + for (Mutation mutation : dataTableMutationsWithSameRowKeyAndTimestamp) { + /** + * all the cells in this mutation have the same timestamp. 
+ */ + long timestamp = getMutationTimestampWhenAllCellTimestampIsSame(mutation); + if (timestamp > maxTimestamp) { + maxTimestamp = timestamp; + } } + return maxTimestamp; + } + + private static long getMutationTimestampWhenAllCellTimestampIsSame(Mutation mutation) { + return mutation.getFamilyCellMap().values().iterator().next().get(0).getTimestamp(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java index cfaa78ed124..e23d6ad4258 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,13 +18,11 @@ package org.apache.phoenix.hbase.index.covered.data; import java.util.Iterator; -import java.util.SortedSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.IndexKeyValueSkipListSet; import org.apache.hadoop.hbase.regionserver.MemStore; import org.apache.hadoop.hbase.util.Bytes; @@ -40,20 +38,18 @@ * sizing (for right now). We still support the concurrent access (in case indexes are built in * parallel). *
- * - We basically wrap a KeyValueSkipListSet, just like a regular MemStore, except we are:
- * <ol>
- * <li>not dealing with
- * <ul>
- * <li>space considerations</li>
- * <li>a snapshot set</li>
- * </ul>
- * </li>
- * <li>ignoring memstore timestamps in favor of deciding when we want to overwrite keys based on how
- * we obtain them</li>
- * <li>ignoring time range updates (so
- * ReseekableScanner#shouldUseScanner(Scan, SortedSet, long) isn't supported from
- * {@link #getScanner()}).</li>
+ * We basically wrap a KeyValueSkipListSet, just like a regular MemStore, except we are:
+ * <ol>
+ * <li>not dealing with
+ * <ul>
+ * <li>space considerations</li>
+ * <li>a snapshot set</li>
+ * </ul>
+ * </li>
+ * <li>ignoring memstore timestamps in favor of deciding when we want to overwrite keys based on how
+ * we obtain them</li>
+ * <li>ignoring time range updates (so ReseekableScanner#shouldUseScanner(Scan, SortedSet, long)
+ * isn't supported from {@link #getScanner()}).</li>
 * </ol>
 * <p>
    * We can ignore the memstore timestamps because we know that anything we get from the local region @@ -67,9 +63,9 @@ * the previous implementation. Further, by being smart about how we manage the KVs, we can drop the * extra object creation we were doing to wrap the pending KVs (which we did previously to ensure * they sorted before the ones we got from the HRegion). We overwrite {@link KeyValue}s when we add - * them from external sources #add(KeyValue, boolean), but then don't overwrite existing - * keyvalues when read them from the underlying table (because pending keyvalues should always - * overwrite current ones) - this logic is all contained in LocalTableState. + * them from external sources #add(KeyValue, boolean), but then don't overwrite existing keyvalues + * when read them from the underlying table (because pending keyvalues should always overwrite + * current ones) - this logic is all contained in LocalTableState. * @see LocalTableState */ public class IndexMemStore implements KeyValueStore { @@ -79,11 +75,11 @@ public class IndexMemStore implements KeyValueStore { private CellComparator comparator; public IndexMemStore() { - this(new DelegateComparator(new CellComparatorImpl()){ - @Override - public int compare(Cell leftCell, Cell rightCell) { - return super.compare(leftCell, rightCell, true); - } + this(new DelegateComparator(new CellComparatorImpl()) { + @Override + public int compare(Cell leftCell, Cell rightCell) { + return super.compare(leftCell, rightCell, true); + } }); } @@ -126,8 +122,8 @@ private void dump() { } private String toString(Cell kv) { - return kv.toString() + "/value=" + - Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return kv.toString() + "/value=" + + Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); } @Override @@ -146,7 +142,7 @@ public void rollback(Cell kv) { public ReseekableScanner getScanner() { return new MemStoreScanner(); } - + /* * MemStoreScanner implements the ReseekableScanner. It lets the caller scan the contents of a * memstore -- both current map and snapshot. This behaves as if it were a real scanner but does @@ -238,7 +234,8 @@ public synchronized boolean reseek(Cell key) { * Unfortunately the Java API does not offer a method to get it. So we remember the last keys * we iterated to and restore the reseeked set to at least that point. */ - kvsetIt = kvsetAtCreation.tailSet(getHighest(PhoenixKeyValueUtil.maybeCopyCell(key), kvsetItRow)).iterator(); + kvsetIt = kvsetAtCreation + .tailSet(getHighest(PhoenixKeyValueUtil.maybeCopyCell(key), kvsetItRow)).iterator(); return seekInSubLists(); } @@ -283,4 +280,4 @@ public synchronized void close() { this.kvsetItRow = null; } } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java index ea62cc55dac..a6fdc21542d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/parallel/ThreadPoolManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,13 +26,13 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.util.Threads; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Manage access to thread pools @@ -44,22 +44,20 @@ public class ThreadPoolManager { /** * Get an executor for the given name, based on the passed {@link Configuration}. If a thread pool * already exists with that name, it will be returned. - * @param builder - * @param env * @return a {@link ThreadPoolExecutor} for the given name. Thread pool that only shuts down when * there are no more explicit references to it. You do not need to shutdown the threadpool * on your own - it is managed for you. When you are done, you merely need to release your * reference. If you do attempt to shutdown the pool, you should be careful to call - * {@link ThreadPoolExecutor#shutdown()} XOR {@link ThreadPoolExecutor#shutdownNow()} - extra calls to either can lead to - * early shutdown of the pool. + * {@link ThreadPoolExecutor#shutdown()} XOR {@link ThreadPoolExecutor#shutdownNow()} - + * extra calls to either can lead to early shutdown of the pool. */ public static synchronized ThreadPoolExecutor getExecutor(ThreadPoolBuilder builder, - RegionCoprocessorEnvironment env) { + RegionCoprocessorEnvironment env) { return getExecutor(builder, env.getSharedData()); } static synchronized ThreadPoolExecutor getExecutor(ThreadPoolBuilder builder, - Map poolCache) { + Map poolCache) { ThreadPoolExecutor pool = (ThreadPoolExecutor) poolCache.get(builder.getName()); if (pool == null || pool.isTerminating() || pool.isShutdown()) { pool = getDefaultExecutor(builder); @@ -72,7 +70,6 @@ static synchronized ThreadPoolExecutor getExecutor(ThreadPoolBuilder builder, } /** - * @param conf */ private static ShutdownOnUnusedThreadPoolExecutor getDefaultExecutor(ThreadPoolBuilder builder) { int maxThreads = builder.getMaxThreads(); @@ -88,11 +85,11 @@ private static ShutdownOnUnusedThreadPoolExecutor getDefaultExecutor(ThreadPoolB // usual policy and throw a RejectedExecutionException because we are shutting down anyways and // the worst thing is that this gets unloaded. 
ShutdownOnUnusedThreadPoolExecutor pool = - new ShutdownOnUnusedThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, - TimeUnit.SECONDS, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setNameFormat(builder.getName()+"-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - builder.getName()); + new ShutdownOnUnusedThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, + TimeUnit.SECONDS, new LinkedBlockingQueue(), + new ThreadFactoryBuilder().setNameFormat(builder.getName() + "-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + builder.getName()); pool.allowCoreThreadTimeOut(true); return pool; } @@ -109,8 +106,8 @@ private static class ShutdownOnUnusedThreadPoolExecutor extends ThreadPoolExecut private String poolName; public ShutdownOnUnusedThreadPoolExecutor(int coreThreads, int maxThreads, long keepAliveTime, - TimeUnit timeUnit, BlockingQueue workQueue, ThreadFactory threadFactory, - String poolName) { + TimeUnit timeUnit, BlockingQueue workQueue, ThreadFactory threadFactory, + String poolName) { super(coreThreads, maxThreads, keepAliveTime, timeUnit, workQueue, threadFactory); this.references = new AtomicInteger(); this.poolName = poolName; @@ -145,4 +142,4 @@ public List shutdownNow() { } } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java index a973f02e1ec..176ddaec62a 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.hbase.index.wal; import java.io.DataOutput; @@ -32,143 +31,137 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; -import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; public class IndexedKeyValue extends KeyValue { - private static int calcHashCode(ImmutableBytesPtr indexTableName, Mutation mutation) { - final int prime = 31; - int result = 1; - result = prime * result + indexTableName.hashCode(); - result = prime * result + Arrays.hashCode(mutation.getRow()); - return result; - } - - private ImmutableBytesPtr indexTableName; - private Mutation mutation; - // optimization check to ensure that batches don't get replayed to the index more than once - private boolean batchFinished = false; - private int hashCode; - - public static IndexedKeyValue newIndexedKeyValue(byte[] bs, Mutation m){ - Cell indexWALCell = adaptFirstCellFromMutation(m); - return new IndexedKeyValue(indexWALCell, bs, m); - } - - private static Cell adaptFirstCellFromMutation(Mutation m) { - if (m != null && m.getFamilyCellMap() != null && - m.getFamilyCellMap().firstEntry() != null && - m.getFamilyCellMap().firstEntry().getValue() != null - && m.getFamilyCellMap().firstEntry().getValue().get(0) != null) { - //have to replace the column family with WALEdit.METAFAMILY to make sure - //that IndexedKeyValues don't get replicated. The superclass KeyValue fields - //like row, qualifier and value are placeholders to prevent NPEs - // when using the KeyValue APIs. See PHOENIX-5188 / 5455 - Cell mutationCell = m.getFamilyCellMap().firstEntry().getValue().get(0); - CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return builder.setFamily(WALEdit.METAFAMILY). - setQualifier(mutationCell.getQualifierArray()). - setRow(m.getRow()). - setTimestamp(mutationCell.getTimestamp()). - setValue(mutationCell.getValueArray()).setType(Cell.Type.Put).build(); - } else { - throw new IllegalArgumentException("Tried to create an IndexedKeyValue with a " + - "Mutation with no Cells!"); - } - - } - - //used for deserialization - public IndexedKeyValue() {} - - private IndexedKeyValue(Cell c, byte[] bs, Mutation mutation){ - super(c); - this.indexTableName = new ImmutableBytesPtr(bs); - this.mutation = mutation; - this.hashCode = calcHashCode(indexTableName, mutation); - } - - public byte[] getIndexTable() { - return this.indexTableName.get(); - } - - public Mutation getMutation() { - return mutation; - } - - @Override - public String toString() { - return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + mutation; - } - - /** - * This is a very heavy-weight operation and should only be done when absolutely necessary - it does a full - * serialization of the underyling mutation to compare the underlying data. 
- */ - @Override - public boolean equals(Object obj) { - if(obj == null) return false; - if (this == obj) return true; - if (getClass() != obj.getClass()) return false; - IndexedKeyValue other = (IndexedKeyValue)obj; - if (hashCode() != other.hashCode()) return false; - if (!other.indexTableName.equals(this.indexTableName)) return false; - byte[] current = this.getMutationBytes(); - byte[] otherMutation = other.getMutationBytes(); - return Bytes.equals(current, otherMutation); - } - - private byte[] getMutationBytes() { - try { - MutationProto m = toMutationProto(this.mutation); - return m.toByteArray(); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to get bytes for mutation!", e); - } - } - - @Override - public int hashCode() { - return hashCode; - } - - /** - * Internal write the underlying data for the entry - this does not do any special prefixing. - * Writing should be done via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure - * consistent reading/writing of {@link IndexedKeyValue}s. - * - * @param out - * to write data to. Does not close or flush the passed object. - * @throws IOException - * if there is a problem writing the underlying data - */ - void writeData(DataOutput out) throws IOException { - Bytes.writeByteArray(out, this.indexTableName.get()); - MutationProto m = toMutationProto(this.mutation); - Bytes.writeByteArray(out, m.toByteArray()); - } - - public boolean getBatchFinished() { - return this.batchFinished; + private static int calcHashCode(ImmutableBytesPtr indexTableName, Mutation mutation) { + final int prime = 31; + int result = 1; + result = prime * result + indexTableName.hashCode(); + result = prime * result + Arrays.hashCode(mutation.getRow()); + return result; + } + + private ImmutableBytesPtr indexTableName; + private Mutation mutation; + // optimization check to ensure that batches don't get replayed to the index more than once + private boolean batchFinished = false; + private int hashCode; + + public static IndexedKeyValue newIndexedKeyValue(byte[] bs, Mutation m) { + Cell indexWALCell = adaptFirstCellFromMutation(m); + return new IndexedKeyValue(indexWALCell, bs, m); + } + + private static Cell adaptFirstCellFromMutation(Mutation m) { + if ( + m != null && m.getFamilyCellMap() != null && m.getFamilyCellMap().firstEntry() != null + && m.getFamilyCellMap().firstEntry().getValue() != null + && m.getFamilyCellMap().firstEntry().getValue().get(0) != null + ) { + // have to replace the column family with WALEdit.METAFAMILY to make sure + // that IndexedKeyValues don't get replicated. The superclass KeyValue fields + // like row, qualifier and value are placeholders to prevent NPEs + // when using the KeyValue APIs. 
See PHOENIX-5188 / 5455 + Cell mutationCell = m.getFamilyCellMap().firstEntry().getValue().get(0); + CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + return builder.setFamily(WALEdit.METAFAMILY).setQualifier(mutationCell.getQualifierArray()) + .setRow(m.getRow()).setTimestamp(mutationCell.getTimestamp()) + .setValue(mutationCell.getValueArray()).setType(Cell.Type.Put).build(); + } else { + throw new IllegalArgumentException( + "Tried to create an IndexedKeyValue with a " + "Mutation with no Cells!"); } - public void markBatchFinished() { - this.batchFinished = true; + } + + // used for deserialization + public IndexedKeyValue() { + } + + private IndexedKeyValue(Cell c, byte[] bs, Mutation mutation) { + super(c); + this.indexTableName = new ImmutableBytesPtr(bs); + this.mutation = mutation; + this.hashCode = calcHashCode(indexTableName, mutation); + } + + public byte[] getIndexTable() { + return this.indexTableName.get(); + } + + public Mutation getMutation() { + return mutation; + } + + @Override + public String toString() { + return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + mutation; + } + + /** + * This is a very heavy-weight operation and should only be done when absolutely necessary - it + * does a full serialization of the underyling mutation to compare the underlying data. + */ + @Override + public boolean equals(Object obj) { + if (obj == null) return false; + if (this == obj) return true; + if (getClass() != obj.getClass()) return false; + IndexedKeyValue other = (IndexedKeyValue) obj; + if (hashCode() != other.hashCode()) return false; + if (!other.indexTableName.equals(this.indexTableName)) return false; + byte[] current = this.getMutationBytes(); + byte[] otherMutation = other.getMutationBytes(); + return Bytes.equals(current, otherMutation); + } + + private byte[] getMutationBytes() { + try { + MutationProto m = toMutationProto(this.mutation); + return m.toByteArray(); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to get bytes for mutation!", e); } - - protected MutationProto toMutationProto(Mutation mutation) throws IOException { - MutationProto m = null; - if(mutation instanceof Put){ - m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.PUT, - mutation); - } else if(mutation instanceof Delete) { - m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.DELETE, - mutation); - } else { - throw new IOException("Put/Delete mutations only supported"); - } - return m; + } + + @Override + public int hashCode() { + return hashCode; + } + + /** + * Internal write the underlying data for the entry - this does not do any special prefixing. + * Writing should be done via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure + * consistent reading/writing of {@link IndexedKeyValue}s. to write data to. Does not close or + * flush the passed object. 
if there is a problem writing the underlying data + */ + void writeData(DataOutput out) throws IOException { + Bytes.writeByteArray(out, this.indexTableName.get()); + MutationProto m = toMutationProto(this.mutation); + Bytes.writeByteArray(out, m.toByteArray()); + } + + public boolean getBatchFinished() { + return this.batchFinished; + } + + public void markBatchFinished() { + this.batchFinished = true; + } + + protected MutationProto toMutationProto(Mutation mutation) throws IOException { + MutationProto m = null; + if (mutation instanceof Put) { + m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.PUT, mutation); + } else if (mutation instanceof Delete) { + m = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(MutationType.DELETE, mutation); + } else { + throw new IOException("Put/Delete mutations only supported"); } -} \ No newline at end of file + return m; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java index 8b8949e6b42..1863ecc9e31 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/wal/KeyValueCodec.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.wal; import java.io.DataInput; @@ -30,6 +29,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; + /** * Codec to encode/decode KeyValue s and IndexedKeyValue s within a WALEdit */ @@ -51,7 +51,7 @@ public class KeyValueCodec { public static List readKeyValues(DataInput in) throws IOException { int size = in.readInt(); if (size == 0) { - return Collections.emptyList(); + return Collections. emptyList(); } List kvs = new ArrayList(size); for (int i = 0; i < size; i++) { @@ -76,7 +76,7 @@ public static KeyValue readKeyValue(DataInput in) throws IOException { ClientProtos.MutationProto mProto = ClientProtos.MutationProto.parseFrom(mutationData); Mutation mutation = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto); IndexedKeyValue kv = null; - if (mutation != null){ + if (mutation != null) { kv = IndexedKeyValue.newIndexedKeyValue(indexTableName.copyBytesIfNecessary(), mutation); } else { kv = new IndexedKeyValue(); @@ -91,7 +91,7 @@ public static KeyValue readKeyValue(DataInput in) throws IOException { * Write a {@link KeyValue} or an {@link IndexedKeyValue} to the output stream. These can be read * back via {@link #readKeyValue(DataInput)} or {@link #readKeyValues(DataInput)}. 
* @param out to write to - * @param kv {@link KeyValue} to which to write + * @param kv {@link KeyValue} to which to write * @throws IOException if there is an error writing */ public static void write(DataOutput out, KeyValue kv) throws IOException { @@ -99,7 +99,7 @@ public static void write(DataOutput out, KeyValue kv) throws IOException { out.writeInt(INDEX_TYPE_LENGTH_MARKER); ((IndexedKeyValue) kv).writeData(out); } else { - KeyValue.write(kv, out); + KeyValue.write(kv, out); } } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java index f1fbc6002b9..a234773469d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/AbstractParallelWriterIndexCommitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,202 +40,219 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.index.PhoenixIndexFailurePolicy; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.util.ServerIndexUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** * Abstract class to Write index updates to the index tables in parallel. */ public abstract class AbstractParallelWriterIndexCommitter implements IndexCommitter { - public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "phoenix.index.writer.threads.max"; - private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10; - public static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = "phoenix.index.writer.threads.keepalivetime"; - private static final Logger LOG = LoggerFactory.getLogger(IndexWriter.class); - - protected HTableFactory retryingFactory; - protected HTableFactory noRetriesFactory; - protected Stoppable stopped; - protected QuickFailingTaskRunner pool; - protected KeyValueBuilder kvBuilder; - protected RegionCoprocessorEnvironment env; - protected TaskBatch tasks; - protected boolean disableIndexOnFailure = false; - - // This relies on Hadoop Configuration to handle warning about deprecated configs and - // to set the correct non-deprecated configs when an old one shows up. 
- static { - Configuration.addDeprecation("index.writer.threads.max", NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY); - Configuration.addDeprecation("index.writer.threads.keepalivetime", INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY); - } + public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = + "phoenix.index.writer.threads.max"; + private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10; + public static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = + "phoenix.index.writer.threads.keepalivetime"; + private static final Logger LOG = LoggerFactory.getLogger(IndexWriter.class); - public AbstractParallelWriterIndexCommitter() {} + protected HTableFactory retryingFactory; + protected HTableFactory noRetriesFactory; + protected Stoppable stopped; + protected QuickFailingTaskRunner pool; + protected KeyValueBuilder kvBuilder; + protected RegionCoprocessorEnvironment env; + protected TaskBatch tasks; + protected boolean disableIndexOnFailure = false; - // For testing - public AbstractParallelWriterIndexCommitter(String hbaseVersion) { - kvBuilder = KeyValueBuilder.get(hbaseVersion); - } + // This relies on Hadoop Configuration to handle warning about deprecated configs and + // to set the correct non-deprecated configs when an old one shows up. + static { + Configuration.addDeprecation("index.writer.threads.max", + NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY); + Configuration.addDeprecation("index.writer.threads.keepalivetime", + INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY); + } - @Override - public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) { - this.env = env; - this.disableIndexOnFailure = disableIndexOnFailure; - Configuration conf = env.getConfiguration(); - setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env), - ThreadPoolManager.getExecutor( - new ThreadPoolBuilder(name, conf).setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, - DEFAULT_CONCURRENT_INDEX_WRITER_THREADS).setCoreTimeout( - INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env), parent, env); - this.kvBuilder = KeyValueBuilder.get(env.getHBaseVersion()); - } + public AbstractParallelWriterIndexCommitter() { + } + + // For testing + public AbstractParallelWriterIndexCommitter(String hbaseVersion) { + kvBuilder = KeyValueBuilder.get(hbaseVersion); + } + + @Override + public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, + boolean disableIndexOnFailure) { + this.env = env; + this.disableIndexOnFailure = disableIndexOnFailure; + Configuration conf = env.getConfiguration(); + setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env), + ThreadPoolManager.getExecutor(new ThreadPoolBuilder(name, conf) + .setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, + DEFAULT_CONCURRENT_INDEX_WRITER_THREADS) + .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env), + parent, env); + this.kvBuilder = KeyValueBuilder.get(env.getHBaseVersion()); + } - /** - * Setup this. - *

    - * Exposed for TESTING + /** + * Setup this. + *

    + * Exposed for TESTING + */ + public void setup(HTableFactory factory, ExecutorService pool, Stoppable stop, + RegionCoprocessorEnvironment env) { + this.retryingFactory = factory; + this.noRetriesFactory = IndexWriterUtils.getNoRetriesHTableFactory(env); + this.pool = new QuickFailingTaskRunner(pool); + this.stopped = stop; + this.env = env; + } + + @Override + public void write(Multimap toWrite, + final boolean allowLocalUpdates, final int clientVersion) + throws SingleIndexWriteFailureException { + /* + * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the + * writes in parallel to each index table, so each table gets its own task and is submitted to + * the pool. Where it gets tricky is that we want to block the calling thread until one of two + * things happens: (1) all index tables get successfully updated, or (2) any one of the index + * table writes fail; in either case, we should return as quickly as possible. We get a little + * more complicated in that if we do get a single failure, but any of the index writes hasn't + * been started yet (its been queued up, but not submitted to a thread) we want to that task to + * fail immediately as we know that write is a waste and will need to be replayed anyways. */ - public void setup(HTableFactory factory, ExecutorService pool,Stoppable stop, RegionCoprocessorEnvironment env) { - this.retryingFactory = factory; - this.noRetriesFactory = IndexWriterUtils.getNoRetriesHTableFactory(env); - this.pool = new QuickFailingTaskRunner(pool); - this.stopped = stop; - this.env = env; - } - @Override - public void write(Multimap toWrite, final boolean allowLocalUpdates, final int clientVersion) throws SingleIndexWriteFailureException { - /* - * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the writes in - * parallel to each index table, so each table gets its own task and is submitted to the pool. Where it gets - * tricky is that we want to block the calling thread until one of two things happens: (1) all index tables get - * successfully updated, or (2) any one of the index table writes fail; in either case, we should return as - * quickly as possible. We get a little more complicated in that if we do get a single failure, but any of the - * index writes hasn't been started yet (its been queued up, but not submitted to a thread) we want to that task - * to fail immediately as we know that write is a waste and will need to be replayed anyways. + Set>> entries = toWrite.asMap().entrySet(); + tasks = new TaskBatch(entries.size()); + for (Entry> entry : entries) { + // get the mutations for each table. We leak the implementation here a little bit to save + // doing a complete copy over of all the index update for each table. + final List mutations = + kvBuilder.cloneIfNecessary((List) entry.getValue()); + final HTableInterfaceReference tableReference = entry.getKey(); + if ( + env != null && !allowLocalUpdates + && tableReference.getTableName() + .equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) + ) { + continue; + } + /* + * Write a batch of index updates to an index table. This operation stops (is cancelable) via + * two mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the + * running thread. The former will only work if we are not in the midst of writing the current + * batch to the table, though we do check these status variables before starting and before + * writing the batch. 
The latter usage, interrupting the thread, will work in the previous + * situations as was at some points while writing the batch, depending on the underlying + * writer implementation (HTableInterface#batch is blocking, but doesn't elaborate when is + * supports an interrupt). + */ + tasks.add(new Task() { + + /** + * Do the actual write to the primary table. */ + @SuppressWarnings("deprecation") + @Override + public Void call() throws Exception { + // this may have been queued, so another task infront of us may have failed, so we should + // early exit, if that's the case + throwFailureIfDone(); - Set>> entries = toWrite.asMap().entrySet(); - tasks = new TaskBatch(entries.size()); - for (Entry> entry : entries) { - // get the mutations for each table. We leak the implementation here a little bit to save - // doing a complete copy over of all the index update for each table. - final List mutations = kvBuilder.cloneIfNecessary((List)entry.getValue()); - final HTableInterfaceReference tableReference = entry.getKey(); - if (env != null - && !allowLocalUpdates - && tableReference.getTableName().equals( - env.getRegion().getTableDescriptor().getTableName().getNameAsString())) { - continue; - } - /* - * Write a batch of index updates to an index table. This operation stops (is cancelable) via two - * mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the running thread. - * The former will only work if we are not in the midst of writing the current batch to the table, though we - * do check these status variables before starting and before writing the batch. The latter usage, - * interrupting the thread, will work in the previous situations as was at some points while writing the - * batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't - * elaborate when is supports an interrupt). - */ - tasks.add(new Task() { - - /** - * Do the actual write to the primary table. - * - * @return - */ - @SuppressWarnings("deprecation") - @Override - public Void call() throws Exception { - // this may have been queued, so another task infront of us may have failed, so we should - // early exit, if that's the case - throwFailureIfDone(); - - if (LOG.isTraceEnabled()) { - LOG.trace("Writing index update:" + mutations + " to table: " + tableReference); - } - try { - if (allowLocalUpdates - && env != null - && tableReference.getTableName().equals( - env.getRegion().getTableDescriptor().getTableName().getNameAsString())) { - try { - throwFailureIfDone(); - ServerIndexUtil.writeLocalUpdates(env.getRegion(), mutations, true); - return null; - } catch (IOException ignored) { - // when it's failed we fall back to the standard & slow way - if (LOG.isDebugEnabled()) { - LOG.debug("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error=" - + ignored); - } - } - } - // if the client can retry index writes, then we don't need to retry here - HTableFactory factory; - if (disableIndexOnFailure) { - factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES ? 
retryingFactory : noRetriesFactory; - } - else { - factory = retryingFactory; - } - try (Table table = factory.getTable(tableReference.get())) { - throwFailureIfDone(); - table.batch(mutations, null); - } - } catch (SingleIndexWriteFailureException e) { - throw e; - } catch (IOException e) { - throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); - } catch (InterruptedException e) { - // reset the interrupt status on the thread - Thread.currentThread().interrupt(); - throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); - } - return null; + if (LOG.isTraceEnabled()) { + LOG.trace("Writing index update:" + mutations + " to table: " + tableReference); + } + try { + if ( + allowLocalUpdates && env != null + && tableReference.getTableName() + .equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) + ) { + try { + throwFailureIfDone(); + ServerIndexUtil.writeLocalUpdates(env.getRegion(), mutations, true); + return null; + } catch (IOException ignored) { + // when it's failed we fall back to the standard & slow way + if (LOG.isDebugEnabled()) { + LOG.debug( + "indexRegion.batchMutate failed and fall back to HTable.batch(). Got error=" + + ignored); } + } + } + // if the client can retry index writes, then we don't need to retry here + HTableFactory factory; + if (disableIndexOnFailure) { + factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES + ? retryingFactory + : noRetriesFactory; + } else { + factory = retryingFactory; + } + try (Table table = factory.getTable(tableReference.get())) { + throwFailureIfDone(); + table.batch(mutations, null); + } + } catch (SingleIndexWriteFailureException e) { + throw e; + } catch (IOException e) { + throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, + PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); + } catch (InterruptedException e) { + // reset the interrupt status on the thread + Thread.currentThread().interrupt(); + throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e, + PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); + } + return null; + } - private void throwFailureIfDone() throws SingleIndexWriteFailureException { - if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) { throw new SingleIndexWriteFailureException( - "Pool closed, not attempting to write to the index!", null); } + private void throwFailureIfDone() throws SingleIndexWriteFailureException { + if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) { + throw new SingleIndexWriteFailureException( + "Pool closed, not attempting to write to the index!", null); + } - } - }); } + }); } + } - protected void propagateFailure(Throwable throwable) throws SingleIndexWriteFailureException { - try { - throw throwable; - } catch (SingleIndexWriteFailureException e1) { - throw e1; - } catch (Throwable e1) { - throw new SingleIndexWriteFailureException("Got an abort notification while writing to the index!", e1); - } - + protected void propagateFailure(Throwable throwable) throws SingleIndexWriteFailureException { + try { + throw throwable; + } catch (SingleIndexWriteFailureException e1) { + throw e1; + } catch (Throwable e1) { + throw new SingleIndexWriteFailureException( + "Got an abort notification while writing to the index!", e1); } - /** - * {@inheritDoc} - *

    - * This method should only be called once. Stopped state ({@link #isStopped()}) is managed by the external - * {@link Stoppable}. This call does not delegate the stop down to the {@link Stoppable} passed in the constructor. - * - * @param why - * the reason for stopping - */ - @Override - public void stop(String why) { - LOG.info("Shutting down " + this.getClass().getSimpleName() + " because " + why); - this.pool.stop(why); - this.retryingFactory.shutdown(); - this.noRetriesFactory.shutdown(); - } + } - @Override - public boolean isStopped() { - return this.stopped.isStopped(); - } + /** + * {@inheritDoc} + *

    + * This method should only be called once. Stopped state ({@link #isStopped()}) is managed + * by the external {@link Stoppable}. This call does not delegate the stop down to the + * {@link Stoppable} passed in the constructor. the reason for stopping + */ + @Override + public void stop(String why) { + LOG.info("Shutting down " + this.getClass().getSimpleName() + " because " + why); + this.pool.stop(why); + this.retryingFactory.shutdown(); + this.noRetriesFactory.shutdown(); + } + + @Override + public boolean isStopped() { + return this.stopped.isStopped(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java index 7f68a51b3bb..a992a886d19 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,39 +23,38 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; - import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; public class DelegateIndexFailurePolicy implements IndexFailurePolicy { - private IndexFailurePolicy delegate; - - public DelegateIndexFailurePolicy(IndexFailurePolicy delegate) { - this.delegate = delegate; - } - - @Override - public void handleFailure(Multimap attempted, Exception cause) - throws IOException { - delegate.handleFailure(attempted, cause); - } - - @Override - public boolean isStopped() { - return delegate.isStopped(); - } - - @Override - public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { - delegate.setup(parent, env); - } - - @Override - public void stop(String arg0) { - delegate.stop(arg0); - } - - public void setDelegate(IndexFailurePolicy delegate) { - this.delegate = delegate; - } + private IndexFailurePolicy delegate; + + public DelegateIndexFailurePolicy(IndexFailurePolicy delegate) { + this.delegate = delegate; + } + + @Override + public void handleFailure(Multimap attempted, Exception cause) + throws IOException { + delegate.handleFailure(attempted, cause); + } + + @Override + public boolean isStopped() { + return delegate.isStopped(); + } + + @Override + public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { + delegate.setup(parent, env); + } + + @Override + public void stop(String arg0) { + delegate.stop(arg0); + } + + public void setDelegate(IndexFailurePolicy delegate) { + this.delegate = delegate; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexCommitter.java index 9520239cbc3..ff745b48205 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexCommitter.java @@ -7,7 +7,7 @@ * 
"License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,22 +17,22 @@ */ package org.apache.phoenix.hbase.index.write; +import java.io.IOException; + import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; - -import java.io.IOException; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; /** * Write the index updates to the index tables */ public interface IndexCommitter extends Stoppable { - void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure); + void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, + boolean disableIndexOnFailure); - public void write(Multimap toWrite, boolean allowLocalUpdates, int clientVersion) - throws IOException; -} \ No newline at end of file + public void write(Multimap toWrite, boolean allowLocalUpdates, + int clientVersion) throws IOException; +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java index 5a7792e56f7..161e83e2499 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexFailurePolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; - import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; /** @@ -35,9 +34,8 @@ public interface IndexFailurePolicy extends Stoppable { /** * Handle the failure of the attempted index updates * @param attempted map of index {@code table -> mutations } to apply - * @param cause reason why there was a failure - * @throws IOException + * @param cause reason why there was a failure */ - public void - handleFailure(Multimap attempted, Exception cause) throws IOException; -} \ No newline at end of file + public void handleFailure(Multimap attempted, Exception cause) + throws IOException; +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java index 5b8a187d0c6..e7fba79dc76 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,11 +32,10 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.index.PhoenixIndexFailurePolicy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Do the actual work of writing to the index tables. 
@@ -48,7 +47,8 @@ public class IndexWriter implements Stoppable { private static final Logger LOGGER = LoggerFactory.getLogger(IndexWriter.class); public static final String INDEX_COMMITTER_CONF_KEY = "phoenix.index.writer.commiter.class"; - public static final String INDEX_FAILURE_POLICY_CONF_KEY = "phoenix.index.writer.failurepolicy.class"; + public static final String INDEX_FAILURE_POLICY_CONF_KEY = + "phoenix.index.writer.failurepolicy.class"; private AtomicBoolean stopped = new AtomicBoolean(false); private IndexCommitter writer; private IndexFailurePolicy failurePolicy; @@ -62,29 +62,32 @@ public class IndexWriter implements Stoppable { /** * @throws IOException if the {@link IndexWriter} or {@link IndexFailurePolicy} cannot be - * instantiated + * instantiated */ public IndexWriter(RegionCoprocessorEnvironment env, String name) throws IOException { this(getCommitter(env), getFailurePolicy(env), env, name, true); } - public IndexWriter(RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) throws IOException { + public IndexWriter(RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) + throws IOException { this(getCommitter(env), getFailurePolicy(env), env, name, disableIndexOnFailure); } - public IndexWriter(RegionCoprocessorEnvironment env, IndexCommitter indexCommitter, String name, boolean disableIndexOnFailure) throws IOException { + + public IndexWriter(RegionCoprocessorEnvironment env, IndexCommitter indexCommitter, String name, + boolean disableIndexOnFailure) throws IOException { this(indexCommitter, getFailurePolicy(env), env, name, disableIndexOnFailure); } public static IndexCommitter getCommitter(RegionCoprocessorEnvironment env) throws IOException { - return getCommitter(env,TrackingParallelWriterIndexCommitter.class); + return getCommitter(env, TrackingParallelWriterIndexCommitter.class); } - - public static IndexCommitter getCommitter(RegionCoprocessorEnvironment env, Class defaultClass) throws IOException { + + public static IndexCommitter getCommitter(RegionCoprocessorEnvironment env, + Class defaultClass) throws IOException { Configuration conf = env.getConfiguration(); try { IndexCommitter committer = - conf.getClass(INDEX_COMMITTER_CONF_KEY, defaultClass, - IndexCommitter.class).newInstance(); + conf.getClass(INDEX_COMMITTER_CONF_KEY, defaultClass, IndexCommitter.class).newInstance(); return committer; } catch (InstantiationException e) { throw new IOException(e); @@ -94,12 +97,11 @@ public static IndexCommitter getCommitter(RegionCoprocessorEnvironment env, Clas } public static IndexFailurePolicy getFailurePolicy(RegionCoprocessorEnvironment env) - throws IOException { + throws IOException { Configuration conf = env.getConfiguration(); try { - IndexFailurePolicy committer = - conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, PhoenixIndexFailurePolicy.class, - IndexFailurePolicy.class).newInstance(); + IndexFailurePolicy committer = conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY, + PhoenixIndexFailurePolicy.class, IndexFailurePolicy.class).newInstance(); return committer; } catch (InstantiationException e) { throw new IOException(e); @@ -109,20 +111,18 @@ public static IndexFailurePolicy getFailurePolicy(RegionCoprocessorEnvironment e } public IndexWriter(IndexCommitter committer, IndexFailurePolicy policy, - RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) { + RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) { this(committer, policy); this.writer.setup(this, env, 
name, disableIndexOnFailure); this.failurePolicy.setup(this, env); } + /** * Directly specify the {@link IndexCommitter} and {@link IndexFailurePolicy}. Both are expected * to be fully setup before calling. - * @param committer - * @param policy - * @param env */ public IndexWriter(IndexCommitter committer, IndexFailurePolicy policy, - RegionCoprocessorEnvironment env, String name) { + RegionCoprocessorEnvironment env, String name) { this(committer, policy); this.writer.setup(this, env, name, true); this.failurePolicy.setup(this, env); @@ -132,7 +132,7 @@ public IndexWriter(IndexCommitter committer, IndexFailurePolicy policy, * Create an {@link IndexWriter} with an already setup {@link IndexCommitter} and * {@link IndexFailurePolicy}. * @param committer to write updates - * @param policy to handle failures + * @param policy to handle failures */ IndexWriter(IndexCommitter committer, IndexFailurePolicy policy) { this.writer = committer; @@ -141,11 +141,9 @@ public IndexWriter(IndexCommitter committer, IndexFailurePolicy policy, /** * see #writeAndHandleFailure(Collection). - * @param toWrite - * @throws IOException */ - public void writeAndHandleFailure(Multimap toWrite, - boolean allowLocalUpdates, int clientVersion) throws IOException { + public void writeAndHandleFailure(Multimap toWrite, + boolean allowLocalUpdates, int clientVersion) throws IOException { try { write(toWrite, allowLocalUpdates, clientVersion); if (LOGGER.isTraceEnabled()) { @@ -166,15 +164,14 @@ public void writeAndHandleFailure(Multimap t * then decides how to handle the failure. By default, we use a {@link KillServerOnFailurePolicy}, * which ensures that the server crashes when an index write fails, ensuring that we get WAL * replay of the index edits. - * @param indexUpdates Updates to write + * @param indexUpdates Updates to write * @param clientVersion version of the client - * @throws IOException */ public void writeAndHandleFailure(Collection> indexUpdates, - boolean allowLocalUpdates, int clientVersion) throws IOException { - // convert the strings to htableinterfaces to which we can talk and group by TABLE - Multimap toWrite = resolveTableReferences(indexUpdates); - writeAndHandleFailure(toWrite, allowLocalUpdates, clientVersion); + boolean allowLocalUpdates, int clientVersion) throws IOException { + // convert the strings to htableinterfaces to which we can talk and group by TABLE + Multimap toWrite = resolveTableReferences(indexUpdates); + writeAndHandleFailure(toWrite, allowLocalUpdates, clientVersion); } /** @@ -190,24 +187,24 @@ public void writeAndHandleFailure(Collection> indexUpdate * to ensure a timely recovery of the failed index writes. * @param toWrite Updates to write * @throws IndexWriteException if we cannot successfully write to the index. Whether or not we - * stop early depends on the {@link IndexCommitter}. + * stop early depends on the {@link IndexCommitter}. 
*/ - public void write(Collection> toWrite, int clientVersion) throws IOException { - write(resolveTableReferences(toWrite), false, clientVersion); - } + public void write(Collection> toWrite, int clientVersion) + throws IOException { + write(resolveTableReferences(toWrite), false, clientVersion); + } - public void write(Collection> toWrite, boolean allowLocalUpdates, int clientVersion) throws IOException { - write(resolveTableReferences(toWrite), allowLocalUpdates, clientVersion); - } - - /** + public void write(Collection> toWrite, boolean allowLocalUpdates, + int clientVersion) throws IOException { + write(resolveTableReferences(toWrite), allowLocalUpdates, clientVersion); + } + + /** * see #write(Collection) - * @param toWrite - * @throws IndexWriteException */ - public void write(Multimap toWrite, boolean allowLocalUpdates, int clientVersion) - throws IOException { - this.writer.write(toWrite, allowLocalUpdates, clientVersion); + public void write(Multimap toWrite, boolean allowLocalUpdates, + int clientVersion) throws IOException { + this.writer.write(toWrite, allowLocalUpdates, clientVersion); } /** @@ -215,13 +212,13 @@ public void write(Multimap toWrite, boolean * @param indexUpdates from the index builder * @return pairs that can then be written by an {@link IndexWriter}. */ - protected Multimap resolveTableReferences( - Collection> indexUpdates) { - Multimap updates = ArrayListMultimap - . create(); + protected Multimap + resolveTableReferences(Collection> indexUpdates) { + Multimap updates = + ArrayListMultimap. create(); // simple map to make lookups easy while we build the map of tables to create Map tables = - new HashMap(updates.size()); + new HashMap(updates.size()); for (Pair entry : indexUpdates) { byte[] tableName = entry.getSecond(); ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName); @@ -251,4 +248,4 @@ public void stop(String why) { public boolean isStopped() { return this.stopped.get(); } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java index 01f587bd18b..aec731587c8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,11 +26,8 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.HConstants; import org.apache.phoenix.hbase.index.table.HTableFactory; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.apache.phoenix.hbase.index.util.IndexManagementUtil; -import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.ServerUtil.ConnectionFactory; import org.apache.phoenix.util.ServerUtil.ConnectionType; import org.slf4j.Logger; @@ -42,11 +39,11 @@ public class IndexWriterUtils { /** * Maximum number of threads to allow per-table when writing. 
Each writer thread (from - * IndexWriterUtils#NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY) has a single HTable. - * However, each table is backed by a threadpool to manage the updates to that table. this - * specifies the number of threads to allow in each of those tables. Generally, you shouldn't need - * to change this, unless you have a small number of indexes to which most of the writes go. - * Defaults to: {@value #DEFAULT_NUM_PER_TABLE_THREADS}. + * IndexWriterUtils#NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY) has a single HTable. However, + * each table is backed by a threadpool to manage the updates to that table. this specifies the + * number of threads to allow in each of those tables. Generally, you shouldn't need to change + * this, unless you have a small number of indexes to which most of the writes go. Defaults to: + * {@value #DEFAULT_NUM_PER_TABLE_THREADS}. *

    * For tables to which there are not a lot of writes, the thread pool automatically will decrease * the number of threads to one (though it can burst up to the specified max for any given table), @@ -60,91 +57,94 @@ public class IndexWriterUtils { * coprocesor hooks, so we can't modify this behavior. */ public static final String INDEX_WRITER_PER_TABLE_THREADS_CONF_KEY = - "index.writer.threads.pertable.max"; + "index.writer.threads.pertable.max"; public static final int DEFAULT_NUM_PER_TABLE_THREADS = Integer.MAX_VALUE; /** Configuration key that HBase uses to set the max number of threads for an HTable */ public static final String HTABLE_THREAD_KEY = "hbase.htable.threads.max"; - public static final String INDEX_WRITES_THREAD_MAX_PER_REGIONSERVER_KEY = "phoenix.index.writes.threads.max"; - public static final String HTABLE_KEEP_ALIVE_KEY = "hbase.htable.threads.keepalivetime"; - - @Deprecated - public static final String INDEX_WRITER_RPC_RETRIES_NUMBER = "phoenix.index.writes.rpc.retries.number"; - /** - * Based on the logic in HBase's AsyncProcess, a default of 11 retries with a pause of 100ms - * approximates 48 sec total retry time (factoring in backoffs). The total time should be less - * than HBase's rpc timeout (default of 60 sec) or else the client will retry before receiving - * the response - */ - @Deprecated - public static final int DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER = 11; - @Deprecated - public static final String INDEX_WRITER_RPC_PAUSE = "phoenix.index.writes.rpc.pause"; - @Deprecated - public static final int DEFAULT_INDEX_WRITER_RPC_PAUSE = 100; + public static final String INDEX_WRITES_THREAD_MAX_PER_REGIONSERVER_KEY = + "phoenix.index.writes.threads.max"; + public static final String HTABLE_KEEP_ALIVE_KEY = "hbase.htable.threads.keepalivetime"; + + @Deprecated + public static final String INDEX_WRITER_RPC_RETRIES_NUMBER = + "phoenix.index.writes.rpc.retries.number"; + /** + * Based on the logic in HBase's AsyncProcess, a default of 11 retries with a pause of 100ms + * approximates 48 sec total retry time (factoring in backoffs). 
The total time should be less + * than HBase's rpc timeout (default of 60 sec) or else the client will retry before receiving the + * response + */ + @Deprecated + public static final int DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER = 11; + @Deprecated + public static final String INDEX_WRITER_RPC_PAUSE = "phoenix.index.writes.rpc.pause"; + @Deprecated + public static final int DEFAULT_INDEX_WRITER_RPC_PAUSE = 100; private IndexWriterUtils() { // private ctor for utilites } - public static HTableFactory getDefaultDelegateHTableFactory(RegionCoprocessorEnvironment env) { - return new CoprocessorHConnectionTableFactory(env, - ConnectionType.INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS); + public static HTableFactory getDefaultDelegateHTableFactory(RegionCoprocessorEnvironment env) { + return new CoprocessorHConnectionTableFactory(env, + ConnectionType.INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS); + } + + /** + * Retry server-server index write rpc only once, and let the client retry the data write instead + * to avoid tying up the handler + */ + public static HTableFactory getNoRetriesHTableFactory(RegionCoprocessorEnvironment env) { + return new CoprocessorHConnectionTableFactory(env, + ConnectionType.INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES); + } + + /** + * {@code HTableFactory} that creates HTables by using a {@link CoprocessorHConnection} This + * factory was added as a workaround to the bug reported in + * https://issues.apache.org/jira/browse/HBASE-18359 + */ + public static class CoprocessorHConnectionTableFactory implements HTableFactory { + @GuardedBy("CoprocessorHConnectionTableFactory.this") + private RegionCoprocessorEnvironment env; + private ConnectionType connectionType; + + CoprocessorHConnectionTableFactory(RegionCoprocessorEnvironment env, + ConnectionType connectionType) { + this.env = env; + this.connectionType = connectionType; } - /** - * Retry server-server index write rpc only once, and let the client retry the data write - * instead to avoid tying up the handler - */ - public static HTableFactory getNoRetriesHTableFactory(RegionCoprocessorEnvironment env) { - return new CoprocessorHConnectionTableFactory(env, - ConnectionType.INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES); + public Connection getConnection() throws IOException { + return ConnectionFactory.getConnection(connectionType, env); } - /** - * {@code HTableFactory} that creates HTables by using a {@link CoprocessorHConnection} This - * factory was added as a workaround to the bug reported in - * https://issues.apache.org/jira/browse/HBASE-18359 - */ - public static class CoprocessorHConnectionTableFactory implements HTableFactory { - @GuardedBy("CoprocessorHConnectionTableFactory.this") - private RegionCoprocessorEnvironment env; - private ConnectionType connectionType; - - CoprocessorHConnectionTableFactory(RegionCoprocessorEnvironment env, ConnectionType connectionType) { - this.env = env; - this.connectionType = connectionType; - } + @Override + public Table getTable(ImmutableBytesPtr tablename) throws IOException { + return getTable(tablename, null); + } - public Connection getConnection() throws IOException { - return ConnectionFactory.getConnection(connectionType, env); - } - @Override - public Table getTable(ImmutableBytesPtr tablename) throws IOException { - return getTable(tablename, null); - } + @Override + public synchronized void shutdown() { + // We need not close the cached connections as they are shared across the server. 
+ } - @Override - public synchronized void shutdown() { - // We need not close the cached connections as they are shared across the server. + @Override + public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException { + Connection connection = null; + try { + connection = getConnection(); + if (pool == null) { + return connection.getTable(TableName.valueOf(tablename.copyBytesIfNecessary())); } - - @Override - public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) - throws IOException { - Connection connection = null; - try { - connection = getConnection(); - if (pool == null) { - return connection.getTable(TableName.valueOf(tablename.copyBytesIfNecessary())); - } - return connection.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool); - } catch (IllegalArgumentException e) { - if (connection == null || connection.isClosed()) { - throw new IOException("Connection is null or closed. Please retry again."); - } - throw e; - } + return connection.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool); + } catch (IllegalArgumentException e) { + if (connection == null || connection.isClosed()) { + throw new IOException("Connection is null or closed. Please retry again."); } + throw e; + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java index efb7a742c77..962d498e4ec 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,11 +24,10 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.phoenix.hbase.index.builder.FatalIndexBuildingFailureException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** * Naive failure policy - kills the server on which it resides */ @@ -57,15 +56,15 @@ public boolean isStopped() { } @Override - public void - handleFailure(Multimap attempted, Exception cause) throws IOException{ + public void handleFailure(Multimap attempted, Exception cause) + throws IOException { // cleanup resources this.stop("Killing ourselves because of an error:" + cause); // notify the regionserver of the failure String msg = - "Could not update the index table, killing server region because couldn't write to an index table"; + "Could not update the index table, killing server region because couldn't write to an index table"; LOGGER.error(msg, cause); - throw new FatalIndexBuildingFailureException(msg,cause); + throw new FatalIndexBuildingFailureException(msg, cause); } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LazyParallelWriterIndexCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LazyParallelWriterIndexCommitter.java index 4ed6925311c..ddbf9f003e8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LazyParallelWriterIndexCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LazyParallelWriterIndexCommitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,17 +19,15 @@ /** * Like the {@link ParallelWriterIndexCommitter}, but does not block - * - * */ public class LazyParallelWriterIndexCommitter extends AbstractParallelWriterIndexCommitter { - // for testing - public LazyParallelWriterIndexCommitter(String hbaseVersion) { - super(hbaseVersion); - } + // for testing + public LazyParallelWriterIndexCommitter(String hbaseVersion) { + super(hbaseVersion); + } - public LazyParallelWriterIndexCommitter() { - super(); - } + public LazyParallelWriterIndexCommitter() { + super(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LeaveIndexActiveFailurePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LeaveIndexActiveFailurePolicy.java index 7564fd9cc9d..6feab40960c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LeaveIndexActiveFailurePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/LeaveIndexActiveFailurePolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,40 +23,37 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; -import org.apache.phoenix.util.ServerUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; +import org.apache.phoenix.util.ServerUtil; /** - * - * Implementation of IndexFailurePolicy which takes no action when an - * index cannot be updated. As with the standard flow of control, an - * exception will still be thrown back to the client. Using this failure - * policy means that the action to take upon failure is completely up - * to the client. - * + * Implementation of IndexFailurePolicy which takes no action when an index cannot be updated. As + * with the standard flow of control, an exception will still be thrown back to the client. Using + * this failure policy means that the action to take upon failure is completely up to the client. */ public class LeaveIndexActiveFailurePolicy implements IndexFailurePolicy { - @Override - public boolean isStopped() { - return false; - } - - @Override - public void stop(String arg0) { - } - - @Override - public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { - } - - @Override - public void handleFailure(Multimap attempted, Exception cause) - throws IOException { - // get timestamp of first cell - long ts = attempted.values().iterator().next().getFamilyCellMap().values().iterator().next().get(0).getTimestamp(); - throw ServerUtil.wrapInDoNotRetryIOException("Unable to update the following indexes: " + attempted.keySet(), cause, ts); - } + @Override + public boolean isStopped() { + return false; + } + + @Override + public void stop(String arg0) { + } + + @Override + public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { + } + + @Override + public void handleFailure(Multimap attempted, Exception cause) + throws IOException { + // get timestamp of first cell + long ts = attempted.values().iterator().next().getFamilyCellMap().values().iterator().next() + .get(0).getTimestamp(); + throw ServerUtil.wrapInDoNotRetryIOException( + "Unable to update the following indexes: " + attempted.keySet(), cause, ts); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java index 46b44aa2e5a..1c3c728ad9a 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.write; @@ -15,47 +23,46 @@ import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException; import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** - * Write index updates to the index tables in parallel. We attempt to early exit from the writes if any of the index - * updates fails. Completion is determined by the following criteria: * + * Write index updates to the index tables in parallel. We attempt to early exit from the writes if + * any of the index updates fails. Completion is determined by the following criteria: * *

 * <ol>
 * <li>All index writes have returned, OR</li>
 * <li>Any single index write has failed</li>
 * </ol>
    - * We attempt to quickly determine if any write has failed and not write to the remaining indexes to ensure a timely - * recovery of the failed index writes. + * We attempt to quickly determine if any write has failed and not write to the remaining indexes to + * ensure a timely recovery of the failed index writes. */ public class ParallelWriterIndexCommitter extends AbstractParallelWriterIndexCommitter { - private static final Logger LOGGER = LoggerFactory.getLogger(ParallelWriterIndexCommitter.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ParallelWriterIndexCommitter.class); + public ParallelWriterIndexCommitter() { + } - public ParallelWriterIndexCommitter() {} - - // For testing - public ParallelWriterIndexCommitter(String hbaseVersion) { - super(hbaseVersion); - } + // For testing + public ParallelWriterIndexCommitter(String hbaseVersion) { + super(hbaseVersion); + } + @Override + public void write(Multimap toWrite, + final boolean allowLocalUpdates, final int clientVersion) + throws SingleIndexWriteFailureException { - - @Override - public void write(Multimap toWrite, final boolean allowLocalUpdates, final int clientVersion) throws SingleIndexWriteFailureException { - - super.write(toWrite, allowLocalUpdates, clientVersion); - // actually submit the tasks to the pool and wait for them to finish/fail - try { - pool.submitUninterruptible(tasks); - } catch (EarlyExitFailure e) { - propagateFailure(e); - } catch (ExecutionException e) { - LOGGER.error("Found a failed index update!"); - propagateFailure(e.getCause()); - } - + super.write(toWrite, allowLocalUpdates, clientVersion); + // actually submit the tasks to the pool and wait for them to finish/fail + try { + pool.submitUninterruptible(tasks); + } catch (EarlyExitFailure e) { + propagateFailure(e); + } catch (ExecutionException e) { + LOGGER.error("Found a failed index update!"); + propagateFailure(e.getCause()); } -} \ No newline at end of file + + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java index 4076053be30..25d0ebeca43 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,117 +34,110 @@ import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Used to recover failed index edits during WAL replay *

    - * We attempt to do the index updates in parallel using a backing threadpool. All threads are daemon threads, so it will - * not block the region from shutting down. + * We attempt to do the index updates in parallel using a backing threadpool. All threads are daemon + * threads, so it will not block the region from shutting down. */ public class RecoveryIndexWriter extends IndexWriter { - private static final Logger LOGGER = LoggerFactory.getLogger(RecoveryIndexWriter.class); - private Set nonExistingTablesList = new HashSet(); - private Admin admin; + private static final Logger LOGGER = LoggerFactory.getLogger(RecoveryIndexWriter.class); + private Set nonExistingTablesList = + new HashSet(); + private Admin admin; - /** - * Directly specify the {@link IndexCommitter} and {@link IndexFailurePolicy}. Both are expected to be fully setup - * before calling. - * - * @param policy - * @param env - * @param name - * @throws IOException - */ - public RecoveryIndexWriter(IndexFailurePolicy policy, RegionCoprocessorEnvironment env, String name) - throws IOException { - super(new TrackingParallelWriterIndexCommitter(), policy, env, name); - Connection hConn = null; - try { - hConn = ConnectionFactory.createConnection(env.getConfiguration()); - this.admin = hConn.getAdmin(); - } catch (Exception e) { - // Close the connection only if an exception occurs - if (hConn != null) { - hConn.close(); - } - throw e; - } + /** + * Directly specify the {@link IndexCommitter} and {@link IndexFailurePolicy}. Both are expected + * to be fully setup before calling. + */ + public RecoveryIndexWriter(IndexFailurePolicy policy, RegionCoprocessorEnvironment env, + String name) throws IOException { + super(new TrackingParallelWriterIndexCommitter(), policy, env, name); + Connection hConn = null; + try { + hConn = ConnectionFactory.createConnection(env.getConfiguration()); + this.admin = hConn.getAdmin(); + } catch (Exception e) { + // Close the connection only if an exception occurs + if (hConn != null) { + hConn.close(); + } + throw e; } + } - @Override - public void write(Collection> toWrite, boolean allowLocalUpdates, int clientVersion) throws IOException { - try { - write(resolveTableReferences(toWrite), allowLocalUpdates, clientVersion); - } catch (MultiIndexWriteFailureException e) { - for (HTableInterfaceReference table : e.getFailedTables()) { - if (!admin.tableExists(TableName.valueOf(table.getTableName()))) { - LOGGER.warn("Failure due to non existing table: " + table.getTableName()); - nonExistingTablesList.add(table); - } else { - throw e; - } - } + @Override + public void write(Collection> toWrite, boolean allowLocalUpdates, + int clientVersion) throws IOException { + try { + write(resolveTableReferences(toWrite), allowLocalUpdates, clientVersion); + } catch (MultiIndexWriteFailureException e) { + for (HTableInterfaceReference table : e.getFailedTables()) { + if (!admin.tableExists(TableName.valueOf(table.getTableName()))) { + LOGGER.warn("Failure due to non existing table: " + table.getTableName()); + nonExistingTablesList.add(table); + } else { + throw e; } + } } + } - /** - * Convert the passed index updates to {@link HTableInterfaceReference}s. - * - * @param indexUpdates - * from the index builder - * @return pairs that can then be written by an {@link RecoveryIndexWriter}. - */ - @Override - protected Multimap resolveTableReferences( - Collection> indexUpdates) { - Multimap updates = ArrayListMultimap - . create(); + /** + * Convert the passed index updates to {@link HTableInterfaceReference}s. 
from the index builder + * @return pairs that can then be written by an {@link RecoveryIndexWriter}. + */ + @Override + protected Multimap + resolveTableReferences(Collection> indexUpdates) { + Multimap updates = + ArrayListMultimap. create(); - // simple map to make lookups easy while we build the map of tables to create - Map tables = new HashMap( - updates.size()); - for (Pair entry : indexUpdates) { - byte[] tableName = entry.getSecond(); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName); - HTableInterfaceReference table = tables.get(ptr); - if (nonExistingTablesList.contains(table)) { - LOGGER.debug("Edits found for non existing table: " + - table.getTableName() + " so skipping it!!"); - continue; - } - if (table == null) { - table = new HTableInterfaceReference(ptr); - tables.put(ptr, table); - } - updates.put(table, entry.getFirst()); + // simple map to make lookups easy while we build the map of tables to create + Map tables = + new HashMap(updates.size()); + for (Pair entry : indexUpdates) { + byte[] tableName = entry.getSecond(); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName); + HTableInterfaceReference table = tables.get(ptr); + if (nonExistingTablesList.contains(table)) { + LOGGER.debug( + "Edits found for non existing table: " + table.getTableName() + " so skipping it!!"); + continue; + } + if (table == null) { + table = new HTableInterfaceReference(ptr); + tables.put(ptr, table); + } + updates.put(table, entry.getFirst()); - } - return updates; } + return updates; + } - @Override - public void stop(String why) { - super.stop(why); - if (admin != null) { - if (admin.getConnection() != null) { - try { - admin.getConnection().close(); - } catch (IOException e) { - LOGGER.error("Closing the connection failed: ", e); - } - } - try { - admin.close(); - } catch (IOException e) { - LOGGER.error("Closing the admin failed: ", e); - } + @Override + public void stop(String why) { + super.stop(why); + if (admin != null) { + if (admin.getConnection() != null) { + try { + admin.getConnection().close(); + } catch (IOException e) { + LOGGER.error("Closing the connection failed: ", e); } + } + try { + admin.close(); + } catch (IOException e) { + LOGGER.error("Closing the admin failed: ", e); + } } - -} \ No newline at end of file + } + +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java index 569ecdca8f1..ddd9bc8108d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java @@ -1,14 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.write; +import static org.apache.phoenix.util.ServerUtil.wrapInDoNotRetryIOException; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -41,262 +51,278 @@ import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.index.PhoenixIndexFailurePolicy; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.ServerIndexUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; -import static org.apache.phoenix.util.ServerUtil.wrapInDoNotRetryIOException; - /** - * Like the {@link ParallelWriterIndexCommitter}, but blocks until all writes have attempted to allow the caller to - * retrieve the failed and succeeded index updates. Therefore, this class will be a lot slower, in the face of failures, - * when compared to the {@link ParallelWriterIndexCommitter} (though as fast for writes), so it should be used only when - * you need to at least attempt all writes and know their result; for instance, this is fine for doing WAL recovery - - * it's not a performance intensive situation and we want to limit the the edits we need to retry. + * Like the {@link ParallelWriterIndexCommitter}, but blocks until all writes have attempted to + * allow the caller to retrieve the failed and succeeded index updates. Therefore, this class will + * be a lot slower, in the face of failures, when compared to the + * {@link ParallelWriterIndexCommitter} (though as fast for writes), so it should be used only when + * you need to at least attempt all writes and know their result; for instance, this is fine for + * doing WAL recovery - it's not a performance intensive situation and we want to limit the the + * edits we need to retry. *

    - * On failure to #write(Multimap), we return a MultiIndexWriteFailureException that contains the list of - * {@link HTableInterfaceReference} that didn't complete successfully. + * On failure to #write(Multimap), we return a MultiIndexWriteFailureException that contains the + * list of {@link HTableInterfaceReference} that didn't complete successfully. *

 * Failures to write to the index can happen several different ways:
 * <ol>
- * <li>this is {@link #stop(String) stopped} or aborted (via the passed {@link Abortable}. This causing any
- * pending tasks to fail whatever they are doing as fast as possible. Any writes that have not begun are not even
- * attempted and marked as failures.</li>
- * <li>A batch write fails. This is the generic HBase write failure - it may occur because the index table is not
- * available, .META. or -ROOT- is unavailable, or any other (of many) possible HBase exceptions.</li>
+ * <li>this is {@link #stop(String) stopped} or aborted (via the passed {@link Abortable}.
+ * This causing any pending tasks to fail whatever they are doing as fast as possible. Any writes
+ * that have not begun are not even attempted and marked as failures.</li>
+ * <li>A batch write fails. This is the generic HBase write failure - it may occur because the index
+ * table is not available, .META. or -ROOT- is unavailable, or any other (of many) possible HBase
+ * exceptions.</li>
 * </ol>
    - * Regardless of how the write fails, we still wait for all writes to complete before passing the failure back to the - * client. + * Regardless of how the write fails, we still wait for all writes to complete before passing the + * failure back to the client. */ public class TrackingParallelWriterIndexCommitter implements IndexCommitter { - private static final Logger LOGGER = - LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class); - public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "phoenix.index.writer.threads.max"; - private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10; - private static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = "phoenix.index.writer.threads.keepalivetime"; + public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = + "phoenix.index.writer.threads.max"; + private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10; + private static final String INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY = + "phoenix.index.writer.threads.keepalivetime"; - private TaskRunner pool; - private HTableFactory retryingFactory; - private HTableFactory noRetriesFactory; - private Stoppable stopped; - private RegionCoprocessorEnvironment env; - private KeyValueBuilder kvBuilder; - protected boolean disableIndexOnFailure = false; + private TaskRunner pool; + private HTableFactory retryingFactory; + private HTableFactory noRetriesFactory; + private Stoppable stopped; + private RegionCoprocessorEnvironment env; + private KeyValueBuilder kvBuilder; + protected boolean disableIndexOnFailure = false; - // This relies on Hadoop Configuration to handle warning about deprecated configs and - // to set the correct non-deprecated configs when an old one shows up. - static { - Configuration.addDeprecation("index.writer.threads.max", NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY); - Configuration.addDeprecation("index.writer.threads.keepalivetime", INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY); - } + // This relies on Hadoop Configuration to handle warning about deprecated configs and + // to set the correct non-deprecated configs when an old one shows up. + static { + Configuration.addDeprecation("index.writer.threads.max", + NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY); + Configuration.addDeprecation("index.writer.threads.keepalivetime", + INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY); + } - // for testing - public TrackingParallelWriterIndexCommitter(String hbaseVersion) { - kvBuilder = KeyValueBuilder.get(hbaseVersion); - } + // for testing + public TrackingParallelWriterIndexCommitter(String hbaseVersion) { + kvBuilder = KeyValueBuilder.get(hbaseVersion); + } - public TrackingParallelWriterIndexCommitter() { - } + public TrackingParallelWriterIndexCommitter() { + } - @Override - public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, boolean disableIndexOnFailure) { - this.env = env; - this.disableIndexOnFailure = disableIndexOnFailure; - Configuration conf = env.getConfiguration(); - setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env), - ThreadPoolManager.getExecutor( - new ThreadPoolBuilder(name, conf).setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, - DEFAULT_CONCURRENT_INDEX_WRITER_THREADS).setCoreTimeout( - INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env), parent, env); - this.kvBuilder = KeyValueBuilder.get(env.getHBaseVersion()); - } - - /** - * Setup this. - *

    - * Exposed for TESTING - */ - void setup(HTableFactory factory, ExecutorService pool, Stoppable stop, - RegionCoprocessorEnvironment env) { - this.pool = new WaitForCompletionTaskRunner(pool); - this.retryingFactory = factory; - this.noRetriesFactory = IndexWriterUtils.getNoRetriesHTableFactory(env); - this.stopped = stop; - this.env = env; - } - - @Override - public void write(Multimap toWrite, final boolean allowLocalUpdates, final int clientVersion) throws IOException { - Set>> entries = toWrite.asMap().entrySet(); - TaskBatch tasks = new TaskBatch(entries.size()); - List tables = new ArrayList(entries.size()); - for (Entry> entry : entries) { - // get the mutations for each table. We leak the implementation here a little bit to save - // doing a complete copy over of all the index update for each table. - final List mutations = kvBuilder.cloneIfNecessary((List)entry.getValue()); - // track each reference so we can get at it easily later, when determing failures - final HTableInterfaceReference tableReference = entry.getKey(); - final RegionCoprocessorEnvironment env = this.env; - if (env != null - && !allowLocalUpdates - && tableReference.getTableName().equals( - env.getRegion().getTableDescriptor().getTableName().getNameAsString())) { - continue; - } - tables.add(tableReference); + @Override + public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name, + boolean disableIndexOnFailure) { + this.env = env; + this.disableIndexOnFailure = disableIndexOnFailure; + Configuration conf = env.getConfiguration(); + setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env), + ThreadPoolManager.getExecutor(new ThreadPoolBuilder(name, conf) + .setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, + DEFAULT_CONCURRENT_INDEX_WRITER_THREADS) + .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env), + parent, env); + this.kvBuilder = KeyValueBuilder.get(env.getHBaseVersion()); + } - /* - * Write a batch of index updates to an index table. This operation stops (is cancelable) via two - * mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the running thread. - * The former will only work if we are not in the midst of writing the current batch to the table, though we - * do check these status variables before starting and before writing the batch. The latter usage, - * interrupting the thread, will work in the previous situations as was at some points while writing the - * batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't - * elaborate when is supports an interrupt). - */ - tasks.add(new Task() { + /** + * Setup this. + *

    + * Exposed for TESTING + */ + void setup(HTableFactory factory, ExecutorService pool, Stoppable stop, + RegionCoprocessorEnvironment env) { + this.pool = new WaitForCompletionTaskRunner(pool); + this.retryingFactory = factory; + this.noRetriesFactory = IndexWriterUtils.getNoRetriesHTableFactory(env); + this.stopped = stop; + this.env = env; + } - /** - * Do the actual write to the primary table. - */ - @SuppressWarnings("deprecation") - @Override - public Boolean call() throws Exception { - try { - // this may have been queued, but there was an abort/stop so we try to early exit - throwFailureIfDone(); - if (allowLocalUpdates - && env != null - && tableReference.getTableName().equals( - env.getRegion().getTableDescriptor().getTableName().getNameAsString())) { - try { - throwFailureIfDone(); - ServerIndexUtil.writeLocalUpdates(env.getRegion(), mutations, true); - return Boolean.TRUE; - } catch (IOException ignord) { - // when it's failed we fall back to the standard & slow way - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("indexRegion.batchMutate failed and fall " + - "back to HTable.batch(). Got error=" + ignord); - } - } - } + @Override + public void write(Multimap toWrite, + final boolean allowLocalUpdates, final int clientVersion) throws IOException { + Set>> entries = toWrite.asMap().entrySet(); + TaskBatch tasks = new TaskBatch(entries.size()); + List tables = new ArrayList(entries.size()); + for (Entry> entry : entries) { + // get the mutations for each table. We leak the implementation here a little bit to save + // doing a complete copy over of all the index update for each table. + final List mutations = + kvBuilder.cloneIfNecessary((List) entry.getValue()); + // track each reference so we can get at it easily later, when determing failures + final HTableInterfaceReference tableReference = entry.getKey(); + final RegionCoprocessorEnvironment env = this.env; + if ( + env != null && !allowLocalUpdates + && tableReference.getTableName() + .equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) + ) { + continue; + } + tables.add(tableReference); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Writing index update:" + mutations + " to table: " - + tableReference); - } - // if the client can retry index writes, then we don't need to retry here - HTableFactory factory; - if (disableIndexOnFailure) { - factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES ? retryingFactory : noRetriesFactory; - } - else { - factory = retryingFactory; - } - try (Table table = factory.getTable(tableReference.get())) { - throwFailureIfDone(); - table.batch(mutations, null); - } - } catch (InterruptedException e) { - // reset the interrupt status on the thread - Thread.currentThread().interrupt(); - throw e; - } catch (Exception e) { - throw e; - } - return Boolean.TRUE; - } - - private void throwFailureIfDone() throws SingleIndexWriteFailureException { - if (stopped.isStopped() - || (env != null && (env.getConnection() == null || env.getConnection().isClosed() - || env.getConnection().isAborted())) - || Thread.currentThread().isInterrupted()) { throw new SingleIndexWriteFailureException( - "Pool closed, not attempting to write to the index!", null); } + /* + * Write a batch of index updates to an index table. This operation stops (is cancelable) via + * two mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the + * running thread. 
The former will only work if we are not in the midst of writing the current + * batch to the table, though we do check these status variables before starting and before + * writing the batch. The latter usage, interrupting the thread, will work in the previous + * situations as was at some points while writing the batch, depending on the underlying + * writer implementation (HTableInterface#batch is blocking, but doesn't elaborate when is + * supports an interrupt). + */ + tasks.add(new Task() { + /** + * Do the actual write to the primary table. + */ + @SuppressWarnings("deprecation") + @Override + public Boolean call() throws Exception { + try { + // this may have been queued, but there was an abort/stop so we try to early exit + throwFailureIfDone(); + if ( + allowLocalUpdates && env != null + && tableReference.getTableName() + .equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) + ) { + try { + throwFailureIfDone(); + ServerIndexUtil.writeLocalUpdates(env.getRegion(), mutations, true); + return Boolean.TRUE; + } catch (IOException ignord) { + // when it's failed we fall back to the standard & slow way + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("indexRegion.batchMutate failed and fall " + + "back to HTable.batch(). Got error=" + ignord); } - }); - } - - Pair, List>> resultsAndFutures = null; - try { - LOGGER.debug("Waiting on index update tasks to complete..."); - resultsAndFutures = this.pool.submitUninterruptible(tasks); - } catch (ExecutionException e) { - throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e); - } catch (EarlyExitFailure e) { - throw new RuntimeException("Stopped while waiting for batch, quiting!", e); - } - - // track the failures. We only ever access this on return from our calls, so no extra - // synchronization is needed. We could update all the failures as we find them, but that add a - // lot of locking overhead, and just doing the copy later is about as efficient. - List failedTables = new ArrayList(); - List> failedFutures = new ArrayList<>(); - int index = 0; - for (Boolean result : resultsAndFutures.getFirst()) { - // there was a failure - if (result == null) { - // we know which table failed by the index of the result - failedTables.add(tables.get(index)); - failedFutures.add(resultsAndFutures.getSecond().get(index)); + } } - index++; - } - // if any of the tasks failed, then we need to propagate the failure - if (failedTables.size() > 0) { - Throwable cause = logFailedTasksAndGetCause(failedFutures, failedTables); - // make the list unmodifiable to avoid any more synchronization concerns - MultiIndexWriteFailureException exception = null; - // DisableIndexOnFailure flag is used by the old design. Setting the cause in MIWFE - // does not work for old design, so only do this for new design + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference); + } + // if the client can retry index writes, then we don't need to retry here + HTableFactory factory; if (disableIndexOnFailure) { - exception = new MultiIndexWriteFailureException(Collections.unmodifiableList(failedTables), - disableIndexOnFailure && PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); - throw exception; + factory = clientVersion < MetaDataProtocol.MIN_CLIENT_RETRY_INDEX_WRITES + ? 
retryingFactory + : noRetriesFactory; } else { - exception = new MultiIndexWriteFailureException(Collections.unmodifiableList(failedTables), - false, cause); - throw wrapInDoNotRetryIOException("At least one index write failed after retries", exception, - EnvironmentEdgeManager.currentTimeMillis()); + factory = retryingFactory; } + try (Table table = factory.getTable(tableReference.get())) { + throwFailureIfDone(); + table.batch(mutations, null); + } + } catch (InterruptedException e) { + // reset the interrupt status on the thread + Thread.currentThread().interrupt(); + throw e; + } catch (Exception e) { + throw e; + } + return Boolean.TRUE; } - return; - } - private Throwable logFailedTasksAndGetCause(List> failedFutures, - List failedTables) { - int i = 0; - Throwable t = null; - for (Future future : failedFutures) { - try { - future.get(); - } catch (InterruptedException | ExecutionException e) { - LOGGER.warn("Index Write failed for table " + failedTables.get(i), e); - if (t == null) { - t = e; - } - } - i++; + private void throwFailureIfDone() throws SingleIndexWriteFailureException { + if ( + stopped.isStopped() || (env != null && (env.getConnection() == null + || env.getConnection().isClosed() || env.getConnection().isAborted())) + || Thread.currentThread().isInterrupted() + ) { + throw new SingleIndexWriteFailureException( + "Pool closed, not attempting to write to the index!", null); + } + } - return t; + }); + } + + Pair, List>> resultsAndFutures = null; + try { + LOGGER.debug("Waiting on index update tasks to complete..."); + resultsAndFutures = this.pool.submitUninterruptible(tasks); + } catch (ExecutionException e) { + throw new RuntimeException( + "Should not fail on the results while using a WaitForCompletionTaskRunner", e); + } catch (EarlyExitFailure e) { + throw new RuntimeException("Stopped while waiting for batch, quiting!", e); + } + + // track the failures. We only ever access this on return from our calls, so no extra + // synchronization is needed. We could update all the failures as we find them, but that add a + // lot of locking overhead, and just doing the copy later is about as efficient. + List failedTables = new ArrayList(); + List> failedFutures = new ArrayList<>(); + int index = 0; + for (Boolean result : resultsAndFutures.getFirst()) { + // there was a failure + if (result == null) { + // we know which table failed by the index of the result + failedTables.add(tables.get(index)); + failedFutures.add(resultsAndFutures.getSecond().get(index)); + } + index++; } - @Override - public void stop(String why) { - LOGGER.info("Shutting down " + this.getClass().getSimpleName()); - this.pool.stop(why); - this.retryingFactory.shutdown(); - this.noRetriesFactory.shutdown(); + // if any of the tasks failed, then we need to propagate the failure + if (failedTables.size() > 0) { + Throwable cause = logFailedTasksAndGetCause(failedFutures, failedTables); + // make the list unmodifiable to avoid any more synchronization concerns + MultiIndexWriteFailureException exception = null; + // DisableIndexOnFailure flag is used by the old design. 
Setting the cause in MIWFE + // does not work for old design, so only do this for new design + if (disableIndexOnFailure) { + exception = new MultiIndexWriteFailureException(Collections.unmodifiableList(failedTables), + disableIndexOnFailure && PhoenixIndexFailurePolicy.getDisableIndexOnFailure(env)); + throw exception; + } else { + exception = new MultiIndexWriteFailureException(Collections.unmodifiableList(failedTables), + false, cause); + throw wrapInDoNotRetryIOException("At least one index write failed after retries", + exception, EnvironmentEdgeManager.currentTimeMillis()); + } } + return; + } - @Override - public boolean isStopped() { - return this.stopped.isStopped(); + private Throwable logFailedTasksAndGetCause(List> failedFutures, + List failedTables) { + int i = 0; + Throwable t = null; + for (Future future : failedFutures) { + try { + future.get(); + } catch (InterruptedException | ExecutionException e) { + LOGGER.warn("Index Write failed for table " + failedTables.get(i), e); + if (t == null) { + t = e; + } + } + i++; } + return t; + } + + @Override + public void stop(String why) { + LOGGER.info("Shutting down " + this.getClass().getSimpleName()); + this.pool.stop(why); + this.retryingFactory.shutdown(); + this.noRetriesFactory.shutdown(); + } + + @Override + public boolean isStopped() { + return this.stopped.isStopped(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java index 6d52e6ded6d..17ddf5d0ed5 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/PerRegionIndexWriteCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,23 +23,18 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.regionserver.Region; - +import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; -import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; - - public class PerRegionIndexWriteCache { private Map> cache = - new HashMap>(); - + new HashMap>(); /** * Get the edits for the current region. Removes the edits from the cache. To add them back, call * #addEdits(HRegion, HTableInterfaceReference, Collection). - * @param region * @return Get the edits for the given region. Returns null if there are no pending edits * for the region */ @@ -48,12 +43,9 @@ public Multimap getEdits(Region region) { } /** - * @param region - * @param table - * @param collection */ public void addEdits(Region region, HTableInterfaceReference table, - Collection collection) { + Collection collection) { Multimap edits = cache.get(region); if (edits == null) { edits = ArrayListMultimap. 
create(); @@ -61,4 +53,4 @@ public void addEdits(Region region, HTableInterfaceReference table, } edits.putAll(table, collection); } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java index 16251147b90..897b80eba71 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/write/recovery/StoreFailuresInCachePolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,13 +24,12 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.Region; - -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.write.IndexFailurePolicy; import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy; import org.apache.phoenix.hbase.index.write.TrackingParallelWriterIndexCommitter; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; /** * Tracks any failed writes in The {@link PerRegionIndexWriteCache}, given a @@ -60,19 +59,19 @@ public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { } @Override - public void handleFailure(Multimap attempted, Exception cause) throws IOException { + public void handleFailure(Multimap attempted, Exception cause) + throws IOException { // if its not an exception we can handle, let the delegate take care of it if (!(cause instanceof MultiIndexWriteFailureException)) { delegate.handleFailure(attempted, cause); } List failedTables = - ((MultiIndexWriteFailureException) cause).getFailedTables(); + ((MultiIndexWriteFailureException) cause).getFailedTables(); for (HTableInterfaceReference table : failedTables) { cache.addEdits(this.region, table, attempted.get(table)); } } - @Override public void stop(String why) { this.delegate.stop(why); @@ -82,4 +81,4 @@ public void stop(String why) { public boolean isStopped() { return this.delegate.isStopped(); } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java index 62b86771031..a5549ee4ff7 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,8 +29,8 @@ import java.sql.SQLException; import java.util.Iterator; import java.util.List; -import java.util.Random; import java.util.Optional; +import java.util.Random; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -80,605 +80,640 @@ import org.slf4j.LoggerFactory; /** - * - * Coprocessor that verifies the scanned rows of a non-transactional global index. - * - * If an index row is unverified (i.e., the row status is unverified), the following steps are taken : - * (1) We generate the data row key from the index row key, and check if the data row exists. If not, this unverified - * index row is skipped (i.e., not returned to the client), and it is deleted if it is old enough. The age check is - * necessary in order not to delete the index rows that are currently being updated. If the data row exists, - * we continue with the rest of the steps. - * (2) The index row is rebuilt from the data row. - * (3) The current scanner is closed as the newly rebuilt row will not be visible to the current scanner. - * (4) if the data row does not point back to the unverified index row (i.e., the index row key generated from the data - * row does not match with the row key of the unverified index row), this unverified row is skipped and and it is - * deleted if it is old enough. A new scanner is opened starting form the index row after this unverified index row. - * (5) if the data points back to the unverified index row then, a new scanner is opened starting form the index row. - * The next row is scanned to check if it is verified. if it is verified, it is returned to the client. If not, then - * it means the data table row timestamp is lower than than the timestamp of the unverified index row, and - * the index row that has been rebuilt from the data table row is masked by this unverified row. This happens if the - * first phase updates (i.e., unverified index row updates) complete but the second phase updates (i.e., data table - * row updates) fail. There could be back to back such events so we need to scan older versions to retrieve - * the verified version that is masked by the unverified version(s). - * + * Coprocessor that verifies the scanned rows of a non-transactional global index. If an index row + * is unverified (i.e., the row status is unverified), the following steps are taken : (1) We + * generate the data row key from the index row key, and check if the data row exists. If not, this + * unverified index row is skipped (i.e., not returned to the client), and it is deleted if it is + * old enough. The age check is necessary in order not to delete the index rows that are currently + * being updated. If the data row exists, we continue with the rest of the steps. (2) The index row + * is rebuilt from the data row. (3) The current scanner is closed as the newly rebuilt row will not + * be visible to the current scanner. (4) if the data row does not point back to the unverified + * index row (i.e., the index row key generated from the data row does not match with the row key of + * the unverified index row), this unverified row is skipped and and it is deleted if it is old + * enough. A new scanner is opened starting form the index row after this unverified index row. 
(5) + * if the data points back to the unverified index row then, a new scanner is opened starting form + * the index row. The next row is scanned to check if it is verified. if it is verified, it is + * returned to the client. If not, then it means the data table row timestamp is lower than than the + * timestamp of the unverified index row, and the index row that has been rebuilt from the data + * table row is masked by this unverified row. This happens if the first phase updates (i.e., + * unverified index row updates) complete but the second phase updates (i.e., data table row + * updates) fail. There could be back to back such events so we need to scan older versions to + * retrieve the verified version that is masked by the unverified version(s). */ -public class GlobalIndexChecker extends BaseScannerRegionObserver implements RegionCoprocessor{ - private static final Logger LOG = - LoggerFactory.getLogger(GlobalIndexChecker.class); - private static final String REPAIR_LOGGING_PERCENT_ATTRIB = "phoenix.index.repair.logging.percent"; - private static final double DEFAULT_REPAIR_LOGGING_PERCENT = 100; +public class GlobalIndexChecker extends BaseScannerRegionObserver implements RegionCoprocessor { + private static final Logger LOG = LoggerFactory.getLogger(GlobalIndexChecker.class); + private static final String REPAIR_LOGGING_PERCENT_ATTRIB = + "phoenix.index.repair.logging.percent"; + private static final double DEFAULT_REPAIR_LOGGING_PERCENT = 100; - private GlobalIndexCheckerSource metricsSource; - private CoprocessorEnvironment env; + private GlobalIndexCheckerSource metricsSource; + private CoprocessorEnvironment env; - public enum RebuildReturnCode { - NO_DATA_ROW(0), - NO_INDEX_ROW(1), - INDEX_ROW_EXISTS(2); - private int value; + public enum RebuildReturnCode { + NO_DATA_ROW(0), + NO_INDEX_ROW(1), + INDEX_ROW_EXISTS(2); - RebuildReturnCode(int value) { - this.value = value; - } + private int value; - public int getValue() { - return value; - } + RebuildReturnCode(int value) { + this.value = value; } - /** - * Class that verifies a given row of a non-transactional global index. 
- * An instance of this class is created for each scanner on an index - * and used to verify individual rows and rebuild them if they are not valid - */ - public class GlobalIndexScanner extends BaseRegionScanner { - private RegionScanner scanner; - private long ageThreshold; - private Scan scan; - private Scan indexScan; - private Scan singleRowIndexScan; - private Scan buildIndexScanForDataTable = null; - private Table dataHTable = null; - private byte[] emptyCF; - private byte[] emptyCQ; - private IndexMaintainer indexMaintainer = null; - private byte[][] viewConstants = null; - private RegionCoprocessorEnvironment env; - private Region region; - private long minTimestamp; - private long maxTimestamp; - private GlobalIndexCheckerSource metricsSource; - private long rowCount = 0; - private long pageSize = Long.MAX_VALUE; - private boolean hasMore; - private double loggingPercent; - private Random random; - private String indexName; - private long pageSizeMs; - private boolean initialized = false; - - public GlobalIndexScanner(RegionCoprocessorEnvironment env, - Scan scan, - RegionScanner scanner, - GlobalIndexCheckerSource metricsSource) throws IOException { - super(scanner); - this.env = env; - this.scan = scan; - this.scanner = scanner; - this.metricsSource = metricsSource; - - region = env.getRegion(); - emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); - emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); - ageThreshold = env.getConfiguration().getLong( - QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, - QueryServicesOptions.DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS); - minTimestamp = scan.getTimeRange().getMin(); - maxTimestamp = scan.getTimeRange().getMax(); - byte[] indexTableNameBytes = region.getRegionInfo().getTable().getName(); - this.indexName = Bytes.toString(indexTableNameBytes); - byte[] md = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); - List maintainers = IndexMaintainer.deserialize(md, true); - indexMaintainer = getIndexMaintainer(maintainers, indexTableNameBytes); - if (indexMaintainer == null) { - throw new DoNotRetryIOException( - "repairIndexRows: IndexMaintainer is not included in scan attributes for " + - region.getRegionInfo().getTable().getNameAsString()); - } - loggingPercent = env.getConfiguration().getDouble(REPAIR_LOGGING_PERCENT_ATTRIB, - DEFAULT_REPAIR_LOGGING_PERCENT); - random = new Random(EnvironmentEdgeManager.currentTimeMillis()); - pageSizeMs = getPageSizeMsForRegionScanner(scan); - } + public int getValue() { + return value; + } + } + + /** + * Class that verifies a given row of a non-transactional global index. 
An instance of this class + * is created for each scanner on an index and used to verify individual rows and rebuild them if + * they are not valid + */ + public class GlobalIndexScanner extends BaseRegionScanner { + private RegionScanner scanner; + private long ageThreshold; + private Scan scan; + private Scan indexScan; + private Scan singleRowIndexScan; + private Scan buildIndexScanForDataTable = null; + private Table dataHTable = null; + private byte[] emptyCF; + private byte[] emptyCQ; + private IndexMaintainer indexMaintainer = null; + private byte[][] viewConstants = null; + private RegionCoprocessorEnvironment env; + private Region region; + private long minTimestamp; + private long maxTimestamp; + private GlobalIndexCheckerSource metricsSource; + private long rowCount = 0; + private long pageSize = Long.MAX_VALUE; + private boolean hasMore; + private double loggingPercent; + private Random random; + private String indexName; + private long pageSizeMs; + private boolean initialized = false; + + public GlobalIndexScanner(RegionCoprocessorEnvironment env, Scan scan, RegionScanner scanner, + GlobalIndexCheckerSource metricsSource) throws IOException { + super(scanner); + this.env = env; + this.scan = scan; + this.scanner = scanner; + this.metricsSource = metricsSource; + + region = env.getRegion(); + emptyCF = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME); + emptyCQ = scan.getAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME); + ageThreshold = env.getConfiguration().getLong( + QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, + QueryServicesOptions.DEFAULT_GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS); + minTimestamp = scan.getTimeRange().getMin(); + maxTimestamp = scan.getTimeRange().getMax(); + byte[] indexTableNameBytes = region.getRegionInfo().getTable().getName(); + this.indexName = Bytes.toString(indexTableNameBytes); + byte[] md = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD); + List maintainers = IndexMaintainer.deserialize(md, true); + indexMaintainer = getIndexMaintainer(maintainers, indexTableNameBytes); + if (indexMaintainer == null) { + throw new DoNotRetryIOException( + "repairIndexRows: IndexMaintainer is not included in scan attributes for " + + region.getRegionInfo().getTable().getNameAsString()); + } + loggingPercent = env.getConfiguration().getDouble(REPAIR_LOGGING_PERCENT_ATTRIB, + DEFAULT_REPAIR_LOGGING_PERCENT); + random = new Random(EnvironmentEdgeManager.currentTimeMillis()); + pageSizeMs = getPageSizeMsForRegionScanner(scan); + } - @Override - public int getBatch() { - return scanner.getBatch(); - } + @Override + public int getBatch() { + return scanner.getBatch(); + } + + @Override + public long getMaxResultSize() { + return scanner.getMaxResultSize(); + } - @Override - public long getMaxResultSize() { - return scanner.getMaxResultSize(); + private void init() throws IOException { + PageFilter pageFilter = ScanUtil.removePageFilter(scan); + if (pageFilter != null) { + pageSize = pageFilter.getPageSize(); + scanner.close(); + scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(scan); + } else { + pageSize = Long.MAX_VALUE; + } + + Filter filter = scan.getFilter(); + Filter delegateFilter = filter; + if (filter instanceof PagingFilter) { + delegateFilter = ((PagingFilter) filter).getDelegateFilter(); + } + if (shouldCreateUnverifiedRowFilter(delegateFilter)) { + // we need to ensure that the PagingFilter remains the + // topmost (or outermost) filter so wrap the 
UnverifiedRowFilter + // around the original delegate and then set the UnverifiedRowFilter + // as the delegate of the PagingFilter + UnverifiedRowFilter unverifiedRowFilter = + new UnverifiedRowFilter(delegateFilter, emptyCF, emptyCQ); + if (filter instanceof PagingFilter) { + ((PagingFilter) filter).setDelegateFilter(unverifiedRowFilter); + } else { + scan.setFilter(unverifiedRowFilter); } + scanner.close(); + scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(scan); + } + } - private void init() throws IOException { - PageFilter pageFilter = ScanUtil.removePageFilter(scan); - if (pageFilter != null) { - pageSize = pageFilter.getPageSize(); - scanner.close(); - scanner = ((DelegateRegionScanner)delegate).getNewRegionScanner(scan); - } - else { - pageSize = Long.MAX_VALUE; - } + private boolean shouldCreateUnverifiedRowFilter(Filter delegateFilter) { + if (delegateFilter == null) { + return false; + } + Filter wrappedFilter = delegateFilter; + if (delegateFilter instanceof FilterList) { + List filters = ((FilterList) delegateFilter).getFilters(); + wrappedFilter = filters.get(0); + } + // Optimization since FirstKeyOnlyFilter and EmptyColumnOnlyFilter + // always include the empty column in the scan result + if ( + wrappedFilter instanceof FirstKeyOnlyFilter + || wrappedFilter instanceof EmptyColumnOnlyFilter + ) { + return false; + } + return true; + } - Filter filter = scan.getFilter(); - Filter delegateFilter = filter; - if (filter instanceof PagingFilter) { - delegateFilter = ((PagingFilter) filter).getDelegateFilter(); - } - if (shouldCreateUnverifiedRowFilter(delegateFilter)) { - // we need to ensure that the PagingFilter remains the - // topmost (or outermost) filter so wrap the UnverifiedRowFilter - // around the original delegate and then set the UnverifiedRowFilter - // as the delegate of the PagingFilter - UnverifiedRowFilter unverifiedRowFilter = - new UnverifiedRowFilter(delegateFilter, emptyCF, emptyCQ); - if (filter instanceof PagingFilter) { - ((PagingFilter) filter).setDelegateFilter(unverifiedRowFilter); - } else { - scan.setFilter(unverifiedRowFilter); - } - scanner.close(); - scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(scan); - } + public boolean next(List result, boolean raw, ScannerContext scannerContext) + throws IOException { + try { + if (!initialized) { + init(); + initialized = true; } - - private boolean shouldCreateUnverifiedRowFilter(Filter delegateFilter) { - if (delegateFilter == null) { - return false; - } - Filter wrappedFilter = delegateFilter; - if (delegateFilter instanceof FilterList) { - List filters = ((FilterList) delegateFilter).getFilters(); - wrappedFilter = filters.get(0); - } - // Optimization since FirstKeyOnlyFilter and EmptyColumnOnlyFilter - // always include the empty column in the scan result - if (wrappedFilter instanceof FirstKeyOnlyFilter - || wrappedFilter instanceof EmptyColumnOnlyFilter) { - return false; - } + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + do { + if (raw) { + hasMore = (scannerContext == null) + ? scanner.nextRaw(result) + : scanner.nextRaw(result, scannerContext); + } else { + hasMore = (scannerContext == null) + ? 
scanner.next(result) + : scanner.next(result, scannerContext); + } + if (result.isEmpty()) { + return hasMore; + } + if (isDummy(result)) { return true; + } + Cell cell = result.get(0); + if (verifyRowAndRepairIfNecessary(result)) { + break; + } + if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= pageSizeMs) { + byte[] rowKey = CellUtil.cloneRow(cell); + result.clear(); + getDummyResult(rowKey, result); + return true; + } + // skip this row as it is invalid + // if there is no more row, then result will be an empty list + } while (hasMore); + rowCount++; + if (rowCount == pageSize) { + return false; } + return hasMore; + } catch (Throwable t) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + return false; // impossible + } + } - public boolean next(List result, boolean raw, ScannerContext scannerContext) - throws IOException { - try { - if (!initialized) { - init(); - initialized = true; - } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - do { - if (raw) { - hasMore = (scannerContext == null) - ? scanner.nextRaw(result) - : scanner.nextRaw(result, scannerContext); - } else { - hasMore = (scannerContext == null) - ? scanner.next(result) - : scanner.next(result, scannerContext); - } - if (result.isEmpty()) { - return hasMore; - } - if (isDummy(result)) { - return true; - } - Cell cell = result.get(0); - if (verifyRowAndRepairIfNecessary(result)) { - break; - } - if (hasMore && (EnvironmentEdgeManager.currentTimeMillis() - startTime) >= - pageSizeMs) { - byte[] rowKey = CellUtil.cloneRow(cell); - result.clear(); - getDummyResult(rowKey, result); - return true; - } - // skip this row as it is invalid - // if there is no more row, then result will be an empty list - } while (hasMore); - rowCount++; - if (rowCount == pageSize) { - return false; - } - return hasMore; - } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - return false; // impossible - } - } + @Override + public boolean next(List result) throws IOException { + return next(result, false, null); + } - @Override - public boolean next(List result) throws IOException { - return next(result, false, null); - } + @Override + public boolean nextRaw(List result) throws IOException { + return next(result, true, null); + } - @Override - public boolean nextRaw(List result) throws IOException { - return next(result, true, null); - } + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result, false, scannerContext); + } - @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result, false, scannerContext); - } + @Override + public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { + return next(result, true, scannerContext); + } - @Override - public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { - return next(result, true, scannerContext); - } + @Override + public void close() throws IOException { + scanner.close(); + if (dataHTable != null) { + dataHTable.close(); + } + } - @Override - public void close() throws IOException { - scanner.close(); - if (dataHTable != null) { - dataHTable.close(); - } - } + @Override + public RegionInfo getRegionInfo() { + return scanner.getRegionInfo(); + } - @Override - public RegionInfo getRegionInfo() { - return scanner.getRegionInfo(); - } + @Override + public boolean reseek(byte[] row) throws 
IOException { + return scanner.reseek(row); + } - @Override - public boolean reseek(byte[] row) throws IOException { - return scanner.reseek(row); - } + @Override + public long getMvccReadPoint() { + return scanner.getMvccReadPoint(); + } - @Override - public long getMvccReadPoint() { - return scanner.getMvccReadPoint(); + private void repairIndexRows(byte[] indexRowKey, long ts, List row) throws IOException { + if (buildIndexScanForDataTable == null) { + buildIndexScanForDataTable = new Scan(); + indexScan = new Scan(scan); + singleRowIndexScan = new Scan(scan); + // Scanners to be opened on index table using indexScan and singleRowIndexScan do + // require scanning the latest/newer version of cells i.e. new rows updated on the + // index table as part of the read-repair operation. + // Both scan objects copy mvcc read point from the original scan object used to + // open GlobalIndexScanner. Hence, we need to reset them to -1 so that if the + // region moves in the middle of the ongoing scan on the data table, the reset + // mvcc value of -1 will ensure that new scanners opened on index table using + // indexScan and singleRowIndexScan are able to read the latest snapshot of the + // index updates. + PackagePrivateFieldAccessor.setMvccReadPoint(indexScan, -1); + PackagePrivateFieldAccessor.setMvccReadPoint(singleRowIndexScan, -1); + byte[] dataTableName = + scan.getAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME); + dataHTable = ServerUtil.ConnectionFactory + .getConnection(ServerUtil.ConnectionType.INDEX_WRITER_CONNECTION, env) + .getTable(TableName.valueOf(dataTableName)); + + viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); + // The following attributes are set to instruct UngroupedAggregateRegionObserver to do + // partial index rebuild + // i.e., rebuild a subset of index rows. + buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, + TRUE_BYTES); + buildIndexScanForDataTable.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, + scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD)); + ScanUtil.annotateScanWithMetadataAttributes(scan, buildIndexScanForDataTable); + buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, + TRUE_BYTES); + buildIndexScanForDataTable.setAttribute( + BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); + buildIndexScanForDataTable + .setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); + // Scan only columns included in the index table plus the empty column + for (ColumnReference column : indexMaintainer.getAllColumnsForDataTable()) { + buildIndexScanForDataTable.addColumn(column.getFamily(), column.getQualifier()); } - - private void repairIndexRows(byte[] indexRowKey, long ts, List row) throws IOException { - if (buildIndexScanForDataTable == null) { - buildIndexScanForDataTable = new Scan(); - indexScan = new Scan(scan); - singleRowIndexScan = new Scan(scan); - // Scanners to be opened on index table using indexScan and singleRowIndexScan do - // require scanning the latest/newer version of cells i.e. new rows updated on the - // index table as part of the read-repair operation. - // Both scan objects copy mvcc read point from the original scan object used to - // open GlobalIndexScanner. 
Hence, we need to reset them to -1 so that if the - // region moves in the middle of the ongoing scan on the data table, the reset - // mvcc value of -1 will ensure that new scanners opened on index table using - // indexScan and singleRowIndexScan are able to read the latest snapshot of the - // index updates. - PackagePrivateFieldAccessor.setMvccReadPoint(indexScan, -1); - PackagePrivateFieldAccessor.setMvccReadPoint(singleRowIndexScan, -1); - byte[] dataTableName = scan.getAttribute(BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME); - dataHTable = - ServerUtil.ConnectionFactory. - getConnection(ServerUtil.ConnectionType.INDEX_WRITER_CONNECTION, env). - getTable(TableName.valueOf(dataTableName)); - - viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); - // The following attributes are set to instruct UngroupedAggregateRegionObserver to do partial index rebuild - // i.e., rebuild a subset of index rows. - buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, TRUE_BYTES); - buildIndexScanForDataTable.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD)); - ScanUtil.annotateScanWithMetadataAttributes(scan, buildIndexScanForDataTable); - buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); - buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ); - // Scan only columns included in the index table plus the empty column - for (ColumnReference column : indexMaintainer.getAllColumnsForDataTable()) { - buildIndexScanForDataTable.addColumn(column.getFamily(), column.getQualifier()); - } - buildIndexScanForDataTable.addColumn(indexMaintainer.getDataEmptyKeyValueCF(), indexMaintainer.getEmptyKeyValueQualifierForDataTable()); - } - // Rebuild the index row from the corresponding the row in the the data table - // Get the data row key from the index row key - byte[] dataRowKey = indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); - buildIndexScanForDataTable.withStartRow(dataRowKey, true); - buildIndexScanForDataTable.withStopRow(dataRowKey, true); - buildIndexScanForDataTable.setTimeRange(0, maxTimestamp); - // Pass the index row key to the partial index builder which will rebuild the index row and check if the - // row key of this rebuilt index row matches with the passed index row key - buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY, indexRowKey); - Result result = null; - try (ResultScanner resultScanner = dataHTable.getScanner(buildIndexScanForDataTable)){ - result = resultScanner.next(); - } catch (Throwable t) { - ClientUtil.throwIOException(dataHTable.getName().toString(), t); - } - // A single cell will be returned. We decode that here - byte[] value = result.value(); - long code = PLong.INSTANCE.getCodec().decodeLong(new ImmutableBytesWritable(value), SortOrder.getDefault()); - if (code == RebuildReturnCode.NO_DATA_ROW.getValue()) { - // This means there does not exist a data table row for the data row key derived from - // this unverified index row. 
So, no index row has been built - // Delete the unverified row from index if it is old enough - if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { - region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); - } - // Skip this unverified row (i.e., do not return it to the client). Just retuning empty row is - // sufficient to do that - row.clear(); - return; - } - // An index row has been built. Close the current scanner as the newly built row will not be visible to it - scanner.close(); - if (code == RebuildReturnCode.NO_INDEX_ROW.getValue()) { - // This means there exists a data table row for the data row key derived from this unverified index row - // but the data table row does not point back to the index row. - // Delete the unverified row from index if it is old enough - if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { - region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); - } - // Open a new scanner starting from the row after the current row - indexScan.withStartRow(indexRowKey, false); - scanner = ((DelegateRegionScanner)delegate).getNewRegionScanner(indexScan); - hasMore = true; - // Skip this unverified row (i.e., do not return it to the client). Just retuning empty row is - // sufficient to do that - row.clear(); - return; - } - // code == RebuildReturnCode.INDEX_ROW_EXISTS.getValue() - // Open a new scanner starting from the current row - indexScan.withStartRow(indexRowKey, true); - scanner = ((DelegateRegionScanner)delegate).getNewRegionScanner(indexScan); - hasMore = scanner.next(row); - if (row.isEmpty()) { - // This means the index row has been deleted before opening the new scanner. - return; - } - if (isDummy(row)) { - return; - } - // Check if the index row still exist after rebuild - if (Bytes.compareTo(row.get(0).getRowArray(), row.get(0).getRowOffset(), row.get(0).getRowLength(), - indexRowKey, 0, indexRowKey.length) != 0) { - // This means the index row has been deleted before opening the new scanner. We got a different row - // If this row is "verified" (or empty) then we are good to go. - if (verifyRowAndRemoveEmptyColumn(row)) { - return; - } - // The row is "unverified". Rewind the scanner and let the row be scanned again - // so that it can be repaired - scanner.close(); - scanner =((DelegateRegionScanner)delegate).getNewRegionScanner(indexScan); - hasMore = true; - row.clear(); - return; - } - // The index row still exist after rebuild - // Check if the index row is still unverified - if (verifyRowAndRemoveEmptyColumn(row)) { - // The index row status is "verified". This row is good to return to the client. We are done here. - return; - } - // The index row is still "unverified" after rebuild. This means that the data table row timestamp is - // lower than the timestamp of the unverified index row (ts) and the index row that is built from - // the data table row is masked by this unverified row. This happens if the first phase updates (i.e., - // unverified index row updates) complete but the second phase updates (i.e., data table updates) fail. - // There could be back to back such events so we need a loop to go through them - do { - // First delete the unverified row from index if it is old enough - if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { - region.delete(indexMaintainer.createDelete(indexRowKey, ts, true)); - } - // Now we will do a single row scan to retrieve the verified index row built from the data table row. 
- // Note we cannot read all versions in one scan as the max number of row versions for an index table - // can be 1. In that case, we will get only one (i.e., the most recent) version instead of all versions - singleRowIndexScan.withStartRow(indexRowKey, true); - singleRowIndexScan.withStopRow(indexRowKey, true); - singleRowIndexScan.setTimeRange(minTimestamp, ts); - RegionScanner singleRowScanner = ((DelegateRegionScanner)delegate).getNewRegionScanner(singleRowIndexScan); - row.clear(); - singleRowScanner.next(row); - singleRowScanner.close(); - if (row.isEmpty()) { - // This can happen if the unverified row matches the filter - // but after repair the verified row doesn't match the filter - return; - } - if (isDummy(row)) { - return; - } - if (verifyRowAndRemoveEmptyColumn(row)) { - // The index row status is "verified". This row is good to return to the client. We are done here. - return; - } - ts = getMaxTimestamp(row); - } while (Bytes.compareTo(row.get(0).getRowArray(), row.get(0).getRowOffset(), row.get(0).getRowLength(), - indexRowKey, 0, indexRowKey.length) == 0); - // This should not happen at all - Cell cell = row.get(0); - byte[] rowKey = CellUtil.cloneRow(cell); - throw new DoNotRetryIOException("The scan returned a row with row key (" + Bytes.toStringBinary(rowKey) + - ") different than indexRowKey (" + Bytes.toStringBinary(indexRowKey) + ") for table " + - region.getRegionInfo().getTable().getNameAsString()); + buildIndexScanForDataTable.addColumn(indexMaintainer.getDataEmptyKeyValueCF(), + indexMaintainer.getEmptyKeyValueQualifierForDataTable()); + } + // Rebuild the index row from the corresponding the row in the the data table + // Get the data row key from the index row key + byte[] dataRowKey = + indexMaintainer.buildDataRowKey(new ImmutableBytesWritable(indexRowKey), viewConstants); + buildIndexScanForDataTable.withStartRow(dataRowKey, true); + buildIndexScanForDataTable.withStopRow(dataRowKey, true); + buildIndexScanForDataTable.setTimeRange(0, maxTimestamp); + // Pass the index row key to the partial index builder which will rebuild the index row and + // check if the + // row key of this rebuilt index row matches with the passed index row key + buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY, + indexRowKey); + Result result = null; + try (ResultScanner resultScanner = dataHTable.getScanner(buildIndexScanForDataTable)) { + result = resultScanner.next(); + } catch (Throwable t) { + ClientUtil.throwIOException(dataHTable.getName().toString(), t); + } + // A single cell will be returned. We decode that here + byte[] value = result.value(); + long code = PLong.INSTANCE.getCodec().decodeLong(new ImmutableBytesWritable(value), + SortOrder.getDefault()); + if (code == RebuildReturnCode.NO_DATA_ROW.getValue()) { + // This means there does not exist a data table row for the data row key derived from + // this unverified index row. So, no index row has been built + // Delete the unverified row from index if it is old enough + if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { + region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); } - - private boolean isEmptyColumn(Cell cell) { - return Bytes.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), - emptyCF, 0, emptyCF.length) == 0 && - Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), - emptyCQ, 0, emptyCQ.length) == 0; + // Skip this unverified row (i.e., do not return it to the client). 
Just retuning empty row + // is + // sufficient to do that + row.clear(); + return; + } + // An index row has been built. Close the current scanner as the newly built row will not be + // visible to it + scanner.close(); + if (code == RebuildReturnCode.NO_INDEX_ROW.getValue()) { + // This means there exists a data table row for the data row key derived from this + // unverified index row + // but the data table row does not point back to the index row. + // Delete the unverified row from index if it is old enough + if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { + region.delete(indexMaintainer.createDelete(indexRowKey, ts, false)); } - - /** - * An index row is composed of cells with the same timestamp. However, if there are multiple versions of an - * index row, HBase can return an index row with cells from multiple versions, and thus it can return cells - * with different timestamps. This happens if the version of the row we are reading does not have a value - * (i.e., effectively has null value) for a column whereas an older version has a value for the column. - * In this case, we need to remove the older cells for correctness. - */ - private void removeOlderCells(List cellList) { - Iterator cellIterator = cellList.iterator(); - if (!cellIterator.hasNext()) { - return; - } - Cell cell = cellIterator.next(); - long maxTs = cell.getTimestamp(); - long ts; - boolean allTheSame = true; - while (cellIterator.hasNext()) { - cell = cellIterator.next(); - ts = cell.getTimestamp(); - if (ts != maxTs) { - if (ts > maxTs) { - maxTs = ts; - } - allTheSame = false; - } - } - if (allTheSame) { - return; - } - cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - cell = cellIterator.next(); - if (cell.getTimestamp() != maxTs) { - cellIterator.remove(); - } - } + // Open a new scanner starting from the row after the current row + indexScan.withStartRow(indexRowKey, false); + scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(indexScan); + hasMore = true; + // Skip this unverified row (i.e., do not return it to the client). Just retuning empty row + // is + // sufficient to do that + row.clear(); + return; + } + // code == RebuildReturnCode.INDEX_ROW_EXISTS.getValue() + // Open a new scanner starting from the current row + indexScan.withStartRow(indexRowKey, true); + scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(indexScan); + hasMore = scanner.next(row); + if (row.isEmpty()) { + // This means the index row has been deleted before opening the new scanner. + return; + } + if (isDummy(row)) { + return; + } + // Check if the index row still exist after rebuild + if ( + Bytes.compareTo(row.get(0).getRowArray(), row.get(0).getRowOffset(), + row.get(0).getRowLength(), indexRowKey, 0, indexRowKey.length) != 0 + ) { + // This means the index row has been deleted before opening the new scanner. We got a + // different row + // If this row is "verified" (or empty) then we are good to go. 
+ if (verifyRowAndRemoveEmptyColumn(row)) { + return; } - - private boolean verifyRowAndRemoveEmptyColumn(List cellList) throws IOException { - if (indexMaintainer.isUncovered()) { - return true; - } - if (!indexMaintainer.isImmutableRows()) { - removeOlderCells(cellList); - } - long cellListSize = cellList.size(); - Cell cell = null; - if (cellListSize == 0) { - return true; - } - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - cell = cellIterator.next(); - if (isEmptyColumn(cell)) { - if (indexMaintainer instanceof TransformMaintainer) { - // This is a transforming table. After cutoff, if there are new mutations on the table, - // their empty col value would be x. So, we are only interested in unverified ones. - if (Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - UNVERIFIED_BYTES, 0, UNVERIFIED_BYTES.length) == 0) { - return false; - } - } else { - if (Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - VERIFIED_BYTES, 0, VERIFIED_BYTES.length) != 0) { - return false; - } - } - return true; - } - } - // This index row does not have an empty column cell. It must be removed by compaction. This row will - // be treated as unverified so that it can be repaired - return false; + // The row is "unverified". Rewind the scanner and let the row be scanned again + // so that it can be repaired + scanner.close(); + scanner = ((DelegateRegionScanner) delegate).getNewRegionScanner(indexScan); + hasMore = true; + row.clear(); + return; + } + // The index row still exist after rebuild + // Check if the index row is still unverified + if (verifyRowAndRemoveEmptyColumn(row)) { + // The index row status is "verified". This row is good to return to the client. We are done + // here. + return; + } + // The index row is still "unverified" after rebuild. This means that the data table row + // timestamp is + // lower than the timestamp of the unverified index row (ts) and the index row that is built + // from + // the data table row is masked by this unverified row. This happens if the first phase + // updates (i.e., + // unverified index row updates) complete but the second phase updates (i.e., data table + // updates) fail. + // There could be back to back such events so we need a loop to go through them + do { + // First delete the unverified row from index if it is old enough + if (indexMaintainer.isAgedEnough(ts, ageThreshold)) { + region.delete(indexMaintainer.createDelete(indexRowKey, ts, true)); } - - private long getMaxTimestamp(List cellList) { - long maxTs = 0; - long ts = 0; - Iterator cellIterator = cellList.iterator(); - while (cellIterator.hasNext()) { - Cell cell = cellIterator.next(); - ts = cell.getTimestamp(); - if (ts > maxTs) { - maxTs = ts; - } - } - return maxTs; + // Now we will do a single row scan to retrieve the verified index row built from the data + // table row. + // Note we cannot read all versions in one scan as the max number of row versions for an + // index table + // can be 1. 
In that case, we will get only one (i.e., the most recent) version instead of + // all versions + singleRowIndexScan.withStartRow(indexRowKey, true); + singleRowIndexScan.withStopRow(indexRowKey, true); + singleRowIndexScan.setTimeRange(minTimestamp, ts); + RegionScanner singleRowScanner = + ((DelegateRegionScanner) delegate).getNewRegionScanner(singleRowIndexScan); + row.clear(); + singleRowScanner.next(row); + singleRowScanner.close(); + if (row.isEmpty()) { + // This can happen if the unverified row matches the filter + // but after repair the verified row doesn't match the filter + return; } + if (isDummy(row)) { + return; + } + if (verifyRowAndRemoveEmptyColumn(row)) { + // The index row status is "verified". This row is good to return to the client. We are + // done here. + return; + } + ts = getMaxTimestamp(row); + } while ( + Bytes.compareTo(row.get(0).getRowArray(), row.get(0).getRowOffset(), + row.get(0).getRowLength(), indexRowKey, 0, indexRowKey.length) == 0 + ); + // This should not happen at all + Cell cell = row.get(0); + byte[] rowKey = CellUtil.cloneRow(cell); + throw new DoNotRetryIOException( + "The scan returned a row with row key (" + Bytes.toStringBinary(rowKey) + + ") different than indexRowKey (" + Bytes.toStringBinary(indexRowKey) + ") for table " + + region.getRegionInfo().getTable().getNameAsString()); + } - /** - * @param cellList is an input and output parameter and will either include a valid row or be an empty list - * @return true if there exists more rows, otherwise false - * @throws IOException - */ - private boolean verifyRowAndRepairIfNecessary(List cellList) throws IOException { - metricsSource.incrementIndexInspections(indexName); - Cell cell = cellList.get(0); - if (verifyRowAndRemoveEmptyColumn(cellList)) { - return true; - } else { - long repairStart = EnvironmentEdgeManager.currentTimeMillis(); - - byte[] rowKey = CellUtil.cloneRow(cell); - long ts = cellList.get(0).getTimestamp(); - cellList.clear(); - long repairTime; - try { - repairIndexRows(rowKey, ts, cellList); - repairTime = EnvironmentEdgeManager.currentTimeMillis() - repairStart; - metricsSource.incrementIndexRepairs(indexName); - metricsSource.updateUnverifiedIndexRowAge(indexName, - EnvironmentEdgeManager.currentTimeMillis() - ts); - metricsSource.updateIndexRepairTime(indexName, - EnvironmentEdgeManager.currentTimeMillis() - repairStart); - if (shouldLog()) { - LOG.info("Index row repair on region {} took {} ms.", - env.getRegionInfo().getRegionNameAsString(), repairTime); - } - } catch (IOException e) { - repairTime = EnvironmentEdgeManager.currentTimeMillis() - repairStart; - metricsSource.incrementIndexRepairFailures(indexName); - metricsSource.updateIndexRepairFailureTime(indexName, - EnvironmentEdgeManager.currentTimeMillis() - repairStart); - if (shouldLog()) { - LOG.warn("Index row repair failure on region {} took {} ms.", - env.getRegionInfo().getRegionNameAsString(), repairTime); - } - throw e; - } - - if (cellList.isEmpty()) { - // This means that the index row is invalid. Return false to tell the caller that this row should be skipped - return false; - } - return true; - } + private boolean isEmptyColumn(Cell cell) { + return Bytes.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), + emptyCF, 0, emptyCF.length) == 0 + && Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength(), emptyCQ, 0, emptyCQ.length) == 0; + } + + /** + * An index row is composed of cells with the same timestamp. 
However, if there are multiple + * versions of an index row, HBase can return an index row with cells from multiple versions, + * and thus it can return cells with different timestamps. This happens if the version of the + * row we are reading does not have a value (i.e., effectively has null value) for a column + * whereas an older version has a value for the column. In this case, we need to remove the + * older cells for correctness. + */ + private void removeOlderCells(List cellList) { + Iterator cellIterator = cellList.iterator(); + if (!cellIterator.hasNext()) { + return; + } + Cell cell = cellIterator.next(); + long maxTs = cell.getTimestamp(); + long ts; + boolean allTheSame = true; + while (cellIterator.hasNext()) { + cell = cellIterator.next(); + ts = cell.getTimestamp(); + if (ts != maxTs) { + if (ts > maxTs) { + maxTs = ts; + } + allTheSame = false; + } + } + if (allTheSame) { + return; + } + cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + cell = cellIterator.next(); + if (cell.getTimestamp() != maxTs) { + cellIterator.remove(); } + } + } - private boolean shouldLog() { - if (loggingPercent == 0) { - return false; + private boolean verifyRowAndRemoveEmptyColumn(List cellList) throws IOException { + if (indexMaintainer.isUncovered()) { + return true; + } + if (!indexMaintainer.isImmutableRows()) { + removeOlderCells(cellList); + } + long cellListSize = cellList.size(); + Cell cell = null; + if (cellListSize == 0) { + return true; + } + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + cell = cellIterator.next(); + if (isEmptyColumn(cell)) { + if (indexMaintainer instanceof TransformMaintainer) { + // This is a transforming table. After cutoff, if there are new mutations on the table, + // their empty col value would be x. So, we are only interested in unverified ones. + if ( + Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + UNVERIFIED_BYTES, 0, UNVERIFIED_BYTES.length) == 0 + ) { + return false; + } + } else { + if ( + Bytes.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + VERIFIED_BYTES, 0, VERIFIED_BYTES.length) != 0 + ) { + return false; } - return (random.nextDouble() <= (loggingPercent / 100.0d)); + } + return true; } + } + // This index row does not have an empty column cell. It must be removed by compaction. 
This + // row will + // be treated as unverified so that it can be repaired + return false; } - @Override - protected boolean isRegionObserverFor(Scan scan) { - return scan.getAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN) != null; + private long getMaxTimestamp(List cellList) { + long maxTs = 0; + long ts = 0; + Iterator cellIterator = cellList.iterator(); + while (cellIterator.hasNext()) { + Cell cell = cellIterator.next(); + ts = cell.getTimestamp(); + if (ts > maxTs) { + maxTs = ts; + } + } + return maxTs; } - @Override - protected RegionScanner doPostScannerOpen(final ObserverContext c, final Scan scan, - final RegionScanner s) throws IOException, SQLException { - return new GlobalIndexScanner(c.getEnvironment(), scan, s, metricsSource); - } + /** + * @param cellList is an input and output parameter and will either include a valid row or be an + * empty list + * @return true if there exists more rows, otherwise false + */ + private boolean verifyRowAndRepairIfNecessary(List cellList) throws IOException { + metricsSource.incrementIndexInspections(indexName); + Cell cell = cellList.get(0); + if (verifyRowAndRemoveEmptyColumn(cellList)) { + return true; + } else { + long repairStart = EnvironmentEdgeManager.currentTimeMillis(); + + byte[] rowKey = CellUtil.cloneRow(cell); + long ts = cellList.get(0).getTimestamp(); + cellList.clear(); + long repairTime; + try { + repairIndexRows(rowKey, ts, cellList); + repairTime = EnvironmentEdgeManager.currentTimeMillis() - repairStart; + metricsSource.incrementIndexRepairs(indexName); + metricsSource.updateUnverifiedIndexRowAge(indexName, + EnvironmentEdgeManager.currentTimeMillis() - ts); + metricsSource.updateIndexRepairTime(indexName, + EnvironmentEdgeManager.currentTimeMillis() - repairStart); + if (shouldLog()) { + LOG.info("Index row repair on region {} took {} ms.", + env.getRegionInfo().getRegionNameAsString(), repairTime); + } + } catch (IOException e) { + repairTime = EnvironmentEdgeManager.currentTimeMillis() - repairStart; + metricsSource.incrementIndexRepairFailures(indexName); + metricsSource.updateIndexRepairFailureTime(indexName, + EnvironmentEdgeManager.currentTimeMillis() - repairStart); + if (shouldLog()) { + LOG.warn("Index row repair failure on region {} took {} ms.", + env.getRegionInfo().getRegionNameAsString(), repairTime); + } + throw e; + } - @Override - public Optional getRegionObserver() { - return Optional.of(this); + if (cellList.isEmpty()) { + // This means that the index row is invalid. 
Return false to tell the caller that this row + // should be skipped + return false; + } + return true; + } } - @Override - public void start(CoprocessorEnvironment e) throws IOException { - this.env = e; - this.metricsSource = MetricsIndexerSourceFactory.getInstance().getGlobalIndexCheckerSource(); + private boolean shouldLog() { + if (loggingPercent == 0) { + return false; + } + return (random.nextDouble() <= (loggingPercent / 100.0d)); } + } + + @Override + protected boolean isRegionObserverFor(Scan scan) { + return scan.getAttribute(BaseScannerRegionObserverConstants.CHECK_VERIFY_COLUMN) != null; + } + + @Override + protected RegionScanner doPostScannerOpen(final ObserverContext c, + final Scan scan, final RegionScanner s) throws IOException, SQLException { + return new GlobalIndexScanner(c.getEnvironment(), scan, s, metricsSource); + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment e) throws IOException { + this.env = e; + this.metricsSource = MetricsIndexerSourceFactory.getInstance().getGlobalIndexCheckerSource(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java index 97c8f7bb7d3..b0c94f6b51b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,224 +59,231 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; -import org.apache.phoenix.util.ByteUtil; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ByteUtil; /** * Index builder for covered-columns index that ties into phoenix for faster use. 
*/ public class PhoenixIndexBuilder extends NonTxIndexBuilder { - private PhoenixIndexMetaDataBuilder indexMetaDataBuilder; - - @Override - public void setup(RegionCoprocessorEnvironment env) throws IOException { - super.setup(env); - this.indexMetaDataBuilder = new PhoenixIndexMetaDataBuilder(env); - } - - private static List flattenCells(Mutation m, int estimatedSize) throws IOException { - List flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize); - flattenCells(m, flattenedCells); - return flattenedCells; - } - - private static void flattenCells(Mutation m, List flattenedCells) throws IOException { - for (List cells : m.getFamilyCellMap().values()) { - flattenedCells.addAll(cells); - } - } - - @Override - public PhoenixIndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) throws IOException { - return indexMetaDataBuilder.getIndexMetaData(miniBatchOp); - } + private PhoenixIndexMetaDataBuilder indexMetaDataBuilder; - protected PhoenixIndexCodec getCodec() { - return (PhoenixIndexCodec)codec; - } + @Override + public void setup(RegionCoprocessorEnvironment env) throws IOException { + super.setup(env); + this.indexMetaDataBuilder = new PhoenixIndexMetaDataBuilder(env); + } + + private static List flattenCells(Mutation m, int estimatedSize) throws IOException { + List flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize); + flattenCells(m, flattenedCells); + return flattenedCells; + } - @Override - public void batchStarted(MiniBatchOperationInProgress miniBatchOp, IndexMetaData context) throws IOException { + private static void flattenCells(Mutation m, List flattenedCells) throws IOException { + for (List cells : m.getFamilyCellMap().values()) { + flattenedCells.addAll(cells); } - - @Override - public boolean isAtomicOp(Mutation m) { - return m.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB) != null; + } + + @Override + public PhoenixIndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) + throws IOException { + return indexMetaDataBuilder.getIndexMetaData(miniBatchOp); + } + + protected PhoenixIndexCodec getCodec() { + return (PhoenixIndexCodec) codec; + } + + @Override + public void batchStarted(MiniBatchOperationInProgress miniBatchOp, + IndexMetaData context) throws IOException { + } + + @Override + public boolean isAtomicOp(Mutation m) { + return m.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB) != null; + } + + private static void transferCells(Mutation source, Mutation target) { + target.getFamilyCellMap().putAll(source.getFamilyCellMap()); + } + + private static void transferAttributes(Mutation source, Mutation target) { + for (Map.Entry entry : source.getAttributesMap().entrySet()) { + target.setAttribute(entry.getKey(), entry.getValue()); } + } - private static void transferCells(Mutation source, Mutation target) { - target.getFamilyCellMap().putAll(source.getFamilyCellMap()); + private static List convertIncrementToPutInSingletonList(Increment inc) { + byte[] rowKey = inc.getRow(); + Put put = new Put(rowKey); + transferCells(inc, put); + transferAttributes(inc, put); + return Collections. 
singletonList(put); + } + + @Override + public List executeAtomicOp(Increment inc) throws IOException { + byte[] opBytes = inc.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB); + if (opBytes == null) { // Unexpected + return null; + } + inc.setAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB, null); + Put put = null; + Delete delete = null; + // We cannot neither use the time stamp in the Increment to set the Get time range + // nor set the Put/Delete time stamp and have this be atomic as HBase does not + // handle that. Though we disallow using ON DUPLICATE KEY clause when the + // CURRENT_SCN is set, we still may have a time stamp set as of when the table + // was resolved on the client side. We need to ignore this as well due to limitations + // in HBase, but this isn't too bad as the time will be very close the the current + // time anyway. + long ts = HConstants.LATEST_TIMESTAMP; + byte[] rowKey = inc.getRow(); + final Get get = new Get(rowKey); + if (PhoenixIndexBuilderHelper.isDupKeyIgnore(opBytes)) { + get.setFilter(new FirstKeyOnlyFilter()); + try (RegionScanner scanner = this.env.getRegion().getScanner(new Scan(get))) { + List cells = new ArrayList<>(); + scanner.next(cells); + return cells.isEmpty() + ? convertIncrementToPutInSingletonList(inc) + : Collections. emptyList(); + } } - private static void transferAttributes(Mutation source, Mutation target) { - for (Map.Entry entry : source.getAttributesMap().entrySet()) { - target.setAttribute(entry.getKey(), entry.getValue()); + ByteArrayInputStream stream = new ByteArrayInputStream(opBytes); + DataInputStream input = new DataInputStream(stream); + boolean skipFirstOp = input.readBoolean(); + short repeat = input.readShort(); + final int[] estimatedSizeHolder = { 0 }; + List>> operations = Lists.newArrayListWithExpectedSize(3); + while (true) { + ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { + @Override + public Void visit(KeyValueColumnExpression expression) { + get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier()); + estimatedSizeHolder[0]++; + return null; + } + }; + try { + int nExpressions = WritableUtils.readVInt(input); + List expressions = Lists.newArrayListWithExpectedSize(nExpressions); + for (int i = 0; i < nExpressions; i++) { + Expression expression = + ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + expression.readFields(input); + expressions.add(expression); + expression.accept(visitor); } + PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input); + PTable table = PTableImpl.createFromProto(tableProto); + operations.add(new Pair<>(table, expressions)); + } catch (EOFException e) { + break; + } } - private static List convertIncrementToPutInSingletonList(Increment inc) { - byte[] rowKey = inc.getRow(); - Put put = new Put(rowKey); - transferCells(inc, put); - transferAttributes(inc, put); - return Collections.singletonList(put); + int estimatedSize = estimatedSizeHolder[0]; + if (get.getFamilyMap().isEmpty()) { + get.setFilter(new FirstKeyOnlyFilter()); } - - @Override - public List executeAtomicOp(Increment inc) throws IOException { - byte[] opBytes = inc.getAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB); - if (opBytes == null) { // Unexpected - return null; + MultiKeyValueTuple tuple; + List flattenedCells = null; + List cells = ((HRegion) this.env.getRegion()).get(get, false); + if (cells.isEmpty()) { + if (skipFirstOp) { + if (operations.size() <= 1 && repeat <= 1) { + return 
convertIncrementToPutInSingletonList(inc); } - inc.setAttribute(PhoenixIndexBuilderHelper.ATOMIC_OP_ATTRIB, null); - Put put = null; - Delete delete = null; - // We cannot neither use the time stamp in the Increment to set the Get time range - // nor set the Put/Delete time stamp and have this be atomic as HBase does not - // handle that. Though we disallow using ON DUPLICATE KEY clause when the - // CURRENT_SCN is set, we still may have a time stamp set as of when the table - // was resolved on the client side. We need to ignore this as well due to limitations - // in HBase, but this isn't too bad as the time will be very close the the current - // time anyway. - long ts = HConstants.LATEST_TIMESTAMP; - byte[] rowKey = inc.getRow(); - final Get get = new Get(rowKey); - if (PhoenixIndexBuilderHelper.isDupKeyIgnore(opBytes)) { - get.setFilter(new FirstKeyOnlyFilter()); - try (RegionScanner scanner = this.env.getRegion().getScanner(new Scan(get))) { - List cells = new ArrayList<>(); - scanner.next(cells); - return cells.isEmpty() - ? convertIncrementToPutInSingletonList(inc) - : Collections.emptyList(); - } - } - ByteArrayInputStream stream = new ByteArrayInputStream(opBytes); - DataInputStream input = new DataInputStream(stream); - boolean skipFirstOp = input.readBoolean(); - short repeat = input.readShort(); - final int[] estimatedSizeHolder = {0}; - List>> operations = Lists.newArrayListWithExpectedSize(3); - while (true) { - ExpressionVisitor visitor = new StatelessTraverseAllExpressionVisitor() { - @Override - public Void visit(KeyValueColumnExpression expression) { - get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier()); - estimatedSizeHolder[0]++; - return null; - } - }; - try { - int nExpressions = WritableUtils.readVInt(input); - Listexpressions = Lists.newArrayListWithExpectedSize(nExpressions); - for (int i = 0; i < nExpressions; i++) { - Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - expression.readFields(input); - expressions.add(expression); - expression.accept(visitor); - } - PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input); - PTable table = PTableImpl.createFromProto(tableProto); - operations.add(new Pair<>(table, expressions)); - } catch (EOFException e) { - break; - } + repeat--; // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE) + } + // Base current state off of new row + flattenedCells = flattenCells(inc, estimatedSize); + tuple = new MultiKeyValueTuple(flattenedCells); + } else { + // Base current state off of existing row + tuple = new MultiKeyValueTuple(cells); + } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + for (int opIndex = 0; opIndex < operations.size(); opIndex++) { + Pair> operation = operations.get(opIndex); + PTable table = operation.getFirst(); + List expressions = operation.getSecond(); + for (int j = 0; j < repeat; j++) { // repeater loop + ptr.set(rowKey); + // Sort the list of cells (if they've been flattened in which case they're not necessarily + // ordered correctly). We only need the list sorted if the expressions are going to be + // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop. 
+ if (flattenedCells != null) { + Collections.sort(flattenedCells, CellComparatorImpl.COMPARATOR); } - int estimatedSize = estimatedSizeHolder[0]; - if (get.getFamilyMap().isEmpty()) { - get.setFilter(new FirstKeyOnlyFilter()); + PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); + int adjust = table.getBucketNum() == null ? 1 : 2; + for (int i = 0; i < expressions.size(); i++) { + Expression expression = expressions.get(i); + ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); + expression.evaluate(tuple, ptr); + PColumn column = table.getColumns().get(i + adjust); + Object value = expression.getDataType().toObject(ptr, column.getSortOrder()); + // We are guaranteed that the two column will have the + // same type. + if ( + !column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), + expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), + column.getMaxLength(), column.getScale()) + ) { + throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), + column.getScale(), column.getName().getString()); + } + column.getDataType().coerceBytes(ptr, value, expression.getDataType(), + expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), + column.getMaxLength(), column.getScale(), column.getSortOrder(), + table.rowKeyOrderOptimizable()); + byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); + row.setValue(column, bytes); } - MultiKeyValueTuple tuple; - List flattenedCells = null; - Listcells = ((HRegion)this.env.getRegion()).get(get, false); - if (cells.isEmpty()) { - if (skipFirstOp) { - if (operations.size() <= 1 && repeat <= 1) { - return convertIncrementToPutInSingletonList(inc); - } - repeat--; // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE) - } - // Base current state off of new row - flattenedCells = flattenCells(inc, estimatedSize); - tuple = new MultiKeyValueTuple(flattenedCells); - } else { - // Base current state off of existing row - tuple = new MultiKeyValueTuple(cells); + flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize); + List mutations = row.toRowMutations(); + for (Mutation source : mutations) { + flattenCells(source, flattenedCells); } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - for (int opIndex = 0; opIndex < operations.size(); opIndex++) { - Pair> operation = operations.get(opIndex); - PTable table = operation.getFirst(); - List expressions = operation.getSecond(); - for (int j = 0; j < repeat; j++) { // repeater loop - ptr.set(rowKey); - // Sort the list of cells (if they've been flattened in which case they're not necessarily - // ordered correctly). We only need the list sorted if the expressions are going to be - // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop. - if (flattenedCells != null) { - Collections.sort(flattenedCells,CellComparatorImpl.COMPARATOR); - } - PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false); - int adjust = table.getBucketNum() == null ? 1 : 2; - for (int i = 0; i < expressions.size(); i++) { - Expression expression = expressions.get(i); - ptr.set(ByteUtil.EMPTY_BYTE_ARRAY); - expression.evaluate(tuple, ptr); - PColumn column = table.getColumns().get(i + adjust); - Object value = expression.getDataType().toObject(ptr, column.getSortOrder()); - // We are guaranteed that the two column will have the - // same type. 
- if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), - expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), - column.getMaxLength(), column.getScale())) { - throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), - column.getScale(), column.getName().getString()); - } - column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), - expression.getScale(), expression.getSortOrder(),column.getMaxLength(), column.getScale(), - column.getSortOrder(), table.rowKeyOrderOptimizable()); - byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr); - row.setValue(column, bytes); - } - flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize); - List mutations = row.toRowMutations(); - for (Mutation source : mutations) { - flattenCells(source, flattenedCells); - } - tuple.setKeyValues(flattenedCells); - } - // Repeat only applies to first statement - repeat = 1; + tuple.setKeyValues(flattenedCells); + } + // Repeat only applies to first statement + repeat = 1; + } + + List mutations = Lists.newArrayListWithExpectedSize(2); + for (int i = 0; i < tuple.size(); i++) { + Cell cell = tuple.getValue(i); + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(rowKey); + transferAttributes(inc, put); + mutations.add(put); } - - List mutations = Lists.newArrayListWithExpectedSize(2); - for (int i = 0; i < tuple.size(); i++) { - Cell cell = tuple.getValue(i); - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(rowKey); - transferAttributes(inc, put); - mutations.add(put); - } - put.add(cell); - } else { - if (delete == null) { - delete = new Delete(rowKey); - transferAttributes(inc, delete); - mutations.add(delete); - } - delete.add(cell); - } + put.add(cell); + } else { + if (delete == null) { + delete = new Delete(rowKey); + transferAttributes(inc, delete); + mutations.add(delete); } - return mutations; + delete.add(cell); + } } + return mutations; + } - @Override - public ReplayWrite getReplayWrite(Mutation m) { - return PhoenixIndexMetaData.getReplayWrite(m.getAttributesMap()); - } + @Override + public ReplayWrite getReplayWrite(Mutation m) { + return PhoenixIndexMetaData.getReplayWrite(m.getAttributesMap()); + } - @Override - public boolean returnResult(Mutation m) { - return m.getAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT) != null; - } -} \ No newline at end of file + @Override + public boolean returnResult(Mutation m) { + return m.getAttribute(PhoenixIndexBuilderHelper.RETURN_RESULT) != null; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java index 31d58b70b6b..8e11b12ee5a 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -58,6 +58,10 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.QueryUtil; @@ -66,326 +70,329 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** - * - * Handler called in the event that index updates cannot be written to their - * region server. First attempts to disable the index and failing that falls - * back to the default behavior of killing the region server. - * + * Handler called in the event that index updates cannot be written to their region server. First + * attempts to disable the index and failing that falls back to the default behavior of killing the + * region server. */ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicy.class); - public static final String THROW_INDEX_WRITE_FAILURE = "THROW_INDEX_WRITE_FAILURE"; - public static final String DISABLE_INDEX_ON_WRITE_FAILURE = "DISABLE_INDEX_ON_WRITE_FAILURE"; - public static final String REBUILD_INDEX_ON_WRITE_FAILURE = "REBUILD_INDEX_ON_WRITE_FAILURE"; - public static final String BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE = "BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE"; - private RegionCoprocessorEnvironment env; - private boolean blockDataTableWritesOnFailure; - private boolean disableIndexOnFailure; - private boolean rebuildIndexOnFailure; - private boolean throwIndexWriteFailure; + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexFailurePolicy.class); + public static final String THROW_INDEX_WRITE_FAILURE = "THROW_INDEX_WRITE_FAILURE"; + public static final String DISABLE_INDEX_ON_WRITE_FAILURE = "DISABLE_INDEX_ON_WRITE_FAILURE"; + public static final String REBUILD_INDEX_ON_WRITE_FAILURE = "REBUILD_INDEX_ON_WRITE_FAILURE"; + public static final String BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE = + "BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE"; + private RegionCoprocessorEnvironment env; + private boolean blockDataTableWritesOnFailure; + private boolean disableIndexOnFailure; + private boolean rebuildIndexOnFailure; + private boolean throwIndexWriteFailure; - public PhoenixIndexFailurePolicy() { - super(new KillServerOnFailurePolicy()); + public PhoenixIndexFailurePolicy() { + super(new KillServerOnFailurePolicy()); + } + + @Override + public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { + super.setup(parent, env); + this.env = env; + rebuildIndexOnFailure = + 
env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD); + TableDescriptor htd = env.getRegion().getTableDescriptor(); + // If rebuild index is turned off globally, no need to check the table because the background + // thread + // won't be running in this case + if (rebuildIndexOnFailure) { + String value = htd.getValue(REBUILD_INDEX_ON_WRITE_FAILURE); + if (value != null) { + rebuildIndexOnFailure = Boolean.parseBoolean(value); + } + } + disableIndexOnFailure = getDisableIndexOnFailure(env); + String value = htd.getValue(BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE); + if (value == null) { + blockDataTableWritesOnFailure = + env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE); + } else { + blockDataTableWritesOnFailure = Boolean.parseBoolean(value); } - @Override - public void setup(Stoppable parent, RegionCoprocessorEnvironment env) { - super.setup(parent, env); - this.env = env; - rebuildIndexOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD); - TableDescriptor htd = env.getRegion().getTableDescriptor(); - // If rebuild index is turned off globally, no need to check the table because the background thread - // won't be running in this case - if (rebuildIndexOnFailure) { - String value = htd.getValue(REBUILD_INDEX_ON_WRITE_FAILURE); - if (value != null) { - rebuildIndexOnFailure = Boolean.parseBoolean(value); - } - } - disableIndexOnFailure = getDisableIndexOnFailure(env); - String value = htd.getValue(BLOCK_DATA_TABLE_WRITES_ON_WRITE_FAILURE); - if (value == null) { - blockDataTableWritesOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE); - } else { - blockDataTableWritesOnFailure = Boolean.parseBoolean(value); - } - - value = htd.getValue(THROW_INDEX_WRITE_FAILURE); - if (value == null) { - throwIndexWriteFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_THROW_EXCEPTION); - } else { - throwIndexWriteFailure = Boolean.parseBoolean(value); - } + value = htd.getValue(THROW_INDEX_WRITE_FAILURE); + if (value == null) { + throwIndexWriteFailure = + env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_THROW_EXCEPTION_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_THROW_EXCEPTION); + } else { + throwIndexWriteFailure = Boolean.parseBoolean(value); + } - boolean killServer = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_KILL_SERVER, true); - if (!killServer) { - setDelegate(new LeaveIndexActiveFailurePolicy()); - } // else, default in constructor is KillServerOnFailurePolicy + boolean killServer = + env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_KILL_SERVER, true); + if (!killServer) { + setDelegate(new LeaveIndexActiveFailurePolicy()); + } // else, default in constructor is KillServerOnFailurePolicy + } + + /** + * Check config for whether to disable index on index write failures + */ + public static boolean getDisableIndexOnFailure(RegionCoprocessorEnvironment env) { + TableDescriptor htd = env.getRegion().getTableDescriptor(); + Configuration config = env.getConfiguration(); + String value = htd.getValue(PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE); + boolean 
disableIndexOnFailure; + if (value == null) { + disableIndexOnFailure = config.getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX, + QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX); + } else { + disableIndexOnFailure = Boolean.parseBoolean(value); } + return disableIndexOnFailure; + } - /** - * Check config for whether to disable index on index write failures - * @param env - * @return - */ - public static boolean getDisableIndexOnFailure(RegionCoprocessorEnvironment env) { - TableDescriptor htd = env.getRegion().getTableDescriptor(); - Configuration config = env.getConfiguration(); - String value = htd.getValue(PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE); - boolean disableIndexOnFailure; - if (value == null) { - disableIndexOnFailure = - config.getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX, - QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX); + /** + * Attempt to disable the index table when we can't write to it, preventing future updates until + * the index is brought up to date, but allowing historical reads to continue until then. + *
<p>
    + * In the case that we cannot reach the metadata information, we will fall back to the default + * policy and kill this server, so we can attempt to replay the edits on restart. + *
<p>
    + * @param attempted the mutations that were attempted to be written and the tables to which they + * were written + * @param cause root cause of the failure + */ + @Override + public void handleFailure(Multimap attempted, Exception cause) + throws IOException { + boolean throwing = true; + long timestamp = HConstants.LATEST_TIMESTAMP; + // we should check if failed list of mutation are part of Index Rebuilder or not. + // If its part of Index Rebuilder, we throw exception and do retries. + // If succeeds, we don't update Index State. + // Once those retries are exhausted, we transition Index to DISABLE + // It's being handled as part of PhoenixIndexFailurePolicy.doBatchWithRetries + Mutation checkMutationForRebuilder = attempted.entries().iterator().next().getValue(); + boolean isIndexRebuild = + PhoenixIndexMetaData.isIndexRebuild(checkMutationForRebuilder.getAttributesMap()); + if (isIndexRebuild) { + SQLException sqlException = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_WRITE_FAILURE) + .setRootCause(cause).setMessage(cause.getLocalizedMessage()).build().buildException(); + IOException ioException = ServerUtil.wrapInDoNotRetryIOException( + "Retrying Index rebuild mutation, we will update Index state to DISABLE " + + "if all retries are exhusated", + sqlException, timestamp); + throw ioException; + } + try { + timestamp = handleFailureWithExceptions(attempted, cause); + throwing = false; + } catch (Throwable t) { + LOGGER.warn("handleFailure failed", t); + super.handleFailure(attempted, cause); + throwing = false; + } finally { + if (!throwing) { + SQLException sqlException = + new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_WRITE_FAILURE).setRootCause(cause) + .setMessage(cause.getLocalizedMessage()).build().buildException(); + IOException ioException = + ServerUtil.wrapInDoNotRetryIOException(null, sqlException, timestamp); + // Here we throw index write failure to client so it can retry index mutation. + if (throwIndexWriteFailure) { + throw ioException; } else { - disableIndexOnFailure = Boolean.parseBoolean(value); + LOGGER.warn("Swallowing index write failure", ioException); } - return disableIndexOnFailure; + } } + } - /** - * Attempt to disable the index table when we can't write to it, preventing future updates until the index is - * brought up to date, but allowing historical reads to continue until then. - *
<p>
    - * In the case that we cannot reach the metadata information, we will fall back to the default policy and kill - * this server, so we can attempt to replay the edits on restart. - *
<p>
    - * @param attempted the mutations that were attempted to be written and the tables to which they were written - * @param cause root cause of the failure - */ - @Override - public void handleFailure(Multimap attempted, Exception cause) throws IOException { - boolean throwing = true; - long timestamp = HConstants.LATEST_TIMESTAMP; - // we should check if failed list of mutation are part of Index Rebuilder or not. - // If its part of Index Rebuilder, we throw exception and do retries. - // If succeeds, we don't update Index State. - // Once those retries are exhausted, we transition Index to DISABLE - // It's being handled as part of PhoenixIndexFailurePolicy.doBatchWithRetries - Mutation checkMutationForRebuilder = attempted.entries().iterator().next().getValue(); - boolean isIndexRebuild = - PhoenixIndexMetaData.isIndexRebuild(checkMutationForRebuilder.getAttributesMap()); - if (isIndexRebuild) { - SQLException sqlException = - new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_WRITE_FAILURE) - .setRootCause(cause).setMessage(cause.getLocalizedMessage()).build() - .buildException(); - IOException ioException = ServerUtil.wrapInDoNotRetryIOException( - "Retrying Index rebuild mutation, we will update Index state to DISABLE " - + "if all retries are exhusated", sqlException, timestamp); - throw ioException; - } - try { - timestamp = handleFailureWithExceptions(attempted, cause); - throwing = false; - } catch (Throwable t) { - LOGGER.warn("handleFailure failed", t); - super.handleFailure(attempted, cause); - throwing = false; - } finally { - if (!throwing) { - SQLException sqlException = - new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_WRITE_FAILURE) - .setRootCause(cause).setMessage(cause.getLocalizedMessage()).build() - .buildException(); - IOException ioException = ServerUtil.wrapInDoNotRetryIOException(null, sqlException, timestamp); - // Here we throw index write failure to client so it can retry index mutation. - if (throwIndexWriteFailure) { - throw ioException; - } else { - LOGGER.warn("Swallowing index write failure", ioException); - } + private long handleFailureWithExceptions(Multimap attempted, + final Exception cause) throws Throwable { + Set refs = attempted.asMap().keySet(); + final Map indexTableNames = new HashMap(refs.size()); + // start by looking at all the tables to which we attempted to write + long timestamp = 0; + final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure; + // if using TrackingParallelWriter, we know which indexes failed and only disable those + Set failedTables = cause instanceof MultiIndexWriteFailureException + ? new HashSet( + ((MultiIndexWriteFailureException) cause).getFailedTables()) + : Collections. emptySet(); + + for (HTableInterfaceReference ref : refs) { + if (failedTables.size() > 0 && !failedTables.contains(ref)) { + continue; // leave index active if its writes succeeded + } + long minTimeStamp = 0; + + // get the minimum timestamp across all the mutations we attempted on that table + // FIXME: all cell timestamps should be the same + Collection mutations = attempted.get(ref); + if (mutations != null) { + for (Mutation m : mutations) { + for (List kvs : m.getFamilyCellMap().values()) { + for (Cell kv : kvs) { + if ( + minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp()) + ) { + minTimeStamp = kv.getTimestamp(); + } } + } } + } + timestamp = minTimeStamp; + + // If the data table has local index column families then get local indexes to disable. 
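For context on the check below: local index names are only collected when the failed writes went to the data table itself and that table carries local-index column families. MetaDataUtil.hasLocalIndexColumnFamily() essentially scans the table descriptor for column families using Phoenix's local-index prefix (conventionally "L#"). A minimal standalone sketch of that idea, with hypothetical class and constant names that are not part of this patch:

import java.util.Arrays;
import java.util.List;

// Standalone sketch (hypothetical names): roughly what the local-index
// column-family check looks for on a table descriptor.
public final class LocalIndexCfCheck {
  // Phoenix local-index column families are conventionally prefixed with "L#".
  private static final String LOCAL_INDEX_CF_PREFIX = "L#";

  static boolean hasLocalIndexColumnFamily(List<String> columnFamilies) {
    return columnFamilies.stream().anyMatch(cf -> cf.startsWith(LOCAL_INDEX_CF_PREFIX));
  }

  public static void main(String[] args) {
    System.out.println(hasLocalIndexColumnFamily(Arrays.asList("0", "L#0"))); // true
    System.out.println(hasLocalIndexColumnFamily(Arrays.asList("0")));        // false
  }
}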
+ if ( + ref.getTableName() + .equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) + && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDescriptor()) + ) { + for (String tableName : getLocalIndexNames(ref, mutations)) { + indexTableNames.put(tableName, minTimeStamp); + } + // client disables the index, so we pass the index names in the thrown exception + if (cause instanceof MultiIndexWriteFailureException) { + List failedLocalIndexes = + Lists.newArrayList(Iterables.transform(indexTableNames.entrySet(), + new Function, HTableInterfaceReference>() { + @Override + public HTableInterfaceReference apply(Entry input) { + return new HTableInterfaceReference( + new ImmutableBytesPtr(Bytes.toBytes(input.getKey()))); + } + })); + ((MultiIndexWriteFailureException) cause).setFailedTables(failedLocalIndexes); + } + } else { + indexTableNames.put(ref.getTableName(), minTimeStamp); + } } - private long handleFailureWithExceptions(Multimap attempted, - final Exception cause) throws Throwable { - Set refs = attempted.asMap().keySet(); - final Map indexTableNames = new HashMap(refs.size()); - // start by looking at all the tables to which we attempted to write - long timestamp = 0; - final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure; - // if using TrackingParallelWriter, we know which indexes failed and only disable those - Set failedTables = cause instanceof MultiIndexWriteFailureException - ? new HashSet(((MultiIndexWriteFailureException)cause).getFailedTables()) - : Collections.emptySet(); - - for (HTableInterfaceReference ref : refs) { - if (failedTables.size() > 0 && !failedTables.contains(ref)) { - continue; // leave index active if its writes succeeded - } - long minTimeStamp = 0; + // Nothing to do if we're not disabling the index and not rebuilding on failure + if (!disableIndexOnFailure && !rebuildIndexOnFailure) { + return timestamp; + } - // get the minimum timestamp across all the mutations we attempted on that table - // FIXME: all cell timestamps should be the same - Collection mutations = attempted.get(ref); - if (mutations != null) { - for (Mutation m : mutations) { - for (List kvs : m.getFamilyCellMap().values()) { - for (Cell kv : kvs) { - if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) { - minTimeStamp = kv.getTimestamp(); - } - } - } - } + final PIndexState newState = + disableIndexOnFailure ? PIndexState.PENDING_DISABLE : PIndexState.PENDING_ACTIVE; + final long fTimestamp = timestamp; + // for all the index tables that we've found, try to disable them and if that fails, try to + return User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Long run() throws Exception { + for (Map.Entry tableTimeElement : indexTableNames.entrySet()) { + String indexTableName = tableTimeElement.getKey(); + long minTimeStamp = tableTimeElement.getValue(); + // We need a way of differentiating the block writes to data table case from + // the leave index active case. In either case, we need to know the time stamp + // at which writes started failing so we can rebuild from that point. If we + // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, + // then writes to the data table will be blocked (this is client side logic + // and we can't change this in a minor release). So we use the sign of the + // time stamp to differentiate. 
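The sign trick described in the comment above can be illustrated in isolation: the magnitude carries the time stamp at which index writes started failing, while the sign records whether the failure should also block (or disable) writes. A minimal standalone sketch, with hypothetical names that are not part of this patch:

// Standalone sketch: encoding a boolean in the sign of the stored timestamp,
// as the comment above describes for INDEX_DISABLE_TIMESTAMP.
public final class SignedTimestampFlag {

  // Negative: index left active and data-table writes keep flowing.
  // Positive: the blocking/disabling path applies until the index is rebuilt.
  static long encode(long failureTimestamp, boolean blockWritesOrDisableIndex) {
    return blockWritesOrDisableIndex ? failureTimestamp : -failureTimestamp;
  }

  static long failureTimestamp(long stored) {
    return Math.abs(stored);
  }

  static boolean blocksWritesOrDisablesIndex(long stored) {
    return stored > 0;
  }

  public static void main(String[] args) {
    long stored = encode(1700000000000L, false);
    System.out.println(failureTimestamp(stored));             // 1700000000000
    System.out.println(blocksWritesOrDisablesIndex(stored));  // false
  }
}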
+ if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) { + minTimeStamp *= -1; + } + // Disable the index by using the updateIndexState method of MetaDataProtocol end point + // coprocessor. + try (Table systemTable = env.getConnection().getTable(SchemaUtil.getPhysicalTableName( + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) { + MetaDataMutationResult result = + IndexUtil.updateIndexState(indexTableName, minTimeStamp, systemTable, newState); + if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) { + LOGGER.info( + "Index " + indexTableName + " has been dropped. Ignore uncommitted mutations"); + continue; } - timestamp = minTimeStamp; - - // If the data table has local index column families then get local indexes to disable. - if (ref.getTableName().equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) - && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDescriptor())) { - for (String tableName : getLocalIndexNames(ref, mutations)) { - indexTableNames.put(tableName, minTimeStamp); - } - // client disables the index, so we pass the index names in the thrown exception - if (cause instanceof MultiIndexWriteFailureException) { - List failedLocalIndexes = - Lists.newArrayList(Iterables.transform(indexTableNames.entrySet(), - new Function, HTableInterfaceReference>() { - @Override - public HTableInterfaceReference apply(Entry input) { - return new HTableInterfaceReference(new ImmutableBytesPtr( - Bytes.toBytes(input.getKey()))); - } - })); - ((MultiIndexWriteFailureException) cause).setFailedTables(failedLocalIndexes); + if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { + if (leaveIndexActive) { + LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = " + + result.getMutationCode()); + // If we're not disabling the index, then we don't want to throw as throwing + // will lead to the RS being shutdown. + if (blockDataTableWritesOnFailure) { + throw new DoNotRetryIOException( + "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); } + } else { + LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = " + + result.getMutationCode() + ". Will use default failure policy instead."); + throw new DoNotRetryIOException( + "Attempt to disable " + indexTableName + " failed."); + } + } + LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + + " due to an exception while" + " writing updates. indexState=" + newState, cause); + } catch (Throwable t) { + if (t instanceof Exception) { + throw (Exception) t; } else { - indexTableNames.put(ref.getTableName(), minTimeStamp); + throw new Exception(t); } + } } + // Return the cell time stamp (note they should all be the same) + return fTimestamp; + } + }); + } - // Nothing to do if we're not disabling the index and not rebuilding on failure - if (!disableIndexOnFailure && !rebuildIndexOnFailure) { - return timestamp; - } - - final PIndexState newState = disableIndexOnFailure ? 
PIndexState.PENDING_DISABLE : PIndexState.PENDING_ACTIVE; - final long fTimestamp = timestamp; - // for all the index tables that we've found, try to disable them and if that fails, try to - return User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Long run() throws Exception { - for (Map.Entry tableTimeElement : indexTableNames.entrySet()) { - String indexTableName = tableTimeElement.getKey(); - long minTimeStamp = tableTimeElement.getValue(); - // We need a way of differentiating the block writes to data table case from - // the leave index active case. In either case, we need to know the time stamp - // at which writes started failing so we can rebuild from that point. If we - // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, - // then writes to the data table will be blocked (this is client side logic - // and we can't change this in a minor release). So we use the sign of the - // time stamp to differentiate. - if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) { - minTimeStamp *= -1; - } - // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor. - try (Table systemTable = env.getConnection().getTable(SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) { - MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp, - systemTable, newState); - if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) { - LOGGER.info("Index " + indexTableName + - " has been dropped. Ignore uncommitted mutations"); - continue; - } - if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { - if (leaveIndexActive) { - LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " - + " failed with code = " - + result.getMutationCode()); - // If we're not disabling the index, then we don't want to throw as throwing - // will lead to the RS being shutdown. - if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException( - "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); } - } else { - LOGGER.warn("Attempt to disable index " + indexTableName + - " failed with code = " + result.getMutationCode() + - ". Will use default failure policy instead."); - throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed."); - } - } - LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + - indexTableName + " due to an exception while" + - " writing updates. indexState=" + newState, - cause); - } catch (Throwable t) { - if (t instanceof Exception) { - throw (Exception)t; - } else { - throw new Exception(t); - } - } - } - // Return the cell time stamp (note they should all be the same) - return fTimestamp; - } - }); - } + private Collection getLocalIndexNames(HTableInterfaceReference ref, + Collection mutations) throws IOException { + Set indexTableNames = new HashSet(1); + PhoenixConnection conn = null; + try { + conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()) + .unwrap(PhoenixConnection.class); + PTable dataTable = conn.getTableNoCache(ref.getTableName()); + List indexes = dataTable.getIndexes(); + // local index used to get view id from index mutation row key. 
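One detail worth noting in the code below: the view-index ids are raw byte arrays, and a Java byte[] map key only has identity equality, which is why the ids are wrapped in ImmutableBytesWritable before being used as keys. A standalone sketch of the same idea using ByteBuffer, with hypothetical values that are not part of this patch:

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

// Standalone sketch: byte[] keys use identity equality, so a value-equal
// wrapper (ByteBuffer here, ImmutableBytesWritable in the patch) is needed to
// look up an index name by a view-index id parsed out of a mutation row key.
public final class ViewIndexIdLookup {
  public static void main(String[] args) {
    Map<ByteBuffer, String> localIndexNames = new HashMap<>();
    byte[] viewIndexId = { 0, 0, 0, 42 };          // hypothetical serialized id
    localIndexNames.put(ByteBuffer.wrap(viewIndexId), "MY_LOCAL_IDX");

    byte[] idFromRowKey = { 0, 0, 0, 42 };         // same value, different array
    System.out.println(localIndexNames.get(ByteBuffer.wrap(idFromRowKey))); // MY_LOCAL_IDX
  }
}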
+ PTable localIndex = null; + Map localIndexNames = + new HashMap(); + for (PTable index : indexes) { + if (localIndex == null) localIndex = index; + localIndexNames.put( + new ImmutableBytesWritable(index.getviewIndexIdType().toBytes(index.getViewIndexId())), + index.getName().getString()); + } + if (localIndex == null) { + return Collections.emptySet(); + } - private Collection getLocalIndexNames(HTableInterfaceReference ref, - Collection mutations) throws IOException { - Set indexTableNames = new HashSet(1); - PhoenixConnection conn = null; + IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn); + RegionInfo regionInfo = this.env.getRegion().getRegionInfo(); + int offset = regionInfo.getStartKey().length == 0 + ? regionInfo.getEndKey().length + : regionInfo.getStartKey().length; + byte[] viewId = null; + for (Mutation mutation : mutations) { + viewId = indexMaintainer.getViewIndexIdFromIndexRowKey( + new ImmutableBytesWritable(mutation.getRow(), offset, mutation.getRow().length - offset)); + String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId)); + if (indexTableName == null) { + LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + + Bytes.toStringBinary(viewId)); + } else { + indexTableNames.add(indexTableName); + } + } + } catch (SQLException e) { + throw new IOException(e); + } finally { + if (conn != null) { try { - conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()).unwrap( - PhoenixConnection.class); - PTable dataTable = conn.getTableNoCache(ref.getTableName()); - List indexes = dataTable.getIndexes(); - // local index used to get view id from index mutation row key. - PTable localIndex = null; - Map localIndexNames = - new HashMap(); - for (PTable index : indexes) { - if (localIndex == null) localIndex = index; - localIndexNames.put(new ImmutableBytesWritable(index.getviewIndexIdType().toBytes( - index.getViewIndexId())), index.getName().getString()); - } - if (localIndex == null) { - return Collections.emptySet(); - } - - IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn); - RegionInfo regionInfo = this.env.getRegion().getRegionInfo(); - int offset = - regionInfo.getStartKey().length == 0 ? 
regionInfo.getEndKey().length - : regionInfo.getStartKey().length; - byte[] viewId = null; - for (Mutation mutation : mutations) { - viewId = - indexMaintainer.getViewIndexIdFromIndexRowKey( - new ImmutableBytesWritable(mutation.getRow(), offset, - mutation.getRow().length - offset)); - String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId)); - if (indexTableName == null) { - LOGGER.error("Unable to find local index on " + ref.getTableName() + - " with viewID of " + Bytes.toStringBinary(viewId)); - } else { - indexTableNames.add(indexTableName); - } - } + conn.close(); } catch (SQLException e) { - throw new IOException(e); - } finally { - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - throw new IOException(e); - } - } + throw new IOException(e); } - return indexTableNames; + } } -} \ No newline at end of file + return indexTableNames; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaDataBuilder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaDataBuilder.java index bdb03883444..795dca2749b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaDataBuilder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaDataBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,67 +41,81 @@ import org.apache.phoenix.util.ScanUtil; public class PhoenixIndexMetaDataBuilder { - private final RegionCoprocessorEnvironment env; - - PhoenixIndexMetaDataBuilder(RegionCoprocessorEnvironment env) { - this.env = env; + private final RegionCoprocessorEnvironment env; + + PhoenixIndexMetaDataBuilder(RegionCoprocessorEnvironment env) { + this.env = env; + } + + public PhoenixIndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) + throws IOException { + IndexMetaDataCache indexMetaDataCache = + getIndexMetaDataCache(env, miniBatchOp.getOperation(0).getAttributesMap()); + return new PhoenixIndexMetaData(indexMetaDataCache, + miniBatchOp.getOperation(0).getAttributesMap()); + } + + private static IndexMetaDataCache getIndexMetaDataCache(RegionCoprocessorEnvironment env, + Map attributes) throws IOException { + if (attributes == null) { + return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; } - - public PhoenixIndexMetaData getIndexMetaData(MiniBatchOperationInProgress miniBatchOp) throws IOException { - IndexMetaDataCache indexMetaDataCache = getIndexMetaDataCache(env, miniBatchOp.getOperation(0).getAttributesMap()); - return new PhoenixIndexMetaData(indexMetaDataCache, miniBatchOp.getOperation(0).getAttributesMap()); + byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID); + if (uuid == null) { + return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; } + byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD); + if (md == null) { + md = attributes.get(PhoenixIndexCodec.INDEX_MD); + } + if (md != null) { + boolean useProto = md != null; + byte[] txState = attributes.get(BaseScannerRegionObserverConstants.TX_STATE); + final List indexMaintainers = IndexMaintainer.deserialize(md, useProto); + byte[] clientVersionBytes = 
attributes.get(BaseScannerRegionObserverConstants.CLIENT_VERSION); + final int clientVersion = clientVersionBytes == null + ? ScanUtil.UNKNOWN_CLIENT_VERSION + : Bytes.toInt(clientVersionBytes); + final PhoenixTransactionContext txnContext = + TransactionFactory.getTransactionContext(txState, clientVersion); + return new IndexMetaDataCache() { - private static IndexMetaDataCache getIndexMetaDataCache(RegionCoprocessorEnvironment env, Map attributes) throws IOException { - if (attributes == null) { return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; } - byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID); - if (uuid == null) { return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; } - byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD); - if (md == null) { - md = attributes.get(PhoenixIndexCodec.INDEX_MD); + @Override + public void close() throws IOException { } - if (md != null) { - boolean useProto = md != null; - byte[] txState = attributes.get(BaseScannerRegionObserverConstants.TX_STATE); - final List indexMaintainers = IndexMaintainer.deserialize(md, useProto); - byte[] clientVersionBytes = attributes.get(BaseScannerRegionObserverConstants.CLIENT_VERSION); - final int clientVersion = clientVersionBytes == null ? ScanUtil.UNKNOWN_CLIENT_VERSION : Bytes.toInt(clientVersionBytes); - final PhoenixTransactionContext txnContext = TransactionFactory.getTransactionContext(txState, clientVersion); - return new IndexMetaDataCache() { - - @Override - public void close() throws IOException {} - @Override - public List getIndexMaintainers() { - return indexMaintainers; - } - - @Override - public PhoenixTransactionContext getTransactionContext() { - return txnContext; - } + @Override + public List getIndexMaintainers() { + return indexMaintainers; + } - @Override - public int getClientVersion() { - return clientVersion; - } + @Override + public PhoenixTransactionContext getTransactionContext() { + return txnContext; + } - }; - } else { - byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB); - ImmutableBytesPtr tenantId = tenantIdBytes == null ? null : new ImmutableBytesPtr(tenantIdBytes); - TenantCache cache = GlobalCache.getTenantCache(env, tenantId); - IndexMetaDataCache indexCache = (IndexMetaDataCache)cache.getServerCache(new ImmutableBytesPtr(uuid)); - if (indexCache == null) { - String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + "host=" - + env.getServerName().getServerName(); - SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND).setMessage(msg) - .build().buildException(); - ClientUtil.throwIOException("Index update failed", e); // will not return - } - return indexCache; + @Override + public int getClientVersion() { + return clientVersion; } + }; + } else { + byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB); + ImmutableBytesPtr tenantId = + tenantIdBytes == null ? 
null : new ImmutableBytesPtr(tenantIdBytes); + TenantCache cache = GlobalCache.getTenantCache(env, tenantId); + IndexMetaDataCache indexCache = + (IndexMetaDataCache) cache.getServerCache(new ImmutableBytesPtr(uuid)); + if (indexCache == null) { + String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + + "host=" + env.getServerName().getServerName(); + SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND) + .setMessage(msg).build().buildException(); + ClientUtil.throwIOException("Index update failed", e); // will not return + } + return indexCache; } + + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java index 74d19198f55..a4506ee3c9f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -54,188 +54,198 @@ import org.slf4j.LoggerFactory; /** - * Do all the work of managing local index updates for a transactional table from a single coprocessor. Since the transaction - * manager essentially time orders writes through conflict detection, the logic to maintain a secondary index is quite a - * bit simpler than the non transactional case. For example, there's no need to muck with the WAL, as failure scenarios - * are handled by aborting the transaction. + * Do all the work of managing local index updates for a transactional table from a single + * coprocessor. Since the transaction manager essentially time orders writes through conflict + * detection, the logic to maintain a secondary index is quite a bit simpler than the non + * transactional case. For example, there's no need to muck with the WAL, as failure scenarios are + * handled by aborting the transaction. */ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoprocessor { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransactionalIndexer.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransactionalIndexer.class); - // Hack to get around not being able to save any state between - // coprocessor calls. TODO: remove after HBASE-18127 when available - private static class BatchMutateContext { - public Collection> indexUpdates = Collections.emptyList(); - public final int clientVersion; + // Hack to get around not being able to save any state between + // coprocessor calls. 
TODO: remove after HBASE-18127 when available + private static class BatchMutateContext { + public Collection> indexUpdates = Collections.emptyList(); + public final int clientVersion; - public BatchMutateContext(int clientVersion) { - this.clientVersion = clientVersion; - } - } - - private ThreadLocal batchMutateContext = - new ThreadLocal(); - - private PhoenixIndexCodec codec; - private IndexWriter writer; - private boolean stopped; - - @Override - public Optional getRegionObserver() { - return Optional.of(this); + public BatchMutateContext(int clientVersion) { + this.clientVersion = clientVersion; } - - @Override - public void start(CoprocessorEnvironment e) throws IOException { - final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment)e; - String serverName = env.getServerName().getServerName(); - codec = new PhoenixIndexCodec(env.getConfiguration(), env.getRegionInfo().getTable().getName()); - DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); - // setup the actual index writer - // For transactional tables, we keep the index active upon a write failure - // since we have the all versus none behavior for transactions. Also, we - // fail on any write exception since this will end up failing the transaction. - this.writer = new IndexWriter(IndexWriter.getCommitter(indexWriterEnv, ParallelWriterIndexCommitter.class), - new LeaveIndexActiveFailurePolicy(), indexWriterEnv, serverName + "-tx-index-writer"); + } + + private ThreadLocal batchMutateContext = + new ThreadLocal(); + + private PhoenixIndexCodec codec; + private IndexWriter writer; + private boolean stopped; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public void start(CoprocessorEnvironment e) throws IOException { + final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e; + String serverName = env.getServerName().getServerName(); + codec = new PhoenixIndexCodec(env.getConfiguration(), env.getRegionInfo().getTable().getName()); + DelegateRegionCoprocessorEnvironment indexWriterEnv = + new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION); + // setup the actual index writer + // For transactional tables, we keep the index active upon a write failure + // since we have the all versus none behavior for transactions. Also, we + // fail on any write exception since this will end up failing the transaction. 
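Because the transaction manager gives all-or-nothing semantics, the writer set up below is wired with LeaveIndexActiveFailurePolicy: on an index write failure the index stays active and the exception simply propagates, aborting the transaction and thereby undoing the data-table write as well. A minimal standalone sketch of that contract, with hypothetical interface and class names that are not part of this patch:

// Standalone sketch: the failure-handling contract described above, reduced to
// plain Java. For transactional tables the policy just rethrows, relying on
// the transaction abort to roll back data and index writes together.
public final class TxFailurePolicySketch {

  interface IndexFailureAction {
    void onIndexWriteFailure(Exception cause) throws Exception;
  }

  static final class LeaveIndexActive implements IndexFailureAction {
    @Override
    public void onIndexWriteFailure(Exception cause) throws Exception {
      throw cause; // aborting the transaction rolls back data and index together
    }
  }

  public static void main(String[] args) {
    IndexFailureAction policy = new LeaveIndexActive();
    try {
      policy.onIndexWriteFailure(new Exception("simulated index write failure"));
    } catch (Exception e) {
      System.out.println("transaction aborted: " + e.getMessage());
    }
  }
}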
+ this.writer = + new IndexWriter(IndexWriter.getCommitter(indexWriterEnv, ParallelWriterIndexCommitter.class), + new LeaveIndexActiveFailurePolicy(), indexWriterEnv, serverName + "-tx-index-writer"); + } + + @Override + public void stop(CoprocessorEnvironment e) throws IOException { + if (this.stopped) { + return; } - - @Override - public void stop(CoprocessorEnvironment e) throws IOException { - if (this.stopped) { return; } - this.stopped = true; - String msg = "TxIndexer is being stopped"; - this.writer.stop(msg); + this.stopped = true; + String msg = "TxIndexer is being stopped"; + this.writer.stop(msg); + } + + private static Iterator + getMutationIterator(final MiniBatchOperationInProgress miniBatchOp) { + return new Iterator() { + private int i = 0; + + @Override + public boolean hasNext() { + return i < miniBatchOp.size(); + } + + @Override + public Mutation next() { + return miniBatchOp.getOperation(i++); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + + Mutation m = miniBatchOp.getOperation(0); + if (!codec.isEnabled(m)) { + return; } - private static Iterator getMutationIterator(final MiniBatchOperationInProgress miniBatchOp) { - return new Iterator() { - private int i = 0; - - @Override - public boolean hasNext() { - return i < miniBatchOp.size(); - } - - @Override - public Mutation next() { - return miniBatchOp.getOperation(i++); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - }; + PhoenixIndexMetaData indexMetaData = + new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp); + if ( + indexMetaData.getClientVersion() >= MetaDataProtocol.MIN_TX_CLIENT_SIDE_MAINTENANCE + && !indexMetaData.hasLocalIndexes() + ) { // Still generate index updates server side for local indexes + return; } - - @Override - public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException { - - Mutation m = miniBatchOp.getOperation(0); - if (!codec.isEnabled(m)) { - return; - } - - PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp); - if ( indexMetaData.getClientVersion() >= MetaDataProtocol.MIN_TX_CLIENT_SIDE_MAINTENANCE - && !indexMetaData.hasLocalIndexes()) { // Still generate index updates server side for local indexes - return; - } - BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion()); - setBatchMutateContext(c, context); - - Collection> indexUpdates = null; - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - - RegionCoprocessorEnvironment env = c.getEnvironment(); - PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext(); - if (txnContext == null) { - throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString()); - } - PhoenixTxIndexMutationGenerator generator = new PhoenixTxIndexMutationGenerator(env.getConfiguration(), indexMetaData, - env.getRegionInfo().getTable().getName(), - env.getRegionInfo().getStartKey(), - env.getRegionInfo().getEndKey()); - try (Table htable = 
env.getConnection().getTable(env.getRegionInfo().getTable())) { - // get the index updates for all elements in this batch - indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp)); - } - byte[] tableName = c.getEnvironment().getRegionInfo().getTable().getName(); - Iterator> indexUpdatesItr = indexUpdates.iterator(); - List localUpdates = new ArrayList(indexUpdates.size()); - while(indexUpdatesItr.hasNext()) { - Pair next = indexUpdatesItr.next(); - if (Bytes.compareTo(next.getSecond(), tableName) == 0) { - // These mutations will not go through the preDelete hooks, so we - // must manually convert them here. - Mutation mutation = TransactionUtil.convertIfDelete(next.getFirst()); - localUpdates.add(mutation); - indexUpdatesItr.remove(); - } - } - if (!localUpdates.isEmpty()) { - miniBatchOp.addOperationsFromCP(0, - localUpdates.toArray(new Mutation[localUpdates.size()])); - } - if (!indexUpdates.isEmpty()) { - context.indexUpdates = indexUpdates; - } - - current.addTimelineAnnotation("Built index updates, doing preStep"); - TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size()); - } catch (Throwable t) { - String msg = "Failed to update index with entries:" + indexUpdates; - LOGGER.error(msg, t); - ClientUtil.throwIOException(msg, t); + BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion()); + setBatchMutateContext(c, context); + + Collection> indexUpdates = null; + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; + } + + RegionCoprocessorEnvironment env = c.getEnvironment(); + PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext(); + if (txnContext == null) { + throw new NullPointerException("Expected to find transaction in metadata for " + + env.getRegionInfo().getTable().getNameAsString()); + } + PhoenixTxIndexMutationGenerator generator = new PhoenixTxIndexMutationGenerator( + env.getConfiguration(), indexMetaData, env.getRegionInfo().getTable().getName(), + env.getRegionInfo().getStartKey(), env.getRegionInfo().getEndKey()); + try (Table htable = env.getConnection().getTable(env.getRegionInfo().getTable())) { + // get the index updates for all elements in this batch + indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp)); + } + byte[] tableName = c.getEnvironment().getRegionInfo().getTable().getName(); + Iterator> indexUpdatesItr = indexUpdates.iterator(); + List localUpdates = new ArrayList(indexUpdates.size()); + while (indexUpdatesItr.hasNext()) { + Pair next = indexUpdatesItr.next(); + if (Bytes.compareTo(next.getSecond(), tableName) == 0) { + // These mutations will not go through the preDelete hooks, so we + // must manually convert them here. 
+ Mutation mutation = TransactionUtil.convertIfDelete(next.getFirst()); + localUpdates.add(mutation); + indexUpdatesItr.remove(); } + } + if (!localUpdates.isEmpty()) { + miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()])); + } + if (!indexUpdates.isEmpty()) { + context.indexUpdates = indexUpdates; + } + + current.addTimelineAnnotation("Built index updates, doing preStep"); + TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size()); + } catch (Throwable t) { + String msg = "Failed to update index with entries:" + indexUpdates; + LOGGER.error(msg, t); + ClientUtil.throwIOException(msg, t); } - - @Override - public void postBatchMutateIndispensably(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { - BatchMutateContext context = getBatchMutateContext(c); - if (context == null || context.indexUpdates == null) { - return; + } + + @Override + public void postBatchMutateIndispensably(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { + BatchMutateContext context = getBatchMutateContext(c); + if (context == null || context.indexUpdates == null) { + return; + } + // get the current span, or just use a null-span to avoid a bunch of if statements + try (TraceScope scope = Trace.startSpan("Starting to write index updates")) { + Span current = scope.getSpan(); + if (current == null) { + current = NullSpan.INSTANCE; + } + + if (success) { // if miniBatchOp was successfully written, write index updates + if (!context.indexUpdates.isEmpty()) { + this.writer.write(context.indexUpdates, false, context.clientVersion); } - // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to write index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - - if (success) { // if miniBatchOp was successfully written, write index updates - if (!context.indexUpdates.isEmpty()) { - this.writer.write(context.indexUpdates, false, context.clientVersion); - } - current.addTimelineAnnotation("Wrote index updates"); - } - } catch (Throwable t) { - String msg = "Failed to write index updates:" + context.indexUpdates; - LOGGER.error(msg, t); - ClientUtil.throwIOException(msg, t); - } finally { - removeBatchMutateContext(c); - } + current.addTimelineAnnotation("Wrote index updates"); + } + } catch (Throwable t) { + String msg = "Failed to write index updates:" + context.indexUpdates; + LOGGER.error(msg, t); + ClientUtil.throwIOException(msg, t); + } finally { + removeBatchMutateContext(c); } + } - private void setBatchMutateContext(ObserverContext c, BatchMutateContext context) { - this.batchMutateContext.set(context); - } - - private BatchMutateContext getBatchMutateContext(ObserverContext c) { - return this.batchMutateContext.get(); - } - - private void removeBatchMutateContext(ObserverContext c) { - this.batchMutateContext.remove(); - } + private void setBatchMutateContext(ObserverContext c, + BatchMutateContext context) { + this.batchMutateContext.set(context); + } + + private BatchMutateContext + getBatchMutateContext(ObserverContext c) { + return this.batchMutateContext.get(); + } + + private void removeBatchMutateContext(ObserverContext c) { + this.batchMutateContext.remove(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java 
b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java index bed1ead96da..7ced35bda6b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,10 +20,6 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -39,109 +35,114 @@ import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** - * Scan grouper that creates a scan group if a plan is row key ordered or if a - * scan crosses region boundaries + * Scan grouper that creates a scan group if a plan is row key ordered or if a scan crosses region + * boundaries */ public class MapReduceParallelScanGrouper implements ParallelScanGrouper { - private static final MapReduceParallelScanGrouper INSTANCE = new MapReduceParallelScanGrouper(); - - public static MapReduceParallelScanGrouper getInstance() { - return INSTANCE; - } - - @VisibleForTesting - MapReduceParallelScanGrouper() {} - - @Override - public boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, - byte[] startKey, boolean crossesRegionBoundary) { - return (!plan.isRowKeyOrdered() || crossesRegionBoundary) && lastScan != null; - } - - @Override - public List getRegionBoundaries(StatementContext context, byte[] tableName) throws SQLException { - String snapshotName; - Configuration conf = context.getConnection().getQueryServices().getConfiguration(); - if ((snapshotName = getSnapshotName(conf)) != null) { - return getRegionLocationsFromSnapshot(conf, snapshotName); - } else { - return context.getConnection().getQueryServices().getAllTableRegions(tableName, - context.getStatement().getQueryTimeoutInMillis()); - } - } - - /** - * {@inheritDoc}. 
- */ - @Override - public List getRegionBoundaries(StatementContext context, byte[] tableName, - byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { - String snapshotName; - Configuration conf = context.getConnection().getQueryServices().getConfiguration(); - if ((snapshotName = getSnapshotName(conf)) != null) { - return getRegionLocationsFromSnapshot(conf, snapshotName); - } else { - return context.getConnection().getQueryServices() - .getTableRegions(tableName, startRegionBoundaryKey, stopRegionBoundaryKey, - context.getStatement().getQueryTimeoutInMillis()); - } - } - - private List getRegionLocationsFromSnapshot(Configuration conf, - String snapshotName) { - try { - Path rootDir = new Path(conf.get(HConstants.HBASE_DIR)); - FileSystem fs = rootDir.getFileSystem(conf); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); - SnapshotDescription snapshotDescription = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDescription); - return getRegionLocationsFromManifest(manifest); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - /** - * Get list of region locations from SnapshotManifest - * BaseResultIterators assume that regions are sorted using RegionInfo.COMPARATOR - */ - private List getRegionLocationsFromManifest(SnapshotManifest manifest) { - List regionManifests = manifest.getRegionManifests(); - Preconditions.checkNotNull(regionManifests); - - List regionInfos = Lists.newArrayListWithCapacity(regionManifests.size()); - List hRegionLocations = Lists.newArrayListWithCapacity(regionManifests.size()); - - for (SnapshotRegionManifest regionManifest : regionManifests) { - RegionInfo regionInfo = ProtobufUtil.toRegionInfo(regionManifest.getRegionInfo()); - if (isValidRegion(regionInfo)) { - regionInfos.add(regionInfo); - } - } - - regionInfos.sort(RegionInfo.COMPARATOR); - - for (RegionInfo regionInfo : regionInfos) { - hRegionLocations.add(new HRegionLocation(regionInfo, null)); - } - - return hRegionLocations; - } - - // Exclude offline split parent regions - private boolean isValidRegion(RegionInfo hri) { - if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) { - return false; - } - return true; - } - - private String getSnapshotName(Configuration conf) { - return conf.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); - } + private static final MapReduceParallelScanGrouper INSTANCE = new MapReduceParallelScanGrouper(); + + public static MapReduceParallelScanGrouper getInstance() { + return INSTANCE; + } + + @VisibleForTesting + MapReduceParallelScanGrouper() { + } + + @Override + public boolean shouldStartNewScan(QueryPlan plan, Scan lastScan, byte[] startKey, + boolean crossesRegionBoundary) { + return (!plan.isRowKeyOrdered() || crossesRegionBoundary) && lastScan != null; + } + + @Override + public List getRegionBoundaries(StatementContext context, byte[] tableName) + throws SQLException { + String snapshotName; + Configuration conf = context.getConnection().getQueryServices().getConfiguration(); + if ((snapshotName = getSnapshotName(conf)) != null) { + return getRegionLocationsFromSnapshot(conf, snapshotName); + } else { + return context.getConnection().getQueryServices().getAllTableRegions(tableName, + context.getStatement().getQueryTimeoutInMillis()); + } + } + + /** + * {@inheritDoc}. 
+ */ + @Override + public List getRegionBoundaries(StatementContext context, byte[] tableName, + byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { + String snapshotName; + Configuration conf = context.getConnection().getQueryServices().getConfiguration(); + if ((snapshotName = getSnapshotName(conf)) != null) { + return getRegionLocationsFromSnapshot(conf, snapshotName); + } else { + return context.getConnection().getQueryServices().getTableRegions(tableName, + startRegionBoundaryKey, stopRegionBoundaryKey, + context.getStatement().getQueryTimeoutInMillis()); + } + } + + private List getRegionLocationsFromSnapshot(Configuration conf, + String snapshotName) { + try { + Path rootDir = new Path(conf.get(HConstants.HBASE_DIR)); + FileSystem fs = rootDir.getFileSystem(conf); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); + SnapshotDescription snapshotDescription = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDescription); + return getRegionLocationsFromManifest(manifest); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Get list of region locations from SnapshotManifest BaseResultIterators assume that regions are + * sorted using RegionInfo.COMPARATOR + */ + private List getRegionLocationsFromManifest(SnapshotManifest manifest) { + List regionManifests = manifest.getRegionManifests(); + Preconditions.checkNotNull(regionManifests); + + List regionInfos = Lists.newArrayListWithCapacity(regionManifests.size()); + List hRegionLocations = Lists.newArrayListWithCapacity(regionManifests.size()); + + for (SnapshotRegionManifest regionManifest : regionManifests) { + RegionInfo regionInfo = ProtobufUtil.toRegionInfo(regionManifest.getRegionInfo()); + if (isValidRegion(regionInfo)) { + regionInfos.add(regionInfo); + } + } + + regionInfos.sort(RegionInfo.COMPARATOR); + + for (RegionInfo regionInfo : regionInfos) { + hRegionLocations.add(new HRegionLocation(regionInfo, null)); + } + + return hRegionLocations; + } + + // Exclude offline split parent regions + private boolean isValidRegion(RegionInfo hri) { + if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) { + return false; + } + return true; + } + + private String getSnapshotName(Configuration conf) { + return conf.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java index c7dd29fc9d2..8523ab8a713 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/NonAggregateRegionScannerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.iterate; import static org.apache.phoenix.util.EncodedColumnsUtil.getMinMaxQualifiersFromScan; @@ -78,6 +77,9 @@ import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.ByteUtil; @@ -85,591 +87,542 @@ import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ScanUtil; - -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class NonAggregateRegionScannerFactory extends RegionScannerFactory { - private static final Logger LOGGER = - LoggerFactory.getLogger(NonAggregateRegionScannerFactory.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(NonAggregateRegionScannerFactory.class); + + public NonAggregateRegionScannerFactory(RegionCoprocessorEnvironment env) { + this.env = env; + } + + @Override + public RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) throws Throwable { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int offset = 0; + if (ScanUtil.isLocalIndex(scan)) { + /* + * For local indexes, we need to set an offset on row key expressions to skip the region start + * key. + */ + Region region = getRegion(); + offset = region.getRegionInfo().getStartKey().length != 0 + ? 
region.getRegionInfo().getStartKey().length + : region.getRegionInfo().getEndKey().length; + ScanUtil.setRowKeyOffset(scan, offset); + } + byte[] scanOffsetBytes = scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET); + Integer scanOffset = null; + if (scanOffsetBytes != null) { + scanOffset = (Integer) PInteger.INSTANCE.toObject(scanOffsetBytes); + } + RegionScanner innerScanner = s; + PTable.QualifierEncodingScheme encodingScheme = + EncodedColumnsUtil.getQualifierEncodingScheme(scan); + boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); + + Set serverParsedKVRefs = Sets.newHashSet(); + KeyValueSchema kvSchema = null; + ValueBitSet kvSchemaBitSet = null; + List resultList = getServerParsedExpressions(scan, serverParsedKVRefs); + Expression[] serverParsedFuncRefs = resultList.toArray(new Expression[0]); + if (serverParsedFuncRefs != null && serverParsedFuncRefs.length > 0) { + KeyValueSchema.KeyValueSchemaBuilder builder = new KeyValueSchema.KeyValueSchemaBuilder(0); + for (Expression expression : serverParsedFuncRefs) { + builder.addField(expression); + } + kvSchema = builder.build(); + kvSchemaBitSet = ValueBitSet.newInstance(kvSchema); + } + TupleProjector tupleProjector = null; + Region dataRegion = null; + IndexMaintainer indexMaintainer = null; + byte[][] viewConstants = null; + PhoenixTransactionContext tx = null; + ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); + if (dataColumns != null || ScanUtil.isUncoveredGlobalIndex(scan)) { + if (dataColumns != null) { + tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); + } + dataRegion = env.getRegion(); + int clientVersion = ScanUtil.getClientVersion(scan); + List indexMaintainers = IndexUtil.deSerializeIndexMaintainersFromScan(scan); + indexMaintainer = indexMaintainers.get(0); + viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); + byte[] txState = scan.getAttribute(BaseScannerRegionObserverConstants.TX_STATE); + tx = TransactionFactory.getTransactionContext(txState, clientVersion); + } - public NonAggregateRegionScannerFactory(RegionCoprocessorEnvironment env) { - this.env = env; + final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); + final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); + boolean useQualifierAsIndex = + EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) + && scan.getAttribute(BaseScannerRegionObserverConstants.TOPN) != null; + // setting dataRegion in case of a non-coprocessor environment + if ( + dataRegion == null + && env.getConfiguration().get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY) != null + ) { + dataRegion = env.getRegion(); } + innerScanner = getWrappedScanner(env, innerScanner, serverParsedKVRefs, serverParsedFuncRefs, + offset, scan, dataColumns, tupleProjector, dataRegion, indexMaintainer, tx, viewConstants, + kvSchema, kvSchemaBitSet, j == null ? 
p : null, ptr, useQualifierAsIndex); + + final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan); + if (j != null) { + innerScanner = new HashJoinRegionScanner(env, innerScanner, scan, serverParsedKVRefs, + serverParsedFuncRefs, p, j, tenantId, useQualifierAsIndex, useNewValueColumnQualifier); + } + if (scanOffset != null) { + final boolean isIncompatibleClient = + ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); + RegionScannerResultIterator iterator = new RegionScannerResultIterator(scan, innerScanner, + getMinMaxQualifiersFromScan(scan), encodingScheme); + ScannerContext sc = iterator.getRegionScannerContext(); + innerScanner = getOffsetScanner(innerScanner, + new OffsetResultIterator(iterator, scanOffset, getPageSizeMsForRegionScanner(scan), + isIncompatibleClient), + scan.getAttribute(QueryConstants.LAST_SCAN) != null, isIncompatibleClient, scan, sc); + } + boolean spoolingEnabled = + env.getConfiguration().getBoolean(QueryServices.SERVER_ORDERBY_SPOOLING_ENABLED_ATTRIB, + QueryServicesOptions.DEFAULT_SERVER_ORDERBY_SPOOLING_ENABLED); + long thresholdBytes = + env.getConfiguration().getLongBytes(QueryServices.SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES); + OrderedResultIteratorWithScannerContext ic = + deserializeFromScan(scan, innerScanner, spoolingEnabled, thresholdBytes); + final OrderedResultIterator iterator = ic.getIterator(); + if (iterator == null) { + return innerScanner; + } + // TODO:the above wrapped scanner should be used here also + return getTopNScanner(env, innerScanner, iterator, tenantId, ic.getScannerContext()); + } + + private List getServerParsedExpressions(Scan scan, + Set serverParsedKVRefs) { + Expression[] serverParsedArrayFuncRefs = null; + if (scan.getAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX) != null) { + serverParsedArrayFuncRefs = deserializeServerParsedPositionalExpressionInfoFromScan(scan, + BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX, serverParsedKVRefs); + } + List resultList = new ArrayList<>(); + if (serverParsedArrayFuncRefs != null) { + Collections.addAll(resultList, serverParsedArrayFuncRefs); + } + deserializeAndAddComplexDataTypeFunctions(scan, + BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION, serverParsedKVRefs, resultList); + deserializeAndAddComplexDataTypeFunctions(scan, + BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION, serverParsedKVRefs, resultList); + Expression[] serverParsedJsonQueryFuncRefs = null; + if (scan.getAttribute(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION) != null) { + serverParsedJsonQueryFuncRefs = deserializeServerParsedPositionalExpressionInfoFromScan(scan, + BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION, serverParsedKVRefs); + } + if (serverParsedJsonQueryFuncRefs != null) { + Collections.addAll(resultList, serverParsedJsonQueryFuncRefs); + } + return resultList; + } + + private void deserializeAndAddComplexDataTypeFunctions(Scan scan, String functionName, + Set serverParsedKVRefs, List resultList) { + if (scan.getAttribute(functionName) != null) { + Expression[] serverParsedJsonValueFuncRefs = + deserializeServerParsedPositionalExpressionInfoFromScan(scan, functionName, + serverParsedKVRefs); + if (serverParsedJsonValueFuncRefs != null) { + Collections.addAll(resultList, serverParsedJsonValueFuncRefs); + } + } + } + + @VisibleForTesting + static OrderedResultIteratorWithScannerContext deserializeFromScan(Scan scan, RegionScanner s, + boolean spoolingEnabled, long 
thresholdBytes) { + byte[] topN = scan.getAttribute(BaseScannerRegionObserverConstants.TOPN); + if (topN == null) { + return new OrderedResultIteratorWithScannerContext(null, null); + } + int clientVersion = ScanUtil.getClientVersion(scan); + // Client including and after 4.15 and 5.1 are not going to serialize thresholdBytes + // so we need to decode this only for older clients to not break wire compat + boolean shouldDecodeSpoolThreshold = + (scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION) == null) + || (VersionUtil.decodeMajorVersion(clientVersion) > 5) + || (VersionUtil.decodeMajorVersion(clientVersion) == 5 + && clientVersion < MetaDataProtocol.MIN_5_x_DISABLE_SERVER_SPOOL_THRESHOLD) + || (VersionUtil.decodeMajorVersion(clientVersion) == 4 + && clientVersion < MetaDataProtocol.MIN_4_x_DISABLE_SERVER_SPOOL_THRESHOLD); + ByteArrayInputStream stream = new ByteArrayInputStream(topN); // TODO: size? + try { + DataInputStream input = new DataInputStream(stream); + if (shouldDecodeSpoolThreshold) { + // Read off the scan but ignore, we won't honor client sent thresholdbytes, but the + // one set on server + WritableUtils.readVInt(input); + } + int limit = WritableUtils.readVInt(input); + int estimatedRowSize = WritableUtils.readVInt(input); + int size = WritableUtils.readVInt(input); + List orderByExpressions = Lists.newArrayListWithExpectedSize(size); + for (int i = 0; i < size; i++) { + OrderByExpression orderByExpression = new OrderByExpression(); + orderByExpression.readFields(input); + orderByExpressions.add(orderByExpression); + } + PTable.QualifierEncodingScheme encodingScheme = + EncodedColumnsUtil.getQualifierEncodingScheme(scan); + RegionScannerResultIterator inner = new RegionScannerResultIterator(scan, s, + EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan), encodingScheme); + OrderedResultIterator iterator = new OrderedResultIterator(inner, orderByExpressions, + spoolingEnabled, thresholdBytes, limit >= 0 ? limit : null, null, estimatedRowSize, + getPageSizeMsForRegionScanner(scan), scan, s.getRegionInfo()); + return new OrderedResultIteratorWithScannerContext(inner.getRegionScannerContext(), iterator); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } - @Override - public RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) throws Throwable { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int offset = 0; - if (ScanUtil.isLocalIndex(scan)) { - /* - * For local indexes, we need to set an offset on row key expressions to skip - * the region start key. - */ - Region region = getRegion(); - offset = region.getRegionInfo().getStartKey().length != 0 ? 
- region.getRegionInfo().getStartKey().length : - region.getRegionInfo().getEndKey().length; - ScanUtil.setRowKeyOffset(scan, offset); - } - byte[] scanOffsetBytes = scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_OFFSET); - Integer scanOffset = null; - if (scanOffsetBytes != null) { - scanOffset = (Integer)PInteger.INSTANCE.toObject(scanOffsetBytes); - } - RegionScanner innerScanner = s; - PTable.QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan); - boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); - - Set serverParsedKVRefs = Sets.newHashSet(); - KeyValueSchema kvSchema = null; - ValueBitSet kvSchemaBitSet = null; - List resultList = getServerParsedExpressions(scan, serverParsedKVRefs); - Expression[] serverParsedFuncRefs = resultList.toArray(new Expression[0]); - if (serverParsedFuncRefs != null && serverParsedFuncRefs.length > 0) { - KeyValueSchema.KeyValueSchemaBuilder - builder = - new KeyValueSchema.KeyValueSchemaBuilder(0); - for (Expression expression : serverParsedFuncRefs) { - builder.addField(expression); - } - kvSchema = builder.build(); - kvSchemaBitSet = ValueBitSet.newInstance(kvSchema); - } - TupleProjector tupleProjector = null; - Region dataRegion = null; - IndexMaintainer indexMaintainer = null; - byte[][] viewConstants = null; - PhoenixTransactionContext tx = null; - ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan); - if (dataColumns != null || ScanUtil.isUncoveredGlobalIndex(scan)) { - if (dataColumns != null) { - tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns); - } - dataRegion = env.getRegion(); - int clientVersion = ScanUtil.getClientVersion(scan); - List indexMaintainers = - IndexUtil.deSerializeIndexMaintainersFromScan(scan); - indexMaintainer = indexMaintainers.get(0); - viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan); - byte[] txState = scan.getAttribute(BaseScannerRegionObserverConstants.TX_STATE); - tx = TransactionFactory.getTransactionContext(txState, clientVersion); - } + private static class OrderedResultIteratorWithScannerContext { + private ScannerContext scannerContext; + private OrderedResultIterator iterator; - final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan); - final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan); - boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(getMinMaxQualifiersFromScan(scan)) - && scan.getAttribute(BaseScannerRegionObserverConstants.TOPN) != null; - // setting dataRegion in case of a non-coprocessor environment - if (dataRegion == null && - env.getConfiguration().get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY) != null) { - dataRegion = env.getRegion(); - } - innerScanner = - getWrappedScanner(env, innerScanner, serverParsedKVRefs, serverParsedFuncRefs, - offset, scan, dataColumns, tupleProjector, dataRegion, indexMaintainer, tx, - viewConstants, kvSchema, kvSchemaBitSet, j == null ? 
p : null, ptr, - useQualifierAsIndex); - - final ImmutableBytesPtr tenantId = ScanUtil.getTenantId(scan); - if (j != null) { - innerScanner = - new HashJoinRegionScanner(env, innerScanner, scan, serverParsedKVRefs, - serverParsedFuncRefs, p, j, tenantId, useQualifierAsIndex, - useNewValueColumnQualifier); - } - if (scanOffset != null) { - final boolean isIncompatibleClient = - ScanUtil.isIncompatibleClientForServerReturnValidRowKey(scan); - RegionScannerResultIterator iterator = new RegionScannerResultIterator(scan, - innerScanner, - getMinMaxQualifiersFromScan(scan), - encodingScheme); - ScannerContext sc = iterator.getRegionScannerContext(); - innerScanner = getOffsetScanner( - innerScanner, - new OffsetResultIterator( - iterator, - scanOffset, - getPageSizeMsForRegionScanner(scan), - isIncompatibleClient), - scan.getAttribute(QueryConstants.LAST_SCAN) != null, - isIncompatibleClient, - scan, sc); - } - boolean spoolingEnabled = - env.getConfiguration().getBoolean( - QueryServices.SERVER_ORDERBY_SPOOLING_ENABLED_ATTRIB, - QueryServicesOptions.DEFAULT_SERVER_ORDERBY_SPOOLING_ENABLED); - long thresholdBytes = - env.getConfiguration() - .getLongBytes(QueryServices.SERVER_SPOOL_THRESHOLD_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_SERVER_SPOOL_THRESHOLD_BYTES); - OrderedResultIteratorWithScannerContext ic - = deserializeFromScan(scan, innerScanner, spoolingEnabled, thresholdBytes); - final OrderedResultIterator iterator = ic.getIterator(); - if (iterator == null) { - return innerScanner; - } - // TODO:the above wrapped scanner should be used here also - return getTopNScanner(env, innerScanner, iterator, tenantId, ic.getScannerContext()); + OrderedResultIteratorWithScannerContext(ScannerContext sc, OrderedResultIterator ori) { + this.scannerContext = sc; + this.iterator = ori; } - private List getServerParsedExpressions(Scan scan, - Set serverParsedKVRefs) { - Expression[] serverParsedArrayFuncRefs = null; - if (scan.getAttribute(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX) != null) { - serverParsedArrayFuncRefs = - deserializeServerParsedPositionalExpressionInfoFromScan(scan, - BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX, serverParsedKVRefs); - } - List resultList = new ArrayList<>(); - if (serverParsedArrayFuncRefs != null) { - Collections.addAll(resultList, serverParsedArrayFuncRefs); - } - deserializeAndAddComplexDataTypeFunctions(scan, - BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION, serverParsedKVRefs, - resultList); - deserializeAndAddComplexDataTypeFunctions(scan, - BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION, serverParsedKVRefs, - resultList); - Expression[] serverParsedJsonQueryFuncRefs = null; - if (scan.getAttribute(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION) != null) { - serverParsedJsonQueryFuncRefs = - deserializeServerParsedPositionalExpressionInfoFromScan(scan, - BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION, serverParsedKVRefs); - } - if (serverParsedJsonQueryFuncRefs != null) { - Collections.addAll(resultList, serverParsedJsonQueryFuncRefs); - } - return resultList; + public ScannerContext getScannerContext() { + return scannerContext; } - private void deserializeAndAddComplexDataTypeFunctions(Scan scan, - String functionName, - Set - serverParsedKVRefs, - List resultList) { - if (scan.getAttribute(functionName) != null) { - Expression[] serverParsedJsonValueFuncRefs = - deserializeServerParsedPositionalExpressionInfoFromScan(scan, - functionName, serverParsedKVRefs); - if (serverParsedJsonValueFuncRefs != 
null) { - Collections.addAll(resultList, serverParsedJsonValueFuncRefs); - } - } + public OrderedResultIterator getIterator() { + return iterator; } + } - @VisibleForTesting - static OrderedResultIteratorWithScannerContext deserializeFromScan(Scan scan, RegionScanner s, - boolean spoolingEnabled, long thresholdBytes) { - byte[] topN = scan.getAttribute(BaseScannerRegionObserverConstants.TOPN); - if (topN == null) { - return new OrderedResultIteratorWithScannerContext(null, null); + private Expression[] deserializeServerParsedPositionalExpressionInfoFromScan(Scan scan, + String scanAttribute, Set serverParsedKVRefs) { + byte[] specificArrayIdx = scan.getAttribute(scanAttribute); + if (specificArrayIdx == null) { + return null; + } + ByteArrayInputStream stream = new ByteArrayInputStream(specificArrayIdx); + try { + DataInputStream input = new DataInputStream(stream); + int kvRefSize = WritableUtils.readVInt(input); + for (int i = 0; i < kvRefSize; i++) { + PTable.ImmutableStorageScheme scheme = EncodedColumnsUtil.getImmutableStorageScheme(scan); + KeyValueColumnExpression kvExp = scheme != PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN + ? new SingleCellColumnExpression(scheme) + : new KeyValueColumnExpression(); + kvExp.readFields(input); + serverParsedKVRefs.add(kvExp); + } + int kvFuncSize = WritableUtils.readVInt(input); + Expression[] funcRefs = new Expression[kvFuncSize]; + for (int i = 0; i < kvFuncSize; i++) { + ScalarFunction func = null; + if (scanAttribute.equals(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX)) { + func = new ArrayIndexFunction(); + } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION)) { + func = new JsonValueFunction(); + } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION)) { + func = new JsonQueryFunction(); + } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION)) { + func = new BsonValueFunction(); } - int clientVersion = ScanUtil.getClientVersion(scan); - // Client including and after 4.15 and 5.1 are not going to serialize thresholdBytes - // so we need to decode this only for older clients to not break wire compat - boolean shouldDecodeSpoolThreshold = - (scan.getAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION) == null) - || (VersionUtil.decodeMajorVersion(clientVersion) > 5) - || (VersionUtil.decodeMajorVersion(clientVersion) == 5 - && clientVersion < MetaDataProtocol.MIN_5_x_DISABLE_SERVER_SPOOL_THRESHOLD) - || (VersionUtil.decodeMajorVersion(clientVersion) == 4 - && clientVersion < MetaDataProtocol.MIN_4_x_DISABLE_SERVER_SPOOL_THRESHOLD); - ByteArrayInputStream stream = new ByteArrayInputStream(topN); // TODO: size? 
- try { - DataInputStream input = new DataInputStream(stream); - if (shouldDecodeSpoolThreshold) { - // Read off the scan but ignore, we won't honor client sent thresholdbytes, but the - // one set on server - WritableUtils.readVInt(input); - } - int limit = WritableUtils.readVInt(input); - int estimatedRowSize = WritableUtils.readVInt(input); - int size = WritableUtils.readVInt(input); - List orderByExpressions = Lists.newArrayListWithExpectedSize(size); - for (int i = 0; i < size; i++) { - OrderByExpression orderByExpression = new OrderByExpression(); - orderByExpression.readFields(input); - orderByExpressions.add(orderByExpression); - } - PTable.QualifierEncodingScheme encodingScheme = - EncodedColumnsUtil.getQualifierEncodingScheme(scan); - RegionScannerResultIterator inner = new RegionScannerResultIterator(scan, s, - EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan), encodingScheme); - OrderedResultIterator iterator - = new OrderedResultIterator(inner, orderByExpressions, spoolingEnabled, - thresholdBytes, limit >= 0 ? limit : null, null, estimatedRowSize, - getPageSizeMsForRegionScanner(scan), scan, s.getRegionInfo()); - return new OrderedResultIteratorWithScannerContext(inner.getRegionScannerContext(), - iterator); - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); + if (func != null) { + func.readFields(input); + funcRefs[i] = func; + } + } + return funcRefs; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private RegionScanner getOffsetScanner(final RegionScanner s, final OffsetResultIterator iterator, + final boolean isLastScan, final boolean incompatibleClient, final Scan scan, + final ScannerContext sc) throws IOException { + final Tuple firstTuple; + final Region region = getRegion(); + region.startRegionOperation(); + final byte[] initStartRowKey = scan.getStartRow().length > 0 + ? scan.getStartRow() + : (scan.isReversed() + ? region.getRegionInfo().getEndKey() + : region.getRegionInfo().getStartKey()); + byte[] prevScanStartRowKey = + scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); + // If the region has moved after server has returned dummy or valid row to client, + // prevScanStartRowKey would be different from actual scan start rowkey. + // If the region moves after dummy was returned, we do not need to set row count to + // offset. However, if the region moves after valid row was returned, we do need to + // set row count to offset because we return valid row only after offset num of rows + // are skipped. 
+ if ( + Bytes.compareTo(prevScanStartRowKey, initStartRowKey) != 0 && Bytes + .compareTo(ByteUtil.concat(prevScanStartRowKey, ByteUtil.ZERO_BYTE), initStartRowKey) != 0 + ) { + iterator.setRowCountToOffset(); + } + try { + Tuple tuple = iterator.next(); + if (tuple == null && !isLastScan) { + List kvList = new ArrayList<>(1); + KeyValue kv; + byte[] remainingOffset = PInteger.INSTANCE.toBytes(iterator.getRemainingOffset()); + if (incompatibleClient) { + kv = new KeyValue(QueryConstants.OFFSET_ROW_KEY_BYTES, QueryConstants.OFFSET_FAMILY, + QueryConstants.OFFSET_COLUMN, remainingOffset); + } else { + Tuple lastScannedTuple = iterator.getLastScannedTuple(); + if (lastScannedTuple != null) { + kv = getOffsetKvWithLastScannedRowKey(remainingOffset, lastScannedTuple); + } else { + byte[] rowKey; + byte[] startKey = scan.getStartRow().length > 0 + ? scan.getStartRow() + : region.getRegionInfo().getStartKey(); + byte[] endKey = + scan.getStopRow().length > 0 ? scan.getStopRow() : region.getRegionInfo().getEndKey(); + rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); + if (rowKey == null) { + if (scan.includeStartRow()) { + rowKey = startKey; + } else if (scan.includeStopRow()) { + rowKey = endKey; + } else { + rowKey = HConstants.EMPTY_END_ROW; + } } + kv = new KeyValue(rowKey, QueryConstants.OFFSET_FAMILY, QueryConstants.OFFSET_COLUMN, + remainingOffset); + } } + kvList.add(kv); + Result r = Result.create(kvList); + firstTuple = new ResultTuple(r); + } else { + firstTuple = tuple; + } + } catch (Throwable t) { + ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), t); + return null; + } finally { + region.closeRegionOperation(); } - private static class OrderedResultIteratorWithScannerContext { - private ScannerContext scannerContext; - private OrderedResultIterator iterator; - - OrderedResultIteratorWithScannerContext(ScannerContext sc, OrderedResultIterator ori) { - this.scannerContext = sc; - this.iterator = ori; - } + return new BaseRegionScanner(s) { + private Tuple tuple = firstTuple; + private byte[] previousResultRowKey; + private ScannerContext regionScannerContext = sc; - public ScannerContext getScannerContext() { - return scannerContext; - } + @Override + public boolean isFilterDone() { + return tuple == null; + } - public OrderedResultIterator getIterator() { - return iterator; - } - } + @Override + public boolean next(List results) throws IOException { + return next(results, null); + } - private Expression[] deserializeServerParsedPositionalExpressionInfoFromScan(Scan scan, - String scanAttribute, Set serverParsedKVRefs) { - byte[] specificArrayIdx = scan.getAttribute(scanAttribute); - if (specificArrayIdx == null) { - return null; - } - ByteArrayInputStream stream = new ByteArrayInputStream(specificArrayIdx); + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { try { - DataInputStream input = new DataInputStream(stream); - int kvRefSize = WritableUtils.readVInt(input); - for (int i = 0; i < kvRefSize; i++) { - PTable.ImmutableStorageScheme scheme = EncodedColumnsUtil.getImmutableStorageScheme(scan); - KeyValueColumnExpression kvExp = scheme != PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN ? 
new SingleCellColumnExpression(scheme) - : new KeyValueColumnExpression(); - kvExp.readFields(input); - serverParsedKVRefs.add(kvExp); - } - int kvFuncSize = WritableUtils.readVInt(input); - Expression[] funcRefs = new Expression[kvFuncSize]; - for (int i = 0; i < kvFuncSize; i++) { - ScalarFunction func = null; - if (scanAttribute.equals(BaseScannerRegionObserverConstants.SPECIFIC_ARRAY_INDEX)) { - func = new ArrayIndexFunction(); - } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.JSON_VALUE_FUNCTION)) { - func = new JsonValueFunction(); - } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.JSON_QUERY_FUNCTION)) { - func = new JsonQueryFunction(); - } else if (scanAttribute.equals(BaseScannerRegionObserverConstants.BSON_VALUE_FUNCTION)) { - func = new BsonValueFunction(); - } - if (func != null) { - func.readFields(input); - funcRefs[i] = func; - } - } - return funcRefs; - } catch (IOException e) { - throw new RuntimeException(e); - } finally { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); + if (isFilterDone()) { + return false; + } + Tuple nextTuple = iterator.next(); + if (tuple.size() > 0 && !isDummy(tuple)) { + for (int i = 0; i < tuple.size(); i++) { + results.add(tuple.getValue(i)); + if (i == 0) { + previousResultRowKey = CellUtil.cloneRow(tuple.getValue(i)); + } } - } - } - - private RegionScanner getOffsetScanner(final RegionScanner s, - final OffsetResultIterator iterator, - final boolean isLastScan, - final boolean incompatibleClient, - final Scan scan, final ScannerContext sc) - throws IOException { - final Tuple firstTuple; - final Region region = getRegion(); - region.startRegionOperation(); - final byte[] initStartRowKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - (scan.isReversed() ? region.getRegionInfo().getEndKey() : - region.getRegionInfo().getStartKey()); - byte[] prevScanStartRowKey = - scan.getAttribute(BaseScannerRegionObserverConstants.SCAN_ACTUAL_START_ROW); - // If the region has moved after server has returned dummy or valid row to client, - // prevScanStartRowKey would be different from actual scan start rowkey. - // If the region moves after dummy was returned, we do not need to set row count to - // offset. However, if the region moves after valid row was returned, we do need to - // set row count to offset because we return valid row only after offset num of rows - // are skipped. - if (Bytes.compareTo(prevScanStartRowKey, initStartRowKey) != 0 && Bytes.compareTo( - ByteUtil.concat(prevScanStartRowKey, ByteUtil.ZERO_BYTE), - initStartRowKey) != 0) { - iterator.setRowCountToOffset(); - } - try { - Tuple tuple = iterator.next(); - if (tuple == null && !isLastScan) { - List kvList = new ArrayList<>(1); - KeyValue kv; - byte[] remainingOffset = - PInteger.INSTANCE.toBytes(iterator.getRemainingOffset()); - if (incompatibleClient) { - kv = new KeyValue( - QueryConstants.OFFSET_ROW_KEY_BYTES, - QueryConstants.OFFSET_FAMILY, - QueryConstants.OFFSET_COLUMN, - remainingOffset); - } else { - Tuple lastScannedTuple = iterator.getLastScannedTuple(); - if (lastScannedTuple != null) { - kv = getOffsetKvWithLastScannedRowKey(remainingOffset, lastScannedTuple); - } else { - byte[] rowKey; - byte[] startKey = scan.getStartRow().length > 0 ? scan.getStartRow() : - region.getRegionInfo().getStartKey(); - byte[] endKey = scan.getStopRow().length > 0 ? 
scan.getStopRow() : - region.getRegionInfo().getEndKey(); - rowKey = ByteUtil.getLargestPossibleRowKeyInRange(startKey, endKey); - if (rowKey == null) { - if (scan.includeStartRow()) { - rowKey = startKey; - } else if (scan.includeStopRow()) { - rowKey = endKey; - } else { - rowKey = HConstants.EMPTY_END_ROW; - } - } - kv = new KeyValue( - rowKey, - QueryConstants.OFFSET_FAMILY, - QueryConstants.OFFSET_COLUMN, - remainingOffset); - } - } - kvList.add(kv); - Result r = Result.create(kvList); - firstTuple = new ResultTuple(r); + } else { + if (nextTuple == null) { + byte[] remainingOffset = PInteger.INSTANCE.toBytes(iterator.getRemainingOffset()); + KeyValue kv; + if (incompatibleClient) { + kv = new KeyValue(QueryConstants.OFFSET_ROW_KEY_BYTES, QueryConstants.OFFSET_FAMILY, + QueryConstants.OFFSET_COLUMN, remainingOffset); + } else { + kv = getOffsetKvWithLastScannedRowKey(remainingOffset, tuple); + } + results.add(kv); } else { - firstTuple = tuple; + updateDummyWithPrevRowKey(results, initStartRowKey, previousResultRowKey); } + } + tuple = nextTuple; + if (regionScannerContext != null) { + ScannerContextUtil.updateMetrics(regionScannerContext, scannerContext); + regionScannerContext = null; + } + return !isFilterDone(); } catch (Throwable t) { - ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), t); - return null; - } finally { - region.closeRegionOperation(); + LOGGER.error("Error while iterating Offset scanner.", t); + ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), t); + return false; } + } - return new BaseRegionScanner(s) { - private Tuple tuple = firstTuple; - private byte[] previousResultRowKey; - private ScannerContext regionScannerContext = sc; - - @Override - public boolean isFilterDone() { - return tuple == null; - } + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } - @Override - public boolean next(List results) throws IOException { - return next(results, null); - } - - @Override - public boolean next(List results, ScannerContext scannerContext) - throws IOException { - try { - if (isFilterDone()) { - return false; - } - Tuple nextTuple = iterator.next(); - if (tuple.size() > 0 && !isDummy(tuple)) { - for (int i = 0; i < tuple.size(); i++) { - results.add(tuple.getValue(i)); - if (i == 0) { - previousResultRowKey = CellUtil.cloneRow(tuple.getValue(i)); - } - } - } else { - if (nextTuple == null) { - byte[] remainingOffset = - PInteger.INSTANCE.toBytes(iterator.getRemainingOffset()); - KeyValue kv; - if (incompatibleClient) { - kv = new KeyValue( - QueryConstants.OFFSET_ROW_KEY_BYTES, - QueryConstants.OFFSET_FAMILY, - QueryConstants.OFFSET_COLUMN, - remainingOffset); - } else { - kv = getOffsetKvWithLastScannedRowKey(remainingOffset, tuple); - } - results.add(kv); - } else { - updateDummyWithPrevRowKey(results, initStartRowKey, - previousResultRowKey); - } - } - tuple = nextTuple; - if (regionScannerContext != null) { - ScannerContextUtil.updateMetrics(regionScannerContext, scannerContext); - regionScannerContext = null; - } - return !isFilterDone(); - } catch (Throwable t) { - LOGGER.error("Error while iterating Offset scanner.", t); - ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), t); - return false; - } - } - - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); - } - - @Override - public void 
close() throws IOException { - try { - s.close(); - } finally { - try { - iterator.close(); - } catch (SQLException e) { - ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), e); - } - } - } - }; - } - - /** - * Add dummy cell to the result list based on either the previous rowkey returned to the - * client or the start rowkey of the scan or region start key. - * - * @param result result row. - * @param initStartRowKey scan start rowkey. - * @param previousResultRowKey previous result rowkey returned to client. - */ - private void updateDummyWithPrevRowKey(final List result, - final byte[] initStartRowKey, - final byte[] previousResultRowKey) { - result.clear(); - if (previousResultRowKey != null) { - getDummyResult(previousResultRowKey, result); - } else { - getDummyResult(initStartRowKey, result); + @Override + public void close() throws IOException { + try { + s.close(); + } finally { + try { + iterator.close(); + } catch (SQLException e) { + ClientUtil.throwIOException(getRegion().getRegionInfo().getRegionNameAsString(), e); + } } + } + }; + } + + /** + * Add dummy cell to the result list based on either the previous rowkey returned to the client or + * the start rowkey of the scan or region start key. + * @param result result row. + * @param initStartRowKey scan start rowkey. + * @param previousResultRowKey previous result rowkey returned to client. + */ + private void updateDummyWithPrevRowKey(final List result, final byte[] initStartRowKey, + final byte[] previousResultRowKey) { + result.clear(); + if (previousResultRowKey != null) { + getDummyResult(previousResultRowKey, result); + } else { + getDummyResult(initStartRowKey, result); } - - private static KeyValue getOffsetKvWithLastScannedRowKey(byte[] value, Tuple tuple) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - tuple.getKey(ptr); - byte[] rowKey = new byte[ptr.getLength()]; - System.arraycopy(ptr.get(), ptr.getOffset(), rowKey, 0, - rowKey.length); - return new KeyValue( - rowKey, - QueryConstants.OFFSET_FAMILY, - QueryConstants.OFFSET_COLUMN, - value); + } + + private static KeyValue getOffsetKvWithLastScannedRowKey(byte[] value, Tuple tuple) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + tuple.getKey(ptr); + byte[] rowKey = new byte[ptr.getLength()]; + System.arraycopy(ptr.get(), ptr.getOffset(), rowKey, 0, rowKey.length); + return new KeyValue(rowKey, QueryConstants.OFFSET_FAMILY, QueryConstants.OFFSET_COLUMN, value); + } + + /** + * Return region scanner that does TopN. We only need to call startRegionOperation and + * closeRegionOperation when getting the first Tuple (which forces running through the entire + * region) since after this everything is held in memory + */ + private RegionScanner getTopNScanner(RegionCoprocessorEnvironment env, final RegionScanner s, + final OrderedResultIterator iterator, ImmutableBytesPtr tenantId, ScannerContext sc) + throws Throwable { + + final Tuple firstTuple; + TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); + long estSize = iterator.getEstimatedByteSize(); + final MemoryManager.MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize); + final Region region = getRegion(); + region.startRegionOperation(); + try { + // Once we return from the first call to next, we've run through and cached + // the topN rows, so we no longer need to start/stop a region operation. 
+ firstTuple = iterator.next(); + // Now that the topN are cached, we can resize based on the real size + long actualSize = iterator.getByteSize(); + chunk.resize(actualSize); + } catch (Throwable t) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + return null; + } finally { + region.closeRegionOperation(); } - - /** - * Return region scanner that does TopN. - * We only need to call startRegionOperation and closeRegionOperation when - * getting the first Tuple (which forces running through the entire region) - * since after this everything is held in memory - */ - private RegionScanner getTopNScanner(RegionCoprocessorEnvironment env, final RegionScanner s, - final OrderedResultIterator iterator, - ImmutableBytesPtr tenantId, ScannerContext sc) - throws Throwable { - - final Tuple firstTuple; - TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId); - long estSize = iterator.getEstimatedByteSize(); - final MemoryManager.MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize); - final Region region = getRegion(); - region.startRegionOperation(); + return new BaseRegionScanner(s) { + private Tuple tuple = firstTuple; + private ScannerContext regionScannerContext = sc; + + @Override + public boolean isFilterDone() { + return tuple == null; + } + + @Override + public boolean next(List results) throws IOException { + return next(results, null); + } + + @Override + public boolean next(List results, ScannerContext scannerContext) throws IOException { try { - // Once we return from the first call to next, we've run through and cached - // the topN rows, so we no longer need to start/stop a region operation. - firstTuple = iterator.next(); - // Now that the topN are cached, we can resize based on the real size - long actualSize = iterator.getByteSize(); - chunk.resize(actualSize); + if (isFilterDone()) { + return false; + } + if (isDummy(tuple)) { + ScanUtil.getDummyResult(CellUtil.cloneRow(tuple.getValue(0)), results); + } else { + for (int i = 0; i < tuple.size(); i++) { + results.add(tuple.getValue(i)); + } + } + tuple = iterator.next(); + if (regionScannerContext != null) { + ScannerContextUtil.updateMetrics(regionScannerContext, scannerContext); + regionScannerContext = null; + } + return !isFilterDone(); } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - return null; - } finally { - region.closeRegionOperation(); + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); + return false; } - return new BaseRegionScanner(s) { - private Tuple tuple = firstTuple; - private ScannerContext regionScannerContext = sc; + } - @Override - public boolean isFilterDone() { - return tuple == null; - } - - @Override - public boolean next(List results) throws IOException { - return next(results, null); - } + @Override + public boolean nextRaw(List results, ScannerContext scannerContext) throws IOException { + return next(results, scannerContext); + } - @Override - public boolean next(List results, ScannerContext scannerContext) - throws IOException { - try { - if (isFilterDone()) { - return false; - } - if (isDummy(tuple)) { - ScanUtil.getDummyResult(CellUtil.cloneRow(tuple.getValue(0)), results); - } else { - for (int i = 0; i < tuple.size(); i++) { - results.add(tuple.getValue(i)); - } - } - tuple = iterator.next(); - if (regionScannerContext != null) { - ScannerContextUtil.updateMetrics(regionScannerContext, scannerContext); - regionScannerContext = null; - } - 
return !isFilterDone(); - } catch (Throwable t) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t); - return false; - } - } - - @Override - public boolean nextRaw(List results, ScannerContext scannerContext) - throws IOException { - return next(results, scannerContext); - } - - @Override - public void close() throws IOException { - try { - s.close(); - } finally { - try { - if(iterator != null) { - iterator.close(); - } - } catch (SQLException e) { - ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), e); - } finally { - chunk.close(); - } - } + @Override + public void close() throws IOException { + try { + s.close(); + } finally { + try { + if (iterator != null) { + iterator.close(); } - }; - } + } catch (SQLException e) { + ClientUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), e); + } finally { + chunk.close(); + } + } + } + }; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java index 430645cc243..132d1530c3f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,26 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.iterate; -import static org.apache.phoenix.coprocessorclient.ScanRegionObserverConstants.WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS; import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; +import static org.apache.phoenix.coprocessorclient.ScanRegionObserverConstants.WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS; import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.phoenix.coprocessor.CDCGlobalIndexRegionScanner; -import org.apache.phoenix.coprocessor.UncoveredGlobalIndexRegionScanner; -import org.apache.phoenix.coprocessor.UncoveredLocalIndexRegionScanner; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.schema.KeyValueSchema; -import org.apache.phoenix.schema.PColumn; -import org.apache.phoenix.schema.PColumnImpl; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.ValueBitSet; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; +import java.util.Set; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.RegionInfo; @@ -49,8 +45,11 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.WritableUtils; -import org.apache.phoenix.coprocessor.BaseScannerRegionObserver; +import org.apache.phoenix.coprocessor.CDCGlobalIndexRegionScanner; +import org.apache.phoenix.coprocessor.UncoveredGlobalIndexRegionScanner; +import org.apache.phoenix.coprocessor.UncoveredLocalIndexRegionScanner; import org.apache.phoenix.coprocessor.generated.DynamicColumnMetaDataProtos; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.execute.TupleProjector; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.ExpressionType; @@ -58,162 +57,142 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.index.IndexMaintainer; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.KeyValueSchema; +import org.apache.phoenix.schema.PColumn; +import org.apache.phoenix.schema.PColumnImpl; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.ValueBitSet; import org.apache.phoenix.schema.tuple.MultiKeyValueTuple; import org.apache.phoenix.schema.tuple.PositionBasedResultTuple; import org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.ServerIndexUtil; -import org.apache.phoenix.util.ServerUtil; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import 
java.util.ListIterator; -import java.util.Set; public abstract class RegionScannerFactory { protected RegionCoprocessorEnvironment env; /** - * Returns the region based on the value of the - * region context - * @return + * Returns the region based on the value of the region context */ public Region getRegion() { return env.getRegion(); } /** - * Returns a processed region scanner based on the query - * conditions. Thie functionality is abstracted out of - * the non-aggregate region observer class for better - * usage + * Returns a processed region scanner based on the query conditions. Thie functionality is + * abstracted out of the non-aggregate region observer class for better usage * @param scan input scan - * @param s input region scanner - * @return - * @throws Throwable + * @param s input region scanner */ - public abstract RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) throws Throwable; + public abstract RegionScanner getRegionScanner(final Scan scan, final RegionScanner s) + throws Throwable; /** - * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and - * re-throws as DoNotRetryIOException to prevent needless retrying hanging the query - * for 30 seconds. Unfortunately, until HBASE-7481 gets fixed, there's no way to do - * the same from a custom filter. - * @param serverParsedKVRefs - * @param serverParsedFuncRefs + * Return wrapped scanner that catches unexpected exceptions (i.e. Phoenix bugs) and re-throws as + * DoNotRetryIOException to prevent needless retrying hanging the query for 30 seconds. + * Unfortunately, until HBASE-7481 gets fixed, there's no way to do the same from a custom filter. * @param offset starting position in the rowkey. - * @param scan - * @param tupleProjector - * @param dataRegion - * @param indexMaintainer - * @param tx current transaction - * @param viewConstants + * @param tx current transaction */ public RegionScanner getWrappedScanner(final RegionCoprocessorEnvironment env, - final RegionScanner regionScanner, final Set serverParsedKVRefs, - final Expression[] serverParsedFuncRefs, - final int offset, final Scan scan, - final ColumnReference[] dataColumns, final TupleProjector tupleProjector, - final Region dataRegion, final IndexMaintainer indexMaintainer, - PhoenixTransactionContext tx, - final byte[][] viewConstants, final KeyValueSchema kvSchema, - final ValueBitSet kvSchemaBitSet, final TupleProjector projector, - final ImmutableBytesWritable ptr, final boolean useQualifierAsListIndex) throws IOException { + final RegionScanner regionScanner, final Set serverParsedKVRefs, + final Expression[] serverParsedFuncRefs, final int offset, final Scan scan, + final ColumnReference[] dataColumns, final TupleProjector tupleProjector, + final Region dataRegion, final IndexMaintainer indexMaintainer, PhoenixTransactionContext tx, + final byte[][] viewConstants, final KeyValueSchema kvSchema, final ValueBitSet kvSchemaBitSet, + final TupleProjector projector, final ImmutableBytesWritable ptr, + final boolean useQualifierAsListIndex) throws IOException { return new RegionScanner() { private RegionScanner s = regionScanner; private RegionInfo regionInfo = env.getRegionInfo(); private byte[] actualStartKey = getActualStartKey(); - private boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan); + private boolean useNewValueColumnQualifier = + EncodedColumnsUtil.useNewValueColumnQualifier(scan); final long pageSizeMs = ScanUtil.getPageSizeMsForRegionScanner(scan); Expression 
extraWhere = null; long extraLimit = -1; { - // for indexes construct the row filter for uncovered columns if it exists - if (ScanUtil.isLocalOrUncoveredGlobalIndex(scan)) { - byte[] expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER); - if (expBytes == null) { - // For older clients - expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_FILTER); - } - if (expBytes != null) { - try { - ByteArrayInputStream stream = new ByteArrayInputStream(expBytes); - DataInputStream input = new DataInputStream(stream); - extraWhere = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); - extraWhere.readFields(input); - } catch (IOException io) { - // should not happen since we're reading from a byte[] - throw new RuntimeException(io); - } - } - byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT); - if (limitBytes == null) { - // For older clients - limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_LIMIT); + // for indexes construct the row filter for uncovered columns if it exists + if (ScanUtil.isLocalOrUncoveredGlobalIndex(scan)) { + byte[] expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_FILTER); + if (expBytes == null) { + // For older clients + expBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_FILTER); + } + if (expBytes != null) { + try { + ByteArrayInputStream stream = new ByteArrayInputStream(expBytes); + DataInputStream input = new DataInputStream(stream); + extraWhere = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance(); + extraWhere.readFields(input); + } catch (IOException io) { + // should not happen since we're reading from a byte[] + throw new RuntimeException(io); + } + } + byte[] limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_LIMIT); + if (limitBytes == null) { + // For older clients + limitBytes = scan.getAttribute(BaseScannerRegionObserverConstants.LOCAL_INDEX_LIMIT); + } + if (limitBytes != null) { + extraLimit = Bytes.toLong(limitBytes); + } + if ( + ScanUtil.isLocalOrUncoveredGlobalIndex(scan) && (tupleProjector != null + || (indexMaintainer != null && indexMaintainer.isUncovered())) + ) { + + PTable.ImmutableStorageScheme storageScheme = indexMaintainer.getIndexStorageScheme(); + Scan dataTableScan = new Scan(); + if (dataColumns != null) { + for (int i = 0; i < dataColumns.length; i++) { + if (storageScheme == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { + dataTableScan.addFamily(dataColumns[i].getFamily()); + } else { + dataTableScan.addColumn(dataColumns[i].getFamily(), + dataColumns[i].getQualifier()); + } } - if (limitBytes != null) { - extraLimit = Bytes.toLong(limitBytes); + } else if (indexMaintainer.isUncovered()) { + // Indexed columns and the columns in index where clause should also be added + // to the data columns to scan for uncovered global indexes. This is required + // to verify the index row against the data table row. 
+ for (ColumnReference column : indexMaintainer.getAllColumnsForDataTable()) { + dataTableScan.addColumn(column.getFamily(), column.getQualifier()); } - if (ScanUtil.isLocalOrUncoveredGlobalIndex(scan) - && (tupleProjector != null - || (indexMaintainer != null && indexMaintainer.isUncovered()))) { - - PTable.ImmutableStorageScheme storageScheme = - indexMaintainer.getIndexStorageScheme(); - Scan dataTableScan = new Scan(); - if (dataColumns != null) { - for (int i = 0; i < dataColumns.length; i++) { - if (storageScheme == - PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) { - dataTableScan.addFamily(dataColumns[i].getFamily()); - } else { - dataTableScan.addColumn(dataColumns[i].getFamily(), - dataColumns[i].getQualifier()); - } - } - } else if (indexMaintainer.isUncovered()) { - // Indexed columns and the columns in index where clause should also be added - // to the data columns to scan for uncovered global indexes. This is required - // to verify the index row against the data table row. - for (ColumnReference column : indexMaintainer.getAllColumnsForDataTable()) { - dataTableScan.addColumn(column.getFamily(), column.getQualifier()); - } - } - if (ScanUtil.isLocalIndex(scan)) { - s = new UncoveredLocalIndexRegionScanner(regionScanner, dataRegion, scan, env, - dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, - pageSizeMs, offset, actualStartKey, extraLimit); - } else { - if (scan.getAttribute(CDC_DATA_TABLE_DEF) != null) { - s = new CDCGlobalIndexRegionScanner(regionScanner, dataRegion, scan, env, - dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, - pageSizeMs, extraLimit); - } else { - s = new UncoveredGlobalIndexRegionScanner(regionScanner, dataRegion, scan, env, - dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, - pageSizeMs, extraLimit); - } - } + } + if (ScanUtil.isLocalIndex(scan)) { + s = new UncoveredLocalIndexRegionScanner(regionScanner, dataRegion, scan, env, + dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, pageSizeMs, + offset, actualStartKey, extraLimit); + } else { + if (scan.getAttribute(CDC_DATA_TABLE_DEF) != null) { + s = new CDCGlobalIndexRegionScanner(regionScanner, dataRegion, scan, env, + dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, pageSizeMs, + extraLimit); + } else { + s = new UncoveredGlobalIndexRegionScanner(regionScanner, dataRegion, scan, env, + dataTableScan, tupleProjector, indexMaintainer, viewConstants, ptr, pageSizeMs, + extraLimit); } + } } + } } // Get the actual scan start row of local index. This will be used to compare the row // key of the results less than scan start row when there are references. public byte[] getActualStartKey() { - return ScanUtil.isLocalIndex(scan) ? ScanUtil.getActualStartRow(scan, regionInfo) - : null; + return ScanUtil.isLocalIndex(scan) ? ScanUtil.getActualStartRow(scan, regionInfo) : null; } @Override @@ -224,9 +203,8 @@ public boolean next(List results) throws IOException { @Override public boolean next(List results, ScannerContext scannerContext) throws IOException { try { - boolean next = (scannerContext == null) - ? s.next(results) - : s.next(results, scannerContext); + boolean next = + (scannerContext == null) ? 
s.next(results) : s.next(results, scannerContext); if (ScanUtil.isDummy(results)) { return true; } @@ -237,7 +215,6 @@ public boolean next(List results, ScannerContext scannerContext) throws IO } } - @Override public void close() throws IOException { s.close(); @@ -271,20 +248,18 @@ public boolean nextRaw(List result) throws IOException { @Override public boolean nextRaw(List result, ScannerContext scannerContext) throws IOException { try { - boolean next = (scannerContext == null) - ? s.nextRaw(result) - : s.nextRaw(result, scannerContext); + boolean next = + (scannerContext == null) ? s.nextRaw(result) : s.nextRaw(result, scannerContext); if (ScanUtil.isDummy(result)) { return true; } if (result.size() == 0) { return next; } - if ((ScanUtil.isLocalOrUncoveredGlobalIndex(scan)) - && !ScanUtil.isAnalyzeTable(scan)) { + if ((ScanUtil.isLocalOrUncoveredGlobalIndex(scan)) && !ScanUtil.isAnalyzeTable(scan)) { if (ScanUtil.isLocalIndex(scan) && tupleProjector == null) { if (actualStartKey != null) { - next = scanTillScanStartRow(s, result, null); + next = scanTillScanStartRow(s, result, null); if (result.isEmpty() || ScanUtil.isDummy(result)) { return next; } @@ -293,40 +268,45 @@ public boolean nextRaw(List result, ScannerContext scannerContext) throws } if (extraWhere != null) { - Tuple merged = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : - new ResultTuple(Result.create(result)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - extraWhere.evaluate(merged, ptr); - if (!Boolean.TRUE.equals(extraWhere.getDataType().toObject(ptr))) { - result.clear(); - return next; - } + Tuple merged = useQualifierAsListIndex + ? new PositionBasedResultTuple(result) + : new ResultTuple(Result.create(result)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + extraWhere.evaluate(merged, ptr); + if (!Boolean.TRUE.equals(extraWhere.getDataType().toObject(ptr))) { + result.clear(); + return next; + } } } Cell serverParsedResultCell = null; - if (serverParsedFuncRefs != null && serverParsedFuncRefs.length > 0 && serverParsedKVRefs.size() > 0) { + if ( + serverParsedFuncRefs != null && serverParsedFuncRefs.length > 0 + && serverParsedKVRefs.size() > 0 + ) { int resultPosition = replaceServerParsedExpressionElement(serverParsedKVRefs, - serverParsedFuncRefs, result); + serverParsedFuncRefs, result); serverParsedResultCell = result.get(resultPosition); } if (projector != null) { - Tuple toProject = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : - new ResultTuple(Result.create(result)); + Tuple toProject = useQualifierAsListIndex + ? 
new PositionBasedResultTuple(result) + : new ResultTuple(Result.create(result)); Pair mergedTupleDynColsPair = getTupleWithDynColsIfRequired(result, - projector.projectResults(toProject, useNewValueColumnQualifier)); + projector.projectResults(toProject, useNewValueColumnQualifier)); Tuple tupleWithDynColsIfReqd = mergedTupleDynColsPair.getFirst(); byte[] serializedDynColsList = mergedTupleDynColsPair.getSecond(); result.clear(); result.add(tupleWithDynColsIfReqd.mergeWithDynColsListBytesAndGetValue(0, - serializedDynColsList)); + serializedDynColsList)); if (serverParsedResultCell != null) { result.add(serverParsedResultCell); } } if (extraLimit >= 0 && --extraLimit == 0) { - return false; + return false; } // There is a scanattribute set to retrieve the specific array element if (scannerContext != null) { @@ -345,13 +325,14 @@ public boolean nextRaw(List result, ScannerContext scannerContext) throws * to create a tuple projector for dynamic columns. Finally, merge this with the projected * values corresponding to the known columns * @param result list of cells returned from the scan - * @param tuple projected value tuple from known schema/columns - * @return A pair, whose first part is a combined projected value tuple containing the - * known column values along with resolved dynamic column values and whose second part is - * the serialized list of dynamic column PColumns. In case dynamic columns are not - * to be exposed or are not present, this returns the original tuple and an empty byte array. + * @param tuple projected value tuple from known schema/columns + * @return A pair, whose first part is a combined projected value tuple containing the known + * column values along with resolved dynamic column values and whose second part is + * the serialized list of dynamic column PColumns. In case dynamic columns are not to + * be exposed or are not present, this returns the original tuple and an empty byte + * array. * @throws IOException Thrown if there is an error parsing protobuf or merging projected - * values + * values */ private Pair getTupleWithDynColsIfRequired(List result, Tuple tuple) throws IOException { @@ -359,13 +340,14 @@ private Pair getTupleWithDynColsIfRequired(List result, Tup if (Bytes.equals(scan.getAttribute(WILDCARD_SCAN_INCLUDES_DYNAMIC_COLUMNS), TRUE_BYTES)) { List dynCols = new ArrayList<>(); List dynColCells = new ArrayList<>(); - TupleProjector dynColTupleProj = TupleProjector.getDynamicColumnsTupleProjector(result, - dynCols, dynColCells); + TupleProjector dynColTupleProj = + TupleProjector.getDynamicColumnsTupleProjector(result, dynCols, dynColCells); if (dynColTupleProj != null) { - Tuple toProject = useQualifierAsListIndex ? new PositionBasedResultTuple(dynColCells) : - new ResultTuple(Result.create(dynColCells)); - Tuple dynColsProjectedTuple = dynColTupleProj - .projectResults(toProject, useNewValueColumnQualifier); + Tuple toProject = useQualifierAsListIndex + ? 
new PositionBasedResultTuple(dynColCells) + : new ResultTuple(Result.create(dynColCells)); + Tuple dynColsProjectedTuple = + dynColTupleProj.projectResults(toProject, useNewValueColumnQualifier); ValueBitSet destBitSet = projector.getValueBitSet(); // In case we are not projecting any non-row key columns, the field count for the @@ -375,15 +357,15 @@ private Pair getTupleWithDynColsIfRequired(List result, Tup destBitSet = dynColTupleProj.getValueBitSet(); } // Add dynamic column data at the end of the projected tuple - Tuple mergedTuple = TupleProjector.mergeProjectedValue( - (TupleProjector.ProjectedValueTuple)tuple, destBitSet, dynColsProjectedTuple, - dynColTupleProj.getValueBitSet(), projector.getSchema().getFieldCount(), - useNewValueColumnQualifier); + Tuple mergedTuple = + TupleProjector.mergeProjectedValue((TupleProjector.ProjectedValueTuple) tuple, + destBitSet, dynColsProjectedTuple, dynColTupleProj.getValueBitSet(), + projector.getSchema().getFieldCount(), useNewValueColumnQualifier); // We send the serialized list of PColumns for dynamic columns back to the client // so that the client can process the corresponding projected values DynamicColumnMetaDataProtos.DynamicColumnMetaData.Builder dynColsListBuilder = - DynamicColumnMetaDataProtos.DynamicColumnMetaData.newBuilder(); + DynamicColumnMetaDataProtos.DynamicColumnMetaData.newBuilder(); for (PColumn dynCol : dynCols) { dynColsListBuilder.addDynamicColumns(PColumnImpl.toProto(dynCol)); } @@ -393,20 +375,21 @@ private Pair getTupleWithDynColsIfRequired(List result, Tup return new Pair<>(tuple, new byte[0]); } - /** - * When there is a merge in progress while scanning local indexes we might get the key values less than scan start row. - * In that case we need to scan until get the row key more or equal to scan start key. - * TODO try to fix this case in LocalIndexStoreFileScanner when there is a merge. + * When there is a merge in progress while scanning local indexes we might get the key values + * less than scan start row. In that case we need to scan until get the row key more or equal + * to scan start key. TODO try to fix this case in LocalIndexStoreFileScanner when there is a + * merge. 
*/ - private boolean scanTillScanStartRow(final RegionScanner s, - List result, - ScannerContext scannerContext) throws IOException { + private boolean scanTillScanStartRow(final RegionScanner s, List result, + ScannerContext scannerContext) throws IOException { boolean next = true; Cell firstCell = result.get(0); long startTime = EnvironmentEdgeManager.currentTimeMillis(); - while (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), - firstCell.getRowLength(), actualStartKey, 0, actualStartKey.length) < 0) { + while ( + Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), + firstCell.getRowLength(), actualStartKey, 0, actualStartKey.length) < 0 + ) { if (EnvironmentEdgeManager.currentTimeMillis() - startTime >= pageSizeMs) { byte[] rowKey = CellUtil.cloneRow(result.get(0)); result.clear(); @@ -414,7 +397,7 @@ private boolean scanTillScanStartRow(final RegionScanner s, return true; } result.clear(); - if(scannerContext == null) { + if (scannerContext == null) { next = s.nextRaw(result); } else { next = s.nextRaw(result, scannerContext); @@ -431,8 +414,8 @@ private boolean scanTillScanStartRow(final RegionScanner s, } private int replaceServerParsedExpressionElement( - final Set serverParsedKVRefs, - final Expression[] serverParsedFuncRefs, List result) { + final Set serverParsedKVRefs, + final Expression[] serverParsedFuncRefs, List result) { // make a copy of the results array here, as we're modifying it below MultiKeyValueTuple tuple = new MultiKeyValueTuple(ImmutableList.copyOf(result)); // The size of both the arrays would be same? @@ -444,11 +427,12 @@ private int replaceServerParsedExpressionElement( ListIterator itr = result.listIterator(); while (itr.hasNext()) { Cell kv = itr.next(); - if (Bytes.equals(kvExp.getColumnFamily(), 0, kvExp.getColumnFamily().length, - kv.getFamilyArray(), kv.getFamilyOffset(), - kv.getFamilyLength()) && Bytes.equals(kvExp.getColumnQualifier(), 0, - kvExp.getColumnQualifier().length, kv.getQualifierArray(), - kv.getQualifierOffset(), kv.getQualifierLength())) { + if ( + Bytes.equals(kvExp.getColumnFamily(), 0, kvExp.getColumnFamily().length, + kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()) + && Bytes.equals(kvExp.getColumnQualifier(), 0, kvExp.getColumnQualifier().length, + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()) + ) { // remove the kv that has the full array/json values. 
itr.remove(); break; @@ -459,11 +443,11 @@ private int replaceServerParsedExpressionElement( byte[] value = kvSchema.toBytes(tuple, serverParsedFuncRefs, kvSchemaBitSet, ptr); // Add a dummy kv with the exact value of the array index or json value result.add(new KeyValue(rowKv.getRowArray(), rowKv.getRowOffset(), rowKv.getRowLength(), - QueryConstants.ARRAY_VALUE_COLUMN_FAMILY, 0, - QueryConstants.ARRAY_VALUE_COLUMN_FAMILY.length, - QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER, 0, - QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER.length, HConstants.LATEST_TIMESTAMP, - KeyValue.Type.codeToType(rowKv.getType().getCode()), value, 0, value.length)); + QueryConstants.ARRAY_VALUE_COLUMN_FAMILY, 0, + QueryConstants.ARRAY_VALUE_COLUMN_FAMILY.length, + QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER, 0, + QueryConstants.ARRAY_VALUE_COLUMN_QUALIFIER.length, HConstants.LATEST_TIMESTAMP, + KeyValue.Type.codeToType(rowKv.getType().getCode()), value, 0, value.length)); return getArrayCellPosition(result); } @@ -479,8 +463,8 @@ public int getBatch() { }; } - // PHOENIX-4791 Share position of array element cell - public static int getArrayCellPosition(List result) { - return result.size() - 1; - } + // PHOENIX-4791 Share position of array element cell + public static int getArrayCellPosition(List result) { + return result.size() - 1; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java index d186ef881e8..ac5fb17975c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.iterate; +import static org.apache.phoenix.util.ScanUtil.isDummy; + import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; @@ -37,71 +39,72 @@ import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; -import static org.apache.phoenix.util.ScanUtil.isDummy; +public class RegionScannerResultIterator extends BaseResultIterator { + private final RegionScanner scanner; + private final Pair minMaxQualifiers; + private final boolean useQualifierAsIndex; + private final QualifierEncodingScheme encodingScheme; + private final ScannerContext regionScannerContext; + public RegionScannerResultIterator(Scan scan, RegionScanner scanner, + Pair minMaxQualifiers, QualifierEncodingScheme encodingScheme) { + this.scanner = scanner; + this.useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers); + this.minMaxQualifiers = minMaxQualifiers; + this.encodingScheme = encodingScheme; + if (scan.isScanMetricsEnabled()) { + regionScannerContext = + ScannerContext.newBuilder().setTrackMetrics(scan.isScanMetricsEnabled()).build(); + } else { + regionScannerContext = null; + } + } -public class RegionScannerResultIterator extends BaseResultIterator { - private final RegionScanner scanner; - private final Pair minMaxQualifiers; - private final boolean useQualifierAsIndex; - private final QualifierEncodingScheme encodingScheme; - private final ScannerContext regionScannerContext; - - public RegionScannerResultIterator(Scan scan, RegionScanner scanner, - Pair minMaxQualifiers, - QualifierEncodingScheme encodingScheme) { - this.scanner = scanner; - this.useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers); - this.minMaxQualifiers = minMaxQualifiers; - this.encodingScheme = encodingScheme; - if (scan.isScanMetricsEnabled()) { - regionScannerContext = ScannerContext.newBuilder() - .setTrackMetrics(scan.isScanMetricsEnabled()).build(); + @Override + public Tuple next() throws SQLException { + // XXX: No access here to the region instance to enclose this with startRegionOperation / + // stopRegionOperation + synchronized (scanner) { + try { + // TODO: size + List results = useQualifierAsIndex + ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), + minMaxQualifiers.getSecond(), encodingScheme) + : new ArrayList(); + // Results are potentially returned even when the return value of s.next is false + // since this is an indication of whether or not there are more values after the + // ones returned + boolean hasMore; + if (regionScannerContext == null) { + hasMore = scanner.nextRaw(results); } else { - regionScannerContext = null; + hasMore = scanner.nextRaw(results, regionScannerContext); } - } - - @Override - public Tuple next() throws SQLException { - // XXX: No access here to the region instance to enclose this with startRegionOperation / - // stopRegionOperation - synchronized (scanner) { - try { - // TODO: size - List results = useQualifierAsIndex ? 
new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList(); - // Results are potentially returned even when the return value of s.next is false - // since this is an indication of whether or not there are more values after the - // ones returned - boolean hasMore; - if (regionScannerContext == null) { - hasMore = scanner.nextRaw(results); - } else { - hasMore = scanner.nextRaw(results, regionScannerContext); - } - if (!hasMore && results.isEmpty()) { - return null; - } - if (isDummy(results)) { - return new ResultTuple(Result.create(results)); - } - // We instantiate a new tuple because in all cases currently we hang on to it - // (i.e. to compute and hold onto the TopN). - Tuple tuple = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); - tuple.setKeyValues(results); - return tuple; - } catch (IOException e) { - throw ClientUtil.parseServerException(e); - } + if (!hasMore && results.isEmpty()) { + return null; + } + if (isDummy(results)) { + return new ResultTuple(Result.create(results)); } + // We instantiate a new tuple because in all cases currently we hang on to it + // (i.e. to compute and hold onto the TopN). + Tuple tuple = + useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple(); + tuple.setKeyValues(results); + return tuple; + } catch (IOException e) { + throw ClientUtil.parseServerException(e); + } } + } - public ScannerContext getRegionScannerContext() { - return regionScannerContext; - } - @Override - public String toString() { - return "RegionScannerResultIterator [scanner=" + scanner + "]"; - } + public ScannerContext getRegionScannerContext() { + return regionScannerContext; + } + + @Override + public String toString() { + return "RegionScannerResultIterator [scanner=" + scanner + "]"; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java index 992ee46a7d9..e4baee82f83 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.iterate; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Properties; import java.util.concurrent.ConcurrentMap; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.ExtendedCellBuilder; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AbstractClientScanner; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -41,14 +45,11 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.OnlineRegions; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; @@ -62,8 +63,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Properties; - /** * Scan over a region from restored snapshot */ @@ -76,8 +75,8 @@ public class SnapshotScanner extends AbstractClientScanner { private List values; private StatisticsCollector statisticsCollector; - public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir, - TableDescriptor htd, RegionInfo hri, Scan scan) throws Throwable{ + public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir, TableDescriptor htd, + RegionInfo hri, Scan scan) throws Throwable { LOGGER.info("Creating SnapshotScanner for region: " + hri); @@ -95,20 +94,24 @@ public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir, // Collect statistics during scan if ANALYZE_TABLE attribute is set if (ScanUtil.isAnalyzeTable(scan)) { this.scanner = region.getScanner(scan); - PhoenixConnection connection = (PhoenixConnection) ConnectionUtil.getInputConnection(conf, new Properties()); + PhoenixConnection connection = + (PhoenixConnection) ConnectionUtil.getInputConnection(conf, new Properties()); String tableName = region.getTableDescriptor().getTableName().getNameAsString(); - TableName physicalTableName = SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, conf); + TableName physicalTableName = + SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, conf); Table table = connection.getQueryServices().getTable(physicalTableName.getName()); - StatisticsWriter statsWriter = StatisticsWriter.newWriter(connection, tableName, HConstants.LATEST_TIMESTAMP); - 
statisticsCollector = new DefaultStatisticsCollector(conf, region, - tableName, null, null, null, statsWriter, table); + StatisticsWriter statsWriter = + StatisticsWriter.newWriter(connection, tableName, HConstants.LATEST_TIMESTAMP); + statisticsCollector = new DefaultStatisticsCollector(conf, region, tableName, null, null, + null, statsWriter, table); } else if (scan.getAttribute(BaseScannerRegionObserverConstants.NON_AGGREGATE_QUERY) != null) { RegionScannerFactory regionScannerFactory = new NonAggregateRegionScannerFactory(snapshotEnv); this.scanner = regionScannerFactory.getRegionScanner(scan, region.getScanner(scan)); statisticsCollector = new NoOpStatisticsCollector(); } else { - /* future work : Snapshot M/R jobs for aggregate queries*/ - throw new UnsupportedOperationException("Snapshot M/R jobs not available for aggregate queries"); + /* future work : Snapshot M/R jobs for aggregate queries */ + throw new UnsupportedOperationException( + "Snapshot M/R jobs not available for aggregate queries"); } statisticsCollector.init(); @@ -119,19 +122,17 @@ public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir, * Initialize region for snapshot scanner utility. This is client side region initialization and * hence it should follow the same region init pattern as the one used by hbase * ClientSideRegionScanner. - * - * @param conf The configuration. - * @param fs The filesystem instance. + * @param conf The configuration. + * @param fs The filesystem instance. * @param rootDir Restored region root dir. - * @param htd The table descriptor instance used to retrieve the region root dir. - * @param hri The region info. + * @param htd The table descriptor instance used to retrieve the region root dir. + * @param hri The region info. * @throws IOException If region init throws IOException. */ private void initRegionForSnapshotScanner(Configuration conf, FileSystem fs, Path rootDir, - TableDescriptor htd, - RegionInfo hri) throws IOException { + TableDescriptor htd, RegionInfo hri) throws IOException { region = HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, htd.getTableName()), null, fs, - conf, hri, htd, null); + conf, hri, htd, null); region.setRestoredRegion(true); // non RS process does not have a block cache, and this a client side scanner, // create one for MapReduce jobs to cache the INDEX block by setting to use @@ -140,12 +141,11 @@ private void initRegionForSnapshotScanner(Configuration conf, FileSystem fs, Pat // HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY is only available from 2.4.6 // We are using the string directly here to let Phoenix compile with earlier versions. // Note that it won't do anything before HBase 2.4.6 - conf.setIfUnset("hfile.onheap.block.cache.fixed.size", - String.valueOf(32 * 1024 * 1024L)); + conf.setIfUnset("hfile.onheap.block.cache.fixed.size", String.valueOf(32 * 1024 * 1024L)); // don't allow L2 bucket cache for non RS process to avoid unexpected disk usage. conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); - //PHOENIX-7367 - non RS process doesn't have MemstoreLab's ChunkCreator initialized - //so we disable it to avoid NPE while closing the memstore as part of region close + // PHOENIX-7367 - non RS process doesn't have MemstoreLab's ChunkCreator initialized + // so we disable it to avoid NPE while closing the memstore as part of region close conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); // we won't initialize the MobFileCache when not running in RS process. 
so provided an @@ -156,7 +156,6 @@ private void initRegionForSnapshotScanner(Configuration conf, FileSystem fs, Pat region.initialize(); } - @Override public Result next() throws IOException { values.clear(); @@ -165,7 +164,7 @@ public Result next() throws IOException { if (hasMore || !values.isEmpty()) { return Result.create(values); } else { - //we are done + // we are done return null; } } @@ -244,40 +243,40 @@ public ClassLoader getClassLoader() { throw new UnsupportedOperationException(); } - @Override - public RegionCoprocessor getInstance() { + @Override + public RegionCoprocessor getInstance() { throw new UnsupportedOperationException(); - } + } - @Override - public OnlineRegions getOnlineRegions() { + @Override + public OnlineRegions getOnlineRegions() { throw new UnsupportedOperationException(); - } + } - @Override - public ServerName getServerName() { + @Override + public ServerName getServerName() { throw new UnsupportedOperationException(); - } + } - @Override - public Connection getConnection() { + @Override + public Connection getConnection() { throw new UnsupportedOperationException(); - } + } - @Override - public MetricRegistry getMetricRegistryForRegionServer() { + @Override + public MetricRegistry getMetricRegistryForRegionServer() { throw new UnsupportedOperationException(); - } + } - @Override - public Connection createConnection(Configuration conf) throws IOException { + @Override + public Connection createConnection(Configuration conf) throws IOException { throw new UnsupportedOperationException(); - } + } - @Override - public ExtendedCellBuilder getCellBuilder() { + @Override + public ExtendedCellBuilder getCellBuilder() { throw new UnsupportedOperationException(); - } + } }; } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java index b0cd3817396..82b76d2ba81 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.iterate; +import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toRegionInfo; + import java.io.IOException; import java.sql.SQLException; import java.util.ArrayList; @@ -42,20 +43,17 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.monitoring.ScanMetricsHolder; import org.apache.phoenix.schema.tuple.Tuple; -import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.ClientUtil; +import org.apache.phoenix.util.ScanUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toRegionInfo; - /** - * Iterator to scan over an HBase snapshot based on input HBase Scan object. 
- * This iterator is generated by Phoenix during the query plan scan generation, - * hence it will include scan attributes and custom filters. - * Restores HBase snapshot and determines the valid regions that intersect - * with the input Scan boundaries. Launches SnapshotScanner for each of them. - * Deletes the restored snapshot when iterator is closed. + * Iterator to scan over an HBase snapshot based on input HBase Scan object. This iterator is + * generated by Phoenix during the query plan scan generation, hence it will include scan attributes + * and custom filters. Restores HBase snapshot and determines the valid regions that intersect with + * the input Scan boundaries. Launches SnapshotScanner for each of them. Deletes the restored + * snapshot when iterator is closed. */ public class TableSnapshotResultIterator implements ResultIterator { @@ -81,8 +79,9 @@ public class TableSnapshotResultIterator implements ResultIterator { private final boolean isMapReduceContext; private final long maxQueryEndTime; - public TableSnapshotResultIterator(Configuration configuration, Scan scan, ScanMetricsHolder scanMetricsHolder, StatementContext context, boolean isMapReduceContext, long maxQueryEndTime) - throws IOException { + public TableSnapshotResultIterator(Configuration configuration, Scan scan, + ScanMetricsHolder scanMetricsHolder, StatementContext context, boolean isMapReduceContext, + long maxQueryEndTime) throws IOException { this.configuration = configuration; this.currentRegion = -1; this.scan = scan; @@ -93,10 +92,9 @@ public TableSnapshotResultIterator(Configuration configuration, Scan scan, ScanM this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY)); } else { this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY), - UUID.randomUUID().toString()); + UUID.randomUUID().toString()); } - this.snapshotName = configuration.get( - PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); + this.snapshotName = configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); this.rootDir = CommonFSUtils.getRootDir(configuration); this.fs = rootDir.getFileSystem(configuration); this.isMapReduceContext = isMapReduceContext; @@ -106,9 +104,8 @@ public TableSnapshotResultIterator(Configuration configuration, Scan scan, ScanM private void init() throws IOException { if (!PhoenixConfigurationUtil.isMRSnapshotManagedExternally(configuration)) { - RestoreSnapshotHelper.RestoreMetaChanges meta = - RestoreSnapshotHelper.copySnapshotForScanner(this.configuration, this.fs, this.rootDir, - this.restoreDir, this.snapshotName); + RestoreSnapshotHelper.RestoreMetaChanges meta = RestoreSnapshotHelper.copySnapshotForScanner( + this.configuration, this.fs, this.rootDir, this.restoreDir, this.snapshotName); List restoredRegions = meta.getRegionsToAdd(); this.htd = meta.getTableDescriptor(); this.regions = new ArrayList<>(restoredRegions.size()); @@ -120,9 +117,9 @@ private void init() throws IOException { } else { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotProtos.SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = - SnapshotManifest.open(configuration, fs, snapshotDir, snapshotDesc); + SnapshotManifest.open(configuration, fs, snapshotDir, snapshotDesc); List regionManifests = manifest.getRegionManifests(); this.regions = new ArrayList<>(regionManifests.size()); this.htd = 
manifest.getTableDescriptor(); @@ -139,15 +136,14 @@ private void init() throws IOException { } /** - * Exclude offline split parent regions and - * regions that don't intersect with provided scan + * Exclude offline split parent regions and regions that don't intersect with provided scan */ private boolean isValidRegion(RegionInfo hri) { if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) { return false; } - return PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), - hri.getStartKey(), hri.getEndKey()); + return PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), + hri.getEndKey()); } public boolean initSnapshotScanner() throws SQLException { @@ -157,13 +153,12 @@ public boolean initSnapshotScanner() throws SQLException { ResultIterator delegate = this.scanIterator; if (delegate == UNINITIALIZED_SCANNER) { ++this.currentRegion; - if (this.currentRegion >= this.regions.size()) - return false; + if (this.currentRegion >= this.regions.size()) return false; try { RegionInfo hri = regions.get(this.currentRegion); - this.scanIterator = - new ScanningResultIterator(new SnapshotScanner(configuration, fs, restoreDir, htd, hri, scan), - scan, scanMetricsHolder, context, isMapReduceContext, maxQueryEndTime); + this.scanIterator = new ScanningResultIterator( + new SnapshotScanner(configuration, fs, restoreDir, htd, hri, scan), scan, + scanMetricsHolder, context, isMapReduceContext, maxQueryEndTime); } catch (Throwable e) { throw ClientUtil.parseServerException(e); } @@ -174,8 +169,7 @@ public boolean initSnapshotScanner() throws SQLException { @Override public Tuple next() throws SQLException { while (true) { - if (!initSnapshotScanner()) - return null; + if (!initSnapshotScanner()) return null; try { lastTuple = scanIterator.next(); if (lastTuple != null) { @@ -207,14 +201,14 @@ public void close() throws SQLException { } } - @Override - public void explain(List planSteps) { - // noop - } + @Override + public void explain(List planSteps) { + // noop + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java index 142aea63b76..b24bab69a24 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,13 +27,6 @@ import java.util.Set; import java.util.UUID; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -57,6 +50,16 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.IndexType; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.PhoenixRuntime; @@ -66,415 +69,402 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Base tool for running MapReduce-based ingests of data. 
*/ public abstract class AbstractBulkLoadTool extends Configured implements Tool { - protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class); - - static final Option ZK_QUORUM_OPT = new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)"); - static final Option INPUT_PATH_OPT = new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)"); - static final Option OUTPUT_PATH_OPT = new Option("o", "output", true, "Output path for temporary HFiles (optional)"); - static final Option SCHEMA_NAME_OPT = new Option("s", "schema", true, "Phoenix schema name (optional)"); - static final Option TABLE_NAME_OPT = new Option("t", "table", true, "Phoenix table name (mandatory)"); - static final Option INDEX_TABLE_NAME_OPT = new Option("it", "index-table", true, "Phoenix index table name when just loading this particualar index table"); - static final Option IMPORT_COLUMNS_OPT = new Option("c", "import-columns", true, "Comma-separated list of columns to be imported"); - static final Option IGNORE_ERRORS_OPT = new Option("g", "ignore-errors", false, "Ignore input errors"); - static final Option HELP_OPT = new Option("h", "help", false, "Show this help and quit"); - static final Option SKIP_HEADER_OPT = new Option("k", "skip-header", false, "Skip the first line of CSV files (the header)"); - static final Option ENABLE_CORRUPT_INDEXES = new Option( "corruptindexes", "corruptindexes", false, "Allow bulk loading into non-empty tables with global secondary indexes"); - - /** - * Set configuration values based on parsed command line options. - * - * @param cmdLine supplied command line options - * @param importColumns descriptors of columns to be imported - * @param conf job configuration - */ - protected abstract void configureOptions(CommandLine cmdLine, List importColumns, - Configuration conf) throws SQLException; - protected abstract void setupJob(Job job); - - protected Options getOptions() { - Options options = new Options(); - options.addOption(INPUT_PATH_OPT); - options.addOption(TABLE_NAME_OPT); - options.addOption(INDEX_TABLE_NAME_OPT); - options.addOption(ZK_QUORUM_OPT); - options.addOption(OUTPUT_PATH_OPT); - options.addOption(SCHEMA_NAME_OPT); - options.addOption(IMPORT_COLUMNS_OPT); - options.addOption(IGNORE_ERRORS_OPT); - options.addOption(HELP_OPT); - options.addOption(SKIP_HEADER_OPT); - options.addOption(ENABLE_CORRUPT_INDEXES); - return options; + protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractBulkLoadTool.class); + + static final Option ZK_QUORUM_OPT = + new Option("z", "zookeeper", true, "Supply zookeeper connection details (optional)"); + static final Option INPUT_PATH_OPT = + new Option("i", "input", true, "Input path(s) (comma-separated, mandatory)"); + static final Option OUTPUT_PATH_OPT = + new Option("o", "output", true, "Output path for temporary HFiles (optional)"); + static final Option SCHEMA_NAME_OPT = + new Option("s", "schema", true, "Phoenix schema name (optional)"); + static final Option TABLE_NAME_OPT = + new Option("t", "table", true, "Phoenix table name (mandatory)"); + static final Option INDEX_TABLE_NAME_OPT = new Option("it", "index-table", true, + "Phoenix index table name when just loading this particualar index table"); + static final Option IMPORT_COLUMNS_OPT = + new Option("c", "import-columns", true, "Comma-separated list of columns to be imported"); + static final Option IGNORE_ERRORS_OPT = + new Option("g", "ignore-errors", false, "Ignore input errors"); + 
static final Option HELP_OPT = new Option("h", "help", false, "Show this help and quit"); + static final Option SKIP_HEADER_OPT = + new Option("k", "skip-header", false, "Skip the first line of CSV files (the header)"); + static final Option ENABLE_CORRUPT_INDEXES = new Option("corruptindexes", "corruptindexes", false, + "Allow bulk loading into non-empty tables with global secondary indexes"); + + /** + * Set configuration values based on parsed command line options. + * @param cmdLine supplied command line options + * @param importColumns descriptors of columns to be imported + * @param conf job configuration + */ + protected abstract void configureOptions(CommandLine cmdLine, List importColumns, + Configuration conf) throws SQLException; + + protected abstract void setupJob(Job job); + + protected Options getOptions() { + Options options = new Options(); + options.addOption(INPUT_PATH_OPT); + options.addOption(TABLE_NAME_OPT); + options.addOption(INDEX_TABLE_NAME_OPT); + options.addOption(ZK_QUORUM_OPT); + options.addOption(OUTPUT_PATH_OPT); + options.addOption(SCHEMA_NAME_OPT); + options.addOption(IMPORT_COLUMNS_OPT); + options.addOption(IGNORE_ERRORS_OPT); + options.addOption(HELP_OPT); + options.addOption(SKIP_HEADER_OPT); + options.addOption(ENABLE_CORRUPT_INDEXES); + return options; + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. + * @param args supplied command line arguments + * @return the parsed command line + */ + protected CommandLine parseOptions(String[] args) { + + Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * - * @param args supplied command line arguments - * @return the parsed command line - */ - protected CommandLine parseOptions(String[] args) { - - Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). 
- build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - - if (cmdLine.hasOption(HELP_OPT.getOpt())) { - printHelpAndExit(options, 0); - } - - if (!cmdLine.hasOption(TABLE_NAME_OPT.getOpt())) { - throw new IllegalStateException(TABLE_NAME_OPT.getLongOpt() + " is a mandatory " + - "parameter"); - } - - if (!cmdLine.getArgList().isEmpty()) { - throw new IllegalStateException("Got unexpected extra parameters: " - + cmdLine.getArgList()); - } - - if (!cmdLine.hasOption(INPUT_PATH_OPT.getOpt())) { - throw new IllegalStateException(INPUT_PATH_OPT.getLongOpt() + " is a mandatory " + - "parameter"); - } - - return cmdLine; + if (cmdLine.hasOption(HELP_OPT.getOpt())) { + printHelpAndExit(options, 0); } - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + if (!cmdLine.hasOption(TABLE_NAME_OPT.getOpt())) { + throw new IllegalStateException( + TABLE_NAME_OPT.getLongOpt() + " is a mandatory " + "parameter"); } - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + if (!cmdLine.getArgList().isEmpty()) { + throw new IllegalStateException("Got unexpected extra parameters: " + cmdLine.getArgList()); } - @Override - public int run(String[] args) throws Exception { - - Configuration conf = HBaseConfiguration.create(getConf()); - - CommandLine cmdLine = null; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - } - try { - return loadData(conf, cmdLine); - } catch (Exception e) { - e.printStackTrace(); - return -1; - } + if (!cmdLine.hasOption(INPUT_PATH_OPT.getOpt())) { + throw new IllegalStateException( + INPUT_PATH_OPT.getLongOpt() + " is a mandatory " + "parameter"); } + return cmdLine; + } - private int loadData(Configuration conf, CommandLine cmdLine) throws Exception { - String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt()); - String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt()); - String indexTableName = cmdLine.getOptionValue(INDEX_TABLE_NAME_OPT.getOpt()); + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } - String qualifiedTableName = SchemaUtil.getQualifiedTableName(schemaName, tableName); - String qualifiedIndexTableName = null; - if (indexTableName != null){ - qualifiedIndexTableName = SchemaUtil.getQualifiedTableName(schemaName, indexTableName); - } - if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) { - // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job. - String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt()); - ConnectionInfo info = - ConnectionInfo.create(PhoenixRuntime.JDBC_PROTOCOL_ZK + ":" + zkQuorum, conf, - null, null); - LOGGER.info("Configuring HBase connection to {}", info); - for (Map.Entry entry : info.asProps()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue()); - } - conf.set(entry.getKey(), entry.getValue()); - } - } - // Skip the first line of the CSV file(s)? 
- if (cmdLine.hasOption(SKIP_HEADER_OPT.getOpt())) { - PhoenixTextInputFormat.setSkipHeader(conf); - } + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } - final String inputPaths = cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt()); - final Path outputPath; - List tablesToBeLoaded = new ArrayList(); - boolean hasLocalIndexes = false; + @Override + public int run(String[] args) throws Exception { - try (Connection conn = QueryUtil.getConnection(conf)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(), - qualifiedTableName); - } - List importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName); - Preconditions.checkNotNull(importColumns); - Preconditions.checkArgument(!importColumns.isEmpty(), "Column info list is empty"); - FormatToBytesWritableMapper.configureColumnInfoList(conf, importColumns); - boolean ignoreInvalidRows = cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt()); - conf.setBoolean(FormatToBytesWritableMapper.IGNORE_INVALID_ROW_CONFKEY, - ignoreInvalidRows); - conf.set(FormatToBytesWritableMapper.TABLE_NAME_CONFKEY, - SchemaUtil.getEscapedFullTableName(qualifiedTableName)); - // give subclasses their hook - configureOptions(cmdLine, importColumns, conf); - String sName = SchemaUtil.normalizeIdentifier(schemaName); - String tName = SchemaUtil.normalizeIdentifier(tableName); - - String tn = SchemaUtil.getEscapedTableName(sName, tName); - ResultSet rsempty = - conn.createStatement().executeQuery("SELECT * FROM " + tn + " LIMIT 1"); - boolean tableNotEmpty = rsempty.next(); - rsempty.close(); - - try { - validateTable(conn, sName, tName); - } finally { - conn.close(); - } - - if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) { - outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt())); - } else { - outputPath = new Path("/tmp/" + UUID.randomUUID()); - } + Configuration conf = HBaseConfiguration.create(getConf()); - PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); - tablesToBeLoaded.add( - new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString())); - boolean hasGlobalIndexes = false; - for (PTable index : table.getIndexes()) { - if (index.getIndexType() == IndexType.LOCAL) { - hasLocalIndexes = - qualifiedIndexTableName == null ? 
true - : index.getTableName().getString() - .equals(qualifiedIndexTableName); - if (hasLocalIndexes && hasGlobalIndexes) { - break; - } - } - if (IndexUtil.isGlobalIndex(index)) { - hasGlobalIndexes = true; - if (hasLocalIndexes && hasGlobalIndexes) { - break; - } - } - } + CommandLine cmdLine = null; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + } + try { + return loadData(conf, cmdLine); + } catch (Exception e) { + e.printStackTrace(); + return -1; + } + } - if (hasGlobalIndexes && tableNotEmpty - && !cmdLine.hasOption(ENABLE_CORRUPT_INDEXES.getOpt())) { - throw new IllegalStateException( - "Bulk Loading error: Bulk loading is disabled for non" - + " empty tables with global indexes, because it will corrupt" - + " the global index table in most cases.\n" - + "Use the --corruptindexes option to override this check."); - } + private int loadData(Configuration conf, CommandLine cmdLine) throws Exception { + String tableName = cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt()); + String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt()); + String indexTableName = cmdLine.getOptionValue(INDEX_TABLE_NAME_OPT.getOpt()); - // using conn after it's been closed... o.O - tablesToBeLoaded.addAll(getIndexTables(conn, qualifiedTableName)); - - // When loading a single index table, check index table name is correct - if (qualifiedIndexTableName != null) { - TargetTableRef targetIndexRef = null; - for (TargetTableRef tmpTable : tablesToBeLoaded) { - if (tmpTable.getLogicalName() - .compareToIgnoreCase(qualifiedIndexTableName) == 0) { - targetIndexRef = tmpTable; - break; - } - } - if (targetIndexRef == null) { - throw new IllegalStateException("Bulk Loader error: index table " - + qualifiedIndexTableName + " doesn't exist"); - } - tablesToBeLoaded.clear(); - tablesToBeLoaded.add(targetIndexRef); - } + String qualifiedTableName = SchemaUtil.getQualifiedTableName(schemaName, tableName); + String qualifiedIndexTableName = null; + if (indexTableName != null) { + qualifiedIndexTableName = SchemaUtil.getQualifiedTableName(schemaName, indexTableName); + } + if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) { + // ZK_QUORUM_OPT is optional, but if it's there, use it for both the conn and the job. + String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt()); + ConnectionInfo info = + ConnectionInfo.create(PhoenixRuntime.JDBC_PROTOCOL_ZK + ":" + zkQuorum, conf, null, null); + LOGGER.info("Configuring HBase connection to {}", info); + for (Map.Entry entry : info.asProps()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Setting {} = {}", entry.getKey(), entry.getValue()); } - - return submitJob(conf, tableName, inputPaths, outputPath, tablesToBeLoaded, hasLocalIndexes); + conf.set(entry.getKey(), entry.getValue()); + } } - - /** - * Submits the jobs to the cluster. - * Loads the HFiles onto the respective tables. 
- * @throws Exception - */ - public int submitJob(final Configuration conf, final String qualifiedTableName, - final String inputPaths, final Path outputPath, List tablesToBeLoaded, boolean hasLocalIndexes) throws Exception { - - Job job = Job.getInstance(conf, "Phoenix MapReduce import for " + qualifiedTableName); - FileInputFormat.addInputPaths(job, inputPaths); - FileOutputFormat.setOutputPath(job, outputPath); - - job.setInputFormatClass(PhoenixTextInputFormat.class); - job.setMapOutputKeyClass(TableRowkeyPair.class); - job.setMapOutputValueClass(ImmutableBytesWritable.class); - job.setOutputKeyClass(TableRowkeyPair.class); - job.setOutputValueClass(KeyValue.class); - job.setReducerClass(FormatToKeyValueReducer.class); - byte[][] splitKeysBeforeJob = null; - try(org.apache.hadoop.hbase.client.Connection hbaseConn = - ConnectionFactory.createConnection(job.getConfiguration())) { - RegionLocator regionLocator = null; - if(hasLocalIndexes) { - try{ - regionLocator = hbaseConn.getRegionLocator( - TableName.valueOf(qualifiedTableName)); - splitKeysBeforeJob = regionLocator.getStartKeys(); - } finally { - if (regionLocator != null) regionLocator.close(); - } - } - MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded); - - final String tableNamesAsJson = TargetTableRefFunctions.NAMES_TO_JSON - .apply(tablesToBeLoaded); - final String logicalNamesAsJson = TargetTableRefFunctions.LOGICAL_NAMES_TO_JSON - .apply(tablesToBeLoaded); - - job.getConfiguration().set(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY, - tableNamesAsJson); - job.getConfiguration().set(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY, - logicalNamesAsJson); - - // give subclasses their hook - setupJob(job); - - LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath); - boolean success = job.waitForCompletion(true); - - if (success) { - if (hasLocalIndexes) { - try { - regionLocator = hbaseConn.getRegionLocator( - TableName.valueOf(qualifiedTableName)); - if(!IndexUtil.matchingSplitKeys(splitKeysBeforeJob, - regionLocator.getStartKeys())) { - LOGGER.error("The table " + qualifiedTableName + " has local indexes and" - + " there is split key mismatch before and after running" - + " bulkload job. Please rerun the job otherwise there may be" - + " inconsistencies between actual data and index data."); - return -1; - } - } finally { - if (regionLocator != null) regionLocator.close(); - } - } - LOGGER.info("Loading HFiles from {}", outputPath); - completebulkload(conf,outputPath,tablesToBeLoaded); - LOGGER.info("Removing output directory {}", outputPath); - if(!outputPath.getFileSystem(conf).delete(outputPath, true)) { - LOGGER.error("Failed to delete the output directory {}", outputPath); - } - return 0; - } else { - return -1; - } - } + // Skip the first line of the CSV file(s)? 
+ if (cmdLine.hasOption(SKIP_HEADER_OPT.getOpt())) { + PhoenixTextInputFormat.setSkipHeader(conf); } - private void completebulkload(Configuration conf,Path outputPath , List tablesToBeLoaded) throws Exception { - Set tableNames = new HashSet<>(tablesToBeLoaded.size()); - for(TargetTableRef table : tablesToBeLoaded) { - if(tableNames.contains(table.getPhysicalName())){ - continue; - } - tableNames.add(table.getPhysicalName()); - BulkLoadHFiles loader = BulkLoadHFiles.create(conf); - String tableName = table.getPhysicalName(); - Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName); - LOGGER.info("Loading HFiles for {} from {}", tableName , tableOutputPath); - loader.bulkLoad(TableName.valueOf(tableName), tableOutputPath); - LOGGER.info("Incremental load complete for table=" + tableName); + final String inputPaths = cmdLine.getOptionValue(INPUT_PATH_OPT.getOpt()); + final Path outputPath; + List tablesToBeLoaded = new ArrayList(); + boolean hasLocalIndexes = false; + + try (Connection conn = QueryUtil.getConnection(conf)) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Reading columns from {} :: {}", ((PhoenixConnection) conn).getURL(), + qualifiedTableName); + } + List importColumns = buildImportColumns(conn, cmdLine, qualifiedTableName); + Preconditions.checkNotNull(importColumns); + Preconditions.checkArgument(!importColumns.isEmpty(), "Column info list is empty"); + FormatToBytesWritableMapper.configureColumnInfoList(conf, importColumns); + boolean ignoreInvalidRows = cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt()); + conf.setBoolean(FormatToBytesWritableMapper.IGNORE_INVALID_ROW_CONFKEY, ignoreInvalidRows); + conf.set(FormatToBytesWritableMapper.TABLE_NAME_CONFKEY, + SchemaUtil.getEscapedFullTableName(qualifiedTableName)); + // give subclasses their hook + configureOptions(cmdLine, importColumns, conf); + String sName = SchemaUtil.normalizeIdentifier(schemaName); + String tName = SchemaUtil.normalizeIdentifier(tableName); + + String tn = SchemaUtil.getEscapedTableName(sName, tName); + ResultSet rsempty = conn.createStatement().executeQuery("SELECT * FROM " + tn + " LIMIT 1"); + boolean tableNotEmpty = rsempty.next(); + rsempty.close(); + + try { + validateTable(conn, sName, tName); + } finally { + conn.close(); + } + + if (cmdLine.hasOption(OUTPUT_PATH_OPT.getOpt())) { + outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPT.getOpt())); + } else { + outputPath = new Path("/tmp/" + UUID.randomUUID()); + } + + PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); + tablesToBeLoaded + .add(new TargetTableRef(qualifiedTableName, table.getPhysicalName().getString())); + boolean hasGlobalIndexes = false; + for (PTable index : table.getIndexes()) { + if (index.getIndexType() == IndexType.LOCAL) { + hasLocalIndexes = qualifiedIndexTableName == null + ? true + : index.getTableName().getString().equals(qualifiedIndexTableName); + if (hasLocalIndexes && hasGlobalIndexes) { + break; + } } - } - - /** - * Build up the list of columns to be imported. The list is taken from the command line if - * present, otherwise it is taken from the table description. 
- * - * @param conn connection to Phoenix - * @param cmdLine supplied command line options - * @param qualifiedTableName table name (possibly with schema) of the table to be imported - * @return the list of columns to be imported - */ - List buildImportColumns(Connection conn, CommandLine cmdLine, - String qualifiedTableName) throws SQLException { - List userSuppliedColumnNames = null; - if (cmdLine.hasOption(IMPORT_COLUMNS_OPT.getOpt())) { - userSuppliedColumnNames = Lists.newArrayList( - Splitter.on(",").trimResults().split - (cmdLine.getOptionValue(IMPORT_COLUMNS_OPT.getOpt()))); + if (IndexUtil.isGlobalIndex(index)) { + hasGlobalIndexes = true; + if (hasLocalIndexes && hasGlobalIndexes) { + break; + } + } + } + + if ( + hasGlobalIndexes && tableNotEmpty && !cmdLine.hasOption(ENABLE_CORRUPT_INDEXES.getOpt()) + ) { + throw new IllegalStateException("Bulk Loading error: Bulk loading is disabled for non" + + " empty tables with global indexes, because it will corrupt" + + " the global index table in most cases.\n" + + "Use the --corruptindexes option to override this check."); + } + + // using conn after it's been closed... o.O + tablesToBeLoaded.addAll(getIndexTables(conn, qualifiedTableName)); + + // When loading a single index table, check index table name is correct + if (qualifiedIndexTableName != null) { + TargetTableRef targetIndexRef = null; + for (TargetTableRef tmpTable : tablesToBeLoaded) { + if (tmpTable.getLogicalName().compareToIgnoreCase(qualifiedIndexTableName) == 0) { + targetIndexRef = tmpTable; + break; + } } - return SchemaUtil.generateColumnInfo( - conn, qualifiedTableName, userSuppliedColumnNames, true); + if (targetIndexRef == null) { + throw new IllegalStateException( + "Bulk Loader error: index table " + qualifiedIndexTableName + " doesn't exist"); + } + tablesToBeLoaded.clear(); + tablesToBeLoaded.add(targetIndexRef); + } } - /** - * Perform any required validation on the table being bulk loaded into: - * - ensure no column family names start with '_', as they'd be ignored leading to problems. - * @throws java.sql.SQLException - */ - private void validateTable(Connection conn, String schemaName, - String tableName) throws SQLException { - - ResultSet rs = conn.getMetaData().getColumns( - null, StringUtil.escapeLike(schemaName), - StringUtil.escapeLike(tableName), null); - while (rs.next()) { - String familyName = rs.getString(PhoenixDatabaseMetaData.COLUMN_FAMILY); - if (familyName != null && familyName.startsWith("_")) { - if (QueryConstants.DEFAULT_COLUMN_FAMILY.equals(familyName)) { - throw new IllegalStateException( - "Bulk Loader error: All column names that are not part of the " + - "primary key constraint must be prefixed with a column family " + - "name (i.e. f.my_column VARCHAR)"); - } else { - throw new IllegalStateException("Bulk Loader error: Column family name " + - "must not start with '_': " + familyName); - } + return submitJob(conf, tableName, inputPaths, outputPath, tablesToBeLoaded, hasLocalIndexes); + } + + /** + * Submits the jobs to the cluster. Loads the HFiles onto the respective tables. 
+ */ + public int submitJob(final Configuration conf, final String qualifiedTableName, + final String inputPaths, final Path outputPath, List tablesToBeLoaded, + boolean hasLocalIndexes) throws Exception { + + Job job = Job.getInstance(conf, "Phoenix MapReduce import for " + qualifiedTableName); + FileInputFormat.addInputPaths(job, inputPaths); + FileOutputFormat.setOutputPath(job, outputPath); + + job.setInputFormatClass(PhoenixTextInputFormat.class); + job.setMapOutputKeyClass(TableRowkeyPair.class); + job.setMapOutputValueClass(ImmutableBytesWritable.class); + job.setOutputKeyClass(TableRowkeyPair.class); + job.setOutputValueClass(KeyValue.class); + job.setReducerClass(FormatToKeyValueReducer.class); + byte[][] splitKeysBeforeJob = null; + try (org.apache.hadoop.hbase.client.Connection hbaseConn = + ConnectionFactory.createConnection(job.getConfiguration())) { + RegionLocator regionLocator = null; + if (hasLocalIndexes) { + try { + regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(qualifiedTableName)); + splitKeysBeforeJob = regionLocator.getStartKeys(); + } finally { + if (regionLocator != null) regionLocator.close(); + } + } + MultiHfileOutputFormat.configureIncrementalLoad(job, tablesToBeLoaded); + + final String tableNamesAsJson = TargetTableRefFunctions.NAMES_TO_JSON.apply(tablesToBeLoaded); + final String logicalNamesAsJson = + TargetTableRefFunctions.LOGICAL_NAMES_TO_JSON.apply(tablesToBeLoaded); + + job.getConfiguration().set(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY, tableNamesAsJson); + job.getConfiguration().set(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY, + logicalNamesAsJson); + + // give subclasses their hook + setupJob(job); + + LOGGER.info("Running MapReduce import job from {} to {}", inputPaths, outputPath); + boolean success = job.waitForCompletion(true); + + if (success) { + if (hasLocalIndexes) { + try { + regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(qualifiedTableName)); + if (!IndexUtil.matchingSplitKeys(splitKeysBeforeJob, regionLocator.getStartKeys())) { + LOGGER.error("The table " + qualifiedTableName + " has local indexes and" + + " there is split key mismatch before and after running" + + " bulkload job. 
Please rerun the job otherwise there may be" + + " inconsistencies between actual data and index data."); + return -1; } + } finally { + if (regionLocator != null) regionLocator.close(); + } + } + LOGGER.info("Loading HFiles from {}", outputPath); + completebulkload(conf, outputPath, tablesToBeLoaded); + LOGGER.info("Removing output directory {}", outputPath); + if (!outputPath.getFileSystem(conf).delete(outputPath, true)) { + LOGGER.error("Failed to delete the output directory {}", outputPath); } - rs.close(); + return 0; + } else { + return -1; + } } - - /** - * Get the index tables of current data table - * @throws java.sql.SQLException - */ - private List getIndexTables(Connection conn, String qualifiedTableName) - throws SQLException { - PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); - List indexTables = new ArrayList(); - for(PTable indexTable : table.getIndexes()){ - indexTables.add(new TargetTableRef(indexTable.getName().getString(), indexTable - .getPhysicalName().getString())); + } + + private void completebulkload(Configuration conf, Path outputPath, + List tablesToBeLoaded) throws Exception { + Set tableNames = new HashSet<>(tablesToBeLoaded.size()); + for (TargetTableRef table : tablesToBeLoaded) { + if (tableNames.contains(table.getPhysicalName())) { + continue; + } + tableNames.add(table.getPhysicalName()); + BulkLoadHFiles loader = BulkLoadHFiles.create(conf); + String tableName = table.getPhysicalName(); + Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputPath, tableName); + LOGGER.info("Loading HFiles for {} from {}", tableName, tableOutputPath); + loader.bulkLoad(TableName.valueOf(tableName), tableOutputPath); + LOGGER.info("Incremental load complete for table=" + tableName); + } + } + + /** + * Build up the list of columns to be imported. The list is taken from the command line if + * present, otherwise it is taken from the table description. + * @param conn connection to Phoenix + * @param cmdLine supplied command line options + * @param qualifiedTableName table name (possibly with schema) of the table to be imported + * @return the list of columns to be imported + */ + List buildImportColumns(Connection conn, CommandLine cmdLine, + String qualifiedTableName) throws SQLException { + List userSuppliedColumnNames = null; + if (cmdLine.hasOption(IMPORT_COLUMNS_OPT.getOpt())) { + userSuppliedColumnNames = Lists.newArrayList( + Splitter.on(",").trimResults().split(cmdLine.getOptionValue(IMPORT_COLUMNS_OPT.getOpt()))); + } + return SchemaUtil.generateColumnInfo(conn, qualifiedTableName, userSuppliedColumnNames, true); + } + + /** + * Perform any required validation on the table being bulk loaded into: - ensure no column family + * names start with '_', as they'd be ignored leading to problems. + * @throws java.sql.SQLException + */ + private void validateTable(Connection conn, String schemaName, String tableName) + throws SQLException { + + ResultSet rs = conn.getMetaData().getColumns(null, StringUtil.escapeLike(schemaName), + StringUtil.escapeLike(tableName), null); + while (rs.next()) { + String familyName = rs.getString(PhoenixDatabaseMetaData.COLUMN_FAMILY); + if (familyName != null && familyName.startsWith("_")) { + if (QueryConstants.DEFAULT_COLUMN_FAMILY.equals(familyName)) { + throw new IllegalStateException( + "Bulk Loader error: All column names that are not part of the " + + "primary key constraint must be prefixed with a column family " + + "name (i.e. 
f.my_column VARCHAR)"); + } else { + throw new IllegalStateException( + "Bulk Loader error: Column family name " + "must not start with '_': " + familyName); } - return indexTables; + } + } + rs.close(); + } + + /** + * Get the index tables of current data table + * @throws java.sql.SQLException + */ + private List getIndexTables(Connection conn, String qualifiedTableName) + throws SQLException { + PTable table = PhoenixRuntime.getTable(conn, qualifiedTableName); + List indexTables = new ArrayList(); + for (PTable indexTable : table.getIndexes()) { + indexTables.add(new TargetTableRef(indexTable.getName().getString(), + indexTable.getPhysicalName().getString())); } + return indexTables; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java index f3141ed0d8d..7853719cf94 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +26,6 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.query.QueryServices; - import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; /** @@ -34,61 +33,58 @@ */ public class CsvBulkImportUtil { - /** - * Configure a job configuration for a bulk CSV import. - * - * @param conf job configuration to be set up - * @param fieldDelimiter field delimiter character for the CSV input - * @param quoteChar quote character for the CSV input - * @param escapeChar escape character for the CSV input - * @param arrayDelimiter array delimiter character, can be null - * @param binaryEncoding - */ - public static void initCsvImportJob(Configuration conf, char fieldDelimiter, Character quoteChar, - Character escapeChar, String arrayDelimiter, String binaryEncoding) { - setChar(conf, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY, fieldDelimiter); - setChar(conf, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY, quoteChar); - setChar(conf, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY, escapeChar); - if (arrayDelimiter != null) { - conf.set(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arrayDelimiter); - } - if(binaryEncoding!=null){ - conf.set(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, binaryEncoding); - } + /** + * Configure a job configuration for a bulk CSV import. 
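For orientation, a minimal sketch (not part of the patch) of how a driver could call this utility. The delimiter values below are made up; only the initCsvImportJob signature and the phoenix.mapreduce.import.fielddelimiter key come from this hunk:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.phoenix.mapreduce.CsvBulkImportUtil;

  public class CsvImportConfigSketch {
    public static void main(String[] args) {
      // Made-up example values: ',' field delimiter, '"' quote character,
      // '\' escape character, no array-delimiter or binary-encoding override.
      Configuration conf = new Configuration();
      CsvBulkImportUtil.initCsvImportJob(conf, ',', '"', '\\', null, null);
      // The delimiter is stored Base64-encoded (see setChar below),
      // so this prints "LA==" rather than ",".
      System.out.println(conf.get("phoenix.mapreduce.import.fielddelimiter"));
    }
  }
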
+ * @param conf job configuration to be set up + * @param fieldDelimiter field delimiter character for the CSV input + * @param quoteChar quote character for the CSV input + * @param escapeChar escape character for the CSV input + * @param arrayDelimiter array delimiter character, can be null + */ + public static void initCsvImportJob(Configuration conf, char fieldDelimiter, Character quoteChar, + Character escapeChar, String arrayDelimiter, String binaryEncoding) { + setChar(conf, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY, fieldDelimiter); + setChar(conf, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY, quoteChar); + setChar(conf, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY, escapeChar); + if (arrayDelimiter != null) { + conf.set(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arrayDelimiter); } - - /** - * Configure an {@link ImportPreUpsertKeyValueProcessor} for a CSV bulk import job. - * - * @param conf job configuration - * @param processorClass class to be used for performing pre-upsert processing - */ - public static void configurePreUpsertProcessor(Configuration conf, - Class processorClass) { - conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, processorClass, - ImportPreUpsertKeyValueProcessor.class); + if (binaryEncoding != null) { + conf.set(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, binaryEncoding); } + } - @VisibleForTesting - static void setChar(Configuration conf, String confKey, Character charValue) { - if(charValue!=null) { - conf.set(confKey, Bytes.toString(Base64.getEncoder().encode( - charValue.toString().getBytes(StandardCharsets.UTF_8)))); - } - } + /** + * Configure an {@link ImportPreUpsertKeyValueProcessor} for a CSV bulk import job. + * @param conf job configuration + * @param processorClass class to be used for performing pre-upsert processing + */ + public static void configurePreUpsertProcessor(Configuration conf, + Class processorClass) { + conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, processorClass, + ImportPreUpsertKeyValueProcessor.class); + } - @VisibleForTesting - static Character getCharacter(Configuration conf, String confKey) { - String strValue = conf.get(confKey); - if (strValue == null) { - return null; - } - return new String(Base64.getDecoder().decode(strValue.getBytes(StandardCharsets.UTF_8)), - StandardCharsets.UTF_8).charAt(0); + @VisibleForTesting + static void setChar(Configuration conf, String confKey, Character charValue) { + if (charValue != null) { + conf.set(confKey, Bytes.toString( + Base64.getEncoder().encode(charValue.toString().getBytes(StandardCharsets.UTF_8)))); } + } - public static Path getOutputPath(Path outputdir, String tableName) { - return new Path(outputdir, - tableName.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)); + @VisibleForTesting + static Character getCharacter(Configuration conf, String confKey) { + String strValue = conf.get(confKey); + if (strValue == null) { + return null; } + return new String(Base64.getDecoder().decode(strValue.getBytes(StandardCharsets.UTF_8)), + StandardCharsets.UTF_8).charAt(0); + } + + public static Path getOutputPath(Path outputdir, String tableName) { + return new Path(outputdir, + tableName.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java index 155ca308c3c..f0871bfcbc1 100644 --- 
a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,101 +20,101 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.ToolRunner; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.phoenix.util.ColumnInfo; public class CsvBulkLoadTool extends AbstractBulkLoadTool { - static final Option DELIMITER_OPT = new Option("d", "delimiter", true, "Input delimiter, defaults to comma"); - static final Option QUOTE_OPT = new Option("q", "quote", true, "Supply a custom phrase delimiter, defaults to double quote character"); - static final Option ESCAPE_OPT = new Option("e", "escape", true, "Supply a custom escape character, default is a backslash"); - static final Option ARRAY_DELIMITER_OPT = new Option("a", "array-delimiter", true, "Array element delimiter (optional)"); - static final Option binaryEncodingOption = new Option("b", "binaryEncoding", true, "Specifies binary encoding"); + static final Option DELIMITER_OPT = + new Option("d", "delimiter", true, "Input delimiter, defaults to comma"); + static final Option QUOTE_OPT = new Option("q", "quote", true, + "Supply a custom phrase delimiter, defaults to double quote character"); + static final Option ESCAPE_OPT = + new Option("e", "escape", true, "Supply a custom escape character, default is a backslash"); + static final Option ARRAY_DELIMITER_OPT = + new Option("a", "array-delimiter", true, "Array element delimiter (optional)"); + static final Option binaryEncodingOption = + new Option("b", "binaryEncoding", true, "Specifies binary encoding"); - @Override - protected Options getOptions() { - Options options = super.getOptions(); - options.addOption(DELIMITER_OPT); - options.addOption(QUOTE_OPT); - options.addOption(ESCAPE_OPT); - options.addOption(ARRAY_DELIMITER_OPT); - options.addOption(binaryEncodingOption); - return options; - } + @Override + protected Options getOptions() { + Options options = super.getOptions(); + options.addOption(DELIMITER_OPT); + options.addOption(QUOTE_OPT); + options.addOption(ESCAPE_OPT); + options.addOption(ARRAY_DELIMITER_OPT); + options.addOption(binaryEncodingOption); + return options; + } - @Override - protected void configureOptions(CommandLine cmdLine, List importColumns, - Configuration conf) throws SQLException { + @Override + protected void configureOptions(CommandLine cmdLine, List importColumns, + Configuration conf) throws SQLException { - // we don't parse ZK_QUORUM_OPT here because we need it in order to - // create the connection we need to build 
importColumns. + // we don't parse ZK_QUORUM_OPT here because we need it in order to + // create the connection we need to build importColumns. - Character delimiterChar = ','; - if (cmdLine.hasOption(DELIMITER_OPT.getOpt())) { - String delimString = StringEscapeUtils.unescapeJava(cmdLine.getOptionValue - (DELIMITER_OPT.getOpt())); - if (delimString.length() != 1) { - throw new IllegalArgumentException("Illegal delimiter character: " + delimString); - } - delimiterChar = delimString.charAt(0); - } + Character delimiterChar = ','; + if (cmdLine.hasOption(DELIMITER_OPT.getOpt())) { + String delimString = + StringEscapeUtils.unescapeJava(cmdLine.getOptionValue(DELIMITER_OPT.getOpt())); + if (delimString.length() != 1) { + throw new IllegalArgumentException("Illegal delimiter character: " + delimString); + } + delimiterChar = delimString.charAt(0); + } - Character quoteChar = '"'; - if (cmdLine.hasOption(QUOTE_OPT.getOpt())) { - String quoteString = StringEscapeUtils.unescapeJava(cmdLine.getOptionValue(QUOTE_OPT - .getOpt())); - if(quoteString.length() == 0) { - quoteChar = null; - } else if (quoteString.length() != 1) { - throw new IllegalArgumentException("Illegal quote character: " + quoteString); - } else { - quoteChar = quoteString.charAt(0); - } - } + Character quoteChar = '"'; + if (cmdLine.hasOption(QUOTE_OPT.getOpt())) { + String quoteString = + StringEscapeUtils.unescapeJava(cmdLine.getOptionValue(QUOTE_OPT.getOpt())); + if (quoteString.length() == 0) { + quoteChar = null; + } else if (quoteString.length() != 1) { + throw new IllegalArgumentException("Illegal quote character: " + quoteString); + } else { + quoteChar = quoteString.charAt(0); + } + } - Character escapeChar = '\\'; - if (cmdLine.hasOption(ESCAPE_OPT.getOpt())) { - String escapeString = cmdLine.getOptionValue(ESCAPE_OPT.getOpt()); - if(escapeString.length() == 0) { - escapeChar = null; - } else if (escapeString.length() != 1) { - throw new IllegalArgumentException("Illegal escape character: " + escapeString); - } else { - escapeChar = escapeString.charAt(0); - } - } - - String binaryEncoding = null; - if (cmdLine.hasOption(binaryEncodingOption.getOpt())) { - binaryEncoding = cmdLine.getOptionValue(binaryEncodingOption.getOpt()); - } - - CsvBulkImportUtil.initCsvImportJob( - conf, - delimiterChar, - quoteChar, - escapeChar, - cmdLine.getOptionValue(ARRAY_DELIMITER_OPT.getOpt()), - binaryEncoding); + Character escapeChar = '\\'; + if (cmdLine.hasOption(ESCAPE_OPT.getOpt())) { + String escapeString = cmdLine.getOptionValue(ESCAPE_OPT.getOpt()); + if (escapeString.length() == 0) { + escapeChar = null; + } else if (escapeString.length() != 1) { + throw new IllegalArgumentException("Illegal escape character: " + escapeString); + } else { + escapeChar = escapeString.charAt(0); + } } - @Override - protected void setupJob(Job job) { - // Allow overriding the job jar setting by using a -D system property at startup - if (job.getJar() == null) { - job.setJarByClass(CsvToKeyValueMapper.class); - } - job.setMapperClass(CsvToKeyValueMapper.class); + String binaryEncoding = null; + if (cmdLine.hasOption(binaryEncodingOption.getOpt())) { + binaryEncoding = cmdLine.getOptionValue(binaryEncodingOption.getOpt()); } - public static void main(String[] args) throws Exception { - int exitStatus = ToolRunner.run(new CsvBulkLoadTool(), args); - System.exit(exitStatus); + CsvBulkImportUtil.initCsvImportJob(conf, delimiterChar, quoteChar, escapeChar, + cmdLine.getOptionValue(ARRAY_DELIMITER_OPT.getOpt()), binaryEncoding); + } + + @Override + 
protected void setupJob(Job job) { + // Allow overriding the job jar setting by using a -D system property at startup + if (job.getJar() == null) { + job.setJarByClass(CsvToKeyValueMapper.class); } + job.setMapperClass(CsvToKeyValueMapper.class); + } + + public static void main(String[] args) throws Exception { + int exitStatus = ToolRunner.run(new CsvBulkLoadTool(), args); + System.exit(exitStatus); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java index a951939c83e..d3bd69ec405 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,88 +25,81 @@ import org.apache.commons.csv.CSVParser; import org.apache.commons.csv.CSVRecord; import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.util.CSVCommonsLoader; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.UpsertExecutor; import org.apache.phoenix.util.csv.CsvUpsertExecutor; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; - /** * MapReduce mapper that converts CSV input lines into KeyValues that can be written to HFiles. - * - * KeyValues are produced by executing UPSERT statements on a Phoenix connection and then - * extracting the created KeyValues and rolling back the statement execution before it is - * committed to HBase. + * KeyValues are produced by executing UPSERT statements on a Phoenix connection and then extracting + * the created KeyValues and rolling back the statement execution before it is committed to HBase. 
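A standalone sketch of that upsert-then-rollback pattern, assuming a hypothetical local Phoenix URL and table. The mapper additionally drains the pending cells through PhoenixRuntime.getUncommittedDataIterator(conn, true), shown later in this patch, before rolling back:

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.PreparedStatement;

  public class UpsertRollbackSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical connection URL and table; the point is only the pattern:
      // run the UPSERT with auto-commit off, harvest the uncommitted cells
      // (the mapper uses PhoenixRuntime.getUncommittedDataIterator(conn, true)),
      // then roll back so nothing is written through the normal write path.
      try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        conn.setAutoCommit(false);
        try (PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO EXAMPLE_TABLE (ID, NAME) VALUES (?, ?)")) {
          stmt.setInt(1, 1);
          stmt.setString(2, "example");
          stmt.executeUpdate();
        }
        // ... extract the pending cells here and hand them to the HFile writer ...
        conn.rollback();
      }
    }
  }
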
*/ public class CsvToKeyValueMapper extends FormatToBytesWritableMapper { - /** Configuration key for the field delimiter for input csv records */ - public static final String FIELD_DELIMITER_CONFKEY = "phoenix.mapreduce.import.fielddelimiter"; - - /** Configuration key for the quote char for input csv records */ - public static final String QUOTE_CHAR_CONFKEY = "phoenix.mapreduce.import.quotechar"; - - /** Configuration key for the escape char for input csv records */ - public static final String ESCAPE_CHAR_CONFKEY = "phoenix.mapreduce.import.escapechar"; - - /** Configuration key for the array element delimiter for input arrays */ - public static final String ARRAY_DELIMITER_CONFKEY = "phoenix.mapreduce.import.arraydelimiter"; - - private CsvLineParser lineParser; - - @Override - protected LineParser getLineParser() { - return lineParser; - } - - @Override - protected void setup(Context context) throws IOException, InterruptedException { - super.setup(context); - Configuration conf = context.getConfiguration(); - lineParser = new CsvLineParser( - CsvBulkImportUtil.getCharacter(conf, FIELD_DELIMITER_CONFKEY), - CsvBulkImportUtil.getCharacter(conf, QUOTE_CHAR_CONFKEY), - CsvBulkImportUtil.getCharacter(conf, ESCAPE_CHAR_CONFKEY)); + /** Configuration key for the field delimiter for input csv records */ + public static final String FIELD_DELIMITER_CONFKEY = "phoenix.mapreduce.import.fielddelimiter"; + + /** Configuration key for the quote char for input csv records */ + public static final String QUOTE_CHAR_CONFKEY = "phoenix.mapreduce.import.quotechar"; + + /** Configuration key for the escape char for input csv records */ + public static final String ESCAPE_CHAR_CONFKEY = "phoenix.mapreduce.import.escapechar"; + + /** Configuration key for the array element delimiter for input arrays */ + public static final String ARRAY_DELIMITER_CONFKEY = "phoenix.mapreduce.import.arraydelimiter"; + + private CsvLineParser lineParser; + + @Override + protected LineParser getLineParser() { + return lineParser; + } + + @Override + protected void setup(Context context) throws IOException, InterruptedException { + super.setup(context); + Configuration conf = context.getConfiguration(); + lineParser = new CsvLineParser(CsvBulkImportUtil.getCharacter(conf, FIELD_DELIMITER_CONFKEY), + CsvBulkImportUtil.getCharacter(conf, QUOTE_CHAR_CONFKEY), + CsvBulkImportUtil.getCharacter(conf, ESCAPE_CHAR_CONFKEY)); + } + + @VisibleForTesting + @Override + protected UpsertExecutor buildUpsertExecutor(Configuration conf) { + String tableName = conf.get(TABLE_NAME_CONFKEY); + String arraySeparator = + conf.get(ARRAY_DELIMITER_CONFKEY, CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR); + Preconditions.checkNotNull(tableName, "table name is not configured"); + + List columnInfoList = buildColumnInfoList(conf); + + return new CsvUpsertExecutor(conn, tableName, columnInfoList, upsertListener, arraySeparator); + } + + /** + * Parses a single CSV input line, returning a {@code CSVRecord}. 
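To illustrate the commons-csv calls that the CsvLineParser below is built from, a self-contained sketch with a made-up input line and the same CSVFormat customizations:

  import java.io.StringReader;
  import org.apache.commons.csv.CSVFormat;
  import org.apache.commons.csv.CSVParser;
  import org.apache.commons.csv.CSVRecord;

  public class SingleLineCsvParseSketch {
    public static void main(String[] args) throws Exception {
      // Same CSVFormat customizations as CsvLineParser; the input is sample data.
      CSVFormat format = CSVFormat.DEFAULT.withIgnoreEmptyLines(true)
          .withDelimiter(',').withEscape('\\').withQuote('"');
      try (CSVParser parser = new CSVParser(new StringReader("1,\"Doe, John\",42"), format)) {
        CSVRecord record = parser.iterator().next();
        System.out.println(record.get(1)); // prints: Doe, John
      }
    }
  }
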
+ */ + @VisibleForTesting + static class CsvLineParser implements LineParser { + private final CSVFormat csvFormat; + + CsvLineParser(Character fieldDelimiter, Character quote, Character escape) { + this.csvFormat = CSVFormat.DEFAULT.withIgnoreEmptyLines(true).withDelimiter(fieldDelimiter) + .withEscape(escape).withQuote(quote); } - @VisibleForTesting @Override - protected UpsertExecutor buildUpsertExecutor(Configuration conf) { - String tableName = conf.get(TABLE_NAME_CONFKEY); - String arraySeparator = conf.get(ARRAY_DELIMITER_CONFKEY, - CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR); - Preconditions.checkNotNull(tableName, "table name is not configured"); - - List columnInfoList = buildColumnInfoList(conf); - - return new CsvUpsertExecutor(conn, tableName, columnInfoList, upsertListener, arraySeparator); - } - - /** - * Parses a single CSV input line, returning a {@code CSVRecord}. - */ - @VisibleForTesting - static class CsvLineParser implements LineParser { - private final CSVFormat csvFormat; - - CsvLineParser(Character fieldDelimiter, Character quote, Character escape) { - this.csvFormat = CSVFormat.DEFAULT - .withIgnoreEmptyLines(true) - .withDelimiter(fieldDelimiter) - .withEscape(escape) - .withQuote(quote); - } - - @Override - public CSVRecord parse(String input) throws IOException { - // TODO Creating a new parser for each line seems terribly inefficient but - // there's no public way to parse single lines via commons-csv. We should update - // it to create a LineParser class like this one. - CSVParser csvParser = new CSVParser(new StringReader(input), csvFormat); - return Iterables.getFirst(csvParser, null); - } + public CSVRecord parse(String input) throws IOException { + // TODO Creating a new parser for each line seems terribly inefficient but + // there's no public way to parse single lines via commons-csv. We should update + // it to create a LineParser class like this one. + CSVParser csvParser = new CSVParser(new StringReader(input), csvFormat); + return Iterables.getFirst(csvParser, null); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java index a2b9be8fe41..ebf2a5221ac 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -49,6 +49,14 @@ import org.apache.phoenix.schema.PColumnFamily; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; +import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.IndexUtil; @@ -60,369 +68,356 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; -import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Base class for converting some input source format into {@link ImmutableBytesWritable}s that - * contains packed in a single byte array values for all columns. - * Assumes input format is text-based, with one row per line. Depends on an online cluster - * to retrieve {@link ColumnInfo} from the target table. + * contains packed in a single byte array values for all columns. Assumes input format is + * text-based, with one row per line. Depends on an online cluster to retrieve {@link ColumnInfo} + * from the target table. 
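As a sketch of that cluster-backed ColumnInfo lookup, mirroring the buildImportColumns() call earlier in this patch; the URL and table name are hypothetical:

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.util.List;
  import org.apache.phoenix.util.ColumnInfo;
  import org.apache.phoenix.util.SchemaUtil;

  public class ColumnInfoLookupSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical URL/table; passing null column names asks for every column
      // of the table, as the bulk load tool does when -c/--import-columns is absent.
      try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        List<ColumnInfo> columns =
            SchemaUtil.generateColumnInfo(conn, "EXAMPLE_TABLE", null, true);
        for (ColumnInfo column : columns) {
          System.out.println(column);
        }
      }
    }
  }
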
*/ -public abstract class FormatToBytesWritableMapper extends Mapper { +public abstract class FormatToBytesWritableMapper + extends Mapper { - protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class); - protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import"; + protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import"; - /** Configuration key for the name of the output table */ - public static final String TABLE_NAME_CONFKEY = "phoenix.mapreduce.import.tablename"; + /** Configuration key for the name of the output table */ + public static final String TABLE_NAME_CONFKEY = "phoenix.mapreduce.import.tablename"; - /** Configuration key for the columns to be imported */ - public static final String COLUMN_INFO_CONFKEY = "phoenix.mapreduce.import.columninfos"; + /** Configuration key for the columns to be imported */ + public static final String COLUMN_INFO_CONFKEY = "phoenix.mapreduce.import.columninfos"; - /** Configuration key for the flag to ignore invalid rows */ - public static final String IGNORE_INVALID_ROW_CONFKEY = "phoenix.mapreduce.import.ignoreinvalidrow"; + /** Configuration key for the flag to ignore invalid rows */ + public static final String IGNORE_INVALID_ROW_CONFKEY = + "phoenix.mapreduce.import.ignoreinvalidrow"; - /** Configuration key for the table names */ - public static final String TABLE_NAMES_CONFKEY = "phoenix.mapreduce.import.tablenames"; + /** Configuration key for the table names */ + public static final String TABLE_NAMES_CONFKEY = "phoenix.mapreduce.import.tablenames"; - /** Configuration key for the table logical names */ - public static final String LOGICAL_NAMES_CONFKEY = "phoenix.mapreduce.import.logicalnames"; + /** Configuration key for the table logical names */ + public static final String LOGICAL_NAMES_CONFKEY = "phoenix.mapreduce.import.logicalnames"; - /** - * Parses a single input line, returning a {@code T}. - */ - public interface LineParser { - T parse(String input) throws IOException; - } + /** + * Parses a single input line, returning a {@code T}. + */ + public interface LineParser { + T parse(String input) throws IOException; + } - protected PhoenixConnection conn; - protected UpsertExecutor upsertExecutor; - protected ImportPreUpsertKeyValueProcessor preUpdateProcessor; - protected IndexStatusUpdater[] indexStatusUpdaters; - protected List tableNames; - protected List logicalNames; - protected MapperUpsertListener upsertListener; + protected PhoenixConnection conn; + protected UpsertExecutor upsertExecutor; + protected ImportPreUpsertKeyValueProcessor preUpdateProcessor; + protected IndexStatusUpdater[] indexStatusUpdaters; + protected List tableNames; + protected List logicalNames; + protected MapperUpsertListener upsertListener; - /* - lookup table for column index. Index in the List matches to the index in tableNames List - */ - protected Map columnIndexes; + /* + * lookup table for column index. 
Index in the List matches to the index in tableNames List + */ + protected Map columnIndexes; - protected abstract UpsertExecutor buildUpsertExecutor(Configuration conf); - protected abstract LineParser getLineParser(); + protected abstract UpsertExecutor buildUpsertExecutor(Configuration conf); - @Override - protected void setup(Context context) throws IOException, InterruptedException { + protected abstract LineParser getLineParser(); - Configuration conf = context.getConfiguration(); + @Override + protected void setup(Context context) throws IOException, InterruptedException { - // pass client configuration into driver - Properties clientInfos = new Properties(); - for (Map.Entry entry : conf) { - clientInfos.setProperty(entry.getKey(), entry.getValue()); - } + Configuration conf = context.getConfiguration(); - try { - conn = (PhoenixConnection) QueryUtil.getConnectionOnServer(clientInfos, conf); - // We are dependent on rolling back before performing commits, so we need to be sure - // that auto-commit is not turned on - conn.setAutoCommit(false); + // pass client configuration into driver + Properties clientInfos = new Properties(); + for (Map.Entry entry : conf) { + clientInfos.setProperty(entry.getKey(), entry.getValue()); + } - final String tableNamesConf = conf.get(TABLE_NAMES_CONFKEY); - final String logicalNamesConf = conf.get(LOGICAL_NAMES_CONFKEY); - tableNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(tableNamesConf); - logicalNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(logicalNamesConf); + try { + conn = (PhoenixConnection) QueryUtil.getConnectionOnServer(clientInfos, conf); + // We are dependent on rolling back before performing commits, so we need to be sure + // that auto-commit is not turned on + conn.setAutoCommit(false); - initColumnIndexes(); - } catch (SQLException e) { - throw new RuntimeException(e); - } + final String tableNamesConf = conf.get(TABLE_NAMES_CONFKEY); + final String logicalNamesConf = conf.get(LOGICAL_NAMES_CONFKEY); + tableNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(tableNamesConf); + logicalNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(logicalNamesConf); - upsertListener = new MapperUpsertListener( - context, conf.getBoolean(IGNORE_INVALID_ROW_CONFKEY, true)); - upsertExecutor = buildUpsertExecutor(conf); - preUpdateProcessor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + initColumnIndexes(); + } catch (SQLException e) { + throw new RuntimeException(e); } - @Override - protected void map(LongWritable key, Text value, Context context) throws IOException, - InterruptedException { - if (conn == null) { - throw new RuntimeException("Connection not initialized."); - } - try { - RECORD record = null; - try { - record = getLineParser().parse(value.toString()); - } catch (IOException e) { - context.getCounter(COUNTER_GROUP_NAME, "Parser errors").increment(1L); - return; - } - - if (record == null) { - context.getCounter(COUNTER_GROUP_NAME, "Empty records").increment(1L); - return; - } - upsertExecutor.execute(ImmutableList.of(record)); - Map> map = new HashMap<>(); - Iterator>> uncommittedDataIterator - = PhoenixRuntime.getUncommittedDataIterator(conn, true); - while (uncommittedDataIterator.hasNext()) { - Pair> kvPair = uncommittedDataIterator.next(); - List keyValueList = kvPair.getSecond(); - byte[] tableName = kvPair.getFirst(); - keyValueList = preUpdateProcessor.preUpsert(tableName, keyValueList); - // Create a list of KV for each table - for (int i = 0; i < tableNames.size(); i++) { - if 
(Bytes.compareTo(Bytes.toBytes(tableNames.get(i)), tableName) == 0) { - if (!map.containsKey(i)) { - map.put(i, new ArrayList()); - } - List cellsForTable = map.get(i); - if (indexStatusUpdaters[i] != null) { - indexStatusUpdaters[i].setVerified(keyValueList); - } - cellsForTable.addAll(keyValueList); - break; - } - } + upsertListener = + new MapperUpsertListener(context, conf.getBoolean(IGNORE_INVALID_ROW_CONFKEY, true)); + upsertExecutor = buildUpsertExecutor(conf); + preUpdateProcessor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + } + + @Override + protected void map(LongWritable key, Text value, Context context) + throws IOException, InterruptedException { + if (conn == null) { + throw new RuntimeException("Connection not initialized."); + } + try { + RECORD record = null; + try { + record = getLineParser().parse(value.toString()); + } catch (IOException e) { + context.getCounter(COUNTER_GROUP_NAME, "Parser errors").increment(1L); + return; + } + + if (record == null) { + context.getCounter(COUNTER_GROUP_NAME, "Empty records").increment(1L); + return; + } + upsertExecutor.execute(ImmutableList. of(record)); + Map> map = new HashMap<>(); + Iterator>> uncommittedDataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn, true); + while (uncommittedDataIterator.hasNext()) { + Pair> kvPair = uncommittedDataIterator.next(); + List keyValueList = kvPair.getSecond(); + byte[] tableName = kvPair.getFirst(); + keyValueList = preUpdateProcessor.preUpsert(tableName, keyValueList); + // Create a list of KV for each table + for (int i = 0; i < tableNames.size(); i++) { + if (Bytes.compareTo(Bytes.toBytes(tableNames.get(i)), tableName) == 0) { + if (!map.containsKey(i)) { + map.put(i, new ArrayList()); } - for (Map.Entry> rowEntry : map.entrySet()) { - int tableIndex = rowEntry.getKey(); - List lkv = rowEntry.getValue(); - // All KV values combines to a single byte array - writeAggregatedRow(context, tableNames.get(tableIndex), lkv); + List cellsForTable = map.get(i); + if (indexStatusUpdaters[i] != null) { + indexStatusUpdaters[i].setVerified(keyValueList); } - conn.rollback(); - } catch (Exception e) { - throw new RuntimeException(e); + cellsForTable.addAll(keyValueList); + break; + } } + } + for (Map.Entry> rowEntry : map.entrySet()) { + int tableIndex = rowEntry.getKey(); + List lkv = rowEntry.getValue(); + // All KV values combines to a single byte array + writeAggregatedRow(context, tableNames.get(tableIndex), lkv); + } + conn.rollback(); + } catch (Exception e) { + throw new RuntimeException(e); } - - /* - Map all unique pairs to index. 
Table name is part of TableRowkey, so we do - not care about it - */ - private void initColumnIndexes() throws SQLException { - columnIndexes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - indexStatusUpdaters = new IndexStatusUpdater[logicalNames.size()]; - int columnIndex = 0; - for (int index = 0; index < logicalNames.size(); index++) { - PTable table = conn.getTable(logicalNames.get(index)); - if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) { - List cfs = table.getColumnFamilies(); - for (int i = 0; i < cfs.size(); i++) { - byte[] family = cfs.get(i).getName().getBytes(); - byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, - QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); - columnIndexes.put(cfn, new Integer(columnIndex)); - columnIndex++; - } - } else { - List cls = table.getColumns(); - for (int i = 0; i < cls.size(); i++) { - PColumn c = cls.get(i); - byte[] family = new byte[0]; - byte[] cq; - if (!SchemaUtil.isPKColumn(c)) { - family = c.getFamilyName().getBytes(); - cq = c.getColumnQualifierBytes(); - } else { - cq = c.getName().getBytes(); - } - byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); - if (!columnIndexes.containsKey(cfn)) { - columnIndexes.put(cfn, new Integer(columnIndex)); - columnIndex++; - } - } - } - byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); - byte[] cfn = Bytes.add(emptyColumnFamily, QueryConstants.NAMESPACE_SEPARATOR_BYTES, emptyKeyValue); + } + + /* + * Map all unique pairs to index. Table name is part of TableRowkey, so we do not + * care about it + */ + private void initColumnIndexes() throws SQLException { + columnIndexes = new TreeMap<>(Bytes.BYTES_COMPARATOR); + indexStatusUpdaters = new IndexStatusUpdater[logicalNames.size()]; + int columnIndex = 0; + for (int index = 0; index < logicalNames.size(); index++) { + PTable table = conn.getTable(logicalNames.get(index)); + if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) { + List cfs = table.getColumnFamilies(); + for (int i = 0; i < cfs.size(); i++) { + byte[] family = cfs.get(i).getName().getBytes(); + byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, + QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); + columnIndexes.put(cfn, new Integer(columnIndex)); + columnIndex++; + } + } else { + List cls = table.getColumns(); + for (int i = 0; i < cls.size(); i++) { + PColumn c = cls.get(i); + byte[] family = new byte[0]; + byte[] cq; + if (!SchemaUtil.isPKColumn(c)) { + family = c.getFamilyName().getBytes(); + cq = c.getColumnQualifierBytes(); + } else { + cq = c.getName().getBytes(); + } + byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); + if (!columnIndexes.containsKey(cfn)) { columnIndexes.put(cfn, new Integer(columnIndex)); columnIndex++; - if (IndexUtil.isGlobalIndex(table)) { - indexStatusUpdaters[index] = - new IndexStatusUpdater(emptyColumnFamily, emptyKeyValue); - } + } } + } + byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); + byte[] cfn = + Bytes.add(emptyColumnFamily, QueryConstants.NAMESPACE_SEPARATOR_BYTES, emptyKeyValue); + columnIndexes.put(cfn, new Integer(columnIndex)); + columnIndex++; + if (IndexUtil.isGlobalIndex(table)) { + indexStatusUpdaters[index] = new IndexStatusUpdater(emptyColumnFamily, 
emptyKeyValue); + } } - - /** - * Find the column index which will replace the column name in - * the aggregated array and will be restored in Reducer - * - * @param cell KeyValue for the column - * @return column index for the specified cell or -1 if was not found - */ - private int findIndex(Cell cell) throws IOException { - byte[] familyName = Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); - byte[] cq = Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); - byte[] cfn = Bytes.add(familyName, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); - if(columnIndexes.containsKey(cfn)) { - return columnIndexes.get(cfn); - } - return -1; + } + + /** + * Find the column index which will replace the column name in the aggregated array and will be + * restored in Reducer + * @param cell KeyValue for the column + * @return column index for the specified cell or -1 if was not found + */ + private int findIndex(Cell cell) throws IOException { + byte[] familyName = + Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); + byte[] cq = + Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); + byte[] cfn = Bytes.add(familyName, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); + if (columnIndexes.containsKey(cfn)) { + return columnIndexes.get(cfn); } - - /** - * Collect all column values for the same Row. RowKey may be different if indexes are involved, - * so it writes a separate record for each unique RowKey - * - * @param context Current mapper context - * @param tableName Table index in tableNames list - * @param lkv List of KV values that will be combined in a single ImmutableBytesWritable - * @throws IOException - * @throws InterruptedException - */ - - private void writeAggregatedRow(Context context, String tableName, List lkv) - throws IOException, InterruptedException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); - DataOutputStream outputStream = new DataOutputStream(bos); - ImmutableBytesWritable outputKey =null; - if (!lkv.isEmpty()) { - for (Cell cell : lkv) { - if (outputKey == null || Bytes.compareTo(outputKey.get(), outputKey.getOffset(), - outputKey.getLength(), cell.getRowArray(), cell.getRowOffset(), cell - .getRowLength()) != 0) { - // This a the first RowKey or a different from previous - if (outputKey != null) { //It's a different RowKey, so we need to write it - ImmutableBytesWritable aggregatedArray = - new ImmutableBytesWritable(bos.toByteArray()); - outputStream.close(); - context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray); - } - outputKey = new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset() - , cell.getRowLength()); - bos = new ByteArrayOutputStream(1024); - outputStream = new DataOutputStream(bos); - } - /* - The order of aggregation: type, index of column, length of value, value itself - */ - int i = findIndex(cell); - if(i == -1) { - //That may happen when we load only local indexes. 
Since KV pairs for both - // table and local index are going to the same physical table at that point - // we skip those KVs that are not belongs to loca index - continue; - } - outputStream.writeByte(cell.getType().getCode()); - WritableUtils.writeVLong(outputStream,cell.getTimestamp()); - WritableUtils.writeVInt(outputStream, i); - WritableUtils.writeVInt(outputStream, cell.getValueLength()); - outputStream.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); - - } + return -1; + } + + /** + * Collect all column values for the same Row. RowKey may be different if indexes are involved, so + * it writes a separate record for each unique RowKey + * @param context Current mapper context + * @param tableName Table index in tableNames list + * @param lkv List of KV values that will be combined in a single ImmutableBytesWritable + */ + + private void writeAggregatedRow(Context context, String tableName, List lkv) + throws IOException, InterruptedException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); + DataOutputStream outputStream = new DataOutputStream(bos); + ImmutableBytesWritable outputKey = null; + if (!lkv.isEmpty()) { + for (Cell cell : lkv) { + if ( + outputKey == null + || Bytes.compareTo(outputKey.get(), outputKey.getOffset(), outputKey.getLength(), + cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) != 0 + ) { + // This a the first RowKey or a different from previous + if (outputKey != null) { // It's a different RowKey, so we need to write it ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray()); outputStream.close(); context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray); + } + outputKey = new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()); + bos = new ByteArrayOutputStream(1024); + outputStream = new DataOutputStream(bos); } - } - - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - try { - if (conn != null) { - conn.close(); - } - } catch (SQLException e) { - throw new RuntimeException(e); + /* + * The order of aggregation: type, index of column, length of value, value itself + */ + int i = findIndex(cell); + if (i == -1) { + // That may happen when we load only local indexes. Since KV pairs for both + // table and local index are going to the same physical table at that point + // we skip those KVs that are not belongs to loca index + continue; } + outputStream.writeByte(cell.getType().getCode()); + WritableUtils.writeVLong(outputStream, cell.getTimestamp()); + WritableUtils.writeVInt(outputStream, i); + WritableUtils.writeVInt(outputStream, cell.getValueLength()); + outputStream.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + + } + ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray()); + outputStream.close(); + context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray); } - - /** - * Write the list of to-import columns to a job configuration. - * - * @param conf configuration to be written to - * @param columnInfoList list of ColumnInfo objects to be configured for import - */ - @VisibleForTesting - static void configureColumnInfoList(Configuration conf, List columnInfoList) { - conf.set(COLUMN_INFO_CONFKEY, Joiner.on("|").useForNull("").join(columnInfoList)); - } - - /** - * Build the list of ColumnInfos for the import based on information in the configuration. 
- */ - @VisibleForTesting - static List buildColumnInfoList(Configuration conf) { - - return Lists.newArrayList( - Iterables.transform( - Splitter.on("|").split(conf.get(COLUMN_INFO_CONFKEY)), - new Function() { - @Nullable - @Override - public ColumnInfo apply(@Nullable String input) { - if (input == null || input.isEmpty()) { - // An empty string represents a null that was passed in to - // the configuration, which corresponds to an input column - // which is to be skipped - return null; - } - return ColumnInfo.fromString(input); - } - })); + } + + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + try { + if (conn != null) { + conn.close(); + } + } catch (SQLException e) { + throw new RuntimeException(e); } - - /** - * Listener that logs successful upserts and errors to job counters. - */ - @VisibleForTesting - static class MapperUpsertListener implements UpsertExecutor.UpsertListener { - - private final Mapper.Context context; - private final boolean ignoreRecordErrors; - - private MapperUpsertListener( - Mapper.Context context, - boolean ignoreRecordErrors) { - this.context = context; - this.ignoreRecordErrors = ignoreRecordErrors; - } - + } + + /** + * Write the list of to-import columns to a job configuration. + * @param conf configuration to be written to + * @param columnInfoList list of ColumnInfo objects to be configured for import + */ + @VisibleForTesting + static void configureColumnInfoList(Configuration conf, List columnInfoList) { + conf.set(COLUMN_INFO_CONFKEY, Joiner.on("|").useForNull("").join(columnInfoList)); + } + + /** + * Build the list of ColumnInfos for the import based on information in the configuration. + */ + @VisibleForTesting + static List buildColumnInfoList(Configuration conf) { + + return Lists.newArrayList(Iterables.transform( + Splitter.on("|").split(conf.get(COLUMN_INFO_CONFKEY)), new Function() { + @Nullable @Override - public void upsertDone(long upsertCount) { - context.getCounter(COUNTER_GROUP_NAME, "Upserts Done").increment(1L); + public ColumnInfo apply(@Nullable String input) { + if (input == null || input.isEmpty()) { + // An empty string represents a null that was passed in to + // the configuration, which corresponds to an input column + // which is to be skipped + return null; + } + return ColumnInfo.fromString(input); } + })); + } + + /** + * Listener that logs successful upserts and errors to job counters. + */ + @VisibleForTesting + static class MapperUpsertListener implements UpsertExecutor.UpsertListener { + + private final Mapper.Context context; + private final boolean ignoreRecordErrors; + + private MapperUpsertListener( + Mapper.Context context, + boolean ignoreRecordErrors) { + this.context = context; + this.ignoreRecordErrors = ignoreRecordErrors; + } - @Override - public void errorOnRecord(T record, Throwable throwable) { - LOGGER.error("Error on record " + record, throwable); - context.getCounter(COUNTER_GROUP_NAME, "Errors on records").increment(1L); - if (!ignoreRecordErrors) { - Throwables.propagate(throwable); - } - } + @Override + public void upsertDone(long upsertCount) { + context.getCounter(COUNTER_GROUP_NAME, "Upserts Done").increment(1L); } - /** - * A default implementation of {@code ImportPreUpsertKeyValueProcessor} that is used if no - * specific class is configured. This implementation simply passes through the KeyValue - * list that is passed in. 
- */ - public static class DefaultImportPreUpsertKeyValueProcessor implements - ImportPreUpsertKeyValueProcessor { + @Override + public void errorOnRecord(T record, Throwable throwable) { + LOGGER.error("Error on record " + record, throwable); + context.getCounter(COUNTER_GROUP_NAME, "Errors on records").increment(1L); + if (!ignoreRecordErrors) { + Throwables.propagate(throwable); + } + } + } - @Override - public List preUpsert(byte[] tableName, List keyValues) { - return keyValues; - } + /** + * A default implementation of {@code ImportPreUpsertKeyValueProcessor} that is used if no + * specific class is configured. This implementation simply passes through the KeyValue list that + * is passed in. + */ + public static class DefaultImportPreUpsertKeyValueProcessor + implements ImportPreUpsertKeyValueProcessor { + + @Override + public List preUpsert(byte[] tableName, List keyValues) { + return keyValues; } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java index 4d85a12a935..bf095df8b68 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -56,127 +56,125 @@ * Reducer class for the bulkload jobs. 
*/ public class FormatToKeyValueReducer - extends Reducer { + extends Reducer { - protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToKeyValueReducer.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToKeyValueReducer.class); + protected List tableNames; + protected List logicalNames; + protected KeyValueBuilder builder; + private Map> columnIndexes; - protected List tableNames; - protected List logicalNames; - protected KeyValueBuilder builder; - private Map> columnIndexes; + @Override + protected void setup(Context context) throws IOException, InterruptedException { + Configuration conf = context.getConfiguration(); - - @Override - protected void setup(Context context) throws IOException, InterruptedException { - Configuration conf = context.getConfiguration(); - - // pass client configuration into driver - Properties clientInfos = new Properties(); - for (Map.Entry entry : conf) { - clientInfos.setProperty(entry.getKey(), entry.getValue()); - } - try (PhoenixConnection conn = (PhoenixConnection) QueryUtil - .getConnectionOnServer(clientInfos, conf)) { - builder = conn.getKeyValueBuilder(); - final String tableNamesConf = conf.get(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY); - final String logicalNamesConf = conf.get(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY); - tableNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(tableNamesConf); - logicalNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(logicalNamesConf); - initColumnsMap(conn); - } catch (SQLException e) { - throw new RuntimeException(e); - } + // pass client configuration into driver + Properties clientInfos = new Properties(); + for (Map.Entry entry : conf) { + clientInfos.setProperty(entry.getKey(), entry.getValue()); } + try (PhoenixConnection conn = + (PhoenixConnection) QueryUtil.getConnectionOnServer(clientInfos, conf)) { + builder = conn.getKeyValueBuilder(); + final String tableNamesConf = conf.get(FormatToBytesWritableMapper.TABLE_NAMES_CONFKEY); + final String logicalNamesConf = conf.get(FormatToBytesWritableMapper.LOGICAL_NAMES_CONFKEY); + tableNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(tableNamesConf); + logicalNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(logicalNamesConf); + initColumnsMap(conn); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } - private void initColumnsMap(PhoenixConnection conn) throws SQLException { - Map indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - columnIndexes = new HashMap<>(); - int columnIndex = 0; - for (int index = 0; index < logicalNames.size(); index++) { - PTable table = conn.getTable(logicalNames.get(index)); - if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) { - List cfs = table.getColumnFamilies(); - for (int i = 0; i < cfs.size(); i++) { - byte[] family = cfs.get(i).getName().getBytes(); - Pair pair = new Pair<>(family, - QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); - columnIndexes.put(new Integer(columnIndex), pair); - columnIndex++; - } - } else { - List cls = table.getColumns(); - for (int i = 0; i < cls.size(); i++) { - PColumn c = cls.get(i); - byte[] family = new byte[0]; - byte[] cq; - if (!SchemaUtil.isPKColumn(c)) { - family = c.getFamilyName().getBytes(); - cq = c.getColumnQualifierBytes(); - } else { - cq = c.getName().getBytes(); - } - byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); - Pair pair = new Pair<>(family, cq); - if (!indexMap.containsKey(cfn)) { - indexMap.put(cfn, 
new Integer(columnIndex)); - columnIndexes.put(new Integer(columnIndex), pair); - columnIndex++; - } - } - } - byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); - Pair pair = new Pair<>(emptyColumnFamily, emptyKeyValue); + private void initColumnsMap(PhoenixConnection conn) throws SQLException { + Map indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + columnIndexes = new HashMap<>(); + int columnIndex = 0; + for (int index = 0; index < logicalNames.size(); index++) { + PTable table = conn.getTable(logicalNames.get(index)); + if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) { + List cfs = table.getColumnFamilies(); + for (int i = 0; i < cfs.size(); i++) { + byte[] family = cfs.get(i).getName().getBytes(); + Pair pair = + new Pair<>(family, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES); + columnIndexes.put(new Integer(columnIndex), pair); + columnIndex++; + } + } else { + List cls = table.getColumns(); + for (int i = 0; i < cls.size(); i++) { + PColumn c = cls.get(i); + byte[] family = new byte[0]; + byte[] cq; + if (!SchemaUtil.isPKColumn(c)) { + family = c.getFamilyName().getBytes(); + cq = c.getColumnQualifierBytes(); + } else { + cq = c.getName().getBytes(); + } + byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq); + Pair pair = new Pair<>(family, cq); + if (!indexMap.containsKey(cfn)) { + indexMap.put(cfn, new Integer(columnIndex)); columnIndexes.put(new Integer(columnIndex), pair); columnIndex++; + } } + } + byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst(); + Pair pair = new Pair<>(emptyColumnFamily, emptyKeyValue); + columnIndexes.put(new Integer(columnIndex), pair); + columnIndex++; } + } - @Override - protected void reduce(TableRowkeyPair key, Iterable values, - Reducer.Context context) - throws IOException, InterruptedException { - TreeSet map = new TreeSet(CellComparatorImpl.COMPARATOR); - for (ImmutableBytesWritable aggregatedArray : values) { - DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get())); - while (input.available() != 0) { - byte type = input.readByte(); - long timestamp = WritableUtils.readVLong(input); - int index = WritableUtils.readVInt(input); - ImmutableBytesWritable family; - ImmutableBytesWritable cq; - ImmutableBytesWritable value = QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR; - Pair pair = columnIndexes.get(index); - family = new ImmutableBytesWritable(pair.getFirst()); - cq = new ImmutableBytesWritable(pair.getSecond()); - int len = WritableUtils.readVInt(input); - if (len > 0) { - byte[] array = new byte[len]; - input.read(array); - value = new ImmutableBytesWritable(array); - } - KeyValue kv; - KeyValue.Type kvType = KeyValue.Type.codeToType(type); - switch (kvType) { - case Put: // not null value - kv = builder.buildPut(key.getRowkey(), family, cq, timestamp, value); - break; - case DeleteColumn: // null value - kv = builder.buildDeleteColumns(key.getRowkey(), family, cq, timestamp); - break; - default: - throw new IOException("Unsupported KeyValue type " + kvType); - } - map.add(kv); - } - Closeables.closeQuietly(input); + @Override + protected void reduce(TableRowkeyPair key, Iterable values, + Reducer.Context context) + throws IOException, InterruptedException { + TreeSet map = new TreeSet(CellComparatorImpl.COMPARATOR); + for 
(ImmutableBytesWritable aggregatedArray : values) { + DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get())); + while (input.available() != 0) { + byte type = input.readByte(); + long timestamp = WritableUtils.readVLong(input); + int index = WritableUtils.readVInt(input); + ImmutableBytesWritable family; + ImmutableBytesWritable cq; + ImmutableBytesWritable value = QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR; + Pair pair = columnIndexes.get(index); + family = new ImmutableBytesWritable(pair.getFirst()); + cq = new ImmutableBytesWritable(pair.getSecond()); + int len = WritableUtils.readVInt(input); + if (len > 0) { + byte[] array = new byte[len]; + input.read(array); + value = new ImmutableBytesWritable(array); } - context.setStatus("Read " + map.getClass()); - int index = 0; - for (KeyValue kv : map) { - context.write(key, kv); - if (++index % 100 == 0) context.setStatus("Wrote " + index); + KeyValue kv; + KeyValue.Type kvType = KeyValue.Type.codeToType(type); + switch (kvType) { + case Put: // not null value + kv = builder.buildPut(key.getRowkey(), family, cq, timestamp, value); + break; + case DeleteColumn: // null value + kv = builder.buildDeleteColumns(key.getRowkey(), family, cq, timestamp); + break; + default: + throw new IOException("Unsupported KeyValue type " + kvType); } + map.add(kv); + } + Closeables.closeQuietly(input); + } + context.setStatus("Read " + map.getClass()); + int index = 0; + for (KeyValue kv : map) { + context.write(key, kv); + if (++index % 100 == 0) context.setStatus("Wrote " + index); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java index 2de79342b36..511f18e81e4 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,26 +23,21 @@ /** * A listener hook to process KeyValues that are being written to HFiles for bulk import. - * Implementing this interface and configuring it via the {@link - * org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil#UPSERT_HOOK_CLASS_CONFKEY} - * configuration key. - * - * The intention of such a hook is to allow coproccessor-style operations to be peformed on - * data that is being bulk-loaded via MapReduce. + * Implementing this interface and configuring it via the + * {@link org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil#UPSERT_HOOK_CLASS_CONFKEY} + * configuration key. The intention of such a hook is to allow coproccessor-style operations to be + * peformed on data that is being bulk-loaded via MapReduce. */ public interface ImportPreUpsertKeyValueProcessor { - /** - * Process a list of KeyValues before they are written to an HFile. The supplied list of - * KeyValues contain all data that is to be written for a single Phoenix row. - * - * Implementors can filter certain KeyValues from the list, augment the list, or return the - * same list. 
- * - * @param tableName the table name for the key values that are being passed in - * @param keyValues list of KeyValues that are to be written to an HFile - * @return the list that will actually be written - */ - List preUpsert(byte[] tableName, List keyValues); + /** + * Process a list of KeyValues before they are written to an HFile. The supplied list of KeyValues + * contain all data that is to be written for a single Phoenix row. Implementors can filter + * certain KeyValues from the list, augment the list, or return the same list. + * @param tableName the table name for the key values that are being passed in + * @param keyValues list of KeyValues that are to be written to an HFile + * @return the list that will actually be written + */ + List preUpsert(byte[] tableName, List keyValues); } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonBulkLoadTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonBulkLoadTool.java index 4a14b3a0d7f..1ad21b22959 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonBulkLoadTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonBulkLoadTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,10 +20,10 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.ToolRunner; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.util.ColumnInfo; /** @@ -32,22 +32,22 @@ */ public class JsonBulkLoadTool extends AbstractBulkLoadTool { - @Override - protected void configureOptions(CommandLine cmdLine, List importColumns, - Configuration conf) throws SQLException { - // noop - } + @Override + protected void configureOptions(CommandLine cmdLine, List importColumns, + Configuration conf) throws SQLException { + // noop + } - @Override - protected void setupJob(Job job) { - // Allow overriding the job jar setting by using a -D system property at startup - if (job.getJar() == null) { - job.setJarByClass(JsonToKeyValueMapper.class); - } - job.setMapperClass(JsonToKeyValueMapper.class); + @Override + protected void setupJob(Job job) { + // Allow overriding the job jar setting by using a -D system property at startup + if (job.getJar() == null) { + job.setJarByClass(JsonToKeyValueMapper.class); } + job.setMapperClass(JsonToKeyValueMapper.class); + } - public static void main(String[] args) throws Exception { - ToolRunner.run(new JsonBulkLoadTool(), args); - } + public static void main(String[] args) throws Exception { + ToolRunner.run(new JsonBulkLoadTool(), args); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java index fd4bb340583..ff7ca0d9c3e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/JsonToKeyValueMapper.java @@ -7,7 +7,7 @@ * 
"License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,52 +22,49 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.JacksonUtil; import org.apache.phoenix.util.UpsertExecutor; import org.apache.phoenix.util.json.JsonUpsertExecutor; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** * MapReduce mapper that converts JSON input lines into KeyValues that can be written to HFiles. - * - * KeyValues are produced by executing UPSERT statements on a Phoenix connection and then - * extracting the created KeyValues and rolling back the statement execution before it is - * committed to HBase. + * KeyValues are produced by executing UPSERT statements on a Phoenix connection and then extracting + * the created KeyValues and rolling back the statement execution before it is committed to HBase. */ public class JsonToKeyValueMapper extends FormatToBytesWritableMapper> { - private LineParser> lineParser; + private LineParser> lineParser; - @Override - protected LineParser> getLineParser() { - return lineParser; - } + @Override + protected LineParser> getLineParser() { + return lineParser; + } - @Override - protected void setup(Context context) throws IOException, InterruptedException { - super.setup(context); - lineParser = new JsonLineParser(); - } + @Override + protected void setup(Context context) throws IOException, InterruptedException { + super.setup(context); + lineParser = new JsonLineParser(); + } - @VisibleForTesting - @Override - protected UpsertExecutor, ?> buildUpsertExecutor(Configuration conf) { - String tableName = conf.get(TABLE_NAME_CONFKEY); - Preconditions.checkNotNull(tableName, "table name is not configured"); + @VisibleForTesting + @Override + protected UpsertExecutor, ?> buildUpsertExecutor(Configuration conf) { + String tableName = conf.get(TABLE_NAME_CONFKEY); + Preconditions.checkNotNull(tableName, "table name is not configured"); - List columnInfoList = buildColumnInfoList(conf); + List columnInfoList = buildColumnInfoList(conf); - return new JsonUpsertExecutor(conn, tableName, columnInfoList, upsertListener); - } + return new JsonUpsertExecutor(conn, tableName, columnInfoList, upsertListener); + } - @VisibleForTesting - static class JsonLineParser implements LineParser> { - @Override - public Map parse(String input) throws IOException { - return JacksonUtil.getObjectReader(Map.class).readValue(input); - } + @VisibleForTesting + static class JsonLineParser implements LineParser> { + @Override + public Map parse(String input) throws IOException { + return JacksonUtil.getObjectReader(Map.class).readValue(input); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java index b792958b7ab..7f9eefa963f 100644 --- 
a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -78,661 +78,629 @@ import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair; import org.apache.phoenix.mapreduce.bulkload.TargetTableRef; import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressWarnings; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - /** - * The MultiHfileOutputFormat class simplifies writing HFiles for multiple tables. - * It has been adapted from {#link HFileOutputFormat2} but differs from the fact it creates - * HFiles for multiple tables. + * The MultiHfileOutputFormat class simplifies writing HFiles for multiple tables. It has been + * adapted from {#link HFileOutputFormat2} but differs from the fact it creates HFiles for multiple + * tables. */ public class MultiHfileOutputFormat extends FileOutputFormat { - private static final Logger LOGGER = LoggerFactory.getLogger(MultiHfileOutputFormat.class); - - private static final String COMPRESSION_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.compression"; - private static final String BLOOM_TYPE_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomtype"; - private static final String BLOCK_SIZE_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.blocksize"; - private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; - - public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.datablock.encoding"; - - /* Delimiter property used to separate table name and column family */ - private static final String AT_DELIMITER = "@"; - - @Override - public RecordWriter getRecordWriter(TaskAttemptContext context) - throws IOException, InterruptedException { - return createRecordWriter(context, this.getOutputCommitter(context)); - } - - /** - * - * @param context - * @return - * @throws IOException - */ - static RecordWriter createRecordWriter( - final TaskAttemptContext context, final OutputCommitter committer) - throws IOException { - // Get the path of the temporary output file - final Path outputdir = ((PathOutputCommitter) committer).getWorkPath(); - final Configuration conf = context.getConfiguration(); - final FileSystem fs = outputdir.getFileSystem(conf); - - final long maxsize = conf.getLongBytes(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - // Invented config. Add to hbase-*.xml if other than default compression. 
- final String defaultCompressionStr = conf.get("hfile.compression", - Compression.Algorithm.NONE.getName()); - final Algorithm defaultCompression = HFileWriterImpl - .compressionByName(defaultCompressionStr); - final boolean compactionExclude = conf.getBoolean( - "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); - - return new RecordWriter() { - // Map of families to writers and how much has been output on the writer. - private final Map writers = - new TreeMap(Bytes.BYTES_COMPARATOR); - private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY; - private final long now = EnvironmentEdgeManager.currentTimeMillis(); - private boolean rollRequested = false; - - @Override - public void write(TableRowkeyPair row, V cell) - throws IOException { - Cell kv = cell; - // null input == user explicitly wants to flush - if (row == null && kv == null) { - rollWriters(); - return; - } - - // phoenix-2216: start : extract table name from the rowkey - String tableName = row.getTableName(); - byte [] rowKey = row.getRowkey().get(); - int length = (PrivateCellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT; - byte [] family = CellUtil.cloneFamily(kv); - byte[] tableAndFamily = join(tableName, Bytes.toString(family)); - WriterLength wl = this.writers.get(tableAndFamily); - // phoenix-2216: end - - // If this is a new column family, verify that the directory exists - if (wl == null) { - // phoenix-2216: start : create a directory for table and family within the output dir - Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName); - fs.mkdirs(new Path(tableOutputPath, Bytes.toString(family))); - // phoenix-2216: end - } - - // If any of the HFiles for the column families has reached - // maxsize, we need to roll all the writers - if (wl != null && wl.written + length >= maxsize) { - this.rollRequested = true; - } - - // This can only happen once a row is finished though - if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) { - rollWriters(); - } - - // create a new WAL writer, if necessary - if (wl == null || wl.writer == null) { - // phoenix-2216: start : passed even the table name - wl = getNewWriter(tableName,family, conf); - // phoenix-2216: end - } - - // we now have the proper WAL writer. full steam ahead - if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) { - CellUtil.setTimestamp(cell, this.now); - } - wl.writer.append(kv); - wl.written += length; - - // Copy the row so we know when a row transition. - this.previousRow = rowKey; - } - - private void rollWriters() throws IOException { - for (WriterLength wl : this.writers.values()) { - if (wl.writer != null) { - LOGGER.info("Writer=" + wl.writer.getPath() + - ((wl.written == 0)? "": ", wrote=" + wl.written)); - close(wl.writer); - } - wl.writer = null; - wl.written = 0; - } - this.rollRequested = false; - } - - /* Create a new StoreFile.Writer. - * @param family - * @return A WriterLength, containing a new StoreFile.Writer. - * @throws IOException - */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", - justification="Not important") - private WriterLength getNewWriter(final String tableName , byte[] family, Configuration conf) - throws IOException { - - WriterLength wl = new WriterLength(); - Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName); - Path familydir = new Path(tableOutputPath, Bytes.toString(family)); - - // phoenix-2216: start : fetching the configuration properties that were set to the table. 
- // create a map from column family to the compression algorithm for the table. - final Map compressionMap = createFamilyCompressionMap(conf,tableName); - final Map bloomTypeMap = createFamilyBloomTypeMap(conf,tableName); - final Map blockSizeMap = createFamilyBlockSizeMap(conf,tableName); - // phoenix-2216: end - - String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); - final Map datablockEncodingMap = createFamilyDataBlockEncodingMap(conf,tableName); - final DataBlockEncoding overriddenEncoding; - if (dataBlockEncodingStr != null) { - overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr); - } else { - overriddenEncoding = null; - } - - Algorithm compression = compressionMap.get(family); - compression = compression == null ? defaultCompression : compression; - BloomType bloomType = bloomTypeMap.get(family); - bloomType = bloomType == null ? BloomType.NONE : bloomType; - Integer blockSize = blockSizeMap.get(family); - blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize; - DataBlockEncoding encoding = overriddenEncoding; - encoding = encoding == null ? datablockEncodingMap.get(family) : encoding; - encoding = encoding == null ? DataBlockEncoding.NONE : encoding; - Configuration tempConf = new Configuration(conf); - tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); - HFileContextBuilder contextBuilder = new HFileContextBuilder() - .withCompression(compression) - .withChecksumType(CompatUtil.getChecksumType(conf)) - .withBytesPerCheckSum(CompatUtil.getBytesPerChecksum(conf)) - .withBlockSize(blockSize) - .withDataBlockEncoding(encoding) - .withCellComparator(CellComparatorImpl.COMPARATOR); - HFileContext hFileContext = contextBuilder.build(); - - StoreFileWriter.Builder storeFileWriterBuilder = - new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext); - wl.writer = storeFileWriterBuilder.build(); - - // join and put it in the writers map . - // phoenix-2216: start : holds a map of writers where the - // key in the map is a join byte array of table name and family. - byte[] tableAndFamily = join(tableName, Bytes.toString(family)); - this.writers.put(tableAndFamily, wl); - // phoenix-2216: end - return wl; - } - - private void close(final StoreFileWriter w) throws IOException { - if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis())); - w.appendFileInfo(BULKLOAD_TASK_KEY, - Bytes.toBytes(context.getTaskAttemptID().toString())); - w.appendFileInfo(MAJOR_COMPACTION_KEY, - Bytes.toBytes(true)); - w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, - Bytes.toBytes(compactionExclude)); - w.appendTrackedTimestampsToMetadata(); - w.close(); - } - } - - @Override - public void close(TaskAttemptContext c) throws IOException, InterruptedException { - for (WriterLength wl: this.writers.values()) { - close(wl.writer); - } - } - }; - } - - /* - * Data structure to hold a Writer and amount of data written on it. - */ - static class WriterLength { - long written = 0; - StoreFileWriter writer = null; - } - - /** - * joins the table name and the family with a delimiter. - * @param tableName - * @param family - * @return - */ - private static byte[] join(String tableName, String family) { - return Bytes.toBytes(tableName + AT_DELIMITER + family); - } - - /** - * Runs inside the task to deserialize column family to compression algorithm - * map from the configuration. 
- * - * @param conf to read the serialized values from - * @return a map from column family to the configured compression algorithm - */ - @VisibleForTesting - static Map createFamilyCompressionMap(Configuration conf,final String tableName) { - Map compressionMap = new TreeMap(Bytes.BYTES_COMPARATOR); - Map tableConfigs = getTableConfigurations(conf, tableName); - if (tableConfigs == null) { - return compressionMap; + private static final Logger LOGGER = LoggerFactory.getLogger(MultiHfileOutputFormat.class); + + private static final String COMPRESSION_FAMILIES_CONF_KEY = + "hbase.hfileoutputformat.families.compression"; + private static final String BLOOM_TYPE_FAMILIES_CONF_KEY = + "hbase.hfileoutputformat.families.bloomtype"; + private static final String BLOCK_SIZE_FAMILIES_CONF_KEY = + "hbase.mapreduce.hfileoutputformat.blocksize"; + private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = + "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; + + public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY = + "hbase.mapreduce.hfileoutputformat.datablock.encoding"; + + /* Delimiter property used to separate table name and column family */ + private static final String AT_DELIMITER = "@"; + + @Override + public RecordWriter getRecordWriter(TaskAttemptContext context) + throws IOException, InterruptedException { + return createRecordWriter(context, this.getOutputCommitter(context)); + } + + /** + * + */ + static RecordWriter createRecordWriter( + final TaskAttemptContext context, final OutputCommitter committer) throws IOException { + // Get the path of the temporary output file + final Path outputdir = ((PathOutputCommitter) committer).getWorkPath(); + final Configuration conf = context.getConfiguration(); + final FileSystem fs = outputdir.getFileSystem(conf); + + final long maxsize = + conf.getLongBytes(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); + // Invented config. Add to hbase-*.xml if other than default compression. + final String defaultCompressionStr = + conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); + final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr); + final boolean compactionExclude = + conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false); + + return new RecordWriter() { + // Map of families to writers and how much has been output on the writer. + private final Map writers = + new TreeMap(Bytes.BYTES_COMPARATOR); + private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY; + private final long now = EnvironmentEdgeManager.currentTimeMillis(); + private boolean rollRequested = false; + + @Override + public void write(TableRowkeyPair row, V cell) throws IOException { + Cell kv = cell; + // null input == user explicitly wants to flush + if (row == null && kv == null) { + rollWriters(); + return; } - Map stringMap = createFamilyConfValueMap(tableConfigs,COMPRESSION_FAMILIES_CONF_KEY); - for (Map.Entry e : stringMap.entrySet()) { - Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); - compressionMap.put(e.getKey(), algorithm); - } - return compressionMap; - } - /** - * Returns the set of configurations that have been configured for the table during job initialization. 
- * @param conf - * @param tableName - * @return - */ - private static Map getTableConfigurations(Configuration conf, final String tableName) { - String tableDefn = conf.get(tableName); - if (StringUtils.isEmpty(tableDefn)) { - return null; + // phoenix-2216: start : extract table name from the rowkey + String tableName = row.getTableName(); + byte[] rowKey = row.getRowkey().get(); + int length = (PrivateCellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT; + byte[] family = CellUtil.cloneFamily(kv); + byte[] tableAndFamily = join(tableName, Bytes.toString(family)); + WriterLength wl = this.writers.get(tableAndFamily); + // phoenix-2216: end + + // If this is a new column family, verify that the directory exists + if (wl == null) { + // phoenix-2216: start : create a directory for table and family within the output dir + Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName); + fs.mkdirs(new Path(tableOutputPath, Bytes.toString(family))); + // phoenix-2216: end } - TargetTableRef table = TargetTableRefFunctions.FROM_JSON.apply(tableDefn); - Map tableConfigs = table.getConfiguration(); - return tableConfigs; - } - /** - * Runs inside the task to deserialize column family to bloom filter type - * map from the configuration. - * - * @param conf to read the serialized values from - * @return a map from column family to the the configured bloom filter type - */ - @VisibleForTesting - static Map createFamilyBloomTypeMap(Configuration conf,final String tableName) { - Map bloomTypeMap = new TreeMap(Bytes.BYTES_COMPARATOR); - Map tableConfigs = getTableConfigurations(conf, tableName); - if (tableConfigs == null) { - return bloomTypeMap; + // If any of the HFiles for the column families has reached + // maxsize, we need to roll all the writers + if (wl != null && wl.written + length >= maxsize) { + this.rollRequested = true; } - Map stringMap = createFamilyConfValueMap(tableConfigs,BLOOM_TYPE_FAMILIES_CONF_KEY); - for (Map.Entry e : stringMap.entrySet()) { - BloomType bloomType = BloomType.valueOf(e.getValue()); - bloomTypeMap.put(e.getKey(), bloomType); - } - return bloomTypeMap; - } - /** - * Runs inside the task to deserialize column family to block size - * map from the configuration. - * - * @param conf to read the serialized values from - * @return a map from column family to the configured block size - */ - @VisibleForTesting - static Map createFamilyBlockSizeMap(Configuration conf,final String tableName) { - Map blockSizeMap = new TreeMap(Bytes.BYTES_COMPARATOR); - Map tableConfigs = getTableConfigurations(conf, tableName); - if (tableConfigs == null) { - return blockSizeMap; - } - Map stringMap = createFamilyConfValueMap(tableConfigs,BLOCK_SIZE_FAMILIES_CONF_KEY); - for (Map.Entry e : stringMap.entrySet()) { - Integer blockSize = Integer.parseInt(e.getValue()); - blockSizeMap.put(e.getKey(), blockSize); + // This can only happen once a row is finished though + if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) { + rollWriters(); } - return blockSizeMap; - } - /** - * Runs inside the task to deserialize column family to data block encoding - * type map from the configuration. 
- * - * @param conf to read the serialized values from - * @return a map from column family to HFileDataBlockEncoder for the - * configured data block type for the family - */ - @VisibleForTesting - static Map createFamilyDataBlockEncodingMap(Configuration conf,final String tableName) { - - Map encoderMap = new TreeMap(Bytes.BYTES_COMPARATOR); - Map tableConfigs = getTableConfigurations(conf, tableName); - if (tableConfigs == null) { - return encoderMap; - } - Map stringMap = createFamilyConfValueMap(tableConfigs,DATABLOCK_ENCODING_FAMILIES_CONF_KEY); - for (Map.Entry e : stringMap.entrySet()) { - encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); + // create a new WAL writer, if necessary + if (wl == null || wl.writer == null) { + // phoenix-2216: start : passed even the table name + wl = getNewWriter(tableName, family, conf); + // phoenix-2216: end } - return encoderMap; - } - - /** - * Run inside the task to deserialize column family to given conf value map. - * - * @param conf to read the serialized values from - * @param confName conf key to read from the configuration - * @return a map of column family to the given configuration value - */ - private static Map createFamilyConfValueMap(Map configs, String confName) { - Map confValMap = new TreeMap(Bytes.BYTES_COMPARATOR); - String confVal = configs.get(confName); - if (StringUtils.isEmpty(confVal)) { - return confValMap; - } - for (String familyConf : confVal.split("&")) { - String[] familySplit = familyConf.split("="); - if (familySplit.length != 2) { - continue; - } - try { - confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8").getBytes(StandardCharsets.UTF_8), - URLDecoder.decode(familySplit[1], "UTF-8")); - } catch (UnsupportedEncodingException e) { - // will not happen with UTF-8 encoding - throw new AssertionError(e); - } + // we now have the proper WAL writer. full steam ahead + if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP) { + CellUtil.setTimestamp(cell, this.now); } - return confValMap; - } + wl.writer.append(kv); + wl.written += length; - - /** - * Configure job with a TotalOrderPartitioner, partitioning against - * splitPoints. Cleans up the partitions file after job exists. - */ - static void configurePartitioner(Job job, Set tablesStartKeys) - throws IOException { - - Configuration conf = job.getConfiguration(); - // create the partitions file - Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID()); - FileSystem fs = partitionsPath.getFileSystem(conf); - fs.makeQualified(partitionsPath); - writePartitions(conf, partitionsPath, tablesStartKeys); - fs.deleteOnExit(partitionsPath); - - // configure job to use it - job.setPartitionerClass(TotalOrderPartitioner.class); - TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); - } + // Copy the row so we know when a row transition. + this.previousRow = rowKey; + } - @SuppressWarnings(value="EC_ARRAY_AND_NONARRAY", - justification="ImmutableBytesWritable DOES implement equals(byte])") - private static void writePartitions(Configuration conf, Path partitionsPath, - Set tablesStartKeys) throws IOException { - - LOGGER.info("Writing partition information to " + partitionsPath); - if (tablesStartKeys.isEmpty()) { - throw new IllegalArgumentException("No regions passed"); + private void rollWriters() throws IOException { + for (WriterLength wl : this.writers.values()) { + if (wl.writer != null) { + LOGGER.info( + "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? 
"" : ", wrote=" + wl.written)); + close(wl.writer); + } + wl.writer = null; + wl.written = 0; } + this.rollRequested = false; + } - // We're generating a list of split points, and we don't ever - // have keys < the first region (which has an empty start key) - // so we need to remove it. Otherwise we would end up with an - // empty reducer with index 0 - TreeSet sorted = new TreeSet(tablesStartKeys); - - TableRowkeyPair first = sorted.first(); - if (!first.getRowkey().equals(HConstants.EMPTY_BYTE_ARRAY)) { - throw new IllegalArgumentException( - "First region of table should have empty start key. Instead has: " - + Bytes.toStringBinary(first.getRowkey().get())); + /* + * Create a new StoreFile.Writer. + * @return A WriterLength, containing a new StoreFile.Writer. + */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", + justification = "Not important") + private WriterLength getNewWriter(final String tableName, byte[] family, Configuration conf) + throws IOException { + + WriterLength wl = new WriterLength(); + Path tableOutputPath = CsvBulkImportUtil.getOutputPath(outputdir, tableName); + Path familydir = new Path(tableOutputPath, Bytes.toString(family)); + + // phoenix-2216: start : fetching the configuration properties that were set to the table. + // create a map from column family to the compression algorithm for the table. + final Map compressionMap = createFamilyCompressionMap(conf, tableName); + final Map bloomTypeMap = createFamilyBloomTypeMap(conf, tableName); + final Map blockSizeMap = createFamilyBlockSizeMap(conf, tableName); + // phoenix-2216: end + + String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); + final Map datablockEncodingMap = + createFamilyDataBlockEncodingMap(conf, tableName); + final DataBlockEncoding overriddenEncoding; + if (dataBlockEncodingStr != null) { + overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr); + } else { + overriddenEncoding = null; } - sorted.remove(first); - // Write the actual file - FileSystem fs = partitionsPath.getFileSystem(conf); - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, partitionsPath, TableRowkeyPair.class, - NullWritable.class); + Algorithm compression = compressionMap.get(family); + compression = compression == null ? defaultCompression : compression; + BloomType bloomType = bloomTypeMap.get(family); + bloomType = bloomType == null ? BloomType.NONE : bloomType; + Integer blockSize = blockSizeMap.get(family); + blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize; + DataBlockEncoding encoding = overriddenEncoding; + encoding = encoding == null ? datablockEncodingMap.get(family) : encoding; + encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; + Configuration tempConf = new Configuration(conf); + tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); + HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) + .withChecksumType(CompatUtil.getChecksumType(conf)) + .withBytesPerCheckSum(CompatUtil.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).withCellComparator(CellComparatorImpl.COMPARATOR); + HFileContext hFileContext = contextBuilder.build(); + + StoreFileWriter.Builder storeFileWriterBuilder = + new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs).withOutputDir(familydir) + .withBloomType(bloomType).withFileContext(hFileContext); + wl.writer = storeFileWriterBuilder.build(); + + // join and put it in the writers map . + // phoenix-2216: start : holds a map of writers where the + // key in the map is a join byte array of table name and family. + byte[] tableAndFamily = join(tableName, Bytes.toString(family)); + this.writers.put(tableAndFamily, wl); + // phoenix-2216: end + return wl; + } - try { - for (TableRowkeyPair startKey : sorted) { - writer.append(startKey, NullWritable.get()); - } - } finally { - writer.close(); + private void close(final StoreFileWriter w) throws IOException { + if (w != null) { + w.appendFileInfo(BULKLOAD_TIME_KEY, + Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis())); + w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); + w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); + w.appendTrackedTimestampsToMetadata(); + w.close(); } - - } + } - /** - * Serialize column family to compression algorithm map to configuration. - * Invoked while configuring the MR job for incremental load. - * - * @param table to read the properties from - * @param conf to persist serialized values into - * @throws IOException - * on failure to read column family descriptors - */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") - @VisibleForTesting - static String configureCompression(TableDescriptor tableDescriptor) - throws UnsupportedEncodingException { - - StringBuilder compressionConfigValue = new StringBuilder(); - if(tableDescriptor == null){ - // could happen with mock table instance - return compressionConfigValue.toString(); + @Override + public void close(TaskAttemptContext c) throws IOException, InterruptedException { + for (WriterLength wl : this.writers.values()) { + close(wl.writer); } - ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); - int i = 0; - for (ColumnFamilyDescriptor familyDescriptor : families) { - if (i++ > 0) { - compressionConfigValue.append('&'); - } - compressionConfigValue.append(URLEncoder.encode( - familyDescriptor.getNameAsString(), "UTF-8")); - compressionConfigValue.append('='); - compressionConfigValue.append(URLEncoder.encode( - familyDescriptor.getCompressionType().getName(), "UTF-8")); - } - return compressionConfigValue.toString(); + } + }; + } + + /* + * Data structure to hold a Writer and amount of data written on it. + */ + static class WriterLength { + long written = 0; + StoreFileWriter writer = null; + } + + /** + * joins the table name and the family with a delimiter. 
+ */ + private static byte[] join(String tableName, String family) { + return Bytes.toBytes(tableName + AT_DELIMITER + family); + } + + /** + * Runs inside the task to deserialize column family to compression algorithm map from the + * configuration. + * @param conf to read the serialized values from + * @return a map from column family to the configured compression algorithm + */ + @VisibleForTesting + static Map createFamilyCompressionMap(Configuration conf, + final String tableName) { + Map compressionMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map tableConfigs = getTableConfigurations(conf, tableName); + if (tableConfigs == null) { + return compressionMap; + } + Map stringMap = + createFamilyConfValueMap(tableConfigs, COMPRESSION_FAMILIES_CONF_KEY); + for (Map.Entry e : stringMap.entrySet()) { + Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); + compressionMap.put(e.getKey(), algorithm); + } + return compressionMap; + } + + /** + * Returns the set of configurations that have been configured for the table during job + * initialization. + */ + private static Map getTableConfigurations(Configuration conf, + final String tableName) { + String tableDefn = conf.get(tableName); + if (StringUtils.isEmpty(tableDefn)) { + return null; + } + TargetTableRef table = TargetTableRefFunctions.FROM_JSON.apply(tableDefn); + Map tableConfigs = table.getConfiguration(); + return tableConfigs; + } + + /** + * Runs inside the task to deserialize column family to bloom filter type map from the + * configuration. + * @param conf to read the serialized values from + * @return a map from column family to the the configured bloom filter type + */ + @VisibleForTesting + static Map createFamilyBloomTypeMap(Configuration conf, + final String tableName) { + Map bloomTypeMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map tableConfigs = getTableConfigurations(conf, tableName); + if (tableConfigs == null) { + return bloomTypeMap; + } + Map stringMap = + createFamilyConfValueMap(tableConfigs, BLOOM_TYPE_FAMILIES_CONF_KEY); + for (Map.Entry e : stringMap.entrySet()) { + BloomType bloomType = BloomType.valueOf(e.getValue()); + bloomTypeMap.put(e.getKey(), bloomType); + } + return bloomTypeMap; + } + + /** + * Runs inside the task to deserialize column family to block size map from the configuration. + * @param conf to read the serialized values from + * @return a map from column family to the configured block size + */ + @VisibleForTesting + static Map createFamilyBlockSizeMap(Configuration conf, final String tableName) { + Map blockSizeMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map tableConfigs = getTableConfigurations(conf, tableName); + if (tableConfigs == null) { + return blockSizeMap; + } + Map stringMap = + createFamilyConfValueMap(tableConfigs, BLOCK_SIZE_FAMILIES_CONF_KEY); + for (Map.Entry e : stringMap.entrySet()) { + Integer blockSize = Integer.parseInt(e.getValue()); + blockSizeMap.put(e.getKey(), blockSize); + } + return blockSizeMap; + } + + /** + * Runs inside the task to deserialize column family to data block encoding type map from the + * configuration. 
+ * @param conf to read the serialized values from + * @return a map from column family to HFileDataBlockEncoder for the configured data block type + * for the family + */ + @VisibleForTesting + static Map createFamilyDataBlockEncodingMap(Configuration conf, + final String tableName) { + + Map encoderMap = + new TreeMap(Bytes.BYTES_COMPARATOR); + Map tableConfigs = getTableConfigurations(conf, tableName); + if (tableConfigs == null) { + return encoderMap; + } + Map stringMap = + createFamilyConfValueMap(tableConfigs, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); + for (Map.Entry e : stringMap.entrySet()) { + encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); + } + return encoderMap; + } + + /** + * Run inside the task to deserialize column family to given conf value map. + * @param conf to read the serialized values from + * @param confName conf key to read from the configuration + * @return a map of column family to the given configuration value + */ + private static Map createFamilyConfValueMap(Map configs, + String confName) { + Map confValMap = new TreeMap(Bytes.BYTES_COMPARATOR); + String confVal = configs.get(confName); + if (StringUtils.isEmpty(confVal)) { + return confValMap; + } + for (String familyConf : confVal.split("&")) { + String[] familySplit = familyConf.split("="); + if (familySplit.length != 2) { + continue; + } + try { + confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8").getBytes(StandardCharsets.UTF_8), + URLDecoder.decode(familySplit[1], "UTF-8")); + } catch (UnsupportedEncodingException e) { + // will not happen with UTF-8 encoding + throw new AssertionError(e); + } + } + return confValMap; + } + + /** + * Configure job with a TotalOrderPartitioner, partitioning against + * splitPoints. Cleans up the partitions file after job exists. + */ + static void configurePartitioner(Job job, Set tablesStartKeys) + throws IOException { + + Configuration conf = job.getConfiguration(); + // create the partitions file + Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID()); + FileSystem fs = partitionsPath.getFileSystem(conf); + fs.makeQualified(partitionsPath); + writePartitions(conf, partitionsPath, tablesStartKeys); + fs.deleteOnExit(partitionsPath); + + // configure job to use it + job.setPartitionerClass(TotalOrderPartitioner.class); + TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); + } + + @SuppressWarnings(value = "EC_ARRAY_AND_NONARRAY", + justification = "ImmutableBytesWritable DOES implement equals(byte])") + private static void writePartitions(Configuration conf, Path partitionsPath, + Set tablesStartKeys) throws IOException { + + LOGGER.info("Writing partition information to " + partitionsPath); + if (tablesStartKeys.isEmpty()) { + throw new IllegalArgumentException("No regions passed"); } - /** - * Serialize column family to block size map to configuration. - * Invoked while configuring the MR job for incremental load. 
- * @param tableDescriptor to read the properties from - * @param conf to persist serialized values into - * - * @throws IOException - * on failure to read column family descriptors - */ - @VisibleForTesting - static String configureBlockSize(TableDescriptor tableDescriptor) - throws UnsupportedEncodingException { - StringBuilder blockSizeConfigValue = new StringBuilder(); - if (tableDescriptor == null) { - // could happen with mock table instance - return blockSizeConfigValue.toString(); - } - ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); - int i = 0; - for (ColumnFamilyDescriptor familyDescriptor : families) { - if (i++ > 0) { - blockSizeConfigValue.append('&'); - } - blockSizeConfigValue.append(URLEncoder.encode( - familyDescriptor.getNameAsString(), "UTF-8")); - blockSizeConfigValue.append('='); - blockSizeConfigValue.append(URLEncoder.encode( - String.valueOf(familyDescriptor.getBlocksize()), "UTF-8")); - } - return blockSizeConfigValue.toString(); + // We're generating a list of split points, and we don't ever + // have keys < the first region (which has an empty start key) + // so we need to remove it. Otherwise we would end up with an + // empty reducer with index 0 + TreeSet sorted = new TreeSet(tablesStartKeys); + + TableRowkeyPair first = sorted.first(); + if (!first.getRowkey().equals(HConstants.EMPTY_BYTE_ARRAY)) { + throw new IllegalArgumentException( + "First region of table should have empty start key. Instead has: " + + Bytes.toStringBinary(first.getRowkey().get())); } + sorted.remove(first); + + // Write the actual file + FileSystem fs = partitionsPath.getFileSystem(conf); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, + TableRowkeyPair.class, NullWritable.class); - /** - * Serialize column family to bloom type map to configuration. - * Invoked while configuring the MR job for incremental load. - * @param tableDescriptor to read the properties from - * @param conf to persist serialized values into - * - * @throws IOException - * on failure to read column family descriptors - */ - static String configureBloomType(TableDescriptor tableDescriptor) - throws UnsupportedEncodingException { - - StringBuilder bloomTypeConfigValue = new StringBuilder(); - - if (tableDescriptor == null) { - // could happen with mock table instance - return bloomTypeConfigValue.toString(); + try { + for (TableRowkeyPair startKey : sorted) { + writer.append(startKey, NullWritable.get()); + } + } finally { + writer.close(); + } + + } + + /** + * Serialize column family to compression algorithm map to configuration. Invoked while + * configuring the MR job for incremental load. 
+ * @param table to read the properties from + * @param conf to persist serialized values into on failure to read column family descriptors + */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + @VisibleForTesting + static String configureCompression(TableDescriptor tableDescriptor) + throws UnsupportedEncodingException { + + StringBuilder compressionConfigValue = new StringBuilder(); + if (tableDescriptor == null) { + // could happen with mock table instance + return compressionConfigValue.toString(); + } + ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); + int i = 0; + for (ColumnFamilyDescriptor familyDescriptor : families) { + if (i++ > 0) { + compressionConfigValue.append('&'); + } + compressionConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8")); + compressionConfigValue.append('='); + compressionConfigValue + .append(URLEncoder.encode(familyDescriptor.getCompressionType().getName(), "UTF-8")); + } + return compressionConfigValue.toString(); + } + + /** + * Serialize column family to block size map to configuration. Invoked while configuring the MR + * job for incremental load. + * @param tableDescriptor to read the properties from + * @param conf to persist serialized values into on failure to read column family + * descriptors + */ + @VisibleForTesting + static String configureBlockSize(TableDescriptor tableDescriptor) + throws UnsupportedEncodingException { + StringBuilder blockSizeConfigValue = new StringBuilder(); + if (tableDescriptor == null) { + // could happen with mock table instance + return blockSizeConfigValue.toString(); + } + ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); + int i = 0; + for (ColumnFamilyDescriptor familyDescriptor : families) { + if (i++ > 0) { + blockSizeConfigValue.append('&'); + } + blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8")); + blockSizeConfigValue.append('='); + blockSizeConfigValue + .append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8")); + } + return blockSizeConfigValue.toString(); + } + + /** + * Serialize column family to bloom type map to configuration. Invoked while configuring the MR + * job for incremental load. + * @param tableDescriptor to read the properties from + * @param conf to persist serialized values into on failure to read column family + * descriptors + */ + static String configureBloomType(TableDescriptor tableDescriptor) + throws UnsupportedEncodingException { + + StringBuilder bloomTypeConfigValue = new StringBuilder(); + + if (tableDescriptor == null) { + // could happen with mock table instance + return bloomTypeConfigValue.toString(); + } + ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); + int i = 0; + for (ColumnFamilyDescriptor familyDescriptor : families) { + if (i++ > 0) { + bloomTypeConfigValue.append('&'); + } + bloomTypeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8")); + bloomTypeConfigValue.append('='); + String bloomType = familyDescriptor.getBloomFilterType().toString(); + if (bloomType == null) { + bloomType = ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.toString(); + } + bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8")); + } + return bloomTypeConfigValue.toString(); + } + + /** + * Serialize column family to data block encoding map to configuration. Invoked while configuring + * the MR job for incremental load. 
+ * @param table to read the properties from + * @param conf to persist serialized values into on failure to read column family descriptors + */ + static String configureDataBlockEncoding(TableDescriptor tableDescriptor) + throws UnsupportedEncodingException { + + StringBuilder dataBlockEncodingConfigValue = new StringBuilder(); + + if (tableDescriptor == null) { + // could happen with mock table instance + return dataBlockEncodingConfigValue.toString(); + } + ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); + int i = 0; + for (ColumnFamilyDescriptor familyDescriptor : families) { + if (i++ > 0) { + dataBlockEncodingConfigValue.append('&'); + } + dataBlockEncodingConfigValue + .append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8")); + dataBlockEncodingConfigValue.append('='); + DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding(); + if (encoding == null) { + encoding = DataBlockEncoding.NONE; + } + dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(), "UTF-8")); + } + return dataBlockEncodingConfigValue.toString(); + } + + /** + * Configures the job for MultiHfileOutputFormat. + */ + @SuppressWarnings("deprecation") + public static void configureIncrementalLoad(Job job, List tablesToBeLoaded) + throws IOException { + + Configuration conf = job.getConfiguration(); + job.setOutputFormatClass(MultiHfileOutputFormat.class); + conf.setStrings("io.serializations", conf.get("io.serializations"), + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); + + // tableStartKeys for all tables. + Set tablesStartKeys = Sets.newTreeSet(); + for (TargetTableRef table : tablesToBeLoaded) { + final String tableName = table.getPhysicalName(); + try (Connection hbaseConn = ConnectionFactory.createConnection(conf);) { + Set startKeys = + getRegionStartKeys(tableName, hbaseConn.getRegionLocator(TableName.valueOf(tableName))); + tablesStartKeys.addAll(startKeys); + TableDescriptor tableDescriptor = + hbaseConn.getTable(TableName.valueOf(tableName)).getDescriptor(); + String compressionConfig = configureCompression(tableDescriptor); + String bloomTypeConfig = configureBloomType(tableDescriptor); + String blockSizeConfig = configureBlockSize(tableDescriptor); + String blockEncodingConfig = configureDataBlockEncoding(tableDescriptor); + Map tableConfigs = Maps.newHashMap(); + if (StringUtils.isNotBlank(compressionConfig)) { + tableConfigs.put(COMPRESSION_FAMILIES_CONF_KEY, compressionConfig); } - ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); - int i = 0; - for (ColumnFamilyDescriptor familyDescriptor : families) { - if (i++ > 0) { - bloomTypeConfigValue.append('&'); - } - bloomTypeConfigValue.append(URLEncoder.encode( - familyDescriptor.getNameAsString(), "UTF-8")); - bloomTypeConfigValue.append('='); - String bloomType = familyDescriptor.getBloomFilterType().toString(); - if (bloomType == null) { - bloomType = ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.toString(); - } - bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8")); + if (StringUtils.isNotBlank(bloomTypeConfig)) { + tableConfigs.put(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfig); } - return bloomTypeConfigValue.toString(); - } - - /** - * Serialize column family to data block encoding map to configuration. - * Invoked while configuring the MR job for incremental load. 
- * - * @param table to read the properties from - * @param conf to persist serialized values into - * @throws IOException - * on failure to read column family descriptors - */ - static String configureDataBlockEncoding(TableDescriptor tableDescriptor) throws UnsupportedEncodingException { - - StringBuilder dataBlockEncodingConfigValue = new StringBuilder(); - - if (tableDescriptor == null) { - // could happen with mock table instance - return dataBlockEncodingConfigValue.toString(); + if (StringUtils.isNotBlank(blockSizeConfig)) { + tableConfigs.put(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfig); } - ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies(); - int i = 0; - for (ColumnFamilyDescriptor familyDescriptor : families) { - if (i++ > 0) { - dataBlockEncodingConfigValue.append('&'); - } - dataBlockEncodingConfigValue.append( - URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8")); - dataBlockEncodingConfigValue.append('='); - DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding(); - if (encoding == null) { - encoding = DataBlockEncoding.NONE; - } - dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(), - "UTF-8")); + if (StringUtils.isNotBlank(blockEncodingConfig)) { + tableConfigs.put(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, blockEncodingConfig); } - return dataBlockEncodingConfigValue.toString(); - } + table.setConfiguration(tableConfigs); + final String tableDefns = TargetTableRefFunctions.TO_JSON.apply(table); + // set the table definition in the config to be used during the RecordWriter.. + conf.set(tableName, tableDefns); - /** - * Configures the job for MultiHfileOutputFormat. - * @param job - * @param tablesToBeLoaded - * @throws IOException - */ - @SuppressWarnings("deprecation") - public static void configureIncrementalLoad(Job job, List tablesToBeLoaded) throws IOException { - - Configuration conf = job.getConfiguration(); - job.setOutputFormatClass(MultiHfileOutputFormat.class); - conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); - - // tableStartKeys for all tables. 
- Set tablesStartKeys = Sets.newTreeSet(); - for (TargetTableRef table : tablesToBeLoaded) { - final String tableName = table.getPhysicalName(); - try(Connection hbaseConn = ConnectionFactory.createConnection(conf);){ - Set startKeys = - getRegionStartKeys(tableName, - hbaseConn.getRegionLocator(TableName.valueOf(tableName))); - tablesStartKeys.addAll(startKeys); - TableDescriptor tableDescriptor = hbaseConn.getTable(TableName.valueOf(tableName)).getDescriptor(); - String compressionConfig = configureCompression(tableDescriptor); - String bloomTypeConfig = configureBloomType(tableDescriptor); - String blockSizeConfig = configureBlockSize(tableDescriptor); - String blockEncodingConfig = configureDataBlockEncoding(tableDescriptor); - Map tableConfigs = Maps.newHashMap(); - if(StringUtils.isNotBlank(compressionConfig)) { - tableConfigs.put(COMPRESSION_FAMILIES_CONF_KEY, compressionConfig); - } - if(StringUtils.isNotBlank(bloomTypeConfig)) { - tableConfigs.put(BLOOM_TYPE_FAMILIES_CONF_KEY,bloomTypeConfig); - } - if(StringUtils.isNotBlank(blockSizeConfig)) { - tableConfigs.put(BLOCK_SIZE_FAMILIES_CONF_KEY,blockSizeConfig); - } - if(StringUtils.isNotBlank(blockEncodingConfig)) { - tableConfigs.put(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,blockEncodingConfig); - } - table.setConfiguration(tableConfigs); - final String tableDefns = TargetTableRefFunctions.TO_JSON.apply(table); - // set the table definition in the config to be used during the RecordWriter.. - conf.set(tableName, tableDefns); - - TargetTableRef tbl = TargetTableRefFunctions.FROM_JSON.apply(tableDefns); - LOGGER.info(" the table logical name is "+ tbl.getLogicalName()); - } - } - - LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count"); - job.setNumReduceTasks(tablesStartKeys.size()); - - configurePartitioner(job, tablesStartKeys); - TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.initCredentials(job); - - } - - /** - * Return the start keys of all of the regions in this table, - * as a list of ImmutableBytesWritable. - */ - private static Set getRegionStartKeys(String tableName , RegionLocator table) throws IOException { - byte[][] byteKeys = table.getStartKeys(); - Set ret = new TreeSet(); - for (byte[] byteKey : byteKeys) { - // phoenix-2216: start : passing the table name and startkey - ret.add(new TableRowkeyPair(tableName, new ImmutableBytesWritable(byteKey))); + TargetTableRef tbl = TargetTableRefFunctions.FROM_JSON.apply(tableDefns); + LOGGER.info(" the table logical name is " + tbl.getLogicalName()); } - return ret; } + + LOGGER.info( + "Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count"); + job.setNumReduceTasks(tablesStartKeys.size()); + + configurePartitioner(job, tablesStartKeys); + TableMapReduceUtil.addDependencyJars(job); + TableMapReduceUtil.initCredentials(job); + + } + + /** + * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. 
+ */ + private static Set getRegionStartKeys(String tableName, RegionLocator table) + throws IOException { + byte[][] byteKeys = table.getStartKeys(); + Set ret = new TreeSet(); + for (byte[] byteKey : byteKeys) { + // phoenix-2216: start : passing the table name and startkey + ret.add(new TableRowkeyPair(tableName, new ImmutableBytesWritable(byteKey))); + } + return ret; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java index 39d03be9e61..4902f20882a 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,8 +33,6 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.FileWriter; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; @@ -54,13 +52,6 @@ import java.util.Objects; import java.util.Properties; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -78,6 +69,13 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.PhoenixRuntime; import org.slf4j.Logger; @@ -85,879 +83,846 @@ /** * A tool to identify orphan views and links, and drop them - * */ public class OrphanViewTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewTool.class); - // Query all the views that are not "MAPPED" views - private static final String viewQuery = "SELECT " + - TENANT_ID + ", " + - TABLE_SCHEM + "," + - TABLE_NAME + - " FROM " + SYSTEM_CATALOG_NAME + - " WHERE "+ TABLE_TYPE + " = '" + PTableType.VIEW.getSerializedValue() +"' AND NOT " + - VIEW_TYPE + " = " + 
PTable.ViewType.MAPPED.getSerializedValue(); - // Query all physical links - private static final String physicalLinkQuery = "SELECT " + - TENANT_ID + ", " + - TABLE_SCHEM + ", " + - TABLE_NAME + ", " + - COLUMN_NAME + " AS PHYSICAL_TABLE_TENANT_ID, " + - COLUMN_FAMILY + " AS PHYSICAL_TABLE_FULL_NAME " + - " FROM " + SYSTEM_CATALOG_NAME + - " WHERE "+ LINK_TYPE + " = " + - PTable.LinkType.PHYSICAL_TABLE.getSerializedValue(); - // Query all child-parent links - private static final String childParentLinkQuery = "SELECT " + - TENANT_ID + ", " + - TABLE_SCHEM + ", " + - TABLE_NAME + ", " + - COLUMN_NAME + " AS PARENT_VIEW_TENANT_ID, " + - COLUMN_FAMILY + " AS PARENT_VIEW_FULL_NAME " + - " FROM " + SYSTEM_CATALOG_NAME + - " WHERE "+ LINK_TYPE + " = " + - PTable.LinkType.PARENT_TABLE.getSerializedValue(); - // Query all parent-child links - private static final String parentChildLinkQuery = "SELECT " + - TENANT_ID + ", " + - TABLE_SCHEM + ", " + - TABLE_NAME + ", " + - COLUMN_NAME + " AS CHILD_VIEW_TENANT_ID, " + - COLUMN_FAMILY + " AS CHILD_VIEW_FULL_NAME " + - " FROM " + SYSTEM_CHILD_LINK_NAME + - " WHERE "+ LINK_TYPE + " = " + - PTable.LinkType.CHILD_TABLE.getSerializedValue(); - - // Query all the tables that can be a base table - private static final String candidateBaseTableQuery = "SELECT " + - TENANT_ID + ", " + - TABLE_SCHEM + ", " + - TABLE_NAME + - " FROM " + SYSTEM_CATALOG_NAME + - " WHERE " + TABLE_TYPE + " != '" + PTableType.VIEW.getSerializedValue() + "' AND " + - TABLE_TYPE + " != '" + PTableType.INDEX.getSerializedValue() + "'"; - // The path of the directory of the output files - private String outputPath; - // The path of the directory of the input files - private String inputPath; - // The flag to indicate if the orphan views and links will be deleted - private boolean clean = false; - // The maximum level found in a view tree - private int maxViewLevel = 0; - // The age of a view - private static final long defaultAgeMs = 24*60*60*1000; // 1 day - private long ageMs = 0; - - // A separate file is maintained to list orphan views, and each type of orphan links - public static final byte VIEW = 0; - public static final byte PHYSICAL_TABLE_LINK = 1; - public static final byte PARENT_TABLE_LINK = 2; - public static final byte CHILD_TABLE_LINK = 3; - public static final byte ORPHAN_TYPE_COUNT = 4; - - BufferedWriter writer[] = new BufferedWriter[ORPHAN_TYPE_COUNT]; - BufferedReader reader[] = new BufferedReader[ORPHAN_TYPE_COUNT]; - - // The set of orphan views - HashMap orphanViewSet = new HashMap<>(); - // The array list of set of views such that the views in the first set are the first level views and the views - // in the second set is the second level views, and so on - List> viewSetArray = new ArrayList>(); - // The set of base tables - HashMap baseSet = new HashMap<>(); - // The set of orphan links. 
These links can be CHILD_TABLE, PARENT_TABLE, or PHYSICAL_TABLE links - HashSet orphanLinkSet = new HashSet<>(); - - public static final String fileName[] = {"OrphanView.txt", "OrphanPhysicalTableLink.txt", "OrphanParentTableLink.txt", "OrphanChildTableLink.txt"}; - private static final Option OUTPUT_PATH_OPTION = new Option("op", "output-path", true, - "Output path where the files listing orphan views and links are written"); - private static final Option INPUT_PATH_OPTION = new Option("ip", "input-path", true, - "Input path where the files listing orphan views and links are read"); - private static final Option CLEAN_ORPHAN_VIEWS_OPTION = new Option("c", "clean", false, - "If specified, cleans orphan views and links"); - private static final Option IDENTIFY_ORPHAN_VIEWS_OPTION = new Option("i", "identify", false, - "If specified, identifies orphan views and links"); - private static final Option AGE_OPTION = new Option("a", "age", true, - "The minimum age (in milliseconds) for the views (default value is " + Long.toString(defaultAgeMs) + ", i.e. 1 day)"); - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - - private Options getOptions() { - final Options options = new Options(); - options.addOption(OUTPUT_PATH_OPTION); - options.addOption(INPUT_PATH_OPTION); - options.addOption(CLEAN_ORPHAN_VIEWS_OPTION); - options.addOption(IDENTIFY_ORPHAN_VIEWS_OPTION); - options.addOption(AGE_OPTION); - options.addOption(HELP_OPTION); - return options; - } - - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * @param args supplied command line arguments - */ - private void parseOptions(String[] args) throws Exception { - - final Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). 
- build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - if (cmdLine.hasOption(OUTPUT_PATH_OPTION.getOpt()) && cmdLine.hasOption(INPUT_PATH_OPTION.getOpt())) { - throw new IllegalStateException("Specify either " + OUTPUT_PATH_OPTION.getLongOpt() + " or " - + INPUT_PATH_OPTION.getOpt()); - } - if (cmdLine.hasOption(INPUT_PATH_OPTION.getOpt()) && !cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt())) { - throw new IllegalStateException(INPUT_PATH_OPTION.getLongOpt() + " is only used with " - + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()); - } - if (cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()) && cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt())) { - throw new IllegalStateException("Specify either " + IDENTIFY_ORPHAN_VIEWS_OPTION.getLongOpt() + " or " - + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()); - } - if (cmdLine.hasOption(OUTPUT_PATH_OPTION.getOpt()) && (!cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()) && - !cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt()))) { - throw new IllegalStateException(OUTPUT_PATH_OPTION.getLongOpt() + " requires either " + - IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt() + " or " + CLEAN_ORPHAN_VIEWS_OPTION.getOpt()); - } - if (cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt())) { - clean = true; - } - else if (!cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt())) { - throw new IllegalStateException("Specify either " + - IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt() + " or " + CLEAN_ORPHAN_VIEWS_OPTION.getOpt()); - } - if (cmdLine.hasOption(AGE_OPTION.getOpt())) { - ageMs = Long.parseLong(cmdLine.getOptionValue(AGE_OPTION.getOpt())); - } - - outputPath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); - inputPath = cmdLine.getOptionValue(INPUT_PATH_OPTION.getOpt()); + private static final Logger LOGGER = LoggerFactory.getLogger(OrphanViewTool.class); + // Query all the views that are not "MAPPED" views + private static final String viewQuery = + "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + "," + TABLE_NAME + " FROM " + SYSTEM_CATALOG_NAME + + " WHERE " + TABLE_TYPE + " = '" + PTableType.VIEW.getSerializedValue() + "' AND NOT " + + VIEW_TYPE + " = " + PTable.ViewType.MAPPED.getSerializedValue(); + // Query all physical links + private static final String physicalLinkQuery = "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + ", " + + TABLE_NAME + ", " + COLUMN_NAME + " AS PHYSICAL_TABLE_TENANT_ID, " + COLUMN_FAMILY + + " AS PHYSICAL_TABLE_FULL_NAME " + " FROM " + SYSTEM_CATALOG_NAME + " WHERE " + LINK_TYPE + + " = " + PTable.LinkType.PHYSICAL_TABLE.getSerializedValue(); + // Query all child-parent links + private static final String childParentLinkQuery = "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + + ", " + TABLE_NAME + ", " + COLUMN_NAME + " AS PARENT_VIEW_TENANT_ID, " + COLUMN_FAMILY + + " AS PARENT_VIEW_FULL_NAME " + " FROM " + SYSTEM_CATALOG_NAME + " WHERE " + LINK_TYPE + " = " + + PTable.LinkType.PARENT_TABLE.getSerializedValue(); + // Query all parent-child links + private static final String parentChildLinkQuery = "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + + ", " + TABLE_NAME + ", " + COLUMN_NAME + " AS CHILD_VIEW_TENANT_ID, " + COLUMN_FAMILY + + " AS CHILD_VIEW_FULL_NAME " + " FROM " + SYSTEM_CHILD_LINK_NAME + " WHERE " + LINK_TYPE + + " = " + PTable.LinkType.CHILD_TABLE.getSerializedValue(); + + // Query 
all the tables that can be a base table + private static final String candidateBaseTableQuery = + "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME + " FROM " + SYSTEM_CATALOG_NAME + + " WHERE " + TABLE_TYPE + " != '" + PTableType.VIEW.getSerializedValue() + "' AND " + + TABLE_TYPE + " != '" + PTableType.INDEX.getSerializedValue() + "'"; + // The path of the directory of the output files + private String outputPath; + // The path of the directory of the input files + private String inputPath; + // The flag to indicate if the orphan views and links will be deleted + private boolean clean = false; + // The maximum level found in a view tree + private int maxViewLevel = 0; + // The age of a view + private static final long defaultAgeMs = 24 * 60 * 60 * 1000; // 1 day + private long ageMs = 0; + + // A separate file is maintained to list orphan views, and each type of orphan links + public static final byte VIEW = 0; + public static final byte PHYSICAL_TABLE_LINK = 1; + public static final byte PARENT_TABLE_LINK = 2; + public static final byte CHILD_TABLE_LINK = 3; + public static final byte ORPHAN_TYPE_COUNT = 4; + + BufferedWriter writer[] = new BufferedWriter[ORPHAN_TYPE_COUNT]; + BufferedReader reader[] = new BufferedReader[ORPHAN_TYPE_COUNT]; + + // The set of orphan views + HashMap orphanViewSet = new HashMap<>(); + // The array list of set of views such that the views in the first set are the first level views + // and the views + // in the second set is the second level views, and so on + List> viewSetArray = new ArrayList>(); + // The set of base tables + HashMap baseSet = new HashMap<>(); + // The set of orphan links. These links can be CHILD_TABLE, PARENT_TABLE, or PHYSICAL_TABLE links + HashSet orphanLinkSet = new HashSet<>(); + + public static final String fileName[] = { "OrphanView.txt", "OrphanPhysicalTableLink.txt", + "OrphanParentTableLink.txt", "OrphanChildTableLink.txt" }; + private static final Option OUTPUT_PATH_OPTION = new Option("op", "output-path", true, + "Output path where the files listing orphan views and links are written"); + private static final Option INPUT_PATH_OPTION = new Option("ip", "input-path", true, + "Input path where the files listing orphan views and links are read"); + private static final Option CLEAN_ORPHAN_VIEWS_OPTION = + new Option("c", "clean", false, "If specified, cleans orphan views and links"); + private static final Option IDENTIFY_ORPHAN_VIEWS_OPTION = + new Option("i", "identify", false, "If specified, identifies orphan views and links"); + private static final Option AGE_OPTION = new Option("a", "age", true, + "The minimum age (in milliseconds) for the views (default value is " + + Long.toString(defaultAgeMs) + ", i.e. 1 day)"); + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + + private Options getOptions() { + final Options options = new Options(); + options.addOption(OUTPUT_PATH_OPTION); + options.addOption(INPUT_PATH_OPTION); + options.addOption(CLEAN_ORPHAN_VIEWS_OPTION); + options.addOption(IDENTIFY_ORPHAN_VIEWS_OPTION); + options.addOption(AGE_OPTION); + options.addOption(HELP_OPTION); + return options; + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. 
+ * @param args supplied command line arguments + */ + private void parseOptions(String[] args) throws Exception { + + final Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + if ( + cmdLine.hasOption(OUTPUT_PATH_OPTION.getOpt()) + && cmdLine.hasOption(INPUT_PATH_OPTION.getOpt()) + ) { + throw new IllegalStateException( + "Specify either " + OUTPUT_PATH_OPTION.getLongOpt() + " or " + INPUT_PATH_OPTION.getOpt()); } - - /** - * The key that uniquely identifies a table (i.e., a base table or table view) - */ - private static class Key { - private String serializedValue; - - public Key (String tenantId, String schemaName, String tableName) throws IllegalArgumentException { - if (tableName == null) { - throw new IllegalArgumentException(); - } - serializedValue = (tenantId != null ? tenantId + "," : ",") + - (schemaName != null ? schemaName + "," : ",") + - tableName; - } - - public Key (String tenantId, String fullTableName) { - String[] columns = fullTableName.split("\\."); - String schemaName; - String tableName; - if (columns.length == 1) { - schemaName = null; - tableName = fullTableName; - } else { - schemaName = columns[0]; - tableName = columns[1]; - } - if (tableName == null || tableName.compareTo("") == 0) { - throw new IllegalArgumentException(); - } - serializedValue = (tenantId != null ? tenantId + "," : ",") + - (schemaName != null ? schemaName + "," : ",") + - tableName; - } - - public Key (String serializedKey) { - serializedValue = serializedKey; - if (this.getTableName() == null || this.getTableName().compareTo("") == 0) { - throw new IllegalArgumentException(); - } - } - - public String getTenantId() { - String[] columns = serializedValue.split(","); - return columns[0].compareTo("") == 0 ? null : columns[0]; - } - - public String getSchemaName() { - String[] columns = serializedValue.split(","); - return columns[1].compareTo("") == 0 ? 
null : columns[1]; - } - - public String getTableName() { - String[] columns = serializedValue.split(","); - return columns[2]; - } - - public String getSerializedValue() { - return serializedValue; - } - @Override - public int hashCode() { - return Objects.hash(getSerializedValue()); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (getClass() != obj.getClass()) - return false; - Key other = (Key) obj; - if (this.getSerializedValue().compareTo(other.getSerializedValue()) != 0) - return false; - return true; - } + if ( + cmdLine.hasOption(INPUT_PATH_OPTION.getOpt()) + && !cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt()) + ) { + throw new IllegalStateException(INPUT_PATH_OPTION.getLongOpt() + " is only used with " + + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()); + } + if ( + cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()) + && cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt()) + ) { + throw new IllegalStateException("Specify either " + IDENTIFY_ORPHAN_VIEWS_OPTION.getLongOpt() + + " or " + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()); + } + if ( + cmdLine.hasOption(OUTPUT_PATH_OPTION.getOpt()) + && (!cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt()) + && !cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt())) + ) { + throw new IllegalStateException(OUTPUT_PATH_OPTION.getLongOpt() + " requires either " + + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt() + " or " + CLEAN_ORPHAN_VIEWS_OPTION.getOpt()); + } + if (cmdLine.hasOption(CLEAN_ORPHAN_VIEWS_OPTION.getOpt())) { + clean = true; + } else if (!cmdLine.hasOption(IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt())) { + throw new IllegalStateException("Specify either " + IDENTIFY_ORPHAN_VIEWS_OPTION.getOpt() + + " or " + CLEAN_ORPHAN_VIEWS_OPTION.getOpt()); + } + if (cmdLine.hasOption(AGE_OPTION.getOpt())) { + ageMs = Long.parseLong(cmdLine.getOptionValue(AGE_OPTION.getOpt())); } - /** - * An abstract class to represent a table that can be a base table or table view - */ - private static abstract class Table { - protected Key key; - protected List childViews; + outputPath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); + inputPath = cmdLine.getOptionValue(INPUT_PATH_OPTION.getOpt()); + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + /** + * The key that uniquely identifies a table (i.e., a base table or table view) + */ + private static class Key { + private String serializedValue; + + public Key(String tenantId, String schemaName, String tableName) + throws IllegalArgumentException { + if (tableName == null) { + throw new IllegalArgumentException(); + } + serializedValue = (tenantId != null ? tenantId + "," : ",") + + (schemaName != null ? 
schemaName + "," : ",") + tableName; + } - public void addChild(Key childView) { - if (childViews == null) { - childViews = new LinkedList<>(); - } - childViews.add(childView); - } + public Key(String tenantId, String fullTableName) { + String[] columns = fullTableName.split("\\."); + String schemaName; + String tableName; + if (columns.length == 1) { + schemaName = null; + tableName = fullTableName; + } else { + schemaName = columns[0]; + tableName = columns[1]; + } + if (tableName == null || tableName.compareTo("") == 0) { + throw new IllegalArgumentException(); + } + serializedValue = (tenantId != null ? tenantId + "," : ",") + + (schemaName != null ? schemaName + "," : ",") + tableName; + } - public boolean isParent() { - if (childViews == null || childViews.isEmpty()) { - return false; - } - return true; - } + public Key(String serializedKey) { + serializedValue = serializedKey; + if (this.getTableName() == null || this.getTableName().compareTo("") == 0) { + throw new IllegalArgumentException(); + } } - /** - * A class to represents a base table - */ - private static class Base extends Table { - public Base (Key key) { - this.key = key; - } + public String getTenantId() { + String[] columns = serializedValue.split(","); + return columns[0].compareTo("") == 0 ? null : columns[0]; } - /** - * A class to represents a table view - */ - private static class View extends Table { - Key parent; - Key base; + public String getSchemaName() { + String[] columns = serializedValue.split(","); + return columns[1].compareTo("") == 0 ? null : columns[1]; + } - public View (Key key) { - this.key = key; - } + public String getTableName() { + String[] columns = serializedValue.split(","); + return columns[2]; + } - public void setParent(Key parent) { - this.parent = parent; - } + public String getSerializedValue() { + return serializedValue; + } - public void setBase(Key base) { - this.base = base; - } + @Override + public int hashCode() { + return Objects.hash(getSerializedValue()); } - private static class Link { - Key src; - Key dst; - PTable.LinkType type; + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (getClass() != obj.getClass()) return false; + Key other = (Key) obj; + if (this.getSerializedValue().compareTo(other.getSerializedValue()) != 0) return false; + return true; + } + } + + /** + * An abstract class to represent a table that can be a base table or table view + */ + private static abstract class Table { + protected Key key; + protected List childViews; + + public void addChild(Key childView) { + if (childViews == null) { + childViews = new LinkedList<>(); + } + childViews.add(childView); + } - public Link(Key src, Key dst, PTable.LinkType type) { - this.src = src; - this.dst = dst; - this.type = type; - } + public boolean isParent() { + if (childViews == null || childViews.isEmpty()) { + return false; + } + return true; + } + } + + /** + * A class to represents a base table + */ + private static class Base extends Table { + public Base(Key key) { + this.key = key; + } + } - public String serialize() { - return src.getSerializedValue() + "," + dst.getSerializedValue() + "," + type.toString(); - } + /** + * A class to represents a table view + */ + private static class View extends Table { + Key parent; + Key base; - @Override - public int hashCode() { - return Objects.hash(serialize()); - } + public View(Key key) { + this.key = key; } - private void gracefullyDropView(PhoenixConnection phoenixConnection, - Configuration configuration, Key key) throws 
Exception { - PhoenixConnection tenantConnection = null; - boolean newConn = false; - try { - if (key.getTenantId() != null) { - Properties tenantProps = new Properties(); - tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, key.getTenantId()); - tenantConnection = ConnectionUtil.getInputConnection(configuration, tenantProps). - unwrap(PhoenixConnection.class); - newConn = true; - } else { - tenantConnection = phoenixConnection; - } - - MetaDataClient client = new MetaDataClient(tenantConnection); - org.apache.phoenix.parse.TableName pTableName = org.apache.phoenix.parse.TableName - .create(key.getSchemaName(), key.getTableName()); - try { - client.dropTable( - new DropTableStatement(pTableName, PTableType.VIEW, false, true, true)); - } - catch (TableNotFoundException e) { - LOGGER.info("Ignoring view " + pTableName + " as it has already been dropped"); - } - } finally { - if (newConn) { - // TODO can this be rewritten with try-with-resources ? - tryClosingConnection(tenantConnection); - } - } + public void setParent(Key parent) { + this.parent = parent; } - private void removeLink(PhoenixConnection phoenixConnection, Key src, Key dst, PTable.LinkType linkType) throws Exception { - String delTable = (linkType == PTable.LinkType.PHYSICAL_TABLE - || linkType == PTable.LinkType.PARENT_TABLE) ? SYSTEM_CATALOG_NAME - : SYSTEM_CHILD_LINK_NAME; + public void setBase(Key base) { + this.base = base; + } + } - String deleteQuery = String.format(" DELETE FROM %s WHERE " + TENANT_ID + " %s AND " - + TABLE_SCHEM + " %s AND " + TABLE_NAME + " = ? AND " + COLUMN_NAME + " %s AND " - + COLUMN_FAMILY + " = ? ", delTable, - src.getTenantId() == null ? " IS NULL" : " = ? ", - src.getSchemaName() == null ? " IS NULL " : " = ? ", - dst.getTenantId() == null ? " IS NULL" : " = ?"); + private static class Link { + Key src; + Key dst; + PTable.LinkType type; - try (PreparedStatement delStmt = phoenixConnection.prepareStatement(deleteQuery)) { - int param = 0; - if (src.getTenantId() != null) { - delStmt.setString(++param, src.getTenantId()); - } - if (src.getSchemaName() != null) { - delStmt.setString(++param, src.getSchemaName()); - } - delStmt.setString(++param, src.getTableName()); - if (dst.getTenantId() != null) { - delStmt.setString(++param, dst.getTenantId()); - } - if (dst.getSchemaName() == null) { - delStmt.setString(++param, dst.getTableName()); - } else { - delStmt.setString(++param, dst.getSchemaName() + "." 
+ dst.getTableName()); - } - delStmt.execute(); - phoenixConnection.commit(); - } + public Link(Key src, Key dst, PTable.LinkType type) { + this.src = src; + this.dst = dst; + this.type = type; } - private byte getLinkType(PTable.LinkType linkType) { - byte type; - if (linkType == PTable.LinkType.PHYSICAL_TABLE) { - type = PHYSICAL_TABLE_LINK; - } - else if (linkType == PTable.LinkType.PARENT_TABLE) { - type = PARENT_TABLE_LINK; - } else if (linkType == PTable.LinkType.CHILD_TABLE) { - type = CHILD_TABLE_LINK; - } - else { - throw new AssertionError("Unknown Link Type"); - } - return type; + public String serialize() { + return src.getSerializedValue() + "," + dst.getSerializedValue() + "," + type.toString(); } - private PTable.LinkType getLinkType(byte linkType) { - PTable.LinkType type; - if (linkType == PHYSICAL_TABLE_LINK) { - type = PTable.LinkType.PHYSICAL_TABLE; - } - else if (linkType == PARENT_TABLE_LINK) { - type = PTable.LinkType.PARENT_TABLE; - } else if (linkType == CHILD_TABLE_LINK) { - type = PTable.LinkType.CHILD_TABLE; - } - else { - throw new AssertionError("Unknown Link Type"); - } - return type; - } - - private void removeOrLogOrphanLinks(PhoenixConnection phoenixConnection) { - for (Link link : orphanLinkSet) { - try { - byte linkType = getLinkType(link.type); - if (outputPath != null) { - writer[linkType].write(link.src.getSerializedValue() + "-->" + link.dst.getSerializedValue()); - writer[linkType].newLine(); - } - else if (!clean){ - System.out.println(link.src.getSerializedValue() + "-(" + link.type + ")->" + link.dst.getSerializedValue()); - } - if (clean) { - removeLink(phoenixConnection, link.src, link.dst, link.type); - } - } catch (Exception e) { - // ignore - } - } + @Override + public int hashCode() { + return Objects.hash(serialize()); } - - - private void forcefullyDropView(PhoenixConnection phoenixConnection, - Key key) throws Exception { - String deleteRowsFromCatalog = String.format("DELETE FROM " + SYSTEM_CATALOG_NAME - + " WHERE " + TENANT_ID + " %s AND " + TABLE_SCHEM + " %s AND " - + TABLE_NAME + " = ? ", - key.getTenantId() == null ? " IS NULL" : " = ? ", - key.getSchemaName() == null ? " IS NULL " : " = ? "); - String deleteRowsFromChildLink = String.format("DELETE FROM " + SYSTEM_CHILD_LINK_NAME - + " WHERE " + COLUMN_NAME + " %s AND " + COLUMN_FAMILY + " = ? ", - key.getTenantId() == null ? " IS NULL" : " = ? "); - try { - try (PreparedStatement delSysCat = - phoenixConnection.prepareStatement(deleteRowsFromCatalog)) { - int param = 0; - if (key.getTenantId() != null) { - delSysCat.setString(++param, key.getTenantId()); - } - if (key.getSchemaName() != null) { - delSysCat.setString(++param, key.getSchemaName()); - } - delSysCat.setString(++param, key.getTableName()); - delSysCat.execute(); - } - try (PreparedStatement delChLink = - phoenixConnection.prepareStatement(deleteRowsFromChildLink)) { - int param = 0; - if (key.getTenantId() != null) { - delChLink.setString(++param, key.getTenantId()); - } - delChLink.setString(++param, key.getSchemaName() == null - ? key.getTableName() : (key.getSchemaName() + "." 
+ key.getTableName())); - delChLink.execute(); - } - phoenixConnection.commit(); - } catch (SQLException e) { - throw new IOException(e); - } + } + + private void gracefullyDropView(PhoenixConnection phoenixConnection, Configuration configuration, + Key key) throws Exception { + PhoenixConnection tenantConnection = null; + boolean newConn = false; + try { + if (key.getTenantId() != null) { + Properties tenantProps = new Properties(); + tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, key.getTenantId()); + tenantConnection = ConnectionUtil.getInputConnection(configuration, tenantProps) + .unwrap(PhoenixConnection.class); + newConn = true; + } else { + tenantConnection = phoenixConnection; + } + + MetaDataClient client = new MetaDataClient(tenantConnection); + org.apache.phoenix.parse.TableName pTableName = + org.apache.phoenix.parse.TableName.create(key.getSchemaName(), key.getTableName()); + try { + client.dropTable(new DropTableStatement(pTableName, PTableType.VIEW, false, true, true)); + } catch (TableNotFoundException e) { + LOGGER.info("Ignoring view " + pTableName + " as it has already been dropped"); + } + } finally { + if (newConn) { + // TODO can this be rewritten with try-with-resources ? + tryClosingConnection(tenantConnection); + } + } + } + + private void removeLink(PhoenixConnection phoenixConnection, Key src, Key dst, + PTable.LinkType linkType) throws Exception { + String delTable = + (linkType == PTable.LinkType.PHYSICAL_TABLE || linkType == PTable.LinkType.PARENT_TABLE) + ? SYSTEM_CATALOG_NAME + : SYSTEM_CHILD_LINK_NAME; + + String deleteQuery = String.format( + " DELETE FROM %s WHERE " + TENANT_ID + " %s AND " + TABLE_SCHEM + " %s AND " + TABLE_NAME + + " = ? AND " + COLUMN_NAME + " %s AND " + COLUMN_FAMILY + " = ? ", + delTable, src.getTenantId() == null ? " IS NULL" : " = ? ", + src.getSchemaName() == null ? " IS NULL " : " = ? ", + dst.getTenantId() == null ? " IS NULL" : " = ?"); + + try (PreparedStatement delStmt = phoenixConnection.prepareStatement(deleteQuery)) { + int param = 0; + if (src.getTenantId() != null) { + delStmt.setString(++param, src.getTenantId()); + } + if (src.getSchemaName() != null) { + delStmt.setString(++param, src.getSchemaName()); + } + delStmt.setString(++param, src.getTableName()); + if (dst.getTenantId() != null) { + delStmt.setString(++param, dst.getTenantId()); + } + if (dst.getSchemaName() == null) { + delStmt.setString(++param, dst.getTableName()); + } else { + delStmt.setString(++param, dst.getSchemaName() + "." 
+ dst.getTableName()); + } + delStmt.execute(); + phoenixConnection.commit(); + } + } + + private byte getLinkType(PTable.LinkType linkType) { + byte type; + if (linkType == PTable.LinkType.PHYSICAL_TABLE) { + type = PHYSICAL_TABLE_LINK; + } else if (linkType == PTable.LinkType.PARENT_TABLE) { + type = PARENT_TABLE_LINK; + } else if (linkType == PTable.LinkType.CHILD_TABLE) { + type = CHILD_TABLE_LINK; + } else { + throw new AssertionError("Unknown Link Type"); } + return type; + } + + private PTable.LinkType getLinkType(byte linkType) { + PTable.LinkType type; + if (linkType == PHYSICAL_TABLE_LINK) { + type = PTable.LinkType.PHYSICAL_TABLE; + } else if (linkType == PARENT_TABLE_LINK) { + type = PTable.LinkType.PARENT_TABLE; + } else if (linkType == CHILD_TABLE_LINK) { + type = PTable.LinkType.CHILD_TABLE; + } else { + throw new AssertionError("Unknown Link Type"); + } + return type; + } - private void dropOrLogOrphanViews(PhoenixConnection phoenixConnection, Configuration configuration, - Key key) throws Exception { + private void removeOrLogOrphanLinks(PhoenixConnection phoenixConnection) { + for (Link link : orphanLinkSet) { + try { + byte linkType = getLinkType(link.type); if (outputPath != null) { - writer[VIEW].write(key.getSerializedValue()); - writer[VIEW].newLine(); - } - else if (!clean) { - System.out.println(key.getSerializedValue()); - return; - } - if (!clean) { - return; - } - gracefullyDropView(phoenixConnection, configuration, key); - } - - /** - * Go through all the views in the system catalog table and add them to orphanViewSet - * @param phoenixConnection - * @throws Exception - */ - private void populateOrphanViewSet(PhoenixConnection phoenixConnection) - throws Exception { - ResultSet viewRS = phoenixConnection.createStatement().executeQuery(viewQuery); - while (viewRS.next()) { - String tenantId = viewRS.getString(1); - String schemaName = viewRS.getString(2); - String tableName = viewRS.getString(3); - Key key = new Key(tenantId, schemaName, tableName); - View view = new View(key); - orphanViewSet.put(key, view); - } + writer[linkType] + .write(link.src.getSerializedValue() + "-->" + link.dst.getSerializedValue()); + writer[linkType].newLine(); + } else if (!clean) { + System.out.println(link.src.getSerializedValue() + "-(" + link.type + ")->" + + link.dst.getSerializedValue()); + } + if (clean) { + removeLink(phoenixConnection, link.src, link.dst, link.type); + } + } catch (Exception e) { + // ignore + } } - - /** - * Go through all the tables in the system catalog table and update baseSet - * @param phoenixConnection - * @throws Exception - */ - private void populateBaseSet(PhoenixConnection phoenixConnection) - throws Exception { - ResultSet baseTableRS = phoenixConnection.createStatement().executeQuery(candidateBaseTableQuery); - while (baseTableRS.next()) { - String tenantId = baseTableRS.getString(1); - String schemaName = baseTableRS.getString(2); - String tableName = baseTableRS.getString(3); - Key key = new Key(tenantId, schemaName, tableName); - Base base = new Base(key); - baseSet.put(key, base); - } + } + + private void forcefullyDropView(PhoenixConnection phoenixConnection, Key key) throws Exception { + String deleteRowsFromCatalog = String.format( + "DELETE FROM " + SYSTEM_CATALOG_NAME + " WHERE " + TENANT_ID + " %s AND " + TABLE_SCHEM + + " %s AND " + TABLE_NAME + " = ? ", + key.getTenantId() == null ? " IS NULL" : " = ? ", + key.getSchemaName() == null ? " IS NULL " : " = ? 
"); + String deleteRowsFromChildLink = + String.format("DELETE FROM " + SYSTEM_CHILD_LINK_NAME + " WHERE " + COLUMN_NAME + " %s AND " + + COLUMN_FAMILY + " = ? ", key.getTenantId() == null ? " IS NULL" : " = ? "); + try { + try ( + PreparedStatement delSysCat = phoenixConnection.prepareStatement(deleteRowsFromCatalog)) { + int param = 0; + if (key.getTenantId() != null) { + delSysCat.setString(++param, key.getTenantId()); + } + if (key.getSchemaName() != null) { + delSysCat.setString(++param, key.getSchemaName()); + } + delSysCat.setString(++param, key.getTableName()); + delSysCat.execute(); + } + try ( + PreparedStatement delChLink = phoenixConnection.prepareStatement(deleteRowsFromChildLink)) { + int param = 0; + if (key.getTenantId() != null) { + delChLink.setString(++param, key.getTenantId()); + } + delChLink.setString(++param, + key.getSchemaName() == null + ? key.getTableName() + : (key.getSchemaName() + "." + key.getTableName())); + delChLink.execute(); + } + phoenixConnection.commit(); + } catch (SQLException e) { + throw new IOException(e); } - - /** - * Go through all the physical links in the system catalog table and update the base table info of the - * view objects in orphanViewSet. If the base or view object does not exist for a given link, then add the link - * to orphanLinkSet - * @param phoenixConnection - * @throws Exception - */ - private void processPhysicalLinks(PhoenixConnection phoenixConnection) - throws Exception { - ResultSet physicalLinkRS = phoenixConnection.createStatement().executeQuery(physicalLinkQuery); - while (physicalLinkRS.next()) { - String tenantId = physicalLinkRS.getString(1); - String schemaName = physicalLinkRS.getString(2); - String tableName = physicalLinkRS.getString(3); - Key viewKey = new Key(tenantId, schemaName, tableName); - View view = orphanViewSet.get(viewKey); - - String baseTenantId = physicalLinkRS.getString(4); - String baseFullTableName = physicalLinkRS.getString(5); - Key baseKey = new Key(baseTenantId, baseFullTableName); - Base base = baseSet.get(baseKey); - - if (view == null || base == null) { - orphanLinkSet.add(new Link(viewKey, baseKey, PTable.LinkType.PHYSICAL_TABLE)); - } - else { - view.setBase(baseKey); - } - } + } + + private void dropOrLogOrphanViews(PhoenixConnection phoenixConnection, + Configuration configuration, Key key) throws Exception { + if (outputPath != null) { + writer[VIEW].write(key.getSerializedValue()); + writer[VIEW].newLine(); + } else if (!clean) { + System.out.println(key.getSerializedValue()); + return; } - - /** - * Go through all the child-parent links and update the parent field of the view objects of orphanViewSet. - * Check if the child does not exist add the link to orphanLinkSet. 
- * @param phoenixConnection - * @throws Exception - */ - private void processChildParentLinks(PhoenixConnection phoenixConnection) - throws Exception { - ResultSet childParentLinkRS = phoenixConnection.createStatement().executeQuery(childParentLinkQuery); - while (childParentLinkRS.next()) { - String childTenantId = childParentLinkRS.getString(1); - String childSchemaName = childParentLinkRS.getString(2); - String childTableName = childParentLinkRS.getString(3); - Key childKey = new Key(childTenantId, childSchemaName, childTableName); - View childView = orphanViewSet.get(childKey); - - String parentTenantId = childParentLinkRS.getString(4); - String parentFullTableName = childParentLinkRS.getString(5); - Key parentKey = new Key(parentTenantId, parentFullTableName); - View parentView = orphanViewSet.get(parentKey); - - // Check if parentTenantId is not set but it should have been the same as the childTenantId. Is this a bug? - if (childView != null && parentView == null && parentTenantId == null && childTenantId != null) { - Key anotherParentKey = new Key(childTenantId, parentFullTableName); - parentView = orphanViewSet.get(anotherParentKey); - if (parentView != null) { - parentKey = anotherParentKey; - } - } - - if (childView == null || parentView == null) { - orphanLinkSet.add(new Link(childKey, parentKey, PTable.LinkType.PARENT_TABLE)); - } - else { - childView.setParent(parentKey); - } - } + if (!clean) { + return; } - - /** - * Go through all the parent-child links and update the parent field of the - * child view objects of orphanViewSet and the child links of the parent objects (which can be a view from - * orphanViewSet or a base table from baseSet. Check if the child or parent does not exist, and if so, add the link - * to orphanLinkSet. - * @param phoenixConnection - * @throws Exception - */ - private void processParentChildLinks(PhoenixConnection phoenixConnection) - throws Exception { - ResultSet parentChildLinkRS = phoenixConnection.createStatement().executeQuery(parentChildLinkQuery); - while (parentChildLinkRS.next()) { - String tenantId = parentChildLinkRS.getString(1); - String schemaName = parentChildLinkRS.getString(2); - String tableName = parentChildLinkRS.getString(3); - Key parentKey = new Key(tenantId, schemaName, tableName); - Base base = baseSet.get(parentKey); - View parentView = orphanViewSet.get(parentKey); - - String childTenantId = parentChildLinkRS.getString(4); - String childFullTableName = parentChildLinkRS.getString(5); - Key childKey = new Key(childTenantId, childFullTableName); - View childView = orphanViewSet.get(childKey); - - if (childView == null) { - // No child for this link - orphanLinkSet.add(new Link(parentKey, childKey, PTable.LinkType.CHILD_TABLE)); - } - else if (base != null) { - base.addChild(childKey); - } - else if (parentView != null) { - parentView.addChild(childKey); - } - else { - // No parent for this link - orphanLinkSet.add(new Link(parentKey, childKey, PTable.LinkType.CHILD_TABLE)); - } - } + gracefullyDropView(phoenixConnection, configuration, key); + } + + /** + * Go through all the views in the system catalog table and add them to orphanViewSet + */ + private void populateOrphanViewSet(PhoenixConnection phoenixConnection) throws Exception { + ResultSet viewRS = phoenixConnection.createStatement().executeQuery(viewQuery); + while (viewRS.next()) { + String tenantId = viewRS.getString(1); + String schemaName = viewRS.getString(2); + String tableName = viewRS.getString(3); + Key key = new Key(tenantId, schemaName, tableName); + 
View view = new View(key); + orphanViewSet.put(key, view); } - - private void removeBaseTablesWithNoChildViewFromBaseSet() { - Iterator> iterator = baseSet.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - if (entry.getValue().childViews == null || entry.getValue().childViews.isEmpty()) { - iterator.remove(); - } - } + } + + /** + * Go through all the tables in the system catalog table and update baseSet + */ + private void populateBaseSet(PhoenixConnection phoenixConnection) throws Exception { + ResultSet baseTableRS = + phoenixConnection.createStatement().executeQuery(candidateBaseTableQuery); + while (baseTableRS.next()) { + String tenantId = baseTableRS.getString(1); + String schemaName = baseTableRS.getString(2); + String tableName = baseTableRS.getString(3); + Key key = new Key(tenantId, schemaName, tableName); + Base base = new Base(key); + baseSet.put(key, base); } - - /** - * Starting from the child views of the base tables from baseSet, visit views level by level and identify - * missing or broken links, and thereby identify orphan vies - */ - private void visitViewsLevelByLevelAndIdentifyOrphanViews() { - if (baseSet.isEmpty()) - return; - HashMap viewSet = new HashMap<>(); - viewSetArray.add(0, viewSet); - // Remove the child views of the tables of baseSet from orphanViewSet and add them to viewSetArray[0] - // if these views have the correct physical link - for (Map.Entry baseEntry : baseSet.entrySet()) { - for (Key child : baseEntry.getValue().childViews) { - View childView = orphanViewSet.get(child); - if (childView != null && - childView.base != null && childView.base.equals(baseEntry.getKey())) { - orphanViewSet.remove(child); - viewSet.put(child, childView); - } - } - } - HashMap parentViewSet = viewSet; - // Remove the child views of viewSetArray[N] from orphanViewSet and add them to viewSetArray[N+1] - // if these view have the correct physical link and parent link - maxViewLevel = 1; - for (int i = 1; !parentViewSet.isEmpty(); i++) { - HashMap childViewSet = new HashMap<>(); - viewSetArray.add(i, childViewSet); - for (Map.Entry viewEntry : parentViewSet.entrySet()) { - View parentView = viewEntry.getValue(); - Key parentKey = viewEntry.getKey(); - if (parentView.isParent()) { - for (Key child : parentView.childViews) { - View childView = orphanViewSet.get(child); - if (childView != null && - childView.parent != null && childView.parent.equals(parentKey) && - childView.base != null && childView.base.equals(parentView.base)) { - orphanViewSet.remove(child); - childViewSet.put(child, childView); - } - } - } - } - parentViewSet = childViewSet; - maxViewLevel += 1; - } + } + + /** + * Go through all the physical links in the system catalog table and update the base table info of + * the view objects in orphanViewSet. 
If the base or view object does not exist for a given link, + * then add the link to orphanLinkSet + */ + private void processPhysicalLinks(PhoenixConnection phoenixConnection) throws Exception { + ResultSet physicalLinkRS = phoenixConnection.createStatement().executeQuery(physicalLinkQuery); + while (physicalLinkRS.next()) { + String tenantId = physicalLinkRS.getString(1); + String schemaName = physicalLinkRS.getString(2); + String tableName = physicalLinkRS.getString(3); + Key viewKey = new Key(tenantId, schemaName, tableName); + View view = orphanViewSet.get(viewKey); + + String baseTenantId = physicalLinkRS.getString(4); + String baseFullTableName = physicalLinkRS.getString(5); + Key baseKey = new Key(baseTenantId, baseFullTableName); + Base base = baseSet.get(baseKey); + + if (view == null || base == null) { + orphanLinkSet.add(new Link(viewKey, baseKey, PTable.LinkType.PHYSICAL_TABLE)); + } else { + view.setBase(baseKey); + } } - - private void identifyOrphanViews(PhoenixConnection phoenixConnection) - throws Exception { - if (inputPath != null) { - readOrphanViews(); - return; - } - // Go through the views and add them to orphanViewSet - populateOrphanViewSet(phoenixConnection); - // Go through the candidate base tables and add them to baseSet - populateBaseSet(phoenixConnection); - // Go through physical links and update the views of orphanLinkSet - processPhysicalLinks(phoenixConnection); - // Go through the parent-child links and update the views of orphanViewSet and the tables of baseSet - processParentChildLinks(phoenixConnection); - // Go through index-view links and update the views of orphanLinkSet - processChildParentLinks(phoenixConnection); - - if (baseSet == null) - return; - // Remove the base tables with no child from baseSet - removeBaseTablesWithNoChildViewFromBaseSet(); - // Starting from the child views of the base tables, visit views level by level and identify - // missing or broken links and thereby identify orphan vies - visitViewsLevelByLevelAndIdentifyOrphanViews(); - } - - private void createSnapshot(PhoenixConnection phoenixConnection, long scn) - throws Exception { - try (Admin admin = phoenixConnection.getQueryServices().getAdmin()) { - admin.snapshot("OrphanViewTool." + scn, TableName - .valueOf(SYSTEM_CATALOG_NAME)); - admin.snapshot("OrphanViewTool." + (scn + 1), TableName - .valueOf(SYSTEM_CHILD_LINK_NAME)); - } + } + + /** + * Go through all the child-parent links and update the parent field of the view objects of + * orphanViewSet. Check if the child does not exist add the link to orphanLinkSet. + */ + private void processChildParentLinks(PhoenixConnection phoenixConnection) throws Exception { + ResultSet childParentLinkRS = + phoenixConnection.createStatement().executeQuery(childParentLinkQuery); + while (childParentLinkRS.next()) { + String childTenantId = childParentLinkRS.getString(1); + String childSchemaName = childParentLinkRS.getString(2); + String childTableName = childParentLinkRS.getString(3); + Key childKey = new Key(childTenantId, childSchemaName, childTableName); + View childView = orphanViewSet.get(childKey); + + String parentTenantId = childParentLinkRS.getString(4); + String parentFullTableName = childParentLinkRS.getString(5); + Key parentKey = new Key(parentTenantId, parentFullTableName); + View parentView = orphanViewSet.get(parentKey); + + // Check if parentTenantId is not set but it should have been the same as the childTenantId. + // Is this a bug? 
+ if ( + childView != null && parentView == null && parentTenantId == null && childTenantId != null + ) { + Key anotherParentKey = new Key(childTenantId, parentFullTableName); + parentView = orphanViewSet.get(anotherParentKey); + if (parentView != null) { + parentKey = anotherParentKey; + } + } + + if (childView == null || parentView == null) { + orphanLinkSet.add(new Link(childKey, parentKey, PTable.LinkType.PARENT_TABLE)); + } else { + childView.setParent(parentKey); + } } - - private void readOrphanViews() throws Exception { - String aLine; - reader[VIEW] = new BufferedReader(new InputStreamReader( - new FileInputStream(Paths.get(inputPath, fileName[VIEW]).toFile()), StandardCharsets.UTF_8)); - while ((aLine = reader[VIEW].readLine()) != null) { - Key key = new Key(aLine); - orphanViewSet.put(key, new View(key)); - } + } + + /** + * Go through all the parent-child links and update the parent field of the child view objects of + * orphanViewSet and the child links of the parent objects (which can be a view from orphanViewSet + * or a base table from baseSet. Check if the child or parent does not exist, and if so, add the + * link to orphanLinkSet. + */ + private void processParentChildLinks(PhoenixConnection phoenixConnection) throws Exception { + ResultSet parentChildLinkRS = + phoenixConnection.createStatement().executeQuery(parentChildLinkQuery); + while (parentChildLinkRS.next()) { + String tenantId = parentChildLinkRS.getString(1); + String schemaName = parentChildLinkRS.getString(2); + String tableName = parentChildLinkRS.getString(3); + Key parentKey = new Key(tenantId, schemaName, tableName); + Base base = baseSet.get(parentKey); + View parentView = orphanViewSet.get(parentKey); + + String childTenantId = parentChildLinkRS.getString(4); + String childFullTableName = parentChildLinkRS.getString(5); + Key childKey = new Key(childTenantId, childFullTableName); + View childView = orphanViewSet.get(childKey); + + if (childView == null) { + // No child for this link + orphanLinkSet.add(new Link(parentKey, childKey, PTable.LinkType.CHILD_TABLE)); + } else if (base != null) { + base.addChild(childKey); + } else if (parentView != null) { + parentView.addChild(childKey); + } else { + // No parent for this link + orphanLinkSet.add(new Link(parentKey, childKey, PTable.LinkType.CHILD_TABLE)); + } } - - private void readAndRemoveOrphanLinks(PhoenixConnection phoenixConnection) throws Exception{ - String aLine; - for (byte i = VIEW+1; i < ORPHAN_TYPE_COUNT; i++) { - reader[i] = new BufferedReader(new InputStreamReader( - new FileInputStream(Paths.get(inputPath, fileName[i]).toFile()), StandardCharsets.UTF_8)); - while ((aLine = reader[i].readLine()) != null) { - String ends[] = aLine.split("-->"); - removeLink(phoenixConnection, new Key(ends[0]), new Key(ends[1]), getLinkType(i)); - } - } + } + + private void removeBaseTablesWithNoChildViewFromBaseSet() { + Iterator> iterator = baseSet.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + if (entry.getValue().childViews == null || entry.getValue().childViews.isEmpty()) { + iterator.remove(); + } } - - private void closeConnectionAndFiles(Connection connection) throws IOException { - tryClosingConnection(connection); - for (byte i = VIEW; i < ORPHAN_TYPE_COUNT; i++) { - if (writer[i] != null) { - writer[i].close(); - } - if (reader[i] != null) { - reader[i].close(); - } - } + } + + /** + * Starting from the child views of the base tables from baseSet, visit views level by level and + * identify missing or 
broken links, and thereby identify orphan vies + */ + private void visitViewsLevelByLevelAndIdentifyOrphanViews() { + if (baseSet.isEmpty()) return; + HashMap viewSet = new HashMap<>(); + viewSetArray.add(0, viewSet); + // Remove the child views of the tables of baseSet from orphanViewSet and add them to + // viewSetArray[0] + // if these views have the correct physical link + for (Map.Entry baseEntry : baseSet.entrySet()) { + for (Key child : baseEntry.getValue().childViews) { + View childView = orphanViewSet.get(child); + if ( + childView != null && childView.base != null && childView.base.equals(baseEntry.getKey()) + ) { + orphanViewSet.remove(child); + viewSet.put(child, childView); + } + } } - - /** - * Try closing a connection if it is not null - * @param connection connection object - * @throws RuntimeException if closing the connection fails - */ - private void tryClosingConnection(Connection connection) { - try { - if (connection != null) { - connection.close(); - } - } catch (SQLException sqlE) { - LOGGER.error("Failed to close connection: ", sqlE); - throw new RuntimeException("Failed to close connection with exception: ", sqlE); - } + HashMap parentViewSet = viewSet; + // Remove the child views of viewSetArray[N] from orphanViewSet and add them to + // viewSetArray[N+1] + // if these view have the correct physical link and parent link + maxViewLevel = 1; + for (int i = 1; !parentViewSet.isEmpty(); i++) { + HashMap childViewSet = new HashMap<>(); + viewSetArray.add(i, childViewSet); + for (Map.Entry viewEntry : parentViewSet.entrySet()) { + View parentView = viewEntry.getValue(); + Key parentKey = viewEntry.getKey(); + if (parentView.isParent()) { + for (Key child : parentView.childViews) { + View childView = orphanViewSet.get(child); + if ( + childView != null && childView.parent != null && childView.parent.equals(parentKey) + && childView.base != null && childView.base.equals(parentView.base) + ) { + orphanViewSet.remove(child); + childViewSet.put(child, childView); + } + } + } + } + parentViewSet = childViewSet; + maxViewLevel += 1; } + } - /** - * Examples for input arguments: - * -c : cleans orphan views - * -c -op /tmp/ : cleans orphan views and links, and logs their names to the files named Orphan*.txt in /tmp/ - * -i : identifies orphan views and links, and prints their names on the console - * -i -op /tmp/ : identifies orphan views and links, and logs the name of their names to files named Orphan*.txt in /tmp/ - * -c -ip /tmp/ : cleans the views listed in files at /tmp/ - */ - @Override - public int run(String[] args) throws Exception { - Connection connection = null; + private void identifyOrphanViews(PhoenixConnection phoenixConnection) throws Exception { + if (inputPath != null) { + readOrphanViews(); + return; + } + // Go through the views and add them to orphanViewSet + populateOrphanViewSet(phoenixConnection); + // Go through the candidate base tables and add them to baseSet + populateBaseSet(phoenixConnection); + // Go through physical links and update the views of orphanLinkSet + processPhysicalLinks(phoenixConnection); + // Go through the parent-child links and update the views of orphanViewSet and the tables of + // baseSet + processParentChildLinks(phoenixConnection); + // Go through index-view links and update the views of orphanLinkSet + processChildParentLinks(phoenixConnection); + + if (baseSet == null) return; + // Remove the base tables with no child from baseSet + removeBaseTablesWithNoChildViewFromBaseSet(); + // Starting from the child views of the 
base tables, visit views level by level and identify + // missing or broken links and thereby identify orphan vies + visitViewsLevelByLevelAndIdentifyOrphanViews(); + } + + private void createSnapshot(PhoenixConnection phoenixConnection, long scn) throws Exception { + try (Admin admin = phoenixConnection.getQueryServices().getAdmin()) { + admin.snapshot("OrphanViewTool." + scn, TableName.valueOf(SYSTEM_CATALOG_NAME)); + admin.snapshot("OrphanViewTool." + (scn + 1), TableName.valueOf(SYSTEM_CHILD_LINK_NAME)); + } + } + + private void readOrphanViews() throws Exception { + String aLine; + reader[VIEW] = new BufferedReader(new InputStreamReader( + new FileInputStream(Paths.get(inputPath, fileName[VIEW]).toFile()), StandardCharsets.UTF_8)); + while ((aLine = reader[VIEW].readLine()) != null) { + Key key = new Key(aLine); + orphanViewSet.put(key, new View(key)); + } + } + + private void readAndRemoveOrphanLinks(PhoenixConnection phoenixConnection) throws Exception { + String aLine; + for (byte i = VIEW + 1; i < ORPHAN_TYPE_COUNT; i++) { + reader[i] = new BufferedReader(new InputStreamReader( + new FileInputStream(Paths.get(inputPath, fileName[i]).toFile()), StandardCharsets.UTF_8)); + while ((aLine = reader[i].readLine()) != null) { + String ends[] = aLine.split("-->"); + removeLink(phoenixConnection, new Key(ends[0]), new Key(ends[1]), getLinkType(i)); + } + } + } + + private void closeConnectionAndFiles(Connection connection) throws IOException { + tryClosingConnection(connection); + for (byte i = VIEW; i < ORPHAN_TYPE_COUNT; i++) { + if (writer[i] != null) { + writer[i].close(); + } + if (reader[i] != null) { + reader[i].close(); + } + } + } + + /** + * Try closing a connection if it is not null + * @param connection connection object + * @throws RuntimeException if closing the connection fails + */ + private void tryClosingConnection(Connection connection) { + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException sqlE) { + LOGGER.error("Failed to close connection: ", sqlE); + throw new RuntimeException("Failed to close connection with exception: ", sqlE); + } + } + + /** + * Examples for input arguments: -c : cleans orphan views -c -op /tmp/ : cleans orphan views and + * links, and logs their names to the files named Orphan*.txt in /tmp/ -i : identifies orphan + * views and links, and prints their names on the console -i -op /tmp/ : identifies orphan views + * and links, and logs the name of their names to files named Orphan*.txt in /tmp/ -c -ip /tmp/ : + * cleans the views listed in files at /tmp/ + */ + @Override + public int run(String[] args) throws Exception { + Connection connection = null; + try { + final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf()); + + try { + parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + } + if (outputPath != null) { + // Create files to log orphan views and links + for (int i = VIEW; i < ORPHAN_TYPE_COUNT; i++) { + File file = Paths.get(outputPath, fileName[i]).toFile(); + if (file.exists()) { + file.delete(); + } + file.createNewFile(); + writer[i] = new BufferedWriter( + new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8)); + } + } + Properties props = new Properties(); + long scn = EnvironmentEdgeManager.currentTimeMillis() - ageMs; + props.setProperty("CurrentSCN", Long.toString(scn)); + connection = ConnectionUtil.getInputConnection(configuration, props); + PhoenixConnection phoenixConnection = 
connection.unwrap(PhoenixConnection.class); + identifyOrphanViews(phoenixConnection); + if (clean) { + // Close the connection with SCN + phoenixConnection.close(); + connection = ConnectionUtil.getInputConnection(configuration); + phoenixConnection = connection.unwrap(PhoenixConnection.class); + // Take a snapshot of system tables to be modified + createSnapshot(phoenixConnection, scn); + } + for (Map.Entry entry : orphanViewSet.entrySet()) { try { - final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf()); - - try { - parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - } - if (outputPath != null) { - // Create files to log orphan views and links - for (int i = VIEW; i < ORPHAN_TYPE_COUNT; i++) { - File file = Paths.get(outputPath, fileName[i]).toFile(); - if (file.exists()) { - file.delete(); - } - file.createNewFile(); - writer[i] = new BufferedWriter(new OutputStreamWriter( - new FileOutputStream(file), StandardCharsets.UTF_8)); - } - } - Properties props = new Properties(); - long scn = EnvironmentEdgeManager.currentTimeMillis() - ageMs; - props.setProperty("CurrentSCN", Long.toString(scn)); - connection = ConnectionUtil.getInputConnection(configuration, props); - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - identifyOrphanViews(phoenixConnection); - if (clean) { - // Close the connection with SCN - phoenixConnection.close(); - connection = ConnectionUtil.getInputConnection(configuration); - phoenixConnection = connection.unwrap(PhoenixConnection.class); - // Take a snapshot of system tables to be modified - createSnapshot(phoenixConnection, scn); - } - for (Map.Entry entry : orphanViewSet.entrySet()) { - try { - dropOrLogOrphanViews(phoenixConnection, configuration, entry.getKey()); - } catch (Exception e) { - // Ignore - } - }; - if (clean) { - // Wait for the view drop tasks in the SYSTEM.TASK table to be processed - long timeInterval = configuration.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, - QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS); - Thread.sleep(maxViewLevel * timeInterval); - // Clean up any remaining orphan view records from system tables - for (Map.Entry entry : orphanViewSet.entrySet()) { - try { - forcefullyDropView(phoenixConnection, entry.getKey()); - } catch (Exception e) { - // Ignore - } - }; - } - if (inputPath == null) { - removeOrLogOrphanLinks(phoenixConnection); - } - else { - readAndRemoveOrphanLinks(phoenixConnection); - } - return 0; - } catch (Exception ex) { - LOGGER.error("Orphan View Tool : An exception occurred " - + ExceptionUtils.getMessage(ex) + " at:\n" + - ExceptionUtils.getStackTrace(ex)); - return -1; - } finally { - // TODO use try-with-resources at least for the Connection ? 
- closeConnectionAndFiles(connection); - } + dropOrLogOrphanViews(phoenixConnection, configuration, entry.getKey()); + } catch (Exception e) { + // Ignore + } + } + ; + if (clean) { + // Wait for the view drop tasks in the SYSTEM.TASK table to be processed + long timeInterval = configuration.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, + QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS); + Thread.sleep(maxViewLevel * timeInterval); + // Clean up any remaining orphan view records from system tables + for (Map.Entry entry : orphanViewSet.entrySet()) { + try { + forcefullyDropView(phoenixConnection, entry.getKey()); + } catch (Exception e) { + // Ignore + } + } + ; + } + if (inputPath == null) { + removeOrLogOrphanLinks(phoenixConnection); + } else { + readAndRemoveOrphanLinks(phoenixConnection); + } + return 0; + } catch (Exception ex) { + LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + + " at:\n" + ExceptionUtils.getStackTrace(ex)); + return -1; + } finally { + // TODO use try-with-resources at least for the Connection ? + closeConnectionAndFiles(connection); } + } - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new OrphanViewTool(), args); - System.exit(result); - } + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new OrphanViewTool(), args); + System.exit(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java index f0e8e4a9d12..66f692905db 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,15 @@ */ package org.apache.phoenix.mapreduce; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.ThreadLocalRandom; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -35,6 +41,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.db.DBWritable; import org.apache.phoenix.compile.QueryPlan; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.iterate.MapReduceParallelScanGrouper; import org.apache.phoenix.iterate.ParallelScanGrouper; import org.apache.phoenix.jdbc.PhoenixStatement; @@ -44,250 +51,241 @@ import org.apache.phoenix.query.HBaseFactoryProvider; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.stats.StatisticsUtil; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.PhoenixRuntime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.ThreadLocalRandom; - /** * {@link InputFormat} implementation from Phoenix. - * */ -public class PhoenixInputFormat extends InputFormat { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixInputFormat.class); - - /** - * instantiated by framework - */ - public PhoenixInputFormat() { - } +public class PhoenixInputFormat extends InputFormat { - @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { - final Configuration configuration = context.getConfiguration(); - final QueryPlan queryPlan = getQueryPlan(context,configuration); - @SuppressWarnings("unchecked") - final Class inputClass = (Class) PhoenixConfigurationUtil.getInputClass(configuration); - return getPhoenixRecordReader(inputClass, configuration, queryPlan); - } + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixInputFormat.class); - @Override - public List getSplits(JobContext context) throws IOException, InterruptedException { - final Configuration configuration = context.getConfiguration(); - final QueryPlan queryPlan = getQueryPlan(context,configuration); - return generateSplits(queryPlan, configuration); - } + /** + * instantiated by framework + */ + public PhoenixInputFormat() { + } - /** - * Randomise the length parameter of the splits to ensure random execution order. - * Yarn orders splits by size before execution. 
- * - * @param splits - */ - protected void randomizeSplitLength(List splits) { - LOGGER.info("Randomizing split size"); - if (splits.size() == 0) { - return; - } - double defaultLength = 1000000d; - double totalLength = splits.stream().mapToDouble(s -> { - try { - return (double) s.getLength(); - } catch (IOException | InterruptedException e1) { - return defaultLength; - } - }).sum(); - long avgLength = (long) (totalLength / splits.size()); - splits.stream().forEach(s -> ((PhoenixInputSplit) s) - .setLength(avgLength + ThreadLocalRandom.current().nextInt(10000))); + @Override + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { + final Configuration configuration = context.getConfiguration(); + final QueryPlan queryPlan = getQueryPlan(context, configuration); + @SuppressWarnings("unchecked") + final Class inputClass = (Class) PhoenixConfigurationUtil.getInputClass(configuration); + return getPhoenixRecordReader(inputClass, configuration, queryPlan); + } + + @Override + public List getSplits(JobContext context) throws IOException, InterruptedException { + final Configuration configuration = context.getConfiguration(); + final QueryPlan queryPlan = getQueryPlan(context, configuration); + return generateSplits(queryPlan, configuration); + } + + /** + * Randomise the length parameter of the splits to ensure random execution order. Yarn orders + * splits by size before execution. + */ + protected void randomizeSplitLength(List splits) { + LOGGER.info("Randomizing split size"); + if (splits.size() == 0) { + return; } + double defaultLength = 1000000d; + double totalLength = splits.stream().mapToDouble(s -> { + try { + return (double) s.getLength(); + } catch (IOException | InterruptedException e1) { + return defaultLength; + } + }).sum(); + long avgLength = (long) (totalLength / splits.size()); + splits.stream().forEach(s -> ((PhoenixInputSplit) s) + .setLength(avgLength + ThreadLocalRandom.current().nextInt(10000))); + } + + protected List generateSplits(final QueryPlan qplan, Configuration config) + throws IOException { + // We must call this in order to initialize the scans and splits from the query plan + setupParallelScansFromQueryPlan(qplan); + final List splits = qplan.getSplits(); + Preconditions.checkNotNull(splits); + + // Get the RegionSizeCalculator + try (org.apache.hadoop.hbase.client.Connection connection = + HBaseFactoryProvider.getHConnectionFactory().createConnection(config)) { + RegionLocator regionLocator = connection.getRegionLocator( + TableName.valueOf(qplan.getTableRef().getTable().getPhysicalName().toString())); + RegionSizeCalculator sizeCalculator = + new RegionSizeCalculator(regionLocator, connection.getAdmin()); + + final List psplits = Lists.newArrayListWithExpectedSize(splits.size()); + for (List scans : qplan.getScans()) { + // Get the region location + HRegionLocation location = + regionLocator.getRegionLocation(scans.get(0).getStartRow(), false); + + String regionLocation = location.getHostname(); - protected List generateSplits(final QueryPlan qplan, Configuration config) - throws IOException { - // We must call this in order to initialize the scans and splits from the query plan - setupParallelScansFromQueryPlan(qplan); - final List splits = qplan.getSplits(); - Preconditions.checkNotNull(splits); - - // Get the RegionSizeCalculator - try (org.apache.hadoop.hbase.client.Connection connection = - HBaseFactoryProvider.getHConnectionFactory().createConnection(config)) { - 
RegionLocator regionLocator = - connection.getRegionLocator(TableName - .valueOf(qplan.getTableRef().getTable().getPhysicalName().toString())); - RegionSizeCalculator sizeCalculator = - new RegionSizeCalculator(regionLocator, connection.getAdmin()); - - final List psplits = Lists.newArrayListWithExpectedSize(splits.size()); - for (List scans : qplan.getScans()) { - // Get the region location - HRegionLocation location = - regionLocator.getRegionLocation(scans.get(0).getStartRow(), false); - - String regionLocation = location.getHostname(); - - // Get the region size - long regionSize = - sizeCalculator.getRegionSize(location.getRegion().getRegionName()); - - // Generate splits based off statistics, or just region splits? - boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config); - - if (splitByStats) { - for (Scan aScan : scans) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Split for scan : " + aScan + "with scanAttribute : " - + aScan.getAttributesMap() - + " [scanCache, cacheBlock, scanBatch] : [" + aScan.getCaching() - + ", " + aScan.getCacheBlocks() + ", " + aScan.getBatch() - + "] and regionLocation : " + regionLocation); - } - - // The size is bogus, but it's not a problem - psplits.add(new PhoenixInputSplit(Collections.singletonList(aScan), - regionSize, regionLocation)); - } - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Scan count[" + scans.size() + "] : " - + Bytes.toStringBinary(scans.get(0).getStartRow()) + " ~ " - + Bytes.toStringBinary(scans.get(scans.size() - 1).getStopRow())); - LOGGER.debug("First scan : " + scans.get(0) + "with scanAttribute : " - + scans.get(0).getAttributesMap() - + " [scanCache, cacheBlock, scanBatch] : " + "[" - + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks() - + ", " + scans.get(0).getBatch() + "] and regionLocation : " - + regionLocation); - - for (int i = 0, limit = scans.size(); i < limit; i++) { - LOGGER.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " - + Bytes.toStringBinary(scans.get(i).getAttribute( - BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY))); - } - } - - psplits.add(new PhoenixInputSplit(scans, regionSize, regionLocation)); - } + // Get the region size + long regionSize = sizeCalculator.getRegionSize(location.getRegion().getRegionName()); + + // Generate splits based off statistics, or just region splits? 
+ boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config); + + if (splitByStats) { + for (Scan aScan : scans) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Split for scan : " + aScan + "with scanAttribute : " + + aScan.getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" + + aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan.getBatch() + + "] and regionLocation : " + regionLocation); } - if (PhoenixConfigurationUtil.isMRRandomizeMapperExecutionOrder(config)) { - randomizeSplitLength(psplits); + // The size is bogus, but it's not a problem + psplits.add( + new PhoenixInputSplit(Collections.singletonList(aScan), regionSize, regionLocation)); + } + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Scan count[" + scans.size() + "] : " + + Bytes.toStringBinary(scans.get(0).getStartRow()) + " ~ " + + Bytes.toStringBinary(scans.get(scans.size() - 1).getStopRow())); + LOGGER.debug("First scan : " + scans.get(0) + "with scanAttribute : " + + scans.get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " + "[" + + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks() + ", " + + scans.get(0).getBatch() + "] and regionLocation : " + regionLocation); + + for (int i = 0, limit = scans.size(); i < limit; i++) { + LOGGER + .debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes.toStringBinary(scans.get(i) + .getAttribute(BaseScannerRegionObserverConstants.EXPECTED_UPPER_REGION_KEY))); } + } - return psplits; + psplits.add(new PhoenixInputSplit(scans, regionSize, regionLocation)); } + } + + if (PhoenixConfigurationUtil.isMRRandomizeMapperExecutionOrder(config)) { + randomizeSplitLength(psplits); + } + + return psplits; } + } - /** - * Returns the query plan associated with the select query. 
- * @param context - * @return - * @throws IOException - */ - protected QueryPlan getQueryPlan(final JobContext context, final Configuration configuration) - throws IOException { - Preconditions.checkNotNull(context); - try { - final String txnScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - final String currentScnValue = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); - final String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); - final Properties overridingProps = new Properties(); - if (txnScnValue == null && currentScnValue != null) { - overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue); - } - if (tenantId != null && configuration.get(PhoenixRuntime.TENANT_ID_ATTRIB) == null){ - overridingProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - try (final Connection connection = ConnectionUtil.getInputConnection(configuration, overridingProps); - final Statement statement = connection.createStatement()) { - - MRJobType mrJobType = PhoenixConfigurationUtil.getMRJobType(configuration, MRJobType.QUERY.name()); - - String selectStatement; - switch (mrJobType) { - case UPDATE_STATS: - // This select statement indicates MR job for full table scan for stats collection - selectStatement = "SELECT * FROM " + PhoenixConfigurationUtil.getInputTableName(configuration); - break; - default: - selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); - } - Preconditions.checkNotNull(selectStatement); - - final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class); - // Optimize the query plan so that we potentially use secondary indexes - final QueryPlan queryPlan = pstmt.optimizeQuery(selectStatement); - final Scan scan = queryPlan.getContext().getScan(); - - if (mrJobType == MRJobType.UPDATE_STATS) { - StatisticsUtil.setScanAttributes(scan, null); - } - - // since we can't set a scn on connections with txn set TX_SCN attribute so that the max time range is set by BaseScannerRegionObserver - if (txnScnValue != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, Bytes.toBytes(Long.parseLong(txnScnValue))); - } - - // setting the snapshot configuration - String snapshotName = configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); - String restoreDir = configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY); - boolean isSnapshotRestoreManagedExternally = PhoenixConfigurationUtil.isMRSnapshotManagedExternally(configuration); - Configuration config = queryPlan.getContext(). - getConnection().getQueryServices().getConfiguration(); - if (snapshotName != null) { - PhoenixConfigurationUtil.setSnapshotNameKey(config, snapshotName); - PhoenixConfigurationUtil.setRestoreDirKey(config, restoreDir); - PhoenixConfigurationUtil.setMRSnapshotManagedExternally(config, isSnapshotRestoreManagedExternally); - } else { - // making sure we unset snapshot name as new job doesn't need it - config.unset(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); - config.unset(PhoenixConfigurationUtil.RESTORE_DIR_KEY); - config.unset(PhoenixConfigurationUtil.MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE); - } - - return queryPlan; - } - } catch (Exception exception) { - LOGGER.error(String.format("Failed to get the query plan with error [%s]", - exception.getMessage())); - throw new RuntimeException(exception); + /** + * Returns the query plan associated with the select query. 
+ */ + protected QueryPlan getQueryPlan(final JobContext context, final Configuration configuration) + throws IOException { + Preconditions.checkNotNull(context); + try { + final String txnScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + final String currentScnValue = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); + final String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); + final Properties overridingProps = new Properties(); + if (txnScnValue == null && currentScnValue != null) { + overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue); + } + if (tenantId != null && configuration.get(PhoenixRuntime.TENANT_ID_ATTRIB) == null) { + overridingProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } + try ( + final Connection connection = + ConnectionUtil.getInputConnection(configuration, overridingProps); + final Statement statement = connection.createStatement()) { + + MRJobType mrJobType = + PhoenixConfigurationUtil.getMRJobType(configuration, MRJobType.QUERY.name()); + + String selectStatement; + switch (mrJobType) { + case UPDATE_STATS: + // This select statement indicates MR job for full table scan for stats collection + selectStatement = + "SELECT * FROM " + PhoenixConfigurationUtil.getInputTableName(configuration); + break; + default: + selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); } - } + Preconditions.checkNotNull(selectStatement); - void setupParallelScansFromQueryPlan(QueryPlan queryPlan) { - setupParallelScansWithScanGrouper(queryPlan, MapReduceParallelScanGrouper.getInstance()); - } + final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class); + // Optimize the query plan so that we potentially use secondary indexes + final QueryPlan queryPlan = pstmt.optimizeQuery(selectStatement); + final Scan scan = queryPlan.getContext().getScan(); - RecordReader getPhoenixRecordReader(Class inputClass, - Configuration configuration, QueryPlan queryPlan) { - return new PhoenixRecordReader<>(inputClass , configuration, queryPlan, - MapReduceParallelScanGrouper.getInstance()); - } + if (mrJobType == MRJobType.UPDATE_STATS) { + StatisticsUtil.setScanAttributes(scan, null); + } - /** - * Initialize the query plan so it sets up the parallel scans - * @param queryPlan Query plan corresponding to the select query - * @param scanGrouper Parallel scan grouper - */ - void setupParallelScansWithScanGrouper(QueryPlan queryPlan, ParallelScanGrouper scanGrouper) { - Preconditions.checkNotNull(queryPlan); - try { - queryPlan.iterator(scanGrouper); - } catch (SQLException e) { - LOGGER.error(String.format("Setting up parallel scans for the query plan failed " - + "with error [%s]", e.getMessage())); - throw new RuntimeException(e); + // since we can't set a scn on connections with txn set TX_SCN attribute so that the max + // time range is set by BaseScannerRegionObserver + if (txnScnValue != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, + Bytes.toBytes(Long.parseLong(txnScnValue))); } + + // setting the snapshot configuration + String snapshotName = configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); + String restoreDir = configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY); + boolean isSnapshotRestoreManagedExternally = + PhoenixConfigurationUtil.isMRSnapshotManagedExternally(configuration); + Configuration config = + queryPlan.getContext().getConnection().getQueryServices().getConfiguration(); + if (snapshotName != null) { + 
PhoenixConfigurationUtil.setSnapshotNameKey(config, snapshotName); + PhoenixConfigurationUtil.setRestoreDirKey(config, restoreDir); + PhoenixConfigurationUtil.setMRSnapshotManagedExternally(config, + isSnapshotRestoreManagedExternally); + } else { + // making sure we unset snapshot name as new job doesn't need it + config.unset(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); + config.unset(PhoenixConfigurationUtil.RESTORE_DIR_KEY); + config.unset(PhoenixConfigurationUtil.MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE); + } + + return queryPlan; + } + } catch (Exception exception) { + LOGGER.error( + String.format("Failed to get the query plan with error [%s]", exception.getMessage())); + throw new RuntimeException(exception); + } + } + + void setupParallelScansFromQueryPlan(QueryPlan queryPlan) { + setupParallelScansWithScanGrouper(queryPlan, MapReduceParallelScanGrouper.getInstance()); + } + + RecordReader getPhoenixRecordReader(Class inputClass, + Configuration configuration, QueryPlan queryPlan) { + return new PhoenixRecordReader<>(inputClass, configuration, queryPlan, + MapReduceParallelScanGrouper.getInstance()); + } + + /** + * Initialize the query plan so it sets up the parallel scans + * @param queryPlan Query plan corresponding to the select query + * @param scanGrouper Parallel scan grouper + */ + void setupParallelScansWithScanGrouper(QueryPlan queryPlan, ParallelScanGrouper scanGrouper) { + Preconditions.checkNotNull(queryPlan); + try { + queryPlan.iterator(scanGrouper); + } catch (SQLException e) { + LOGGER.error( + String.format("Setting up parallel scans for the query plan failed " + "with error [%s]", + e.getMessage())); + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java index a4dc1b789e8..38ea9d11df6 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,7 +29,6 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.phoenix.query.KeyRange; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; @@ -38,113 +37,123 @@ */ public class PhoenixInputSplit extends InputSplit implements Writable { - private List scans; - private KeyRange keyRange; - private String regionLocation = null; - private long splitSize = 0; - - /** - * No Arg constructor - */ - public PhoenixInputSplit() { - } - - /** - * - * @param scans - */ - public PhoenixInputSplit(final List scans) { - this(scans, 0, null); - } + private List scans; + private KeyRange keyRange; + private String regionLocation = null; + private long splitSize = 0; - public PhoenixInputSplit(final List scans, long splitSize, String regionLocation) { - Preconditions.checkNotNull(scans); - Preconditions.checkState(!scans.isEmpty()); - this.scans = scans; - this.splitSize = splitSize; - this.regionLocation = regionLocation; - init(); - } - - public List getScans() { - return scans; - } - - public KeyRange getKeyRange() { - return keyRange; - } - - private void init() { - this.keyRange = KeyRange.getKeyRange(scans.get(0).getStartRow(), scans.get(scans.size()-1).getStopRow()); - } - - @Override - public void readFields(DataInput input) throws IOException { - regionLocation = WritableUtils.readString(input); - splitSize = WritableUtils.readVLong(input); - int count = WritableUtils.readVInt(input); - scans = Lists.newArrayListWithExpectedSize(count); - for (int i = 0; i < count; i++) { - byte[] protoScanBytes = new byte[WritableUtils.readVInt(input)]; - input.readFully(protoScanBytes); - ClientProtos.Scan protoScan = ClientProtos.Scan.parseFrom(protoScanBytes); - Scan scan = ProtobufUtil.toScan(protoScan); - scans.add(scan); - } - init(); - } - - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeString(output, regionLocation); - WritableUtils.writeVLong(output, splitSize); - - Preconditions.checkNotNull(scans); - WritableUtils.writeVInt(output, scans.size()); - for (Scan scan : scans) { - ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan); - byte[] protoScanBytes = protoScan.toByteArray(); - WritableUtils.writeVInt(output, protoScanBytes.length); - output.write(protoScanBytes); - } - } + /** + * No Arg constructor + */ + public PhoenixInputSplit() { + } - @Override - public long getLength() throws IOException, InterruptedException { - return splitSize; - } + /** + * + */ + public PhoenixInputSplit(final List scans) { + this(scans, 0, null); + } + + public PhoenixInputSplit(final List scans, long splitSize, String regionLocation) { + Preconditions.checkNotNull(scans); + Preconditions.checkState(!scans.isEmpty()); + this.scans = scans; + this.splitSize = splitSize; + this.regionLocation = regionLocation; + init(); + } - @Override - public String[] getLocations() throws IOException, InterruptedException { - if (regionLocation == null) { - return new String[]{}; - } else { - return new String[]{regionLocation}; - } + public List getScans() { + return scans; + } + + public KeyRange getKeyRange() { + return keyRange; + } + + private void init() { 
+ this.keyRange = + KeyRange.getKeyRange(scans.get(0).getStartRow(), scans.get(scans.size() - 1).getStopRow()); + } + + @Override + public void readFields(DataInput input) throws IOException { + regionLocation = WritableUtils.readString(input); + splitSize = WritableUtils.readVLong(input); + int count = WritableUtils.readVInt(input); + scans = Lists.newArrayListWithExpectedSize(count); + for (int i = 0; i < count; i++) { + byte[] protoScanBytes = new byte[WritableUtils.readVInt(input)]; + input.readFully(protoScanBytes); + ClientProtos.Scan protoScan = ClientProtos.Scan.parseFrom(protoScanBytes); + Scan scan = ProtobufUtil.toScan(protoScan); + scans.add(scan); } + init(); + } - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + keyRange.hashCode(); - return result; + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeString(output, regionLocation); + WritableUtils.writeVLong(output, splitSize); + + Preconditions.checkNotNull(scans); + WritableUtils.writeVInt(output, scans.size()); + for (Scan scan : scans) { + ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan); + byte[] protoScanBytes = protoScan.toByteArray(); + WritableUtils.writeVInt(output, protoScanBytes.length); + output.write(protoScanBytes); } + } + + @Override + public long getLength() throws IOException, InterruptedException { + return splitSize; + } - @Override - public boolean equals(Object obj) { - if (this == obj) { return true; } - if (obj == null) { return false; } - if (!(obj instanceof PhoenixInputSplit)) { return false; } - PhoenixInputSplit other = (PhoenixInputSplit)obj; - if (keyRange == null) { - if (other.keyRange != null) { return false; } - } else if (!keyRange.equals(other.keyRange)) { return false; } - return true; + @Override + public String[] getLocations() throws IOException, InterruptedException { + if (regionLocation == null) { + return new String[] {}; + } else { + return new String[] { regionLocation }; } + } - public void setLength(long length) { - this.splitSize = length; + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + keyRange.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof PhoenixInputSplit)) { + return false; } + PhoenixInputSplit other = (PhoenixInputSplit) obj; + if (keyRange == null) { + if (other.keyRange != null) { + return false; + } + } else if (!keyRange.equals(other.keyRange)) { + return false; + } + return true; + } + + public void setLength(long length) { + this.splitSize = length; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixJobCounters.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixJobCounters.java index 4a869d904da..bc9203fa4fa 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixJobCounters.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixJobCounters.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,11 @@ package org.apache.phoenix.mapreduce; /** - * Counters used during Map Reduce jobs - * + * Counters used during Map Reduce jobs */ public enum PhoenixJobCounters { - INPUT_RECORDS, - FAILED_RECORDS, - OUTPUT_RECORDS; + INPUT_RECORDS, + FAILED_RECORDS, + OUTPUT_RECORDS; } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormat.java index 9d7da2e2987..6d02926c4f8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,10 @@ */ package org.apache.phoenix.mapreduce; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -25,19 +29,15 @@ import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.phoenix.mapreduce.util.DefaultMultiViewSplitStrategy; import org.apache.phoenix.mapreduce.util.DefaultPhoenixMultiViewListProvider; +import org.apache.phoenix.mapreduce.util.MultiViewSplitStrategy; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.mapreduce.util.PhoenixMultiViewListProvider; -import org.apache.phoenix.mapreduce.util.DefaultMultiViewSplitStrategy; import org.apache.phoenix.mapreduce.util.ViewInfoWritable; -import org.apache.phoenix.mapreduce.util.MultiViewSplitStrategy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - /* This is a generic MultiViewInputFormat class that using by the MR job. 
You can provide your own split strategy and provider class to customize your own business needed by @@ -45,69 +45,71 @@ overwrite and load class blow: MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ */ -public class PhoenixMultiViewInputFormat extends InputFormat { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMultiViewInputFormat.class); +public class PhoenixMultiViewInputFormat extends InputFormat { + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMultiViewInputFormat.class); - @Override public List getSplits(JobContext context) throws IOException { - List listOfInputSplit; - try { - final Configuration configuration = context.getConfiguration(); - Class defaultMultiInputStrategyClazz = DefaultPhoenixMultiViewListProvider.class; - if (configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ) != null) { - defaultMultiInputStrategyClazz = Class.forName(configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ)); - } - PhoenixMultiViewListProvider phoenixMultiViewListProvider = - (PhoenixMultiViewListProvider) defaultMultiInputStrategyClazz.newInstance(); - List views = - phoenixMultiViewListProvider.getPhoenixMultiViewList(configuration); + @Override + public List getSplits(JobContext context) throws IOException { + List listOfInputSplit; + try { + final Configuration configuration = context.getConfiguration(); + Class defaultMultiInputStrategyClazz = DefaultPhoenixMultiViewListProvider.class; + if ( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ) != null + ) { + defaultMultiInputStrategyClazz = Class.forName( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ)); + } + PhoenixMultiViewListProvider phoenixMultiViewListProvider = + (PhoenixMultiViewListProvider) defaultMultiInputStrategyClazz.newInstance(); + List views = + phoenixMultiViewListProvider.getPhoenixMultiViewList(configuration); - Class defaultDeletionMultiInputSplitStrategyClazz = - DefaultMultiViewSplitStrategy.class; - if (configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ) != null) { - defaultDeletionMultiInputSplitStrategyClazz = Class.forName(configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ)); - } - MultiViewSplitStrategy multiViewSplitStrategy = (MultiViewSplitStrategy) - defaultDeletionMultiInputSplitStrategyClazz.newInstance(); - listOfInputSplit = multiViewSplitStrategy.generateSplits(views, configuration); - } catch (ClassNotFoundException e) { - LOGGER.debug("PhoenixMultiViewInputFormat is getting ClassNotFoundException : " + - e.getMessage()); - throw new IOException( - "PhoenixMultiViewInputFormat is getting ClassNotFoundException : " + - e.getMessage(), e.getCause()); - } catch (InstantiationException e) { - LOGGER.debug("PhoenixMultiViewInputFormat is getting InstantiationException : " + - e.getMessage()); - throw new IOException( - "PhoenixMultiViewInputFormat is getting InstantiationException : " + - e.getMessage(), e.getCause()); - } catch (IllegalAccessException e) { - LOGGER.debug("PhoenixMultiViewInputFormat is getting IllegalAccessException : " + - e.getMessage()); - throw new IOException( - "PhoenixMultiViewInputFormat is getting IllegalAccessException : " + - e.getMessage(), e.getCause()); - } - - return listOfInputSplit == null ? 
new ArrayList() : listOfInputSplit; + Class defaultDeletionMultiInputSplitStrategyClazz = DefaultMultiViewSplitStrategy.class; + if ( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ) + != null + ) { + defaultDeletionMultiInputSplitStrategyClazz = Class.forName( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ)); + } + MultiViewSplitStrategy multiViewSplitStrategy = + (MultiViewSplitStrategy) defaultDeletionMultiInputSplitStrategyClazz.newInstance(); + listOfInputSplit = multiViewSplitStrategy.generateSplits(views, configuration); + } catch (ClassNotFoundException e) { + LOGGER + .debug("PhoenixMultiViewInputFormat is getting ClassNotFoundException : " + e.getMessage()); + throw new IOException( + "PhoenixMultiViewInputFormat is getting ClassNotFoundException : " + e.getMessage(), + e.getCause()); + } catch (InstantiationException e) { + LOGGER + .debug("PhoenixMultiViewInputFormat is getting InstantiationException : " + e.getMessage()); + throw new IOException( + "PhoenixMultiViewInputFormat is getting InstantiationException : " + e.getMessage(), + e.getCause()); + } catch (IllegalAccessException e) { + LOGGER + .debug("PhoenixMultiViewInputFormat is getting IllegalAccessException : " + e.getMessage()); + throw new IOException( + "PhoenixMultiViewInputFormat is getting IllegalAccessException : " + e.getMessage(), + e.getCause()); } - @Override - public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) { - final Configuration configuration = context.getConfiguration(); + return listOfInputSplit == null ? new ArrayList() : listOfInputSplit; + } - final Class inputClass = - (Class) PhoenixConfigurationUtil.getInputClass(configuration); - return getPhoenixRecordReader(inputClass, configuration); - } + @Override + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) { + final Configuration configuration = context.getConfiguration(); - private RecordReader getPhoenixRecordReader(Class inputClass, - Configuration configuration) { - return new PhoenixMultiViewReader<>(inputClass , configuration); - } -} \ No newline at end of file + final Class inputClass = (Class) PhoenixConfigurationUtil.getInputClass(configuration); + return getPhoenixRecordReader(inputClass, configuration); + } + + private RecordReader getPhoenixRecordReader(Class inputClass, + Configuration configuration) { + return new PhoenixMultiViewReader<>(inputClass, configuration); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputSplit.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputSplit.java index 37f42184422..03ede9f9463 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputSplit.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputSplit.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,61 +17,65 @@ */ package org.apache.phoenix.mapreduce; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.phoenix.mapreduce.util.ViewInfoTracker; -import org.apache.phoenix.mapreduce.util.ViewInfoWritable; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.phoenix.mapreduce.util.ViewInfoTracker; +import org.apache.phoenix.mapreduce.util.ViewInfoWritable; + /* Generic class that provide a list of views for the MR job. You can overwrite your own logic to filter/add views. */ public class PhoenixMultiViewInputSplit extends InputSplit implements Writable { - List viewInfoTrackerList; + List viewInfoTrackerList; - public PhoenixMultiViewInputSplit() { - this.viewInfoTrackerList = new ArrayList<>(); - } + public PhoenixMultiViewInputSplit() { + this.viewInfoTrackerList = new ArrayList<>(); + } - public PhoenixMultiViewInputSplit(List viewInfoTracker) { - this.viewInfoTrackerList = viewInfoTracker; - } + public PhoenixMultiViewInputSplit(List viewInfoTracker) { + this.viewInfoTrackerList = viewInfoTracker; + } - @Override public void write(DataOutput output) throws IOException { - WritableUtils.writeVInt(output, this.viewInfoTrackerList.size()); - for (ViewInfoWritable viewInfoWritable : this.viewInfoTrackerList) { - if (viewInfoWritable instanceof ViewInfoTracker) { - viewInfoWritable.write(output); - } - } + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeVInt(output, this.viewInfoTrackerList.size()); + for (ViewInfoWritable viewInfoWritable : this.viewInfoTrackerList) { + if (viewInfoWritable instanceof ViewInfoTracker) { + viewInfoWritable.write(output); + } } + } - @Override public void readFields(DataInput input) throws IOException { - int count = WritableUtils.readVInt(input); - for (int i = 0; i < count; i++) { - ViewInfoTracker viewInfoTracker = new ViewInfoTracker(); - viewInfoTracker.readFields(input); - this.viewInfoTrackerList.add(viewInfoTracker); - } + @Override + public void readFields(DataInput input) throws IOException { + int count = WritableUtils.readVInt(input); + for (int i = 0; i < count; i++) { + ViewInfoTracker viewInfoTracker = new ViewInfoTracker(); + viewInfoTracker.readFields(input); + this.viewInfoTrackerList.add(viewInfoTracker); } + } - @Override public long getLength() { - return 0; - } + @Override + public long getLength() { + return 0; + } - @Override public String[] getLocations() { - return new String[0]; - } + @Override + public String[] getLocations() { + return new String[0]; + } - public List getViewInfoTrackerList() { - return this.viewInfoTrackerList; - } -} \ No newline at end of file + public List getViewInfoTrackerList() { + return this.viewInfoTrackerList; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReader.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReader.java index f1d76625c10..7b878e61eb6 100644 --- 
a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReader.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,10 @@ */ package org.apache.phoenix.mapreduce; +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -24,63 +28,63 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.phoenix.mapreduce.util.ViewInfoWritable; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Iterator; -import java.util.List; +public class PhoenixMultiViewReader extends RecordReader { + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMultiViewReader.class); -public class PhoenixMultiViewReader extends RecordReader { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMultiViewReader.class); + private Configuration configuration; + private Class inputClass; + Iterator it; - private Configuration configuration; - private Class inputClass; - Iterator it; + public PhoenixMultiViewReader() { - public PhoenixMultiViewReader() { + } - } + public PhoenixMultiViewReader(final Class inputClass, final Configuration configuration) { + this.configuration = configuration; + this.inputClass = inputClass; + } - public PhoenixMultiViewReader(final Class inputClass, final Configuration configuration) { - this.configuration = configuration; - this.inputClass = inputClass; + @Override + public void initialize(InputSplit split, TaskAttemptContext context) throws IOException { + if (split instanceof PhoenixMultiViewInputSplit) { + final PhoenixMultiViewInputSplit pSplit = (PhoenixMultiViewInputSplit) split; + final List viewInfoTracker = pSplit.getViewInfoTrackerList(); + it = viewInfoTracker.iterator(); + } else { + LOGGER.error("InputSplit class cannot cast to PhoenixMultiViewInputSplit."); + throw new IOException("InputSplit class cannot cast to PhoenixMultiViewInputSplit"); } + } - @Override public void initialize(InputSplit split, TaskAttemptContext context) - throws IOException { - if (split instanceof PhoenixMultiViewInputSplit) { - final PhoenixMultiViewInputSplit pSplit = (PhoenixMultiViewInputSplit)split; - final List viewInfoTracker = pSplit.getViewInfoTrackerList(); - it = viewInfoTracker.iterator(); - } else { - LOGGER.error("InputSplit class cannot cast to PhoenixMultiViewInputSplit."); - throw new IOException("InputSplit class cannot cast to PhoenixMultiViewInputSplit"); - } - } + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + return it.hasNext(); + } - @Override public boolean nextKeyValue() throws IOException, InterruptedException { - return it.hasNext(); - } + @Override + public NullWritable getCurrentKey() throws IOException, InterruptedException { + return null; + } - @Override public NullWritable getCurrentKey() throws IOException, InterruptedException { - return null; + @Override + public T 
getCurrentValue() throws IOException, InterruptedException { + ViewInfoWritable currentValue = null; + if (it.hasNext()) { + currentValue = it.next(); } + return (T) currentValue; + } - @Override public T getCurrentValue() throws IOException, InterruptedException { - ViewInfoWritable currentValue = null; - if (it.hasNext()) { - currentValue = it.next(); - } - return (T)currentValue; - } + @Override + public float getProgress() throws IOException, InterruptedException { + return 0; + } - @Override public float getProgress() throws IOException, InterruptedException { - return 0; - } - - @Override public void close() throws IOException { + @Override + public void close() throws IOException { - } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputCommitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputCommitter.java index ffee5c7890b..adfb0063921 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputCommitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputCommitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,28 +27,29 @@ * A no-op {@link OutputCommitter} */ public class PhoenixOutputCommitter extends OutputCommitter { - - public PhoenixOutputCommitter() {} - @Override - public void abortTask(TaskAttemptContext context) throws IOException { - } + public PhoenixOutputCommitter() { + } - @Override - public void commitTask(TaskAttemptContext context) throws IOException { - } + @Override + public void abortTask(TaskAttemptContext context) throws IOException { + } - @Override - public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { - return true; - } + @Override + public void commitTask(TaskAttemptContext context) throws IOException { + } - @Override - public void setupJob(JobContext jobContext) throws IOException { - } + @Override + public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { + return true; + } - @Override - public void setupTask(TaskAttemptContext context) throws IOException { - } + @Override + public void setupJob(JobContext jobContext) throws IOException { + } + + @Override + public void setupTask(TaskAttemptContext context) throws IOException { + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java index 23847cb3979..29a2c3925eb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixOutputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,39 +34,40 @@ /** * {@link OutputFormat} implementation for Phoenix. 
- * */ -public class PhoenixOutputFormat extends OutputFormat { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixOutputFormat.class); - - public PhoenixOutputFormat() { - this(Collections.emptySet()); - } - - // FIXME Never used, and the ignore feature didn't work anyway - public PhoenixOutputFormat(Set propsToIgnore) { - } - - @Override - public void checkOutputSpecs(JobContext jobContext) throws IOException, InterruptedException { - } - - /** - * - */ - @Override - public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException, InterruptedException { - return new PhoenixOutputCommitter(); - } +public class PhoenixOutputFormat extends OutputFormat { + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixOutputFormat.class); + + public PhoenixOutputFormat() { + this(Collections. emptySet()); + } + + // FIXME Never used, and the ignore feature didn't work anyway + public PhoenixOutputFormat(Set propsToIgnore) { + } + + @Override + public void checkOutputSpecs(JobContext jobContext) throws IOException, InterruptedException { + } + + /** + * + */ + @Override + public OutputCommitter getOutputCommitter(TaskAttemptContext context) + throws IOException, InterruptedException { + return new PhoenixOutputCommitter(); + } - @Override - public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException { - try { - return new PhoenixRecordWriter(context.getConfiguration()); - } catch (SQLException e) { - LOGGER.error("Error calling PhoenixRecordWriter " + e.getMessage()); - throw new RuntimeException(e); - } + @Override + public RecordWriter getRecordWriter(TaskAttemptContext context) + throws IOException, InterruptedException { + try { + return new PhoenixRecordWriter(context.getConfiguration()); + } catch (SQLException e) { + LOGGER.error("Error calling PhoenixRecordWriter " + e.getMessage()); + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java index 614f388488b..30d49a0bb78 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,150 +48,155 @@ import org.apache.phoenix.monitoring.ReadMetricQueue; import org.apache.phoenix.monitoring.ScanMetricsHolder; import org.apache.phoenix.query.ConnectionQueryServices; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.base.Throwables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * {@link RecordReader} implementation that iterates over the the records. 
*/ -public class PhoenixRecordReader extends RecordReader { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordReader.class); - protected final Configuration configuration; - protected final QueryPlan queryPlan; - private final ParallelScanGrouper scanGrouper; - private NullWritable key = NullWritable.get(); - private T value = null; - private Class inputClass; - private ResultIterator resultIterator = null; - private PhoenixResultSet resultSet; - - PhoenixRecordReader(Class inputClass, final Configuration configuration, - final QueryPlan queryPlan, final ParallelScanGrouper scanGrouper) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(queryPlan); - Preconditions.checkNotNull(scanGrouper); - this.inputClass = inputClass; - this.configuration = configuration; - this.queryPlan = queryPlan; - this.scanGrouper = scanGrouper; +public class PhoenixRecordReader extends RecordReader { + + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordReader.class); + protected final Configuration configuration; + protected final QueryPlan queryPlan; + private final ParallelScanGrouper scanGrouper; + private NullWritable key = NullWritable.get(); + private T value = null; + private Class inputClass; + private ResultIterator resultIterator = null; + private PhoenixResultSet resultSet; + + PhoenixRecordReader(Class inputClass, final Configuration configuration, + final QueryPlan queryPlan, final ParallelScanGrouper scanGrouper) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(queryPlan); + Preconditions.checkNotNull(scanGrouper); + this.inputClass = inputClass; + this.configuration = configuration; + this.queryPlan = queryPlan; + this.scanGrouper = scanGrouper; + } + + @Override + public void close() throws IOException { + if (resultIterator != null) { + try { + resultIterator.close(); + } catch (SQLException e) { + LOGGER.error(" Error closing resultset."); + throw new RuntimeException(e); + } } - - @Override - public void close() throws IOException { - if(resultIterator != null) { - try { - resultIterator.close(); - } catch (SQLException e) { - LOGGER.error(" Error closing resultset."); - throw new RuntimeException(e); + } + + @Override + public NullWritable getCurrentKey() throws IOException, InterruptedException { + return key; + } + + @Override + public T getCurrentValue() throws IOException, InterruptedException { + return value; + } + + @Override + public float getProgress() throws IOException, InterruptedException { + return 0; + } + + @Override + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { + final PhoenixInputSplit pSplit = (PhoenixInputSplit) split; + final List scans = pSplit.getScans(); + try { + LOGGER.info( + "Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange()); + List iterators = Lists.newArrayListWithExpectedSize(scans.size()); + StatementContext ctx = queryPlan.getContext(); + ReadMetricQueue readMetrics = ctx.getReadMetricsQueue(); + String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString(); + String snapshotName = this.configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); + + // Clear the table region boundary cache to make sure long running jobs stay up to date + byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes(); + ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices(); + 
services.clearTableRegionCache(TableName.valueOf(tableNameBytes)); + + long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices() + .getRenewLeaseThresholdMilliSeconds(); + for (Scan scan : scans) { + // For MR, skip the region boundary check exception if we encounter a split. ref: + // PHOENIX-2599 + scan.setAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, + Bytes.toBytes(true)); + + // Get QueryTimeout From Statement + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long maxQueryEndTime = + startTime + queryPlan.getContext().getStatement().getQueryTimeoutInMillis(); + PeekingResultIterator peekingResultIterator; + ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, + scan, queryPlan.getContext().getConnection().getLogLevel()); + if (snapshotName != null) { + // result iterator to read snapshots + final TableSnapshotResultIterator tableSnapshotResultIterator = + new TableSnapshotResultIterator(configuration, scan, scanMetricsHolder, + queryPlan.getContext(), true, maxQueryEndTime); + peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator); + LOGGER.info("Adding TableSnapshotResultIterator for scan: " + scan); + } else { + final TableResultIterator tableResultIterator = new TableResultIterator( + queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder, + renewScannerLeaseThreshold, queryPlan, this.scanGrouper, true, maxQueryEndTime); + peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator); + LOGGER.info("Adding TableResultIterator for scan: " + scan); } - } + iterators.add(peekingResultIterator); + } + ResultIterator iterator = queryPlan.useRoundRobinIterator() + ? RoundRobinResultIterator.newIterator(iterators, queryPlan) + : ConcatResultIterator.newIterator(iterators); + if (queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) { + iterator = + new SequenceResultIterator(iterator, queryPlan.getContext().getSequenceManager()); + } + this.resultIterator = iterator; + // Clone the row projector as it's not thread safe and would be used simultaneously by + // multiple threads otherwise. + + this.resultSet = new PhoenixResultSet(this.resultIterator, + queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext()); + } catch (SQLException e) { + LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. 
", e.getMessage())); + Throwables.propagate(e); } + } - @Override - public NullWritable getCurrentKey() throws IOException, InterruptedException { - return key; + @Override + public boolean nextKeyValue() throws IOException, InterruptedException { + if (key == null) { + key = NullWritable.get(); } - - @Override - public T getCurrentValue() throws IOException, InterruptedException { - return value; + if (value == null) { + value = ReflectionUtils.newInstance(inputClass, this.configuration); } - - @Override - public float getProgress() throws IOException, InterruptedException { - return 0; - } - - @Override - public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException { - final PhoenixInputSplit pSplit = (PhoenixInputSplit)split; - final List scans = pSplit.getScans(); - try { - LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: " - + pSplit.getKeyRange()); - List iterators = Lists.newArrayListWithExpectedSize(scans.size()); - StatementContext ctx = queryPlan.getContext(); - ReadMetricQueue readMetrics = ctx.getReadMetricsQueue(); - String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString(); - String snapshotName = this.configuration.get(PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY); - - // Clear the table region boundary cache to make sure long running jobs stay up to date - byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes(); - ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices(); - services.clearTableRegionCache(TableName.valueOf(tableNameBytes)); - - long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds(); - for (Scan scan : scans) { - // For MR, skip the region boundary check exception if we encounter a split. ref: PHOENIX-2599 - scan.setAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); - - //Get QueryTimeout From Statement - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); - final long maxQueryEndTime = startTime + queryPlan.getContext().getStatement().getQueryTimeoutInMillis(); - PeekingResultIterator peekingResultIterator; - ScanMetricsHolder scanMetricsHolder = - ScanMetricsHolder.getInstance(readMetrics, tableName, scan, - queryPlan.getContext().getConnection().getLogLevel()); - if (snapshotName != null) { - // result iterator to read snapshots - final TableSnapshotResultIterator tableSnapshotResultIterator = new TableSnapshotResultIterator(configuration, scan, - scanMetricsHolder, queryPlan.getContext(), true, maxQueryEndTime); - peekingResultIterator = LookAheadResultIterator.wrap(tableSnapshotResultIterator); - LOGGER.info("Adding TableSnapshotResultIterator for scan: " + scan); - } else { - final TableResultIterator tableResultIterator = - new TableResultIterator( - queryPlan.getContext().getConnection().getMutationState(), scan, - scanMetricsHolder, renewScannerLeaseThreshold, queryPlan, - this.scanGrouper, true, maxQueryEndTime); - peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator); - LOGGER.info("Adding TableResultIterator for scan: " + scan); - } - iterators.add(peekingResultIterator); - } - ResultIterator iterator = queryPlan.useRoundRobinIterator() ? 
RoundRobinResultIterator.newIterator(iterators, queryPlan) : ConcatResultIterator.newIterator(iterators); - if(queryPlan.getContext().getSequenceManager().getSequenceCount() > 0) { - iterator = new SequenceResultIterator(iterator, queryPlan.getContext().getSequenceManager()); - } - this.resultIterator = iterator; - // Clone the row projector as it's not thread safe and would be used simultaneously by - // multiple threads otherwise. - - this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext()); - } catch (SQLException e) { - LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ", - e.getMessage())); - Throwables.propagate(e); - } - } - - @Override - public boolean nextKeyValue() throws IOException, InterruptedException { - if (key == null) { - key = NullWritable.get(); - } - if (value == null) { - value = ReflectionUtils.newInstance(inputClass, this.configuration); - } - Preconditions.checkNotNull(this.resultSet); - try { - if(!resultSet.next()) { - return false; - } - value.readFields(resultSet); - return true; - } catch (SQLException e) { - LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ", - e.getMessage())); - throw new RuntimeException(e); - } + Preconditions.checkNotNull(this.resultSet); + try { + if (!resultSet.next()) { + return false; + } + value.readFields(resultSet); + return true; + } catch (SQLException e) { + LOGGER.error( + String.format(" Error [%s] occurred while iterating over the resultset. ", e.getMessage())); + throw new RuntimeException(e); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWritable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWritable.java index 0d3e7241023..6a5a4967729 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWritable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWritable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,157 +36,163 @@ import org.apache.phoenix.util.ColumnInfo; import org.joda.time.DateTime; - public class PhoenixRecordWritable implements DBWritable { - private final List upsertValues = new ArrayList<>(); - private final Map resultMap = new LinkedHashMap<>(); - private List columnMetaDataList; - - /** For serialization; do not use. */ - public PhoenixRecordWritable() { - this(new ArrayList()); + private final List upsertValues = new ArrayList<>(); + private final Map resultMap = new LinkedHashMap<>(); + private List columnMetaDataList; + + /** For serialization; do not use. */ + public PhoenixRecordWritable() { + this(new ArrayList()); + } + + public PhoenixRecordWritable(List columnMetaDataList) { + this.columnMetaDataList = columnMetaDataList; + } + + /** + * Helper method to create a {@link Array} for a specific {@link PDataType}, and set it on the + * provided {@code stmt}. 
+ */ + private static void setArrayInStatement(PreparedStatement stmt, PDataType type, Object[] obj, + int position) throws SQLException { + Array sqlArray = + stmt.getConnection().createArrayOf(PDataType.arrayBaseType(type).getSqlTypeName(), obj); + stmt.setArray(position, sqlArray); + } + + private static Object[] primativeArrayToObjectArray(byte[] a) { + final Byte[] ret = new Byte[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } + return ret; + } - public PhoenixRecordWritable(List columnMetaDataList) { - this.columnMetaDataList = columnMetaDataList; + private static Object[] primativeArrayToObjectArray(short[] a) { + final Short[] ret = new Short[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } + return ret; + } - /** - * Helper method to create a {@link Array} for a specific {@link PDataType}, and set it on - * the provided {@code stmt}. - */ - private static void setArrayInStatement(PreparedStatement stmt, PDataType type, - Object[] obj, int position) throws SQLException { - Array sqlArray = stmt.getConnection().createArrayOf( - PDataType.arrayBaseType(type).getSqlTypeName(), obj); - stmt.setArray(position, sqlArray); + private static Object[] primativeArrayToObjectArray(int[] a) { + final Integer[] ret = new Integer[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } + return ret; + } - private static Object[] primativeArrayToObjectArray(byte[] a) { - final Byte[] ret = new Byte[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; - } - return ret; + private static Object[] primativeArrayToObjectArray(float[] a) { + final Float[] ret = new Float[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } + return ret; + } - private static Object[] primativeArrayToObjectArray(short[] a) { - final Short[] ret = new Short[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; - } - return ret; + private static Object[] primativeArrayToObjectArray(double[] a) { + final Double[] ret = new Double[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } + return ret; + } - private static Object[] primativeArrayToObjectArray(int[] a) { - final Integer[] ret = new Integer[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; - } - return ret; + private static Object[] primativeArrayToObjectArray(long[] a) { + final Long[] ret = new Long[a.length]; + for (int i = 0; i < a.length; i++) { + ret[i] = a[i]; } - - private static Object[] primativeArrayToObjectArray(float[] a) { - final Float[] ret = new Float[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; - } - return ret; + return ret; + } + + @Override + public void write(PreparedStatement statement) throws SQLException { + // make sure we at least line up in size + if (upsertValues.size() != columnMetaDataList.size()) { + throw new UnsupportedOperationException("Provided " + upsertValues.size() + + " upsert values, but column metadata expects " + columnMetaDataList.size() + " columns."); } - private static Object[] primativeArrayToObjectArray(double[] a) { - final Double[] ret = new Double[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; + // correlate each value (v) to a column type (c) and an index (i) + for (int i = 0; i < upsertValues.size(); i++) { + Object v = upsertValues.get(i); + ColumnInfo c = columnMetaDataList.get(i); + + if (v == null) { + statement.setNull(i + 1, c.getSqlType()); + continue; + } + + // both Java and Joda dates used to work in 4.2.3, but now they must be 
java.sql.Date + // can override any other types here as needed + final Object finalObj; + final PDataType finalType; + if (v instanceof DateTime) { + finalObj = new java.sql.Date(((DateTime) v).getMillis()); + finalType = PDate.INSTANCE; + } else if (v instanceof java.util.Date) { + finalObj = new java.sql.Date(((java.util.Date) v).getTime()); + finalType = PDate.INSTANCE; + } else { + finalObj = v; + finalType = c.getPDataType(); + } + + if (finalObj instanceof Object[]) { + setArrayInStatement(statement, finalType, (Object[]) finalObj, i + 1); + } else if (finalObj instanceof byte[]) { + // PVarbinary and PBinary are provided as byte[] but are treated as SQL objects + if (PDataType.equalsAny(finalType, PVarbinary.INSTANCE, PBinary.INSTANCE)) { + statement.setObject(i + 1, finalObj); + } else { + // otherwise set as array type + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((byte[]) finalObj), + i + 1); } - return ret; + } else if (finalObj instanceof short[]) { + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((short[]) finalObj), + i + 1); + } else if (finalObj instanceof int[]) { + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((int[]) finalObj), + i + 1); + } else if (finalObj instanceof long[]) { + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((long[]) finalObj), + i + 1); + } else if (finalObj instanceof float[]) { + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((float[]) finalObj), + i + 1); + } else if (finalObj instanceof double[]) { + setArrayInStatement(statement, finalType, primativeArrayToObjectArray((double[]) finalObj), + i + 1); + } else { + statement.setObject(i + 1, finalObj); + } } - - private static Object[] primativeArrayToObjectArray(long[] a) { - final Long[] ret = new Long[a.length]; - for (int i = 0; i < a.length; i++) { - ret[i] = a[i]; - } - return ret; + } + + @Override + public void readFields(ResultSet resultSet) throws SQLException { + ResultSetMetaData metaData = resultSet.getMetaData(); + for (int i = 1; i <= metaData.getColumnCount(); i++) { + // return the contents of a PhoenixArray, if necessary + Object value = resultSet.getObject(i); + // put a (ColumnLabel -> value) entry into the result map + resultMap.put(metaData.getColumnLabel(i), value); } + } - @Override public void write(PreparedStatement statement) throws SQLException { - // make sure we at least line up in size - if (upsertValues.size() != columnMetaDataList.size()) { - throw new UnsupportedOperationException("Provided " + upsertValues.size() - + " upsert values, but column metadata expects " + columnMetaDataList.size() - + " columns."); - } + /** Append an object to the list of values to upsert. 
*/ + public void add(Object value) { + upsertValues.add(value); + } - // correlate each value (v) to a column type (c) and an index (i) - for (int i = 0; i < upsertValues.size(); i++) { - Object v = upsertValues.get(i); - ColumnInfo c = columnMetaDataList.get(i); - - if (v == null) { - statement.setNull(i + 1, c.getSqlType()); - continue; - } - - // both Java and Joda dates used to work in 4.2.3, but now they must be java.sql.Date - // can override any other types here as needed - final Object finalObj; - final PDataType finalType; - if (v instanceof DateTime) { - finalObj = new java.sql.Date(((DateTime) v).getMillis()); - finalType = PDate.INSTANCE; - } else if (v instanceof java.util.Date) { - finalObj = new java.sql.Date(((java.util.Date) v).getTime()); - finalType = PDate.INSTANCE; - } else { - finalObj = v; - finalType = c.getPDataType(); - } - - if (finalObj instanceof Object[]) { - setArrayInStatement(statement, finalType, (Object[]) finalObj, i + 1); - } else if (finalObj instanceof byte[]) { - // PVarbinary and PBinary are provided as byte[] but are treated as SQL objects - if (PDataType.equalsAny(finalType, PVarbinary.INSTANCE, PBinary.INSTANCE)) { - statement.setObject(i + 1, finalObj); - } else { - // otherwise set as array type - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((byte[]) finalObj), i + 1); - } - } else if (finalObj instanceof short[]) { - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((short[]) finalObj), i + 1); - } else if (finalObj instanceof int[]) { - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((int[]) finalObj), i + 1); - } else if (finalObj instanceof long[]) { - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((long[]) finalObj), i + 1); - } else if (finalObj instanceof float[]) { - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((float[]) finalObj), i + 1); - } else if (finalObj instanceof double[]) { - setArrayInStatement(statement, finalType, primativeArrayToObjectArray((double[]) finalObj), i + 1); - } else { - statement.setObject(i + 1, finalObj); - } - } - } - - @Override public void readFields(ResultSet resultSet) throws SQLException { - ResultSetMetaData metaData = resultSet.getMetaData(); - for (int i = 1; i <= metaData.getColumnCount(); i++) { - // return the contents of a PhoenixArray, if necessary - Object value = resultSet.getObject(i); - // put a (ColumnLabel -> value) entry into the result map - resultMap.put(metaData.getColumnLabel(i), value); - } - } - - /** Append an object to the list of values to upsert. */ - public void add(Object value) { - upsertValues.add(value); - } - - /** @return an immutable view on the {@link ResultSet} content. */ - public Map getResultMap() { - return Collections.unmodifiableMap(resultMap); - } + /** Returns an immutable view on the {@link ResultSet} content. */ + public Map getResultMap() { + return Collections.unmodifiableMap(resultMap); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java index 14e986c1597..08c4c480c4e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,70 +36,69 @@ /** * Default {@link RecordWriter} implementation from Phoenix - * */ -public class PhoenixRecordWriter extends RecordWriter { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordWriter.class); - - private final Connection conn; - private final PreparedStatement statement; - private final long batchSize; - private long numRecords = 0; - - public PhoenixRecordWriter(final Configuration configuration) throws SQLException { - this(configuration, Collections.emptySet()); - } - - public PhoenixRecordWriter(final Configuration configuration, Set propsToIgnore) throws SQLException { - Connection connection = null; - try { - connection = ConnectionUtil.getOutputConnection(configuration); - this.batchSize = PhoenixConfigurationUtil.getBatchSize(configuration); - final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); - this.statement = connection.prepareStatement(upsertQuery); - this.conn = connection; - } catch (Exception e) { - // Only close the connection in case of an exception, so cannot use try-with-resources - if (connection != null) { - connection.close(); - } - throw e; - } +public class PhoenixRecordWriter extends RecordWriter { + + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRecordWriter.class); + + private final Connection conn; + private final PreparedStatement statement; + private final long batchSize; + private long numRecords = 0; + + public PhoenixRecordWriter(final Configuration configuration) throws SQLException { + this(configuration, Collections. 
emptySet()); + } + + public PhoenixRecordWriter(final Configuration configuration, Set propsToIgnore) + throws SQLException { + Connection connection = null; + try { + connection = ConnectionUtil.getOutputConnection(configuration); + this.batchSize = PhoenixConfigurationUtil.getBatchSize(configuration); + final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); + this.statement = connection.prepareStatement(upsertQuery); + this.conn = connection; + } catch (Exception e) { + // Only close the connection in case of an exception, so cannot use try-with-resources + if (connection != null) { + connection.close(); + } + throw e; } + } - @Override - public void close(TaskAttemptContext context) throws IOException, InterruptedException { - try { - conn.commit(); - } catch (SQLException e) { - LOGGER.error("SQLException while performing the commit for the task."); - throw new RuntimeException(e); - } finally { - try { - statement.close(); - conn.close(); - } - catch (SQLException ex) { - LOGGER.error("SQLException while closing the connection for the task."); - throw new RuntimeException(ex); - } - } + @Override + public void close(TaskAttemptContext context) throws IOException, InterruptedException { + try { + conn.commit(); + } catch (SQLException e) { + LOGGER.error("SQLException while performing the commit for the task."); + throw new RuntimeException(e); + } finally { + try { + statement.close(); + conn.close(); + } catch (SQLException ex) { + LOGGER.error("SQLException while closing the connection for the task."); + throw new RuntimeException(ex); + } } + } - @Override - public void write(NullWritable n, T record) throws IOException, InterruptedException { - try { - record.write(statement); - numRecords++; - statement.execute(); - if (numRecords % batchSize == 0) { - LOGGER.debug("commit called on a batch of size : " + batchSize); - conn.commit(); - } - } catch (SQLException e) { - throw new RuntimeException("Exception while committing to database.", e); - } + @Override + public void write(NullWritable n, T record) throws IOException, InterruptedException { + try { + record.write(statement); + numRecords++; + statement.execute(); + if (numRecords % batchSize == 0) { + LOGGER.debug("commit called on a batch of size : " + batchSize); + conn.commit(); + } + } catch (SQLException e) { + throw new RuntimeException("Exception while committing to database.", e); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java index 24923944943..0678d576c26 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,18 @@ */ package org.apache.phoenix.mapreduce; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getCurrentScnValue; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getDisableLoggingVerifyType; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolDataTableName; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolLastVerifyTime; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolSourceTable; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolStartTime; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIsTransforming; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.setCurrentScnValue; +import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; + import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; @@ -47,6 +59,7 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.PhoenixRuntime; @@ -54,166 +67,167 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getCurrentScnValue; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getDisableLoggingVerifyType; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolDataTableName; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolLastVerifyTime; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolSourceTable; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolStartTime; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIsTransforming; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.setCurrentScnValue; -import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; - /** * {@link InputFormat} implementation from Phoenix for building index - * */ public class PhoenixServerBuildIndexInputFormat extends PhoenixInputFormat { - QueryPlan queryPlan = null; + QueryPlan queryPlan = null; - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class); - /** - * instantiated by framework - */ - public 
PhoenixServerBuildIndexInputFormat() { - } + /** + * instantiated by framework + */ + public PhoenixServerBuildIndexInputFormat() { + } - private interface QueryPlanBuilder { - QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, - String indexTableFullName) throws SQLException; - } + private interface QueryPlanBuilder { + QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, + String indexTableFullName) throws SQLException; + } - private class TransformingDataTableQueryPlanBuilder implements QueryPlanBuilder { - @Override - public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String oldTableFullName, - String newTableFullName) throws SQLException { - PTable newTable = phoenixConnection.getTableNoCache(newTableFullName); - ServerBuildTransformingTableCompiler compiler = new ServerBuildTransformingTableCompiler(phoenixConnection, oldTableFullName); - MutationPlan plan = compiler.compile(newTable); - return plan.getQueryPlan(); - } + private class TransformingDataTableQueryPlanBuilder implements QueryPlanBuilder { + @Override + public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String oldTableFullName, + String newTableFullName) throws SQLException { + PTable newTable = phoenixConnection.getTableNoCache(newTableFullName); + ServerBuildTransformingTableCompiler compiler = + new ServerBuildTransformingTableCompiler(phoenixConnection, oldTableFullName); + MutationPlan plan = compiler.compile(newTable); + return plan.getQueryPlan(); } + } - private class DataTableQueryPlanBuilder implements QueryPlanBuilder { - @Override - public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, - String indexTableFullName) throws SQLException { - PTable indexTable = phoenixConnection.getTableNoCache(indexTableFullName); - ServerBuildIndexCompiler compiler = new ServerBuildIndexCompiler(phoenixConnection, dataTableFullName); - MutationPlan plan = compiler.compile(indexTable); - return plan.getQueryPlan(); - } + private class DataTableQueryPlanBuilder implements QueryPlanBuilder { + @Override + public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, + String indexTableFullName) throws SQLException { + PTable indexTable = phoenixConnection.getTableNoCache(indexTableFullName); + ServerBuildIndexCompiler compiler = + new ServerBuildIndexCompiler(phoenixConnection, dataTableFullName); + MutationPlan plan = compiler.compile(indexTable); + return plan.getQueryPlan(); } + } - private class IndexTableQueryPlanBuilder implements QueryPlanBuilder { - @Override - public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, - String indexTableFullName) throws SQLException { - QueryPlan plan; - try (final PhoenixStatement statement = new PhoenixStatement(phoenixConnection)) { - String query = "SELECT count(*) FROM " + indexTableFullName; - plan = statement.compileQuery(query); - TableRef tableRef = plan.getTableRef(); - Scan scan = plan.getContext().getScan(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - PTable pIndexTable = tableRef.getTable(); - PTable pDataTable = phoenixConnection.getTable(dataTableFullName); - IndexMaintainer.serialize(pDataTable, ptr, Collections.singletonList(pIndexTable), phoenixConnection); - scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, - pIndexTable.getTableName().getBytes()); - ScanUtil.annotateScanWithMetadataAttributes(pDataTable, scan); - 
scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, - ByteUtil.copyKeyBytesIfNecessary(ptr)); - scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - ScanUtil.setScanAttributeForMaxLookbackAge(scan, pIndexTable.getMaxLookbackAge()); - ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); - } - return plan; - } + private class IndexTableQueryPlanBuilder implements QueryPlanBuilder { + @Override + public QueryPlan getQueryPlan(PhoenixConnection phoenixConnection, String dataTableFullName, + String indexTableFullName) throws SQLException { + QueryPlan plan; + try (final PhoenixStatement statement = new PhoenixStatement(phoenixConnection)) { + String query = "SELECT count(*) FROM " + indexTableFullName; + plan = statement.compileQuery(query); + TableRef tableRef = plan.getTableRef(); + Scan scan = plan.getContext().getScan(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + PTable pIndexTable = tableRef.getTable(); + PTable pDataTable = phoenixConnection.getTable(dataTableFullName); + IndexMaintainer.serialize(pDataTable, ptr, Collections.singletonList(pIndexTable), + phoenixConnection); + scan.setAttribute(PhoenixIndexCodec.INDEX_NAME_FOR_IDX_MAINTAINER, + pIndexTable.getTableName().getBytes()); + ScanUtil.annotateScanWithMetadataAttributes(pDataTable, scan); + scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr)); + scan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); + ScanUtil.setScanAttributeForMaxLookbackAge(scan, pIndexTable.getMaxLookbackAge()); + ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION); + } + return plan; } + } - private QueryPlanBuilder queryPlanBuilder; + private QueryPlanBuilder queryPlanBuilder; - @Override - protected QueryPlan getQueryPlan(final JobContext context, final Configuration configuration) - throws IOException { - Preconditions.checkNotNull(context); - if (queryPlan != null) { - return queryPlan; - } - final String txnScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - final String currentScnValue = getCurrentScnValue(configuration); - final String startTimeValue = getIndexToolStartTime(configuration); - final String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); - final String lastVerifyTime = getIndexToolLastVerifyTime(configuration); - - final Properties overridingProps = new Properties(); - if (txnScnValue==null && currentScnValue!=null) { - overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue); - } - if (tenantId != null && configuration.get(PhoenixRuntime.TENANT_ID_ATTRIB) == null) { - overridingProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - String dataTableFullName = getIndexToolDataTableName(configuration); - String indexTableFullName = getIndexToolIndexTableName(configuration); - SourceTable sourceTable = getIndexToolSourceTable(configuration); - if (getIsTransforming(configuration) && - PhoenixConfigurationUtil.getTransformingTableType(configuration) == SourceTable.DATA_TABLE_SOURCE) { - queryPlanBuilder = new TransformingDataTableQueryPlanBuilder(); - } else { - queryPlanBuilder = sourceTable.equals(SourceTable.DATA_TABLE_SOURCE) ? 
- new DataTableQueryPlanBuilder() : new IndexTableQueryPlanBuilder(); - } + @Override + protected QueryPlan getQueryPlan(final JobContext context, final Configuration configuration) + throws IOException { + Preconditions.checkNotNull(context); + if (queryPlan != null) { + return queryPlan; + } + final String txnScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + final String currentScnValue = getCurrentScnValue(configuration); + final String startTimeValue = getIndexToolStartTime(configuration); + final String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); + final String lastVerifyTime = getIndexToolLastVerifyTime(configuration); + + final Properties overridingProps = new Properties(); + if (txnScnValue == null && currentScnValue != null) { + overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, currentScnValue); + } + if (tenantId != null && configuration.get(PhoenixRuntime.TENANT_ID_ATTRIB) == null) { + overridingProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } + String dataTableFullName = getIndexToolDataTableName(configuration); + String indexTableFullName = getIndexToolIndexTableName(configuration); + SourceTable sourceTable = getIndexToolSourceTable(configuration); + if ( + getIsTransforming(configuration) + && PhoenixConfigurationUtil.getTransformingTableType(configuration) + == SourceTable.DATA_TABLE_SOURCE + ) { + queryPlanBuilder = new TransformingDataTableQueryPlanBuilder(); + } else { + queryPlanBuilder = sourceTable.equals(SourceTable.DATA_TABLE_SOURCE) + ? new DataTableQueryPlanBuilder() + : new IndexTableQueryPlanBuilder(); + } - try (final Connection connection = ConnectionUtil.getInputConnection(configuration, overridingProps)) { - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - Long scn = (currentScnValue != null) ? Long.parseLong(currentScnValue) : EnvironmentEdgeManager.currentTimeMillis(); - setCurrentScnValue(configuration, scn); - - Long startTime = (startTimeValue == null) ? 0L : Long.parseLong(startTimeValue); - - queryPlan = queryPlanBuilder.getQueryPlan(phoenixConnection, dataTableFullName, indexTableFullName); - Scan scan = queryPlan.getContext().getScan(); - - Long lastVerifyTimeValue = lastVerifyTime == null ? 
0L : Long.parseLong(lastVerifyTime); - try { - scan.setTimeRange(startTime, scn); - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); - // Serialize page row size only if we're overriding, else use server side value - String rebuildPageRowSize = - configuration.get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); - if (rebuildPageRowSize != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, - Bytes.toBytes(Long.parseLong(rebuildPageRowSize))); - } - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE, getIndexVerifyType(configuration).toBytes()); - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY, Bytes.toBytes(lastVerifyTimeValue)); - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE, - getDisableLoggingVerifyType(configuration).toBytes()); - String shouldLogMaxLookbackOutput = - configuration.get(IndexRebuildRegionScanner.PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS); - if (shouldLogMaxLookbackOutput != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE, - Bytes.toBytes(shouldLogMaxLookbackOutput)); - } - } catch (IOException e) { - throw new SQLException(e); - } - // since we can't set a scn on connections with txn set TX_SCN attribute so that the max time range is set by BaseScannerRegionObserver - if (txnScnValue != null) { - scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, Bytes.toBytes(Long.parseLong(txnScnValue))); - } - return queryPlan; - } catch (Exception exception) { - LOGGER.error(String.format("Failed to get the query plan with error [%s]", - exception.getMessage())); - throw new RuntimeException(exception); + try (final Connection connection = + ConnectionUtil.getInputConnection(configuration, overridingProps)) { + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + Long scn = (currentScnValue != null) + ? Long.parseLong(currentScnValue) + : EnvironmentEdgeManager.currentTimeMillis(); + setCurrentScnValue(configuration, scn); + + Long startTime = (startTimeValue == null) ? 0L : Long.parseLong(startTimeValue); + + queryPlan = + queryPlanBuilder.getQueryPlan(phoenixConnection, dataTableFullName, indexTableFullName); + Scan scan = queryPlan.getContext().getScan(); + + Long lastVerifyTimeValue = lastVerifyTime == null ? 
0L : Long.parseLong(lastVerifyTime); + try { + scan.setTimeRange(startTime, scn); + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGING, TRUE_BYTES); + // Serialize page row size only if we're overriding, else use server side value + String rebuildPageRowSize = + configuration.get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); + if (rebuildPageRowSize != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_PAGE_ROWS, + Bytes.toBytes(Long.parseLong(rebuildPageRowSize))); + } + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_REBUILD_VERIFY_TYPE, + getIndexVerifyType(configuration).toBytes()); + scan.setAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY, + Bytes.toBytes(lastVerifyTimeValue)); + scan.setAttribute( + BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE, + getDisableLoggingVerifyType(configuration).toBytes()); + String shouldLogMaxLookbackOutput = configuration + .get(IndexRebuildRegionScanner.PHOENIX_INDEX_MR_LOG_BEYOND_MAX_LOOKBACK_ERRORS); + if (shouldLogMaxLookbackOutput != null) { + scan.setAttribute( + BaseScannerRegionObserverConstants.INDEX_REBUILD_DISABLE_LOGGING_BEYOND_MAXLOOKBACK_AGE, + Bytes.toBytes(shouldLogMaxLookbackOutput)); } + } catch (IOException e) { + throw new SQLException(e); + } + // since we can't set a scn on connections with txn set TX_SCN attribute so that the max time + // range is set by BaseScannerRegionObserver + if (txnScnValue != null) { + scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, + Bytes.toBytes(Long.parseLong(txnScnValue))); + } + return queryPlan; + } catch (Exception exception) { + LOGGER.error( + String.format("Failed to get the query plan with error [%s]", exception.getMessage())); + throw new RuntimeException(exception); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLDeleteJobMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLDeleteJobMapper.java index 807ad0cb2e2..ea76f3f590d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLDeleteJobMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLDeleteJobMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,11 @@ */ package org.apache.phoenix.mapreduce; +import java.io.IOException; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; @@ -29,11 +34,11 @@ import org.apache.phoenix.jdbc.PhoenixResultSet; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.mapreduce.util.ConnectionUtil; +import org.apache.phoenix.mapreduce.util.DefaultMultiViewJobStatusTracker; +import org.apache.phoenix.mapreduce.util.MultiViewJobStatusTracker; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.mapreduce.util.ViewInfoTracker; import org.apache.phoenix.mapreduce.util.ViewInfoWritable.ViewInfoJobState; -import org.apache.phoenix.mapreduce.util.MultiViewJobStatusTracker; -import org.apache.phoenix.mapreduce.util.DefaultMultiViewJobStatusTracker; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.types.PDataType; @@ -42,200 +47,186 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - -public class PhoenixTTLDeleteJobMapper extends Mapper { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTTLDeleteJobMapper.class); - private MultiViewJobStatusTracker multiViewJobStatusTracker; - private static final int DEFAULT_MAX_RETRIES = 3; - private static final int DEFAULT_RETRY_SLEEP_TIME_IN_MS = 10000; - - private void initMultiViewJobStatusTracker(Configuration config) throws Exception { - try { - Class defaultViewDeletionTrackerClass = DefaultMultiViewJobStatusTracker.class; - if (config.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ) != null) { - LOGGER.info("Using customized tracker class : " + config.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ)); - defaultViewDeletionTrackerClass = Class.forName(config.get( - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ)); - } else { - LOGGER.info("Using default tracker class "); - } - this.multiViewJobStatusTracker = (MultiViewJobStatusTracker) - defaultViewDeletionTrackerClass.newInstance(); - } catch (Exception e) { - LOGGER.error("Getting exception While initializing initMultiViewJobStatusTracker " + - "with error message " + e.getMessage()); - throw e; - } +public class PhoenixTTLDeleteJobMapper + extends Mapper { + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTTLDeleteJobMapper.class); + private MultiViewJobStatusTracker multiViewJobStatusTracker; + private static final int DEFAULT_MAX_RETRIES = 3; + private static final int DEFAULT_RETRY_SLEEP_TIME_IN_MS = 10000; + + private void initMultiViewJobStatusTracker(Configuration config) throws Exception { + try { + Class defaultViewDeletionTrackerClass = DefaultMultiViewJobStatusTracker.class; + if (config.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ) != null) { + LOGGER.info("Using customized tracker class : " + + 
config.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ)); + defaultViewDeletionTrackerClass = Class + .forName(config.get(PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ)); + } else { + LOGGER.info("Using default tracker class "); + } + this.multiViewJobStatusTracker = + (MultiViewJobStatusTracker) defaultViewDeletionTrackerClass.newInstance(); + } catch (Exception e) { + LOGGER.error("Getting exception While initializing initMultiViewJobStatusTracker " + + "with error message " + e.getMessage()); + throw e; } + } - @Override - protected void map(NullWritable key, ViewInfoTracker value, Context context) - throws IOException { - try { - final Configuration config = context.getConfiguration(); - - if (this.multiViewJobStatusTracker == null) { - initMultiViewJobStatusTracker(config); - } - - LOGGER.debug(String.format("Deleting from view %s, TenantID %s, and TTL value: %d", - value.getViewName(), value.getTenantId(), value.getTTL())); - - deleteExpiredRows(value, config, context); - - } catch (SQLException e) { - LOGGER.error("Mapper got an exception while deleting expired rows : " - + e.getMessage() ); - throw new IOException(e.getMessage(), e.getCause()); - } catch (Exception e) { - LOGGER.error("Getting IOException while running View TTL Deletion Job mapper " + - "with error : " + e.getMessage()); - throw new IOException(e.getMessage(), e.getCause()); - } - } + @Override + protected void map(NullWritable key, ViewInfoTracker value, Context context) throws IOException { + try { + final Configuration config = context.getConfiguration(); - private void deleteExpiredRows(ViewInfoTracker value, Configuration config, Context context) - throws Exception { - try (PhoenixConnection connection = - (PhoenixConnection) ConnectionUtil.getInputConnection(config)) { - if (value.getTenantId() != null && !value.getTenantId().equals("NULL")) { - Properties props = new Properties(); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, value.getTenantId()); - - try (PhoenixConnection tenantConnection = (PhoenixConnection) - DriverManager.getConnection(connection.getURL(), props)) { - deleteExpiredRows(tenantConnection, value, config, context); - } - } else { - deleteExpiredRows(connection, value, config, context); - } - } - } + if (this.multiViewJobStatusTracker == null) { + initMultiViewJobStatusTracker(config); + } + + LOGGER.debug(String.format("Deleting from view %s, TenantID %s, and TTL value: %d", + value.getViewName(), value.getTenantId(), value.getTTL())); - /* - * Each Mapper that receives a MultiPhoenixViewInputSplit will execute a DeleteMutation/Scan - * (With DELETE_TTL_EXPIRED attribute) per view for all the views and view indexes in the split. - * For each DeleteMutation, it bounded by the view start and stop keys for the region and - * TTL attributes and Delete Hint. 
- */ - private void deleteExpiredRows(PhoenixConnection connection, ViewInfoTracker viewInfoTracker, - Configuration config, Context context) throws Exception { - try (PhoenixStatement pstmt = - new PhoenixStatement(connection).unwrap(PhoenixStatement.class)) { - PTable pTable = connection.getTable(viewInfoTracker.getViewName()); - String deleteIfExpiredStatement = "SELECT /*+ NO_INDEX */ count(*) FROM " + - viewInfoTracker.getViewName(); - - if (viewInfoTracker.isIndexRelation()) { - pTable = connection.getTable(viewInfoTracker.getRelationName()); - deleteIfExpiredStatement = "SELECT count(*) FROM " + - viewInfoTracker.getRelationName(); - } - - String sourceTableName = pTable.getTableName().getString(); - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, - ViewInfoJobState.INITIALIZED.getValue(), config, 0, context.getJobName()); - final QueryPlan queryPlan = pstmt.optimizeQuery(deleteIfExpiredStatement); - final Scan scan = queryPlan.getContext().getScan(); - byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(pTable); - byte[] emptyColumnName = pTable.getEncodingScheme() == - PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - pTable.getEncodingScheme().encode( - QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - - scan.setAttribute( - BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, emptyColumnFamilyName); - scan.setAttribute( - BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, emptyColumnName); - scan.setAttribute( - BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED, PDataType.TRUE_BYTES); - scan.setAttribute( - BaseScannerRegionObserverConstants.TTL, - Bytes.toBytes(viewInfoTracker.getTTL())); - scan.setAttribute( - BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME, - Bytes.toBytes(sourceTableName)); - - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, - ViewInfoJobState.RUNNING.getValue(), config, 0, context.getJobName()); - - addingDeletionMarkWithRetries(pstmt, viewInfoTracker, config, context, - queryPlan); - } catch (Exception e) { - if (e instanceof SQLException && ((SQLException) e).getErrorCode() == - SQLExceptionCode.TABLE_UNDEFINED.getErrorCode()) { - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, - ViewInfoJobState.DELETED.getValue(), config, 0, context.getJobName()); - } - LOGGER.error(String.format("Had an issue to process the view: %s, " + - "see error %s ", viewInfoTracker.toString(),e.getMessage())); + deleteExpiredRows(value, config, context); + + } catch (SQLException e) { + LOGGER.error("Mapper got an exception while deleting expired rows : " + e.getMessage()); + throw new IOException(e.getMessage(), e.getCause()); + } catch (Exception e) { + LOGGER.error("Getting IOException while running View TTL Deletion Job mapper " + + "with error : " + e.getMessage()); + throw new IOException(e.getMessage(), e.getCause()); + } + } + + private void deleteExpiredRows(ViewInfoTracker value, Configuration config, Context context) + throws Exception { + try (PhoenixConnection connection = + (PhoenixConnection) ConnectionUtil.getInputConnection(config)) { + if (value.getTenantId() != null && !value.getTenantId().equals("NULL")) { + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, value.getTenantId()); + + try (PhoenixConnection tenantConnection = + (PhoenixConnection) DriverManager.getConnection(connection.getURL(), props)) { + deleteExpiredRows(tenantConnection, value, config, context); 
} + } else { + deleteExpiredRows(connection, value, config, context); + } } - - private boolean addingDeletionMarkWithRetries(PhoenixStatement stmt, - ViewInfoTracker viewInfoTracker, - Configuration config, Context context, - QueryPlan queryPlan) - throws Exception { - int retry = 0; - long startTime = System.currentTimeMillis(); - String viewInfo = viewInfoTracker.getTenantId() == null ? - viewInfoTracker.getViewName() : viewInfoTracker.getTenantId() - + "." + viewInfoTracker.getViewName(); - - while (retry < DEFAULT_MAX_RETRIES) { - try { - PhoenixResultSet rs = stmt.newResultSet( - queryPlan.iterator(), queryPlan.getProjector(), queryPlan.getContext()); - - long numberOfDeletedRows = 0; - if (rs.next()) { - numberOfDeletedRows = rs.getLong(1); - } - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, numberOfDeletedRows, - ViewInfoJobState.SUCCEEDED.getValue(), config, - System.currentTimeMillis() - startTime, context.getJobName()); - PhoenixTTLTool.MR_COUNTER_METRICS metricsStatus = - viewInfoTracker.isIndexRelation() ? - PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_INDEX_SUCCEED : - PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_SUCCEED; - context.getCounter(metricsStatus).increment(1); - return true; - } catch (Exception e) { - PhoenixTTLTool.MR_COUNTER_METRICS metricsStatus = - viewInfoTracker.isIndexRelation() ? - PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_INDEX_FAILED : - PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_FAILED; - if (e instanceof SQLException && ((SQLException) e).getErrorCode() == - SQLExceptionCode.TABLE_UNDEFINED.getErrorCode()) { - LOGGER.info(viewInfo + " has been deleted : " + e.getMessage()); - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, - ViewInfoJobState.DELETED.getValue(), config, 0, context.getJobName()); - context.getCounter(metricsStatus).increment(1); - return false; - } - retry++; - - if (retry == DEFAULT_MAX_RETRIES) { - LOGGER.error("Deleting " + viewInfo + " expired rows has an exception for : " - + e.getMessage()); - this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, - ViewInfoJobState.FAILED.getValue(), config, 0, context.getJobName()); - context.getCounter(metricsStatus).increment(1); - throw e; - } else { - Thread.sleep(DEFAULT_RETRY_SLEEP_TIME_IN_MS); - } - } + } + + /* + * Each Mapper that receives a MultiPhoenixViewInputSplit will execute a DeleteMutation/Scan (With + * DELETE_TTL_EXPIRED attribute) per view for all the views and view indexes in the split. For + * each DeleteMutation, it bounded by the view start and stop keys for the region and TTL + * attributes and Delete Hint. 
+ */ + private void deleteExpiredRows(PhoenixConnection connection, ViewInfoTracker viewInfoTracker, + Configuration config, Context context) throws Exception { + try (PhoenixStatement pstmt = new PhoenixStatement(connection).unwrap(PhoenixStatement.class)) { + PTable pTable = connection.getTable(viewInfoTracker.getViewName()); + String deleteIfExpiredStatement = + "SELECT /*+ NO_INDEX */ count(*) FROM " + viewInfoTracker.getViewName(); + + if (viewInfoTracker.isIndexRelation()) { + pTable = connection.getTable(viewInfoTracker.getRelationName()); + deleteIfExpiredStatement = "SELECT count(*) FROM " + viewInfoTracker.getRelationName(); + } + + String sourceTableName = pTable.getTableName().getString(); + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, + ViewInfoJobState.INITIALIZED.getValue(), config, 0, context.getJobName()); + final QueryPlan queryPlan = pstmt.optimizeQuery(deleteIfExpiredStatement); + final Scan scan = queryPlan.getContext().getScan(); + byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(pTable); + byte[] emptyColumnName = + pTable.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : pTable.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_FAMILY_NAME, + emptyColumnFamilyName); + scan.setAttribute(BaseScannerRegionObserverConstants.EMPTY_COLUMN_QUALIFIER_NAME, + emptyColumnName); + scan.setAttribute(BaseScannerRegionObserverConstants.DELETE_PHOENIX_TTL_EXPIRED, + PDataType.TRUE_BYTES); + scan.setAttribute(BaseScannerRegionObserverConstants.TTL, + Bytes.toBytes(viewInfoTracker.getTTL())); + scan.setAttribute(BaseScannerRegionObserverConstants.PHOENIX_TTL_SCAN_TABLE_NAME, + Bytes.toBytes(sourceTableName)); + + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, + ViewInfoJobState.RUNNING.getValue(), config, 0, context.getJobName()); + + addingDeletionMarkWithRetries(pstmt, viewInfoTracker, config, context, queryPlan); + } catch (Exception e) { + if ( + e instanceof SQLException + && ((SQLException) e).getErrorCode() == SQLExceptionCode.TABLE_UNDEFINED.getErrorCode() + ) { + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, + ViewInfoJobState.DELETED.getValue(), config, 0, context.getJobName()); + } + LOGGER.error(String.format("Had an issue to process the view: %s, " + "see error %s ", + viewInfoTracker.toString(), e.getMessage())); + } + } + + private boolean addingDeletionMarkWithRetries(PhoenixStatement stmt, + ViewInfoTracker viewInfoTracker, Configuration config, Context context, QueryPlan queryPlan) + throws Exception { + int retry = 0; + long startTime = System.currentTimeMillis(); + String viewInfo = viewInfoTracker.getTenantId() == null + ? viewInfoTracker.getViewName() + : viewInfoTracker.getTenantId() + "." + viewInfoTracker.getViewName(); + + while (retry < DEFAULT_MAX_RETRIES) { + try { + PhoenixResultSet rs = + stmt.newResultSet(queryPlan.iterator(), queryPlan.getProjector(), queryPlan.getContext()); + + long numberOfDeletedRows = 0; + if (rs.next()) { + numberOfDeletedRows = rs.getLong(1); + } + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, numberOfDeletedRows, + ViewInfoJobState.SUCCEEDED.getValue(), config, System.currentTimeMillis() - startTime, + context.getJobName()); + PhoenixTTLTool.MR_COUNTER_METRICS metricsStatus = viewInfoTracker.isIndexRelation() + ? 
PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_INDEX_SUCCEED + : PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_SUCCEED; + context.getCounter(metricsStatus).increment(1); + return true; + } catch (Exception e) { + PhoenixTTLTool.MR_COUNTER_METRICS metricsStatus = viewInfoTracker.isIndexRelation() + ? PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_INDEX_FAILED + : PhoenixTTLTool.MR_COUNTER_METRICS.VIEW_FAILED; + if ( + e instanceof SQLException + && ((SQLException) e).getErrorCode() == SQLExceptionCode.TABLE_UNDEFINED.getErrorCode() + ) { + LOGGER.info(viewInfo + " has been deleted : " + e.getMessage()); + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, + ViewInfoJobState.DELETED.getValue(), config, 0, context.getJobName()); + context.getCounter(metricsStatus).increment(1); + return false; + } + retry++; + + if (retry == DEFAULT_MAX_RETRIES) { + LOGGER.error( + "Deleting " + viewInfo + " expired rows has an exception for : " + e.getMessage()); + this.multiViewJobStatusTracker.updateJobStatus(viewInfoTracker, 0, + ViewInfoJobState.FAILED.getValue(), config, 0, context.getJobName()); + context.getCounter(metricsStatus).increment(1); + throw e; + } else { + Thread.sleep(DEFAULT_RETRY_SLEEP_TIME_IN_MS); } - return false; + } } -} \ No newline at end of file + return false; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLTool.java index b8d6b8a3261..abd48a87794 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTTLTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,9 @@ */ package org.apache.phoenix.mapreduce; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; +import java.sql.Connection; +import java.util.Properties; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -37,287 +33,294 @@ import org.apache.phoenix.mapreduce.util.ConnectionUtil; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; -import java.util.Properties; - public class PhoenixTTLTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTTLTool.class); - - public static enum MR_COUNTER_METRICS { - VIEW_FAILED, - VIEW_SUCCEED, - VIEW_INDEX_FAILED, - VIEW_INDEX_SUCCEED - } - - public static final String DELETE_ALL_VIEWS = "DELETE_ALL_VIEWS"; - public static final int DEFAULT_MAPPER_SPLIT_SIZE = 10; - public static final int DEFAULT_QUERY_BATCH_SIZE = 100; - - private static final Option DELETE_ALL_VIEWS_OPTION = new Option("a", "all", false, - "Delete all views from all tables."); - private static final Option VIEW_NAME_OPTION = new Option("v", "view", true, - "Delete Phoenix View Name"); - private static final Option TENANT_ID_OPTION = new Option("i", "id", true, - "Delete an view based on the tenant id."); - private static final Option JOB_PRIORITY_OPTION = new Option("p", "job-priority", true, - "Define job priority from 0(highest) to 4"); - private static final Option SPLIT_SIZE_OPTION = new Option("s", "split-size-per-mapper", true, - "Define split size for each mapper."); - private static final Option BATCH_SIZE_OPTION = new Option("b", "batch-size-for-query-more", true, - "Define batch size for fetching views metadata from syscat."); - private static final Option RUN_FOREGROUND_OPTION = new Option("runfg", - "run-foreground", false, "If specified, runs PhoenixTTLTool " + - "in Foreground. 
Default - Runs the build in background"); - - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - - private Configuration configuration; - private Connection connection; - private String viewName; - private String tenantId; - private String jobName; - private boolean isDeletingAllViews; - private JobPriority jobPriority; - private boolean isForeground; - private int splitSize; - private int batchSize; - private Job job; - - public void parseArgs(String[] args) { - CommandLine cmdLine; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - throw e; - } - - if (getConf() == null) { - setConf(HBaseConfiguration.create()); - } - - if (cmdLine.hasOption(DELETE_ALL_VIEWS_OPTION.getOpt())) { - this.isDeletingAllViews = true; - } else if (cmdLine.hasOption(VIEW_NAME_OPTION.getOpt())) { - viewName = cmdLine.getOptionValue(VIEW_NAME_OPTION.getOpt()); - this.isDeletingAllViews = false; - } - - if (cmdLine.hasOption(TENANT_ID_OPTION.getOpt())) { - tenantId = cmdLine.getOptionValue((TENANT_ID_OPTION.getOpt())); - } - - if (cmdLine.hasOption(SPLIT_SIZE_OPTION.getOpt())) { - splitSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); - } else { - splitSize = DEFAULT_MAPPER_SPLIT_SIZE; - } - - if (cmdLine.hasOption(BATCH_SIZE_OPTION.getOpt())) { - batchSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); - } else { - batchSize = DEFAULT_QUERY_BATCH_SIZE; - } - - isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); - } - - public String getJobPriority() { - return this.jobPriority.toString(); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTTLTool.class); + + public static enum MR_COUNTER_METRICS { + VIEW_FAILED, + VIEW_SUCCEED, + VIEW_INDEX_FAILED, + VIEW_INDEX_SUCCEED + } + + public static final String DELETE_ALL_VIEWS = "DELETE_ALL_VIEWS"; + public static final int DEFAULT_MAPPER_SPLIT_SIZE = 10; + public static final int DEFAULT_QUERY_BATCH_SIZE = 100; + + private static final Option DELETE_ALL_VIEWS_OPTION = + new Option("a", "all", false, "Delete all views from all tables."); + private static final Option VIEW_NAME_OPTION = + new Option("v", "view", true, "Delete Phoenix View Name"); + private static final Option TENANT_ID_OPTION = + new Option("i", "id", true, "Delete an view based on the tenant id."); + private static final Option JOB_PRIORITY_OPTION = + new Option("p", "job-priority", true, "Define job priority from 0(highest) to 4"); + private static final Option SPLIT_SIZE_OPTION = + new Option("s", "split-size-per-mapper", true, "Define split size for each mapper."); + private static final Option BATCH_SIZE_OPTION = new Option("b", "batch-size-for-query-more", true, + "Define batch size for fetching views metadata from syscat."); + private static final Option RUN_FOREGROUND_OPTION = new Option("runfg", "run-foreground", false, + "If specified, runs PhoenixTTLTool " + "in Foreground. 
Default - Runs the build in background"); + + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + + private Configuration configuration; + private Connection connection; + private String viewName; + private String tenantId; + private String jobName; + private boolean isDeletingAllViews; + private JobPriority jobPriority; + private boolean isForeground; + private int splitSize; + private int batchSize; + private Job job; + + public void parseArgs(String[] args) { + CommandLine cmdLine; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + throw e; } - private JobPriority getJobPriority(CommandLine cmdLine) { - String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); - if (jobPriorityOption == null) { - return JobPriority.NORMAL; - } - - switch (jobPriorityOption) { - case "0" : return JobPriority.VERY_HIGH; - case "1" : return JobPriority.HIGH; - case "2" : return JobPriority.NORMAL; - case "3" : return JobPriority.LOW; - case "4" : return JobPriority.VERY_LOW; - default: - return JobPriority.NORMAL; - } + if (getConf() == null) { + setConf(HBaseConfiguration.create()); } - public Job getJob() { - return this.job; + if (cmdLine.hasOption(DELETE_ALL_VIEWS_OPTION.getOpt())) { + this.isDeletingAllViews = true; + } else if (cmdLine.hasOption(VIEW_NAME_OPTION.getOpt())) { + viewName = cmdLine.getOptionValue(VIEW_NAME_OPTION.getOpt()); + this.isDeletingAllViews = false; } - public boolean isDeletingAllViews() { - return this.isDeletingAllViews; + if (cmdLine.hasOption(TENANT_ID_OPTION.getOpt())) { + tenantId = cmdLine.getOptionValue((TENANT_ID_OPTION.getOpt())); } - public String getTenantId() { - return this.tenantId; + if (cmdLine.hasOption(SPLIT_SIZE_OPTION.getOpt())) { + splitSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); + } else { + splitSize = DEFAULT_MAPPER_SPLIT_SIZE; } - public String getViewName() { - return this.viewName; + if (cmdLine.hasOption(BATCH_SIZE_OPTION.getOpt())) { + batchSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); + } else { + batchSize = DEFAULT_QUERY_BATCH_SIZE; } - public int getSplitSize() { - return this.splitSize; - } - - public int getBatchSize() { - return this.batchSize; - } + isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + } - public CommandLine parseOptions(String[] args) { - final Options options = getOptions(); - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). 
- build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), - options); - } - - if (!cmdLine.hasOption(DELETE_ALL_VIEWS_OPTION.getOpt()) && - !cmdLine.hasOption(VIEW_NAME_OPTION.getOpt()) && - !cmdLine.hasOption(TENANT_ID_OPTION.getOpt())) { - throw new IllegalStateException("No deletion job is specified, " + - "please indicate deletion job for ALL/VIEW/TENANT level"); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - - this.jobPriority = getJobPriority(cmdLine); - - return cmdLine; - } + public String getJobPriority() { + return this.jobPriority.toString(); + } - private Options getOptions() { - final Options options = new Options(); - options.addOption(DELETE_ALL_VIEWS_OPTION); - options.addOption(VIEW_NAME_OPTION); - options.addOption(TENANT_ID_OPTION); - options.addOption(HELP_OPTION); - options.addOption(JOB_PRIORITY_OPTION); - options.addOption(RUN_FOREGROUND_OPTION); - options.addOption(SPLIT_SIZE_OPTION); - options.addOption(BATCH_SIZE_OPTION); - - return options; + private JobPriority getJobPriority(CommandLine cmdLine) { + String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); + if (jobPriorityOption == null) { + return JobPriority.NORMAL; } - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - LOGGER.error(errorMessage); - printHelpAndExit(options, 1); + switch (jobPriorityOption) { + case "0": + return JobPriority.VERY_HIGH; + case "1": + return JobPriority.HIGH; + case "2": + return JobPriority.NORMAL; + case "3": + return JobPriority.LOW; + case "4": + return JobPriority.VERY_LOW; + default: + return JobPriority.NORMAL; } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + } + + public Job getJob() { + return this.job; + } + + public boolean isDeletingAllViews() { + return this.isDeletingAllViews; + } + + public String getTenantId() { + return this.tenantId; + } + + public String getViewName() { + return this.viewName; + } + + public int getSplitSize() { + return this.splitSize; + } + + public int getBatchSize() { + return this.batchSize; + } + + public CommandLine parseOptions(String[] args) { + final Options options = getOptions(); + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - public void setJobName(String jobName) { - this.jobName = jobName; + if ( + !cmdLine.hasOption(DELETE_ALL_VIEWS_OPTION.getOpt()) + && !cmdLine.hasOption(VIEW_NAME_OPTION.getOpt()) + && !cmdLine.hasOption(TENANT_ID_OPTION.getOpt()) + ) { + throw new IllegalStateException("No deletion job is specified, " + + "please indicate deletion job for ALL/VIEW/TENANT level"); } - public String getJobName() { - if (this.jobName == null) { - String jobName; - if (this.isDeletingAllViews) { - jobName = DELETE_ALL_VIEWS; - } else if (this.getViewName() != null) { - jobName = this.getViewName(); - } else { - jobName = this.tenantId; - } - this.jobName = "PhoenixTTLTool-" + jobName + "-"; - } - - return this.jobName; + if 
(cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - public void setPhoenixTTLJobInputConfig(Configuration configuration) { - if (this.isDeletingAllViews) { - configuration.set(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS, - DELETE_ALL_VIEWS); - } else if (this.getViewName() != null) { - configuration.set(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW, - this.viewName); - } - - if (this.tenantId != null) { - configuration.set(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID, this.tenantId); - } + this.jobPriority = getJobPriority(cmdLine); + + return cmdLine; + } + + private Options getOptions() { + final Options options = new Options(); + options.addOption(DELETE_ALL_VIEWS_OPTION); + options.addOption(VIEW_NAME_OPTION); + options.addOption(TENANT_ID_OPTION); + options.addOption(HELP_OPTION); + options.addOption(JOB_PRIORITY_OPTION); + options.addOption(RUN_FOREGROUND_OPTION); + options.addOption(SPLIT_SIZE_OPTION); + options.addOption(BATCH_SIZE_OPTION); + + return options; + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + LOGGER.error(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + public void setJobName(String jobName) { + this.jobName = jobName; + } + + public String getJobName() { + if (this.jobName == null) { + String jobName; + if (this.isDeletingAllViews) { + jobName = DELETE_ALL_VIEWS; + } else if (this.getViewName() != null) { + jobName = this.getViewName(); + } else { + jobName = this.tenantId; + } + this.jobName = "PhoenixTTLTool-" + jobName + "-"; } - public void configureJob() throws Exception { - this.job = Job.getInstance(getConf(),getJobName()); - PhoenixMapReduceUtil.setInput(job, this); - - job.setJarByClass(PhoenixTTLTool.class); - job.setMapperClass(PhoenixTTLDeleteJobMapper.class); - job.setMapOutputKeyClass(NullWritable.class); - job.setMapOutputValueClass(NullWritable.class); - job.setOutputFormatClass(NullOutputFormat.class); - job.setNumReduceTasks(0); - job.setPriority(this.jobPriority); + return this.jobName; + } - TableMapReduceUtil.addDependencyJars(job); - LOGGER.info("PhoenixTTLTool is running for " + job.getJobName()); + public void setPhoenixTTLJobInputConfig(Configuration configuration) { + if (this.isDeletingAllViews) { + configuration.set(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS, + DELETE_ALL_VIEWS); + } else if (this.getViewName() != null) { + configuration.set(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW, + this.viewName); } - public int runJob() { - try { - if (isForeground) { - LOGGER.info("Running PhoenixTTLTool in foreground. " + - "Runs full table scans. This may take a long time!"); - return (job.waitForCompletion(true)) ? 
0 : 1; - } else { - LOGGER.info("Running PhoenixTTLTool in Background - Submit async and exit"); - job.submit(); - return 0; - } - } catch (Exception e) { - LOGGER.error("Caught exception " + e + " trying to run PhoenixTTLTool."); - return 1; - } + if (this.tenantId != null) { + configuration.set(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID, this.tenantId); } - - @Override - public int run(String[] args) throws Exception { - connection = null; - int ret; - try { - parseArgs(args); - configuration = HBaseConfiguration.addHbaseResources(getConf()); - connection = ConnectionUtil.getInputConnection(configuration, new Properties()); - configureJob(); - TableMapReduceUtil.initCredentials(job); - ret = runJob(); - } catch (Exception e) { - printHelpAndExit(e.getMessage(), getOptions()); - return -1; - } finally { - if (connection != null) { - connection.close(); - } - } - return ret; + } + + public void configureJob() throws Exception { + this.job = Job.getInstance(getConf(), getJobName()); + PhoenixMapReduceUtil.setInput(job, this); + + job.setJarByClass(PhoenixTTLTool.class); + job.setMapperClass(PhoenixTTLDeleteJobMapper.class); + job.setMapOutputKeyClass(NullWritable.class); + job.setMapOutputValueClass(NullWritable.class); + job.setOutputFormatClass(NullOutputFormat.class); + job.setNumReduceTasks(0); + job.setPriority(this.jobPriority); + + TableMapReduceUtil.addDependencyJars(job); + LOGGER.info("PhoenixTTLTool is running for " + job.getJobName()); + } + + public int runJob() { + try { + if (isForeground) { + LOGGER.info("Running PhoenixTTLTool in foreground. " + + "Runs full table scans. This may take a long time!"); + return (job.waitForCompletion(true)) ? 0 : 1; + } else { + LOGGER.info("Running PhoenixTTLTool in Background - Submit async and exit"); + job.submit(); + return 0; + } + } catch (Exception e) { + LOGGER.error("Caught exception " + e + " trying to run PhoenixTTLTool."); + return 1; } - - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new PhoenixTTLTool(), args); - System.exit(result); + } + + @Override + public int run(String[] args) throws Exception { + connection = null; + int ret; + try { + parseArgs(args); + configuration = HBaseConfiguration.addHbaseResources(getConf()); + connection = ConnectionUtil.getInputConnection(configuration, new Properties()); + configureJob(); + TableMapReduceUtil.initCredentials(job); + ret = runJob(); + } catch (Exception e) { + printHelpAndExit(e.getMessage(), getOptions()); + return -1; + } finally { + if (connection != null) { + connection.close(); + } } -} \ No newline at end of file + return ret; + } + + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new PhoenixTTLTool(), args); + System.exit(result); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java index 9f8080fe7a7..09a59f21cb2 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/PhoenixTextInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,8 +32,8 @@ import org.slf4j.LoggerFactory; /** - * Wrapper around TextInputFormat which can ignore the first line in the first InputSplit - * for a file. + * Wrapper around TextInputFormat which can ignore the first line in the first InputSplit for a + * file. */ public class PhoenixTextInputFormat extends TextInputFormat { public static final String SKIP_HEADER_KEY = "phoenix.input.format.skip.header"; @@ -43,21 +43,24 @@ public static void setSkipHeader(Configuration conf) { } @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) { - RecordReader rr = super.createRecordReader(split, context); - + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) { + RecordReader rr = super.createRecordReader(split, context); + return new PhoenixLineRecordReader((LineRecordReader) rr); } - public static class PhoenixLineRecordReader extends RecordReader { + public static class PhoenixLineRecordReader extends RecordReader { private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixLineRecordReader.class); private final LineRecordReader rr; + private PhoenixLineRecordReader(LineRecordReader rr) { this.rr = rr; } @Override - public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException, InterruptedException { + public void initialize(InputSplit genericSplit, TaskAttemptContext context) + throws IOException, InterruptedException { rr.initialize(genericSplit, context); final Configuration conf = context.getConfiguration(); final FileSplit split = (FileSplit) genericSplit; diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexBulkLoadTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexBulkLoadTool.java index c29c8edbf34..65db47ffc4a 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexBulkLoadTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexBulkLoadTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,55 +20,57 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.ToolRunner; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; import org.apache.phoenix.util.ColumnInfo; /** - * A tool for running MapReduce-based ingests of input data based on regex. - * Lists are converted into typed ARRAYS. + * A tool for running MapReduce-based ingests of input data based on regex. 
Lists are converted into + * typed ARRAYS. */ public class RegexBulkLoadTool extends AbstractBulkLoadTool { - static final Option REGEX_OPT = new Option("r", "regex", true, "Input regex String, defaults is (.*)"); - static final Option ARRAY_DELIMITER_OPT = new Option("a", "array-delimiter", true, "Array element delimiter (optional), defaults is ','"); + static final Option REGEX_OPT = + new Option("r", "regex", true, "Input regex String, defaults is (.*)"); + static final Option ARRAY_DELIMITER_OPT = + new Option("a", "array-delimiter", true, "Array element delimiter (optional), defaults is ','"); - @Override - protected Options getOptions() { - Options options = super.getOptions(); - options.addOption(REGEX_OPT); - options.addOption(ARRAY_DELIMITER_OPT); - return options; - } + @Override + protected Options getOptions() { + Options options = super.getOptions(); + options.addOption(REGEX_OPT); + options.addOption(ARRAY_DELIMITER_OPT); + return options; + } - @Override - protected void configureOptions(CommandLine cmdLine, List importColumns, - Configuration conf) throws SQLException { - if (cmdLine.hasOption(REGEX_OPT.getOpt())) { - String regexString = cmdLine.getOptionValue(REGEX_OPT.getOpt()); - conf.set(RegexToKeyValueMapper.REGEX_CONFKEY, regexString); - } - - if (cmdLine.hasOption(ARRAY_DELIMITER_OPT.getOpt())) { - String arraySeparator = cmdLine.getOptionValue(ARRAY_DELIMITER_OPT.getOpt()); - conf.set(RegexToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arraySeparator); - } + @Override + protected void configureOptions(CommandLine cmdLine, List importColumns, + Configuration conf) throws SQLException { + if (cmdLine.hasOption(REGEX_OPT.getOpt())) { + String regexString = cmdLine.getOptionValue(REGEX_OPT.getOpt()); + conf.set(RegexToKeyValueMapper.REGEX_CONFKEY, regexString); } - @Override - protected void setupJob(Job job) { - // Allow overriding the job jar setting by using a -D system property at startup - if (job.getJar() == null) { - job.setJarByClass(RegexToKeyValueMapper.class); - } - job.setMapperClass(RegexToKeyValueMapper.class); + if (cmdLine.hasOption(ARRAY_DELIMITER_OPT.getOpt())) { + String arraySeparator = cmdLine.getOptionValue(ARRAY_DELIMITER_OPT.getOpt()); + conf.set(RegexToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arraySeparator); } + } - public static void main(String[] args) throws Exception { - ToolRunner.run(new RegexBulkLoadTool(), args); + @Override + protected void setupJob(Job job) { + // Allow overriding the job jar setting by using a -D system property at startup + if (job.getJar() == null) { + job.setJarByClass(RegexToKeyValueMapper.class); } + job.setMapperClass(RegexToKeyValueMapper.class); + } + + public static void main(String[] args) throws Exception { + ToolRunner.run(new RegexBulkLoadTool(), args); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java index 3621c995a4a..33f3d382846 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,110 +28,107 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PTimestamp; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.UpsertExecutor; import org.apache.phoenix.util.regex.RegexUpsertExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** - * MapReduce mapper that converts input lines into KeyValues based on the Regex that can be written to HFiles. - * - * KeyValues are produced by executing UPSERT statements on a Phoenix connection and then - * extracting the created KeyValues and rolling back the statement execution before it is - * committed to HBase. + * MapReduce mapper that converts input lines into KeyValues based on the Regex that can be written + * to HFiles. KeyValues are produced by executing UPSERT statements on a Phoenix connection and then + * extracting the created KeyValues and rolling back the statement execution before it is committed + * to HBase. */ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper> { - protected static final Logger LOGGER = LoggerFactory.getLogger(RegexToKeyValueMapper.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(RegexToKeyValueMapper.class); - /** Configuration key for the regex */ - public static final String REGEX_CONFKEY = "phoenix.mapreduce.import.regex"; + /** Configuration key for the regex */ + public static final String REGEX_CONFKEY = "phoenix.mapreduce.import.regex"; - /** Configuration key for the array element delimiter for input arrays */ - public static final String ARRAY_DELIMITER_CONFKEY = "phoenix.mapreduce.import.arraydelimiter"; - - /** Configuration key for default array delimiter */ - public static final String ARRAY_DELIMITER_DEFAULT = ","; - - private LineParser> lineParser; - - @Override - protected LineParser> getLineParser() { - return lineParser; - } + /** Configuration key for the array element delimiter for input arrays */ + public static final String ARRAY_DELIMITER_CONFKEY = "phoenix.mapreduce.import.arraydelimiter"; - @Override - protected void setup(Context context) throws IOException, InterruptedException { - super.setup(context); - } + /** Configuration key for default array delimiter */ + public static final String ARRAY_DELIMITER_DEFAULT = ","; - @VisibleForTesting - @Override - protected UpsertExecutor, ?> buildUpsertExecutor(Configuration conf) { - String tableName = conf.get(TABLE_NAME_CONFKEY); - Preconditions.checkNotNull(tableName, "table name is not configured"); - - String regex = conf.get(REGEX_CONFKEY); - Preconditions.checkNotNull(regex, "regex is not configured"); - - List columnInfoList = buildColumnInfoList(conf); - - String arraySeparator = conf.get(ARRAY_DELIMITER_CONFKEY, ARRAY_DELIMITER_DEFAULT); - - lineParser = new RegexLineParser(regex, columnInfoList, arraySeparator); - - return new RegexUpsertExecutor(conn, 
tableName, columnInfoList, upsertListener); + private LineParser> lineParser; + + @Override + protected LineParser> getLineParser() { + return lineParser; + } + + @Override + protected void setup(Context context) throws IOException, InterruptedException { + super.setup(context); + } + + @VisibleForTesting + @Override + protected UpsertExecutor, ?> buildUpsertExecutor(Configuration conf) { + String tableName = conf.get(TABLE_NAME_CONFKEY); + Preconditions.checkNotNull(tableName, "table name is not configured"); + + String regex = conf.get(REGEX_CONFKEY); + Preconditions.checkNotNull(regex, "regex is not configured"); + + List columnInfoList = buildColumnInfoList(conf); + + String arraySeparator = conf.get(ARRAY_DELIMITER_CONFKEY, ARRAY_DELIMITER_DEFAULT); + + lineParser = new RegexLineParser(regex, columnInfoList, arraySeparator); + + return new RegexUpsertExecutor(conn, tableName, columnInfoList, upsertListener); + } + + /** + * Parses a single input line with regex, returning a {@link Map} objects. + */ + @VisibleForTesting + static class RegexLineParser implements LineParser> { + private Pattern inputPattern; + private List columnInfoList; + private String arraySeparator; + + public RegexLineParser(String regex, List columnInfo, String arraySep) { + inputPattern = Pattern.compile(regex); + columnInfoList = columnInfo; + arraySeparator = arraySep; } /** - * Parses a single input line with regex, returning a {@link Map} objects. + * based on the regex and input, providing mapping between schema and input */ - @VisibleForTesting - static class RegexLineParser implements LineParser> { - private Pattern inputPattern; - private List columnInfoList; - private String arraySeparator; - - public RegexLineParser(String regex, List columnInfo, String arraySep) { - inputPattern = Pattern.compile(regex); - columnInfoList = columnInfo; - arraySeparator = arraySep; - } - - /** - * based on the regex and input, providing mapping between schema and input - */ - @Override - public Map parse(String input) throws IOException { - Map data = new HashMap<>(); - Matcher m = inputPattern.matcher(input); - if (m.groupCount() != columnInfoList.size()) { - LOGGER.debug(String.format("based on the regex and input, input fileds %s size " + - "doesn't match the table columns %s size", - m.groupCount(), columnInfoList.size())); - return data; - } - - if (m.find( )) { - for (int i = 0; i < columnInfoList.size(); i++) { - ColumnInfo columnInfo = columnInfoList.get(i); - String colName = columnInfo.getColumnName(); - String value = m.group(i + 1); - PDataType pDataType = PDataType.fromTypeId(columnInfo.getSqlType()); - if (pDataType.isArrayType()) { - data.put(colName, Arrays.asList(value.split(arraySeparator))); - } else if (pDataType.isCoercibleTo(PTimestamp.INSTANCE)) { - data.put(colName, value); - } else { - data.put(colName, pDataType.toObject(value)); - } - } - } - return data; + @Override + public Map parse(String input) throws IOException { + Map data = new HashMap<>(); + Matcher m = inputPattern.matcher(input); + if (m.groupCount() != columnInfoList.size()) { + LOGGER.debug(String.format("based on the regex and input, input fileds %s size " + + "doesn't match the table columns %s size", m.groupCount(), columnInfoList.size())); + return data; + } + + if (m.find()) { + for (int i = 0; i < columnInfoList.size(); i++) { + ColumnInfo columnInfo = columnInfoList.get(i); + String colName = columnInfo.getColumnName(); + String value = m.group(i + 1); + PDataType pDataType = PDataType.fromTypeId(columnInfo.getSqlType()); 
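+          // Coerce the captured group into the column's declared type: array columns are
+          // split on the configured array delimiter, timestamp-coercible columns are passed
+          // through as raw strings, and all other types are converted via PDataType.toObject.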
+ if (pDataType.isArrayType()) { + data.put(colName, Arrays.asList(value.split(arraySeparator))); + } else if (pDataType.isCoercibleTo(PTimestamp.INSTANCE)) { + data.put(colName, value); + } else { + data.put(colName, pDataType.toObject(value)); + } } + } + return data; } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java index b050ab74988..3dfce59b0ae 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TableRowkeyPair.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,114 +26,111 @@ import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableUtils; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - - /** * A WritableComparable to hold the table name and the rowkey. */ public class TableRowkeyPair implements WritableComparable { - /* The qualified table name */ - private String tableName; - - /* The rowkey for the record */ - private ImmutableBytesWritable rowkey; - - /** - * Default constructor - */ - public TableRowkeyPair() { - super(); - } - - public TableRowkeyPair(String tableName, ImmutableBytesWritable rowkey) { - super(); - Preconditions.checkNotNull(tableName); - Preconditions.checkNotNull(rowkey); - this.tableName = tableName; - this.rowkey = rowkey; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public ImmutableBytesWritable getRowkey() { - return rowkey; + /* The qualified table name */ + private String tableName; + + /* The rowkey for the record */ + private ImmutableBytesWritable rowkey; + + /** + * Default constructor + */ + public TableRowkeyPair() { + super(); + } + + public TableRowkeyPair(String tableName, ImmutableBytesWritable rowkey) { + super(); + Preconditions.checkNotNull(tableName); + Preconditions.checkNotNull(rowkey); + this.tableName = tableName; + this.rowkey = rowkey; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public ImmutableBytesWritable getRowkey() { + return rowkey; + } + + public void setRowkey(ImmutableBytesWritable rowkey) { + this.rowkey = rowkey; + } + + @Override + public void readFields(DataInput input) throws IOException { + tableName = WritableUtils.readString(input); + rowkey = new ImmutableBytesWritable(); + rowkey.readFields(input); + } + + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeString(output, tableName); + rowkey.write(output); + } + + @Override + public int hashCode() { + int result = this.tableName.hashCode(); + result = 31 * result + this.rowkey.hashCode(); + return result; + } + + @Override + public int compareTo(TableRowkeyPair other) { + String otherTableName = other.getTableName(); + if (this.tableName.equals(otherTableName)) { + return 
this.rowkey.compareTo(other.getRowkey()); + } else { + return this.tableName.compareTo(otherTableName); } + } - public void setRowkey(ImmutableBytesWritable rowkey) { - this.rowkey = rowkey; - } - - @Override - public void readFields(DataInput input) throws IOException { - tableName = WritableUtils.readString(input); - rowkey = new ImmutableBytesWritable(); - rowkey.readFields(input); - } + /** Comparator for TableRowkeyPair. */ + public static class Comparator extends WritableComparator { - @Override - public void write(DataOutput output) throws IOException { - WritableUtils.writeString(output,tableName); - rowkey.write(output); - } - - @Override - public int hashCode() { - int result = this.tableName.hashCode(); - result = 31 * result + this.rowkey.hashCode(); - return result; + public Comparator() { + super(TableRowkeyPair.class); } @Override - public int compareTo(TableRowkeyPair other) { - String otherTableName = other.getTableName(); - if(this.tableName.equals(otherTableName)) { - return this.rowkey.compareTo(other.getRowkey()); - } else { - return this.tableName.compareTo(otherTableName); + public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { + try { + // Compare table names + int strL1 = readInt(b1, s1); + int strL2 = readInt(b2, s2); + int cmp = compareBytes(b1, s1 + Bytes.SIZEOF_INT, strL1, b2, s2 + Bytes.SIZEOF_INT, strL2); + if (cmp != 0) { + return cmp; } + // Compare row keys + int strL3 = readInt(b1, s1 + Bytes.SIZEOF_INT + strL1); + int strL4 = readInt(b2, s2 + Bytes.SIZEOF_INT + strL2); + int i = compareBytes(b1, s1 + Bytes.SIZEOF_INT * 2 + strL1, strL3, b2, + s2 + Bytes.SIZEOF_INT * 2 + strL2, strL4); + return i; + } catch (Exception ex) { + throw new IllegalArgumentException(ex); + } } + } - /** Comparator for TableRowkeyPair. */ - public static class Comparator extends WritableComparator { - - public Comparator() { - super(TableRowkeyPair.class); - } - - @Override - public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { - try { - // Compare table names - int strL1 = readInt(b1, s1); - int strL2 = readInt(b2, s2); - int cmp = compareBytes(b1, s1 + Bytes.SIZEOF_INT, strL1, b2, s2 + Bytes.SIZEOF_INT, strL2); - if (cmp != 0) { - return cmp; - } - // Compare row keys - int strL3 = readInt(b1, s1 + Bytes.SIZEOF_INT + strL1); - int strL4 = readInt(b2, s2 + Bytes.SIZEOF_INT + strL2); - int i = compareBytes(b1, s1 + Bytes.SIZEOF_INT*2 + strL1, strL3, b2, s2 - + Bytes.SIZEOF_INT*2 + strL2, strL4); - return i; - } catch(Exception ex) { - throw new IllegalArgumentException(ex); - } - } - } - - static { - WritableComparator.define(TableRowkeyPair.class, new Comparator()); - } + static { + WritableComparator.define(TableRowkeyPair.class, new Comparator()); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRef.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRef.java index dc867378c89..44ea91d1e22 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRef.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRef.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,52 +19,51 @@ import java.util.Map; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; + import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - /** - * Represents the logical and physical name of a single table to which data is to be loaded. - * - * This class exists to allow for the difference between HBase physical table names and - * Phoenix logical table names. + * Represents the logical and physical name of a single table to which data is to be loaded. This + * class exists to allow for the difference between HBase physical table names and Phoenix logical + * table names. */ public class TargetTableRef { - @JsonProperty - private final String logicalName; + @JsonProperty + private final String logicalName; - @JsonProperty - private final String physicalName; + @JsonProperty + private final String physicalName; - @JsonProperty - private Map configuration = Maps.newHashMap(); + @JsonProperty + private Map configuration = Maps.newHashMap(); - public TargetTableRef(String name) { - this(name, name); - } + public TargetTableRef(String name) { + this(name, name); + } - @JsonCreator - public TargetTableRef(@JsonProperty("logicalName") String logicalName, - @JsonProperty("physicalName") String physicalName) { - this.logicalName = logicalName; - this.physicalName = physicalName; - } + @JsonCreator + public TargetTableRef(@JsonProperty("logicalName") String logicalName, + @JsonProperty("physicalName") String physicalName) { + this.logicalName = logicalName; + this.physicalName = physicalName; + } - public String getLogicalName() { - return logicalName; - } + public String getLogicalName() { + return logicalName; + } - public String getPhysicalName() { - return physicalName; - } + public String getPhysicalName() { + return physicalName; + } - public Map getConfiguration() { - return configuration; - } + public Map getConfiguration() { + return configuration; + } - public void setConfiguration(Map configuration) { - this.configuration = configuration; - } + public void setConfiguration(Map configuration) { + this.configuration = configuration; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRefFunctions.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRefFunctions.java index 9985379e576..f62170cf4d0 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRefFunctions.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/bulkload/TargetTableRefFunctions.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,82 +26,86 @@ import org.apache.phoenix.util.JacksonUtil; /** - * Utility functions to get/put json. - * - */ + * Utility functions to get/put json. 
+ */ public class TargetTableRefFunctions { - public static final Function TO_JSON = new Function() { - - @Override - public String apply(TargetTableRef input) { - try { - return JacksonUtil.getObjectWriter().writeValueAsString(input); - } catch (IOException e) { - throw new RuntimeException(e); - } - - } - }; - - public static final Function FROM_JSON = new Function() { - - @Override - public TargetTableRef apply(String json) { - try { - return JacksonUtil.getObjectReader(TargetTableRef.class).readValue(json); - } catch (IOException e) { - throw new RuntimeException(e); - } - - } - }; - - public static final Function,String> NAMES_TO_JSON = new Function,String>() { - - @Override - public String apply(List input) { - try { - List tableNames = Lists.newArrayListWithCapacity(input.size()); - for(TargetTableRef table : input) { - tableNames.add(table.getPhysicalName()); - } - return JacksonUtil.getObjectWriter().writeValueAsString(tableNames); - } catch (IOException e) { - throw new RuntimeException(e); - } - - } - }; - - public static final Function,String> LOGICAL_NAMES_TO_JSON = new Function,String>() { - - @Override - public String apply(List input) { - try { - List tableNames = Lists.newArrayListWithCapacity(input.size()); - for(TargetTableRef table : input) { - tableNames.add(table.getLogicalName()); - } - return JacksonUtil.getObjectWriter().writeValueAsString(tableNames); - } catch (IOException e) { - throw new RuntimeException(e); - } + public static final Function TO_JSON = + new Function() { + @Override + public String apply(TargetTableRef input) { + try { + return JacksonUtil.getObjectWriter().writeValueAsString(input); + } catch (IOException e) { + throw new RuntimeException(e); } + + } }; - public static final Function> NAMES_FROM_JSON = new Function>() { + public static final Function FROM_JSON = + new Function() { - @SuppressWarnings("unchecked") - @Override - public List apply(String json) { - try { - return JacksonUtil.getObjectReader(ArrayList.class).readValue(json); - } catch (IOException e) { - throw new RuntimeException(e); - } + @Override + public TargetTableRef apply(String json) { + try { + return JacksonUtil.getObjectReader(TargetTableRef.class).readValue(json); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } + }; + + public static final Function, String> NAMES_TO_JSON = + new Function, String>() { + + @Override + public String apply(List input) { + try { + List tableNames = Lists.newArrayListWithCapacity(input.size()); + for (TargetTableRef table : input) { + tableNames.add(table.getPhysicalName()); + } + return JacksonUtil.getObjectWriter().writeValueAsString(tableNames); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } + }; - } - }; + public static final Function, String> LOGICAL_NAMES_TO_JSON = + new Function, String>() { + + @Override + public String apply(List input) { + try { + List tableNames = Lists.newArrayListWithCapacity(input.size()); + for (TargetTableRef table : input) { + tableNames.add(table.getLogicalName()); + } + return JacksonUtil.getObjectWriter().writeValueAsString(tableNames); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } + }; + + public static final Function> NAMES_FROM_JSON = + new Function>() { + + @SuppressWarnings("unchecked") + @Override + public List apply(String json) { + try { + return JacksonUtil.getObjectReader(ArrayList.class).readValue(json); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } + }; } diff --git 
a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java index 664bd5acddd..69e1de8c951 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,88 +31,88 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.mapreduce.TableOutputFormat; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.transaction.PhoenixTransactionProvider; import org.apache.phoenix.transaction.TransactionFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Writes mutations directly to HBase using HBase front-door APIs. */ public class DirectHTableWriter { - private static final Logger LOGGER = LoggerFactory.getLogger(DirectHTableWriter.class); + private static final Logger LOGGER = LoggerFactory.getLogger(DirectHTableWriter.class); - private Configuration conf = null; - private Table table; - private Connection conn; + private Configuration conf = null; + private Table table; + private Connection conn; - public DirectHTableWriter(Configuration otherConf) { - setConf(otherConf); - } + public DirectHTableWriter(Configuration otherConf) { + setConf(otherConf); + } - protected void setConf(Configuration otherConf) { - this.conf = HBaseConfiguration.create(otherConf); + protected void setConf(Configuration otherConf) { + this.conf = HBaseConfiguration.create(otherConf); - String tableName = this.conf.get(TableOutputFormat.OUTPUT_TABLE); - if (tableName == null || tableName.length() <= 0) { - throw new IllegalArgumentException("Must specify table name"); - } + String tableName = this.conf.get(TableOutputFormat.OUTPUT_TABLE); + if (tableName == null || tableName.length() <= 0) { + throw new IllegalArgumentException("Must specify table name"); + } - try { - this.conn = ConnectionFactory.createConnection(this.conf); - this.table = conn.getTable(TableName.valueOf(tableName)); - LOGGER.info("Created table instance for " + tableName); - } catch (IOException e) { - LOGGER.error("IOException : ", e); - tryClosingResourceSilently(this.conn); - throw new RuntimeException(e); - } + try { + this.conn = ConnectionFactory.createConnection(this.conf); + this.table = conn.getTable(TableName.valueOf(tableName)); + LOGGER.info("Created table instance for " + tableName); + } catch (IOException e) { + LOGGER.error("IOException : ", e); + tryClosingResourceSilently(this.conn); + throw new RuntimeException(e); } + } - public void write(List mutations) throws IOException, InterruptedException { - Object[] results = new Object[mutations.size()]; - String txnIdStr = conf.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - if (txnIdStr == null) { - table.batch(mutations, results); - } else { - long ts = Long.parseLong(txnIdStr); - PhoenixTransactionProvider provider = 
TransactionFactory.Provider.getDefault().getTransactionProvider(); - String txnProviderStr = conf.get(PhoenixConfigurationUtil.TX_PROVIDER); - if (txnProviderStr != null) { - provider = TransactionFactory.Provider.valueOf(txnProviderStr).getTransactionProvider(); - } - List shadowedMutations = Lists.newArrayListWithExpectedSize(mutations.size()); - for (Mutation m : mutations) { - if (m instanceof Put) { - shadowedMutations.add(provider.markPutAsCommitted((Put)m, ts, ts)); - } - } - table.batch(shadowedMutations, results); + public void write(List mutations) throws IOException, InterruptedException { + Object[] results = new Object[mutations.size()]; + String txnIdStr = conf.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + if (txnIdStr == null) { + table.batch(mutations, results); + } else { + long ts = Long.parseLong(txnIdStr); + PhoenixTransactionProvider provider = + TransactionFactory.Provider.getDefault().getTransactionProvider(); + String txnProviderStr = conf.get(PhoenixConfigurationUtil.TX_PROVIDER); + if (txnProviderStr != null) { + provider = TransactionFactory.Provider.valueOf(txnProviderStr).getTransactionProvider(); + } + List shadowedMutations = Lists.newArrayListWithExpectedSize(mutations.size()); + for (Mutation m : mutations) { + if (m instanceof Put) { + shadowedMutations.add(provider.markPutAsCommitted((Put) m, ts, ts)); } + } + table.batch(shadowedMutations, results); } + } - protected Configuration getConf() { - return conf; - } + protected Configuration getConf() { + return conf; + } - protected Table getTable() { - return table; - } + protected Table getTable() { + return table; + } - private void tryClosingResourceSilently(Closeable res) { - if (res != null) { - try { - res.close(); - } catch (IOException e) { - LOGGER.error("Closing resource: " + res + " failed with error: ", e); - } - } + private void tryClosingResourceSilently(Closeable res) { + if (res != null) { + try { + res.close(); + } catch (IOException e) { + LOGGER.error("Closing resource: " + res + " failed with error: ", e); + } } + } - public void close() throws IOException { - tryClosingResourceSilently(this.table); - tryClosingResourceSilently(this.conn); - } + public void close() throws IOException { + tryClosingResourceSilently(this.table); + tryClosingResourceSilently(this.conn); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java index 78161a9c644..951e7e56c5f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.mapreduce.index; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; + import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -33,9 +36,6 @@ import java.util.Properties; import java.util.Set; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; @@ -55,8 +55,12 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.parse.HintNode.Hint; import org.apache.phoenix.query.ConnectionQueryServices; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.MetaDataUtil; @@ -66,481 +70,471 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_TTL; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; - /** * Mapper that reads from the data table and checks the rows against the index table */ public class IndexScrutinyMapper extends Mapper { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyMapper.class); - protected Connection connection; - private List targetTblColumnMetadata; - private long batchSize; - // holds a batch of rows from the table the mapper is iterating over - // Each row is a pair - the row TS, and the row values - protected List>> currentBatchValues = new ArrayList<>(); - protected String targetTableQuery; - protected int numTargetPkCols; - protected boolean outputInvalidRows; - protected OutputFormat outputFormat = OutputFormat.FILE; - private String qSourceTable; - private String qTargetTable; - private long executeTimestamp; - private int numSourcePkCols; - private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable(); - private List sourceTblColumnMetadata; - - // used to write results to the output table - protected Connection outputConn; - protected PreparedStatement outputUpsertStmt; - private long outputMaxRows; - private MessageDigest md5; - private long ttl; - private long scnTimestamp; - private long maxLookbackAgeMillis; - - protected long getScrutinyTs(){ - return scnTimestamp; + private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyMapper.class); + protected Connection connection; + private List targetTblColumnMetadata; + private long batchSize; + // holds a batch of rows from the table the mapper is iterating over + // Each row is a pair - the 
row TS, and the row values + protected List>> currentBatchValues = new ArrayList<>(); + protected String targetTableQuery; + protected int numTargetPkCols; + protected boolean outputInvalidRows; + protected OutputFormat outputFormat = OutputFormat.FILE; + private String qSourceTable; + private String qTargetTable; + private long executeTimestamp; + private int numSourcePkCols; + private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable(); + private List sourceTblColumnMetadata; + + // used to write results to the output table + protected Connection outputConn; + protected PreparedStatement outputUpsertStmt; + private long outputMaxRows; + private MessageDigest md5; + private long ttl; + private long scnTimestamp; + private long maxLookbackAgeMillis; + + protected long getScrutinyTs() { + return scnTimestamp; + } + + @Override + protected void setup(final Context context) throws IOException, InterruptedException { + super.setup(context); + final Configuration configuration = context.getConfiguration(); + try { + // get a connection with correct CURRENT_SCN (so incoming writes don't throw off the + // scrutiny) + final Properties overrideProps = new Properties(); + String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); + overrideProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, scn); + scnTimestamp = Long.parseLong(scn); + connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + connection.setAutoCommit(false); + batchSize = PhoenixConfigurationUtil.getScrutinyBatchSize(configuration); + outputInvalidRows = PhoenixConfigurationUtil.getScrutinyOutputInvalidRows(configuration); + outputFormat = PhoenixConfigurationUtil.getScrutinyOutputFormat(configuration); + executeTimestamp = PhoenixConfigurationUtil.getScrutinyExecuteTimestamp(configuration); + // get the index table and column names + String qDataTable = PhoenixConfigurationUtil.getScrutinyDataTableName(configuration); + final PTable pdataTable = phoenixConnection.getTable(qDataTable); + final String qIndexTable = PhoenixConfigurationUtil.getScrutinyIndexTableName(configuration); + final PTable pindexTable = phoenixConnection.getTable(qIndexTable); + // set the target table based on whether we're running the MR over the data or index + // table + SourceTable sourceTable = PhoenixConfigurationUtil.getScrutinySourceTable(configuration); + SourceTargetColumnNames columnNames = SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) + ? 
new SourceTargetColumnNames.DataSourceColNames(pdataTable, pindexTable) + : new SourceTargetColumnNames.IndexSourceColNames(pdataTable, pindexTable); + qSourceTable = columnNames.getQualifiedSourceTableName(); + qTargetTable = columnNames.getQualifiedTargetTableName(); + List targetColNames = columnNames.getTargetColNames(); + List sourceColNames = columnNames.getSourceColNames(); + List targetPkColNames = columnNames.getTargetPkColNames(); + String targetPksCsv = + Joiner.on(",").join(SchemaUtil.getEscapedFullColumnNames(targetPkColNames)); + numSourcePkCols = columnNames.getSourcePkColNames().size(); + numTargetPkCols = targetPkColNames.size(); + + if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { + outputConn = ConnectionUtil.getOutputConnection(configuration, new Properties()); + String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); + this.outputUpsertStmt = outputConn.prepareStatement(upsertQuery); + } + outputMaxRows = PhoenixConfigurationUtil.getScrutinyOutputMax(configuration); + + // Create the query against the target table + // Our query projection should be all the index column names (or their data table + // equivalent + // name) + targetTableQuery = QueryUtil.constructSelectStatement(qTargetTable, + columnNames.getCastedTargetColNames(), targetPksCsv, Hint.NO_INDEX, false) + " IN "; + targetTblColumnMetadata = + PhoenixRuntime.generateColumnInfo(phoenixConnection, qTargetTable, targetColNames); + sourceTblColumnMetadata = + PhoenixRuntime.generateColumnInfo(phoenixConnection, qSourceTable, sourceColNames); + LOGGER.info("Target table base query: " + targetTableQuery); + md5 = MessageDigest.getInstance("MD5"); + ttl = getTableTTL(configuration); + Long maxLookbackAge = pdataTable.getMaxLookbackAge(); + maxLookbackAgeMillis = MetaDataUtil.getMaxLookbackAge(configuration, maxLookbackAge); + } catch (SQLException | NoSuchAlgorithmException e) { + tryClosingResourceSilently(this.outputUpsertStmt); + tryClosingResourceSilently(this.connection); + tryClosingResourceSilently(this.outputConn); + throw new RuntimeException(e); } + postSetup(); + } - @Override - protected void setup(final Context context) throws IOException, InterruptedException { - super.setup(context); - final Configuration configuration = context.getConfiguration(); - try { - // get a connection with correct CURRENT_SCN (so incoming writes don't throw off the - // scrutiny) - final Properties overrideProps = new Properties(); - String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); - overrideProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, scn); - scnTimestamp = Long.parseLong(scn); - connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - connection.setAutoCommit(false); - batchSize = PhoenixConfigurationUtil.getScrutinyBatchSize(configuration); - outputInvalidRows = - PhoenixConfigurationUtil.getScrutinyOutputInvalidRows(configuration); - outputFormat = PhoenixConfigurationUtil.getScrutinyOutputFormat(configuration); - executeTimestamp = PhoenixConfigurationUtil.getScrutinyExecuteTimestamp(configuration); - // get the index table and column names - String qDataTable = PhoenixConfigurationUtil.getScrutinyDataTableName(configuration); - final PTable pdataTable = phoenixConnection.getTable(qDataTable); - final String qIndexTable = - PhoenixConfigurationUtil.getScrutinyIndexTableName(configuration); - final PTable pindexTable = 
phoenixConnection.getTable(qIndexTable); - // set the target table based on whether we're running the MR over the data or index - // table - SourceTable sourceTable = - PhoenixConfigurationUtil.getScrutinySourceTable(configuration); - SourceTargetColumnNames columnNames = - SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) - ? new SourceTargetColumnNames.DataSourceColNames(pdataTable, - pindexTable) - : new SourceTargetColumnNames.IndexSourceColNames(pdataTable, - pindexTable); - qSourceTable = columnNames.getQualifiedSourceTableName(); - qTargetTable = columnNames.getQualifiedTargetTableName(); - List targetColNames = columnNames.getTargetColNames(); - List sourceColNames = columnNames.getSourceColNames(); - List targetPkColNames = columnNames.getTargetPkColNames(); - String targetPksCsv = - Joiner.on(",").join(SchemaUtil.getEscapedFullColumnNames(targetPkColNames)); - numSourcePkCols = columnNames.getSourcePkColNames().size(); - numTargetPkCols = targetPkColNames.size(); - - if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { - outputConn = ConnectionUtil.getOutputConnection(configuration, new Properties()); - String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); - this.outputUpsertStmt = outputConn.prepareStatement(upsertQuery); - } - outputMaxRows = PhoenixConfigurationUtil.getScrutinyOutputMax(configuration); - - // Create the query against the target table - // Our query projection should be all the index column names (or their data table - // equivalent - // name) - targetTableQuery = - QueryUtil.constructSelectStatement(qTargetTable, columnNames.getCastedTargetColNames(), targetPksCsv, - Hint.NO_INDEX, false) + " IN "; - targetTblColumnMetadata = - PhoenixRuntime.generateColumnInfo(phoenixConnection, qTargetTable, - targetColNames); - sourceTblColumnMetadata = - PhoenixRuntime.generateColumnInfo(phoenixConnection, qSourceTable, - sourceColNames); - LOGGER.info("Target table base query: " + targetTableQuery); - md5 = MessageDigest.getInstance("MD5"); - ttl = getTableTTL(configuration); - Long maxLookbackAge = pdataTable.getMaxLookbackAge(); - maxLookbackAgeMillis = MetaDataUtil.getMaxLookbackAge(configuration, maxLookbackAge); - } catch (SQLException | NoSuchAlgorithmException e) { - tryClosingResourceSilently(this.outputUpsertStmt); - tryClosingResourceSilently(this.connection); - tryClosingResourceSilently(this.outputConn); - throw new RuntimeException(e); - } - postSetup(); - } + protected void postSetup() { - protected void postSetup() { + } + private static void tryClosingResourceSilently(AutoCloseable res) { + if (res != null) { + try { + res.close(); + } catch (Exception e) { + LOGGER.error("Closing resource: " + res + " failed :", e); + } } - - private static void tryClosingResourceSilently(AutoCloseable res) { - if (res != null) { - try { - res.close(); - } catch (Exception e) { - LOGGER.error("Closing resource: " + res + " failed :", e); - } - } + } + + @Override + protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context) + throws IOException, InterruptedException { + try { + final List values = record.getValues(); + + context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); + currentBatchValues.add(new Pair<>(record.getRowTs(), values)); + if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize != 0) { + // if we haven't hit the batch size, just report progress and move on to next record + context.progress(); + return; + } else { + // otherwise, process the batch + 
processBatch(context); + } + context.progress(); // Make sure progress is reported to Application Master. + } catch (SQLException | IllegalArgumentException e) { + LOGGER.error(" Error while read/write of a record ", e); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); + throw new IOException(e); } - - @Override - protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context) - throws IOException, InterruptedException { - try { - final List values = record.getValues(); - - context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); - currentBatchValues.add(new Pair<>(record.getRowTs(), values)); - if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize != 0) { - // if we haven't hit the batch size, just report progress and move on to next record - context.progress(); - return; - } else { - // otherwise, process the batch - processBatch(context); - } - context.progress(); // Make sure progress is reported to Application Master. - } catch (SQLException | IllegalArgumentException e) { - LOGGER.error(" Error while read/write of a record ", e); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); - throw new IOException(e); - } + } + + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + super.cleanup(context); + tryClosingResourceSilently(this.outputUpsertStmt); + IOException throwException = null; + if (connection != null) { + try { + processBatch(context); + connection.close(); + } catch (SQLException e) { + LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); + throwException = new IOException(e); + } } - - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - super.cleanup(context); - tryClosingResourceSilently(this.outputUpsertStmt); - IOException throwException = null; - if (connection != null) { - try { - processBatch(context); - connection.close(); - } catch (SQLException e) { - LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); - throwException = new IOException(e); - } - } - tryClosingResourceSilently(this.outputConn); - if (throwException != null) { - throw throwException; - } + tryClosingResourceSilently(this.outputConn); + if (throwException != null) { + throw throwException; } - - protected void processBatch(Context context) - throws SQLException, IOException, InterruptedException { - if (currentBatchValues.size() == 0) return; - context.getCounter(PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT).increment(1); - // our query selection filter should be the PK columns of the target table (index or data - // table) - String inClause = - QueryUtil.constructParameterizedInClause(numTargetPkCols, - currentBatchValues.size()); - String indexQuery = targetTableQuery + inClause; - try (PreparedStatement targetStatement = connection.prepareStatement(indexQuery)) { - // while we build the PreparedStatement, we also maintain a hash of the target table - // PKs, - // which we use to join against the results of the query on the target table - Map>> targetPkToSourceValues = - buildTargetStatement(targetStatement); - - preQueryTargetTable(); - // fetch results from the target table and output invalid rows - queryTargetTable(context, targetStatement, targetPkToSourceValues); - - //check if any invalid rows are just temporary side effects of ttl or compaction, - //and if so remove them from the list and count them as separate metrics - 
categorizeInvalidRows(context, targetPkToSourceValues); - - if (outputInvalidRows) { - for (Pair> sourceRowWithoutTargetRow : targetPkToSourceValues.values()) { - List valuesWithoutTarget = sourceRowWithoutTargetRow.getSecond(); - if (OutputFormat.FILE.equals(outputFormat)) { - context.write( - new Text(Arrays.toString(valuesWithoutTarget.toArray())), - new Text("Target row not found")); - } else if (OutputFormat.TABLE.equals(outputFormat)) { - writeToOutputTable(context, valuesWithoutTarget, null, sourceRowWithoutTargetRow.getFirst(), -1L); - } - } - } - if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { - outputUpsertStmt.executeBatch(); // write out invalid rows to output table - outputConn.commit(); - } - currentBatchValues.clear(); + } + + protected void processBatch(Context context) + throws SQLException, IOException, InterruptedException { + if (currentBatchValues.size() == 0) return; + context.getCounter(PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT).increment(1); + // our query selection filter should be the PK columns of the target table (index or data + // table) + String inClause = + QueryUtil.constructParameterizedInClause(numTargetPkCols, currentBatchValues.size()); + String indexQuery = targetTableQuery + inClause; + try (PreparedStatement targetStatement = connection.prepareStatement(indexQuery)) { + // while we build the PreparedStatement, we also maintain a hash of the target table + // PKs, + // which we use to join against the results of the query on the target table + Map>> targetPkToSourceValues = + buildTargetStatement(targetStatement); + + preQueryTargetTable(); + // fetch results from the target table and output invalid rows + queryTargetTable(context, targetStatement, targetPkToSourceValues); + + // check if any invalid rows are just temporary side effects of ttl or compaction, + // and if so remove them from the list and count them as separate metrics + categorizeInvalidRows(context, targetPkToSourceValues); + + if (outputInvalidRows) { + for (Pair> sourceRowWithoutTargetRow : targetPkToSourceValues.values()) { + List valuesWithoutTarget = sourceRowWithoutTargetRow.getSecond(); + if (OutputFormat.FILE.equals(outputFormat)) { + context.write(new Text(Arrays.toString(valuesWithoutTarget.toArray())), + new Text("Target row not found")); + } else if (OutputFormat.TABLE.equals(outputFormat)) { + writeToOutputTable(context, valuesWithoutTarget, null, + sourceRowWithoutTargetRow.getFirst(), -1L); + } } + } + if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { + outputUpsertStmt.executeBatch(); // write out invalid rows to output table + outputConn.commit(); + } + currentBatchValues.clear(); } - - protected void preQueryTargetTable() { } - - protected void categorizeInvalidRows(Context context, - Map>> targetPkToSourceValues) { - Set>>> - entrySet = targetPkToSourceValues.entrySet(); - - Iterator>>> itr = entrySet.iterator(); - - // iterate and remove items simultaneously - while(itr.hasNext()) { - Map.Entry>> entry = itr.next(); - Pair> sourceValues = entry.getValue(); - Long sourceTS = sourceValues.getFirst(); - if (hasRowExpiredOnSource(sourceTS, ttl)) { - context.getCounter(PhoenixScrutinyJobCounters.EXPIRED_ROW_COUNT).increment(1L); - itr.remove(); //don't output to the scrutiny table - } else if (isRowOlderThanMaxLookback(sourceTS)) { - context.getCounter(PhoenixScrutinyJobCounters.BEYOND_MAX_LOOKBACK_COUNT).increment(1L); - //still output to the scrutiny table just in case it's useful - } else { - // otherwise it's invalid 
(e.g. data table rows without corresponding index row) - context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT) - .increment(1L); - } - } + } + + protected void preQueryTargetTable() { + } + + protected void categorizeInvalidRows(Context context, + Map>> targetPkToSourceValues) { + Set>>> entrySet = targetPkToSourceValues.entrySet(); + + Iterator>>> itr = entrySet.iterator(); + + // iterate and remove items simultaneously + while (itr.hasNext()) { + Map.Entry>> entry = itr.next(); + Pair> sourceValues = entry.getValue(); + Long sourceTS = sourceValues.getFirst(); + if (hasRowExpiredOnSource(sourceTS, ttl)) { + context.getCounter(PhoenixScrutinyJobCounters.EXPIRED_ROW_COUNT).increment(1L); + itr.remove(); // don't output to the scrutiny table + } else if (isRowOlderThanMaxLookback(sourceTS)) { + context.getCounter(PhoenixScrutinyJobCounters.BEYOND_MAX_LOOKBACK_COUNT).increment(1L); + // still output to the scrutiny table just in case it's useful + } else { + // otherwise it's invalid (e.g. data table rows without corresponding index row) + context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).increment(1L); + } } - - protected boolean hasRowExpiredOnSource(Long sourceTS, Long ttl) { - long currentTS = EnvironmentEdgeManager.currentTimeMillis(); - return ttl != Integer.MAX_VALUE && sourceTS + ttl*1000 < currentTS; + } + + protected boolean hasRowExpiredOnSource(Long sourceTS, Long ttl) { + long currentTS = EnvironmentEdgeManager.currentTimeMillis(); + return ttl != Integer.MAX_VALUE && sourceTS + ttl * 1000 < currentTS; + } + + protected boolean isRowOlderThanMaxLookback(Long sourceTS) { + if ( + maxLookbackAgeMillis + == BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000 + ) { + return false; } - - protected boolean isRowOlderThanMaxLookback(Long sourceTS){ - if (maxLookbackAgeMillis == BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000){ - return false; - } - long now = EnvironmentEdgeManager.currentTimeMillis(); - long maxLookBackTimeMillis = now - maxLookbackAgeMillis; - return sourceTS <= maxLookBackTimeMillis; + long now = EnvironmentEdgeManager.currentTimeMillis(); + long maxLookBackTimeMillis = now - maxLookbackAgeMillis; + return sourceTS <= maxLookBackTimeMillis; + } + + private int getTableTTL(Configuration configuration) throws SQLException, IOException { + PTable pSourceTable = PhoenixRuntime.getTable(connection, qSourceTable); + if ( + pSourceTable.getType() == PTableType.INDEX + && pSourceTable.getIndexType() == PTable.IndexType.LOCAL + ) { + return Integer.MAX_VALUE; } - - private int getTableTTL(Configuration configuration) throws SQLException, IOException { - PTable pSourceTable = PhoenixRuntime.getTable(connection, qSourceTable); - if (pSourceTable.getType() == PTableType.INDEX - && pSourceTable.getIndexType() == PTable.IndexType.LOCAL) { - return Integer.MAX_VALUE; - } - ConnectionQueryServices - cqsi = connection.unwrap(PhoenixConnection.class).getQueryServices(); - String physicalTable = getSourceTableName(pSourceTable, - SchemaUtil.isNamespaceMappingEnabled(null, cqsi.getProps())); - if (configuration.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED)) { - return pSourceTable.getTTL() == TTL_NOT_DEFINED ? 
DEFAULT_TTL - : pSourceTable.getTTL(); - } else { - TableDescriptor tableDesc; - try (Admin admin = cqsi.getAdmin()) { - tableDesc = admin.getDescriptor(TableName - .valueOf(physicalTable)); - } - return tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(pSourceTable)). - getTimeToLive(); - } + ConnectionQueryServices cqsi = connection.unwrap(PhoenixConnection.class).getQueryServices(); + String physicalTable = + getSourceTableName(pSourceTable, SchemaUtil.isNamespaceMappingEnabled(null, cqsi.getProps())); + if ( + configuration.getBoolean(QueryServices.PHOENIX_TABLE_TTL_ENABLED, + QueryServicesOptions.DEFAULT_PHOENIX_TABLE_TTL_ENABLED) + ) { + return pSourceTable.getTTL() == TTL_NOT_DEFINED ? DEFAULT_TTL : pSourceTable.getTTL(); + } else { + TableDescriptor tableDesc; + try (Admin admin = cqsi.getAdmin()) { + tableDesc = admin.getDescriptor(TableName.valueOf(physicalTable)); + } + return tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(pSourceTable)) + .getTimeToLive(); } - - @VisibleForTesting - public static String getSourceTableName(PTable pSourceTable, boolean isNamespaceEnabled) { - String sourcePhysicalName = pSourceTable.getPhysicalName().getString(); - String physicalTable, table, schema; - if (pSourceTable.getType() == PTableType.VIEW - || MetaDataUtil.isViewIndex(sourcePhysicalName)) { - // in case of view and view index ptable, getPhysicalName() returns hbase tables - // i.e. without _IDX_ and with _IDX_ respectively - physicalTable = sourcePhysicalName; + } + + @VisibleForTesting + public static String getSourceTableName(PTable pSourceTable, boolean isNamespaceEnabled) { + String sourcePhysicalName = pSourceTable.getPhysicalName().getString(); + String physicalTable, table, schema; + if (pSourceTable.getType() == PTableType.VIEW || MetaDataUtil.isViewIndex(sourcePhysicalName)) { + // in case of view and view index ptable, getPhysicalName() returns hbase tables + // i.e. 
without _IDX_ and with _IDX_ respectively + physicalTable = sourcePhysicalName; + } else { + schema = pSourceTable.getSchemaName().toString(); + table = SchemaUtil.getTableNameFromFullName(pSourceTable.getPhysicalName().getString()); + physicalTable = + SchemaUtil.getPhysicalHBaseTableName(schema, table, isNamespaceEnabled).toString(); + } + return physicalTable; + } + + protected Map>> + buildTargetStatement(PreparedStatement targetStatement) throws SQLException { + Map>> targetPkToSourceValues = + new HashMap<>(currentBatchValues.size()); + int rsIndex = 1; + for (Pair> batchTsRow : currentBatchValues) { + List batchRow = batchTsRow.getSecond(); + // our original query against the source table (which provided the batchRow) projected + // with the data table PK cols first, so the first numTargetPkCols form the PK + String targetPkHash = getPkHash(batchRow.subList(0, numTargetPkCols)); + targetPkToSourceValues.put(targetPkHash, batchTsRow); + for (int i = 0; i < numTargetPkCols; i++) { + ColumnInfo targetPkInfo = targetTblColumnMetadata.get(i); + Object value = batchRow.get(i); + if (value == null) { + targetStatement.setNull(rsIndex++, targetPkInfo.getSqlType()); } else { - schema = pSourceTable.getSchemaName().toString(); - table = SchemaUtil.getTableNameFromFullName(pSourceTable.getPhysicalName().getString()); - physicalTable = SchemaUtil - .getPhysicalHBaseTableName(schema, table, isNamespaceEnabled).toString(); + targetStatement.setObject(rsIndex++, value, targetPkInfo.getSqlType()); } - return physicalTable; + } } - - protected Map>> buildTargetStatement(PreparedStatement targetStatement) - throws SQLException { - Map>> targetPkToSourceValues = - new HashMap<>(currentBatchValues.size()); - int rsIndex = 1; - for (Pair> batchTsRow : currentBatchValues) { - List batchRow = batchTsRow.getSecond(); - // our original query against the source table (which provided the batchRow) projected - // with the data table PK cols first, so the first numTargetPkCols form the PK - String targetPkHash = getPkHash(batchRow.subList(0, numTargetPkCols)); - targetPkToSourceValues.put(targetPkHash, batchTsRow); - for (int i = 0; i < numTargetPkCols; i++) { - ColumnInfo targetPkInfo = targetTblColumnMetadata.get(i); - Object value = batchRow.get(i); - if (value == null) { - targetStatement.setNull(rsIndex++, targetPkInfo.getSqlType()); - } else { - targetStatement.setObject(rsIndex++, value, targetPkInfo.getSqlType()); - } - } + return targetPkToSourceValues; + } + + protected void queryTargetTable(Context context, PreparedStatement targetStatement, + Map>> targetPkToSourceValues) + throws SQLException, IOException, InterruptedException { + ResultSet targetResultSet = targetStatement.executeQuery(); + while (targetResultSet.next()) { + indxWritable.readFields(targetResultSet); + List targetValues = indxWritable.getValues(); + // first grab the PK and try to join against the source input + // the query is such that first numTargetPkCols of the resultSet is the PK + List pkObjects = new ArrayList<>(numTargetPkCols); + for (int i = 0; i < numTargetPkCols; i++) { + Object pkPart = targetResultSet.getObject(i + 1); + pkObjects.add(pkPart); + } + Long targetTS = + targetResultSet.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp(); + String targetPk = getPkHash(pkObjects); + + // use the pk to fetch the source table column values + Pair> sourceTsValues = targetPkToSourceValues.get(targetPk); + + Long sourceTS = sourceTsValues.getFirst(); + List sourceValues = sourceTsValues.getSecond(); + // 
compare values starting after the PK (i.e. covered columns) + boolean isIndexedCorrectly = + compareValues(numTargetPkCols, targetValues, sourceValues, context); + if (isIndexedCorrectly) { + context.getCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).increment(1); + } else { + context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).increment(1); + if (outputInvalidRows) { + outputInvalidRow(context, sourceValues, targetValues, sourceTS, targetTS); } - return targetPkToSourceValues; + } + targetPkToSourceValues.remove(targetPk); } - - protected void queryTargetTable(Context context, PreparedStatement targetStatement, - Map>> targetPkToSourceValues) - throws SQLException, IOException, InterruptedException { - ResultSet targetResultSet = targetStatement.executeQuery(); - while (targetResultSet.next()) { - indxWritable.readFields(targetResultSet); - List targetValues = indxWritable.getValues(); - // first grab the PK and try to join against the source input - // the query is such that first numTargetPkCols of the resultSet is the PK - List pkObjects = new ArrayList<>(numTargetPkCols); - for (int i = 0; i < numTargetPkCols; i++) { - Object pkPart = targetResultSet.getObject(i + 1); - pkObjects.add(pkPart); - } - Long targetTS = targetResultSet.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp(); - String targetPk = getPkHash(pkObjects); - - // use the pk to fetch the source table column values - Pair> sourceTsValues = targetPkToSourceValues.get(targetPk); - - Long sourceTS = sourceTsValues.getFirst(); - List sourceValues = sourceTsValues.getSecond(); - // compare values starting after the PK (i.e. covered columns) - boolean isIndexedCorrectly = - compareValues(numTargetPkCols, targetValues, sourceValues, context); - if (isIndexedCorrectly) { - context.getCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).increment(1); - } else { - context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).increment(1); - if (outputInvalidRows) { - outputInvalidRow(context, sourceValues, targetValues, sourceTS, targetTS); - } - } - targetPkToSourceValues.remove(targetPk); - } + } + + private void outputInvalidRow(Context context, List sourceValues, + List targetValues, long sourceTS, long targetTS) + throws SQLException, IOException, InterruptedException { + if (OutputFormat.FILE.equals(outputFormat)) { + context.write(new Text(Arrays.toString(sourceValues.toArray())), + new Text(Arrays.toString(targetValues.toArray()))); + } else if (OutputFormat.TABLE.equals(outputFormat)) { + writeToOutputTable(context, sourceValues, targetValues, sourceTS, targetTS); } - - private void outputInvalidRow(Context context, List sourceValues, - List targetValues, long sourceTS, long targetTS) throws SQLException, IOException, InterruptedException { - if (OutputFormat.FILE.equals(outputFormat)) { - context.write(new Text(Arrays.toString(sourceValues.toArray())), - new Text(Arrays.toString(targetValues.toArray()))); - } else if (OutputFormat.TABLE.equals(outputFormat)) { - writeToOutputTable(context, sourceValues, targetValues, sourceTS, targetTS); - } + } + + // pass in null targetValues if the target row wasn't found + protected void writeToOutputTable(Context context, List sourceValues, + List targetValues, long sourceTS, long targetTS) throws SQLException { + if ( + context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).getValue() > outputMaxRows + ) { + return; } - - // pass in null targetValues if the target row wasn't found - protected void writeToOutputTable(Context 
context, List sourceValues, List targetValues, long sourceTS, long targetTS) - throws SQLException { - if (context.getCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).getValue() > outputMaxRows) { - return; - } - int index = 1; - outputUpsertStmt.setString(index++, qSourceTable); // SOURCE_TABLE - outputUpsertStmt.setString(index++, qTargetTable); // TARGET_TABLE - outputUpsertStmt.setLong(index++, executeTimestamp); // SCRUTINY_EXECUTE_TIME - outputUpsertStmt.setString(index++, getPkHash(sourceValues.subList(0, numSourcePkCols))); // SOURCE_ROW_PK_HASH - outputUpsertStmt.setLong(index++, sourceTS); // SOURCE_TS - outputUpsertStmt.setLong(index++, targetTS); // TARGET_TS - outputUpsertStmt.setBoolean(index++, targetValues != null); // HAS_TARGET_ROW - outputUpsertStmt.setBoolean(index++, isRowOlderThanMaxLookback(sourceTS)); - index = setStatementObjects(sourceValues, index, sourceTblColumnMetadata); - if (targetValues != null) { - index = setStatementObjects(targetValues, index, targetTblColumnMetadata); - } else { // for case where target row wasn't found, put nulls in prepared statement - for (int i = 0; i < sourceValues.size(); i++) { - outputUpsertStmt.setNull(index++, targetTblColumnMetadata.get(i).getSqlType()); - } - } - outputUpsertStmt.addBatch(); + int index = 1; + outputUpsertStmt.setString(index++, qSourceTable); // SOURCE_TABLE + outputUpsertStmt.setString(index++, qTargetTable); // TARGET_TABLE + outputUpsertStmt.setLong(index++, executeTimestamp); // SCRUTINY_EXECUTE_TIME + outputUpsertStmt.setString(index++, getPkHash(sourceValues.subList(0, numSourcePkCols))); // SOURCE_ROW_PK_HASH + outputUpsertStmt.setLong(index++, sourceTS); // SOURCE_TS + outputUpsertStmt.setLong(index++, targetTS); // TARGET_TS + outputUpsertStmt.setBoolean(index++, targetValues != null); // HAS_TARGET_ROW + outputUpsertStmt.setBoolean(index++, isRowOlderThanMaxLookback(sourceTS)); + index = setStatementObjects(sourceValues, index, sourceTblColumnMetadata); + if (targetValues != null) { + index = setStatementObjects(targetValues, index, targetTblColumnMetadata); + } else { // for case where target row wasn't found, put nulls in prepared statement + for (int i = 0; i < sourceValues.size(); i++) { + outputUpsertStmt.setNull(index++, targetTblColumnMetadata.get(i).getSqlType()); + } } - - private int setStatementObjects(List values, int index, List colMetadata) - throws SQLException { - for (int i = 0; i < values.size(); i++) { - Object value = values.get(i); - ColumnInfo colInfo = colMetadata.get(i); - if (value != null) { - outputUpsertStmt.setObject(index++, value, colInfo.getSqlType()); - } else { - outputUpsertStmt.setNull(index++, colInfo.getSqlType()); - } - } - return index; + outputUpsertStmt.addBatch(); + } + + private int setStatementObjects(List values, int index, List colMetadata) + throws SQLException { + for (int i = 0; i < values.size(); i++) { + Object value = values.get(i); + ColumnInfo colInfo = colMetadata.get(i); + if (value != null) { + outputUpsertStmt.setObject(index++, value, colInfo.getSqlType()); + } else { + outputUpsertStmt.setNull(index++, colInfo.getSqlType()); + } } - - private boolean compareValues(int startIndex, List targetValues, - List sourceValues, Context context) throws SQLException { - if (targetValues == null || sourceValues == null) return false; - for (int i = startIndex; i < sourceValues.size(); i++) { - Object targetValue = targetValues.get(i); - Object sourceValue = sourceValues.get(i); - if (sourceValue == null && targetValue == null) { - continue; - 
} else if (sourceValue != null && targetValue != null) { - if (sourceValue.getClass().isArray()) { - if (compareArrayTypes(sourceValue, targetValue)) { - continue; - } - } else { - if (targetValue.equals(sourceValue)) { - continue; - } - } - } - context.getCounter(PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT).increment(1); - return false; + return index; + } + + private boolean compareValues(int startIndex, List targetValues, + List sourceValues, Context context) throws SQLException { + if (targetValues == null || sourceValues == null) return false; + for (int i = startIndex; i < sourceValues.size(); i++) { + Object targetValue = targetValues.get(i); + Object sourceValue = sourceValues.get(i); + if (sourceValue == null && targetValue == null) { + continue; + } else if (sourceValue != null && targetValue != null) { + if (sourceValue.getClass().isArray()) { + if (compareArrayTypes(sourceValue, targetValue)) { + continue; + } + } else { + if (targetValue.equals(sourceValue)) { + continue; + } } - return true; + } + context.getCounter(PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT).increment(1); + return false; } - - private boolean compareArrayTypes(Object sourceValue, Object targetValue) { - if (sourceValue.getClass().getComponentType().equals(byte.class)) { - return Arrays.equals((byte[]) sourceValue, (byte[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(char.class)) { - return Arrays.equals((char[]) sourceValue, (char[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(boolean.class)) { - return Arrays.equals((boolean[]) sourceValue, (boolean[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(double.class)) { - return Arrays.equals((double[]) sourceValue, (double[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(int.class)) { - return Arrays.equals((int[]) sourceValue, (int[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(short.class)) { - return Arrays.equals((short[]) sourceValue, (short[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(long.class)) { - return Arrays.equals((long[]) sourceValue, (long[]) targetValue); - } else if (sourceValue.getClass().getComponentType().equals(float.class)) { - return Arrays.equals((float[]) sourceValue, (float[]) targetValue); - } - return false; + return true; + } + + private boolean compareArrayTypes(Object sourceValue, Object targetValue) { + if (sourceValue.getClass().getComponentType().equals(byte.class)) { + return Arrays.equals((byte[]) sourceValue, (byte[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(char.class)) { + return Arrays.equals((char[]) sourceValue, (char[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(boolean.class)) { + return Arrays.equals((boolean[]) sourceValue, (boolean[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(double.class)) { + return Arrays.equals((double[]) sourceValue, (double[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(int.class)) { + return Arrays.equals((int[]) sourceValue, (int[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(short.class)) { + return Arrays.equals((short[]) sourceValue, (short[]) targetValue); + } else if (sourceValue.getClass().getComponentType().equals(long.class)) { + return Arrays.equals((long[]) sourceValue, (long[]) targetValue); + } else if 
(sourceValue.getClass().getComponentType().equals(float.class)) { + return Arrays.equals((float[]) sourceValue, (float[]) targetValue); } - - private String getPkHash(List pkObjects) { - try { - for (int i = 0; i < pkObjects.size(); i++) { - md5.update(sourceTblColumnMetadata.get(i).getPDataType().toBytes(pkObjects.get(i))); - } - return Hex.encodeHexString(md5.digest()); - } finally { - md5.reset(); - } + return false; + } + + private String getPkHash(List pkObjects) { + try { + for (int i = 0; i < pkObjects.size(); i++) { + md5.update(sourceTblColumnMetadata.get(i).getPDataType().toBytes(pkObjects.get(i))); + } + return Hex.encodeHexString(md5.digest()); + } finally { + md5.reset(); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapperForTest.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapperForTest.java index 99d50ee47d8..69b4c7becee 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapperForTest.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapperForTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,26 +22,27 @@ public class IndexScrutinyMapperForTest extends IndexScrutinyMapper { - public static final int TEST_TABLE_TTL = 3600; - public static class ScrutinyTestClock extends EnvironmentEdge { - long initialTime; - long delta; + public static final int TEST_TABLE_TTL = 3600; - public ScrutinyTestClock(long delta) { - initialTime = System.currentTimeMillis() + delta; - this.delta = delta; - } + public static class ScrutinyTestClock extends EnvironmentEdge { + long initialTime; + long delta; - @Override - public long currentTime() { - return System.currentTimeMillis() + delta; - } + public ScrutinyTestClock(long delta) { + initialTime = System.currentTimeMillis() + delta; + this.delta = delta; } @Override - public void preQueryTargetTable() { - // change the current time past ttl - ScrutinyTestClock clock = new ScrutinyTestClock(TEST_TABLE_TTL*1000); - EnvironmentEdgeManager.injectEdge(clock); + public long currentTime() { + return System.currentTimeMillis() + delta; } + } + + @Override + public void preQueryTargetTable() { + // change the current time past ttl + ScrutinyTestClock clock = new ScrutinyTestClock(TEST_TABLE_TTL * 1000); + EnvironmentEdgeManager.injectEdge(clock); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutput.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutput.java index a0f00b9dcbc..d20da2fe949 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutput.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutput.java @@ -1,4 +1,3 @@ -/** /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,357 +35,317 @@ import org.apache.phoenix.mapreduce.index.SourceTargetColumnNames.IndexSourceColNames; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.util.QueryUtil; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.QueryUtil; +import org.apache.phoenix.util.SchemaUtil; /** - * * IndexScrutiny MapReduce output table DDL and methods to get queries against the output tables - * */ public class IndexScrutinyTableOutput { - /** - * This table holds the invalid rows in the source table (either missing a target, or a bad - * covered column value). Dynamic columns hold the original source and target table column data. - */ - public static final String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_SCRUTINY"; - public static final String SCRUTINY_EXECUTE_TIME_COL_NAME = "SCRUTINY_EXECUTE_TIME"; - public static final String TARGET_TABLE_COL_NAME = "TARGET_TABLE"; - public static final String SOURCE_TABLE_COL_NAME = "SOURCE_TABLE"; - public static final String OUTPUT_TABLE_DDL = - "CREATE TABLE IF NOT EXISTS " + OUTPUT_TABLE_NAME + "\n" + - "(\n" + - " " + SOURCE_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + - " " + TARGET_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + - " " + SCRUTINY_EXECUTE_TIME_COL_NAME + " BIGINT NOT NULL,\n" + - " SOURCE_ROW_PK_HASH VARCHAR NOT NULL,\n" + - " SOURCE_TS BIGINT,\n" + - " TARGET_TS BIGINT,\n" + - " HAS_TARGET_ROW BOOLEAN,\n" + - " BEYOND_MAX_LOOKBACK BOOLEAN,\n" + - " CONSTRAINT PK PRIMARY KEY\n" + - " (\n" + - " " + SOURCE_TABLE_COL_NAME + ",\n" + - " " + TARGET_TABLE_COL_NAME + ",\n" + - " " + SCRUTINY_EXECUTE_TIME_COL_NAME + ",\n" + // time at which the scrutiny ran - " SOURCE_ROW_PK_HASH\n" + // this hash makes the PK unique - " )\n" + // dynamic columns consisting of the source and target columns will follow - ") COLUMN_ENCODED_BYTES = 0 "; //column encoding not supported with dyn columns (PHOENIX-5107) - public static final String OUTPUT_TABLE_BEYOND_LOOKBACK_DDL = "" + - "ALTER TABLE " + OUTPUT_TABLE_NAME + "\n" + - " ADD IF NOT EXISTS BEYOND_MAX_LOOKBACK BOOLEAN"; + /** + * This table holds the invalid rows in the source table (either missing a target, or a bad + * covered column value). Dynamic columns hold the original source and target table column data. 
+ */ + public static final String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_SCRUTINY"; + public static final String SCRUTINY_EXECUTE_TIME_COL_NAME = "SCRUTINY_EXECUTE_TIME"; + public static final String TARGET_TABLE_COL_NAME = "TARGET_TABLE"; + public static final String SOURCE_TABLE_COL_NAME = "SOURCE_TABLE"; + public static final String OUTPUT_TABLE_DDL = "CREATE TABLE IF NOT EXISTS " + OUTPUT_TABLE_NAME + + "\n" + "(\n" + " " + SOURCE_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + " " + + TARGET_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + " " + SCRUTINY_EXECUTE_TIME_COL_NAME + + " BIGINT NOT NULL,\n" + " SOURCE_ROW_PK_HASH VARCHAR NOT NULL,\n" + + " SOURCE_TS BIGINT,\n" + " TARGET_TS BIGINT,\n" + " HAS_TARGET_ROW BOOLEAN,\n" + + " BEYOND_MAX_LOOKBACK BOOLEAN,\n" + " CONSTRAINT PK PRIMARY KEY\n" + " (\n" + + " " + SOURCE_TABLE_COL_NAME + ",\n" + " " + TARGET_TABLE_COL_NAME + ",\n" + + " " + SCRUTINY_EXECUTE_TIME_COL_NAME + ",\n" + // time at which the scrutiny ran + " SOURCE_ROW_PK_HASH\n" + // this hash makes the PK unique + " )\n" + // dynamic columns consisting of the source and target columns will follow + ") COLUMN_ENCODED_BYTES = 0 "; // column encoding not supported with dyn columns (PHOENIX-5107) + public static final String OUTPUT_TABLE_BEYOND_LOOKBACK_DDL = "" + "ALTER TABLE " + + OUTPUT_TABLE_NAME + "\n" + " ADD IF NOT EXISTS BEYOND_MAX_LOOKBACK BOOLEAN"; - /** - * This table holds metadata about a scrutiny job - result counters and queries to fetch invalid - * row data from the output table. The queries contain the dynamic columns which are equivalent - * to the original source/target table columns - */ - public static final String OUTPUT_METADATA_TABLE_NAME = "PHOENIX_INDEX_SCRUTINY_METADATA"; - public static final String OUTPUT_METADATA_DDL = - "CREATE TABLE IF NOT EXISTS " + OUTPUT_METADATA_TABLE_NAME + "\n" + - "(\n" + - " " + SOURCE_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + - " " + TARGET_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + - " " + SCRUTINY_EXECUTE_TIME_COL_NAME + " BIGINT NOT NULL,\n" + - " SOURCE_TYPE VARCHAR,\n" + // source is either data or index table - " CMD_LINE_ARGS VARCHAR,\n" + // arguments the tool was run with - " INPUT_RECORDS BIGINT,\n" + - " FAILED_RECORDS BIGINT,\n" + - " VALID_ROW_COUNT BIGINT,\n" + - " INVALID_ROW_COUNT BIGINT,\n" + - " INCORRECT_COVERED_COL_VAL_COUNT BIGINT,\n" + - " BATCHES_PROCESSED_COUNT BIGINT,\n" + - " SOURCE_DYNAMIC_COLS VARCHAR,\n" + - " TARGET_DYNAMIC_COLS VARCHAR,\n" + - " INVALID_ROWS_QUERY_ALL VARCHAR,\n" + // stored sql query to fetch all the invalid rows from the output table - " INVALID_ROWS_QUERY_MISSING_TARGET VARCHAR,\n" + // stored sql query to fetch all the invalid rows which are missing a target row - " INVALID_ROWS_QUERY_BAD_COVERED_COL_VAL VARCHAR,\n" + // stored sql query to fetch all the invalid rows which have bad covered column values - " INVALID_ROWS_QUERY_BEYOND_MAX_LOOKBACK VARCHAR,\n" + // stored sql query to fetch all the potentially invalid rows which are before max lookback age - " BEYOND_MAX_LOOKBACK_COUNT BIGINT,\n" + - " CONSTRAINT PK PRIMARY KEY\n" + - " (\n" + - " " + SOURCE_TABLE_COL_NAME + ",\n" + - " " + TARGET_TABLE_COL_NAME + ",\n" + - " " + SCRUTINY_EXECUTE_TIME_COL_NAME + "\n" + - " )\n" + - ")\n"; - public static final String OUTPUT_METADATA_BEYOND_LOOKBACK_COUNTER_DDL = "" + - "ALTER TABLE " + OUTPUT_METADATA_TABLE_NAME + "\n" + - " ADD IF NOT EXISTS INVALID_ROWS_QUERY_BEYOND_MAX_LOOKBACK VARCHAR, \n" + - " BEYOND_MAX_LOOKBACK_COUNT BIGINT"; + /** + * This table holds metadata about a scrutiny job 
- result counters and queries to fetch invalid + * row data from the output table. The queries contain the dynamic columns which are equivalent to + * the original source/target table columns + */ + public static final String OUTPUT_METADATA_TABLE_NAME = "PHOENIX_INDEX_SCRUTINY_METADATA"; + public static final String OUTPUT_METADATA_DDL = "CREATE TABLE IF NOT EXISTS " + + OUTPUT_METADATA_TABLE_NAME + "\n" + "(\n" + " " + SOURCE_TABLE_COL_NAME + + " VARCHAR NOT NULL,\n" + " " + TARGET_TABLE_COL_NAME + " VARCHAR NOT NULL,\n" + " " + + SCRUTINY_EXECUTE_TIME_COL_NAME + " BIGINT NOT NULL,\n" + " SOURCE_TYPE VARCHAR,\n" + // source + // is + // either + // data + // or + // index + // table + " CMD_LINE_ARGS VARCHAR,\n" + // arguments the tool was run with + " INPUT_RECORDS BIGINT,\n" + " FAILED_RECORDS BIGINT,\n" + " VALID_ROW_COUNT BIGINT,\n" + + " INVALID_ROW_COUNT BIGINT,\n" + " INCORRECT_COVERED_COL_VAL_COUNT BIGINT,\n" + + " BATCHES_PROCESSED_COUNT BIGINT,\n" + " SOURCE_DYNAMIC_COLS VARCHAR,\n" + + " TARGET_DYNAMIC_COLS VARCHAR,\n" + " INVALID_ROWS_QUERY_ALL VARCHAR,\n" + // stored sql + // query to + // fetch all + // the + // invalid + // rows from + // the output + // table + " INVALID_ROWS_QUERY_MISSING_TARGET VARCHAR,\n" + // stored sql query to fetch all the + // invalid rows which are missing a target + // row + " INVALID_ROWS_QUERY_BAD_COVERED_COL_VAL VARCHAR,\n" + // stored sql query to fetch all the + // invalid rows which have bad covered + // column values + " INVALID_ROWS_QUERY_BEYOND_MAX_LOOKBACK VARCHAR,\n" + // stored sql query to fetch all the + // potentially invalid rows which are + // before max lookback age + " BEYOND_MAX_LOOKBACK_COUNT BIGINT,\n" + " CONSTRAINT PK PRIMARY KEY\n" + " (\n" + + " " + SOURCE_TABLE_COL_NAME + ",\n" + " " + TARGET_TABLE_COL_NAME + ",\n" + + " " + SCRUTINY_EXECUTE_TIME_COL_NAME + "\n" + " )\n" + ")\n"; + public static final String OUTPUT_METADATA_BEYOND_LOOKBACK_COUNTER_DDL = + "" + "ALTER TABLE " + OUTPUT_METADATA_TABLE_NAME + "\n" + + " ADD IF NOT EXISTS INVALID_ROWS_QUERY_BEYOND_MAX_LOOKBACK VARCHAR, \n" + + " BEYOND_MAX_LOOKBACK_COUNT BIGINT"; - public static final String UPSERT_METADATA_SQL = "UPSERT INTO " + OUTPUT_METADATA_TABLE_NAME + " VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + public static final String UPSERT_METADATA_SQL = + "UPSERT INTO " + OUTPUT_METADATA_TABLE_NAME + " VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - /** - * Gets the parameterized upsert sql to the output table Used by the scrutiny MR job to write - * its results - * @param sourceDynamicCols list of source columns with their types - * @param targetDynamicCols list of target columns with their types - * @param connection connection to use - * @throws SQLException - */ - public static String constructOutputTableUpsert(List sourceDynamicCols, - List targetDynamicCols, Connection connection) throws SQLException { - List outputTableColumns = getOutputTableColumns(connection); + /** + * Gets the parameterized upsert sql to the output table Used by the scrutiny MR job to write its + * results + * @param sourceDynamicCols list of source columns with their types + * @param targetDynamicCols list of target columns with their types + * @param connection connection to use + */ + public static String constructOutputTableUpsert(List sourceDynamicCols, + List targetDynamicCols, Connection connection) throws SQLException { + List outputTableColumns = getOutputTableColumns(connection); - // construct a dynamic column upsert into the output table - List upsertCols = - 
Lists.newArrayList( - Iterables.concat(outputTableColumns, sourceDynamicCols, targetDynamicCols)); - String upsertStmt = - QueryUtil.constructUpsertStatement(IndexScrutinyTableOutput.OUTPUT_TABLE_NAME, - upsertCols, null); - return upsertStmt; - } + // construct a dynamic column upsert into the output table + List upsertCols = Lists + .newArrayList(Iterables.concat(outputTableColumns, sourceDynamicCols, targetDynamicCols)); + String upsertStmt = QueryUtil + .constructUpsertStatement(IndexScrutinyTableOutput.OUTPUT_TABLE_NAME, upsertCols, null); + return upsertStmt; + } - /** - * Get the sql to store as INVALID_ROWS_QUERY_ALL in the output metadata table - * @param conn - * @param columnNames - * @param scrutinyTimeMillis - * @return - * @throws SQLException - */ - public static String getSqlQueryAllInvalidRows(Connection conn, - SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { - String paramQuery = getAllInvalidParamQuery(conn, columnNames); - paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); - return paramQuery; - } + /** + * Get the sql to store as INVALID_ROWS_QUERY_ALL in the output metadata table + */ + public static String getSqlQueryAllInvalidRows(Connection conn, + SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { + String paramQuery = getAllInvalidParamQuery(conn, columnNames); + paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); + return paramQuery; + } - /** - * Get the sql to store as INVALID_ROWS_QUERY_MISSING_TARGET in the output metadata table - * @param conn - * @param columnNames - * @param scrutinyTimeMillis - * @return - * @throws SQLException - */ - public static String getSqlQueryMissingTargetRows(Connection conn, - SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { - String paramQuery = getHasTargetRowQuery(conn, columnNames, scrutinyTimeMillis); - return paramQuery.replaceFirst("\\?", "false"); - } + /** + * Get the sql to store as INVALID_ROWS_QUERY_MISSING_TARGET in the output metadata table + */ + public static String getSqlQueryMissingTargetRows(Connection conn, + SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { + String paramQuery = getHasTargetRowQuery(conn, columnNames, scrutinyTimeMillis); + return paramQuery.replaceFirst("\\?", "false"); + } - /** - * Get the sql to store as INVALID_ROWS_QUERY_BAD_COVERED_COL_VAL in the output metadata table - * @param conn - * @param columnNames - * @param scrutinyTimeMillis - * @return - * @throws SQLException - */ - public static String getSqlQueryBadCoveredColVal(Connection conn, - SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { - String paramQuery = getHasTargetRowQuery(conn, columnNames, scrutinyTimeMillis); - return paramQuery.replaceFirst("\\?", "true"); - } + /** + * Get the sql to store as INVALID_ROWS_QUERY_BAD_COVERED_COL_VAL in the output metadata table + */ + public static String getSqlQueryBadCoveredColVal(Connection conn, + SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { + String paramQuery = getHasTargetRowQuery(conn, columnNames, scrutinyTimeMillis); + return paramQuery.replaceFirst("\\?", "true"); + } - public static String getSqlQueryBeyondMaxLookback(Connection conn, + public static String getSqlQueryBeyondMaxLookback(Connection conn, SourceTargetColumnNames columnNames, long scrutinyTimeMillis) throws SQLException { - String whereQuery = - 
constructOutputTableQuery(conn, columnNames, - getPksCsv() + ", " + SchemaUtil.getEscapedFullColumnName("HAS_TARGET_ROW") - + ", " + SchemaUtil.getEscapedFullColumnName("BEYOND_MAX_LOOKBACK")); - String inClause = - " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size() + 2, 1); - String paramQuery = whereQuery + inClause; - paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); - paramQuery = paramQuery.replaceFirst("\\?", "false"); //has_target_row false - paramQuery = paramQuery.replaceFirst("\\?", "true"); //beyond_max_lookback true - return paramQuery; - } + String whereQuery = constructOutputTableQuery(conn, columnNames, + getPksCsv() + ", " + SchemaUtil.getEscapedFullColumnName("HAS_TARGET_ROW") + ", " + + SchemaUtil.getEscapedFullColumnName("BEYOND_MAX_LOOKBACK")); + String inClause = " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size() + 2, 1); + String paramQuery = whereQuery + inClause; + paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); + paramQuery = paramQuery.replaceFirst("\\?", "false"); // has_target_row false + paramQuery = paramQuery.replaceFirst("\\?", "true"); // beyond_max_lookback true + return paramQuery; + } - /** - * Query the metadata table for the given columns - * @param conn connection to use - * @param selectCols columns to select from the metadata table - * @param qSourceTableName source table full name - * @param qTargetTableName target table full name - * @param scrutinyTimeMillis time when scrutiny was run - * @return - * @throws SQLException - */ - public static ResultSet queryMetadata(Connection conn, List selectCols, - String qSourceTableName, String qTargetTableName, long scrutinyTimeMillis) - throws SQLException { - PreparedStatement ps = conn.prepareStatement(constructMetadataParamQuery(selectCols)); - ps.setString(1, qSourceTableName); - ps.setString(2, qTargetTableName); - ps.setLong(3, scrutinyTimeMillis); - return ps.executeQuery(); - } + /** + * Query the metadata table for the given columns + * @param conn connection to use + * @param selectCols columns to select from the metadata table + * @param qSourceTableName source table full name + * @param qTargetTableName target table full name + * @param scrutinyTimeMillis time when scrutiny was run + */ + public static ResultSet queryMetadata(Connection conn, List selectCols, + String qSourceTableName, String qTargetTableName, long scrutinyTimeMillis) throws SQLException { + PreparedStatement ps = conn.prepareStatement(constructMetadataParamQuery(selectCols)); + ps.setString(1, qSourceTableName); + ps.setString(2, qTargetTableName); + ps.setLong(3, scrutinyTimeMillis); + return ps.executeQuery(); + } - public static ResultSet queryAllLatestMetadata(Connection conn, String qSourceTableName, - String qTargetTableName) throws SQLException { - String sql = "SELECT MAX(" + SCRUTINY_EXECUTE_TIME_COL_NAME + ") " + - "FROM " + OUTPUT_METADATA_TABLE_NAME + - " WHERE " + SOURCE_TABLE_COL_NAME + " = ?" 
+ " AND " + TARGET_TABLE_COL_NAME + "= ?"; - PreparedStatement stmt = conn.prepareStatement(sql); - stmt.setString(1, qSourceTableName); - stmt.setString(2, qTargetTableName); - ResultSet rs = stmt.executeQuery(); - long scrutinyTimeMillis = 0L; - if (rs.next()){ - scrutinyTimeMillis = rs.getLong(1); - } //even if we didn't find one, still need to do a query to return the right columns - return queryAllMetadata(conn, qSourceTableName, qTargetTableName, scrutinyTimeMillis); - } + public static ResultSet queryAllLatestMetadata(Connection conn, String qSourceTableName, + String qTargetTableName) throws SQLException { + String sql = + "SELECT MAX(" + SCRUTINY_EXECUTE_TIME_COL_NAME + ") " + "FROM " + OUTPUT_METADATA_TABLE_NAME + + " WHERE " + SOURCE_TABLE_COL_NAME + " = ?" + " AND " + TARGET_TABLE_COL_NAME + "= ?"; + PreparedStatement stmt = conn.prepareStatement(sql); + stmt.setString(1, qSourceTableName); + stmt.setString(2, qTargetTableName); + ResultSet rs = stmt.executeQuery(); + long scrutinyTimeMillis = 0L; + if (rs.next()) { + scrutinyTimeMillis = rs.getLong(1); + } // even if we didn't find one, still need to do a query to return the right columns + return queryAllMetadata(conn, qSourceTableName, qTargetTableName, scrutinyTimeMillis); + } - /** - * Query the metadata table for all columns - * @param conn connection to use - * @param qSourceTableName source table full name - * @param qTargetTableName target table full name - * @param scrutinyTimeMillis time when scrutiny was run - * @return - * @throws SQLException - */ - public static ResultSet queryAllMetadata(Connection conn, String qSourceTableName, - String qTargetTableName, long scrutinyTimeMillis) throws SQLException { - PTable pMetadata = conn.unwrap(PhoenixConnection.class).getTable( - OUTPUT_METADATA_TABLE_NAME); - List metadataCols = SchemaUtil.getColumnNames(pMetadata.getColumns()); - return queryMetadata(conn, metadataCols, qSourceTableName, qTargetTableName, - scrutinyTimeMillis); - } + /** + * Query the metadata table for all columns + * @param conn connection to use + * @param qSourceTableName source table full name + * @param qTargetTableName target table full name + * @param scrutinyTimeMillis time when scrutiny was run + */ + public static ResultSet queryAllMetadata(Connection conn, String qSourceTableName, + String qTargetTableName, long scrutinyTimeMillis) throws SQLException { + PTable pMetadata = conn.unwrap(PhoenixConnection.class).getTable(OUTPUT_METADATA_TABLE_NAME); + List metadataCols = SchemaUtil.getColumnNames(pMetadata.getColumns()); + return queryMetadata(conn, metadataCols, qSourceTableName, qTargetTableName, + scrutinyTimeMillis); + } - /** - * Writes the results of the given jobs to the metadata table - * @param conn connection to use - * @param cmdLineArgs arguments the {@code IndexScrutinyTool} was run with - * @param completedJobs completed MR jobs - * @throws IOException - * @throws SQLException - */ - public static void writeJobResults(Connection conn, String[] cmdLineArgs, List completedJobs) throws IOException, SQLException { - PreparedStatement pStmt = conn.prepareStatement(UPSERT_METADATA_SQL); - for (Job job : completedJobs) { - Configuration conf = job.getConfiguration(); - String qDataTable = PhoenixConfigurationUtil.getScrutinyDataTableName(conf); - PhoenixConnection phoenixConnection = conn.unwrap(PhoenixConnection.class); - final PTable pdataTable = phoenixConnection.getTable(qDataTable); - final String qIndexTable = PhoenixConfigurationUtil.getScrutinyIndexTableName(conf); - final 
PTable pindexTable = phoenixConnection.getTable(qIndexTable); - SourceTable sourceTable = PhoenixConfigurationUtil.getScrutinySourceTable(conf); - long scrutinyExecuteTime = - PhoenixConfigurationUtil.getScrutinyExecuteTimestamp(conf); - SourceTargetColumnNames columnNames = - SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) - ? new DataSourceColNames(pdataTable, - pindexTable) - : new IndexSourceColNames(pdataTable, - pindexTable); + /** + * Writes the results of the given jobs to the metadata table + * @param conn connection to use + * @param cmdLineArgs arguments the {@code IndexScrutinyTool} was run with + * @param completedJobs completed MR jobs + */ + public static void writeJobResults(Connection conn, String[] cmdLineArgs, List completedJobs) + throws IOException, SQLException { + PreparedStatement pStmt = conn.prepareStatement(UPSERT_METADATA_SQL); + for (Job job : completedJobs) { + Configuration conf = job.getConfiguration(); + String qDataTable = PhoenixConfigurationUtil.getScrutinyDataTableName(conf); + PhoenixConnection phoenixConnection = conn.unwrap(PhoenixConnection.class); + final PTable pdataTable = phoenixConnection.getTable(qDataTable); + final String qIndexTable = PhoenixConfigurationUtil.getScrutinyIndexTableName(conf); + final PTable pindexTable = phoenixConnection.getTable(qIndexTable); + SourceTable sourceTable = PhoenixConfigurationUtil.getScrutinySourceTable(conf); + long scrutinyExecuteTime = PhoenixConfigurationUtil.getScrutinyExecuteTimestamp(conf); + SourceTargetColumnNames columnNames = SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) + ? new DataSourceColNames(pdataTable, pindexTable) + : new IndexSourceColNames(pdataTable, pindexTable); - Counters counters = job.getCounters(); - int index = 1; - pStmt.setString(index++, columnNames.getQualifiedSourceTableName()); - pStmt.setString(index++, columnNames.getQualifiedTargetTableName()); - pStmt.setLong(index++, scrutinyExecuteTime); - pStmt.setString(index++, sourceTable.name()); - pStmt.setString(index++, Arrays.toString(cmdLineArgs)); - pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.INPUT_RECORDS).getValue()); - pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.FAILED_RECORDS).getValue()); - pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).getValue()); - pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).getValue()); - pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT).getValue()); - pStmt.setLong(index++, counters.findCounter(PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT).getValue()); - pStmt.setString(index++, Arrays.toString(columnNames.getSourceDynamicCols().toArray())); - pStmt.setString(index++, Arrays.toString(columnNames.getTargetDynamicCols().toArray())); - pStmt.setString(index++, getSqlQueryAllInvalidRows(conn, columnNames, scrutinyExecuteTime)); - pStmt.setString(index++, getSqlQueryMissingTargetRows(conn, columnNames, scrutinyExecuteTime)); - pStmt.setString(index++, getSqlQueryBadCoveredColVal(conn, columnNames, scrutinyExecuteTime)); - pStmt.setString(index++, getSqlQueryBeyondMaxLookback(conn, columnNames, scrutinyExecuteTime)); - pStmt.setLong(index++, - counters.findCounter(PhoenixScrutinyJobCounters.BEYOND_MAX_LOOKBACK_COUNT).getValue()); - pStmt.addBatch(); - } - pStmt.executeBatch(); - conn.commit(); + Counters counters = job.getCounters(); + int index = 1; + pStmt.setString(index++, columnNames.getQualifiedSourceTableName()); + 
pStmt.setString(index++, columnNames.getQualifiedTargetTableName()); + pStmt.setLong(index++, scrutinyExecuteTime); + pStmt.setString(index++, sourceTable.name()); + pStmt.setString(index++, Arrays.toString(cmdLineArgs)); + pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.INPUT_RECORDS).getValue()); + pStmt.setLong(index++, counters.findCounter(PhoenixJobCounters.FAILED_RECORDS).getValue()); + pStmt.setLong(index++, + counters.findCounter(PhoenixScrutinyJobCounters.VALID_ROW_COUNT).getValue()); + pStmt.setLong(index++, + counters.findCounter(PhoenixScrutinyJobCounters.INVALID_ROW_COUNT).getValue()); + pStmt.setLong(index++, + counters.findCounter(PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT).getValue()); + pStmt.setLong(index++, + counters.findCounter(PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT).getValue()); + pStmt.setString(index++, Arrays.toString(columnNames.getSourceDynamicCols().toArray())); + pStmt.setString(index++, Arrays.toString(columnNames.getTargetDynamicCols().toArray())); + pStmt.setString(index++, getSqlQueryAllInvalidRows(conn, columnNames, scrutinyExecuteTime)); + pStmt.setString(index++, + getSqlQueryMissingTargetRows(conn, columnNames, scrutinyExecuteTime)); + pStmt.setString(index++, getSqlQueryBadCoveredColVal(conn, columnNames, scrutinyExecuteTime)); + pStmt.setString(index++, + getSqlQueryBeyondMaxLookback(conn, columnNames, scrutinyExecuteTime)); + pStmt.setLong(index++, + counters.findCounter(PhoenixScrutinyJobCounters.BEYOND_MAX_LOOKBACK_COUNT).getValue()); + pStmt.addBatch(); } + pStmt.executeBatch(); + conn.commit(); + } - /** - * Get the parameterized query to return all the invalid rows from a scrutiny job - */ - static String constructMetadataParamQuery(List metadataSelectCols) { - String pkColsCsv = getPksCsv(); - String query = - QueryUtil.constructSelectStatement(OUTPUT_METADATA_TABLE_NAME, metadataSelectCols, - pkColsCsv, null, true); - String inClause = " IN " + QueryUtil.constructParameterizedInClause(3, 1); - return query + inClause; - } + /** + * Get the parameterized query to return all the invalid rows from a scrutiny job + */ + static String constructMetadataParamQuery(List metadataSelectCols) { + String pkColsCsv = getPksCsv(); + String query = QueryUtil.constructSelectStatement(OUTPUT_METADATA_TABLE_NAME, + metadataSelectCols, pkColsCsv, null, true); + String inClause = " IN " + QueryUtil.constructParameterizedInClause(3, 1); + return query + inClause; + } - private static String getAllInvalidParamQuery(Connection conn, - SourceTargetColumnNames columnNames) throws SQLException { - String whereQuery = constructOutputTableQuery(conn, columnNames, getPksCsv()); - String inClause = " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size(), 1); - String paramQuery = whereQuery + inClause; - return paramQuery; - } + private static String getAllInvalidParamQuery(Connection conn, + SourceTargetColumnNames columnNames) throws SQLException { + String whereQuery = constructOutputTableQuery(conn, columnNames, getPksCsv()); + String inClause = " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size(), 1); + String paramQuery = whereQuery + inClause; + return paramQuery; + } - private static String bindPkCols(SourceTargetColumnNames columnNames, long scrutinyTimeMillis, - String paramQuery) { - paramQuery = - paramQuery.replaceFirst("\\?", - "'" + columnNames.getQualifiedSourceTableName() + "'"); - paramQuery = - paramQuery.replaceFirst("\\?", - "'" + columnNames.getQualifiedTargetTableName() + "'"); - 
paramQuery = paramQuery.replaceFirst("\\?", scrutinyTimeMillis + ""); - return paramQuery; - } + private static String bindPkCols(SourceTargetColumnNames columnNames, long scrutinyTimeMillis, + String paramQuery) { + paramQuery = + paramQuery.replaceFirst("\\?", "'" + columnNames.getQualifiedSourceTableName() + "'"); + paramQuery = + paramQuery.replaceFirst("\\?", "'" + columnNames.getQualifiedTargetTableName() + "'"); + paramQuery = paramQuery.replaceFirst("\\?", scrutinyTimeMillis + ""); + return paramQuery; + } - private static String getHasTargetRowQuery(Connection conn, SourceTargetColumnNames columnNames, - long scrutinyTimeMillis) throws SQLException { - String whereQuery = - constructOutputTableQuery(conn, columnNames, - getPksCsv() + ", " + SchemaUtil.getEscapedFullColumnName("HAS_TARGET_ROW")); - String inClause = - " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size() + 1, 1); - String paramQuery = whereQuery + inClause; - paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); - return paramQuery; - } + private static String getHasTargetRowQuery(Connection conn, SourceTargetColumnNames columnNames, + long scrutinyTimeMillis) throws SQLException { + String whereQuery = constructOutputTableQuery(conn, columnNames, + getPksCsv() + ", " + SchemaUtil.getEscapedFullColumnName("HAS_TARGET_ROW")); + String inClause = " IN " + QueryUtil.constructParameterizedInClause(getPkCols().size() + 1, 1); + String paramQuery = whereQuery + inClause; + paramQuery = bindPkCols(columnNames, scrutinyTimeMillis, paramQuery); + return paramQuery; + } - private static String getPksCsv() { - String pkColsCsv = Joiner.on(",").join(SchemaUtil.getEscapedFullColumnNames(getPkCols())); - return pkColsCsv; - } + private static String getPksCsv() { + String pkColsCsv = Joiner.on(",").join(SchemaUtil.getEscapedFullColumnNames(getPkCols())); + return pkColsCsv; + } - private static List getPkCols() { - return Arrays.asList(SOURCE_TABLE_COL_NAME, TARGET_TABLE_COL_NAME, - SCRUTINY_EXECUTE_TIME_COL_NAME); - } + private static List getPkCols() { + return Arrays.asList(SOURCE_TABLE_COL_NAME, TARGET_TABLE_COL_NAME, + SCRUTINY_EXECUTE_TIME_COL_NAME); + } - private static String constructOutputTableQuery(Connection connection, - SourceTargetColumnNames columnNames, String conditions) throws SQLException { - PTable pOutputTable = connection.unwrap(PhoenixConnection.class).getTable( - OUTPUT_TABLE_NAME); - List outputTableColumns = SchemaUtil.getColumnNames(pOutputTable.getColumns()); - List selectCols = - Lists.newArrayList( - Iterables.concat(outputTableColumns, columnNames.getUnqualifiedSourceColNames(), - columnNames.getUnqualifiedTargetColNames())); - String dynamicCols = - Joiner.on(",").join(Iterables.concat(columnNames.getSourceDynamicCols(), - columnNames.getTargetDynamicCols())); - // dynamic defined after the table name - // https://phoenix.apache.org/dynamic_columns.html - String dynamicTableName = OUTPUT_TABLE_NAME + "(" + dynamicCols + ")"; - return QueryUtil.constructSelectStatement(dynamicTableName, selectCols, conditions, null, true); - } + private static String constructOutputTableQuery(Connection connection, + SourceTargetColumnNames columnNames, String conditions) throws SQLException { + PTable pOutputTable = connection.unwrap(PhoenixConnection.class).getTable(OUTPUT_TABLE_NAME); + List outputTableColumns = SchemaUtil.getColumnNames(pOutputTable.getColumns()); + List selectCols = Lists.newArrayList(Iterables.concat(outputTableColumns, + 
columnNames.getUnqualifiedSourceColNames(), columnNames.getUnqualifiedTargetColNames())); + String dynamicCols = Joiner.on(",").join( + Iterables.concat(columnNames.getSourceDynamicCols(), columnNames.getTargetDynamicCols())); + // dynamic defined after the table name + // https://phoenix.apache.org/dynamic_columns.html + String dynamicTableName = OUTPUT_TABLE_NAME + "(" + dynamicCols + ")"; + return QueryUtil.constructSelectStatement(dynamicTableName, selectCols, conditions, null, true); + } - private static List getOutputTableColumns(Connection connection) throws SQLException { - PTable pOutputTable = - connection.unwrap(PhoenixConnection.class).getTable(OUTPUT_TABLE_NAME); - List outputTableColumns = SchemaUtil.getColumnNames(pOutputTable.getColumns()); - return outputTableColumns; - } + private static List getOutputTableColumns(Connection connection) throws SQLException { + PTable pOutputTable = connection.unwrap(PhoenixConnection.class).getTable(OUTPUT_TABLE_NAME); + List outputTableColumns = SchemaUtil.getColumnNames(pOutputTable.getColumns()); + return outputTableColumns; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java index cfd0adf8800..676de72605e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,14 +22,6 @@ import java.sql.SQLException; import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -55,493 +47,480 @@ import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil; import org.apache.phoenix.parse.HintNode.Hint; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import 
org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.apache.phoenix.util.SchemaUtil; -import org.apache.phoenix.util.MetaDataUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * An MR job to verify that the index table is in sync with the data table. - * */ public class IndexScrutinyTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyTool.class); - - private static final Option SCHEMA_NAME_OPTION = - new Option("s", "schema", true, "Phoenix schema name (optional)"); - private static final Option DATA_TABLE_OPTION = - new Option("dt", "data-table", true, "Data table name (mandatory)"); - private static final Option INDEX_TABLE_OPTION = - new Option("it", "index-table", true, - "Index table name (mandatory)."); - private static final Option TIMESTAMP = - new Option("t", "time", true, - "Timestamp in millis used to compare the index and data tables. Defaults to current time minus 60 seconds"); - - private static final Option RUN_FOREGROUND_OPTION = - new Option("runfg", "run-foreground", false, "Applicable on top of -direct option." - + "If specified, runs index scrutiny in Foreground. Default - Runs the build in background."); - - private static final Option SNAPSHOT_OPTION = //TODO check if this works - new Option("snap", "snapshot", false, - "If specified, uses Snapshots for async index building (optional)"); - - public static final Option BATCH_SIZE_OPTION = - new Option("b", "batch-size", true, "Number of rows to compare at a time"); - public static final Option SOURCE_TABLE_OPTION = - new Option("src", "source", true, - "Table to use as the source table, whose rows are iterated over and compared to the other table." - + " Options are DATA_TABLE_SOURCE, INDEX_TABLE_SOURCE, BOTH." - + " Defaults to BOTH, which does two separate jobs to iterate over both tables"); - - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - - private static final Option OUTPUT_INVALID_ROWS_OPTION = - new Option("o", "output", false, "Whether to output invalid rows"); - private static final Option OUTPUT_FORMAT_OPTION = - new Option("of", "output-format", true, - "Format in which to output invalid rows. Options are FILE, TABLE. Defaults to TABLE"); - private static final Option OUTPUT_PATH_OPTION = - new Option("op", "output-path", true, "Output path where the files are written"); - private static final Option OUTPUT_MAX = new Option("om", "output-max", true, "Max number of invalid rows to output per mapper. 
Defaults to 1M"); - private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, - "If specified, uses Tenant connection for tenant view index scrutiny (optional)"); - public static final String INDEX_JOB_NAME_TEMPLATE = "PHOENIX_SCRUTINY_[%s]_[%s]"; - - Class mapperClass = null; - - public IndexScrutinyTool(Class indexScrutinyMapperForTestClass) { - this.mapperClass = indexScrutinyMapperForTestClass; - } - - public IndexScrutinyTool() { } - + private static final Logger LOGGER = LoggerFactory.getLogger(IndexScrutinyTool.class); + + private static final Option SCHEMA_NAME_OPTION = + new Option("s", "schema", true, "Phoenix schema name (optional)"); + private static final Option DATA_TABLE_OPTION = + new Option("dt", "data-table", true, "Data table name (mandatory)"); + private static final Option INDEX_TABLE_OPTION = + new Option("it", "index-table", true, "Index table name (mandatory)."); + private static final Option TIMESTAMP = new Option("t", "time", true, + "Timestamp in millis used to compare the index and data tables. Defaults to current time minus 60 seconds"); + + private static final Option RUN_FOREGROUND_OPTION = + new Option("runfg", "run-foreground", false, "Applicable on top of -direct option." + + "If specified, runs index scrutiny in Foreground. Default - Runs the build in background."); + + private static final Option SNAPSHOT_OPTION = // TODO check if this works + new Option("snap", "snapshot", false, + "If specified, uses Snapshots for async index building (optional)"); + + public static final Option BATCH_SIZE_OPTION = + new Option("b", "batch-size", true, "Number of rows to compare at a time"); + public static final Option SOURCE_TABLE_OPTION = new Option("src", "source", true, + "Table to use as the source table, whose rows are iterated over and compared to the other table." + + " Options are DATA_TABLE_SOURCE, INDEX_TABLE_SOURCE, BOTH." + + " Defaults to BOTH, which does two separate jobs to iterate over both tables"); + + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + + private static final Option OUTPUT_INVALID_ROWS_OPTION = + new Option("o", "output", false, "Whether to output invalid rows"); + private static final Option OUTPUT_FORMAT_OPTION = new Option("of", "output-format", true, + "Format in which to output invalid rows. Options are FILE, TABLE. Defaults to TABLE"); + private static final Option OUTPUT_PATH_OPTION = + new Option("op", "output-path", true, "Output path where the files are written"); + private static final Option OUTPUT_MAX = new Option("om", "output-max", true, + "Max number of invalid rows to output per mapper. 
Defaults to 1M"); + private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, + "If specified, uses Tenant connection for tenant view index scrutiny (optional)"); + public static final String INDEX_JOB_NAME_TEMPLATE = "PHOENIX_SCRUTINY_[%s]_[%s]"; + + Class mapperClass = null; + + public IndexScrutinyTool(Class indexScrutinyMapperForTestClass) { + this.mapperClass = indexScrutinyMapperForTestClass; + } + + public IndexScrutinyTool() { + } + + /** + * Which table to use as the source table + */ + public enum SourceTable { + DATA_TABLE_SOURCE, + INDEX_TABLE_SOURCE, /** - * Which table to use as the source table + * Runs two separate jobs to iterate over both tables */ - public enum SourceTable { - DATA_TABLE_SOURCE, INDEX_TABLE_SOURCE, - /** - * Runs two separate jobs to iterate over both tables - */ - BOTH + BOTH + } + + public enum OutputFormat { + FILE, + TABLE + } + + private List jobs = Lists.newArrayList(); + + private Options getOptions() { + final Options options = new Options(); + options.addOption(SCHEMA_NAME_OPTION); + options.addOption(DATA_TABLE_OPTION); + options.addOption(INDEX_TABLE_OPTION); + options.addOption(RUN_FOREGROUND_OPTION); + options.addOption(OUTPUT_INVALID_ROWS_OPTION); + options.addOption(OUTPUT_FORMAT_OPTION); + options.addOption(OUTPUT_PATH_OPTION); + options.addOption(OUTPUT_MAX); + options.addOption(SNAPSHOT_OPTION); + options.addOption(HELP_OPTION); + options.addOption(TIMESTAMP); + options.addOption(BATCH_SIZE_OPTION); + options.addOption(SOURCE_TABLE_OPTION); + options.addOption(TENANT_ID_OPTION); + return options; + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. + * @param args supplied command line arguments + * @return the parsed command line + */ + private CommandLine parseOptions(String[] args) { + final Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - public enum OutputFormat { - FILE, TABLE + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - private List jobs = Lists.newArrayList(); - - private Options getOptions() { - final Options options = new Options(); - options.addOption(SCHEMA_NAME_OPTION); - options.addOption(DATA_TABLE_OPTION); - options.addOption(INDEX_TABLE_OPTION); - options.addOption(RUN_FOREGROUND_OPTION); - options.addOption(OUTPUT_INVALID_ROWS_OPTION); - options.addOption(OUTPUT_FORMAT_OPTION); - options.addOption(OUTPUT_PATH_OPTION); - options.addOption(OUTPUT_MAX); - options.addOption(SNAPSHOT_OPTION); - options.addOption(HELP_OPTION); - options.addOption(TIMESTAMP); - options.addOption(BATCH_SIZE_OPTION); - options.addOption(SOURCE_TABLE_OPTION); - options.addOption(TENANT_ID_OPTION); - return options; - } - - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * @param args supplied command line arguments - * @return the parsed command line - */ - private CommandLine parseOptions(String[] args) { - final Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). 
- build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } + requireOption(cmdLine, DATA_TABLE_OPTION); + requireOption(cmdLine, INDEX_TABLE_OPTION); - requireOption(cmdLine, DATA_TABLE_OPTION); - requireOption(cmdLine, INDEX_TABLE_OPTION); + return cmdLine; + } - return cmdLine; + private void requireOption(CommandLine cmdLine, Option option) { + if (!cmdLine.hasOption(option.getOpt())) { + throw new IllegalStateException(option.getLongOpt() + " is a mandatory parameter"); + } + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + private static class JobFactory { + Connection connection; + Configuration configuration; + private final boolean useSnapshot; + private final long ts; + private final boolean outputInvalidRows; + private final OutputFormat outputFormat; + private final String basePath; + private final long scrutinyExecuteTime; + private final long outputMaxRows; // per mapper + private final String tenantId; + Class mapperClass; + + public JobFactory(Connection connection, Configuration configuration, long batchSize, + boolean useSnapshot, long ts, boolean outputInvalidRows, OutputFormat outputFormat, + String basePath, long outputMaxRows, String tenantId, + Class mapperClass) { + this.outputInvalidRows = outputInvalidRows; + this.outputFormat = outputFormat; + this.basePath = basePath; + this.outputMaxRows = outputMaxRows; + PhoenixConfigurationUtil.setScrutinyBatchSize(configuration, batchSize); + this.connection = connection; + this.configuration = configuration; + this.useSnapshot = useSnapshot; + this.tenantId = tenantId; + this.ts = ts; // CURRENT_SCN to set + scrutinyExecuteTime = EnvironmentEdgeManager.currentTimeMillis(); // time at which scrutiny + // was run. 
+ // Same for + // all jobs created from this factory + PhoenixConfigurationUtil.setScrutinyExecuteTimestamp(configuration, scrutinyExecuteTime); + if (!Strings.isNullOrEmpty(tenantId)) { + PhoenixConfigurationUtil.setTenantId(configuration, tenantId); + } + this.mapperClass = mapperClass; } - private void requireOption(CommandLine cmdLine, Option option) { - if (!cmdLine.hasOption(option.getOpt())) { - throw new IllegalStateException(option.getLongOpt() + " is a mandatory parameter"); + public Job createSubmittableJob(String schemaName, String indexTable, String dataTable, + SourceTable sourceTable, Class mapperClass) throws Exception { + Preconditions.checkArgument(SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) + || SourceTable.INDEX_TABLE_SOURCE.equals(sourceTable)); + + final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); + final String qIndexTable; + if (schemaName != null && !schemaName.isEmpty()) { + qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable); + } else { + qIndexTable = indexTable; + } + PhoenixConfigurationUtil.setScrutinyDataTable(configuration, qDataTable); + PhoenixConfigurationUtil.setScrutinyIndexTable(configuration, qIndexTable); + PhoenixConfigurationUtil.setScrutinySourceTable(configuration, sourceTable); + PhoenixConfigurationUtil.setScrutinyOutputInvalidRows(configuration, outputInvalidRows); + PhoenixConfigurationUtil.setScrutinyOutputMax(configuration, outputMaxRows); + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + final PTable pdataTable = phoenixConnection.getTable(qDataTable); + final PTable pindexTable = phoenixConnection.getTable(qIndexTable); + + // Randomize execution order, unless explicitly set + configuration.setBooleanIfUnset( + PhoenixConfigurationUtil.MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, true); + + // set CURRENT_SCN for our scan so that incoming writes don't throw off scrutiny + configuration.set(PhoenixConfigurationUtil.CURRENT_SCN_VALUE, Long.toString(ts)); + PhoenixConfigurationUtil.setMaxLookbackAge(configuration, pdataTable.getMaxLookbackAge()); + + // set the source table to either data or index table + SourceTargetColumnNames columnNames = SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) + ? 
new SourceTargetColumnNames.DataSourceColNames(pdataTable, pindexTable) + : new SourceTargetColumnNames.IndexSourceColNames(pdataTable, pindexTable); + String qSourceTable = columnNames.getQualifiedSourceTableName(); + List sourceColumnNames = columnNames.getSourceColNames(); + List sourceDynamicCols = columnNames.getSourceDynamicCols(); + List targetDynamicCols = columnNames.getTargetDynamicCols(); + + // Setup the select query against source - we either select the index columns from the + // index table, + // or select the data table equivalents of the index columns from the data table + final String selectQuery = QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, + null, Hint.NO_INDEX, true); + LOGGER.info("Query used on source table to feed the mapper: " + selectQuery); + + PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat); + // if outputting to table, setup the upsert to the output table + if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { + String upsertStmt = IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols, + targetDynamicCols, connection); + PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt); + LOGGER.info("Upsert statement used for output table: " + upsertStmt); + } + + final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, qSourceTable, + columnNames.getQualifiedTargetTableName()); + final Job job = Job.getInstance(configuration, jobName); + + if (!useSnapshot) { + PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, selectQuery); + } else { // TODO check if using a snapshot works + Admin admin = null; + String snapshotName; + try { + final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class); + admin = pConnection.getQueryServices().getAdmin(); + String pdataTableName = pdataTable.getName().getString(); + snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString(); + admin.snapshot(snapshotName, TableName.valueOf(pdataTableName)); + } finally { + if (admin != null) { + admin.close(); + } } + // root dir not a subdirectory of hbase dir + Path rootDir = new Path("hdfs:///index-snapshot-dir"); + CommonFSUtils.setRootDir(configuration, rootDir); + + // set input for map reduce job using hbase snapshots + // PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName, + // qDataTable, restoreDir, selectQuery); + } + TableMapReduceUtil.initCredentials(job); + Path outputPath = getOutputPath(configuration, basePath, + SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) ? pdataTable : pindexTable); + + return configureSubmittableJob(job, outputPath, mapperClass); } - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + private Job configureSubmittableJob(Job job, Path outputPath, + Class mapperClass) throws Exception { + Configuration conf = job.getConfiguration(); + conf.setBoolean("mapreduce.job.user.classpath.first", true); + HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); + job.setJarByClass(IndexScrutinyTool.class); + job.setOutputFormatClass(NullOutputFormat.class); + if (outputInvalidRows && OutputFormat.FILE.equals(outputFormat)) { + job.setOutputFormatClass(TextOutputFormat.class); + FileOutputFormat.setOutputPath(job, outputPath); + } + job.setMapperClass((mapperClass == null ? 
IndexScrutinyMapper.class : mapperClass)); + job.setNumReduceTasks(0); + // Set the Output classes + job.setMapOutputKeyClass(Text.class); + job.setMapOutputValueClass(Text.class); + TableMapReduceUtil.addDependencyJars(job); + return job; } - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + Path getOutputPath(final Configuration configuration, String basePath, PTable table) + throws IOException { + Path outputPath = null; + FileSystem fs; + if (basePath != null) { + outputPath = + CsvBulkImportUtil.getOutputPath(new Path(basePath), table.getPhysicalName().getString()); + fs = outputPath.getFileSystem(configuration); + fs.delete(outputPath, true); + } + return outputPath; } - - private static class JobFactory { - Connection connection; - Configuration configuration; - private final boolean useSnapshot; - private final long ts; - private final boolean outputInvalidRows; - private final OutputFormat outputFormat; - private final String basePath; - private final long scrutinyExecuteTime; - private final long outputMaxRows; // per mapper - private final String tenantId; - Class mapperClass; - - public JobFactory(Connection connection, Configuration configuration, long batchSize, - boolean useSnapshot, long ts, boolean outputInvalidRows, OutputFormat outputFormat, - String basePath, long outputMaxRows, String tenantId, - Class mapperClass) { - this.outputInvalidRows = outputInvalidRows; - this.outputFormat = outputFormat; - this.basePath = basePath; - this.outputMaxRows = outputMaxRows; - PhoenixConfigurationUtil.setScrutinyBatchSize(configuration, batchSize); - this.connection = connection; - this.configuration = configuration; - this.useSnapshot = useSnapshot; - this.tenantId = tenantId; - this.ts = ts; // CURRENT_SCN to set - scrutinyExecuteTime = EnvironmentEdgeManager.currentTimeMillis(); // time at which scrutiny was run. 
- // Same for - // all jobs created from this factory - PhoenixConfigurationUtil.setScrutinyExecuteTimestamp(configuration, - scrutinyExecuteTime); - if (!Strings.isNullOrEmpty(tenantId)) { - PhoenixConfigurationUtil.setTenantId(configuration, tenantId); - } - this.mapperClass = mapperClass; - } - - public Job createSubmittableJob(String schemaName, String indexTable, String dataTable, - SourceTable sourceTable, Class mapperClass) throws Exception { - Preconditions.checkArgument(SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) - || SourceTable.INDEX_TABLE_SOURCE.equals(sourceTable)); - - final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); - final String qIndexTable; - if (schemaName != null && !schemaName.isEmpty()) { - qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable); - } else { - qIndexTable = indexTable; - } - PhoenixConfigurationUtil.setScrutinyDataTable(configuration, qDataTable); - PhoenixConfigurationUtil.setScrutinyIndexTable(configuration, qIndexTable); - PhoenixConfigurationUtil.setScrutinySourceTable(configuration, sourceTable); - PhoenixConfigurationUtil.setScrutinyOutputInvalidRows(configuration, outputInvalidRows); - PhoenixConfigurationUtil.setScrutinyOutputMax(configuration, outputMaxRows); - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - final PTable pdataTable = phoenixConnection.getTable(qDataTable); - final PTable pindexTable = phoenixConnection.getTable(qIndexTable); - - // Randomize execution order, unless explicitly set - configuration.setBooleanIfUnset( - PhoenixConfigurationUtil.MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, true); - - // set CURRENT_SCN for our scan so that incoming writes don't throw off scrutiny - configuration.set(PhoenixConfigurationUtil.CURRENT_SCN_VALUE, Long.toString(ts)); - PhoenixConfigurationUtil.setMaxLookbackAge(configuration, pdataTable.getMaxLookbackAge()); - - // set the source table to either data or index table - SourceTargetColumnNames columnNames = - SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) - ? 
new SourceTargetColumnNames.DataSourceColNames(pdataTable, - pindexTable) - : new SourceTargetColumnNames.IndexSourceColNames(pdataTable, - pindexTable); - String qSourceTable = columnNames.getQualifiedSourceTableName(); - List sourceColumnNames = columnNames.getSourceColNames(); - List sourceDynamicCols = columnNames.getSourceDynamicCols(); - List targetDynamicCols = columnNames.getTargetDynamicCols(); - - // Setup the select query against source - we either select the index columns from the - // index table, - // or select the data table equivalents of the index columns from the data table - final String selectQuery = - QueryUtil.constructSelectStatement(qSourceTable, sourceColumnNames, null, - Hint.NO_INDEX, true); - LOGGER.info("Query used on source table to feed the mapper: " + selectQuery); - - PhoenixConfigurationUtil.setScrutinyOutputFormat(configuration, outputFormat); - // if outputting to table, setup the upsert to the output table - if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { - String upsertStmt = - IndexScrutinyTableOutput.constructOutputTableUpsert(sourceDynamicCols, - targetDynamicCols, connection); - PhoenixConfigurationUtil.setUpsertStatement(configuration, upsertStmt); - LOGGER.info("Upsert statement used for output table: " + upsertStmt); - } - - final String jobName = - String.format(INDEX_JOB_NAME_TEMPLATE, qSourceTable, - columnNames.getQualifiedTargetTableName()); - final Job job = Job.getInstance(configuration, jobName); - - if (!useSnapshot) { - PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, - selectQuery); - } else { // TODO check if using a snapshot works - Admin admin = null; - String snapshotName; - try { - final PhoenixConnection pConnection = - connection.unwrap(PhoenixConnection.class); - admin = pConnection.getQueryServices().getAdmin(); - String pdataTableName = pdataTable.getName().getString(); - snapshotName = new StringBuilder(pdataTableName).append("-Snapshot").toString(); - admin.snapshot(snapshotName, TableName.valueOf(pdataTableName)); - } finally { - if (admin != null) { - admin.close(); - } - } - // root dir not a subdirectory of hbase dir - Path rootDir = new Path("hdfs:///index-snapshot-dir"); - CommonFSUtils.setRootDir(configuration, rootDir); - - // set input for map reduce job using hbase snapshots - //PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName, - // qDataTable, restoreDir, selectQuery); - } - TableMapReduceUtil.initCredentials(job); - Path outputPath = - getOutputPath(configuration, basePath, - SourceTable.DATA_TABLE_SOURCE.equals(sourceTable) ? 
pdataTable - : pindexTable); - - return configureSubmittableJob(job, outputPath, mapperClass); + } + + @Override + public int run(String[] args) throws Exception { + Connection connection = null; + try { + /** start - parse command line configs **/ + CommandLine cmdLine = null; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + } + final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf()); + boolean useTenantId = cmdLine.hasOption(TENANT_ID_OPTION.getOpt()); + String tenantId = null; + if (useTenantId) { + tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); + configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + LOGGER.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId)); + } + connection = ConnectionUtil.getInputConnection(configuration); + final String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); + final String dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); + String indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); + final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); + String basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); + boolean isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + boolean useSnapshot = cmdLine.hasOption(SNAPSHOT_OPTION.getOpt()); + boolean outputInvalidRows = cmdLine.hasOption(OUTPUT_INVALID_ROWS_OPTION.getOpt()); + SourceTable sourceTable = cmdLine.hasOption(SOURCE_TABLE_OPTION.getOpt()) + ? SourceTable.valueOf(cmdLine.getOptionValue(SOURCE_TABLE_OPTION.getOpt())) + : SourceTable.BOTH; + + long batchSize = cmdLine.hasOption(BATCH_SIZE_OPTION.getOpt()) + ? Long.parseLong(cmdLine.getOptionValue(BATCH_SIZE_OPTION.getOpt())) + : PhoenixConfigurationUtil.DEFAULT_SCRUTINY_BATCH_SIZE; + + long ts = cmdLine.hasOption(TIMESTAMP.getOpt()) + ? Long.parseLong(cmdLine.getOptionValue(TIMESTAMP.getOpt())) + : EnvironmentEdgeManager.currentTimeMillis() - 60000; + + if (indexTable != null) { + if (!IndexTool.isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { + throw new IllegalArgumentException( + String.format(" %s is not an index table for %s ", indexTable, qDataTable)); } - - private Job configureSubmittableJob(Job job, Path outputPath, Class mapperClass) throws Exception { - Configuration conf = job.getConfiguration(); - conf.setBoolean("mapreduce.job.user.classpath.first", true); - HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); - job.setJarByClass(IndexScrutinyTool.class); - job.setOutputFormatClass(NullOutputFormat.class); - if (outputInvalidRows && OutputFormat.FILE.equals(outputFormat)) { - job.setOutputFormatClass(TextOutputFormat.class); - FileOutputFormat.setOutputPath(job, outputPath); - } - job.setMapperClass((mapperClass == null ? 
IndexScrutinyMapper.class : mapperClass)); - job.setNumReduceTasks(0); - // Set the Output classes - job.setMapOutputKeyClass(Text.class); - job.setMapOutputValueClass(Text.class); - TableMapReduceUtil.addDependencyJars(job); - return job; - } - - Path getOutputPath(final Configuration configuration, String basePath, PTable table) - throws IOException { - Path outputPath = null; - FileSystem fs; - if (basePath != null) { - outputPath = - CsvBulkImportUtil.getOutputPath(new Path(basePath), - table.getPhysicalName().getString()); - fs = outputPath.getFileSystem(configuration); - fs.delete(outputPath, true); - } - return outputPath; + } + + PTable pDataTable = connection.unwrap(PhoenixConnection.class).getTable(qDataTable); + validateTimestamp(configuration, ts, pDataTable.getMaxLookbackAge()); + + String outputFormatOption = cmdLine.getOptionValue(OUTPUT_FORMAT_OPTION.getOpt()); + OutputFormat outputFormat = outputFormatOption != null + ? OutputFormat.valueOf(outputFormatOption.toUpperCase()) + : OutputFormat.TABLE; + long outputMaxRows = cmdLine.hasOption(OUTPUT_MAX.getOpt()) + ? Long.parseLong(cmdLine.getOptionValue(OUTPUT_MAX.getOpt())) + : 1000000L; + /** end - parse command line configs **/ + + if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { + // create the output table if it doesn't exist + Configuration outputConfiguration = HBaseConfiguration.create(configuration); + outputConfiguration.unset(PhoenixRuntime.TENANT_ID_ATTRIB); + try (Connection outputConn = ConnectionUtil.getOutputConnection(outputConfiguration)) { + createScrutinyToolTables(outputConn); } - } - - @Override - public int run(String[] args) throws Exception { - Connection connection = null; - try { - /** start - parse command line configs **/ - CommandLine cmdLine = null; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - } - final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf()); - boolean useTenantId = cmdLine.hasOption(TENANT_ID_OPTION.getOpt()); - String tenantId = null; - if (useTenantId) { - tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); - configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - LOGGER.info(String.format("IndexScrutinyTool uses a tenantId %s", tenantId)); - } - connection = ConnectionUtil.getInputConnection(configuration); - final String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); - final String dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); - String indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); - final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); - String basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); - boolean isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); - boolean useSnapshot = cmdLine.hasOption(SNAPSHOT_OPTION.getOpt()); - boolean outputInvalidRows = cmdLine.hasOption(OUTPUT_INVALID_ROWS_OPTION.getOpt()); - SourceTable sourceTable = - cmdLine.hasOption(SOURCE_TABLE_OPTION.getOpt()) - ? SourceTable - .valueOf(cmdLine.getOptionValue(SOURCE_TABLE_OPTION.getOpt())) - : SourceTable.BOTH; - - long batchSize = - cmdLine.hasOption(BATCH_SIZE_OPTION.getOpt()) - ? Long.parseLong(cmdLine.getOptionValue(BATCH_SIZE_OPTION.getOpt())) - : PhoenixConfigurationUtil.DEFAULT_SCRUTINY_BATCH_SIZE; - - long ts = - cmdLine.hasOption(TIMESTAMP.getOpt()) - ? 
Long.parseLong(cmdLine.getOptionValue(TIMESTAMP.getOpt())) - : EnvironmentEdgeManager.currentTimeMillis() - 60000; - - if (indexTable != null) { - if (!IndexTool.isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { - throw new IllegalArgumentException(String - .format(" %s is not an index table for %s ", indexTable, qDataTable)); - } - } - - PTable pDataTable = connection.unwrap(PhoenixConnection.class).getTable(qDataTable); - validateTimestamp(configuration, ts, pDataTable.getMaxLookbackAge()); - - String outputFormatOption = cmdLine.getOptionValue(OUTPUT_FORMAT_OPTION.getOpt()); - OutputFormat outputFormat = - outputFormatOption != null - ? OutputFormat.valueOf(outputFormatOption.toUpperCase()) - : OutputFormat.TABLE; - long outputMaxRows = - cmdLine.hasOption(OUTPUT_MAX.getOpt()) - ? Long.parseLong(cmdLine.getOptionValue(OUTPUT_MAX.getOpt())) - : 1000000L; - /** end - parse command line configs **/ - - if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { - // create the output table if it doesn't exist - Configuration outputConfiguration = HBaseConfiguration.create(configuration); - outputConfiguration.unset(PhoenixRuntime.TENANT_ID_ATTRIB); - try (Connection outputConn = ConnectionUtil.getOutputConnection(outputConfiguration)) { - createScrutinyToolTables(outputConn); - } - } - - LOGGER.info(String.format( - "Running scrutiny [schemaName=%s, dataTable=%s, indexTable=%s, useSnapshot=%s, timestamp=%s, batchSize=%s, outputBasePath=%s, outputFormat=%s, outputMaxRows=%s]", - schemaName, dataTable, indexTable, useSnapshot, ts, batchSize, basePath, - outputFormat, outputMaxRows)); - JobFactory jobFactory = - new JobFactory(connection, configuration, batchSize, useSnapshot, ts, - outputInvalidRows, outputFormat, basePath, outputMaxRows, tenantId, mapperClass); - // If we are running the scrutiny with both tables as the source, run two separate jobs, - // one for each direction - if (SourceTable.BOTH.equals(sourceTable)) { - jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, - SourceTable.DATA_TABLE_SOURCE, mapperClass)); - jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, - SourceTable.INDEX_TABLE_SOURCE, mapperClass)); - } else { - jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, - sourceTable, mapperClass)); - } - - if (!isForeground) { - LOGGER.info("Running Index Scrutiny in Background - Submit async and exit"); - for (Job job : jobs) { - job.submit(); - } - return 0; - } - LOGGER.info( - "Running Index Scrutiny in Foreground. Waits for the build to complete. This may take a long time!."); - boolean result = true; - for (Job job : jobs) { - result = result && job.waitForCompletion(true); - } - - // write the results to the output metadata table - if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { - LOGGER.info("Writing results of jobs to output table " - + IndexScrutinyTableOutput.OUTPUT_METADATA_TABLE_NAME); - IndexScrutinyTableOutput.writeJobResults(connection, args, jobs); - } - - if (result) { - return 0; - } else { - LOGGER.error("IndexScrutinyTool job failed! 
Check logs for errors.."); - return -1; - } - } catch (Exception ex) { - LOGGER.error("An exception occurred while performing the indexing job: " - + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex)); - return -1; - } finally { - try { - if (connection != null) { - connection.close(); - } - } catch (SQLException sqle) { - LOGGER.error("Failed to close connection ", sqle.getMessage()); - throw new RuntimeException("Failed to close connection"); - } + } + + LOGGER.info(String.format( + "Running scrutiny [schemaName=%s, dataTable=%s, indexTable=%s, useSnapshot=%s, timestamp=%s, batchSize=%s, outputBasePath=%s, outputFormat=%s, outputMaxRows=%s]", + schemaName, dataTable, indexTable, useSnapshot, ts, batchSize, basePath, outputFormat, + outputMaxRows)); + JobFactory jobFactory = new JobFactory(connection, configuration, batchSize, useSnapshot, ts, + outputInvalidRows, outputFormat, basePath, outputMaxRows, tenantId, mapperClass); + // If we are running the scrutiny with both tables as the source, run two separate jobs, + // one for each direction + if (SourceTable.BOTH.equals(sourceTable)) { + jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, + SourceTable.DATA_TABLE_SOURCE, mapperClass)); + jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, + SourceTable.INDEX_TABLE_SOURCE, mapperClass)); + } else { + jobs.add(jobFactory.createSubmittableJob(schemaName, indexTable, dataTable, sourceTable, + mapperClass)); + } + + if (!isForeground) { + LOGGER.info("Running Index Scrutiny in Background - Submit async and exit"); + for (Job job : jobs) { + job.submit(); } - } - - private void validateTimestamp(Configuration configuration, long ts, Long dataTableMaxLookback) { - long maxLookBackAge = MetaDataUtil.getMaxLookbackAge(configuration, dataTableMaxLookback); - if (maxLookBackAge != BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000L) { - long minTimestamp = EnvironmentEdgeManager.currentTimeMillis() - maxLookBackAge; - if (ts < minTimestamp){ - throw new IllegalArgumentException("Index scrutiny can't look back past the configured" + - " max lookback age: " + maxLookBackAge / 1000 + " seconds"); - } + return 0; + } + LOGGER.info( + "Running Index Scrutiny in Foreground. Waits for the build to complete. This may take a long time!."); + boolean result = true; + for (Job job : jobs) { + result = result && job.waitForCompletion(true); + } + + // write the results to the output metadata table + if (outputInvalidRows && OutputFormat.TABLE.equals(outputFormat)) { + LOGGER.info("Writing results of jobs to output table " + + IndexScrutinyTableOutput.OUTPUT_METADATA_TABLE_NAME); + IndexScrutinyTableOutput.writeJobResults(connection, args, jobs); + } + + if (result) { + return 0; + } else { + LOGGER.error("IndexScrutinyTool job failed! 
Check logs for errors.."); + return -1; + } + } catch (Exception ex) { + LOGGER.error("An exception occurred while performing the indexing job: " + + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex)); + return -1; + } finally { + try { + if (connection != null) { + connection.close(); } + } catch (SQLException sqle) { + LOGGER.error("Failed to close connection ", sqle.getMessage()); + throw new RuntimeException("Failed to close connection"); + } } - - @VisibleForTesting - public List getJobs() { - return jobs; - } - - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new IndexScrutinyTool(), args); - System.exit(result); - } - - public static void createScrutinyToolTables(Connection conn) throws Exception { - conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_TABLE_DDL); - conn.createStatement(). - execute(IndexScrutinyTableOutput.OUTPUT_TABLE_BEYOND_LOOKBACK_DDL); - conn.createStatement() - .execute(IndexScrutinyTableOutput.OUTPUT_METADATA_DDL); - conn.createStatement(). - execute(IndexScrutinyTableOutput.OUTPUT_METADATA_BEYOND_LOOKBACK_COUNTER_DDL); + } + + private void validateTimestamp(Configuration configuration, long ts, Long dataTableMaxLookback) { + long maxLookBackAge = MetaDataUtil.getMaxLookbackAge(configuration, dataTableMaxLookback); + if ( + maxLookBackAge != BaseScannerRegionObserverConstants.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000L + ) { + long minTimestamp = EnvironmentEdgeManager.currentTimeMillis() - maxLookBackAge; + if (ts < minTimestamp) { + throw new IllegalArgumentException("Index scrutiny can't look back past the configured" + + " max lookback age: " + maxLookBackAge / 1000 + " seconds"); + } } + } + + @VisibleForTesting + public List getJobs() { + return jobs; + } + + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new IndexScrutinyTool(), args); + System.exit(result); + } + + public static void createScrutinyToolTables(Connection conn) throws Exception { + conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_TABLE_DDL); + conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_TABLE_BEYOND_LOOKBACK_DDL); + conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_METADATA_DDL); + conn.createStatement() + .execute(IndexScrutinyTableOutput.OUTPUT_METADATA_BEYOND_LOOKBACK_COUNTER_DDL); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java index 7e28621461c..f9a14cbabac 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; - import static org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository.ROW_KEY_SEPARATOR; import java.io.IOException; @@ -38,16 +37,6 @@ import java.util.Map; import java.util.UUID; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.hbase.index.AbstractValueGetter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -56,8 +45,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -78,6 +67,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.phoenix.compile.PostIndexDDLCompiler; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; +import org.apache.phoenix.hbase.index.AbstractValueGetter; import org.apache.phoenix.hbase.index.ValueGetter; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.util.IndexManagementUtil; @@ -101,6 +91,16 @@ import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ColumnInfo; import 
org.apache.phoenix.util.EnvironmentEdgeManager; @@ -115,1134 +115,1154 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * An MR job to populate the index table from the data table. - * */ public class IndexTool extends Configured implements Tool { - public enum IndexVerifyType { - BEFORE("BEFORE"), - AFTER("AFTER"), - BOTH("BOTH"), - ONLY("ONLY"), - NONE("NONE"); - private String value; - private byte[] valueBytes; - - IndexVerifyType(String value) { - this.value = value; - this.valueBytes = PVarchar.INSTANCE.toBytes(value); - } - - public String getValue() { - return this.value; - } - - public byte[] toBytes() { - return this.valueBytes; - } - - public static IndexVerifyType fromValue(String value) { - for (IndexVerifyType verifyType: IndexVerifyType.values()) { - if (value.equals(verifyType.getValue())) { - return verifyType; - } - } - throw new IllegalStateException("Invalid value: "+ value + " for " + IndexVerifyType.class); - } - - public static IndexVerifyType fromValue(byte[] value) { - return fromValue(Bytes.toString(value)); - } + public enum IndexVerifyType { + BEFORE("BEFORE"), + AFTER("AFTER"), + BOTH("BOTH"), + ONLY("ONLY"), + NONE("NONE"); + + private String value; + private byte[] valueBytes; + + IndexVerifyType(String value) { + this.value = value; + this.valueBytes = PVarchar.INSTANCE.toBytes(value); } - public enum IndexDisableLoggingType { - NONE("NONE"), - BEFORE("BEFORE"), - AFTER("AFTER"), - BOTH("BOTH"); - - private String value; - private byte[] valueBytes; - - IndexDisableLoggingType(String value) { - this.value = value; - this.valueBytes = PVarchar.INSTANCE.toBytes(value); - } - - public String getValue() { - return this.value; - } - - public byte[] toBytes() { - return this.valueBytes; - } + public String getValue() { + return this.value; + } - public static IndexDisableLoggingType fromValue(String value) { - for (IndexDisableLoggingType disableLoggingType: IndexDisableLoggingType.values()) { - if (value.equals(disableLoggingType.getValue())) { - return disableLoggingType; - } - } - throw new IllegalStateException("Invalid value: "+ value + " for " + IndexDisableLoggingType.class); - } + public byte[] toBytes() { + return this.valueBytes; + } - public static IndexDisableLoggingType fromValue(byte[] value) { - return fromValue(Bytes.toString(value)); + public static IndexVerifyType fromValue(String value) { + for (IndexVerifyType verifyType : IndexVerifyType.values()) { + if (value.equals(verifyType.getValue())) { + return verifyType; } + } + throw new IllegalStateException("Invalid value: " + value + " for " + IndexVerifyType.class); } - private static final Logger LOGGER = LoggerFactory.getLogger(IndexTool.class); - - //The raw identifiers as passed in, with the escaping used in SQL - //(double quotes for case sensitivity) - private String schemaName; - private String dataTable; - private String indexTable; - private String dataTableWithSchema; - private String indexTableWithSchema; - - private boolean isPartialBuild, isForeground; - private IndexVerifyType indexVerifyType = IndexVerifyType.NONE; - private IndexDisableLoggingType disableLoggingType = IndexDisableLoggingType.NONE; - private SourceTable sourceTable = SourceTable.DATA_TABLE_SOURCE; - //The qualified normalized table names (no double quotes, case same as HBase table) - private String qDataTable; //normalized with schema - private String qIndexTable; //normalized with schema - private String qSchemaName; - private 
boolean useSnapshot; - private boolean isLocalIndexBuild = false; - private boolean shouldDeleteBeforeRebuild; - private PTable pIndexTable = null; - private PTable pDataTable; - private String tenantId = null; - private Job job; - private Long startTime, endTime, lastVerifyTime; - private IndexType indexType; - private String basePath; - byte[][] splitKeysBeforeJob = null; - Configuration configuration; - - private static final Option SCHEMA_NAME_OPTION = new Option("s", "schema", true, - "Phoenix schema name (optional)"); - private static final Option DATA_TABLE_OPTION = new Option("dt", "data-table", true, - "Data table name (mandatory)"); - private static final Option INDEX_TABLE_OPTION = new Option("it", "index-table", true, - "Index table name(not required in case of partial rebuilding)"); - - private static final Option PARTIAL_REBUILD_OPTION = new Option("pr", "partial-rebuild", false, - "To build indexes for a data table from least disabledTimeStamp"); - - private static final Option DIRECT_API_OPTION = new Option("direct", "direct", false, - "This parameter is deprecated. Direct mode will be used whether it is set or not. Keeping it for backwards compatibility."); - - private static final Option VERIFY_OPTION = new Option("v", "verify", true, - "To verify every data row has a corresponding row of a global index. For other types of indexes, " + - "this option will be silently ignored. The accepted values are NONE, ONLY, BEFORE, AFTER, and BOTH. " + - "NONE is for no inline verification, which is also the default for this option. ONLY is for " + - "verifying without rebuilding index rows. The rest for verifying before, after, and both before " + - "and after rebuilding row. If the verification is done before rebuilding rows and the correct " + - "index rows will not be rebuilt"); - - private static final double DEFAULT_SPLIT_SAMPLING_RATE = 10.0; - - private static final Option SPLIT_INDEX_OPTION = - new Option("sp", "split", true, - "Split the index table before building, to have the same # of regions as the data table. " - + "The data table is sampled to get uniform index splits across the index values. " - + "Takes an optional argument specifying the sampling rate," - + "otherwise defaults to " + DEFAULT_SPLIT_SAMPLING_RATE); - - private static final int DEFAULT_AUTOSPLIT_NUM_REGIONS = 20; - - private static final Option AUTO_SPLIT_INDEX_OPTION = - new Option("spa", "autosplit", true, - "Automatically split the index table if the # of data table regions is greater than N. " - + "Takes an optional argument specifying N, otherwise defaults to " + DEFAULT_AUTOSPLIT_NUM_REGIONS - + ". Can be used in conjunction with -split option to specify the sampling rate"); - - private static final Option RUN_FOREGROUND_OPTION = - new Option( - "runfg", - "run-foreground", - false, - "Applicable on top of -direct option." - + "If specified, runs index build in Foreground. 
Default - Runs the build in background."); - private static final Option OUTPUT_PATH_OPTION = new Option("op", "output-path", true, - "Output path where the files are written"); - private static final Option SNAPSHOT_OPTION = new Option("snap", "snapshot", false, - "If specified, uses Snapshots for async index building (optional)"); - private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, - "If specified, uses Tenant connection for tenant view index building (optional)"); - - private static final Option DELETE_ALL_AND_REBUILD_OPTION = new Option("deleteall", "delete-all-and-rebuild", false, - "Applicable only to global indexes on tables, not to local or view indexes. " - + "If specified, truncates the index table and rebuilds (optional)"); - - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - private static final Option START_TIME_OPTION = new Option("st", "start-time", - true, "Start time for indextool rebuild or verify"); - private static final Option END_TIME_OPTION = new Option("et", "end-time", - true, "End time for indextool rebuild or verify"); - - private static final Option RETRY_VERIFY_OPTION = new Option("rv", "retry-verify", - true, "Max scan ts of the last rebuild/verify that needs to be retried incrementally"); - - private static final Option DISABLE_LOGGING_OPTION = new Option("dl", - "disable-logging", true - , "Disable logging of failed verification rows for BEFORE, " + - "AFTER, or BOTH verify jobs"); - - private static final Option USE_INDEX_TABLE_AS_SOURCE_OPTION = - new Option("fi", "from-index", false, - "To verify every row in the index table has a corresponding row in the data table. " - + "Only supported for global indexes. If this option is used with -v AFTER, these " - + "extra rows will be identified but not repaired."); - - public static final String INDEX_JOB_NAME_TEMPLATE = "PHOENIX_%s.%s_INDX_%s"; - - public static final String INVALID_TIME_RANGE_EXCEPTION_MESSAGE = "startTime is greater than " - + "or equal to endTime " - + "or either of them are set in the future; IndexTool can't proceed."; - - public static final String FEATURE_NOT_APPLICABLE = "start-time/end-time and retry verify feature are only " - + "applicable for local or non-transactional global indexes"; - - public static final String RETRY_VERIFY_NOT_APPLICABLE = "retry verify feature accepts " - + "non-zero ts set in the past and ts must be present in PHOENIX_INDEX_TOOL_RESULT table"; - - private Options getOptions() { - final Options options = new Options(); - options.addOption(SCHEMA_NAME_OPTION); - options.addOption(DATA_TABLE_OPTION); - options.addOption(INDEX_TABLE_OPTION); - options.addOption(PARTIAL_REBUILD_OPTION); - options.addOption(DIRECT_API_OPTION); - options.addOption(VERIFY_OPTION); - options.addOption(RUN_FOREGROUND_OPTION); - options.addOption(OUTPUT_PATH_OPTION); - options.addOption(SNAPSHOT_OPTION); - options.addOption(TENANT_ID_OPTION); - options.addOption(DELETE_ALL_AND_REBUILD_OPTION); - options.addOption(HELP_OPTION); - AUTO_SPLIT_INDEX_OPTION.setOptionalArg(true); - SPLIT_INDEX_OPTION.setOptionalArg(true); - START_TIME_OPTION.setOptionalArg(true); - END_TIME_OPTION.setOptionalArg(true); - RETRY_VERIFY_OPTION.setOptionalArg(true); - options.addOption(AUTO_SPLIT_INDEX_OPTION); - options.addOption(SPLIT_INDEX_OPTION); - options.addOption(START_TIME_OPTION); - options.addOption(END_TIME_OPTION); - options.addOption(RETRY_VERIFY_OPTION); - options.addOption(DISABLE_LOGGING_OPTION); - 
options.addOption(USE_INDEX_TABLE_AS_SOURCE_OPTION); - return options; + public static IndexVerifyType fromValue(byte[] value) { + return fromValue(Bytes.toString(value)); } + } - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * @param args supplied command line arguments - * @return the parsed command line - */ - @VisibleForTesting - public CommandLine parseOptions(String[] args) { + public enum IndexDisableLoggingType { + NONE("NONE"), + BEFORE("BEFORE"), + AFTER("AFTER"), + BOTH("BOTH"); - final Options options = getOptions(); + private String value; + private byte[] valueBytes; - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - - if (!cmdLine.hasOption(DATA_TABLE_OPTION.getOpt())) { - throw new IllegalStateException(DATA_TABLE_OPTION.getLongOpt() + " is a mandatory " - + "parameter"); - } - - if (cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) - && cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt())) { - throw new IllegalStateException("Index name should not be passed with " - + PARTIAL_REBUILD_OPTION.getLongOpt()); - } - - if (!cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) - && !cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt())) { - throw new IllegalStateException("Index name should be passed unless it is a partial rebuild."); - } + IndexDisableLoggingType(String value) { + this.value = value; + this.valueBytes = PVarchar.INSTANCE.toBytes(value); + } - if (cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) && cmdLine.hasOption(DELETE_ALL_AND_REBUILD_OPTION.getOpt())) { - throw new IllegalStateException(DELETE_ALL_AND_REBUILD_OPTION.getLongOpt() + " is not compatible with " - + PARTIAL_REBUILD_OPTION.getLongOpt()); - } + public String getValue() { + return this.value; + } - boolean splitIndex = cmdLine.hasOption(AUTO_SPLIT_INDEX_OPTION.getOpt()) || cmdLine.hasOption(SPLIT_INDEX_OPTION.getOpt()); - if (splitIndex && !cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt())) { - throw new IllegalStateException("Must pass an index name for the split index option"); - } - if (splitIndex && cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt())) { - throw new IllegalStateException("Cannot split index for a partial rebuild, as the index table is dropped"); - } - if (loggingDisabledMismatchesVerifyOption(cmdLine)){ - throw new IllegalStateException("Can't disable index verification logging when no " + - "index verification or the wrong kind of index verification has been requested. 
" + - "VerifyType: [" + cmdLine.getOptionValue(VERIFY_OPTION.getOpt()) + "] and " + - "DisableLoggingType: [" - + cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt()) + "]"); - } - return cmdLine; + public byte[] toBytes() { + return this.valueBytes; } - private boolean loggingDisabledMismatchesVerifyOption(CommandLine cmdLine) { - boolean loggingDisabled = cmdLine.hasOption(DISABLE_LOGGING_OPTION.getOpt()); - if (!loggingDisabled) { - return false; - } - boolean hasVerifyOption = - cmdLine.hasOption(VERIFY_OPTION.getOpt()); - if (!hasVerifyOption) { - return true; - } - String loggingDisableValue = cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt()); - String verifyValue = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); - IndexDisableLoggingType loggingDisableType = IndexDisableLoggingType.fromValue(loggingDisableValue); - IndexVerifyType verifyType = IndexVerifyType.fromValue(verifyValue); - //error if we're trying to disable logging when we're not doing any verification - if (verifyType.equals(IndexVerifyType.NONE)){ - return true; - } - //error if we're disabling logging after rebuild but we're not verifying after rebuild - if ((verifyType.equals(IndexVerifyType.BEFORE) || verifyType.equals(IndexVerifyType.ONLY)) - && loggingDisableType.equals(IndexDisableLoggingType.AFTER)) { - return true; - } - //error if we're disabling logging before rebuild but we're not verifying before rebuild - if ((verifyType.equals(IndexVerifyType.AFTER)) - && loggingDisableType.equals(IndexDisableLoggingType.BEFORE)) { - return true; - } - if (loggingDisableType.equals(IndexDisableLoggingType.BOTH) && - !verifyType.equals(IndexVerifyType.BOTH)){ - return true; + public static IndexDisableLoggingType fromValue(String value) { + for (IndexDisableLoggingType disableLoggingType : IndexDisableLoggingType.values()) { + if (value.equals(disableLoggingType.getValue())) { + return disableLoggingType; } - return false; + } + throw new IllegalStateException( + "Invalid value: " + value + " for " + IndexDisableLoggingType.class); } - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + public static IndexDisableLoggingType fromValue(byte[] value) { + return fromValue(Bytes.toString(value)); } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + } + + private static final Logger LOGGER = LoggerFactory.getLogger(IndexTool.class); + + // The raw identifiers as passed in, with the escaping used in SQL + // (double quotes for case sensitivity) + private String schemaName; + private String dataTable; + private String indexTable; + private String dataTableWithSchema; + private String indexTableWithSchema; + + private boolean isPartialBuild, isForeground; + private IndexVerifyType indexVerifyType = IndexVerifyType.NONE; + private IndexDisableLoggingType disableLoggingType = IndexDisableLoggingType.NONE; + private SourceTable sourceTable = SourceTable.DATA_TABLE_SOURCE; + // The qualified normalized table names (no double quotes, case same as HBase table) + private String qDataTable; // normalized with schema + private String qIndexTable; // normalized with schema + private String qSchemaName; + private boolean useSnapshot; + private boolean isLocalIndexBuild = false; + private boolean shouldDeleteBeforeRebuild; + private PTable pIndexTable = null; + private PTable pDataTable; + private String tenantId = 
null; + private Job job; + private Long startTime, endTime, lastVerifyTime; + private IndexType indexType; + private String basePath; + byte[][] splitKeysBeforeJob = null; + Configuration configuration; + + private static final Option SCHEMA_NAME_OPTION = + new Option("s", "schema", true, "Phoenix schema name (optional)"); + private static final Option DATA_TABLE_OPTION = + new Option("dt", "data-table", true, "Data table name (mandatory)"); + private static final Option INDEX_TABLE_OPTION = new Option("it", "index-table", true, + "Index table name(not required in case of partial rebuilding)"); + + private static final Option PARTIAL_REBUILD_OPTION = new Option("pr", "partial-rebuild", false, + "To build indexes for a data table from least disabledTimeStamp"); + + private static final Option DIRECT_API_OPTION = new Option("direct", "direct", false, + "This parameter is deprecated. Direct mode will be used whether it is set or not. Keeping it for backwards compatibility."); + + private static final Option VERIFY_OPTION = new Option("v", "verify", true, + "To verify every data row has a corresponding row of a global index. For other types of indexes, " + + "this option will be silently ignored. The accepted values are NONE, ONLY, BEFORE, AFTER, and BOTH. " + + "NONE is for no inline verification, which is also the default for this option. ONLY is for " + + "verifying without rebuilding index rows. The rest for verifying before, after, and both before " + + "and after rebuilding row. If the verification is done before rebuilding rows and the correct " + + "index rows will not be rebuilt"); + + private static final double DEFAULT_SPLIT_SAMPLING_RATE = 10.0; + + private static final Option SPLIT_INDEX_OPTION = new Option("sp", "split", true, + "Split the index table before building, to have the same # of regions as the data table. " + + "The data table is sampled to get uniform index splits across the index values. " + + "Takes an optional argument specifying the sampling rate," + "otherwise defaults to " + + DEFAULT_SPLIT_SAMPLING_RATE); + + private static final int DEFAULT_AUTOSPLIT_NUM_REGIONS = 20; + + private static final Option AUTO_SPLIT_INDEX_OPTION = new Option("spa", "autosplit", true, + "Automatically split the index table if the # of data table regions is greater than N. " + + "Takes an optional argument specifying N, otherwise defaults to " + + DEFAULT_AUTOSPLIT_NUM_REGIONS + + ". Can be used in conjunction with -split option to specify the sampling rate"); + + private static final Option RUN_FOREGROUND_OPTION = + new Option("runfg", "run-foreground", false, "Applicable on top of -direct option." + + "If specified, runs index build in Foreground. Default - Runs the build in background."); + private static final Option OUTPUT_PATH_OPTION = + new Option("op", "output-path", true, "Output path where the files are written"); + private static final Option SNAPSHOT_OPTION = new Option("snap", "snapshot", false, + "If specified, uses Snapshots for async index building (optional)"); + private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, + "If specified, uses Tenant connection for tenant view index building (optional)"); + + private static final Option DELETE_ALL_AND_REBUILD_OPTION = + new Option("deleteall", "delete-all-and-rebuild", false, + "Applicable only to global indexes on tables, not to local or view indexes. 
" + + "If specified, truncates the index table and rebuilds (optional)"); + + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + private static final Option START_TIME_OPTION = + new Option("st", "start-time", true, "Start time for indextool rebuild or verify"); + private static final Option END_TIME_OPTION = + new Option("et", "end-time", true, "End time for indextool rebuild or verify"); + + private static final Option RETRY_VERIFY_OPTION = new Option("rv", "retry-verify", true, + "Max scan ts of the last rebuild/verify that needs to be retried incrementally"); + + private static final Option DISABLE_LOGGING_OPTION = new Option("dl", "disable-logging", true, + "Disable logging of failed verification rows for BEFORE, " + "AFTER, or BOTH verify jobs"); + + private static final Option USE_INDEX_TABLE_AS_SOURCE_OPTION = + new Option("fi", "from-index", false, + "To verify every row in the index table has a corresponding row in the data table. " + + "Only supported for global indexes. If this option is used with -v AFTER, these " + + "extra rows will be identified but not repaired."); + + public static final String INDEX_JOB_NAME_TEMPLATE = "PHOENIX_%s.%s_INDX_%s"; + + public static final String INVALID_TIME_RANGE_EXCEPTION_MESSAGE = "startTime is greater than " + + "or equal to endTime " + "or either of them are set in the future; IndexTool can't proceed."; + + public static final String FEATURE_NOT_APPLICABLE = + "start-time/end-time and retry verify feature are only " + + "applicable for local or non-transactional global indexes"; + + public static final String RETRY_VERIFY_NOT_APPLICABLE = "retry verify feature accepts " + + "non-zero ts set in the past and ts must be present in PHOENIX_INDEX_TOOL_RESULT table"; + + private Options getOptions() { + final Options options = new Options(); + options.addOption(SCHEMA_NAME_OPTION); + options.addOption(DATA_TABLE_OPTION); + options.addOption(INDEX_TABLE_OPTION); + options.addOption(PARTIAL_REBUILD_OPTION); + options.addOption(DIRECT_API_OPTION); + options.addOption(VERIFY_OPTION); + options.addOption(RUN_FOREGROUND_OPTION); + options.addOption(OUTPUT_PATH_OPTION); + options.addOption(SNAPSHOT_OPTION); + options.addOption(TENANT_ID_OPTION); + options.addOption(DELETE_ALL_AND_REBUILD_OPTION); + options.addOption(HELP_OPTION); + AUTO_SPLIT_INDEX_OPTION.setOptionalArg(true); + SPLIT_INDEX_OPTION.setOptionalArg(true); + START_TIME_OPTION.setOptionalArg(true); + END_TIME_OPTION.setOptionalArg(true); + RETRY_VERIFY_OPTION.setOptionalArg(true); + options.addOption(AUTO_SPLIT_INDEX_OPTION); + options.addOption(SPLIT_INDEX_OPTION); + options.addOption(START_TIME_OPTION); + options.addOption(END_TIME_OPTION); + options.addOption(RETRY_VERIFY_OPTION); + options.addOption(DISABLE_LOGGING_OPTION); + options.addOption(USE_INDEX_TABLE_AS_SOURCE_OPTION); + return options; + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. 
+ * @param args supplied command line arguments + * @return the parsed command line + */ + @VisibleForTesting + public CommandLine parseOptions(String[] args) { + + final Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - public Long getStartTime() { - return startTime; + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - public Long getEndTime() { return endTime; } + if (!cmdLine.hasOption(DATA_TABLE_OPTION.getOpt())) { + throw new IllegalStateException( + DATA_TABLE_OPTION.getLongOpt() + " is a mandatory " + "parameter"); + } - public Long getLastVerifyTime() { return lastVerifyTime; } + if ( + cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) + && cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt()) + ) { + throw new IllegalStateException( + "Index name should not be passed with " + PARTIAL_REBUILD_OPTION.getLongOpt()); + } - public IndexTool.IndexDisableLoggingType getDisableLoggingType() { - return disableLoggingType; + if ( + !cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) + && !cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt()) + ) { + throw new IllegalStateException( + "Index name should be passed unless it is a partial rebuild."); } - public IndexScrutinyTool.SourceTable getSourceTable() { return sourceTable; } + if ( + cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()) + && cmdLine.hasOption(DELETE_ALL_AND_REBUILD_OPTION.getOpt()) + ) { + throw new IllegalStateException(DELETE_ALL_AND_REBUILD_OPTION.getLongOpt() + + " is not compatible with " + PARTIAL_REBUILD_OPTION.getLongOpt()); + } - class JobFactory { - Connection connection; - Configuration configuration; - private Path outputPath; - private FileSystem fs; + boolean splitIndex = cmdLine.hasOption(AUTO_SPLIT_INDEX_OPTION.getOpt()) + || cmdLine.hasOption(SPLIT_INDEX_OPTION.getOpt()); + if (splitIndex && !cmdLine.hasOption(INDEX_TABLE_OPTION.getOpt())) { + throw new IllegalStateException("Must pass an index name for the split index option"); + } + if (splitIndex && cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt())) { + throw new IllegalStateException( + "Cannot split index for a partial rebuild, as the index table is dropped"); + } + if (loggingDisabledMismatchesVerifyOption(cmdLine)) { + throw new IllegalStateException("Can't disable index verification logging when no " + + "index verification or the wrong kind of index verification has been requested. 
" + + "VerifyType: [" + cmdLine.getOptionValue(VERIFY_OPTION.getOpt()) + "] and " + + "DisableLoggingType: [" + cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt()) + "]"); + } + return cmdLine; + } - public JobFactory(Connection connection, Configuration configuration, Path outputPath) { - this.connection = connection; - this.configuration = configuration; - this.outputPath = outputPath; - } + private boolean loggingDisabledMismatchesVerifyOption(CommandLine cmdLine) { + boolean loggingDisabled = cmdLine.hasOption(DISABLE_LOGGING_OPTION.getOpt()); + if (!loggingDisabled) { + return false; + } + boolean hasVerifyOption = cmdLine.hasOption(VERIFY_OPTION.getOpt()); + if (!hasVerifyOption) { + return true; + } + String loggingDisableValue = cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt()); + String verifyValue = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); + IndexDisableLoggingType loggingDisableType = + IndexDisableLoggingType.fromValue(loggingDisableValue); + IndexVerifyType verifyType = IndexVerifyType.fromValue(verifyValue); + // error if we're trying to disable logging when we're not doing any verification + if (verifyType.equals(IndexVerifyType.NONE)) { + return true; + } + // error if we're disabling logging after rebuild but we're not verifying after rebuild + if ( + (verifyType.equals(IndexVerifyType.BEFORE) || verifyType.equals(IndexVerifyType.ONLY)) + && loggingDisableType.equals(IndexDisableLoggingType.AFTER) + ) { + return true; + } + // error if we're disabling logging before rebuild but we're not verifying before rebuild + if ( + (verifyType.equals(IndexVerifyType.AFTER)) + && loggingDisableType.equals(IndexDisableLoggingType.BEFORE) + ) { + return true; + } + if ( + loggingDisableType.equals(IndexDisableLoggingType.BOTH) + && !verifyType.equals(IndexVerifyType.BOTH) + ) { + return true; + } + return false; + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + public Long getStartTime() { + return startTime; + } + + public Long getEndTime() { + return endTime; + } + + public Long getLastVerifyTime() { + return lastVerifyTime; + } + + public IndexTool.IndexDisableLoggingType getDisableLoggingType() { + return disableLoggingType; + } + + public IndexScrutinyTool.SourceTable getSourceTable() { + return sourceTable; + } + + class JobFactory { + Connection connection; + Configuration configuration; + private Path outputPath; + private FileSystem fs; - public Job getJob() throws Exception { - if (isPartialBuild) { - return configureJobForPartialBuild(); - } else { - long maxTimeRange = pIndexTable.getTimeStamp() + 1; - // this is set to ensure index tables remains consistent post population. 
- if (pDataTable.isTransactional()) { - configuration.set(PhoenixConfigurationUtil.TX_SCN_VALUE, - Long.toString(TransactionUtil.convertToNanoseconds(maxTimeRange))); - configuration.set(PhoenixConfigurationUtil.TX_PROVIDER, pDataTable.getTransactionProvider().name()); - } - if (useSnapshot || (!isLocalIndexBuild && pDataTable.isTransactional())) { - PhoenixConfigurationUtil.setCurrentScnValue(configuration, maxTimeRange); - if (indexVerifyType != IndexVerifyType.NONE) { - LOGGER.warn("Verification is not supported for snapshots and transactional" - + "table index rebuilds, verification parameter ignored"); - } - return configureJobForAsyncIndex(); - } else { - // Local and non-transactional global indexes to be built on the server side - // It is safe not to set CURRENT_SCN_VALUE for server side rebuilds, in order to make sure that - // all the rows that exist so far will be rebuilt. The current time of the servers will - // be used to set the time range for server side scans. - - // However, PHOENIX-5732 introduces endTime parameter to be passed optionally for IndexTool. - // When endTime is passed for local and non-tx global indexes, we'll override the CURRENT_SCN_VALUE. - if (endTime != null) { - PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); - } - if (lastVerifyTime != null) { - PhoenixConfigurationUtil.setIndexToolLastVerifyTime(configuration, lastVerifyTime); - } - return configureJobForServerBuildIndex(); - } - } - } + public JobFactory(Connection connection, Configuration configuration, Path outputPath) { + this.connection = connection; + this.configuration = configuration; + this.outputPath = outputPath; + } - private Job configureJobForPartialBuild() throws Exception { - connection = ConnectionUtil.getInputConnection(configuration); - long minDisableTimestamp = HConstants.LATEST_TIMESTAMP; - PTable indexWithMinDisableTimestamp = null; - - //Get Indexes in building state, minDisabledTimestamp - List disableIndexes = new ArrayList(); - List disabledPIndexes = new ArrayList(); - for (PTable index : pDataTable.getIndexes()) { - if (index.getIndexState().equals(PIndexState.BUILDING)) { - disableIndexes.add(index.getTableName().getString()); - disabledPIndexes.add(index); - // We need a way of differentiating the block writes to data table case from - // the leave index active case. In either case, we need to know the time stamp - // at which writes started failing so we can rebuild from that point. If we - // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, - // then writes to the data table will be blocked (this is client side logic - // and we can't change this in a minor release). So we use the sign of the - // time stamp to differentiate. 
- long indexDisableTimestamp = Math.abs(index.getIndexDisableTimestamp()); - if (minDisableTimestamp > indexDisableTimestamp) { - minDisableTimestamp = indexDisableTimestamp; - indexWithMinDisableTimestamp = index; - } - } - } - - if (indexWithMinDisableTimestamp == null) { - throw new Exception("There is no index for a datatable to be rebuild:" + qDataTable); - } - if (minDisableTimestamp == 0) { - throw new Exception("It seems Index " + indexWithMinDisableTimestamp - + " has disable timestamp as 0 , please run IndexTool with IndexName to build it first"); - // TODO probably we can initiate the job by ourself or can skip them while making the list for partial build with a warning - } - - long maxTimestamp = getMaxRebuildAsyncDate(schemaName, disableIndexes); - - //serialize index maintaienr in job conf with Base64 TODO: Need to find better way to serialize them in conf. - List maintainers = Lists.newArrayListWithExpectedSize(disabledPIndexes.size()); - for (PTable index : disabledPIndexes) { - maintainers.add(index.getIndexMaintainer(pDataTable, connection.unwrap(PhoenixConnection.class))); - } - ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - IndexMaintainer.serializeAdditional(pDataTable, indexMetaDataPtr, disabledPIndexes, connection.unwrap(PhoenixConnection.class)); - PhoenixConfigurationUtil.setIndexMaintainers(configuration, indexMetaDataPtr); - if (!Strings.isNullOrEmpty(tenantId)) { - PhoenixConfigurationUtil.setTenantId(configuration, tenantId); - } - - //Prepare raw scan - Scan scan = IndexManagementUtil.newLocalStateScan(maintainers); - scan.setTimeRange(minDisableTimestamp - 1, maxTimestamp); - scan.setRaw(true); - scan.setCacheBlocks(false); - if (pDataTable.isTransactional()) { - long maxTimeRange = pDataTable.getTimeStamp() + 1; - scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, - Bytes.toBytes(TransactionUtil.convertToNanoseconds(maxTimeRange))); - } - - - String physicalTableName=pDataTable.getPhysicalName().getString(); - final String jobName = String.format("Phoenix Indexes build for " + pDataTable.getName().toString()); - - PhoenixConfigurationUtil.setInputTableName(configuration, dataTableWithSchema); - PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalTableName); - - //TODO: update disable indexes - PhoenixConfigurationUtil.setDisableIndexes(configuration, StringUtils.join(",",disableIndexes)); - - final Job job = Job.getInstance(configuration, jobName); - if (outputPath != null) { - FileOutputFormat.setOutputPath(job, outputPath); - } - job.setJarByClass(IndexTool.class); - TableMapReduceUtil.initTableMapperJob(physicalTableName, scan, PhoenixIndexPartialBuildMapper.class, null, - null, job); - TableMapReduceUtil.initCredentials(job); - TableInputFormat.configureSplitTable(job, TableName.valueOf(physicalTableName)); - return configureSubmittableJobUsingDirectApi(job); - } - - private long getMaxRebuildAsyncDate(String schemaName, List disableIndexes) throws SQLException { - Long maxRebuilAsyncDate = HConstants.LATEST_TIMESTAMP; - Long maxDisabledTimeStamp = 0L; - if (disableIndexes == null || disableIndexes.isEmpty()) { - return 0; - } - String query = String.format("SELECT MAX(" + ASYNC_REBUILD_TIMESTAMP + "), " - + "MAX(" + INDEX_DISABLE_TIMESTAMP + ") FROM " - + SYSTEM_CATALOG_NAME + " (" + ASYNC_REBUILD_TIMESTAMP - + " BIGINT) WHERE " + TABLE_SCHEM + " %s AND " + TABLE_NAME + " IN ( %s )", - (schemaName != null && schemaName.length() > 0) ? " = ? 
" : " IS NULL ", - QueryUtil.generateInListParams(disableIndexes.size())); - try (PreparedStatement selSyscat = connection.prepareStatement(query)) { - int param = 0; - if (schemaName != null && schemaName.length() > 0) { - selSyscat.setString(++param, schemaName); - } - QueryUtil.setQuoteInListElements(selSyscat, disableIndexes, param); - ResultSet rs = selSyscat.executeQuery(); - if (rs.next()) { - maxRebuilAsyncDate = rs.getLong(1); - maxDisabledTimeStamp = rs.getLong(2); - } - // Do check if table is disabled again after user invoked async rebuilding during the run of the job - if (maxRebuilAsyncDate > maxDisabledTimeStamp) { - return maxRebuilAsyncDate; - } else { - throw new RuntimeException( - "Inconsistent state we have one or more index tables which are disabled after the async is called!!"); - } - } - } + public Job getJob() throws Exception { + if (isPartialBuild) { + return configureJobForPartialBuild(); + } else { + long maxTimeRange = pIndexTable.getTimeStamp() + 1; + // this is set to ensure index tables remains consistent post population. + if (pDataTable.isTransactional()) { + configuration.set(PhoenixConfigurationUtil.TX_SCN_VALUE, + Long.toString(TransactionUtil.convertToNanoseconds(maxTimeRange))); + configuration.set(PhoenixConfigurationUtil.TX_PROVIDER, + pDataTable.getTransactionProvider().name()); + } + if (useSnapshot || (!isLocalIndexBuild && pDataTable.isTransactional())) { + PhoenixConfigurationUtil.setCurrentScnValue(configuration, maxTimeRange); + if (indexVerifyType != IndexVerifyType.NONE) { + LOGGER.warn("Verification is not supported for snapshots and transactional" + + "table index rebuilds, verification parameter ignored"); + } + return configureJobForAsyncIndex(); + } else { + // Local and non-transactional global indexes to be built on the server side + // It is safe not to set CURRENT_SCN_VALUE for server side rebuilds, in order to make sure + // that + // all the rows that exist so far will be rebuilt. The current time of the servers will + // be used to set the time range for server side scans. + + // However, PHOENIX-5732 introduces endTime parameter to be passed optionally for + // IndexTool. + // When endTime is passed for local and non-tx global indexes, we'll override the + // CURRENT_SCN_VALUE. 
+ if (endTime != null) { + PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); + } + if (lastVerifyTime != null) { + PhoenixConfigurationUtil.setIndexToolLastVerifyTime(configuration, lastVerifyTime); + } + return configureJobForServerBuildIndex(); + } + } + } - private Job configureJobForAsyncIndex() throws Exception { - String physicalIndexTable = pIndexTable.getPhysicalName().getString(); - final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class); - final PostIndexDDLCompiler ddlCompiler = - new PostIndexDDLCompiler(pConnection, new TableRef(pDataTable)); - ddlCompiler.compile(pIndexTable); - final List indexColumns = ddlCompiler.getIndexColumnNames(); - final String selectQuery = ddlCompiler.getSelectQuery(); - final String upsertQuery = - QueryUtil.constructUpsertStatement(indexTableWithSchema, indexColumns, Hint.NO_INDEX); - - configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery); - PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable); - PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, qIndexTable); - PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable); - - PhoenixConfigurationUtil.setUpsertColumnNames(configuration, - indexColumns.toArray(new String[indexColumns.size()])); - if (tenantId != null) { - PhoenixConfigurationUtil.setTenantId(configuration, tenantId); - } - final List columnMetadataList = - PhoenixRuntime.generateColumnInfo(pConnection, indexTableWithSchema, - indexColumns); - ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); - - if (outputPath != null) { - fs = outputPath.getFileSystem(configuration); - fs.delete(outputPath, true); - } - final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable); - final Job job = Job.getInstance(configuration, jobName); - job.setJarByClass(IndexTool.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); - if (outputPath != null) { - FileOutputFormat.setOutputPath(job, outputPath); - } - - if (!useSnapshot) { - PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, dataTableWithSchema, selectQuery); - } else { - Admin admin = null; - String snapshotName; - try { - admin = pConnection.getQueryServices().getAdmin(); - TableName hDdataTableName = TableName.valueOf(pDataTable.getPhysicalName().getBytes()); - snapshotName = new StringBuilder("INDEXTOOL-") - .append(pDataTable.getName().getString()) - .append("-Snapshot-") - .append(System.currentTimeMillis()) - .toString(); - //FIXME Drop this snapshot after we're done ? 
- admin.snapshot(snapshotName, hDdataTableName); - } finally { - if (admin != null) { - admin.close(); - } - } - // root dir not a subdirectory of hbase dir - Path rootDir = new Path("hdfs:///index-snapshot-dir"); - CommonFSUtils.setRootDir(configuration, rootDir); - Path restoreDir = new Path(CommonFSUtils.getRootDir(configuration), "restore-dir"); - - // set input for map reduce job using hbase snapshots - PhoenixMapReduceUtil - .setInput(job, PhoenixIndexDBWritable.class, snapshotName, dataTableWithSchema, restoreDir, selectQuery); - } - TableMapReduceUtil.initCredentials(job); - - job.setMapperClass(PhoenixIndexImportDirectMapper.class); - return configureSubmittableJobUsingDirectApi(job); - } + private Job configureJobForPartialBuild() throws Exception { + connection = ConnectionUtil.getInputConnection(configuration); + long minDisableTimestamp = HConstants.LATEST_TIMESTAMP; + PTable indexWithMinDisableTimestamp = null; + + // Get Indexes in building state, minDisabledTimestamp + List disableIndexes = new ArrayList(); + List disabledPIndexes = new ArrayList(); + for (PTable index : pDataTable.getIndexes()) { + if (index.getIndexState().equals(PIndexState.BUILDING)) { + disableIndexes.add(index.getTableName().getString()); + disabledPIndexes.add(index); + // We need a way of differentiating the block writes to data table case from + // the leave index active case. In either case, we need to know the time stamp + // at which writes started failing so we can rebuild from that point. If we + // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES, + // then writes to the data table will be blocked (this is client side logic + // and we can't change this in a minor release). So we use the sign of the + // time stamp to differentiate. + long indexDisableTimestamp = Math.abs(index.getIndexDisableTimestamp()); + if (minDisableTimestamp > indexDisableTimestamp) { + minDisableTimestamp = indexDisableTimestamp; + indexWithMinDisableTimestamp = index; + } + } + } + + if (indexWithMinDisableTimestamp == null) { + throw new Exception("There is no index for a datatable to be rebuild:" + qDataTable); + } + if (minDisableTimestamp == 0) { + throw new Exception("It seems Index " + indexWithMinDisableTimestamp + + " has disable timestamp as 0 , please run IndexTool with IndexName to build it first"); + // TODO probably we can initiate the job by ourself or can skip them while making the list + // for partial build with a warning + } + + long maxTimestamp = getMaxRebuildAsyncDate(schemaName, disableIndexes); + + // serialize index maintaienr in job conf with Base64 TODO: Need to find better way to + // serialize them in conf. 
+ List maintainers = + Lists.newArrayListWithExpectedSize(disabledPIndexes.size()); + for (PTable index : disabledPIndexes) { + maintainers + .add(index.getIndexMaintainer(pDataTable, connection.unwrap(PhoenixConnection.class))); + } + ImmutableBytesWritable indexMetaDataPtr = + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + IndexMaintainer.serializeAdditional(pDataTable, indexMetaDataPtr, disabledPIndexes, + connection.unwrap(PhoenixConnection.class)); + PhoenixConfigurationUtil.setIndexMaintainers(configuration, indexMetaDataPtr); + if (!Strings.isNullOrEmpty(tenantId)) { + PhoenixConfigurationUtil.setTenantId(configuration, tenantId); + } + + // Prepare raw scan + Scan scan = IndexManagementUtil.newLocalStateScan(maintainers); + scan.setTimeRange(minDisableTimestamp - 1, maxTimestamp); + scan.setRaw(true); + scan.setCacheBlocks(false); + if (pDataTable.isTransactional()) { + long maxTimeRange = pDataTable.getTimeStamp() + 1; + scan.setAttribute(BaseScannerRegionObserverConstants.TX_SCN, + Bytes.toBytes(TransactionUtil.convertToNanoseconds(maxTimeRange))); + } + + String physicalTableName = pDataTable.getPhysicalName().getString(); + final String jobName = + String.format("Phoenix Indexes build for " + pDataTable.getName().toString()); + + PhoenixConfigurationUtil.setInputTableName(configuration, dataTableWithSchema); + PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalTableName); + + // TODO: update disable indexes + PhoenixConfigurationUtil.setDisableIndexes(configuration, + StringUtils.join(",", disableIndexes)); + + final Job job = Job.getInstance(configuration, jobName); + if (outputPath != null) { + FileOutputFormat.setOutputPath(job, outputPath); + } + job.setJarByClass(IndexTool.class); + TableMapReduceUtil.initTableMapperJob(physicalTableName, scan, + PhoenixIndexPartialBuildMapper.class, null, null, job); + TableMapReduceUtil.initCredentials(job); + TableInputFormat.configureSplitTable(job, TableName.valueOf(physicalTableName)); + return configureSubmittableJobUsingDirectApi(job); + } - private Job configureJobForServerBuildIndex() throws Exception { - long indexRebuildQueryTimeoutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); - long indexRebuildRPCTimeoutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); - long indexRebuildClientScannerTimeOutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); - int indexRebuildRpcRetriesCounter = - configuration.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); - // Set various phoenix and hbase level timeouts and rpc retries - configuration.set(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - Long.toString(indexRebuildQueryTimeoutMs)); - configuration.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - Long.toString(indexRebuildClientScannerTimeOutMs)); - configuration.set(HConstants.HBASE_RPC_TIMEOUT_KEY, - Long.toString(indexRebuildRPCTimeoutMs)); - configuration.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - Long.toString(indexRebuildRpcRetriesCounter)); - configuration.set("mapreduce.task.timeout", Long.toString(indexRebuildQueryTimeoutMs)); - - // Randomize execution order, unless explicitly set - configuration.setBooleanIfUnset( - 
PhoenixConfigurationUtil.MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, true); - - PhoenixConfigurationUtil.setIndexToolDataTableName(configuration, dataTableWithSchema); - PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, qIndexTable); - PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, sourceTable); - if (startTime != null) { - PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); - } - PhoenixConfigurationUtil.setIndexVerifyType(configuration, indexVerifyType); - PhoenixConfigurationUtil.setDisableLoggingVerifyType(configuration, disableLoggingType); - String physicalIndexTable = pIndexTable.getPhysicalName().getString(); - - PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable); - PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable); - if (tenantId != null) { - PhoenixConfigurationUtil.setTenantId(configuration, tenantId); - } - - if (outputPath != null) { - fs = outputPath.getFileSystem(configuration); - fs.delete(outputPath, true); - } - final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable); - final Job job = Job.getInstance(configuration, jobName); - job.setJarByClass(IndexTool.class); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); - if (outputPath != null) { - FileOutputFormat.setOutputPath(job, outputPath); - } - - PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, PhoenixServerBuildIndexInputFormat.class, - dataTableWithSchema, ""); - - TableMapReduceUtil.initCredentials(job); - job.setMapperClass(PhoenixServerBuildIndexMapper.class); - return configureSubmittableJobUsingDirectApi(job); + private long getMaxRebuildAsyncDate(String schemaName, List disableIndexes) + throws SQLException { + Long maxRebuilAsyncDate = HConstants.LATEST_TIMESTAMP; + Long maxDisabledTimeStamp = 0L; + if (disableIndexes == null || disableIndexes.isEmpty()) { + return 0; + } + String query = String.format( + "SELECT MAX(" + ASYNC_REBUILD_TIMESTAMP + "), " + "MAX(" + INDEX_DISABLE_TIMESTAMP + + ") FROM " + SYSTEM_CATALOG_NAME + " (" + ASYNC_REBUILD_TIMESTAMP + " BIGINT) WHERE " + + TABLE_SCHEM + " %s AND " + TABLE_NAME + " IN ( %s )", + (schemaName != null && schemaName.length() > 0) ? " = ? " : " IS NULL ", + QueryUtil.generateInListParams(disableIndexes.size())); + try (PreparedStatement selSyscat = connection.prepareStatement(query)) { + int param = 0; + if (schemaName != null && schemaName.length() > 0) { + selSyscat.setString(++param, schemaName); + } + QueryUtil.setQuoteInListElements(selSyscat, disableIndexes, param); + ResultSet rs = selSyscat.executeQuery(); + if (rs.next()) { + maxRebuilAsyncDate = rs.getLong(1); + maxDisabledTimeStamp = rs.getLong(2); + } + // Do check if table is disabled again after user invoked async rebuilding during the run of + // the job + if (maxRebuilAsyncDate > maxDisabledTimeStamp) { + return maxRebuilAsyncDate; + } else { + throw new RuntimeException( + "Inconsistent state we have one or more index tables which are disabled after the async is called!!"); } + } + } - /** - * Uses the HBase Front Door Api to write to index table. Submits the job and either returns or - * waits for the job completion based on runForeground parameter. 
- * - * @param job - * @return - * @throws Exception - */ - private Job configureSubmittableJobUsingDirectApi(Job job) throws Exception { - job.setReducerClass(PhoenixIndexImportDirectReducer.class); - Configuration conf = job.getConfiguration(); - HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); - // Set the Physical Table name for use in DirectHTableWriter#write(Mutation) - conf.set(TableOutputFormat.OUTPUT_TABLE, - PhoenixConfigurationUtil.getPhysicalTableName(job.getConfiguration())); - //Set the Output classes - job.setMapOutputKeyClass(ImmutableBytesWritable.class); - job.setMapOutputValueClass(IntWritable.class); - job.setOutputKeyClass(NullWritable.class); - job.setOutputValueClass(NullWritable.class); - TableMapReduceUtil.addDependencyJars(job); - job.setNumReduceTasks(1); - return job; - } - + private Job configureJobForAsyncIndex() throws Exception { + String physicalIndexTable = pIndexTable.getPhysicalName().getString(); + final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class); + final PostIndexDDLCompiler ddlCompiler = + new PostIndexDDLCompiler(pConnection, new TableRef(pDataTable)); + ddlCompiler.compile(pIndexTable); + final List indexColumns = ddlCompiler.getIndexColumnNames(); + final String selectQuery = ddlCompiler.getSelectQuery(); + final String upsertQuery = + QueryUtil.constructUpsertStatement(indexTableWithSchema, indexColumns, Hint.NO_INDEX); + + configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery); + PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable); + PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, qIndexTable); + PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable); + + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, + indexColumns.toArray(new String[indexColumns.size()])); + if (tenantId != null) { + PhoenixConfigurationUtil.setTenantId(configuration, tenantId); + } + final List columnMetadataList = + PhoenixRuntime.generateColumnInfo(pConnection, indexTableWithSchema, indexColumns); + ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); + + if (outputPath != null) { + fs = outputPath.getFileSystem(configuration); + fs.delete(outputPath, true); + } + final String jobName = + String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable); + final Job job = Job.getInstance(configuration, jobName); + job.setJarByClass(IndexTool.class); + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + if (outputPath != null) { + FileOutputFormat.setOutputPath(job, outputPath); + } + + if (!useSnapshot) { + PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, dataTableWithSchema, + selectQuery); + } else { + Admin admin = null; + String snapshotName; + try { + admin = pConnection.getQueryServices().getAdmin(); + TableName hDdataTableName = TableName.valueOf(pDataTable.getPhysicalName().getBytes()); + snapshotName = new StringBuilder("INDEXTOOL-").append(pDataTable.getName().getString()) + .append("-Snapshot-").append(System.currentTimeMillis()).toString(); + // FIXME Drop this snapshot after we're done ? 
+ admin.snapshot(snapshotName, hDdataTableName); + } finally { + if (admin != null) { + admin.close(); + } + } + // root dir not a subdirectory of hbase dir + Path rootDir = new Path("hdfs:///index-snapshot-dir"); + CommonFSUtils.setRootDir(configuration, rootDir); + Path restoreDir = new Path(CommonFSUtils.getRootDir(configuration), "restore-dir"); + + // set input for map reduce job using hbase snapshots + PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName, + dataTableWithSchema, restoreDir, selectQuery); + } + TableMapReduceUtil.initCredentials(job); + + job.setMapperClass(PhoenixIndexImportDirectMapper.class); + return configureSubmittableJobUsingDirectApi(job); } - public Job getJob() { - return job; + private Job configureJobForServerBuildIndex() throws Exception { + long indexRebuildQueryTimeoutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); + long indexRebuildRPCTimeoutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); + long indexRebuildClientScannerTimeOutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); + int indexRebuildRpcRetriesCounter = + configuration.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); + // Set various phoenix and hbase level timeouts and rpc retries + configuration.set(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + Long.toString(indexRebuildQueryTimeoutMs)); + configuration.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Long.toString(indexRebuildClientScannerTimeOutMs)); + configuration.set(HConstants.HBASE_RPC_TIMEOUT_KEY, Long.toString(indexRebuildRPCTimeoutMs)); + configuration.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + Long.toString(indexRebuildRpcRetriesCounter)); + configuration.set("mapreduce.task.timeout", Long.toString(indexRebuildQueryTimeoutMs)); + + // Randomize execution order, unless explicitly set + configuration.setBooleanIfUnset( + PhoenixConfigurationUtil.MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, true); + + PhoenixConfigurationUtil.setIndexToolDataTableName(configuration, dataTableWithSchema); + PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, qIndexTable); + PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, sourceTable); + if (startTime != null) { + PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); + } + PhoenixConfigurationUtil.setIndexVerifyType(configuration, indexVerifyType); + PhoenixConfigurationUtil.setDisableLoggingVerifyType(configuration, disableLoggingType); + String physicalIndexTable = pIndexTable.getPhysicalName().getString(); + + PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable); + PhoenixConfigurationUtil.setDisableIndexes(configuration, indexTable); + if (tenantId != null) { + PhoenixConfigurationUtil.setTenantId(configuration, tenantId); + } + + if (outputPath != null) { + fs = outputPath.getFileSystem(configuration); + fs.delete(outputPath, true); + } + final String jobName = + String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable); + final Job job = Job.getInstance(configuration, jobName); + job.setJarByClass(IndexTool.class); + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + if (outputPath != null) { + 
FileOutputFormat.setOutputPath(job, outputPath); + } + + PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, + PhoenixServerBuildIndexInputFormat.class, dataTableWithSchema, ""); + + TableMapReduceUtil.initCredentials(job); + job.setMapperClass(PhoenixServerBuildIndexMapper.class); + return configureSubmittableJobUsingDirectApi(job); } - public static void createIndexToolTables(Connection connection) throws Exception { - try (IndexVerificationResultRepository resultRepo = new IndexVerificationResultRepository(); - IndexVerificationOutputRepository outputRepo = new IndexVerificationOutputRepository()){ - resultRepo.createResultTable(connection); - outputRepo.createOutputTable(connection); - } + /** + * Uses the HBase Front Door Api to write to index table. Submits the job and either returns or + * waits for the job completion based on runForeground parameter. + */ + private Job configureSubmittableJobUsingDirectApi(Job job) throws Exception { + job.setReducerClass(PhoenixIndexImportDirectReducer.class); + Configuration conf = job.getConfiguration(); + HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); + // Set the Physical Table name for use in DirectHTableWriter#write(Mutation) + conf.set(TableOutputFormat.OUTPUT_TABLE, + PhoenixConfigurationUtil.getPhysicalTableName(job.getConfiguration())); + // Set the Output classes + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + job.setMapOutputValueClass(IntWritable.class); + job.setOutputKeyClass(NullWritable.class); + job.setOutputValueClass(NullWritable.class); + TableMapReduceUtil.addDependencyJars(job); + job.setNumReduceTasks(1); + return job; } - @Override - public int run(String[] args) throws Exception { - CommandLine cmdLine; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - return -1; - } - configuration = HBaseConfiguration.addHbaseResources(getConf()); - populateIndexToolAttributes(cmdLine); + } - if (tenantId != null) { - configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } + public Job getJob() { + return job; + } - try (Connection conn = getConnection(configuration)) { - createIndexToolTables(conn); - if (dataTable != null && indexTable != null) { - setupIndexAndDataTable(conn); - checkIfFeatureApplicable(startTime, endTime, lastVerifyTime, pDataTable, isLocalIndexBuild); - if (shouldDeleteBeforeRebuild) { - deleteBeforeRebuild(conn); - } - preSplitIndexTable(cmdLine, conn); - } - - boolean result = submitIndexToolJob(conn, configuration); - - if (result) { - return 0; - } else { - LOGGER.error("IndexTool job failed! 
Check logs for errors.."); - return -1; - } - } catch (Exception ex) { - LOGGER.error("An exception occurred while performing the indexing job: " - + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex)); - return -1; - } + public static void createIndexToolTables(Connection connection) throws Exception { + try (IndexVerificationResultRepository resultRepo = new IndexVerificationResultRepository(); + IndexVerificationOutputRepository outputRepo = new IndexVerificationOutputRepository()) { + resultRepo.createResultTable(connection); + outputRepo.createOutputTable(connection); + } + } + + @Override + public int run(String[] args) throws Exception { + CommandLine cmdLine; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + return -1; } + configuration = HBaseConfiguration.addHbaseResources(getConf()); + populateIndexToolAttributes(cmdLine); - public static void checkIfFeatureApplicable(Long startTime, Long endTime, Long lastVerifyTime, - PTable pDataTable, boolean isLocalIndexBuild) { - boolean isApplicable = isFeatureApplicable(pDataTable, isLocalIndexBuild); - if (!isApplicable) { - if(isTimeRangeSet(startTime, endTime) || lastVerifyTime!=null) { - throw new RuntimeException(FEATURE_NOT_APPLICABLE); - } - } + if (tenantId != null) { + configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); } - private boolean submitIndexToolJob(Connection conn, Configuration configuration) - throws Exception { - Path outputPath = null; - FileSystem fs; - if (basePath != null) { - outputPath = - CsvBulkImportUtil.getOutputPath(new Path(basePath), - pIndexTable == null ? - pDataTable.getPhysicalName().getString() : - pIndexTable.getPhysicalName().getString()); - fs = outputPath.getFileSystem(configuration); - fs.delete(outputPath, true); - } - JobFactory jobFactory = new JobFactory(conn, configuration, outputPath); - job = jobFactory.getJob(); - if (!isForeground) { - LOGGER.info("Running Index Build in Background - Submit async and exit"); - job.submit(); - return true; - } - LOGGER.info("Running Index Build in Foreground. Waits for the build to complete." 
- + " This may take a long time!."); - return job.waitForCompletion(true); - } - - @VisibleForTesting - public int populateIndexToolAttributes(CommandLine cmdLine) throws Exception { - boolean useTenantId = cmdLine.hasOption(TENANT_ID_OPTION.getOpt()); - boolean useStartTime = cmdLine.hasOption(START_TIME_OPTION.getOpt()); - boolean useEndTime = cmdLine.hasOption(END_TIME_OPTION.getOpt()); - boolean retryVerify = cmdLine.hasOption(RETRY_VERIFY_OPTION.getOpt()); - boolean verify = cmdLine.hasOption(VERIFY_OPTION.getOpt()); - boolean disableLogging = cmdLine.hasOption(DISABLE_LOGGING_OPTION.getOpt()); - boolean useIndexTableAsSource = cmdLine.hasOption(USE_INDEX_TABLE_AS_SOURCE_OPTION.getOpt()); - - if (useTenantId) { - tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); - } - if(useStartTime) { - startTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); - } - if (useEndTime) { - endTime = new Long(cmdLine.getOptionValue(END_TIME_OPTION.getOpt())); - } - if(retryVerify) { - lastVerifyTime = new Long(cmdLine.getOptionValue(RETRY_VERIFY_OPTION.getOpt())); - validateLastVerifyTime(); - } - if(isTimeRangeSet(startTime, endTime)) { - validateTimeRange(startTime, endTime); - } - if (verify) { - String value = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); - indexVerifyType = IndexVerifyType.fromValue(value); - if (disableLogging) { - disableLoggingType = - IndexDisableLoggingType.fromValue( - cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt())); - } + try (Connection conn = getConnection(configuration)) { + createIndexToolTables(conn); + if (dataTable != null && indexTable != null) { + setupIndexAndDataTable(conn); + checkIfFeatureApplicable(startTime, endTime, lastVerifyTime, pDataTable, isLocalIndexBuild); + if (shouldDeleteBeforeRebuild) { + deleteBeforeRebuild(conn); } + preSplitIndexTable(cmdLine, conn); + } - if (useIndexTableAsSource) { - sourceTable = SourceTable.INDEX_TABLE_SOURCE; - } + boolean result = submitIndexToolJob(conn, configuration); - schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); - dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); - indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); - isPartialBuild = cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()); - dataTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, dataTable); - indexTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, indexTable); - qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); - basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); - isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); - useSnapshot = cmdLine.hasOption(SNAPSHOT_OPTION.getOpt()); - shouldDeleteBeforeRebuild = cmdLine.hasOption(DELETE_ALL_AND_REBUILD_OPTION.getOpt()); + if (result) { return 0; + } else { + LOGGER.error("IndexTool job failed! 
Check logs for errors.."); + return -1; + } + } catch (Exception ex) { + LOGGER.error("An exception occurred while performing the indexing job: " + + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex)); + return -1; } - - public int validateLastVerifyTime() throws Exception { - Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - if (lastVerifyTime.compareTo(currentTime) > 0 || lastVerifyTime == 0L || !isValidLastVerifyTime(lastVerifyTime)) { - throw new RuntimeException(RETRY_VERIFY_NOT_APPLICABLE); - } - return 0; + } + + public static void checkIfFeatureApplicable(Long startTime, Long endTime, Long lastVerifyTime, + PTable pDataTable, boolean isLocalIndexBuild) { + boolean isApplicable = isFeatureApplicable(pDataTable, isLocalIndexBuild); + if (!isApplicable) { + if (isTimeRangeSet(startTime, endTime) || lastVerifyTime != null) { + throw new RuntimeException(FEATURE_NOT_APPLICABLE); + } } - - public boolean isValidLastVerifyTime(Long lastVerifyTime) throws Exception { - try (Connection conn = getConnection(configuration); - Table hIndexToolTable = conn.unwrap(PhoenixConnection.class) - .getQueryServices() - .getTable(IndexVerificationResultRepository.RESULT_TABLE_NAME_BYTES)) { - Scan s = new Scan(); - ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); - boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(null, cqs.getProps()); - s.setRowPrefixFilter(Bytes.toBytes(String.format("%s%s%s", lastVerifyTime, - ROW_KEY_SEPARATOR, - SchemaUtil.getPhysicalHBaseTableName(qSchemaName, SchemaUtil.normalizeIdentifier(indexTable), - isNamespaceMapped)))); - try (ResultScanner rs = hIndexToolTable.getScanner(s)) { - return rs.next() != null; - } - } + } + + private boolean submitIndexToolJob(Connection conn, Configuration configuration) + throws Exception { + Path outputPath = null; + FileSystem fs; + if (basePath != null) { + outputPath = CsvBulkImportUtil.getOutputPath(new Path(basePath), + pIndexTable == null + ? pDataTable.getPhysicalName().getString() + : pIndexTable.getPhysicalName().getString()); + fs = outputPath.getFileSystem(configuration); + fs.delete(outputPath, true); } - - public static void validateTimeRange(Long sTime, Long eTime) { - Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - Long st = (sTime == null) ? 0 : sTime; - Long et = (eTime == null) ? currentTime : eTime; - if (st.compareTo(currentTime) > 0 || et.compareTo(currentTime) > 0 || st.compareTo(et) >= 0) { - throw new RuntimeException(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - } + JobFactory jobFactory = new JobFactory(conn, configuration, outputPath); + job = jobFactory.getJob(); + if (!isForeground) { + LOGGER.info("Running Index Build in Background - Submit async and exit"); + job.submit(); + return true; } - - private Connection getConnection(Configuration configuration) throws SQLException { - return ConnectionUtil.getInputConnection(configuration); + LOGGER.info("Running Index Build in Foreground. Waits for the build to complete." 
+ + " This may take a long time!."); + return job.waitForCompletion(true); + } + + @VisibleForTesting + public int populateIndexToolAttributes(CommandLine cmdLine) throws Exception { + boolean useTenantId = cmdLine.hasOption(TENANT_ID_OPTION.getOpt()); + boolean useStartTime = cmdLine.hasOption(START_TIME_OPTION.getOpt()); + boolean useEndTime = cmdLine.hasOption(END_TIME_OPTION.getOpt()); + boolean retryVerify = cmdLine.hasOption(RETRY_VERIFY_OPTION.getOpt()); + boolean verify = cmdLine.hasOption(VERIFY_OPTION.getOpt()); + boolean disableLogging = cmdLine.hasOption(DISABLE_LOGGING_OPTION.getOpt()); + boolean useIndexTableAsSource = cmdLine.hasOption(USE_INDEX_TABLE_AS_SOURCE_OPTION.getOpt()); + + if (useTenantId) { + tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); } - - private void setupIndexAndDataTable(Connection connection) throws SQLException, IOException { - pDataTable = connection.unwrap(PhoenixConnection.class).getTableNoCache(qDataTable); - if (!isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { - throw new IllegalArgumentException( - String.format(" %s is not an index table for %s for this connection", - indexTable, qDataTable)); - } - qSchemaName = SchemaUtil.normalizeIdentifier(schemaName); - pIndexTable = connection.unwrap(PhoenixConnection.class).getTable( - SchemaUtil.getQualifiedTableName(schemaName, indexTable)); - indexType = pIndexTable.getIndexType(); - qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable); - if (IndexType.LOCAL.equals(indexType)) { - isLocalIndexBuild = true; - if (useSnapshot) { - throw new IllegalArgumentException(String.format( - "%s is a local index. snapshots are not supported for local indexes.", - qIndexTable)); - } - try (org.apache.hadoop.hbase.client.Connection hConn - = getTemporaryHConnection(connection.unwrap(PhoenixConnection.class))) { - RegionLocator regionLocator = hConn - .getRegionLocator(TableName.valueOf(pIndexTable.getPhysicalName().getBytes())); - splitKeysBeforeJob = regionLocator.getStartKeys(); - } - } - // We have to mark Disable index to Building before we can set it to Active in the reducer. 
Otherwise it errors out with - // index state transition error - changeDisabledIndexStateToBuiding(connection); + if (useStartTime) { + startTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); } - - public static boolean isTimeRangeSet(Long startTime, Long endTime) { - return startTime != null || endTime != null; + if (useEndTime) { + endTime = new Long(cmdLine.getOptionValue(END_TIME_OPTION.getOpt())); } - - private static boolean isFeatureApplicable(PTable dataTable, boolean isLocalIndexBuild) { - if (isLocalIndexBuild || !dataTable.isTransactional()) { - return true; - } - return false; + if (retryVerify) { + lastVerifyTime = new Long(cmdLine.getOptionValue(RETRY_VERIFY_OPTION.getOpt())); + validateLastVerifyTime(); + } + if (isTimeRangeSet(startTime, endTime)) { + validateTimeRange(startTime, endTime); + } + if (verify) { + String value = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); + indexVerifyType = IndexVerifyType.fromValue(value); + if (disableLogging) { + disableLoggingType = IndexDisableLoggingType + .fromValue(cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt())); + } } - private void changeDisabledIndexStateToBuiding(Connection connection) throws SQLException { - if (pIndexTable != null && pIndexTable.getIndexState().isDisabled()) { - IndexUtil.updateIndexState(connection.unwrap(PhoenixConnection.class), - pIndexTable.getName().getString(), PIndexState.BUILDING, null); - } + if (useIndexTableAsSource) { + sourceTable = SourceTable.INDEX_TABLE_SOURCE; } - private void preSplitIndexTable(CommandLine cmdLine, Connection connection) - throws SQLException, IOException { - boolean autosplit = cmdLine.hasOption(AUTO_SPLIT_INDEX_OPTION.getOpt()); - boolean splitIndex = cmdLine.hasOption(SPLIT_INDEX_OPTION.getOpt()); - boolean isSalted = pIndexTable.getBucketNum() != null; // no need to split salted tables - if (!isSalted && (IndexType.GLOBAL.equals(indexType) || IndexType.UNCOVERED_GLOBAL.equals(indexType)) && (autosplit || splitIndex)) { - String nOpt = cmdLine.getOptionValue(AUTO_SPLIT_INDEX_OPTION.getOpt()); - int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt); - String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt()); - double samplingRate = rateOpt == null ? 
DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt); - LOGGER.info(String.format("Will split index %s , autosplit=%s ," - + " autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, - autosplitNumRegions, samplingRate)); - - splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, - autosplitNumRegions, samplingRate); - } + schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); + dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); + indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); + isPartialBuild = cmdLine.hasOption(PARTIAL_REBUILD_OPTION.getOpt()); + dataTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, dataTable); + indexTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, indexTable); + qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); + basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); + isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + useSnapshot = cmdLine.hasOption(SNAPSHOT_OPTION.getOpt()); + shouldDeleteBeforeRebuild = cmdLine.hasOption(DELETE_ALL_AND_REBUILD_OPTION.getOpt()); + return 0; + } + + public int validateLastVerifyTime() throws Exception { + Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + if ( + lastVerifyTime.compareTo(currentTime) > 0 || lastVerifyTime == 0L + || !isValidLastVerifyTime(lastVerifyTime) + ) { + throw new RuntimeException(RETRY_VERIFY_NOT_APPLICABLE); + } + return 0; + } + + public boolean isValidLastVerifyTime(Long lastVerifyTime) throws Exception { + try (Connection conn = getConnection(configuration); + Table hIndexToolTable = conn.unwrap(PhoenixConnection.class).getQueryServices() + .getTable(IndexVerificationResultRepository.RESULT_TABLE_NAME_BYTES)) { + Scan s = new Scan(); + ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); + boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(null, cqs.getProps()); + s.setRowPrefixFilter(Bytes.toBytes(String.format("%s%s%s", lastVerifyTime, ROW_KEY_SEPARATOR, + SchemaUtil.getPhysicalHBaseTableName(qSchemaName, + SchemaUtil.normalizeIdentifier(indexTable), isNamespaceMapped)))); + try (ResultScanner rs = hIndexToolTable.getScanner(s)) { + return rs.next() != null; + } + } + } + + public static void validateTimeRange(Long sTime, Long eTime) { + Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + Long st = (sTime == null) ? 0 : sTime; + Long et = (eTime == null) ? currentTime : eTime; + if (st.compareTo(currentTime) > 0 || et.compareTo(currentTime) > 0 || st.compareTo(et) >= 0) { + throw new RuntimeException(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); } + } - private void deleteBeforeRebuild(Connection conn) throws SQLException, IOException { - if (MetaDataUtil.isViewIndex(pIndexTable.getPhysicalName().getString())) { - throw new IllegalArgumentException(String.format( - "%s is a view index. delete-all-and-rebuild is not supported for view indexes", - qIndexTable)); - } + private Connection getConnection(Configuration configuration) throws SQLException { + return ConnectionUtil.getInputConnection(configuration); + } - if (isLocalIndexBuild) { - throw new IllegalArgumentException(String.format( - "%s is a local index. 
delete-all-and-rebuild is not supported for local indexes", qIndexTable)); - } else { - ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices(); - try (Admin admin = queryServices.getAdmin()){ - TableName tableName = TableName.valueOf(qIndexTable); - admin.disableTable(tableName); - admin.truncateTable(tableName, true); - } - } + private void setupIndexAndDataTable(Connection connection) throws SQLException, IOException { + pDataTable = connection.unwrap(PhoenixConnection.class).getTableNoCache(qDataTable); + if (!isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { + throw new IllegalArgumentException(String + .format(" %s is not an index table for %s for this connection", indexTable, qDataTable)); + } + qSchemaName = SchemaUtil.normalizeIdentifier(schemaName); + pIndexTable = connection.unwrap(PhoenixConnection.class) + .getTable(SchemaUtil.getQualifiedTableName(schemaName, indexTable)); + indexType = pIndexTable.getIndexType(); + qIndexTable = SchemaUtil.getQualifiedTableName(schemaName, indexTable); + if (IndexType.LOCAL.equals(indexType)) { + isLocalIndexBuild = true; + if (useSnapshot) { + throw new IllegalArgumentException(String.format( + "%s is a local index. snapshots are not supported for local indexes.", qIndexTable)); + } + try (org.apache.hadoop.hbase.client.Connection hConn = + getTemporaryHConnection(connection.unwrap(PhoenixConnection.class))) { + RegionLocator regionLocator = + hConn.getRegionLocator(TableName.valueOf(pIndexTable.getPhysicalName().getBytes())); + splitKeysBeforeJob = regionLocator.getStartKeys(); + } + } + // We have to mark Disable index to Building before we can set it to Active in the reducer. + // Otherwise it errors out with + // index state transition error + changeDisabledIndexStateToBuiding(connection); + } + + public static boolean isTimeRangeSet(Long startTime, Long endTime) { + return startTime != null || endTime != null; + } + + private static boolean isFeatureApplicable(PTable dataTable, boolean isLocalIndexBuild) { + if (isLocalIndexBuild || !dataTable.isTransactional()) { + return true; } + return false; + } - private void splitIndexTable(PhoenixConnection pConnection, boolean autosplit, - int autosplitNumRegions, double samplingRate) - throws SQLException, IOException, IllegalArgumentException { - int numRegions; - - TableName hDataName = TableName.valueOf(pDataTable.getPhysicalName().getBytes()); - try (org.apache.hadoop.hbase.client.Connection tempHConn = getTemporaryHConnection(pConnection); - RegionLocator regionLocator = - tempHConn.getRegionLocator(hDataName)) { - numRegions = regionLocator.getStartKeys().length; - if (autosplit && (numRegions <= autosplitNumRegions)) { - LOGGER.info(String.format( - "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s", - pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions)); - return; // do nothing if # of regions is too low - } - } - // build a tablesample query to fetch index column values from the data table - DataSourceColNames colNames = new DataSourceColNames(pDataTable, pIndexTable); - String qTableSample = String.format("%s TABLESAMPLE(%.2f)", qDataTable, samplingRate); - List dataColNames = colNames.getDataColNames(); - final String dataSampleQuery = - QueryUtil.constructSelectStatement(qTableSample, dataColNames, null, - Hint.NO_INDEX, true); - IndexMaintainer maintainer = IndexMaintainer.create(pDataTable, pIndexTable, pConnection); - ImmutableBytesWritable dataRowKeyPtr = new 
ImmutableBytesWritable(); - try (final PhoenixResultSet rs = - pConnection.createStatement().executeQuery(dataSampleQuery) - .unwrap(PhoenixResultSet.class); - Admin admin = pConnection.getQueryServices().getAdmin()) { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(numRegions); - ValueGetter getter = getIndexValueGetter(rs, dataColNames); - // loop over data table rows - build the index rowkey, put it in the histogram - while (rs.next()) { - rs.getCurrentRow().getKey(dataRowKeyPtr); - // regionStart/EndKey only needed for local indexes, so we pass null - byte[] indexRowKey = maintainer.buildRowKey(getter, dataRowKeyPtr, null, null, - rs.getCurrentRow().getValue(0).getTimestamp()); - histo.addValue(indexRowKey); - } - List buckets = histo.computeBuckets(); - // do the split - // to get the splits, we just need the right bound of every histogram bucket, excluding the last - byte[][] splitPoints = new byte[buckets.size() - 1][]; - int splitIdx = 0; - for (Bucket b : buckets.subList(0, buckets.size() - 1)) { - splitPoints[splitIdx++] = b.getRightBoundExclusive(); - } - // drop table and recreate with appropriate splits - TableName hIndexName = TableName.valueOf(pIndexTable.getPhysicalName().getBytes()); - TableDescriptor descriptor = admin.getDescriptor(hIndexName); - admin.disableTable(hIndexName); - admin.deleteTable(hIndexName); - admin.createTable(descriptor, splitPoints); - } + private void changeDisabledIndexStateToBuiding(Connection connection) throws SQLException { + if (pIndexTable != null && pIndexTable.getIndexState().isDisabled()) { + IndexUtil.updateIndexState(connection.unwrap(PhoenixConnection.class), + pIndexTable.getName().getString(), PIndexState.BUILDING, null); } + } + + private void preSplitIndexTable(CommandLine cmdLine, Connection connection) + throws SQLException, IOException { + boolean autosplit = cmdLine.hasOption(AUTO_SPLIT_INDEX_OPTION.getOpt()); + boolean splitIndex = cmdLine.hasOption(SPLIT_INDEX_OPTION.getOpt()); + boolean isSalted = pIndexTable.getBucketNum() != null; // no need to split salted tables + if ( + !isSalted + && (IndexType.GLOBAL.equals(indexType) || IndexType.UNCOVERED_GLOBAL.equals(indexType)) + && (autosplit || splitIndex) + ) { + String nOpt = cmdLine.getOptionValue(AUTO_SPLIT_INDEX_OPTION.getOpt()); + int autosplitNumRegions = + nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt); + String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt()); + double samplingRate = + rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt); + LOGGER.info(String.format( + "Will split index %s , autosplit=%s ," + " autoSplitNumRegions=%s , samplingRate=%s", + indexTable, autosplit, autosplitNumRegions, samplingRate)); + + splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, + samplingRate); + } + } - private org.apache.hadoop.hbase.client.Connection getTemporaryHConnection(PhoenixConnection pConnection) - throws SQLException, IOException { - try (Admin admin = pConnection.getQueryServices().getAdmin()) { - return ConnectionFactory.createConnection(admin.getConfiguration()); - } + private void deleteBeforeRebuild(Connection conn) throws SQLException, IOException { + if (MetaDataUtil.isViewIndex(pIndexTable.getPhysicalName().getString())) { + throw new IllegalArgumentException(String.format( + "%s is a view index. 
delete-all-and-rebuild is not supported for view indexes", + qIndexTable)); } - // setup a ValueGetter to get index values from the ResultSet - public static ValueGetter getIndexValueGetter(final PhoenixResultSet rs, List dataColNames) { - // map from data col name to index in ResultSet - final Map rsIndex = new HashMap<>(dataColNames.size()); - int i = 1; - for (String dataCol : dataColNames) { - rsIndex.put(SchemaUtil.getEscapedFullColumnName(dataCol), i++); - } - return new AbstractValueGetter() { - final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); - final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(); - - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException { - try { - String fullColumnName = - SchemaUtil.getEscapedFullColumnName(SchemaUtil - .getColumnDisplayName(ref.getFamily(), ref.getQualifier())); - byte[] colVal = rs.getBytes(rsIndex.get(fullColumnName)); - valuePtr.set(colVal); - } catch (SQLException e) { - throw new IOException(e); - } - return valuePtr; - } - - @Override - public byte[] getRowKey() { - rs.getCurrentRow().getKey(rowKeyPtr); - return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr); - } - }; + if (isLocalIndexBuild) { + throw new IllegalArgumentException(String.format( + "%s is a local index. delete-all-and-rebuild is not supported for local indexes", + qIndexTable)); + } else { + ConnectionQueryServices queryServices = + conn.unwrap(PhoenixConnection.class).getQueryServices(); + try (Admin admin = queryServices.getAdmin()) { + TableName tableName = TableName.valueOf(qIndexTable); + admin.disableTable(tableName); + admin.truncateTable(tableName, true); + } + } + } + + private void splitIndexTable(PhoenixConnection pConnection, boolean autosplit, + int autosplitNumRegions, double samplingRate) + throws SQLException, IOException, IllegalArgumentException { + int numRegions; + + TableName hDataName = TableName.valueOf(pDataTable.getPhysicalName().getBytes()); + try (org.apache.hadoop.hbase.client.Connection tempHConn = getTemporaryHConnection(pConnection); + RegionLocator regionLocator = tempHConn.getRegionLocator(hDataName)) { + numRegions = regionLocator.getStartKeys().length; + if (autosplit && (numRegions <= autosplitNumRegions)) { + LOGGER.info(String.format( + "Will not split index %s because the data table only has %s regions, autoSplitNumRegions=%s", + pIndexTable.getPhysicalName(), numRegions, autosplitNumRegions)); + return; // do nothing if # of regions is too low + } + } + // build a tablesample query to fetch index column values from the data table + DataSourceColNames colNames = new DataSourceColNames(pDataTable, pIndexTable); + String qTableSample = String.format("%s TABLESAMPLE(%.2f)", qDataTable, samplingRate); + List dataColNames = colNames.getDataColNames(); + final String dataSampleQuery = + QueryUtil.constructSelectStatement(qTableSample, dataColNames, null, Hint.NO_INDEX, true); + IndexMaintainer maintainer = IndexMaintainer.create(pDataTable, pIndexTable, pConnection); + ImmutableBytesWritable dataRowKeyPtr = new ImmutableBytesWritable(); + try ( + final PhoenixResultSet rs = + pConnection.createStatement().executeQuery(dataSampleQuery).unwrap(PhoenixResultSet.class); + Admin admin = pConnection.getQueryServices().getAdmin()) { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(numRegions); + ValueGetter getter = getIndexValueGetter(rs, dataColNames); + // loop over data table rows - build the index rowkey, put it in the histogram + while 
(rs.next()) { + rs.getCurrentRow().getKey(dataRowKeyPtr); + // regionStart/EndKey only needed for local indexes, so we pass null + byte[] indexRowKey = maintainer.buildRowKey(getter, dataRowKeyPtr, null, null, + rs.getCurrentRow().getValue(0).getTimestamp()); + histo.addValue(indexRowKey); + } + List buckets = histo.computeBuckets(); + // do the split + // to get the splits, we just need the right bound of every histogram bucket, excluding the + // last + byte[][] splitPoints = new byte[buckets.size() - 1][]; + int splitIdx = 0; + for (Bucket b : buckets.subList(0, buckets.size() - 1)) { + splitPoints[splitIdx++] = b.getRightBoundExclusive(); + } + // drop table and recreate with appropriate splits + TableName hIndexName = TableName.valueOf(pIndexTable.getPhysicalName().getBytes()); + TableDescriptor descriptor = admin.getDescriptor(hIndexName); + admin.disableTable(hIndexName); + admin.deleteTable(hIndexName); + admin.createTable(descriptor, splitPoints); } + } - /** - * Checks for the validity of the index table passed to the job. - * @param connection - * @param masterTable - * @param indexTable - * @param tenantId - * @return - * @throws SQLException - */ - public static boolean isValidIndexTable(final Connection connection, final String masterTable, - final String indexTable, final String tenantId) throws SQLException { - final DatabaseMetaData dbMetaData = connection.getMetaData(); - final String schemaName = SchemaUtil.getSchemaNameFromFullName(masterTable); - final String tableName = SchemaUtil.getTableNameFromFullName(masterTable); + private org.apache.hadoop.hbase.client.Connection + getTemporaryHConnection(PhoenixConnection pConnection) throws SQLException, IOException { + try (Admin admin = pConnection.getQueryServices().getAdmin()) { + return ConnectionFactory.createConnection(admin.getConfiguration()); + } + } + + // setup a ValueGetter to get index values from the ResultSet + public static ValueGetter getIndexValueGetter(final PhoenixResultSet rs, + List dataColNames) { + // map from data col name to index in ResultSet + final Map rsIndex = new HashMap<>(dataColNames.size()); + int i = 1; + for (String dataCol : dataColNames) { + rsIndex.put(SchemaUtil.getEscapedFullColumnName(dataCol), i++); + } + return new AbstractValueGetter() { + final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable(); + final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(); - ResultSet rs = null; + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) + throws IOException { try { - String catalog = ""; - if (tenantId != null) { - catalog = tenantId; - } - rs = dbMetaData.getIndexInfo(catalog, schemaName, tableName, false, false); - while (rs.next()) { - final String indexName = rs.getString(6); - if (SchemaUtil.normalizeIdentifier(indexTable).equalsIgnoreCase(indexName)) { - return true; - } - } - } finally { - if (rs != null) { - rs.close(); - } - } - return false; + String fullColumnName = SchemaUtil.getEscapedFullColumnName( + SchemaUtil.getColumnDisplayName(ref.getFamily(), ref.getQualifier())); + byte[] colVal = rs.getBytes(rsIndex.get(fullColumnName)); + valuePtr.set(colVal); + } catch (SQLException e) { + throw new IOException(e); + } + return valuePtr; + } + + @Override + public byte[] getRowKey() { + rs.getCurrentRow().getKey(rowKeyPtr); + return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr); + } + }; + } + + /** + * Checks for the validity of the index table passed to the job. 
+ */ + public static boolean isValidIndexTable(final Connection connection, final String masterTable, + final String indexTable, final String tenantId) throws SQLException { + final DatabaseMetaData dbMetaData = connection.getMetaData(); + final String schemaName = SchemaUtil.getSchemaNameFromFullName(masterTable); + final String tableName = SchemaUtil.getTableNameFromFullName(masterTable); + + ResultSet rs = null; + try { + String catalog = ""; + if (tenantId != null) { + catalog = tenantId; + } + rs = dbMetaData.getIndexInfo(catalog, schemaName, tableName, false, false); + while (rs.next()) { + final String indexName = rs.getString(6); + if (SchemaUtil.normalizeIdentifier(indexTable).equalsIgnoreCase(indexName)) { + return true; + } + } + } finally { + if (rs != null) { + rs.close(); + } } + return false; + } + + public static Map.Entry run(Configuration conf, String schemaName, String dataTable, + String indexTable, boolean useSnapshot, String tenantId, boolean disableBefore, + boolean shouldDeleteBeforeRebuild, boolean runForeground) throws Exception { + final List args = Lists.newArrayList(); + if (schemaName != null) { + args.add("--schema=" + schemaName); + } + // Work around CLI-254. The long-form arg parsing doesn't strip off double-quotes + args.add("--data-table=" + dataTable); + args.add("--index-table=" + indexTable); - public static Map.Entry run(Configuration conf, String schemaName, String dataTable, String indexTable, - boolean useSnapshot, String tenantId, boolean disableBefore, boolean shouldDeleteBeforeRebuild, boolean runForeground) throws Exception { - final List args = Lists.newArrayList(); - if (schemaName != null) { - args.add("--schema=" + schemaName); - } - // Work around CLI-254. The long-form arg parsing doesn't strip off double-quotes - args.add("--data-table=" + dataTable); - args.add("--index-table=" + indexTable); - - if (runForeground) { - args.add("-runfg"); - } - - if (useSnapshot) { - args.add("-snap"); - } + if (runForeground) { + args.add("-runfg"); + } - if (tenantId != null) { - args.add("-tenant"); - args.add(tenantId); - } + if (useSnapshot) { + args.add("-snap"); + } - if (shouldDeleteBeforeRebuild) { - args.add("-deleteall"); - } + if (tenantId != null) { + args.add("-tenant"); + args.add(tenantId); + } - args.add("-op"); - args.add("/tmp/" + UUID.randomUUID().toString()); + if (shouldDeleteBeforeRebuild) { + args.add("-deleteall"); + } - if (disableBefore) { - PhoenixConfigurationUtil.setDisableIndexes(conf, indexTable); - } + args.add("-op"); + args.add("/tmp/" + UUID.randomUUID().toString()); - IndexTool indexingTool = new IndexTool(); - indexingTool.setConf(conf); - int status = indexingTool.run(args.toArray(new String[0])); - Job job = indexingTool.getJob(); - return new AbstractMap.SimpleEntry<>(status, job); + if (disableBefore) { + PhoenixConfigurationUtil.setDisableIndexes(conf, indexTable); } - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new IndexTool(), args); - System.exit(result); - } + IndexTool indexingTool = new IndexTool(); + indexingTool.setConf(conf); + int status = indexingTool.run(args.toArray(new String[0])); + Job job = indexingTool.getJob(); + return new AbstractMap.SimpleEntry<>(status, job); + } + + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new IndexTool(), args); + System.exit(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java 
b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java index ad95b865c2a..612e50732f9 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexToolUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,56 +25,51 @@ import org.apache.phoenix.mapreduce.util.ConnectionUtil; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.schema.PIndexState; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - /** * Utility class for {@linkplain IndexTool} - * */ public class IndexToolUtil { - private static final String ALTER_INDEX_QUERY_TEMPLATE = "ALTER INDEX IF EXISTS %s ON %s %s"; - - private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolUtil.class); - - /** - * Updates the index state. - * @param configuration - * @param state - * @throws SQLException - */ - public static void updateIndexState(Configuration configuration,PIndexState state) throws SQLException { - final String masterTable = PhoenixConfigurationUtil.getInputTableName(configuration); - final String[] indexTables = PhoenixConfigurationUtil.getDisableIndexes(configuration).split(","); - final Properties overrideProps = new Properties(); - final Connection connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); - try { - for (String indexTable : indexTables) { - updateIndexState(connection, masterTable, indexTable, state); - } - } finally { - if(connection != null) { - connection.close(); - } - } - } - - /** - * Updates the index state. - * @param connection - * @param masterTable - * @param indexTable - * @param state - * @throws SQLException - */ - public static void updateIndexState(Connection connection, final String masterTable , final String indexTable, PIndexState state) throws SQLException { - Preconditions.checkNotNull(connection); - final String alterQuery = String.format(ALTER_INDEX_QUERY_TEMPLATE,indexTable,masterTable,state.name()); - connection.createStatement().execute(alterQuery); - LOGGER.info(" Updated the status of the index {} on {} to {} ", indexTable, masterTable, state.name()); + private static final String ALTER_INDEX_QUERY_TEMPLATE = "ALTER INDEX IF EXISTS %s ON %s %s"; + + private static final Logger LOGGER = LoggerFactory.getLogger(IndexToolUtil.class); + + /** + * Updates the index state. 
+ */ + public static void updateIndexState(Configuration configuration, PIndexState state) + throws SQLException { + final String masterTable = PhoenixConfigurationUtil.getInputTableName(configuration); + final String[] indexTables = + PhoenixConfigurationUtil.getDisableIndexes(configuration).split(","); + final Properties overrideProps = new Properties(); + final Connection connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); + try { + for (String indexTable : indexTables) { + updateIndexState(connection, masterTable, indexTable, state); + } + } finally { + if (connection != null) { + connection.close(); + } } - + } + + /** + * Updates the index state. + */ + public static void updateIndexState(Connection connection, final String masterTable, + final String indexTable, PIndexState state) throws SQLException { + Preconditions.checkNotNull(connection); + final String alterQuery = + String.format(ALTER_INDEX_QUERY_TEMPLATE, indexTable, masterTable, state.name()); + connection.createStatement().execute(alterQuery); + LOGGER.info(" Updated the status of the index {} on {} to {} ", indexTable, masterTable, + state.name()); + } + } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java index ed72a1e783f..e3ec562b13c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexUpgradeTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,26 +17,38 @@ */ package org.apache.phoenix.mapreduce.index; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; +import static org.apache.phoenix.query.QueryServicesOptions.GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.FileHandler; +import java.util.logging.Logger; +import java.util.logging.SimpleFormatter; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; 
+import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.phoenix.hbase.index.IndexRegionObserver; import org.apache.phoenix.hbase.index.Indexer; import org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder; @@ -46,890 +58,855 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.mapreduce.util.ConnectionUtil; import org.apache.phoenix.query.ConnectionQueryServices; - import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; - -import java.sql.ResultSet; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.logging.Logger; -import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.MetaDataUtil; import org.apache.phoenix.util.SchemaUtil; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; -import java.util.UUID; -import java.util.logging.FileHandler; -import java.util.logging.SimpleFormatter; - -import static org.apache.phoenix.query.QueryServicesOptions. - GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN; - public class IndexUpgradeTool extends Configured implements Tool { - private static final Logger LOGGER = Logger.getLogger(IndexUpgradeTool.class.getName()); - - private static final String INDEX_REBUILD_OPTION_SHORT_OPT = "rb"; - private static final String INDEX_TOOL_OPTION_SHORT_OPT = "tool"; - - private static final Option OPERATION_OPTION = new Option("o", "operation", - true, - "[Required] Operation to perform (upgrade/rollback)"); - private static final Option TABLE_OPTION = new Option("tb", "table", true, - "[Required] Tables list ex. 
table1,table2"); - private static final Option TABLE_CSV_FILE_OPTION = new Option("f", "file", - true, - "[Optional] Tables list in a csv file"); - private static final Option DRY_RUN_OPTION = new Option("d", "dry-run", - false, - "[Optional] If passed this will output steps that will be executed"); - private static final Option HELP_OPTION = new Option("h", "help", - false, "Help"); - private static final Option LOG_FILE_OPTION = new Option("lf", "logfile", - true, - "[Optional] Log file path where the logs are written"); - private static final Option INDEX_REBUILD_OPTION = new Option(INDEX_REBUILD_OPTION_SHORT_OPT, - "index-rebuild", - false, - "[Optional] Rebuild the indexes. Set -" + INDEX_TOOL_OPTION_SHORT_OPT + - " to pass options to IndexTool."); - private static final Option INDEX_TOOL_OPTION = new Option(INDEX_TOOL_OPTION_SHORT_OPT, - "index-tool", - true, - "[Optional] Options to pass to indexTool when rebuilding indexes. " + - "Set -" + INDEX_REBUILD_OPTION_SHORT_OPT + " to rebuild the index."); - - public static final String UPGRADE_OP = "upgrade"; - public static final String ROLLBACK_OP = "rollback"; - private static final String GLOBAL_INDEX_ID = "#NA#"; - private IndexTool indexingTool; - - private HashMap> tablesAndIndexes = new HashMap<>(); - private HashMap> rebuildMap = new HashMap<>(); - private HashMap prop = new HashMap<>(); - private HashMap emptyProp = new HashMap<>(); - - private boolean dryRun, upgrade, rebuild; - private String operation; - private String inputTables; - private String logFile; - private String inputFile; - private boolean isWaitComplete = false; - private String indexToolOpts; - - private boolean test = false; - private boolean failUpgradeTask = false; - private boolean failDowngradeTask = false; - private boolean hasFailure = false; - - public void setDryRun(boolean dryRun) { - this.dryRun = dryRun; - } - - public void setInputTables(String inputTables) { - this.inputTables = inputTables; - } - - public void setLogFile(String logFile) { - this.logFile = logFile; - } - - public void setInputFile(String inputFile) { - this.inputFile = inputFile; - } - - public void setTest(boolean test) { this.test = test; } - - public boolean getIsWaitComplete() { return this.isWaitComplete; } - - public boolean getDryRun() { return this.dryRun; } - - public String getInputTables() { - return this.inputTables; - } - - public String getLogFile() { - return this.logFile; - } - - public String getOperation() { - return this.operation; - } - - public boolean getIsRebuild() { return this.rebuild; } - - public String getIndexToolOpts() { return this.indexToolOpts; } - - @VisibleForTesting - public void setFailUpgradeTask(boolean failInitialTask) { - this.failUpgradeTask = failInitialTask; - } - - public void setFailDowngradeTask(boolean failRollbackTask) { - this.failDowngradeTask = failRollbackTask; - } - - public IndexUpgradeTool(String mode, String tables, String inputFile, - String outputFile, boolean dryRun, IndexTool indexTool, boolean rebuild) { - this.operation = mode; - this.inputTables = tables; - this.inputFile = inputFile; - this.logFile = outputFile; - this.dryRun = dryRun; - this.indexingTool = indexTool; - this.rebuild = rebuild; - } - - public IndexUpgradeTool () { } - - @Override - public int run(String[] args) throws Exception { - CommandLine cmdLine = null; - try { - cmdLine = parseOptions(args); - LOGGER.info("Index Upgrade tool initiated: " + String.join(",", args)); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), 
getOptions()); - } - try { - initializeTool(cmdLine); - prepareToolSetup(); - executeTool(); - } catch (Exception e) { - e.printStackTrace(); - hasFailure = true; - } - if (hasFailure) { - return -1; + private static final Logger LOGGER = Logger.getLogger(IndexUpgradeTool.class.getName()); + + private static final String INDEX_REBUILD_OPTION_SHORT_OPT = "rb"; + private static final String INDEX_TOOL_OPTION_SHORT_OPT = "tool"; + + private static final Option OPERATION_OPTION = + new Option("o", "operation", true, "[Required] Operation to perform (upgrade/rollback)"); + private static final Option TABLE_OPTION = + new Option("tb", "table", true, "[Required] Tables list ex. table1,table2"); + private static final Option TABLE_CSV_FILE_OPTION = + new Option("f", "file", true, "[Optional] Tables list in a csv file"); + private static final Option DRY_RUN_OPTION = new Option("d", "dry-run", false, + "[Optional] If passed this will output steps that will be executed"); + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + private static final Option LOG_FILE_OPTION = + new Option("lf", "logfile", true, "[Optional] Log file path where the logs are written"); + private static final Option INDEX_REBUILD_OPTION = new Option(INDEX_REBUILD_OPTION_SHORT_OPT, + "index-rebuild", false, "[Optional] Rebuild the indexes. Set -" + INDEX_TOOL_OPTION_SHORT_OPT + + " to pass options to IndexTool."); + private static final Option INDEX_TOOL_OPTION = new Option(INDEX_TOOL_OPTION_SHORT_OPT, + "index-tool", true, "[Optional] Options to pass to indexTool when rebuilding indexes. " + + "Set -" + INDEX_REBUILD_OPTION_SHORT_OPT + " to rebuild the index."); + + public static final String UPGRADE_OP = "upgrade"; + public static final String ROLLBACK_OP = "rollback"; + private static final String GLOBAL_INDEX_ID = "#NA#"; + private IndexTool indexingTool; + + private HashMap> tablesAndIndexes = new HashMap<>(); + private HashMap> rebuildMap = new HashMap<>(); + private HashMap prop = new HashMap<>(); + private HashMap emptyProp = new HashMap<>(); + + private boolean dryRun, upgrade, rebuild; + private String operation; + private String inputTables; + private String logFile; + private String inputFile; + private boolean isWaitComplete = false; + private String indexToolOpts; + + private boolean test = false; + private boolean failUpgradeTask = false; + private boolean failDowngradeTask = false; + private boolean hasFailure = false; + + public void setDryRun(boolean dryRun) { + this.dryRun = dryRun; + } + + public void setInputTables(String inputTables) { + this.inputTables = inputTables; + } + + public void setLogFile(String logFile) { + this.logFile = logFile; + } + + public void setInputFile(String inputFile) { + this.inputFile = inputFile; + } + + public void setTest(boolean test) { + this.test = test; + } + + public boolean getIsWaitComplete() { + return this.isWaitComplete; + } + + public boolean getDryRun() { + return this.dryRun; + } + + public String getInputTables() { + return this.inputTables; + } + + public String getLogFile() { + return this.logFile; + } + + public String getOperation() { + return this.operation; + } + + public boolean getIsRebuild() { + return this.rebuild; + } + + public String getIndexToolOpts() { + return this.indexToolOpts; + } + + @VisibleForTesting + public void setFailUpgradeTask(boolean failInitialTask) { + this.failUpgradeTask = failInitialTask; + } + + public void setFailDowngradeTask(boolean failRollbackTask) { + this.failDowngradeTask = 
failRollbackTask; + } + + public IndexUpgradeTool(String mode, String tables, String inputFile, String outputFile, + boolean dryRun, IndexTool indexTool, boolean rebuild) { + this.operation = mode; + this.inputTables = tables; + this.inputFile = inputFile; + this.logFile = outputFile; + this.dryRun = dryRun; + this.indexingTool = indexTool; + this.rebuild = rebuild; + } + + public IndexUpgradeTool() { + } + + @Override + public int run(String[] args) throws Exception { + CommandLine cmdLine = null; + try { + cmdLine = parseOptions(args); + LOGGER.info("Index Upgrade tool initiated: " + String.join(",", args)); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + } + try { + initializeTool(cmdLine); + prepareToolSetup(); + executeTool(); + } catch (Exception e) { + e.printStackTrace(); + hasFailure = true; + } + if (hasFailure) { + return -1; + } else { + return 0; + } + + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. + * @param args supplied command line arguments + * @return the parsed command line + */ + @VisibleForTesting + public CommandLine parseOptions(String[] args) { + + final Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("severe parsing command line options: " + e.getMessage(), options); + } + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); + } + if (!cmdLine.hasOption(OPERATION_OPTION.getOpt())) { + throw new IllegalStateException(OPERATION_OPTION.getLongOpt() + " is a mandatory parameter"); + } + if ( + cmdLine.hasOption(DRY_RUN_OPTION.getOpt()) && !cmdLine.hasOption(LOG_FILE_OPTION.getOpt()) + ) { + throw new IllegalStateException("Log file with " + TABLE_OPTION.getLongOpt() + + " is mandatory if " + DRY_RUN_OPTION.getLongOpt() + " is passed"); + } + if ( + !(cmdLine.hasOption(TABLE_OPTION.getOpt())) + && !(cmdLine.hasOption(TABLE_CSV_FILE_OPTION.getOpt())) + ) { + throw new IllegalStateException("Tables list should be passed in either with" + + TABLE_OPTION.getLongOpt() + " or " + TABLE_CSV_FILE_OPTION.getLongOpt()); + } + if ( + (cmdLine.hasOption(TABLE_OPTION.getOpt())) + && (cmdLine.hasOption(TABLE_CSV_FILE_OPTION.getOpt())) + ) { + throw new IllegalStateException("Tables list passed in with" + TABLE_OPTION.getLongOpt() + + " and " + TABLE_CSV_FILE_OPTION.getLongOpt() + "; specify only one."); + } + if ( + (cmdLine.hasOption(INDEX_TOOL_OPTION.getOpt())) + && !cmdLine.hasOption(INDEX_REBUILD_OPTION.getOpt()) + ) { + throw new IllegalStateException( + "Index tool options should be passed in with " + INDEX_REBUILD_OPTION.getLongOpt()); + } + return cmdLine; + } + + private void printHelpAndExit(String severeMessage, Options options) { + System.err.println(severeMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + private Options getOptions() { + final Options options = new Options(); + options.addOption(OPERATION_OPTION); + TABLE_OPTION.setOptionalArg(true); + options.addOption(TABLE_OPTION); + TABLE_CSV_FILE_OPTION.setOptionalArg(true); + options.addOption(TABLE_CSV_FILE_OPTION); + 
DRY_RUN_OPTION.setOptionalArg(true); + options.addOption(DRY_RUN_OPTION); + LOG_FILE_OPTION.setOptionalArg(true); + options.addOption(LOG_FILE_OPTION); + options.addOption(HELP_OPTION); + INDEX_REBUILD_OPTION.setOptionalArg(true); + options.addOption(INDEX_REBUILD_OPTION); + INDEX_TOOL_OPTION.setOptionalArg(true); + options.addOption(INDEX_TOOL_OPTION); + return options; + } + + @VisibleForTesting + public void initializeTool(CommandLine cmdLine) { + operation = cmdLine.getOptionValue(OPERATION_OPTION.getOpt()); + inputTables = cmdLine.getOptionValue(TABLE_OPTION.getOpt()); + logFile = cmdLine.getOptionValue(LOG_FILE_OPTION.getOpt()); + inputFile = cmdLine.getOptionValue(TABLE_CSV_FILE_OPTION.getOpt()); + dryRun = cmdLine.hasOption(DRY_RUN_OPTION.getOpt()); + rebuild = cmdLine.hasOption(INDEX_REBUILD_OPTION.getOpt()); + indexToolOpts = cmdLine.getOptionValue(INDEX_TOOL_OPTION.getOpt()); + } + + @VisibleForTesting + public void prepareToolSetup() { + try { + if (logFile != null) { + FileHandler fh = new FileHandler(logFile); + fh.setFormatter(new SimpleFormatter()); + LOGGER.addHandler(fh); + } + + prop.put(IndexUtil.INDEX_BUILDER_CONF_KEY, PhoenixIndexBuilder.class.getName()); + prop.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); + + if (inputTables == null) { + inputTables = new String(Files.readAllBytes(Paths.get(inputFile)), StandardCharsets.UTF_8); + } + if (inputTables == null) { + LOGGER.severe("Tables' list is not available; use -tb or -f option"); + } + LOGGER.info("list of tables passed: " + inputTables); + + if (operation.equalsIgnoreCase(UPGRADE_OP)) { + upgrade = true; + } else if (operation.equalsIgnoreCase(ROLLBACK_OP)) { + upgrade = false; + } else { + throw new IllegalStateException("Invalid option provided for " + OPERATION_OPTION.getOpt() + + " expected values: {upgrade, rollback}"); + } + if (dryRun) { + LOGGER.info("This is the beginning of the tool with dry run."); + } + } catch (IOException e) { + LOGGER.severe("Something went wrong " + e); + System.exit(-1); + } + } + + private static void setRpcRetriesAndTimeouts(Configuration conf) { + long indexRebuildQueryTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); + long indexRebuildRPCTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); + long indexRebuildClientScannerTimeOutMs = + conf.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); + int indexRebuildRpcRetriesCounter = conf.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); + + // Set phoenix and hbase level timeouts and rpc retries + conf.setLong(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, indexRebuildQueryTimeoutMs); + conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, indexRebuildRPCTimeoutMs); + conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + indexRebuildClientScannerTimeOutMs); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, indexRebuildRpcRetriesCounter); + } + + @VisibleForTesting + public static Connection getConnection(Configuration conf) throws SQLException { + setRpcRetriesAndTimeouts(conf); + return ConnectionUtil.getInputConnection(conf); + } + + @VisibleForTesting + public int executeTool() { + Configuration conf = HBaseConfiguration.addHbaseResources(getConf()); + + try (Connection conn = 
getConnection(conf)) { + ConnectionQueryServices queryServices = + conn.unwrap(PhoenixConnection.class).getQueryServices(); + + boolean status = extractTablesAndIndexes(conn.unwrap(PhoenixConnection.class)); + + if (status) { + return executeTool(conn, queryServices, conf); + } + } catch (SQLException e) { + LOGGER.severe("Something went wrong in executing tool " + e); + } + return -1; + } + + private int executeTool(Connection conn, ConnectionQueryServices queryServices, + Configuration conf) { + ArrayList immutableList = new ArrayList<>(); + ArrayList mutableList = new ArrayList<>(); + for (Map.Entry> entry : tablesAndIndexes.entrySet()) { + String dataTableFullName = entry.getKey(); + try { + PTable dataTable = conn.unwrap(PhoenixConnection.class).getTableNoCache(dataTableFullName); + if (dataTable.isImmutableRows()) { + // add to list where immutable tables are processed in a different function + immutableList.add(dataTableFullName); } else { - return 0; - } - - } - - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * @param args supplied command line arguments - * @return the parsed command line - */ - @VisibleForTesting - public CommandLine parseOptions(String[] args) { - - final Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("severe parsing command line options: " + e.getMessage(), - options); - } - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - if (!cmdLine.hasOption(OPERATION_OPTION.getOpt())) { - throw new IllegalStateException(OPERATION_OPTION.getLongOpt() - +" is a mandatory parameter"); - } - if (cmdLine.hasOption(DRY_RUN_OPTION.getOpt()) - && !cmdLine.hasOption(LOG_FILE_OPTION.getOpt())) { - throw new IllegalStateException("Log file with "+TABLE_OPTION.getLongOpt() - + " is mandatory if " + DRY_RUN_OPTION.getLongOpt() +" is passed"); - } - if (!(cmdLine.hasOption(TABLE_OPTION.getOpt())) - && !(cmdLine.hasOption(TABLE_CSV_FILE_OPTION.getOpt()))) { - throw new IllegalStateException("Tables list should be passed in either with" - +TABLE_OPTION.getLongOpt() + " or " + TABLE_CSV_FILE_OPTION.getLongOpt()); - } - if ((cmdLine.hasOption(TABLE_OPTION.getOpt())) - && (cmdLine.hasOption(TABLE_CSV_FILE_OPTION.getOpt()))) { - throw new IllegalStateException("Tables list passed in with" - +TABLE_OPTION.getLongOpt() + " and " + TABLE_CSV_FILE_OPTION.getLongOpt() - + "; specify only one."); - } - if ((cmdLine.hasOption(INDEX_TOOL_OPTION.getOpt())) - && !cmdLine.hasOption(INDEX_REBUILD_OPTION.getOpt())) { - throw new IllegalStateException("Index tool options should be passed in with " - + INDEX_REBUILD_OPTION.getLongOpt()); - } - return cmdLine; - } - - private void printHelpAndExit(String severeMessage, Options options) { - System.err.println(severeMessage); - printHelpAndExit(options, 1); - } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); - } - - private Options getOptions() { - final Options options = new Options(); - options.addOption(OPERATION_OPTION); - TABLE_OPTION.setOptionalArg(true); - options.addOption(TABLE_OPTION); - TABLE_CSV_FILE_OPTION.setOptionalArg(true); - 
options.addOption(TABLE_CSV_FILE_OPTION); - DRY_RUN_OPTION.setOptionalArg(true); - options.addOption(DRY_RUN_OPTION); - LOG_FILE_OPTION.setOptionalArg(true); - options.addOption(LOG_FILE_OPTION); - options.addOption(HELP_OPTION); - INDEX_REBUILD_OPTION.setOptionalArg(true); - options.addOption(INDEX_REBUILD_OPTION); - INDEX_TOOL_OPTION.setOptionalArg(true); - options.addOption(INDEX_TOOL_OPTION); - return options; - } - - @VisibleForTesting - public void initializeTool(CommandLine cmdLine) { - operation = cmdLine.getOptionValue(OPERATION_OPTION.getOpt()); - inputTables = cmdLine.getOptionValue(TABLE_OPTION.getOpt()); - logFile = cmdLine.getOptionValue(LOG_FILE_OPTION.getOpt()); - inputFile = cmdLine.getOptionValue(TABLE_CSV_FILE_OPTION.getOpt()); - dryRun = cmdLine.hasOption(DRY_RUN_OPTION.getOpt()); - rebuild = cmdLine.hasOption(INDEX_REBUILD_OPTION.getOpt()); - indexToolOpts = cmdLine.getOptionValue(INDEX_TOOL_OPTION.getOpt()); - } - - @VisibleForTesting - public void prepareToolSetup() { - try { - if (logFile != null) { - FileHandler fh = new FileHandler(logFile); - fh.setFormatter(new SimpleFormatter()); - LOGGER.addHandler(fh); - } - - prop.put(IndexUtil.INDEX_BUILDER_CONF_KEY, PhoenixIndexBuilder.class.getName()); - prop.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); - - if (inputTables == null) { - inputTables = new String( - Files.readAllBytes(Paths.get(inputFile)), StandardCharsets.UTF_8); - } - if (inputTables == null) { - LOGGER.severe("Tables' list is not available; use -tb or -f option"); - } - LOGGER.info("list of tables passed: " + inputTables); - - if (operation.equalsIgnoreCase(UPGRADE_OP)) { - upgrade = true; - } else if (operation.equalsIgnoreCase(ROLLBACK_OP)) { - upgrade = false; - } else { - throw new IllegalStateException("Invalid option provided for " - + OPERATION_OPTION.getOpt() + " expected values: {upgrade, rollback}"); - } - if (dryRun) { - LOGGER.info("This is the beginning of the tool with dry run."); - } - } catch (IOException e) { - LOGGER.severe("Something went wrong " + e); - System.exit(-1); - } - } - - private static void setRpcRetriesAndTimeouts(Configuration conf) { - long indexRebuildQueryTimeoutMs = - conf.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); - long indexRebuildRPCTimeoutMs = - conf.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); - long indexRebuildClientScannerTimeOutMs = - conf.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); - int indexRebuildRpcRetriesCounter = - conf.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); - - // Set phoenix and hbase level timeouts and rpc retries - conf.setLong(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, indexRebuildQueryTimeoutMs); - conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, indexRebuildRPCTimeoutMs); - conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - indexRebuildClientScannerTimeOutMs); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, indexRebuildRpcRetriesCounter); - } - - @VisibleForTesting - public static Connection getConnection(Configuration conf) throws SQLException { - setRpcRetriesAndTimeouts(conf); - return ConnectionUtil.getInputConnection(conf); - } - - @VisibleForTesting - public int executeTool() { - Configuration conf = 
HBaseConfiguration.addHbaseResources(getConf()); - - try (Connection conn = getConnection(conf)) { - ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class) - .getQueryServices(); - - boolean status = extractTablesAndIndexes(conn.unwrap(PhoenixConnection.class)); - - if (status) { - return executeTool(conn, queryServices, conf); - } - } catch (SQLException e) { - LOGGER.severe("Something went wrong in executing tool "+ e); + mutableList.add(dataTableFullName); } + } catch (SQLException e) { + LOGGER + .severe("Something went wrong while getting the PTable " + dataTableFullName + " " + e); return -1; - } - - private int executeTool(Connection conn, - ConnectionQueryServices queryServices, - Configuration conf) { - ArrayList immutableList = new ArrayList<>(); - ArrayList mutableList = new ArrayList<>(); - for (Map.Entry> entry :tablesAndIndexes.entrySet()) { - String dataTableFullName = entry.getKey(); - try { - PTable dataTable = conn.unwrap(PhoenixConnection.class).getTableNoCache( - dataTableFullName); - if (dataTable.isImmutableRows()) { - //add to list where immutable tables are processed in a different function - immutableList.add(dataTableFullName); - } else { - mutableList.add(dataTableFullName); - } - } catch (SQLException e) { - LOGGER.severe("Something went wrong while getting the PTable " - + dataTableFullName + " " + e); - return -1; - } - } - long startWaitTime = executeToolForImmutableTables(queryServices, immutableList); - executeToolForMutableTables(conn, queryServices, conf, mutableList); - enableImmutableTables(queryServices, immutableList, startWaitTime); - rebuildIndexes(conn, conf, immutableList); - if (hasFailure) { - return -1; - } else { - return 0; - } - } - - private long executeToolForImmutableTables(ConnectionQueryServices queryServices, - List immutableList) { - if (immutableList.isEmpty()) { - return 0; - } - LOGGER.info("Started " + operation + " for immutable tables"); - List failedTables = new ArrayList(); - for (String dataTableFullName : immutableList) { - try (Admin admin = queryServices.getAdmin()) { - HashSet indexes = tablesAndIndexes.get(dataTableFullName); - LOGGER.info("Executing " + operation + " of " + dataTableFullName - + " (immutable)"); - disableTable(admin, dataTableFullName, indexes); - modifyTable(admin, dataTableFullName, indexes); - } catch (Throwable e) { - LOGGER.severe("Something went wrong while disabling " - + "or modifying immutable table " + e); - handleFailure(queryServices, dataTableFullName, immutableList, failedTables); - } - } - immutableList.removeAll(failedTables); - long startWaitTime = EnvironmentEdgeManager.currentTimeMillis(); - return startWaitTime; - } - - private void executeToolForMutableTables(Connection conn, - ConnectionQueryServices queryServices, - Configuration conf, - ArrayList mutableTables) { - if (mutableTables.isEmpty()) { - return; - } - LOGGER.info("Started " + operation + " for mutable tables"); - List failedTables = new ArrayList<>(); - for (String dataTableFullName : mutableTables) { - try (Admin admin = queryServices.getAdmin()) { - HashSet indexes = tablesAndIndexes.get(dataTableFullName); - LOGGER.info("Executing " + operation + " of " + dataTableFullName); - disableTable(admin, dataTableFullName, indexes); - modifyTable(admin, dataTableFullName, indexes); - enableTable(admin, dataTableFullName, indexes); - LOGGER.info("Completed " + operation + " of " + dataTableFullName); - } catch (Throwable e) { - LOGGER.severe("Something went wrong while executing " - + operation + " steps 
for "+ dataTableFullName + " " + e); - handleFailure(queryServices, dataTableFullName, mutableTables, failedTables); - } - } - mutableTables.removeAll(failedTables); - // Opportunistically kick-off index rebuilds after upgrade operation - rebuildIndexes(conn, conf, mutableTables); - } - - private void handleFailure(ConnectionQueryServices queryServices, - String dataTableFullName, - List tableList, - List failedTables) { - hasFailure = true; - LOGGER.info("Performing error handling to revert the steps taken during " + operation); + } + } + long startWaitTime = executeToolForImmutableTables(queryServices, immutableList); + executeToolForMutableTables(conn, queryServices, conf, mutableList); + enableImmutableTables(queryServices, immutableList, startWaitTime); + rebuildIndexes(conn, conf, immutableList); + if (hasFailure) { + return -1; + } else { + return 0; + } + } + + private long executeToolForImmutableTables(ConnectionQueryServices queryServices, + List immutableList) { + if (immutableList.isEmpty()) { + return 0; + } + LOGGER.info("Started " + operation + " for immutable tables"); + List failedTables = new ArrayList(); + for (String dataTableFullName : immutableList) { + try (Admin admin = queryServices.getAdmin()) { HashSet indexes = tablesAndIndexes.get(dataTableFullName); - try (Admin admin = queryServices.getAdmin()) { - upgrade = !upgrade; - disableTable(admin, dataTableFullName, indexes); - modifyTable(admin, dataTableFullName, indexes); - enableTable(admin, dataTableFullName, indexes); - upgrade = !upgrade; - - tablesAndIndexes.remove(dataTableFullName); //removing from the map - failedTables.add(dataTableFullName); //everything in failed tables will later be - // removed from the list - - LOGGER.severe(dataTableFullName+" has been removed from the list as tool failed" - + " to perform "+operation); - } catch (Throwable e) { - LOGGER.severe("Revert of the "+operation +" failed in error handling, " - + "re-enabling tables and then throwing runtime exception"); - LOGGER.severe("Confirm the state for "+getSubListString(tableList, dataTableFullName)); - try (Admin admin = queryServices.getAdmin()) { - enableTable(admin, dataTableFullName, indexes); - } catch (Exception ex) { - throw new RuntimeException("Error re-enabling tables after rollback failure. " + - "Original exception that caused the rollback: [" + e.toString() + " " + "]", ex); - } - throw new RuntimeException(e); - } - } - - private void enableImmutableTables(ConnectionQueryServices queryServices, - ArrayList immutableList, - long startWaitTime) { - if (immutableList.isEmpty()) { - return; - } - while(true) { - long waitMore = getWaitMoreTime(startWaitTime); - if (waitMore <= 0) { - isWaitComplete = true; - break; - } - try { - // If the table is immutable, we need to wait for clients to purge - // their caches of table metadata - Thread.sleep(waitMore); - isWaitComplete = true; - } catch(InterruptedException e) { - LOGGER.warning("Sleep before starting index rebuild is interrupted. " - + "Attempting to sleep again! 
" + e.getMessage()); - } - } - - for (String dataTableFullName: immutableList) { - try (Admin admin = queryServices.getAdmin()) { - HashSet indexes = tablesAndIndexes.get(dataTableFullName); - enableTable(admin, dataTableFullName, indexes); - } catch (IOException | SQLException e) { - LOGGER.severe("Something went wrong while enabling immutable table " + e); - //removing to avoid any rebuilds after upgrade - tablesAndIndexes.remove(dataTableFullName); - immutableList.remove(dataTableFullName); - throw new RuntimeException("Manually enable the following tables " - + getSubListString(immutableList, dataTableFullName) - + " and run the index rebuild ", e); - } - } - } - - private String getSubListString(List tableList, String dataTableFullName) { - return StringUtils.join(",", tableList.subList(tableList.indexOf(dataTableFullName), - tableList.size())); - } - - private long getWaitMoreTime(long startWaitTime) { - int waitTime = GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN+1; - long endWaitTime = EnvironmentEdgeManager.currentTimeMillis(); - if(test || dryRun) { - return 0; //no wait - } - return (((waitTime) * 60000) - Math.abs(endWaitTime-startWaitTime)); - } - - private void disableTable(Admin admin, String dataTable, HashSetindexes) - throws IOException { - if (admin.isTableEnabled(TableName.valueOf(dataTable))) { - if (!dryRun) { - admin.disableTable(TableName.valueOf(dataTable)); - } - LOGGER.info("Disabled data table " + dataTable); - } else { - LOGGER.info( "Data table " + dataTable + " is already disabled"); - } - for (String indexName : indexes) { - if (admin.isTableEnabled(TableName.valueOf(indexName))) { - if (!dryRun) { - admin.disableTable(TableName.valueOf(indexName)); - } - LOGGER.info("Disabled index table " + indexName); - } else { - LOGGER.info( "Index table " + indexName + " is already disabled"); - } - } - } - - private void modifyTable(Admin admin, String dataTableFullName, HashSet indexes) - throws IOException { - if (upgrade) { - modifyIndexTable(admin, indexes); - modifyDataTable(admin, dataTableFullName); - if (test && failUpgradeTask) { - throw new RuntimeException("Test requested upgrade failure"); - } - } else { - modifyDataTable(admin, dataTableFullName); - modifyIndexTable(admin, indexes); - if (test && failDowngradeTask) { - throw new RuntimeException("Test requested downgrade failure"); - } - } - } - - private void enableTable(Admin admin, String dataTable, Setindexes) - throws IOException { - if (!admin.isTableEnabled(TableName.valueOf(dataTable))) { - if (!dryRun) { - admin.enableTable(TableName.valueOf(dataTable)); - } - LOGGER.info("Enabled data table " + dataTable); - } else { - LOGGER.info( "Data table " + dataTable + " is already enabled"); - } - for (String indexName : indexes) { - if(!admin.isTableEnabled(TableName.valueOf(indexName))) { - if (!dryRun) { - admin.enableTable(TableName.valueOf(indexName)); - } - LOGGER.info("Enabled index table " + indexName); - } else { - LOGGER.info( "Index table " + indexName + " is already enabled"); - } - } - } - - private void rebuildIndexes(Connection conn, Configuration conf, ArrayList tableList) { - if (!upgrade || !rebuild) { - return; - } - - for (String table: tableList) { - rebuildIndexes(conn, conf, table); - } - } - - private void rebuildIndexes(Connection conn, Configuration conf, String dataTableFullName) { - try { - HashMap - rebuildMap = prepareToRebuildIndexes(conn, dataTableFullName); - - //for rebuilding indexes in case of upgrade and if there are indexes on the table/view. 
- if (rebuildMap.isEmpty()) { - LOGGER.info("No indexes to rebuild for table " + dataTableFullName); - return; - } - if(!test) { - indexingTool = new IndexTool(); - indexingTool.setConf(conf); - } - startIndexRebuilds(rebuildMap, indexingTool); - } catch (SQLException e) { - LOGGER.severe("Failed to prepare the map for index rebuilds " + e); - throw new RuntimeException("Failed to prepare the map for index rebuilds"); - } - } - - private void modifyDataTable(Admin admin, String tableName) - throws IOException { - TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder - .newBuilder(admin.getDescriptor(TableName.valueOf(tableName))); - if (upgrade) { - removeCoprocessor(admin, tableName, tableDescBuilder, Indexer.class.getName()); - addCoprocessor(admin, tableName, tableDescBuilder, IndexRegionObserver.class.getName()); - } else { - removeCoprocessor(admin, tableName, tableDescBuilder, IndexRegionObserver.class.getName()); - addCoprocessor(admin, tableName, tableDescBuilder, Indexer.class.getName()); - } + LOGGER.info("Executing " + operation + " of " + dataTableFullName + " (immutable)"); + disableTable(admin, dataTableFullName, indexes); + modifyTable(admin, dataTableFullName, indexes); + } catch (Throwable e) { + LOGGER + .severe("Something went wrong while disabling " + "or modifying immutable table " + e); + handleFailure(queryServices, dataTableFullName, immutableList, failedTables); + } + } + immutableList.removeAll(failedTables); + long startWaitTime = EnvironmentEdgeManager.currentTimeMillis(); + return startWaitTime; + } + + private void executeToolForMutableTables(Connection conn, ConnectionQueryServices queryServices, + Configuration conf, ArrayList mutableTables) { + if (mutableTables.isEmpty()) { + return; + } + LOGGER.info("Started " + operation + " for mutable tables"); + List failedTables = new ArrayList<>(); + for (String dataTableFullName : mutableTables) { + try (Admin admin = queryServices.getAdmin()) { + HashSet indexes = tablesAndIndexes.get(dataTableFullName); + LOGGER.info("Executing " + operation + " of " + dataTableFullName); + disableTable(admin, dataTableFullName, indexes); + modifyTable(admin, dataTableFullName, indexes); + enableTable(admin, dataTableFullName, indexes); + LOGGER.info("Completed " + operation + " of " + dataTableFullName); + } catch (Throwable e) { + LOGGER.severe("Something went wrong while executing " + operation + " steps for " + + dataTableFullName + " " + e); + handleFailure(queryServices, dataTableFullName, mutableTables, failedTables); + } + } + mutableTables.removeAll(failedTables); + // Opportunistically kick-off index rebuilds after upgrade operation + rebuildIndexes(conn, conf, mutableTables); + } + + private void handleFailure(ConnectionQueryServices queryServices, String dataTableFullName, + List tableList, List failedTables) { + hasFailure = true; + LOGGER.info("Performing error handling to revert the steps taken during " + operation); + HashSet indexes = tablesAndIndexes.get(dataTableFullName); + try (Admin admin = queryServices.getAdmin()) { + upgrade = !upgrade; + disableTable(admin, dataTableFullName, indexes); + modifyTable(admin, dataTableFullName, indexes); + enableTable(admin, dataTableFullName, indexes); + upgrade = !upgrade; + + tablesAndIndexes.remove(dataTableFullName); // removing from the map + failedTables.add(dataTableFullName); // everything in failed tables will later be + // removed from the list + + LOGGER.severe(dataTableFullName + " has been removed from the list as tool failed" + + " to perform " + 
operation); + } catch (Throwable e) { + LOGGER.severe("Revert of the " + operation + " failed in error handling, " + + "re-enabling tables and then throwing runtime exception"); + LOGGER.severe("Confirm the state for " + getSubListString(tableList, dataTableFullName)); + try (Admin admin = queryServices.getAdmin()) { + enableTable(admin, dataTableFullName, indexes); + } catch (Exception ex) { + throw new RuntimeException("Error re-enabling tables after rollback failure. " + + "Original exception that caused the rollback: [" + e.toString() + " " + "]", ex); + } + throw new RuntimeException(e); + } + } + + private void enableImmutableTables(ConnectionQueryServices queryServices, + ArrayList immutableList, long startWaitTime) { + if (immutableList.isEmpty()) { + return; + } + while (true) { + long waitMore = getWaitMoreTime(startWaitTime); + if (waitMore <= 0) { + isWaitComplete = true; + break; + } + try { + // If the table is immutable, we need to wait for clients to purge + // their caches of table metadata + Thread.sleep(waitMore); + isWaitComplete = true; + } catch (InterruptedException e) { + LOGGER.warning("Sleep before starting index rebuild is interrupted. " + + "Attempting to sleep again! " + e.getMessage()); + } + } + + for (String dataTableFullName : immutableList) { + try (Admin admin = queryServices.getAdmin()) { + HashSet indexes = tablesAndIndexes.get(dataTableFullName); + enableTable(admin, dataTableFullName, indexes); + } catch (IOException | SQLException e) { + LOGGER.severe("Something went wrong while enabling immutable table " + e); + // removing to avoid any rebuilds after upgrade + tablesAndIndexes.remove(dataTableFullName); + immutableList.remove(dataTableFullName); + throw new RuntimeException( + "Manually enable the following tables " + + getSubListString(immutableList, dataTableFullName) + " and run the index rebuild ", + e); + } + } + } + + private String getSubListString(List tableList, String dataTableFullName) { + return StringUtils.join(",", + tableList.subList(tableList.indexOf(dataTableFullName), tableList.size())); + } + + private long getWaitMoreTime(long startWaitTime) { + int waitTime = GLOBAL_INDEX_CHECKER_ENABLED_MAP_EXPIRATION_MIN + 1; + long endWaitTime = EnvironmentEdgeManager.currentTimeMillis(); + if (test || dryRun) { + return 0; // no wait + } + return (((waitTime) * 60000) - Math.abs(endWaitTime - startWaitTime)); + } + + private void disableTable(Admin admin, String dataTable, HashSet indexes) + throws IOException { + if (admin.isTableEnabled(TableName.valueOf(dataTable))) { + if (!dryRun) { + admin.disableTable(TableName.valueOf(dataTable)); + } + LOGGER.info("Disabled data table " + dataTable); + } else { + LOGGER.info("Data table " + dataTable + " is already disabled"); + } + for (String indexName : indexes) { + if (admin.isTableEnabled(TableName.valueOf(indexName))) { if (!dryRun) { - admin.modifyTable(tableDescBuilder.build()); - } - } - - private void addCoprocessor(Admin admin, String tableName, TableDescriptorBuilder tableDescBuilder, - String coprocName) throws IOException { - addCoprocessor(admin, tableName, tableDescBuilder, coprocName, - QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY, prop); - } - - private void addCoprocessor(Admin admin, String tableName, TableDescriptorBuilder tableDescBuilder, - String coprocName,int priority, Map propsToAdd) throws IOException { - if (!admin.getDescriptor(TableName.valueOf(tableName)).hasCoprocessor(coprocName)) { - if (!dryRun) { - CoprocessorDescriptorBuilder coprocBuilder = - 
CoprocessorDescriptorBuilder.newBuilder(coprocName); - coprocBuilder.setPriority(priority).setProperties(propsToAdd); - tableDescBuilder.setCoprocessor(coprocBuilder.build()); - } - LOGGER.info("Loaded " + coprocName + " coprocessor on table " + tableName); - } else { - LOGGER.info(coprocName + " coprocessor on table " + tableName + "is already loaded"); + admin.disableTable(TableName.valueOf(indexName)); + } + LOGGER.info("Disabled index table " + indexName); + } else { + LOGGER.info("Index table " + indexName + " is already disabled"); + } + } + } + + private void modifyTable(Admin admin, String dataTableFullName, HashSet indexes) + throws IOException { + if (upgrade) { + modifyIndexTable(admin, indexes); + modifyDataTable(admin, dataTableFullName); + if (test && failUpgradeTask) { + throw new RuntimeException("Test requested upgrade failure"); + } + } else { + modifyDataTable(admin, dataTableFullName); + modifyIndexTable(admin, indexes); + if (test && failDowngradeTask) { + throw new RuntimeException("Test requested downgrade failure"); + } + } + } + + private void enableTable(Admin admin, String dataTable, Set indexes) throws IOException { + if (!admin.isTableEnabled(TableName.valueOf(dataTable))) { + if (!dryRun) { + admin.enableTable(TableName.valueOf(dataTable)); + } + LOGGER.info("Enabled data table " + dataTable); + } else { + LOGGER.info("Data table " + dataTable + " is already enabled"); + } + for (String indexName : indexes) { + if (!admin.isTableEnabled(TableName.valueOf(indexName))) { + if (!dryRun) { + admin.enableTable(TableName.valueOf(indexName)); + } + LOGGER.info("Enabled index table " + indexName); + } else { + LOGGER.info("Index table " + indexName + " is already enabled"); + } + } + } + + private void rebuildIndexes(Connection conn, Configuration conf, ArrayList tableList) { + if (!upgrade || !rebuild) { + return; + } + + for (String table : tableList) { + rebuildIndexes(conn, conf, table); + } + } + + private void rebuildIndexes(Connection conn, Configuration conf, String dataTableFullName) { + try { + HashMap rebuildMap = prepareToRebuildIndexes(conn, dataTableFullName); + + // for rebuilding indexes in case of upgrade and if there are indexes on the table/view. 
+ if (rebuildMap.isEmpty()) { + LOGGER.info("No indexes to rebuild for table " + dataTableFullName); + return; + } + if (!test) { + indexingTool = new IndexTool(); + indexingTool.setConf(conf); + } + startIndexRebuilds(rebuildMap, indexingTool); + } catch (SQLException e) { + LOGGER.severe("Failed to prepare the map for index rebuilds " + e); + throw new RuntimeException("Failed to prepare the map for index rebuilds"); + } + } + + private void modifyDataTable(Admin admin, String tableName) throws IOException { + TableDescriptorBuilder tableDescBuilder = + TableDescriptorBuilder.newBuilder(admin.getDescriptor(TableName.valueOf(tableName))); + if (upgrade) { + removeCoprocessor(admin, tableName, tableDescBuilder, Indexer.class.getName()); + addCoprocessor(admin, tableName, tableDescBuilder, IndexRegionObserver.class.getName()); + } else { + removeCoprocessor(admin, tableName, tableDescBuilder, IndexRegionObserver.class.getName()); + addCoprocessor(admin, tableName, tableDescBuilder, Indexer.class.getName()); + } + if (!dryRun) { + admin.modifyTable(tableDescBuilder.build()); + } + } + + private void addCoprocessor(Admin admin, String tableName, + TableDescriptorBuilder tableDescBuilder, String coprocName) throws IOException { + addCoprocessor(admin, tableName, tableDescBuilder, coprocName, + QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY, prop); + } + + private void addCoprocessor(Admin admin, String tableName, + TableDescriptorBuilder tableDescBuilder, String coprocName, int priority, + Map propsToAdd) throws IOException { + if (!admin.getDescriptor(TableName.valueOf(tableName)).hasCoprocessor(coprocName)) { + if (!dryRun) { + CoprocessorDescriptorBuilder coprocBuilder = + CoprocessorDescriptorBuilder.newBuilder(coprocName); + coprocBuilder.setPriority(priority).setProperties(propsToAdd); + tableDescBuilder.setCoprocessor(coprocBuilder.build()); + } + LOGGER.info("Loaded " + coprocName + " coprocessor on table " + tableName); + } else { + LOGGER.info(coprocName + " coprocessor on table " + tableName + "is already loaded"); + } + } + + private void removeCoprocessor(Admin admin, String tableName, + TableDescriptorBuilder tableDescBuilder, String coprocName) throws IOException { + if (admin.getDescriptor(TableName.valueOf(tableName)).hasCoprocessor(coprocName)) { + if (!dryRun) { + tableDescBuilder.removeCoprocessor(coprocName); + } + LOGGER.info("Unloaded " + coprocName + "coprocessor on table " + tableName); + } else { + LOGGER.info(coprocName + " coprocessor on table " + tableName + " is already unloaded"); + } + } + + private void modifyIndexTable(Admin admin, HashSet indexes) throws IOException { + for (String indexName : indexes) { + TableDescriptorBuilder indexTableDescBuilder = + TableDescriptorBuilder.newBuilder(admin.getDescriptor(TableName.valueOf(indexName))); + if (upgrade) { + // GlobalIndexChecker needs to be a "lower" priority than all the others so that it + // goes first. 
It also doesn't get the codec props the IndexRegionObserver needs + addCoprocessor(admin, indexName, indexTableDescBuilder, GlobalIndexChecker.class.getName(), + QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY - 1, emptyProp); + } else { + removeCoprocessor(admin, indexName, indexTableDescBuilder, + GlobalIndexChecker.class.getName()); + } + if (!dryRun) { + admin.modifyTable(indexTableDescBuilder.build()); + } + } + } + + private int startIndexRebuilds(HashMap indexInfos, IndexTool indexingTool) { + + for (Map.Entry entry : indexInfos.entrySet()) { + String index = entry.getKey(); + IndexInfo indexInfo = entry.getValue(); + String indexName = SchemaUtil.getTableNameFromFullName(index); + String tenantId = indexInfo.getTenantId(); + String baseTable = indexInfo.getBaseTable(); + String schema = indexInfo.getSchemaName(); + String outFile = "/tmp/index_rebuild_" + schema + "_" + indexName + + (GLOBAL_INDEX_ID.equals(tenantId) ? "" : "_" + tenantId) + "_" + + UUID.randomUUID().toString(); + String[] args = getIndexToolArgValues(schema, baseTable, indexName, outFile, tenantId); + try { + LOGGER.info("Rebuilding index: " + String.join(",", args)); + if (!dryRun) { + indexingTool.run(args); } - } - - private void removeCoprocessor(Admin admin, String tableName, TableDescriptorBuilder tableDescBuilder, - String coprocName) throws IOException { - if (admin.getDescriptor(TableName.valueOf(tableName)).hasCoprocessor(coprocName)) { - if (!dryRun) { - tableDescBuilder.removeCoprocessor(coprocName); + } catch (Exception e) { + LOGGER.severe("Something went wrong while building the index " + index + " " + e); + return -1; + } + } + return 0; + } + + public String[] getIndexToolArgValues(String schema, String baseTable, String indexName, + String outFile, String tenantId) { + String args[] = { "-s", schema, "-dt", baseTable, "-it", indexName, "-direct", "-op", outFile }; + ArrayList list = new ArrayList<>(Arrays.asList(args)); + if (!GLOBAL_INDEX_ID.equals(tenantId)) { + list.add("-tenant"); + list.add(tenantId); + } + + if (!Strings.isNullOrEmpty(indexToolOpts)) { + String[] options = indexToolOpts.split("\\s+"); + for (String opt : options) { + list.add(opt); + } + } + return list.toArray(new String[list.size()]); + } + + private boolean extractTablesAndIndexes(PhoenixConnection conn) { + String[] tables = inputTables.trim().split(","); + PTable dataTable = null; + try { + for (String tableName : tables) { + HashSet physicalIndexes = new HashSet<>(); + dataTable = conn.getTableNoCache(tableName); + String physicalTableName = dataTable.getPhysicalName().getString(); + if (!dataTable.isTransactional() && dataTable.getType().equals(PTableType.TABLE)) { + for (PTable indexTable : dataTable.getIndexes()) { + if (IndexUtil.isGlobalIndex(indexTable)) { + String physicalIndexName = indexTable.getPhysicalName().getString(); + physicalIndexes.add(physicalIndexName); } - LOGGER.info("Unloaded "+ coprocName +"coprocessor on table " + tableName); + } + if (MetaDataUtil.hasViewIndexTable(conn, dataTable.getPhysicalName())) { + String viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName); + physicalIndexes.add(viewIndexPhysicalName); + } + // for upgrade or rollback + tablesAndIndexes.put(physicalTableName, physicalIndexes); } else { - LOGGER.info(coprocName + " coprocessor on table " + tableName + " is already unloaded"); - } - } - - private void modifyIndexTable(Admin admin, HashSet indexes) - throws IOException { - for (String indexName : indexes) { - TableDescriptorBuilder 
indexTableDescBuilder = TableDescriptorBuilder - .newBuilder(admin.getDescriptor(TableName.valueOf(indexName))); - if (upgrade) { - //GlobalIndexChecker needs to be a "lower" priority than all the others so that it - //goes first. It also doesn't get the codec props the IndexRegionObserver needs - addCoprocessor(admin, indexName, indexTableDescBuilder, GlobalIndexChecker.class.getName(), - QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY -1, emptyProp); - } else { - removeCoprocessor(admin, indexName, indexTableDescBuilder, GlobalIndexChecker.class.getName()); - } - if (!dryRun) { - admin.modifyTable(indexTableDescBuilder.build()); - } - } - } - - private int startIndexRebuilds(HashMap indexInfos, - IndexTool indexingTool) { - - for(Map.Entry entry : indexInfos.entrySet()) { - String index = entry.getKey(); - IndexInfo indexInfo = entry.getValue(); - String indexName = SchemaUtil.getTableNameFromFullName(index); - String tenantId = indexInfo.getTenantId(); - String baseTable = indexInfo.getBaseTable(); - String schema = indexInfo.getSchemaName(); - String outFile = "/tmp/index_rebuild_" +schema+"_"+ indexName + - (GLOBAL_INDEX_ID.equals(tenantId)?"":"_"+tenantId) +"_" - + UUID.randomUUID().toString(); - String[] args = getIndexToolArgValues(schema, baseTable, indexName, outFile, tenantId); - try { - LOGGER.info("Rebuilding index: " + String.join(",", args)); - if (!dryRun) { - indexingTool.run(args); - } - } catch (Exception e) { - LOGGER.severe("Something went wrong while building the index " - + index + " " + e); - return -1; - } - } - return 0; - } - - public String[] getIndexToolArgValues(String schema, String baseTable, String indexName, - String outFile, String tenantId) { - String args[] = { "-s", schema, "-dt", baseTable, "-it", indexName, - "-direct", "-op", outFile }; - ArrayList list = new ArrayList<>(Arrays.asList(args)); - if (!GLOBAL_INDEX_ID.equals(tenantId)) { - list.add("-tenant"); - list.add(tenantId); - } - - if (!Strings.isNullOrEmpty(indexToolOpts)) { - String[] options = indexToolOpts.split("\\s+"); - for (String opt : options) { - list.add(opt); - } - } - return list.toArray(new String[list.size()]); - } - - private boolean extractTablesAndIndexes(PhoenixConnection conn) { - String [] tables = inputTables.trim().split(","); - PTable dataTable = null; - try { - for (String tableName : tables) { - HashSet physicalIndexes = new HashSet<>(); - dataTable = conn.getTableNoCache(tableName); - String physicalTableName = dataTable.getPhysicalName().getString(); - if (!dataTable.isTransactional() && dataTable.getType().equals(PTableType.TABLE)) { - for (PTable indexTable : dataTable.getIndexes()) { - if (IndexUtil.isGlobalIndex(indexTable)) { - String physicalIndexName = indexTable.getPhysicalName().getString(); - physicalIndexes.add(physicalIndexName); - } - } - if (MetaDataUtil.hasViewIndexTable(conn, dataTable.getPhysicalName())) { - String viewIndexPhysicalName = MetaDataUtil - .getViewIndexPhysicalName(physicalTableName); - physicalIndexes.add(viewIndexPhysicalName); - } - //for upgrade or rollback - tablesAndIndexes.put(physicalTableName, physicalIndexes); - } else { - LOGGER.info("Skipping Table " + tableName + " because it is " + - (dataTable.isTransactional() ? 
"transactional" : "not a data table")); - } - } - return true; - } catch (SQLException e) { - LOGGER.severe("Failed to find list of indexes "+e); - if (dataTable == null) { - LOGGER.severe("Unable to find the provided data table"); - } - return false; - } - } - - private HashMap prepareToRebuildIndexes(Connection conn, - String dataTableFullName) throws SQLException { - - HashMap indexInfos = new HashMap<>(); - HashSet physicalIndexes = tablesAndIndexes.get(dataTableFullName); - - String viewIndexPhysicalName = MetaDataUtil - .getViewIndexPhysicalName(dataTableFullName); - boolean hasViewIndex = physicalIndexes.contains(viewIndexPhysicalName); - String schemaName = SchemaUtil.getSchemaNameFromFullName(dataTableFullName); - String tableName = SchemaUtil.getTableNameFromFullName(dataTableFullName); - - for (String physicalIndexName : physicalIndexes) { - if (physicalIndexName.equals(viewIndexPhysicalName)) { - continue; - } - String indexTableName = SchemaUtil.getTableNameFromFullName(physicalIndexName); - String pIndexName = SchemaUtil.getTableName(schemaName, indexTableName); - IndexInfo indexInfo = new IndexInfo(schemaName, tableName, GLOBAL_INDEX_ID, pIndexName); - indexInfos.put(physicalIndexName, indexInfo); - } - - if (hasViewIndex) { - String viewSql = getViewSql(tableName, schemaName); - - ResultSet rs = conn.createStatement().executeQuery(viewSql); - while (rs.next()) { - String viewFullName = rs.getString(1); - String viewName = SchemaUtil.getTableNameFromFullName(viewFullName); - String tenantId = rs.getString(2); - ArrayList viewIndexes = findViewIndexes(conn, schemaName, viewName, - tenantId); - for (String viewIndex : viewIndexes) { - IndexInfo indexInfo = new IndexInfo(schemaName, viewName, - tenantId == null ? GLOBAL_INDEX_ID : tenantId, viewIndex); - indexInfos.put(viewIndex, indexInfo); - } - } - } - return indexInfos; - } - - @VisibleForTesting - public static String getViewSql(String tableName, String schemaName) { - //column_family has the view name and column_name has the Tenant ID - return "SELECT DISTINCT COLUMN_FAMILY, COLUMN_NAME FROM " - + "SYSTEM.CHILD_LINK " - + "WHERE TABLE_NAME = \'" + tableName + "\'" - + (!Strings.isNullOrEmpty(schemaName) ? " AND TABLE_SCHEM = \'" - + schemaName + "\'" : "") - + " AND LINK_TYPE = " - + PTable.LinkType.CHILD_TABLE.getSerializedValue(); - } - - private ArrayList findViewIndexes(Connection conn, String schemaName, String viewName, - String tenantId) throws SQLException { - - String viewIndexesSql = getViewIndexesSql(viewName, schemaName, tenantId); - ArrayList viewIndexes = new ArrayList<>(); - long stime = EnvironmentEdgeManager.currentTimeMillis(); - ResultSet rs = conn.createStatement().executeQuery(viewIndexesSql); - long etime = EnvironmentEdgeManager.currentTimeMillis(); - LOGGER.info(String.format("Query %s took %d ms ", viewIndexesSql, (etime - stime))); - while(rs.next()) { - String viewIndexName = rs.getString(1); - viewIndexes.add(viewIndexName); - } - return viewIndexes; - } - - @VisibleForTesting - public static String getViewIndexesSql(String viewName, String schemaName, String tenantId) { - return "SELECT DISTINCT COLUMN_FAMILY FROM " - + "SYSTEM.CATALOG " - + "WHERE TABLE_NAME = \'" + viewName + "\'" - + (!Strings.isNullOrEmpty(schemaName) ? " AND TABLE_SCHEM = \'" - + schemaName + "\'" : "") - + " AND LINK_TYPE = " + PTable.LinkType.INDEX_TABLE.getSerializedValue() - + (tenantId != null ? 
- " AND TENANT_ID = \'" + tenantId + "\'" : " AND TENANT_ID IS NULL"); - } - - private static class IndexInfo { - final private String schemaName; - final private String baseTable; - final private String tenantId; - final private String indexName; - - public IndexInfo(String schemaName, String baseTable, String tenantId, String indexName) { - this.schemaName = schemaName; - this.baseTable = baseTable; - this.tenantId = tenantId; - this.indexName = indexName; - } - - public String getSchemaName() { - return schemaName; - } - - public String getBaseTable() { return baseTable; } - - public String getTenantId() { - return tenantId; - } - - public String getIndexName() { - return indexName; - } - } - - public static void main (String[] args) throws Exception { - int result = ToolRunner.run(new IndexUpgradeTool(), args); - System.exit(result); + LOGGER.info("Skipping Table " + tableName + " because it is " + + (dataTable.isTransactional() ? "transactional" : "not a data table")); + } + } + return true; + } catch (SQLException e) { + LOGGER.severe("Failed to find list of indexes " + e); + if (dataTable == null) { + LOGGER.severe("Unable to find the provided data table"); + } + return false; + } + } + + private HashMap prepareToRebuildIndexes(Connection conn, + String dataTableFullName) throws SQLException { + + HashMap indexInfos = new HashMap<>(); + HashSet physicalIndexes = tablesAndIndexes.get(dataTableFullName); + + String viewIndexPhysicalName = MetaDataUtil.getViewIndexPhysicalName(dataTableFullName); + boolean hasViewIndex = physicalIndexes.contains(viewIndexPhysicalName); + String schemaName = SchemaUtil.getSchemaNameFromFullName(dataTableFullName); + String tableName = SchemaUtil.getTableNameFromFullName(dataTableFullName); + + for (String physicalIndexName : physicalIndexes) { + if (physicalIndexName.equals(viewIndexPhysicalName)) { + continue; + } + String indexTableName = SchemaUtil.getTableNameFromFullName(physicalIndexName); + String pIndexName = SchemaUtil.getTableName(schemaName, indexTableName); + IndexInfo indexInfo = new IndexInfo(schemaName, tableName, GLOBAL_INDEX_ID, pIndexName); + indexInfos.put(physicalIndexName, indexInfo); + } + + if (hasViewIndex) { + String viewSql = getViewSql(tableName, schemaName); + + ResultSet rs = conn.createStatement().executeQuery(viewSql); + while (rs.next()) { + String viewFullName = rs.getString(1); + String viewName = SchemaUtil.getTableNameFromFullName(viewFullName); + String tenantId = rs.getString(2); + ArrayList viewIndexes = findViewIndexes(conn, schemaName, viewName, tenantId); + for (String viewIndex : viewIndexes) { + IndexInfo indexInfo = new IndexInfo(schemaName, viewName, + tenantId == null ? GLOBAL_INDEX_ID : tenantId, viewIndex); + indexInfos.put(viewIndex, indexInfo); + } + } + } + return indexInfos; + } + + @VisibleForTesting + public static String getViewSql(String tableName, String schemaName) { + // column_family has the view name and column_name has the Tenant ID + return "SELECT DISTINCT COLUMN_FAMILY, COLUMN_NAME FROM " + "SYSTEM.CHILD_LINK " + + "WHERE TABLE_NAME = \'" + tableName + "\'" + + (!Strings.isNullOrEmpty(schemaName) ? 
" AND TABLE_SCHEM = \'" + schemaName + "\'" : "") + + " AND LINK_TYPE = " + PTable.LinkType.CHILD_TABLE.getSerializedValue(); + } + + private ArrayList findViewIndexes(Connection conn, String schemaName, String viewName, + String tenantId) throws SQLException { + + String viewIndexesSql = getViewIndexesSql(viewName, schemaName, tenantId); + ArrayList viewIndexes = new ArrayList<>(); + long stime = EnvironmentEdgeManager.currentTimeMillis(); + ResultSet rs = conn.createStatement().executeQuery(viewIndexesSql); + long etime = EnvironmentEdgeManager.currentTimeMillis(); + LOGGER.info(String.format("Query %s took %d ms ", viewIndexesSql, (etime - stime))); + while (rs.next()) { + String viewIndexName = rs.getString(1); + viewIndexes.add(viewIndexName); + } + return viewIndexes; + } + + @VisibleForTesting + public static String getViewIndexesSql(String viewName, String schemaName, String tenantId) { + return "SELECT DISTINCT COLUMN_FAMILY FROM " + "SYSTEM.CATALOG " + "WHERE TABLE_NAME = \'" + + viewName + "\'" + + (!Strings.isNullOrEmpty(schemaName) ? " AND TABLE_SCHEM = \'" + schemaName + "\'" : "") + + " AND LINK_TYPE = " + PTable.LinkType.INDEX_TABLE.getSerializedValue() + + (tenantId != null ? " AND TENANT_ID = \'" + tenantId + "\'" : " AND TENANT_ID IS NULL"); + } + + private static class IndexInfo { + final private String schemaName; + final private String baseTable; + final private String tenantId; + final private String indexName; + + public IndexInfo(String schemaName, String baseTable, String tenantId, String indexName) { + this.schemaName = schemaName; + this.baseTable = baseTable; + this.tenantId = tenantId; + this.indexName = indexName; + } + + public String getSchemaName() { + return schemaName; + } + + public String getBaseTable() { + return baseTable; + } + + public String getTenantId() { + return tenantId; } + + public String getIndexName() { + return indexName; + } + } + + public static void main(String[] args) throws Exception { + int result = ToolRunner.run(new IndexUpgradeTool(), args); + System.exit(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRepository.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRepository.java index 57a32dd96cd..32b900e52dd 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRepository.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRepository.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,12 @@ */ package org.apache.phoenix.mapreduce.index; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; @@ -38,369 +43,372 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.util.ByteUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - public class IndexVerificationOutputRepository implements AutoCloseable { - public static final byte[] ROW_KEY_SEPARATOR_BYTE = Bytes.toBytes("|"); - private static final Logger LOGGER = LoggerFactory.getLogger(IndexVerificationOutputRepository.class); - - private Table indexTable; - private byte[] indexName; - private Table outputTable; - private IndexTool.IndexDisableLoggingType disableLoggingVerifyType = - IndexTool.IndexDisableLoggingType.NONE; - private boolean shouldLogBeyondMaxLookback = true; - - public final static String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_TOOL"; - public final static byte[] OUTPUT_TABLE_NAME_BYTES = Bytes.toBytes(OUTPUT_TABLE_NAME); - public final static byte[] OUTPUT_TABLE_COLUMN_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; - - public final static String DATA_TABLE_NAME = "DTName"; - public final static byte[] DATA_TABLE_NAME_BYTES = Bytes.toBytes(DATA_TABLE_NAME); - public final static String INDEX_TABLE_NAME = "ITName"; - public final static byte[] INDEX_TABLE_NAME_BYTES = Bytes.toBytes(INDEX_TABLE_NAME); - public final static String DATA_TABLE_ROW_KEY = "DTRowKey"; - public final static byte[] DATA_TABLE_ROW_KEY_BYTES = Bytes.toBytes(DATA_TABLE_ROW_KEY); - public final static String INDEX_TABLE_ROW_KEY = "ITRowKey"; - public final static byte[] INDEX_TABLE_ROW_KEY_BYTES = Bytes.toBytes(INDEX_TABLE_ROW_KEY); - public final static String DATA_TABLE_TS = "DTTS"; - public final static byte[] DATA_TABLE_TS_BYTES = Bytes.toBytes(DATA_TABLE_TS); - public final static String INDEX_TABLE_TS = "ITTS"; - public final static byte[] INDEX_TABLE_TS_BYTES = Bytes.toBytes(INDEX_TABLE_TS); - public final static String ERROR_MESSAGE = "Error"; - public final static byte[] ERROR_MESSAGE_BYTES = Bytes.toBytes(ERROR_MESSAGE); - public static final String ERROR_TYPE = "ErrorType"; - public static final byte[] ERROR_TYPE_BYTES = Bytes.toBytes(ERROR_TYPE); - - public static final String VERIFICATION_PHASE = "Phase"; - public final static byte[] VERIFICATION_PHASE_BYTES = Bytes.toBytes(VERIFICATION_PHASE); - public final static String EXPECTED_VALUE = "ExpectedValue"; - public final static byte[] EXPECTED_VALUE_BYTES = Bytes.toBytes(EXPECTED_VALUE); - public final static String ACTUAL_VALUE = "ActualValue"; - public final static byte[] ACTUAL_VALUE_BYTES = 
Bytes.toBytes(ACTUAL_VALUE); - public static final byte[] E_VALUE_PREFIX_BYTES = Bytes.toBytes(" E:"); - public static final byte[] A_VALUE_PREFIX_BYTES = Bytes.toBytes(" A:"); - public static final int PREFIX_LENGTH = 3; - public static final int TOTAL_PREFIX_LENGTH = 6; - public static final byte[] PHASE_BEFORE_VALUE = Bytes.toBytes("BEFORE"); - public static final byte[] PHASE_AFTER_VALUE = Bytes.toBytes("AFTER"); - - - public enum IndexVerificationErrorType { - INVALID_ROW, - MISSING_ROW, - EXTRA_ROW, - EXTRA_CELLS, - BEYOND_MAX_LOOKBACK_INVALID, - BEYOND_MAX_LOOKBACK_MISSING, - UNKNOWN + public static final byte[] ROW_KEY_SEPARATOR_BYTE = Bytes.toBytes("|"); + private static final Logger LOGGER = + LoggerFactory.getLogger(IndexVerificationOutputRepository.class); + + private Table indexTable; + private byte[] indexName; + private Table outputTable; + private IndexTool.IndexDisableLoggingType disableLoggingVerifyType = + IndexTool.IndexDisableLoggingType.NONE; + private boolean shouldLogBeyondMaxLookback = true; + + public final static String OUTPUT_TABLE_NAME = "PHOENIX_INDEX_TOOL"; + public final static byte[] OUTPUT_TABLE_NAME_BYTES = Bytes.toBytes(OUTPUT_TABLE_NAME); + public final static byte[] OUTPUT_TABLE_COLUMN_FAMILY = + QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + + public final static String DATA_TABLE_NAME = "DTName"; + public final static byte[] DATA_TABLE_NAME_BYTES = Bytes.toBytes(DATA_TABLE_NAME); + public final static String INDEX_TABLE_NAME = "ITName"; + public final static byte[] INDEX_TABLE_NAME_BYTES = Bytes.toBytes(INDEX_TABLE_NAME); + public final static String DATA_TABLE_ROW_KEY = "DTRowKey"; + public final static byte[] DATA_TABLE_ROW_KEY_BYTES = Bytes.toBytes(DATA_TABLE_ROW_KEY); + public final static String INDEX_TABLE_ROW_KEY = "ITRowKey"; + public final static byte[] INDEX_TABLE_ROW_KEY_BYTES = Bytes.toBytes(INDEX_TABLE_ROW_KEY); + public final static String DATA_TABLE_TS = "DTTS"; + public final static byte[] DATA_TABLE_TS_BYTES = Bytes.toBytes(DATA_TABLE_TS); + public final static String INDEX_TABLE_TS = "ITTS"; + public final static byte[] INDEX_TABLE_TS_BYTES = Bytes.toBytes(INDEX_TABLE_TS); + public final static String ERROR_MESSAGE = "Error"; + public final static byte[] ERROR_MESSAGE_BYTES = Bytes.toBytes(ERROR_MESSAGE); + public static final String ERROR_TYPE = "ErrorType"; + public static final byte[] ERROR_TYPE_BYTES = Bytes.toBytes(ERROR_TYPE); + + public static final String VERIFICATION_PHASE = "Phase"; + public final static byte[] VERIFICATION_PHASE_BYTES = Bytes.toBytes(VERIFICATION_PHASE); + public final static String EXPECTED_VALUE = "ExpectedValue"; + public final static byte[] EXPECTED_VALUE_BYTES = Bytes.toBytes(EXPECTED_VALUE); + public final static String ACTUAL_VALUE = "ActualValue"; + public final static byte[] ACTUAL_VALUE_BYTES = Bytes.toBytes(ACTUAL_VALUE); + public static final byte[] E_VALUE_PREFIX_BYTES = Bytes.toBytes(" E:"); + public static final byte[] A_VALUE_PREFIX_BYTES = Bytes.toBytes(" A:"); + public static final int PREFIX_LENGTH = 3; + public static final int TOTAL_PREFIX_LENGTH = 6; + public static final byte[] PHASE_BEFORE_VALUE = Bytes.toBytes("BEFORE"); + public static final byte[] PHASE_AFTER_VALUE = Bytes.toBytes("AFTER"); + + public enum IndexVerificationErrorType { + INVALID_ROW, + MISSING_ROW, + EXTRA_ROW, + EXTRA_CELLS, + BEYOND_MAX_LOOKBACK_INVALID, + BEYOND_MAX_LOOKBACK_MISSING, + UNKNOWN + } + + /** + * Only usable for the create table / read path or for testing. 
Use setOutputTable and + * setIndexTable first to write. + */ + public IndexVerificationOutputRepository() { + + } + + @VisibleForTesting + public IndexVerificationOutputRepository(byte[] indexName, Connection conn) throws SQLException { + ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices(); + outputTable = queryServices.getTable(OUTPUT_TABLE_NAME_BYTES); + indexTable = queryServices.getTable(indexName); + } + + @VisibleForTesting + public IndexVerificationOutputRepository(Table outputTable, Table indexTable, + IndexTool.IndexDisableLoggingType disableLoggingVerifyType) throws SQLException { + this.outputTable = outputTable; + this.indexTable = indexTable; + this.disableLoggingVerifyType = disableLoggingVerifyType; + } + + public IndexVerificationOutputRepository(byte[] indexName, HTableFactory hTableFactory, + IndexTool.IndexDisableLoggingType disableLoggingVerifyType) throws IOException { + this.indexName = indexName; + outputTable = hTableFactory.getTable(new ImmutableBytesPtr(OUTPUT_TABLE_NAME_BYTES)); + indexTable = hTableFactory.getTable(new ImmutableBytesPtr(indexName)); + this.disableLoggingVerifyType = disableLoggingVerifyType; + } + + public void setShouldLogBeyondMaxLookback(boolean shouldLogBeyondMaxLookback) { + this.shouldLogBeyondMaxLookback = shouldLogBeyondMaxLookback; + } + + public static byte[] generateOutputTableRowKey(long ts, byte[] indexTableName, + byte[] dataRowKey) { + byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); + byte[] rowKey; + int targetOffset = 0; + // The row key for the output table : timestamp | index table name | data row key + rowKey = new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length + + ROW_KEY_SEPARATOR_BYTE.length + dataRowKey.length]; + Bytes.putBytes(rowKey, targetOffset, keyPrefix, 0, keyPrefix.length); + targetOffset += keyPrefix.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, targetOffset, indexTableName, 0, indexTableName.length); + targetOffset += indexTableName.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, targetOffset, dataRowKey, 0, dataRowKey.length); + return rowKey; + } + + /** + * Generates partial row key for use in a Scan to get all rows for an index verification + */ + private static byte[] generatePartialOutputTableRowKey(long ts, byte[] indexTableName) { + byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); + byte[] partialRowKey; + int targetOffset = 0; + // The row key for the output table : timestamp | index table name | data row key + partialRowKey = + new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length]; + Bytes.putBytes(partialRowKey, targetOffset, keyPrefix, 0, keyPrefix.length); + targetOffset += keyPrefix.length; + Bytes.putBytes(partialRowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, + ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(partialRowKey, targetOffset, indexTableName, 0, indexTableName.length); + return partialRowKey; + } + + public void createOutputTable(Connection connection) throws IOException, SQLException { + ConnectionQueryServices queryServices = + connection.unwrap(PhoenixConnection.class).getQueryServices(); + try (Admin admin = queryServices.getAdmin()) { + TableName 
outputTableName = TableName.valueOf(OUTPUT_TABLE_NAME); + if (!admin.tableExists(outputTableName)) { + ColumnFamilyDescriptor columnDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(OUTPUT_TABLE_COLUMN_FAMILY) + .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL).build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(TableName.valueOf(OUTPUT_TABLE_NAME)) + .setColumnFamily(columnDescriptor).build(); + try { + admin.createTable(tableDescriptor); + } catch (TableExistsException e) { + LOGGER.warn("Table exists, ignoring", e); + } + outputTable = admin.getConnection().getTable(outputTableName); + } } - /** - * Only usable for the create table / read path or for testing. Use setOutputTable and - * setIndexTable first to write. - */ - public IndexVerificationOutputRepository() { - + } + + @VisibleForTesting + public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, + long indexRowTs, String errorMsg, byte[] expectedValue, byte[] actualValue, long scanMaxTs, + byte[] tableName, boolean isBeforeRebuild, IndexVerificationErrorType errorType) + throws IOException { + if (shouldLogOutput(isBeforeRebuild, errorType)) { + byte[] rowKey = + generateOutputTableRowKey(scanMaxTs, indexTable.getName().toBytes(), dataRowKey); + Put put = new Put(rowKey); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_NAME_BYTES, tableName); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_NAME_BYTES, indexName); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_TS_BYTES, + Bytes.toBytes(Long.toString(dataRowTs))); + + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_ROW_KEY_BYTES, indexRowKey); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_TS_BYTES, + Bytes.toBytes(Long.toString(indexRowTs))); + byte[] errorMessageBytes; + if (expectedValue != null) { + errorMessageBytes = getErrorMessageBytes(errorMsg, expectedValue, actualValue); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, EXPECTED_VALUE_BYTES, expectedValue); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ACTUAL_VALUE_BYTES, actualValue); + } else { + errorMessageBytes = Bytes.toBytes(errorMsg); + } + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_MESSAGE_BYTES, errorMessageBytes); + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_TYPE_BYTES, + Bytes.toBytes(errorType.toString())); + if (isBeforeRebuild) { + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES, PHASE_BEFORE_VALUE); + } else { + put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES, PHASE_AFTER_VALUE); + } + outputTable.put(put); } + } - @VisibleForTesting - public IndexVerificationOutputRepository(byte[] indexName, Connection conn) throws SQLException { - ConnectionQueryServices queryServices = - conn.unwrap(PhoenixConnection.class).getQueryServices(); - outputTable = queryServices.getTable(OUTPUT_TABLE_NAME_BYTES); - indexTable = queryServices.getTable(indexName); - } + public boolean shouldLogOutput(boolean isBeforeRebuild, IndexVerificationErrorType errorType) { + return shouldLogOutputForVerifyType(isBeforeRebuild) && shouldLogOutputForErrorType(errorType); + } - @VisibleForTesting - public IndexVerificationOutputRepository(Table outputTable, Table indexTable, - IndexTool.IndexDisableLoggingType disableLoggingVerifyType) throws SQLException { - this.outputTable = outputTable; - this.indexTable = indexTable; - this.disableLoggingVerifyType = disableLoggingVerifyType; + private boolean shouldLogOutputForVerifyType(boolean isBeforeRebuild) { + if 
(disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.BOTH)) { + return false; } - - public IndexVerificationOutputRepository(byte[] indexName, HTableFactory hTableFactory, - IndexTool.IndexDisableLoggingType disableLoggingVerifyType) throws IOException { - this.indexName = indexName; - outputTable = hTableFactory.getTable(new ImmutableBytesPtr(OUTPUT_TABLE_NAME_BYTES)); - indexTable = hTableFactory.getTable(new ImmutableBytesPtr(indexName)); - this.disableLoggingVerifyType = disableLoggingVerifyType; + if (disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.NONE)) { + return true; } - - public void setShouldLogBeyondMaxLookback(boolean shouldLogBeyondMaxLookback) { - this.shouldLogBeyondMaxLookback = shouldLogBeyondMaxLookback; + if ( + isBeforeRebuild && (disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.AFTER)) + ) { + return true; } - - public static byte[] generateOutputTableRowKey(long ts, byte[] indexTableName, byte[] dataRowKey ) { - byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); - byte[] rowKey; - int targetOffset = 0; - // The row key for the output table : timestamp | index table name | data row key - rowKey = new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length + - ROW_KEY_SEPARATOR_BYTE.length + dataRowKey.length]; - Bytes.putBytes(rowKey, targetOffset, keyPrefix, 0, keyPrefix.length); - targetOffset += keyPrefix.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, indexTableName, 0, indexTableName.length); - targetOffset += indexTableName.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, dataRowKey, 0, dataRowKey.length); - return rowKey; + if ( + !isBeforeRebuild && disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.BEFORE) + ) { + return true; } - - /** - * Generates partial row key for use in a Scan to get all rows for an index verification - */ - private static byte[] generatePartialOutputTableRowKey(long ts, byte[] indexTableName){ - byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); - byte[] partialRowKey; - int targetOffset = 0; - // The row key for the output table : timestamp | index table name | data row key - partialRowKey = new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length]; - Bytes.putBytes(partialRowKey, targetOffset, keyPrefix, 0, keyPrefix.length); - targetOffset += keyPrefix.length; - Bytes.putBytes(partialRowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(partialRowKey, targetOffset, indexTableName, 0, indexTableName.length); - return partialRowKey; + return false; + } + + private boolean shouldLogOutputForErrorType(IndexVerificationErrorType errorType) { + if ( + errorType != null && (errorType.equals(IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_INVALID) + || errorType.equals(IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_MISSING)) + ) { + return shouldLogBeyondMaxLookback; } - - public void createOutputTable(Connection connection) throws IOException, SQLException { - ConnectionQueryServices queryServices = connection.unwrap(PhoenixConnection.class).getQueryServices(); - try (Admin admin = queryServices.getAdmin()) { - TableName outputTableName = 
TableName.valueOf(OUTPUT_TABLE_NAME); - if (!admin.tableExists(outputTableName)) { - ColumnFamilyDescriptor columnDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(OUTPUT_TABLE_COLUMN_FAMILY) - .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL) - .build(); - TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(TableName.valueOf(OUTPUT_TABLE_NAME)) - .setColumnFamily(columnDescriptor).build(); - try { - admin.createTable(tableDescriptor); - } catch (TableExistsException e) { - LOGGER.warn("Table exists, ignoring", e); - } - outputTable = admin.getConnection().getTable(outputTableName); - } - } + return true; + } + + public static byte[] getErrorMessageBytes(String errorMsg, byte[] expectedValue, + byte[] actualValue) { + byte[] errorMessageBytes; + errorMessageBytes = + new byte[errorMsg.length() + expectedValue.length + actualValue.length + TOTAL_PREFIX_LENGTH]; + Bytes.putBytes(errorMessageBytes, 0, Bytes.toBytes(errorMsg), 0, errorMsg.length()); + int length = errorMsg.length(); + Bytes.putBytes(errorMessageBytes, length, E_VALUE_PREFIX_BYTES, 0, PREFIX_LENGTH); + length += PREFIX_LENGTH; + Bytes.putBytes(errorMessageBytes, length, expectedValue, 0, expectedValue.length); + length += expectedValue.length; + Bytes.putBytes(errorMessageBytes, length, A_VALUE_PREFIX_BYTES, 0, PREFIX_LENGTH); + length += PREFIX_LENGTH; + Bytes.putBytes(errorMessageBytes, length, actualValue, 0, actualValue.length); + return errorMessageBytes; + } + + public List getOutputRows(long ts, byte[] indexName) + throws IOException { + Iterator iter = getOutputRowIterator(ts, indexName); + return getIndexVerificationOutputRows(iter); + } + + @VisibleForTesting + public List getAllOutputRows() throws IOException { + Iterator iter = getOutputRowIteratorForAllRows(); + return getIndexVerificationOutputRows(iter); + } + + private List + getIndexVerificationOutputRows(Iterator iter) { + List outputRowList = new ArrayList(); + while (iter.hasNext()) { + outputRowList.add(iter.next()); } - - @VisibleForTesting - public void logToIndexToolOutputTable(byte[] dataRowKey, byte[] indexRowKey, long dataRowTs, - long indexRowTs, - String errorMsg, byte[] expectedValue, byte[] actualValue, - long scanMaxTs, byte[] tableName, - boolean isBeforeRebuild, - IndexVerificationErrorType errorType) - throws IOException { - if (shouldLogOutput(isBeforeRebuild, errorType)) { - byte[] rowKey = generateOutputTableRowKey(scanMaxTs, indexTable.getName().toBytes(), dataRowKey); - Put put = new Put(rowKey); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_NAME_BYTES, tableName); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_NAME_BYTES, indexName); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_TS_BYTES, Bytes.toBytes(Long.toString(dataRowTs))); - - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_ROW_KEY_BYTES, indexRowKey); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_TS_BYTES, Bytes.toBytes(Long.toString(indexRowTs))); - byte[] errorMessageBytes; - if (expectedValue != null) { - errorMessageBytes = getErrorMessageBytes(errorMsg, expectedValue, actualValue); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, EXPECTED_VALUE_BYTES, expectedValue); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ACTUAL_VALUE_BYTES, actualValue); - } else { - errorMessageBytes = Bytes.toBytes(errorMsg); - } - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_MESSAGE_BYTES, errorMessageBytes); - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_TYPE_BYTES, - Bytes.toBytes(errorType.toString())); - if (isBeforeRebuild) 
{ - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES, PHASE_BEFORE_VALUE); - } else { - put.addColumn(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES, PHASE_AFTER_VALUE); - } - outputTable.put(put); - } + return outputRowList; + } + + public Iterator getOutputRowIterator(long ts, byte[] indexName) + throws IOException { + Scan scan = new Scan(); + byte[] partialKey = generatePartialOutputTableRowKey(ts, indexName); + scan.withStartRow(partialKey); + scan.withStopRow(ByteUtil.calculateTheClosestNextRowKeyForPrefix(partialKey)); + ResultScanner scanner = outputTable.getScanner(scan); + return new IndexVerificationOutputRowIterator(scanner.iterator()); + } + + @VisibleForTesting + public Iterator getOutputRowIteratorForAllRows() throws IOException { + Scan scan = new Scan(); + ResultScanner scanner = outputTable.getScanner(scan); + return new IndexVerificationOutputRowIterator(scanner.iterator()); + } + + public static IndexVerificationOutputRow getOutputRowFromResult(Result result) { + IndexVerificationOutputRow.IndexVerificationOutputRowBuilder builder = + new IndexVerificationOutputRow.IndexVerificationOutputRowBuilder(); + byte[] rowKey = result.getRow(); + // rowkey is scanTs + SEPARATOR_BYTE + indexTableName + SEPARATOR_BYTE + dataTableRowKey + byte[][] rowKeySplit = ByteUtil.splitArrayBySeparator(rowKey, ROW_KEY_SEPARATOR_BYTE[0]); + builder.setScanMaxTimestamp(Long.parseLong(Bytes.toString(rowKeySplit[0]))); + builder.setIndexTableName(Bytes.toString(rowKeySplit[1])); + builder.setDataTableRowKey(rowKeySplit[2]); + + builder.setDataTableName( + Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_NAME_BYTES))); + builder + .setIndexTableRowKey(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_ROW_KEY_BYTES)); + builder.setDataTableRowTimestamp(Long + .parseLong(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, DATA_TABLE_TS_BYTES)))); + builder.setIndexTableRowTimestamp(Long.parseLong( + Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, INDEX_TABLE_TS_BYTES)))); + builder.setErrorMessage( + Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_MESSAGE_BYTES))); + // actual and expected value might not be present, but will just set to null if not + builder.setExpectedValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, EXPECTED_VALUE_BYTES)); + builder.setActualValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, ACTUAL_VALUE_BYTES)); + builder.setPhaseValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES)); + IndexVerificationErrorType errorType; + try { + errorType = IndexVerificationErrorType + .valueOf(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_TYPE_BYTES))); + } catch (Throwable e) { + // in case we have a cast exception because an incompatible version of the enum produced + // the row, or an earlier version that didn't record error types, it's better to mark + // the error type unknown and move on rather than fail + errorType = IndexVerificationErrorType.UNKNOWN; } + builder.setErrorType(errorType); + return builder.build(); + } - public boolean shouldLogOutput(boolean isBeforeRebuild, IndexVerificationErrorType errorType) { - return shouldLogOutputForVerifyType(isBeforeRebuild) && - shouldLogOutputForErrorType(errorType); + public void close() throws IOException { + if (outputTable != null) { + outputTable.close(); } - - private boolean shouldLogOutputForVerifyType(boolean isBeforeRebuild) { - if (disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.BOTH)) 
{ - return false; - } - if (disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.NONE)) { - return true; - } - if (isBeforeRebuild && - (disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.AFTER))) { - return true; - } - if (!isBeforeRebuild && disableLoggingVerifyType.equals(IndexTool.IndexDisableLoggingType.BEFORE)) { - return true; - } - return false; - } - - private boolean shouldLogOutputForErrorType(IndexVerificationErrorType errorType) { - if (errorType != null && - (errorType.equals(IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_INVALID) || - errorType.equals(IndexVerificationErrorType.BEYOND_MAX_LOOKBACK_MISSING))){ - return shouldLogBeyondMaxLookback; - } - return true; - } - - public static byte[] getErrorMessageBytes(String errorMsg, byte[] expectedValue, byte[] actualValue) { - byte[] errorMessageBytes; - errorMessageBytes = new byte[errorMsg.length() + expectedValue.length + actualValue.length + - TOTAL_PREFIX_LENGTH]; - Bytes.putBytes(errorMessageBytes, 0, Bytes.toBytes(errorMsg), 0, errorMsg.length()); - int length = errorMsg.length(); - Bytes.putBytes(errorMessageBytes, length, E_VALUE_PREFIX_BYTES, 0, PREFIX_LENGTH); - length += PREFIX_LENGTH; - Bytes.putBytes(errorMessageBytes, length, expectedValue, 0, expectedValue.length); - length += expectedValue.length; - Bytes.putBytes(errorMessageBytes, length, A_VALUE_PREFIX_BYTES, 0, PREFIX_LENGTH); - length += PREFIX_LENGTH; - Bytes.putBytes(errorMessageBytes, length, actualValue, 0, actualValue.length); - return errorMessageBytes; - } - - public List getOutputRows(long ts, byte[] indexName) - throws IOException { - Iterator iter = getOutputRowIterator(ts, indexName); - return getIndexVerificationOutputRows(iter); + if (indexTable != null) { + indexTable.close(); } + } - @VisibleForTesting - public List getAllOutputRows() throws IOException { - Iterator iter = getOutputRowIteratorForAllRows(); - return getIndexVerificationOutputRows(iter); - } - - private List getIndexVerificationOutputRows(Iterator iter) { - List outputRowList = new ArrayList(); - while (iter.hasNext()){ - outputRowList.add(iter.next()); - } - return outputRowList; - } + public static class IndexVerificationOutputRowIterator + implements Iterator { + Iterator delegate; - public Iterator getOutputRowIterator(long ts, byte[] indexName) - throws IOException { - Scan scan = new Scan(); - byte[] partialKey = generatePartialOutputTableRowKey(ts, indexName); - scan.withStartRow(partialKey); - scan.withStopRow(ByteUtil.calculateTheClosestNextRowKeyForPrefix(partialKey)); - ResultScanner scanner = outputTable.getScanner(scan); - return new IndexVerificationOutputRowIterator(scanner.iterator()); + public IndexVerificationOutputRowIterator(Iterator delegate) { + this.delegate = delegate; } - @VisibleForTesting - public Iterator getOutputRowIteratorForAllRows() - throws IOException { - Scan scan = new Scan(); - ResultScanner scanner = outputTable.getScanner(scan); - return new IndexVerificationOutputRowIterator(scanner.iterator()); + @Override + public boolean hasNext() { + return delegate.hasNext(); } - public static IndexVerificationOutputRow getOutputRowFromResult(Result result) { - IndexVerificationOutputRow.IndexVerificationOutputRowBuilder builder = - new IndexVerificationOutputRow.IndexVerificationOutputRowBuilder(); - byte[] rowKey = result.getRow(); - //rowkey is scanTs + SEPARATOR_BYTE + indexTableName + SEPARATOR_BYTE + dataTableRowKey - byte[][] rowKeySplit = ByteUtil.splitArrayBySeparator(rowKey, ROW_KEY_SEPARATOR_BYTE[0]); - 
builder.setScanMaxTimestamp(Long.parseLong(Bytes.toString(rowKeySplit[0]))); - builder.setIndexTableName(Bytes.toString(rowKeySplit[1])); - builder.setDataTableRowKey(rowKeySplit[2]); - - builder.setDataTableName(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, - DATA_TABLE_NAME_BYTES))); - builder.setIndexTableRowKey(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, - INDEX_TABLE_ROW_KEY_BYTES)); - builder.setDataTableRowTimestamp(Long.parseLong(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, - DATA_TABLE_TS_BYTES)))); - builder.setIndexTableRowTimestamp(Long.parseLong(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, - INDEX_TABLE_TS_BYTES)))); - builder.setErrorMessage(Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, - ERROR_MESSAGE_BYTES))); - //actual and expected value might not be present, but will just set to null if not - builder.setExpectedValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, EXPECTED_VALUE_BYTES)); - builder.setActualValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, ACTUAL_VALUE_BYTES)); - builder.setPhaseValue(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, VERIFICATION_PHASE_BYTES)); - IndexVerificationErrorType errorType; - try { - errorType = - IndexVerificationErrorType.valueOf( - Bytes.toString(result.getValue(OUTPUT_TABLE_COLUMN_FAMILY, ERROR_TYPE_BYTES))); - } catch (Throwable e) { - //in case we have a cast exception because an incompatible version of the enum produced - //the row, or an earlier version that didn't record error types, it's better to mark - // the error type unknown and move on rather than fail - errorType = IndexVerificationErrorType.UNKNOWN; - } - builder.setErrorType(errorType); - return builder.build(); + @Override + public IndexVerificationOutputRow next() { + Result result = delegate.next(); + if (result == null) { + return null; + } else { + return getOutputRowFromResult(result); + } } - public void close() throws IOException { - if (outputTable != null) { - outputTable.close(); - } - if (indexTable != null) { - indexTable.close(); - } + @Override + public void remove() { + delegate.remove(); } - public static class IndexVerificationOutputRowIterator implements Iterator { - Iterator delegate; - public IndexVerificationOutputRowIterator(Iterator delegate){ - this.delegate = delegate; - } - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public IndexVerificationOutputRow next() { - Result result = delegate.next(); - if (result == null) { - return null; - } else { - return getOutputRowFromResult(result); - } - } - - @Override - public void remove() { - delegate.remove(); - } + } - } + public void setIndexTable(Table indexTable) { + this.indexTable = indexTable; + } - public void setIndexTable(Table indexTable) { - this.indexTable = indexTable; - } - - public void setOutputTable(Table outputTable) { - this.outputTable = outputTable; - } + public void setOutputTable(Table outputTable) { + this.outputTable = outputTable; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRow.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRow.java index 4dad9b69bba..d7b2da5ef2d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRow.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationOutputRow.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with 
the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,222 +17,232 @@ */ package org.apache.phoenix.mapreduce.index; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType; - import java.util.Arrays; import java.util.Objects; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository.IndexVerificationErrorType; + public class IndexVerificationOutputRow { - public static final String SCAN_MAX_TIMESTAMP = "ScanMaxTimestamp: "; + public static final String SCAN_MAX_TIMESTAMP = "ScanMaxTimestamp: "; + private String dataTableName; + private String indexTableName; + private Long scanMaxTimestamp; + private byte[] dataTableRowKey; + private byte[] indexTableRowKey; + private Long dataTableRowTimestamp; + private Long indexTableRowTimestamp; + private String errorMessage; + private byte[] expectedValue; + private byte[] actualValue; + private byte[] phaseValue; + private IndexVerificationErrorType errorType; + + private IndexVerificationOutputRow(String dataTableName, String indexTableName, + byte[] dataTableRowKey, Long scanMaxTimestamp, byte[] indexTableRowKey, + long dataTableRowTimestamp, long indexTableRowTimestamp, String errorMessage, + byte[] expectedValue, byte[] actualValue, byte[] phaseValue, + IndexVerificationErrorType errorType) { + this.dataTableName = dataTableName; + this.indexTableName = indexTableName; + this.scanMaxTimestamp = scanMaxTimestamp; + this.dataTableRowKey = dataTableRowKey; + this.indexTableRowKey = indexTableRowKey; + this.dataTableRowTimestamp = dataTableRowTimestamp; + this.indexTableRowTimestamp = indexTableRowTimestamp; + this.errorMessage = errorMessage; + this.expectedValue = expectedValue; + this.actualValue = actualValue; + this.phaseValue = phaseValue; + this.errorType = errorType; + } + + public String getDataTableName() { + return dataTableName; + } + + public String getIndexTableName() { + return indexTableName; + } + + public Long getScanMaxTimestamp() { + return scanMaxTimestamp; + } + + public byte[] getIndexTableRowKey() { + return indexTableRowKey; + } + + public long getIndexTableRowTimestamp() { + return indexTableRowTimestamp; + } + + public String getErrorMessage() { + return errorMessage; + } + + public byte[] getExpectedValue() { + return expectedValue; + } + + public byte[] getActualValue() { + return actualValue; + } + + public byte[] getPhaseValue() { + return phaseValue; + } + + public byte[] getDataTableRowKey() { + return dataTableRowKey; + } + + public Long getDataTableRowTimestamp() { + return dataTableRowTimestamp; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (!(o instanceof IndexVerificationOutputRow)) { + return false; + } + IndexVerificationOutputRow otherRow = (IndexVerificationOutputRow) o; + + return Objects.equals(dataTableName, otherRow.getDataTableName()) + && Objects.equals(indexTableName, otherRow.getIndexTableName()) + && Objects.equals(scanMaxTimestamp, otherRow.getScanMaxTimestamp()) + && Arrays.equals(dataTableRowKey, otherRow.getDataTableRowKey()) + && Arrays.equals(indexTableRowKey, otherRow.getIndexTableRowKey()) + && Objects.equals(dataTableRowTimestamp, 
otherRow.getDataTableRowTimestamp()) + && Objects.equals(indexTableRowTimestamp, otherRow.getIndexTableRowTimestamp()) + && Objects.equals(errorMessage, otherRow.getErrorMessage()) + && Arrays.equals(expectedValue, otherRow.getExpectedValue()) + && Arrays.equals(actualValue, otherRow.getActualValue()) + && Arrays.equals(phaseValue, otherRow.getPhaseValue()) + && Objects.equals(errorType, otherRow.getErrorType()); + } + + @Override + public int hashCode() { + return Objects.hashCode(scanMaxTimestamp) ^ Objects.hashCode(indexTableName) + ^ Arrays.hashCode(dataTableRowKey); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(IndexVerificationOutputRepository.DATA_TABLE_NAME + ": ").append(dataTableName) + .append(","); + sb.append(IndexVerificationOutputRepository.INDEX_TABLE_NAME + ": ").append(indexTableName) + .append(","); + sb.append(SCAN_MAX_TIMESTAMP).append(": ").append(scanMaxTimestamp).append(","); + sb.append(IndexVerificationOutputRepository.DATA_TABLE_ROW_KEY + ": ") + .append(Bytes.toString(dataTableRowKey)).append(","); + sb.append(IndexVerificationOutputRepository.INDEX_TABLE_ROW_KEY + ": ") + .append(Bytes.toString(indexTableRowKey)).append(","); + sb.append(IndexVerificationOutputRepository.DATA_TABLE_TS + ": ").append(dataTableRowTimestamp) + .append(","); + sb.append(IndexVerificationOutputRepository.INDEX_TABLE_TS + ": ") + .append(indexTableRowTimestamp).append(","); + sb.append(IndexVerificationOutputRepository.ERROR_MESSAGE + ": ").append(errorMessage) + .append(","); + sb.append(IndexVerificationOutputRepository.EXPECTED_VALUE + ": ") + .append(Bytes.toString(expectedValue)).append(","); + sb.append(IndexVerificationOutputRepository.ACTUAL_VALUE + ": ") + .append(Bytes.toString(actualValue)).append(","); + sb.append(IndexVerificationOutputRepository.VERIFICATION_PHASE + ": ") + .append(Bytes.toString(phaseValue)); + sb.append(IndexVerificationOutputRepository.ERROR_TYPE + ": ") + .append(Objects.toString(errorType)); + return sb.toString(); + } + + public IndexVerificationErrorType getErrorType() { + return errorType; + } + + public static class IndexVerificationOutputRowBuilder { private String dataTableName; private String indexTableName; private Long scanMaxTimestamp; private byte[] dataTableRowKey; private byte[] indexTableRowKey; - private Long dataTableRowTimestamp; - private Long indexTableRowTimestamp; + private long dataTableRowTimestamp; + private long indexTableRowTimestamp; private String errorMessage; private byte[] expectedValue; private byte[] actualValue; private byte[] phaseValue; private IndexVerificationErrorType errorType; - private IndexVerificationOutputRow(String dataTableName, String indexTableName, - byte[] dataTableRowKey, Long scanMaxTimestamp, - byte[] indexTableRowKey, - long dataTableRowTimestamp, long indexTableRowTimestamp, - String errorMessage, byte[] expectedValue, byte[] actualValue, - byte[] phaseValue, IndexVerificationErrorType errorType) { - this.dataTableName = dataTableName; - this.indexTableName = indexTableName; - this.scanMaxTimestamp = scanMaxTimestamp; - this.dataTableRowKey = dataTableRowKey; - this.indexTableRowKey = indexTableRowKey; - this.dataTableRowTimestamp = dataTableRowTimestamp; - this.indexTableRowTimestamp = indexTableRowTimestamp; - this.errorMessage = errorMessage; - this.expectedValue = expectedValue; - this.actualValue = actualValue; - this.phaseValue = phaseValue; - this.errorType = errorType; - } - - public String getDataTableName() { - return 
dataTableName; - } - - public String getIndexTableName() { - return indexTableName; - } - - public Long getScanMaxTimestamp() { - return scanMaxTimestamp; - } - - public byte[] getIndexTableRowKey() { - return indexTableRowKey; + public IndexVerificationOutputRowBuilder setDataTableName(String dataTableName) { + this.dataTableName = dataTableName; + return this; } - public long getIndexTableRowTimestamp() { - return indexTableRowTimestamp; + public IndexVerificationOutputRowBuilder setIndexTableName(String indexTableName) { + this.indexTableName = indexTableName; + return this; } - public String getErrorMessage() { - return errorMessage; + public IndexVerificationOutputRowBuilder setScanMaxTimestamp(Long scanMaxTimestamp) { + this.scanMaxTimestamp = scanMaxTimestamp; + return this; } - public byte[] getExpectedValue() { - return expectedValue; + public IndexVerificationOutputRowBuilder setIndexTableRowKey(byte[] indexTableRowKey) { + this.indexTableRowKey = indexTableRowKey; + return this; } - public byte[] getActualValue() { - return actualValue; + public IndexVerificationOutputRowBuilder setDataTableRowKey(byte[] dataTableRowKey) { + this.dataTableRowKey = dataTableRowKey; + return this; } - public byte[] getPhaseValue() { - return phaseValue; + public IndexVerificationOutputRowBuilder setDataTableRowTimestamp(long dataTableRowTimestamp) { + this.dataTableRowTimestamp = dataTableRowTimestamp; + return this; } - public byte[] getDataTableRowKey() { - return dataTableRowKey; + public IndexVerificationOutputRowBuilder + setIndexTableRowTimestamp(long indexTableRowTimestamp) { + this.indexTableRowTimestamp = indexTableRowTimestamp; + return this; } - public Long getDataTableRowTimestamp() { - return dataTableRowTimestamp; + public IndexVerificationOutputRowBuilder setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + return this; } - @Override - public boolean equals(Object o) { - if (o == null ) { - return false; - } - if (!(o instanceof IndexVerificationOutputRow)) { - return false; - } - IndexVerificationOutputRow otherRow = (IndexVerificationOutputRow) o; - - return Objects.equals(dataTableName, otherRow.getDataTableName()) && - Objects.equals(indexTableName, otherRow.getIndexTableName()) && - Objects.equals(scanMaxTimestamp, otherRow.getScanMaxTimestamp()) && - Arrays.equals(dataTableRowKey, otherRow.getDataTableRowKey()) && - Arrays.equals(indexTableRowKey, otherRow.getIndexTableRowKey()) && - Objects.equals(dataTableRowTimestamp, otherRow.getDataTableRowTimestamp()) && - Objects.equals(indexTableRowTimestamp, otherRow.getIndexTableRowTimestamp()) && - Objects.equals(errorMessage, otherRow.getErrorMessage()) && - Arrays.equals(expectedValue, otherRow.getExpectedValue()) && - Arrays.equals(actualValue, otherRow.getActualValue()) && - Arrays.equals(phaseValue, otherRow.getPhaseValue()) && - Objects.equals(errorType, otherRow.getErrorType()); + public IndexVerificationOutputRowBuilder setExpectedValue(byte[] expectedValue) { + this.expectedValue = expectedValue; + return this; } - @Override - public int hashCode(){ - return Objects.hashCode(scanMaxTimestamp) ^ Objects.hashCode(indexTableName) ^ - Arrays.hashCode(dataTableRowKey); + public IndexVerificationOutputRowBuilder setActualValue(byte[] actualValue) { + this.actualValue = actualValue; + return this; } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(IndexVerificationOutputRepository.DATA_TABLE_NAME + ": ").append(dataTableName).append(","); - 
sb.append(IndexVerificationOutputRepository.INDEX_TABLE_NAME + ": ").append(indexTableName).append(","); - sb.append(SCAN_MAX_TIMESTAMP).append(": ").append(scanMaxTimestamp).append(","); - sb.append(IndexVerificationOutputRepository.DATA_TABLE_ROW_KEY + ": ").append(Bytes.toString(dataTableRowKey)).append(","); - sb.append(IndexVerificationOutputRepository.INDEX_TABLE_ROW_KEY + ": ").append(Bytes.toString(indexTableRowKey)).append(","); - sb.append(IndexVerificationOutputRepository.DATA_TABLE_TS + ": ").append(dataTableRowTimestamp).append(","); - sb.append(IndexVerificationOutputRepository.INDEX_TABLE_TS + ": ").append(indexTableRowTimestamp).append(","); - sb.append(IndexVerificationOutputRepository.ERROR_MESSAGE + ": ").append(errorMessage).append(","); - sb.append(IndexVerificationOutputRepository.EXPECTED_VALUE + ": ").append(Bytes.toString(expectedValue)).append(","); - sb.append(IndexVerificationOutputRepository.ACTUAL_VALUE + ": ").append(Bytes.toString(actualValue)).append( - ","); - sb.append(IndexVerificationOutputRepository.VERIFICATION_PHASE + ": ").append(Bytes.toString(phaseValue)); - sb.append(IndexVerificationOutputRepository.ERROR_TYPE + ": " ).append(Objects.toString(errorType)); - return sb.toString(); + public IndexVerificationOutputRowBuilder setPhaseValue(byte[] phaseValue) { + this.phaseValue = phaseValue; + return this; } - public IndexVerificationErrorType getErrorType() { - return errorType; + public IndexVerificationOutputRowBuilder setErrorType(IndexVerificationErrorType errorType) { + this.errorType = errorType; + return this; } - public static class IndexVerificationOutputRowBuilder { - private String dataTableName; - private String indexTableName; - private Long scanMaxTimestamp; - private byte[] dataTableRowKey; - private byte[] indexTableRowKey; - private long dataTableRowTimestamp; - private long indexTableRowTimestamp; - private String errorMessage; - private byte[] expectedValue; - private byte[] actualValue; - private byte[] phaseValue; - private IndexVerificationErrorType errorType; - - public IndexVerificationOutputRowBuilder setDataTableName(String dataTableName) { - this.dataTableName = dataTableName; - return this; - } - - public IndexVerificationOutputRowBuilder setIndexTableName(String indexTableName) { - this.indexTableName = indexTableName; - return this; - } - - public IndexVerificationOutputRowBuilder setScanMaxTimestamp(Long scanMaxTimestamp) { - this.scanMaxTimestamp = scanMaxTimestamp; - return this; - } - - public IndexVerificationOutputRowBuilder setIndexTableRowKey(byte[] indexTableRowKey) { - this.indexTableRowKey = indexTableRowKey; - return this; - } - - public IndexVerificationOutputRowBuilder setDataTableRowKey(byte[] dataTableRowKey){ - this.dataTableRowKey = dataTableRowKey; - return this; - } - - public IndexVerificationOutputRowBuilder setDataTableRowTimestamp(long dataTableRowTimestamp) { - this.dataTableRowTimestamp = dataTableRowTimestamp; - return this; - } - - public IndexVerificationOutputRowBuilder setIndexTableRowTimestamp(long indexTableRowTimestamp) { - this.indexTableRowTimestamp = indexTableRowTimestamp; - return this; - } - - public IndexVerificationOutputRowBuilder setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - return this; - } - - public IndexVerificationOutputRowBuilder setExpectedValue(byte[] expectedValue) { - this.expectedValue = expectedValue; - return this; - } - - public IndexVerificationOutputRowBuilder setActualValue(byte[] actualValue) { - this.actualValue = actualValue; 
- return this; - } - - public IndexVerificationOutputRowBuilder setPhaseValue(byte[] phaseValue) { - this.phaseValue = phaseValue; - return this; - } - - public IndexVerificationOutputRowBuilder setErrorType(IndexVerificationErrorType errorType) { - this.errorType = errorType; - return this; - } - - public IndexVerificationOutputRow build() { - return new IndexVerificationOutputRow(dataTableName, indexTableName, dataTableRowKey, - scanMaxTimestamp, indexTableRowKey, dataTableRowTimestamp, indexTableRowTimestamp, - errorMessage, expectedValue, actualValue, phaseValue, errorType); - } + public IndexVerificationOutputRow build() { + return new IndexVerificationOutputRow(dataTableName, indexTableName, dataTableRowKey, + scanMaxTimestamp, indexTableRowKey, dataTableRowTimestamp, indexTableRowTimestamp, + errorMessage, expectedValue, actualValue, phaseValue, errorType); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationResultRepository.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationResultRepository.java index c00b2ae9208..f1cc4891519 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationResultRepository.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/IndexVerificationResultRepository.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,11 @@ */ package org.apache.phoenix.mapreduce.index; -import org.apache.hadoop.hbase.Cell; +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -45,371 +48,396 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; - public class IndexVerificationResultRepository implements AutoCloseable { - private static final Logger LOGGER = LoggerFactory.getLogger(IndexVerificationResultRepository.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(IndexVerificationResultRepository.class); - public static final String RUN_STATUS_SKIPPED = "Skipped"; - public static final String RUN_STATUS_EXECUTED = "Executed"; - private Table resultTable; - private Table indexTable; - public static final String ROW_KEY_SEPARATOR = "|"; - public static final byte[] ROW_KEY_SEPARATOR_BYTE = Bytes.toBytes(ROW_KEY_SEPARATOR); - public final static String RESULT_TABLE_NAME = "PHOENIX_INDEX_TOOL_RESULT"; - public final static byte[] RESULT_TABLE_NAME_BYTES = Bytes.toBytes(RESULT_TABLE_NAME); - public final static byte[] RESULT_TABLE_COLUMN_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; - public final static String SCANNED_DATA_ROW_COUNT = "ScannedDataRowCount"; - public final static byte[] SCANNED_DATA_ROW_COUNT_BYTES = Bytes.toBytes(SCANNED_DATA_ROW_COUNT); - public final static String REBUILT_INDEX_ROW_COUNT = "RebuiltIndexRowCount"; - public final static byte[] REBUILT_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(REBUILT_INDEX_ROW_COUNT); - public final static 
String SHOULD_RETRY = "ShouldRetry"; - public final static byte[] SHOULD_RETRY_BYTES = Bytes.toBytes(SHOULD_RETRY); - public final static String BEFORE_REBUILD_VALID_INDEX_ROW_COUNT = - "BeforeRebuildValidIndexRowCount"; - public final static byte[] BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_VALID_INDEX_ROW_COUNT); - private static final String INDEX_TOOL_RUN_STATUS = "IndexToolRunStatus"; - public final static byte[] INDEX_TOOL_RUN_STATUS_BYTES = Bytes.toBytes(INDEX_TOOL_RUN_STATUS); - public final static String BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT = - "BeforeRebuildExpiredIndexRowCount"; - public final static byte[] BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT = - "BeforeRebuildMissingIndexRowCount"; - public final static byte[] BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT = - "BeforeRebuildInvalidIndexRowCount"; - public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT = - "BeforeRebuildUnverifiedIndexRowCount"; - public final static byte[] BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_OLD_INDEX_ROW_COUNT = - "BeforeRebuildOldIndexRowCount"; - public final static byte[] BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_OLD_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT = - "BeforeRebuildUnknownIndexRowCount"; - public final static byte[] BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_VALID_INDEX_ROW_COUNT = - "AfterRebuildValidIndexRowCount"; - public final static byte[] AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(AFTER_REBUILD_VALID_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT = - "AfterRebuildExpiredIndexRowCount"; - public final static byte[] AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_MISSING_INDEX_ROW_COUNT = - "AfterRebuildMissingIndexRowCount"; - public final static byte[] AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(AFTER_REBUILD_MISSING_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT = - "AfterRebuildInvalidIndexRowCount"; - public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT = - "BeforeRebuildBeyondMaxLookBackMissingIndexRowCount"; - public final static byte[] BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT = - "BeforeRebuildBeyondMaxLookBackInvalidIndexRowCount"; - public final static byte[] BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT); + public static final String 
RUN_STATUS_SKIPPED = "Skipped"; + public static final String RUN_STATUS_EXECUTED = "Executed"; + private Table resultTable; + private Table indexTable; + public static final String ROW_KEY_SEPARATOR = "|"; + public static final byte[] ROW_KEY_SEPARATOR_BYTE = Bytes.toBytes(ROW_KEY_SEPARATOR); + public final static String RESULT_TABLE_NAME = "PHOENIX_INDEX_TOOL_RESULT"; + public final static byte[] RESULT_TABLE_NAME_BYTES = Bytes.toBytes(RESULT_TABLE_NAME); + public final static byte[] RESULT_TABLE_COLUMN_FAMILY = + QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES; + public final static String SCANNED_DATA_ROW_COUNT = "ScannedDataRowCount"; + public final static byte[] SCANNED_DATA_ROW_COUNT_BYTES = Bytes.toBytes(SCANNED_DATA_ROW_COUNT); + public final static String REBUILT_INDEX_ROW_COUNT = "RebuiltIndexRowCount"; + public final static byte[] REBUILT_INDEX_ROW_COUNT_BYTES = Bytes.toBytes(REBUILT_INDEX_ROW_COUNT); + public final static String SHOULD_RETRY = "ShouldRetry"; + public final static byte[] SHOULD_RETRY_BYTES = Bytes.toBytes(SHOULD_RETRY); + public final static String BEFORE_REBUILD_VALID_INDEX_ROW_COUNT = + "BeforeRebuildValidIndexRowCount"; + public final static byte[] BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_VALID_INDEX_ROW_COUNT); + private static final String INDEX_TOOL_RUN_STATUS = "IndexToolRunStatus"; + public final static byte[] INDEX_TOOL_RUN_STATUS_BYTES = Bytes.toBytes(INDEX_TOOL_RUN_STATUS); + public final static String BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT = + "BeforeRebuildExpiredIndexRowCount"; + public final static byte[] BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT = + "BeforeRebuildMissingIndexRowCount"; + public final static byte[] BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT = + "BeforeRebuildInvalidIndexRowCount"; + public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT = + "BeforeRebuildUnverifiedIndexRowCount"; + public final static byte[] BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_OLD_INDEX_ROW_COUNT = "BeforeRebuildOldIndexRowCount"; + public final static byte[] BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_OLD_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT = + "BeforeRebuildUnknownIndexRowCount"; + public final static byte[] BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_VALID_INDEX_ROW_COUNT = "AfterRebuildValidIndexRowCount"; + public final static byte[] AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_VALID_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT = + "AfterRebuildExpiredIndexRowCount"; + public final static byte[] AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_MISSING_INDEX_ROW_COUNT = + "AfterRebuildMissingIndexRowCount"; + public final static byte[] 
AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_MISSING_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT = + "AfterRebuildInvalidIndexRowCount"; + public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT = + "BeforeRebuildBeyondMaxLookBackMissingIndexRowCount"; + public final static byte[] BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT); + public final static String BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT = + "BeforeRebuildBeyondMaxLookBackInvalidIndexRowCount"; + public final static byte[] BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT = - "AfterRebuildBeyondMaxLookBackMissingIndexRowCount"; - public final static byte[] AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT); - public final static String AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT = - "AfterRebuildBeyondMaxLookBackInvalidIndexRowCount"; - public final static byte[] AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT = + "AfterRebuildBeyondMaxLookBackMissingIndexRowCount"; + public final static byte[] AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT); + public final static String AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT = + "AfterRebuildBeyondMaxLookBackInvalidIndexRowCount"; + public final static byte[] AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT); - public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS = "BeforeRebuildInvalidIndexRowCountCozExtraCells"; - public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES = Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS); - public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS = "BeforeRebuildInvalidIndexRowCountCozMissingCells"; - public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES = Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS); + public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS = + "BeforeRebuildInvalidIndexRowCountCozExtraCells"; + public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES = + Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS); + public final static String BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS = + "BeforeRebuildInvalidIndexRowCountCozMissingCells"; + public final static byte[] BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES = + Bytes.toBytes(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS); - public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS = 
"AfterRebuildInvalidIndexRowCountCozExtraCells"; - public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES = Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS); - public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS = "AfterRebuildInvalidIndexRowCountCozMissingCells"; - public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES = Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS); + public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS = + "AfterRebuildInvalidIndexRowCountCozExtraCells"; + public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES = + Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS); + public final static String AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS = + "AfterRebuildInvalidIndexRowCountCozMissingCells"; + public final static byte[] AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES = + Bytes.toBytes(AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS); - public final static String BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT = "BeforeRepairExtraVerifiedIndexRowCount"; - public final static byte[] BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT); - public final static String BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT = "BeforeRepairExtraUnverifiedIndexRowCount"; - public final static byte[] BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT); + public final static String BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT = + "BeforeRepairExtraVerifiedIndexRowCount"; + public final static byte[] BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT); + public final static String BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT = + "BeforeRepairExtraUnverifiedIndexRowCount"; + public final static byte[] BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT); - public final static String AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT = "AfterRepairExtraVerifiedIndexRowCount"; - public final static byte[] AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT); - public final static String AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT = "AfterRepairExtraUnverifiedIndexRowCount"; - public final static byte[] AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES = - Bytes.toBytes(AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT); + public final static String AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT = + "AfterRepairExtraVerifiedIndexRowCount"; + public final static byte[] AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT); + public final static String AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT = + "AfterRepairExtraUnverifiedIndexRowCount"; + public final static byte[] AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES = + Bytes.toBytes(AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT); - /*** - * Only usable for read / create methods. To write use setResultTable and setIndexTable first - */ - public IndexVerificationResultRepository(){ + /*** + * Only usable for read / create methods. 
To write use setResultTable and setIndexTable first + */ + public IndexVerificationResultRepository() { - } + } - public IndexVerificationResultRepository(Connection conn, byte[] indexNameBytes) throws SQLException { - resultTable = getTable(conn, RESULT_TABLE_NAME_BYTES); - indexTable = getTable(conn, indexNameBytes); - } + public IndexVerificationResultRepository(Connection conn, byte[] indexNameBytes) + throws SQLException { + resultTable = getTable(conn, RESULT_TABLE_NAME_BYTES); + indexTable = getTable(conn, indexNameBytes); + } - public IndexVerificationResultRepository(byte[] indexName, - HTableFactory hTableFactory) throws IOException { - resultTable = hTableFactory.getTable(new ImmutableBytesPtr(RESULT_TABLE_NAME_BYTES)); - indexTable = hTableFactory.getTable(new ImmutableBytesPtr(indexName)); - } + public IndexVerificationResultRepository(byte[] indexName, HTableFactory hTableFactory) + throws IOException { + resultTable = hTableFactory.getTable(new ImmutableBytesPtr(RESULT_TABLE_NAME_BYTES)); + indexTable = hTableFactory.getTable(new ImmutableBytesPtr(indexName)); + } - public void createResultTable(Connection connection) throws IOException, SQLException { - ConnectionQueryServices queryServices = connection.unwrap(PhoenixConnection.class).getQueryServices(); - try (Admin admin = queryServices.getAdmin()) { - TableName resultTableName = TableName.valueOf(RESULT_TABLE_NAME); - if (!admin.tableExists(resultTableName)) { - ColumnFamilyDescriptor columnDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(RESULT_TABLE_COLUMN_FAMILY) - .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL) - .build(); - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(resultTableName) - .setColumnFamily(columnDescriptor).build(); - try { - admin.createTable(tableDescriptor); - } catch (TableExistsException e) { - LOGGER.warn("Table exists, ignoring", e); - } - resultTable = admin.getConnection().getTable(resultTableName); - } + public void createResultTable(Connection connection) throws IOException, SQLException { + ConnectionQueryServices queryServices = + connection.unwrap(PhoenixConnection.class).getQueryServices(); + try (Admin admin = queryServices.getAdmin()) { + TableName resultTableName = TableName.valueOf(RESULT_TABLE_NAME); + if (!admin.tableExists(resultTableName)) { + ColumnFamilyDescriptor columnDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(RESULT_TABLE_COLUMN_FAMILY) + .setTimeToLive(MetaDataProtocol.DEFAULT_LOG_TTL).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(resultTableName) + .setColumnFamily(columnDescriptor).build(); + try { + admin.createTable(tableDescriptor); + } catch (TableExistsException e) { + LOGGER.warn("Table exists, ignoring", e); } + resultTable = admin.getConnection().getTable(resultTableName); + } } + } - private static byte[] generatePartialResultTableRowKey(long ts, byte[] indexTableName) { - byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); - int targetOffset = 0; - // The row key for the result table : timestamp | index table name | datable table region name | - // scan start row | scan stop row - byte[] partialRowKey = new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length - + indexTableName.length]; - Bytes.putBytes(partialRowKey, targetOffset, keyPrefix, 0, keyPrefix.length); - targetOffset += keyPrefix.length; - Bytes.putBytes(partialRowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - 
Bytes.putBytes(partialRowKey, targetOffset, indexTableName, 0, indexTableName.length); - return partialRowKey; - } + private static byte[] generatePartialResultTableRowKey(long ts, byte[] indexTableName) { + byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); + int targetOffset = 0; + // The row key for the result table : timestamp | index table name | datable table region name | + // scan start row | scan stop row + byte[] partialRowKey = + new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length]; + Bytes.putBytes(partialRowKey, targetOffset, keyPrefix, 0, keyPrefix.length); + targetOffset += keyPrefix.length; + Bytes.putBytes(partialRowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, + ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(partialRowKey, targetOffset, indexTableName, 0, indexTableName.length); + return partialRowKey; + } - private static byte[] generateResultTableRowKey(long ts, byte[] indexTableName, byte [] regionName, - byte[] startRow, byte[] stopRow) { - byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); - int targetOffset = 0; - // The row key for the result table : timestamp | index table name | datable table region name | - // scan start row | scan stop row - byte[] rowKey = new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length + - ROW_KEY_SEPARATOR_BYTE.length + regionName.length + ROW_KEY_SEPARATOR_BYTE.length + - startRow.length + ROW_KEY_SEPARATOR_BYTE.length + stopRow.length]; - Bytes.putBytes(rowKey, targetOffset, keyPrefix, 0, keyPrefix.length); - targetOffset += keyPrefix.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, indexTableName, 0, indexTableName.length); - targetOffset += indexTableName.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, regionName, 0, regionName.length); - targetOffset += regionName.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, startRow, 0, startRow.length); - targetOffset += startRow.length; - Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); - targetOffset += ROW_KEY_SEPARATOR_BYTE.length; - Bytes.putBytes(rowKey, targetOffset, stopRow, 0, stopRow.length); - return rowKey; - } + private static byte[] generateResultTableRowKey(long ts, byte[] indexTableName, byte[] regionName, + byte[] startRow, byte[] stopRow) { + byte[] keyPrefix = Bytes.toBytes(Long.toString(ts)); + int targetOffset = 0; + // The row key for the result table : timestamp | index table name | datable table region name | + // scan start row | scan stop row + byte[] rowKey = + new byte[keyPrefix.length + ROW_KEY_SEPARATOR_BYTE.length + indexTableName.length + + ROW_KEY_SEPARATOR_BYTE.length + regionName.length + ROW_KEY_SEPARATOR_BYTE.length + + startRow.length + ROW_KEY_SEPARATOR_BYTE.length + stopRow.length]; + Bytes.putBytes(rowKey, targetOffset, keyPrefix, 0, keyPrefix.length); + targetOffset += keyPrefix.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, 
targetOffset, indexTableName, 0, indexTableName.length); + targetOffset += indexTableName.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, targetOffset, regionName, 0, regionName.length); + targetOffset += regionName.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, targetOffset, startRow, 0, startRow.length); + targetOffset += startRow.length; + Bytes.putBytes(rowKey, targetOffset, ROW_KEY_SEPARATOR_BYTE, 0, ROW_KEY_SEPARATOR_BYTE.length); + targetOffset += ROW_KEY_SEPARATOR_BYTE.length; + Bytes.putBytes(rowKey, targetOffset, stopRow, 0, stopRow.length); + return rowKey; + } - public void logToIndexToolResultTable(IndexToolVerificationResult verificationResult, - IndexTool.IndexVerifyType verifyType, byte[] region) throws IOException { - logToIndexToolResultTable(verificationResult, verifyType, region, false, false); - } + public void logToIndexToolResultTable(IndexToolVerificationResult verificationResult, + IndexTool.IndexVerifyType verifyType, byte[] region) throws IOException { + logToIndexToolResultTable(verificationResult, verifyType, region, false, false); + } - public void logToIndexToolResultTable(IndexToolVerificationResult verificationResult, - IndexTool.IndexVerifyType verifyType, byte[] region, boolean skipped, boolean shouldRetry) throws IOException { - long scanMaxTs = verificationResult.getScanMaxTs(); - byte[] rowKey = generateResultTableRowKey(scanMaxTs, indexTable.getName().toBytes(), - region, verificationResult.getStartRow(), - verificationResult.getStopRow()); - Put put = new Put(rowKey); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, SCANNED_DATA_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getScannedDataRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, REBUILT_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getRebuiltIndexRowCount()))); + public void logToIndexToolResultTable(IndexToolVerificationResult verificationResult, + IndexTool.IndexVerifyType verifyType, byte[] region, boolean skipped, boolean shouldRetry) + throws IOException { + long scanMaxTs = verificationResult.getScanMaxTs(); + byte[] rowKey = generateResultTableRowKey(scanMaxTs, indexTable.getName().toBytes(), region, + verificationResult.getStartRow(), verificationResult.getStopRow()); + Put put = new Put(rowKey); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, SCANNED_DATA_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getScannedDataRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, REBUILT_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getRebuiltIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, SHOULD_RETRY_BYTES, Bytes.toBytes(shouldRetry)); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, INDEX_TOOL_RUN_STATUS_BYTES, - Bytes.toBytes(skipped ? 
RUN_STATUS_SKIPPED : RUN_STATUS_EXECUTED)); - if (verifyType == IndexTool.IndexVerifyType.BEFORE || verifyType == IndexTool.IndexVerifyType.BOTH || - verifyType == IndexTool.IndexVerifyType.ONLY) { - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildValidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildExpiredIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildMissingIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildInvalidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBefore().getBeyondMaxLookBackMissingIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBefore().getBeyondMaxLookBackInvalidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeIndexHasExtraCellsCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeIndexHasMissingCellsCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildUnverifiedIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildOldIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildUnknownIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRepairExtraVerifiedIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getBeforeRepairExtraUnverifiedIndexRowCount()))); - } - if (verifyType == IndexTool.IndexVerifyType.AFTER || verifyType == IndexTool.IndexVerifyType.BOTH) { - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildValidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildExpiredIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildMissingIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildInvalidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, 
AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfter().getBeyondMaxLookBackMissingIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfter().getBeyondMaxLookBackInvalidIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterIndexHasExtraCellsCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterIndexHasMissingCellsCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRepairExtraVerifiedIndexRowCount()))); - put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES, - Bytes.toBytes(Long.toString(verificationResult.getAfterRepairExtraUnverifiedIndexRowCount()))); - } - resultTable.put(put); - } - - public Table getTable(Connection conn, byte[] tableName) throws SQLException { - return conn.unwrap(PhoenixConnection.class).getQueryServices() - .getTable(tableName); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, SHOULD_RETRY_BYTES, Bytes.toBytes(shouldRetry)); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, INDEX_TOOL_RUN_STATUS_BYTES, + Bytes.toBytes(skipped ? RUN_STATUS_SKIPPED : RUN_STATUS_EXECUTED)); + if ( + verifyType == IndexTool.IndexVerifyType.BEFORE || verifyType == IndexTool.IndexVerifyType.BOTH + || verifyType == IndexTool.IndexVerifyType.ONLY + ) { + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_VALID_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildValidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildExpiredIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildMissingIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildInvalidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES, Bytes.toBytes(Long + .toString(verificationResult.getBefore().getBeyondMaxLookBackMissingIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES, Bytes.toBytes(Long + .toString(verificationResult.getBefore().getBeyondMaxLookBackInvalidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeIndexHasExtraCellsCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeIndexHasMissingCellsCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildUnverifiedIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, 
BEFORE_REBUILD_OLD_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildOldIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getBeforeRebuildUnknownIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES, + Bytes + .toBytes(Long.toString(verificationResult.getBeforeRepairExtraVerifiedIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES, Bytes.toBytes( + Long.toString(verificationResult.getBeforeRepairExtraUnverifiedIndexRowCount()))); } - - /** - * Get aggregated verification results from - * {@link #aggregateVerificationResult(Table, IndexToolVerificationResult, - * Scan)} - * Provided Table reference should be closed by caller. - * - * @param htable Table reference. It is caller's responsibility to close - * this reference. - * @param ts timestamp used for Scan's startRow - * @return Verification result - * @throws IOException if something goes wrong while retrieving verification - * results. - */ - public IndexToolVerificationResult getVerificationResult(Table htable, long ts) - throws IOException { - byte[] startRowKey = Bytes.toBytes(Long.toString(ts)); - byte[] stopRowKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(startRowKey); - IndexToolVerificationResult verificationResult = new IndexToolVerificationResult(ts); - Scan scan = new Scan(); - scan.withStartRow(startRowKey); - scan.withStopRow(stopRowKey); - return aggregateVerificationResult(htable, verificationResult, scan); + if ( + verifyType == IndexTool.IndexVerifyType.AFTER || verifyType == IndexTool.IndexVerifyType.BOTH + ) { + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_VALID_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildValidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildExpiredIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_MISSING_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildMissingIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterRebuildInvalidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT_BYTES, Bytes.toBytes( + Long.toString(verificationResult.getAfter().getBeyondMaxLookBackMissingIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT_BYTES, Bytes.toBytes( + Long.toString(verificationResult.getAfter().getBeyondMaxLookBackInvalidIndexRowCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterIndexHasExtraCellsCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, + AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS_BYTES, + Bytes.toBytes(Long.toString(verificationResult.getAfterIndexHasMissingCellsCount()))); + put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT_BYTES, + Bytes + .toBytes(Long.toString(verificationResult.getAfterRepairExtraVerifiedIndexRowCount()))); + 
put.addColumn(RESULT_TABLE_COLUMN_FAMILY, AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT_BYTES, + Bytes + .toBytes(Long.toString(verificationResult.getAfterRepairExtraUnverifiedIndexRowCount()))); } + resultTable.put(put); + } - private IndexToolVerificationResult aggregateVerificationResult( - Table hTable, IndexToolVerificationResult verificationResult, - Scan scan) throws IOException { - try (ResultScanner scanner = hTable.getScanner(scan)) { - for (Result result = scanner.next(); result != null; - result = scanner.next()) { - boolean isFirst = true; - for (Cell cell : result.rawCells()) { - if (isFirst) { - byte[][] rowKeyParts = ByteUtil.splitArrayBySeparator( - result.getRow(), ROW_KEY_SEPARATOR_BYTE[0]); - verificationResult.setStartRow(rowKeyParts[3]); - verificationResult.setStopRow(rowKeyParts[4]); - isFirst = false; - } - verificationResult.update(cell); - } - } - } - return verificationResult; - } + public Table getTable(Connection conn, byte[] tableName) throws SQLException { + return conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableName); + } - public IndexToolVerificationResult getVerificationResult(Connection conn, - long ts, byte[] indexTableName) throws IOException, SQLException { - try (Table hTable = getTable(conn, RESULT_TABLE_NAME_BYTES)) { - byte[] startRowKey = generatePartialResultTableRowKey(ts, - indexTableName); - byte[] stopRowKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix( - startRowKey); - IndexToolVerificationResult verificationResult = - new IndexToolVerificationResult(ts); - Scan scan = new Scan(); - scan.withStartRow(startRowKey); - scan.withStopRow(stopRowKey); - return aggregateVerificationResult(hTable, verificationResult, - scan); - } - } + /** + * Get aggregated verification results from + * {@link #aggregateVerificationResult(Table, IndexToolVerificationResult, Scan)} Provided Table + * reference should be closed by caller. + * @param htable Table reference. It is caller's responsibility to close this reference. + * @param ts timestamp used for Scan's startRow + * @return Verification result + * @throws IOException if something goes wrong while retrieving verification results. 
+ */ + public IndexToolVerificationResult getVerificationResult(Table htable, long ts) + throws IOException { + byte[] startRowKey = Bytes.toBytes(Long.toString(ts)); + byte[] stopRowKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(startRowKey); + IndexToolVerificationResult verificationResult = new IndexToolVerificationResult(ts); + Scan scan = new Scan(); + scan.withStartRow(startRowKey); + scan.withStopRow(stopRowKey); + return aggregateVerificationResult(htable, verificationResult, scan); + } - private IndexToolVerificationResult getVerificationResult(Table htable, byte [] oldRowKey, Scan scan ) - throws IOException { - IndexToolVerificationResult verificationResult = null; - Result result = htable.get(new Get(oldRowKey)); - if(!result.isEmpty()) { - byte[][] rowKeyParts = ByteUtil.splitArrayBySeparator(result.getRow(), ROW_KEY_SEPARATOR_BYTE[0]); - verificationResult = new IndexToolVerificationResult(scan); + private IndexToolVerificationResult aggregateVerificationResult(Table hTable, + IndexToolVerificationResult verificationResult, Scan scan) throws IOException { + try (ResultScanner scanner = hTable.getScanner(scan)) { + for (Result result = scanner.next(); result != null; result = scanner.next()) { + boolean isFirst = true; + for (Cell cell : result.rawCells()) { + if (isFirst) { + byte[][] rowKeyParts = + ByteUtil.splitArrayBySeparator(result.getRow(), ROW_KEY_SEPARATOR_BYTE[0]); verificationResult.setStartRow(rowKeyParts[3]); verificationResult.setStopRow(rowKeyParts[4]); - for (Cell cell : result.rawCells()) { - verificationResult.update(cell); - } + isFirst = false; + } + verificationResult.update(cell); } - return verificationResult; + } } + return verificationResult; + } - public void close() throws IOException { - if (resultTable != null) { - resultTable.close(); - } - if (indexTable != null) { - indexTable.close(); - } + public IndexToolVerificationResult getVerificationResult(Connection conn, long ts, + byte[] indexTableName) throws IOException, SQLException { + try (Table hTable = getTable(conn, RESULT_TABLE_NAME_BYTES)) { + byte[] startRowKey = generatePartialResultTableRowKey(ts, indexTableName); + byte[] stopRowKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(startRowKey); + IndexToolVerificationResult verificationResult = new IndexToolVerificationResult(ts); + Scan scan = new Scan(); + scan.withStartRow(startRowKey); + scan.withStopRow(stopRowKey); + return aggregateVerificationResult(hTable, verificationResult, scan); } + } - public void setResultTable(Table resultTable) { - this.resultTable = resultTable; + private IndexToolVerificationResult getVerificationResult(Table htable, byte[] oldRowKey, + Scan scan) throws IOException { + IndexToolVerificationResult verificationResult = null; + Result result = htable.get(new Get(oldRowKey)); + if (!result.isEmpty()) { + byte[][] rowKeyParts = + ByteUtil.splitArrayBySeparator(result.getRow(), ROW_KEY_SEPARATOR_BYTE[0]); + verificationResult = new IndexToolVerificationResult(scan); + verificationResult.setStartRow(rowKeyParts[3]); + verificationResult.setStopRow(rowKeyParts[4]); + for (Cell cell : result.rawCells()) { + verificationResult.update(cell); + } } + return verificationResult; + } - public void setIndexTable(Table indexTable) { - this.indexTable = indexTable; + public void close() throws IOException { + if (resultTable != null) { + resultTable.close(); } + if (indexTable != null) { + indexTable.close(); + } + } - public IndexToolVerificationResult getVerificationResult(Long ts, Scan scan, Region region, 
byte[] indexTableName) throws IOException { - byte [] rowKey = generateResultTableRowKey(ts, - indexTableName, region.getRegionInfo().getRegionName(), - scan.getStartRow(), scan.getStopRow()); - return getVerificationResult(resultTable, rowKey, scan); + public void setResultTable(Table resultTable) { + this.resultTable = resultTable; + } - } -} + public void setIndexTable(Table indexTable) { + this.indexTable = indexTable; + } + + public IndexToolVerificationResult getVerificationResult(Long ts, Scan scan, Region region, + byte[] indexTableName) throws IOException { + byte[] rowKey = generateResultTableRowKey(ts, indexTableName, + region.getRegionInfo().getRegionName(), scan.getStartRow(), scan.getStopRow()); + return getVerificationResult(resultTable, rowKey, scan); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexDBWritable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexDBWritable.java index c333a8e6340..a539bb90658 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexDBWritable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexDBWritable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,77 +24,75 @@ import org.apache.hadoop.mapreduce.lib.db.DBWritable; import org.apache.phoenix.jdbc.PhoenixResultSet; -import org.apache.phoenix.util.ColumnInfo; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ColumnInfo; /** * A {@link DBWritable} class that reads and write records. - * - * */ -public class PhoenixIndexDBWritable implements DBWritable { - - private List columnMetadata; - - private List values; - - private int columnCount = -1; - - private long rowTs = -1; - - @Override - public void write(PreparedStatement statement) throws SQLException { - Preconditions.checkNotNull(values); - Preconditions.checkNotNull(columnMetadata); - for(int i = 0 ; i < values.size() ; i++) { - Object value = values.get(i); - ColumnInfo columnInfo = columnMetadata.get(i); - if(value == null) { - statement.setNull(i + 1, columnInfo.getSqlType()); - } else { - statement.setObject(i + 1, value , columnInfo.getSqlType()); - } - } - - } +public class PhoenixIndexDBWritable implements DBWritable { - @Override - public void readFields(ResultSet resultSet) throws SQLException { - // we do this once per mapper. 
- if(columnCount == -1) { - this.columnCount = resultSet.getMetaData().getColumnCount(); - } - if (columnCount > 0) { - this.rowTs = resultSet.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp(); - } - values = Lists.newArrayListWithCapacity(columnCount); - for(int i = 0 ; i < columnCount ; i++) { - Object value = resultSet.getObject(i + 1); - values.add(value); - } - - } + private List columnMetadata; - public List getColumnMetadata() { - return columnMetadata; - } + private List values; - public void setColumnMetadata(List columnMetadata) { - this.columnMetadata = columnMetadata; - } + private int columnCount = -1; - public List getValues() { - return values; - } + private long rowTs = -1; - public void setValues(List values) { - this.values = values; + @Override + public void write(PreparedStatement statement) throws SQLException { + Preconditions.checkNotNull(values); + Preconditions.checkNotNull(columnMetadata); + for (int i = 0; i < values.size(); i++) { + Object value = values.get(i); + ColumnInfo columnInfo = columnMetadata.get(i); + if (value == null) { + statement.setNull(i + 1, columnInfo.getSqlType()); + } else { + statement.setObject(i + 1, value, columnInfo.getSqlType()); + } } - public long getRowTs() { - return rowTs; + } + + @Override + public void readFields(ResultSet resultSet) throws SQLException { + // we do this once per mapper. + if (columnCount == -1) { + this.columnCount = resultSet.getMetaData().getColumnCount(); + } + if (columnCount > 0) { + this.rowTs = + resultSet.unwrap(PhoenixResultSet.class).getCurrentRow().getValue(0).getTimestamp(); + } + values = Lists.newArrayListWithCapacity(columnCount); + for (int i = 0; i < columnCount; i++) { + Object value = resultSet.getObject(i + 1); + values.add(value); } -} \ No newline at end of file + } + + public List getColumnMetadata() { + return columnMetadata; + } + + public void setColumnMetadata(List columnMetadata) { + this.columnMetadata = columnMetadata; + } + + public List getValues() { + return values; + } + + public void setValues(List values) { + this.values = values; + } + + public long getRowTs() { + return rowTs; + } + +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java index 9d2fdcad36c..55305f773ae 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -54,165 +54,161 @@ /** * Mapper that hands over rows from data table to the index table. 
*/ -public class PhoenixIndexImportDirectMapper extends - Mapper { +public class PhoenixIndexImportDirectMapper + extends Mapper { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixIndexImportDirectMapper.class); - private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable(); + private final PhoenixIndexDBWritable indxWritable = new PhoenixIndexDBWritable(); - private List indxTblColumnMetadata; + private List indxTblColumnMetadata; - private Connection connection; + private Connection connection; - private PreparedStatement pStatement; + private PreparedStatement pStatement; - private DirectHTableWriter writer; + private DirectHTableWriter writer; - private int batchSize; - private long batchSizeBytes; + private int batchSize; + private long batchSizeBytes; - private MutationState mutationState; - private int currentBatchCount = 0; + private MutationState mutationState; + private int currentBatchCount = 0; - private IndexStatusUpdater indexStatusUpdater; + private IndexStatusUpdater indexStatusUpdater; - @Override - protected void setup(final Context context) throws IOException, InterruptedException { - super.setup(context); - final Configuration configuration = context.getConfiguration(); - writer = new DirectHTableWriter(configuration); + @Override + protected void setup(final Context context) throws IOException, InterruptedException { + super.setup(context); + final Configuration configuration = context.getConfiguration(); + writer = new DirectHTableWriter(configuration); - try { - indxTblColumnMetadata = - PhoenixConfigurationUtil.getUpsertColumnMetadataList(configuration); - indxWritable.setColumnMetadata(indxTblColumnMetadata); + try { + indxTblColumnMetadata = PhoenixConfigurationUtil.getUpsertColumnMetadataList(configuration); + indxWritable.setColumnMetadata(indxTblColumnMetadata); - final Properties overrideProps = new Properties(); - String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); - String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - if (txScnValue == null && scn != null) { - overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); - } - connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); - connection.setAutoCommit(false); - // Get BatchSize, which is in terms of rows - ConnectionQueryServices services = ((PhoenixConnection) connection).getQueryServices(); - int maxSize = - services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - batchSize = Math.min(((PhoenixConnection) connection).getMutateBatchSize(), maxSize); + final Properties overrideProps = new Properties(); + String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); + String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + if (txScnValue == null && scn != null) { + overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); + } + connection = ConnectionUtil.getOutputConnection(configuration, overrideProps); + connection.setAutoCommit(false); + // Get BatchSize, which is in terms of rows + ConnectionQueryServices services = ((PhoenixConnection) connection).getQueryServices(); + int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + batchSize = Math.min(((PhoenixConnection) connection).getMutateBatchSize(), 
maxSize); - //Get batch size in terms of bytes - batchSizeBytes = ((PhoenixConnection) connection).getMutateBatchSizeBytes(); + // Get batch size in terms of bytes + batchSizeBytes = ((PhoenixConnection) connection).getMutateBatchSizeBytes(); - LOGGER.info("Mutation Batch Size = " + batchSize); + LOGGER.info("Mutation Batch Size = " + batchSize); - final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); - this.pStatement = connection.prepareStatement(upsertQuery); + final String upsertQuery = PhoenixConfigurationUtil.getUpsertStatement(configuration); + this.pStatement = connection.prepareStatement(upsertQuery); - String indexTableName = - PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration); - PTable pIndexTable = connection.unwrap(PhoenixConnection.class).getTable( - indexTableName); + String indexTableName = PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration); + PTable pIndexTable = connection.unwrap(PhoenixConnection.class).getTable(indexTableName); - indexStatusUpdater = new IndexStatusUpdater( - SchemaUtil.getEmptyColumnFamily(pIndexTable), - EncodedColumnsUtil.getEmptyKeyValueInfo(pIndexTable).getFirst()); + indexStatusUpdater = new IndexStatusUpdater(SchemaUtil.getEmptyColumnFamily(pIndexTable), + EncodedColumnsUtil.getEmptyKeyValueInfo(pIndexTable).getFirst()); - } catch (Exception e) { - tryClosingResources(); - throw new RuntimeException(e); - } + } catch (Exception e) { + tryClosingResources(); + throw new RuntimeException(e); } - - @Override - protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context) - throws IOException, InterruptedException { - - try { - currentBatchCount++; - final List values = record.getValues(); - indxWritable.setValues(values); - indxWritable.write(this.pStatement); - this.pStatement.execute(); - - final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class); - MutationState currentMutationState = pconn.getMutationState(); - if (mutationState == null) { - mutationState = currentMutationState; - } - // Keep accumulating Mutations till batch size - mutationState.join(currentMutationState); - // Write Mutation Batch - if (currentBatchCount % batchSize == 0) { - writeBatch(mutationState, context); - mutationState = null; - } - - // Make sure progress is reported to Application Master. - context.progress(); - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount); - throw new RuntimeException(e); - } - context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); + } + + @Override + protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context) + throws IOException, InterruptedException { + + try { + currentBatchCount++; + final List values = record.getValues(); + indxWritable.setValues(values); + indxWritable.write(this.pStatement); + this.pStatement.execute(); + + final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class); + MutationState currentMutationState = pconn.getMutationState(); + if (mutationState == null) { + mutationState = currentMutationState; + } + // Keep accumulating Mutations till batch size + mutationState.join(currentMutationState); + // Write Mutation Batch + if (currentBatchCount % batchSize == 0) { + writeBatch(mutationState, context); + mutationState = null; + } + + // Make sure progress is reported to Application Master. 
+ context.progress(); + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount); + throw new RuntimeException(e); } - - private void writeBatch(MutationState mutationState, Context context) throws IOException, - SQLException, InterruptedException { - final Iterator>> iterator = mutationState.toMutations(true, null); - while (iterator.hasNext()) { - Pair> mutationPair = iterator.next(); - List batchMutations = mutationPair.getSecond(); - List> batchOfBatchMutations = - MutationState.getMutationBatchList(batchSize, batchSizeBytes, batchMutations); - for (List mutationList : batchOfBatchMutations) { - for (Mutation mutation: mutationList) { - indexStatusUpdater.setVerified(mutation.cellScanner()); - } - writer.write(mutationList); - } - context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment( - mutationPair.getSecond().size()); + context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); + } + + private void writeBatch(MutationState mutationState, Context context) + throws IOException, SQLException, InterruptedException { + final Iterator>> iterator = mutationState.toMutations(true, null); + while (iterator.hasNext()) { + Pair> mutationPair = iterator.next(); + List batchMutations = mutationPair.getSecond(); + List> batchOfBatchMutations = + MutationState.getMutationBatchList(batchSize, batchSizeBytes, batchMutations); + for (List mutationList : batchOfBatchMutations) { + for (Mutation mutation : mutationList) { + indexStatusUpdater.setVerified(mutation.cellScanner()); } - connection.rollback(); - currentBatchCount = 0; + writer.write(mutationList); + } + context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS) + .increment(mutationPair.getSecond().size()); } - - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - try { - // Write the last & final Mutation Batch - if (mutationState != null) { - writeBatch(mutationState, context); - } - // We are writing some dummy key-value as map output here so that we commit only one - // output to reducer. - context.write(new ImmutableBytesWritable( - UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), - new IntWritable(0)); - super.cleanup(context); - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount); - throw new RuntimeException(e); - } finally { - tryClosingResources(); - } + connection.rollback(); + currentBatchCount = 0; + } + + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + try { + // Write the last & final Mutation Batch + if (mutationState != null) { + writeBatch(mutationState, context); + } + // We are writing some dummy key-value as map output here so that we commit only one + // output to reducer. 
+ context.write( + new ImmutableBytesWritable(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), + new IntWritable(0)); + super.cleanup(context); + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount); + throw new RuntimeException(e); + } finally { + tryClosingResources(); } - - private void tryClosingResources() throws IOException { - if (this.connection != null) { - try { - this.connection.close(); - } catch (SQLException e) { - LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); - } - } - if (this.writer != null) { - this.writer.close(); - } + } + + private void tryClosingResources() throws IOException { + if (this.connection != null) { + try { + this.connection.close(); + } catch (SQLException e) { + LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); + } + } + if (this.writer != null) { + this.writer.close(); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java index ed011736866..a9e8540a1ab 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.mapreduce.index; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; + import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; @@ -43,176 +46,195 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID; - -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; - /** * Reducer class that does only one task and that is to update the index state of the table. 
*/ -public class PhoenixIndexImportDirectReducer extends - Reducer { - private AtomicBoolean calledOnce = new AtomicBoolean(false); - private IndexVerificationResultRepository resultRepository; - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class); - private String indexTableName; - private byte[] indexTableNameBytes; +public class PhoenixIndexImportDirectReducer + extends Reducer { + private AtomicBoolean calledOnce = new AtomicBoolean(false); + private IndexVerificationResultRepository resultRepository; + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class); + private String indexTableName; + private byte[] indexTableNameBytes; - protected void updateCounters(IndexTool.IndexVerifyType verifyType, - Reducer.Context context) - throws IOException { - Configuration configuration = context.getConfiguration(); - try (final Connection connection = ConnectionUtil.getInputConnection(configuration)) { - long ts = Long.parseLong(configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE)); - IndexToolVerificationResult verificationResult = - resultRepository.getVerificationResult(connection, ts, indexTableNameBytes); - context.getCounter(PhoenixIndexToolJobCounters.SCANNED_DATA_ROW_COUNT). - setValue(verificationResult.getScannedDataRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.REBUILT_INDEX_ROW_COUNT). - setValue(verificationResult.getRebuiltIndexRowCount()); - if (verifyType == IndexTool.IndexVerifyType.ONLY || verifyType == IndexTool.IndexVerifyType.BEFORE || - verifyType == IndexTool.IndexVerifyType.BOTH) { - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_VALID_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildValidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildExpiredIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildMissingIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildInvalidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildBeyondMaxLookBackMissingIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS). - setValue(verificationResult.getBeforeIndexHasExtraCellsCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS). - setValue(verificationResult.getBeforeIndexHasMissingCellsCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildUnverifiedIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_OLD_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRebuildOldIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT). 
- setValue(verificationResult.getBeforeRebuildUnknownIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRepairExtraVerifiedIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT). - setValue(verificationResult.getBeforeRepairExtraUnverifiedIndexRowCount()); - } - if (verifyType == IndexTool.IndexVerifyType.BOTH || verifyType == IndexTool.IndexVerifyType.AFTER) { - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_VALID_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildValidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildExpiredIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_MISSING_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildMissingIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildInvalidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildBeyondMaxLookBackMissingIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRebuildBeyondMaxLookBackInvalidIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS). - setValue(verificationResult.getAfterIndexHasExtraCellsCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS). - setValue(verificationResult.getAfterIndexHasMissingCellsCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRepairExtraVerifiedIndexRowCount()); - context.getCounter(PhoenixIndexToolJobCounters.AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT). - setValue(verificationResult.getAfterRepairExtraUnverifiedIndexRowCount()); - } - if (verificationResult.isVerificationFailed()) { - throw new IOException("Index verification failed! 
" + verificationResult); - } - } catch (Exception e) { - throw new IOException("Fail to get index verification result", e); - } + protected void updateCounters(IndexTool.IndexVerifyType verifyType, + Reducer.Context context) + throws IOException { + Configuration configuration = context.getConfiguration(); + try (final Connection connection = ConnectionUtil.getInputConnection(configuration)) { + long ts = Long.parseLong(configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE)); + IndexToolVerificationResult verificationResult = + resultRepository.getVerificationResult(connection, ts, indexTableNameBytes); + context.getCounter(PhoenixIndexToolJobCounters.SCANNED_DATA_ROW_COUNT) + .setValue(verificationResult.getScannedDataRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.REBUILT_INDEX_ROW_COUNT) + .setValue(verificationResult.getRebuiltIndexRowCount()); + if ( + verifyType == IndexTool.IndexVerifyType.ONLY + || verifyType == IndexTool.IndexVerifyType.BEFORE + || verifyType == IndexTool.IndexVerifyType.BOTH + ) { + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_VALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildValidIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildExpiredIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildMissingIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildInvalidIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildBeyondMaxLookBackMissingIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildBeyondMaxLookBackInvalidIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS) + .setValue(verificationResult.getBeforeIndexHasExtraCellsCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS) + .setValue(verificationResult.getBeforeIndexHasMissingCellsCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildUnverifiedIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_OLD_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildOldIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRebuildUnknownIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRepairExtraVerifiedIndexRowCount()); + context + .getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT) + .setValue(verificationResult.getBeforeRepairExtraUnverifiedIndexRowCount()); + } + if ( + verifyType == IndexTool.IndexVerifyType.BOTH + || verifyType == IndexTool.IndexVerifyType.AFTER + ) { + context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_VALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildValidIndexRowCount()); 
+ context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildExpiredIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_MISSING_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildMissingIndexRowCount()); + context.getCounter(PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildInvalidIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildBeyondMaxLookBackMissingIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRebuildBeyondMaxLookBackInvalidIndexRowCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS) + .setValue(verificationResult.getAfterIndexHasExtraCellsCount()); + context + .getCounter( + PhoenixIndexToolJobCounters.AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS) + .setValue(verificationResult.getAfterIndexHasMissingCellsCount()); + context.getCounter(PhoenixIndexToolJobCounters.AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRepairExtraVerifiedIndexRowCount()); + context + .getCounter(PhoenixIndexToolJobCounters.AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT) + .setValue(verificationResult.getAfterRepairExtraUnverifiedIndexRowCount()); + } + if (verificationResult.isVerificationFailed()) { + throw new IOException("Index verification failed! " + verificationResult); + } + } catch (Exception e) { + throw new IOException("Fail to get index verification result", e); } + } - @Override - protected void setup(Context context) throws IOException { - resultRepository = new IndexVerificationResultRepository(); - indexTableName = PhoenixConfigurationUtil.getPhysicalTableName(context.getConfiguration()); - indexTableNameBytes = Bytes.toBytes(indexTableName); - } + @Override + protected void setup(Context context) throws IOException { + resultRepository = new IndexVerificationResultRepository(); + indexTableName = PhoenixConfigurationUtil.getPhysicalTableName(context.getConfiguration()); + indexTableNameBytes = Bytes.toBytes(indexTableName); + } - @Override - protected void reduce(ImmutableBytesWritable arg0, Iterable arg1, - Reducer.Context context) - throws IOException, InterruptedException + @Override + protected void reduce(ImmutableBytesWritable arg0, Iterable arg1, + Reducer.Context context) + throws IOException, InterruptedException - { - if (!calledOnce.compareAndSet(false, true)) { - return; - } - IndexTool.IndexVerifyType verifyType = getIndexVerifyType(context.getConfiguration()); - if (verifyType != IndexTool.IndexVerifyType.NONE) { - updateCounters(verifyType, context); - } - - if (verifyType != IndexTool.IndexVerifyType.ONLY) { - // "ONLY" option should not turn index state to ACTIVE, as it doesn't rebuild the index - try { - IndexToolUtil.updateIndexState(context.getConfiguration(), PIndexState.ACTIVE); - } catch (SQLException e) { - LOGGER.error(" Failed to update the status to Active", e); - throw new RuntimeException(e.getMessage()); - } - } + { + if (!calledOnce.compareAndSet(false, true)) { + return; + } + IndexTool.IndexVerifyType verifyType = getIndexVerifyType(context.getConfiguration()); + if (verifyType != IndexTool.IndexVerifyType.NONE) { + 
updateCounters(verifyType, context); + } - if (verifyType != IndexTool.IndexVerifyType.ONLY) { - if (PhoenixConfigurationUtil.getIsTransforming(context.getConfiguration())) { - try { - Transform.completeTransform(ConnectionUtil - .getInputConnection(context.getConfiguration()), context.getConfiguration()); - if (PhoenixConfigurationUtil.getForceCutover(context.getConfiguration())) { - Transform.doForceCutover(ConnectionUtil - .getInputConnection(context.getConfiguration()), context.getConfiguration()); - } - } catch (Exception e) { - LOGGER.error(" Failed to complete transform", e); - throw new RuntimeException(e.getMessage()); - } - } - } + if (verifyType != IndexTool.IndexVerifyType.ONLY) { + // "ONLY" option should not turn index state to ACTIVE, as it doesn't rebuild the index + try { + IndexToolUtil.updateIndexState(context.getConfiguration(), PIndexState.ACTIVE); + } catch (SQLException e) { + LOGGER.error(" Failed to update the status to Active", e); + throw new RuntimeException(e.getMessage()); + } } - @Override - protected void cleanup(Context context) throws IOException, InterruptedException{ + if (verifyType != IndexTool.IndexVerifyType.ONLY) { + if (PhoenixConfigurationUtil.getIsTransforming(context.getConfiguration())) { try { - updateTasksTable(context); - resultRepository.close(); - } catch (SQLException e) { - LOGGER.error(" Failed to update the tasks table"); - throw new RuntimeException(e.getMessage()); + Transform.completeTransform(ConnectionUtil.getInputConnection(context.getConfiguration()), + context.getConfiguration()); + if (PhoenixConfigurationUtil.getForceCutover(context.getConfiguration())) { + Transform.doForceCutover(ConnectionUtil.getInputConnection(context.getConfiguration()), + context.getConfiguration()); + } + } catch (Exception e) { + LOGGER.error(" Failed to complete transform", e); + throw new RuntimeException(e.getMessage()); } + } } + } - private void updateTasksTable(Context context) throws SQLException, IOException { - final Properties overrideProps = new Properties(); - final Connection - connection = ConnectionUtil - .getOutputConnection(context.getConfiguration(), overrideProps); - try { - String fullTableName = PhoenixConfigurationUtil.getInputTableName(context.getConfiguration()); - String tenantId = context.getConfiguration().get(MAPREDUCE_TENANT_ID, null); - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); - String tableName = SchemaUtil.getTableNameFromFullName(fullTableName); - String indexName = PhoenixConfigurationUtil.getDisableIndexes(context.getConfiguration()); - List taskRecords = Task.queryTaskTable(connection, null, schemaName, tableName, - PTable.TaskType.INDEX_REBUILD, tenantId, indexName); - if (taskRecords != null && taskRecords.size() > 0) { - for (Task.TaskRecord taskRecord : taskRecords) { - TaskRegionObserver.SelfHealingTask.setEndTaskStatus( - connection.unwrap(PhoenixConnection.class), taskRecord, - PTable.TaskStatus.COMPLETED.toString()); - } - } - } finally { - if (connection != null) { - connection.close(); - } + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + try { + updateTasksTable(context); + resultRepository.close(); + } catch (SQLException e) { + LOGGER.error(" Failed to update the tasks table"); + throw new RuntimeException(e.getMessage()); + } + } + + private void updateTasksTable(Context context) throws SQLException, IOException { + final Properties overrideProps = new Properties(); + final Connection connection = + 
ConnectionUtil.getOutputConnection(context.getConfiguration(), overrideProps); + try { + String fullTableName = PhoenixConfigurationUtil.getInputTableName(context.getConfiguration()); + String tenantId = context.getConfiguration().get(MAPREDUCE_TENANT_ID, null); + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName); + String tableName = SchemaUtil.getTableNameFromFullName(fullTableName); + String indexName = PhoenixConfigurationUtil.getDisableIndexes(context.getConfiguration()); + List taskRecords = Task.queryTaskTable(connection, null, schemaName, + tableName, PTable.TaskType.INDEX_REBUILD, tenantId, indexName); + if (taskRecords != null && taskRecords.size() > 0) { + for (Task.TaskRecord taskRecord : taskRecords) { + TaskRegionObserver.SelfHealingTask.setEndTaskStatus( + connection.unwrap(PhoenixConnection.class), taskRecord, + PTable.TaskStatus.COMPLETED.toString()); } + } + } finally { + if (connection != null) { + connection.close(); + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java index 5c3ec774986..6103a9c77cf 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -47,147 +47,150 @@ import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Mapper that hands over rows from data table to the index table. 
*/ -public class PhoenixIndexPartialBuildMapper extends TableMapper { - - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class); - - private PhoenixConnection connection; - - private DirectHTableWriter writer; - - private int batchSize; - - private List mutations ; - - private ImmutableBytesPtr maintainers; - - @Override - protected void setup(final Context context) throws IOException, InterruptedException { - super.setup(context); - final Configuration configuration = context.getConfiguration(); - writer = new DirectHTableWriter(configuration); - - try { - final Properties overrideProps = new Properties(); - String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); - String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - if(txScnValue==null && scn!=null) { - overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); - } - connection = ConnectionUtil.getOutputConnection(configuration, overrideProps).unwrap(PhoenixConnection.class); - connection.setAutoCommit(false); - // Get BatchSize - ConnectionQueryServices services = connection.getQueryServices(); - int maxSize = - services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - batchSize = Math.min(connection.getMutateBatchSize(), maxSize); - LOGGER.info("Mutation Batch Size = " + batchSize); - this.mutations = Lists.newArrayListWithExpectedSize(batchSize); - maintainers=new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration)); - } catch (SQLException e) { - tryClosingResources(); - throw new RuntimeException(e.getMessage()); - } +public class PhoenixIndexPartialBuildMapper + extends TableMapper { + + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class); + + private PhoenixConnection connection; + + private DirectHTableWriter writer; + + private int batchSize; + + private List mutations; + + private ImmutableBytesPtr maintainers; + + @Override + protected void setup(final Context context) throws IOException, InterruptedException { + super.setup(context); + final Configuration configuration = context.getConfiguration(); + writer = new DirectHTableWriter(configuration); + + try { + final Properties overrideProps = new Properties(); + String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); + String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + if (txScnValue == null && scn != null) { + overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); + } + connection = ConnectionUtil.getOutputConnection(configuration, overrideProps) + .unwrap(PhoenixConnection.class); + connection.setAutoCommit(false); + // Get BatchSize + ConnectionQueryServices services = connection.getQueryServices(); + int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, + QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + batchSize = Math.min(connection.getMutateBatchSize(), maxSize); + LOGGER.info("Mutation Batch Size = " + batchSize); + this.mutations = Lists.newArrayListWithExpectedSize(batchSize); + maintainers = + new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration)); + } catch (SQLException e) { + tryClosingResources(); + throw new RuntimeException(e.getMessage()); } - - @Override - protected void map(ImmutableBytesWritable row, Result value, Context context) - throws IOException, InterruptedException { - 
context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); - try { - byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(maintainers); - byte[] uuidValue = ServerCacheClient.generateId(); - byte[] clientVersion = Bytes.toBytes(MetaDataProtocol.PHOENIX_VERSION); - Put put = null; - Delete del = null; - for (Cell cell : value.rawCells()) { - if (cell.getType() == Cell.Type.Put) { - if (put == null) { - put = new Put(CellUtil.cloneRow(cell)); - put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - put.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); - put.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersion); - put.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); - mutations.add(put); - } - put.add(cell); - } else { - if (del == null) { - del = new Delete(CellUtil.cloneRow(cell)); - del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); - del.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); - del.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersion); - del.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); - mutations.add(del); - } - del.add(cell); - } - } - // Write Mutation Batch - if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) { - writeBatch(mutations, context); - mutations.clear(); - } - // Make sure progress is reported to Application Master. - context.progress(); - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); - throw new RuntimeException(e); + } + + @Override + protected void map(ImmutableBytesWritable row, Result value, Context context) + throws IOException, InterruptedException { + context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); + try { + byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(maintainers); + byte[] uuidValue = ServerCacheClient.generateId(); + byte[] clientVersion = Bytes.toBytes(MetaDataProtocol.PHOENIX_VERSION); + Put put = null; + Delete del = null; + for (Cell cell : value.rawCells()) { + if (cell.getType() == Cell.Type.Put) { + if (put == null) { + put = new Put(CellUtil.cloneRow(cell)); + put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + put.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); + put.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersion); + put.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); + mutations.add(put); + } + put.add(cell); + } else { + if (del == null) { + del = new Delete(CellUtil.cloneRow(cell)); + del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); + del.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue); + del.setAttribute(BaseScannerRegionObserverConstants.CLIENT_VERSION, clientVersion); + del.setAttribute(BaseScannerRegionObserverConstants.REPLAY_WRITES, + BaseScannerRegionObserverConstants.REPLAY_ONLY_INDEX_WRITES); + mutations.add(del); + } + del.add(cell); } + } + // Write Mutation Batch + if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) { + writeBatch(mutations, context); + mutations.clear(); + } + // Make sure progress is reported to Application Master. 
+ context.progress(); + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); + throw new RuntimeException(e); } - - private void writeBatch(List mutations, Context context) - throws IOException, SQLException, InterruptedException { - writer.write(mutations); - context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(mutations.size()); + } + + private void writeBatch(List mutations, Context context) + throws IOException, SQLException, InterruptedException { + writer.write(mutations); + context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(mutations.size()); + } + + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + try { + // Write the last & final Mutation Batch + if (!mutations.isEmpty()) { + writeBatch(mutations, context); + } + // We are writing some dummy key-value as map output here so that we commit only one + // output to reducer. + context.write( + new ImmutableBytesWritable(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), + new IntWritable(0)); + super.cleanup(context); + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); + throw new RuntimeException(e); + } finally { + tryClosingResources(); } - - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - try { - // Write the last & final Mutation Batch - if (!mutations.isEmpty()) { - writeBatch(mutations, context); - } - // We are writing some dummy key-value as map output here so that we commit only one - // output to reducer. - context.write(new ImmutableBytesWritable( - UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), - new IntWritable(0)); - super.cleanup(context); - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); - throw new RuntimeException(e); - } finally { - tryClosingResources(); - } + } + + private void tryClosingResources() throws IOException { + if (this.connection != null) { + try { + this.connection.close(); + } catch (SQLException e) { + LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); + } } - - private void tryClosingResources() throws IOException { - if (this.connection != null) { - try { - this.connection.close(); - } catch (SQLException e) { - LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); - } - } - if (this.writer != null) { - this.writer.close(); - } + if (this.writer != null) { + this.writer.close(); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexToolJobCounters.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexToolJobCounters.java index 28d2a301888..d79fa452a52 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexToolJobCounters.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexToolJobCounters.java @@ -1,5 +1,4 @@ -/** - /* +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file
 * distributed with this work for additional information
@@ -8,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,29 +21,29 @@
 * Counters used for Index Tool MR job
 */
 public enum PhoenixIndexToolJobCounters {
- SCANNED_DATA_ROW_COUNT,
- REBUILT_INDEX_ROW_COUNT,
- BEFORE_REBUILD_VALID_INDEX_ROW_COUNT,
- BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT,
- BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT,
- BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT,
- BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT,
- BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT,
- BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS,
- BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS,
- BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT,
- BEFORE_REBUILD_OLD_INDEX_ROW_COUNT,
- BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT,
- BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT,
- BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT,
- AFTER_REBUILD_VALID_INDEX_ROW_COUNT,
- AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT,
- AFTER_REBUILD_MISSING_INDEX_ROW_COUNT,
- AFTER_REBUILD_INVALID_INDEX_ROW_COUNT,
- AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT,
- AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT,
- AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS,
- AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS,
- AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT,
- AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT
+ SCANNED_DATA_ROW_COUNT,
+ REBUILT_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_VALID_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS,
+ BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS,
+ BEFORE_REBUILD_UNVERIFIED_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_OLD_INDEX_ROW_COUNT,
+ BEFORE_REBUILD_UNKNOWN_INDEX_ROW_COUNT,
+ BEFORE_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT,
+ BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT,
+ AFTER_REBUILD_VALID_INDEX_ROW_COUNT,
+ AFTER_REBUILD_EXPIRED_INDEX_ROW_COUNT,
+ AFTER_REBUILD_MISSING_INDEX_ROW_COUNT,
+ AFTER_REBUILD_INVALID_INDEX_ROW_COUNT,
+ AFTER_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT,
+ AFTER_REBUILD_BEYOND_MAXLOOKBACK_INVALID_INDEX_ROW_COUNT,
+ AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_EXTRA_CELLS,
+ AFTER_REBUILD_INVALID_INDEX_ROW_COUNT_COZ_MISSING_CELLS,
+ AFTER_REPAIR_EXTRA_VERIFIED_INDEX_ROW_COUNT,
+ AFTER_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT
 }
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixScrutinyJobCounters.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixScrutinyJobCounters.java
index 54e8a94acb1..59a5ad10b12 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixScrutinyJobCounters.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixScrutinyJobCounters.java
@@ -1,4 +1,3 @@
-/**
 /*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -8,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,30 +21,30 @@
 * Counters used for Index Scrutiny MR job
 */
 public enum PhoenixScrutinyJobCounters {
- /**
- * number of rows in data table with a valid index row (or vice-versa)
- */
- VALID_ROW_COUNT,
- /**
- * number of rows in data table with an invalid index row (or vice-versa)
- */
- INVALID_ROW_COUNT,
- /**
- * Number of rows in the index table with an incorrect covered column value
- */
- BAD_COVERED_COL_VAL_COUNT,
- /**
- * Number of rows in source that have expired while scrutiny was comparing them with target
- */
- EXPIRED_ROW_COUNT,
- /**
- * Number of batches processed
- */
- BATCHES_PROCESSED_COUNT,
- /**
- * Number of rows in source that became older than the max lookback age while scrutiny
- * was comparing them with the target, and didn't match. We break these out separately because
- * they could be due to extra versions being compacted, and are harmless.
- */
- BEYOND_MAX_LOOKBACK_COUNT;
+ /**
+ * number of rows in data table with a valid index row (or vice-versa)
+ */
+ VALID_ROW_COUNT,
+ /**
+ * number of rows in data table with an invalid index row (or vice-versa)
+ */
+ INVALID_ROW_COUNT,
+ /**
+ * Number of rows in the index table with an incorrect covered column value
+ */
+ BAD_COVERED_COL_VAL_COUNT,
+ /**
+ * Number of rows in source that have expired while scrutiny was comparing them with target
+ */
+ EXPIRED_ROW_COUNT,
+ /**
+ * Number of batches processed
+ */
+ BATCHES_PROCESSED_COUNT,
+ /**
+ * Number of rows in source that became older than the max lookback age while scrutiny was
+ * comparing them with the target, and didn't match. We break these out separately because they
+ * could be due to extra versions being compacted, and are harmless.
+ */
+ BEYOND_MAX_LOOKBACK_COUNT;
 }
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexDBWritable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexDBWritable.java
index 4054918465e..e0633c08522 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexDBWritable.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexDBWritable.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -32,28 +32,27 @@
 /**
 * A {@link DBWritable} class that reads the number of rows that have been built.
- * - * */ -public class PhoenixServerBuildIndexDBWritable implements DBWritable { - private long rowCount; +public class PhoenixServerBuildIndexDBWritable implements DBWritable { + private long rowCount; - @Override - public void write(PreparedStatement statement) throws SQLException { - throw new SQLFeatureNotSupportedException(); - } + @Override + public void write(PreparedStatement statement) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } - @Override - public void readFields(ResultSet resultSet) throws SQLException { - Tuple row = resultSet.unwrap(PhoenixResultSet.class).getCurrentRow(); - Cell kv = row.getValue(0); - ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); - // A single Cell will be returned with the count(*) - we decode that here - rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); - } + @Override + public void readFields(ResultSet resultSet) throws SQLException { + Tuple row = resultSet.unwrap(PhoenixResultSet.class).getCurrentRow(); + Cell kv = row.getValue(0); + ImmutableBytesWritable tmpPtr = + new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + // A single Cell will be returned with the count(*) - we decode that here + rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); + } - public long getRowCount() { - return rowCount; - } + public long getRowCount() { + return rowCount; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java index 8cdcb6a7125..4fd21aba7df 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,44 +29,45 @@ import org.apache.phoenix.query.QueryServices; /** - * Mapper that does not do much as regions servers actually build the index from the data table regions directly + * Mapper that does not do much as regions servers actually build the index from the data table + * regions directly */ public class PhoenixServerBuildIndexMapper extends - Mapper { + Mapper { - private long rebuildPageRowSize; + private long rebuildPageRowSize; - @Override - protected void setup(final Context context) throws IOException, InterruptedException { - super.setup(context); - String rebuildPageRowSizeConf = - context.getConfiguration().get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); - if (rebuildPageRowSizeConf != null) { - this.rebuildPageRowSize = Long.parseLong(rebuildPageRowSizeConf); - } else { - this.rebuildPageRowSize = -1L; - } + @Override + protected void setup(final Context context) throws IOException, InterruptedException { + super.setup(context); + String rebuildPageRowSizeConf = + context.getConfiguration().get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS); + if (rebuildPageRowSizeConf != null) { + this.rebuildPageRowSize = Long.parseLong(rebuildPageRowSizeConf); + } else { + this.rebuildPageRowSize = -1L; } + } - @Override - protected void map(NullWritable key, PhoenixServerBuildIndexDBWritable record, Context context) - throws IOException, InterruptedException { - context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(record.getRowCount()); - if (this.rebuildPageRowSize != -1) { - if (record.getRowCount() > this.rebuildPageRowSize) { - throw new IOException("Rebuilt/Verified rows greater than page size. Rebuilt rows: " - + record.getRowCount() + " Page size: " + this.rebuildPageRowSize); - } - } - // Make sure progress is reported to Application Master. - context.progress(); + @Override + protected void map(NullWritable key, PhoenixServerBuildIndexDBWritable record, Context context) + throws IOException, InterruptedException { + context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(record.getRowCount()); + if (this.rebuildPageRowSize != -1) { + if (record.getRowCount() > this.rebuildPageRowSize) { + throw new IOException("Rebuilt/Verified rows greater than page size. Rebuilt rows: " + + record.getRowCount() + " Page size: " + this.rebuildPageRowSize); + } } + // Make sure progress is reported to Application Master. 
+ context.progress(); + } - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - context.write(new ImmutableBytesWritable( - UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), - new IntWritable(0)); - super.cleanup(context); - } + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + context.write( + new ImmutableBytesWritable(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)), + new IntWritable(0)); + super.cleanup(context); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/SourceTargetColumnNames.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/SourceTargetColumnNames.java index 1c7991ffe45..15969330a44 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/SourceTargetColumnNames.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/SourceTargetColumnNames.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,175 +21,172 @@ import org.apache.phoenix.mapreduce.util.IndexColumnNames; import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.util.SchemaUtil; /** - * Get index scrutiny source/target column names, depending on whether the source is the - * data table or index table + * Get index scrutiny source/target column names, depending on whether the source is the data table + * or index table */ public interface SourceTargetColumnNames { - List getSourceColNames(); + List getSourceColNames(); - List getUnqualifiedSourceColNames(); + List getUnqualifiedSourceColNames(); - List getTargetColNames(); + List getTargetColNames(); - /** - * @return The target column name with a CAST to the source's data type - */ - List getCastedTargetColNames(); + /** Returns The target column name with a CAST to the source's data type */ + List getCastedTargetColNames(); - List getUnqualifiedTargetColNames(); + List getUnqualifiedTargetColNames(); - List getSourceDynamicCols(); + List getSourceDynamicCols(); - List getTargetDynamicCols(); + List getTargetDynamicCols(); - List getTargetPkColNames(); + List getTargetPkColNames(); - List getSourcePkColNames(); + List getSourcePkColNames(); - String getQualifiedSourceTableName(); + String getQualifiedSourceTableName(); - String getQualifiedTargetTableName(); + String getQualifiedTargetTableName(); + /** + * Used when the data table is the source table of a scrutiny + */ + public static class DataSourceColNames extends IndexColumnNames + implements SourceTargetColumnNames { /** - * Used when the data table is the source table of a scrutiny + * @param pdataTable the data table + * @param pindexTable the index table for the data table */ - public static class DataSourceColNames extends IndexColumnNames - implements SourceTargetColumnNames { - /** - * @param pdataTable the data table - * @param pindexTable the index table for the data table - */ - public DataSourceColNames(PTable pdataTable, PTable pindexTable) { - super(pdataTable, pindexTable); - } - - @Override - public List getSourceColNames() { - return getDataColNames(); - } - - @Override - public List getUnqualifiedSourceColNames() { - return 
getUnqualifiedDataColNames(); - } - - @Override - public List getUnqualifiedTargetColNames() { - return getUnqualifiedIndexColNames(); - } - - @Override - public List getTargetColNames() { - return getIndexColNames(); - } - - @Override - public List getSourceDynamicCols() { - return getDynamicDataCols(); - } - - @Override - public List getTargetDynamicCols() { - return getDynamicIndexCols(); - } - - @Override - public List getTargetPkColNames() { - return getIndexPkColNames(); - } - - @Override - public List getSourcePkColNames() { - return getDataPkColNames(); - } - - @Override - public String getQualifiedSourceTableName() { - return getQualifiedDataTableName(); - } - - @Override - public String getQualifiedTargetTableName() { - return getQualifiedIndexTableName(); - } - - @Override - public List getCastedTargetColNames() { - return getCastedColumnNames(getIndexColNames(), dataColSqlTypeNames); - } + public DataSourceColNames(PTable pdataTable, PTable pindexTable) { + super(pdataTable, pindexTable); + } + + @Override + public List getSourceColNames() { + return getDataColNames(); + } + + @Override + public List getUnqualifiedSourceColNames() { + return getUnqualifiedDataColNames(); + } + + @Override + public List getUnqualifiedTargetColNames() { + return getUnqualifiedIndexColNames(); + } + + @Override + public List getTargetColNames() { + return getIndexColNames(); + } + + @Override + public List getSourceDynamicCols() { + return getDynamicDataCols(); + } + + @Override + public List getTargetDynamicCols() { + return getDynamicIndexCols(); + } + + @Override + public List getTargetPkColNames() { + return getIndexPkColNames(); + } + + @Override + public List getSourcePkColNames() { + return getDataPkColNames(); + } + + @Override + public String getQualifiedSourceTableName() { + return getQualifiedDataTableName(); + } + + @Override + public String getQualifiedTargetTableName() { + return getQualifiedIndexTableName(); + } + @Override + public List getCastedTargetColNames() { + return getCastedColumnNames(getIndexColNames(), dataColSqlTypeNames); } + } + + /** + * Used when the index table is the source table of a scrutiny + */ + public static class IndexSourceColNames extends IndexColumnNames + implements SourceTargetColumnNames { /** - * Used when the index table is the source table of a scrutiny + * @param pdataTable the data table + * @param pindexTable the index table for the data table */ - public static class IndexSourceColNames extends IndexColumnNames - implements SourceTargetColumnNames { - /** - * @param pdataTable the data table - * @param pindexTable the index table for the data table - */ - public IndexSourceColNames(PTable pdataTable, PTable pindexTable) { - super(pdataTable, pindexTable); - } - - @Override - public List getSourceColNames() { - return getIndexColNames(); - } - - @Override - public List getUnqualifiedSourceColNames() { - return getUnqualifiedIndexColNames(); - } - - @Override - public List getUnqualifiedTargetColNames() { - return getUnqualifiedDataColNames(); - } - - @Override - public List getTargetColNames() { - return getDataColNames(); - } - - @Override - public List getSourceDynamicCols() { - return getDynamicIndexCols(); - } - - @Override - public List getTargetDynamicCols() { - return getDynamicDataCols(); - } - - @Override - public List getTargetPkColNames() { - return getDataPkColNames(); - } - - @Override - public List getSourcePkColNames() { - return getIndexPkColNames(); - } - - @Override - public String getQualifiedSourceTableName() { - return 
getQualifiedIndexTableName(); - } - - @Override - public String getQualifiedTargetTableName() { - return getQualifiedDataTableName(); - } - - @Override - public List getCastedTargetColNames() { - return getCastedColumnNames(getDataColNames(), indexColSqlTypeNames); - } + public IndexSourceColNames(PTable pdataTable, PTable pindexTable) { + super(pdataTable, pindexTable); + } + + @Override + public List getSourceColNames() { + return getIndexColNames(); + } + + @Override + public List getUnqualifiedSourceColNames() { + return getUnqualifiedIndexColNames(); + } + + @Override + public List getUnqualifiedTargetColNames() { + return getUnqualifiedDataColNames(); + } + + @Override + public List getTargetColNames() { + return getDataColNames(); + } + + @Override + public List getSourceDynamicCols() { + return getDynamicIndexCols(); + } + + @Override + public List getTargetDynamicCols() { + return getDynamicDataCols(); + } + + @Override + public List getTargetPkColNames() { + return getDataPkColNames(); + } + + @Override + public List getSourcePkColNames() { + return getIndexPkColNames(); + } + + @Override + public String getQualifiedSourceTableName() { + return getQualifiedIndexTableName(); + } + + @Override + public String getQualifiedTargetTableName() { + return getQualifiedDataTableName(); + } + + @Override + public List getCastedTargetColNames() { + return getCastedColumnNames(getDataColNames(), indexColSqlTypeNames); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixAsyncIndex.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixAsyncIndex.java index a61e49a2f22..338ee82141b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixAsyncIndex.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixAsyncIndex.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,55 +21,54 @@ import org.apache.phoenix.schema.PTable.IndexType; public class PhoenixAsyncIndex { - private String tableName; - private IndexType indexType; - private String tableSchem; - private String dataTableName; + private String tableName; + private IndexType indexType; + private String tableSchem; + private String dataTableName; - public String getTableName() { - return tableName; - } + public String getTableName() { + return tableName; + } - public void setTableName(String tableName) { - this.tableName = tableName; - } + public void setTableName(String tableName) { + this.tableName = tableName; + } - public IndexType getIndexType() { - return indexType; - } + public IndexType getIndexType() { + return indexType; + } - public void setIndexType(IndexType indexType) { - this.indexType = indexType; - } + public void setIndexType(IndexType indexType) { + this.indexType = indexType; + } - public String getTableSchem() { - return tableSchem; - } + public String getTableSchem() { + return tableSchem; + } - public void setTableSchem(String tableSchem) { - this.tableSchem = tableSchem; - } + public void setTableSchem(String tableSchem) { + this.tableSchem = tableSchem; + } - public String getDataTableName() { - return dataTableName; - } + public String getDataTableName() { + return dataTableName; + } - public void setDataTableName(String dataTableName) { - this.dataTableName = dataTableName; - } + public void setDataTableName(String dataTableName) { + this.dataTableName = dataTableName; + } - public String getJobName() { - return String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, tableSchem, dataTableName, tableName); - } + public String getJobName() { + return String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, tableSchem, dataTableName, tableName); + } - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("TableName = " + tableName) - .append(" ; IndexType = " + (indexType == null ? "" : indexType.toString())) - .append(" ; TableSchem = " + tableSchem) - .append(" ; DataTableName = " + dataTableName); - return builder.toString(); - } + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("TableName = " + tableName) + .append(" ; IndexType = " + (indexType == null ? "" : indexType.toString())) + .append(" ; TableSchem = " + tableSchem).append(" ; DataTableName = " + dataTableName); + return builder.toString(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobCallable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobCallable.java index 720d2c76261..1aeb86a668e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobCallable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobCallable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,46 +28,46 @@ public class PhoenixMRJobCallable implements Callable { - private PhoenixAsyncIndex indexInfo; - private String basePath; - private Configuration conf; + private PhoenixAsyncIndex indexInfo; + private String basePath; + private Configuration conf; - public PhoenixMRJobCallable(Configuration conf, final PhoenixAsyncIndex indexInfo, - String basePath) { - this.conf = conf; - this.indexInfo = indexInfo; - this.basePath = basePath; - } - - @Override - public Boolean call() throws Exception { - StringBuilder commandLineArgBuilder = new StringBuilder(); - commandLineArgBuilder.append(" -dt " + indexInfo.getDataTableName()); - commandLineArgBuilder.append(" -it " + indexInfo.getTableName()); - commandLineArgBuilder.append(" -direct"); - commandLineArgBuilder.append(" -op " + (basePath.endsWith("/") ? basePath : basePath + "/") - + indexInfo.getTableName()); + public PhoenixMRJobCallable(Configuration conf, final PhoenixAsyncIndex indexInfo, + String basePath) { + this.conf = conf; + this.indexInfo = indexInfo; + this.basePath = basePath; + } - if (indexInfo.getTableSchem() != null && indexInfo.getTableSchem().trim().length() > 0) { - commandLineArgBuilder.append(" -s " + indexInfo.getTableSchem()); - } - // Setting the configuration here again (in addition to IndexTool.java) to doubly sure - // configurations are set - final String qDataTable = - SchemaUtil.getTableName(indexInfo.getTableSchem(), indexInfo.getDataTableName()); - final String qIndexTable = - SchemaUtil.getTableName(indexInfo.getTableSchem(), indexInfo.getTableName()); - String physicalIndexTable = qIndexTable; + @Override + public Boolean call() throws Exception { + StringBuilder commandLineArgBuilder = new StringBuilder(); + commandLineArgBuilder.append(" -dt " + indexInfo.getDataTableName()); + commandLineArgBuilder.append(" -it " + indexInfo.getTableName()); + commandLineArgBuilder.append(" -direct"); + commandLineArgBuilder.append( + " -op " + (basePath.endsWith("/") ? basePath : basePath + "/") + indexInfo.getTableName()); - if (IndexType.LOCAL.equals(indexInfo.getIndexType())) { - physicalIndexTable = MetaDataUtil.getLocalIndexTableName(qDataTable); - } - conf.set(TableOutputFormat.OUTPUT_TABLE, physicalIndexTable); + if (indexInfo.getTableSchem() != null && indexInfo.getTableSchem().trim().length() > 0) { + commandLineArgBuilder.append(" -s " + indexInfo.getTableSchem()); + } + // Setting the configuration here again (in addition to IndexTool.java) to doubly sure + // configurations are set + final String qDataTable = + SchemaUtil.getTableName(indexInfo.getTableSchem(), indexInfo.getDataTableName()); + final String qIndexTable = + SchemaUtil.getTableName(indexInfo.getTableSchem(), indexInfo.getTableName()); + String physicalIndexTable = qIndexTable; - IndexTool tool = new IndexTool(); - tool.setConf(conf); - int result = tool.run(commandLineArgBuilder.toString().split(" ")); - return result == 0 ? 
true : false; + if (IndexType.LOCAL.equals(indexInfo.getIndexType())) { + physicalIndexTable = MetaDataUtil.getLocalIndexTableName(qDataTable); } + conf.set(TableOutputFormat.OUTPUT_TABLE, physicalIndexTable); + + IndexTool tool = new IndexTool(); + tool.setConf(conf); + int result = tool.run(commandLineArgBuilder.toString().split(" ")); + return result == 0 ? true : false; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java index e904b3f931e..d68397e718f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +39,6 @@ import javax.security.auth.login.AppConfigurationEntry; -import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -58,278 +57,263 @@ import org.apache.phoenix.util.PhoenixMRJobUtil.MR_SCHEDULER_TYPE; import org.apache.phoenix.util.UpgradeUtil; import org.apache.phoenix.util.ZKBasedMasterElectionUtil; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.fasterxml.jackson.databind.JsonNode; public class PhoenixMRJobSubmitter { - // Lock to elect a master node that submits the Phoenix Secondary Index MR Jobs - private static final String PHOENIX_LOCKS_PARENT = - "/phoenix/automated-mr-index-build-leader-election"; - public static final String PHOENIX_MR_CLIENT_SCANNER_TIMEOUT_PERIOD = - "phoenix.mr.client.scanner.timeout.period"; - public static final String PHOENIX_MR_RPC_TIMEOUT = - "phoenix.mr.rpc.timeout"; - public static final String PHOENIX_MR_TASK_TIMEOUT = - "phoenix.mr.task.timeout"; - public static final String PHOENIX_MR_CLIENT_RETRIES_NUMBER = - "phoenix.mr.client.retries.number"; - public static final String PHOENIX_MR_CLIENT_PAUSE = - "phoenix.mr.client.retries.number"; - public static final String PHOENIX_MR_ZK_RECOVERY_RETRY = - "phoenix.mr.zk.recovery.retry"; - private static final String AUTO_INDEX_BUILD_LOCK_NAME = "ActiveStandbyElectorLock"; - private static final int DEFAULT_TIMEOUT_IN_MILLIS = 600000; - public static final int DEFAULT_MR_CLIENT_SCANNER_TIMEOUT_PERIOD = DEFAULT_TIMEOUT_IN_MILLIS; - public static final int DEFAULT_MR_RPC_TIMEOUT = DEFAULT_TIMEOUT_IN_MILLIS; - public static final int DEFAULT_MR_TASK_TIMEOUT = DEFAULT_TIMEOUT_IN_MILLIS; - // Reduced HBase/Zookeeper Client Retries - public static final int DEFAULT_MR_CLIENT_RETRIES_NUMBER = 10; - public static final int DEFAULT_MR_CLIENT_PAUSE = 1000; - public static final int DEFAULT_MR_ZK_RECOVERY_RETRY = 1; - - public static final String CANDIDATE_INDEX_INFO_QUERY = "SELECT " - + PhoenixDatabaseMetaData.INDEX_TYPE + "," - + PhoenixDatabaseMetaData.DATA_TABLE_NAME + ", " - + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " - + PhoenixDatabaseMetaData.TABLE_NAME + ", " - + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + ", " - 
+ PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP - + " FROM " - + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + ".\"" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "\"" - + " (" + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ", " - + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + ") " - + " WHERE " - + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL and " - + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL and " - + "(" + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " IS NOT NULL OR " - + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " IS NOT NULL ) and " - + PhoenixDatabaseMetaData.TABLE_TYPE + " = '" + PTableType.INDEX.getSerializedValue() + "' and " - + PhoenixDatabaseMetaData.INDEX_STATE + " = '" + PIndexState.BUILDING.getSerializedValue() + "'"; - - // TODO - Move this to a property? - private static final int JOB_SUBMIT_POOL_TIMEOUT = 5; - private Configuration conf; - private String zkQuorum; - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobSubmitter.class); - - public PhoenixMRJobSubmitter() throws IOException { - this(null); + // Lock to elect a master node that submits the Phoenix Secondary Index MR Jobs + private static final String PHOENIX_LOCKS_PARENT = + "/phoenix/automated-mr-index-build-leader-election"; + public static final String PHOENIX_MR_CLIENT_SCANNER_TIMEOUT_PERIOD = + "phoenix.mr.client.scanner.timeout.period"; + public static final String PHOENIX_MR_RPC_TIMEOUT = "phoenix.mr.rpc.timeout"; + public static final String PHOENIX_MR_TASK_TIMEOUT = "phoenix.mr.task.timeout"; + public static final String PHOENIX_MR_CLIENT_RETRIES_NUMBER = "phoenix.mr.client.retries.number"; + public static final String PHOENIX_MR_CLIENT_PAUSE = "phoenix.mr.client.retries.number"; + public static final String PHOENIX_MR_ZK_RECOVERY_RETRY = "phoenix.mr.zk.recovery.retry"; + private static final String AUTO_INDEX_BUILD_LOCK_NAME = "ActiveStandbyElectorLock"; + private static final int DEFAULT_TIMEOUT_IN_MILLIS = 600000; + public static final int DEFAULT_MR_CLIENT_SCANNER_TIMEOUT_PERIOD = DEFAULT_TIMEOUT_IN_MILLIS; + public static final int DEFAULT_MR_RPC_TIMEOUT = DEFAULT_TIMEOUT_IN_MILLIS; + public static final int DEFAULT_MR_TASK_TIMEOUT = DEFAULT_TIMEOUT_IN_MILLIS; + // Reduced HBase/Zookeeper Client Retries + public static final int DEFAULT_MR_CLIENT_RETRIES_NUMBER = 10; + public static final int DEFAULT_MR_CLIENT_PAUSE = 1000; + public static final int DEFAULT_MR_ZK_RECOVERY_RETRY = 1; + + public static final String CANDIDATE_INDEX_INFO_QUERY = + "SELECT " + PhoenixDatabaseMetaData.INDEX_TYPE + "," + PhoenixDatabaseMetaData.DATA_TABLE_NAME + + ", " + PhoenixDatabaseMetaData.TABLE_SCHEM + ", " + PhoenixDatabaseMetaData.TABLE_NAME + + ", " + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + ", " + + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " FROM " + + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + ".\"" + + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "\"" + " (" + + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " " + PDate.INSTANCE.getSqlTypeName() + ", " + + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName() + + ") " + " WHERE " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL and " + + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL and " + "(" + + PhoenixDatabaseMetaData.ASYNC_CREATED_DATE + " IS NOT NULL OR " + + PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP + " IS NOT NULL ) and " + + 
PhoenixDatabaseMetaData.TABLE_TYPE + " = '" + PTableType.INDEX.getSerializedValue() + + "' and " + PhoenixDatabaseMetaData.INDEX_STATE + " = '" + + PIndexState.BUILDING.getSerializedValue() + "'"; + + // TODO - Move this to a property? + private static final int JOB_SUBMIT_POOL_TIMEOUT = 5; + private Configuration conf; + private String zkQuorum; + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobSubmitter.class); + + public PhoenixMRJobSubmitter() throws IOException { + this(null); + } + + public PhoenixMRJobSubmitter(Configuration conf) throws IOException { + if (conf == null) { + conf = HBaseConfiguration.create(); } - - public PhoenixMRJobSubmitter(Configuration conf) throws IOException { - if (conf == null) { - conf = HBaseConfiguration.create(); - } - this.conf = conf; - - // Have Phoenix specific properties for defaults to enable potential override - conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - conf.getLong(PHOENIX_MR_CLIENT_SCANNER_TIMEOUT_PERIOD, - DEFAULT_MR_CLIENT_SCANNER_TIMEOUT_PERIOD)); - conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, - conf.getLong(PHOENIX_MR_RPC_TIMEOUT, - DEFAULT_MR_RPC_TIMEOUT)); - conf.setLong(MRJobConfig.TASK_TIMEOUT, - conf.getLong(PHOENIX_MR_TASK_TIMEOUT, - DEFAULT_MR_TASK_TIMEOUT)); - - // Reduced HBase Client Retries - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - conf.getInt(PHOENIX_MR_CLIENT_RETRIES_NUMBER, - DEFAULT_MR_CLIENT_RETRIES_NUMBER)); - conf.setInt(HConstants.HBASE_CLIENT_PAUSE, - conf.getInt(PHOENIX_MR_CLIENT_PAUSE, - DEFAULT_MR_CLIENT_PAUSE)); - conf.setInt("zookeeper.recovery.retry", - conf.getInt(PHOENIX_MR_ZK_RECOVERY_RETRY, - DEFAULT_MR_ZK_RECOVERY_RETRY)); - - String schedulerType = - conf.get(PhoenixMRJobUtil.PHOENIX_MR_SCHEDULER_TYPE_NAME, - MR_SCHEDULER_TYPE.NONE.toString()); - - MR_SCHEDULER_TYPE type = MR_SCHEDULER_TYPE.valueOf(schedulerType); - - switch (type) { - case CAPACITY: - LOGGER.info("Applying the Capacity Scheduler Queue Configurations"); - PhoenixMRJobUtil.updateCapacityQueueInfo(conf); - break; - case FAIR: - LOGGER.warn("Fair Scheduler type is not yet supported"); - throw new IOException("Fair Scheduler is not yet supported"); - case NONE: - default: - break; - } - zkQuorum = conf.get(HConstants.ZOOKEEPER_QUORUM); - // Use UGI.loginUserFromKeytab to login and work with secure clusters - enableKeyTabSecurity(); + this.conf = conf; + + // Have Phoenix specific properties for defaults to enable potential override + conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, conf + .getLong(PHOENIX_MR_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_MR_CLIENT_SCANNER_TIMEOUT_PERIOD)); + conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, + conf.getLong(PHOENIX_MR_RPC_TIMEOUT, DEFAULT_MR_RPC_TIMEOUT)); + conf.setLong(MRJobConfig.TASK_TIMEOUT, + conf.getLong(PHOENIX_MR_TASK_TIMEOUT, DEFAULT_MR_TASK_TIMEOUT)); + + // Reduced HBase Client Retries + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + conf.getInt(PHOENIX_MR_CLIENT_RETRIES_NUMBER, DEFAULT_MR_CLIENT_RETRIES_NUMBER)); + conf.setInt(HConstants.HBASE_CLIENT_PAUSE, + conf.getInt(PHOENIX_MR_CLIENT_PAUSE, DEFAULT_MR_CLIENT_PAUSE)); + conf.setInt("zookeeper.recovery.retry", + conf.getInt(PHOENIX_MR_ZK_RECOVERY_RETRY, DEFAULT_MR_ZK_RECOVERY_RETRY)); + + String schedulerType = + conf.get(PhoenixMRJobUtil.PHOENIX_MR_SCHEDULER_TYPE_NAME, MR_SCHEDULER_TYPE.NONE.toString()); + + MR_SCHEDULER_TYPE type = MR_SCHEDULER_TYPE.valueOf(schedulerType); + + switch (type) { + case CAPACITY: + LOGGER.info("Applying the Capacity Scheduler Queue 
Configurations"); + PhoenixMRJobUtil.updateCapacityQueueInfo(conf); + break; + case FAIR: + LOGGER.warn("Fair Scheduler type is not yet supported"); + throw new IOException("Fair Scheduler is not yet supported"); + case NONE: + default: + break; } - - private void enableKeyTabSecurity() throws IOException { - - final String PRINCIPAL = "principal"; - final String KEYTAB = "keyTab"; - // Login with the credentials from the keytab to retrieve the TGT . The - // renewal of the TGT happens in a Zookeeper thread - String principal = null; - String keyTabPath = null; - AppConfigurationEntry entries[] = - javax.security.auth.login.Configuration.getConfiguration() - .getAppConfigurationEntry("Client"); - LOGGER.info("Security - Fetched App Login Configuration Entries"); - if (entries != null) { - for (AppConfigurationEntry entry : entries) { - if (entry.getOptions().get(PRINCIPAL) != null) { - principal = (String) entry.getOptions().get(PRINCIPAL); - } - if (entry.getOptions().get(KEYTAB) != null) { - keyTabPath = (String) entry.getOptions().get(KEYTAB); - } - } - LOGGER.info("Security - Got Principal = " + principal + ""); - if (principal != null && keyTabPath != null) { - LOGGER.info("Security - Retreiving the TGT with principal:" + principal - + " and keytab:" + keyTabPath); - UserGroupInformation.loginUserFromKeytab(principal, keyTabPath); - LOGGER.info("Security - Retrieved TGT with principal:" + principal + " and keytab:" - + keyTabPath); - } + zkQuorum = conf.get(HConstants.ZOOKEEPER_QUORUM); + // Use UGI.loginUserFromKeytab to login and work with secure clusters + enableKeyTabSecurity(); + } + + private void enableKeyTabSecurity() throws IOException { + + final String PRINCIPAL = "principal"; + final String KEYTAB = "keyTab"; + // Login with the credentials from the keytab to retrieve the TGT . 
The + // renewal of the TGT happens in a Zookeeper thread + String principal = null; + String keyTabPath = null; + AppConfigurationEntry entries[] = + javax.security.auth.login.Configuration.getConfiguration().getAppConfigurationEntry("Client"); + LOGGER.info("Security - Fetched App Login Configuration Entries"); + if (entries != null) { + for (AppConfigurationEntry entry : entries) { + if (entry.getOptions().get(PRINCIPAL) != null) { + principal = (String) entry.getOptions().get(PRINCIPAL); } - } - - public Map getCandidateJobs() throws SQLException { - Connection con = DriverManager.getConnection("jdbc:phoenix:" + zkQuorum); - return getCandidateJobs(con); - } - - public Map getCandidateJobs(Connection con) throws SQLException { - Properties props = new Properties(); - UpgradeUtil.doNotUpgradeOnFirstConnection(props); - Map candidateIndexes = new HashMap<>(); - try (Statement s = con.createStatement(); - ResultSet rs = s.executeQuery(CANDIDATE_INDEX_INFO_QUERY)) { - while (rs.next()) { - PhoenixAsyncIndex indexInfo = new PhoenixAsyncIndex(); - indexInfo.setIndexType(IndexType.fromSerializedValue(rs - .getByte(PhoenixDatabaseMetaData.INDEX_TYPE))); - indexInfo.setDataTableName(rs.getString(PhoenixDatabaseMetaData.DATA_TABLE_NAME)); - indexInfo.setTableSchem(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); - indexInfo.setTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); - candidateIndexes.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - indexInfo.getTableSchem(), indexInfo.getDataTableName(), - indexInfo.getTableName()), indexInfo); - } + if (entry.getOptions().get(KEYTAB) != null) { + keyTabPath = (String) entry.getOptions().get(KEYTAB); } - return candidateIndexes; + } + LOGGER.info("Security - Got Principal = " + principal + ""); + if (principal != null && keyTabPath != null) { + LOGGER.info("Security - Retreiving the TGT with principal:" + principal + " and keytab:" + + keyTabPath); + UserGroupInformation.loginUserFromKeytab(principal, keyTabPath); + LOGGER.info( + "Security - Retrieved TGT with principal:" + principal + " and keytab:" + keyTabPath); + } } + } + + public Map getCandidateJobs() throws SQLException { + Connection con = DriverManager.getConnection("jdbc:phoenix:" + zkQuorum); + return getCandidateJobs(con); + } + + public Map getCandidateJobs(Connection con) throws SQLException { + Properties props = new Properties(); + UpgradeUtil.doNotUpgradeOnFirstConnection(props); + Map candidateIndexes = new HashMap<>(); + try (Statement s = con.createStatement(); + ResultSet rs = s.executeQuery(CANDIDATE_INDEX_INFO_QUERY)) { + while (rs.next()) { + PhoenixAsyncIndex indexInfo = new PhoenixAsyncIndex(); + indexInfo.setIndexType( + IndexType.fromSerializedValue(rs.getByte(PhoenixDatabaseMetaData.INDEX_TYPE))); + indexInfo.setDataTableName(rs.getString(PhoenixDatabaseMetaData.DATA_TABLE_NAME)); + indexInfo.setTableSchem(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); + indexInfo.setTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); + candidateIndexes.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, + indexInfo.getTableSchem(), indexInfo.getDataTableName(), indexInfo.getTableName()), + indexInfo); + } + } + return candidateIndexes; + } - public int scheduleIndexBuilds() throws Exception { - - ZKWatcher zookeeperWatcher = - new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null); - - if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT, - AUTO_INDEX_BUILD_LOCK_NAME)) { - LOGGER.info("Some other node is already running 
Automated Index Build." + - " Skipping execution!"); - return -1; - } - // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built - // (in state 'b') - // 2) Get a list of all ACCEPTED, SUBMITTED AND RUNNING jobs from Yarn Resource Manager - // 3) Get the jobs to submit (list from 1 - list from 2) - - // Get Candidate indexes to be built - Map candidateJobs = getCandidateJobs(); - LOGGER.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs); - - // Get already scheduled Jobs list from Yarn Resource Manager - Set submittedJobs = getSubmittedYarnApps(); - LOGGER.info("Already Submitted/Running MR index build jobs - " + submittedJobs); - - // Get final jobs to submit - Set jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs); - - LOGGER.info("Final indexes to be built - " + jobsToSchedule); - List> results = new ArrayList>(jobsToSchedule.size()); + public int scheduleIndexBuilds() throws Exception { - int failedJobSubmissionCount = 0; - int timedoutJobSubmissionCount = 0; - ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10); - LOGGER.info("Attempt to submit MR index build jobs for - " + jobsToSchedule); + ZKWatcher zookeeperWatcher = new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null); + if ( + !ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT, + AUTO_INDEX_BUILD_LOCK_NAME) + ) { + LOGGER + .info("Some other node is already running Automated Index Build." + " Skipping execution!"); + return -1; + } + // 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built + // (in state 'b') + // 2) Get a list of all ACCEPTED, SUBMITTED AND RUNNING jobs from Yarn Resource Manager + // 3) Get the jobs to submit (list from 1 - list from 2) + + // Get Candidate indexes to be built + Map candidateJobs = getCandidateJobs(); + LOGGER.info("Candidate Indexes to be built as seen from SYSTEM.CATALOG - " + candidateJobs); + + // Get already scheduled Jobs list from Yarn Resource Manager + Set submittedJobs = getSubmittedYarnApps(); + LOGGER.info("Already Submitted/Running MR index build jobs - " + submittedJobs); + + // Get final jobs to submit + Set jobsToSchedule = getJobsToSubmit(candidateJobs, submittedJobs); + + LOGGER.info("Final indexes to be built - " + jobsToSchedule); + List> results = new ArrayList>(jobsToSchedule.size()); + + int failedJobSubmissionCount = 0; + int timedoutJobSubmissionCount = 0; + ExecutorService jobSubmitPool = Executors.newFixedThreadPool(10); + LOGGER.info("Attempt to submit MR index build jobs for - " + jobsToSchedule); + + try { + for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) { + PhoenixMRJobCallable task = + new PhoenixMRJobCallable(HBaseConfiguration.create(conf), indexToBuild, "/"); + results.add(jobSubmitPool.submit(task)); + } + for (Future result : results) { try { - for (PhoenixAsyncIndex indexToBuild : jobsToSchedule) { - PhoenixMRJobCallable task = - new PhoenixMRJobCallable(HBaseConfiguration.create(conf), indexToBuild, "/"); - results.add(jobSubmitPool.submit(task)); - } - for (Future result : results) { - try { - result.get(JOB_SUBMIT_POOL_TIMEOUT, TimeUnit.MINUTES); - } catch (InterruptedException e) { - failedJobSubmissionCount++; - } catch (ExecutionException e) { - failedJobSubmissionCount++; - } catch (TimeoutException e) { - timedoutJobSubmissionCount++; - } - } - } finally { - PhoenixMRJobUtil.shutdown(jobSubmitPool); + result.get(JOB_SUBMIT_POOL_TIMEOUT, TimeUnit.MINUTES); + } catch 
(InterruptedException e) { + failedJobSubmissionCount++; + } catch (ExecutionException e) { + failedJobSubmissionCount++; + } catch (TimeoutException e) { + timedoutJobSubmissionCount++; } - - LOGGER.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = " - + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount - + " ; Timed out = " + timedoutJobSubmissionCount); - return failedJobSubmissionCount; + } + } finally { + PhoenixMRJobUtil.shutdown(jobSubmitPool); } - public Set getJobsToSubmit(Map candidateJobs, - Set submittedJobs) { - Set toScheduleJobs = - new HashSet(candidateJobs.values()); - for (String jobId : submittedJobs) { - if (candidateJobs.containsKey(jobId)) { - toScheduleJobs.remove(candidateJobs.get(jobId)); - } - } - return toScheduleJobs; + LOGGER.info("Result of Attempt to Submit MR index build Jobs - Jobs attempted = " + + jobsToSchedule.size() + " ; Failed to Submit = " + failedJobSubmissionCount + + " ; Timed out = " + timedoutJobSubmissionCount); + return failedJobSubmissionCount; + } + + public Set getJobsToSubmit(Map candidateJobs, + Set submittedJobs) { + Set toScheduleJobs = new HashSet(candidateJobs.values()); + for (String jobId : submittedJobs) { + if (candidateJobs.containsKey(jobId)) { + toScheduleJobs.remove(candidateJobs.get(jobId)); + } } - - public Set getSubmittedYarnApps() throws Exception { - String rmAddress = PhoenixMRJobUtil.getActiveResourceManagerAddress(conf, zkQuorum); - Map urlParams = new HashMap(); - urlParams.put(YarnApplication.APP_STATES_ELEMENT, YarnApplication.state.NEW.toString() - + "," + YarnApplication.state.ACCEPTED + "," + YarnApplication.state.SUBMITTED - + "," + YarnApplication.state.RUNNING); - String response = PhoenixMRJobUtil.getJobsInformationFromRM(rmAddress, urlParams); - LOGGER.debug("Already Submitted/Running Apps = " + response); - JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(response); - JsonNode appsJson = jsonNode.get(YarnApplication.APPS_ELEMENT); - Set yarnApplicationSet = new HashSet(); - - if (appsJson == null) { - return yarnApplicationSet; - } - JsonNode appJson = appsJson.get(YarnApplication.APP_ELEMENT); - if (appJson == null) { - return yarnApplicationSet; - } - for (final JsonNode clientVersion : appJson) { - yarnApplicationSet.add(clientVersion.get("name").textValue()); - } - - return yarnApplicationSet; + return toScheduleJobs; + } + + public Set getSubmittedYarnApps() throws Exception { + String rmAddress = PhoenixMRJobUtil.getActiveResourceManagerAddress(conf, zkQuorum); + Map urlParams = new HashMap(); + urlParams.put(YarnApplication.APP_STATES_ELEMENT, + YarnApplication.state.NEW.toString() + "," + YarnApplication.state.ACCEPTED + "," + + YarnApplication.state.SUBMITTED + "," + YarnApplication.state.RUNNING); + String response = PhoenixMRJobUtil.getJobsInformationFromRM(rmAddress, urlParams); + LOGGER.debug("Already Submitted/Running Apps = " + response); + JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(response); + JsonNode appsJson = jsonNode.get(YarnApplication.APPS_ELEMENT); + Set yarnApplicationSet = new HashSet(); + + if (appsJson == null) { + return yarnApplicationSet; } - - public static void main(String[] args) throws Exception { - PhoenixMRJobSubmitter t = new PhoenixMRJobSubmitter(); - t.scheduleIndexBuilds(); + JsonNode appJson = appsJson.get(YarnApplication.APP_ELEMENT); + if (appJson == null) { + return yarnApplicationSet; } + for (final JsonNode clientVersion : appJson) { + 
yarnApplicationSet.add(clientVersion.get("name").textValue()); + } + + return yarnApplicationSet; + } + + public static void main(String[] args) throws Exception { + PhoenixMRJobSubmitter t = new PhoenixMRJobSubmitter(); + t.scheduleIndexBuilds(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/YarnApplication.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/YarnApplication.java index 926aea33d7d..4f5e1c8f6a9 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/YarnApplication.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/index/automation/YarnApplication.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,190 +19,197 @@ public class YarnApplication { - public static final String APP_ELEMENT = "app"; - public static final String APPS_ELEMENT = "apps"; - public static final String APP_STATES_ELEMENT = "states"; - - private long finishedTime; - private String amContainerLogs; - private String trackingUI; - - public enum state { - NEW, ACCEPTED, SUBMITTED, RUNNING, FINISHED - } - - private String user; - private String id; - private String clusterId; - - public enum finalStatus { - SUCCEEDED, FAILED, KILLED, UNDEFINED - } - - private String amHostHttpAddress; - private double progress; - private String name; - private long startedTime; - private long elapsedTime; - private String diagnostics; - private String trackingUrl; - private String queue; - private int allocatedMB; - private int allocatedVCores; - private int runningContainers; - private int memorySeconds; - private int vcoreSeconds; - - public long getFinishedTime() { - return finishedTime; - } - - public void setFinishedTime(long finishedTime) { - this.finishedTime = finishedTime; - } - - public String getAmContainerLogs() { - return amContainerLogs; - } - - public void setAmContainerLogs(String amContainerLogs) { - this.amContainerLogs = amContainerLogs; - } - - public String getTrackingUI() { - return trackingUI; - } - - public void setTrackingUI(String trackingUI) { - this.trackingUI = trackingUI; - } - - public String getUser() { - return user; - } - - public void setUser(String user) { - this.user = user; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getClusterId() { - return clusterId; - } - - public void setClusterId(String clusterId) { - this.clusterId = clusterId; - } - - public String getAmHostHttpAddress() { - return amHostHttpAddress; - } - - public void setAmHostHttpAddress(String amHostHttpAddress) { - this.amHostHttpAddress = amHostHttpAddress; - } - - public double getProgress() { - return progress; - } - - public void setProgress(double progress) { - this.progress = progress; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public long getStartedTime() { - return startedTime; - } - - public void setStartedTime(long startedTime) { - this.startedTime = startedTime; - } - - public long getElapsedTime() { - return elapsedTime; - } - - public void setElapsedTime(long elapsedTime) { - 
this.elapsedTime = elapsedTime; - } - - public String getDiagnostics() { - return diagnostics; - } - - public void setDiagnostics(String diagnostics) { - this.diagnostics = diagnostics; - } - - public String getTrackingUrl() { - return trackingUrl; - } + public static final String APP_ELEMENT = "app"; + public static final String APPS_ELEMENT = "apps"; + public static final String APP_STATES_ELEMENT = "states"; + + private long finishedTime; + private String amContainerLogs; + private String trackingUI; + + public enum state { + NEW, + ACCEPTED, + SUBMITTED, + RUNNING, + FINISHED + } + + private String user; + private String id; + private String clusterId; + + public enum finalStatus { + SUCCEEDED, + FAILED, + KILLED, + UNDEFINED + } + + private String amHostHttpAddress; + private double progress; + private String name; + private long startedTime; + private long elapsedTime; + private String diagnostics; + private String trackingUrl; + private String queue; + private int allocatedMB; + private int allocatedVCores; + private int runningContainers; + private int memorySeconds; + private int vcoreSeconds; + + public long getFinishedTime() { + return finishedTime; + } + + public void setFinishedTime(long finishedTime) { + this.finishedTime = finishedTime; + } + + public String getAmContainerLogs() { + return amContainerLogs; + } + + public void setAmContainerLogs(String amContainerLogs) { + this.amContainerLogs = amContainerLogs; + } + + public String getTrackingUI() { + return trackingUI; + } + + public void setTrackingUI(String trackingUI) { + this.trackingUI = trackingUI; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getClusterId() { + return clusterId; + } + + public void setClusterId(String clusterId) { + this.clusterId = clusterId; + } + + public String getAmHostHttpAddress() { + return amHostHttpAddress; + } + + public void setAmHostHttpAddress(String amHostHttpAddress) { + this.amHostHttpAddress = amHostHttpAddress; + } + + public double getProgress() { + return progress; + } + + public void setProgress(double progress) { + this.progress = progress; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public long getStartedTime() { + return startedTime; + } + + public void setStartedTime(long startedTime) { + this.startedTime = startedTime; + } + + public long getElapsedTime() { + return elapsedTime; + } + + public void setElapsedTime(long elapsedTime) { + this.elapsedTime = elapsedTime; + } + + public String getDiagnostics() { + return diagnostics; + } + + public void setDiagnostics(String diagnostics) { + this.diagnostics = diagnostics; + } + + public String getTrackingUrl() { + return trackingUrl; + } - public void setTrackingUrl(String trackingUrl) { - this.trackingUrl = trackingUrl; - } + public void setTrackingUrl(String trackingUrl) { + this.trackingUrl = trackingUrl; + } - public String getQueue() { - return queue; - } + public String getQueue() { + return queue; + } - public void setQueue(String queue) { - this.queue = queue; - } + public void setQueue(String queue) { + this.queue = queue; + } - public int getAllocatedMB() { - return allocatedMB; - } + public int getAllocatedMB() { + return allocatedMB; + } - public void setAllocatedMB(int allocatedMB) { - this.allocatedMB = allocatedMB; - } + public void setAllocatedMB(int 
allocatedMB) { + this.allocatedMB = allocatedMB; + } - public int getAllocatedVCores() { - return allocatedVCores; - } + public int getAllocatedVCores() { + return allocatedVCores; + } - public void setAllocatedVCores(int allocatedVCores) { - this.allocatedVCores = allocatedVCores; - } + public void setAllocatedVCores(int allocatedVCores) { + this.allocatedVCores = allocatedVCores; + } - public int getRunningContainers() { - return runningContainers; - } + public int getRunningContainers() { + return runningContainers; + } - public void setRunningContainers(int runningContainers) { - this.runningContainers = runningContainers; - } + public void setRunningContainers(int runningContainers) { + this.runningContainers = runningContainers; + } - public int getMemorySeconds() { - return memorySeconds; - } + public int getMemorySeconds() { + return memorySeconds; + } - public void setMemorySeconds(int memorySeconds) { - this.memorySeconds = memorySeconds; - } + public void setMemorySeconds(int memorySeconds) { + this.memorySeconds = memorySeconds; + } - public int getVcoreSeconds() { - return vcoreSeconds; - } + public int getVcoreSeconds() { + return vcoreSeconds; + } - public void setVcoreSeconds(int vcoreSeconds) { - this.vcoreSeconds = vcoreSeconds; - } + public void setVcoreSeconds(int vcoreSeconds) { + this.vcoreSeconds = vcoreSeconds; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformReducer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformReducer.java index bbabe8878f0..70e4312e988 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformReducer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformReducer.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,12 @@ */ package org.apache.phoenix.mapreduce.transform; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; + +import java.io.IOException; +import java.sql.Connection; +import java.util.concurrent.atomic.AtomicBoolean; + import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.IntWritable; import org.apache.phoenix.mapreduce.index.IndexTool; @@ -27,46 +33,37 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexVerifyType; - /** * Reducer class that does only one task and that is to complete transform. 
*/ -public class PhoenixTransformReducer extends - PhoenixIndexImportDirectReducer { - private AtomicBoolean calledOnce = new AtomicBoolean(false); +public class PhoenixTransformReducer extends PhoenixIndexImportDirectReducer { + private AtomicBoolean calledOnce = new AtomicBoolean(false); - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixTransformReducer.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransformReducer.class); - @Override - protected void reduce(ImmutableBytesWritable arg0, Iterable arg1, - Context context) - throws IOException, InterruptedException { - if (!calledOnce.compareAndSet(false, true)) { - return; - } - IndexTool.IndexVerifyType verifyType = getIndexVerifyType(context.getConfiguration()); - if (verifyType != IndexTool.IndexVerifyType.NONE) { - updateCounters(verifyType, context); - } + @Override + protected void reduce(ImmutableBytesWritable arg0, Iterable arg1, Context context) + throws IOException, InterruptedException { + if (!calledOnce.compareAndSet(false, true)) { + return; + } + IndexTool.IndexVerifyType verifyType = getIndexVerifyType(context.getConfiguration()); + if (verifyType != IndexTool.IndexVerifyType.NONE) { + updateCounters(verifyType, context); + } - if (verifyType != IndexTool.IndexVerifyType.ONLY) { - try (final Connection - connection = ConnectionUtil.getInputConnection(context.getConfiguration())) { - // Complete full Transform and add a partial transform - Transform.completeTransform(connection, context.getConfiguration()); - if (PhoenixConfigurationUtil.getForceCutover(context.getConfiguration())) { - Transform.doForceCutover(connection, context.getConfiguration()); - } - } catch (Exception e) { - LOGGER.error(" Failed to complete transform", e); - throw new RuntimeException(e.getMessage()); - } + if (verifyType != IndexTool.IndexVerifyType.ONLY) { + try (final Connection connection = + ConnectionUtil.getInputConnection(context.getConfiguration())) { + // Complete full Transform and add a partial transform + Transform.completeTransform(connection, context.getConfiguration()); + if (PhoenixConfigurationUtil.getForceCutover(context.getConfiguration())) { + Transform.doForceCutover(connection, context.getConfiguration()); } + } catch (Exception e) { + LOGGER.error(" Failed to complete transform", e); + throw new RuntimeException(e.getMessage()); + } } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformRepairMapper.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformRepairMapper.java index 6caba9b5f5c..fe7cb0f1ecf 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformRepairMapper.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformRepairMapper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,16 @@ */ package org.apache.phoenix.mapreduce.transform; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -47,159 +56,160 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.transform.TransformMaintainer; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.SQLException; -import java.util.HashSet; -import java.util.List; -import java.util.Properties; -import java.util.Set; -import java.util.UUID; - -import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES; - /** * Mapper that hands over rows from data table to the index table. */ public class PhoenixTransformRepairMapper extends TableMapper { - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixTransformRepairMapper.class); - private DirectHTableWriter writer; - private PhoenixConnection connection; - private ImmutableBytesPtr maintainers; - private int batchSize; - private List mutations ; + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTransformRepairMapper.class); + private DirectHTableWriter writer; + private PhoenixConnection connection; + private ImmutableBytesPtr maintainers; + private int batchSize; + private List mutations; - @Override - protected void setup(final Context context) throws IOException, InterruptedException { - super.setup(context); - final Configuration configuration = context.getConfiguration(); - writer = new DirectHTableWriter(configuration); - try { - final Properties overrideProps = new Properties(); - String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); - String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); - if(txScnValue==null && scn!=null) { - overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); - } - connection = ConnectionUtil.getOutputConnection(configuration, overrideProps).unwrap(PhoenixConnection.class); - maintainers=new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration)); - int maxSize = - connection.getQueryServices().getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, - QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); - batchSize = Math.min(connection.getMutateBatchSize(), maxSize); - this.mutations = Lists.newArrayListWithExpectedSize(batchSize); - LOGGER.info("Mutation Batch Size = " + batchSize); - } catch (SQLException e) { - tryClosingResources(); - throw new RuntimeException(e.getMessage()); - } + @Override + protected void setup(final Context context) throws IOException, InterruptedException { + super.setup(context); + final Configuration configuration = context.getConfiguration(); + 
writer = new DirectHTableWriter(configuration); + try { + final Properties overrideProps = new Properties(); + String scn = configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE); + String txScnValue = configuration.get(PhoenixConfigurationUtil.TX_SCN_VALUE); + if (txScnValue == null && scn != null) { + overrideProps.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, scn); + } + connection = ConnectionUtil.getOutputConnection(configuration, overrideProps) + .unwrap(PhoenixConnection.class); + maintainers = + new ImmutableBytesPtr(PhoenixConfigurationUtil.getIndexMaintainers(configuration)); + int maxSize = connection.getQueryServices().getProps().getInt( + QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE); + batchSize = Math.min(connection.getMutateBatchSize(), maxSize); + this.mutations = Lists.newArrayListWithExpectedSize(batchSize); + LOGGER.info("Mutation Batch Size = " + batchSize); + } catch (SQLException e) { + tryClosingResources(); + throw new RuntimeException(e.getMessage()); } + } - @Override - protected void map(ImmutableBytesWritable row, Result value, Context context) - throws IOException, InterruptedException { - context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); - String oldTableName = PhoenixConfigurationUtil.getIndexToolDataTableName(context.getConfiguration()); - Set extraRowsInNewTable = new HashSet<>(); - try (Table oldHTable = connection.getQueryServices().getTable(Bytes.toBytes(oldTableName))) { - for (Cell cell : value.rawCells()) { - Scan buildNewTableScan = new Scan(); - // The following attributes are set to instruct UngroupedAggregateRegionObserver to do partial rebuild - buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, TRUE_BYTES); - buildNewTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, maintainers.get()); - buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, TRUE_BYTES); - buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); - IndexMaintainer transformMaintainer = TransformMaintainer.deserialize(maintainers.get()).get(0); + @Override + protected void map(ImmutableBytesWritable row, Result value, Context context) + throws IOException, InterruptedException { + context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); + String oldTableName = + PhoenixConfigurationUtil.getIndexToolDataTableName(context.getConfiguration()); + Set extraRowsInNewTable = new HashSet<>(); + try (Table oldHTable = connection.getQueryServices().getTable(Bytes.toBytes(oldTableName))) { + for (Cell cell : value.rawCells()) { + Scan buildNewTableScan = new Scan(); + // The following attributes are set to instruct UngroupedAggregateRegionObserver to do + // partial rebuild + buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.UNGROUPED_AGG, + TRUE_BYTES); + buildNewTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, maintainers.get()); + buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.REBUILD_INDEXES, + TRUE_BYTES); + buildNewTableScan.setAttribute( + BaseScannerRegionObserverConstants.SKIP_REGION_BOUNDARY_CHECK, Bytes.toBytes(true)); + IndexMaintainer transformMaintainer = + TransformMaintainer.deserialize(maintainers.get()).get(0); - byte[] newRowKey = CellUtil.cloneRow(cell); - // Rebuild the new row from the corresponding row in the old data table - // To implement rowkey reordering etc, we need to rebuild the rowkey. 
For now it is the same - buildNewTableScan.withStartRow(newRowKey, true); - buildNewTableScan.withStopRow(newRowKey, true); - buildNewTableScan.setTimeRange(0, cell.getTimestamp()+1); - // Pass the index row key to the partial index builder which will rebuild the index row and check if the - // row key of this rebuilt index row matches with the passed index row key - buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY, newRowKey); - Result result = null; - try (ResultScanner resultScanner = oldHTable.getScanner(buildNewTableScan)) { - result = resultScanner.next(); - } catch (Throwable t) { - ClientUtil.throwIOException(oldTableName, t); - } + byte[] newRowKey = CellUtil.cloneRow(cell); + // Rebuild the new row from the corresponding row in the old data table + // To implement rowkey reordering etc, we need to rebuild the rowkey. For now it is the same + buildNewTableScan.withStartRow(newRowKey, true); + buildNewTableScan.withStopRow(newRowKey, true); + buildNewTableScan.setTimeRange(0, cell.getTimestamp() + 1); + // Pass the index row key to the partial index builder which will rebuild the index row and + // check if the + // row key of this rebuilt index row matches with the passed index row key + buildNewTableScan.setAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY, newRowKey); + Result result = null; + try (ResultScanner resultScanner = oldHTable.getScanner(buildNewTableScan)) { + result = resultScanner.next(); + } catch (Throwable t) { + ClientUtil.throwIOException(oldTableName, t); + } - // A single cell will be returned. We decode that here - byte[] scanVal = result.value(); - long code = PLong.INSTANCE.getCodec().decodeLong(new ImmutableBytesWritable(scanVal), SortOrder.getDefault()); - if (code == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) { - if (!extraRowsInNewTable.contains(newRowKey)) { - extraRowsInNewTable.add(newRowKey); - } - // This means there does not exist an old table row for this unverified new table row - // Delete the unverified row from the new table - Delete del = transformMaintainer.buildRowDeleteMutation(newRowKey, - IndexMaintainer.DeleteType.ALL_VERSIONS, cell.getTimestamp()); - mutations.add(del); - } - // Write Mutation Batch - if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) { - writeBatch(mutations, context); - mutations.clear(); - } - context.getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT).setValue(extraRowsInNewTable.size()); - // Make sure progress is reported to Application Master. - context.progress(); - } - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); - throw new RuntimeException(e); + // A single cell will be returned. 
We decode that here + byte[] scanVal = result.value(); + long code = PLong.INSTANCE.getCodec().decodeLong(new ImmutableBytesWritable(scanVal), + SortOrder.getDefault()); + if (code == GlobalIndexChecker.RebuildReturnCode.NO_DATA_ROW.getValue()) { + if (!extraRowsInNewTable.contains(newRowKey)) { + extraRowsInNewTable.add(newRowKey); + } + // This means there does not exist an old table row for this unverified new table row + // Delete the unverified row from the new table + Delete del = transformMaintainer.buildRowDeleteMutation(newRowKey, + IndexMaintainer.DeleteType.ALL_VERSIONS, cell.getTimestamp()); + mutations.add(del); + } + // Write Mutation Batch + if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) { + writeBatch(mutations, context); + mutations.clear(); } + context + .getCounter(PhoenixIndexToolJobCounters.BEFORE_REPAIR_EXTRA_UNVERIFIED_INDEX_ROW_COUNT) + .setValue(extraRowsInNewTable.size()); + // Make sure progress is reported to Application Master. + context.progress(); + } + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); + throw new RuntimeException(e); } + } - private void writeBatch(List mutations, Context context) - throws IOException, SQLException, InterruptedException { - writer.write(mutations); - context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(mutations.size()); - } + private void writeBatch(List mutations, Context context) + throws IOException, SQLException, InterruptedException { + writer.write(mutations); + context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(mutations.size()); + } - @Override - protected void cleanup(Context context) throws IOException, InterruptedException { - try { - // Write the last & final Mutation Batch - if (!mutations.isEmpty()) { - writeBatch(mutations, context); - } - // We are writing some dummy key-value as map output here so that we commit only one - // output to reducer. - context.write(new ImmutableBytesWritable(UUID.randomUUID().toString().getBytes()), - new IntWritable(0)); - super.cleanup(context); - } catch (SQLException e) { - LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); - context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); - throw new RuntimeException(e); - } finally { - tryClosingResources(); - } + @Override + protected void cleanup(Context context) throws IOException, InterruptedException { + try { + // Write the last & final Mutation Batch + if (!mutations.isEmpty()) { + writeBatch(mutations, context); + } + // We are writing some dummy key-value as map output here so that we commit only one + // output to reducer. 
+ context.write(new ImmutableBytesWritable(UUID.randomUUID().toString().getBytes()), + new IntWritable(0)); + super.cleanup(context); + } catch (SQLException e) { + LOGGER.error(" Error {} while read/write of a record ", e.getMessage()); + context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1); + throw new RuntimeException(e); + } finally { + tryClosingResources(); } + } - private void tryClosingResources() throws IOException { - if (this.connection != null) { - try { - this.connection.close(); - } catch (SQLException e) { - LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); - } - } - if (this.writer != null) { - this.writer.close(); - } + private void tryClosingResources() throws IOException { + if (this.connection != null) { + try { + this.connection.close(); + } catch (SQLException e) { + LOGGER.error("Error while closing connection in the PhoenixIndexMapper class ", e); + } + } + if (this.writer != null) { + this.writer.close(); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java index 6410b8cf72e..42f4495bed0 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/PhoenixTransformWithViewsInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,18 @@ */ package org.apache.phoenix.mapreduce.transform; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName; + +import java.io.IOException; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Properties; + import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Pair; @@ -44,87 +54,85 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Properties; - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getIndexToolIndexTableName; +public class PhoenixTransformWithViewsInputFormat + extends PhoenixServerBuildIndexInputFormat { + private static final Logger LOGGER = + LoggerFactory.getLogger(PhoenixTransformWithViewsInputFormat.class); -public class PhoenixTransformWithViewsInputFormat extends PhoenixServerBuildIndexInputFormat { - private static final Logger LOGGER = - LoggerFactory.getLogger(PhoenixTransformWithViewsInputFormat.class); - @Override - public List getSplits(JobContext context) 
throws IOException, InterruptedException { - final Configuration configuration = context.getConfiguration(); - try (PhoenixConnection connection = (PhoenixConnection) - ConnectionUtil.getInputConnection(configuration)) { - try (Table hTable = connection.unwrap(PhoenixConnection.class).getQueryServices().getTable( - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, configuration).toBytes())) { - String oldDataTableFullName = PhoenixConfigurationUtil.getIndexToolDataTableName(configuration); - String newDataTableFullName = getIndexToolIndexTableName(configuration); - PTable newDataTable = connection.getTableNoCache(newDataTableFullName); - String schemaName = SchemaUtil.getSchemaNameFromFullName(oldDataTableFullName); - String tableName = SchemaUtil.getTableNameFromFullName(oldDataTableFullName); - byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ? null : schemaName.getBytes(); - Pair, List> allDescendantViews = ViewUtil.findAllDescendantViews(hTable, configuration, null, schemaNameBytes, - tableName.getBytes(), EnvironmentEdgeManager.currentTimeMillis(), false); - List legitimateDecendants = allDescendantViews.getFirst(); + @Override + public List getSplits(JobContext context) throws IOException, InterruptedException { + final Configuration configuration = context.getConfiguration(); + try (PhoenixConnection connection = + (PhoenixConnection) ConnectionUtil.getInputConnection(configuration)) { + try (Table hTable = connection.unwrap(PhoenixConnection.class).getQueryServices().getTable( + SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, configuration).toBytes())) { + String oldDataTableFullName = + PhoenixConfigurationUtil.getIndexToolDataTableName(configuration); + String newDataTableFullName = getIndexToolIndexTableName(configuration); + PTable newDataTable = connection.getTableNoCache(newDataTableFullName); + String schemaName = SchemaUtil.getSchemaNameFromFullName(oldDataTableFullName); + String tableName = SchemaUtil.getTableNameFromFullName(oldDataTableFullName); + byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ? 
null : schemaName.getBytes(); + Pair, List> allDescendantViews = + ViewUtil.findAllDescendantViews(hTable, configuration, null, schemaNameBytes, + tableName.getBytes(), EnvironmentEdgeManager.currentTimeMillis(), false); + List legitimateDecendants = allDescendantViews.getFirst(); - List inputSplits = new ArrayList<>(); + List inputSplits = new ArrayList<>(); - HashMap columnMap = new HashMap<>(); - for (PColumn column : newDataTable.getColumns()) { - columnMap.put(column.getName().getString(), column); - } + HashMap columnMap = new HashMap<>(); + for (PColumn column : newDataTable.getColumns()) { + columnMap.put(column.getName().getString(), column); + } - for (PTable decendant : legitimateDecendants) { - if (decendant.getViewType() == PTable.ViewType.READ_ONLY) { - continue; - } - PTable newView = Transform.getTransformedView(decendant, newDataTable, columnMap, true); - QueryPlan queryPlan = getQueryPlan(newView, decendant, connection); - inputSplits.addAll(generateSplits(queryPlan, configuration)); - } - if (inputSplits.size() == 0) { - // Get for base table - ServerBuildTransformingTableCompiler compiler = new ServerBuildTransformingTableCompiler(connection, - oldDataTableFullName); - MutationPlan plan = compiler.compile(newDataTable); - inputSplits.addAll(generateSplits(plan.getQueryPlan(), configuration)); - } - return inputSplits; - } - } catch (Exception e) { - LOGGER.error("PhoenixTransformWithViewsInputFormat failed with: " + e.getMessage()); - throw new RuntimeException(e); + for (PTable decendant : legitimateDecendants) { + if (decendant.getViewType() == PTable.ViewType.READ_ONLY) { + continue; + } + PTable newView = Transform.getTransformedView(decendant, newDataTable, columnMap, true); + QueryPlan queryPlan = getQueryPlan(newView, decendant, connection); + inputSplits.addAll(generateSplits(queryPlan, configuration)); } + if (inputSplits.size() == 0) { + // Get for base table + ServerBuildTransformingTableCompiler compiler = + new ServerBuildTransformingTableCompiler(connection, oldDataTableFullName); + MutationPlan plan = compiler.compile(newDataTable); + inputSplits.addAll(generateSplits(plan.getQueryPlan(), configuration)); + } + return inputSplits; + } + } catch (Exception e) { + LOGGER.error("PhoenixTransformWithViewsInputFormat failed with: " + e.getMessage()); + throw new RuntimeException(e); } + } - private QueryPlan getQueryPlan(PTable newTable, PTable oldTable, PhoenixConnection phoenixConnection) throws SQLException { - String tableTenantId = oldTable.getTenantId() == null? null:oldTable.getTenantId().getString(); - String connTenantId = phoenixConnection.getTenantId()==null? null:phoenixConnection.getTenantId().getString(); - if (!Strings.isNullOrEmpty(tableTenantId) && !StringUtils.equals(tableTenantId, connTenantId)) { - Properties props = new Properties(); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tableTenantId); + private QueryPlan getQueryPlan(PTable newTable, PTable oldTable, + PhoenixConnection phoenixConnection) throws SQLException { + String tableTenantId = + oldTable.getTenantId() == null ? null : oldTable.getTenantId().getString(); + String connTenantId = + phoenixConnection.getTenantId() == null ? 
null : phoenixConnection.getTenantId().getString(); + if (!Strings.isNullOrEmpty(tableTenantId) && !StringUtils.equals(tableTenantId, connTenantId)) { + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tableTenantId); - try (PhoenixConnection tenantConnection = (PhoenixConnection) - DriverManager.getConnection(phoenixConnection.getURL(), props)) { - return getQueryPlanInternal(newTable, oldTable, tenantConnection); - } - } - return getQueryPlanInternal(newTable, oldTable, phoenixConnection); + try (PhoenixConnection tenantConnection = + (PhoenixConnection) DriverManager.getConnection(phoenixConnection.getURL(), props)) { + return getQueryPlanInternal(newTable, oldTable, tenantConnection); + } } + return getQueryPlanInternal(newTable, oldTable, phoenixConnection); + } - private QueryPlan getQueryPlanInternal(PTable newTable, PTable decendant, PhoenixConnection phoenixConnection) throws SQLException { - ServerBuildTransformingTableCompiler compiler = new ServerBuildTransformingTableCompiler(phoenixConnection, - SchemaUtil.getTableName(decendant.getSchemaName(), decendant.getTableName()).getString()); + private QueryPlan getQueryPlanInternal(PTable newTable, PTable decendant, + PhoenixConnection phoenixConnection) throws SQLException { + ServerBuildTransformingTableCompiler compiler = + new ServerBuildTransformingTableCompiler(phoenixConnection, + SchemaUtil.getTableName(decendant.getSchemaName(), decendant.getTableName()).getString()); - MutationPlan plan = compiler.compile(newTable); - return plan.getQueryPlan(); - } + MutationPlan plan = compiler.compile(newTable); + return plan.getQueryPlan(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java index 000eda20974..fe11127c456 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,22 +17,34 @@ */ package org.apache.phoenix.mapreduce.transform; -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.phoenix.mapreduce.PhoenixTTLTool; -import org.apache.phoenix.schema.PIndexState; -import org.apache.phoenix.schema.task.ServerTask; -import org.apache.phoenix.schema.transform.TransformClient; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; +import static org.apache.phoenix.mapreduce.index.IndexTool.createIndexToolTables; +import static org.apache.phoenix.mapreduce.index.IndexTool.isTimeRangeSet; +import static org.apache.phoenix.mapreduce.index.IndexTool.validateTimeRange; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.setCurrentScnValue; +import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; +import static org.apache.phoenix.util.QueryUtil.getConnection; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -56,6 +68,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.mapreduce.CsvBulkImportUtil; import org.apache.phoenix.mapreduce.PhoenixServerBuildIndexInputFormat; +import org.apache.phoenix.mapreduce.PhoenixTTLTool; import org.apache.phoenix.mapreduce.index.IndexScrutinyTool; import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.mapreduce.index.PhoenixServerBuildIndexDBWritable; @@ -66,13 +79,19 @@ import org.apache.phoenix.query.HBaseFactoryProvider; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.schema.task.ServerTask; import org.apache.phoenix.schema.task.Task; import org.apache.phoenix.schema.transform.SystemTransformRecord; import org.apache.phoenix.schema.transform.Transform; +import org.apache.phoenix.schema.transform.TransformClient; import org.apache.phoenix.schema.transform.TransformMaintainer; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import 
org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; @@ -90,917 +109,934 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES; -import static org.apache.phoenix.mapreduce.index.IndexTool.createIndexToolTables; -import static org.apache.phoenix.mapreduce.index.IndexTool.isTimeRangeSet; -import static org.apache.phoenix.mapreduce.index.IndexTool.validateTimeRange; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.setCurrentScnValue; -import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; -import static org.apache.phoenix.util.QueryUtil.getConnection; - public class TransformTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(TransformTool.class); - - public enum MR_COUNTER_METRICS { - TRANSFORM_FAILED, - TRANSFORM_SUCCEED + private static final Logger LOGGER = LoggerFactory.getLogger(TransformTool.class); + + public enum MR_COUNTER_METRICS { + TRANSFORM_FAILED, + TRANSFORM_SUCCEED + } + + private static final Option OUTPUT_PATH_OPTION = + new Option("op", "output-path", true, "Output path where the files are written"); + private static final Option SCHEMA_NAME_OPTION = + new Option("s", "schema", true, "Phoenix schema name (optional)"); + private static final Option DATA_TABLE_OPTION = + new Option("dt", "data-table", true, "Data table name (mandatory)"); + private static final Option INDEX_TABLE_OPTION = new Option("it", "index-table", true, + "Index table name(not required in case of partial rebuilding)"); + + private static final Option FIX_UNVERIFIED_TRANSFORM_OPTION = + new Option("fu", "fix-unverified", false, "To fix unverified transform records"); + + private static final Option FORCE_CUTOVER_OPTION = new Option("fco", "force-cutover", false, + "Updated to old table to point to new table. New table will be active and reads will start serving from the new table"); + + private static final Option USE_NEW_TABLE_AS_SOURCE_OPTION = new Option("fn", "from-new", false, + "To verify every row in the new table has a corresponding row in the old table. "); + + private static final Option PARTIAL_TRANSFORM_OPTION = new Option("pt", "partial-transform", + false, "To transform a data table from a start timestamp"); + + private static final Option ABORT_TRANSFORM_OPTION = + new Option("abort", "abort", false, "Aborts the ongoing transform"); + + private static final Option PAUSE_TRANSFORM_OPTION = new Option("pause", "pause", false, + "Pauses the ongoing transform. If the ongoing transform fails, it will not be retried"); + + private static final Option RESUME_TRANSFORM_OPTION = + new Option("resume", "resume", false, "Resumes the ongoing transform"); + + private static final Option JOB_PRIORITY_OPTION = new Option("p", "job-priority", true, + "Define job priority from 0(highest) to 4. 
Default is 2(normal)"); + + private static final int DEFAULT_AUTOSPLIT_NUM_REGIONS = 20; + + private static final Option AUTO_SPLIT_OPTION = new Option("spa", "autosplit", true, + "Automatically split the new table if the # of data table regions is greater than N. " + + "Takes an optional argument specifying N, otherwise defaults to " + + DEFAULT_AUTOSPLIT_NUM_REGIONS); + + private static final Option RUN_FOREGROUND_OPTION = new Option("runfg", "run-foreground", false, + "If specified, runs transform in Foreground. Default - Runs the transform in background."); + + private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, + "If specified, uses Tenant connection for tenant index transform (optional)"); + + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + private static final Option START_TIME_OPTION = + new Option("st", "start-time", true, "Start time for transform"); + + private static final Option END_TIME_OPTION = + new Option("et", "end-time", true, "End time for transform"); + + private static final Option SPLIT_SIZE_OPTION = + new Option("ms", "split-size-per-mapper", true, "Define split size for each mapper."); + + public static final String TRANSFORM_JOB_NAME_TEMPLATE = "PHOENIX_TRANS_%s.%s.%s"; + + public static final String PARTIAL_TRANSFORM_NOT_APPLICABLE = "Partial transform accepts " + + "non-zero ts set in the past as start-time(st) option and that ts must be present in SYSTEM.TRANSFORM table"; + + public static final String TRANSFORM_NOT_APPLICABLE = + "Transform is not applicable for local indexes or views or transactional tables"; + + public static final String PARTIAL_TRANSFORM_NOT_COMPATIBLE = + "Can't abort/pause/resume/split during partial transform"; + + public static final String FORCE_CUTOVER_NOT_COMPATIBLE = + "Force cutover is not applicable with the other parameters"; + + private static final Option VERIFY_OPTION = new Option("v", "verify", true, + "To verify every data row in the old table has a corresponding row in the new table. " + + "The accepted values are NONE, ONLY, BEFORE, AFTER, and BOTH. " + + "NONE is for no inline verification, which is also the default for this option. ONLY is for " + + "verifying without rebuilding the new table rows. The rest for verifying before, after, and both before " + + "and after rebuilding row. 
If the verification is done before rebuilding rows and the correct " + + "new table rows will not be rebuilt"); + + private Configuration configuration; + private Connection connection; + private String tenantId; + private String dataTable; + private String logicalParentName; + private String basePath; + // logicalTableName is index table and logicalParentName is the data table if this is an index + // transform + // If this is a data table transform, logicalParentName is null and logicalTableName is dataTable + private String logicalTableName; + private String schemaName; + private String indexTable; + private String qDataTable; // normalized with schema + private PTable pIndexTable = null; + private PTable pDataTable; + private PTable pOldTable; + private PTable pNewTable; + + private String oldTableWithSchema; + private String newTableWithSchema; + private JobPriority jobPriority; + private IndexTool.IndexVerifyType verifyType = IndexTool.IndexVerifyType.NONE;; + private String jobName; + private boolean isForeground; + private Long startTime, endTime, lastTransformTime; + private boolean isPartialTransform; + private boolean shouldFixUnverified; + private boolean shouldUseNewTableAsSource; + private boolean shouldForceCutover; + private int splitSize; + private Job job; + + public Long getStartTime() { + return startTime; + } + + public Long getEndTime() { + return endTime; + } + + public CommandLine parseOptions(String[] args) { + final Options options = getOptions(); + CommandLineParser parser = new PosixParser(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - private static final Option OUTPUT_PATH_OPTION = new Option("op", "output-path", true, - "Output path where the files are written"); - private static final Option SCHEMA_NAME_OPTION = new Option("s", "schema", true, - "Phoenix schema name (optional)"); - private static final Option DATA_TABLE_OPTION = new Option("dt", "data-table", true, - "Data table name (mandatory)"); - private static final Option INDEX_TABLE_OPTION = new Option("it", "index-table", true, - "Index table name(not required in case of partial rebuilding)"); - - private static final Option FIX_UNVERIFIED_TRANSFORM_OPTION = new Option("fu", "fix-unverified", false, - "To fix unverified transform records"); - - private static final Option FORCE_CUTOVER_OPTION = new Option("fco", "force-cutover", false, - "Updated to old table to point to new table. New table will be active and reads will start serving from the new table"); - - private static final Option USE_NEW_TABLE_AS_SOURCE_OPTION = - new Option("fn", "from-new", false, - "To verify every row in the new table has a corresponding row in the old table. "); - - private static final Option PARTIAL_TRANSFORM_OPTION = new Option("pt", "partial-transform", false, - "To transform a data table from a start timestamp"); - - private static final Option ABORT_TRANSFORM_OPTION = new Option("abort", "abort", false, - "Aborts the ongoing transform"); - - private static final Option PAUSE_TRANSFORM_OPTION = new Option("pause", "pause", false, - "Pauses the ongoing transform. 
If the ongoing transform fails, it will not be retried"); - - private static final Option RESUME_TRANSFORM_OPTION = new Option("resume", "resume", false, - "Resumes the ongoing transform"); - - private static final Option JOB_PRIORITY_OPTION = new Option("p", "job-priority", true, - "Define job priority from 0(highest) to 4. Default is 2(normal)"); - - private static final int DEFAULT_AUTOSPLIT_NUM_REGIONS = 20; - - private static final Option AUTO_SPLIT_OPTION = - new Option("spa", "autosplit", true, - "Automatically split the new table if the # of data table regions is greater than N. " - + "Takes an optional argument specifying N, otherwise defaults to " + DEFAULT_AUTOSPLIT_NUM_REGIONS - ); - - private static final Option RUN_FOREGROUND_OPTION = - new Option( - "runfg", - "run-foreground", - false, - "If specified, runs transform in Foreground. Default - Runs the transform in background."); - - private static final Option TENANT_ID_OPTION = new Option("tenant", "tenant-id", true, - "If specified, uses Tenant connection for tenant index transform (optional)"); - - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - private static final Option START_TIME_OPTION = new Option("st", "start-time", - true, "Start time for transform"); - - private static final Option END_TIME_OPTION = new Option("et", "end-time", - true, "End time for transform"); - - private static final Option SPLIT_SIZE_OPTION = new Option("ms", "split-size-per-mapper", true, - "Define split size for each mapper."); - - public static final String TRANSFORM_JOB_NAME_TEMPLATE = "PHOENIX_TRANS_%s.%s.%s"; - - public static final String PARTIAL_TRANSFORM_NOT_APPLICABLE = "Partial transform accepts " - + "non-zero ts set in the past as start-time(st) option and that ts must be present in SYSTEM.TRANSFORM table"; - - public static final String TRANSFORM_NOT_APPLICABLE = "Transform is not applicable for local indexes or views or transactional tables"; - - public static final String PARTIAL_TRANSFORM_NOT_COMPATIBLE = "Can't abort/pause/resume/split during partial transform"; - - public static final String FORCE_CUTOVER_NOT_COMPATIBLE = "Force cutover is not applicable with the other parameters"; - - private static final Option VERIFY_OPTION = new Option("v", "verify", true, - "To verify every data row in the old table has a corresponding row in the new table. " + - "The accepted values are NONE, ONLY, BEFORE, AFTER, and BOTH. " + - "NONE is for no inline verification, which is also the default for this option. ONLY is for " + - "verifying without rebuilding the new table rows. The rest for verifying before, after, and both before " + - "and after rebuilding row. 
If the verification is done before rebuilding rows and the correct " + - "new table rows will not be rebuilt"); - - - private Configuration configuration; - private Connection connection; - private String tenantId; - private String dataTable; - private String logicalParentName; - private String basePath; - // logicalTableName is index table and logicalParentName is the data table if this is an index transform - // If this is a data table transform, logicalParentName is null and logicalTableName is dataTable - private String logicalTableName; - private String schemaName; - private String indexTable; - private String qDataTable; //normalized with schema - private PTable pIndexTable = null; - private PTable pDataTable; - private PTable pOldTable; - private PTable pNewTable; - - private String oldTableWithSchema; - private String newTableWithSchema; - private JobPriority jobPriority; - private IndexTool.IndexVerifyType verifyType = IndexTool.IndexVerifyType.NONE;; - private String jobName; - private boolean isForeground; - private Long startTime, endTime, lastTransformTime; - private boolean isPartialTransform; - private boolean shouldFixUnverified; - private boolean shouldUseNewTableAsSource; - private boolean shouldForceCutover; - private int splitSize; - private Job job; - - public Long getStartTime() { - return startTime; - } - - public Long getEndTime() { return endTime; } - - public CommandLine parseOptions(String[] args) { - final Options options = getOptions(); - CommandLineParser parser = new PosixParser(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), - options); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - - this.jobPriority = getJobPriority(cmdLine); - - boolean dataTableProvided = (cmdLine.hasOption(DATA_TABLE_OPTION.getOpt())); - if (!dataTableProvided) { - throw new IllegalStateException(DATA_TABLE_OPTION.getLongOpt() + " is a mandatory parameter"); - } - - return cmdLine; - } - - private Options getOptions() { - final Options options = new Options(); - options.addOption(OUTPUT_PATH_OPTION); - options.addOption(SCHEMA_NAME_OPTION); - options.addOption(DATA_TABLE_OPTION); - options.addOption(INDEX_TABLE_OPTION); - options.addOption(TENANT_ID_OPTION); - options.addOption(HELP_OPTION); - options.addOption(JOB_PRIORITY_OPTION); - options.addOption(RUN_FOREGROUND_OPTION); - options.addOption(PARTIAL_TRANSFORM_OPTION); - options.addOption(START_TIME_OPTION); - options.addOption(END_TIME_OPTION); - options.addOption(SPLIT_SIZE_OPTION); - options.addOption(FIX_UNVERIFIED_TRANSFORM_OPTION); - options.addOption(FORCE_CUTOVER_OPTION); - options.addOption(USE_NEW_TABLE_AS_SOURCE_OPTION); - options.addOption(AUTO_SPLIT_OPTION); - options.addOption(ABORT_TRANSFORM_OPTION); - options.addOption(PAUSE_TRANSFORM_OPTION); - options.addOption(RESUME_TRANSFORM_OPTION); - options.addOption(VERIFY_OPTION); - START_TIME_OPTION.setOptionalArg(true); - END_TIME_OPTION.setOptionalArg(true); - return options; - } - - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - LOGGER.error(errorMessage); - printHelpAndExit(options, 1); - } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); - } - - public CommandLine parseArgs(String[] args) throws 
Exception { - CommandLine cmdLine; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - throw e; - } - - if (getConf() == null) { - setConf(HBaseConfiguration.create()); - } - - return cmdLine; - } - - @VisibleForTesting - public int populateTransformToolAttributesAndValidate(CommandLine cmdLine) throws Exception { - boolean useStartTime = cmdLine.hasOption(START_TIME_OPTION.getOpt()); - boolean useEndTime = cmdLine.hasOption(END_TIME_OPTION.getOpt()); - shouldFixUnverified = cmdLine.hasOption(FIX_UNVERIFIED_TRANSFORM_OPTION.getOpt()); - shouldUseNewTableAsSource = cmdLine.hasOption(USE_NEW_TABLE_AS_SOURCE_OPTION.getOpt()); - shouldForceCutover = cmdLine.hasOption(FORCE_CUTOVER_OPTION.getOpt()); - basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); - isPartialTransform = cmdLine.hasOption(PARTIAL_TRANSFORM_OPTION.getOpt()); - if (shouldForceCutover) { - LOGGER.info("TransformTool will fix the unverified rows before cutover"); - shouldFixUnverified = true; - } - if (useStartTime) { - startTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); - } - - if (useEndTime) { - endTime = new Long(cmdLine.getOptionValue(END_TIME_OPTION.getOpt())); - } - - if (isTimeRangeSet(startTime, endTime)) { - validateTimeRange(startTime, endTime); - } - - if ((isPartialTransform || shouldFixUnverified) && - (cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt()))) { - throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_COMPATIBLE); - } - if ((isPartialTransform || shouldFixUnverified) && - (cmdLine.hasOption(ABORT_TRANSFORM_OPTION.getOpt()) || cmdLine.hasOption(PAUSE_TRANSFORM_OPTION.getOpt()) - || cmdLine.hasOption(RESUME_TRANSFORM_OPTION.getOpt()))) { - throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_COMPATIBLE); - } - if (shouldForceCutover && (isPartialTransform || useStartTime || useEndTime || shouldUseNewTableAsSource - || cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt()))) { - throw new IllegalArgumentException(FORCE_CUTOVER_NOT_COMPATIBLE); - } - - schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); - dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); - indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); - qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); - isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); - if (cmdLine.hasOption(SPLIT_SIZE_OPTION.getOpt())) { - splitSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); - } else { - splitSize = PhoenixTTLTool.DEFAULT_MAPPER_SPLIT_SIZE; - } - logicalTableName = dataTable; - logicalParentName = null; - if (!Strings.isNullOrEmpty(indexTable)) { - logicalTableName = indexTable; - logicalParentName = SchemaUtil.getTableName(schemaName, dataTable); - } - - if (isPartialTransform) { - if (!cmdLine.hasOption(START_TIME_OPTION.getOpt())) { - throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_APPLICABLE); - } - lastTransformTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); - SystemTransformRecord transformRecord = getTransformRecord(null); - if (transformRecord == null) { - throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_APPLICABLE); - } - if (lastTransformTime == null) { - lastTransformTime = transformRecord.getTransformLastStateTs().getTime(); - } else { - validateLastTransformTime(); - } - } - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - pDataTable = phoenixConnection.getTable( - 
SchemaUtil.getQualifiedTableName(schemaName, dataTable)); - if (indexTable != null) { - pIndexTable = phoenixConnection.getTable( - SchemaUtil.getQualifiedTableName(schemaName, indexTable)); - pOldTable = pIndexTable; - } else { - pOldTable = pDataTable; - } - - SystemTransformRecord transformRecord = getTransformRecord(connection.unwrap(PhoenixConnection.class)); - - validateTransform(pDataTable, pIndexTable, transformRecord); - String newTableName = SchemaUtil.getTableNameFromFullName(transformRecord.getNewPhysicalTableName()); - pNewTable = phoenixConnection.getTableNoCache( - SchemaUtil.getQualifiedTableName(schemaName, newTableName)); - - - oldTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, SchemaUtil.getTableNameFromFullName(pOldTable.getName().getString())); - newTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, SchemaUtil.getTableNameFromFullName(pNewTable.getName().getString())); - if (cmdLine.hasOption(VERIFY_OPTION.getOpt())) { - String value = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); - verifyType = IndexTool.IndexVerifyType.fromValue(value); - } - - return 0; + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - public void validateTransform(PTable argPDataTable, PTable argIndexTable, SystemTransformRecord transformRecord) throws Exception { - - if (argPDataTable.getType() != PTableType.TABLE) { - throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); - } - - if (argIndexTable != null && argIndexTable.getType() != PTableType.INDEX) { - throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); - } - - if (argPDataTable.isTransactional()) { - throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); - } + this.jobPriority = getJobPriority(cmdLine); - if (transformRecord == null){ - throw new IllegalStateException("ALTER statement has not been run and the transform has not been created for this table"); - } + boolean dataTableProvided = (cmdLine.hasOption(DATA_TABLE_OPTION.getOpt())); + if (!dataTableProvided) { + throw new IllegalStateException(DATA_TABLE_OPTION.getLongOpt() + " is a mandatory parameter"); + } - if (pDataTable != null && pIndexTable != null) { - if (!IndexTool.isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { - throw new IllegalArgumentException( - String.format(" %s is not an index table for %s for this connection", - indexTable, qDataTable)); - } - - PTable.IndexType indexType = argIndexTable.getIndexType(); - if (PTable.IndexType.LOCAL.equals(indexType)) { - throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); - } - } + return cmdLine; + } + + private Options getOptions() { + final Options options = new Options(); + options.addOption(OUTPUT_PATH_OPTION); + options.addOption(SCHEMA_NAME_OPTION); + options.addOption(DATA_TABLE_OPTION); + options.addOption(INDEX_TABLE_OPTION); + options.addOption(TENANT_ID_OPTION); + options.addOption(HELP_OPTION); + options.addOption(JOB_PRIORITY_OPTION); + options.addOption(RUN_FOREGROUND_OPTION); + options.addOption(PARTIAL_TRANSFORM_OPTION); + options.addOption(START_TIME_OPTION); + options.addOption(END_TIME_OPTION); + options.addOption(SPLIT_SIZE_OPTION); + options.addOption(FIX_UNVERIFIED_TRANSFORM_OPTION); + options.addOption(FORCE_CUTOVER_OPTION); + options.addOption(USE_NEW_TABLE_AS_SOURCE_OPTION); + options.addOption(AUTO_SPLIT_OPTION); + options.addOption(ABORT_TRANSFORM_OPTION); + options.addOption(PAUSE_TRANSFORM_OPTION); + options.addOption(RESUME_TRANSFORM_OPTION); + 
options.addOption(VERIFY_OPTION); + START_TIME_OPTION.setOptionalArg(true); + END_TIME_OPTION.setOptionalArg(true); + return options; + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + LOGGER.error(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + public CommandLine parseArgs(String[] args) throws Exception { + CommandLine cmdLine; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); + throw e; } - public int validateLastTransformTime() throws Exception { - Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - if (lastTransformTime.compareTo(currentTime) > 0 || lastTransformTime == 0L) { - throw new RuntimeException(PARTIAL_TRANSFORM_NOT_APPLICABLE); - } - return 0; + if (getConf() == null) { + setConf(HBaseConfiguration.create()); } - public SystemTransformRecord getTransformRecord(PhoenixConnection connection) throws Exception { - if (connection == null) { - try (Connection conn = getConnection(configuration)) { - SystemTransformRecord transformRecord = Transform.getTransformRecord(schemaName, logicalTableName, logicalParentName, tenantId, conn.unwrap(PhoenixConnection.class)); - return transformRecord; - } - } else { - return Transform.getTransformRecord(schemaName, logicalTableName, logicalParentName, tenantId, connection); - } + return cmdLine; + } + + @VisibleForTesting + public int populateTransformToolAttributesAndValidate(CommandLine cmdLine) throws Exception { + boolean useStartTime = cmdLine.hasOption(START_TIME_OPTION.getOpt()); + boolean useEndTime = cmdLine.hasOption(END_TIME_OPTION.getOpt()); + shouldFixUnverified = cmdLine.hasOption(FIX_UNVERIFIED_TRANSFORM_OPTION.getOpt()); + shouldUseNewTableAsSource = cmdLine.hasOption(USE_NEW_TABLE_AS_SOURCE_OPTION.getOpt()); + shouldForceCutover = cmdLine.hasOption(FORCE_CUTOVER_OPTION.getOpt()); + basePath = cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()); + isPartialTransform = cmdLine.hasOption(PARTIAL_TRANSFORM_OPTION.getOpt()); + if (shouldForceCutover) { + LOGGER.info("TransformTool will fix the unverified rows before cutover"); + shouldFixUnverified = true; + } + if (useStartTime) { + startTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); } - public String getJobPriority() { - return this.jobPriority.toString(); + if (useEndTime) { + endTime = new Long(cmdLine.getOptionValue(END_TIME_OPTION.getOpt())); } - private JobPriority getJobPriority(CommandLine cmdLine) { - String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); - if (jobPriorityOption == null) { - return JobPriority.NORMAL; - } + if (isTimeRangeSet(startTime, endTime)) { + validateTimeRange(startTime, endTime); + } - switch (jobPriorityOption) { - case "0" : return JobPriority.VERY_HIGH; - case "1" : return JobPriority.HIGH; - case "2" : return JobPriority.NORMAL; - case "3" : return JobPriority.LOW; - case "4" : return JobPriority.VERY_LOW; - default: - return JobPriority.NORMAL; - } + if ( + (isPartialTransform || shouldFixUnverified) && (cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt())) + ) { + throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_COMPATIBLE); + } + if ( + (isPartialTransform || shouldFixUnverified) + && (cmdLine.hasOption(ABORT_TRANSFORM_OPTION.getOpt()) + || 
cmdLine.hasOption(PAUSE_TRANSFORM_OPTION.getOpt()) + || cmdLine.hasOption(RESUME_TRANSFORM_OPTION.getOpt())) + ) { + throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_COMPATIBLE); + } + if ( + shouldForceCutover && (isPartialTransform || useStartTime || useEndTime + || shouldUseNewTableAsSource || cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt())) + ) { + throw new IllegalArgumentException(FORCE_CUTOVER_NOT_COMPATIBLE); } - public Job getJob() { - return this.job; + schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt()); + dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt()); + indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt()); + qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable); + isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + if (cmdLine.hasOption(SPLIT_SIZE_OPTION.getOpt())) { + splitSize = Integer.parseInt(cmdLine.getOptionValue(SPLIT_SIZE_OPTION.getOpt())); + } else { + splitSize = PhoenixTTLTool.DEFAULT_MAPPER_SPLIT_SIZE; + } + logicalTableName = dataTable; + logicalParentName = null; + if (!Strings.isNullOrEmpty(indexTable)) { + logicalTableName = indexTable; + logicalParentName = SchemaUtil.getTableName(schemaName, dataTable); } - public String getTenantId() { - return this.tenantId; + if (isPartialTransform) { + if (!cmdLine.hasOption(START_TIME_OPTION.getOpt())) { + throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_APPLICABLE); + } + lastTransformTime = new Long(cmdLine.getOptionValue(START_TIME_OPTION.getOpt())); + SystemTransformRecord transformRecord = getTransformRecord(null); + if (transformRecord == null) { + throw new IllegalArgumentException(PARTIAL_TRANSFORM_NOT_APPLICABLE); + } + if (lastTransformTime == null) { + lastTransformTime = transformRecord.getTransformLastStateTs().getTime(); + } else { + validateLastTransformTime(); + } + } + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + pDataTable = + phoenixConnection.getTable(SchemaUtil.getQualifiedTableName(schemaName, dataTable)); + if (indexTable != null) { + pIndexTable = + phoenixConnection.getTable(SchemaUtil.getQualifiedTableName(schemaName, indexTable)); + pOldTable = pIndexTable; + } else { + pOldTable = pDataTable; } - public void setJobName(String jobName) { - this.jobName = jobName; + SystemTransformRecord transformRecord = + getTransformRecord(connection.unwrap(PhoenixConnection.class)); + + validateTransform(pDataTable, pIndexTable, transformRecord); + String newTableName = + SchemaUtil.getTableNameFromFullName(transformRecord.getNewPhysicalTableName()); + pNewTable = + phoenixConnection.getTableNoCache(SchemaUtil.getQualifiedTableName(schemaName, newTableName)); + + oldTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, + SchemaUtil.getTableNameFromFullName(pOldTable.getName().getString())); + newTableWithSchema = SchemaUtil.getQualifiedPhoenixTableName(schemaName, + SchemaUtil.getTableNameFromFullName(pNewTable.getName().getString())); + if (cmdLine.hasOption(VERIFY_OPTION.getOpt())) { + String value = cmdLine.getOptionValue(VERIFY_OPTION.getOpt()); + verifyType = IndexTool.IndexVerifyType.fromValue(value); } - public Job configureJob() throws Exception { - if (pNewTable.isTransactional()) { - configuration.set(PhoenixConfigurationUtil.TX_SCN_VALUE, - Long.toString(TransactionUtil.convertToNanoseconds(pOldTable.getTimeStamp() + 1))); - configuration.set(PhoenixConfigurationUtil.TX_PROVIDER, pNewTable.getTransactionProvider().name()); - } else { - if (lastTransformTime 
!= null) { - PhoenixConfigurationUtil.setCurrentScnValue(configuration, lastTransformTime); - } else { - if (endTime != null) { - PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); - } else { - setCurrentScnValue(configuration, EnvironmentEdgeManager.currentTimeMillis()); - } - } - } - String jobName = String.format(TRANSFORM_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable==null?null:pNewTable.getName(), - (shouldFixUnverified?"Unverified":"Full")); - if (shouldUseNewTableAsSource) { - jobName = String.format(TRANSFORM_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable==null?null:pNewTable.getName(), - "NewTableSource_"+pNewTable.getName()); - } + return 0; + } - final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class); - final PostIndexDDLCompiler ddlCompiler = - new PostIndexDDLCompiler(pConnection, new TableRef(pOldTable), true); - ddlCompiler.compile(pNewTable); - final List newColumns = ddlCompiler.getDataColumnNames(); - final String upsertQuery = - QueryUtil.constructUpsertStatement(newTableWithSchema, newColumns, HintNode.Hint.NO_INDEX); + public void validateTransform(PTable argPDataTable, PTable argIndexTable, + SystemTransformRecord transformRecord) throws Exception { - configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery); + if (argPDataTable.getType() != PTableType.TABLE) { + throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); + } - PhoenixConfigurationUtil.setUpsertColumnNames(configuration, - ddlCompiler.getIndexColumnNames().toArray(new String[ddlCompiler.getIndexColumnNames().size()])); - if (tenantId != null) { - PhoenixConfigurationUtil.setTenantId(configuration, tenantId); - } + if (argIndexTable != null && argIndexTable.getType() != PTableType.INDEX) { + throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); + } - PhoenixConfigurationUtil.setIndexVerifyType(configuration, verifyType); - - long indexRebuildQueryTimeoutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); - long indexRebuildRPCTimeoutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); - long indexRebuildClientScannerTimeOutMs = - configuration.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); - int indexRebuildRpcRetriesCounter = - configuration.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); - // Set various phoenix and hbase level timeouts and rpc retries - configuration.set(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - Long.toString(indexRebuildQueryTimeoutMs)); - configuration.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - Long.toString(indexRebuildClientScannerTimeOutMs)); - configuration.set(HConstants.HBASE_RPC_TIMEOUT_KEY, - Long.toString(indexRebuildRPCTimeoutMs)); - configuration.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - Long.toString(indexRebuildRpcRetriesCounter)); - configuration.set("mapreduce.task.timeout", Long.toString(indexRebuildQueryTimeoutMs)); - - PhoenixConfigurationUtil.setIndexToolDataTableName(configuration, oldTableWithSchema); - PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, newTableWithSchema); - PhoenixConfigurationUtil.setShouldFixUnverifiedTransform(configuration, shouldFixUnverified); - if (shouldFixUnverified || 
shouldUseNewTableAsSource) { - PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE); - } else { - PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE); - } - if (startTime != null) { - PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); - } + if (argPDataTable.isTransactional()) { + throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); + } - PhoenixConfigurationUtil.setPhysicalTableName(configuration, pNewTable.getPhysicalName().getString()); - PhoenixConfigurationUtil.setIsTransforming(configuration, true); - Path outputPath = null; - org.apache.hadoop.fs.FileSystem fs; - if (basePath != null) { - outputPath = - CsvBulkImportUtil.getOutputPath(new Path(basePath), - pIndexTable == null ? - pDataTable.getPhysicalName().getString() : - pIndexTable.getPhysicalName().getString()); - fs = outputPath.getFileSystem(configuration); - fs.delete(outputPath, true); - } - PhoenixConfigurationUtil.setMultiInputMapperSplitSize(configuration, splitSize); - - this.job = Job.getInstance(getConf(), jobName); - job.setJarByClass(TransformTool.class); - job.setPriority(this.jobPriority); - - boolean hasChildViews = false; - try (Table hTable = connection.unwrap(PhoenixConnection.class).getQueryServices().getTable( - SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, configuration).toBytes())) { - byte[] tenantIdBytes = Strings.isNullOrEmpty(tenantId) ? null : tenantId.getBytes(); - byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ? null : schemaName.getBytes(); - hasChildViews = ViewUtil.hasChildViews(hTable, tenantIdBytes, schemaNameBytes, - pOldTable.getTableName().getBytes(), HConstants.LATEST_TIMESTAMP); - } + if (transformRecord == null) { + throw new IllegalStateException( + "ALTER statement has not been run and the transform has not been created for this table"); + } - if (hasChildViews && Strings.isNullOrEmpty(tenantId)) { - PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, PhoenixTransformWithViewsInputFormat.class, - oldTableWithSchema, ""); - } else { - PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, PhoenixServerBuildIndexInputFormat.class, - oldTableWithSchema, ""); - } - if (outputPath != null) { - FileOutputFormat.setOutputPath(job, outputPath); - } - job.setNumReduceTasks(1); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); + if (pDataTable != null && pIndexTable != null) { + if (!IndexTool.isValidIndexTable(connection, qDataTable, indexTable, tenantId)) { + throw new IllegalArgumentException(String + .format(" %s is not an index table for %s for this connection", indexTable, qDataTable)); + } - if (shouldFixUnverified) { - configureUnverifiedFromNewToOld(); - } else { - configureFromOldToNew(); - } - //Set the Output classes - job.setMapOutputValueClass(IntWritable.class); - job.setOutputKeyClass(NullWritable.class); - job.setOutputValueClass(NullWritable.class); - TableMapReduceUtil.addDependencyJars(job); + PTable.IndexType indexType = argIndexTable.getIndexType(); + if (PTable.IndexType.LOCAL.equals(indexType)) { + throw new IllegalArgumentException(TRANSFORM_NOT_APPLICABLE); + } + } + } - job.setReducerClass(PhoenixTransformReducer.class); + public int validateLastTransformTime() throws Exception { + Long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + if (lastTransformTime.compareTo(currentTime) > 0 || lastTransformTime == 0L) { + throw new 
RuntimeException(PARTIAL_TRANSFORM_NOT_APPLICABLE); + } + return 0; + } + + public SystemTransformRecord getTransformRecord(PhoenixConnection connection) throws Exception { + if (connection == null) { + try (Connection conn = getConnection(configuration)) { + SystemTransformRecord transformRecord = Transform.getTransformRecord(schemaName, + logicalTableName, logicalParentName, tenantId, conn.unwrap(PhoenixConnection.class)); + return transformRecord; + } + } else { + return Transform.getTransformRecord(schemaName, logicalTableName, logicalParentName, tenantId, + connection); + } + } - TableMapReduceUtil.initCredentials(job); - LOGGER.info("TransformTool is running for " + job.getJobName()); + public String getJobPriority() { + return this.jobPriority.toString(); + } - return job; + private JobPriority getJobPriority(CommandLine cmdLine) { + String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); + if (jobPriorityOption == null) { + return JobPriority.NORMAL; } - private void configureFromOldToNew() { - job.setMapperClass(PhoenixServerBuildIndexMapper.class); + switch (jobPriorityOption) { + case "0": + return JobPriority.VERY_HIGH; + case "1": + return JobPriority.HIGH; + case "2": + return JobPriority.NORMAL; + case "3": + return JobPriority.LOW; + case "4": + return JobPriority.VERY_LOW; + default: + return JobPriority.NORMAL; } - - private void configureUnverifiedFromNewToOld() throws IOException, SQLException { - List maintainers = Lists.newArrayListWithExpectedSize(1); - TransformMaintainer transformMaintainer = pNewTable.getTransformMaintainer(pOldTable, connection.unwrap(PhoenixConnection.class)); - maintainers.add(transformMaintainer); - Scan scan = IndexManagementUtil.newLocalStateScan(maintainers); - if (startTime != null) { - scan.setTimeRange(startTime - 1, HConstants.LATEST_TIMESTAMP); - } - scan.setRaw(true); - scan.setCacheBlocks(false); - SingleColumnValueFilter filter = new SingleColumnValueFilter( - transformMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - transformMaintainer.getEmptyKeyValueQualifier(), - CompareOperator.EQUAL, - UNVERIFIED_BYTES - ); - scan.setFilter(filter); - Configuration conf = job.getConfiguration(); - HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); - // Set the Physical Table name for use in DirectHTableWriter#write(Mutation) - conf.set(TableOutputFormat.OUTPUT_TABLE, - PhoenixConfigurationUtil.getPhysicalTableName(job.getConfiguration())); - ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); - TransformMaintainer.serialize(pDataTable, indexMetaDataPtr, pNewTable, connection.unwrap(PhoenixConnection.class)); - PhoenixConfigurationUtil.setIndexMaintainers(conf, indexMetaDataPtr); - TableMapReduceUtil.initTableMapperJob(pNewTable.getPhysicalName().getString(), scan, PhoenixTransformRepairMapper.class, null, - null, job); - } - - public int runJob() throws IOException { - try { - if (isForeground) { - LOGGER.info("Running TransformTool in foreground. " + - "Runs full table scans. This may take a long time!"); - return (job.waitForCompletion(true)) ? 
0 : 1; - } else { - LOGGER.info("Running TransformTool in Background - Submit async and exit"); - job.submit(); - return 0; - } - } catch (Exception e) { - LOGGER.error("Caught exception " + e + " trying to run TransformTool.", e); - return 1; + } + + public Job getJob() { + return this.job; + } + + public String getTenantId() { + return this.tenantId; + } + + public void setJobName(String jobName) { + this.jobName = jobName; + } + + public Job configureJob() throws Exception { + if (pNewTable.isTransactional()) { + configuration.set(PhoenixConfigurationUtil.TX_SCN_VALUE, + Long.toString(TransactionUtil.convertToNanoseconds(pOldTable.getTimeStamp() + 1))); + configuration.set(PhoenixConfigurationUtil.TX_PROVIDER, + pNewTable.getTransactionProvider().name()); + } else { + if (lastTransformTime != null) { + PhoenixConfigurationUtil.setCurrentScnValue(configuration, lastTransformTime); + } else { + if (endTime != null) { + PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); + } else { + setCurrentScnValue(configuration, EnvironmentEdgeManager.currentTimeMillis()); } + } + } + String jobName = String.format(TRANSFORM_JOB_NAME_TEMPLATE, schemaName, dataTable, + indexTable == null ? null : pNewTable.getName(), + (shouldFixUnverified ? "Unverified" : "Full")); + if (shouldUseNewTableAsSource) { + jobName = String.format(TRANSFORM_JOB_NAME_TEMPLATE, schemaName, dataTable, + indexTable == null ? null : pNewTable.getName(), "NewTableSource_" + pNewTable.getName()); } - private void preSplitTable(CommandLine cmdLine, Connection connection, - Configuration configuration, PTable newTable, PTable oldTable) - throws SQLException, IOException { - boolean autosplit = cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt()); + final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class); + final PostIndexDDLCompiler ddlCompiler = + new PostIndexDDLCompiler(pConnection, new TableRef(pOldTable), true); + ddlCompiler.compile(pNewTable); + final List newColumns = ddlCompiler.getDataColumnNames(); + final String upsertQuery = + QueryUtil.constructUpsertStatement(newTableWithSchema, newColumns, HintNode.Hint.NO_INDEX); - if (autosplit) { - String nOpt = cmdLine.getOptionValue(AUTO_SPLIT_OPTION.getOpt()); - int autosplitNumRegions = nOpt == null ? 
DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt); - LOGGER.info(String.format("Will split table %s , autosplit=%s ," + - " autoSplitNumRegions=%s", newTable.getPhysicalName(), - autosplit, autosplitNumRegions)); + configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery); - splitTable(connection.unwrap(PhoenixConnection.class), autosplit, - autosplitNumRegions, newTable, oldTable); - } + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, ddlCompiler.getIndexColumnNames() + .toArray(new String[ddlCompiler.getIndexColumnNames().size()])); + if (tenantId != null) { + PhoenixConfigurationUtil.setTenantId(configuration, tenantId); } - private void splitTable(PhoenixConnection pConnection, boolean autosplit, - int autosplitNumRegions, PTable newTable, PTable oldTable) - throws SQLException, IOException, IllegalArgumentException { - int numRegions; - byte[][] oldSplitPoints = null; - byte[][] newSplitPoints = null; - // TODO : if the rowkey changes via transform, we need to create new split points - try (Table hDataTable = - pConnection.getQueryServices().getTable(oldTable.getPhysicalName().getBytes()); - org.apache.hadoop.hbase.client.Connection connection = - HBaseFactoryProvider.getHConnectionFactory().createConnection(configuration)) { - // Avoid duplicate split keys and remove the empty key - oldSplitPoints = connection.getRegionLocator(hDataTable.getName()).getStartKeys(); - Arrays.sort(oldSplitPoints, Bytes.BYTES_COMPARATOR); - int numSplits = oldSplitPoints.length; - ArrayList splitList = new ArrayList<>(); - byte[] lastKey = null; - for (byte[] keyBytes : oldSplitPoints) { - if (Bytes.compareTo(keyBytes, EMPTY_BYTE_ARRAY)!=0) { - if (lastKey != null && !Bytes.equals(keyBytes, lastKey)) { - splitList.add(keyBytes); - } - } - lastKey = keyBytes; - } - newSplitPoints = new byte[splitList.size()][]; - for (int i=0; i < splitList.size(); i++) { - newSplitPoints[i] = splitList.get(i); - } - numRegions = newSplitPoints.length; - if (autosplit && (numRegions <= autosplitNumRegions)) { - LOGGER.info(String.format( - "Will not split %s because the data table only has %s regions, autoSplitNumRegions=%s", - newTable.getPhysicalName(), numRegions, autosplitNumRegions)); - return; // do nothing if # of regions is too low - } - } - - try (Admin admin = pConnection.getQueryServices().getAdmin()) { - // do the split - // drop table and recreate with appropriate splits - TableName newTableSplitted = TableName.valueOf(newTable.getPhysicalName().getBytes()); - TableDescriptor descriptor = admin.getDescriptor(newTableSplitted); - admin.disableTable(newTableSplitted); - admin.deleteTable(newTableSplitted); - admin.createTable(descriptor, newSplitPoints); - } + PhoenixConfigurationUtil.setIndexVerifyType(configuration, verifyType); + + long indexRebuildQueryTimeoutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); + long indexRebuildRPCTimeoutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); + long indexRebuildClientScannerTimeOutMs = + configuration.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); + int indexRebuildRpcRetriesCounter = + configuration.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); + // Set various phoenix and hbase level timeouts 
and rpc retries + configuration.set(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, + Long.toString(indexRebuildQueryTimeoutMs)); + configuration.set(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, + Long.toString(indexRebuildClientScannerTimeOutMs)); + configuration.set(HConstants.HBASE_RPC_TIMEOUT_KEY, Long.toString(indexRebuildRPCTimeoutMs)); + configuration.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + Long.toString(indexRebuildRpcRetriesCounter)); + configuration.set("mapreduce.task.timeout", Long.toString(indexRebuildQueryTimeoutMs)); + + PhoenixConfigurationUtil.setIndexToolDataTableName(configuration, oldTableWithSchema); + PhoenixConfigurationUtil.setIndexToolIndexTableName(configuration, newTableWithSchema); + PhoenixConfigurationUtil.setShouldFixUnverifiedTransform(configuration, shouldFixUnverified); + if (shouldFixUnverified || shouldUseNewTableAsSource) { + PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, + IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE); + } else { + PhoenixConfigurationUtil.setIndexToolSourceTable(configuration, + IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE); } - - public void updateTransformRecord(PhoenixConnection connection, PTable.TransformStatus newStatus) throws Exception { - if (verifyType == IndexTool.IndexVerifyType.ONLY) { - return; - } - SystemTransformRecord transformRecord = getTransformRecord(connection); - Transform.updateTransformRecord(connection, transformRecord, newStatus); + if (startTime != null) { + PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); } - protected void updateTransformRecord(Job job) throws Exception { - if (job == null) { - return; - } - if (verifyType == IndexTool.IndexVerifyType.ONLY) { - return; - } - SystemTransformRecord transformRecord = getTransformRecord(connection.unwrap(PhoenixConnection.class)); - SystemTransformRecord.SystemTransformBuilder builder = new SystemTransformRecord.SystemTransformBuilder(transformRecord); - builder.setTransformJobId(job.getJobID().toString()); - builder.setStartTs(new Timestamp(EnvironmentEdgeManager.currentTimeMillis())); - Transform.upsertTransform(builder.build(), connection.unwrap(PhoenixConnection.class)); - } - - public void killJob(SystemTransformRecord transformRecord) throws Exception{ - String jobId = transformRecord.getTransformJobId(); - if (!Strings.isNullOrEmpty(jobId)) { - JobClient jobClient = new JobClient(); - RunningJob runningJob = jobClient.getJob(jobId); - if (runningJob != null) { - try { - runningJob.killJob(); - } catch (IOException ex) { - LOGGER.warn("Transform abort could not kill the job. ", ex); - } - } - } + PhoenixConfigurationUtil.setPhysicalTableName(configuration, + pNewTable.getPhysicalName().getString()); + PhoenixConfigurationUtil.setIsTransforming(configuration, true); + Path outputPath = null; + org.apache.hadoop.fs.FileSystem fs; + if (basePath != null) { + outputPath = CsvBulkImportUtil.getOutputPath(new Path(basePath), + pIndexTable == null + ? 
pDataTable.getPhysicalName().getString() + : pIndexTable.getPhysicalName().getString()); + fs = outputPath.getFileSystem(configuration); + fs.delete(outputPath, true); + } + PhoenixConfigurationUtil.setMultiInputMapperSplitSize(configuration, splitSize); + + this.job = Job.getInstance(getConf(), jobName); + job.setJarByClass(TransformTool.class); + job.setPriority(this.jobPriority); + + boolean hasChildViews = false; + try (Table hTable = connection.unwrap(PhoenixConnection.class).getQueryServices().getTable( + SchemaUtil.getPhysicalTableName(SYSTEM_CHILD_LINK_NAME_BYTES, configuration).toBytes())) { + byte[] tenantIdBytes = Strings.isNullOrEmpty(tenantId) ? null : tenantId.getBytes(); + byte[] schemaNameBytes = Strings.isNullOrEmpty(schemaName) ? null : schemaName.getBytes(); + hasChildViews = ViewUtil.hasChildViews(hTable, tenantIdBytes, schemaNameBytes, + pOldTable.getTableName().getBytes(), HConstants.LATEST_TIMESTAMP); } - public void abortTransform() throws Exception { - SystemTransformRecord transformRecord = getTransformRecord(connection.unwrap(PhoenixConnection.class)); - if (transformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { - throw new IllegalStateException("A completed transform cannot be aborted"); - } - - killJob(transformRecord); - Transform.removeTransformRecord(transformRecord, connection.unwrap(PhoenixConnection.class)); - - // TODO: disable transform on the old table + if (hasChildViews && Strings.isNullOrEmpty(tenantId)) { + PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, + PhoenixTransformWithViewsInputFormat.class, oldTableWithSchema, ""); + } else { + PhoenixMapReduceUtil.setInput(job, PhoenixServerBuildIndexDBWritable.class, + PhoenixServerBuildIndexInputFormat.class, oldTableWithSchema, ""); + } + if (outputPath != null) { + FileOutputFormat.setOutputPath(job, outputPath); + } + job.setNumReduceTasks(1); + job.setMapOutputKeyClass(ImmutableBytesWritable.class); - // Cleanup syscat - try (Statement stmt = connection.createStatement()) { - if (pIndexTable != null) { - stmt.execute("DROP INDEX " + transformRecord.getNewPhysicalTableName()); - } else { - stmt.execute("DROP TABLE " + transformRecord.getNewPhysicalTableName()); - } - } catch (SQLException ex) { - LOGGER.warn("Transform abort could not drop the table " + transformRecord.getNewPhysicalTableName()); + if (shouldFixUnverified) { + configureUnverifiedFromNewToOld(); + } else { + configureFromOldToNew(); + } + // Set the Output classes + job.setMapOutputValueClass(IntWritable.class); + job.setOutputKeyClass(NullWritable.class); + job.setOutputValueClass(NullWritable.class); + TableMapReduceUtil.addDependencyJars(job); + + job.setReducerClass(PhoenixTransformReducer.class); + + TableMapReduceUtil.initCredentials(job); + LOGGER.info("TransformTool is running for " + job.getJobName()); + + return job; + } + + private void configureFromOldToNew() { + job.setMapperClass(PhoenixServerBuildIndexMapper.class); + } + + private void configureUnverifiedFromNewToOld() throws IOException, SQLException { + List maintainers = Lists.newArrayListWithExpectedSize(1); + TransformMaintainer transformMaintainer = + pNewTable.getTransformMaintainer(pOldTable, connection.unwrap(PhoenixConnection.class)); + maintainers.add(transformMaintainer); + Scan scan = IndexManagementUtil.newLocalStateScan(maintainers); + if (startTime != null) { + scan.setTimeRange(startTime - 1, HConstants.LATEST_TIMESTAMP); + } + scan.setRaw(true); + scan.setCacheBlocks(false); + 
SingleColumnValueFilter filter = new SingleColumnValueFilter( + transformMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), + transformMaintainer.getEmptyKeyValueQualifier(), CompareOperator.EQUAL, UNVERIFIED_BYTES); + scan.setFilter(filter); + Configuration conf = job.getConfiguration(); + HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); + // Set the Physical Table name for use in DirectHTableWriter#write(Mutation) + conf.set(TableOutputFormat.OUTPUT_TABLE, + PhoenixConfigurationUtil.getPhysicalTableName(job.getConfiguration())); + ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY); + TransformMaintainer.serialize(pDataTable, indexMetaDataPtr, pNewTable, + connection.unwrap(PhoenixConnection.class)); + PhoenixConfigurationUtil.setIndexMaintainers(conf, indexMetaDataPtr); + TableMapReduceUtil.initTableMapperJob(pNewTable.getPhysicalName().getString(), scan, + PhoenixTransformRepairMapper.class, null, null, job); + } + + public int runJob() throws IOException { + try { + if (isForeground) { + LOGGER.info("Running TransformTool in foreground. " + + "Runs full table scans. This may take a long time!"); + return (job.waitForCompletion(true)) ? 0 : 1; + } else { + LOGGER.info("Running TransformTool in Background - Submit async and exit"); + job.submit(); + return 0; + } + } catch (Exception e) { + LOGGER.error("Caught exception " + e + " trying to run TransformTool.", e); + return 1; + } + } + + private void preSplitTable(CommandLine cmdLine, Connection connection, + Configuration configuration, PTable newTable, PTable oldTable) + throws SQLException, IOException { + boolean autosplit = cmdLine.hasOption(AUTO_SPLIT_OPTION.getOpt()); + + if (autosplit) { + String nOpt = cmdLine.getOptionValue(AUTO_SPLIT_OPTION.getOpt()); + int autosplitNumRegions = + nOpt == null ? 
DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt); + LOGGER.info(String.format("Will split table %s , autosplit=%s ," + " autoSplitNumRegions=%s", + newTable.getPhysicalName(), autosplit, autosplitNumRegions)); + + splitTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, + newTable, oldTable); + } + } + + private void splitTable(PhoenixConnection pConnection, boolean autosplit, int autosplitNumRegions, + PTable newTable, PTable oldTable) throws SQLException, IOException, IllegalArgumentException { + int numRegions; + byte[][] oldSplitPoints = null; + byte[][] newSplitPoints = null; + // TODO : if the rowkey changes via transform, we need to create new split points + try ( + Table hDataTable = + pConnection.getQueryServices().getTable(oldTable.getPhysicalName().getBytes()); + org.apache.hadoop.hbase.client.Connection connection = + HBaseFactoryProvider.getHConnectionFactory().createConnection(configuration)) { + // Avoid duplicate split keys and remove the empty key + oldSplitPoints = connection.getRegionLocator(hDataTable.getName()).getStartKeys(); + Arrays.sort(oldSplitPoints, Bytes.BYTES_COMPARATOR); + int numSplits = oldSplitPoints.length; + ArrayList splitList = new ArrayList<>(); + byte[] lastKey = null; + for (byte[] keyBytes : oldSplitPoints) { + if (Bytes.compareTo(keyBytes, EMPTY_BYTE_ARRAY) != 0) { + if (lastKey != null && !Bytes.equals(keyBytes, lastKey)) { + splitList.add(keyBytes); + } } + lastKey = keyBytes; + } + newSplitPoints = new byte[splitList.size()][]; + for (int i = 0; i < splitList.size(); i++) { + newSplitPoints[i] = splitList.get(i); + } + numRegions = newSplitPoints.length; + if (autosplit && (numRegions <= autosplitNumRegions)) { + LOGGER.info(String.format( + "Will not split %s because the data table only has %s regions, autoSplitNumRegions=%s", + newTable.getPhysicalName(), numRegions, autosplitNumRegions)); + return; // do nothing if # of regions is too low + } } - public void pauseTransform() throws Exception { - SystemTransformRecord transformRecord = getTransformRecord(connection.unwrap(PhoenixConnection.class)); - if (transformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { - throw new IllegalStateException("A completed transform cannot be paused"); - } + try (Admin admin = pConnection.getQueryServices().getAdmin()) { + // do the split + // drop table and recreate with appropriate splits + TableName newTableSplitted = TableName.valueOf(newTable.getPhysicalName().getBytes()); + TableDescriptor descriptor = admin.getDescriptor(newTableSplitted); + admin.disableTable(newTableSplitted); + admin.deleteTable(newTableSplitted); + admin.createTable(descriptor, newSplitPoints); + } + } - updateTransformRecord(connection.unwrap(PhoenixConnection.class), PTable.TransformStatus.PAUSED); - killJob(transformRecord); + public void updateTransformRecord(PhoenixConnection connection, PTable.TransformStatus newStatus) + throws Exception { + if (verifyType == IndexTool.IndexVerifyType.ONLY) { + return; } + SystemTransformRecord transformRecord = getTransformRecord(connection); + Transform.updateTransformRecord(connection, transformRecord, newStatus); + } - public void resumeTransform(String[] args, CommandLine cmdLine) throws Exception { - SystemTransformRecord transformRecord = getTransformRecord(connection.unwrap(PhoenixConnection.class)); - if (!transformRecord.getTransformStatus().equals(PTable.TransformStatus.PAUSED.name())) { - throw new IllegalStateException("Only a paused transform can be resumed"); + 
protected void updateTransformRecord(Job job) throws Exception { + if (job == null) { + return; + } + if (verifyType == IndexTool.IndexVerifyType.ONLY) { + return; + } + SystemTransformRecord transformRecord = + getTransformRecord(connection.unwrap(PhoenixConnection.class)); + SystemTransformRecord.SystemTransformBuilder builder = + new SystemTransformRecord.SystemTransformBuilder(transformRecord); + builder.setTransformJobId(job.getJobID().toString()); + builder.setStartTs(new Timestamp(EnvironmentEdgeManager.currentTimeMillis())); + Transform.upsertTransform(builder.build(), connection.unwrap(PhoenixConnection.class)); + } + + public void killJob(SystemTransformRecord transformRecord) throws Exception { + String jobId = transformRecord.getTransformJobId(); + if (!Strings.isNullOrEmpty(jobId)) { + JobClient jobClient = new JobClient(); + RunningJob runningJob = jobClient.getJob(jobId); + if (runningJob != null) { + try { + runningJob.killJob(); + } catch (IOException ex) { + LOGGER.warn("Transform abort could not kill the job. ", ex); } + } + } + } - Transform.updateNewTableState(connection.unwrap(PhoenixConnection.class), transformRecord, PIndexState.ACTIVE); - - runTransform(args, cmdLine); + public void abortTransform() throws Exception { + SystemTransformRecord transformRecord = + getTransformRecord(connection.unwrap(PhoenixConnection.class)); + if (transformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { + throw new IllegalStateException("A completed transform cannot be aborted"); + } - // Check if we already have a TransformMonitor task. If we do, remove those and start a new monitor - List taskRecordList = Task.queryTaskTable(connection, null); - for (Task.TaskRecord taskRecord : taskRecordList) { - if (taskRecord.isMatchingTask(transformRecord)) { - ServerTask.deleteTask(connection.unwrap(PhoenixConnection.class), PTable.TaskType.TRANSFORM_MONITOR, taskRecord.getTimeStamp(), taskRecord.getTenantId(), - taskRecord.getSchemaName(), taskRecord.getTableName(), configuration.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED, - QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED)); - } - } + killJob(transformRecord); + Transform.removeTransformRecord(transformRecord, connection.unwrap(PhoenixConnection.class)); + + // TODO: disable transform on the old table + + // Cleanup syscat + try (Statement stmt = connection.createStatement()) { + if (pIndexTable != null) { + stmt.execute("DROP INDEX " + transformRecord.getNewPhysicalTableName()); + } else { + stmt.execute("DROP TABLE " + transformRecord.getNewPhysicalTableName()); + } + } catch (SQLException ex) { + LOGGER.warn( + "Transform abort could not drop the table " + transformRecord.getNewPhysicalTableName()); + } + } - // start TransformMonitor - TransformClient.addTransformMonitorTask(connection.unwrap(PhoenixConnection.class), configuration, transformRecord, - PTable.TaskStatus.CREATED, new Timestamp(EnvironmentEdgeManager.currentTimeMillis()), null); - - } - - public int runTransform(String[] args, CommandLine cmdLine) throws Exception { - int status = 0; - - updateTransformRecord(connection.unwrap(PhoenixConnection.class), PTable.TransformStatus.STARTED); - PhoenixConfigurationUtil.setIsPartialTransform(configuration, isPartialTransform); - PhoenixConfigurationUtil.setIsTransforming(configuration, true); - PhoenixConfigurationUtil.setForceCutover(configuration, shouldForceCutover); - - if (!Strings.isNullOrEmpty(indexTable)) { - PhoenixConfigurationUtil.setTransformingTableType(configuration, 
IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE); - // Index table transform. Build the index - IndexTool indexTool = new IndexTool(); - indexTool.setConf(configuration); - if (shouldForceCutover) { - List argsList = new ArrayList(Arrays.asList(args)); - // Remove from cmdLine so that indexTool will not throw error - argsList.remove("-"+FORCE_CUTOVER_OPTION.getOpt()); - argsList.remove("--"+FORCE_CUTOVER_OPTION.getLongOpt()); - args = argsList.toArray(new String[0]); - } - status = indexTool.run(args); - Job job = indexTool.getJob(); - if (status == 0) { - updateTransformRecord(job); - } - } else { - PhoenixConfigurationUtil.setTransformingTableType(configuration, IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE); - if (!isPartialTransform) { - preSplitTable(cmdLine, connection, configuration, pNewTable, pOldTable); - } - configureJob(); - status = runJob(); - if (status == 0) { - updateTransformRecord(this.job); - } - } + public void pauseTransform() throws Exception { + SystemTransformRecord transformRecord = + getTransformRecord(connection.unwrap(PhoenixConnection.class)); + if (transformRecord.getTransformStatus().equals(PTable.TransformStatus.COMPLETED.name())) { + throw new IllegalStateException("A completed transform cannot be paused"); + } - // Record status - if (status != 0) { - LOGGER.error("TransformTool/IndexTool job failed! Check logs for errors.."); - updateTransformRecord(connection.unwrap(PhoenixConnection.class), PTable.TransformStatus.FAILED); - return -1; - } + updateTransformRecord(connection.unwrap(PhoenixConnection.class), + PTable.TransformStatus.PAUSED); + killJob(transformRecord); + } - return status; + public void resumeTransform(String[] args, CommandLine cmdLine) throws Exception { + SystemTransformRecord transformRecord = + getTransformRecord(connection.unwrap(PhoenixConnection.class)); + if (!transformRecord.getTransformStatus().equals(PTable.TransformStatus.PAUSED.name())) { + throw new IllegalStateException("Only a paused transform can be resumed"); } - @Override - public int run(String[] args) throws Exception { - connection = null; - int ret = 0; - CommandLine cmdLine = null; - configuration = HBaseConfiguration.addHbaseResources(getConf()); - try { - cmdLine = parseArgs(args); - if (cmdLine.hasOption(TENANT_ID_OPTION.getOpt())) { - tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); - if (!Strings.isNullOrEmpty(tenantId)) { - configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - } - try (Connection conn = getConnection(configuration)) { - this.connection = conn; - this.connection.setAutoCommit(true); - createIndexToolTables(conn); - populateTransformToolAttributesAndValidate(cmdLine); - if (cmdLine.hasOption(ABORT_TRANSFORM_OPTION.getOpt())) { - abortTransform(); - } else if (cmdLine.hasOption(PAUSE_TRANSFORM_OPTION.getOpt())) { - pauseTransform(); - } else if (cmdLine.hasOption(RESUME_TRANSFORM_OPTION.getOpt())) { - resumeTransform(args, cmdLine); - } else { - ret = runTransform(args, cmdLine); - } - return ret; - } catch (Exception ex) { - LOGGER.error("An error occurred while transforming " + ExceptionUtils.getMessage(ex) + " at:\n" + ExceptionUtils.getStackTrace(ex)); - return -1; - } - } catch (Exception e) { - e.printStackTrace(); - printHelpAndExit(e.toString(), getOptions()); - return -1; - } + Transform.updateNewTableState(connection.unwrap(PhoenixConnection.class), transformRecord, + PIndexState.ACTIVE); + + runTransform(args, cmdLine); + + // Check if we already have a TransformMonitor task. 
If we do, remove those and start a new + // monitor + List taskRecordList = Task.queryTaskTable(connection, null); + for (Task.TaskRecord taskRecord : taskRecordList) { + if (taskRecord.isMatchingTask(transformRecord)) { + ServerTask.deleteTask(connection.unwrap(PhoenixConnection.class), + PTable.TaskType.TRANSFORM_MONITOR, taskRecord.getTimeStamp(), taskRecord.getTenantId(), + taskRecord.getSchemaName(), taskRecord.getTableName(), configuration.getBoolean( + QueryServices.PHOENIX_ACLS_ENABLED, QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED)); + } } - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new TransformTool(), args); - System.exit(result); + // start TransformMonitor + TransformClient.addTransformMonitorTask(connection.unwrap(PhoenixConnection.class), + configuration, transformRecord, PTable.TaskStatus.CREATED, + new Timestamp(EnvironmentEdgeManager.currentTimeMillis()), null); + + } + + public int runTransform(String[] args, CommandLine cmdLine) throws Exception { + int status = 0; + + updateTransformRecord(connection.unwrap(PhoenixConnection.class), + PTable.TransformStatus.STARTED); + PhoenixConfigurationUtil.setIsPartialTransform(configuration, isPartialTransform); + PhoenixConfigurationUtil.setIsTransforming(configuration, true); + PhoenixConfigurationUtil.setForceCutover(configuration, shouldForceCutover); + + if (!Strings.isNullOrEmpty(indexTable)) { + PhoenixConfigurationUtil.setTransformingTableType(configuration, + IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE); + // Index table transform. Build the index + IndexTool indexTool = new IndexTool(); + indexTool.setConf(configuration); + if (shouldForceCutover) { + List argsList = new ArrayList(Arrays.asList(args)); + // Remove from cmdLine so that indexTool will not throw error + argsList.remove("-" + FORCE_CUTOVER_OPTION.getOpt()); + argsList.remove("--" + FORCE_CUTOVER_OPTION.getLongOpt()); + args = argsList.toArray(new String[0]); + } + status = indexTool.run(args); + Job job = indexTool.getJob(); + if (status == 0) { + updateTransformRecord(job); + } + } else { + PhoenixConfigurationUtil.setTransformingTableType(configuration, + IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE); + if (!isPartialTransform) { + preSplitTable(cmdLine, connection, configuration, pNewTable, pOldTable); + } + configureJob(); + status = runJob(); + if (status == 0) { + updateTransformRecord(this.job); + } } + // Record status + if (status != 0) { + LOGGER.error("TransformTool/IndexTool job failed! 
Check logs for errors.."); + updateTransformRecord(connection.unwrap(PhoenixConnection.class), + PTable.TransformStatus.FAILED); + return -1; + } - public static TransformTool runTransformTool(SystemTransformRecord systemTransformRecord, Configuration configuration, - boolean isPartial, Long startTime, Long endTime, boolean shouldFixUnverified, boolean doValidation) throws Exception { - List args = Lists.newArrayList(); - if (!Strings.isNullOrEmpty(systemTransformRecord.getSchemaName())) { - args.add("--schema=" + systemTransformRecord.getSchemaName()); + return status; + } + + @Override + public int run(String[] args) throws Exception { + connection = null; + int ret = 0; + CommandLine cmdLine = null; + configuration = HBaseConfiguration.addHbaseResources(getConf()); + try { + cmdLine = parseArgs(args); + if (cmdLine.hasOption(TENANT_ID_OPTION.getOpt())) { + tenantId = cmdLine.getOptionValue(TENANT_ID_OPTION.getOpt()); + if (!Strings.isNullOrEmpty(tenantId)) { + configuration.set(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); } - String oldTableName = systemTransformRecord.getLogicalTableName(); - boolean isIndex = false; - if (!Strings.isNullOrEmpty(systemTransformRecord.getLogicalParentName())) { - isIndex = true; - args.add("--index-table=" + oldTableName); - args.add("--data-table=" + SchemaUtil.getTableNameFromFullName(systemTransformRecord.getLogicalParentName())); + } + try (Connection conn = getConnection(configuration)) { + this.connection = conn; + this.connection.setAutoCommit(true); + createIndexToolTables(conn); + populateTransformToolAttributesAndValidate(cmdLine); + if (cmdLine.hasOption(ABORT_TRANSFORM_OPTION.getOpt())) { + abortTransform(); + } else if (cmdLine.hasOption(PAUSE_TRANSFORM_OPTION.getOpt())) { + pauseTransform(); + } else if (cmdLine.hasOption(RESUME_TRANSFORM_OPTION.getOpt())) { + resumeTransform(args, cmdLine); } else { - args.add("--data-table=" + oldTableName); + ret = runTransform(args, cmdLine); } + return ret; + } catch (Exception ex) { + LOGGER.error("An error occurred while transforming " + ExceptionUtils.getMessage(ex) + + " at:\n" + ExceptionUtils.getStackTrace(ex)); + return -1; + } + } catch (Exception e) { + e.printStackTrace(); + printHelpAndExit(e.toString(), getOptions()); + return -1; + } + } + + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new TransformTool(), args); + System.exit(result); + } + + public static TransformTool runTransformTool(SystemTransformRecord systemTransformRecord, + Configuration configuration, boolean isPartial, Long startTime, Long endTime, + boolean shouldFixUnverified, boolean doValidation) throws Exception { + List args = Lists.newArrayList(); + if (!Strings.isNullOrEmpty(systemTransformRecord.getSchemaName())) { + args.add("--schema=" + systemTransformRecord.getSchemaName()); + } + String oldTableName = systemTransformRecord.getLogicalTableName(); + boolean isIndex = false; + if (!Strings.isNullOrEmpty(systemTransformRecord.getLogicalParentName())) { + isIndex = true; + args.add("--index-table=" + oldTableName); + args.add("--data-table=" + + SchemaUtil.getTableNameFromFullName(systemTransformRecord.getLogicalParentName())); + } else { + args.add("--data-table=" + oldTableName); + } - args.add("-op"); - args.add("/tmp/" + UUID.randomUUID().toString()); + args.add("-op"); + args.add("/tmp/" + UUID.randomUUID().toString()); - if (!Strings.isNullOrEmpty(systemTransformRecord.getTenantId())) { - args.add("-tenant"); - args.add(systemTransformRecord.getTenantId()); - } - 
if(startTime != null) { - args.add("-st"); - args.add(String.valueOf(startTime)); - } - if(endTime != null) { - args.add("-et"); - args.add(String.valueOf(endTime)); - } - if (isPartial) { - if (isIndex) { - // args.add("-pr"); - } else { - args.add("-pt"); - } - } - if (shouldFixUnverified) { - if (!isIndex) { - args.add("-fu"); - } - } + if (!Strings.isNullOrEmpty(systemTransformRecord.getTenantId())) { + args.add("-tenant"); + args.add(systemTransformRecord.getTenantId()); + } + if (startTime != null) { + args.add("-st"); + args.add(String.valueOf(startTime)); + } + if (endTime != null) { + args.add("-et"); + args.add(String.valueOf(endTime)); + } + if (isPartial) { + if (isIndex) { + // args.add("-pr"); + } else { + args.add("-pt"); + } + } + if (shouldFixUnverified) { + if (!isIndex) { + args.add("-fu"); + } + } - if (doValidation) { - args.add("-v"); - args.add(IndexTool.IndexVerifyType.ONLY.getValue()); - } - String[] cmdArgs = args.toArray(new String[0]); - TransformTool tt = new TransformTool(); - Configuration conf = new Configuration(configuration); - tt.setConf(conf); - - LOGGER.info("Running TransformTool with {}", Arrays.toString(cmdArgs), new Exception("Stack Trace")); - int status = tt.run(cmdArgs); - LOGGER.info("TransformTool with {} status is ", Arrays.toString(cmdArgs), status); - if (status != 0) { - return null; - } - return tt; + if (doValidation) { + args.add("-v"); + args.add(IndexTool.IndexVerifyType.ONLY.getValue()); + } + String[] cmdArgs = args.toArray(new String[0]); + TransformTool tt = new TransformTool(); + Configuration conf = new Configuration(configuration); + tt.setConf(conf); + + LOGGER.info("Running TransformTool with {}", Arrays.toString(cmdArgs), + new Exception("Stack Trace")); + int status = tt.run(cmdArgs); + LOGGER.info("TransformTool with {} status is ", Arrays.toString(cmdArgs), status); + if (status != 0) { + return null; } + return tt; + } -} \ No newline at end of file +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoder.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoder.java index 1d3398a6733..b7d78835d97 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoder.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,45 +20,45 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.util.ColumnInfo; - import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.ColumnInfo; /** * A codec to transform a {@link ColumnInfo} to a {@link String} and decode back. 
*/ public class ColumnInfoToStringEncoderDecoder { - static final String CONFIGURATION_VALUE_PREFIX = "phoenix.colinfo.encoder.decoeder.value"; - static final String CONFIGURATION_COUNT = "phoenix.colinfo.encoder.decoder.count"; - - private ColumnInfoToStringEncoderDecoder() { - - } - - public static void encode(Configuration configuration, List columnInfos) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(columnInfos); - int count=0; - for (int i=0; i columnInfos) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(columnInfos); + int count = 0; + for (int i = 0; i < columnInfos.size(); ++i) { + if (columnInfos.get(i) != null) { + configuration.set(String.format("%s_%d", CONFIGURATION_VALUE_PREFIX, i), + columnInfos.get(i).toString()); + ++count; + } } - - public static List decode(Configuration configuration) { - Preconditions.checkNotNull(configuration); - int numCols = configuration.getInt(CONFIGURATION_COUNT, 0); - List columnInfos = Lists.newArrayListWithExpectedSize(numCols); - for (int i=0; i decode(Configuration configuration) { + Preconditions.checkNotNull(configuration); + int numCols = configuration.getInt(CONFIGURATION_COUNT, 0); + List columnInfos = Lists.newArrayListWithExpectedSize(numCols); + for (int i = 0; i < numCols; ++i) { + columnInfos.add(ColumnInfo + .fromString(configuration.get(String.format("%s_%d", CONFIGURATION_VALUE_PREFIX, i)))); } + return columnInfos; + } - } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewJobStatusTracker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewJobStatusTracker.java index f1c9e6bcea0..3e254b92975 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewJobStatusTracker.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewJobStatusTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,32 +19,32 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.mapreduce.util.ViewInfoWritable.ViewInfoJobState; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class DefaultMultiViewJobStatusTracker implements MultiViewJobStatusTracker { - private static final Logger LOGGER = - LoggerFactory.getLogger(DefaultMultiViewJobStatusTracker.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(DefaultMultiViewJobStatusTracker.class); - public void updateJobStatus(ViewInfoTracker view, long numberOfDeletedRows, int state, - Configuration config, long duration, String mrJobName) { - if (state == ViewInfoJobState.SUCCEEDED.getValue()) { - LOGGER.debug(String.format("Number of deleted rows from view %s, TenantID %s, " + - "and Source Table Name %s : " + - "number of deleted row %d, duration : %d, mr job name : %s.", - view.getViewName(), view.getTenantId(), view.getRelationName(), - numberOfDeletedRows, duration, mrJobName)); - } else if (state == ViewInfoJobState.DELETED.getValue()) { - LOGGER.debug(String.format("View has been deleted, view info : view %s, TenantID %s, " + - "and Source Table Name %s : %d," + - " mr job name : %s.", view.getViewName(), view.getTenantId(), - view.getRelationName(), mrJobName)); - } else { - LOGGER.debug(String.format("Job is in state %d for view %s, TenantID %s, " + - "Source Table Name %s , and duration : %d, " + - "mr job name : %s.", state, view.getViewName(), view.getTenantId(), - view.getRelationName(), duration, mrJobName)); - } + public void updateJobStatus(ViewInfoTracker view, long numberOfDeletedRows, int state, + Configuration config, long duration, String mrJobName) { + if (state == ViewInfoJobState.SUCCEEDED.getValue()) { + LOGGER.debug(String.format( + "Number of deleted rows from view %s, TenantID %s, " + "and Source Table Name %s : " + + "number of deleted row %d, duration : %d, mr job name : %s.", + view.getViewName(), view.getTenantId(), view.getRelationName(), numberOfDeletedRows, + duration, mrJobName)); + } else if (state == ViewInfoJobState.DELETED.getValue()) { + LOGGER.debug(String.format( + "View has been deleted, view info : view %s, TenantID %s, " + + "and Source Table Name %s : %d," + " mr job name : %s.", + view.getViewName(), view.getTenantId(), view.getRelationName(), mrJobName)); + } else { + LOGGER.debug(String.format( + "Job is in state %d for view %s, TenantID %s, " + + "Source Table Name %s , and duration : %d, " + "mr job name : %s.", + state, view.getViewName(), view.getTenantId(), view.getRelationName(), duration, + mrJobName)); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewSplitStrategy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewSplitStrategy.java index 62e5150617a..aa998fd94a9 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewSplitStrategy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultMultiViewSplitStrategy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,63 +17,61 @@ */ package org.apache.phoenix.mapreduce.util; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.phoenix.mapreduce.PhoenixMultiViewInputSplit; +import static org.apache.phoenix.mapreduce.PhoenixTTLTool.DEFAULT_MAPPER_SPLIT_SIZE; import java.util.List; -import static org.apache.phoenix.mapreduce.PhoenixTTLTool.DEFAULT_MAPPER_SPLIT_SIZE; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.phoenix.mapreduce.PhoenixMultiViewInputSplit; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; public class DefaultMultiViewSplitStrategy implements MultiViewSplitStrategy { - public List generateSplits(List views, - Configuration configuration) { - int numViewsInSplit = PhoenixConfigurationUtil.getMultiViewSplitSize(configuration); + public List generateSplits(List views, + Configuration configuration) { + int numViewsInSplit = PhoenixConfigurationUtil.getMultiViewSplitSize(configuration); - if (numViewsInSplit < 1) { - numViewsInSplit = DEFAULT_MAPPER_SPLIT_SIZE; - } - - int numberOfMappers = getNumberOfMappers(views.size(),numViewsInSplit); + if (numViewsInSplit < 1) { + numViewsInSplit = DEFAULT_MAPPER_SPLIT_SIZE; + } - final List pSplits = Lists.newArrayListWithExpectedSize(numberOfMappers); - // Split the views into splits + int numberOfMappers = getNumberOfMappers(views.size(), numViewsInSplit); - for (int i = 0; i < numberOfMappers; i++) { - pSplits.add(new PhoenixMultiViewInputSplit(views.subList( - i * numViewsInSplit, getUpperBound(numViewsInSplit, i, views.size())))); - } + final List pSplits = Lists.newArrayListWithExpectedSize(numberOfMappers); + // Split the views into splits - return pSplits; + for (int i = 0; i < numberOfMappers; i++) { + pSplits.add(new PhoenixMultiViewInputSplit( + views.subList(i * numViewsInSplit, getUpperBound(numViewsInSplit, i, views.size())))); } - /* - Calculate number of mappers are needed based on split policy and - number of views on the cluster - */ - public int getNumberOfMappers(int viewSize, int numViewsInSplit) { - int numberOfMappers = viewSize / numViewsInSplit; - if (viewSize % numViewsInSplit > 0) { - numberOfMappers++; - } - return numberOfMappers; - } + return pSplits; + } - /* - Calculate the upper bound for each mapper. For example, given - split policy is 10 cleanup jobs per mapper, and the total view size at the cluster - is 12. - The first mapper will take from [0 - 10), this method will return 10 as upper bound - The second mapper will take from [10 - 12), this method will return 12 as upper bound. 
- */ - public int getUpperBound(int numViewsInSplit, int i, int viewSize) { - int upper = (i + 1) * numViewsInSplit; - if (viewSize < upper) { - upper = viewSize; - } + /* + * Calculate number of mappers are needed based on split policy and number of views on the cluster + */ + public int getNumberOfMappers(int viewSize, int numViewsInSplit) { + int numberOfMappers = viewSize / numViewsInSplit; + if (viewSize % numViewsInSplit > 0) { + numberOfMappers++; + } + return numberOfMappers; + } - return upper; + /* + * Calculate the upper bound for each mapper. For example, given split policy is 10 cleanup jobs + * per mapper, and the total view size at the cluster is 12. The first mapper will take from [0 - + * 10), this method will return 10 as upper bound The second mapper will take from [10 - 12), this + * method will return 12 as upper bound. + */ + public int getUpperBound(int numViewsInSplit, int i, int viewSize) { + int upper = (i + 1) * numViewsInSplit; + if (viewSize < upper) { + upper = viewSize; } -} \ No newline at end of file + + return upper; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultPhoenixMultiViewListProvider.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultPhoenixMultiViewListProvider.java index 31ba3f19f4f..a36db9953df 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultPhoenixMultiViewListProvider.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/DefaultPhoenixMultiViewListProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,12 @@ */ package org.apache.phoenix.mapreduce.util; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.QueryConstants; @@ -26,175 +32,156 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.List; - public class DefaultPhoenixMultiViewListProvider implements PhoenixMultiViewListProvider { - private static final Logger LOGGER = - LoggerFactory.getLogger(DefaultPhoenixMultiViewListProvider.class); - - public List getPhoenixMultiViewList(Configuration configuration) { - boolean isFetchAll = configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS) != null; + private static final Logger LOGGER = + LoggerFactory.getLogger(DefaultPhoenixMultiViewListProvider.class); - if (!isFetchAll) { - return getTenantOrViewMultiViewList(configuration); - } - List viewInfoWritables = new ArrayList<>(); - boolean isQueryMore = true; - String query = PhoenixMultiInputUtil.getFetchViewQuery(configuration); + public List getPhoenixMultiViewList(Configuration configuration) { + boolean isFetchAll = + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS) + != null; - int limit = PhoenixConfigurationUtil.getMultiViewQueryMoreSplitSize(configuration); + if 
(!isFetchAll) { + return getTenantOrViewMultiViewList(configuration); + } + List viewInfoWritables = new ArrayList<>(); + boolean isQueryMore = true; + String query = PhoenixMultiInputUtil.getFetchViewQuery(configuration); - String schema = null; - String tableName = configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW); - if (tableName != null) { - schema = SchemaUtil.getSchemaNameFromFullName(tableName); - } - String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); - - try (PhoenixConnection connection = (PhoenixConnection) - ConnectionUtil.getInputConnection(configuration)){ - try (PreparedStatement stmt = connection.prepareStatement(query)) { - do { - stmt.setString(1, tenantId); - stmt.setString(2, schema); - stmt.setString(3, tableName); - stmt.setInt(4, limit); - - ResultSet viewRs = stmt.executeQuery(); - String fullTableName = null; - - while (viewRs.next()) { - tenantId = viewRs.getString(1); - schema = viewRs.getString(2); - tableName = viewRs.getString(3); - fullTableName = tableName; - Long viewTtlValue = viewRs.getLong(4); - - if (schema != null && schema.length() > 0) { - fullTableName = SchemaUtil.getTableName(schema, tableName); - } - - if (!isParentHasTTL(connection, tenantId, fullTableName)) { - addingViewIndexToTheFinalList(connection,tenantId,fullTableName, - viewTtlValue, viewInfoWritables); - } - } - if (isQueryMore) { - if (fullTableName == null) { - isQueryMore = false; - } - } - } while (isQueryMore); - } + int limit = PhoenixConfigurationUtil.getMultiViewQueryMoreSplitSize(configuration); - } catch (Exception e) { - LOGGER.error("Getting view info failed with: " + e.getMessage()); - } - return viewInfoWritables; + String schema = null; + String tableName = + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW); + if (tableName != null) { + schema = SchemaUtil.getSchemaNameFromFullName(tableName); } - - public List getTenantOrViewMultiViewList(Configuration configuration) { - List viewInfoWritables = new ArrayList<>(); - String query = PhoenixMultiInputUtil.getFetchViewQuery(configuration); - - try (PhoenixConnection connection = (PhoenixConnection) - ConnectionUtil.getInputConnection(configuration)) { - try (Statement stmt = connection.createStatement()) { - ResultSet viewRs = stmt.executeQuery(query); - while (viewRs.next()) { - String tenantId = viewRs.getString(1); - String schema = viewRs.getString(2); - String tableName = viewRs.getString(3); - Long viewTtlValue = viewRs.getLong(4); - String fullTableName = tableName; - - if (schema != null && schema.length() > 0) { - fullTableName = SchemaUtil.getTableName(schema, tableName); - } - - if (!isParentHasTTL(connection, tenantId, fullTableName)) { - addingViewIndexToTheFinalList(connection,tenantId,fullTableName, - viewTtlValue, viewInfoWritables); - } - } + String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); + + try (PhoenixConnection connection = + (PhoenixConnection) ConnectionUtil.getInputConnection(configuration)) { + try (PreparedStatement stmt = connection.prepareStatement(query)) { + do { + stmt.setString(1, tenantId); + stmt.setString(2, schema); + stmt.setString(3, tableName); + stmt.setInt(4, limit); + + ResultSet viewRs = stmt.executeQuery(); + String fullTableName = null; + + while (viewRs.next()) { + tenantId = viewRs.getString(1); + schema = viewRs.getString(2); + tableName = viewRs.getString(3); + fullTableName = tableName; + Long viewTtlValue = viewRs.getLong(4); + + if 
(schema != null && schema.length() > 0) { + fullTableName = SchemaUtil.getTableName(schema, tableName); } - }catch (Exception e) { - LOGGER.error("Getting view info failed with: " + e.getMessage()); - } - return viewInfoWritables; - } - private boolean isParentHasTTL(PhoenixConnection connection, - String tenantId, String fullTableName) { - boolean skip= false; - try { - PTable pTable = connection.getTable(tenantId, fullTableName); - System.out.println("PTable"); - PTable parentTable = connection.getTable(null, pTable.getParentName().toString()); - System.out.println("Parent Table"); - if (parentTable.getType() == PTableType.VIEW && - parentTable.getTTL() > 0) { - /* if the current view parent already has a TTL value, we want to - skip the current view cleanup job because we want to run the cleanup - job for at the GlobalView level instead of running multi-jobs at - the LeafView level for the better performance. - - BaseTable - GlobalView(has TTL) - LeafView1, LeafView2, LeafView3.... - */ - skip = true; + if (!isParentHasTTL(connection, tenantId, fullTableName)) { + addingViewIndexToTheFinalList(connection, tenantId, fullTableName, viewTtlValue, + viewInfoWritables); } - } catch (Exception e) { - skip = true; - LOGGER.error(String.format("Had an issue to process the view: %s, " + - "tenantId: see error %s ", fullTableName, tenantId, - e.getMessage())); + } + if (isQueryMore) { + if (fullTableName == null) { + isQueryMore = false; + } + } + } while (isQueryMore); + } + + } catch (Exception e) { + LOGGER.error("Getting view info failed with: " + e.getMessage()); + } + return viewInfoWritables; + } + + public List getTenantOrViewMultiViewList(Configuration configuration) { + List viewInfoWritables = new ArrayList<>(); + String query = PhoenixMultiInputUtil.getFetchViewQuery(configuration); + + try (PhoenixConnection connection = + (PhoenixConnection) ConnectionUtil.getInputConnection(configuration)) { + try (Statement stmt = connection.createStatement()) { + ResultSet viewRs = stmt.executeQuery(query); + while (viewRs.next()) { + String tenantId = viewRs.getString(1); + String schema = viewRs.getString(2); + String tableName = viewRs.getString(3); + Long viewTtlValue = viewRs.getLong(4); + String fullTableName = tableName; + + if (schema != null && schema.length() > 0) { + fullTableName = SchemaUtil.getTableName(schema, tableName); + } + + if (!isParentHasTTL(connection, tenantId, fullTableName)) { + addingViewIndexToTheFinalList(connection, tenantId, fullTableName, viewTtlValue, + viewInfoWritables); + } } - return skip; + } + } catch (Exception e) { + LOGGER.error("Getting view info failed with: " + e.getMessage()); } - - private void addingViewIndexToTheFinalList(PhoenixConnection connection, String tenantId, - String fullTableName, long viewTtlValue, - List viewInfoWritables) - throws Exception { - PTable pTable = connection.getTable(tenantId, fullTableName); - ViewInfoWritable viewInfoTracker = new ViewInfoTracker( - tenantId, - fullTableName, - viewTtlValue, - pTable.getPhysicalName().getString(), - false + return viewInfoWritables; + } + + private boolean isParentHasTTL(PhoenixConnection connection, String tenantId, + String fullTableName) { + boolean skip = false; + try { + PTable pTable = connection.getTable(tenantId, fullTableName); + System.out.println("PTable"); + PTable parentTable = connection.getTable(null, pTable.getParentName().toString()); + System.out.println("Parent Table"); + if (parentTable.getType() == PTableType.VIEW && parentTable.getTTL() > 0) { + /* + * if the 
current view parent already has a TTL value, we want to skip the current view + * cleanup job because we want to run the cleanup job for at the GlobalView level instead of + * running multi-jobs at the LeafView level for the better performance. BaseTable + * GlobalView(has TTL) LeafView1, LeafView2, LeafView3.... + */ + skip = true; + } + } catch (Exception e) { + skip = true; + LOGGER + .error(String.format("Had an issue to process the view: %s, " + "tenantId: see error %s ", + fullTableName, tenantId, e.getMessage())); + } + return skip; + } + + private void addingViewIndexToTheFinalList(PhoenixConnection connection, String tenantId, + String fullTableName, long viewTtlValue, List viewInfoWritables) + throws Exception { + PTable pTable = connection.getTable(tenantId, fullTableName); + ViewInfoWritable viewInfoTracker = new ViewInfoTracker(tenantId, fullTableName, viewTtlValue, + pTable.getPhysicalName().getString(), false + + ); + viewInfoWritables.add(viewInfoTracker); + + List allIndexesOnView = pTable.getIndexes(); + for (PTable viewIndexTable : allIndexesOnView) { + String indexName = viewIndexTable.getTableName().getString(); + String indexSchema = viewIndexTable.getSchemaName().getString(); + if (indexName.contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { + indexName = SchemaUtil.getTableNameFromFullName(indexName, + QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); + } + indexName = SchemaUtil.getTableNameFromFullName(indexName); + indexName = SchemaUtil.getTableName(indexSchema, indexName); + ViewInfoWritable viewInfoTrackerForIndexEntry = + new ViewInfoTracker(tenantId, fullTableName, viewTtlValue, indexName, true ); - viewInfoWritables.add(viewInfoTracker); - - List allIndexesOnView = pTable.getIndexes(); - for (PTable viewIndexTable : allIndexesOnView) { - String indexName = viewIndexTable.getTableName().getString(); - String indexSchema = viewIndexTable.getSchemaName().getString(); - if (indexName.contains( - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) { - indexName = SchemaUtil.getTableNameFromFullName(indexName, - QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR); - } - indexName = SchemaUtil.getTableNameFromFullName(indexName); - indexName = SchemaUtil.getTableName(indexSchema, indexName); - ViewInfoWritable viewInfoTrackerForIndexEntry = new ViewInfoTracker( - tenantId, - fullTableName, - viewTtlValue, - indexName, - true - - ); - viewInfoWritables.add(viewInfoTrackerForIndexEntry); - } + viewInfoWritables.add(viewInfoTrackerForIndexEntry); } -} \ No newline at end of file + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/IndexColumnNames.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/IndexColumnNames.java index 2b93a861dde..555839662e7 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/IndexColumnNames.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/IndexColumnNames.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
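The TTL skip rule from the DefaultPhoenixMultiViewListProvider hunk above, restated as a hedged standalone sketch (the class and method names here are illustrative and not part of the patch; PhoenixConnection, PTable and PTableType are the Phoenix types already used in that file, and the checked exception is assumed to be SQLException):

import java.sql.SQLException;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;

public class ParentTtlSkipSketch {
  // Mirrors isParentHasTTL(...): a leaf view is skipped when its parent is itself a VIEW that
  // carries a TTL, so the cleanup job runs once at the GlobalView level instead of once per
  // LeafView (BaseTable -> GlobalView(has TTL) -> LeafView1, LeafView2, LeafView3, ...).
  static boolean shouldSkip(PhoenixConnection connection, String tenantId, String fullViewName)
    throws SQLException {
    PTable view = connection.getTable(tenantId, fullViewName);
    PTable parent = connection.getTable(null, view.getParentName().toString());
    return parent.getType() == PTableType.VIEW && parent.getTTL() > 0;
  }
}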
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,230 +26,220 @@ import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.util.IndexUtil; -import org.apache.phoenix.util.SchemaUtil; - import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.util.IndexUtil; +import org.apache.phoenix.util.SchemaUtil; /** * Gets index column names and their data table equivalents */ public class IndexColumnNames { - private List dataNonPkColNames = Lists.newArrayList(); - private List dataPkColNames = Lists.newArrayList(); - private List dataColNames; - protected List dataColSqlTypeNames = Lists.newArrayList(); - private List indexPkColNames = Lists.newArrayList(); - private List indexNonPkColNames = Lists.newArrayList(); - private List indexColNames; - protected List indexColSqlTypeNames = Lists.newArrayList(); - private PTable pdataTable; - private PTable pindexTable; - - public IndexColumnNames(final PTable pdataTable, final PTable pindexTable) { - this.pdataTable = pdataTable; - this.pindexTable = pindexTable; - List pindexCols = pindexTable.getColumns(); - List pkColumns = pindexTable.getPKColumns(); - Set indexColsAdded = new HashSet(); - int offset = 0; - if (pindexTable.getBucketNum() != null) { - offset++; - } - if (pindexTable.getViewIndexId() != null) { - offset++; - } - if (pindexTable.isMultiTenant() && pindexTable.getViewIndexId() != null) { - offset++; + private List dataNonPkColNames = Lists.newArrayList(); + private List dataPkColNames = Lists.newArrayList(); + private List dataColNames; + protected List dataColSqlTypeNames = Lists.newArrayList(); + private List indexPkColNames = Lists.newArrayList(); + private List indexNonPkColNames = Lists.newArrayList(); + private List indexColNames; + protected List indexColSqlTypeNames = Lists.newArrayList(); + private PTable pdataTable; + private PTable pindexTable; + + public IndexColumnNames(final PTable pdataTable, final PTable pindexTable) { + this.pdataTable = pdataTable; + this.pindexTable = pindexTable; + List pindexCols = pindexTable.getColumns(); + List pkColumns = pindexTable.getPKColumns(); + Set indexColsAdded = new HashSet(); + int offset = 0; + if (pindexTable.getBucketNum() != null) { + offset++; + } + if (pindexTable.getViewIndexId() != null) { + offset++; + } + if (pindexTable.isMultiTenant() && pindexTable.getViewIndexId() != null) { + offset++; + } + + if (offset > 0) { + pindexCols = pindexCols.subList(offset, pindexCols.size()); + pkColumns = pkColumns.subList(offset, pkColumns.size()); + } + + // first add the data pk columns + for (PColumn indexCol : pindexCols) { + if (IndexUtil.isDataPKColumn(indexCol)) { + String indexColumnName = indexCol.getName().getString(); + PColumn dPkCol = IndexUtil.getDataColumn(pdataTable, indexColumnName); + dataPkColNames.add(getDataColFullName(dPkCol)); + dataColSqlTypeNames.add(getDataTypeString(dPkCol)); + indexPkColNames.add(indexColumnName); + indexColSqlTypeNames.add(getDataTypeString(indexCol)); + indexColsAdded.add(indexColumnName); + } + } + + // then the rest of the index pk 
columns + for (PColumn indexPkCol : pkColumns) { + String indexColName = indexPkCol.getName().getString(); + if (!indexColsAdded.contains(indexColName)) { + indexPkColNames.add(indexColName); + indexColSqlTypeNames.add(getDataTypeString(indexPkCol)); + PColumn dCol = IndexUtil.getDataColumn(pdataTable, indexColName); + dataNonPkColNames.add(getDataColFullName(dCol)); + dataColSqlTypeNames.add(getDataTypeString(dCol)); + indexColsAdded.add(indexColName); + } + } + + // then the covered columns (rest of the columns) + for (PColumn indexCol : pindexCols) { + String indexColName = indexCol.getName().getString(); + if (!indexColsAdded.contains(indexColName)) { + indexNonPkColNames.add(indexColName); + indexColSqlTypeNames.add(getDataTypeString(indexCol)); + PColumn dCol = IndexUtil.getDataColumn(pdataTable, indexColName); + dataNonPkColNames.add(getDataColFullName(dCol)); + dataColSqlTypeNames.add(getDataTypeString(dCol)); + } + } + + indexColNames = Lists.newArrayList(Iterables.concat(indexPkColNames, indexNonPkColNames)); + dataColNames = Lists.newArrayList(Iterables.concat(dataPkColNames, dataNonPkColNames)); + } + + private String getDataTypeString(PColumn col) { + PDataType dataType = col.getDataType(); + switch (dataType.getSqlType()) { + case Types.DECIMAL: + String typeStr = dataType.toString(); + if (col.getMaxLength() != null) { + typeStr += "(" + col.getMaxLength().toString(); + if (col.getScale() != null) { + typeStr += "," + col.getScale().toString(); + } + typeStr += ")"; } - - if (offset > 0) { - pindexCols = pindexCols.subList(offset, pindexCols.size()); - pkColumns = pkColumns.subList(offset, pkColumns.size()); - } - - // first add the data pk columns - for (PColumn indexCol : pindexCols) { - if (IndexUtil.isDataPKColumn(indexCol)) { - String indexColumnName = indexCol.getName().getString(); - PColumn dPkCol = IndexUtil.getDataColumn(pdataTable, indexColumnName); - dataPkColNames.add(getDataColFullName(dPkCol)); - dataColSqlTypeNames.add(getDataTypeString(dPkCol)); - indexPkColNames.add(indexColumnName); - indexColSqlTypeNames.add(getDataTypeString(indexCol)); - indexColsAdded.add(indexColumnName); - } - } - - // then the rest of the index pk columns - for (PColumn indexPkCol : pkColumns) { - String indexColName = indexPkCol.getName().getString(); - if (!indexColsAdded.contains(indexColName)) { - indexPkColNames.add(indexColName); - indexColSqlTypeNames.add(getDataTypeString(indexPkCol)); - PColumn dCol = IndexUtil.getDataColumn(pdataTable, indexColName); - dataNonPkColNames.add(getDataColFullName(dCol)); - dataColSqlTypeNames.add(getDataTypeString(dCol)); - indexColsAdded.add(indexColName); - } - } - - // then the covered columns (rest of the columns) - for (PColumn indexCol : pindexCols) { - String indexColName = indexCol.getName().getString(); - if (!indexColsAdded.contains(indexColName)) { - indexNonPkColNames.add(indexColName); - indexColSqlTypeNames.add(getDataTypeString(indexCol)); - PColumn dCol = IndexUtil.getDataColumn(pdataTable, indexColName); - dataNonPkColNames.add(getDataColFullName(dCol)); - dataColSqlTypeNames.add(getDataTypeString(dCol)); - } + return typeStr; + default: + if (col.getMaxLength() != null) { + return String.format("%s(%s)", dataType.toString(), col.getMaxLength()); } - - indexColNames = Lists.newArrayList(Iterables.concat(indexPkColNames, indexNonPkColNames)); - dataColNames = Lists.newArrayList(Iterables.concat(dataPkColNames, dataNonPkColNames)); - } - - private String getDataTypeString(PColumn col) { - PDataType dataType = col.getDataType(); 
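As a rough illustration of the strings produced by getDataTypeString(...) above (the concrete types and lengths below are invented examples, not values taken from the patch): a DECIMAL column with maxLength 10 and scale 2 is rendered as DECIMAL(10,2), a VARCHAR with maxLength 15 as VARCHAR(15), and a column with no maxLength as the bare type name. The same formatting rule, expressed without Phoenix types so it can run on its own:

public class TypeStringSketch {
  // sqlTypeName, maxLength and scale stand in for the PDataType/PColumn values used above.
  static String typeString(String sqlTypeName, Integer maxLength, Integer scale, boolean isDecimal) {
    if (isDecimal) {
      String typeStr = sqlTypeName;
      if (maxLength != null) {
        typeStr += "(" + maxLength;
        if (scale != null) {
          typeStr += "," + scale;
        }
        typeStr += ")";
      }
      return typeStr;
    }
    if (maxLength != null) {
      return String.format("%s(%s)", sqlTypeName, maxLength);
    }
    return sqlTypeName;
  }

  public static void main(String[] args) {
    System.out.println(typeString("DECIMAL", 10, 2, true));      // DECIMAL(10,2)
    System.out.println(typeString("VARCHAR", 15, null, false));  // VARCHAR(15)
    System.out.println(typeString("BIGINT", null, null, false)); // BIGINT
  }
}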
- switch (dataType.getSqlType()) { - case Types.DECIMAL: - String typeStr = dataType.toString(); - if (col.getMaxLength() != null) { - typeStr += "(" + col.getMaxLength().toString(); - if (col.getScale() != null) { - typeStr += "," + col.getScale().toString(); - } - typeStr += ")"; - } - return typeStr; - default: - if (col.getMaxLength() != null) { - return String.format("%s(%s)", dataType.toString(), col.getMaxLength()); - } - return dataType.toString(); - } - } - - public static String getDataColFullName(PColumn dCol) { - String dColFullName = ""; - if (dCol.getFamilyName() != null) { - dColFullName += dCol.getFamilyName().getString() + QueryConstants.NAME_SEPARATOR; - } - dColFullName += dCol.getName().getString(); - return dColFullName; - } - - private List getDynamicCols(List colNames, List colTypes) { - List dynamicCols = Lists.newArrayListWithCapacity(colNames.size()); - for (int i = 0; i < colNames.size(); i++) { - String dataColName = colNames.get(i); - String dataColType = colTypes.get(i); - String dynamicCol = - SchemaUtil.getEscapedFullColumnName(dataColName) + " " + dataColType; - dynamicCols.add(dynamicCol); - } - return dynamicCols; - } - - private List getUnqualifiedColNames(List qualifiedCols) { - return Lists.transform(qualifiedCols, new Function() { - @Override - public String apply(String qCol) { - return SchemaUtil.getTableNameFromFullName(qCol, QueryConstants.NAME_SEPARATOR); - } - }); - } - - protected List getCastedColumnNames(List colNames, List castTypes) { - List castColNames = Lists.newArrayListWithCapacity(colNames.size()); - colNames = SchemaUtil.getEscapedFullColumnNames(colNames); - for (int i = 0; i < colNames.size(); i++) { - castColNames.add("CAST(" + colNames.get(i) + " AS " + castTypes.get(i) + ")"); - } - return castColNames; - } - - public String getQualifiedDataTableName() { - return SchemaUtil.getQualifiedTableName(pdataTable.getSchemaName().getString(), - pdataTable.getTableName().getString()); - } - - public String getQualifiedIndexTableName() { - return SchemaUtil.getQualifiedTableName(pindexTable.getSchemaName().getString(), - pindexTable.getTableName().getString()); - } - - /** - * @return the escaped data column names (equivalents for the index columns) along with their - * sql type, for use in dynamic column queries/upserts - */ - public List getDynamicDataCols() { - // don't want the column family for dynamic columns - return getDynamicCols(getUnqualifiedDataColNames(), dataColSqlTypeNames); - - } - - /** - * @return the escaped index column names along with their sql type, for use in dynamic column - * queries/upserts - */ - public List getDynamicIndexCols() { - // don't want the column family for dynamic columns - return getDynamicCols(getUnqualifiedIndexColNames(), indexColSqlTypeNames); - } - - /** - * @return the corresponding data table column names for the index columns, leading with the - * data table pk columns - */ - public List getDataColNames() { - return dataColNames; - } - - /** - * @return same as getDataColNames, without the column family qualifier - */ - public List getUnqualifiedDataColNames() { - return getUnqualifiedColNames(dataColNames); - } - - /** - * @return the corresponding data table column names for the index columns, which are not part - * of the data table pk - */ - public List getDataNonPkColNames() { - return dataNonPkColNames; - } - - /** - * @return the corresponding data table column names for the index columns, which are part of - * the data table pk - */ - public List getDataPkColNames() { - return 
dataPkColNames; - } - - /** - * @return the index column names, leading with the data table pk columns - */ - public List getIndexColNames() { - return indexColNames; - } - - /** - * @return same as getIndexColNames, without the column family qualifier - */ - public List getUnqualifiedIndexColNames() { - return getUnqualifiedColNames(indexColNames); - } - - /** - * @return the index pk column names - */ - public List getIndexPkColNames() { - return indexPkColNames; - } + return dataType.toString(); + } + } + + public static String getDataColFullName(PColumn dCol) { + String dColFullName = ""; + if (dCol.getFamilyName() != null) { + dColFullName += dCol.getFamilyName().getString() + QueryConstants.NAME_SEPARATOR; + } + dColFullName += dCol.getName().getString(); + return dColFullName; + } + + private List getDynamicCols(List colNames, List colTypes) { + List dynamicCols = Lists.newArrayListWithCapacity(colNames.size()); + for (int i = 0; i < colNames.size(); i++) { + String dataColName = colNames.get(i); + String dataColType = colTypes.get(i); + String dynamicCol = SchemaUtil.getEscapedFullColumnName(dataColName) + " " + dataColType; + dynamicCols.add(dynamicCol); + } + return dynamicCols; + } + + private List getUnqualifiedColNames(List qualifiedCols) { + return Lists.transform(qualifiedCols, new Function() { + @Override + public String apply(String qCol) { + return SchemaUtil.getTableNameFromFullName(qCol, QueryConstants.NAME_SEPARATOR); + } + }); + } + + protected List getCastedColumnNames(List colNames, List castTypes) { + List castColNames = Lists.newArrayListWithCapacity(colNames.size()); + colNames = SchemaUtil.getEscapedFullColumnNames(colNames); + for (int i = 0; i < colNames.size(); i++) { + castColNames.add("CAST(" + colNames.get(i) + " AS " + castTypes.get(i) + ")"); + } + return castColNames; + } + + public String getQualifiedDataTableName() { + return SchemaUtil.getQualifiedTableName(pdataTable.getSchemaName().getString(), + pdataTable.getTableName().getString()); + } + + public String getQualifiedIndexTableName() { + return SchemaUtil.getQualifiedTableName(pindexTable.getSchemaName().getString(), + pindexTable.getTableName().getString()); + } + + /** + * @return the escaped data column names (equivalents for the index columns) along with their sql + * type, for use in dynamic column queries/upserts + */ + public List getDynamicDataCols() { + // don't want the column family for dynamic columns + return getDynamicCols(getUnqualifiedDataColNames(), dataColSqlTypeNames); + + } + + /** + * @return the escaped index column names along with their sql type, for use in dynamic column + * queries/upserts + */ + public List getDynamicIndexCols() { + // don't want the column family for dynamic columns + return getDynamicCols(getUnqualifiedIndexColNames(), indexColSqlTypeNames); + } + + /** + * @return the corresponding data table column names for the index columns, leading with the data + * table pk columns + */ + public List getDataColNames() { + return dataColNames; + } + + /** Returns same as getDataColNames, without the column family qualifier */ + public List getUnqualifiedDataColNames() { + return getUnqualifiedColNames(dataColNames); + } + + /** + * @return the corresponding data table column names for the index columns, which are not part of + * the data table pk + */ + public List getDataNonPkColNames() { + return dataNonPkColNames; + } + + /** + * @return the corresponding data table column names for the index columns, which are part of the + * data table pk + */ + public List 
getDataPkColNames() { + return dataPkColNames; + } + + /** Returns the index column names, leading with the data table pk columns */ + public List getIndexColNames() { + return indexColNames; + } + + /** Returns same as getIndexColNames, without the column family qualifier */ + public List getUnqualifiedIndexColNames() { + return getUnqualifiedColNames(indexColNames); + } + + /** Returns the index pk column names */ + public List getIndexPkColNames() { + return indexPkColNames; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewJobStatusTracker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewJobStatusTracker.java index 7520dbe608a..3f5b8000223 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewJobStatusTracker.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewJobStatusTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +20,6 @@ import org.apache.hadoop.conf.Configuration; public interface MultiViewJobStatusTracker { - void updateJobStatus(ViewInfoTracker view, long numberOfDeletedRows, int state, - Configuration config, long duration, String mrJobName); -} \ No newline at end of file + void updateJobStatus(ViewInfoTracker view, long numberOfDeletedRows, int state, + Configuration config, long duration, String mrJobName); +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewSplitStrategy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewSplitStrategy.java index d438005d860..4567eb5061e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewSplitStrategy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/MultiViewSplitStrategy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,11 @@ */ package org.apache.phoenix.mapreduce.util; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.InputSplit; -import java.util.List; - public interface MultiViewSplitStrategy { - List generateSplits(List views, Configuration configuration); -} \ No newline at end of file + List generateSplits(List views, Configuration configuration); +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java index 8cfa5382db5..b2475a27d2f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
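A hedged sketch of what a custom implementation of the MultiViewSplitStrategy contract above could look like. The generic parameters (a List of InputSplit returned for a List of ViewInfoWritable), not visible in this rendering of the interface, are assumed from DefaultMultiViewSplitStrategy; the one-view-per-mapper behaviour is purely illustrative, and the sketch is assumed to live in the same org.apache.phoenix.mapreduce.util package as the interface:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.phoenix.mapreduce.PhoenixMultiViewInputSplit;

public class OneViewPerMapperSplitStrategy implements MultiViewSplitStrategy {
  // Illustrative alternative to DefaultMultiViewSplitStrategy: every view gets its own mapper.
  @Override
  public List<InputSplit> generateSplits(List<ViewInfoWritable> views, Configuration configuration) {
    List<InputSplit> splits = new ArrayList<>(views.size());
    for (ViewInfoWritable view : views) {
      List<ViewInfoWritable> single = new ArrayList<>();
      single.add(view);
      splits.add(new PhoenixMultiViewInputSplit(single));
    }
    return splits;
  }
}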
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,893 +45,915 @@ import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable; import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.QueryUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** - * A utility class to set properties on the {#link Configuration} instance. - * Used as part of Map Reduce job configuration. - * + * A utility class to set properties on the {#link Configuration} instance. Used as part of Map + * Reduce job configuration. */ public final class PhoenixConfigurationUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixConfigurationUtil.class); - - public static final String SESSION_ID = "phoenix.sessionid"; - - public static final String UPSERT_STATEMENT = "phoenix.upsert.stmt"; - - public static final String SELECT_STATEMENT = "phoenix.select.stmt"; - - public static final String UPSERT_BATCH_SIZE = "phoenix.upsert.batch.size"; - - public static final String SCHEMA_TYPE = "phoenix.select.schema.type"; - - public static final String MAPREDUCE_SELECT_COLUMN_VALUE_PREFIX = "phoenix.mr.select.column.value"; - - public static final String MAPREDUCE_SELECT_COLUMN_COUNT = "phoenix.mr.select.column.count"; - - public static final String MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX = "phoenix.mr.upsert.column.value"; - - public static final String MAPREDUCE_UPSERT_COLUMN_COUNT = "phoenix.mr.upsert.column.count"; - - public static final String INPUT_TABLE_NAME = "phoenix.input.table.name" ; - - public static final String OUTPUT_TABLE_NAME = "phoenix.colinfo.table.name" ; - - public static final String INPUT_TABLE_CONDITIONS = "phoenix.input.table.conditions" ; - - /** For local indexes which are stored in a single separate physical table*/ - public static final String PHYSICAL_TABLE_NAME = "phoenix.output.table.name" ; - - public static final String TRANSFORM_RETRY_COUNT_VALUE = "phoenix.transform.retry.count"; - - public static final int DEFAULT_TRANSFORM_RETRY_COUNT = 50; - - public static final long DEFAULT_UPSERT_BATCH_SIZE = 1000; - - public static final String INPUT_CLASS = "phoenix.input.class"; - - public static final String CURRENT_SCN_VALUE = "phoenix.mr.currentscn.value"; - - public static final String TX_SCN_VALUE = "phoenix.mr.txscn.value"; - - public static final String TX_PROVIDER = "phoenix.mr.txprovider"; - - /** Configuration key for the class name of an ImportPreUpsertKeyValueProcessor */ - public static final String UPSERT_HOOK_CLASS_CONFKEY = "phoenix.mapreduce.import.kvprocessor"; - - public static final String INDEX_DISABLED_TIMESTAMP_VALUE = "phoenix.mr.index.disableTimestamp"; - - public static final String INDEX_MAINTAINERS = 
"phoenix.mr.index.maintainers"; - - public static final String SCRUTINY_DATA_TABLE_NAME = "phoenix.mr.scrutiny.data.table.name"; - - public static final String SCRUTINY_INDEX_TABLE_NAME = "phoenix.mr.scrutiny.index.table.name"; - - public static final String INDEX_TOOL_DATA_TABLE_NAME = "phoenix.mr.index_tool.data.table.name"; - - public static final String INDEX_TOOL_INDEX_TABLE_NAME = "phoenix.mr.index_tool.index.table.name"; - - public static final String INDEX_TOOL_SOURCE_TABLE = "phoenix.mr.index_tool.source.table"; - - public static final String SCRUTINY_SOURCE_TABLE = "phoenix.mr.scrutiny.source.table"; - - public static final String SCRUTINY_BATCH_SIZE = "phoenix.mr.scrutiny.batch.size"; - - public static final String SCRUTINY_OUTPUT_INVALID_ROWS = - "phoenix.mr.scrutiny.output.invalid.rows"; - - public static final boolean DEFAULT_SCRUTINY_OUTPUT_INVALID_ROWS = false; + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixConfigurationUtil.class); - public static final String SHOULD_FIX_UNVERIFIED_TRANSFORM = - "phoenix.mr.fix.unverified.transform"; + public static final String SESSION_ID = "phoenix.sessionid"; - public static final boolean DEFAULT_SHOULD_FIX_UNVERIFIED_TRANSFORM = false; + public static final String UPSERT_STATEMENT = "phoenix.upsert.stmt"; - public static final String SCRUTINY_OUTPUT_FORMAT = "phoenix.mr.scrutiny.output.format"; + public static final String SELECT_STATEMENT = "phoenix.select.stmt"; - public static final String SCRUTINY_EXECUTE_TIMESTAMP = "phoenix.mr.scrutiny.execute.timestamp"; + public static final String UPSERT_BATCH_SIZE = "phoenix.upsert.batch.size"; - // max output rows per mapper - public static final String SCRUTINY_OUTPUT_MAX = "phoenix.mr.scrutiny.output.max"; + public static final String SCHEMA_TYPE = "phoenix.select.schema.type"; - public static final long DEFAULT_SCRUTINY_BATCH_SIZE = 1000; + public static final String MAPREDUCE_SELECT_COLUMN_VALUE_PREFIX = + "phoenix.mr.select.column.value"; - public static final String DISABLED_INDEXES = "phoenix.mr.index.disabledIndexes"; + public static final String MAPREDUCE_SELECT_COLUMN_COUNT = "phoenix.mr.select.column.count"; - public static final String VERIFY_INDEX = "phoenix.mr.index.verifyIndex"; + public static final String MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX = + "phoenix.mr.upsert.column.value"; - public static final String ONLY_VERIFY_INDEX = "phoenix.mr.index.onlyVerifyIndex"; + public static final String MAPREDUCE_UPSERT_COLUMN_COUNT = "phoenix.mr.upsert.column.count"; - public static final String INDEX_VERIFY_TYPE = "phoenix.mr.index.IndexVerifyType"; + public static final String INPUT_TABLE_NAME = "phoenix.input.table.name"; - public static final String DISABLE_LOGGING_TYPE = "phoenix.mr.index" + - ".IndexDisableLoggingType"; + public static final String OUTPUT_TABLE_NAME = "phoenix.colinfo.table.name"; - // Generate splits based on scans from stats, or just from region splits - public static final String MAPREDUCE_SPLIT_BY_STATS = "phoenix.mapreduce.split.by.stats"; + public static final String INPUT_TABLE_CONDITIONS = "phoenix.input.table.conditions"; - public static final boolean DEFAULT_SPLIT_BY_STATS = true; + /** For local indexes which are stored in a single separate physical table */ + public static final String PHYSICAL_TABLE_NAME = "phoenix.output.table.name"; - public static final String SNAPSHOT_NAME_KEY = "phoenix.mapreduce.snapshot.name"; + public static final String TRANSFORM_RETRY_COUNT_VALUE = "phoenix.transform.retry.count"; - public static final 
String RESTORE_DIR_KEY = "phoenix.tableSnapshot.restore.dir"; + public static final int DEFAULT_TRANSFORM_RETRY_COUNT = 50; - public static final String MAPREDUCE_TENANT_ID = "phoenix.mapreduce.tenantid"; - private static final String INDEX_TOOL_END_TIME = "phoenix.mr.index.endtime"; - private static final String INDEX_TOOL_START_TIME = "phoenix.mr.index.starttime"; - private static final String INDEX_TOOL_LAST_VERIFY_TIME = "phoenix.mr.index.last.verify.time"; + public static final long DEFAULT_UPSERT_BATCH_SIZE = 1000; - public static final String MAPREDUCE_JOB_TYPE = "phoenix.mapreduce.jobtype"; + public static final String INPUT_CLASS = "phoenix.input.class"; - // group number of views per mapper to run the deletion job - public static final String MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE = "phoenix.mapreduce.multi.input.split.size"; + public static final String CURRENT_SCN_VALUE = "phoenix.mr.currentscn.value"; - public static final String MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE = "phoenix.mapreduce.multi.input.batch.size"; + public static final String TX_SCN_VALUE = "phoenix.mr.txscn.value"; - // phoenix ttl data deletion job for a specific view - public static final String MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW = "phoenix.mapreduce.phoenix_ttl.per_view"; + public static final String TX_PROVIDER = "phoenix.mr.txprovider"; - // phoenix ttl data deletion job for all views. - public static final String MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS = "phoenix.mapreduce.phoenix_ttl.all"; + /** Configuration key for the class name of an ImportPreUpsertKeyValueProcessor */ + public static final String UPSERT_HOOK_CLASS_CONFKEY = "phoenix.mapreduce.import.kvprocessor"; - // provide an absolute path to inject your multi input logic - public static final String MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ = "phoenix.mapreduce.multi.input.strategy.path"; + public static final String INDEX_DISABLED_TIMESTAMP_VALUE = "phoenix.mr.index.disableTimestamp"; - // provide an absolute path to inject your multi split logic - public static final String MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ = "phoenix.mapreduce.multi.split.strategy.path"; + public static final String INDEX_MAINTAINERS = "phoenix.mr.index.maintainers"; - // provide an absolute path to inject your multi input mapper logic - public static final String MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ = "phoenix.mapreduce.multi.mapper.tracker.path"; + public static final String SCRUTINY_DATA_TABLE_NAME = "phoenix.mr.scrutiny.data.table.name"; - // provide control to whether or not handle mapreduce snapshot restore and cleanup operations which - // is used by scanners on phoenix side internally or handled by caller externally - public static final String MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE = "phoenix.mapreduce.external.snapshot.restore"; + public static final String SCRUTINY_INDEX_TABLE_NAME = "phoenix.mr.scrutiny.index.table.name"; - // by default MR snapshot restore is handled internally by phoenix - public static final boolean DEFAULT_MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE = false; + public static final String INDEX_TOOL_DATA_TABLE_NAME = "phoenix.mr.index_tool.data.table.name"; - // Is the mapreduce used for table/index transform - public static final String IS_TRANSFORMING_VALUE = "phoenix.mr.istransforming"; + public static final String INDEX_TOOL_INDEX_TABLE_NAME = "phoenix.mr.index_tool.index.table.name"; - // Is force transform cutover - public static final String FORCE_CUTOVER_VALUE = "phoenix.mr.force.cutover"; + public static final String 
INDEX_TOOL_SOURCE_TABLE = "phoenix.mr.index_tool.source.table"; - // Is the mapreduce used for table/index transform - public static final String TRANSFORMING_TABLE_TYPE = "phoenix.mr.transform.tabletype"; + public static final String SCRUTINY_SOURCE_TABLE = "phoenix.mr.scrutiny.source.table"; - public static final String IS_PARTIAL_TRANSFORM = "phoenix.mr.transform.ispartial"; + public static final String SCRUTINY_BATCH_SIZE = "phoenix.mr.scrutiny.batch.size"; - // Randomize mapper execution order - public static final String MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER = - "phoenix.mapreduce.randomize.mapper.execution.order"; - - // non-index jobs benefit less from this - public static final boolean DEFAULT_MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER = false; - - /** - * Determines type of Phoenix Map Reduce job. - * 1. QUERY allows running arbitrary queries without aggregates - * 2. UPDATE_STATS collects statistics for the table - */ - public enum MRJobType { - QUERY, - UPDATE_STATS - } - - public enum SchemaType { - TABLE, - QUERY - } - - private PhoenixConfigurationUtil(){ - - } - /** - * - * @param tableName - */ - public static void setInputTableName(final Configuration configuration, final String tableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(tableName); - configuration.set(INPUT_TABLE_NAME, tableName); - } - - public static void setInputTableConditions(final Configuration configuration, final String conditions) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(conditions); - configuration.set(INPUT_TABLE_CONDITIONS, conditions); - } - - private static void setValues(final Configuration configuration, final String[] columns, final String VALUE_COUNT, final String VALUE_NAME) { - Preconditions.checkNotNull(configuration); - configuration.setInt(VALUE_COUNT, columns.length); - for (int i=0; i getValues(final Configuration configuration, final String VALUE_COUNT, final String VALUE_NAME) { - Preconditions.checkNotNull(configuration); - int numCols = configuration.getInt(VALUE_COUNT, 0); - List cols = Lists.newArrayListWithExpectedSize(numCols); - for (int i=0; i getSelectColumnNames(final Configuration configuration) { - return getValues(configuration, MAPREDUCE_SELECT_COLUMN_COUNT, MAPREDUCE_SELECT_COLUMN_VALUE_PREFIX); - } - - public static void setInputClass(final Configuration configuration, Class inputClass) { - Preconditions.checkNotNull(configuration); - configuration.setClass(INPUT_CLASS ,inputClass,DBWritable.class); - } - - public static void setInputQuery(final Configuration configuration, final String inputQuery) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(inputQuery); - configuration.set(SELECT_STATEMENT, inputQuery); - } - - public static void setPropertyPolicyProviderDisabled(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false"); - } - - public static void setSchemaType(Configuration configuration, final SchemaType schemaType) { - Preconditions.checkNotNull(configuration); - configuration.set(SCHEMA_TYPE, schemaType.name()); - } - - public static void setMRJobType(Configuration configuration, final MRJobType mrJobType) { - Preconditions.checkNotNull(configuration); - configuration.set(MAPREDUCE_JOB_TYPE, mrJobType.name()); - } - - public static void setPhysicalTableName(final Configuration configuration, final String tableName) { - Preconditions.checkNotNull(configuration); 
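A small usage sketch of the input-side setters visible in this hunk (the table name and WHERE condition are invented example values; only methods and constants that appear above are used, and the sketch is assumed to sit outside the org.apache.phoenix.mapreduce.util package, hence the import):

import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;

public class PhoenixInputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Table to read from and an optional row filter, both example values.
    PhoenixConfigurationUtil.setInputTableName(conf, "MY_TABLE");
    PhoenixConfigurationUtil.setInputTableConditions(conf, "CREATED_DATE IS NOT NULL");
    // Read the full table schema rather than an ad-hoc query.
    PhoenixConfigurationUtil.setSchemaType(conf, PhoenixConfigurationUtil.SchemaType.TABLE);
  }
}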
- Preconditions.checkNotNull(tableName); - configuration.set(PHYSICAL_TABLE_NAME, tableName); - } - - public static void setOutputTableName(final Configuration configuration, final String tableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(tableName); - configuration.set(OUTPUT_TABLE_NAME, tableName); - } - - public static void setUpsertColumnNames(final Configuration configuration,final String[] columns) { - setValues(configuration, columns, MAPREDUCE_UPSERT_COLUMN_COUNT, MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX); - } + public static final String SCRUTINY_OUTPUT_INVALID_ROWS = + "phoenix.mr.scrutiny.output.invalid.rows"; - public static void setSnapshotNameKey(final Configuration configuration, final String snapshotName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(snapshotName); - configuration.set(SNAPSHOT_NAME_KEY, snapshotName); - } + public static final boolean DEFAULT_SCRUTINY_OUTPUT_INVALID_ROWS = false; - public static void setRestoreDirKey(final Configuration configuration, final String restoreDir) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(restoreDir); - configuration.set(RESTORE_DIR_KEY, restoreDir); - } + public static final String SHOULD_FIX_UNVERIFIED_TRANSFORM = + "phoenix.mr.fix.unverified.transform"; - public static void setIndexToolStartTime(Configuration configuration, Long startTime) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(startTime); - configuration.set(INDEX_TOOL_START_TIME, Long.toString(startTime)); - } + public static final boolean DEFAULT_SHOULD_FIX_UNVERIFIED_TRANSFORM = false; - public static void setIndexToolLastVerifyTime(Configuration configuration, Long lastVerifyTime) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(lastVerifyTime); - configuration.set(INDEX_TOOL_LAST_VERIFY_TIME, Long.toString(lastVerifyTime)); - } + public static final String SCRUTINY_OUTPUT_FORMAT = "phoenix.mr.scrutiny.output.format"; - public static void setCurrentScnValue(Configuration configuration, Long scn) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(scn); - configuration.set(CURRENT_SCN_VALUE, Long.toString(scn)); - } + public static final String SCRUTINY_EXECUTE_TIMESTAMP = "phoenix.mr.scrutiny.execute.timestamp"; - public static String getIndexToolStartTime(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(INDEX_TOOL_START_TIME); - } + // max output rows per mapper + public static final String SCRUTINY_OUTPUT_MAX = "phoenix.mr.scrutiny.output.max"; - public static String getCurrentScnValue(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(CURRENT_SCN_VALUE); - } + public static final long DEFAULT_SCRUTINY_BATCH_SIZE = 1000; - public static String getIndexToolLastVerifyTime(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(INDEX_TOOL_LAST_VERIFY_TIME); - } - - public static List getUpsertColumnNames(final Configuration configuration) { - return getValues(configuration, MAPREDUCE_UPSERT_COLUMN_COUNT, MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX); - } - - public static void setBatchSize(final Configuration configuration, final Long batchSize) { - Preconditions.checkNotNull(configuration); - configuration.setLong(UPSERT_BATCH_SIZE, batchSize); - } - - /** - * Sets which HBase cluster a Phoenix MapReduce job should read from - * @param 
configuration - * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will read from - */ - @Deprecated - public static void setInputCluster(final Configuration configuration, - final String quorum) { - Preconditions.checkNotNull(configuration); - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_QUORUM, quorum); - } + public static final String DISABLED_INDEXES = "phoenix.mr.index.disabledIndexes"; - /** - * Sets which HBase cluster a Phoenix MapReduce job should write to - * @param configuration - * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will write to - */ - @Deprecated - public static void setOutputCluster(final Configuration configuration, - final String quorum) { - Preconditions.checkNotNull(configuration); - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM, quorum); - } + public static final String VERIFY_INDEX = "phoenix.mr.index.verifyIndex"; - /** - * Sets which HBase cluster a Phoenix MapReduce job should read from - * @param configuration - * @param url Phoenix JDBC URL - */ - public static void setInputClusterUrl(final Configuration configuration, - final String url) { - Preconditions.checkNotNull(configuration); - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_URL, url); - } + public static final String ONLY_VERIFY_INDEX = "phoenix.mr.index.onlyVerifyIndex"; - /** - * Sets which HBase cluster a Phoenix MapReduce job should write to - * @param configuration - * @param url Phoenix JDBC URL string for HBase cluster the MapReduce job will write to - */ - public static void setOutputClusterUrl(final Configuration configuration, - final String url) { - Preconditions.checkNotNull(configuration); - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_URL, url); - } + public static final String INDEX_VERIFY_TYPE = "phoenix.mr.index.IndexVerifyType"; - public static Class getInputClass(final Configuration configuration) { - return configuration.getClass(INPUT_CLASS, NullDBWritable.class); - } - public static SchemaType getSchemaType(final Configuration configuration) { - final String schemaTp = configuration.get(SCHEMA_TYPE); - Preconditions.checkNotNull(schemaTp); - return SchemaType.valueOf(schemaTp); - } + public static final String DISABLE_LOGGING_TYPE = "phoenix.mr.index" + ".IndexDisableLoggingType"; - public static MRJobType getMRJobType(final Configuration configuration, String defaultMRJobType) { - final String mrJobType = configuration.get(MAPREDUCE_JOB_TYPE, defaultMRJobType); - Preconditions.checkNotNull(mrJobType); - return MRJobType.valueOf(mrJobType); - } + // Generate splits based on scans from stats, or just from region splits + public static final String MAPREDUCE_SPLIT_BY_STATS = "phoenix.mapreduce.split.by.stats"; - public static List getUpsertColumnMetadataList(final Configuration configuration) throws SQLException { - Preconditions.checkNotNull(configuration); - List columnMetadataList = null; - columnMetadataList = ColumnInfoToStringEncoderDecoder.decode(configuration); - if (columnMetadataList!=null && !columnMetadataList.isEmpty()) { - return columnMetadataList; - } - final String tableName = getOutputTableName(configuration); - Preconditions.checkNotNull(tableName); - try (PhoenixConnection connection = ConnectionUtil.getOutputConnection(configuration). 
- unwrap(PhoenixConnection.class)) { - List upsertColumnList = - PhoenixConfigurationUtil.getUpsertColumnNames(configuration); - if(!upsertColumnList.isEmpty()) { - LOGGER.info(String.format("UseUpsertColumns=%s, upsertColumnList.size()=%s," - + " upsertColumnList=%s ",!upsertColumnList.isEmpty(), - upsertColumnList.size(), Joiner.on(",").join(upsertColumnList))); - } - columnMetadataList = PhoenixRuntime.generateColumnInfo(connection, tableName, - upsertColumnList); - // we put the encoded column infos in the Configuration for re usability. - ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); - } - return columnMetadataList; - } - - public static String getUpsertStatement(final Configuration configuration) throws SQLException { - Preconditions.checkNotNull(configuration); - String upsertStmt = configuration.get(UPSERT_STATEMENT); - if(isNotEmpty(upsertStmt)) { - return upsertStmt; - } - final String tableName = getOutputTableName(configuration); - Preconditions.checkNotNull(tableName); - List upsertColumnNames = PhoenixConfigurationUtil.getUpsertColumnNames(configuration); - final List columnMetadataList = getUpsertColumnMetadataList(configuration); - if (!upsertColumnNames.isEmpty()) { - // Generating UPSERT statement without column name information. - upsertStmt = QueryUtil.constructUpsertStatement(tableName, columnMetadataList); - LOGGER.info("Phoenix Custom Upsert Statement: "+ upsertStmt); - } else { - // Generating UPSERT statement without column name information. - upsertStmt = QueryUtil.constructGenericUpsertStatement(tableName, columnMetadataList.size()); - LOGGER.info("Phoenix Generic Upsert Statement: " + upsertStmt); - } - configuration.set(UPSERT_STATEMENT, upsertStmt); - return upsertStmt; - - } + public static final boolean DEFAULT_SPLIT_BY_STATS = true; - public static void setUpsertStatement(final Configuration configuration, String upsertStmt) - throws SQLException { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(upsertStmt); - configuration.set(UPSERT_STATEMENT, upsertStmt); - } + public static final String SNAPSHOT_NAME_KEY = "phoenix.mapreduce.snapshot.name"; - public static void setMultiInputMapperSplitSize(Configuration configuration, final int splitSize) { - Preconditions.checkNotNull(configuration); - configuration.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, String.valueOf(splitSize)); - } + public static final String RESTORE_DIR_KEY = "phoenix.tableSnapshot.restore.dir"; - public static void setMultiViewQueryMoreSplitSize(Configuration configuration, final int batchSize) { - Preconditions.checkNotNull(configuration); - configuration.set(MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE, String.valueOf(batchSize)); - } + public static final String MAPREDUCE_TENANT_ID = "phoenix.mapreduce.tenantid"; + private static final String INDEX_TOOL_END_TIME = "phoenix.mr.index.endtime"; + private static final String INDEX_TOOL_START_TIME = "phoenix.mr.index.starttime"; + private static final String INDEX_TOOL_LAST_VERIFY_TIME = "phoenix.mr.index.last.verify.time"; - public static int getMultiViewQueryMoreSplitSize(final Configuration configuration) { - final String batchSize = configuration.get(MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE); - Preconditions.checkNotNull(batchSize); - return Integer.parseInt(batchSize); - } + public static final String MAPREDUCE_JOB_TYPE = "phoenix.mapreduce.jobtype"; - public static List getSelectColumnMetadataList(final Configuration configuration) throws SQLException { - 
Preconditions.checkNotNull(configuration); - List columnMetadataList = null; - columnMetadataList = ColumnInfoToStringEncoderDecoder.decode(configuration); - if (columnMetadataList!=null && !columnMetadataList.isEmpty()) { - return columnMetadataList; - } - final String tableName = getInputTableName(configuration); - Preconditions.checkNotNull(tableName); - Properties props = new Properties(); - String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); - if (tenantId != null) { - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - try (PhoenixConnection connection = ConnectionUtil. - getInputConnection(configuration, props).unwrap(PhoenixConnection.class)) { - final List selectColumnList = getSelectColumnList(configuration); - columnMetadataList = - PhoenixRuntime.generateColumnInfo(connection, tableName, selectColumnList); - // we put the encoded column infos in the Configuration for re usability. - ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); - } - return columnMetadataList; - } + // group number of views per mapper to run the deletion job + public static final String MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE = + "phoenix.mapreduce.multi.input.split.size"; - public static int getMultiViewSplitSize(final Configuration configuration) { - final String splitSize = configuration.get(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE); - Preconditions.checkNotNull(splitSize); - return Integer.parseInt(splitSize); - } + public static final String MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE = + "phoenix.mapreduce.multi.input.batch.size"; - private static List getSelectColumnList( - final Configuration configuration) { - List selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration); - if(!selectColumnList.isEmpty()) { - LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, " + - "selectColumnList=%s ",!selectColumnList.isEmpty(), - selectColumnList.size(), Joiner.on(",").join(selectColumnList))); - } - return selectColumnList; - } + // phoenix ttl data deletion job for a specific view + public static final String MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW = + "phoenix.mapreduce.phoenix_ttl.per_view"; - public static String getSelectStatement(final Configuration configuration) throws SQLException { - Preconditions.checkNotNull(configuration); - String selectStmt = configuration.get(SELECT_STATEMENT); - if(isNotEmpty(selectStmt)) { - LOGGER.info("Select Statement: " + selectStmt); - return selectStmt; - } - final String tableName = getInputTableName(configuration); - Preconditions.checkNotNull(tableName); - final List columnMetadataList = getSelectColumnMetadataList(configuration); - final String conditions = configuration.get(INPUT_TABLE_CONDITIONS); - LOGGER.info("Building select statement from input conditions: " + conditions); - selectStmt = QueryUtil.constructSelectStatement(tableName, columnMetadataList, conditions); - LOGGER.info("Select Statement: " + selectStmt); - configuration.set(SELECT_STATEMENT, selectStmt); - return selectStmt; - } + // phoenix ttl data deletion job for all views. 
+ public static final String MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS = + "phoenix.mapreduce.phoenix_ttl.all"; + // provide an absolute path to inject your multi input logic + public static final String MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ = + "phoenix.mapreduce.multi.input.strategy.path"; - public static long getBatchSize(final Configuration configuration) throws SQLException { - Preconditions.checkNotNull(configuration); - long batchSize = configuration.getLong(UPSERT_BATCH_SIZE, DEFAULT_UPSERT_BATCH_SIZE); - if(batchSize <= 0) { - try (Connection conn = ConnectionUtil.getOutputConnection(configuration)) { - batchSize = ((PhoenixConnection) conn).getMutateBatchSize(); - } - } - configuration.setLong(UPSERT_BATCH_SIZE, batchSize); - return batchSize; - } - - public static int getSelectColumnsCount(Configuration configuration, - String tableName) throws SQLException { - Preconditions.checkNotNull(configuration); - final String schemaTp = configuration.get(SCHEMA_TYPE); - final SchemaType schemaType = SchemaType.valueOf(schemaTp); - int count = 0; - if(SchemaType.QUERY.equals(schemaType)) { - List selectedColumnList = getSelectColumnList(configuration); - count = selectedColumnList == null ? 0 : selectedColumnList.size(); - } else { - List columnInfos = getSelectColumnMetadataList(configuration); - count = columnInfos == null ? 0 : columnInfos.size(); - } - return count; - } + // provide an absolute path to inject your multi split logic + public static final String MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ = + "phoenix.mapreduce.multi.split.strategy.path"; - public static String getInputTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(INPUT_TABLE_NAME); - } + // provide an absolute path to inject your multi input mapper logic + public static final String MAPREDUCE_MULTI_INPUT_MAPPER_TRACKER_CLAZZ = + "phoenix.mapreduce.multi.mapper.tracker.path"; - public static String getPhysicalTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(PHYSICAL_TABLE_NAME); - } - - public static String getOutputTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(OUTPUT_TABLE_NAME); - } + // provide control to whether or not handle mapreduce snapshot restore and cleanup operations + // which + // is used by scanners on phoenix side internally or handled by caller externally + public static final String MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE = + "phoenix.mapreduce.external.snapshot.restore"; - public static void setIsTransforming(Configuration configuration, Boolean isTransforming) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(isTransforming); - configuration.set(IS_TRANSFORMING_VALUE, Boolean.toString(isTransforming)); - } + // by default MR snapshot restore is handled internally by phoenix + public static final boolean DEFAULT_MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE = false; - public static Boolean getIsTransforming(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return Boolean.valueOf(configuration.get(IS_TRANSFORMING_VALUE, "false")); - } + // Is the mapreduce used for table/index transform + public static final String IS_TRANSFORMING_VALUE = "phoenix.mr.istransforming"; - public static void setForceCutover(Configuration configuration, Boolean forceCutover) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(forceCutover); - 
configuration.set(FORCE_CUTOVER_VALUE, Boolean.toString(forceCutover)); - } + // Is force transform cutover + public static final String FORCE_CUTOVER_VALUE = "phoenix.mr.force.cutover"; - public static Boolean getForceCutover(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return Boolean.valueOf(configuration.get(FORCE_CUTOVER_VALUE, "false")); - } + // Is the mapreduce used for table/index transform + public static final String TRANSFORMING_TABLE_TYPE = "phoenix.mr.transform.tabletype"; - public static void setTransformingTableType(Configuration configuration, - SourceTable sourceTable) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(sourceTable); - configuration.set(TRANSFORMING_TABLE_TYPE, sourceTable.name()); - } + public static final String IS_PARTIAL_TRANSFORM = "phoenix.mr.transform.ispartial"; - public static SourceTable getTransformingTableType(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return SourceTable.valueOf(configuration.get(TRANSFORMING_TABLE_TYPE)); - } + // Randomize mapper execution order + public static final String MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER = + "phoenix.mapreduce.randomize.mapper.execution.order"; - public static void setIsPartialTransform(final Configuration configuration, Boolean partialTransform) throws SQLException { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(partialTransform); - configuration.set(IS_PARTIAL_TRANSFORM, String.valueOf(partialTransform)); - } + // non-index jobs benefit less from this + public static final boolean DEFAULT_MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER = false; - public static boolean getIsPartialTransform(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(IS_PARTIAL_TRANSFORM, false); - } + /** + * Determines type of Phoenix Map Reduce job. 1. QUERY allows running arbitrary queries without + * aggregates 2. 
UPDATE_STATS collects statistics for the table + */ + public enum MRJobType { + QUERY, + UPDATE_STATS + } - public static void loadHBaseConfiguration(Job job) throws IOException { - // load hbase-site.xml - Configuration hbaseConf = HBaseConfiguration.create(); - for (Map.Entry entry : hbaseConf) { - if (job.getConfiguration().get(entry.getKey()) == null) { - job.getConfiguration().set(entry.getKey(), entry.getValue()); - } - } - //In order to have phoenix working on a secured cluster - TableMapReduceUtil.initCredentials(job); - } - - public static ImportPreUpsertKeyValueProcessor loadPreUpsertProcessor(Configuration conf) { - Class processorClass = null; - try { - processorClass = conf.getClass( - UPSERT_HOOK_CLASS_CONFKEY, FormatToBytesWritableMapper.DefaultImportPreUpsertKeyValueProcessor.class, - ImportPreUpsertKeyValueProcessor.class); - } catch (Exception e) { - throw new IllegalStateException("Couldn't load upsert hook class", e); - } - - return ReflectionUtils.newInstance(processorClass, conf); - } - - public static byte[] getIndexMaintainers(final Configuration configuration){ - Preconditions.checkNotNull(configuration); - return Base64.getDecoder().decode(configuration.get(INDEX_MAINTAINERS)); - } - - public static void setIndexMaintainers(final Configuration configuration, - final ImmutableBytesWritable indexMetaDataPtr) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(indexMetaDataPtr); - configuration.set(INDEX_MAINTAINERS,Bytes.toString(Base64.getEncoder().encode(indexMetaDataPtr.get()))); - } - - public static void setDisableIndexes(Configuration configuration, String indexName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(indexName); - configuration.set(DISABLED_INDEXES, indexName); - } - - public static void setVerifyIndex(Configuration configuration, boolean verify) { - Preconditions.checkNotNull(configuration); - configuration.setBoolean(VERIFY_INDEX, verify); - } - - public static void setOnlyVerifyIndex(Configuration configuration, boolean verify) { - Preconditions.checkNotNull(configuration); - configuration.setBoolean(ONLY_VERIFY_INDEX, verify); - } - - public static void setIndexVerifyType(Configuration configuration, IndexTool.IndexVerifyType verifyType) { - Preconditions.checkNotNull(configuration); - configuration.set(INDEX_VERIFY_TYPE, verifyType.getValue()); - } - - public static void setDisableLoggingVerifyType(Configuration configuration, - IndexTool.IndexDisableLoggingType disableLoggingType) { - Preconditions.checkNotNull(configuration); - configuration.set(DISABLE_LOGGING_TYPE, disableLoggingType.getValue()); - } - - public static String getScrutinyDataTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(SCRUTINY_DATA_TABLE_NAME); - } - - public static void setScrutinyDataTable(Configuration configuration, String qDataTableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(qDataTableName); - configuration.set(SCRUTINY_DATA_TABLE_NAME, qDataTableName); - } - - public static String getScrutinyIndexTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(SCRUTINY_INDEX_TABLE_NAME); - } - public static void setIndexToolDataTableName(Configuration configuration, String qDataTableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(qDataTableName); - configuration.set(INDEX_TOOL_DATA_TABLE_NAME, qDataTableName); - } - - public 
static String getIndexToolDataTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(INDEX_TOOL_DATA_TABLE_NAME); - } - - public static void setScrutinyIndexTable(Configuration configuration, String qIndexTableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(qIndexTableName); - configuration.set(SCRUTINY_INDEX_TABLE_NAME, qIndexTableName); - } - - public static SourceTable getScrutinySourceTable(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return SourceTable.valueOf(configuration.get(SCRUTINY_SOURCE_TABLE)); - } - - public static void setIndexToolIndexTableName(Configuration configuration, String qIndexTableName) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(qIndexTableName); - configuration.set(INDEX_TOOL_INDEX_TABLE_NAME, qIndexTableName); - } - - public static String getIndexToolIndexTableName(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(INDEX_TOOL_INDEX_TABLE_NAME); - } - - public static void setIndexToolSourceTable(Configuration configuration, - IndexScrutinyTool.SourceTable sourceTable) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(sourceTable); - configuration.set(INDEX_TOOL_SOURCE_TABLE, sourceTable.name()); - } - - public static IndexScrutinyTool.SourceTable getIndexToolSourceTable(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return IndexScrutinyTool.SourceTable.valueOf(configuration.get(INDEX_TOOL_SOURCE_TABLE, - IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE.name())); - } - - public static void setScrutinySourceTable(Configuration configuration, - SourceTable sourceTable) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(sourceTable); - configuration.set(SCRUTINY_SOURCE_TABLE, sourceTable.name()); - } - - public static boolean getScrutinyOutputInvalidRows(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(SCRUTINY_OUTPUT_INVALID_ROWS, - DEFAULT_SCRUTINY_OUTPUT_INVALID_ROWS); - } - - public static void setScrutinyOutputInvalidRows(Configuration configuration, - boolean outputInvalidRows) { - Preconditions.checkNotNull(configuration); - configuration.setBoolean(SCRUTINY_OUTPUT_INVALID_ROWS, outputInvalidRows); - } - - public static long getScrutinyBatchSize(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getLong(SCRUTINY_BATCH_SIZE, DEFAULT_SCRUTINY_BATCH_SIZE); - } - - public static void setScrutinyBatchSize(Configuration configuration, long batchSize) { - Preconditions.checkNotNull(configuration); - configuration.setLong(SCRUTINY_BATCH_SIZE, batchSize); - } - - public static OutputFormat getScrutinyOutputFormat(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return OutputFormat - .valueOf(configuration.get(SCRUTINY_OUTPUT_FORMAT, OutputFormat.FILE.name())); - } - - public static void setScrutinyOutputFormat(Configuration configuration, - OutputFormat outputFormat) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(outputFormat); - configuration.set(SCRUTINY_OUTPUT_FORMAT, outputFormat.name()); - } - - public static long getScrutinyExecuteTimestamp(Configuration configuration) { - Preconditions.checkNotNull(configuration); - long ts = configuration.getLong(SCRUTINY_EXECUTE_TIMESTAMP, -1); - Preconditions.checkArgument(ts 
!= -1); - return ts; - } - - public static void setScrutinyOutputMax(Configuration configuration, - long outputMaxRows) { - Preconditions.checkNotNull(configuration); - configuration.setLong(SCRUTINY_OUTPUT_MAX, outputMaxRows); - } - - public static long getScrutinyOutputMax(Configuration configuration) { - Preconditions.checkNotNull(configuration); - long maxRows = configuration.getLong(SCRUTINY_OUTPUT_MAX, -1); - Preconditions.checkArgument(maxRows != -1); - return maxRows; - } - - public static void setScrutinyExecuteTimestamp(Configuration configuration, long ts) { - Preconditions.checkNotNull(configuration); - configuration.setLong(SCRUTINY_EXECUTE_TIMESTAMP, ts); - } - - public static void setSplitByStats(final Configuration configuration, boolean value) { - Preconditions.checkNotNull(configuration); - configuration.setBoolean(MAPREDUCE_SPLIT_BY_STATS, value); - } - - public static String getDisableIndexes(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.get(DISABLED_INDEXES); - } - - public static boolean getVerifyIndex(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(VERIFY_INDEX, false); - } - - public static boolean getOnlyVerifyIndex(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(ONLY_VERIFY_INDEX, false); - } - - public static IndexTool.IndexVerifyType getIndexVerifyType(Configuration configuration) { - Preconditions.checkNotNull(configuration); - String value = configuration.get(INDEX_VERIFY_TYPE, IndexTool.IndexVerifyType.NONE.getValue()); - return IndexTool.IndexVerifyType.fromValue(value); - } - - public static boolean getShouldFixUnverifiedTransform(Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(SHOULD_FIX_UNVERIFIED_TRANSFORM, - DEFAULT_SHOULD_FIX_UNVERIFIED_TRANSFORM); - } - - public static void setShouldFixUnverifiedTransform(Configuration configuration, - boolean shouldFixUnverified) { - Preconditions.checkNotNull(configuration); - configuration.setBoolean(SHOULD_FIX_UNVERIFIED_TRANSFORM, shouldFixUnverified); - } - - public static IndexTool.IndexVerifyType getDisableLoggingVerifyType(Configuration configuration) { - Preconditions.checkNotNull(configuration); - String value = configuration.get(DISABLE_LOGGING_TYPE, IndexTool.IndexVerifyType.NONE.getValue()); - return IndexTool.IndexVerifyType.fromValue(value); - } - - public static boolean getSplitByStats(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - boolean split = configuration.getBoolean(MAPREDUCE_SPLIT_BY_STATS, DEFAULT_SPLIT_BY_STATS); - return split; - } - - public static void setTenantId(Configuration configuration, String tenantId){ - Preconditions.checkNotNull(configuration); - configuration.set(MAPREDUCE_TENANT_ID, tenantId); - } - - public static void setMRSnapshotManagedExternally(Configuration configuration, Boolean isSnapshotRestoreManagedExternally) { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(isSnapshotRestoreManagedExternally); - configuration.setBoolean(MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE, isSnapshotRestoreManagedExternally); - } - - public static boolean isMRSnapshotManagedExternally(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - boolean isSnapshotRestoreManagedExternally = - configuration.getBoolean(MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE, 
DEFAULT_MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE); - return isSnapshotRestoreManagedExternally; - } - - public static boolean isMRRandomizeMapperExecutionOrder(final Configuration configuration) { - Preconditions.checkNotNull(configuration); - return configuration.getBoolean(MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, - DEFAULT_MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER); - } - - public static void setMaxLookbackAge(Configuration configuration, Long maxLookbackAge) { - Preconditions.checkNotNull(configuration); - if (maxLookbackAge != null) { - configuration.setLong(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE, maxLookbackAge); - } - } - - public static Long getMaxLookbackAge(Configuration configuration) { - Preconditions.checkNotNull(configuration); - String maxLookbackAgeStr = configuration.get(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE); - return maxLookbackAgeStr != null ? Long.valueOf(maxLookbackAgeStr) : null; - } + public enum SchemaType { + TABLE, + QUERY + } + + private PhoenixConfigurationUtil() { + + } + + /** + * + */ + public static void setInputTableName(final Configuration configuration, final String tableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(tableName); + configuration.set(INPUT_TABLE_NAME, tableName); + } + + public static void setInputTableConditions(final Configuration configuration, + final String conditions) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(conditions); + configuration.set(INPUT_TABLE_CONDITIONS, conditions); + } + + private static void setValues(final Configuration configuration, final String[] columns, + final String VALUE_COUNT, final String VALUE_NAME) { + Preconditions.checkNotNull(configuration); + configuration.setInt(VALUE_COUNT, columns.length); + for (int i = 0; i < columns.length; ++i) { + configuration.set(String.format("%s_%d", VALUE_NAME, i), columns[i]); + } + } + + private static List getValues(final Configuration configuration, final String VALUE_COUNT, + final String VALUE_NAME) { + Preconditions.checkNotNull(configuration); + int numCols = configuration.getInt(VALUE_COUNT, 0); + List cols = Lists.newArrayListWithExpectedSize(numCols); + for (int i = 0; i < numCols; ++i) { + cols.add(configuration.get(String.format("%s_%d", VALUE_NAME, i))); + } + return cols; + } + + public static void setSelectColumnNames(final Configuration configuration, + final String[] columns) { + setValues(configuration, columns, MAPREDUCE_SELECT_COLUMN_COUNT, + MAPREDUCE_SELECT_COLUMN_VALUE_PREFIX); + } + + public static List getSelectColumnNames(final Configuration configuration) { + return getValues(configuration, MAPREDUCE_SELECT_COLUMN_COUNT, + MAPREDUCE_SELECT_COLUMN_VALUE_PREFIX); + } + + public static void setInputClass(final Configuration configuration, + Class inputClass) { + Preconditions.checkNotNull(configuration); + configuration.setClass(INPUT_CLASS, inputClass, DBWritable.class); + } + + public static void setInputQuery(final Configuration configuration, final String inputQuery) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(inputQuery); + configuration.set(SELECT_STATEMENT, inputQuery); + } + + public static void setPropertyPolicyProviderDisabled(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + configuration.set(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false"); + } + + public static void setSchemaType(Configuration configuration, final SchemaType schemaType) { + 
Preconditions.checkNotNull(configuration); + configuration.set(SCHEMA_TYPE, schemaType.name()); + } + + public static void setMRJobType(Configuration configuration, final MRJobType mrJobType) { + Preconditions.checkNotNull(configuration); + configuration.set(MAPREDUCE_JOB_TYPE, mrJobType.name()); + } + + public static void setPhysicalTableName(final Configuration configuration, + final String tableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(tableName); + configuration.set(PHYSICAL_TABLE_NAME, tableName); + } + + public static void setOutputTableName(final Configuration configuration, final String tableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(tableName); + configuration.set(OUTPUT_TABLE_NAME, tableName); + } + + public static void setUpsertColumnNames(final Configuration configuration, + final String[] columns) { + setValues(configuration, columns, MAPREDUCE_UPSERT_COLUMN_COUNT, + MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX); + } + + public static void setSnapshotNameKey(final Configuration configuration, + final String snapshotName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(snapshotName); + configuration.set(SNAPSHOT_NAME_KEY, snapshotName); + } + + public static void setRestoreDirKey(final Configuration configuration, final String restoreDir) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(restoreDir); + configuration.set(RESTORE_DIR_KEY, restoreDir); + } + + public static void setIndexToolStartTime(Configuration configuration, Long startTime) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(startTime); + configuration.set(INDEX_TOOL_START_TIME, Long.toString(startTime)); + } + + public static void setIndexToolLastVerifyTime(Configuration configuration, Long lastVerifyTime) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(lastVerifyTime); + configuration.set(INDEX_TOOL_LAST_VERIFY_TIME, Long.toString(lastVerifyTime)); + } + + public static void setCurrentScnValue(Configuration configuration, Long scn) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(scn); + configuration.set(CURRENT_SCN_VALUE, Long.toString(scn)); + } + + public static String getIndexToolStartTime(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(INDEX_TOOL_START_TIME); + } + + public static String getCurrentScnValue(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(CURRENT_SCN_VALUE); + } + + public static String getIndexToolLastVerifyTime(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(INDEX_TOOL_LAST_VERIFY_TIME); + } + + public static List getUpsertColumnNames(final Configuration configuration) { + return getValues(configuration, MAPREDUCE_UPSERT_COLUMN_COUNT, + MAPREDUCE_UPSERT_COLUMN_VALUE_PREFIX); + } + + public static void setBatchSize(final Configuration configuration, final Long batchSize) { + Preconditions.checkNotNull(configuration); + configuration.setLong(UPSERT_BATCH_SIZE, batchSize); + } + + /** + * Sets which HBase cluster a Phoenix MapReduce job should read from + * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will read from + */ + @Deprecated + public static void setInputCluster(final Configuration configuration, final String quorum) { + Preconditions.checkNotNull(configuration); + 
configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_QUORUM, quorum); + } + + /** + * Sets which HBase cluster a Phoenix MapReduce job should write to + * @param quorum ZooKeeper quorum string for HBase cluster the MapReduce job will write to + */ + @Deprecated + public static void setOutputCluster(final Configuration configuration, final String quorum) { + Preconditions.checkNotNull(configuration); + configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM, quorum); + } + + /** + * Sets which HBase cluster a Phoenix MapReduce job should read from + * @param url Phoenix JDBC URL + */ + public static void setInputClusterUrl(final Configuration configuration, final String url) { + Preconditions.checkNotNull(configuration); + configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_URL, url); + } + + /** + * Sets which HBase cluster a Phoenix MapReduce job should write to + * @param url Phoenix JDBC URL string for HBase cluster the MapReduce job will write to + */ + public static void setOutputClusterUrl(final Configuration configuration, final String url) { + Preconditions.checkNotNull(configuration); + configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_URL, url); + } + + public static Class getInputClass(final Configuration configuration) { + return configuration.getClass(INPUT_CLASS, NullDBWritable.class); + } + + public static SchemaType getSchemaType(final Configuration configuration) { + final String schemaTp = configuration.get(SCHEMA_TYPE); + Preconditions.checkNotNull(schemaTp); + return SchemaType.valueOf(schemaTp); + } + + public static MRJobType getMRJobType(final Configuration configuration, String defaultMRJobType) { + final String mrJobType = configuration.get(MAPREDUCE_JOB_TYPE, defaultMRJobType); + Preconditions.checkNotNull(mrJobType); + return MRJobType.valueOf(mrJobType); + } + + public static List getUpsertColumnMetadataList(final Configuration configuration) + throws SQLException { + Preconditions.checkNotNull(configuration); + List columnMetadataList = null; + columnMetadataList = ColumnInfoToStringEncoderDecoder.decode(configuration); + if (columnMetadataList != null && !columnMetadataList.isEmpty()) { + return columnMetadataList; + } + final String tableName = getOutputTableName(configuration); + Preconditions.checkNotNull(tableName); + try (PhoenixConnection connection = + ConnectionUtil.getOutputConnection(configuration).unwrap(PhoenixConnection.class)) { + List upsertColumnList = PhoenixConfigurationUtil.getUpsertColumnNames(configuration); + if (!upsertColumnList.isEmpty()) { + LOGGER.info(String.format( + "UseUpsertColumns=%s, upsertColumnList.size()=%s," + " upsertColumnList=%s ", + !upsertColumnList.isEmpty(), upsertColumnList.size(), + Joiner.on(",").join(upsertColumnList))); + } + columnMetadataList = + PhoenixRuntime.generateColumnInfo(connection, tableName, upsertColumnList); + // we put the encoded column infos in the Configuration for re usability. 
+ ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); + } + return columnMetadataList; + } + + public static String getUpsertStatement(final Configuration configuration) throws SQLException { + Preconditions.checkNotNull(configuration); + String upsertStmt = configuration.get(UPSERT_STATEMENT); + if (isNotEmpty(upsertStmt)) { + return upsertStmt; + } + final String tableName = getOutputTableName(configuration); + Preconditions.checkNotNull(tableName); + List upsertColumnNames = PhoenixConfigurationUtil.getUpsertColumnNames(configuration); + final List columnMetadataList = getUpsertColumnMetadataList(configuration); + if (!upsertColumnNames.isEmpty()) { + // Generating UPSERT statement without column name information. + upsertStmt = QueryUtil.constructUpsertStatement(tableName, columnMetadataList); + LOGGER.info("Phoenix Custom Upsert Statement: " + upsertStmt); + } else { + // Generating UPSERT statement without column name information. + upsertStmt = QueryUtil.constructGenericUpsertStatement(tableName, columnMetadataList.size()); + LOGGER.info("Phoenix Generic Upsert Statement: " + upsertStmt); + } + configuration.set(UPSERT_STATEMENT, upsertStmt); + return upsertStmt; + + } + + public static void setUpsertStatement(final Configuration configuration, String upsertStmt) + throws SQLException { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(upsertStmt); + configuration.set(UPSERT_STATEMENT, upsertStmt); + } + + public static void setMultiInputMapperSplitSize(Configuration configuration, + final int splitSize) { + Preconditions.checkNotNull(configuration); + configuration.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, String.valueOf(splitSize)); + } + + public static void setMultiViewQueryMoreSplitSize(Configuration configuration, + final int batchSize) { + Preconditions.checkNotNull(configuration); + configuration.set(MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE, String.valueOf(batchSize)); + } + + public static int getMultiViewQueryMoreSplitSize(final Configuration configuration) { + final String batchSize = configuration.get(MAPREDUCE_MULTI_INPUT_QUERY_BATCH_SIZE); + Preconditions.checkNotNull(batchSize); + return Integer.parseInt(batchSize); + } + + public static List getSelectColumnMetadataList(final Configuration configuration) + throws SQLException { + Preconditions.checkNotNull(configuration); + List columnMetadataList = null; + columnMetadataList = ColumnInfoToStringEncoderDecoder.decode(configuration); + if (columnMetadataList != null && !columnMetadataList.isEmpty()) { + return columnMetadataList; + } + final String tableName = getInputTableName(configuration); + Preconditions.checkNotNull(tableName); + Properties props = new Properties(); + String tenantId = configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID); + if (tenantId != null) { + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + } + try (PhoenixConnection connection = + ConnectionUtil.getInputConnection(configuration, props).unwrap(PhoenixConnection.class)) { + final List selectColumnList = getSelectColumnList(configuration); + columnMetadataList = + PhoenixRuntime.generateColumnInfo(connection, tableName, selectColumnList); + // we put the encoded column infos in the Configuration for re usability. 
+ ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList); + } + return columnMetadataList; + } + + public static int getMultiViewSplitSize(final Configuration configuration) { + final String splitSize = configuration.get(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE); + Preconditions.checkNotNull(splitSize); + return Integer.parseInt(splitSize); + } + + private static List getSelectColumnList(final Configuration configuration) { + List selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration); + if (!selectColumnList.isEmpty()) { + LOGGER.info( + String.format("UseSelectColumns=%s, selectColumnList.size()=%s, " + "selectColumnList=%s ", + !selectColumnList.isEmpty(), selectColumnList.size(), + Joiner.on(",").join(selectColumnList))); + } + return selectColumnList; + } + + public static String getSelectStatement(final Configuration configuration) throws SQLException { + Preconditions.checkNotNull(configuration); + String selectStmt = configuration.get(SELECT_STATEMENT); + if (isNotEmpty(selectStmt)) { + LOGGER.info("Select Statement: " + selectStmt); + return selectStmt; + } + final String tableName = getInputTableName(configuration); + Preconditions.checkNotNull(tableName); + final List columnMetadataList = getSelectColumnMetadataList(configuration); + final String conditions = configuration.get(INPUT_TABLE_CONDITIONS); + LOGGER.info("Building select statement from input conditions: " + conditions); + selectStmt = QueryUtil.constructSelectStatement(tableName, columnMetadataList, conditions); + LOGGER.info("Select Statement: " + selectStmt); + configuration.set(SELECT_STATEMENT, selectStmt); + return selectStmt; + } + + public static long getBatchSize(final Configuration configuration) throws SQLException { + Preconditions.checkNotNull(configuration); + long batchSize = configuration.getLong(UPSERT_BATCH_SIZE, DEFAULT_UPSERT_BATCH_SIZE); + if (batchSize <= 0) { + try (Connection conn = ConnectionUtil.getOutputConnection(configuration)) { + batchSize = ((PhoenixConnection) conn).getMutateBatchSize(); + } + } + configuration.setLong(UPSERT_BATCH_SIZE, batchSize); + return batchSize; + } + + public static int getSelectColumnsCount(Configuration configuration, String tableName) + throws SQLException { + Preconditions.checkNotNull(configuration); + final String schemaTp = configuration.get(SCHEMA_TYPE); + final SchemaType schemaType = SchemaType.valueOf(schemaTp); + int count = 0; + if (SchemaType.QUERY.equals(schemaType)) { + List selectedColumnList = getSelectColumnList(configuration); + count = selectedColumnList == null ? 0 : selectedColumnList.size(); + } else { + List columnInfos = getSelectColumnMetadataList(configuration); + count = columnInfos == null ? 
0 : columnInfos.size(); + } + return count; + } + + public static String getInputTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(INPUT_TABLE_NAME); + } + + public static String getPhysicalTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(PHYSICAL_TABLE_NAME); + } + + public static String getOutputTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(OUTPUT_TABLE_NAME); + } + + public static void setIsTransforming(Configuration configuration, Boolean isTransforming) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(isTransforming); + configuration.set(IS_TRANSFORMING_VALUE, Boolean.toString(isTransforming)); + } + + public static Boolean getIsTransforming(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return Boolean.valueOf(configuration.get(IS_TRANSFORMING_VALUE, "false")); + } + + public static void setForceCutover(Configuration configuration, Boolean forceCutover) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(forceCutover); + configuration.set(FORCE_CUTOVER_VALUE, Boolean.toString(forceCutover)); + } + + public static Boolean getForceCutover(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return Boolean.valueOf(configuration.get(FORCE_CUTOVER_VALUE, "false")); + } + + public static void setTransformingTableType(Configuration configuration, + SourceTable sourceTable) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(sourceTable); + configuration.set(TRANSFORMING_TABLE_TYPE, sourceTable.name()); + } + + public static SourceTable getTransformingTableType(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return SourceTable.valueOf(configuration.get(TRANSFORMING_TABLE_TYPE)); + } + + public static void setIsPartialTransform(final Configuration configuration, + Boolean partialTransform) throws SQLException { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(partialTransform); + configuration.set(IS_PARTIAL_TRANSFORM, String.valueOf(partialTransform)); + } + + public static boolean getIsPartialTransform(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getBoolean(IS_PARTIAL_TRANSFORM, false); + } + + public static void loadHBaseConfiguration(Job job) throws IOException { + // load hbase-site.xml + Configuration hbaseConf = HBaseConfiguration.create(); + for (Map.Entry entry : hbaseConf) { + if (job.getConfiguration().get(entry.getKey()) == null) { + job.getConfiguration().set(entry.getKey(), entry.getValue()); + } + } + // In order to have phoenix working on a secured cluster + TableMapReduceUtil.initCredentials(job); + } + + public static ImportPreUpsertKeyValueProcessor loadPreUpsertProcessor(Configuration conf) { + Class processorClass = null; + try { + processorClass = conf.getClass(UPSERT_HOOK_CLASS_CONFKEY, + FormatToBytesWritableMapper.DefaultImportPreUpsertKeyValueProcessor.class, + ImportPreUpsertKeyValueProcessor.class); + } catch (Exception e) { + throw new IllegalStateException("Couldn't load upsert hook class", e); + } + + return ReflectionUtils.newInstance(processorClass, conf); + } + + public static byte[] getIndexMaintainers(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return 
Base64.getDecoder().decode(configuration.get(INDEX_MAINTAINERS)); + } + + public static void setIndexMaintainers(final Configuration configuration, + final ImmutableBytesWritable indexMetaDataPtr) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(indexMetaDataPtr); + configuration.set(INDEX_MAINTAINERS, + Bytes.toString(Base64.getEncoder().encode(indexMetaDataPtr.get()))); + } + + public static void setDisableIndexes(Configuration configuration, String indexName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(indexName); + configuration.set(DISABLED_INDEXES, indexName); + } + + public static void setVerifyIndex(Configuration configuration, boolean verify) { + Preconditions.checkNotNull(configuration); + configuration.setBoolean(VERIFY_INDEX, verify); + } + + public static void setOnlyVerifyIndex(Configuration configuration, boolean verify) { + Preconditions.checkNotNull(configuration); + configuration.setBoolean(ONLY_VERIFY_INDEX, verify); + } + + public static void setIndexVerifyType(Configuration configuration, + IndexTool.IndexVerifyType verifyType) { + Preconditions.checkNotNull(configuration); + configuration.set(INDEX_VERIFY_TYPE, verifyType.getValue()); + } + + public static void setDisableLoggingVerifyType(Configuration configuration, + IndexTool.IndexDisableLoggingType disableLoggingType) { + Preconditions.checkNotNull(configuration); + configuration.set(DISABLE_LOGGING_TYPE, disableLoggingType.getValue()); + } + + public static String getScrutinyDataTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(SCRUTINY_DATA_TABLE_NAME); + } + + public static void setScrutinyDataTable(Configuration configuration, String qDataTableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(qDataTableName); + configuration.set(SCRUTINY_DATA_TABLE_NAME, qDataTableName); + } + + public static String getScrutinyIndexTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(SCRUTINY_INDEX_TABLE_NAME); + } + + public static void setIndexToolDataTableName(Configuration configuration, String qDataTableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(qDataTableName); + configuration.set(INDEX_TOOL_DATA_TABLE_NAME, qDataTableName); + } + + public static String getIndexToolDataTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(INDEX_TOOL_DATA_TABLE_NAME); + } + + public static void setScrutinyIndexTable(Configuration configuration, String qIndexTableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(qIndexTableName); + configuration.set(SCRUTINY_INDEX_TABLE_NAME, qIndexTableName); + } + + public static SourceTable getScrutinySourceTable(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return SourceTable.valueOf(configuration.get(SCRUTINY_SOURCE_TABLE)); + } + + public static void setIndexToolIndexTableName(Configuration configuration, + String qIndexTableName) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(qIndexTableName); + configuration.set(INDEX_TOOL_INDEX_TABLE_NAME, qIndexTableName); + } + + public static String getIndexToolIndexTableName(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(INDEX_TOOL_INDEX_TABLE_NAME); + } + + public static void 
setIndexToolSourceTable(Configuration configuration, + IndexScrutinyTool.SourceTable sourceTable) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(sourceTable); + configuration.set(INDEX_TOOL_SOURCE_TABLE, sourceTable.name()); + } + + public static IndexScrutinyTool.SourceTable getIndexToolSourceTable(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return IndexScrutinyTool.SourceTable.valueOf(configuration.get(INDEX_TOOL_SOURCE_TABLE, + IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE.name())); + } + + public static void setScrutinySourceTable(Configuration configuration, SourceTable sourceTable) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(sourceTable); + configuration.set(SCRUTINY_SOURCE_TABLE, sourceTable.name()); + } + + public static boolean getScrutinyOutputInvalidRows(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getBoolean(SCRUTINY_OUTPUT_INVALID_ROWS, + DEFAULT_SCRUTINY_OUTPUT_INVALID_ROWS); + } + + public static void setScrutinyOutputInvalidRows(Configuration configuration, + boolean outputInvalidRows) { + Preconditions.checkNotNull(configuration); + configuration.setBoolean(SCRUTINY_OUTPUT_INVALID_ROWS, outputInvalidRows); + } + + public static long getScrutinyBatchSize(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getLong(SCRUTINY_BATCH_SIZE, DEFAULT_SCRUTINY_BATCH_SIZE); + } + + public static void setScrutinyBatchSize(Configuration configuration, long batchSize) { + Preconditions.checkNotNull(configuration); + configuration.setLong(SCRUTINY_BATCH_SIZE, batchSize); + } + + public static OutputFormat getScrutinyOutputFormat(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return OutputFormat + .valueOf(configuration.get(SCRUTINY_OUTPUT_FORMAT, OutputFormat.FILE.name())); + } + + public static void setScrutinyOutputFormat(Configuration configuration, + OutputFormat outputFormat) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(outputFormat); + configuration.set(SCRUTINY_OUTPUT_FORMAT, outputFormat.name()); + } + + public static long getScrutinyExecuteTimestamp(Configuration configuration) { + Preconditions.checkNotNull(configuration); + long ts = configuration.getLong(SCRUTINY_EXECUTE_TIMESTAMP, -1); + Preconditions.checkArgument(ts != -1); + return ts; + } + + public static void setScrutinyOutputMax(Configuration configuration, long outputMaxRows) { + Preconditions.checkNotNull(configuration); + configuration.setLong(SCRUTINY_OUTPUT_MAX, outputMaxRows); + } + + public static long getScrutinyOutputMax(Configuration configuration) { + Preconditions.checkNotNull(configuration); + long maxRows = configuration.getLong(SCRUTINY_OUTPUT_MAX, -1); + Preconditions.checkArgument(maxRows != -1); + return maxRows; + } + + public static void setScrutinyExecuteTimestamp(Configuration configuration, long ts) { + Preconditions.checkNotNull(configuration); + configuration.setLong(SCRUTINY_EXECUTE_TIMESTAMP, ts); + } + + public static void setSplitByStats(final Configuration configuration, boolean value) { + Preconditions.checkNotNull(configuration); + configuration.setBoolean(MAPREDUCE_SPLIT_BY_STATS, value); + } + + public static String getDisableIndexes(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.get(DISABLED_INDEXES); + } + + public static boolean getVerifyIndex(Configuration configuration) { + 
Preconditions.checkNotNull(configuration); + return configuration.getBoolean(VERIFY_INDEX, false); + } + + public static boolean getOnlyVerifyIndex(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getBoolean(ONLY_VERIFY_INDEX, false); + } + + public static IndexTool.IndexVerifyType getIndexVerifyType(Configuration configuration) { + Preconditions.checkNotNull(configuration); + String value = configuration.get(INDEX_VERIFY_TYPE, IndexTool.IndexVerifyType.NONE.getValue()); + return IndexTool.IndexVerifyType.fromValue(value); + } + + public static boolean getShouldFixUnverifiedTransform(Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getBoolean(SHOULD_FIX_UNVERIFIED_TRANSFORM, + DEFAULT_SHOULD_FIX_UNVERIFIED_TRANSFORM); + } + + public static void setShouldFixUnverifiedTransform(Configuration configuration, + boolean shouldFixUnverified) { + Preconditions.checkNotNull(configuration); + configuration.setBoolean(SHOULD_FIX_UNVERIFIED_TRANSFORM, shouldFixUnverified); + } + + public static IndexTool.IndexVerifyType getDisableLoggingVerifyType(Configuration configuration) { + Preconditions.checkNotNull(configuration); + String value = + configuration.get(DISABLE_LOGGING_TYPE, IndexTool.IndexVerifyType.NONE.getValue()); + return IndexTool.IndexVerifyType.fromValue(value); + } + + public static boolean getSplitByStats(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + boolean split = configuration.getBoolean(MAPREDUCE_SPLIT_BY_STATS, DEFAULT_SPLIT_BY_STATS); + return split; + } + + public static void setTenantId(Configuration configuration, String tenantId) { + Preconditions.checkNotNull(configuration); + configuration.set(MAPREDUCE_TENANT_ID, tenantId); + } + + public static void setMRSnapshotManagedExternally(Configuration configuration, + Boolean isSnapshotRestoreManagedExternally) { + Preconditions.checkNotNull(configuration); + Preconditions.checkNotNull(isSnapshotRestoreManagedExternally); + configuration.setBoolean(MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE, + isSnapshotRestoreManagedExternally); + } + + public static boolean isMRSnapshotManagedExternally(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + boolean isSnapshotRestoreManagedExternally = configuration + .getBoolean(MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE, DEFAULT_MAPREDUCE_EXTERNAL_SNAPSHOT_RESTORE); + return isSnapshotRestoreManagedExternally; + } + + public static boolean isMRRandomizeMapperExecutionOrder(final Configuration configuration) { + Preconditions.checkNotNull(configuration); + return configuration.getBoolean(MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER, + DEFAULT_MAPREDUCE_RANDOMIZE_MAPPER_EXECUTION_ORDER); + } + + public static void setMaxLookbackAge(Configuration configuration, Long maxLookbackAge) { + Preconditions.checkNotNull(configuration); + if (maxLookbackAge != null) { + configuration.setLong(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE, maxLookbackAge); + } + } + + public static Long getMaxLookbackAge(Configuration configuration) { + Preconditions.checkNotNull(configuration); + String maxLookbackAgeStr = + configuration.get(BaseScannerRegionObserverConstants.MAX_LOOKBACK_AGE); + return maxLookbackAgeStr != null ? 
Long.valueOf(maxLookbackAgeStr) : null; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java index 368675d5557..05a7746977e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMapReduceUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.mapreduce.util; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.InputFormat; @@ -28,228 +30,214 @@ import org.apache.phoenix.mapreduce.PhoenixTTLTool; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType; -import java.io.IOException; - /** * Utility class for setting Configuration parameters for the Map Reduce job */ public final class PhoenixMapReduceUtil { - private PhoenixMapReduceUtil() { - - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param tableName Input table name - * @param conditions Condition clause to be added to the WHERE clause. Can be null if there are no conditions. - * @param fieldNames fields being projected for the SELECT query. - */ - public static void setInput(final Job job, final Class inputClass, final String tableName, - final String conditions, final String... fieldNames) { - final Configuration configuration = setInput(job, inputClass, tableName); - if(conditions != null) { - PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); - } - PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); - } - - /** - * - * @param job MR job instance - * @param inputClass DBWritable class - * @param inputFormatClass InputFormat class - * @param tableName Input table name - * @param conditions Condition clause to be added to the WHERE clause. - * Can be null if there are no conditions. - * @param fieldNames fields being projected for the SELECT query. - */ - public static void setInput(final Job job, final Class inputClass, - final Class inputFormatClass, final String tableName, - final String conditions, final String... fieldNames) { - final Configuration configuration = setInput(job, inputClass, inputFormatClass, tableName); - if(conditions != null) { - PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); - } - PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param tableName Input table name - * @param inputQuery Select query. 
- */ - public static void setInput(final Job job, final Class inputClass, final String tableName, final String inputQuery) { - final Configuration configuration = setInput(job, inputClass, tableName); - PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); - PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param inputFormatClass InputFormat class - * @param tableName Input table name - * @param inputQuery Select query - */ - - public static void setInput(final Job job, final Class inputClass, - final Class inputFormatClass, - final String tableName, final String inputQuery) { - final Configuration configuration = setInput(job, inputClass, inputFormatClass, tableName); - PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); - PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param snapshotName The name of a snapshot (of a table) to read from - * @param tableName Input table name - * @param restoreDir a temporary dir to copy the snapshot files into - * @param conditions Condition clause to be added to the WHERE clause. Can be null if there are no conditions. - * @param fieldNames fields being projected for the SELECT query. - */ - public static void setInput(final Job job, final Class inputClass, final String snapshotName, String tableName, - Path restoreDir, final String conditions, final String... fieldNames) throws - IOException { - final Configuration configuration = setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); - if(conditions != null) { - PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); - } - PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); - } - - /** - * - * @param job MR job instance - * @param tool PhoenixTtlTool for Phoenix TTL deletion MR job - */ - public static void setInput(final Job job, PhoenixTTLTool tool) { - Configuration configuration = job.getConfiguration(); - job.setInputFormatClass(PhoenixMultiViewInputFormat.class); - tool.setPhoenixTTLJobInputConfig(configuration); - PhoenixConfigurationUtil.setSchemaType(configuration, - PhoenixConfigurationUtil.SchemaType.QUERY); - PhoenixConfigurationUtil.setMultiInputMapperSplitSize(configuration, tool.getSplitSize()); - PhoenixConfigurationUtil.setMultiViewQueryMoreSplitSize(configuration, tool.getBatchSize()); - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param snapshotName The name of a snapshot (of a table) to read from - * @param tableName Input table name - * @param restoreDir a temporary dir to copy the snapshot files into - * @param inputQuery The select query - */ - public static void setInput(final Job job, final Class inputClass, final String snapshotName, String tableName, - Path restoreDir, String inputQuery) throws - IOException { - final Configuration configuration = setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); - if(inputQuery != null) { - PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); - } - - } - - public static void setInput(final Job job, final Class inputClass, final String snapshotName, String tableName, - Path restoreDir) { - setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); - } - - /** - * - * @param job - * @param inputClass DBWritable class - * @param 
snapshotName The name of a snapshot (of a table) to read from - * @param tableName Input table name - * @param restoreDir a temporary dir to copy the snapshot files into - */ - private static Configuration setSnapshotInput(Job job, Class inputClass, String snapshotName, - String tableName, Path restoreDir, SchemaType schemaType) { - job.setInputFormatClass(PhoenixInputFormat.class); - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setInputClass(configuration, inputClass); - PhoenixConfigurationUtil.setSnapshotNameKey(configuration, snapshotName); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - PhoenixConfigurationUtil.setRestoreDirKey(configuration, restoreDir.toString()); - PhoenixConfigurationUtil.setSchemaType(configuration, schemaType); - return configuration; - } - - private static Configuration setInput(final Job job, final Class inputClass, final String tableName){ - job.setInputFormatClass(PhoenixInputFormat.class); - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - PhoenixConfigurationUtil.setInputClass(configuration,inputClass); - return configuration; - } - - private static Configuration setInput(final Job job, final Class inputClass, - final Class inputFormatClass, final String tableName){ - job.setInputFormatClass(inputFormatClass); - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - PhoenixConfigurationUtil.setInputClass(configuration,inputClass); - return configuration; - } - - /** - * A method to override which HBase cluster for {@link PhoenixInputFormat} to read from - * @param job MapReduce Job - * @param quorum an HBase cluster's ZooKeeper quorum - */ - public static void setInputCluster(final Job job, final String quorum) { - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setInputCluster(configuration, quorum); - } - /** - * - * @param job - * @param tableName Output table - * @param columns List of columns separated by , - */ - public static void setOutput(final Job job, final String tableName,final String columns) { - job.setOutputFormatClass(PhoenixOutputFormat.class); - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); - PhoenixConfigurationUtil.setUpsertColumnNames(configuration,columns.split(",")); - } - - - /** - * - * @param job - * @param tableName Output table - * @param fieldNames fields - */ - public static void setOutput(final Job job, final String tableName , final String... 
fieldNames) { - job.setOutputFormatClass(PhoenixOutputFormat.class); - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); - PhoenixConfigurationUtil.setUpsertColumnNames(configuration,fieldNames); - } - - /** - * A method to override which HBase cluster for {@link PhoenixOutputFormat} to write to - * @param job MapReduce Job - * @param quorum an HBase cluster's ZooKeeper quorum - */ - public static void setOutputCluster(final Job job, final String quorum) { - final Configuration configuration = job.getConfiguration(); - PhoenixConfigurationUtil.setOutputCluster(configuration, quorum); - } - - public static void setTenantId(final Job job, final String tenantId) { - PhoenixConfigurationUtil.setTenantId(job.getConfiguration(), tenantId); - } + private PhoenixMapReduceUtil() { + + } + + /** + * @param inputClass DBWritable class + * @param tableName Input table name + * @param conditions Condition clause to be added to the WHERE clause. Can be null if + * there are no conditions. + * @param fieldNames fields being projected for the SELECT query. + */ + public static void setInput(final Job job, final Class inputClass, + final String tableName, final String conditions, final String... fieldNames) { + final Configuration configuration = setInput(job, inputClass, tableName); + if (conditions != null) { + PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); + } + PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); + } + + /** + * @param job MR job instance + * @param inputClass DBWritable class + * @param inputFormatClass InputFormat class + * @param tableName Input table name + * @param conditions Condition clause to be added to the WHERE clause. Can be null + * if there are no conditions. + * @param fieldNames fields being projected for the SELECT query. + */ + public static void setInput(final Job job, final Class inputClass, + final Class inputFormatClass, final String tableName, + final String conditions, final String... fieldNames) { + final Configuration configuration = setInput(job, inputClass, inputFormatClass, tableName); + if (conditions != null) { + PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); + } + PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); + } + + /** + * @param inputClass DBWritable class + * @param tableName Input table name + * @param inputQuery Select query. 
+ */ + public static void setInput(final Job job, final Class inputClass, + final String tableName, final String inputQuery) { + final Configuration configuration = setInput(job, inputClass, tableName); + PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); + PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); + } + + /** + * @param inputClass DBWritable class + * @param inputFormatClass InputFormat class + * @param tableName Input table name + * @param inputQuery Select query + */ + + public static void setInput(final Job job, final Class inputClass, + final Class inputFormatClass, final String tableName, + final String inputQuery) { + final Configuration configuration = setInput(job, inputClass, inputFormatClass, tableName); + PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); + PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); + } + + /** + * @param inputClass DBWritable class + * @param snapshotName The name of a snapshot (of a table) to read from + * @param tableName Input table name + * @param restoreDir a temporary dir to copy the snapshot files into + * @param conditions Condition clause to be added to the WHERE clause. Can be null if + * there are no conditions. + * @param fieldNames fields being projected for the SELECT query. + */ + public static void setInput(final Job job, final Class inputClass, + final String snapshotName, String tableName, Path restoreDir, final String conditions, + final String... fieldNames) throws IOException { + final Configuration configuration = + setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); + if (conditions != null) { + PhoenixConfigurationUtil.setInputTableConditions(configuration, conditions); + } + PhoenixConfigurationUtil.setSelectColumnNames(configuration, fieldNames); + } + + /** + * @param job MR job instance + * @param tool PhoenixTtlTool for Phoenix TTL deletion MR job + */ + public static void setInput(final Job job, PhoenixTTLTool tool) { + Configuration configuration = job.getConfiguration(); + job.setInputFormatClass(PhoenixMultiViewInputFormat.class); + tool.setPhoenixTTLJobInputConfig(configuration); + PhoenixConfigurationUtil.setSchemaType(configuration, + PhoenixConfigurationUtil.SchemaType.QUERY); + PhoenixConfigurationUtil.setMultiInputMapperSplitSize(configuration, tool.getSplitSize()); + PhoenixConfigurationUtil.setMultiViewQueryMoreSplitSize(configuration, tool.getBatchSize()); + } + + /** + * @param inputClass DBWritable class + * @param snapshotName The name of a snapshot (of a table) to read from + * @param tableName Input table name + * @param restoreDir a temporary dir to copy the snapshot files into + * @param inputQuery The select query + */ + public static void setInput(final Job job, final Class inputClass, + final String snapshotName, String tableName, Path restoreDir, String inputQuery) + throws IOException { + final Configuration configuration = + setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); + if (inputQuery != null) { + PhoenixConfigurationUtil.setInputQuery(configuration, inputQuery); + } + + } + + public static void setInput(final Job job, final Class inputClass, + final String snapshotName, String tableName, Path restoreDir) { + setSnapshotInput(job, inputClass, snapshotName, tableName, restoreDir, SchemaType.QUERY); + } + + /** + * @param inputClass DBWritable class + * @param snapshotName The name of a snapshot (of a table) to read from + * @param 
tableName Input table name + * @param restoreDir a temporary dir to copy the snapshot files into + */ + private static Configuration setSnapshotInput(Job job, Class inputClass, + String snapshotName, String tableName, Path restoreDir, SchemaType schemaType) { + job.setInputFormatClass(PhoenixInputFormat.class); + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setInputClass(configuration, inputClass); + PhoenixConfigurationUtil.setSnapshotNameKey(configuration, snapshotName); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + PhoenixConfigurationUtil.setRestoreDirKey(configuration, restoreDir.toString()); + PhoenixConfigurationUtil.setSchemaType(configuration, schemaType); + return configuration; + } + + private static Configuration setInput(final Job job, final Class inputClass, + final String tableName) { + job.setInputFormatClass(PhoenixInputFormat.class); + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + PhoenixConfigurationUtil.setInputClass(configuration, inputClass); + return configuration; + } + + private static Configuration setInput(final Job job, final Class inputClass, + final Class inputFormatClass, final String tableName) { + job.setInputFormatClass(inputFormatClass); + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + PhoenixConfigurationUtil.setInputClass(configuration, inputClass); + return configuration; + } + + /** + * A method to override which HBase cluster for {@link PhoenixInputFormat} to read from + * @param job MapReduce Job + * @param quorum an HBase cluster's ZooKeeper quorum + */ + public static void setInputCluster(final Job job, final String quorum) { + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setInputCluster(configuration, quorum); + } + + /** + * @param tableName Output table + * @param columns List of columns separated by , + */ + public static void setOutput(final Job job, final String tableName, final String columns) { + job.setOutputFormatClass(PhoenixOutputFormat.class); + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, columns.split(",")); + } + + /** + * @param tableName Output table + * @param fieldNames fields + */ + public static void setOutput(final Job job, final String tableName, final String... 
fieldNames) { + job.setOutputFormatClass(PhoenixOutputFormat.class); + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, fieldNames); + } + + /** + * A method to override which HBase cluster for {@link PhoenixOutputFormat} to write to + * @param job MapReduce Job + * @param quorum an HBase cluster's ZooKeeper quorum + */ + public static void setOutputCluster(final Job job, final String quorum) { + final Configuration configuration = job.getConfiguration(); + PhoenixConfigurationUtil.setOutputCluster(configuration, quorum); + } + + public static void setTenantId(final Job job, final String tenantId) { + PhoenixConfigurationUtil.setTenantId(job.getConfiguration(), tenantId); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiInputUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiInputUtil.java index 8c28a8d2bbd..e36bbecf0d3 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiInputUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiInputUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,17 @@ */ package org.apache.phoenix.mapreduce.util; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.mapreduce.PhoenixTTLTool; import org.apache.phoenix.schema.PTable; @@ -25,90 +36,75 @@ import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - - -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL_NOT_DEFINED; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TTL; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE; - public class PhoenixMultiInputUtil { - public static final String SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY = - "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, TTL FROM " - + SYSTEM_CATALOG_NAME + " WHERE " - + TABLE_TYPE + " = '" + PTableType.VIEW.getSerializedValue() + "' AND " - + TTL + " IS NOT NULL AND " - + TTL + " > " + TTL_NOT_DEFINED + " AND " - + VIEW_TYPE + " <> " + PTable.ViewType.MAPPED.getSerializedValue(); - - public static Connection buildTenantConnection(String url, String tenantId) - throws SQLException { - Properties props = new Properties(); - 
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - return DriverManager.getConnection(url, props); - } - - public static String getSelectAllPageQuery() { - return SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY + " AND " + - "(TENANT_ID,TABLE_SCHEM,TABLE_NAME) > (?,?,?) LIMIT ?"; - } - - public static String constructViewMetadataQueryBasedOnView(String fullName, String tenantId) { - String query = SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY; - - - if (fullName != null) { - if (fullName.equals(PhoenixTTLTool.DELETE_ALL_VIEWS)) { - return query; - } - - String schema = SchemaUtil.getSchemaNameFromFullName(fullName); - String viewName = SchemaUtil.getTableNameFromFullName(fullName); - - if (!schema.equals(StringUtil.EMPTY_STRING)) { - query = query + " AND TABLE_SCHEM = '" + schema + "'"; - } else { - query = query + " AND TABLE_SCHEM IS NULL"; - } + public static final String SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY = + "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, TTL FROM " + SYSTEM_CATALOG_NAME + " WHERE " + + TABLE_TYPE + " = '" + PTableType.VIEW.getSerializedValue() + "' AND " + TTL + + " IS NOT NULL AND " + TTL + " > " + TTL_NOT_DEFINED + " AND " + VIEW_TYPE + " <> " + + PTable.ViewType.MAPPED.getSerializedValue(); + + public static Connection buildTenantConnection(String url, String tenantId) throws SQLException { + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + return DriverManager.getConnection(url, props); + } + + public static String getSelectAllPageQuery() { + return SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY + " AND " + + "(TENANT_ID,TABLE_SCHEM,TABLE_NAME) > (?,?,?) LIMIT ?"; + } + + public static String constructViewMetadataQueryBasedOnView(String fullName, String tenantId) { + String query = SELECT_ALL_VIEW_METADATA_FROM_SYSCAT_QUERY; + + if (fullName != null) { + if (fullName.equals(PhoenixTTLTool.DELETE_ALL_VIEWS)) { + return query; + } - query = query + " AND TABLE_NAME = '" + viewName + "'"; - } + String schema = SchemaUtil.getSchemaNameFromFullName(fullName); + String viewName = SchemaUtil.getTableNameFromFullName(fullName); - if (tenantId != null && tenantId.length() > 0) { - query = query + " AND TENANT_ID = '" + tenantId + "'"; - } else { - query = query + " AND TENANT_ID IS NULL"; - } + if (!schema.equals(StringUtil.EMPTY_STRING)) { + query = query + " AND TABLE_SCHEM = '" + schema + "'"; + } else { + query = query + " AND TABLE_SCHEM IS NULL"; + } - return query; + query = query + " AND TABLE_NAME = '" + viewName + "'"; } - - public static String constructViewMetadataQueryBasedOnTenant(String tenant) { - return constructViewMetadataQueryBasedOnView(null, tenant); + if (tenantId != null && tenantId.length() > 0) { + query = query + " AND TENANT_ID = '" + tenantId + "'"; + } else { + query = query + " AND TENANT_ID IS NULL"; } - public static String getFetchViewQuery(Configuration configuration) { - String query; - if (configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS) != null) { - query = PhoenixMultiInputUtil.getSelectAllPageQuery(); - } else if (configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID) != null && - configuration.get(PhoenixConfigurationUtil. 
- MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW) == null) { - query = PhoenixMultiInputUtil.constructViewMetadataQueryBasedOnTenant( - configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID)); - } else { - query = PhoenixMultiInputUtil.constructViewMetadataQueryBasedOnView( - configuration.get( - PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW), - configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID)); - } - return query; + return query; + } + + public static String constructViewMetadataQueryBasedOnTenant(String tenant) { + return constructViewMetadataQueryBasedOnView(null, tenant); + } + + public static String getFetchViewQuery(Configuration configuration) { + String query; + if ( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_ALL_VIEWS) != null + ) { + query = PhoenixMultiInputUtil.getSelectAllPageQuery(); + } else if ( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID) != null + && configuration.get(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW) + == null + ) { + query = PhoenixMultiInputUtil.constructViewMetadataQueryBasedOnTenant( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID)); + } else { + query = PhoenixMultiInputUtil.constructViewMetadataQueryBasedOnView( + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_PHOENIX_TTL_DELETE_JOB_PER_VIEW), + configuration.get(PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID)); } -} \ No newline at end of file + return query; + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiViewListProvider.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiViewListProvider.java index 154f47f4554..446e5d54402 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiViewListProvider.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixMultiViewListProvider.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,10 @@ */ package org.apache.phoenix.mapreduce.util; -import org.apache.hadoop.conf.Configuration; import java.util.List; +import org.apache.hadoop.conf.Configuration; + public interface PhoenixMultiViewListProvider { - List getPhoenixMultiViewList(Configuration configuration); -} \ No newline at end of file + List getPhoenixMultiViewList(Configuration configuration); +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoTracker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoTracker.java index c46434606d1..79378e9ee50 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoTracker.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,92 +17,94 @@ */ package org.apache.phoenix.mapreduce.util; -import org.apache.hadoop.io.WritableUtils; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -public class ViewInfoTracker implements ViewInfoWritable { - - String tenantId; - String viewName; - String relationName; - long phoenixTtl; - boolean isIndexRelation; - - public ViewInfoTracker() { - - } - - public ViewInfoTracker(String tenantId, String viewName, long phoenixTtl, - String relationName, boolean isIndexRelation) { - setTenantId(tenantId); - this.viewName = viewName; - this.phoenixTtl = phoenixTtl; - this.relationName = relationName; - this.isIndexRelation = isIndexRelation; - } +import org.apache.hadoop.io.WritableUtils; - private void setTenantId(String tenantId) { - if (tenantId != null) { - this.tenantId = tenantId; - } - } +public class ViewInfoTracker implements ViewInfoWritable { - @Override - public String getTenantId() { - return tenantId; - } + String tenantId; + String viewName; + String relationName; + long phoenixTtl; + boolean isIndexRelation; - @Override - public String getViewName() { - return viewName; - } + public ViewInfoTracker() { - @Override - public String getRelationName() { - return relationName; - } + } - @Override - public boolean isIndexRelation() { - return this.isIndexRelation; - } + public ViewInfoTracker(String tenantId, String viewName, long phoenixTtl, String relationName, + boolean isIndexRelation) { + setTenantId(tenantId); + this.viewName = viewName; + this.phoenixTtl = phoenixTtl; + this.relationName = relationName; + this.isIndexRelation = isIndexRelation; + } - public long getTTL() { - return phoenixTtl; + private void setTenantId(String tenantId) { + if (tenantId != null) { + this.tenantId = tenantId; } - - @Override public void write(DataOutput output) throws IOException { - WritableUtils.writeString(output, tenantId); - WritableUtils.writeString(output, viewName); - WritableUtils.writeVLong(output, phoenixTtl); - WritableUtils.writeString(output, relationName); - WritableUtils.writeString(output, isIndexRelation ? "true" : "false"); + } + + @Override + public String getTenantId() { + return tenantId; + } + + @Override + public String getViewName() { + return viewName; + } + + @Override + public String getRelationName() { + return relationName; + } + + @Override + public boolean isIndexRelation() { + return this.isIndexRelation; + } + + public long getTTL() { + return phoenixTtl; + } + + @Override + public void write(DataOutput output) throws IOException { + WritableUtils.writeString(output, tenantId); + WritableUtils.writeString(output, viewName); + WritableUtils.writeVLong(output, phoenixTtl); + WritableUtils.writeString(output, relationName); + WritableUtils.writeString(output, isIndexRelation ? 
"true" : "false"); + } + + @Override + public void readFields(DataInput input) throws IOException { + setTenantId(WritableUtils.readString(input)); + viewName = WritableUtils.readString(input); + phoenixTtl = WritableUtils.readVLong(input); + relationName = WritableUtils.readString(input); + isIndexRelation = WritableUtils.readString(input).equals("true"); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ViewName" + this.viewName); + if (this.tenantId != null) { + sb.append(", Tenant:" + this.tenantId); } - - @Override public void readFields(DataInput input) throws IOException { - setTenantId(WritableUtils.readString(input)); - viewName = WritableUtils.readString(input); - phoenixTtl = WritableUtils.readVLong(input); - relationName = WritableUtils.readString(input); - isIndexRelation = WritableUtils.readString(input).equals("true"); + if (this.isIndexRelation) { + sb.append(", IndexName:" + this.relationName); + } else { + sb.append(", BaseTableName:" + this.relationName); } - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("ViewName" + this.viewName); - if (this.tenantId != null) { - sb.append(", Tenant:" + this.tenantId); - } - if (this.isIndexRelation) { - sb.append(", IndexName:" + this.relationName); - } else { - sb.append(", BaseTableName:" + this.relationName); - } - - return sb.toString(); - } -} \ No newline at end of file + return sb.toString(); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoWritable.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoWritable.java index 48a08e24554..775ae6a3a86 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoWritable.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/util/ViewInfoWritable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,36 +17,41 @@ */ package org.apache.phoenix.mapreduce.util; -import org.apache.hadoop.io.Writable; - import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import org.apache.hadoop.io.Writable; + public interface ViewInfoWritable extends Writable { - public enum ViewInfoJobState { - INITIALIZED(1), - RUNNING(2), - SUCCEEDED(3), - FAILED(4), - KILLED(5), - DELETED(6); - - int value; - - ViewInfoJobState(int value) { - this.value = value; - } - - public int getValue() { - return this.value; - } + public enum ViewInfoJobState { + INITIALIZED(1), + RUNNING(2), + SUCCEEDED(3), + FAILED(4), + KILLED(5), + DELETED(6); + + int value; + + ViewInfoJobState(int value) { + this.value = value; } - void write(DataOutput output) throws IOException; - void readFields(DataInput input) throws IOException; - String getTenantId(); - String getViewName(); - String getRelationName(); // from index or data table - boolean isIndexRelation(); -} \ No newline at end of file + public int getValue() { + return this.value; + } + } + + void write(DataOutput output) throws IOException; + + void readFields(DataInput input) throws IOException; + + String getTenantId(); + + String getViewName(); + + String getRelationName(); // from index or data table + + boolean isIndexRelation(); +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java index 39c8b13cb4c..239aa0c5016 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.replication; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -28,25 +30,22 @@ import org.apache.phoenix.schema.PTable; import org.apache.phoenix.util.SchemaUtil; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX; /** - * Standard replication of the SYSTEM.CATALOG and SYSTEM.CHILD_LINK table can - * be dangerous because schemas may change between the source and target - * clusters at different times, in particular during cluster upgrades. - * However, tenant-owned data such as tenant-owned views need to be copied. - * This WALEntryFilter will only allow tenant-owned rows in SYSTEM.CATALOG to - * be replicated. Data from all other tables is automatically passed. 
+ * Standard replication of the SYSTEM.CATALOG and SYSTEM.CHILD_LINK table can be dangerous because + * schemas may change between the source and target clusters at different times, in particular + * during cluster upgrades. However, tenant-owned data such as tenant-owned views need to be copied. + * This WALEntryFilter will only allow tenant-owned rows in SYSTEM.CATALOG to be replicated. Data + * from all other tables is automatically passed. */ -public class SystemCatalogWALEntryFilter implements - WALEntryFilter, WALCellFilter { +public class SystemCatalogWALEntryFilter implements WALEntryFilter, WALCellFilter { /** - * This is an optimization to just skip the cell filter if we do not care - * about cell filter for certain WALEdits. + * This is an optimization to just skip the cell filter if we do not care about cell filter for + * certain WALEdits. */ private boolean skipCellFilter; // Column value for parent child link private static final byte[] CHILD_TABLE_BYTES = - new byte[]{PTable.LinkType.CHILD_TABLE.getSerializedValue()}; + new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() }; // Number of columns in the primary key of system child link table private static final int NUM_COLUMNS_PRIMARY_KEY = 5; @@ -56,8 +55,7 @@ public WAL.Entry filter(WAL.Entry entry) { // should not block anything. // If the WAL.Entry's table isn't System.Catalog or System.Child_Link, // it auto-passes this filter. - skipCellFilter = - !(SchemaUtil.isMetaTable(entry.getKey().getTableName().getName()) + skipCellFilter = !(SchemaUtil.isMetaTable(entry.getKey().getTableName().getName()) || SchemaUtil.isChildLinkTable(entry.getKey().getTableName().getName())); return entry; } @@ -83,40 +81,40 @@ public Cell filterCell(final WAL.Entry entry, final Cell cell) { private boolean isTenantIdLeadingInKey(final Cell cell) { // rows in system.catalog or system child that aren't tenant-owned // will have a leading separator byte - return cell.getRowArray()[cell.getRowOffset()] - != QueryConstants.SEPARATOR_BYTE; + return cell.getRowArray()[cell.getRowOffset()] != QueryConstants.SEPARATOR_BYTE; } /** - * is the cell for system child link a tenant owned. Besides the non empty - * tenant id, system.child_link table have tenant owned data for parent child - * links. In this case, the column qualifier is - * {@code PhoenixDatabaseMetaData#LINK_TYPE_BYTES} and value is - * {@code PTable.LinkType.CHILD_TABLE}. For corresponding delete markers the - * KeyValue type {@code KeyValue.Type} is {@code KeyValue.Type.DeleteFamily} + * is the cell for system child link a tenant owned. Besides the non empty tenant id, + * system.child_link table have tenant owned data for parent child links. In this case, the column + * qualifier is {@code PhoenixDatabaseMetaData#LINK_TYPE_BYTES} and value is + * {@code PTable.LinkType.CHILD_TABLE}. 
For corresponding delete markers the KeyValue type + * {@code KeyValue.Type} is {@code KeyValue.Type.DeleteFamily} * @param cell hbase cell * @return true if the cell is tenant owned */ private boolean isTenantRowCellSystemChildLink(final Cell cell) { boolean isTenantRowCell = isTenantIdLeadingInKey(cell); - ImmutableBytesWritable key = new ImmutableBytesWritable( - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); + ImmutableBytesWritable key = + new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); boolean isChildLinkToTenantView = false; if (!isTenantRowCell) { - boolean isChildLink = CellUtil.matchingQualifier( - cell, PhoenixDatabaseMetaData.LINK_TYPE_BYTES); - if ((isChildLink && CellUtil.matchingValue(cell, CHILD_TABLE_BYTES)) || - cell.getType() == Cell.Type.DeleteFamily) { + boolean isChildLink = + CellUtil.matchingQualifier(cell, PhoenixDatabaseMetaData.LINK_TYPE_BYTES); + if ( + (isChildLink && CellUtil.matchingValue(cell, CHILD_TABLE_BYTES)) + || cell.getType() == Cell.Type.DeleteFamily + ) { byte[][] rowViewKeyMetadata = new byte[NUM_COLUMNS_PRIMARY_KEY][]; - SchemaUtil.getVarChars(key.get(), key.getOffset(), - key.getLength(), 0, rowViewKeyMetadata); - /** if the child link is to a tenant-owned view, the COLUMN_NAME field will be - * the byte[] of the tenant otherwise, it will be an empty byte array - * (NOT QueryConstants.SEPARATOR_BYTE, but a byte[0]). This assumption is also - * true for child link's delete markers in SYSTEM.CHILD_LINK as it only contains link - * rows and does not deal with other type of rows like column rows that also has - * COLUMN_NAME populated with actual column name.**/ + SchemaUtil.getVarChars(key.get(), key.getOffset(), key.getLength(), 0, rowViewKeyMetadata); + /** + * if the child link is to a tenant-owned view, the COLUMN_NAME field will be the byte[] of + * the tenant otherwise, it will be an empty byte array (NOT QueryConstants.SEPARATOR_BYTE, + * but a byte[0]). This assumption is also true for child link's delete markers in + * SYSTEM.CHILD_LINK as it only contains link rows and does not deal with other type of rows + * like column rows that also has COLUMN_NAME populated with actual column name. + **/ isChildLinkToTenantView = rowViewKeyMetadata[COLUMN_NAME_INDEX].length != 0; } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/MetaDataSplitPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/MetaDataSplitPolicy.java index 05145971263..6a6dc69146d 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/MetaDataSplitPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/MetaDataSplitPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,39 +23,38 @@ public class MetaDataSplitPolicy extends SplitOnLeadingVarCharColumnsPolicy { - private boolean allowSystemCatalogToSplit() { - Configuration conf = getConf(); - return isSystemCatalogSplittable(conf); - } - - public static boolean isSystemCatalogSplittable(Configuration conf) { - boolean allowSplittableSystemCatalogRollback = - conf.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, - QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); - boolean allowSystemCatalogToSplit = - conf.getBoolean(QueryServices.SYSTEM_CATALOG_SPLITTABLE, - QueryServicesOptions.DEFAULT_SYSTEM_CATALOG_SPLITTABLE); - return allowSystemCatalogToSplit && !allowSplittableSystemCatalogRollback; - } - - //This only exists in HBase 2.4+ - @Override - protected boolean canSplit() { - return super.canSplit() && allowSystemCatalogToSplit(); - } - - @Override - protected boolean shouldSplit() { - return super.shouldSplit() && allowSystemCatalogToSplit(); - } - - @Override - protected int getColumnToSplitAt() { - // SYSTEM.CATALOG rowkey is (tenant id, schema name, table name, column name, - // column family) ensure all meta data rows for a given schema are in the same - // region (indexes and tables are in the same schema as we lock the parent table - // when modifying an index) - return 2; - } + private boolean allowSystemCatalogToSplit() { + Configuration conf = getConf(); + return isSystemCatalogSplittable(conf); + } + + public static boolean isSystemCatalogSplittable(Configuration conf) { + boolean allowSplittableSystemCatalogRollback = + conf.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK, + QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK); + boolean allowSystemCatalogToSplit = conf.getBoolean(QueryServices.SYSTEM_CATALOG_SPLITTABLE, + QueryServicesOptions.DEFAULT_SYSTEM_CATALOG_SPLITTABLE); + return allowSystemCatalogToSplit && !allowSplittableSystemCatalogRollback; + } + + // This only exists in HBase 2.4+ + @Override + protected boolean canSplit() { + return super.canSplit() && allowSystemCatalogToSplit(); + } + + @Override + protected boolean shouldSplit() { + return super.shouldSplit() && allowSystemCatalogToSplit(); + } + + @Override + protected int getColumnToSplitAt() { + // SYSTEM.CATALOG rowkey is (tenant id, schema name, table name, column name, + // column family) ensure all meta data rows for a given schema are in the same + // region (indexes and tables are in the same schema as we lock the parent table + // when modifying an index) + return 2; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java index 80551f5fcdf..cd6a77287c0 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,26 +21,27 @@ import org.apache.phoenix.util.SchemaUtil; public abstract class SplitOnLeadingVarCharColumnsPolicy extends SteppingSplitPolicy { - abstract protected int getColumnToSplitAt(); - - protected final byte[] getSplitPoint(byte[] splitPoint) { - if (splitPoint==null) { - return splitPoint; - } - int offset = SchemaUtil.getVarCharLength(splitPoint, 0, splitPoint.length, getColumnToSplitAt()); - // Only split between leading columns indicated. - if (offset == splitPoint.length) { - return splitPoint; - } - // Otherwise, an attempt is being made to split in the middle of a table. - // Just return a split point at the boundary of the first two columns instead - byte[] newSplitPoint = new byte[offset + 1]; - System.arraycopy(splitPoint, 0, newSplitPoint, 0, offset+1); - return newSplitPoint; + abstract protected int getColumnToSplitAt(); + + protected final byte[] getSplitPoint(byte[] splitPoint) { + if (splitPoint == null) { + return splitPoint; } - - @Override - protected final byte[] getSplitPoint() { - return getSplitPoint(super.getSplitPoint()); + int offset = + SchemaUtil.getVarCharLength(splitPoint, 0, splitPoint.length, getColumnToSplitAt()); + // Only split between leading columns indicated. + if (offset == splitPoint.length) { + return splitPoint; } + // Otherwise, an attempt is being made to split in the middle of a table. + // Just return a split point at the boundary of the first two columns instead + byte[] newSplitPoint = new byte[offset + 1]; + System.arraycopy(splitPoint, 0, newSplitPoint, 0, offset + 1); + return newSplitPoint; + } + + @Override + protected final byte[] getSplitPoint() { + return getSplitPoint(super.getSplitPoint()); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java index 58e1f9f2c08..8585a934fae 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +19,9 @@ public class SystemFunctionSplitPolicy extends SplitOnLeadingVarCharColumnsPolicy { - @Override - protected int getColumnToSplitAt() { - return 2; - } + @Override + protected int getColumnToSplitAt() { + return 2; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java index 69fe8aae358..da8a9040ed8 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +19,9 @@ public class SystemStatsSplitPolicy extends SplitOnLeadingVarCharColumnsPolicy { - @Override - protected int getColumnToSplitAt() { - return 1; - } + @Override + protected int getColumnToSplitAt() { + return 1; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemTaskSplitPolicy.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemTaskSplitPolicy.java index c626f44149e..ea8f06e5afd 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemTaskSplitPolicy.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/SystemTaskSplitPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema; import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; @@ -24,5 +23,5 @@ * Split policy for SYSTEM.TASK table */ public class SystemTaskSplitPolicy extends DisabledRegionSplitPolicy { - // empty + // empty } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java index 1045e880105..e849c81a42c 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -52,6 +52,7 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.MetaDataUtil; @@ -60,335 +61,329 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; - /** - * A default implementation of the Statistics tracker that helps to collect stats like min key, max key and guideposts. + * A default implementation of the Statistics tracker that helps to collect stats like min key, max + * key and guideposts. 
*/ public class DefaultStatisticsCollector implements StatisticsCollector { - private static final Logger LOGGER = - LoggerFactory.getLogger(DefaultStatisticsCollector.class); - - final Map> guidePostsInfoWriterMap = Maps.newHashMap(); - private final Table htable; - private StatisticsWriter statsWriter; - final Pair cachedGuidePosts; - final byte[] guidePostWidthBytes; - final byte[] guidePostPerRegionBytes; - // Where to look for GUIDE_POSTS_WIDTH in SYSTEM.CATALOG - final byte[] ptableKey; + private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class); - private long guidePostDepth; - private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP; - private ImmutableBytesWritable currentRow; - private final String tableName; - private final boolean isViewIndexTable; - private final Region region; - private final Configuration configuration; + final Map> guidePostsInfoWriterMap = + Maps.newHashMap(); + private final Table htable; + private StatisticsWriter statsWriter; + final Pair cachedGuidePosts; + final byte[] guidePostWidthBytes; + final byte[] guidePostPerRegionBytes; + // Where to look for GUIDE_POSTS_WIDTH in SYSTEM.CATALOG + final byte[] ptableKey; - public DefaultStatisticsCollector(Configuration configuration, Region region, String tableName, byte[] family, - byte[] gp_width_bytes, byte[] gp_per_region_bytes, StatisticsWriter statsWriter, Table htable) { - this.configuration = configuration; - this.region = region; - this.guidePostWidthBytes = gp_width_bytes; - this.guidePostPerRegionBytes = gp_per_region_bytes; - String pName = tableName; - // For view index, get GUIDE_POST_WIDTH from data physical table - // since there's no row representing those in SYSTEM.CATALOG. - if (MetaDataUtil.isViewIndex(tableName)) { - pName = MetaDataUtil.getViewIndexUserTableName(tableName); - isViewIndexTable = true; - } else { - isViewIndexTable = false; - } - ptableKey = SchemaUtil.getTableKeyFromFullName(pName); - this.tableName = tableName; - // in a compaction we know the one family ahead of time - if (family != null) { - ImmutableBytesPtr cfKey = new ImmutableBytesPtr(family); - cachedGuidePosts = new Pair(0l, new GuidePostsInfoBuilder()); - guidePostsInfoWriterMap.put(cfKey, cachedGuidePosts); - } else { - cachedGuidePosts = null; - } + private long guidePostDepth; + private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP; + private ImmutableBytesWritable currentRow; + private final String tableName; + private final boolean isViewIndexTable; + private final Region region; + private final Configuration configuration; - this.statsWriter = statsWriter; - this.htable = htable; + public DefaultStatisticsCollector(Configuration configuration, Region region, String tableName, + byte[] family, byte[] gp_width_bytes, byte[] gp_per_region_bytes, StatisticsWriter statsWriter, + Table htable) { + this.configuration = configuration; + this.region = region; + this.guidePostWidthBytes = gp_width_bytes; + this.guidePostPerRegionBytes = gp_per_region_bytes; + String pName = tableName; + // For view index, get GUIDE_POST_WIDTH from data physical table + // since there's no row representing those in SYSTEM.CATALOG. 
+ if (MetaDataUtil.isViewIndex(tableName)) { + pName = MetaDataUtil.getViewIndexUserTableName(tableName); + isViewIndexTable = true; + } else { + isViewIndexTable = false; } - - @Override - public void init() throws IOException { - try { - initGuidepostDepth(); - } catch (SQLException e) { - throw new IOException(e); - } - LOGGER.info("Initialization complete for " + - this.getClass() + " statistics collector for table " + tableName); + ptableKey = SchemaUtil.getTableKeyFromFullName(pName); + this.tableName = tableName; + // in a compaction we know the one family ahead of time + if (family != null) { + ImmutableBytesPtr cfKey = new ImmutableBytesPtr(family); + cachedGuidePosts = new Pair(0l, new GuidePostsInfoBuilder()); + guidePostsInfoWriterMap.put(cfKey, cachedGuidePosts); + } else { + cachedGuidePosts = null; } - /** - * Determine the GPW for statistics collection for the table. - * The order of priority from highest to lowest is as follows - * 1. Value provided in UPDATE STATISTICS SQL statement (N/A for MR jobs) - * 2. GPW column in SYSTEM.CATALOG for the table is not null - * Inherits the value from base table for views and indexes (PHOENIX-4332) - * 3. Value from global configuration parameters from hbase-site.xml - * - * GPW of 0 disables the stats collection. If stats were previously collected, this task - * would attempt to delete entries from SYSTEM.STATS table. Not reading '0' from SYSTEM.CATALOG - * would mean the fall back to global value which is defaulted to DEFAULT_STATS_GUIDEPOST_PER_REGION - */ - private void initGuidepostDepth() throws IOException, SQLException { - if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) { - getGuidePostDepthFromStatement(); - LOGGER.info("Guide post depth determined from SQL statement: " + guidePostDepth); - } else { - long guidepostWidth = getGuidePostDepthFromSystemCatalog(); - if (guidepostWidth >= 0) { - this.guidePostDepth = guidepostWidth; - LOGGER.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth); - } else { - this.guidePostDepth = StatisticsUtil.getGuidePostDepth( - configuration.getInt( - QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, - QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION), - configuration.getLongBytes( - QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, - QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES), - region.getTableDescriptor()); - LOGGER.info("Guide post depth determined from global configuration: " + guidePostDepth); - } - } + this.statsWriter = statsWriter; + this.htable = htable; + } + @Override + public void init() throws IOException { + try { + initGuidepostDepth(); + } catch (SQLException e) { + throw new IOException(e); } + LOGGER.info("Initialization complete for " + this.getClass() + + " statistics collector for table " + tableName); + } - private long getGuidePostDepthFromSystemCatalog() throws IOException, SQLException { - try { - long guidepostWidth = -1; - Get get = new Get(ptableKey); - get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); - Result result = htable.get(get); - if (!result.isEmpty()) { - Cell cell = result.listCells().get(0); - guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault()); - } else if (!isViewIndexTable) { - /* - * The table we are collecting stats for is potentially a base table, or local - * index or a global index. 
For view indexes, we rely on the the guide post - * width column in the parent data table's metadata which we already tried - * retrieving above. - */ - try (Connection conn = - QueryUtil.getConnectionOnServer(configuration)) { - PTable table = conn.unwrap(PhoenixConnection.class).getTable(tableName); - if (table.getType() == PTableType.INDEX - && IndexUtil.isGlobalIndex(table)) { - /* - * For global indexes, we need to get the parentName first and then - * fetch guide post width configured for the parent table. - */ - PName parentName = table.getParentName(); - byte[] parentKey = - SchemaUtil.getTableKeyFromFullName(parentName.getString()); - get = new Get(parentKey); - get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); - result = htable.get(get); - if (!result.isEmpty()) { - Cell cell = result.listCells().get(0); - guidepostWidth = - PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), - cell.getValueOffset(), SortOrder.getDefault()); - } - } - } - } - return guidepostWidth; - } finally { - if (htable != null) { - try { - htable.close(); - } catch (IOException e) { - LOGGER.warn("Failed to close " + htable.getName(), e); - } - } - } + /** + * Determine the GPW for statistics collection for the table. The order of priority from highest + * to lowest is as follows 1. Value provided in UPDATE STATISTICS SQL statement (N/A for MR jobs) + * 2. GPW column in SYSTEM.CATALOG for the table is not null Inherits the value from base table + * for views and indexes (PHOENIX-4332) 3. Value from global configuration parameters from + * hbase-site.xml GPW of 0 disables the stats collection. If stats were previously collected, this + * task would attempt to delete entries from SYSTEM.STATS table. Not reading '0' from + * SYSTEM.CATALOG would mean the fall back to global value which is defaulted to + * DEFAULT_STATS_GUIDEPOST_PER_REGION + */ + private void initGuidepostDepth() throws IOException, SQLException { + if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) { + getGuidePostDepthFromStatement(); + LOGGER.info("Guide post depth determined from SQL statement: " + guidePostDepth); + } else { + long guidepostWidth = getGuidePostDepthFromSystemCatalog(); + if (guidepostWidth >= 0) { + this.guidePostDepth = guidepostWidth; + LOGGER.info("Guide post depth determined from SYSTEM.CATALOG: " + guidePostDepth); + } else { + this.guidePostDepth = StatisticsUtil.getGuidePostDepth( + configuration.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_PER_REGION), + configuration.getLongBytes(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES), + region.getTableDescriptor()); + LOGGER.info("Guide post depth determined from global configuration: " + guidePostDepth); + } } - private void getGuidePostDepthFromStatement() { - int guidepostPerRegion = 0; - long guidepostWidth = QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES; - if (guidePostPerRegionBytes != null) { - guidepostPerRegion = PInteger.INSTANCE.getCodec().decodeInt(guidePostPerRegionBytes, 0, SortOrder.getDefault()); + } + + private long getGuidePostDepthFromSystemCatalog() throws IOException, SQLException { + try { + long guidepostWidth = -1; + Get get = new Get(ptableKey); + get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); + Result result = htable.get(get); + if (!result.isEmpty()) { + Cell cell = 
result.listCells().get(0); + guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), + cell.getValueOffset(), SortOrder.getDefault()); + } else if (!isViewIndexTable) { + /* + * The table we are collecting stats for is potentially a base table, or local index or a + * global index. For view indexes, we rely on the the guide post width column in the parent + * data table's metadata which we already tried retrieving above. + */ + try (Connection conn = QueryUtil.getConnectionOnServer(configuration)) { + PTable table = conn.unwrap(PhoenixConnection.class).getTable(tableName); + if (table.getType() == PTableType.INDEX && IndexUtil.isGlobalIndex(table)) { + /* + * For global indexes, we need to get the parentName first and then fetch guide post + * width configured for the parent table. + */ + PName parentName = table.getParentName(); + byte[] parentKey = SchemaUtil.getTableKeyFromFullName(parentName.getString()); + get = new Get(parentKey); + get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES); + result = htable.get(get); + if (!result.isEmpty()) { + Cell cell = result.listCells().get(0); + guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), + cell.getValueOffset(), SortOrder.getDefault()); + } + } } - if (guidePostWidthBytes != null) { - guidepostWidth = PLong.INSTANCE.getCodec().decodeInt(guidePostWidthBytes, 0, SortOrder.getDefault()); + } + return guidepostWidth; + } finally { + if (htable != null) { + try { + htable.close(); + } catch (IOException e) { + LOGGER.warn("Failed to close " + htable.getName(), e); } - this.guidePostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth, - region.getTableDescriptor()); + } } + } - @Override - public long getMaxTimeStamp() { - return maxTimeStamp; + private void getGuidePostDepthFromStatement() { + int guidepostPerRegion = 0; + long guidepostWidth = QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES; + if (guidePostPerRegionBytes != null) { + guidepostPerRegion = + PInteger.INSTANCE.getCodec().decodeInt(guidePostPerRegionBytes, 0, SortOrder.getDefault()); + } + if (guidePostWidthBytes != null) { + guidepostWidth = + PLong.INSTANCE.getCodec().decodeInt(guidePostWidthBytes, 0, SortOrder.getDefault()); } + this.guidePostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth, + region.getTableDescriptor()); + } - @Override - public void close() throws IOException { - if (statsWriter != null) { - this.statsWriter.close(); - } + @Override + public long getMaxTimeStamp() { + return maxTimeStamp; + } + + @Override + public void close() throws IOException { + if (statsWriter != null) { + this.statsWriter.close(); } + } - @Override - public void updateStatistics(Region region, Scan scan) { - try { - List mutations = new ArrayList(); - writeStatistics(region, true, mutations, - EnvironmentEdgeManager.currentTimeMillis(), scan); - commitStats(mutations); - } catch (IOException e) { - LOGGER.error("Unable to update SYSTEM.STATS table.", e); - } + @Override + public void updateStatistics(Region region, Scan scan) { + try { + List mutations = new ArrayList(); + writeStatistics(region, true, mutations, EnvironmentEdgeManager.currentTimeMillis(), scan); + commitStats(mutations); + } catch (IOException e) { + LOGGER.error("Unable to update SYSTEM.STATS table.", e); } + } - private void writeStatistics(final Region region, boolean delete, List mutations, long currentTime, Scan scan) - throws IOException { - Set fams = 
guidePostsInfoWriterMap.keySet(); - // Update the statistics table. - // Delete statistics for a region if no guide posts are collected for that region during - // UPDATE STATISTICS. This will not impact a stats collection of single column family during - // compaction as guidePostsInfoWriterMap cannot be empty in this case. - if (cachedGuidePosts == null) { - // We're either collecting stats for the data table or the local index table, but not both - // We can determine this based on the column families in the scan being prefixed with the - // local index column family prefix. We always explicitly specify the local index column - // families when we're collecting stats for a local index. - boolean collectingForLocalIndex = scan != null && - !scan.getFamilyMap().isEmpty() && - MetaDataUtil.isLocalIndexFamily(scan.getFamilyMap().keySet().iterator().next()); - for (Store store : region.getStores()) { - ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName()); - boolean isLocalIndexStore = MetaDataUtil.isLocalIndexFamily(cfKey); - if (isLocalIndexStore != collectingForLocalIndex) { - continue; - } - if (!guidePostsInfoWriterMap.containsKey(cfKey)) { - Pair emptyGps = new Pair(0l, new GuidePostsInfoBuilder()); - guidePostsInfoWriterMap.put(cfKey, emptyGps); - } - } + private void writeStatistics(final Region region, boolean delete, List mutations, + long currentTime, Scan scan) throws IOException { + Set fams = guidePostsInfoWriterMap.keySet(); + // Update the statistics table. + // Delete statistics for a region if no guide posts are collected for that region during + // UPDATE STATISTICS. This will not impact a stats collection of single column family during + // compaction as guidePostsInfoWriterMap cannot be empty in this case. + if (cachedGuidePosts == null) { + // We're either collecting stats for the data table or the local index table, but not both + // We can determine this based on the column families in the scan being prefixed with the + // local index column family prefix. We always explicitly specify the local index column + // families when we're collecting stats for a local index. 
+ boolean collectingForLocalIndex = scan != null && !scan.getFamilyMap().isEmpty() + && MetaDataUtil.isLocalIndexFamily(scan.getFamilyMap().keySet().iterator().next()); + for (Store store : region.getStores()) { + ImmutableBytesPtr cfKey = + new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName()); + boolean isLocalIndexStore = MetaDataUtil.isLocalIndexFamily(cfKey); + if (isLocalIndexStore != collectingForLocalIndex) { + continue; } - for (ImmutableBytesPtr fam : fams) { - if (delete) { - statsWriter.deleteStatsForRegion(region, this, fam, mutations); - LOGGER.info("Generated " + mutations.size() + " mutations to delete existing stats"); - } - - // If we've disabled stats, don't write any, just delete them - if (this.guidePostDepth > 0) { - int oldSize = mutations.size(); - statsWriter.addStats(this, fam, mutations, guidePostDepth); - LOGGER.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats"); - } + if (!guidePostsInfoWriterMap.containsKey(cfKey)) { + Pair emptyGps = + new Pair(0l, new GuidePostsInfoBuilder()); + guidePostsInfoWriterMap.put(cfKey, emptyGps); } + } } + for (ImmutableBytesPtr fam : fams) { + if (delete) { + statsWriter.deleteStatsForRegion(region, this, fam, mutations); + LOGGER.info("Generated " + mutations.size() + " mutations to delete existing stats"); + } - private void commitStats(List mutations) throws IOException { - statsWriter.commitStats(mutations, this); - LOGGER.info("Committed " + mutations.size() + " mutations for stats"); + // If we've disabled stats, don't write any, just delete them + if (this.guidePostDepth > 0) { + int oldSize = mutations.size(); + statsWriter.addStats(this, fam, mutations, guidePostDepth); + LOGGER.info("Generated " + (mutations.size() - oldSize) + " mutations for new stats"); + } } + } + + private void commitStats(List mutations) throws IOException { + statsWriter.commitStats(mutations, this); + LOGGER.info("Committed " + mutations.size() + " mutations for stats"); + } - /** - * Update the current statistics based on the latest batch of key-values from the underlying scanner - * - * @param results - * next batch of {@link KeyValue}s + /** + * Update the current statistics based on the latest batch of key-values from the underlying + * scanner next batch of {@link KeyValue}s + */ + @Override + public void collectStatistics(final List results) { + // A guide posts depth of zero disables the collection of stats + if (guidePostDepth == 0 || results.size() == 0) { + return; + } + Map famMap = Maps.newHashMap(); + boolean incrementRow = false; + Cell c = results.get(0); + ImmutableBytesWritable row = + new ImmutableBytesWritable(c.getRowArray(), c.getRowOffset(), c.getRowLength()); + /* + * During compaction, it is possible that HBase will not return all the key values when + * internalScanner.next() is called. So we need the below check to avoid counting a row more + * than once. 
*/ - @Override - public void collectStatistics(final List results) { - // A guide posts depth of zero disables the collection of stats - if (guidePostDepth == 0 || results.size() == 0) { - return; + if (currentRow == null || !row.equals(currentRow)) { + currentRow = row; + incrementRow = true; + } + for (Cell cell : results) { + maxTimeStamp = Math.max(maxTimeStamp, cell.getTimestamp()); + Pair gps; + if (cachedGuidePosts == null) { + ImmutableBytesPtr cfKey = new ImmutableBytesPtr(cell.getFamilyArray(), + cell.getFamilyOffset(), cell.getFamilyLength()); + gps = guidePostsInfoWriterMap.get(cfKey); + if (gps == null) { + gps = new Pair(0l, new GuidePostsInfoBuilder()); + guidePostsInfoWriterMap.put(cfKey, gps); } - Map famMap = Maps.newHashMap(); - boolean incrementRow = false; - Cell c = results.get(0); - ImmutableBytesWritable row = new ImmutableBytesWritable(c.getRowArray(), c.getRowOffset(), c.getRowLength()); - /* - * During compaction, it is possible that HBase will not return all the key values when - * internalScanner.next() is called. So we need the below check to avoid counting a row more - * than once. - */ - if (currentRow == null || !row.equals(currentRow)) { - currentRow = row; - incrementRow = true; + if (famMap.get(cfKey) == null) { + famMap.put(cfKey, true); + gps.getSecond().incrementRowCount(); } - for (Cell cell : results) { - maxTimeStamp = Math.max(maxTimeStamp, cell.getTimestamp()); - Pair gps; - if (cachedGuidePosts == null) { - ImmutableBytesPtr cfKey = new ImmutableBytesPtr(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); - gps = guidePostsInfoWriterMap.get(cfKey); - if (gps == null) { - gps = new Pair(0l, - new GuidePostsInfoBuilder()); - guidePostsInfoWriterMap.put(cfKey, gps); - } - if (famMap.get(cfKey) == null) { - famMap.put(cfKey, true); - gps.getSecond().incrementRowCount(); - } - } else { - gps = cachedGuidePosts; - if (incrementRow) { - cachedGuidePosts.getSecond().incrementRowCount(); - incrementRow = false; - } - } - int kvLength = KeyValueUtil.getSerializedSize(cell, true); - long byteCount = gps.getFirst() + kvLength; - gps.setFirst(byteCount); - if (byteCount >= guidePostDepth) { - if (gps.getSecond().addGuidePostOnCollection(row, byteCount, gps.getSecond().getRowCount())) { - gps.setFirst(0l); - gps.getSecond().resetRowCount(); - } - } + } else { + gps = cachedGuidePosts; + if (incrementRow) { + cachedGuidePosts.getSecond().incrementRowCount(); + incrementRow = false; } - } - - @Override - public GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam) { - Pair pair = guidePostsInfoWriterMap.get(fam); - if (pair != null) { - return pair.getSecond().build(); + } + int kvLength = KeyValueUtil.getSerializedSize(cell, true); + long byteCount = gps.getFirst() + kvLength; + gps.setFirst(byteCount); + if (byteCount >= guidePostDepth) { + if ( + gps.getSecond().addGuidePostOnCollection(row, byteCount, gps.getSecond().getRowCount()) + ) { + gps.setFirst(0l); + gps.getSecond().resetRowCount(); } - return null; + } } + } - @Override - public long getGuidePostDepth() { - return guidePostDepth; + @Override + public GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam) { + Pair pair = guidePostsInfoWriterMap.get(fam); + if (pair != null) { + return pair.getSecond().build(); } + return null; + } - @Override - public StatisticsWriter getStatisticsWriter() { - return statsWriter; - } + @Override + public long getGuidePostDepth() { + return guidePostDepth; + } - @Override - public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment 
env, - Store store, InternalScanner delegate) { + @Override + public StatisticsWriter getStatisticsWriter() { + return statsWriter; + } - ImmutableBytesPtr cfKey = - new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName()); - LOGGER.info("StatisticsScanner created for table: " - + tableName + " CF: " + store.getColumnFamilyName()); - return new StatisticsScanner(this, statsWriter, env, delegate, cfKey); - } + @Override + public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store, + InternalScanner delegate) { + + ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName()); + LOGGER.info( + "StatisticsScanner created for table: " + tableName + " CF: " + store.getColumnFamilyName()); + return new StatisticsScanner(this, statsWriter, env, delegate, cfKey); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/NoOpStatisticsCollector.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/NoOpStatisticsCollector.java index 4a8d25f7a58..b52c0ee4d71 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/NoOpStatisticsCollector.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/NoOpStatisticsCollector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,53 +29,54 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; /** - * A drop-in statistics collector that does nothing. An instance of this class is used for tables - * or environments where statistics collection is disabled. + * A drop-in statistics collector that does nothing. An instance of this class is used for tables or + * environments where statistics collection is disabled. 
*/ public class NoOpStatisticsCollector implements StatisticsCollector { - @Override - public long getMaxTimeStamp() { - return NO_TIMESTAMP; - } + @Override + public long getMaxTimeStamp() { + return NO_TIMESTAMP; + } - @Override - public void close() throws IOException { - // No-op - } + @Override + public void close() throws IOException { + // No-op + } - @Override - public void updateStatistics(Region region, Scan scan) { - // No-op - } + @Override + public void updateStatistics(Region region, Scan scan) { + // No-op + } - @Override - public void collectStatistics(List results) { - // No-op - } + @Override + public void collectStatistics(List results) { + // No-op + } - @Override - public void init() { - // No-op - } + @Override + public void init() { + // No-op + } - @Override public GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam) { - return null; - } + @Override + public GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam) { + return null; + } - @Override - public long getGuidePostDepth() { - return -1; - } + @Override + public long getGuidePostDepth() { + return -1; + } - @Override - public StatisticsWriter getStatisticsWriter() { - return null; - } + @Override + public StatisticsWriter getStatisticsWriter() { + return null; + } - @Override - public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, - Store store, InternalScanner delegate) { - return delegate; - } + @Override + public InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store, + InternalScanner delegate) { + return delegate; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java index bed0ee993be..f97a62f48c1 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,50 +34,49 @@ */ public interface StatisticsCollector extends Closeable { - /** Constant used if no max timestamp is available */ - long NO_TIMESTAMP = -1; + /** Constant used if no max timestamp is available */ + long NO_TIMESTAMP = -1; - /** - * Returns the maximum timestamp of all cells encountered while collecting statistics. - */ - long getMaxTimeStamp(); + /** + * Returns the maximum timestamp of all cells encountered while collecting statistics. + */ + long getMaxTimeStamp(); - /** - * Write the collected statistics for the given region over the scan provided. - */ - void updateStatistics(Region region, Scan scan); + /** + * Write the collected statistics for the given region over the scan provided. + */ + void updateStatistics(Region region, Scan scan); - /** - * Collect statistics for the given list of cells. This method can be called multiple times - * during collection of statistics. - */ - void collectStatistics(List results); + /** + * Collect statistics for the given list of cells. This method can be called multiple times during + * collection of statistics. 
+ */ + void collectStatistics(List results); - /** - * Called before beginning the collection of statistics through {@link #collectStatistics(List)} - * @throws IOException - */ - void init() throws IOException; + /** + * Called before beginning the collection of statistics through {@link #collectStatistics(List)} + */ + void init() throws IOException; - /** - * Retrieve the calculated guide post info for the given column family. - */ - GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam); + /** + * Retrieve the calculated guide post info for the given column family. + */ + GuidePostsInfo getGuidePosts(ImmutableBytesPtr fam); - /** - * Retrieve the guide post depth during stats collection - */ - long getGuidePostDepth(); + /** + * Retrieve the guide post depth during stats collection + */ + long getGuidePostDepth(); - /** - * Retrieve the object that manages statistics persistence - */ - StatisticsWriter getStatisticsWriter(); + /** + * Retrieve the object that manages statistics persistence + */ + StatisticsWriter getStatisticsWriter(); - /** - * Wrap a compaction scanner with a scanner that will collect statistics using this instance. - */ - InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store, - InternalScanner delegate); + /** + * Wrap a compaction scanner with a scanner that will collect statistics using this instance. + */ + InternalScanner createCompactionScanner(RegionCoprocessorEnvironment env, Store store, + InternalScanner delegate); } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java index 210ea79544f..109718327de 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectorFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,47 +30,48 @@ import org.apache.phoenix.util.ServerUtil.ConnectionType; /** - * Provides new {@link StatisticsCollector} instances based on configuration settings for a - * table (or system-wide configuration of statistics). + * Provides new {@link StatisticsCollector} instances based on configuration settings for a table + * (or system-wide configuration of statistics). 
*/ public class StatisticsCollectorFactory { - public static StatisticsCollector createStatisticsCollector(RegionCoprocessorEnvironment env, - String tableName, long clientTimeStamp, byte[] guidepostWidthBytes, - byte[] guidepostsPerRegionBytes) throws IOException { - return createStatisticsCollector(env, tableName, clientTimeStamp, null, guidepostWidthBytes, guidepostsPerRegionBytes); - } + public static StatisticsCollector createStatisticsCollector(RegionCoprocessorEnvironment env, + String tableName, long clientTimeStamp, byte[] guidepostWidthBytes, + byte[] guidepostsPerRegionBytes) throws IOException { + return createStatisticsCollector(env, tableName, clientTimeStamp, null, guidepostWidthBytes, + guidepostsPerRegionBytes); + } - public static StatisticsCollector createStatisticsCollector( - RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, - byte[] storeName) throws IOException { - return createStatisticsCollector(env, tableName, clientTimeStamp, storeName, null, null); - } + public static StatisticsCollector createStatisticsCollector(RegionCoprocessorEnvironment env, + String tableName, long clientTimeStamp, byte[] storeName) throws IOException { + return createStatisticsCollector(env, tableName, clientTimeStamp, storeName, null, null); + } - public static StatisticsCollector createStatisticsCollector( - RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, - byte[] storeName, byte[] guidepostWidthBytes, - byte[] guidepostsPerRegionBytes) throws IOException { - if (statisticsEnabled(env)) { - StatisticsWriter statsWriter = StatisticsWriter.newWriter(env, tableName, clientTimeStamp); - Table table = ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env).getTable( - SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration())); - return new DefaultStatisticsCollector(env.getConfiguration(), env.getRegion(), tableName, - storeName,guidepostWidthBytes, guidepostsPerRegionBytes, statsWriter, table); - } else { - return new NoOpStatisticsCollector(); - } - } - - /** - * Determines if statistics are enabled (which is the default). This is done on the - * RegionCoprocessorEnvironment for now to allow setting this on a per-table basis, although - * it could be moved to the general table metadata in the future if there is a realistic - * use case for that. - */ - private static boolean statisticsEnabled(RegionCoprocessorEnvironment env) { - return (env.getConfiguration().getBoolean(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)) - && StatisticsUtil.isStatsEnabled(env.getRegionInfo().getTable()); + public static StatisticsCollector createStatisticsCollector(RegionCoprocessorEnvironment env, + String tableName, long clientTimeStamp, byte[] storeName, byte[] guidepostWidthBytes, + byte[] guidepostsPerRegionBytes) throws IOException { + if (statisticsEnabled(env)) { + StatisticsWriter statsWriter = StatisticsWriter.newWriter(env, tableName, clientTimeStamp); + Table table = ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env) + .getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, + env.getConfiguration())); + return new DefaultStatisticsCollector(env.getConfiguration(), env.getRegion(), tableName, + storeName, guidepostWidthBytes, guidepostsPerRegionBytes, statsWriter, table); + } else { + return new NoOpStatisticsCollector(); } + } + + /** + * Determines if statistics are enabled (which is the default). 
This is done on the + * RegionCoprocessorEnvironment for now to allow setting this on a per-table basis, although it + * could be moved to the general table metadata in the future if there is a realistic use case for + * that. + */ + private static boolean statisticsEnabled(RegionCoprocessorEnvironment env) { + return (env.getConfiguration().getBoolean(STATS_COLLECTION_ENABLED, + DEFAULT_STATS_COLLECTION_ENABLED)) + && StatisticsUtil.isStatsEnabled(env.getRegionInfo().getTable()); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java index bdc66baec5a..e0ebc20191f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,168 +40,163 @@ import org.slf4j.LoggerFactory; /** - * The scanner that does the scanning to collect the stats during major compaction.{@link DefaultStatisticsCollector} + * The scanner that does the scanning to collect the stats during major + * compaction.{@link DefaultStatisticsCollector} */ public class StatisticsScanner implements InternalScanner { - private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsScanner.class); - private InternalScanner delegate; - private StatisticsWriter statsWriter; - private Region region; - private StatisticsCollector tracker; - private ImmutableBytesPtr family; - private final Configuration config; - private final RegionCoprocessorEnvironment env; - - public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, RegionCoprocessorEnvironment env, - InternalScanner delegate, ImmutableBytesPtr family) { - this.tracker = tracker; - this.statsWriter = stats; - this.delegate = delegate; - this.region = env.getRegion(); - this.env = env; - this.family = family; - this.config = env.getConfiguration(); - StatisticsCollectionRunTracker.getInstance(config).addCompactingRegion(region.getRegionInfo()); + private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsScanner.class); + private InternalScanner delegate; + private StatisticsWriter statsWriter; + private Region region; + private StatisticsCollector tracker; + private ImmutableBytesPtr family; + private final Configuration config; + private final RegionCoprocessorEnvironment env; + + public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, + RegionCoprocessorEnvironment env, InternalScanner delegate, ImmutableBytesPtr family) { + this.tracker = tracker; + this.statsWriter = stats; + this.delegate = delegate; + this.region = env.getRegion(); + this.env = env; + this.family = family; + this.config = env.getConfiguration(); + StatisticsCollectionRunTracker.getInstance(config).addCompactingRegion(region.getRegionInfo()); + } + + @Override + public boolean next(List result) throws IOException { + boolean ret = delegate.next(result); + updateStats(result); + return ret; + } + + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return 
next(result); + } + + /** + * Update the current statistics based on the lastest batch of key-values from the underlying + * scanner next batch of {@link KeyValue}s + */ + private void updateStats(final List results) throws IOException { + if (!results.isEmpty()) { + tracker.collectStatistics(results); } - - @Override - public boolean next(List result) throws IOException { - boolean ret = delegate.next(result); - updateStats(result); - return ret; + } + + @Override + public void close() throws IOException { + boolean async = getConfig().getBoolean(COMMIT_STATS_ASYNC, DEFAULT_COMMIT_STATS_ASYNC); + StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config); + StatisticsScannerCallable callable = createCallable(); + if (isConnectionClosed()) { + LOGGER.debug("Not updating table statistics because the server is stopping/stopped"); + return; } - - @Override - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); + if (!async) { + callable.call(); + } else { + collectionTracker.runTask(callable); } + } - /** - * Update the current statistics based on the lastest batch of key-values from the underlying scanner - * - * @param results - * next batch of {@link KeyValue}s - * @throws IOException - */ - private void updateStats(final List results) throws IOException { - if (!results.isEmpty()) { - tracker.collectStatistics(results); - } - } + // VisibleForTesting + StatisticsCollectionRunTracker getStatsCollectionRunTracker(Configuration c) { + return StatisticsCollectionRunTracker.getInstance(c); + } - @Override - public void close() throws IOException { - boolean async = getConfig().getBoolean(COMMIT_STATS_ASYNC, DEFAULT_COMMIT_STATS_ASYNC); - StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config); - StatisticsScannerCallable callable = createCallable(); - if (isConnectionClosed()) { - LOGGER.debug("Not updating table statistics because the server is stopping/stopped"); - return; - } - if (!async) { - callable.call(); - } else { - collectionTracker.runTask(callable); - } - } + Configuration getConfig() { + return config; + } - // VisibleForTesting - StatisticsCollectionRunTracker getStatsCollectionRunTracker(Configuration c) { - return StatisticsCollectionRunTracker.getInstance(c); - } + StatisticsWriter getStatisticsWriter() { + return statsWriter; + } - Configuration getConfig() { - return config; - } + Region getRegion() { + return region; + } - StatisticsWriter getStatisticsWriter() { - return statsWriter; - } + Connection getConnection() { + return env.getConnection(); + } - Region getRegion() { - return region; - } - - Connection getConnection() { - return env.getConnection(); - } + StatisticsScannerCallable createCallable() { + return new StatisticsScannerCallable(); + } - StatisticsScannerCallable createCallable() { - return new StatisticsScannerCallable(); - } + StatisticsCollector getTracker() { + return tracker; + } - StatisticsCollector getTracker() { - return tracker; - } + InternalScanner getDelegate() { + return delegate; + } - InternalScanner getDelegate() { - return delegate; - } - - class StatisticsScannerCallable implements Callable { - @Override - public Void call() throws IOException { - IOException toThrow = null; - StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config); - final RegionInfo regionInfo = getRegion().getRegionInfo(); - try { - // update the statistics table - // Just verify if this if fine - ArrayList mutations = new 
ArrayList(); - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Deleting the stats for the region " - + regionInfo.getRegionNameAsString() - + " as part of major compaction"); - } - getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Adding new stats for the region " + - regionInfo.getRegionNameAsString() - + " as part of major compaction"); - } - getStatisticsWriter().addStats(tracker, family, - mutations, tracker.getGuidePostDepth()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Committing new stats for the region " + - regionInfo.getRegionNameAsString() - + " as part of major compaction"); - } - getStatisticsWriter().commitStats(mutations, tracker); - } catch (IOException e) { - if (isConnectionClosed()) { - LOGGER.debug( - "Ignoring error updating statistics because region is closing/closed"); - } else { - LOGGER.error("Failed to update statistics table!", e); - toThrow = e; - } - } finally { - try { - collectionTracker.removeCompactingRegion(regionInfo); - getStatisticsWriter().close();// close the writer - getTracker().close();// close the tracker - } catch (IOException e) { - if (toThrow == null) toThrow = e; - LOGGER.error("Error while closing the stats table", e); - } finally { - // close the delegate scanner - try { - getDelegate().close(); - } catch (IOException e) { - if (toThrow == null) toThrow = e; - LOGGER.error("Error while closing the scanner", e); - } finally { - if (toThrow != null) { throw toThrow; } - } - } + class StatisticsScannerCallable implements Callable { + @Override + public Void call() throws IOException { + IOException toThrow = null; + StatisticsCollectionRunTracker collectionTracker = getStatsCollectionRunTracker(config); + final RegionInfo regionInfo = getRegion().getRegionInfo(); + try { + // update the statistics table + // Just verify if this if fine + ArrayList mutations = new ArrayList(); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString() + + " as part of major compaction"); + } + getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString() + + " as part of major compaction"); + } + getStatisticsWriter().addStats(tracker, family, mutations, tracker.getGuidePostDepth()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString() + + " as part of major compaction"); + } + getStatisticsWriter().commitStats(mutations, tracker); + } catch (IOException e) { + if (isConnectionClosed()) { + LOGGER.debug("Ignoring error updating statistics because region is closing/closed"); + } else { + LOGGER.error("Failed to update statistics table!", e); + toThrow = e; + } + } finally { + try { + collectionTracker.removeCompactingRegion(regionInfo); + getStatisticsWriter().close();// close the writer + getTracker().close();// close the tracker + } catch (IOException e) { + if (toThrow == null) toThrow = e; + LOGGER.error("Error while closing the stats table", e); + } finally { + // close the delegate scanner + try { + getDelegate().close(); + } catch (IOException e) { + if (toThrow == null) toThrow = e; + LOGGER.error("Error while closing the scanner", e); + } finally { + if (toThrow != null) { + throw toThrow; } - return null; + } } + } + return null; } + } - private boolean isConnectionClosed() { - return 
getConnection() == null || getConnection().isClosed() || getConnection().isAborted(); - } + private boolean isConnectionClosed() { + return getConnection() == null || getConnection().isClosed() || getConnection().isAborted(); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java index 7a91e11beab..62169e2c3ea 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -67,246 +67,248 @@ import org.apache.phoenix.util.PrefixByteDecoder; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.ServerUtil; - -import com.google.protobuf.ServiceException; import org.apache.phoenix.util.ServerUtil.ConnectionFactory; import org.apache.phoenix.util.ServerUtil.ConnectionType; +import com.google.protobuf.ServiceException; + /** * Wrapper to access the statistics table SYSTEM.STATS using the HTable. */ public class StatisticsWriter implements Closeable { - public static StatisticsWriter newWriter(PhoenixConnection conn, String tableName, long clientTimeStamp) - throws SQLException { - Configuration configuration = conn.getQueryServices().getConfiguration(); - long newClientTimeStamp = determineClientTimeStamp(configuration, clientTimeStamp); - TableName physicalTableName = SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, configuration); - Table statsWriterTable = conn.getQueryServices().getTable(physicalTableName.getName()); - Table statsReaderTable = conn.getQueryServices().getTable(physicalTableName.getName()); - StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName, - newClientTimeStamp); - return statsTable; - } + public static StatisticsWriter newWriter(PhoenixConnection conn, String tableName, + long clientTimeStamp) throws SQLException { + Configuration configuration = conn.getQueryServices().getConfiguration(); + long newClientTimeStamp = determineClientTimeStamp(configuration, clientTimeStamp); + TableName physicalTableName = SchemaUtil + .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, configuration); + Table statsWriterTable = conn.getQueryServices().getTable(physicalTableName.getName()); + Table statsReaderTable = conn.getQueryServices().getTable(physicalTableName.getName()); + StatisticsWriter statsTable = + new StatisticsWriter(statsReaderTable, statsWriterTable, tableName, newClientTimeStamp); + return statsTable; + } - /** - * @param tableName - * TODO - * @param clientTimeStamp - * TODO - * @return the {@link StatisticsWriter} for the given primary table. 
- * @throws IOException - * if the table cannot be created due to an underlying HTable creation error - */ - public static StatisticsWriter newWriter(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp) - throws IOException { - Configuration configuration = env.getConfiguration(); - long newClientTimeStamp = determineClientTimeStamp(configuration, clientTimeStamp); - Table statsWriterTable = ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env).getTable( - SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, env.getConfiguration())); - Table statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable); - StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName, - newClientTimeStamp); - return statsTable; - } + /** + * TODO TODO + * @return the {@link StatisticsWriter} for the given primary table. if the table cannot be + * created due to an underlying HTable creation error + */ + public static StatisticsWriter newWriter(RegionCoprocessorEnvironment env, String tableName, + long clientTimeStamp) throws IOException { + Configuration configuration = env.getConfiguration(); + long newClientTimeStamp = determineClientTimeStamp(configuration, clientTimeStamp); + Table statsWriterTable = + ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env) + .getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, + env.getConfiguration())); + Table statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable); + StatisticsWriter statsTable = + new StatisticsWriter(statsReaderTable, statsWriterTable, tableName, newClientTimeStamp); + return statsTable; + } - // Provides a means of clients controlling their timestamps to not use current time - // when background tasks are updating stats. Instead we track the max timestamp of - // the cells and use that. - private static long determineClientTimeStamp(Configuration configuration, long clientTimeStamp) { - boolean useCurrentTime = configuration.getBoolean( - QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, - QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME); - if (!useCurrentTime) { - clientTimeStamp = DefaultStatisticsCollector.NO_TIMESTAMP; - } - if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { - clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis(); - } - return clientTimeStamp; + // Provides a means of clients controlling their timestamps to not use current time + // when background tasks are updating stats. Instead we track the max timestamp of + // the cells and use that. + private static long determineClientTimeStamp(Configuration configuration, long clientTimeStamp) { + boolean useCurrentTime = configuration.getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, + QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME); + if (!useCurrentTime) { + clientTimeStamp = DefaultStatisticsCollector.NO_TIMESTAMP; } - - private final Table statsWriterTable; - // In HBase 0.98.4 or above, the reader and writer will be the same. - // In pre HBase 0.98.4, there was a bug in using the HTable returned - // from a coprocessor for scans, so in that case it'll be different. 
- private final Table statsReaderTable; - private final byte[] tableName; - private final long clientTimeStamp; - - private StatisticsWriter(Table statsReaderTable, - Table statsWriterTable, String tableName, long clientTimeStamp) { - this.statsReaderTable = statsReaderTable; - this.statsWriterTable = statsWriterTable; - this.tableName = Bytes.toBytes(tableName); - this.clientTimeStamp = clientTimeStamp; + if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) { + clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis(); } + return clientTimeStamp; + } - /** - * Close the connection to the table - */ - @Override - public void close() throws IOException { - statsWriterTable.close(); - statsReaderTable.close(); - } + private final Table statsWriterTable; + // In HBase 0.98.4 or above, the reader and writer will be the same. + // In pre HBase 0.98.4, there was a bug in using the HTable returned + // from a coprocessor for scans, so in that case it'll be different. + private final Table statsReaderTable; + private final byte[] tableName; + private final long clientTimeStamp; - /** - * Update a list of statistics for a given region. If the UPDATE STATISTICS {@code } query is issued then we use - * Upsert queries to update the table If the region gets splitted or the major compaction happens we update using - * HTable.put() - * - * @param tracker - * - the statistics tracker - * @param cfKey - * - the family for which the stats is getting collected. - * @param mutations - * - list of mutations that collects all the mutations to commit in a batch - * @throws IOException - * if we fail to do any of the puts. Any single failure will prevent any future attempts for the - * remaining list of stats to update - */ - @SuppressWarnings("deprecation") - public void addStats(StatisticsCollector tracker, ImmutableBytesPtr cfKey, - List mutations, long guidePostDepth) throws IOException { - if (tracker == null) { return; } - boolean useMaxTimeStamp = clientTimeStamp == DefaultStatisticsCollector.NO_TIMESTAMP; - long timeStamp = clientTimeStamp; - if (useMaxTimeStamp) { // When using max timestamp, we write the update time later because we only know the ts - // now - timeStamp = tracker.getMaxTimeStamp(); - mutations.add(getLastStatsUpdatedTimePut(timeStamp)); - } - GuidePostsInfo gps = tracker.getGuidePosts(cfKey); - if (gps != null) { - long[] byteCounts = gps.getByteCounts(); - long[] rowCounts = gps.getRowCounts(); - ImmutableBytesWritable keys = gps.getGuidePosts(); - boolean hasGuidePosts = keys.getLength() > 0; - if (hasGuidePosts) { - int guidePostCount = 0; - try (ByteArrayInputStream stream = new ByteArrayInputStream(keys.get(), keys.getOffset(), keys.getLength())) { - DataInput input = new DataInputStream(stream); - PrefixByteDecoder decoder = new PrefixByteDecoder(gps.getMaxLength()); - do { - ImmutableBytesWritable ptr = decoder.decode(input); - addGuidepost(cfKey, mutations, ptr, byteCounts[guidePostCount], rowCounts[guidePostCount], timeStamp); - guidePostCount++; - } while (decoder != null); - } catch (EOFException e) { // Ignore as this signifies we're done + private StatisticsWriter(Table statsReaderTable, Table statsWriterTable, String tableName, + long clientTimeStamp) { + this.statsReaderTable = statsReaderTable; + this.statsWriterTable = statsWriterTable; + this.tableName = Bytes.toBytes(tableName); + this.clientTimeStamp = clientTimeStamp; + } - } - // If we've written guideposts with a guidepost key, then delete the - // empty guidepost indicator that may have been written by other - 
// regions. - byte[] rowKey = StatisticsUtil.getRowKey(tableName, cfKey, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY); - Delete delete = new Delete(rowKey, timeStamp); - mutations.add(delete); - } else { - /* - * When there is not enough data in the region, we create a guide post with empty - * key with the estimated amount of data in it as the guide post width. We can't - * determine the expected number of rows here since we don't have the PTable and the - * associated schema available to make the row size estimate. We instead will - * compute it on the client side when reading out guideposts from the SYSTEM.STATS - * table in StatisticsUtil#readStatistics(HTableInterface statsHTable, - * GuidePostsKey key, long clientTimeStamp). - */ - addGuidepost(cfKey, mutations, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY, guidePostDepth, - 0, timeStamp); - } - } + /** + * Close the connection to the table + */ + @Override + public void close() throws IOException { + statsWriterTable.close(); + statsReaderTable.close(); + } + + /** + * Update a list of statistics for a given region. If the UPDATE STATISTICS {@code } + * query is issued then we use Upsert queries to update the table If the region gets splitted or + * the major compaction happens we update using HTable.put() - the statistics tracker - the family + * for which the stats is getting collected. - list of mutations that collects all the mutations + * to commit in a batch if we fail to do any of the puts. Any single failure will prevent any + * future attempts for the remaining list of stats to update + */ + @SuppressWarnings("deprecation") + public void addStats(StatisticsCollector tracker, ImmutableBytesPtr cfKey, + List mutations, long guidePostDepth) throws IOException { + if (tracker == null) { + return; } - - @SuppressWarnings("deprecation") - private void addGuidepost(ImmutableBytesPtr cfKey, List mutations, ImmutableBytesWritable ptr, long byteCount, long rowCount, long timeStamp) { - byte[] prefix = StatisticsUtil.getRowKey(tableName, cfKey, ptr); - Put put = new Put(prefix); - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, - timeStamp, PLong.INSTANCE.toBytes(byteCount)); - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, timeStamp, - PLong.INSTANCE.toBytes(rowCount)); - // Add our empty column value so queries behave correctly - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, - ByteUtil.EMPTY_BYTE_ARRAY); - mutations.add(put); + boolean useMaxTimeStamp = clientTimeStamp == DefaultStatisticsCollector.NO_TIMESTAMP; + long timeStamp = clientTimeStamp; + if (useMaxTimeStamp) { // When using max timestamp, we write the update time later because we + // only know the ts + // now + timeStamp = tracker.getMaxTimeStamp(); + mutations.add(getLastStatsUpdatedTimePut(timeStamp)); } + GuidePostsInfo gps = tracker.getGuidePosts(cfKey); + if (gps != null) { + long[] byteCounts = gps.getByteCounts(); + long[] rowCounts = gps.getRowCounts(); + ImmutableBytesWritable keys = gps.getGuidePosts(); + boolean hasGuidePosts = keys.getLength() > 0; + if (hasGuidePosts) { + int guidePostCount = 0; + try (ByteArrayInputStream stream = + new ByteArrayInputStream(keys.get(), keys.getOffset(), keys.getLength())) { + DataInput input = new DataInputStream(stream); + PrefixByteDecoder decoder = new PrefixByteDecoder(gps.getMaxLength()); + do { + ImmutableBytesWritable ptr = decoder.decode(input); + 
addGuidepost(cfKey, mutations, ptr, byteCounts[guidePostCount], + rowCounts[guidePostCount], timeStamp); + guidePostCount++; + } while (decoder != null); + } catch (EOFException e) { // Ignore as this signifies we're done - private static MutationType getMutationType(Mutation m) throws IOException { - if (m instanceof Put) { - return MutationType.PUT; - } else if (m instanceof Delete) { - return MutationType.DELETE; - } else { - throw new DoNotRetryIOException("Unsupported mutation type in stats commit" + m.getClass().getName()); } + // If we've written guideposts with a guidepost key, then delete the + // empty guidepost indicator that may have been written by other + // regions. + byte[] rowKey = + StatisticsUtil.getRowKey(tableName, cfKey, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY); + Delete delete = new Delete(rowKey, timeStamp); + mutations.add(delete); + } else { + /* + * When there is not enough data in the region, we create a guide post with empty key with + * the estimated amount of data in it as the guide post width. We can't determine the + * expected number of rows here since we don't have the PTable and the associated schema + * available to make the row size estimate. We instead will compute it on the client side + * when reading out guideposts from the SYSTEM.STATS table in + * StatisticsUtil#readStatistics(HTableInterface statsHTable, GuidePostsKey key, long + * clientTimeStamp). + */ + addGuidepost(cfKey, mutations, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY, guidePostDepth, 0, + timeStamp); + } } + } - public void commitStats(final List mutations, final StatisticsCollector statsCollector) - throws IOException { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - commitLastStatsUpdatedTime(statsCollector); - if (mutations.size() > 0) { - byte[] row = mutations.get(0).getRow(); - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - for (Mutation m : mutations) { - mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m)); - } - MutateRowsRequest mrm = mrmBuilder.build(); - CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row); - MultiRowMutationService.BlockingInterface service = MultiRowMutationService - .newBlockingStub(channel); - try { - service.mutateRows(null, mrm); - } catch (ServiceException ex) { - ProtobufUtil.toIOException(ex); - } - } - return null; - } - }); - } - - private Put getLastStatsUpdatedTimePut(long timeStamp) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - byte[] prefix = tableName; - Put put = new Put(prefix); - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, - timeStamp, PDate.INSTANCE.toBytes(new Date(currentTime))); - return put; - } + @SuppressWarnings("deprecation") + private void addGuidepost(ImmutableBytesPtr cfKey, List mutations, + ImmutableBytesWritable ptr, long byteCount, long rowCount, long timeStamp) { + byte[] prefix = StatisticsUtil.getRowKey(tableName, cfKey, ptr); + Put put = new Put(prefix); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES, timeStamp, + PLong.INSTANCE.toBytes(byteCount)); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES, timeStamp, + PLong.INSTANCE.toBytes(rowCount)); + // Add our empty column value so queries behave correctly + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, 
QueryConstants.EMPTY_COLUMN_BYTES, + timeStamp, ByteUtil.EMPTY_BYTE_ARRAY); + mutations.add(put); + } - private void commitLastStatsUpdatedTime(StatisticsCollector statsCollector) throws IOException { - long timeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP ? statsCollector.getMaxTimeStamp() : clientTimeStamp; - Put put = getLastStatsUpdatedTimePut(timeStamp); - statsWriterTable.put(put); + private static MutationType getMutationType(Mutation m) throws IOException { + if (m instanceof Put) { + return MutationType.PUT; + } else if (m instanceof Delete) { + return MutationType.DELETE; + } else { + throw new DoNotRetryIOException( + "Unsupported mutation type in stats commit" + m.getClass().getName()); } + } - public void deleteStatsForRegion(Region region, StatisticsCollector tracker, ImmutableBytesPtr fam, - List mutations) throws IOException { - long timeStamp = - clientTimeStamp == DefaultStatisticsCollector.NO_TIMESTAMP - ? tracker.getMaxTimeStamp() : clientTimeStamp; - byte[] startKey = region.getRegionInfo().getStartKey(); - byte[] stopKey = region.getRegionInfo().getEndKey(); - List statsForRegion = new ArrayList(); - Scan s = - MetaDataUtil.newTableRowsScan(getAdjustedKey(startKey, tableName, fam, false), - getAdjustedKey(stopKey, tableName, fam, true), - MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp); - s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); - try (ResultScanner scanner = statsWriterTable.getScanner(s)) { - Result result = null; - while ((result = scanner.next()) != null) { - statsForRegion.add(result); - } - } - for (Result result : statsForRegion) { - mutations.add(new Delete(result.getRow(), timeStamp - 1)); + public void commitStats(final List mutations, final StatisticsCollector statsCollector) + throws IOException { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + commitLastStatsUpdatedTime(statsCollector); + if (mutations.size() > 0) { + byte[] row = mutations.get(0).getRow(); + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + for (Mutation m : mutations) { + mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m)); + } + MutateRowsRequest mrm = mrmBuilder.build(); + CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + try { + service.mutateRows(null, mrm); + } catch (ServiceException ex) { + ProtobufUtil.toIOException(ex); + } } + return null; + } + }); + } + + private Put getLastStatsUpdatedTimePut(long timeStamp) { + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + byte[] prefix = tableName; + Put put = new Put(prefix); + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, timeStamp, + PDate.INSTANCE.toBytes(new Date(currentTime))); + return put; + } + + private void commitLastStatsUpdatedTime(StatisticsCollector statsCollector) throws IOException { + long timeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP + ? statsCollector.getMaxTimeStamp() + : clientTimeStamp; + Put put = getLastStatsUpdatedTimePut(timeStamp); + statsWriterTable.put(put); + } + + public void deleteStatsForRegion(Region region, StatisticsCollector tracker, + ImmutableBytesPtr fam, List mutations) throws IOException { + long timeStamp = clientTimeStamp == DefaultStatisticsCollector.NO_TIMESTAMP + ? 
tracker.getMaxTimeStamp() + : clientTimeStamp; + byte[] startKey = region.getRegionInfo().getStartKey(); + byte[] stopKey = region.getRegionInfo().getEndKey(); + List statsForRegion = new ArrayList(); + Scan s = MetaDataUtil.newTableRowsScan(getAdjustedKey(startKey, tableName, fam, false), + getAdjustedKey(stopKey, tableName, fam, true), MetaDataProtocol.MIN_TABLE_TIMESTAMP, + clientTimeStamp); + s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES); + try (ResultScanner scanner = statsWriterTable.getScanner(s)) { + Result result = null; + while ((result = scanner.next()) != null) { + statsForRegion.add(result); + } + } + for (Result result : statsForRegion) { + mutations.add(new Delete(result.getRow(), timeStamp - 1)); } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java index 3d9837215bf..3c842714665 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,14 +17,14 @@ */ package org.apache.phoenix.schema.stats; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.apache.phoenix.query.QueryServices.IS_NAMESPACE_MAPPING_ENABLED; +import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED; + +import java.nio.charset.StandardCharsets; +import java.sql.Connection; + import org.antlr.runtime.CharStream; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -47,289 +47,286 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MRJobType; import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.util.SchemaUtil; - import org.joda.time.Chronology; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; - -import java.nio.charset.StandardCharsets; -import java.sql.Connection; - -import static org.apache.phoenix.query.QueryServices.IS_NAMESPACE_MAPPING_ENABLED; -import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_IS_NAMESPACE_MAPPING_ENABLED; - /** * Tool to collect table level statistics on HBase snapshot */ public class UpdateStatisticsTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(UpdateStatisticsTool.class); - - private static final Option TABLE_NAME_OPTION = new Option("t", "table", true, - "Phoenix Table Name"); - private static final Option SNAPSHOT_NAME_OPTION = new Option("s", "snapshot", true, - "HBase Snapshot Name"); - private static final Option RESTORE_DIR_OPTION = new Option("d", "restore-dir", true, - "Restore Directory for HBase snapshot"); - private static final Option JOB_PRIORITY_OPTION = new Option("p", "job-priority", true, - "Define job priority from 0(highest) to 4"); - private static final Option RUN_FOREGROUND_OPTION = - new Option("runfg", "run-foreground", false, - "If specified, runs UpdateStatisticsTool in Foreground. Default - Runs the build in background"); - private static final Option MANAGE_SNAPSHOT_OPTION = - new Option("ms", "manage-snapshot", false, - "Creates a new snapshot, runs the tool and deletes it"); - - private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); - - private String tableName; - private String snapshotName; - private Path restoreDir; - private JobPriority jobPriority; - private boolean manageSnapshot; - private boolean isForeground; - - private Job job; - - @Override - public int run(String[] args) throws Exception { - try { - parseArgs(args); - preJobTask(); - configureJob(); - TableMapReduceUtil.initCredentials(job); - int ret = runJob(); - postJobTask(); - return ret; - } catch (Exception e) { - e.printStackTrace(); - return -1; - } + private static final Logger LOGGER = LoggerFactory.getLogger(UpdateStatisticsTool.class); + + private static final Option TABLE_NAME_OPTION = + new Option("t", "table", true, "Phoenix Table Name"); + private static final Option SNAPSHOT_NAME_OPTION = + new Option("s", "snapshot", true, "HBase Snapshot Name"); + private static final Option RESTORE_DIR_OPTION = + new Option("d", "restore-dir", true, "Restore Directory for HBase snapshot"); + private static final Option JOB_PRIORITY_OPTION = + new Option("p", "job-priority", true, "Define job priority from 0(highest) to 4"); + private static final Option RUN_FOREGROUND_OPTION = new Option("runfg", "run-foreground", false, + "If specified, runs UpdateStatisticsTool in Foreground. 
Default - Runs the build in background"); + private static final Option MANAGE_SNAPSHOT_OPTION = new Option("ms", "manage-snapshot", false, + "Creates a new snapshot, runs the tool and deletes it"); + + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + + private String tableName; + private String snapshotName; + private Path restoreDir; + private JobPriority jobPriority; + private boolean manageSnapshot; + private boolean isForeground; + + private Job job; + + @Override + public int run(String[] args) throws Exception { + try { + parseArgs(args); + preJobTask(); + configureJob(); + TableMapReduceUtil.initCredentials(job); + int ret = runJob(); + postJobTask(); + return ret; + } catch (Exception e) { + e.printStackTrace(); + return -1; } - - /** - * Run any tasks before the MR job is launched - * Currently being used for snapshot creation - */ - private void preJobTask() throws Exception { - if (!manageSnapshot) { - return; - } - - try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) { - Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); - boolean namespaceMapping = getConf().getBoolean(IS_NAMESPACE_MAPPING_ENABLED, - DEFAULT_IS_NAMESPACE_MAPPING_ENABLED); - String physicalTableName = SchemaUtil.getPhysicalTableName( - tableName.getBytes(StandardCharsets.UTF_8), - namespaceMapping).getNameAsString(); - admin.snapshot(snapshotName, TableName.valueOf(physicalTableName)); - LOGGER.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName); - } + } + + /** + * Run any tasks before the MR job is launched Currently being used for snapshot creation + */ + private void preJobTask() throws Exception { + if (!manageSnapshot) { + return; } - /** - * Run any tasks before the MR job is completed successfully - * Currently being used for snapshot deletion - */ - private void postJobTask() throws Exception { - if (!manageSnapshot) { - return; - } - - try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) { - Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); - admin.deleteSnapshot(snapshotName); - LOGGER.info("Successfully deleted snapshot " + snapshotName); - } + try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) { + Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); + boolean namespaceMapping = + getConf().getBoolean(IS_NAMESPACE_MAPPING_ENABLED, DEFAULT_IS_NAMESPACE_MAPPING_ENABLED); + String physicalTableName = SchemaUtil + .getPhysicalTableName(tableName.getBytes(StandardCharsets.UTF_8), namespaceMapping) + .getNameAsString(); + admin.snapshot(snapshotName, TableName.valueOf(physicalTableName)); + LOGGER.info("Successfully created snapshot " + snapshotName + " for " + physicalTableName); } - - void parseArgs(String[] args) { - CommandLine cmdLine = null; - try { - cmdLine = parseOptions(args); - } catch (IllegalStateException e) { - printHelpAndExit(e.getMessage(), getOptions()); - } - - if (getConf() == null) { - setConf(HBaseConfiguration.create()); - } - - tableName = cmdLine.getOptionValue(TABLE_NAME_OPTION.getOpt()); - snapshotName = cmdLine.getOptionValue(SNAPSHOT_NAME_OPTION.getOpt()); - if (snapshotName == null) { - snapshotName = "UpdateStatisticsTool_" + tableName + "_" + System.currentTimeMillis(); - } - - String restoreDirOptionValue = cmdLine.getOptionValue(RESTORE_DIR_OPTION.getOpt()); - if (restoreDirOptionValue == null) { - restoreDirOptionValue = 
getConf().get(FS_DEFAULT_NAME_KEY) + "/tmp"; - } - - jobPriority = getJobPriority(cmdLine); - - restoreDir = new Path(restoreDirOptionValue); - manageSnapshot = cmdLine.hasOption(MANAGE_SNAPSHOT_OPTION.getOpt()); - isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + } + + /** + * Run any tasks before the MR job is completed successfully Currently being used for snapshot + * deletion + */ + private void postJobTask() throws Exception { + if (!manageSnapshot) { + return; } - public String getJobPriority() { - return this.jobPriority.toString(); + try (final Connection conn = ConnectionUtil.getInputConnection(getConf())) { + Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); + admin.deleteSnapshot(snapshotName); + LOGGER.info("Successfully deleted snapshot " + snapshotName); + } + } + + void parseArgs(String[] args) { + CommandLine cmdLine = null; + try { + cmdLine = parseOptions(args); + } catch (IllegalStateException e) { + printHelpAndExit(e.getMessage(), getOptions()); } - private JobPriority getJobPriority(CommandLine cmdLine) { - String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); - if (jobPriorityOption == null) { - return JobPriority.NORMAL; - } - - switch (jobPriorityOption) { - case "0" : return JobPriority.VERY_HIGH; - case "1" : return JobPriority.HIGH; - case "2" : return JobPriority.NORMAL; - case "3" : return JobPriority.LOW; - case "4" : return JobPriority.VERY_LOW; - default: - return JobPriority.NORMAL; - } + if (getConf() == null) { + setConf(HBaseConfiguration.create()); } - private void configureJob() throws Exception { - job = Job.getInstance(getConf(), - "UpdateStatistics-" + tableName + "-" + snapshotName); - PhoenixMapReduceUtil.setInput(job, NullDBWritable.class, - snapshotName, tableName, restoreDir); + tableName = cmdLine.getOptionValue(TABLE_NAME_OPTION.getOpt()); + snapshotName = cmdLine.getOptionValue(SNAPSHOT_NAME_OPTION.getOpt()); + if (snapshotName == null) { + snapshotName = "UpdateStatisticsTool_" + tableName + "_" + System.currentTimeMillis(); + } - PhoenixConfigurationUtil.setMRJobType(job.getConfiguration(), MRJobType.UPDATE_STATS); + String restoreDirOptionValue = cmdLine.getOptionValue(RESTORE_DIR_OPTION.getOpt()); + if (restoreDirOptionValue == null) { + restoreDirOptionValue = getConf().get(FS_DEFAULT_NAME_KEY) + "/tmp"; + } - // DO NOT allow mapper splits using statistics since it may result into many smaller chunks - PhoenixConfigurationUtil.setSplitByStats(job.getConfiguration(), false); + jobPriority = getJobPriority(cmdLine); - job.setJarByClass(UpdateStatisticsTool.class); - job.setMapperClass(TableSnapshotMapper.class); - job.setMapOutputKeyClass(NullWritable.class); - job.setMapOutputValueClass(NullWritable.class); - job.setOutputFormatClass(NullOutputFormat.class); - job.setNumReduceTasks(0); - job.setPriority(this.jobPriority); + restoreDir = new Path(restoreDirOptionValue); + manageSnapshot = cmdLine.hasOption(MANAGE_SNAPSHOT_OPTION.getOpt()); + isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()); + } - TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - PhoenixConnection.class, Chronology.class, CharStream.class, - SpanReceiver.class, Gauge.class, MetricRegistriesImpl.class); + public String getJobPriority() { + return this.jobPriority.toString(); + } - LOGGER.info("UpdateStatisticsTool running for: " + tableName - + " on snapshot: " + snapshotName + " with restore dir: " + restoreDir); 
+ private JobPriority getJobPriority(CommandLine cmdLine) { + String jobPriorityOption = cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt()); + if (jobPriorityOption == null) { + return JobPriority.NORMAL; } - private int runJob() { - try { - if (isForeground) { - LOGGER.info("Running UpdateStatisticsTool in Foreground. " + - "Runs full table scans. This may take a long time!"); - return (job.waitForCompletion(true)) ? 0 : 1; - } else { - LOGGER.info("Running UpdateStatisticsTool in Background - Submit async and exit"); - job.submit(); - return 0; - } - } catch (Exception e) { - LOGGER.error("Caught exception " + e + " trying to update statistics."); - return 1; - } + switch (jobPriorityOption) { + case "0": + return JobPriority.VERY_HIGH; + case "1": + return JobPriority.HIGH; + case "2": + return JobPriority.NORMAL; + case "3": + return JobPriority.LOW; + case "4": + return JobPriority.VERY_LOW; + default: + return JobPriority.NORMAL; } - - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + } + + private void configureJob() throws Exception { + job = Job.getInstance(getConf(), "UpdateStatistics-" + tableName + "-" + snapshotName); + PhoenixMapReduceUtil.setInput(job, NullDBWritable.class, snapshotName, tableName, restoreDir); + + PhoenixConfigurationUtil.setMRJobType(job.getConfiguration(), MRJobType.UPDATE_STATS); + + // DO NOT allow mapper splits using statistics since it may result into many smaller chunks + PhoenixConfigurationUtil.setSplitByStats(job.getConfiguration(), false); + + job.setJarByClass(UpdateStatisticsTool.class); + job.setMapperClass(TableSnapshotMapper.class); + job.setMapOutputKeyClass(NullWritable.class); + job.setMapOutputValueClass(NullWritable.class); + job.setOutputFormatClass(NullOutputFormat.class); + job.setNumReduceTasks(0); + job.setPriority(this.jobPriority); + + TableMapReduceUtil.addDependencyJars(job); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), PhoenixConnection.class, + Chronology.class, CharStream.class, SpanReceiver.class, Gauge.class, + MetricRegistriesImpl.class); + + LOGGER.info("UpdateStatisticsTool running for: " + tableName + " on snapshot: " + snapshotName + + " with restore dir: " + restoreDir); + } + + private int runJob() { + try { + if (isForeground) { + LOGGER.info("Running UpdateStatisticsTool in Foreground. " + + "Runs full table scans. This may take a long time!"); + return (job.waitForCompletion(true)) ? 0 : 1; + } else { + LOGGER.info("Running UpdateStatisticsTool in Background - Submit async and exit"); + job.submit(); + return 0; + } + } catch (Exception e) { + LOGGER.error("Caught exception " + e + " trying to update statistics."); + return 1; } - - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + /** + * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are + * missing. 
+ * @param args supplied command line arguments + * @return the parsed command line + */ + CommandLine parseOptions(String[] args) { + + final Options options = getOptions(); + + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - /** - * Parses the commandline arguments, throws IllegalStateException if mandatory arguments are - * missing. - * @param args supplied command line arguments - * @return the parsed command line - */ - CommandLine parseOptions(String[] args) { - - final Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - - if (!cmdLine.hasOption(TABLE_NAME_OPTION.getOpt())) { - throw new IllegalStateException(TABLE_NAME_OPTION.getLongOpt() + " is a mandatory " - + "parameter"); - } - - if (cmdLine.hasOption(MANAGE_SNAPSHOT_OPTION.getOpt()) - && !cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt())) { - throw new IllegalStateException("Snapshot cannot be managed if job is running in background"); - } - - return cmdLine; + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - private Options getOptions() { - final Options options = new Options(); - options.addOption(TABLE_NAME_OPTION); - options.addOption(SNAPSHOT_NAME_OPTION); - options.addOption(HELP_OPTION); - options.addOption(RESTORE_DIR_OPTION); - options.addOption(JOB_PRIORITY_OPTION); - options.addOption(RUN_FOREGROUND_OPTION); - options.addOption(MANAGE_SNAPSHOT_OPTION); - return options; + if (!cmdLine.hasOption(TABLE_NAME_OPTION.getOpt())) { + throw new IllegalStateException( + TABLE_NAME_OPTION.getLongOpt() + " is a mandatory " + "parameter"); } - public Job getJob() { - return job; + if ( + cmdLine.hasOption(MANAGE_SNAPSHOT_OPTION.getOpt()) + && !cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()) + ) { + throw new IllegalStateException("Snapshot cannot be managed if job is running in background"); } - public String getSnapshotName() { - return snapshotName; - } + return cmdLine; + } + + private Options getOptions() { + final Options options = new Options(); + options.addOption(TABLE_NAME_OPTION); + options.addOption(SNAPSHOT_NAME_OPTION); + options.addOption(HELP_OPTION); + options.addOption(RESTORE_DIR_OPTION); + options.addOption(JOB_PRIORITY_OPTION); + options.addOption(RUN_FOREGROUND_OPTION); + options.addOption(MANAGE_SNAPSHOT_OPTION); + return options; + } + + public Job getJob() { + return job; + } + + public String getSnapshotName() { + return snapshotName; + } + + public Path getRestoreDir() { + return restoreDir; + } + + /** + * Empty Mapper class since stats collection happens as part of scanner object + */ + public static class TableSnapshotMapper + extends Mapper { - public Path getRestoreDir() { - return restoreDir; - } - - /** - * Empty Mapper class since stats collection happens as part of scanner object - */ - public static class TableSnapshotMapper - extends Mapper { - - @Override - protected 
void map(NullWritable key, NullDBWritable value, - Context context) { - } + @Override + protected void map(NullWritable key, NullDBWritable value, Context context) { } + } - public static void main(String[] args) throws Exception { - int result = ToolRunner.run(new UpdateStatisticsTool(), args); - System.exit(result); - } + public static void main(String[] args) throws Exception { + int result = ToolRunner.run(new UpdateStatisticsTool(), args); + System.exit(result); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/task/ServerTask.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/task/ServerTask.java index 0258e8c4c75..97d00cb68db 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/task/ServerTask.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/task/ServerTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,86 +31,80 @@ import org.apache.phoenix.schema.PTable; public class ServerTask extends Task { - private static void mutateSystemTaskTable(PhoenixConnection conn, PreparedStatement stmt, boolean accessCheckEnabled) - throws IOException { - // we need to mutate SYSTEM.TASK with HBase/login user if access is enabled. - if (accessCheckEnabled) { - User.runAsLoginUser(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - final RpcCall rpcContext = RpcUtil.getRpcContext(); - // setting RPC context as null so that user can be reset - try { - RpcUtil.setRpcContext(null); - stmt.execute(); - conn.commit(); - } catch (SQLException e) { - throw new IOException(e); - } finally { - // setting RPC context back to original context of the RPC - RpcUtil.setRpcContext(rpcContext); - } - return null; - } - }); - } - else { - try { - stmt.execute(); - conn.commit(); - } catch (SQLException e) { - throw new IOException(e); - } + private static void mutateSystemTaskTable(PhoenixConnection conn, PreparedStatement stmt, + boolean accessCheckEnabled) throws IOException { + // we need to mutate SYSTEM.TASK with HBase/login user if access is enabled. + if (accessCheckEnabled) { + User.runAsLoginUser(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + final RpcCall rpcContext = RpcUtil.getRpcContext(); + // setting RPC context as null so that user can be reset + try { + RpcUtil.setRpcContext(null); + stmt.execute(); + conn.commit(); + } catch (SQLException e) { + throw new IOException(e); + } finally { + // setting RPC context back to original context of the RPC + RpcUtil.setRpcContext(rpcContext); + } + return null; } + }); + } else { + try { + stmt.execute(); + conn.commit(); + } catch (SQLException e) { + throw new IOException(e); + } } + } - /** - * Execute and commit upsert query on SYSTEM.TASK - * This method should be used only from server side. Client should use - * {@link #getMutationsForAddTask(SystemTaskParams)} instead of direct - * upsert commit. 
- * - * @param systemTaskParams Task params with various task related arguments - * @throws IOException If something goes wrong while preparing mutations - * or committing transactions - */ - public static void addTask(SystemTaskParams systemTaskParams) - throws IOException { - addTaskAndGetStatement(systemTaskParams, systemTaskParams.getConn(), - true); - } + /** + * Execute and commit upsert query on SYSTEM.TASK This method should be used only from server + * side. Client should use {@link #getMutationsForAddTask(SystemTaskParams)} instead of direct + * upsert commit. + * @param systemTaskParams Task params with various task related arguments + * @throws IOException If something goes wrong while preparing mutations or committing + * transactions + */ + public static void addTask(SystemTaskParams systemTaskParams) throws IOException { + addTaskAndGetStatement(systemTaskParams, systemTaskParams.getConn(), true); + } - private static PreparedStatement addTaskAndGetStatement( - SystemTaskParams systemTaskParams, PhoenixConnection connection, - boolean shouldCommit) throws IOException { - PreparedStatement stmt = addTaskAndGetStatement(systemTaskParams, connection); + private static PreparedStatement addTaskAndGetStatement(SystemTaskParams systemTaskParams, + PhoenixConnection connection, boolean shouldCommit) throws IOException { + PreparedStatement stmt = addTaskAndGetStatement(systemTaskParams, connection); - // if query is getting executed by client, do not execute and commit - // mutations - if (shouldCommit) { - mutateSystemTaskTable(connection, stmt, - systemTaskParams.isAccessCheckEnabled()); - } - return stmt; + // if query is getting executed by client, do not execute and commit + // mutations + if (shouldCommit) { + mutateSystemTaskTable(connection, stmt, systemTaskParams.isAccessCheckEnabled()); } + return stmt; + } - public static void deleteTask(PhoenixConnection conn, PTable.TaskType taskType, Timestamp ts, String tenantId, - String schemaName, String tableName, boolean accessCheckEnabled) throws IOException { - PreparedStatement stmt = null; - try { - stmt = conn.prepareStatement("DELETE FROM " + - PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + - " WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = ? AND " + - PhoenixDatabaseMetaData.TASK_TS + " = ? AND " + - PhoenixDatabaseMetaData.TENANT_ID + (tenantId == null ? " IS NULL " : " = '" + tenantId + "'") + " AND " + - PhoenixDatabaseMetaData.TABLE_SCHEM + (schemaName == null ? " IS NULL " : " = '" + schemaName + "'") + " AND " + - PhoenixDatabaseMetaData.TABLE_NAME + " = ?"); - stmt.setByte(1, taskType.getSerializedValue()); - stmt.setTimestamp(2, ts); - stmt.setString(3, tableName); - } catch (SQLException e) { - throw new IOException(e); - } - mutateSystemTaskTable(conn, stmt, accessCheckEnabled); + public static void deleteTask(PhoenixConnection conn, PTable.TaskType taskType, Timestamp ts, + String tenantId, String schemaName, String tableName, boolean accessCheckEnabled) + throws IOException { + PreparedStatement stmt = null; + try { + stmt = conn.prepareStatement("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + + " WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = ? AND " + + PhoenixDatabaseMetaData.TASK_TS + " = ? AND " + PhoenixDatabaseMetaData.TENANT_ID + + (tenantId == null ? " IS NULL " : " = '" + tenantId + "'") + " AND " + + PhoenixDatabaseMetaData.TABLE_SCHEM + + (schemaName == null ? 
" IS NULL " : " = '" + schemaName + "'") + " AND " + + PhoenixDatabaseMetaData.TABLE_NAME + " = ?"); + stmt.setByte(1, taskType.getSerializedValue()); + stmt.setTimestamp(2, ts); + stmt.setString(3, tableName); + } catch (SQLException e) { + throw new IOException(e); } + mutateSystemTaskTable(conn, stmt, accessCheckEnabled); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/transform/Transform.java b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/transform/Transform.java index b216c9d7e36..1813d746901 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/schema/transform/Transform.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/schema/transform/Transform.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +17,34 @@ */ package org.apache.phoenix.schema.transform; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.coprocessorclient.TableInfo; -import org.apache.phoenix.schema.PIndexState; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_TABLE_TIMESTAMP; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID; +import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY; +import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; +import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB; +import static org.apache.phoenix.schema.ColumnMetaDataOps.addColumnMutation; +import static org.apache.phoenix.schema.MetaDataClient.UPDATE_INDEX_STATE_TO_ACTIVE; +import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; +import static org.apache.phoenix.schema.PTableType.INDEX; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.coprocessorclient.TableInfo; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; import org.apache.phoenix.mapreduce.index.IndexScrutinyTool; @@ -31,10 +53,12 @@ import org.apache.phoenix.schema.MetaDataClient; import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PColumnImpl; +import org.apache.phoenix.schema.PIndexState; import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PNameFactory; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableImpl; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; import org.apache.phoenix.util.EncodedColumnsUtil; import 
org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.QueryUtil; @@ -45,425 +69,446 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.phoenix.coprocessorclient.MetaDataProtocol.MIN_TABLE_TIMESTAMP; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_TABLE_NAME; -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_TENANT_ID; -import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY; -import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; -import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB; -import static org.apache.phoenix.schema.ColumnMetaDataOps.addColumnMutation; -import static org.apache.phoenix.schema.MetaDataClient.UPDATE_INDEX_STATE_TO_ACTIVE; -import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS; -import static org.apache.phoenix.schema.PTableType.INDEX; - public class Transform extends TransformClient { - private static final Logger LOGGER = LoggerFactory.getLogger(Transform.class); - - public static PTable getTransformingNewTable(PhoenixConnection connection, PTable oldTable) throws SQLException{ - SystemTransformRecord transformRecord = TransformClient.getTransformRecord(connection, oldTable.getType(), oldTable.getSchemaName() - , oldTable.getTableName(), oldTable.getType()==INDEX? oldTable.getParentTableName():null, oldTable.getTenantId() - , oldTable.getBaseTableLogicalName()); - - PTable transformingNewTable = null; - if (transformRecord != null && transformRecord.isActive()) { - // New table will behave like an index - PName newTableNameWithoutSchema = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(transformRecord.getNewPhysicalTableName())); - if (!newTableNameWithoutSchema.equals(oldTable.getPhysicalName(true))) { - transformingNewTable = connection.getTableNoCache( - transformRecord.getNewPhysicalTableName()); - } - } - return transformingNewTable; + private static final Logger LOGGER = LoggerFactory.getLogger(Transform.class); + + public static PTable getTransformingNewTable(PhoenixConnection connection, PTable oldTable) + throws SQLException { + SystemTransformRecord transformRecord = + TransformClient.getTransformRecord(connection, oldTable.getType(), oldTable.getSchemaName(), + oldTable.getTableName(), oldTable.getType() == INDEX ? 
oldTable.getParentTableName() : null, + oldTable.getTenantId(), oldTable.getBaseTableLogicalName()); + + PTable transformingNewTable = null; + if (transformRecord != null && transformRecord.isActive()) { + // New table will behave like an index + PName newTableNameWithoutSchema = PNameFactory + .newName(SchemaUtil.getTableNameFromFullName(transformRecord.getNewPhysicalTableName())); + if (!newTableNameWithoutSchema.equals(oldTable.getPhysicalName(true))) { + transformingNewTable = + connection.getTableNoCache(transformRecord.getNewPhysicalTableName()); + } } - - public static void updateNewTableState(PhoenixConnection connection, SystemTransformRecord systemTransformRecord, - PIndexState state) - throws SQLException { - String schema = SchemaUtil.getSchemaNameFromFullName(systemTransformRecord.getNewPhysicalTableName()); - String tableName = SchemaUtil.getTableNameFromFullName(systemTransformRecord.getNewPhysicalTableName()); - try (PreparedStatement tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE)){ - tableUpsert.setString(1, systemTransformRecord.getTenantId() == null ? null : - systemTransformRecord.getTenantId()); - tableUpsert.setString(2, schema); - tableUpsert.setString(3, tableName); - tableUpsert.setString(4, state.getSerializedValue()); - tableUpsert.setLong(5, 0); - tableUpsert.setLong(6, 0); - tableUpsert.execute(); - } - // Update cache - UpgradeUtil.clearCache(connection, connection.getTenantId(), schema, tableName, - systemTransformRecord.getLogicalParentName(), MIN_TABLE_TIMESTAMP); - } - - public static void removeTransformRecord( - SystemTransformRecord transformRecord, PhoenixConnection connection) throws SQLException { - connection.prepareStatement("DELETE FROM " - + PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME + " WHERE " + - (Strings.isNullOrEmpty(transformRecord.getSchemaName()) ? "" : - (PhoenixDatabaseMetaData.TABLE_SCHEM + " ='" + transformRecord.getSchemaName() + "' AND ")) + - PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + " ='" + transformRecord.getLogicalTableName() + "' AND " + - PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + " ='" + transformRecord.getNewPhysicalTableName() + "' AND " + - PhoenixDatabaseMetaData.TRANSFORM_TYPE + " =" + transformRecord.getTransformType().getSerializedValue() - ).execute(); + return transformingNewTable; + } + + public static void updateNewTableState(PhoenixConnection connection, + SystemTransformRecord systemTransformRecord, PIndexState state) throws SQLException { + String schema = + SchemaUtil.getSchemaNameFromFullName(systemTransformRecord.getNewPhysicalTableName()); + String tableName = + SchemaUtil.getTableNameFromFullName(systemTransformRecord.getNewPhysicalTableName()); + try ( + PreparedStatement tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE)) { + tableUpsert.setString(1, + systemTransformRecord.getTenantId() == null ? 
null : systemTransformRecord.getTenantId());
+      tableUpsert.setString(2, schema);
+      tableUpsert.setString(3, tableName);
+      tableUpsert.setString(4, state.getSerializedValue());
+      tableUpsert.setLong(5, 0);
+      tableUpsert.setLong(6, 0);
+      tableUpsert.execute();
     }
+    // Update cache
+    UpgradeUtil.clearCache(connection, connection.getTenantId(), schema, tableName,
+      systemTransformRecord.getLogicalParentName(), MIN_TABLE_TIMESTAMP);
+  }
+
+  public static void removeTransformRecord(SystemTransformRecord transformRecord,
+    PhoenixConnection connection) throws SQLException {
+    connection
+      .prepareStatement("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_TRANSFORM_NAME + " WHERE "
+        + (Strings.isNullOrEmpty(transformRecord.getSchemaName())
+          ? ""
+          : (PhoenixDatabaseMetaData.TABLE_SCHEM + " ='" + transformRecord.getSchemaName()
+            + "' AND "))
+        + PhoenixDatabaseMetaData.LOGICAL_TABLE_NAME + " ='" + transformRecord.getLogicalTableName()
+        + "' AND " + PhoenixDatabaseMetaData.NEW_PHYS_TABLE_NAME + " ='"
+        + transformRecord.getNewPhysicalTableName() + "' AND "
+        + PhoenixDatabaseMetaData.TRANSFORM_TYPE + " ="
+        + transformRecord.getTransformType().getSerializedValue())
+      .execute();
+  }
+
+  /**
+   * Disable caching re-design if you use Online Data Format Change since the cutover logic is
+   * currently incompatible and clients may not learn about the physical table change. See
+   * https://issues.apache.org/jira/browse/PHOENIX-6883 and
+   * https://issues.apache.org/jira/browse/PHOENIX-7284.
+   */
+  public static void doCutover(PhoenixConnection connection,
+    SystemTransformRecord systemTransformRecord) throws Exception {
+    String tenantId = systemTransformRecord.getTenantId();
+    String schema = systemTransformRecord.getSchemaName();
+    String tableName = systemTransformRecord.getLogicalTableName();
+    String newTableName =
+      SchemaUtil.getTableNameFromFullName(systemTransformRecord.getNewPhysicalTableName());
+
+    // Calculate changed metadata
+    List<String> columnNames = new ArrayList<>();
+    List<String> columnValues = new ArrayList<>();
+
+    getMetadataDifference(connection, systemTransformRecord, columnNames, columnValues);
+    // TODO In the future, we need to handle rowkey changes and column type changes as well
+
+    String changeViewStmt =
+      "UPSERT INTO SYSTEM.CATALOG " + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (?, ?, ? %s)";
+
+    String changeTable = String.format("UPSERT INTO SYSTEM.CATALOG "
+      + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s ) " + "VALUES(?, ?, ?, ? %s)",
+      columnNames.size() > 0 ? "," + String.join(",", columnNames) : "",
+      columnNames.size() > 0 ?
"," + QueryUtil.generateInListParams(columnValues.size()) : ""); + + LOGGER.info("About to do cutover via " + changeTable); + TableViewFinderResult childViewsResult = + ViewUtil.findChildViews(connection, tenantId, schema, tableName); + boolean wasCommit = connection.getAutoCommit(); + connection.setAutoCommit(false); + List viewsToUpdateCache = new ArrayList<>(); + try { + try (PreparedStatement stmt = connection.prepareStatement(changeTable)) { + int param = 0; + if (tenantId == null) { + stmt.setNull(++param, Types.VARCHAR); + } else { + stmt.setString(++param, tenantId); + } + if (schema == null) { + stmt.setNull(++param, Types.VARCHAR); + } else { + stmt.setString(++param, schema); + } + stmt.setString(++param, tableName); + stmt.setString(++param, newTableName); + for (int i = 0; i < columnValues.size(); i++) { + stmt.setInt(++param, Integer.parseInt(columnValues.get(i))); + } + stmt.execute(); + } + // Update column qualifiers + PTable pNewTable = connection.getTable(systemTransformRecord.getNewPhysicalTableName()); + PTable pOldTable = connection.getTable(SchemaUtil.getTableName(schema, tableName)); + if ( + pOldTable.getImmutableStorageScheme() != pNewTable.getImmutableStorageScheme() + || pOldTable.getEncodingScheme() != pNewTable.getEncodingScheme() + ) { + MetaDataClient.mutateTransformProperties(connection, tenantId, schema, tableName, + newTableName, pNewTable.getImmutableStorageScheme(), pNewTable.getEncodingScheme()); + // We need to update the columns's qualifiers as well + mutateColumns(connection.unwrap(PhoenixConnection.class), pOldTable, pNewTable); + + HashMap columnMap = new HashMap<>(); + for (PColumn column : pNewTable.getColumns()) { + columnMap.put(column.getName().getString(), column); + } - /** - * Disable caching re-design if you use Online Data Format Change since the cutover logic - * is currently incompatible and clients may not learn about the physical table change. - * See https://issues.apache.org/jira/browse/PHOENIX-6883 and - * https://issues.apache.org/jira/browse/PHOENIX-7284. - */ - public static void doCutover(PhoenixConnection connection, SystemTransformRecord systemTransformRecord) throws Exception{ - String tenantId = systemTransformRecord.getTenantId(); - String schema = systemTransformRecord.getSchemaName(); - String tableName = systemTransformRecord.getLogicalTableName(); - String newTableName = SchemaUtil.getTableNameFromFullName(systemTransformRecord.getNewPhysicalTableName()); - - // Calculate changed metadata - List columnNames = new ArrayList<>(); - List columnValues = new ArrayList<>(); - - getMetadataDifference(connection, systemTransformRecord, columnNames, columnValues); - // TODO In the future, we need to handle rowkey changes and column type changes as well - - String changeViewStmt = "UPSERT INTO SYSTEM.CATALOG " - + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME %s) VALUES (?, ?, ? %s)"; - - String - changeTable = String.format("UPSERT INTO SYSTEM.CATALOG " - + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, PHYSICAL_TABLE_NAME %s ) " - + "VALUES(?, ?, ?, ? %s)", columnNames.size() > 0 ? "," - + String.join(",", columnNames) : "", columnNames.size() > 0 - ? 
"," + QueryUtil.generateInListParams(columnValues.size()) : ""); - - LOGGER.info("About to do cutover via " + changeTable); - TableViewFinderResult childViewsResult = ViewUtil.findChildViews(connection, tenantId, schema, tableName); - boolean wasCommit = connection.getAutoCommit(); - connection.setAutoCommit(false); - List viewsToUpdateCache = new ArrayList<>(); - try { - try (PreparedStatement stmt = connection.prepareStatement(changeTable)) { - int param = 0; - if (tenantId == null) { - stmt.setNull(++param, Types.VARCHAR); - } else { - stmt.setString(++param, tenantId); - } - if (schema == null) { - stmt.setNull(++param, Types.VARCHAR); - } else { - stmt.setString(++param, schema); - } - stmt.setString(++param, tableName); - stmt.setString(++param, newTableName); - for (int i = 0; i < columnValues.size(); i++) { - stmt.setInt(++param, Integer.parseInt(columnValues.get(i))); - } - stmt.execute(); - } - // Update column qualifiers - PTable pNewTable = connection.getTable(systemTransformRecord.getNewPhysicalTableName()); - PTable pOldTable = connection.getTable(SchemaUtil.getTableName(schema, tableName)); - if (pOldTable.getImmutableStorageScheme() != pNewTable.getImmutableStorageScheme() || - pOldTable.getEncodingScheme() != pNewTable.getEncodingScheme()) { - MetaDataClient.mutateTransformProperties(connection, tenantId, schema, tableName, newTableName, - pNewTable.getImmutableStorageScheme(), pNewTable.getEncodingScheme()); - // We need to update the columns's qualifiers as well - mutateColumns(connection.unwrap(PhoenixConnection.class), pOldTable, pNewTable); - - HashMap columnMap = new HashMap<>(); - for (PColumn column : pNewTable.getColumns()) { - columnMap.put(column.getName().getString(), column); - } - - // Also update view column qualifiers - for (TableInfo view : childViewsResult.getLinks()) { - PTable pView = connection.getTable(view.getTenantId() == null - ? null : Bytes.toString(view.getTenantId()), - SchemaUtil.getTableName(view.getSchemaName(), view.getTableName())); - mutateViewColumns(connection.unwrap(PhoenixConnection.class), pView, pNewTable, columnMap); - } - } - connection.commit(); - - // We can have millions of views. We need to send it in batches - int maxBatchSize = connection.getQueryServices().getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); - int batchSize = 0; - for (TableInfo view : childViewsResult.getLinks()) { - String changeView = String.format(changeViewStmt, - columnNames.size() > 0 ? "," + String.join(",", columnNames) : "", - columnNames.size() > 0 ? 
"," - + QueryUtil.generateInListParams(columnValues.size()) : ""); - LOGGER.info("Cutover changing view via " + changeView); - try (PreparedStatement stmt = connection.prepareStatement(changeView)) { - int param = 0; - if (view.getTenantId() == null || view.getTenantId().length == 0) { - stmt.setNull(++param, Types.VARCHAR); - } else { - stmt.setString(++param, Bytes.toString(view.getTenantId())); - } - if (view.getSchemaName() == null || view.getSchemaName().length == 0) { - stmt.setNull(++param, Types.VARCHAR); - } else { - stmt.setString(++param, Bytes.toString(view.getSchemaName())); - } - stmt.setString(++param, Bytes.toString(view.getTableName())); - for (int i = 0; i < columnValues.size(); i++) { - stmt.setInt(++param, Integer.parseInt(columnValues.get(i))); - } - stmt.execute(); - } - viewsToUpdateCache.add(view); - batchSize++; - if (batchSize >= maxBatchSize) { - connection.commit(); - batchSize = 0; - } - } - if (batchSize > 0) { - connection.commit(); - batchSize = 0; - } - - connection.unwrap(PhoenixConnection.class).getQueryServices().clearCache(); - UpgradeUtil.clearCacheAndGetNewTable(connection.unwrap(PhoenixConnection.class), - connection.getTenantId(), - schema, tableName, systemTransformRecord.getLogicalParentName(), MIN_TABLE_TIMESTAMP); - for (TableInfo view : viewsToUpdateCache) { - UpgradeUtil.clearCache(connection.unwrap(PhoenixConnection.class), - PNameFactory.newName(view.getTenantId()), - PNameFactory.newName(view.getSchemaName()).getString(), Bytes.toString(view.getTableName()), - tableName, MIN_TABLE_TIMESTAMP); - } - - // TODO: Cleanup syscat so that we don't have an extra index - } catch (Exception e) { - LOGGER.error("Error happened during cutover ", e); - connection.rollback(); - throw e; - } finally { - connection.setAutoCommit(wasCommit); + // Also update view column qualifiers + for (TableInfo view : childViewsResult.getLinks()) { + PTable pView = connection.getTable( + view.getTenantId() == null ? null : Bytes.toString(view.getTenantId()), + SchemaUtil.getTableName(view.getSchemaName(), view.getTableName())); + mutateViewColumns(connection.unwrap(PhoenixConnection.class), pView, pNewTable, + columnMap); } + } + connection.commit(); + + // We can have millions of views. We need to send it in batches + int maxBatchSize = connection.getQueryServices().getConfiguration() + .getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE); + int batchSize = 0; + for (TableInfo view : childViewsResult.getLinks()) { + String changeView = String.format(changeViewStmt, + columnNames.size() > 0 ? "," + String.join(",", columnNames) : "", + columnNames.size() > 0 ? 
"," + QueryUtil.generateInListParams(columnValues.size()) : ""); + LOGGER.info("Cutover changing view via " + changeView); + try (PreparedStatement stmt = connection.prepareStatement(changeView)) { + int param = 0; + if (view.getTenantId() == null || view.getTenantId().length == 0) { + stmt.setNull(++param, Types.VARCHAR); + } else { + stmt.setString(++param, Bytes.toString(view.getTenantId())); + } + if (view.getSchemaName() == null || view.getSchemaName().length == 0) { + stmt.setNull(++param, Types.VARCHAR); + } else { + stmt.setString(++param, Bytes.toString(view.getSchemaName())); + } + stmt.setString(++param, Bytes.toString(view.getTableName())); + for (int i = 0; i < columnValues.size(); i++) { + stmt.setInt(++param, Integer.parseInt(columnValues.get(i))); + } + stmt.execute(); + } + viewsToUpdateCache.add(view); + batchSize++; + if (batchSize >= maxBatchSize) { + connection.commit(); + batchSize = 0; + } + } + if (batchSize > 0) { + connection.commit(); + batchSize = 0; + } + + connection.unwrap(PhoenixConnection.class).getQueryServices().clearCache(); + UpgradeUtil.clearCacheAndGetNewTable(connection.unwrap(PhoenixConnection.class), + connection.getTenantId(), schema, tableName, systemTransformRecord.getLogicalParentName(), + MIN_TABLE_TIMESTAMP); + for (TableInfo view : viewsToUpdateCache) { + UpgradeUtil.clearCache(connection.unwrap(PhoenixConnection.class), + PNameFactory.newName(view.getTenantId()), + PNameFactory.newName(view.getSchemaName()).getString(), + Bytes.toString(view.getTableName()), tableName, MIN_TABLE_TIMESTAMP); + } + + // TODO: Cleanup syscat so that we don't have an extra index + } catch (Exception e) { + LOGGER.error("Error happened during cutover ", e); + connection.rollback(); + throw e; + } finally { + connection.setAutoCommit(wasCommit); } - - private static void getMetadataDifference(PhoenixConnection connection, - SystemTransformRecord systemTransformRecord, - List columnNames, List columnValues) throws SQLException { - PTable pOldTable = connection.getTable(SchemaUtil.getQualifiedTableName( - systemTransformRecord.getSchemaName(), - systemTransformRecord.getLogicalTableName())); - PTable pNewTable = connection.getTable(SchemaUtil.getQualifiedTableName(SchemaUtil - .getSchemaNameFromFullName(systemTransformRecord.getNewPhysicalTableName()), - SchemaUtil.getTableNameFromFullName( - systemTransformRecord.getNewPhysicalTableName()))); - - Map map = pOldTable.getPropertyValues(); - for(Map.Entry entry : map.entrySet()) { - String oldKey = entry.getKey(); - String oldValue = entry.getValue(); - if (pNewTable.getPropertyValues().containsKey(oldKey)) { - if (PHYSICAL_TABLE_NAME.equals(oldKey)) { - // No need to add here. We will add it. 
-                    continue;
-                }
-                String newValue = pNewTable.getPropertyValues().get(oldKey);
-                if (!Strings.nullToEmpty(oldValue).equals(Strings.nullToEmpty(newValue))) {
-                    columnNames.add(oldKey);
-                    // properties value that corresponds to a number will not need single quotes around it
-                    // properties value that corresponds to a boolean value will not need single quotes around it
-                    if (!Strings.isNullOrEmpty(newValue)) {
-                        if(!(StringUtils.isNumeric(newValue)) &&
-                                !(newValue.equalsIgnoreCase(Boolean.TRUE.toString()) ||newValue.equalsIgnoreCase(Boolean.FALSE.toString()))) {
-                            if (ENCODING_SCHEME.equals(oldKey)) {
-                                newValue = String.valueOf(PTable.QualifierEncodingScheme.valueOf(newValue).getSerializedMetadataValue());
-                            } else if (IMMUTABLE_STORAGE_SCHEME.equals(oldKey)) {
-                                newValue = String.valueOf(PTable.ImmutableStorageScheme.valueOf(newValue).getSerializedMetadataValue());
-                            }
-                            else {
-                                newValue = "'" + newValue + "'";
-                            }
-                        }
-                    }
-                    columnValues.add(newValue);
-                }
-            }
-        }
+  }
+
+  private static void getMetadataDifference(PhoenixConnection connection,
+    SystemTransformRecord systemTransformRecord, List<String> columnNames,
+    List<String> columnValues) throws SQLException {
+    PTable pOldTable =
+      connection.getTable(SchemaUtil.getQualifiedTableName(systemTransformRecord.getSchemaName(),
+        systemTransformRecord.getLogicalTableName()));
+    PTable pNewTable = connection.getTable(SchemaUtil.getQualifiedTableName(
+      SchemaUtil.getSchemaNameFromFullName(systemTransformRecord.getNewPhysicalTableName()),
+      SchemaUtil.getTableNameFromFullName(systemTransformRecord.getNewPhysicalTableName())));
+
+    Map<String, String> map = pOldTable.getPropertyValues();
+    for (Map.Entry<String, String> entry : map.entrySet()) {
+      String oldKey = entry.getKey();
+      String oldValue = entry.getValue();
+      if (pNewTable.getPropertyValues().containsKey(oldKey)) {
+        if (PHYSICAL_TABLE_NAME.equals(oldKey)) {
+          // No need to add here. We will add it.
+ continue; + } + String newValue = pNewTable.getPropertyValues().get(oldKey); + if (!Strings.nullToEmpty(oldValue).equals(Strings.nullToEmpty(newValue))) { + columnNames.add(oldKey); + // properties value that corresponds to a number will not need single quotes around it + // properties value that corresponds to a boolean value will not need single quotes around + // it + if (!Strings.isNullOrEmpty(newValue)) { + if ( + !(StringUtils.isNumeric(newValue)) + && !(newValue.equalsIgnoreCase(Boolean.TRUE.toString()) + || newValue.equalsIgnoreCase(Boolean.FALSE.toString())) + ) { + if (ENCODING_SCHEME.equals(oldKey)) { + newValue = String.valueOf( + PTable.QualifierEncodingScheme.valueOf(newValue).getSerializedMetadataValue()); + } else if (IMMUTABLE_STORAGE_SCHEME.equals(oldKey)) { + newValue = String.valueOf( + PTable.ImmutableStorageScheme.valueOf(newValue).getSerializedMetadataValue()); + } else { + newValue = "'" + newValue + "'"; + } } + } + columnValues.add(newValue); } + } + } + } + + public static void completeTransform(Connection connection, Configuration configuration) + throws Exception { + // Will be called from Reducer + String tenantId = configuration.get(MAPREDUCE_TENANT_ID, null); + String fullOldTableName = PhoenixConfigurationUtil.getInputTableName(configuration); + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullOldTableName); + String oldTableLogicalName = SchemaUtil.getTableNameFromFullName(fullOldTableName); + String indexTableName = SchemaUtil + .getTableNameFromFullName(PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration)); + String logicaTableName = oldTableLogicalName; + String logicalParentName = null; + if ( + PhoenixConfigurationUtil.getTransformingTableType(configuration) + == IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE + ) { + if (!Strings.isNullOrEmpty(indexTableName)) { + logicaTableName = indexTableName; + logicalParentName = SchemaUtil.getTableName(schemaName, oldTableLogicalName); + } } - public static void completeTransform(Connection connection, Configuration configuration) throws Exception{ - // Will be called from Reducer - String tenantId = configuration.get(MAPREDUCE_TENANT_ID, null); - String fullOldTableName = PhoenixConfigurationUtil.getInputTableName(configuration); - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullOldTableName); - String oldTableLogicalName = SchemaUtil.getTableNameFromFullName(fullOldTableName); - String indexTableName = SchemaUtil.getTableNameFromFullName(PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration)); - String logicaTableName = oldTableLogicalName; - String logicalParentName = null; - if (PhoenixConfigurationUtil.getTransformingTableType(configuration) == IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE) { - if (!Strings.isNullOrEmpty(indexTableName)) { - logicaTableName = indexTableName; - logicalParentName = SchemaUtil.getTableName(schemaName, oldTableLogicalName); - } + SystemTransformRecord transformRecord = getTransformRecord(schemaName, logicaTableName, + logicalParentName, tenantId, connection.unwrap(PhoenixConnection.class)); + + if (!PTable.TransformType.isPartialTransform(transformRecord.getTransformType())) { + updateTransformRecord(connection.unwrap(PhoenixConnection.class), transformRecord, + PTable.TransformStatus.PENDING_CUTOVER); + connection.commit(); + } else { + updateTransformRecord(connection.unwrap(PhoenixConnection.class), transformRecord, + PTable.TransformStatus.COMPLETED); + connection.commit(); + } + } + + public static void 
updateTransformRecord(PhoenixConnection connection,
+    SystemTransformRecord transformRecord, PTable.TransformStatus newStatus) throws SQLException {
+    SystemTransformRecord.SystemTransformBuilder builder =
+      new SystemTransformRecord.SystemTransformBuilder(transformRecord);
+    builder.setTransformStatus(newStatus.name());
+    builder.setLastStateTs(new Timestamp(EnvironmentEdgeManager.currentTimeMillis()));
+    if (newStatus == PTable.TransformStatus.STARTED) {
+      builder.setTransformRetryCount(transformRecord.getTransformRetryCount() + 1);
+    }
+    Transform.upsertTransform(builder.build(), connection);
+  }
+
+  private static void mutateColumns(PhoenixConnection connection, PTable pOldTable,
+    PTable pNewTable) throws SQLException {
+    if (pOldTable.getEncodingScheme() != pNewTable.getEncodingScheme()) {
+      Short nextKeySeq = 0;
+      for (PColumn column : pNewTable.getColumns()) {
+        boolean isPk = SchemaUtil.isPKColumn(column);
+        Short keySeq = isPk ? ++nextKeySeq : null;
+        PColumn newCol = new PColumnImpl(column.getName(), column.getFamilyName(),
+          column.getDataType(), column.getMaxLength(), column.getScale(), column.isNullable(),
+          column.getPosition(), column.getSortOrder(), column.getArraySize(),
+          column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr(),
+          column.isRowTimestamp(), column.isDynamic(), column.getColumnQualifierBytes(),
+          EnvironmentEdgeManager.currentTimeMillis());
+        addColumnMutation(connection,
+          pOldTable.getSchemaName() == null ? null : pOldTable.getSchemaName().getString(),
+          pOldTable.getTableName().getString(), newCol,
+          pNewTable.getParentTableName() == null
+            ? null
+            : pNewTable.getParentTableName().getString(),
+          pNewTable.getPKName() == null ? null : pNewTable.getPKName().getString(), keySeq,
+          pNewTable.getBucketNum() != null);
+      }
+    }
+  }
+
+  public static PTable getTransformedView(PTable pOldView, PTable pNewTable,
+    HashMap<String, PColumn> columnMap, boolean withDerivedColumns) throws SQLException {
+    List<PColumn> newColumns = new ArrayList<>();
+    PTable pNewView = null;
+    if (pOldView.getEncodingScheme() != pNewTable.getEncodingScheme()) {
+      Short nextKeySeq = 0;
+      PTable.EncodedCQCounter cqCounterToUse = pNewTable.getEncodedCQCounter();
+      String defaultColumnFamily = pNewTable.getDefaultFamilyName() != null
+        && !Strings.isNullOrEmpty(pNewTable.getDefaultFamilyName().getString())
+          ? pNewTable.getDefaultFamilyName().getString()
+          : DEFAULT_COLUMN_FAMILY;
+
+      for (PColumn column : pOldView.getColumns()) {
+        boolean isPk = SchemaUtil.isPKColumn(column);
+        Short keySeq = isPk ? ++nextKeySeq : null;
+        if (isPk) {
+          continue;
+        }
+        String familyName = null;
+        if (pNewTable.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) {
+          familyName = column.getFamilyName() != null
+            ?
column.getFamilyName().getString() + : defaultColumnFamily; } else { - updateTransformRecord(connection.unwrap(PhoenixConnection.class), transformRecord, PTable.TransformStatus.COMPLETED); - connection.commit(); + familyName = defaultColumnFamily; } - } - - public static void updateTransformRecord(PhoenixConnection connection, SystemTransformRecord transformRecord, PTable.TransformStatus newStatus) throws SQLException { - SystemTransformRecord.SystemTransformBuilder builder = new SystemTransformRecord.SystemTransformBuilder(transformRecord); - builder.setTransformStatus(newStatus.name()); - builder.setLastStateTs(new Timestamp(EnvironmentEdgeManager.currentTimeMillis())); - if (newStatus == PTable.TransformStatus.STARTED) { - builder.setTransformRetryCount(transformRecord.getTransformRetryCount() + 1); + int encodedCQ = pOldView.isAppendOnlySchema() + ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + keySeq) + : cqCounterToUse.getNextQualifier(familyName); + byte[] colQualifierBytes = EncodedColumnsUtil + .getColumnQualifierBytes(column.getName().getString(), encodedCQ, pNewTable, isPk); + if (columnMap.containsKey(column.getName().getString())) { + colQualifierBytes = columnMap.get(column.getName().getString()).getColumnQualifierBytes(); + } else { + if (!column.isDerived()) { + cqCounterToUse.increment(familyName); + } } - Transform.upsertTransform(builder.build(), connection); - } - private static void mutateColumns(PhoenixConnection connection, PTable pOldTable, PTable pNewTable) throws SQLException { - if (pOldTable.getEncodingScheme() != pNewTable.getEncodingScheme()) { - Short nextKeySeq = 0; - for (PColumn column : pNewTable.getColumns()) { - boolean isPk = SchemaUtil.isPKColumn(column); - Short keySeq = isPk ? ++nextKeySeq : null; - PColumn newCol = new PColumnImpl(column.getName(), column.getFamilyName(), column.getDataType(), - column.getMaxLength(), column.getScale(), column.isNullable(), column.getPosition(), column.getSortOrder() - , column.getArraySize(), - column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), - column.isDynamic(), column.getColumnQualifierBytes(), EnvironmentEdgeManager.currentTimeMillis()); - addColumnMutation(connection, pOldTable.getSchemaName()==null?null:pOldTable.getSchemaName().getString() - , pOldTable.getTableName().getString(), newCol, - pNewTable.getParentTableName() == null ? null : pNewTable.getParentTableName().getString() - , pNewTable.getPKName() == null ? null : pNewTable.getPKName().getString(), keySeq, pNewTable.getBucketNum() != null); - } + if (!withDerivedColumns && column.isDerived()) { + // Don't need to add/change derived columns + continue; } - } - - public static PTable getTransformedView(PTable pOldView, PTable pNewTable, HashMap columnMap, boolean withDerivedColumns) throws SQLException { - List newColumns = new ArrayList<>(); - PTable pNewView = null; - if (pOldView.getEncodingScheme() != pNewTable.getEncodingScheme()) { - Short nextKeySeq = 0; - PTable.EncodedCQCounter cqCounterToUse = pNewTable.getEncodedCQCounter(); - String defaultColumnFamily = pNewTable.getDefaultFamilyName() != null && !Strings.isNullOrEmpty(pNewTable.getDefaultFamilyName().getString()) ? - pNewTable.getDefaultFamilyName().getString() : DEFAULT_COLUMN_FAMILY; - - for (PColumn column : pOldView.getColumns()) { - boolean isPk = SchemaUtil.isPKColumn(column); - Short keySeq = isPk ? 
++nextKeySeq : null; - if (isPk) { - continue; - } - String familyName = null; - if (pNewTable.getImmutableStorageScheme() == SINGLE_CELL_ARRAY_WITH_OFFSETS) { - familyName = column.getFamilyName() != null ? column.getFamilyName().getString() : defaultColumnFamily; - } else { - familyName = defaultColumnFamily; - } - int encodedCQ = pOldView.isAppendOnlySchema() ? Integer.valueOf(ENCODED_CQ_COUNTER_INITIAL_VALUE + keySeq) : cqCounterToUse.getNextQualifier(familyName); - byte[] colQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(column.getName().getString(), - encodedCQ, pNewTable, isPk); - if (columnMap.containsKey(column.getName().getString())) { - colQualifierBytes = columnMap.get(column.getName().getString()).getColumnQualifierBytes(); - } else { - if (!column.isDerived()) { - cqCounterToUse.increment(familyName); - } - } - - if (!withDerivedColumns && column.isDerived()) { - // Don't need to add/change derived columns - continue; - } - - PColumn newCol = new PColumnImpl(column.getName(), PNameFactory.newName(familyName), column.getDataType(), - column.getMaxLength(), column.getScale(), column.isNullable(), column.getPosition(), column.getSortOrder() - , column.getArraySize(), - column.getViewConstant(), column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), - column.isDynamic(), colQualifierBytes, EnvironmentEdgeManager.currentTimeMillis()); - newColumns.add(newCol); - if (!columnMap.containsKey(newCol.getName().getString())) { - columnMap.put(newCol.getName().getString(), newCol) ; - } - } - pNewView = PTableImpl.builderWithColumns(pOldView, newColumns) - .setQualifierEncodingScheme(pNewTable.getEncodingScheme()) - .setImmutableStorageScheme(pNewTable.getImmutableStorageScheme()) - .setPhysicalNames( - Collections.singletonList(SchemaUtil.getPhysicalHBaseTableName( - pNewTable.getSchemaName(), pNewTable.getTableName(), pNewTable.isNamespaceMapped()))) - .build(); - } else { - // Have to change this per transform type + PColumn newCol = + new PColumnImpl(column.getName(), PNameFactory.newName(familyName), column.getDataType(), + column.getMaxLength(), column.getScale(), column.isNullable(), column.getPosition(), + column.getSortOrder(), column.getArraySize(), column.getViewConstant(), + column.isViewReferenced(), column.getExpressionStr(), column.isRowTimestamp(), + column.isDynamic(), colQualifierBytes, EnvironmentEdgeManager.currentTimeMillis()); + newColumns.add(newCol); + if (!columnMap.containsKey(newCol.getName().getString())) { + columnMap.put(newCol.getName().getString(), newCol); } - return pNewView; + } + + pNewView = + PTableImpl.builderWithColumns(pOldView, newColumns) + .setQualifierEncodingScheme(pNewTable.getEncodingScheme()) + .setImmutableStorageScheme(pNewTable.getImmutableStorageScheme()) + .setPhysicalNames(Collections.singletonList(SchemaUtil.getPhysicalHBaseTableName( + pNewTable.getSchemaName(), pNewTable.getTableName(), pNewTable.isNamespaceMapped()))) + .build(); + } else { + // Have to change this per transform type } - - private static void mutateViewColumns(PhoenixConnection connection, PTable pView, PTable pNewTable, HashMap columnMap) throws SQLException { - if (pView.getEncodingScheme() != pNewTable.getEncodingScheme()) { - Short nextKeySeq = 0; - PTable newView = getTransformedView(pView, pNewTable, columnMap,false); - for (PColumn newCol : newView.getColumns()) { - boolean isPk = SchemaUtil.isPKColumn(newCol); - Short keySeq = isPk ? 
++nextKeySeq : null; - if (isPk) { - continue; - } - String tenantId = pView.getTenantId() == null ? null : pView.getTenantId().getString(); - addColumnMutation(connection, tenantId, pView.getSchemaName() == null ? null : pView.getSchemaName().getString() - , pView.getTableName().getString(), newCol, - pView.getParentTableName() == null ? null : pView.getParentTableName().getString() - , pView.getPKName() == null ? null : pView.getPKName().getString(), keySeq, pView.getBucketNum() != null); - } + return pNewView; + } + + private static void mutateViewColumns(PhoenixConnection connection, PTable pView, + PTable pNewTable, HashMap columnMap) throws SQLException { + if (pView.getEncodingScheme() != pNewTable.getEncodingScheme()) { + Short nextKeySeq = 0; + PTable newView = getTransformedView(pView, pNewTable, columnMap, false); + for (PColumn newCol : newView.getColumns()) { + boolean isPk = SchemaUtil.isPKColumn(newCol); + Short keySeq = isPk ? ++nextKeySeq : null; + if (isPk) { + continue; } + String tenantId = pView.getTenantId() == null ? null : pView.getTenantId().getString(); + addColumnMutation(connection, tenantId, + pView.getSchemaName() == null ? null : pView.getSchemaName().getString(), + pView.getTableName().getString(), newCol, + pView.getParentTableName() == null ? null : pView.getParentTableName().getString(), + pView.getPKName() == null ? null : pView.getPKName().getString(), keySeq, + pView.getBucketNum() != null); + } } - - public static void doForceCutover(Connection connection, Configuration configuration) throws Exception{ - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - // Will be called from Reducer - String tenantId = configuration.get(MAPREDUCE_TENANT_ID, null); - String fullOldTableName = PhoenixConfigurationUtil.getInputTableName(configuration); - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullOldTableName); - String oldTableLogicalName = SchemaUtil.getTableNameFromFullName(fullOldTableName); - String indexTableName = SchemaUtil.getTableNameFromFullName(PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration)); - String logicaTableName = oldTableLogicalName; - String logicalParentName = null; - if (PhoenixConfigurationUtil.getTransformingTableType(configuration) == IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE) - if (!Strings.isNullOrEmpty(indexTableName)) { - logicaTableName = indexTableName; - logicalParentName = SchemaUtil.getTableName(schemaName, oldTableLogicalName); - } - - SystemTransformRecord transformRecord = getTransformRecord(schemaName, logicaTableName, logicalParentName, - tenantId, phoenixConnection); - Transform.doCutover(phoenixConnection, transformRecord); - updateTransformRecord(phoenixConnection, transformRecord, PTable.TransformStatus.COMPLETED); - phoenixConnection.commit(); + } + + public static void doForceCutover(Connection connection, Configuration configuration) + throws Exception { + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + // Will be called from Reducer + String tenantId = configuration.get(MAPREDUCE_TENANT_ID, null); + String fullOldTableName = PhoenixConfigurationUtil.getInputTableName(configuration); + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullOldTableName); + String oldTableLogicalName = SchemaUtil.getTableNameFromFullName(fullOldTableName); + String indexTableName = SchemaUtil + .getTableNameFromFullName(PhoenixConfigurationUtil.getIndexToolIndexTableName(configuration)); + String logicaTableName = 
oldTableLogicalName; + String logicalParentName = null; + if ( + PhoenixConfigurationUtil.getTransformingTableType(configuration) + == IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE + ) if (!Strings.isNullOrEmpty(indexTableName)) { + logicaTableName = indexTableName; + logicalParentName = SchemaUtil.getTableName(schemaName, oldTableLogicalName); } -} - + SystemTransformRecord transformRecord = getTransformRecord(schemaName, logicaTableName, + logicalParentName, tenantId, phoenixConnection); + Transform.doCutover(phoenixConnection, transformRecord); + updateTransformRecord(phoenixConnection, transformRecord, PTable.TransformStatus.COMPLETED); + phoenixConnection.commit(); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java index 3eb62ea0861..1676e1d1f53 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/MergeViewIndexIdSequencesTool.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,6 @@ */ package org.apache.phoenix.util; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -31,81 +24,83 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.mapreduce.util.ConnectionUtil; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class MergeViewIndexIdSequencesTool extends Configured implements Tool { - private static final Logger LOGGER = LoggerFactory.getLogger(MergeViewIndexIdSequencesTool.class); + private static final Logger LOGGER = LoggerFactory.getLogger(MergeViewIndexIdSequencesTool.class); + private static final Option HELP_OPTION = new Option("h", "help", false, "Help"); + private static final Option RUN_OPTION = new Option("r", "run", false, + "Run MergeViewIndexIdSequencesTool to avoid view index id collision."); - private static final Option HELP_OPTION = new Option("h", "help", false, 
"Help"); - private static final Option RUN_OPTION = new Option("r", "run", false, - "Run MergeViewIndexIdSequencesTool to avoid view index id collision."); - - - private Options getOptions() { - final Options options = new Options(); - options.addOption(RUN_OPTION); - options.addOption(HELP_OPTION); - return options; - } + private Options getOptions() { + final Options options = new Options(); + options.addOption(RUN_OPTION); + options.addOption(HELP_OPTION); + return options; + } - private void parseOptions(String[] args) throws Exception { + private void parseOptions(String[] args) throws Exception { - final Options options = getOptions(); - - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine cmdLine = null; - try { - cmdLine = parser.parse(options, args); - } catch (ParseException e) { - printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); - } - - if (cmdLine.hasOption(HELP_OPTION.getOpt())) { - printHelpAndExit(options, 0); - } - - if (!cmdLine.hasOption(RUN_OPTION.getOpt())) { - printHelpAndExit("Please give at least one param", options); - } + final Options options = getOptions(); + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine cmdLine = null; + try { + cmdLine = parser.parse(options, args); + } catch (ParseException e) { + printHelpAndExit("Error parsing command line options: " + e.getMessage(), options); } - private void printHelpAndExit(Options options, int exitCode) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("help", options); - System.exit(exitCode); + if (cmdLine.hasOption(HELP_OPTION.getOpt())) { + printHelpAndExit(options, 0); } - private void printHelpAndExit(String errorMessage, Options options) { - System.err.println(errorMessage); - printHelpAndExit(options, 1); + if (!cmdLine.hasOption(RUN_OPTION.getOpt())) { + printHelpAndExit("Please give at least one param", options); } - - @Override - public int run(String[] args) throws Exception { - int status = 0; - parseOptions(args); - - final Configuration config = HBaseConfiguration.addHbaseResources(getConf()); - try (PhoenixConnection conn = ConnectionUtil.getInputConnection(config). 
- unwrap(PhoenixConnection.class)) { - UpgradeUtil.mergeViewIndexIdSequences(conn); - } catch (Exception e) { - LOGGER.error("Get an error while running MergeViewIndexIdSequencesTool, " - + e.getMessage()); - status = 1; - } - return status; - } - public static void main(final String[] args) throws Exception { - int result = ToolRunner.run(new MergeViewIndexIdSequencesTool(), args); - System.exit(result); + } + + private void printHelpAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("help", options); + System.exit(exitCode); + } + + private void printHelpAndExit(String errorMessage, Options options) { + System.err.println(errorMessage); + printHelpAndExit(options, 1); + } + + @Override + public int run(String[] args) throws Exception { + int status = 0; + parseOptions(args); + + final Configuration config = HBaseConfiguration.addHbaseResources(getConf()); + try (PhoenixConnection conn = + ConnectionUtil.getInputConnection(config).unwrap(PhoenixConnection.class)) { + UpgradeUtil.mergeViewIndexIdSequences(conn); + } catch (Exception e) { + LOGGER.error("Get an error while running MergeViewIndexIdSequencesTool, " + e.getMessage()); + status = 1; } -} \ No newline at end of file + return status; + } + + public static void main(final String[] args) throws Exception { + int result = ToolRunner.run(new MergeViewIndexIdSequencesTool(), args); + System.exit(result); + } +} diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java index e7e2aa150da..12bb4b65905 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/PhoenixMRJobUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,186 +48,183 @@ public class PhoenixMRJobUtil { - private static final String YARN_LEADER_ELECTION = "/yarn-leader-election"; - private static final String ACTIVE_STANDBY_ELECTOR_LOCK = "ActiveStandbyElectorLock"; - private static final String RM_APPS_GET_ENDPOINT = "/ws/v1/cluster/apps"; - - public static final String PHOENIX_INDEX_MR_QUEUE_NAME_PROPERTY = - "phoenix.index.mr.scheduler.capacity.queuename"; - public static final String PHOENIX_INDEX_MR_MAP_MEMORY_PROPERTY = - "phoenix.index.mr.scheduler.capacity.mapMemoryMB"; - public static final String PHOENIX_MR_CONCURRENT_MAP_LIMIT_PROPERTY = - "phoenix.mr.concurrent.map.limit"; - - // Default MR Capacity Scheduler Configurations for Phoenix MR Index Build - // Jobs - public static final String DEFAULT_QUEUE_NAME = "default"; - public static final int DEFAULT_MR_CONCURRENT_MAP_LIMIT = 20; - public static final int DEFAULT_MAP_MEMROY_MB = 5120; - public static final String XMX_OPT = "-Xmx"; - - public static final String RM_HTTP_SCHEME = "http"; - // TODO - Move these as properties? 
- public static final int RM_CONNECT_TIMEOUT_MILLIS = 10 * 1000; - public static final int RM_READ_TIMEOUT_MILLIS = 10 * 60 * 1000; - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobUtil.class); + private static final String YARN_LEADER_ELECTION = "/yarn-leader-election"; + private static final String ACTIVE_STANDBY_ELECTOR_LOCK = "ActiveStandbyElectorLock"; + private static final String RM_APPS_GET_ENDPOINT = "/ws/v1/cluster/apps"; + + public static final String PHOENIX_INDEX_MR_QUEUE_NAME_PROPERTY = + "phoenix.index.mr.scheduler.capacity.queuename"; + public static final String PHOENIX_INDEX_MR_MAP_MEMORY_PROPERTY = + "phoenix.index.mr.scheduler.capacity.mapMemoryMB"; + public static final String PHOENIX_MR_CONCURRENT_MAP_LIMIT_PROPERTY = + "phoenix.mr.concurrent.map.limit"; + + // Default MR Capacity Scheduler Configurations for Phoenix MR Index Build + // Jobs + public static final String DEFAULT_QUEUE_NAME = "default"; + public static final int DEFAULT_MR_CONCURRENT_MAP_LIMIT = 20; + public static final int DEFAULT_MAP_MEMROY_MB = 5120; + public static final String XMX_OPT = "-Xmx"; + + public static final String RM_HTTP_SCHEME = "http"; + // TODO - Move these as properties? + public static final int RM_CONNECT_TIMEOUT_MILLIS = 10 * 1000; + public static final int RM_READ_TIMEOUT_MILLIS = 10 * 60 * 1000; + + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMRJobUtil.class); + + public static final String PHOENIX_MR_SCHEDULER_TYPE_NAME = "phoenix.index.mr.scheduler.type"; + + public enum MR_SCHEDULER_TYPE { + CAPACITY, + FAIR, + NONE + }; + + public static String getRMWebAddress(Configuration config) { + return config.get(YarnConfiguration.RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); + } + + public static String getRMWebAddress(Configuration config, String Rmid) { + return config.get(YarnConfiguration.RM_WEBAPP_ADDRESS + "." 
+ Rmid, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); + } + + public static String getActiveResourceManagerAddress(Configuration config, String zkQuorum) + throws IOException, InterruptedException, KeeperException, InvalidProtocolBufferException, + ZooKeeperConnectionException { + // In case of yarn HA is enabled + ZKWatcher zkw = null; + ZooKeeper zk = null; + String activeRMHost = null; + try { + zkw = new ZKWatcher(config, "get-active-yarnmanager", null); + zk = new ZooKeeper(zkQuorum, 30000, zkw, false); + + List children = zk.getChildren(YARN_LEADER_ELECTION, zkw); + for (String subEntry : children) { + List subChildern = zk.getChildren(YARN_LEADER_ELECTION + "/" + subEntry, zkw); + for (String eachEntry : subChildern) { + if (eachEntry.contains(ACTIVE_STANDBY_ELECTOR_LOCK)) { + String path = YARN_LEADER_ELECTION + "/" + subEntry + "/" + ACTIVE_STANDBY_ELECTOR_LOCK; + byte[] data = zk.getData(path, zkw, new Stat()); + ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data); + String RmId = proto.getRmId(); + LOGGER.info("Active RmId : " + RmId); + + activeRMHost = PhoenixMRJobUtil.getRMWebAddress(config, RmId); + LOGGER.info("activeResourceManagerHostname = " + activeRMHost); + + } + } + } + } finally { + if (zkw != null) zkw.close(); + if (zk != null) zk.close(); + } + // In case of yarn HA is NOT enabled + if (activeRMHost == null) { + activeRMHost = PhoenixMRJobUtil.getRMWebAddress(config); + LOGGER.info("ResourceManagerAddress from config = " + activeRMHost); + } - public static final String PHOENIX_MR_SCHEDULER_TYPE_NAME = "phoenix.index.mr.scheduler.type"; + return activeRMHost; + } - public enum MR_SCHEDULER_TYPE { - CAPACITY, FAIR, NONE - }; + public static String getJobsInformationFromRM(String rmAddress, Map urlParams) + throws MalformedURLException, ProtocolException, UnsupportedEncodingException, IOException { + HttpURLConnection con = null; + String response = null; + String url = null; - public static String getRMWebAddress(Configuration config){ - return config.get(YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); - } + try { + StringBuilder urlBuilder = new StringBuilder(); - public static String getRMWebAddress(Configuration config, String Rmid){ - return config.get(YarnConfiguration.RM_WEBAPP_ADDRESS + "." 
+ Rmid, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); - } + urlBuilder.append(RM_HTTP_SCHEME + "://").append(rmAddress).append(RM_APPS_GET_ENDPOINT); - public static String getActiveResourceManagerAddress(Configuration config, String zkQuorum) - throws IOException, InterruptedException, KeeperException, - InvalidProtocolBufferException, ZooKeeperConnectionException { - // In case of yarn HA is enabled - ZKWatcher zkw = null; - ZooKeeper zk = null; - String activeRMHost = null; - try { - zkw = new ZKWatcher(config, "get-active-yarnmanager", null); - zk = new ZooKeeper(zkQuorum, 30000, zkw, false); - - List children = zk.getChildren(YARN_LEADER_ELECTION, zkw); - for (String subEntry : children) { - List subChildern = - zk.getChildren(YARN_LEADER_ELECTION + "/" + subEntry, zkw); - for (String eachEntry : subChildern) { - if (eachEntry.contains(ACTIVE_STANDBY_ELECTOR_LOCK)) { - String path = - YARN_LEADER_ELECTION + "/" + subEntry + "/" - + ACTIVE_STANDBY_ELECTOR_LOCK; - byte[] data = zk.getData(path, zkw, new Stat()); - ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data); - String RmId = proto.getRmId(); - LOGGER.info("Active RmId : " + RmId); - - activeRMHost = PhoenixMRJobUtil.getRMWebAddress(config, RmId); - LOGGER.info("activeResourceManagerHostname = " + activeRMHost); - - } - } - } - } finally { - if (zkw != null) zkw.close(); - if (zk != null) zk.close(); - } - // In case of yarn HA is NOT enabled - if (activeRMHost == null) { - activeRMHost = PhoenixMRJobUtil.getRMWebAddress(config); - LOGGER.info("ResourceManagerAddress from config = " + activeRMHost); + if (urlParams != null && urlParams.size() != 0) { + urlBuilder.append("?"); + for (String key : urlParams.keySet()) { + urlBuilder.append(key + "=" + urlParams.get(key) + "&"); } + urlBuilder.delete(urlBuilder.length() - 1, urlBuilder.length()); + } - return activeRMHost; - } + url = urlBuilder.toString(); + LOGGER.info("Attempt to get running/submitted jobs information from RM URL = " + url); - public static String getJobsInformationFromRM(String rmAddress, - Map urlParams) throws MalformedURLException, ProtocolException, - UnsupportedEncodingException, IOException { - HttpURLConnection con = null; - String response = null; - String url = null; - - try { - StringBuilder urlBuilder = new StringBuilder(); - - urlBuilder.append(RM_HTTP_SCHEME + "://").append(rmAddress) - .append(RM_APPS_GET_ENDPOINT); - - if (urlParams != null && urlParams.size() != 0) { - urlBuilder.append("?"); - for (String key : urlParams.keySet()) { - urlBuilder.append(key + "=" + urlParams.get(key) + "&"); - } - urlBuilder.delete(urlBuilder.length() - 1, urlBuilder.length()); - } - - url = urlBuilder.toString(); - LOGGER.info("Attempt to get running/submitted jobs information from RM URL = " + url); - - URL obj = new URL(url); - con = (HttpURLConnection) obj.openConnection(); - con.setInstanceFollowRedirects(true); - con.setRequestMethod("GET"); - - con.setConnectTimeout(RM_CONNECT_TIMEOUT_MILLIS); - con.setReadTimeout(RM_READ_TIMEOUT_MILLIS); - - response = getTextContent(con.getInputStream()); - } finally { - if (con != null) con.disconnect(); - } + URL obj = new URL(url); + con = (HttpURLConnection) obj.openConnection(); + con.setInstanceFollowRedirects(true); + con.setRequestMethod("GET"); - LOGGER.info("Result of attempt to get running/submitted jobs from RM - URL=" + url - + ",ResponseCode=" + con.getResponseCode() + ",Response=" + response); + con.setConnectTimeout(RM_CONNECT_TIMEOUT_MILLIS); + con.setReadTimeout(RM_READ_TIMEOUT_MILLIS); - return 
response; + response = getTextContent(con.getInputStream()); + } finally { + if (con != null) con.disconnect(); } - public static String getTextContent(InputStream is) throws IOException { - BufferedReader in = null; - StringBuilder response = null; - try { - in = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)); - String inputLine; - response = new StringBuilder(); - while ((inputLine = in.readLine()) != null) { - response.append(inputLine).append("\n"); - } - } finally { - if (in != null) in.close(); - if (is != null) { - is.close(); - } - } - return response.toString(); + LOGGER.info("Result of attempt to get running/submitted jobs from RM - URL=" + url + + ",ResponseCode=" + con.getResponseCode() + ",Response=" + response); + + return response; + } + + public static String getTextContent(InputStream is) throws IOException { + BufferedReader in = null; + StringBuilder response = null; + try { + in = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)); + String inputLine; + response = new StringBuilder(); + while ((inputLine = in.readLine()) != null) { + response.append(inputLine).append("\n"); + } + } finally { + if (in != null) in.close(); + if (is != null) { + is.close(); + } } - - public static void shutdown(ExecutorService pool) throws InterruptedException { - pool.shutdown(); - LOGGER.debug("Shutdown called"); - pool.awaitTermination(200, TimeUnit.MILLISECONDS); - LOGGER.debug("Await termination called to wait for 200 msec"); - if (!pool.isShutdown()) { - pool.shutdownNow(); - LOGGER.debug("Await termination called to wait for 200 msec"); - pool.awaitTermination(100, TimeUnit.MILLISECONDS); - } - if (!pool.isShutdown()) { - LOGGER.warn("Pool did not shutdown"); - } + return response.toString(); + } + + public static void shutdown(ExecutorService pool) throws InterruptedException { + pool.shutdown(); + LOGGER.debug("Shutdown called"); + pool.awaitTermination(200, TimeUnit.MILLISECONDS); + LOGGER.debug("Await termination called to wait for 200 msec"); + if (!pool.isShutdown()) { + pool.shutdownNow(); + LOGGER.debug("Await termination called to wait for 200 msec"); + pool.awaitTermination(100, TimeUnit.MILLISECONDS); } + if (!pool.isShutdown()) { + LOGGER.warn("Pool did not shutdown"); + } + } - /** - * This method set the configuration values for Capacity scheduler. - * @param conf - Configuration to which Capacity Queue information to be added - */ - public static void updateCapacityQueueInfo(Configuration conf) { - conf.set(MRJobConfig.QUEUE_NAME, - conf.get(PHOENIX_INDEX_MR_QUEUE_NAME_PROPERTY, DEFAULT_QUEUE_NAME)); + /** + * This method set the configuration values for Capacity scheduler. 
+ * @param conf - Configuration to which Capacity Queue information to be added + */ + public static void updateCapacityQueueInfo(Configuration conf) { + conf.set(MRJobConfig.QUEUE_NAME, + conf.get(PHOENIX_INDEX_MR_QUEUE_NAME_PROPERTY, DEFAULT_QUEUE_NAME)); - conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, - conf.getInt(PHOENIX_MR_CONCURRENT_MAP_LIMIT_PROPERTY, DEFAULT_MR_CONCURRENT_MAP_LIMIT)); + conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, + conf.getInt(PHOENIX_MR_CONCURRENT_MAP_LIMIT_PROPERTY, DEFAULT_MR_CONCURRENT_MAP_LIMIT)); - int mapMemoryMB = conf.getInt(PHOENIX_INDEX_MR_MAP_MEMORY_PROPERTY, DEFAULT_MAP_MEMROY_MB); + int mapMemoryMB = conf.getInt(PHOENIX_INDEX_MR_MAP_MEMORY_PROPERTY, DEFAULT_MAP_MEMROY_MB); - conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemoryMB); - conf.set(MRJobConfig.MAP_JAVA_OPTS, XMX_OPT + ((int) (mapMemoryMB * 0.9)) + "m"); + conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemoryMB); + conf.set(MRJobConfig.MAP_JAVA_OPTS, XMX_OPT + ((int) (mapMemoryMB * 0.9)) + "m"); - LOGGER.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB=" - + conf.get(MRJobConfig.MAP_MEMORY_MB) + ";" + "Map Java Opts=" - + conf.get(MRJobConfig.MAP_JAVA_OPTS)); - } + LOGGER.info("Queue Name=" + conf.get(MRJobConfig.QUEUE_NAME) + ";" + "Map Meory MB=" + + conf.get(MRJobConfig.MAP_MEMORY_MB) + ";" + "Map Java Opts=" + + conf.get(MRJobConfig.MAP_JAVA_OPTS)); + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/RepairUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/RepairUtil.java index 0b548193bb5..cf0d87a35eb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/RepairUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/RepairUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,19 +24,23 @@ import org.apache.hadoop.hbase.util.Bytes; public class RepairUtil { - public static boolean isLocalIndexStoreFilesConsistent(RegionCoprocessorEnvironment environment, Store store) { - byte[] startKey = environment.getRegion().getRegionInfo().getStartKey(); - byte[] endKey = environment.getRegion().getRegionInfo().getEndKey(); - byte[] indexKeyEmbedded = startKey.length == 0 ? new byte[endKey.length] : startKey; - for (StoreFile file : store.getStorefiles()) { - if (file.getFirstKey().isPresent() && file.getFirstKey().get() != null) { - byte[] fileFirstRowKey = CellUtil.cloneRow(file.getFirstKey().get()); - if ((fileFirstRowKey != null && Bytes.compareTo(fileFirstRowKey, 0, - indexKeyEmbedded.length, indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)) { - return false; } - } + public static boolean isLocalIndexStoreFilesConsistent(RegionCoprocessorEnvironment environment, + Store store) { + byte[] startKey = environment.getRegion().getRegionInfo().getStartKey(); + byte[] endKey = environment.getRegion().getRegionInfo().getEndKey(); + byte[] indexKeyEmbedded = startKey.length == 0 ? 
new byte[endKey.length] : startKey; + for (StoreFile file : store.getStorefiles()) { + if (file.getFirstKey().isPresent() && file.getFirstKey().get() != null) { + byte[] fileFirstRowKey = CellUtil.cloneRow(file.getFirstKey().get()); + if ( + (fileFirstRowKey != null && Bytes.compareTo(fileFirstRowKey, 0, indexKeyEmbedded.length, + indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0) + ) { + return false; } - return true; + } } + return true; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java index fca7f129b5a..ddecfd468d5 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerIndexUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,70 +39,66 @@ import org.apache.phoenix.query.QueryServices; public class ServerIndexUtil { - public static void writeLocalUpdates(Region region, final List mutations, boolean skipWAL) throws IOException { - if(skipWAL) { - for (Mutation m : mutations) { - m.setDurability(Durability.SKIP_WAL); - } - } - region.batchMutate( - mutations.toArray(new Mutation[mutations.size()])); + public static void writeLocalUpdates(Region region, final List mutations, + boolean skipWAL) throws IOException { + if (skipWAL) { + for (Mutation m : mutations) { + m.setDurability(Durability.SKIP_WAL); + } } + region.batchMutate(mutations.toArray(new Mutation[mutations.size()])); + } - public static void wrapResultUsingOffset(List result, final int offset) throws IOException { - ListIterator itr = result.listIterator(); - while (itr.hasNext()) { - final Cell cell = itr.next(); - // TODO: Create DelegateCell class instead - Cell newCell = new OffsetCell(cell, offset); - itr.set(newCell); - } + public static void wrapResultUsingOffset(List result, final int offset) throws IOException { + ListIterator itr = result.listIterator(); + while (itr.hasNext()) { + final Cell cell = itr.next(); + // TODO: Create DelegateCell class instead + Cell newCell = new OffsetCell(cell, offset); + itr.set(newCell); } + } - /** - * Set Cell Tags to delete markers with source of operation attribute. - * @param miniBatchOp miniBatchOp - * @throws IOException IOException - */ - public static void setDeleteAttributes( - MiniBatchOperationInProgress miniBatchOp) - throws IOException { - for (int i = 0; i < miniBatchOp.size(); i++) { - Mutation m = miniBatchOp.getOperation(i); - if (!(m instanceof Delete)) { - // Ignore if it is not Delete type. 
- continue; - } - byte[] sourceOpAttr = - m.getAttribute(QueryServices.SOURCE_OPERATION_ATTRIB); - if (sourceOpAttr == null) { - continue; - } - Tag sourceOpTag = new ArrayBackedTag( - PhoenixTagType.SOURCE_OPERATION_TAG_TYPE, sourceOpAttr); - List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); - cellScanner.advance();) { - Cell cell = cellScanner.current(); - RawCell rawCell = (RawCell) cell; - List tags = new ArrayList<>(); - Iterator tagsIterator = rawCell.getTags(); - while (tagsIterator.hasNext()) { - tags.add(tagsIterator.next()); - } - tags.add(sourceOpTag); - // TODO: PrivateCellUtil's IA is Private. - // HBASE-25328 adds a builder methodfor creating Tag which - // will be LP with IA.coproc - Cell updatedCell = PrivateCellUtil.createCell(cell, tags); - updatedCells.add(updatedCell); - } - m.getFamilyCellMap().clear(); - // Clear and add new Cells to the Mutation. - for (Cell cell : updatedCells) { - Delete d = (Delete) m; - d.add(cell); - } + /** + * Set Cell Tags to delete markers with source of operation attribute. + * @param miniBatchOp miniBatchOp + * @throws IOException IOException + */ + public static void setDeleteAttributes(MiniBatchOperationInProgress miniBatchOp) + throws IOException { + for (int i = 0; i < miniBatchOp.size(); i++) { + Mutation m = miniBatchOp.getOperation(i); + if (!(m instanceof Delete)) { + // Ignore if it is not Delete type. + continue; + } + byte[] sourceOpAttr = m.getAttribute(QueryServices.SOURCE_OPERATION_ATTRIB); + if (sourceOpAttr == null) { + continue; + } + Tag sourceOpTag = new ArrayBackedTag(PhoenixTagType.SOURCE_OPERATION_TAG_TYPE, sourceOpAttr); + List updatedCells = new ArrayList<>(); + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + RawCell rawCell = (RawCell) cell; + List tags = new ArrayList<>(); + Iterator tagsIterator = rawCell.getTags(); + while (tagsIterator.hasNext()) { + tags.add(tagsIterator.next()); } + tags.add(sourceOpTag); + // TODO: PrivateCellUtil's IA is Private. + // HBASE-25328 adds a builder methodfor creating Tag which + // will be LP with IA.coproc + Cell updatedCell = PrivateCellUtil.createCell(cell, tags); + updatedCells.add(updatedCell); + } + m.getFamilyCellMap().clear(); + // Clear and add new Cells to the Mutation. + for (Cell cell : updatedCells) { + Delete d = (Delete) m; + d.add(cell); + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerUtil.java index c88e7c12d3f..15e8910b75e 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,8 +28,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; import java.util.concurrent.Future; +import java.util.function.Function; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -55,245 +55,254 @@ import org.slf4j.LoggerFactory; public class ServerUtil { - private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6"); - private static final Logger LOGGER = LoggerFactory.getLogger(ServerUtil.class); - private static final String FORMAT_FOR_TIMESTAMP = ",serverTimestamp=%d,"; + private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6"); + private static final Logger LOGGER = LoggerFactory.getLogger(ServerUtil.class); + private static final String FORMAT_FOR_TIMESTAMP = ",serverTimestamp=%d,"; - private static boolean coprocessorScanWorks(RegionCoprocessorEnvironment env) { - return (VersionUtil.encodeVersion(env.getHBaseVersion()) >= COPROCESSOR_SCAN_WORKS); - } + private static boolean coprocessorScanWorks(RegionCoprocessorEnvironment env) { + return (VersionUtil.encodeVersion(env.getHBaseVersion()) >= COPROCESSOR_SCAN_WORKS); + } - public static boolean hasCoprocessor(RegionCoprocessorEnvironment env, - String CoprocessorClassName) { - Collection coprocessors = - env.getRegion().getTableDescriptor().getCoprocessorDescriptors(); - for (CoprocessorDescriptor coprocessor: coprocessors) { - if (coprocessor.getClassName().equals(CoprocessorClassName)) { - return true; - } - } - return false; + public static boolean hasCoprocessor(RegionCoprocessorEnvironment env, + String CoprocessorClassName) { + Collection coprocessors = + env.getRegion().getTableDescriptor().getCoprocessorDescriptors(); + for (CoprocessorDescriptor coprocessor : coprocessors) { + if (coprocessor.getClassName().equals(CoprocessorClassName)) { + return true; + } } + return false; + } - /* - * This code works around HBASE-11837 which causes HTableInterfaces retrieved from - * RegionCoprocessorEnvironment to not read local data. - */ - private static Table getTableFromSingletonPool(RegionCoprocessorEnvironment env, TableName tableName) throws IOException { - // It's ok to not ever do a pool.close() as we're storing a single - // table only. The HTablePool holds no other resources that this table - // which will be closed itself when it's no longer needed. 
- Connection conn = ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env); - try { - return conn.getTable(tableName); - } catch (RuntimeException t) { - // handle cases that an IOE is wrapped inside a RuntimeException like HTableInterface#createHTableInterface - if (t.getCause() instanceof IOException) { - throw (IOException)t.getCause(); - } else { - throw t; - } - } - } - - public static Table getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, - Table writerTable) throws IOException { - if (coprocessorScanWorks(env)) { - return writerTable; - } - return getTableFromSingletonPool(env, writerTable.getName()); - } - - public static Table getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, TableName tableName) throws IOException { - if (coprocessorScanWorks(env)) { - return env.getConnection().getTable(tableName); - } - return getTableFromSingletonPool(env, tableName); + /* + * This code works around HBASE-11837 which causes HTableInterfaces retrieved from + * RegionCoprocessorEnvironment to not read local data. + */ + private static Table getTableFromSingletonPool(RegionCoprocessorEnvironment env, + TableName tableName) throws IOException { + // It's ok to not ever do a pool.close() as we're storing a single + // table only. The HTablePool holds no other resources that this table + // which will be closed itself when it's no longer needed. + Connection conn = + ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env); + try { + return conn.getTable(tableName); + } catch (RuntimeException t) { + // handle cases that an IOE is wrapped inside a RuntimeException like + // HTableInterface#createHTableInterface + if (t.getCause() instanceof IOException) { + throw (IOException) t.getCause(); + } else { + throw t; + } } + } + public static Table getHTableForCoprocessorScan(RegionCoprocessorEnvironment env, + Table writerTable) throws IOException { + if (coprocessorScanWorks(env)) { + return writerTable; + } + return getTableFromSingletonPool(env, writerTable.getName()); + } - public static DoNotRetryIOException wrapInDoNotRetryIOException(String msg, Throwable t, long timestamp) { - if (msg == null) { - msg = ""; - } - if (t instanceof SQLException) { - msg = t.getMessage() + " " + msg; - } - msg += String.format(FORMAT_FOR_TIMESTAMP, timestamp); - return new DoNotRetryIOException(msg, t); + public static Table getHTableForCoprocessorScan(RegionCoprocessorEnvironment env, + TableName tableName) throws IOException { + if (coprocessorScanWorks(env)) { + return env.getConnection().getTable(tableName); } - - public static boolean readyToCommit(int rowCount, long mutationSize, int maxBatchSize, long maxBatchSizeBytes) { - return maxBatchSize > 0 && rowCount >= maxBatchSize - || (maxBatchSizeBytes > 0 && mutationSize >= maxBatchSizeBytes); + return getTableFromSingletonPool(env, tableName); + } + + public static DoNotRetryIOException wrapInDoNotRetryIOException(String msg, Throwable t, + long timestamp) { + if (msg == null) { + msg = ""; } - - public static boolean isKeyInRegion(byte[] key, Region region) { - byte[] startKey = region.getRegionInfo().getStartKey(); - byte[] endKey = region.getRegionInfo().getEndKey(); - return (Bytes.compareTo(startKey, key) <= 0 - && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key, - endKey) < 0)); + if (t instanceof SQLException) { + msg = t.getMessage() + " " + msg; } + msg += String.format(FORMAT_FOR_TIMESTAMP, timestamp); + return new DoNotRetryIOException(msg, t); + } - 
public static RowLock acquireLock(Region region, byte[] key, List locks) - throws IOException { - RowLock rowLock = region.getRowLock(key, false); - if (rowLock == null) { - throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key)); - } - if (locks != null) { - locks.add(rowLock); - } - return rowLock; - } + public static boolean readyToCommit(int rowCount, long mutationSize, int maxBatchSize, + long maxBatchSizeBytes) { + return maxBatchSize > 0 && rowCount >= maxBatchSize + || (maxBatchSizeBytes > 0 && mutationSize >= maxBatchSizeBytes); + } - public static void releaseRowLocks(List rowLocks) { - if (rowLocks != null) { - for (RowLock rowLock : rowLocks) { - rowLock.release(); - } - rowLocks.clear(); - } - } + public static boolean isKeyInRegion(byte[] key, Region region) { + byte[] startKey = region.getRegionInfo().getStartKey(); + byte[] endKey = region.getRegionInfo().getEndKey(); + return (Bytes.compareTo(startKey, key) <= 0 + && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0 || Bytes.compareTo(key, endKey) < 0)); + } - /** - * If scan start rowkey is empty, use region boundaries. Reverse region boundaries - * for reverse scan. - * - * @param scan Scan object for which we need to find start rowkey. - * @param region Region object. - * @return Scan start rowkey based on scan's start rowkey or region boundaries. - */ - public static byte[] getScanStartRowKeyFromScanOrRegionBoundaries(Scan scan, - Region region) { - return scan.getStartRow().length > 0 ? scan.getStartRow() : - (scan.isReversed() ? region.getRegionInfo().getEndKey() : - region.getRegionInfo().getStartKey()); + public static RowLock acquireLock(Region region, byte[] key, List locks) + throws IOException { + RowLock rowLock = region.getRowLock(key, false); + if (rowLock == null) { + throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key)); + } + if (locks != null) { + locks.add(rowLock); } + return rowLock; + } - public static enum ConnectionType { - COMPACTION_CONNECTION, - INDEX_WRITER_CONNECTION, - INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS, - INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES, - DEFAULT_SERVER_CONNECTION; + public static void releaseRowLocks(List rowLocks) { + if (rowLocks != null) { + for (RowLock rowLock : rowLocks) { + rowLock.release(); + } + rowLocks.clear(); } + } - public static class ConnectionFactory { - - private static Map connections = - new ConcurrentHashMap(); + /** + * If scan start rowkey is empty, use region boundaries. Reverse region boundaries for reverse + * scan. + * @param scan Scan object for which we need to find start rowkey. + * @param region Region object. + * @return Scan start rowkey based on scan's start rowkey or region boundaries. + */ + public static byte[] getScanStartRowKeyFromScanOrRegionBoundaries(Scan scan, Region region) { + return scan.getStartRow().length > 0 + ? scan.getStartRow() + : (scan.isReversed() + ? 
region.getRegionInfo().getEndKey() + : region.getRegionInfo().getStartKey()); + } - public static Connection getConnection(final ConnectionType connectionType, final RegionCoprocessorEnvironment env) { - return connections.computeIfAbsent(connectionType, new Function() { - @Override - public Connection apply(ConnectionType t) { - try { - return CompatUtil.createShortCircuitConnection(getTypeSpecificConfiguration(connectionType, env.getConfiguration()), env); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - }); - } + public static enum ConnectionType { + COMPACTION_CONNECTION, + INDEX_WRITER_CONNECTION, + INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS, + INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES, + DEFAULT_SERVER_CONNECTION; + } - public static Configuration getTypeSpecificConfiguration(ConnectionType connectionType, Configuration conf) { - switch (connectionType) { - case COMPACTION_CONNECTION: - return getCompactionConfig(conf); - case DEFAULT_SERVER_CONNECTION: - return conf; - case INDEX_WRITER_CONNECTION: - return getIndexWriterConnection(conf); - case INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS: - return getIndexWriterConfigurationWithCustomThreads(conf); - case INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES: - return getNoRetriesIndexWriterConfigurationWithCustomThreads(conf); - default: - return conf; - } - } - - public static void shutdown() { - synchronized (ConnectionFactory.class) { - for (Connection connection : connections.values()) { - try { - connection.close(); - } catch (IOException e) { - LOGGER.warn("Unable to close coprocessor connection", e); - } - } - connections.clear(); - } - } - - public static int getConnectionsCount() { - return connections.size(); - } + public static class ConnectionFactory { - } + private static Map connections = + new ConcurrentHashMap(); - public static Configuration getCompactionConfig(Configuration conf) { - Configuration compactionConfig = PropertiesUtil.cloneConfig(conf); - // lower the number of rpc retries, so we don't hang the compaction - compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - conf.getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, - QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); - compactionConfig.setInt(HConstants.HBASE_CLIENT_PAUSE, - conf.getInt(QueryServices.METADATA_WRITE_RETRY_PAUSE, - QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRY_PAUSE)); - return compactionConfig; + public static Connection getConnection(final ConnectionType connectionType, + final RegionCoprocessorEnvironment env) { + return connections.computeIfAbsent(connectionType, + new Function() { + @Override + public Connection apply(ConnectionType t) { + try { + return CompatUtil.createShortCircuitConnection( + getTypeSpecificConfiguration(connectionType, env.getConfiguration()), env); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }); } - public static Configuration getIndexWriterConnection(Configuration conf) { - Configuration clonedConfig = PropertiesUtil.cloneConfig(conf); - /* - * Set the rpc controller factory so that the HTables used by IndexWriter would - * set the correct priorities on the remote RPC calls. - */ - clonedConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, - InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); - // lower the number of rpc retries. We inherit config from HConnectionManager#setServerSideHConnectionRetries, - // which by default uses a multiplier of 10. 
That is too many retries for our synchronous index writes - clonedConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - conf.getInt(INDEX_WRITER_RPC_RETRIES_NUMBER, - DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER)); - clonedConfig.setInt(HConstants.HBASE_CLIENT_PAUSE, conf - .getInt(INDEX_WRITER_RPC_PAUSE, DEFAULT_INDEX_WRITER_RPC_PAUSE)); - return clonedConfig; + public static Configuration getTypeSpecificConfiguration(ConnectionType connectionType, + Configuration conf) { + switch (connectionType) { + case COMPACTION_CONNECTION: + return getCompactionConfig(conf); + case DEFAULT_SERVER_CONNECTION: + return conf; + case INDEX_WRITER_CONNECTION: + return getIndexWriterConnection(conf); + case INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS: + return getIndexWriterConfigurationWithCustomThreads(conf); + case INDEX_WRITER_CONNECTION_WITH_CUSTOM_THREADS_NO_RETRIES: + return getNoRetriesIndexWriterConfigurationWithCustomThreads(conf); + default: + return conf; + } } - public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { - Configuration clonedConfig = getIndexWriterConnection(conf); - setHTableThreads(clonedConfig); - return clonedConfig; + public static void shutdown() { + synchronized (ConnectionFactory.class) { + for (Connection connection : connections.values()) { + try { + connection.close(); + } catch (IOException e) { + LOGGER.warn("Unable to close coprocessor connection", e); + } + } + connections.clear(); + } } - private static void setHTableThreads(Configuration conf) { - // set the number of threads allowed per table. - int htableThreads = - conf.getInt(IndexWriterUtils.INDEX_WRITER_PER_TABLE_THREADS_CONF_KEY, - IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS); - IndexManagementUtil.setIfNotSet(conf, IndexWriterUtils.HTABLE_THREAD_KEY, htableThreads); + public static int getConnectionsCount() { + return connections.size(); } - - public static Configuration getNoRetriesIndexWriterConfigurationWithCustomThreads(Configuration conf) { - Configuration clonedConf = getIndexWriterConfigurationWithCustomThreads(conf); - // note in HBase 2+, numTries = numRetries + 1 - // in prior versions, numTries = numRetries - clonedConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); - return clonedConf; - } + } - public static Throwable getExceptionFromFailedFuture(Future f) { - Throwable t = null; - try { - f.get(); - } catch (Exception e) { - t = e; - } - return t; + public static Configuration getCompactionConfig(Configuration conf) { + Configuration compactionConfig = PropertiesUtil.cloneConfig(conf); + // lower the number of rpc retries, so we don't hang the compaction + compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + conf.getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER, + QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER)); + compactionConfig.setInt(HConstants.HBASE_CLIENT_PAUSE, + conf.getInt(QueryServices.METADATA_WRITE_RETRY_PAUSE, + QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRY_PAUSE)); + return compactionConfig; + } + + public static Configuration getIndexWriterConnection(Configuration conf) { + Configuration clonedConfig = PropertiesUtil.cloneConfig(conf); + /* + * Set the rpc controller factory so that the HTables used by IndexWriter would set the correct + * priorities on the remote RPC calls. + */ + clonedConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, + InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class); + // lower the number of rpc retries. 
We inherit config from + // HConnectionManager#setServerSideHConnectionRetries, + // which by default uses a multiplier of 10. That is too many retries for our synchronous index + // writes + clonedConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + conf.getInt(INDEX_WRITER_RPC_RETRIES_NUMBER, DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER)); + clonedConfig.setInt(HConstants.HBASE_CLIENT_PAUSE, + conf.getInt(INDEX_WRITER_RPC_PAUSE, DEFAULT_INDEX_WRITER_RPC_PAUSE)); + return clonedConfig; + } + + public static Configuration getIndexWriterConfigurationWithCustomThreads(Configuration conf) { + Configuration clonedConfig = getIndexWriterConnection(conf); + setHTableThreads(clonedConfig); + return clonedConfig; + } + + private static void setHTableThreads(Configuration conf) { + // set the number of threads allowed per table. + int htableThreads = conf.getInt(IndexWriterUtils.INDEX_WRITER_PER_TABLE_THREADS_CONF_KEY, + IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS); + IndexManagementUtil.setIfNotSet(conf, IndexWriterUtils.HTABLE_THREAD_KEY, htableThreads); + } + + public static Configuration + getNoRetriesIndexWriterConfigurationWithCustomThreads(Configuration conf) { + Configuration clonedConf = getIndexWriterConfigurationWithCustomThreads(conf); + // note in HBase 2+, numTries = numRetries + 1 + // in prior versions, numTries = numRetries + clonedConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); + return clonedConf; + + } + + public static Throwable getExceptionFromFailedFuture(Future f) { + Throwable t = null; + try { + f.get(); + } catch (Exception e) { + t = e; } + return t; + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerViewUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerViewUtil.java index 47ac74ca19d..ad1504f97cb 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerViewUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ServerViewUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,106 +38,96 @@ import org.slf4j.LoggerFactory; public class ServerViewUtil extends ViewUtil { - private static final Logger logger = LoggerFactory.getLogger(ServerViewUtil.class); + private static final Logger logger = LoggerFactory.getLogger(ServerViewUtil.class); - /** - * Attempt to drop an orphan child view i.e. a child view for which we see a - * {@code parent->child } entry - * in SYSTEM.CHILD_LINK/SYSTEM.CATALOG (as a child) but for whom the parent no longer exists. 
- * @param env Region Coprocessor environment - * @param tenantIdBytes tenantId of the parent - * @param schemaName schema of the parent - * @param tableOrViewName parent table/view name - * @param sysCatOrSysChildLink SYSTEM.CATALOG or SYSTEM.CHILD_LINK which is used to find the - * {@code parent->child } linking rows - * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG - * @throws SQLException thrown if there is an error getting a connection to the server or - * an error retrieving the PTable for a child view - */ - public static void dropChildViews(RegionCoprocessorEnvironment env, byte[] tenantIdBytes, - byte[] schemaName, byte[] tableOrViewName, byte[] sysCatOrSysChildLink) - throws IOException, SQLException { - Table hTable = null; + /** + * Attempt to drop an orphan child view i.e. a child view for which we see a + * {@code parent->child } entry in SYSTEM.CHILD_LINK/SYSTEM.CATALOG (as a child) but for whom the + * parent no longer exists. + * @param env Region Coprocessor environment + * @param tenantIdBytes tenantId of the parent + * @param schemaName schema of the parent + * @param tableOrViewName parent table/view name + * @param sysCatOrSysChildLink SYSTEM.CATALOG or SYSTEM.CHILD_LINK which is used to find the + * {@code parent->child } linking rows + * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG + * @throws SQLException thrown if there is an error getting a connection to the server or an error + * retrieving the PTable for a child view + */ + public static void dropChildViews(RegionCoprocessorEnvironment env, byte[] tenantIdBytes, + byte[] schemaName, byte[] tableOrViewName, byte[] sysCatOrSysChildLink) + throws IOException, SQLException { + Table hTable = null; + try { + hTable = ServerUtil.getHTableForCoprocessorScan(env, + SchemaUtil.getPhysicalTableName(sysCatOrSysChildLink, env.getConfiguration())); + } catch (Exception e) { + logger.error("ServerUtil.getHTableForCoprocessorScan error!", e); + } + // if the SYSTEM.CATALOG or SYSTEM.CHILD_LINK doesn't exist just return + if (hTable == null) { + return; + } + + TableViewFinderResult childViewsResult; + try { + childViewsResult = ViewUtil.findImmediateRelatedViews(hTable, tenantIdBytes, schemaName, + tableOrViewName, PTable.LinkType.CHILD_TABLE, HConstants.LATEST_TIMESTAMP); + } finally { + hTable.close(); + } + + for (TableInfo viewInfo : childViewsResult.getLinks()) { + byte[] viewTenantId = viewInfo.getTenantId(); + byte[] viewSchemaName = viewInfo.getSchemaName(); + byte[] viewName = viewInfo.getTableName(); + if (logger.isDebugEnabled()) { + logger.debug("dropChildViews : " + Bytes.toString(schemaName) + "." + + Bytes.toString(tableOrViewName) + " -> " + Bytes.toString(viewSchemaName) + "." 
+ + Bytes.toString(viewName) + "with tenant id :" + Bytes.toString(viewTenantId)); + } + Properties props = new Properties(); + PTable view = null; + if (viewTenantId != null && viewTenantId.length != 0) + props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(viewTenantId)); + try (PhoenixConnection connection = QueryUtil + .getConnectionOnServer(props, env.getConfiguration()).unwrap(PhoenixConnection.class)) { try { - hTable = ServerUtil.getHTableForCoprocessorScan(env, SchemaUtil.getPhysicalTableName( - sysCatOrSysChildLink, env.getConfiguration())); - } catch (Exception e){ - logger.error("ServerUtil.getHTableForCoprocessorScan error!", e); + // Ensure that the view to be dropped has some ancestor that no longer exists + // (and thus will throw a TableNotFoundException). Otherwise, if we are looking + // at an orphan parent->child link, then the view might actually be a legitimate + // child view on another table/view and we should obviously not drop it + view = connection.getTableNoCache(SchemaUtil.getTableName(viewSchemaName, viewName)); + } catch (TableNotFoundException expected) { + // Expected for an orphan view since some ancestor was dropped earlier + logger.info("Found an expected orphan parent->child link keyed by the parent." + + " Parent Tenant Id: '" + Bytes.toString(tenantIdBytes) + "'. Parent Schema Name: '" + + Bytes.toString(schemaName) + "'. Parent Table/View Name: '" + + Bytes.toString(tableOrViewName) + + "'. Will attempt to drop this child view with ViewInfo: '" + viewInfo + "'."); } - // if the SYSTEM.CATALOG or SYSTEM.CHILD_LINK doesn't exist just return - if (hTable==null) { - return; + if (view != null) { + logger.error("Found an orphan parent->child link keyed by this parent or" + + " its descendant. Parent Tenant Id: '" + Bytes.toString(tenantIdBytes) + + "'. Parent Schema Name: '" + Bytes.toString(schemaName) + + "'. Parent Table/View Name: '" + Bytes.toString(tableOrViewName) + + "'. There currently exists a legitimate view of the same name whose" + + " parent hierarchy exists. View Info: '" + viewInfo + + "'. Ignoring this view and not attempting to drop it."); + continue; } - TableViewFinderResult childViewsResult; + MetaDataClient client = new MetaDataClient(connection); + org.apache.phoenix.parse.TableName viewTableName = org.apache.phoenix.parse.TableName + .create(Bytes.toString(viewSchemaName), Bytes.toString(viewName)); try { - childViewsResult = ViewUtil.findImmediateRelatedViews( - hTable, - tenantIdBytes, - schemaName, - tableOrViewName, - PTable.LinkType.CHILD_TABLE, - HConstants.LATEST_TIMESTAMP); - } finally { - hTable.close(); - } - - for (TableInfo viewInfo : childViewsResult.getLinks()) { - byte[] viewTenantId = viewInfo.getTenantId(); - byte[] viewSchemaName = viewInfo.getSchemaName(); - byte[] viewName = viewInfo.getTableName(); - if (logger.isDebugEnabled()) { - logger.debug("dropChildViews : " + Bytes.toString(schemaName) + "." - + Bytes.toString(tableOrViewName) + " -> " - + Bytes.toString(viewSchemaName) + "." 
+ Bytes.toString(viewName) - + "with tenant id :" + Bytes.toString(viewTenantId)); - } - Properties props = new Properties(); - PTable view = null; - if (viewTenantId != null && viewTenantId.length != 0) - props.setProperty(TENANT_ID_ATTRIB, Bytes.toString(viewTenantId)); - try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(props, - env.getConfiguration()).unwrap(PhoenixConnection.class)) { - try { - // Ensure that the view to be dropped has some ancestor that no longer exists - // (and thus will throw a TableNotFoundException). Otherwise, if we are looking - // at an orphan parent->child link, then the view might actually be a legitimate - // child view on another table/view and we should obviously not drop it - view = connection.getTableNoCache(SchemaUtil - .getTableName(viewSchemaName, viewName)); - } catch (TableNotFoundException expected) { - // Expected for an orphan view since some ancestor was dropped earlier - logger.info("Found an expected orphan parent->child link keyed by the parent." - + " Parent Tenant Id: '" + Bytes.toString(tenantIdBytes) - + "'. Parent Schema Name: '" + Bytes.toString(schemaName) - + "'. Parent Table/View Name: '" + Bytes.toString(tableOrViewName) - + "'. Will attempt to drop this child view with ViewInfo: '" - + viewInfo + "'."); - } - if (view != null) { - logger.error("Found an orphan parent->child link keyed by this parent or" - + " its descendant. Parent Tenant Id: '" + Bytes.toString(tenantIdBytes) - + "'. Parent Schema Name: '" + Bytes.toString(schemaName) - + "'. Parent Table/View Name: '" + Bytes.toString(tableOrViewName) - + "'. There currently exists a legitimate view of the same name whose" - + " parent hierarchy exists. View Info: '" + viewInfo - + "'. Ignoring this view and not attempting to drop it."); - continue; - } - - MetaDataClient client = new MetaDataClient(connection); - org.apache.phoenix.parse.TableName viewTableName = - org.apache.phoenix.parse.TableName.create(Bytes.toString(viewSchemaName), - Bytes.toString(viewName)); - try { - client.dropTable(new DropTableStatement(viewTableName, PTableType.VIEW, true, - true, true)); - } catch (TableNotFoundException e) { - logger.info("Ignoring view " + viewTableName - + " as it has already been dropped"); - } - } + client + .dropTable(new DropTableStatement(viewTableName, PTableType.VIEW, true, true, true)); + } catch (TableNotFoundException e) { + logger.info("Ignoring view " + viewTableName + " as it has already been dropped"); } + } } + } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java index 2b51ab18a1a..b85ccef909b 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,39 +32,39 @@ public class ZKBasedMasterElectionUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(ZKBasedMasterElectionUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(ZKBasedMasterElectionUtil.class); - public static boolean acquireLock(ZKWatcher zooKeeperWatcher, String parentNode, - String lockName) throws KeeperException, InterruptedException { - // Create the parent node as Persistent - LOGGER.info("Creating the parent lock node:" + parentNode); - ZKUtil.createWithParents(zooKeeperWatcher, parentNode); + public static boolean acquireLock(ZKWatcher zooKeeperWatcher, String parentNode, String lockName) + throws KeeperException, InterruptedException { + // Create the parent node as Persistent + LOGGER.info("Creating the parent lock node:" + parentNode); + ZKUtil.createWithParents(zooKeeperWatcher, parentNode); - // Create the ephemeral node - String lockNode = parentNode + "/" + lockName; - String nodeValue = getHostName() + "_" + UUID.randomUUID().toString(); - LOGGER.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue); - // Create the ephemeral node - try { - zooKeeperWatcher.getRecoverableZooKeeper().create(lockNode, Bytes.toBytes(nodeValue), - Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); - } catch (KeeperException.NodeExistsException e) { - LOGGER.info("Could not acquire lock. Another process had already acquired the lock on Node " - + lockName); - return false; - } - LOGGER.info("Obtained the lock :" + lockNode); - return true; + // Create the ephemeral node + String lockNode = parentNode + "/" + lockName; + String nodeValue = getHostName() + "_" + UUID.randomUUID().toString(); + LOGGER.info("Trying to acquire the lock by creating node:" + lockNode + " value:" + nodeValue); + // Create the ephemeral node + try { + zooKeeperWatcher.getRecoverableZooKeeper().create(lockNode, Bytes.toBytes(nodeValue), + Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); + } catch (KeeperException.NodeExistsException e) { + LOGGER.info("Could not acquire lock. Another process had already acquired the lock on Node " + + lockName); + return false; } + LOGGER.info("Obtained the lock :" + lockNode); + return true; + } - private static String getHostName() { - String host = ""; - try { - host = InetAddress.getLocalHost().getCanonicalHostName(); - } catch (UnknownHostException e) { - LOGGER.error("UnknownHostException while trying to get the Local Host address : ", e); - } - return host; + private static String getHostName() { + String host = ""; + try { + host = InetAddress.getLocalHost().getCanonicalHostName(); + } catch (UnknownHostException e) { + LOGGER.error("UnknownHostException while trying to get the Local Host address : ", e); } + return host; + } } diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml index 105402e2393..d9884b6041d 100644 --- a/phoenix-core/pom.xml +++ b/phoenix-core/pom.xml @@ -15,478 +15,452 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - - 4.0.0 - - org.apache.phoenix - phoenix - 5.3.0-SNAPSHOT - - phoenix-core - Phoenix Core - Phoenix Core, Unit and Integration tests + + 4.0.0 + + org.apache.phoenix + phoenix + 5.3.0-SNAPSHOT + + phoenix-core + Phoenix Core + Phoenix Core, Unit and Integration tests - - ${project.basedir}/.. - + + ${project.basedir}/.. + - - - - - org.apache.maven.plugins - maven-site-plugin - - - org.apache.maven.plugins - maven-jar-plugin - - - prepare-package - - - test-jar - - - - - org.apache.phoenix.util.GeneratePerformanceData - - - - - - - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - log4j.properties - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - - org.apache.omid:omid-commit-table - - - org.apache.omid:omid-transaction-client - - - org.apache.hbase:hbase-testing-util - - - org.apache.omid:omid-hbase-client - - - org.apache.zookeeper:zookeeper-jute - - - org.apache.phoenix:phoenix-hbase-compat-${hbase.compat.version} - - - org.apache.logging.log4j:log4j-api - - - org.apache.logging.log4j:log4j-core - - - org.apache.logging.log4j:log4j-slf4j-impl - - - org.apache.logging.log4j:log4j-1.2-api - - - - - - - - - create-phoenix-generated-classpath - - build-classpath - - - ${project.build.directory}/cached_classpath.txt - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - org.apache.maven.plugins - maven-failsafe-plugin - - - org.apache.rat - apache-rat-plugin - - - src/main/java/org/apache/phoenix/coprocessor/generated/*.java - src/main/resources/META-INF/services/java.sql.Driver - src/it/resources/*.json - - - - - - - - - org.apache.phoenix - phoenix-core-client - - - org.apache.phoenix - phoenix-core-server - + + + org.apache.phoenix + phoenix-core-client + + + org.apache.phoenix + phoenix-core-server + - - - org.apache.hadoop - hadoop-common - test - - - org.apache.hadoop - hadoop-auth - test - - - org.apache.hadoop - hadoop-yarn-api - test - - - org.apache.zookeeper - zookeeper - test - - - org.apache.hadoop - hadoop-hdfs-client - test - - - org.apache.hadoop - hadoop-mapreduce-client-core - test - + + + org.apache.hadoop + hadoop-common + test + + + org.apache.hadoop + hadoop-auth + test + + + org.apache.hadoop + hadoop-yarn-api + test + + + org.apache.zookeeper + zookeeper + test + + + org.apache.hadoop + hadoop-hdfs-client + test + + + org.apache.hadoop + hadoop-mapreduce-client-core + test + - - - org.apache.hbase - hbase-common - test - - - org.apache.hbase - hbase-server - test - - - org.apache.hbase - hbase-metrics-api - test - - - org.apache.hbase - hbase-protocol - test - - - org.apache.hbase - hbase-protocol-shaded - test - - - org.apache.hbase - hbase-client - test - - - org.apache.hbase - hbase-zookeeper - test - - - org.apache.hbase - hbase-hadoop2-compat - test - - - org.apache.hbase - hbase-mapreduce - test - + + + org.apache.hbase + hbase-common + test + + + org.apache.hbase + hbase-server + test + + + org.apache.hbase + hbase-metrics-api + test + + + org.apache.hbase + hbase-protocol + test + + + org.apache.hbase + hbase-protocol-shaded + test + + + org.apache.hbase + hbase-client + test + + + org.apache.hbase + hbase-zookeeper + test + + + org.apache.hbase + hbase-hadoop2-compat + test + + + org.apache.hbase + hbase-mapreduce + test + - - - org.apache.zookeeper - zookeeper-jute - test - + + + org.apache.zookeeper + zookeeper-jute + test + - - com.jayway.jsonpath - json-path - + + com.jayway.jsonpath + json-path + - - - org.mongodb - bson - + + + org.mongodb + bson + - - - org.apache.omid - omid-hbase-coprocessor - test - - - 
org.apache.omid - omid-commit-table - test - - - org.apache.omid - omid-transaction-client - test - - - org.apache.omid - omid-hbase-client - test - + + + org.apache.omid + omid-hbase-coprocessor + test + + + org.apache.omid + omid-commit-table + test + + + org.apache.omid + omid-transaction-client + test + + + org.apache.omid + omid-hbase-client + test + + + + org.apache.phoenix + phoenix-hbase-compat-${hbase.compat.version} + test + - - - org.apache.phoenix - phoenix-hbase-compat-${hbase.compat.version} - test - + + org.apache.phoenix.thirdparty + phoenix-shaded-guava + test + - - org.apache.phoenix.thirdparty - phoenix-shaded-guava - test - + + + org.apache.hadoop + hadoop-minikdc + test + + + org.apache.hadoop + hadoop-hdfs + test + + + org.apache.hadoop + hadoop-hdfs + tests + test-jar + test + + + org.apache.hadoop + hadoop-common + test-jar + test + - - - org.apache.hadoop - hadoop-minikdc - test - - - org.apache.hadoop - hadoop-hdfs - test - - - org.apache.hadoop - hadoop-hdfs - test-jar - tests - test - - - org.apache.hadoop - hadoop-common - test - test-jar - + + + org.apache.hbase + hbase-it + test-jar + test + + + org.apache.hbase + hbase-common + test-jar + test + + + org.apache.hbase + hbase-server + test-jar + test + + + org.apache.hbase + hbase-testing-util + test + - - - org.apache.hbase - hbase-it - test-jar - test - - - org.apache.hbase - hbase-common - test-jar - test - - - org.apache.hbase - hbase-server - test-jar - test - - - org.apache.hbase - hbase-testing-util - test - + + + org.apache.omid + omid-tso-server + test + + + org.apache.omid + omid-tso-server + test-jar + test + - - - org.apache.omid - omid-tso-server - test - - - org.apache.omid - omid-tso-server - test - test-jar - + + org.apache.curator + curator-client + test + + + org.apache.curator + curator-framework + test + - - org.apache.curator - curator-client - test - - - org.apache.curator - curator-framework - test - + + + org.apache.commons + commons-compress + + + org.apache.phoenix.thirdparty + phoenix-shaded-commons-cli + test + + + com.github.stephenc.findbugs + findbugs-annotations + test + + + com.google.code.findbugs + jsr305 + test + + + com.google.inject + guice + test + + + com.google.protobuf + protobuf-java + test + + + org.apache.htrace + htrace-core + test + + + org.slf4j + slf4j-api + test + + + org.apache.commons + commons-lang3 + test + + + com.ibm.icu + icu4j + test + + + joda-time + joda-time + test + + + commons-io + commons-io + test + + + org.apache.commons + commons-csv + test + + + commons-codec + commons-codec + test + + + org.hdrhistogram + HdrHistogram + test + + + commons-collections + commons-collections + test + + + junit + junit + test + + + org.mockito + mockito-core + test + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-annotations + + + + org.apache.logging.log4j + log4j-api + test + + + org.apache.logging.log4j + log4j-core + test + + + org.apache.logging.log4j + log4j-slf4j-impl + test + + + org.apache.logging.log4j + log4j-1.2-api + test + + + - - - org.apache.commons - commons-compress - - - org.apache.phoenix.thirdparty - phoenix-shaded-commons-cli - test - - - com.github.stephenc.findbugs - findbugs-annotations - test - - - com.google.code.findbugs - jsr305 - test - - - com.google.inject - guice - test - - - com.google.protobuf - protobuf-java - test - - - org.apache.htrace - htrace-core - test - - - org.slf4j - slf4j-api - test - - - org.apache.commons - commons-lang3 - 
test - - - com.ibm.icu - icu4j - test - - - joda-time - joda-time - test - - - commons-io - commons-io - test - - - org.apache.commons - commons-csv - test - - - commons-codec - commons-codec - test - - - org.hdrhistogram - HdrHistogram - test - - - commons-collections - commons-collections - test - - - junit - junit - test - - - org.mockito - mockito-core - test - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - - - org.apache.logging.log4j - log4j-api - test - - - org.apache.logging.log4j - log4j-core - test - - - org.apache.logging.log4j - log4j-slf4j-impl - test - - - org.apache.logging.log4j - log4j-1.2-api - test - - - + + + + + org.apache.maven.plugins + maven-site-plugin + + + org.apache.maven.plugins + maven-jar-plugin + + + + org/apache/jute/** + org/apache/zookeeper/** + **/*.jsp + log4j.properties + + + + + + test-jar + + prepare-package + + + + org.apache.phoenix.util.GeneratePerformanceData + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.apache.omid:omid-commit-table + org.apache.omid:omid-transaction-client + org.apache.hbase:hbase-testing-util + org.apache.omid:omid-hbase-client + org.apache.zookeeper:zookeeper-jute + org.apache.phoenix:phoenix-hbase-compat-${hbase.compat.version} + org.apache.logging.log4j:log4j-api + org.apache.logging.log4j:log4j-core + org.apache.logging.log4j:log4j-slf4j-impl + org.apache.logging.log4j:log4j-1.2-api + + + + + + + create-phoenix-generated-classpath + + build-classpath + + + ${project.build.directory}/cached_classpath.txt + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + + + org.apache.rat + apache-rat-plugin + + + src/main/java/org/apache/phoenix/coprocessor/generated/*.java + src/main/resources/META-INF/services/java.sql.Driver + src/it/resources/*.json + + + + + diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_add_data.txt b/phoenix-core/src/it/resources/gold_files/gold_query_add_data.txt index 70bc53f1255..503b03c732e 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_add_data.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_add_data.txt @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - + 'COUNT(1)' '7' 'ID','NAME' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_add_delete.txt b/phoenix-core/src/it/resources/gold_files/gold_query_add_delete.txt index 4fbc21a08c3..307d4688e47 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_add_delete.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_add_delete.txt @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - + 'COUNT(1)' '4' 'COUNT(1)' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_create_add.txt b/phoenix-core/src/it/resources/gold_files/gold_query_create_add.txt index 5b19443abca..cd93603078a 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_create_add.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_create_add.txt @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - + 'COUNT(1)' '3' 'ID','NAME' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt b/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt index 3607c2b31c3..36da41a53e4 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt @@ -19,4 +19,4 @@ 'COUNT(1)' '1' 'ID','NAME' -'a','a_name' \ No newline at end of file +'a','a_name' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_delete_for_splitable_syscat.txt b/phoenix-core/src/it/resources/gold_files/gold_query_delete_for_splitable_syscat.txt index 1f3d8e941ef..00988954b48 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_delete_for_splitable_syscat.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_delete_for_splitable_syscat.txt @@ -19,4 +19,4 @@ 'COUNT(1)' '1' 'ID','NAME' -'a','a_name' \ No newline at end of file +'a','a_name' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_orderby_nonpk.txt b/phoenix-core/src/it/resources/gold_files/gold_query_orderby_nonpk.txt index eb1e725922c..d8fd6a8d322 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_orderby_nonpk.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_orderby_nonpk.txt @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - + 'COUNT(COL2)' '25' 'ID','COL1','COL2' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_ordered_groupby.txt b/phoenix-core/src/it/resources/gold_files/gold_query_ordered_groupby.txt index a353748f60e..bbfa2f686bb 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_ordered_groupby.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_ordered_groupby.txt @@ -32,4 +32,4 @@ 'id6','60' 'id7','70' 'id8','80' -'id9','90' \ No newline at end of file +'id9','90' diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_view_index.txt b/phoenix-core/src/it/resources/gold_files/gold_query_view_index.txt index 5ea054978ff..0be616f88be 100644 --- a/phoenix-core/src/it/resources/gold_files/gold_query_view_index.txt +++ b/phoenix-core/src/it/resources/gold_files/gold_query_view_index.txt @@ -17,4 +17,4 @@ */ 'VIEW_INDEX_ID' -'-32768' \ No newline at end of file +'-32768' diff --git a/phoenix-core/src/it/resources/scripts/execute_query.sh b/phoenix-core/src/it/resources/scripts/execute_query.sh index 12cfa468bda..116851cf292 100644 --- a/phoenix-core/src/it/resources/scripts/execute_query.sh +++ b/phoenix-core/src/it/resources/scripts/execute_query.sh @@ -17,7 +17,7 @@ # limitations under the License. ########################################################################### -# This script is intended to run the sql queries in a file with the given client version +# This script is intended to run the sql queries in a file with the given client version zk_url=$1 client_group_id=$2 diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerTest.java index aa629c31184..d3607fbf999 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,165 +34,169 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler.Context; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.mockito.Mockito; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class PhoenixRpcSchedulerTest { - private static final Configuration conf = HBaseConfiguration.create(); - private static final InetSocketAddress isa = new InetSocketAddress("localhost", 0); - private class AbortServer implements Abortable { - private boolean aborted = false; - - @Override - public void abort(String why, Throwable e) { - aborted = true; - } + private static final Configuration conf = HBaseConfiguration.create(); + private static final InetSocketAddress isa = new InetSocketAddress("localhost", 0); - @Override - public boolean isAborted() { - return aborted; - } - } + private class AbortServer implements Abortable { + private boolean aborted = false; - /** - * Test that the rpc scheduler schedules index writes to the index handler queue and sends - * everything else to the standard queues - */ - @Test - public void testIndexPriorityWritesToIndexHandler() throws Exception { - RpcScheduler mock = Mockito.mock(RpcScheduler.class); - PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); - Abortable abortable = new AbortServer(); - PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250, 225, 230, qosFunction,abortable); - BalancedQueueRpcExecutor executor = new BalancedQueueRpcExecutor("test-queue", 1, 1,qosFunction,conf,abortable); - scheduler.setIndexExecutorForTesting(executor); - dispatchCallWithPriority(scheduler, 200); - List> queues = executor.getQueues(); - assertEquals(1, queues.size()); - BlockingQueue queue = queues.get(0); - assertNotNull(queue.poll(5, TimeUnit.SECONDS)); - - // try again, this time we tweak the ranges we support - scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110, 105, 115, qosFunction,abortable); - scheduler.setIndexExecutorForTesting(executor); - dispatchCallWithPriority(scheduler, 101); - assertNotNull(queue.poll(5, TimeUnit.SECONDS)); - - Mockito.verify(mock, Mockito.times(2)).init(Mockito.any(Context.class)); - scheduler.stop(); - executor.stop(); + @Override + public void abort(String why, Throwable e) { + aborted = true; } - @Test - public void testServerSideRPCalls() throws Exception { - RpcScheduler mock = Mockito.mock(RpcScheduler.class); - PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); - Abortable abortable = new AbortServer(); - PhoenixRpcScheduler scheduler1 = new PhoenixRpcScheduler(conf, mock, 200, 250, 100, 300, qosFunction,abortable); - RpcExecutor executor1 = scheduler1.getServerSideExecutorForTesting(); - for (int c = 0; c < 10; c++) { - dispatchCallWithPriority(scheduler1, 100); - } - List> queues1 = executor1.getQueues(); - int numDispatches1 = 0; - for (BlockingQueue queue1 : queues1) { - if (queue1.size() > 0) { - numDispatches1 += queue1.size(); - for (int i = 0; i < queue1.size(); i++) { - assertNotNull(queue1.poll(5, TimeUnit.SECONDS)); - } - } - } - assertEquals(10, numDispatches1); - scheduler1.stop(); - - // try again, with the incorrect executor - PhoenixRpcScheduler scheduler2 = new PhoenixRpcScheduler(conf, mock, 101, 110, 50, 25, 
qosFunction,abortable); - RpcExecutor executor2 = scheduler2.getIndexExecutorForTesting(); - dispatchCallWithPriority(scheduler2, 50); - List> queues2 = executor2.getQueues(); - int numDispatches2 = 0; - for (BlockingQueue queue2 : queues2) { - if (queue2.size() > 0) { - numDispatches2++; - assertNotNull(queue2.poll(5, TimeUnit.SECONDS)); - } - } - assertEquals(0, numDispatches2); - scheduler2.stop(); - - Mockito.verify(mock, Mockito.times(numDispatches1+1)).init(Mockito.any(Context.class)); - //Verify no dispatches to the default delegate handler - Mockito.verify(mock, Mockito.times(0)).dispatch(Mockito.any(CallRunner.class)); + @Override + public boolean isAborted() { + return aborted; } - - /** - * Test that we delegate to the passed {@link RpcScheduler} when the call priority is outside - * the index range - * @throws Exception - */ - @Test - public void testDelegateWhenOutsideRange() throws Exception { - PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); - Abortable abortable = new AbortServer(); - RpcScheduler mock = Mockito.mock(RpcScheduler.class); - PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250, 225, 275, qosFunction,abortable); - dispatchCallWithPriority(scheduler, 100); - dispatchCallWithPriority(scheduler, 251); - - // try again, this time we tweak the ranges we support - scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110, 105, 115, qosFunction,abortable); - dispatchCallWithPriority(scheduler, 200); - dispatchCallWithPriority(scheduler, 111); - - Mockito.verify(mock, Mockito.times(4)).init(Mockito.any(Context.class)); - Mockito.verify(mock, Mockito.times(4)).dispatch(Mockito.any(CallRunner.class)); - scheduler.stop(); + } + + /** + * Test that the rpc scheduler schedules index writes to the index handler queue and sends + * everything else to the standard queues + */ + @Test + public void testIndexPriorityWritesToIndexHandler() throws Exception { + RpcScheduler mock = Mockito.mock(RpcScheduler.class); + PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); + Abortable abortable = new AbortServer(); + PhoenixRpcScheduler scheduler = + new PhoenixRpcScheduler(conf, mock, 200, 250, 225, 230, qosFunction, abortable); + BalancedQueueRpcExecutor executor = + new BalancedQueueRpcExecutor("test-queue", 1, 1, qosFunction, conf, abortable); + scheduler.setIndexExecutorForTesting(executor); + dispatchCallWithPriority(scheduler, 200); + List> queues = executor.getQueues(); + assertEquals(1, queues.size()); + BlockingQueue queue = queues.get(0); + assertNotNull(queue.poll(5, TimeUnit.SECONDS)); + + // try again, this time we tweak the ranges we support + scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110, 105, 115, qosFunction, abortable); + scheduler.setIndexExecutorForTesting(executor); + dispatchCallWithPriority(scheduler, 101); + assertNotNull(queue.poll(5, TimeUnit.SECONDS)); + + Mockito.verify(mock, Mockito.times(2)).init(Mockito.any(Context.class)); + scheduler.stop(); + executor.stop(); + } + + @Test + public void testServerSideRPCalls() throws Exception { + RpcScheduler mock = Mockito.mock(RpcScheduler.class); + PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); + Abortable abortable = new AbortServer(); + PhoenixRpcScheduler scheduler1 = + new PhoenixRpcScheduler(conf, mock, 200, 250, 100, 300, qosFunction, abortable); + RpcExecutor executor1 = scheduler1.getServerSideExecutorForTesting(); + for (int c = 0; c < 10; c++) { + dispatchCallWithPriority(scheduler1, 100); } - - /** - * Test 
that the rpc scheduler schedules invalidate metadata cache RPC to - * the invalidate metadata cache executor. - */ - @Test - public void testInvalidateMetadataCacheExecutor() throws Exception { - RpcScheduler mock = Mockito.mock(RpcScheduler.class); - PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); - Abortable abortable = new AbortServer(); - // Set invalidate metadata cache priority to 230. - int invalidateMetadataCacheCallPriority = 230; - PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, - 200, 250, 225, invalidateMetadataCacheCallPriority, qosFunction,abortable); - BalancedQueueRpcExecutor executor = new BalancedQueueRpcExecutor("test-queue", - 1, 1, qosFunction, conf, abortable); - scheduler.setInvalidateMetadataCacheExecutorForTesting(executor); - dispatchCallWithPriority(scheduler, invalidateMetadataCacheCallPriority); - List> queues = executor.getQueues(); - assertEquals(1, queues.size()); - BlockingQueue queue = queues.get(0); - assertEquals(1, queue.size()); - assertNotNull(queue.poll(5, TimeUnit.SECONDS)); - Mockito.verify(mock, Mockito.times(1)).init(Mockito.any(RpcScheduler.Context.class)); - scheduler.stop(); - executor.stop(); + List> queues1 = executor1.getQueues(); + int numDispatches1 = 0; + for (BlockingQueue queue1 : queues1) { + if (queue1.size() > 0) { + numDispatches1 += queue1.size(); + for (int i = 0; i < queue1.size(); i++) { + assertNotNull(queue1.poll(5, TimeUnit.SECONDS)); + } + } } - - private void dispatchCallWithPriority(RpcScheduler scheduler, int priority) throws Exception { - CallRunner task = Mockito.mock(CallRunner.class); - RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build(); - RpcServer server = RpcServerFactory.createRpcServer(null, "test-rpcserver", Lists.newArrayList(new BlockingServiceAndInterface( - SERVICE, null)), isa, conf, scheduler); - ServerCall call = Mockito.mock(ServerCall.class); - when(call.getHeader()).thenReturn(header); - when(call.getRequestUser()).thenReturn(Optional.empty()); - Mockito.when(task.getRpcCall()).thenReturn(call); - - scheduler.dispatch(task); - - Mockito.verify(task).getRpcCall(); - Mockito.verifyNoMoreInteractions(task); - server.stop(); + assertEquals(10, numDispatches1); + scheduler1.stop(); + + // try again, with the incorrect executor + PhoenixRpcScheduler scheduler2 = + new PhoenixRpcScheduler(conf, mock, 101, 110, 50, 25, qosFunction, abortable); + RpcExecutor executor2 = scheduler2.getIndexExecutorForTesting(); + dispatchCallWithPriority(scheduler2, 50); + List> queues2 = executor2.getQueues(); + int numDispatches2 = 0; + for (BlockingQueue queue2 : queues2) { + if (queue2.size() > 0) { + numDispatches2++; + assertNotNull(queue2.poll(5, TimeUnit.SECONDS)); + } } -} \ No newline at end of file + assertEquals(0, numDispatches2); + scheduler2.stop(); + + Mockito.verify(mock, Mockito.times(numDispatches1 + 1)).init(Mockito.any(Context.class)); + // Verify no dispatches to the default delegate handler + Mockito.verify(mock, Mockito.times(0)).dispatch(Mockito.any(CallRunner.class)); + } + + /** + * Test that we delegate to the passed {@link RpcScheduler} when the call priority is outside the + * index range + */ + @Test + public void testDelegateWhenOutsideRange() throws Exception { + PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); + Abortable abortable = new AbortServer(); + RpcScheduler mock = Mockito.mock(RpcScheduler.class); + PhoenixRpcScheduler scheduler = + new PhoenixRpcScheduler(conf, mock, 200, 250, 225, 275, 
qosFunction, abortable); + dispatchCallWithPriority(scheduler, 100); + dispatchCallWithPriority(scheduler, 251); + + // try again, this time we tweak the ranges we support + scheduler = new PhoenixRpcScheduler(conf, mock, 101, 110, 105, 115, qosFunction, abortable); + dispatchCallWithPriority(scheduler, 200); + dispatchCallWithPriority(scheduler, 111); + + Mockito.verify(mock, Mockito.times(4)).init(Mockito.any(Context.class)); + Mockito.verify(mock, Mockito.times(4)).dispatch(Mockito.any(CallRunner.class)); + scheduler.stop(); + } + + /** + * Test that the rpc scheduler schedules invalidate metadata cache RPC to the invalidate metadata + * cache executor. + */ + @Test + public void testInvalidateMetadataCacheExecutor() throws Exception { + RpcScheduler mock = Mockito.mock(RpcScheduler.class); + PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class); + Abortable abortable = new AbortServer(); + // Set invalidate metadata cache priority to 230. + int invalidateMetadataCacheCallPriority = 230; + PhoenixRpcScheduler scheduler = new PhoenixRpcScheduler(conf, mock, 200, 250, 225, + invalidateMetadataCacheCallPriority, qosFunction, abortable); + BalancedQueueRpcExecutor executor = + new BalancedQueueRpcExecutor("test-queue", 1, 1, qosFunction, conf, abortable); + scheduler.setInvalidateMetadataCacheExecutorForTesting(executor); + dispatchCallWithPriority(scheduler, invalidateMetadataCacheCallPriority); + List> queues = executor.getQueues(); + assertEquals(1, queues.size()); + BlockingQueue queue = queues.get(0); + assertEquals(1, queue.size()); + assertNotNull(queue.poll(5, TimeUnit.SECONDS)); + Mockito.verify(mock, Mockito.times(1)).init(Mockito.any(RpcScheduler.Context.class)); + scheduler.stop(); + executor.stop(); + } + + private void dispatchCallWithPriority(RpcScheduler scheduler, int priority) throws Exception { + CallRunner task = Mockito.mock(CallRunner.class); + RequestHeader header = RequestHeader.newBuilder().setPriority(priority).build(); + RpcServer server = RpcServerFactory.createRpcServer(null, "test-rpcserver", + Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)), isa, conf, scheduler); + ServerCall call = Mockito.mock(ServerCall.class); + when(call.getHeader()).thenReturn(header); + when(call.getRequestUser()).thenReturn(Optional.empty()); + Mockito.when(task.getRpcCall()).thenReturn(call); + + scheduler.dispatch(task); + + Mockito.verify(task).getRpcCall(); + Mockito.verifyNoMoreInteractions(task); + server.stop(); + } +} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java index eb28c8df422..b205a4332f6 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,98 +28,96 @@ public class PhoenixRpcSchedulerFactoryTest { - @Test - public void ensureInstantiation() throws Exception { - Configuration conf = new Configuration(false); - conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, - PhoenixRpcSchedulerFactory.class, RpcSchedulerFactory.class); - // kinda lame that we copy the copy from the regionserver to do this and can't use a static - // method, but meh - try { - Class rpcSchedulerFactoryClass = - conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, - SimpleRpcSchedulerFactory.class); - Object o = rpcSchedulerFactoryClass.newInstance(); - assertTrue(o instanceof PhoenixRpcSchedulerFactory); - } catch (InstantiationException e) { - assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e, - false); - } catch (IllegalAccessException e) { - assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e, - false); - } + @Test + public void ensureInstantiation() throws Exception { + Configuration conf = new Configuration(false); + conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, + PhoenixRpcSchedulerFactory.class, RpcSchedulerFactory.class); + // kinda lame that we copy the copy from the regionserver to do this and can't use a static + // method, but meh + try { + Class rpcSchedulerFactoryClass = conf.getClass( + RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class); + Object o = rpcSchedulerFactoryClass.newInstance(); + assertTrue(o instanceof PhoenixRpcSchedulerFactory); + } catch (InstantiationException e) { + assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e, + false); + } catch (IllegalAccessException e) { + assertTrue("Should not have got an exception when instantiing the rpc scheduler: " + e, + false); } + } - /** - * Ensure that we can't configure the index and metadata priority ranges inside the hbase ranges - * @throws Exception - */ - @Test - public void testValidateRpcPriorityRanges() throws Exception { - Configuration conf = new Configuration(false); - // standard configs should be fine - PhoenixRpcSchedulerFactory factory = new PhoenixRpcSchedulerFactory(); - factory.create(conf, null); + /** + * Ensure that we can't configure the index and metadata priority ranges inside the hbase ranges + */ + @Test + public void testValidateRpcPriorityRanges() throws Exception { + Configuration conf = new Configuration(false); + // standard configs should be fine + PhoenixRpcSchedulerFactory factory = new PhoenixRpcSchedulerFactory(); + factory.create(conf, null); - // test priorities less than HBase range - setPriorities(conf, -4, -1); - factory.create(conf, null); + // test priorities less than HBase range + setPriorities(conf, -4, -1); + factory.create(conf, null); - // test priorities greater than HBase range - setPriorities(conf, 1001, 1002); - factory.create(conf, null); + // test priorities greater than HBase range + setPriorities(conf, 1001, 1002); + factory.create(conf, null); - // test priorities in HBase range - setPriorities(conf, 1, 201); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // expected - } - setPriorities(conf, 1001, 1); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // 
expected - } - - // test priorities in HBase range - setPriorities(conf, 1001, HConstants.NORMAL_QOS); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // expected - } - setPriorities(conf, HConstants.NORMAL_QOS, 1001); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // expected - } - - // test priorities in HBase range - setPriorities(conf, 1001, HConstants.HIGH_QOS); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // expected - } - setPriorities(conf, HConstants.HIGH_QOS, 1001); - try { - factory.create(conf, null); - fail("Should not have allowed priorities in HBase range"); - } catch (IllegalArgumentException e) { - // expected - } + // test priorities in HBase range + setPriorities(conf, 1, 201); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected + } + setPriorities(conf, 1001, 1); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected + } + + // test priorities in HBase range + setPriorities(conf, 1001, HConstants.NORMAL_QOS); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected + } + setPriorities(conf, HConstants.NORMAL_QOS, 1001); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected } - private void setPriorities(Configuration conf, int indexPrioritymin, int metadataPriority) { - conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, indexPrioritymin); - conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, metadataPriority); + // test priorities in HBase range + setPriorities(conf, 1001, HConstants.HIGH_QOS); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected + } + setPriorities(conf, HConstants.HIGH_QOS, 1001); + try { + factory.create(conf, null); + fail("Should not have allowed priorities in HBase range"); + } catch (IllegalArgumentException e) { + // expected } -} \ No newline at end of file + } + + private void setPriorities(Configuration conf, int indexPrioritymin, int metadataPriority) { + conf.setInt(QueryServices.INDEX_PRIOIRTY_ATTRIB, indexPrioritymin); + conf.setInt(QueryServices.METADATA_PRIOIRTY_ATTRIB, metadataPriority); + } +} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java index 060c57e3c7a..5e8133cdcb6 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,9 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -27,68 +33,61 @@ import org.junit.Assert; import org.junit.Test; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - public class IndexedKeyValueTest { - private static final byte[] ROW_KEY = Bytes.toBytes("foo"); - private static final byte[] FAMILY = Bytes.toBytes("family"); - private static final byte[] QUALIFIER = Bytes.toBytes("qualifier"); - private static final byte[] VALUE = Bytes.toBytes("value"); - private static final byte[] TABLE_NAME = Bytes.toBytes("MyTableName"); + private static final byte[] ROW_KEY = Bytes.toBytes("foo"); + private static final byte[] FAMILY = Bytes.toBytes("family"); + private static final byte[] QUALIFIER = Bytes.toBytes("qualifier"); + private static final byte[] VALUE = Bytes.toBytes("value"); + private static final byte[] TABLE_NAME = Bytes.toBytes("MyTableName"); - @Test - public void testIndexedKeyValueExceptionWhenMutationEmpty() throws IOException { - boolean caughtNullMutation = false, caughtNullEntry = false; - try { - IndexedKeyValue ikv = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, null); - } catch (IllegalArgumentException iae){ - caughtNullMutation = true; - } - try { - Mutation m = new Put(ROW_KEY); - IndexedKeyValue ikv = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, m); - } catch (IllegalArgumentException iae){ - caughtNullEntry = true; - } - //no need to test adding a mutation with a Cell with just a row key; HBase will put in - //a default cell with family byte[0], qualifier and value of "", and LATEST_TIMESTAMP + @Test + public void testIndexedKeyValueExceptionWhenMutationEmpty() throws IOException { + boolean caughtNullMutation = false, caughtNullEntry = false; + try { + IndexedKeyValue ikv = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, null); + } catch (IllegalArgumentException iae) { + caughtNullMutation = true; + } + try { + Mutation m = new Put(ROW_KEY); + IndexedKeyValue ikv = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, m); + } catch (IllegalArgumentException iae) { + caughtNullEntry = true; + } + // no need to test adding a mutation with a Cell with just a row key; HBase 
will put in + // a default cell with family byte[0], qualifier and value of "", and LATEST_TIMESTAMP - Assert.assertTrue(caughtNullMutation & caughtNullEntry); + Assert.assertTrue(caughtNullMutation & caughtNullEntry); - } + } - @Test - public void testIndexedKeyValuePopulatesKVFields() throws Exception { - byte[] row = (ROW_KEY); - Put mutation = new Put(row); - mutation.addColumn(FAMILY, QUALIFIER, VALUE); - IndexedKeyValue indexedKeyValue = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, mutation); - testIndexedKeyValueHelper(indexedKeyValue, row, TABLE_NAME, mutation); + @Test + public void testIndexedKeyValuePopulatesKVFields() throws Exception { + byte[] row = (ROW_KEY); + Put mutation = new Put(row); + mutation.addColumn(FAMILY, QUALIFIER, VALUE); + IndexedKeyValue indexedKeyValue = IndexedKeyValue.newIndexedKeyValue(TABLE_NAME, mutation); + testIndexedKeyValueHelper(indexedKeyValue, row, TABLE_NAME, mutation); - //now serialize the IndexedKeyValue and make sure the deserialized copy also - //has all the right fields - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream out = new DataOutputStream(baos); - KeyValueCodec.write(out, indexedKeyValue); + // now serialize the IndexedKeyValue and make sure the deserialized copy also + // has all the right fields + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + KeyValueCodec.write(out, indexedKeyValue); - IndexedKeyValue deSerializedKV = (IndexedKeyValue) - KeyValueCodec.readKeyValue(new DataInputStream( - new ByteArrayInputStream(baos.toByteArray()))); - testIndexedKeyValueHelper(deSerializedKV, row, TABLE_NAME, mutation); + IndexedKeyValue deSerializedKV = (IndexedKeyValue) KeyValueCodec + .readKeyValue(new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))); + testIndexedKeyValueHelper(deSerializedKV, row, TABLE_NAME, mutation); - } + } - private void testIndexedKeyValueHelper(IndexedKeyValue indexedKeyValue, byte[] row, - byte[] tableNameBytes, Mutation mutation) { - Assert.assertArrayEquals(row, CellUtil.cloneRow(indexedKeyValue)); - Assert.assertArrayEquals(tableNameBytes, indexedKeyValue.getIndexTable()); - Assert.assertEquals(mutation.toString(), indexedKeyValue.getMutation().toString()); - Assert.assertArrayEquals(WALEdit.METAFAMILY, CellUtil.cloneFamily(indexedKeyValue)); - } + private void testIndexedKeyValueHelper(IndexedKeyValue indexedKeyValue, byte[] row, + byte[] tableNameBytes, Mutation mutation) { + Assert.assertArrayEquals(row, CellUtil.cloneRow(indexedKeyValue)); + Assert.assertArrayEquals(tableNameBytes, indexedKeyValue.getIndexTable()); + Assert.assertEquals(mutation.toString(), indexedKeyValue.getMutation().toString()); + Assert.assertArrayEquals(WALEdit.METAFAMILY, CellUtil.cloneFamily(indexedKeyValue)); + } } diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodecTest.java index ee726bba6b0..c0b400e4f32 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodecTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodecTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,11 +23,12 @@ public class IndexedWALEditCodecTest { - @SuppressWarnings("unused") - @Test - public void testConstructorsArePresent() throws Exception { - // "testing" via the presence of these constructors - IndexedWALEditCodec codec1 = new IndexedWALEditCodec(); - IndexedWALEditCodec codec2 = new IndexedWALEditCodec(new Configuration(false), new CompressionContext(LRUDictionary.class, false, false)); - } + @SuppressWarnings("unused") + @Test + public void testConstructorsArePresent() throws Exception { + // "testing" via the presence of these constructors + IndexedWALEditCodec codec1 = new IndexedWALEditCodec(); + IndexedWALEditCodec codec2 = new IndexedWALEditCodec(new Configuration(false), + new CompressionContext(LRUDictionary.class, false, false)); + } } diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java index f695578b304..66c8ea4859a 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ReadWriteKeyValuesWithCodecTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import static org.junit.Assert.assertEquals; @@ -85,9 +84,7 @@ public void testWithCompression() throws Exception { writeReadAndVerify(compression, fs, edits, testFile); } - /** - * @return a bunch of {@link WALEdit}s that test a range of serialization possibilities. - */ + /** Returns a bunch of {@link WALEdit}s that test a range of serialization possibilities. 
*/ private List getEdits() { // Build up a couple of edits List edits = new ArrayList(); @@ -103,12 +100,12 @@ private List getEdits() { WALEdit withDelete = new WALEdit(); addMutation(withDelete, d, FAMILY); edits.add(withDelete); - + WALEdit withPutsAndDeletes = new WALEdit(); addMutation(withPutsAndDeletes, d, FAMILY); addMutation(withPutsAndDeletes, p, FAMILY); edits.add(withPutsAndDeletes); - + WALEdit justIndexUpdates = new WALEdit(); byte[] table = Bytes.toBytes("targetTable"); @@ -136,24 +133,24 @@ private void addMutation(WALEdit edit, Mutation m, byte[] family) { } } - - private void writeWALEdit(WALCellCodec codec, List kvs, FSDataOutputStream out) throws IOException { + private void writeWALEdit(WALCellCodec codec, List kvs, FSDataOutputStream out) + throws IOException { out.writeInt(kvs.size()); Codec.Encoder cellEncoder = codec.getEncoder(out); // We interleave the two lists for code simplicity for (Cell kv : kvs) { - cellEncoder.write(kv); + cellEncoder.write(kv); } } - + /** * Write the edits to the specified path on the {@link FileSystem} using the given codec and then * read them back in and ensure that we read the same thing we wrote. */ - private void writeReadAndVerify(final CompressionContext compressionContext, FileSystem fs, List edits, - Path testFile) throws IOException { - - WALCellCodec codec = WALCellCodec.create(UTIL.getConfiguration(), compressionContext); + private void writeReadAndVerify(final CompressionContext compressionContext, FileSystem fs, + List edits, Path testFile) throws IOException { + + WALCellCodec codec = WALCellCodec.create(UTIL.getConfiguration(), compressionContext); // write the edits out FSDataOutputStream out = fs.create(testFile); for (WALEdit edit : edits) { @@ -173,13 +170,14 @@ private void writeReadAndVerify(final CompressionContext compressionContext, Fil in.close(); // make sure the read edits match the written - for(int i=0; i< edits.size(); i++){ + for (int i = 0; i < edits.size(); i++) { WALEdit expected = edits.get(i); WALEdit found = read.get(i); - for(int j=0; j< expected.getCells().size(); j++){ + for (int j = 0; j < expected.getCells().size(); j++) { Cell fkv = found.getCells().get(j); Cell ekv = expected.getCells().get(j); - assertEquals("KV mismatch for edit! Expected: "+expected+", but found: "+found, ekv, fkv); + assertEquals("KV mismatch for edit! Expected: " + expected + ", but found: " + found, ekv, + fkv); } } } diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java index f4dfd74a016..981efbb5d96 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,13 +24,9 @@ */ public class ExposedMetricCounterLong extends MetricCounterLong { - - /** - * @param info - * @param value */ public ExposedMetricCounterLong(MetricsInfo info, long value) { super(info, value); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java index c5f54e60784..9337abda465 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,13 +30,9 @@ public class ExposedMetricsRecordImpl extends MetricsRecordImpl { /** - * @param info - * @param timestamp - * @param tags - * @param metrics */ public ExposedMetricsRecordImpl(MetricsInfo info, long timestamp, List tags, - Iterable metrics) { + Iterable metrics) { super(info, timestamp, tags, metrics); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java index 1ad1553662f..43cafcd6d9e 100644 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java +++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,18 +17,14 @@ */ package org.apache.hadoop.metrics2.lib; -import org.apache.hadoop.metrics2.lib.MetricsInfoImpl; - /** * Helper class to expose access to the {@link org.apache.hadoop.metrics2.lib.MetricsInfoImpl} */ public class ExposedMetricsInfoImpl extends MetricsInfoImpl { - /** - * @param name - * @param description - */ - public ExposedMetricsInfoImpl(String name, String description) { - super(name, description); - } -} \ No newline at end of file + /** + */ + public ExposedMetricsInfoImpl(String name, String description) { + super(name, description); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/SystemExitRule.java b/phoenix-core/src/test/java/org/apache/phoenix/SystemExitRule.java index f3e1092d266..190280842b6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/SystemExitRule.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/SystemExitRule.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix; import org.junit.rules.TestRule; @@ -24,42 +24,41 @@ import org.slf4j.LoggerFactory; /** - * Test rule that prevents System.exit / JVM exit to error out the test runner, which manages - * JVM and providing test output files, and instead throw valid Exception to handle JVM exit - * gracefully + * Test rule that prevents System.exit / JVM exit to error out the test runner, which manages JVM + * and providing test output files, and instead throw valid Exception to handle JVM exit gracefully */ public class SystemExitRule implements TestRule { - private static final Logger LOGGER = LoggerFactory.getLogger(SystemExitRule.class); - private static final SecurityManager SECURITY_MANAGER = new TestSecurityManager(); + private static final Logger LOGGER = LoggerFactory.getLogger(SystemExitRule.class); + private static final SecurityManager SECURITY_MANAGER = new TestSecurityManager(); - @Override - public Statement apply(final Statement s, Description d) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - try { - System.setSecurityManager(SECURITY_MANAGER); - s.evaluate(); - } catch (UnsupportedOperationException e) { - LOGGER.warn("Was unable to set SecurityManager, JVM exits in tests will not be" - + "handled correctly ", e); - } finally { - try { - System.setSecurityManager(null); - } catch (UnsupportedOperationException e) { - //We have logged a warning above already - } - } - } + @Override + public Statement apply(final Statement s, Description d) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + try { + System.setSecurityManager(SECURITY_MANAGER); + s.evaluate(); + } catch (UnsupportedOperationException e) { + LOGGER.warn("Was unable to set SecurityManager, JVM exits in tests will not be" + + "handled correctly ", e); + } finally { + try { + System.setSecurityManager(null); + } catch (UnsupportedOperationException e) { + // We have logged a warning above already + } + } + } - }; - } + }; + } - // Exiting the JVM is not allowed in tests and this exception is thrown instead - // when it is done - public static class SystemExitInTestException extends SecurityException { - // empty - } + // Exiting the JVM is not allowed in tests and this exception is thrown instead + // when it is done + public static class SystemExitInTestException extends SecurityException { + // empty + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/TestJVMExit.java b/phoenix-core/src/test/java/org/apache/phoenix/TestJVMExit.java index 
1fd1b72fe6c..c4833b7a356 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/TestJVMExit.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/TestJVMExit.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,17 +22,17 @@ public class TestJVMExit { - @ClassRule - public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); + @ClassRule + public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); - @Test(expected = SystemExitRule.SystemExitInTestException.class) - public void testSystemExit() { - System.exit(10); - } + @Test(expected = SystemExitRule.SystemExitInTestException.class) + public void testSystemExit() { + System.exit(10); + } - @Test(expected = SystemExitRule.SystemExitInTestException.class) - public void testRuntimeHalt() { - Runtime.getRuntime().halt(10); - } + @Test(expected = SystemExitRule.SystemExitInTestException.class) + public void testRuntimeHalt() { + Runtime.getRuntime().halt(10); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/TestSecurityManager.java b/phoenix-core/src/test/java/org/apache/phoenix/TestSecurityManager.java index 643cea6dff0..0ca866257df 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/TestSecurityManager.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/TestSecurityManager.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix; import java.io.FileDescriptor; @@ -26,114 +26,114 @@ */ class TestSecurityManager extends SecurityManager { - @Override - public void checkExit(int status) { - throw new SystemExitRule.SystemExitInTestException(); - } - - @Override - public void checkPermission(Permission permission) { - // no-op - } - - @Override - public void checkPermission(Permission var1, Object var2) { - // no-op - } - - @Override - public void checkSecurityAccess(String var1) { - // no-op - } - - @Override - public void checkConnect(String var1, int var2, Object var3) { - // no-op - } - - @Override - public void checkWrite(String var1) { - // no-op - } - - @Override - public void checkDelete(String var1) { - // no-op - } - - @Override - public void checkConnect(String var1, int var2) { - // no-op - } - - @Override - public void checkLink(String var1) { - // no-op - } - - @Override - public void checkRead(FileDescriptor var1) { - // no-op - } - - @Override - public void checkAccess(Thread var1) { - // no-op - } - - @Override - public void checkAccess(ThreadGroup var1) { - // no-op - } - - @Override - public void checkCreateClassLoader() { - // no-op - } - - @Override - public void checkListen(int var1) { - // no-op - } - - @Override - public void checkAccept(String var1, int var2) { - // no-op - } - - @Override - public void checkMulticast(InetAddress var1) { - // no-op - } - - @Override - public void checkMulticast(InetAddress var1, byte var2) { - // no-op - } - - @Override - public void checkPropertiesAccess() { - // no-op - } - - @Override - public void checkPropertyAccess(String var1) { - // no-op - } - - @Override - public void checkPackageAccess(String var1) { - // no-op - } - - @Override - public void checkPackageDefinition(String var1) { - // no-op - } - - @Override - public void checkSetFactory() { - // no-op - } + @Override + public void checkExit(int status) { + throw new SystemExitRule.SystemExitInTestException(); + } + + @Override + public void checkPermission(Permission permission) { + // no-op + } + + @Override + public void checkPermission(Permission var1, Object var2) { + // no-op + } + + @Override + public void checkSecurityAccess(String var1) { + // no-op + } + + @Override + public void checkConnect(String var1, int var2, Object var3) { + // no-op + } + + @Override + public void checkWrite(String var1) { + // no-op + } + + @Override + public void checkDelete(String var1) { + // no-op + } + + @Override + public void checkConnect(String var1, int var2) { + // no-op + } + + @Override + public void checkLink(String var1) { + // no-op + } + + @Override + public void checkRead(FileDescriptor var1) { + // no-op + } + + @Override + public void checkAccess(Thread var1) { + // no-op + } + + @Override + public void checkAccess(ThreadGroup var1) { + // no-op + } + + @Override + public void checkCreateClassLoader() { + // no-op + } + + @Override + public void checkListen(int var1) { + // no-op + } + + @Override + public void checkAccept(String var1, int var2) { + // no-op + } + + @Override + public void checkMulticast(InetAddress var1) { + // no-op + } + + @Override + public void 
checkMulticast(InetAddress var1, byte var2) { + // no-op + } + + @Override + public void checkPropertiesAccess() { + // no-op + } + + @Override + public void checkPropertyAccess(String var1) { + // no-op + } + + @Override + public void checkPackageAccess(String var1) { + // no-op + } + + @Override + public void checkPackageDefinition(String var1) { + // no-op + } + + @Override + public void checkSetFactory() { + // no-op + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java index ee46cecf5d8..9b787f72b7e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/JodaTimezoneCacheTest.java @@ -7,11 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.cache; @@ -27,27 +29,27 @@ public class JodaTimezoneCacheTest { - @Test - public void testGetInstanceByteBufferUTC() { - DateTimeZone instance = JodaTimezoneCache.getInstance(ByteBuffer.wrap(Bytes.toBytes("UTC"))); - assertNotNull(instance); - } - - @Test - public void testGetInstanceString() { - DateTimeZone instance = JodaTimezoneCache.getInstance("America/St_Vincent"); - assertNotNull(instance); - } - - @Test(expected = IllegalDataException.class) - public void testGetInstanceStringUnknown() { - JodaTimezoneCache.getInstance("SOME_UNKNOWN_TIMEZONE"); - } - - @Test - public void testGetInstanceImmutableBytesWritable() { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("Europe/Isle_of_Man")); - DateTimeZone instance = JodaTimezoneCache.getInstance(ptr); - assertNotNull(instance); - } + @Test + public void testGetInstanceByteBufferUTC() { + DateTimeZone instance = JodaTimezoneCache.getInstance(ByteBuffer.wrap(Bytes.toBytes("UTC"))); + assertNotNull(instance); + } + + @Test + public void testGetInstanceString() { + DateTimeZone instance = JodaTimezoneCache.getInstance("America/St_Vincent"); + assertNotNull(instance); + } + + @Test(expected = IllegalDataException.class) + public void testGetInstanceStringUnknown() { + JodaTimezoneCache.getInstance("SOME_UNKNOWN_TIMEZONE"); + } + + @Test + public void testGetInstanceImmutableBytesWritable() { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(Bytes.toBytes("Europe/Isle_of_Man")); + DateTimeZone instance = JodaTimezoneCache.getInstance(ptr); + assertNotNull(instance); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerCacheClientTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerCacheClientTest.java index 875819540b8..c4a0a2178d1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerCacheClientTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerCacheClientTest.java @@ -7,11 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.cache; @@ -30,23 +32,23 @@ import org.mockito.Mockito; public class ServerCacheClientTest { - @Test - public void testAddServerCache() throws SQLException { - PhoenixConnection connection = Mockito.mock(PhoenixConnection.class); - ConnectionQueryServices services = Mockito.mock(ConnectionQueryServices.class); - Mockito.when(services.getExecutor()).thenReturn(null); - Mockito.when(services.getProps()).thenReturn(new ReadOnlyProps(new HashMap<>())); - Mockito.when(connection.getQueryServices()).thenReturn(services); - byte[] tableName = Bytes.toBytes("TableName"); - PTableImpl pTable = Mockito.mock(PTableImpl.class); - Mockito.when(pTable.getPhysicalName()).thenReturn(PNameFactory.newName("TableName")); - Mockito.when(services.getAllTableRegions(tableName, 600000)).thenThrow(new SQLException( - "Test Exception")); - ServerCacheClient client = new ServerCacheClient(connection); - try { - client.addServerCache(null, null, null, null, pTable, false); - } catch (Exception e) { - assertEquals(e.getMessage(), "Test Exception"); - } + @Test + public void testAddServerCache() throws SQLException { + PhoenixConnection connection = Mockito.mock(PhoenixConnection.class); + ConnectionQueryServices services = Mockito.mock(ConnectionQueryServices.class); + Mockito.when(services.getExecutor()).thenReturn(null); + Mockito.when(services.getProps()).thenReturn(new ReadOnlyProps(new HashMap<>())); + Mockito.when(connection.getQueryServices()).thenReturn(services); + byte[] tableName = Bytes.toBytes("TableName"); + PTableImpl pTable = Mockito.mock(PTableImpl.class); + Mockito.when(pTable.getPhysicalName()).thenReturn(PNameFactory.newName("TableName")); + Mockito.when(services.getAllTableRegions(tableName, 600000)) + .thenThrow(new SQLException("Test Exception")); + ServerCacheClient client = new ServerCacheClient(connection); + try { + client.addServerCache(null, null, null, null, pTable, false); + } catch (Exception e) { + assertEquals(e.getMessage(), "Test Exception"); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/TenantCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/TenantCacheTest.java index f287f504ade..f8169bff535 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/cache/TenantCacheTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/TenantCacheTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.cache; +import static org.junit.Assert.*; + import java.io.Closeable; import java.io.DataInput; import java.io.DataOutput; @@ -30,180 +32,193 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.memory.GlobalMemoryManager; import org.apache.phoenix.memory.MemoryManager.MemoryChunk; +import org.apache.phoenix.thirdparty.com.google.common.base.Ticker; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.base.Ticker; - -import static org.junit.Assert.*; - public class TenantCacheTest { - @Test - public void testInvalidateClosesMemoryChunk() throws SQLException { - int maxServerCacheTimeToLive = 10000; - int maxServerCachePersistenceTimeToLive = 10; - long maxBytes = 1000; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - TenantCacheImpl newTenantCache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive); - ImmutableBytesPtr cacheId = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); - newTenantCache.addServerCache(cacheId, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(maxBytes-1, memoryManager.getAvailableMemory()); - newTenantCache.removeServerCache(cacheId); - assertEquals(maxBytes, memoryManager.getAvailableMemory()); - } - - @Test - public void testTimeoutClosesMemoryChunk() throws Exception { - int maxServerCacheTimeToLive = 10; - int maxServerCachePersistenceTimeToLive = 10; - long maxBytes = 1000; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(maxBytes-1, memoryManager.getAvailableMemory()); - ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; - cache.cleanUp(); - assertEquals(maxBytes, memoryManager.getAvailableMemory()); + @Test + public void testInvalidateClosesMemoryChunk() throws SQLException { + int maxServerCacheTimeToLive = 10000; + int maxServerCachePersistenceTimeToLive = 10; + long maxBytes = 1000; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + TenantCacheImpl newTenantCache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive); + ImmutableBytesPtr cacheId = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); + newTenantCache.addServerCache(cacheId, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, + false, MetaDataProtocol.PHOENIX_VERSION); + assertEquals(maxBytes - 1, memoryManager.getAvailableMemory()); + newTenantCache.removeServerCache(cacheId); + assertEquals(maxBytes, 
memoryManager.getAvailableMemory()); + } + + @Test + public void testTimeoutClosesMemoryChunk() throws Exception { + int maxServerCacheTimeToLive = 10; + int maxServerCachePersistenceTimeToLive = 10; + long maxBytes = 1000; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(maxBytes - 1, memoryManager.getAvailableMemory()); + ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; + cache.cleanUp(); + assertEquals(maxBytes, memoryManager.getAvailableMemory()); + } + + @Test + public void testFreeMemoryOnAccess() throws Exception { + int maxServerCacheTimeToLive = 10; + int maxServerCachePersistenceTimeToLive = 10; + long maxBytes = 1000; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(maxBytes - 1, memoryManager.getAvailableMemory()); + ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; + assertNull(cache.getServerCache(cacheId1)); + assertEquals(maxBytes, memoryManager.getAvailableMemory()); + } + + @Test + public void testExpiredCacheOnAddingNew() throws Exception { + int maxServerCacheTimeToLive = 10; + int maxServerCachePersistenceTimeToLive = 10; + long maxBytes = 10; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("12345678")); + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(2, memoryManager.getAvailableMemory()); + ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(2, memoryManager.getAvailableMemory()); + } + + @Test + public void testExpiresButStaysInPersistentAfterTimeout() throws Exception { + int maxServerCacheTimeToLive = 100; + int maxServerCachePersistenceTimeToLive = 1000; + long maxBytes = 1000; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new 
ImmutableBytesWritable(Bytes.toBytes("a")); + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(maxBytes - 1, memoryManager.getAvailableMemory()); + assertNotNull(cache.getServerCache(cacheId1)); + + // Expire it from live cache but not persistent cache + ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; + cache.cleanUp(); + assertEquals(maxBytes - 1, memoryManager.getAvailableMemory()); + assertNotNull(cache.getServerCache(cacheId1)); + + // Expire it from persistent cache as well + ticker.time += (maxServerCachePersistenceTimeToLive + 1) * 1000000; + cache.cleanUp(); + assertEquals(maxBytes, memoryManager.getAvailableMemory()); + assertNull(cache.getServerCache(cacheId1)); + } + + @Test + public void testExpiresButStaysInPersistentAfterRemove() throws Exception { + int maxServerCacheTimeToLive = 100; + int maxServerCachePersistenceTimeToLive = 1000; + long maxBytes = 1000; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("12")); + cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(maxBytes - 2, memoryManager.getAvailableMemory()); + assertNotNull(cache.getServerCache(cacheId1)); + + // Remove should only remove from live cache + cache.removeServerCache(cacheId1); + assertEquals(maxBytes - 2, memoryManager.getAvailableMemory()); + assertNotNull(cache.getServerCache(cacheId1)); + } + + @Test + public void testEvictPersistentCacheIfSpaceIsNeeded() throws Exception { + int maxServerCacheTimeToLive = 100; + int maxServerCachePersistenceTimeToLive = 1000; + long maxBytes = 10; + GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); + ManualTicker ticker = new ManualTicker(); + TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, + maxServerCachePersistenceTimeToLive, ticker); + ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); + ImmutableBytesWritable cachePtr1 = new ImmutableBytesWritable(Bytes.toBytes("1234")); + cache.addServerCache(cacheId1, cachePtr1, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(6, memoryManager.getAvailableMemory()); + + // Remove it, but it should stay in persistent cache + cache.removeServerCache(cacheId1); + assertNotNull(cache.getServerCache(cacheId1)); + assertEquals(6, memoryManager.getAvailableMemory()); + + // Let's do an entry that will require eviction + ImmutableBytesPtr cacheId2 = new ImmutableBytesPtr(Bytes.toBytes(2L)); + ImmutableBytesWritable cachePtr2 = new ImmutableBytesWritable(Bytes.toBytes("12345678")); + cache.addServerCache(cacheId2, cachePtr2, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, + MetaDataProtocol.PHOENIX_VERSION); + assertEquals(2, memoryManager.getAvailableMemory()); + assertNull(cache.getServerCache(cacheId1)); + assertNotNull(cache.getServerCache(cacheId2)); + } + + public static class ManualTicker extends Ticker { + public long time = 0; + + @Override + public long read() { + return time; } - @Test - public void testFreeMemoryOnAccess() throws Exception { - 
int maxServerCacheTimeToLive = 10; - int maxServerCachePersistenceTimeToLive = 10; - long maxBytes = 1000; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(maxBytes-1, memoryManager.getAvailableMemory()); - ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; - assertNull(cache.getServerCache(cacheId1)); - assertEquals(maxBytes, memoryManager.getAvailableMemory()); - } + } - @Test - public void testExpiredCacheOnAddingNew() throws Exception { - int maxServerCacheTimeToLive = 10; - int maxServerCachePersistenceTimeToLive = 10; - long maxBytes = 10; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("12345678")); - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(2, memoryManager.getAvailableMemory()); - ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, false, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(2, memoryManager.getAvailableMemory()); - } + public static ServerCacheFactory cacheFactory = new ServerCacheFactory() { - @Test - public void testExpiresButStaysInPersistentAfterTimeout() throws Exception { - int maxServerCacheTimeToLive = 100; - int maxServerCachePersistenceTimeToLive = 1000; - long maxBytes = 1000; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("a")); - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(maxBytes-1, memoryManager.getAvailableMemory()); - assertNotNull(cache.getServerCache(cacheId1)); - - // Expire it from live cache but not persistent cache - ticker.time += (maxServerCacheTimeToLive + 1) * 1000000; - cache.cleanUp(); - assertEquals(maxBytes-1, memoryManager.getAvailableMemory()); - assertNotNull(cache.getServerCache(cacheId1)); - - // Expire it from persistent cache as well - ticker.time += (maxServerCachePersistenceTimeToLive + 1) * 1000000; - cache.cleanUp(); - assertEquals(maxBytes, memoryManager.getAvailableMemory()); - assertNull(cache.getServerCache(cacheId1)); + @Override + public void readFields(DataInput arg0) throws IOException { } - @Test - public void testExpiresButStaysInPersistentAfterRemove() throws Exception { - int maxServerCacheTimeToLive = 100; - int 
maxServerCachePersistenceTimeToLive = 1000; - long maxBytes = 1000; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr = new ImmutableBytesWritable(Bytes.toBytes("12")); - cache.addServerCache(cacheId1, cachePtr, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(maxBytes-2, memoryManager.getAvailableMemory()); - assertNotNull(cache.getServerCache(cacheId1)); - - // Remove should only remove from live cache - cache.removeServerCache(cacheId1); - assertEquals(maxBytes-2, memoryManager.getAvailableMemory()); - assertNotNull(cache.getServerCache(cacheId1)); + @Override + public void write(DataOutput arg0) throws IOException { } - @Test - public void testEvictPersistentCacheIfSpaceIsNeeded() throws Exception { - int maxServerCacheTimeToLive = 100; - int maxServerCachePersistenceTimeToLive = 1000; - long maxBytes = 10; - GlobalMemoryManager memoryManager = new GlobalMemoryManager(maxBytes); - ManualTicker ticker = new ManualTicker(); - TenantCacheImpl cache = new TenantCacheImpl(memoryManager, maxServerCacheTimeToLive, maxServerCachePersistenceTimeToLive, ticker); - ImmutableBytesPtr cacheId1 = new ImmutableBytesPtr(Bytes.toBytes(1L)); - ImmutableBytesWritable cachePtr1 = new ImmutableBytesWritable(Bytes.toBytes("1234")); - cache.addServerCache(cacheId1, cachePtr1, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(6, memoryManager.getAvailableMemory()); - - // Remove it, but it should stay in persistent cache - cache.removeServerCache(cacheId1); - assertNotNull(cache.getServerCache(cacheId1)); - assertEquals(6, memoryManager.getAvailableMemory()); - - // Let's do an entry that will require eviction - ImmutableBytesPtr cacheId2 = new ImmutableBytesPtr(Bytes.toBytes(2L)); - ImmutableBytesWritable cachePtr2 = new ImmutableBytesWritable(Bytes.toBytes("12345678")); - cache.addServerCache(cacheId2, cachePtr2, ByteUtil.EMPTY_BYTE_ARRAY, cacheFactory, true, true, MetaDataProtocol.PHOENIX_VERSION); - assertEquals(2, memoryManager.getAvailableMemory()); - assertNull(cache.getServerCache(cacheId1)); - assertNotNull(cache.getServerCache(cacheId2)); + @Override + public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, + boolean useProtoForIndexMaintainer, int clientVersion) throws SQLException { + return chunk; } - public static class ManualTicker extends Ticker { - public long time = 0; - - @Override - public long read() { - return time; - } - - } - - public static ServerCacheFactory cacheFactory = new ServerCacheFactory() { - - @Override - public void readFields(DataInput arg0) throws IOException { - } - - @Override - public void write(DataOutput arg0) throws IOException { - } - - @Override - public Closeable newCache(ImmutableBytesWritable cachePtr, byte[] txState, MemoryChunk chunk, boolean useProtoForIndexMaintainer, int clientVersion) - throws SQLException { - return chunk; - } - - }; + }; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java index 54e52f23320..bf8694a0781 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/CreateTableCompilerTest.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -36,33 +36,35 @@
 import org.junit.Test;
 public class CreateTableCompilerTest extends BaseConnectionlessQueryTest {
- @Test
- public void testCreateTableWithDuplicateColumns() throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
- String ddl = "CREATE TABLE T (ID INTEGER PRIMARY KEY, DUPE INTEGER, DUPE INTEGER)";
- conn.createStatement().execute(ddl);
- fail();
- } catch (ColumnAlreadyExistsException e) {
- assertEquals("DUPE", e.getColumnName());
- }
+ @Test
+ public void testCreateTableWithDuplicateColumns() throws SQLException {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (PhoenixConnection conn =
+ DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
+ String ddl = "CREATE TABLE T (ID INTEGER PRIMARY KEY, DUPE INTEGER, DUPE INTEGER)";
+ conn.createStatement().execute(ddl);
+ fail();
+ } catch (ColumnAlreadyExistsException e) {
+ assertEquals("DUPE", e.getColumnName());
 }
+ }
- @Test
- public void testCreateTableWithNoVerify() throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
- String ddl = "CREATE TABLE T (ID INTEGER PRIMARY KEY, A INTEGER, B INTEGER) NOVERIFY";
- boolean result = conn.createStatement().execute(ddl);
- assertFalse(result);
- }
+ @Test
+ public void testCreateTableWithNoVerify() throws SQLException {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (PhoenixConnection conn =
+ DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
+ String ddl = "CREATE TABLE T (ID INTEGER PRIMARY KEY, A INTEGER, B INTEGER) NOVERIFY";
+ boolean result = conn.createStatement().execute(ddl);
+ assertFalse(result);
 }
+ }
- @Test
- public void testCreateTableWithNoVerifyValidateStmt() throws SQLException {
- String ddl = "CREATE TABLE A (K VARCHAR PRIMARY KEY DESC) NOVERIFY";
- CreateTableStatement stmt = (CreateTableStatement)new SQLParser((ddl)).parseStatement();
+ @Test
+ public void testCreateTableWithNoVerifyValidateStmt() throws SQLException {
+ String ddl = "CREATE TABLE A (K VARCHAR PRIMARY KEY DESC) NOVERIFY";
+ CreateTableStatement stmt = (CreateTableStatement) new SQLParser((ddl)).parseStatement();
- assertTrue(stmt.isNoVerify());
- }
+ assertTrue(stmt.isNoVerify());
+ }
 }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/CursorCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/CursorCompilerTest.java
index 2fd99cb91fe..1d0de3206ca 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/CursorCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/CursorCompilerTest.java
@@ -7,7 +7,7 @@
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,75 +17,48 @@ */ package org.apache.phoenix.compile; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.OrderByCompiler.OrderBy; -import org.apache.phoenix.coprocessor.BaseScannerRegionObserver; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.expression.Expression; -import org.apache.phoenix.expression.LiteralExpression; -import org.apache.phoenix.expression.aggregator.Aggregator; -import org.apache.phoenix.expression.aggregator.CountAggregator; -import org.apache.phoenix.expression.aggregator.ServerAggregators; -import org.apache.phoenix.expression.function.TimeUnit; -import org.apache.phoenix.filter.ColumnProjectionFilter; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.jdbc.PhoenixStatement; -import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.apache.phoenix.query.QueryConstants; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.schema.*; -import org.apache.phoenix.util.*; -import org.junit.Test; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.*; -import java.math.BigDecimal; import java.sql.*; import java.util.*; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; -import static org.apache.phoenix.util.TestUtil.assertDegenerate; -import static org.junit.Assert.*; - +import org.apache.phoenix.query.BaseConnectionlessQueryTest; +import org.apache.phoenix.schema.*; +import org.apache.phoenix.util.*; +import org.junit.Test; /** - * * Test for compiling the various cursor related statements - * - * * @since 0.1 */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RV_RETURN_VALUE_IGNORED", - justification="Test code.") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RV_RETURN_VALUE_IGNORED", + justification = "Test code.") public class CursorCompilerTest extends BaseConnectionlessQueryTest { - @Test - public void testCursorLifecycleCompile() throws SQLException { - String query = "SELECT a_string, b_string FROM atable"; - String sql = "DECLARE testCursor CURSOR FOR " + query; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - //this verifies PHOENIX-5534 is fixed and we don't initialize metrics twice - //on a cursor query - props.put("phoenix.query.request.metrics.enabled","true"); + @Test + public void testCursorLifecycleCompile() throws SQLException { + String query = "SELECT a_string, b_string FROM atable"; + String sql = "DECLARE testCursor CURSOR FOR " + query; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + // this verifies PHOENIX-5534 is fixed and we don't initialize metrics twice + // on a cursor query + props.put("phoenix.query.request.metrics.enabled", "true"); - Connection conn = DriverManager.getConnection(getUrl(), props); - //Test declare cursor compile - PreparedStatement statement = conn.prepareStatement(sql); - //Test declare cursor execution - statement.execute(); - 
assertTrue(CursorUtil.cursorDeclared("testCursor")); - //Test open cursor compile - sql = "OPEN testCursor"; - statement = conn.prepareStatement(sql); - //Test open cursor execution - statement.execute(); - //Test fetch cursor compile - sql = "FETCH NEXT FROM testCursor"; - statement = conn.prepareStatement(sql); - statement.executeQuery(); - } -} \ No newline at end of file + Connection conn = DriverManager.getConnection(getUrl(), props); + // Test declare cursor compile + PreparedStatement statement = conn.prepareStatement(sql); + // Test declare cursor execution + statement.execute(); + assertTrue(CursorUtil.cursorDeclared("testCursor")); + // Test open cursor compile + sql = "OPEN testCursor"; + statement = conn.prepareStatement(sql); + // Test open cursor execution + statement.execute(); + // Test fetch cursor compile + sql = "FETCH NEXT FROM testCursor"; + statement = conn.prepareStatement(sql); + statement.executeQuery(); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/HavingCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/HavingCompilerTest.java index a2b89d6ffc4..87394928017 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/HavingCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/HavingCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -51,150 +51,171 @@ import org.apache.phoenix.util.TestUtil; import org.junit.Test; - public class HavingCompilerTest extends BaseConnectionlessQueryTest { - private static class Expressions { - private Expression whereClause; - private Expression havingClause; - - private Expressions(Expression whereClause, Expression havingClause) { - this.whereClause = whereClause; - this.havingClause = havingClause; - } - } - - private static Expressions compileStatement(String query, List binds) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - TestUtil.bindParams(pstmt, binds); - QueryPlan plan = pstmt.compileQuery(); - assertTrue(plan instanceof AggregatePlan); - Filter filter = plan.getContext().getScan().getFilter(); - assertTrue(filter == null || filter instanceof BooleanExpressionFilter); - BooleanExpressionFilter boolFilter = (BooleanExpressionFilter)filter; - AggregatePlan aggPlan = (AggregatePlan)plan; - return new Expressions(boolFilter == null ? 
null : boolFilter.getExpression(), aggPlan.getHaving()); - } - - @Test - public void testHavingToWhere() throws SQLException { - String query = "select count(1) from atable group by a_string having a_string = 'foo'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression w = constantComparison(CompareOperator.EQUAL, A_STRING,"foo"); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); - } - - @Test - public void testHavingFuncToWhere() throws SQLException { - // TODO: confirm that this is a valid optimization - String query = "select count(1) from atable group by a_date having round(a_date, 'hour') > ?"; - Date date = new Date(System.currentTimeMillis()); - List binds = Arrays.asList(date); - Expressions expressions = compileStatement(query,binds); - Expression w = constantComparison(CompareOperator.GREATER, RoundDateExpression.create(Arrays.asList(A_DATE,LiteralExpression.newConstant("hour"),LiteralExpression.newConstant(1))), date); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); - } - - @Test - public void testHavingToAndWhere() throws SQLException { - String query = "select count(1) from atable where b_string > 'bar' group by a_string having a_string = 'foo'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression w = and(constantComparison(CompareOperator.GREATER, B_STRING,"bar"),constantComparison(CompareOperator.EQUAL, A_STRING,"foo")); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); - } + private static class Expressions { + private Expression whereClause; + private Expression havingClause; - - @Test - public void testAndHavingToAndWhere() throws SQLException { - String query = "select count(1) from atable where b_string > 'bar' group by a_string having count(1) >= 1 and a_string = 'foo'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression h = constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(),1L); - Expression w = and(constantComparison(CompareOperator.GREATER, B_STRING,"bar"),constantComparison(CompareOperator.EQUAL, A_STRING,"foo")); - assertEquals(w, expressions.whereClause); - assertEquals(h, expressions.havingClause); - } - - @Test - public void testAndHavingToWhere() throws SQLException { - String query = "select count(1) from atable group by a_string having count(1) >= 1 and a_string = 'foo'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression h = constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(),1L); - Expression w = constantComparison(CompareOperator.EQUAL, A_STRING,"foo"); - assertEquals(w, expressions.whereClause); - assertEquals(h, expressions.havingClause); - } - - @Test - public void testInListHavingToWhere() throws SQLException { - String query = "select count(1) from atable group by a_string having a_string in ('foo', 'bar')"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression w = TestUtil.in(A_STRING,"foo","bar"); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); - } - - @Test - public void testAggFuncInHaving() throws SQLException { - String query = "select count(1) from atable group by a_string having count(a_string) >= 1"; - List binds = Collections.emptyList(); - 
Expressions expressions = compileStatement(query,binds); - Expression h = constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(Arrays.asList(A_STRING)),1L); - assertNull(expressions.whereClause); - assertEquals(h, expressions.havingClause); - } - - @Test - public void testOrAggFuncInHaving() throws SQLException { - String query = "select count(1) from atable group by a_string having count(1) >= 1 or a_string = 'foo'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - PColumn aCol = ATABLE.getColumnForColumnName("A_STRING"); - Expression h = or( - constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(),1L), - constantComparison(CompareOperator.EQUAL, - new RowKeyColumnExpression(aCol, // a_string comes from group by key in this case - new RowKeyValueAccessor(Arrays.asList(aCol), 0)),"foo")); - assertNull(expressions.whereClause); - assertEquals(h, expressions.havingClause); - } - - @Test - public void testAndAggColsInHaving() throws SQLException { - String query = "select count(1) from atable group by a_string,b_string having a_string = 'a' and b_string = 'b'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression w = and(constantComparison(CompareOperator.EQUAL, A_STRING,"a"),constantComparison(CompareOperator.EQUAL, B_STRING,"b")); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); - } - - @Test - public void testOrAggColsInHaving() throws SQLException { - String query = "select count(1) from atable group by a_string,b_string having a_string = 'a' or b_string = 'b'"; - List binds = Collections.emptyList(); - Expressions expressions = compileStatement(query,binds); - Expression w = or(constantComparison(CompareOperator.EQUAL, A_STRING,"a"),constantComparison(CompareOperator.EQUAL, B_STRING,"b")); - assertEquals(w, expressions.whereClause); - assertNull(expressions.havingClause); + private Expressions(Expression whereClause, Expression havingClause) { + this.whereClause = whereClause; + this.havingClause = havingClause; } - - @Test - public void testNonAggColInHaving() throws SQLException { - String query = "select count(1) from atable group by a_string having b_string = 'bar'"; - List binds = Collections.emptyList(); - try { - compileStatement(query,binds); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1019 (42Y26): Only aggregate maybe used in the HAVING clause.")); - } + } + + private static Expressions compileStatement(String query, List binds) + throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + TestUtil.bindParams(pstmt, binds); + QueryPlan plan = pstmt.compileQuery(); + assertTrue(plan instanceof AggregatePlan); + Filter filter = plan.getContext().getScan().getFilter(); + assertTrue(filter == null || filter instanceof BooleanExpressionFilter); + BooleanExpressionFilter boolFilter = (BooleanExpressionFilter) filter; + AggregatePlan aggPlan = (AggregatePlan) plan; + return new Expressions(boolFilter == null ? 
null : boolFilter.getExpression(), + aggPlan.getHaving()); + } + + @Test + public void testHavingToWhere() throws SQLException { + String query = "select count(1) from atable group by a_string having a_string = 'foo'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression w = constantComparison(CompareOperator.EQUAL, A_STRING, "foo"); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testHavingFuncToWhere() throws SQLException { + // TODO: confirm that this is a valid optimization + String query = "select count(1) from atable group by a_date having round(a_date, 'hour') > ?"; + Date date = new Date(System.currentTimeMillis()); + List binds = Arrays. asList(date); + Expressions expressions = compileStatement(query, binds); + Expression w = + constantComparison(CompareOperator.GREATER, RoundDateExpression.create(Arrays.asList(A_DATE, + LiteralExpression.newConstant("hour"), LiteralExpression.newConstant(1))), date); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testHavingToAndWhere() throws SQLException { + String query = + "select count(1) from atable where b_string > 'bar' group by a_string having a_string = 'foo'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression w = and(constantComparison(CompareOperator.GREATER, B_STRING, "bar"), + constantComparison(CompareOperator.EQUAL, A_STRING, "foo")); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testAndHavingToAndWhere() throws SQLException { + String query = + "select count(1) from atable where b_string > 'bar' group by a_string having count(1) >= 1 and a_string = 'foo'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression h = + constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(), 1L); + Expression w = and(constantComparison(CompareOperator.GREATER, B_STRING, "bar"), + constantComparison(CompareOperator.EQUAL, A_STRING, "foo")); + assertEquals(w, expressions.whereClause); + assertEquals(h, expressions.havingClause); + } + + @Test + public void testAndHavingToWhere() throws SQLException { + String query = + "select count(1) from atable group by a_string having count(1) >= 1 and a_string = 'foo'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression h = + constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(), 1L); + Expression w = constantComparison(CompareOperator.EQUAL, A_STRING, "foo"); + assertEquals(w, expressions.whereClause); + assertEquals(h, expressions.havingClause); + } + + @Test + public void testInListHavingToWhere() throws SQLException { + String query = + "select count(1) from atable group by a_string having a_string in ('foo', 'bar')"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression w = TestUtil.in(A_STRING, "foo", "bar"); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testAggFuncInHaving() throws SQLException { + String query = "select count(1) from atable group by a_string having count(a_string) >= 1"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression h 
= constantComparison(CompareOperator.GREATER_OR_EQUAL, + new CountAggregateFunction(Arrays.asList(A_STRING)), 1L); + assertNull(expressions.whereClause); + assertEquals(h, expressions.havingClause); + } + + @Test + public void testOrAggFuncInHaving() throws SQLException { + String query = + "select count(1) from atable group by a_string having count(1) >= 1 or a_string = 'foo'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + PColumn aCol = ATABLE.getColumnForColumnName("A_STRING"); + Expression h = + or(constantComparison(CompareOperator.GREATER_OR_EQUAL, new CountAggregateFunction(), 1L), + constantComparison(CompareOperator.EQUAL, new RowKeyColumnExpression(aCol, // a_string comes + // from group by + // key in this + // case + new RowKeyValueAccessor(Arrays. asList(aCol), 0)), "foo")); + assertNull(expressions.whereClause); + assertEquals(h, expressions.havingClause); + } + + @Test + public void testAndAggColsInHaving() throws SQLException { + String query = + "select count(1) from atable group by a_string,b_string having a_string = 'a' and b_string = 'b'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression w = and(constantComparison(CompareOperator.EQUAL, A_STRING, "a"), + constantComparison(CompareOperator.EQUAL, B_STRING, "b")); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testOrAggColsInHaving() throws SQLException { + String query = + "select count(1) from atable group by a_string,b_string having a_string = 'a' or b_string = 'b'"; + List binds = Collections.emptyList(); + Expressions expressions = compileStatement(query, binds); + Expression w = or(constantComparison(CompareOperator.EQUAL, A_STRING, "a"), + constantComparison(CompareOperator.EQUAL, B_STRING, "b")); + assertEquals(w, expressions.whereClause); + assertNull(expressions.havingClause); + } + + @Test + public void testNonAggColInHaving() throws SQLException { + String query = "select count(1) from atable group by a_string having b_string = 'bar'"; + List binds = Collections.emptyList(); + try { + compileStatement(query, binds); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 1019 (42Y26): Only aggregate maybe used in the HAVING clause.")); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java index 5027e54daf3..feac801b954 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,9 +35,6 @@ import org.apache.phoenix.compile.JoinCompiler.JoinTable; import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixStatement; -import org.apache.phoenix.parse.SQLParser; -import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; @@ -49,117 +46,127 @@ * Test compilation of queries containing joins. */ public class JoinQueryCompilerTest extends BaseConnectionlessQueryTest { - - @BeforeClass - public static synchronized void createJoinTables() throws SQLException { - try (Connection conn = DriverManager.getConnection(getUrl())) { - conn.createStatement().execute("create table " + JOIN_ORDER_TABLE_FULL_NAME + - " (\"order_id\" varchar(15) not null primary key, " + - " \"customer_id\" varchar(10), " + - " \"item_id\" varchar(10), " + - " price integer, " + - " quantity integer, " + - " \"date\" timestamp)"); - conn.createStatement().execute("create table " + JOIN_CUSTOMER_TABLE_FULL_NAME + - " (\"customer_id\" varchar(10) not null primary key, " + - " name varchar, " + - " phone varchar(12), " + - " address varchar, " + - " loc_id varchar(5), " + - " \"date\" date)"); - conn.createStatement().execute("create table " + JOIN_ITEM_TABLE_FULL_NAME + - " (\"item_id\" varchar(10) not null primary key, " + - " name varchar, " + - " price integer, " + - " discount1 integer, " + - " discount2 integer, " + - " \"supplier_id\" varchar(10), " + - " description varchar)"); - conn.createStatement().execute("create table " + JOIN_SUPPLIER_TABLE_FULL_NAME + - " (\"supplier_id\" varchar(10) not null primary key, " + - " name varchar, " + - " phone varchar(12), " + - " address varchar, " + - " loc_id varchar(5))"); - } - } - - @Test - public void testExplainPlan() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String query = "EXPLAIN SELECT s.\"supplier_id\", \"order_id\", c.name, i.name, quantity, o.\"date\" FROM " + JOIN_ORDER_TABLE_FULL_NAME + " o LEFT JOIN " - + JOIN_CUSTOMER_TABLE_FULL_NAME + " c ON o.\"customer_id\" = c.\"customer_id\" AND c.name LIKE 'C%' LEFT JOIN " - + JOIN_ITEM_TABLE_FULL_NAME + " i ON o.\"item_id\" = i.\"item_id\" RIGHT JOIN " - + JOIN_SUPPLIER_TABLE_FULL_NAME + " s ON s.\"supplier_id\" = i.\"supplier_id\" WHERE i.name LIKE 'T%'"; - ResultSet rs = conn.createStatement().executeQuery(query); - assertEquals( - "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " PARALLEL LEFT-JOIN TABLE 0\n" + - " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" + - " PARALLEL LEFT-JOIN TABLE 0\n" + - " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + "\n" + - " SERVER FILTER BY NAME LIKE 'C%'\n" + - " PARALLEL LEFT-JOIN TABLE 1\n" + - " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + "\n" + - " AFTER-JOIN SERVER FILTER BY I.NAME LIKE 'T%'", QueryUtil.getExplainPlan(rs)); + + @BeforeClass + public static synchronized void createJoinTables() throws SQLException { + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement() + 
.execute("create table " + JOIN_ORDER_TABLE_FULL_NAME + + " (\"order_id\" varchar(15) not null primary key, " + + " \"customer_id\" varchar(10), " + " \"item_id\" varchar(10), " + + " price integer, " + " quantity integer, " + " \"date\" timestamp)"); + conn.createStatement() + .execute("create table " + JOIN_CUSTOMER_TABLE_FULL_NAME + + " (\"customer_id\" varchar(10) not null primary key, " + " name varchar, " + + " phone varchar(12), " + " address varchar, " + " loc_id varchar(5), " + + " \"date\" date)"); + conn.createStatement() + .execute("create table " + JOIN_ITEM_TABLE_FULL_NAME + + " (\"item_id\" varchar(10) not null primary key, " + " name varchar, " + + " price integer, " + " discount1 integer, " + " discount2 integer, " + + " \"supplier_id\" varchar(10), " + " description varchar)"); + conn.createStatement() + .execute("create table " + JOIN_SUPPLIER_TABLE_FULL_NAME + + " (\"supplier_id\" varchar(10) not null primary key, " + " name varchar, " + + " phone varchar(12), " + " address varchar, " + " loc_id varchar(5))"); } + } - @Test - public void testWhereClauseOptimization() throws Exception { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - String queryTemplate = "SELECT t1.\"item_id\", t2.\"item_id\", t3.\"item_id\" FROM " + JOIN_ITEM_TABLE_FULL_NAME + " t1 " - + "%s JOIN " + JOIN_ITEM_TABLE_FULL_NAME + " t2 ON t1.\"item_id\" = t2.\"item_id\" " - + "%s JOIN " + JOIN_ITEM_TABLE_FULL_NAME + " t3 ON t1.\"item_id\" = t3.\"item_id\" " - + "WHERE t1.\"item_id\" = '0000000001' AND t2.\"item_id\" = '0000000002' AND t3.\"item_id\" = '0000000003'"; + @Test + public void testExplainPlan() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String query = + "EXPLAIN SELECT s.\"supplier_id\", \"order_id\", c.name, i.name, quantity, o.\"date\" FROM " + + JOIN_ORDER_TABLE_FULL_NAME + " o LEFT JOIN " + JOIN_CUSTOMER_TABLE_FULL_NAME + + " c ON o.\"customer_id\" = c.\"customer_id\" AND c.name LIKE 'C%' LEFT JOIN " + + JOIN_ITEM_TABLE_FULL_NAME + " i ON o.\"item_id\" = i.\"item_id\" RIGHT JOIN " + + JOIN_SUPPLIER_TABLE_FULL_NAME + + " s ON s.\"supplier_id\" = i.\"supplier_id\" WHERE i.name LIKE 'T%'"; + ResultSet rs = conn.createStatement().executeQuery(query); + assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_SUPPLIER_TABLE_DISPLAY_NAME + "\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + " PARALLEL LEFT-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ORDER_TABLE_DISPLAY_NAME + "\n" + + " PARALLEL LEFT-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_CUSTOMER_TABLE_DISPLAY_NAME + + "\n" + " SERVER FILTER BY NAME LIKE 'C%'\n" + + " PARALLEL LEFT-JOIN TABLE 1\n" + + " CLIENT PARALLEL 1-WAY FULL SCAN OVER " + JOIN_ITEM_TABLE_DISPLAY_NAME + + "\n" + " AFTER-JOIN SERVER FILTER BY I.NAME LIKE 'T%'", QueryUtil.getExplainPlan(rs)); + } - String query = String.format(queryTemplate, "INNER", "INNER"); - JoinTable joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + @Test + public void testWhereClauseOptimization() throws Exception { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + String queryTemplate = "SELECT t1.\"item_id\", t2.\"item_id\", t3.\"item_id\" FROM " + + JOIN_ITEM_TABLE_FULL_NAME + " t1 " + "%s JOIN " + JOIN_ITEM_TABLE_FULL_NAME + + " t2 ON t1.\"item_id\" = t2.\"item_id\" " + "%s JOIN " + JOIN_ITEM_TABLE_FULL_NAME + + " t3 ON t1.\"item_id\" = t3.\"item_id\" " + + "WHERE t1.\"item_id\" = '0000000001' AND t2.\"item_id\" = '0000000002' AND t3.\"item_id\" = '0000000003'"; - query = String.format(queryTemplate, "INNER", "LEFT"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + String query = String.format(queryTemplate, "INNER", "INNER"); + JoinTable joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "INNER", "RIGHT"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "INNER", "LEFT"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "LEFT", "INNER"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "INNER", "RIGHT"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "LEFT", "LEFT"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "LEFT", "INNER"); + joinTable = 
TestUtil.getJoinTable(query, pconn); + assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "LEFT", "RIGHT"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "LEFT", "LEFT"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(1, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "RIGHT", "INNER"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "LEFT", "RIGHT"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); - query = String.format(queryTemplate, "RIGHT", "RIGHT"); - joinTable = TestUtil.getJoinTable(query, pconn); - assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); - assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable().getPreFilterParseNodes().size()); - } -} + query = String.format(queryTemplate, "RIGHT", "INNER"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + query = String.format(queryTemplate, "RIGHT", "RIGHT"); + joinTable = TestUtil.getJoinTable(query, pconn); + assertEquals(0, joinTable.getLeftTable().getPreFilterParseNodes().size()); + assertEquals(0, joinTable.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + assertEquals(1, joinTable.getJoinSpecs().get(1).getRhsJoinTable().getLeftTable() + .getPreFilterParseNodes().size()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/LimitCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/LimitCompilerTest.java index 30d926d0bc1..e81ab2ec67c 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/LimitCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/LimitCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,89 +40,90 @@ import org.apache.phoenix.util.TestUtil; import org.junit.Test; - public class LimitCompilerTest extends BaseConnectionlessQueryTest { - - private static QueryPlan compileStatement(String query, List binds) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - TestUtil.bindParams(pstmt, binds); - return pstmt.compileQuery(); - } - - @Test - public void testLimit() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' limit 5"; - List binds = Collections.emptyList(); - QueryPlan plan = compileStatement(query, binds); - Scan scan = plan.getContext().getScan(); - - assertNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - assertEquals(plan.getLimit(),Integer.valueOf(5)); - } - @Test - public void testNoLimit() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "'"; - List binds = Collections.emptyList(); - QueryPlan plan = compileStatement(query, binds); - Scan scan = plan.getContext().getScan(); + private static QueryPlan compileStatement(String query, List binds) throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + TestUtil.bindParams(pstmt, binds); + return pstmt.compileQuery(); + } - assertNull(scan.getFilter()); - assertNull(plan.getLimit()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testBoundLimit() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' limit ?"; - List binds = Arrays.asList(5); - QueryPlan plan = compileStatement(query, binds); - Scan scan = plan.getContext().getScan(); + @Test + public void testLimit() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' limit 5"; + List binds = Collections.emptyList(); + QueryPlan plan = compileStatement(query, binds); + Scan scan = plan.getContext().getScan(); - assertNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - assertEquals(plan.getLimit(),Integer.valueOf(5)); - } + assertNull(scan.getFilter()); + 
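
For orientation, a minimal sketch of what the scan-range assertions in these limit tests amount to; it reuses only symbols that already appear in this class, and the literal tenant id is the one the tests use (the sketch itself is not part of the patch):

  // For a point condition on the leading PK column ORGANIZATION_ID, the compiler is expected
  // to turn the predicate into a key range on the Scan rather than into a filter.
  byte[] startRow = PVarchar.INSTANCE.toBytes("000000000000001"); // inclusive start row
  byte[] stopRow = ByteUtil.nextKey(startRow); // smallest key strictly greater: exclusive stop row
  // A plan compiled for "... WHERE organization_id='000000000000001' LIMIT 5" should then carry
  // [startRow, stopRow) on its Scan, a null filter, and Integer.valueOf(5) as its limit.
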
assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + assertEquals(plan.getLimit(), Integer.valueOf(5)); + } - @Test - public void testTypeMismatchBoundLimit() throws SQLException { - String query = "select * from atable limit ?"; - List binds = Arrays.asList("foo"); - try { - compileStatement(query, binds); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Type mismatch")); - } - } + @Test + public void testNoLimit() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "'"; + List binds = Collections.emptyList(); + QueryPlan plan = compileStatement(query, binds); + Scan scan = plan.getContext().getScan(); + + assertNull(scan.getFilter()); + assertNull(plan.getLimit()); + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testBoundLimit() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' limit ?"; + List binds = Arrays. asList(5); + QueryPlan plan = compileStatement(query, binds); + Scan scan = plan.getContext().getScan(); - @Test - public void testNegativeBoundLimit() throws SQLException { - String query = "select * from atable limit ?"; - List binds = Arrays.asList(-1); - QueryPlan plan = compileStatement(query, binds); - assertNull(plan.getLimit()); + assertNull(scan.getFilter()); + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + assertEquals(plan.getLimit(), Integer.valueOf(5)); + } + + @Test + public void testTypeMismatchBoundLimit() throws SQLException { + String query = "select * from atable limit ?"; + List binds = Arrays. asList("foo"); + try { + compileStatement(query, binds); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Type mismatch")); } + } + + @Test + public void testNegativeBoundLimit() throws SQLException { + String query = "select * from atable limit ?"; + List binds = Arrays. asList(-1); + QueryPlan plan = compileStatement(query, binds); + assertNull(plan.getLimit()); + } - @Test - public void testBindTypeMismatch() throws SQLException { - Long tenantId = Long.valueOf(0); - String keyPrefix = "002"; - List binds = Arrays.asList(tenantId,keyPrefix); - String query = "select * from atable where organization_id=? and substr(entity_id,1,3)=?"; - try { - compileStatement(query, binds); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 203 (22005): Type mismatch.")); - } + @Test + public void testBindTypeMismatch() throws SQLException { + Long tenantId = Long.valueOf(0); + String keyPrefix = "002"; + List binds = Arrays. asList(tenantId, keyPrefix); + String query = "select * from atable where organization_id=? 
and substr(entity_id,1,3)=?"; + try { + compileStatement(query, binds); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains("ERROR 203 (22005): Type mismatch.")); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java index 9e99a7eb969..40c49d35e85 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,50 +34,52 @@ public class PostIndexDDLCompilerTest extends BaseConnectionlessQueryTest { - @Test - public void testHintInSubquery() throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl())) { - setupTables(conn); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - String query = "UPSERT /*+ NO_INDEX */ INTO T(k, v1) SELECT /*+ NO_INDEX */ k,v1 FROM T WHERE v1 = '4'"; - MutationPlan plan = stmt.compileMutation(query); - assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); - query = "UPSERT INTO T(k, v1) SELECT /*+ NO_INDEX */ k,v1 FROM T WHERE v1 = '4'"; - plan = stmt.compileMutation(query); - // TODO the following should actually use data table T if we supported hints in subqueries - assertEquals("IDX", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); - } + @Test + public void testHintInSubquery() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + setupTables(conn); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + String query = + "UPSERT /*+ NO_INDEX */ INTO T(k, v1) SELECT /*+ NO_INDEX */ k,v1 FROM T WHERE v1 = '4'"; + MutationPlan plan = stmt.compileMutation(query); + assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); + query = "UPSERT INTO T(k, v1) SELECT /*+ NO_INDEX */ k,v1 FROM T WHERE v1 = '4'"; + plan = stmt.compileMutation(query); + // TODO the following should actually use data table T if we supported hints in subqueries + assertEquals("IDX", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); } + } - @Test - public void testCompile() throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl())) { - setupTables(conn); - PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); - PTable pDataTable = pConn.getTable(new PTableKey(null, "T")); - PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(pConn, new TableRef(pDataTable)); - MutationPlan plan = compiler.compile(pConn.getTable(new PTableKey(null, "IDX"))); - assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); - } + @Test + public void testCompile() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + setupTables(conn); + PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); + PTable pDataTable = pConn.getTable(new PTableKey(null, "T")); + PostIndexDDLCompiler compiler = new 
PostIndexDDLCompiler(pConn, new TableRef(pDataTable)); + MutationPlan plan = compiler.compile(pConn.getTable(new PTableKey(null, "IDX"))); + assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); } + } - @Test - public void testCreateTableWithNoVerify() throws SQLException { - String ddl = "CREATE TABLE A (K VARCHAR PRIMARY KEY DESC) NOVERIFY"; - try (Connection conn = DriverManager.getConnection(getUrl())) { - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - MutationPlan plan = stmt.compileMutation(ddl); - MutationState state = plan.execute(); + @Test + public void testCreateTableWithNoVerify() throws SQLException { + String ddl = "CREATE TABLE A (K VARCHAR PRIMARY KEY DESC) NOVERIFY"; + try (Connection conn = DriverManager.getConnection(getUrl())) { + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + MutationPlan plan = stmt.compileMutation(ddl); + MutationState state = plan.execute(); - assertEquals("CREATE TABLE\n", plan.getExplainPlan().toString()); - assertEquals(0, state.getMaxSize()); - assertEquals(0, state.getMaxSizeBytes()); - } + assertEquals("CREATE TABLE\n", plan.getExplainPlan().toString()); + assertEquals(0, state.getMaxSize()); + assertEquals(0, state.getMaxSizeBytes()); } + } - private void setupTables(Connection conn) throws SQLException { - conn.createStatement().execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 CHAR(15), v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); - } + private void setupTables(Connection conn) throws SQLException { + conn.createStatement() + .execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 CHAR(15), v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java index bb838b99565..ebe19be760d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -114,7664 +114,7768 @@ import org.junit.Ignore; import org.junit.Test; - - /** - * - * Tests for compiling a query - * The compilation stage finds additional errors that can't be found at parse - * time so this is a good place for negative tests (since the mini-cluster - * is not necessary enabling the tests to run faster). - * - * + * Tests for compiling a query The compilation stage finds additional errors that can't be found at + * parse time so this is a good place for negative tests (since the mini-cluster is not necessary + * enabling the tests to run faster). 
* @since 0.1 */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="RV_RETURN_VALUE_IGNORED", - justification="Test code.") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RV_RETURN_VALUE_IGNORED", + justification = "Test code.") public class QueryCompilerTest extends BaseConnectionlessQueryTest { - @Before - public void setUp() { - ParseNodeFactory.setTempAliasCounterValue(0); - } - - @Test - public void testParameterUnbound() throws Exception { - try { - String query = "SELECT a_string, b_string FROM atable WHERE organization_id=? and a_integer = ?"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Parameter 2 is unbound")); - } - } - - @Test - public void testMultiPKDef() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (pk1 integer not null primary key, pk2 bigint not null primary key)"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 510 (42889): The table already has a primary key. columnName=PK2")); - } finally { - conn.close(); - } - } - - @Test - public void testPKDefAndPKConstraint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (pk integer not null primary key, col1 decimal, col2 decimal constraint my_pk primary key (col1,col2))"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 510 (42889): The table already has a primary key. columnName=PK")); - } finally { - conn.close(); - } - } - - @Test - public void testFamilyNameInPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (a.pk integer not null primary key, col1 decimal, col2 decimal)"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(e.getErrorCode(), SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testSameColumnNameInPKAndNonPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE t1 (k integer not null primary key, a.k decimal, b.k decimal)"; - conn.createStatement().execute(query); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PColumn c = pconn.getTable(new PTableKey(pconn.getTenantId(), "T1")).getColumnForColumnName("K"); - assertTrue(SchemaUtil.isPKColumn(c)); - } finally { - conn.close(); - } - } - - @Test - public void testVarBinaryNotLastInMultipartPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - // When the VARBINARY key is the last column, it is allowed. 
- String query = "CREATE TABLE foo (a_string varchar not null, b_string varchar not null, a_binary varbinary not null, " + - "col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_string, b_string, a_binary))"; + @Before + public void setUp() { + ParseNodeFactory.setTempAliasCounterValue(0); + } + + @Test + public void testParameterUnbound() throws Exception { + try { + String query = + "SELECT a_string, b_string FROM atable WHERE organization_id=? and a_integer = ?"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - try { - // VARBINARY key is not allowed in the middle of the key. - query = "CREATE TABLE foo (a_binary varbinary not null, a_string varchar not null, col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_binary, a_string))"; - statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.VARBINARY_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testArrayNotLastInMultipartPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - // When the VARBINARY key is the last column, it is allowed. - String query = "CREATE TABLE foo (a_string varchar not null, b_string varchar not null, a_array varchar[] not null, " + - "col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_string, b_string, a_array))"; + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Parameter 2 is unbound")); + } + } + + @Test + public void testMultiPKDef() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = + "CREATE TABLE foo (pk1 integer not null primary key, pk2 bigint not null primary key)"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 510 (42889): The table already has a primary key. columnName=PK2")); + } finally { + conn.close(); + } + } + + @Test + public void testPKDefAndPKConstraint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = + "CREATE TABLE foo (pk integer not null primary key, col1 decimal, col2 decimal constraint my_pk primary key (col1,col2))"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 510 (42889): The table already has a primary key. 
columnName=PK")); + } finally { + conn.close(); + } + } + + @Test + public void testFamilyNameInPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = + "CREATE TABLE foo (a.pk integer not null primary key, col1 decimal, col2 decimal)"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(e.getErrorCode(), SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testSameColumnNameInPKAndNonPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = "CREATE TABLE t1 (k integer not null primary key, a.k decimal, b.k decimal)"; + conn.createStatement().execute(query); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PColumn c = + pconn.getTable(new PTableKey(pconn.getTenantId(), "T1")).getColumnForColumnName("K"); + assertTrue(SchemaUtil.isPKColumn(c)); + } finally { + conn.close(); + } + } + + @Test + public void testVarBinaryNotLastInMultipartPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + // When the VARBINARY key is the last column, it is allowed. + String query = + "CREATE TABLE foo (a_string varchar not null, b_string varchar not null, a_binary varbinary not null, " + + "col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_string, b_string, a_binary))"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + try { + // VARBINARY key is not allowed in the middle of the key. + query = + "CREATE TABLE foo (a_binary varbinary not null, a_string varchar not null, col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_binary, a_string))"; + statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.VARBINARY_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testArrayNotLastInMultipartPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + // When the VARBINARY key is the last column, it is allowed. + String query = + "CREATE TABLE foo (a_string varchar not null, b_string varchar not null, a_array varchar[] not null, " + + "col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_string, b_string, a_array))"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + try { + // VARBINARY key is not allowed in the middle of the key. 
+ query = + "CREATE TABLE foo (a_array varchar[] not null, a_string varchar not null, col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_array, a_string))"; + statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.VARBINARY_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testNoPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = "CREATE TABLE foo (pk integer not null, col1 decimal, col2 decimal)"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.KEY_VALUE_NOT_NULL.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testImmutableRowsPK() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = "CREATE IMMUTABLE TABLE foo (pk integer not null, col1 decimal, col2 decimal)"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.PRIMARY_KEY_MISSING.getErrorCode(), e.getErrorCode()); + } + String query = + "CREATE IMMUTABLE TABLE foo (k1 integer not null, k2 decimal not null, col1 decimal not null, constraint pk primary key (k1,k2))"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + conn.close(); + } + + @Test + public void testUnknownFamilyNameInTableOption() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = + "CREATE TABLE foo (pk integer not null primary key, a.col1 decimal, b.col2 decimal) c.my_property='foo'"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertTrue( + e.getMessage().contains("Properties may not be defined for an unused family name")); + } finally { + conn.close(); + } + } + + @Test + public void testInvalidGroupedAggregation() throws Exception { + try { + // Select non agg column in aggregate query + String query = + "SELECT count(1),a_integer FROM atable WHERE organization_id=? GROUP BY a_string"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - try { - // VARBINARY key is not allowed in the middle of the key. 
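
The negative DDL tests in this class all repeat the same execute-then-expect-error shape; a compact sketch of that pattern, assuming a hypothetical helper name (assertDdlFails is illustrative and is not something this patch adds):

  private static void assertDdlFails(Connection conn, String ddl, SQLExceptionCode expected)
    throws SQLException {
    try {
      conn.createStatement().execute(ddl);
      fail("expected error " + expected.getErrorCode());
    } catch (SQLException e) {
      assertEquals(expected.getErrorCode(), e.getErrorCode());
    }
  }
  // e.g. assertDdlFails(conn,
  //   "CREATE TABLE foo (a_binary varbinary not null, a_string varchar not null "
  //     + "CONSTRAINT pk PRIMARY KEY (a_binary, a_string))",
  //   SQLExceptionCode.VARBINARY_IN_ROW_KEY);
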
- query = "CREATE TABLE foo (a_array varchar[] not null, a_string varchar not null, col1 decimal, col2 decimal CONSTRAINT pk PRIMARY KEY (a_array, a_string))"; - statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.VARBINARY_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testNoPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (pk integer not null, col1 decimal, col2 decimal)"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.KEY_VALUE_NOT_NULL.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testImmutableRowsPK() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE IMMUTABLE TABLE foo (pk integer not null, col1 decimal, col2 decimal)"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.PRIMARY_KEY_MISSING.getErrorCode(), e.getErrorCode()); - } - String query = "CREATE IMMUTABLE TABLE foo (k1 integer not null, k2 decimal not null, col1 decimal not null, constraint pk primary key (k1,k2))"; + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER")); + } + } + + @Test + public void testInvalidGroupExpressionAggregation() throws Exception { + try { + // Select non agg column in aggregate query + String query = + "SELECT sum(a_integer) + a_integer FROM atable WHERE organization_id=? GROUP BY a_string"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { conn.close(); - } - - @Test - public void testUnknownFamilyNameInTableOption() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (pk integer not null primary key, a.col1 decimal, b.col2 decimal) c.my_property='foo'"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Properties may not be defined for an unused family name")); - } finally { - conn.close(); - } - } - - @Test - public void testInvalidGroupedAggregation() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT count(1),a_integer FROM atable WHERE organization_id=? GROUP BY a_string"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. 
A_INTEGER")); - } - } - - @Test - public void testInvalidGroupExpressionAggregation() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT sum(a_integer) + a_integer FROM atable WHERE organization_id=? GROUP BY a_string"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER")); - } - } - - @Test - public void testAggInWhereClause() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT a_integer FROM atable WHERE organization_id=? AND count(1) > 2"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1017 (42Y26): Aggregate may not be used in WHERE.")); - } - } - - @Test - public void testHavingAggregateQuery() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT a_integer FROM atable WHERE organization_id=? HAVING count(1) > 2"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER")); - } - } - - @Test - public void testNonAggInHavingClause() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT a_integer FROM atable WHERE organization_id=? HAVING a_integer = 5"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1019 (42Y26): Only aggregate maybe used in the HAVING clause.")); - } - } - - @Test - public void testTypeMismatchInCase() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT a_integer FROM atable WHERE organization_id=? 
HAVING CASE WHEN a_integer <= 2 THEN 'foo' WHEN a_integer = 3 THEN 2 WHEN a_integer <= 5 THEN 5 ELSE 5 END = 5"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Case expressions must have common type")); - } - } - - @Test - public void testNonBooleanWhereExpression() throws Exception { - try { - // Select non agg column in aggregate query - String query = "SELECT a_integer FROM atable WHERE organization_id=? and CASE WHEN a_integer <= 2 THEN 'foo' WHEN a_integer = 3 THEN 'bar' WHEN a_integer <= 5 THEN 'bas' ELSE 'blah' END"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage().contains("ERROR 203 (22005): Type mismatch. BOOLEAN and VARCHAR for CASE WHEN A_INTEGER <= 2 THEN 'foo'WHEN A_INTEGER = 3 THEN 'bar'WHEN A_INTEGER <= 5 THEN 'bas' ELSE 'blah' END")); - } - } - - @Test - public void testNoSCNInConnectionProps() throws Exception { - Properties props = new Properties(); - DriverManager.getConnection(getUrl(), props); - } - - - @Test - public void testPercentileWrongQueryWithMixOfAggrAndNonAggrExps() throws Exception { - String query = "select a_integer, PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; - try { - compileQuery(query, Collections.emptyList()); - fail(); - } catch (SQLException e) { - assertEquals("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER", - e.getMessage()); - } - } - - @Test - public void testPercentileWrongQuery1() throws Exception { - String query = "select PERCENTILE_CONT('*') WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; - try { - compileQuery(query, Collections.emptyList()); - fail(); - } catch (SQLException e) { - assertEquals( - "ERROR 203 (22005): Type mismatch. expected: [DECIMAL] but was: VARCHAR at PERCENTILE_CONT argument 3", - e.getMessage()); - } - } - - @Test - public void testPercentileWrongQuery2() throws Exception { - String query = "select PERCENTILE_CONT(1.1) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; - try { - compileQuery(query, Collections.emptyList()); - fail(); - } catch (SQLException e) { - assertEquals( - "ERROR 213 (22003): Value outside range. expected: [0 , 1] but was: 1.1 at PERCENTILE_CONT argument 3", - e.getMessage()); - } - } - - @Test - public void testPercentileWrongQuery3() throws Exception { - String query = "select PERCENTILE_CONT(-1) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; - try { - compileQuery(query, Collections.emptyList()); - fail(); - } catch (Exception e) { - assertEquals( - "ERROR 213 (22003): Value outside range. 
expected: [0 , 1] but was: -1 at PERCENTILE_CONT argument 3", - e.getMessage()); - } - } - - private Scan compileQuery(String query, List binds) throws SQLException { - QueryPlan plan = getQueryPlan(query, binds); - return plan.getContext().getScan(); - } - - private Scan projectQuery(String query) throws SQLException { - QueryPlan plan = getQueryPlan(query, Collections.emptyList()); - plan.iterator(); // Forces projection - return plan.getContext().getScan(); - } - - private QueryPlan getOptimizedQueryPlan(String query) throws SQLException { - return getOptimizedQueryPlan(query, Collections.emptyList()); - } - - private QueryPlan getOptimizedQueryPlan(String query, List binds) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PhoenixPreparedStatement statement = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); - for (Object bind : binds) { - statement.setObject(1, bind); - } - QueryPlan plan = statement.optimizeQuery(query); - return plan; - } finally { - conn.close(); - } - } - - private QueryPlan getQueryPlan(String query, List binds) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PhoenixPreparedStatement statement = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); - for (Object bind : binds) { - statement.setObject(1, bind); - } - QueryPlan plan = statement.compileQuery(query); - return plan; - } finally { - conn.close(); - } - } - - @Test - public void testKeyOrderedGroupByOptimization() throws Exception { - // Select columns in PK - String[] queries = new String[] { - "SELECT count(1) FROM atable GROUP BY organization_id,entity_id", - "SELECT count(1) FROM atable GROUP BY organization_id,substr(entity_id,1,3),entity_id", - "SELECT count(1) FROM atable GROUP BY entity_id,organization_id", - "SELECT count(1) FROM atable GROUP BY substr(entity_id,1,3),organization_id", - "SELECT count(1) FROM ptsdb GROUP BY host,inst,round(\"DATE\",'HOUR')", - "SELECT count(1) FROM atable GROUP BY organization_id", - }; - List binds = Collections.emptyList(); - for (String query : queries) { - QueryPlan plan = getQueryPlan(query, binds); - assertEquals(query, BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS, plan.getGroupBy().getScanAttribName()); - } - } - - @Test - public void testNullInScanKey() throws Exception { - // Select columns in PK - String query = "select val from ptsdb where inst is null and host='a'"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - // Projects column family with not null column - assertNull(scan.getFilter()); - assertEquals(1,scan.getFamilyMap().keySet().size()); - assertArrayEquals(Bytes.toBytes(SchemaUtil.normalizeIdentifier(QueryConstants.DEFAULT_COLUMN_FAMILY)), scan.getFamilyMap().keySet().iterator().next()); - } - - @Test - public void testOnlyNullInScanKey() throws Exception { - // Select columns in PK - String query = "select val from ptsdb where inst is null"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - // Projects column family with not null column - assertEquals(1,scan.getFamilyMap().keySet().size()); - assertArrayEquals(Bytes.toBytes(SchemaUtil.normalizeIdentifier(QueryConstants.DEFAULT_COLUMN_FAMILY)), scan.getFamilyMap().keySet().iterator().next()); - } - - @Test - public void testIsNullOnNotNullable() 
throws Exception { - // Select columns in PK - String query = "select a_string from atable where entity_id is null"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - assertDegenerate(scan); - } - - @Test - public void testIsNotNullOnNotNullable() throws Exception { - // Select columns in PK - String query = "select a_string from atable where entity_id is not null"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - assertNull(scan.getFilter()); - assertTrue(scan.getStartRow().length == 0); - assertTrue(scan.getStopRow().length == 0); - } - - @Test - public void testUpsertTypeMismatch() throws Exception { - try { - // Select non agg column in aggregate query - String query = "upsert into ATABLE VALUES (?, ?, ?)"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.setString(2, "00D300000000XHP"); - statement.setInt(3, 1); - statement.executeUpdate(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { // TODO: use error codes - assertTrue(e.getMessage().contains("Type mismatch")); - } - } - - @Test - public void testUpsertMultiByteIntoChar() throws Exception { - String value = "繰り返し曜日マスク"; - try { - // Select non agg column in aggregate query - String query = "upsert into ATABLE VALUES (?, ?, ?)"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "00D300000000XHP"); - statement.setString(2, value); - statement.setInt(3, 1); - statement.executeUpdate(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 201 (22000): Illegal data.")); - assertTrue(e.getMessage().contains("CHAR types may only contain single byte characters")); - assertFalse(e.getMessage().contains(value)); - } - } - - @Test - public void testSelectStarOnGroupBy() throws Exception { - try { - // Select non agg column in aggregate query - String query = "select * from ATABLE group by a_string"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY.")); - } - } - - @Test - public void testOrderByAggSelectNonAgg() throws Exception { - try { - // Order by in select with no limit or group by - String query = "select a_string from ATABLE order by max(b_string)"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. 
A_STRING")); - } - } - - @Test - public void testOrderByAggAndNonAgg() throws Exception { - try { - // Order by in select with no limit or group by - String query = "select max(a_string) from ATABLE order by max(b_string),a_string"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_STRING")); - } - } - - @Test - public void testOrderByNonAggSelectAgg() throws Exception { - try { - // Order by in select with no limit or group by - String query = "select max(a_string) from ATABLE order by b_string LIMIT 5"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } finally { - conn.close(); - } - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. B_STRING")); - } - } - - @Test - public void testNotKeyOrderedGroupByOptimization() throws Exception { - // Select columns in PK - String[] queries = new String[] { - "SELECT count(1) FROM atable GROUP BY entity_id", - "SELECT count(1) FROM atable GROUP BY substr(organization_id,2,3)", - "SELECT count(1) FROM atable GROUP BY substr(entity_id,1,3)", - "SELECT count(1) FROM atable GROUP BY to_date(organization_id)", - "SELECT count(1) FROM atable GROUP BY regexp_substr(organization_id, '.*foo.*'),entity_id", - "SELECT count(1) FROM atable GROUP BY substr(organization_id,1),entity_id", - }; - List binds = Collections.emptyList(); - for (String query : queries) { - QueryPlan plan = getQueryPlan(query, binds); - assertEquals(plan.getGroupBy().getScanAttribName(), BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS); - } - } - - @Test - public void testFunkyColumnNames() throws Exception { - // Select columns in PK - String[] queries = new String[] { - "SELECT \"foo!\",\"foo.bar-bas\",\"#@$\",\"_blah^\" FROM FUNKY_NAMES", - "SELECT count(\"foo!\"),\"_blah^\" FROM FUNKY_NAMES WHERE \"foo.bar-bas\"='x' GROUP BY \"#@$\",\"_blah^\"", - }; - List binds = Collections.emptyList(); - for (String query : queries) { - compileQuery(query, binds); - } - } - - @Test - public void testCountAggregatorFirst() throws Exception { - String[] queries = new String[] { - "SELECT sum(2.5),organization_id FROM atable GROUP BY organization_id,entity_id", - "SELECT avg(a_integer) FROM atable GROUP BY organization_id,substr(entity_id,1,3),entity_id", - "SELECT count(a_string) FROM atable GROUP BY substr(organization_id,1),entity_id", - "SELECT min('foo') FROM atable GROUP BY entity_id,organization_id", - "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable GROUP BY substr(organization_id,1),entity_id", - "SELECT sum(2.5) FROM atable", - "SELECT avg(a_integer) FROM atable", - "SELECT count(a_string) FROM atable", - "SELECT min('foo') FROM atable LIMIT 5", - "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable", - }; - List binds = Collections.emptyList(); - String query = null; - try { - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - Scan scan = 
compileQuery(query, binds); - ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), null, null); - Aggregator aggregator = aggregators.getAggregators()[0]; - assertTrue(aggregator instanceof CountAggregator); - } - } catch (Exception e) { - throw new Exception(query, e); - } - } - - @Test - public void testInvalidArithmetic() throws Exception { - String[] queries = new String[] { - "SELECT entity_id,organization_id FROM atable where A_STRING - 5.5 < 0", - "SELECT entity_id,organization_id FROM atable where A_DATE - 'transaction' < 0", - "SELECT entity_id,organization_id FROM atable where A_DATE * 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_DATE / 45 < 0", - "SELECT entity_id,organization_id FROM atable where 45 - A_DATE < 0", - "SELECT entity_id,organization_id FROM atable where A_DATE - to_date('2000-01-01 12:00:00') < to_date('2000-02-01 12:00:00')", // RHS must be number - "SELECT entity_id,organization_id FROM atable where A_DATE - A_DATE + 1 < A_DATE", // RHS must be number - "SELECT entity_id,organization_id FROM atable where A_DATE + 2 < 0", // RHS must be date - "SELECT entity_id,organization_id FROM atable where 45.5 - A_DATE < 0", - "SELECT entity_id,organization_id FROM atable where 1 + A_DATE + A_DATE < A_DATE", - "SELECT entity_id,organization_id FROM atable where A_STRING - 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_STRING / 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_STRING * 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_STRING + 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_STRING - 45 < 0", - "SELECT entity_id,organization_id FROM atable where A_STRING - 'transaction' < 0", }; - - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - for (String query : queries) { - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(query); - } catch (SQLException e) { - if (e.getMessage().contains("ERROR 203 (22005): Type mismatch.")) { - continue; - } - throw new IllegalStateException("Didn't find type mismatch: " + query, e); - } - } - } - - - @Test - public void testAmbiguousColumn() throws Exception { - String query = "SELECT * from multi_cf G where RESPONSE_TIME = 2222"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (AmbiguousColumnException e) { // expected - } finally { - conn.close(); - } - } - - @Test - public void testTableAliasMatchesCFName() throws Exception { - String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf G where G.RESPONSE_TIME-1 = F.RESPONSE_TIME"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (AmbiguousColumnException e) { // expected - } finally { - conn.close(); - } - } - - @Test - public void testCoelesceFunctionTypeMismatch() throws Exception { - String query = "SELECT coalesce(x_integer,'foo') from atable"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - 
PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 203 (22005): Type mismatch. COALESCE expected INTEGER, but got VARCHAR")); - } finally { - conn.close(); - } - } - - @Test - public void testOrderByNotInSelectDistinct() throws Exception { - String query = "SELECT distinct a_string,b_string from atable order by x_integer"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testSelectDistinctAndAll() throws Exception { - String query = "SELECT all distinct a_string,b_string from atable order by x_integer"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testSelectDistinctAndOrderBy() throws Exception { - String query = "select /*+ RANGE_SCAN */ count(distinct organization_id) from atable order by organization_id"; - String query1 = "select count(distinct organization_id) from atable order by organization_id"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), e.getErrorCode()); - } - try { - PreparedStatement statement = conn.prepareStatement(query1); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), e.getErrorCode()); - } + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER")); + } + } + + @Test + public void testAggInWhereClause() throws Exception { + try { + // Select non agg column in aggregate query + String query = "SELECT a_integer FROM atable WHERE organization_id=? 
AND count(1) > 2"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { conn.close(); - } - - @Test - public void testOrderByNotInSelectDistinctAgg() throws Exception { - String query = "SELECT distinct count(1) from atable order by x_integer"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testSelectDistinctWithAggregation() throws Exception { - String query = "SELECT distinct a_string,count(*) from atable"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testAggregateOnColumnsNotInGroupByForImmutableEncodedTable() throws Exception { - String tableName = generateUniqueName(); - String ddl = "CREATE IMMUTABLE TABLE " + tableName + - " (a_string varchar not null, col1 integer, col2 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string))"; - String query = "SELECT col1, max(a_string) from " + tableName + " group by col2"; - try (Connection conn = DriverManager.getConnection(getUrl())) { - conn.createStatement().execute(ddl); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), e.getErrorCode()); - } - } - } - - @Test - public void testRegexpSubstrSetScanKeys() throws Exception { - // First test scan keys are set when the offset is 0 or 1. - String query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+') = 'abc'"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")),scan.getStopRow()); - assertTrue(scan.getFilter() != null); - - query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+', 0) = 'abc'"; - binds = Collections.emptyList(); - scan = compileQuery(query, binds); - assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")), scan.getStopRow()); - assertTrue(scan.getFilter() != null); - - // Test scan keys are not set when the offset is not 0 or 1. 
- query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+', 3) = 'abc'"; - binds = Collections.emptyList(); - scan = compileQuery(query, binds); - assertTrue(scan.getStartRow().length == 0); - assertTrue(scan.getStopRow().length == 0); - assertTrue(scan.getFilter() != null); - } - - @Test - public void testStringConcatExpression() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where 2 || a_integer || ? like '2%'"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - byte []x=new byte[]{127,127,0,0};//Binary data - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.setBytes(1, x); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertTrue(e.getMessage().contains("Concatenation does not support")); - } finally { - conn.close(); - } - } - - @Test - public void testDivideByBigDecimalZero() throws Exception { - String query = "SELECT a_integer/x_integer/0.0 FROM atable"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertTrue(e.getMessage().contains("Divide by zero")); - } finally { - conn.close(); - } - } - - @Test - public void testDivideByIntegerZero() throws Exception { - String query = "SELECT a_integer/0 FROM atable"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.executeQuery(); - fail(); - } catch (SQLException e) { // expected - assertTrue(e.getMessage().contains("Divide by zero")); - } finally { - conn.close(); - } - } - - @Test - public void testCreateNullableInPKMiddle() throws Exception { - String query = "CREATE TABLE foo(i integer not null, j integer null, k integer not null CONSTRAINT pk PRIMARY KEY(i,j,k))"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { // expected - assertTrue(e.getMessage().contains("PK columns may not be both fixed width and nullable")); - } - } - - @Test - public void testSetSaltBucketOnAlterTable() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("ALTER TABLE atable ADD xyz INTEGER SALT_BUCKETS=4"); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.SALT_ONLY_ON_CREATE_TABLE.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().execute("ALTER TABLE atable SET SALT_BUCKETS=4"); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.SALT_ONLY_ON_CREATE_TABLE.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testAlterNotNull() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("ALTER TABLE atable ADD xyz VARCHAR NOT NULL"); - fail(); - } catch (SQLException e) { // expected - assertEquals(SQLExceptionCode.KEY_VALUE_NOT_NULL.getErrorCode(), e.getErrorCode()); - } - conn.createStatement().execute("CREATE IMMUTABLE TABLE foo (K1 VARCHAR PRIMARY KEY)"); - try { - conn.createStatement().execute("ALTER TABLE foo ADD xyz VARCHAR NOT NULL PRIMARY KEY"); - fail(); - } catch (SQLException e) { // expected - 
assertEquals(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); - } - conn.createStatement().execute("ALTER TABLE FOO ADD xyz VARCHAR NOT NULL"); - } - - @Test - public void testSubstrSetScanKey() throws Exception { - String query = "SELECT inst FROM ptsdb WHERE substr(inst, 0, 3) = 'abc'"; - List binds = Collections.emptyList(); - Scan scan = compileQuery(query, binds); - assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")), scan.getStopRow()); - assertTrue(scan.getFilter() == null); // Extracted. - } - - @Test - public void testRTrimSetScanKey() throws Exception { - String query = "SELECT inst FROM ptsdb WHERE rtrim(inst) = 'abc'"; - List binds = Collections.emptyList(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), + e.getMessage().contains("ERROR 1017 (42Y26): Aggregate may not be used in WHERE.")); + } + } + + @Test + public void testHavingAggregateQuery() throws Exception { + try { + // Select non agg column in aggregate query + String query = "SELECT a_integer FROM atable WHERE organization_id=? HAVING count(1) > 2"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER")); + } + } + + @Test + public void testNonAggInHavingClause() throws Exception { + try { + // Select non agg column in aggregate query + String query = "SELECT a_integer FROM atable WHERE organization_id=? HAVING a_integer = 5"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 1019 (42Y26): Only aggregate maybe used in the HAVING clause.")); + } + } + + @Test + public void testTypeMismatchInCase() throws Exception { + try { + // Select non agg column in aggregate query + String query = + "SELECT a_integer FROM atable WHERE organization_id=? HAVING CASE WHEN a_integer <= 2 THEN 'foo' WHEN a_integer = 3 THEN 2 WHEN a_integer <= 5 THEN 5 ELSE 5 END = 5"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Case expressions must have common type")); + } + } + + @Test + public void testNonBooleanWhereExpression() throws Exception { + try { + // Select non agg column in aggregate query + String query = + "SELECT a_integer FROM atable WHERE organization_id=? 
and CASE WHEN a_integer <= 2 THEN 'foo' WHEN a_integer = 3 THEN 'bar' WHEN a_integer <= 5 THEN 'bas' ELSE 'blah' END"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage().contains( + "ERROR 203 (22005): Type mismatch. BOOLEAN and VARCHAR for CASE WHEN A_INTEGER <= 2 THEN 'foo'WHEN A_INTEGER = 3 THEN 'bar'WHEN A_INTEGER <= 5 THEN 'bas' ELSE 'blah' END")); + } + } + + @Test + public void testNoSCNInConnectionProps() throws Exception { + Properties props = new Properties(); + DriverManager.getConnection(getUrl(), props); + } + + @Test + public void testPercentileWrongQueryWithMixOfAggrAndNonAggrExps() throws Exception { + String query = + "select a_integer, PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; + try { + compileQuery(query, Collections.emptyList()); + fail(); + } catch (SQLException e) { + assertEquals( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_INTEGER", + e.getMessage()); + } + } + + @Test + public void testPercentileWrongQuery1() throws Exception { + String query = "select PERCENTILE_CONT('*') WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; + try { + compileQuery(query, Collections.emptyList()); + fail(); + } catch (SQLException e) { + assertEquals( + "ERROR 203 (22005): Type mismatch. expected: [DECIMAL] but was: VARCHAR at PERCENTILE_CONT argument 3", + e.getMessage()); + } + } + + @Test + public void testPercentileWrongQuery2() throws Exception { + String query = "select PERCENTILE_CONT(1.1) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; + try { + compileQuery(query, Collections.emptyList()); + fail(); + } catch (SQLException e) { + assertEquals( + "ERROR 213 (22003): Value outside range. expected: [0 , 1] but was: 1.1 at PERCENTILE_CONT argument 3", + e.getMessage()); + } + } + + @Test + public void testPercentileWrongQuery3() throws Exception { + String query = "select PERCENTILE_CONT(-1) WITHIN GROUP (ORDER BY a_integer ASC) from ATABLE"; + try { + compileQuery(query, Collections.emptyList()); + fail(); + } catch (Exception e) { + assertEquals( + "ERROR 213 (22003): Value outside range. 
expected: [0 , 1] but was: -1 at PERCENTILE_CONT argument 3", + e.getMessage()); + } + } + + private Scan compileQuery(String query, List binds) throws SQLException { + QueryPlan plan = getQueryPlan(query, binds); + return plan.getContext().getScan(); + } + + private Scan projectQuery(String query) throws SQLException { + QueryPlan plan = getQueryPlan(query, Collections.emptyList()); + plan.iterator(); // Forces projection + return plan.getContext().getScan(); + } + + private QueryPlan getOptimizedQueryPlan(String query) throws SQLException { + return getOptimizedQueryPlan(query, Collections.emptyList()); + } + + private QueryPlan getOptimizedQueryPlan(String query, List binds) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PhoenixPreparedStatement statement = + conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); + for (Object bind : binds) { + statement.setObject(1, bind); + } + QueryPlan plan = statement.optimizeQuery(query); + return plan; + } finally { + conn.close(); + } + } + + private QueryPlan getQueryPlan(String query, List binds) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PhoenixPreparedStatement statement = + conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); + for (Object bind : binds) { + statement.setObject(1, bind); + } + QueryPlan plan = statement.compileQuery(query); + return plan; + } finally { + conn.close(); + } + } + + @Test + public void testKeyOrderedGroupByOptimization() throws Exception { + // Select columns in PK + String[] queries = + new String[] { "SELECT count(1) FROM atable GROUP BY organization_id,entity_id", + "SELECT count(1) FROM atable GROUP BY organization_id,substr(entity_id,1,3),entity_id", + "SELECT count(1) FROM atable GROUP BY entity_id,organization_id", + "SELECT count(1) FROM atable GROUP BY substr(entity_id,1,3),organization_id", + "SELECT count(1) FROM ptsdb GROUP BY host,inst,round(\"DATE\",'HOUR')", + "SELECT count(1) FROM atable GROUP BY organization_id", }; + List binds = Collections.emptyList(); + for (String query : queries) { + QueryPlan plan = getQueryPlan(query, binds); + assertEquals(query, BaseScannerRegionObserverConstants.KEY_ORDERED_GROUP_BY_EXPRESSIONS, + plan.getGroupBy().getScanAttribName()); + } + } + + @Test + public void testNullInScanKey() throws Exception { + // Select columns in PK + String query = "select val from ptsdb where inst is null and host='a'"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + // Projects column family with not null column + assertNull(scan.getFilter()); + assertEquals(1, scan.getFamilyMap().keySet().size()); + assertArrayEquals( + Bytes.toBytes(SchemaUtil.normalizeIdentifier(QueryConstants.DEFAULT_COLUMN_FAMILY)), + scan.getFamilyMap().keySet().iterator().next()); + } + + @Test + public void testOnlyNullInScanKey() throws Exception { + // Select columns in PK + String query = "select val from ptsdb where inst is null"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + // Projects column family with not null column + assertEquals(1, scan.getFamilyMap().keySet().size()); + assertArrayEquals( + Bytes.toBytes(SchemaUtil.normalizeIdentifier(QueryConstants.DEFAULT_COLUMN_FAMILY)), + scan.getFamilyMap().keySet().iterator().next()); + } + + @Test + public void 
testIsNullOnNotNullable() throws Exception { + // Select columns in PK + String query = "select a_string from atable where entity_id is null"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + assertDegenerate(scan); + } + + @Test + public void testIsNotNullOnNotNullable() throws Exception { + // Select columns in PK + String query = "select a_string from atable where entity_id is not null"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + assertNull(scan.getFilter()); + assertTrue(scan.getStartRow().length == 0); + assertTrue(scan.getStopRow().length == 0); + } + + @Test + public void testUpsertTypeMismatch() throws Exception { + try { + // Select non agg column in aggregate query + String query = "upsert into ATABLE VALUES (?, ?, ?)"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.setString(2, "00D300000000XHP"); + statement.setInt(3, 1); + statement.executeUpdate(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { // TODO: use error codes + assertTrue(e.getMessage().contains("Type mismatch")); + } + } + + @Test + public void testUpsertMultiByteIntoChar() throws Exception { + String value = "繰り返し曜日マスク"; + try { + // Select non agg column in aggregate query + String query = "upsert into ATABLE VALUES (?, ?, ?)"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "00D300000000XHP"); + statement.setString(2, value); + statement.setInt(3, 1); + statement.executeUpdate(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains("ERROR 201 (22000): Illegal data.")); + assertTrue(e.getMessage().contains("CHAR types may only contain single byte characters")); + assertFalse(e.getMessage().contains(value)); + } + } + + @Test + public void testSelectStarOnGroupBy() throws Exception { + try { + // Select non agg column in aggregate query + String query = "select * from ATABLE group by a_string"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY.")); + } + } + + @Test + public void testOrderByAggSelectNonAgg() throws Exception { + try { + // Order by in select with no limit or group by + String query = "select a_string from ATABLE order by max(b_string)"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. 
A_STRING")); + } + } + + @Test + public void testOrderByAggAndNonAgg() throws Exception { + try { + // Order by in select with no limit or group by + String query = "select max(a_string) from ATABLE order by max(b_string),a_string"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. A_STRING")); + } + } + + @Test + public void testOrderByNonAggSelectAgg() throws Exception { + try { + // Order by in select with no limit or group by + String query = "select max(a_string) from ATABLE order by b_string LIMIT 5"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } finally { + conn.close(); + } + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 1018 (42Y27): Aggregate may not contain columns not in GROUP BY. B_STRING")); + } + } + + @Test + public void testNotKeyOrderedGroupByOptimization() throws Exception { + // Select columns in PK + String[] queries = new String[] { "SELECT count(1) FROM atable GROUP BY entity_id", + "SELECT count(1) FROM atable GROUP BY substr(organization_id,2,3)", + "SELECT count(1) FROM atable GROUP BY substr(entity_id,1,3)", + "SELECT count(1) FROM atable GROUP BY to_date(organization_id)", + "SELECT count(1) FROM atable GROUP BY regexp_substr(organization_id, '.*foo.*'),entity_id", + "SELECT count(1) FROM atable GROUP BY substr(organization_id,1),entity_id", }; + List binds = Collections.emptyList(); + for (String query : queries) { + QueryPlan plan = getQueryPlan(query, binds); + assertEquals(plan.getGroupBy().getScanAttribName(), + BaseScannerRegionObserverConstants.UNORDERED_GROUP_BY_EXPRESSIONS); + } + } + + @Test + public void testFunkyColumnNames() throws Exception { + // Select columns in PK + String[] queries = new String[] { + "SELECT \"foo!\",\"foo.bar-bas\",\"#@$\",\"_blah^\" FROM FUNKY_NAMES", + "SELECT count(\"foo!\"),\"_blah^\" FROM FUNKY_NAMES WHERE \"foo.bar-bas\"='x' GROUP BY \"#@$\",\"_blah^\"", }; + List binds = Collections.emptyList(); + for (String query : queries) { + compileQuery(query, binds); + } + } + + @Test + public void testCountAggregatorFirst() throws Exception { + String[] queries = new String[] { + "SELECT sum(2.5),organization_id FROM atable GROUP BY organization_id,entity_id", + "SELECT avg(a_integer) FROM atable GROUP BY organization_id,substr(entity_id,1,3),entity_id", + "SELECT count(a_string) FROM atable GROUP BY substr(organization_id,1),entity_id", + "SELECT min('foo') FROM atable GROUP BY entity_id,organization_id", + "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable GROUP BY substr(organization_id,1),entity_id", + "SELECT sum(2.5) FROM atable", "SELECT avg(a_integer) FROM atable", + "SELECT count(a_string) FROM atable", "SELECT min('foo') FROM atable LIMIT 5", + "SELECT min('foo'),sum(a_integer),avg(2.5),4.5,max(b_string) FROM atable", }; + List binds = Collections.emptyList(); + String query = null; + try { + for (int i = 0; i < queries.length; i++) { + query = queries[i]; Scan scan = compileQuery(query, 
binds); - assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc ")), scan.getStopRow()); - assertNotNull(scan.getFilter()); - } - - @Test - public void testCastingIntegerToDecimalInSelect() throws Exception { - String query = "SELECT CAST (a_integer AS DECIMAL)/2 FROM aTable WHERE 5=a_integer"; - List binds = Collections.emptyList(); - compileQuery(query, binds); - } - - @Test - public void testCastingTimestampToDateInSelect() throws Exception { - String query = "SELECT CAST (a_timestamp AS DATE) FROM aTable"; - List binds = Collections.emptyList(); - compileQuery(query, binds); - } - - @Test - public void testCastingStringToDecimalInSelect() throws Exception { - String query = "SELECT CAST (b_string AS DECIMAL)/2 FROM aTable WHERE 5=a_integer"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a string to decimal isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testCastingStringToDecimalInWhere() throws Exception { - String query = "SELECT a_integer FROM aTable WHERE 2.5=CAST (b_string AS DECIMAL)/2 "; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a string to decimal isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testCastingWithLengthInSelect() throws Exception { - String query = "SELECT CAST (b_string AS VARCHAR(10)) FROM aTable"; - List binds = Collections.emptyList(); - compileQuery(query, binds); - } - - @Test - public void testCastingWithLengthInWhere() throws Exception { - String query = "SELECT b_string FROM aTable WHERE CAST (b_string AS VARCHAR(10)) = 'b'"; - List binds = Collections.emptyList(); - compileQuery(query, binds); - } - - @Test - public void testCastingWithLengthAndScaleInSelect() throws Exception { - String query = "SELECT CAST (x_decimal AS DECIMAL(10,5)) FROM aTable"; - List binds = Collections.emptyList(); + ServerAggregators aggregators = ServerAggregators.deserialize( + scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), null, null); + Aggregator aggregator = aggregators.getAggregators()[0]; + assertTrue(aggregator instanceof CountAggregator); + } + } catch (Exception e) { + throw new Exception(query, e); + } + } + + @Test + public void testInvalidArithmetic() throws Exception { + String[] queries = + new String[] { "SELECT entity_id,organization_id FROM atable where A_STRING - 5.5 < 0", + "SELECT entity_id,organization_id FROM atable where A_DATE - 'transaction' < 0", + "SELECT entity_id,organization_id FROM atable where A_DATE * 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_DATE / 45 < 0", + "SELECT entity_id,organization_id FROM atable where 45 - A_DATE < 0", + "SELECT entity_id,organization_id FROM atable where A_DATE - to_date('2000-01-01 12:00:00') < to_date('2000-02-01 12:00:00')", // RHS + // must + // be + // number + "SELECT entity_id,organization_id FROM atable where A_DATE - A_DATE + 1 < A_DATE", // RHS + // must + // be + // number + "SELECT entity_id,organization_id FROM atable where A_DATE + 2 < 0", // RHS must be date + "SELECT entity_id,organization_id FROM atable where 45.5 - A_DATE < 0", + "SELECT entity_id,organization_id FROM atable where 1 + A_DATE + A_DATE < 
A_DATE", + "SELECT entity_id,organization_id FROM atable where A_STRING - 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_STRING / 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_STRING * 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_STRING + 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_STRING - 45 < 0", + "SELECT entity_id,organization_id FROM atable where A_STRING - 'transaction' < 0", }; + + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + for (String query : queries) { + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(query); + } catch (SQLException e) { + if (e.getMessage().contains("ERROR 203 (22005): Type mismatch.")) { + continue; + } + throw new IllegalStateException("Didn't find type mismatch: " + query, e); + } + } + } + + @Test + public void testAmbiguousColumn() throws Exception { + String query = "SELECT * from multi_cf G where RESPONSE_TIME = 2222"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (AmbiguousColumnException e) { // expected + } finally { + conn.close(); + } + } + + @Test + public void testTableAliasMatchesCFName() throws Exception { + String query = + "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf G where G.RESPONSE_TIME-1 = F.RESPONSE_TIME"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (AmbiguousColumnException e) { // expected + } finally { + conn.close(); + } + } + + @Test + public void testCoelesceFunctionTypeMismatch() throws Exception { + String query = "SELECT coalesce(x_integer,'foo') from atable"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 203 (22005): Type mismatch. 
COALESCE expected INTEGER, but got VARCHAR")); + } finally { + conn.close(); + } + } + + @Test + public void testOrderByNotInSelectDistinct() throws Exception { + String query = "SELECT distinct a_string,b_string from atable order by x_integer"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT.getErrorCode(), + e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testSelectDistinctAndAll() throws Exception { + String query = "SELECT all distinct a_string,b_string from atable order by x_integer"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testSelectDistinctAndOrderBy() throws Exception { + String query = + "select /*+ RANGE_SCAN */ count(distinct organization_id) from atable order by organization_id"; + String query1 = "select count(distinct organization_id) from atable order by organization_id"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), + e.getErrorCode()); + } + try { + PreparedStatement statement = conn.prepareStatement(query1); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), + e.getErrorCode()); + } + conn.close(); + } + + @Test + public void testOrderByNotInSelectDistinctAgg() throws Exception { + String query = "SELECT distinct count(1) from atable order by x_integer"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT.getErrorCode(), + e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testSelectDistinctWithAggregation() throws Exception { + String query = "SELECT distinct a_string,count(*) from atable"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), + e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testAggregateOnColumnsNotInGroupByForImmutableEncodedTable() throws Exception { + String tableName = generateUniqueName(); + String ddl = "CREATE IMMUTABLE TABLE " + tableName + + " (a_string varchar not null, col1 integer, 
col2 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string))"; + String query = "SELECT col1, max(a_string) from " + tableName + " group by col2"; + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement().execute(ddl); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.AGGREGATE_WITH_NOT_GROUP_BY_COLUMN.getErrorCode(), + e.getErrorCode()); + } + } + } + + @Test + public void testRegexpSubstrSetScanKeys() throws Exception { + // First test scan keys are set when the offset is 0 or 1. + String query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+') = 'abc'"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")), scan.getStopRow()); + assertTrue(scan.getFilter() != null); + + query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+', 0) = 'abc'"; + binds = Collections.emptyList(); + scan = compileQuery(query, binds); + assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")), scan.getStopRow()); + assertTrue(scan.getFilter() != null); + + // Test scan keys are not set when the offset is not 0 or 1. + query = "SELECT host FROM ptsdb WHERE regexp_substr(inst, '[a-zA-Z]+', 3) = 'abc'"; + binds = Collections.emptyList(); + scan = compileQuery(query, binds); + assertTrue(scan.getStartRow().length == 0); + assertTrue(scan.getStopRow().length == 0); + assertTrue(scan.getFilter() != null); + } + + @Test + public void testStringConcatExpression() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where 2 || a_integer || ? 
like '2%'"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + byte[] x = new byte[] { 127, 127, 0, 0 };// Binary data + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.setBytes(1, x); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertTrue(e.getMessage().contains("Concatenation does not support")); + } finally { + conn.close(); + } + } + + @Test + public void testDivideByBigDecimalZero() throws Exception { + String query = "SELECT a_integer/x_integer/0.0 FROM atable"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertTrue(e.getMessage().contains("Divide by zero")); + } finally { + conn.close(); + } + } + + @Test + public void testDivideByIntegerZero() throws Exception { + String query = "SELECT a_integer/0 FROM atable"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.executeQuery(); + fail(); + } catch (SQLException e) { // expected + assertTrue(e.getMessage().contains("Divide by zero")); + } finally { + conn.close(); + } + } + + @Test + public void testCreateNullableInPKMiddle() throws Exception { + String query = + "CREATE TABLE foo(i integer not null, j integer null, k integer not null CONSTRAINT pk PRIMARY KEY(i,j,k))"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { // expected + assertTrue(e.getMessage().contains("PK columns may not be both fixed width and nullable")); + } + } + + @Test + public void testSetSaltBucketOnAlterTable() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("ALTER TABLE atable ADD xyz INTEGER SALT_BUCKETS=4"); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.SALT_ONLY_ON_CREATE_TABLE.getErrorCode(), e.getErrorCode()); + } + try { + conn.createStatement().execute("ALTER TABLE atable SET SALT_BUCKETS=4"); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.SALT_ONLY_ON_CREATE_TABLE.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testAlterNotNull() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("ALTER TABLE atable ADD xyz VARCHAR NOT NULL"); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.KEY_VALUE_NOT_NULL.getErrorCode(), e.getErrorCode()); + } + conn.createStatement().execute("CREATE IMMUTABLE TABLE foo (K1 VARCHAR PRIMARY KEY)"); + try { + conn.createStatement().execute("ALTER TABLE foo ADD xyz VARCHAR NOT NULL PRIMARY KEY"); + fail(); + } catch (SQLException e) { // expected + assertEquals(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY.getErrorCode(), + e.getErrorCode()); + } + conn.createStatement().execute("ALTER TABLE FOO ADD xyz VARCHAR NOT NULL"); + } + + @Test + public void testSubstrSetScanKey() throws Exception { + String query = "SELECT inst FROM ptsdb WHERE substr(inst, 0, 3) = 'abc'"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); + 
assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc")), scan.getStopRow()); + assertTrue(scan.getFilter() == null); // Extracted. + } + + @Test + public void testRTrimSetScanKey() throws Exception { + String query = "SELECT inst FROM ptsdb WHERE rtrim(inst) = 'abc'"; + List binds = Collections.emptyList(); + Scan scan = compileQuery(query, binds); + assertArrayEquals(Bytes.toBytes("abc"), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(Bytes.toBytes("abc ")), scan.getStopRow()); + assertNotNull(scan.getFilter()); + } + + @Test + public void testCastingIntegerToDecimalInSelect() throws Exception { + String query = "SELECT CAST (a_integer AS DECIMAL)/2 FROM aTable WHERE 5=a_integer"; + List binds = Collections.emptyList(); + compileQuery(query, binds); + } + + @Test + public void testCastingTimestampToDateInSelect() throws Exception { + String query = "SELECT CAST (a_timestamp AS DATE) FROM aTable"; + List binds = Collections.emptyList(); + compileQuery(query, binds); + } + + @Test + public void testCastingStringToDecimalInSelect() throws Exception { + String query = "SELECT CAST (b_string AS DECIMAL)/2 FROM aTable WHERE 5=a_integer"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a string to decimal isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testCastingStringToDecimalInWhere() throws Exception { + String query = "SELECT a_integer FROM aTable WHERE 2.5=CAST (b_string AS DECIMAL)/2 "; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a string to decimal isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testCastingWithLengthInSelect() throws Exception { + String query = "SELECT CAST (b_string AS VARCHAR(10)) FROM aTable"; + List binds = Collections.emptyList(); + compileQuery(query, binds); + } + + @Test + public void testCastingWithLengthInWhere() throws Exception { + String query = "SELECT b_string FROM aTable WHERE CAST (b_string AS VARCHAR(10)) = 'b'"; + List binds = Collections.emptyList(); + compileQuery(query, binds); + } + + @Test + public void testCastingWithLengthAndScaleInSelect() throws Exception { + String query = "SELECT CAST (x_decimal AS DECIMAL(10,5)) FROM aTable"; + List binds = Collections.emptyList(); + compileQuery(query, binds); + } + + @Test + public void testUsingNonComparableDataTypesInRowValueConstructorFails() throws Exception { + String query = + "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) > (2, 'abc')"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a integer to string isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testUsingNonComparableDataTypesOfColumnRefOnLHSAndRowValueConstructorFails() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE a_integer > ('abc', 2)"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a integer to string isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == 
SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testUsingNonComparableDataTypesOfLiteralOnLHSAndRowValueConstructorFails() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE 'abc' > (a_integer, x_integer)"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a integer to string isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testUsingNonComparableDataTypesOfColumnRefOnRHSAndRowValueConstructorFails() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE ('abc', 2) < a_integer "; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a integer to string isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testUsingNonComparableDataTypesOfLiteralOnRHSAndRowValueConstructorFails() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) < 'abc'"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since casting a integer to string isn't supported"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); + } + } + + @Test + public void testNonConstantInList() throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE a_integer IN (x_integer)"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since non constants in IN is not valid"); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.VALUE_IN_LIST_NOT_CONSTANT.getErrorCode()); + } + } + + @Test + public void testKeyValueColumnInPKConstraint() throws Exception { + String ddl = "CREATE TABLE t (a.k VARCHAR, b.v VARCHAR CONSTRAINT pk PRIMARY KEY(k))"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertTrue(e.getErrorCode() == SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME.getErrorCode()); + } + } + + @Test + public void testUnknownColumnInPKConstraint() throws Exception { + String ddl = "CREATE TABLE t (k1 VARCHAR, b.v VARCHAR CONSTRAINT pk PRIMARY KEY(k1, k2))"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (ColumnNotFoundException e) { + assertEquals("K2", e.getColumnName()); + } + } + + @Test + public void testDuplicatePKColumn() throws Exception { + String ddl = "CREATE TABLE t (k1 VARCHAR, k1 VARCHAR CONSTRAINT pk PRIMARY KEY(k1))"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (ColumnAlreadyExistsException e) { + assertEquals("K1", e.getColumnName()); + } + } + + @Test + public void testDuplicateKVColumn() throws Exception { + String ddl = + "CREATE TABLE t (k1 VARCHAR, v1 VARCHAR, v2 VARCHAR, v1 INTEGER CONSTRAINT pk PRIMARY KEY(k1))"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (ColumnAlreadyExistsException e) { + assertEquals("V1", e.getColumnName()); + 
} + } + + private void assertImmutableRows(Connection conn, String fullTableName, boolean expectedValue) + throws SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + assertEquals(expectedValue, + pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)).isImmutableRows()); + } + + @Test + public void testInvalidNegativeArrayIndex() throws Exception { + String query = "SELECT a_double_array[-20] FROM table_with_array"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(query); + fail(); + } catch (Exception e) { + + } + } + + @Test + public void testWrongDataTypeInRoundFunction() throws Exception { + String query = "SELECT ROUND(a_string, 'day', 1) FROM aTable"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since VARCHAR is not a valid data type for ROUND"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testNonArrayColumnWithIndex() throws Exception { + String query = "SELECT a_float[1] FROM table_with_array"; + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(query); + fail(); + } catch (Exception e) { + } + } + + public void testWrongTimeUnitInRoundDateFunction() throws Exception { + String query = "SELECT ROUND(a_date, 'dayss', 1) FROM aTable"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since dayss is not a valid time unit type"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains(TimeUnit.VALID_VALUES)); + } + } + + @Test + public void testWrongMultiplierInRoundDateFunction() throws Exception { + String query = "SELECT ROUND(a_date, 'days', 1.23) FROM aTable"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since multiplier can be an INTEGER only"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testTypeMismatchForArrayElem() throws Exception { + String query = "SELECT (a_string,a_date)[1] FROM aTable"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since a row value constructor is not an array"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testTypeMismatch2ForArrayElem() throws Exception { + String query = "SELECT ROUND(a_date, 'days', 1.23)[1] FROM aTable"; + List binds = Collections.emptyList(); + try { + compileQuery(query, binds); + fail("Compilation should have failed since ROUND does not return an array"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testInvalidArraySize() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + String query = "CREATE TABLE foo (col1 INTEGER[-1] NOT NULL PRIMARY KEY)"; + PreparedStatement statement = conn.prepareStatement(query); + statement.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testInvalidArrayElemRefInUpsert() throws Exception { + Connection 
conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, a INTEGER[10], B INTEGER[10])"); + try { + conn.createStatement().execute("UPSERT INTO t(k,a[2]) VALUES('A', 5)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); + } + conn.close(); + } + + @Test + public void testVarbinaryArrayNotSupported() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, a VARBINARY[10])"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.VARBINARY_ARRAY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); + } + conn.close(); + } + + @Test + public void testInvalidNextValueFor() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("CREATE SEQUENCE alpha.zeta"); + String[] queries = { "SELECT * FROM aTable WHERE a_integer < next value for alpha.zeta", + "SELECT * FROM aTable GROUP BY a_string,next value for alpha.zeta", + "SELECT * FROM aTable GROUP BY 1 + next value for alpha.zeta", + "SELECT * FROM aTable GROUP BY a_integer HAVING a_integer < next value for alpha.zeta", + "SELECT * FROM aTable WHERE a_integer < 3 GROUP BY a_integer HAVING a_integer < next value for alpha.zeta", + "SELECT * FROM aTable ORDER BY next value for alpha.zeta", + "SELECT max(next value for alpha.zeta) FROM aTable", }; + for (String query : queries) { + List binds = Collections.emptyList(); + try { compileQuery(query, binds); - } - - @Test - public void testUsingNonComparableDataTypesInRowValueConstructorFails() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) > (2, 'abc')"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a integer to string isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testUsingNonComparableDataTypesOfColumnRefOnLHSAndRowValueConstructorFails() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE a_integer > ('abc', 2)"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a integer to string isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testUsingNonComparableDataTypesOfLiteralOnLHSAndRowValueConstructorFails() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE 'abc' > (a_integer, x_integer)"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a integer to string isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testUsingNonComparableDataTypesOfColumnRefOnRHSAndRowValueConstructorFails() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE ('abc', 2) < a_integer "; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a integer to string isn't supported"); - } catch (SQLException e) { - 
assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testUsingNonComparableDataTypesOfLiteralOnRHSAndRowValueConstructorFails() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) < 'abc'"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since casting a integer to string isn't supported"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.TYPE_MISMATCH.getErrorCode()); - } - } - - @Test - public void testNonConstantInList() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE a_integer IN (x_integer)"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since non constants in IN is not valid"); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.VALUE_IN_LIST_NOT_CONSTANT.getErrorCode()); - } - } - - @Test - public void testKeyValueColumnInPKConstraint() throws Exception { - String ddl = "CREATE TABLE t (a.k VARCHAR, b.v VARCHAR CONSTRAINT pk PRIMARY KEY(k))"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertTrue(e.getErrorCode() == SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME.getErrorCode()); - } - } - - @Test - public void testUnknownColumnInPKConstraint() throws Exception { - String ddl = "CREATE TABLE t (k1 VARCHAR, b.v VARCHAR CONSTRAINT pk PRIMARY KEY(k1, k2))"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (ColumnNotFoundException e) { - assertEquals("K2",e.getColumnName()); - } - } - - - @Test - public void testDuplicatePKColumn() throws Exception { - String ddl = "CREATE TABLE t (k1 VARCHAR, k1 VARCHAR CONSTRAINT pk PRIMARY KEY(k1))"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (ColumnAlreadyExistsException e) { - assertEquals("K1",e.getColumnName()); - } - } - - - @Test - public void testDuplicateKVColumn() throws Exception { - String ddl = "CREATE TABLE t (k1 VARCHAR, v1 VARCHAR, v2 VARCHAR, v1 INTEGER CONSTRAINT pk PRIMARY KEY(k1))"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (ColumnAlreadyExistsException e) { - assertEquals("V1",e.getColumnName()); - } - } - - private void assertImmutableRows(Connection conn, String fullTableName, boolean expectedValue) throws SQLException { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - assertEquals(expectedValue, pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)).isImmutableRows()); - } - - @Test - public void testInvalidNegativeArrayIndex() throws Exception { - String query = "SELECT a_double_array[-20] FROM table_with_array"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(query); - fail(); - } catch (Exception e) { - - } - } - @Test - public void testWrongDataTypeInRoundFunction() throws Exception { - String query = "SELECT ROUND(a_string, 'day', 1) FROM aTable"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since VARCHAR is not a valid data type for ROUND"); - } catch (SQLException e) { - 
assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testNonArrayColumnWithIndex() throws Exception { - String query = "SELECT a_float[1] FROM table_with_array"; - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(query); - fail(); - } catch (Exception e) { - } - } - - public void testWrongTimeUnitInRoundDateFunction() throws Exception { - String query = "SELECT ROUND(a_date, 'dayss', 1) FROM aTable"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since dayss is not a valid time unit type"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains(TimeUnit.VALID_VALUES)); - } - } - - @Test - public void testWrongMultiplierInRoundDateFunction() throws Exception { - String query = "SELECT ROUND(a_date, 'days', 1.23) FROM aTable"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since multiplier can be an INTEGER only"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testTypeMismatchForArrayElem() throws Exception { - String query = "SELECT (a_string,a_date)[1] FROM aTable"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since a row value constructor is not an array"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testTypeMismatch2ForArrayElem() throws Exception { - String query = "SELECT ROUND(a_date, 'days', 1.23)[1] FROM aTable"; - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since ROUND does not return an array"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testInvalidArraySize() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - String query = "CREATE TABLE foo (col1 INTEGER[-1] NOT NULL PRIMARY KEY)"; - PreparedStatement statement = conn.prepareStatement(query); - statement.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testInvalidArrayElemRefInUpsert() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, a INTEGER[10], B INTEGER[10])"); - try { - conn.createStatement().execute("UPSERT INTO t(k,a[2]) VALUES('A', 5)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); - } + fail("Compilation should have failed since this is an invalid usage of NEXT VALUE FOR: " + + query); + } catch (SQLException e) { + assertEquals(query, SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR.getErrorCode(), + e.getErrorCode()); + } + } + } + + @Test + public void testNoCachingHint() throws Exception { + List binds = Collections.emptyList(); + Scan scan = compileQuery("select val from ptsdb", binds); + assertTrue(scan.getCacheBlocks()); + scan = compileQuery("select /*+ NO_CACHE */ val from ptsdb", binds); + assertFalse(scan.getCacheBlocks()); + scan = compileQuery( + "select /*+ 
NO_CACHE */ p1.val from ptsdb p1 inner join ptsdb p2 on p1.inst = p2.inst", + binds); + assertFalse(scan.getCacheBlocks()); + } + + @Test + public void testExecuteWithNonEmptyBatch() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + Statement stmt = conn.createStatement(); + stmt.addBatch("SELECT * FROM atable"); + stmt.execute("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), + e.getErrorCode()); + } + try { + Statement stmt = conn.createStatement(); + stmt.addBatch("SELECT * FROM atable"); + stmt.executeUpdate("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), + e.getErrorCode()); + } + try { + PreparedStatement stmt = + conn.prepareStatement("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); + stmt.addBatch(); + stmt.execute(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), + e.getErrorCode()); + } + try { + PreparedStatement stmt = + conn.prepareStatement("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); + stmt.addBatch(); + stmt.executeUpdate(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), + e.getErrorCode()); + } + conn.close(); + } + + @Test + public void testInvalidPrimaryKeyDecl() throws Exception { + String[] queries = { "CREATE TABLE t (k varchar null primary key)", + "CREATE TABLE t (k varchar null, constraint pk primary key (k))", }; + Connection conn = DriverManager.getConnection(getUrl()); + for (String query : queries) { + try { + conn.createStatement().execute(query); + fail("Compilation should have failed since this is an invalid PRIMARY KEY declaration: " + + query); + } catch (SQLException e) { + assertEquals(query, SQLExceptionCode.SINGLE_PK_MAY_NOT_BE_NULL.getErrorCode(), + e.getErrorCode()); + } + } + } + + @Test + public void testInvalidNullCompositePrimaryKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k1 varchar, k2 varchar, constraint pk primary key(k1,k2))"); + PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t values(?,?)"); + stmt.setString(1, ""); + stmt.setString(2, ""); + try { + stmt.execute(); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Primary key may not be null")); + } + } + + @Test + public void testGroupByLimitOptimization() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 varchar, k2 varchar, v varchar, constraint pk primary key(k1,k2))"); + ResultSet rs; + String[] queries = { "SELECT DISTINCT v FROM T LIMIT 3", + "SELECT v FROM T GROUP BY v,k1 LIMIT 3", "SELECT count(*) FROM T GROUP BY k1 LIMIT 3", + "SELECT max(v) FROM T GROUP BY k1,k2 LIMIT 3", "SELECT k1,k2 FROM T GROUP BY k1,k2 LIMIT 3", + "SELECT max(v) FROM T GROUP BY k2,k1 HAVING k1 > 'a' LIMIT 3", // Having optimized out, order + // of GROUP BY key not + // important + }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + rs = conn.createStatement().executeQuery("EXPLAIN " + query); + assertTrue("Expected to find GROUP BY limit optimization in: " + 
query, + QueryUtil.getExplainPlan(rs).contains(" LIMIT 3 GROUPS")); + } + } + + @Test + public void testNoGroupByLimitOptimization() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 varchar, k2 varchar, v varchar, constraint pk primary key(k1,k2))"); + ResultSet rs; + String[] queries = { + // "SELECT DISTINCT v FROM T ORDER BY v LIMIT 3", + // "SELECT v FROM T GROUP BY v,k1 ORDER BY v LIMIT 3", + "SELECT DISTINCT count(*) FROM T GROUP BY k1 LIMIT 3", + "SELECT count(1) FROM T GROUP BY v,k1 LIMIT 3", + "SELECT max(v) FROM T GROUP BY k1,k2 HAVING count(k1) > 1 LIMIT 3", + "SELECT count(v) FROM T GROUP BY to_date(k2),k1 LIMIT 3", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + rs = conn.createStatement().executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertFalse("Did not expected to find GROUP BY limit optimization in: " + query, + explainPlan.contains(" LIMIT 3 GROUPS")); + } + } + + @Test + public void testLocalIndexCreationWithDefaultFamilyOption() throws Exception { + Connection conn1 = DriverManager.getConnection(getUrl()); + try { + Statement statement = conn1.createStatement(); + statement.execute("create table example (id integer not null,fn varchar," + + "\"ln\" varchar constraint pk primary key(id)) DEFAULT_COLUMN_FAMILY='F'"); + try { + statement.execute("create local index my_idx on example (fn) DEFAULT_COLUMN_FAMILY='F'"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE.getErrorCode(), + e.getErrorCode()); + } + statement.execute("create local index my_idx on example (fn)"); + } finally { + conn1.close(); + } + } + + @Test + public void testMultiCFProjection() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String ddl = "CREATE TABLE multiCF (k integer primary key, a.a varchar, b.b varchar)"; + conn.createStatement().execute(ddl); + String query = "SELECT COUNT(*) FROM multiCF"; + QueryPlan plan = getQueryPlan(query, Collections.emptyList()); + plan.iterator(); + Scan scan = plan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof FirstKeyOnlyFilter); + assertEquals(1, scan.getFamilyMap().size()); + } + + @Test + public void testNonDeterministicExpressionIndex() throws Exception { + String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; + Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = null; + try { + stmt = conn.createStatement(); + stmt.execute(ddl); + stmt.execute("CREATE INDEX i ON t (RAND())"); + fail(); + } catch (SQLException e) { + assertEquals( + SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), + e.getErrorCode()); + } finally { + stmt.close(); + } + } + + @Test + public void testStatelessExpressionIndex() throws Exception { + String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; + Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = null; + try { + stmt = conn.createStatement(); + stmt.execute(ddl); + stmt.execute("CREATE INDEX i ON t (2)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), + e.getErrorCode()); + } finally { + stmt.close(); + } + } + + @Test + public void testAggregateExpressionIndex() throws Exception { + String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; + Connection conn = DriverManager.getConnection(getUrl()); + 
Statement stmt = null; + try { + stmt = conn.createStatement(); + stmt.execute(ddl); + stmt.execute("CREATE INDEX i ON t (SUM(k1))"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), + e.getErrorCode()); + } finally { + stmt.close(); + } + } + + @Test + public void testDescVarbinaryNotSupported() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t (k VARBINARY PRIMARY KEY DESC)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); + } + try { + conn.createStatement().execute( + "CREATE TABLE t (k1 VARCHAR NOT NULL, k2 VARBINARY, CONSTRAINT pk PRIMARY KEY (k1,k2 DESC))"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); + } + try { + conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR PRIMARY KEY)"); + conn.createStatement().execute("ALTER TABLE t ADD k2 VARBINARY PRIMARY KEY DESC"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); + } + conn.close(); + } + + @Test + public void testDivideByZeroExpressionIndex() throws Exception { + String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; + Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = null; + try { + stmt = conn.createStatement(); + stmt.execute(ddl); + stmt.execute("CREATE INDEX i ON t (k1/0)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DIVIDE_BY_ZERO.getErrorCode(), e.getErrorCode()); + } finally { + stmt.close(); + } + } + + @Test + public void testRegex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement(); + stmt.execute("CREATE TABLE t (k1 INTEGER PRIMARY KEY, v VARCHAR)"); + + // character classes + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[abc]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[^abc]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-zA-Z]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-d[m-p]]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[def]]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^bc]]') = 'val'"); + stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^m-p]]') = 'val'"); + + // predefined character classes + stmt.executeQuery( + "select * from T where REGEXP_SUBSTR(v, '.\\\\d\\\\D\\\\s\\\\S\\\\w\\\\W') = 'val'"); + } + + private static void assertLiteralEquals(Object o, RowProjector p, int i) { + assertTrue(i < p.getColumnCount()); + Expression e = p.getColumnProjector(i).getExpression(); + assertTrue(e instanceof LiteralExpression); + LiteralExpression l = (LiteralExpression) e; + Object lo = l.getValue(); + assertEquals(o, lo); + } + + @Test + public void testIntAndLongMinValue() throws Exception { + BigDecimal oneLessThanMinLong = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + BigDecimal oneMoreThanMaxLong = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + String query = "SELECT " + Integer.MIN_VALUE + "," + Long.MIN_VALUE + "," + + (Integer.MIN_VALUE + 1) + "," + (Long.MIN_VALUE + 1) + "," + ((long) Integer.MIN_VALUE - 1) + + "," + oneLessThanMinLong + "," 
+ Integer.MAX_VALUE + "," + Long.MAX_VALUE + "," + + (Integer.MAX_VALUE - 1) + "," + (Long.MAX_VALUE - 1) + "," + ((long) Integer.MAX_VALUE + 1) + + "," + oneMoreThanMaxLong + " FROM " + "\"" + SYSTEM_CATALOG_SCHEMA + "\".\"" + + SYSTEM_STATS_TABLE + "\"" + " LIMIT 1"; + List binds = Collections.emptyList(); + QueryPlan plan = getQueryPlan(query, binds); + RowProjector p = plan.getProjector(); + // Negative integers end up as longs once the * -1 occurs + assertLiteralEquals((long) Integer.MIN_VALUE, p, 0); + // Min long still stays as long + assertLiteralEquals(Long.MIN_VALUE, p, 1); + assertLiteralEquals((long) Integer.MIN_VALUE + 1, p, 2); + assertLiteralEquals(Long.MIN_VALUE + 1, p, 3); + assertLiteralEquals((long) Integer.MIN_VALUE - 1, p, 4); + // Can't fit into long, so becomes BigDecimal + assertLiteralEquals(oneLessThanMinLong, p, 5); + // Positive integers stay as ints + assertLiteralEquals(Integer.MAX_VALUE, p, 6); + assertLiteralEquals(Long.MAX_VALUE, p, 7); + assertLiteralEquals(Integer.MAX_VALUE - 1, p, 8); + assertLiteralEquals(Long.MAX_VALUE - 1, p, 9); + assertLiteralEquals((long) Integer.MAX_VALUE + 1, p, 10); + assertLiteralEquals(oneMoreThanMaxLong, p, 11); + } + + @Test + public void testMathFunctionOrderByOrderPreservingFwd() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2,k3))"); + /* + * "SELECT * FROM T ORDER BY k1, k2", "SELECT * FROM T ORDER BY k1, SIGN(k2)", + * "SELECT * FROM T ORDER BY SIGN(k1), k2", + */ + List queryList = new ArrayList(); + queryList.add("SELECT * FROM T ORDER BY k1, k2"); + for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { + queryList.add(String.format("SELECT * FROM T ORDER BY k1, %s(k2)", sub)); + queryList.add(String.format("SELECT * FROM T ORDER BY %s(k1), k2", sub)); + } + String[] queries = queryList.toArray(new String[queryList.size()]); + for (int i = 0; i < queries.length; i++) { + String query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + // Negative test + queryList.clear(); + for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { + queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub)); + } + for (String query : queryList.toArray(new String[queryList.size()])) { + Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query) + .getContext().getScan(); + assertNotNull(scan.getFilter()); + assertTrue(scan.getStartRow().length == 0); + assertTrue(scan.getStopRow().length == 0); + } + } + + @Test + public void testMathFunctionOrderByOrderPreservingRev() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2 DESC,k3))"); + List queryList = new ArrayList(); + // "SELECT * FROM T ORDER BY k1 DESC, SIGN(k2) DESC, k3 DESC" + queryList.add("SELECT * FROM T ORDER BY k1 DESC"); + queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2"); + queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2, k3 DESC"); + for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { + queryList.add(String.format("SELECT * FROM T ORDER BY k1 DESC, %s(k2) DESC, k3 DESC", sub)); + } + 
String[] queries = queryList.toArray(new String[queryList.size()]); + for (int i = 0; i < queries.length; i++) { + String query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue(query, plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + } + // Negative test + queryList.clear(); + for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { + queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub)); + } + for (String query : queryList.toArray(new String[queryList.size()])) { + Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query) + .getContext().getScan(); + assertNotNull(scan.getFilter()); + assertTrue(scan.getStartRow().length == 0); + assertTrue(scan.getStopRow().length == 0); + } + } + + @Test + public void testOrderByOrderPreservingFwd() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = { "SELECT * FROM T WHERE k2=CURRENT_DATE() ORDER BY k1, k3", + "SELECT * FROM T ORDER BY (k1,k2), k3", "SELECT * FROM T ORDER BY k1,k2,k3 NULLS FIRST", + "SELECT * FROM T ORDER BY k1,k2,k3", "SELECT * FROM T ORDER BY k1,k2", + "SELECT * FROM T ORDER BY k1", "SELECT * FROM T ORDER BY CAST(k1 AS TIMESTAMP)", + "SELECT * FROM T ORDER BY (k1,k2,k3)", + "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY'), CEIL(k2, 'HOUR')", + "SELECT * FROM T ORDER BY INVERT(k1) DESC", + "SELECT * FROM T WHERE k1=CURRENT_DATE() ORDER BY k2", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue("Expected order by to be compiled out: " + query, + plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + } + + @Test + public void testOrderByOrderPreservingRev() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2 DESC,k3 DESC))"); + String[] queries = { "SELECT * FROM T ORDER BY INVERT(k1),k2,k3 nulls last", + "SELECT * FROM T ORDER BY INVERT(k1),k2", "SELECT * FROM T ORDER BY INVERT(k1)", + "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY') DESC, CEIL(k2, 'HOUR') DESC", + "SELECT * FROM T ORDER BY k1 DESC", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue("Expected order by to be compiled out: " + query, + plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + } + } + + @Test + public void testNotOrderByOrderPreserving() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 date not null, k2 varchar, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3 desc))"); + String[] queries = { "SELECT * FROM T ORDER BY k1,k2 NULLS LAST", + "SELECT * FROM T ORDER BY k1,k2, k3 NULLS LAST", "SELECT * FROM T ORDER BY k1,k3", + "SELECT * FROM T ORDER BY SUBSTR(TO_CHAR(k1),1,4)", "SELECT * FROM T ORDER BY k2", + "SELECT * FROM T ORDER BY INVERT(k1),k3", + "SELECT * FROM T ORDER BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END", + "SELECT * FROM T ORDER BY TO_CHAR(k1)", }; + String query; + for (int i = 0; 
i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertFalse("Expected order by not to be compiled out: " + query, + plan.getOrderBy().getOrderByExpressions().isEmpty()); + } + } + + @Test + public void testNotOrderByOrderPreservingForAggregation() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE IF NOT EXISTS VA_TEST(ID VARCHAR NOT NULL PRIMARY KEY, VAL1 VARCHAR, VAL2 INTEGER)"); + String[] queries = { + "select distinct ID, VAL1, VAL2 from VA_TEST where \"ID\" in ('ABC','ABD','ABE','ABF','ABG','ABH','AAA', 'AAB', 'AAC','AAD','AAE','AAF') order by VAL1 ASC" }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertFalse("Expected order by not to be compiled out: " + query, + plan.getOrderBy().getOrderByExpressions().isEmpty()); + } + } + + @Test + public void testGroupByOrderPreserving() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = + { "SELECT 1 FROM T GROUP BY k3, (k1,k2)", "SELECT 1 FROM T GROUP BY k2,k1,k3", + "SELECT 1 FROM T GROUP BY k1,k2", "SELECT 1 FROM T GROUP BY k1", + "SELECT 1 FROM T GROUP BY CAST(k1 AS TIMESTAMP)", "SELECT 1 FROM T GROUP BY (k1,k2,k3)", + "SELECT 1 FROM T GROUP BY TRUNC(k2, 'DAY'), CEIL(k1, 'HOUR')", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue("Expected group by to be order preserving: " + query, + plan.getGroupBy().isOrderPreserving()); + } + } + + @Test + public void testGroupByOrderPreserving2() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE T (ORGANIZATION_ID char(15) not null, \n" + + "JOURNEY_ID char(15) not null, \n" + "DATASOURCE SMALLINT not null, \n" + + "MATCH_STATUS TINYINT not null, \n" + "EXTERNAL_DATASOURCE_KEY varchar(30), \n" + + "ENTITY_ID char(15) not null, \n" + "CONSTRAINT PK PRIMARY KEY (\n" + + " ORGANIZATION_ID, \n" + " JOURNEY_ID, \n" + " DATASOURCE, \n" + + " MATCH_STATUS,\n" + " EXTERNAL_DATASOURCE_KEY,\n" + " ENTITY_ID))"); + String[] queries = { "SELECT COUNT(1) As DUP_COUNT\n" + " FROM T \n" + + " WHERE JOURNEY_ID='07ixx000000004J' AND \n" + + " DATASOURCE=0 AND MATCH_STATUS <= 1 and \n" + + " ORGANIZATION_ID='07ixx000000004J' \n" + + " GROUP BY MATCH_STATUS, EXTERNAL_DATASOURCE_KEY \n" + " HAVING COUNT(1) > 1", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue("Expected group by to be order preserving: " + query, + plan.getGroupBy().isOrderPreserving()); + } + } + + @Test + public void testNotGroupByOrderPreserving() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = { "SELECT 1 FROM T GROUP BY k1,k3", "SELECT 1 FROM T GROUP BY k2", + "SELECT 1 FROM T 
GROUP BY INVERT(k1),k3", + "SELECT 1 FROM T GROUP BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END", + "SELECT 1 FROM T GROUP BY TO_CHAR(k1)", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertFalse("Expected group by not to be order preserving: " + query, + plan.getGroupBy().isOrderPreserving()); + } + } + + @Test + public void testUseRoundRobinIterator() throws Exception { + Properties props = new Properties(); + props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); + Connection conn = DriverManager.getConnection(getUrl(), props); + conn.createStatement().execute( + "CREATE TABLE t (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = { "SELECT 1 FROM T ", "SELECT 1 FROM T WHERE V = 'c'", + "SELECT 1 FROM T WHERE (k1,k2, k3) > ('a', 'ab', 1)", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertTrue("Expected plan to use round robin iterator " + query, + plan.useRoundRobinIterator()); + } + } + + @Test + public void testForcingRowKeyOrderNotUseRoundRobinIterator() throws Exception { + Properties props = new Properties(); + props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(true)); + Connection conn = DriverManager.getConnection(getUrl(), props); + testForceRowKeyOrder(conn, false); + testForceRowKeyOrder(conn, true); + } + + private void testForceRowKeyOrder(Connection conn, boolean isSalted) throws SQLException { + String tableName = "tablename" + (isSalted ? "_salt" : ""); + conn.createStatement().execute("CREATE TABLE " + tableName + + " (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = + { "SELECT 1 FROM " + tableName, "SELECT 1 FROM " + tableName + " WHERE V = 'c'", + "SELECT 1 FROM " + tableName + " WHERE (k1, k2, k3) > ('a', 'ab', 1)", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertFalse("Expected plan to not use round robin iterator " + query, + plan.useRoundRobinIterator()); + } + } + + @Test + public void testPlanForOrderByOrGroupByNotUseRoundRobin() throws Exception { + Properties props = new Properties(); + props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); + Connection conn = DriverManager.getConnection(getUrl(), props); + testOrderByOrGroupByDoesntUseRoundRobin(conn, true); + testOrderByOrGroupByDoesntUseRoundRobin(conn, false); + } + + private void testOrderByOrGroupByDoesntUseRoundRobin(Connection conn, boolean salted) + throws SQLException { + String tableName = "orderbygroupbytable" + (salted ? 
"_salt" : ""); + conn.createStatement().execute("CREATE TABLE " + tableName + + " (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); + String[] queries = { "SELECT 1 FROM " + tableName + " ORDER BY K1", + "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K1, K2", + "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K1, K2, K3", + "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K3", + "SELECT 1 FROM " + tableName + " WHERE (k1,k2, k3) > ('a', 'ab', 1) ORDER BY V", + "SELECT 1 FROM " + tableName + " GROUP BY V", + "SELECT 1 FROM " + tableName + " GROUP BY K1, V, K2 ORDER BY V", }; + String query; + for (int i = 0; i < queries.length; i++) { + query = queries[i]; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + assertFalse("Expected plan to not use round robin iterator " + query, + plan.useRoundRobinIterator()); + } + } + + @Test + public void testSelectColumnsInOneFamily() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Statement statement = conn.createStatement(); + try { + // create table with specified column family. + String create = + "CREATE TABLE t (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar, v4 varchar)"; + statement.execute(create); + // select columns in one family. + String query = "SELECT f1.*, v4 FROM t"; + ResultSetMetaData rsMeta = statement.executeQuery(query).getMetaData(); + assertEquals("V1", rsMeta.getColumnName(1)); + assertEquals("V1", rsMeta.getColumnLabel(1)); + assertEquals("V2", rsMeta.getColumnName(2)); + assertEquals("V2", rsMeta.getColumnLabel(2)); + assertEquals("V4", rsMeta.getColumnName(3)); + assertEquals("V4", rsMeta.getColumnLabel(3)); + } finally { + statement.execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testSelectColumnsInOneFamilyWithSchema() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Statement statement = conn.createStatement(); + try { + // create table with specified column family. + String create = + "CREATE TABLE s.t (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar, v4 varchar)"; + statement.execute(create); + // select columns in one family. 
+ String query = "SELECT f1.*, v4 FROM s.t"; + ResultSetMetaData rsMeta = statement.executeQuery(query).getMetaData(); + assertEquals("V1", rsMeta.getColumnName(1)); + assertEquals("V1", rsMeta.getColumnLabel(1)); + assertEquals("V2", rsMeta.getColumnName(2)); + assertEquals("V2", rsMeta.getColumnLabel(2)); + assertEquals("V4", rsMeta.getColumnLabel(3)); + assertEquals("V4", rsMeta.getColumnName(3)); + } finally { + statement.execute("DROP TABLE IF EXISTS s.t"); + conn.close(); + } + } + + @Test + public void testNoFromClauseSelect() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + ResultSet rs = conn.createStatement().executeQuery("SELECT 2 * 3 * 4, 5 + 1"); + assertTrue(rs.next()); + assertEquals(24, rs.getInt(1)); + assertEquals(6, rs.getInt(2)); + assertFalse(rs.next()); + + String query = "SELECT 'a' AS col\n" + "UNION ALL\n" + "SELECT 'b' AS col\n" + "UNION ALL\n" + + "SELECT 'c' AS col"; + rs = conn.createStatement().executeQuery(query); + assertTrue(rs.next()); + assertEquals("a", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("b", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("c", rs.getString(1)); + assertFalse(rs.next()); + + rs = conn.createStatement().executeQuery("SELECT * FROM (" + query + ")"); + assertTrue(rs.next()); + assertEquals("a", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("b", rs.getString(1)); + assertTrue(rs.next()); + assertEquals("c", rs.getString(1)); + assertFalse(rs.next()); + } + + @Test + public void testFailNoFromClauseSelect() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + try { + conn.createStatement().executeQuery("SELECT foo, bar"); + fail("Should have got ColumnNotFoundException"); + } catch (ColumnNotFoundException e) { + } + + try { + conn.createStatement().executeQuery("SELECT *"); + fail("Should have got SQLException"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT.getErrorCode(), + e.getErrorCode()); + } + + try { + conn.createStatement().executeQuery("SELECT A.*"); + fail("Should have got SQLException"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT.getErrorCode(), + e.getErrorCode()); + } + } finally { + conn.close(); + } + } + + @Test + public void testServerArrayElementProjection1() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY)"); + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr[1] from t"); + assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testServerArrayElementProjection2() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY)"); + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr, arr[1] from t"); + assertFalse(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testServerArrayElementProjection3() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + 
.execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY, arr2 VARCHAR ARRAY)"); + ResultSet rs = + conn.createStatement().executeQuery("EXPLAIN SELECT arr, arr[1], arr2[1] from t"); + assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testServerArrayElementProjection4() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); + ResultSet rs = conn.createStatement().executeQuery( + "EXPLAIN SELECT arr1, arr1[1], ARRAY_APPEND(ARRAY_APPEND(arr1, arr2[2]), arr2[1]), p from t"); + assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testArrayAppendSingleArg() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); + conn.createStatement().executeQuery("SELECT ARRAY_APPEND(arr2) from t"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testArrayPrependSingleArg() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); + conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(arr2) from t"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testArrayConcatSingleArg() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); + conn.createStatement().executeQuery("SELECT ARRAY_CAT(arr2) from t"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testServerArrayElementProjection5() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); + ResultSet rs = conn.createStatement().executeQuery( + "EXPLAIN SELECT arr1, arr1[1], ARRAY_ELEM(ARRAY_APPEND(arr1, arr2[1]), 1), p, arr2[2] from t"); + assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testServerArrayElementProjectionWithArrayPrimaryKey() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t(arr INTEGER ARRAY PRIMARY KEY)"); + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr[1] from t"); + assertFalse(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); + } finally { + 
conn.createStatement().execute("DROP TABLE IF EXISTS t"); + conn.close(); + } + } + + @Test + public void testAddingRowTimestampColumn() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + // Column of type VARCHAR cannot be declared as ROW_TIMESTAMP + try { + conn.createStatement().execute( + "CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 VARCHAR NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); + fail("Varchar column cannot be added as row_timestamp"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); + } + // Column of type INTEGER cannot be declared as ROW_TIMESTAMP + try { + conn.createStatement().execute( + "CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 INTEGER NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); + fail("Integer column cannot be added as row_timestamp"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); + } + // Column of type DOUBLE cannot be declared as ROW_TIMESTAMP + try { + conn.createStatement().execute( + "CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 DOUBLE NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); + fail("Double column cannot be added as row_timestamp"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); + } + // Invalid - two columns declared as row_timestamp in pk constraint + try { + conn.createStatement().execute( + "CREATE TABLE T2 (PK1 DATE NOT NULL, PK2 DATE NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1 ROW_TIMESTAMP , PK2 ROW_TIMESTAMP)) "); + fail("Creating table with two row_timestamp columns should fail"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ROWTIMESTAMP_ONE_PK_COL_ONLY.getErrorCode(), e.getErrorCode()); + } + + // Invalid because only (unsigned)date, time, long, (unsigned)timestamp are valid data types for + // column to be declared as row_timestamp + try { + conn.createStatement().execute( + "CREATE TABLE T5 (PK1 VARCHAR PRIMARY KEY ROW_TIMESTAMP, PK2 VARCHAR, KV1 VARCHAR)"); + fail("Creating table with a key value column as row_timestamp should fail"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testGroupByVarbinaryOrArray() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T1 (PK VARCHAR PRIMARY KEY, c1 VARCHAR, c2 VARBINARY, C3 VARCHAR ARRAY, c4 VARBINARY, C5 VARCHAR ARRAY, C6 BINARY(10)) "); + try { + conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c2,c3"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), + e.getErrorCode()); + } + try { + conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c3,c2"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), + e.getErrorCode()); + } + try { + conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c2,c4"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), + e.getErrorCode()); + } + try { + conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c3,c5"); + fail(); + } catch (SQLException e) { + 
assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), + e.getErrorCode()); + } + try { + conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c6,c5"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), + e.getErrorCode()); + } + } + + @Test + public void testDMLOfNonIndexWithBuildIndexAt() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props);) { + conn.createStatement().execute("CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR)"); + } + props.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, + Long.toString(EnvironmentEdgeManager.currentTimeMillis() + 1)); + try (Connection conn = DriverManager.getConnection(getUrl(), props);) { + try { + conn.createStatement().execute("UPSERT INTO T (k,v1) SELECT k,v1 FROM T"); + fail(); + } catch (SQLException e) { + assertEquals("Unexpected Exception", + SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN.getErrorCode(), e.getErrorCode()); + } + } + } + + @Test + public void testNegativeGuidePostWidth() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props);) { + try { + conn.createStatement().execute( + "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR) GUIDE_POSTS_WIDTH = -1"); + fail(); + } catch (SQLException e) { + assertEquals("Unexpected Exception", SQLExceptionCode.PARSER_ERROR.getErrorCode(), + e.getErrorCode()); + } + } + } + + private static void assertFamilies(Scan s, String... families) { + assertEquals(families.length, s.getFamilyMap().size()); + for (String fam : families) { + byte[] cf = Bytes.toBytes(fam); + assertTrue("Expected to contain " + fam, s.getFamilyMap().containsKey(cf)); + } + } + + @Test + public void testProjection() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t(k INTEGER PRIMARY KEY, a.v1 VARCHAR, b.v2 VARCHAR, c.v3 VARCHAR)"); + assertFamilies(projectQuery("SELECT k FROM t"), "A"); + assertFamilies(projectQuery("SELECT k FROM t WHERE k = 5"), "A"); + assertFamilies(projectQuery("SELECT v2 FROM t WHERE k = 5"), "A", "B"); + assertFamilies(projectQuery("SELECT v2 FROM t WHERE v2 = 'a'"), "B"); + assertFamilies(projectQuery("SELECT v3 FROM t WHERE v2 = 'a'"), "B", "C"); + assertFamilies(projectQuery("SELECT v3 FROM t WHERE v2 = 'a' AND v3 is null"), "A", "B", "C"); + } finally { + conn.close(); + } + } + + private static boolean hasColumnProjectionFilter(Scan scan) { + Iterator iterator = ScanUtil.getFilterIterator(scan); + while (iterator.hasNext()) { + Filter filter = iterator.next(); + if (filter instanceof EncodedQualifiersColumnProjectionFilter) { + return true; + } + } + return false; + } + + @Test + public void testColumnProjectionOptimized() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE t(k INTEGER PRIMARY KEY, a.v1 VARCHAR, a.v1b VARCHAR, b.v2 VARCHAR, c.v3 VARCHAR)"); + assertTrue(hasColumnProjectionFilter(projectQuery("SELECT k, v1 FROM t WHERE v2 = 'foo'"))); + assertFalse(hasColumnProjectionFilter(projectQuery("SELECT k, v1 FROM t WHERE v1 = 'foo'"))); + assertFalse(hasColumnProjectionFilter(projectQuery("SELECT v1,v2 FROM t WHERE v1 = 'foo'"))); + assertTrue(hasColumnProjectionFilter( + projectQuery("SELECT v1,v2 FROM t 
WHERE v1 = 'foo' and v2 = 'bar' and v3 = 'bas'"))); + assertFalse(hasColumnProjectionFilter( + projectQuery("SELECT a.* FROM t WHERE v1 = 'foo' and v1b = 'bar'"))); + } finally { + conn.close(); + } + } + + @Test + public void testOrderByWithNoProjection() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("create table x (id integer primary key, A.i1 integer," + " B.i2 integer)"); + Scan scan = projectQuery("select A.i1 from X group by i1 order by avg(B.i2) " + "desc"); + ServerAggregators aggregators = ServerAggregators + .deserialize(scan.getAttribute(BaseScannerRegionObserverConstants.AGGREGATORS), null, null); + assertEquals(2, aggregators.getAggregatorCount()); + } finally { + conn.close(); + } + } + + @Test + public void testColumnProjectionUnionAll() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("CREATE TABLE t1(k INTEGER PRIMARY KEY," + + " col1 CHAR(8), col2 VARCHAR(10), col3 decimal(10,2))"); + conn.createStatement().execute( + "CREATE TABLE t2(k TINYINT PRIMARY KEY," + " col1 CHAR(20), col2 CHAR(30), col3 double)"); + QueryPlan plan = + getQueryPlan("SELECT * from t1 union all select * from t2", Collections.emptyList()); + RowProjector rowProj = plan.getProjector(); + assertTrue(rowProj.getColumnProjector(0).getExpression().getDataType() instanceof PInteger); + assertTrue(rowProj.getColumnProjector(1).getExpression().getDataType() instanceof PChar); + assertTrue(rowProj.getColumnProjector(1).getExpression().getMaxLength() == 20); + assertTrue(rowProj.getColumnProjector(2).getExpression().getDataType() instanceof PVarchar); + assertTrue(rowProj.getColumnProjector(2).getExpression().getMaxLength() == 30); + assertTrue(rowProj.getColumnProjector(3).getExpression().getDataType() instanceof PDecimal); + assertTrue(rowProj.getColumnProjector(3).getExpression().getScale() == 2); + } finally { + conn.close(); + } + } + + @Test + public void testFuncIndexUsage() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t1(k INTEGER PRIMARY KEY," + " col1 VARCHAR, col2 VARCHAR)"); + conn.createStatement() + .execute("CREATE TABLE t2(k INTEGER PRIMARY KEY," + " col1 VARCHAR, col2 VARCHAR)"); + conn.createStatement() + .execute("CREATE TABLE t3(j INTEGER PRIMARY KEY," + " col3 VARCHAR, col4 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX idx ON t1 (col1 || col2)"); + String query = "SELECT a.k from t1 a where a.col1 || a.col2 = 'foobar'"; + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + + " SERVER FILTER BY FIRST KEY ONLY", explainPlan); + query = "SELECT k,j from t3 b join t1 a ON k = j where a.col1 || a.col2 = 'foobar'"; + rs = conn.createStatement().executeQuery("EXPLAIN " + query); + explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER T3\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + " PARALLEL INNER-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + + " DYNAMIC SERVER FILTER BY B.J IN (\"A.:K\")", explainPlan); + query = "SELECT a.k,b.k from t2 b join t1 a ON a.k = b.k where a.col1 || a.col2 = 'foobar'"; + rs = conn.createStatement().executeQuery("EXPLAIN " + query); + explainPlan = 
QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER T2\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + " PARALLEL INNER-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + + " DYNAMIC SERVER FILTER BY B.K IN (\"A.:K\")", explainPlan); + } finally { + conn.close(); + } + } + + @Test + public void testSaltTableJoin() throws Exception { + + PhoenixConnection conn = (PhoenixConnection) DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute("drop table if exists SALT_TEST2900"); + + conn.createStatement().execute("create table SALT_TEST2900" + "(" + + "id UNSIGNED_INT not null primary key," + "appId VARCHAR" + ")SALT_BUCKETS=2"); + + conn.createStatement().execute("drop table if exists RIGHT_TEST2900 "); + conn.createStatement().execute("create table RIGHT_TEST2900" + "(" + + "appId VARCHAR not null primary key," + "createTime VARCHAR" + ")"); + + String sql = + "select * from SALT_TEST2900 a inner join RIGHT_TEST2900 b on a.appId=b.appId where a.id>=3 and a.id<=5"; + HashJoinPlan plan = (HashJoinPlan) getQueryPlan(sql, Collections.emptyList()); + ScanRanges ranges = plan.getContext().getScanRanges(); + + List regionLocations = + conn.getQueryServices().getAllTableRegions(Bytes.toBytes("SALT_TEST2900"), 60000); + for (HRegionLocation regionLocation : regionLocations) { + assertTrue(ranges.intersectRegion(regionLocation.getRegion().getStartKey(), + regionLocation.getRegion().getEndKey(), false)); + } + } finally { + conn.close(); + } + } + + @Test + public void testStatefulDefault() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY, " + + "datecol DATE DEFAULT CURRENT_DATE())"; + + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testAlterTableStatefulDefault() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY)"; + String ddl2 = "ALTER TABLE table_with_default " + "ADD datecol DATE DEFAULT CURRENT_DATE()"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl); + try { + conn.createStatement().execute(ddl2); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testDefaultTypeMismatch() throws Exception { + String ddl = + "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY, " + "v VARCHAR DEFAULT 1)"; + + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testAlterTableDefaultTypeMismatch() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY)"; + String ddl2 = "ALTER TABLE table_with_default " + "ADD v CHAR(3) DEFAULT 1"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl); + try { + conn.createStatement().execute(ddl2); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testDefaultTypeMismatchInView() throws Exception { + 
String ddl1 = + "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY, " + "v VARCHAR DEFAULT 'foo')"; + String ddl2 = "CREATE VIEW my_view(v2 VARCHAR DEFAULT 1) AS SELECT * FROM table_with_default"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl1); + try { + conn.createStatement().execute(ddl2); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testDefaultRowTimestamp1() throws Exception { + String ddl = "CREATE TABLE IF NOT EXISTS table_with_defaults (" + "pk1 INTEGER NOT NULL," + + "pk2 BIGINT NOT NULL DEFAULT 5," + + "CONSTRAINT NAME_PK PRIMARY KEY (pk1, pk2 ROW_TIMESTAMP))"; + + Connection conn = DriverManager.getConnection(getUrl()); + + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP.getErrorCode(), + e.getErrorCode()); + } + } + + @Test + public void testDefaultRowTimestamp2() throws Exception { + String ddl = + "CREATE TABLE table_with_defaults (" + "k BIGINT DEFAULT 5 PRIMARY KEY ROW_TIMESTAMP)"; + + Connection conn = DriverManager.getConnection(getUrl()); + + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP.getErrorCode(), + e.getErrorCode()); + } + } + + @Test + public void testDefaultSizeMismatch() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY, " + + "v CHAR(3) DEFAULT 'foobar')"; + + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute(ddl); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testAlterTableDefaultSizeMismatch() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY)"; + String ddl2 = "ALTER TABLE table_with_default " + "ADD v CHAR(3) DEFAULT 'foobar'"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl); + try { + conn.createStatement().execute(ddl2); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testNullDefaultRemoved() throws Exception { + String ddl = + "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY, " + "v VARCHAR DEFAULT null)"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl); + PTable table = conn.unwrap(PhoenixConnection.class).getMetaDataCache() + .getTableRef(new PTableKey(null, "TABLE_WITH_DEFAULT")).getTable(); + assertNull(table.getColumnForColumnName("V").getExpressionStr()); + } + + @Test + public void testNullAlterTableDefaultRemoved() throws Exception { + String ddl = "CREATE TABLE table_with_default (" + "pk INTEGER PRIMARY KEY)"; + String ddl2 = "ALTER TABLE table_with_default " + "ADD v CHAR(3) DEFAULT null"; + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(ddl); + conn.createStatement().execute(ddl2); + PTable table = conn.unwrap(PhoenixConnection.class).getMetaDataCache() + .getTableRef(new PTableKey(null, "TABLE_WITH_DEFAULT")).getTable(); + assertNull(table.getColumnForColumnName("V").getExpressionStr()); + } + + @Test + public void testIndexOnViewWithChildView() 
throws SQLException { + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement() + .execute("CREATE TABLE PLATFORM_ENTITY.GLOBAL_TABLE (\n" + + " ORGANIZATION_ID CHAR(15) NOT NULL,\n" + " KEY_PREFIX CHAR(3) NOT NULL,\n" + + " CREATED_DATE DATE,\n" + " CREATED_BY CHAR(15),\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " KEY_PREFIX\n" + " )\n" + + ") VERSIONS=1, IMMUTABLE_ROWS=true, MULTI_TENANT=true"); + conn.createStatement() + .execute("CREATE VIEW PLATFORM_ENTITY.GLOBAL_VIEW (\n" + " INT1 BIGINT NOT NULL,\n" + + " DOUBLE1 DECIMAL(12, 3),\n" + " IS_BOOLEAN BOOLEAN,\n" + " TEXT1 VARCHAR,\n" + + " CONSTRAINT PKVIEW PRIMARY KEY\n" + " (\n" + " INT1\n" + " )\n" + ")\n" + + "AS SELECT * FROM PLATFORM_ENTITY.GLOBAL_TABLE WHERE KEY_PREFIX = '123'"); + conn.createStatement().execute( + "CREATE INDEX GLOBAL_INDEX\n" + "ON PLATFORM_ENTITY.GLOBAL_VIEW (TEXT1 DESC, INT1)\n" + + "INCLUDE (CREATED_BY, DOUBLE1, IS_BOOLEAN, CREATED_DATE)"); + String query = "SELECT DOUBLE1 FROM PLATFORM_ENTITY.GLOBAL_VIEW\n" + + "WHERE ORGANIZATION_ID = '00Dxx0000002Col' AND TEXT1='Test' AND INT1=1"; + QueryPlan plan = getOptimizedQueryPlan(query); + assertEquals("PLATFORM_ENTITY.GLOBAL_VIEW", + plan.getContext().getCurrentTable().getTable().getName().getString()); + query = "SELECT DOUBLE1 FROM PLATFORM_ENTITY.GLOBAL_VIEW\n" + + "WHERE ORGANIZATION_ID = '00Dxx0000002Col' AND TEXT1='Test'"; + plan = getOptimizedQueryPlan(query); + assertEquals("PLATFORM_ENTITY.GLOBAL_INDEX", + plan.getContext().getCurrentTable().getTable().getName().getString()); + } + } + + @Test + public void testNotNullKeyValueColumnSalted() throws Exception { + testNotNullKeyValueColumn(3); + } + + @Test + public void testNotNullKeyValueColumnUnsalted() throws Exception { + testNotNullKeyValueColumn(0); + } + + private void testNotNullKeyValueColumn(int saltBuckets) throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE t1 (k integer not null primary key, v bigint not null) IMMUTABLE_ROWS=true" + + (saltBuckets == 0 ? "" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("UPSERT INTO t1 VALUES(0)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); + } + try { + conn.createStatement().execute( + "CREATE TABLE t2 (k integer not null primary key, v1 bigint not null, v2 varchar, v3 tinyint not null) IMMUTABLE_ROWS=true" + + (saltBuckets == 0 ? "" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("UPSERT INTO t2(k, v3) VALUES(0,0)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); + } + try { + conn.createStatement().execute( + "CREATE TABLE t3 (k integer not null primary key, v1 bigint not null, v2 varchar, v3 tinyint not null) IMMUTABLE_ROWS=true" + + (saltBuckets == 0 ? "" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("UPSERT INTO t3(k, v1) VALUES(0,0)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); + } + conn.createStatement().execute( + "CREATE TABLE t4 (k integer not null primary key, v1 bigint not null) IMMUTABLE_ROWS=true" + + (saltBuckets == 0 ? 
"" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("UPSERT INTO t4 VALUES(0,0)"); + conn.createStatement().execute( + "CREATE TABLE t5 (k integer not null primary key, v1 bigint not null default 0) IMMUTABLE_ROWS=true" + + (saltBuckets == 0 ? "" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("UPSERT INTO t5 VALUES(0)"); + conn.close(); + } + + @Test + public void testAlterAddNotNullKeyValueColumn() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t1 (k integer not null primary key, v1 bigint not null) IMMUTABLE_ROWS=true"); + try { + conn.createStatement().execute("ALTER TABLE t1 ADD k2 bigint not null primary key"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY.getErrorCode(), + e.getErrorCode()); + } + conn.createStatement().execute("ALTER TABLE t1 ADD v2 bigint not null"); + try { + conn.createStatement().execute("UPSERT INTO t1(k, v1) VALUES(0,0)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); + } + conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0,0)"); + conn.createStatement().execute("UPSERT INTO t1(v1,k,v2) VALUES(0,0,0)"); + } + + @Test + public void testOnDupKeyForImmutableTable() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE t1 (k integer not null primary key, v bigint) IMMUTABLE_ROWS=true"); + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = v + 1"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE.getErrorCode(), + e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testUpdatePKOnDupKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE k2 = v + 1"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testOnDupKeyTypeMismatch() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE t1 (k1 integer not null, k2 integer not null, v1 bigint, v2 varchar, constraint pk primary key (k1,k2))"); + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v1 = v2 || 'a'"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testDuplicateColumnOnDupKeyUpdate() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE t1 (k1 integer not null, k2 integer not null, v1 bigint, v2 bigint, constraint pk primary key (k1,k2))"); + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v1 = v1 + 1, v1 = v2 + 2"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY.getErrorCode(), + e.getErrorCode()); + } finally { + 
conn.close(); + } + } + + @Test + public void testAggregationInOnDupKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); + try { + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = sum(v)"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY.getErrorCode(), + e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testSequenceInOnDupKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); + conn.createStatement().execute("CREATE SEQUENCE s1"); + try { + conn.createStatement() + .execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = next value for s1"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR.getErrorCode(), e.getErrorCode()); + } finally { + conn.close(); + } + } + + @Test + public void testOrderPreservingGroupBy() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + + conn.createStatement() + .execute("CREATE TABLE test (\n" + " pk1 INTEGER NOT NULL,\n" + + " pk2 INTEGER NOT NULL,\n" + " pk3 INTEGER NOT NULL,\n" + + " pk4 INTEGER NOT NULL,\n" + " v1 INTEGER,\n" + + " CONSTRAINT pk PRIMARY KEY (\n" + " pk1,\n" + + " pk2,\n" + " pk3,\n" + " pk4\n" + + " )\n" + " )"); + String[] queries = new String[] { + "SELECT pk3 FROM test WHERE pk2 = 1 GROUP BY pk2+1,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk2 = 1 GROUP BY pk2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk1+pk2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk4,CASE WHEN pk1 > pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk4,pk3", }; + int index = 0; + for (String query : queries) { + QueryPlan plan = getQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().isEmpty()); + index++; + } + } + } + + @Test + public void testOrderPreservingGroupByForNotPkColumns() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement() + .execute("CREATE TABLE test (\n" + " pk1 varchar, \n" + + " pk2 varchar, \n" + " pk3 varchar, \n" + + " pk4 varchar, \n" + " v1 varchar, \n" + + " v2 varchar,\n" + " CONSTRAINT pk PRIMARY KEY (\n" + + " pk1,\n" + " pk2,\n" + " pk3,\n" + + " pk4\n" + " )\n" + " )"); + String[] queries = new String[] { + "SELECT pk3 FROM test WHERE v2 = 'a' GROUP BY substr(v2,0,1),pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 'c' and v2 = substr('abc',1,1) GROUP BY v2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 'b' GROUP BY length(v1)+length(v2),pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 'b' GROUP BY length(pk1)+length(v2),pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE v1 = 'a' and v2 = substr('abc',2,1) GROUP BY pk4,CASE WHEN v1 > v2 THEN v1 ELSE v2 END,pk3 ORDER BY pk4,pk3", + "SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = substr('abc',2,1) GROUP BY pk4,CASE WHEN pk1 > v2 THEN pk1 ELSE v2 END,pk3 ORDER BY pk4,pk3", + "SELECT pk3 FROM test WHERE pk1 = 'a' and pk2 = 'b' and v1 = 'c' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN 
pk1 ELSE pk2 END,pk3 ORDER BY pk3" }; + int index = 0; + for (String query : queries) { + QueryPlan plan = getQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().isEmpty()); + index++; + } + } + } + + @Test + public void testOrderPreservingGroupByForClientAggregatePlan() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "test_table"; + String sql = "create table " + tableName + "( " + " pk1 varchar not null , " + + " pk2 varchar not null, " + " pk3 varchar not null," + " v1 varchar, " + " v2 varchar, " + + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1," + "pk2," + "pk3 ))"; + conn.createStatement().execute(sql); + + String[] queries = new String[] { "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "group by a.ak3,a.av1 order by a.ak3,a.av1", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av2 = 'a' GROUP BY substr(a.av2,0,1),ak3 ORDER BY ak3", + + // for InListExpression + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av2 in('a') GROUP BY substr(a.av2,0,1),ak3 ORDER BY ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 'c' and a.av2 = substr('abc',1,1) GROUP BY a.av2,a.ak3 ORDER BY a.ak3", + + // for RVC + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where (a.ak1,a.av2) = ('c', substr('abc',1,1)) GROUP BY a.av2,a.ak3 ORDER BY a.ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2 = 'b' GROUP BY length(a.av1)+length(a.av2),a.ak3 ORDER BY a.ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 'a' and a.av2 = 'b' GROUP BY length(a.ak1)+length(a.av2),a.ak3 ORDER BY a.ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3, coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2 = substr('abc',2,1) GROUP BY a.ak4,CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3 ORDER BY a.ak4,a.ak3", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 0.0 and a.av2 = (5+3*2) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by 
pk2,pk3 limit 10) a " + + "where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + // for IS NULL + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 0.0 and a.av2 is null GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", }; + int index = 0; + for (String query : queries) { + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + index++; + } + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testVarbinaryArrayNotSupported() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, a VARBINARY[10])"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.VARBINARY_ARRAY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); - } + } + } + } + + @Test + public void testNotOrderPreservingGroupBy() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + + conn.createStatement() + .execute("CREATE TABLE test (\n" + " pk1 INTEGER NOT NULL,\n" + + " pk2 INTEGER NOT NULL,\n" + " pk3 INTEGER NOT NULL,\n" + + " pk4 INTEGER NOT NULL,\n" + " v1 INTEGER,\n" + + " CONSTRAINT pk PRIMARY KEY (\n" + " pk1,\n" + + " pk2,\n" + " pk3,\n" + " pk4\n" + + " )\n" + " )"); + String[] queries = new String[] { + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk4,CASE WHEN pk1 > pk2 THEN coalesce(v1,1) ELSE pk2 END,pk3 ORDER BY pk4,pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3", + "SELECT pk3 FROM test GROUP BY pk2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 GROUP BY pk1,pk2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 GROUP BY RAND()+pk1,pk2,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN pk1 ELSE RAND(1) END,pk3 ORDER BY pk3", }; + int index = 0; + for (String query : queries) { + QueryPlan plan = getQueryPlan(conn, query); + assertFalse((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().isEmpty()); + index++; 
+ } + } + } + + @Test + public void testNotOrderPreservingGroupByForNotPkColumns() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement() + .execute("CREATE TABLE test (\n" + " pk1 varchar,\n" + + " pk2 varchar,\n" + " pk3 varchar,\n" + + " pk4 varchar,\n" + " v1 varchar,\n" + " v2 varchar,\n" + + " CONSTRAINT pk PRIMARY KEY (\n" + " pk1,\n" + + " pk2,\n" + " pk3,\n" + " pk4\n" + + " )\n" + " )"); + String[] queries = new String[] { + "SELECT pk3 FROM test WHERE (pk1 = 'a' and pk2 = 'b') or v1 ='c' GROUP BY pk4,CASE WHEN pk1 > pk2 THEN coalesce(v1,'1') ELSE pk2 END,pk3 ORDER BY pk4,pk3", + "SELECT pk3 FROM test WHERE pk1 = 'a' or pk2 = 'b' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk3", + "SELECT pk3 FROM test WHERE pk1 = 'a' and (pk2 = 'b' or v1 = 'c') GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk3", + "SELECT v2 FROM test GROUP BY v1,v2 ORDER BY v2", + "SELECT pk3 FROM test WHERE v1 = 'a' GROUP BY v1,v2,pk3 ORDER BY pk3", + "SELECT length(pk3) FROM test WHERE v1 = 'a' GROUP BY RAND()+length(v1),length(v2),length(pk3) ORDER BY length(v2),length(pk3)", + "SELECT length(pk3) FROM test WHERE v1 = 'a' and v2 = 'b' GROUP BY CASE WHEN v1 > v2 THEN length(v1) ELSE RAND(1) END,length(pk3) ORDER BY length(pk3)", }; + int index = 0; + for (String query : queries) { + QueryPlan plan = getQueryPlan(conn, query); + assertFalse((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().isEmpty()); + index++; + } + } + } + + @Test + public void testNotOrderPreservingGroupByForClientAggregatePlan() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "table_test"; + String sql = "create table " + tableName + "( " + " pk1 varchar not null , " + + " pk2 varchar not null, " + " pk3 varchar not null," + " v1 varchar, " + " v2 varchar, " + + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1," + "pk2," + "pk3 ))"; + conn.createStatement().execute(sql); + + String[] queries = new String[] { "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where (a.ak1 = 'a' and a.ak2 = 'b') or a.av1 ='c' GROUP BY a.ak4,CASE WHEN a.ak1 > a.ak2 THEN coalesce(a.av1,'1') ELSE a.ak2 END,a.ak3 ORDER BY a.ak4,a.ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 'a' or a.ak2 = 'b' GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", + + // for in + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 in ( 'a','b') GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 'a' and (a.ak2 = 'b' or a.av1 = 'c') GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 
WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", + + "select a.av2 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + "GROUP BY a.av1,a.av2 ORDER BY a.av2", + + "select a.ak3 " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' GROUP BY a.av1,a.av2,a.ak3 ORDER BY a.ak3", + + "select length(a.ak3) " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' GROUP BY RAND()+length(a.av1),length(a.av2),length(a.ak3) ORDER BY length(a.av2),length(a.ak3)", + + "select length(a.ak3) " + + "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2 = 'b' GROUP BY CASE WHEN a.av1 > a.av2 THEN length(a.av1) ELSE RAND(1) END,length(a.ak3) ORDER BY length(a.ak3)", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 > 0.0 and a.av2 = (5+3*2) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", + + // for CoerceExpression + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where CAST(a.ak1 AS INTEGER) = 0 and a.av2 = (5+3*2) GROUP BY a.ak3,a.ak1,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 = 0.0 or a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + // for IS NULL + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 is not null and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 is null or a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) and a.ak1 = 0.0 GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", + + "select a.ak3 " + + "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) 
av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) or a.ak1 = 0.0 GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", }; + int index = 0; + for (String query : queries) { + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().size() > 0); + index++; + } + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testInvalidNextValueFor() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE SEQUENCE alpha.zeta"); - String[] queries = { - "SELECT * FROM aTable WHERE a_integer < next value for alpha.zeta", - "SELECT * FROM aTable GROUP BY a_string,next value for alpha.zeta", - "SELECT * FROM aTable GROUP BY 1 + next value for alpha.zeta", - "SELECT * FROM aTable GROUP BY a_integer HAVING a_integer < next value for alpha.zeta", - "SELECT * FROM aTable WHERE a_integer < 3 GROUP BY a_integer HAVING a_integer < next value for alpha.zeta", - "SELECT * FROM aTable ORDER BY next value for alpha.zeta", - "SELECT max(next value for alpha.zeta) FROM aTable", - }; - for (String query : queries) { - List binds = Collections.emptyList(); - try { - compileQuery(query, binds); - fail("Compilation should have failed since this is an invalid usage of NEXT VALUE FOR: " + query); - } catch (SQLException e) { - assertEquals(query, SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR.getErrorCode(), e.getErrorCode()); - } - } - } - - @Test - public void testNoCachingHint() throws Exception { - List binds = Collections.emptyList(); - Scan scan = compileQuery("select val from ptsdb", binds); - assertTrue(scan.getCacheBlocks()); - scan = compileQuery("select /*+ NO_CACHE */ val from ptsdb", binds); - assertFalse(scan.getCacheBlocks()); - scan = compileQuery("select /*+ NO_CACHE */ p1.val from ptsdb p1 inner join ptsdb p2 on p1.inst = p2.inst", binds); - assertFalse(scan.getCacheBlocks()); - } - - @Test - public void testExecuteWithNonEmptyBatch() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - Statement stmt = conn.createStatement(); - stmt.addBatch("SELECT * FROM atable"); - stmt.execute("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), e.getErrorCode()); - } - try { - Statement stmt = conn.createStatement(); - stmt.addBatch("SELECT * FROM atable"); - stmt.executeUpdate("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), e.getErrorCode()); - } - try { - PreparedStatement stmt = conn.prepareStatement("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); - stmt.addBatch(); - stmt.execute(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), e.getErrorCode()); - } - try { - PreparedStatement stmt = conn.prepareStatement("UPSERT INTO atable VALUES('000000000000000','000000000000000')"); - stmt.addBatch(); - stmt.executeUpdate(); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH.getErrorCode(), e.getErrorCode()); - } + } + } + } + + 
@Test + public void testOrderByOptimizeForClientAggregatePlanAndDesc() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "test_table"; + String sql = "create table " + tableName + "( " + " pk1 varchar not null, " + + " pk2 varchar not null, " + " pk3 varchar not null, " + " v1 varchar, " + " v2 varchar, " + + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1 desc," + "pk2 desc," + "pk3 desc))"; + conn.createStatement().execute(sql); + + String[] queries = new String[] { + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3, substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "group by a.ak3,a.av1 order by a.ak3 desc,a.av1", + + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' group by a.av1,a.ak3 order by a.ak3 desc", + + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3 desc,a.ak2 desc" }; + + int index = 0; + for (String query : queries) { + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + index++; + } + + queries = new String[] { + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "group by a.ak3,a.av1 order by a.ak3,a.av1", + + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' group by a.av1,a.ak3 order by a.ak3", + + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3,a.ak2", + + "select a.ak3 " + + "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from " + + tableName + " order by pk2,pk3 limit 10) a " + + "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3 asc,a.ak2 desc" }; + index = 0; + for (String query : queries) { + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); + assertTrue((index + 1) + ") " + queries[index], + plan.getOrderBy().getOrderByExpressions().size() > 0); + index++; + } + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testInvalidPrimaryKeyDecl() throws Exception { - String[] queries = { - "CREATE TABLE t (k varchar null primary key)", - "CREATE TABLE t (k varchar null, constraint pk primary key (k))", - }; - Connection conn = DriverManager.getConnection(getUrl()); - for (String query : queries) { - try { - conn.createStatement().execute(query); - fail("Compilation should have failed since this is an invalid PRIMARY KEY declaration: " + query); - } catch (SQLException e) { - assertEquals(query, SQLExceptionCode.SINGLE_PK_MAY_NOT_BE_NULL.getErrorCode(), e.getErrorCode()); - } - } - } - - @Test - public void testInvalidNullCompositePrimaryKey() throws Exception { - Connection conn = 
DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 varchar, k2 varchar, constraint pk primary key(k1,k2))"); - PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t values(?,?)"); - stmt.setString(1, ""); - stmt.setString(2, ""); - try { - stmt.execute(); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Primary key may not be null")); - } - } - - - @Test - public void testGroupByLimitOptimization() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 varchar, k2 varchar, v varchar, constraint pk primary key(k1,k2))"); - ResultSet rs; - String[] queries = { - "SELECT DISTINCT v FROM T LIMIT 3", - "SELECT v FROM T GROUP BY v,k1 LIMIT 3", - "SELECT count(*) FROM T GROUP BY k1 LIMIT 3", - "SELECT max(v) FROM T GROUP BY k1,k2 LIMIT 3", - "SELECT k1,k2 FROM T GROUP BY k1,k2 LIMIT 3", - "SELECT max(v) FROM T GROUP BY k2,k1 HAVING k1 > 'a' LIMIT 3", // Having optimized out, order of GROUP BY key not important - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - rs = conn.createStatement().executeQuery("EXPLAIN " + query); - assertTrue("Expected to find GROUP BY limit optimization in: " + query, QueryUtil.getExplainPlan(rs).contains(" LIMIT 3 GROUPS")); - } - } - - @Test - public void testNoGroupByLimitOptimization() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 varchar, k2 varchar, v varchar, constraint pk primary key(k1,k2))"); - ResultSet rs; - String[] queries = { -// "SELECT DISTINCT v FROM T ORDER BY v LIMIT 3", -// "SELECT v FROM T GROUP BY v,k1 ORDER BY v LIMIT 3", - "SELECT DISTINCT count(*) FROM T GROUP BY k1 LIMIT 3", - "SELECT count(1) FROM T GROUP BY v,k1 LIMIT 3", - "SELECT max(v) FROM T GROUP BY k1,k2 HAVING count(k1) > 1 LIMIT 3", - "SELECT count(v) FROM T GROUP BY to_date(k2),k1 LIMIT 3", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - rs = conn.createStatement().executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertFalse("Did not expected to find GROUP BY limit optimization in: " + query, explainPlan.contains(" LIMIT 3 GROUPS")); - } - } - - @Test - public void testLocalIndexCreationWithDefaultFamilyOption() throws Exception { - Connection conn1 = DriverManager.getConnection(getUrl()); - try{ - Statement statement = conn1.createStatement(); - statement.execute("create table example (id integer not null,fn varchar," - + "\"ln\" varchar constraint pk primary key(id)) DEFAULT_COLUMN_FAMILY='F'"); - try { - statement.execute("create local index my_idx on example (fn) DEFAULT_COLUMN_FAMILY='F'"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE.getErrorCode(),e.getErrorCode()); - } - statement.execute("create local index my_idx on example (fn)"); - } finally { - conn1.close(); - } - } - - @Test - public void testMultiCFProjection() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String ddl = "CREATE TABLE multiCF (k integer primary key, a.a varchar, b.b varchar)"; - conn.createStatement().execute(ddl); - String query = "SELECT COUNT(*) FROM multiCF"; - QueryPlan plan = getQueryPlan(query,Collections.emptyList()); - plan.iterator(); - Scan scan = plan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof FirstKeyOnlyFilter); - 
assertEquals(1, scan.getFamilyMap().size()); - } - - @Test - public void testNonDeterministicExpressionIndex() throws Exception { - String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; - Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = null; - try { - stmt = conn.createStatement(); - stmt.execute(ddl); - stmt.execute("CREATE INDEX i ON t (RAND())"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode()); - } - finally { - stmt.close(); - } - } - - @Test - public void testStatelessExpressionIndex() throws Exception { - String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; - Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = null; - try { - stmt = conn.createStatement(); - stmt.execute(ddl); - stmt.execute("CREATE INDEX i ON t (2)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode()); - } - finally { - stmt.close(); - } - } - - @Test - public void testAggregateExpressionIndex() throws Exception { - String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; - Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = null; - try { - stmt = conn.createStatement(); - stmt.execute(ddl); - stmt.execute("CREATE INDEX i ON t (SUM(k1))"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX.getErrorCode(), e.getErrorCode()); - } - finally { - stmt.close(); - } - } - - @Test - public void testDescVarbinaryNotSupported() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (k VARBINARY PRIMARY KEY DESC)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR NOT NULL, k2 VARBINARY, CONSTRAINT pk PRIMARY KEY (k1,k2 DESC))"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR PRIMARY KEY)"); - conn.createStatement().execute("ALTER TABLE t ADD k2 VARBINARY PRIMARY KEY DESC"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode()); - } + } + } + } + + @Test + public void testGroupByDescColumnBug3451() throws Exception { + + try (Connection conn = DriverManager.getConnection(getUrl())) { + + conn.createStatement() + .execute("CREATE TABLE IF NOT EXISTS GROUPBYTEST (\n" + + " ORGANIZATION_ID CHAR(15) NOT NULL,\n" + + " CONTAINER_ID CHAR(15) NOT NULL,\n" + + " ENTITY_ID CHAR(15) NOT NULL,\n" + " SCORE DOUBLE,\n" + + " CONSTRAINT TEST_PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " CONTAINER_ID,\n" + " ENTITY_ID\n" + " )\n" + + " )"); + conn.createStatement().execute( + "CREATE INDEX SCORE_IDX ON GROUPBYTEST (ORGANIZATION_ID,CONTAINER_ID, SCORE DESC, ENTITY_ID DESC)"); + QueryPlan plan = getQueryPlan(conn, + "SELECT DISTINCT entity_id, score\n" + " FROM GROUPBYTEST\n" + + " WHERE organization_id = 'org2'\n" + + " AND container_id IN ( 'container1','container2','container3' )\n" + + " ORDER BY score DESC\n" + " LIMIT 2"); + assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); + plan = getQueryPlan(conn, + 
"SELECT DISTINCT entity_id, score\n" + " FROM GROUPBYTEST\n" + + " WHERE entity_id = 'entity1'\n" + + " AND container_id IN ( 'container1','container2','container3' )\n" + + " ORDER BY score DESC\n" + " LIMIT 2"); + assertTrue(plan.getOrderBy().getOrderByExpressions().isEmpty()); + } + } + + @Test + public void testGroupByDescColumnBug3452() throws Exception { + + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + + String sql = + "CREATE TABLE GROUPBYDESC3452 ( " + "ORGANIZATION_ID VARCHAR," + "CONTAINER_ID VARCHAR," + + "ENTITY_ID VARCHAR NOT NULL," + "CONSTRAINT TEST_PK PRIMARY KEY ( " + + "ORGANIZATION_ID DESC," + "CONTAINER_ID DESC," + "ENTITY_ID" + "))"; + conn.createStatement().execute(sql); + + // -----ORGANIZATION_ID + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST"; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + // ----CONTAINER_ID + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + // -----ORGANIZATION_ID ASC CONTAINER_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + + // -----ORGANIZATION_ID ASC CONTAINER_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + // -----ORGANIZATION_ID DESC CONTAINER_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + // -----ORGANIZATION_ID DESC CONTAINER_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + // -----CONTAINER_ID ASC ORGANIZATION_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + // -----CONTAINER_ID ASC ORGANIZATION_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + // -----CONTAINER_ID DESC ORGANIZATION_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + // -----CONTAINER_ID DESC ORGANIZATION_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID,CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testDivideByZeroExpressionIndex() throws Exception { - String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)"; - Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = null; - try { - stmt = conn.createStatement(); - stmt.execute(ddl); - stmt.execute("CREATE INDEX i ON t (k1/0)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DIVIDE_BY_ZERO.getErrorCode(), e.getErrorCode()); - } - finally { - stmt.close(); - } - } - - @Test - public void testRegex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement(); - stmt.execute("CREATE TABLE t (k1 INTEGER PRIMARY KEY, v VARCHAR)"); - - //character classes - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[abc]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[^abc]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-zA-Z]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-d[m-p]]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[def]]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^bc]]') = 'val'"); - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '[a-z&&[^m-p]]') = 'val'"); - - // predefined character classes - stmt.executeQuery("select * from T where REGEXP_SUBSTR(v, '.\\\\d\\\\D\\\\s\\\\S\\\\w\\\\W') = 'val'"); - } - - private static void assertLiteralEquals(Object o, RowProjector p, int i) { - assertTrue(i < p.getColumnCount()); - Expression e = p.getColumnProjector(i).getExpression(); - assertTrue(e instanceof LiteralExpression); - LiteralExpression l = (LiteralExpression)e; - Object lo = l.getValue(); - assertEquals(o, lo); - } - - @Test - public void testIntAndLongMinValue() throws Exception { - BigDecimal oneLessThanMinLong = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); - BigDecimal oneMoreThanMaxLong = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); - String query = "SELECT 
" + - Integer.MIN_VALUE + "," + Long.MIN_VALUE + "," + - (Integer.MIN_VALUE+1) + "," + (Long.MIN_VALUE+1) + "," + - ((long)Integer.MIN_VALUE - 1) + "," + oneLessThanMinLong + "," + - Integer.MAX_VALUE + "," + Long.MAX_VALUE + "," + - (Integer.MAX_VALUE - 1) + "," + (Long.MAX_VALUE - 1) + "," + - ((long)Integer.MAX_VALUE + 1) + "," + oneMoreThanMaxLong + - " FROM " + "\""+ SYSTEM_CATALOG_SCHEMA + "\".\"" + SYSTEM_STATS_TABLE + "\"" + " LIMIT 1"; - List binds = Collections.emptyList(); - QueryPlan plan = getQueryPlan(query, binds); - RowProjector p = plan.getProjector(); - // Negative integers end up as longs once the * -1 occurs - assertLiteralEquals((long)Integer.MIN_VALUE, p, 0); - // Min long still stays as long - assertLiteralEquals(Long.MIN_VALUE, p, 1); - assertLiteralEquals((long)Integer.MIN_VALUE + 1, p, 2); - assertLiteralEquals(Long.MIN_VALUE + 1, p, 3); - assertLiteralEquals((long)Integer.MIN_VALUE - 1, p, 4); - // Can't fit into long, so becomes BigDecimal - assertLiteralEquals(oneLessThanMinLong, p, 5); - // Positive integers stay as ints - assertLiteralEquals(Integer.MAX_VALUE, p, 6); - assertLiteralEquals(Long.MAX_VALUE, p, 7); - assertLiteralEquals(Integer.MAX_VALUE - 1, p, 8); - assertLiteralEquals(Long.MAX_VALUE - 1, p, 9); - assertLiteralEquals((long)Integer.MAX_VALUE + 1, p, 10); - assertLiteralEquals(oneMoreThanMaxLong, p, 11); - } - - @Test - public void testMathFunctionOrderByOrderPreservingFwd() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2,k3))"); - /* - * "SELECT * FROM T ORDER BY k1, k2", - * "SELECT * FROM T ORDER BY k1, SIGN(k2)", - * "SELECT * FROM T ORDER BY SIGN(k1), k2", - */ - List queryList = new ArrayList(); - queryList.add("SELECT * FROM T ORDER BY k1, k2"); - for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { - queryList.add(String.format("SELECT * FROM T ORDER BY k1, %s(k2)", sub)); - queryList.add(String.format("SELECT * FROM T ORDER BY %s(k1), k2", sub)); - } - String[] queries = queryList.toArray(new String[queryList.size()]); - for (int i = 0; i < queries.length; i++) { - String query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - // Negative test - queryList.clear(); - for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { - queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub)); - } - for (String query : queryList.toArray(new String[queryList.size()])) { - Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query).getContext().getScan(); - assertNotNull(scan.getFilter()); - assertTrue(scan.getStartRow().length == 0); - assertTrue(scan.getStopRow().length == 0); - } - } - - @Test - public void testMathFunctionOrderByOrderPreservingRev() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2 DESC,k3))"); - List queryList = new ArrayList(); - // "SELECT * FROM T ORDER BY k1 DESC, SIGN(k2) DESC, k3 DESC" - queryList.add("SELECT * FROM T ORDER BY k1 DESC"); - queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2"); - queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2, k3 DESC"); - for 
(String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { - queryList.add(String.format("SELECT * FROM T ORDER BY k1 DESC, %s(k2) DESC, k3 DESC", sub)); - } - String[] queries = queryList.toArray(new String[queryList.size()]); - for (int i = 0; i < queries.length; i++) { - String query = queries[i]; - QueryPlan plan = - conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue(query, plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - } - // Negative test - queryList.clear(); - for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) { - queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub)); - } - for (String query : queryList.toArray(new String[queryList.size()])) { - Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query).getContext().getScan(); - assertNotNull(scan.getFilter()); - assertTrue(scan.getStartRow().length == 0); - assertTrue(scan.getStopRow().length == 0); - } - } - - @Test - public void testOrderByOrderPreservingFwd() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT * FROM T WHERE k2=CURRENT_DATE() ORDER BY k1, k3", - "SELECT * FROM T ORDER BY (k1,k2), k3", - "SELECT * FROM T ORDER BY k1,k2,k3 NULLS FIRST", - "SELECT * FROM T ORDER BY k1,k2,k3", - "SELECT * FROM T ORDER BY k1,k2", - "SELECT * FROM T ORDER BY k1", - "SELECT * FROM T ORDER BY CAST(k1 AS TIMESTAMP)", - "SELECT * FROM T ORDER BY (k1,k2,k3)", - "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY'), CEIL(k2, 'HOUR')", - "SELECT * FROM T ORDER BY INVERT(k1) DESC", - "SELECT * FROM T WHERE k1=CURRENT_DATE() ORDER BY k2", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue("Expected order by to be compiled out: " + query, plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - } - - @Test - public void testOrderByOrderPreservingRev() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2 DESC,k3 DESC))"); - String[] queries = { - "SELECT * FROM T ORDER BY INVERT(k1),k2,k3 nulls last", - "SELECT * FROM T ORDER BY INVERT(k1),k2", - "SELECT * FROM T ORDER BY INVERT(k1)", - "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY') DESC, CEIL(k2, 'HOUR') DESC", - "SELECT * FROM T ORDER BY k1 DESC", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue("Expected order by to be compiled out: " + query, plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - } - } - - @Test - public void testNotOrderByOrderPreserving() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 varchar, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3 desc))"); - String[] queries = { - "SELECT * FROM T ORDER BY k1,k2 NULLS LAST", - "SELECT * FROM T ORDER BY k1,k2, k3 NULLS LAST", - "SELECT * FROM T ORDER BY k1,k3", - "SELECT * FROM T ORDER BY SUBSTR(TO_CHAR(k1),1,4)", - "SELECT * FROM T ORDER BY k2", - "SELECT * FROM T 
ORDER BY INVERT(k1),k3", - "SELECT * FROM T ORDER BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END", - "SELECT * FROM T ORDER BY TO_CHAR(k1)", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertFalse("Expected order by not to be compiled out: " + query, plan.getOrderBy().getOrderByExpressions().isEmpty()); - } - } - - @Test - public void testNotOrderByOrderPreservingForAggregation() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE IF NOT EXISTS VA_TEST(ID VARCHAR NOT NULL PRIMARY KEY, VAL1 VARCHAR, VAL2 INTEGER)"); - String[] queries = { - "select distinct ID, VAL1, VAL2 from VA_TEST where \"ID\" in ('ABC','ABD','ABE','ABF','ABG','ABH','AAA', 'AAB', 'AAC','AAD','AAE','AAF') order by VAL1 ASC" - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertFalse("Expected order by not to be compiled out: " + query, plan.getOrderBy().getOrderByExpressions().isEmpty()); - } - } - - @Test - public void testGroupByOrderPreserving() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT 1 FROM T GROUP BY k3, (k1,k2)", - "SELECT 1 FROM T GROUP BY k2,k1,k3", - "SELECT 1 FROM T GROUP BY k1,k2", - "SELECT 1 FROM T GROUP BY k1", - "SELECT 1 FROM T GROUP BY CAST(k1 AS TIMESTAMP)", - "SELECT 1 FROM T GROUP BY (k1,k2,k3)", - "SELECT 1 FROM T GROUP BY TRUNC(k2, 'DAY'), CEIL(k1, 'HOUR')", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue("Expected group by to be order preserving: " + query, plan.getGroupBy().isOrderPreserving()); - } - } - - @Test - public void testGroupByOrderPreserving2() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T (ORGANIZATION_ID char(15) not null, \n" + - "JOURNEY_ID char(15) not null, \n" + - "DATASOURCE SMALLINT not null, \n" + - "MATCH_STATUS TINYINT not null, \n" + - "EXTERNAL_DATASOURCE_KEY varchar(30), \n" + - "ENTITY_ID char(15) not null, \n" + - "CONSTRAINT PK PRIMARY KEY (\n" + - " ORGANIZATION_ID, \n" + - " JOURNEY_ID, \n" + - " DATASOURCE, \n" + - " MATCH_STATUS,\n" + - " EXTERNAL_DATASOURCE_KEY,\n" + - " ENTITY_ID))"); - String[] queries = { - "SELECT COUNT(1) As DUP_COUNT\n" + - " FROM T \n" + - " WHERE JOURNEY_ID='07ixx000000004J' AND \n" + - " DATASOURCE=0 AND MATCH_STATUS <= 1 and \n" + - " ORGANIZATION_ID='07ixx000000004J' \n" + - " GROUP BY MATCH_STATUS, EXTERNAL_DATASOURCE_KEY \n" + - " HAVING COUNT(1) > 1", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue("Expected group by to be order preserving: " + query, plan.getGroupBy().isOrderPreserving()); - } - } - - @Test - public void testNotGroupByOrderPreserving() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 date not null, 
k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT 1 FROM T GROUP BY k1,k3", - "SELECT 1 FROM T GROUP BY k2", - "SELECT 1 FROM T GROUP BY INVERT(k1),k3", - "SELECT 1 FROM T GROUP BY CASE WHEN k1 = CURRENT_DATE() THEN 0 ELSE 1 END", - "SELECT 1 FROM T GROUP BY TO_CHAR(k1)", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertFalse("Expected group by not to be order preserving: " + query, plan.getGroupBy().isOrderPreserving()); - } - } - - @Test - public void testUseRoundRobinIterator() throws Exception { - Properties props = new Properties(); - props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); - Connection conn = DriverManager.getConnection(getUrl(), props); - conn.createStatement().execute("CREATE TABLE t (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT 1 FROM T ", - "SELECT 1 FROM T WHERE V = 'c'", - "SELECT 1 FROM T WHERE (k1,k2, k3) > ('a', 'ab', 1)", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertTrue("Expected plan to use round robin iterator " + query, plan.useRoundRobinIterator()); - } - } - - @Test - public void testForcingRowKeyOrderNotUseRoundRobinIterator() throws Exception { - Properties props = new Properties(); - props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(true)); - Connection conn = DriverManager.getConnection(getUrl(), props); - testForceRowKeyOrder(conn, false); - testForceRowKeyOrder(conn, true); - } - - private void testForceRowKeyOrder(Connection conn, boolean isSalted) throws SQLException { - String tableName = "tablename" + (isSalted ? "_salt" : ""); - conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT 1 FROM " + tableName , - "SELECT 1 FROM " + tableName + " WHERE V = 'c'", - "SELECT 1 FROM " + tableName + " WHERE (k1, k2, k3) > ('a', 'ab', 1)", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertFalse("Expected plan to not use round robin iterator " + query, plan.useRoundRobinIterator()); - } - } - - @Test - public void testPlanForOrderByOrGroupByNotUseRoundRobin() throws Exception { - Properties props = new Properties(); - props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); - Connection conn = DriverManager.getConnection(getUrl(), props); - testOrderByOrGroupByDoesntUseRoundRobin(conn, true); - testOrderByOrGroupByDoesntUseRoundRobin(conn, false); - } - - private void testOrderByOrGroupByDoesntUseRoundRobin(Connection conn, boolean salted) throws SQLException { - String tableName = "orderbygroupbytable" + (salted ? 
"_salt" : ""); - conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 char(2) not null, k2 varchar not null, k3 integer not null, v varchar, constraint pk primary key(k1,k2,k3))"); - String[] queries = { - "SELECT 1 FROM " + tableName + " ORDER BY K1", - "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K1, K2", - "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K1, K2, K3", - "SELECT 1 FROM " + tableName + " WHERE V = 'c' ORDER BY K3", - "SELECT 1 FROM " + tableName + " WHERE (k1,k2, k3) > ('a', 'ab', 1) ORDER BY V", - "SELECT 1 FROM " + tableName + " GROUP BY V", - "SELECT 1 FROM " + tableName + " GROUP BY K1, V, K2 ORDER BY V", - }; - String query; - for (int i = 0; i < queries.length; i++) { - query = queries[i]; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - assertFalse("Expected plan to not use round robin iterator " + query, plan.useRoundRobinIterator()); - } - } - - @Test - public void testSelectColumnsInOneFamily() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - Statement statement = conn.createStatement(); - try { - // create table with specified column family. - String create = "CREATE TABLE t (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar, v4 varchar)"; - statement.execute(create); - // select columns in one family. - String query = "SELECT f1.*, v4 FROM t"; - ResultSetMetaData rsMeta = statement.executeQuery(query).getMetaData(); - assertEquals("V1", rsMeta.getColumnName(1)); - assertEquals("V1", rsMeta.getColumnLabel(1)); - assertEquals("V2", rsMeta.getColumnName(2)); - assertEquals("V2", rsMeta.getColumnLabel(2)); - assertEquals("V4", rsMeta.getColumnName(3)); - assertEquals("V4", rsMeta.getColumnLabel(3)); - } finally { - statement.execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testSelectColumnsInOneFamilyWithSchema() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - Statement statement = conn.createStatement(); - try { - // create table with specified column family. - String create = "CREATE TABLE s.t (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar, v4 varchar)"; - statement.execute(create); - // select columns in one family. 
- String query = "SELECT f1.*, v4 FROM s.t"; - ResultSetMetaData rsMeta = statement.executeQuery(query).getMetaData(); - assertEquals("V1", rsMeta.getColumnName(1)); - assertEquals("V1", rsMeta.getColumnLabel(1)); - assertEquals("V2", rsMeta.getColumnName(2)); - assertEquals("V2", rsMeta.getColumnLabel(2)); - assertEquals("V4", rsMeta.getColumnLabel(3)); - assertEquals("V4", rsMeta.getColumnName(3)); - } finally { - statement.execute("DROP TABLE IF EXISTS s.t"); - conn.close(); - } - } - - @Test - public void testNoFromClauseSelect() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - ResultSet rs = conn.createStatement().executeQuery("SELECT 2 * 3 * 4, 5 + 1"); - assertTrue(rs.next()); - assertEquals(24, rs.getInt(1)); - assertEquals(6, rs.getInt(2)); - assertFalse(rs.next()); - - String query = - "SELECT 'a' AS col\n" + - "UNION ALL\n" + - "SELECT 'b' AS col\n" + - "UNION ALL\n" + - "SELECT 'c' AS col"; - rs = conn.createStatement().executeQuery(query); - assertTrue(rs.next()); - assertEquals("a", rs.getString(1)); - assertTrue(rs.next()); - assertEquals("b", rs.getString(1)); - assertTrue(rs.next()); - assertEquals("c", rs.getString(1)); - assertFalse(rs.next()); - - rs = conn.createStatement().executeQuery("SELECT * FROM (" + query + ")"); - assertTrue(rs.next()); - assertEquals("a", rs.getString(1)); - assertTrue(rs.next()); - assertEquals("b", rs.getString(1)); - assertTrue(rs.next()); - assertEquals("c", rs.getString(1)); - assertFalse(rs.next()); - } - - - @Test - public void testFailNoFromClauseSelect() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - try { - conn.createStatement().executeQuery("SELECT foo, bar"); - fail("Should have got ColumnNotFoundException"); - } catch (ColumnNotFoundException e) { - } - - try { - conn.createStatement().executeQuery("SELECT *"); - fail("Should have got SQLException"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT.getErrorCode(), e.getErrorCode()); - } - - try { - conn.createStatement().executeQuery("SELECT A.*"); - fail("Should have got SQLException"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NO_TABLE_SPECIFIED_FOR_WILDCARD_SELECT.getErrorCode(), e.getErrorCode()); - } - } finally { - conn.close(); - } - } - - @Test - public void testServerArrayElementProjection1() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr[1] from t"); - assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testServerArrayElementProjection2() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr, arr[1] from t"); - assertFalse(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testServerArrayElementProjection3() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - 
conn.createStatement().execute("CREATE TABLE t(a INTEGER PRIMARY KEY, arr INTEGER ARRAY, arr2 VARCHAR ARRAY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr, arr[1], arr2[1] from t"); - assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testServerArrayElementProjection4() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr1, arr1[1], ARRAY_APPEND(ARRAY_APPEND(arr1, arr2[2]), arr2[1]), p from t"); - assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testArrayAppendSingleArg() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); - conn.createStatement().executeQuery("SELECT ARRAY_APPEND(arr2) from t"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(),e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testArrayPrependSingleArg() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); - conn.createStatement().executeQuery("SELECT ARRAY_PREPEND(arr2) from t"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(),e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testArrayConcatSingleArg() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); - conn.createStatement().executeQuery("SELECT ARRAY_CAT(arr2) from t"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.FUNCTION_UNDEFINED.getErrorCode(),e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testServerArrayElementProjection5() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t (p INTEGER PRIMARY KEY, arr1 INTEGER ARRAY, arr2 INTEGER ARRAY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr1, arr1[1], ARRAY_ELEM(ARRAY_APPEND(arr1, arr2[1]), 1), p, arr2[2] from t"); - assertTrue(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testServerArrayElementProjectionWithArrayPrimaryKey() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t(arr INTEGER ARRAY PRIMARY KEY)"); - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT arr[1] from t"); - assertFalse(QueryUtil.getExplainPlan(rs).contains(" SERVER ARRAY ELEMENT PROJECTION")); - } finally { - 
conn.createStatement().execute("DROP TABLE IF EXISTS t"); - conn.close(); - } - } - - @Test - public void testAddingRowTimestampColumn() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - // Column of type VARCHAR cannot be declared as ROW_TIMESTAMP - try { - conn.createStatement().execute("CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 VARCHAR NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); - fail("Varchar column cannot be added as row_timestamp"); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); - } - // Column of type INTEGER cannot be declared as ROW_TIMESTAMP - try { - conn.createStatement().execute("CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 INTEGER NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); - fail("Integer column cannot be added as row_timestamp"); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); - } - // Column of type DOUBLE cannot be declared as ROW_TIMESTAMP - try { - conn.createStatement().execute("CREATE TABLE T1 (PK1 VARCHAR NOT NULL, PK2 DOUBLE NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2 ROW_TIMESTAMP)) "); - fail("Double column cannot be added as row_timestamp"); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); - } - // Invalid - two columns declared as row_timestamp in pk constraint - try { - conn.createStatement().execute("CREATE TABLE T2 (PK1 DATE NOT NULL, PK2 DATE NOT NULL, KV1 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1 ROW_TIMESTAMP , PK2 ROW_TIMESTAMP)) "); - fail("Creating table with two row_timestamp columns should fail"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.ROWTIMESTAMP_ONE_PK_COL_ONLY.getErrorCode(), e.getErrorCode()); - } - - // Invalid because only (unsigned)date, time, long, (unsigned)timestamp are valid data types for column to be declared as row_timestamp - try { - conn.createStatement().execute("CREATE TABLE T5 (PK1 VARCHAR PRIMARY KEY ROW_TIMESTAMP, PK2 VARCHAR, KV1 VARCHAR)"); - fail("Creating table with a key value column as row_timestamp should fail"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testGroupByVarbinaryOrArray() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T1 (PK VARCHAR PRIMARY KEY, c1 VARCHAR, c2 VARBINARY, C3 VARCHAR ARRAY, c4 VARBINARY, C5 VARCHAR ARRAY, C6 BINARY(10)) "); - try { - conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c2,c3"); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c3,c2"); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c2,c4"); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c3,c5"); - fail(); - } catch(SQLException e) { - 
assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().executeQuery("SELECT c1 FROM t1 GROUP BY c1,c6,c5"); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.UNSUPPORTED_GROUP_BY_EXPRESSIONS.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testDMLOfNonIndexWithBuildIndexAt() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props);) { - conn.createStatement().execute( - "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR)"); - } - props.put(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, Long.toString(EnvironmentEdgeManager.currentTimeMillis()+1)); - try (Connection conn = DriverManager.getConnection(getUrl(), props);) { - try { - conn.createStatement().execute("UPSERT INTO T (k,v1) SELECT k,v1 FROM T"); - fail(); - } catch (SQLException e) { - assertEquals("Unexpected Exception", - SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN - .getErrorCode(), e.getErrorCode()); - } - } - } - - @Test - public void testNegativeGuidePostWidth() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props);) { - try { - conn.createStatement().execute( - "CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR) GUIDE_POSTS_WIDTH = -1"); - fail(); - } catch (SQLException e) { - assertEquals("Unexpected Exception", - SQLExceptionCode.PARSER_ERROR - .getErrorCode(), e.getErrorCode()); - } - } - } - - private static void assertFamilies(Scan s, String... families) { - assertEquals(families.length, s.getFamilyMap().size()); - for (String fam : families) { - byte[] cf = Bytes.toBytes(fam); - assertTrue("Expected to contain " + fam, s.getFamilyMap().containsKey(cf)); - } - } - - @Test - public void testProjection() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t(k INTEGER PRIMARY KEY, a.v1 VARCHAR, b.v2 VARCHAR, c.v3 VARCHAR)"); - assertFamilies(projectQuery("SELECT k FROM t"), "A"); - assertFamilies(projectQuery("SELECT k FROM t WHERE k = 5"), "A"); - assertFamilies(projectQuery("SELECT v2 FROM t WHERE k = 5"), "A", "B"); - assertFamilies(projectQuery("SELECT v2 FROM t WHERE v2 = 'a'"), "B"); - assertFamilies(projectQuery("SELECT v3 FROM t WHERE v2 = 'a'"), "B", "C"); - assertFamilies(projectQuery("SELECT v3 FROM t WHERE v2 = 'a' AND v3 is null"), "A", "B", "C"); - } finally { - conn.close(); - } - } - - private static boolean hasColumnProjectionFilter(Scan scan) { - Iterator iterator = ScanUtil.getFilterIterator(scan); - while (iterator.hasNext()) { - Filter filter = iterator.next(); - if (filter instanceof EncodedQualifiersColumnProjectionFilter) { - return true; - } - } - return false; - } - - @Test - public void testColumnProjectionOptimized() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t(k INTEGER PRIMARY KEY, a.v1 VARCHAR, a.v1b VARCHAR, b.v2 VARCHAR, c.v3 VARCHAR)"); - assertTrue(hasColumnProjectionFilter(projectQuery("SELECT k, v1 FROM t WHERE v2 = 'foo'"))); - assertFalse(hasColumnProjectionFilter(projectQuery("SELECT k, v1 FROM t WHERE v1 = 'foo'"))); - assertFalse(hasColumnProjectionFilter(projectQuery("SELECT v1,v2 FROM t WHERE v1 = 'foo'"))); - assertTrue(hasColumnProjectionFilter(projectQuery("SELECT v1,v2 FROM t WHERE v1 = 
'foo' and v2 = 'bar' and v3 = 'bas'"))); - assertFalse(hasColumnProjectionFilter(projectQuery("SELECT a.* FROM t WHERE v1 = 'foo' and v1b = 'bar'"))); - } finally { - conn.close(); - } - } - @Test - public void testOrderByWithNoProjection() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("create table x (id integer primary key, A.i1 integer," + - " B.i2 integer)"); - Scan scan = projectQuery("select A.i1 from X group by i1 order by avg(B.i2) " + - "desc"); - ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute - (BaseScannerRegionObserverConstants.AGGREGATORS), null, null); - assertEquals(2,aggregators.getAggregatorCount()); - } finally { - conn.close(); - } - } - - @Test - public void testColumnProjectionUnionAll() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1(k INTEGER PRIMARY KEY,"+ - " col1 CHAR(8), col2 VARCHAR(10), col3 decimal(10,2))"); - conn.createStatement().execute("CREATE TABLE t2(k TINYINT PRIMARY KEY," + - " col1 CHAR(20), col2 CHAR(30), col3 double)"); - QueryPlan plan = getQueryPlan("SELECT * from t1 union all select * from t2", - Collections.emptyList()); - RowProjector rowProj = plan.getProjector(); - assertTrue(rowProj.getColumnProjector(0).getExpression().getDataType() - instanceof PInteger); - assertTrue(rowProj.getColumnProjector(1).getExpression().getDataType() - instanceof PChar); - assertTrue(rowProj.getColumnProjector(1).getExpression().getMaxLength() == 20); - assertTrue(rowProj.getColumnProjector(2).getExpression().getDataType() - instanceof PVarchar); - assertTrue(rowProj.getColumnProjector(2).getExpression().getMaxLength() == 30); - assertTrue(rowProj.getColumnProjector(3).getExpression().getDataType() - instanceof PDecimal); - assertTrue(rowProj.getColumnProjector(3).getExpression().getScale() == 2); - } finally { - conn.close(); - } - } - - @Test - public void testFuncIndexUsage() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1(k INTEGER PRIMARY KEY,"+ - " col1 VARCHAR, col2 VARCHAR)"); - conn.createStatement().execute("CREATE TABLE t2(k INTEGER PRIMARY KEY," + - " col1 VARCHAR, col2 VARCHAR)"); - conn.createStatement().execute("CREATE TABLE t3(j INTEGER PRIMARY KEY," + - " col3 VARCHAR, col4 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX idx ON t1 (col1 || col2)"); - String query = "SELECT a.k from t1 a where a.col1 || a.col2 = 'foobar'"; - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN "+query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + - " SERVER FILTER BY FIRST KEY ONLY",explainPlan); - query = "SELECT k,j from t3 b join t1 a ON k = j where a.col1 || a.col2 = 'foobar'"; - rs = conn.createStatement().executeQuery("EXPLAIN "+query); - explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER T3\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " PARALLEL INNER-JOIN TABLE 0\n" + - " CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " DYNAMIC SERVER FILTER BY B.J IN (\"A.:K\")",explainPlan); - query = "SELECT a.k,b.k from t2 b join t1 a ON a.k = b.k where a.col1 || a.col2 = 'foobar'"; - rs = conn.createStatement().executeQuery("EXPLAIN "+query); - explainPlan = 
QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER T2\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " PARALLEL INNER-JOIN TABLE 0\n" + - " CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX ['foobar']\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " DYNAMIC SERVER FILTER BY B.K IN (\"A.:K\")",explainPlan); - } finally { - conn.close(); - } - } - - @Test - public void testSaltTableJoin() throws Exception{ - - PhoenixConnection conn = (PhoenixConnection)DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("drop table if exists SALT_TEST2900"); - - conn.createStatement().execute( - "create table SALT_TEST2900"+ - "("+ - "id UNSIGNED_INT not null primary key,"+ - "appId VARCHAR"+ - ")SALT_BUCKETS=2"); - - - - conn.createStatement().execute("drop table if exists RIGHT_TEST2900 "); - conn.createStatement().execute( - "create table RIGHT_TEST2900"+ - "("+ - "appId VARCHAR not null primary key,"+ - "createTime VARCHAR"+ - ")"); - - - String sql="select * from SALT_TEST2900 a inner join RIGHT_TEST2900 b on a.appId=b.appId where a.id>=3 and a.id<=5"; - HashJoinPlan plan = (HashJoinPlan)getQueryPlan(sql, Collections.emptyList()); - ScanRanges ranges=plan.getContext().getScanRanges(); - - List regionLocations= - conn.getQueryServices().getAllTableRegions(Bytes.toBytes("SALT_TEST2900"), - 60000); - for (HRegionLocation regionLocation : regionLocations) { - assertTrue(ranges.intersectRegion(regionLocation.getRegion().getStartKey(), - regionLocation.getRegion().getEndKey(), false)); - } - } finally { - conn.close(); - } - } - - @Test - public void testStatefulDefault() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY, " + - "datecol DATE DEFAULT CURRENT_DATE())"; - - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testAlterTableStatefulDefault() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY)"; - String ddl2 = "ALTER TABLE table_with_default " + - "ADD datecol DATE DEFAULT CURRENT_DATE()"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl); - try { - conn.createStatement().execute(ddl2); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_CREATE_DEFAULT.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testDefaultTypeMismatch() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY, " + - "v VARCHAR DEFAULT 1)"; - - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testAlterTableDefaultTypeMismatch() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY)"; - String ddl2 = "ALTER TABLE table_with_default " + - "ADD v CHAR(3) DEFAULT 1"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl); - try { - conn.createStatement().execute(ddl2); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testDefaultTypeMismatchInView() 
throws Exception { - String ddl1 = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY, " + - "v VARCHAR DEFAULT 'foo')"; - String ddl2 = "CREATE VIEW my_view(v2 VARCHAR DEFAULT 1) AS SELECT * FROM table_with_default"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl1); - try { - conn.createStatement().execute(ddl2); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testDefaultRowTimestamp1() throws Exception { - String ddl = "CREATE TABLE IF NOT EXISTS table_with_defaults (" - + "pk1 INTEGER NOT NULL," - + "pk2 BIGINT NOT NULL DEFAULT 5," - + "CONSTRAINT NAME_PK PRIMARY KEY (pk1, pk2 ROW_TIMESTAMP))"; - - Connection conn = DriverManager.getConnection(getUrl()); - - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertEquals( - SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP.getErrorCode(), - e.getErrorCode()); - } - } - - @Test - public void testDefaultRowTimestamp2() throws Exception { - String ddl = "CREATE TABLE table_with_defaults (" - + "k BIGINT DEFAULT 5 PRIMARY KEY ROW_TIMESTAMP)"; - - Connection conn = DriverManager.getConnection(getUrl()); - - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertEquals( - SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP.getErrorCode(), - e.getErrorCode()); - } - } - - @Test - public void testDefaultSizeMismatch() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY, " + - "v CHAR(3) DEFAULT 'foobar')"; - - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute(ddl); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testAlterTableDefaultSizeMismatch() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY)"; - String ddl2 = "ALTER TABLE table_with_default " + - "ADD v CHAR(3) DEFAULT 'foobar'"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl); - try { - conn.createStatement().execute(ddl2); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testNullDefaultRemoved() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY, " + - "v VARCHAR DEFAULT null)"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl); - PTable table = conn.unwrap(PhoenixConnection.class).getMetaDataCache() - .getTableRef(new PTableKey(null,"TABLE_WITH_DEFAULT")).getTable(); - assertNull(table.getColumnForColumnName("V").getExpressionStr()); - } - - @Test - public void testNullAlterTableDefaultRemoved() throws Exception { - String ddl = "CREATE TABLE table_with_default (" + - "pk INTEGER PRIMARY KEY)"; - String ddl2 = "ALTER TABLE table_with_default " + - "ADD v CHAR(3) DEFAULT null"; - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(ddl); - conn.createStatement().execute(ddl2); - PTable table = conn.unwrap(PhoenixConnection.class).getMetaDataCache() - .getTableRef(new PTableKey(null,"TABLE_WITH_DEFAULT")).getTable(); - assertNull(table.getColumnForColumnName("V").getExpressionStr()); - } - - @Test - 
public void testIndexOnViewWithChildView() throws SQLException { - try (Connection conn = DriverManager.getConnection(getUrl())) { - conn.createStatement().execute("CREATE TABLE PLATFORM_ENTITY.GLOBAL_TABLE (\n" + - " ORGANIZATION_ID CHAR(15) NOT NULL,\n" + - " KEY_PREFIX CHAR(3) NOT NULL,\n" + - " CREATED_DATE DATE,\n" + - " CREATED_BY CHAR(15),\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " KEY_PREFIX\n" + - " )\n" + - ") VERSIONS=1, IMMUTABLE_ROWS=true, MULTI_TENANT=true"); - conn.createStatement().execute("CREATE VIEW PLATFORM_ENTITY.GLOBAL_VIEW (\n" + - " INT1 BIGINT NOT NULL,\n" + - " DOUBLE1 DECIMAL(12, 3),\n" + - " IS_BOOLEAN BOOLEAN,\n" + - " TEXT1 VARCHAR,\n" + - " CONSTRAINT PKVIEW PRIMARY KEY\n" + - " (\n" + - " INT1\n" + - " )\n" + - ")\n" + - "AS SELECT * FROM PLATFORM_ENTITY.GLOBAL_TABLE WHERE KEY_PREFIX = '123'"); - conn.createStatement().execute("CREATE INDEX GLOBAL_INDEX\n" + - "ON PLATFORM_ENTITY.GLOBAL_VIEW (TEXT1 DESC, INT1)\n" + - "INCLUDE (CREATED_BY, DOUBLE1, IS_BOOLEAN, CREATED_DATE)"); - String query = "SELECT DOUBLE1 FROM PLATFORM_ENTITY.GLOBAL_VIEW\n" - + "WHERE ORGANIZATION_ID = '00Dxx0000002Col' AND TEXT1='Test' AND INT1=1"; - QueryPlan plan = getOptimizedQueryPlan(query); - assertEquals("PLATFORM_ENTITY.GLOBAL_VIEW", plan.getContext().getCurrentTable().getTable().getName() - .getString()); - query = "SELECT DOUBLE1 FROM PLATFORM_ENTITY.GLOBAL_VIEW\n" - + "WHERE ORGANIZATION_ID = '00Dxx0000002Col' AND TEXT1='Test'"; - plan = getOptimizedQueryPlan(query); - assertEquals("PLATFORM_ENTITY.GLOBAL_INDEX", plan.getContext().getCurrentTable().getTable().getName().getString()); - } - } - - @Test - public void testNotNullKeyValueColumnSalted() throws Exception { - testNotNullKeyValueColumn(3); - } - @Test - public void testNotNullKeyValueColumnUnsalted() throws Exception { - testNotNullKeyValueColumn(0); - } - - private void testNotNullKeyValueColumn(int saltBuckets) throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1 (k integer not null primary key, v bigint not null) IMMUTABLE_ROWS=true" + (saltBuckets == 0 ? "" : (",SALT_BUCKETS="+saltBuckets))); - conn.createStatement().execute("UPSERT INTO t1 VALUES(0)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().execute("CREATE TABLE t2 (k integer not null primary key, v1 bigint not null, v2 varchar, v3 tinyint not null) IMMUTABLE_ROWS=true" + (saltBuckets == 0 ? "" : (",SALT_BUCKETS="+saltBuckets))); - conn.createStatement().execute("UPSERT INTO t2(k, v3) VALUES(0,0)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); - } - try { - conn.createStatement().execute("CREATE TABLE t3 (k integer not null primary key, v1 bigint not null, v2 varchar, v3 tinyint not null) IMMUTABLE_ROWS=true" + (saltBuckets == 0 ? "" : (",SALT_BUCKETS="+saltBuckets))); - conn.createStatement().execute("UPSERT INTO t3(k, v1) VALUES(0,0)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); - } - conn.createStatement().execute("CREATE TABLE t4 (k integer not null primary key, v1 bigint not null) IMMUTABLE_ROWS=true" + (saltBuckets == 0 ? 
"" : (",SALT_BUCKETS="+saltBuckets))); - conn.createStatement().execute("UPSERT INTO t4 VALUES(0,0)"); - conn.createStatement().execute("CREATE TABLE t5 (k integer not null primary key, v1 bigint not null default 0) IMMUTABLE_ROWS=true" + (saltBuckets == 0 ? "" : (",SALT_BUCKETS="+saltBuckets))); - conn.createStatement().execute("UPSERT INTO t5 VALUES(0)"); + } + } + } + + @Test + public void testOrderByDescWithNullsLastBug3469() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + + String sql = + "CREATE TABLE DESCNULLSLAST3469 ( " + "ORGANIZATION_ID VARCHAR," + "CONTAINER_ID VARCHAR," + + "ENTITY_ID VARCHAR NOT NULL," + "CONSTRAINT TEST_PK PRIMARY KEY ( " + + "ORGANIZATION_ID DESC," + "CONTAINER_ID DESC," + "ENTITY_ID" + "))"; + conn.createStatement().execute(sql); + + // -----ORGANIZATION_ID + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST"; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + // ----CONTAINER_ID + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + // -----ORGANIZATION_ID ASC CONTAINER_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS 
FIRST,CONTAINER_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + + // -----ORGANIZATION_ID ASC CONTAINER_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); 
+ + // -----ORGANIZATION_ID DESC CONTAINER_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID NULLS LAST")); + + // -----ORGANIZATION_ID DESC CONTAINER_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + + // -----CONTAINER_ID ASC ORGANIZATION_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + // -----CONTAINER_ID ASC ORGANIZATION_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + 
.equals("CONTAINER_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + // -----CONTAINER_ID DESC ORGANIZATION_ID ASC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID NULLS LAST")); + + // -----CONTAINER_ID DESC ORGANIZATION_ID DESC + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC")); 
+ assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC")); + + sql = + "SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString() + .equals("CONTAINER_ID DESC NULLS LAST")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString() + .equals("ORGANIZATION_ID DESC NULLS LAST")); + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testAlterAddNotNullKeyValueColumn() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t1 (k integer not null primary key, v1 bigint not null) IMMUTABLE_ROWS=true"); - try { - conn.createStatement().execute("ALTER TABLE t1 ADD k2 bigint not null primary key"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY.getErrorCode(), e.getErrorCode()); - } - conn.createStatement().execute("ALTER TABLE t1 ADD v2 bigint not null"); - try { - conn.createStatement().execute("UPSERT INTO t1(k, v1) VALUES(0,0)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CONSTRAINT_VIOLATION.getErrorCode(), e.getErrorCode()); - } - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0,0)"); - conn.createStatement().execute("UPSERT INTO t1(v1,k,v2) VALUES(0,0,0)"); - } - - @Test - public void testOnDupKeyForImmutableTable() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1 (k integer not null primary key, v bigint) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = v + 1"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testUpdatePKOnDupKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE k2 = v + 1"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testOnDupKeyTypeMismatch() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1 (k1 integer not null, k2 integer not null, v1 bigint, v2 varchar, constraint pk primary key (k1,k2))"); - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE 
KEY UPDATE v1 = v2 || 'a'"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testDuplicateColumnOnDupKeyUpdate() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE t1 (k1 integer not null, k2 integer not null, v1 bigint, v2 bigint, constraint pk primary key (k1,k2))"); - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v1 = v1 + 1, v1 = v2 + 2"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testAggregationInOnDupKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); - try { - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = sum(v)"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testSequenceInOnDupKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t1 (k1 integer not null, k2 integer not null, v bigint, constraint pk primary key (k1,k2))"); - conn.createStatement().execute("CREATE SEQUENCE s1"); - try { - conn.createStatement().execute("UPSERT INTO t1 VALUES(0,0) ON DUPLICATE KEY UPDATE v = next value for s1"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.INVALID_USE_OF_NEXT_VALUE_FOR.getErrorCode(), e.getErrorCode()); - } finally { - conn.close(); - } - } - - @Test - public void testOrderPreservingGroupBy() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - - conn.createStatement().execute("CREATE TABLE test (\n" + - " pk1 INTEGER NOT NULL,\n" + - " pk2 INTEGER NOT NULL,\n" + - " pk3 INTEGER NOT NULL,\n" + - " pk4 INTEGER NOT NULL,\n" + - " v1 INTEGER,\n" + - " CONSTRAINT pk PRIMARY KEY (\n" + - " pk1,\n" + - " pk2,\n" + - " pk3,\n" + - " pk4\n" + - " )\n" + - " )"); - String[] queries = new String[] { - "SELECT pk3 FROM test WHERE pk2 = 1 GROUP BY pk2+1,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk2 = 1 GROUP BY pk2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk1+pk2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk4,CASE WHEN pk1 > pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk4,pk3", - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = getQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().isEmpty()); - index++; - } - } - } - - @Test - public void testOrderPreservingGroupByForNotPkColumns() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - conn.createStatement().execute("CREATE TABLE test (\n" + - " pk1 varchar, \n" + - " pk2 varchar, \n" + - " pk3 varchar, \n" + - " pk4 varchar, \n" + - " v1 varchar, \n" + - " v2 varchar,\n" + - " CONSTRAINT pk PRIMARY KEY (\n" + - " pk1,\n" + - " pk2,\n" + - " pk3,\n" + - " pk4\n" + - " )\n" + - " )"); - String[] queries = new String[] { - "SELECT pk3 FROM test WHERE v2 = 
'a' GROUP BY substr(v2,0,1),pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 'c' and v2 = substr('abc',1,1) GROUP BY v2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE v1 = 'a' and v2 = 'b' GROUP BY length(v1)+length(v2),pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = 'b' GROUP BY length(pk1)+length(v2),pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE v1 = 'a' and v2 = substr('abc',2,1) GROUP BY pk4,CASE WHEN v1 > v2 THEN v1 ELSE v2 END,pk3 ORDER BY pk4,pk3", - "SELECT pk3 FROM test WHERE pk1 = 'a' and v2 = substr('abc',2,1) GROUP BY pk4,CASE WHEN pk1 > v2 THEN pk1 ELSE v2 END,pk3 ORDER BY pk4,pk3", - "SELECT pk3 FROM test WHERE pk1 = 'a' and pk2 = 'b' and v1 = 'c' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk3" - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = getQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().isEmpty()); - index++; - } - } - } - - @Test - public void testOrderPreservingGroupByForClientAggregatePlan() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = "test_table"; - String sql = "create table " + tableName + "( "+ - " pk1 varchar not null , " + - " pk2 varchar not null, " + - " pk3 varchar not null," + - " v1 varchar, " + - " v2 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1,"+ - "pk2,"+ - "pk3 ))"; - conn.createStatement().execute(sql); - - String[] queries = new String[] { - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "group by a.ak3,a.av1 order by a.ak3,a.av1", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av2 = 'a' GROUP BY substr(a.av2,0,1),ak3 ORDER BY ak3", - - //for InListExpression - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av2 in('a') GROUP BY substr(a.av2,0,1),ak3 ORDER BY ak3", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 'c' and a.av2 = substr('abc',1,1) GROUP BY a.av2,a.ak3 ORDER BY a.ak3", - - //for RVC - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where (a.ak1,a.av2) = ('c', substr('abc',1,1)) GROUP BY a.av2,a.ak3 ORDER BY a.ak3", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2 = 'b' GROUP BY length(a.av1)+length(a.av2),a.ak3 ORDER BY a.ak3", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 'a' and a.av2 = 'b' GROUP BY length(a.ak1)+length(a.av2),a.ak3 ORDER BY a.ak3", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3, 
coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2 = substr('abc',2,1) GROUP BY a.ak4,CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3 ORDER BY a.ak4,a.ak3", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 0.0 and a.av2 = (5+3*2) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 0.0 and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - //for IS NULL - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 0.0 and a.av2 is null GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy()== OrderBy.FWD_ROW_KEY_ORDER_BY); - index++; - } - } - finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testNotOrderPreservingGroupBy() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - - conn.createStatement().execute("CREATE TABLE test (\n" + - " pk1 INTEGER NOT NULL,\n" + - " pk2 INTEGER NOT NULL,\n" + - " pk3 INTEGER NOT NULL,\n" + - " pk4 INTEGER NOT NULL,\n" + - " v1 INTEGER,\n" + - " CONSTRAINT pk PRIMARY KEY (\n" + - " pk1,\n" + - " pk2,\n" + - " pk3,\n" + - " pk4\n" + - " )\n" + - " )"); - String[] queries = new String[] { - "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY pk4,CASE WHEN pk1 > pk2 THEN coalesce(v1,1) ELSE pk2 END,pk3 ORDER BY pk4,pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 and pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3", - "SELECT pk3 FROM test GROUP BY pk2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 GROUP BY pk1,pk2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 GROUP BY RAND()+pk1,pk2,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 1 and 
pk2 = 2 GROUP BY CASE WHEN pk1 > pk2 THEN pk1 ELSE RAND(1) END,pk3 ORDER BY pk3", - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = getQueryPlan(conn, query); - assertFalse((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().isEmpty()); - index++; - } - } - } - - @Test - public void testNotOrderPreservingGroupByForNotPkColumns() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - conn.createStatement().execute("CREATE TABLE test (\n" + - " pk1 varchar,\n" + - " pk2 varchar,\n" + - " pk3 varchar,\n" + - " pk4 varchar,\n" + - " v1 varchar,\n" + - " v2 varchar,\n" + - " CONSTRAINT pk PRIMARY KEY (\n" + - " pk1,\n" + - " pk2,\n" + - " pk3,\n" + - " pk4\n" + - " )\n" + - " )"); - String[] queries = new String[] { - "SELECT pk3 FROM test WHERE (pk1 = 'a' and pk2 = 'b') or v1 ='c' GROUP BY pk4,CASE WHEN pk1 > pk2 THEN coalesce(v1,'1') ELSE pk2 END,pk3 ORDER BY pk4,pk3", - "SELECT pk3 FROM test WHERE pk1 = 'a' or pk2 = 'b' GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk3", - "SELECT pk3 FROM test WHERE pk1 = 'a' and (pk2 = 'b' or v1 = 'c') GROUP BY CASE WHEN pk1 > pk2 THEN v1 WHEN pk1 = pk2 THEN pk1 ELSE pk2 END,pk3 ORDER BY pk3", - "SELECT v2 FROM test GROUP BY v1,v2 ORDER BY v2", - "SELECT pk3 FROM test WHERE v1 = 'a' GROUP BY v1,v2,pk3 ORDER BY pk3", - "SELECT length(pk3) FROM test WHERE v1 = 'a' GROUP BY RAND()+length(v1),length(v2),length(pk3) ORDER BY length(v2),length(pk3)", - "SELECT length(pk3) FROM test WHERE v1 = 'a' and v2 = 'b' GROUP BY CASE WHEN v1 > v2 THEN length(v1) ELSE RAND(1) END,length(pk3) ORDER BY length(pk3)", - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = getQueryPlan(conn, query); - assertFalse((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().isEmpty()); - index++; - } - } - } - - @Test - public void testNotOrderPreservingGroupByForClientAggregatePlan() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = "table_test"; - String sql = "create table " + tableName + "( "+ - " pk1 varchar not null , " + - " pk2 varchar not null, " + - " pk3 varchar not null," + - " v1 varchar, " + - " v2 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1,"+ - "pk2,"+ - "pk3 ))"; - conn.createStatement().execute(sql); - - String[] queries = new String[] { - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where (a.ak1 = 'a' and a.ak2 = 'b') or a.av1 ='c' GROUP BY a.ak4,CASE WHEN a.ak1 > a.ak2 THEN coalesce(a.av1,'1') ELSE a.ak2 END,a.ak3 ORDER BY a.ak4,a.ak3", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 'a' or a.ak2 = 'b' GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", - - //for in - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 in ( 'a','b') GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", - - "select 
a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 'a' and (a.ak2 = 'b' or a.av1 = 'c') GROUP BY CASE WHEN a.ak1 > a.ak2 THEN a.av1 WHEN a.ak1 = a.ak2 THEN a.ak1 ELSE a.ak2 END,a.ak3 ORDER BY a.ak3", - - "select a.av2 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "GROUP BY a.av1,a.av2 ORDER BY a.av2", - - "select a.ak3 "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' GROUP BY a.av1,a.av2,a.ak3 ORDER BY a.ak3", - - "select length(a.ak3) "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' GROUP BY RAND()+length(a.av1),length(a.av2),length(a.ak3) ORDER BY length(a.av2),length(a.ak3)", - - "select length(a.ak3) "+ - "from (select substr(pk1,1,1) ak1,substr(pk2,1,1) ak2,substr(pk3,1,1) ak3,coalesce(pk3,'1') ak4, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2 = 'b' GROUP BY CASE WHEN a.av1 > a.av2 THEN length(a.av1) ELSE RAND(1) END,length(a.ak3) ORDER BY length(a.ak3)", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 > 0.0 and a.av2 = (5+3*2) GROUP BY a.ak3,CASE WHEN a.ak1 > a.av2 THEN a.ak1 ELSE a.av2 END,a.av1 ORDER BY a.ak3,a.av1", - - //for CoerceExpression - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where CAST(a.ak1 AS INTEGER) = 0 and a.av2 = (5+3*2) GROUP BY a.ak3,a.ak1,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 = 0.0 or a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - //for IS NULL - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 is not null and a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 is null or a.av2 = length(substr('abc',1,1)) GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) and a.ak1 = 0.0 GROUP BY a.ak3,CASE WHEN 
coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - - "select a.ak3 "+ - "from (select rand() ak1,length(pk2) ak2,length(pk3) ak3,length(v1) av1,length(v2) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.ak1 is null and a.av2 = length(substr('abc',1,1)) or a.ak1 = 0.0 GROUP BY a.ak3,CASE WHEN coalesce(a.ak1,1) > coalesce(a.av2,2) THEN coalesce(a.ak1,1) ELSE coalesce(a.av2,2) END,a.av1 ORDER BY a.ak3,a.av1", - }; - int index = 0; - for (String query : queries) { - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().size() > 0); - index++; - } - } - finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testOrderByOptimizeForClientAggregatePlanAndDesc() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = "test_table"; - String sql = "create table " + tableName + "( "+ - " pk1 varchar not null, " + - " pk2 varchar not null, " + - " pk3 varchar not null, " + - " v1 varchar, " + - " v2 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1 desc,"+ - "pk2 desc,"+ - "pk3 desc))"; - conn.createStatement().execute(sql); - - String[] queries = new String[] { - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "group by a.ak3,a.av1 order by a.ak3 desc,a.av1", - - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' group by a.av1,a.ak3 order by a.ak3 desc", - - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3 desc,a.ak2 desc" - }; - - int index = 0; - for (String query : queries) { - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy()== OrderBy.FWD_ROW_KEY_ORDER_BY); - index++; - } - - queries = new String[] { - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "group by a.ak3,a.av1 order by a.ak3,a.av1", - - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' group by a.av1,a.ak3 order by a.ak3", - - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3,a.ak2", - - "select a.ak3 "+ - "from (select pk1 ak1,pk2 ak2,pk3 ak3,substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+ - "where a.av1 = 'a' and a.av2= 'b' group by CASE WHEN a.av1 > a.av2 THEN a.av1 ELSE a.av2 END,a.ak3,a.ak2 order by a.ak3 asc,a.ak2 desc" - }; - index = 0; - for (String query : queries) { - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, query); - assertTrue((index + 1) + ") " + queries[index], plan.getOrderBy().getOrderByExpressions().size() > 0); - index++; - } - } - finally { - if(conn 
!= null) { - conn.close(); - } - } - } - - @Test - public void testGroupByDescColumnBug3451() throws Exception { - - try (Connection conn= DriverManager.getConnection(getUrl())) { - - conn.createStatement().execute("CREATE TABLE IF NOT EXISTS GROUPBYTEST (\n" + - " ORGANIZATION_ID CHAR(15) NOT NULL,\n" + - " CONTAINER_ID CHAR(15) NOT NULL,\n" + - " ENTITY_ID CHAR(15) NOT NULL,\n" + - " SCORE DOUBLE,\n" + - " CONSTRAINT TEST_PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " CONTAINER_ID,\n" + - " ENTITY_ID\n" + - " )\n" + - " )"); - conn.createStatement().execute("CREATE INDEX SCORE_IDX ON GROUPBYTEST (ORGANIZATION_ID,CONTAINER_ID, SCORE DESC, ENTITY_ID DESC)"); - QueryPlan plan = getQueryPlan(conn, "SELECT DISTINCT entity_id, score\n" + - " FROM GROUPBYTEST\n" + - " WHERE organization_id = 'org2'\n" + - " AND container_id IN ( 'container1','container2','container3' )\n" + - " ORDER BY score DESC\n" + - " LIMIT 2"); - assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); - plan = getQueryPlan(conn, "SELECT DISTINCT entity_id, score\n" + - " FROM GROUPBYTEST\n" + - " WHERE entity_id = 'entity1'\n" + - " AND container_id IN ( 'container1','container2','container3' )\n" + - " ORDER BY score DESC\n" + - " LIMIT 2"); - assertTrue(plan.getOrderBy().getOrderByExpressions().isEmpty()); - } - } - - @Test - public void testGroupByDescColumnBug3452() throws Exception { - - Connection conn=null; - try { - conn= DriverManager.getConnection(getUrl()); - - String sql="CREATE TABLE GROUPBYDESC3452 ( "+ - "ORGANIZATION_ID VARCHAR,"+ - "CONTAINER_ID VARCHAR,"+ - "ENTITY_ID VARCHAR NOT NULL,"+ - "CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "ORGANIZATION_ID DESC,"+ - "CONTAINER_ID DESC,"+ - "ENTITY_ID"+ - "))"; - conn.createStatement().execute(sql); - - //-----ORGANIZATION_ID - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST"; - QueryPlan queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy()== OrderBy.REV_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy()== OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - //----CONTAINER_ID - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); 
- assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----ORGANIZATION_ID ASC CONTAINER_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - - //-----ORGANIZATION_ID ASC CONTAINER_ID DESC - - 
sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----ORGANIZATION_ID DESC CONTAINER_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, 
CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - //-----ORGANIZATION_ID DESC CONTAINER_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----CONTAINER_ID ASC ORGANIZATION_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - //-----CONTAINER_ID ASC ORGANIZATION_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - //-----CONTAINER_ID DESC ORGANIZATION_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - //-----CONTAINER_ID DESC ORGANIZATION_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID,CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM GROUPBYDESC3452 group by ORGANIZATION_ID, CONTAINER_ID order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testOrderByDescWithNullsLastBug3469() throws Exception { - Connection conn=null; - try { - conn= DriverManager.getConnection(getUrl()); - - String sql="CREATE TABLE DESCNULLSLAST3469 ( "+ - "ORGANIZATION_ID VARCHAR,"+ - "CONTAINER_ID VARCHAR,"+ - "ENTITY_ID VARCHAR NOT NULL,"+ - "CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "ORGANIZATION_ID DESC,"+ - "CONTAINER_ID DESC,"+ - "ENTITY_ID"+ - "))"; - conn.createStatement().execute(sql); - - //-----ORGANIZATION_ID - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST"; - QueryPlan queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy()== OrderBy.REV_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy()== OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - //----CONTAINER_ID - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==1); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----ORGANIZATION_ID ASC CONTAINER_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS FIRST,CONTAINER_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID NULLS LAST,CONTAINER_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - - //-----ORGANIZATION_ID ASC CONTAINER_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID ASC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----ORGANIZATION_ID DESC CONTAINER_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID NULLS LAST")); - - //-----ORGANIZATION_ID DESC CONTAINER_ID DESC - - 
sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS FIRST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by ORGANIZATION_ID DESC NULLS LAST,CONTAINER_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CONTAINER_ID DESC NULLS LAST")); - - //-----CONTAINER_ID ASC ORGANIZATION_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS FIRST,ORGANIZATION_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID NULLS LAST,ORGANIZATION_ID NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - //-----CONTAINER_ID ASC ORGANIZATION_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID ASC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - //-----CONTAINER_ID DESC ORGANIZATION_ID ASC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID")); - - sql="SELECT 
CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID ASC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID NULLS LAST")); - - //-----CONTAINER_ID DESC ORGANIZATION_ID DESC - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS FIRST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS FIRST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC")); - - sql="SELECT CONTAINER_ID,ORGANIZATION_ID FROM DESCNULLSLAST3469 order by CONTAINER_ID DESC NULLS LAST,ORGANIZATION_ID DESC NULLS LAST"; - queryPlan =getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size()==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("CONTAINER_ID DESC NULLS LAST")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("ORGANIZATION_ID DESC NULLS LAST")); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testOrderByReverseOptimizationBug3491() throws Exception { - for(boolean salted: new boolean[]{true,false}) { - boolean[] groupBys=new boolean[]{true,true,true,true,false,false,false,false}; - doTestOrderByReverseOptimizationBug3491(salted,true,true,true, - groupBys, - new OrderBy[]{ - OrderBy.REV_ROW_KEY_ORDER_BY,null,null,OrderBy.FWD_ROW_KEY_ORDER_BY, - OrderBy.REV_ROW_KEY_ORDER_BY,null,null,OrderBy.FWD_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationBug3491(salted,true,true,false, - groupBys, - new OrderBy[]{ - OrderBy.REV_ROW_KEY_ORDER_BY,null,null,OrderBy.FWD_ROW_KEY_ORDER_BY, - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationBug3491(salted,true,false,true, - groupBys, - new OrderBy[]{ - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null, - OrderBy.REV_ROW_KEY_ORDER_BY,null,null,OrderBy.FWD_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationBug3491(salted,true,false,false, - groupBys, - new OrderBy[]{ - 
null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null, - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationBug3491(salted,false,true,true, - groupBys, - new OrderBy[]{ - null,OrderBy.FWD_ROW_KEY_ORDER_BY,OrderBy.REV_ROW_KEY_ORDER_BY,null, - null,OrderBy.FWD_ROW_KEY_ORDER_BY,OrderBy.REV_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationBug3491(salted,false,true,false, - groupBys, - new OrderBy[]{ - null,OrderBy.FWD_ROW_KEY_ORDER_BY,OrderBy.REV_ROW_KEY_ORDER_BY,null, - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationBug3491(salted,false,false,true, - groupBys, - new OrderBy[]{ - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - null,OrderBy.FWD_ROW_KEY_ORDER_BY,OrderBy.REV_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationBug3491(salted,false,false,false, - groupBys, - new OrderBy[]{ - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - } - } - - private void doTestOrderByReverseOptimizationBug3491(boolean salted,boolean desc1,boolean desc2,boolean desc3,boolean[] groupBys,OrderBy[] orderBys) throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - String tableName="ORDERBY3491_TEST"; - conn.createStatement().execute("DROP TABLE if exists "+tableName); - String sql="CREATE TABLE "+tableName+" ( "+ - "ORGANIZATION_ID INTEGER NOT NULL,"+ - "CONTAINER_ID INTEGER NOT NULL,"+ - "SCORE INTEGER NOT NULL,"+ - "ENTITY_ID INTEGER NOT NULL,"+ - "CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "ORGANIZATION_ID" +(desc1 ? " DESC" : "" )+","+ - "CONTAINER_ID"+(desc2 ? " DESC" : "" )+","+ - "SCORE"+(desc3 ? " DESC" : "" )+","+ - "ENTITY_ID"+ - ")) "+(salted ? 
"SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - - String[] sqls={ - //groupBy orderPreserving orderBy asc asc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC, CONTAINER_ID ASC", - //groupBy orderPreserving orderBy asc desc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC, CONTAINER_ID DESC", - //groupBy orderPreserving orderBy desc asc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC, CONTAINER_ID ASC", - //groupBy orderPreserving orderBy desc desc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC, CONTAINER_ID DESC", - - //groupBy not orderPreserving orderBy asc asc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC, SCORE ASC", - //groupBy not orderPreserving orderBy asc desc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC, SCORE DESC", - //groupBy not orderPreserving orderBy desc asc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC, SCORE ASC", - //groupBy not orderPreserving orderBy desc desc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC, SCORE DESC" - }; - - for(int i=0;i< sqls.length;i++) { - sql=sqls[i]; - QueryPlan queryPlan=getQueryPlan(conn, sql); - assertTrue((i+1) + ") " + sql,queryPlan.getGroupBy().isOrderPreserving()== groupBys[i]); - OrderBy orderBy=queryPlan.getOrderBy(); - if(orderBys[i]!=null) { - assertTrue((i+1) + ") " + sql,orderBy == orderBys[i]); - } - else { - assertTrue((i+1) + ") " + sql,orderBy.getOrderByExpressions().size() > 0); - } - } - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testOrderByReverseOptimizationWithNUllsLastBug3491() throws Exception { - for(boolean salted: new boolean[]{true,false}) { - boolean[] groupBys=new boolean[]{ - //groupBy orderPreserving orderBy asc asc - true,true,true,true, - //groupBy orderPreserving orderBy asc desc - true,true,true,true, - //groupBy orderPreserving orderBy desc asc - true,true,true,true, - //groupBy orderPreserving orderBy desc desc - true,true,true,true, - - //groupBy not orderPreserving orderBy asc asc - false,false,false,false, - //groupBy not orderPreserving orderBy asc desc - false,false,false,false, - //groupBy not orderPreserving orderBy desc asc - false,false,false,false, - //groupBy not orderPreserving orderBy desc desc - false,false,false,false, - - false,false,false,false}; - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,true,true, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy asc desc - null,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,null, - //groupBy orderPreserving orderBy desc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy asc desc - null,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,null, - //groupBy not orderPreserving orderBy desc desc - 
OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,true,false, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy asc desc - null,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,null, - //groupBy orderPreserving orderBy desc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,null, - //groupBy not orderPreserving orderBy asc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy desc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy desc desc - null,null,null,null, - - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,false,true, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,null, - //groupBy orderPreserving orderBy asc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy desc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy desc desc - null,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy asc desc - null,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,null, - //groupBy not orderPreserving orderBy desc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,false,false, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,null, - //groupBy orderPreserving orderBy asc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy desc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy desc desc - null,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,null, - //groupBy not orderPreserving orderBy asc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy desc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy desc desc - null,null,null,null, - - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,true,true, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,null, - //groupBy orderPreserving orderBy asc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy desc desc - null,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,null, - //groupBy not orderPreserving orderBy asc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy desc desc - null,null,null,null, - - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - - 
doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,true,false, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - null,null,null,null, - //groupBy orderPreserving orderBy asc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy orderPreserving orderBy desc desc - null,null,null,null, - - //groupBy not orderPreserving orderBy asc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy asc desc - null,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,null, - //groupBy not orderPreserving orderBy desc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,false,true, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy asc desc - null,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,null, - //groupBy orderPreserving orderBy desc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - - //groupBy not orderPreserving orderBy asc asc - null,null,null,null, - //groupBy not orderPreserving orderBy asc desc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - //groupBy not orderPreserving orderBy desc desc - null,null,null,null, - - null,OrderBy.REV_ROW_KEY_ORDER_BY,OrderBy.FWD_ROW_KEY_ORDER_BY,null}); - - doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,false,false, - groupBys, - new OrderBy[]{ - //groupBy orderPreserving orderBy asc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy orderPreserving orderBy asc desc - null,null,null,null, - //groupBy orderPreserving orderBy desc asc - null,null,null,null, - //groupBy orderPreserving orderBy desc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - - //groupBy not orderPreserving orderBy asc asc - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,null, - //groupBy not orderPreserving orderBy asc desc - null,null,null,null, - //groupBy not orderPreserving orderBy desc asc - null,null,null,null, - //groupBy not orderPreserving orderBy desc desc - null,null,null,OrderBy.REV_ROW_KEY_ORDER_BY, - - OrderBy.FWD_ROW_KEY_ORDER_BY,null,null,OrderBy.REV_ROW_KEY_ORDER_BY}); - } - } - - private void doTestOrderByReverseOptimizationWithNUllsLastBug3491(boolean salted,boolean desc1,boolean desc2,boolean desc3,boolean[] groupBys,OrderBy[] orderBys) throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - String tableName="ORDERBY3491_TEST"; - conn.createStatement().execute("DROP TABLE if exists "+tableName); - String sql="CREATE TABLE "+tableName+" ( "+ - "ORGANIZATION_ID VARCHAR,"+ - "CONTAINER_ID VARCHAR,"+ - "SCORE VARCHAR,"+ - "ENTITY_ID VARCHAR NOT NULL,"+ - "CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "ORGANIZATION_ID" +(desc1 ? " DESC" : "" )+","+ - "CONTAINER_ID"+(desc2 ? " DESC" : "" )+","+ - "SCORE"+(desc3 ? " DESC" : "" )+","+ - "ENTITY_ID"+ - ")) "+(salted ? 
"SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - String[] sqls={ - //groupBy orderPreserving orderBy asc asc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID ASC NULLS LAST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID ASC NULLS LAST", - - //groupBy orderPreserving orderBy asc desc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID DESC NULLS LAST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID DESC NULLS LAST", - - //groupBy orderPreserving orderBy desc asc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID ASC NULLS LAST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID ASC NULLS LAST", - - //groupBy orderPreserving orderBy desc desc - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID DESC NULLS LAST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,CONTAINER_ID FROM "+tableName+" group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID DESC NULLS LAST", - - //-----groupBy not orderPreserving - - //groupBy not orderPreserving orderBy asc asc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE ASC NULLS LAST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE ASC NULLS FIRST", - "SELECT 
ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE ASC NULLS LAST", - - //groupBy not orderPreserving orderBy asc desc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE DESC NULLS LAST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE DESC NULLS LAST", - - //groupBy not orderPreserving orderBy desc asc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE ASC NULLS LAST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE ASC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE ASC NULLS LAST", - - //groupBy not orderPreserving orderBy desc desc - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE DESC NULLS LAST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE DESC NULLS FIRST", - "SELECT ORGANIZATION_ID,SCORE FROM "+tableName+" group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE DESC NULLS LAST", - - //-------only one return column---------------------------------- - "SELECT SCORE FROM "+tableName+" group by SCORE ORDER BY SCORE ASC NULLS FIRST", - "SELECT SCORE FROM "+tableName+" group by SCORE ORDER BY SCORE ASC NULLS LAST", - "SELECT SCORE FROM "+tableName+" group by SCORE ORDER BY SCORE DESC NULLS FIRST", - "SELECT SCORE FROM "+tableName+" group by SCORE ORDER BY SCORE DESC NULLS LAST" - }; - - for(int i=0;i< sqls.length;i++) { - sql=sqls[i]; - QueryPlan queryPlan=getQueryPlan(conn, sql); - assertTrue((i+1) + ") " + sql,queryPlan.getGroupBy().isOrderPreserving()== groupBys[i]); - OrderBy orderBy=queryPlan.getOrderBy(); - if(orderBys[i]!=null) { - assertTrue((i+1) + ") " + sql,orderBy == orderBys[i]); - } - else { - assertTrue((i+1) + ") " + sql,orderBy.getOrderByExpressions().size() > 0); - } - } - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testGroupByCoerceExpressionBug3453() throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - String tableName="GROUPBY3453_INT"; - String sql="CREATE TABLE "+ tableName +"("+ - "ENTITY_ID INTEGER NOT NULL,"+ - "CONTAINER_ID INTEGER NOT NULL,"+ - "SCORE INTEGER NOT NULL,"+ - "CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID DESC,CONTAINER_ID DESC,SCORE DESC))"; - conn.createStatement().execute(sql); - sql="select DISTINCT entity_id, score from ( select entity_id, score from "+tableName+" limit 1)"; - QueryPlan 
queryPlan=getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder()==SortOrder.DESC); - - sql="select DISTINCT entity_id, score from ( select entity_id, score from "+tableName+" limit 3) order by entity_id"; - queryPlan=getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).getExpression().getSortOrder()==SortOrder.DESC); - - sql="select DISTINCT entity_id, score from ( select entity_id, score from "+tableName+" limit 3) order by entity_id desc"; - queryPlan=getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder()==SortOrder.DESC); - assertTrue(queryPlan.getOrderBy()==OrderBy.FWD_ROW_KEY_ORDER_BY); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - private static QueryPlan getQueryPlan(Connection conn,String sql) throws SQLException { - PhoenixPreparedStatement statement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - QueryPlan queryPlan = statement.optimizeQuery(sql); - queryPlan.iterator(); - return queryPlan; - } - - @Test - public void testSortMergeJoinSubQueryOrderByOverrideBug3745() throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - - String tableName1="MERGE1"; - String tableName2="MERGE2"; - - conn.createStatement().execute("DROP TABLE if exists "+tableName1); - - String sql="CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+ - "AID INTEGER PRIMARY KEY,"+ - "AGE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - conn.createStatement().execute("DROP TABLE if exists "+tableName2); - sql="CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+ - "BID INTEGER PRIMARY KEY,"+ - "CODE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - //test for simple scan - sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid "; - - QueryPlan queryPlan=getQueryPlan(conn, sql); - SortMergeJoinPlan sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - ClientScanPlan lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - OrderBy orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - ScanPlan innerScanPlan=(ScanPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate(); - 
orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AGE")); - assertTrue(innerScanPlan.getLimit().intValue() == 3); - - ClientScanPlan rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); - innerScanPlan=(ScanPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("CODE")); - assertTrue(innerScanPlan.getLimit().intValue() == 1); - - //test for aggregate - sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from "+tableName1+" where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join "+ - "(select bid,sum(code) codesum from "+tableName2+" group by bid order by codesum limit 1) b on a.aid=b.bid "; - - - queryPlan=getQueryPlan(conn, sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - AggregatePlan innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(AGE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); - innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 1); - - String tableName3="merge3"; - conn.createStatement().execute("DROP TABLE if exists "+tableName3); - sql="CREATE TABLE IF NOT EXISTS "+tableName3+" ( "+ - "CID INTEGER PRIMARY KEY,"+ - "REGION INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - //test for join - sql="select t1.aid,t1.code,t2.region from "+ - "(select a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join "+ - "(select a.aid,c.region from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid"; - - PhoenixPreparedStatement phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - 
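A condensed sketch of the plan-tree walk performed by the sort-merge-join assertions in this test. Class and package names are assumed from the Phoenix codebase, and the casts only hold for the simple-scan shape of the first query above (no aggregation and no hash join inside the sub-select), so this is an illustrative sketch rather than a general-purpose utility.

import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.execute.ClientScanPlan;
import org.apache.phoenix.execute.ScanPlan;
import org.apache.phoenix.execute.SortMergeJoinPlan;
import org.apache.phoenix.execute.TupleProjectionPlan;

final class SortMergeJoinPlanWalkSketch {
    // Walks the left-hand side of the sort-merge-join plan the same way the assertions above do.
    static void describeLhs(QueryPlan queryPlan) {
        SortMergeJoinPlan join =
                (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate();
        ClientScanPlan lhsOuter =
                (ClientScanPlan) ((TupleProjectionPlan) join.getLhsPlan()).getDelegate();
        // The outer client plan re-sorts the sub-select on the join key (AID).
        System.out.println(lhsOuter.getOrderBy().getOrderByExpressions());
        // The inner scan keeps the sub-select's own ORDER BY (AGE) and LIMIT (3).
        ScanPlan inner =
                (ScanPlan) ((TupleProjectionPlan) lhsOuter.getDelegate()).getDelegate();
        System.out.println(inner.getOrderBy().getOrderByExpressions());
        System.out.println(inner.getLimit());
    }
}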
lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("B.CODE")); - assertTrue(innerScanPlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("C.REGION DESC")); - assertTrue(innerScanPlan.getLimit().intValue() == 1); - - //test for join and aggregate - sql="select t1.aid,t1.codesum,t2.regionsum from "+ - "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join "+ - "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid"; - - phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(B.CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(C.REGION) DESC")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 2); - - //test for if SubselectRewriter.isOrderByPrefix had take effect - sql="select t1.aid,t1.codesum,t2.regionsum from "+ - "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid 
where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join "+ - "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid "+ - "order by t1.aid desc"; - - phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - orderBy=queryPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("T1.AID DESC")); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)(((TupleProjectionPlan)sortMergeJoinPlan.getLhsPlan()).getDelegate())).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 2); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(B.CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 2); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID DESC")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(C.REGION) DESC")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 2); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testUnionDifferentColumnNumber() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - Statement statement = conn.createStatement(); - try { - String create = "CREATE TABLE s.t1 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, " + - "f2.v3 varchar, v4 varchar)"; - statement.execute(create); - create = "CREATE TABLE s.t2 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar)"; - statement.execute(create); - String query = "SELECT * FROM s.t1 UNION ALL select * FROM s.t2"; - statement.executeQuery(query); - fail("Should fail with different column numbers "); - } catch (SQLException e) { - assertEquals(e.getMessage(), "ERROR 525 (42902): SELECT column number differs in a Union All query " + - "is not allowed. 
1st query has 5 columns whereas 2nd query has 4"); - } finally { - statement.execute("DROP TABLE IF EXISTS s.t1"); - statement.execute("DROP TABLE IF EXISTS s.t2"); - conn.close(); - } - } - - @Test - public void testUnionDifferentColumnType() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - Statement statement = conn.createStatement(); - try { - String create = "CREATE TABLE s.t1 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, " + - "f2.v3 varchar, v4 varchar)"; - statement.execute(create); - create = "CREATE TABLE s.t2 (k integer not null primary key, f1.v1 varchar, f1.v2 integer, " + - "f2.v3 varchar, f2.v4 varchar)"; - statement.execute(create); - String query = "SELECT * FROM s.t1 UNION ALL select * FROM s.t2"; - statement.executeQuery(query); - fail("Should fail with different column types "); - } catch (SQLException e) { - assertEquals(e.getMessage(), "ERROR 526 (42903): SELECT column types differ in a Union All query " + - "is not allowed. Column # 2 is VARCHAR in 1st query where as it is INTEGER in 2nd query"); - } finally { - statement.execute("DROP TABLE IF EXISTS s.t1"); - statement.execute("DROP TABLE IF EXISTS s.t2"); - conn.close(); - } - } - - @Test - public void testCannotCreateStatementOnClosedConnection() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); + } + } + } + + @Test + public void testOrderByReverseOptimizationBug3491() throws Exception { + for (boolean salted : new boolean[] { true, false }) { + boolean[] groupBys = new boolean[] { true, true, true, true, false, false, false, false }; + doTestOrderByReverseOptimizationBug3491(salted, true, true, true, groupBys, + new OrderBy[] { OrderBy.REV_ROW_KEY_ORDER_BY, null, null, OrderBy.FWD_ROW_KEY_ORDER_BY, + OrderBy.REV_ROW_KEY_ORDER_BY, null, null, OrderBy.FWD_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationBug3491(salted, true, true, false, groupBys, + new OrderBy[] { OrderBy.REV_ROW_KEY_ORDER_BY, null, null, OrderBy.FWD_ROW_KEY_ORDER_BY, + null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationBug3491(salted, true, false, true, groupBys, + new OrderBy[] { null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null, + OrderBy.REV_ROW_KEY_ORDER_BY, null, null, OrderBy.FWD_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationBug3491(salted, true, false, false, groupBys, + new OrderBy[] { null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null, + null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationBug3491(salted, false, true, true, groupBys, + new OrderBy[] { null, OrderBy.FWD_ROW_KEY_ORDER_BY, OrderBy.REV_ROW_KEY_ORDER_BY, null, + null, OrderBy.FWD_ROW_KEY_ORDER_BY, OrderBy.REV_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationBug3491(salted, false, true, false, groupBys, + new OrderBy[] { null, OrderBy.FWD_ROW_KEY_ORDER_BY, OrderBy.REV_ROW_KEY_ORDER_BY, null, + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationBug3491(salted, false, false, true, groupBys, + new OrderBy[] { OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + null, OrderBy.FWD_ROW_KEY_ORDER_BY, OrderBy.REV_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationBug3491(salted, false, false, false, groupBys, + new OrderBy[] { OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + OrderBy.FWD_ROW_KEY_ORDER_BY, 
null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + } + } + + private void doTestOrderByReverseOptimizationBug3491(boolean salted, boolean desc1, boolean desc2, + boolean desc3, boolean[] groupBys, OrderBy[] orderBys) throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "ORDERBY3491_TEST"; + conn.createStatement().execute("DROP TABLE if exists " + tableName); + String sql = "CREATE TABLE " + tableName + " ( " + "ORGANIZATION_ID INTEGER NOT NULL," + + "CONTAINER_ID INTEGER NOT NULL," + "SCORE INTEGER NOT NULL," + + "ENTITY_ID INTEGER NOT NULL," + "CONSTRAINT TEST_PK PRIMARY KEY ( " + "ORGANIZATION_ID" + + (desc1 ? " DESC" : "") + "," + "CONTAINER_ID" + (desc2 ? " DESC" : "") + "," + "SCORE" + + (desc3 ? " DESC" : "") + "," + "ENTITY_ID" + ")) " + (salted ? "SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + String[] sqls = { + // groupBy orderPreserving orderBy asc asc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC, CONTAINER_ID ASC", + // groupBy orderPreserving orderBy asc desc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC, CONTAINER_ID DESC", + // groupBy orderPreserving orderBy desc asc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC, CONTAINER_ID ASC", + // groupBy orderPreserving orderBy desc desc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC, CONTAINER_ID DESC", + + // groupBy not orderPreserving orderBy asc asc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC, SCORE ASC", + // groupBy not orderPreserving orderBy asc desc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC, SCORE DESC", + // groupBy not orderPreserving orderBy desc asc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC, SCORE ASC", + // groupBy not orderPreserving orderBy desc desc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC, SCORE DESC" }; + + for (int i = 0; i < sqls.length; i++) { + sql = sqls[i]; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue((i + 1) + ") " + sql, queryPlan.getGroupBy().isOrderPreserving() == groupBys[i]); + OrderBy orderBy = queryPlan.getOrderBy(); + if (orderBys[i] != null) { + assertTrue((i + 1) + ") " + sql, orderBy == orderBys[i]); + } else { + assertTrue((i + 1) + ") " + sql, orderBy.getOrderByExpressions().size() > 0); + } + } + } finally { + if (conn != null) { conn.close(); - try { - conn.createStatement(); - fail(); - } catch (SQLException e) { - assertEquals(e.getErrorCode(), SQLExceptionCode.CONNECTION_CLOSED.getErrorCode()); - } - try { - conn.prepareStatement("SELECT * FROM SYSTEM.CATALOG"); - fail(); - } catch (SQLException e) { - assertEquals(e.getErrorCode(), SQLExceptionCode.CONNECTION_CLOSED.getErrorCode()); - } - } - - @Test - public void testSingleColLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T 
(\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,C)"); - String query = "SELECT * FROM T WHERE A = 'B' and C='C'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(1, outerScans.size()); - List innerScans = outerScans.get(0); - assertEquals(1, innerScans.size()); - Scan scan = innerScans.get(0); - assertEquals("A", Bytes.toString(scan.getStartRow()).trim()); - assertEquals("C", Bytes.toString(scan.getStopRow()).trim()); - } - } - - @Test - public void testMultiColLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); - String query = "SELECT * FROM T WHERE A = 'C' and B = 'X' and D='C'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(1, outerScans.size()); - List innerScans = outerScans.get(0); - assertEquals(1, innerScans.size()); - Scan scan = innerScans.get(0); - assertEquals("C", Bytes.toString(scan.getStartRow()).trim()); - assertEquals("E", Bytes.toString(scan.getStopRow()).trim()); - } - } - - @Test - public void testSkipScanLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); - String query = "SELECT * FROM T WHERE A IN ('A','G') and B = 'A' and D = 'D'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(2, outerScans.size()); - List innerScans1 = outerScans.get(0); - assertEquals(1, innerScans1.size()); - Scan scan1 = innerScans1.get(0); - assertEquals("A", Bytes.toString(scan1.getStartRow()).trim()); - assertEquals("C", Bytes.toString(scan1.getStopRow()).trim()); - List innerScans2 = outerScans.get(1); - assertEquals(1, innerScans2.size()); - Scan scan2 = innerScans2.get(0); - assertEquals("G", 
Bytes.toString(scan2.getStartRow()).trim()); - assertEquals("I", Bytes.toString(scan2.getStopRow()).trim()); - } - } - - @Test - public void testRVCLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); - String query = "SELECT * FROM T WHERE A='I' and (B,D) IN (('A','D'),('B','I'))"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(1, outerScans.size()); - List innerScans = outerScans.get(0); - assertEquals(1, innerScans.size()); - Scan scan = innerScans.get(0); - assertEquals("I", Bytes.toString(scan.getStartRow()).trim()); - assertEquals(0, scan.getStopRow().length); - } - } - - @Test - public void testRVCLocalIndexPruning2() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B VARCHAR,\n" + - " C VARCHAR,\n" + - " D VARCHAR,\n" + - " E VARCHAR,\n" + - " F VARCHAR,\n" + - " G VARCHAR,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D,\n" + - " E,\n" + - " F,\n" + - " G\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,C,F,G)"); - String query = "SELECT * FROM T WHERE (A,B,C,D) IN (('I','D','F','X'),('I','I','G','Y')) and F='X' and G='Y'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(1, outerScans.size()); - List innerScans = outerScans.get(0); - assertEquals(1, innerScans.size()); - Scan scan = innerScans.get(0); - assertEquals("I", Bytes.toString(scan.getStartRow()).trim()); - assertEquals(0, scan.getStopRow().length); - } - } - - @Test - public void testMinMaxRangeLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); - String query = "SELECT * FROM T WHERE A = 'C' and (A,B,D) > ('C','B','X') and B < 'Z' and D='C'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", 
plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(1, outerScans.size()); - List innerScans = outerScans.get(0); - assertEquals(1, innerScans.size()); - Scan scan = innerScans.get(0); - assertEquals("C", Bytes.toString(scan.getStartRow()).trim()); - assertEquals("E", Bytes.toString(scan.getStopRow()).trim()); - } - } - - @Test - public void testNoLocalIndexPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(C)"); - String query = "SELECT * FROM T WHERE C='C'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - List> outerScans = plan.getScans(); - assertEquals(6, outerScans.size()); - } - } - - @Test - public void testLocalIndexRegionPruning() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1),\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - - conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)"); - - // un-pruned, need to scan all six regions - String query = "SELECT * FROM T WHERE D = 'C'"; - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - assertEquals(6, plan.getScans().size()); - - // fixing first part of the key, can limit scanning to two regions - query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'"; - statement = conn.createStatement().unwrap(PhoenixStatement.class); - plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - assertEquals(2, plan.getScans().size()); - - // same with skipscan filter - query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'"; - statement = conn.createStatement().unwrap(PhoenixStatement.class); - plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - assertEquals(3, plan.getScans().size()); - - // two parts of key fixed, need to scan a single region only - query = "SELECT * FROM T WHERE A = 'A' AND B = 'A' AND D = 'C'"; - statement = conn.createStatement().unwrap(PhoenixStatement.class); - plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - assertEquals(1, plan.getScans().size()); - - // same with skipscan filter - query = "SELECT * FROM T WHERE A IN ('A', 'C') AND B = 'A' AND D = 'C'"; - 
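The region-pruning checks in this test all follow one pattern: compile the query, force the iterator, then count the entries of getScans(). A small helper capturing that pattern, with package names assumed from the codebase; it is a sketch for reading the assertions, not code from this patch.

import java.sql.Connection;
import java.sql.SQLException;

import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;

final class LocalIndexPruningSketch {
    // Returns how many region-level scans the compiled plan will issue.
    static int regionScanCount(Connection conn, String query) throws SQLException {
        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
        QueryPlan plan = stmt.optimizeQuery(query);
        plan.iterator(); // getScans() is populated once the iterator is created
        return plan.getScans().size();
    }
}

For the table T split on ('A','C','E','G','I') with the local index IDX on T(D), the assertions above expect 6 scans for the unpruned query and 2 once the leading primary key column is fixed.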
statement = conn.createStatement().unwrap(PhoenixStatement.class); - plan = statement.optimizeQuery(query); - assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); - plan.iterator(); - assertEquals(2, plan.getScans().size()); - } - } - - @Test - public void testSmallScanForPointLookups() throws SQLException { - Properties props = PropertiesUtil.deepCopy(new Properties()); - createTestTable(getUrl(), "CREATE TABLE FOO(\n" + - " a VARCHAR NOT NULL,\n" + - " b VARCHAR NOT NULL,\n" + - " c VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + - " )"); - - props.put(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, "2"); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - String query = "select * from foo where a = 'a' and b = 'b' and c in ('x','y','z')"; - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery(query); - plan.iterator(); - //Fail since we have 3 rows in pointLookup - assertEquals(Scan.ReadType.DEFAULT, plan.getContext().getScan().getReadType()); - query = "select * from foo where a = 'a' and b = 'b' and c = 'c'"; - plan = stmt.compileQuery(query); - plan.iterator(); - //Should be small scan, query is for single row pointLookup - assertEquals(Scan.ReadType.PREAD, plan.getContext().getScan().getReadType()); - } - } - - @Test - public void testLocalIndexPruningInSortMergeJoin() throws SQLException { - verifyLocalIndexPruningWithMultipleTables("SELECT /*+ USE_SORT_MERGE_JOIN*/ *\n" + - "FROM T1 JOIN T2 ON T1.A = T2.A\n" + - "WHERE T1.A = 'B' and T1.C='C' and T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'"); - } - - @Ignore("Blocked by PHOENIX-4614") - @Test - public void testLocalIndexPruningInLeftOrInnerHashJoin() throws SQLException { - verifyLocalIndexPruningWithMultipleTables("SELECT *\n" + - "FROM T1 JOIN T2 ON T1.A = T2.A\n" + - "WHERE T1.A = 'B' and T1.C='C' and T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'"); - } - - @Ignore("Blocked by PHOENIX-4614") - @Test - public void testLocalIndexPruningInRightHashJoin() throws SQLException { - verifyLocalIndexPruningWithMultipleTables("SELECT *\n" + - "FROM (\n" + - " SELECT A, B, C, D FROM T2 WHERE T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'\n" + - ") T2\n" + - "RIGHT JOIN T1 ON T2.A = T1.A\n" + - "WHERE T1.A = 'B' and T1.C='C'"); - } - - @Test - public void testLocalIndexPruningInUinon() throws SQLException { - verifyLocalIndexPruningWithMultipleTables("SELECT A, B, C FROM T1\n" + - "WHERE A = 'B' and C='C'\n" + - "UNION ALL\n" + - "SELECT A, B, C FROM T2\n" + - "WHERE A IN ('A','G') and B = 'A' and D = 'D'"); - } - - private void verifyLocalIndexPruningWithMultipleTables(String query) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T1 (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX1 ON T1(A,C)"); - conn.createStatement().execute("CREATE TABLE T2 (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ") SPLIT ON ('A','C','E','G','I')"); - 
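Relating to the small-scan threshold exercised in testSmallScanForPointLookups above: the behaviour can be reproduced with a short helper. This is a sketch under the assumption that the caller passes a Phoenix JDBC URL and a query against an existing table; package names are assumed from the codebase and the snippet is not part of the patch.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.QueryServices;

final class SmallScanSketch {
    // Returns the HBase read type Phoenix chose for the compiled query.
    static Scan.ReadType readTypeFor(String url, String query) throws SQLException {
        Properties props = new Properties();
        // Point lookups of at most this many rows are issued as small (PREAD) scans.
        props.put(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, "2");
        try (Connection conn = DriverManager.getConnection(url, props)) {
            PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
            QueryPlan plan = stmt.optimizeQuery(query);
            plan.iterator();
            return plan.getContext().getScan().getReadType();
        }
    }
}

With the FOO table created in that test, the single-row lookup (c = 'c') is expected to come back as PREAD, while the three-row IN list exceeds the threshold and stays DEFAULT.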
conn.createStatement().execute("CREATE LOCAL INDEX IDX2 ON T2(A,B,D)"); - PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = statement.optimizeQuery(query); - List childPlans = plan.accept(new MultipleChildrenExtractor()); - assertEquals(2, childPlans.size()); - // Check left child - assertEquals("IDX1", childPlans.get(0).getContext().getCurrentTable().getTable().getName().getString()); - childPlans.get(0).iterator(); - List> outerScansL = childPlans.get(0).getScans(); - assertEquals(1, outerScansL.size()); - List innerScansL = outerScansL.get(0); - assertEquals(1, innerScansL.size()); - Scan scanL = innerScansL.get(0); - assertEquals("A", Bytes.toString(scanL.getStartRow()).trim()); - assertEquals("C", Bytes.toString(scanL.getStopRow()).trim()); - // Check right child - assertEquals("IDX2", childPlans.get(1).getContext().getCurrentTable().getTable().getName().getString()); - childPlans.get(1).iterator(); - List> outerScansR = childPlans.get(1).getScans(); - assertEquals(2, outerScansR.size()); - List innerScansR1 = outerScansR.get(0); - assertEquals(1, innerScansR1.size()); - Scan scanR1 = innerScansR1.get(0); - assertEquals("A", Bytes.toString(scanR1.getStartRow()).trim()); - assertEquals("C", Bytes.toString(scanR1.getStopRow()).trim()); - List innerScansR2 = outerScansR.get(1); - assertEquals(1, innerScansR2.size()); - Scan scanR2 = innerScansR2.get(0); - assertEquals("G", Bytes.toString(scanR2.getStartRow()).trim()); - assertEquals("I", Bytes.toString(scanR2.getStopRow()).trim()); - } - } - - @Test - public void testQueryPlanSourceRefsInHashJoin() throws SQLException { - String query = "SELECT * FROM (\n" + - " SELECT K1, V1 FROM A WHERE V1 = 'A'\n" + - ") T1 JOIN (\n" + - " SELECT K2, V2 FROM B WHERE V2 = 'B'\n" + - ") T2 ON K1 = K2 ORDER BY V1"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInSortMergeJoin() throws SQLException { - String query = "SELECT * FROM (\n" + - " SELECT max(K1) KEY1, V1 FROM A GROUP BY V1\n" + - ") T1 JOIN (\n" + - " SELECT max(K2) KEY2, V2 FROM B GROUP BY V2\n" + - ") T2 ON KEY1 = KEY2 ORDER BY V1"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInSubquery() throws SQLException { - String query = "SELECT * FROM A\n" + - "WHERE K1 > (\n" + - " SELECT max(K2) FROM B WHERE V2 = V1\n" + - ") ORDER BY V1"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInSubquery2() throws SQLException { - String query = "SELECT * FROM A\n" + - "WHERE V1 > ANY (\n" + - " SELECT K2 FROM B WHERE V2 = 'B'\n" + - ")"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInSubquery3() throws SQLException { - String query = "SELECT * FROM A\n" + - "WHERE V1 > ANY (\n" + - " SELECT K2 FROM B B1" + - " WHERE V2 = (\n" + - " SELECT max(V2) FROM B B2\n" + - " WHERE B2.K2 = B1.K2 AND V2 < 'K'\n" + - " )\n" + - ")"; - verifyQueryPlanSourceRefs(query, 3); - } - - @Test - public void testQueryPlanSourceRefsInSubquery4() throws SQLException { - String query = "SELECT * FROM (\n" + - " SELECT K1, K2 FROM A\n" + - " JOIN B ON K1 = K2\n" + - " WHERE V1 = 'A' AND V2 = 'B'\n" + - " LIMIT 10\n" + - ") ORDER BY K1"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInSubquery5() throws SQLException { - String query = "SELECT * FROM (\n" + - " SELECT KEY1, KEY2 FROM (\n" + - " SELECT max(K1) KEY1, V1 FROM A GROUP BY V1\n" + - " ) T1 JOIN (\n" + - 
" SELECT max(K2) KEY2, V2 FROM B GROUP BY V2\n" + - " ) T2 ON KEY1 = KEY2 LIMIT 10\n" + - ") ORDER BY KEY1"; - verifyQueryPlanSourceRefs(query, 2); - } - - @Test - public void testQueryPlanSourceRefsInUnion() throws SQLException { - String query = "SELECT K1, V1 FROM A WHERE V1 = 'A'\n" + - "UNION ALL\n" + - "SELECT K2, V2 FROM B WHERE V2 = 'B'"; - verifyQueryPlanSourceRefs(query, 2); - } - - private void verifyQueryPlanSourceRefs(String query, int refCount) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE A (\n" + - " K1 VARCHAR(10) NOT NULL PRIMARY KEY,\n" + - " V1 VARCHAR(10))"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX1 ON A(V1)"); - conn.createStatement().execute("CREATE TABLE B (\n" + - " K2 VARCHAR(10) NOT NULL PRIMARY KEY,\n" + - " V2 VARCHAR(10))"); - conn.createStatement().execute("CREATE LOCAL INDEX IDX2 ON B(V2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.compileQuery(query); - Set sourceRefs = plan.getSourceRefs(); - assertEquals(refCount, sourceRefs.size()); - for (TableRef table : sourceRefs) { - assertTrue(table.getTable().getType() == PTableType.TABLE); - } - plan = stmt.optimizeQuery(query); - sourceRefs = plan.getSourceRefs(); - assertEquals(refCount, sourceRefs.size()); - for (TableRef table : sourceRefs) { - assertTrue(table.getTable().getType() == PTableType.INDEX); - } - } - } - - private static class MultipleChildrenExtractor implements QueryPlanVisitor> { - - @Override - public List defaultReturn(QueryPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(AggregatePlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(ScanPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(ClientAggregatePlan plan) { - return plan.getDelegate().accept(this); - } - - @Override - public List visit(ClientScanPlan plan) { - return plan.getDelegate().accept(this); - } - - @Override - public List visit(LiteralResultIterationPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(TupleProjectionPlan plan) { - return plan.getDelegate().accept(this); - } - - @Override - public List visit(HashJoinPlan plan) { - List children = new ArrayList(plan.getSubPlans().length + 1); - children.add(plan.getDelegate()); - for (HashJoinPlan.SubPlan subPlan : plan.getSubPlans()) { - children.add(subPlan.getInnerPlan()); - } - return children; - } - - @Override - public List visit(SortMergeJoinPlan plan) { - return Lists.newArrayList(plan.getLhsPlan(), plan.getRhsPlan()); - } - - @Override - public List visit(UnionPlan plan) { - return plan.getSubPlans(); - } - - @Override - public List visit(UnnestArrayPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(CursorFetchPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(ListJarsQueryPlan plan) { - return Collections.emptyList(); - } - - @Override - public List visit(TraceQueryPlan plan) { - return Collections.emptyList(); - } - } - - @Test - public void testGroupByOrderMatchPkColumnOrder4690() throws Exception{ - this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, false); - this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, true); - this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, false); - 
this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, true); - } - - private void doTestGroupByOrderMatchPkColumnOrderBug4690(boolean desc ,boolean salted) throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - String sql = "create table " + tableName + "( "+ - " pk1 integer not null , " + - " pk2 integer not null, " + - " pk3 integer not null," + - " pk4 integer not null,"+ - " v integer, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1 "+(desc ? "desc" : "")+", "+ - "pk2 "+(desc ? "desc" : "")+", "+ - "pk3 "+(desc ? "desc" : "")+", "+ - "pk4 "+(desc ? "desc" : "")+ - " )) "+(salted ? "SALT_BUCKETS =4" : "split on(2)"); - conn.createStatement().execute(sql); - - sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2,pk1"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() ==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1")); - - sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1,pk2"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); - - sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2 desc,pk1 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() ==2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2 DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1 DESC")); - - sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1 desc,pk2 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); - - - sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3,pk2"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2")); - - sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2,pk3"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? 
OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); - - sql = "select pk3,pk2,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk3 desc,pk2 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3 DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2 DESC")); - - sql = "select pk2,pk3,count(v) from " + tableName + " where pk1=1 group by pk3,pk2 order by pk2 desc,pk3 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); - - - sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4,pk3,pk1"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1")); - - sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1,pk3,pk4"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); - - sql = "select pk4,pk3,pk1,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk4 desc,pk3 desc,pk1 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4 DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3 DESC")); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1 DESC")); - - sql = "select pk1,pk3,pk4,count(v) from " + tableName + " where pk2=9 group by pk4,pk3,pk1 order by pk1 desc,pk3 desc,pk4 desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == (!desc ? 
OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); - } finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testSortMergeJoinPushFilterThroughSortBug5105() throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - - String tableName1="MERGE1"; - String tableName2="MERGE2"; - - conn.createStatement().execute("DROP TABLE if exists "+tableName1); - - String sql="CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+ - "AID INTEGER PRIMARY KEY,"+ - "AGE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - conn.createStatement().execute("DROP TABLE if exists "+tableName2); - sql="CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+ - "BID INTEGER PRIMARY KEY,"+ - "CODE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - //test for simple scan - sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid where b.code > 50"; - - QueryPlan queryPlan=getQueryPlan(conn, sql); - SortMergeJoinPlan sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - ClientScanPlan lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - OrderBy orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - ScanPlan innerScanPlan=(ScanPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AGE")); - assertTrue(innerScanPlan.getLimit().intValue() == 3); - - ClientScanPlan rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - String tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); - String rewrittenSql = "SELECT "+tableAlias+".BID BID,"+tableAlias+".CODE CODE FROM (SELECT BID,CODE FROM MERGE2 ORDER BY CODE LIMIT 1) "+tableAlias+" WHERE "+tableAlias+".CODE > 50 ORDER BY "+tableAlias+".BID"; - assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); - innerScanPlan=(ScanPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("CODE")); - assertTrue(innerScanPlan.getLimit().intValue() == 1); - - //test for aggregate - sql="select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from "+tableName1+" where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join "+ - "(select bid,sum(code) codesum from "+tableName2+" group by bid order by codesum limit 1) b on a.aid=b.bid where b.codesum > 50"; - - - queryPlan=getQueryPlan(conn, sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - 
assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - AggregatePlan innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(AGE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".BID BID,"+tableAlias+".CODESUM CODESUM FROM (SELECT BID, SUM(CODE) CODESUM FROM MERGE2 GROUP BY BID ORDER BY SUM(CODE) LIMIT 1) "+tableAlias+" WHERE "+tableAlias+".CODESUM > 50 ORDER BY "+tableAlias+".BID"; - assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); - innerAggregatePlan=(AggregatePlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 1); - - String tableName3="merge3"; - conn.createStatement().execute("DROP TABLE if exists "+tableName3); - sql="CREATE TABLE IF NOT EXISTS "+tableName3+" ( "+ - "CID INTEGER PRIMARY KEY,"+ - "REGION INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - //test for join - sql="select t1.aid,t1.code,t2.region from "+ - "(select a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join "+ - "(select a.aid,c.region from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid "+ - "where t1.code > 50"; - - PhoenixPreparedStatement phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".AID AID,"+tableAlias+".CODE CODE FROM (SELECT A.AID,B.CODE FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) ORDER BY B.CODE LIMIT 3) "+ - tableAlias+" WHERE "+tableAlias+".CODE > 50 ORDER BY "+tableAlias+".AID"; - assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("B.CODE")); - assertTrue(innerScanPlan.getLimit().intValue() == 3); - - 
rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerScanPlan=(ScanPlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerScanPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("C.REGION DESC")); - assertTrue(innerScanPlan.getLimit().intValue() == 1); - - //test for join and aggregate - sql="select t1.aid,t1.codesum,t2.regionsum from "+ - "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join "+ - "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid "+ - "where t1.codesum >=40 and t2.regionsum >= 90"; - - phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getLhsPlan())).getDelegate(); - tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".AID AID,"+tableAlias+".CODESUM CODESUM FROM (SELECT A.AID, SUM(B.CODE) CODESUM FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) GROUP BY A.AID ORDER BY SUM(B.CODE) LIMIT 3) "+tableAlias+ - " WHERE "+tableAlias+".CODESUM >= 40 ORDER BY "+tableAlias+".AID"; - assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=lhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(B.CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".AID AID,"+tableAlias+".REGIONSUM REGIONSUM FROM (SELECT A.AID, SUM(C.REGION) REGIONSUM FROM MERGE1 A Inner JOIN MERGE3 C ON (A.AID = C.CID) WHERE (C.REGION >= 77 AND C.REGION <= 99) GROUP BY A.AID ORDER BY SUM(C.REGION) DESC LIMIT 2) "+tableAlias+ - " WHERE "+tableAlias+".REGIONSUM >= 90 ORDER BY "+tableAlias+".AID"; - assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() 
== 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(C.REGION) DESC")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 2); - - //test for if SubselectRewriter.isOrderByPrefix had take effect - sql="select t1.aid,t1.codesum,t2.regionsum from "+ - "(select a.aid,sum(b.code) codesum from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join "+ - "(select a.aid,sum(c.region) regionsum from "+tableName1+" a inner join "+tableName3+" c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid "+ - "where t1.codesum >=40 and t2.regionsum >= 90 order by t1.aid desc"; - - phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - queryPlan = phoenixPreparedStatement.optimizeQuery(sql); - orderBy=queryPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("T1.AID DESC")); - sortMergeJoinPlan=(SortMergeJoinPlan)((ClientScanPlan)queryPlan).getDelegate(); - - lhsOuterPlan = (ClientScanPlan)(((TupleProjectionPlan)sortMergeJoinPlan.getLhsPlan()).getDelegate()); - tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".AID AID,"+tableAlias+".CODESUM CODESUM FROM (SELECT A.AID, SUM(B.CODE) CODESUM FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) GROUP BY A.AID ORDER BY A.AID, SUM(B.CODE) LIMIT 3) "+tableAlias+ - " WHERE "+tableAlias+".CODESUM >= 40"; - assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)lhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 2); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(B.CODE)")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 3); - - rhsOuterPlan=(ClientScanPlan)((TupleProjectionPlan)(sortMergeJoinPlan.getRhsPlan())).getDelegate(); - tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); - rewrittenSql = "SELECT "+tableAlias+".AID AID,"+tableAlias+".REGIONSUM REGIONSUM FROM (SELECT A.AID, SUM(C.REGION) REGIONSUM FROM MERGE1 A Inner JOIN MERGE3 C ON (A.AID = C.CID) WHERE (C.REGION >= 77 AND C.REGION <= 99) GROUP BY A.AID ORDER BY A.AID DESC, SUM(C.REGION) DESC LIMIT 2) "+tableAlias+ - " WHERE "+tableAlias+".REGIONSUM >= 90 ORDER BY "+tableAlias+".AID"; - assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); - - orderBy=rhsOuterPlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 1); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); - innerAggregatePlan=(AggregatePlan)((HashJoinPlan)((TupleProjectionPlan)rhsOuterPlan.getDelegate()).getDelegate()).getDelegate(); - orderBy=innerAggregatePlan.getOrderBy(); - assertTrue(orderBy.getOrderByExpressions().size() == 2); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID DESC")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(C.REGION) DESC")); - assertTrue(innerAggregatePlan.getLimit().intValue() == 2); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - 
public void testOrderPreservingForClientScanPlanBug5148() throws Exception { - doTestOrderPreservingForClientScanPlanBug5148(false,false); - doTestOrderPreservingForClientScanPlanBug5148(false,true); - doTestOrderPreservingForClientScanPlanBug5148(true, false); - doTestOrderPreservingForClientScanPlanBug5148(true, true); - } - - private void doTestOrderPreservingForClientScanPlanBug5148(boolean desc, boolean salted) throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - String sql = "create table " + tableName + "( "+ - " pk1 char(20) not null , " + - " pk2 char(20) not null, " + - " pk3 char(20) not null," + - " v1 varchar, " + - " v2 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1 "+(desc ? "desc" : "")+", "+ - "pk2 "+(desc ? "desc" : "")+", "+ - "pk3 "+(desc ? "desc" : "")+ - " )) "+(salted ? "SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - sql = "select v1 from (select v1,v2,pk3 from "+tableName+" t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a order by v2,pk3"; - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select v1 from (select v1,v2,pk3 from "+tableName+" t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a where pk3 = '8' order by v2,v1"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select v1 from (select v1,v2,pk3 from "+tableName+" t where pk1 = '6' order by t.v2 desc,t.pk3 desc,t.v1 desc limit 10) a order by v2 desc ,pk3 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a order by cnt,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a order by cast(cnt as bigint),substr(sub,0,1)"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a order by cnt desc ,sub desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,pk2 from "+tableName+" t where pk1 = '6' group by pk2,v2 limit 10) a order by pk2,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - if(desc) { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } else { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - - sql = "select sub from (select substr(v2,0,2) sub,pk2 from "+tableName+" t where pk1 = '6' group by pk2,v2 limit 10) a order by pk2 desc,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - if(desc) { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } else { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } - - sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where 
pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by sub ,cnt"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by substr(sub,0,1) ,cast(cnt as bigint)"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by sub ,cnt"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select v1 from (select v1,v2,pk3 from "+tableName+" t where pk1 = '6' order by t.v2 desc,t.pk3 desc,t.v1 desc limit 10) a order by v2 ,pk3"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select v1 from (select v1,v2,pk3 from "+tableName+" t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a where pk3 = '8' or (v2 < 'abc' and pk3 > '11') order by v2,v1"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - - //test innerQueryPlan is ordered by rowKey - sql = "select pk1 from (select pk3,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where pk3 > '8' order by pk1,pk2,pk3"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where sub > '8' order by pk1,pk2,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select pk3,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where pk3 > '8' order by pk1 desc ,pk2 desc ,pk3 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where sub > '8' order by pk1 desc,pk2 desc,sub desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testGroupByOrderPreservingForClientAggregatePlanBug5148() throws Exception { - doTestGroupByOrderPreservingForClientAggregatePlanBug5148(false, false); - doTestGroupByOrderPreservingForClientAggregatePlanBug5148(false, true); - doTestGroupByOrderPreservingForClientAggregatePlanBug5148(true, false); - doTestGroupByOrderPreservingForClientAggregatePlanBug5148(true, true); - } - - private void doTestGroupByOrderPreservingForClientAggregatePlanBug5148(boolean desc, boolean salted) throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - String sql = "create table " + tableName + "( "+ - " pk1 varchar not null , " + - " pk2 varchar not null, " + - " pk3 varchar not null," + - " v1 varchar, " + - " v2 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( "+ - "pk1 
"+(desc ? "desc" : "")+", "+ - "pk2 "+(desc ? "desc" : "")+", "+ - "pk3 "+(desc ? "desc" : "")+ - " )) "+(salted ? "SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - sql = "select v1 from (select v1,pk2,pk1 from "+tableName+" t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a group by pk2,v1 order by pk2,v1"; - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select v1 from (select v1,pk2,pk1 from "+tableName+" t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by v1, pk1 order by v1,pk1"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select v1 from (select v1,pk2,pk1 from "+tableName+" t where pk1 = '6' order by t.pk2 desc,t.v1 desc,t.pk1 limit 10) a group by pk2, v1 order by pk2 desc,v1 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select v1 from (select v1,pk2,pk1 from "+tableName+" t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' or (v1 < 'abc' and pk2 > '11') group by v1, pk1 order by v1,pk1"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - if(desc) { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } else { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - - sql = "select v1 from (select v1,pk2,pk1 from "+tableName+" t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' or (v1 < 'abc' and pk2 > '11') group by v1, pk1 order by v1,pk1 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - if(desc) { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } else { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } - - sql = "select sub from (select v1,pk2,substr(pk1,0,1) sub from "+tableName+" t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by v1,sub order by v1,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v1,0,1) sub,pk2,pk1 from "+tableName+" t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by sub,pk1 order by sub,pk1"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - if(desc) { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } else { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - - sql = "select sub from (select substr(v1,0,1) sub,pk2,pk1 from "+tableName+" t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by sub,pk1 order by sub,pk1 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - if(desc) { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } else { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } - - sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from "+tableName+" t where pk1 = '6' group by v1,v2 order by 
count(pk3),t.v2 limit 10) a group by cnt,sub order by cnt,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select substr(sub,0,1) from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a "+ - "group by cast(cnt as bigint),substr(sub,0,1) order by cast(cnt as bigint),substr(sub,0,1)"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a group by cnt,sub order by cnt desc ,sub desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select substr(sub,0,1) from (select substr(v2,0,2) sub,count(pk3) cnt from "+tableName+" t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a "+ - "group by cast(cnt as bigint),substr(sub,0,1) order by cast(cnt as bigint) desc,substr(sub,0,1) desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select sub from (select substr(v2,0,2) sub,pk2 from "+tableName+" t where pk1 = '6' group by pk2,v2 limit 10) a group by pk2,sub order by pk2,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - if(desc) { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } else { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } - - sql = "select sub from (select substr(v2,0,2) sub,pk2 from "+tableName+" t where pk1 = '6' group by pk2,v2 limit 10) a group by pk2,sub order by pk2 desc,sub"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - if(desc) { - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } else { - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - } - - //test innerQueryPlan is ordered by rowKey - sql = "select pk1 from (select pk3,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where pk3 > '8' group by pk1,pk2,pk3 order by pk1,pk2,pk3"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where sub > '8' group by pk1,pk2,sub order by pk1,pk2"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select pk3,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where pk3 > '8' group by pk1, pk2, pk3 order by pk1 desc ,pk2 desc ,pk3 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from "+tableName+" t where v1 = '6' order by t.pk1 
desc,t.pk2 desc limit 10) a where sub > '8' group by pk1,pk2,sub order by pk1 desc,pk2 desc"; - plan = TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - } finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testOrderPreservingForSortMergeJoinBug5148() throws Exception { - doTestOrderPreservingForSortMergeJoinBug5148(false, false); - doTestOrderPreservingForSortMergeJoinBug5148(false, true); - doTestOrderPreservingForSortMergeJoinBug5148(true, false); - doTestOrderPreservingForSortMergeJoinBug5148(true, true); - } - - private void doTestOrderPreservingForSortMergeJoinBug5148(boolean desc, boolean salted) throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - - String tableName1 = generateUniqueName(); - String tableName2 = generateUniqueName(); - - String sql = "CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+ - "AID INTEGER PRIMARY KEY "+(desc ? "desc" : "")+","+ - "AGE INTEGER"+ - ") "+(salted ? "SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - sql = "CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+ - "BID INTEGER PRIMARY KEY "+(desc ? "desc" : "")+","+ - "CODE INTEGER"+ - ")"+(salted ? "SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code order by a.aid ,a.age"; - QueryPlan queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code order by a.aid desc,a.age desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,a.age from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code group by a.aid,a.age order by a.aid ,a.age"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,a.age from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code group by a.aid,a.age order by a.aid desc,a.age desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code order by b.bid ,b.code"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = 
"select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code order by b.bid desc ,b.code desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code group by b.bid, b.code order by b.bid ,b.code"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.code from (select aid,age from "+tableName1+" where age >=11 and age<=33 order by age limit 3) a inner join "+ - "(select bid,code from "+tableName2+" order by code limit 1) b on a.aid=b.bid and a.age = b.code group by b.bid, b.code order by b.bid desc,b.code desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - //test part column - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by a.aid"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by a.aid desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code group by a.aid order by a.aid"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code group by a.aid order by a.aid desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by a.age"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by b.bid"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by b.bid desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code group by 
b.bid order by b.bid"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code group by b.bid order by b.bid desc"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - - sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from "+tableName1+" a inner join "+tableName2+" b on a.aid=b.bid and a.age = b.code order by b.code"; - queryPlan = getQueryPlan(conn, sql); - assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testSortMergeBug4508() throws Exception { - Connection conn = null; - Connection conn010 = null; - try { - // Salted tables - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - conn = DriverManager.getConnection(getUrl(), props); - props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - props.setProperty("TenantId", "010"); - conn010 = DriverManager.getConnection(getUrl(), props); - - String peopleTable1 = generateUniqueName(); - String myTable1 = generateUniqueName(); - conn.createStatement().execute("CREATE TABLE " + peopleTable1 + " (\n" + - "PERSON_ID VARCHAR NOT NULL,\n" + - "NAME VARCHAR\n" + - "CONSTRAINT PK_TEST_PEOPLE PRIMARY KEY (PERSON_ID)) SALT_BUCKETS = 3"); - conn.createStatement().execute("CREATE TABLE " + myTable1 + " (\n" + - "LOCALID VARCHAR NOT NULL,\n" + - "DSID VARCHAR(255) NOT NULL, \n" + - "EID CHAR(40),\n" + - "HAS_CANDIDATES BOOLEAN\n" + - "CONSTRAINT PK_MYTABLE PRIMARY KEY (LOCALID, DSID)) SALT_BUCKETS = 3"); - verifyQueryPlanForSortMergeBug4508(conn, peopleTable1, myTable1); - - // Salted multi-tenant tables - String peopleTable2 = generateUniqueName(); - String myTable2 = generateUniqueName(); - conn.createStatement().execute("CREATE TABLE " + peopleTable2 + " (\n" + - "TENANT_ID VARCHAR NOT NULL,\n" + - "PERSON_ID VARCHAR NOT NULL,\n" + - "NAME VARCHAR\n" + - "CONSTRAINT PK_TEST_PEOPLE PRIMARY KEY (TENANT_ID, PERSON_ID))\n" + - "SALT_BUCKETS = 3, MULTI_TENANT=true"); - conn.createStatement().execute("CREATE TABLE " + myTable2 + " (\n" + - "TENANT_ID VARCHAR NOT NULL,\n" + - "LOCALID VARCHAR NOT NULL,\n" + - "DSID VARCHAR(255) NOT NULL, \n" + - "EID CHAR(40),\n" + - "HAS_CANDIDATES BOOLEAN\n" + - "CONSTRAINT PK_MYTABLE PRIMARY KEY (TENANT_ID, LOCALID, DSID))\n" + - "SALT_BUCKETS = 3, MULTI_TENANT=true"); - verifyQueryPlanForSortMergeBug4508(conn010, peopleTable2, myTable2); - } finally { - if(conn!=null) { - conn.close(); - } - if(conn010 != null) { - conn010.close(); - } - } - } - - private static void verifyQueryPlanForSortMergeBug4508(Connection conn, String peopleTable, String myTable) throws Exception { - String query1 = "SELECT /*+ USE_SORT_MERGE_JOIN*/ COUNT(*)\n" + - "FROM " + peopleTable + " ds JOIN " + myTable + " l\n" + - "ON ds.PERSON_ID = l.LOCALID\n" + - "WHERE l.EID IS NULL AND l.DSID = 'PEOPLE' AND l.HAS_CANDIDATES = FALSE"; - String query2 = "SELECT /*+ USE_SORT_MERGE_JOIN */ COUNT(*)\n" + - "FROM (SELECT LOCALID FROM " + myTable + "\n" + - "WHERE EID IS NULL AND DSID = 'PEOPLE' AND HAS_CANDIDATES = FALSE) l\n" + - "JOIN " + peopleTable + " ds ON ds.PERSON_ID = l.LOCALID"; - - for (String q : new String[]{query1, query2}) { - ResultSet rs = 
conn.createStatement().executeQuery("explain " + q); - String plan = QueryUtil.getExplainPlan(rs); - assertFalse("Tables should not require sort over their PKs:\n" + plan, - plan.contains("SERVER SORTED BY")); - } - } - - @Test - public void testDistinctCountLimitBug5217() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - String sql = "create table " + tableName + "( "+ - " pk1 integer not null , " + - " pk2 integer not null, " + - " v integer, " + - " CONSTRAINT TEST_PK PRIMARY KEY (pk1,pk2))"; - conn.createStatement().execute(sql); - - sql = "select count(distinct pk1) from " + tableName + " limit 1"; - QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = plan.getContext().getScan(); - assertFalse(TestUtil.hasFilter(scan, PageFilter.class)); - } finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testPushDownPostFilterToSubJoinBug5389() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String orderTableName = "order_table"; - String itemTableName = "item_table"; - String supplierTableName = "supplier_table"; - String sql = "create table " + orderTableName + - " (order_id varchar(15) not null primary key, " + - " customer_id varchar(10), " + - " item_id varchar(10), " + - " price integer, " + - " quantity integer, " + - " date timestamp)"; - conn.createStatement().execute(sql); - - sql = "create table " + itemTableName + - " (item_id varchar(10) not null primary key, " + - " name varchar, " + - " price integer, " + - " discount1 integer, " + - " discount2 integer, " + - " supplier_id varchar(10), " + - " description varchar)"; - conn.createStatement().execute(sql); - - sql = "create table " + supplierTableName + - " (supplier_id varchar(10) not null primary key, " + - " name varchar, " + - " phone varchar(12), " + - " address varchar, " + - " loc_id varchar(5))"; - conn.createStatement().execute(sql); - - doTestPushDownPostFilterToSubJoinForNoStarJoinBug5389(conn, supplierTableName, itemTableName, orderTableName); - doTestPushDownPostFilterToSubJoinForSortMergeJoinBug5389(conn, supplierTableName, itemTableName, orderTableName); - } finally { - if(conn != null) { - conn.close(); - } - } - } - - private void doTestPushDownPostFilterToSubJoinForNoStarJoinBug5389( - Connection conn, - String supplierTableName, - String itemTableName, - String orderTableName) throws Exception { - //one condition push down. 
- String sql = "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName + " s inner join " + itemTableName + " i on s.supplier_id = i.supplier_id "+ - "inner join " + orderTableName + " o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai')"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - HashJoinPlan hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); - HashSubPlan[] hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - HashJoinPlan subHashJoinPlan = (HashJoinPlan)(hashSubPlans[0].getInnerPlan()); - Expression postFilterExpression = subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression(); - assertTrue(postFilterExpression.toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); - - //postFilter references all tables can not push down to subjoin. - sql = "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName + " s inner join " + itemTableName + " i on s.supplier_id = i.supplier_id "+ - "inner join " + orderTableName + " o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai' or o.quantity = 8)"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai' OR O.QUANTITY = 8)")); - hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - subHashJoinPlan = (HashJoinPlan)(hashSubPlans[0].getInnerPlan()); - assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); - - //one condition can not push down and other two conditions can push down. 
- sql = "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName + " s inner join " + itemTableName + " i on s.supplier_id = i.supplier_id "+ - "inner join " + orderTableName + " o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString().equals( - "(I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10)")); - hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - subHashJoinPlan = (HashJoinPlan)(hashSubPlans[0].getInnerPlan()); - postFilterExpression = subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression(); - assertTrue(postFilterExpression.toString().equals( - "((I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); - - //for right join,can not push down - sql = "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName + " s inner join " + itemTableName + " i on s.supplier_id = i.supplier_id "+ - "right join " + orderTableName + " o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai')"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); - hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - subHashJoinPlan = (HashJoinPlan)(hashSubPlans[0].getInnerPlan()); - assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); - - //for right join,can not push down - sql = "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName + " s inner join " + itemTableName + " i on s.supplier_id = i.supplier_id "+ - "right join " + orderTableName + " o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString().equals( - "((I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10) AND (I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); - hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - subHashJoinPlan = (HashJoinPlan)(hashSubPlans[0].getInnerPlan()); - assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); - } - - private void doTestPushDownPostFilterToSubJoinForSortMergeJoinBug5389( - Connection conn, - String supplierTableName, - String itemTableName, - String orderTableName) throws Exception { - //one condition push down. 
- String sql = "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName+" s inner join "+itemTableName+" i on s.supplier_id = i.supplier_id "+ - "inner join "+orderTableName+" o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai')"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - ClientScanPlan clientScanPlan = (ClientScanPlan)queryPlan; - assertTrue(clientScanPlan.getWhere() == null); - SortMergeJoinPlan sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - ClientScanPlan lhsClientScanPlan = (ClientScanPlan)sortMergeJoinPlan.getLhsPlan(); - assertTrue(lhsClientScanPlan.getWhere().toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); - - //can not push down to subjoin. - sql = "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName+" s inner join "+itemTableName+" i on s.supplier_id = i.supplier_id "+ - "inner join "+orderTableName+" o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai' or o.quantity = 8)"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - assertTrue(clientScanPlan.getWhere().toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai' OR O.QUANTITY = 8)")); - sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - lhsClientScanPlan = (ClientScanPlan)sortMergeJoinPlan.getLhsPlan(); - assertTrue(lhsClientScanPlan.getWhere() == null); - - //one condition can not push down and other two conditions can push down. 
- sql = "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName+" s inner join "+itemTableName+" i on s.supplier_id = i.supplier_id "+ - "inner join "+orderTableName+" o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - assertTrue(clientScanPlan.getWhere().toString().equals( - "(I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10)")); - sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - lhsClientScanPlan = (ClientScanPlan)sortMergeJoinPlan.getLhsPlan(); - assertTrue(lhsClientScanPlan.getWhere().toString().equals( - "((I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); - - //for right join,can not push down - sql = "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName+" s inner join "+itemTableName+" i on s.supplier_id = i.supplier_id "+ - "right join "+orderTableName+" o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.supplier_id != 'medi' or s.address = 'hai')"; + } + } + } + + @Test + public void testOrderByReverseOptimizationWithNUllsLastBug3491() throws Exception { + for (boolean salted : new boolean[] { true, false }) { + boolean[] groupBys = new boolean[] { + // groupBy orderPreserving orderBy asc asc + true, true, true, true, + // groupBy orderPreserving orderBy asc desc + true, true, true, true, + // groupBy orderPreserving orderBy desc asc + true, true, true, true, + // groupBy orderPreserving orderBy desc desc + true, true, true, true, + + // groupBy not orderPreserving orderBy asc asc + false, false, false, false, + // groupBy not orderPreserving orderBy asc desc + false, false, false, false, + // groupBy not orderPreserving orderBy desc asc + false, false, false, false, + // groupBy not orderPreserving orderBy desc desc + false, false, false, false, + + false, false, false, false }; + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, true, true, true, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving orderBy asc desc + null, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, null, + // groupBy orderPreserving orderBy desc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy asc desc + null, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, null, + // groupBy not orderPreserving orderBy desc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + + null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, true, true, false, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving orderBy asc desc + null, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, null, + // groupBy orderPreserving 
orderBy desc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, null, + // groupBy not orderPreserving orderBy asc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy desc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy desc desc + null, null, null, null, + + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, true, false, true, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, null, + // groupBy orderPreserving orderBy asc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving orderBy desc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy desc desc + null, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy asc desc + null, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, null, + // groupBy not orderPreserving orderBy desc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, true, false, false, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, null, + // groupBy orderPreserving orderBy asc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving orderBy desc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy desc desc + null, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, null, + // groupBy not orderPreserving orderBy asc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy desc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy desc desc + null, null, null, null, + + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, false, true, true, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, null, + // groupBy orderPreserving orderBy asc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving orderBy desc desc + null, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, null, + // groupBy not orderPreserving orderBy asc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy desc desc + null, null, null, null, + + null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, false, true, false, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + null, null, null, null, + // groupBy orderPreserving orderBy asc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy orderPreserving 
orderBy desc desc + null, null, null, null, + + // groupBy not orderPreserving orderBy asc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy asc desc + null, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, null, + // groupBy not orderPreserving orderBy desc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, false, false, true, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy asc desc + null, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, null, + // groupBy orderPreserving orderBy desc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + + // groupBy not orderPreserving orderBy asc asc + null, null, null, null, + // groupBy not orderPreserving orderBy asc desc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + // groupBy not orderPreserving orderBy desc desc + null, null, null, null, + + null, OrderBy.REV_ROW_KEY_ORDER_BY, OrderBy.FWD_ROW_KEY_ORDER_BY, null }); + + doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted, false, false, false, groupBys, + new OrderBy[] { + // groupBy orderPreserving orderBy asc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy orderPreserving orderBy asc desc + null, null, null, null, + // groupBy orderPreserving orderBy desc asc + null, null, null, null, + // groupBy orderPreserving orderBy desc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + + // groupBy not orderPreserving orderBy asc asc + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, null, + // groupBy not orderPreserving orderBy asc desc + null, null, null, null, + // groupBy not orderPreserving orderBy desc asc + null, null, null, null, + // groupBy not orderPreserving orderBy desc desc + null, null, null, OrderBy.REV_ROW_KEY_ORDER_BY, + + OrderBy.FWD_ROW_KEY_ORDER_BY, null, null, OrderBy.REV_ROW_KEY_ORDER_BY }); + } + } + + private void doTestOrderByReverseOptimizationWithNUllsLastBug3491(boolean salted, boolean desc1, + boolean desc2, boolean desc3, boolean[] groupBys, OrderBy[] orderBys) throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "ORDERBY3491_TEST"; + conn.createStatement().execute("DROP TABLE if exists " + tableName); + String sql = "CREATE TABLE " + tableName + " ( " + "ORGANIZATION_ID VARCHAR," + + "CONTAINER_ID VARCHAR," + "SCORE VARCHAR," + "ENTITY_ID VARCHAR NOT NULL," + + "CONSTRAINT TEST_PK PRIMARY KEY ( " + "ORGANIZATION_ID" + (desc1 ? " DESC" : "") + "," + + "CONTAINER_ID" + (desc2 ? " DESC" : "") + "," + "SCORE" + (desc3 ? " DESC" : "") + "," + + "ENTITY_ID" + ")) " + (salted ? 
"SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + String[] sqls = { + // groupBy orderPreserving orderBy asc asc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID ASC NULLS LAST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID ASC NULLS LAST", + + // groupBy orderPreserving orderBy asc desc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS FIRST, CONTAINER_ID DESC NULLS LAST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID ASC NULLS LAST, CONTAINER_ID DESC NULLS LAST", + + // groupBy orderPreserving orderBy desc asc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID ASC NULLS LAST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID ASC NULLS LAST", + + // groupBy orderPreserving orderBy desc desc + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS FIRST, CONTAINER_ID DESC NULLS LAST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,CONTAINER_ID FROM " + tableName + + " group by ORGANIZATION_ID, CONTAINER_ID ORDER BY ORGANIZATION_ID DESC NULLS LAST, CONTAINER_ID DESC NULLS LAST", + + // -----groupBy not orderPreserving + + // groupBy not orderPreserving orderBy asc asc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE ASC NULLS LAST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group 
by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE ASC NULLS LAST", + + // groupBy not orderPreserving orderBy asc desc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS FIRST, SCORE DESC NULLS LAST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID ASC NULLS LAST, SCORE DESC NULLS LAST", + + // groupBy not orderPreserving orderBy desc asc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE ASC NULLS LAST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE ASC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE ASC NULLS LAST", + + // groupBy not orderPreserving orderBy desc desc + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS FIRST, SCORE DESC NULLS LAST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE DESC NULLS FIRST", + "SELECT ORGANIZATION_ID,SCORE FROM " + tableName + + " group by ORGANIZATION_ID, SCORE ORDER BY ORGANIZATION_ID DESC NULLS LAST, SCORE DESC NULLS LAST", + + // -------only one return column---------------------------------- + "SELECT SCORE FROM " + tableName + " group by SCORE ORDER BY SCORE ASC NULLS FIRST", + "SELECT SCORE FROM " + tableName + " group by SCORE ORDER BY SCORE ASC NULLS LAST", + "SELECT SCORE FROM " + tableName + " group by SCORE ORDER BY SCORE DESC NULLS FIRST", + "SELECT SCORE FROM " + tableName + " group by SCORE ORDER BY SCORE DESC NULLS LAST" }; + + for (int i = 0; i < sqls.length; i++) { + sql = sqls[i]; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue((i + 1) + ") " + sql, queryPlan.getGroupBy().isOrderPreserving() == groupBys[i]); + OrderBy orderBy = queryPlan.getOrderBy(); + if (orderBys[i] != null) { + assertTrue((i + 1) + ") " + sql, orderBy == orderBys[i]); + } else { + assertTrue((i + 1) + ") " + sql, orderBy.getOrderByExpressions().size() > 0); + } + } + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testGroupByCoerceExpressionBug3453() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = "GROUPBY3453_INT"; + String sql = "CREATE TABLE " + tableName + "(" + "ENTITY_ID INTEGER NOT NULL," + + "CONTAINER_ID INTEGER NOT NULL," + "SCORE INTEGER NOT NULL," + + "CONSTRAINT 
TEST_PK PRIMARY KEY (ENTITY_ID DESC,CONTAINER_ID DESC,SCORE DESC))"; + conn.createStatement().execute(sql); + sql = "select DISTINCT entity_id, score from ( select entity_id, score from " + tableName + + " limit 1)"; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder() == SortOrder.DESC); + + sql = "select DISTINCT entity_id, score from ( select entity_id, score from " + tableName + + " limit 3) order by entity_id"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).getExpression().getSortOrder() + == SortOrder.DESC); + + sql = "select DISTINCT entity_id, score from ( select entity_id, score from " + tableName + + " limit 3) order by entity_id desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().getExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue(queryPlan.getGroupBy().getExpressions().get(1).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(0).getSortOrder() == SortOrder.DESC); + assertTrue( + queryPlan.getGroupBy().getKeyExpressions().get(1).getSortOrder() == SortOrder.DESC); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + private static QueryPlan getQueryPlan(Connection conn, String sql) throws SQLException { + PhoenixPreparedStatement statement = + conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + QueryPlan queryPlan = statement.optimizeQuery(sql); + queryPlan.iterator(); + return queryPlan; + } + + @Test + public void testSortMergeJoinSubQueryOrderByOverrideBug3745() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + + String tableName1 = "MERGE1"; + String tableName2 = "MERGE2"; + + conn.createStatement().execute("DROP TABLE if exists " + tableName1); + + String sql = "CREATE TABLE IF NOT EXISTS " + tableName1 + " ( " + "AID INTEGER PRIMARY KEY," + + "AGE INTEGER" + ")"; + conn.createStatement().execute(sql); + + conn.createStatement().execute("DROP TABLE if exists " + tableName2); + sql = "CREATE TABLE IF NOT EXISTS " + tableName2 + " ( " + "BID INTEGER PRIMARY KEY," + + "CODE INTEGER" + ")"; + conn.createStatement().execute(sql); + + // test for simple scan + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + " order by code limit 1) b on a.aid=b.bid "; + + QueryPlan queryPlan = getQueryPlan(conn, sql); + SortMergeJoinPlan sortMergeJoinPlan = + (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + ClientScanPlan lhsOuterPlan = + (ClientScanPlan) 
((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + OrderBy orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + ScanPlan innerScanPlan = + (ScanPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AGE")); + assertTrue(innerScanPlan.getLimit().intValue() == 3); + + ClientScanPlan rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); + innerScanPlan = (ScanPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("CODE")); + assertTrue(innerScanPlan.getLimit().intValue() == 1); + + // test for aggregate + sql = + "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from " + + tableName1 + + " where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join " + + "(select bid,sum(code) codesum from " + tableName2 + + " group by bid order by codesum limit 1) b on a.aid=b.bid "; + + queryPlan = getQueryPlan(conn, sql); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + AggregatePlan innerAggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(AGE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); + innerAggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(CODE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 1); + + String tableName3 = "merge3"; + conn.createStatement().execute("DROP TABLE if exists " + tableName3); + sql = "CREATE TABLE IF NOT EXISTS " + tableName3 + " ( " + "CID INTEGER PRIMARY KEY," + + "REGION INTEGER" + ")"; + conn.createStatement().execute(sql); + + // test for join + sql = "select t1.aid,t1.code,t2.region from " + "(select a.aid,b.code from " + tableName1 + + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join " + + "(select a.aid,c.region from " + 
tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid"; + + PhoenixPreparedStatement phoenixPreparedStatement = + conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerScanPlan = + (ScanPlan) ((HashJoinPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate()) + .getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("B.CODE")); + assertTrue(innerScanPlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerScanPlan = + (ScanPlan) ((HashJoinPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate()) + .getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("C.REGION DESC")); + assertTrue(innerScanPlan.getLimit().intValue() == 1); + + // test for join and aggregate + sql = "select t1.aid,t1.codesum,t2.regionsum from " + + "(select a.aid,sum(b.code) codesum from " + tableName1 + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join " + + "(select a.aid,sum(c.region) regionsum from " + tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid"; + + phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(B.CODE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) 
rhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(C.REGION) DESC")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 2); + + // test for if SubselectRewriter.isOrderByPrefix had take effect + sql = "select t1.aid,t1.codesum,t2.regionsum from " + + "(select a.aid,sum(b.code) codesum from " + tableName1 + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join " + + "(select a.aid,sum(c.region) regionsum from " + tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid " + + "order by t1.aid desc"; + + phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + orderBy = queryPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("T1.AID DESC")); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) (((TupleProjectionPlan) sortMergeJoinPlan.getLhsPlan()) + .getDelegate())).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 2); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(B.CODE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 2); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID DESC")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(C.REGION) DESC")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 2); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testUnionDifferentColumnNumber() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Statement statement = conn.createStatement(); + try { + String create = + "CREATE TABLE s.t1 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, " + + "f2.v3 varchar, v4 varchar)"; + statement.execute(create); + create = + "CREATE TABLE s.t2 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, f2.v3 varchar)"; + statement.execute(create); + String query = "SELECT * FROM s.t1 UNION ALL select * FROM s.t2"; + statement.executeQuery(query); + fail("Should fail with different column numbers "); + } catch (SQLException e) { + assertEquals(e.getMessage(), + "ERROR 525 (42902): SELECT column number differs in a Union All query " + + "is not allowed. 
1st query has 5 columns whereas 2nd query has 4"); + } finally { + statement.execute("DROP TABLE IF EXISTS s.t1"); + statement.execute("DROP TABLE IF EXISTS s.t2"); + conn.close(); + } + } + + @Test + public void testUnionDifferentColumnType() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Statement statement = conn.createStatement(); + try { + String create = + "CREATE TABLE s.t1 (k integer not null primary key, f1.v1 varchar, f1.v2 varchar, " + + "f2.v3 varchar, v4 varchar)"; + statement.execute(create); + create = "CREATE TABLE s.t2 (k integer not null primary key, f1.v1 varchar, f1.v2 integer, " + + "f2.v3 varchar, f2.v4 varchar)"; + statement.execute(create); + String query = "SELECT * FROM s.t1 UNION ALL select * FROM s.t2"; + statement.executeQuery(query); + fail("Should fail with different column types "); + } catch (SQLException e) { + assertEquals(e.getMessage(), + "ERROR 526 (42903): SELECT column types differ in a Union All query " + + "is not allowed. Column # 2 is VARCHAR in 1st query where as it is INTEGER in 2nd query"); + } finally { + statement.execute("DROP TABLE IF EXISTS s.t1"); + statement.execute("DROP TABLE IF EXISTS s.t2"); + conn.close(); + } + } + + @Test + public void testCannotCreateStatementOnClosedConnection() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.close(); + try { + conn.createStatement(); + fail(); + } catch (SQLException e) { + assertEquals(e.getErrorCode(), SQLExceptionCode.CONNECTION_CLOSED.getErrorCode()); + } + try { + conn.prepareStatement("SELECT * FROM SYSTEM.CATALOG"); + fail(); + } catch (SQLException e) { + assertEquals(e.getErrorCode(), SQLExceptionCode.CONNECTION_CLOSED.getErrorCode()); + } + } + + @Test + public void testSingleColLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + + " B,\n" + " C\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,C)"); + String query = "SELECT * FROM T WHERE A = 'B' and C='C'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List<List<Scan>> outerScans = plan.getScans(); + assertEquals(1, outerScans.size()); + List<Scan> innerScans = outerScans.get(0); + assertEquals(1, innerScans.size()); + Scan scan = innerScans.get(0); + assertEquals("A", Bytes.toString(scan.getStartRow()).trim()); + assertEquals("C", Bytes.toString(scan.getStopRow()).trim()); + } + } + + @Test + public void testMultiColLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); + String query = "SELECT * FROM T WHERE A = 'C' and B = 'X'
and D='C'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(1, outerScans.size()); + List innerScans = outerScans.get(0); + assertEquals(1, innerScans.size()); + Scan scan = innerScans.get(0); + assertEquals("C", Bytes.toString(scan.getStartRow()).trim()); + assertEquals("E", Bytes.toString(scan.getStopRow()).trim()); + } + } + + @Test + public void testSkipScanLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); + String query = "SELECT * FROM T WHERE A IN ('A','G') and B = 'A' and D = 'D'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(2, outerScans.size()); + List innerScans1 = outerScans.get(0); + assertEquals(1, innerScans1.size()); + Scan scan1 = innerScans1.get(0); + assertEquals("A", Bytes.toString(scan1.getStartRow()).trim()); + assertEquals("C", Bytes.toString(scan1.getStopRow()).trim()); + List innerScans2 = outerScans.get(1); + assertEquals(1, innerScans2.size()); + Scan scan2 = innerScans2.get(0); + assertEquals("G", Bytes.toString(scan2.getStartRow()).trim()); + assertEquals("I", Bytes.toString(scan2.getStopRow()).trim()); + } + } + + @Test + public void testRVCLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); + String query = "SELECT * FROM T WHERE A='I' and (B,D) IN (('A','D'),('B','I'))"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(1, outerScans.size()); + List innerScans = outerScans.get(0); + assertEquals(1, innerScans.size()); + Scan scan = innerScans.get(0); + assertEquals("I", Bytes.toString(scan.getStartRow()).trim()); + assertEquals(0, scan.getStopRow().length); + } + } + + @Test + public void testRVCLocalIndexPruning2() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT 
NULL,\n" + " B VARCHAR,\n" + + " C VARCHAR,\n" + " D VARCHAR,\n" + " E VARCHAR,\n" + " F VARCHAR,\n" + + " G VARCHAR,\n" + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + + " B,\n" + " C,\n" + " D,\n" + " E,\n" + " F,\n" + + " G\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,C,F,G)"); + String query = + "SELECT * FROM T WHERE (A,B,C,D) IN (('I','D','F','X'),('I','I','G','Y')) and F='X' and G='Y'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(1, outerScans.size()); + List innerScans = outerScans.get(0); + assertEquals(1, innerScans.size()); + Scan scan = innerScans.get(0); + assertEquals("I", Bytes.toString(scan.getStartRow()).trim()); + assertEquals(0, scan.getStopRow().length); + } + } + + @Test + public void testMinMaxRangeLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(A,B,D)"); + String query = + "SELECT * FROM T WHERE A = 'C' and (A,B,D) > ('C','B','X') and B < 'Z' and D='C'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(1, outerScans.size()); + List innerScans = outerScans.get(0); + assertEquals(1, innerScans.size()); + Scan scan = innerScans.get(0); + assertEquals("C", Bytes.toString(scan.getStartRow()).trim()); + assertEquals("E", Bytes.toString(scan.getStopRow()).trim()); + } + } + + @Test + public void testNoLocalIndexPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + + " B,\n" + " C\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(C)"); + String query = "SELECT * FROM T WHERE C='C'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + List> outerScans = plan.getScans(); + assertEquals(6, outerScans.size()); + } + } + + @Test + public void testLocalIndexRegionPruning() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1),\n" 
+ " CONSTRAINT PK PRIMARY KEY (\n" + + " A,\n" + " B,\n" + " C\n" + " )\n" + + ") SPLIT ON ('A','C','E','G','I')"); + + conn.createStatement().execute("CREATE LOCAL INDEX IDX ON T(D)"); + + // un-pruned, need to scan all six regions + String query = "SELECT * FROM T WHERE D = 'C'"; + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + assertEquals(6, plan.getScans().size()); + + // fixing first part of the key, can limit scanning to two regions + query = "SELECT * FROM T WHERE A = 'A' AND D = 'C'"; + statement = conn.createStatement().unwrap(PhoenixStatement.class); + plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + assertEquals(2, plan.getScans().size()); + + // same with skipscan filter + query = "SELECT * FROM T WHERE A IN ('A', 'C') AND D = 'C'"; + statement = conn.createStatement().unwrap(PhoenixStatement.class); + plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + assertEquals(3, plan.getScans().size()); + + // two parts of key fixed, need to scan a single region only + query = "SELECT * FROM T WHERE A = 'A' AND B = 'A' AND D = 'C'"; + statement = conn.createStatement().unwrap(PhoenixStatement.class); + plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + assertEquals(1, plan.getScans().size()); + + // same with skipscan filter + query = "SELECT * FROM T WHERE A IN ('A', 'C') AND B = 'A' AND D = 'C'"; + statement = conn.createStatement().unwrap(PhoenixStatement.class); + plan = statement.optimizeQuery(query); + assertEquals("IDX", plan.getContext().getCurrentTable().getTable().getName().getString()); + plan.iterator(); + assertEquals(2, plan.getScans().size()); + } + } + + @Test + public void testSmallScanForPointLookups() throws SQLException { + Properties props = PropertiesUtil.deepCopy(new Properties()); + createTestTable(getUrl(), + "CREATE TABLE FOO(\n" + " a VARCHAR NOT NULL,\n" + + " b VARCHAR NOT NULL,\n" + " c VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + " )"); + + props.put(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB, "2"); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + String query = "select * from foo where a = 'a' and b = 'b' and c in ('x','y','z')"; + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery(query); + plan.iterator(); + // Fail since we have 3 rows in pointLookup + assertEquals(Scan.ReadType.DEFAULT, plan.getContext().getScan().getReadType()); + query = "select * from foo where a = 'a' and b = 'b' and c = 'c'"; + plan = stmt.compileQuery(query); + plan.iterator(); + // Should be small scan, query is for single row pointLookup + assertEquals(Scan.ReadType.PREAD, plan.getContext().getScan().getReadType()); + } + } + + @Test + public void testLocalIndexPruningInSortMergeJoin() throws SQLException { + verifyLocalIndexPruningWithMultipleTables( + "SELECT /*+ USE_SORT_MERGE_JOIN*/ *\n" + "FROM T1 JOIN T2 ON T1.A = T2.A\n" + + "WHERE T1.A = 'B' and T1.C='C' and T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'"); + } + + @Ignore("Blocked by PHOENIX-4614") + @Test + 
public void testLocalIndexPruningInLeftOrInnerHashJoin() throws SQLException { + verifyLocalIndexPruningWithMultipleTables("SELECT *\n" + "FROM T1 JOIN T2 ON T1.A = T2.A\n" + + "WHERE T1.A = 'B' and T1.C='C' and T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'"); + } + + @Ignore("Blocked by PHOENIX-4614") + @Test + public void testLocalIndexPruningInRightHashJoin() throws SQLException { + verifyLocalIndexPruningWithMultipleTables("SELECT *\n" + "FROM (\n" + + " SELECT A, B, C, D FROM T2 WHERE T2.A IN ('A','G') and T2.B = 'A' and T2.D = 'D'\n" + + ") T2\n" + "RIGHT JOIN T1 ON T2.A = T1.A\n" + "WHERE T1.A = 'B' and T1.C='C'"); + } + + @Test + public void testLocalIndexPruningInUinon() throws SQLException { + verifyLocalIndexPruningWithMultipleTables( + "SELECT A, B, C FROM T1\n" + "WHERE A = 'B' and C='C'\n" + "UNION ALL\n" + + "SELECT A, B, C FROM T2\n" + "WHERE A IN ('A','G') and B = 'A' and D = 'D'"); + } + + private void verifyLocalIndexPruningWithMultipleTables(String query) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T1 (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + + " B,\n" + " C\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX1 ON T1(A,C)"); + conn.createStatement() + .execute("CREATE TABLE T2 (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ") SPLIT ON ('A','C','E','G','I')"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX2 ON T2(A,B,D)"); + PhoenixStatement statement = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = statement.optimizeQuery(query); + List<QueryPlan> childPlans = plan.accept(new MultipleChildrenExtractor()); + assertEquals(2, childPlans.size()); + // Check left child + assertEquals("IDX1", + childPlans.get(0).getContext().getCurrentTable().getTable().getName().getString()); + childPlans.get(0).iterator(); + List<List<Scan>> outerScansL = childPlans.get(0).getScans(); + assertEquals(1, outerScansL.size()); + List<Scan> innerScansL = outerScansL.get(0); + assertEquals(1, innerScansL.size()); + Scan scanL = innerScansL.get(0); + assertEquals("A", Bytes.toString(scanL.getStartRow()).trim()); + assertEquals("C", Bytes.toString(scanL.getStopRow()).trim()); + // Check right child + assertEquals("IDX2", + childPlans.get(1).getContext().getCurrentTable().getTable().getName().getString()); + childPlans.get(1).iterator(); + List<List<Scan>> outerScansR = childPlans.get(1).getScans(); + assertEquals(2, outerScansR.size()); + List<Scan> innerScansR1 = outerScansR.get(0); + assertEquals(1, innerScansR1.size()); + Scan scanR1 = innerScansR1.get(0); + assertEquals("A", Bytes.toString(scanR1.getStartRow()).trim()); + assertEquals("C", Bytes.toString(scanR1.getStopRow()).trim()); + List<Scan> innerScansR2 = outerScansR.get(1); + assertEquals(1, innerScansR2.size()); + Scan scanR2 = innerScansR2.get(0); + assertEquals("G", Bytes.toString(scanR2.getStartRow()).trim()); + assertEquals("I", Bytes.toString(scanR2.getStopRow()).trim()); + } + } + + @Test + public void testQueryPlanSourceRefsInHashJoin() throws SQLException { + String query = + "SELECT * FROM (\n" + " SELECT K1, V1 FROM A WHERE V1 = 'A'\n" + ") T1 JOIN (\n" + + " SELECT K2, V2 FROM
B WHERE V2 = 'B'\n" + ") T2 ON K1 = K2 ORDER BY V1"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInSortMergeJoin() throws SQLException { + String query = + "SELECT * FROM (\n" + " SELECT max(K1) KEY1, V1 FROM A GROUP BY V1\n" + ") T1 JOIN (\n" + + " SELECT max(K2) KEY2, V2 FROM B GROUP BY V2\n" + ") T2 ON KEY1 = KEY2 ORDER BY V1"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInSubquery() throws SQLException { + String query = "SELECT * FROM A\n" + "WHERE K1 > (\n" + + " SELECT max(K2) FROM B WHERE V2 = V1\n" + ") ORDER BY V1"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInSubquery2() throws SQLException { + String query = + "SELECT * FROM A\n" + "WHERE V1 > ANY (\n" + " SELECT K2 FROM B WHERE V2 = 'B'\n" + ")"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInSubquery3() throws SQLException { + String query = "SELECT * FROM A\n" + "WHERE V1 > ANY (\n" + " SELECT K2 FROM B B1" + + " WHERE V2 = (\n" + " SELECT max(V2) FROM B B2\n" + + " WHERE B2.K2 = B1.K2 AND V2 < 'K'\n" + " )\n" + ")"; + verifyQueryPlanSourceRefs(query, 3); + } + + @Test + public void testQueryPlanSourceRefsInSubquery4() throws SQLException { + String query = "SELECT * FROM (\n" + " SELECT K1, K2 FROM A\n" + " JOIN B ON K1 = K2\n" + + " WHERE V1 = 'A' AND V2 = 'B'\n" + " LIMIT 10\n" + ") ORDER BY K1"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInSubquery5() throws SQLException { + String query = "SELECT * FROM (\n" + " SELECT KEY1, KEY2 FROM (\n" + + " SELECT max(K1) KEY1, V1 FROM A GROUP BY V1\n" + " ) T1 JOIN (\n" + + " SELECT max(K2) KEY2, V2 FROM B GROUP BY V2\n" + + " ) T2 ON KEY1 = KEY2 LIMIT 10\n" + ") ORDER BY KEY1"; + verifyQueryPlanSourceRefs(query, 2); + } + + @Test + public void testQueryPlanSourceRefsInUnion() throws SQLException { + String query = "SELECT K1, V1 FROM A WHERE V1 = 'A'\n" + "UNION ALL\n" + + "SELECT K2, V2 FROM B WHERE V2 = 'B'"; + verifyQueryPlanSourceRefs(query, 2); + } + + private void verifyQueryPlanSourceRefs(String query, int refCount) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement().execute("CREATE TABLE A (\n" + + " K1 VARCHAR(10) NOT NULL PRIMARY KEY,\n" + " V1 VARCHAR(10))"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX1 ON A(V1)"); + conn.createStatement().execute("CREATE TABLE B (\n" + + " K2 VARCHAR(10) NOT NULL PRIMARY KEY,\n" + " V2 VARCHAR(10))"); + conn.createStatement().execute("CREATE LOCAL INDEX IDX2 ON B(V2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.compileQuery(query); + Set sourceRefs = plan.getSourceRefs(); + assertEquals(refCount, sourceRefs.size()); + for (TableRef table : sourceRefs) { + assertTrue(table.getTable().getType() == PTableType.TABLE); + } + plan = stmt.optimizeQuery(query); + sourceRefs = plan.getSourceRefs(); + assertEquals(refCount, sourceRefs.size()); + for (TableRef table : sourceRefs) { + assertTrue(table.getTable().getType() == PTableType.INDEX); + } + } + } + + private static class MultipleChildrenExtractor implements QueryPlanVisitor> { + + @Override + public List defaultReturn(QueryPlan plan) { + return Collections.emptyList(); + } + + @Override + public List visit(AggregatePlan plan) { + return 
Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(ScanPlan plan) { + return Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(ClientAggregatePlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public List<QueryPlan> visit(ClientScanPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public List<QueryPlan> visit(LiteralResultIterationPlan plan) { + return Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(TupleProjectionPlan plan) { + return plan.getDelegate().accept(this); + } + + @Override + public List<QueryPlan> visit(HashJoinPlan plan) { + List<QueryPlan> children = new ArrayList<QueryPlan>(plan.getSubPlans().length + 1); + children.add(plan.getDelegate()); + for (HashJoinPlan.SubPlan subPlan : plan.getSubPlans()) { + children.add(subPlan.getInnerPlan()); + } + return children; + } + + @Override + public List<QueryPlan> visit(SortMergeJoinPlan plan) { + return Lists.newArrayList(plan.getLhsPlan(), plan.getRhsPlan()); + } + + @Override + public List<QueryPlan> visit(UnionPlan plan) { + return plan.getSubPlans(); + } + + @Override + public List<QueryPlan> visit(UnnestArrayPlan plan) { + return Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(CursorFetchPlan plan) { + return Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(ListJarsQueryPlan plan) { + return Collections.emptyList(); + } + + @Override + public List<QueryPlan> visit(TraceQueryPlan plan) { + return Collections.emptyList(); + } + } + + @Test + public void testGroupByOrderMatchPkColumnOrder4690() throws Exception { + this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, false); + this.doTestGroupByOrderMatchPkColumnOrderBug4690(false, true); + this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, false); + this.doTestGroupByOrderMatchPkColumnOrderBug4690(true, true); + } + + private void doTestGroupByOrderMatchPkColumnOrderBug4690(boolean desc, boolean salted) + throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + String sql = "create table " + tableName + "( " + " pk1 integer not null , " + + " pk2 integer not null, " + " pk3 integer not null," + " pk4 integer not null," + + " v integer, " + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1 " + (desc ? "desc" : "") + + ", " + "pk2 " + (desc ? "desc" : "") + ", " + "pk3 " + (desc ? "desc" : "") + ", " + + "pk4 " + (desc ? "desc" : "") + " )) " + (salted ? "SALT_BUCKETS =4" : "split on(2)"); + conn.createStatement().execute(sql); + + sql = "select pk2,pk1,count(v) from " + tableName + " group by pk2,pk1 order by pk2,pk1"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1")); + + sql = "select pk1,pk2,count(v) from " + tableName + " group by pk2,pk1 order by pk1,pk2"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ?
OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); + + sql = "select pk2,pk1,count(v) from " + tableName + + " group by pk2,pk1 order by pk2 desc,pk1 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK2 DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK1 DESC")); + + sql = "select pk1,pk2,count(v) from " + tableName + + " group by pk2,pk1 order by pk1 desc,pk2 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); + + sql = "select pk3,pk2,count(v) from " + tableName + + " where pk1=1 group by pk3,pk2 order by pk3,pk2"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2")); + + sql = "select pk2,pk3,count(v) from " + tableName + + " where pk1=1 group by pk3,pk2 order by pk2,pk3"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ? OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); + + sql = "select pk3,pk2,count(v) from " + tableName + + " where pk1=1 group by pk3,pk2 order by pk3 desc,pk2 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3 DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK2 DESC")); + + sql = "select pk2,pk3,count(v) from " + tableName + + " where pk1=1 group by pk3,pk2 order by pk2 desc,pk3 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); + + sql = "select pk4,pk3,pk1,count(v) from " + tableName + + " where pk2=9 group by pk4,pk3,pk1 order by pk4,pk3,pk1"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3")); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1")); + + sql = "select pk1,pk3,pk4,count(v) from " + tableName + + " where pk2=9 group by pk4,pk3,pk1 order by pk1,pk3,pk4"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ? 
OrderBy.FWD_ROW_KEY_ORDER_BY : OrderBy.REV_ROW_KEY_ORDER_BY)); + + sql = "select pk4,pk3,pk1,count(v) from " + tableName + + " where pk2=9 group by pk4,pk3,pk1 order by pk4 desc,pk3 desc,pk1 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() == 3); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK4 DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(1).toString().equals("PK3 DESC")); + assertTrue( + queryPlan.getOrderBy().getOrderByExpressions().get(2).toString().equals("PK1 DESC")); + + sql = "select pk1,pk3,pk4,count(v) from " + tableName + + " where pk2=9 group by pk4,pk3,pk1 order by pk1 desc,pk3 desc,pk4 desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() + == (!desc ? OrderBy.REV_ROW_KEY_ORDER_BY : OrderBy.FWD_ROW_KEY_ORDER_BY)); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testSortMergeJoinPushFilterThroughSortBug5105() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + + String tableName1 = "MERGE1"; + String tableName2 = "MERGE2"; + + conn.createStatement().execute("DROP TABLE if exists " + tableName1); + + String sql = "CREATE TABLE IF NOT EXISTS " + tableName1 + " ( " + "AID INTEGER PRIMARY KEY," + + "AGE INTEGER" + ")"; + conn.createStatement().execute(sql); + + conn.createStatement().execute("DROP TABLE if exists " + tableName2); + sql = "CREATE TABLE IF NOT EXISTS " + tableName2 + " ( " + "BID INTEGER PRIMARY KEY," + + "CODE INTEGER" + ")"; + conn.createStatement().execute(sql); + + // test for simple scan + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid where b.code > 50"; + + QueryPlan queryPlan = getQueryPlan(conn, sql); + SortMergeJoinPlan sortMergeJoinPlan = + (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + ClientScanPlan lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + OrderBy orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + ScanPlan innerScanPlan = + (ScanPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AGE")); + assertTrue(innerScanPlan.getLimit().intValue() == 3); + + ClientScanPlan rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + String tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); + String rewrittenSql = "SELECT " + tableAlias + ".BID BID," + tableAlias + + ".CODE CODE FROM (SELECT BID,CODE FROM MERGE2 ORDER BY CODE LIMIT 1) " + tableAlias + + " WHERE " + tableAlias + ".CODE > 50 ORDER BY " + tableAlias + ".BID"; + assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + 
assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); + innerScanPlan = (ScanPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("CODE")); + assertTrue(innerScanPlan.getLimit().intValue() == 1); + + // test for aggregate + sql = + "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.codesum from (select aid,sum(age) agesum from " + + tableName1 + + " where age >=11 and age<=33 group by aid order by agesum limit 3) a inner join " + + "(select bid,sum(code) codesum from " + tableName2 + + " group by bid order by codesum limit 1) b on a.aid=b.bid where b.codesum > 50"; + + queryPlan = getQueryPlan(conn, sql); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + AggregatePlan innerAggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(AGE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".BID BID," + tableAlias + + ".CODESUM CODESUM FROM (SELECT BID, SUM(CODE) CODESUM FROM MERGE2 GROUP BY BID ORDER BY SUM(CODE) LIMIT 1) " + + tableAlias + " WHERE " + tableAlias + ".CODESUM > 50 ORDER BY " + tableAlias + ".BID"; + assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("BID")); + innerAggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(CODE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 1); + + String tableName3 = "merge3"; + conn.createStatement().execute("DROP TABLE if exists " + tableName3); + sql = "CREATE TABLE IF NOT EXISTS " + tableName3 + " ( " + "CID INTEGER PRIMARY KEY," + + "REGION INTEGER" + ")"; + conn.createStatement().execute(sql); + + // test for join + sql = "select t1.aid,t1.code,t2.region from " + "(select a.aid,b.code from " + tableName1 + + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 order by b.code limit 3) t1 inner join " + + "(select a.aid,c.region from " + tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 order by c.region desc limit 1) t2 on t1.aid=t2.aid " + + "where t1.code > 50"; + + PhoenixPreparedStatement phoenixPreparedStatement = + conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + sortMergeJoinPlan = 
(SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".AID AID," + tableAlias + + ".CODE CODE FROM (SELECT A.AID,B.CODE FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) ORDER BY B.CODE LIMIT 3) " + + tableAlias + " WHERE " + tableAlias + ".CODE > 50 ORDER BY " + tableAlias + ".AID"; + assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerScanPlan = + (ScanPlan) ((HashJoinPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()).getDelegate()) + .getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("B.CODE")); + assertTrue(innerScanPlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerScanPlan = + (ScanPlan) ((HashJoinPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()).getDelegate()) + .getDelegate(); + orderBy = innerScanPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("C.REGION DESC")); + assertTrue(innerScanPlan.getLimit().intValue() == 1); + + // test for join and aggregate + sql = "select t1.aid,t1.codesum,t2.regionsum from " + + "(select a.aid,sum(b.code) codesum from " + tableName1 + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by codesum limit 3) t1 inner join " + + "(select a.aid,sum(c.region) regionsum from " + tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by regionsum desc limit 2) t2 on t1.aid=t2.aid " + + "where t1.codesum >=40 and t2.regionsum >= 90"; + + phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getLhsPlan())).getDelegate(); + tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".AID AID," + tableAlias + + ".CODESUM CODESUM FROM (SELECT A.AID, SUM(B.CODE) CODESUM FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) GROUP BY A.AID ORDER BY SUM(B.CODE) LIMIT 3) " + + tableAlias + " WHERE " + tableAlias + ".CODESUM >= 40 ORDER BY " + tableAlias + ".AID"; + assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = lhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()) + 
.getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(B.CODE)")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".AID AID," + tableAlias + + ".REGIONSUM REGIONSUM FROM (SELECT A.AID, SUM(C.REGION) REGIONSUM FROM MERGE1 A Inner JOIN MERGE3 C ON (A.AID = C.CID) WHERE (C.REGION >= 77 AND C.REGION <= 99) GROUP BY A.AID ORDER BY SUM(C.REGION) DESC LIMIT 2) " + + tableAlias + " WHERE " + tableAlias + ".REGIONSUM >= 90 ORDER BY " + tableAlias + ".AID"; + assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("SUM(C.REGION) DESC")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 2); + + // test for if SubselectRewriter.isOrderByPrefix had take effect + sql = "select t1.aid,t1.codesum,t2.regionsum from " + + "(select a.aid,sum(b.code) codesum from " + tableName1 + " a inner join " + tableName2 + + " b on a.aid=b.bid where b.code >=44 and b.code<=66 group by a.aid order by a.aid,codesum limit 3) t1 inner join " + + "(select a.aid,sum(c.region) regionsum from " + tableName1 + " a inner join " + tableName3 + + " c on a.aid=c.cid where c.region>=77 and c.region<=99 group by a.aid order by a.aid desc,regionsum desc limit 2) t2 on t1.aid=t2.aid " + + "where t1.codesum >=40 and t2.regionsum >= 90 order by t1.aid desc"; + + phoenixPreparedStatement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + queryPlan = phoenixPreparedStatement.optimizeQuery(sql); + orderBy = queryPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("T1.AID DESC")); + sortMergeJoinPlan = (SortMergeJoinPlan) ((ClientScanPlan) queryPlan).getDelegate(); + + lhsOuterPlan = + (ClientScanPlan) (((TupleProjectionPlan) sortMergeJoinPlan.getLhsPlan()).getDelegate()); + tableAlias = lhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".AID AID," + tableAlias + + ".CODESUM CODESUM FROM (SELECT A.AID, SUM(B.CODE) CODESUM FROM MERGE1 A Inner JOIN MERGE2 B ON (A.AID = B.BID) WHERE (B.CODE >= 44 AND B.CODE <= 66) GROUP BY A.AID ORDER BY A.AID, SUM(B.CODE) LIMIT 3) " + + tableAlias + " WHERE " + tableAlias + ".CODESUM >= 40"; + assertTrue(lhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) lhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 2); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(B.CODE)")); + 
assertTrue(innerAggregatePlan.getLimit().intValue() == 3); + + rhsOuterPlan = + (ClientScanPlan) ((TupleProjectionPlan) (sortMergeJoinPlan.getRhsPlan())).getDelegate(); + tableAlias = rhsOuterPlan.getTableRef().getTableAlias(); + rewrittenSql = "SELECT " + tableAlias + ".AID AID," + tableAlias + + ".REGIONSUM REGIONSUM FROM (SELECT A.AID, SUM(C.REGION) REGIONSUM FROM MERGE1 A Inner JOIN MERGE3 C ON (A.AID = C.CID) WHERE (C.REGION >= 77 AND C.REGION <= 99) GROUP BY A.AID ORDER BY A.AID DESC, SUM(C.REGION) DESC LIMIT 2) " + + tableAlias + " WHERE " + tableAlias + ".REGIONSUM >= 90 ORDER BY " + tableAlias + ".AID"; + assertTrue(rhsOuterPlan.getStatement().toString().equals(rewrittenSql)); + + orderBy = rhsOuterPlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 1); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("AID")); + innerAggregatePlan = + (AggregatePlan) ((HashJoinPlan) ((TupleProjectionPlan) rhsOuterPlan.getDelegate()) + .getDelegate()).getDelegate(); + orderBy = innerAggregatePlan.getOrderBy(); + assertTrue(orderBy.getOrderByExpressions().size() == 2); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("A.AID DESC")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("SUM(C.REGION) DESC")); + assertTrue(innerAggregatePlan.getLimit().intValue() == 2); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testOrderPreservingForClientScanPlanBug5148() throws Exception { + doTestOrderPreservingForClientScanPlanBug5148(false, false); + doTestOrderPreservingForClientScanPlanBug5148(false, true); + doTestOrderPreservingForClientScanPlanBug5148(true, false); + doTestOrderPreservingForClientScanPlanBug5148(true, true); + } + + private void doTestOrderPreservingForClientScanPlanBug5148(boolean desc, boolean salted) + throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + String sql = "create table " + tableName + "( " + " pk1 char(20) not null , " + + " pk2 char(20) not null, " + " pk3 char(20) not null," + " v1 varchar, " + " v2 varchar, " + + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1 " + (desc ? "desc" : "") + ", " + "pk2 " + + (desc ? "desc" : "") + ", " + "pk3 " + (desc ? "desc" : "") + " )) " + + (salted ? 
"SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + sql = "select v1 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a order by v2,pk3"; + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select v1 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a where pk3 = '8' order by v2,v1"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select v1 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2 desc,t.pk3 desc,t.v1 desc limit 10) a order by v2 desc ,pk3 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from " + + tableName + + " t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a order by cnt,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from " + tableName + + " t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a order by cast(cnt as bigint),substr(sub,0,1)"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from " + + tableName + + " t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a order by cnt desc ,sub desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,pk2 from " + tableName + + " t where pk1 = '6' group by pk2,v2 limit 10) a order by pk2,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + if (desc) { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } else { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + + sql = "select sub from (select substr(v2,0,2) sub,pk2 from " + tableName + + " t where pk1 = '6' group by pk2,v2 limit 10) a order by pk2 desc,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + if (desc) { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } else { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } + + sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from " + tableName + + " t where pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by sub ,cnt"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from " + tableName + + " t where pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by substr(sub,0,1) ,cast(cnt as bigint)"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from " + tableName + + " t where pk1 = '6' group by v1 ,v2 order by t.v2 ,count(pk3) limit 10) a order by sub ,cnt"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + + sql 
= "select v1 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2 desc,t.pk3 desc,t.v1 desc limit 10) a order by v2 ,pk3"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select v1 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2,t.pk3,t.v1 limit 10) a where pk3 = '8' or (v2 < 'abc' and pk3 > '11') order by v2,v1"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + + // test innerQueryPlan is ordered by rowKey + sql = "select pk1 from (select pk3,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where pk3 > '8' order by pk1,pk2,pk3"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where sub > '8' order by pk1,pk2,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select pk3,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where pk3 > '8' order by pk1 desc ,pk2 desc ,pk3 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where sub > '8' order by pk1 desc,pk2 desc,sub desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testGroupByOrderPreservingForClientAggregatePlanBug5148() throws Exception { + doTestGroupByOrderPreservingForClientAggregatePlanBug5148(false, false); + doTestGroupByOrderPreservingForClientAggregatePlanBug5148(false, true); + doTestGroupByOrderPreservingForClientAggregatePlanBug5148(true, false); + doTestGroupByOrderPreservingForClientAggregatePlanBug5148(true, true); + } + + private void doTestGroupByOrderPreservingForClientAggregatePlanBug5148(boolean desc, + boolean salted) throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + String sql = "create table " + tableName + "( " + " pk1 varchar not null , " + + " pk2 varchar not null, " + " pk3 varchar not null," + " v1 varchar, " + " v2 varchar, " + + " CONSTRAINT TEST_PK PRIMARY KEY ( " + "pk1 " + (desc ? "desc" : "") + ", " + "pk2 " + + (desc ? "desc" : "") + ", " + "pk3 " + (desc ? "desc" : "") + " )) " + + (salted ? 
"SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + sql = "select v1 from (select v1,pk2,pk1 from " + tableName + + " t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a group by pk2,v1 order by pk2,v1"; + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select v1 from (select v1,pk2,pk1 from " + tableName + + " t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by v1, pk1 order by v1,pk1"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select v1 from (select v1,pk2,pk1 from " + tableName + + " t where pk1 = '6' order by t.pk2 desc,t.v1 desc,t.pk1 limit 10) a group by pk2, v1 order by pk2 desc,v1 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select v1 from (select v1,pk2,pk1 from " + tableName + + " t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' or (v1 < 'abc' and pk2 > '11') group by v1, pk1 order by v1,pk1"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + if (desc) { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } else { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + + sql = "select v1 from (select v1,pk2,pk1 from " + tableName + + " t where pk1 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' or (v1 < 'abc' and pk2 > '11') group by v1, pk1 order by v1,pk1 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + if (desc) { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } else { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } + + sql = "select sub from (select v1,pk2,substr(pk1,0,1) sub from " + tableName + + " t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by v1,sub order by v1,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v1,0,1) sub,pk2,pk1 from " + tableName + + " t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by sub,pk1 order by sub,pk1"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + if (desc) { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } else { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + + sql = "select sub from (select substr(v1,0,1) sub,pk2,pk1 from " + tableName + + " t where v2 = '6' order by t.pk2,t.v1,t.pk1 limit 10) a where pk2 = '8' group by sub,pk1 order by sub,pk1 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + if (desc) { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } else { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } + + sql = "select sub from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt from " + + tableName + + " t where pk1 = '6' group by v1,v2 order by count(pk3),t.v2 limit 10) a group by cnt,sub order by 
cnt,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select substr(sub,0,1) from (select substr(v2,0,2) sub,count(pk3) cnt from " + + tableName + " t where pk1 = '6' group by v1 ,v2 order by count(pk3),t.v2 limit 10) a " + + "group by cast(cnt as bigint),substr(sub,0,1) order by cast(cnt as bigint),substr(sub,0,1)"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,count(pk3) cnt from " + tableName + + " t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a group by cnt,sub order by cnt desc ,sub desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select substr(sub,0,1) from (select substr(v2,0,2) sub,count(pk3) cnt from " + + tableName + + " t where pk1 = '6' group by v1 ,v2 order by count(pk3) desc,t.v2 desc limit 10) a " + + "group by cast(cnt as bigint),substr(sub,0,1) order by cast(cnt as bigint) desc,substr(sub,0,1) desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select sub from (select substr(v2,0,2) sub,pk2 from " + tableName + + " t where pk1 = '6' group by pk2,v2 limit 10) a group by pk2,sub order by pk2,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + if (desc) { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } else { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } + + sql = "select sub from (select substr(v2,0,2) sub,pk2 from " + tableName + + " t where pk1 = '6' group by pk2,v2 limit 10) a group by pk2,sub order by pk2 desc,sub"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + if (desc) { + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } else { + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + } + + // test innerQueryPlan is ordered by rowKey + sql = "select pk1 from (select pk3,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where pk3 > '8' group by pk1,pk2,pk3 order by pk1,pk2,pk3"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1,t.pk2 limit 10) a where sub > '8' group by pk1,pk2,sub order by pk1,pk2"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select pk3,pk2,pk1 from " + tableName + + " t where v1 = '6' order by t.pk1 desc,t.pk2 desc limit 10) a where pk3 > '8' group by pk1, pk2, pk3 order by pk1 desc ,pk2 desc ,pk3 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select pk1 from (select substr(pk3,0,3) sub,pk2,pk1 from " + tableName + + " t where v1 = '6' order by 
t.pk1 desc,t.pk2 desc limit 10) a where sub > '8' group by pk1,pk2,sub order by pk1 desc,pk2 desc"; + plan = TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testOrderPreservingForSortMergeJoinBug5148() throws Exception { + doTestOrderPreservingForSortMergeJoinBug5148(false, false); + doTestOrderPreservingForSortMergeJoinBug5148(false, true); + doTestOrderPreservingForSortMergeJoinBug5148(true, false); + doTestOrderPreservingForSortMergeJoinBug5148(true, true); + } + + private void doTestOrderPreservingForSortMergeJoinBug5148(boolean desc, boolean salted) + throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + + String tableName1 = generateUniqueName(); + String tableName2 = generateUniqueName(); + + String sql = "CREATE TABLE IF NOT EXISTS " + tableName1 + " ( " + "AID INTEGER PRIMARY KEY " + + (desc ? "desc" : "") + "," + "AGE INTEGER" + ") " + (salted ? "SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + sql = "CREATE TABLE IF NOT EXISTS " + tableName2 + " ( " + "BID INTEGER PRIMARY KEY " + + (desc ? "desc" : "") + "," + "CODE INTEGER" + ")" + (salted ? "SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code order by a.aid ,a.age"; + QueryPlan queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code order by a.aid desc,a.age desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,a.age from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code group by a.aid,a.age order by a.aid ,a.age"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,a.age from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code group by a.aid,a.age order by a.aid desc,a.age desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code order by b.bid ,b.code"; + queryPlan = 
getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code order by b.bid desc ,b.code desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code group by b.bid, b.code order by b.bid ,b.code"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.code from (select aid,age from " + tableName1 + + " where age >=11 and age<=33 order by age limit 3) a inner join " + + "(select bid,code from " + tableName2 + + " order by code limit 1) b on a.aid=b.bid and a.age = b.code group by b.bid, b.code order by b.bid desc,b.code desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + // test part column + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by a.aid"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by a.aid desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code group by a.aid order by a.aid"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code group by a.aid order by a.aid desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ a.aid,b.code from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by a.age"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by b.bid"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by b.bid desc"; + queryPlan = getQueryPlan(conn, sql); + 
assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code group by b.bid order by b.bid"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code group by b.bid order by b.bid desc"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + + sql = "select /*+ USE_SORT_MERGE_JOIN */ b.bid,a.age from " + tableName1 + " a inner join " + + tableName2 + " b on a.aid=b.bid and a.age = b.code order by b.code"; + queryPlan = getQueryPlan(conn, sql); + assertTrue(queryPlan.getOrderBy().getOrderByExpressions().size() > 0); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testSortMergeBug4508() throws Exception { + Connection conn = null; + Connection conn010 = null; + try { + // Salted tables + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + conn = DriverManager.getConnection(getUrl(), props); + props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty("TenantId", "010"); + conn010 = DriverManager.getConnection(getUrl(), props); + + String peopleTable1 = generateUniqueName(); + String myTable1 = generateUniqueName(); + conn.createStatement() + .execute("CREATE TABLE " + peopleTable1 + " (\n" + "PERSON_ID VARCHAR NOT NULL,\n" + + "NAME VARCHAR\n" + + "CONSTRAINT PK_TEST_PEOPLE PRIMARY KEY (PERSON_ID)) SALT_BUCKETS = 3"); + conn.createStatement() + .execute("CREATE TABLE " + myTable1 + " (\n" + "LOCALID VARCHAR NOT NULL,\n" + + "DSID VARCHAR(255) NOT NULL, \n" + "EID CHAR(40),\n" + "HAS_CANDIDATES BOOLEAN\n" + + "CONSTRAINT PK_MYTABLE PRIMARY KEY (LOCALID, DSID)) SALT_BUCKETS = 3"); + verifyQueryPlanForSortMergeBug4508(conn, peopleTable1, myTable1); + + // Salted multi-tenant tables + String peopleTable2 = generateUniqueName(); + String myTable2 = generateUniqueName(); + conn.createStatement() + .execute("CREATE TABLE " + peopleTable2 + " (\n" + "TENANT_ID VARCHAR NOT NULL,\n" + + "PERSON_ID VARCHAR NOT NULL,\n" + "NAME VARCHAR\n" + + "CONSTRAINT PK_TEST_PEOPLE PRIMARY KEY (TENANT_ID, PERSON_ID))\n" + + "SALT_BUCKETS = 3, MULTI_TENANT=true"); + conn.createStatement() + .execute("CREATE TABLE " + myTable2 + " (\n" + "TENANT_ID VARCHAR NOT NULL,\n" + + "LOCALID VARCHAR NOT NULL,\n" + "DSID VARCHAR(255) NOT NULL, \n" + "EID CHAR(40),\n" + + "HAS_CANDIDATES BOOLEAN\n" + + "CONSTRAINT PK_MYTABLE PRIMARY KEY (TENANT_ID, LOCALID, DSID))\n" + + "SALT_BUCKETS = 3, MULTI_TENANT=true"); + verifyQueryPlanForSortMergeBug4508(conn010, peopleTable2, myTable2); + } finally { + if (conn != null) { + conn.close(); + } + if (conn010 != null) { + conn010.close(); + } + } + } + + private static void verifyQueryPlanForSortMergeBug4508(Connection conn, String peopleTable, + String myTable) throws Exception { + String query1 = "SELECT /*+ USE_SORT_MERGE_JOIN*/ COUNT(*)\n" + "FROM " + peopleTable + + " ds JOIN " + myTable + " l\n" + "ON ds.PERSON_ID = l.LOCALID\n" + + "WHERE l.EID IS NULL AND l.DSID = 'PEOPLE' AND l.HAS_CANDIDATES = FALSE"; + String query2 = "SELECT /*+ USE_SORT_MERGE_JOIN */ COUNT(*)\n" + "FROM (SELECT LOCALID FROM " + + 
myTable + "\n" + "WHERE EID IS NULL AND DSID = 'PEOPLE' AND HAS_CANDIDATES = FALSE) l\n" + + "JOIN " + peopleTable + " ds ON ds.PERSON_ID = l.LOCALID"; + + for (String q : new String[] { query1, query2 }) { + ResultSet rs = conn.createStatement().executeQuery("explain " + q); + String plan = QueryUtil.getExplainPlan(rs); + assertFalse("Tables should not require sort over their PKs:\n" + plan, + plan.contains("SERVER SORTED BY")); + } + } + + @Test + public void testDistinctCountLimitBug5217() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + String sql = "create table " + tableName + "( " + " pk1 integer not null , " + + " pk2 integer not null, " + " v integer, " + " CONSTRAINT TEST_PK PRIMARY KEY (pk1,pk2))"; + conn.createStatement().execute(sql); + + sql = "select count(distinct pk1) from " + tableName + " limit 1"; + QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = plan.getContext().getScan(); + assertFalse(TestUtil.hasFilter(scan, PageFilter.class)); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + @Test + public void testPushDownPostFilterToSubJoinBug5389() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String orderTableName = "order_table"; + String itemTableName = "item_table"; + String supplierTableName = "supplier_table"; + String sql = + "create table " + orderTableName + " (order_id varchar(15) not null primary key, " + + " customer_id varchar(10), " + " item_id varchar(10), " + " price integer, " + + " quantity integer, " + " date timestamp)"; + conn.createStatement().execute(sql); + + sql = "create table " + itemTableName + " (item_id varchar(10) not null primary key, " + + " name varchar, " + " price integer, " + " discount1 integer, " + + " discount2 integer, " + " supplier_id varchar(10), " + " description varchar)"; + conn.createStatement().execute(sql); + + sql = "create table " + supplierTableName + + " (supplier_id varchar(10) not null primary key, " + " name varchar, " + + " phone varchar(12), " + " address varchar, " + " loc_id varchar(5))"; + conn.createStatement().execute(sql); + + doTestPushDownPostFilterToSubJoinForNoStarJoinBug5389(conn, supplierTableName, itemTableName, + orderTableName); + doTestPushDownPostFilterToSubJoinForSortMergeJoinBug5389(conn, supplierTableName, + itemTableName, orderTableName); + } finally { + if (conn != null) { + conn.close(); + } + } + } + + private void doTestPushDownPostFilterToSubJoinForNoStarJoinBug5389(Connection conn, + String supplierTableName, String itemTableName, String orderTableName) throws Exception { + // one condition push down. 
+ String sql = + "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai')"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + HashJoinPlan hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); + HashSubPlan[] hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + HashJoinPlan subHashJoinPlan = (HashJoinPlan) (hashSubPlans[0].getInnerPlan()); + Expression postFilterExpression = subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression(); + assertTrue( + postFilterExpression.toString().equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); + + // postFilter references all tables can not push down to subjoin. + sql = + "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai' or o.quantity = 8)"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString() + .equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai' OR O.QUANTITY = 8)")); + hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + subHashJoinPlan = (HashJoinPlan) (hashSubPlans[0].getInnerPlan()); + assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); + + // one condition can not push down and other two conditions can push down. 
+ sql = + "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString() + .equals("(I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10)")); + hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + subHashJoinPlan = (HashJoinPlan) (hashSubPlans[0].getInnerPlan()); + postFilterExpression = subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression(); + assertTrue(postFilterExpression.toString().equals( + "((I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); + + // for right join,can not push down + sql = + "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "right join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString() + .equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); + hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + subHashJoinPlan = (HashJoinPlan) (hashSubPlans[0].getInnerPlan()); + assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); + + // for right join,can not push down + sql = + "select /*+ NO_STAR_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "right join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getJoinInfo().getPostJoinFilterExpression().toString().equals( + "((I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10) AND (I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); + hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + subHashJoinPlan = (HashJoinPlan) (hashSubPlans[0].getInnerPlan()); + assertTrue(subHashJoinPlan.getJoinInfo().getPostJoinFilterExpression() == null); + } + + private void doTestPushDownPostFilterToSubJoinForSortMergeJoinBug5389(Connection conn, + String supplierTableName, String itemTableName, String orderTableName) throws Exception { + // one condition push down. 
+ String sql = + "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai')"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + ClientScanPlan clientScanPlan = (ClientScanPlan) queryPlan; + assertTrue(clientScanPlan.getWhere() == null); + SortMergeJoinPlan sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + ClientScanPlan lhsClientScanPlan = (ClientScanPlan) sortMergeJoinPlan.getLhsPlan(); + assertTrue(lhsClientScanPlan.getWhere().toString() + .equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); + + // can not push down to subjoin. + sql = + "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai' or o.quantity = 8)"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + assertTrue(clientScanPlan.getWhere().toString() + .equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai' OR O.QUANTITY = 8)")); + sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + lhsClientScanPlan = (ClientScanPlan) sortMergeJoinPlan.getLhsPlan(); + assertTrue(lhsClientScanPlan.getWhere() == null); + + // one condition can not push down and other two conditions can push down. 
+ sql = + "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "inner join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + assertTrue( + clientScanPlan.getWhere().toString().equals("(I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10)")); + sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + lhsClientScanPlan = (ClientScanPlan) sortMergeJoinPlan.getLhsPlan(); + assertTrue(lhsClientScanPlan.getWhere().toString().equals( + "((I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); + + // for right join,can not push down + sql = + "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "right join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.supplier_id != 'medi' or s.address = 'hai')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + assertTrue(clientScanPlan.getWhere().toString() + .equals("(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); + sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + // for right join, SortMergeJoinPlan exchanges left and right + ClientScanPlan rhsClientScanPlan = (ClientScanPlan) sortMergeJoinPlan.getRhsPlan(); + assertTrue(rhsClientScanPlan.getWhere() == null); + + // for full join,can not push down + sql = + "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from " + + supplierTableName + " s inner join " + itemTableName + + " i on s.supplier_id = i.supplier_id " + "full join " + orderTableName + + " o on i.item_id = o.item_id " + "where (o.price < 10 or o.price > 20) and " + + "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + assertTrue(clientScanPlan.getWhere().toString().equals( + "((O.PRICE < 10 OR O.PRICE > 20) AND (I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10) AND (I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); + sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + lhsClientScanPlan = (ClientScanPlan) sortMergeJoinPlan.getLhsPlan(); + assertTrue(lhsClientScanPlan.getWhere() == null); + } + + @Test + public void testSubselectColumnPruneForJoinBug5451() throws Exception { + PhoenixConnection conn = null; + try { + conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); + String sql = null; + QueryPlan queryPlan = null; + // testNestedDerivedTable require index with same name be created + String tableName = "testA"; + sql = "create table " + tableName + " (organization_id char(15) not null, \n" + + " entity_id char(15) not null,\n" + 
" a_string varchar(100),\n" + + " b_string varchar(100),\n" + " a_integer integer,\n" + " a_date date,\n" + + " a_time time,\n" + " a_timestamp timestamp,\n" + " x_decimal decimal(31,10),\n" + + " x_long bigint,\n" + " x_integer integer,\n" + " y_integer integer,\n" + + " a_byte tinyint,\n" + " a_short smallint,\n" + " a_float float,\n" + + " a_double double,\n" + " a_unsigned_float unsigned_float,\n" + + " a_unsigned_double unsigned_double\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" + ") "; + conn.createStatement().execute(sql); + + // test for subquery + sql = "SELECT q.id, q.x10 * 10 FROM " + + "(SELECT t.eid id, t.x + 9 x10, t.astr a, t.bstr b, aint ai, adouble ad FROM " + + "(SELECT entity_id eid, a_string astr, b_string bstr, a_integer aint, a_double adouble, a_byte + 1 x FROM " + + tableName + " WHERE a_byte + 1 < 9 limit 2) AS t " + + "ORDER BY b, id limit 3) AS q WHERE q.a = 'a' OR q.b = 'b' OR q.b = 'c'"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + ClientScanPlan clientScanPlan = (ClientScanPlan) queryPlan; + TestUtil.assertSelectStatement(clientScanPlan.getStatement(), "SELECT Q.ID,(Q.X10 * 10) FROM " + + "(SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B FROM " + + "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T " + + "ORDER BY T.BSTR,T.EID LIMIT 3) Q WHERE (Q.A = 'a' OR Q.B = 'b' OR Q.B = 'c')"); + clientScanPlan = + (ClientScanPlan) ((TupleProjectionPlan) clientScanPlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(clientScanPlan.getStatement(), + "SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B FROM " + + "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T " + + "ORDER BY T.BSTR,T.EID LIMIT 3"); + ScanPlan scanPlan = + (ScanPlan) ((TupleProjectionPlan) clientScanPlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2"); + + // test for subquery with wildcard + sql = "SELECT * FROM " + + "(SELECT t.eid id, t.x + 9 x10, t.astr a, t.bstr b, aint ai, adouble ad FROM " + + "(SELECT entity_id eid, a_string astr, b_string bstr, a_integer aint, a_double adouble, a_byte + 1 x FROM " + + tableName + " WHERE a_byte + 1 < 9 limit 2) AS t " + + "ORDER BY b, id limit 3) AS q WHERE q.a = 'a' OR q.b = 'b' OR q.b = 'c'"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + TestUtil.assertSelectStatement(clientScanPlan.getStatement(), "SELECT * FROM " + + "(SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B,AINT AI,ADOUBLE AD FROM " + + "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T " + + "ORDER BY B,ID LIMIT 3) Q WHERE (Q.A = 'a' OR Q.B = 'b' OR Q.B = 'c')"); + clientScanPlan = + (ClientScanPlan) ((TupleProjectionPlan) clientScanPlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(clientScanPlan.getStatement(), + "SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B,AINT AI,ADOUBLE AD FROM " + + "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T " + + "ORDER BY T.BSTR,T.EID LIMIT 3"); + scanPlan = (ScanPlan) ((TupleProjectionPlan) clientScanPlan.getDelegate()).getDelegate(); + 
TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2"); + + // test for some trival cases of subquery. + sql = "SELECT count(*) FROM (SELECT count(*) c FROM " + tableName + " ) AS t"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + ClientAggregatePlan clientAggregatePlan = (ClientAggregatePlan) queryPlan; + TestUtil.assertSelectStatement(clientAggregatePlan.getStatement(), + "SELECT COUNT(1) FROM (SELECT COUNT(1) C FROM TESTA ) T"); + AggregatePlan aggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) clientAggregatePlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(aggregatePlan.getStatement(), "SELECT COUNT(1) C FROM TESTA"); + + sql = + "SELECT count(*) FROM (SELECT count(*) c FROM " + tableName + " GROUP BY a_string) AS t"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + clientAggregatePlan = (ClientAggregatePlan) queryPlan; + TestUtil.assertSelectStatement(clientAggregatePlan.getStatement(), + "SELECT COUNT(1) FROM (SELECT COUNT(1) C FROM TESTA GROUP BY A_STRING) T"); + aggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) clientAggregatePlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(aggregatePlan.getStatement(), + "SELECT COUNT(1) C FROM TESTA GROUP BY A_STRING"); + + sql = "SELECT 1 FROM (SELECT count(*) c FROM " + tableName + " GROUP BY a_string) AS t"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + aggregatePlan = (AggregatePlan) queryPlan; + TestUtil.assertSelectStatement(aggregatePlan.getStatement(), + "SELECT 1 FROM TESTA GROUP BY A_STRING"); + + sql = "SELECT count(*) FROM (SELECT DISTINCT a_string FROM " + tableName + ") AS t"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + clientAggregatePlan = (ClientAggregatePlan) queryPlan; + TestUtil.assertSelectStatement(clientAggregatePlan.getStatement(), + "SELECT COUNT(1) FROM (SELECT DISTINCT A_STRING FROM TESTA ) T"); + aggregatePlan = + (AggregatePlan) ((TupleProjectionPlan) clientAggregatePlan.getDelegate()).getDelegate(); + TestUtil.assertSelectStatement(aggregatePlan.getStatement(), + "SELECT DISTINCT A_STRING FROM TESTA"); + + // test for hash join + sql = + "SELECT q1.id, q2.id FROM (SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t WHERE t.abyte >= 8) AS q1" + + " JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t) AS q2 ON q1.a = q2.b" + + " WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; + JoinTable joinTablesContext = TestUtil.getJoinTable(sql, conn); + Table leftmostTableContext = joinTablesContext.getLeftTable(); + TestUtil.assertSelectStatement(leftmostTableContext.getSubselectStatement(), + "SELECT ENTITY_ID ID,A_STRING A FROM TESTA WHERE A_BYTE >= 8"); + assertTrue(leftmostTableContext.getPreFilterParseNodes().isEmpty()); + + Table rightTableContext = + joinTablesContext.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable(); + TestUtil.assertSelectStatement(rightTableContext.getSubselectStatement(), + "SELECT ENTITY_ID ID,B_STRING B FROM TESTA"); + assertTrue(rightTableContext.getPreFilterParseNodes().size() == 1); + assertTrue( + rightTableContext.getPreFilterParseNodes().get(0).toString().equals("A_BYTE != 5")); + + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + HashJoinPlan 
hashJoinPlan = (HashJoinPlan) queryPlan; + Scan scan = hashJoinPlan.getContext().getScan(); + TupleProjector tupleColumnProjector = TupleProjector.deserializeProjectorFromScan(scan); + Expression[] expressions = tupleColumnProjector.getExpressions(); + assertTrue(expressions.length == 2); + + TestUtil.assertSelectStatement(hashJoinPlan.getDelegate().getStatement(), + "SELECT Q1.ID,Q2.ID FROM TESTA WHERE A_BYTE >= 8 ORDER BY Q1.ID,Q2.ID DESC"); + HashSubPlan[] hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + scanPlan = (ScanPlan) ((TupleProjectionPlan) (hashSubPlans[0].getInnerPlan())).getDelegate(); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT ENTITY_ID ID,B_STRING B FROM TESTA WHERE A_BYTE != 5"); + + // test for hash join with wildcard + sql = + "SELECT * FROM (SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t WHERE t.abyte >= 8) AS q1" + + " JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t) AS q2 ON q1.a = q2.b" + + " WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; + joinTablesContext = TestUtil.getJoinTable(sql, conn); + leftmostTableContext = joinTablesContext.getLeftTable(); + TestUtil.assertSelectStatement(leftmostTableContext.getSubselectStatement(), + "SELECT ENTITY_ID ID,A_STRING A,B_STRING B FROM TESTA WHERE A_BYTE >= 8"); + assertTrue(leftmostTableContext.getPreFilterParseNodes().isEmpty()); + + rightTableContext = joinTablesContext.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable(); + TestUtil.assertSelectStatement(rightTableContext.getSubselectStatement(), + "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA"); + assertTrue(rightTableContext.getPreFilterParseNodes().size() == 1); + assertTrue( + rightTableContext.getPreFilterParseNodes().get(0).toString().equals("A_BYTE != 5")); + + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + hashJoinPlan = (HashJoinPlan) queryPlan; + scan = hashJoinPlan.getContext().getScan(); + tupleColumnProjector = TupleProjector.deserializeProjectorFromScan(scan); + expressions = tupleColumnProjector.getExpressions(); + assertTrue(expressions.length == 3); + + TestUtil.assertSelectStatement(hashJoinPlan.getDelegate().getStatement(), + "SELECT Q1.*,Q2.* FROM TESTA WHERE A_BYTE >= 8 ORDER BY Q1.ID,Q2.ID DESC"); + hashSubPlans = (HashSubPlan[]) hashJoinPlan.getSubPlans(); + assertTrue(hashSubPlans.length == 1); + scanPlan = (ScanPlan) ((TupleProjectionPlan) (hashSubPlans[0].getInnerPlan())).getDelegate(); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA WHERE A_BYTE != 5"); + + // test for sortmergejoin + sql = "SELECT /*+ USE_SORT_MERGE_JOIN */ q1.id, q2.id FROM " + + "(SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t WHERE t.abyte >= 8) AS q1 " + + "JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t) AS q2 " + + "ON q1.a = q2.b WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + SortMergeJoinPlan sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + ScanPlan lhsPlan = + 
(ScanPlan) ((TupleProjectionPlan) sortMergeJoinPlan.getLhsPlan()).getDelegate(); + TestUtil.assertSelectStatement(lhsPlan.getStatement(), + "SELECT ENTITY_ID ID,A_STRING A FROM TESTA WHERE A_BYTE >= 8 ORDER BY A_STRING"); + ScanPlan rhsPlan = + (ScanPlan) ((TupleProjectionPlan) sortMergeJoinPlan.getRhsPlan()).getDelegate(); + TestUtil.assertSelectStatement(rhsPlan.getStatement(), + "SELECT ENTITY_ID ID,B_STRING B FROM TESTA WHERE A_BYTE != 5 ORDER BY B_STRING"); + + // test for sortmergejoin with wildcard + sql = "SELECT /*+ USE_SORT_MERGE_JOIN */ * FROM " + + "(SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t WHERE t.abyte >= 8) AS q1 " + + "JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM " + + tableName + ") AS t) AS q2 " + + "ON q1.a = q2.b WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + clientScanPlan = (ClientScanPlan) queryPlan; + sortMergeJoinPlan = (SortMergeJoinPlan) clientScanPlan.getDelegate(); + lhsPlan = (ScanPlan) ((TupleProjectionPlan) sortMergeJoinPlan.getLhsPlan()).getDelegate(); + TestUtil.assertSelectStatement(lhsPlan.getStatement(), + "SELECT ENTITY_ID ID,A_STRING A,B_STRING B FROM TESTA WHERE A_BYTE >= 8 ORDER BY A_STRING"); + rhsPlan = (ScanPlan) ((TupleProjectionPlan) sortMergeJoinPlan.getRhsPlan()).getDelegate(); + TestUtil.assertSelectStatement(rhsPlan.getStatement(), + "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA WHERE A_BYTE != 5 ORDER BY B_STRING"); + } finally { + conn.close(); + } + } + + @Test + public void testInSubqueryBug6224() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String itemTableName = "item_table"; + String sql = "create table " + itemTableName + " (item_id varchar not null primary key, " + + " name varchar, " + " price integer, " + " discount1 integer, " + + " discount2 integer, " + " supplier_id varchar, " + " description varchar)"; + conn.createStatement().execute(sql); + + String orderTableName = "order_table"; + sql = "create table " + orderTableName + " (order_id varchar not null primary key, " + + " customer_id varchar, " + " item_id varchar, " + " price integer, " + + " quantity integer, " + " date timestamp)"; + conn.createStatement().execute(sql); + // test simple Correlated subquery + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT item_id FROM " + orderTableName + " o where o.price = i.price) ORDER BY name"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN (SELECT DISTINCT 1 $3,ITEM_ID $4,O.PRICE $2 FROM ORDER_TABLE O ) $1 " + + "ON ((I.ITEM_ID = $1.$4 AND $1.$2 = I.PRICE)) ORDER BY NAME"); + + // test Correlated subquery with AggregateFunction but no groupBy + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price = i.price) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $11, MAX(ITEM_ID) 
$12,O.PRICE $10 FROM ORDER_TABLE O GROUP BY O.PRICE) $9 " + + "ON ((I.ITEM_ID = $9.$12 AND $9.$10 = I.PRICE)) ORDER BY NAME"); + + // test Correlated subquery with AggregateFunction with groupBy + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price = i.price group by o.customer_id) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $19, MAX(ITEM_ID) $20,O.PRICE $18 FROM ORDER_TABLE O GROUP BY O.PRICE,O.CUSTOMER_ID) $17 " + + "ON ((I.ITEM_ID = $17.$20 AND $17.$18 = I.PRICE)) ORDER BY NAME"); + + // for Correlated subquery, the extracted join condition must be equal expression. + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price = i.price or o.quantity > 1 group by o.customer_id) ORDER BY name"; + try { queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - assertTrue(clientScanPlan.getWhere().toString().equals( - "(I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai')")); - sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - //for right join, SortMergeJoinPlan exchanges left and right - ClientScanPlan rhsClientScanPlan = (ClientScanPlan)sortMergeJoinPlan.getRhsPlan(); - assertTrue(rhsClientScanPlan.getWhere() == null); - - //for full join,can not push down - sql = "select /*+ USE_SORT_MERGE_JOIN */ COALESCE(o.order_id,'empty_order_id'),i.item_id, i.discount2+5, s.supplier_id, lower(s.name) from "+ - supplierTableName+" s inner join "+itemTableName+" i on s.supplier_id = i.supplier_id "+ - "full join "+orderTableName+" o on i.item_id = o.item_id "+ - "where (o.price < 10 or o.price > 20) and "+ - "(i.description= 'desc1' or o.quantity > 10) and (i.supplier_id != 'medi' or s.address = 'hai') and (i.name is not null or s.loc_id != '8')"; + fail(); + } catch (SQLFeatureNotSupportedException exception) { + + } + + // test Correlated subquery with AggregateFunction with groupBy and is ORed part of the where + // clause. 
+ sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price = i.price group by o.customer_id) or i.discount1 > 10 ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN " + + "(SELECT DISTINCT 1 $28, MAX(ITEM_ID) $29,O.PRICE $27 FROM ORDER_TABLE O GROUP BY O.PRICE,O.CUSTOMER_ID) $26 " + + "ON ((I.ITEM_ID = $26.$29 AND $26.$27 = I.PRICE)) WHERE ($26.$28 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); + + // test NonCorrelated subquery + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT item_id FROM " + orderTableName + " o where o.price > 8) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $35,ITEM_ID $36 FROM ORDER_TABLE O WHERE O.PRICE > 8) $34 ON (I.ITEM_ID = $34.$36) ORDER BY NAME"); + + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + " o where o.price > 8) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $42, MAX(ITEM_ID) $43 FROM ORDER_TABLE O WHERE O.PRICE > 8) $41 ON (I.ITEM_ID = $41.$43) ORDER BY NAME"); + + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price > 8 group by o.customer_id,o.item_id) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $49, MAX(ITEM_ID) $50 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $48 " + + "ON (I.ITEM_ID = $48.$50) ORDER BY NAME"); + + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN " + + "(SELECT max(item_id) FROM " + orderTableName + + " o where o.price > 8 group by o.customer_id,o.item_id) or i.discount1 > 10 ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN " + + "(SELECT DISTINCT 1 $56, MAX(ITEM_ID) $57 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $55 " + + "ON (I.ITEM_ID = $55.$57) WHERE ($55.$56 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); + } finally { + conn.close(); + } + } + + @Test + public void testHashJoinBug6232() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String sql = "CREATE TABLE test (" + " id INTEGER NOT NULL," + " test_id INTEGER," + + " lastchanged TIMESTAMP," + " CONSTRAINT my_pk PRIMARY KEY (id))"; + conn.createStatement().execute(sql); + + // test for LHS is Correlated subquery,the RHS would be as the probe side of Hash join. 
+ sql = "SELECT AAA.* FROM " + "(SELECT id, test_id, lastchanged FROM test T " + + " WHERE lastchanged = ( SELECT max(lastchanged) FROM test WHERE test_id = T.test_id )) AAA " + + "inner join " + "(SELECT id FROM test) BBB " + "on AAA.id = BBB.id"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + HashJoinPlan hashJoinPlan = (HashJoinPlan) queryPlan; + assertTrue(hashJoinPlan.getDelegate() instanceof ScanPlan); + TestUtil.assertSelectStatement(hashJoinPlan.getDelegate().getStatement(), + "SELECT AAA.* FROM TEST"); + SubPlan[] subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 1); + assertTrue(subPlans[0] instanceof HashSubPlan); + assertTrue(subPlans[0].getInnerPlan() instanceof TupleProjectionPlan); + assertTrue( + ((TupleProjectionPlan) (subPlans[0].getInnerPlan())).getDelegate() instanceof HashJoinPlan); + + // test for LHS is Correlated subquery,the RHS could not as the probe side of hash join, + // so use SortMergeJoinPlan + sql = "SELECT AAA.* FROM " + "(SELECT id, test_id, lastchanged FROM test T " + + " WHERE lastchanged = ( SELECT max(lastchanged) FROM test WHERE test_id = T.test_id )) AAA " + + "inner join " + "(SELECT id FROM test limit 10) BBB " + "on AAA.id = BBB.id"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof ClientScanPlan); + assertTrue(((ClientScanPlan) queryPlan).getDelegate() instanceof SortMergeJoinPlan); + + // test for LHS is NonCorrelated subquery ,would use HashJoin. + String GRAMMAR_TABLE = "CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE (ID INTEGER PRIMARY KEY, " + + "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," + + "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + + "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + + "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + + "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + + "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))"; + conn.createStatement().execute(GRAMMAR_TABLE); + + String LARGE_TABLE = "CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID INTEGER PRIMARY KEY, " + + "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," + + "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + + "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + + "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + + "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + + "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))"; + conn.createStatement().execute(LARGE_TABLE); + + String SECONDARY_LARGE_TABLE = + "CREATE TABLE IF NOT EXISTS SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY," + + "sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT," + + "sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT," + + "sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE," + + "sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, sec_time_id TIME, sec_date_id DATE," + + "sec_timestamp_id TIMESTAMP, 
sec_unsig_time_id TIME, sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP," + + "sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100))"; + conn.createStatement().execute(SECONDARY_LARGE_TABLE); + + sql = + "SELECT * FROM (SELECT ID, BIG_ID, DATE_ID FROM LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A " + + "INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS B " + + "ON A.ID=B.SEC_ID WHERE A.DATE_ID > ALL (SELECT SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100) " + + "AND B.SEC_UNSIG_FLOAT_ID = ANY (SELECT sec_unsig_float_id FROM SECONDARY_LARGE_TABLE " + + "WHERE SEC_ID > ALL (SELECT MIN (ID) FROM GRAMMAR_TABLE WHERE UNSIG_ID IS NULL) AND " + + "SEC_UNSIG_ID < ANY (SELECT DISTINCT(UNSIG_ID) FROM LARGE_TABLE WHERE UNSIG_ID<2500) LIMIT 1000) " + + "AND A.ID < 10000"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + hashJoinPlan = (HashJoinPlan) queryPlan; + subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 2); + assertTrue(subPlans[0] instanceof WhereClauseSubPlan); + assertTrue(subPlans[1] instanceof HashSubPlan); + + String tableName1 = generateUniqueName(); + String tableName2 = generateUniqueName(); + + sql = "CREATE TABLE IF NOT EXISTS " + tableName1 + " ( " + "AID INTEGER PRIMARY KEY," + + "AGE INTEGER" + ")"; + conn.createStatement().execute(sql); + + sql = "CREATE TABLE IF NOT EXISTS " + tableName2 + " ( " + "BID INTEGER PRIMARY KEY," + + "CODE INTEGER" + ")"; + conn.createStatement().execute(sql); + + // test for LHS is a flat table and pushed down NonCorrelated subquery as preFiter. + // would use HashJoin. + sql = "select a.aid from " + tableName1 + " a inner join " + "(select bid,code from " + + tableName2 + " where code > 10 limit 3) b on a.aid = b.bid " + + "where a.age > (select code from " + tableName2 + " c where c.bid = 2) order by a.aid"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + hashJoinPlan = (HashJoinPlan) queryPlan; + ScanPlan scanPlan = (ScanPlan) (hashJoinPlan.getDelegate()); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT A.AID FROM " + tableName1 + " A WHERE A.AGE > (SELECT CODE FROM " + tableName2 + + " C WHERE C.BID = 2 LIMIT 2) ORDER BY A.AID"); + subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 2); + assertTrue(subPlans[0] instanceof WhereClauseSubPlan); + WhereClauseSubPlan whereClauseSubPlan = (WhereClauseSubPlan) subPlans[0]; + TestUtil.assertSelectStatement(whereClauseSubPlan.getInnerPlan().getStatement(), + "SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2"); + assertTrue(subPlans[1] instanceof HashSubPlan); + + // test for LHS is a subselect and pushed down NonCorrelated subquery as preFiter. + // would use HashJoin. 
+ sql = "select a.aid from (select aid,age from " + tableName1 + + " where age >=11 and age<=33) a inner join " + "(select bid,code from " + tableName2 + + " where code > 10 limit 3) b on a.aid = b.bid " + "where a.age > (select code from " + + tableName2 + " c where c.bid = 2) order by a.aid"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + hashJoinPlan = (HashJoinPlan) queryPlan; + scanPlan = (ScanPlan) (hashJoinPlan.getDelegate()); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT A.AID FROM " + tableName1 + " WHERE (AGE > (SELECT CODE FROM " + tableName2 + + " C WHERE C.BID = 2 LIMIT 2) AND (AGE >= 11 AND AGE <= 33)) ORDER BY A.AID"); + subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 2); + assertTrue(subPlans[0] instanceof WhereClauseSubPlan); + whereClauseSubPlan = (WhereClauseSubPlan) subPlans[0]; + TestUtil.assertSelectStatement(whereClauseSubPlan.getInnerPlan().getStatement(), + "SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2"); + assertTrue(subPlans[1] instanceof HashSubPlan); + + // test for LHS is a subselect and pushed down aggregate NonCorrelated subquery as preFiter. + // would use HashJoin. + sql = "select a.aid from (select aid,age from " + tableName1 + + " where age >=11 and age<=33) a inner join " + "(select bid,code from " + tableName2 + + " where code > 10 limit 3) b on a.aid = b.bid " + "where a.age > (select max(code) from " + + tableName2 + " c where c.bid >= 1) order by a.aid"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + hashJoinPlan = (HashJoinPlan) queryPlan; + scanPlan = (ScanPlan) (hashJoinPlan.getDelegate()); + TestUtil.assertSelectStatement(scanPlan.getStatement(), + "SELECT A.AID FROM " + tableName1 + " WHERE (AGE > (SELECT MAX(CODE) FROM " + tableName2 + + " C WHERE C.BID >= 1 LIMIT 2) AND (AGE >= 11 AND AGE <= 33)) ORDER BY A.AID"); + subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 2); + assertTrue(subPlans[0] instanceof WhereClauseSubPlan); + whereClauseSubPlan = (WhereClauseSubPlan) subPlans[0]; + TestUtil.assertSelectStatement(whereClauseSubPlan.getInnerPlan().getStatement(), + "SELECT MAX(CODE) FROM " + tableName2 + " C WHERE C.BID >= 1 LIMIT 2"); + assertTrue(subPlans[1] instanceof HashSubPlan); + + /** + * test for LHS is a subselect and has an aggregate Correlated subquery as preFiter, but the + * aggregate Correlated subquery would be rewrite as HashJoin before + * {@link JoinCompiler#compile}. 
+ */ + sql = "select a.aid from (select aid,age from " + tableName1 + + " where age >=11 and age<=33) a inner join " + "(select bid,code from " + tableName2 + + " where code > 10 limit 3) b on a.aid = b.bid " + "where a.age > (select max(code) from " + + tableName2 + " c where c.bid = a.aid) order by a.aid"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + hashJoinPlan = (HashJoinPlan) queryPlan; + subPlans = hashJoinPlan.getSubPlans(); + assertTrue(subPlans.length == 2); + assertTrue(subPlans[0] instanceof HashSubPlan); + assertTrue(subPlans[1] instanceof HashSubPlan); + } finally { + conn.close(); + } + } + + @Test + public void testExistsSubqueryBug6498() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String itemTableName = "item_table"; + String sql = "create table " + itemTableName + " (item_id varchar not null primary key, " + + " name varchar, " + " price integer, " + " discount1 integer, " + + " discount2 integer, " + " supplier_id varchar, " + " description varchar)"; + conn.createStatement().execute(sql); + + String orderTableName = "order_table"; + sql = "create table " + orderTableName + " (order_id varchar not null primary key, " + + " customer_id varchar, " + " item_id varchar, " + " price integer, " + + " quantity integer, " + " date timestamp)"; + conn.createStatement().execute(sql); + + // test simple Correlated subquery + ParseNodeFactory.setTempAliasCounterValue(0); + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + " o where o.price = i.price and o.quantity = 5 ) ORDER BY name"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + System.out.println(queryPlan.getStatement()); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $3,O.PRICE $2 FROM ORDER_TABLE O WHERE O.QUANTITY = 5) $1 " + + "ON ($1.$2 = I.PRICE) ORDER BY NAME"); + + // test Correlated subquery with AggregateFunction and groupBy + ParseNodeFactory.setTempAliasCounterValue(0); + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + + " o where o.item_id = i.item_id group by customer_id having count(order_id) > 1) " + + "ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + + "(SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM ORDER_TABLE O GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING COUNT(ORDER_ID) > 1) $1 " + + "ON ($1.$2 = I.ITEM_ID) ORDER BY NAME"); + + // for Correlated subquery, the extracted join condition must be equal expression. 
+ sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + + " o where o.price = i.price or o.quantity > 1 group by o.customer_id) ORDER BY name"; + try { queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - assertTrue(clientScanPlan.getWhere().toString().equals( - "((O.PRICE < 10 OR O.PRICE > 20) AND (I.DESCRIPTION = 'desc1' OR O.QUANTITY > 10) AND (I.SUPPLIER_ID != 'medi' OR S.ADDRESS = 'hai') AND (I.NAME IS NOT NULL OR S.LOC_ID != '8'))")); - sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - lhsClientScanPlan = (ClientScanPlan)sortMergeJoinPlan.getLhsPlan(); - assertTrue(lhsClientScanPlan.getWhere() == null); - } - - @Test - public void testSubselectColumnPruneForJoinBug5451() throws Exception { - PhoenixConnection conn = null; - try { - conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); - String sql = null; - QueryPlan queryPlan = null; - //testNestedDerivedTable require index with same name be created - String tableName = "testA"; - sql = "create table " + tableName + - " (organization_id char(15) not null, \n" + - " entity_id char(15) not null,\n" + - " a_string varchar(100),\n" + - " b_string varchar(100),\n" + - " a_integer integer,\n" + - " a_date date,\n" + - " a_time time,\n" + - " a_timestamp timestamp,\n" + - " x_decimal decimal(31,10),\n" + - " x_long bigint,\n" + - " x_integer integer,\n" + - " y_integer integer,\n" + - " a_byte tinyint,\n" + - " a_short smallint,\n" + - " a_float float,\n" + - " a_double double,\n" + - " a_unsigned_float unsigned_float,\n" + - " a_unsigned_double unsigned_double\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" + - ") "; - conn.createStatement().execute(sql); - - //test for subquery - sql = "SELECT q.id, q.x10 * 10 FROM " + - "(SELECT t.eid id, t.x + 9 x10, t.astr a, t.bstr b, aint ai, adouble ad FROM "+ - "(SELECT entity_id eid, a_string astr, b_string bstr, a_integer aint, a_double adouble, a_byte + 1 x FROM " + tableName + " WHERE a_byte + 1 < 9 limit 2) AS t "+ - "ORDER BY b, id limit 3) AS q WHERE q.a = 'a' OR q.b = 'b' OR q.b = 'c'"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - ClientScanPlan clientScanPlan = (ClientScanPlan)queryPlan; - TestUtil.assertSelectStatement(clientScanPlan.getStatement(), - "SELECT Q.ID,(Q.X10 * 10) FROM "+ - "(SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B FROM "+ - "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T "+ - "ORDER BY T.BSTR,T.EID LIMIT 3) Q WHERE (Q.A = 'a' OR Q.B = 'b' OR Q.B = 'c')"); - clientScanPlan = - (ClientScanPlan)((TupleProjectionPlan)clientScanPlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement(clientScanPlan.getStatement(), - "SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B FROM "+ - "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T "+ - "ORDER BY T.BSTR,T.EID LIMIT 3"); - ScanPlan scanPlan = - (ScanPlan)((TupleProjectionPlan)clientScanPlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2"); - - //test for subquery with wildcard - sql = "SELECT * FROM " + - "(SELECT t.eid id, t.x + 9 x10, t.astr a, t.bstr b, aint ai, adouble ad FROM "+ - "(SELECT entity_id eid, 
a_string astr, b_string bstr, a_integer aint, a_double adouble, a_byte + 1 x FROM " + tableName + " WHERE a_byte + 1 < 9 limit 2) AS t "+ - "ORDER BY b, id limit 3) AS q WHERE q.a = 'a' OR q.b = 'b' OR q.b = 'c'"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - TestUtil.assertSelectStatement(clientScanPlan.getStatement(), - "SELECT * FROM "+ - "(SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B,AINT AI,ADOUBLE AD FROM "+ - "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T "+ - "ORDER BY B,ID LIMIT 3) Q WHERE (Q.A = 'a' OR Q.B = 'b' OR Q.B = 'c')"); - clientScanPlan = (ClientScanPlan)((TupleProjectionPlan)clientScanPlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement(clientScanPlan.getStatement(), - "SELECT T.EID ID,(T.X + 9) X10,T.ASTR A,T.BSTR B,AINT AI,ADOUBLE AD FROM "+ - "(SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2) T "+ - "ORDER BY T.BSTR,T.EID LIMIT 3"); - scanPlan = (ScanPlan)((TupleProjectionPlan)clientScanPlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT ENTITY_ID EID,A_STRING ASTR,B_STRING BSTR,A_INTEGER AINT,A_DOUBLE ADOUBLE,(A_BYTE + 1) X FROM TESTA WHERE (A_BYTE + 1) < 9 LIMIT 2"); - - //test for some trival cases of subquery. - sql = "SELECT count(*) FROM (SELECT count(*) c FROM "+tableName+" ) AS t"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - ClientAggregatePlan clientAggregatePlan = (ClientAggregatePlan)queryPlan; - TestUtil.assertSelectStatement(clientAggregatePlan.getStatement(), "SELECT COUNT(1) FROM (SELECT COUNT(1) C FROM TESTA ) T"); - AggregatePlan aggregatePlan = - (AggregatePlan)((TupleProjectionPlan)clientAggregatePlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement(aggregatePlan.getStatement(), "SELECT COUNT(1) C FROM TESTA"); - - sql = "SELECT count(*) FROM (SELECT count(*) c FROM "+tableName+" GROUP BY a_string) AS t"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - clientAggregatePlan = (ClientAggregatePlan)queryPlan; - TestUtil.assertSelectStatement( - clientAggregatePlan.getStatement(), - "SELECT COUNT(1) FROM (SELECT COUNT(1) C FROM TESTA GROUP BY A_STRING) T"); - aggregatePlan = - (AggregatePlan)((TupleProjectionPlan)clientAggregatePlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement( - aggregatePlan.getStatement(), - "SELECT COUNT(1) C FROM TESTA GROUP BY A_STRING"); - - sql = "SELECT 1 FROM (SELECT count(*) c FROM "+tableName+" GROUP BY a_string) AS t"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - aggregatePlan = (AggregatePlan)queryPlan; - TestUtil.assertSelectStatement(aggregatePlan.getStatement(), "SELECT 1 FROM TESTA GROUP BY A_STRING"); - - sql = "SELECT count(*) FROM (SELECT DISTINCT a_string FROM "+tableName+") AS t"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - clientAggregatePlan = (ClientAggregatePlan)queryPlan; - TestUtil.assertSelectStatement(clientAggregatePlan.getStatement(), "SELECT COUNT(1) FROM (SELECT DISTINCT A_STRING FROM TESTA ) T"); - aggregatePlan = - (AggregatePlan)((TupleProjectionPlan)clientAggregatePlan.getDelegate()).getDelegate(); - TestUtil.assertSelectStatement(aggregatePlan.getStatement(), "SELECT DISTINCT A_STRING FROM TESTA"); - - //test for hash join - sql = "SELECT q1.id, q2.id FROM (SELECT t.eid id, t.astr a, t.bstr b 
FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t WHERE t.abyte >= 8) AS q1" - + " JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t) AS q2 ON q1.a = q2.b" - + " WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; - JoinTable joinTablesContext = TestUtil.getJoinTable(sql, conn); - Table leftmostTableContext = joinTablesContext.getLeftTable(); - TestUtil.assertSelectStatement( - leftmostTableContext.getSubselectStatement(), - "SELECT ENTITY_ID ID,A_STRING A FROM TESTA WHERE A_BYTE >= 8"); - assertTrue(leftmostTableContext.getPreFilterParseNodes().isEmpty()); - - Table rightTableContext = joinTablesContext.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable(); - TestUtil.assertSelectStatement(rightTableContext.getSubselectStatement(), "SELECT ENTITY_ID ID,B_STRING B FROM TESTA"); - assertTrue(rightTableContext.getPreFilterParseNodes().size() == 1); - assertTrue(rightTableContext.getPreFilterParseNodes().get(0).toString().equals("A_BYTE != 5")); - - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - HashJoinPlan hashJoinPlan = (HashJoinPlan)queryPlan; - Scan scan = hashJoinPlan.getContext().getScan(); - TupleProjector tupleColumnProjector = - TupleProjector.deserializeProjectorFromScan(scan); - Expression[] expressions = tupleColumnProjector.getExpressions(); - assertTrue(expressions.length == 2); - - TestUtil.assertSelectStatement( - hashJoinPlan.getDelegate().getStatement(), - "SELECT Q1.ID,Q2.ID FROM TESTA WHERE A_BYTE >= 8 ORDER BY Q1.ID,Q2.ID DESC"); - HashSubPlan[] hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - scanPlan =(ScanPlan)((TupleProjectionPlan)(hashSubPlans[0].getInnerPlan())).getDelegate(); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT ENTITY_ID ID,B_STRING B FROM TESTA WHERE A_BYTE != 5"); - - //test for hash join with wildcard - sql = "SELECT * FROM (SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t WHERE t.abyte >= 8) AS q1" - + " JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t) AS q2 ON q1.a = q2.b" - + " WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; - joinTablesContext = TestUtil.getJoinTable(sql, conn); - leftmostTableContext = joinTablesContext.getLeftTable(); - TestUtil.assertSelectStatement( - leftmostTableContext.getSubselectStatement(), - "SELECT ENTITY_ID ID,A_STRING A,B_STRING B FROM TESTA WHERE A_BYTE >= 8"); - assertTrue(leftmostTableContext.getPreFilterParseNodes().isEmpty()); - - rightTableContext = joinTablesContext.getJoinSpecs().get(0).getRhsJoinTable().getLeftTable(); - TestUtil.assertSelectStatement( - rightTableContext.getSubselectStatement(), - "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA"); - assertTrue(rightTableContext.getPreFilterParseNodes().size() == 1); - assertTrue(rightTableContext.getPreFilterParseNodes().get(0).toString().equals("A_BYTE != 5")); - - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - hashJoinPlan = (HashJoinPlan)queryPlan; - scan = hashJoinPlan.getContext().getScan(); - tupleColumnProjector = - TupleProjector.deserializeProjectorFromScan(scan); - expressions = tupleColumnProjector.getExpressions(); - assertTrue(expressions.length == 3); - - TestUtil.assertSelectStatement( - 
hashJoinPlan.getDelegate().getStatement(), - "SELECT Q1.*,Q2.* FROM TESTA WHERE A_BYTE >= 8 ORDER BY Q1.ID,Q2.ID DESC"); - hashSubPlans = (HashSubPlan[])hashJoinPlan.getSubPlans(); - assertTrue(hashSubPlans.length == 1); - scanPlan = (ScanPlan)((TupleProjectionPlan)(hashSubPlans[0].getInnerPlan())).getDelegate(); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA WHERE A_BYTE != 5"); - - //test for sortmergejoin - sql = "SELECT /*+ USE_SORT_MERGE_JOIN */ q1.id, q2.id FROM " + - "(SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t WHERE t.abyte >= 8) AS q1 " + - "JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t) AS q2 "+ - "ON q1.a = q2.b WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - SortMergeJoinPlan sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - ScanPlan lhsPlan = - (ScanPlan)((TupleProjectionPlan)sortMergeJoinPlan.getLhsPlan()).getDelegate(); - TestUtil.assertSelectStatement( - lhsPlan.getStatement(), - "SELECT ENTITY_ID ID,A_STRING A FROM TESTA WHERE A_BYTE >= 8 ORDER BY A_STRING"); - ScanPlan rhsPlan = - (ScanPlan)((TupleProjectionPlan)sortMergeJoinPlan.getRhsPlan()).getDelegate(); - TestUtil.assertSelectStatement( - rhsPlan.getStatement(), - "SELECT ENTITY_ID ID,B_STRING B FROM TESTA WHERE A_BYTE != 5 ORDER BY B_STRING"); - - //test for sortmergejoin with wildcard - sql = "SELECT /*+ USE_SORT_MERGE_JOIN */ * FROM "+ - "(SELECT t.eid id, t.astr a, t.bstr b FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t WHERE t.abyte >= 8) AS q1 "+ - "JOIN (SELECT t.eid id, t.astr a, t.bstr b, t.abyte x FROM (SELECT entity_id eid, a_string astr, b_string bstr, a_byte abyte FROM "+tableName+") AS t) AS q2 "+ - "ON q1.a = q2.b WHERE q2.x != 5 ORDER BY q1.id, q2.id DESC"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - clientScanPlan = (ClientScanPlan)queryPlan; - sortMergeJoinPlan = (SortMergeJoinPlan)clientScanPlan.getDelegate(); - lhsPlan = (ScanPlan)((TupleProjectionPlan)sortMergeJoinPlan.getLhsPlan()).getDelegate(); - TestUtil.assertSelectStatement(lhsPlan.getStatement(), - "SELECT ENTITY_ID ID,A_STRING A,B_STRING B FROM TESTA WHERE A_BYTE >= 8 ORDER BY A_STRING"); - rhsPlan = (ScanPlan)((TupleProjectionPlan)sortMergeJoinPlan.getRhsPlan()).getDelegate(); - TestUtil.assertSelectStatement(rhsPlan.getStatement(), - "SELECT ENTITY_ID ID,A_STRING A,B_STRING B,A_BYTE X FROM TESTA WHERE A_BYTE != 5 ORDER BY B_STRING"); - } finally { - conn.close(); - } - } - - @Test - public void testInSubqueryBug6224() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String itemTableName = "item_table"; - String sql ="create table " + itemTableName + - " (item_id varchar not null primary key, " + - " name varchar, " + - " price integer, " + - " discount1 integer, " + - " discount2 integer, " + - " supplier_id varchar, " + - " description varchar)"; - conn.createStatement().execute(sql); - - String orderTableName = "order_table"; - sql = "create table " + orderTableName + - " (order_id varchar not null primary key, " + - " customer_id varchar, " + - " item_id varchar, " + - " price integer, " + - " quantity integer, " 
+ - " date timestamp)"; - conn.createStatement().execute(sql); - //test simple Correlated subquery - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT item_id FROM " + orderTableName + " o where o.price = i.price) ORDER BY name"; - QueryPlan queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN (SELECT DISTINCT 1 $3,ITEM_ID $4,O.PRICE $2 FROM ORDER_TABLE O ) $1 "+ - "ON ((I.ITEM_ID = $1.$4 AND $1.$2 = I.PRICE)) ORDER BY NAME"); - - //test Correlated subquery with AggregateFunction but no groupBy - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price = i.price) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN "+ - "(SELECT DISTINCT 1 $11, MAX(ITEM_ID) $12,O.PRICE $10 FROM ORDER_TABLE O GROUP BY O.PRICE) $9 "+ - "ON ((I.ITEM_ID = $9.$12 AND $9.$10 = I.PRICE)) ORDER BY NAME"); - - //test Correlated subquery with AggregateFunction with groupBy - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price = i.price group by o.customer_id) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN "+ - "(SELECT DISTINCT 1 $19, MAX(ITEM_ID) $20,O.PRICE $18 FROM ORDER_TABLE O GROUP BY O.PRICE,O.CUSTOMER_ID) $17 "+ - "ON ((I.ITEM_ID = $17.$20 AND $17.$18 = I.PRICE)) ORDER BY NAME"); - - //for Correlated subquery, the extracted join condition must be equal expression. - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price = i.price or o.quantity > 1 group by o.customer_id) ORDER BY name"; - try { - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - fail(); - } catch(SQLFeatureNotSupportedException exception) { - - } - - //test Correlated subquery with AggregateFunction with groupBy and is ORed part of the where clause. 
- sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price = i.price group by o.customer_id) or i.discount1 > 10 ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN "+ - "(SELECT DISTINCT 1 $28, MAX(ITEM_ID) $29,O.PRICE $27 FROM ORDER_TABLE O GROUP BY O.PRICE,O.CUSTOMER_ID) $26 "+ - "ON ((I.ITEM_ID = $26.$29 AND $26.$27 = I.PRICE)) WHERE ($26.$28 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); - - // test NonCorrelated subquery - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT item_id FROM " + orderTableName + " o where o.price > 8) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN "+ - "(SELECT DISTINCT 1 $35,ITEM_ID $36 FROM ORDER_TABLE O WHERE O.PRICE > 8) $34 ON (I.ITEM_ID = $34.$36) ORDER BY NAME"); - - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price > 8) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN "+ - "(SELECT DISTINCT 1 $42, MAX(ITEM_ID) $43 FROM ORDER_TABLE O WHERE O.PRICE > 8) $41 ON (I.ITEM_ID = $41.$43) ORDER BY NAME"); - - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price > 8 group by o.customer_id,o.item_id) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN "+ - "(SELECT DISTINCT 1 $49, MAX(ITEM_ID) $50 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $48 "+ - "ON (I.ITEM_ID = $48.$50) ORDER BY NAME"); - - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE i.item_id IN "+ - "(SELECT max(item_id) FROM " + orderTableName + " o where o.price > 8 group by o.customer_id,o.item_id) or i.discount1 > 10 ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN "+ - "(SELECT DISTINCT 1 $56, MAX(ITEM_ID) $57 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID) $55 "+ - "ON (I.ITEM_ID = $55.$57) WHERE ($55.$56 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); - } finally { - conn.close(); - } - } - - @Test - public void testHashJoinBug6232() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String sql ="CREATE TABLE test (" + - " id INTEGER NOT NULL," + - " test_id INTEGER," + - " lastchanged TIMESTAMP," + - " CONSTRAINT my_pk PRIMARY KEY (id))"; - conn.createStatement().execute(sql); - - //test for LHS is Correlated subquery,the RHS would be as the probe side of Hash join. 
- sql= "SELECT AAA.* FROM " + - "(SELECT id, test_id, lastchanged FROM test T " + - " WHERE lastchanged = ( SELECT max(lastchanged) FROM test WHERE test_id = T.test_id )) AAA " + - "inner join " + - "(SELECT id FROM test) BBB " + - "on AAA.id = BBB.id"; - QueryPlan queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - HashJoinPlan hashJoinPlan = (HashJoinPlan)queryPlan; - assertTrue(hashJoinPlan.getDelegate() instanceof ScanPlan); - TestUtil.assertSelectStatement( - hashJoinPlan.getDelegate().getStatement(), "SELECT AAA.* FROM TEST"); - SubPlan[] subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 1); - assertTrue(subPlans[0] instanceof HashSubPlan); - assertTrue(subPlans[0].getInnerPlan() instanceof TupleProjectionPlan); - assertTrue( - ((TupleProjectionPlan)(subPlans[0].getInnerPlan())).getDelegate() instanceof HashJoinPlan); - - //test for LHS is Correlated subquery,the RHS could not as the probe side of hash join, - //so use SortMergeJoinPlan - sql= "SELECT AAA.* FROM " + - "(SELECT id, test_id, lastchanged FROM test T " + - " WHERE lastchanged = ( SELECT max(lastchanged) FROM test WHERE test_id = T.test_id )) AAA " + - "inner join " + - "(SELECT id FROM test limit 10) BBB " + - "on AAA.id = BBB.id"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof ClientScanPlan); - assertTrue(((ClientScanPlan)queryPlan).getDelegate() instanceof SortMergeJoinPlan); - - //test for LHS is NonCorrelated subquery ,would use HashJoin. - String GRAMMAR_TABLE = "CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE (ID INTEGER PRIMARY KEY, " + - "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," + - "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + - "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + - "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + - "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + - "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))"; - conn.createStatement().execute(GRAMMAR_TABLE); - - String LARGE_TABLE = "CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID INTEGER PRIMARY KEY, " + - "unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id UNSIGNED_LONG, tiny_id TINYINT," + - "unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, unsig_small_id UNSIGNED_SMALLINT," + - "float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id DOUBLE, unsig_double_id UNSIGNED_DOUBLE," + - "decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, date_id DATE, timestamp_id TIMESTAMP," + - "unsig_time_id TIME, unsig_date_id DATE, unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30)," + - "char_id CHAR (30), binary_id BINARY (100), varbinary_id VARBINARY (100))"; - conn.createStatement().execute(LARGE_TABLE); - - String SECONDARY_LARGE_TABLE = "CREATE TABLE IF NOT EXISTS SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY," + - "sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT," + - "sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT," + - "sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE," + - "sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, sec_time_id TIME, sec_date_id DATE," + - "sec_timestamp_id TIMESTAMP, 
sec_unsig_time_id TIME, sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP," + - "sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100))"; - conn.createStatement().execute(SECONDARY_LARGE_TABLE); - - sql = "SELECT * FROM (SELECT ID, BIG_ID, DATE_ID FROM LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A " + - "INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS B " + - "ON A.ID=B.SEC_ID WHERE A.DATE_ID > ALL (SELECT SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100) " + - "AND B.SEC_UNSIG_FLOAT_ID = ANY (SELECT sec_unsig_float_id FROM SECONDARY_LARGE_TABLE " + - "WHERE SEC_ID > ALL (SELECT MIN (ID) FROM GRAMMAR_TABLE WHERE UNSIG_ID IS NULL) AND " + - "SEC_UNSIG_ID < ANY (SELECT DISTINCT(UNSIG_ID) FROM LARGE_TABLE WHERE UNSIG_ID<2500) LIMIT 1000) " + - "AND A.ID < 10000"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - hashJoinPlan = (HashJoinPlan)queryPlan; - subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 2); - assertTrue(subPlans[0] instanceof WhereClauseSubPlan); - assertTrue(subPlans[1] instanceof HashSubPlan); - - - String tableName1 = generateUniqueName(); - String tableName2 = generateUniqueName(); - - sql="CREATE TABLE IF NOT EXISTS "+tableName1+" ( "+ - "AID INTEGER PRIMARY KEY,"+ - "AGE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - sql="CREATE TABLE IF NOT EXISTS "+tableName2+" ( "+ - "BID INTEGER PRIMARY KEY,"+ - "CODE INTEGER"+ - ")"; - conn.createStatement().execute(sql); - - //test for LHS is a flat table and pushed down NonCorrelated subquery as preFiter. - //would use HashJoin. - sql="select a.aid from " + tableName1 + " a inner join "+ - "(select bid,code from " + tableName2 + " where code > 10 limit 3) b on a.aid = b.bid "+ - "where a.age > (select code from " + tableName2 + " c where c.bid = 2) order by a.aid"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - hashJoinPlan = (HashJoinPlan)queryPlan; - ScanPlan scanPlan=(ScanPlan)(hashJoinPlan.getDelegate()); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT A.AID FROM " +tableName1+ " A WHERE A.AGE > (SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2) ORDER BY A.AID"); - subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 2); - assertTrue(subPlans[0] instanceof WhereClauseSubPlan); - WhereClauseSubPlan whereClauseSubPlan = (WhereClauseSubPlan)subPlans[0]; - TestUtil.assertSelectStatement( - whereClauseSubPlan.getInnerPlan().getStatement(), - "SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2"); - assertTrue(subPlans[1] instanceof HashSubPlan); - - //test for LHS is a subselect and pushed down NonCorrelated subquery as preFiter. - //would use HashJoin. 
- sql="select a.aid from (select aid,age from " + tableName1 + " where age >=11 and age<=33) a inner join "+ - "(select bid,code from " + tableName2 + " where code > 10 limit 3) b on a.aid = b.bid "+ - "where a.age > (select code from " + tableName2 + " c where c.bid = 2) order by a.aid"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - hashJoinPlan = (HashJoinPlan)queryPlan; - scanPlan=(ScanPlan)(hashJoinPlan.getDelegate()); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT A.AID FROM " +tableName1+ " WHERE (AGE > (SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2) AND (AGE >= 11 AND AGE <= 33)) ORDER BY A.AID"); - subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 2); - assertTrue(subPlans[0] instanceof WhereClauseSubPlan); - whereClauseSubPlan = (WhereClauseSubPlan)subPlans[0]; - TestUtil.assertSelectStatement( - whereClauseSubPlan.getInnerPlan().getStatement(), - "SELECT CODE FROM " + tableName2 + " C WHERE C.BID = 2 LIMIT 2"); - assertTrue(subPlans[1] instanceof HashSubPlan); - - //test for LHS is a subselect and pushed down aggregate NonCorrelated subquery as preFiter. - //would use HashJoin. - sql = "select a.aid from (select aid,age from " + tableName1 + " where age >=11 and age<=33) a inner join "+ - "(select bid,code from " + tableName2 + " where code > 10 limit 3) b on a.aid = b.bid "+ - "where a.age > (select max(code) from " + tableName2 + " c where c.bid >= 1) order by a.aid"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - hashJoinPlan = (HashJoinPlan)queryPlan; - scanPlan=(ScanPlan)(hashJoinPlan.getDelegate()); - TestUtil.assertSelectStatement( - scanPlan.getStatement(), - "SELECT A.AID FROM " + tableName1 + - " WHERE (AGE > (SELECT MAX(CODE) FROM " + tableName2 + " C WHERE C.BID >= 1 LIMIT 2) AND (AGE >= 11 AND AGE <= 33)) ORDER BY A.AID"); - subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 2); - assertTrue(subPlans[0] instanceof WhereClauseSubPlan); - whereClauseSubPlan = (WhereClauseSubPlan)subPlans[0]; - TestUtil.assertSelectStatement( - whereClauseSubPlan.getInnerPlan().getStatement(), - "SELECT MAX(CODE) FROM " + tableName2 + " C WHERE C.BID >= 1 LIMIT 2"); - assertTrue(subPlans[1] instanceof HashSubPlan); - - /** - * test for LHS is a subselect and has an aggregate Correlated subquery as preFiter, - * but the aggregate Correlated subquery would be rewrite as HashJoin before - * {@link JoinCompiler#compile}. 
- */ - sql = "select a.aid from (select aid,age from " + tableName1 + " where age >=11 and age<=33) a inner join "+ - "(select bid,code from " + tableName2 + " where code > 10 limit 3) b on a.aid = b.bid "+ - "where a.age > (select max(code) from " + tableName2 + " c where c.bid = a.aid) order by a.aid"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - hashJoinPlan = (HashJoinPlan)queryPlan; - subPlans = hashJoinPlan.getSubPlans(); - assertTrue(subPlans.length == 2); - assertTrue(subPlans[0] instanceof HashSubPlan); - assertTrue(subPlans[1] instanceof HashSubPlan); - } finally { - conn.close(); - } - } - - @Test - public void testExistsSubqueryBug6498() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String itemTableName = "item_table"; - String sql ="create table " + itemTableName + - " (item_id varchar not null primary key, " + - " name varchar, " + - " price integer, " + - " discount1 integer, " + - " discount2 integer, " + - " supplier_id varchar, " + - " description varchar)"; - conn.createStatement().execute(sql); - - String orderTableName = "order_table"; - sql = "create table " + orderTableName + - " (order_id varchar not null primary key, " + - " customer_id varchar, " + - " item_id varchar, " + - " price integer, " + - " quantity integer, " + - " date timestamp)"; - conn.createStatement().execute(sql); - - //test simple Correlated subquery - ParseNodeFactory.setTempAliasCounterValue(0); - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.price = i.price and o.quantity = 5 ) ORDER BY name"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - System.out.println(queryPlan.getStatement()); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + - "(SELECT DISTINCT 1 $3,O.PRICE $2 FROM ORDER_TABLE O WHERE O.QUANTITY = 5) $1 "+ - "ON ($1.$2 = I.PRICE) ORDER BY NAME"); - - //test Correlated subquery with AggregateFunction and groupBy - ParseNodeFactory.setTempAliasCounterValue(0); - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.item_id = i.item_id group by customer_id having count(order_id) > 1) " + - "ORDER BY name"; - queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Semi JOIN " + - "(SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM ORDER_TABLE O GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING COUNT(ORDER_ID) > 1) $1 " + - "ON ($1.$2 = I.ITEM_ID) ORDER BY NAME"); - - //for Correlated subquery, the extracted join condition must be equal expression. - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.price = i.price or o.quantity > 1 group by o.customer_id) ORDER BY name"; - try { - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - fail(); - } catch(SQLFeatureNotSupportedException exception) { - - } - - //test Correlated subquery with AggregateFunction with groupBy and is ORed part of the where clause. 
- ParseNodeFactory.setTempAliasCounterValue(0); - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.item_id = i.item_id group by customer_id having count(order_id) > 1) "+ - " or i.discount1 > 10 ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN " + - "(SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM ORDER_TABLE O GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING COUNT(ORDER_ID) > 1) $1 " + - "ON ($1.$2 = I.ITEM_ID) WHERE ($1.$3 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); - - // test NonCorrelated subquery - ParseNodeFactory.setTempAliasCounterValue(0); - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.price > 8) ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE EXISTS (SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 LIMIT 1) ORDER BY NAME"); - - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)" + - " ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE EXISTS "+ - "(SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING COUNT(ORDER_ID) > 1 LIMIT 1)" + - " ORDER BY NAME"); - - sql= "SELECT item_id, name FROM " + itemTableName + " i WHERE exists "+ - "(SELECT 1 FROM " + orderTableName + " o where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)" + - " or i.discount1 > 10 ORDER BY name"; - queryPlan= TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(queryPlan instanceof HashJoinPlan); - TestUtil.assertSelectStatement( - queryPlan.getStatement(), - "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE " + - "( EXISTS (SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING COUNT(ORDER_ID) > 1 LIMIT 1)" + - " OR I.DISCOUNT1 > 10) ORDER BY NAME"); - } finally { - conn.close(); - } - } - - @Test - public void testEliminateUnnecessaryReversedScanBug6798() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - - String sql = - "create table " + tableName + "(group_id integer not null, " - + " keyword varchar not null, " + " cost integer, " - + " CONSTRAINT TEST_PK PRIMARY KEY (group_id,keyword)) "; - conn.createStatement().execute(sql); - - /** - * Test {@link GroupBy#isOrderPreserving} is false and {@link OrderBy} is reversed. 
- */ - sql = - "select keyword,sum(cost) from " + tableName - + " group by keyword order by keyword desc"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = queryPlan.getContext().getScan(); - assertTrue(!queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - assertTrue(!ScanUtil.isReversed(scan)); - - /** - * Test {@link GroupBy#isOrderPreserving} is true and {@link OrderBy} is reversed. - */ - sql = - "select keyword,sum(cost) from " + tableName - + " group by group_id,keyword order by group_id desc,keyword desc"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(queryPlan.getGroupBy().isOrderPreserving()); - assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); - assertTrue(ScanUtil.isReversed(scan)); - } finally { - conn.close(); - } - } - - @Test - public void testReverseIndexRangeBugPhoenix6916() throws Exception { - String tableName = generateUniqueName(); - String indexName = generateUniqueName(); - try (Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement()) { - stmt.execute("create table " + tableName + " (id varchar primary key, ts timestamp)"); - stmt.execute("create index " + indexName + " on " + tableName + "(ts desc)"); - - String query = - "select id, ts from " + tableName - + " where ts >= TIMESTAMP '2023-02-23 13:30:00' and ts < TIMESTAMP '2023-02-23 13:40:00'"; - ResultSet rs = stmt.executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexName - + " [~1,677,159,600,000] - [~1,677,159,000,000]\n SERVER FILTER BY FIRST KEY ONLY", - explainPlan); - } - } - - @Test - public void testReverseVarLengthRange6916() throws Exception { - String tableName = generateUniqueName(); - try (Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement()) { - - stmt.execute("create table " + tableName + " (k varchar primary key desc)"); - - // Explain doesn't display open/closed ranges - String explainExpected = - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName - + " [~'aaa'] - [~'a']\n SERVER FILTER BY FIRST KEY ONLY"; - - String openQry = "select * from " + tableName + " where k > 'a' and k<'aaa'"; - Scan openScan = - getOptimizedQueryPlan(openQry, Collections.emptyList()).getContext().getScan(); - assertEquals("\\x9E\\x9E\\x9F\\x00", Bytes.toStringBinary(openScan.getStartRow())); - assertEquals("\\x9E\\xFF", Bytes.toStringBinary(openScan.getStopRow())); - ResultSet rs = stmt.executeQuery("EXPLAIN " + openQry); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals(explainExpected, explainPlan); - - String closedQry = "select * from " + tableName + " where k >= 'a' and k <= 'aaa'"; - Scan closedScan = - getOptimizedQueryPlan(closedQry, Collections.emptyList()).getContext() - .getScan(); - assertEquals("\\x9E\\x9E\\x9E\\xFF", Bytes.toStringBinary(closedScan.getStartRow())); - assertEquals("\\x9F\\x00", Bytes.toStringBinary(closedScan.getStopRow())); - rs = stmt.executeQuery("EXPLAIN " + closedQry); - explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals(explainExpected, explainPlan); - } - } - - @Test - public void testUncoveredPhoenix6969() throws Exception { - - try (Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement()) { - - stmt.execute( - "create table dd (k1 integer not null, k2 
integer not null, k3 integer not null," - + " k4 integer not null, v1 integer, v2 integer, v3 integer, v4 integer" - + " constraint pk primary key (k1,k2,k3,k4))"); - stmt.execute("create index ii on dd (k4, k1, k2, k3)"); - String query = - "select /*+ index(dd ii) */ k1, k2, k3, k4, v1, v2, v3, v4 from dd" - + " where k4=1 and k2=1 order by k1 asc, v1 asc limit 1"; - ResultSet rs = stmt.executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - //We are more interested in the query compiling than the exact result - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER II [1]\n" - + " SERVER MERGE [0.V1, 0.V2, 0.V3, 0.V4]\n" - + " SERVER FILTER BY FIRST KEY ONLY AND \"K2\" = 1\n" - + " SERVER TOP 1 ROW SORTED BY [\"K1\", \"V1\"]\n" - + "CLIENT MERGE SORT\n" - + "CLIENT LIMIT 1", explainPlan); - } - } - - @Test - public void testUncoveredPhoenix6984() throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement()) { - stmt.execute("CREATE TABLE D (\n" + "K1 CHAR(6) NOT NULL,\n" - + "K2 VARCHAR(22) NOT NULL,\n" - + "K3 CHAR(2) NOT NULL,\n" - + "K4 VARCHAR(36) NOT NULL,\n" - + "V1 TIMESTAMP,\n" - + "V2 TIMESTAMP,\n" - + "CONSTRAINT PK_BILLING_ORDER PRIMARY KEY (K1,K2,K3,K4))"); - - stmt.execute("CREATE INDEX I ON D(K2, K1, K3, K4)"); - String query = - "SELECT /*+ INDEX(D I), NO_INDEX_SERVER_MERGE */ * " - + "FROM D " - + "WHERE K2 = 'XXX' AND " - + "V2 >= TIMESTAMP '2023-05-31 23:59:59.000' AND " - + "V1 <= TIMESTAMP '2023-04-01 00:00:00.000' " - + "ORDER BY V2 asc"; - ResultSet rs = stmt.executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER D\n" - + " SERVER FILTER BY (V2 >= TIMESTAMP '2023-05-31 23:59:59.000'" - + " AND V1 <= TIMESTAMP '2023-04-01 00:00:00.000')\n" - + " SERVER SORTED BY [D.V2]\n" - + "CLIENT MERGE SORT\n" - + " SKIP-SCAN-JOIN TABLE 0\n" - + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER I ['XXX']\n" - + " SERVER FILTER BY FIRST KEY ONLY\n" - + " DYNAMIC SERVER FILTER BY (\"D.K1\", \"D.K2\", \"D.K3\", \"D.K4\")" - + " IN (($2.$4, $2.$5, $2.$6, $2.$7))", - explainPlan); - } - } - - @Test - public void testUncoveredPhoenix6986() throws Exception { - Properties props = new Properties(); - props.setProperty(QueryServices.SERVER_MERGE_FOR_UNCOVERED_INDEX, - Boolean.toString(false)); - try (Connection conn = DriverManager.getConnection(getUrl(), props); - Statement stmt = conn.createStatement()) { - stmt.execute("CREATE TABLE TAB_PHOENIX_6986 (\n" + "K1 CHAR(6) NOT NULL,\n" - + "K2 VARCHAR(22) NOT NULL,\n" - + "K3 CHAR(2) NOT NULL,\n" - + "K4 VARCHAR(36) NOT NULL,\n" - + "V1 TIMESTAMP,\n" - + "V2 TIMESTAMP,\n" - + "CONSTRAINT PK_PHOENIX_6986 PRIMARY KEY (K1,K2,K3,K4))"); - - stmt.execute("CREATE INDEX IDX_PHOENIX_6986 ON TAB_PHOENIX_6986(K2, K1, K3, K4)"); - String query = - "SELECT /*+ INDEX(TAB_PHOENIX_6986 IDX_PHOENIX_6986) */ * " - + "FROM TAB_PHOENIX_6986 " - + "WHERE K2 = 'XXX' AND " - + "V2 >= TIMESTAMP '2023-05-31 23:59:59.000' AND " - + "V1 <= TIMESTAMP '2023-04-01 00:00:00.000' " - + "ORDER BY V2 asc"; - ResultSet rs = stmt.executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER TAB_PHOENIX_6986\n" - + " SERVER FILTER BY (V2 >= TIMESTAMP '2023-05-31 23:59:59.000'" - + " AND V1 <= TIMESTAMP '2023-04-01 00:00:00.000')\n" - + " SERVER SORTED BY [TAB_PHOENIX_6986.V2]\n" - + "CLIENT MERGE SORT\n" - + " SKIP-SCAN-JOIN TABLE 
0\n" - + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER IDX_PHOENIX_6986 ['XXX']\n" - + " SERVER FILTER BY FIRST KEY ONLY\n" - + " DYNAMIC SERVER FILTER BY (\"TAB_PHOENIX_6986.K1\", \"TAB_PHOENIX_6986.K2\", \"TAB_PHOENIX_6986.K3\", \"TAB_PHOENIX_6986.K4\")" - + " IN (($2.$4, $2.$5, $2.$6, $2.$7))", - explainPlan); - } - } - - @Test - public void testUncoveredPhoenix6961() throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl()); - Statement stmt = conn.createStatement();) { - stmt.execute( - "create table d (k integer primary key, v1 integer, v2 integer, v3 integer, v4 integer)"); - stmt.execute("create index i on d(v2) include (v3)"); - String query = "select /*+ index(d i) */ * from d where v2=1 and v3=1"; - ResultSet rs = stmt.executeQuery("EXPLAIN " + query); - String explainPlan = QueryUtil.getExplainPlan(rs); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER I [1]\n" - + " SERVER MERGE [0.V1, 0.V4]\n" - + " SERVER FILTER BY \"V3\" = 1", - explainPlan); - } - } - - @Test - public void testPartialOrderForTupleProjectionWithJoinBug7352() throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl())) { - String cpc_pv_dumper = generateUniqueName(); - String sql = "create table " + cpc_pv_dumper + " ( " - + " aid BIGINT not null," - + " k BIGINT not null," - + " cm BIGINT, " - + " CONSTRAINT TEST_PK PRIMARY KEY (aid, k))"; - conn.createStatement().execute(sql); - - String group_temp = generateUniqueName(); - sql = "create table " + group_temp + " (" - + " aid BIGINT not null," - + " gid TINYINT not null," - + " CONSTRAINT TEST_PK PRIMARY KEY (aid, gid))"; - conn.createStatement().execute(sql); - - sql = "select a_key, sum(groupCost) from ( " - + " select t1.k as a_key, sum(t1.cm) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + - ") group by a_key having count(1) >= 2 order by sum(groupCost) desc limit 100"; - QueryPlan plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - - sql = "select a_key, sum(groupCost) from ( " - + " select t1.k as a_key, t2.gid as b_gid, sum(t1.cm) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + - ") group by a_key having count(1) >= 2 order by sum(groupCost) desc limit 100"; - plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - - sql = "select b_gid, sum(groupCost) from ( " - + " select t1.k as a_key, t2.gid as b_gid, sum(t1.cm) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + - ") group by b_gid having count(1) >= 2 order by sum(groupCost) desc limit 100"; - plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertFalse(plan.getGroupBy().isOrderPreserving()); - - sql = "select b_gid, a_key, groupCost from ( " - + " select t1.k as a_key, t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp - + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid, t2.aid order by sum(t1.cm), a_key, t2.aid desc limit 20" + - ") order by groupCost"; - ClientScanPlan clientScanPlan = (ClientScanPlan)TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - List outputOrderBys = 
((TupleProjectionPlan)(clientScanPlan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - OrderBy outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("A_KEY")); - - sql = "select b_gid, a_key, groupCost from ( " - + " select t1.k as a_key, t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp - + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid order by sum(t1.cm) desc, a_key asc limit 20" + - ") order by groupCost desc"; - clientScanPlan = (ClientScanPlan)TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(clientScanPlan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("A_KEY")); - - sql = "select b_gid, groupCost from ( " - + " select t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " - + " from " + cpc_pv_dumper + " as t1 join " + group_temp - + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid order by sum(t1.cm), t1.k limit 20" + - ") order by groupCost"; - clientScanPlan = (ClientScanPlan)TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); - assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(clientScanPlan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST")); - } - } - - @Test - public void testPartialOrderForTupleProjectionPlanBug7352() throws Exception { - doTestPartialOrderForTupleProjectionPlanBug7352(false, false); - doTestPartialOrderForTupleProjectionPlanBug7352(false, true); - doTestPartialOrderForTupleProjectionPlanBug7352(true, false); - doTestPartialOrderForTupleProjectionPlanBug7352(true, true); - } - - private void doTestPartialOrderForTupleProjectionPlanBug7352(boolean desc, boolean salted) throws Exception { - try (Connection conn = DriverManager.getConnection(getUrl())) { - String tableName = generateUniqueName(); - String sql = "create table " + tableName + "( "+ - " pk1 char(20) not null , " + - " pk2 char(20) not null, " + - " pk3 char(20) not null," + - " v1 varchar, " + - " v2 varchar, " + - " v3 varchar, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( " + - " pk1 " + (desc ? "desc" : "")+", "+ - " pk2 " + (desc ? "desc" : "")+", "+ - " pk3 " + (desc ? "desc" : "")+ - " )) " + (salted ? 
"SALT_BUCKETS =4" : ""); - conn.createStatement().execute(sql); - - sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2,t.v1 limit 10) a order by v2"; - ClientScanPlan plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - List outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - OrderBy outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1")); - - sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2 desc,t.v1 desc limit 10) a order by v2 desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); - - sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2 desc,t.v1 desc, t.v3 desc limit 10) a order by v2 desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); - - sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2 desc,t.v1 desc, t.v3 asc limit 10) a order by v2 desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); - - sql = "select v2,cnt from (select count(pk3) cnt,v1,v2 from " + tableName - + " t where pk1 = '6' group by t.v1,t.v2,t.v3 limit 10) a order by v1"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V1")); - 
assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V2")); - - sql = "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " - + tableName - + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " - + " order by count(pk3) desc,t.v2 desc,t.v3 desc limit 10) a order by cnt desc ,sub desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("CNT DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC")); - - sql = "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " - + tableName - + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " - + " order by count(pk3) desc,t.v2 desc,t.v3 asc limit 10) a order by cnt desc ,sub desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("CNT DESC")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC")); - - sql = "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " - + tableName - + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " - + " order by t.v2 desc, count(pk3) desc, t.v3 desc limit 10) a order by sub desc, cnt desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 2); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("SUB DESC")); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CNT DESC")); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC")); - - sql = "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " - + tableName - + " t where pk1 = '6' group by v1 ,v2, v3 " - + " order by t.v2 desc, count(pk3) desc, t.v3 asc limit 10) a order by sub desc, cnt desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 2); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("SUB DESC")); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CNT DESC")); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); - 
assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC")); - - sql = "select v1, pk3, v2 from (select v1,v2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2, t.v1, t.v3 limit 10) a order by v1"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 1); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("V1")); - - sql = "select pk3, pk1, pk2 from (select pk1,pk2,pk3 from " + tableName - + " t where pk1 = '6' order by t.v2, t.v1, t.v3 limit 10) a order by pk3"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 1); - assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3")); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 0); - - sql = "select sub, v1 from (select substr(pk3,0,2) sub, pk2, v1 from " - + tableName + " t where pk1 = '6' order by pk2, pk3 limit 10) a order by pk2 desc ,sub desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - //Here because for subquery, there is no OrderBy REV_ROW_KEY_ORDER_BY - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - if (desc) { - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "PK2", true, true); - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(1), "SUB", true, true); - } else { - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("PK2")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB")); - } - - sql = "select sub, v1 from (select substr(pk3,0,2) sub, pk2, v1 from " - + tableName + " t where pk1 = '6' order by pk2 desc, pk3 desc limit 10) a order by pk2 desc ,sub desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); - if (desc) { - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "PK2", false, false); - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(1), "SUB", false, false); - } else { - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("PK2 DESC NULLS LAST")); - assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC NULLS LAST")); - } - - sql = "select sub, v1 from (select substr(pk2,0,2) sub, pk3, v1 from " - + tableName + " t where pk1 = '6' order by pk2, pk3 limit 10) a order by sub desc ,pk3 desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - //Here because for subquery, there is no OrderBy REV_ROW_KEY_ORDER_BY - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 
1); - if (desc) { - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "SUB", true, true); - } else { - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB")); - } - - sql = "select sub, v1 from (select substr(pk2,0,2) sub, pk3, v1 from " - + tableName + " t where pk1 = '6' order by pk2 desc, pk3 desc limit 10) a order by sub desc,pk3 desc"; - plan = (ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); - outputOrderBys = ((TupleProjectionPlan)(plan.getDelegate())).getOutputOrderBys(); - assertTrue(outputOrderBys.size() == 1); - outputOrderBy = outputOrderBys.get(0); - assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); - if (desc) { - assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "SUB", false, false); - } else { - assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC NULLS LAST")); - } - } - } - - private static void assertOrderByForDescExpression( - OrderByExpression orderByExpression, - String strExpression, - boolean isNullsLast, - boolean isAscending) { - assertEquals(strExpression, orderByExpression.getExpression().toString()); - assertEquals(isNullsLast, orderByExpression.isNullsLast()); - assertEquals(isAscending, orderByExpression.isAscending()); - } - - @Test - public void testUnionAllOrderByOptimizeBug7397() throws Exception { - Properties props = new Properties(); - props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - String tableName1 = generateUniqueName(); - String sql1 = "create table " + tableName1 + "( "+ - " fuid UNSIGNED_LONG not null , " + - " fstatsdate UNSIGNED_LONG not null, " + - " fversion UNSIGNED_LONG not null," + - " faid_1 UNSIGNED_LONG not null," + - " clk_pv_1 UNSIGNED_LONG, " + - " activation_pv_1 UNSIGNED_LONG, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( " + - " fuid , " + - " fstatsdate, " + - " fversion, " + - " faid_1 " + - " ))"; - conn.createStatement().execute(sql1); - - String tableName2= generateUniqueName(); - String sql2 = "create table " + tableName2 + "( "+ - " fuid UNSIGNED_LONG not null , " + - " fstatsdate UNSIGNED_LONG not null, " + - " fversion UNSIGNED_LONG not null," + - " faid_2 UNSIGNED_LONG not null," + - " clk_pv_2 UNSIGNED_LONG, " + - " activation_pv_2 UNSIGNED_LONG, " + - " CONSTRAINT TEST_PK PRIMARY KEY ( " + - " fuid , " + - " fstatsdate, " + - " fversion," + - " faid_2 " + - " ))"; - conn.createStatement().execute(sql2); - - String orderedUnionSql = - "(SELECT FUId AS advertiser_id," - + " FAId_1 AS adgroup_id," - + " FStatsDate AS date," - + " SUM(clk_pv_1) AS valid_click_count," - + " SUM(activation_pv_1) AS activated_count" - + " FROM " + tableName1 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_1, FStatsDate" - + " UNION ALL " - + " SELECT " - + " FUId AS advertiser_id," - + " FAId_2 AS adgroup_id," - + " FStatsDate AS date," - + " SUM(clk_pv_2) AS valid_click_count," - + " SUM(activation_pv_2) AS activated_count" - + " FROM " + tableName2 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_2, FStatsDate" - + ")"; - - //Test group by orderPreserving - String sql = "SELECT ADVERTISER_ID AS 
advertiser_id," - + "ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + orderedUnionSql - + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " - + "ORDER BY advertiser_id, adgroup_id, i_date " - + "limit 10"; - ClientAggregatePlan plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - UnionPlan unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - List orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - OrderBy orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - - //Test group by orderPreserving for distinct - sql = "SELECT distinct ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID AS adgroup_id," - + "DATE AS i_date " - + "FROM " - + orderedUnionSql - + "ORDER BY advertiser_id, adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - - //Test group by not orderPreserving - sql = "SELECT ADVERTISER_ID AS i_advertiser_id," - + "ADGROUP_ID AS i_adgroup_id," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + orderedUnionSql - + "GROUP BY I_ADVERTISER_ID, ADGROUP_ID " - + "ORDER BY i_adgroup_id " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() != OrderBy.FWD_ROW_KEY_ORDER_BY); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test group by not orderPreserving - sql = "SELECT ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + orderedUnionSql - + "GROUP BY ADGROUP_ID, I_DATE " - + "ORDER BY adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - 
assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test group by orderPreserving with where - sql = "SELECT ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + orderedUnionSql - + " where advertiser_id = 1 " - + "GROUP BY ADGROUP_ID, I_DATE " - + "ORDER BY adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - - //Test order by orderPreserving - sql ="SELECT ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "VALID_CLICK_COUNT AS valid_click_count," - + "ACTIVATED_COUNT AS activated_count " - + "FROM " - + orderedUnionSql - + "ORDER BY advertiser_id, i_date, adgroup_id " - + "limit 10"; - ClientScanPlan scanPlan =(ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(scanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); - unionPlan = (UnionPlan)((TupleProjectionPlan)(scanPlan.getDelegate())).getDelegate(); - assertTrue(unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - - //Test order by not orderPreserving - sql ="SELECT ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID AS i_adgroup_id," - + "DATE AS date," - + "VALID_CLICK_COUNT AS valid_click_count," - + "ACTIVATED_COUNT AS activated_count " - + "FROM " - + orderedUnionSql - + "ORDER BY advertiser_id, i_adgroup_id, date, valid_click_count " - + "limit 10"; - scanPlan =(ClientScanPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!scanPlan.getOrderBy().isEmpty()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(scanPlan.getDelegate())).getDelegate(); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test there is no order in union - sql ="SELECT ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + "(SELECT FUId AS advertiser_id," - + " FAId_1 AS adgroup_id," - + " FStatsDate AS date," - + " clk_pv_1 AS valid_click_count," - + " 
activation_pv_1 AS activated_count" - + " FROM " + tableName1 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " UNION ALL " - + " SELECT " - + " FUId AS advertiser_id," - + " FAId_2 AS adgroup_id," - + " FStatsDate AS date," - + " clk_pv_2 AS valid_click_count," - + " activation_pv_2 AS activated_count" - + " FROM " + tableName2 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + ")" - + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " - + "ORDER BY advertiser_id, adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test alias not inconsistent in union - sql ="SELECT ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID_1 AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + "(SELECT FUId AS advertiser_id," - + " FAId_1 AS adgroup_id_1," - + " FStatsDate AS date," - + " SUM(clk_pv_1) AS valid_click_count," - + " SUM(activation_pv_1) AS activated_count" - + " FROM " + tableName1 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_1, FStatsDate" - + " UNION ALL " - + " SELECT " - + " FUId AS advertiser_id," - + " FAId_2," - + " FStatsDate AS date," - + " SUM(clk_pv_2)," - + " SUM(activation_pv_2)" - + " FROM " + tableName2 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_2, FStatsDate" - + ")" - + "GROUP BY ADVERTISER_ID, ADGROUP_ID_1, I_DATE " - + "ORDER BY advertiser_id, adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(plan.getGroupBy().isOrderPreserving()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID_1")); - - //Test order by column not equals in union - sql = "SELECT ADVERTISER_ID AS advertiser_id," - + "ADGROUP_ID AS adgroup_id," - + "DATE AS i_date," - + "SUM(VALID_CLICK_COUNT) AS valid_click_count," - + "SUM(ACTIVATED_COUNT) AS activated_count " - + "FROM " - + "(SELECT FUId AS advertiser_id," - + " FAId_1 AS adgroup_id," - + " FStatsDate AS date," - + " SUM(clk_pv_1) AS valid_click_count," - + " SUM(activation_pv_1) AS activated_count" - + " FROM " + tableName1 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" - + " 
AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_1, FStatsDate" - + " UNION ALL " - + " SELECT " - + " FUId AS advertiser_id," - + " FAId_2 AS adgroup_id," - + " cast (0 as UNSIGNED_LONG) AS date," - + " SUM(clk_pv_2) AS valid_click_count," - + " SUM(activation_pv_2) AS activated_count" - + " FROM " + tableName2 - + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" - + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" - + " GROUP BY FUId, FAId_2" - + ")" - + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " - + "ORDER BY advertiser_id, adgroup_id, i_date " - + "limit 10"; - plan =(ClientAggregatePlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!plan.getGroupBy().isOrderPreserving()); - unionPlan = (UnionPlan)((TupleProjectionPlan)(plan.getDelegate())).getDelegate(); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test only union - sql = orderedUnionSql.substring(1, orderedUnionSql.length()-1); - unionPlan =(UnionPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); - assertTrue(unionPlan.getOutputOrderBys().isEmpty()); - - //Test only union and order by match - sql = orderedUnionSql.substring(1, orderedUnionSql.length()-1) + " order by advertiser_id, date, adgroup_id"; - unionPlan =(UnionPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - assertTrue(unionPlan.getSubPlans().stream().allMatch( - p -> p.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY)); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 3); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - - //Test only union and order by not match - sql = orderedUnionSql.substring(1, orderedUnionSql.length()-1) + - " order by advertiser_id, date, adgroup_id, valid_click_count"; - unionPlan =(UnionPlan)TestUtil.getOptimizeQueryPlan(conn, sql); - assertTrue(!unionPlan.isSupportOrderByOptimize()); - assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); - assertTrue(unionPlan.getSubPlans().stream().noneMatch( - p -> p.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY)); - orderBys = unionPlan.getOutputOrderBys(); - assertTrue(orderBys.size() == 1); - orderBy = orderBys.get(0); - assertTrue(orderBy.getOrderByExpressions().size() == 4); - assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); - assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); - assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); - assertTrue(orderBy.getOrderByExpressions().get(3).toString().equals("VALID_CLICK_COUNT")); - } - } + fail(); + } catch (SQLFeatureNotSupportedException exception) { + + } + + // test Correlated subquery with AggregateFunction with groupBy and is ORed part of the where + // clause. 
+ ParseNodeFactory.setTempAliasCounterValue(0); + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + + " o where o.item_id = i.item_id group by customer_id having count(order_id) > 1) " + + " or i.discount1 > 10 ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I Left JOIN " + + "(SELECT DISTINCT 1 $3,O.ITEM_ID $2 FROM ORDER_TABLE O GROUP BY O.ITEM_ID,CUSTOMER_ID HAVING COUNT(ORDER_ID) > 1) $1 " + + "ON ($1.$2 = I.ITEM_ID) WHERE ($1.$3 IS NOT NULL OR I.DISCOUNT1 > 10) ORDER BY NAME"); + + // test NonCorrelated subquery + ParseNodeFactory.setTempAliasCounterValue(0); + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + " o where o.price > 8) ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE EXISTS (SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 LIMIT 1) ORDER BY NAME"); + + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + + " o where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)" + + " ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE EXISTS " + + "(SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING COUNT(ORDER_ID) > 1 LIMIT 1)" + + " ORDER BY NAME"); + + sql = "SELECT item_id, name FROM " + itemTableName + " i WHERE exists " + "(SELECT 1 FROM " + + orderTableName + + " o where o.price > 8 group by o.customer_id,o.item_id having count(order_id) > 1)" + + " or i.discount1 > 10 ORDER BY name"; + queryPlan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(queryPlan instanceof HashJoinPlan); + TestUtil.assertSelectStatement(queryPlan.getStatement(), + "SELECT ITEM_ID,NAME FROM ITEM_TABLE I WHERE " + + "( EXISTS (SELECT 1 FROM ORDER_TABLE O WHERE O.PRICE > 8 GROUP BY O.CUSTOMER_ID,O.ITEM_ID HAVING COUNT(ORDER_ID) > 1 LIMIT 1)" + + " OR I.DISCOUNT1 > 10) ORDER BY NAME"); + } finally { + conn.close(); + } + } + + @Test + public void testEliminateUnnecessaryReversedScanBug6798() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + + String sql = + "create table " + tableName + "(group_id integer not null, " + " keyword varchar not null, " + + " cost integer, " + " CONSTRAINT TEST_PK PRIMARY KEY (group_id,keyword)) "; + conn.createStatement().execute(sql); + + /** + * Test {@link GroupBy#isOrderPreserving} is false and {@link OrderBy} is reversed. 
+ */ + sql = + "select keyword,sum(cost) from " + tableName + " group by keyword order by keyword desc"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = queryPlan.getContext().getScan(); + assertTrue(!queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + assertTrue(!ScanUtil.isReversed(scan)); + + /** + * Test {@link GroupBy#isOrderPreserving} is true and {@link OrderBy} is reversed. + */ + sql = "select keyword,sum(cost) from " + tableName + + " group by group_id,keyword order by group_id desc,keyword desc"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(queryPlan.getGroupBy().isOrderPreserving()); + assertTrue(queryPlan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY); + assertTrue(ScanUtil.isReversed(scan)); + } finally { + conn.close(); + } + } + + @Test + public void testReverseIndexRangeBugPhoenix6916() throws Exception { + String tableName = generateUniqueName(); + String indexName = generateUniqueName(); + try (Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement()) { + stmt.execute("create table " + tableName + " (id varchar primary key, ts timestamp)"); + stmt.execute("create index " + indexName + " on " + tableName + "(ts desc)"); + + String query = "select id, ts from " + tableName + + " where ts >= TIMESTAMP '2023-02-23 13:30:00' and ts < TIMESTAMP '2023-02-23 13:40:00'"; + ResultSet rs = stmt.executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + indexName + + " [~1,677,159,600,000] - [~1,677,159,000,000]\n SERVER FILTER BY FIRST KEY ONLY", + explainPlan); + } + } + + @Test + public void testReverseVarLengthRange6916() throws Exception { + String tableName = generateUniqueName(); + try (Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement()) { + + stmt.execute("create table " + tableName + " (k varchar primary key desc)"); + + // Explain doesn't display open/closed ranges + String explainExpected = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + + " [~'aaa'] - [~'a']\n SERVER FILTER BY FIRST KEY ONLY"; + + String openQry = "select * from " + tableName + " where k > 'a' and k<'aaa'"; + Scan openScan = + getOptimizedQueryPlan(openQry, Collections.emptyList()).getContext().getScan(); + assertEquals("\\x9E\\x9E\\x9F\\x00", Bytes.toStringBinary(openScan.getStartRow())); + assertEquals("\\x9E\\xFF", Bytes.toStringBinary(openScan.getStopRow())); + ResultSet rs = stmt.executeQuery("EXPLAIN " + openQry); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals(explainExpected, explainPlan); + + String closedQry = "select * from " + tableName + " where k >= 'a' and k <= 'aaa'"; + Scan closedScan = + getOptimizedQueryPlan(closedQry, Collections.emptyList()).getContext().getScan(); + assertEquals("\\x9E\\x9E\\x9E\\xFF", Bytes.toStringBinary(closedScan.getStartRow())); + assertEquals("\\x9F\\x00", Bytes.toStringBinary(closedScan.getStopRow())); + rs = stmt.executeQuery("EXPLAIN " + closedQry); + explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals(explainExpected, explainPlan); + } + } + + @Test + public void testUncoveredPhoenix6969() throws Exception { + + try (Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement()) { + + stmt.execute("create table dd (k1 integer not null, k2 integer not 
null, k3 integer not null," + + " k4 integer not null, v1 integer, v2 integer, v3 integer, v4 integer" + + " constraint pk primary key (k1,k2,k3,k4))"); + stmt.execute("create index ii on dd (k4, k1, k2, k3)"); + String query = "select /*+ index(dd ii) */ k1, k2, k3, k4, v1, v2, v3, v4 from dd" + + " where k4=1 and k2=1 order by k1 asc, v1 asc limit 1"; + ResultSet rs = stmt.executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + // We are more interested in the query compiling than the exact result + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER II [1]\n" + + " SERVER MERGE [0.V1, 0.V2, 0.V3, 0.V4]\n" + + " SERVER FILTER BY FIRST KEY ONLY AND \"K2\" = 1\n" + + " SERVER TOP 1 ROW SORTED BY [\"K1\", \"V1\"]\n" + "CLIENT MERGE SORT\n" + + "CLIENT LIMIT 1", explainPlan); + } + } + + @Test + public void testUncoveredPhoenix6984() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE D (\n" + "K1 CHAR(6) NOT NULL,\n" + "K2 VARCHAR(22) NOT NULL,\n" + + "K3 CHAR(2) NOT NULL,\n" + "K4 VARCHAR(36) NOT NULL,\n" + "V1 TIMESTAMP,\n" + + "V2 TIMESTAMP,\n" + "CONSTRAINT PK_BILLING_ORDER PRIMARY KEY (K1,K2,K3,K4))"); + + stmt.execute("CREATE INDEX I ON D(K2, K1, K3, K4)"); + String query = "SELECT /*+ INDEX(D I), NO_INDEX_SERVER_MERGE */ * " + "FROM D " + + "WHERE K2 = 'XXX' AND " + "V2 >= TIMESTAMP '2023-05-31 23:59:59.000' AND " + + "V1 <= TIMESTAMP '2023-04-01 00:00:00.000' " + "ORDER BY V2 asc"; + ResultSet rs = stmt.executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER D\n" + + " SERVER FILTER BY (V2 >= TIMESTAMP '2023-05-31 23:59:59.000'" + + " AND V1 <= TIMESTAMP '2023-04-01 00:00:00.000')\n" + " SERVER SORTED BY [D.V2]\n" + + "CLIENT MERGE SORT\n" + " SKIP-SCAN-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER I ['XXX']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + + " DYNAMIC SERVER FILTER BY (\"D.K1\", \"D.K2\", \"D.K3\", \"D.K4\")" + + " IN (($2.$4, $2.$5, $2.$6, $2.$7))", explainPlan); + } + } + + @Test + public void testUncoveredPhoenix6986() throws Exception { + Properties props = new Properties(); + props.setProperty(QueryServices.SERVER_MERGE_FOR_UNCOVERED_INDEX, Boolean.toString(false)); + try (Connection conn = DriverManager.getConnection(getUrl(), props); + Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE TAB_PHOENIX_6986 (\n" + "K1 CHAR(6) NOT NULL,\n" + + "K2 VARCHAR(22) NOT NULL,\n" + "K3 CHAR(2) NOT NULL,\n" + "K4 VARCHAR(36) NOT NULL,\n" + + "V1 TIMESTAMP,\n" + "V2 TIMESTAMP,\n" + + "CONSTRAINT PK_PHOENIX_6986 PRIMARY KEY (K1,K2,K3,K4))"); + + stmt.execute("CREATE INDEX IDX_PHOENIX_6986 ON TAB_PHOENIX_6986(K2, K1, K3, K4)"); + String query = + "SELECT /*+ INDEX(TAB_PHOENIX_6986 IDX_PHOENIX_6986) */ * " + "FROM TAB_PHOENIX_6986 " + + "WHERE K2 = 'XXX' AND " + "V2 >= TIMESTAMP '2023-05-31 23:59:59.000' AND " + + "V1 <= TIMESTAMP '2023-04-01 00:00:00.000' " + "ORDER BY V2 asc"; + ResultSet rs = stmt.executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER TAB_PHOENIX_6986\n" + + " SERVER FILTER BY (V2 >= TIMESTAMP '2023-05-31 23:59:59.000'" + + " AND V1 <= TIMESTAMP '2023-04-01 00:00:00.000')\n" + + " SERVER SORTED BY [TAB_PHOENIX_6986.V2]\n" + "CLIENT MERGE SORT\n" + + " SKIP-SCAN-JOIN TABLE 0\n" + + " CLIENT PARALLEL 1-WAY RANGE SCAN OVER 
IDX_PHOENIX_6986 ['XXX']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + + " DYNAMIC SERVER FILTER BY (\"TAB_PHOENIX_6986.K1\", \"TAB_PHOENIX_6986.K2\", \"TAB_PHOENIX_6986.K3\", \"TAB_PHOENIX_6986.K4\")" + + " IN (($2.$4, $2.$5, $2.$6, $2.$7))", explainPlan); + } + } + + @Test + public void testUncoveredPhoenix6961() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl()); + Statement stmt = conn.createStatement();) { + stmt.execute( + "create table d (k integer primary key, v1 integer, v2 integer, v3 integer, v4 integer)"); + stmt.execute("create index i on d(v2) include (v3)"); + String query = "select /*+ index(d i) */ * from d where v2=1 and v3=1"; + ResultSet rs = stmt.executeQuery("EXPLAIN " + query); + String explainPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER I [1]\n" + + " SERVER MERGE [0.V1, 0.V4]\n" + " SERVER FILTER BY \"V3\" = 1", explainPlan); + } + } + + @Test + public void testPartialOrderForTupleProjectionWithJoinBug7352() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String cpc_pv_dumper = generateUniqueName(); + String sql = "create table " + cpc_pv_dumper + " ( " + " aid BIGINT not null," + + " k BIGINT not null," + " cm BIGINT, " + " CONSTRAINT TEST_PK PRIMARY KEY (aid, k))"; + conn.createStatement().execute(sql); + + String group_temp = generateUniqueName(); + sql = "create table " + group_temp + " (" + " aid BIGINT not null," + + " gid TINYINT not null," + " CONSTRAINT TEST_PK PRIMARY KEY (aid, gid))"; + conn.createStatement().execute(sql); + + sql = "select a_key, sum(groupCost) from ( " + + " select t1.k as a_key, sum(t1.cm) as groupCost " + " from " + cpc_pv_dumper + + " as t1 join " + group_temp + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + + ") group by a_key having count(1) >= 2 order by sum(groupCost) desc limit 100"; + QueryPlan plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + + sql = "select a_key, sum(groupCost) from ( " + + " select t1.k as a_key, t2.gid as b_gid, sum(t1.cm) as groupCost " + " from " + + cpc_pv_dumper + " as t1 join " + group_temp + + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + + ") group by a_key having count(1) >= 2 order by sum(groupCost) desc limit 100"; + plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + + sql = "select b_gid, sum(groupCost) from ( " + + " select t1.k as a_key, t2.gid as b_gid, sum(t1.cm) as groupCost " + " from " + + cpc_pv_dumper + " as t1 join " + group_temp + + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid" + + ") group by b_gid having count(1) >= 2 order by sum(groupCost) desc limit 100"; + plan = TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertFalse(plan.getGroupBy().isOrderPreserving()); + + sql = "select b_gid, a_key, groupCost from ( " + + " select t1.k as a_key, t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " + + " from " + cpc_pv_dumper + " as t1 join " + group_temp + + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid, t2.aid order by sum(t1.cm), a_key, t2.aid desc limit 20" + + ") order by groupCost"; + ClientScanPlan clientScanPlan = + (ClientScanPlan) TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + List outputOrderBys = + ((TupleProjectionPlan) (clientScanPlan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + OrderBy 
outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("A_KEY")); + + sql = "select b_gid, a_key, groupCost from ( " + + " select t1.k as a_key, t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " + + " from " + cpc_pv_dumper + " as t1 join " + group_temp + + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid order by sum(t1.cm) desc, a_key asc limit 20" + + ") order by groupCost desc"; + clientScanPlan = (ClientScanPlan) TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (clientScanPlan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("A_KEY")); + + sql = "select b_gid, groupCost from ( " + + " select t2.gid as b_gid, cast(sum(t1.cm) as bigint) as groupCost " + " from " + + cpc_pv_dumper + " as t1 join " + group_temp + + " as t2 on t1.aid = t2.aid group by t1.k, t2.gid order by sum(t1.cm), t1.k limit 20" + + ") order by groupCost"; + clientScanPlan = (ClientScanPlan) TestUtil.getOptimizeQueryPlanNoIterator(conn, sql); + assertTrue(clientScanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (clientScanPlan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("GROUPCOST")); + } + } + + @Test + public void testPartialOrderForTupleProjectionPlanBug7352() throws Exception { + doTestPartialOrderForTupleProjectionPlanBug7352(false, false); + doTestPartialOrderForTupleProjectionPlanBug7352(false, true); + doTestPartialOrderForTupleProjectionPlanBug7352(true, false); + doTestPartialOrderForTupleProjectionPlanBug7352(true, true); + } + + private void doTestPartialOrderForTupleProjectionPlanBug7352(boolean desc, boolean salted) + throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String tableName = generateUniqueName(); + String sql = "create table " + tableName + "( " + " pk1 char(20) not null , " + + " pk2 char(20) not null, " + " pk3 char(20) not null," + " v1 varchar, " + " v2 varchar, " + + " v3 varchar, " + " CONSTRAINT TEST_PK PRIMARY KEY ( " + " pk1 " + (desc ? "desc" : "") + + ", " + " pk2 " + (desc ? "desc" : "") + ", " + " pk3 " + (desc ? "desc" : "") + " )) " + + (salted ? 
"SALT_BUCKETS =4" : ""); + conn.createStatement().execute(sql); + + sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2,t.v1 limit 10) a order by v2"; + ClientScanPlan plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + List outputOrderBys = + ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + OrderBy outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1")); + + sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2 desc,t.v1 desc limit 10) a order by v2 desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); + + sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2 desc,t.v1 desc, t.v3 desc limit 10) a order by v2 desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); + + sql = "select pk3, v1, v2 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2 desc,t.v1 desc, t.v3 asc limit 10) a order by v2 desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V2 DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V1 DESC")); + + sql = "select v2,cnt from (select count(pk3) cnt,v1,v2 from " + tableName + + " t where pk1 = '6' group by t.v1,t.v2,t.v3 limit 10) a order by v1"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("V1")); + 
assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("V2")); + + sql = + "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " + + tableName + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " + + " order by count(pk3) desc,t.v2 desc,t.v3 desc limit 10) a order by cnt desc ,sub desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("CNT DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC")); + + sql = + "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " + + tableName + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " + + " order by count(pk3) desc,t.v2 desc,t.v3 asc limit 10) a order by cnt desc ,sub desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("CNT DESC")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC")); + + sql = + "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " + + tableName + " t where pk1 = '6' group by t.v1 ,t.v2, t.v3 " + + " order by t.v2 desc, count(pk3) desc, t.v3 desc limit 10) a order by sub desc, cnt desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("SUB DESC")); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CNT DESC")); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC")); + + sql = + "select sub, pk2Cnt from (select substr(v2,0,2) sub,cast (count(pk3) as bigint) cnt, count(pk2) pk2Cnt from " + + tableName + " t where pk1 = '6' group by v1 ,v2, v3 " + + " order by t.v2 desc, count(pk3) desc, t.v3 asc limit 10) a order by sub desc, cnt desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 2); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("SUB DESC")); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(1).toString().equals("CNT DESC")); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); + 
assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC")); + + sql = "select v1, pk3, v2 from (select v1,v2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2, t.v1, t.v3 limit 10) a order by v1"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("V1")); + + sql = "select pk3, pk1, pk2 from (select pk1,pk2,pk3 from " + tableName + + " t where pk1 = '6' order by t.v2, t.v1, t.v3 limit 10) a order by pk3"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() == 1); + assertTrue(plan.getOrderBy().getOrderByExpressions().get(0).toString().equals("PK3")); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 0); + + sql = "select sub, v1 from (select substr(pk3,0,2) sub, pk2, v1 from " + tableName + + " t where pk1 = '6' order by pk2, pk3 limit 10) a order by pk2 desc ,sub desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + // Here because for subquery, there is no OrderBy REV_ROW_KEY_ORDER_BY + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + if (desc) { + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "PK2", true, + true); + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(1), "SUB", true, + true); + } else { + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("PK2")); + assertTrue(outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB")); + } + + sql = "select sub, v1 from (select substr(pk3,0,2) sub, pk2, v1 from " + tableName + + " t where pk1 = '6' order by pk2 desc, pk3 desc limit 10) a order by pk2 desc ,sub desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 2); + if (desc) { + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "PK2", false, + false); + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(1), "SUB", false, + false); + } else { + assertTrue( + outputOrderBy.getOrderByExpressions().get(0).toString().equals("PK2 DESC NULLS LAST")); + assertTrue( + outputOrderBy.getOrderByExpressions().get(1).toString().equals("SUB DESC NULLS LAST")); + } + + sql = "select sub, v1 from (select substr(pk2,0,2) sub, pk3, v1 from " + tableName + + " t where pk1 = '6' order by pk2, pk3 limit 10) a order by sub desc ,pk3 desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + // Here because for subquery, there is no OrderBy REV_ROW_KEY_ORDER_BY + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + 
assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); + if (desc) { + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "SUB", true, + true); + } else { + assertTrue(outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB")); + } + + sql = "select sub, v1 from (select substr(pk2,0,2) sub, pk3, v1 from " + tableName + + " t where pk1 = '6' order by pk2 desc, pk3 desc limit 10) a order by sub desc,pk3 desc"; + plan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getOrderBy().getOrderByExpressions().size() > 0); + outputOrderBys = ((TupleProjectionPlan) (plan.getDelegate())).getOutputOrderBys(); + assertTrue(outputOrderBys.size() == 1); + outputOrderBy = outputOrderBys.get(0); + assertTrue(outputOrderBy.getOrderByExpressions().size() == 1); + if (desc) { + assertOrderByForDescExpression(outputOrderBy.getOrderByExpressions().get(0), "SUB", false, + false); + } else { + assertTrue( + outputOrderBy.getOrderByExpressions().get(0).toString().equals("SUB DESC NULLS LAST")); + } + } + } + + private static void assertOrderByForDescExpression(OrderByExpression orderByExpression, + String strExpression, boolean isNullsLast, boolean isAscending) { + assertEquals(strExpression, orderByExpression.getExpression().toString()); + assertEquals(isNullsLast, orderByExpression.isNullsLast()); + assertEquals(isAscending, orderByExpression.isAscending()); + } + + @Test + public void testUnionAllOrderByOptimizeBug7397() throws Exception { + Properties props = new Properties(); + props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + String tableName1 = generateUniqueName(); + String sql1 = "create table " + tableName1 + "( " + " fuid UNSIGNED_LONG not null , " + + " fstatsdate UNSIGNED_LONG not null, " + " fversion UNSIGNED_LONG not null," + + " faid_1 UNSIGNED_LONG not null," + " clk_pv_1 UNSIGNED_LONG, " + + " activation_pv_1 UNSIGNED_LONG, " + " CONSTRAINT TEST_PK PRIMARY KEY ( " + " fuid , " + + " fstatsdate, " + " fversion, " + " faid_1 " + " ))"; + conn.createStatement().execute(sql1); + + String tableName2 = generateUniqueName(); + String sql2 = "create table " + tableName2 + "( " + " fuid UNSIGNED_LONG not null , " + + " fstatsdate UNSIGNED_LONG not null, " + " fversion UNSIGNED_LONG not null," + + " faid_2 UNSIGNED_LONG not null," + " clk_pv_2 UNSIGNED_LONG, " + + " activation_pv_2 UNSIGNED_LONG, " + " CONSTRAINT TEST_PK PRIMARY KEY ( " + " fuid , " + + " fstatsdate, " + " fversion," + " faid_2 " + " ))"; + conn.createStatement().execute(sql2); + + String orderedUnionSql = "(SELECT FUId AS advertiser_id," + " FAId_1 AS adgroup_id," + + " FStatsDate AS date," + " SUM(clk_pv_1) AS valid_click_count," + + " SUM(activation_pv_1) AS activated_count" + " FROM " + tableName1 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + + " GROUP BY FUId, FAId_1, FStatsDate" + " UNION ALL " + " SELECT " + + " FUId AS advertiser_id," + " FAId_2 AS adgroup_id," + " FStatsDate AS date," + + " SUM(clk_pv_2) AS valid_click_count," + " SUM(activation_pv_2) AS activated_count" + + " FROM " + tableName2 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + + " GROUP BY FUId, FAId_2, FStatsDate" + ")"; + + // Test group by orderPreserving + String sql = "SELECT 
ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS adgroup_id," + + "DATE AS i_date," + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + orderedUnionSql + + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " + + "ORDER BY advertiser_id, adgroup_id, i_date " + "limit 10"; + ClientAggregatePlan plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + UnionPlan unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + List orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + OrderBy orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + + // Test group by orderPreserving for distinct + sql = "SELECT distinct ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS adgroup_id," + + "DATE AS i_date " + "FROM " + orderedUnionSql + + "ORDER BY advertiser_id, adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + + // Test group by not orderPreserving + sql = "SELECT ADVERTISER_ID AS i_advertiser_id," + "ADGROUP_ID AS i_adgroup_id," + + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + orderedUnionSql + + "GROUP BY I_ADVERTISER_ID, ADGROUP_ID " + "ORDER BY i_adgroup_id " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() != OrderBy.FWD_ROW_KEY_ORDER_BY); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test group by not orderPreserving + sql = "SELECT ADGROUP_ID AS adgroup_id," + "DATE AS i_date," + + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + orderedUnionSql + + "GROUP BY ADGROUP_ID, I_DATE " + "ORDER BY adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + 
assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test group by orderPreserving with where + sql = "SELECT ADGROUP_ID AS adgroup_id," + "DATE AS i_date," + + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + orderedUnionSql + + " where advertiser_id = 1 " + "GROUP BY ADGROUP_ID, I_DATE " + + "ORDER BY adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + + // Test order by orderPreserving + sql = + "SELECT ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS adgroup_id," + "DATE AS i_date," + + "VALID_CLICK_COUNT AS valid_click_count," + "ACTIVATED_COUNT AS activated_count " + + "FROM " + orderedUnionSql + "ORDER BY advertiser_id, i_date, adgroup_id " + "limit 10"; + ClientScanPlan scanPlan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(scanPlan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (scanPlan.getDelegate())).getDelegate(); + assertTrue(unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + + // Test order by not orderPreserving + sql = "SELECT ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS i_adgroup_id," + + "DATE AS date," + "VALID_CLICK_COUNT AS valid_click_count," + + "ACTIVATED_COUNT AS activated_count " + "FROM " + orderedUnionSql + + "ORDER BY advertiser_id, i_adgroup_id, date, valid_click_count " + "limit 10"; + scanPlan = (ClientScanPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!scanPlan.getOrderBy().isEmpty()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (scanPlan.getDelegate())).getDelegate(); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test there is no order in union + sql = "SELECT ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS adgroup_id," + + "DATE AS i_date," + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + "(SELECT FUId AS advertiser_id," + + " FAId_1 AS adgroup_id," + " FStatsDate AS date," + + " clk_pv_1 AS valid_click_count," + " activation_pv_1 AS 
activated_count" + " FROM " + + tableName1 + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + " UNION ALL " + + " SELECT " + " FUId AS advertiser_id," + " FAId_2 AS adgroup_id," + + " FStatsDate AS date," + " clk_pv_2 AS valid_click_count," + + " activation_pv_2 AS activated_count" + " FROM " + tableName2 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + ")" + + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " + + "ORDER BY advertiser_id, adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test alias not inconsistent in union + sql = "SELECT ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID_1 AS adgroup_id," + + "DATE AS i_date," + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + "(SELECT FUId AS advertiser_id," + + " FAId_1 AS adgroup_id_1," + " FStatsDate AS date," + + " SUM(clk_pv_1) AS valid_click_count," + " SUM(activation_pv_1) AS activated_count" + + " FROM " + tableName1 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + + " GROUP BY FUId, FAId_1, FStatsDate" + " UNION ALL " + " SELECT " + + " FUId AS advertiser_id," + " FAId_2," + " FStatsDate AS date," + " SUM(clk_pv_2)," + + " SUM(activation_pv_2)" + " FROM " + tableName2 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + + " GROUP BY FUId, FAId_2, FStatsDate" + ")" + + "GROUP BY ADVERTISER_ID, ADGROUP_ID_1, I_DATE " + + "ORDER BY advertiser_id, adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(plan.getGroupBy().isOrderPreserving()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID_1")); + + // Test order by column not equals in union + sql = "SELECT ADVERTISER_ID AS advertiser_id," + "ADGROUP_ID AS adgroup_id," + + "DATE AS i_date," + "SUM(VALID_CLICK_COUNT) AS valid_click_count," + + "SUM(ACTIVATED_COUNT) AS activated_count " + "FROM " + "(SELECT FUId AS advertiser_id," + + " FAId_1 AS adgroup_id," + " FStatsDate AS date," + + " SUM(clk_pv_1) AS valid_click_count," + " SUM(activation_pv_1) AS activated_count" + + " FROM " + tableName1 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_1 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + + " 
GROUP BY FUId, FAId_1, FStatsDate" + " UNION ALL " + " SELECT " + + " FUId AS advertiser_id," + " FAId_2 AS adgroup_id," + + " cast (0 as UNSIGNED_LONG) AS date," + " SUM(clk_pv_2) AS valid_click_count," + + " SUM(activation_pv_2) AS activated_count" + " FROM " + tableName2 + + " WHERE (FVersion = 1) AND (FUId IN (1)) AND (FAId_2 IN (11, 22, 33, 10))" + + " AND (FStatsDate >= 20240710) AND (FStatsDate <= 20240718)" + " GROUP BY FUId, FAId_2" + + ")" + "GROUP BY ADVERTISER_ID, ADGROUP_ID, I_DATE " + + "ORDER BY advertiser_id, adgroup_id, i_date " + "limit 10"; + plan = (ClientAggregatePlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!plan.getGroupBy().isOrderPreserving()); + unionPlan = (UnionPlan) ((TupleProjectionPlan) (plan.getDelegate())).getDelegate(); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test only union + sql = orderedUnionSql.substring(1, orderedUnionSql.length() - 1); + unionPlan = (UnionPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(!(unionPlan.iterator() instanceof MergeSortTopNResultIterator)); + assertTrue(unionPlan.getOutputOrderBys().isEmpty()); + + // Test only union and order by match + sql = orderedUnionSql.substring(1, orderedUnionSql.length() - 1) + + " order by advertiser_id, date, adgroup_id"; + unionPlan = (UnionPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + assertTrue(unionPlan.getSubPlans().stream() + .allMatch(p -> p.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY)); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 3); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + + // Test only union and order by not match + sql = orderedUnionSql.substring(1, orderedUnionSql.length() - 1) + + " order by advertiser_id, date, adgroup_id, valid_click_count"; + unionPlan = (UnionPlan) TestUtil.getOptimizeQueryPlan(conn, sql); + assertTrue(!unionPlan.isSupportOrderByOptimize()); + assertTrue(unionPlan.iterator() instanceof MergeSortTopNResultIterator); + assertTrue(unionPlan.getSubPlans().stream() + .noneMatch(p -> p.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY)); + orderBys = unionPlan.getOutputOrderBys(); + assertTrue(orderBys.size() == 1); + orderBy = orderBys.get(0); + assertTrue(orderBy.getOrderByExpressions().size() == 4); + assertTrue(orderBy.getOrderByExpressions().get(0).toString().equals("ADVERTISER_ID")); + assertTrue(orderBy.getOrderByExpressions().get(1).toString().equals("DATE")); + assertTrue(orderBy.getOrderByExpressions().get(2).toString().equals("ADGROUP_ID")); + assertTrue(orderBy.getOrderByExpressions().get(3).toString().equals("VALID_CLICK_COUNT")); + } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java index 7b2edbe0995..86f01856b3a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryMetaDataTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,413 +32,460 @@ import org.apache.phoenix.util.TestUtil; import org.junit.Test; - - /** - * * Tests for getting PreparedStatement meta data - * - * * @since 0.1 */ public class QueryMetaDataTest extends BaseConnectionlessQueryTest { - @Test - public void testNoParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE organization_id='000000000000000'"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(0, pmd.getParameterCount()); - } + @Test + public void testNoParameterMetaData() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE organization_id='000000000000000'"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(0, pmd.getParameterCount()); + } - @Test - public void testCaseInsensitive() throws Exception { - String query = "SELECT A_string, b_striNG FROM ataBle WHERE ORGANIZATION_ID='000000000000000'"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(0, pmd.getParameterCount()); - } + @Test + public void testCaseInsensitive() throws Exception { + String query = "SELECT A_string, b_striNG FROM ataBle WHERE ORGANIZATION_ID='000000000000000'"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(0, pmd.getParameterCount()); + } - @Test - public void testParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE organization_id=? and (a_integer = ? or a_date = ? or b_string = ? or a_string = 'foo')"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(4, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - assertEquals(Date.class.getName(), pmd.getParameterClassName(3)); - assertEquals(String.class.getName(), pmd.getParameterClassName(4)); - } + @Test + public void testParameterMetaData() throws Exception { + String query = + "SELECT a_string, b_string FROM atable WHERE organization_id=? and (a_integer = ? or a_date = ? or b_string = ? 
or a_string = 'foo')"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(4, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + assertEquals(Date.class.getName(), pmd.getParameterClassName(3)); + assertEquals(String.class.getName(), pmd.getParameterClassName(4)); + } - @Test - public void testUpsertParameterMetaData() throws Exception { - String query = "UPSERT INTO atable VALUES (?, ?, ?, ?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(5, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - assertEquals(String.class.getName(), pmd.getParameterClassName(2)); - assertEquals(String.class.getName(), pmd.getParameterClassName(3)); - assertEquals(String.class.getName(), pmd.getParameterClassName(4)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(5)); - } + @Test + public void testUpsertParameterMetaData() throws Exception { + String query = "UPSERT INTO atable VALUES (?, ?, ?, ?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(5, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + assertEquals(String.class.getName(), pmd.getParameterClassName(2)); + assertEquals(String.class.getName(), pmd.getParameterClassName(3)); + assertEquals(String.class.getName(), pmd.getParameterClassName(4)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(5)); + } - @Test - public void testToDateFunctionMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE a_date > to_date(?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testToDateFunctionMetaData() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE a_date > to_date(?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testLimitParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE organization_id=? 
and a_string = 'foo' LIMIT ?"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - } + @Test + public void testLimitParameterMetaData() throws Exception { + String query = + "SELECT a_string, b_string FROM atable WHERE organization_id=? and a_string = 'foo' LIMIT ?"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + } - @Test - public void testRoundParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE round(a_date,'day', ?) = ?"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Date.class.getName(), pmd.getParameterClassName(2)); - } + @Test + public void testRoundParameterMetaData() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE round(a_date,'day', ?) = ?"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Date.class.getName(), pmd.getParameterClassName(2)); + } - @Test - public void testInListParameterMetaData1() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE a_string IN (?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - assertEquals(String.class.getName(), pmd.getParameterClassName(2)); - } + @Test + public void testInListParameterMetaData1() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE a_string IN (?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + assertEquals(String.class.getName(), pmd.getParameterClassName(2)); + } - @Test - public void testInListParameterMetaData2() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE ? 
IN (2.2, 3)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testInListParameterMetaData2() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE ? IN (2.2, 3)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testInListParameterMetaData3() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE ? IN ('foo')"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testInListParameterMetaData3() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE ? IN ('foo')"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testInListParameterMetaData4() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE ? IN (?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(3, pmd.getParameterCount()); - assertEquals(null, pmd.getParameterClassName(1)); - assertEquals(null, pmd.getParameterClassName(2)); - assertEquals(null, pmd.getParameterClassName(3)); - } + @Test + public void testInListParameterMetaData4() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE ? IN (?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(3, pmd.getParameterCount()); + assertEquals(null, pmd.getParameterClassName(1)); + assertEquals(null, pmd.getParameterClassName(2)); + assertEquals(null, pmd.getParameterClassName(3)); + } - @Test - public void testCaseMetaData() throws Exception { - String query1 = "SELECT a_string, b_string FROM atable WHERE case when a_integer = 1 then ? 
when a_integer > 2 then 2 end > 3"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query1); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - assertEquals(ParameterMetaData.parameterNullable, pmd.isNullable(1)); - - String query2 = "SELECT a_string, b_string FROM atable WHERE case when a_integer = 1 then 1 when a_integer > 2 then 2 end > ?"; - PreparedStatement statement2 = conn.prepareStatement(query2); - ParameterMetaData pmd2 = statement2.getParameterMetaData(); - assertEquals(1, pmd2.getParameterCount()); - assertEquals(Integer.class.getName(), pmd2.getParameterClassName(1)); - assertEquals(ParameterMetaData.parameterNullable, pmd2.isNullable(1)); - } + @Test + public void testCaseMetaData() throws Exception { + String query1 = + "SELECT a_string, b_string FROM atable WHERE case when a_integer = 1 then ? when a_integer > 2 then 2 end > 3"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query1); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + assertEquals(ParameterMetaData.parameterNullable, pmd.isNullable(1)); - @Test - public void testSubstrParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE substr(a_string,?,?) = ?"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(3, pmd.getParameterCount()); - assertEquals(Long.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Long.class.getName(), pmd.getParameterClassName(2)); - assertEquals(String.class.getName(), pmd.getParameterClassName(3)); - } + String query2 = + "SELECT a_string, b_string FROM atable WHERE case when a_integer = 1 then 1 when a_integer > 2 then 2 end > ?"; + PreparedStatement statement2 = conn.prepareStatement(query2); + ParameterMetaData pmd2 = statement2.getParameterMetaData(); + assertEquals(1, pmd2.getParameterCount()); + assertEquals(Integer.class.getName(), pmd2.getParameterClassName(1)); + assertEquals(ParameterMetaData.parameterNullable, pmd2.isNullable(1)); + } - @Test - public void testKeyPrefixParameterMetaData() throws Exception { - String query = "SELECT a_string, b_string FROM atable WHERE organization_id='000000000000000' and substr(entity_id,1,3)=? 
and a_string = 'foo'"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - } - - @Test - public void testDateSubstractExpressionMetaData1() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where a_date-2.5-?=a_date"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testSubstrParameterMetaData() throws Exception { + String query = "SELECT a_string, b_string FROM atable WHERE substr(a_string,?,?) = ?"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(3, pmd.getParameterCount()); + assertEquals(Long.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Long.class.getName(), pmd.getParameterClassName(2)); + assertEquals(String.class.getName(), pmd.getParameterClassName(3)); + } - @Test - public void testDateSubstractExpressionMetaData2() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where a_date-?=a_date"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - // FIXME: Should really be Date, but we currently don't know if we're - // comparing to a date or a number where this is being calculated - // (which would disambiguate it). - assertEquals(null, pmd.getParameterClassName(1)); - } + @Test + public void testKeyPrefixParameterMetaData() throws Exception { + String query = + "SELECT a_string, b_string FROM atable WHERE organization_id='000000000000000' and substr(entity_id,1,3)=? and a_string = 'foo'"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testDateSubstractExpressionMetaData3() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where a_date-?=a_integer"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - // FIXME: Should really be Integer, but we currently don't know if we're - // comparing to a date or a number where this is being calculated - // (which would disambiguate it). 
- assertEquals(null, pmd.getParameterClassName(1)); - } + @Test + public void testDateSubstractExpressionMetaData1() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where a_date-2.5-?=a_date"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testTwoDateSubstractExpressionMetaData() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where ?-a_date=1"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - // We know this must be date - anything else would be an error - assertEquals(Date.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testDateSubstractExpressionMetaData2() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where a_date-?=a_date"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + // FIXME: Should really be Date, but we currently don't know if we're + // comparing to a date or a number where this is being calculated + // (which would disambiguate it). + assertEquals(null, pmd.getParameterClassName(1)); + } - @Test - public void testDateAdditionExpressionMetaData1() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where 1+a_date+?>a_date"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testDateSubstractExpressionMetaData3() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where a_date-?=a_integer"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + // FIXME: Should really be Integer, but we currently don't know if we're + // comparing to a date or a number where this is being calculated + // (which would disambiguate it). 
+ assertEquals(null, pmd.getParameterClassName(1)); + } - @Test - public void testDateAdditionExpressionMetaData2() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where ?+a_date>a_date"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - } + @Test + public void testTwoDateSubstractExpressionMetaData() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where ?-a_date=1"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + // We know this must be date - anything else would be an error + assertEquals(Date.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testCoerceToDecimalArithmeticMetaData() throws Exception { - String[] ops = { "+", "-", "*", "/" }; - for (String op : ops) { - String query = "SELECT entity_id,a_string FROM atable where a_integer" + op + "2.5" + op + "?=0"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - statement.setInt(1, 4); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); - } - } + @Test + public void testDateAdditionExpressionMetaData1() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where 1+a_date+?>a_date"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testLongArithmeticMetaData() throws Exception { - String[] ops = { "+", "-", "*", "/" }; - for (String op : ops) { - String query = "SELECT entity_id,a_string FROM atable where a_integer" + op + "2" + op + "?=0"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - statement.setInt(1, 4); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(Long.class.getName(), pmd.getParameterClassName(1)); - } - } + @Test + public void testDateAdditionExpressionMetaData2() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where ?+a_date>a_date"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + } - @Test - public void testBasicResultSetMetaData() throws Exception { - String query = "SELECT organization_id, a_string, b_string, 
a_integer i, a_date FROM atable WHERE organization_id='000000000000000' and substr(entity_id,1,3)=? and a_string = 'foo'"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ResultSetMetaData md = statement.getMetaData(); - assertEquals(5, md.getColumnCount()); - - assertEquals("organization_id".toUpperCase(),md.getColumnName(1)); - assertEquals("a_string".toUpperCase(),md.getColumnName(2)); - assertEquals("b_string".toUpperCase(),md.getColumnName(3)); - assertEquals("a_integer".toUpperCase(),md.getColumnName(4)); - assertEquals("i".toUpperCase(),md.getColumnLabel(4)); - assertEquals("a_date".toUpperCase(),md.getColumnName(5)); - - assertEquals(String.class.getName(),md.getColumnClassName(1)); - assertEquals(String.class.getName(),md.getColumnClassName(2)); - assertEquals(String.class.getName(),md.getColumnClassName(3)); - assertEquals(Integer.class.getName(),md.getColumnClassName(4)); - assertEquals(Date.class.getName(),md.getColumnClassName(5)); - - assertEquals("atable".toUpperCase(),md.getTableName(1)); - assertEquals(java.sql.Types.INTEGER,md.getColumnType(4)); - assertEquals(true,md.isReadOnly(1)); - assertEquals(false,md.isDefinitelyWritable(1)); - assertEquals("i".toUpperCase(),md.getColumnLabel(4)); - assertEquals("a_date".toUpperCase(),md.getColumnLabel(5)); - assertEquals(ResultSetMetaData.columnNoNulls,md.isNullable(1)); - assertEquals(ResultSetMetaData.columnNullable,md.isNullable(5)); - } - @Test - public void testStringConcatMetaData() throws Exception { - String query = "SELECT entity_id,a_string FROM atable where 2 || a_integer || ? like '2%'"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - statement.setString(1, "foo"); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(1, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + @Test + public void testCoerceToDecimalArithmeticMetaData() throws Exception { + String[] ops = { "+", "-", "*", "/" }; + for (String op : ops) { + String query = + "SELECT entity_id,a_string FROM atable where a_integer" + op + "2.5" + op + "?=0"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + statement.setInt(1, 4); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(BigDecimal.class.getName(), pmd.getParameterClassName(1)); + } + } - } - - @Test - public void testRowValueConstructorBindParamMetaData() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer, a_string) = (?, ?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(3, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - assertEquals(String.class.getName(), pmd.getParameterClassName(3)); - } - - @Test - public void testRowValueConstructorBindParamMetaDataWithMoreNumberOfBindArgs() throws Exception { - String query = "SELECT 
a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) = (?, ?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(3, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - assertEquals(null, pmd.getParameterClassName(3)); - } - - @Test - public void testRowValueConstructorBindParamMetaDataWithLessNumberOfBindArgs() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer, a_string) = (?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - } - - @Test - public void testRowValueConstructorBindParamMetaDataWithBindArgsAtSamePlacesOnLHSRHS() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, ?) = (a_integer, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(null, pmd.getParameterClassName(1)); - assertEquals(null, pmd.getParameterClassName(2)); - } - - @Test - public void testRowValueConstructorBindParamMetaDataWithBindArgsAtDiffPlacesOnLHSRHS() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, ?) = (?, a_integer)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); - } - - // @Test broken currently, as we'll end up with null = 7 which is never true - public void testRowValueConstructorBindParamMetaDataWithBindArgsOnLHSAndLiteralExprOnRHS() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE (?, ?) 
= 7"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(null, pmd.getParameterClassName(2)); - } - - @Test - public void testRowValueConstructorBindParamMetaDataWithBindArgsOnRHSAndLiteralExprOnLHS() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE 7 = (?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(null, pmd.getParameterClassName(2)); - } - - @Test - public void testNonEqualityRowValueConstructorBindParamMetaDataWithBindArgsOnRHSAndLiteralExprOnLHS() throws Exception { - String query = "SELECT a_integer, x_integer FROM aTable WHERE 7 >= (?, ?)"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(2, pmd.getParameterCount()); - assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); - assertEquals(null, pmd.getParameterClassName(2)); - } - - @Test - public void testBindParamMetaDataForNestedRVC() throws Exception { - String query = "SELECT organization_id, entity_id, a_string FROM aTable WHERE (organization_id, (entity_id, a_string)) >= (?, (?, ?))"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - PreparedStatement statement = conn.prepareStatement(query); - ParameterMetaData pmd = statement.getParameterMetaData(); - assertEquals(3, pmd.getParameterCount()); - assertEquals(String.class.getName(), pmd.getParameterClassName(1)); - assertEquals(String.class.getName(), pmd.getParameterClassName(2)); - assertEquals(String.class.getName(), pmd.getParameterClassName(3)); - } + @Test + public void testLongArithmeticMetaData() throws Exception { + String[] ops = { "+", "-", "*", "/" }; + for (String op : ops) { + String query = + "SELECT entity_id,a_string FROM atable where a_integer" + op + "2" + op + "?=0"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + statement.setInt(1, 4); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(Long.class.getName(), pmd.getParameterClassName(1)); + } + } + + @Test + public void testBasicResultSetMetaData() throws Exception { + String query = + "SELECT organization_id, a_string, b_string, a_integer i, a_date FROM atable WHERE organization_id='000000000000000' and substr(entity_id,1,3)=? 
and a_string = 'foo'"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ResultSetMetaData md = statement.getMetaData(); + assertEquals(5, md.getColumnCount()); + + assertEquals("organization_id".toUpperCase(), md.getColumnName(1)); + assertEquals("a_string".toUpperCase(), md.getColumnName(2)); + assertEquals("b_string".toUpperCase(), md.getColumnName(3)); + assertEquals("a_integer".toUpperCase(), md.getColumnName(4)); + assertEquals("i".toUpperCase(), md.getColumnLabel(4)); + assertEquals("a_date".toUpperCase(), md.getColumnName(5)); + + assertEquals(String.class.getName(), md.getColumnClassName(1)); + assertEquals(String.class.getName(), md.getColumnClassName(2)); + assertEquals(String.class.getName(), md.getColumnClassName(3)); + assertEquals(Integer.class.getName(), md.getColumnClassName(4)); + assertEquals(Date.class.getName(), md.getColumnClassName(5)); + + assertEquals("atable".toUpperCase(), md.getTableName(1)); + assertEquals(java.sql.Types.INTEGER, md.getColumnType(4)); + assertEquals(true, md.isReadOnly(1)); + assertEquals(false, md.isDefinitelyWritable(1)); + assertEquals("i".toUpperCase(), md.getColumnLabel(4)); + assertEquals("a_date".toUpperCase(), md.getColumnLabel(5)); + assertEquals(ResultSetMetaData.columnNoNulls, md.isNullable(1)); + assertEquals(ResultSetMetaData.columnNullable, md.isNullable(5)); + } + + @Test + public void testStringConcatMetaData() throws Exception { + String query = "SELECT entity_id,a_string FROM atable where 2 || a_integer || ? like '2%'"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + statement.setString(1, "foo"); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(1, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + + } + + @Test + public void testRowValueConstructorBindParamMetaData() throws Exception { + String query = + "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer, a_string) = (?, ?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(3, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + assertEquals(String.class.getName(), pmd.getParameterClassName(3)); + } + + @Test + public void testRowValueConstructorBindParamMetaDataWithMoreNumberOfBindArgs() throws Exception { + String query = + "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, x_integer) = (?, ?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(3, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + assertEquals(null, pmd.getParameterClassName(3)); + } + + @Test + public void testRowValueConstructorBindParamMetaDataWithLessNumberOfBindArgs() throws Exception { + String query = + "SELECT a_integer, 
x_integer FROM aTable WHERE (a_integer, x_integer, a_string) = (?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + } + + @Test + public void testRowValueConstructorBindParamMetaDataWithBindArgsAtSamePlacesOnLHSRHS() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, ?) = (a_integer, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(null, pmd.getParameterClassName(1)); + assertEquals(null, pmd.getParameterClassName(2)); + } + + @Test + public void testRowValueConstructorBindParamMetaDataWithBindArgsAtDiffPlacesOnLHSRHS() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE (a_integer, ?) = (?, a_integer)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(2)); + } + + // @Test broken currently, as we'll end up with null = 7 which is never true + public void testRowValueConstructorBindParamMetaDataWithBindArgsOnLHSAndLiteralExprOnRHS() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE (?, ?) 
= 7"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(null, pmd.getParameterClassName(2)); + } + + @Test + public void testRowValueConstructorBindParamMetaDataWithBindArgsOnRHSAndLiteralExprOnLHS() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE 7 = (?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(null, pmd.getParameterClassName(2)); + } + + @Test + public void + testNonEqualityRowValueConstructorBindParamMetaDataWithBindArgsOnRHSAndLiteralExprOnLHS() + throws Exception { + String query = "SELECT a_integer, x_integer FROM aTable WHERE 7 >= (?, ?)"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(2, pmd.getParameterCount()); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(1)); + assertEquals(null, pmd.getParameterClassName(2)); + } + + @Test + public void testBindParamMetaDataForNestedRVC() throws Exception { + String query = + "SELECT organization_id, entity_id, a_string FROM aTable WHERE (organization_id, (entity_id, a_string)) >= (?, (?, ?))"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + PreparedStatement statement = conn.prepareStatement(query); + ParameterMetaData pmd = statement.getParameterMetaData(); + assertEquals(3, pmd.getParameterCount()); + assertEquals(String.class.getName(), pmd.getParameterClassName(1)); + assertEquals(String.class.getName(), pmd.getParameterClassName(2)); + assertEquals(String.class.getName(), pmd.getParameterClassName(3)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java index 1e47071d49e..403318e834d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -52,6 +52,8 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PColumn; import org.apache.phoenix.schema.PTableType; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.QueryUtil; @@ -60,785 +62,854 @@ import org.junit.Ignore; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.base.Splitter; - public class QueryOptimizerTest extends BaseConnectionlessQueryTest { - - public static final String SCHEMA_NAME = ""; - public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T"); - - public QueryOptimizerTest() { - } - - @Test - public void testRVCUsingPkColsReturnedByPlanShouldUseIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 CHAR(15), v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - String query = "select * from t where (v1, v2, k) > ('1', '2', '3')"; - QueryPlan plan = stmt.optimizeQuery(query); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testOrderByOptimizedOut() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY k"); - assertEquals(OrderBy.FWD_ROW_KEY_ORDER_BY,plan.getOrderBy()); - } - - @Test - public void testOrderByDropped() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try{ - conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 'a','b','c'"); - assertTrue(plan.getOrderBy().getOrderByExpressions().isEmpty()); - } finally { - conn.close(); - } - } - - @Test - public void testOrderByNotDropped() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY v"); - assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); - } - - @Test - public void testOrderByDroppedCompositeKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE foo (j INTEGER NOT NULL, k BIGINT NOT NULL, v VARCHAR CONSTRAINT pk PRIMARY KEY (j,k)) IMMUTABLE_ROWS=true"); - PhoenixStatement stmt = 
conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY j,k"); - assertEquals(OrderBy.FWD_ROW_KEY_ORDER_BY,plan.getOrderBy()); - } - - @Test - public void testOrderByNotDroppedCompositeKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE foo (j INTEGER NOT NULL, k BIGINT NOT NULL, v VARCHAR CONSTRAINT pk PRIMARY KEY (j,k)) IMMUTABLE_ROWS=true"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY k,j"); - assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); - } - - @Test - public void testChooseIndexOverTable() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'bar'"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseTableOverIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT v1 FROM t WHERE k = 1"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseTableForSelection() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT v1,v2 FROM t WHERE v1 = 'bar'"); - // Choose T because v2 is not in index - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseTableForDynCols() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t(v3 VARCHAR) WHERE v1 = 'bar'"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseTableForSelectionStar() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM t WHERE v1 = 'bar'"); - // Choose T because v2 is not in index - 
assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexEvenWithSelectionStar() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1) INCLUDE (v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM t WHERE v1 = 'bar'"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexFromOrderBy() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1 LIMIT 5"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChoosePointLookupOverOrderByRemoval() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1 LIMIT 5"); // Prefer - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexFromOrderByDesc() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k DESC LIMIT 5"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseTableFromOrderByAsc() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k LIMIT 5"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexFromOrderByAsc() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1, k)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k LIMIT 5"); - 
assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChoosePointLookupOverOrderByDesc() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1, k)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1, k LIMIT 5"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - - @Test - public void testChooseIndexWithLongestRowKey() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testIgnoreIndexesBasedOnHint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+NO_INDEX*/ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexFromHint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(t idx1) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); - plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); - } - @Test - public void testChooseIndexFromCaseSensitiveHint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE \"t\" (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON \"t\"(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON \"t\"(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(\"t\" idx1) */ k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX1", 
plan.getTableRef().getTable().getTableName().getString()); - plan = stmt.optimizeQuery("SELECT k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testChooseIndexFromCaseSensitiveHint2() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE \"t\" (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX \"idx1\" ON \"t\"(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX \"idx2\" ON \"t\"(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(\"t\" \"idx1\") */ k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("idx1", plan.getTableRef().getTable().getTableName().getString()); - plan = stmt.optimizeQuery("SELECT k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("idx2", plan.getTableRef().getTable().getTableName().getString()); - } - - - @Test - public void testChooseIndexFromDoubleQuotedHint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(t \"IDX1\") INDEX(t idx3) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testIndexHintParsing() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); - conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(t idx3 idx4 \"idx5\") INDEX(t idx6 idx1) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); - assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testDataTableOverIndexHint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1,v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ " + Hint.USE_DATA_OVER_INDEX_TABLE + " */ * FROM t"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - // unhinted still uses index - plan = stmt.optimizeQuery("SELECT * FROM t"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - // hinting with a WHERE clause still uses the index - plan = stmt.optimizeQuery("SELECT /*+ " + Hint.USE_DATA_OVER_INDEX_TABLE + " */ * FROM t WHERE v1 = 'foo'"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - // Tests that a DELETE without a 
WHERE clause uses the data table (for parallel deletion on server side) - // DELETE with a WHERE clause should use the index on the client side - @Test - public void testDelete() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1,v2)"); - conn.setAutoCommit(true); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - SQLParser parser = new SQLParser("DELETE FROM t"); - DeleteStatement delete = (DeleteStatement) parser.parseStatement(); - DeleteCompiler compiler = new DeleteCompiler(stmt, null); - MutationPlan plan = compiler.compile(delete); - assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); - assertEquals(plan.getClass(), DeleteCompiler.ServerSelectDeleteMutationPlan.class); - parser = new SQLParser("DELETE FROM t WHERE v1 = 'foo'"); - delete = (DeleteStatement) parser.parseStatement(); - plan = compiler.compile(delete); - assertEquals("IDX", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); - assertEquals(plan.getClass(), DeleteCompiler.ClientSelectDeleteMutationPlan.class); - } - - @Test - public void testChooseSmallerTable() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT count(*) FROM t"); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testRVCForTableWithSecondaryIndexBasic() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - String query = "select * from t where (v1, v2) <= ('1', '2')"; - QueryPlan plan = stmt.optimizeQuery(query); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testRVCAllColsForTableWithSecondaryIndexBasic() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - String query = "select * from t where (k, v1, v2) <= ('3', '1', '2')"; - QueryPlan plan = stmt.optimizeQuery(query); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - // Multi-tenant = false; Query uses index = false; Salted = true - public void testAssertQueryPlanDetails1() throws Exception { - testAssertQueryPlanDetails(false, false, true); - } - - @Test - // Multi-tenant = true; Query uses index = false; Salted = true - public void testAssertQueryPlanDetails2() throws Exception { - testAssertQueryPlanDetails(true, false, true); - } - - @Test - // Multi-tenant = true; Query uses index = true; Salted = false - public void testAssertQueryPlanDetails3() throws 
Exception { - testAssertQueryPlanDetails(true, true, true); - } - - @Test - // Multi-tenant = false; Query uses index = true; Salted = true - public void testAssertQueryPlanDetails4() throws Exception { - testAssertQueryPlanDetails(false, true, true); - } - - @Test - // Multi-tenant = false; Query uses index = false; Salted = false - public void testAssertQueryPlanDetails5() throws Exception { - testAssertQueryPlanDetails(false, false, false); - } - - @Test - // Multi-tenant = true; Query uses index = false; Salted = false - public void testAssertQueryPlanDetails6() throws Exception { - testAssertQueryPlanDetails(true, false, false); - } - - @Test - // Multi-tenant = true; Query uses index = true; Salted = false - public void testAssertQueryPlanDetails7() throws Exception { - testAssertQueryPlanDetails(true, true, false); - } - - @Test - // Multi-tenant = false; Query uses index = true; Salted = false - public void testAssertQueryPlanDetails8() throws Exception { - testAssertQueryPlanDetails(false, true, false); - } - - @Test - public void testQueryOptimizerShouldSelectThePlanWithMoreNumberOfPKColumns() throws Exception { - Connection conn1 = DriverManager.getConnection(getUrl()); - Connection conn2 = DriverManager.getConnection(getUrl()); - conn1.createStatement().execute("create table index_test_table (a varchar not null,b varchar not null,c varchar not null,d varchar,e varchar, f varchar constraint pk primary key(a,b,c))"); - conn1.createStatement().execute( - "create index INDEX_TEST_TABLE_INDEX_D on INDEX_TEST_TABLE(A,D) include(B,C,E,F)"); - conn1.createStatement().execute( - "create index INDEX_TEST_TABLE_INDEX_F on INDEX_TEST_TABLE(A,F) include(B,C,D,E)"); - ResultSet rs = conn2.createStatement().executeQuery("explain select * from INDEX_TEST_TABLE where A in ('1','2','3','4','5') and F in ('1111','2222','3333')"); - assertEquals("CLIENT PARALLEL 1-WAY SKIP SCAN ON 15 KEYS OVER INDEX_TEST_TABLE_INDEX_F ['1','1111'] - ['5','3333']", QueryUtil.getExplainPlan(rs)); - } - - @Test - public void testCharArrayLength() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE TEST.TEST (testInt INTEGER, testCharArray CHAR(3)[], testByteArray BINARY(7)[], " + - "CONSTRAINT test_pk PRIMARY KEY(testInt)) DEFAULT_COLUMN_FAMILY='T'"); - conn.createStatement().execute("CREATE INDEX TEST_INDEX ON TEST.TEST (testInt) INCLUDE (testCharArray, testByteArray)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - - QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(TEST.TEST TEST_INDEX)*/ testCharArray,testByteArray FROM TEST.TEST"); - List columns = plan.getTableRef().getTable().getColumns(); - assertEquals(3, columns.size()); - assertEquals(3, columns.get(1).getMaxLength().intValue()); - assertEquals(7, columns.get(2).getMaxLength().intValue()); - } - - private void testAssertQueryPlanDetails(boolean multitenant, boolean useIndex, boolean salted) throws Exception { - String sql; - PreparedStatement stmt; - Connection conn = DriverManager.getConnection(getUrl(), new Properties()); - try { - // create table - conn.createStatement().execute("create table " - + "XYZ.ABC" - + " (organization_id char(15) not null, \n" - + " \"DEC\" DECIMAL(10,2) not null,\n" - + " a_string_array varchar(100) array[] not null,\n" - + " b_string varchar(100),\n" - + " CF.a_integer integer,\n" - + " a_date date,\n" - + " CONSTRAINT pk PRIMARY KEY (organization_id, \"DEC\", a_string_array)\n" - + ")" + (salted ? 
"SALT_BUCKETS=4" : "") + (multitenant == true ? (salted ? ",MULTI_TENANT=true" : "MULTI_TENANT=true") : "")); - - - if (useIndex) { - // create index - conn.createStatement().execute("CREATE INDEX ABC_IDX ON XYZ.ABC (CF.a_integer) INCLUDE (a_date)"); - } - - // switch to a tenant specific connection if multi-tenant. - conn = multitenant ? DriverManager.getConnection(getUrl("tenantId")) : conn; - - // create a tenant specific view if multi-tenant - if (multitenant) { - conn.createStatement().execute("CREATE VIEW ABC_VIEW (ORG_ID VARCHAR) AS SELECT * FROM XYZ.ABC"); - } - - String expectedColNames = multitenant ? addQuotes(null, "DEC,A_STRING_ARRAY") : addQuotes(null,"ORGANIZATION_ID,DEC,A_STRING_ARRAY"); - String expectedColumnNameDataTypes = multitenant ? "\"DEC\" DECIMAL(10,2),\"A_STRING_ARRAY\" VARCHAR(100) ARRAY" : "\"ORGANIZATION_ID\" CHAR(15),\"DEC\" DECIMAL(10,2),\"A_STRING_ARRAY\" VARCHAR(100) ARRAY"; - String tableName = multitenant ? "ABC_VIEW" : "XYZ.ABC"; - String tenantFilter = multitenant ? "" : "organization_id = ? AND "; - String orderByRowKeyClause = multitenant ? "DEC" : "organization_id"; - - // Filter on row key columns of data table. No order by. No limit. - sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + " \"DEC\" = ? and a_string_array = ?"; - stmt = conn.prepareStatement(sql); - int counter = 1; - if (!multitenant) { - stmt.setString(counter++, "ORGID"); - } - stmt.setDouble(counter++, 1.23); - String[] strArray = new String[2]; - strArray[0] = "AB"; - strArray[1] = "CD"; - Array array = conn.createArrayOf("VARCHAR", strArray); - stmt.setArray(counter++, array); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 0); - - counter = 1; - // Filter on row key columns of data table. Order by row key columns. Limit specified. - sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + " \"DEC\" = ? and a_string_array = ? ORDER BY " + orderByRowKeyClause + " LIMIT 100"; - stmt = conn.prepareStatement(sql); - if (!multitenant) { - stmt.setString(counter++, "ORGID"); - } - stmt.setDouble(counter++, 1.23); - array = conn.createArrayOf("VARCHAR", strArray); - stmt.setArray(counter++, array); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 100); - - counter = 1; - // Filter on row key columns of data table. Order by non-row key columns. Limit specified. - sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + " \"DEC\" = ? and a_string_array = ? ORDER BY a_date LIMIT 100"; - stmt = conn.prepareStatement(sql); - if (!multitenant) { - stmt.setString(counter++, "ORGID"); - } - stmt.setDouble(counter++, 1.23); - array = conn.createArrayOf("VARCHAR", strArray); - stmt.setArray(counter++, array); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, true, 100); - - if (useIndex) { - - expectedColNames = multitenant ? ("\"CF\".\"A_INTEGER\"" + ",\"DEC\"" + ",\"A_STRING_ARRAY\"") : ("\"CF\".\"A_INTEGER\"" + ",\"ORGANIZATION_ID\"" + ",\"DEC\"" + ",\"A_STRING_ARRAY\""); - expectedColumnNameDataTypes = multitenant ? ("\"CF\".\"A_INTEGER\"" + " " + "INTEGER" + ",\"DEC\"" + " " + "DECIMAL(10,2)" + ",\"A_STRING_ARRAY\""+ " " + "VARCHAR(100) ARRAY") : ("\"CF\".\"A_INTEGER\"" + " " + "INTEGER" + ",\"ORGANIZATION_ID\"" + " " + "CHAR(15)" + ",\"DEC\"" + " " + "DECIMAL(10,2)" + ",\"A_STRING_ARRAY\""+ " " + "VARCHAR(100) ARRAY"); - - // Filter on columns that the secondary index is on. No order by. No limit. 
- sql = "SELECT a_date FROM " + tableName + " where CF.a_integer = ?"; - stmt = conn.prepareStatement(sql); - stmt.setInt(1, 1000); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 0); - - // Filter on columns that the secondary index is on. Order by on the indexed column. Limit specified. - sql = "SELECT a_date FROM " + tableName + " where CF.a_integer = ? ORDER BY CF.a_integer LIMIT 100"; - stmt = conn.prepareStatement(sql); - stmt.setInt(1, 1000); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 100); - - // Filter on columns that the secondary index is on. Order by on the non-indexed column. Limit specified. - sql = "SELECT a_integer FROM " + tableName + " where CF.a_integer = ? and a_date = ? ORDER BY a_date LIMIT 100"; - stmt = conn.prepareStatement(sql); - stmt.setInt(1, 1000); - stmt.setDate(2, new Date(909000)); - assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, true, 100); - } - } finally { - conn.close(); - } - } - - @Test - public void testAssertQueryAgainstTenantSpecificViewGoesThroughIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), new Properties()); - - // create table - conn.createStatement().execute("create table " - + "XYZ.ABC" - + " (organization_id char(15) not null, \n" - + " entity_id char(15) not null,\n" - + " a_string_array varchar(100) array[] not null,\n" - + " b_string varchar(100),\n" - + " a_string varchar,\n" - + " a_date date,\n" - + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id, a_string_array)\n" - + ")" + "MULTI_TENANT=true"); - - + public static final String SCHEMA_NAME = ""; + public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T"); + + public QueryOptimizerTest() { + } + + @Test + public void testRVCUsingPkColsReturnedByPlanShouldUseIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 CHAR(15), v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + String query = "select * from t where (v1, v2, k) > ('1', '2', '3')"; + QueryPlan plan = stmt.optimizeQuery(query); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testOrderByOptimizedOut() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY k"); + assertEquals(OrderBy.FWD_ROW_KEY_ORDER_BY, plan.getOrderBy()); + } + + @Test + public void testOrderByDropped() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement().execute( + "CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY 'a','b','c'"); + assertTrue(plan.getOrderBy().getOrderByExpressions().isEmpty()); + } finally { + conn.close(); + } + } + + @Test + public void testOrderByNotDropped() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() 
+ .execute("CREATE TABLE foo (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=true"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY v"); + assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); + } + + @Test + public void testOrderByDroppedCompositeKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE foo (j INTEGER NOT NULL, k BIGINT NOT NULL, v VARCHAR CONSTRAINT pk PRIMARY KEY (j,k)) IMMUTABLE_ROWS=true"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY j,k"); + assertEquals(OrderBy.FWD_ROW_KEY_ORDER_BY, plan.getOrderBy()); + } + + @Test + public void testOrderByNotDroppedCompositeKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE foo (j INTEGER NOT NULL, k BIGINT NOT NULL, v VARCHAR CONSTRAINT pk PRIMARY KEY (j,k)) IMMUTABLE_ROWS=true"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM foo ORDER BY k,j"); + assertFalse(plan.getOrderBy().getOrderByExpressions().isEmpty()); + } + + @Test + public void testChooseIndexOverTable() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'bar'"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseTableOverIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT v1 FROM t WHERE k = 1"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseTableForSelection() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT v1,v2 FROM t WHERE v1 = 'bar'"); + // Choose T because v2 is not in index + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseTableForDynCols() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k 
FROM t(v3 VARCHAR) WHERE v1 = 'bar'"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseTableForSelectionStar() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM t WHERE v1 = 'bar'"); + // Choose T because v2 is not in index + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexEvenWithSelectionStar() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1) INCLUDE (v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT * FROM t WHERE v1 = 'bar'"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromOrderBy() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1 LIMIT 5"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChoosePointLookupOverOrderByRemoval() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1 LIMIT 5"); // Prefer + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromOrderByDesc() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k DESC LIMIT 5"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseTableFromOrderByAsc() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = 
stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k LIMIT 5"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromOrderByAsc() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1, k)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k > 30 ORDER BY v1, k LIMIT 5"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChoosePointLookupOverOrderByDesc() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY DESC, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1, k)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1, k LIMIT 5"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexWithLongestRowKey() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testIgnoreIndexesBasedOnHint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = + stmt.optimizeQuery("SELECT /*+NO_INDEX*/ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromHint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = + stmt.optimizeQuery("SELECT /*+ INDEX(t idx1) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); + plan = stmt.optimizeQuery("SELECT k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); + } + + 
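+  // A minimal sketch of the plan-inspection pattern the hint tests above rely on: unwrap the
+  // JDBC statement to a PhoenixStatement, optimize the query, and assert which table or index
+  // the resulting plan resolved to. The helper name is hypothetical (it is not an existing
+  // method of this class) and assumes only classes already imported by the surrounding tests.
+  private static void assertPlanUsesTable(Connection conn, String sql, String expectedTable)
+    throws SQLException {
+    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+    QueryPlan plan = stmt.optimizeQuery(sql);
+    // getTableRef() points at the data table or index the optimizer selected for the query
+    assertEquals(expectedTable, plan.getTableRef().getTable().getTableName().getString());
+  }
+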
@Test + public void testChooseIndexFromCaseSensitiveHint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE \"t\" (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON \"t\"(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON \"t\"(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "SELECT /*+ INDEX(\"t\" idx1) */ k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); + plan = stmt.optimizeQuery("SELECT k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX2", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromCaseSensitiveHint2() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE \"t\" (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX \"idx1\" ON \"t\"(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX \"idx2\" ON \"t\"(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "SELECT /*+ INDEX(\"t\" \"idx1\") */ k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("idx1", plan.getTableRef().getTable().getTableName().getString()); + plan = stmt.optimizeQuery("SELECT k FROM \"t\" WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("idx2", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testChooseIndexFromDoubleQuotedHint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "SELECT /*+ INDEX(t \"IDX1\") INDEX(t idx3) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testIndexHintParsing() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx1 ON t(v1) INCLUDE(v2)"); + conn.createStatement().execute("CREATE INDEX idx2 ON t(v1,v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "SELECT /*+ INDEX(t idx3 idx4 \"idx5\") INDEX(t idx6 idx1) */ k FROM t WHERE v1 = 'foo' AND v2 = 'bar'"); + assertEquals("IDX1", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testDataTableOverIndexHint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1,v2)"); + PhoenixStatement stmt = 
conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = + stmt.optimizeQuery("SELECT /*+ " + Hint.USE_DATA_OVER_INDEX_TABLE + " */ * FROM t"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + // unhinted still uses index + plan = stmt.optimizeQuery("SELECT * FROM t"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + // hinting with a WHERE clause still uses the index + plan = stmt.optimizeQuery( + "SELECT /*+ " + Hint.USE_DATA_OVER_INDEX_TABLE + " */ * FROM t WHERE v1 = 'foo'"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + // Tests that a DELETE without a WHERE clause uses the data table (for parallel deletion on server + // side) + // DELETE with a WHERE clause should use the index on the client side + @Test + public void testDelete() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1,v2)"); + conn.setAutoCommit(true); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + SQLParser parser = new SQLParser("DELETE FROM t"); + DeleteStatement delete = (DeleteStatement) parser.parseStatement(); + DeleteCompiler compiler = new DeleteCompiler(stmt, null); + MutationPlan plan = compiler.compile(delete); + assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); + assertEquals(plan.getClass(), DeleteCompiler.ServerSelectDeleteMutationPlan.class); + parser = new SQLParser("DELETE FROM t WHERE v1 = 'foo'"); + delete = (DeleteStatement) parser.parseStatement(); + plan = compiler.compile(delete); + assertEquals("IDX", plan.getQueryPlan().getTableRef().getTable().getTableName().getString()); + assertEquals(plan.getClass(), DeleteCompiler.ClientSelectDeleteMutationPlan.class); + } + + @Test + public void testChooseSmallerTable() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) IMMUTABLE_ROWS=true"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT count(*) FROM t"); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testRVCForTableWithSecondaryIndexBasic() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + String query = "select * from t where (v1, v2) <= ('1', '2')"; + QueryPlan plan = stmt.optimizeQuery(query); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testRVCAllColsForTableWithSecondaryIndexBasic() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE T (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX IDX ON T(v1, v2)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + 
String query = "select * from t where (k, v1, v2) <= ('3', '1', '2')"; + QueryPlan plan = stmt.optimizeQuery(query); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + // Multi-tenant = false; Query uses index = false; Salted = true + public void testAssertQueryPlanDetails1() throws Exception { + testAssertQueryPlanDetails(false, false, true); + } + + @Test + // Multi-tenant = true; Query uses index = false; Salted = true + public void testAssertQueryPlanDetails2() throws Exception { + testAssertQueryPlanDetails(true, false, true); + } + + @Test + // Multi-tenant = true; Query uses index = true; Salted = false + public void testAssertQueryPlanDetails3() throws Exception { + testAssertQueryPlanDetails(true, true, true); + } + + @Test + // Multi-tenant = false; Query uses index = true; Salted = true + public void testAssertQueryPlanDetails4() throws Exception { + testAssertQueryPlanDetails(false, true, true); + } + + @Test + // Multi-tenant = false; Query uses index = false; Salted = false + public void testAssertQueryPlanDetails5() throws Exception { + testAssertQueryPlanDetails(false, false, false); + } + + @Test + // Multi-tenant = true; Query uses index = false; Salted = false + public void testAssertQueryPlanDetails6() throws Exception { + testAssertQueryPlanDetails(true, false, false); + } + + @Test + // Multi-tenant = true; Query uses index = true; Salted = false + public void testAssertQueryPlanDetails7() throws Exception { + testAssertQueryPlanDetails(true, true, false); + } + + @Test + // Multi-tenant = false; Query uses index = true; Salted = false + public void testAssertQueryPlanDetails8() throws Exception { + testAssertQueryPlanDetails(false, true, false); + } + + @Test + public void testQueryOptimizerShouldSelectThePlanWithMoreNumberOfPKColumns() throws Exception { + Connection conn1 = DriverManager.getConnection(getUrl()); + Connection conn2 = DriverManager.getConnection(getUrl()); + conn1.createStatement().execute( + "create table index_test_table (a varchar not null,b varchar not null,c varchar not null,d varchar,e varchar, f varchar constraint pk primary key(a,b,c))"); + conn1.createStatement() + .execute("create index INDEX_TEST_TABLE_INDEX_D on INDEX_TEST_TABLE(A,D) include(B,C,E,F)"); + conn1.createStatement() + .execute("create index INDEX_TEST_TABLE_INDEX_F on INDEX_TEST_TABLE(A,F) include(B,C,D,E)"); + ResultSet rs = conn2.createStatement().executeQuery( + "explain select * from INDEX_TEST_TABLE where A in ('1','2','3','4','5') and F in ('1111','2222','3333')"); + assertEquals( + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 15 KEYS OVER INDEX_TEST_TABLE_INDEX_F ['1','1111'] - ['5','3333']", + QueryUtil.getExplainPlan(rs)); + } + + @Test + public void testCharArrayLength() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE TEST.TEST (testInt INTEGER, testCharArray CHAR(3)[], testByteArray BINARY(7)[], " + + "CONSTRAINT test_pk PRIMARY KEY(testInt)) DEFAULT_COLUMN_FAMILY='T'"); + conn.createStatement().execute( + "CREATE INDEX TEST_INDEX ON TEST.TEST (testInt) INCLUDE (testCharArray, testByteArray)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + + QueryPlan plan = stmt.optimizeQuery( + "SELECT /*+ INDEX(TEST.TEST TEST_INDEX)*/ testCharArray,testByteArray FROM TEST.TEST"); + List columns = plan.getTableRef().getTable().getColumns(); + assertEquals(3, columns.size()); + assertEquals(3, 
columns.get(1).getMaxLength().intValue()); + assertEquals(7, columns.get(2).getMaxLength().intValue()); + } + + private void testAssertQueryPlanDetails(boolean multitenant, boolean useIndex, boolean salted) + throws Exception { + String sql; + PreparedStatement stmt; + Connection conn = DriverManager.getConnection(getUrl(), new Properties()); + try { + // create table + conn.createStatement() + .execute("create table " + "XYZ.ABC" + " (organization_id char(15) not null, \n" + + " \"DEC\" DECIMAL(10,2) not null,\n" + + " a_string_array varchar(100) array[] not null,\n" + " b_string varchar(100),\n" + + " CF.a_integer integer,\n" + " a_date date,\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, \"DEC\", a_string_array)\n" + ")" + + (salted ? "SALT_BUCKETS=4" : "") + + (multitenant == true ? (salted ? ",MULTI_TENANT=true" : "MULTI_TENANT=true") : "")); + + if (useIndex) { // create index - conn.createStatement().execute("CREATE INDEX ABC_IDX ON XYZ.ABC (a_string) INCLUDE (a_date)"); - - conn.close(); - - // switch to a tenant specific connection - conn = DriverManager.getConnection(getUrl("tenantId")); - - // create a tenant specific view - conn.createStatement().execute("CREATE VIEW ABC_VIEW AS SELECT * FROM XYZ.ABC"); - - // query against the tenant specific view - String sql = "SELECT a_date FROM ABC_VIEW where a_string = ?"; - PreparedStatement stmt = conn.prepareStatement(sql); - stmt.setString(1, "1000"); - QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); - assertEquals("Query should use index", PTableType.INDEX, plan.getTableRef().getTable().getType()); - } - - @Test - @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") - public void testAssertQueryAgainstTenantSpecificViewDoesNotGoThroughIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), new Properties()); - - // create table - conn.createStatement().execute("create table " - + "XYZ.ABC" - + " (organization_id char(15) not null, \n" - + " entity_id char(15) not null,\n" - + " a_string_array varchar(100) array[] not null,\n" - + " b_string varchar(100),\n" - + " a_string varchar,\n" - + " a_date date,\n" - + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id, a_string_array)\n" - + ")" + "MULTI_TENANT=true"); - - - // create index - conn.createStatement().execute("CREATE INDEX ABC_IDX ON XYZ.ABC (a_string) INCLUDE (a_date)"); - - conn.close(); - - // switch to a tenant specific connection - conn = DriverManager.getConnection(getUrl("tenantId")); - - // create a tenant specific view - conn.createStatement().execute("CREATE VIEW ABC_VIEW AS SELECT * FROM XYZ.ABC where b_string='foo'"); - - // query against the tenant specific view - String sql = "SELECT a_date FROM ABC_VIEW where a_string = ?"; - PreparedStatement stmt = conn.prepareStatement(sql); - stmt.setString(1, "1000"); - QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); - // should not use index as index does not contain b_string - assertEquals("Query should not use index", PTableType.VIEW, plan.getTableRef().getTable().getType()); - } - - @Test - public void testDistinctPrefixOnVarcharIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT 
COUNT(DISTINCT v1) FROM t"); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertFalse(plan.getGroupBy().getKeyExpressions().isEmpty()); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testDistinctPrefixOnIntIndex() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR)"); - conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT COUNT(DISTINCT v1) FROM t"); - assertTrue(plan.getGroupBy().isOrderPreserving()); - assertFalse(plan.getGroupBy().getKeyExpressions().isEmpty()); - assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testTableUsedWithQueryMore() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, k4 CHAR(15) NOT NULL, CONSTRAINT pk PRIMARY KEY (k1,k2,k3,k4))"); - conn.createStatement().execute("CREATE INDEX idx ON t(k1,k3,k2,k4)"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("SELECT * FROM t WHERE (k1,k2,k3,k4) > ('001','001xx000003DHml',to_date('2015-10-21 09:50:55.0'),'017xx0000022FuI')"); - assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); - } - - @Test - public void testViewUsedWithQueryMoreSalted() throws Exception { - testViewUsedWithQueryMore(3); - } - - @Test - public void testViewUsedWithQueryMoreUnsalted() throws Exception { - testViewUsedWithQueryMore(null); - } - - private void testViewUsedWithQueryMore(Integer saltBuckets) throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - int offset = saltBuckets == null ? 0 : 1; - conn.createStatement().execute("CREATE TABLE MY_TABLES.MY_TABLE " - + "(ORGANIZATION_ID CHAR(15) NOT NULL, " - + "PKCOL1 CHAR(15) NOT NULL," - + "PKCOL2 CHAR(15) NOT NULL," - + "PKCOL3 CHAR(15) NOT NULL," - + "PKCOL4 CHAR(15) NOT NULL,COL1 " - + "CHAR(15)," - + "COL2 CHAR(15)" - + "CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID,PKCOL1,PKCOL2,PKCOL3,PKCOL4)) MULTI_TENANT=true" + (saltBuckets == null ? 
"" : (",SALT_BUCKETS=" + saltBuckets))); - conn.createStatement().execute("CREATE INDEX MY_TABLE_INDEX \n" + - "ON MY_TABLES.MY_TABLE (PKCOL1, PKCOL3, PKCOL2, PKCOL4)\n" + - "INCLUDE (COL1, COL2)"); - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "000000000000000"); - Connection tsconn = DriverManager.getConnection(getUrl(), props); - tsconn.createStatement().execute("CREATE VIEW MY_TABLE_MT_VIEW AS SELECT * FROM MY_TABLES.MY_TABLE"); - PhoenixStatement stmt = tsconn.createStatement().unwrap(PhoenixStatement.class); - QueryPlan plan = stmt.optimizeQuery("select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3, pkcol4) > ('0', '0', '0', '0')"); - assertEquals("MY_TABLE_MT_VIEW", plan.getTableRef().getTable().getTableName().getString()); - - plan = stmt.compileQuery("select * from my_table_mt_view where (pkcol1, pkcol2) > ('0', '0') and pkcol3 = '000000000000000' and pkcol4 = '000000000000000'"); - assertEquals(3 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - plan = stmt.compileQuery("select * from my_table_mt_view where (pkcol3, pkcol4) > ('0', '0') and pkcol1 = '000000000000000'"); - assertEquals(2 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - plan = stmt.compileQuery("select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3) < ('0', '0', '0')"); - assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - plan = stmt.compileQuery("select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3) < ('9', '9', '9') and (pkcol1, pkcol2) > ('0', '0')"); - assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - plan = stmt.compileQuery("select * from my_table_mt_view where pkcol1 = 'a' and pkcol2 = 'b' and pkcol3 = 'c' and (pkcol1, pkcol2) < ('z', 'z')"); - assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - plan = stmt.compileQuery("select * from my_table_mt_view where (pkcol2, pkcol3) > ('0', '0') and pkcol1 = '000000000000000'"); - assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); - } - - private void assertPlanDetails(PreparedStatement stmt, String expectedPkCols, String expectedPkColsDataTypes, boolean expectedHasOrderBy, int expectedLimit) throws SQLException { - Connection conn = stmt.getConnection(); - QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); - - List> columns = PhoenixRuntime.getPkColsForSql(conn, plan); - assertEquals(expectedPkCols, Joiner.on(",").join(getColumnNames(columns))); - List dataTypes = new ArrayList(); - columns = new ArrayList>(); - PhoenixRuntime.getPkColsDataTypesForSql(columns, dataTypes, plan, conn, true); - - assertEquals(expectedPkColsDataTypes, appendColNamesDataTypes(columns, dataTypes)); - assertEquals(expectedHasOrderBy, PhoenixRuntime.hasOrderBy(plan)); - assertEquals(expectedLimit, PhoenixRuntime.getLimit(plan)); - } - - private static List getColumnNames(List> columns) { - List columnNames = new ArrayList(columns.size()); - for (Pair col : columns) { - String familyName = col.getFirst(); - String columnName = col.getSecond(); - if (familyName != null) { - columnName = familyName + QueryConstants.NAME_SEPARATOR + columnName; - } - columnNames.add(columnName); - } - return columnNames; - } - - private String addQuotes(String familyName, String columnNames) { - Iterable columnNamesList = Splitter.on(",").split(columnNames); - List quotedColumnNames = new ArrayList(); - for (String 
columnName : columnNamesList) { - String quotedColumnName = SchemaUtil.getQuotedFullColumnName(familyName, columnName); - quotedColumnNames.add(quotedColumnName); - } - return Joiner.on(",").join(quotedColumnNames); - } - - private String appendColNamesDataTypes(List> columns, List dataTypes) { - int size = columns.size(); - assertEquals(size, dataTypes.size()); // they will be equal, but what the heck? - List pkColsDataTypes = new ArrayList(size); - for (int i = 0; i < size; i++) { - String familyName = columns.get(i).getFirst(); - String columnName = columns.get(i).getSecond(); - if (familyName != null) { - columnName = familyName + QueryConstants.NAME_SEPARATOR + columnName; - } - pkColsDataTypes.add(columnName + " " + dataTypes.get(i)); - } - return Joiner.on(",").join(pkColsDataTypes); - } - - @Test - public void testMinMaxQualifierRangeWithOrderByOnKVColumn() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String tableName = "testMintestMinMaxQualifierRange".toUpperCase(); - conn.createStatement().execute("CREATE TABLE " + tableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR) COLUMN_ENCODED_BYTES=4"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - ResultSet rs = stmt.executeQuery("SELECT K from " + tableName + " ORDER BY (v1)"); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE); - rs = stmt.executeQuery("SELECT K from " + tableName + " ORDER BY (v1, v2)"); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); - rs = stmt.executeQuery("SELECT V2 from " + tableName + " ORDER BY (v1)"); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); - rs = stmt.executeQuery("SELECT V1 from " + tableName + " ORDER BY (v1, v2)"); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); - } - - @Test - public void testMinMaxQualifierRangeWithNoOrderBy() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String tableName = "testMintestMinMaxQualifierRange".toUpperCase(); - conn.createStatement().execute("CREATE TABLE " + tableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR) COLUMN_ENCODED_BYTES=4"); - PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); - ResultSet rs = stmt.executeQuery("SELECT K from " + tableName); - assertQualifierRangesNotPresent(rs); - rs = stmt.executeQuery("SELECT V2 from " + tableName); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); - rs = stmt.executeQuery("SELECT V1 from " + tableName); - assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE); - } - - private static void assertQualifierRanges(ResultSet rs, int minQualifier, int maxQualifier) throws SQLException { - Scan scan = rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getContext().getScan(); - assertNotNull(scan.getAttribute(MIN_QUALIFIER)); - assertNotNull(scan.getAttribute(MAX_QUALIFIER)); - assertEquals(minQualifier, Bytes.toInt(scan.getAttribute(MIN_QUALIFIER))); - assertEquals(maxQualifier, Bytes.toInt(scan.getAttribute(MAX_QUALIFIER))); - } - - private static void assertQualifierRangesNotPresent(ResultSet rs) throws SQLException { - Scan scan = rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getContext().getScan(); - assertNull(scan.getAttribute(MIN_QUALIFIER)); - 
assertNull(scan.getAttribute(MAX_QUALIFIER)); - } -} \ No newline at end of file + conn.createStatement() + .execute("CREATE INDEX ABC_IDX ON XYZ.ABC (CF.a_integer) INCLUDE (a_date)"); + } + + // switch to a tenant specific connection if multi-tenant. + conn = multitenant ? DriverManager.getConnection(getUrl("tenantId")) : conn; + + // create a tenant specific view if multi-tenant + if (multitenant) { + conn.createStatement() + .execute("CREATE VIEW ABC_VIEW (ORG_ID VARCHAR) AS SELECT * FROM XYZ.ABC"); + } + + String expectedColNames = multitenant + ? addQuotes(null, "DEC,A_STRING_ARRAY") + : addQuotes(null, "ORGANIZATION_ID,DEC,A_STRING_ARRAY"); + String expectedColumnNameDataTypes = multitenant + ? "\"DEC\" DECIMAL(10,2),\"A_STRING_ARRAY\" VARCHAR(100) ARRAY" + : "\"ORGANIZATION_ID\" CHAR(15),\"DEC\" DECIMAL(10,2),\"A_STRING_ARRAY\" VARCHAR(100) ARRAY"; + String tableName = multitenant ? "ABC_VIEW" : "XYZ.ABC"; + String tenantFilter = multitenant ? "" : "organization_id = ? AND "; + String orderByRowKeyClause = multitenant ? "DEC" : "organization_id"; + + // Filter on row key columns of data table. No order by. No limit. + sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + + " \"DEC\" = ? and a_string_array = ?"; + stmt = conn.prepareStatement(sql); + int counter = 1; + if (!multitenant) { + stmt.setString(counter++, "ORGID"); + } + stmt.setDouble(counter++, 1.23); + String[] strArray = new String[2]; + strArray[0] = "AB"; + strArray[1] = "CD"; + Array array = conn.createArrayOf("VARCHAR", strArray); + stmt.setArray(counter++, array); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 0); + + counter = 1; + // Filter on row key columns of data table. Order by row key columns. Limit specified. + sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + + " \"DEC\" = ? and a_string_array = ? ORDER BY " + orderByRowKeyClause + " LIMIT 100"; + stmt = conn.prepareStatement(sql); + if (!multitenant) { + stmt.setString(counter++, "ORGID"); + } + stmt.setDouble(counter++, 1.23); + array = conn.createArrayOf("VARCHAR", strArray); + stmt.setArray(counter++, array); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 100); + + counter = 1; + // Filter on row key columns of data table. Order by non-row key columns. Limit specified. + sql = "SELECT CF.a_integer FROM " + tableName + " where " + tenantFilter + + " \"DEC\" = ? and a_string_array = ? ORDER BY a_date LIMIT 100"; + stmt = conn.prepareStatement(sql); + if (!multitenant) { + stmt.setString(counter++, "ORGID"); + } + stmt.setDouble(counter++, 1.23); + array = conn.createArrayOf("VARCHAR", strArray); + stmt.setArray(counter++, array); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, true, 100); + + if (useIndex) { + + expectedColNames = multitenant + ? ("\"CF\".\"A_INTEGER\"" + ",\"DEC\"" + ",\"A_STRING_ARRAY\"") + : ("\"CF\".\"A_INTEGER\"" + ",\"ORGANIZATION_ID\"" + ",\"DEC\"" + ",\"A_STRING_ARRAY\""); + expectedColumnNameDataTypes = multitenant + ? ("\"CF\".\"A_INTEGER\"" + " " + "INTEGER" + ",\"DEC\"" + " " + "DECIMAL(10,2)" + + ",\"A_STRING_ARRAY\"" + " " + "VARCHAR(100) ARRAY") + : ("\"CF\".\"A_INTEGER\"" + " " + "INTEGER" + ",\"ORGANIZATION_ID\"" + " " + "CHAR(15)" + + ",\"DEC\"" + " " + "DECIMAL(10,2)" + ",\"A_STRING_ARRAY\"" + " " + + "VARCHAR(100) ARRAY"); + + // Filter on columns that the secondary index is on. No order by. No limit. 
+ sql = "SELECT a_date FROM " + tableName + " where CF.a_integer = ?"; + stmt = conn.prepareStatement(sql); + stmt.setInt(1, 1000); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 0); + + // Filter on columns that the secondary index is on. Order by on the indexed column. Limit + // specified. + sql = "SELECT a_date FROM " + tableName + + " where CF.a_integer = ? ORDER BY CF.a_integer LIMIT 100"; + stmt = conn.prepareStatement(sql); + stmt.setInt(1, 1000); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, false, 100); + + // Filter on columns that the secondary index is on. Order by on the non-indexed column. + // Limit specified. + sql = "SELECT a_integer FROM " + tableName + + " where CF.a_integer = ? and a_date = ? ORDER BY a_date LIMIT 100"; + stmt = conn.prepareStatement(sql); + stmt.setInt(1, 1000); + stmt.setDate(2, new Date(909000)); + assertPlanDetails(stmt, expectedColNames, expectedColumnNameDataTypes, true, 100); + } + } finally { + conn.close(); + } + } + + @Test + public void testAssertQueryAgainstTenantSpecificViewGoesThroughIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl(), new Properties()); + + // create table + conn.createStatement() + .execute("create table " + "XYZ.ABC" + " (organization_id char(15) not null, \n" + + " entity_id char(15) not null,\n" + + " a_string_array varchar(100) array[] not null,\n" + " b_string varchar(100),\n" + + " a_string varchar,\n" + " a_date date,\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id, a_string_array)\n" + ")" + + "MULTI_TENANT=true"); + + // create index + conn.createStatement().execute("CREATE INDEX ABC_IDX ON XYZ.ABC (a_string) INCLUDE (a_date)"); + + conn.close(); + + // switch to a tenant specific connection + conn = DriverManager.getConnection(getUrl("tenantId")); + + // create a tenant specific view + conn.createStatement().execute("CREATE VIEW ABC_VIEW AS SELECT * FROM XYZ.ABC"); + + // query against the tenant specific view + String sql = "SELECT a_date FROM ABC_VIEW where a_string = ?"; + PreparedStatement stmt = conn.prepareStatement(sql); + stmt.setString(1, "1000"); + QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); + assertEquals("Query should use index", PTableType.INDEX, + plan.getTableRef().getTable().getType()); + } + + @Test + @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") + public void testAssertQueryAgainstTenantSpecificViewDoesNotGoThroughIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl(), new Properties()); + + // create table + conn.createStatement() + .execute("create table " + "XYZ.ABC" + " (organization_id char(15) not null, \n" + + " entity_id char(15) not null,\n" + + " a_string_array varchar(100) array[] not null,\n" + " b_string varchar(100),\n" + + " a_string varchar,\n" + " a_date date,\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id, a_string_array)\n" + ")" + + "MULTI_TENANT=true"); + + // create index + conn.createStatement().execute("CREATE INDEX ABC_IDX ON XYZ.ABC (a_string) INCLUDE (a_date)"); + + conn.close(); + + // switch to a tenant specific connection + conn = DriverManager.getConnection(getUrl("tenantId")); + + // create a tenant specific view + conn.createStatement() + .execute("CREATE VIEW ABC_VIEW AS SELECT * FROM XYZ.ABC where b_string='foo'"); + + // query against the tenant specific view + String sql = "SELECT a_date FROM ABC_VIEW where a_string = ?"; + PreparedStatement stmt = 
conn.prepareStatement(sql); + stmt.setString(1, "1000"); + QueryPlan plan = stmt.unwrap(PhoenixPreparedStatement.class).optimizeQuery(); + // should not use index as index does not contain b_string + assertEquals("Query should not use index", PTableType.VIEW, + plan.getTableRef().getTable().getType()); + } + + @Test + public void testDistinctPrefixOnVarcharIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT COUNT(DISTINCT v1) FROM t"); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertFalse(plan.getGroupBy().getKeyExpressions().isEmpty()); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testDistinctPrefixOnIntIndex() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR)"); + conn.createStatement().execute("CREATE INDEX idx ON t(v1)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery("SELECT COUNT(DISTINCT v1) FROM t"); + assertTrue(plan.getGroupBy().isOrderPreserving()); + assertFalse(plan.getGroupBy().getKeyExpressions().isEmpty()); + assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testTableUsedWithQueryMore() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, k4 CHAR(15) NOT NULL, CONSTRAINT pk PRIMARY KEY (k1,k2,k3,k4))"); + conn.createStatement().execute("CREATE INDEX idx ON t(k1,k3,k2,k4)"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "SELECT * FROM t WHERE (k1,k2,k3,k4) > ('001','001xx000003DHml',to_date('2015-10-21 09:50:55.0'),'017xx0000022FuI')"); + assertEquals("T", plan.getTableRef().getTable().getTableName().getString()); + } + + @Test + public void testViewUsedWithQueryMoreSalted() throws Exception { + testViewUsedWithQueryMore(3); + } + + @Test + public void testViewUsedWithQueryMoreUnsalted() throws Exception { + testViewUsedWithQueryMore(null); + } + + private void testViewUsedWithQueryMore(Integer saltBuckets) throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + int offset = saltBuckets == null ? 0 : 1; + conn.createStatement().execute("CREATE TABLE MY_TABLES.MY_TABLE " + + "(ORGANIZATION_ID CHAR(15) NOT NULL, " + "PKCOL1 CHAR(15) NOT NULL," + + "PKCOL2 CHAR(15) NOT NULL," + "PKCOL3 CHAR(15) NOT NULL," + "PKCOL4 CHAR(15) NOT NULL,COL1 " + + "CHAR(15)," + "COL2 CHAR(15)" + + "CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID,PKCOL1,PKCOL2,PKCOL3,PKCOL4)) MULTI_TENANT=true" + + (saltBuckets == null ? 
"" : (",SALT_BUCKETS=" + saltBuckets))); + conn.createStatement().execute("CREATE INDEX MY_TABLE_INDEX \n" + + "ON MY_TABLES.MY_TABLE (PKCOL1, PKCOL3, PKCOL2, PKCOL4)\n" + "INCLUDE (COL1, COL2)"); + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "000000000000000"); + Connection tsconn = DriverManager.getConnection(getUrl(), props); + tsconn.createStatement() + .execute("CREATE VIEW MY_TABLE_MT_VIEW AS SELECT * FROM MY_TABLES.MY_TABLE"); + PhoenixStatement stmt = tsconn.createStatement().unwrap(PhoenixStatement.class); + QueryPlan plan = stmt.optimizeQuery( + "select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3, pkcol4) > ('0', '0', '0', '0')"); + assertEquals("MY_TABLE_MT_VIEW", plan.getTableRef().getTable().getTableName().getString()); + + plan = stmt.compileQuery( + "select * from my_table_mt_view where (pkcol1, pkcol2) > ('0', '0') and pkcol3 = '000000000000000' and pkcol4 = '000000000000000'"); + assertEquals(3 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + plan = stmt.compileQuery( + "select * from my_table_mt_view where (pkcol3, pkcol4) > ('0', '0') and pkcol1 = '000000000000000'"); + assertEquals(2 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + plan = stmt.compileQuery( + "select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3) < ('0', '0', '0')"); + assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + plan = stmt.compileQuery( + "select * from my_table_mt_view where (pkcol1, pkcol2, pkcol3) < ('9', '9', '9') and (pkcol1, pkcol2) > ('0', '0')"); + assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + plan = stmt.compileQuery( + "select * from my_table_mt_view where pkcol1 = 'a' and pkcol2 = 'b' and pkcol3 = 'c' and (pkcol1, pkcol2) < ('z', 'z')"); + assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + plan = stmt.compileQuery( + "select * from my_table_mt_view where (pkcol2, pkcol3) > ('0', '0') and pkcol1 = '000000000000000'"); + assertEquals(4 + offset, plan.getContext().getScanRanges().getBoundPkColumnCount()); + } + + private void assertPlanDetails(PreparedStatement stmt, String expectedPkCols, + String expectedPkColsDataTypes, boolean expectedHasOrderBy, int expectedLimit) + throws SQLException { + Connection conn = stmt.getConnection(); + QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); + + List> columns = PhoenixRuntime.getPkColsForSql(conn, plan); + assertEquals(expectedPkCols, Joiner.on(",").join(getColumnNames(columns))); + List dataTypes = new ArrayList(); + columns = new ArrayList>(); + PhoenixRuntime.getPkColsDataTypesForSql(columns, dataTypes, plan, conn, true); + + assertEquals(expectedPkColsDataTypes, appendColNamesDataTypes(columns, dataTypes)); + assertEquals(expectedHasOrderBy, PhoenixRuntime.hasOrderBy(plan)); + assertEquals(expectedLimit, PhoenixRuntime.getLimit(plan)); + } + + private static List getColumnNames(List> columns) { + List columnNames = new ArrayList(columns.size()); + for (Pair col : columns) { + String familyName = col.getFirst(); + String columnName = col.getSecond(); + if (familyName != null) { + columnName = familyName + QueryConstants.NAME_SEPARATOR + columnName; + } + columnNames.add(columnName); + } + return columnNames; + } + + private String addQuotes(String familyName, String columnNames) { + Iterable columnNamesList = Splitter.on(",").split(columnNames); + List quotedColumnNames = new 
ArrayList(); + for (String columnName : columnNamesList) { + String quotedColumnName = SchemaUtil.getQuotedFullColumnName(familyName, columnName); + quotedColumnNames.add(quotedColumnName); + } + return Joiner.on(",").join(quotedColumnNames); + } + + private String appendColNamesDataTypes(List> columns, + List dataTypes) { + int size = columns.size(); + assertEquals(size, dataTypes.size()); // they will be equal, but what the heck? + List pkColsDataTypes = new ArrayList(size); + for (int i = 0; i < size; i++) { + String familyName = columns.get(i).getFirst(); + String columnName = columns.get(i).getSecond(); + if (familyName != null) { + columnName = familyName + QueryConstants.NAME_SEPARATOR + columnName; + } + pkColsDataTypes.add(columnName + " " + dataTypes.get(i)); + } + return Joiner.on(",").join(pkColsDataTypes); + } + + @Test + public void testMinMaxQualifierRangeWithOrderByOnKVColumn() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String tableName = "testMintestMinMaxQualifierRange".toUpperCase(); + conn.createStatement().execute("CREATE TABLE " + tableName + + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR) COLUMN_ENCODED_BYTES=4"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + ResultSet rs = stmt.executeQuery("SELECT K from " + tableName + " ORDER BY (v1)"); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE); + rs = stmt.executeQuery("SELECT K from " + tableName + " ORDER BY (v1, v2)"); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); + rs = stmt.executeQuery("SELECT V2 from " + tableName + " ORDER BY (v1)"); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); + rs = stmt.executeQuery("SELECT V1 from " + tableName + " ORDER BY (v1, v2)"); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); + } + + @Test + public void testMinMaxQualifierRangeWithNoOrderBy() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String tableName = "testMintestMinMaxQualifierRange".toUpperCase(); + conn.createStatement().execute("CREATE TABLE " + tableName + + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 VARCHAR) COLUMN_ENCODED_BYTES=4"); + PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class); + ResultSet rs = stmt.executeQuery("SELECT K from " + tableName); + assertQualifierRangesNotPresent(rs); + rs = stmt.executeQuery("SELECT V2 from " + tableName); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1); + rs = stmt.executeQuery("SELECT V1 from " + tableName); + assertQualifierRanges(rs, ENCODED_EMPTY_COLUMN_NAME, ENCODED_CQ_COUNTER_INITIAL_VALUE); + } + + private static void assertQualifierRanges(ResultSet rs, int minQualifier, int maxQualifier) + throws SQLException { + Scan scan = + rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getContext().getScan(); + assertNotNull(scan.getAttribute(MIN_QUALIFIER)); + assertNotNull(scan.getAttribute(MAX_QUALIFIER)); + assertEquals(minQualifier, Bytes.toInt(scan.getAttribute(MIN_QUALIFIER))); + assertEquals(maxQualifier, Bytes.toInt(scan.getAttribute(MAX_QUALIFIER))); + } + + private static void assertQualifierRangesNotPresent(ResultSet rs) throws SQLException { + Scan scan = + rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getContext().getScan(); + 
assertNull(scan.getAttribute(MIN_QUALIFIER)); + assertNull(scan.getAttribute(MAX_QUALIFIER)); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/RVCOffsetCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/RVCOffsetCompilerTest.java index 09df67a386c..f46a3af583a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/RVCOffsetCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/RVCOffsetCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,199 +41,190 @@ import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class RVCOffsetCompilerTest { - private static TableName TABLE_NAME = TableName.create(null,"TABLE1"); - - - RVCOffsetCompiler offsetCompiler; - - @Before - public void init(){ - offsetCompiler = RVCOffsetCompiler.getInstance(); - } + private static TableName TABLE_NAME = TableName.create(null, "TABLE1"); - @Test - public void buildListOfColumnParseNodesTest() throws Exception { - List children = new ArrayList<>(); - ColumnParseNode col1 = new ColumnParseNode(TABLE_NAME,"col1"); - ColumnParseNode col2 = new ColumnParseNode(TABLE_NAME,"col2"); + RVCOffsetCompiler offsetCompiler; - children.add(col1); - children.add(col2); - RowValueConstructorParseNode rvc = new RowValueConstructorParseNode(children); + @Before + public void init() { + offsetCompiler = RVCOffsetCompiler.getInstance(); + } - List - result = - offsetCompiler.buildListOfColumnParseNodes(rvc, true); + @Test + public void buildListOfColumnParseNodesTest() throws Exception { + List children = new ArrayList<>(); + ColumnParseNode col1 = new ColumnParseNode(TABLE_NAME, "col1"); + ColumnParseNode col2 = new ColumnParseNode(TABLE_NAME, "col2"); - assertEquals(2,result.size()); - assertEquals(col1,result.get(0)); - assertEquals(col2,result.get(1)); - } + children.add(col1); + children.add(col2); + RowValueConstructorParseNode rvc = new RowValueConstructorParseNode(children); - @Test - public void buildListOfColumnParseNodesTestIndex() throws Exception { - List children = new ArrayList<>(); - ColumnParseNode col1 = new ColumnParseNode(TABLE_NAME,"col1"); - ColumnParseNode col2 = new ColumnParseNode(TABLE_NAME,"col2"); + List result = offsetCompiler.buildListOfColumnParseNodes(rvc, true); - ParseNodeFactory factory = new ParseNodeFactory(); + assertEquals(2, result.size()); + assertEquals(col1, result.get(0)); + assertEquals(col2, result.get(1)); + } - children.add(factory.cast(col1, PDecimal.INSTANCE, null, null,false)); - children.add(factory.cast(col2, PDecimal.INSTANCE, null, null,false)); + @Test + public void buildListOfColumnParseNodesTestIndex() throws Exception { + List children = new ArrayList<>(); + ColumnParseNode col1 = new ColumnParseNode(TABLE_NAME, "col1"); + ColumnParseNode col2 = new ColumnParseNode(TABLE_NAME, "col2"); - RowValueConstructorParseNode rvc = new 
RowValueConstructorParseNode(children); + ParseNodeFactory factory = new ParseNodeFactory(); - List - result = - offsetCompiler.buildListOfColumnParseNodes(rvc, true); + children.add(factory.cast(col1, PDecimal.INSTANCE, null, null, false)); + children.add(factory.cast(col2, PDecimal.INSTANCE, null, null, false)); - assertEquals(2,result.size()); - assertEquals(col1,result.get(0)); - assertEquals(col2,result.get(1)); - } + RowValueConstructorParseNode rvc = new RowValueConstructorParseNode(children); + List result = offsetCompiler.buildListOfColumnParseNodes(rvc, true); - @Test - public void buildListOfRowKeyColumnExpressionsTest() throws Exception { - List expressions = new ArrayList<>(); + assertEquals(2, result.size()); + assertEquals(col1, result.get(0)); + assertEquals(col2, result.get(1)); + } - RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(); - RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(); + @Test + public void buildListOfRowKeyColumnExpressionsTest() throws Exception { + List expressions = new ArrayList<>(); - ComparisonExpression expression1 = mock(ComparisonExpression.class); - ComparisonExpression expression2 = mock(ComparisonExpression.class); + RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(); + RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(); - Mockito.when(expression1.getChildren()).thenReturn(Lists.newArrayList(rvc1)); - Mockito.when(expression2.getChildren()).thenReturn(Lists.newArrayList(rvc2)); + ComparisonExpression expression1 = mock(ComparisonExpression.class); + ComparisonExpression expression2 = mock(ComparisonExpression.class); - expressions.add(expression1); - expressions.add(expression2); + Mockito.when(expression1.getChildren()).thenReturn(Lists. newArrayList(rvc1)); + Mockito.when(expression2.getChildren()).thenReturn(Lists. 
newArrayList(rvc2)); - AndExpression expression = mock(AndExpression.class); - Mockito.when(expression.getChildren()).thenReturn(expressions); + expressions.add(expression1); + expressions.add(expression2); - RVCOffsetCompiler.RowKeyColumnExpressionOutput - output = offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); - List - result = output.getRowKeyColumnExpressions(); + AndExpression expression = mock(AndExpression.class); + Mockito.when(expression.getChildren()).thenReturn(expressions); - assertEquals(2,result.size()); - assertEquals(rvc1,result.get(0)); + RVCOffsetCompiler.RowKeyColumnExpressionOutput output = + offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); + List result = output.getRowKeyColumnExpressions(); - assertEquals(rvc2,result.get(1)); - } + assertEquals(2, result.size()); + assertEquals(rvc1, result.get(0)); - @Test - public void buildListOfRowKeyColumnExpressionsIndexTest() throws Exception { - List expressions = new ArrayList<>(); + assertEquals(rvc2, result.get(1)); + } - PColumn - column = new PColumnImpl(PName.EMPTY_COLUMN_NAME, PName.EMPTY_NAME, PDecimal.INSTANCE, 10, 1, - true, 1, SortOrder.getDefault(), 0, null, false, null, false, false, null, HConstants.LATEST_TIMESTAMP); + @Test + public void buildListOfRowKeyColumnExpressionsIndexTest() throws Exception { + List expressions = new ArrayList<>(); + PColumn column = new PColumnImpl(PName.EMPTY_COLUMN_NAME, PName.EMPTY_NAME, PDecimal.INSTANCE, + 10, 1, true, 1, SortOrder.getDefault(), 0, null, false, null, false, false, null, + HConstants.LATEST_TIMESTAMP); - RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(column,null); - RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(column, null); + RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(column, null); + RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(column, null); - Expression coerce1 = CoerceExpression.create(rvc1,PDecimal.INSTANCE); - Expression coerce2 = CoerceExpression.create(rvc2,PDecimal.INSTANCE); + Expression coerce1 = CoerceExpression.create(rvc1, PDecimal.INSTANCE); + Expression coerce2 = CoerceExpression.create(rvc2, PDecimal.INSTANCE); - ComparisonExpression expression1 = mock(ComparisonExpression.class); - ComparisonExpression expression2 = mock(ComparisonExpression.class); + ComparisonExpression expression1 = mock(ComparisonExpression.class); + ComparisonExpression expression2 = mock(ComparisonExpression.class); - Mockito.when(expression1.getChildren()).thenReturn(Lists.newArrayList(coerce1)); - Mockito.when(expression2.getChildren()).thenReturn(Lists.newArrayList(coerce2)); + Mockito.when(expression1.getChildren()).thenReturn(Lists.newArrayList(coerce1)); + Mockito.when(expression2.getChildren()).thenReturn(Lists.newArrayList(coerce2)); - expressions.add(expression1); - expressions.add(expression2); + expressions.add(expression1); + expressions.add(expression2); - AndExpression expression = mock(AndExpression.class); - Mockito.when(expression.getChildren()).thenReturn(expressions); + AndExpression expression = mock(AndExpression.class); + Mockito.when(expression.getChildren()).thenReturn(expressions); - RVCOffsetCompiler.RowKeyColumnExpressionOutput - output = offsetCompiler.buildListOfRowKeyColumnExpressions(expression, true); - List - result = output.getRowKeyColumnExpressions(); + RVCOffsetCompiler.RowKeyColumnExpressionOutput output = + offsetCompiler.buildListOfRowKeyColumnExpressions(expression, true); + List result = output.getRowKeyColumnExpressions(); - 
assertEquals(2,result.size()); - assertEquals(rvc1,result.get(0)); - assertEquals(rvc2,result.get(1)); - } + assertEquals(2, result.size()); + assertEquals(rvc1, result.get(0)); + assertEquals(rvc2, result.get(1)); + } - @Test - public void buildListOfRowKeyColumnExpressionsSingleNodeComparisonTest() throws Exception { - List expressions = new ArrayList<>(); + @Test + public void buildListOfRowKeyColumnExpressionsSingleNodeComparisonTest() throws Exception { + List expressions = new ArrayList<>(); - RowKeyColumnExpression rvc = new RowKeyColumnExpression(); + RowKeyColumnExpression rvc = new RowKeyColumnExpression(); - ComparisonExpression expression = mock(ComparisonExpression.class); + ComparisonExpression expression = mock(ComparisonExpression.class); - Mockito.when(expression.getChildren()).thenReturn(Lists.newArrayList(rvc)); + Mockito.when(expression.getChildren()).thenReturn(Lists. newArrayList(rvc)); - RVCOffsetCompiler.RowKeyColumnExpressionOutput - output = offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); - List - result = output.getRowKeyColumnExpressions(); + RVCOffsetCompiler.RowKeyColumnExpressionOutput output = + offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); + List result = output.getRowKeyColumnExpressions(); - assertEquals(1,result.size()); - assertEquals(rvc,result.get(0)); - } + assertEquals(1, result.size()); + assertEquals(rvc, result.get(0)); + } - @Test - public void buildListOfRowKeyColumnExpressionsSingleNodeIsNullTest() throws Exception { - List expressions = new ArrayList<>(); + @Test + public void buildListOfRowKeyColumnExpressionsSingleNodeIsNullTest() throws Exception { + List expressions = new ArrayList<>(); - RowKeyColumnExpression rvc = new RowKeyColumnExpression(); + RowKeyColumnExpression rvc = new RowKeyColumnExpression(); - IsNullExpression expression = mock(IsNullExpression.class); + IsNullExpression expression = mock(IsNullExpression.class); - Mockito.when(expression.getChildren()).thenReturn(Lists.newArrayList(rvc)); + Mockito.when(expression.getChildren()).thenReturn(Lists. 
newArrayList(rvc)); - RVCOffsetCompiler.RowKeyColumnExpressionOutput output = offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); + RVCOffsetCompiler.RowKeyColumnExpressionOutput output = + offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); - List result = output.getRowKeyColumnExpressions(); + List result = output.getRowKeyColumnExpressions(); - assertEquals(1,result.size()); - assertEquals(rvc,result.get(0)); + assertEquals(1, result.size()); + assertEquals(rvc, result.get(0)); - assertTrue(output.isTrailingNull()); - } + assertTrue(output.isTrailingNull()); + } - @Test - public void buildListOfRowKeyColumnExpressionsIsNullTest() throws Exception { - List expressions = new ArrayList<>(); + @Test + public void buildListOfRowKeyColumnExpressionsIsNullTest() throws Exception { + List expressions = new ArrayList<>(); - RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(); - RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(); + RowKeyColumnExpression rvc1 = new RowKeyColumnExpression(); + RowKeyColumnExpression rvc2 = new RowKeyColumnExpression(); - IsNullExpression expression1 = mock(IsNullExpression.class); - IsNullExpression expression2 = mock(IsNullExpression.class); + IsNullExpression expression1 = mock(IsNullExpression.class); + IsNullExpression expression2 = mock(IsNullExpression.class); - Mockito.when(expression1.getChildren()).thenReturn(Lists.newArrayList(rvc1)); - Mockito.when(expression2.getChildren()).thenReturn(Lists.newArrayList(rvc2)); + Mockito.when(expression1.getChildren()).thenReturn(Lists. newArrayList(rvc1)); + Mockito.when(expression2.getChildren()).thenReturn(Lists. newArrayList(rvc2)); - expressions.add(expression1); - expressions.add(expression2); + expressions.add(expression1); + expressions.add(expression2); - AndExpression expression = mock(AndExpression.class); - Mockito.when(expression.getChildren()).thenReturn(expressions); + AndExpression expression = mock(AndExpression.class); + Mockito.when(expression.getChildren()).thenReturn(expressions); - RVCOffsetCompiler.RowKeyColumnExpressionOutput output = offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); + RVCOffsetCompiler.RowKeyColumnExpressionOutput output = + offsetCompiler.buildListOfRowKeyColumnExpressions(expression, false); - List result = output.getRowKeyColumnExpressions(); + List result = output.getRowKeyColumnExpressions(); - assertEquals(2,result.size()); - assertEquals(rvc1,result.get(0)); - assertEquals(rvc2,result.get(1)); + assertEquals(2, result.size()); + assertEquals(rvc1, result.get(0)); + assertEquals(rvc2, result.get(1)); - assertTrue(output.isTrailingNull()); - } + assertTrue(output.isTrailingNull()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/SaltedScanRangesTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/SaltedScanRangesTest.java index 03bab724f2e..99a71ded9de 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/SaltedScanRangesTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/SaltedScanRangesTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,207 +33,196 @@ import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** * Test for intersect method in {@link ScanRanges} over salted data */ @RunWith(Parameterized.class) public class SaltedScanRangesTest { - private static Integer nBuckets = 3; - private final ScanRanges scanRanges; - private final KeyRange keyRange; - private final boolean expectedResult; + private static Integer nBuckets = 3; + private final ScanRanges scanRanges; + private final KeyRange keyRange; + private final boolean expectedResult; - public SaltedScanRangesTest(ScanRanges scanRanges, int[] widths, - KeyRange keyRange, boolean expectedResult) { - this.keyRange = keyRange; - this.scanRanges = scanRanges; - this.expectedResult = expectedResult; - } + public SaltedScanRangesTest(ScanRanges scanRanges, int[] widths, KeyRange keyRange, + boolean expectedResult) { + this.keyRange = keyRange; + this.scanRanges = scanRanges; + this.expectedResult = expectedResult; + } - @Test - public void test() { - byte[] lowerInclusiveKey = keyRange.getLowerRange(); - if (!keyRange.isLowerInclusive() && !Bytes.equals(lowerInclusiveKey, KeyRange.UNBOUND)) { - // This assumes the last key is fixed length, otherwise the results may be incorrect - // since there's no terminating 0 byte for a variable length key and thus we may be - // incrementing the key too much. - lowerInclusiveKey = ByteUtil.nextKey(lowerInclusiveKey); - } - byte[] upperExclusiveKey = keyRange.getUpperRange(); - if (keyRange.isUpperInclusive()) { - // This assumes the last key is fixed length, otherwise the results may be incorrect - // since there's no terminating 0 byte for a variable length key and thus we may be - // incrementing the key too much. - upperExclusiveKey = ByteUtil.nextKey(upperExclusiveKey); - } - assertEquals(expectedResult, scanRanges.intersectRegion(lowerInclusiveKey,upperExclusiveKey,false)); + @Test + public void test() { + byte[] lowerInclusiveKey = keyRange.getLowerRange(); + if (!keyRange.isLowerInclusive() && !Bytes.equals(lowerInclusiveKey, KeyRange.UNBOUND)) { + // This assumes the last key is fixed length, otherwise the results may be incorrect + // since there's no terminating 0 byte for a variable length key and thus we may be + // incrementing the key too much. 
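  // [Editor's illustrative aside, not part of this patch] The comment above relies on
  // ByteUtil.nextKey to turn an exclusive, fixed-length bound into an inclusive one. A
  // minimal sketch of the underlying idea for plain lexicographic byte[] comparison; the
  // helper name is hypothetical, and Phoenix's real ByteUtil.nextKey instead increments
  // the existing bytes (hence the "incrementing the key too much" caveat above):
  private static byte[] smallestKeyStrictlyGreaterThan(byte[] key) {
    // key + {0x00} is the smallest byte[] that sorts strictly after key itself
    byte[] next = java.util.Arrays.copyOf(key, key.length + 1);
    next[key.length] = 0x00;
    return next;
  }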
+ lowerInclusiveKey = ByteUtil.nextKey(lowerInclusiveKey); } - - @Parameters(name="{0} {2}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(KeyRange.UNBOUND, new byte[]{1}), - false, - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(new byte[]{1},new byte[]{2}), - false, - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(new byte[]{2},KeyRange.UNBOUND), - false, - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(new byte[]{1},ByteUtil.concat(new byte[]{1}, Bytes.toBytes("c"))), - false, - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(ByteUtil.concat(new byte[]{1}, Bytes.toBytes("e")), new byte[]{2}), - false, - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(ByteUtil.concat(new byte[]{1}, Bytes.toBytes("d")), new byte[]{2}), - false, - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("h"), true, Bytes.toBytes("i"), false, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("p"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(ByteUtil.concat(new byte[]{1}, Bytes.toBytes("f")), ByteUtil.concat(new byte[]{1}, Bytes.toBytes("g"))), - false, - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("h"), true, Bytes.toBytes("i"), false, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("p"), false, SortOrder.ASC), - }}, - new int[] {0}, - KeyRange.getKeyRange(ByteUtil.concat(new byte[]{1}, Bytes.toBytes("f")), ByteUtil.concat(new byte[]{1}, Bytes.toBytes("g"))), - true, - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - }}, - new int[] {1}, - KeyRange.getKeyRange(new byte[]{1,0},new byte[]{2,0}), - false, - true)); - return testCases; + byte[] upperExclusiveKey = keyRange.getUpperRange(); + if (keyRange.isUpperInclusive()) { + // This assumes the last key is fixed length, otherwise the results may be incorrect + // since there's no terminating 0 byte for a variable length key and thus we may be + // incrementing the key too much. 
+ upperExclusiveKey = ByteUtil.nextKey(upperExclusiveKey); } + assertEquals(expectedResult, + scanRanges.intersectRegion(lowerInclusiveKey, upperExclusiveKey, false)); + } - private static Collection foreach(ScanRanges ranges, int[] widths, KeyRange keyRange, - boolean expectedResult) { - List ret = Lists.newArrayList(); - ret.add(new Object[] {ranges, widths, keyRange, expectedResult}); - return ret; - } + @Parameters(name = "{0} {2}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, KeyRange.getKeyRange(KeyRange.UNBOUND, new byte[] { 1 }), false, true)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, KeyRange.getKeyRange(new byte[] { 1 }, new byte[] { 2 }), false, true)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, KeyRange.getKeyRange(new byte[] { 2 }, KeyRange.UNBOUND), false, true)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, + KeyRange.getKeyRange(new byte[] { 1 }, ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("c"))), + false, false)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, + KeyRange.getKeyRange(ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("e")), new byte[] { 2 }), + false, false)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, + Bytes.toBytes("e"), false, SortOrder.ASC), } }, + new int[] { 0 }, + KeyRange.getKeyRange(ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("d")), new byte[] { 2 }), + false, true)); + testCases.addAll(foreach( + new KeyRange[][] { { + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("h"), true, Bytes.toBytes("i"), false, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("p"), false, + SortOrder.ASC), } }, + new int[] { 0 }, KeyRange.getKeyRange(ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("f")), + ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("g"))), + false, true)); + testCases.addAll(foreach( + new KeyRange[][] { { + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), false, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("h"), true, Bytes.toBytes("i"), false, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("p"), false, + SortOrder.ASC), } }, + new int[] { 0 }, KeyRange.getKeyRange(ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("f")), + ByteUtil.concat(new byte[] { 1 }, Bytes.toBytes("g"))), + true, false)); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, KeyRange.UNBOUND, + false, SortOrder.ASC), } }, + new int[] { 1 }, KeyRange.getKeyRange(new byte[] { 1, 0 }, new byte[] { 2, 0 }), false, + true)); + return testCases; + } + + private static Collection foreach(ScanRanges 
ranges, int[] widths, KeyRange keyRange, + boolean expectedResult) { + List ret = Lists.newArrayList(); + ret.add(new Object[] { ranges, widths, keyRange, expectedResult }); + return ret; + } + + private static Collection foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, + boolean useSkipScan, boolean expectedResult) { + List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + slots = new ArrayList<>(slots); + slots.add(0, Collections.singletonList(KeyRange.getKeyRange(new byte[] { 0 }))); + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); + builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.getDefault()); + for (final int width : widths) { + if (width > 0) { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PChar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return width; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + } else { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return width; + } + + @Override + public Integer getScale() { + return null; + } - private static Collection foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, boolean useSkipScan, - boolean expectedResult) { - List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - slots = new ArrayList<>(slots); - slots.add(0, Collections.singletonList(KeyRange.getKeyRange(new byte[]{0}))); - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); - builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.getDefault()); - for (final int width : widths) { - if (width > 0) { - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PChar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } else { - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } - } - ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots, nBuckets , useSkipScan); - return foreach(scanRanges, widths, keyRange, expectedResult); + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + } } + ScanRanges scanRanges = + ScanRanges.createSingleSpan(builder.build(), slots, nBuckets, useSkipScan); + return foreach(scanRanges, widths, keyRange, expectedResult); + } - private static final Function> ARRAY_TO_LIST = - new Function>() { - @Override - public List apply(KeyRange[] input) { - return Lists.newArrayList(input); - } + private static final Function> ARRAY_TO_LIST = + new 
Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); + } }; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java index 48dd1a08fac..bdf23766ea4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -28,42 +28,42 @@ import org.apache.phoenix.filter.SkipScanFilter; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ScanRangesIntersectTest { - @Test - public void testPointLookupIntersect() throws Exception { - List keys = points("a","j","m","z"); - ScanRanges ranges = ScanRanges.createPointLookup(keys); - assertIntersect(ranges, "b", "l", "j"); - - } - - private static void assertIntersect(ScanRanges ranges, String lowerRange, String upperRange, String... expectedPoints) { - List expectedKeys = points(expectedPoints); - Collections.sort(expectedKeys,KeyRange.COMPARATOR); - Scan scan = new Scan(); - scan.setFilter(ranges.getSkipScanFilter()); - byte[] startKey = lowerRange == null ? KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(lowerRange); - byte[] stopKey = upperRange == null ? KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(upperRange); - Scan newScan = ranges.intersectScan(scan, startKey, stopKey, 0, true); - if (expectedPoints.length == 0) { - assertNull(newScan); - } else { - assertNotNull(newScan); - SkipScanFilter filter = (SkipScanFilter)newScan.getFilter(); - assertEquals(expectedKeys, filter.getSlots().get(0)); - } + @Test + public void testPointLookupIntersect() throws Exception { + List keys = points("a", "j", "m", "z"); + ScanRanges ranges = ScanRanges.createPointLookup(keys); + assertIntersect(ranges, "b", "l", "j"); + + } + + private static void assertIntersect(ScanRanges ranges, String lowerRange, String upperRange, + String... expectedPoints) { + List expectedKeys = points(expectedPoints); + Collections.sort(expectedKeys, KeyRange.COMPARATOR); + Scan scan = new Scan(); + scan.setFilter(ranges.getSkipScanFilter()); + byte[] startKey = lowerRange == null ? KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(lowerRange); + byte[] stopKey = upperRange == null ? 
KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(upperRange); + Scan newScan = ranges.intersectScan(scan, startKey, stopKey, 0, true); + if (expectedPoints.length == 0) { + assertNull(newScan); + } else { + assertNotNull(newScan); + SkipScanFilter filter = (SkipScanFilter) newScan.getFilter(); + assertEquals(expectedKeys, filter.getSlots().get(0)); } - - private static List points(String... points) { - List keys = Lists.newArrayListWithExpectedSize(points.length); - for (String point : points) { - keys.add(KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes(point))); - } - return keys; + } + + private static List points(String... points) { + List keys = Lists.newArrayListWithExpectedSize(points.length); + for (String point : points) { + keys.add(KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes(point))); } + return keys; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesTest.java index a8d2b689a64..9f0cecfef9d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,509 +30,516 @@ import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** * Test for intersect method in {@link ScanRanges} */ @RunWith(Parameterized.class) public class ScanRangesTest { - private final ScanRanges scanRanges; - private final KeyRange keyRange; - private final boolean expectedResult; + private final ScanRanges scanRanges; + private final KeyRange keyRange; + private final boolean expectedResult; - public ScanRangesTest(ScanRanges scanRanges, int[] widths, - KeyRange keyRange, boolean expectedResult) { - this.keyRange = keyRange; - this.scanRanges = scanRanges; - this.expectedResult = expectedResult; - } + public ScanRangesTest(ScanRanges scanRanges, int[] widths, KeyRange keyRange, + boolean expectedResult) { + this.keyRange = keyRange; + this.scanRanges = scanRanges; + this.expectedResult = expectedResult; + } - @Test - public void test() { - byte[] lowerInclusiveKey = keyRange.getLowerRange(); - if (!keyRange.isLowerInclusive() && !Bytes.equals(lowerInclusiveKey, KeyRange.UNBOUND)) { - // This assumes the last key is fixed length, otherwise the results may be incorrect - // since there's no terminating 0 byte for a variable length key and thus we may be - // incrementing the key too much. 
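  // [Editor's illustrative aside, not part of this patch] Each parameterized case here asks
  // whether the region interval [lowerInclusiveKey, upperExclusiveKey) overlaps the scan's
  // key ranges. A minimal sketch of that half-open overlap test on raw byte[] keys, assuming
  // plain lexicographic comparison and ignoring UNBOUND endpoints, which the real
  // ScanRanges.intersectRegion has to handle:
  private static boolean regionOverlapsRange(byte[] regionStartIncl, byte[] regionEndExcl,
      byte[] rangeStartIncl, byte[] rangeEndExcl) {
    return org.apache.hadoop.hbase.util.Bytes.compareTo(regionStartIncl, rangeEndExcl) < 0
      && org.apache.hadoop.hbase.util.Bytes.compareTo(rangeStartIncl, regionEndExcl) < 0;
  }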
- lowerInclusiveKey = ByteUtil.nextKey(lowerInclusiveKey); - } - byte[] upperExclusiveKey = keyRange.getUpperRange(); - if (keyRange.isUpperInclusive()) { - // This assumes the last key is fixed length, otherwise the results may be incorrect - // since there's no terminating 0 byte for a variable length key and thus we may be - // incrementing the key too much. - upperExclusiveKey = ByteUtil.nextKey(upperExclusiveKey); - } - assertEquals(expectedResult, scanRanges.intersectRegion(lowerInclusiveKey,upperExclusiveKey,false)); + @Test + public void test() { + byte[] lowerInclusiveKey = keyRange.getLowerRange(); + if (!keyRange.isLowerInclusive() && !Bytes.equals(lowerInclusiveKey, KeyRange.UNBOUND)) { + // This assumes the last key is fixed length, otherwise the results may be incorrect + // since there's no terminating 0 byte for a variable length key and thus we may be + // incrementing the key too much. + lowerInclusiveKey = ByteUtil.nextKey(lowerInclusiveKey); } - - @Parameters(name="{0} {2}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // variable length test that demonstrates that null byte - // must be added at end - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("b"), false, Bytes.toBytes("c"), true, SortOrder.ASC), - }}, - new int[] {0}, - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("ba"), true, Bytes.toBytes("bb"), true, SortOrder.ASC), - true)); - // KeyRange covers the first scan range. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a9Z"), true, Bytes.toBytes("c0A"), true, SortOrder.ASC), - true)); - // KeyRange that requires a fixed width exclusive lower bound to be bumped up - // and made inclusive. Otherwise, the comparison thinks its bigger than it really is. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1A"), true, Bytes.toBytes("b1A"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, Bytes.toBytes("b1C"), true, SortOrder.ASC), - true)); - // KeyRange intersect with the first scan range on range's upper end. 
- testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, Bytes.toBytes("b1B"), true, SortOrder.ASC), - true)); - // ScanRanges is everything. - testCases.addAll( - foreach(ScanRanges.EVERYTHING, - null, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - true)); - // ScanRanges is nothing. - testCases.addAll( - foreach(ScanRanges.NOTHING, - null, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - false)); - // KeyRange below the first scan range. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - }}, - new int[] {1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0Y"), true, Bytes.toBytes("b0Z"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, Bytes.toBytes("b2A"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1A"), true, Bytes.toBytes("b1B"), false, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a0Z"), false, Bytes.toBytes("a1A"), false, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("C"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1A"), true, Bytes.toBytes("b1B"), false, SortOrder.ASC), - false)); - // KeyRange intersects with the first scan range on range's lower end. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1C"), true, Bytes.toBytes("b2E"), true, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1D"), true, Bytes.toBytes("b2E"), true, SortOrder.ASC), - true)); - // KeyRange above the first scan range, no intersect. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("H"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1E"), true, Bytes.toBytes("b1F"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1I"), true, Bytes.toBytes("a2A"), false, SortOrder.ASC), - false)); - // KeyRange above the first scan range, with intersects. 
- testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1E"), true, Bytes.toBytes("b1H"), true, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("c"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b00"), true, Bytes.toBytes("d00"), true, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("c"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("3"), true, Bytes.toBytes("4"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b20"), true, Bytes.toBytes("b50"), true, SortOrder.ASC), - true)); - // KeyRange above the last scan range. 
- testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1B"), false, Bytes.toBytes("b2A"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), false, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), false, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), false, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b2A"), true, Bytes.toBytes("b2A"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c1A"), false, Bytes.toBytes("c9Z"), true, SortOrder.ASC), - false)); - // KeyRange contains unbound lower bound. - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes("a0Z"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes("a0Z"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes("a1C"), true, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes("a1D"), true, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes("a2D"), true, SortOrder.ASC), - true)); - // KeyRange contains unbound upper bound - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a0A"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1B"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - true)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1C"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a3A"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - false)); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(Bytes.toBytes("d0A"), true, KeyRange.UNBOUND, false, SortOrder.ASC), - false)); - // KeyRange is unbound to unbound. 
- testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - },{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), - }}, - new int[] {1,1,1}, - PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, KeyRange.UNBOUND, false, SortOrder.ASC), - true)); - return testCases; + byte[] upperExclusiveKey = keyRange.getUpperRange(); + if (keyRange.isUpperInclusive()) { + // This assumes the last key is fixed length, otherwise the results may be incorrect + // since there's no terminating 0 byte for a variable length key and thus we may be + // incrementing the key too much. + upperExclusiveKey = ByteUtil.nextKey(upperExclusiveKey); } + assertEquals(expectedResult, + scanRanges.intersectRegion(lowerInclusiveKey, upperExclusiveKey, false)); + } - private static Collection foreach(ScanRanges ranges, int[] widths, KeyRange keyRange, - boolean expectedResult) { - List ret = Lists.newArrayList(); - ret.add(new Object[] {ranges, widths, keyRange, expectedResult}); - return ret; - } + @Parameters(name = "{0} {2}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + // variable length test that demonstrates that null byte + // must be added at end + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("b"), false, + Bytes.toBytes("c"), true, SortOrder.ASC), } }, + new int[] { 0 }, PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("ba"), true, Bytes.toBytes("bb"), + true, SortOrder.ASC), + true)); + // KeyRange covers the first scan range. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a9Z"), true, + Bytes.toBytes("c0A"), true, SortOrder.ASC), + true)); + // KeyRange that requires a fixed width exclusive lower bound to be bumped up + // and made inclusive. Otherwise, the comparison thinks its bigger than it really is. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1A"), true, + Bytes.toBytes("b1A"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, + Bytes.toBytes("b1C"), true, SortOrder.ASC), + true)); + // KeyRange intersect with the first scan range on range's upper end. 
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, + Bytes.toBytes("b1B"), true, SortOrder.ASC), + true)); + // ScanRanges is everything. + testCases.addAll(foreach(ScanRanges.EVERYTHING, null, + PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), + true)); + // ScanRanges is nothing. + testCases.addAll(foreach(ScanRanges.NOTHING, null, + PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), + false)); + // KeyRange below the first scan range. + testCases.addAll(foreach( + new KeyRange[][] { { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), + true, SortOrder.ASC), } }, + new int[] { 1 }, + PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0Y"), true, + Bytes.toBytes("b0Z"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b0A"), true, + Bytes.toBytes("b2A"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1A"), true, + Bytes.toBytes("b1B"), false, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a0Z"), false, + Bytes.toBytes("a1A"), false, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("C"), true, + 
SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1A"), true, + Bytes.toBytes("b1B"), false, SortOrder.ASC), + false)); + // KeyRange intersects with the first scan range on range's lower end. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1C"), true, + Bytes.toBytes("b2E"), true, SortOrder.ASC), + true)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1D"), true, + Bytes.toBytes("b2E"), true, SortOrder.ASC), + true)); + // KeyRange above the first scan range, no intersect. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("H"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1E"), true, + Bytes.toBytes("b1F"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1I"), true, + Bytes.toBytes("a2A"), false, SortOrder.ASC), + false)); + // KeyRange above the first scan range, with intersects. 
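  // [Editor's illustrative aside, not part of this patch] In these cases every slot is a
  // fixed-width CHAR(1), so a probe key such as "b1E" is read as one character per row key
  // slot ('b', '1', 'E') concatenated in order. A small sketch of that assumption; the
  // helper name is hypothetical:
  private static byte[] rowKeyOf(String... oneCharSlotValues) {
    StringBuilder rowKey = new StringBuilder();
    for (String slotValue : oneCharSlotValues) {
      rowKey.append(slotValue); // each slot contributes exactly one character here
    }
    return org.apache.hadoop.hbase.util.Bytes.toBytes(rowKey.toString());
  }
  // e.g. rowKeyOf("b", "1", "E") yields the same bytes as Bytes.toBytes("b1E")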
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1E"), true, + Bytes.toBytes("b1H"), true, SortOrder.ASC), + true)); + testCases.addAll(foreach(new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("c"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b00"), true, + Bytes.toBytes("d00"), true, SortOrder.ASC), + true)); + testCases.addAll(foreach(new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("c"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("3"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("I"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b20"), true, + Bytes.toBytes("b50"), true, SortOrder.ASC), + true)); + // KeyRange above the last scan range. 
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b1B"), false, + Bytes.toBytes("b2A"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), false, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("b2A"), true, + Bytes.toBytes("b2A"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("c1A"), false, + Bytes.toBytes("c9Z"), true, SortOrder.ASC), + false)); + // KeyRange contains unbound lower bound. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, + Bytes.toBytes("a0Z"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, + Bytes.toBytes("a0Z"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, + Bytes.toBytes("a1C"), true, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, + Bytes.toBytes("a1D"), true, SortOrder.ASC), + true)); + 
testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, + Bytes.toBytes("a2D"), true, SortOrder.ASC), + true)); + // KeyRange contains unbound upper bound + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a0A"), true, + KeyRange.UNBOUND, false, SortOrder.ASC), + true)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1B"), true, + KeyRange.UNBOUND, false, SortOrder.ASC), + true)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a1C"), true, + KeyRange.UNBOUND, false, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("a3A"), true, + KeyRange.UNBOUND, false, SortOrder.ASC), + false)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.getKeyRange(Bytes.toBytes("d0A"), true, + KeyRange.UNBOUND, false, SortOrder.ASC), + false)); + // KeyRange is unbound to unbound. 
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, + PChar.INSTANCE.getKeyRange(KeyRange.UNBOUND, false, KeyRange.UNBOUND, false, SortOrder.ASC), + true)); + return testCases; + } + + private static Collection foreach(ScanRanges ranges, int[] widths, KeyRange keyRange, + boolean expectedResult) { + List ret = Lists.newArrayList(); + ret.add(new Object[] { ranges, widths, keyRange, expectedResult }); + return ret; + } + + private static Collection foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, + boolean expectedResult) { + List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); + for (final int width : widths) { + if (width > 0) { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PChar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return width; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + } else { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return width; + } + + @Override + public Integer getScale() { + return null; + } - private static Collection foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, - boolean expectedResult) { - List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); - for (final int width : widths) { - if (width > 0) { - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PChar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } else { - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } - } - ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots); - return foreach(scanRanges, widths, keyRange, expectedResult); + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + } } + ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots); + return foreach(scanRanges, widths, keyRange, expectedResult); + } - private static final Function> ARRAY_TO_LIST = - new Function>() { - @Override - public List apply(KeyRange[] input) { - return 
Lists.newArrayList(input); - } + private static final Function> ARRAY_TO_LIST = + new Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); + } }; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/SelectStatementRewriterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/SelectStatementRewriterTest.java index aa998f7ba0e..82bd62cdf45 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/SelectStatementRewriterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/SelectStatementRewriterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,81 +37,56 @@ import org.apache.phoenix.util.PropertiesUtil; import org.junit.Test; +public class SelectStatementRewriterTest extends BaseConnectionlessQueryTest { + private static Filter compileStatement(String query) throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + QueryPlan plan = pstmt.compileQuery(); + return plan.getContext().getScan().getFilter(); + } + @Test + public void testCollapseAnd() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0"; + Filter filter = compileStatement(query); + assertEquals(singleKVFilter(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0)), filter); + } -public class SelectStatementRewriterTest extends BaseConnectionlessQueryTest { - private static Filter compileStatement(String query) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - QueryPlan plan = pstmt.compileQuery(); - return plan.getContext().getScan().getFilter(); - } + @Test + public void testLHSLiteralCollapseAnd() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where '" + tenantId + "'=organization_id and 0=a_integer"; + Filter filter = compileStatement(query); + assertEquals(singleKVFilter(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0)), filter); + } - - @Test - public void testCollapseAnd() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0"; - Filter filter = compileStatement(query); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0)), - filter); - } - - @Test - public void testLHSLiteralCollapseAnd() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where '" + tenantId + "'=organization_id and 0=a_integer"; - Filter filter = compileStatement(query); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0)), - filter); - } - - @Test - public void testRewriteAnd() throws SQLException { - String tenantId = "000000000000001"; - String 
query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and a_string='foo'"; - Filter filter = compileStatement(query); - assertEquals( - multiEncodedKVFilter(and( - constantComparison( - CompareOperator.EQUAL, - A_INTEGER, 0), - constantComparison( - CompareOperator.EQUAL, - A_STRING, "foo") - ), TWO_BYTE_QUALIFIERS), - filter); - } + @Test + public void testRewriteAnd() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + + "' and a_integer=0 and a_string='foo'"; + Filter filter = compileStatement(query); + assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0), + constantComparison(CompareOperator.EQUAL, A_STRING, "foo")), TWO_BYTE_QUALIFIERS), filter); + } - @Test - public void testCollapseWhere() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(organization_id,1,3)='foo' LIMIT 2"; - Filter filter = compileStatement(query); - assertNull(filter); - } + @Test + public void testCollapseWhere() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(organization_id,1,3)='foo' LIMIT 2"; + Filter filter = compileStatement(query); + assertNull(filter); + } - @Test - public void testNoCollapse() throws SQLException { - String query = "select * from atable where a_integer=0 and a_string='foo'"; - Filter filter = compileStatement(query); - assertEquals( - multiEncodedKVFilter(and( - constantComparison( - CompareOperator.EQUAL, - A_INTEGER, 0), - constantComparison( - CompareOperator.EQUAL, - A_STRING, "foo") - ), TWO_BYTE_QUALIFIERS), - filter); - } + @Test + public void testNoCollapse() throws SQLException { + String query = "select * from atable where a_integer=0 and a_string='foo'"; + Filter filter = compileStatement(query); + assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0), + constantComparison(CompareOperator.EQUAL, A_STRING, "foo")), TWO_BYTE_QUALIFIERS), filter); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java index eccf8f10765..973b1736c69 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/StatementHintsCompilationTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,77 +41,85 @@ import org.apache.phoenix.util.TestUtil; import org.junit.Test; - /** * Test compilation of various statements with hints. 
*/ public class StatementHintsCompilationTest extends BaseConnectionlessQueryTest { - private static boolean usingSkipScan(Scan scan) { - Filter filter = scan.getFilter(); - if (filter instanceof FilterList) { - FilterList filterList = (FilterList) filter; - for (Filter childFilter : filterList.getFilters()) { - if (childFilter instanceof SkipScanFilter) { - return true; - } - } - return false; + private static boolean usingSkipScan(Scan scan) { + Filter filter = scan.getFilter(); + if (filter instanceof FilterList) { + FilterList filterList = (FilterList) filter; + for (Filter childFilter : filterList.getFilters()) { + if (childFilter instanceof SkipScanFilter) { + return true; } - return filter instanceof SkipScanFilter; + } + return false; } + return filter instanceof SkipScanFilter; + } - private static QueryPlan compileStatement(String query) throws SQLException { - return compileStatement(query, Collections.emptyList(), null); - } + private static QueryPlan compileStatement(String query) throws SQLException { + return compileStatement(query, Collections.emptyList(), null); + } - private static QueryPlan compileStatement(String query, List binds, Integer limit) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - TestUtil.bindParams(pstmt, binds); - QueryPlan plan = pstmt.compileQuery(); - assertEquals(limit, plan.getLimit()); - return plan; - } + private static QueryPlan compileStatement(String query, List binds, Integer limit) + throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + TestUtil.bindParams(pstmt, binds); + QueryPlan plan = pstmt.compileQuery(); + assertEquals(limit, plan.getLimit()); + return plan; + } - @Test - public void testSelectForceSkipScan() throws Exception { - String id = "000000000000001"; - // A where clause without the first column usually compiles into a range scan. - String query = "SELECT /*+ SKIP_SCAN */ * FROM atable WHERE entity_id='" + id + "'"; - - Scan scan = compileStatement(query).getContext().getScan(); - assertTrue("The first filter should be SkipScanFilter.", usingSkipScan(scan)); - } + @Test + public void testSelectForceSkipScan() throws Exception { + String id = "000000000000001"; + // A where clause without the first column usually compiles into a range scan. + String query = "SELECT /*+ SKIP_SCAN */ * FROM atable WHERE entity_id='" + id + "'"; - @Test - public void testSelectForceRangeScan() throws Exception { - String query = "SELECT /*+ RANGE_SCAN */ * FROM atable WHERE organization_id in (" + - "'000000000000001', '000000000000002', '000000000000003', '000000000000004')"; - Scan scan = compileStatement(query).getContext().getScan(); - // Verify that it is not using SkipScanFilter. 
- assertFalse("The first filter should not be SkipScanFilter.", usingSkipScan(scan)); - } - - @Test - public void testSelectForceRangeScanForEH() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("create table eh (organization_id char(15) not null,parent_id char(15) not null, created_date date not null, entity_history_id char(15) not null constraint pk primary key (organization_id, parent_id, created_date, entity_history_id))"); - ResultSet rs = conn.createStatement().executeQuery("explain select /*+ RANGE_SCAN */ ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID from eh where ORGANIZATION_ID='111111111111111' and SUBSTR(PARENT_ID, 1, 3) = 'foo' and CREATED_DATE >= TO_DATE ('2012-11-01 00:00:00') and CREATED_DATE < TO_DATE ('2012-11-30 00:00:00') order by ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID limit 100"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER EH ['111111111111111','foo ','2012-11-01 00:00:00.000'] - ['111111111111111','fop ','2012-11-30 00:00:00.000']\n" + - " SERVER FILTER BY FIRST KEY ONLY AND (CREATED_DATE >= DATE '2012-11-01 00:00:00.000' AND CREATED_DATE < DATE '2012-11-30 00:00:00.000')\n" + - " SERVER TOP 100 ROWS SORTED BY [ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID]\n" + - "CLIENT MERGE SORT\nCLIENT LIMIT 100",QueryUtil.getExplainPlan(rs)); - } + Scan scan = compileStatement(query).getContext().getScan(); + assertTrue("The first filter should be SkipScanFilter.", usingSkipScan(scan)); + } - @Test - public void testSerialHint() throws Exception { - // test AggregatePlan - String query = "SELECT /*+ SERIAL */ COUNT(*) FROM atable"; - assertTrue("Expected a SERIAL query", compileStatement(query).getExplainPlan().getPlanSteps().get(0).contains("SERIAL")); + @Test + public void testSelectForceRangeScan() throws Exception { + String query = "SELECT /*+ RANGE_SCAN */ * FROM atable WHERE organization_id in (" + + "'000000000000001', '000000000000002', '000000000000003', '000000000000004')"; + Scan scan = compileStatement(query).getContext().getScan(); + // Verify that it is not using SkipScanFilter. 
+ assertFalse("The first filter should not be SkipScanFilter.", usingSkipScan(scan)); + } - // test ScanPlan - query = "SELECT /*+ SERIAL */ * FROM atable limit 10"; - assertTrue("Expected a SERIAL query", compileStatement(query, Collections.emptyList(), 10).getExplainPlan().getPlanSteps().get(0).contains("SERIAL")); - } + @Test + public void testSelectForceRangeScanForEH() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "create table eh (organization_id char(15) not null,parent_id char(15) not null, created_date date not null, entity_history_id char(15) not null constraint pk primary key (organization_id, parent_id, created_date, entity_history_id))"); + ResultSet rs = conn.createStatement().executeQuery( + "explain select /*+ RANGE_SCAN */ ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID from eh where ORGANIZATION_ID='111111111111111' and SUBSTR(PARENT_ID, 1, 3) = 'foo' and CREATED_DATE >= TO_DATE ('2012-11-01 00:00:00') and CREATED_DATE < TO_DATE ('2012-11-30 00:00:00') order by ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID limit 100"); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER EH ['111111111111111','foo ','2012-11-01 00:00:00.000'] - ['111111111111111','fop ','2012-11-30 00:00:00.000']\n" + + " SERVER FILTER BY FIRST KEY ONLY AND (CREATED_DATE >= DATE '2012-11-01 00:00:00.000' AND CREATED_DATE < DATE '2012-11-30 00:00:00.000')\n" + + " SERVER TOP 100 ROWS SORTED BY [ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID]\n" + + "CLIENT MERGE SORT\nCLIENT LIMIT 100", + QueryUtil.getExplainPlan(rs)); + } + + @Test + public void testSerialHint() throws Exception { + // test AggregatePlan + String query = "SELECT /*+ SERIAL */ COUNT(*) FROM atable"; + assertTrue("Expected a SERIAL query", + compileStatement(query).getExplainPlan().getPlanSteps().get(0).contains("SERIAL")); + + // test ScanPlan + query = "SELECT /*+ SERIAL */ * FROM atable limit 10"; + assertTrue("Expected a SERIAL query", compileStatement(query, Collections.emptyList(), 10) + .getExplainPlan().getPlanSteps().get(0).contains("SERIAL")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java index 7b54cf1cc8a..d0eb8efbb26 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/TenantSpecificViewIndexCompileTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,239 +36,261 @@ public class TenantSpecificViewIndexCompileTest extends BaseConnectionlessQueryTest { - @Test - public void testOrderByOptimizedOut() throws Exception { - Properties props = new Properties(); - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); - - String tenantId = "me"; - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific - conn = DriverManager.getConnection(getUrl(), props); - conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k1 = 'a'"); - conn.createStatement().execute("CREATE INDEX i1 ON v(v2) INCLUDE(v1)"); - - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v1,v2 FROM v WHERE v2 > 'a' ORDER BY v2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", - QueryUtil.getExplainPlan(rs)); - } - - @Test - public void testOrderByOptimizedOutWithoutPredicateInView() throws Exception { - - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3)) multi_tenant=true"); - conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t"); - - conn = createTenantSpecificConnection(); - - // Query without predicate ordered by full row key - String sql = "SELECT * FROM v1 ORDER BY k1, k2, k3"; - String expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Predicate with valid partial PK - sql = "SELECT * FROM v1 WHERE k1 = 'xyz' ORDER BY k1, k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - sql = "SELECT * FROM v1 WHERE k1 > 'xyz' ORDER BY k1, k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xy{'] - ['tenant123456789',*]"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - String datePredicate = createStaticDate(); - sql = "SELECT * FROM v1 WHERE k1 = 'xyz' AND k2 = '123456789012345' AND k3 < TO_DATE('" + datePredicate + "') ORDER BY k1, k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','123456789012345',*] - ['tenant123456789','xyz','123456789012345','2015-01-01 08:00:00.000']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - - // Predicate without valid partial PK - sql = "SELECT * FROM v1 WHERE k2 < 'abcde1234567890' ORDER BY k1, k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789']\n" + - " SERVER FILTER BY K2 < 'abcde1234567890'"; - assertExplainPlanIsCorrect(conn, sql, 
expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - } - - @Test - public void testOrderByOptimizedOutWithPredicateInView() throws Exception { - // Arrange - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3)) multi_tenant=true"); - conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 'xyz'"); - conn = createTenantSpecificConnection(); - - // Query without predicate ordered by full row key - String sql = "SELECT * FROM v1 ORDER BY k2, k3"; - String expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Query without predicate ordered by full row key, but without column view predicate - sql = "SELECT * FROM v1 ORDER BY k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Predicate with valid partial PK - sql = "SELECT * FROM v1 WHERE k1 = 'xyz' ORDER BY k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - sql = "SELECT * FROM v1 WHERE k2 < 'abcde1234567890' ORDER BY k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz',*] - ['tenant123456789','xyz','abcde1234567890']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Predicate with full PK - String datePredicate = createStaticDate(); - sql = "SELECT * FROM v1 WHERE k2 = '123456789012345' AND k3 < TO_DATE('" + datePredicate + "') ORDER BY k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','123456789012345',*] - ['tenant123456789','xyz','123456789012345','2015-01-01 08:00:00.000']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - - // Predicate with valid partial PK - sql = "SELECT * FROM v1 WHERE k3 < TO_DATE('" + datePredicate + "') ORDER BY k2, k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']\n" + - " SERVER FILTER BY K3 < DATE '" + datePredicate + "'"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - } - - @Test - public void testOrderByOptimizedOutWithMultiplePredicatesInView() throws Exception { - // Arrange - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(5) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3 DESC)) multi_tenant=true"); - conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 'xyz' AND k2='abcde'"); - conn = createTenantSpecificConnection(); - - // Query without predicate ordered by full row key - String sql = "SELECT * FROM v1 ORDER BY k3 DESC"; - String expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde']"; - 
assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Query without predicate ordered by full row key, but without column view predicate - sql = "SELECT * FROM v1 ORDER BY k3 DESC"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde']"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Query with predicate ordered by full row key - sql = "SELECT * FROM v1 WHERE k3 <= TO_DATE('" + createStaticDate() + "') ORDER BY k3 DESC"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde',~'2015-01-01 08:00:00.000'] - ['tenant123456789','xyz','abcde',*]"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - // Query with predicate ordered by full row key with date in reverse order - sql = "SELECT * FROM v1 WHERE k3 <= TO_DATE('" + createStaticDate() + "') ORDER BY k3"; - expectedExplainOutput = "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER T ['tenant123456789','xyz','abcde',~'2015-01-01 08:00:00.000'] - ['tenant123456789','xyz','abcde',*]"; - assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); - assertOrderByHasBeenOptimizedOut(conn, sql); - - } - - - @Test - public void testViewConstantsOptimizedOut() throws Exception { - Properties props = new Properties(); - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); - - String tenantId = "me"; - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific - conn = DriverManager.getConnection(getUrl(), props); - conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k2 = 'a'"); - conn.createStatement().execute("CREATE INDEX i1 ON v(v2)"); - - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM v WHERE v2 > 'a' and k2 = 'a' ORDER BY v2,k2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]\n" + - " SERVER FILTER BY FIRST KEY ONLY", - QueryUtil.getExplainPlan(rs)); - - // Won't use index b/c v1 is not in index, but should optimize out k2 still from the order by - // K2 will still be referenced in the filter, as these are automatically tacked on to the where clause. 
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT v1 FROM v WHERE v2 > 'a' ORDER BY k2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['me']\n" + - " SERVER FILTER BY (V2 > 'a' AND K2 = 'a')", - QueryUtil.getExplainPlan(rs)); - - // If we match K2 against a constant not equal to it's view constant, we should get a degenerate plan - rs = conn.createStatement().executeQuery("EXPLAIN SELECT v1 FROM v WHERE v2 > 'a' and k2='b' ORDER BY k2"); - assertEquals("DEGENERATE SCAN OVER V", - QueryUtil.getExplainPlan(rs)); - } - - @Test - public void testViewConstantsOptimizedOutOnReadOnlyView() throws Exception { - Properties props = new Properties(); - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + - " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); - - String tenantId = "me"; - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific - conn = DriverManager.getConnection(getUrl(), props); - conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k2 = 'a'"); - conn.createStatement().execute("CREATE VIEW v2(v3 VARCHAR) AS SELECT * FROM v WHERE k1 > 'a'"); - conn.createStatement().execute("CREATE INDEX i2 ON v2(v3) include(v2)"); - - // Confirm that a read-only view on an updatable view still optimizes out the read-only parts of the updatable view - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM v2 WHERE v3 > 'a' and k2 = 'a' ORDER BY v3,k2"); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", - QueryUtil.getExplainPlan(rs)); - } - - //----------------------------------------------------------------- - // Private Helper Methods - //----------------------------------------------------------------- - private Connection createTenantSpecificConnection() throws SQLException { - Connection conn; - Properties props = new Properties(); - String tenantId = "tenant123456789"; - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific - conn = DriverManager.getConnection(getUrl(), props); - return conn; - } - - - private void assertExplainPlanIsCorrect(Connection conn, String sql, - String expectedExplainOutput) throws SQLException { - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql); - assertEquals(expectedExplainOutput, QueryUtil.getExplainPlan(rs)); - } - - private void assertOrderByHasBeenOptimizedOut(Connection conn, String sql) throws SQLException { - PreparedStatement stmt = conn.prepareStatement(sql); - QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); - assertEquals(0, plan.getOrderBy().getOrderByExpressions().size()); - } - - /** - * Returns the default String representation of 1/1/2015 00:00:00 - */ - private String createStaticDate() { - Calendar cal = Calendar.getInstance(); - cal.set(Calendar.DAY_OF_YEAR, 1); - cal.set(Calendar.YEAR, 2015); - cal.set(Calendar.HOUR_OF_DAY, 0); - cal.set(Calendar.MINUTE, 0); - cal.set(Calendar.SECOND, 0); - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone("America/Los_Angeles")); - return DateUtil.DEFAULT_DATE_FORMATTER.format(cal.getTime()); - } - + @Test + public void testOrderByOptimizedOut() throws Exception { + Properties props = new Properties(); + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE 
t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); + + String tenantId = "me"; + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific + conn = DriverManager.getConnection(getUrl(), props); + conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k1 = 'a'"); + conn.createStatement().execute("CREATE INDEX i1 ON v(v2) INCLUDE(v1)"); + + ResultSet rs = + conn.createStatement().executeQuery("EXPLAIN SELECT v1,v2 FROM v WHERE v2 > 'a' ORDER BY v2"); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", + QueryUtil.getExplainPlan(rs)); + } + + @Test + public void testOrderByOptimizedOutWithoutPredicateInView() throws Exception { + + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3)) multi_tenant=true"); + conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t"); + + conn = createTenantSpecificConnection(); + + // Query without predicate ordered by full row key + String sql = "SELECT * FROM v1 ORDER BY k1, k2, k3"; + String expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Predicate with valid partial PK + sql = "SELECT * FROM v1 WHERE k1 = 'xyz' ORDER BY k1, k2, k3"; + expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + sql = "SELECT * FROM v1 WHERE k1 > 'xyz' ORDER BY k1, k2, k3"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xy{'] - ['tenant123456789',*]"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + String datePredicate = createStaticDate(); + sql = "SELECT * FROM v1 WHERE k1 = 'xyz' AND k2 = '123456789012345' AND k3 < TO_DATE('" + + datePredicate + "') ORDER BY k1, k2, k3"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','123456789012345',*] - ['tenant123456789','xyz','123456789012345','2015-01-01 08:00:00.000']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Predicate without valid partial PK + sql = "SELECT * FROM v1 WHERE k2 < 'abcde1234567890' ORDER BY k1, k2, k3"; + expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789']\n" + + " SERVER FILTER BY K2 < 'abcde1234567890'"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + } + + @Test + public void testOrderByOptimizedOutWithPredicateInView() throws Exception { + // Arrange + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(15) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3)) multi_tenant=true"); + conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 'xyz'"); + conn = createTenantSpecificConnection(); + + // 
Query without predicate ordered by full row key + String sql = "SELECT * FROM v1 ORDER BY k2, k3"; + String expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Query without predicate ordered by full row key, but without column view predicate + sql = "SELECT * FROM v1 ORDER BY k2, k3"; + expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Predicate with valid partial PK + sql = "SELECT * FROM v1 WHERE k1 = 'xyz' ORDER BY k2, k3"; + expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + sql = "SELECT * FROM v1 WHERE k2 < 'abcde1234567890' ORDER BY k2, k3"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz',*] - ['tenant123456789','xyz','abcde1234567890']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Predicate with full PK + String datePredicate = createStaticDate(); + sql = "SELECT * FROM v1 WHERE k2 = '123456789012345' AND k3 < TO_DATE('" + datePredicate + + "') ORDER BY k2, k3"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','123456789012345',*] - ['tenant123456789','xyz','123456789012345','2015-01-01 08:00:00.000']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Predicate with valid partial PK + sql = "SELECT * FROM v1 WHERE k3 < TO_DATE('" + datePredicate + "') ORDER BY k2, k3"; + expectedExplainOutput = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz']\n" + + " SERVER FILTER BY K3 < DATE '" + datePredicate + "'"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + } + + @Test + public void testOrderByOptimizedOutWithMultiplePredicatesInView() throws Exception { + // Arrange + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t(t_id CHAR(15) NOT NULL, k1 CHAR(3) NOT NULL, k2 CHAR(5) NOT NULL, k3 DATE NOT NULL, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2, k3 DESC)) multi_tenant=true"); + conn.createStatement() + .execute("CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 'xyz' AND k2='abcde'"); + conn = createTenantSpecificConnection(); + + // Query without predicate ordered by full row key + String sql = "SELECT * FROM v1 ORDER BY k3 DESC"; + String expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Query without predicate ordered by full row key, but without column view predicate + sql = "SELECT * FROM v1 ORDER BY k3 DESC"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde']"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Query with predicate ordered by full row key + sql = "SELECT * FROM v1 WHERE k3 <= TO_DATE('" + createStaticDate() + "') ORDER BY k3 DESC"; + 
expectedExplainOutput = + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['tenant123456789','xyz','abcde',~'2015-01-01 08:00:00.000'] - ['tenant123456789','xyz','abcde',*]"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + // Query with predicate ordered by full row key with date in reverse order + sql = "SELECT * FROM v1 WHERE k3 <= TO_DATE('" + createStaticDate() + "') ORDER BY k3"; + expectedExplainOutput = + "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER T ['tenant123456789','xyz','abcde',~'2015-01-01 08:00:00.000'] - ['tenant123456789','xyz','abcde',*]"; + assertExplainPlanIsCorrect(conn, sql, expectedExplainOutput); + assertOrderByHasBeenOptimizedOut(conn, sql); + + } + + @Test + public void testViewConstantsOptimizedOut() throws Exception { + Properties props = new Properties(); + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); + + String tenantId = "me"; + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific + conn = DriverManager.getConnection(getUrl(), props); + conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k2 = 'a'"); + conn.createStatement().execute("CREATE INDEX i1 ON v(v2)"); + + ResultSet rs = conn.createStatement() + .executeQuery("EXPLAIN SELECT v2 FROM v WHERE v2 > 'a' and k2 = 'a' ORDER BY v2,k2"); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]\n" + + " SERVER FILTER BY FIRST KEY ONLY", + QueryUtil.getExplainPlan(rs)); + + // Won't use index b/c v1 is not in index, but should optimize out k2 still from the order by + // K2 will still be referenced in the filter, as these are automatically tacked on to the where + // clause. 
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT v1 FROM v WHERE v2 > 'a' ORDER BY k2"); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['me']\n" + + " SERVER FILTER BY (V2 > 'a' AND K2 = 'a')", QueryUtil.getExplainPlan(rs)); + + // If we match K2 against a constant not equal to it's view constant, we should get a degenerate + // plan + rs = conn.createStatement() + .executeQuery("EXPLAIN SELECT v1 FROM v WHERE v2 > 'a' and k2='b' ORDER BY k2"); + assertEquals("DEGENERATE SCAN OVER V", QueryUtil.getExplainPlan(rs)); + } + + @Test + public void testViewConstantsOptimizedOutOnReadOnlyView() throws Exception { + Properties props = new Properties(); + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t(t_id VARCHAR NOT NULL, k1 VARCHAR, k2 VARCHAR, v1 VARCHAR," + + " CONSTRAINT pk PRIMARY KEY(t_id, k1, k2)) multi_tenant=true"); + + String tenantId = "me"; + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific + conn = DriverManager.getConnection(getUrl(), props); + conn.createStatement().execute("CREATE VIEW v(v2 VARCHAR) AS SELECT * FROM t WHERE k2 = 'a'"); + conn.createStatement().execute("CREATE VIEW v2(v3 VARCHAR) AS SELECT * FROM v WHERE k1 > 'a'"); + conn.createStatement().execute("CREATE INDEX i2 ON v2(v3) include(v2)"); + + // Confirm that a read-only view on an updatable view still optimizes out the read-only parts of + // the updatable view + ResultSet rs = conn.createStatement() + .executeQuery("EXPLAIN SELECT v2 FROM v2 WHERE v3 > 'a' and k2 = 'a' ORDER BY v3,k2"); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_T [-9223372036854775808,'me','a'] - [-9223372036854775808,'me',*]", + QueryUtil.getExplainPlan(rs)); + } + + // ----------------------------------------------------------------- + // Private Helper Methods + // ----------------------------------------------------------------- + private Connection createTenantSpecificConnection() throws SQLException { + Connection conn; + Properties props = new Properties(); + String tenantId = "tenant123456789"; + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); // connection is tenant-specific + conn = DriverManager.getConnection(getUrl(), props); + return conn; + } + + private void assertExplainPlanIsCorrect(Connection conn, String sql, String expectedExplainOutput) + throws SQLException { + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql); + assertEquals(expectedExplainOutput, QueryUtil.getExplainPlan(rs)); + } + + private void assertOrderByHasBeenOptimizedOut(Connection conn, String sql) throws SQLException { + PreparedStatement stmt = conn.prepareStatement(sql); + QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); + assertEquals(0, plan.getOrderBy().getOrderByExpressions().size()); + } + + /** + * Returns the default String representation of 1/1/2015 00:00:00 + */ + private String createStaticDate() { + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.DAY_OF_YEAR, 1); + cal.set(Calendar.YEAR, 2015); + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + cal.setTimeZone(TimeZone.getTimeZone("America/Los_Angeles")); + return DateUtil.DEFAULT_DATE_FORMATTER.format(cal.getTime()); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java index 
784c682226f..0a9c3758289 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/ViewCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,94 +28,101 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.apache.phoenix.schema.ColumnNotFoundException; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTable.ViewType; import org.apache.phoenix.schema.PTableKey; -import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.PropertiesUtil; import org.junit.Ignore; import org.junit.Test; public class ViewCompilerTest extends BaseConnectionlessQueryTest { - @Test - @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") - public void testViewTypeCalculation() throws Exception { - assertViewType(new String[] {"V1","V2","V3","V4"}, new String[] { - "CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 1 AND k2 = 'foo'", - "CREATE VIEW v2 AS SELECT * FROM t WHERE k2 = 'foo'", - "CREATE VIEW v3 AS SELECT * FROM t WHERE v = 'bar'||'bas'", - "CREATE VIEW v4 AS SELECT * FROM t WHERE 'bar'=v and 5+3/2 = k1", - }, ViewType.UPDATABLE); - assertViewType(new String[] {"V1","V2","V3","V4"}, new String[] { - "CREATE VIEW v1 AS SELECT * FROM t WHERE k1 < 1 AND k2 = 'foo'", - "CREATE VIEW v2 AS SELECT * FROM t WHERE substr(k2,0,3) = 'foo'", - "CREATE VIEW v3 AS SELECT * FROM t WHERE v = TO_CHAR(CURRENT_DATE())", - "CREATE VIEW v4 AS SELECT * FROM t WHERE 'bar'=v or 3 = k1", - }, ViewType.READ_ONLY); + @Test + @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") + public void testViewTypeCalculation() throws Exception { + assertViewType(new String[] { "V1", "V2", "V3", "V4" }, + new String[] { "CREATE VIEW v1 AS SELECT * FROM t WHERE k1 = 1 AND k2 = 'foo'", + "CREATE VIEW v2 AS SELECT * FROM t WHERE k2 = 'foo'", + "CREATE VIEW v3 AS SELECT * FROM t WHERE v = 'bar'||'bas'", + "CREATE VIEW v4 AS SELECT * FROM t WHERE 'bar'=v and 5+3/2 = k1", }, + ViewType.UPDATABLE); + assertViewType(new String[] { "V1", "V2", "V3", "V4" }, + new String[] { "CREATE VIEW v1 AS SELECT * FROM t WHERE k1 < 1 AND k2 = 'foo'", + "CREATE VIEW v2 AS SELECT * FROM t WHERE substr(k2,0,3) = 'foo'", + "CREATE VIEW v3 AS SELECT * FROM t WHERE v = TO_CHAR(CURRENT_DATE())", + "CREATE VIEW v4 AS SELECT * FROM t WHERE 'bar'=v or 3 = k1", }, + ViewType.READ_ONLY); + } + + public void assertViewType(String[] viewNames, String[] viewDDLs, ViewType viewType) + throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + PhoenixConnection conn = + DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); + String ct = + "CREATE TABLE t (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"; + conn.createStatement().execute(ct); + + for (String viewDDL : viewDDLs) { + conn.createStatement().execute(viewDDL); } - - public void assertViewType(String[] viewNames, String[] viewDDLs, ViewType viewType) throws Exception { - Properties props = 
PropertiesUtil.deepCopy(TEST_PROPERTIES); - PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); - String ct = "CREATE TABLE t (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"; - conn.createStatement().execute(ct); - - for (String viewDDL : viewDDLs) { - conn.createStatement().execute(viewDDL); - } - - StringBuilder buf = new StringBuilder(); - int count = 0; - for (String view : viewNames) { - PTable table = conn.getTable(new PTableKey(null, view)); - assertEquals(viewType, table.getViewType()); - conn.createStatement().execute("DROP VIEW " + table.getName().getString()); - buf.append(' '); - buf.append(table.getName().getString()); - count++; - } - assertEquals("Expected " + viewDDLs.length + ", but got " + count + ":"+ buf.toString(), viewDDLs.length, count); + + StringBuilder buf = new StringBuilder(); + int count = 0; + for (String view : viewNames) { + PTable table = conn.getTable(new PTableKey(null, view)); + assertEquals(viewType, table.getViewType()); + conn.createStatement().execute("DROP VIEW " + table.getName().getString()); + buf.append(' '); + buf.append(table.getName().getString()); + count++; } + assertEquals("Expected " + viewDDLs.length + ", but got " + count + ":" + buf.toString(), + viewDDLs.length, count); + } - @Test - @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") - public void testViewInvalidation() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); - String ct = "CREATE TABLE s1.t (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"; - conn.createStatement().execute(ct); - conn.createStatement().execute("CREATE VIEW s2.v3 AS SELECT * FROM s1.t WHERE v = 'bar'"); - - try { - conn.createStatement().execute("ALTER VIEW s2.v3 DROP COLUMN v"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_DROP_VIEW_REFERENCED_COL.getErrorCode(), e.getErrorCode()); - } - - // No error, as v still exists - conn.createStatement().executeQuery("SELECT v FROM s2.v3"); - conn.createStatement().execute("CREATE VIEW s2.v4 AS SELECT * FROM s1.t WHERE v = 'bas'"); + @Test + @Ignore("PHOENIX-4555 should mark these views as ViewType.READONLY") + public void testViewInvalidation() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + PhoenixConnection conn = + DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); + String ct = + "CREATE TABLE s1.t (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"; + conn.createStatement().execute(ct); + conn.createStatement().execute("CREATE VIEW s2.v3 AS SELECT * FROM s1.t WHERE v = 'bar'"); - // Can drop view - conn.createStatement().execute("DROP VIEW s2.v3"); + try { + conn.createStatement().execute("ALTER VIEW s2.v3 DROP COLUMN v"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_DROP_VIEW_REFERENCED_COL.getErrorCode(), + e.getErrorCode()); } + // No error, as v still exists + conn.createStatement().executeQuery("SELECT v FROM s2.v3"); + conn.createStatement().execute("CREATE VIEW s2.v4 AS SELECT * FROM s1.t WHERE v = 'bas'"); + + // Can drop view + conn.createStatement().execute("DROP VIEW s2.v3"); + } + + @Test + public void testInvalidUpsertSelect() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + PhoenixConnection 
conn = + DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); + conn.createStatement().execute( + "CREATE TABLE t1 (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"); + conn.createStatement() + .execute("CREATE TABLE t2 (k3 INTEGER NOT NULL, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k3))"); + conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t1 WHERE k1 = 1"); - @Test - public void testInvalidUpsertSelect() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class); - conn.createStatement().execute("CREATE TABLE t1 (k1 INTEGER NOT NULL, k2 VARCHAR, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k1,k2))"); - conn.createStatement().execute("CREATE TABLE t2 (k3 INTEGER NOT NULL, v VARCHAR, CONSTRAINT pk PRIMARY KEY (k3))"); - conn.createStatement().execute("CREATE VIEW v1 AS SELECT * FROM t1 WHERE k1 = 1"); - - try { - conn.createStatement().executeUpdate("UPSERT INTO v1 SELECT k3,'foo',v FROM t2"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN.getErrorCode(), e.getErrorCode()); - } + try { + conn.createStatement().executeUpdate("UPSERT INTO v1 SELECT k3,'foo',v FROM t2"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN.getErrorCode(), e.getErrorCode()); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java index e1b68a952b6..89354f00732 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +39,6 @@ import static org.junit.Assert.fail; import java.math.BigDecimal; -import java.sql.Connection; import java.sql.Date; import java.sql.DriverManager; import java.sql.SQLException; @@ -79,1087 +78,1107 @@ import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.NumberUtil; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.StringUtil; -import org.apache.phoenix.util.TestUtil; import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - - public class WhereCompilerTest extends BaseConnectionlessQueryTest { - private PhoenixPreparedStatement newPreparedStatement(PhoenixConnection pconn, String query) throws SQLException { - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - assertRoundtrip(query); - return pstmt; - } - - @Test - public void testSingleEqualFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0)), - filter); - } - - @Test - public void testOrPKWithAndPKAndNotPK() throws SQLException { - String query = "select * from bugTable where ID = 'i1' or (ID = 'i2' and company = 'c3')"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("create table bugTable(ID varchar primary key,company varchar)"); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - Expression idExpression = new ColumnRef(plan.getTableRef(), plan.getTableRef().getTable().getColumnForColumnName("ID").getPosition()).newColumnExpression(); - Expression id = new RowKeyColumnExpression(idExpression,new RowKeyValueAccessor(plan.getTableRef().getTable().getPKColumns(),0)); - Expression company = new KeyValueColumnExpression(plan.getTableRef().getTable().getColumnForColumnName("COMPANY")); - // FilterList has no equals implementation - assertTrue(filter instanceof FilterList); - FilterList filterList = (FilterList)filter; - assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); - assertEquals( - Arrays.asList( - new SkipScanFilter( - ImmutableList.of(Arrays.asList( - pointRange("i1"), - pointRange("i2"))), - SchemaUtil.VAR_BINARY_SCHEMA, false), - singleKVFilter( - 
or(constantComparison(CompareOperator.EQUAL,id,"i1"), - and(constantComparison(CompareOperator.EQUAL,id,"i2"), - constantComparison(CompareOperator.EQUAL,company,"c3"))))), - filterList.getFilters()); - } - - @Test - public void testAndPKAndNotPK() throws SQLException { - String query = "select * from bugTable where ID = 'i2' and company = 'c3'"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("create table bugTable(ID varchar primary key,company varchar)"); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - PColumn column = plan.getTableRef().getTable().getColumnForColumnName("COMPANY"); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - new KeyValueColumnExpression(column), - "c3")), - filter); - } - - @Test - public void testSingleFixedFullPkSalted() throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20"); - String query = "select * from t where k=" + 1; - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] key = new byte[PLong.INSTANCE.getByteSize() + 1]; - PLong.INSTANCE.toBytes(1L, key, 1); - key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); - byte[] expectedStartKey = key; - byte[] expectedEndKey = ByteUtil.nextKey(key); - byte[] startKey = scan.getStartRow(); - byte[] stopKey = scan.getStopRow(); - assertArrayEquals(expectedStartKey, startKey); - assertArrayEquals(expectedEndKey, stopKey); - } - - @Test - public void testSingleVariableFullPkSalted() throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("CREATE TABLE t (k varchar(10) primary key, v varchar) SALT_BUCKETS=20"); - String query = "select * from t where k='a'"; - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] key = new byte[2]; - PVarchar.INSTANCE.toBytes("a", key, 1); - key[0] = SaltingUtil.getSaltingByte(key, 1, 1, 20); - byte[] expectedStartKey = key; - //lexicographically this is the next PK - byte[] expectedEndKey = ByteUtil.concat(key,new byte[]{0}); - byte[] startKey = scan.getStartRow(); - byte[] stopKey = scan.getStopRow(); - assertTrue(Bytes.compareTo(expectedStartKey, startKey) == 0); - assertTrue(Bytes.compareTo(expectedEndKey, stopKey) == 0); - } - - @Test - public void testMultiFixedFullPkSalted() throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20"); - String query = "select * from t where k in (1,3)"; - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - 
QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - byte[] key = new byte[PLong.INSTANCE.getByteSize() + 1]; - PLong.INSTANCE.toBytes(1L, key, 1); - key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); - byte[] startKey1 = key; - - key = new byte[PLong.INSTANCE.getByteSize() + 1]; - PLong.INSTANCE.toBytes(3L, key, 1); - key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); - byte[] startKey2 = key; - - byte[] startKey = scan.getStartRow(); - byte[] stopKey = scan.getStopRow(); - - // Due to salting byte, the 1 key may be after the 3 key - byte[] expectedStartKey; - byte[] expectedEndKey; - List> expectedRanges = Collections.singletonList( - Arrays.asList(KeyRange.getKeyRange(startKey1), - KeyRange.getKeyRange(startKey2))); - if (Bytes.compareTo(startKey1, startKey2) > 0) { - expectedStartKey = startKey2; - expectedEndKey = startKey1; - Collections.reverse(expectedRanges.get(0)); - } else { - expectedStartKey = startKey1; - expectedEndKey = startKey2; - } - assertEquals(0,startKey.length); - assertEquals(0,stopKey.length); - - assertNotNull(filter); - assertTrue(filter instanceof SkipScanFilter); - SkipScanFilter skipScanFilter = (SkipScanFilter)filter; - assertEquals(1,skipScanFilter.getSlots().size()); - assertEquals(2,skipScanFilter.getSlots().get(0).size()); - assertArrayEquals(expectedStartKey, skipScanFilter.getSlots().get(0).get(0).getLowerRange()); - assertArrayEquals(expectedEndKey, skipScanFilter.getSlots().get(0).get(1).getLowerRange()); - StatementContext context = plan.getContext(); - ScanRanges scanRanges = context.getScanRanges(); - List> ranges = scanRanges.getRanges(); - assertEquals(expectedRanges, ranges); - } - - @Test - public void testMultiColumnEqualFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_string=b_string"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - multiEncodedKVFilter(columnComparison( - CompareOperator.EQUAL, - A_STRING, - B_STRING), TWO_BYTE_QUALIFIERS), - filter); - } - - @Test - public void testCollapseFunctionToNull() throws SQLException { - String query = "select * from atable where substr(entity_id,null) = 'foo'"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - - assertArrayEquals(scan.getStartRow(),KeyRange.EMPTY_RANGE.getLowerRange()); - assertArrayEquals(scan.getStopRow(),KeyRange.EMPTY_RANGE.getUpperRange()); - } - - @Test - public void testAndFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id=? 
and a_integer=0 and a_string='foo'"; - List binds = Arrays.asList(tenantId); - - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - - assertEquals( - multiEncodedKVFilter(and( - constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0), - constantComparison( - CompareOperator.EQUAL, - A_STRING, - "foo")), TWO_BYTE_QUALIFIERS), - filter); - } - - @Test - public void testRHSLiteral() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and 0 >= a_integer"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.LESS_OR_EQUAL, - A_INTEGER, - 0)), - filter); - } - - @Test - public void testToDateFilter() throws Exception { - String tenantId = "000000000000001"; - String dateStr = "2012-01-01 12:00:00"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_date >= to_date('" + dateStr + "')"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - - Date date = DateUtil.parseDate(dateStr); - - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.GREATER_OR_EQUAL, - A_DATE, - date)), - filter); + private PhoenixPreparedStatement newPreparedStatement(PhoenixConnection pconn, String query) + throws SQLException { + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + assertRoundtrip(query); + return pstmt; + } + + @Test + public void testSingleEqualFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0)), filter); + } + + @Test + public void testOrPKWithAndPKAndNotPK() throws SQLException { + String query = "select * from bugTable where ID = 'i1' or (ID = 'i2' and company = 'c3')"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + pconn.createStatement() + .execute("create table bugTable(ID varchar primary key,company varchar)"); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter 
filter = scan.getFilter(); + Expression idExpression = new ColumnRef(plan.getTableRef(), + plan.getTableRef().getTable().getColumnForColumnName("ID").getPosition()) + .newColumnExpression(); + Expression id = new RowKeyColumnExpression(idExpression, + new RowKeyValueAccessor(plan.getTableRef().getTable().getPKColumns(), 0)); + Expression company = + new KeyValueColumnExpression(plan.getTableRef().getTable().getColumnForColumnName("COMPANY")); + // FilterList has no equals implementation + assertTrue(filter instanceof FilterList); + FilterList filterList = (FilterList) filter; + assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); + assertEquals( + Arrays + .asList( + new SkipScanFilter(ImmutableList.of(Arrays.asList(pointRange("i1"), pointRange("i2"))), + SchemaUtil.VAR_BINARY_SCHEMA, false), + singleKVFilter(or(constantComparison(CompareOperator.EQUAL, id, "i1"), + and(constantComparison(CompareOperator.EQUAL, id, "i2"), + constantComparison(CompareOperator.EQUAL, company, "c3"))))), + filterList.getFilters()); + } + + @Test + public void testAndPKAndNotPK() throws SQLException { + String query = "select * from bugTable where ID = 'i2' and company = 'c3'"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + pconn.createStatement() + .execute("create table bugTable(ID varchar primary key,company varchar)"); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + PColumn column = plan.getTableRef().getTable().getColumnForColumnName("COMPANY"); + assertEquals( + singleKVFilter( + constantComparison(CompareOperator.EQUAL, new KeyValueColumnExpression(column), "c3")), + filter); + } + + @Test + public void testSingleFixedFullPkSalted() throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + pconn.createStatement() + .execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20"); + String query = "select * from t where k=" + 1; + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] key = new byte[PLong.INSTANCE.getByteSize() + 1]; + PLong.INSTANCE.toBytes(1L, key, 1); + key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); + byte[] expectedStartKey = key; + byte[] expectedEndKey = ByteUtil.nextKey(key); + byte[] startKey = scan.getStartRow(); + byte[] stopKey = scan.getStopRow(); + assertArrayEquals(expectedStartKey, startKey); + assertArrayEquals(expectedEndKey, stopKey); + } + + @Test + public void testSingleVariableFullPkSalted() throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + pconn.createStatement() + .execute("CREATE TABLE t (k varchar(10) primary key, v varchar) SALT_BUCKETS=20"); + String query = "select * from t where k='a'"; + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] key = new byte[2]; + 
PVarchar.INSTANCE.toBytes("a", key, 1); + key[0] = SaltingUtil.getSaltingByte(key, 1, 1, 20); + byte[] expectedStartKey = key; + // lexicographically this is the next PK + byte[] expectedEndKey = ByteUtil.concat(key, new byte[] { 0 }); + byte[] startKey = scan.getStartRow(); + byte[] stopKey = scan.getStopRow(); + assertTrue(Bytes.compareTo(expectedStartKey, startKey) == 0); + assertTrue(Bytes.compareTo(expectedEndKey, stopKey) == 0); + } + + @Test + public void testMultiFixedFullPkSalted() throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + pconn.createStatement() + .execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20"); + String query = "select * from t where k in (1,3)"; + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + byte[] key = new byte[PLong.INSTANCE.getByteSize() + 1]; + PLong.INSTANCE.toBytes(1L, key, 1); + key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); + byte[] startKey1 = key; + + key = new byte[PLong.INSTANCE.getByteSize() + 1]; + PLong.INSTANCE.toBytes(3L, key, 1); + key[0] = SaltingUtil.getSaltingByte(key, 1, PLong.INSTANCE.getByteSize(), 20); + byte[] startKey2 = key; + + byte[] startKey = scan.getStartRow(); + byte[] stopKey = scan.getStopRow(); + + // Due to salting byte, the 1 key may be after the 3 key + byte[] expectedStartKey; + byte[] expectedEndKey; + List> expectedRanges = Collections.singletonList( + Arrays.asList(KeyRange.getKeyRange(startKey1), KeyRange.getKeyRange(startKey2))); + if (Bytes.compareTo(startKey1, startKey2) > 0) { + expectedStartKey = startKey2; + expectedEndKey = startKey1; + Collections.reverse(expectedRanges.get(0)); + } else { + expectedStartKey = startKey1; + expectedEndKey = startKey2; } - - private void helpTestToNumberFilter(String toNumberClause, BigDecimal expectedDecimal) throws Exception { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and x_decimal >= " + toNumberClause; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.GREATER_OR_EQUAL, - X_DECIMAL, - expectedDecimal)), - filter); -} - - private void helpTestToNumberFilterWithNoPattern(String stringValue) throws Exception { - String toNumberClause = "to_number('" + stringValue + "')"; - BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal(stringValue)); - helpTestToNumberFilter(toNumberClause, expectedDecimal); + assertEquals(0, startKey.length); + assertEquals(0, stopKey.length); + + assertNotNull(filter); + assertTrue(filter instanceof SkipScanFilter); + SkipScanFilter skipScanFilter = (SkipScanFilter) filter; + assertEquals(1, skipScanFilter.getSlots().size()); + assertEquals(2, skipScanFilter.getSlots().get(0).size()); + assertArrayEquals(expectedStartKey, skipScanFilter.getSlots().get(0).get(0).getLowerRange()); + assertArrayEquals(expectedEndKey, skipScanFilter.getSlots().get(0).get(1).getLowerRange()); + StatementContext 
context = plan.getContext(); + ScanRanges scanRanges = context.getScanRanges(); + List> ranges = scanRanges.getRanges(); + assertEquals(expectedRanges, ranges); + } + + @Test + public void testMultiColumnEqualFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and a_string=b_string"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(multiEncodedKVFilter(columnComparison(CompareOperator.EQUAL, A_STRING, B_STRING), + TWO_BYTE_QUALIFIERS), filter); + } + + @Test + public void testCollapseFunctionToNull() throws SQLException { + String query = "select * from atable where substr(entity_id,null) = 'foo'"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + + assertArrayEquals(scan.getStartRow(), KeyRange.EMPTY_RANGE.getLowerRange()); + assertArrayEquals(scan.getStopRow(), KeyRange.EMPTY_RANGE.getUpperRange()); + } + + @Test + public void testAndFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id=? and a_integer=0 and a_string='foo'"; + List binds = Arrays. asList(tenantId); + + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + + assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0), + constantComparison(CompareOperator.EQUAL, A_STRING, "foo")), TWO_BYTE_QUALIFIERS), filter); + } + + @Test + public void testRHSLiteral() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and 0 >= a_integer"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter(constantComparison(CompareOperator.LESS_OR_EQUAL, A_INTEGER, 0)), + filter); + } + + @Test + public void testToDateFilter() throws Exception { + String tenantId = "000000000000001"; + String dateStr = "2012-01-01 12:00:00"; + String query = "select * from atable where organization_id='" + tenantId + + "' and a_date >= to_date('" + dateStr + "')"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = 
plan.getContext().getScan(); + Filter filter = scan.getFilter(); + + Date date = DateUtil.parseDate(dateStr); + + assertEquals(singleKVFilter(constantComparison(CompareOperator.GREATER_OR_EQUAL, A_DATE, date)), + filter); + } + + private void helpTestToNumberFilter(String toNumberClause, BigDecimal expectedDecimal) + throws Exception { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' and x_decimal >= " + + toNumberClause; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + + assertEquals(singleKVFilter( + constantComparison(CompareOperator.GREATER_OR_EQUAL, X_DECIMAL, expectedDecimal)), filter); + } + + private void helpTestToNumberFilterWithNoPattern(String stringValue) throws Exception { + String toNumberClause = "to_number('" + stringValue + "')"; + BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal(stringValue)); + helpTestToNumberFilter(toNumberClause, expectedDecimal); + } + + @Test + public void testToNumberFilterWithInteger() throws Exception { + String stringValue = "123"; + helpTestToNumberFilterWithNoPattern(stringValue); + } + + @Test + public void testToNumberFilterWithDecimal() throws Exception { + String stringValue = "123.33"; + helpTestToNumberFilterWithNoPattern(stringValue); + } + + @Test + public void testToNumberFilterWithNegativeDecimal() throws Exception { + String stringValue = "-123.33"; + helpTestToNumberFilterWithNoPattern(stringValue); + } + + @Test + public void testToNumberFilterWithPatternParam() throws Exception { + String toNumberClause = "to_number('!1.23333E2', '!0.00000E0')"; + BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal("123.333")); + helpTestToNumberFilter(toNumberClause, expectedDecimal); + } + + @Test(expected = AssertionError.class) // compileStatement() fails because zero rows are found by + // to_number() + public void testToNumberFilterWithPatternParamNegativeTest() throws Exception { + String toNumberClause = "to_number('$123.33', '000.00')"; // no currency sign in pattern param + BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal("123.33")); + helpTestToNumberFilter(toNumberClause, expectedDecimal); + } + + @Test + public void testRowKeyFilter() throws SQLException { + String keyPrefix = "foo"; + String query = "select * from atable where substr(entity_id,1,3)=?"; + List binds = Arrays. asList(keyPrefix); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + + assertEquals( + new RowKeyComparisonFilter( + constantComparison(CompareOperator.EQUAL, + new SubstrFunction(Arrays. 
asList( + new RowKeyColumnExpression(ENTITY_ID, + new RowKeyValueAccessor(ATABLE.getPKColumns(), 1)), + LiteralExpression.newConstant(1), LiteralExpression.newConstant(3))), + keyPrefix), + QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES), + filter); + } + + @Test + public void testPaddedRowKeyFilter() throws SQLException { + String keyPrefix = "fo"; + String query = "select * from atable where entity_id=?"; + List binds = Arrays. asList(keyPrefix); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + assertNotNull(scan.getFilter()); + } + + @Test + public void testPaddedStartStopKey() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "fo"; + String query = "select * from atable where organization_id=? AND entity_id=?"; + List binds = Arrays. asList(tenantId, keyPrefix); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] expectedStartRow = + ByteUtil.concat(Bytes.toBytes(tenantId), StringUtil.padChar(Bytes.toBytes(keyPrefix), 15)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(expectedStartRow), scan.getStopRow()); + } + + @Test + public void testDegenerateRowKeyFilter() throws SQLException { + String keyPrefix = "foobar"; + String query = "select * from atable where substr(entity_id,1,3)=?"; + List binds = Arrays. asList(keyPrefix); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + // Degenerate b/c "foobar" is more than 3 characters + assertDegenerate(plan.getContext()); + } + + @Test + public void testDegenerateBiggerThanMaxLengthVarchar() throws SQLException { + byte[] tooBigValue = new byte[101]; + Arrays.fill(tooBigValue, (byte) 50); + String aString = (String) PVarchar.INSTANCE.toObject(tooBigValue); + String query = "select * from atable where a_string=?"; + List binds = Arrays. asList(aString); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + // Degenerate b/c a_string length is 100 + assertDegenerate(plan.getContext()); + } + + @Test + public void testOrFilter() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "foo"; + int aInt = 2; + String query = + "select * from atable where organization_id=? and (substr(entity_id,1,3)=? or a_integer=?)"; + List binds = Arrays. 
asList(tenantId, keyPrefix, aInt); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + bindParams(pstmt, binds); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter( // single b/c one column is a row key column + or( + constantComparison(CompareOperator.EQUAL, + new SubstrFunction(Arrays. asList( + new RowKeyColumnExpression(ENTITY_ID, + new RowKeyValueAccessor(ATABLE.getPKColumns(), 1)), + LiteralExpression.newConstant(1), LiteralExpression.newConstant(3))), + keyPrefix), + constantComparison(CompareOperator.EQUAL, A_INTEGER, aInt))), + filter); + } + + @Test + public void testTypeMismatch() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and a_integer > 'foo'"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + + try { + pstmt.optimizeQuery(); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Type mismatch")); } - - @Test - public void testToNumberFilterWithInteger() throws Exception { - String stringValue = "123"; - helpTestToNumberFilterWithNoPattern(stringValue); + } + + @Test + public void testAndFalseFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2=3"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + assertDegenerate(plan.getContext()); + } + + @Test + public void testFalseFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' and 2=3"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + assertDegenerate(plan.getContext()); + } + + @Test + public void testTrueFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' and 2<=2"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + assertNull(scan.getFilter()); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testAndTrueFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2<3"; + PhoenixConnection pconn = + 
DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0)), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testOrFalseFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3!=3)"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter(constantComparison(CompareOperator.EQUAL, A_INTEGER, 0)), filter); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testOrTrueFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3>2)"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testInFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and a_string IN ('a','b')"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + + Filter filter = scan.getFilter(); + assertEquals(singleKVFilter(in(A_STRING, "a", "b")), filter); + } + + @Test + public void testInListFilter() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String query = String.format("select * from %s where organization_id IN ('%s','%s','%s')", + ATABLE_NAME, tenantId1, tenantId3, tenantId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + 
PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId3); + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + + Filter filter = scan.getFilter(); + assertEquals(new SkipScanFilter( + ImmutableList + .of(Arrays.asList(pointRange(tenantId1), pointRange(tenantId2), pointRange(tenantId3))), + plan.getTableRef().getTable().getRowKeySchema(), false), filter); + } + + @Test + @Ignore("OR not yet optimized") + public void testOr2InFilter() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String query = String.format( + "select * from %s where organization_id='%s' OR organization_id='%s' OR organization_id='%s'", + ATABLE_NAME, tenantId1, tenantId3, tenantId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + + Filter filter = scan.getFilter(); + assertEquals(new SkipScanFilter( + ImmutableList + .of(Arrays.asList(pointRange(tenantId1), pointRange(tenantId2), pointRange(tenantId3))), + plan.getTableRef().getTable().getRowKeySchema(), false), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId3); + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testSecondPkColInListFilter() throws SQLException { + String tenantId = "000000000000001"; + String entityId1 = "00000000000000X"; + String entityId2 = "00000000000000Y"; + String query = + String.format("select * from %s where organization_id='%s' AND entity_id IN ('%s','%s')", + ATABLE_NAME, tenantId, entityId1, entityId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + entityId1); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId + entityId2); + assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), + scan.getStopRow()); + + Filter filter = scan.getFilter(); + + assertEquals(new SkipScanFilter( + ImmutableList + .of(Arrays.asList(pointRange(tenantId, entityId1), pointRange(tenantId, entityId2))), + SchemaUtil.VAR_BINARY_SCHEMA, false), filter); + } + + @Test + public void testInListWithAnd1GTEFilter() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String entityId1 = "00000000000000X"; + String entityId2 = "00000000000000Y"; + String query = String.format( + "select * from %s where organization_id IN ('%s','%s','%s') AND entity_id>='%s' AND entity_id<='%s'", + ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(new SkipScanFilter( + ImmutableList.of( + Arrays.asList(pointRange(tenantId1), pointRange(tenantId2), pointRange(tenantId3)), + Arrays.asList(PChar.INSTANCE.getKeyRange(Bytes.toBytes(entityId1), true, + Bytes.toBytes(entityId2), true, SortOrder.ASC))), + plan.getTableRef().getTable().getRowKeySchema(), false), filter); + } + + @Test + public void testInListWithAnd1Filter() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String entityId = "00000000000000X"; + String query = + String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'", + ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals(new SkipScanFilter( + ImmutableList.of(Arrays.asList(pointRange(tenantId1, entityId), + pointRange(tenantId2, entityId), pointRange(tenantId3, entityId))), + SchemaUtil.VAR_BINARY_SCHEMA, false), filter); + } + + @Test + public void testInListWithAnd1FilterScankey() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String entityId = "00000000000000X"; + String query = + String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'", + ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId3), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), + scan.getStopRow()); + // TODO: validate scan ranges + } + + private static KeyRange pointRange(String... 
ids) { + byte[] theKey = ByteUtil.EMPTY_BYTE_ARRAY; + for (String id : ids) { + theKey = ByteUtil.concat(theKey, Bytes.toBytes(id)); } - - @Test - public void testToNumberFilterWithDecimal() throws Exception { - String stringValue = "123.33"; - helpTestToNumberFilterWithNoPattern(stringValue); - } - - @Test - public void testToNumberFilterWithNegativeDecimal() throws Exception { - String stringValue = "-123.33"; - helpTestToNumberFilterWithNoPattern(stringValue); - } - - @Test - public void testToNumberFilterWithPatternParam() throws Exception { - String toNumberClause = "to_number('!1.23333E2', '!0.00000E0')"; - BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal("123.333")); - helpTestToNumberFilter(toNumberClause, expectedDecimal); - } - - @Test(expected=AssertionError.class) // compileStatement() fails because zero rows are found by to_number() - public void testToNumberFilterWithPatternParamNegativeTest() throws Exception { - String toNumberClause = "to_number('$123.33', '000.00')"; // no currency sign in pattern param - BigDecimal expectedDecimal = NumberUtil.normalize(new BigDecimal("123.33")); - helpTestToNumberFilter(toNumberClause, expectedDecimal); - } - - @Test - public void testRowKeyFilter() throws SQLException { - String keyPrefix = "foo"; - String query = "select * from atable where substr(entity_id,1,3)=?"; - List binds = Arrays.asList(keyPrefix); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - - assertEquals( - new RowKeyComparisonFilter( - constantComparison(CompareOperator.EQUAL, - new SubstrFunction( - Arrays.asList( - new RowKeyColumnExpression(ENTITY_ID,new RowKeyValueAccessor(ATABLE.getPKColumns(),1)), - LiteralExpression.newConstant(1), - LiteralExpression.newConstant(3)) - ), - keyPrefix), - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES), - filter); - } - - @Test - public void testPaddedRowKeyFilter() throws SQLException { - String keyPrefix = "fo"; - String query = "select * from atable where entity_id=?"; - List binds = Arrays.asList(keyPrefix); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - assertEquals(0,scan.getStartRow().length); - assertEquals(0,scan.getStopRow().length); - assertNotNull(scan.getFilter()); - } - - @Test - public void testPaddedStartStopKey() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "fo"; - String query = "select * from atable where organization_id=? 
AND entity_id=?"; - List binds = Arrays.asList(tenantId,keyPrefix); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] expectedStartRow = ByteUtil.concat(Bytes.toBytes(tenantId), StringUtil.padChar(Bytes.toBytes(keyPrefix), 15)); - assertArrayEquals(expectedStartRow,scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(expectedStartRow),scan.getStopRow()); - } - - @Test - public void testDegenerateRowKeyFilter() throws SQLException { - String keyPrefix = "foobar"; - String query = "select * from atable where substr(entity_id,1,3)=?"; - List binds = Arrays.asList(keyPrefix); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - // Degenerate b/c "foobar" is more than 3 characters - assertDegenerate(plan.getContext()); - } - - @Test - public void testDegenerateBiggerThanMaxLengthVarchar() throws SQLException { - byte[] tooBigValue = new byte[101]; - Arrays.fill(tooBigValue, (byte)50); - String aString = (String) PVarchar.INSTANCE.toObject(tooBigValue); - String query = "select * from atable where a_string=?"; - List binds = Arrays.asList(aString); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - // Degenerate b/c a_string length is 100 - assertDegenerate(plan.getContext()); - } - - @Test - public void testOrFilter() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "foo"; - int aInt = 2; - String query = "select * from atable where organization_id=? and (substr(entity_id,1,3)=? or a_integer=?)"; - List binds = Arrays.asList(tenantId, keyPrefix, aInt); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - bindParams(pstmt, binds); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter( // single b/c one column is a row key column - or( - constantComparison( - CompareOperator.EQUAL, - new SubstrFunction(Arrays. 
asList( - new RowKeyColumnExpression( - ENTITY_ID, - new RowKeyValueAccessor(ATABLE.getPKColumns(), 1)), - LiteralExpression.newConstant(1), - LiteralExpression.newConstant(3))), - keyPrefix), - constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - aInt))), - filter); - } - - @Test - public void testTypeMismatch() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer > 'foo'"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - - try { - pstmt.optimizeQuery(); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Type mismatch")); - } - } - - @Test - public void testAndFalseFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2=3"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - assertDegenerate(plan.getContext()); - } - - @Test - public void testFalseFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and 2=3"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - assertDegenerate(plan.getContext()); - } - - @Test - public void testTrueFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and 2<=2"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - assertNull(scan.getFilter()); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testAndTrueFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer=0 and 2<3"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0)), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testOrFalseFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = 
"select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3!=3)"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(constantComparison( - CompareOperator.EQUAL, - A_INTEGER, - 0)), - filter); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testOrTrueFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and (a_integer=0 or 3>2)"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testInFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_string IN ('a','b')"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(in( - A_STRING, - "a", - "b")), - filter); - } - - @Test - public void testInListFilter() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String query = String.format("select * from %s where organization_id IN ('%s','%s','%s')", - ATABLE_NAME, tenantId1, tenantId3, tenantId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId3); - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - - Filter filter = scan.getFilter(); - assertEquals( - new SkipScanFilter( - ImmutableList.of(Arrays.asList( - pointRange(tenantId1), - pointRange(tenantId2), - pointRange(tenantId3))), - plan.getTableRef().getTable().getRowKeySchema(), false), - filter); - } - - @Test @Ignore("OR not yet optimized") - public void testOr2InFilter() throws SQLException 
{ - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String query = String.format("select * from %s where organization_id='%s' OR organization_id='%s' OR organization_id='%s'", - ATABLE_NAME, tenantId1, tenantId3, tenantId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - - Filter filter = scan.getFilter(); - assertEquals( - new SkipScanFilter( - ImmutableList.of(Arrays.asList( - pointRange(tenantId1), - pointRange(tenantId2), - pointRange(tenantId3))), - plan.getTableRef().getTable().getRowKeySchema(), false), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId3); - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testSecondPkColInListFilter() throws SQLException { - String tenantId = "000000000000001"; - String entityId1 = "00000000000000X"; - String entityId2 = "00000000000000Y"; - String query = String.format("select * from %s where organization_id='%s' AND entity_id IN ('%s','%s')", - ATABLE_NAME, tenantId, entityId1, entityId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + entityId1); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = PVarchar.INSTANCE.toBytes(tenantId + entityId2); - assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - - Filter filter = scan.getFilter(); - - assertEquals( - new SkipScanFilter( - ImmutableList.of( - Arrays.asList( - pointRange(tenantId,entityId1), - pointRange(tenantId,entityId2))), - SchemaUtil.VAR_BINARY_SCHEMA, false), - filter); - } - - @Test - public void testInListWithAnd1GTEFilter() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String entityId1 = "00000000000000X"; - String entityId2 = "00000000000000Y"; - String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id>='%s' AND entity_id<='%s'", - ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - new SkipScanFilter( - ImmutableList.of( - Arrays.asList( - pointRange(tenantId1), - pointRange(tenantId2), - pointRange(tenantId3)), - Arrays.asList(PChar.INSTANCE.getKeyRange( - Bytes.toBytes(entityId1), - true, - Bytes.toBytes(entityId2), - true, SortOrder.ASC))), - plan.getTableRef().getTable().getRowKeySchema(), false), - filter); - } - - @Test - public void testInListWithAnd1Filter() throws SQLException { - String tenantId1 = "000000000000001"; - 
String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String entityId = "00000000000000X"; - String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'", - ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - new SkipScanFilter( - ImmutableList.of( - Arrays.asList( - pointRange(tenantId1, entityId), - pointRange(tenantId2, entityId), - pointRange(tenantId3, entityId))), - SchemaUtil.VAR_BINARY_SCHEMA, false), - filter); - } - @Test - public void testInListWithAnd1FilterScankey() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String entityId = "00000000000000X"; - String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'", - ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId3), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - // TODO: validate scan ranges - } - - private static KeyRange pointRange(String... 
ids) { - byte[] theKey = ByteUtil.EMPTY_BYTE_ARRAY; - for (String id : ids) { - theKey = ByteUtil.concat(theKey, Bytes.toBytes(id)); - } - return pointRange(theKey); - } - private static KeyRange pointRange(byte[] bytes) { - return KeyRange.POINT.apply(bytes); - } - - @Test - public void testInListWithAnd2Filter() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String entityId1 = "00000000000000X"; - String entityId2 = "00000000000000Y"; - String query = String.format("select * from %s where organization_id IN ('%s','%s') AND entity_id IN ('%s', '%s')", - ATABLE_NAME, tenantId1, tenantId2, entityId1, entityId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - - Filter filter = scan.getFilter(); - assertEquals( - new SkipScanFilter( - ImmutableList.>of(ImmutableList.of( - pointRange(tenantId1, entityId1), - pointRange(tenantId1, entityId2), - pointRange(tenantId2, entityId1), - pointRange(tenantId2, entityId2))), - SchemaUtil.VAR_BINARY_SCHEMA, false), - filter); - } - - @Test - public void testPartialRangeFilter() throws SQLException { - // I know these id's are ridiculous, but users can write queries that look like this - String tenantId1 = "001"; - String tenantId2 = "02"; - String query = String.format("select * from %s where organization_id > '%s' AND organization_id < '%s'", - ATABLE_NAME, tenantId1, tenantId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - - assertNull(scan.getFilter()); - byte[] wideLower = ByteUtil.nextKey(StringUtil.padChar(Bytes.toBytes(tenantId1), 15)); - byte[] wideUpper = StringUtil.padChar(Bytes.toBytes(tenantId2), 15); - assertArrayEquals(wideLower, scan.getStartRow()); - assertArrayEquals(wideUpper, scan.getStopRow()); - } - - @Test - public void testInListWithAnd2FilterScanKey() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000002"; - String tenantId3 = "000000000000003"; - String entityId1 = "00000000000000X"; - String entityId2 = "00000000000000Y"; - String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id IN ('%s', '%s')", - ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2); - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId1)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId3), PVarchar.INSTANCE.toBytes(entityId2)); - assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - // TODO: validate scan ranges - } - - @Test - public void testBetweenFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = 
"select * from atable where organization_id='" + tenantId + "' and a_integer between 0 and 10"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(and( - constantComparison( - CompareOperator.GREATER_OR_EQUAL, - A_INTEGER, - 0), - constantComparison( - CompareOperator.LESS_OR_EQUAL, - A_INTEGER, - 10))), - filter); - } - - @Test - public void testNotBetweenFilter() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and a_integer not between 0 and 10"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - assertEquals( - singleKVFilter(not(and( - constantComparison( - CompareOperator.GREATER_OR_EQUAL, - A_INTEGER, - 0), - constantComparison( - CompareOperator.LESS_OR_EQUAL, - A_INTEGER, - 10)))).toString(), - filter.toString()); - } - - @Test - public void testTenantConstraintsAddedToScan() throws SQLException { - String tenantTypeId = "5678"; - String tenantId = "000000000000123"; - String url = getUrl(tenantId); - createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, type_id char(4) not null, " + - "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, type_id, id)) multi_tenant=true"); - createTestTable(url, "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST WHERE type_id= '" + tenantTypeId + "'"); - - String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'"; - PhoenixConnection pconn = DriverManager.getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - PTable table = plan.getTableRef().getTable(); - Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression(); - Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression(); - assertEquals( - multiEncodedKVFilter(and( - constantComparison( - CompareOperator.EQUAL, - aInteger, - 0), - constantComparison( - CompareOperator.EQUAL, - aString, - "foo")), TWO_BYTE_QUALIFIERS), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + tenantTypeId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testTenantConstraintsAddedToScanWithNullTenantTypeId() throws SQLException { - String tenantId = "000000000000123"; - createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, " + - "id char(5) not null, a_integer integer, a_string varchar(100) 
constraint pk primary key (tenant_id, id)) multi_tenant=true"); - createTestTable(getUrl(tenantId), "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST"); - - String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Filter filter = scan.getFilter(); - PTable table = plan.getTableRef().getTable(); - Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression(); - Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression(); - assertEquals( - multiEncodedKVFilter(and( - constantComparison( - CompareOperator.EQUAL, - aInteger, - 0), - constantComparison( - CompareOperator.EQUAL, - aString, - "foo")), TWO_BYTE_QUALIFIERS), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = startRow; - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testScanCaching_Default() throws SQLException { - String query = "select * from atable where a_integer=0"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - Configuration config = HBaseConfiguration.create(); - int defaultScannerCacheSize = config.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - assertEquals(defaultScannerCacheSize, pstmt.getFetchSize()); - assertEquals(defaultScannerCacheSize, scan.getCaching()); - } - - @Test - public void testScanCaching_CustomFetchSizeOnStatement() throws SQLException { - String query = "select * from atable where a_integer=0"; - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - final int FETCH_SIZE = 25; - pstmt.setFetchSize(FETCH_SIZE); - QueryPlan plan = pstmt.optimizeQuery(); - Scan scan = plan.getContext().getScan(); - assertEquals(FETCH_SIZE, pstmt.getFetchSize()); - assertEquals(FETCH_SIZE, scan.getCaching()); - } - private Expression getDNF(PhoenixConnection pconn, String query) throws SQLException { - //SQLParser parser = new SQLParser("where ID = 'i1' or (ID = 'i2' and A > 1)"); - // ParseNode where = parser.parseWhereClause() - PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); - QueryPlan plan = pstmt.compileQuery(); - ParseNode where = plan.getStatement().getWhere(); - - return transformDNF(where, plan.getContext()); - } - - @Test - public void testWhereInclusion() throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), - PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - String ddl = "create table myTable(ID varchar primary key, A integer, B varchar, " + - "C date, D double, E integer, F json, G varbinary, H 
varbinary_encoded)"; - pconn.createStatement().execute(ddl); - ddl = "create table myTableDesc(ID varchar primary key DESC, A integer, B varchar, " + - "C date, D double, E integer, F json, G varbinary, H varbinary_encoded)"; - pconn.createStatement().execute(ddl); - - final int NUM = 25; - String[] containingQueries = new String[NUM]; - String[] containedQueries = new String[NUM]; - - containingQueries[0] = "select * from myTable where ID = 'i1' or (ID = 'i2' and A > 1)"; - containedQueries[0] = "select * from myTableDesc where ID = 'i1' or (ID = 'i2' and " + - "A > 2 + 2)"; - - containingQueries[1] = "select * from myTable where ID > 'i3' and A > 1"; - containedQueries[1] = "select * from myTableDesc where (ID > 'i7' or ID = 'i4') and " + - "A > 2 * 10"; - - containingQueries[2] = "select * from myTable where ID IN ('i3', 'i7', 'i1') and A < 10"; - containedQueries[2] = "select * from myTableDesc where ID IN ('i1', 'i7') and A < 10 / 2"; - - containingQueries[3] = "select * from myTableDesc where (ID, B) > ('i3', 'a') and A >= 10"; - containedQueries[3] = "select * from myTable where ID = 'i3' and B = 'c' and A = 10"; - - containingQueries[4] = "select * from myTable where ID >= 'i3' and A between 5 and 15"; - containedQueries[4] = "select * from myTableDesc where ID = 'i3' and A between 5 and 10"; - - containingQueries[5] = "select * from myTable where (A between 5 and 15) and " + - "(D < 10.67 or C <= CURRENT_DATE())"; - containedQueries[5] = "select * from myTable where (A = 5 and D between 1.5 and 9.99) or " + - "(A = 6 and C <= CURRENT_DATE() - 1000)"; - - containingQueries[6] = "select * from myTable where A is not null"; - containedQueries[6] = "select * from myTable where A > 0"; - - containingQueries[7] = "select * from myTable where NOT (B is null)"; - containedQueries[7] = "select * from myTable where (B > 'abc')"; - - containingQueries[8] = "select * from myTable where A >= E and D <= A"; - containedQueries[8] = "select * from myTable where (A > E and D = A)"; - - containingQueries[9] = "select * from myTable where A > E"; - containedQueries[9] = "select * from myTable where (A > E and B is not null)"; - - containingQueries[10] = "select * from myTable where B like '%abc'"; - containedQueries[10] = "select * from myTable where (B like '%abc' and ID > 'i1')"; - - containingQueries[11] = "select * from myTable where " + - "PHOENIX_ROW_TIMESTAMP() < CURRENT_TIME()"; - containedQueries[11] = "select * from myTable where " + - "(PHOENIX_ROW_TIMESTAMP() < CURRENT_TIME() - 1)"; - - containingQueries[12] = "select * from myTable where (A, E) IN ((2,3), (7,8), (10,11))"; - containedQueries[12] = "select * from myTable where (A, E) IN ((2,3), (7,8))"; - - containingQueries[13] = "select * from myTable where ID > 'i3' and ID < 'i5'"; - containedQueries[13] = "select * from myTable where (ID = 'i4') "; - - containingQueries[14] = "select * from myTable where " + - "CURRENT_DATE() - PHOENIX_ROW_TIMESTAMP() < 10"; - containedQueries[14] = "select * from myTable where " + - " CURRENT_DATE() - PHOENIX_ROW_TIMESTAMP() < 5 "; - - containingQueries[15] = "select * from myTable where ID > 'i3' and A > 1 and JSON_VALUE(F, '$.type') > 'i3'"; - containedQueries[15] = "select * from myTableDesc where (ID > 'i7' or ID = 'i4') and " + - "A > 2 * 10 and (JSON_VALUE(F, '$.type') > 'i7' or JSON_VALUE(F, '$.type') = 'i4')"; - - containingQueries[16] = "select * from myTable where JSON_VALUE(F, '$.type') is not null"; - containedQueries[16] = "select * from myTable where JSON_VALUE(F, '$.type') > 'i3'"; - 
- containingQueries[17] = "select * from myTable where JSON_VALUE(F, '$.type') like '%abc'";
- containedQueries[17] = "select * from myTable where (JSON_VALUE(F, '$.type') like '%abc' and ID > 'i1')";
-
- containingQueries[18] = "select * from myTable where JSON_EXISTS(F, '$.type')";
- containedQueries[18] = "select * from myTable where JSON_EXISTS(F, '$.type') and JSON_VALUE(F, '$.type') > 'i3'";
-
- containingQueries[19] = "select * from myTable where JSON_VALUE(F, '$.type') IN ('i3', 'i7', 'i1') and A < 10";
- containedQueries[19] = "select * from myTableDesc where JSON_VALUE(F, '$.type') IN ('i1', 'i7') and A < 10 / 2";
-
- String val1 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello"));
- String val2 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello1"));
- String val3 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello2"));
- containingQueries[20] =
- "select * from myTable where ID = 'i1' or (ID = 'i2' and G > '" + val1 + "')";
- containedQueries[20] =
- "select * from myTable where ID = 'i1' or (ID = 'i2' and G > '" + val2 + "')";
-
- containingQueries[21] =
- "select * from myTable where ID = 'i1' or (ID = 'i2' and H > '" + val1 + "')";
- containedQueries[21] =
- "select * from myTable where ID = 'i1' or (ID = 'i2' and H > '" + val2 + "')";
-
- containingQueries[22] =
- "select * from myTable where G > '" + val1 + "' and G < '" + val3 + "'";
- containedQueries[22] = "select * from myTable where (G = '" + val2 + "') ";
-
- containingQueries[23] =
- "select * from myTable where H > '" + val1 + "' and H < '" + val3 + "'";
- containedQueries[23] = "select * from myTable where (H = '" + val2 + "') ";
-
- containingQueries[24] =
- "select * from myTable where (G, H) IN (('" + val1 + "', '" + val2 + "'), ('" + val1
- + "', '" + val3 + "'), ('" + val2 + "', '" + val3 + "'))";
- containedQueries[24] =
- "select * from myTable where (G, H) IN (('" + val1 + "', '" + val3 + "'), ('" + val2
- + "', '" + val3 + "'))";
-
- for (int i = 0; i < NUM; i++) {
- Assert.assertTrue("Containing query: " + containingQueries[i] + " , Contained query: "
- + containedQueries[i], WhereCompiler.contains(getDNF(pconn, containingQueries[i]),
- getDNF(pconn, containedQueries[i])));
- Assert.assertFalse("Containing query: " + containingQueries[i] + " , Contained query: "
- + containedQueries[i], WhereCompiler.contains(getDNF(pconn, containedQueries[i]),
- getDNF(pconn, containingQueries[i])));
- }
+ return pointRange(theKey);
+ }
+
+ private static KeyRange pointRange(byte[] bytes) {
+ return KeyRange.POINT.apply(bytes);
+ }
+
+ @Test
+ public void testInListWithAnd2Filter() throws SQLException {
+ String tenantId1 = "000000000000001";
+ String tenantId2 = "000000000000002";
+ String entityId1 = "00000000000000X";
+ String entityId2 = "00000000000000Y";
+ String query = String.format(
+ "select * from %s where organization_id IN ('%s','%s') AND entity_id IN ('%s', '%s')",
+ ATABLE_NAME, tenantId1, tenantId2, entityId1, entityId2);
+ PhoenixConnection pconn =
+ DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES))
+ .unwrap(PhoenixConnection.class);
+ PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
+ QueryPlan plan = pstmt.optimizeQuery();
+ Scan scan = plan.getContext().getScan();
+
+ Filter filter = scan.getFilter();
+ assertEquals(new SkipScanFilter(
+ ImmutableList.<List<KeyRange>> of(
+ ImmutableList.of(pointRange(tenantId1, entityId1), pointRange(tenantId1, entityId2),
+ pointRange(tenantId2, entityId1), pointRange(tenantId2, entityId2))),
+
SchemaUtil.VAR_BINARY_SCHEMA, false), filter); + } + + @Test + public void testPartialRangeFilter() throws SQLException { + // I know these id's are ridiculous, but users can write queries that look like this + String tenantId1 = "001"; + String tenantId2 = "02"; + String query = + String.format("select * from %s where organization_id > '%s' AND organization_id < '%s'", + ATABLE_NAME, tenantId1, tenantId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + + assertNull(scan.getFilter()); + byte[] wideLower = ByteUtil.nextKey(StringUtil.padChar(Bytes.toBytes(tenantId1), 15)); + byte[] wideUpper = StringUtil.padChar(Bytes.toBytes(tenantId2), 15); + assertArrayEquals(wideLower, scan.getStartRow()); + assertArrayEquals(wideUpper, scan.getStopRow()); + } + + @Test + public void testInListWithAnd2FilterScanKey() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000002"; + String tenantId3 = "000000000000003"; + String entityId1 = "00000000000000X"; + String entityId2 = "00000000000000Y"; + String query = String.format( + "select * from %s where organization_id IN ('%s','%s','%s') AND entity_id IN ('%s', '%s')", + ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2); + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId1)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId3), PVarchar.INSTANCE.toBytes(entityId2)); + assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), + scan.getStopRow()); + // TODO: validate scan ranges + } + + @Test + public void testBetweenFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + + "' and a_integer between 0 and 10"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals( + singleKVFilter(and(constantComparison(CompareOperator.GREATER_OR_EQUAL, A_INTEGER, 0), + constantComparison(CompareOperator.LESS_OR_EQUAL, A_INTEGER, 10))), + filter); + } + + @Test + public void testNotBetweenFilter() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + + "' and a_integer not between 0 and 10"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + assertEquals( + 
singleKVFilter(not(and(constantComparison(CompareOperator.GREATER_OR_EQUAL, A_INTEGER, 0), + constantComparison(CompareOperator.LESS_OR_EQUAL, A_INTEGER, 10)))).toString(), + filter.toString()); + } + + @Test + public void testTenantConstraintsAddedToScan() throws SQLException { + String tenantTypeId = "5678"; + String tenantId = "000000000000123"; + String url = getUrl(tenantId); + createTestTable(getUrl(), + "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, type_id char(4) not null, " + + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, type_id, id)) multi_tenant=true"); + createTestTable(url, + "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST WHERE type_id= '" + + tenantTypeId + "'"); + + String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'"; + PhoenixConnection pconn = DriverManager + .getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + PTable table = plan.getTableRef().getTable(); + Expression aInteger = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()) + .newColumnExpression(); + Expression aString = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()) + .newColumnExpression(); + assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOperator.EQUAL, aInteger, 0), + constantComparison(CompareOperator.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + tenantTypeId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testTenantConstraintsAddedToScanWithNullTenantTypeId() throws SQLException { + String tenantId = "000000000000123"; + createTestTable(getUrl(), + "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, " + + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, id)) multi_tenant=true"); + createTestTable(getUrl(tenantId), + "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST"); + + String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Filter filter = scan.getFilter(); + PTable table = plan.getTableRef().getTable(); + Expression aInteger = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()) + .newColumnExpression(); + Expression aString = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()) + .newColumnExpression(); + assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOperator.EQUAL, aInteger, 0), + constantComparison(CompareOperator.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + 
assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = startRow; + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testScanCaching_Default() throws SQLException { + String query = "select * from atable where a_integer=0"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + Configuration config = HBaseConfiguration.create(); + int defaultScannerCacheSize = config.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + assertEquals(defaultScannerCacheSize, pstmt.getFetchSize()); + assertEquals(defaultScannerCacheSize, scan.getCaching()); + } + + @Test + public void testScanCaching_CustomFetchSizeOnStatement() throws SQLException { + String query = "select * from atable where a_integer=0"; + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + final int FETCH_SIZE = 25; + pstmt.setFetchSize(FETCH_SIZE); + QueryPlan plan = pstmt.optimizeQuery(); + Scan scan = plan.getContext().getScan(); + assertEquals(FETCH_SIZE, pstmt.getFetchSize()); + assertEquals(FETCH_SIZE, scan.getCaching()); + } + + private Expression getDNF(PhoenixConnection pconn, String query) throws SQLException { + // SQLParser parser = new SQLParser("where ID = 'i1' or (ID = 'i2' and A > 1)"); + // ParseNode where = parser.parseWhereClause() + PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query); + QueryPlan plan = pstmt.compileQuery(); + ParseNode where = plan.getStatement().getWhere(); + + return transformDNF(where, plan.getContext()); + } + + @Test + public void testWhereInclusion() throws SQLException { + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + String ddl = "create table myTable(ID varchar primary key, A integer, B varchar, " + + "C date, D double, E integer, F json, G varbinary, H varbinary_encoded)"; + pconn.createStatement().execute(ddl); + ddl = "create table myTableDesc(ID varchar primary key DESC, A integer, B varchar, " + + "C date, D double, E integer, F json, G varbinary, H varbinary_encoded)"; + pconn.createStatement().execute(ddl); + + final int NUM = 25; + String[] containingQueries = new String[NUM]; + String[] containedQueries = new String[NUM]; + + containingQueries[0] = "select * from myTable where ID = 'i1' or (ID = 'i2' and A > 1)"; + containedQueries[0] = + "select * from myTableDesc where ID = 'i1' or (ID = 'i2' and " + "A > 2 + 2)"; + + containingQueries[1] = "select * from myTable where ID > 'i3' and A > 1"; + containedQueries[1] = + "select * from myTableDesc where (ID > 'i7' or ID = 'i4') and " + "A > 2 * 10"; + + containingQueries[2] = "select * from myTable where ID IN ('i3', 'i7', 'i1') and A < 10"; + containedQueries[2] = "select * from myTableDesc where ID IN ('i1', 'i7') and A < 10 / 2"; + + containingQueries[3] = "select * from myTableDesc where (ID, B) > ('i3', 'a') and A >= 10"; + containedQueries[3] = "select * from myTable where ID = 'i3' and B = 'c' and A = 10"; + + containingQueries[4] = "select * from myTable where ID >= 'i3' and A between 5 and 15"; + 
containedQueries[4] = "select * from myTableDesc where ID = 'i3' and A between 5 and 10"; + + containingQueries[5] = "select * from myTable where (A between 5 and 15) and " + + "(D < 10.67 or C <= CURRENT_DATE())"; + containedQueries[5] = "select * from myTable where (A = 5 and D between 1.5 and 9.99) or " + + "(A = 6 and C <= CURRENT_DATE() - 1000)"; + + containingQueries[6] = "select * from myTable where A is not null"; + containedQueries[6] = "select * from myTable where A > 0"; + + containingQueries[7] = "select * from myTable where NOT (B is null)"; + containedQueries[7] = "select * from myTable where (B > 'abc')"; + + containingQueries[8] = "select * from myTable where A >= E and D <= A"; + containedQueries[8] = "select * from myTable where (A > E and D = A)"; + + containingQueries[9] = "select * from myTable where A > E"; + containedQueries[9] = "select * from myTable where (A > E and B is not null)"; + + containingQueries[10] = "select * from myTable where B like '%abc'"; + containedQueries[10] = "select * from myTable where (B like '%abc' and ID > 'i1')"; + + containingQueries[11] = + "select * from myTable where " + "PHOENIX_ROW_TIMESTAMP() < CURRENT_TIME()"; + containedQueries[11] = + "select * from myTable where " + "(PHOENIX_ROW_TIMESTAMP() < CURRENT_TIME() - 1)"; + + containingQueries[12] = "select * from myTable where (A, E) IN ((2,3), (7,8), (10,11))"; + containedQueries[12] = "select * from myTable where (A, E) IN ((2,3), (7,8))"; + + containingQueries[13] = "select * from myTable where ID > 'i3' and ID < 'i5'"; + containedQueries[13] = "select * from myTable where (ID = 'i4') "; + + containingQueries[14] = + "select * from myTable where " + "CURRENT_DATE() - PHOENIX_ROW_TIMESTAMP() < 10"; + containedQueries[14] = + "select * from myTable where " + " CURRENT_DATE() - PHOENIX_ROW_TIMESTAMP() < 5 "; + + containingQueries[15] = + "select * from myTable where ID > 'i3' and A > 1 and JSON_VALUE(F, '$.type') > 'i3'"; + containedQueries[15] = "select * from myTableDesc where (ID > 'i7' or ID = 'i4') and " + + "A > 2 * 10 and (JSON_VALUE(F, '$.type') > 'i7' or JSON_VALUE(F, '$.type') = 'i4')"; + + containingQueries[16] = "select * from myTable where JSON_VALUE(F, '$.type') is not null"; + containedQueries[16] = "select * from myTable where JSON_VALUE(F, '$.type') > 'i3'"; + + containingQueries[17] = "select * from myTable where JSON_VALUE(F, '$.type') like '%abc'"; + containedQueries[17] = + "select * from myTable where (JSON_VALUE(F, '$.type') like '%abc' and ID > 'i1')"; + + containingQueries[18] = "select * from myTable where JSON_EXISTS(F, '$.type')"; + containedQueries[18] = + "select * from myTable where JSON_EXISTS(F, '$.type') and JSON_VALUE(F, '$.type') > 'i3'"; + + containingQueries[19] = + "select * from myTable where JSON_VALUE(F, '$.type') IN ('i3', 'i7', 'i1') and A < 10"; + containedQueries[19] = + "select * from myTableDesc where JSON_VALUE(F, '$.type') IN ('i1', 'i7') and A < 10 / 2"; + + String val1 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello")); + String val2 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello1")); + String val3 = Base64.getEncoder().encodeToString(Bytes.toBytes("Hello2")); + containingQueries[20] = + "select * from myTable where ID = 'i1' or (ID = 'i2' and G > '" + val1 + "')"; + containedQueries[20] = + "select * from myTable where ID = 'i1' or (ID = 'i2' and G > '" + val2 + "')"; + + containingQueries[21] = + "select * from myTable where ID = 'i1' or (ID = 'i2' and H > '" + val1 + "')"; + containedQueries[21] = + "select * 
from myTable where ID = 'i1' or (ID = 'i2' and H > '" + val2 + "')"; + + containingQueries[22] = "select * from myTable where G > '" + val1 + "' and G < '" + val3 + "'"; + containedQueries[22] = "select * from myTable where (G = '" + val2 + "') "; + + containingQueries[23] = "select * from myTable where H > '" + val1 + "' and H < '" + val3 + "'"; + containedQueries[23] = "select * from myTable where (H = '" + val2 + "') "; + + containingQueries[24] = "select * from myTable where (G, H) IN (('" + val1 + "', '" + val2 + + "'), ('" + val1 + "', '" + val3 + "'), ('" + val2 + "', '" + val3 + "'))"; + containedQueries[24] = "select * from myTable where (G, H) IN (('" + val1 + "', '" + val3 + + "'), ('" + val2 + "', '" + val3 + "'))"; + + for (int i = 0; i < NUM; i++) { + Assert.assertTrue( + "Containing query: " + containingQueries[i] + " , Contained query: " + containedQueries[i], + WhereCompiler.contains(getDNF(pconn, containingQueries[i]), + getDNF(pconn, containedQueries[i]))); + Assert.assertFalse( + "Containing query: " + containingQueries[i] + " , Contained query: " + containedQueries[i], + WhereCompiler.contains(getDNF(pconn, containedQueries[i]), + getDNF(pconn, containingQueries[i]))); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java index c905c066a18..4c6bf187027 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -58,30 +58,6 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.phoenix.expression.AndExpression; -import org.apache.phoenix.parse.ColumnParseNode; -import org.apache.phoenix.parse.ParseNode; -import org.apache.phoenix.parse.SQLParser; -import org.apache.phoenix.parse.SelectStatement; -import org.apache.phoenix.schema.AmbiguousColumnException; -import org.apache.phoenix.schema.ColumnNotFoundException; -import org.apache.phoenix.schema.ColumnRef; -import org.apache.phoenix.schema.SortOrder; -import org.apache.phoenix.schema.PColumnFamily; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.TableRef; -import org.apache.phoenix.schema.types.PChar; -import org.apache.phoenix.schema.types.PDate; -import org.apache.phoenix.schema.types.PDataType; -import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDouble; -import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.schema.types.PUnsignedLong; -import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; @@ -93,6 +69,7 @@ import org.apache.phoenix.compile.WhereOptimizer.KeyExpressionVisitor.SingleKeySlot; import 
org.apache.phoenix.compile.WhereOptimizer.KeyExpressionVisitor.SlotsIterator; import org.apache.phoenix.compile.WhereOptimizer.KeyExpressionVisitor.TrailingRangeIterator; +import org.apache.phoenix.expression.AndExpression; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.function.SubstrFunction; import org.apache.phoenix.filter.BooleanExpressionFilter; @@ -102,9 +79,32 @@ import org.apache.phoenix.filter.SkipScanFilter; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixPreparedStatement; +import org.apache.phoenix.parse.ColumnParseNode; +import org.apache.phoenix.parse.ParseNode; +import org.apache.phoenix.parse.SQLParser; +import org.apache.phoenix.parse.SelectStatement; import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryConstants; +import org.apache.phoenix.schema.AmbiguousColumnException; +import org.apache.phoenix.schema.ColumnNotFoundException; +import org.apache.phoenix.schema.ColumnRef; +import org.apache.phoenix.schema.PColumnFamily; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.schema.types.PChar; +import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.schema.types.PDate; +import org.apache.phoenix.schema.types.PDecimal; +import org.apache.phoenix.schema.types.PDouble; +import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.schema.types.PLong; +import org.apache.phoenix.schema.types.PTimestamp; +import org.apache.phoenix.schema.types.PUnsignedLong; +import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.DateUtil; @@ -119,3657 +119,3594 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest { - private static class TestWhereExpressionCompiler extends ExpressionCompiler { - private boolean disambiguateWithFamily; - - public TestWhereExpressionCompiler(StatementContext context) { - super(context); - } - - @Override - public Expression visit(ColumnParseNode node) throws SQLException { - ColumnRef ref = resolveColumn(node); - TableRef tableRef = ref.getTableRef(); - Expression newColumnExpression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); - if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(ref.getColumn())) { - byte[] cq = tableRef.getTable().getImmutableStorageScheme() == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS - ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : ref.getColumn().getColumnQualifierBytes(); - // track the where condition columns. Later we need to ensure the Scan in HRS scans these column CFs - context.addWhereConditionColumn(ref.getColumn().getFamilyName().getBytes(), cq); - } - return newColumnExpression; - } - - @Override - protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { - ColumnRef ref = super.resolveColumn(node); - if (disambiguateWithFamily) { - return ref; - } - PTable table = ref.getTable(); - // Track if we need to compare KeyValue during filter evaluation - // using column family. If the column qualifier is enough, we - // just use that. 
- if (!SchemaUtil.isPKColumn(ref.getColumn())) { - if (!EncodedColumnsUtil.usesEncodedColumnNames(table) - || ref.getColumn().isDynamic()) { - try { - table.getColumnForColumnName(ref.getColumn().getName().getString()); - } catch (AmbiguousColumnException e) { - disambiguateWithFamily = true; - } - } else { - for (PColumnFamily columnFamily : table.getColumnFamilies()) { - if (columnFamily.getName().equals(ref.getColumn().getFamilyName())) { - continue; - } - try { - table.getColumnForColumnQualifier(columnFamily.getName().getBytes(), - ref.getColumn().getColumnQualifierBytes()); - // If we find the same qualifier name with different columnFamily, - // then set disambiguateWithFamily to true - disambiguateWithFamily = true; - break; - } catch (ColumnNotFoundException ignore) { - } - } - } + private static class TestWhereExpressionCompiler extends ExpressionCompiler { + private boolean disambiguateWithFamily; + + public TestWhereExpressionCompiler(StatementContext context) { + super(context); + } + + @Override + public Expression visit(ColumnParseNode node) throws SQLException { + ColumnRef ref = resolveColumn(node); + TableRef tableRef = ref.getTableRef(); + Expression newColumnExpression = + ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive()); + if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(ref.getColumn())) { + byte[] cq = tableRef.getTable().getImmutableStorageScheme() + == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS + ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES + : ref.getColumn().getColumnQualifierBytes(); + // track the where condition columns. Later we need to ensure the Scan in HRS scans these + // column CFs + context.addWhereConditionColumn(ref.getColumn().getFamilyName().getBytes(), cq); + } + return newColumnExpression; + } + + @Override + protected ColumnRef resolveColumn(ColumnParseNode node) throws SQLException { + ColumnRef ref = super.resolveColumn(node); + if (disambiguateWithFamily) { + return ref; + } + PTable table = ref.getTable(); + // Track if we need to compare KeyValue during filter evaluation + // using column family. If the column qualifier is enough, we + // just use that. 
+ if (!SchemaUtil.isPKColumn(ref.getColumn())) { + if (!EncodedColumnsUtil.usesEncodedColumnNames(table) || ref.getColumn().isDynamic()) { + try { + table.getColumnForColumnName(ref.getColumn().getName().getString()); + } catch (AmbiguousColumnException e) { + disambiguateWithFamily = true; + } + } else { + for (PColumnFamily columnFamily : table.getColumnFamilies()) { + if (columnFamily.getName().equals(ref.getColumn().getFamilyName())) { + continue; } - return ref; - } - } - - private static final String TENANT_PREFIX = "Txt00tst1"; - - private static StatementContext compileStatement(String query) throws SQLException { - return compileStatement(query, Collections.emptyList(), null); - } - - private static StatementContext compileStatement(String query, Integer limit) throws SQLException { - return compileStatement(query, Collections.emptyList(), limit); - } - - private static StatementContext compileStatement(String query, List binds) throws SQLException { - return compileStatement(query, binds, null); - } - - private static StatementContext compileStatement(String query, List binds, Integer limit) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - assertRoundtrip(query); - TestUtil.bindParams(pstmt, binds); - QueryPlan plan = pstmt.compileQuery(); - assertEquals(limit, plan.getLimit()); - return plan.getContext(); - } - - @Test - public void testTrailingRangesIterator() throws Exception { - KeyRange[] all = new KeyRange[] {EVERYTHING_RANGE,EVERYTHING_RANGE,EVERYTHING_RANGE,EVERYTHING_RANGE,EVERYTHING_RANGE, EVERYTHING_RANGE}; - List singleAll = Collections.singletonList(all); - KeyRange[] r1 = new KeyRange[] { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - getKeyRange(Bytes.toBytes("A")), - EVERYTHING_RANGE, EVERYTHING_RANGE}; - KeyRange[] r2 = new KeyRange[] { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - getKeyRange(Bytes.toBytes("B")), - EVERYTHING_RANGE, EVERYTHING_RANGE}; - KeyRange[] r3 = new KeyRange[] { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - getKeyRange(Bytes.toBytes("C")), - EVERYTHING_RANGE, EVERYTHING_RANGE}; - KeyRange[] r4 = new KeyRange[] { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - getKeyRange(Bytes.toBytes("D")), - EVERYTHING_RANGE, EVERYTHING_RANGE}; - KeyRange[] r5 = new KeyRange[] { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - getKeyRange(Bytes.toBytes("A"),true,Bytes.toBytes("D"),true), - EVERYTHING_RANGE, EVERYTHING_RANGE}; - int initPkPos = 1; - int pkPos = 3; - List>> slotsTrailingRangesList = Lists.>>newArrayList( - Lists.>newArrayList(Lists.newArrayList(r5)), - Lists.>newArrayList( - Lists.newArrayList(r1, r2), - Lists.newArrayList(r3, r4) - ), - Lists.>newArrayList(), - Lists.>newArrayList(singleAll) - ); - List results = Lists.newArrayList(); - List expectedResults = Lists.newArrayList(getKeyRange(Bytes.toBytes("A")),getKeyRange(Bytes.toBytes("B")),getKeyRange(Bytes.toBytes("C")),getKeyRange(Bytes.toBytes("D"))); - TrailingRangeIterator iterator = new TrailingRangeIterator(initPkPos, pkPos, slotsTrailingRangesList); - while (iterator.hasNext()) { - do { - do { - KeyRange range = iterator.getRange(); - results.add(range); - } while (iterator.nextTrailingRange()); - } while (iterator.nextRange()); - } - assertEquals(expectedResults, results); - } - - @Test - public void 
testSlotsIterator() throws Exception { - List keySlotsList = Lists.newArrayList(); - keySlotsList.add(new SingleKeySlot(null, 0, - Lists.newArrayList( - KeyRange.getKeyRange(Bytes.toBytes("A")), - KeyRange.getKeyRange(Bytes.toBytes("B")) - ))); - keySlotsList.add(new SingleKeySlot(null, 1, - Lists.newArrayList( - KeyRange.getKeyRange(Bytes.toBytes("C")) - ))); - keySlotsList.add(new SingleKeySlot(null, 0, - Lists.newArrayList( - KeyRange.getKeyRange(Bytes.toBytes("D")), - KeyRange.getKeyRange(Bytes.toBytes("E")) - ))); - keySlotsList.add(new SingleKeySlot(null, 1, - Lists.newArrayList())); - SlotsIterator iterator = new SlotsIterator(keySlotsList, 0); - String[][] expectedResults = { - {"A",null,"D",null}, - {"B",null, "D", null}, - {"A",null,"E",null}, - {"B",null,"E",null}, - }; - int j = 0; - while (iterator.next()) { - int i; - for (i = 0; i < keySlotsList.size(); i++) { - KeyRange range = iterator.getRange(i); - String result = range == null ? null : Bytes.toString(range.getLowerRange()); - String expectedResult = expectedResults[j][i]; - assertEquals(expectedResult,result); + try { + table.getColumnForColumnQualifier(columnFamily.getName().getBytes(), + ref.getColumn().getColumnQualifierBytes()); + // If we find the same qualifier name with different columnFamily, + // then set disambiguateWithFamily to true + disambiguateWithFamily = true; + break; + } catch (ColumnNotFoundException ignore) { } - assertEquals(i,expectedResults[j].length); - j++; + } } - assertEquals(j, expectedResults.length); - } - - @Test - public void testMathFunc() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - conn.createStatement().execute("create table test (id integer primary key)"); - Scan scan = compileStatement("select ID, exp(ID) from test where exp(ID) < 10").getScan(); - - assertNotNull(scan.getFilter()); - assertTrue(scan.getStartRow().length == 0); - assertTrue(scan.getStopRow().length == 0); - } - - @Test - public void testSingleKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testGetByteBitExpression() throws SQLException { - ensureTableCreated(getUrl(), TestUtil.BINARY_NAME, TestUtil.BINARY_NAME); - int result = 1; - String query = "select * from " + BINARY_NAME + " where GET_BYTE(a_binary, 0)=" + result; - Scan scan = compileStatement(query).getScan(); - - byte[] tmpBytes, tmpBytes2, tmpBytes3; - tmpBytes = PInteger.INSTANCE.toBytes(result); - tmpBytes2 = new byte[16]; - System.arraycopy(tmpBytes, 0, tmpBytes2, 0, tmpBytes.length); - tmpBytes = ByteUtil.nextKey(tmpBytes); - tmpBytes3 = new byte[16]; - System.arraycopy(tmpBytes, 0, tmpBytes3, 0, tmpBytes.length); - assertArrayEquals(tmpBytes2, scan.getStartRow()); - assertArrayEquals(tmpBytes3, scan.getStopRow()); - - query = "select * from " + BINARY_NAME + " where GET_BIT(a_binary, 0)=" + result; - scan = compileStatement(query).getScan(); - - tmpBytes = PInteger.INSTANCE.toBytes(result); - tmpBytes2 = new byte[16]; - System.arraycopy(tmpBytes, 0, tmpBytes2, 0, tmpBytes.length); - tmpBytes = ByteUtil.nextKey(tmpBytes); - tmpBytes3 = new byte[16]; - System.arraycopy(tmpBytes, 0, 
tmpBytes3, 0, tmpBytes.length); - assertArrayEquals(tmpBytes2, scan.getStartRow()); - assertArrayEquals(tmpBytes3, scan.getStopRow()); - } - - @Test - public void testDescDecimalRange() throws SQLException { - String ddl = "create table t (k1 bigint not null, k2 decimal, constraint pk primary key (k1,k2 desc))"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - conn.createStatement().execute(ddl); - String query = "select * from t where k1 in (1,2) and k2>1.0"; - Scan scan = compileStatement(query).getScan(); - - byte[] startRow = ByteUtil.concat(PLong.INSTANCE.toBytes(1), ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY), QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - byte[] upperValue = PDecimal.INSTANCE.toBytes(BigDecimal.valueOf(1.0)); - byte[] stopRow = ByteUtil.concat(PLong.INSTANCE.toBytes(2), SortOrder.invert(upperValue,0,upperValue.length), QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testSingleCharPaddedKeyExpression() throws SQLException { - String tenantId = "1"; - String query = "select * from atable where organization_id='" + tenantId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] key = StringUtil.padChar(PChar.INSTANCE.toBytes(tenantId), 15); - assertArrayEquals(key, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(key), scan.getStopRow()); - } - - @Test - public void testSingleBinaryPaddedKeyExpression() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - conn.createStatement().execute("create table bintable (k BINARY(15) PRIMARY KEY)"); - String tenantId = "1"; - String query = "select * from bintable where k='" + tenantId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] key = ByteUtil.fillKey(PVarchar.INSTANCE.toBytes(tenantId), 15); - assertArrayEquals(key, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(key), scan.getStopRow()); - } - - @Test - public void testReverseSingleKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where '" + tenantId + "' = organization_id"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testStartKeyStopKey() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE start_stop_test (pk char(2) not null primary key)"); + } + return ref; + } + } + + private static final String TENANT_PREFIX = "Txt00tst1"; + + private static StatementContext compileStatement(String query) throws SQLException { + return compileStatement(query, Collections.emptyList(), null); + } + + private static StatementContext compileStatement(String query, Integer limit) + throws SQLException { + return compileStatement(query, Collections.emptyList(), limit); + } + + private static StatementContext compileStatement(String query, List binds) + throws SQLException { + return compileStatement(query, binds, null); + } + + private static StatementContext compileStatement(String query, List 
binds, Integer limit)
+ throws SQLException {
+ PhoenixConnection pconn =
+ DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES))
+ .unwrap(PhoenixConnection.class);
+ PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+ assertRoundtrip(query);
+ TestUtil.bindParams(pstmt, binds);
+ QueryPlan plan = pstmt.compileQuery();
+ assertEquals(limit, plan.getLimit());
+ return plan.getContext();
+ }
+
+ @Test
+ public void testTrailingRangesIterator() throws Exception {
+ KeyRange[] all = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE };
+ List<KeyRange[]> singleAll = Collections.singletonList(all);
+ KeyRange[] r1 = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ getKeyRange(Bytes.toBytes("A")), EVERYTHING_RANGE, EVERYTHING_RANGE };
+ KeyRange[] r2 = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ getKeyRange(Bytes.toBytes("B")), EVERYTHING_RANGE, EVERYTHING_RANGE };
+ KeyRange[] r3 = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ getKeyRange(Bytes.toBytes("C")), EVERYTHING_RANGE, EVERYTHING_RANGE };
+ KeyRange[] r4 = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ getKeyRange(Bytes.toBytes("D")), EVERYTHING_RANGE, EVERYTHING_RANGE };
+ KeyRange[] r5 = new KeyRange[] { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE,
+ getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("D"), true), EVERYTHING_RANGE,
+ EVERYTHING_RANGE };
+ int initPkPos = 1;
+ int pkPos = 3;
+ List<List<List<KeyRange[]>>> slotsTrailingRangesList =
+ Lists.<List<List<KeyRange[]>>> newArrayList(
+ Lists.<List<KeyRange[]>> newArrayList(Lists.<KeyRange[]> newArrayList(r5)),
+ Lists.<List<KeyRange[]>> newArrayList(Lists.<KeyRange[]> newArrayList(r1, r2),
+ Lists.<KeyRange[]> newArrayList(r3, r4)),
+ Lists.<List<KeyRange[]>> newArrayList(), Lists.<List<KeyRange[]>> newArrayList(singleAll));
+ List<KeyRange> results = Lists.<KeyRange> newArrayList();
+ List<KeyRange> expectedResults =
+ Lists.newArrayList(getKeyRange(Bytes.toBytes("A")), getKeyRange(Bytes.toBytes("B")),
+ getKeyRange(Bytes.toBytes("C")), getKeyRange(Bytes.toBytes("D")));
+ TrailingRangeIterator iterator =
+ new TrailingRangeIterator(initPkPos, pkPos, slotsTrailingRangesList);
+ while (iterator.hasNext()) {
+ do {
+ do {
+ KeyRange range = iterator.getRange();
+ results.add(range);
+ } while (iterator.nextTrailingRange());
+ } while (iterator.nextRange());
+ }
+ assertEquals(expectedResults, results);
+ }
+
+ @Test
+ public void testSlotsIterator() throws Exception {
+ List keySlotsList = Lists.newArrayList();
+ keySlotsList.add(new SingleKeySlot(null, 0, Lists.<KeyRange> newArrayList(
+ KeyRange.getKeyRange(Bytes.toBytes("A")), KeyRange.getKeyRange(Bytes.toBytes("B")))));
+ keySlotsList.add(new SingleKeySlot(null, 1,
+ Lists.<KeyRange> newArrayList(KeyRange.getKeyRange(Bytes.toBytes("C")))));
+ keySlotsList.add(new SingleKeySlot(null, 0, Lists.<KeyRange> newArrayList(
+ KeyRange.getKeyRange(Bytes.toBytes("D")), KeyRange.getKeyRange(Bytes.toBytes("E")))));
+ keySlotsList.add(new SingleKeySlot(null, 1, Lists.<KeyRange> newArrayList()));
+ SlotsIterator iterator = new SlotsIterator(keySlotsList, 0);
+ String[][] expectedResults = { { "A", null, "D", null }, { "B", null, "D", null },
+ { "A", null, "E", null }, { "B", null, "E", null }, };
+ int j = 0;
+ while (iterator.next()) {
+ int i;
+ for (i = 0; i < keySlotsList.size(); i++) {
+ KeyRange range = iterator.getRange(i);
+ String result = range == null ?
null : Bytes.toString(range.getLowerRange()); + String expectedResult = expectedResults[j][i]; + assertEquals(expectedResult, result); + } + assertEquals(i, expectedResults[j].length); + j++; + } + assertEquals(j, expectedResults.length); + } + + @Test + public void testMathFunc() throws SQLException { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + conn.createStatement().execute("create table test (id integer primary key)"); + Scan scan = compileStatement("select ID, exp(ID) from test where exp(ID) < 10").getScan(); + + assertNotNull(scan.getFilter()); + assertTrue(scan.getStartRow().length == 0); + assertTrue(scan.getStopRow().length == 0); + } + + @Test + public void testSingleKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testGetByteBitExpression() throws SQLException { + ensureTableCreated(getUrl(), TestUtil.BINARY_NAME, TestUtil.BINARY_NAME); + int result = 1; + String query = "select * from " + BINARY_NAME + " where GET_BYTE(a_binary, 0)=" + result; + Scan scan = compileStatement(query).getScan(); + + byte[] tmpBytes, tmpBytes2, tmpBytes3; + tmpBytes = PInteger.INSTANCE.toBytes(result); + tmpBytes2 = new byte[16]; + System.arraycopy(tmpBytes, 0, tmpBytes2, 0, tmpBytes.length); + tmpBytes = ByteUtil.nextKey(tmpBytes); + tmpBytes3 = new byte[16]; + System.arraycopy(tmpBytes, 0, tmpBytes3, 0, tmpBytes.length); + assertArrayEquals(tmpBytes2, scan.getStartRow()); + assertArrayEquals(tmpBytes3, scan.getStopRow()); + + query = "select * from " + BINARY_NAME + " where GET_BIT(a_binary, 0)=" + result; + scan = compileStatement(query).getScan(); + + tmpBytes = PInteger.INSTANCE.toBytes(result); + tmpBytes2 = new byte[16]; + System.arraycopy(tmpBytes, 0, tmpBytes2, 0, tmpBytes.length); + tmpBytes = ByteUtil.nextKey(tmpBytes); + tmpBytes3 = new byte[16]; + System.arraycopy(tmpBytes, 0, tmpBytes3, 0, tmpBytes.length); + assertArrayEquals(tmpBytes2, scan.getStartRow()); + assertArrayEquals(tmpBytes3, scan.getStopRow()); + } + + @Test + public void testDescDecimalRange() throws SQLException { + String ddl = + "create table t (k1 bigint not null, k2 decimal, constraint pk primary key (k1,k2 desc))"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + conn.createStatement().execute(ddl); + String query = "select * from t where k1 in (1,2) and k2>1.0"; + Scan scan = compileStatement(query).getScan(); + + byte[] startRow = ByteUtil.concat(PLong.INSTANCE.toBytes(1), + ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + byte[] upperValue = PDecimal.INSTANCE.toBytes(BigDecimal.valueOf(1.0)); + byte[] stopRow = ByteUtil.concat(PLong.INSTANCE.toBytes(2), + SortOrder.invert(upperValue, 0, upperValue.length), QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testSingleCharPaddedKeyExpression() throws SQLException { + String tenantId = "1"; + String query = "select * from atable where 
organization_id='" + tenantId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] key = StringUtil.padChar(PChar.INSTANCE.toBytes(tenantId), 15); + assertArrayEquals(key, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(key), scan.getStopRow()); + } + + @Test + public void testSingleBinaryPaddedKeyExpression() throws SQLException { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + conn.createStatement().execute("create table bintable (k BINARY(15) PRIMARY KEY)"); + String tenantId = "1"; + String query = "select * from bintable where k='" + tenantId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] key = ByteUtil.fillKey(PVarchar.INSTANCE.toBytes(tenantId), 15); + assertArrayEquals(key, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(key), scan.getStopRow()); + } + + @Test + public void testReverseSingleKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where '" + tenantId + "' = organization_id"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testStartKeyStopKey() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE start_stop_test (pk char(2) not null primary key)"); + conn.close(); + + String query = "select * from start_stop_test where pk >= 'EA' and pk < 'EZ'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + assertArrayEquals(PVarchar.INSTANCE.toBytes("EA"), scan.getStartRow()); + assertArrayEquals(PVarchar.INSTANCE.toBytes("EZ"), scan.getStopRow()); + } + + @Test + public void testConcatSingleKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id || 'foo' ='" + tenantId + "'||'foo'"; + Scan scan = compileStatement(query).getScan(); + + // The || operator cannot currently be used to form the start/stop key + assertNotNull(scan.getFilter()); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + } + + @Test + public void testLiteralConcatExpression() throws SQLException { + String query = "select * from atable where null||'foo'||'bar' = 'foobar'"; + Scan scan = new Scan(); + List binds = Collections.emptyList(); + compileStatement(query, binds); + + assertNull(scan.getFilter()); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + } + + @Test + public void testSingleKeyNotExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where not organization_id='" + tenantId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNotNull(scan.getFilter()); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + } + + @Test + public void testMultiKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3)='" + keyPrefix + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + 
byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testMultiKeyBindExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = "select * from atable where organization_id=? and substr(entity_id,1,3)=?"; + List binds = Arrays. asList(tenantId, keyPrefix); + Scan scan = compileStatement(query, binds).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testEqualRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date startDate = DateUtil.parseDate("2011-12-31 12:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')=?"; + List binds = Arrays. asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + assertTrue(scan.includeStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testDegenerateRound() throws Exception { + String inst = "a"; + String host = "b"; + Date startDate = DateUtil.parseDate("2012-01-01 01:00:00"); + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')=?"; + List binds = Arrays. asList(inst, host, startDate); + Scan scan = compileStatement(query, binds).getScan(); + assertDegenerate(scan); + } + + @Test + public void testBoundaryGreaterThanRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date startDate = DateUtil.parseDate("2012-01-01 12:00:00"); + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')>?"; + List binds = Arrays. 
asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + assertTrue(scan.includeStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testBoundaryGreaterThanOrEqualRound() throws Exception { + String inst = "a"; + String host = "b"; + Date startDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date startDateHalfRange = DateUtil.parseDate("2011-12-31 12:00:00.000"); + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')>=?"; + List<Object> binds = Arrays.<Object> asList(inst, host, startDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDateHalfRange)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testGreaterThanRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date startDate = DateUtil.parseDate("2012-01-01 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')>?"; + List<Object> binds = Arrays.<Object> asList(inst, host, roundDate); + + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + assertTrue(scan.includeStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLessThanRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<?"; + List<Object> binds = Arrays.<Object>
asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertTrue(scan.includeStartRow()); + } + + @Test + public void testBoundaryLessThanRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date endDate = DateUtil.parseDate("2011-12-31 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<?"; + List<Object> binds = Arrays.<Object> asList(inst, host, roundDate); + + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<=?"; + List<Object> binds = Arrays.<Object> asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualRound2() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2011-12-31 23:00:00"); + Date endDate = DateUtil.parseDate("2011-12-31 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<=?"; + List<Object> binds = Arrays.<Object>
asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testBoundaryLessThanOrEqualRound() throws Exception { + String inst = "a"; + String host = "b"; + Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<=?"; + List binds = Arrays. asList(inst, host, roundDate); + Scan scan = compileStatement(query, binds).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + assertTrue(scan.includeStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualFloor() throws Exception { + String inst = "a"; + String host = "b"; + Date floorDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date endDate = DateUtil.parseDate("2012-01-02 00:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')<=?"; + List binds = Arrays. asList(inst, host, floorDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualFloorBoundary() throws Exception { + String inst = "a"; + String host = "b"; + Date floorDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date endDate = DateUtil.parseDate("2012-01-02 00:00:00"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')<=?"; + List binds = Arrays. 
asList(inst, host, floorDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testGreaterThanOrEqualFloor() throws Exception { + String inst = "a"; + String host = "b"; + Date floorDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date startDate = DateUtil.parseDate("2012-01-02 00:00:00"); + String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')>=?"; + List binds = Arrays. asList(inst, host, floorDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testGreaterThanOrEqualFloorBoundary() throws Exception { + String inst = "a"; + String host = "b"; + Date floorDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date startDate = DateUtil.parseDate("2012-01-01 00:00:00"); + String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')>=?"; + List binds = Arrays. asList(inst, host, floorDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualCeil() throws Exception { + String inst = "a"; + String host = "b"; + Date ceilDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')<=?"; + List binds = Arrays. 
asList(inst, host, ceilDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testLessThanOrEqualCeilBoundary() throws Exception { + String inst = "a"; + String host = "b"; + Date ceilDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date endDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); // Hbase normalizes scans to left + // closed + String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')<=?"; + List binds = Arrays. asList(inst, host, ceilDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host)/* ,QueryConstants.SEPARATOR_BYTE_ARRAY */); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(endDate)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testGreaterThanOrEqualCeil() throws Exception { + String inst = "a"; + String host = "b"; + Date ceilDate = DateUtil.parseDate("2012-01-01 01:00:00"); + Date startDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); + String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')>=?"; + List binds = Arrays. asList(inst, host, ceilDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testGreaterThanOrEqualCeilBoundary() throws Exception { + String inst = "a"; + String host = "b"; + Date ceilDate = DateUtil.parseDate("2012-01-01 00:00:00"); + Date startDate = DateUtil.parseDate("2011-12-31 00:00:00.001"); + String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')>=?"; + List binds = Arrays. 
asList(inst, host, ceilDate); + Scan scan = compileStatement(query, binds).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes(host), + QueryConstants.SEPARATOR_BYTE_ARRAY, PDate.INSTANCE.toBytes(startDate)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil + .nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(host), QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(stopRow, scan.getStopRow()); + assertFalse(scan.includeStopRow()); + } + + @Test + public void testOverlappingKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String entityId = "002333333333333"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3)='" + keyPrefix + "' and entity_id='" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); + } + + @Test + public void testSubstrExpressionWithoutLengthVariable() { + assertEquals("SUBSTR(ENTITY_ID, 1)", ((SubstrFunction) substr2(ENTITY_ID, 1)).toString()); + } + + @Test + public void testSubstrExpressionWithLengthVariable() { + assertEquals("SUBSTR(ENTITY_ID, 1, 10)", + ((SubstrFunction) substr(ENTITY_ID, 1, 10)).toString()); + } + + @Test + public void testTrailingSubstrExpression() throws SQLException { + String tenantId = "0xD000000000001"; + String entityId = "002333333333333"; + String query = "select * from atable where substr(organization_id,1,3)='" + + tenantId.substring(0, 3) + "' and entity_id='" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + assertNotNull(scan.getFilter()); + + byte[] startRow = + ByteUtil.concat(StringUtil.padChar(PVarchar.INSTANCE.toBytes(tenantId.substring(0, 3)), 15), + PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(startRow, scan.getStartRow()); + // Even though the first slot is a non inclusive range, we need to do a next key + // on the second slot because of the algorithm we use to seek to and terminate the + // loop during skip scan. We could end up having a first slot just under the upper + // limit of slot one and a value equal to the value in slot two and we need this to + // be less than the upper range that would get formed. 
+ byte[] stopRow = ByteUtil.concat(StringUtil + .padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId.substring(0, 3))), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testBasicRangeExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id <= '" + tenantId + "'"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + assertTrue(scan.getStartRow().length == 0); + byte[] stopRow = ByteUtil.concat(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId))); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression1() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String keyPrefix2 = "004"; + String query = + "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + + keyPrefix1 + "' and substr(entity_id,1,3) < '" + keyPrefix2 + "'"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix2), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression2() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String keyPrefix2 = "004"; + String query = + "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + + keyPrefix1 + "' and substr(entity_id,1,3) <= '" + keyPrefix2 + "'"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix2)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression3() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String keyPrefix2 = "004"; + String query = + "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) > '" + + keyPrefix1 + "' and substr(entity_id,1,3) <= '" + keyPrefix2 + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix1)), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix2)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression4() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String entityId = "002000000000002"; + String query = + "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) > '" + + keyPrefix1 + "' and substr(entity_id,1,3) = '" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + 
assertDegenerate(scan); + } + + @Test + public void testKeyRangeExpression5() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String entityId = "002000000000002"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3) <= '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression6() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String entityId = "002000000000002"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3) < '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + assertDegenerate(scan); + } + + @Test + public void testKeyRangeExpression7() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String entityId = "002000000000002"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3) < '" + keyPrefix1 + "' and entity_id < '" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = PChar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1), entityId.length())); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression8() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "001"; + String entityId = "002000000000002"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3) > '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); + } + + @Test + public void testKeyRangeExpression9() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix1 = "002"; + String keyPrefix2 = "0033"; + String query = + "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + + keyPrefix1 + "' and substr(entity_id,1,4) <= '" + keyPrefix2 + "'"; + Scan scan = compileStatement(query).getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1), 15)); // extra byte is due to implicit + // internal padding + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + 
StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix2)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + /** + * This is testing the degenerate case where nothing will match because the overlapping keys + * (keyPrefix and entityId) don't match. + */ + @Test + public void testUnequalOverlappingKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String entityId = "001333333333333"; + String query = "select * from atable where organization_id='" + tenantId + + "' and substr(entity_id,1,3)='" + keyPrefix + "' and entity_id='" + entityId + "'"; + Scan scan = compileStatement(query).getScan(); + assertDegenerate(scan); + } + + @Test + public void testTopLevelOrKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + "' or a_integer=2"; + Scan scan = compileStatement(query).getScan(); + + assertNotNull(scan.getFilter()); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + } + + @Test + public void testSiblingOrKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where organization_id='" + tenantId + + "' and (a_integer = 2 or a_integer = 3)"; + Scan scan = compileStatement(query).getScan(); + + assertNotNull(scan.getFilter()); + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testColumnNotFound() throws SQLException { + String tenantId = "000000000000001"; + String query = "select * from atable where bar='" + tenantId + "'"; + try { + compileStatement(query); + fail(); + } catch (ColumnNotFoundException e) { + // expected + } + } + + @Test + public void testNotContiguousPkColumn() throws SQLException { + String keyPrefix = "002"; + String query = "select * from atable where substr(entity_id,1,3)='" + keyPrefix + "'"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertNotNull(scan.getFilter()); + assertEquals(0, scan.getStartRow().length); + assertEquals(0, scan.getStopRow().length); + } + + @Test + public void testMultipleNonEqualitiesPkColumn() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = "select * from atable where organization_id >= '" + tenantId + + "' AND substr(entity_id,1,3) > '" + keyPrefix + "'"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertNotNull(scan.getFilter()); + // assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + assertArrayEquals( + ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + PChar.INSTANCE.toBytes(PChar.INSTANCE + .pad(PChar.INSTANCE.toObject(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix))), 15))), + scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testRHSLiteral() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id='" + tenantId + "' and 0 >= a_integer limit 1000"; + StatementContext context = compileStatement(query, 1000); + Scan scan = context.getScan(); + + assertNotNull(scan.getFilter()); + assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); + 
assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testKeyTypeMismatch() { + String query = "select * from atable where organization_id=5"; + try { + compileStatement(query); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage().contains("Type mismatch")); + } + } + + @Test + public void testLikeExtractAllKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '" + keyPrefix + "%'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeExtractAllKeyExpression2() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "中文"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '" + keyPrefix + "%'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeExtractAllAsEqKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = + "select * from atable where organization_id LIKE ? and entity_id LIKE '" + keyPrefix + "%'"; + List binds = Arrays. 
asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + assertNull(scan.getFilter()); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeExpressionWithDescOrder() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + String tableName = generateUniqueName(); + conn.createStatement() + .execute("CREATE TABLE " + tableName + " (id varchar, name varchar, type decimal, " + + "status integer CONSTRAINT pk PRIMARY KEY(id desc, type))"); + String query = "SELECT * FROM " + tableName + " where type = 1 and id like 'xy%'"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertTrue(scan.getFilter() instanceof SkipScanFilter); + SkipScanFilter filter = (SkipScanFilter) scan.getFilter(); + + byte[] lowerRange = filter.getSlots().get(0).get(0).getLowerRange(); + byte[] upperRange = filter.getSlots().get(0).get(0).getUpperRange(); + boolean lowerInclusive = filter.getSlots().get(0).get(0).isLowerInclusive(); + boolean upperInclusive = filter.getSlots().get(0).get(0).isUpperInclusive(); + + byte[] startRow = PVarchar.INSTANCE.toBytes("xy"); + byte[] invStartRow = new byte[startRow.length]; + SortOrder.invert(startRow, 0, invStartRow, 0, startRow.length); + + byte[] stopRow = PVarchar.INSTANCE.toBytes("xz"); + byte[] invStopRow = new byte[startRow.length]; + SortOrder.invert(stopRow, 0, invStopRow, 0, stopRow.length); + + assertArrayEquals(invStopRow, lowerRange); + assertArrayEquals(invStartRow, upperRange); + assertFalse(lowerInclusive); + assertTrue(upperInclusive); + + byte[] expectedStartRow = + ByteUtil.concat(invStartRow, new byte[] { 0 }, PDecimal.INSTANCE.toBytes(new BigDecimal(1))); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + + byte[] expectedStopRow = ByteUtil.concat(invStartRow, new byte[] { (byte) (0xFF) }, + PDecimal.INSTANCE.toBytes(new BigDecimal(1)), new byte[] { 1 }); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + + query = "SELECT * FROM " + tableName + " where type = 1 and id like 'x%'"; + context = compileStatement(query); + scan = context.getScan(); + + assertTrue(scan.getFilter() instanceof SkipScanFilter); + filter = (SkipScanFilter) scan.getFilter(); + + lowerRange = filter.getSlots().get(0).get(0).getLowerRange(); + upperRange = filter.getSlots().get(0).get(0).getUpperRange(); + lowerInclusive = filter.getSlots().get(0).get(0).isLowerInclusive(); + upperInclusive = filter.getSlots().get(0).get(0).isUpperInclusive(); + + startRow = PVarchar.INSTANCE.toBytes("x"); + invStartRow = new byte[startRow.length]; + SortOrder.invert(startRow, 0, invStartRow, 0, startRow.length); + + stopRow = PVarchar.INSTANCE.toBytes("y"); + invStopRow = new byte[startRow.length]; + SortOrder.invert(stopRow, 0, invStopRow, 0, stopRow.length); + + assertArrayEquals(invStopRow, lowerRange); + assertArrayEquals(invStartRow, upperRange); + assertFalse(lowerInclusive); + assertTrue(upperInclusive); + + expectedStartRow = + ByteUtil.concat(invStartRow, new byte[] { 0 }, PDecimal.INSTANCE.toBytes(new BigDecimal(1))); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + + 
expectedStopRow = ByteUtil.concat(invStartRow, new byte[] { (byte) (0xFF) }, + PDecimal.INSTANCE.toBytes(new BigDecimal(1)), new byte[] { 1 }); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + } + + @Test + public void testLikeNoWildcardExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = + "select * from atable where organization_id LIKE ? and entity_id LIKE '" + keyPrefix + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.nextKey(startRow); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeExtractKeyExpression2() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = keyPrefix + "_"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(like(ENTITY_ID, likeArg, context)), filter); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeOptKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = keyPrefix + "%003%"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(like(ENTITY_ID, likeArg, context)), filter); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeOptKeyExpression2() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = keyPrefix + "%003%"; + String query = + "select * from atable where organization_id = ? and substr(entity_id,1,10) LIKE '" + likeArg + + "'"; + List binds = Arrays. 
asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(like(substr(ENTITY_ID, 1, 10), likeArg, context)), filter); + + byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15)); + byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testLikeNoOptKeyExpression3() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = keyPrefix + "%003%"; + String query = + "select * from atable where organization_id = ? and substr(entity_id,4,10) LIKE '" + likeArg + + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(like(substr(ENTITY_ID, 4, 10), likeArg, context)), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); + } + + @Test + public void testLikeNoOptKeyExpression() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = "%001%" + keyPrefix + "%"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(like(ENTITY_ID, likeArg, context)), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); + } + + @Test + public void testLikeNoOptKeyExpression2() throws SQLException { + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String likeArg = keyPrefix + "%"; + String query = + "select * from atable where organization_id = ? and entity_id NOT LIKE '" + likeArg + "'"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertEquals(rowKeyFilter(not(like(ENTITY_ID, likeArg, context))), filter); + + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); + } + + @Test + public void testLikeDegenerate() throws SQLException { + String tenantId = "000000000000001"; + String query = + "select * from atable where organization_id = ? and entity_id LIKE '0000000000000012%003%'"; + List binds = Arrays. 
asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateDivision1() throws SQLException { + String query = "select * from atable where a_integer = 3 / null"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateDivision2() throws SQLException { + String query = "select * from atable where a_integer / null = 3"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateMult1() throws SQLException { + String query = "select * from atable where a_integer = 3 * null"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateMult2() throws SQLException { + String query = "select * from atable where a_integer * null = 3"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateAdd1() throws SQLException { + String query = "select * from atable where a_integer = 3 + null"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateAdd2() throws SQLException { + String query = "select * from atable where a_integer + null = 3"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateSub1() throws SQLException { + String query = "select * from atable where a_integer = 3 - null"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + @Test + public void testDegenerateSub2() throws SQLException { + String query = "select * from atable where a_integer - null = 3"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + + assertDegenerate(scan); + } + + /* + * The following 5 tests are testing the comparison in where clauses under the case when the rhs + * cannot be coerced into the lhs. We need to confirm the decision make by expression compilation + * returns correct decisions. + */ + @Test + public void testValueComparisonInt() throws SQLException { + ensureTableCreated(getUrl(), "PKIntValueTest", "PKIntValueTest"); + String query; + // int <-> long + // Case 1: int = long, comparison always false, key is degenerated. + query = "SELECT * FROM PKintValueTest where pk = " + Long.MAX_VALUE; + assertQueryConditionAlwaysFalse(query); + // Case 2: int != long, comparison always true, no key set since we need to do a full + // scan all the time. 
+ query = "SELECT * FROM PKintValueTest where pk != " + Long.MAX_VALUE; + assertQueryConditionAlwaysTrue(query); + // Case 3: int > positive long, comparison always false; + query = "SELECT * FROM PKintValueTest where pk >= " + Long.MAX_VALUE; + assertQueryConditionAlwaysFalse(query); + // Case 4: int <= Integer.MAX_VALUE < positive long, always true; + query = "SELECT * FROM PKintValueTest where pk <= " + Long.MAX_VALUE; + assertQueryConditionAlwaysTrue(query); + // Case 5: int >= Integer.MIN_VALUE > negative long, always true; + query = "SELECT * FROM PKintValueTest where pk >= " + (Long.MIN_VALUE + 1); + assertQueryConditionAlwaysTrue(query); + // Case 6: int < negative long, comparison always false; + query = "SELECT * FROM PKintValueTest where pk <= " + (Long.MIN_VALUE + 1); + assertQueryConditionAlwaysFalse(query); + } + + @Test + public void testValueComparisonUnsignedInt() throws SQLException { + ensureTableCreated(getUrl(), "PKUnsignedIntValueTest", "PKUnsignedIntValueTest"); + String query; + // unsigned_int <-> negative int/long + // Case 1: unsigned_int = negative int, always false; + query = "SELECT * FROM PKUnsignedIntValueTest where pk = -1"; + assertQueryConditionAlwaysFalse(query); + // Case 2: unsigned_int != negative int, always true; + query = "SELECT * FROM PKUnsignedIntValueTest where pk != -1"; + assertQueryConditionAlwaysTrue(query); + // Case 3: unsigned_int > negative int, always true; + query = "SELECT * FROM PKUnsignedIntValueTest where pk > " + (Long.MIN_VALUE + 1); + assertQueryConditionAlwaysTrue(query); + // Case 4: unsigned_int < negative int, always false; + query = "SELECT * FROM PKUnsignedIntValueTest where pk < " + +(Long.MIN_VALUE + 1); + assertQueryConditionAlwaysFalse(query); + // unsigned_int <-> big positive long + // Case 1: unsigned_int = big positive long, always false; + query = "SELECT * FROM PKUnsignedIntValueTest where pk = " + Long.MAX_VALUE; + assertQueryConditionAlwaysFalse(query); + // Case 2: unsigned_int != big positive long, always true; + query = "SELECT * FROM PKUnsignedIntValueTest where pk != " + Long.MAX_VALUE; + assertQueryConditionAlwaysTrue(query); + // Case 3: unsigned_int > big positive long, always false; + query = "SELECT * FROM PKUnsignedIntValueTest where pk >= " + Long.MAX_VALUE; + assertQueryConditionAlwaysFalse(query); + // Case 4: unsigned_int < big positive long, always true; + query = "SELECT * FROM PKUnsignedIntValueTest where pk <= " + Long.MAX_VALUE; + assertQueryConditionAlwaysTrue(query); + } + + @Test + public void testValueComparisonUnsignedLong() throws SQLException { + ensureTableCreated(getUrl(), "PKUnsignedLongValueTest", "PKUnsignedLongValueTest"); + String query; + // unsigned_long <-> positive int/long + // Case 1: unsigned_long = negative int/long, always false; + query = "SELECT * FROM PKUnsignedLongValueTest where pk = -1"; + assertQueryConditionAlwaysFalse(query); + // Case 2: unsigned_long = negative int/long, always true; + query = "SELECT * FROM PKUnsignedLongValueTest where pk != " + (Long.MIN_VALUE + 1); + assertQueryConditionAlwaysTrue(query); + // Case 3: unsigned_long > negative int/long, always true; + query = "SELECT * FROM PKUnsignedLongValueTest where pk > -1"; + assertQueryConditionAlwaysTrue(query); + // Case 4: unsigned_long < negative int/long, always false; + query = "SELECT * FROM PKUnsignedLongValueTest where pk < " + (Long.MIN_VALUE + 1); + assertQueryConditionAlwaysFalse(query); + } + + private void assertQueryConditionAlwaysTrue(String query) throws SQLException { + Scan scan 
= compileStatement(query).getScan(); + assertEmptyScanKey(scan); + } + + private void assertQueryConditionAlwaysFalse(String query) throws SQLException { + Scan scan = compileStatement(query).getScan(); + assertDegenerate(scan); + } + + @Test + public void testOrSameColExpression() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000003"; + String query = "select * from atable where organization_id = ? or organization_id = ?"; + List binds = Arrays. asList(tenantId1, tenantId2); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof SkipScanFilter); + ScanRanges scanRanges = context.getScanRanges(); + assertNotNull(scanRanges); + List> ranges = scanRanges.getRanges(); + assertEquals(1, ranges.size()); + List> expectedRanges = Collections.singletonList(Arrays.asList( + PChar.INSTANCE.getKeyRange(PChar.INSTANCE.toBytes(tenantId1), true, + PChar.INSTANCE.toBytes(tenantId1), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(PChar.INSTANCE.toBytes(tenantId2), true, + PChar.INSTANCE.toBytes(tenantId2), true, SortOrder.ASC))); + assertEquals(expectedRanges, ranges); + byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId2)), scan.getStopRow()); + } + + @Test + public void testAndOrExpression() throws SQLException { + String tenantId1 = "000000000000001"; + String tenantId2 = "000000000000003"; + String entityId1 = "002333333333331"; + String entityId2 = "002333333333333"; + String query = + "select * from atable where (organization_id = ? and entity_id = ?) or (organization_id = ? and entity_id = ?)"; + List binds = Arrays. asList(tenantId1, entityId1, tenantId2, entityId2); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + + ScanRanges scanRanges = context.getScanRanges(); + assertEquals(ScanRanges.EVERYTHING, scanRanges); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testOrDiffColExpression() throws SQLException { + String tenantId1 = "000000000000001"; + String entityId1 = "002333333333331"; + String query = "select * from atable where organization_id = ? or entity_id = ?"; + List binds = Arrays. asList(tenantId1, entityId1); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + ScanRanges scanRanges = context.getScanRanges(); + assertEquals(ScanRanges.EVERYTHING, scanRanges); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testOrSameColRangeExpression() throws SQLException { + String query = + "select * from atable where substr(organization_id,1,3) = ? or organization_id LIKE 'foo%'"; + List binds = Arrays. 
asList("00D"); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof SkipScanFilter); + ScanRanges scanRanges = context.getScanRanges(); + assertNotNull(scanRanges); + List> ranges = scanRanges.getRanges(); + assertEquals(1, ranges.size()); + List> expectedRanges = Collections.singletonList(Arrays.asList( + PChar.INSTANCE.getKeyRange(StringUtil.padChar(PChar.INSTANCE.toBytes("00D"), 15), true, + StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes("00D")), 15), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(StringUtil.padChar(PChar.INSTANCE.toBytes("foo"), 15), true, + StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes("foo")), 15), false, + SortOrder.ASC))); + assertEquals(expectedRanges, ranges); + } + + @Test + public void testOrPKRanges() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + ensureTableCreated(getUrl(), TestUtil.BTABLE_NAME); + Statement stmt = conn.createStatement(); + // BTABLE has 5 PK columns + String query = "select * from " + BTABLE_NAME + + " where (a_string > '1' and a_string < '5') or (a_string > '6' and a_string < '9')"; + StatementContext context = compileStatement(query); + Filter filter = context.getScan().getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof SkipScanFilter); + ScanRanges scanRanges = context.getScanRanges(); + assertNotNull(scanRanges); + List> ranges = scanRanges.getRanges(); + assertEquals(1, ranges.size()); + List> expectedRanges = Collections.singletonList( + Arrays.asList(KeyRange.getKeyRange(Bytes.toBytes("1"), false, Bytes.toBytes("5"), false), + KeyRange.getKeyRange(Bytes.toBytes("6"), false, Bytes.toBytes("9"), false))); + assertEquals(expectedRanges, ranges); + + stmt.close(); + conn.close(); + } + + @Test + public void testOrPKRangesNotOptimized() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + ensureTableCreated(getUrl(), TestUtil.BTABLE_NAME); + Statement stmt = conn.createStatement(); + // BTABLE has 5 PK columns + String[] queries = { "select * from " + BTABLE_NAME + + " where (a_string > '1' and a_string < '5') or (a_string > '6' and a_string < '9' and a_id = 'foo')", + "select * from " + BTABLE_NAME + + " where (a_id > 'aaa' and a_id < 'ccc') or (a_id > 'jjj' and a_id < 'mmm')", }; + for (String query : queries) { + StatementContext context = compileStatement(query); + Iterator it = ScanUtil.getFilterIterator(context.getScan()); + while (it.hasNext()) { + assertFalse(it.next() instanceof SkipScanFilter); + } + TestUtil.assertNotDegenerate(context.getScan()); + } + + stmt.close(); + conn.close(); + } + + @Test + public void testForceSkipScanOnSaltedTable() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("CREATE TABLE IF NOT EXISTS user_messages (\n" + + " SENDER_ID UNSIGNED_LONG NOT NULL,\n" + + " RECIPIENT_ID UNSIGNED_LONG NOT NULL,\n" + " SENDER_IP VARCHAR,\n" + + " IS_READ VARCHAR,\n" + " IS_DELETED VARCHAR,\n" + " M_TEXT VARCHAR,\n" + + " M_TIMESTAMP timestamp NOT NULL,\n" + " ROW_ID UNSIGNED_LONG NOT NULL\n" + + " constraint rowkey primary key (SENDER_ID,RECIPIENT_ID,M_TIMESTAMP DESC,ROW_ID))\n" + + "SALT_BUCKETS=12\n"); + String query = + "select /*+ SKIP_SCAN */ count(*) from user_messages where is_read='N' and recipient_id=5399179882"; + StatementContext context = compileStatement(query); + Scan scan = 
context.getScan(); + Filter filter = scan.getFilter(); + + assertNotNull(filter); + assertTrue(filter instanceof FilterList); + FilterList filterList = (FilterList) filter; + assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); + assertEquals(2, filterList.getFilters().size()); + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + assertTrue(filterList.getFilters().get(1) instanceof SingleKeyValueComparisonFilter); + + ScanRanges scanRanges = context.getScanRanges(); + assertNotNull(scanRanges); + assertEquals(3, scanRanges.getRanges().size()); + assertEquals(1, scanRanges.getRanges().get(1).size()); + assertEquals(KeyRange.EVERYTHING_RANGE, scanRanges.getRanges().get(1).get(0)); + assertEquals(1, scanRanges.getRanges().get(2).size()); + assertTrue(scanRanges.getRanges().get(2).get(0).isSingleKey()); + assertEquals(Long.valueOf(5399179882L), + PUnsignedLong.INSTANCE.toObject(scanRanges.getRanges().get(2).get(0).getLowerRange())); + } + + @Test + public void testForceRangeScanKeepsFilters() throws SQLException { + ensureTableCreated(getUrl(), TestUtil.ENTITY_HISTORY_TABLE_NAME, + TestUtil.ENTITY_HISTORY_TABLE_NAME); + String tenantId = "000000000000001"; + String keyPrefix = "002"; + String query = + "select /*+ RANGE_SCAN */ ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID from " + + TestUtil.ENTITY_HISTORY_TABLE_NAME + + " where ORGANIZATION_ID=? and SUBSTR(PARENT_ID, 1, 3) = ? and CREATED_DATE >= ? and CREATED_DATE < ? order by ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID limit 6"; + Date startTime = new Date(System.currentTimeMillis()); + Date stopTime = new Date(startTime.getTime() + MILLIS_IN_DAY); + List binds = Arrays. asList(tenantId, keyPrefix, startTime, stopTime); + StatementContext context = compileStatement(query, binds, 6); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + + byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix), 15), + PDate.INSTANCE.toBytes(startTime)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + byte[] expectedStopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + } + + @Test + public void testBasicRVCExpression() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333331"; + String query = "select * from atable where (organization_id,entity_id) >= (?,?)"; + List binds = Arrays. asList(tenantId, entityId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + assertNull(scan.getFilter()); + byte[] expectedStartRow = + ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), PChar.INSTANCE.toBytes(entityId)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testRVCExpressionThroughOr() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333331"; + String entityId1 = "002333333333330"; + String entityId2 = "002333333333332"; + String query = + "select * from atable where (organization_id,entity_id) >= (?,?) and organization_id = ? and (entity_id = ? or entity_id = ?)"; + List binds = Arrays. 
asList(tenantId, entityId, tenantId, entityId1, entityId2); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + byte[] expectedStartRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId1)); + byte[] expectedStopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + PVarchar.INSTANCE.toBytes(entityId2), QueryConstants.SEPARATOR_BYTE_ARRAY); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + SkipScanFilter skipScanFilter = (SkipScanFilter) filter; + List> skipScanRanges = Arrays.asList(Arrays.asList( + KeyRange.getKeyRange( + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId1))), + KeyRange.getKeyRange(ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + PVarchar.INSTANCE.toBytes(entityId2))))); + assertEquals(skipScanRanges, skipScanFilter.getSlots()); + } + + @Test + public void testNotRepresentableBySkipScan() throws SQLException { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + String tableName = generateUniqueName(); + conn.createStatement().execute("CREATE TABLE " + tableName + + "(a INTEGER NOT NULL, b INTEGER NOT NULL, CONSTRAINT pk PRIMARY KEY (a,b))"); + String query = "SELECT * FROM " + tableName + + " WHERE (a,b) >= (1,5) and (a,b) < (3,8) and (a = 1 or a = 3) and ((b >= 6 and b < 9) or (b > 3 and b <= 5))"; + StatementContext context = compileStatement(query); + Scan scan = context.getScan(); + byte[] expectedStartRow = + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(4)); + byte[] expectedStopRow = + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(9)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof FilterList); + FilterList filterList = (FilterList) filter; + // We can form a skip scan, but it's not exact, so we need the boolean expression filter + // as well. + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + assertTrue(filterList.getFilters().get(1) instanceof BooleanExpressionFilter); + SkipScanFilter skipScanFilter = (SkipScanFilter) filterList.getFilters().get(0); + List> skipScanRanges = Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3))), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(5), + true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(6), true, PInteger.INSTANCE.toBytes(9), + false))); + assertEquals(skipScanRanges, skipScanFilter.getSlots()); + } + + /** + * With only a subset of row key cols present (which includes the leading key), Phoenix should + * have optimized the start row for the scan to include the row keys cols that occur contiguously + * in the RVC. Table entity_history has the row key defined as (organization_id, parent_id, + * created_date, entity_history_id). This test uses (organization_id, parent_id, entity_id) in + * RVC. So the start row should be comprised of organization_id and parent_id. 
+ */ + @Test + public void testRVCExpressionWithSubsetOfPKCols() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000002"; + String entityHistId = "000000000000003"; + + String query = + "select * from entity_history where (organization_id, parent_id, entity_history_id) >= (?,?,?)"; + List binds = Arrays. asList(tenantId, parentId, entityHistId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + byte[] expectedStartRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + /** + * With the leading row key col missing Phoenix won't be able to optimize and provide the start + * row for the scan. Table entity_history has the row key defined as (organization_id, parent_id, + * created_date, entity_history_id). This test uses (parent_id, entity_id) in RVC. Start row + * should be empty. + */ + + @Test + public void testRVCExpressionWithoutLeadingColOfRowKey() throws SQLException { + + String parentId = "000000000000002"; + String entityHistId = "000000000000003"; + + String query = "select * from entity_history where (parent_id, entity_history_id) >= (?,?)"; + List binds = Arrays. asList(parentId, entityHistId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testRVCExpressionWithNonFirstLeadingColOfRowKey() throws SQLException { + String old_value = "value"; + String orgId = getOrganizationId(); + + String query = "select * from entity_history where (old_value, organization_id) >= (?,?)"; + List binds = Arrays. asList(old_value, orgId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof SingleKeyValueComparisonFilter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testMultiRVCExpressionsCombinedWithAnd() throws SQLException { + String lowerTenantId = "000000000000001"; + String lowerParentId = "000000000000002"; + Date lowerCreatedDate = new Date(System.currentTimeMillis()); + String upperTenantId = "000000000000008"; + String upperParentId = "000000000000009"; + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?, ?, ?) AND (organization_id, parent_id) <= (?, ?)"; + List binds = Arrays. 
asList(lowerTenantId, lowerParentId, lowerCreatedDate, + upperTenantId, upperParentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(lowerTenantId), + PVarchar.INSTANCE.toBytes(lowerParentId), PDate.INSTANCE.toBytes(lowerCreatedDate)); + byte[] expectedStopRow = ByteUtil.nextKey(ByteUtil + .concat(PVarchar.INSTANCE.toBytes(upperTenantId), PVarchar.INSTANCE.toBytes(upperParentId))); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + } + + @Test + public void testMultiRVCExpressionsCombinedUsingLiteralExpressions() throws SQLException { + String lowerTenantId = "000000000000001"; + String lowerParentId = "000000000000002"; + Date lowerCreatedDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?, ?, ?) AND (organization_id, parent_id) <= ('7', '7')"; + List binds = Arrays. asList(lowerTenantId, lowerParentId, lowerCreatedDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(lowerTenantId), + PVarchar.INSTANCE.toBytes(lowerParentId), PDate.INSTANCE.toBytes(lowerCreatedDate)); + byte[] expectedStopRow = + ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PVarchar.INSTANCE.toBytes("7"), 15), + StringUtil.padChar(PVarchar.INSTANCE.toBytes("7"), 15))); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + } + + @Test + public void testUseOfFunctionOnLHSInRVC() throws SQLException { + String tenantId = "000000000000001"; + String subStringTenantId = tenantId.substring(0, 3); + String parentId = "000000000000002"; + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (substr(organization_id, 1, 3), parent_id, created_date) >= (?,?,?)"; + List binds = Arrays. asList(subStringTenantId, parentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + byte[] expectedStartRow = PVarchar.INSTANCE.toBytes(subStringTenantId); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testUseOfFunctionOnLHSInMiddleOfRVC() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000002"; + String subStringParentId = parentId.substring(0, 3); + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) >= (?,?,?)"; + List binds = Arrays. 
asList(tenantId, subStringParentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + PVarchar.INSTANCE.toBytes(subStringParentId)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testUseOfFunctionOnLHSInMiddleOfRVCForLTE() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000002"; + String subStringParentId = parentId.substring(0, 3); + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) <= (?,?,?)"; + List binds = Arrays. asList(tenantId, subStringParentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + assertTrue(filter instanceof RowKeyComparisonFilter); + byte[] expectedStopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), + ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(subStringParentId))); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); + assertArrayEquals(expectedStopRow, scan.getStopRow()); + } + + @Test + public void testNullAtEndOfRVC() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000002"; + Date createdDate = null; + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; + List binds = Arrays. asList(tenantId, parentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] expectedStartRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testNullInMiddleOfRVC() throws SQLException { + String tenantId = "000000000000001"; + String parentId = null; + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; + List binds = Arrays. asList(tenantId, parentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] expectedStartRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), new byte[15], + ByteUtil.previousKey(PDate.INSTANCE.toBytes(createdDate))); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testNullAtStartOfRVC() throws SQLException { + String tenantId = null; + String parentId = "000000000000002"; + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; + List binds = Arrays. 
asList(tenantId, parentId, createdDate); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + byte[] expectedStartRow = ByteUtil.concat(new byte[15], + ByteUtil.previousKey(PChar.INSTANCE.toBytes(parentId)), PDate.INSTANCE.toBytes(createdDate)); + assertArrayEquals(expectedStartRow, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testRVCInCombinationWithOtherNonRVC() throws SQLException { + String firstOrgId = "000000000000001"; + String secondOrgId = "000000000000008"; + + String parentId = "000000000000002"; + Date createdDate = new Date(System.currentTimeMillis()); + + String query = + "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?) AND organization_id <= ?"; + List binds = Arrays. asList(firstOrgId, parentId, createdDate, secondOrgId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstOrgId), + PVarchar.INSTANCE.toBytes(parentId), PDate.INSTANCE.toBytes(createdDate)), + scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(secondOrgId)), scan.getStopRow()); + } + + @Test + public void testGreaterThanEqualTo_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() + throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000008"; + + String query = "select * from entity_history where organization_id >= (?,?)"; + List binds = Arrays. asList(tenantId, parentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testGreaterThan_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000008"; + + String query = "select * from entity_history where organization_id > (?,?)"; + List binds = Arrays. asList(tenantId, parentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testGreaterThan() throws SQLException { + String tenantId = "000000000000001"; + + String query = "select * from entity_history where organization_id >?"; + List binds = Arrays. asList(tenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testLessThanEqualTo_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() + throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000008"; + + String query = "select * from entity_history where organization_id <= (?,?)"; + List binds = Arrays. 
asList(tenantId, parentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testLessThan_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { + String tenantId = "000000000000001"; + String parentId = "000000000000008"; + + String query = "select * from entity_history where organization_id < (?,?)"; + List binds = Arrays. asList(tenantId, parentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testQueryMoreRVC() throws SQLException { + String ddl = "CREATE TABLE rvcTestIdx " + " (\n" + " pk1 VARCHAR NOT NULL,\n" + + " v1 VARCHAR,\n" + " pk2 DECIMAL NOT NULL,\n" + " CONSTRAINT PK PRIMARY KEY \n" + + " (\n" + " pk1,\n" + " v1,\n" + " pk2\n" + " )\n" + + ") MULTI_TENANT=true,IMMUTABLE_ROWS=true"; + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + conn.createStatement().execute(ddl); + String query = "SELECT pk1, pk2, v1 FROM rvcTestIdx WHERE pk1 = 'a' AND\n" + + "(pk1, pk2) > ('a', 1)\n" + "ORDER BY PK1, PK2\n" + "LIMIT 2"; + StatementContext context = compileStatement(query, 2); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNotNull(filter); + byte[] startRow = Bytes.toBytes("a"); + byte[] stopRow = + ByteUtil.concat(startRow, ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY)); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testCombiningRVCUsingOr() throws SQLException { + String firstTenantId = "000000000000001"; + String secondTenantId = "000000000000005"; + String firstParentId = "000000000000011"; + String secondParentId = "000000000000015"; + + String query = + "select * from entity_history where (organization_id, parent_id) >= (?,?) OR (organization_id, parent_id) <= (?, ?)"; + List binds = + Arrays. asList(firstTenantId, firstParentId, secondTenantId, secondParentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testCombiningRVCUsingOr2() throws SQLException { + String firstTenantId = "000000000000001"; + String secondTenantId = "000000000000005"; + String firstParentId = "000000000000011"; + String secondParentId = "000000000000015"; + + String query = + "select * from entity_history where (organization_id, parent_id) >= (?,?) OR (organization_id, parent_id) >= (?, ?)"; + List binds = + Arrays. 
asList(firstTenantId, firstParentId, secondTenantId, secondParentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstTenantId), + PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testCombiningRVCWithNonRVCUsingOr() throws SQLException { + String firstTenantId = "000000000000001"; + String secondTenantId = "000000000000005"; + String firstParentId = "000000000000011"; + + String query = + "select * from entity_history where (organization_id, parent_id) >= (?,?) OR organization_id >= ?"; + List binds = Arrays. asList(firstTenantId, firstParentId, secondTenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstTenantId), + PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testCombiningRVCWithNonRVCUsingOr2() throws SQLException { + String firstTenantId = "000000000000001"; + String secondTenantId = "000000000000005"; + String firstParentId = "000000000000011"; + + String query = + "select * from entity_history where (organization_id, parent_id) >= (?,?) OR organization_id <= ?"; + List binds = Arrays. asList(firstTenantId, firstParentId, secondTenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + } + + @Test + public void testCombiningRVCWithNonRVCUsingOr3() throws SQLException { + String firstTenantId = "000000000000005"; + String secondTenantId = "000000000000001"; + String firstParentId = "000000000000011"; + String query = + "select * from entity_history where (organization_id, parent_id) >= (?,?) OR organization_id <= ?"; + List binds = Arrays. 
asList(firstTenantId, firstParentId, secondTenantId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + SkipScanFilter skipScanFilter = (SkipScanFilter) filter; + List> keyRanges = skipScanFilter.getSlots(); + assertEquals(1, keyRanges.size()); + assertEquals(2, keyRanges.get(0).size()); + KeyRange range1 = keyRanges.get(0).get(0); + KeyRange range2 = keyRanges.get(0).get(1); + assertEquals(KeyRange.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes(secondTenantId), true), + range1); + assertEquals(KeyRange.getKeyRange( + ByteUtil.concat(Bytes.toBytes(firstTenantId), Bytes.toBytes(firstParentId)), true, + KeyRange.UNBOUND, true), range2); + } + + @Test + public void testUsingRVCNonFullyQualifiedInClause() throws Exception { + String firstOrgId = "000000000000001"; + String secondOrgId = "000000000000009"; + String firstParentId = "000000000000011"; + String secondParentId = "000000000000021"; + String query = + "select * from entity_history where (organization_id, parent_id) IN ((?, ?), (?, ?))"; + List binds = + Arrays. asList(firstOrgId, firstParentId, secondOrgId, secondParentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstOrgId), + PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(secondOrgId), + PVarchar.INSTANCE.toBytes(secondParentId))), scan.getStopRow()); + } + + @Test + public void testUsingRVCFullyQualifiedInClause() throws Exception { + String firstOrgId = "000000000000001"; + String secondOrgId = "000000000000009"; + String firstParentId = "000000000000011"; + String secondParentId = "000000000000021"; + String query = "select * from atable where (organization_id, entity_id) IN ((?, ?), (?, ?))"; + List binds = + Arrays. 
asList(firstOrgId, firstParentId, secondOrgId, secondParentId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + List> skipScanRanges = Collections.singletonList(Arrays.asList( + KeyRange.getKeyRange( + ByteUtil.concat(PChar.INSTANCE.toBytes(firstOrgId), PChar.INSTANCE.toBytes(firstParentId))), + KeyRange.getKeyRange(ByteUtil.concat(PChar.INSTANCE.toBytes(secondOrgId), + PChar.INSTANCE.toBytes(secondParentId))))); + assertEquals(skipScanRanges, context.getScanRanges().getRanges()); + assertArrayEquals( + ByteUtil.concat(PChar.INSTANCE.toBytes(firstOrgId), PChar.INSTANCE.toBytes(firstParentId)), + scan.getStartRow()); + assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes(secondOrgId), + PChar.INSTANCE.toBytes(secondParentId), QueryConstants.SEPARATOR_BYTE_ARRAY), + scan.getStopRow()); + } + + @Test + public void testFullyQualifiedRVCWithTenantSpecificViewAndConnection() throws Exception { + String baseTableDDL = + "CREATE TABLE BASE_MULTI_TENANT_TABLE(\n " + " tenant_id VARCHAR(5) NOT NULL,\n" + + " userid INTEGER NOT NULL,\n" + " username VARCHAR NOT NULL,\n" + " col VARCHAR\n " + + " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username)) MULTI_TENANT=true"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(baseTableDDL); + conn.close(); + + String tenantId = "tenantId"; + String tenantViewDDL = "CREATE VIEW TENANT_VIEW AS SELECT * FROM BASE_MULTI_TENANT_TABLE"; + Properties tenantProps = new Properties(); + tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + conn = DriverManager.getConnection(getUrl(), tenantProps); + conn.createStatement().execute(tenantViewDDL); + + String query = "SELECT * FROM TENANT_VIEW WHERE (userid, username) IN ((?, ?), (?, ?))"; + List binds = Arrays. asList(1, "uname1", 2, "uname2"); + + StatementContext context = compileStatementTenantSpecific(tenantId, query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertEquals(SkipScanFilter.class, filter.getClass()); + } + + @Test + public void testFullyQualifiedRVCWithNonTenantSpecificView() throws Exception { + String baseTableDDL = "CREATE TABLE BASE_TABLE(\n " + " tenant_id VARCHAR(5) NOT NULL,\n" + + " userid INTEGER NOT NULL,\n" + " username VARCHAR NOT NULL,\n" + " col VARCHAR\n " + + " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username))"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(baseTableDDL); + conn.close(); + + String viewDDL = "CREATE VIEW VIEWXYZ AS SELECT * FROM BASE_TABLE"; + conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(viewDDL); + + String query = + "SELECT * FROM VIEWXYZ WHERE (tenant_id, userid, username) IN ((?, ?, ?), (?, ?, ?))"; + List binds = Arrays. asList("tenantId", 1, "uname1", "tenantId", 2, "uname2"); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertEquals(SkipScanFilter.class, filter.getClass()); + } + + @Test + public void testRVCWithCompareOpsForRowKeyColumnValuesSmallerThanSchema() throws SQLException { + String orgId = "0000005"; + String entityId = "011"; + String orgId2 = "000005"; + String entityId2 = "11"; + + // CASE 1: >= + String query = "select * from atable where (organization_id, entity_id) >= (?,?)"; + List binds = Arrays. 
asList(orgId, entityId); + StatementContext context = compileStatement(query, binds); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId), 15)), scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + + // CASE 2: > + query = "select * from atable where (organization_id, entity_id) > (?,?)"; + binds = Arrays. asList(orgId, entityId); + context = compileStatement(query, binds); + scan = context.getScan(); + filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals( + ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId), 15))), + scan.getStartRow()); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); + + // CASE 3: <= + query = "select * from atable where (organization_id, entity_id) <= (?,?)"; + binds = Arrays. asList(orgId, entityId); + context = compileStatement(query, binds); + scan = context.getScan(); + filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); + assertArrayEquals( + ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId), 15))), + scan.getStopRow()); + + // CASE 4: < + query = "select * from atable where (organization_id, entity_id) < (?,?)"; + binds = Arrays. asList(orgId, entityId); + context = compileStatement(query, binds); + scan = context.getScan(); + filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); + assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId), 15)), scan.getStopRow()); + + // CASE 5: = + // For RVC, this will only occur if there's more than one key in the IN + query = "select * from atable where (organization_id, entity_id) IN ((?,?),(?,?))"; + binds = Arrays. 
asList(orgId, entityId, orgId2, entityId2); + context = compileStatement(query, binds); + scan = context.getScan(); + filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + ScanRanges scanRanges = context.getScanRanges(); + assertEquals(2, scanRanges.getPointLookupCount()); + Iterator iterator = scanRanges.getPointLookupKeyIterator(); + KeyRange k1 = iterator.next(); + assertTrue(k1.isSingleKey()); + assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId), 15)), k1.getLowerRange()); + KeyRange k2 = iterator.next(); + assertTrue(k2.isSingleKey()); + assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId2), 15), + StringUtil.padChar(PChar.INSTANCE.toBytes(entityId2), 15)), k2.getLowerRange()); + } + + @Test + public void testRVCInView() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("CREATE TABLE TEST_TABLE.TEST1 (\n" + "PK1 CHAR(3) NOT NULL, \n" + + "PK2 CHAR(3) NOT NULL,\n" + "DATA1 CHAR(10)\n" + "CONSTRAINT PK PRIMARY KEY (PK1, PK2))"); + conn.createStatement() + .execute("CREATE VIEW TEST_TABLE.FOO AS SELECT * FROM TEST_TABLE.TEST1 WHERE PK1 = 'FOO'"); + String query = + "SELECT * FROM TEST_TABLE.FOO WHERE PK2 < '004' AND (PK1,PK2) > ('FOO','002') LIMIT 2"; + Scan scan = compileStatement(query, Collections.emptyList(), 2).getScan(); + byte[] startRow = ByteUtil + .nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"), PVarchar.INSTANCE.toBytes("002"))); + assertArrayEquals(startRow, scan.getStartRow()); + byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"), PChar.INSTANCE.toBytes("004")); + assertArrayEquals(stopRow, scan.getStopRow()); + } + + @Test + public void testScanRangeForPointLookup() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333333"; + String query = String.format( + "select * from atable where organization_id='%s' and entity_id='%s'", tenantId, entityId); + try (Connection conn = DriverManager.getConnection(getUrl())) { + QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + byte[] stopRow = ByteUtil.nextKey(startRow); + validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); + } + } + + @Test + public void testScanRangeForPointLookupRVC() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333333"; + String query = + String.format("select * from atable where (organization_id, entity_id) IN (('%s','%s'))", + tenantId, entityId); + try (Connection conn = DriverManager.getConnection(getUrl())) { + QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + byte[] stopRow = ByteUtil.nextKey(startRow); + validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); + } + } + + @Test + public void testScanRangeForPointLookupWithLimit() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333333"; + String query = String.format( + "select * from atable where organization_id='%s' " + "and entity_id='%s' LIMIT 1", tenantId, + entityId); + try (Connection conn = DriverManager.getConnection(getUrl())) { + QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); + byte[] 
startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + byte[] stopRow = ByteUtil.nextKey(startRow); + validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); + } + } + + @Test + public void testScanRangeForPointLookupAggregate() throws SQLException { + String tenantId = "000000000000001"; + String entityId = "002333333333333"; + String query = String.format( + "select count(*) from atable where organization_id='%s' " + "and entity_id='%s'", tenantId, + entityId); + try (Connection conn = DriverManager.getConnection(getUrl())) { + QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); + byte[] startRow = + ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); + byte[] stopRow = ByteUtil.nextKey(startRow); + validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); + } + } + + private static void validateScanRangesForPointLookup(QueryPlan optimizedPlan, byte[] startRow, + byte[] stopRow) { + StatementContext context = optimizedPlan.getContext(); + ScanRanges scanRanges = context.getScanRanges(); + assertTrue(scanRanges.isPointLookup()); + assertEquals(1, scanRanges.getPointLookupCount()); + // scan from StatementContext has scan range [start, next(start)] + Scan scanFromContext = context.getScan(); + assertArrayEquals(startRow, scanFromContext.getStartRow()); + assertTrue(scanFromContext.includeStartRow()); + assertArrayEquals(stopRow, scanFromContext.getStopRow()); + assertFalse(scanFromContext.includeStopRow()); + + List> scans = optimizedPlan.getScans(); + assertEquals(1, scans.size()); + assertEquals(1, scans.get(0).size()); + Scan scanFromIterator = scans.get(0).get(0); + if (optimizedPlan.getLimit() == null && !optimizedPlan.getStatement().isAggregate()) { + // scan from iterator has same start and stop row [start, start] i.e a Get + assertTrue(scanFromIterator.isGetScan()); + assertTrue(scanFromIterator.includeStartRow()); + assertTrue(scanFromIterator.includeStopRow()); + } else { + // in case of limit scan range is same as the one in StatementContext + assertArrayEquals(startRow, scanFromIterator.getStartRow()); + assertTrue(scanFromIterator.includeStartRow()); + assertArrayEquals(stopRow, scanFromIterator.getStopRow()); + assertFalse(scanFromIterator.includeStopRow()); + } + } + + private static StatementContext compileStatementTenantSpecific(String tenantId, String query, + List binds) throws Exception { + PhoenixConnection pconn = + getTenantSpecificConnection("tenantId").unwrap(PhoenixConnection.class); + PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); + TestUtil.bindParams(pstmt, binds); + QueryPlan plan = pstmt.compileQuery(); + return plan.getContext(); + } + + private static Connection getTenantSpecificConnection(String tenantId) throws Exception { + Properties tenantProps = new Properties(); + tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + Connection conn = DriverManager.getConnection(getUrl(), tenantProps); + return conn; + } + + @Test + public void testTrailingIsNull() throws Exception { + String baseTableDDL = "CREATE TABLE t(\n " + " a VARCHAR,\n" + " b VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b))"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(baseTableDDL); + conn.close(); + + String query = "SELECT * FROM t WHERE a = 'a' and b is null"; + StatementContext context = compileStatement(query, Collections. 
emptyList()); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertNull(filter); + assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow()); + assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); + } + + @Test + public void testTrailingIsNullWithOr() throws Exception { + String baseTableDDL = "CREATE TABLE t(\n " + " a VARCHAR,\n" + " b VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b))"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(baseTableDDL); + conn.close(); + + String query = "SELECT * FROM t WHERE a = 'a' and (b is null or b = 'b')"; + StatementContext context = compileStatement(query, Collections. emptyList()); + Scan scan = context.getScan(); + Filter filter = scan.getFilter(); + assertTrue(filter instanceof SkipScanFilter); + SkipScanFilter skipScan = (SkipScanFilter) filter; + List> slots = skipScan.getSlots(); + assertEquals(2, slots.size()); + assertEquals(1, slots.get(0).size()); + assertEquals(2, slots.get(1).size()); + assertEquals(KeyRange.getKeyRange(Bytes.toBytes("a")), slots.get(0).get(0)); + assertTrue(KeyRange.IS_NULL_RANGE == slots.get(1).get(0)); + assertEquals(KeyRange.getKeyRange(Bytes.toBytes("b")), slots.get(1).get(1)); + assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow()); + assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); + } + + @Test + public void testAndWithRVC() throws Exception { + String ddl; + String query; + StatementContext context; + Connection conn = DriverManager.getConnection(getUrl()); + + ddl = + "create table t (a integer not null, b integer not null, c integer constraint pk primary key (a,b))"; + conn.createStatement().execute(ddl); + + query = "select c from t where a in (1,2) and b = 3 and (a,b) in ( (1,2) , (1,3))"; + context = compileStatement(query, Collections. emptyList()); + assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), + context.getScan().getStartRow()); + assertArrayEquals( + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), + context.getScan().getStopRow()); + + query = "select c from t where (a,b) in ( (1,2) , (1,3) ) and b = 4"; + context = compileStatement(query, Collections. emptyList()); + assertDegenerate(context.getScan()); + + query = "select c from t where a = 1 and b = 3 and (a,b) in ( (1,2) , (1,3))"; + context = compileStatement(query, Collections. emptyList()); + assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), + context.getScan().getStartRow()); + assertArrayEquals( + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), + context.getScan().getStopRow()); + + // Test with RVC occurring later in the PK + ddl = + "create table t1 (d varchar, e char(3) not null, a integer not null, b integer not null, c integer constraint pk primary key (d, e, a,b))"; + conn.createStatement().execute(ddl); + + query = + "select c from t1 where d = 'a' and e = 'foo' and a in (1,2) and b = 3 and (a,b) in ( (1,2) , (1,3))"; + context = compileStatement(query, Collections. 
emptyList()); + Scan scan = context.getScan(); + assertArrayEquals( + ByteUtil.concat(PVarchar.INSTANCE.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, + PChar.INSTANCE.toBytes("foo"), PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), + scan.getStartRow()); + assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes("a"), + QueryConstants.SEPARATOR_BYTE_ARRAY, PChar.INSTANCE.toBytes("foo"), + PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), + scan.getStopRow()); + + conn.close(); + } + + @Test + public void testNoAggregatorForOrderBy() throws SQLException { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + conn.createStatement().execute( + "create table test (pk1 integer not null, pk2 integer not null, constraint pk primary key (pk1,pk2))"); + StatementContext context = + compileStatement("select count(distinct pk1) from test order by count(distinct pk2)"); + assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); + context = compileStatement("select sum(pk1) from test order by count(distinct pk2)"); + assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); + context = compileStatement("select min(pk1) from test order by count(distinct pk2)"); + assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); + context = compileStatement("select max(pk1) from test order by count(distinct pk2)"); + assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); + // here the ORDER BY is not optimized away + context = compileStatement("select avg(pk1) from test order by count(distinct pk2)"); + assertEquals(2, context.getAggregationManager().getAggregators().getAggregatorCount()); + } + + @Test + public void testPartialRVCWithLeadingPKEq() throws SQLException { + String tenantId = "o1"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE COMMUNITIES.TEST (\n" + " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + + " SCORE DOUBLE NOT NULL,\n" + " ENTITY_ID CHAR(2) NOT NULL\n" + + " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " SCORE,\n" + " ENTITY_ID\n" + " )\n" + + ") VERSIONS=1, MULTI_TENANT=TRUE"); + String query = + "SELECT entity_id, score\n" + "FROM communities.test\n" + "WHERE organization_id = '" + + tenantId + "'\n" + "AND (score, entity_id) > (2.0, '04')\n" + "ORDER BY score, entity_id"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + PDouble.INSTANCE.toBytes(2.0), PChar.INSTANCE.toBytes("04"))); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testPartialRVCWithLeadingPKEqDesc() throws SQLException { + String tenantId = "o1"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE COMMUNITIES.TEST (\n" + " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + + " SCORE DOUBLE NOT NULL,\n" + " ENTITY_ID CHAR(2) NOT NULL\n" + + " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " SCORE DESC,\n" + " ENTITY_ID DESC\n" + " )\n" + + ") VERSIONS=1, MULTI_TENANT=TRUE"); + String query = "SELECT entity_id, score\n" + "FROM communities.test\n" + + "WHERE organization_id = '" + tenantId + "'\n" + "AND 
(score, entity_id) < (2.0, '04')\n" + + "ORDER BY score DESC, entity_id DESC"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + PDouble.INSTANCE.toBytes(2.0, SortOrder.DESC), PChar.INSTANCE.toBytes("04", SortOrder.DESC))); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testFullRVCWithLeadingPKEqDesc() throws SQLException { + String tenantId = "o1"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE COMMUNITIES.TEST (\n" + " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + + " SCORE DOUBLE NOT NULL,\n" + " ENTITY_ID CHAR(2) NOT NULL\n" + + " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " SCORE DESC,\n" + " ENTITY_ID DESC\n" + " )\n" + + ") VERSIONS=1, MULTI_TENANT=TRUE"); + String query = + "SELECT entity_id, score\n" + "FROM communities.test\n" + "WHERE organization_id = '" + + tenantId + "'\n" + "AND (organization_id, score, entity_id) < ('" + tenantId + + "',2.0, '04')\n" + "ORDER BY score DESC, entity_id DESC"; + Scan scan = compileStatement(query).getScan(); + assertNull(scan.getFilter()); + + // TODO: end to end test that confirms this start row is accurate + byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), + PDouble.INSTANCE.toBytes(2.0, SortOrder.DESC), + ByteUtil.nextKey(PChar.INSTANCE.toBytes("04", SortOrder.DESC))); + assertArrayEquals(startRow, scan.getStartRow()); + assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); + } + + @Test + public void testTrimTrailing() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String sql = + "CREATE TABLE T(" + "A CHAR(1) NOT NULL," + "B CHAR(1) NOT NULL," + "C CHAR(1) NOT NULL," + + "D CHAR(1) NOT NULL," + "DATA INTEGER, " + "CONSTRAINT TEST_PK PRIMARY KEY (A,B,C,D))"; + conn.createStatement().execute(sql); + + // Will cause trailing part of RVC to (A,B,C) to be trimmed allowing us to perform a skip scan + sql = + "select * from T where (A,B,C) >= ('A','A','A') and (A,B,C) < ('D','D','D') and (B,C) > ('E','E')"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + List> rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals( + Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PChar.INSTANCE.toBytes("A"), true, + PChar.INSTANCE.toBytes("D"), false)), + Arrays.asList( + KeyRange.getKeyRange(PChar.INSTANCE.toBytes("EE"), false, KeyRange.UNBOUND, false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), PChar.INSTANCE.toBytes("AEF")); + assertArrayEquals(scan.getStopRow(), PChar.INSTANCE.toBytes("D")); + sql = + "select * from T where (A,B,C) > ('A','A','A') and (A,B,C) <= ('D','D','D') and (B,C) >= ('E','E')"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals( + Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PChar.INSTANCE.toBytes("A"), true, + PChar.INSTANCE.toBytes("D"), true)), + Arrays.asList( + KeyRange.getKeyRange(PChar.INSTANCE.toBytes("EE"), true, KeyRange.UNBOUND, false))), + 
rowKeyRanges); + assertArrayEquals(PChar.INSTANCE.toBytes("AEE"), scan.getStartRow()); + assertArrayEquals(PChar.INSTANCE.toBytes("E"), scan.getStopRow()); + } + } + + @Test + public void testMultiSlotTrailingIntersect() throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + String sql = + "CREATE TABLE T(" + "A CHAR(1) NOT NULL," + "B CHAR(1) NOT NULL," + "C CHAR(1) NOT NULL," + + "D CHAR(1) NOT NULL," + "DATA INTEGER, " + "CONSTRAINT TEST_PK PRIMARY KEY (A,B,C,D))"; + conn.createStatement().execute(sql); + + sql = + "select * from t where (a,b) in (('A','B'),('B','A'),('B','B'),('A','A')) and (a,b,c) in ( ('A','B','C') , ('A','C','D'), ('B','B','E'))"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + List> rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList(Arrays.asList(KeyRange.POINT.apply(PChar.INSTANCE.toBytes("ABC")), + KeyRange.POINT.apply(PChar.INSTANCE.toBytes("BBE")))), rowKeyRanges); + assertArrayEquals(scan.getStartRow(), PChar.INSTANCE.toBytes("ABC")); + assertArrayEquals(scan.getStopRow(), PChar.INSTANCE.toBytes("BBF")); + } + } + + @Test + public void testEqualityAndGreaterThanRVC() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ")"); + String query = "SELECT * FROM T WHERE A = 'C' and (A,B,C) > ('C','B','X') and C='C'"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, query); + Scan scan = queryPlan.getContext().getScan(); + // + // Note: The optimal scan boundary for the above query is ['CCC' - *), however, I don't see an + // easy way to fix this currently so prioritizing. 
Opened JIRA PHOENIX-5885 + assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes("C"), PChar.INSTANCE.toBytes("B"), + PChar.INSTANCE.toBytes("C")), scan.getStartRow()); + assertArrayEquals(PChar.INSTANCE.toBytes("D"), scan.getStopRow()); + } + } + + @Test + public void testEqualityAndGreaterThanRVC2() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + conn.createStatement() + .execute("CREATE TABLE T (\n" + " A CHAR(1) NOT NULL,\n" + " B CHAR(1) NOT NULL,\n" + + " C CHAR(1) NOT NULL,\n" + " D CHAR(1) NOT NULL,\n" + + " CONSTRAINT PK PRIMARY KEY (\n" + " A,\n" + " B,\n" + " C,\n" + + " D\n" + " )\n" + ")"); + String query = "SELECT * FROM T WHERE A = 'C' and (A,B,C) > ('C','B','A') and C='C'"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, query); + Scan scan = queryPlan.getContext().getScan(); + assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes("C"), PChar.INSTANCE.toBytes("B"), + PChar.INSTANCE.toBytes("C")), scan.getStartRow()); + assertArrayEquals(PChar.INSTANCE.toBytes("D"), scan.getStopRow()); + } + } + + @Test + public void testOrExpressionNonLeadingPKPushToScanBug4602() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String testTableName = "OR_NO_LEADING_PK4602"; + String sql = "CREATE TABLE " + testTableName + "(" + "PK1 INTEGER NOT NULL," + + "PK2 INTEGER NOT NULL," + "PK3 INTEGER NOT NULL," + "DATA INTEGER, " + + "CONSTRAINT TEST_PK PRIMARY KEY (PK1,PK2,PK3))"; + conn.createStatement().execute(sql); + + // case 1: pk1 is equal,pk2 is multiRange + sql = "select * from " + testTableName + + " t where (t.pk1 = 2) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + List> rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList(Arrays.asList(KeyRange.POINT.apply(PInteger.INSTANCE.toBytes(2))), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false))), + rowKeyRanges); + + assertArrayEquals(scan.getStartRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); + assertArrayEquals(scan.getStopRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(9))); + + // case 2: pk1 is range,pk2 is multiRange + sql = "select * from " + testTableName + + " t where (t.pk1 >=2 and t.pk1<5) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, + PInteger.INSTANCE.toBytes(5), false)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); + 
assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(5)); + + // case 3 : pk1 has multiRange,,pk2 is multiRange + sql = "select * from " + testTableName + + " t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 and t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList( + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, PInteger.INSTANCE.toBytes(5), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7), true, PInteger.INSTANCE.toBytes(9), + false)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); + assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); + + // case4 : only pk1 and pk3, no pk2 + sql = "select * from " + testTableName + + " t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 and t.pk1 <9)) and ((t.pk3 >= 4 and t.pk3 <6) or (t.pk3 >= 8 and t.pk3 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + /** + * This sql use skipScan, and all the whereExpressions are in SkipScanFilter, so there is no + * other RowKeyComparisonFilter needed. + */ + assertTrue(scan.getFilter() instanceof SkipScanFilter); + + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList( + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, PInteger.INSTANCE.toBytes(5), + false), + KeyRange + .getKeyRange(PInteger.INSTANCE.toBytes(7), true, PInteger.INSTANCE.toBytes(9), false)), + Arrays.asList(KeyRange.EVERYTHING_RANGE), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(2)); + assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); + + // case 5: pk1 or data column + sql = + "select * from " + testTableName + " t where ((t.pk1 >=2) or (t.data >= 4 and t.data <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SingleCQKeyValueComparisonFilter); + Expression pk1Expression = new ColumnRef(queryPlan.getTableRef(), + queryPlan.getTableRef().getTable().getColumnForColumnName("PK1").getPosition()) + .newColumnExpression(); + Expression dataExpression = new ColumnRef(queryPlan.getTableRef(), + queryPlan.getTableRef().getTable().getColumnForColumnName("DATA").getPosition()) + .newColumnExpression(); + assertEquals( + TestUtil.singleKVFilter(TestUtil.or( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk1Expression, 2), + TestUtil.and( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, dataExpression, 4), + TestUtil.constantComparison(CompareOperator.LESS, dataExpression, 9)))), + scan.getFilter()); + assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); + assertArrayEquals(scan.getStopRow(), 
HConstants.EMPTY_END_ROW); + + // case 6: pk1 or pk2,but pk2 is empty range + sql = "select * from " + testTableName + + " t where (t.pk1 >=2 and t.pk1<5) or ((t.pk2 >= 4 and t.pk2 <6) and (t.pk2 >= 8 and t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertNull(scan.getFilter()); + assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(2)); + assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(5)); + + // case 7: pk1 or pk2,but pk2 is all range + sql = "select * from " + testTableName + + " t where (t.pk1 >=2 and t.pk1<5) or (t.pk2 >=7 or t.pk2 <9)"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + + scan = queryPlan.getContext().getScan(); + Expression pk2Expression = new ColumnRef(queryPlan.getTableRef(), + queryPlan.getTableRef().getTable().getColumnForColumnName("PK2").getPosition()) + .newColumnExpression(); + assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); + assertEquals( + TestUtil.rowKeyFilter(TestUtil.or( + TestUtil.and( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk1Expression, 2), + TestUtil.constantComparison(CompareOperator.LESS, pk1Expression, 5)), + TestUtil.or( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk2Expression, 7), + TestUtil.constantComparison(CompareOperator.LESS, pk2Expression, 9)))), + scan.getFilter()); + assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); + assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); + + // case 8: pk1 and pk2, but pk1 has a or allRange + sql = "select * from " + testTableName + + " t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 or t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); + assertEquals( + TestUtil.rowKeyFilter(TestUtil.or( + TestUtil.and( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk2Expression, 4), + TestUtil.constantComparison(CompareOperator.LESS, pk2Expression, 6)), + TestUtil.and( + TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk2Expression, 8), + TestUtil.constantComparison(CompareOperator.LESS, pk2Expression, 9)))), + scan.getFilter()); + + assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); + assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); + + // case 9: pk1 and pk2, but pk2 has a or allRange + sql = "select * from " + testTableName + + " t where ((t.pk1 >= 4 and t.pk1 <6) or (t.pk1 >= 8 and t.pk1 <9)) and ((t.pk2 >=2 and t.pk2<5) or (t.pk2 >=7 or t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList(Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false)), + Arrays.asList(KeyRange.EVERYTHING_RANGE)), rowKeyRanges); + assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(4)); + assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); + + // case 10: only pk2 + sql = "select * from " + testTableName + " t where (pk2 <=7 or pk2>9)"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + pk2Expression = new 
ColumnRef(queryPlan.getTableRef(), + queryPlan.getTableRef().getTable().getColumnForColumnName("PK2").getPosition()) + .newColumnExpression(); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); + assertEquals( + TestUtil.rowKeyFilter( + TestUtil.or(TestUtil.constantComparison(CompareOperator.LESS_OR_EQUAL, pk2Expression, 7), + TestUtil.constantComparison(CompareOperator.GREATER, pk2Expression, 9))), + scan.getFilter()); + assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); + assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); + + // case 11: pk1 and pk2, but pk1 has a or allRange and force skip scan + sql = "select /*+ SKIP_SCAN */ * from " + testTableName + + " t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 or t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (scan.getFilter())).getSlots(); + assertEquals(Arrays.asList(Arrays.asList(KeyRange.EVERYTHING_RANGE), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), + false), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), + false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); + assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); + } finally { + if (conn != null) { conn.close(); - - String query = "select * from start_stop_test where pk >= 'EA' and pk < 'EZ'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes("EA"), scan.getStartRow()); - assertArrayEquals(PVarchar.INSTANCE.toBytes("EZ"), scan.getStopRow()); - } - - @Test - public void testConcatSingleKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id || 'foo' ='" + tenantId + "'||'foo'"; - Scan scan = compileStatement(query).getScan(); - - // The || operator cannot currently be used to form the start/stop key - assertNotNull(scan.getFilter()); - assertEquals(0, scan.getStartRow().length); - assertEquals(0, scan.getStopRow().length); - } - - @Test - public void testLiteralConcatExpression() throws SQLException { - String query = "select * from atable where null||'foo'||'bar' = 'foobar'"; - Scan scan = new Scan(); - List binds = Collections.emptyList(); - compileStatement(query, binds); - - assertNull(scan.getFilter()); - assertEquals(0, scan.getStartRow().length); - assertEquals(0, scan.getStopRow().length); - } - - @Test - public void testSingleKeyNotExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where not organization_id='" + tenantId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNotNull(scan.getFilter()); - assertEquals(0, scan.getStartRow().length); - assertEquals(0, scan.getStopRow().length); - } - - @Test - public void testMultiKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3)='" + keyPrefix + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - 
PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)), 15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testMultiKeyBindExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id=? and substr(entity_id,1,3)=?"; - List binds = Arrays.asList(tenantId,keyPrefix); - Scan scan = compileStatement(query, binds).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testEqualRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date startDate = DateUtil.parseDate("2011-12-31 12:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')=?"; - List binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - assertTrue(scan.includeStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testDegenerateRound() throws Exception { - String inst = "a"; - String host = "b"; - Date startDate = DateUtil.parseDate("2012-01-01 01:00:00"); - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')=?"; - List binds = Arrays.asList(inst,host,startDate); - Scan scan = compileStatement(query, binds).getScan(); - assertDegenerate(scan); - } - - @Test - public void testBoundaryGreaterThanRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date startDate = DateUtil.parseDate("2012-01-01 12:00:00"); - String query = "select * from ptsdb where inst=? and host=? 
and round(date,'DAY')>?"; - List binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - assertTrue(scan.includeStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testBoundaryGreaterThanOrEqualRound() throws Exception { - String inst = "a"; - String host = "b"; - Date startDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date startDateHalfRange = DateUtil.parseDate("2011-12-31 12:00:00.000"); - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')>=?"; - List binds = Arrays.asList(inst,host,startDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDateHalfRange)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testGreaterThanRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date startDate = DateUtil.parseDate("2012-01-01 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')>?"; - List binds = Arrays.asList(inst,host,roundDate); - - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - assertTrue(scan.includeStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLessThanRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? 
and round(date,'DAY') binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertTrue(scan.includeStartRow()); - } - - @Test - public void testBoundaryLessThanRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date endDate = DateUtil.parseDate("2011-12-31 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY') binds = Arrays.asList(inst,host,roundDate); - - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualRound2() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2011-12-31 23:00:00"); - Date endDate = DateUtil.parseDate("2011-12-31 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? 
and round(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testBoundaryLessThanOrEqualRound() throws Exception { - String inst = "a"; - String host = "b"; - Date roundDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 12:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and round(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,roundDate); - Scan scan = compileStatement(query, binds).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - assertTrue(scan.includeStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualFloor() throws Exception { - String inst = "a"; - String host = "b"; - Date floorDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date endDate = DateUtil.parseDate("2012-01-02 00:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,floorDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualFloorBoundary() throws Exception { - String inst = "a"; - String host = "b"; - Date floorDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date endDate = DateUtil.parseDate("2012-01-02 00:00:00"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? 
and floor(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,floorDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testGreaterThanOrEqualFloor() throws Exception { - String inst = "a"; - String host = "b"; - Date floorDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date startDate = DateUtil.parseDate("2012-01-02 00:00:00"); - String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')>=?"; - List binds = Arrays.asList(inst,host,floorDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testGreaterThanOrEqualFloorBoundary() throws Exception { - String inst = "a"; - String host = "b"; - Date floorDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date startDate = DateUtil.parseDate("2012-01-01 00:00:00"); - String query = "select * from ptsdb where inst=? and host=? and floor(date,'DAY')>=?"; - List binds = Arrays.asList(inst,host,floorDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualCeil() throws Exception { - String inst = "a"; - String host = "b"; - Date ceilDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? 
and ceil(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,ceilDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testLessThanOrEqualCeilBoundary() throws Exception { - String inst = "a"; - String host = "b"; - Date ceilDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date endDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); //Hbase normalizes scans to left closed - String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')<=?"; - List binds = Arrays.asList(inst,host,ceilDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host)/*,QueryConstants.SEPARATOR_BYTE_ARRAY*/); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(endDate)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testGreaterThanOrEqualCeil() throws Exception { - String inst = "a"; - String host = "b"; - Date ceilDate = DateUtil.parseDate("2012-01-01 01:00:00"); - Date startDate = DateUtil.parseDate("2012-01-01 00:00:00.001"); - String query = "select * from ptsdb where inst=? and host=? and ceil(date,'DAY')>=?"; - List binds = Arrays.asList(inst,host,ceilDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testGreaterThanOrEqualCeilBoundary() throws Exception { - String inst = "a"; - String host = "b"; - Date ceilDate = DateUtil.parseDate("2012-01-01 00:00:00"); - Date startDate = DateUtil.parseDate("2011-12-31 00:00:00.001"); - String query = "select * from ptsdb where inst=? and host=? 
and ceil(date,'DAY')>=?"; - List binds = Arrays.asList(inst,host,ceilDate); - Scan scan = compileStatement(query, binds).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY, - PDate.INSTANCE.toBytes(startDate)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(inst),QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(host),QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(stopRow, scan.getStopRow()); - assertFalse(scan.includeStopRow()); - } - - @Test - public void testOverlappingKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String entityId = "002333333333333"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3)='" + keyPrefix + "' and entity_id='" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); - } - - @Test - public void testSubstrExpressionWithoutLengthVariable() { - assertEquals("SUBSTR(ENTITY_ID, 1)",((SubstrFunction)substr2(ENTITY_ID,1)).toString()); - } - - @Test - public void testSubstrExpressionWithLengthVariable() { - assertEquals("SUBSTR(ENTITY_ID, 1, 10)",((SubstrFunction)substr(ENTITY_ID,1, 10)).toString()); - } - - @Test - public void testTrailingSubstrExpression() throws SQLException { - String tenantId = "0xD000000000001"; - String entityId = "002333333333333"; - String query = "select * from atable where substr(organization_id,1,3)='" + tenantId.substring(0, 3) + "' and entity_id='" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - assertNotNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(StringUtil.padChar(PVarchar.INSTANCE.toBytes(tenantId.substring(0,3)),15), - PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(startRow, scan.getStartRow()); - // Even though the first slot is a non inclusive range, we need to do a next key - // on the second slot because of the algorithm we use to seek to and terminate the - // loop during skip scan. We could end up having a first slot just under the upper - // limit of slot one and a value equal to the value in slot two and we need this to - // be less than the upper range that would get formed. 
- byte[] stopRow = ByteUtil.concat(StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId.substring(0,3))),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testBasicRangeExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id <= '" + tenantId + "'"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - assertTrue(scan.getStartRow().length == 0); - byte[] stopRow = ByteUtil.concat(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId))); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression1() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String keyPrefix2= "004"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + keyPrefix1 + "' and substr(entity_id,1,3) < '" + keyPrefix2 + "'"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix2),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression2() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String keyPrefix2= "004"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + keyPrefix1 + "' and substr(entity_id,1,3) <= '" + keyPrefix2 + "'"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix2)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression3() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String keyPrefix2= "004"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) > '" + keyPrefix1 + "' and substr(entity_id,1,3) <= '" + keyPrefix2 + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix1)),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix2)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression4() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String entityId= "002000000000002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) > '" + keyPrefix1 + "' and substr(entity_id,1,3) = '" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - assertDegenerate(scan); - } - - @Test - 
public void testKeyRangeExpression5() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String entityId= "002000000000002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) <= '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression6() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String entityId= "002000000000002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) < '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - assertDegenerate(scan); - } - - @Test - public void testKeyRangeExpression7() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String entityId= "002000000000002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) < '" + keyPrefix1 + "' and entity_id < '" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = PChar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1),entityId.length())); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression8() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "001"; - String entityId= "002000000000002"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) > '" + keyPrefix1 + "' and entity_id = '" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow()); - } - - @Test - public void testKeyRangeExpression9() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix1 = "002"; - String keyPrefix2 = "0033"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3) >= '" + keyPrefix1 + "' and substr(entity_id,1,4) <= '" + keyPrefix2 + "'"; - Scan scan = compileStatement(query).getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PChar.INSTANCE.toBytes(keyPrefix1),15)); // extra byte is due to implicit internal padding - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PChar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix2)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - /** 
- * This is testing the degenerate case where nothing will match because the overlapping keys (keyPrefix and entityId) don't match. - * @throws SQLException - */ - @Test - public void testUnequalOverlappingKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String entityId = "001333333333333"; - String query = "select * from atable where organization_id='" + tenantId + "' and substr(entity_id,1,3)='" + keyPrefix + "' and entity_id='" + entityId + "'"; - Scan scan = compileStatement(query).getScan(); - assertDegenerate(scan); - } - - @Test - public void testTopLevelOrKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' or a_integer=2"; - Scan scan = compileStatement(query).getScan(); - - assertNotNull(scan.getFilter()); - assertEquals(0, scan.getStartRow().length); - assertEquals(0, scan.getStopRow().length); - } - - @Test - public void testSiblingOrKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and (a_integer = 2 or a_integer = 3)"; - Scan scan = compileStatement(query).getScan(); - - assertNotNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testColumnNotFound() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where bar='" + tenantId + "'"; - try { - compileStatement(query); - fail(); - } catch (ColumnNotFoundException e) { - // expected - } - } - - @Test - public void testNotContiguousPkColumn() throws SQLException { - String keyPrefix = "002"; - String query = "select * from atable where substr(entity_id,1,3)='" + keyPrefix + "'"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertNotNull(scan.getFilter()); - assertEquals(0, scan.getStartRow().length); - assertEquals(0, scan.getStopRow().length); - } - - @Test - public void testMultipleNonEqualitiesPkColumn() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id >= '" + tenantId + "' AND substr(entity_id,1,3) > '" + keyPrefix + "'"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertNotNull(scan.getFilter()); -// assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals( - ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), - PChar.INSTANCE.toBytes( - PChar.INSTANCE.pad( - PChar.INSTANCE.toObject(ByteUtil.nextKey(PChar.INSTANCE.toBytes(keyPrefix))), - 15))), - scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testRHSLiteral() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id='" + tenantId + "' and 0 >= a_integer limit 1000"; - StatementContext context = compileStatement(query, 1000); - Scan scan = context.getScan(); - - assertNotNull(scan.getFilter()); - assertArrayEquals(PVarchar.INSTANCE.toBytes(tenantId), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - - @Test - public void testKeyTypeMismatch() { - String query = 
"select * from atable where organization_id=5"; - try { - compileStatement(query); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage().contains("Type mismatch")); - } - } - - @Test - public void testLikeExtractAllKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id = ? and entity_id LIKE '" + keyPrefix + "%'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeExtractAllKeyExpression2() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "中文"; - String query = "select * from atable where organization_id = ? and entity_id LIKE '" + keyPrefix + "%'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeExtractAllAsEqKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id LIKE ? 
and entity_id LIKE '" + keyPrefix + "%'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - assertNull(scan.getFilter()); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeExpressionWithDescOrder() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - String tableName = generateUniqueName(); - conn.createStatement().execute( - "CREATE TABLE " + tableName + " (id varchar, name varchar, type decimal, " - + "status integer CONSTRAINT pk PRIMARY KEY(id desc, type))"); - String query = "SELECT * FROM " + tableName + " where type = 1 and id like 'xy%'"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertTrue(scan.getFilter() instanceof SkipScanFilter); - SkipScanFilter filter = (SkipScanFilter) scan.getFilter(); - - byte[] lowerRange = filter.getSlots().get(0).get(0).getLowerRange(); - byte[] upperRange = filter.getSlots().get(0).get(0).getUpperRange(); - boolean lowerInclusive = filter.getSlots().get(0).get(0).isLowerInclusive(); - boolean upperInclusive = filter.getSlots().get(0).get(0).isUpperInclusive(); - - byte[] startRow = PVarchar.INSTANCE.toBytes("xy"); - byte[] invStartRow = new byte[startRow.length]; - SortOrder.invert(startRow, 0, invStartRow, 0, startRow.length); - - byte[] stopRow = PVarchar.INSTANCE.toBytes("xz"); - byte[] invStopRow = new byte[startRow.length]; - SortOrder.invert(stopRow, 0, invStopRow, 0, stopRow.length); - - assertArrayEquals(invStopRow, lowerRange); - assertArrayEquals(invStartRow, upperRange); - assertFalse(lowerInclusive); - assertTrue(upperInclusive); - - byte[] expectedStartRow = ByteUtil.concat(invStartRow, new byte[]{0}, - PDecimal.INSTANCE.toBytes(new BigDecimal(1))); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - - byte[] expectedStopRow = ByteUtil.concat(invStartRow, - new byte[]{(byte) (0xFF)}, PDecimal.INSTANCE.toBytes(new BigDecimal(1)), - new byte[]{1}); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - - query = "SELECT * FROM " + tableName + " where type = 1 and id like 'x%'"; - context = compileStatement(query); - scan = context.getScan(); - - assertTrue(scan.getFilter() instanceof SkipScanFilter); - filter = (SkipScanFilter) scan.getFilter(); - - lowerRange = filter.getSlots().get(0).get(0).getLowerRange(); - upperRange = filter.getSlots().get(0).get(0).getUpperRange(); - lowerInclusive = filter.getSlots().get(0).get(0).isLowerInclusive(); - upperInclusive = filter.getSlots().get(0).get(0).isUpperInclusive(); - - startRow = PVarchar.INSTANCE.toBytes("x"); - invStartRow = new byte[startRow.length]; - SortOrder.invert(startRow, 0, invStartRow, 0, startRow.length); - - stopRow = PVarchar.INSTANCE.toBytes("y"); - invStopRow = new byte[startRow.length]; - SortOrder.invert(stopRow, 0, invStopRow, 0, stopRow.length); - - assertArrayEquals(invStopRow, lowerRange); - assertArrayEquals(invStartRow, upperRange); - assertFalse(lowerInclusive); - assertTrue(upperInclusive); - - expectedStartRow = ByteUtil.concat(invStartRow, new byte[]{0}, - PDecimal.INSTANCE.toBytes(new BigDecimal(1))); - 
assertArrayEquals(expectedStartRow, scan.getStartRow()); - - expectedStopRow = ByteUtil.concat(invStartRow, - new byte[]{(byte) (0xFF)}, PDecimal.INSTANCE.toBytes(new BigDecimal(1)), - new byte[]{1}); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - } - - @Test - public void testLikeNoWildcardExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select * from atable where organization_id LIKE ? and entity_id LIKE '" + keyPrefix + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.nextKey(startRow); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeExtractKeyExpression2() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = keyPrefix + "_"; - String query = "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(like( - ENTITY_ID, - likeArg, - context)), - filter); - - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeOptKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = keyPrefix + "%003%"; - String query = "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(like( - ENTITY_ID, - likeArg, - context)), - filter); - - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeOptKeyExpression2() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = keyPrefix + "%003%"; - String query = "select * from atable where organization_id = ? 
and substr(entity_id,1,10) LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(like( - substr(ENTITY_ID,1,10), - likeArg, - context)), - filter); - - byte[] startRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15)); - byte[] stopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testLikeNoOptKeyExpression3() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = keyPrefix + "%003%"; - String query = "select * from atable where organization_id = ? and substr(entity_id,4,10) LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(like( - substr(ENTITY_ID,4,10), - likeArg, - context)), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); - } - - @Test - public void testLikeNoOptKeyExpression() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = "%001%" + keyPrefix + "%"; - String query = "select * from atable where organization_id = ? and entity_id LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(like( - ENTITY_ID, - likeArg, - context)), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); - } - - @Test - public void testLikeNoOptKeyExpression2() throws SQLException { - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String likeArg = keyPrefix + "%"; - String query = "select * from atable where organization_id = ? and entity_id NOT LIKE '" + likeArg + "'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertEquals( - rowKeyFilter(not(like( - ENTITY_ID, - likeArg, - context))), - filter); - - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow()); - } - - @Test - public void testLikeDegenerate() throws SQLException { - String tenantId = "000000000000001"; - String query = "select * from atable where organization_id = ? 
and entity_id LIKE '0000000000000012%003%'"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateDivision1() throws SQLException { - String query = "select * from atable where a_integer = 3 / null"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateDivision2() throws SQLException { - String query = "select * from atable where a_integer / null = 3"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateMult1() throws SQLException { - String query = "select * from atable where a_integer = 3 * null"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateMult2() throws SQLException { - String query = "select * from atable where a_integer * null = 3"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateAdd1() throws SQLException { - String query = "select * from atable where a_integer = 3 + null"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateAdd2() throws SQLException { - String query = "select * from atable where a_integer + null = 3"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateSub1() throws SQLException { - String query = "select * from atable where a_integer = 3 - null"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - @Test - public void testDegenerateSub2() throws SQLException { - String query = "select * from atable where a_integer - null = 3"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - - assertDegenerate(scan); - } - - /* - * The following 5 tests are testing the comparison in where clauses under the case when the rhs - * cannot be coerced into the lhs. We need to confirm the decision make by expression compilation - * returns correct decisions. - */ - @Test - public void testValueComparisonInt() throws SQLException { - ensureTableCreated(getUrl(),"PKIntValueTest", "PKIntValueTest"); - String query; - // int <-> long - // Case 1: int = long, comparison always false, key is degenerated. - query = "SELECT * FROM PKintValueTest where pk = " + Long.MAX_VALUE; - assertQueryConditionAlwaysFalse(query); - // Case 2: int != long, comparison always true, no key set since we need to do a full - // scan all the time. 
- query = "SELECT * FROM PKintValueTest where pk != " + Long.MAX_VALUE; - assertQueryConditionAlwaysTrue(query); - // Case 3: int > positive long, comparison always false; - query = "SELECT * FROM PKintValueTest where pk >= " + Long.MAX_VALUE; - assertQueryConditionAlwaysFalse(query); - // Case 4: int <= Integer.MAX_VALUE < positive long, always true; - query = "SELECT * FROM PKintValueTest where pk <= " + Long.MAX_VALUE; - assertQueryConditionAlwaysTrue(query); - // Case 5: int >= Integer.MIN_VALUE > negative long, always true; - query = "SELECT * FROM PKintValueTest where pk >= " + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysTrue(query); - // Case 6: int < negative long, comparison always false; - query = "SELECT * FROM PKintValueTest where pk <= " + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysFalse(query); - } - - @Test - public void testValueComparisonUnsignedInt() throws SQLException { - ensureTableCreated(getUrl(), "PKUnsignedIntValueTest", "PKUnsignedIntValueTest"); - String query; - // unsigned_int <-> negative int/long - // Case 1: unsigned_int = negative int, always false; - query = "SELECT * FROM PKUnsignedIntValueTest where pk = -1"; - assertQueryConditionAlwaysFalse(query); - // Case 2: unsigned_int != negative int, always true; - query = "SELECT * FROM PKUnsignedIntValueTest where pk != -1"; - assertQueryConditionAlwaysTrue(query); - // Case 3: unsigned_int > negative int, always true; - query = "SELECT * FROM PKUnsignedIntValueTest where pk > " + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysTrue(query); - // Case 4: unsigned_int < negative int, always false; - query = "SELECT * FROM PKUnsignedIntValueTest where pk < " + + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysFalse(query); - // unsigned_int <-> big positive long - // Case 1: unsigned_int = big positive long, always false; - query = "SELECT * FROM PKUnsignedIntValueTest where pk = " + Long.MAX_VALUE; - assertQueryConditionAlwaysFalse(query); - // Case 2: unsigned_int != big positive long, always true; - query = "SELECT * FROM PKUnsignedIntValueTest where pk != " + Long.MAX_VALUE; - assertQueryConditionAlwaysTrue(query); - // Case 3: unsigned_int > big positive long, always false; - query = "SELECT * FROM PKUnsignedIntValueTest where pk >= " + Long.MAX_VALUE; - assertQueryConditionAlwaysFalse(query); - // Case 4: unsigned_int < big positive long, always true; - query = "SELECT * FROM PKUnsignedIntValueTest where pk <= " + Long.MAX_VALUE; - assertQueryConditionAlwaysTrue(query); - } - - @Test - public void testValueComparisonUnsignedLong() throws SQLException { - ensureTableCreated(getUrl(), "PKUnsignedLongValueTest", "PKUnsignedLongValueTest"); - String query; - // unsigned_long <-> positive int/long - // Case 1: unsigned_long = negative int/long, always false; - query = "SELECT * FROM PKUnsignedLongValueTest where pk = -1"; - assertQueryConditionAlwaysFalse(query); - // Case 2: unsigned_long = negative int/long, always true; - query = "SELECT * FROM PKUnsignedLongValueTest where pk != " + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysTrue(query); - // Case 3: unsigned_long > negative int/long, always true; - query = "SELECT * FROM PKUnsignedLongValueTest where pk > -1"; - assertQueryConditionAlwaysTrue(query); - // Case 4: unsigned_long < negative int/long, always false; - query = "SELECT * FROM PKUnsignedLongValueTest where pk < " + (Long.MIN_VALUE + 1); - assertQueryConditionAlwaysFalse(query); - } - - private void assertQueryConditionAlwaysTrue(String query) throws SQLException { - Scan 
scan = compileStatement(query).getScan(); - assertEmptyScanKey(scan); - } - - private void assertQueryConditionAlwaysFalse(String query) throws SQLException { - Scan scan = compileStatement(query).getScan(); - assertDegenerate(scan); - } - - @Test - public void testOrSameColExpression() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000003"; - String query = "select * from atable where organization_id = ? or organization_id = ?"; - List binds = Arrays.asList(tenantId1,tenantId2); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof SkipScanFilter); - ScanRanges scanRanges = context.getScanRanges(); - assertNotNull(scanRanges); - List> ranges = scanRanges.getRanges(); - assertEquals(1,ranges.size()); - List> expectedRanges = Collections.singletonList(Arrays.asList( - PChar.INSTANCE.getKeyRange(PChar.INSTANCE.toBytes(tenantId1), true, PChar.INSTANCE.toBytes(tenantId1), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(PChar.INSTANCE.toBytes(tenantId2), true, PChar.INSTANCE.toBytes(tenantId2), true, SortOrder.ASC))); - assertEquals(expectedRanges, ranges); - byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId1); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId2)), scan.getStopRow()); - } - - @Test - public void testAndOrExpression() throws SQLException { - String tenantId1 = "000000000000001"; - String tenantId2 = "000000000000003"; - String entityId1 = "002333333333331"; - String entityId2 = "002333333333333"; - String query = "select * from atable where (organization_id = ? and entity_id = ?) or (organization_id = ? and entity_id = ?)"; - List binds = Arrays.asList(tenantId1,entityId1,tenantId2,entityId2); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - - ScanRanges scanRanges = context.getScanRanges(); - assertEquals(ScanRanges.EVERYTHING,scanRanges); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testOrDiffColExpression() throws SQLException { - String tenantId1 = "000000000000001"; - String entityId1 = "002333333333331"; - String query = "select * from atable where organization_id = ? or entity_id = ?"; - List binds = Arrays.asList(tenantId1,entityId1); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - ScanRanges scanRanges = context.getScanRanges(); - assertEquals(ScanRanges.EVERYTHING,scanRanges); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testOrSameColRangeExpression() throws SQLException { - String query = "select * from atable where substr(organization_id,1,3) = ? 
or organization_id LIKE 'foo%'"; - List binds = Arrays.asList("00D"); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof SkipScanFilter); - ScanRanges scanRanges = context.getScanRanges(); - assertNotNull(scanRanges); - List> ranges = scanRanges.getRanges(); - assertEquals(1,ranges.size()); - List> expectedRanges = Collections.singletonList(Arrays.asList( - PChar.INSTANCE.getKeyRange( - StringUtil.padChar(PChar.INSTANCE.toBytes("00D"),15), true, - StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes("00D")),15), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange( - StringUtil.padChar(PChar.INSTANCE.toBytes("foo"),15), true, - StringUtil.padChar(ByteUtil.nextKey(PChar.INSTANCE.toBytes("foo")),15), false, SortOrder.ASC))); - assertEquals(expectedRanges, ranges); - } - - @Test - public void testOrPKRanges() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - ensureTableCreated(getUrl(), TestUtil.BTABLE_NAME); - Statement stmt = conn.createStatement(); - // BTABLE has 5 PK columns - String query = "select * from " + BTABLE_NAME + - " where (a_string > '1' and a_string < '5') or (a_string > '6' and a_string < '9')"; - StatementContext context = compileStatement(query); - Filter filter = context.getScan().getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof SkipScanFilter); - ScanRanges scanRanges = context.getScanRanges(); - assertNotNull(scanRanges); - List> ranges = scanRanges.getRanges(); - assertEquals(1, ranges.size()); - List> expectedRanges = Collections.singletonList(Arrays.asList( - KeyRange.getKeyRange(Bytes.toBytes("1"), false, Bytes.toBytes("5"), false), - KeyRange.getKeyRange(Bytes.toBytes("6"), false, Bytes.toBytes("9"), false))); - assertEquals(expectedRanges, ranges); - - stmt.close(); + } + } + } + + @Test + public void testLastPkColumnIsVariableLengthAndDescBug5307() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + String sql = "CREATE TABLE t1 (\n" + "OBJECT_VERSION VARCHAR NOT NULL,\n" + "LOC VARCHAR,\n" + + "CONSTRAINT PK PRIMARY KEY (OBJECT_VERSION DESC))"; + conn.createStatement().execute(sql); + + byte[] startKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + byte[] endKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + ByteUtil.nextKey(endKey, endKey.length); + sql = "SELECT /*+ RANGE_SCAN */ OBJ.OBJECT_VERSION, OBJ.LOC from t1 AS OBJ " + + "where OBJ.OBJECT_VERSION in ('1111','2222')"; + QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + Scan scan = queryPlan.getContext().getScan(); + assertArrayEquals(startKey, scan.getStartRow()); + assertArrayEquals(endKey, scan.getStopRow()); + + sql = "CREATE TABLE t2 (\n" + "OBJECT_ID VARCHAR NOT NULL,\n" + + "OBJECT_VERSION VARCHAR NOT NULL,\n" + "LOC VARCHAR,\n" + + "CONSTRAINT PK PRIMARY KEY (OBJECT_ID, OBJECT_VERSION DESC))"; + conn.createStatement().execute(sql); + + startKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + /** + * For following sql, queryPlan would use SkipScan and is regarded as PointLookup, so the + * endKey is computed as {@link 
SchemaUtil#VAR_BINARY_SCHEMA},see {@link ScanRanges#create}. + */ + endKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY); + + sql = "SELECT OBJ.OBJECT_ID, OBJ.OBJECT_VERSION, OBJ.LOC from t2 AS OBJ " + + "where (OBJ.OBJECT_ID, OBJ.OBJECT_VERSION) in (('obj1', '2222'),('obj2', '1111'),('obj3', '1111'))"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + FilterList filterList = (FilterList) scan.getFilter(); + assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); + assertEquals(filterList.getFilters().size(), 2); + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); + RowKeyComparisonFilter rowKeyComparisonFilter = + (RowKeyComparisonFilter) filterList.getFilters().get(1); + assertEquals(rowKeyComparisonFilter.toString(), + "(OBJECT_ID, OBJECT_VERSION) IN (X'6f626a3100cdcdcdcd',X'6f626a3200cececece',X'6f626a3300cececece')"); + + assertTrue(queryPlan.getContext().getScanRanges().isPointLookup()); + assertArrayEquals(startKey, scan.getStartRow()); + assertArrayEquals(endKey, scan.getStopRow()); + } finally { + if (conn != null) { conn.close(); - } - - @Test - public void testOrPKRangesNotOptimized() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - ensureTableCreated(getUrl(), TestUtil.BTABLE_NAME); - Statement stmt = conn.createStatement(); - // BTABLE has 5 PK columns - String[] queries = { - "select * from " + BTABLE_NAME + " where (a_string > '1' and a_string < '5') or (a_string > '6' and a_string < '9' and a_id = 'foo')", - "select * from " + BTABLE_NAME + " where (a_id > 'aaa' and a_id < 'ccc') or (a_id > 'jjj' and a_id < 'mmm')", - }; - for (String query : queries) { - StatementContext context = compileStatement(query); - Iterator it = ScanUtil.getFilterIterator(context.getScan()); - while (it.hasNext()) { - assertFalse(it.next() instanceof SkipScanFilter); - } - TestUtil.assertNotDegenerate(context.getScan()); - } - - stmt.close(); - conn.close(); - } - - @Test - public void testForceSkipScanOnSaltedTable() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE IF NOT EXISTS user_messages (\n" + - " SENDER_ID UNSIGNED_LONG NOT NULL,\n" + - " RECIPIENT_ID UNSIGNED_LONG NOT NULL,\n" + - " SENDER_IP VARCHAR,\n" + - " IS_READ VARCHAR,\n" + - " IS_DELETED VARCHAR,\n" + - " M_TEXT VARCHAR,\n" + - " M_TIMESTAMP timestamp NOT NULL,\n" + - " ROW_ID UNSIGNED_LONG NOT NULL\n" + - " constraint rowkey primary key (SENDER_ID,RECIPIENT_ID,M_TIMESTAMP DESC,ROW_ID))\n" + - "SALT_BUCKETS=12\n"); - String query = "select /*+ SKIP_SCAN */ count(*) from user_messages where is_read='N' and recipient_id=5399179882"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - - assertNotNull(filter); - assertTrue(filter instanceof FilterList); - FilterList filterList = (FilterList)filter; - assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); - assertEquals(2, filterList.getFilters().size()); - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - assertTrue(filterList.getFilters().get(1) instanceof SingleKeyValueComparisonFilter); - - ScanRanges scanRanges 
= context.getScanRanges(); - assertNotNull(scanRanges); - assertEquals(3,scanRanges.getRanges().size()); - assertEquals(1,scanRanges.getRanges().get(1).size()); - assertEquals(KeyRange.EVERYTHING_RANGE,scanRanges.getRanges().get(1).get(0)); - assertEquals(1,scanRanges.getRanges().get(2).size()); - assertTrue(scanRanges.getRanges().get(2).get(0).isSingleKey()); - assertEquals(Long.valueOf(5399179882L), PUnsignedLong.INSTANCE.toObject(scanRanges.getRanges().get(2).get(0).getLowerRange())); - } - - @Test - public void testForceRangeScanKeepsFilters() throws SQLException { - ensureTableCreated(getUrl(), TestUtil.ENTITY_HISTORY_TABLE_NAME, TestUtil.ENTITY_HISTORY_TABLE_NAME); - String tenantId = "000000000000001"; - String keyPrefix = "002"; - String query = "select /*+ RANGE_SCAN */ ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID from " + TestUtil.ENTITY_HISTORY_TABLE_NAME + - " where ORGANIZATION_ID=? and SUBSTR(PARENT_ID, 1, 3) = ? and CREATED_DATE >= ? and CREATED_DATE < ? order by ORGANIZATION_ID, PARENT_ID, CREATED_DATE, ENTITY_HISTORY_ID limit 6"; - Date startTime = new Date(System.currentTimeMillis()); - Date stopTime = new Date(startTime.getTime() + MILLIS_IN_DAY); - List binds = Arrays.asList(tenantId, keyPrefix, startTime, stopTime); - StatementContext context = compileStatement(query, binds, 6); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - - byte[] expectedStartRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId), StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15), PDate.INSTANCE.toBytes(startTime)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - byte[] expectedStopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId), StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15)); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - } - - @Test - public void testBasicRVCExpression() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333331"; - String query = "select * from atable where (organization_id,entity_id) >= (?,?)"; - List binds = Arrays.asList(tenantId, entityId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - assertNull(scan.getFilter()); - byte[] expectedStartRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), PChar.INSTANCE.toBytes(entityId)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - - @Test - public void testRVCExpressionThroughOr() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333331"; - String entityId1 = "002333333333330"; - String entityId2 = "002333333333332"; - String query = "select * from atable where (organization_id,entity_id) >= (?,?) and organization_id = ? and (entity_id = ? 
or entity_id = ?)"; - List binds = Arrays.asList(tenantId, entityId, tenantId, entityId1, entityId2); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId1)); - byte[] expectedStopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId2), QueryConstants.SEPARATOR_BYTE_ARRAY); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - SkipScanFilter skipScanFilter = (SkipScanFilter)filter; - List> skipScanRanges = Arrays.asList( - Arrays.asList(KeyRange.getKeyRange(ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId1))), - KeyRange.getKeyRange(ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId2))))); - assertEquals(skipScanRanges, skipScanFilter.getSlots()); - } - - @Test - public void testNotRepresentableBySkipScan() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - String tableName = generateUniqueName(); - conn.createStatement().execute("CREATE TABLE " + tableName + "(a INTEGER NOT NULL, b INTEGER NOT NULL, CONSTRAINT pk PRIMARY KEY (a,b))"); - String query = "SELECT * FROM " + tableName + - " WHERE (a,b) >= (1,5) and (a,b) < (3,8) and (a = 1 or a = 3) and ((b >= 6 and b < 9) or (b > 3 and b <= 5))"; - StatementContext context = compileStatement(query); - Scan scan = context.getScan(); - byte[] expectedStartRow = ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(4)); - byte[] expectedStopRow = ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(9)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof FilterList); - FilterList filterList = (FilterList)filter; - // We can form a skip scan, but it's not exact, so we need the boolean expression filter - // as well. - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - assertTrue(filterList.getFilters().get(1) instanceof BooleanExpressionFilter); - SkipScanFilter skipScanFilter = (SkipScanFilter)filterList.getFilters().get(0); - List> skipScanRanges = Arrays.asList( - Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3))), - Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(5), true), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(6), true, PInteger.INSTANCE.toBytes(9), false))); - assertEquals(skipScanRanges, skipScanFilter.getSlots()); - } - - /** - * With only a subset of row key cols present (which includes the leading key), - * Phoenix should have optimized the start row for the scan to include the - * row keys cols that occur contiguously in the RVC. - * - * Table entity_history has the row key defined as (organization_id, parent_id, created_date, entity_history_id). - * This test uses (organization_id, parent_id, entity_id) in RVC. So the start row should be comprised of - * organization_id and parent_id. 
- * @throws SQLException - */ - @Test - public void testRVCExpressionWithSubsetOfPKCols() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000002"; - String entityHistId = "000000000000003"; - - String query = "select * from entity_history where (organization_id, parent_id, entity_history_id) >= (?,?,?)"; - List binds = Arrays.asList(tenantId, parentId, entityHistId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - /** - * With the leading row key col missing Phoenix won't be able to optimize - * and provide the start row for the scan. - * - * Table entity_history has the row key defined as (organization_id, parent_id, created_date, entity_history_id). - * This test uses (parent_id, entity_id) in RVC. Start row should be empty. - * @throws SQLException - */ - - @Test - public void testRVCExpressionWithoutLeadingColOfRowKey() throws SQLException { - - String parentId = "000000000000002"; - String entityHistId = "000000000000003"; - - String query = "select * from entity_history where (parent_id, entity_history_id) >= (?,?)"; - List binds = Arrays.asList(parentId, entityHistId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testRVCExpressionWithNonFirstLeadingColOfRowKey() throws SQLException { - String old_value = "value"; - String orgId = getOrganizationId(); - - String query = "select * from entity_history where (old_value, organization_id) >= (?,?)"; - List binds = Arrays.asList(old_value, orgId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof SingleKeyValueComparisonFilter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testMultiRVCExpressionsCombinedWithAnd() throws SQLException { - String lowerTenantId = "000000000000001"; - String lowerParentId = "000000000000002"; - Date lowerCreatedDate = new Date(System.currentTimeMillis()); - String upperTenantId = "000000000000008"; - String upperParentId = "000000000000009"; - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?, ?, ?) 
AND (organization_id, parent_id) <= (?, ?)"; - List binds = Arrays.asList(lowerTenantId, lowerParentId, lowerCreatedDate, upperTenantId, upperParentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] expectedStartRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(lowerTenantId), PVarchar.INSTANCE.toBytes(lowerParentId), PDate.INSTANCE.toBytes(lowerCreatedDate)); - byte[] expectedStopRow = ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(upperTenantId), PVarchar.INSTANCE.toBytes(upperParentId))); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - } - - @Test - public void testMultiRVCExpressionsCombinedUsingLiteralExpressions() throws SQLException { - String lowerTenantId = "000000000000001"; - String lowerParentId = "000000000000002"; - Date lowerCreatedDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?, ?, ?) AND (organization_id, parent_id) <= ('7', '7')"; - List binds = Arrays.asList(lowerTenantId, lowerParentId, lowerCreatedDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] expectedStartRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(lowerTenantId), PVarchar.INSTANCE.toBytes(lowerParentId), PDate.INSTANCE.toBytes(lowerCreatedDate)); - byte[] expectedStopRow = ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PVarchar.INSTANCE.toBytes("7"),15), StringUtil.padChar( - PVarchar.INSTANCE.toBytes("7"), 15))); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - } - - @Test - public void testUseOfFunctionOnLHSInRVC() throws SQLException { - String tenantId = "000000000000001"; - String subStringTenantId = tenantId.substring(0, 3); - String parentId = "000000000000002"; - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (substr(organization_id, 1, 3), parent_id, created_date) >= (?,?,?)"; - List binds = Arrays.asList(subStringTenantId, parentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - byte[] expectedStartRow = PVarchar.INSTANCE.toBytes(subStringTenantId); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testUseOfFunctionOnLHSInMiddleOfRVC() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000002"; - String subStringParentId = parentId.substring(0, 3); - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) >= (?,?,?)"; - List binds = Arrays.asList(tenantId, subStringParentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(subStringParentId)); - 
assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testUseOfFunctionOnLHSInMiddleOfRVCForLTE() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000002"; - String subStringParentId = parentId.substring(0, 3); - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, substr(parent_id, 1, 3), created_date) <= (?,?,?)"; - List binds = Arrays.asList(tenantId, subStringParentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - assertTrue(filter instanceof RowKeyComparisonFilter); - byte[] expectedStopRow = ByteUtil.concat( - PVarchar.INSTANCE.toBytes(tenantId), ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(subStringParentId))); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); - assertArrayEquals(expectedStopRow, scan.getStopRow()); - } - - @Test - public void testNullAtEndOfRVC() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000002"; - Date createdDate = null; - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; - List binds = Arrays.asList(tenantId, parentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] expectedStartRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(parentId)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testNullInMiddleOfRVC() throws SQLException { - String tenantId = "000000000000001"; - String parentId = null; - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; - List binds = Arrays.asList(tenantId, parentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] expectedStartRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), new byte[15], ByteUtil.previousKey( - PDate.INSTANCE.toBytes(createdDate))); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testNullAtStartOfRVC() throws SQLException { - String tenantId = null; - String parentId = "000000000000002"; - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?)"; - List binds = Arrays.asList(tenantId, parentId, createdDate); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - byte[] expectedStartRow = ByteUtil.concat(new byte[15], ByteUtil.previousKey(PChar.INSTANCE.toBytes(parentId)), PDate.INSTANCE.toBytes(createdDate)); - assertArrayEquals(expectedStartRow, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testRVCInCombinationWithOtherNonRVC() throws SQLException { - String firstOrgId = 
"000000000000001"; - String secondOrgId = "000000000000008"; - - String parentId = "000000000000002"; - Date createdDate = new Date(System.currentTimeMillis()); - - String query = "select * from entity_history where (organization_id, parent_id, created_date) >= (?,?,?) AND organization_id <= ?"; - List binds = Arrays.asList(firstOrgId, parentId, createdDate, secondOrgId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstOrgId), PVarchar.INSTANCE.toBytes(parentId), PDate.INSTANCE.toBytes(createdDate)), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(secondOrgId)), scan.getStopRow()); - } - - @Test - public void testGreaterThanEqualTo_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000008"; - - String query = "select * from entity_history where organization_id >= (?,?)"; - List binds = Arrays.asList(tenantId, parentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testGreaterThan_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000008"; - - String query = "select * from entity_history where organization_id > (?,?)"; - List binds = Arrays.asList(tenantId, parentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testGreaterThan() throws SQLException { - String tenantId = "000000000000001"; - - String query = "select * from entity_history where organization_id >?"; - List binds = Arrays.asList(tenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testLessThanEqualTo_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000008"; - - String query = "select * from entity_history where organization_id <= (?,?)"; - List binds = Arrays.asList(tenantId, parentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testLessThan_NonRVCOnLHSAndRVCOnRHS_WithNonNullBindParams() throws SQLException { - String tenantId = "000000000000001"; - String parentId = "000000000000008"; - - String query = "select * from entity_history where organization_id < (?,?)"; - List binds = 
Arrays.asList(tenantId, parentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testQueryMoreRVC() throws SQLException { - String ddl = "CREATE TABLE rvcTestIdx " - + " (\n" + - " pk1 VARCHAR NOT NULL,\n" + - " v1 VARCHAR,\n" + - " pk2 DECIMAL NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY \n" + - " (\n" + - " pk1,\n" + - " v1,\n" + - " pk2\n" + - " )\n" + - ") MULTI_TENANT=true,IMMUTABLE_ROWS=true"; - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - conn.createStatement().execute(ddl); - String query = "SELECT pk1, pk2, v1 FROM rvcTestIdx WHERE pk1 = 'a' AND\n" + - "(pk1, pk2) > ('a', 1)\n" + - "ORDER BY PK1, PK2\n" + - "LIMIT 2"; - StatementContext context = compileStatement(query, 2); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNotNull(filter); - byte[] startRow = Bytes.toBytes("a"); - byte[] stopRow = ByteUtil.concat(startRow, ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY)); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testCombiningRVCUsingOr() throws SQLException { - String firstTenantId = "000000000000001"; - String secondTenantId = "000000000000005"; - String firstParentId = "000000000000011"; - String secondParentId = "000000000000015"; - - String query = "select * from entity_history where (organization_id, parent_id) >= (?,?) OR (organization_id, parent_id) <= (?, ?)"; - List binds = Arrays.asList(firstTenantId, firstParentId, secondTenantId, secondParentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testCombiningRVCUsingOr2() throws SQLException { - String firstTenantId = "000000000000001"; - String secondTenantId = "000000000000005"; - String firstParentId = "000000000000011"; - String secondParentId = "000000000000015"; - - String query = "select * from entity_history where (organization_id, parent_id) >= (?,?) OR (organization_id, parent_id) >= (?, ?)"; - List binds = Arrays.asList(firstTenantId, firstParentId, secondTenantId, secondParentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstTenantId), PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testCombiningRVCWithNonRVCUsingOr() throws SQLException { - String firstTenantId = "000000000000001"; - String secondTenantId = "000000000000005"; - String firstParentId = "000000000000011"; - - String query = "select * from entity_history where (organization_id, parent_id) >= (?,?) 
OR organization_id >= ?"; - List binds = Arrays.asList(firstTenantId, firstParentId, secondTenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstTenantId), PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testCombiningRVCWithNonRVCUsingOr2() throws SQLException { - String firstTenantId = "000000000000001"; - String secondTenantId = "000000000000005"; - String firstParentId = "000000000000011"; - - String query = "select * from entity_history where (organization_id, parent_id) >= (?,?) OR organization_id <= ?"; - List binds = Arrays.asList(firstTenantId, firstParentId, secondTenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - } - - @Test - public void testCombiningRVCWithNonRVCUsingOr3() throws SQLException { - String firstTenantId = "000000000000005"; - String secondTenantId = "000000000000001"; - String firstParentId = "000000000000011"; - String query = "select * from entity_history where (organization_id, parent_id) >= (?,?) OR organization_id <= ?"; - List binds = Arrays.asList(firstTenantId, firstParentId, secondTenantId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - assertArrayEquals(HConstants.EMPTY_START_ROW, scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - SkipScanFilter skipScanFilter = (SkipScanFilter)filter; - List> keyRanges = skipScanFilter.getSlots(); - assertEquals(1, keyRanges.size()); - assertEquals(2, keyRanges.get(0).size()); - KeyRange range1 = keyRanges.get(0).get(0); - KeyRange range2 = keyRanges.get(0).get(1); - assertEquals(KeyRange.getKeyRange(KeyRange.UNBOUND, false, Bytes.toBytes(secondTenantId), true), range1); - assertEquals(KeyRange.getKeyRange(ByteUtil.concat(Bytes.toBytes(firstTenantId), Bytes.toBytes(firstParentId)), true, KeyRange.UNBOUND, true), range2); - } - - @Test - public void testUsingRVCNonFullyQualifiedInClause() throws Exception { - String firstOrgId = "000000000000001"; - String secondOrgId = "000000000000009"; - String firstParentId = "000000000000011"; - String secondParentId = "000000000000021"; - String query = "select * from entity_history where (organization_id, parent_id) IN ((?, ?), (?, ?))"; - List binds = Arrays.asList(firstOrgId, firstParentId, secondOrgId, secondParentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes(firstOrgId), PVarchar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes(secondOrgId), PVarchar.INSTANCE.toBytes(secondParentId))), scan.getStopRow()); - } - - @Test - public void testUsingRVCFullyQualifiedInClause() throws Exception { - String firstOrgId = "000000000000001"; - String secondOrgId = "000000000000009"; - String firstParentId = 
"000000000000011"; - String secondParentId = "000000000000021"; - String query = "select * from atable where (organization_id, entity_id) IN ((?, ?), (?, ?))"; - List binds = Arrays.asList(firstOrgId, firstParentId, secondOrgId, secondParentId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - List> skipScanRanges = Collections.singletonList(Arrays.asList( - KeyRange.getKeyRange(ByteUtil.concat(PChar.INSTANCE.toBytes(firstOrgId), PChar.INSTANCE.toBytes(firstParentId))), - KeyRange.getKeyRange(ByteUtil.concat(PChar.INSTANCE.toBytes(secondOrgId), PChar.INSTANCE.toBytes(secondParentId))))); - assertEquals(skipScanRanges, context.getScanRanges().getRanges()); - assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes(firstOrgId), PChar.INSTANCE.toBytes(firstParentId)), scan.getStartRow()); - assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes(secondOrgId), PChar.INSTANCE.toBytes(secondParentId), QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - } - - @Test - public void testFullyQualifiedRVCWithTenantSpecificViewAndConnection() throws Exception { - String baseTableDDL = "CREATE TABLE BASE_MULTI_TENANT_TABLE(\n " + - " tenant_id VARCHAR(5) NOT NULL,\n" + - " userid INTEGER NOT NULL,\n" + - " username VARCHAR NOT NULL,\n" + - " col VARCHAR\n " + - " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username)) MULTI_TENANT=true"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(baseTableDDL); - conn.close(); - - String tenantId = "tenantId"; - String tenantViewDDL = "CREATE VIEW TENANT_VIEW AS SELECT * FROM BASE_MULTI_TENANT_TABLE"; - Properties tenantProps = new Properties(); - tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - conn = DriverManager.getConnection(getUrl(), tenantProps); - conn.createStatement().execute(tenantViewDDL); - - String query = "SELECT * FROM TENANT_VIEW WHERE (userid, username) IN ((?, ?), (?, ?))"; - List binds = Arrays.asList(1, "uname1", 2, "uname2"); - - StatementContext context = compileStatementTenantSpecific(tenantId, query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertEquals(SkipScanFilter.class, filter.getClass()); - } - - @Test - public void testFullyQualifiedRVCWithNonTenantSpecificView() throws Exception { - String baseTableDDL = "CREATE TABLE BASE_TABLE(\n " + - " tenant_id VARCHAR(5) NOT NULL,\n" + - " userid INTEGER NOT NULL,\n" + - " username VARCHAR NOT NULL,\n" + - " col VARCHAR\n " + - " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username))"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(baseTableDDL); - conn.close(); - - String viewDDL = "CREATE VIEW VIEWXYZ AS SELECT * FROM BASE_TABLE"; - conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(viewDDL); - - String query = "SELECT * FROM VIEWXYZ WHERE (tenant_id, userid, username) IN ((?, ?, ?), (?, ?, ?))"; - List binds = Arrays.asList("tenantId", 1, "uname1", "tenantId", 2, "uname2"); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertEquals(SkipScanFilter.class, filter.getClass()); - } - - @Test - public void testRVCWithCompareOpsForRowKeyColumnValuesSmallerThanSchema() throws SQLException { - String orgId = "0000005"; - String entityId = "011"; - String orgId2 = "000005"; - String entityId2 = "11"; - - // 
CASE 1: >= - String query = "select * from atable where (organization_id, entity_id) >= (?,?)"; - List binds = Arrays.asList(orgId, entityId); - StatementContext context = compileStatement(query, binds); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId), 15)), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - - // CASE 2: > - query = "select * from atable where (organization_id, entity_id) > (?,?)"; - binds = Arrays.asList(orgId, entityId); - context = compileStatement(query, binds); - scan = context.getScan(); - filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId), 15))), scan.getStartRow()); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStopRow()); - - // CASE 3: <= - query = "select * from atable where (organization_id, entity_id) <= (?,?)"; - binds = Arrays.asList(orgId, entityId); - context = compileStatement(query, binds); - scan = context.getScan(); - filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId), 15))), scan.getStopRow()); - - // CASE 4: < - query = "select * from atable where (organization_id, entity_id) < (?,?)"; - binds = Arrays.asList(orgId, entityId); - context = compileStatement(query, binds); - scan = context.getScan(); - filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(HConstants.EMPTY_END_ROW, scan.getStartRow()); - assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId), 15)), scan.getStopRow()); - - // CASE 5: = - // For RVC, this will only occur if there's more than one key in the IN - query = "select * from atable where (organization_id, entity_id) IN ((?,?),(?,?))"; - binds = Arrays.asList(orgId, entityId, orgId2, entityId2); - context = compileStatement(query, binds); - scan = context.getScan(); - filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - ScanRanges scanRanges = context.getScanRanges(); - assertEquals(2,scanRanges.getPointLookupCount()); - Iterator iterator = scanRanges.getPointLookupKeyIterator(); - KeyRange k1 = iterator.next(); - assertTrue(k1.isSingleKey()); - assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId), 15)), k1.getLowerRange()); - KeyRange k2 = iterator.next(); - assertTrue(k2.isSingleKey()); - assertArrayEquals(ByteUtil.concat(StringUtil.padChar(PChar.INSTANCE.toBytes(orgId2), 15), StringUtil.padChar( - PChar.INSTANCE.toBytes(entityId2), 15)), k2.getLowerRange()); - } - - - @Test - public void testRVCInView() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE TEST_TABLE.TEST1 (\n" + - "PK1 CHAR(3) NOT NULL, \n" + - "PK2 CHAR(3) NOT NULL,\n" + - "DATA1 CHAR(10)\n" + - "CONSTRAINT PK PRIMARY KEY (PK1, PK2))"); - conn.createStatement().execute("CREATE VIEW TEST_TABLE.FOO AS SELECT * FROM TEST_TABLE.TEST1 WHERE PK1 = 'FOO'"); - String 
query = "SELECT * FROM TEST_TABLE.FOO WHERE PK2 < '004' AND (PK1,PK2) > ('FOO','002') LIMIT 2"; - Scan scan = compileStatement(query, Collections.emptyList(), 2).getScan(); - byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"), - PVarchar.INSTANCE.toBytes("002"))); - assertArrayEquals(startRow, scan.getStartRow()); - byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"), - PChar.INSTANCE.toBytes("004")); - assertArrayEquals(stopRow, scan.getStopRow()); - } - - @Test - public void testScanRangeForPointLookup() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333333"; - String query = String.format("select * from atable where organization_id='%s' and entity_id='%s'", - tenantId, entityId); - try (Connection conn = DriverManager.getConnection(getUrl())) { - QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - byte[] stopRow = ByteUtil.nextKey(startRow); - validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); - } - } - - @Test - public void testScanRangeForPointLookupRVC() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333333"; - String query = String.format("select * from atable where (organization_id, entity_id) IN (('%s','%s'))", - tenantId, entityId); - try (Connection conn = DriverManager.getConnection(getUrl())) { - QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - byte[] stopRow = ByteUtil.nextKey(startRow); - validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); - } - } - - @Test - public void testScanRangeForPointLookupWithLimit() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333333"; - String query = String.format("select * from atable where organization_id='%s' " + - "and entity_id='%s' LIMIT 1", tenantId, entityId); - try (Connection conn = DriverManager.getConnection(getUrl())) { - QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - byte[] stopRow = ByteUtil.nextKey(startRow); - validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); - } - } - - @Test - public void testScanRangeForPointLookupAggregate() throws SQLException { - String tenantId = "000000000000001"; - String entityId = "002333333333333"; - String query = String.format("select count(*) from atable where organization_id='%s' " + - "and entity_id='%s'", tenantId, entityId); - try (Connection conn = DriverManager.getConnection(getUrl())) { - QueryPlan optimizedPlan = TestUtil.getOptimizeQueryPlan(conn, query); - byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), PVarchar.INSTANCE.toBytes(entityId)); - byte[] stopRow = ByteUtil.nextKey(startRow); - validateScanRangesForPointLookup(optimizedPlan, startRow, stopRow); - } - } - - private static void validateScanRangesForPointLookup(QueryPlan optimizedPlan, byte[] startRow, byte[] stopRow) { - StatementContext context = optimizedPlan.getContext(); - ScanRanges scanRanges = context.getScanRanges(); - assertTrue(scanRanges.isPointLookup()); - assertEquals(1, scanRanges.getPointLookupCount()); - // scan from StatementContext has scan range [start, next(start)] - Scan 
scanFromContext = context.getScan(); - assertArrayEquals(startRow, scanFromContext.getStartRow()); - assertTrue(scanFromContext.includeStartRow()); - assertArrayEquals(stopRow, scanFromContext.getStopRow()); - assertFalse(scanFromContext.includeStopRow()); - - List> scans = optimizedPlan.getScans(); - assertEquals(1, scans.size()); - assertEquals(1, scans.get(0).size()); - Scan scanFromIterator = scans.get(0).get(0); - if (optimizedPlan.getLimit() == null && !optimizedPlan.getStatement().isAggregate()) { - // scan from iterator has same start and stop row [start, start] i.e a Get - assertTrue(scanFromIterator.isGetScan()); - assertTrue(scanFromIterator.includeStartRow()); - assertTrue(scanFromIterator.includeStopRow()); - } else { - // in case of limit scan range is same as the one in StatementContext - assertArrayEquals(startRow, scanFromIterator.getStartRow()); - assertTrue(scanFromIterator.includeStartRow()); - assertArrayEquals(stopRow, scanFromIterator.getStopRow()); - assertFalse(scanFromIterator.includeStopRow()); - } - } - - private static StatementContext compileStatementTenantSpecific(String tenantId, String query, List binds) throws Exception { - PhoenixConnection pconn = getTenantSpecificConnection("tenantId").unwrap(PhoenixConnection.class); + } + } + } + + @Test + public void testRVCClipBug5753() throws Exception { + String tableName = generateUniqueName(); + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.setAutoCommit(true); + Statement stmt = conn.createStatement(); + + String sql = + "CREATE TABLE " + tableName + " (" + " pk1 INTEGER NOT NULL , " + " pk2 INTEGER NOT NULL, " + + " pk3 INTEGER NOT NULL, " + " pk4 INTEGER NOT NULL, " + " pk5 INTEGER NOT NULL, " + + " pk6 INTEGER NOT NULL, " + " pk7 INTEGER NOT NULL, " + " pk8 INTEGER NOT NULL, " + + " v INTEGER, CONSTRAINT PK PRIMARY KEY(pk1,pk2,pk3 desc,pk4,pk5,pk6 desc,pk7,pk8))"; + ; + + stmt.execute(sql); + + List> rowKeyRanges = null; + RowKeyComparisonFilter rowKeyComparisonFilter = null; + QueryPlan queryPlan = null; + Scan scan = null; + + sql = "SELECT /*+ RANGE_SCAN */ * FROM " + tableName + + " WHERE (pk1, pk2) IN ((2, 3), (2, 4)) AND pk3 = 5"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); + rowKeyComparisonFilter = (RowKeyComparisonFilter) scan.getFilter(); + assertEquals(rowKeyComparisonFilter.toString(), + "((PK1, PK2) IN (X'8000000280000003',X'8000000280000004') AND PK3 = 5)"); + assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(2), + PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(5, SortOrder.DESC))); + assertArrayEquals(scan.getStopRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4), + ByteUtil.nextKey(PInteger.INSTANCE.toBytes(5, SortOrder.DESC)))); + + sql = "select * from " + tableName + + " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) < (3,5)"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof FilterList); + FilterList filterList = (FilterList) scan.getFilter(); + + assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); + assertEquals(filterList.getFilters().size(), 2); + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (filterList.getFilters().get(0))).getSlots(); + assertEquals(Arrays.asList( + 
Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1), true, + PInteger.INSTANCE.toBytes(2), true)), + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, + PInteger.INSTANCE.toBytes(3), true)), + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3, SortOrder.DESC), true, + KeyRange.UNBOUND, false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(1), + PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3, SortOrder.DESC))); + assertArrayEquals(scan.getStopRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); + + assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); + rowKeyComparisonFilter = (RowKeyComparisonFilter) filterList.getFilters().get(1); + assertTrue(rowKeyComparisonFilter.toString() + .equals("(TO_INTEGER(PK3), PK4) < (TO_INTEGER(TO_INTEGER(3)), 5)")); + + /** + * RVC is singleKey + */ + sql = "select * from " + tableName + + " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) in ((3,4),(4,5)) and " + + " (pk5,pk6,pk7) in ((5,6,7),(6,7,8)) and pk8 > 8"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof FilterList); + filterList = (FilterList) scan.getFilter(); + + assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); + assertEquals(filterList.getFilters().size(), 2); + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (filterList.getFilters().get(0))).getSlots(); + assertEquals( + Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1), true, + PInteger.INSTANCE.toBytes(2), true)), + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, + PInteger.INSTANCE.toBytes(3), true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4, SortOrder.DESC), true, + PInteger.INSTANCE.toBytes(4, SortOrder.DESC), true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3, SortOrder.DESC), true, + PInteger.INSTANCE.toBytes(3, SortOrder.DESC), true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(4), + true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5), true, PInteger.INSTANCE.toBytes(5), + true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5), true, PInteger.INSTANCE.toBytes(5), + true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(6), true, PInteger.INSTANCE.toBytes(6), + true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7, SortOrder.DESC), true, + PInteger.INSTANCE.toBytes(7, SortOrder.DESC), true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(6, SortOrder.DESC), true, + PInteger.INSTANCE.toBytes(6, SortOrder.DESC), true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7), true, PInteger.INSTANCE.toBytes(7), + true), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(8), + true)), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(9), true, KeyRange.UNBOUND, false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(2), + PInteger.INSTANCE.toBytes(4, SortOrder.DESC), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(7, SortOrder.DESC), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(9))); + 
assertArrayEquals(scan.getStopRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(3, SortOrder.DESC), PInteger.INSTANCE.toBytes(5), + PInteger.INSTANCE.toBytes(6), PInteger.INSTANCE.toBytes(6, SortOrder.DESC), + PInteger.INSTANCE.toBytes(9))); + + assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); + rowKeyComparisonFilter = (RowKeyComparisonFilter) filterList.getFilters().get(1); + assertEquals(rowKeyComparisonFilter.toString(), + "((PK3, PK4) IN (X'7ffffffb80000005',X'7ffffffc80000004') AND (PK5, PK6, PK7) IN (X'800000057ffffff980000007',X'800000067ffffff880000008'))"); + /** + * RVC is not singleKey + */ + sql = "select * from " + tableName + + " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) < (3,4) and " + + " (pk5,pk6,pk7) < (5,6,7) and pk8 > 8"; + queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); + scan = queryPlan.getContext().getScan(); + assertTrue(scan.getFilter() instanceof FilterList); + filterList = (FilterList) scan.getFilter(); + + assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); + assertEquals(filterList.getFilters().size(), 2); + assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); + rowKeyRanges = ((SkipScanFilter) (filterList.getFilters().get(0))).getSlots(); + assertEquals( + Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1), true, + PInteger.INSTANCE.toBytes(2), true)), + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, + PInteger.INSTANCE.toBytes(3), true)), + Arrays.asList(KeyRange.getKeyRange( + PInteger.INSTANCE.toBytes(3, SortOrder.DESC), true, KeyRange.UNBOUND, false)), + Arrays.asList(KeyRange.EVERYTHING_RANGE), + Arrays.asList( + KeyRange.getKeyRange(KeyRange.UNBOUND, false, PInteger.INSTANCE.toBytes(5), true)), + Arrays.asList(KeyRange.EVERYTHING_RANGE), Arrays.asList(KeyRange.EVERYTHING_RANGE), + Arrays.asList( + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(9), true, KeyRange.UNBOUND, false))), + rowKeyRanges); + assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(1), + PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3, SortOrder.DESC))); + assertArrayEquals(scan.getStopRow(), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); + + assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); + rowKeyComparisonFilter = (RowKeyComparisonFilter) filterList.getFilters().get(1); + assertTrue(rowKeyComparisonFilter.toString() + .equals("((PK5, TO_INTEGER(PK6), PK7) < (5, TO_INTEGER(TO_INTEGER(6)), 7) AND " + + "(TO_INTEGER(PK3), PK4) < (TO_INTEGER(TO_INTEGER(3)), 4))")); + } + } + + @Test + public void testWithLargeORs() throws Exception { + + SortOrder[][] sortOrders = new SortOrder[][] { { SortOrder.ASC, SortOrder.ASC, SortOrder.ASC }, + { SortOrder.ASC, SortOrder.ASC, SortOrder.DESC }, + { SortOrder.ASC, SortOrder.DESC, SortOrder.ASC }, + { SortOrder.ASC, SortOrder.DESC, SortOrder.DESC }, + { SortOrder.DESC, SortOrder.ASC, SortOrder.ASC }, + { SortOrder.DESC, SortOrder.ASC, SortOrder.DESC }, + { SortOrder.DESC, SortOrder.DESC, SortOrder.ASC }, + { SortOrder.DESC, SortOrder.DESC, SortOrder.DESC } }; + + String tableName = generateUniqueName(); + String viewName = String.format("Z_%s", tableName); + PDataType[] testTSVarVarPKTypes = + new PDataType[] { PTimestamp.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE }; + String baseTableName = String.format("TEST_ENTITY.%s", tableName); + int 
tenantId = 1; + int numTestCases = 1; + for (int index = 0; index < sortOrders.length; index++) { + // Test Case 1: PK1 = Timestamp, PK2 = Varchar, PK3 = Integer + String view1Name = String.format("TEST_ENTITY.%s%d", viewName, index * numTestCases + 1); + String partition1 = String.format("Z%d", index * numTestCases + 1); + createTenantView(tenantId, baseTableName, view1Name, partition1, testTSVarVarPKTypes[0], + sortOrders[index][0], testTSVarVarPKTypes[1], sortOrders[index][1], testTSVarVarPKTypes[2], + sortOrders[index][2]); + testTSVarIntAndLargeORs(tenantId, view1Name, sortOrders[index]); + } + } + + /** + * Test that tenantId is present in the scan start row key when using an inherited index on a + * tenant view. + */ + @Test + public void testScanKeyInheritedIndexTenantView() throws Exception { + String baseTableName = generateUniqueName(); + String globalViewName = generateUniqueName(); + String globalViewIndexName = generateUniqueName(); + String tenantViewName = generateUniqueName(); + try (Connection conn = DriverManager.getConnection(getUrl())) { + // create table, view and view index + conn.createStatement().execute("CREATE TABLE " + baseTableName + + " (TENANT_ID CHAR(8) NOT NULL, KP CHAR(3) NOT NULL, PK CHAR(3) NOT NULL, KV CHAR(2), KV2 CHAR(2) " + + "CONSTRAINT PK PRIMARY KEY(TENANT_ID, KP, PK)) MULTI_TENANT=true"); + conn.createStatement().execute("CREATE VIEW " + globalViewName + " AS SELECT * FROM " + + baseTableName + " WHERE KP = '001'"); + conn.createStatement().execute("CREATE INDEX " + globalViewIndexName + " on " + globalViewName + + " (KV) " + " INCLUDE (KV2)"); + // create tenant view + String tenantId = "tenantId"; + Properties tenantProps = new Properties(); + tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) { + tenantConn.createStatement() + .execute("CREATE VIEW " + tenantViewName + " AS SELECT * FROM " + globalViewName); + // query on secondary key + String query = "SELECT KV2 FROM " + tenantViewName + " WHERE KV = 'KV'"; + PhoenixConnection pconn = tenantConn.unwrap(PhoenixConnection.class); PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query); - TestUtil.bindParams(pstmt, binds); QueryPlan plan = pstmt.compileQuery(); - return plan.getContext(); - } - - private static Connection getTenantSpecificConnection(String tenantId) throws Exception { - Properties tenantProps = new Properties(); - tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - Connection conn = DriverManager.getConnection(getUrl(), tenantProps); - return conn; - } - - @Test - public void testTrailingIsNull() throws Exception { - String baseTableDDL = "CREATE TABLE t(\n " + - " a VARCHAR,\n" + - " b VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b))"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(baseTableDDL); - conn.close(); - - String query = "SELECT * FROM t WHERE a = 'a' and b is null"; - StatementContext context = compileStatement(query, Collections.emptyList()); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertNull(filter); - assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow()); - assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - } - - - @Test - public void testTrailingIsNullWithOr() throws Exception { - String baseTableDDL = "CREATE TABLE t(\n " + - " a VARCHAR,\n" + - " b 
VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b))"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(baseTableDDL); - conn.close(); - - String query = "SELECT * FROM t WHERE a = 'a' and (b is null or b = 'b')"; - StatementContext context = compileStatement(query, Collections.emptyList()); - Scan scan = context.getScan(); - Filter filter = scan.getFilter(); - assertTrue(filter instanceof SkipScanFilter); - SkipScanFilter skipScan = (SkipScanFilter)filter; - List>slots = skipScan.getSlots(); - assertEquals(2,slots.size()); - assertEquals(1,slots.get(0).size()); - assertEquals(2,slots.get(1).size()); - assertEquals(KeyRange.getKeyRange(Bytes.toBytes("a")), slots.get(0).get(0)); - assertTrue(KeyRange.IS_NULL_RANGE == slots.get(1).get(0)); - assertEquals(KeyRange.getKeyRange(Bytes.toBytes("b")), slots.get(1).get(1)); - assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow()); - assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow()); - } - - @Test - public void testAndWithRVC() throws Exception { - String ddl; - String query; - StatementContext context; - Connection conn = DriverManager.getConnection(getUrl()); - - ddl = "create table t (a integer not null, b integer not null, c integer constraint pk primary key (a,b))"; - conn.createStatement().execute(ddl); - - query = "select c from t where a in (1,2) and b = 3 and (a,b) in ( (1,2) , (1,3))"; - context = compileStatement(query, Collections.emptyList()); - assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), context.getScan().getStartRow()); - assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), context.getScan().getStopRow()); - - query = "select c from t where (a,b) in ( (1,2) , (1,3) ) and b = 4"; - context = compileStatement(query, Collections.emptyList()); - assertDegenerate(context.getScan()); - - query = "select c from t where a = 1 and b = 3 and (a,b) in ( (1,2) , (1,3))"; - context = compileStatement(query, Collections.emptyList()); - assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), context.getScan().getStartRow()); - assertArrayEquals(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), context.getScan().getStopRow()); - - // Test with RVC occurring later in the PK - ddl = "create table t1 (d varchar, e char(3) not null, a integer not null, b integer not null, c integer constraint pk primary key (d, e, a,b))"; - conn.createStatement().execute(ddl); - - query = "select c from t1 where d = 'a' and e = 'foo' and a in (1,2) and b = 3 and (a,b) in ( (1,2) , (1,3))"; - context = compileStatement(query, Collections.emptyList()); - Scan scan = context.getScan(); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, PChar.INSTANCE.toBytes("foo"), PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3)), scan.getStartRow()); - assertArrayEquals(ByteUtil.concat(PVarchar.INSTANCE.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, PChar.INSTANCE.toBytes("foo"), PInteger.INSTANCE.toBytes(1), ByteUtil.nextKey(PInteger.INSTANCE.toBytes(3))), scan.getStopRow()); - - conn.close(); - } - - @Test - public void testNoAggregatorForOrderBy() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)); - conn.createStatement().execute("create table test (pk1 integer not null, pk2 integer not null, constraint pk primary key (pk1,pk2))"); - StatementContext context = compileStatement("select count(distinct pk1) from test order by count(distinct pk2)"); - assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); - context = compileStatement("select sum(pk1) from test order by count(distinct pk2)"); - assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); - context = compileStatement("select min(pk1) from test order by count(distinct pk2)"); - assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); - context = compileStatement("select max(pk1) from test order by count(distinct pk2)"); - assertEquals(1, context.getAggregationManager().getAggregators().getAggregatorCount()); - // here the ORDER BY is not optimized away - context = compileStatement("select avg(pk1) from test order by count(distinct pk2)"); - assertEquals(2, context.getAggregationManager().getAggregators().getAggregatorCount()); - } - - @Test - public void testPartialRVCWithLeadingPKEq() throws SQLException { - String tenantId = "o1"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE COMMUNITIES.TEST (\n" + - " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + - " SCORE DOUBLE NOT NULL,\n" + - " ENTITY_ID CHAR(2) NOT NULL\n" + - " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " SCORE,\n" + - " ENTITY_ID\n" + - " )\n" + - ") VERSIONS=1, MULTI_TENANT=TRUE"); - String query = "SELECT entity_id, score\n" + - "FROM communities.test\n" + - "WHERE organization_id = '" + tenantId + "'\n" + - "AND (score, entity_id) > (2.0, '04')\n" + - "ORDER BY score, entity_id"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), PDouble.INSTANCE.toBytes(2.0), PChar.INSTANCE.toBytes("04"))); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testPartialRVCWithLeadingPKEqDesc() throws SQLException { - String tenantId = "o1"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE COMMUNITIES.TEST (\n" + - " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + - " SCORE DOUBLE NOT NULL,\n" + - " ENTITY_ID CHAR(2) NOT NULL\n" + - " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " SCORE DESC,\n" + - " ENTITY_ID DESC\n" + - " )\n" + - ") VERSIONS=1, MULTI_TENANT=TRUE"); - String query = "SELECT entity_id, score\n" + - "FROM communities.test\n" + - "WHERE organization_id = '" + tenantId + "'\n" + - "AND (score, entity_id) < (2.0, '04')\n" + - "ORDER BY score DESC, entity_id DESC"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), PDouble.INSTANCE.toBytes(2.0, SortOrder.DESC), PChar.INSTANCE.toBytes("04", SortOrder.DESC))); + plan = tenantConn.unwrap(PhoenixConnection.class).getQueryServices().getOptimizer() + .optimize(pstmt, plan); + // optimized query plan should use inherited index + assertEquals(tenantViewName + "#" + globalViewIndexName, + plan.getContext().getCurrentTable().getTable().getName().getString()); + Scan scan = 
plan.getContext().getScan(); + PTable viewIndexPTable = + tenantConn.unwrap(PhoenixConnection.class).getTable(globalViewIndexName); + // PK of view index [_INDEX_ID, tenant_id, KV, PK] + byte[] startRow = ByteUtil.concat(PLong.INSTANCE.toBytes(viewIndexPTable.getViewIndexId()), + PChar.INSTANCE.toBytes(tenantId), PChar.INSTANCE.toBytes("KV")); assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testFullRVCWithLeadingPKEqDesc() throws SQLException { - String tenantId = "o1"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE COMMUNITIES.TEST (\n" + - " ORGANIZATION_ID CHAR(2) NOT NULL,\n" + - " SCORE DOUBLE NOT NULL,\n" + - " ENTITY_ID CHAR(2) NOT NULL\n" + - " CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " SCORE DESC,\n" + - " ENTITY_ID DESC\n" + - " )\n" + - ") VERSIONS=1, MULTI_TENANT=TRUE"); - String query = "SELECT entity_id, score\n" + - "FROM communities.test\n" + - "WHERE organization_id = '" + tenantId + "'\n" + - "AND (organization_id, score, entity_id) < ('" + tenantId + "',2.0, '04')\n" + - "ORDER BY score DESC, entity_id DESC"; - Scan scan = compileStatement(query).getScan(); - assertNull(scan.getFilter()); - - // TODO: end to end test that confirms this start row is accurate - byte[] startRow = ByteUtil.concat(PChar.INSTANCE.toBytes(tenantId), PDouble.INSTANCE.toBytes(2.0, SortOrder.DESC), ByteUtil.nextKey(PChar.INSTANCE.toBytes("04", SortOrder.DESC))); - assertArrayEquals(startRow, scan.getStartRow()); - assertArrayEquals(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(tenantId)), scan.getStopRow()); - } - - @Test - public void testTrimTrailing() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - String sql="CREATE TABLE T("+ - "A CHAR(1) NOT NULL,"+ - "B CHAR(1) NOT NULL,"+ - "C CHAR(1) NOT NULL,"+ - "D CHAR(1) NOT NULL,"+ - "DATA INTEGER, "+ - "CONSTRAINT TEST_PK PRIMARY KEY (A,B,C,D))"; - conn.createStatement().execute(sql); - - // Will cause trailing part of RVC to (A,B,C) to be trimmed allowing us to perform a skip scan - sql="select * from T where (A,B,C) >= ('A','A','A') and (A,B,C) < ('D','D','D') and (B,C) > ('E','E')"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - List> rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PChar.INSTANCE.toBytes("A"), true, PChar.INSTANCE.toBytes("D"), false) - ), - Arrays.asList( - KeyRange.getKeyRange(PChar.INSTANCE.toBytes("EE"), false, KeyRange.UNBOUND, false) - ) - ), - rowKeyRanges - ); - assertArrayEquals(scan.getStartRow(), PChar.INSTANCE.toBytes("AEF")); - assertArrayEquals(scan.getStopRow(), PChar.INSTANCE.toBytes("D")); - sql="select * from T where (A,B,C) > ('A','A','A') and (A,B,C) <= ('D','D','D') and (B,C) >= ('E','E')"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PChar.INSTANCE.toBytes("A"), true, PChar.INSTANCE.toBytes("D"), true) - ), - Arrays.asList( - KeyRange.getKeyRange(PChar.INSTANCE.toBytes("EE"), true, KeyRange.UNBOUND, 
false) - ) - ), - rowKeyRanges - ); - assertArrayEquals(PChar.INSTANCE.toBytes("AEE"), scan.getStartRow()); - assertArrayEquals(PChar.INSTANCE.toBytes("E"), scan.getStopRow()); - } - } - - @Test - public void testMultiSlotTrailingIntersect() throws Exception { - try (Connection conn= DriverManager.getConnection(getUrl())) { - String sql="CREATE TABLE T("+ - "A CHAR(1) NOT NULL,"+ - "B CHAR(1) NOT NULL,"+ - "C CHAR(1) NOT NULL,"+ - "D CHAR(1) NOT NULL,"+ - "DATA INTEGER, "+ - "CONSTRAINT TEST_PK PRIMARY KEY (A,B,C,D))"; - conn.createStatement().execute(sql); - - sql = "select * from t where (a,b) in (('A','B'),('B','A'),('B','B'),('A','A')) and (a,b,c) in ( ('A','B','C') , ('A','C','D'), ('B','B','E'))"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - List> rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.POINT.apply(PChar.INSTANCE.toBytes("ABC")), - KeyRange.POINT.apply(PChar.INSTANCE.toBytes("BBE")) - ) - ), - rowKeyRanges - ); - assertArrayEquals(scan.getStartRow(), PChar.INSTANCE.toBytes("ABC")); - assertArrayEquals(scan.getStopRow(), PChar.INSTANCE.toBytes("BBF")); - } - } - - @Test - public void testEqualityAndGreaterThanRVC() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ")"); - String query = "SELECT * FROM T WHERE A = 'C' and (A,B,C) > ('C','B','X') and C='C'"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, query); - Scan scan = queryPlan.getContext().getScan(); - // - // Note: The optimal scan boundary for the above query is ['CCC' - *), however, I don't see an easy way to fix this currently so prioritizing. 
Opened JIRA PHOENIX-5885 - assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes("C"), PChar.INSTANCE.toBytes("B"), PChar.INSTANCE.toBytes("C")), scan.getStartRow()); - assertArrayEquals(PChar.INSTANCE.toBytes("D"), scan.getStopRow()); - } - } - - @Test - public void testEqualityAndGreaterThanRVC2() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - conn.createStatement().execute("CREATE TABLE T (\n" + - " A CHAR(1) NOT NULL,\n" + - " B CHAR(1) NOT NULL,\n" + - " C CHAR(1) NOT NULL,\n" + - " D CHAR(1) NOT NULL,\n" + - " CONSTRAINT PK PRIMARY KEY (\n" + - " A,\n" + - " B,\n" + - " C,\n" + - " D\n" + - " )\n" + - ")"); - String query = "SELECT * FROM T WHERE A = 'C' and (A,B,C) > ('C','B','A') and C='C'"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, query); - Scan scan = queryPlan.getContext().getScan(); - assertArrayEquals(ByteUtil.concat(PChar.INSTANCE.toBytes("C"), PChar.INSTANCE.toBytes("B"), PChar.INSTANCE.toBytes("C")), scan.getStartRow()); - assertArrayEquals(PChar.INSTANCE.toBytes("D"), scan.getStopRow()); - } - } - - @Test - public void testOrExpressionNonLeadingPKPushToScanBug4602() throws Exception { - Connection conn = null; - try { - conn= DriverManager.getConnection(getUrl()); - String testTableName="OR_NO_LEADING_PK4602"; - String sql="CREATE TABLE "+ testTableName +"("+ - "PK1 INTEGER NOT NULL,"+ - "PK2 INTEGER NOT NULL,"+ - "PK3 INTEGER NOT NULL,"+ - "DATA INTEGER, "+ - "CONSTRAINT TEST_PK PRIMARY KEY (PK1,PK2,PK3))"; - conn.createStatement().execute(sql); - - //case 1: pk1 is equal,pk2 is multiRange - sql="select * from "+testTableName+" t where (t.pk1 = 2) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - List> rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList(KeyRange.POINT.apply(PInteger.INSTANCE.toBytes(2))), - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ) - ), - rowKeyRanges - ); - - assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); - assertArrayEquals(scan.getStopRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(9))); - - //case 2: pk1 is range,pk2 is multiRange - sql="select * from "+testTableName+" t where (t.pk1 >=2 and t.pk1<5) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, PInteger.INSTANCE.toBytes(5), false)), - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ) - ), - rowKeyRanges - ); - assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); - assertArrayEquals(scan.getStopRow(), 
PInteger.INSTANCE.toBytes(5)); - - //case 3 : pk1 has multiRange,,pk2 is multiRange - sql="select * from "+testTableName+" t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 and t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, PInteger.INSTANCE.toBytes(5), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7), true, PInteger.INSTANCE.toBytes(9), false) - ), - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ) - ), - rowKeyRanges - ); - assertArrayEquals(scan.getStartRow(), ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4))); - assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); - - //case4 : only pk1 and pk3, no pk2 - sql="select * from "+testTableName+" t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 and t.pk1 <9)) and ((t.pk3 >= 4 and t.pk3 <6) or (t.pk3 >= 8 and t.pk3 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - /** - * This sql use skipScan, and all the whereExpressions are in SkipScanFilter, - * so there is no other RowKeyComparisonFilter needed. - */ - assertTrue(scan.getFilter() instanceof SkipScanFilter); - - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(2), true, PInteger.INSTANCE.toBytes(5), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7), true, PInteger.INSTANCE.toBytes(9), false) - ), - Arrays.asList(KeyRange.EVERYTHING_RANGE), - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ) - ), - rowKeyRanges - ); - assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(2)); - assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); - - //case 5: pk1 or data column - sql="select * from "+testTableName+" t where ((t.pk1 >=2) or (t.data >= 4 and t.data <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SingleCQKeyValueComparisonFilter); - Expression pk1Expression = new ColumnRef(queryPlan.getTableRef(), queryPlan.getTableRef().getTable().getColumnForColumnName("PK1").getPosition()).newColumnExpression(); - Expression dataExpression = new ColumnRef(queryPlan.getTableRef(), queryPlan.getTableRef().getTable().getColumnForColumnName("DATA").getPosition()).newColumnExpression(); - assertEquals( - TestUtil.singleKVFilter( - TestUtil.or( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, pk1Expression, 2), - TestUtil.and( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL, dataExpression, 4), - TestUtil.constantComparison(CompareOperator.LESS, dataExpression, 9) - ) - ) - ), - scan.getFilter()); - assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); - assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); - - //case 6: pk1 or pk2,but pk2 is 
empty range - sql ="select * from "+testTableName+" t where (t.pk1 >=2 and t.pk1<5) or ((t.pk2 >= 4 and t.pk2 <6) and (t.pk2 >= 8 and t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertNull(scan.getFilter()); - assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(2)); - assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(5)); - - //case 7: pk1 or pk2,but pk2 is all range - sql ="select * from "+testTableName+" t where (t.pk1 >=2 and t.pk1<5) or (t.pk2 >=7 or t.pk2 <9)"; - queryPlan= TestUtil.getOptimizeQueryPlan(conn, sql); - - scan = queryPlan.getContext().getScan(); - Expression pk2Expression = new ColumnRef(queryPlan.getTableRef(), queryPlan.getTableRef().getTable().getColumnForColumnName("PK2").getPosition()).newColumnExpression(); - assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); - assertEquals( - TestUtil.rowKeyFilter( - TestUtil.or( - TestUtil.and( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL,pk1Expression, 2), - TestUtil.constantComparison(CompareOperator.LESS,pk1Expression, 5)), - TestUtil.or( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL,pk2Expression, 7), - TestUtil.constantComparison(CompareOperator.LESS,pk2Expression, 9)) - ) - ), - scan.getFilter()); - assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); - assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); - - //case 8: pk1 and pk2, but pk1 has a or allRange - sql="select * from "+testTableName+" t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 or t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); - assertEquals( - TestUtil.rowKeyFilter( - TestUtil.or( - TestUtil.and( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL,pk2Expression, 4), - TestUtil.constantComparison(CompareOperator.LESS,pk2Expression, 6)), - TestUtil.and( - TestUtil.constantComparison(CompareOperator.GREATER_OR_EQUAL,pk2Expression, 8), - TestUtil.constantComparison(CompareOperator.LESS,pk2Expression, 9)) - ) - ), - scan.getFilter()); - - assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); - assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); - - //case 9: pk1 and pk2, but pk2 has a or allRange - sql="select * from "+testTableName+" t where ((t.pk1 >= 4 and t.pk1 <6) or (t.pk1 >= 8 and t.pk1 <9)) and ((t.pk2 >=2 and t.pk2<5) or (t.pk2 >=7 or t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ), - Arrays.asList(KeyRange.EVERYTHING_RANGE)), - rowKeyRanges); - assertArrayEquals(scan.getStartRow(), PInteger.INSTANCE.toBytes(4)); - assertArrayEquals(scan.getStopRow(), PInteger.INSTANCE.toBytes(9)); - - //case 10: only pk2 - sql = "select * from "+testTableName+" t where (pk2 <=7 or pk2>9)"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - pk2Expression = new ColumnRef(queryPlan.getTableRef(), 
queryPlan.getTableRef().getTable().getColumnForColumnName("PK2").getPosition()).newColumnExpression(); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); - assertEquals( - TestUtil.rowKeyFilter( - TestUtil.or( - TestUtil.constantComparison(CompareOperator.LESS_OR_EQUAL,pk2Expression, 7), - TestUtil.constantComparison(CompareOperator.GREATER,pk2Expression, 9))), - scan.getFilter()); - assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); - assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); - - //case 11: pk1 and pk2, but pk1 has a or allRange and force skip scan - sql="select /*+ SKIP_SCAN */ * from "+testTableName+" t where ((t.pk1 >=2 and t.pk1<5) or (t.pk1 >=7 or t.pk1 <9)) and ((t.pk2 >= 4 and t.pk2 <6) or (t.pk2 >= 8 and t.pk2 <9))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(scan.getFilter())).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList(KeyRange.EVERYTHING_RANGE), - Arrays.asList( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4), true, PInteger.INSTANCE.toBytes(6), false), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(8), true, PInteger.INSTANCE.toBytes(9), false) - ) - ), - rowKeyRanges); - assertArrayEquals(scan.getStartRow(), HConstants.EMPTY_START_ROW); - assertArrayEquals(scan.getStopRow(), HConstants.EMPTY_END_ROW); - } - finally { - if(conn!=null) { - conn.close(); - } - } - } - - @Test - public void testLastPkColumnIsVariableLengthAndDescBug5307() throws Exception { - Connection conn = null; - try { - conn = DriverManager.getConnection(getUrl()); - String sql = "CREATE TABLE t1 (\n" + - "OBJECT_VERSION VARCHAR NOT NULL,\n" + - "LOC VARCHAR,\n" + - "CONSTRAINT PK PRIMARY KEY (OBJECT_VERSION DESC))"; - conn.createStatement().execute(sql); - - byte[] startKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - byte[] endKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - ByteUtil.nextKey(endKey, endKey.length); - sql = "SELECT /*+ RANGE_SCAN */ OBJ.OBJECT_VERSION, OBJ.LOC from t1 AS OBJ "+ - "where OBJ.OBJECT_VERSION in ('1111','2222')"; - QueryPlan queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - Scan scan = queryPlan.getContext().getScan(); - assertArrayEquals(startKey, scan.getStartRow()); - assertArrayEquals(endKey, scan.getStopRow()); - - sql = "CREATE TABLE t2 (\n" + - "OBJECT_ID VARCHAR NOT NULL,\n" + - "OBJECT_VERSION VARCHAR NOT NULL,\n" + - "LOC VARCHAR,\n" + - "CONSTRAINT PK PRIMARY KEY (OBJECT_ID, OBJECT_VERSION DESC))"; - conn.createStatement().execute(sql); - - startKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - /** - * For following sql, queryPlan would use SkipScan and is regarded as PointLookup, - * so the endKey is computed as {@link SchemaUtil#VAR_BINARY_SCHEMA},see {@link ScanRanges#create}. 
- */ - endKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY); - - sql = "SELECT OBJ.OBJECT_ID, OBJ.OBJECT_VERSION, OBJ.LOC from t2 AS OBJ "+ - "where (OBJ.OBJECT_ID, OBJ.OBJECT_VERSION) in (('obj1', '2222'),('obj2', '1111'),('obj3', '1111'))"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - FilterList filterList = (FilterList)scan.getFilter(); - assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); - assertEquals(filterList.getFilters().size(),2); - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); - RowKeyComparisonFilter rowKeyComparisonFilter =(RowKeyComparisonFilter) filterList.getFilters().get(1); - assertEquals(rowKeyComparisonFilter.toString(), - "(OBJECT_ID, OBJECT_VERSION) IN (X'6f626a3100cdcdcdcd',X'6f626a3200cececece',X'6f626a3300cececece')"); - - assertTrue(queryPlan.getContext().getScanRanges().isPointLookup()); - assertArrayEquals(startKey, scan.getStartRow()); - assertArrayEquals(endKey, scan.getStopRow()); + } + } + } + + private void createBaseTable(String baseTable) throws SQLException { + + try (Connection globalConnection = DriverManager.getConnection(getUrl())) { + try (Statement cstmt = globalConnection.createStatement()) { + String CO_BASE_TBL_TEMPLATE = + "CREATE TABLE IF NOT EXISTS %s(OID CHAR(15) NOT NULL,KP CHAR(3) NOT NULL,ROW_ID VARCHAR, COL1 VARCHAR,COL2 VARCHAR,COL3 VARCHAR,CREATED_DATE DATE,CREATED_BY CHAR(15),LAST_UPDATE DATE,LAST_UPDATE_BY CHAR(15),SYSTEM_MODSTAMP DATE CONSTRAINT pk PRIMARY KEY (OID,KP)) MULTI_TENANT=true,COLUMN_ENCODED_BYTES=0"; + cstmt.execute(String.format(CO_BASE_TBL_TEMPLATE, baseTable)); + } + } + return; + } + + private void createTenantView(int tenant, String baseTable, String tenantView, String partition, + PDataType pkType1, SortOrder pk1Order, PDataType pkType2, SortOrder pk2Order, PDataType pkType3, + SortOrder pk3Order) throws SQLException { + + String pkType1Str = getType(pkType1); + String pkType2Str = getType(pkType2); + String pkType3Str = getType(pkType3); + createBaseTable(baseTable); + + String tenantConnectionUrl = + String.format("%s;%s=%s%06d", getUrl(), TENANT_ID_ATTRIB, TENANT_PREFIX, tenant); + try (Connection tenantConnection = DriverManager.getConnection(tenantConnectionUrl)) { + try (Statement cstmt = tenantConnection.createStatement()) { + String TENANT_VIEW_TEMPLATE = + "CREATE VIEW IF NOT EXISTS %s(ID1 %s not null,ID2 %s not null,ID3 %s not null,COL4 VARCHAR,COL5 VARCHAR,COL6 VARCHAR CONSTRAINT pk PRIMARY KEY (ID1 %s, ID2 %s, ID3 %s)) " + + "AS SELECT * FROM %s WHERE KP = '%s'"; + cstmt.execute(String.format(TENANT_VIEW_TEMPLATE, tenantView, pkType1Str, pkType2Str, + pkType3Str, pk1Order.name(), pk2Order.name(), pk3Order.name(), baseTable, partition)); + } + } + return; + } + + private int setBindVariables(PhoenixPreparedStatement stmt, int startBindIndex, int numBinds, + PDataType[] testPKTypes) throws SQLException { + + Random rnd = new Random(); + int lastBindCol = 0; + int numCols = testPKTypes.length; + for (int i = 0; i < numBinds; i++) { + for (int b = 0; b < testPKTypes.length; b++) { + int colIndex = startBindIndex + i * numCols + b + 1; + switch (testPKTypes[b].getSqlType()) { + case Types.VARCHAR: { + // pkTypeStr = "VARCHAR(25)"; + 
stmt.setString(colIndex, RandomStringUtils.randomAlphanumeric(25)); + break; + } + case Types.CHAR: { + // pkTypeStr = "CHAR(15)"; + stmt.setString(colIndex, RandomStringUtils.randomAlphanumeric(15)); + break; + } + case Types.DECIMAL: + // pkTypeStr = "DECIMAL(8,2)"; + stmt.setDouble(colIndex, rnd.nextDouble()); + break; + case Types.INTEGER: + // pkTypeStr = "INTEGER"; + stmt.setInt(colIndex, rnd.nextInt(50000)); + break; + case Types.BIGINT: + // pkTypeStr = "BIGINT"; + stmt.setLong(colIndex, System.currentTimeMillis() + rnd.nextInt(50000)); + break; + case Types.DATE: + // pkTypeStr = "DATE"; + stmt.setDate(colIndex, new Date(System.currentTimeMillis() + rnd.nextInt(50000))); + break; + case Types.TIMESTAMP: + // pkTypeStr = "TIMESTAMP"; + stmt.setTimestamp(colIndex, + new Timestamp(System.currentTimeMillis() + rnd.nextInt(50000))); + break; + default: + // pkTypeStr = "VARCHAR(25)"; + stmt.setString(colIndex, RandomStringUtils.randomAlphanumeric(25)); } - finally { - if(conn != null) { - conn.close(); - } - } - } - - @Test - public void testRVCClipBug5753() throws Exception { - String tableName = generateUniqueName(); - try (Connection conn = DriverManager.getConnection(getUrl())) { - conn.setAutoCommit(true); - Statement stmt = conn.createStatement(); - - String sql = "CREATE TABLE "+tableName+" (" + - " pk1 INTEGER NOT NULL , " + - " pk2 INTEGER NOT NULL, " + - " pk3 INTEGER NOT NULL, " + - " pk4 INTEGER NOT NULL, " + - " pk5 INTEGER NOT NULL, " + - " pk6 INTEGER NOT NULL, " + - " pk7 INTEGER NOT NULL, " + - " pk8 INTEGER NOT NULL, " + - " v INTEGER, CONSTRAINT PK PRIMARY KEY(pk1,pk2,pk3 desc,pk4,pk5,pk6 desc,pk7,pk8))";; - - stmt.execute(sql); - - List> rowKeyRanges = null; - RowKeyComparisonFilter rowKeyComparisonFilter = null; - QueryPlan queryPlan = null; - Scan scan = null; - - sql = "SELECT /*+ RANGE_SCAN */ * FROM "+ tableName + - " WHERE (pk1, pk2) IN ((2, 3), (2, 4)) AND pk3 = 5"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof RowKeyComparisonFilter); - rowKeyComparisonFilter = (RowKeyComparisonFilter)scan.getFilter(); - assertEquals(rowKeyComparisonFilter.toString(), - "((PK1, PK2) IN (X'8000000280000003',X'8000000280000004') AND PK3 = 5)"); - assertArrayEquals( - scan.getStartRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(5, SortOrder.DESC))); - assertArrayEquals( - scan.getStopRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4), - ByteUtil.nextKey(PInteger.INSTANCE.toBytes(5, SortOrder.DESC)))); - - sql = "select * from " + tableName + - " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) < (3,5)"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof FilterList); - FilterList filterList = (FilterList)scan.getFilter(); - - assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); - assertEquals(filterList.getFilters().size(),2); - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(filterList.getFilters().get(0))).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(1), - true, - PInteger.INSTANCE.toBytes(2), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(2), - true, - PInteger.INSTANCE.toBytes(3), - true)), - Arrays.asList( 
- KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(3, SortOrder.DESC), - true, - KeyRange.UNBOUND, - false)) - ), - rowKeyRanges); - assertArrayEquals( - scan.getStartRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(1), - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3, SortOrder.DESC))); - assertArrayEquals( - scan.getStopRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4))); - - assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); - rowKeyComparisonFilter =(RowKeyComparisonFilter) filterList.getFilters().get(1); - assertTrue(rowKeyComparisonFilter.toString().equals( - "(TO_INTEGER(PK3), PK4) < (TO_INTEGER(TO_INTEGER(3)), 5)")); - - /** - * RVC is singleKey - */ - sql = "select * from " + tableName + - " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) in ((3,4),(4,5)) and "+ - " (pk5,pk6,pk7) in ((5,6,7),(6,7,8)) and pk8 > 8"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof FilterList); - filterList = (FilterList)scan.getFilter(); - - assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); - assertEquals(filterList.getFilters().size(),2); - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(filterList.getFilters().get(0))).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(1), - true, - PInteger.INSTANCE.toBytes(2), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(2), - true, - PInteger.INSTANCE.toBytes(3), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(4, SortOrder.DESC), - true, - PInteger.INSTANCE.toBytes(4, SortOrder.DESC), - true), - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(3, SortOrder.DESC), - true, - PInteger.INSTANCE.toBytes(3, SortOrder.DESC), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(4), - true, - PInteger.INSTANCE.toBytes(4), - true), - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(5), - true, - PInteger.INSTANCE.toBytes(5), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(5), - true, - PInteger.INSTANCE.toBytes(5), - true), - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(6), - true, - PInteger.INSTANCE.toBytes(6), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(7, SortOrder.DESC), - true, - PInteger.INSTANCE.toBytes(7, SortOrder.DESC), - true), - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(6, SortOrder.DESC), - true, - PInteger.INSTANCE.toBytes(6, SortOrder.DESC), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(7), - true, - PInteger.INSTANCE.toBytes(7), - true), - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(8), - true, - PInteger.INSTANCE.toBytes(8), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(9), - true, - KeyRange.UNBOUND, - false)) - ), - rowKeyRanges); - assertArrayEquals( - scan.getStartRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(1), - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4, SortOrder.DESC), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(7, SortOrder.DESC), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(9))); - assertArrayEquals( - scan.getStopRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - 
PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(3, SortOrder.DESC), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(6), - PInteger.INSTANCE.toBytes(6, SortOrder.DESC), - PInteger.INSTANCE.toBytes(9))); - - assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); - rowKeyComparisonFilter =(RowKeyComparisonFilter) filterList.getFilters().get(1); - assertEquals(rowKeyComparisonFilter.toString(), - "((PK3, PK4) IN (X'7ffffffb80000005',X'7ffffffc80000004') AND (PK5, PK6, PK7) IN (X'800000057ffffff980000007',X'800000067ffffff880000008'))"); - /** - * RVC is not singleKey - */ - sql = "select * from " + tableName + - " where (pk1 >=1 and pk1<=2) and (pk2>=2 and pk2<=3) and (pk3,pk4) < (3,4) and "+ - " (pk5,pk6,pk7) < (5,6,7) and pk8 > 8"; - queryPlan = TestUtil.getOptimizeQueryPlan(conn, sql); - scan = queryPlan.getContext().getScan(); - assertTrue(scan.getFilter() instanceof FilterList); - filterList = (FilterList)scan.getFilter(); - - assertTrue(filterList.getOperator() == Operator.MUST_PASS_ALL); - assertEquals(filterList.getFilters().size(),2); - assertTrue(filterList.getFilters().get(0) instanceof SkipScanFilter); - rowKeyRanges = ((SkipScanFilter)(filterList.getFilters().get(0))).getSlots(); - assertEquals( - Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(1), - true, - PInteger.INSTANCE.toBytes(2), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(2), - true, - PInteger.INSTANCE.toBytes(3), - true)), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(3, SortOrder.DESC), - true, - KeyRange.UNBOUND, - false)), - Arrays.asList(KeyRange.EVERYTHING_RANGE), - Arrays.asList( - KeyRange.getKeyRange( - KeyRange.UNBOUND, - false, - PInteger.INSTANCE.toBytes(5), - true)), - Arrays.asList(KeyRange.EVERYTHING_RANGE), - Arrays.asList(KeyRange.EVERYTHING_RANGE), - Arrays.asList( - KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(9), - true, - KeyRange.UNBOUND, - false)) - ), - rowKeyRanges); - assertArrayEquals( - scan.getStartRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(1), - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3, SortOrder.DESC))); - assertArrayEquals( - scan.getStopRow(), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4))); - - assertTrue(filterList.getFilters().get(1) instanceof RowKeyComparisonFilter); - rowKeyComparisonFilter =(RowKeyComparisonFilter) filterList.getFilters().get(1); - assertTrue(rowKeyComparisonFilter.toString().equals( - "((PK5, TO_INTEGER(PK6), PK7) < (5, TO_INTEGER(TO_INTEGER(6)), 7) AND " + - "(TO_INTEGER(PK3), PK4) < (TO_INTEGER(TO_INTEGER(3)), 4))")); + lastBindCol = colIndex; + } + } + return lastBindCol; + } + + private String getType(PDataType pkType) { + String pkTypeStr = "VARCHAR(25)"; + switch (pkType.getSqlType()) { + case Types.VARCHAR: + pkTypeStr = "VARCHAR(25)"; + break; + case Types.CHAR: + pkTypeStr = "CHAR(15)"; + break; + case Types.DECIMAL: + pkTypeStr = "DECIMAL(8,2)"; + break; + case Types.INTEGER: + pkTypeStr = "INTEGER"; + break; + case Types.BIGINT: + pkTypeStr = "BIGINT"; + break; + case Types.DATE: + pkTypeStr = "DATE"; + break; + case Types.TIMESTAMP: + pkTypeStr = "TIMESTAMP"; + break; + default: + pkTypeStr = "VARCHAR(25)"; + } + return pkTypeStr; + } + + // Test Case 1: PK1 = Timestamp, PK2 = Varchar, PK3 = Integer + private void testTSVarIntAndLargeORs(int tenantId, String viewName, SortOrder[] sortOrder) + throws SQLException { + String testName = 
"testLargeORs"; + String testLargeORs = String.format("SELECT ROW_ID FROM %s ", viewName); + PDataType[] testPKTypes = + new PDataType[] { PTimestamp.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE }; + assertExpectedWithMaxInListAndLargeORs(tenantId, testName, testPKTypes, testLargeORs, + sortOrder); + + } + + public void assertExpectedWithMaxInListAndLargeORs(int tenantId, String testType, + PDataType[] testPKTypes, String testSQL, SortOrder[] sortOrder) throws SQLException { + + Properties tenantProps = PropertiesUtil.deepCopy(TEST_PROPERTIES); + int numINs = 25; + int expectedExtractedNodes = Arrays.asList(new SortOrder[] { sortOrder[0], sortOrder[1] }) + .stream().allMatch(Predicate.isEqual(SortOrder.ASC)) ? 3 : 2; + + // Test for increasing orders of ORs (5,50,500,5000) + for (int o = 0; o < 4; o++) { + int numORs = (int) (5.0 * Math.pow(10.0, (double) o)); + String context = + "ORs:" + numORs + ", sql: " + testSQL + ", type: " + testType + ", sort-order: " + + Arrays.stream(sortOrder).map(s -> s.name()).collect(Collectors.joining(",")); + String tenantConnectionUrl = + String.format("%s;%s=%s%06d", getUrl(), TENANT_ID_ATTRIB, TENANT_PREFIX, tenantId); + try (Connection tenantConnection = + DriverManager.getConnection(tenantConnectionUrl, tenantProps)) { + // Generate the where clause + StringBuilder whereClause = new StringBuilder("(ID1,ID2) IN ((?,?)"); + for (int i = 0; i < numINs; i++) { + whereClause.append(",(?,?)"); } - } - - - @Test - public void testWithLargeORs() throws Exception { - - SortOrder[][] sortOrders = new SortOrder[][] { - {SortOrder.ASC, SortOrder.ASC, SortOrder.ASC}, - {SortOrder.ASC, SortOrder.ASC, SortOrder.DESC}, - {SortOrder.ASC, SortOrder.DESC, SortOrder.ASC}, - {SortOrder.ASC, SortOrder.DESC, SortOrder.DESC}, - {SortOrder.DESC, SortOrder.ASC, SortOrder.ASC}, - {SortOrder.DESC, SortOrder.ASC, SortOrder.DESC}, - {SortOrder.DESC, SortOrder.DESC, SortOrder.ASC}, - {SortOrder.DESC, SortOrder.DESC, SortOrder.DESC} - }; - - String tableName = generateUniqueName(); - String viewName = String.format("Z_%s", tableName); - PDataType[] testTSVarVarPKTypes = new PDataType[] { PTimestamp.INSTANCE, PVarchar.INSTANCE, PInteger.INSTANCE}; - String baseTableName = String.format("TEST_ENTITY.%s", tableName); - int tenantId = 1; - int numTestCases = 1; - for (int index=0;index s.name()).collect(Collectors.joining(",")); - String tenantConnectionUrl = String.format("%s;%s=%s%06d", getUrl(), TENANT_ID_ATTRIB, TENANT_PREFIX, tenantId); - try (Connection tenantConnection = DriverManager.getConnection(tenantConnectionUrl, tenantProps)) { - // Generate the where clause - StringBuilder whereClause = new StringBuilder("(ID1,ID2) IN ((?,?)"); - for (int i = 0; i < numINs; i++) { - whereClause.append(",(?,?)"); - } - whereClause.append(") AND (ID3 = ? 
"); - for (int i = 0; i < numORs; i++) { - whereClause.append(" OR ID3 = ?"); - } - whereClause.append(") LIMIT 200"); - // Full SQL - String query = testSQL + " WHERE " + whereClause; - - PhoenixPreparedStatement stmtForExtractNodesCheck = - tenantConnection.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); - int lastBoundCol = 0; - lastBoundCol = setBindVariables(stmtForExtractNodesCheck, lastBoundCol, - numINs + 1, - new PDataType[] {testPKTypes[0], testPKTypes[1]}); - lastBoundCol = setBindVariables(stmtForExtractNodesCheck, lastBoundCol, - numORs + 1, new PDataType[] {testPKTypes[2]}); - - // Get the column resolver - SelectStatement selectStatement = new SQLParser(query).parseQuery(); - ColumnResolver resolver = FromCompiler.getResolverForQuery(selectStatement, - tenantConnection.unwrap(PhoenixConnection.class)); - - // Where clause with INs and ORs - ParseNode whereNode = selectStatement.getWhere(); - Expression whereExpression = whereNode.accept(new TestWhereExpressionCompiler( - new StatementContext(stmtForExtractNodesCheck, resolver))); - - // Tenant view where clause - ParseNode viewWhere = SQLParser.parseCondition("KP = 'ECZ'"); - Expression viewWhereExpression = viewWhere.accept(new TestWhereExpressionCompiler( - new StatementContext(stmtForExtractNodesCheck, resolver))); - - // Build the test expression - Expression testExpression = AndExpression.create( - Lists.newArrayList(whereExpression, viewWhereExpression)); - - // Test - Set extractedNodes = Sets.newHashSet(); - WhereOptimizer.pushKeyExpressionsToScan(new StatementContext(stmtForExtractNodesCheck, resolver), - Collections.emptySet(), testExpression, extractedNodes, Optional.absent()); - assertEquals(String.format("Unexpected results expected = %d, actual = %d extracted nodes", - expectedExtractedNodes, extractedNodes.size()), - expectedExtractedNodes, extractedNodes.size()); - } + whereClause.append(") LIMIT 200"); + // Full SQL + String query = testSQL + " WHERE " + whereClause; + + PhoenixPreparedStatement stmtForExtractNodesCheck = + tenantConnection.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); + int lastBoundCol = 0; + lastBoundCol = setBindVariables(stmtForExtractNodesCheck, lastBoundCol, numINs + 1, + new PDataType[] { testPKTypes[0], testPKTypes[1] }); + lastBoundCol = setBindVariables(stmtForExtractNodesCheck, lastBoundCol, numORs + 1, + new PDataType[] { testPKTypes[2] }); + + // Get the column resolver + SelectStatement selectStatement = new SQLParser(query).parseQuery(); + ColumnResolver resolver = FromCompiler.getResolverForQuery(selectStatement, + tenantConnection.unwrap(PhoenixConnection.class)); + + // Where clause with INs and ORs + ParseNode whereNode = selectStatement.getWhere(); + Expression whereExpression = whereNode.accept(new TestWhereExpressionCompiler( + new StatementContext(stmtForExtractNodesCheck, resolver))); + + // Tenant view where clause + ParseNode viewWhere = SQLParser.parseCondition("KP = 'ECZ'"); + Expression viewWhereExpression = viewWhere.accept(new TestWhereExpressionCompiler( + new StatementContext(stmtForExtractNodesCheck, resolver))); + + // Build the test expression + Expression testExpression = + AndExpression.create(Lists.newArrayList(whereExpression, viewWhereExpression)); + + // Test + Set extractedNodes = Sets. newHashSet(); + WhereOptimizer.pushKeyExpressionsToScan( + new StatementContext(stmtForExtractNodesCheck, resolver), Collections.emptySet(), + testExpression, extractedNodes, Optional. 
absent()); + assertEquals( + String.format("Unexpected results expected = %d, actual = %d extracted nodes", + expectedExtractedNodes, extractedNodes.size()), + expectedExtractedNodes, extractedNodes.size()); + } - } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java index b3df1f8846a..65cfb26fe2a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/TaskMetaDataEndpointTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,11 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.coprocessor; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.concurrent.ConcurrentMap; -import com.google.protobuf.RpcController; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.RawCellBuilder; import org.apache.hadoop.hbase.ServerName; @@ -43,161 +46,152 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.io.IOException; -import java.util.concurrent.ConcurrentMap; - -import static org.junit.Assert.assertEquals; +import com.google.protobuf.RpcController; /** * Unit tests for TaskMetaDataEndpoint */ public class TaskMetaDataEndpointTest { - private TaskMetaDataEndpoint taskMetaDataEndpoint; - private Configuration configuration; - - @Mock - private Region region; - - @Mock - private RegionInfo regionInfo; - - @Mock - private Connection connection; - - @Mock - private RpcController controller; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - configuration = new Configuration(); - RegionCoprocessorEnvironment environment = - new RegionCoprocessorEnvironment() { - - @Override - public Region getRegion() { - return region; - } - - @Override - public RegionInfo getRegionInfo() { - return regionInfo; - } - - @Override - public OnlineRegions getOnlineRegions() { - return null; - } - - @Override - public ConcurrentMap getSharedData() { - return null; - } - - @Override - public ServerName getServerName() { - return null; - } - - @Override - public Connection getConnection() { - return connection; - } - - @Override - public Connection createConnection( - Configuration conf) { - return null; - } - - @Override - public MetricRegistry getMetricRegistryForRegionServer() { - return null; - } - - @Override - public RawCellBuilder getCellBuilder() { - return null; - } - - @Override - public int getVersion() { - return 0; - } - - @Override - public String getHBaseVersion() { - return null; - } - - @Override - public RegionCoprocessor getInstance() { - return null; - } - - @Override - public int getPriority() { - return 0; - } - - @Override - public int getLoadSequence() { - return 0; - } - - @Override - public Configuration getConfiguration() { - return configuration; - } - - @Override - public ClassLoader getClassLoader() { - return null; - } - }; - taskMetaDataEndpoint = new TaskMetaDataEndpoint(); - taskMetaDataEndpoint.start(environment); - } - - @Test - public void testUpsertTaskDetails() throws Exception { - Mutation mutation = new Put(Bytes.toBytes("row1")); - TaskMetaDataProtos.TaskMutateRequest.Builder builder = - TaskMetaDataProtos.TaskMutateRequest.newBuilder(); - ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); - builder.addTableMetadataMutations(mp.toByteString()); - TaskMetaDataProtos.TaskMutateRequest request = builder.build(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - Mockito.doNothing().when(region).mutateRowsWithLocks( - Mockito.anyCollection(), Mockito.any(), Mockito.anyLong(), - Mockito.anyLong()); - taskMetaDataEndpoint.upsertTaskDetails(controller, request, rpcCallback); - Mockito.verify(region, Mockito.times(1)).mutateRowsWithLocks( - Mockito.anyCollection(), Mockito.any(), Mockito.anyLong(), - Mockito.anyLong()); - } - - @Test - public void testUpsertTaskDetailsFailure() throws Exception { - Mutation mutation = new Put(Bytes.toBytes("row2")); - TaskMetaDataProtos.TaskMutateRequest.Builder builder = - 
TaskMetaDataProtos.TaskMutateRequest.newBuilder(); - ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); - builder.addTableMetadataMutations(mp.toByteString()); - TaskMetaDataProtos.TaskMutateRequest request = builder.build(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - Mockito.doThrow(IOException.class).when(region).mutateRowsWithLocks( - Mockito.anyCollection(), Mockito.any(), Mockito.anyLong(), - Mockito.anyLong()); - taskMetaDataEndpoint.upsertTaskDetails(controller, request, rpcCallback); - Mockito.verify(region, Mockito.times(1)).mutateRowsWithLocks( - Mockito.anyCollection(), Mockito.any(), Mockito.anyLong(), - Mockito.anyLong()); - assertEquals(MetaDataProtos.MutationCode.UNABLE_TO_UPSERT_TASK, - rpcCallback.get().getReturnCode()); - } - -} \ No newline at end of file + private TaskMetaDataEndpoint taskMetaDataEndpoint; + private Configuration configuration; + + @Mock + private Region region; + + @Mock + private RegionInfo regionInfo; + + @Mock + private Connection connection; + + @Mock + private RpcController controller; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + configuration = new Configuration(); + RegionCoprocessorEnvironment environment = new RegionCoprocessorEnvironment() { + + @Override + public Region getRegion() { + return region; + } + + @Override + public RegionInfo getRegionInfo() { + return regionInfo; + } + + @Override + public OnlineRegions getOnlineRegions() { + return null; + } + + @Override + public ConcurrentMap getSharedData() { + return null; + } + + @Override + public ServerName getServerName() { + return null; + } + + @Override + public Connection getConnection() { + return connection; + } + + @Override + public Connection createConnection(Configuration conf) { + return null; + } + + @Override + public MetricRegistry getMetricRegistryForRegionServer() { + return null; + } + + @Override + public RawCellBuilder getCellBuilder() { + return null; + } + + @Override + public int getVersion() { + return 0; + } + + @Override + public String getHBaseVersion() { + return null; + } + + @Override + public RegionCoprocessor getInstance() { + return null; + } + + @Override + public int getPriority() { + return 0; + } + + @Override + public int getLoadSequence() { + return 0; + } + + @Override + public Configuration getConfiguration() { + return configuration; + } + + @Override + public ClassLoader getClassLoader() { + return null; + } + }; + taskMetaDataEndpoint = new TaskMetaDataEndpoint(); + taskMetaDataEndpoint.start(environment); + } + + @Test + public void testUpsertTaskDetails() throws Exception { + Mutation mutation = new Put(Bytes.toBytes("row1")); + TaskMetaDataProtos.TaskMutateRequest.Builder builder = + TaskMetaDataProtos.TaskMutateRequest.newBuilder(); + ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); + builder.addTableMetadataMutations(mp.toByteString()); + TaskMetaDataProtos.TaskMutateRequest request = builder.build(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + Mockito.doNothing().when(region).mutateRowsWithLocks(Mockito.anyCollection(), Mockito.any(), + Mockito.anyLong(), Mockito.anyLong()); + taskMetaDataEndpoint.upsertTaskDetails(controller, request, rpcCallback); + Mockito.verify(region, Mockito.times(1)).mutateRowsWithLocks(Mockito.anyCollection(), + Mockito.any(), Mockito.anyLong(), Mockito.anyLong()); + } + + @Test + public void 
testUpsertTaskDetailsFailure() throws Exception { + Mutation mutation = new Put(Bytes.toBytes("row2")); + TaskMetaDataProtos.TaskMutateRequest.Builder builder = + TaskMetaDataProtos.TaskMutateRequest.newBuilder(); + ClientProtos.MutationProto mp = ProtobufUtil.toProto(mutation); + builder.addTableMetadataMutations(mp.toByteString()); + TaskMetaDataProtos.TaskMutateRequest request = builder.build(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + Mockito.doThrow(IOException.class).when(region).mutateRowsWithLocks(Mockito.anyCollection(), + Mockito.any(), Mockito.anyLong(), Mockito.anyLong()); + taskMetaDataEndpoint.upsertTaskDetails(controller, request, rpcCallback); + Mockito.verify(region, Mockito.times(1)).mutateRowsWithLocks(Mockito.anyCollection(), + Mockito.any(), Mockito.anyLong(), Mockito.anyLong()); + assertEquals(MetaDataProtos.MutationCode.UNABLE_TO_UPSERT_TASK, + rpcCallback.get().getReturnCode()); + } + +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisonsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisonsTest.java index 106471b537d..0fa9890a4e1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisonsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/DescVarLengthFastByteComparisonsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,22 +24,22 @@ import org.junit.Test; public class DescVarLengthFastByteComparisonsTest { - - @Test - public void testNullIsSmallest() { - byte[] b1 = ByteUtil.EMPTY_BYTE_ARRAY; - byte[] b2 = Bytes.toBytes("a"); - int cmp = DescVarLengthFastByteComparisons.compareTo(b1, 0, b1.length, b2, 0, b2.length); - assertTrue(cmp < 0); - cmp = DescVarLengthFastByteComparisons.compareTo(b2, 0, b2.length, b1, 0, b1.length); - assertTrue(cmp > 0); - } - - @Test - public void testShorterSubstringIsBigger() { - byte[] b1 = Bytes.toBytes("ab"); - byte[] b2 = Bytes.toBytes("a"); - int cmp = DescVarLengthFastByteComparisons.compareTo(b1, 0, b1.length, b2, 0, b2.length); - assertTrue(cmp < 0); - } + + @Test + public void testNullIsSmallest() { + byte[] b1 = ByteUtil.EMPTY_BYTE_ARRAY; + byte[] b2 = Bytes.toBytes("a"); + int cmp = DescVarLengthFastByteComparisons.compareTo(b1, 0, b1.length, b2, 0, b2.length); + assertTrue(cmp < 0); + cmp = DescVarLengthFastByteComparisons.compareTo(b2, 0, b2.length, b1, 0, b1.length); + assertTrue(cmp > 0); + } + + @Test + public void testShorterSubstringIsBigger() { + byte[] b1 = Bytes.toBytes("ab"); + byte[] b2 = Bytes.toBytes("a"); + int cmp = DescVarLengthFastByteComparisons.compareTo(b1, 0, b1.length, b2, 0, b2.length); + assertTrue(cmp < 0); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java index 904f69ed580..18f9b6996c0 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java @@ -7,7 +7,7 @@ * 
"License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +31,6 @@ import java.util.Collections; import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; @@ -67,156 +66,125 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; -import org.junit.Test; - +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class LiteralResultIteratorPlanTest { - private static final StatementContext CONTEXT; - - static { - try { - PhoenixConnection connection = DriverManager - .getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS) - .unwrap(PhoenixConnection.class); - PhoenixStatement stmt = new PhoenixStatement(connection); - ColumnResolver resolver = FromCompiler.getResolverForQuery(SelectStatement.SELECT_ONE, connection); - CONTEXT = new StatementContext(stmt, resolver, new Scan(), new SequenceManager(stmt)); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - private static final Object[][] RELATION = new Object[][] { - {"2", 20}, - {"2", 40}, - {"5", 50}, - {"6", 60}, - {"5", 100}, - {"1", 10}, - {"3", 30}, - }; - PTable table = createProjectedTableFromLiterals(RELATION[0]).getTable(); - - @Test - public void testLiteralResultIteratorPlanWithOffset() throws SQLException { - Object[][] expected = new Object[][] { - {"2", 40}, - {"5", 50}, - {"6", 60}, - {"5", 100}, - {"1", 10}, - {"3", 30}, - }; - testLiteralResultIteratorPlan(expected, 1, null); - } - - @Test - public void testLiteralResultIteratorPlanWithLimit() throws SQLException { - Object[][] expected = new Object[][] { - {"2", 20}, - {"2", 40}, - {"5", 50}, - {"6", 60}, - }; - testLiteralResultIteratorPlan(expected, null, 4); + private static final StatementContext CONTEXT; + + static { + try { + PhoenixConnection connection = + DriverManager.getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS) + .unwrap(PhoenixConnection.class); + PhoenixStatement stmt = new PhoenixStatement(connection); + ColumnResolver resolver = + FromCompiler.getResolverForQuery(SelectStatement.SELECT_ONE, connection); + CONTEXT = new StatementContext(stmt, resolver, new Scan(), new SequenceManager(stmt)); + } catch (SQLException e) { + throw new RuntimeException(e); } - - @Test - public void testLiteralResultIteratorPlanWithLimitAndOffset() throws SQLException { - Object[][] expected = new Object[][] { - {"5", 50}, - {"6", 60}, - {"5", 100}, - {"1", 10}, - }; - testLiteralResultIteratorPlan(expected, 2, 4); + } + + private static final Object[][] RELATION = new Object[][] { { "2", 20 }, { "2", 40 }, { "5", 50 }, + { "6", 60 }, { "5", 100 }, { "1", 10 }, { "3", 30 }, }; + PTable table = createProjectedTableFromLiterals(RELATION[0]).getTable(); + + @Test + public void testLiteralResultIteratorPlanWithOffset() throws SQLException { + Object[][] expected = new Object[][] { { "2", 40 }, { "5", 50 }, { "6", 60 }, { "5", 100 }, + { 
"1", 10 }, { "3", 30 }, }; + testLiteralResultIteratorPlan(expected, 1, null); + } + + @Test + public void testLiteralResultIteratorPlanWithLimit() throws SQLException { + Object[][] expected = new Object[][] { { "2", 20 }, { "2", 40 }, { "5", 50 }, { "6", 60 }, }; + testLiteralResultIteratorPlan(expected, null, 4); + } + + @Test + public void testLiteralResultIteratorPlanWithLimitAndOffset() throws SQLException { + Object[][] expected = new Object[][] { { "5", 50 }, { "6", 60 }, { "5", 100 }, { "1", 10 }, }; + testLiteralResultIteratorPlan(expected, 2, 4); + } + + private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, + Integer limit) throws SQLException { + + QueryPlan plan = newLiteralResultIterationPlan(offset, limit); + ResultIterator iter = plan.iterator(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + for (Object[] row : expectedResult) { + Tuple next = iter.next(); + assertNotNull(next); + for (int i = 0; i < row.length; i++) { + PColumn column = table.getColumns().get(i); + boolean eval = new ProjectedColumnExpression(column, table, column.getName().getString()) + .evaluate(next, ptr); + Object o = eval ? column.getDataType().toObject(ptr) : null; + assertEquals(row[i], o); + } } - - private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, Integer limit) - throws SQLException { - - QueryPlan plan = newLiteralResultIterationPlan(offset, limit); - ResultIterator iter = plan.iterator(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - for (Object[] row : expectedResult) { - Tuple next = iter.next(); - assertNotNull(next); - for (int i = 0; i < row.length; i++) { - PColumn column = table.getColumns().get(i); - boolean eval = new ProjectedColumnExpression(column, table, column.getName().getString()).evaluate(next, - ptr); - Object o = eval ? column.getDataType().toObject(ptr) : null; - assertEquals(row[i], o); - } - } - assertNull(iter.next()); + assertNull(iter.next()); + } + + private QueryPlan newLiteralResultIterationPlan(Integer offset, Integer limit) + throws SQLException { + List tuples = Lists.newArrayList(); + + Tuple baseTuple = new SingleKeyValueTuple(KeyValue.LOWESTKEY); + for (Object[] row : RELATION) { + Expression[] exprs = new Expression[row.length]; + for (int i = 0; i < row.length; i++) { + exprs[i] = LiteralExpression.newConstant(row[i]); + } + TupleProjector projector = new TupleProjector(exprs); + tuples.add(projector.projectResults(baseTuple)); } - private QueryPlan newLiteralResultIterationPlan(Integer offset, Integer limit) throws SQLException { - List tuples = Lists.newArrayList(); - - Tuple baseTuple = new SingleKeyValueTuple(KeyValue.LOWESTKEY); - for (Object[] row : RELATION) { - Expression[] exprs = new Expression[row.length]; - for (int i = 0; i < row.length; i++) { - exprs[i] = LiteralExpression.newConstant(row[i]); - } - TupleProjector projector = new TupleProjector(exprs); - tuples.add(projector.projectResults(baseTuple)); - } - - return new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, TableRef.EMPTY_TABLE_REF, - RowProjector.EMPTY_PROJECTOR, limit, offset, OrderBy.EMPTY_ORDER_BY, null); + return new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, + TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, limit, offset, OrderBy.EMPTY_ORDER_BY, + null); + } + + private TableRef createProjectedTableFromLiterals(Object[] row) { + List columns = Lists. 
newArrayList(); + for (int i = 0; i < row.length; i++) { + String name = ParseNodeFactory.createTempAlias(); + Expression expr = LiteralExpression.newConstant(row[i]); + PName colName = PNameFactory.newName(name); + columns.add(new PColumnImpl(PNameFactory.newName(name), + PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(), + expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, + false, colName.getBytes(), HConstants.LATEST_TIMESTAMP)); } - - private TableRef createProjectedTableFromLiterals(Object[] row) { - List columns = Lists. newArrayList(); - for (int i = 0; i < row.length; i++) { - String name = ParseNodeFactory.createTempAlias(); - Expression expr = LiteralExpression.newConstant(row[i]); - PName colName = PNameFactory.newName(name); - columns.add(new PColumnImpl(PNameFactory.newName(name), - PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(), - expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes(), - HConstants.LATEST_TIMESTAMP)); - } - try { - PTable pTable = new PTableImpl.Builder() - .setType(PTableType.SUBQUERY) - .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP) - .setIndexDisableTimestamp(0L) - .setSequenceNumber(PTable.INITIAL_SEQ_NUM) - .setImmutableRows(false) - .setDisableWAL(false) - .setMultiTenant(false) - .setStoreNulls(false) - .setUpdateCacheFrequency(0) - .setNamespaceMapped(false) - .setAppendOnlySchema(false) - .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) - .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) - .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) - .setEncodedCQCounter(EncodedCQCounter.NULL_COUNTER) - .setUseStatsForParallelization(true) - .setExcludedColumns(ImmutableList.of()) - .setSchemaName(PName.EMPTY_NAME) - .setTableName(PName.EMPTY_NAME) - .setRowKeyOrderOptimizable(true) - .setIndexes(Collections.emptyList()) - .setPhysicalNames(ImmutableList.of()) - .setColumns(columns) - .build(); - TableRef sourceTable = new TableRef(pTable); - List sourceColumnRefs = Lists.newArrayList(); - for (PColumn column : sourceTable.getTable().getColumns()) { - sourceColumnRefs.add(new ColumnRef(sourceTable, column.getPosition())); - } - - return new TableRef(TupleProjectionCompiler.createProjectedTable(sourceTable, sourceColumnRefs, false)); - } catch (SQLException e) { - throw new RuntimeException(e); - } + try { + PTable pTable = new PTableImpl.Builder().setType(PTableType.SUBQUERY) + .setTimeStamp(MetaDataProtocol.MIN_TABLE_TIMESTAMP).setIndexDisableTimestamp(0L) + .setSequenceNumber(PTable.INITIAL_SEQ_NUM).setImmutableRows(false).setDisableWAL(false) + .setMultiTenant(false).setStoreNulls(false).setUpdateCacheFrequency(0) + .setNamespaceMapped(false).setAppendOnlySchema(false) + .setImmutableStorageScheme(ImmutableStorageScheme.ONE_CELL_PER_COLUMN) + .setQualifierEncodingScheme(QualifierEncodingScheme.NON_ENCODED_QUALIFIERS) + .setBaseColumnCount(BASE_TABLE_BASE_COLUMN_COUNT) + .setEncodedCQCounter(EncodedCQCounter.NULL_COUNTER).setUseStatsForParallelization(true) + .setExcludedColumns(ImmutableList.of()).setSchemaName(PName.EMPTY_NAME) + .setTableName(PName.EMPTY_NAME).setRowKeyOrderOptimizable(true) + .setIndexes(Collections.emptyList()).setPhysicalNames(ImmutableList.of()) + .setColumns(columns).build(); + TableRef sourceTable = new TableRef(pTable); + List sourceColumnRefs = Lists.newArrayList(); + for (PColumn column : 
sourceTable.getTable().getColumns()) { + sourceColumnRefs.add(new ColumnRef(sourceTable, column.getPosition())); + } + + return new TableRef( + TupleProjectionCompiler.createProjectedTable(sourceTable, sourceColumnRefs, false)); + } catch (SQLException e) { + throw new RuntimeException(e); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java index 0a910e2fdd0..482061d5532 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/MutationStateTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -17,6 +17,27 @@ */ package org.apache.phoenix.execute; +import static org.apache.phoenix.execute.MutationState.joinSortedIntArrays; +import static org.apache.phoenix.query.BaseTest.generateUniqueName; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Delete; @@ -40,245 +61,224 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import static org.apache.phoenix.execute.MutationState.joinSortedIntArrays; -import static org.apache.phoenix.query.BaseTest.generateUniqueName; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - - public class MutationStateTest { - @Test - public void testJoinIntArrays() { - // simple case - int[] a = new int[] {1}; - int[] b = new int[] {2}; - int[] result = 
joinSortedIntArrays(a, b); - - assertEquals(2, result.length); - assertArrayEquals(new int[] {1,2}, result); - - // empty arrays - a = new int[0]; - b = new int[0]; - result = joinSortedIntArrays(a, b); - - assertEquals(0, result.length); - assertArrayEquals(new int[] {}, result); - - // dupes between arrays - a = new int[] {1,2,3}; - b = new int[] {1,2,4}; - result = joinSortedIntArrays(a, b); - - assertEquals(4, result.length); - assertArrayEquals(new int[] {1,2,3,4}, result); - - // dupes within arrays - a = new int[] {1,2,2,3}; - b = new int[] {1,2,4}; - result = joinSortedIntArrays(a, b); - - assertEquals(4, result.length); - assertArrayEquals(new int[] {1,2,3,4}, result); + @Test + public void testJoinIntArrays() { + // simple case + int[] a = new int[] { 1 }; + int[] b = new int[] { 2 }; + int[] result = joinSortedIntArrays(a, b); + + assertEquals(2, result.length); + assertArrayEquals(new int[] { 1, 2 }, result); + + // empty arrays + a = new int[0]; + b = new int[0]; + result = joinSortedIntArrays(a, b); + + assertEquals(0, result.length); + assertArrayEquals(new int[] {}, result); + + // dupes between arrays + a = new int[] { 1, 2, 3 }; + b = new int[] { 1, 2, 4 }; + result = joinSortedIntArrays(a, b); + + assertEquals(4, result.length); + assertArrayEquals(new int[] { 1, 2, 3, 4 }, result); + + // dupes within arrays + a = new int[] { 1, 2, 2, 3 }; + b = new int[] { 1, 2, 4 }; + result = joinSortedIntArrays(a, b); + + assertEquals(4, result.length); + assertArrayEquals(new int[] { 1, 2, 3, 4 }, result); + } + + private static String getUrl() { + return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + PhoenixRuntime.CONNECTIONLESS; + } + + @Test + public void testToMutationsOverMultipleTables() throws Exception { + Connection conn = null; + try { + conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("create table MUTATION_TEST1" + + "( id1 UNSIGNED_INT not null primary key," + "appId1 VARCHAR)"); + conn.createStatement().execute("create table MUTATION_TEST2" + + "( id2 UNSIGNED_INT not null primary key," + "appId2 VARCHAR)"); + + conn.createStatement().execute("upsert into MUTATION_TEST1(id1,appId1) values(111,'app1')"); + conn.createStatement().execute("upsert into MUTATION_TEST2(id2,appId2) values(222,'app2')"); + + Iterator>> dataTableNameAndMutationKeyValuesIter = + PhoenixRuntime.getUncommittedDataIterator(conn); + + assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext()); + Pair> pair = dataTableNameAndMutationKeyValuesIter.next(); + String tableName1 = Bytes.toString(pair.getFirst()); + List keyValues1 = pair.getSecond(); + + assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext()); + pair = dataTableNameAndMutationKeyValuesIter.next(); + String tableName2 = Bytes.toString(pair.getFirst()); + List keyValues2 = pair.getSecond(); + + if ("MUTATION_TEST1".equals(tableName1)) { + assertTable(tableName1, keyValues1, tableName2, keyValues2); + } else { + assertTable(tableName2, keyValues2, tableName1, keyValues1); + } + assertTrue(!dataTableNameAndMutationKeyValuesIter.hasNext()); + } finally { + if (conn != null) { + conn.close(); + } } - - private static String getUrl() { - return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS; + } + + private void assertTable(String tableName1, List keyValues1, String tableName2, + List keyValues2) { + assertTrue("MUTATION_TEST1".equals(tableName1)); + assertTrue( + Bytes.equals(PUnsignedInt.INSTANCE.toBytes(111), 
CellUtil.cloneRow(keyValues1.get(0)))); + assertTrue("app1".equals(PVarchar.INSTANCE.toObject(CellUtil.cloneValue(keyValues1.get(1))))); + + assertTrue("MUTATION_TEST2".equals(tableName2)); + assertTrue( + Bytes.equals(PUnsignedInt.INSTANCE.toBytes(222), CellUtil.cloneRow(keyValues2.get(0)))); + assertTrue("app2".equals(PVarchar.INSTANCE.toObject(CellUtil.cloneValue(keyValues2.get(1))))); + + } + + @Test + public void testGetMutationBatchList() { + byte[] r1 = Bytes.toBytes(1); + byte[] r2 = Bytes.toBytes(2); + byte[] r3 = Bytes.toBytes(3); + byte[] r4 = Bytes.toBytes(4); + // one put and one delete as a group + { + List list = ImmutableList.of(new Put(r1), new Put(r2), new Delete(r2)); + List> batchLists = MutationState.getMutationBatchList(2, 10, list); + assertTrue(batchLists.size() == 2); + assertEquals(batchLists.get(0).size(), 1); + assertEquals(batchLists.get(1).size(), 2); } - @Test - public void testToMutationsOverMultipleTables() throws Exception { - Connection conn = null; - try { - conn=DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "create table MUTATION_TEST1"+ - "( id1 UNSIGNED_INT not null primary key,"+ - "appId1 VARCHAR)"); - conn.createStatement().execute( - "create table MUTATION_TEST2"+ - "( id2 UNSIGNED_INT not null primary key,"+ - "appId2 VARCHAR)"); - - conn.createStatement().execute("upsert into MUTATION_TEST1(id1,appId1) values(111,'app1')"); - conn.createStatement().execute("upsert into MUTATION_TEST2(id2,appId2) values(222,'app2')"); - - - Iterator>> dataTableNameAndMutationKeyValuesIter = - PhoenixRuntime.getUncommittedDataIterator(conn); - - - assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext()); - Pair> pair=dataTableNameAndMutationKeyValuesIter.next(); - String tableName1=Bytes.toString(pair.getFirst()); - List keyValues1=pair.getSecond(); - - assertTrue(dataTableNameAndMutationKeyValuesIter.hasNext()); - pair=dataTableNameAndMutationKeyValuesIter.next(); - String tableName2=Bytes.toString(pair.getFirst()); - List keyValues2=pair.getSecond(); - - if("MUTATION_TEST1".equals(tableName1)) { - assertTable(tableName1, keyValues1, tableName2, keyValues2); - } - else { - assertTable(tableName2, keyValues2, tableName1, keyValues1); - } - assertTrue(!dataTableNameAndMutationKeyValuesIter.hasNext()); - } - finally { - if(conn!=null) { - conn.close(); - } - } + { + List list = ImmutableList.of(new Put(r1), new Delete(r1), new Put(r2)); + List> batchLists = MutationState.getMutationBatchList(2, 10, list); + assertTrue(batchLists.size() == 2); + assertEquals(batchLists.get(0).size(), 2); + assertEquals(batchLists.get(1).size(), 1); } - private void assertTable(String tableName1,List keyValues1,String tableName2,List keyValues2) { - assertTrue("MUTATION_TEST1".equals(tableName1)); - assertTrue(Bytes.equals(PUnsignedInt.INSTANCE.toBytes(111),CellUtil.cloneRow(keyValues1.get(0)))); - assertTrue("app1".equals(PVarchar.INSTANCE.toObject(CellUtil.cloneValue(keyValues1.get(1))))); - - assertTrue("MUTATION_TEST2".equals(tableName2)); - assertTrue(Bytes.equals(PUnsignedInt.INSTANCE.toBytes(222),CellUtil.cloneRow(keyValues2.get(0)))); - assertTrue("app2".equals(PVarchar.INSTANCE.toObject(CellUtil.cloneValue(keyValues2.get(1))))); - + { + List list = ImmutableList.of(new Put(r3), new Put(r1), new Delete(r1), new Put(r2), + new Put(r4), new Delete(r4)); + List> batchLists = MutationState.getMutationBatchList(2, 10, list); + assertTrue(batchLists.size() == 4); + assertEquals(batchLists.get(0).size(), 1); + assertEquals(batchLists.get(1).size(), 2); 
+ assertEquals(batchLists.get(2).size(), 1); + assertEquals(batchLists.get(3).size(), 2); } - @Test - public void testGetMutationBatchList() { - byte[] r1 = Bytes.toBytes(1); - byte[] r2 = Bytes.toBytes(2); - byte[] r3 = Bytes.toBytes(3); - byte[] r4 = Bytes.toBytes(4); - // one put and one delete as a group - { - List list = ImmutableList.of(new Put(r1), new Put(r2), new Delete(r2)); - List> batchLists = MutationState.getMutationBatchList(2, 10, list); - assertTrue(batchLists.size() == 2); - assertEquals(batchLists.get(0).size(), 1); - assertEquals(batchLists.get(1).size(), 2); - } - - { - List list = ImmutableList.of(new Put(r1), new Delete(r1), new Put(r2)); - List> batchLists = MutationState.getMutationBatchList(2, 10, list); - assertTrue(batchLists.size() == 2); - assertEquals(batchLists.get(0).size(), 2); - assertEquals(batchLists.get(1).size(), 1); - } + } - { - List list = ImmutableList.of(new Put(r3), new Put(r1), new Delete(r1), new Put(r2), new Put(r4), new Delete(r4)); - List> batchLists = MutationState.getMutationBatchList(2, 10, list); - assertTrue(batchLists.size() == 4); - assertEquals(batchLists.get(0).size(), 1); - assertEquals(batchLists.get(1).size(), 2); - assertEquals(batchLists.get(2).size(), 1); - assertEquals(batchLists.get(3).size(), 2); - } + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); - } + @Test + public void testPendingMutationsOnDDL() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, "true"); + try (Connection conn = DriverManager.getConnection(getUrl(), props); + PhoenixConnection pConnSpy = spy((PhoenixConnection) conn)) { + MutationState mutationState = mock(MutationState.class); + when(mutationState.getNumRows()).thenReturn(1); - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - - @Test - public void testPendingMutationsOnDDL() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, "true"); - try (Connection conn = DriverManager.getConnection(getUrl(), props); - PhoenixConnection pConnSpy = spy((PhoenixConnection) conn)) { - MutationState mutationState = mock(MutationState.class); - when(mutationState.getNumRows()).thenReturn(1); - - // Create a connection with mutation state and mock it - doReturn(mutationState).when(pConnSpy).getMutationState(); - exceptionRule.expect(SQLException.class); - exceptionRule.expectMessage( - SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage()); - - pConnSpy.createStatement().execute("create table MUTATION_TEST1" - + "( id1 UNSIGNED_INT not null primary key," + "appId1 VARCHAR)"); - } + // Create a connection with mutation state and mock it + doReturn(mutationState).when(pConnSpy).getMutationState(); + exceptionRule.expect(SQLException.class); + exceptionRule + .expectMessage(SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage()); + pConnSpy.createStatement().execute("create table MUTATION_TEST1" + + "( id1 UNSIGNED_INT not null primary key," + "appId1 VARCHAR)"); } - @Test - public void testOnDupAndUpsertInSameCommitBatch() throws Exception { - String dataTable1 = generateUniqueName(); - String dataTable2 = generateUniqueName(); - try (Connection conn = DriverManager.getConnection(getUrl())) { - conn.createStatement().execute(String.format( - "create table %s (id1 UNSIGNED_INT not null primary key, appId1 VARCHAR)", dataTable1)); - 
conn.createStatement().execute(String.format( - "create table %s (id2 UNSIGNED_INT not null primary key, appId2 VARCHAR)", dataTable2)); - - conn.createStatement().execute(String.format( - "upsert into %s(id1,appId1) values(111,'app1')", dataTable1)); - conn.createStatement().execute(String.format( - "upsert into %s(id1,appId1) values(111, 'app1') ON DUPLICATE KEY UPDATE appId1 = null", dataTable1)); - conn.createStatement().execute(String.format( - "upsert into %s(id2,appId2) values(222,'app2')", dataTable2)); - conn.createStatement().execute(String.format( - "upsert into %s(id2,appId2) values(222,'app2') ON DUPLICATE KEY UPDATE appId2 = null", dataTable2)); - - final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - MutationState state = pconn.getMutationState(); - assertEquals(2, state.getNumRows()); - - int actualPairs = 0; - Iterator>> mutations = state.toMutations(); - while (mutations.hasNext()) { - Pair> nextTable = mutations.next(); - ++actualPairs; - assertEquals(1, nextTable.getSecond().size()); - } - // we have 2 tables and each table has 2 mutation batches - // so we should get 4
    pairs - assertEquals(4, actualPairs); - - List> commitBatches = state.createCommitBatches(); - assertEquals(2, commitBatches.size()); - // first commit batch should only contain regular upserts - verifyCommitBatch(commitBatches.get(0), false, 2, 1); - verifyCommitBatch(commitBatches.get(1), true, 2, 1); - } + } + + @Test + public void testOnDupAndUpsertInSameCommitBatch() throws Exception { + String dataTable1 = generateUniqueName(); + String dataTable2 = generateUniqueName(); + try (Connection conn = DriverManager.getConnection(getUrl())) { + conn.createStatement().execute(String.format( + "create table %s (id1 UNSIGNED_INT not null primary key, appId1 VARCHAR)", dataTable1)); + conn.createStatement().execute(String.format( + "create table %s (id2 UNSIGNED_INT not null primary key, appId2 VARCHAR)", dataTable2)); + + conn.createStatement() + .execute(String.format("upsert into %s(id1,appId1) values(111,'app1')", dataTable1)); + conn.createStatement() + .execute(String.format( + "upsert into %s(id1,appId1) values(111, 'app1') ON DUPLICATE KEY UPDATE appId1 = null", + dataTable1)); + conn.createStatement() + .execute(String.format("upsert into %s(id2,appId2) values(222,'app2')", dataTable2)); + conn.createStatement() + .execute(String.format( + "upsert into %s(id2,appId2) values(222,'app2') ON DUPLICATE KEY UPDATE appId2 = null", + dataTable2)); + + final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + MutationState state = pconn.getMutationState(); + assertEquals(2, state.getNumRows()); + + int actualPairs = 0; + Iterator>> mutations = state.toMutations(); + while (mutations.hasNext()) { + Pair> nextTable = mutations.next(); + ++actualPairs; + assertEquals(1, nextTable.getSecond().size()); + } + // we have 2 tables and each table has 2 mutation batches + // so we should get 4
    pairs + assertEquals(4, actualPairs); + + List> commitBatches = state.createCommitBatches(); + assertEquals(2, commitBatches.size()); + // first commit batch should only contain regular upserts + verifyCommitBatch(commitBatches.get(0), false, 2, 1); + verifyCommitBatch(commitBatches.get(1), true, 2, 1); } - - private void verifyCommitBatch(Map commitBatch, boolean conditional, - int numberOfBatches, int rowsPerBatch) { - // one for each table - assertEquals(numberOfBatches, commitBatch.size()); - for (Map.Entry entry : commitBatch.entrySet()) { - TableRef tableRef = entry.getKey(); - MultiRowMutationState batch = entry.getValue(); - assertEquals(rowsPerBatch, batch.size()); - for (Map.Entry row : batch.entrySet()) { - ImmutableBytesPtr key = row.getKey(); - RowMutationState rowMutationState = row.getValue(); - if (conditional == true) { - assertNotNull(rowMutationState.getOnDupKeyBytes()); - } else { - assertNull(rowMutationState.getOnDupKeyBytes()); - } - } + } + + private void verifyCommitBatch(Map commitBatch, + boolean conditional, int numberOfBatches, int rowsPerBatch) { + // one for each table + assertEquals(numberOfBatches, commitBatch.size()); + for (Map.Entry entry : commitBatch.entrySet()) { + TableRef tableRef = entry.getKey(); + MultiRowMutationState batch = entry.getValue(); + assertEquals(rowsPerBatch, batch.size()); + for (Map.Entry row : batch.entrySet()) { + ImmutableBytesPtr key = row.getKey(); + RowMutationState rowMutationState = row.getValue(); + if (conditional == true) { + assertNotNull(rowMutationState.getOnDupKeyBytes()); + } else { + assertNull(rowMutationState.getOnDupKeyBytes()); } + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/SortMergeJoinTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/SortMergeJoinTest.java index 4a76bc5f04d..379a28c2870 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/SortMergeJoinTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/SortMergeJoinTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -17,11 +17,13 @@ */ package org.apache.phoenix.execute; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; + import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.StatementContext; @@ -39,433 +41,326 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import static org.junit.Assert.assertTrue; - - public class SortMergeJoinTest { - - @Test - public void testOptimizeSemiJoinForSortMergeJoinBug5956() throws SQLException, InterruptedException { - // mock for SortMergeJoinPlan - StatementContext statementContext = Mockito.mock(StatementContext.class); - PhoenixConnection phoenixConnection = Mockito.mock(PhoenixConnection.class); - when(statementContext.getConnection()).thenReturn(phoenixConnection); - ConnectionQueryServices connectionQueryServices = Mockito.mock(ConnectionQueryServices.class); - when(connectionQueryServices.getProps()).thenReturn(ReadOnlyProps.EMPTY_PROPS); - when(phoenixConnection.getQueryServices()).thenReturn(connectionQueryServices); - - List expressions = new ArrayList(); - Pair,List> lhsAndRhsJoinExpressions = Pair.newPair(expressions, expressions); - Pair, List> lhsAndRhsOrderByNodes = Pair., List> newPair( - new ArrayList(), - new ArrayList()); - - //test semi join rhs is null - JoinTableNode.JoinType joinType = JoinTableNode.JoinType.Semi; - ResultIterator lhsResultIterator = Mockito.mock(ResultIterator.class); + @Test + public void testOptimizeSemiJoinForSortMergeJoinBug5956() + throws SQLException, InterruptedException { + // mock for SortMergeJoinPlan + StatementContext statementContext = Mockito.mock(StatementContext.class); + PhoenixConnection phoenixConnection = Mockito.mock(PhoenixConnection.class); + when(statementContext.getConnection()).thenReturn(phoenixConnection); + ConnectionQueryServices connectionQueryServices = Mockito.mock(ConnectionQueryServices.class); + when(connectionQueryServices.getProps()).thenReturn(ReadOnlyProps.EMPTY_PROPS); + when(phoenixConnection.getQueryServices()).thenReturn(connectionQueryServices); + + List expressions = new ArrayList(); + Pair, List> lhsAndRhsJoinExpressions = + Pair.newPair(expressions, expressions); + Pair, List> lhsAndRhsOrderByNodes = Pair., + List> newPair(new ArrayList(), new ArrayList()); + + // test semi join rhs is null + JoinTableNode.JoinType joinType = JoinTableNode.JoinType.Semi; + ResultIterator lhsResultIterator = Mockito.mock(ResultIterator.class); + Tuple tuple = Mockito.mock(Tuple.class); + when(lhsResultIterator.next()).thenReturn(tuple); + QueryPlan lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + QueryPlan rhsQueryPlan = Mockito.mock(QueryPlan.class); + ResultIterator rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenReturn(null); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + SortMergeJoinPlan sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + SortMergeJoinPlan.SemiAntiJoinIterator semiAntiJoinIterator = + (SortMergeJoinPlan.SemiAntiJoinIterator) 
sortMergeJoinPlan.iterator(); + Tuple resultTuple = semiAntiJoinIterator.next(); + assertTrue(resultTuple == null); + assertTrue(semiAntiJoinIterator.isEnd()); + + // test semi join lhs is null + joinType = JoinTableNode.JoinType.Semi; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + tuple = Mockito.mock(Tuple.class); + when(rhsResultIterator.next()).thenReturn(tuple); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + semiAntiJoinIterator = (SortMergeJoinPlan.SemiAntiJoinIterator) sortMergeJoinPlan.iterator(); + resultTuple = semiAntiJoinIterator.next(); + assertTrue(resultTuple == null); + assertTrue(semiAntiJoinIterator.isEnd()); + + // test anti join lhs is null + joinType = JoinTableNode.JoinType.Anti; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + tuple = Mockito.mock(Tuple.class); + when(rhsResultIterator.next()).thenReturn(tuple); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + semiAntiJoinIterator = (SortMergeJoinPlan.SemiAntiJoinIterator) sortMergeJoinPlan.iterator(); + resultTuple = semiAntiJoinIterator.next(); + assertTrue(resultTuple == null); + assertTrue(semiAntiJoinIterator.isEnd()); + } + + private final long INIT_LATENCY = 10 * 1000L; + + @Test + public void testSortMergeFastReturnNullBug5793() throws SQLException, InterruptedException { + // mock for SortMergeJoinPlan + StatementContext statementContext = Mockito.mock(StatementContext.class); + PhoenixConnection phoenixConnection = Mockito.mock(PhoenixConnection.class); + when(statementContext.getConnection()).thenReturn(phoenixConnection); + ConnectionQueryServices connectionQueryServices = Mockito.mock(ConnectionQueryServices.class); + when(connectionQueryServices.getProps()).thenReturn(ReadOnlyProps.EMPTY_PROPS); + when(phoenixConnection.getQueryServices()).thenReturn(connectionQueryServices); + + List expressions = new ArrayList(); + Pair, List> lhsAndRhsJoinExpressions = + Pair.newPair(expressions, expressions); + Pair, List> lhsAndRhsOrderByNodes = Pair., + List> newPair(new ArrayList(), new ArrayList()); + + // test inner join, lhs long latency and rhs return null. 
+ JoinTableNode.JoinType joinType = JoinTableNode.JoinType.Inner; + ResultIterator lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenAnswer(longLatencyInit()); + QueryPlan lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + QueryPlan rhsQueryPlan = Mockito.mock(QueryPlan.class); + ResultIterator rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenReturn(null); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + SortMergeJoinPlan sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + SortMergeJoinPlan.BasicJoinIterator sortMergeJoinResultIterator = + (SortMergeJoinPlan.BasicJoinIterator) sortMergeJoinPlan.iterator(); + + long startTime = System.currentTimeMillis(); + Tuple resultTuple = sortMergeJoinResultIterator.next(); + long elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test inner join, lhs return null and rhs long latency. + joinType = JoinTableNode.JoinType.Inner; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + sortMergeJoinResultIterator = + (SortMergeJoinPlan.BasicJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test left join, lhs return null and rhs long latency. 
+ joinType = JoinTableNode.JoinType.Left; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + sortMergeJoinResultIterator = + (SortMergeJoinPlan.BasicJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test full join, lhs return null and rhs return null. + joinType = JoinTableNode.JoinType.Full; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenReturn(null); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + sortMergeJoinResultIterator = + (SortMergeJoinPlan.BasicJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(!sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test left semi join, lhs return null and rhs long latency. 
+ joinType = JoinTableNode.JoinType.Semi; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + SortMergeJoinPlan.SemiAntiJoinIterator sortMergeJoinSemiAntiResultIterator = + (SortMergeJoinPlan.SemiAntiJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinSemiAntiResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test left semi join, lhs long latency and rhs return null. + joinType = JoinTableNode.JoinType.Semi; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenAnswer(longLatencyInit()); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenReturn(null); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + sortMergeJoinSemiAntiResultIterator = + (SortMergeJoinPlan.SemiAntiJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinSemiAntiResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + + // test left semi join, lhs return null and rhs long latency. 
+ joinType = JoinTableNode.JoinType.Anti; + lhsResultIterator = Mockito.mock(ResultIterator.class); + when(lhsResultIterator.next()).thenReturn(null); + lhsQueryPlan = Mockito.mock(QueryPlan.class); + when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(lhsResultIterator); + + rhsQueryPlan = Mockito.mock(QueryPlan.class); + rhsResultIterator = Mockito.mock(ResultIterator.class); + when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); + when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())) + .thenReturn(rhsResultIterator); + + sortMergeJoinPlan = + new SortMergeJoinPlan(statementContext, null, null, joinType, lhsQueryPlan, rhsQueryPlan, + lhsAndRhsJoinExpressions, expressions, null, null, null, 0, true, lhsAndRhsOrderByNodes); + sortMergeJoinSemiAntiResultIterator = + (SortMergeJoinPlan.SemiAntiJoinIterator) sortMergeJoinPlan.iterator(); + + startTime = System.currentTimeMillis(); + resultTuple = sortMergeJoinSemiAntiResultIterator.next(); + elapsed = System.currentTimeMillis() - startTime; + + assertTrue(resultTuple == null); + assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); + assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); + assertTrue(elapsed < INIT_LATENCY); + } + + private Answer longLatencyInit() { + return new Answer() { + @Override + public Tuple answer(InvocationOnMock invocation) throws Throwable { + Thread.sleep(INIT_LATENCY); Tuple tuple = Mockito.mock(Tuple.class); - when(lhsResultIterator.next()).thenReturn(tuple); - QueryPlan lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - QueryPlan rhsQueryPlan = Mockito.mock(QueryPlan.class); - ResultIterator rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenReturn(null); - when(rhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - SortMergeJoinPlan sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - SortMergeJoinPlan.SemiAntiJoinIterator semiAntiJoinIterator = - (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - Tuple resultTuple = semiAntiJoinIterator.next(); - assertTrue(resultTuple == null); - assertTrue(semiAntiJoinIterator.isEnd()); - - //test semi join lhs is null - joinType = JoinTableNode.JoinType.Semi; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - tuple = Mockito.mock(Tuple.class); - when(rhsResultIterator.next()).thenReturn(tuple); - when(rhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - semiAntiJoinIterator = (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - resultTuple = semiAntiJoinIterator.next(); - 
assertTrue(resultTuple == null); - assertTrue(semiAntiJoinIterator.isEnd()); - - //test anti join lhs is null - joinType = JoinTableNode.JoinType.Anti; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - tuple = Mockito.mock(Tuple.class); - when(rhsResultIterator.next()).thenReturn(tuple); - when(rhsQueryPlan.iterator( - DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - semiAntiJoinIterator = (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - resultTuple = semiAntiJoinIterator.next(); - assertTrue(resultTuple == null); - assertTrue(semiAntiJoinIterator.isEnd()); - } - - private final long INIT_LATENCY = 10 * 1000L; - - @Test - public void testSortMergeFastReturnNullBug5793() throws SQLException, InterruptedException { - // mock for SortMergeJoinPlan - StatementContext statementContext = Mockito.mock(StatementContext.class); - PhoenixConnection phoenixConnection = Mockito.mock(PhoenixConnection.class); - when(statementContext.getConnection()).thenReturn(phoenixConnection); - ConnectionQueryServices connectionQueryServices = Mockito.mock(ConnectionQueryServices.class); - when(connectionQueryServices.getProps()).thenReturn(ReadOnlyProps.EMPTY_PROPS); - when(phoenixConnection.getQueryServices()).thenReturn(connectionQueryServices); - - List expressions = new ArrayList(); - Pair,List> lhsAndRhsJoinExpressions = Pair.newPair(expressions, expressions); - Pair, List> lhsAndRhsOrderByNodes = Pair., List> newPair( - new ArrayList(), - new ArrayList()); - - //test inner join, lhs long latency and rhs return null. 
- JoinTableNode.JoinType joinType = JoinTableNode.JoinType.Inner; - ResultIterator lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenAnswer(longLatencyInit()); - QueryPlan lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - QueryPlan rhsQueryPlan = Mockito.mock(QueryPlan.class); - ResultIterator rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenReturn(null); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - SortMergeJoinPlan sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - SortMergeJoinPlan.BasicJoinIterator sortMergeJoinResultIterator = - (SortMergeJoinPlan.BasicJoinIterator)sortMergeJoinPlan.iterator(); - - long startTime = System.currentTimeMillis(); - Tuple resultTuple = sortMergeJoinResultIterator.next(); - long elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - - //test inner join, lhs return null and rhs long latency. - joinType = JoinTableNode.JoinType.Inner; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - sortMergeJoinResultIterator = - (SortMergeJoinPlan.BasicJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - //test left join, lhs return null and rhs long latency. 
- joinType = JoinTableNode.JoinType.Left; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - sortMergeJoinResultIterator = (SortMergeJoinPlan.BasicJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - //test full join, lhs return null and rhs return null. - joinType = JoinTableNode.JoinType.Full; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenReturn(null); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - sortMergeJoinResultIterator = (SortMergeJoinPlan.BasicJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(!sortMergeJoinResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - //test left semi join, lhs return null and rhs long latency. 
- joinType = JoinTableNode.JoinType.Semi; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - SortMergeJoinPlan.SemiAntiJoinIterator sortMergeJoinSemiAntiResultIterator = - (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinSemiAntiResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - //test left semi join, lhs long latency and rhs return null. - joinType = JoinTableNode.JoinType.Semi; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenAnswer(longLatencyInit()); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenReturn(null); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - sortMergeJoinSemiAntiResultIterator = (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinSemiAntiResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - - //test left semi join, lhs return null and rhs long latency. 
- joinType = JoinTableNode.JoinType.Anti; - lhsResultIterator = Mockito.mock(ResultIterator.class); - when(lhsResultIterator.next()).thenReturn(null); - lhsQueryPlan = Mockito.mock(QueryPlan.class); - when(lhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(lhsResultIterator); - - rhsQueryPlan = Mockito.mock(QueryPlan.class); - rhsResultIterator = Mockito.mock(ResultIterator.class); - when(rhsResultIterator.next()).thenAnswer(longLatencyInit()); - when(rhsQueryPlan.iterator(DefaultParallelScanGrouper.getInstance())).thenReturn(rhsResultIterator); - - sortMergeJoinPlan = new SortMergeJoinPlan( - statementContext, - null, - null, - joinType, - lhsQueryPlan, - rhsQueryPlan, - lhsAndRhsJoinExpressions, - expressions, - null, - null, - null, - 0, - true, - lhsAndRhsOrderByNodes); - sortMergeJoinSemiAntiResultIterator = - (SortMergeJoinPlan.SemiAntiJoinIterator)sortMergeJoinPlan.iterator(); - - startTime = System.currentTimeMillis(); - resultTuple = sortMergeJoinSemiAntiResultIterator.next(); - elapsed = System.currentTimeMillis() - startTime; - - assertTrue(resultTuple == null); - assertTrue(sortMergeJoinSemiAntiResultIterator.isJoinResultNullBecauseOneSideNull()); - assertTrue(sortMergeJoinSemiAntiResultIterator.isInitialized()); - assertTrue(elapsed < INIT_LATENCY); - } - - private Answer longLatencyInit() { - return new Answer() { - @Override - public Tuple answer(InvocationOnMock invocation) throws Throwable { - Thread.sleep(INIT_LATENCY); - Tuple tuple = Mockito.mock(Tuple.class); - return tuple; - } - }; - } + return tuple; + } + }; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java index f9e8c331a8f..f4e494e159d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,109 +63,130 @@ import org.apache.phoenix.schema.types.PIntegerArray; import org.apache.phoenix.schema.types.PVarcharArray; import org.apache.phoenix.schema.types.PhoenixArray; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - @SuppressWarnings("rawtypes") public class UnnestArrayPlanTest { - - private static final StatementContext CONTEXT; - static { - try { - PhoenixConnection connection = DriverManager.getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS).unwrap(PhoenixConnection.class); - PhoenixStatement stmt = new PhoenixStatement(connection); - ColumnResolver resolver = FromCompiler.getResolverForQuery(SelectStatement.SELECT_ONE, connection); - CONTEXT = new StatementContext(stmt, resolver, new Scan(), new SequenceManager(stmt)); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - @Test - public void testUnnestIntegerArrays() throws Exception { - testUnnestArrays(PIntegerArray.INSTANCE, Arrays.asList(new Object[] {1, 10}, new Object[] {2, 20}), false); - } - - @Test - public void testUnnestIntegerArraysWithOrdinality() throws Exception { - testUnnestArrays(PIntegerArray.INSTANCE, Arrays.asList(new Object[] {1, 10}, new Object[] {2, 20}), true); - } - - @Test - public void testUnnestVarcharArrays() throws Exception { - testUnnestArrays(PVarcharArray.INSTANCE, Arrays.asList(new Object[] {"1", "10"}, new Object[] {"2", "20"}), false); - } - - @Test - public void testUnnestVarcharArraysWithOrdinality() throws Exception { - testUnnestArrays(PVarcharArray.INSTANCE, Arrays.asList(new Object[] {"1", "10"}, new Object[] {"2", "20"}), true); - } - - @Test - public void testUnnestEmptyArrays() throws Exception { - testUnnestArrays(PIntegerArray.INSTANCE, Arrays.asList(new Object[] {1, 10}, new Object[]{}, new Object[] {2, 20}), false); - } - - @Test - public void testUnnestEmptyArraysWithOrdinality() throws Exception { - testUnnestArrays(PIntegerArray.INSTANCE, Arrays.asList(new Object[] {1, 10}, new Object[]{}, new Object[] {2, 20}), true); + + private static final StatementContext CONTEXT; + static { + try { + PhoenixConnection connection = + DriverManager.getConnection(JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS) + .unwrap(PhoenixConnection.class); + PhoenixStatement stmt = new PhoenixStatement(connection); + ColumnResolver resolver = + FromCompiler.getResolverForQuery(SelectStatement.SELECT_ONE, connection); + CONTEXT = new StatementContext(stmt, resolver, new Scan(), new SequenceManager(stmt)); + } catch (SQLException e) { + throw new RuntimeException(e); } - - private void testUnnestArrays(PArrayDataType arrayType, List arrays, boolean withOrdinality) throws Exception { - PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - List tuples = toTuples(arrayType, arrays); - LiteralResultIterationPlan subPlan = new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, - TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, null, OrderBy.EMPTY_ORDER_BY, null); - LiteralExpression dummy = LiteralExpression.newConstant(null, 
arrayType); - RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0); - UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality); - PName colName = PNameFactory.newName("ELEM"); - PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(), - HConstants.LATEST_TIMESTAMP); - colName = PNameFactory.newName("IDX"); - PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(), - HConstants.LATEST_TIMESTAMP) : null; - List columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn); - ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString()); - ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ResultIterator iterator = plan.iterator(); - for (Object[] o : flatten(arrays)) { - Tuple tuple = iterator.next(); - assertNotNull(tuple); - assertTrue(elemExpr.evaluate(tuple, ptr)); - Object elem = baseType.toObject(ptr); - assertEquals(o[0], elem); - if (withOrdinality) { - assertTrue(indexExpr.evaluate(tuple, ptr)); - Object index = PInteger.INSTANCE.toObject(ptr); - assertEquals(o[1], index); - } - } - assertNull(iterator.next()); + } + + @Test + public void testUnnestIntegerArrays() throws Exception { + testUnnestArrays(PIntegerArray.INSTANCE, + Arrays.asList(new Object[] { 1, 10 }, new Object[] { 2, 20 }), false); + } + + @Test + public void testUnnestIntegerArraysWithOrdinality() throws Exception { + testUnnestArrays(PIntegerArray.INSTANCE, + Arrays.asList(new Object[] { 1, 10 }, new Object[] { 2, 20 }), true); + } + + @Test + public void testUnnestVarcharArrays() throws Exception { + testUnnestArrays(PVarcharArray.INSTANCE, + Arrays.asList(new Object[] { "1", "10" }, new Object[] { "2", "20" }), false); + } + + @Test + public void testUnnestVarcharArraysWithOrdinality() throws Exception { + testUnnestArrays(PVarcharArray.INSTANCE, + Arrays.asList(new Object[] { "1", "10" }, new Object[] { "2", "20" }), true); + } + + @Test + public void testUnnestEmptyArrays() throws Exception { + testUnnestArrays(PIntegerArray.INSTANCE, + Arrays.asList(new Object[] { 1, 10 }, new Object[] {}, new Object[] { 2, 20 }), false); + } + + @Test + public void testUnnestEmptyArraysWithOrdinality() throws Exception { + testUnnestArrays(PIntegerArray.INSTANCE, + Arrays.asList(new Object[] { 1, 10 }, new Object[] {}, new Object[] { 2, 20 }), true); + } + + private void testUnnestArrays(PArrayDataType arrayType, List arrays, + boolean withOrdinality) throws Exception { + PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + List tuples = toTuples(arrayType, arrays); + LiteralResultIterationPlan subPlan = new LiteralResultIterationPlan(tuples, CONTEXT, + SelectStatement.SELECT_ONE, TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, + null, OrderBy.EMPTY_ORDER_BY, null); + LiteralExpression dummy = LiteralExpression.newConstant(null, arrayType); + RowKeyValueAccessor accessor = new 
RowKeyValueAccessor(Arrays.asList(dummy), 0); + UnnestArrayPlan plan = + new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality); + PName colName = PNameFactory.newName("ELEM"); + PColumn elemColumn = + new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), + baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP); + colName = PNameFactory.newName("IDX"); + PColumn indexColumn = withOrdinality + ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, + null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP) + : null; + List columns = + withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn); + ProjectedColumnExpression elemExpr = + new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString()); + ProjectedColumnExpression indexExpr = withOrdinality + ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) + : null; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ResultIterator iterator = plan.iterator(); + for (Object[] o : flatten(arrays)) { + Tuple tuple = iterator.next(); + assertNotNull(tuple); + assertTrue(elemExpr.evaluate(tuple, ptr)); + Object elem = baseType.toObject(ptr); + assertEquals(o[0], elem); + if (withOrdinality) { + assertTrue(indexExpr.evaluate(tuple, ptr)); + Object index = PInteger.INSTANCE.toObject(ptr); + assertEquals(o[1], index); + } } - - private List flatten(List arrays) { - List ret = Lists.newArrayList(); - for (Object[] array : arrays) { - for (int i = 0; i < array.length; i++) { - ret.add(new Object[] {array[i], i + 1}); - } - } - return ret; + assertNull(iterator.next()); + } + + private List flatten(List arrays) { + List ret = Lists.newArrayList(); + for (Object[] array : arrays) { + for (int i = 0; i < array.length; i++) { + ret.add(new Object[] { array[i], i + 1 }); + } } - - private List toTuples(PArrayDataType arrayType, List arrays) { - List tuples = Lists.newArrayListWithExpectedSize(arrays.size()); - PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE); - for (Object[] array : arrays) { - PhoenixArray pArray = new PhoenixArray(baseType, array); - byte[] bytes = arrayType.toBytes(pArray); - tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0, Cell.Type.Put))); - } - - return tuples; + return ret; + } + + private List toTuples(PArrayDataType arrayType, List arrays) { + List tuples = Lists.newArrayListWithExpectedSize(arrays.size()); + PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE); + for (Object[] array : arrays) { + PhoenixArray pArray = new PhoenixArray(baseType, array); + byte[] bytes = arrayType.toBytes(pArray); + tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, + bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0, Cell.Type.Put))); } + + return tuples; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java index dd7f67759d7..6279fa3545d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,138 +43,134 @@ import org.apache.phoenix.schema.types.PUnsignedLong; import org.apache.phoenix.schema.types.PUnsignedSmallint; import org.apache.phoenix.schema.types.PUnsignedTinyint; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link AbsFunction} */ public class AbsFunctionTest { - private static void testExpression(LiteralExpression literal, Number expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) literal); - Expression absFunction = new AbsFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - absFunction.evaluate(null, ptr); - Number result = - (Number) absFunction.getDataType().toObject(ptr, absFunction.getSortOrder()); - assertTrue(result.getClass().equals(expected.getClass())); - if (result instanceof BigDecimal) { - assertTrue(((BigDecimal) result).compareTo((BigDecimal) expected) == 0); - } else { - assertTrue(result.equals(expected)); - } - } - - private static void test(Number value, PNumericType dataType, Number expected) - throws SQLException { - LiteralExpression literal; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - testExpression(literal, expected); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - testExpression(literal, expected); - } - - private static void - testBatch(Number[] value, PNumericType dataType, ArrayList expected) - throws SQLException { - assertEquals(value.length, expected.size()); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected.get(i)); - } + private static void testExpression(LiteralExpression literal, Number expected) + throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + Expression absFunction = new AbsFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + absFunction.evaluate(null, ptr); + Number result = (Number) absFunction.getDataType().toObject(ptr, absFunction.getSortOrder()); + assertTrue(result.getClass().equals(expected.getClass())); + if (result instanceof BigDecimal) { + assertTrue(((BigDecimal) result).compareTo((BigDecimal) expected) == 0); + } else { + assertTrue(result.equals(expected)); } - - @Test - public void testAbsFunction() throws Exception { - Random random = new Random(); - Number[] value; - ArrayList expected = new ArrayList(); - value = new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234) }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(((BigDecimal) value[i]).abs()); - testBatch(value, PDecimal.INSTANCE, expected); - - value = new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, Float.MIN_VALUE, - Float.MAX_VALUE, -Float.MIN_VALUE, -Float.MAX_VALUE, random.nextFloat(), - random.nextFloat(), random.nextFloat() }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Float) value[i])); - 
testBatch(value, PFloat.INSTANCE, expected); - - value = new Float[] { 1.0f, 0.0f, 123.1234f, Float.MIN_VALUE, Float.MAX_VALUE, }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Float) value[i])); - testBatch(value, PUnsignedFloat.INSTANCE, expected); - - value = new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, Double.MIN_VALUE, - Double.MAX_VALUE, -Double.MIN_VALUE, -Double.MAX_VALUE, - random.nextDouble(), random.nextDouble(), random.nextDouble() }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Double) value[i])); - testBatch(value, PDouble.INSTANCE, expected); - - value = new Double[] { 1.0, 0.0, 123.1234, Double.MIN_VALUE, Double.MAX_VALUE, }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Double) value[i])); - testBatch(value, PUnsignedDouble.INSTANCE, expected); - - value = new Long[] { 1L, 0L, -1L, 123L, -123L, Long.MIN_VALUE + 1, Long.MAX_VALUE, - random.nextLong(), random.nextLong(), random.nextLong(), }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Long) value[i])); - testBatch(value, PLong.INSTANCE, expected); - - value = new Long[] { 1L, 0L, 123L, Long.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Long) value[i])); - testBatch(value, PUnsignedLong.INSTANCE, expected); - - value = new Integer[] { 1, 0, -1, 123, -123, Integer.MIN_VALUE + 1, Integer.MAX_VALUE, - random.nextInt(), random.nextInt(), random.nextInt(), }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Integer) value[i])); - testBatch(value, PInteger.INSTANCE, expected); - - value = new Integer[] { 1, 0, 123, Integer.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add(Math.abs((Integer) value[i])); - testBatch(value, PUnsignedInt.INSTANCE, expected); - - value = new Short[] { 1, 0, -1, 123, -123, Short.MIN_VALUE + 1, Short.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add((short) Math.abs((Short) value[i])); - testBatch(value, PSmallint.INSTANCE, expected); - - value = new Short[] { 1, 0, 123, Short.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add((short) Math.abs((Short) value[i])); - testBatch(value, PUnsignedSmallint.INSTANCE, expected); - - value = new Byte[] { 1, 0, -1, 123, -123, Byte.MIN_VALUE + 1, Byte.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add((byte) Math.abs((Byte) value[i])); - testBatch(value, PTinyint.INSTANCE, expected); - - value = new Byte[] { 1, 0, 123, Byte.MAX_VALUE }; - expected.clear(); - for (int i = 0; i < value.length; ++i) - expected.add((byte) Math.abs((Byte) value[i])); - testBatch(value, PUnsignedTinyint.INSTANCE, expected); + } + + private static void test(Number value, PNumericType dataType, Number expected) + throws SQLException { + LiteralExpression literal; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + testExpression(literal, expected); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + testExpression(literal, expected); + } + + private static void testBatch(Number[] value, PNumericType dataType, ArrayList expected) + throws SQLException { + assertEquals(value.length, expected.size()); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected.get(i)); } + } + + @Test + public void testAbsFunction() 
throws Exception { + Random random = new Random(); + Number[] value; + ArrayList expected = new ArrayList(); + value = new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), + BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234) }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(((BigDecimal) value[i]).abs()); + testBatch(value, PDecimal.INSTANCE, expected); + + value = new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, Float.MIN_VALUE, + Float.MAX_VALUE, -Float.MIN_VALUE, -Float.MAX_VALUE, random.nextFloat(), random.nextFloat(), + random.nextFloat() }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Float) value[i])); + testBatch(value, PFloat.INSTANCE, expected); + + value = new Float[] { 1.0f, 0.0f, 123.1234f, Float.MIN_VALUE, Float.MAX_VALUE, }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Float) value[i])); + testBatch(value, PUnsignedFloat.INSTANCE, expected); + + value = new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, Double.MIN_VALUE, Double.MAX_VALUE, + -Double.MIN_VALUE, -Double.MAX_VALUE, random.nextDouble(), random.nextDouble(), + random.nextDouble() }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Double) value[i])); + testBatch(value, PDouble.INSTANCE, expected); + + value = new Double[] { 1.0, 0.0, 123.1234, Double.MIN_VALUE, Double.MAX_VALUE, }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Double) value[i])); + testBatch(value, PUnsignedDouble.INSTANCE, expected); + + value = new Long[] { 1L, 0L, -1L, 123L, -123L, Long.MIN_VALUE + 1, Long.MAX_VALUE, + random.nextLong(), random.nextLong(), random.nextLong(), }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Long) value[i])); + testBatch(value, PLong.INSTANCE, expected); + + value = new Long[] { 1L, 0L, 123L, Long.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Long) value[i])); + testBatch(value, PUnsignedLong.INSTANCE, expected); + + value = new Integer[] { 1, 0, -1, 123, -123, Integer.MIN_VALUE + 1, Integer.MAX_VALUE, + random.nextInt(), random.nextInt(), random.nextInt(), }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Integer) value[i])); + testBatch(value, PInteger.INSTANCE, expected); + + value = new Integer[] { 1, 0, 123, Integer.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add(Math.abs((Integer) value[i])); + testBatch(value, PUnsignedInt.INSTANCE, expected); + + value = new Short[] { 1, 0, -1, 123, -123, Short.MIN_VALUE + 1, Short.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add((short) Math.abs((Short) value[i])); + testBatch(value, PSmallint.INSTANCE, expected); + + value = new Short[] { 1, 0, 123, Short.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add((short) Math.abs((Short) value[i])); + testBatch(value, PUnsignedSmallint.INSTANCE, expected); + + value = new Byte[] { 1, 0, -1, 123, -123, Byte.MIN_VALUE + 1, Byte.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add((byte) Math.abs((Byte) value[i])); + testBatch(value, PTinyint.INSTANCE, expected); + + value = new Byte[] { 1, 0, 123, Byte.MAX_VALUE }; + expected.clear(); + for (int i = 0; i < value.length; ++i) + expected.add((byte) 
Math.abs((Byte) value[i])); + testBatch(value, PUnsignedTinyint.INSTANCE, expected); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java index b2375fe4edd..5fe9bb8b1a1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,16 @@ */ package org.apache.phoenix.expression; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.Collections; + import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellBuilderFactory; +import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.query.QueryConstants; @@ -33,283 +40,284 @@ import org.apache.phoenix.schema.types.PDataType; import org.junit.Test; -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - public class AndExpressionTest { - private AndExpression createAnd(Expression lhs, Expression rhs) { - return new AndExpression(Arrays.asList(lhs, rhs)); + private AndExpression createAnd(Expression lhs, Expression rhs) { + return new AndExpression(Arrays.asList(lhs, rhs)); + } + + private AndExpression createAnd(Boolean x, Boolean y) { + return createAnd(LiteralExpression.newConstant(x), LiteralExpression.newConstant(y)); + } + + private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean rhs) { + AndExpression and = createAnd(lhs, rhs); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(); + boolean success = and.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + // Evaluating AND when values of both sides are known should immediately succeed + // and return the same result regardless of order. 
+ private void testImmediate(Boolean expected, Boolean a, Boolean b) { + testImmediateSingle(expected, a, b); + testImmediateSingle(expected, b, a); + } + + private PColumn pcolumn(final String name) { + return new PBaseColumn() { + @Override + public PName getName() { + return PNameFactory.newName(name); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public PName getFamilyName() { + return PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY); + } + + @Override + public int getPosition() { + return 0; + } + + @Override + public Integer getArraySize() { + return null; + } + + @Override + public byte[] getViewConstant() { + return new byte[0]; + } + + @Override + public boolean isViewReferenced() { + return false; + } + + @Override + public String getExpressionStr() { + return null; + } + + @Override + public boolean isRowTimestamp() { + return false; + } + + @Override + public boolean isDynamic() { + return false; + } + + @Override + public byte[] getColumnQualifierBytes() { + return null; + } + + @Override + public long getTimestamp() { + return 0; + } + + @Override + public boolean isDerived() { + return false; + } + + @Override + public boolean isExcluded() { + return false; + } + + @Override + public SortOrder getSortOrder() { + return null; + } + }; + } + + private KeyValueColumnExpression kvExpr(final String name) { + return new KeyValueColumnExpression(pcolumn(name)); + } + + private Cell createCell(String name, Boolean value) { + byte[] valueBytes = value == null ? null : value ? PBoolean.TRUE_BYTES : PBoolean.FALSE_BYTES; + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("row")) + .setFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).setQualifier(Bytes.toBytes(name)) + .setTimestamp(1).setType(Cell.Type.Put).setValue(valueBytes).build(); + } + + private void testPartialOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, + boolean leftFirst) { + KeyValueColumnExpression lhsExpr = kvExpr("LHS"); + KeyValueColumnExpression rhsExpr = kvExpr("RHS"); + AndExpression and = createAnd(lhsExpr, rhsExpr); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections. 
emptyList()); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + + // with no data available, should fail + boolean success = and.evaluate(tuple, out); + assertFalse(success); + + // with 1 datum available, should fail + if (leftFirst) { + tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); + } else { + tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); } - - private AndExpression createAnd(Boolean x, Boolean y) { - return createAnd(LiteralExpression.newConstant(x), LiteralExpression.newConstant(y)); + success = and.evaluate(tuple, out); + assertFalse(success); + + // with 2 data available, should succeed + tuple.setKeyValues(Arrays.asList(createCell("LHS", lhs), createCell("RHS", rhs))); + success = and.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + private void testPartialEvaluation(Boolean expected, Boolean x, Boolean y, boolean xFirst) { + testPartialOneSideFirst(expected, x, y, xFirst); + testPartialOneSideFirst(expected, y, x, !xFirst); + } + + private void testShortCircuitOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, + boolean leftFirst) { + KeyValueColumnExpression lhsExpr = kvExpr("LHS"); + KeyValueColumnExpression rhsExpr = kvExpr("RHS"); + AndExpression and = createAnd(lhsExpr, rhsExpr); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections. emptyList()); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + + // with no data available, should fail + boolean success = and.evaluate(tuple, out); + assertFalse(success); + + // with 1 datum available, should succeed + if (leftFirst) { + tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); + } else { + tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); } - - private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean rhs) { - AndExpression and = createAnd(lhs, rhs); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(); - boolean success = and.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - // Evaluating AND when values of both sides are known should immediately succeed - // and return the same result regardless of order. 
- private void testImmediate(Boolean expected, Boolean a, Boolean b) { - testImmediateSingle(expected, a, b); - testImmediateSingle(expected, b, a); - } - - private PColumn pcolumn(final String name) { - return new PBaseColumn() { - @Override public PName getName() { - return PNameFactory.newName(name); - } - - @Override public PDataType getDataType() { - return PBoolean.INSTANCE; - } - - @Override public PName getFamilyName() { - return PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY); - } - - @Override public int getPosition() { - return 0; - } - - @Override public Integer getArraySize() { - return null; - } - - @Override public byte[] getViewConstant() { - return new byte[0]; - } - - @Override public boolean isViewReferenced() { - return false; - } - - @Override public String getExpressionStr() { - return null; - } - - @Override public boolean isRowTimestamp() { - return false; - } - - @Override public boolean isDynamic() { - return false; - } - - @Override public byte[] getColumnQualifierBytes() { - return null; - } - - @Override public long getTimestamp() { - return 0; - } - - @Override public boolean isDerived() { - return false; - } - - @Override public boolean isExcluded() { - return false; - } - - @Override public SortOrder getSortOrder() { - return null; - } - }; - } - - private KeyValueColumnExpression kvExpr(final String name) { - return new KeyValueColumnExpression(pcolumn(name)); - } - - private Cell createCell(String name, Boolean value) { - byte[] valueBytes = value == null ? null : value ? PBoolean.TRUE_BYTES : PBoolean.FALSE_BYTES; - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes("row")) - .setFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) - .setQualifier(Bytes.toBytes(name)) - .setTimestamp(1) - .setType(Cell.Type.Put) - .setValue(valueBytes) - .build(); - } - - private void testPartialOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, boolean leftFirst) { - KeyValueColumnExpression lhsExpr = kvExpr("LHS"); - KeyValueColumnExpression rhsExpr = kvExpr("RHS"); - AndExpression and = createAnd(lhsExpr, rhsExpr); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections.emptyList()); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - - // with no data available, should fail - boolean success = and.evaluate(tuple, out); - assertFalse(success); - - // with 1 datum available, should fail - if (leftFirst) { - tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); - } else { - tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); - } - success = and.evaluate(tuple, out); - assertFalse(success); - - // with 2 data available, should succeed - tuple.setKeyValues(Arrays.asList(createCell("LHS", lhs), createCell("RHS", rhs))); - success = and.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - private void testPartialEvaluation(Boolean expected, Boolean x, Boolean y, boolean xFirst) { - testPartialOneSideFirst(expected, x, y, xFirst); - testPartialOneSideFirst(expected, y, x, !xFirst); - } - - private void testShortCircuitOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, boolean leftFirst) { - KeyValueColumnExpression lhsExpr = kvExpr("LHS"); - KeyValueColumnExpression rhsExpr = kvExpr("RHS"); - AndExpression and = createAnd(lhsExpr, rhsExpr); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections.emptyList()); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - - // with no data available, 
should fail - boolean success = and.evaluate(tuple, out); - assertFalse(success); - - // with 1 datum available, should succeed - if (leftFirst) { - tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); - } else { - tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); - } - success = and.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - - private void testShortCircuit(Boolean expected, Boolean x, Boolean y, boolean xFirst) { - testShortCircuitOneSideFirst(expected, x, y, xFirst); - testShortCircuitOneSideFirst(expected, y, x, !xFirst); - } - - @Test - public void testImmediateCertainty() { - testImmediate(true, true, true); - testImmediate(false, false, true); - testImmediate(false, false, false); - } - - @Test - public void testImmediateUncertainty() { - testImmediate(null, true, null); - testImmediate(false, false, null); - testImmediate(null, null, null); - } - - @Test - public void testPartialCertainty() { - // T AND T = T - // must evaluate both sides, regardless of order - testPartialEvaluation(true, true, true, true); - testPartialEvaluation(true, true, true, false); - - // T AND F = F - // must evaluate both sides if TRUE is evaluated first - testPartialEvaluation(false, true, false, true); - testPartialEvaluation(false, false, true, false); - } - - @Test - public void testPartialUncertainty() { - // T AND NULL = NULL - // must evaluate both sides, regardless of order of values or evaluation - testPartialEvaluation(null, true, null, true); - testPartialEvaluation(null, true, null, false); - testPartialEvaluation(null, null, true, true); - testPartialEvaluation(null, null, true, false); - - // must evaluate both sides if NULL is evaluated first - - // F AND NULL = FALSE - testPartialEvaluation(false, null, false, true); - testPartialEvaluation(false, false, null, false); - - // NULL AND NULL = NULL - testPartialEvaluation(null, null, null, true); - testPartialEvaluation(null, null, null, false); - } - - @Test - public void testShortCircuitCertainty() { - // need only to evaluate one side if FALSE is evaluated first - - // F AND F = F - testShortCircuit(false, false, false, true); - testShortCircuit(false, false, false, false); - - // T AND F = F - testShortCircuit(false, false, true, true); - testShortCircuit(false, true, false, false); - } - - @Test - public void testShortCircuitUncertainty() { - // need only to evaluate one side if FALSE is evaluated first - - // F AND NULL = FALSE - testShortCircuit(false, false, null, true); - testShortCircuit(false, null, false, false); - } - - @Test - public void testTruthTable() { - // See: https://en.wikipedia.org/wiki/Null_(SQL)#Comparisons_with_NULL_and_the_three-valued_logic_(3VL) - Boolean[][] testCases = new Boolean[][] { - // should short circuit? 
- // X, Y, if X first, if Y first, X AND Y, - { true, true, false, false, true, }, - { true, false, false, true, false, }, - { false, false, true, true, false, }, - { true, null, false, false, null, }, - { false, null, true, false, false, }, - { null, null, false, false, null, }, - }; - - for (Boolean[] testCase : testCases) { - Boolean x = testCase[0]; - Boolean y = testCase[1]; - boolean shouldShortCircuitWhenXEvaluatedFirst = testCase[2]; - boolean shouldShortCircuitWhenYEvaluatedFirst = testCase[3]; - Boolean expected = testCase[4]; - - // test both directions - testImmediate(expected, x, y); - - if (shouldShortCircuitWhenXEvaluatedFirst) { - testShortCircuit(expected, x, y, true); - } else { - testPartialEvaluation(expected, x, y, true); - } - - if (shouldShortCircuitWhenYEvaluatedFirst) { - testShortCircuit(expected, x, y, false); - } else { - testPartialEvaluation(expected, x, y, false); - } - } + success = and.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + private void testShortCircuit(Boolean expected, Boolean x, Boolean y, boolean xFirst) { + testShortCircuitOneSideFirst(expected, x, y, xFirst); + testShortCircuitOneSideFirst(expected, y, x, !xFirst); + } + + @Test + public void testImmediateCertainty() { + testImmediate(true, true, true); + testImmediate(false, false, true); + testImmediate(false, false, false); + } + + @Test + public void testImmediateUncertainty() { + testImmediate(null, true, null); + testImmediate(false, false, null); + testImmediate(null, null, null); + } + + @Test + public void testPartialCertainty() { + // T AND T = T + // must evaluate both sides, regardless of order + testPartialEvaluation(true, true, true, true); + testPartialEvaluation(true, true, true, false); + + // T AND F = F + // must evaluate both sides if TRUE is evaluated first + testPartialEvaluation(false, true, false, true); + testPartialEvaluation(false, false, true, false); + } + + @Test + public void testPartialUncertainty() { + // T AND NULL = NULL + // must evaluate both sides, regardless of order of values or evaluation + testPartialEvaluation(null, true, null, true); + testPartialEvaluation(null, true, null, false); + testPartialEvaluation(null, null, true, true); + testPartialEvaluation(null, null, true, false); + + // must evaluate both sides if NULL is evaluated first + + // F AND NULL = FALSE + testPartialEvaluation(false, null, false, true); + testPartialEvaluation(false, false, null, false); + + // NULL AND NULL = NULL + testPartialEvaluation(null, null, null, true); + testPartialEvaluation(null, null, null, false); + } + + @Test + public void testShortCircuitCertainty() { + // need only to evaluate one side if FALSE is evaluated first + + // F AND F = F + testShortCircuit(false, false, false, true); + testShortCircuit(false, false, false, false); + + // T AND F = F + testShortCircuit(false, false, true, true); + testShortCircuit(false, true, false, false); + } + + @Test + public void testShortCircuitUncertainty() { + // need only to evaluate one side if FALSE is evaluated first + + // F AND NULL = FALSE + testShortCircuit(false, false, null, true); + testShortCircuit(false, null, false, false); + } + + @Test + public void testTruthTable() { + // See: + // https://en.wikipedia.org/wiki/Null_(SQL)#Comparisons_with_NULL_and_the_three-valued_logic_(3VL) + Boolean[][] testCases = new Boolean[][] { + // should short circuit? 
+ // X, Y, if X first, if Y first, X AND Y, + { true, true, false, false, true, }, { true, false, false, true, false, }, + { false, false, true, true, false, }, { true, null, false, false, null, }, + { false, null, true, false, false, }, { null, null, false, false, null, }, }; + + for (Boolean[] testCase : testCases) { + Boolean x = testCase[0]; + Boolean y = testCase[1]; + boolean shouldShortCircuitWhenXEvaluatedFirst = testCase[2]; + boolean shouldShortCircuitWhenYEvaluatedFirst = testCase[3]; + Boolean expected = testCase[4]; + + // test both directions + testImmediate(expected, x, y); + + if (shouldShortCircuitWhenXEvaluatedFirst) { + testShortCircuit(expected, x, y, true); + } else { + testPartialEvaluation(expected, x, y, true); + } + + if (shouldShortCircuitWhenYEvaluatedFirst) { + testShortCircuit(expected, x, y, false); + } else { + testPartialEvaluation(expected, x, y, false); + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArithmeticOperationTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArithmeticOperationTest.java index 1b830f2b467..78c4e5c33b6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArithmeticOperationTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArithmeticOperationTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,268 +36,284 @@ import org.apache.phoenix.schema.types.PInteger; import org.junit.Test; - public class ArithmeticOperationTest { - // Addition - // result scale should be: max(ls, rs) - // result precision should be: max(lp - ls, rp - rs) + 1 + max(ls, rs) - @Test - public void testDecimalAddition() throws Exception { - LiteralExpression op1, op2, op3; - List children; - DecimalAddExpression e; - - op1 = LiteralExpression.newConstant(new BigDecimal("1234567890123456789012345678901"), PDecimal.INSTANCE, 31, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234567890123456789012345691246")); - - op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("12468.45")); - - // Exceeds precision. - op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Pass since we roll out imposing precisioin and scale. 
- op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); - op3 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); - children = Arrays.asList(op1, op2, op3); - e = new DecimalAddExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("99999999999999999999999999999999999999")); - - // Exceeds scale. - op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Decimal with no precision and scale. - op1 = LiteralExpression.newConstant(new BigDecimal("9999.1"), PDecimal.INSTANCE); - op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("10000.2111")); + // Addition + // result scale should be: max(ls, rs) + // result precision should be: max(lp - ls, rp - rs) + 1 + max(ls, rs) + @Test + public void testDecimalAddition() throws Exception { + LiteralExpression op1, op2, op3; + List children; + DecimalAddExpression e; + + op1 = LiteralExpression.newConstant(new BigDecimal("1234567890123456789012345678901"), + PDecimal.INSTANCE, 31, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234567890123456789012345691246")); + + op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("12468.45")); + + // Exceeds precision. + op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - @Test - public void testIntPlusDecimal() throws Exception { - LiteralExpression op1, op2; - List children; - DecimalAddExpression e; - - op1 = LiteralExpression.newConstant(new BigDecimal("1234.111"), PDecimal.INSTANCE); - assertNull(op1.getScale()); - op2 = LiteralExpression.newConstant(1, PInteger.INSTANCE); - children = Arrays.asList(op1, op2); - e = new DecimalAddExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1235.111")); + // Pass since we roll out imposing precisioin and scale. 
+ op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); + op3 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); + children = Arrays. asList(op1, op2, op3); + e = new DecimalAddExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, + new BigDecimal("99999999999999999999999999999999999999")); + + // Exceeds scale. + op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - // Subtraction - // result scale should be: max(ls, rs) - // result precision should be: max(lp - ls, rp - rs) + 1 + max(ls, rs) - @Test - public void testDecimalSubtraction() throws Exception { - LiteralExpression op1, op2, op3; - List children; - DecimalSubtractExpression e; - - op1 = LiteralExpression.newConstant(new BigDecimal("1234567890123456789012345678901"), PDecimal.INSTANCE, 31, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - children = Arrays.asList(op1, op2); - e = new DecimalSubtractExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234567890123456789012345666556")); - - op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); - children = Arrays.asList(op1, op2); - e = new DecimalSubtractExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("12221.55")); - - // Excceds precision - op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); - children = Arrays.asList(op1, op2); - e = new DecimalSubtractExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Pass since we roll up precision and scale imposing. - op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); - op3 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); - children = Arrays.asList(op1, op2, op3); - e = new DecimalSubtractExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("99999999999999999999999999999999999999")); - - // Exceeds scale. - op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); - children = Arrays.asList(op1, op2); - e = new DecimalSubtractExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Decimal with no precision and scale. 
- op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE); - op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); - children = Arrays.asList(op1, op2); - e = new DecimalSubtractExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1109.9889")); + // Decimal with no precision and scale. + op1 = LiteralExpression.newConstant(new BigDecimal("9999.1"), PDecimal.INSTANCE); + op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("10000.2111")); + } + + @Test + public void testIntPlusDecimal() throws Exception { + LiteralExpression op1, op2; + List children; + DecimalAddExpression e; + + op1 = LiteralExpression.newConstant(new BigDecimal("1234.111"), PDecimal.INSTANCE); + assertNull(op1.getScale()); + op2 = LiteralExpression.newConstant(1, PInteger.INSTANCE); + children = Arrays. asList(op1, op2); + e = new DecimalAddExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1235.111")); + } + + // Subtraction + // result scale should be: max(ls, rs) + // result precision should be: max(lp - ls, rp - rs) + 1 + max(ls, rs) + @Test + public void testDecimalSubtraction() throws Exception { + LiteralExpression op1, op2, op3; + List children; + DecimalSubtractExpression e; + + op1 = LiteralExpression.newConstant(new BigDecimal("1234567890123456789012345678901"), + PDecimal.INSTANCE, 31, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + children = Arrays. asList(op1, op2); + e = new DecimalSubtractExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234567890123456789012345666556")); + + op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); + children = Arrays. asList(op1, op2); + e = new DecimalSubtractExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("12221.55")); + + // Excceds precision + op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); + children = Arrays. asList(op1, op2); + e = new DecimalSubtractExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - // Multiplication - // result scale should be: ls + rs - // result precision should be: lp + rp - @Test - public void testDecimalMultiplication() throws Exception { - LiteralExpression op1, op2; - List children; - DecimalMultiplyExpression e; - - op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); - children = Arrays.asList(op1, op2); - e = new DecimalMultiplyExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1523990.25")); - - // Value too big, exceeds precision. 
- op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - children = Arrays.asList(op1, op2); - e = new DecimalMultiplyExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Values exceeds scale. - op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("1.45"), PDecimal.INSTANCE, 3, 2); - children = Arrays.asList(op1, op2); - e = new DecimalMultiplyExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Decimal with no precision and scale. - op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE); - assertNull(op1.getScale()); - op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); - children = Arrays.asList(op1, op2); - e = new DecimalMultiplyExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234.54321")); + // Pass since we roll up precision and scale imposing. + op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0); + op3 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0); + children = Arrays. asList(op1, op2, op3); + e = new DecimalSubtractExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, + new BigDecimal("99999999999999999999999999999999999999")); + + // Exceeds scale. + op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); + children = Arrays. asList(op1, op2); + e = new DecimalSubtractExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - // Division - // result scale should be: 31 - lp + ls - rs - // result precision should be: lp - ls + rp + scale - @Test - public void testDecimalDivision() throws Exception { - LiteralExpression op1, op2; - List children; - DecimalDivideExpression e; - - // The value should be 1234500.0000...00 because we set to scale to be 24. However, in - // PhoenixResultSet.getBigDecimal, the case to (BigDecimal) actually cause the scale to be eradicated. As - // a result, the resulting value does not have the right form. - op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2); - children = Arrays.asList(op1, op2); - e = new DecimalDivideExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1.2345E+6")); - - // Exceeds precision. 
- op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0); - op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2); - children = Arrays.asList(op1, op2); - e = new DecimalDivideExpression(children); - try { - e.evaluate(null, new ImmutableBytesWritable()); - fail("Evaluation should have failed"); - } catch (DataExceedsCapacityException ex) { - } - - // Decimal with no precision and scale. - op1 = LiteralExpression.newConstant(new BigDecimal("10"), PDecimal.INSTANCE); - op2 = LiteralExpression.newConstant(new BigDecimal("3"), PDecimal.INSTANCE, 5, 4); - assertEquals(Integer.valueOf(4),op2.getScale()); - children = Arrays.asList(op1, op2); - e = new DecimalDivideExpression(children); - assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("3.3333333333333333333333333333333333333")); + // Decimal with no precision and scale. + op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE); + op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); + children = Arrays. asList(op1, op2); + e = new DecimalSubtractExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1109.9889")); + } + + // Multiplication + // result scale should be: ls + rs + // result precision should be: lp + rp + @Test + public void testDecimalMultiplication() throws Exception { + LiteralExpression op1, op2; + List children; + DecimalMultiplyExpression e; + + op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2); + children = Arrays. asList(op1, op2); + e = new DecimalMultiplyExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1523990.25")); + + // Value too big, exceeds precision. + op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + children = Arrays. 
asList(op1, op2); + e = new DecimalMultiplyExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - @Test - public void testPerInvocationClone() throws Exception { - LiteralExpression op1, op2, op3, op4; - List children; - Expression e1, e2, e3, e4; - ImmutableBytesWritable ptr1 = new ImmutableBytesWritable(); - ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(); - - op1 = LiteralExpression.newConstant(5.0); - op2 = LiteralExpression.newConstant(3.0); - op3 = LiteralExpression.newConstant(2.0); - op4 = LiteralExpression.newConstant(1.0); - children = Arrays.asList(op1, op2); - e1 = new DoubleAddExpression(children); - children = Arrays.asList(op3, op4); - e2 = new DoubleSubtractExpression(children); - e3 = new DoubleAddExpression(Arrays.asList(e1, e2)); - e4 = new DoubleAddExpression(Arrays.asList(new RandomFunction(Arrays.asList(LiteralExpression.newConstant(null))), e3)); - CloneExpressionVisitor visitor = new CloneExpressionVisitor(); - Expression clone = e4.accept(visitor); - assertTrue(clone != e4); - e4.evaluate(null, ptr1); - clone.evaluate(null, ptr2); - assertNotEquals(ptr1, ptr2); - - e4 = new DoubleAddExpression(Arrays.asList(new RandomFunction(Arrays.asList(LiteralExpression.newConstant(1))), e3)); - visitor = new CloneExpressionVisitor(); - clone = e4.accept(visitor); - assertTrue(clone == e4); - e4.evaluate(null, ptr1); - clone.evaluate(null, ptr2); - assertEquals(ptr1, ptr2); + // Values exceeds scale. + op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("1.45"), PDecimal.INSTANCE, 3, 2); + children = Arrays. asList(op1, op2); + e = new DecimalMultiplyExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } - private static void assertEqualValue(Expression e, PDataType type, Object value) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean evaluated = e.evaluate(null, ptr); - assertTrue(evaluated); - assertEquals(value, type.toObject(ptr.get())); - CloneExpressionVisitor visitor = new CloneExpressionVisitor(); - Expression clone = e.accept(visitor); - evaluated = clone.evaluate(null, ptr); - assertTrue(evaluated); - assertEquals(value, type.toObject(ptr.get())); + // Decimal with no precision and scale. + op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE); + assertNull(op1.getScale()); + op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4); + children = Arrays. asList(op1, op2); + e = new DecimalMultiplyExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234.54321")); + } + + // Division + // result scale should be: 31 - lp + ls - rs + // result precision should be: lp - ls + rp + scale + @Test + public void testDecimalDivision() throws Exception { + LiteralExpression op1, op2; + List children; + DecimalDivideExpression e; + + // The value should be 1234500.0000...00 because we set to scale to be 24. However, in + // PhoenixResultSet.getBigDecimal, the case to (BigDecimal) actually cause the scale to be + // eradicated. As + // a result, the resulting value does not have the right form. 
+ op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2); + children = Arrays. asList(op1, op2); + e = new DecimalDivideExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1.2345E+6")); + + // Exceeds precision. + op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), + PDecimal.INSTANCE, 38, 0); + op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2); + children = Arrays. asList(op1, op2); + e = new DecimalDivideExpression(children); + try { + e.evaluate(null, new ImmutableBytesWritable()); + fail("Evaluation should have failed"); + } catch (DataExceedsCapacityException ex) { } + + // Decimal with no precision and scale. + op1 = LiteralExpression.newConstant(new BigDecimal("10"), PDecimal.INSTANCE); + op2 = LiteralExpression.newConstant(new BigDecimal("3"), PDecimal.INSTANCE, 5, 4); + assertEquals(Integer.valueOf(4), op2.getScale()); + children = Arrays. asList(op1, op2); + e = new DecimalDivideExpression(children); + assertEqualValue(e, PDecimal.INSTANCE, + new BigDecimal("3.3333333333333333333333333333333333333")); + } + + @Test + public void testPerInvocationClone() throws Exception { + LiteralExpression op1, op2, op3, op4; + List children; + Expression e1, e2, e3, e4; + ImmutableBytesWritable ptr1 = new ImmutableBytesWritable(); + ImmutableBytesWritable ptr2 = new ImmutableBytesWritable(); + + op1 = LiteralExpression.newConstant(5.0); + op2 = LiteralExpression.newConstant(3.0); + op3 = LiteralExpression.newConstant(2.0); + op4 = LiteralExpression.newConstant(1.0); + children = Arrays. asList(op1, op2); + e1 = new DoubleAddExpression(children); + children = Arrays. asList(op3, op4); + e2 = new DoubleSubtractExpression(children); + e3 = new DoubleAddExpression(Arrays. asList(e1, e2)); + e4 = new DoubleAddExpression(Arrays. asList( + new RandomFunction(Arrays. asList(LiteralExpression.newConstant(null))), e3)); + CloneExpressionVisitor visitor = new CloneExpressionVisitor(); + Expression clone = e4.accept(visitor); + assertTrue(clone != e4); + e4.evaluate(null, ptr1); + clone.evaluate(null, ptr2); + assertNotEquals(ptr1, ptr2); + + e4 = new DoubleAddExpression(Arrays. asList( + new RandomFunction(Arrays. 
asList(LiteralExpression.newConstant(1))), e3)); + visitor = new CloneExpressionVisitor(); + clone = e4.accept(visitor); + assertTrue(clone == e4); + e4.evaluate(null, ptr1); + clone.evaluate(null, ptr2); + assertEquals(ptr1, ptr2); + } + + private static void assertEqualValue(Expression e, PDataType type, Object value) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean evaluated = e.evaluate(null, ptr); + assertTrue(evaluated); + assertEquals(value, type.toObject(ptr.get())); + CloneExpressionVisitor visitor = new CloneExpressionVisitor(); + Expression clone = e.accept(visitor); + evaluated = clone.evaluate(null, ptr); + assertTrue(evaluated); + assertEquals(value, type.toObject(ptr.get())); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java index 5ce1855dff8..d5abefa3733 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayAppendFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,378 +29,417 @@ import org.apache.phoenix.expression.function.ArrayAppendFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.*; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayAppendFunctionTest { - private static void testExpression(LiteralExpression array, LiteralExpression element, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) array); - expressions.add(element); - - Expression arrayAppendFunction = new ArrayAppendFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayAppendFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) arrayAppendFunction.getDataType().toObject(ptr, expressions.get(0).getSortOrder(), array.getMaxLength(), array.getScale()); - assertTrue(result.equals(expected)); - } - - private static void test(PhoenixArray array, Object element, PDataType arrayDataType, Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) throws SQLException { - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, elementSortOrder, Determinism.ALWAYS); - testExpression(arrayLiteral, elementLiteral, expected); - } - - @Test - public void testArrayAppendFunction1() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{1, 2, -3, 4, 5}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, 
PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction2() throws Exception { - Object[] o = new Object[]{"1", "2", "3", "4"}; - Object[] o2 = new Object[]{"1", "2", "3", "4", "56"}; - Object element = "56"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction3() throws Exception { - //offset array short to int transition - Object[] o = new Object[Short.MAX_VALUE + 1]; - for (int i = 0; i < o.length; i++) { - o[i] = "a"; - } - Object[] o2 = new Object[Short.MAX_VALUE + 2]; - for (int i = 0; i < o2.length - 1; i++) { - o2[i] = "a"; - } - Object element = "b"; - o2[o2.length - 1] = element; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction4() throws Exception { - //offset array int - Object[] o = new Object[Short.MAX_VALUE + 7]; - for (int i = 0; i < o.length; i++) { - o[i] = "a"; - } - Object[] o2 = new Object[Short.MAX_VALUE + 8]; - for (int i = 0; i < o2.length - 1; i++) { - o2[i] = "a"; - } - Object element = "b"; - o2[o2.length - 1] = element; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunctionBoolean() throws Exception { - Boolean[] o = new Boolean[] { true, false, false, true }; - Boolean[] o2 = new Boolean[] { true, false, false, true, false }; - Boolean element = false; - PDataType baseType = PBoolean.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), - null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction6() throws Exception { - Object[] o = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)}; - Object[] o2 = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3), new Float(8.9)}; - Object element = 8.9; - PDataType baseType = PFloat.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction7() throws Exception { - Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object[] o2 = new Object[]{4.78, 
9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67}; - Object element = 12.67; - PDataType baseType = PDouble.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + private static void testExpression(LiteralExpression array, LiteralExpression element, + PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) array); + expressions.add(element); + + Expression arrayAppendFunction = new ArrayAppendFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayAppendFunction.evaluate(null, ptr); + PhoenixArray result = (PhoenixArray) arrayAppendFunction.getDataType().toObject(ptr, + expressions.get(0).getSortOrder(), array.getMaxLength(), array.getScale()); + assertTrue(result.equals(expected)); + } + + private static void test(PhoenixArray array, Object element, PDataType arrayDataType, + Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, + Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) + throws SQLException { + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, + arraySortOrder, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, + elementSortOrder, Determinism.ALWAYS); + testExpression(arrayLiteral, elementLiteral, expected); + } + + @Test + public void testArrayAppendFunction1() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 1, 2, -3, 4, 5 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction2() throws Exception { + Object[] o = new Object[] { "1", "2", "3", "4" }; + Object[] o2 = new Object[] { "1", "2", "3", "4", "56" }; + Object element = "56"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction3() throws Exception { + // offset array short to int transition + Object[] o = new Object[Short.MAX_VALUE + 1]; + for (int i = 0; i < o.length; i++) { + o[i] = "a"; } - - @Test - public void testArrayAppendFunction8() throws Exception { - Object[] o = new Object[]{123l, 677l, 98789l, -78989l, 66787l}; - Object[] o2 = new Object[]{123l, 677l, 98789l, -78989l, 66787l, 543l}; - Object element = 543l; - PDataType baseType = PLong.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); - test(arr, element, 
PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction9() throws Exception { - Object[] o = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34}; - Object[] o2 = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34, (short) 7}; - Object element = (short) 7; - PDataType baseType = PSmallint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + Object[] o2 = new Object[Short.MAX_VALUE + 2]; + for (int i = 0; i < o2.length - 1; i++) { + o2[i] = "a"; } - - @Test - public void testArrayAppendFunction10() throws Exception { - Object[] o = new Object[]{(byte) 4, (byte) 8, (byte) 9}; - Object[] o2 = new Object[]{(byte) 4, (byte) 8, (byte) 9, (byte) 6}; - Object element = (byte) 6; - PDataType baseType = PTinyint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction11() throws Exception { - Object[] o = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)}; - Object[] o2 = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785), BigDecimal.valueOf(-19)}; - Object element = BigDecimal.valueOf(-19); - PDataType baseType = PDecimal.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction12() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Date date = new java.sql.Date(currentDate.getTime()); - - Object[] o = new Object[]{date, date, date}; - Object[] o2 = new Object[]{date, date, date, date}; - PDataType baseType = PDate.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + Object element = "b"; + o2[o2.length - 1] = element; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction4() throws Exception { + // offset array int + Object[] o = new Object[Short.MAX_VALUE + 7]; + for (int i = 0; i < o.length; i++) { + o[i] = "a"; } - - @Test - public void testArrayAppendFunction13() throws Exception { - Calendar calendar = 
Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Time time = new java.sql.Time(currentDate.getTime()); - - Object[] o = new Object[]{time, time, time}; - Object[] o2 = new Object[]{time, time, time, time}; - PDataType baseType = PTime.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction14() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); - - Object[] o = new Object[]{timestamp, timestamp, timestamp}; - Object[] o2 = new Object[]{timestamp, timestamp, timestamp, timestamp}; - PDataType baseType = PTimestamp.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction15() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{1, 2, -3, 4, 5}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction16() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{1, 2, -3, 4, 5}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC); - } - - @Test - public void testArrayAppendFunction17() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{1, 2, -3, 4, 5}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayAppendFunction18() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"1", "2", "3", "4", "5"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayAppendFunction19() throws Exception { - 
Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"1", "2", "3", "4", "5"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testArrayAppendFunction20() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"1", "2", "3", "4", "5"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC); - } - - @Test - public void testArrayAppendFunction21() throws Exception { - Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object[] o2 = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67}; - Object element = 12.67; - PDataType baseType = PDouble.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayAppendFunction22() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"1", "2", "3", "4"}; - Object element = null; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForCorrectSeparatorBytes1() throws Exception { - Object[] o = new Object[]{"a", "b", "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.ASC, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) arrayLiteral); - expressions.add(elementLiteral); - - Expression arrayAppendFunction = new ArrayAppendFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayAppendFunction.evaluate(null, ptr); - byte[] expected = new byte[]{97, 0, 98, 0, 99, 0, 100, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes2() throws Exception { - Object[] o = new Object[]{"a", "b", "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS); - elementLiteral = 
LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) arrayLiteral); - expressions.add(elementLiteral); - - Expression arrayAppendFunction = new ArrayAppendFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayAppendFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-98, -1, -99, -1, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes3() throws Exception { - Object[] o = new Object[]{"a", null, null, "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) arrayLiteral); - expressions.add(elementLiteral); - - Expression arrayAppendFunction = new ArrayAppendFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayAppendFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-98, -1, 0, -2, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 5, 1}; - assertArrayEquals(expected, ptr.get()); + Object[] o2 = new Object[Short.MAX_VALUE + 8]; + for (int i = 0; i < o2.length - 1; i++) { + o2[i] = "a"; } + Object element = "b"; + o2[o2.length - 1] = element; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunctionBoolean() throws Exception { + Boolean[] o = new Boolean[] { true, false, false, true }; + Boolean[] o2 = new Boolean[] { true, false, false, true, false }; + Boolean element = false; + PDataType baseType = PBoolean.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction6() throws Exception { + Object[] o = new Object[] { new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3) }; + Object[] o2 = new Object[] { new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3), + new Float(8.9) }; + Object element = 8.9; + PDataType baseType = PFloat.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction7() throws Exception { + Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object[] o2 = new Object[] { 
4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67 }; + Object element = 12.67; + PDataType baseType = PDouble.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction8() throws Exception { + Object[] o = new Object[] { 123l, 677l, 98789l, -78989l, 66787l }; + Object[] o2 = new Object[] { 123l, 677l, 98789l, -78989l, 66787l, 543l }; + Object element = 543l; + PDataType baseType = PLong.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction9() throws Exception { + Object[] o = new Object[] { (short) 34, (short) -23, (short) -89, (short) 999, (short) 34 }; + Object[] o2 = + new Object[] { (short) 34, (short) -23, (short) -89, (short) 999, (short) 34, (short) 7 }; + Object element = (short) 7; + PDataType baseType = PSmallint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction10() throws Exception { + Object[] o = new Object[] { (byte) 4, (byte) 8, (byte) 9 }; + Object[] o2 = new Object[] { (byte) 4, (byte) 8, (byte) 9, (byte) 6 }; + Object element = (byte) 6; + PDataType baseType = PTinyint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction11() throws Exception { + Object[] o = new Object[] { BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), + BigDecimal.valueOf(785) }; + Object[] o2 = new Object[] { BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), + BigDecimal.valueOf(785), BigDecimal.valueOf(-19) }; + Object element = BigDecimal.valueOf(-19); + PDataType baseType = PDecimal.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction12() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Date date = new java.sql.Date(currentDate.getTime()); + + Object[] o = new Object[] { date, date, date }; + Object[] o2 = new Object[] { date, date, date, date }; + PDataType baseType = PDate.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new 
PhoenixArray(baseType, o2); + test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction13() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Time time = new java.sql.Time(currentDate.getTime()); + + Object[] o = new Object[] { time, time, time }; + Object[] o2 = new Object[] { time, time, time, time }; + PDataType baseType = PTime.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction14() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); + + Object[] o = new Object[] { timestamp, timestamp, timestamp }; + Object[] o2 = new Object[] { timestamp, timestamp, timestamp, timestamp }; + PDataType baseType = PTimestamp.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction15() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 1, 2, -3, 4, 5 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction16() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 1, 2, -3, 4, 5 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC); + } + + @Test + public void testArrayAppendFunction17() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 1, 2, -3, 4, 5 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayAppendFunction18() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "1", "2", "3", "4", "5" }; + Object element = "5"; + PDataType baseType = PChar.INSTANCE; + + 
PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayAppendFunction19() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "1", "2", "3", "4", "5" }; + Object element = "5"; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testArrayAppendFunction20() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "1", "2", "3", "4", "5" }; + Object element = "5"; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC); + } + + @Test + public void testArrayAppendFunction21() throws Exception { + Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object[] o2 = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE, 12.67 }; + Object element = 12.67; + PDataType baseType = PDouble.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayAppendFunction22() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "1", "2", "3", "4" }; + Object element = null; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForCorrectSeparatorBytes1() throws Exception { + Object[] o = new Object[] { "a", "b", "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.ASC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) arrayLiteral); + expressions.add(elementLiteral); + + Expression arrayAppendFunction = new ArrayAppendFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayAppendFunction.evaluate(null, ptr); + byte[] expected = new byte[] { 97, 0, 98, 0, 99, 0, 100, 0, 0, 0, -128, 1, -128, 3, -128, 5, + -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void 
testForCorrectSeparatorBytes2() throws Exception { + Object[] o = new Object[] { "a", "b", "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.DESC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) arrayLiteral); + expressions.add(elementLiteral); + + Expression arrayAppendFunction = new ArrayAppendFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayAppendFunction.evaluate(null, ptr); + byte[] expected = new byte[] { -98, -1, -99, -1, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, + -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes3() throws Exception { + Object[] o = new Object[] { "a", null, null, "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.DESC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) arrayLiteral); + expressions.add(elementLiteral); + + Expression arrayAppendFunction = new ArrayAppendFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayAppendFunction.evaluate(null, ptr); + byte[] expected = new byte[] { -98, -1, 0, -2, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, + -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 5, 1 }; + assertArrayEquals(expected, ptr.get()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java index 20ae291ca3e..4e2e4a07620 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression; import static org.junit.Assert.assertArrayEquals; @@ -55,682 +54,709 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PVarcharArray; import org.apache.phoenix.schema.types.PhoenixArray; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayConcatFunctionTest { - private static void testExpression(LiteralExpression array1, LiteralExpression array2, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) array1); - expressions.add(array2); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) arrayConcatFunction.getDataType().toObject(ptr, expressions.get(0).getSortOrder(), array1.getMaxLength(), array1.getScale()); - assertEquals(expected, result); - } - - private static void test(PhoenixArray array1, PhoenixArray array2, PDataType array1DataType, Integer arr1MaxLen, Integer arr1Scale, PDataType array2DataType, Integer arr2MaxLen, Integer arr2Scale, PhoenixArray expected, SortOrder array1SortOrder, SortOrder array2SortOrder) throws SQLException { - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(array1, array1DataType, arr1MaxLen, arr1Scale, array1SortOrder, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(array2, array2DataType, arr2MaxLen, arr2Scale, array2SortOrder, Determinism.ALWAYS); - testExpression(array1Literal, array2Literal, expected); - } - - @Test - public void testChar1() throws SQLException { - Object[] o1 = new Object[]{"aa", "bb"}; - Object[] o2 = new Object[]{"c", "d"}; - Object[] e = new Object[]{"aa", "bb", "c", "d"}; - PDataType type = PCharArray.INSTANCE; - PDataType base = PChar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC); - - } - - @Test - public void testChar2() throws SQLException { - Object[] o1 = new Object[]{"aa", "bb"}; - Object[] o2 = new Object[]{"cc", "dc", "ee"}; - Object[] e = new Object[]{"aa", "bb", "cc", "dc", "ee"}; - PDataType type = PCharArray.INSTANCE; - PDataType base = PChar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.ASC); - - } - - @Test(expected = DataExceedsCapacityException.class) - public void testChar3() throws SQLException { - Object[] o1 = new Object[]{"c", "d"}; - Object[] o2 = new Object[]{"aa", "bb"}; - Object[] e = new Object[]{"aa", "bb", "c", "d"}; - 
PDataType type = PCharArray.INSTANCE; - PDataType base = PChar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testInt1() throws SQLException { - Object[] o1 = new Object[]{1, 2}; - Object[] o2 = new Object[]{5, 6, 7}; - Object[] e = new Object[]{1, 2, 5, 6, 7}; - PDataType type = PIntegerArray.INSTANCE; - PDataType base = PInteger.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testFloat1() throws SQLException { - Object[] o1 = new Object[]{(float) 1.2, (float) 2}; - Object[] o2 = new Object[]{(float) 5, (float) 6, (float) 7}; - Object[] e = new Object[]{(float) 1.2, (float) 2, (float) 5, (float) 6, (float) 7}; - PDataType type = PFloatArray.INSTANCE; - PDataType base = PFloat.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testDouble1() throws SQLException { - Object[] o1 = new Object[]{(double) 1.2, (double) 2}; - Object[] o2 = new Object[]{(double) 5.2, (double) 6, (double) 7}; - Object[] e = new Object[]{(double) 1.2, (double) 2, (double) 5.2, (double) 6, (double) 7}; - PDataType type = PDoubleArray.INSTANCE; - PDataType base = PDouble.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testLong1() throws SQLException { - Object[] o1 = new Object[]{(long) 1, (long) 2}; - Object[] o2 = new Object[]{(long) 5, (long) 6, (long) 7}; - Object[] e = new Object[]{(long) 1, (long) 2, (long) 5, (long) 6, (long) 7}; - PDataType type = PLongArray.INSTANCE; - PDataType base = PLong.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, 
expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testShort1() throws SQLException { - Object[] o1 = new Object[]{(short) 1, (short) 2}; - Object[] o2 = new Object[]{(short) 5, (short) 6, (short) 7}; - Object[] e = new Object[]{(short) 1, (short) 2, (short) 5, (short) 6, (short) 7}; - PDataType type = PSmallintArray.INSTANCE; - PDataType base = PSmallint.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testBoolean1() throws SQLException { - Object[] o1 = new Object[]{true, true}; - Object[] o2 = new Object[]{false, false, false}; - Object[] e = new Object[]{true, true, false, false, false}; - PDataType type = PBooleanArray.INSTANCE; - PDataType base = PBoolean.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testTinyInt1() throws SQLException { - Object[] o1 = new Object[]{(byte) 2, (byte) 2}; - Object[] o2 = new Object[]{(byte) 5, (byte) 6, (byte) 7}; - Object[] e = new Object[]{(byte) 2, (byte) 2, (byte) 5, (byte) 6, (byte) 7}; - PDataType type = PTinyintArray.INSTANCE; - PDataType base = PTinyint.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testDate1() throws SQLException { - Object[] o1 = new Object[]{new Date(0l), new Date(0l)}; - Object[] o2 = new Object[]{new Date(0l), new Date(0l), new Date(0l)}; - Object[] e = new Object[]{new Date(0l), new Date(0l), new Date(0l), new Date(0l), new Date(0l)}; - PDataType type = PDateArray.INSTANCE; - PDataType base = PDate.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, 
type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testDecimal1() throws SQLException { - Object[] o1 = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)}; - Object[] o2 = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)}; - Object[] e = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34), BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)}; - PDataType type = PDecimalArray.INSTANCE; - PDataType base = PDecimal.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar1() throws SQLException { - Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{"c", "d"}; - Object[] e = new Object[]{"a", "b", "c", "d"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar2() throws SQLException { - Object[] o1 = new Object[]{"a"}; - Object[] o2 = new Object[]{"c", "d"}; - Object[] e = new Object[]{"a", "c", "d"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar3() throws SQLException { - Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{"c"}; - Object[] e = new Object[]{"a", "b", "c"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, 
SortOrder.ASC); - } - - @Test - public void testVarchar4() throws SQLException { - Object[] o1 = new Object[]{"a"}; - Object[] o2 = new Object[]{null, "c"}; - Object[] e = new Object[]{"a", null, "c"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar5() throws SQLException { - Object[] o1 = new Object[]{"a", null , null}; - Object[] o2 = new Object[]{null, null, "c"}; - Object[] e = new Object[]{"a", null, null, null, null, "c"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar6() throws SQLException { - Object[] o1 = new Object[]{"a", "b"}; - Object[] e = new Object[]{"a", "b"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = null; - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar7() throws SQLException { - Object[] o2 = new Object[]{"a", "b"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = null; - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = arr2; - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testVarchar8() throws SQLException { - Object[] o1 = new Object[]{"a", null, null, "b"}; - Object[] o2 = new Object[]{"c", null, "d", null, "e"}; - Object[] e = new Object[]{"a", null, null, "b", "c", null, "d", null, "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray 
expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test(expected = TypeMismatchException.class) - public void testVarchar9() throws SQLException { - Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{1, 2}; - - PhoenixArray arr1 = new PhoenixArray(PVarchar.INSTANCE, o1); - PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(PInteger.INSTANCE, o2); - test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWithIntOffsetArray() throws SQLException { - Object[] o1 = new Object[Short.MAX_VALUE + 7]; - Object[] o2 = new Object[]{"b", "b"}; - Object[] e = new Object[Short.MAX_VALUE + 9]; - for (int i = 0; i < o1.length; i++) { - o1[i] = "a"; - e[i] = "a"; - } - e[Short.MAX_VALUE + 7] = "b"; - e[Short.MAX_VALUE + 8] = "b"; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWithShortToIntOffsetArray() throws SQLException { - Object[] o1 = new Object[Short.MAX_VALUE + 1]; - Object[] o2 = new Object[]{"b", "b"}; - Object[] e = new Object[Short.MAX_VALUE + 3]; - for (int i = 0; i < o1.length; i++) { - o1[i] = "a"; - e[i] = "a"; - } - e[Short.MAX_VALUE + 2] = "b"; - e[Short.MAX_VALUE + 1] = "b"; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWithShortToIntOffsetArray2() throws SQLException { - Object[] o1 = new Object[Short.MAX_VALUE + 1]; - Object[] o2 = new Object[]{null, "b"}; - Object[] e = new Object[Short.MAX_VALUE + 3]; - for (int i = 0; i < o1.length; i++) { - o1[i] = "a"; - e[i] = "a"; - } - e[Short.MAX_VALUE + 1] = 
null; - e[Short.MAX_VALUE + 2] = "b"; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith10NullsAnd246Nulls()throws SQLException{ - Object[] o1 = new Object[11]; - Object[] o2 = new Object[247]; - Object[] e = new Object[258]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; - - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith0NullsAnd256Nulls()throws SQLException{ - Object[] o1 = new Object[1]; - Object[] o2 = new Object[257]; - Object[] e = new Object[258]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; - - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith256NullsAnd0Nulls()throws SQLException{ - Object[] o1 = new Object[257]; - Object[] o2 = new Object[1]; - Object[] e = new Object[258]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; - - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith255NullsAnd0Nulls()throws SQLException{ - Object[] o1 = new Object[256]; - Object[] o2 = new Object[1]; - Object[] e = new Object[257]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; 
- - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith0NullsAnd255Nulls()throws SQLException{ - Object[] o1 = new Object[1]; - Object[] o2 = new Object[256]; - Object[] e = new Object[257]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; - - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testWith10NullsAnd245Nulls()throws SQLException{ - Object[] o1 = new Object[11]; - Object[] o2 = new Object[246]; - Object[] e = new Object[257]; - o1[0] = "a"; - o2[o2.length - 1] = "a"; - e[e.length - 1] = "a"; - e[0] = "a"; - - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - PhoenixArray expected = new PhoenixArray(base, e); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); - test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForCorrectSeparatorBytes1() throws Exception { - Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{"c", "d", "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) array1Literal); - expressions.add(array2Literal); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - byte[] expected = new byte[]{97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes2() throws Exception { - 
Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{"c", "d", "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) array1Literal); - expressions.add(array2Literal); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - byte[] expected = new byte[]{97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes3() throws Exception { - Object[] o1 = new Object[]{"a", "b"}; - Object[] o2 = new Object[]{"c", "d", "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) array1Literal); - expressions.add(array2Literal); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-98, -1, -99, -1, -100, -1, -101, -1, -102, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes4() throws Exception { - Object[] o1 = new Object[]{"a", "b", null}; - Object[] o2 = new Object[]{null, "c", "d", "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - PhoenixArray arr2 = new PhoenixArray(base, o2); - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) array1Literal); - expressions.add(array2Literal); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - byte[] expected = new byte[]{97, 0, 98, 0, 0, -2, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 7, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes5() throws Exception { - Object[] o1 = new Object[]{"a", "b", null, null}; - Object[] o2 = new Object[]{null, "c", "d", "e"}; - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - - PhoenixArray arr1 = new PhoenixArray(base, o1); - 
PhoenixArray arr2 = new PhoenixArray(base, o2); - LiteralExpression array1Literal, array2Literal; - array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) array1Literal); - expressions.add(array2Literal); - - Expression arrayConcatFunction = new ArrayConcatFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayConcatFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-98, -1, -99, -1, 0, -3, -100, -1, -101, -1, -102, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 8, 1}; - assertArrayEquals(expected, ptr.get()); - } + private static void testExpression(LiteralExpression array1, LiteralExpression array2, + PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) array1); + expressions.add(array2); + + Expression arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + PhoenixArray result = (PhoenixArray) arrayConcatFunction.getDataType().toObject(ptr, + expressions.get(0).getSortOrder(), array1.getMaxLength(), array1.getScale()); + assertEquals(expected, result); + } + + private static void test(PhoenixArray array1, PhoenixArray array2, PDataType array1DataType, + Integer arr1MaxLen, Integer arr1Scale, PDataType array2DataType, Integer arr2MaxLen, + Integer arr2Scale, PhoenixArray expected, SortOrder array1SortOrder, SortOrder array2SortOrder) + throws SQLException { + LiteralExpression array1Literal, array2Literal; + array1Literal = LiteralExpression.newConstant(array1, array1DataType, arr1MaxLen, arr1Scale, + array1SortOrder, Determinism.ALWAYS); + array2Literal = LiteralExpression.newConstant(array2, array2DataType, arr2MaxLen, arr2Scale, + array2SortOrder, Determinism.ALWAYS); + testExpression(array1Literal, array2Literal, expected); + } + + @Test + public void testChar1() throws SQLException { + Object[] o1 = new Object[] { "aa", "bb" }; + Object[] o2 = new Object[] { "c", "d" }; + Object[] e = new Object[] { "aa", "bb", "c", "d" }; + PDataType type = PCharArray.INSTANCE; + PDataType base = PChar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC); + + } + + @Test + public void testChar2() throws SQLException { + Object[] o1 = new Object[] { "aa", "bb" }; + Object[] o2 = new Object[] { "cc", "dc", "ee" }; + Object[] e = new Object[] { "aa", "bb", "cc", "dc", "ee" }; + PDataType type = PCharArray.INSTANCE; + PDataType base = PChar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, 
SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.ASC); + + } + + @Test(expected = DataExceedsCapacityException.class) + public void testChar3() throws SQLException { + Object[] o1 = new Object[] { "c", "d" }; + Object[] o2 = new Object[] { "aa", "bb" }; + Object[] e = new Object[] { "aa", "bb", "c", "d" }; + PDataType type = PCharArray.INSTANCE; + PDataType base = PChar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testInt1() throws SQLException { + Object[] o1 = new Object[] { 1, 2 }; + Object[] o2 = new Object[] { 5, 6, 7 }; + Object[] e = new Object[] { 1, 2, 5, 6, 7 }; + PDataType type = PIntegerArray.INSTANCE; + PDataType base = PInteger.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testFloat1() throws SQLException { + Object[] o1 = new Object[] { (float) 1.2, (float) 2 }; + Object[] o2 = new Object[] { (float) 5, (float) 6, (float) 7 }; + Object[] e = new Object[] { (float) 1.2, (float) 2, (float) 5, (float) 6, (float) 7 }; + PDataType type = PFloatArray.INSTANCE; + PDataType base = PFloat.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testDouble1() throws SQLException { + Object[] o1 = new Object[] { (double) 1.2, (double) 2 }; + Object[] o2 = new Object[] { (double) 5.2, (double) 6, (double) 7 }; + Object[] e = new Object[] { (double) 1.2, (double) 2, (double) 5.2, (double) 6, (double) 7 }; + PDataType type = PDoubleArray.INSTANCE; + PDataType base = PDouble.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testLong1() throws SQLException { + Object[] o1 = new Object[] { (long) 1, (long) 2 }; + Object[] o2 = new Object[] { (long) 5, (long) 6, (long) 7 }; + Object[] e = new Object[] { (long) 1, (long) 2, 
(long) 5, (long) 6, (long) 7 }; + PDataType type = PLongArray.INSTANCE; + PDataType base = PLong.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testShort1() throws SQLException { + Object[] o1 = new Object[] { (short) 1, (short) 2 }; + Object[] o2 = new Object[] { (short) 5, (short) 6, (short) 7 }; + Object[] e = new Object[] { (short) 1, (short) 2, (short) 5, (short) 6, (short) 7 }; + PDataType type = PSmallintArray.INSTANCE; + PDataType base = PSmallint.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testBoolean1() throws SQLException { + Object[] o1 = new Object[] { true, true }; + Object[] o2 = new Object[] { false, false, false }; + Object[] e = new Object[] { true, true, false, false, false }; + PDataType type = PBooleanArray.INSTANCE; + PDataType base = PBoolean.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testTinyInt1() throws SQLException { + Object[] o1 = new Object[] { (byte) 2, (byte) 2 }; + Object[] o2 = new Object[] { (byte) 5, (byte) 6, (byte) 7 }; + Object[] e = new Object[] { (byte) 2, (byte) 2, (byte) 5, (byte) 6, (byte) 7 }; + PDataType type = PTinyintArray.INSTANCE; + PDataType base = PTinyint.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testDate1() throws SQLException { + Object[] o1 = new Object[] { new Date(0l), new Date(0l) }; + Object[] o2 = new Object[] { new Date(0l), new Date(0l), new Date(0l) }; + Object[] e = + new Object[] { new 
Date(0l), new Date(0l), new Date(0l), new Date(0l), new Date(0l) }; + PDataType type = PDateArray.INSTANCE; + PDataType base = PDate.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testDecimal1() throws SQLException { + Object[] o1 = new Object[] { BigDecimal.valueOf(32.4), BigDecimal.valueOf(34) }; + Object[] o2 = new Object[] { BigDecimal.valueOf(32.4), BigDecimal.valueOf(34) }; + Object[] e = new Object[] { BigDecimal.valueOf(32.4), BigDecimal.valueOf(34), + BigDecimal.valueOf(32.4), BigDecimal.valueOf(34) }; + PDataType type = PDecimalArray.INSTANCE; + PDataType base = PDecimal.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar1() throws SQLException { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { "c", "d" }; + Object[] e = new Object[] { "a", "b", "c", "d" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar2() throws SQLException { + Object[] o1 = new Object[] { "a" }; + Object[] o2 = new Object[] { "c", "d" }; + Object[] e = new Object[] { "a", "c", "d" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar3() throws SQLException { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { "c" }; + Object[] e = new Object[] { "a", "b", "c" }; + PDataType type = PVarcharArray.INSTANCE; 
+ PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar4() throws SQLException { + Object[] o1 = new Object[] { "a" }; + Object[] o2 = new Object[] { null, "c" }; + Object[] e = new Object[] { "a", null, "c" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar5() throws SQLException { + Object[] o1 = new Object[] { "a", null, null }; + Object[] o2 = new Object[] { null, null, "c" }; + Object[] e = new Object[] { "a", null, null, null, null, "c" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar6() throws SQLException { + Object[] o1 = new Object[] { "a", "b" }; + Object[] e = new Object[] { "a", "b" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = null; + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar7() throws SQLException { + Object[] o2 = new Object[] { "a", "b" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = null; + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = arr2; + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, 
null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testVarchar8() throws SQLException { + Object[] o1 = new Object[] { "a", null, null, "b" }; + Object[] o2 = new Object[] { "c", null, "d", null, "e" }; + Object[] e = new Object[] { "a", null, null, "b", "c", null, "d", null, "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test(expected = TypeMismatchException.class) + public void testVarchar9() throws SQLException { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { 1, 2 }; + + PhoenixArray arr1 = new PhoenixArray(PVarchar.INSTANCE, o1); + PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(PInteger.INSTANCE, o2); + test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, + SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, + SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, + SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, + SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWithIntOffsetArray() throws SQLException { + Object[] o1 = new Object[Short.MAX_VALUE + 7]; + Object[] o2 = new Object[] { "b", "b" }; + Object[] e = new Object[Short.MAX_VALUE + 9]; + for (int i = 0; i < o1.length; i++) { + o1[i] = "a"; + e[i] = "a"; + } + e[Short.MAX_VALUE + 7] = "b"; + e[Short.MAX_VALUE + 8] = "b"; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWithShortToIntOffsetArray() throws SQLException { + Object[] o1 = new Object[Short.MAX_VALUE + 1]; + Object[] o2 = new Object[] { "b", "b" }; + Object[] e = new Object[Short.MAX_VALUE + 3]; + for (int i = 0; i < o1.length; i++) { + o1[i] = "a"; + e[i] = "a"; + } + e[Short.MAX_VALUE + 2] = "b"; + e[Short.MAX_VALUE + 1] = "b"; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, 
SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWithShortToIntOffsetArray2() throws SQLException { + Object[] o1 = new Object[Short.MAX_VALUE + 1]; + Object[] o2 = new Object[] { null, "b" }; + Object[] e = new Object[Short.MAX_VALUE + 3]; + for (int i = 0; i < o1.length; i++) { + o1[i] = "a"; + e[i] = "a"; + } + e[Short.MAX_VALUE + 1] = null; + e[Short.MAX_VALUE + 2] = "b"; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith10NullsAnd246Nulls() throws SQLException { + Object[] o1 = new Object[11]; + Object[] o2 = new Object[247]; + Object[] e = new Object[258]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith0NullsAnd256Nulls() throws SQLException { + Object[] o1 = new Object[1]; + Object[] o2 = new Object[257]; + Object[] e = new Object[258]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith256NullsAnd0Nulls() throws SQLException { + Object[] o1 = new Object[257]; + Object[] o2 = new Object[1]; + Object[] e = new Object[258]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + 
test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith255NullsAnd0Nulls() throws SQLException { + Object[] o1 = new Object[256]; + Object[] o2 = new Object[1]; + Object[] e = new Object[257]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith0NullsAnd255Nulls() throws SQLException { + Object[] o1 = new Object[1]; + Object[] o2 = new Object[256]; + Object[] e = new Object[257]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testWith10NullsAnd245Nulls() throws SQLException { + Object[] o1 = new Object[11]; + Object[] o2 = new Object[246]; + Object[] e = new Object[257]; + o1[0] = "a"; + o2[o2.length - 1] = "a"; + e[e.length - 1] = "a"; + e[0] = "a"; + + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + PhoenixArray expected = new PhoenixArray(base, e); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC); + test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testForCorrectSeparatorBytes1() throws Exception { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { "c", "d", "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + LiteralExpression array1Literal, array2Literal; + array1Literal = + LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); + 
array2Literal = + LiteralExpression.newConstant(arr2, type, null, null, SortOrder.ASC, Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) array1Literal); + expressions.add(array2Literal); + + Expression arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + byte[] expected = new byte[] { 97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, + -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes2() throws Exception { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { "c", "d", "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + LiteralExpression array1Literal, array2Literal; + array1Literal = + LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); + array2Literal = + LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) array1Literal); + expressions.add(array2Literal); + + Expression arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + byte[] expected = new byte[] { 97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, + -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes3() throws Exception { + Object[] o1 = new Object[] { "a", "b" }; + Object[] o2 = new Object[] { "c", "d", "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + LiteralExpression array1Literal, array2Literal; + array1Literal = + LiteralExpression.newConstant(arr1, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + array2Literal = + LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) array1Literal); + expressions.add(array2Literal); + + Expression arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + byte[] expected = new byte[] { -98, -1, -99, -1, -100, -1, -101, -1, -102, -1, -1, -1, -128, 1, + -128, 3, -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes4() throws Exception { + Object[] o1 = new Object[] { "a", "b", null }; + Object[] o2 = new Object[] { null, "c", "d", "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + LiteralExpression array1Literal, array2Literal; + array1Literal = + LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS); + array2Literal = + LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) array1Literal); + expressions.add(array2Literal); + + Expression 
arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + byte[] expected = new byte[] { 97, 0, 98, 0, 0, -2, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, + 3, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 7, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes5() throws Exception { + Object[] o1 = new Object[] { "a", "b", null, null }; + Object[] o2 = new Object[] { null, "c", "d", "e" }; + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + + PhoenixArray arr1 = new PhoenixArray(base, o1); + PhoenixArray arr2 = new PhoenixArray(base, o2); + LiteralExpression array1Literal, array2Literal; + array1Literal = + LiteralExpression.newConstant(arr1, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + array2Literal = + LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) array1Literal); + expressions.add(array2Literal); + + Expression arrayConcatFunction = new ArrayConcatFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayConcatFunction.evaluate(null, ptr); + byte[] expected = + new byte[] { -98, -1, -99, -1, 0, -3, -100, -1, -101, -1, -102, -1, -1, -1, -128, 1, -128, 3, + -128, 5, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 8, 1 }; + assertArrayEquals(expected, ptr.get()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConstructorExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConstructorExpressionTest.java index 83db7425903..3c44625c347 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConstructorExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConstructorExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,47 +27,50 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class ArrayConstructorExpressionTest { - - protected static final LiteralExpression CONSTANT_EXPRESSION = LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - protected static final byte[] BYTE_ARRAY1 = new byte[]{1,2,3,4,5}; - protected static final byte[] BYTE_ARRAY2 = new byte[]{6,7,8}; - protected Expression FALSE_EVAL_EXPRESSION = new DelegateExpression(LiteralExpression.newConstant(null)) { - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return false; - } + + protected static final LiteralExpression CONSTANT_EXPRESSION = + LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + protected static final byte[] BYTE_ARRAY1 = new byte[] { 1, 2, 3, 4, 5 }; + protected static final byte[] BYTE_ARRAY2 = new byte[] { 6, 7, 8 }; + protected Expression FALSE_EVAL_EXPRESSION = + new DelegateExpression(LiteralExpression.newConstant(null)) { + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return false; + } }; - - @Test - public void testLeadingNulls() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(nullExpression); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - ArrayConstructorExpression arrayConstructorExpression = new ArrayConstructorExpression(children, PVarbinary.INSTANCE, false); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - - ArrayElemRefExpression arrayElemRefExpression = new ArrayElemRefExpression(Lists.newArrayList(arrayConstructorExpression)); - arrayElemRefExpression.setIndex(1); - arrayElemRefExpression.evaluate(null, ptr); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptr.copyBytesIfNecessary()); - arrayElemRefExpression.setIndex(2); - arrayElemRefExpression.evaluate(null, ptr); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptr.copyBytesIfNecessary()); - arrayElemRefExpression.setIndex(3); - arrayElemRefExpression.evaluate(null, ptr); - assertArrayEquals(BYTE_ARRAY1, ptr.copyBytesIfNecessary()); - arrayElemRefExpression.setIndex(4); - arrayElemRefExpression.evaluate(null, ptr); - assertArrayEquals(BYTE_ARRAY2, ptr.copyBytesIfNecessary()); - } - + + @Test + public void testLeadingNulls() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(nullExpression); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + ArrayConstructorExpression arrayConstructorExpression = + new ArrayConstructorExpression(children, 
PVarbinary.INSTANCE, false); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + + ArrayElemRefExpression arrayElemRefExpression = + new ArrayElemRefExpression(Lists. newArrayList(arrayConstructorExpression)); + arrayElemRefExpression.setIndex(1); + arrayElemRefExpression.evaluate(null, ptr); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptr.copyBytesIfNecessary()); + arrayElemRefExpression.setIndex(2); + arrayElemRefExpression.evaluate(null, ptr); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptr.copyBytesIfNecessary()); + arrayElemRefExpression.setIndex(3); + arrayElemRefExpression.evaluate(null, ptr); + assertArrayEquals(BYTE_ARRAY1, ptr.copyBytesIfNecessary()); + arrayElemRefExpression.setIndex(4); + arrayElemRefExpression.evaluate(null, ptr); + assertArrayEquals(BYTE_ARRAY2, ptr.copyBytesIfNecessary()); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java index 7120db5891f..22449b31942 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.expression; import static org.junit.Assert.assertEquals; @@ -30,192 +29,259 @@ import org.apache.phoenix.expression.function.ArrayFillFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.*; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayFillFunctionTest { - private static void testExpression(LiteralExpression element, LiteralExpression length, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) element); - expressions.add(length); - - Expression arrayFillFunction = new ArrayFillFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayFillFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) arrayFillFunction.getDataType().toObject(ptr, arrayFillFunction.getSortOrder(), arrayFillFunction.getMaxLength(), arrayFillFunction.getScale()); - assertEquals(expected, result); - } - - private static void test(Object element, Object length, PDataType elementDataType, Integer elementMaxLen, Integer elementScale, PDataType lengthDataType, Integer lengthMaxlen, Integer lengthScale, PhoenixArray expected, SortOrder elementSortOrder, SortOrder lengthSortOrder) throws SQLException { - LiteralExpression elementLiteral, lengthLiteral; - elementLiteral = LiteralExpression.newConstant(element, elementDataType, elementMaxLen, elementScale, elementSortOrder, Determinism.ALWAYS); - lengthLiteral = LiteralExpression.newConstant(length, lengthDataType, lengthMaxlen, lengthScale, lengthSortOrder, Determinism.ALWAYS); - testExpression(elementLiteral, lengthLiteral, expected); - } - - @Test - public void testForInt() throws SQLException { - Object element = 5; - Object 
length = 3; - PDataType baseType = PInteger.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[]{5, 5, 5}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForBoolean() throws SQLException { - Object element = false; - Object length = 3; - PDataType baseType = PBoolean.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, new Object[]{false, false, false}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForVarchar() throws SQLException { - Object element = "foo"; - Object length = 3; - PDataType baseType = PVarchar.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{"foo", "foo", "foo"}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForChar() throws SQLException { - Object element = "foo"; - Object length = 3; - PDataType baseType = PChar.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{"foo", "foo", "foo"}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForDouble() throws SQLException { - Object element = 34.67; - Object length = 3; - PDataType baseType = PDouble.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, new Object[]{34.67, 34.67, 34.67}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForFloat() throws SQLException { - Object element = 5.6; - Object length = 3; - PDataType baseType = PFloat.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, new Object[]{(float) 5.6, (float) 5.6, (float) 5.6}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - 
test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForSmallint() throws SQLException { - Object element = 5; - Object length = 3; - PDataType baseType = PSmallint.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, new Object[]{(short) 5, (short) 5, (short) 5}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForTinyint() throws SQLException { - Object element = 6; - Object length = 3; - PDataType baseType = PTinyint.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, new Object[]{(byte) 6, (byte) 6, (byte) 6}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForLong() throws SQLException { - Object element = 34567l; - Object length = 3; - PDataType baseType = PLong.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, new Object[]{34567l, 34567l, 34567l}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForDecimal() throws SQLException { - Object element = BigDecimal.valueOf(345.67); - Object length = 3; - PDataType baseType = PDecimal.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{BigDecimal.valueOf(345.67), BigDecimal.valueOf(345.67), BigDecimal.valueOf(345.67)}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForDate() throws SQLException { - Object element = new Date(23); - Object length = 3; - PDataType baseType = PDate.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{new Date(23), new Date(23), new Date(23)}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, 
null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForTime() throws SQLException { - Object element = new Time(23); - Object length = 3; - PDataType baseType = PTime.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{new Time(23), new Time(23), new Time(23)}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForNulls1() throws SQLException { - Object element = null; - Object length = 3; - PDataType baseType = PInteger.INSTANCE; - PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[]{0, 0, 0}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testForNulls2() throws SQLException { - Object element = null; - Object length = 3; - PDataType baseType = PVarchar.INSTANCE; - PhoenixArray e = new PhoenixArray(baseType, new Object[]{null, null, null}); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC); - test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC); - } + private static void testExpression(LiteralExpression element, LiteralExpression length, + PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) element); + expressions.add(length); + + Expression arrayFillFunction = new ArrayFillFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayFillFunction.evaluate(null, ptr); + PhoenixArray result = + (PhoenixArray) arrayFillFunction.getDataType().toObject(ptr, arrayFillFunction.getSortOrder(), + arrayFillFunction.getMaxLength(), arrayFillFunction.getScale()); + assertEquals(expected, result); + } + + private static void test(Object element, Object length, PDataType elementDataType, + Integer elementMaxLen, Integer elementScale, PDataType lengthDataType, Integer lengthMaxlen, + Integer lengthScale, PhoenixArray expected, SortOrder elementSortOrder, + SortOrder lengthSortOrder) throws SQLException { + LiteralExpression elementLiteral, lengthLiteral; + elementLiteral = LiteralExpression.newConstant(element, elementDataType, elementMaxLen, + elementScale, elementSortOrder, Determinism.ALWAYS); + lengthLiteral = LiteralExpression.newConstant(length, lengthDataType, lengthMaxlen, lengthScale, + lengthSortOrder, Determinism.ALWAYS); + 
testExpression(elementLiteral, lengthLiteral, expected); + } + + @Test + public void testForInt() throws SQLException { + Object element = 5; + Object length = 3; + PDataType baseType = PInteger.INSTANCE; + PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[] { 5, 5, 5 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForBoolean() throws SQLException { + Object element = false; + Object length = 3; + PDataType baseType = PBoolean.INSTANCE; + PhoenixArray e = + new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, new Object[] { false, false, false }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForVarchar() throws SQLException { + Object element = "foo"; + Object length = 3; + PDataType baseType = PVarchar.INSTANCE; + PhoenixArray e = new PhoenixArray(baseType, new Object[] { "foo", "foo", "foo" }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForChar() throws SQLException { + Object element = "foo"; + Object length = 3; + PDataType baseType = PChar.INSTANCE; + PhoenixArray e = new PhoenixArray(baseType, new Object[] { "foo", "foo", "foo" }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForDouble() throws SQLException { + Object element = 34.67; + Object length = 3; + PDataType baseType = PDouble.INSTANCE; + PhoenixArray e = + new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, new Object[] { 34.67, 34.67, 34.67 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + } + + @Test + public void testForFloat() throws SQLException { + Object element = 5.6; + Object length = 3; + PDataType baseType = PFloat.INSTANCE; + PhoenixArray e = new 
PhoenixArray.PrimitiveFloatPhoenixArray(baseType, + new Object[] { (float) 5.6, (float) 5.6, (float) 5.6 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + } + + @Test + public void testForSmallint() throws SQLException { + Object element = 5; + Object length = 3; + PDataType baseType = PSmallint.INSTANCE; + PhoenixArray e = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, + new Object[] { (short) 5, (short) 5, (short) 5 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForTinyint() throws SQLException { + Object element = 6; + Object length = 3; + PDataType baseType = PTinyint.INSTANCE; + PhoenixArray e = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, + new Object[] { (byte) 6, (byte) 6, (byte) 6 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForLong() throws SQLException { + Object element = 34567l; + Object length = 3; + PDataType baseType = PLong.INSTANCE; + PhoenixArray e = + new PhoenixArray.PrimitiveLongPhoenixArray(baseType, new Object[] { 34567l, 34567l, 34567l }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForDecimal() throws SQLException { + Object element = BigDecimal.valueOf(345.67); + Object length = 3; + PDataType baseType = PDecimal.INSTANCE; + PhoenixArray e = new PhoenixArray(baseType, new Object[] { BigDecimal.valueOf(345.67), + BigDecimal.valueOf(345.67), BigDecimal.valueOf(345.67) }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForDate() throws SQLException { + Object element = new Date(23); + Object length = 3; + PDataType baseType = PDate.INSTANCE; + PhoenixArray e = + new PhoenixArray(baseType, new Object[] { new Date(23), new 
Date(23), new Date(23) }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForTime() throws SQLException { + Object element = new Time(23); + Object length = 3; + PDataType baseType = PTime.INSTANCE; + PhoenixArray e = + new PhoenixArray(baseType, new Object[] { new Time(23), new Time(23), new Time(23) }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForNulls1() throws SQLException { + Object element = null; + Object length = 3; + PDataType baseType = PInteger.INSTANCE; + PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[] { 0, 0, 0 }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } + + @Test + public void testForNulls2() throws SQLException { + Object element = null; + Object length = 3; + PDataType baseType = PVarchar.INSTANCE; + PhoenixArray e = new PhoenixArray(baseType, new Object[] { null, null, null }); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.ASC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.DESC); + test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, + SortOrder.ASC); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java index 3ab3bd2ed09..55d5e906996 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayPrependFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,585 +29,642 @@ import org.apache.phoenix.expression.function.ArrayPrependFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.*; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayPrependFunctionTest { - private static void testExpression(LiteralExpression array, LiteralExpression element, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) element); - expressions.add(array); - - Expression arrayPrependFunction = new ArrayPrependFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayPrependFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) arrayPrependFunction.getDataType().toObject(ptr, expressions.get(1).getSortOrder(), array.getMaxLength(), array.getScale()); - assertEquals(result, expected); - } - - private static void test(PhoenixArray array, Object element, PDataType arrayDataType, Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) throws SQLException { - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, elementSortOrder, Determinism.ALWAYS); - testExpression(arrayLiteral, elementLiteral, expected); - } - - @Test - public void testArrayPrependFunction1() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{5, 1, 2, -3, 4}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction2() throws Exception { - Object[] o = new Object[]{"1", "2", "3", "4"}; - Object[] o2 = new Object[]{"56", "1", "2", "3", "4"}; - Object element = "56"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction3() throws Exception { - //offset array short to int transition - Object[] o = new Object[Short.MAX_VALUE + 1]; - for (int i = 0; i < o.length; i++) { - o[i] = "a"; - } - Object[] o2 = new Object[Short.MAX_VALUE + 2]; - for (int i = 1; i < o2.length; i++) { - o2[i] = "a"; - } - Object element = "b"; - o2[0] = element; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, 
PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction4() throws Exception { - //offset array int - Object[] o = new Object[Short.MAX_VALUE + 7]; - for (int i = 0; i < o.length; i++) { - o[i] = "a"; - } - Object[] o2 = new Object[Short.MAX_VALUE + 8]; - for (int i = 1; i < o2.length; i++) { - o2[i] = "a"; - } - Object element = "b"; - o2[0] = element; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunctionBoolean() throws Exception { - Boolean[] o = new Boolean[]{true, false, false, true}; - Boolean[] o2 = new Boolean[]{false, true, false, false, true}; - Boolean element = false; - PDataType baseType = PBoolean.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), - null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction6() throws Exception { - Object[] o = new Object[]{new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)}; - Object[] o2 = new Object[]{new Float(8.9), new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3)}; - Object element = 8.9; - PDataType baseType = PFloat.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction7() throws Exception { - Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object[] o2 = new Object[]{12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object element = 12.67; - PDataType baseType = PDouble.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction8() throws Exception { - Object[] o = new Object[]{123l, 677l, 98789l, -78989l, 66787l}; - Object[] o2 = new Object[]{543l, 123l, 677l, 98789l, -78989l, 66787l}; - Object element = 543l; - PDataType baseType = PLong.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction9() throws Exception { - Object[] o = new Object[]{(short) 34, (short) -23, (short) -89, (short) 999, (short) 34}; - Object[] o2 = new 
Object[]{(short) 7, (short) 34, (short) -23, (short) -89, (short) 999, (short) 34}; - Object element = (short) 7; - PDataType baseType = PSmallint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction10() throws Exception { - Object[] o = new Object[]{(byte) 4, (byte) 8, (byte) 9}; - Object[] o2 = new Object[]{(byte) 6, (byte) 4, (byte) 8, (byte) 9}; - Object element = (byte) 6; - PDataType baseType = PTinyint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction11() throws Exception { - Object[] o = new Object[]{BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)}; - Object[] o2 = new Object[]{BigDecimal.valueOf(-19), BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785)}; - Object element = BigDecimal.valueOf(-19); - PDataType baseType = PDecimal.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction12() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Date date = new java.sql.Date(currentDate.getTime()); - - Object[] o = new Object[]{date, date, date}; - Object[] o2 = new Object[]{date, date, date, date}; - PDataType baseType = PDate.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction13() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Time time = new java.sql.Time(currentDate.getTime()); - - Object[] o = new Object[]{time, time, time}; - Object[] o2 = new Object[]{time, time, time, time}; - PDataType baseType = PTime.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction14() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); - - Object[] o = new Object[]{timestamp, timestamp, timestamp}; - Object[] o2 = new Object[]{timestamp, timestamp, timestamp, timestamp}; - PDataType baseType = PTimestamp.INSTANCE; - - PhoenixArray 
arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction15() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{5, 1, 2, -3, 4}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction16() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{5, 1, 2, -3, 4}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction17() throws Exception { - Object[] o = new Object[]{1, 2, -3, 4}; - Object[] o2 = new Object[]{5, 1, 2, -3, 4}; - Object element = 5; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction18() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"5", "1", "2", "3", "4"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction19() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"5", "1", "2", "3", "4"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC); - } - - @Test - public void testArrayPrependFunction20() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"5", "1", "2", "3", "4"}; - Object element = "5"; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC); - } - - @Test - public void 
testArrayPrependFunction21() throws Exception { - Object[] o = new Object[]{4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object[] o2 = new Object[]{12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE}; - Object element = 12.67; - PDataType baseType = PDouble.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction22() throws Exception { - byte[][] o = new byte[][]{new byte[]{2, 0, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 0, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[] element = new byte[]{5, 6}; - PDataType baseType = PVarbinary.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction23() throws Exception { - byte[][] o = new byte[][]{new byte[]{2, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 3}, new byte[]{42, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[] element = new byte[]{5, 6}; - PDataType baseType = PBinary.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 2, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testArrayPrependFunction24() throws Exception { - byte[][] o = new byte[][]{new byte[]{2, 0}, new byte[]{13, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[][] o2 = new byte[][]{new byte[]{5, 6}, new byte[]{2, 0}, new byte[]{13, 3}, new byte[]{5, 3}, new byte[]{6, 3}, new byte[]{2, 5}}; - byte[] element = new byte[]{5, 6}; - PDataType baseType = PBinary.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 3, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWithNoNullsAtBeginning() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{"1", "2", "3", "4"}; - Object element = null; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsAllNulls() throws Exception { - Object element = null; - PDataType baseType = PChar.INSTANCE; - - PhoenixArray arr = null; - PhoenixArray expected = null; - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, 
SortOrder.DESC); - } - - @Test - public void testForNullsWith268NullsAtBeginning() throws Exception { - //268 nulls at the beginning - Object[] o = new Object[270]; - for (int i = 0; i < o.length - 2; i++) - o[i] = null; - - o[o.length - 2] = "1"; - o[o.length - 1] = "2"; - - Object[] o2 = new Object[271]; - for (int i = 0; i < o2.length - 2; i++) - o2[i] = null; - - o2[o2.length - 2] = "1"; - o2[o2.length - 1] = "2"; - - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith241NullsAtBeginning() throws Exception { - //241 nulls at the beginning - Object[] o = new Object[243]; - for (int i = 0; i < o.length - 2; i++) - o[i] = null; - - o[o.length - 2] = "1"; - o[o.length - 1] = "2"; - - Object[] o2 = new Object[244]; - for (int i = 0; i < o2.length - 2; i++) - o2[i] = null; - - o2[o2.length - 2] = "1"; - o2[o2.length - 1] = "2"; - - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith254NullsAtBeginning() throws Exception { - //254 nulls at the beginning - Object[] o = new Object[256]; - for (int i = 0; i < o.length - 2; i++) - o[i] = null; - - o[o.length - 2] = "1"; - o[o.length - 1] = "2"; - - Object[] o2 = new Object[257]; - for (int i = 0; i < o2.length - 2; i++) - o2[i] = null; - - o2[o2.length - 2] = "1"; - o2[o2.length - 1] = "2"; - - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith510NullsAtBeginning() throws Exception { - //510 nulls at the beginning - Object[] o = new Object[512]; - for (int i = 0; i < o.length - 2; i++) - o[i] = null; - - o[o.length - 2] = "1"; - o[o.length - 1] = "2"; - - Object[] o2 = new Object[513]; - for (int i = 0; i < o2.length - 2; i++) - o2[i] = null; - - o2[o2.length - 2] = "1"; - o2[o2.length - 1] = "2"; - - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith509NullsAtBeginning() throws Exception { - //509 nulls at the beginning - Object[] o = new Object[511]; - for (int i = 0; i < o.length - 2; i++) - o[i] = null; - - o[o.length - 2] = "1"; - o[o.length - 1] = "2"; - - Object[] o2 = new Object[512]; - for (int i = 0; i < o2.length - 2; i++) - o2[i] = null; - - o2[o2.length - 2] = "1"; - o2[o2.length - 1] = "2"; - - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, 
o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith1NullAtBeginning() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{null, "1 ", "2 ", "3 ", "4 "}; - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWith2NullsAtBeginning() throws Exception { - Object[] o = new Object[]{null, "1 ", "2 ", "3 ", "4 "}; - Object[] o2 = new Object[]{null, null, "1 ", "2 ", "3 ", "4 "}; - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForNullsWithNullsInMiddle() throws Exception { - Object[] o = new Object[]{"1 ", "2 ", null, "3 ", "4 "}; - Object[] o2 = new Object[]{null, "1 ", "2 ", null, "3 ", "4 "}; - Object element = null; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } - - @Test - public void testForCorrectSeparatorBytes1() throws Exception { - Object[] o = new Object[]{"a", "b", "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.ASC, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) elementLiteral); - expressions.add(arrayLiteral); - - Expression arrayPrependFunction = new ArrayPrependFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayPrependFunction.evaluate(null, ptr); - byte[] expected = new byte[]{100, 0, 97, 0, 98, 0, 99, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes2() throws Exception { - Object[] o = new Object[]{"a", "b", "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) elementLiteral); - expressions.add(arrayLiteral); - - Expression arrayPrependFunction = new 
ArrayPrependFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayPrependFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-101, -1, -98, -1, -99, -1, -100, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1}; - assertArrayEquals(expected, ptr.get()); - } - - @Test - public void testForCorrectSeparatorBytes3() throws Exception { - Object[] o = new Object[]{"a", null, null, "c"}; - Object element = "d"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS); - List expressions = Lists.newArrayList((Expression) elementLiteral); - expressions.add(arrayLiteral); - - Expression arrayPrependFunction = new ArrayPrependFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayPrependFunction.evaluate(null, ptr); - byte[] expected = new byte[]{-101, -1, -98, -1, 0, -2, -100, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 5, 1}; - assertArrayEquals(expected, ptr.get()); - } + private static void testExpression(LiteralExpression array, LiteralExpression element, + PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) element); + expressions.add(array); + + Expression arrayPrependFunction = new ArrayPrependFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayPrependFunction.evaluate(null, ptr); + PhoenixArray result = (PhoenixArray) arrayPrependFunction.getDataType().toObject(ptr, + expressions.get(1).getSortOrder(), array.getMaxLength(), array.getScale()); + assertEquals(result, expected); + } + + private static void test(PhoenixArray array, Object element, PDataType arrayDataType, + Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, + Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) + throws SQLException { + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, + arraySortOrder, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, + elementSortOrder, Determinism.ALWAYS); + testExpression(arrayLiteral, elementLiteral, expected); + } + + @Test + public void testArrayPrependFunction1() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 5, 1, 2, -3, 4 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction2() throws Exception { + Object[] o = new Object[] { "1", "2", "3", "4" }; + Object[] o2 = new Object[] { "56", "1", "2", "3", "4" }; + Object element = "56"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new 
PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction3() throws Exception { + // offset array short to int transition + Object[] o = new Object[Short.MAX_VALUE + 1]; + for (int i = 0; i < o.length; i++) { + o[i] = "a"; + } + Object[] o2 = new Object[Short.MAX_VALUE + 2]; + for (int i = 1; i < o2.length; i++) { + o2[i] = "a"; + } + Object element = "b"; + o2[0] = element; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction4() throws Exception { + // offset array int + Object[] o = new Object[Short.MAX_VALUE + 7]; + for (int i = 0; i < o.length; i++) { + o[i] = "a"; + } + Object[] o2 = new Object[Short.MAX_VALUE + 8]; + for (int i = 1; i < o2.length; i++) { + o2[i] = "a"; + } + Object element = "b"; + o2[0] = element; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunctionBoolean() throws Exception { + Boolean[] o = new Boolean[] { true, false, false, true }; + Boolean[] o2 = new Boolean[] { false, true, false, false, true }; + Boolean element = false; + PDataType baseType = PBoolean.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction6() throws Exception { + Object[] o = new Object[] { new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3) }; + Object[] o2 = new Object[] { new Float(8.9), new Float(2.3), new Float(7.9), new Float(-9.6), + new Float(2.3) }; + Object element = 8.9; + PDataType baseType = PFloat.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction7() throws Exception { + Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object[] o2 = new Object[] { 12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object element = 12.67; + PDataType baseType = PDouble.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void 
testArrayPrependFunction8() throws Exception { + Object[] o = new Object[] { 123l, 677l, 98789l, -78989l, 66787l }; + Object[] o2 = new Object[] { 543l, 123l, 677l, 98789l, -78989l, 66787l }; + Object element = 543l; + PDataType baseType = PLong.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction9() throws Exception { + Object[] o = new Object[] { (short) 34, (short) -23, (short) -89, (short) 999, (short) 34 }; + Object[] o2 = + new Object[] { (short) 7, (short) 34, (short) -23, (short) -89, (short) 999, (short) 34 }; + Object element = (short) 7; + PDataType baseType = PSmallint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction10() throws Exception { + Object[] o = new Object[] { (byte) 4, (byte) 8, (byte) 9 }; + Object[] o2 = new Object[] { (byte) 6, (byte) 4, (byte) 8, (byte) 9 }; + Object element = (byte) 6; + PDataType baseType = PTinyint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction11() throws Exception { + Object[] o = new Object[] { BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), + BigDecimal.valueOf(785) }; + Object[] o2 = new Object[] { BigDecimal.valueOf(-19), BigDecimal.valueOf(2345), + BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785) }; + Object element = BigDecimal.valueOf(-19); + PDataType baseType = PDecimal.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction12() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Date date = new java.sql.Date(currentDate.getTime()); + + Object[] o = new Object[] { date, date, date }; + Object[] o2 = new Object[] { date, date, date, date }; + PDataType baseType = PDate.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction13() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Time time = new java.sql.Time(currentDate.getTime()); + + Object[] o = new Object[] { time, time, time }; + 
Object[] o2 = new Object[] { time, time, time, time }; + PDataType baseType = PTime.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction14() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); + + Object[] o = new Object[] { timestamp, timestamp, timestamp }; + Object[] o2 = new Object[] { timestamp, timestamp, timestamp, timestamp }; + PDataType baseType = PTimestamp.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction15() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 5, 1, 2, -3, 4 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction16() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 5, 1, 2, -3, 4 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.DESC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction17() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 5, 1, 2, -3, 4 }; + Object element = 5; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction18() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "5", "1", "2", "3", "4" }; + Object element = "5"; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction19() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "5", "1", "2", "3", "4" }; + Object element = "5"; + 
PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.ASC); + } + + @Test + public void testArrayPrependFunction20() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "5", "1", "2", "3", "4" }; + Object element = "5"; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.DESC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction21() throws Exception { + Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object[] o2 = new Object[] { 12.67, 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object element = 12.67; + PDataType baseType = PDouble.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction22() throws Exception { + byte[][] o = new byte[][] { new byte[] { 2, 0, 3 }, new byte[] { 42, 3 }, new byte[] { 5, 3 }, + new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[][] o2 = new byte[][] { new byte[] { 5, 6 }, new byte[] { 2, 0, 3 }, new byte[] { 42, 3 }, + new byte[] { 5, 3 }, new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[] element = new byte[] { 5, 6 }; + PDataType baseType = PVarbinary.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction23() throws Exception { + byte[][] o = new byte[][] { new byte[] { 2, 3 }, new byte[] { 42, 3 }, new byte[] { 5, 3 }, + new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[][] o2 = new byte[][] { new byte[] { 5, 6 }, new byte[] { 2, 3 }, new byte[] { 42, 3 }, + new byte[] { 5, 3 }, new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[] element = new byte[] { 5, 6 }; + PDataType baseType = PBinary.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 2, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testArrayPrependFunction24() throws Exception { + byte[][] o = new byte[][] { new byte[] { 2, 0 }, new byte[] { 13, 3 }, new byte[] { 5, 3 }, + new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[][] o2 = new byte[][] { new byte[] { 5, 6 }, new byte[] { 2, 0 }, new byte[] { 13, 3 }, + new byte[] { 5, 3 }, new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[] element = new byte[] { 5, 6 }; + PDataType baseType = PBinary.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + 
test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 3, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWithNoNullsAtBeginning() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { "1", "2", "3", "4" }; + Object element = null; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsAllNulls() throws Exception { + Object element = null; + PDataType baseType = PChar.INSTANCE; + + PhoenixArray arr = null; + PhoenixArray expected = null; + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith268NullsAtBeginning() throws Exception { + // 268 nulls at the beginning + Object[] o = new Object[270]; + for (int i = 0; i < o.length - 2; i++) + o[i] = null; + + o[o.length - 2] = "1"; + o[o.length - 1] = "2"; + + Object[] o2 = new Object[271]; + for (int i = 0; i < o2.length - 2; i++) + o2[i] = null; + + o2[o2.length - 2] = "1"; + o2[o2.length - 1] = "2"; + + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith241NullsAtBeginning() throws Exception { + // 241 nulls at the beginning + Object[] o = new Object[243]; + for (int i = 0; i < o.length - 2; i++) + o[i] = null; + + o[o.length - 2] = "1"; + o[o.length - 1] = "2"; + + Object[] o2 = new Object[244]; + for (int i = 0; i < o2.length - 2; i++) + o2[i] = null; + + o2[o2.length - 2] = "1"; + o2[o2.length - 1] = "2"; + + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith254NullsAtBeginning() throws Exception { + // 254 nulls at the beginning + Object[] o = new Object[256]; + for (int i = 0; i < o.length - 2; i++) + o[i] = null; + + o[o.length - 2] = "1"; + o[o.length - 1] = "2"; + + Object[] o2 = new Object[257]; + for (int i = 0; i < o2.length - 2; i++) + o2[i] = null; + + o2[o2.length - 2] = "1"; + o2[o2.length - 1] = "2"; + + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith510NullsAtBeginning() throws Exception { + // 510 nulls at the beginning + Object[] o = new Object[512]; + for (int i = 0; i < o.length - 2; i++) + o[i] = null; + + o[o.length - 2] = 
"1"; + o[o.length - 1] = "2"; + + Object[] o2 = new Object[513]; + for (int i = 0; i < o2.length - 2; i++) + o2[i] = null; + + o2[o2.length - 2] = "1"; + o2[o2.length - 1] = "2"; + + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith509NullsAtBeginning() throws Exception { + // 509 nulls at the beginning + Object[] o = new Object[511]; + for (int i = 0; i < o.length - 2; i++) + o[i] = null; + + o[o.length - 2] = "1"; + o[o.length - 1] = "2"; + + Object[] o2 = new Object[512]; + for (int i = 0; i < o2.length - 2; i++) + o2[i] = null; + + o2[o2.length - 2] = "1"; + o2[o2.length - 1] = "2"; + + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith1NullAtBeginning() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { null, "1 ", "2 ", "3 ", "4 " }; + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWith2NullsAtBeginning() throws Exception { + Object[] o = new Object[] { null, "1 ", "2 ", "3 ", "4 " }; + Object[] o2 = new Object[] { null, null, "1 ", "2 ", "3 ", "4 " }; + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), 4, + null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForNullsWithNullsInMiddle() throws Exception { + Object[] o = new Object[] { "1 ", "2 ", null, "3 ", "4 " }; + Object[] o2 = new Object[] { null, "1 ", "2 ", null, "3 ", "4 " }; + Object element = null; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } + + @Test + public void testForCorrectSeparatorBytes1() throws Exception { + Object[] o = new Object[] { "a", "b", "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.ASC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) 
elementLiteral); + expressions.add(arrayLiteral); + + Expression arrayPrependFunction = new ArrayPrependFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayPrependFunction.evaluate(null, ptr); + byte[] expected = new byte[] { 100, 0, 97, 0, 98, 0, 99, 0, 0, 0, -128, 1, -128, 3, -128, 5, + -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes2() throws Exception { + Object[] o = new Object[] { "a", "b", "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.DESC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) elementLiteral); + expressions.add(arrayLiteral); + + Expression arrayPrependFunction = new ArrayPrependFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayPrependFunction.evaluate(null, ptr); + byte[] expected = new byte[] { -101, -1, -98, -1, -99, -1, -100, -1, -1, -1, -128, 1, -128, 3, + -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 }; + assertArrayEquals(expected, ptr.get()); + } + + @Test + public void testForCorrectSeparatorBytes3() throws Exception { + Object[] o = new Object[] { "a", null, null, "c" }; + Object element = "d"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, + SortOrder.DESC, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, + Determinism.ALWAYS); + List expressions = Lists.newArrayList((Expression) elementLiteral); + expressions.add(arrayLiteral); + + Expression arrayPrependFunction = new ArrayPrependFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayPrependFunction.evaluate(null, ptr); + byte[] expected = new byte[] { -101, -1, -98, -1, 0, -2, -100, -1, -1, -1, -128, 1, -128, 3, + -128, 5, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 5, 1 }; + assertArrayEquals(expected, ptr.get()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayRemoveFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayRemoveFunctionTest.java index ed896bd6916..53629b32f0c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayRemoveFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayRemoveFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,243 +43,244 @@ import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PhoenixArray; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayRemoveFunctionTest { - private static void testExpression(LiteralExpression array, LiteralExpression element, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) element); - expressions.add(array); - - Expression arrayRemoveFunction = new ArrayRemoveFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayRemoveFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) arrayRemoveFunction.getDataType().toObject(ptr, - expressions.get(1).getSortOrder(), array.getMaxLength(), array.getScale()); - assertEquals(expected, result); - } - - private static void test(PhoenixArray array, Object element, PDataType arrayDataType, Integer arrMaxLen, - Integer arrScale, PDataType elementDataType, Integer elemMaxLen, Integer elemScale, PhoenixArray expected, - SortOrder arraySortOrder, SortOrder elementSortOrder) throws SQLException { - LiteralExpression arrayLiteral, elementLiteral; - arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, - Determinism.ALWAYS); - elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, - elementSortOrder, Determinism.ALWAYS); - testExpression(arrayLiteral, elementLiteral, expected); - } - - @Test - public void testArrayRemoveFunction1() throws Exception { - Object[] o = new Object[] { 1, 2, -3, 4 }; - Object[] o2 = new Object[] { 1, 2, -3 }; - Object element = 4; - PDataType baseType = PInteger.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction2() throws Exception { - Object[] o = new Object[] { "1", "2", "3", "4" }; - Object[] o2 = new Object[] { "1", "3", "4" }; - Object element = "2"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction3() throws Exception { - Object[] o = new Object[] { "1", "2", "2", "4" }; - Object[] o2 = new Object[] { "1", "4" }; - Object element = "2"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction4() throws 
Exception { - Object[] o = new Object[] { "1", "2", "2", "4" }; - Object[] o2 = new Object[] { "1", "2", "2", "4" }; - Object element = "5"; - PDataType baseType = PVarchar.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunctionBoolean() throws Exception { - Boolean[] o = new Boolean[] { true, false, false, true }; - Boolean[] o2 = new Boolean[] { true, true }; - Boolean element = false; - PDataType baseType = PBoolean.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction6() throws Exception { - Object[] o = new Object[] { new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3) }; - Object[] o2 = new Object[] { new Float(7.9), new Float(-9.6) }; - Object element = 2.3; - PDataType baseType = PFloat.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction7() throws Exception { - Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; - Object[] o2 = new Object[] { 9.54, 2.34, -9.675, Double.MAX_VALUE }; - Object element = 4.78; - PDataType baseType = PDouble.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction8() throws Exception { - Object[] o = new Object[] { 123l, 677l, 98789l, -78989l, 66787l }; - Object[] o2 = new Object[] { 123l, 677l, -78989l, 66787l }; - Object element = 98789l; - PDataType baseType = PLong.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction9() throws Exception { - Object[] o = new Object[] { (short) 34, (short) -89, (short) 999, (short) 34 }; - Object[] o2 = new Object[] { (short) 34, (short) -89, (short) 999, (short) 34 }; - Object element = (short) -23; - PDataType baseType = PSmallint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, 
SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction10() throws Exception { - Object[] o = new Object[] { (byte) 4, (byte) 8, (byte) 9 }; - Object[] o2 = new Object[] { (byte) 8, (byte) 9 }; - Object element = (byte) 4; - PDataType baseType = PTinyint.INSTANCE; - - PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction11() throws Exception { - Object[] o = new Object[] { BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785) }; - Object[] o2 = new Object[] { BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785) }; - Object element = BigDecimal.valueOf(2345); - PDataType baseType = PDecimal.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction12() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Date date = new java.sql.Date(currentDate.getTime()); - Date date2 = new Date(new java.util.Date().getTime() + 1000); - - Object[] o = new Object[] { date, date, date, date2 }; - Object[] o2 = new Object[] { date2 }; - PDataType baseType = PDate.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, - null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction13() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Time time = new java.sql.Time(currentDate.getTime()); - java.sql.Time time2 = new java.sql.Time(new java.util.Date().getTime() + 1000); - - Object[] o = new Object[] { time, time, time, time2 }; - Object[] o2 = new Object[] { time2 }; - PDataType baseType = PTime.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, baseType, - null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testArrayRemoveFunction14() throws Exception { - Calendar calendar = Calendar.getInstance(); - java.util.Date currentDate = calendar.getTime(); - java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); - java.sql.Timestamp timestamp2 = new java.sql.Timestamp(new java.util.Date().getTime() + 1000); - - Object[] o = new Object[] { timestamp, timestamp2, timestamp, timestamp }; - Object[] o2 = new Object[] { timestamp2 }; - PDataType baseType = PTimestamp.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void 
testArrayRemoveFunction15() throws Exception { - byte[][] o = new byte[][] { new byte[] { 2, 0, 3 }, new byte[] { 42, 3 }, new byte[] { 5, 3 }, - new byte[] { 6, 3 }, new byte[] { 2, 5 } }; - byte[][] o2 = new byte[][] { new byte[] { 42, 3 }, new byte[] { 5, 3 }, new byte[] { 6, 3 }, - new byte[] { 2, 5 } }; - byte[] element = new byte[] { 2, 0, 3 }; - PDataType baseType = PVarbinary.INSTANCE; - - PhoenixArray arr = new PhoenixArray(baseType, o); - PhoenixArray expected = new PhoenixArray(baseType, o2); - test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, null, - baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); - } + private static void testExpression(LiteralExpression array, LiteralExpression element, + PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) element); + expressions.add(array); + + Expression arrayRemoveFunction = new ArrayRemoveFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayRemoveFunction.evaluate(null, ptr); + PhoenixArray result = (PhoenixArray) arrayRemoveFunction.getDataType().toObject(ptr, + expressions.get(1).getSortOrder(), array.getMaxLength(), array.getScale()); + assertEquals(expected, result); + } + + private static void test(PhoenixArray array, Object element, PDataType arrayDataType, + Integer arrMaxLen, Integer arrScale, PDataType elementDataType, Integer elemMaxLen, + Integer elemScale, PhoenixArray expected, SortOrder arraySortOrder, SortOrder elementSortOrder) + throws SQLException { + LiteralExpression arrayLiteral, elementLiteral; + arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, + arraySortOrder, Determinism.ALWAYS); + elementLiteral = LiteralExpression.newConstant(element, elementDataType, elemMaxLen, elemScale, + elementSortOrder, Determinism.ALWAYS); + testExpression(arrayLiteral, elementLiteral, expected); + } + + @Test + public void testArrayRemoveFunction1() throws Exception { + Object[] o = new Object[] { 1, 2, -3, 4 }; + Object[] o2 = new Object[] { 1, 2, -3 }; + Object element = 4; + PDataType baseType = PInteger.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction2() throws Exception { + Object[] o = new Object[] { "1", "2", "3", "4" }; + Object[] o2 = new Object[] { "1", "3", "4" }; + Object element = "2"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction3() throws Exception { + Object[] o = new Object[] { "1", "2", "2", "4" }; + Object[] o2 = new Object[] { "1", "4" }; + Object element = "2"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, 
SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction4() throws Exception { + Object[] o = new Object[] { "1", "2", "2", "4" }; + Object[] o2 = new Object[] { "1", "2", "2", "4" }; + Object element = "5"; + PDataType baseType = PVarchar.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunctionBoolean() throws Exception { + Boolean[] o = new Boolean[] { true, false, false, true }; + Boolean[] o2 = new Boolean[] { true, true }; + Boolean element = false; + PDataType baseType = PBoolean.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction6() throws Exception { + Object[] o = new Object[] { new Float(2.3), new Float(7.9), new Float(-9.6), new Float(2.3) }; + Object[] o2 = new Object[] { new Float(7.9), new Float(-9.6) }; + Object element = 2.3; + PDataType baseType = PFloat.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction7() throws Exception { + Object[] o = new Object[] { 4.78, 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object[] o2 = new Object[] { 9.54, 2.34, -9.675, Double.MAX_VALUE }; + Object element = 4.78; + PDataType baseType = PDouble.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction8() throws Exception { + Object[] o = new Object[] { 123l, 677l, 98789l, -78989l, 66787l }; + Object[] o2 = new Object[] { 123l, 677l, -78989l, 66787l }; + Object element = 98789l; + PDataType baseType = PLong.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction9() throws Exception { + Object[] o = new Object[] { (short) 34, (short) -89, (short) 999, (short) 34 }; + Object[] o2 = new Object[] { (short) 34, (short) -89, (short) 999, (short) 34 }; + Object element = (short) -23; + PDataType baseType = PSmallint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() 
+ PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction10() throws Exception { + Object[] o = new Object[] { (byte) 4, (byte) 8, (byte) 9 }; + Object[] o2 = new Object[] { (byte) 8, (byte) 9 }; + Object element = (byte) 4; + PDataType baseType = PTinyint.INSTANCE; + + PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction11() throws Exception { + Object[] o = new Object[] { BigDecimal.valueOf(2345), BigDecimal.valueOf(-23.45), + BigDecimal.valueOf(785) }; + Object[] o2 = new Object[] { BigDecimal.valueOf(-23.45), BigDecimal.valueOf(785) }; + Object element = BigDecimal.valueOf(2345); + PDataType baseType = PDecimal.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction12() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Date date = new java.sql.Date(currentDate.getTime()); + Date date2 = new Date(new java.util.Date().getTime() + 1000); + + Object[] o = new Object[] { date, date, date, date2 }; + Object[] o2 = new Object[] { date2 }; + PDataType baseType = PDate.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, date, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction13() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Time time = new java.sql.Time(currentDate.getTime()); + java.sql.Time time2 = new java.sql.Time(new java.util.Date().getTime() + 1000); + + Object[] o = new Object[] { time, time, time, time2 }; + Object[] o2 = new Object[] { time2 }; + PDataType baseType = PTime.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, time, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), null, + null, baseType, null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction14() throws Exception { + Calendar calendar = Calendar.getInstance(); + java.util.Date currentDate = calendar.getTime(); + java.sql.Timestamp timestamp = new java.sql.Timestamp(currentDate.getTime()); + java.sql.Timestamp timestamp2 = new java.sql.Timestamp(new java.util.Date().getTime() + 1000); + + Object[] o = new Object[] { timestamp, timestamp2, timestamp, timestamp }; + Object[] o2 = new Object[] { timestamp2 }; + PDataType baseType = PTimestamp.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, timestamp, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 
null, null, expected, SortOrder.ASC, SortOrder.ASC); + } + + @Test + public void testArrayRemoveFunction15() throws Exception { + byte[][] o = new byte[][] { new byte[] { 2, 0, 3 }, new byte[] { 42, 3 }, new byte[] { 5, 3 }, + new byte[] { 6, 3 }, new byte[] { 2, 5 } }; + byte[][] o2 = new byte[][] { new byte[] { 42, 3 }, new byte[] { 5, 3 }, new byte[] { 6, 3 }, + new byte[] { 2, 5 } }; + byte[] element = new byte[] { 2, 0, 3 }; + PDataType baseType = PVarbinary.INSTANCE; + + PhoenixArray arr = new PhoenixArray(baseType, o); + PhoenixArray expected = new PhoenixArray(baseType, o2); + test(arr, element, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE), + null, null, baseType, 1, null, expected, SortOrder.ASC, SortOrder.DESC); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayToStringFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayToStringFunctionTest.java index 8ea87d32df3..7367fc24337 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayToStringFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayToStringFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,350 +30,402 @@ import org.apache.phoenix.expression.function.ArrayToStringFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.*; -import org.junit.Ignore; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ArrayToStringFunctionTest { - private static void testExpression(LiteralExpression array, LiteralExpression delimiter, LiteralExpression nullString, String expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) array); - expressions.add(delimiter); - expressions.add(nullString); + private static void testExpression(LiteralExpression array, LiteralExpression delimiter, + LiteralExpression nullString, String expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) array); + expressions.add(delimiter); + expressions.add(nullString); - Expression arrayToStringFunction = new ArrayToStringFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - arrayToStringFunction.evaluate(null, ptr); - String result = (String) arrayToStringFunction.getDataType().toObject(ptr, arrayToStringFunction.getSortOrder(), arrayToStringFunction.getMaxLength(), arrayToStringFunction.getScale()); - assertEquals(expected, result); - } + Expression arrayToStringFunction = new ArrayToStringFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayToStringFunction.evaluate(null, ptr); + String result = (String) arrayToStringFunction.getDataType().toObject(ptr, + arrayToStringFunction.getSortOrder(), arrayToStringFunction.getMaxLength(), + arrayToStringFunction.getScale()); + assertEquals(expected, result); + } - private static void test(PhoenixArray array, PDataType arrayDataType, Integer arrMaxLen, Integer arrScale, String delimiter, String nullString, String expected, SortOrder arraySortOrder, SortOrder delimiterSortOrder, SortOrder 
nullStringSortOrder) throws SQLException { - LiteralExpression arrayLiteral, delimiterLiteral, nullStringLiteral; - arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, arraySortOrder, Determinism.ALWAYS); - delimiterLiteral = LiteralExpression.newConstant(delimiter, PVarchar.INSTANCE, null, null, delimiterSortOrder, Determinism.ALWAYS); - nullStringLiteral = LiteralExpression.newConstant(nullString, PVarchar.INSTANCE, null, null, nullStringSortOrder, Determinism.ALWAYS); - testExpression(arrayLiteral, delimiterLiteral, nullStringLiteral, expected); - } + private static void test(PhoenixArray array, PDataType arrayDataType, Integer arrMaxLen, + Integer arrScale, String delimiter, String nullString, String expected, + SortOrder arraySortOrder, SortOrder delimiterSortOrder, SortOrder nullStringSortOrder) + throws SQLException { + LiteralExpression arrayLiteral, delimiterLiteral, nullStringLiteral; + arrayLiteral = LiteralExpression.newConstant(array, arrayDataType, arrMaxLen, arrScale, + arraySortOrder, Determinism.ALWAYS); + delimiterLiteral = LiteralExpression.newConstant(delimiter, PVarchar.INSTANCE, null, null, + delimiterSortOrder, Determinism.ALWAYS); + nullStringLiteral = LiteralExpression.newConstant(nullString, PVarchar.INSTANCE, null, null, + nullStringSortOrder, Determinism.ALWAYS); + testExpression(arrayLiteral, delimiterLiteral, nullStringLiteral, expected); + } - @Test - public void testInt1() throws SQLException { - PDataType type = PIntegerArray.INSTANCE; - PDataType base = PInteger.INSTANCE; - Object[] o1 = new Object[]{1, 2, 3, 4, 5}; - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "1,2,3,4,5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testInt1() throws SQLException { + PDataType type = PIntegerArray.INSTANCE; + PDataType base = PInteger.INSTANCE; + Object[] o1 = new Object[] { 1, 2, 3, 4, 5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "1,2,3,4,5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testInt2() throws SQLException { - PDataType type = PIntegerArray.INSTANCE; - PDataType base = PInteger.INSTANCE; - Object[] o1 = new Object[]{1, 2, 3, 4, 5}; - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); - String delimiter = ","; - String nullString = ""; - String expected = "1,2,3,4,5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testInt2() throws SQLException { + PDataType type = PIntegerArray.INSTANCE; + PDataType base = PInteger.INSTANCE; + Object[] o1 = new Object[] { 1, 2, 3, 4, 5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); + String delimiter = ","; + String nullString = ""; + String expected = "1,2,3,4,5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, 
SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testInt3() throws SQLException { - PDataType type = PIntegerArray.INSTANCE; - PDataType base = PInteger.INSTANCE; - Object[] o1 = new Object[]{1, 2, 3, 4, 5}; - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); - String delimiter = ""; - String nullString = ""; - String expected = null; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testInt3() throws SQLException { + PDataType type = PIntegerArray.INSTANCE; + PDataType base = PInteger.INSTANCE; + Object[] o1 = new Object[] { 1, 2, 3, 4, 5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); + String delimiter = ""; + String nullString = ""; + String expected = null; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testInt4() throws SQLException { - PDataType type = PIntegerArray.INSTANCE; - PDataType base = PInteger.INSTANCE; - Object[] o1 = new Object[]{1}; - PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); - String delimiter = ","; - String nullString = ""; - String expected = "1"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testInt4() throws SQLException { + PDataType type = PIntegerArray.INSTANCE; + PDataType base = PInteger.INSTANCE; + Object[] o1 = new Object[] { 1 }; + PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1); + String delimiter = ","; + String nullString = ""; + String expected = "1"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testFloat1() throws SQLException { - PDataType type = PFloatArray.INSTANCE; - PDataType base = PFloat.INSTANCE; - Object[] o1 = new Object[]{(float) 1.1, (float) 2.2, (float) 3.3, (float) 4.4, (float) 5.5}; - PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "1.1,2.2,3.3,4.4,5.5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testFloat1() throws SQLException { + PDataType type = PFloatArray.INSTANCE; + PDataType base = PFloat.INSTANCE; + Object[] o1 = new Object[] { (float) 1.1, (float) 2.2, (float) 3.3, (float) 4.4, (float) 5.5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "1.1,2.2,3.3,4.4,5.5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testFloat2() throws SQLException { - PDataType type = PFloatArray.INSTANCE; - PDataType base = PFloat.INSTANCE; - Object[] o1 = new 
Object[]{(float) 1.1, (float) 2.2, (float) 3.3, (float) 4.4, (float) 5.5}; - PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = "1.1, 2.2, 3.3, 4.4, 5.5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - } - - @Test - public void testDate() throws SQLException { - PDataType type = PDateArray.INSTANCE; - PDataType base = PDate.INSTANCE; - Object[] o1 = new Object[]{new Date(0l), new Date(0l), new Date(0l)}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = ""; - for (int i = 0; i < o1.length - 1; i++) { - expected += o1[i].toString() + ", "; - } - expected += o1[o1.length - 1]; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testFloat2() throws SQLException { + PDataType type = PFloatArray.INSTANCE; + PDataType base = PFloat.INSTANCE; + Object[] o1 = new Object[] { (float) 1.1, (float) 2.2, (float) 3.3, (float) 4.4, (float) 5.5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = "1.1, 2.2, 3.3, 4.4, 5.5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testTime() throws SQLException { - PDataType type = PTimeArray.INSTANCE; - PDataType base = PTime.INSTANCE; - Object[] o1 = new Object[]{new Time(0l), new Time(0l), new Time(0l)}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = ""; - for (int i = 0; i < o1.length - 1; i++) { - expected += o1[i].toString() + ", "; - } - expected += o1[o1.length - 1]; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); + @Test + public void testDate() throws SQLException { + PDataType type = PDateArray.INSTANCE; + PDataType base = PDate.INSTANCE; + Object[] o1 = new Object[] { new Date(0l), new Date(0l), new Date(0l) }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = ""; + for (int i = 0; i < o1.length - 1; i++) { + expected += o1[i].toString() + ", "; } + expected += o1[o1.length - 1]; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testTimestamp() throws SQLException { - PDataType type = PTimestampArray.INSTANCE; - PDataType base = PTimestamp.INSTANCE; - Object[] o1 = new Object[]{new Timestamp(0l), new Timestamp(0l), new Timestamp(0l)}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = ""; - for (int i = 0; i < o1.length - 1; i++) { - expected += o1[i].toString() + ", "; - } - expected += o1[o1.length - 1]; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); + @Test + public void testTime() throws SQLException { + 
PDataType type = PTimeArray.INSTANCE; + PDataType base = PTime.INSTANCE; + Object[] o1 = new Object[] { new Time(0l), new Time(0l), new Time(0l) }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = ""; + for (int i = 0; i < o1.length - 1; i++) { + expected += o1[i].toString() + ", "; } + expected += o1[o1.length - 1]; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar1() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{"hello", null, "hello", null}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = "hello, *, hello, *"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); + @Test + public void testTimestamp() throws SQLException { + PDataType type = PTimestampArray.INSTANCE; + PDataType base = PTimestamp.INSTANCE; + Object[] o1 = new Object[] { new Timestamp(0l), new Timestamp(0l), new Timestamp(0l) }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = ""; + for (int i = 0; i < o1.length - 1; i++) { + expected += o1[i].toString() + ", "; } + expected += o1[o1.length - 1]; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar2() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{"hello", null, "hello", null, null}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = "hello, *, hello, *, *"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar1() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { "hello", null, "hello", null }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = "hello, *, hello, *"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar3() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{"hello", null, "hello", null, null}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = ""; - String expected = "hello, hello"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar2() throws SQLException 
{ + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { "hello", null, "hello", null, null }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = "hello, *, hello, *, *"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar4() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{null, "hello", "hello", null, null}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = ""; - String expected = "hello, hello"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar3() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { "hello", null, "hello", null, null }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = ""; + String expected = "hello, hello"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar5() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{"hello"}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = ""; - String expected = "hello"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar4() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { null, "hello", "hello", null, null }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = ""; + String expected = "hello, hello"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar6() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{null, null, null, null, "hello"}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = ""; - String expected = "hello"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar5() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { "hello" }; + PhoenixArray arr = new 
PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = ""; + String expected = "hello"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testVarchar7() throws SQLException { - PDataType type = PVarcharArray.INSTANCE; - PDataType base = PVarchar.INSTANCE; - Object[] o1 = new Object[]{null, null, null, null, "hello"}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ", "; - String nullString = "*"; - String expected = "*, *, *, *, hello"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar6() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { null, null, null, null, "hello" }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = ""; + String expected = "hello"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testDouble() throws SQLException { - PDataType type = PDoubleArray.INSTANCE; - PDataType base = PDouble.INSTANCE; - Object[] o1 = new Object[]{23.4, 56.8, 2.4}; - PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "23.4,56.8,2.4"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testVarchar7() throws SQLException { + PDataType type = PVarcharArray.INSTANCE; + PDataType base = PVarchar.INSTANCE; + Object[] o1 = new Object[] { null, null, null, null, "hello" }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ", "; + String nullString = "*"; + String expected = "*, *, *, *, hello"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testTinyint() throws SQLException { - PDataType type = PTinyintArray.INSTANCE; - PDataType base = PTinyint.INSTANCE; - Object[] o1 = new Object[]{(byte) 2, (byte) 4, (byte) 5}; - PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "2,4,5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testDouble() throws SQLException { + PDataType type = PDoubleArray.INSTANCE; + PDataType base = PDouble.INSTANCE; + Object[] o1 = new Object[] { 23.4, 56.8, 2.4 }; + PhoenixArray arr = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "23.4,56.8,2.4"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + } 
- @Test - public void testSmallint() throws SQLException { - PDataType type = PSmallintArray.INSTANCE; - PDataType base = PSmallint.INSTANCE; - Object[] o1 = new Object[]{(short) 6, (short) 7, (short) 8}; - PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "6,7,8"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testTinyint() throws SQLException { + PDataType type = PTinyintArray.INSTANCE; + PDataType base = PTinyint.INSTANCE; + Object[] o1 = new Object[] { (byte) 2, (byte) 4, (byte) 5 }; + PhoenixArray arr = new PhoenixArray.PrimitiveBytePhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "2,4,5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testBoolean() throws SQLException { - PDataType type = PBooleanArray.INSTANCE; - PDataType base = PBoolean.INSTANCE; - Object[] o1 = new Object[]{true, false, true}; - PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "true,false,true"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testSmallint() throws SQLException { + PDataType type = PSmallintArray.INSTANCE; + PDataType base = PSmallint.INSTANCE; + Object[] o1 = new Object[] { (short) 6, (short) 7, (short) 8 }; + PhoenixArray arr = new PhoenixArray.PrimitiveShortPhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "6,7,8"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testLong() throws SQLException { - PDataType type = PLongArray.INSTANCE; - PDataType base = PLong.INSTANCE; - Object[] o1 = new Object[]{(long) 23, (long) 34, (long) 45}; - PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "23,34,45"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testBoolean() throws SQLException { + PDataType type = PBooleanArray.INSTANCE; + PDataType base = PBoolean.INSTANCE; + Object[] o1 = new Object[] { true, false, true }; + PhoenixArray arr = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "true,false,true"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testDecimal1() throws SQLException { - PDataType type = PDecimalArray.INSTANCE; - PDataType base = PDecimal.INSTANCE; - Object[] o1 = new Object[]{BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), BigDecimal.valueOf(234.5)}; - PhoenixArray 
arr = new PhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "23.45,2.345,234.5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testLong() throws SQLException { + PDataType type = PLongArray.INSTANCE; + PDataType base = PLong.INSTANCE; + Object[] o1 = new Object[] { (long) 23, (long) 34, (long) 45 }; + PhoenixArray arr = new PhoenixArray.PrimitiveLongPhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "23,34,45"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testDecimal2() throws SQLException { - PDataType type = PDecimalArray.INSTANCE; - PDataType base = PDecimal.INSTANCE; - Object[] o1 = new Object[]{BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), BigDecimal.valueOf(234.5), null}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "23.45,2.345,234.5,*"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testDecimal1() throws SQLException { + PDataType type = PDecimalArray.INSTANCE; + PDataType base = PDecimal.INSTANCE; + Object[] o1 = new Object[] { BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), + BigDecimal.valueOf(234.5) }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "23.45,2.345,234.5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } - @Test - public void testDecimal3() throws SQLException { - PDataType type = PDecimalArray.INSTANCE; - PDataType base = PDecimal.INSTANCE; - Object[] o1 = new Object[]{BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), null, BigDecimal.valueOf(234.5)}; - PhoenixArray arr = new PhoenixArray(base, o1); - String delimiter = ","; - String nullString = "*"; - String expected = "23.45,2.345,*,234.5"; - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC); - test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC); - } + @Test + public void testDecimal2() throws SQLException { + PDataType type = PDecimalArray.INSTANCE; + PDataType base = PDecimal.INSTANCE; + Object[] o1 = new Object[] { BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), + BigDecimal.valueOf(234.5), null }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "23.45,2.345,234.5,*"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } + + @Test + public void testDecimal3() throws SQLException { + PDataType type = PDecimalArray.INSTANCE; + 
PDataType base = PDecimal.INSTANCE; + Object[] o1 = new Object[] { BigDecimal.valueOf(23.45), BigDecimal.valueOf(2.345), null, + BigDecimal.valueOf(234.5) }; + PhoenixArray arr = new PhoenixArray(base, o1); + String delimiter = ","; + String nullString = "*"; + String expected = "23.45,2.345,*,234.5"; + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, + SortOrder.ASC); + test(arr, type, null, null, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, + SortOrder.ASC); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java index 7d1675742b8..0e17adf7022 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,88 +40,83 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link CbrtFunction} */ public class CbrtFunctionTest { - private static void testExpression(LiteralExpression literal, double expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) literal); - Expression cbrtFunction = new CbrtFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - cbrtFunction.evaluate(null, ptr); - Double result = - (Double) cbrtFunction.getDataType().toObject(ptr, cbrtFunction.getSortOrder()); - assertTrue(Math.abs(result.doubleValue() - expected) <= 1e-9); + private static void testExpression(LiteralExpression literal, double expected) + throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + Expression cbrtFunction = new CbrtFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + cbrtFunction.evaluate(null, ptr); + Double result = (Double) cbrtFunction.getDataType().toObject(ptr, cbrtFunction.getSortOrder()); + assertTrue(Math.abs(result.doubleValue() - expected) <= 1e-9); + } + + private static void test(Number value, PNumericType dataType, double expected) + throws SQLException { + LiteralExpression literal; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + testExpression(literal, expected); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + testExpression(literal, expected); + } + + private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { + double[] expected = new double[value.length]; + for (int i = 0; i < expected.length; ++i) { + expected[i] = Math.cbrt(value[i].doubleValue()); } - - private static void test(Number value, PNumericType dataType, double expected) - throws SQLException { - LiteralExpression literal; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - testExpression(literal, expected); - literal = LiteralExpression.newConstant(value, 
dataType, SortOrder.DESC); - testExpression(literal, expected); + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i]); } + } - private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { - double[] expected = new double[value.length]; - for (int i = 0; i < expected.length; ++i) { - expected[i] = Math.cbrt(value[i].doubleValue()); - } - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i]); - } - } - - @Test - public void testCbrtFunction() throws Exception { - Random random = new Random(); + @Test + public void testCbrtFunction() throws Exception { + Random random = new Random(); - testBatch( - new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()), - BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE); + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234), + BigDecimal.valueOf(random.nextDouble()), BigDecimal.valueOf(random.nextDouble()) }, + PDecimal.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); - testBatch( - new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), - random.nextDouble() }, PDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), + random.nextDouble() }, PDouble.INSTANCE); - testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); - testBatch( - new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, - random.nextLong(), random.nextLong() }, PLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, + random.nextLong(), random.nextLong() }, PLong.INSTANCE); - testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); - testBatch( - new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, - random.nextInt(), random.nextInt() }, PInteger.INSTANCE); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, + random.nextInt(), random.nextInt() }, PInteger.INSTANCE); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + 
PSmallint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/CoerceExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/CoerceExpressionTest.java index 8f0eda2a0af..092b62fc394 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/CoerceExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/CoerceExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,93 +40,89 @@ /** * Test class for unit-testing {@link CoerceExpression} - * - * * @since 0.1 - * */ public class CoerceExpressionTest { - - private static final HashMap map = new HashMap(); - - static { - map.put(String.class, "a"); - map.put(Long.class, 1l); - map.put(Integer.class, 1); - map.put(Short.class, 1); - map.put(Byte.class, 1); - map.put(Float.class, 1.00f); - map.put(Double.class, 1.00d); - map.put(BigDecimal.class, BigDecimal.ONE); - map.put(Timestamp.class, new Timestamp(0)); - map.put(Time.class, new Time(0)); - map.put(Date.class, new Date(0)); - map.put(Boolean.class, Boolean.TRUE); - map.put(byte[].class, new byte[]{-128, 0, 0, 1}); - } - - @Test - public void testCoerceExpressionSupportsCoercingIntToDecimal() throws Exception { - LiteralExpression v = LiteralExpression.newConstant(1, PInteger.INSTANCE); - CoerceExpression e = new CoerceExpression(v, PDecimal.INSTANCE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - e.evaluate(null, ptr); - Object obj = e.getDataType().toObject(ptr); - assertTrue(obj instanceof BigDecimal); - BigDecimal value = (BigDecimal)obj; - assertTrue(value.equals(BigDecimal.valueOf(1))); - } - - @Test - public void testCoerceExpressionSupportsCoercingCharToVarchar() throws Exception { - LiteralExpression v = LiteralExpression.newConstant("a", PChar.INSTANCE); - CoerceExpression e = new CoerceExpression(v, PVarchar.INSTANCE); + + private static final HashMap map = new HashMap(); + + static { + map.put(String.class, "a"); + map.put(Long.class, 1l); + map.put(Integer.class, 1); + map.put(Short.class, 1); + map.put(Byte.class, 1); + map.put(Float.class, 1.00f); + map.put(Double.class, 1.00d); + map.put(BigDecimal.class, BigDecimal.ONE); + map.put(Timestamp.class, new Timestamp(0)); + map.put(Time.class, new Time(0)); + map.put(Date.class, new Date(0)); + map.put(Boolean.class, Boolean.TRUE); + map.put(byte[].class, new byte[] { -128, 0, 0, 1 }); + } + + @Test + public void testCoerceExpressionSupportsCoercingIntToDecimal() throws Exception { + LiteralExpression v = LiteralExpression.newConstant(1, PInteger.INSTANCE); + CoerceExpression e = new CoerceExpression(v, PDecimal.INSTANCE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + e.evaluate(null, ptr); + Object obj = 
e.getDataType().toObject(ptr); + assertTrue(obj instanceof BigDecimal); + BigDecimal value = (BigDecimal) obj; + assertTrue(value.equals(BigDecimal.valueOf(1))); + } + + @Test + public void testCoerceExpressionSupportsCoercingCharToVarchar() throws Exception { + LiteralExpression v = LiteralExpression.newConstant("a", PChar.INSTANCE); + CoerceExpression e = new CoerceExpression(v, PVarchar.INSTANCE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + e.evaluate(null, ptr); + Object obj = e.getDataType().toObject(ptr); + assertTrue(obj instanceof String); + String value = (String) obj; + assertTrue(value.equals("a")); + } + + @Test + public void testCoerceExpressionSupportsCoercingIntToLong() throws Exception { + LiteralExpression v = LiteralExpression.newConstant(1, PInteger.INSTANCE); + CoerceExpression e = new CoerceExpression(v, PLong.INSTANCE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + e.evaluate(null, ptr); + Object obj = e.getDataType().toObject(ptr); + assertTrue(obj instanceof Long); + Long value = (Long) obj; + assertTrue(value.equals(Long.valueOf(1))); + } + + @Test + public void testCoerceExpressionSupportsCoercingAllPDataTypesToVarBinary() throws Exception { + for (PDataType p : PDataType.values()) { + if (!p.isArrayType() && !p.equals(PJson.INSTANCE) && !p.equals(PBson.INSTANCE)) { + LiteralExpression v = LiteralExpression.newConstant(map.get(p.getJavaClass()), p); + CoerceExpression e = new CoerceExpression(v, PVarbinary.INSTANCE); ImmutableBytesWritable ptr = new ImmutableBytesWritable(); e.evaluate(null, ptr); Object obj = e.getDataType().toObject(ptr); - assertTrue(obj instanceof String); - String value = (String)obj; - assertTrue(value.equals("a")); + assertTrue("Coercing to VARBINARY failed for PDataType " + p, obj instanceof byte[]); + } } - - @Test - public void testCoerceExpressionSupportsCoercingIntToLong() throws Exception { - LiteralExpression v = LiteralExpression.newConstant(1, PInteger.INSTANCE); - CoerceExpression e = new CoerceExpression(v, PLong.INSTANCE); + } + + @Test + public void testCoerceExpressionSupportsCoercingAllPDataTypesToBinary() throws Exception { + for (PDataType p : PDataType.values()) { + if (!p.isArrayType() && !p.equals(PJson.INSTANCE) && !p.equals(PBson.INSTANCE)) { + LiteralExpression v = LiteralExpression.newConstant(map.get(p.getJavaClass()), p); + CoerceExpression e = new CoerceExpression(v, PBinary.INSTANCE); ImmutableBytesWritable ptr = new ImmutableBytesWritable(); e.evaluate(null, ptr); Object obj = e.getDataType().toObject(ptr); - assertTrue(obj instanceof Long); - Long value = (Long)obj; - assertTrue(value.equals(Long.valueOf(1))); + assertTrue("Coercing to BINARY failed for PDataType " + p, obj instanceof byte[]); + } } - - @Test - public void testCoerceExpressionSupportsCoercingAllPDataTypesToVarBinary() throws Exception { - for (PDataType p : PDataType.values()) { - if (!p.isArrayType() && !p.equals(PJson.INSTANCE) && !p.equals(PBson.INSTANCE)) { - LiteralExpression v = LiteralExpression.newConstant(map.get(p.getJavaClass()), p); - CoerceExpression e = new CoerceExpression(v, PVarbinary.INSTANCE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - e.evaluate(null, ptr); - Object obj = e.getDataType().toObject(ptr); - assertTrue("Coercing to VARBINARY failed for PDataType " + p, - obj instanceof byte[]); - } - } - } - - @Test - public void testCoerceExpressionSupportsCoercingAllPDataTypesToBinary() throws Exception { - for (PDataType p : PDataType.values()) { - if (!p.isArrayType() 
&& !p.equals(PJson.INSTANCE) && !p.equals(PBson.INSTANCE)) { - LiteralExpression v = LiteralExpression.newConstant(map.get(p.getJavaClass()), p); - CoerceExpression e = new CoerceExpression(v, PBinary.INSTANCE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - e.evaluate(null, ptr); - Object obj = e.getDataType().toObject(ptr); - assertTrue("Coercing to BINARY failed for PDataType " + p, obj instanceof byte[]); - } - } - } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java index 0856e79dc9d..7ce6af2d41d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,86 +38,90 @@ public class ColumnExpressionTest { - @Test - public void testSerialization() throws Exception { - int maxLen = 30; - int scale = 5; - PName colName = PNameFactory.newName("c1"); - PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, scale, - true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - ColumnExpression colExp = new KeyValueColumnExpression(column); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dOut = new DataOutputStream(baos); - colExp.write(dOut); - dOut.flush(); + @Test + public void testSerialization() throws Exception { + int maxLen = 30; + int scale = 5; + PName colName = PNameFactory.newName("c1"); + PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, + scale, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP); + ColumnExpression colExp = new KeyValueColumnExpression(column); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dOut = new DataOutputStream(baos); + colExp.write(dOut); + dOut.flush(); - ColumnExpression colExp2 = new KeyValueColumnExpression(); - byte[] bytes = baos.toByteArray(); - DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); - colExp2.readFields(dIn); - assertEquals(maxLen, colExp2.getMaxLength().intValue()); - assertEquals(scale, colExp2.getScale().intValue()); - assertEquals(PDecimal.INSTANCE, colExp2.getDataType()); - } + ColumnExpression colExp2 = new KeyValueColumnExpression(); + byte[] bytes = baos.toByteArray(); + DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); + colExp2.readFields(dIn); + assertEquals(maxLen, colExp2.getMaxLength().intValue()); + assertEquals(scale, colExp2.getScale().intValue()); + assertEquals(PDecimal.INSTANCE, colExp2.getDataType()); + } - @Test - public void testSerializationWithNullScale() throws Exception { - int maxLen = 30; - PName colName = PNameFactory.newName("c1"); - PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PBinary.INSTANCE, maxLen, null, - true, 20, SortOrder.getDefault(), 0, null, false, null, false, 
false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - ColumnExpression colExp = new KeyValueColumnExpression(column); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dOut = new DataOutputStream(baos); - colExp.write(dOut); - dOut.flush(); + @Test + public void testSerializationWithNullScale() throws Exception { + int maxLen = 30; + PName colName = PNameFactory.newName("c1"); + PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PBinary.INSTANCE, maxLen, + null, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP); + ColumnExpression colExp = new KeyValueColumnExpression(column); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dOut = new DataOutputStream(baos); + colExp.write(dOut); + dOut.flush(); - ColumnExpression colExp2 = new KeyValueColumnExpression(); - byte[] bytes = baos.toByteArray(); - DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); - colExp2.readFields(dIn); - assertEquals(maxLen, colExp2.getMaxLength().intValue()); - assertNull(colExp2.getScale()); - assertEquals(PBinary.INSTANCE, colExp2.getDataType()); - } + ColumnExpression colExp2 = new KeyValueColumnExpression(); + byte[] bytes = baos.toByteArray(); + DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); + colExp2.readFields(dIn); + assertEquals(maxLen, colExp2.getMaxLength().intValue()); + assertNull(colExp2.getScale()); + assertEquals(PBinary.INSTANCE, colExp2.getDataType()); + } - @Test - public void testSerializationWithNullMaxLength() throws Exception { - int scale = 5; - PName colName = PNameFactory.newName("c1"); - PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PVarchar.INSTANCE, null, scale, - true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - ColumnExpression colExp = new KeyValueColumnExpression(column); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dOut = new DataOutputStream(baos); - colExp.write(dOut); - dOut.flush(); + @Test + public void testSerializationWithNullMaxLength() throws Exception { + int scale = 5; + PName colName = PNameFactory.newName("c1"); + PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PVarchar.INSTANCE, null, + scale, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP); + ColumnExpression colExp = new KeyValueColumnExpression(column); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dOut = new DataOutputStream(baos); + colExp.write(dOut); + dOut.flush(); - ColumnExpression colExp2 = new KeyValueColumnExpression(); - byte[] bytes = baos.toByteArray(); - DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); - colExp2.readFields(dIn); - assertNull(colExp2.getMaxLength()); - assertEquals(scale, colExp2.getScale().intValue()); - assertEquals(PVarchar.INSTANCE, colExp2.getDataType()); - } + ColumnExpression colExp2 = new KeyValueColumnExpression(); + byte[] bytes = baos.toByteArray(); + DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); + colExp2.readFields(dIn); + assertNull(colExp2.getMaxLength()); + assertEquals(scale, colExp2.getScale().intValue()); + assertEquals(PVarchar.INSTANCE, colExp2.getDataType()); + } - @Test - public void 
testSerializationWithNullScaleAndMaxLength() throws Exception { - PName colName = PNameFactory.newName("c1"); - PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, null, null, true, - 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP); - ColumnExpression colExp = new KeyValueColumnExpression(column); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream dOut = new DataOutputStream(baos); - colExp.write(dOut); - dOut.flush(); + @Test + public void testSerializationWithNullScaleAndMaxLength() throws Exception { + PName colName = PNameFactory.newName("c1"); + PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, null, + null, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, + colName.getBytes(), HConstants.LATEST_TIMESTAMP); + ColumnExpression colExp = new KeyValueColumnExpression(column); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dOut = new DataOutputStream(baos); + colExp.write(dOut); + dOut.flush(); - ColumnExpression colExp2 = new KeyValueColumnExpression(); - byte[] bytes = baos.toByteArray(); - DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); - colExp2.readFields(dIn); - assertNull(colExp2.getMaxLength()); - assertNull(colExp2.getScale()); - } + ColumnExpression colExp2 = new KeyValueColumnExpression(); + byte[] bytes = baos.toByteArray(); + DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(bytes, 0, bytes.length)); + colExp2.readFields(dIn); + assertNull(colExp2.getMaxLength()); + assertNull(colExp2.getScale()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/DeterminismTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/DeterminismTest.java index 4e4a6482eaf..a754e7b7e63 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/DeterminismTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/DeterminismTest.java @@ -1,11 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.expression; @@ -14,24 +22,24 @@ import org.junit.Test; public class DeterminismTest { - @Test - public void testCombine() { - // combining a determinism enum with ALWAYS should always return the - // other determinism - assertEquals("Unexpected result ", Determinism.PER_ROW, - Determinism.ALWAYS.combine(Determinism.PER_ROW)); - assertEquals("Unexpected result ", Determinism.PER_STATEMENT, - Determinism.ALWAYS.combine(Determinism.PER_STATEMENT)); - assertEquals("Unexpected result ", Determinism.PER_STATEMENT, - Determinism.PER_STATEMENT.combine(Determinism.ALWAYS)); - assertEquals("Unexpected result ", Determinism.PER_ROW, - Determinism.PER_ROW.combine(Determinism.ALWAYS)); - - // combining PER_STATEMENT and PER_ROW should return PER_ROW - assertEquals("Unexpected result ", Determinism.PER_ROW, - Determinism.PER_STATEMENT.combine(Determinism.PER_ROW)); - assertEquals("Unexpected result ", Determinism.PER_ROW, - Determinism.PER_ROW.combine(Determinism.PER_STATEMENT)); + @Test + public void testCombine() { + // combining a determinism enum with ALWAYS should always return the + // other determinism + assertEquals("Unexpected result ", Determinism.PER_ROW, + Determinism.ALWAYS.combine(Determinism.PER_ROW)); + assertEquals("Unexpected result ", Determinism.PER_STATEMENT, + Determinism.ALWAYS.combine(Determinism.PER_STATEMENT)); + assertEquals("Unexpected result ", Determinism.PER_STATEMENT, + Determinism.PER_STATEMENT.combine(Determinism.ALWAYS)); + assertEquals("Unexpected result ", Determinism.PER_ROW, + Determinism.PER_ROW.combine(Determinism.ALWAYS)); - } + // combining PER_STATEMENT and PER_ROW should return PER_ROW + assertEquals("Unexpected result ", Determinism.PER_ROW, + Determinism.PER_STATEMENT.combine(Determinism.PER_ROW)); + assertEquals("Unexpected result ", Determinism.PER_ROW, + Determinism.PER_ROW.combine(Determinism.PER_STATEMENT)); + + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java index bfdc0a680f5..d9bdf1b5390 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,95 +41,91 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link ExpFunction} */ public class ExpFunctionTest { - private static boolean testExpression(LiteralExpression literal, double expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) literal); - Expression sqrtFunction = new ExpFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean ret = sqrtFunction.evaluate(null, ptr); - if (ret) { - Double result = - (Double) sqrtFunction.getDataType().toObject(ptr, sqrtFunction.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expected)); - } - return ret; + private static boolean testExpression(LiteralExpression literal, double expected) + throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + Expression sqrtFunction = new ExpFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean ret = sqrtFunction.evaluate(null, ptr); + if (ret) { + Double result = + (Double) sqrtFunction.getDataType().toObject(ptr, sqrtFunction.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expected)); } - - private static void test(Number value, PNumericType dataType, double expected) - throws SQLException { - LiteralExpression literal; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - boolean ret1 = testExpression(literal, expected); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - boolean ret2 = testExpression(literal, expected); - assertEquals(ret1, ret2); + return ret; + } + + private static void test(Number value, PNumericType dataType, double expected) + throws SQLException { + LiteralExpression literal; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + boolean ret1 = testExpression(literal, expected); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + boolean ret2 = testExpression(literal, expected); + assertEquals(ret1, ret2); + } + + private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { + double[] expected = new double[value.length]; + for (int i = 0; i < expected.length; ++i) { + expected[i] = Math.exp(value[i].doubleValue()); } - - private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { - double[] expected = new double[value.length]; - for (int i = 0; i < expected.length; ++i) { - expected[i] = Math.exp(value[i].doubleValue()); - } - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i]); - } + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i]); } + } - @Test - public void testSqrtFunction() throws Exception { - Random random = new Random(); + @Test + public void testSqrtFunction() throws Exception { + Random random = new Random(); - testBatch( - new BigDecimal[] 
{ BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()), - BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE); + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234), + BigDecimal.valueOf(random.nextDouble()), BigDecimal.valueOf(random.nextDouble()) }, + PDecimal.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); - testBatch( - new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), - random.nextDouble() }, PDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), + random.nextDouble() }, PDouble.INSTANCE); - testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); - testBatch( - new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, - random.nextLong(), random.nextLong() }, PLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, + random.nextLong(), random.nextLong() }, PLong.INSTANCE); - testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); - testBatch( - new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, - random.nextInt(), random.nextInt() }, PInteger.INSTANCE); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, + random.nextInt(), random.nextInt() }, PInteger.INSTANCE); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + PSmallint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/GetSetByteBitFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/GetSetByteBitFunctionTest.java index d49cda17fb7..bc49c42e434 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/GetSetByteBitFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/GetSetByteBitFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,157 +33,153 @@ import org.apache.phoenix.schema.types.PBinaryBase; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarbinary; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link GetByteFunction} {@link SetByteFunction} {@link GetBitFunction} * {@link SetBitFunction} */ public class GetSetByteBitFunctionTest { - private void testGetByteExpression(Expression data, Expression offset, int expected) - throws SQLException { - List expressions = Lists.newArrayList(data, offset); - Expression getByteFunction = new GetByteFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - getByteFunction.evaluate(null, ptr); - Integer result = - (Integer) getByteFunction.getDataType().toObject(ptr, - getByteFunction.getSortOrder()); - assertEquals(expected, result.intValue()); - } + private void testGetByteExpression(Expression data, Expression offset, int expected) + throws SQLException { + List expressions = Lists.newArrayList(data, offset); + Expression getByteFunction = new GetByteFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + getByteFunction.evaluate(null, ptr); + Integer result = + (Integer) getByteFunction.getDataType().toObject(ptr, getByteFunction.getSortOrder()); + assertEquals(expected, result.intValue()); + } - private void testSetByteExpression(Expression data, Expression offset, Expression newValue, - byte[] expected) throws SQLException { - List expressions = Lists.newArrayList(data, offset, newValue); - Expression setByteFunction = new SetByteFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - setByteFunction.evaluate(null, ptr); - byte[] result = - (byte[]) setByteFunction.getDataType() - .toObject(ptr, setByteFunction.getSortOrder()); - assertArrayEquals(expected, result); - } + private void testSetByteExpression(Expression data, Expression offset, Expression newValue, + byte[] expected) throws SQLException { + List expressions = Lists.newArrayList(data, offset, newValue); + Expression setByteFunction = new SetByteFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + setByteFunction.evaluate(null, ptr); + byte[] result = + (byte[]) setByteFunction.getDataType().toObject(ptr, setByteFunction.getSortOrder()); + assertArrayEquals(expected, result); + } - private void testGetByte(byte[] bytes, int offset, PBinaryBase dataType, int expected) - throws SQLException { - LiteralExpression dataExpr, offsetExpr; - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, 
SortOrder.ASC); - testGetByteExpression(dataExpr, offsetExpr, expected); - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); - testGetByteExpression(dataExpr, offsetExpr, expected); - } + private void testGetByte(byte[] bytes, int offset, PBinaryBase dataType, int expected) + throws SQLException { + LiteralExpression dataExpr, offsetExpr; + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); + testGetByteExpression(dataExpr, offsetExpr, expected); + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); + testGetByteExpression(dataExpr, offsetExpr, expected); + } - private void testSetByte(byte[] bytes, int offset, int newValue, PBinaryBase dataType, - byte[] expected) throws SQLException { - LiteralExpression dataExpr, offsetExpr, newValueExpr; - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); - newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.ASC); - testSetByteExpression(dataExpr, offsetExpr, newValueExpr, expected); - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); - newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.DESC); - testSetByteExpression(dataExpr, offsetExpr, newValueExpr, expected); - } + private void testSetByte(byte[] bytes, int offset, int newValue, PBinaryBase dataType, + byte[] expected) throws SQLException { + LiteralExpression dataExpr, offsetExpr, newValueExpr; + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); + newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.ASC); + testSetByteExpression(dataExpr, offsetExpr, newValueExpr, expected); + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); + newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.DESC); + testSetByteExpression(dataExpr, offsetExpr, newValueExpr, expected); + } - @Test - public void testByteBatch() throws SQLException { - byte[] bytes = new byte[256]; - int sum = 0; - for (int i = 0; i < 256; ++i) { - bytes[i] = (byte) (i & 0xff); - sum += bytes[i]; - } - assertEquals(-128, sum); - for (int offset = 0; offset < 256; ++offset) { - testGetByte(bytes, offset, PBinary.INSTANCE, bytes[offset]); - testGetByte(bytes, offset, PVarbinary.INSTANCE, bytes[offset]); - } - for (int offset = 0; offset < 256; ++offset) - for (int tmp = Byte.MIN_VALUE; tmp <= Byte.MAX_VALUE; ++tmp) { - byte[] expected = new byte[bytes.length]; - System.arraycopy(bytes, 0, expected, 0, bytes.length); - expected[offset] = (byte) (tmp & 0xff); - testSetByte(bytes, offset, tmp, PBinary.INSTANCE, expected); - } + @Test + public void testByteBatch() throws SQLException { + byte[] bytes = new byte[256]; + int sum = 0; + for (int i = 0; i < 256; ++i) { + bytes[i] = (byte) (i & 0xff); + sum += bytes[i]; } - - private void 
testGetBitExpression(Expression data, Expression offset, int expected) - throws SQLException { - List expressions = Lists.newArrayList(data, offset); - Expression getBitFunction = new GetBitFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - getBitFunction.evaluate(null, ptr); - Integer result = - (Integer) getBitFunction.getDataType().toObject(ptr, getBitFunction.getSortOrder()); - assertEquals(expected, result.intValue()); + assertEquals(-128, sum); + for (int offset = 0; offset < 256; ++offset) { + testGetByte(bytes, offset, PBinary.INSTANCE, bytes[offset]); + testGetByte(bytes, offset, PVarbinary.INSTANCE, bytes[offset]); } + for (int offset = 0; offset < 256; ++offset) + for (int tmp = Byte.MIN_VALUE; tmp <= Byte.MAX_VALUE; ++tmp) { + byte[] expected = new byte[bytes.length]; + System.arraycopy(bytes, 0, expected, 0, bytes.length); + expected[offset] = (byte) (tmp & 0xff); + testSetByte(bytes, offset, tmp, PBinary.INSTANCE, expected); + } + } - private void testSetBitExpression(Expression data, Expression offset, Expression newValue, - byte[] expected) throws SQLException { - List expressions = Lists.newArrayList(data, offset, newValue); - Expression setBitFunction = new SetBitFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - setBitFunction.evaluate(null, ptr); - byte[] result = - (byte[]) setBitFunction.getDataType().toObject(ptr, setBitFunction.getSortOrder()); - assertArrayEquals(expected, result); - } + private void testGetBitExpression(Expression data, Expression offset, int expected) + throws SQLException { + List expressions = Lists.newArrayList(data, offset); + Expression getBitFunction = new GetBitFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + getBitFunction.evaluate(null, ptr); + Integer result = + (Integer) getBitFunction.getDataType().toObject(ptr, getBitFunction.getSortOrder()); + assertEquals(expected, result.intValue()); + } - private void testGetBit(byte[] bytes, int offset, PBinaryBase dataType, int expected) - throws SQLException { - LiteralExpression dataExpr, offsetExpr; - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); - testGetBitExpression(dataExpr, offsetExpr, expected); - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); - testGetBitExpression(dataExpr, offsetExpr, expected); - } + private void testSetBitExpression(Expression data, Expression offset, Expression newValue, + byte[] expected) throws SQLException { + List expressions = Lists.newArrayList(data, offset, newValue); + Expression setBitFunction = new SetBitFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + setBitFunction.evaluate(null, ptr); + byte[] result = + (byte[]) setBitFunction.getDataType().toObject(ptr, setBitFunction.getSortOrder()); + assertArrayEquals(expected, result); + } - private void testSetBit(byte[] bytes, int offset, int newValue, PBinaryBase dataType, - byte[] expected) throws SQLException { - LiteralExpression dataExpr, offsetExpr, newValueExpr; - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); - newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.ASC); - 
testSetBitExpression(dataExpr, offsetExpr, newValueExpr, expected); - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); - offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); - newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.DESC); - testSetBitExpression(dataExpr, offsetExpr, newValueExpr, expected); - } + private void testGetBit(byte[] bytes, int offset, PBinaryBase dataType, int expected) + throws SQLException { + LiteralExpression dataExpr, offsetExpr; + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); + testGetBitExpression(dataExpr, offsetExpr, expected); + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); + testGetBitExpression(dataExpr, offsetExpr, expected); + } - @Test - public void testGetBitBatch() throws SQLException { - byte[] bytes = new byte[256]; - int sum = 0; - for (int i = 0; i < 256; ++i) { - bytes[i] = (byte) (i & 0xff); - sum += bytes[i]; - } - assertEquals(-128, sum); - for (int offset = 0; offset < 256 * Byte.SIZE; ++offset) { - byte expected = - (bytes[offset / Byte.SIZE] & (1 << (offset % Byte.SIZE))) != 0 ? (byte) 1 - : (byte) 0; - testGetBit(bytes, offset, PBinary.INSTANCE, expected); - testGetBit(bytes, offset, PVarbinary.INSTANCE, expected); - } - for (int offset = 0; offset < 256 * Byte.SIZE; ++offset) - for (int tmp = 0; tmp <= 1; ++tmp) { - byte[] expected = new byte[bytes.length]; - System.arraycopy(bytes, 0, expected, 0, bytes.length); - if (tmp != 0) { - expected[offset / Byte.SIZE] |= (byte) (1 << (offset % Byte.SIZE)); - } else { - expected[offset / Byte.SIZE] &= (byte) (~(1 << (offset % Byte.SIZE))); - } - testSetBit(bytes, offset, tmp, PBinary.INSTANCE, expected); - } + private void testSetBit(byte[] bytes, int offset, int newValue, PBinaryBase dataType, + byte[] expected) throws SQLException { + LiteralExpression dataExpr, offsetExpr, newValueExpr; + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.ASC); + newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.ASC); + testSetBitExpression(dataExpr, offsetExpr, newValueExpr, expected); + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); + offsetExpr = LiteralExpression.newConstant(offset, PInteger.INSTANCE, SortOrder.DESC); + newValueExpr = LiteralExpression.newConstant(newValue, PInteger.INSTANCE, SortOrder.DESC); + testSetBitExpression(dataExpr, offsetExpr, newValueExpr, expected); + } + + @Test + public void testGetBitBatch() throws SQLException { + byte[] bytes = new byte[256]; + int sum = 0; + for (int i = 0; i < 256; ++i) { + bytes[i] = (byte) (i & 0xff); + sum += bytes[i]; } + assertEquals(-128, sum); + for (int offset = 0; offset < 256 * Byte.SIZE; ++offset) { + byte expected = + (bytes[offset / Byte.SIZE] & (1 << (offset % Byte.SIZE))) != 0 ? 
(byte) 1 : (byte) 0; + testGetBit(bytes, offset, PBinary.INSTANCE, expected); + testGetBit(bytes, offset, PVarbinary.INSTANCE, expected); + } + for (int offset = 0; offset < 256 * Byte.SIZE; ++offset) + for (int tmp = 0; tmp <= 1; ++tmp) { + byte[] expected = new byte[bytes.length]; + System.arraycopy(bytes, 0, expected, 0, bytes.length); + if (tmp != 0) { + expected[offset / Byte.SIZE] |= (byte) (1 << (offset % Byte.SIZE)); + } else { + expected[offset / Byte.SIZE] &= (byte) (~(1 << (offset % Byte.SIZE))); + } + testSetBit(bytes, offset, tmp, PBinary.INSTANCE, expected); + } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java index e66ad13b29c..1c54aa9d234 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ILikeExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,48 +31,48 @@ import org.junit.Test; public class ILikeExpressionTest { - private boolean testExpression (String value, String expression, SortOrder sortorder) - throws SQLException { - LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder); - LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder); - List children = Arrays.asList(v,p); - LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE); - LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean evaluated1 = e1.evaluate(null, ptr); - Boolean result1 = (Boolean)e1.getDataType().toObject(ptr); - assertTrue(evaluated1); - boolean evaluated2 = e2.evaluate(null, ptr); - Boolean result2 = (Boolean)e2.getDataType().toObject(ptr); - assertTrue(evaluated2); - assertEquals(result1, result2); - return result1; - } + private boolean testExpression(String value, String expression, SortOrder sortorder) + throws SQLException { + LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder); + LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder); + List children = Arrays. 
asList(v, p); + LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE); + LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_INSENSITIVE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean evaluated1 = e1.evaluate(null, ptr); + Boolean result1 = (Boolean) e1.getDataType().toObject(ptr); + assertTrue(evaluated1); + boolean evaluated2 = e2.evaluate(null, ptr); + Boolean result2 = (Boolean) e2.getDataType().toObject(ptr); + assertTrue(evaluated2); + assertEquals(result1, result2); + return result1; + } - private boolean testExpression(String value, String expression) throws SQLException { - boolean result1 = testExpression(value, expression, SortOrder.ASC); - boolean result2 = testExpression(value, expression, SortOrder.DESC); - assertEquals(result1, result2); - return result1; - } + private boolean testExpression(String value, String expression) throws SQLException { + boolean result1 = testExpression(value, expression, SortOrder.ASC); + boolean result2 = testExpression(value, expression, SortOrder.DESC); + assertEquals(result1, result2); + return result1; + } - @Test - public void testStartWildcard() throws Exception { - assertEquals(Boolean.FALSE, testExpression ("149na7-app1-2-", "%-w")); - assertEquals(Boolean.TRUE, testExpression ("149na7-app1-2-", "%-2%")); - assertEquals(Boolean.TRUE, testExpression ("149na7-app1-2-", "%4%7%2%")); - assertEquals(Boolean.FALSE, testExpression ("149na7-app1-2-", "%9%4%2%")); - } + @Test + public void testStartWildcard() throws Exception { + assertEquals(Boolean.FALSE, testExpression("149na7-app1-2-", "%-w")); + assertEquals(Boolean.TRUE, testExpression("149na7-app1-2-", "%-2%")); + assertEquals(Boolean.TRUE, testExpression("149na7-app1-2-", "%4%7%2%")); + assertEquals(Boolean.FALSE, testExpression("149na7-app1-2-", "%9%4%2%")); + } - @Test - public void testCaseSensitive() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("test", "test")); - assertEquals(Boolean.TRUE, testExpression ("test", "teSt")); - } + @Test + public void testCaseSensitive() throws Exception { + assertEquals(Boolean.TRUE, testExpression("test", "test")); + assertEquals(Boolean.TRUE, testExpression("test", "teSt")); + } - @Test - public void testStartWildcardAndCaseInsensitive() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("test", "%s%")); - assertEquals(Boolean.TRUE, testExpression ("test", "%S%")); - } + @Test + public void testStartWildcardAndCaseInsensitive() throws Exception { + assertEquals(Boolean.TRUE, testExpression("test", "%s%")); + assertEquals(Boolean.TRUE, testExpression("test", "%S%")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/InListExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/InListExpressionTest.java index 27e9bd888b3..53698c85cba 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/InListExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/InListExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,250 +21,259 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.when; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; - +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.mockito.Mockito; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; - - public class InListExpressionTest { - @Test - public void testHashCode() throws Exception { - int valuesNumber = 500000; - List values = new ArrayList<>(valuesNumber); - for (int i = 0; i < valuesNumber; i++) { - values.add(new ImmutableBytesPtr(Bytes.toBytes(i))); - } - InListExpression exp = new InListExpression(values); - - // first time - long startTs = System.currentTimeMillis(); - int firstHashCode = exp.hashCode(); - long firstTimeCost = System.currentTimeMillis() - startTs; - - // the rest access - int restAccessNumber = 3; - startTs = System.currentTimeMillis(); - List hashCodes = Lists.newArrayListWithExpectedSize(restAccessNumber); - for (int i = 0; i < restAccessNumber; i++) { - hashCodes.add(exp.hashCode()); - } - - // check time cost - long restTimeCost = System.currentTimeMillis() - startTs; - assertTrue("first time: " + firstTimeCost + " <= rest time: " + restTimeCost, - firstTimeCost > restTimeCost); - - // check hash code - for (int hashCode : hashCodes) { - assertEquals("hash code not equal, firstHashCode: " + firstHashCode + ", restHashCode: " - + hashCode, firstHashCode, hashCode); - } + @Test + public void testHashCode() throws Exception { + int valuesNumber = 500000; + List values = new ArrayList<>(valuesNumber); + for (int i = 0; i < valuesNumber; i++) { + values.add(new ImmutableBytesPtr(Bytes.toBytes(i))); } - - @Test - public void testGetSortedInListColumnKeyValuePairWithNoPkOrder() { - testGetSortedInListColumnKeyValuePair(false); + InListExpression exp = new InListExpression(values); + + // first time + long startTs = System.currentTimeMillis(); + int firstHashCode = exp.hashCode(); + long firstTimeCost = System.currentTimeMillis() - startTs; + + // the rest access + int restAccessNumber = 3; + startTs = System.currentTimeMillis(); + List hashCodes = Lists.newArrayListWithExpectedSize(restAccessNumber); + for (int i = 0; i < restAccessNumber; i++) { + hashCodes.add(exp.hashCode()); } - @Test - public void testGetSortedInListColumnKeyValuePairWithPkOrder() { - testGetSortedInListColumnKeyValuePair(true); - } + // check time cost + long restTimeCost = System.currentTimeMillis() - startTs; + assertTrue("first time: " + firstTimeCost + " <= rest time: " + restTimeCost, + firstTimeCost > restTimeCost); - private void testGetSortedInListColumnKeyValuePair(boolean isPkOrder) { - // mock literal - List expressionList = new ArrayList<>(); - LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); - List literalExpressions = new ArrayList<>(); - when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); - 
when(literalChild1.getBytes()).thenReturn(null); - when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); - literalExpressions.add(literalChild1); - literalExpressions.add(literalChild1); - - // mock row key column - List expressionChildren = new ArrayList<>(); - RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); - RowKeyColumnExpression rowKeyColumnExpressionMock2 = Mockito.mock(RowKeyColumnExpression.class); - - when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); - when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock2.getPosition()).thenReturn(2); - when(rowKeyColumnExpressionMock2.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); - when(rowKeyColumnExpressionMock2.getChildren()).thenReturn(literalExpressions); - - // mock row key column PK order position - if (isPkOrder) { - expressionChildren.add(rowKeyColumnExpressionMock1); - expressionChildren.add(rowKeyColumnExpressionMock2); - - } else { - expressionChildren.add(rowKeyColumnExpressionMock2); - expressionChildren.add(rowKeyColumnExpressionMock1); - - } - - RowValueConstructorExpression rvc1 = new RowValueConstructorExpression(expressionChildren, true); - RowValueConstructorExpression rvc2 = new RowValueConstructorExpression(literalExpressions, true); - expressionList.add(rvc1); - expressionList.add(rvc2); - - if (isPkOrder) { - assertEquals(1, ((RowKeyColumnExpression)expressionList.get(0).getChildren().get(0)).getPosition()); - assertEquals(2, ((RowKeyColumnExpression)expressionList.get(0).getChildren().get(1)).getPosition()); - } else { - assertEquals(2, ((RowKeyColumnExpression)expressionList.get(0).getChildren().get(0)).getPosition()); - assertEquals(1, ((RowKeyColumnExpression)expressionList.get(0).getChildren().get(1)).getPosition()); - } - - List inListColumnKeyValuePairList = - InListExpression.getSortedInListColumnKeyValuePair(expressionList); - - assertEquals(1, inListColumnKeyValuePairList.get(0).getRowKeyColumnExpression().getPosition()); - assertEquals(2, inListColumnKeyValuePairList.get(1).getRowKeyColumnExpression().getPosition()); + // check hash code + for (int hashCode : hashCodes) { + assertEquals( + "hash code not equal, firstHashCode: " + firstHashCode + ", restHashCode: " + hashCode, + firstHashCode, hashCode); } + } + + @Test + public void testGetSortedInListColumnKeyValuePairWithNoPkOrder() { + testGetSortedInListColumnKeyValuePair(false); + } + + @Test + public void testGetSortedInListColumnKeyValuePairWithPkOrder() { + testGetSortedInListColumnKeyValuePair(true); + } + + private void testGetSortedInListColumnKeyValuePair(boolean isPkOrder) { + // mock literal + List expressionList = new ArrayList<>(); + LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); + List literalExpressions = new ArrayList<>(); + when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); + when(literalChild1.getBytes()).thenReturn(null); + when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); + literalExpressions.add(literalChild1); + literalExpressions.add(literalChild1); + + // mock row key column + List expressionChildren = new ArrayList<>(); + RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); + RowKeyColumnExpression rowKeyColumnExpressionMock2 = Mockito.mock(RowKeyColumnExpression.class); + + 
when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); + when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock2.getPosition()).thenReturn(2); + when(rowKeyColumnExpressionMock2.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); + when(rowKeyColumnExpressionMock2.getChildren()).thenReturn(literalExpressions); + + // mock row key column PK order position + if (isPkOrder) { + expressionChildren.add(rowKeyColumnExpressionMock1); + expressionChildren.add(rowKeyColumnExpressionMock2); + + } else { + expressionChildren.add(rowKeyColumnExpressionMock2); + expressionChildren.add(rowKeyColumnExpressionMock1); - @Test - public void testGetSortedInListColumnKeyValuePairWithLessValueThanPkColumns() { - List expressionList = new ArrayList<>(); - LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); - List literalExpressions = new ArrayList<>(); - when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); - when(literalChild1.getBytes()).thenReturn(null); - when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); - literalExpressions.add(literalChild1); - literalExpressions.add(literalChild1); - - // mock row key column - List expressionChildren = new ArrayList<>(); - RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); - - when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); - when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); - - expressionChildren.add(rowKeyColumnExpressionMock1); - - RowValueConstructorExpression rvc1 = new RowValueConstructorExpression(expressionChildren, true); - RowValueConstructorExpression rvc2 = new RowValueConstructorExpression(literalExpressions, true); - expressionList.add(rvc1); - expressionList.add(rvc2); - - List inListColumnKeyValuePairList = - InListExpression.getSortedInListColumnKeyValuePair(expressionList); - - assertEquals(null, inListColumnKeyValuePairList); } - @Test - public void testGetSortedInListColumnKeyValuePairWithMoreValueThanPkColumn() { - List expressionList = new ArrayList<>(); - LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); - List literalExpressions = new ArrayList<>(); - when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); - when(literalChild1.getBytes()).thenReturn(null); - when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); - literalExpressions.add(literalChild1); - - // mock row key column - List expressionChildren = new ArrayList<>(); - RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); - when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); - when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); - - expressionChildren.add(rowKeyColumnExpressionMock1); - expressionChildren.add(rowKeyColumnExpressionMock1); - - RowValueConstructorExpression rvc1 = new RowValueConstructorExpression(expressionChildren, true); - RowValueConstructorExpression rvc2 = new RowValueConstructorExpression(literalExpressions, true); - expressionList.add(rvc1); - expressionList.add(rvc2); - - List inListColumnKeyValuePairList = - InListExpression.getSortedInListColumnKeyValuePair(expressionList); - - 
assertEquals(null, inListColumnKeyValuePairList); + RowValueConstructorExpression rvc1 = + new RowValueConstructorExpression(expressionChildren, true); + RowValueConstructorExpression rvc2 = + new RowValueConstructorExpression(literalExpressions, true); + expressionList.add(rvc1); + expressionList.add(rvc2); + + if (isPkOrder) { + assertEquals(1, + ((RowKeyColumnExpression) expressionList.get(0).getChildren().get(0)).getPosition()); + assertEquals(2, + ((RowKeyColumnExpression) expressionList.get(0).getChildren().get(1)).getPosition()); + } else { + assertEquals(2, + ((RowKeyColumnExpression) expressionList.get(0).getChildren().get(0)).getPosition()); + assertEquals(1, + ((RowKeyColumnExpression) expressionList.get(0).getChildren().get(1)).getPosition()); } - @Test - public void testInListColumnKeyValuePairClass() { - RowKeyColumnExpression rowKeyColumnExpression = Mockito.mock(RowKeyColumnExpression.class); - LiteralExpression literalChild = Mockito.mock(LiteralExpression.class); - - InListExpression.InListColumnKeyValuePair inListColumnKeyValuePair = - new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpression); - inListColumnKeyValuePair.addToLiteralExpressionList(literalChild); - - assertEquals(rowKeyColumnExpression, inListColumnKeyValuePair.getRowKeyColumnExpression()); - assertEquals(literalChild, inListColumnKeyValuePair.getLiteralExpressionList().get(0)); - } - - @Test - public void testGetSortedRowValueConstructorExpressionList() { - byte[] bytesValueOne = ByteBuffer.allocate(4).putInt(1).array(); - byte[] bytesValueTwo = ByteBuffer.allocate(4).putInt(1).array(); - // mock literal - List literalExpressions = new ArrayList<>(); - LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); - when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); - when(literalChild1.getBytes()).thenReturn(bytesValueOne); - when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); - literalExpressions.add(literalChild1); - - LiteralExpression literalChild2 = Mockito.mock(LiteralExpression.class); - when(literalChild2.getDataType()).thenReturn(PInteger.INSTANCE); - when(literalChild2.getBytes()).thenReturn(bytesValueTwo); - when(literalChild2.getDeterminism()).thenReturn(Determinism.ALWAYS); - literalExpressions.add(literalChild2); - - List expressionChildren = new ArrayList<>(); - RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); - RowKeyColumnExpression rowKeyColumnExpressionMock2 = Mockito.mock(RowKeyColumnExpression.class); - expressionChildren.add(rowKeyColumnExpressionMock1); - expressionChildren.add(rowKeyColumnExpressionMock2); - - when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); - when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock2.getPosition()).thenReturn(2); - when(rowKeyColumnExpressionMock2.getDeterminism()).thenReturn(Determinism.ALWAYS); - when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); - when(rowKeyColumnExpressionMock2.getChildren()).thenReturn(literalExpressions); - - //construct sorted InListColumnKeyValuePair list - List children = new ArrayList<>(); - InListExpression.InListColumnKeyValuePair rvc1 = - new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpressionMock1); - rvc1.addToLiteralExpressionList(literalChild1); - children.add(rvc1); - InListExpression.InListColumnKeyValuePair rvc2 = - new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpressionMock2); - 
rvc2.addToLiteralExpressionList(literalChild2); - children.add(rvc2); - - List result = InListExpression.getSortedRowValueConstructorExpressionList( - children,true, 1); - - assertTrue(result.get(0).getChildren().get(0) instanceof RowKeyColumnExpression); - assertTrue(result.get(0).getChildren().get(1) instanceof RowKeyColumnExpression); - assertEquals(1, ((RowKeyColumnExpression)result.get(0).getChildren().get(0)).getPosition()); - assertEquals(2, ((RowKeyColumnExpression)result.get(0).getChildren().get(1)).getPosition()); - - assertTrue(result.get(1).getChildren().get(0) instanceof LiteralExpression); - assertTrue(result.get(1).getChildren().get(1) instanceof LiteralExpression); - assertEquals(bytesValueOne, ((LiteralExpression)result.get(1).getChildren().get(0)).getBytes()); - assertEquals(bytesValueTwo, ((LiteralExpression)result.get(1).getChildren().get(1)).getBytes()); - } + List inListColumnKeyValuePairList = + InListExpression.getSortedInListColumnKeyValuePair(expressionList); + + assertEquals(1, inListColumnKeyValuePairList.get(0).getRowKeyColumnExpression().getPosition()); + assertEquals(2, inListColumnKeyValuePairList.get(1).getRowKeyColumnExpression().getPosition()); + } + + @Test + public void testGetSortedInListColumnKeyValuePairWithLessValueThanPkColumns() { + List expressionList = new ArrayList<>(); + LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); + List literalExpressions = new ArrayList<>(); + when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); + when(literalChild1.getBytes()).thenReturn(null); + when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); + literalExpressions.add(literalChild1); + literalExpressions.add(literalChild1); + + // mock row key column + List expressionChildren = new ArrayList<>(); + RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); + + when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); + when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); + + expressionChildren.add(rowKeyColumnExpressionMock1); + + RowValueConstructorExpression rvc1 = + new RowValueConstructorExpression(expressionChildren, true); + RowValueConstructorExpression rvc2 = + new RowValueConstructorExpression(literalExpressions, true); + expressionList.add(rvc1); + expressionList.add(rvc2); + + List inListColumnKeyValuePairList = + InListExpression.getSortedInListColumnKeyValuePair(expressionList); + + assertEquals(null, inListColumnKeyValuePairList); + } + + @Test + public void testGetSortedInListColumnKeyValuePairWithMoreValueThanPkColumn() { + List expressionList = new ArrayList<>(); + LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); + List literalExpressions = new ArrayList<>(); + when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); + when(literalChild1.getBytes()).thenReturn(null); + when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); + literalExpressions.add(literalChild1); + + // mock row key column + List expressionChildren = new ArrayList<>(); + RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); + when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); + when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); + + 
expressionChildren.add(rowKeyColumnExpressionMock1); + expressionChildren.add(rowKeyColumnExpressionMock1); + + RowValueConstructorExpression rvc1 = + new RowValueConstructorExpression(expressionChildren, true); + RowValueConstructorExpression rvc2 = + new RowValueConstructorExpression(literalExpressions, true); + expressionList.add(rvc1); + expressionList.add(rvc2); + + List inListColumnKeyValuePairList = + InListExpression.getSortedInListColumnKeyValuePair(expressionList); + + assertEquals(null, inListColumnKeyValuePairList); + } + + @Test + public void testInListColumnKeyValuePairClass() { + RowKeyColumnExpression rowKeyColumnExpression = Mockito.mock(RowKeyColumnExpression.class); + LiteralExpression literalChild = Mockito.mock(LiteralExpression.class); + + InListExpression.InListColumnKeyValuePair inListColumnKeyValuePair = + new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpression); + inListColumnKeyValuePair.addToLiteralExpressionList(literalChild); + + assertEquals(rowKeyColumnExpression, inListColumnKeyValuePair.getRowKeyColumnExpression()); + assertEquals(literalChild, inListColumnKeyValuePair.getLiteralExpressionList().get(0)); + } + + @Test + public void testGetSortedRowValueConstructorExpressionList() { + byte[] bytesValueOne = ByteBuffer.allocate(4).putInt(1).array(); + byte[] bytesValueTwo = ByteBuffer.allocate(4).putInt(1).array(); + // mock literal + List literalExpressions = new ArrayList<>(); + LiteralExpression literalChild1 = Mockito.mock(LiteralExpression.class); + when(literalChild1.getDataType()).thenReturn(PInteger.INSTANCE); + when(literalChild1.getBytes()).thenReturn(bytesValueOne); + when(literalChild1.getDeterminism()).thenReturn(Determinism.ALWAYS); + literalExpressions.add(literalChild1); + + LiteralExpression literalChild2 = Mockito.mock(LiteralExpression.class); + when(literalChild2.getDataType()).thenReturn(PInteger.INSTANCE); + when(literalChild2.getBytes()).thenReturn(bytesValueTwo); + when(literalChild2.getDeterminism()).thenReturn(Determinism.ALWAYS); + literalExpressions.add(literalChild2); + + List expressionChildren = new ArrayList<>(); + RowKeyColumnExpression rowKeyColumnExpressionMock1 = Mockito.mock(RowKeyColumnExpression.class); + RowKeyColumnExpression rowKeyColumnExpressionMock2 = Mockito.mock(RowKeyColumnExpression.class); + expressionChildren.add(rowKeyColumnExpressionMock1); + expressionChildren.add(rowKeyColumnExpressionMock2); + + when(rowKeyColumnExpressionMock1.getPosition()).thenReturn(1); + when(rowKeyColumnExpressionMock1.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock2.getPosition()).thenReturn(2); + when(rowKeyColumnExpressionMock2.getDeterminism()).thenReturn(Determinism.ALWAYS); + when(rowKeyColumnExpressionMock1.getChildren()).thenReturn(expressionChildren); + when(rowKeyColumnExpressionMock2.getChildren()).thenReturn(literalExpressions); + + // construct sorted InListColumnKeyValuePair list + List children = new ArrayList<>(); + InListExpression.InListColumnKeyValuePair rvc1 = + new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpressionMock1); + rvc1.addToLiteralExpressionList(literalChild1); + children.add(rvc1); + InListExpression.InListColumnKeyValuePair rvc2 = + new InListExpression.InListColumnKeyValuePair(rowKeyColumnExpressionMock2); + rvc2.addToLiteralExpressionList(literalChild2); + children.add(rvc2); + + List result = + InListExpression.getSortedRowValueConstructorExpressionList(children, true, 1); + + assertTrue(result.get(0).getChildren().get(0) instanceof 
RowKeyColumnExpression); + assertTrue(result.get(0).getChildren().get(1) instanceof RowKeyColumnExpression); + assertEquals(1, ((RowKeyColumnExpression) result.get(0).getChildren().get(0)).getPosition()); + assertEquals(2, ((RowKeyColumnExpression) result.get(0).getChildren().get(1)).getPosition()); + + assertTrue(result.get(1).getChildren().get(0) instanceof LiteralExpression); + assertTrue(result.get(1).getChildren().get(1) instanceof LiteralExpression); + assertEquals(bytesValueOne, + ((LiteralExpression) result.get(1).getChildren().get(0)).getBytes()); + assertEquals(bytesValueTwo, + ((LiteralExpression) result.get(1).getChildren().get(1)).getBytes()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java index 2e33e7b2efc..4b01ec391d8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/LikeExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,68 +31,68 @@ import org.junit.Test; public class LikeExpressionTest { - private boolean testExpression(String value, String expression, SortOrder sortorder) - throws SQLException { - LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder); - LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder); - List children = Arrays.asList(v,p); - LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); - LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean evaluated1 = e1.evaluate(null, ptr); - Boolean result1 = (Boolean)e1.getDataType().toObject(ptr); - assertTrue(evaluated1); - boolean evaluated2 = e2.evaluate(null, ptr); - Boolean result2 = (Boolean)e2.getDataType().toObject(ptr); - assertTrue(evaluated2); - assertEquals(result1, result2); - return result1; - } + private boolean testExpression(String value, String expression, SortOrder sortorder) + throws SQLException { + LiteralExpression v = LiteralExpression.newConstant(value, PVarchar.INSTANCE, sortorder); + LiteralExpression p = LiteralExpression.newConstant(expression, PVarchar.INSTANCE, sortorder); + List children = Arrays. 
asList(v, p); + LikeExpression e1 = ByteBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); + LikeExpression e2 = StringBasedLikeExpression.create(children, LikeType.CASE_SENSITIVE); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean evaluated1 = e1.evaluate(null, ptr); + Boolean result1 = (Boolean) e1.getDataType().toObject(ptr); + assertTrue(evaluated1); + boolean evaluated2 = e2.evaluate(null, ptr); + Boolean result2 = (Boolean) e2.getDataType().toObject(ptr); + assertTrue(evaluated2); + assertEquals(result1, result2); + return result1; + } - private boolean testExpression(String value, String expression) throws SQLException { - boolean result1 = testExpression(value, expression, SortOrder.ASC); - boolean result2 = testExpression(value, expression, SortOrder.DESC); - assertEquals(result1, result2); - return result1; - } + private boolean testExpression(String value, String expression) throws SQLException { + boolean result1 = testExpression(value, expression, SortOrder.ASC); + boolean result2 = testExpression(value, expression, SortOrder.DESC); + assertEquals(result1, result2); + return result1; + } - @Test - public void testStartWildcard() throws Exception { - assertEquals(Boolean.FALSE, testExpression ("149na7-app1-2-", "%-w")); - assertEquals(Boolean.TRUE, testExpression ("149na7-app1-2-", "%-2%")); - assertEquals(Boolean.TRUE, testExpression ("149na7-app1-2-", "%4%7%2%")); - assertEquals(Boolean.FALSE, testExpression ("149na7-app1-2-", "%9%4%2%")); - } + @Test + public void testStartWildcard() throws Exception { + assertEquals(Boolean.FALSE, testExpression("149na7-app1-2-", "%-w")); + assertEquals(Boolean.TRUE, testExpression("149na7-app1-2-", "%-2%")); + assertEquals(Boolean.TRUE, testExpression("149na7-app1-2-", "%4%7%2%")); + assertEquals(Boolean.FALSE, testExpression("149na7-app1-2-", "%9%4%2%")); + } - @Test - public void testCaseSensitive() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("test", "test")); - assertEquals(Boolean.FALSE, testExpression ("test", "teSt")); - } + @Test + public void testCaseSensitive() throws Exception { + assertEquals(Boolean.TRUE, testExpression("test", "test")); + assertEquals(Boolean.FALSE, testExpression("test", "teSt")); + } - @Test - public void testStartWildcardAndCaseInsensitive() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("test", "%s%")); - assertEquals(Boolean.FALSE, testExpression ("test", "%S%")); - } + @Test + public void testStartWildcardAndCaseInsensitive() throws Exception { + assertEquals(Boolean.TRUE, testExpression("test", "%s%")); + assertEquals(Boolean.FALSE, testExpression("test", "%S%")); + } - @Test - public void testOneChar() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("A", "_")); - assertEquals(Boolean.FALSE, testExpression ("AA", "_")); - } + @Test + public void testOneChar() throws Exception { + assertEquals(Boolean.TRUE, testExpression("A", "_")); + assertEquals(Boolean.FALSE, testExpression("AA", "_")); + } - @Test - public void testEmptySourceStr() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("", "%")); - assertEquals(Boolean.FALSE, testExpression ("", "_")); - } + @Test + public void testEmptySourceStr() throws Exception { + assertEquals(Boolean.TRUE, testExpression("", "%")); + assertEquals(Boolean.FALSE, testExpression("", "_")); + } - @Test - public void testNewline() throws Exception { - assertEquals(Boolean.TRUE, testExpression ("AA\nA", "AA%")); - assertEquals(Boolean.TRUE, testExpression ("AA\nA", 
"AA_A")); - assertEquals(Boolean.TRUE, testExpression ("AA\nA", "AA%A")); - assertEquals(Boolean.FALSE, testExpression ("AA\nA", "AA_")); - } - } + @Test + public void testNewline() throws Exception { + assertEquals(Boolean.TRUE, testExpression("AA\nA", "AA%")); + assertEquals(Boolean.TRUE, testExpression("AA\nA", "AA_A")); + assertEquals(Boolean.TRUE, testExpression("AA\nA", "AA%A")); + assertEquals(Boolean.FALSE, testExpression("AA\nA", "AA_")); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java index 759a9e55dcb..a899c6536cb 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,126 +42,120 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link LnFunction} and {@link LogFunction} */ public class LnLogFunctionTest { - private static final Expression THREE = LiteralExpression.newConstant(3); - private static final Expression DEFAULT_VALUE = LiteralExpression.newConstant(10.0); - - private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2, - LiteralExpression literal3, double exptForLn, double exptForLog10, double exptForLog3) - throws SQLException { - List expressionsLn = Lists.newArrayList((Expression) literal); - List expressionsLog10 = Lists.newArrayList(literal2, DEFAULT_VALUE); - List expressionsLog3 = Lists.newArrayList(literal3, THREE); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - Expression lnFunction = new LnFunction(expressionsLn); - boolean retLn = lnFunction.evaluate(null, ptr); - if (retLn) { - Double result = - (Double) lnFunction.getDataType().toObject(ptr, lnFunction.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLn)); - } - - Expression log10Function = new LogFunction(expressionsLog10); - boolean retLog10 = log10Function.evaluate(null, ptr); - if (retLog10) { - Double result = - (Double) log10Function.getDataType() - .toObject(ptr, log10Function.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLog10)); - } - assertEquals(retLn, retLog10); - - Expression log3Function = new LogFunction(expressionsLog3); - boolean retLog3 = log3Function.evaluate(null, ptr); - if (retLog3) { - Double result = - (Double) log3Function.getDataType().toObject(ptr, log3Function.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLog3)); - } - assertEquals(retLn, retLog3); - return retLn; + private static final Expression THREE = LiteralExpression.newConstant(3); + private static final Expression DEFAULT_VALUE = LiteralExpression.newConstant(10.0); + + private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2, + LiteralExpression 
literal3, double exptForLn, double exptForLog10, double exptForLog3) + throws SQLException { + List expressionsLn = Lists.newArrayList((Expression) literal); + List expressionsLog10 = Lists.newArrayList(literal2, DEFAULT_VALUE); + List expressionsLog3 = Lists.newArrayList(literal3, THREE); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + Expression lnFunction = new LnFunction(expressionsLn); + boolean retLn = lnFunction.evaluate(null, ptr); + if (retLn) { + Double result = (Double) lnFunction.getDataType().toObject(ptr, lnFunction.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLn)); } - private static void test(Number value, PNumericType dataType, double exptForLn, - double exptForLog10, double exptForLog3) throws SQLException { - LiteralExpression literal, literal2, literal3; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - boolean ret1 = - testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - boolean ret2 = - testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3); - assertEquals(ret1, ret2); + Expression log10Function = new LogFunction(expressionsLog10); + boolean retLog10 = log10Function.evaluate(null, ptr); + if (retLog10) { + Double result = + (Double) log10Function.getDataType().toObject(ptr, log10Function.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLog10)); } - - private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { - double[][] expected = new double[value.length][3]; - for (int i = 0; i < expected.length; ++i) { - expected[i][0] = Math.log(value[i].doubleValue()); - expected[i][1] = Math.log10(value[i].doubleValue()); - expected[i][2] = Math.log10(value[i].doubleValue()) / Math.log10(3); - } - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]); - } + assertEquals(retLn, retLog10); + + Expression log3Function = new LogFunction(expressionsLog3); + boolean retLog3 = log3Function.evaluate(null, ptr); + if (retLog3) { + Double result = + (Double) log3Function.getDataType().toObject(ptr, log3Function.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptForLog3)); } + assertEquals(retLn, retLog3); + return retLn; + } + + private static void test(Number value, PNumericType dataType, double exptForLn, + double exptForLog10, double exptForLog3) throws SQLException { + LiteralExpression literal, literal2, literal3; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + boolean ret1 = + testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + 
boolean ret2 = + testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3); + assertEquals(ret1, ret2); + } + + private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { + double[][] expected = new double[value.length][3]; + for (int i = 0; i < expected.length; ++i) { + expected[i][0] = Math.log(value[i].doubleValue()); + expected[i][1] = Math.log10(value[i].doubleValue()); + expected[i][2] = Math.log10(value[i].doubleValue()) / Math.log10(3); + } + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]); + } + } - @Test - public void testLnLogFunction() throws Exception { - Random random = new Random(); + @Test + public void testLnLogFunction() throws Exception { + Random random = new Random(); - testBatch( - new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()), - BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE); + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234), + BigDecimal.valueOf(random.nextDouble()), BigDecimal.valueOf(random.nextDouble()) }, + PDecimal.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); - testBatch( - new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), - random.nextDouble() }, PDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), + random.nextDouble() }, PDouble.INSTANCE); - testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); - testBatch( - new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, - random.nextLong(), random.nextLong() }, PLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, + random.nextLong(), random.nextLong() }, PLong.INSTANCE); - testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); - testBatch( - new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, - random.nextInt(), random.nextInt() }, PInteger.INSTANCE); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, + random.nextInt(), random.nextInt() }, PInteger.INSTANCE); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - 
PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + PSmallint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/MathPIFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/MathPIFunctionTest.java index af223f25b35..9b875ddda35 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/MathPIFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/MathPIFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.expression.function.MathPIFunction; import org.apache.phoenix.query.BaseTest; - import org.junit.Test; /** @@ -30,15 +29,15 @@ */ public class MathPIFunctionTest { - @Test - public void testMathPIFunction() { - Expression mathPIFunction = new MathPIFunction(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean res = mathPIFunction.evaluate(null, ptr); - if (res) { - Double result = (Double) mathPIFunction.getDataType().toObject(ptr); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), Math.PI)); - } - assertTrue(res); + @Test + public void testMathPIFunction() { + Expression mathPIFunction = new MathPIFunction(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean res = mathPIFunction.evaluate(null, ptr); + if (res) { + Double result = (Double) mathPIFunction.getDataType().toObject(ptr); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), Math.PI)); } + assertTrue(res); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/MathTrigFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/MathTrigFunctionTest.java index a49db735aac..b16d08413a9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/MathTrigFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/MathTrigFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.expression; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.math.BigDecimal; import java.sql.SQLException; import java.util.Arrays; @@ -41,139 +44,96 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** - * Unit tests for {@link SinFunction} - * Unit tests for {@link CosFunction} - * Unit tests for {@link TanFunction} + * Unit tests for {@link SinFunction} Unit tests for {@link CosFunction} Unit tests for + * {@link TanFunction} */ @RunWith(Parameterized.class) public class MathTrigFunctionTest { - private Number[] value; - private PNumericType dataType; - - public MathTrigFunctionTest(Number[] value, PNumericType dataType) { - this.value = value; - this.dataType = dataType; - } + private Number[] value; + private PNumericType dataType; - @Parameters(name = "{0} {1}") - public static synchronized Collection data() { - return Arrays.asList((Object[]) new Object[][]{ - { - new BigDecimal[]{BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234)}, - PDecimal.INSTANCE - }, - { - new Float[]{1.0f, 0.0f, -1.0f, Float.MAX_VALUE, Float.MIN_VALUE, - -Float.MAX_VALUE, -Float.MIN_VALUE, 123.1234f, -123.1234f}, - PFloat.INSTANCE - }, - { - new Float[]{1.0f, 0.0f, Float.MAX_VALUE, Float.MIN_VALUE, 123.1234f}, - PUnsignedFloat.INSTANCE - }, - { - new Double[]{1.0, 0.0, -1.0, Double.MAX_VALUE, Double.MIN_VALUE, - -Double.MAX_VALUE, -Double.MIN_VALUE, 123.1234, -123.1234}, - PDouble.INSTANCE - }, - { - new Double[]{1.0, 0.0, Double.MAX_VALUE, Double.MIN_VALUE, 123.1234}, - PUnsignedDouble.INSTANCE - }, - { - new Long[]{(long) 1, (long) 0, (long) -1, Long.MAX_VALUE, - Long.MIN_VALUE, (long) 123, (long) -123}, - PLong.INSTANCE - }, - { - new Long[]{(long) 1, (long) 0, Long.MAX_VALUE, (long) 123}, - PUnsignedLong.INSTANCE - }, - { - new Integer[]{1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123}, - PInteger.INSTANCE - }, - { - new Integer[]{1, 0, Integer.MAX_VALUE, 123}, - PUnsignedInt.INSTANCE - }, - { - new Short[]{(short) 1, (short) 0, (short) -1, Short.MAX_VALUE, - Short.MIN_VALUE, (short) 123, (short) -123}, - PSmallint.INSTANCE - }, - { - new Short[]{(short) 1, (short) 0, Short.MAX_VALUE, (short) 123}, - PSmallint.INSTANCE - }, - { - new Byte[]{(byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, - Byte.MIN_VALUE, (byte) 123, (byte) -123}, - PTinyint.INSTANCE - }, - { - new Byte[]{(byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123}, - PTinyint.INSTANCE - } - }); - } + public MathTrigFunctionTest(Number[] value, PNumericType dataType) { + this.value = value; + this.dataType = dataType; + } - private boolean testExpression(LiteralExpression literal, double expectedResult, - String testedFunction) throws 
SQLException { - List expressions = Lists.newArrayList((Expression) literal); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Expression mathFunction = null; + @Parameters(name = "{0} {1}") + public static synchronized Collection data() { + return Arrays.asList((Object[]) new Object[][] { + { new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), + BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234) }, + PDecimal.INSTANCE }, + { new Float[] { 1.0f, 0.0f, -1.0f, Float.MAX_VALUE, Float.MIN_VALUE, -Float.MAX_VALUE, + -Float.MIN_VALUE, 123.1234f, -123.1234f }, PFloat.INSTANCE }, + { new Float[] { 1.0f, 0.0f, Float.MAX_VALUE, Float.MIN_VALUE, 123.1234f }, + PUnsignedFloat.INSTANCE }, + { new Double[] { 1.0, 0.0, -1.0, Double.MAX_VALUE, Double.MIN_VALUE, -Double.MAX_VALUE, + -Double.MIN_VALUE, 123.1234, -123.1234 }, PDouble.INSTANCE }, + { new Double[] { 1.0, 0.0, Double.MAX_VALUE, Double.MIN_VALUE, 123.1234 }, + PUnsignedDouble.INSTANCE }, + { new Long[] { (long) 1, (long) 0, (long) -1, Long.MAX_VALUE, Long.MIN_VALUE, (long) 123, + (long) -123 }, PLong.INSTANCE }, + { new Long[] { (long) 1, (long) 0, Long.MAX_VALUE, (long) 123 }, PUnsignedLong.INSTANCE }, + { new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123 }, + PInteger.INSTANCE }, + { new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE }, + { new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE }, + { new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, PSmallint.INSTANCE }, + { new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, (byte) 123, + (byte) -123 }, PTinyint.INSTANCE }, + { new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE } }); + } - if (testedFunction.equals("SIN")) { - mathFunction = new SinFunction(expressions); - } else if (testedFunction.equals("COS")) { - mathFunction = new CosFunction(expressions); - } else if (testedFunction.equals("TAN")) { - mathFunction = new TanFunction(expressions); - } + private boolean testExpression(LiteralExpression literal, double expectedResult, + String testedFunction) throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Expression mathFunction = null; - boolean ret = mathFunction.evaluate(null, ptr); - if (ret) { - Double result = - (Double) mathFunction.getDataType().toObject(ptr, mathFunction.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expectedResult)); - } + if (testedFunction.equals("SIN")) { + mathFunction = new SinFunction(expressions); + } else if (testedFunction.equals("COS")) { + mathFunction = new CosFunction(expressions); + } else if (testedFunction.equals("TAN")) { + mathFunction = new TanFunction(expressions); + } - return ret; + boolean ret = mathFunction.evaluate(null, ptr); + if (ret) { + Double result = + (Double) mathFunction.getDataType().toObject(ptr, mathFunction.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expectedResult)); } - private void test(Number value, PNumericType dataType, double expectedResult, - String testedFunction) - throws SQLException { - LiteralExpression literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - boolean ret1 = testExpression(literal, expectedResult, testedFunction); + return ret; + } - literal = 
LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - boolean ret2 = testExpression(literal, expectedResult, testedFunction); - assertEquals(ret1, ret2); - } + private void test(Number value, PNumericType dataType, double expectedResult, + String testedFunction) throws SQLException { + LiteralExpression literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + boolean ret1 = testExpression(literal, expectedResult, testedFunction); + + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + boolean ret2 = testExpression(literal, expectedResult, testedFunction); + assertEquals(ret1, ret2); + } - @Test - public void testBatch() - throws SQLException { - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, Math.sin(value[i].doubleValue()), "SIN"); - test(value[i], dataType, Math.cos(value[i].doubleValue()), "COS"); - test(value[i], dataType, Math.tan(value[i].doubleValue()), "TAN"); - } + @Test + public void testBatch() throws SQLException { + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, Math.sin(value[i].doubleValue()), "SIN"); + test(value[i], dataType, Math.cos(value[i].doubleValue()), "COS"); + test(value[i], dataType, Math.tan(value[i].doubleValue()), "TAN"); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/NullValueTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/NullValueTest.java index b8178b0a745..714ddaf3ae9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/NullValueTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/NullValueTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,79 +33,65 @@ import org.junit.Test; public class NullValueTest extends BaseConnectionlessQueryTest { - - @Test - public void testComparisonExpressionWithNullOperands() throws Exception { - String[] query = {"SELECT 'a' >= ''", - "SELECT '' < 'a'", - "SELECT '' = ''"}; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - for (String q : query) { - ResultSet rs = conn.createStatement().executeQuery(q); - assertTrue(rs.next()); - assertNull(rs.getObject(1)); - assertEquals(false, rs.getBoolean(1)); - assertFalse(rs.next()); - } - } finally { - conn.close(); - } + + @Test + public void testComparisonExpressionWithNullOperands() throws Exception { + String[] query = { "SELECT 'a' >= ''", "SELECT '' < 'a'", "SELECT '' = ''" }; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + for (String q : query) { + ResultSet rs = conn.createStatement().executeQuery(q); + assertTrue(rs.next()); + assertNull(rs.getObject(1)); + assertEquals(false, rs.getBoolean(1)); + assertFalse(rs.next()); + } + } finally { + conn.close(); } - - @Test - public void testAndExpressionWithNullOperands() throws Exception { - String[] query = {"SELECT 'b' >= 'a' and '' < 'b'", - "SELECT 'b' >= '' and 'a' < 'b'", - "SELECT 'a' >= 'b' and 'a' < ''", - "SELECT '' >= 'a' and 'b' < 'a'", - "SELECT 'a' >= '' and '' < 'a'"}; - Boolean[] result = {null, - null, - Boolean.FALSE, - 
Boolean.FALSE, - null}; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - for (int i = 0; i < query.length; i++) { - ResultSet rs = conn.createStatement().executeQuery(query[i]); - assertTrue(rs.next()); - assertEquals(result[i], rs.getObject(1)); - assertEquals(false, rs.getBoolean(1)); - assertFalse(rs.next()); - } - } finally { - conn.close(); - } + } + + @Test + public void testAndExpressionWithNullOperands() throws Exception { + String[] query = { "SELECT 'b' >= 'a' and '' < 'b'", "SELECT 'b' >= '' and 'a' < 'b'", + "SELECT 'a' >= 'b' and 'a' < ''", "SELECT '' >= 'a' and 'b' < 'a'", + "SELECT 'a' >= '' and '' < 'a'" }; + Boolean[] result = { null, null, Boolean.FALSE, Boolean.FALSE, null }; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + for (int i = 0; i < query.length; i++) { + ResultSet rs = conn.createStatement().executeQuery(query[i]); + assertTrue(rs.next()); + assertEquals(result[i], rs.getObject(1)); + assertEquals(false, rs.getBoolean(1)); + assertFalse(rs.next()); + } + } finally { + conn.close(); } - - @Test - public void testOrExpressionWithNullOperands() throws Exception { - String[] query = {"SELECT 'b' >= 'a' or '' < 'b'", - "SELECT 'b' >= '' or 'a' < 'b'", - "SELECT 'a' >= 'b' or 'a' < ''", - "SELECT '' >= 'a' or 'b' < 'a'", - "SELECT 'a' >= '' or '' < 'a'"}; - Boolean[] result = {Boolean.TRUE, - Boolean.TRUE, - null, - null, - null}; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - for (int i = 0; i < query.length; i++) { - ResultSet rs = conn.createStatement().executeQuery(query[i]); - assertTrue(rs.next()); - assertEquals(result[i], rs.getObject(1)); - assertEquals(Boolean.TRUE.equals(result[i]) ? true : false, rs.getBoolean(1)); - assertFalse(rs.next()); - } - } finally { - conn.close(); - } + } + + @Test + public void testOrExpressionWithNullOperands() throws Exception { + String[] query = { "SELECT 'b' >= 'a' or '' < 'b'", "SELECT 'b' >= '' or 'a' < 'b'", + "SELECT 'a' >= 'b' or 'a' < ''", "SELECT '' >= 'a' or 'b' < 'a'", + "SELECT 'a' >= '' or '' < 'a'" }; + Boolean[] result = { Boolean.TRUE, Boolean.TRUE, null, null, null }; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + for (int i = 0; i < query.length; i++) { + ResultSet rs = conn.createStatement().executeQuery(query[i]); + assertTrue(rs.next()); + assertEquals(result[i], rs.getObject(1)); + assertEquals(Boolean.TRUE.equals(result[i]) ? true : false, rs.getBoolean(1)); + assertFalse(rs.next()); + } + } finally { + conn.close(); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/OctetLengthFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/OctetLengthFunctionTest.java index 8778be66166..b81b55874f1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/OctetLengthFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/OctetLengthFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,44 +29,42 @@ import org.apache.phoenix.schema.types.PBinary; import org.apache.phoenix.schema.types.PBinaryBase; import org.apache.phoenix.schema.types.PVarbinary; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link OctetLengthFunction} */ public class OctetLengthFunctionTest { - private void testOctetLengthExpression(Expression data, int expected) throws SQLException { - List expressions = Lists.newArrayList(data); - Expression octetLengthFunction = new OctetLengthFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - octetLengthFunction.evaluate(null, ptr); - Integer result = - (Integer) octetLengthFunction.getDataType().toObject(ptr, - octetLengthFunction.getSortOrder()); - if (expected == 0) { - assertNull(result); - } else { - assertEquals(expected, result.intValue()); - } + private void testOctetLengthExpression(Expression data, int expected) throws SQLException { + List expressions = Lists.newArrayList(data); + Expression octetLengthFunction = new OctetLengthFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + octetLengthFunction.evaluate(null, ptr); + Integer result = + (Integer) octetLengthFunction.getDataType().toObject(ptr, octetLengthFunction.getSortOrder()); + if (expected == 0) { + assertNull(result); + } else { + assertEquals(expected, result.intValue()); } + } - private void testOctetLength(byte[] bytes, PBinaryBase dataType, int expected) - throws SQLException { - LiteralExpression dataExpr; - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); - testOctetLengthExpression(dataExpr, expected); - dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); - testOctetLengthExpression(dataExpr, expected); - } + private void testOctetLength(byte[] bytes, PBinaryBase dataType, int expected) + throws SQLException { + LiteralExpression dataExpr; + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.ASC); + testOctetLengthExpression(dataExpr, expected); + dataExpr = LiteralExpression.newConstant(bytes, dataType, SortOrder.DESC); + testOctetLengthExpression(dataExpr, expected); + } - @Test - public void testByteBatch() throws SQLException { - for (int len = 0; len < 300; ++len) { - byte[] bytes = new byte[len]; - testOctetLength(bytes, PBinary.INSTANCE, bytes.length); - testOctetLength(bytes, PVarbinary.INSTANCE, bytes.length); - } + @Test + public void testByteBatch() throws SQLException { + for (int len = 0; len < 300; ++len) { + byte[] bytes = new byte[len]; + testOctetLength(bytes, PBinary.INSTANCE, bytes.length); + testOctetLength(bytes, PVarbinary.INSTANCE, bytes.length); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/OrExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/OrExpressionTest.java index 4f063ef06eb..c479575725f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/OrExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/OrExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,16 @@ */ package org.apache.phoenix.expression; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.Collections; + import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellBuilderFactory; +import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.query.QueryConstants; @@ -33,279 +40,279 @@ import org.apache.phoenix.schema.types.PDataType; import org.junit.Test; -import java.util.Arrays; -import java.util.Collections; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - public class OrExpressionTest { - private OrExpression createOr(Expression lhs, Expression rhs) { - return new OrExpression(Arrays.asList(lhs, rhs)); - } - - private OrExpression createOr(Boolean x, Boolean y) { - return createOr(LiteralExpression.newConstant(x), LiteralExpression.newConstant(y)); + private OrExpression createOr(Expression lhs, Expression rhs) { + return new OrExpression(Arrays.asList(lhs, rhs)); + } + + private OrExpression createOr(Boolean x, Boolean y) { + return createOr(LiteralExpression.newConstant(x), LiteralExpression.newConstant(y)); + } + + private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean rhs) { + OrExpression or = createOr(lhs, rhs); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(); + boolean success = or.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + // Evaluating OR when values of both sides are known should immediately succeed + // and return the same result regardless of order. 
+ private void testImmediate(Boolean expected, Boolean a, Boolean b) { + testImmediateSingle(expected, a, b); + testImmediateSingle(expected, b, a); + } + + private PColumn pcolumn(final String name) { + return new PBaseColumn() { + @Override + public PName getName() { + return PNameFactory.newName(name); + } + + @Override + public PDataType getDataType() { + return PBoolean.INSTANCE; + } + + @Override + public PName getFamilyName() { + return PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY); + } + + @Override + public int getPosition() { + return 0; + } + + @Override + public Integer getArraySize() { + return null; + } + + @Override + public byte[] getViewConstant() { + return new byte[0]; + } + + @Override + public boolean isViewReferenced() { + return false; + } + + @Override + public String getExpressionStr() { + return null; + } + + @Override + public boolean isRowTimestamp() { + return false; + } + + @Override + public boolean isDynamic() { + return false; + } + + @Override + public byte[] getColumnQualifierBytes() { + return null; + } + + @Override + public long getTimestamp() { + return 0; + } + + @Override + public boolean isDerived() { + return false; + } + + @Override + public boolean isExcluded() { + return false; + } + + @Override + public SortOrder getSortOrder() { + return null; + } + }; + } + + private KeyValueColumnExpression kvExpr(final String name) { + return new KeyValueColumnExpression(pcolumn(name)); + } + + private Cell createCell(String name, Boolean value) { + byte[] valueBytes = value == null ? null : value ? PBoolean.TRUE_BYTES : PBoolean.FALSE_BYTES; + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("row")) + .setFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).setQualifier(Bytes.toBytes(name)) + .setTimestamp(1).setType(Cell.Type.Put).setValue(valueBytes).build(); + } + + private void testPartialOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, + boolean leftFirst) { + KeyValueColumnExpression lhsExpr = kvExpr("LHS"); + KeyValueColumnExpression rhsExpr = kvExpr("RHS"); + OrExpression or = createOr(lhsExpr, rhsExpr); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections. emptyList()); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + + // with no data available, should fail + boolean success = or.evaluate(tuple, out); + assertFalse(success); + + // with 1 datum available, should fail + if (leftFirst) { + tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); + } else { + tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); } - - private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean rhs) { - OrExpression or = createOr(lhs, rhs); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(); - boolean success = or.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - // Evaluating OR when values of both sides are known should immediately succeed - // and return the same result regardless of order. 
- private void testImmediate(Boolean expected, Boolean a, Boolean b) { - testImmediateSingle(expected, a, b); - testImmediateSingle(expected, b, a); + success = or.evaluate(tuple, out); + assertFalse(success); + + // with 2 data available, should succeed + tuple.setKeyValues(Arrays.asList(createCell("LHS", lhs), createCell("RHS", rhs))); + success = or.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + private void testPartialEvaluation(Boolean expected, Boolean x, Boolean y, boolean xFirst) { + testPartialOneSideFirst(expected, x, y, xFirst); + testPartialOneSideFirst(expected, y, x, !xFirst); + } + + private void testShortCircuitOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, + boolean leftFirst) { + KeyValueColumnExpression lhsExpr = kvExpr("LHS"); + KeyValueColumnExpression rhsExpr = kvExpr("RHS"); + OrExpression or = createOr(lhsExpr, rhsExpr); + MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections. emptyList()); + ImmutableBytesWritable out = new ImmutableBytesWritable(); + + // with no data available, should fail + boolean success = or.evaluate(tuple, out); + assertFalse(success); + + // with 1 datum available, should succeed + if (leftFirst) { + tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); + } else { + tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); } - - private PColumn pcolumn(final String name) { - return new PBaseColumn() { - @Override public PName getName() { - return PNameFactory.newName(name); - } - - @Override public PDataType getDataType() { - return PBoolean.INSTANCE; - } - - @Override public PName getFamilyName() { - return PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY); - } - - @Override public int getPosition() { - return 0; - } - - @Override public Integer getArraySize() { - return null; - } - - @Override public byte[] getViewConstant() { - return new byte[0]; - } - - @Override public boolean isViewReferenced() { - return false; - } - - @Override public String getExpressionStr() { - return null; - } - - @Override public boolean isRowTimestamp() { - return false; - } - - @Override public boolean isDynamic() { - return false; - } - - @Override public byte[] getColumnQualifierBytes() { - return null; - } - - @Override public long getTimestamp() { - return 0; - } - - @Override public boolean isDerived() { - return false; - } - - @Override public boolean isExcluded() { - return false; - } - - @Override public SortOrder getSortOrder() { - return null; - } - }; - } - - private KeyValueColumnExpression kvExpr(final String name) { - return new KeyValueColumnExpression(pcolumn(name)); - } - - private Cell createCell(String name, Boolean value) { - byte[] valueBytes = value == null ? null : value ? 
PBoolean.TRUE_BYTES : PBoolean.FALSE_BYTES; - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes("row")) - .setFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES) - .setQualifier(Bytes.toBytes(name)) - .setTimestamp(1) - .setType(Cell.Type.Put) - .setValue(valueBytes) - .build(); - } - - private void testPartialOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, boolean leftFirst) { - KeyValueColumnExpression lhsExpr = kvExpr("LHS"); - KeyValueColumnExpression rhsExpr = kvExpr("RHS"); - OrExpression or = createOr(lhsExpr, rhsExpr); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections.emptyList()); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - - // with no data available, should fail - boolean success = or.evaluate(tuple, out); - assertFalse(success); - - // with 1 datum available, should fail - if (leftFirst) { - tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); - } else { - tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); - } - success = or.evaluate(tuple, out); - assertFalse(success); - - // with 2 data available, should succeed - tuple.setKeyValues(Arrays.asList(createCell("LHS", lhs), createCell("RHS", rhs))); - success = or.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - private void testPartialEvaluation(Boolean expected, Boolean x, Boolean y, boolean xFirst) { - testPartialOneSideFirst(expected, x, y, xFirst); - testPartialOneSideFirst(expected, y, x, !xFirst); - } - - private void testShortCircuitOneSideFirst(Boolean expected, Boolean lhs, Boolean rhs, boolean leftFirst) { - KeyValueColumnExpression lhsExpr = kvExpr("LHS"); - KeyValueColumnExpression rhsExpr = kvExpr("RHS"); - OrExpression or = createOr(lhsExpr, rhsExpr); - MultiKeyValueTuple tuple = new MultiKeyValueTuple(Collections.emptyList()); - ImmutableBytesWritable out = new ImmutableBytesWritable(); - - // with no data available, should fail - boolean success = or.evaluate(tuple, out); - assertFalse(success); - - // with 1 datum available, should succeed - if (leftFirst) { - tuple.setKeyValues(Collections.singletonList(createCell("LHS", lhs))); - } else { - tuple.setKeyValues(Collections.singletonList(createCell("RHS", rhs))); - } - success = or.evaluate(tuple, out); - assertTrue(success); - assertEquals(expected, PBoolean.INSTANCE.toObject(out)); - } - - - private void testShortCircuit(Boolean expected, Boolean x, Boolean y, boolean xFirst) { - testShortCircuitOneSideFirst(expected, x, y, xFirst); - testShortCircuitOneSideFirst(expected, y, x, !xFirst); - } - - @Test - public void testImmediateCertainty() { - testImmediate(true, true, true); - testImmediate(true, false, true); - testImmediate(false, false, false); - } - - @Test - public void testImmediateUncertainty() { - testImmediate(true, true, null); - testImmediate(null, false, null); - testImmediate(null, null, null); - } - - @Test - public void testPartialCertainty() { - // must evaluate both sides if FALSE is evaluated first - - // F OR F = F - testPartialEvaluation(false, false, false, true); - testPartialEvaluation(false, false, false, false); - - // T OR F = T - testPartialEvaluation(true, false, true, true); - testPartialEvaluation(true, true, false, false); - } - - @Test - public void testPartialUncertainty() { - // T OR NULL = NULL - testPartialEvaluation(true, null, true, true); - testPartialEvaluation(true, true, null, false); - - // must evaluate both sides if NULL is evaluated first - 
- // F OR NULL = NULL - testPartialEvaluation(null, null, false, true); - testPartialEvaluation(null, false, null, false); - - // NULL OR NULL = NULL - testPartialEvaluation(null, null, null, true); - testPartialEvaluation(null, null, null, false); - } - - @Test - public void testShortCircuitCertainty() { - // need only to evaluate one side if TRUE is evaluated first - - // T OR T = T - testShortCircuit(true, true, true, true); - testShortCircuit(true, true, true, false); - - - // T OR F = F - testShortCircuit(true, true, false, true); - testShortCircuit(true, false, true, false); - } - - @Test - public void testShortCircuitUncertainty() { - // need only to evaluate one side if TRUE is evaluated first - testShortCircuit(true, true, null, true); - testShortCircuit(true, null, true, false); - } - - @Test - public void testTruthTable() { - // See: https://en.wikipedia.org/wiki/Null_(SQL)#Comparisons_with_NULL_and_the_three-valued_logic_(3VL) - Boolean[][] testCases = new Boolean[][] { - // should short circuit? - // X, Y, if X first, if Y first, X OR Y, - { true, true, true, true, true, }, - { true, false, true, false, true, }, - { false, false, false, false, false, }, - { true, null, true, false, true, }, - { false, null, false, false, null, }, - { null, null, false, false, null, }, - }; - - for (Boolean[] testCase : testCases) { - Boolean x = testCase[0]; - Boolean y = testCase[1]; - boolean shouldShortCircuitWhenXEvaluatedFirst = testCase[2]; - boolean shouldShortCircuitWhenYEvaluatedFirst = testCase[3]; - Boolean expected = testCase[4]; - - // test both directions - testImmediate(expected, x, y); - - if (shouldShortCircuitWhenXEvaluatedFirst) { - testShortCircuit(expected, x, y, true); - } else { - testPartialEvaluation(expected, x, y, true); - } - - if (shouldShortCircuitWhenYEvaluatedFirst) { - testShortCircuit(expected, x, y, false); - } else { - testPartialEvaluation(expected, x, y, false); - } - } + success = or.evaluate(tuple, out); + assertTrue(success); + assertEquals(expected, PBoolean.INSTANCE.toObject(out)); + } + + private void testShortCircuit(Boolean expected, Boolean x, Boolean y, boolean xFirst) { + testShortCircuitOneSideFirst(expected, x, y, xFirst); + testShortCircuitOneSideFirst(expected, y, x, !xFirst); + } + + @Test + public void testImmediateCertainty() { + testImmediate(true, true, true); + testImmediate(true, false, true); + testImmediate(false, false, false); + } + + @Test + public void testImmediateUncertainty() { + testImmediate(true, true, null); + testImmediate(null, false, null); + testImmediate(null, null, null); + } + + @Test + public void testPartialCertainty() { + // must evaluate both sides if FALSE is evaluated first + + // F OR F = F + testPartialEvaluation(false, false, false, true); + testPartialEvaluation(false, false, false, false); + + // T OR F = T + testPartialEvaluation(true, false, true, true); + testPartialEvaluation(true, true, false, false); + } + + @Test + public void testPartialUncertainty() { + // T OR NULL = NULL + testPartialEvaluation(true, null, true, true); + testPartialEvaluation(true, true, null, false); + + // must evaluate both sides if NULL is evaluated first + + // F OR NULL = NULL + testPartialEvaluation(null, null, false, true); + testPartialEvaluation(null, false, null, false); + + // NULL OR NULL = NULL + testPartialEvaluation(null, null, null, true); + testPartialEvaluation(null, null, null, false); + } + + @Test + public void testShortCircuitCertainty() { + // need only to evaluate one side if TRUE is evaluated first + 
+ // T OR T = T + testShortCircuit(true, true, true, true); + testShortCircuit(true, true, true, false); + + // T OR F = F + testShortCircuit(true, true, false, true); + testShortCircuit(true, false, true, false); + } + + @Test + public void testShortCircuitUncertainty() { + // need only to evaluate one side if TRUE is evaluated first + testShortCircuit(true, true, null, true); + testShortCircuit(true, null, true, false); + } + + @Test + public void testTruthTable() { + // See: + // https://en.wikipedia.org/wiki/Null_(SQL)#Comparisons_with_NULL_and_the_three-valued_logic_(3VL) + Boolean[][] testCases = new Boolean[][] { + // should short circuit? + // X, Y, if X first, if Y first, X OR Y, + { true, true, true, true, true, }, { true, false, true, false, true, }, + { false, false, false, false, false, }, { true, null, true, false, true, }, + { false, null, false, false, null, }, { null, null, false, false, null, }, }; + + for (Boolean[] testCase : testCases) { + Boolean x = testCase[0]; + Boolean y = testCase[1]; + boolean shouldShortCircuitWhenXEvaluatedFirst = testCase[2]; + boolean shouldShortCircuitWhenYEvaluatedFirst = testCase[3]; + Boolean expected = testCase[4]; + + // test both directions + testImmediate(expected, x, y); + + if (shouldShortCircuitWhenXEvaluatedFirst) { + testShortCircuit(expected, x, y, true); + } else { + testPartialEvaluation(expected, x, y, true); + } + + if (shouldShortCircuitWhenYEvaluatedFirst) { + testShortCircuit(expected, x, y, false); + } else { + testPartialEvaluation(expected, x, y, false); + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java index 795d33ad890..0489948652e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,128 +41,120 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link PowerFunction} */ public class PowerFunctionTest { - private static final Expression ONE_POINT_FIVE = LiteralExpression.newConstant(1.5); - private static final Expression TWO = LiteralExpression.newConstant(2); - private static final Expression THREE = LiteralExpression.newConstant(3); - - - private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2, - LiteralExpression literal3, double exptFor15, double exptFor2, double exptFor3) - throws SQLException { - List expressions15 = Lists.newArrayList(literal, ONE_POINT_FIVE); - List expressions2 = Lists.newArrayList(literal2, TWO); - List expressions3 = Lists.newArrayList(literal3, THREE); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - Expression powerFunction15 = new PowerFunction(expressions15); - boolean ret15 = powerFunction15.evaluate(null, ptr); - if (ret15) { - Double result = - (Double) powerFunction15.getDataType().toObject(ptr, - powerFunction15.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor15)); - } - - Expression powerFunction2 = new PowerFunction(expressions2); - boolean ret2 = powerFunction2.evaluate(null, ptr); - if (ret2) { - Double result = - (Double) powerFunction2.getDataType().toObject(ptr, - powerFunction2.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor2)); - } - assertEquals(ret15, ret2); - - Expression powerFunction3 = new PowerFunction(expressions3); - boolean ret3 = powerFunction3.evaluate(null, ptr); - if (ret3) { - Double result = - (Double) powerFunction3.getDataType().toObject(ptr, - powerFunction3.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor3)); - } - assertEquals(ret15, ret3); - return ret15; + private static final Expression ONE_POINT_FIVE = LiteralExpression.newConstant(1.5); + private static final Expression TWO = LiteralExpression.newConstant(2); + private static final Expression THREE = LiteralExpression.newConstant(3); + + private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2, + LiteralExpression literal3, double exptFor15, double exptFor2, double exptFor3) + throws SQLException { + List expressions15 = Lists.newArrayList(literal, ONE_POINT_FIVE); + List expressions2 = Lists.newArrayList(literal2, TWO); + List expressions3 = Lists.newArrayList(literal3, THREE); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + Expression powerFunction15 = new PowerFunction(expressions15); + boolean ret15 = powerFunction15.evaluate(null, ptr); + if (ret15) { + Double result = + (Double) powerFunction15.getDataType().toObject(ptr, powerFunction15.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor15)); } - private static void test(Number value, PNumericType dataType, double exptFor15, - double exptFor2, double exptFor3) throws SQLException { - LiteralExpression literal, 
literal2, literal3; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - boolean ret1 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - boolean ret2 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3); - assertEquals(ret1, ret2); + Expression powerFunction2 = new PowerFunction(expressions2); + boolean ret2 = powerFunction2.evaluate(null, ptr); + if (ret2) { + Double result = + (Double) powerFunction2.getDataType().toObject(ptr, powerFunction2.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor2)); } - - private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { - double[][] expected = new double[value.length][3]; - for (int i = 0; i < expected.length; ++i) { - expected[i][0] = Math.pow(value[i].doubleValue(), 1.5); - expected[i][1] = Math.pow(value[i].doubleValue(), 2); - expected[i][2] = Math.pow(value[i].doubleValue(), 3); - } - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]); - } + assertEquals(ret15, ret2); + + Expression powerFunction3 = new PowerFunction(expressions3); + boolean ret3 = powerFunction3.evaluate(null, ptr); + if (ret3) { + Double result = + (Double) powerFunction3.getDataType().toObject(ptr, powerFunction3.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), exptFor3)); } + assertEquals(ret15, ret3); + return ret15; + } + + private static void test(Number value, PNumericType dataType, double exptFor15, double exptFor2, + double exptFor3) throws SQLException { + LiteralExpression literal, literal2, literal3; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + boolean ret1 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + boolean ret2 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3); + assertEquals(ret1, ret2); + } + + private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { + double[][] expected = new double[value.length][3]; + for (int i = 0; i < expected.length; ++i) { + expected[i][0] = Math.pow(value[i].doubleValue(), 1.5); + expected[i][1] = Math.pow(value[i].doubleValue(), 2); + expected[i][2] = Math.pow(value[i].doubleValue(), 3); + } + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]); + } + } - @Test - public void testLnLogFunction() throws Exception { - Random random = new Random(); + @Test + public void testLnLogFunction() throws Exception { + Random random = new Random(); - testBatch( - new BigDecimal[] { 
BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()), - BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE); + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234), + BigDecimal.valueOf(random.nextDouble()), BigDecimal.valueOf(random.nextDouble()) }, + PDecimal.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); - testBatch( - new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), - random.nextDouble() }, PDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), + random.nextDouble() }, PDouble.INSTANCE); - testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); - testBatch( - new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, - random.nextLong(), random.nextLong() }, PLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, + random.nextLong(), random.nextLong() }, PLong.INSTANCE); - testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); - testBatch( - new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, - random.nextInt(), random.nextInt() }, PInteger.INSTANCE); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, + random.nextInt(), random.nextInt() }, PInteger.INSTANCE); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + PSmallint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java index 25801dd7795..e04c426f057 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpReplaceFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,54 +28,53 @@ import org.apache.phoenix.expression.function.StringBasedRegexpReplaceFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class RegexpReplaceFunctionTest { - private final static PVarchar TYPE = PVarchar.INSTANCE; + private final static PVarchar TYPE = PVarchar.INSTANCE; - private String evalExp(Expression exp) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean eval = exp.evaluate(null, ptr); - assertTrue(eval); - String res = (String) exp.getDataType().toObject(ptr); - return res; - } + private String evalExp(Expression exp) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean eval = exp.evaluate(null, ptr); + assertTrue(eval); + String res = (String) exp.getDataType().toObject(ptr); + return res; + } - private String testExpression(String srcStr, String patternStr, String replaceStr, - SortOrder sortOrder) throws SQLException { - Expression srcExp, patternExp, replaceExp; - srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); - patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); - replaceExp = LiteralExpression.newConstant(replaceStr, TYPE, sortOrder); - List expressions = Lists.newArrayList(srcExp, patternExp, replaceExp); - String res1, res2; - res1 = evalExp(new ByteBasedRegexpReplaceFunction(expressions)); - res2 = evalExp(new StringBasedRegexpReplaceFunction(expressions)); - assertEquals(res1, res2); - return res1; - } + private String testExpression(String srcStr, String patternStr, String replaceStr, + SortOrder sortOrder) throws SQLException { + Expression srcExp, patternExp, replaceExp; + srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); + patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); + replaceExp = LiteralExpression.newConstant(replaceStr, TYPE, sortOrder); + List expressions = Lists.newArrayList(srcExp, patternExp, replaceExp); + String res1, res2; + res1 = evalExp(new ByteBasedRegexpReplaceFunction(expressions)); + res2 = evalExp(new StringBasedRegexpReplaceFunction(expressions)); + assertEquals(res1, res2); + return res1; + } - private String testExpression(String srcStr, String patternStr, String replaceStr) - throws SQLException { - String result1 = testExpression(srcStr, patternStr, replaceStr, SortOrder.ASC); - String result2 = testExpression(srcStr, patternStr, replaceStr, SortOrder.DESC); - assertEquals(result1, result2); - return result1; - } + private String testExpression(String srcStr, String patternStr, String replaceStr) + throws SQLException { + String result1 = testExpression(srcStr, patternStr, replaceStr, SortOrder.ASC); + String result2 = testExpression(srcStr, patternStr, replaceStr, SortOrder.DESC); + assertEquals(result1, result2); + return result1; + } - private void testExpression(String srcStr, 
String patternStr, String replaceStr, - String expectedStr) throws SQLException { - String result = testExpression(srcStr, patternStr, replaceStr); - assertEquals(expectedStr, result); - } + private void testExpression(String srcStr, String patternStr, String replaceStr, + String expectedStr) throws SQLException { + String result = testExpression(srcStr, patternStr, replaceStr); + assertEquals(expectedStr, result); + } - @Test - public void test() throws Exception { - testExpression("aa11bb22cc33dd44ee", "[0-9]+", "*", "aa*bb*cc*dd*ee"); - testExpression("aa11bb22cc33dd44ee", "[0-9]+", "", "aabbccddee"); - testExpression("aa11bb22cc33dd44ee", "[a-z][0-9]", "", "a1b2c3d4ee"); - testExpression("aa11bb22cc33dd44ee", "[a-z0-9]+", "", (String) null); - } + @Test + public void test() throws Exception { + testExpression("aa11bb22cc33dd44ee", "[0-9]+", "*", "aa*bb*cc*dd*ee"); + testExpression("aa11bb22cc33dd44ee", "[0-9]+", "", "aabbccddee"); + testExpression("aa11bb22cc33dd44ee", "[a-z][0-9]", "", "a1b2c3d4ee"); + testExpression("aa11bb22cc33dd44ee", "[a-z0-9]+", "", (String) null); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java index 01d7930020e..cccb2cf1d7d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSplitFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,66 +29,65 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.schema.types.PhoenixArray; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class RegexpSplitFunctionTest { - private final static PVarchar TYPE = PVarchar.INSTANCE; + private final static PVarchar TYPE = PVarchar.INSTANCE; - private String[] evalExp(Expression exp) throws SQLException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean eval = exp.evaluate(null, ptr); - assertTrue(eval); - PhoenixArray evalRes = (PhoenixArray) exp.getDataType().toObject(ptr); - String[] res = (String[]) evalRes.getArray(); - return res; - } + private String[] evalExp(Expression exp) throws SQLException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean eval = exp.evaluate(null, ptr); + assertTrue(eval); + PhoenixArray evalRes = (PhoenixArray) exp.getDataType().toObject(ptr); + String[] res = (String[]) evalRes.getArray(); + return res; + } - private String[] testExpression(String srcStr, String patternStr, SortOrder sortOrder) - throws SQLException { - Expression srcExp, patternExp; - srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); - patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); - List expressions = Lists.newArrayList(srcExp, patternExp); - String[] res1, res2; - res1 = evalExp(new ByteBasedRegexpSplitFunction(expressions)); - res2 = evalExp(new StringBasedRegexpSplitFunction(expressions)); - testEqual(res2, res1); - return res1; - } + private String[] 
testExpression(String srcStr, String patternStr, SortOrder sortOrder) + throws SQLException { + Expression srcExp, patternExp; + srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); + patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); + List expressions = Lists.newArrayList(srcExp, patternExp); + String[] res1, res2; + res1 = evalExp(new ByteBasedRegexpSplitFunction(expressions)); + res2 = evalExp(new StringBasedRegexpSplitFunction(expressions)); + testEqual(res2, res1); + return res1; + } - private String[] testExpression(String srcStr, String patternStr) throws SQLException { - String[] result1 = testExpression(srcStr, patternStr, SortOrder.ASC); - String[] result2 = testExpression(srcStr, patternStr, SortOrder.DESC); - testEqual(result1, result2); - return result1; - } + private String[] testExpression(String srcStr, String patternStr) throws SQLException { + String[] result1 = testExpression(srcStr, patternStr, SortOrder.ASC); + String[] result2 = testExpression(srcStr, patternStr, SortOrder.DESC); + testEqual(result1, result2); + return result1; + } - private void testEqual(String[] expectedStr, String[] result) { - if (result == null ^ expectedStr == null) return; - if (expectedStr == null) return; - assertEquals(expectedStr.length, result.length); - for (int i = 0; i < expectedStr.length; ++i) - assertEquals(expectedStr[i], result[i]); - } + private void testEqual(String[] expectedStr, String[] result) { + if (result == null ^ expectedStr == null) return; + if (expectedStr == null) return; + assertEquals(expectedStr.length, result.length); + for (int i = 0; i < expectedStr.length; ++i) + assertEquals(expectedStr[i], result[i]); + } - private void testExpression(String srcStr, String patternStr, String[] expectedStr) - throws SQLException { - String[] result = testExpression(srcStr, patternStr); - testEqual(expectedStr, result); - } + private void testExpression(String srcStr, String patternStr, String[] expectedStr) + throws SQLException { + String[] result = testExpression(srcStr, patternStr); + testEqual(expectedStr, result); + } - @Test - public void test() throws Exception { - String[] res = new String[] { "ONE", "TWO", "THREE" }; - testExpression("ONE:TWO:THREE", ":", res); - testExpression("ONE,TWO,THREE", ",", res); - testExpression("12ONE34TWO56THREE78", "[0-9]+", new String[] { null, "ONE", "TWO", "THREE", - null }); - testExpression("ONE34TWO56THREE78", "[0-9]+", new String[] { "ONE", "TWO", "THREE", null }); - testExpression("123ONE34TWO56THREE", "[0-9]+", new String[] { null, "ONE", "TWO", "THREE" }); - testExpression("123", "[0-9]+", new String[] { null, null }); - testExpression("ONE", "[0-9]+", new String[] { "ONE" }); - } + @Test + public void test() throws Exception { + String[] res = new String[] { "ONE", "TWO", "THREE" }; + testExpression("ONE:TWO:THREE", ":", res); + testExpression("ONE,TWO,THREE", ",", res); + testExpression("12ONE34TWO56THREE78", "[0-9]+", + new String[] { null, "ONE", "TWO", "THREE", null }); + testExpression("ONE34TWO56THREE78", "[0-9]+", new String[] { "ONE", "TWO", "THREE", null }); + testExpression("123ONE34TWO56THREE", "[0-9]+", new String[] { null, "ONE", "TWO", "THREE" }); + testExpression("123", "[0-9]+", new String[] { null, null }); + testExpression("ONE", "[0-9]+", new String[] { "ONE" }); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java index 
1223fd39798..d977e63a20d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RegexpSubstrFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,55 +29,55 @@ import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class RegexpSubstrFunctionTest { - private final static PVarchar TYPE = PVarchar.INSTANCE; + private final static PVarchar TYPE = PVarchar.INSTANCE; - private String evalExp(Expression exp) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean eval = exp.evaluate(null, ptr); - assertTrue(eval); - String res = (String) exp.getDataType().toObject(ptr); - return res; - } + private String evalExp(Expression exp) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean eval = exp.evaluate(null, ptr); + assertTrue(eval); + String res = (String) exp.getDataType().toObject(ptr); + return res; + } - private String testExpression(String srcStr, String patternStr, int offset, SortOrder sortOrder) throws SQLException { - Expression srcExp, patternExp, offsetExp; - srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); - patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); - offsetExp = LiteralExpression.newConstant(offset, PInteger.INSTANCE, sortOrder); - List expressions = Lists.newArrayList(srcExp, patternExp, offsetExp); - String res1, res2; - res1 = evalExp(new ByteBasedRegexpSubstrFunction(expressions)); - res2 = evalExp(new StringBasedRegexpSubstrFunction(expressions)); - assertEquals(res1, res2); - return res1; - } + private String testExpression(String srcStr, String patternStr, int offset, SortOrder sortOrder) + throws SQLException { + Expression srcExp, patternExp, offsetExp; + srcExp = LiteralExpression.newConstant(srcStr, TYPE, sortOrder); + patternExp = LiteralExpression.newConstant(patternStr, TYPE, sortOrder); + offsetExp = LiteralExpression.newConstant(offset, PInteger.INSTANCE, sortOrder); + List expressions = Lists.newArrayList(srcExp, patternExp, offsetExp); + String res1, res2; + res1 = evalExp(new ByteBasedRegexpSubstrFunction(expressions)); + res2 = evalExp(new StringBasedRegexpSubstrFunction(expressions)); + assertEquals(res1, res2); + return res1; + } - private String testExpression(String srcStr, String patternStr, int offset) throws SQLException { - String result1 = testExpression(srcStr, patternStr, offset, SortOrder.ASC); - String result2 = testExpression(srcStr, patternStr, offset, SortOrder.DESC); - assertEquals(result1, result2); - return result1; - } + private String testExpression(String srcStr, String patternStr, int offset) throws SQLException { + String result1 = testExpression(srcStr, patternStr, offset, SortOrder.ASC); + String result2 = testExpression(srcStr, patternStr, offset, SortOrder.DESC); + assertEquals(result1, result2); + return result1; + } - private void testExpression(String srcStr, String patternStr, int offset, String 
expectedStr) - throws SQLException { - String result = testExpression(srcStr, patternStr, offset); - assertEquals(expectedStr, result); - } + private void testExpression(String srcStr, String patternStr, int offset, String expectedStr) + throws SQLException { + String result = testExpression(srcStr, patternStr, offset); + assertEquals(expectedStr, result); + } - @Test - public void test() throws Exception { - testExpression("Report1?1", "[^\\\\?]+", 1, "Report1"); - testExpression("Report1?2", "[^\\\\?]+", 1, "Report1"); - testExpression("Report2?1", "[^\\\\?]+", 1, "Report2"); - testExpression("Report3?2", "[^\\\\?]+", 1, "Report3"); - testExpression("Report3?2", "[4-9]+", 0, (String) null); - testExpression("Report3?2", "[^\\\\?]+", 2, "eport3"); - testExpression("Report3?2", "[^\\\\?]+", -5, "rt3"); - } + @Test + public void test() throws Exception { + testExpression("Report1?1", "[^\\\\?]+", 1, "Report1"); + testExpression("Report1?2", "[^\\\\?]+", 1, "Report1"); + testExpression("Report2?1", "[^\\\\?]+", 1, "Report2"); + testExpression("Report3?2", "[^\\\\?]+", 1, "Report3"); + testExpression("Report3?2", "[4-9]+", 0, (String) null); + testExpression("Report3?2", "[^\\\\?]+", 2, "eport3"); + testExpression("Report3?2", "[^\\\\?]+", -5, "rt3"); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/RoundFloorCeilExpressionsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/RoundFloorCeilExpressionsTest.java index 2234e61582e..64b90f7ad28 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/RoundFloorCeilExpressionsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/RoundFloorCeilExpressionsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +31,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -70,1859 +69,1754 @@ import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.PropertiesUtil; -import org.joda.time.Chronology; import org.joda.time.chrono.GJChronology; import org.junit.Test; /** - * - * Unit tests for {@link RoundDecimalExpression}, {@link FloorDecimalExpression} - * and {@link CeilDecimalExpression}. - * - * + * Unit tests for {@link RoundDecimalExpression}, {@link FloorDecimalExpression} and + * {@link CeilDecimalExpression}. 
* @since 3.0.0 */ public class RoundFloorCeilExpressionsTest extends BaseConnectionlessQueryTest { + private static long HALF_SEC = 500; + private static long SEC = 2 * HALF_SEC; - private static long HALF_SEC = 500; - private static long SEC = 2 * HALF_SEC; - - private static long HALF_MIN = 30 * 1000; - private static long MIN = 2 * HALF_MIN; - - private static long HALF_HOUR = 30 * 60 * 1000; - private static long HOUR = 2 * HALF_HOUR; - - private static long HALF_DAY = 12 * 60 * 60 * 1000; - private static long DAY = 2 * HALF_DAY; + private static long HALF_MIN = 30 * 1000; + private static long MIN = 2 * HALF_MIN; - private static long HALF_WEEK = 7 * 12 * 60 * 60 * 1000; - private static long WEEK = 2 * HALF_WEEK; - - // Note that without the "l" the integer arithmetic below would overflow - private static long HALF_YEAR = 365l * 12 * 60 * 60 * 1000; - private static long YEAR = 2l * HALF_YEAR; - - // Decimal Expression Tests - - @Test - public void testRoundDecimalExpression() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); - Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, 3); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - roundDecimalExpression.evaluate(null, ptr); - Object result = roundDecimalExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof BigDecimal); - BigDecimal resultDecimal = (BigDecimal)result; - assertEquals(BigDecimal.valueOf(1.239), resultDecimal); - } + private static long HALF_HOUR = 30 * 60 * 1000; + private static long HOUR = 2 * HALF_HOUR; - @Test - public void testRoundNegativePrecisionDecimalExpression() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(444.44, PDecimal.INSTANCE); - Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, -2); + private static long HALF_DAY = 12 * 60 * 60 * 1000; + private static long DAY = 2 * HALF_DAY; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - roundDecimalExpression.evaluate(null, ptr); - Object result = roundDecimalExpression.getDataType().toObject(ptr); + private static long HALF_WEEK = 7 * 12 * 60 * 60 * 1000; + private static long WEEK = 2 * HALF_WEEK; + + // Note that without the "l" the integer arithmetic below would overflow + private static long HALF_YEAR = 365l * 12 * 60 * 60 * 1000; + private static long YEAR = 2l * HALF_YEAR; - assertTrue(result instanceof BigDecimal); - BigDecimal resultDecimal = (BigDecimal)result; - assertEquals(0, BigDecimal.valueOf(400).compareTo(resultDecimal)); - } + // Decimal Expression Tests - @Test - public void testRoundDecimalExpressionNoop() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); - Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, 3); + @Test + public void testRoundDecimalExpression() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); + Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, 3); - assertEquals(roundDecimalExpression, decimalLiteral); - } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + roundDecimalExpression.evaluate(null, ptr); + Object result = roundDecimalExpression.getDataType().toObject(ptr); - @Test - public void testFloorDecimalExpression() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, 
PDecimal.INSTANCE); - Expression floorDecimalExpression = FloorDecimalExpression.create(decimalLiteral, 3); + assertTrue(result instanceof BigDecimal); + BigDecimal resultDecimal = (BigDecimal) result; + assertEquals(BigDecimal.valueOf(1.239), resultDecimal); + } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - floorDecimalExpression.evaluate(null, ptr); - Object result = floorDecimalExpression.getDataType().toObject(ptr); + @Test + public void testRoundNegativePrecisionDecimalExpression() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(444.44, PDecimal.INSTANCE); + Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, -2); - assertTrue(result instanceof BigDecimal); - BigDecimal resultDecimal = (BigDecimal)result; - assertEquals(BigDecimal.valueOf(1.238), resultDecimal); - } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + roundDecimalExpression.evaluate(null, ptr); + Object result = roundDecimalExpression.getDataType().toObject(ptr); - @Test - public void testFloorDecimalExpressionNoop() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); - Expression floorDecimalExpression = FloorDecimalExpression.create(decimalLiteral, 3); + assertTrue(result instanceof BigDecimal); + BigDecimal resultDecimal = (BigDecimal) result; + assertEquals(0, BigDecimal.valueOf(400).compareTo(resultDecimal)); + } - assertEquals(floorDecimalExpression, decimalLiteral); - } + @Test + public void testRoundDecimalExpressionNoop() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); + Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, 3); - @Test - public void testCeilDecimalExpression() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); - Expression ceilDecimalExpression = CeilDecimalExpression.create(decimalLiteral, 3); + assertEquals(roundDecimalExpression, decimalLiteral); + } - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ceilDecimalExpression.evaluate(null, ptr); - Object result = ceilDecimalExpression.getDataType().toObject(ptr); + @Test + public void testFloorDecimalExpression() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); + Expression floorDecimalExpression = FloorDecimalExpression.create(decimalLiteral, 3); - assertTrue(result instanceof BigDecimal); - BigDecimal resultDecimal = (BigDecimal)result; - assertEquals(BigDecimal.valueOf(1.239), resultDecimal); - } + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + floorDecimalExpression.evaluate(null, ptr); + Object result = floorDecimalExpression.getDataType().toObject(ptr); - @Test - public void testCeilDecimalExpressionNoop() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); - Expression ceilDecimalExpression = CeilDecimalExpression.create(decimalLiteral, 3); + assertTrue(result instanceof BigDecimal); + BigDecimal resultDecimal = (BigDecimal) result; + assertEquals(BigDecimal.valueOf(1.238), resultDecimal); + } - assertEquals(ceilDecimalExpression, decimalLiteral); - } + @Test + public void testFloorDecimalExpressionNoop() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); + Expression floorDecimalExpression = 
FloorDecimalExpression.create(decimalLiteral, 3); - @Test - public void testRoundDecimalExpressionScaleParamValidation() throws Exception { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); - LiteralExpression scale = LiteralExpression.newConstant("3", PVarchar.INSTANCE); + assertEquals(floorDecimalExpression, decimalLiteral); + } - List childExpressions = new ArrayList(2); - childExpressions.add(decimalLiteral); - childExpressions.add(scale); + @Test + public void testCeilDecimalExpression() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); + Expression ceilDecimalExpression = CeilDecimalExpression.create(decimalLiteral, 3); - try { - RoundDecimalExpression.create(childExpressions); - fail("Evaluation should have failed because only an INTEGER is allowed for second param in a RoundDecimalExpression"); - } catch(IllegalDataException e) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ceilDecimalExpression.evaluate(null, ptr); + Object result = ceilDecimalExpression.getDataType().toObject(ptr); - } - } + assertTrue(result instanceof BigDecimal); + BigDecimal resultDecimal = (BigDecimal) result; + assertEquals(BigDecimal.valueOf(1.239), resultDecimal); + } - // KeyRange explicit simple / sanity tests + @Test + public void testCeilDecimalExpressionNoop() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(5, PInteger.INSTANCE); + Expression ceilDecimalExpression = CeilDecimalExpression.create(decimalLiteral, 3); - @Test - public void testRoundDecimalExpressionKeyRangeSimple() throws Exception { - KeyPart baseKeyPart = getDecimalKeyPart(); - ScalarFunction roundDecimalExpression = (ScalarFunction)RoundDecimalExpression.create(DUMMY_DECIMAL, 3); + assertEquals(ceilDecimalExpression, decimalLiteral); + } + + @Test + public void testRoundDecimalExpressionScaleParamValidation() throws Exception { + LiteralExpression decimalLiteral = LiteralExpression.newConstant(1.23898, PDecimal.INSTANCE); + LiteralExpression scale = LiteralExpression.newConstant("3", PVarchar.INSTANCE); - byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.2385")); - byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.2375")); - KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, upperBound); + List childExpressions = new ArrayList(2); + childExpressions.add(decimalLiteral); + childExpressions.add(scale); + + try { + RoundDecimalExpression.create(childExpressions); + fail( + "Evaluation should have failed because only an INTEGER is allowed for second param in a RoundDecimalExpression"); + } catch (IllegalDataException e) { - KeyPart keyPart = roundDecimalExpression.newKeyPart(baseKeyPart); - assertEquals(expectedKeyRange, keyPart.getKeyRange(CompareOperator.EQUAL, LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); } - - @Test - public void testFloorDecimalExpressionKeyRangeSimple() throws Exception { - KeyPart baseKeyPart = getDecimalKeyPart(); - ScalarFunction floorDecimalExpression = (ScalarFunction)FloorDecimalExpression.create(DUMMY_DECIMAL, 3); - - byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.239")); - byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.238")); - KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, true, upperBound, false); - - KeyPart keyPart = floorDecimalExpression.newKeyPart(baseKeyPart); - assertEquals(expectedKeyRange, 
keyPart.getKeyRange(CompareOperator.EQUAL, LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); + } + + // KeyRange explicit simple / sanity tests + + @Test + public void testRoundDecimalExpressionKeyRangeSimple() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + ScalarFunction roundDecimalExpression = + (ScalarFunction) RoundDecimalExpression.create(DUMMY_DECIMAL, 3); + + byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.2385")); + byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.2375")); + KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, upperBound); + + KeyPart keyPart = roundDecimalExpression.newKeyPart(baseKeyPart); + assertEquals(expectedKeyRange, keyPart.getKeyRange(CompareOperator.EQUAL, + LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); + } + + @Test + public void testFloorDecimalExpressionKeyRangeSimple() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + ScalarFunction floorDecimalExpression = + (ScalarFunction) FloorDecimalExpression.create(DUMMY_DECIMAL, 3); + + byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.239")); + byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.238")); + KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, true, upperBound, false); + + KeyPart keyPart = floorDecimalExpression.newKeyPart(baseKeyPart); + assertEquals(expectedKeyRange, keyPart.getKeyRange(CompareOperator.EQUAL, + LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); + } + + @Test + public void testCeilDecimalExpressionKeyRangeSimple() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + ScalarFunction ceilDecimalExpression = + (ScalarFunction) CeilDecimalExpression.create(DUMMY_DECIMAL, 3); + + byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.238")); + byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.237")); + KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, false, upperBound, true); + + KeyPart keyPart = ceilDecimalExpression.newKeyPart(baseKeyPart); + assertEquals(expectedKeyRange, keyPart.getKeyRange(CompareOperator.EQUAL, + LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); + } + + // KeyRange complex / generated tests + + @Test + public void testRoundDecimalExpressionKeyRangeCoverage() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + for (int scale : SCALES) { + ScalarFunction roundDecimalExpression = + (ScalarFunction) RoundDecimalExpression.create(DUMMY_DECIMAL, scale); + KeyPart keyPart = roundDecimalExpression.newKeyPart(baseKeyPart); + verifyKeyPart(RoundingType.ROUND, scale, keyPart); } - - @Test - public void testCeilDecimalExpressionKeyRangeSimple() throws Exception { - KeyPart baseKeyPart = getDecimalKeyPart(); - ScalarFunction ceilDecimalExpression = (ScalarFunction)CeilDecimalExpression.create(DUMMY_DECIMAL, 3); - - byte[] upperBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.238")); - byte[] lowerBound = PDecimal.INSTANCE.toBytes(new BigDecimal("1.237")); - KeyRange expectedKeyRange = KeyRange.getKeyRange(lowerBound, false, upperBound, true); - - KeyPart keyPart = ceilDecimalExpression.newKeyPart(baseKeyPart); - assertEquals(expectedKeyRange, keyPart.getKeyRange(CompareOperator.EQUAL, LiteralExpression.newConstant(new BigDecimal("1.238"), PDecimal.INSTANCE))); - } - - // KeyRange complex / generated tests - - @Test - public void testRoundDecimalExpressionKeyRangeCoverage() throws Exception { 
- KeyPart baseKeyPart = getDecimalKeyPart(); - for(int scale : SCALES) { - ScalarFunction roundDecimalExpression = (ScalarFunction) RoundDecimalExpression.create(DUMMY_DECIMAL, scale); - KeyPart keyPart = roundDecimalExpression.newKeyPart(baseKeyPart); - verifyKeyPart(RoundingType.ROUND, scale, keyPart); + } + + private static KeyPart getDecimalKeyPart() throws SQLException { + String tableName = generateUniqueName(); + try (PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class)) { + pconn.createStatement().execute("CREATE TABLE " + tableName + " (k DECIMAL PRIMARY KEY)"); + final PTable table = + pconn.getMetaDataCache().getTableRef(new PTableKey(null, tableName)).getTable(); + KeyPart baseKeyPart = new KeyPart() { + + @Override + public KeyRange getKeyRange(CompareOperator op, Expression rhs) { + return KeyRange.EVERYTHING_RANGE; } - } - private static KeyPart getDecimalKeyPart() throws SQLException { - String tableName = generateUniqueName(); - try (PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class)) { - pconn.createStatement().execute("CREATE TABLE " + tableName + " (k DECIMAL PRIMARY KEY)"); - final PTable table = pconn.getMetaDataCache().getTableRef(new PTableKey(null, tableName)).getTable(); - KeyPart baseKeyPart = new KeyPart() { - - @Override - public KeyRange getKeyRange(CompareOperator op, Expression rhs) { - return KeyRange.EVERYTHING_RANGE; - } - - @Override - public Set getExtractNodes() { - return Collections.emptySet(); - } - - @Override - public PColumn getColumn() { - return table.getPKColumns().get(0); - } - - @Override - public PTable getTable() { - return table; - } - }; - return baseKeyPart; + @Override + public Set getExtractNodes() { + return Collections.emptySet(); } - } - - @Test - public void testFloorDecimalExpressionKeyRangeCoverage() throws Exception { - KeyPart baseKeyPart = getDecimalKeyPart(); - for(int scale : SCALES) { - ScalarFunction floorDecimalExpression = (ScalarFunction) FloorDecimalExpression.create(DUMMY_DECIMAL, scale); - KeyPart keyPart = floorDecimalExpression.newKeyPart(baseKeyPart); - verifyKeyPart(RoundingType.FLOOR, scale, keyPart); - } - } - @Test - public void testCeilDecimalExpressionKeyRangeCoverage() throws Exception { - KeyPart baseKeyPart = getDecimalKeyPart(); - for(int scale : SCALES) { - ScalarFunction ceilDecimalExpression = (ScalarFunction) CeilDecimalExpression.create(DUMMY_DECIMAL, scale); - KeyPart keyPart = ceilDecimalExpression.newKeyPart(baseKeyPart); - verifyKeyPart(RoundingType.CEIL, scale, keyPart); + @Override + public PColumn getColumn() { + return table.getPKColumns().get(0); } - } - /** - * Represents the three different types of rounding expression and produces - * expressions of their type when given a Decimal key and scale. - */ - private enum RoundingType { - ROUND("ROUND"), - FLOOR("FLOOR"), - CEIL("CEIL"); - - public final String name; - - RoundingType(String name) { - this.name = name; - } - - /** - * Returns a rounding expression of this type that will round the given decimal key at the - * given scale. 
- * @param key the byte key for the Decimal to round - * @param scale the scale to round the decimal to - * @return the expression containing the above parameters - */ - public Expression getExpression(byte[] key, int scale) throws SQLException { - LiteralExpression decimalLiteral = LiteralExpression.newConstant(PDecimal.INSTANCE.toObject(key), PDecimal.INSTANCE); - switch(this) { - case ROUND: - return RoundDecimalExpression.create(decimalLiteral, scale); - case FLOOR: - return FloorDecimalExpression.create(decimalLiteral, scale); - case CEIL: - return CeilDecimalExpression.create(decimalLiteral, scale); - default: - throw new AssertionError("Unknown RoundingType"); - } + @Override + public PTable getTable() { + return table; } + }; + return baseKeyPart; } - - /** - * Represents a possible relational operator used in rounding expression where clauses. - * Includes information not kept by CompareFilter.CompareOp, including a string symbol - * representation and a method for actually comparing comparables. - */ - private enum Relation { - EQUAL(CompareOperator.EQUAL, "="), - GREATER(CompareOperator.GREATER, ">"), - GREATER_OR_EQUAL(CompareOperator.GREATER_OR_EQUAL, ">="), - LESS(CompareOperator.LESS, "<"), - LESS_OR_EQUAL(CompareOperator.LESS_OR_EQUAL, "<="); - - public final CompareOperator compareOp; - public final String symbol; - - Relation(CompareOperator compareOp, String symbol) { - this.compareOp = compareOp; - this.symbol = symbol; - } - - public > boolean compare(E lhs, E rhs) { - int comparison = lhs.compareTo(rhs); - switch(this) { - case EQUAL: - return comparison == 0; - case GREATER_OR_EQUAL: - return comparison >= 0; - case GREATER: - return comparison > 0; - case LESS_OR_EQUAL: - return comparison <= 0; - case LESS: - return comparison < 0; - default: - throw new AssertionError("Unknown RelationType"); - } - } + } + + @Test + public void testFloorDecimalExpressionKeyRangeCoverage() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + for (int scale : SCALES) { + ScalarFunction floorDecimalExpression = + (ScalarFunction) FloorDecimalExpression.create(DUMMY_DECIMAL, scale); + KeyPart keyPart = floorDecimalExpression.newKeyPart(baseKeyPart); + verifyKeyPart(RoundingType.FLOOR, scale, keyPart); } - - /** - * Produces a string error message containing the given information, formatted like a where - * clause.
    - * Example Output:
    - * 'where ROUND(?, 2) <= 2.55' (produced range: [2.545, 2.555) ) - * @param exprType - * @param scale - * @param relation - * @param rhs - * @param range - * @return - */ - private static String getMessage(RoundingType exprType, int scale, Relation relation, BigDecimal rhs, KeyRange range) { - String where = exprType.name + "(?, " + scale + ") " + relation.symbol + " " + rhs; - return "'where " + where + "' (produced range: " + formatDecimalKeyRange(range) + " )"; - } - - /** - * Interpreting the KeyRange as a range of decimal, produces a nicely formatted string - * representation. - * @param range the KeyRange to format - * @return the string representation, e.g. [2.45, 2.55) - */ - private static String formatDecimalKeyRange(KeyRange range) { - return (range.isLowerInclusive() ? "[" : "(") - + (range.lowerUnbound() ? "*" : PDecimal.INSTANCE.toObject(range.getLowerRange())) - + ", " - + (range.upperUnbound() ? "*" : PDecimal.INSTANCE.toObject(range.getUpperRange())) - + (range.isUpperInclusive() ? "]" : ")"); - } - - // create methods need a dummy expression that is not coercible to to a long - // value doesn't matter because we only use those expressions to produce a keypart - private static final LiteralExpression DUMMY_DECIMAL = LiteralExpression.newConstant(new BigDecimal("2.5")); - - private static final List DECIMALS = Collections.unmodifiableList( - Arrays.asList( - BigDecimal.valueOf(Long.MIN_VALUE * 17L - 13L, 9), - BigDecimal.valueOf(Long.MIN_VALUE, 8), - new BigDecimal("-200300"), - new BigDecimal("-8.44"), - new BigDecimal("-2.00"), - new BigDecimal("-0.6"), - new BigDecimal("-0.00032"), - BigDecimal.ZERO, - BigDecimal.ONE, - new BigDecimal("0.00000984"), - new BigDecimal("0.74"), - new BigDecimal("2.00"), - new BigDecimal("7.09"), - new BigDecimal("84900800"), - BigDecimal.valueOf(Long.MAX_VALUE, 8), - BigDecimal.valueOf(Long.MAX_VALUE * 31L + 17L, 7) - )); - - private static final List SCALES = Collections.unmodifiableList(Arrays.asList(0, 1, 2, 3, 8)); - - /** - * Checks that a given KeyPart produces the right key ranges for each relational operator and - * a variety of right-hand-side decimals. - * @param exprType the rounding expression type used to create this KeyPart - * @param scale the scale used to create this KeyPart - * @param keyPart the KeyPart to test - */ - private void verifyKeyPart(RoundingType exprType, int scale, KeyPart keyPart) throws SQLException { - for(BigDecimal rhsDecimal : DECIMALS) { - LiteralExpression rhsExpression = LiteralExpression.newConstant(rhsDecimal, PDecimal.INSTANCE); - for(Relation relation : Relation.values()) { - KeyRange keyRange = keyPart.getKeyRange(relation.compareOp, rhsExpression); - verifyKeyRange(exprType, scale, relation, rhsDecimal, keyRange); - } - } + } + + @Test + public void testCeilDecimalExpressionKeyRangeCoverage() throws Exception { + KeyPart baseKeyPart = getDecimalKeyPart(); + for (int scale : SCALES) { + ScalarFunction ceilDecimalExpression = + (ScalarFunction) CeilDecimalExpression.create(DUMMY_DECIMAL, scale); + KeyPart keyPart = ceilDecimalExpression.newKeyPart(baseKeyPart); + verifyKeyPart(RoundingType.CEIL, scale, keyPart); } + } - /** - * Checks that a given KeyRange's boundaries match with the given rounding expression type, - * rounding scale, relational operator, and right hand side decimal. 
- * Does so by checking the decimal values immediately on either side of the KeyRange border and - * verifying that they either match or do not match the "where clause" formed by the - * rounding type, scale, relation, and rhs decimal. If a relation should produce an unbounded - * upper or lower range, verifies that that end of the range is unbounded. Finally, if the - * range is empty, verifies that the rhs decimal required more precision than could be - * produced by the rounding expression. - * @param exprType the rounding expression type used to create this KeyRange - * @param scale the rounding scale used to create this KeyRange - * @param relation the relational operator used to create this KeyRange - * @param rhs the right hand side decimal used to create this KeyRange - * @param range the KeyRange to test - */ - private void verifyKeyRange(RoundingType exprType, int scale, Relation relation, BigDecimal rhs, KeyRange range) throws SQLException { - // dump of values for debugging - final String dump = getMessage(exprType, scale, relation, rhs, range); - - ImmutableBytesPtr rhsPtr = new ImmutableBytesPtr(); - LiteralExpression.newConstant(rhs, PDecimal.INSTANCE).evaluate(null, rhsPtr); - - ImmutableBytesPtr lhsPtr = new ImmutableBytesPtr(); - - // we should only get an empty range if we can verify that precision makes a match impossible - if(range == KeyRange.EMPTY_RANGE) { - assertTrue("should only get empty key range for unmatchable rhs precision (" + dump + ")", rhs.scale() > scale); - assertEquals("should only get empty key range for equals checks (" + dump + ")", Relation.EQUAL, relation); - return; - } - - // if it should have an upper bound - if(relation != Relation.GREATER && relation != Relation.GREATER_OR_EQUAL) { - // figure out what the upper bound is - byte[] highestHighIncluded; - byte[] lowestHighExcluded; - if(range.isUpperInclusive()) { - highestHighIncluded = range.getUpperRange(); - lowestHighExcluded = nextDecimalKey(range.getUpperRange()); - } - else { - highestHighIncluded = prevDecimalKey(range.getUpperRange()); - lowestHighExcluded = range.getUpperRange(); - } - - // check on either side of the boundary to validate that it is in fact the boundary - exprType.getExpression(highestHighIncluded, scale).evaluate(null, lhsPtr); - assertTrue("incorrectly excluding " + PDecimal.INSTANCE.toObject(highestHighIncluded) - + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr)); - exprType.getExpression(lowestHighExcluded, scale).evaluate(null, lhsPtr); - assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(lowestHighExcluded) - + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr)); - } - else { - // otherwise verify that it does not have an upper bound - assertTrue("should not have a upper bound for " + dump, range.upperUnbound()); - } - - // if it should have a lower bound - if(relation != Relation.LESS && relation != Relation.LESS_OR_EQUAL) { - // figure out what the lower bound is - byte[] lowestLowIncluded; - byte[] highestLowExcluded; - if(range.isLowerInclusive()) { - lowestLowIncluded = range.getLowerRange(); - highestLowExcluded = prevDecimalKey(range.getLowerRange()); - } - else { - lowestLowIncluded = nextDecimalKey(range.getLowerRange()); - highestLowExcluded = range.getLowerRange(); - } - - // check on either side of the boundary to validate that it is in fact the boundary - exprType.getExpression(lowestLowIncluded, scale).evaluate(null, lhsPtr); - assertTrue("incorrectly excluding " + 
PDecimal.INSTANCE.toObject(lowestLowIncluded) - + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr)); - exprType.getExpression(highestLowExcluded, scale).evaluate(null, lhsPtr); - assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(highestLowExcluded) - + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr)); - } - else { - // otherwise verify that it does not have a lower bound - assertTrue("should not have a lower bound for " + dump, range.lowerUnbound()); - } - } + /** + * Represents the three different types of rounding expression and produces expressions of their + * type when given a Decimal key and scale. + */ + private enum RoundingType { + ROUND("ROUND"), + FLOOR("FLOOR"), + CEIL("CEIL"); - /** - * Produces the previous Decimal key relative to the given key. The new key will differ from - * the old key in as small a unit as possible while still maintaining accurate serialization. - * @param key bytes for the old Decimal key - * @return bytes for the new Decimal key, a single unit previous to the old one - */ - private static byte[] prevDecimalKey(byte[] key) { - BigDecimal decimal = (BigDecimal) PDecimal.INSTANCE.toObject(key); - BigDecimal prev = decimal.subtract(getSmallestUnit(decimal)); - return PDecimal.INSTANCE.toBytes(prev); - } + public final String name; - /** - * Produces the next Decimal key relative to the given key. The new key will differ from the - * old key in as small a unit as possible while still maintaining accurate serialization. - * @param key bytes for the old Decimal key - * @return bytes for the new Decimal key, a single unit next from the old one - */ - private static byte[] nextDecimalKey(byte[] key) { - BigDecimal decimal = (BigDecimal) PDecimal.INSTANCE.toObject(key); - BigDecimal next = decimal.add(getSmallestUnit(decimal)); - return PDecimal.INSTANCE.toBytes(next); + RoundingType(String name) { + this.name = name; } /** - * Produces the smallest unit of difference possible for the given decimal that will still - * be serialized accurately. For example, if the MAXIMUM_RELIABLE_PRECISION were 4, then - * getSmallestUnit(2.3) would produce 0.001, as 2.301 could be serialized accurately but - * 2.3001 could not. - * @param decimal the decimal to find the smallest unit in relation to - * @return the smallest BigDecimal unit possible to add to decimal while still maintaining - * accurate serialization - * @throws IllegalArgumentException if decimal requires more than the maximum reliable precision + * Returns a rounding expression of this type that will round the given decimal key at the given + * scale. 
+ * @param key the byte key for the Decimal to round + * @param scale the scale to round the decimal to + * @return the expression containing the above parameters */ - private static BigDecimal getSmallestUnit(BigDecimal decimal) { - if (decimal.precision() > PDataType.MAX_PRECISION) { - throw new IllegalArgumentException("rounding errors mean that we cannot reliably test " + decimal); - } - int minScale = decimal.scale() + (PDataType.MAX_PRECISION - decimal.precision()); - return BigDecimal.valueOf(1, minScale); - } - - // Date Expression Tests - - @Test - public void testRoundDateExpression() throws Exception { - LiteralExpression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression roundDateExpression = RoundDateExpression.create(dateLiteral, TimeUnit.DAY); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - roundDateExpression.evaluate(null, ptr); - Object result = roundDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-02 00:00:00"), resultDate); - } - - @Test - public void testRoundDateExpressionWithMultiplier() throws Exception { - Expression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression roundDateExpression = RoundDateExpression.create(dateLiteral, TimeUnit.MINUTE, 10); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - roundDateExpression.evaluate(null, ptr); - Object result = roundDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-01 14:30:00"), resultDate); - } - - @Test - public void testFloorDateExpression() throws Exception { - LiteralExpression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.DAY); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - floorDateExpression.evaluate(null, ptr); - Object result = floorDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-01 00:00:00"), resultDate); - } - - @Test - public void testFloorDateExpressionWithMultiplier() throws Exception { - Expression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.SECOND, 10); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - floorDateExpression.evaluate(null, ptr); - Object result = floorDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-01 14:25:20"), resultDate); + public Expression getExpression(byte[] key, int scale) throws SQLException { + LiteralExpression decimalLiteral = + LiteralExpression.newConstant(PDecimal.INSTANCE.toObject(key), PDecimal.INSTANCE); + switch (this) { + case ROUND: + return RoundDecimalExpression.create(decimalLiteral, scale); + case FLOOR: + return FloorDecimalExpression.create(decimalLiteral, scale); + case CEIL: + return CeilDecimalExpression.create(decimalLiteral, scale); + default: + throw new AssertionError("Unknown RoundingType"); + } } - - @Test - public void 
testCeilDateExpression() throws Exception { - LiteralExpression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression ceilDateExpression = CeilDateExpression.create(dateLiteral, TimeUnit.DAY); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ceilDateExpression.evaluate(null, ptr); - Object result = ceilDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-02 00:00:00"), resultDate); + } + + /** + * Represents a possible relational operator used in rounding expression where clauses. Includes + * information not kept by CompareFilter.CompareOp, including a string symbol representation and a + * method for actually comparing comparables. + */ + private enum Relation { + EQUAL(CompareOperator.EQUAL, "="), + GREATER(CompareOperator.GREATER, ">"), + GREATER_OR_EQUAL(CompareOperator.GREATER_OR_EQUAL, ">="), + LESS(CompareOperator.LESS, "<"), + LESS_OR_EQUAL(CompareOperator.LESS_OR_EQUAL, "<="); + + public final CompareOperator compareOp; + public final String symbol; + + Relation(CompareOperator compareOp, String symbol) { + this.compareOp = compareOp; + this.symbol = symbol; } - - @Test - public void testCeilDateExpressionWithMultiplier() throws Exception { - Expression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - Expression ceilDateExpression = CeilDateExpression.create(dateLiteral, TimeUnit.SECOND, 10); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ceilDateExpression.evaluate(null, ptr); - Object result = ceilDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2012-01-01 14:25:30"), resultDate); - } - - /** - * Tests {@link RoundDateExpression} constructor check which only allows number of arguments between 2 and 3. - */ - @Test - public void testRoundDateExpressionValidation_1() throws Exception { - LiteralExpression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - - List childExpressions = new ArrayList(1); - childExpressions.add(dateLiteral); - - try { - RoundDateExpression.create(childExpressions); - fail("Instantiating a RoundDateExpression with only one argument should have failed."); - } catch(IllegalArgumentException e) { - } + public > boolean compare(E lhs, E rhs) { + int comparison = lhs.compareTo(rhs); + switch (this) { + case EQUAL: + return comparison == 0; + case GREATER_OR_EQUAL: + return comparison >= 0; + case GREATER: + return comparison > 0; + case LESS_OR_EQUAL: + return comparison <= 0; + case LESS: + return comparison < 0; + default: + throw new AssertionError("Unknown RelationType"); + } } - - /** - * Tests {@link RoundDateExpression} constructor for a valid value of time unit. 
- */ - @Test - public void testRoundDateExpressionValidation_2() throws Exception { - LiteralExpression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); - LiteralExpression timeUnitLiteral = LiteralExpression.newConstant("millis", PVarchar.INSTANCE); - - List childExpressions = new ArrayList(1); - childExpressions.add(dateLiteral); - childExpressions.add(timeUnitLiteral); - - try { - RoundDateExpression.create(childExpressions); - fail("Only a valid time unit represented by TimeUnit enum is allowed and millis is invalid."); - } catch(IllegalArgumentException e) { - - } + } + + /** + * Produces a string error message containing the given information, formatted like a where + * clause.
    + * Example Output:
    + * 'where ROUND(?, 2) <= 2.55' (produced range: [2.545, 2.555) ) + */ + private static String getMessage(RoundingType exprType, int scale, Relation relation, + BigDecimal rhs, KeyRange range) { + String where = exprType.name + "(?, " + scale + ") " + relation.symbol + " " + rhs; + return "'where " + where + "' (produced range: " + formatDecimalKeyRange(range) + " )"; + } + + /** + * Interpreting the KeyRange as a range of decimal, produces a nicely formatted string + * representation. + * @param range the KeyRange to format + * @return the string representation, e.g. [2.45, 2.55) + */ + private static String formatDecimalKeyRange(KeyRange range) { + return (range.isLowerInclusive() ? "[" : "(") + + (range.lowerUnbound() ? "*" : PDecimal.INSTANCE.toObject(range.getLowerRange())) + ", " + + (range.upperUnbound() ? "*" : PDecimal.INSTANCE.toObject(range.getUpperRange())) + + (range.isUpperInclusive() ? "]" : ")"); + } + + // create methods need a dummy expression that is not coercible to to a long + // value doesn't matter because we only use those expressions to produce a keypart + private static final LiteralExpression DUMMY_DECIMAL = + LiteralExpression.newConstant(new BigDecimal("2.5")); + + private static final List DECIMALS = + Collections.unmodifiableList(Arrays.asList(BigDecimal.valueOf(Long.MIN_VALUE * 17L - 13L, 9), + BigDecimal.valueOf(Long.MIN_VALUE, 8), new BigDecimal("-200300"), new BigDecimal("-8.44"), + new BigDecimal("-2.00"), new BigDecimal("-0.6"), new BigDecimal("-0.00032"), BigDecimal.ZERO, + BigDecimal.ONE, new BigDecimal("0.00000984"), new BigDecimal("0.74"), new BigDecimal("2.00"), + new BigDecimal("7.09"), new BigDecimal("84900800"), BigDecimal.valueOf(Long.MAX_VALUE, 8), + BigDecimal.valueOf(Long.MAX_VALUE * 31L + 17L, 7))); + + private static final List SCALES = + Collections.unmodifiableList(Arrays.asList(0, 1, 2, 3, 8)); + + /** + * Checks that a given KeyPart produces the right key ranges for each relational operator and a + * variety of right-hand-side decimals. + * @param exprType the rounding expression type used to create this KeyPart + * @param scale the scale used to create this KeyPart + * @param keyPart the KeyPart to test + */ + private void verifyKeyPart(RoundingType exprType, int scale, KeyPart keyPart) + throws SQLException { + for (BigDecimal rhsDecimal : DECIMALS) { + LiteralExpression rhsExpression = + LiteralExpression.newConstant(rhsDecimal, PDecimal.INSTANCE); + for (Relation relation : Relation.values()) { + KeyRange keyRange = keyPart.getKeyRange(relation.compareOp, rhsExpression); + verifyKeyRange(exprType, scale, relation, rhsDecimal, keyRange); + } } - - @Test - public void testFloorDateExpressionForWeek() throws Exception { - Expression dateLiteral = LiteralExpression.newConstant(DateUtil.parseDate("2016-01-07 08:17:28"), PDate.INSTANCE); - Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.WEEK); - - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - floorDateExpression.evaluate(null, ptr); - Object result = floorDateExpression.getDataType().toObject(ptr); - - assertTrue(result instanceof Date); - Date resultDate = (Date)result; - assertEquals(DateUtil.parseDate("2016-01-04 00:00:00"), resultDate); + } + + /** + * Checks that a given KeyRange's boundaries match with the given rounding expression type, + * rounding scale, relational operator, and right hand side decimal. 
Does so by checking the + * decimal values immediately on either side of the KeyRange border and verifying that they either + * match or do not match the "where clause" formed by the rounding type, scale, relation, and rhs + * decimal. If a relation should produce an unbounded upper or lower range, verifies that that end + * of the range is unbounded. Finally, if the range is empty, verifies that the rhs decimal + * required more precision than could be produced by the rounding expression. + * @param exprType the rounding expression type used to create this KeyRange + * @param scale the rounding scale used to create this KeyRange + * @param relation the relational operator used to create this KeyRange + * @param rhs the right hand side decimal used to create this KeyRange + * @param range the KeyRange to test + */ + private void verifyKeyRange(RoundingType exprType, int scale, Relation relation, BigDecimal rhs, + KeyRange range) throws SQLException { + // dump of values for debugging + final String dump = getMessage(exprType, scale, relation, rhs, range); + + ImmutableBytesPtr rhsPtr = new ImmutableBytesPtr(); + LiteralExpression.newConstant(rhs, PDecimal.INSTANCE).evaluate(null, rhsPtr); + + ImmutableBytesPtr lhsPtr = new ImmutableBytesPtr(); + + // we should only get an empty range if we can verify that precision makes a match impossible + if (range == KeyRange.EMPTY_RANGE) { + assertTrue("should only get empty key range for unmatchable rhs precision (" + dump + ")", + rhs.scale() > scale); + assertEquals("should only get empty key range for equals checks (" + dump + ")", + Relation.EQUAL, relation); + return; } - private RoundDateExpression getRoundMsExpression(String s, TimeUnit u, int m) throws SQLException { - return (RoundDateExpression)RoundDateExpression.create(LiteralExpression.newConstant(s), u, m ); + // if it should have an upper bound + if (relation != Relation.GREATER && relation != Relation.GREATER_OR_EQUAL) { + // figure out what the upper bound is + byte[] highestHighIncluded; + byte[] lowestHighExcluded; + if (range.isUpperInclusive()) { + highestHighIncluded = range.getUpperRange(); + lowestHighExcluded = nextDecimalKey(range.getUpperRange()); + } else { + highestHighIncluded = prevDecimalKey(range.getUpperRange()); + lowestHighExcluded = range.getUpperRange(); + } + + // check on either side of the boundary to validate that it is in fact the boundary + exprType.getExpression(highestHighIncluded, scale).evaluate(null, lhsPtr); + assertTrue("incorrectly excluding " + PDecimal.INSTANCE.toObject(highestHighIncluded) + + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr)); + exprType.getExpression(lowestHighExcluded, scale).evaluate(null, lhsPtr); + assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(lowestHighExcluded) + + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr)); + } else { + // otherwise verify that it does not have an upper bound + assertTrue("should not have a upper bound for " + dump, range.upperUnbound()); } - // The three tests below are backported from PHOENIX-5066. - // When PHOENIX-5066 lands, these can be removed as redundant. 
- - @Test - public void testRoundingGMT() throws SQLException { - // We operate on Instants for time units up to Days, simply counting millis - - RoundDateExpression oddWholeSecondExp = - getRoundMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); - java.sql.Timestamp oddWholeSecond = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); - long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime() - HALF_SEC; - long upperBoundaryOddWholeSecond = oddWholeSecond.getTime() + HALF_SEC - 1; - assertEquals(lowerBoundaryOddWholeSecond, - oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecond, - oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); - - // 10 sec range - RoundDateExpression oddWholeSecondRound10Exp = - getRoundMsExpression("2022-11-11 11:11:10", TimeUnit.SECOND, 10); - java.sql.Timestamp oddWholeSecondRound10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:10").getTime()); - long lowerBoundaryOddWholeSecondRound10 = oddWholeSecondRound10.getTime() - 5 * SEC; - long upperBoundaryOddWholeSecondRound10 = oddWholeSecondRound10.getTime() + 5 * SEC - 1; - assertEquals(lowerBoundaryOddWholeSecondRound10, - oddWholeSecondRound10Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondRound10, - oddWholeSecondRound10Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondRound10, new java.sql.Timestamp( - oddWholeSecondRound10Exp.roundTime(lowerBoundaryOddWholeSecondRound10))); - assertNotEquals(oddWholeSecondRound10, new java.sql.Timestamp( - oddWholeSecondRound10Exp.roundTime(lowerBoundaryOddWholeSecondRound10 - 1))); - assertEquals(oddWholeSecondRound10, new java.sql.Timestamp( - oddWholeSecondRound10Exp.roundTime(upperBoundaryOddWholeSecondRound10))); - assertNotEquals(oddWholeSecondRound10, new java.sql.Timestamp( - oddWholeSecondRound10Exp.roundTime(upperBoundaryOddWholeSecondRound10 + 1))); - - // 15 sec range - RoundDateExpression oddWholeSecondRound15Exp = - getRoundMsExpression("2022-11-11 11:11:15", TimeUnit.SECOND, 15); - java.sql.Timestamp oddWholeSecondRound15 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:15").getTime()); - long lowerBoundaryOddWholeSecondRound15 = oddWholeSecondRound15.getTime() - 15 * HALF_SEC; - long upperBoundaryOddWholeSecondRound15 = - oddWholeSecondRound15.getTime() + 15 * HALF_SEC - 1; - assertEquals(lowerBoundaryOddWholeSecondRound15, - oddWholeSecondRound15Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondRound15, - oddWholeSecondRound15Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondRound15, new java.sql.Timestamp( - oddWholeSecondRound15Exp.roundTime(lowerBoundaryOddWholeSecondRound15))); - assertNotEquals(oddWholeSecondRound15, new java.sql.Timestamp( - oddWholeSecondRound15Exp.roundTime(lowerBoundaryOddWholeSecondRound15 - 1))); - assertEquals(oddWholeSecondRound15, new java.sql.Timestamp( - oddWholeSecondRound15Exp.roundTime(upperBoundaryOddWholeSecondRound15))); - 
assertNotEquals(oddWholeSecondRound15, new java.sql.Timestamp( - oddWholeSecondRound15Exp.roundTime(upperBoundaryOddWholeSecondRound15 + 1))); - - RoundDateExpression evenWholeSecondExp = - getRoundMsExpression("2022-11-11 11:11:12", TimeUnit.SECOND, 1); - java.sql.Timestamp evenWholeSecond = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:12").getTime()); - long lowerBoundaryEvenWholeSecond = evenWholeSecond.getTime() - HALF_SEC; - long upperBoundaryEvenWholeSecond = evenWholeSecond.getTime() + HALF_SEC - 1; - assertEquals(lowerBoundaryEvenWholeSecond, - evenWholeSecondExp.rangeLower(evenWholeSecond.getTime())); - assertEquals(upperBoundaryEvenWholeSecond, - evenWholeSecondExp.rangeUpper(evenWholeSecond.getTime())); - assertEquals(evenWholeSecond, - new java.sql.Timestamp(evenWholeSecondExp.roundTime(lowerBoundaryEvenWholeSecond))); - assertNotEquals(evenWholeSecond, - new java.sql.Timestamp(evenWholeSecondExp.roundTime(lowerBoundaryEvenWholeSecond - 1))); - assertEquals(evenWholeSecond, - new java.sql.Timestamp(evenWholeSecondExp.roundTime(upperBoundaryEvenWholeSecond))); - assertNotEquals(evenWholeSecond, - new java.sql.Timestamp(evenWholeSecondExp.roundTime(upperBoundaryEvenWholeSecond + 1))); - - RoundDateExpression oddWholeMinuteExp = - getRoundMsExpression("2022-11-11 11:11:0", TimeUnit.MINUTE, 1); - java.sql.Timestamp oddWholeMinute = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:0").getTime()); - long lowerBoundaryOddWholeMinute = oddWholeMinute.getTime() - HALF_MIN; - long upperBoundaryOddWholeMinute = oddWholeMinute.getTime() + HALF_MIN - 1; - assertEquals(lowerBoundaryOddWholeMinute, - oddWholeMinuteExp.rangeLower(oddWholeMinute.getTime())); - assertEquals(upperBoundaryOddWholeMinute, - oddWholeMinuteExp.rangeUpper(oddWholeMinute.getTime())); - assertEquals(oddWholeMinute, - new java.sql.Timestamp(oddWholeMinuteExp.roundTime(lowerBoundaryOddWholeMinute))); - assertNotEquals(oddWholeMinute, - new java.sql.Timestamp(oddWholeMinuteExp.roundTime(lowerBoundaryOddWholeMinute - 1))); - assertEquals(oddWholeMinute, - new java.sql.Timestamp(oddWholeMinuteExp.roundTime(upperBoundaryOddWholeMinute))); - assertNotEquals(oddWholeMinute, - new java.sql.Timestamp(oddWholeMinuteExp.roundTime(upperBoundaryOddWholeMinute + 1))); - - RoundDateExpression oddWholeMinuteRound20Exp = - getRoundMsExpression("2022-11-11 11:20:0", TimeUnit.MINUTE, 20); - java.sql.Timestamp oddWholeMinuteRound20 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:20:0").getTime()); - long lowerBoundaryOddWholeMinute20 = oddWholeMinuteRound20.getTime() - 10 * MIN; - long upperBoundaryOddWholeMinute20 = oddWholeMinuteRound20.getTime() + 10 * MIN - 1; - assertEquals(lowerBoundaryOddWholeMinute20, - oddWholeMinuteRound20Exp.rangeLower(oddWholeMinute.getTime())); - assertEquals(upperBoundaryOddWholeMinute20, - oddWholeMinuteRound20Exp.rangeUpper(oddWholeMinute.getTime())); - assertEquals(oddWholeMinuteRound20, new java.sql.Timestamp( - oddWholeMinuteRound20Exp.roundTime(lowerBoundaryOddWholeMinute20))); - assertNotEquals(oddWholeMinuteRound20, new java.sql.Timestamp( - oddWholeMinuteRound20Exp.roundTime(lowerBoundaryOddWholeMinute20 - 1))); - assertEquals(oddWholeMinuteRound20, new java.sql.Timestamp( - oddWholeMinuteRound20Exp.roundTime(upperBoundaryOddWholeMinute20))); - assertNotEquals(oddWholeMinuteRound20, new java.sql.Timestamp( - oddWholeMinuteRound20Exp.roundTime(upperBoundaryOddWholeMinute20 + 1))); - - // Minutes since epoch, don't expect the rounded value to be "round" - - 
RoundDateExpression oddWholeMinuteRound17Exp = - getRoundMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 17); - java.sql.Timestamp oddWholeMinuteRound17 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); - long lowerBoundaryOddWholeMinute17 = oddWholeMinuteRound17.getTime() - 17 * HALF_MIN; - long upperBoundaryOddWholeMinute17 = oddWholeMinuteRound17.getTime() + 17 * HALF_MIN - 1; - assertEquals(lowerBoundaryOddWholeMinute17, - oddWholeMinuteRound17Exp.rangeLower(oddWholeMinute.getTime())); - assertEquals(upperBoundaryOddWholeMinute17, - oddWholeMinuteRound17Exp.rangeUpper(oddWholeMinute.getTime())); - assertEquals(oddWholeMinuteRound17, new java.sql.Timestamp( - oddWholeMinuteRound17Exp.roundTime(lowerBoundaryOddWholeMinute17))); - assertNotEquals(oddWholeMinuteRound17, new java.sql.Timestamp( - oddWholeMinuteRound17Exp.roundTime(lowerBoundaryOddWholeMinute17 - 1))); - assertEquals(oddWholeMinuteRound17, new java.sql.Timestamp( - oddWholeMinuteRound17Exp.roundTime(upperBoundaryOddWholeMinute17))); - assertNotEquals(oddWholeMinuteRound17, new java.sql.Timestamp( - oddWholeMinuteRound17Exp.roundTime(upperBoundaryOddWholeMinute17 + 1))); - - RoundDateExpression evenWholeMinuteExp = - getRoundMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); - java.sql.Timestamp evenWholeMinute = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); - long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime() - HALF_MIN; - long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime() + HALF_MIN - 1; - assertEquals(lowerBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); - - RoundDateExpression oddWholeHourExp = - getRoundMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); - java.sql.Timestamp oddWholeHour = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); - long lowerBoundaryOddWholeHour = oddWholeHour.getTime() - HALF_HOUR; - long upperBoundaryOddWholeHour = oddWholeHour.getTime() + HALF_HOUR - 1; - assertEquals(lowerBoundaryOddWholeHour, oddWholeHourExp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour10Exp = - getRoundMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 10); - java.sql.Timestamp oddWholeHour10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); - long 
lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime() - HALF_HOUR * 10; - long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime() + HALF_HOUR * 10 - 1; - assertEquals(lowerBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour11Exp = - getRoundMsExpression("2022-11-11 07:0:0", TimeUnit.HOUR, 11); - java.sql.Timestamp oddWholeHour11 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 07:0:0").getTime()); - long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime() - HALF_HOUR * 11; - long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime() + HALF_HOUR * 11 - 1; - assertEquals(lowerBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); - - RoundDateExpression evenwholeHourExp = - getRoundMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 1); - java.sql.Timestamp evenwholeHour = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); - long lowerBoundaryEvenWholeHour = evenwholeHour.getTime() - HALF_HOUR; - long upperBoundaryEvenWholeHour = evenwholeHour.getTime() + HALF_HOUR - 1; - assertEquals(lowerBoundaryEvenWholeHour, - evenwholeHourExp.rangeLower(evenwholeHour.getTime())); - assertEquals(upperBoundaryEvenWholeHour, - evenwholeHourExp.rangeUpper(evenwholeHour.getTime())); - assertEquals(evenwholeHour, - new java.sql.Timestamp(evenwholeHourExp.roundTime(lowerBoundaryEvenWholeHour))); - assertNotEquals(evenwholeHour, - new java.sql.Timestamp(evenwholeHourExp.roundTime(lowerBoundaryEvenWholeHour - 1))); - assertEquals(evenwholeHour, - new java.sql.Timestamp(evenwholeHourExp.roundTime(upperBoundaryEvenWholeHour))); - assertNotEquals(evenwholeHour, - new java.sql.Timestamp(evenwholeHourExp.roundTime(upperBoundaryEvenWholeHour + 1))); - - // No DST switchover - RoundDateExpression oddWholeDayExp = - getRoundMsExpression("2022-11-11 0:0:0", TimeUnit.DAY, 1); - java.sql.Timestamp oddWholeDay = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 0:0:0").getTime()); - long lowerBoundaryOddWholeDay = oddWholeDay.getTime() - HALF_DAY; - long upperBoundaryOddWholeDay = oddWholeDay.getTime() + HALF_DAY - 1; - assertEquals(lowerBoundaryOddWholeDay, oddWholeDayExp.rangeLower(oddWholeDay.getTime())); - assertEquals(upperBoundaryOddWholeDay, oddWholeDayExp.rangeUpper(oddWholeDay.getTime())); - 
assertEquals(oddWholeDay, - new java.sql.Timestamp(oddWholeDayExp.roundTime(lowerBoundaryOddWholeDay))); - assertNotEquals(oddWholeDay, - new java.sql.Timestamp(oddWholeDayExp.roundTime(lowerBoundaryOddWholeDay - 1))); - assertEquals(oddWholeDay, - new java.sql.Timestamp(oddWholeDayExp.roundTime(upperBoundaryOddWholeDay))); - assertNotEquals(oddWholeDay, - new java.sql.Timestamp(oddWholeDayExp.roundTime(upperBoundaryOddWholeDay + 1))); - - RoundDateExpression oddWholeDay10Exp = - getRoundMsExpression("2022-11-14 0:0:0", TimeUnit.DAY, 10); - java.sql.Timestamp oddWholeDay10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-14 0:0:0").getTime()); - long lowerBoundaryOddWholeDay10 = oddWholeDay10.getTime() - 10 * HALF_DAY; - long upperBoundaryOddWholeDay10 = oddWholeDay10.getTime() + 10 * HALF_DAY - 1; - assertEquals(lowerBoundaryOddWholeDay10, - oddWholeDay10Exp.rangeLower(oddWholeDay.getTime())); - assertEquals(upperBoundaryOddWholeDay10, - oddWholeDay10Exp.rangeUpper(oddWholeDay.getTime())); - assertEquals(oddWholeDay10, - new java.sql.Timestamp(oddWholeDay10Exp.roundTime(lowerBoundaryOddWholeDay10))); - assertNotEquals(oddWholeDay10, - new java.sql.Timestamp(oddWholeDay10Exp.roundTime(lowerBoundaryOddWholeDay10 - 1))); - assertEquals(oddWholeDay10, - new java.sql.Timestamp(oddWholeDay10Exp.roundTime(upperBoundaryOddWholeDay10))); - assertNotEquals(oddWholeDay10, - new java.sql.Timestamp(oddWholeDay10Exp.roundTime(upperBoundaryOddWholeDay10 + 1))); - - RoundDateExpression oddWholeDay3Exp = - getRoundMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); - java.sql.Timestamp oddWholeDay3 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryOddWholeDay3 = oddWholeDay3.getTime() - 3 * HALF_DAY; - long upperBoundaryOddWholeDay3 = oddWholeDay3.getTime() + 3 * HALF_DAY - 1; - assertEquals(lowerBoundaryOddWholeDay3, oddWholeDay3Exp.rangeLower(oddWholeDay.getTime())); - assertEquals(upperBoundaryOddWholeDay3, oddWholeDay3Exp.rangeUpper(oddWholeDay.getTime())); - assertEquals(oddWholeDay3, - new java.sql.Timestamp(oddWholeDay3Exp.roundTime(lowerBoundaryOddWholeDay3))); - assertNotEquals(oddWholeDay3, - new java.sql.Timestamp(oddWholeDay3Exp.roundTime(lowerBoundaryOddWholeDay3 - 1))); - assertEquals(oddWholeDay3, - new java.sql.Timestamp(oddWholeDay3Exp.roundTime(upperBoundaryOddWholeDay3))); - assertNotEquals(oddWholeDay3, - new java.sql.Timestamp(oddWholeDay3Exp.roundTime(upperBoundaryOddWholeDay3 + 1))); - - RoundDateExpression evenWholeDayExp = - getRoundMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); - java.sql.Timestamp evenWholeDay = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay = evenWholeDay.getTime() - HALF_DAY; - long upperBoundaryEvenWholeDay = evenWholeDay.getTime() + HALF_DAY - 1; - assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); - - // Stateless, we can reuse it for every week test - 
RoundWeekExpression roundWeekExpression = new RoundWeekExpression(); - java.sql.Timestamp wholeWeekOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); - long lowerBoundaryWholeWeekOdd = wholeWeekOdd.getTime() - (HALF_WEEK - 1); - long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime() + HALF_WEEK - 1; - assertEquals(lowerBoundaryWholeWeekOdd, - roundWeekExpression.rangeLower(wholeWeekOdd.getTime())); - assertEquals(upperBoundaryWholeWeekOdd, - roundWeekExpression.rangeUpper(wholeWeekOdd.getTime())); - assertEquals(wholeWeekOdd, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeWeekOdd, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekOdd + 1, GJChronology.getInstanceUTC())))); - - java.sql.Timestamp wholeWeekEven = - new java.sql.Timestamp(DateUtil.parseDate("2022-10-17 0:0:0").getTime()); - long lowerBoundaryWholeWeekEven = wholeWeekEven.getTime() - HALF_WEEK; - long upperBoundaryWholeWeekEven = wholeWeekEven.getTime() + HALF_WEEK; - assertEquals(lowerBoundaryWholeWeekEven, - roundWeekExpression.rangeLower(wholeWeekEven.getTime())); - assertEquals(upperBoundaryWholeWeekEven, - roundWeekExpression.rangeUpper(wholeWeekEven.getTime())); - assertEquals(wholeWeekEven, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekEven, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeWeekEven, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekEven, - new java.sql.Timestamp(roundWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekEven + 1, GJChronology.getInstanceUTC())))); - - RoundMonthExpression roundMonthExpression = new RoundMonthExpression(); - // We're still using roundHalfEven here for backwards compatibility - java.sql.Timestamp wholeMonthEven = - new java.sql.Timestamp(DateUtil.parseDate("2022-06-1 0:0:0").getTime()); - // May is 31 days - long lowerBoundaryWholeMonthEven = wholeMonthEven.getTime() - 31 * HALF_DAY; - // June is 30 days - long upperBoundaryWholeMonthEven = wholeMonthEven.getTime() + 30 * HALF_DAY; - assertEquals(lowerBoundaryWholeMonthEven, - roundMonthExpression.rangeLower(wholeMonthEven.getTime())); - assertEquals(upperBoundaryWholeMonthEven, - roundMonthExpression.rangeUpper(wholeMonthEven.getTime())); - assertEquals(wholeMonthEven, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthEven, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthEven - 1, GJChronology.getInstanceUTC())))); - 
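The month and year windows above are asymmetric because the adjacent periods differ in length: around 2022-06-01 the range reaches 31 half-days back to the midpoint of May and 30 half-days forward to the midpoint of June, and the half-even tie handling kept for backwards compatibility decides which side owns the exact midpoint. A minimal sketch of that boundary arithmetic with plain JDK time, illustrative only:

// Illustrative sketch only, not part of this patch.
import java.time.LocalDate;
import java.time.ZoneOffset;

class MonthWindowSketch {
  static final long HALF_DAY = 12L * 60 * 60 * 1000;

  public static void main(String[] args) {
    long firstOfJune =
        LocalDate.of(2022, 6, 1).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();
    // May is 31 days, so the window reaches 31 half-days back (midpoint of May);
    // June is 30 days, so it reaches 30 half-days forward (midpoint of June).
    long lower = firstOfJune - 31 * HALF_DAY;
    long upper = firstOfJune + 30 * HALF_DAY;
    // Exactly where a tie at the midpoint lands is governed by the half-even
    // rounding noted above, which is what the +/-1 adjustments in the odd cases cover.
    System.out.println(lower + " .. " + upper);
  }
}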
assertEquals(wholeMonthEven, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthEven, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthEven + 1, GJChronology.getInstanceUTC())))); - - // We're still using roundHalfEven here for backwards compatibility - java.sql.Timestamp wholeMonthOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-07-1 0:0:0").getTime()); - // June is 30 days - long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime() - 30 * HALF_DAY + 1; - // July is 31 days - long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime() + 31 * HALF_DAY - 1; - assertEquals(lowerBoundaryWholeMonthOdd, - roundMonthExpression.rangeLower(wholeMonthOdd.getTime())); - assertEquals(upperBoundaryWholeMonthOdd, - roundMonthExpression.rangeUpper(wholeMonthOdd.getTime())); - assertEquals(wholeMonthOdd, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthOdd, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); - - // We're still using roundHalfEven here for backwards compatibility - java.sql.Timestamp wholeMonthLeap = - new java.sql.Timestamp(DateUtil.parseDate("2024-02-1 0:0:0").getTime()); - // January is 31 days - long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime() - 31 * HALF_DAY; - // February is 29 days - long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime() + 29 * HALF_DAY; - assertEquals(lowerBoundaryWholeMonthLeap, - roundMonthExpression.rangeLower(wholeMonthLeap.getTime())); - assertEquals(upperBoundaryWholeMonthLeap, - roundMonthExpression.rangeUpper(wholeMonthLeap.getTime())); - assertEquals(wholeMonthLeap, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthLeap, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(roundMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); - - // We're still using roundHalfEven here for backwards compatibility - RoundYearExpression roundYearExpression = new RoundYearExpression(); - java.sql.Timestamp wholeYearEven = - new java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearEven = wholeYearEven.getTime() - HALF_YEAR; - long upperBoundaryWholeYearEven = wholeYearEven.getTime() + HALF_YEAR; - assertEquals(lowerBoundaryWholeYearEven, - 
roundYearExpression.rangeLower(wholeYearEven.getTime())); - assertEquals(upperBoundaryWholeYearEven, - roundYearExpression.rangeUpper(wholeYearEven.getTime())); - assertEquals(wholeYearEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearEven + 1, GJChronology.getInstanceUTC())))); - - // We're still using roundHalfEven here for backwards compatibility - java.sql.Timestamp wholeYearOdd = - new java.sql.Timestamp(DateUtil.parseDate("2023-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearOdd = wholeYearOdd.getTime() - HALF_YEAR + 1; - long upperBoundaryWholeYearOdd = wholeYearOdd.getTime() + HALF_YEAR - 1; - assertEquals(lowerBoundaryWholeYearOdd, - roundYearExpression.rangeLower(wholeYearOdd.getTime())); - assertEquals(upperBoundaryWholeYearOdd, - roundYearExpression.rangeUpper(wholeYearOdd.getTime())); - assertEquals(wholeYearOdd, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearOdd, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearOdd, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearOdd, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearOdd + 1, GJChronology.getInstanceUTC())))); - - // We're still using roundHalfEven here for backwards compatibility - java.sql.Timestamp wholeYearLeapEven = - new java.sql.Timestamp(DateUtil.parseDate("2024-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() - HALF_YEAR; - long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() + HALF_YEAR + HALF_DAY; - assertEquals(lowerBoundaryWholeYearLeapEven, - roundYearExpression.rangeLower(wholeYearLeapEven.getTime())); - assertEquals(upperBoundaryWholeYearLeapEven, - roundYearExpression.rangeUpper(wholeYearLeapEven.getTime())); - assertEquals(wholeYearLeapEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearLeapEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearLeapEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearLeapEven + 1, GJChronology.getInstanceUTC())))); + // if it should have a 
lower bound + if (relation != Relation.LESS && relation != Relation.LESS_OR_EQUAL) { + // figure out what the lower bound is + byte[] lowestLowIncluded; + byte[] highestLowExcluded; + if (range.isLowerInclusive()) { + lowestLowIncluded = range.getLowerRange(); + highestLowExcluded = prevDecimalKey(range.getLowerRange()); + } else { + lowestLowIncluded = nextDecimalKey(range.getLowerRange()); + highestLowExcluded = range.getLowerRange(); + } + + // check on either side of the boundary to validate that it is in fact the boundary + exprType.getExpression(lowestLowIncluded, scale).evaluate(null, lhsPtr); + assertTrue("incorrectly excluding " + PDecimal.INSTANCE.toObject(lowestLowIncluded) + + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr)); + exprType.getExpression(highestLowExcluded, scale).evaluate(null, lhsPtr); + assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(highestLowExcluded) + + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr)); + } else { + // otherwise verify that it does not have a lower bound + assertTrue("should not have a lower bound for " + dump, range.lowerUnbound()); } - - private FloorDateExpression getFloorMsExpression(String s, TimeUnit u, int m) - throws SQLException { - return (FloorDateExpression) FloorDateExpression.create(LiteralExpression.newConstant(s), u, - m); - } - - @Test - public void testFloorGMT() throws SQLException { - - // No need to repeat odd / even cases - // The logic for upper and lower scan ranges is always - // [floor(ts), ceil(ts+1)-1] - - RoundDateExpression oddWholeSecondExp = - getFloorMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); - java.sql.Timestamp oddWholeSecond = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); - long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime(); - long upperBoundaryOddWholeSecond = oddWholeSecond.getTime() + SEC - 1; - assertEquals(lowerBoundaryOddWholeSecond, - oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecond, - oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); - - // 10 sec range - RoundDateExpression oddWholeSecondFloor10Exp = - getFloorMsExpression("2022-11-11 11:11:10", TimeUnit.SECOND, 10); - java.sql.Timestamp oddWholeSecondFloor10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:10").getTime()); - long lowerBoundaryOddWholeSecondFloor10 = oddWholeSecondFloor10.getTime(); - long upperBoundaryOddWholeSecondFloor10 = oddWholeSecondFloor10.getTime() + 10 * SEC - 1; - assertEquals(lowerBoundaryOddWholeSecondFloor10, - oddWholeSecondFloor10Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondFloor10, - oddWholeSecondFloor10Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondFloor10, new java.sql.Timestamp( - oddWholeSecondFloor10Exp.roundTime(lowerBoundaryOddWholeSecondFloor10))); - assertNotEquals(oddWholeSecondFloor10, new java.sql.Timestamp( - oddWholeSecondFloor10Exp.roundTime(lowerBoundaryOddWholeSecondFloor10 - 1))); - 
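For FLOOR the window above is one-sided: the floored value itself is the lower edge and the upper edge stops one millisecond short of the next multiple, counting millis from the epoch for sub-day units. A minimal standalone sketch of that shape, using a stand-in floor on raw epoch millis:

// Illustrative sketch only, not part of this patch.
class FloorWindowSketch {
  static final long SEC = 1000L;

  // Floor an epoch-millis value to a multiple of 'multiplier' seconds,
  // counted from the epoch (so buckets need not align to wall-clock boundaries).
  static long floorToSeconds(long millis, int multiplier) {
    long unit = multiplier * SEC;
    return (millis / unit) * unit; // assumes a non-negative epoch value
  }

  public static void main(String[] args) {
    long ts = 1_668_165_071_000L;            // some instant
    long floored = floorToSeconds(ts, 10);
    long lower = floored;                    // first ms flooring to 'floored'
    long upper = floored + 10 * SEC - 1;     // last ms flooring to 'floored'
    System.out.println(lower + " <= " + ts + " <= " + upper);
  }
}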
assertEquals(oddWholeSecondFloor10, new java.sql.Timestamp( - oddWholeSecondFloor10Exp.roundTime(upperBoundaryOddWholeSecondFloor10))); - assertNotEquals(oddWholeSecondFloor10, new java.sql.Timestamp( - oddWholeSecondFloor10Exp.roundTime(upperBoundaryOddWholeSecondFloor10 + 1))); - - // 15 sec range - RoundDateExpression oddWholeSecondFloor15Exp = - getFloorMsExpression("2022-11-11 11:11:0", TimeUnit.SECOND, 15); - java.sql.Timestamp oddWholeSecondFloor15 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:0").getTime()); - long lowerBoundaryOddWholeSecondFloor15 = oddWholeSecondFloor15.getTime(); - long upperBoundaryOddWholeSecondFloor15 = oddWholeSecondFloor15.getTime() + 15 * SEC - 1; - assertEquals(lowerBoundaryOddWholeSecondFloor15, - oddWholeSecondFloor15Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondFloor15, - oddWholeSecondFloor15Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondFloor15, new java.sql.Timestamp( - oddWholeSecondFloor15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15))); - assertNotEquals(oddWholeSecondFloor15, new java.sql.Timestamp( - oddWholeSecondFloor15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15 - 1))); - assertEquals(oddWholeSecondFloor15, new java.sql.Timestamp( - oddWholeSecondFloor15Exp.roundTime(upperBoundaryOddWholeSecondFloor15))); - assertNotEquals(oddWholeSecondFloor15, new java.sql.Timestamp( - oddWholeSecondFloor15Exp.roundTime(upperBoundaryOddWholeSecondFloor15 + 1))); - - RoundDateExpression evenWholeMinuteExp = - getFloorMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); - java.sql.Timestamp evenWholeMinute = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); - long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime(); - long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime() + MIN - 1; - assertEquals(lowerBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); - - RoundDateExpression evenWholeMinuteFloor20Exp = - getFloorMsExpression("2022-11-11 11:00:0", TimeUnit.MINUTE, 20); - java.sql.Timestamp evenWholeMinuteFloor20 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:00:0").getTime()); - long lowerBoundaryEvenWholeMinuteFloor20 = evenWholeMinuteFloor20.getTime(); - long upperBoundaryEvenWholeMinuteFloor20 = evenWholeMinuteFloor20.getTime() + 20 * MIN - 1; - assertEquals(lowerBoundaryEvenWholeMinuteFloor20, - evenWholeMinuteFloor20Exp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinuteFloor20, - evenWholeMinuteFloor20Exp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( - evenWholeMinuteFloor20Exp.roundTime(lowerBoundaryEvenWholeMinuteFloor20))); - assertNotEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( - evenWholeMinuteFloor20Exp.roundTime(lowerBoundaryEvenWholeMinuteFloor20 - 1))); - 
assertEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( - evenWholeMinuteFloor20Exp.roundTime(upperBoundaryEvenWholeMinuteFloor20))); - assertNotEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( - evenWholeMinuteFloor20Exp.roundTime(upperBoundaryEvenWholeMinuteFloor20 + 1))); - - // Minutes since epoch, don't expect the rounded value to be "round" - RoundDateExpression evenWholeMinuteFloor17Exp = - getFloorMsExpression("2022-11-11 11:12:00", TimeUnit.MINUTE, 17); - java.sql.Timestamp evenWholeMinuteFloor17 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); - long lowerBoundaryEvenWholeMinute17 = evenWholeMinuteFloor17.getTime(); - long upperBoundaryEvenWholeMinute17 = evenWholeMinuteFloor17.getTime() + 17 * MIN - 1; - assertEquals(lowerBoundaryEvenWholeMinute17, - evenWholeMinuteFloor17Exp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinute17, - evenWholeMinuteFloor17Exp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( - evenWholeMinuteFloor17Exp.roundTime(lowerBoundaryEvenWholeMinute17))); - assertNotEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( - evenWholeMinuteFloor17Exp.roundTime(lowerBoundaryEvenWholeMinute17 - 1))); - assertEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( - evenWholeMinuteFloor17Exp.roundTime(upperBoundaryEvenWholeMinute17))); - assertNotEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( - evenWholeMinuteFloor17Exp.roundTime(upperBoundaryEvenWholeMinute17 + 1))); - - RoundDateExpression oddWholeHourExp = - getFloorMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); - java.sql.Timestamp oddWholeHour = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); - long lowerBoundaryOddWholeHour = oddWholeHour.getTime(); - long upperBoundaryOddWholeHour = oddWholeHour.getTime() + HOUR - 1; - assertEquals(lowerBoundaryOddWholeHour, oddWholeHourExp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour10Exp = - getFloorMsExpression("2022-11-11 02:0:0", TimeUnit.HOUR, 10); - java.sql.Timestamp oddWholeHour10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 02:0:0").getTime()); - long lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime(); - long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime() + HOUR * 10 - 1; - assertEquals(lowerBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new 
java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour11Exp = - getFloorMsExpression("2022-11-11 07:0:0", TimeUnit.HOUR, 11); - java.sql.Timestamp oddWholeHour11 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 07:0:0").getTime()); - long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime(); - long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime() + HOUR * 11 - 1; - assertEquals(lowerBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); - - // No DST switchover - RoundDateExpression evenWholeDayExp = - getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); - java.sql.Timestamp evenWholeDay = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay = evenWholeDay.getTime(); - long upperBoundaryEvenWholeDay = evenWholeDay.getTime() + DAY - 1; - assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); - - RoundDateExpression evenWholeDay2Exp = - getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 2); - java.sql.Timestamp evenWholeDay2 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay2 = evenWholeDay2.getTime(); - long upperBoundaryEvenWholeDay2 = evenWholeDay2.getTime() + 2 * DAY - 1; - assertEquals(lowerBoundaryEvenWholeDay2, - evenWholeDay2Exp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay2, - evenWholeDay2Exp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2))); - assertNotEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2 - 1))); - assertEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2))); - assertNotEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2 + 1))); - - RoundDateExpression evenWholeDay3Exp = - getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); - java.sql.Timestamp evenWholeDay3 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay3 = evenWholeDay3.getTime(); - long upperBoundaryEvenWholeDay3 = evenWholeDay3.getTime() + 3 * DAY - 1; - 
assertEquals(lowerBoundaryEvenWholeDay3, - evenWholeDay3Exp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay3, - evenWholeDay3Exp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3))); - assertNotEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3 - 1))); - assertEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3))); - assertNotEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3 + 1))); - - FloorWeekExpression floorWeekExpression = new FloorWeekExpression(); - java.sql.Timestamp wholeWeekOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); - long lowerBoundaryWholeWeekOdd = wholeWeekOdd.getTime(); - long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime() + WEEK - 1; - assertEquals(lowerBoundaryWholeWeekOdd, - floorWeekExpression.rangeLower(wholeWeekOdd.getTime())); - assertEquals(upperBoundaryWholeWeekOdd, - floorWeekExpression.rangeUpper(wholeWeekOdd.getTime())); - assertEquals(wholeWeekOdd, - new java.sql.Timestamp(floorWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, - new java.sql.Timestamp(floorWeekExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeWeekOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeWeekOdd, - new java.sql.Timestamp(floorWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, - new java.sql.Timestamp(floorWeekExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeWeekOdd + 1, GJChronology.getInstanceUTC())))); - - FloorMonthExpression floorMonthExpression = new FloorMonthExpression(); - java.sql.Timestamp wholeMonthOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-07-1 0:0:0").getTime()); - long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime(); - // July is 31 days - long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime() + 31 * DAY - 1; - assertEquals(lowerBoundaryWholeMonthOdd, - floorMonthExpression.rangeLower(wholeMonthOdd.getTime())); - assertEquals(upperBoundaryWholeMonthOdd, - floorMonthExpression.rangeUpper(wholeMonthOdd.getTime())); - assertEquals(wholeMonthOdd, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthOdd, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); - - java.sql.Timestamp wholeMonthLeap = - new java.sql.Timestamp(DateUtil.parseDate("2024-02-1 0:0:0").getTime()); - long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime(); - // February is 29 days - long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime() + 29 * DAY - 1; - assertEquals(lowerBoundaryWholeMonthLeap, - 
floorMonthExpression.rangeLower(wholeMonthLeap.getTime())); - assertEquals(upperBoundaryWholeMonthLeap, - floorMonthExpression.rangeUpper(wholeMonthLeap.getTime())); - assertEquals(wholeMonthLeap, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthLeap, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(floorMonthExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); - - FloorYearExpression floorYearExpression = new FloorYearExpression(); - java.sql.Timestamp wholeYearEven = - new java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearEven = wholeYearEven.getTime(); - long upperBoundaryWholeYearEven = wholeYearEven.getTime() + YEAR - 1; - assertEquals(lowerBoundaryWholeYearEven, - floorYearExpression.rangeLower(wholeYearEven.getTime())); - assertEquals(upperBoundaryWholeYearEven, - floorYearExpression.rangeUpper(wholeYearEven.getTime())); - assertEquals(wholeYearEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearEven + 1, GJChronology.getInstanceUTC())))); - - java.sql.Timestamp wholeYearLeapEven = - new java.sql.Timestamp(DateUtil.parseDate("2024-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime(); - long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() + YEAR + DAY - 1; - assertEquals(lowerBoundaryWholeYearLeapEven, - floorYearExpression.rangeLower(wholeYearLeapEven.getTime())); - assertEquals(upperBoundaryWholeYearLeapEven, - floorYearExpression.rangeUpper(wholeYearLeapEven.getTime())); - assertEquals(wholeYearLeapEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearLeapEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearLeapEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearLeapEven + 1, GJChronology.getInstanceUTC())))); + } + + /** + * Produces the previous Decimal key relative to the given key. 
The new key will differ from the + * old key in as small a unit as possible while still maintaining accurate serialization. + * @param key bytes for the old Decimal key + * @return bytes for the new Decimal key, a single unit previous to the old one + */ + private static byte[] prevDecimalKey(byte[] key) { + BigDecimal decimal = (BigDecimal) PDecimal.INSTANCE.toObject(key); + BigDecimal prev = decimal.subtract(getSmallestUnit(decimal)); + return PDecimal.INSTANCE.toBytes(prev); + } + + /** + * Produces the next Decimal key relative to the given key. The new key will differ from the old + * key in as small a unit as possible while still maintaining accurate serialization. + * @param key bytes for the old Decimal key + * @return bytes for the new Decimal key, a single unit next from the old one + */ + private static byte[] nextDecimalKey(byte[] key) { + BigDecimal decimal = (BigDecimal) PDecimal.INSTANCE.toObject(key); + BigDecimal next = decimal.add(getSmallestUnit(decimal)); + return PDecimal.INSTANCE.toBytes(next); + } + + /** + * Produces the smallest unit of difference possible for the given decimal that will still be + * serialized accurately. For example, if the MAXIMUM_RELIABLE_PRECISION were 4, then + * getSmallestUnit(2.3) would produce 0.001, as 2.301 could be serialized accurately but 2.3001 + * could not. + * @param decimal the decimal to find the smallest unit in relation to + * @return the smallest BigDecimal unit possible to add to decimal while still maintaining + * accurate serialization + * @throws IllegalArgumentException if decimal requires more than the maximum reliable precision + */ + private static BigDecimal getSmallestUnit(BigDecimal decimal) { + if (decimal.precision() > PDataType.MAX_PRECISION) { + throw new IllegalArgumentException( + "rounding errors mean that we cannot reliably test " + decimal); } + int minScale = decimal.scale() + (PDataType.MAX_PRECISION - decimal.precision()); + return BigDecimal.valueOf(1, minScale); + } + + // Date Expression Tests + + @Test + public void testRoundDateExpression() throws Exception { + LiteralExpression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression roundDateExpression = RoundDateExpression.create(dateLiteral, TimeUnit.DAY); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + roundDateExpression.evaluate(null, ptr); + Object result = roundDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-02 00:00:00"), resultDate); + } + + @Test + public void testRoundDateExpressionWithMultiplier() throws Exception { + Expression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression roundDateExpression = RoundDateExpression.create(dateLiteral, TimeUnit.MINUTE, 10); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + roundDateExpression.evaluate(null, ptr); + Object result = roundDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-01 14:30:00"), resultDate); + } + + @Test + public void testFloorDateExpression() throws Exception { + LiteralExpression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.DAY); + + 
ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + floorDateExpression.evaluate(null, ptr); + Object result = floorDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-01 00:00:00"), resultDate); + } + + @Test + public void testFloorDateExpressionWithMultiplier() throws Exception { + Expression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.SECOND, 10); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + floorDateExpression.evaluate(null, ptr); + Object result = floorDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-01 14:25:20"), resultDate); + } + + @Test + public void testCeilDateExpression() throws Exception { + LiteralExpression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression ceilDateExpression = CeilDateExpression.create(dateLiteral, TimeUnit.DAY); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ceilDateExpression.evaluate(null, ptr); + Object result = ceilDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-02 00:00:00"), resultDate); + } + + @Test + public void testCeilDateExpressionWithMultiplier() throws Exception { + Expression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + Expression ceilDateExpression = CeilDateExpression.create(dateLiteral, TimeUnit.SECOND, 10); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ceilDateExpression.evaluate(null, ptr); + Object result = ceilDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2012-01-01 14:25:30"), resultDate); + } + + /** + * Tests {@link RoundDateExpression} constructor check which only allows number of arguments + * between 2 and 3. + */ + @Test + public void testRoundDateExpressionValidation_1() throws Exception { + LiteralExpression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + + List childExpressions = new ArrayList(1); + childExpressions.add(dateLiteral); + + try { + RoundDateExpression.create(childExpressions); + fail("Instantiating a RoundDateExpression with only one argument should have failed."); + } catch (IllegalArgumentException e) { - private CeilDateExpression getCeilMsExpression(String s, TimeUnit u, int m) - throws SQLException { - return (CeilDateExpression) CeilDateExpression.create(LiteralExpression.newConstant(s), u, - m); } + } + + /** + * Tests {@link RoundDateExpression} constructor for a valid value of time unit. 
+ */ + @Test + public void testRoundDateExpressionValidation_2() throws Exception { + LiteralExpression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2012-01-01 14:25:28"), PDate.INSTANCE); + LiteralExpression timeUnitLiteral = LiteralExpression.newConstant("millis", PVarchar.INSTANCE); + + List childExpressions = new ArrayList(1); + childExpressions.add(dateLiteral); + childExpressions.add(timeUnitLiteral); + + try { + RoundDateExpression.create(childExpressions); + fail("Only a valid time unit represented by TimeUnit enum is allowed and millis is invalid."); + } catch (IllegalArgumentException e) { - @Test - public void testCeilGMT() throws SQLException { - - // No need to repeat odd / even cases - // The logic for upper and lower scan ranges is always - // [floor(ts-1)+1, ceil(ts)] - - RoundDateExpression oddWholeSecondExp = - getCeilMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); - java.sql.Timestamp oddWholeSecond = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); - long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime() - SEC + 1; - long upperBoundaryOddWholeSecond = oddWholeSecond.getTime(); - assertEquals(lowerBoundaryOddWholeSecond, - oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecond, - oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); - assertEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); - assertNotEquals(oddWholeSecond, - new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); - - // 10 sec range - RoundDateExpression oddWholeSecondCeil10Exp = - getCeilMsExpression("2022-11-11 11:11:20", TimeUnit.SECOND, 10); - java.sql.Timestamp oddWholeSecondCeil10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:20").getTime()); - long lowerBoundaryOddWholeSecondCeil10 = oddWholeSecondCeil10.getTime() - 10 * SEC + 1; - long upperBoundaryOddWholeSecondCeil10 = oddWholeSecondCeil10.getTime(); - assertEquals(lowerBoundaryOddWholeSecondCeil10, - oddWholeSecondCeil10Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondCeil10, - oddWholeSecondCeil10Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondCeil10, new java.sql.Timestamp( - oddWholeSecondCeil10Exp.roundTime(lowerBoundaryOddWholeSecondCeil10))); - assertNotEquals(oddWholeSecondCeil10, new java.sql.Timestamp( - oddWholeSecondCeil10Exp.roundTime(lowerBoundaryOddWholeSecondCeil10 - 1))); - assertEquals(oddWholeSecondCeil10, new java.sql.Timestamp( - oddWholeSecondCeil10Exp.roundTime(upperBoundaryOddWholeSecondCeil10))); - assertNotEquals(oddWholeSecondCeil10, new java.sql.Timestamp( - oddWholeSecondCeil10Exp.roundTime(upperBoundaryOddWholeSecondCeil10 + 1))); - - // 15 sec range - RoundDateExpression oddWholeSecondCeil15Exp = - getCeilMsExpression("2022-11-11 11:11:15", TimeUnit.SECOND, 15); - java.sql.Timestamp oddWholeSecondCeil15 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:15").getTime()); - long lowerBoundaryOddWholeSecondFloor15 = oddWholeSecondCeil15.getTime() - 15 * SEC + 1; - long upperBoundaryOddWholeSecondFloor15 = oddWholeSecondCeil15.getTime(); - assertEquals(lowerBoundaryOddWholeSecondFloor15, - 
oddWholeSecondCeil15Exp.rangeLower(oddWholeSecond.getTime())); - assertEquals(upperBoundaryOddWholeSecondFloor15, - oddWholeSecondCeil15Exp.rangeUpper(oddWholeSecond.getTime())); - assertEquals(oddWholeSecondCeil15, new java.sql.Timestamp( - oddWholeSecondCeil15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15))); - assertNotEquals(oddWholeSecondCeil15, new java.sql.Timestamp( - oddWholeSecondCeil15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15 - 1))); - assertEquals(oddWholeSecondCeil15, new java.sql.Timestamp( - oddWholeSecondCeil15Exp.roundTime(upperBoundaryOddWholeSecondFloor15))); - assertNotEquals(oddWholeSecondCeil15, new java.sql.Timestamp( - oddWholeSecondCeil15Exp.roundTime(upperBoundaryOddWholeSecondFloor15 + 1))); - - RoundDateExpression evenWholeMinuteExp = - getCeilMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); - java.sql.Timestamp evenWholeMinute = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); - long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime() - MIN + 1; - long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime(); - assertEquals(lowerBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinute, - evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); - assertEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); - assertNotEquals(evenWholeMinute, - new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); - - RoundDateExpression evenWholeMinuteCeil20Exp = - getCeilMsExpression("2022-11-11 11:20:0", TimeUnit.MINUTE, 20); - java.sql.Timestamp evenWholeMinuteCeil20 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:20:0").getTime()); - long lowerBoundaryEvenWholeMinuteCeil20 = evenWholeMinuteCeil20.getTime() - 20 * MIN + 1; - long upperBoundaryEvenWholeMinuteCeil20 = evenWholeMinuteCeil20.getTime(); - assertEquals(lowerBoundaryEvenWholeMinuteCeil20, - evenWholeMinuteCeil20Exp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinuteCeil20, - evenWholeMinuteCeil20Exp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( - evenWholeMinuteCeil20Exp.roundTime(lowerBoundaryEvenWholeMinuteCeil20))); - assertNotEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( - evenWholeMinuteCeil20Exp.roundTime(lowerBoundaryEvenWholeMinuteCeil20 - 1))); - assertEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( - evenWholeMinuteCeil20Exp.roundTime(upperBoundaryEvenWholeMinuteCeil20))); - assertNotEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( - evenWholeMinuteCeil20Exp.roundTime(upperBoundaryEvenWholeMinuteCeil20 + 1))); - - // Minutes since epoch, don't expect the rounded value to be "round" - RoundDateExpression evenWholeMinuteCeil17Exp = - getCeilMsExpression("2022-11-11 11:12:00", TimeUnit.MINUTE, 17); - java.sql.Timestamp evenWholeMinuteCeil17 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); - long lowerBoundaryEvenWholeMinute17 = evenWholeMinuteCeil17.getTime() - 17 * MIN + 1; - long upperBoundaryEvenWholeMinute17 = evenWholeMinuteCeil17.getTime(); - assertEquals(lowerBoundaryEvenWholeMinute17, - 
evenWholeMinuteCeil17Exp.rangeLower(evenWholeMinute.getTime())); - assertEquals(upperBoundaryEvenWholeMinute17, - evenWholeMinuteCeil17Exp.rangeUpper(evenWholeMinute.getTime())); - assertEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( - evenWholeMinuteCeil17Exp.roundTime(lowerBoundaryEvenWholeMinute17))); - assertNotEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( - evenWholeMinuteCeil17Exp.roundTime(lowerBoundaryEvenWholeMinute17 - 1))); - assertEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( - evenWholeMinuteCeil17Exp.roundTime(upperBoundaryEvenWholeMinute17))); - assertNotEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( - evenWholeMinuteCeil17Exp.roundTime(upperBoundaryEvenWholeMinute17 + 1))); - - RoundDateExpression oddWholeHourExp = - getCeilMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); - java.sql.Timestamp oddWholeHour = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); - long lowerBoundaryOddWholeHour = oddWholeHour.getTime() - HOUR + 1; - long upperBoundaryOddWholeHour = oddWholeHour.getTime(); - assertEquals(lowerBoundaryOddWholeHour, - oddWholeHourExp.rangeLower(oddWholeHour.getTime() - 1)); - assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); - assertEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); - assertNotEquals(oddWholeHour, - new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour10Exp = - getCeilMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 10); - java.sql.Timestamp oddWholeHour10 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); - long lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime() - 10 * HOUR + 1; - long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime(); - assertEquals(lowerBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour10, - oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); - assertEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); - assertNotEquals(oddWholeHour10, - new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); - - // Not rounding to hourOfDay - RoundDateExpression oddWholeHour11Exp = - getCeilMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 11); - java.sql.Timestamp oddWholeHour11 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 18:0:0").getTime()); - long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime() - 11 * HOUR + 1; - long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime(); - assertEquals(lowerBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); - assertEquals(upperBoundaryOddWholeHour11, - oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new 
java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); - assertEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); - assertNotEquals(oddWholeHour11, - new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); - - RoundDateExpression evenWholeDayExp = - getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); - java.sql.Timestamp evenWholeDay = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay = evenWholeDay.getTime() - DAY + 1; - long upperBoundaryEvenWholeDay = evenWholeDay.getTime(); - assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); - assertEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); - assertNotEquals(evenWholeDay, - new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); - - RoundDateExpression evenWholeDay2Exp = - getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 2); - java.sql.Timestamp evenWholeDay2 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay2 = evenWholeDay2.getTime() - 2 * DAY + 1; - long upperBoundaryEvenWholeDay2 = evenWholeDay2.getTime(); - assertEquals(lowerBoundaryEvenWholeDay2, - evenWholeDay2Exp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay2, - evenWholeDay2Exp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2))); - assertNotEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2 - 1))); - assertEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2))); - assertNotEquals(evenWholeDay2, - new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2 + 1))); - - RoundDateExpression evenWholeDay3Exp = - getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); - java.sql.Timestamp evenWholeDay3 = - new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); - long lowerBoundaryEvenWholeDay3 = evenWholeDay3.getTime() - 3 * DAY + 1; - long upperBoundaryEvenWholeDay3 = evenWholeDay3.getTime(); - assertEquals(lowerBoundaryEvenWholeDay3, - evenWholeDay3Exp.rangeLower(evenWholeDay.getTime())); - assertEquals(upperBoundaryEvenWholeDay3, - evenWholeDay3Exp.rangeUpper(evenWholeDay.getTime())); - assertEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3))); - assertNotEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3 - 1))); - assertEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3))); - assertNotEquals(evenWholeDay3, - new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3 + 1))); - - CeilWeekExpression ceilWeekExp = new CeilWeekExpression(); - java.sql.Timestamp wholeWeekOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); - long lowerBoundaryWholeWeekOdd = 
wholeWeekOdd.getTime() - WEEK + 1; - long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime(); - assertEquals(lowerBoundaryWholeWeekOdd, ceilWeekExp.rangeLower(wholeWeekOdd.getTime())); - assertEquals(upperBoundaryWholeWeekOdd, ceilWeekExp.rangeUpper(wholeWeekOdd.getTime())); - assertEquals(wholeWeekOdd, new java.sql.Timestamp( - ceilWeekExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, new java.sql.Timestamp( - ceilWeekExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd - 1, - GJChronology.getInstanceUTC())))); - assertEquals(wholeWeekOdd, new java.sql.Timestamp( - ceilWeekExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeWeekOdd, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeWeekOdd, new java.sql.Timestamp( - ceilWeekExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeWeekOdd + 1, - GJChronology.getInstanceUTC())))); - - CeilMonthExpression ceilMonthExp = new CeilMonthExpression(); - java.sql.Timestamp wholeMonthOdd = - new java.sql.Timestamp(DateUtil.parseDate("2022-08-1 0:0:0").getTime()); - // July is 31 days - long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime() - 31 * DAY + 1; - long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime(); - assertEquals(lowerBoundaryWholeMonthOdd, ceilMonthExp.rangeLower(wholeMonthOdd.getTime())); - assertEquals(upperBoundaryWholeMonthOdd, ceilMonthExp.rangeUpper(wholeMonthOdd.getTime())); - assertEquals(wholeMonthOdd, new java.sql.Timestamp( - ceilMonthExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(ceilMonthExp.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthOdd, new java.sql.Timestamp( - ceilMonthExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeMonthOdd, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthOdd, - new java.sql.Timestamp(ceilMonthExp.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); - - java.sql.Timestamp wholeMonthLeap = - new java.sql.Timestamp(DateUtil.parseDate("2024-03-1 0:0:0").getTime()); - // February is 29 days - long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime() - 29 * DAY + 1; - long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime(); - assertEquals(lowerBoundaryWholeMonthLeap, - ceilMonthExp.rangeLower(wholeMonthLeap.getTime())); - assertEquals(upperBoundaryWholeMonthLeap, - ceilMonthExp.rangeUpper(wholeMonthLeap.getTime())); - assertEquals(wholeMonthLeap, new java.sql.Timestamp( - ceilMonthExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(ceilMonthExp.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeMonthLeap, new java.sql.Timestamp( - ceilMonthExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeMonthLeap, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeMonthLeap, - new java.sql.Timestamp(ceilMonthExp.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); - - CeilYearExpression ceilYearExp = new CeilYearExpression(); - java.sql.Timestamp wholeYearEven = - new 
java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearEven = wholeYearEven.getTime() - YEAR + 1; - long upperBoundaryWholeYearEven = wholeYearEven.getTime(); - assertEquals(lowerBoundaryWholeYearEven, ceilYearExp.rangeLower(wholeYearEven.getTime())); - assertEquals(upperBoundaryWholeYearEven, ceilYearExp.rangeUpper(wholeYearEven.getTime())); - assertEquals(wholeYearEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeYearEven, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeYearEven - 1, - GJChronology.getInstanceUTC())))); - assertEquals(wholeYearEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeYearEven, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeYearEven + 1, - GJChronology.getInstanceUTC())))); - - java.sql.Timestamp wholeYearLeapEven = - new java.sql.Timestamp(DateUtil.parseDate("2025-1-1 0:0:0").getTime()); - long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() - (YEAR + DAY) + 1; - long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime(); - assertEquals(lowerBoundaryWholeYearLeapEven, - ceilYearExp.rangeLower(wholeYearLeapEven.getTime())); - assertEquals(upperBoundaryWholeYearLeapEven, - ceilYearExp.rangeUpper(wholeYearLeapEven.getTime())); - assertEquals(wholeYearLeapEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeYearLeapEven, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(ceilYearExp.roundDateTime(new org.joda.time.DateTime( - lowerBoundaryWholeYearLeapEven - 1, GJChronology.getInstanceUTC())))); - assertEquals(wholeYearLeapEven, new java.sql.Timestamp( - ceilYearExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeYearLeapEven, - GJChronology.getInstanceUTC())))); - assertNotEquals(wholeYearLeapEven, - new java.sql.Timestamp(ceilYearExp.roundDateTime(new org.joda.time.DateTime( - upperBoundaryWholeYearLeapEven + 1, GJChronology.getInstanceUTC())))); } + } + + @Test + public void testFloorDateExpressionForWeek() throws Exception { + Expression dateLiteral = + LiteralExpression.newConstant(DateUtil.parseDate("2016-01-07 08:17:28"), PDate.INSTANCE); + Expression floorDateExpression = FloorDateExpression.create(dateLiteral, TimeUnit.WEEK); + + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + floorDateExpression.evaluate(null, ptr); + Object result = floorDateExpression.getDataType().toObject(ptr); + + assertTrue(result instanceof Date); + Date resultDate = (Date) result; + assertEquals(DateUtil.parseDate("2016-01-04 00:00:00"), resultDate); + } + + private RoundDateExpression getRoundMsExpression(String s, TimeUnit u, int m) + throws SQLException { + return (RoundDateExpression) RoundDateExpression.create(LiteralExpression.newConstant(s), u, m); + } + + // The three tests below are backported from PHOENIX-5066. + // When PHOENIX-5066 lands, these can be removed as redundant. 
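+
+ // Rough sketch of the invariant these boundary assertions encode (read off the tests
+ // themselves): for time units up to DAY, where rounding is plain millisecond arithmetic,
+ // ROUND with multiplier m of a unit U (in ms) is expected to give
+ //   rangeLower(ts) == ts - (m * U) / 2       lowest instant that still rounds to ts
+ //   rangeUpper(ts) == ts + (m * U) / 2 - 1   highest instant that still rounds to ts
+ // e.g. a 15 second bucket accepts [ts - 15 * HALF_SEC, ts + 15 * HALF_SEC - 1], and each
+ // case also checks that the first millisecond outside either boundary rounds elsewhere.
+ // WEEK/MONTH/YEAR buckets have variable length and are exercised via roundDateTime() on a
+ // GJChronology UTC DateTime instead.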
+ + @Test + public void testRoundingGMT() throws SQLException { + // We operate on Instants for time units up to Days, simply counting millis + + RoundDateExpression oddWholeSecondExp = + getRoundMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); + java.sql.Timestamp oddWholeSecond = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); + long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime() - HALF_SEC; + long upperBoundaryOddWholeSecond = oddWholeSecond.getTime() + HALF_SEC - 1; + assertEquals(lowerBoundaryOddWholeSecond, + oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecond, + oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); + + // 10 sec range + RoundDateExpression oddWholeSecondRound10Exp = + getRoundMsExpression("2022-11-11 11:11:10", TimeUnit.SECOND, 10); + java.sql.Timestamp oddWholeSecondRound10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:10").getTime()); + long lowerBoundaryOddWholeSecondRound10 = oddWholeSecondRound10.getTime() - 5 * SEC; + long upperBoundaryOddWholeSecondRound10 = oddWholeSecondRound10.getTime() + 5 * SEC - 1; + assertEquals(lowerBoundaryOddWholeSecondRound10, + oddWholeSecondRound10Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondRound10, + oddWholeSecondRound10Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondRound10, new java.sql.Timestamp( + oddWholeSecondRound10Exp.roundTime(lowerBoundaryOddWholeSecondRound10))); + assertNotEquals(oddWholeSecondRound10, new java.sql.Timestamp( + oddWholeSecondRound10Exp.roundTime(lowerBoundaryOddWholeSecondRound10 - 1))); + assertEquals(oddWholeSecondRound10, new java.sql.Timestamp( + oddWholeSecondRound10Exp.roundTime(upperBoundaryOddWholeSecondRound10))); + assertNotEquals(oddWholeSecondRound10, new java.sql.Timestamp( + oddWholeSecondRound10Exp.roundTime(upperBoundaryOddWholeSecondRound10 + 1))); + + // 15 sec range + RoundDateExpression oddWholeSecondRound15Exp = + getRoundMsExpression("2022-11-11 11:11:15", TimeUnit.SECOND, 15); + java.sql.Timestamp oddWholeSecondRound15 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:15").getTime()); + long lowerBoundaryOddWholeSecondRound15 = oddWholeSecondRound15.getTime() - 15 * HALF_SEC; + long upperBoundaryOddWholeSecondRound15 = oddWholeSecondRound15.getTime() + 15 * HALF_SEC - 1; + assertEquals(lowerBoundaryOddWholeSecondRound15, + oddWholeSecondRound15Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondRound15, + oddWholeSecondRound15Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondRound15, new java.sql.Timestamp( + oddWholeSecondRound15Exp.roundTime(lowerBoundaryOddWholeSecondRound15))); + assertNotEquals(oddWholeSecondRound15, new java.sql.Timestamp( + oddWholeSecondRound15Exp.roundTime(lowerBoundaryOddWholeSecondRound15 - 1))); + assertEquals(oddWholeSecondRound15, new java.sql.Timestamp( + oddWholeSecondRound15Exp.roundTime(upperBoundaryOddWholeSecondRound15))); + 
assertNotEquals(oddWholeSecondRound15, new java.sql.Timestamp( + oddWholeSecondRound15Exp.roundTime(upperBoundaryOddWholeSecondRound15 + 1))); + + RoundDateExpression evenWholeSecondExp = + getRoundMsExpression("2022-11-11 11:11:12", TimeUnit.SECOND, 1); + java.sql.Timestamp evenWholeSecond = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:12").getTime()); + long lowerBoundaryEvenWholeSecond = evenWholeSecond.getTime() - HALF_SEC; + long upperBoundaryEvenWholeSecond = evenWholeSecond.getTime() + HALF_SEC - 1; + assertEquals(lowerBoundaryEvenWholeSecond, + evenWholeSecondExp.rangeLower(evenWholeSecond.getTime())); + assertEquals(upperBoundaryEvenWholeSecond, + evenWholeSecondExp.rangeUpper(evenWholeSecond.getTime())); + assertEquals(evenWholeSecond, + new java.sql.Timestamp(evenWholeSecondExp.roundTime(lowerBoundaryEvenWholeSecond))); + assertNotEquals(evenWholeSecond, + new java.sql.Timestamp(evenWholeSecondExp.roundTime(lowerBoundaryEvenWholeSecond - 1))); + assertEquals(evenWholeSecond, + new java.sql.Timestamp(evenWholeSecondExp.roundTime(upperBoundaryEvenWholeSecond))); + assertNotEquals(evenWholeSecond, + new java.sql.Timestamp(evenWholeSecondExp.roundTime(upperBoundaryEvenWholeSecond + 1))); + + RoundDateExpression oddWholeMinuteExp = + getRoundMsExpression("2022-11-11 11:11:0", TimeUnit.MINUTE, 1); + java.sql.Timestamp oddWholeMinute = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:0").getTime()); + long lowerBoundaryOddWholeMinute = oddWholeMinute.getTime() - HALF_MIN; + long upperBoundaryOddWholeMinute = oddWholeMinute.getTime() + HALF_MIN - 1; + assertEquals(lowerBoundaryOddWholeMinute, + oddWholeMinuteExp.rangeLower(oddWholeMinute.getTime())); + assertEquals(upperBoundaryOddWholeMinute, + oddWholeMinuteExp.rangeUpper(oddWholeMinute.getTime())); + assertEquals(oddWholeMinute, + new java.sql.Timestamp(oddWholeMinuteExp.roundTime(lowerBoundaryOddWholeMinute))); + assertNotEquals(oddWholeMinute, + new java.sql.Timestamp(oddWholeMinuteExp.roundTime(lowerBoundaryOddWholeMinute - 1))); + assertEquals(oddWholeMinute, + new java.sql.Timestamp(oddWholeMinuteExp.roundTime(upperBoundaryOddWholeMinute))); + assertNotEquals(oddWholeMinute, + new java.sql.Timestamp(oddWholeMinuteExp.roundTime(upperBoundaryOddWholeMinute + 1))); + + RoundDateExpression oddWholeMinuteRound20Exp = + getRoundMsExpression("2022-11-11 11:20:0", TimeUnit.MINUTE, 20); + java.sql.Timestamp oddWholeMinuteRound20 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:20:0").getTime()); + long lowerBoundaryOddWholeMinute20 = oddWholeMinuteRound20.getTime() - 10 * MIN; + long upperBoundaryOddWholeMinute20 = oddWholeMinuteRound20.getTime() + 10 * MIN - 1; + assertEquals(lowerBoundaryOddWholeMinute20, + oddWholeMinuteRound20Exp.rangeLower(oddWholeMinute.getTime())); + assertEquals(upperBoundaryOddWholeMinute20, + oddWholeMinuteRound20Exp.rangeUpper(oddWholeMinute.getTime())); + assertEquals(oddWholeMinuteRound20, + new java.sql.Timestamp(oddWholeMinuteRound20Exp.roundTime(lowerBoundaryOddWholeMinute20))); + assertNotEquals(oddWholeMinuteRound20, new java.sql.Timestamp( + oddWholeMinuteRound20Exp.roundTime(lowerBoundaryOddWholeMinute20 - 1))); + assertEquals(oddWholeMinuteRound20, + new java.sql.Timestamp(oddWholeMinuteRound20Exp.roundTime(upperBoundaryOddWholeMinute20))); + assertNotEquals(oddWholeMinuteRound20, new java.sql.Timestamp( + oddWholeMinuteRound20Exp.roundTime(upperBoundaryOddWholeMinute20 + 1))); + + // Minutes since epoch, don't expect the rounded value to be "round" + + 
RoundDateExpression oddWholeMinuteRound17Exp = + getRoundMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 17); + java.sql.Timestamp oddWholeMinuteRound17 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); + long lowerBoundaryOddWholeMinute17 = oddWholeMinuteRound17.getTime() - 17 * HALF_MIN; + long upperBoundaryOddWholeMinute17 = oddWholeMinuteRound17.getTime() + 17 * HALF_MIN - 1; + assertEquals(lowerBoundaryOddWholeMinute17, + oddWholeMinuteRound17Exp.rangeLower(oddWholeMinute.getTime())); + assertEquals(upperBoundaryOddWholeMinute17, + oddWholeMinuteRound17Exp.rangeUpper(oddWholeMinute.getTime())); + assertEquals(oddWholeMinuteRound17, + new java.sql.Timestamp(oddWholeMinuteRound17Exp.roundTime(lowerBoundaryOddWholeMinute17))); + assertNotEquals(oddWholeMinuteRound17, new java.sql.Timestamp( + oddWholeMinuteRound17Exp.roundTime(lowerBoundaryOddWholeMinute17 - 1))); + assertEquals(oddWholeMinuteRound17, + new java.sql.Timestamp(oddWholeMinuteRound17Exp.roundTime(upperBoundaryOddWholeMinute17))); + assertNotEquals(oddWholeMinuteRound17, new java.sql.Timestamp( + oddWholeMinuteRound17Exp.roundTime(upperBoundaryOddWholeMinute17 + 1))); + + RoundDateExpression evenWholeMinuteExp = + getRoundMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); + java.sql.Timestamp evenWholeMinute = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); + long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime() - HALF_MIN; + long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime() + HALF_MIN - 1; + assertEquals(lowerBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); + + RoundDateExpression oddWholeHourExp = + getRoundMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); + java.sql.Timestamp oddWholeHour = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); + long lowerBoundaryOddWholeHour = oddWholeHour.getTime() - HALF_HOUR; + long upperBoundaryOddWholeHour = oddWholeHour.getTime() + HALF_HOUR - 1; + assertEquals(lowerBoundaryOddWholeHour, oddWholeHourExp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour10Exp = + getRoundMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 10); + java.sql.Timestamp oddWholeHour10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); + long 
lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime() - HALF_HOUR * 10; + long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime() + HALF_HOUR * 10 - 1; + assertEquals(lowerBoundaryOddWholeHour10, oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour10, oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour11Exp = + getRoundMsExpression("2022-11-11 07:0:0", TimeUnit.HOUR, 11); + java.sql.Timestamp oddWholeHour11 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 07:0:0").getTime()); + long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime() - HALF_HOUR * 11; + long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime() + HALF_HOUR * 11 - 1; + assertEquals(lowerBoundaryOddWholeHour11, oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour11, oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); + + RoundDateExpression evenwholeHourExp = + getRoundMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 1); + java.sql.Timestamp evenwholeHour = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); + long lowerBoundaryEvenWholeHour = evenwholeHour.getTime() - HALF_HOUR; + long upperBoundaryEvenWholeHour = evenwholeHour.getTime() + HALF_HOUR - 1; + assertEquals(lowerBoundaryEvenWholeHour, evenwholeHourExp.rangeLower(evenwholeHour.getTime())); + assertEquals(upperBoundaryEvenWholeHour, evenwholeHourExp.rangeUpper(evenwholeHour.getTime())); + assertEquals(evenwholeHour, + new java.sql.Timestamp(evenwholeHourExp.roundTime(lowerBoundaryEvenWholeHour))); + assertNotEquals(evenwholeHour, + new java.sql.Timestamp(evenwholeHourExp.roundTime(lowerBoundaryEvenWholeHour - 1))); + assertEquals(evenwholeHour, + new java.sql.Timestamp(evenwholeHourExp.roundTime(upperBoundaryEvenWholeHour))); + assertNotEquals(evenwholeHour, + new java.sql.Timestamp(evenwholeHourExp.roundTime(upperBoundaryEvenWholeHour + 1))); + + // No DST switchover + RoundDateExpression oddWholeDayExp = getRoundMsExpression("2022-11-11 0:0:0", TimeUnit.DAY, 1); + java.sql.Timestamp oddWholeDay = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 0:0:0").getTime()); + long lowerBoundaryOddWholeDay = oddWholeDay.getTime() - HALF_DAY; + long upperBoundaryOddWholeDay = oddWholeDay.getTime() + HALF_DAY - 1; + assertEquals(lowerBoundaryOddWholeDay, oddWholeDayExp.rangeLower(oddWholeDay.getTime())); + assertEquals(upperBoundaryOddWholeDay, oddWholeDayExp.rangeUpper(oddWholeDay.getTime())); + assertEquals(oddWholeDay, + new 
java.sql.Timestamp(oddWholeDayExp.roundTime(lowerBoundaryOddWholeDay))); + assertNotEquals(oddWholeDay, + new java.sql.Timestamp(oddWholeDayExp.roundTime(lowerBoundaryOddWholeDay - 1))); + assertEquals(oddWholeDay, + new java.sql.Timestamp(oddWholeDayExp.roundTime(upperBoundaryOddWholeDay))); + assertNotEquals(oddWholeDay, + new java.sql.Timestamp(oddWholeDayExp.roundTime(upperBoundaryOddWholeDay + 1))); + + RoundDateExpression oddWholeDay10Exp = + getRoundMsExpression("2022-11-14 0:0:0", TimeUnit.DAY, 10); + java.sql.Timestamp oddWholeDay10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-14 0:0:0").getTime()); + long lowerBoundaryOddWholeDay10 = oddWholeDay10.getTime() - 10 * HALF_DAY; + long upperBoundaryOddWholeDay10 = oddWholeDay10.getTime() + 10 * HALF_DAY - 1; + assertEquals(lowerBoundaryOddWholeDay10, oddWholeDay10Exp.rangeLower(oddWholeDay.getTime())); + assertEquals(upperBoundaryOddWholeDay10, oddWholeDay10Exp.rangeUpper(oddWholeDay.getTime())); + assertEquals(oddWholeDay10, + new java.sql.Timestamp(oddWholeDay10Exp.roundTime(lowerBoundaryOddWholeDay10))); + assertNotEquals(oddWholeDay10, + new java.sql.Timestamp(oddWholeDay10Exp.roundTime(lowerBoundaryOddWholeDay10 - 1))); + assertEquals(oddWholeDay10, + new java.sql.Timestamp(oddWholeDay10Exp.roundTime(upperBoundaryOddWholeDay10))); + assertNotEquals(oddWholeDay10, + new java.sql.Timestamp(oddWholeDay10Exp.roundTime(upperBoundaryOddWholeDay10 + 1))); + + RoundDateExpression oddWholeDay3Exp = getRoundMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); + java.sql.Timestamp oddWholeDay3 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryOddWholeDay3 = oddWholeDay3.getTime() - 3 * HALF_DAY; + long upperBoundaryOddWholeDay3 = oddWholeDay3.getTime() + 3 * HALF_DAY - 1; + assertEquals(lowerBoundaryOddWholeDay3, oddWholeDay3Exp.rangeLower(oddWholeDay.getTime())); + assertEquals(upperBoundaryOddWholeDay3, oddWholeDay3Exp.rangeUpper(oddWholeDay.getTime())); + assertEquals(oddWholeDay3, + new java.sql.Timestamp(oddWholeDay3Exp.roundTime(lowerBoundaryOddWholeDay3))); + assertNotEquals(oddWholeDay3, + new java.sql.Timestamp(oddWholeDay3Exp.roundTime(lowerBoundaryOddWholeDay3 - 1))); + assertEquals(oddWholeDay3, + new java.sql.Timestamp(oddWholeDay3Exp.roundTime(upperBoundaryOddWholeDay3))); + assertNotEquals(oddWholeDay3, + new java.sql.Timestamp(oddWholeDay3Exp.roundTime(upperBoundaryOddWholeDay3 + 1))); + + RoundDateExpression evenWholeDayExp = getRoundMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); + java.sql.Timestamp evenWholeDay = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay = evenWholeDay.getTime() - HALF_DAY; + long upperBoundaryEvenWholeDay = evenWholeDay.getTime() + HALF_DAY - 1; + assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); + + // Stateless, we can reuse it for every week test + RoundWeekExpression roundWeekExpression = new 
RoundWeekExpression(); + java.sql.Timestamp wholeWeekOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); + long lowerBoundaryWholeWeekOdd = wholeWeekOdd.getTime() - (HALF_WEEK - 1); + long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime() + HALF_WEEK - 1; + assertEquals(lowerBoundaryWholeWeekOdd, roundWeekExpression.rangeLower(wholeWeekOdd.getTime())); + assertEquals(upperBoundaryWholeWeekOdd, roundWeekExpression.rangeUpper(wholeWeekOdd.getTime())); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd + 1, GJChronology.getInstanceUTC())))); + + java.sql.Timestamp wholeWeekEven = + new java.sql.Timestamp(DateUtil.parseDate("2022-10-17 0:0:0").getTime()); + long lowerBoundaryWholeWeekEven = wholeWeekEven.getTime() - HALF_WEEK; + long upperBoundaryWholeWeekEven = wholeWeekEven.getTime() + HALF_WEEK; + assertEquals(lowerBoundaryWholeWeekEven, + roundWeekExpression.rangeLower(wholeWeekEven.getTime())); + assertEquals(upperBoundaryWholeWeekEven, + roundWeekExpression.rangeUpper(wholeWeekEven.getTime())); + assertEquals(wholeWeekEven, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekEven, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeWeekEven, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekEven, new java.sql.Timestamp(roundWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekEven + 1, GJChronology.getInstanceUTC())))); + + RoundMonthExpression roundMonthExpression = new RoundMonthExpression(); + // We're still using roundHalfEven here for backwards compatibility + java.sql.Timestamp wholeMonthEven = + new java.sql.Timestamp(DateUtil.parseDate("2022-06-1 0:0:0").getTime()); + // May is 31 days + long lowerBoundaryWholeMonthEven = wholeMonthEven.getTime() - 31 * HALF_DAY; + // June is 30 days + long upperBoundaryWholeMonthEven = wholeMonthEven.getTime() + 30 * HALF_DAY; + assertEquals(lowerBoundaryWholeMonthEven, + roundMonthExpression.rangeLower(wholeMonthEven.getTime())); + assertEquals(upperBoundaryWholeMonthEven, + roundMonthExpression.rangeUpper(wholeMonthEven.getTime())); + assertEquals(wholeMonthEven, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthEven, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthEven, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new 
org.joda.time.DateTime(upperBoundaryWholeMonthEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthEven, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthEven + 1, GJChronology.getInstanceUTC())))); + + // We're still using roundHalfEven here for backwards compatibility + java.sql.Timestamp wholeMonthOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-07-1 0:0:0").getTime()); + // June is 30 days + long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime() - 30 * HALF_DAY + 1; + // July is 31 days + long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime() + 31 * HALF_DAY - 1; + assertEquals(lowerBoundaryWholeMonthOdd, + roundMonthExpression.rangeLower(wholeMonthOdd.getTime())); + assertEquals(upperBoundaryWholeMonthOdd, + roundMonthExpression.rangeUpper(wholeMonthOdd.getTime())); + assertEquals(wholeMonthOdd, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthOdd, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); + + // We're still using roundHalfEven here for backwards compatibility + java.sql.Timestamp wholeMonthLeap = + new java.sql.Timestamp(DateUtil.parseDate("2024-02-1 0:0:0").getTime()); + // January is 31 days + long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime() - 31 * HALF_DAY; + // February is 29 days + long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime() + 29 * HALF_DAY; + assertEquals(lowerBoundaryWholeMonthLeap, + roundMonthExpression.rangeLower(wholeMonthLeap.getTime())); + assertEquals(upperBoundaryWholeMonthLeap, + roundMonthExpression.rangeUpper(wholeMonthLeap.getTime())); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(roundMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); + + // We're still using roundHalfEven here for backwards compatibility + RoundYearExpression roundYearExpression = new RoundYearExpression(); + java.sql.Timestamp wholeYearEven = + new java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearEven = wholeYearEven.getTime() - HALF_YEAR; + long upperBoundaryWholeYearEven = wholeYearEven.getTime() + HALF_YEAR; + assertEquals(lowerBoundaryWholeYearEven, + roundYearExpression.rangeLower(wholeYearEven.getTime())); + assertEquals(upperBoundaryWholeYearEven, + 
roundYearExpression.rangeUpper(wholeYearEven.getTime())); + assertEquals(wholeYearEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven + 1, GJChronology.getInstanceUTC())))); + + // We're still using roundHalfEven here for backwards compatibility + java.sql.Timestamp wholeYearOdd = + new java.sql.Timestamp(DateUtil.parseDate("2023-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearOdd = wholeYearOdd.getTime() - HALF_YEAR + 1; + long upperBoundaryWholeYearOdd = wholeYearOdd.getTime() + HALF_YEAR - 1; + assertEquals(lowerBoundaryWholeYearOdd, roundYearExpression.rangeLower(wholeYearOdd.getTime())); + assertEquals(upperBoundaryWholeYearOdd, roundYearExpression.rangeUpper(wholeYearOdd.getTime())); + assertEquals(wholeYearOdd, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearOdd, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearOdd, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearOdd, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearOdd + 1, GJChronology.getInstanceUTC())))); + + // We're still using roundHalfEven here for backwards compatibility + java.sql.Timestamp wholeYearLeapEven = + new java.sql.Timestamp(DateUtil.parseDate("2024-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() - HALF_YEAR; + long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() + HALF_YEAR + HALF_DAY; + assertEquals(lowerBoundaryWholeYearLeapEven, + roundYearExpression.rangeLower(wholeYearLeapEven.getTime())); + assertEquals(upperBoundaryWholeYearLeapEven, + roundYearExpression.rangeUpper(wholeYearLeapEven.getTime())); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, + new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( + lowerBoundaryWholeYearLeapEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(roundYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, + new java.sql.Timestamp(roundYearExpression.roundDateTime(new org.joda.time.DateTime( + upperBoundaryWholeYearLeapEven + 1, GJChronology.getInstanceUTC())))); + } + + private FloorDateExpression getFloorMsExpression(String s, TimeUnit u, int m) + throws SQLException { + return (FloorDateExpression) 
FloorDateExpression.create(LiteralExpression.newConstant(s), u, m); + } + + @Test + public void testFloorGMT() throws SQLException { + + // No need to repeat odd / even cases + // The logic for upper and lower scan ranges is always + // [floor(ts), ceil(ts+1)-1] + + RoundDateExpression oddWholeSecondExp = + getFloorMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); + java.sql.Timestamp oddWholeSecond = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); + long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime(); + long upperBoundaryOddWholeSecond = oddWholeSecond.getTime() + SEC - 1; + assertEquals(lowerBoundaryOddWholeSecond, + oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecond, + oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); + + // 10 sec range + RoundDateExpression oddWholeSecondFloor10Exp = + getFloorMsExpression("2022-11-11 11:11:10", TimeUnit.SECOND, 10); + java.sql.Timestamp oddWholeSecondFloor10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:10").getTime()); + long lowerBoundaryOddWholeSecondFloor10 = oddWholeSecondFloor10.getTime(); + long upperBoundaryOddWholeSecondFloor10 = oddWholeSecondFloor10.getTime() + 10 * SEC - 1; + assertEquals(lowerBoundaryOddWholeSecondFloor10, + oddWholeSecondFloor10Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondFloor10, + oddWholeSecondFloor10Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondFloor10, new java.sql.Timestamp( + oddWholeSecondFloor10Exp.roundTime(lowerBoundaryOddWholeSecondFloor10))); + assertNotEquals(oddWholeSecondFloor10, new java.sql.Timestamp( + oddWholeSecondFloor10Exp.roundTime(lowerBoundaryOddWholeSecondFloor10 - 1))); + assertEquals(oddWholeSecondFloor10, new java.sql.Timestamp( + oddWholeSecondFloor10Exp.roundTime(upperBoundaryOddWholeSecondFloor10))); + assertNotEquals(oddWholeSecondFloor10, new java.sql.Timestamp( + oddWholeSecondFloor10Exp.roundTime(upperBoundaryOddWholeSecondFloor10 + 1))); + + // 15 sec range + RoundDateExpression oddWholeSecondFloor15Exp = + getFloorMsExpression("2022-11-11 11:11:0", TimeUnit.SECOND, 15); + java.sql.Timestamp oddWholeSecondFloor15 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:0").getTime()); + long lowerBoundaryOddWholeSecondFloor15 = oddWholeSecondFloor15.getTime(); + long upperBoundaryOddWholeSecondFloor15 = oddWholeSecondFloor15.getTime() + 15 * SEC - 1; + assertEquals(lowerBoundaryOddWholeSecondFloor15, + oddWholeSecondFloor15Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondFloor15, + oddWholeSecondFloor15Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondFloor15, new java.sql.Timestamp( + oddWholeSecondFloor15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15))); + assertNotEquals(oddWholeSecondFloor15, new java.sql.Timestamp( + oddWholeSecondFloor15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15 - 1))); + assertEquals(oddWholeSecondFloor15, new java.sql.Timestamp( + 
oddWholeSecondFloor15Exp.roundTime(upperBoundaryOddWholeSecondFloor15))); + assertNotEquals(oddWholeSecondFloor15, new java.sql.Timestamp( + oddWholeSecondFloor15Exp.roundTime(upperBoundaryOddWholeSecondFloor15 + 1))); + + RoundDateExpression evenWholeMinuteExp = + getFloorMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); + java.sql.Timestamp evenWholeMinute = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); + long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime(); + long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime() + MIN - 1; + assertEquals(lowerBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); + + RoundDateExpression evenWholeMinuteFloor20Exp = + getFloorMsExpression("2022-11-11 11:00:0", TimeUnit.MINUTE, 20); + java.sql.Timestamp evenWholeMinuteFloor20 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:00:0").getTime()); + long lowerBoundaryEvenWholeMinuteFloor20 = evenWholeMinuteFloor20.getTime(); + long upperBoundaryEvenWholeMinuteFloor20 = evenWholeMinuteFloor20.getTime() + 20 * MIN - 1; + assertEquals(lowerBoundaryEvenWholeMinuteFloor20, + evenWholeMinuteFloor20Exp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinuteFloor20, + evenWholeMinuteFloor20Exp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( + evenWholeMinuteFloor20Exp.roundTime(lowerBoundaryEvenWholeMinuteFloor20))); + assertNotEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( + evenWholeMinuteFloor20Exp.roundTime(lowerBoundaryEvenWholeMinuteFloor20 - 1))); + assertEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( + evenWholeMinuteFloor20Exp.roundTime(upperBoundaryEvenWholeMinuteFloor20))); + assertNotEquals(evenWholeMinuteFloor20, new java.sql.Timestamp( + evenWholeMinuteFloor20Exp.roundTime(upperBoundaryEvenWholeMinuteFloor20 + 1))); + + // Minutes since epoch, don't expect the rounded value to be "round" + RoundDateExpression evenWholeMinuteFloor17Exp = + getFloorMsExpression("2022-11-11 11:12:00", TimeUnit.MINUTE, 17); + java.sql.Timestamp evenWholeMinuteFloor17 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); + long lowerBoundaryEvenWholeMinute17 = evenWholeMinuteFloor17.getTime(); + long upperBoundaryEvenWholeMinute17 = evenWholeMinuteFloor17.getTime() + 17 * MIN - 1; + assertEquals(lowerBoundaryEvenWholeMinute17, + evenWholeMinuteFloor17Exp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinute17, + evenWholeMinuteFloor17Exp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinuteFloor17, + new java.sql.Timestamp(evenWholeMinuteFloor17Exp.roundTime(lowerBoundaryEvenWholeMinute17))); + assertNotEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( + evenWholeMinuteFloor17Exp.roundTime(lowerBoundaryEvenWholeMinute17 - 1))); + assertEquals(evenWholeMinuteFloor17, + new 
java.sql.Timestamp(evenWholeMinuteFloor17Exp.roundTime(upperBoundaryEvenWholeMinute17))); + assertNotEquals(evenWholeMinuteFloor17, new java.sql.Timestamp( + evenWholeMinuteFloor17Exp.roundTime(upperBoundaryEvenWholeMinute17 + 1))); + + RoundDateExpression oddWholeHourExp = + getFloorMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); + java.sql.Timestamp oddWholeHour = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); + long lowerBoundaryOddWholeHour = oddWholeHour.getTime(); + long upperBoundaryOddWholeHour = oddWholeHour.getTime() + HOUR - 1; + assertEquals(lowerBoundaryOddWholeHour, oddWholeHourExp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour10Exp = + getFloorMsExpression("2022-11-11 02:0:0", TimeUnit.HOUR, 10); + java.sql.Timestamp oddWholeHour10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 02:0:0").getTime()); + long lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime(); + long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime() + HOUR * 10 - 1; + assertEquals(lowerBoundaryOddWholeHour10, oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour10, oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour11Exp = + getFloorMsExpression("2022-11-11 07:0:0", TimeUnit.HOUR, 11); + java.sql.Timestamp oddWholeHour11 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 07:0:0").getTime()); + long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime(); + long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime() + HOUR * 11 - 1; + assertEquals(lowerBoundaryOddWholeHour11, oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour11, oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); + + // No DST switchover + RoundDateExpression evenWholeDayExp = getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); + java.sql.Timestamp evenWholeDay = + new 
java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay = evenWholeDay.getTime(); + long upperBoundaryEvenWholeDay = evenWholeDay.getTime() + DAY - 1; + assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); + + RoundDateExpression evenWholeDay2Exp = + getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 2); + java.sql.Timestamp evenWholeDay2 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay2 = evenWholeDay2.getTime(); + long upperBoundaryEvenWholeDay2 = evenWholeDay2.getTime() + 2 * DAY - 1; + assertEquals(lowerBoundaryEvenWholeDay2, evenWholeDay2Exp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay2, evenWholeDay2Exp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2))); + assertNotEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2 - 1))); + assertEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2))); + assertNotEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2 + 1))); + + RoundDateExpression evenWholeDay3Exp = + getFloorMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); + java.sql.Timestamp evenWholeDay3 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay3 = evenWholeDay3.getTime(); + long upperBoundaryEvenWholeDay3 = evenWholeDay3.getTime() + 3 * DAY - 1; + assertEquals(lowerBoundaryEvenWholeDay3, evenWholeDay3Exp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay3, evenWholeDay3Exp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3))); + assertNotEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3 - 1))); + assertEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3))); + assertNotEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3 + 1))); + + FloorWeekExpression floorWeekExpression = new FloorWeekExpression(); + java.sql.Timestamp wholeWeekOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); + long lowerBoundaryWholeWeekOdd = wholeWeekOdd.getTime(); + long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime() + WEEK - 1; + assertEquals(lowerBoundaryWholeWeekOdd, floorWeekExpression.rangeLower(wholeWeekOdd.getTime())); + assertEquals(upperBoundaryWholeWeekOdd, floorWeekExpression.rangeUpper(wholeWeekOdd.getTime())); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(floorWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd, 
GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(floorWeekExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(floorWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(floorWeekExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd + 1, GJChronology.getInstanceUTC())))); + + FloorMonthExpression floorMonthExpression = new FloorMonthExpression(); + java.sql.Timestamp wholeMonthOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-07-1 0:0:0").getTime()); + long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime(); + // July is 31 days + long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime() + 31 * DAY - 1; + assertEquals(lowerBoundaryWholeMonthOdd, + floorMonthExpression.rangeLower(wholeMonthOdd.getTime())); + assertEquals(upperBoundaryWholeMonthOdd, + floorMonthExpression.rangeUpper(wholeMonthOdd.getTime())); + assertEquals(wholeMonthOdd, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthOdd, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); + + java.sql.Timestamp wholeMonthLeap = + new java.sql.Timestamp(DateUtil.parseDate("2024-02-1 0:0:0").getTime()); + long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime(); + // February is 29 days + long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime() + 29 * DAY - 1; + assertEquals(lowerBoundaryWholeMonthLeap, + floorMonthExpression.rangeLower(wholeMonthLeap.getTime())); + assertEquals(upperBoundaryWholeMonthLeap, + floorMonthExpression.rangeUpper(wholeMonthLeap.getTime())); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(floorMonthExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); + + FloorYearExpression floorYearExpression = new FloorYearExpression(); + java.sql.Timestamp wholeYearEven = + new java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearEven = wholeYearEven.getTime(); + long upperBoundaryWholeYearEven = wholeYearEven.getTime() + YEAR - 1; + assertEquals(lowerBoundaryWholeYearEven, + floorYearExpression.rangeLower(wholeYearEven.getTime())); + 
assertEquals(upperBoundaryWholeYearEven, + floorYearExpression.rangeUpper(wholeYearEven.getTime())); + assertEquals(wholeYearEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven + 1, GJChronology.getInstanceUTC())))); + + java.sql.Timestamp wholeYearLeapEven = + new java.sql.Timestamp(DateUtil.parseDate("2024-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime(); + long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() + YEAR + DAY - 1; + assertEquals(lowerBoundaryWholeYearLeapEven, + floorYearExpression.rangeLower(wholeYearLeapEven.getTime())); + assertEquals(upperBoundaryWholeYearLeapEven, + floorYearExpression.rangeUpper(wholeYearLeapEven.getTime())); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, + new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( + lowerBoundaryWholeYearLeapEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(floorYearExpression.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, + new java.sql.Timestamp(floorYearExpression.roundDateTime(new org.joda.time.DateTime( + upperBoundaryWholeYearLeapEven + 1, GJChronology.getInstanceUTC())))); + } + + private CeilDateExpression getCeilMsExpression(String s, TimeUnit u, int m) throws SQLException { + return (CeilDateExpression) CeilDateExpression.create(LiteralExpression.newConstant(s), u, m); + } + + @Test + public void testCeilGMT() throws SQLException { + + // No need to repeat odd / even cases + // The logic for upper and lower scan ranges is always + // [floor(ts-1)+1, ceil(ts)] + + RoundDateExpression oddWholeSecondExp = + getCeilMsExpression("2022-11-11 11:11:11", TimeUnit.SECOND, 1); + java.sql.Timestamp oddWholeSecond = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:11").getTime()); + long lowerBoundaryOddWholeSecond = oddWholeSecond.getTime() - SEC + 1; + long upperBoundaryOddWholeSecond = oddWholeSecond.getTime(); + assertEquals(lowerBoundaryOddWholeSecond, + oddWholeSecondExp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecond, + oddWholeSecondExp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(lowerBoundaryOddWholeSecond - 1))); + assertEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond))); + assertNotEquals(oddWholeSecond, + new java.sql.Timestamp(oddWholeSecondExp.roundTime(upperBoundaryOddWholeSecond + 1))); + + // 10 
sec range + RoundDateExpression oddWholeSecondCeil10Exp = + getCeilMsExpression("2022-11-11 11:11:20", TimeUnit.SECOND, 10); + java.sql.Timestamp oddWholeSecondCeil10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:20").getTime()); + long lowerBoundaryOddWholeSecondCeil10 = oddWholeSecondCeil10.getTime() - 10 * SEC + 1; + long upperBoundaryOddWholeSecondCeil10 = oddWholeSecondCeil10.getTime(); + assertEquals(lowerBoundaryOddWholeSecondCeil10, + oddWholeSecondCeil10Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondCeil10, + oddWholeSecondCeil10Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondCeil10, + new java.sql.Timestamp(oddWholeSecondCeil10Exp.roundTime(lowerBoundaryOddWholeSecondCeil10))); + assertNotEquals(oddWholeSecondCeil10, new java.sql.Timestamp( + oddWholeSecondCeil10Exp.roundTime(lowerBoundaryOddWholeSecondCeil10 - 1))); + assertEquals(oddWholeSecondCeil10, + new java.sql.Timestamp(oddWholeSecondCeil10Exp.roundTime(upperBoundaryOddWholeSecondCeil10))); + assertNotEquals(oddWholeSecondCeil10, new java.sql.Timestamp( + oddWholeSecondCeil10Exp.roundTime(upperBoundaryOddWholeSecondCeil10 + 1))); + + // 15 sec range + RoundDateExpression oddWholeSecondCeil15Exp = + getCeilMsExpression("2022-11-11 11:11:15", TimeUnit.SECOND, 15); + java.sql.Timestamp oddWholeSecondCeil15 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:11:15").getTime()); + long lowerBoundaryOddWholeSecondFloor15 = oddWholeSecondCeil15.getTime() - 15 * SEC + 1; + long upperBoundaryOddWholeSecondFloor15 = oddWholeSecondCeil15.getTime(); + assertEquals(lowerBoundaryOddWholeSecondFloor15, + oddWholeSecondCeil15Exp.rangeLower(oddWholeSecond.getTime())); + assertEquals(upperBoundaryOddWholeSecondFloor15, + oddWholeSecondCeil15Exp.rangeUpper(oddWholeSecond.getTime())); + assertEquals(oddWholeSecondCeil15, new java.sql.Timestamp( + oddWholeSecondCeil15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15))); + assertNotEquals(oddWholeSecondCeil15, new java.sql.Timestamp( + oddWholeSecondCeil15Exp.roundTime(lowerBoundaryOddWholeSecondFloor15 - 1))); + assertEquals(oddWholeSecondCeil15, new java.sql.Timestamp( + oddWholeSecondCeil15Exp.roundTime(upperBoundaryOddWholeSecondFloor15))); + assertNotEquals(oddWholeSecondCeil15, new java.sql.Timestamp( + oddWholeSecondCeil15Exp.roundTime(upperBoundaryOddWholeSecondFloor15 + 1))); + + RoundDateExpression evenWholeMinuteExp = + getCeilMsExpression("2022-11-11 11:12:0", TimeUnit.MINUTE, 1); + java.sql.Timestamp evenWholeMinute = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:0").getTime()); + long lowerBoundaryEvenWholeMinute = evenWholeMinute.getTime() - MIN + 1; + long upperBoundaryEvenWholeMinute = evenWholeMinute.getTime(); + assertEquals(lowerBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinute, + evenWholeMinuteExp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(lowerBoundaryEvenWholeMinute - 1))); + assertEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute))); + assertNotEquals(evenWholeMinute, + new java.sql.Timestamp(evenWholeMinuteExp.roundTime(upperBoundaryEvenWholeMinute + 1))); + + RoundDateExpression evenWholeMinuteCeil20Exp = + 
getCeilMsExpression("2022-11-11 11:20:0", TimeUnit.MINUTE, 20); + java.sql.Timestamp evenWholeMinuteCeil20 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:20:0").getTime()); + long lowerBoundaryEvenWholeMinuteCeil20 = evenWholeMinuteCeil20.getTime() - 20 * MIN + 1; + long upperBoundaryEvenWholeMinuteCeil20 = evenWholeMinuteCeil20.getTime(); + assertEquals(lowerBoundaryEvenWholeMinuteCeil20, + evenWholeMinuteCeil20Exp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinuteCeil20, + evenWholeMinuteCeil20Exp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( + evenWholeMinuteCeil20Exp.roundTime(lowerBoundaryEvenWholeMinuteCeil20))); + assertNotEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( + evenWholeMinuteCeil20Exp.roundTime(lowerBoundaryEvenWholeMinuteCeil20 - 1))); + assertEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( + evenWholeMinuteCeil20Exp.roundTime(upperBoundaryEvenWholeMinuteCeil20))); + assertNotEquals(evenWholeMinuteCeil20, new java.sql.Timestamp( + evenWholeMinuteCeil20Exp.roundTime(upperBoundaryEvenWholeMinuteCeil20 + 1))); + + // Minutes since epoch, don't expect the rounded value to be "round" + RoundDateExpression evenWholeMinuteCeil17Exp = + getCeilMsExpression("2022-11-11 11:12:00", TimeUnit.MINUTE, 17); + java.sql.Timestamp evenWholeMinuteCeil17 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:12:00").getTime()); + long lowerBoundaryEvenWholeMinute17 = evenWholeMinuteCeil17.getTime() - 17 * MIN + 1; + long upperBoundaryEvenWholeMinute17 = evenWholeMinuteCeil17.getTime(); + assertEquals(lowerBoundaryEvenWholeMinute17, + evenWholeMinuteCeil17Exp.rangeLower(evenWholeMinute.getTime())); + assertEquals(upperBoundaryEvenWholeMinute17, + evenWholeMinuteCeil17Exp.rangeUpper(evenWholeMinute.getTime())); + assertEquals(evenWholeMinuteCeil17, + new java.sql.Timestamp(evenWholeMinuteCeil17Exp.roundTime(lowerBoundaryEvenWholeMinute17))); + assertNotEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( + evenWholeMinuteCeil17Exp.roundTime(lowerBoundaryEvenWholeMinute17 - 1))); + assertEquals(evenWholeMinuteCeil17, + new java.sql.Timestamp(evenWholeMinuteCeil17Exp.roundTime(upperBoundaryEvenWholeMinute17))); + assertNotEquals(evenWholeMinuteCeil17, new java.sql.Timestamp( + evenWholeMinuteCeil17Exp.roundTime(upperBoundaryEvenWholeMinute17 + 1))); + + RoundDateExpression oddWholeHourExp = + getCeilMsExpression("2022-11-11 11:0:0", TimeUnit.HOUR, 1); + java.sql.Timestamp oddWholeHour = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 11:0:0").getTime()); + long lowerBoundaryOddWholeHour = oddWholeHour.getTime() - HOUR + 1; + long upperBoundaryOddWholeHour = oddWholeHour.getTime(); + assertEquals(lowerBoundaryOddWholeHour, oddWholeHourExp.rangeLower(oddWholeHour.getTime() - 1)); + assertEquals(upperBoundaryOddWholeHour, oddWholeHourExp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(lowerBoundaryOddWholeHour - 1))); + assertEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour))); + assertNotEquals(oddWholeHour, + new java.sql.Timestamp(oddWholeHourExp.roundTime(upperBoundaryOddWholeHour + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour10Exp = + getCeilMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 10); + 
java.sql.Timestamp oddWholeHour10 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 12:0:0").getTime()); + long lowerBoundaryOddWholeHour10 = oddWholeHour10.getTime() - 10 * HOUR + 1; + long upperBoundaryOddWholeHour10 = oddWholeHour10.getTime(); + assertEquals(lowerBoundaryOddWholeHour10, oddWholeHour10Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour10, oddWholeHour10Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(lowerBoundaryOddWholeHour10 - 1))); + assertEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10))); + assertNotEquals(oddWholeHour10, + new java.sql.Timestamp(oddWholeHour10Exp.roundTime(upperBoundaryOddWholeHour10 + 1))); + + // Not rounding to hourOfDay + RoundDateExpression oddWholeHour11Exp = + getCeilMsExpression("2022-11-11 12:0:0", TimeUnit.HOUR, 11); + java.sql.Timestamp oddWholeHour11 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-11 18:0:0").getTime()); + long lowerBoundaryOddWholeHour11 = oddWholeHour11.getTime() - 11 * HOUR + 1; + long upperBoundaryOddWholeHour11 = oddWholeHour11.getTime(); + assertEquals(lowerBoundaryOddWholeHour11, oddWholeHour11Exp.rangeLower(oddWholeHour.getTime())); + assertEquals(upperBoundaryOddWholeHour11, oddWholeHour11Exp.rangeUpper(oddWholeHour.getTime())); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(lowerBoundaryOddWholeHour11 - 1))); + assertEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11))); + assertNotEquals(oddWholeHour11, + new java.sql.Timestamp(oddWholeHour11Exp.roundTime(upperBoundaryOddWholeHour11 + 1))); + + RoundDateExpression evenWholeDayExp = getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 1); + java.sql.Timestamp evenWholeDay = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay = evenWholeDay.getTime() - DAY + 1; + long upperBoundaryEvenWholeDay = evenWholeDay.getTime(); + assertEquals(lowerBoundaryEvenWholeDay, evenWholeDayExp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay, evenWholeDayExp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(lowerBoundaryEvenWholeDay - 1))); + assertEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay))); + assertNotEquals(evenWholeDay, + new java.sql.Timestamp(evenWholeDayExp.roundTime(upperBoundaryEvenWholeDay + 1))); + + RoundDateExpression evenWholeDay2Exp = getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 2); + java.sql.Timestamp evenWholeDay2 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay2 = evenWholeDay2.getTime() - 2 * DAY + 1; + long upperBoundaryEvenWholeDay2 = evenWholeDay2.getTime(); + assertEquals(lowerBoundaryEvenWholeDay2, evenWholeDay2Exp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay2, evenWholeDay2Exp.rangeUpper(evenWholeDay.getTime())); + 
assertEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2))); + assertNotEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(lowerBoundaryEvenWholeDay2 - 1))); + assertEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2))); + assertNotEquals(evenWholeDay2, + new java.sql.Timestamp(evenWholeDay2Exp.roundTime(upperBoundaryEvenWholeDay2 + 1))); + + RoundDateExpression evenWholeDay3Exp = getCeilMsExpression("2022-11-12 0:0:0", TimeUnit.DAY, 3); + java.sql.Timestamp evenWholeDay3 = + new java.sql.Timestamp(DateUtil.parseDate("2022-11-12 0:0:0").getTime()); + long lowerBoundaryEvenWholeDay3 = evenWholeDay3.getTime() - 3 * DAY + 1; + long upperBoundaryEvenWholeDay3 = evenWholeDay3.getTime(); + assertEquals(lowerBoundaryEvenWholeDay3, evenWholeDay3Exp.rangeLower(evenWholeDay.getTime())); + assertEquals(upperBoundaryEvenWholeDay3, evenWholeDay3Exp.rangeUpper(evenWholeDay.getTime())); + assertEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3))); + assertNotEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(lowerBoundaryEvenWholeDay3 - 1))); + assertEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3))); + assertNotEquals(evenWholeDay3, + new java.sql.Timestamp(evenWholeDay3Exp.roundTime(upperBoundaryEvenWholeDay3 + 1))); + + CeilWeekExpression ceilWeekExp = new CeilWeekExpression(); + java.sql.Timestamp wholeWeekOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-10-10 0:0:0").getTime()); + long lowerBoundaryWholeWeekOdd = wholeWeekOdd.getTime() - WEEK + 1; + long upperBoundaryWholeWeekOdd = wholeWeekOdd.getTime(); + assertEquals(lowerBoundaryWholeWeekOdd, ceilWeekExp.rangeLower(wholeWeekOdd.getTime())); + assertEquals(upperBoundaryWholeWeekOdd, ceilWeekExp.rangeUpper(wholeWeekOdd.getTime())); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(ceilWeekExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(ceilWeekExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeWeekOdd - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeWeekOdd, new java.sql.Timestamp(ceilWeekExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeWeekOdd, new java.sql.Timestamp(ceilWeekExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeWeekOdd + 1, GJChronology.getInstanceUTC())))); + + CeilMonthExpression ceilMonthExp = new CeilMonthExpression(); + java.sql.Timestamp wholeMonthOdd = + new java.sql.Timestamp(DateUtil.parseDate("2022-08-1 0:0:0").getTime()); + // July is 31 days + long lowerBoundaryWholeMonthOdd = wholeMonthOdd.getTime() - 31 * DAY + 1; + long upperBoundaryWholeMonthOdd = wholeMonthOdd.getTime(); + assertEquals(lowerBoundaryWholeMonthOdd, ceilMonthExp.rangeLower(wholeMonthOdd.getTime())); + assertEquals(upperBoundaryWholeMonthOdd, ceilMonthExp.rangeUpper(wholeMonthOdd.getTime())); + assertEquals(wholeMonthOdd, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthOdd - 1, GJChronology.getInstanceUTC())))); + 
assertEquals(wholeMonthOdd, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthOdd, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthOdd + 1, GJChronology.getInstanceUTC())))); + + java.sql.Timestamp wholeMonthLeap = + new java.sql.Timestamp(DateUtil.parseDate("2024-03-1 0:0:0").getTime()); + // February is 29 days + long lowerBoundaryWholeMonthLeap = wholeMonthLeap.getTime() - 29 * DAY + 1; + long upperBoundaryWholeMonthLeap = wholeMonthLeap.getTime(); + assertEquals(lowerBoundaryWholeMonthLeap, ceilMonthExp.rangeLower(wholeMonthLeap.getTime())); + assertEquals(upperBoundaryWholeMonthLeap, ceilMonthExp.rangeUpper(wholeMonthLeap.getTime())); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeMonthLeap - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeMonthLeap, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeMonthLeap, new java.sql.Timestamp(ceilMonthExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeMonthLeap + 1, GJChronology.getInstanceUTC())))); + + CeilYearExpression ceilYearExp = new CeilYearExpression(); + java.sql.Timestamp wholeYearEven = + new java.sql.Timestamp(DateUtil.parseDate("2022-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearEven = wholeYearEven.getTime() - YEAR + 1; + long upperBoundaryWholeYearEven = wholeYearEven.getTime(); + assertEquals(lowerBoundaryWholeYearEven, ceilYearExp.rangeLower(wholeYearEven.getTime())); + assertEquals(upperBoundaryWholeYearEven, ceilYearExp.rangeUpper(wholeYearEven.getTime())); + assertEquals(wholeYearEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearEven - 1, GJChronology.getInstanceUTC())))); + assertEquals(wholeYearEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearEven + 1, GJChronology.getInstanceUTC())))); + + java.sql.Timestamp wholeYearLeapEven = + new java.sql.Timestamp(DateUtil.parseDate("2025-1-1 0:0:0").getTime()); + long lowerBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime() - (YEAR + DAY) + 1; + long upperBoundaryWholeYearLeapEven = wholeYearLeapEven.getTime(); + assertEquals(lowerBoundaryWholeYearLeapEven, + ceilYearExp.rangeLower(wholeYearLeapEven.getTime())); + assertEquals(upperBoundaryWholeYearLeapEven, + ceilYearExp.rangeUpper(wholeYearLeapEven.getTime())); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(lowerBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, new java.sql.Timestamp( + ceilYearExp.roundDateTime(new org.joda.time.DateTime(lowerBoundaryWholeYearLeapEven - 1, + 
GJChronology.getInstanceUTC())))); + assertEquals(wholeYearLeapEven, new java.sql.Timestamp(ceilYearExp.roundDateTime( + new org.joda.time.DateTime(upperBoundaryWholeYearLeapEven, GJChronology.getInstanceUTC())))); + assertNotEquals(wholeYearLeapEven, new java.sql.Timestamp( + ceilYearExp.roundDateTime(new org.joda.time.DateTime(upperBoundaryWholeYearLeapEven + 1, + GJChronology.getInstanceUTC())))); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java index cd94159cf73..3eb5eaf1207 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SignFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,9 +38,8 @@ import org.apache.phoenix.schema.types.PUnsignedDouble; import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link SignFunction} @@ -48,78 +47,78 @@ */ public class SignFunctionTest { - private static void testExpression(LiteralExpression literal, Integer expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) literal); - Expression signFunction = new SignFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - signFunction.evaluate(null, ptr); - Integer result = - (Integer) signFunction.getDataType().toObject(ptr, signFunction.getSortOrder()); - assertTrue(result.compareTo(expected) == 0); - } - - private static void test(Number value, PNumericType dataType, int expected) throws SQLException { - LiteralExpression literal; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - testExpression(literal, expected); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - testExpression(literal, expected); - } - - private static void testBatch(Number[] value, PNumericType dataType, int[] expected) - throws SQLException { - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i]); - } + private static void testExpression(LiteralExpression literal, Integer expected) + throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + Expression signFunction = new SignFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + signFunction.evaluate(null, ptr); + Integer result = + (Integer) signFunction.getDataType().toObject(ptr, signFunction.getSortOrder()); + assertTrue(result.compareTo(expected) == 0); + } + + private static void test(Number value, PNumericType dataType, int expected) throws SQLException { + LiteralExpression literal; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + testExpression(literal, expected); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + testExpression(literal, expected); + } + + private static void testBatch(Number[] value, PNumericType 
dataType, int[] expected) + throws SQLException { + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i]); } + } - @Test - public void testSignFunction() throws Exception { - testBatch( - new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234) }, PDecimal.INSTANCE, - new int[] { 1, 0, -1, 1, -1 }); + @Test + public void testSignFunction() throws Exception { + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234) }, + PDecimal.INSTANCE, new int[] { 1, 0, -1, 1, -1 }); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, Float.MAX_VALUE, Float.MIN_VALUE, - -Float.MAX_VALUE, -Float.MIN_VALUE, 123.1234f, -123.1234f }, PFloat.INSTANCE, - new int[] { 1, 0, -1, 1, 1, -1, -1, 1, -1 }); + testBatch( + new Float[] { 1.0f, 0.0f, -1.0f, Float.MAX_VALUE, Float.MIN_VALUE, -Float.MAX_VALUE, + -Float.MIN_VALUE, 123.1234f, -123.1234f }, + PFloat.INSTANCE, new int[] { 1, 0, -1, 1, 1, -1, -1, 1, -1 }); - testBatch(new Float[] { 1.0f, 0.0f, Float.MAX_VALUE, Float.MIN_VALUE, 123.1234f }, - PUnsignedFloat.INSTANCE, new int[] { 1, 0, 1, 1, 1 }); + testBatch(new Float[] { 1.0f, 0.0f, Float.MAX_VALUE, Float.MIN_VALUE, 123.1234f }, + PUnsignedFloat.INSTANCE, new int[] { 1, 0, 1, 1, 1 }); - testBatch(new Double[] { 1.0, 0.0, -1.0, Double.MAX_VALUE, Double.MIN_VALUE, - -Double.MAX_VALUE, -Double.MIN_VALUE, 123.1234, -123.1234 }, PDouble.INSTANCE, - new int[] { 1, 0, -1, 1, 1, -1, -1, 1, -1 }); + testBatch( + new Double[] { 1.0, 0.0, -1.0, Double.MAX_VALUE, Double.MIN_VALUE, -Double.MAX_VALUE, + -Double.MIN_VALUE, 123.1234, -123.1234 }, + PDouble.INSTANCE, new int[] { 1, 0, -1, 1, 1, -1, -1, 1, -1 }); - testBatch(new Double[] { 1.0, 0.0, Double.MAX_VALUE, Double.MIN_VALUE, 123.1234 }, - PUnsignedDouble.INSTANCE, new int[] { 1, 0, 1, 1, 1 }); + testBatch(new Double[] { 1.0, 0.0, Double.MAX_VALUE, Double.MIN_VALUE, 123.1234 }, + PUnsignedDouble.INSTANCE, new int[] { 1, 0, 1, 1, 1 }); - testBatch(new Long[] { (long) 1, (long) 0, (long) -1, Long.MAX_VALUE, Long.MIN_VALUE, - (long) 123, (long) -123 }, PLong.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); + testBatch(new Long[] { (long) 1, (long) 0, (long) -1, Long.MAX_VALUE, Long.MIN_VALUE, + (long) 123, (long) -123 }, PLong.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); - testBatch(new Long[] { (long) 1, (long) 0, Long.MAX_VALUE, (long) 123 }, PLong.INSTANCE, - new int[] { 1, 0, 1, 1 }); + testBatch(new Long[] { (long) 1, (long) 0, Long.MAX_VALUE, (long) 123 }, PLong.INSTANCE, + new int[] { 1, 0, 1, 1 }); - testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123 }, - PInteger.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123 }, + PInteger.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE, new int[] { - 1, 0, 1, 1 }); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE, + new int[] { 1, 0, 1, 1 }); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE, - new int[] { 1, 0, -1, 1, -1, 1, -1 }); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, 
Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - PSmallint.INSTANCE, new int[] { 1, 0, 1, 1 }); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + PSmallint.INSTANCE, new int[] { 1, 0, 1, 1 }); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE, new int[] { 1, 0, -1, 1, -1, 1, -1 }); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE, - new int[] { 1, 0, 1, 1 }); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE, + new int[] { 1, 0, 1, 1 }); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java index 68f6c1f26ad..743637b79f2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SortOrderExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -63,297 +63,325 @@ import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.DateUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * @since 1.2 */ public class SortOrderExpressionTest { - - @Test - public void substr() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral(3), getLiteral(2)); - evaluateAndAssertResult(new SubstrFunction(args), "ah"); - } - - @Test - public void regexpSubstr() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral("l.h"), getLiteral(2)); - evaluateAndAssertResult(new StringBasedRegexpSubstrFunction(args), "lah"); - evaluateAndAssertResult(new ByteBasedRegexpSubstrFunction(args), "lah"); - } - - @Test - public void regexpReplace() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral("l.h"), getLiteral("foo")); - evaluateAndAssertResult(new ByteBasedRegexpReplaceFunction(args), "bfoo"); - evaluateAndAssertResult(new StringBasedRegexpReplaceFunction(args), "bfoo"); - } - - @Test - public void ltrim() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(" blah", PChar.INSTANCE)); - evaluateAndAssertResult(new LTrimFunction(args), "blah"); - } - - @Test - public void substrLtrim() throws Exception { - List ltrimArgs = Lists.newArrayList(getInvertedLiteral(" blah", PChar.INSTANCE)); - Expression ltrim = new LTrimFunction(ltrimArgs); - List substrArgs = Lists.newArrayList(ltrim, getLiteral(3), getLiteral(2)); - 
evaluateAndAssertResult(new SubstrFunction(substrArgs), "ah"); - } - - @Test - public void rtrim() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah ", PChar.INSTANCE)); - evaluateAndAssertResult(new RTrimFunction(args), "blah"); - } - - @Test - public void lower() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("BLAH", PChar.INSTANCE)); - evaluateAndAssertResult(new LowerFunction(args), "blah"); - } - - @Test - public void upper() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE)); - evaluateAndAssertResult(new UpperFunction(args), "BLAH"); - } - - @Test - public void length() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE)); - evaluateAndAssertResult(new LengthFunction(args), 4); - } - - @Test - public void round() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(date(12, 11, 2001), PDate.INSTANCE), getLiteral("hour"), getLiteral(1)); - evaluateAndAssertResult(RoundDateExpression.create(args), date(12, 11, 2001)); - } - - @Test - public void sqlTypeName() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(12, PInteger.INSTANCE)); - evaluateAndAssertResult(new SqlTypeNameFunction(args), "VARCHAR"); - } - - @Test - public void toChar() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(date(12, 11, 2001), PDate.INSTANCE)); - // We may get AM or am depending on Java version, see JDK-8211985 - // This is just a hack to accept any case, without completely rewriting the test logic - Object caseInsensitiveExpected = new Object() { - @Override - public boolean equals(Object other) { - return (other instanceof String) && "12/11/01 12:00 AM".equalsIgnoreCase((String) other); - } - }; - evaluateAndAssertResult(new ToCharFunction(args, FunctionArgumentType.TEMPORAL, "", DateUtil.getDateFormatter("MM/dd/yy hh:mm a")), - caseInsensitiveExpected); - } - - @Test - public void toDate() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("2001-11-30 00:00:00:0", PVarchar.INSTANCE)); - evaluateAndAssertResult(new ToDateFunction(args, "yyyy-MM-dd HH:mm:ss:S",DateUtil.DEFAULT_TIME_ZONE_ID), date(11, 30, 2001)); - } - - @Test - public void toNumber() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("10", PVarchar.INSTANCE)); - evaluateAndAssertResult(new ToNumberFunction(args, FunctionArgumentType.CHAR, "", null), new BigDecimal(BigInteger.valueOf(1), -1)); - } - - @Test - public void trim() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(" blah ", PChar.INSTANCE)); - evaluateAndAssertResult(new TrimFunction(args), "blah"); - } - - @Test - public void lpad() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("ABCD", PChar.INSTANCE), getLiteral(7), getLiteral("12")); - evaluateAndAssertResult(new LpadFunction(args), "121ABCD"); - } - - @Test - public void add() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DecimalAddExpression(args), BigDecimal.valueOf(12)); - - args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new LongAddExpression(args), 12l); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), 
getLiteral(2)); - evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); - } - @Test - public void subtract() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DecimalSubtractExpression(args), BigDecimal.valueOf(8)); - - args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new LongSubtractExpression(args), 8l); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); - } - - @Test - public void divide() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DecimalDivideExpression(args), BigDecimal.valueOf(5)); - - args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new LongDivideExpression(args), 5l); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); - } - - @Test - public void multiply() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DecimalMultiplyExpression(args), new BigDecimal(BigInteger.valueOf(2), -1)); - - args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new LongMultiplyExpression(args), 20l); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); - - args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); - evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); - } - - @Test - public void 
compareNumbers() throws Exception { - PDataType[] numberDataTypes = new PDataType[]{ PInteger.INSTANCE, PLong.INSTANCE, PDecimal.INSTANCE, PUnsignedInt.INSTANCE, PUnsignedLong.INSTANCE}; - for (PDataType lhsDataType : numberDataTypes) { - for (PDataType rhsDataType : numberDataTypes) { - runCompareTest(CompareOperator.GREATER, true, 10, lhsDataType, 2, rhsDataType); - } - } - } - - @Test - public void compareCharacters() throws Exception { - PDataType[] textDataTypes = new PDataType[]{ PChar.INSTANCE, PVarchar.INSTANCE}; - for (PDataType lhsDataType : textDataTypes) { - for (PDataType rhsDataType : textDataTypes) { - runCompareTest(CompareOperator.GREATER, true, "xxx", lhsDataType, "bbb", rhsDataType); - } - } - } - - @Test - public void compareBooleans() throws Exception { - runCompareTest(CompareOperator.GREATER, true, true, PBoolean.INSTANCE, false, PBoolean.INSTANCE); - } - - @Test - public void stringConcat() throws Exception { - List args = Lists.newArrayList(getInvertedLiteral("blah", PVarchar.INSTANCE), getInvertedLiteral("foo", PVarchar.INSTANCE)); - evaluateAndAssertResult(new StringConcatExpression(args), "blahfoo"); - - args = Lists.newArrayList(getInvertedLiteral("blah", PVarchar.INSTANCE), getInvertedLiteral(10, PInteger.INSTANCE)); - evaluateAndAssertResult(new StringConcatExpression(args), "blah10"); - } - - private void runCompareTest(CompareOperator op, boolean expectedResult, Object lhsValue, PDataType lhsDataType, Object rhsValue, PDataType rhsDataType) throws Exception { - List args; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - - args = Lists.newArrayList(getLiteral(lhsValue, lhsDataType), getLiteral(rhsValue, rhsDataType)); - evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, "lhsDataType: " + lhsDataType + " rhsDataType: " + rhsDataType); - - args = Lists.newArrayList(getInvertedLiteral(lhsValue, lhsDataType), getLiteral(rhsValue, rhsDataType)); - evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, "lhs (inverted) dataType: " + lhsDataType + " rhsDataType: " + rhsDataType); - - args = Lists.newArrayList(getLiteral(lhsValue, lhsDataType), getInvertedLiteral(rhsValue, rhsDataType)); - evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, "lhsDataType: " + lhsDataType + " rhs (inverted) dataType: " + rhsDataType); - - args = Lists.newArrayList(getInvertedLiteral(lhsValue, lhsDataType), getInvertedLiteral(rhsValue, rhsDataType)); - evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, "lhs (inverted) dataType: " + lhsDataType + " rhs (inverted) dataType: " + rhsDataType); - } - - private void evaluateAndAssertResult(Expression expression, Object expectedResult) { - evaluateAndAssertResult(expression, expectedResult, null); - } - - private void evaluateAndAssertResult(Expression expression, Object expectedResult, String context) { - context = context == null ? 
"" : context; - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - assertTrue(expression.evaluate(null, ptr)); - PDataType dataType = expression.getDataType(); - SortOrder sortOrder = expression.getSortOrder(); - Object result = dataType.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), dataType, sortOrder); - assertEquals(context, expectedResult, result); - } - - private Expression getLiteral(Object value) throws Exception { - return LiteralExpression.newConstant(value); + @Test + public void substr() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), getLiteral(3), getLiteral(2)); + evaluateAndAssertResult(new SubstrFunction(args), "ah"); + } + + @Test + public void regexpSubstr() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), + getLiteral("l.h"), getLiteral(2)); + evaluateAndAssertResult(new StringBasedRegexpSubstrFunction(args), "lah"); + evaluateAndAssertResult(new ByteBasedRegexpSubstrFunction(args), "lah"); + } + + @Test + public void regexpReplace() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE), + getLiteral("l.h"), getLiteral("foo")); + evaluateAndAssertResult(new ByteBasedRegexpReplaceFunction(args), "bfoo"); + evaluateAndAssertResult(new StringBasedRegexpReplaceFunction(args), "bfoo"); + } + + @Test + public void ltrim() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral(" blah", PChar.INSTANCE)); + evaluateAndAssertResult(new LTrimFunction(args), "blah"); + } + + @Test + public void substrLtrim() throws Exception { + List ltrimArgs = Lists.newArrayList(getInvertedLiteral(" blah", PChar.INSTANCE)); + Expression ltrim = new LTrimFunction(ltrimArgs); + List substrArgs = Lists.newArrayList(ltrim, getLiteral(3), getLiteral(2)); + evaluateAndAssertResult(new SubstrFunction(substrArgs), "ah"); + } + + @Test + public void rtrim() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah ", PChar.INSTANCE)); + evaluateAndAssertResult(new RTrimFunction(args), "blah"); + } + + @Test + public void lower() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("BLAH", PChar.INSTANCE)); + evaluateAndAssertResult(new LowerFunction(args), "blah"); + } + + @Test + public void upper() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE)); + evaluateAndAssertResult(new UpperFunction(args), "BLAH"); + } + + @Test + public void length() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah", PChar.INSTANCE)); + evaluateAndAssertResult(new LengthFunction(args), 4); + } + + @Test + public void round() throws Exception { + List args = Lists.newArrayList( + getInvertedLiteral(date(12, 11, 2001), PDate.INSTANCE), getLiteral("hour"), getLiteral(1)); + evaluateAndAssertResult(RoundDateExpression.create(args), date(12, 11, 2001)); + } + + @Test + public void sqlTypeName() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral(12, PInteger.INSTANCE)); + evaluateAndAssertResult(new SqlTypeNameFunction(args), "VARCHAR"); + } + + @Test + public void toChar() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral(date(12, 11, 2001), PDate.INSTANCE)); + // We may get AM or am depending on Java version, see JDK-8211985 + // This is just a hack to accept any case, without completely rewriting the test logic + Object caseInsensitiveExpected = new Object() { + @Override + public boolean equals(Object other) { 
+ return (other instanceof String) && "12/11/01 12:00 AM".equalsIgnoreCase((String) other); + } + }; + evaluateAndAssertResult(new ToCharFunction(args, FunctionArgumentType.TEMPORAL, "", + DateUtil.getDateFormatter("MM/dd/yy hh:mm a")), caseInsensitiveExpected); + } + + @Test + public void toDate() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral("2001-11-30 00:00:00:0", PVarchar.INSTANCE)); + evaluateAndAssertResult( + new ToDateFunction(args, "yyyy-MM-dd HH:mm:ss:S", DateUtil.DEFAULT_TIME_ZONE_ID), + date(11, 30, 2001)); + } + + @Test + public void toNumber() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("10", PVarchar.INSTANCE)); + evaluateAndAssertResult(new ToNumberFunction(args, FunctionArgumentType.CHAR, "", null), + new BigDecimal(BigInteger.valueOf(1), -1)); + } + + @Test + public void trim() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral(" blah ", PChar.INSTANCE)); + evaluateAndAssertResult(new TrimFunction(args), "blah"); + } + + @Test + public void lpad() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("ABCD", PChar.INSTANCE), + getLiteral(7), getLiteral("12")); + evaluateAndAssertResult(new LpadFunction(args), "121ABCD"); + } + + @Test + public void add() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DecimalAddExpression(args), BigDecimal.valueOf(12)); + + args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new LongAddExpression(args), 12l); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleAddExpression(args), 12.0); + } + + @Test + public void subtract() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DecimalSubtractExpression(args), BigDecimal.valueOf(8)); + + args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new LongSubtractExpression(args), 8l); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleSubtractExpression(args), 8.0); + } + + @Test + public void divide() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DecimalDivideExpression(args), BigDecimal.valueOf(5)); + + args = 
Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new LongDivideExpression(args), 5l); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleDivideExpression(args), 5.0); + } + + @Test + public void multiply() throws Exception { + List args = + Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DecimalMultiplyExpression(args), + new BigDecimal(BigInteger.valueOf(2), -1)); + + args = Lists.newArrayList(getInvertedLiteral(10, PInteger.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new LongMultiplyExpression(args), 20l); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedFloat.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PUnsignedDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); + + args = Lists.newArrayList(getInvertedLiteral(10.0, PDouble.INSTANCE), getLiteral(2)); + evaluateAndAssertResult(new DoubleMultiplyExpression(args), 20.0); + } + + @Test + public void compareNumbers() throws Exception { + PDataType[] numberDataTypes = new PDataType[] { PInteger.INSTANCE, PLong.INSTANCE, + PDecimal.INSTANCE, PUnsignedInt.INSTANCE, PUnsignedLong.INSTANCE }; + for (PDataType lhsDataType : numberDataTypes) { + for (PDataType rhsDataType : numberDataTypes) { + runCompareTest(CompareOperator.GREATER, true, 10, lhsDataType, 2, rhsDataType); + } } - - private Expression getLiteral(Object value, PDataType dataType) throws Exception { - return LiteralExpression.newConstant(value, dataType); - } - - private Expression getInvertedLiteral(Object literal, PDataType dataType) throws Exception { - return LiteralExpression.newConstant(literal, dataType, SortOrder.DESC); + } + + @Test + public void compareCharacters() throws Exception { + PDataType[] textDataTypes = new PDataType[] { PChar.INSTANCE, PVarchar.INSTANCE }; + for (PDataType lhsDataType : textDataTypes) { + for (PDataType rhsDataType : textDataTypes) { + runCompareTest(CompareOperator.GREATER, true, "xxx", lhsDataType, "bbb", rhsDataType); + } } - - private static Date date(int month, int day, int year) { - Calendar cal = new GregorianCalendar(); - cal.set(Calendar.MONTH, month-1); - cal.set(Calendar.DAY_OF_MONTH, day); - cal.set(Calendar.YEAR, year); - cal.set(Calendar.HOUR_OF_DAY, 0); - cal.set(Calendar.MINUTE, 0); - cal.set(Calendar.SECOND, 0); - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone(DateUtil.DEFAULT_TIME_ZONE_ID)); - Date d = new Date(cal.getTimeInMillis()); - return d; - } + } + + @Test + public void compareBooleans() throws Exception { + runCompareTest(CompareOperator.GREATER, true, true, PBoolean.INSTANCE, false, + 
PBoolean.INSTANCE); + } + + @Test + public void stringConcat() throws Exception { + List args = Lists.newArrayList(getInvertedLiteral("blah", PVarchar.INSTANCE), + getInvertedLiteral("foo", PVarchar.INSTANCE)); + evaluateAndAssertResult(new StringConcatExpression(args), "blahfoo"); + + args = Lists.newArrayList(getInvertedLiteral("blah", PVarchar.INSTANCE), + getInvertedLiteral(10, PInteger.INSTANCE)); + evaluateAndAssertResult(new StringConcatExpression(args), "blah10"); + } + + private void runCompareTest(CompareOperator op, boolean expectedResult, Object lhsValue, + PDataType lhsDataType, Object rhsValue, PDataType rhsDataType) throws Exception { + List args; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + + args = Lists.newArrayList(getLiteral(lhsValue, lhsDataType), getLiteral(rhsValue, rhsDataType)); + evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, + "lhsDataType: " + lhsDataType + " rhsDataType: " + rhsDataType); + + args = Lists.newArrayList(getInvertedLiteral(lhsValue, lhsDataType), + getLiteral(rhsValue, rhsDataType)); + evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, + "lhs (inverted) dataType: " + lhsDataType + " rhsDataType: " + rhsDataType); + + args = Lists.newArrayList(getLiteral(lhsValue, lhsDataType), + getInvertedLiteral(rhsValue, rhsDataType)); + evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, + "lhsDataType: " + lhsDataType + " rhs (inverted) dataType: " + rhsDataType); + + args = Lists.newArrayList(getInvertedLiteral(lhsValue, lhsDataType), + getInvertedLiteral(rhsValue, rhsDataType)); + evaluateAndAssertResult(ComparisonExpression.create(op, args, ptr, true), expectedResult, + "lhs (inverted) dataType: " + lhsDataType + " rhs (inverted) dataType: " + rhsDataType); + } + + private void evaluateAndAssertResult(Expression expression, Object expectedResult) { + evaluateAndAssertResult(expression, expectedResult, null); + } + + private void evaluateAndAssertResult(Expression expression, Object expectedResult, + String context) { + context = context == null ? 
"" : context; + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + assertTrue(expression.evaluate(null, ptr)); + PDataType dataType = expression.getDataType(); + SortOrder sortOrder = expression.getSortOrder(); + Object result = + dataType.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), dataType, sortOrder); + assertEquals(context, expectedResult, result); + } + + private Expression getLiteral(Object value) throws Exception { + return LiteralExpression.newConstant(value); + } + + private Expression getLiteral(Object value, PDataType dataType) throws Exception { + return LiteralExpression.newConstant(value, dataType); + } + + private Expression getInvertedLiteral(Object literal, PDataType dataType) throws Exception { + return LiteralExpression.newConstant(literal, dataType, SortOrder.DESC); + } + + private static Date date(int month, int day, int year) { + Calendar cal = new GregorianCalendar(); + cal.set(Calendar.MONTH, month - 1); + cal.set(Calendar.DAY_OF_MONTH, day); + cal.set(Calendar.YEAR, year); + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + cal.setTimeZone(TimeZone.getTimeZone(DateUtil.DEFAULT_TIME_ZONE_ID)); + Date d = new Date(cal.getTimeInMillis()); + return d; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java index 997dfe43918..c3ef61cc57e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/SqrtFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,94 +41,90 @@ import org.apache.phoenix.schema.types.PUnsignedFloat; import org.apache.phoenix.schema.types.PUnsignedInt; import org.apache.phoenix.schema.types.PUnsignedLong; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * Unit tests for {@link SqrtFunction} */ public class SqrtFunctionTest { - private static boolean testExpression(LiteralExpression literal, double expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) literal); - Expression sqrtFunction = new SqrtFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean ret = sqrtFunction.evaluate(null, ptr); - if (ret) { - Double result = - (Double) sqrtFunction.getDataType().toObject(ptr, sqrtFunction.getSortOrder()); - assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expected)); - } - return ret; + private static boolean testExpression(LiteralExpression literal, double expected) + throws SQLException { + List expressions = Lists.newArrayList((Expression) literal); + Expression sqrtFunction = new SqrtFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean ret = sqrtFunction.evaluate(null, ptr); + if (ret) { + Double result = + (Double) sqrtFunction.getDataType().toObject(ptr, sqrtFunction.getSortOrder()); + assertTrue(BaseTest.twoDoubleEquals(result.doubleValue(), expected)); } - - private static void test(Number value, PNumericType dataType, double expected) - throws SQLException { - LiteralExpression literal; - literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); - boolean ret1 = testExpression(literal, expected); - literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); - boolean ret2 = testExpression(literal, expected); - assertEquals(ret1, ret2); + return ret; + } + + private static void test(Number value, PNumericType dataType, double expected) + throws SQLException { + LiteralExpression literal; + literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC); + boolean ret1 = testExpression(literal, expected); + literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC); + boolean ret2 = testExpression(literal, expected); + assertEquals(ret1, ret2); + } + + private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { + double[] expected = new double[value.length]; + for (int i = 0; i < expected.length; ++i) { + expected[i] = Math.sqrt(value[i].doubleValue()); } - - private static void testBatch(Number[] value, PNumericType dataType) throws SQLException { - double[] expected = new double[value.length]; - for (int i = 0; i < expected.length; ++i) { - expected[i] = Math.sqrt(value[i].doubleValue()); - } - assertEquals(value.length, expected.length); - for (int i = 0; i < value.length; ++i) { - test(value[i], dataType, expected[i]); - } + assertEquals(value.length, expected.length); + for (int i = 0; i < value.length; ++i) { + test(value[i], dataType, expected[i]); } + } - @Test - public void testSqrtFunction() throws Exception { - Random random = new Random(); + @Test + public void testSqrtFunction() throws Exception { + Random random = new Random(); - testBatch( - new 
BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), - BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234), - BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()), - BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE); + testBatch( + new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0), BigDecimal.valueOf(-1.0), + BigDecimal.valueOf(123.1234), BigDecimal.valueOf(-123.1234), + BigDecimal.valueOf(random.nextDouble()), BigDecimal.valueOf(random.nextDouble()) }, + PDecimal.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), - random.nextFloat() }, PFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(), + random.nextFloat() }, PFloat.INSTANCE); - testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); + testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE); - testBatch( - new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), - random.nextDouble() }, PDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(), + random.nextDouble() }, PDouble.INSTANCE); - testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); + testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE); - testBatch( - new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, - random.nextLong(), random.nextLong() }, PLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L, + random.nextLong(), random.nextLong() }, PLong.INSTANCE); - testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); + testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE); - testBatch( - new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, - random.nextInt(), random.nextInt() }, PInteger.INSTANCE); + testBatch(new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123, + random.nextInt(), random.nextInt() }, PInteger.INSTANCE); - testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); + testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, - (short) 123, (short) -123 }, PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE, + (short) 123, (short) -123 }, PSmallint.INSTANCE); - testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, - PSmallint.INSTANCE); + testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 }, + PSmallint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, - (byte) 123, (byte) -123 }, PTinyint.INSTANCE); + testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE, + (byte) 123, (byte) -123 }, PTinyint.INSTANCE); - testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); - } + testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/StringToArrayFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/StringToArrayFunctionTest.java index 99fa292bb77..0db90f4dc78 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/StringToArrayFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/StringToArrayFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,250 +26,297 @@ import org.apache.phoenix.expression.function.StringToArrayFunction; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.*; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class StringToArrayFunctionTest { - private static void testExpression(LiteralExpression array, LiteralExpression delimiter, LiteralExpression nullString, PhoenixArray expected) - throws SQLException { - List expressions = Lists.newArrayList((Expression) array); - expressions.add(delimiter); - expressions.add(nullString); + private static void testExpression(LiteralExpression array, LiteralExpression delimiter, + LiteralExpression nullString, PhoenixArray expected) throws SQLException { + List expressions = Lists.newArrayList((Expression) array); + expressions.add(delimiter); + expressions.add(nullString); - Expression stringToArrayFunction = new StringToArrayFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - stringToArrayFunction.evaluate(null, ptr); - PhoenixArray result = (PhoenixArray) stringToArrayFunction.getDataType().toObject(ptr, stringToArrayFunction.getSortOrder(), stringToArrayFunction.getMaxLength(), stringToArrayFunction.getScale()); - assertEquals(expected, result); - } + Expression stringToArrayFunction = new StringToArrayFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + stringToArrayFunction.evaluate(null, ptr); + PhoenixArray result = (PhoenixArray) stringToArrayFunction.getDataType().toObject(ptr, + stringToArrayFunction.getSortOrder(), stringToArrayFunction.getMaxLength(), + stringToArrayFunction.getScale()); + assertEquals(expected, result); + } - private static void test(String string, String delimiter, String nullString, PhoenixArray expected, SortOrder stringSortOrder, SortOrder delimiterSortOrder, SortOrder nullStringSortOrder, PDataType stringType, PDataType delimiterType, PDataType nullStringType) throws SQLException { - LiteralExpression arrayLiteral, delimiterLiteral, nullStringLiteral; - arrayLiteral = LiteralExpression.newConstant(string, stringType, null, null, stringSortOrder, Determinism.ALWAYS); - delimiterLiteral = LiteralExpression.newConstant(delimiter, delimiterType, null, null, delimiterSortOrder, Determinism.ALWAYS); - nullStringLiteral = LiteralExpression.newConstant(nullString, nullStringType, null, null, nullStringSortOrder, Determinism.ALWAYS); - testExpression(arrayLiteral, delimiterLiteral, nullStringLiteral, expected); - } + private static void test(String string, String delimiter, String nullString, + PhoenixArray expected, SortOrder stringSortOrder, SortOrder delimiterSortOrder, + SortOrder 
nullStringSortOrder, PDataType stringType, PDataType delimiterType, + PDataType nullStringType) throws SQLException { + LiteralExpression arrayLiteral, delimiterLiteral, nullStringLiteral; + arrayLiteral = LiteralExpression.newConstant(string, stringType, null, null, stringSortOrder, + Determinism.ALWAYS); + delimiterLiteral = LiteralExpression.newConstant(delimiter, delimiterType, null, null, + delimiterSortOrder, Determinism.ALWAYS); + nullStringLiteral = LiteralExpression.newConstant(nullString, nullStringType, null, null, + nullStringSortOrder, Determinism.ALWAYS); + testExpression(arrayLiteral, delimiterLiteral, nullStringLiteral, expected); + } - @Test - public void testStringToArrayFunction1() throws SQLException { - String string = "1,2,3,4,5"; - Object[] o1 = new Object[]{"1", "2", "3", "4", "5"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ","; - String nullString = "*"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction1() throws SQLException { + String string = "1,2,3,4,5"; + Object[] o1 = new Object[] { "1", "2", "3", "4", "5" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ","; + String nullString = "*"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction2() throws SQLException { - String string = "1,2,3,4,5"; - Object[] o1 = new Object[]{"1", "2", "3", "4", "5"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ","; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction2() throws SQLException { + String string = "1,2,3,4,5"; + Object[] o1 = new Object[] { "1", "2", "3", "4", "5" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ","; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction3() throws SQLException { - String string = "1234"; - Object[] o1 = new Object[]{"1", "2", "3", "4"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = null; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, 
PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction3() throws SQLException { + String string = "1234"; + Object[] o1 = new Object[] { "1", "2", "3", "4" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = null; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction4() throws SQLException { - String string = "1"; - Object[] o1 = new Object[]{"1"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ","; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction4() throws SQLException { + String string = "1"; + Object[] o1 = new Object[] { "1" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ","; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction5() throws SQLException { - String string = "hello, hello, hello"; - Object[] o1 = new Object[]{"hello", "hello", "hello"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ", "; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction5() throws SQLException { + String string = "hello, hello, hello"; + Object[] o1 = new Object[] { "hello", "hello", "hello" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ", "; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction6() throws SQLException { - String string = "1.2...2.3...5.6"; - Object[] o1 = new Object[]{"1.2", "2.3", "5.6"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = "..."; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + 
public void testStringToArrayFunction6() throws SQLException { + String string = "1.2...2.3...5.6"; + Object[] o1 = new Object[] { "1.2", "2.3", "5.6" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = "..."; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction7() throws SQLException { - String string = "a\\b\\c\\d\\e\\f"; - Object[] o1 = new Object[]{"a", "b", "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = "\\"; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction7() throws SQLException { + String string = "a\\b\\c\\d\\e\\f"; + Object[] o1 = new Object[] { "a", "b", "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = "\\"; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction8() throws SQLException { - String string = "a-b-c-d-e-f-"; - Object[] o1 = new Object[]{"a", "b", "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = "-"; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction8() throws SQLException { + String string = "a-b-c-d-e-f-"; + Object[] o1 = new Object[] { "a", "b", "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = "-"; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction9() throws SQLException { - String string = "a b c d e f"; - Object[] o1 = new Object[]{"a", "b", "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, 
PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction9() throws SQLException { + String string = "a b c d e f"; + Object[] o1 = new Object[] { "a", "b", "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction10() throws SQLException { - String string = "axbxcxdxexf"; - Object[] o1 = new Object[]{"a", "b", "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = "x"; - String nullString = ""; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction10() throws SQLException { + String string = "axbxcxdxexf"; + Object[] o1 = new Object[] { "a", "b", "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = "x"; + String nullString = ""; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction11() throws SQLException { - String string = "axbxcxdxexfx*"; - Object[] o1 = new Object[]{"a", "b", "c", "d", "e", "f", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = "x"; - String nullString = "*"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction11() throws SQLException { + String string = "axbxcxdxexfx*"; + Object[] o1 = new Object[] { "a", "b", "c", "d", "e", "f", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = "x"; + String nullString = "*"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction12() throws SQLException { - String string = "* a b c d e f"; - Object[] o1 = new Object[]{null, "a", "b", "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = "*"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, 
SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction12() throws SQLException { + String string = "* a b c d e f"; + Object[] o1 = new Object[] { null, "a", "b", "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = "*"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction13() throws SQLException { - String string = "a * b * c d e f"; - Object[] o1 = new Object[]{"a", null, "b", null, "c", "d", "e", "f"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = "*"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction13() throws SQLException { + String string = "a * b * c d e f"; + Object[] o1 = new Object[] { "a", null, "b", null, "c", "d", "e", "f" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = "*"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction14() throws SQLException { - String string = "null a null"; - Object[] o1 = new Object[]{null, "a", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction14() throws SQLException { + String string = "null a null"; + Object[] o1 = new Object[] { null, "a", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction16() throws SQLException { - String string = "null a null"; - Object[] o1 = new Object[]{null, "a", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, 
expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction16() throws SQLException { + String string = "null a null"; + Object[] o1 = new Object[] { null, "a", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction17() throws SQLException { - String string = "null a null"; - Object[] o1 = new Object[]{null, "a", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = " "; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction17() throws SQLException { + String string = "null a null"; + Object[] o1 = new Object[] { null, "a", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = " "; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction18() throws SQLException { - String string = "null,a,null"; - Object[] o1 = new Object[]{null, "a", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ","; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PChar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PChar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction18() throws SQLException { + String string = "null,a,null"; + Object[] o1 = new Object[] { null, "a", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ","; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PVarchar.INSTANCE); + } - @Test - public void testStringToArrayFunction19() throws SQLException { - String string = "null,a,null"; - Object[] o1 = new Object[]{null, "a", null}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = ","; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, 
PChar.INSTANCE, PChar.INSTANCE); - } + @Test + public void testStringToArrayFunction19() throws SQLException { + String string = "null,a,null"; + Object[] o1 = new Object[] { null, "a", null }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = ","; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); + } - @Test - public void testStringToArrayFunction20() throws SQLException { - String string = "abc"; - Object[] o1 = new Object[]{"a", "b", "c"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = null; - String nullString = "null"; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); - } + @Test + public void testStringToArrayFunction20() throws SQLException { + String string = "abc"; + Object[] o1 = new Object[] { "a", "b", "c" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = null; + String nullString = "null"; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PChar.INSTANCE, PChar.INSTANCE, PChar.INSTANCE); + } - @Test - public void testStringToArrayFunction21() throws SQLException { - String string = "(?!^)"; - Object[] o1 = new Object[]{"(", "?", "!", "^", ")"}; - PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); - String delimiter = null; - String nullString = null; - test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); - } + @Test + public void testStringToArrayFunction21() throws SQLException { + String string = "(?!^)"; + Object[] o1 = new Object[] { "(", "?", "!", "^", ")" }; + PhoenixArray expected = new PhoenixArray(PVarchar.INSTANCE, o1); + String delimiter = null; + String nullString = null; + test(string, delimiter, nullString, expected, SortOrder.ASC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + test(string, delimiter, nullString, expected, SortOrder.DESC, SortOrder.ASC, SortOrder.ASC, + PVarchar.INSTANCE, PVarchar.INSTANCE, PVarchar.INSTANCE); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/BuiltinFunctionConstructorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/BuiltinFunctionConstructorTest.java index 4db66281614..e05edd4f375 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/BuiltinFunctionConstructorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/BuiltinFunctionConstructorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,41 +31,48 @@ public class BuiltinFunctionConstructorTest { - @Test - public void testChildrenListConstructors() throws Exception { - ExpressionType[] types = ExpressionType.values(); - for(int i = 0; i < types.length; i++) { - try { - Class expressionClass= types[i].getExpressionClass(); - if(!Modifier.isAbstract( expressionClass.getModifiers() ) && (ScalarFunction.class.isAssignableFrom(expressionClass)) && (expressionClass != UDFExpression.class)) { - Method cloneMethod = expressionClass.getMethod("clone", List.class); - assertNotNull(cloneMethod); - // ScalarFunctions that implement clone(List) don't need to implement a constructor that takes a List - if (cloneMethod.getDeclaringClass() == ScalarFunction.class) { - Constructor cons = expressionClass.getDeclaredConstructor(List.class); - assertTrue("Constructor for " + expressionClass + " is not public", Modifier.isPublic(cons.getModifiers())); - } - } - } catch (Exception e) { - throw new RuntimeException("Unable to find required List constructor " + types[i].getExpressionClass().getName(), e); - } + @Test + public void testChildrenListConstructors() throws Exception { + ExpressionType[] types = ExpressionType.values(); + for (int i = 0; i < types.length; i++) { + try { + Class expressionClass = types[i].getExpressionClass(); + if ( + !Modifier.isAbstract(expressionClass.getModifiers()) + && (ScalarFunction.class.isAssignableFrom(expressionClass)) + && (expressionClass != UDFExpression.class) + ) { + Method cloneMethod = expressionClass.getMethod("clone", List.class); + assertNotNull(cloneMethod); + // ScalarFunctions that implement clone(List) don't need to implement a + // constructor that takes a List + if (cloneMethod.getDeclaringClass() == ScalarFunction.class) { + Constructor cons = expressionClass.getDeclaredConstructor(List.class); + assertTrue("Constructor for " + expressionClass + " is not public", + Modifier.isPublic(cons.getModifiers())); + } } + } catch (Exception e) { + throw new RuntimeException("Unable to find required List constructor " + + types[i].getExpressionClass().getName(), e); + } } + } - @Test - public void testNoArgumentConstructors() { - ExpressionType[] types = ExpressionType.values(); - for(int i = 0; i < types.length; i++) { - try { - if(!AggregateFunction.class.isAssignableFrom(types[i].getExpressionClass())) { - Constructor cons = types[i].getExpressionClass().getDeclaredConstructor(); - cons.setAccessible(true); - cons.newInstance(); - } - } catch (NoSuchMethodException e) { - throw new RuntimeException(e); - } catch (Exception e) { - } + @Test + public void testNoArgumentConstructors() { + ExpressionType[] types = ExpressionType.values(); + for (int i = 0; i < types.length; i++) { + try { + if (!AggregateFunction.class.isAssignableFrom(types[i].getExpressionClass())) { + Constructor cons = types[i].getExpressionClass().getDeclaredConstructor(); + cons.setAccessible(true); + cons.newInstance(); } + } catch (NoSuchMethodException e) { + throw new RuntimeException(e); + } catch (Exception e) { + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java index 
af02046affb..96c38036b1f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,217 +35,236 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.primitives.UnsignedBytes; +import org.junit.Test; /** * "Unit" tests for CollationKeyFunction - * */ public class CollationKeyFunctionTest { - private static String[] chineseChars = new String[] { "\u963f", "\u55c4", "\u963e", "\u554a", "\u4ec8", "\u3d9a", - "\u9f51" }; - - private static Comparator collationKeyComparator = UnsignedBytes.lexicographicalComparator(); - - private static Comparator collationKeyAndIndexComparator = new Comparator() { - @Override - public int compare(ByteArrayAndInteger o1, ByteArrayAndInteger o2) { - int compareResult = collationKeyComparator.compare(o1.byteArray, o2.byteArray); - if (compareResult == 0) { - compareResult = o1.integer.compareTo(o2.integer); - } - return compareResult; - } - }; - - private static class ByteArrayAndInteger { - - private ByteArrayAndInteger(byte[] byteArray, Integer integer) { - super(); - this.byteArray = byteArray; - this.integer = integer; - } - - byte[] byteArray; - Integer integer; - - public String toString() { - return ToStringBuilder.reflectionToString(this); - } - - public static ByteArrayAndInteger findFirstIntegerMatch(List list, - Integer matchingInteger) { - for (ByteArrayAndInteger entry : list) { - if (entry.integer.equals(matchingInteger)) { - return entry; - } - } - return null; - } - } - - @Test - public void testZhSort() throws Exception { - testSortOrderNoEquals(chineseChars, "zh", Boolean.FALSE, null, null, new Integer[] { 4, 3, 1, 5, 2, 0, 6 }); - - } - - @Test - public void testZhTwSort() throws Exception { - testSortOrderNoEquals(chineseChars, "zh_TW", Boolean.FALSE, null, null, new Integer[] { 4, 3, 1, 5, 2, 0, 6 }); - } - - @Test - public void testZhTwStrokeSort() throws Exception { - testSortOrderNoEquals(chineseChars, "zh_TW_STROKE", Boolean.FALSE, null, null, - new Integer[] { 4, 2, 0, 3, 1, 6, 5 }); - } - - @Test - public void testZhStrokeSort() throws Exception { - testSortOrderNoEquals(chineseChars, "zh__STROKE", Boolean.FALSE, null, null, - new Integer[] { 4, 2, 0, 3, 1, 6, 5 }); - } - - @Test - public void testZhPinyinSort() throws Exception { - testSortOrderNoEquals(chineseChars, "zh__PINYIN", Boolean.FALSE, null, null, - new Integer[] { 0, 1, 3, 4, 6, 2, 5 }); - } - - @Test - public void testUpperCaseCollationKeyBytes() throws Exception { - testCollationKeysEqual(new String[] { "abcdef", "ABCDEF", "aBcDeF" }, "en", Boolean.TRUE, null, null); - } - - @Test - public void testNullCollationKey() throws Exception { - List collationKeys = calculateCollationKeys(new String[] { null }, "en", null, null, null); - assertNull(collationKeys.get(0).byteArray); - } - - @Test - public void 
testEqualCollationKeysForPrimaryStrength() throws Exception { - // "a", "A", "ä" are considered equivalent - testCollationKeysEqual(new String[] { "a", "A", "ä" }, "en", Boolean.FALSE, Collator.PRIMARY, null); - testSortOrderNoEquals(new String[] { "b", "a" }, "en", Boolean.FALSE, Collator.PRIMARY, null, - new Integer[] { 1, 0 }); - - } - - @Test - public void testCollationKeyBytesForSecondaryStrength() throws Exception { - // "a" and "A" are considered equivalent but not "ä" - testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, Collator.SECONDARY, null); - testSortOrderNoEquals(new String[] { "b", "a", "ä" }, "en", Boolean.FALSE, Collator.SECONDARY, null, - new Integer[] { 1, 2, 0 }); - } - - @Test - public void testCollationKeyBytesForTertiaryStrength() throws Exception { - // none of these are considered equivalent - testSortOrderNoEquals(new String[] { "b", "a", "ä", "A" }, "en", Boolean.FALSE, Collator.TERTIARY, null, - new Integer[] { 1, 3, 2, 0 }); - } - - /** - * Just test that changing the decomposition mode works for basic sorting. - * TODO: Actually test for the accented characters and languages where this - * actually matters. - */ - @Test - public void testCollationKeyBytesForFullDecomposition() throws Exception { - testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, null, Collator.FULL_DECOMPOSITION); - } - - /** HELPER METHODS **/ - private void testSortOrderNoEquals(String[] inputStrings, String locale, Boolean uppercaseCollator, - Integer strength, Integer decomposition, Integer[] expectedOrder) throws Exception { - List sortedCollationKeysAndIndexes = calculateCollationKeys(inputStrings, locale, - uppercaseCollator, strength, decomposition); - Collections.sort(sortedCollationKeysAndIndexes, collationKeyAndIndexComparator); - testCollationKeysNotEqual(inputStrings, sortedCollationKeysAndIndexes); - - Integer[] sortedIndexes = new Integer[sortedCollationKeysAndIndexes.size()]; - for (int i = 0; i < sortedIndexes.length; i++) { - sortedIndexes[i] = sortedCollationKeysAndIndexes.get(i).integer; - } - assertArrayEquals(expectedOrder, sortedIndexes); - } - - private List calculateCollationKeys(String[] inputStrings, String locale, - Boolean upperCaseCollator, Integer strength, Integer decomposition) throws Exception { - List collationKeysAndIndexes = Lists.newArrayList(); - for (int i = 0; i < inputStrings.length; i++) { - byte[] thisCollationKeyBytes = callFunction(inputStrings[i], locale, upperCaseCollator, strength, - decomposition, SortOrder.ASC); - collationKeysAndIndexes.add(new ByteArrayAndInteger(thisCollationKeyBytes, i)); - } - return collationKeysAndIndexes; - } - - private void testCollationKeysEqual(String[] inputStrings, String locale, Boolean upperCaseCollator, - Integer strength, Integer decomposition) throws Exception { - List collationKeysAndIndexes = calculateCollationKeys(inputStrings, locale, - upperCaseCollator, strength, decomposition); - - for (int i = 0, j = 1; i < inputStrings.length && j < inputStrings.length; i++, j++) { - byte[] iByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray; - byte[] jByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray; - boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0; - if (!isPairEqual) { - fail(String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were not equal", - inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray), - 
Hex.encodeHexString(jByteArray))); - } - } - } - - private void testCollationKeysNotEqual(String[] inputStrings, List collationKeysAndIndexes) - throws Exception { - for (int i = 0; i < inputStrings.length; i++) { - for (int j = i + 1; j < inputStrings.length; j++) { - byte[] iByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray; - byte[] jByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray; - boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0; - if (isPairEqual) { - fail(String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were equal", - inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray), - Hex.encodeHexString(jByteArray))); - } - } - } - } - - private static byte[] callFunction(String inputStr, String localeIsoCode, Boolean upperCaseCollator, - Integer strength, Integer decomposition, SortOrder sortOrder) throws Exception { - LiteralExpression inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, strengthLiteral, - decompositionLiteral; - inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); - localeIsoCodeLiteral = LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); - upperCaseBooleanLiteral = LiteralExpression.newConstant(upperCaseCollator, PBoolean.INSTANCE, sortOrder); - strengthLiteral = LiteralExpression.newConstant(strength, PInteger.INSTANCE, sortOrder); - decompositionLiteral = LiteralExpression.newConstant(decomposition, PInteger.INSTANCE, sortOrder); - return callFunction(inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, strengthLiteral, - decompositionLiteral); - - } - - private static byte[] callFunction(LiteralExpression inputStrLiteral, LiteralExpression localeIsoCodeLiteral, - LiteralExpression upperCaseBooleanLiteral, LiteralExpression strengthLiteral, - LiteralExpression decompositionLiteral) throws Exception { - List expressions = Lists.newArrayList((Expression) inputStrLiteral, - (Expression) localeIsoCodeLiteral, (Expression) upperCaseBooleanLiteral, (Expression) strengthLiteral, - (Expression) decompositionLiteral); - Expression collationKeyFunction = new CollationKeyFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean ret = collationKeyFunction.evaluate(null, ptr); - byte[] result = ret - ? 
(byte[]) collationKeyFunction.getDataType().toObject(ptr, collationKeyFunction.getSortOrder()) : null; - return result; - } + private static String[] chineseChars = + new String[] { "\u963f", "\u55c4", "\u963e", "\u554a", "\u4ec8", "\u3d9a", "\u9f51" }; + + private static Comparator collationKeyComparator = + UnsignedBytes.lexicographicalComparator(); + + private static Comparator collationKeyAndIndexComparator = + new Comparator() { + @Override + public int compare(ByteArrayAndInteger o1, ByteArrayAndInteger o2) { + int compareResult = collationKeyComparator.compare(o1.byteArray, o2.byteArray); + if (compareResult == 0) { + compareResult = o1.integer.compareTo(o2.integer); + } + return compareResult; + } + }; + + private static class ByteArrayAndInteger { + + private ByteArrayAndInteger(byte[] byteArray, Integer integer) { + super(); + this.byteArray = byteArray; + this.integer = integer; + } + + byte[] byteArray; + Integer integer; + + public String toString() { + return ToStringBuilder.reflectionToString(this); + } + + public static ByteArrayAndInteger findFirstIntegerMatch(List list, + Integer matchingInteger) { + for (ByteArrayAndInteger entry : list) { + if (entry.integer.equals(matchingInteger)) { + return entry; + } + } + return null; + } + } + + @Test + public void testZhSort() throws Exception { + testSortOrderNoEquals(chineseChars, "zh", Boolean.FALSE, null, null, + new Integer[] { 4, 3, 1, 5, 2, 0, 6 }); + + } + + @Test + public void testZhTwSort() throws Exception { + testSortOrderNoEquals(chineseChars, "zh_TW", Boolean.FALSE, null, null, + new Integer[] { 4, 3, 1, 5, 2, 0, 6 }); + } + + @Test + public void testZhTwStrokeSort() throws Exception { + testSortOrderNoEquals(chineseChars, "zh_TW_STROKE", Boolean.FALSE, null, null, + new Integer[] { 4, 2, 0, 3, 1, 6, 5 }); + } + + @Test + public void testZhStrokeSort() throws Exception { + testSortOrderNoEquals(chineseChars, "zh__STROKE", Boolean.FALSE, null, null, + new Integer[] { 4, 2, 0, 3, 1, 6, 5 }); + } + + @Test + public void testZhPinyinSort() throws Exception { + testSortOrderNoEquals(chineseChars, "zh__PINYIN", Boolean.FALSE, null, null, + new Integer[] { 0, 1, 3, 4, 6, 2, 5 }); + } + + @Test + public void testUpperCaseCollationKeyBytes() throws Exception { + testCollationKeysEqual(new String[] { "abcdef", "ABCDEF", "aBcDeF" }, "en", Boolean.TRUE, null, + null); + } + + @Test + public void testNullCollationKey() throws Exception { + List collationKeys = + calculateCollationKeys(new String[] { null }, "en", null, null, null); + assertNull(collationKeys.get(0).byteArray); + } + + @Test + public void testEqualCollationKeysForPrimaryStrength() throws Exception { + // "a", "A", "ä" are considered equivalent + testCollationKeysEqual(new String[] { "a", "A", "ä" }, "en", Boolean.FALSE, Collator.PRIMARY, + null); + testSortOrderNoEquals(new String[] { "b", "a" }, "en", Boolean.FALSE, Collator.PRIMARY, null, + new Integer[] { 1, 0 }); + + } + + @Test + public void testCollationKeyBytesForSecondaryStrength() throws Exception { + // "a" and "A" are considered equivalent but not "ä" + testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, Collator.SECONDARY, + null); + testSortOrderNoEquals(new String[] { "b", "a", "ä" }, "en", Boolean.FALSE, Collator.SECONDARY, + null, new Integer[] { 1, 2, 0 }); + } + + @Test + public void testCollationKeyBytesForTertiaryStrength() throws Exception { + // none of these are considered equivalent + testSortOrderNoEquals(new String[] { "b", "a", "ä", "A" }, "en", Boolean.FALSE, + 
Collator.TERTIARY, null, new Integer[] { 1, 3, 2, 0 }); + } + + /** + * Just test that changing the decomposition mode works for basic sorting. TODO: Actually test for + * the accented characters and languages where this actually matters. + */ + @Test + public void testCollationKeyBytesForFullDecomposition() throws Exception { + testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, null, + Collator.FULL_DECOMPOSITION); + } + + /** HELPER METHODS **/ + private void testSortOrderNoEquals(String[] inputStrings, String locale, + Boolean uppercaseCollator, Integer strength, Integer decomposition, Integer[] expectedOrder) + throws Exception { + List sortedCollationKeysAndIndexes = + calculateCollationKeys(inputStrings, locale, uppercaseCollator, strength, decomposition); + Collections.sort(sortedCollationKeysAndIndexes, collationKeyAndIndexComparator); + testCollationKeysNotEqual(inputStrings, sortedCollationKeysAndIndexes); + + Integer[] sortedIndexes = new Integer[sortedCollationKeysAndIndexes.size()]; + for (int i = 0; i < sortedIndexes.length; i++) { + sortedIndexes[i] = sortedCollationKeysAndIndexes.get(i).integer; + } + assertArrayEquals(expectedOrder, sortedIndexes); + } + + private List calculateCollationKeys(String[] inputStrings, String locale, + Boolean upperCaseCollator, Integer strength, Integer decomposition) throws Exception { + List collationKeysAndIndexes = Lists.newArrayList(); + for (int i = 0; i < inputStrings.length; i++) { + byte[] thisCollationKeyBytes = callFunction(inputStrings[i], locale, upperCaseCollator, + strength, decomposition, SortOrder.ASC); + collationKeysAndIndexes.add(new ByteArrayAndInteger(thisCollationKeyBytes, i)); + } + return collationKeysAndIndexes; + } + + private void testCollationKeysEqual(String[] inputStrings, String locale, + Boolean upperCaseCollator, Integer strength, Integer decomposition) throws Exception { + List collationKeysAndIndexes = + calculateCollationKeys(inputStrings, locale, upperCaseCollator, strength, decomposition); + + for (int i = 0, j = 1; i < inputStrings.length && j < inputStrings.length; i++, j++) { + byte[] iByteArray = + ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray; + byte[] jByteArray = + ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray; + boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0; + if (!isPairEqual) { + fail( + String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were not equal", + inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray), + Hex.encodeHexString(jByteArray))); + } + } + } + + private void testCollationKeysNotEqual(String[] inputStrings, + List collationKeysAndIndexes) throws Exception { + for (int i = 0; i < inputStrings.length; i++) { + for (int j = i + 1; j < inputStrings.length; j++) { + byte[] iByteArray = + ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray; + byte[] jByteArray = + ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray; + boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0; + if (isPairEqual) { + fail( + String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were equal", + inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray), + Hex.encodeHexString(jByteArray))); + } + } + } + } + + private static byte[] callFunction(String inputStr, String localeIsoCode, + Boolean upperCaseCollator, Integer strength, Integer 
decomposition, SortOrder sortOrder) + throws Exception { + LiteralExpression inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, + strengthLiteral, decompositionLiteral; + inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); + localeIsoCodeLiteral = + LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); + upperCaseBooleanLiteral = + LiteralExpression.newConstant(upperCaseCollator, PBoolean.INSTANCE, sortOrder); + strengthLiteral = LiteralExpression.newConstant(strength, PInteger.INSTANCE, sortOrder); + decompositionLiteral = + LiteralExpression.newConstant(decomposition, PInteger.INSTANCE, sortOrder); + return callFunction(inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, + strengthLiteral, decompositionLiteral); + + } + + private static byte[] callFunction(LiteralExpression inputStrLiteral, + LiteralExpression localeIsoCodeLiteral, LiteralExpression upperCaseBooleanLiteral, + LiteralExpression strengthLiteral, LiteralExpression decompositionLiteral) throws Exception { + List expressions = Lists.newArrayList((Expression) inputStrLiteral, + (Expression) localeIsoCodeLiteral, (Expression) upperCaseBooleanLiteral, + (Expression) strengthLiteral, (Expression) decompositionLiteral); + Expression collationKeyFunction = new CollationKeyFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean ret = collationKeyFunction.evaluate(null, ptr); + byte[] result = ret + ? (byte[]) collationKeyFunction.getDataType().toObject(ptr, + collationKeyFunction.getSortOrder()) + : null; + return result; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunctionTest.java index 66099f76240..1f24f2b9240 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/ExternalSqlTypeIdFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,51 +29,49 @@ import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PIntegerArray; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class ExternalSqlTypeIdFunctionTest { - @Test - public void testEvaluate() throws SQLException { - Expression inputArg = LiteralExpression.newConstant( - PInteger.INSTANCE.getSqlType(), PInteger.INSTANCE); + @Test + public void testEvaluate() throws SQLException { + Expression inputArg = + LiteralExpression.newConstant(PInteger.INSTANCE.getSqlType(), PInteger.INSTANCE); - Object returnValue = executeFunction(inputArg); + Object returnValue = executeFunction(inputArg); - assertEquals(Types.INTEGER, returnValue); - } + assertEquals(Types.INTEGER, returnValue); + } - @Test - public void testEvaluateArrayType() throws SQLException { - Expression inputArg = LiteralExpression.newConstant( - PIntegerArray.INSTANCE.getSqlType(), PInteger.INSTANCE); + @Test + public void testEvaluateArrayType() throws SQLException { + Expression inputArg = + LiteralExpression.newConstant(PIntegerArray.INSTANCE.getSqlType(), PInteger.INSTANCE); - Object returnValue = executeFunction(inputArg); + Object returnValue = executeFunction(inputArg); - assertEquals(Types.ARRAY, returnValue); - } + assertEquals(Types.ARRAY, returnValue); + } - @Test - public void testClone() throws SQLException { - Expression inputArg = LiteralExpression.newConstant( - PIntegerArray.INSTANCE.getSqlType(), PInteger.INSTANCE); - List args = Lists.newArrayList(inputArg); - ExternalSqlTypeIdFunction externalIdFunction = - new ExternalSqlTypeIdFunction(args); - ScalarFunction clone = externalIdFunction.clone(args); - assertEquals(externalIdFunction, clone); - } + @Test + public void testClone() throws SQLException { + Expression inputArg = + LiteralExpression.newConstant(PIntegerArray.INSTANCE.getSqlType(), PInteger.INSTANCE); + List args = Lists.newArrayList(inputArg); + ExternalSqlTypeIdFunction externalIdFunction = new ExternalSqlTypeIdFunction(args); + ScalarFunction clone = externalIdFunction.clone(args); + assertEquals(externalIdFunction, clone); + } - private Object executeFunction(Expression inputArg) throws SQLException { - ExternalSqlTypeIdFunction externalIdFunction = - new ExternalSqlTypeIdFunction(Lists.newArrayList(inputArg)); + private Object executeFunction(Expression inputArg) throws SQLException { + ExternalSqlTypeIdFunction externalIdFunction = + new ExternalSqlTypeIdFunction(Lists.newArrayList(inputArg)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - assertTrue(externalIdFunction.evaluate(null, ptr)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + assertTrue(externalIdFunction.evaluate(null, ptr)); - return PInteger.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), - PInteger.INSTANCE, inputArg.getSortOrder()); - } + return PInteger.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), + PInteger.INSTANCE, inputArg.getSortOrder()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java index 6fd16ec799a..293b016a22a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/InstrFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,88 +35,89 @@ public class InstrFunctionTest { - private static Object evaluateExpression(String value, PDataType dataType, String strToSearch, SortOrder order) throws SQLException { - Expression inputArg = LiteralExpression.newConstant(value,dataType,order); - - Expression strToSearchExp = LiteralExpression.newConstant(strToSearch,dataType); - List expressions = Arrays.asList(inputArg,strToSearchExp); - Expression instrFunction = new InstrFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - instrFunction.evaluate(null,ptr); - return instrFunction.getDataType().toObject(ptr); - } - - public static void inputExpression(String value, PDataType dataType, String strToSearch,Integer expected, SortOrder order) throws SQLException { - Object obj = evaluateExpression(value, dataType, strToSearch, order); - assertNotNull("Result was unexpectedly null", obj); - assertTrue(((Integer) obj).compareTo(expected) == 0); - } - - public static void inputNullExpression(String value, PDataType dataType, String strToSearch, SortOrder order) throws SQLException { - Object obj = evaluateExpression(value, dataType, strToSearch, order); - assertNull("Result was unexpectedly non-null", obj); - } - - - @Test - public void testInstrFunction() throws SQLException { - inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 6, SortOrder.ASC); - - inputExpression("abcdefghijkl",PVarchar.INSTANCE, "fgh", 6, SortOrder.DESC); - - inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 6, SortOrder.ASC); - - inputExpression("abcde fghijkl",PVarchar.INSTANCE, " fgh", 6, SortOrder.DESC); - - inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", 0, SortOrder.DESC); - - inputExpression("abcde fghijkl",PVarchar.INSTANCE, "lmn", 0, SortOrder.ASC); - - inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 6, SortOrder.ASC); - - inputExpression("ABCDEFGHIJKL",PVarchar.INSTANCE, "FGH", 6, SortOrder.DESC); - - inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 9, SortOrder.ASC); - - inputExpression("ABCDEFGHiJKL",PVarchar.INSTANCE, "iJKL", 9, SortOrder.DESC); - - inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 6, SortOrder.ASC); - - inputExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, " ", 6, SortOrder.DESC); - - // Phoenix can't represent empty strings, so an empty or null search string should return null - // See PHOENIX-4884 for more chatter. 
- inputNullExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", SortOrder.ASC); - inputNullExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, "", SortOrder.DESC); - inputNullExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, null, SortOrder.ASC); - inputNullExpression("ABCDE FGHiJKL",PVarchar.INSTANCE, null, SortOrder.DESC); - - inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 1, SortOrder.ASC); - - inputExpression("ABCDEABC",PVarchar.INSTANCE, "ABC", 1, SortOrder.DESC); - - inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 3, SortOrder.ASC); - - inputExpression("AB01CDEABC",PVarchar.INSTANCE, "01C", 3, SortOrder.DESC); - - inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 5, SortOrder.ASC); - - inputExpression("ABCD%EFGH",PVarchar.INSTANCE, "%", 5, SortOrder.DESC); - - //Tests for MultiByte Characters - - inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.ASC); - - inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.DESC); - - inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.ASC); - - inputExpression("AɚɦFGH",PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.DESC); - - inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.ASC); - - inputExpression("AɚɦF/GH",PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.DESC); - } - + private static Object evaluateExpression(String value, PDataType dataType, String strToSearch, + SortOrder order) throws SQLException { + Expression inputArg = LiteralExpression.newConstant(value, dataType, order); + + Expression strToSearchExp = LiteralExpression.newConstant(strToSearch, dataType); + List expressions = Arrays. asList(inputArg, strToSearchExp); + Expression instrFunction = new InstrFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + instrFunction.evaluate(null, ptr); + return instrFunction.getDataType().toObject(ptr); + } + + public static void inputExpression(String value, PDataType dataType, String strToSearch, + Integer expected, SortOrder order) throws SQLException { + Object obj = evaluateExpression(value, dataType, strToSearch, order); + assertNotNull("Result was unexpectedly null", obj); + assertTrue(((Integer) obj).compareTo(expected) == 0); + } + + public static void inputNullExpression(String value, PDataType dataType, String strToSearch, + SortOrder order) throws SQLException { + Object obj = evaluateExpression(value, dataType, strToSearch, order); + assertNull("Result was unexpectedly non-null", obj); + } + + @Test + public void testInstrFunction() throws SQLException { + inputExpression("abcdefghijkl", PVarchar.INSTANCE, "fgh", 6, SortOrder.ASC); + + inputExpression("abcdefghijkl", PVarchar.INSTANCE, "fgh", 6, SortOrder.DESC); + + inputExpression("abcde fghijkl", PVarchar.INSTANCE, " fgh", 6, SortOrder.ASC); + + inputExpression("abcde fghijkl", PVarchar.INSTANCE, " fgh", 6, SortOrder.DESC); + + inputExpression("abcde fghijkl", PVarchar.INSTANCE, "lmn", 0, SortOrder.DESC); + + inputExpression("abcde fghijkl", PVarchar.INSTANCE, "lmn", 0, SortOrder.ASC); + + inputExpression("ABCDEFGHIJKL", PVarchar.INSTANCE, "FGH", 6, SortOrder.ASC); + + inputExpression("ABCDEFGHIJKL", PVarchar.INSTANCE, "FGH", 6, SortOrder.DESC); + + inputExpression("ABCDEFGHiJKL", PVarchar.INSTANCE, "iJKL", 9, SortOrder.ASC); + + inputExpression("ABCDEFGHiJKL", PVarchar.INSTANCE, "iJKL", 9, SortOrder.DESC); + + inputExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, " ", 6, SortOrder.ASC); + + inputExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, " ", 6, SortOrder.DESC); + + // Phoenix can't represent empty strings, 
so an empty or null search string should return null + // See PHOENIX-4884 for more chatter. + inputNullExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, "", SortOrder.ASC); + inputNullExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, "", SortOrder.DESC); + inputNullExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, null, SortOrder.ASC); + inputNullExpression("ABCDE FGHiJKL", PVarchar.INSTANCE, null, SortOrder.DESC); + + inputExpression("ABCDEABC", PVarchar.INSTANCE, "ABC", 1, SortOrder.ASC); + + inputExpression("ABCDEABC", PVarchar.INSTANCE, "ABC", 1, SortOrder.DESC); + + inputExpression("AB01CDEABC", PVarchar.INSTANCE, "01C", 3, SortOrder.ASC); + + inputExpression("AB01CDEABC", PVarchar.INSTANCE, "01C", 3, SortOrder.DESC); + + inputExpression("ABCD%EFGH", PVarchar.INSTANCE, "%", 5, SortOrder.ASC); + + inputExpression("ABCD%EFGH", PVarchar.INSTANCE, "%", 5, SortOrder.DESC); + + // Tests for MultiByte Characters + + inputExpression("AɚɦFGH", PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.ASC); + + inputExpression("AɚɦFGH", PVarchar.INSTANCE, "ɚɦ", 2, SortOrder.DESC); + + inputExpression("AɚɦFGH", PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.ASC); + + inputExpression("AɚɦFGH", PVarchar.INSTANCE, "ɦFGH", 3, SortOrder.DESC); + + inputExpression("AɚɦF/GH", PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.ASC); + + inputExpression("AɚɦF/GH", PVarchar.INSTANCE, "ɦF/GH", 3, SortOrder.DESC); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/LowerFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/LowerFunctionTest.java index c5867634db4..4720bbc63df 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/LowerFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/LowerFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,67 +27,66 @@ import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * "Unit" tests for LowerFunction - * */ public class LowerFunctionTest { - - // These maps were obtained from Java API docs for java.lang.String - // https://docs.oracle.com/javase/8/docs/api/java/lang/String.html#toLowerCase-java.util.Locale- - private static ImmutableMap turkishLowerToUpperCaseMap = - ImmutableMap.of("\u0130", "\u0069", - "\u0049", "\u0131"); - - private static ImmutableMap anyLocaleLowerToUpperCaseMap = - ImmutableMap.of( "\u0399\u03a7\u0398\u03a5\u03a3", "\u03b9\u03c7\u03b8\u03c5\u03c2", - // IXΘϒΣ -> ιχθυς (the last character is the "lunate sigma") - "FrEnCh Fries", "french fries"); + // These maps were obtained from Java API docs for java.lang.String + // https://docs.oracle.com/javase/8/docs/api/java/lang/String.html#toLowerCase-java.util.Locale- + private static ImmutableMap turkishLowerToUpperCaseMap = + ImmutableMap.of("\u0130", "\u0069", "\u0049", "\u0131"); + + private static ImmutableMap anyLocaleLowerToUpperCaseMap = + ImmutableMap.of("\u0399\u03a7\u0398\u03a5\u03a3", "\u03b9\u03c7\u03b8\u03c5\u03c2", + // IXΘϒΣ -> ιχθυς (the last character is the "lunate sigma") + "FrEnCh Fries", "french fries"); + + @Test + public void testTurkishUpperCase() throws Exception { + testLowerToUpperCaseMap(turkishLowerToUpperCaseMap, "tr"); + } + + @Test + public void testUniversalUpperCaseNoLocale() throws Exception { + testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, null); + } + + @Test + public void testUniversalUpperCaseTurkish() throws Exception { + testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, "tr"); + } + + private void testLowerToUpperCaseMap(Map lowerToUpperMap, String locale) + throws Exception { + for (Map.Entry lowerUpperPair : lowerToUpperMap.entrySet()) { + String upperCaseResultAsc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.ASC); + String upperCaseResultDesc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.DESC); - @Test - public void testTurkishUpperCase() throws Exception { - testLowerToUpperCaseMap(turkishLowerToUpperCaseMap, "tr"); - } - - @Test - public void testUniversalUpperCaseNoLocale() throws Exception { - testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, null); - } - - @Test - public void testUniversalUpperCaseTurkish() throws Exception { - testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, "tr"); - } - - private void testLowerToUpperCaseMap(Map lowerToUpperMap, String locale) throws Exception { - for(Map.Entry lowerUpperPair: lowerToUpperMap.entrySet()) { - String upperCaseResultAsc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.ASC); - String upperCaseResultDesc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.DESC); - - assertEquals("Result of calling LowerFunction[ASC] on [" + lowerUpperPair.getKey() + "][" + locale + "] not as expected.", - lowerUpperPair.getValue(), upperCaseResultAsc); - assertEquals("Result of calling LowerFunction[DESC] on [" + 
lowerUpperPair.getKey() + "][" + locale + "] not as expected.", - lowerUpperPair.getValue(), upperCaseResultDesc); - } - } + assertEquals("Result of calling LowerFunction[ASC] on [" + lowerUpperPair.getKey() + "][" + + locale + "] not as expected.", lowerUpperPair.getValue(), upperCaseResultAsc); + assertEquals("Result of calling LowerFunction[DESC] on [" + lowerUpperPair.getKey() + "][" + + locale + "] not as expected.", lowerUpperPair.getValue(), upperCaseResultDesc); + } + } - private static String callFunction(String inputStr, String localeIsoCode, SortOrder sortOrder) throws Exception { - LiteralExpression inputStrLiteral, localeIsoCodeLiteral; - inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); - localeIsoCodeLiteral = LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); - List expressions = Lists.newArrayList((Expression) inputStrLiteral, - (Expression) localeIsoCodeLiteral); - Expression lowerFunction = new LowerFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean ret = lowerFunction.evaluate(null, ptr); - String result = ret - ? (String) lowerFunction.getDataType().toObject(ptr, lowerFunction.getSortOrder()) : null; - return result; - } + private static String callFunction(String inputStr, String localeIsoCode, SortOrder sortOrder) + throws Exception { + LiteralExpression inputStrLiteral, localeIsoCodeLiteral; + inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); + localeIsoCodeLiteral = + LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); + List expressions = + Lists.newArrayList((Expression) inputStrLiteral, (Expression) localeIsoCodeLiteral); + Expression lowerFunction = new LowerFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean ret = lowerFunction.evaluate(null, ptr); + String result = + ret ? (String) lowerFunction.getDataType().toObject(ptr, lowerFunction.getSortOrder()) : null; + return result; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/UpperFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/UpperFunctionTest.java index be8e2825a64..50b323ad76e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/UpperFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/UpperFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,66 +27,64 @@ import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PVarchar; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; /** * "Unit" tests for UpperFunction - * */ public class UpperFunctionTest { - - // These maps were obtained from Java API docs for java.lang.String - // https://docs.oracle.com/javase/8/docs/api/java/lang/String.html#toUpperCase-java.util.Locale- - private static ImmutableMap turkishLowerToUpperCaseMap = - ImmutableMap.of("\u0069", "\u0130", - "\u0131", "\u0049"); - - private static ImmutableMap anyLocaleLowerToUpperCaseMap = - ImmutableMap.of("\u00df", "\u0053\u0053", - "Fahrvergnügen", "FAHRVERGNÜGEN"); + // These maps were obtained from Java API docs for java.lang.String + // https://docs.oracle.com/javase/8/docs/api/java/lang/String.html#toUpperCase-java.util.Locale- + private static ImmutableMap turkishLowerToUpperCaseMap = + ImmutableMap.of("\u0069", "\u0130", "\u0131", "\u0049"); + + private static ImmutableMap anyLocaleLowerToUpperCaseMap = + ImmutableMap.of("\u00df", "\u0053\u0053", "Fahrvergnügen", "FAHRVERGNÜGEN"); + + @Test + public void testTurkishUpperCase() throws Exception { + testLowerToUpperCaseMap(turkishLowerToUpperCaseMap, "tr"); + } + + @Test + public void testUniversalUpperCaseNoLocale() throws Exception { + testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, null); + } + + @Test + public void testUniversalUpperCaseTurkish() throws Exception { + testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, "tr"); + } + + private void testLowerToUpperCaseMap(Map lowerToUpperMap, String locale) + throws Exception { + for (Map.Entry lowerUpperPair : lowerToUpperMap.entrySet()) { + String upperCaseResultAsc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.ASC); + String upperCaseResultDesc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.DESC); - @Test - public void testTurkishUpperCase() throws Exception { - testLowerToUpperCaseMap(turkishLowerToUpperCaseMap, "tr"); - } - - @Test - public void testUniversalUpperCaseNoLocale() throws Exception { - testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, null); - } - - @Test - public void testUniversalUpperCaseTurkish() throws Exception { - testLowerToUpperCaseMap(anyLocaleLowerToUpperCaseMap, "tr"); - } - - private void testLowerToUpperCaseMap(Map lowerToUpperMap, String locale) throws Exception { - for(Map.Entry lowerUpperPair: lowerToUpperMap.entrySet()) { - String upperCaseResultAsc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.ASC); - String upperCaseResultDesc = callFunction(lowerUpperPair.getKey(), locale, SortOrder.DESC); - - assertEquals("Result of calling UpperFunction[ASC] on [" + lowerUpperPair.getKey() + "][" + locale + "] not as expected.", - lowerUpperPair.getValue(), upperCaseResultAsc); - assertEquals("Result of calling UpperFunction[DESC] on [" + lowerUpperPair.getKey() + "][" + locale + "] not as expected.", - lowerUpperPair.getValue(), upperCaseResultDesc); - } - } + assertEquals("Result of calling UpperFunction[ASC] on [" + lowerUpperPair.getKey() + 
"][" + + locale + "] not as expected.", lowerUpperPair.getValue(), upperCaseResultAsc); + assertEquals("Result of calling UpperFunction[DESC] on [" + lowerUpperPair.getKey() + "][" + + locale + "] not as expected.", lowerUpperPair.getValue(), upperCaseResultDesc); + } + } - private static String callFunction(String inputStr, String localeIsoCode, SortOrder sortOrder) throws Exception { - LiteralExpression inputStrLiteral, localeIsoCodeLiteral; - inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); - localeIsoCodeLiteral = LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); - List expressions = Lists.newArrayList((Expression) inputStrLiteral, - (Expression) localeIsoCodeLiteral); - Expression upperFunction = new UpperFunction(expressions); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - boolean ret = upperFunction.evaluate(null, ptr); - String result = ret - ? (String) upperFunction.getDataType().toObject(ptr, upperFunction.getSortOrder()) : null; - return result; - } + private static String callFunction(String inputStr, String localeIsoCode, SortOrder sortOrder) + throws Exception { + LiteralExpression inputStrLiteral, localeIsoCodeLiteral; + inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder); + localeIsoCodeLiteral = + LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder); + List expressions = + Lists.newArrayList((Expression) inputStrLiteral, (Expression) localeIsoCodeLiteral); + Expression upperFunction = new UpperFunction(expressions); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + boolean ret = upperFunction.evaluate(null, ptr); + String result = + ret ? (String) upperFunction.getDataType().toObject(ptr, upperFunction.getSortOrder()) : null; + return result; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriterTest.java index 7ef03642964..33f0595805d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/rewrite/RowValueConstructorExpressionRewriterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,64 +15,59 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.expression.rewrite; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.sql.SQLException; +import java.util.List; + import org.apache.phoenix.expression.CoerceExpression; import org.apache.phoenix.expression.Determinism; import org.apache.phoenix.expression.Expression; import org.apache.phoenix.expression.RowValueConstructorExpression; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PFloat; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.junit.Test; import org.mockito.Mockito; -import java.sql.SQLException; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - public class RowValueConstructorExpressionRewriterTest { - @Test - public void testRewriteAllChildrenAsc() throws SQLException { - - - Expression ascChild = Mockito.mock(Expression.class); - Mockito.when(ascChild.getSortOrder()).thenReturn(SortOrder.ASC); - Mockito.when(ascChild.getDataType()).thenReturn(PFloat.INSTANCE); - Mockito.when(ascChild.getDeterminism()).thenReturn(Determinism.ALWAYS); - Mockito.when(ascChild.requiresFinalEvaluation()).thenReturn(true); + @Test + public void testRewriteAllChildrenAsc() throws SQLException { - Expression descChild = Mockito.mock(Expression.class); - Mockito.when(descChild.getSortOrder()).thenReturn(SortOrder.DESC); - Mockito.when(descChild.getDataType()).thenReturn(PFloat.INSTANCE); - Mockito.when(descChild.getDeterminism()).thenReturn(Determinism.ALWAYS); - Mockito.when(descChild.requiresFinalEvaluation()).thenReturn(true); + Expression ascChild = Mockito.mock(Expression.class); + Mockito.when(ascChild.getSortOrder()).thenReturn(SortOrder.ASC); + Mockito.when(ascChild.getDataType()).thenReturn(PFloat.INSTANCE); + Mockito.when(ascChild.getDeterminism()).thenReturn(Determinism.ALWAYS); + Mockito.when(ascChild.requiresFinalEvaluation()).thenReturn(true); - List children = ImmutableList.of(ascChild,descChild); - RowValueConstructorExpression expression = - new RowValueConstructorExpression(children,false); + Expression descChild = Mockito.mock(Expression.class); + Mockito.when(descChild.getSortOrder()).thenReturn(SortOrder.DESC); + Mockito.when(descChild.getDataType()).thenReturn(PFloat.INSTANCE); + Mockito.when(descChild.getDeterminism()).thenReturn(Determinism.ALWAYS); + Mockito.when(descChild.requiresFinalEvaluation()).thenReturn(true); + List children = ImmutableList.of(ascChild, descChild); + RowValueConstructorExpression expression = new RowValueConstructorExpression(children, false); - RowValueConstructorExpressionRewriter - rewriter = - RowValueConstructorExpressionRewriter.getSingleton(); + RowValueConstructorExpressionRewriter rewriter = + RowValueConstructorExpressionRewriter.getSingleton(); - RowValueConstructorExpression result = rewriter.rewriteAllChildrenAsc(expression); + RowValueConstructorExpression result = rewriter.rewriteAllChildrenAsc(expression); - assertEquals(2,result.getChildren().size()); + assertEquals(2, result.getChildren().size()); - Expression child1 = result.getChildren().get(0); - Expression child2 = result.getChildren().get(1); + Expression child1 = result.getChildren().get(0); + Expression child2 = result.getChildren().get(1); - assertEquals(SortOrder.ASC, child1.getSortOrder()); - assertEquals(SortOrder.ASC, child2.getSortOrder()); + 
assertEquals(SortOrder.ASC, child1.getSortOrder()); + assertEquals(SortOrder.ASC, child2.getSortOrder()); - assertEquals(ascChild, child1); - assertTrue(child2 instanceof CoerceExpression); - assertEquals(descChild, ((CoerceExpression)child2).getChild()); + assertEquals(ascChild, child1); + assertTrue(child2 instanceof CoerceExpression); + assertEquals(descChild, ((CoerceExpression) child2).getChild()); - } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java index 6722a71fb78..d08efa5510e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/util/regex/PatternPerformanceTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,123 +30,122 @@ public class PatternPerformanceTest { - static private class Timer { - private long startTimeStamp; + static private class Timer { + private long startTimeStamp; - public void reset() { - startTimeStamp = System.currentTimeMillis(); - } - - public double currentTime() { - return (System.currentTimeMillis() - startTimeStamp) / 1000.0; - } - - public void printTime(String hint) { - System.out.println(hint + " Time=" + currentTime()); - } - } - - private String[] data = new String[] { "ONE:TWO:THREE", "ABC:DEF", "PKU:THU:FDU" }; - private ImmutableBytesWritable[] dataPtr = new ImmutableBytesWritable[] { getPtr(data[0]), - getPtr(data[1]), getPtr(data[2]) }; - private String patternString; - private ImmutableBytesWritable resultPtr = new ImmutableBytesWritable(); - private int maxTimes = 10000000; - private Timer timer = new Timer(); - private final boolean ENABLE_ASSERT = false; - - private static ImmutableBytesWritable getPtr(String str) { - return new ImmutableBytesWritable(PVarchar.INSTANCE.toBytes(str)); - } - - private void testReplaceAll(ImmutableBytesWritable replacePtr, AbstractBasePattern pattern, - String name) { - timer.reset(); - for (int i = 0; i < maxTimes; ++i) { - ImmutableBytesWritable ptr = dataPtr[i % 3]; - resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - pattern.replaceAll(resultPtr, replacePtr.get(), replacePtr.getOffset(), - replacePtr.getLength()); - if (ENABLE_ASSERT) { - String result = (String) PVarchar.INSTANCE.toObject(resultPtr); - assertTrue((i % 3 == 1 && ":".equals(result)) - || (i % 3 != 1 && "::".equals(result))); - } - } - timer.printTime(name); - } - - public void testReplaceAll() { - patternString = "[A-Z]+"; - ImmutableBytesWritable replacePtr = getPtr(""); - testReplaceAll(replacePtr, new JavaPattern(patternString), "Java replaceAll"); - testReplaceAll(replacePtr, new JONIPattern(patternString), "JONI replaceAll"); + public void reset() { + startTimeStamp = System.currentTimeMillis(); } - private void testLike(AbstractBasePattern pattern, String name) { - timer.reset(); - for (int i = 0; i < maxTimes; ++i) { - ImmutableBytesWritable ptr = dataPtr[i % 3]; - resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - pattern.matches(resultPtr); - if (ENABLE_ASSERT) { - Boolean b = (Boolean) 
PBoolean.INSTANCE.toObject(resultPtr); - assertTrue(i % 3 != 2 || b.booleanValue()); - } - } - timer.printTime(name); + public double currentTime() { + return (System.currentTimeMillis() - startTimeStamp) / 1000.0; } - public void testLike() { - patternString = "\\Q\\E.*\\QU\\E.*\\QU\\E.*\\QU\\E.*\\Q\\E"; - testLike(new JavaPattern(patternString), "Java Like"); - testLike(new JONIPattern(patternString), "JONI Like"); + public void printTime(String hint) { + System.out.println(hint + " Time=" + currentTime()); } - - private void testSubstr(AbstractBasePattern pattern, String name) { - timer.reset(); - for (int i = 0; i < maxTimes; ++i) { - ImmutableBytesWritable ptr = dataPtr[i % 3]; - resultPtr.set(ptr.get(),ptr.getOffset(),ptr.getLength()); - pattern.substr(resultPtr, 0); - if (ENABLE_ASSERT) { - assertTrue((i % 3 != 2 || ":THU".equals(PVarchar.INSTANCE.toObject(resultPtr)))); - } - } - timer.printTime(name); + } + + private String[] data = new String[] { "ONE:TWO:THREE", "ABC:DEF", "PKU:THU:FDU" }; + private ImmutableBytesWritable[] dataPtr = + new ImmutableBytesWritable[] { getPtr(data[0]), getPtr(data[1]), getPtr(data[2]) }; + private String patternString; + private ImmutableBytesWritable resultPtr = new ImmutableBytesWritable(); + private int maxTimes = 10000000; + private Timer timer = new Timer(); + private final boolean ENABLE_ASSERT = false; + + private static ImmutableBytesWritable getPtr(String str) { + return new ImmutableBytesWritable(PVarchar.INSTANCE.toBytes(str)); + } + + private void testReplaceAll(ImmutableBytesWritable replacePtr, AbstractBasePattern pattern, + String name) { + timer.reset(); + for (int i = 0; i < maxTimes; ++i) { + ImmutableBytesWritable ptr = dataPtr[i % 3]; + resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + pattern.replaceAll(resultPtr, replacePtr.get(), replacePtr.getOffset(), + replacePtr.getLength()); + if (ENABLE_ASSERT) { + String result = (String) PVarchar.INSTANCE.toObject(resultPtr); + assertTrue((i % 3 == 1 && ":".equals(result)) || (i % 3 != 1 && "::".equals(result))); + } } - - public void testSubstr() { - patternString = "\\:[A-Z]+"; - testSubstr(new JavaPattern(patternString), "Java Substr"); - testSubstr(new JONIPattern(patternString), "JONI Substr"); + timer.printTime(name); + } + + public void testReplaceAll() { + patternString = "[A-Z]+"; + ImmutableBytesWritable replacePtr = getPtr(""); + testReplaceAll(replacePtr, new JavaPattern(patternString), "Java replaceAll"); + testReplaceAll(replacePtr, new JONIPattern(patternString), "JONI replaceAll"); + } + + private void testLike(AbstractBasePattern pattern, String name) { + timer.reset(); + for (int i = 0; i < maxTimes; ++i) { + ImmutableBytesWritable ptr = dataPtr[i % 3]; + resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + pattern.matches(resultPtr); + if (ENABLE_ASSERT) { + Boolean b = (Boolean) PBoolean.INSTANCE.toObject(resultPtr); + assertTrue(i % 3 != 2 || b.booleanValue()); + } } - - private void testSplit(AbstractBaseSplitter pattern, String name) throws SQLException { - timer.reset(); - for (int i = 0; i < maxTimes; ++i) { - ImmutableBytesWritable ptr = dataPtr[i % 3]; - resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); - boolean ret = pattern.split(resultPtr); - if (ENABLE_ASSERT) { - PhoenixArray array = (PhoenixArray) PVarcharArray.INSTANCE.toObject(resultPtr); - assertTrue(ret && (i % 3 != 1 || ((String[]) array.getArray()).length == 2)); - } - } - timer.printTime(name); + timer.printTime(name); + } + + public void testLike() { + 
patternString = "\\Q\\E.*\\QU\\E.*\\QU\\E.*\\QU\\E.*\\Q\\E"; + testLike(new JavaPattern(patternString), "Java Like"); + testLike(new JONIPattern(patternString), "JONI Like"); + } + + private void testSubstr(AbstractBasePattern pattern, String name) { + timer.reset(); + for (int i = 0; i < maxTimes; ++i) { + ImmutableBytesWritable ptr = dataPtr[i % 3]; + resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + pattern.substr(resultPtr, 0); + if (ENABLE_ASSERT) { + assertTrue((i % 3 != 2 || ":THU".equals(PVarchar.INSTANCE.toObject(resultPtr)))); + } } - - public void testSplit() throws SQLException { - patternString = "\\:"; - testSplit(new GuavaSplitter(patternString), "GuavaSplit"); - testSplit(new JONIPattern(patternString), "JONI Split"); - } - - @Test - public void test() throws Exception { - // testLike(); - // testReplaceAll(); - // testSubstr(); - // testSplit(); + timer.printTime(name); + } + + public void testSubstr() { + patternString = "\\:[A-Z]+"; + testSubstr(new JavaPattern(patternString), "Java Substr"); + testSubstr(new JONIPattern(patternString), "JONI Substr"); + } + + private void testSplit(AbstractBaseSplitter pattern, String name) throws SQLException { + timer.reset(); + for (int i = 0; i < maxTimes; ++i) { + ImmutableBytesWritable ptr = dataPtr[i % 3]; + resultPtr.set(ptr.get(), ptr.getOffset(), ptr.getLength()); + boolean ret = pattern.split(resultPtr); + if (ENABLE_ASSERT) { + PhoenixArray array = (PhoenixArray) PVarcharArray.INSTANCE.toObject(resultPtr); + assertTrue(ret && (i % 3 != 1 || ((String[]) array.getArray()).length == 2)); + } } + timer.printTime(name); + } + + public void testSplit() throws SQLException { + patternString = "\\:"; + testSplit(new GuavaSplitter(patternString), "GuavaSplit"); + testSplit(new JONIPattern(patternString), "JONI Split"); + } + + @Test + public void test() throws Exception { + // testLike(); + // testReplaceAll(); + // testSubstr(); + // testSplit(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/DistinctPrefixFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/DistinctPrefixFilterTest.java index 5554d51b428..c88ac0c6133 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/filter/DistinctPrefixFilterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/DistinctPrefixFilterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,240 +35,248 @@ import junit.framework.TestCase; public class DistinctPrefixFilterTest extends TestCase { - private DistinctPrefixFilter createFilter(int[] widths, int prefixLength) { - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); - for (final int width : widths) { - builder.addField( - new PDatum() { - - @Override - public boolean isNullable() { - return width <= 0; - } - - @Override - public PDataType getDataType() { - return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return width <= 0 ? 
null : width; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - }, width <= 0, SortOrder.getDefault()); + private DistinctPrefixFilter createFilter(int[] widths, int prefixLength) { + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); + for (final int width : widths) { + builder.addField(new PDatum() { + + @Override + public boolean isNullable() { + return width <= 0; } - return new DistinctPrefixFilter(builder.build(), prefixLength); - } - - private void assertInclude(String next, Filter f) throws IOException { - assertInclude(Bytes.toBytes(next), f); - } - - private void assertInclude(byte[] next, Filter f) throws IOException { - Cell c = new KeyValue(next, ByteUtil.EMPTY_BYTE_ARRAY, ByteUtil.EMPTY_BYTE_ARRAY, 0, ByteUtil.EMPTY_BYTE_ARRAY); - assertTrue(f.filterCell(c) == ReturnCode.INCLUDE); - assertFalse(f.filterAllRemaining()); - } - - private void assertSeekAndHint(String next, Filter f, String rowHint) throws IOException { - assertSeekAndHint(next, f, rowHint, false); - } - - private void assertSeekAndHint(String next, Filter f, String rowHint, boolean filterAll) throws IOException { - assertSeekAndHint(Bytes.toBytes(next), f, Bytes.toBytes(rowHint), filterAll); - } - - private void assertSeekAndHint(byte[] next, Filter f, byte[] rowHint, boolean filterAll) throws IOException { - Cell c = new KeyValue(next, ByteUtil.EMPTY_BYTE_ARRAY, ByteUtil.EMPTY_BYTE_ARRAY, 0, ByteUtil.EMPTY_BYTE_ARRAY); - assertTrue(f.filterCell(c) == ReturnCode.SEEK_NEXT_USING_HINT); - Cell h = f.getNextCellHint(c); - byte[] hintBytes = rowHint; - assertTrue(Bytes.equals(hintBytes, 0, hintBytes.length, h.getRowArray(), h.getRowOffset(), h.getRowLength())); - assertEquals(filterAll, f.filterAllRemaining()); - } - - public void testSingleFixedWidth() throws Exception { - Filter f = createFilter(new int[]{3}, 1); - assertInclude("000", f); - assertInclude("001", f); - assertSeekAndHint("001", f, "002"); - assertInclude("003", f); - assertInclude("004", f); - assertInclude("005", f); - assertSeekAndHint("005", f, "006"); - - f = createFilter(new int[]{3}, 1); - f.setReversed(true); - assertInclude("005", f); - assertInclude("004", f); - assertSeekAndHint(new byte[]{'0','0','4'}, f, new byte[]{'0','0','4'}, false); - assertInclude("003", f); - assertInclude("002", f); - assertInclude("001", f); - assertSeekAndHint(new byte[]{'0','0','1'}, f, new byte[]{'0','0','1'}, false); - } - public void testMultiFixedWidth() throws Exception { - Filter f = createFilter(new int[]{5,4}, 1); - assertInclude("00000aaaa", f); - assertInclude("00001aaaa", f); - assertSeekAndHint("00001aaaa", f, "00002"); - assertInclude("00003aaaa", f); - assertInclude("00004aaaa", f); - assertInclude("00005aaaa", f); - assertSeekAndHint("00005aaaa", f, "00006"); - - f = createFilter(new int[]{5,4}, 2); - assertInclude("00000aaaa", f); - assertInclude("00001aaaa", f); - assertSeekAndHint("00001aaaa", f, "00001aaab"); - assertInclude("00003aaaa", f); - assertInclude("00004aaaa", f); - assertInclude("00005aaaa", f); - assertSeekAndHint("00005aaaa", f, "00005aaab"); - - f = createFilter(new int[]{3,2}, 1); - f.setReversed(true); - assertInclude("005aa", f); - assertInclude("004aa", f); - assertSeekAndHint(new byte[]{'0','0','4','a','a'}, f, new byte[]{'0','0','4'}, false); - assertInclude("003aa", f); - assertInclude("002aa", f); - assertInclude("001aa", f); - assertSeekAndHint(new byte[]{'0','0','1','a','a'}, f, new 
byte[]{'0','0','1'}, false); - - f = createFilter(new int[]{3,2}, 2); - f.setReversed(true); - assertInclude("005bb", f); - assertInclude("004bb", f); - assertInclude("003bb", f); - assertSeekAndHint(new byte[]{'0','0','3','b','b'}, f, new byte[]{'0','0','3','b','b'}, false); - assertInclude("003ba", f); - assertInclude("002bb", f); - assertInclude("001bb", f); - assertSeekAndHint(new byte[]{'0','0','1','b','b'}, f, new byte[]{'0','0','1','b','b'}, false); - } - - public void testSingleVariableWidth() throws Exception { - Filter f = createFilter(new int[]{-5}, 1); - assertInclude("00000", f); - assertInclude("00001", f); - assertSeekAndHint("00001", f, "00001\01"); - assertInclude("00003", f); - assertInclude("00004", f); - assertInclude("00005", f); - assertSeekAndHint("00005", f, "00005\01"); - } - - public void testVariableWithNull() throws Exception { - Filter f = createFilter(new int[]{-2,-2}, 1); - assertInclude("\00aa", f); - assertSeekAndHint("\00aa", f, "\01"); - assertSeekAndHint("\00aa", f, "\01"); - - f = createFilter(new int[]{-2,-2}, 2); - assertInclude("\00\00", f); - assertSeekAndHint("\00\00", f, "\00\00\01"); - assertSeekAndHint("\00\00", f, "\00\00\01"); - } + @Override + public PDataType getDataType() { + return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; + } - public void testMultiVariableWidth() throws Exception { - Filter f = createFilter(new int[]{-5,-4}, 1); - assertInclude("00000\00aaaa", f); - assertInclude("00001\00aaaa", f); - assertSeekAndHint("00001\00aaaa", f, "00001\01"); - assertInclude("00003\00aaaa", f); - assertInclude("00004\00aaaa", f); - assertInclude("00005\00aaaa", f); - assertSeekAndHint("00005\00aaaa", f, "00005\01"); - - f = createFilter(new int[]{-5,-4}, 2); - assertInclude("00000\00aaaa", f); - assertInclude("00001\00aaaa", f); - assertSeekAndHint("00001\00aaaa", f, "00001\00aaaa\01"); - assertInclude("00003\00aaaa", f); - assertInclude("00004\00aaaa", f); - assertInclude("00005\00aaaa", f); - assertSeekAndHint("00005\00aaaa", f, "00005\00aaaa\01"); - - f = createFilter(new int[]{-3,-2}, 1); - f.setReversed(true); - assertInclude("005\00aa", f); - assertInclude("004\00aa", f); - assertSeekAndHint(new byte[]{'0','0','4', 0, 'a', 'a'}, f, - new byte[] {'0','0','4'}, false); - - f = createFilter(new int[]{-3,-2}, 2); - f.setReversed(true); - assertInclude("005\00bb", f); - assertInclude("004\00bb", f); - assertSeekAndHint(new byte[]{'0','0','4', 0, 'b', 'b'}, f, - new byte[]{'0','0','4', 0, 'b', 'b'}, false); - } + @Override + public Integer getMaxLength() { + return width <= 0 ? 
null : width; + } - public void testFixedAfterVariable() throws Exception { - Filter f = createFilter(new int[]{-5,4}, 1); - assertInclude("00000\00aaaa", f); - assertInclude("00001\00aaaa", f); - assertSeekAndHint("00001\00aaaa", f, "00001\01"); - assertInclude("00003\00aaaa", f); - assertInclude("00004\00aaaa", f); - assertInclude("00005\00aaaa", f); - assertSeekAndHint("00005\00aaaa", f, "00005\01"); - - f = createFilter(new int[]{-5,4}, 2); - assertInclude("00000\00aaaa", f); - assertInclude("00001\00aaaa", f); - assertSeekAndHint("00001\00aaaa", f, "00001\00aaab"); - assertInclude("00003\00aaaa", f); - assertInclude("00004\00aaaa", f); - assertInclude("00005\00aaaa", f); - assertSeekAndHint("00005\00aaaa", f, "00005\00aaab"); - } + @Override + public Integer getScale() { + return null; + } - public void testVariableAfterFixed() throws Exception { - Filter f = createFilter(new int[]{5,-4}, 1); - assertInclude("00000aaaa", f); - assertInclude("00001aaaa", f); - assertSeekAndHint("00001aaaa", f, "00002"); - assertInclude("00003aaaa", f); - assertInclude("00004aaaa", f); - assertInclude("00005aaaa", f); - assertSeekAndHint("00005aaaa", f, "00006"); - - f = createFilter(new int[]{5,-4}, 2); - assertInclude("00000aaaa", f); - assertInclude("00001aaaa", f); - assertSeekAndHint("00001aaaa", f, "00001aaaa\01"); - assertInclude("00003aaaa", f); - assertInclude("00004aaaa", f); - assertInclude("00005aaaa", f); - assertSeekAndHint("00005aaaa", f, "00005aaaa\01"); - } + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } - public void testNoNextKey() throws Exception { - Filter f = createFilter(new int[]{2,2}, 1); - assertInclude("00cc", f); - assertInclude(new byte[]{-1,-1,20,20}, f); - // make sure we end the scan when we cannot increase a fixed length prefix - assertSeekAndHint(new byte[]{-1,-1,20,20}, f, new byte[]{-1,-1}, true); - assertSeekAndHint(new byte[]{-1,-1,20,20}, f, new byte[]{-1,-1}, true); - - f = createFilter(new int[]{2,2}, 1); - f.setReversed(true); - assertInclude(new byte[]{0,0,1,1}, f); - assertSeekAndHint(new byte[]{0,0,1,1}, f, new byte[]{0,0}, false); - assertSeekAndHint(new byte[]{0,0,1,1}, f, new byte[]{0,0}, false); + }, width <= 0, SortOrder.getDefault()); } + return new DistinctPrefixFilter(builder.build(), prefixLength); + } + + private void assertInclude(String next, Filter f) throws IOException { + assertInclude(Bytes.toBytes(next), f); + } + + private void assertInclude(byte[] next, Filter f) throws IOException { + Cell c = new KeyValue(next, ByteUtil.EMPTY_BYTE_ARRAY, ByteUtil.EMPTY_BYTE_ARRAY, 0, + ByteUtil.EMPTY_BYTE_ARRAY); + assertTrue(f.filterCell(c) == ReturnCode.INCLUDE); + assertFalse(f.filterAllRemaining()); + } + + private void assertSeekAndHint(String next, Filter f, String rowHint) throws IOException { + assertSeekAndHint(next, f, rowHint, false); + } + + private void assertSeekAndHint(String next, Filter f, String rowHint, boolean filterAll) + throws IOException { + assertSeekAndHint(Bytes.toBytes(next), f, Bytes.toBytes(rowHint), filterAll); + } + + private void assertSeekAndHint(byte[] next, Filter f, byte[] rowHint, boolean filterAll) + throws IOException { + Cell c = new KeyValue(next, ByteUtil.EMPTY_BYTE_ARRAY, ByteUtil.EMPTY_BYTE_ARRAY, 0, + ByteUtil.EMPTY_BYTE_ARRAY); + assertTrue(f.filterCell(c) == ReturnCode.SEEK_NEXT_USING_HINT); + Cell h = f.getNextCellHint(c); + byte[] hintBytes = rowHint; + assertTrue(Bytes.equals(hintBytes, 0, hintBytes.length, h.getRowArray(), h.getRowOffset(), + h.getRowLength())); + 
assertEquals(filterAll, f.filterAllRemaining()); + } + + public void testSingleFixedWidth() throws Exception { + Filter f = createFilter(new int[] { 3 }, 1); + assertInclude("000", f); + assertInclude("001", f); + assertSeekAndHint("001", f, "002"); + assertInclude("003", f); + assertInclude("004", f); + assertInclude("005", f); + assertSeekAndHint("005", f, "006"); + + f = createFilter(new int[] { 3 }, 1); + f.setReversed(true); + assertInclude("005", f); + assertInclude("004", f); + assertSeekAndHint(new byte[] { '0', '0', '4' }, f, new byte[] { '0', '0', '4' }, false); + assertInclude("003", f); + assertInclude("002", f); + assertInclude("001", f); + assertSeekAndHint(new byte[] { '0', '0', '1' }, f, new byte[] { '0', '0', '1' }, false); + } + + public void testMultiFixedWidth() throws Exception { + Filter f = createFilter(new int[] { 5, 4 }, 1); + assertInclude("00000aaaa", f); + assertInclude("00001aaaa", f); + assertSeekAndHint("00001aaaa", f, "00002"); + assertInclude("00003aaaa", f); + assertInclude("00004aaaa", f); + assertInclude("00005aaaa", f); + assertSeekAndHint("00005aaaa", f, "00006"); + + f = createFilter(new int[] { 5, 4 }, 2); + assertInclude("00000aaaa", f); + assertInclude("00001aaaa", f); + assertSeekAndHint("00001aaaa", f, "00001aaab"); + assertInclude("00003aaaa", f); + assertInclude("00004aaaa", f); + assertInclude("00005aaaa", f); + assertSeekAndHint("00005aaaa", f, "00005aaab"); + + f = createFilter(new int[] { 3, 2 }, 1); + f.setReversed(true); + assertInclude("005aa", f); + assertInclude("004aa", f); + assertSeekAndHint(new byte[] { '0', '0', '4', 'a', 'a' }, f, new byte[] { '0', '0', '4' }, + false); + assertInclude("003aa", f); + assertInclude("002aa", f); + assertInclude("001aa", f); + assertSeekAndHint(new byte[] { '0', '0', '1', 'a', 'a' }, f, new byte[] { '0', '0', '1' }, + false); + + f = createFilter(new int[] { 3, 2 }, 2); + f.setReversed(true); + assertInclude("005bb", f); + assertInclude("004bb", f); + assertInclude("003bb", f); + assertSeekAndHint(new byte[] { '0', '0', '3', 'b', 'b' }, f, + new byte[] { '0', '0', '3', 'b', 'b' }, false); + assertInclude("003ba", f); + assertInclude("002bb", f); + assertInclude("001bb", f); + assertSeekAndHint(new byte[] { '0', '0', '1', 'b', 'b' }, f, + new byte[] { '0', '0', '1', 'b', 'b' }, false); + } + + public void testSingleVariableWidth() throws Exception { + Filter f = createFilter(new int[] { -5 }, 1); + assertInclude("00000", f); + assertInclude("00001", f); + assertSeekAndHint("00001", f, "00001\01"); + assertInclude("00003", f); + assertInclude("00004", f); + assertInclude("00005", f); + assertSeekAndHint("00005", f, "00005\01"); + } + + public void testVariableWithNull() throws Exception { + Filter f = createFilter(new int[] { -2, -2 }, 1); + assertInclude("\00aa", f); + assertSeekAndHint("\00aa", f, "\01"); + assertSeekAndHint("\00aa", f, "\01"); + + f = createFilter(new int[] { -2, -2 }, 2); + assertInclude("\00\00", f); + assertSeekAndHint("\00\00", f, "\00\00\01"); + assertSeekAndHint("\00\00", f, "\00\00\01"); + } + + public void testMultiVariableWidth() throws Exception { + Filter f = createFilter(new int[] { -5, -4 }, 1); + assertInclude("00000\00aaaa", f); + assertInclude("00001\00aaaa", f); + assertSeekAndHint("00001\00aaaa", f, "00001\01"); + assertInclude("00003\00aaaa", f); + assertInclude("00004\00aaaa", f); + assertInclude("00005\00aaaa", f); + assertSeekAndHint("00005\00aaaa", f, "00005\01"); + + f = createFilter(new int[] { -5, -4 }, 2); + assertInclude("00000\00aaaa", f); + 
assertInclude("00001\00aaaa", f); + assertSeekAndHint("00001\00aaaa", f, "00001\00aaaa\01"); + assertInclude("00003\00aaaa", f); + assertInclude("00004\00aaaa", f); + assertInclude("00005\00aaaa", f); + assertSeekAndHint("00005\00aaaa", f, "00005\00aaaa\01"); + + f = createFilter(new int[] { -3, -2 }, 1); + f.setReversed(true); + assertInclude("005\00aa", f); + assertInclude("004\00aa", f); + assertSeekAndHint(new byte[] { '0', '0', '4', 0, 'a', 'a' }, f, new byte[] { '0', '0', '4' }, + false); + + f = createFilter(new int[] { -3, -2 }, 2); + f.setReversed(true); + assertInclude("005\00bb", f); + assertInclude("004\00bb", f); + assertSeekAndHint(new byte[] { '0', '0', '4', 0, 'b', 'b' }, f, + new byte[] { '0', '0', '4', 0, 'b', 'b' }, false); + } + + public void testFixedAfterVariable() throws Exception { + Filter f = createFilter(new int[] { -5, 4 }, 1); + assertInclude("00000\00aaaa", f); + assertInclude("00001\00aaaa", f); + assertSeekAndHint("00001\00aaaa", f, "00001\01"); + assertInclude("00003\00aaaa", f); + assertInclude("00004\00aaaa", f); + assertInclude("00005\00aaaa", f); + assertSeekAndHint("00005\00aaaa", f, "00005\01"); + + f = createFilter(new int[] { -5, 4 }, 2); + assertInclude("00000\00aaaa", f); + assertInclude("00001\00aaaa", f); + assertSeekAndHint("00001\00aaaa", f, "00001\00aaab"); + assertInclude("00003\00aaaa", f); + assertInclude("00004\00aaaa", f); + assertInclude("00005\00aaaa", f); + assertSeekAndHint("00005\00aaaa", f, "00005\00aaab"); + } + + public void testVariableAfterFixed() throws Exception { + Filter f = createFilter(new int[] { 5, -4 }, 1); + assertInclude("00000aaaa", f); + assertInclude("00001aaaa", f); + assertSeekAndHint("00001aaaa", f, "00002"); + assertInclude("00003aaaa", f); + assertInclude("00004aaaa", f); + assertInclude("00005aaaa", f); + assertSeekAndHint("00005aaaa", f, "00006"); + + f = createFilter(new int[] { 5, -4 }, 2); + assertInclude("00000aaaa", f); + assertInclude("00001aaaa", f); + assertSeekAndHint("00001aaaa", f, "00001aaaa\01"); + assertInclude("00003aaaa", f); + assertInclude("00004aaaa", f); + assertInclude("00005aaaa", f); + assertSeekAndHint("00005aaaa", f, "00005aaaa\01"); + } + + public void testNoNextKey() throws Exception { + Filter f = createFilter(new int[] { 2, 2 }, 1); + assertInclude("00cc", f); + assertInclude(new byte[] { -1, -1, 20, 20 }, f); + // make sure we end the scan when we cannot increase a fixed length prefix + assertSeekAndHint(new byte[] { -1, -1, 20, 20 }, f, new byte[] { -1, -1 }, true); + assertSeekAndHint(new byte[] { -1, -1, 20, 20 }, f, new byte[] { -1, -1 }, true); + + f = createFilter(new int[] { 2, 2 }, 1); + f.setReversed(true); + assertInclude(new byte[] { 0, 0, 1, 1 }, f); + assertSeekAndHint(new byte[] { 0, 0, 1, 1 }, f, new byte[] { 0, 0 }, false); + assertSeekAndHint(new byte[] { 0, 0, 1, 1 }, f, new byte[] { 0, 0 }, false); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilterTest.java index d4f13723d0f..c0ac85da4a6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/EncodedQualifiersColumnProjectionFilterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,43 +19,43 @@ import java.util.BitSet; import java.util.HashSet; + import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; + import junit.framework.TestCase; public class EncodedQualifiersColumnProjectionFilterTest extends TestCase { - private final String someEmptyCFName = "cfName1"; - private final String someConditionalCFName1 = "conditionalCfName1"; - private final String someConditionalCFName2 = "conditionalCfName2"; - private final QualifierEncodingScheme someQualifiedEncodingScheme = QualifierEncodingScheme.ONE_BYTE_QUALIFIERS; - private final BitSet someBitSet; - private EncodedQualifiersColumnProjectionFilter filter; - - public EncodedQualifiersColumnProjectionFilterTest() { - HashSet conditionalCFNames = new HashSet(2); - conditionalCFNames.add(someConditionalCFName1.getBytes()); - conditionalCFNames.add(someConditionalCFName2.getBytes()); - - this.someBitSet = new BitSet(); - this.someBitSet.xor(new BitSet(0)); // All 1s - - this.filter = new EncodedQualifiersColumnProjectionFilter( - this.someEmptyCFName.getBytes(), - someBitSet, - conditionalCFNames, - this.someQualifiedEncodingScheme); - } - - public void testToString() { - String outputString = this.filter.toString(); - - assertTrue(outputString.contains("EmptyCFName")); - assertTrue(outputString.contains("EncodingScheme")); - assertTrue(outputString.contains("TrackedColumns")); - assertTrue(outputString.contains("ConditionOnlyCfs")); - assertTrue(outputString.contains(this.someEmptyCFName)); - assertTrue(outputString.contains(this.someConditionalCFName1)); - assertTrue(outputString.contains(this.someConditionalCFName2)); - assertTrue(outputString.contains(this.someBitSet.toString())); - } + private final String someEmptyCFName = "cfName1"; + private final String someConditionalCFName1 = "conditionalCfName1"; + private final String someConditionalCFName2 = "conditionalCfName2"; + private final QualifierEncodingScheme someQualifiedEncodingScheme = + QualifierEncodingScheme.ONE_BYTE_QUALIFIERS; + private final BitSet someBitSet; + private EncodedQualifiersColumnProjectionFilter filter; + + public EncodedQualifiersColumnProjectionFilterTest() { + HashSet conditionalCFNames = new HashSet(2); + conditionalCFNames.add(someConditionalCFName1.getBytes()); + conditionalCFNames.add(someConditionalCFName2.getBytes()); + + this.someBitSet = new BitSet(); + this.someBitSet.xor(new BitSet(0)); // All 1s + + this.filter = new EncodedQualifiersColumnProjectionFilter(this.someEmptyCFName.getBytes(), + someBitSet, conditionalCFNames, this.someQualifiedEncodingScheme); + } + + public void testToString() { + String outputString = this.filter.toString(); + + assertTrue(outputString.contains("EmptyCFName")); + assertTrue(outputString.contains("EncodingScheme")); + assertTrue(outputString.contains("TrackedColumns")); + assertTrue(outputString.contains("ConditionOnlyCfs")); + assertTrue(outputString.contains(this.someEmptyCFName)); + assertTrue(outputString.contains(this.someConditionalCFName1)); + assertTrue(outputString.contains(this.someConditionalCFName2)); + assertTrue(outputString.contains(this.someBitSet.toString())); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java index b8a50d2cae7..5b01223b446 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanBigFilterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,663 +40,1180 @@ import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsInfoBuilder; import org.apache.phoenix.schema.stats.GuidePostsKey; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.ReadOnlyProps; import org.junit.BeforeClass; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +public class SkipScanBigFilterTest extends BaseConnectionlessQueryTest { + private static final byte[][] REGION_BOUNDARIES_MINIMAL = { Bytes.toBytesBinary( + "\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), }; + private static final byte[][] GUIDE_POSTS_MINIMAL = { Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x07#[j\\x80\\x00\\x00\\x00Y\\x08u\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3U\\x88\\xFF\\x80\\x00\\x00\\x00\\x84\\xBFJ\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), }; + private static final byte[][] REGION_BOUNDARIES_ALL = { Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y3\\xF7P\\x80\\x00\\x00\\x00B\\xE7\\xF6F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oI\\x17B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xFA._\\xE2\\x80\\x00\\x00\\x00\\x98\\xFE2\\xF5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Da\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1%"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xB4]\\xE7\\x80\\x00\\x00\\x00ER\\xFE#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01p5R\\xD0\\x80\\x00\\x00\\x00@W\\xCC\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xC7U\\x80\\x00\\x00\\x00h\\xDF\"\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x8E\\xB0\\x80\\x00\\x00\\x00yM\\xD7\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xCA\\x85\\xFB\\x80\\x00\\x00\\x00}\\xA3*\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y\\x17\\x8B<\\x80\\x00\\x00\\x00i'\\xE8\\xC4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oK\\x11_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x07\\x15\\x12MOBILE\\x00\\x80\\x00\\x00\\x01a\\x02js\\x80\\x00\\x00\\x00@Y\\xC7\\x0C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0E\\x04@\\x8C\\x80\\x00\\x00\\x00o>\\xB1\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), }; -public class SkipScanBigFilterTest extends BaseConnectionlessQueryTest { - private static final byte[][] REGION_BOUNDARIES_MINIMAL = { - 
Bytes.toBytesBinary("\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - }; + private static final byte[][] GUIDE_POSTS_ALL = { Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01!y\\xC3\\x80\\x80\\x00\\x00\\x00+\\xB0)u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90h\\xE8;\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE7x\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x14'_\\xF5\\x80\\x00\\x00\\x00(\\xF9\\xDD\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF1\\xD8d\\x80\\x00\\x00\\x00\\x9B\\xC2A\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&9JM\\x80\\x00\\x00\\x00w\\x1A\\xF5\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xAAKT\\x80\\x00\\x00\\x00w\\x98{@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&yD\\x10\\x80\\x00\\x00\\x00w'f\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\xA1\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&%U\\x1B\\x80\\x00\\x00\\x00w\\x19u\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDBV\\x5C\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xF2\\xE3\\xA1\\xD8\\x80\\x00\\x00\\x00\\x02\\x9DY\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xBA\\xDC\\xEF\\x80\\x00\\x00\\x00\\x99l\\x0D\\xD2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\\xC6\\x0C\\x80\\x00\\x00\\x00w\"\\xDE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xA516\\x80\\x00\\x00\\x00EL\\xE1\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`1!\\xA1\\x80\\x00\\x00\\x00;\\xF4\\x8B\\xD4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`~.@\\x80\\x00\\x00\\x00<\\x03\\x85\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\xAF\\x98\\x80\\x00\\x00\\x00o\\x17\\xB9\\x82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x80)\\xB7\\x80\\x00\\x00\\x00fo5]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y3a\\x7F\\x80\\x00\\x00\\x00X\\xC7\\xE3\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x81\\x0Bb\\x80\\x00\\x00\\x00<\\x04s\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x04\\x7F1DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026U\\x05\\x80\\x00\\x00\\x00kF\\x16(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xF1+\\x80\\x00\\x00\\x00~J\\x87\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Jg&\\xF4\\x80\\x00\\x00\\x00o\\x10\\xC8\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p5BN\\x80\\x00\\x00\\x00i\\x0El]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC8t\\x80\\x00\\x00\\x00<\\x0C\\x10\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xD1\\xBE7\\x80\\x00\\x00\\x00\\x8A\\xFA_\\xDC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xD7\\xEE\\x19\\x80\\x00\\x00\\x00\\x89\\xEC\\xB4\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p3Ja\\x80\\x00\\x00\\x00tM{\\xBA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F#n\\x80\\x00\\x00\\x00i\\xC9f\\xB2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a\\x02b\\xAA\\x80\\x00\\x00\\x00h\\xDF9\\xDA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE3!\\xC8\\x80\\x00\\x00\\x00\\x89\\xFD\\x1D\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x99=\\x9F\\x80\\x00\\x00\\x00i\\xC2\\x9D\\x98\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDD\\xDC\\xB5\\x80\\x00\\x00\\x00\\x89\\xE5q=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xCB\\x80\\x00\\x00\\x00\\x89\\xE89\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02y\\xD1d+\\x80\\x00\\x00\\x00o\\x18\\xC7,\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x7F&\\x16\\x80\\x00\\x00\\x00<\\x03\\xE5l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xAE\\x1E\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6}\\xD7\\x80\\x00\\x00\\x00E^\\x83\\x8F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xCC\\x84\\x80\\x00\\x00\\x00o\\x1A\\xC6\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1DN#\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF5\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xC7TABLET\\x00\\x80\\x00\\x00\\x02Ar2q\\x80\\x00\\x00\\x00\\x98\\x9BF|\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x86.\\xF0\\xF4\\x80\\x00\\x00\\x00\\x98\\x9B`1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x91\\xD3w\\xB3\\x80\\x00\\x00\\x00\\xA00\\x5C\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x13D^:\\x80\\x00\\x00\\x00p\\x8F\\xA6\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0Fq\\xA5\\x80\\x00\\x00\\x00p\\x84w\\x8B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14a\\xB2\\xE6\\x80\\x00\\x00\\x00q\\x09\\x83\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x170f\\xF3\\x80\\x00\\x00\\x00q\\xD4u(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14\\x83\\x88l\\x80\\x00\\x00\\x00q\\x11\\xAB\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BZ\\xE7\\x9E\\x80\\x00\\x00\\x00s~\\xF8\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&~\\xE2\\xAB\\x80\\x00\\x00\\x00w(\\xD2N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91W\\xCD\\xBE\\x80\\x00\\x00\\x00\\x0E\\xAD\\x0A~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&4\\xE0\\x1A\\x80\\x00\\x00\\x00w\\x1A\\xA6\\x99\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF5\\xF1\\xDD\\x80\\x00\\x00\\x00\\x99m6q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&<\\xD6\\xB9\\x80\\x00\\x00\\x00w\\x1B4-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA7\\xA4\\xED\\x80\\x00\\x00\\x00w\\x97Lb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&q\"]\\x80\\x00\\x00\\x00w$\\xD6\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x05y\\xEF\\x80\\x00\\x00\\x00w\\x17\\x19c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x8A\\xBB\\xB1\\xDE\\x80\\x00\\x00\\x00\\x0EAU\\xE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&fS\\xE3\\x80\\x00\\x00\\x00w\"\\x8C\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\x99\\xF6\\x80\\x00\\x00\\x00\\x15,E\\xC7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&g.\\x93\\x80\\x00\\x00\\x00w\"\\xAA\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&d\\x15?\\x80\\x00\\x00\\x00w\")\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+ \\x16\\x19\\x80\\x00\\x00\\x00x\\x95\\xE4\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\xBB\\x92\\xA1\\x96\\x80\\x00\\x00\\x00\\x14J\\xAEd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2MOBILE\\x00\\x80\\x00\\x00\\x02n\\x95\\xD0N\\x80\\x00\\x00\\x00\\x92\\x0DF\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xE1]\\x1D\\x80\\x80\\x00\\x00\\x00}\\x1A\\xA8e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00$[\\x80\\x00\\x0D\\xDB\\x80\\x058\\x0BDESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00n\\xE5\\xBF\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00$[\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2\\x9F\\xFA\\x80\\x00\\x00\\x00\\x98c\\xD3D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDE\\x80\\x01i\\xF6DESKTOP\\x00\\x80\\x00\\x00\\x00\\xFEJ\\xDA\\x83\\x80\\x00\\x00\\x00\\x8D\\x8A\\xD1\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAE\\x95\\xC7\\x80\\x00\\x00\\x00\\x86\\x04\\xF7[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x06k\\xCB\\x13\\x80\\x00\\x00\\x00\\x84\\xC0N\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E*\\x80\\x01i\\xF5TABLET\\x00\\x80\\x00\\x00\\x01s\\xAE\\x98\\xF9\\x80\\x00\\x00\\x00\\x9A\\xD4\\xF0\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xC2fi\\x16\\x80\\x00\\x00\\x00\\x97\\xE1:Z\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E?\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAEE\\x94\\x80\\x00\\x00\\x00\\x98\\xF4j\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0EL\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x002H\\x8C\\xF7\\x80\\x00\\x00\\x00\\x88\\xF6\\xC3F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x5C\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01s\\xAD\\xDE1\\x80\\x00\\x00\\x00\\x9F\\xE1`\\x02\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0D\\xDF\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x01\\xB7@\\x9C\\x89\\x80\\x00\\x00\\x00V\\x81\\x8E\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\\x08\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x02\\x00!\\x9F\\xF3\\x80\\x00\\x00\\x00]9N\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\"\\x80\\x01i\\xEDOTHER\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x98\\xD1]\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91\\xA6\\x80\\x00\\x00\\x00f\\x1A\\xB1\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0Dh\\x80\\x02p DESKTOP\\x00\\x80\\x00\\x00\\x01g\\x8B\\x81#\\x80\\x00\\x00\\x00?J\\xDC\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02p\\xE4 ~\\x80\\x00\\x00\\x00\\x8F\\x05\\xDA\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o(\\x97\\xCA\\x80\\x00\\x00\\x00V\\xDF\\xC8\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00IN\\x80\\x00\\x0E3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01Ud\\x1D\\xF2\\x80\\x00\\x00\\x00V\\xE0\\x95\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01I#\"6\\x80\\x00\\x00\\x00>\\x1E\\xDF\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$:\\x1BG\\x80\\x00\\x00\\x00V\\xDE\\xFD\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01KpC\\x07\\x80\\x00\\x00\\x006\\xCE}5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0EP\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\xDC<\\x02\\x80\\x00\\x00\\x00X\\xAB\\xC6\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0EV\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC3B\\x82\\xA5\\x80\\x00\\x00\\x00\\x90\\x1B\\x8F-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0E]\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC0\\xDCBU\\x80\\x00\\x00\\x00\\x93K\\x86\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00IN\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01$\\xAE\\xD2\\x0A\\x80\\x00\\x00\\x00?J\\xDD\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x5C\\xBB\\x80\\x00\\x0D\\xFA\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02z*\\xDE2\\x80\\x00\\x00\\x00\\x92\\xFF\\xEEp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dj\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02.\\xD21\\x80\\x00\\x00\\x00kF\\x15b\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CIZ\\xF4\\x80\\x00\\x00\\x00o\\x1D<&\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x028\\xCDo\\xC5\\x80\\x00\\x00\\x00}\\xA3\\x88\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0Cb\\xDFn\\x80\\x00\\x00\\x00~J\\xA7n\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x87\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01y\\x18K\\x92\\x80\\x00\\x00\\x00B\\xEE\\xF2?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x8F\"Z\\x80\\x00\\x00\\x00<\\x06\\xDB!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x90B\\x12\\x80\\x00\\x00\\x00<\\x07(@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\x95\\xB6\\x80\\x00\\x00\\x00fo\\x84\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x903r\\x80\\x00\\x00\\x00i\\x0Ep\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01` \\xF0i\\x80\\x00\\x00\\x00;\\xF2=\\xBF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DRS\\x80\\x00\\x00\\x00i\\xC2c\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\x1DM\\xD8\\x80\\x00\\x00\\x00\\x8A\\x1Ak\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\"\\x12\\x9A\\x80\\x00\\x00\\x00\\x8A\\x1B\\xD0P\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDDQ\\xAD\\x80\\x00\\x00\\x00\\x89\\xDF\\x8D!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y ,\\xDC\\x80\\x00\\x00\\x00\\x8A\\x1A\\x81\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x87\\x01\\x14\\x80\\x00\\x00\\x00fp<\\xDD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD8\\xD6j\\x80\\x00\\x00\\x00\\x89\\xF6#\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE4\\x04\\xE6\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE3\\x06\\xBC\\x80\\x00\\x00\\x00\\x8A\\x09p<\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\xED8\\xD8\\x80\\x00\\x00\\x00h\\xDE\\xD82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x07\\x15\\x04DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x91\\xBF\\x80\\x00\\x00\\x00E]\\x98\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAb\\x0A\\x80\\x00\\x00\\x00\\x89\\xDD\\xE2\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C.z\\xDF\\x80\\x00\\x00\\x00o\\x1B\\xC6#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0ED\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YL\\xB0\\xE1\\x80\\x00\\x00\\x00\\x8A\\xB9X\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x1C\\xD0\\x80\\x00\\x00\\x00<\\x03J_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C?\\xA2#\\x80\\x00\\x00\\x00oKT#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01_\\xDA\\xD6\\xF1\\x80\\x00\\x00\\x00;\\xF2\\x08f\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5C\\xB1-\\x83\\x80\\x00\\x00\\x00\\x8Ap74\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xD3DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC1\\x80\\x00\\x00\\x00\\x9EHL\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCZiE\\x80\\x00\\x00\\x00u\\xD7\\xC8\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BtA\\xFE\\x80\\x00\\x00\\x00s\\x83\\xB5\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!'\\xA4\\x13\\x80\\x00\\x00\\x00u\\x9E\\xD7l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14}~v\\x80\\x00\\x00\\x00q\\x10\\xE6\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0AU\\xF5\\x80\\x00\\x00\\x00p\\x840\\x85\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17%\\x96\\x86\\x80\\x00\\x00\\x00q\\xD1v\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x86\\xC2\"\\x80\\x00\\x00\\x00q\\x12<\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY;{\\x80\\x00\\x00\\x00\\x8A-\\x0A/\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x14'l\\xA2\\x80\\x00\\x00\\x00(\\xF9\\xF1{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFCmU\\x80\\x00\\x00\\x00\\x02\\x93l\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,c\\xDE+\\x80\\x00\\x00\\x00\\x9B\\xC2e8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&]\\x9E\\x94\\x80\\x00\\x00\\x00w \\x90\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA8\\x91\\xD7\\x80\\x00\\x00\\x00w\\x97\\x8D>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x80/\\xCD\\x80\\x00\\x00\\x00w)4\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&]\\xA5\\xFC\\x80\\x00\\x00\\x00w \\xB2\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x18\\xCF\\x81\\x80\\x00\\x00\\x00w\\x18\\xBE\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDB>B\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x04\\xDF\\x9B\\x80\\x00\\x00\\x00\\x99k\\xF2\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC3\\x95*\\x80\\x00\\x00\\x00x\\x95\\xD5\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+!#!\\x80\\x00\\x00\\x00x\\x95\\xE5\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\xA1\\x873\\x80\\x00\\x00\\x00yf\\x12D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02}r[`\\x80\\x00\\x00\\x00\\x92\\xBA\\xF0\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2DESKTOP\\x00\\x80\\x00\\x00\\x012\\xCD{\\xD9\\x80\\x00\\x00\\x000[\\xA1u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x08x\\x80\\x00\\x00\\x00=\\x92\\xF2-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x02DTk\\x80\\x00\\x00\\x00kK\\x88r\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\x9Dd\\x80\\x00\\x00\\x00~J\\xAC\\xFC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CWE\\xAC\\x80\\x00\\x00\\x00o\\x10\\xB7\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y\\xA6\\xE9n\\x80\\x00\\x00\\x00X\\xCC\\xAF*\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DU\\x19\\x80\\x00\\x00\\x00i\\xC2^S\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x84&\\x91\\x1F\\x80\\x00\\x00\\x00O\\xF6\\xC7\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82f\\x02O\\x80\\x00\\x00\\x00E?\\xAF\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\x96)[\\x80\\x00\\x00\\x00EE\\xF5\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x03\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY,O\\x80\\x00\\x00\\x00\\x8A-\\x09\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90m\\xAB\\xC6\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE9X\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFF\\xBA\\xA2\\x80\\x00\\x00\\x00\\x02\\x93F\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,g\\x89+\\x80\\x00\\x00\\x00\\x9B\\xC2n'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&`\\xABX\\x80\\x00\\x00\\x00w!\\x98m\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,f\\xF9\\x7F\\x80\\x00\\x00\\x00yL\\x83_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+\\xE2\\xC9\\xE6\\x80\\x00\\x00\\x00yL5\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\x96\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&Q\\x143\\x80\\x00\\x00\\x00w\\x1E\\x0F\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD6\\x15\\xCB\\x80\\x00\\x00\\x00\\x0C\\xDB\\x92\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBE\\x83 9\\x80\\x00\\x00\\x00\\x0C\\xDB\\x93\\x80\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC4\\xD3\\xA7\\x80\\x00\\x00\\x00x\\x95\\xD6+\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x03t\\xC5OTHER\\x00\\x80\\x00\\x00\\x01p+I\\xE4\\x80\\x00\\x00\\x00h\\xDE\\xDA\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{\\x1D\\xF9\\x80\\x00\\x00\\x00<\\x03\\x17}\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x82\\xC7\\x85\\x80\\x00\\x00\\x00i\\xA97#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CbU\\xD0\\x80\\x00\\x00\\x00~J\\xA3\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x0CXM\\x87\\x80\\x00\\x00\\x00o\\x11TY\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01p7\\xDC\\x83\\x80\\x00\\x00\\x00@\\x5C`\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{%\\x14\\x80\\x00\\x00\\x00<\\x03\\x1F\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xC0\\xDE\\x1E\\x80\\x00\\x00\\x00EY\\xB2\\xF4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6\\x07\\xE8\\x80\\x00\\x00\\x00O\\xF6\\xE8\\xA6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC8\\x9F\\x80\\x00\\x00\\x00<\\x0C\\x08\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xB6/\\x80\\x00\\x00\\x00h\\xDF\"\\xEB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC9O\\x80\\x00\\x00\\x00<\\x0C\\x13\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xBFT*\\x80\\x00\\x00\\x00\\x8A\\xFAA\\x8C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xBD\\xBF\\x80\\x00\\x00\\x00y\"N\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p4:\\xDA\\x80\\x00\\x00\\x00u\\xAE\\x95q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F*\\xA8\\x80\\x00\\x00\\x00i\\xC9F\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x0D\\xAF\\x80\\x00\\x00\\x00=\\x94\\xC4)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE02\\xE6\\x80\\x00\\x00\\x00\\x8A\\x0F6(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xD19\\x80\\x00\\x00\\x00h\\xDE\\x9Df\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB2\\xB5\\x80\\x00\\x00\\x00\\x89\\xE36\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xC3\\x80\\x00\\x00\\x00\\x89\\xE13\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02`u&J\\x80\\x00\\x00\\x00\\x8B\\xB7)\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\x0D\\x80\\x00\\x00\\x00o\\x11@\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1E\\x9C1\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF6?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e]\\xF8q\\x80\\x00\\x00\\x00i\\x0E\\xBE\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\xC7\\xE8\\xB1\\x80\\x00\\x00\\x00o\\x12\\x1B\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01h\\x13I\\x9E\\x80\\x00\\x00\\x00i\\x0D\\xD6\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02m\\x07\\x90\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02\\x86/\\xEE\\x1C\\x80\\x00\\x00\\x00\\x98\\x9B\\x5C\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02E\\xEC@\\x05\\x80\\x00\\x00\\x00Y\\x12\\xC4\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!\\x18T\\xAC\\x80\\x00\\x00\\x00u\\x9C\\xDA\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14`\\x9C6\\x80\\x00\\x00\\x00q\\x09Hp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x1B\\x14o\"\\x80\\x00\\x00\\x00sZ\\xFFN\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x82t{\\x80\\x00\\x00\\x00qRS\\x1B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x16\\xBC\\x8C|\\x80\\x00\\x00\\x00q\\xAA\\x14\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17\\x02O\\xE9\\x80\\x00\\x00\\x00q\\xC9E\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y41[\\x80\\x00\\x00\\x00B\\xE7\\xEDE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y6&\\x83\\x80\\x00\\x00\\x00B\\xE8\\x96\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x15<\\x80\\x00\\x00\\x00<\\x03D\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x8C\\xE1\\x80\\x00\\x00\\x00o\\x1C(\\xA7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8E'\\x99\\x80\\x00\\x00\\x00b\\xF3\\xABM\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01a\\x01\\x1E\\xB7\\x80\\x00\\x00\\x00h\\xDE\\x93\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x01\\x84!\\xA5\\x8F\\x80\\x00\\x00\\x00E\\xD7\\xA6)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0C+!\\x0E\\x80\\x00\\x00\\x00oI\\xEC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026l\\xA0\\x80\\x00\\x00\\x00EJ\\x18\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Jg$=\\x80\\x00\\x00\\x00;\\xF2d.\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xA7\\xE7\\x80\\x00\\x00\\x00\\x8A\\x07\\x07\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01e\\x5CF^\\x80\\x00\\x00\\x00=\\x92\\x87:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4Rm\\x80\\x00\\x00\\x00o\\x1C8\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD9\\xF2\\xCF\\x80\\x00\\x00\\x00\\x89\\xEE\"\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH1\\xB0\\x80\\x00\\x00\\x00\\x8A\\xB8\\xE3\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\xF0\\x80\\x00\\x00\\x00\\x903\\xAC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YN/5\\x80\\x00\\x00\\x00\\x8A\\x1E)\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xCBo\\x80\\x00\\x00\\x00\\x89\\xEE&M\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0J\\x0B\\x80\\x00\\x00\\x00\\x8A\\x0An\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02Y\\x1D\\xFD\\xD0\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF4\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02s\\x16\\xF1\\xF6\\x80\\x00\\x00\\x00\\x95R\\x03\\xD8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y,\\xF0\\xE1\\x80\\x00\\x00\\x00\\x8A\\x1D,\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e\\x5C8\\xE1\\x80\\x00\\x00\\x00i\\x0E\\x9D\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDD\\x08\\x8B\\x80\\x00\\x00\\x00\\x89\\xEA\\x05k\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02a\\xC9\\x1D\\x10\\x80\\x00\\x00\\x00\\x96\\x06\\x18\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x13T\\x04\\x80\\x00\\x00\\x00i\\x0D\\xE8\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0x\\xA4\\x80\\x00\\x00\\x00\\x89\\xDFT\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1B\\x80\\x05\\xD8\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC9M\\xF4b\\x80\\x00\\x00\\x00\\x98\\x9AJy\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xC9DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC5\\x80\\x00\\x00\\x00m\\x12\\x9DA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCX8\"\\x80\\x00\\x00\\x00Wy\\x11\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B\\x1A\\xD6\\x99\\x80\\x00\\x00\\x00s]\\x01U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bg`\\xFF\\x80\\x00\\x00\\x00s\\x82B>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x14\\x8B\\xA5\\x90\\x80\\x00\\x00\\x00q\\x13\\xF5c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xB7m\\x80\\x00\\x00\\x00qR\\x02\\xF1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x16}In\\x80\\x00\\x00\\x00q\\x9F\\xCF\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bq\\xC2\\xCB\\x80\\x00\\x00\\x00s\\x83s\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x05\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$\\xD6\\x5Cm\\x80\\x00\\x00\\x00,\\xBE\\x1A\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x9D\\xE5\\x15\\xA1\\x80\\x00\\x00\\x00a\\xE9D\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xF9^%\\x80\\x00\\x00\\x00\\x02\\x93n\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x83\\xBD\\x95\\x80\\x00\\x00\\x00\\x99ki_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02%\\xF8\\xF5\\xDB\\x80\\x00\\x00\\x00w\\x16Fr\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'x\\x1D\\xA9\\x80\\x00\\x00\\x00x\\x00Fb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&Z1\\xCB\\x80\\x00\\x00\\x00w\\x1F\\xB4\\xF2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02+\\xE3b*\\x80\\x00\\x00\\x00yL8p\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&b\\xA9H\\x80\\x00\\x00\\x00w!\\xA20\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&\\x00.\\xC9\\x80\\x00\\x00\\x00w\\x16\\x8F\\xB8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02*\\xCEzM\\x80\\x00\\x00\\x00\\x99l\\x16e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x5C\\x0B\\xAA\\x80\\x00\\x00\\x00w \\x08\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x18\\xDD\\xFC\\x80\\x00\\x00\\x00w\\x18\\xBE\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e\\x5C-Y\\x80\\x00\\x00\\x00=\\x94\\xD7\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01zjc\\x9A\\x80\\x00\\x00\\x00i\\x0Eg$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x87\\x01\\x80\\x00\\x00\\x00oI\\xA1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6~\\x06\\x80\\x00\\x00\\x00O\\xF6\\xA9\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y1\\xD2\\xE8\\x80\\x00\\x00\\x00X\\xC6]1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x803}\\x80\\x00\\x00\\x00<\\x04*\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\x88\\xCF\\x83\\x80\\x00\\x00\\x00EA\\x0D\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Jg$o\\x80\\x00\\x00\\x00;\\xF2VF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xA4\\x16\\x80\\x00\\x00\\x00h\\xDF-\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cd\\x099\\x80\\x00\\x00\\x00~J\\xAF\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9DM\\xDB\\x80\\x00\\x00\\x00i\\xCAK\\x0F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01z\\x5C\\x1C\\x1D\\x80\\x00\\x00\\x00\\x7FX\\x85\\x07\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD4E\\xF4\\x80\\x00\\x00\\x00\\x89\\xE1\\xA5\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x85\\x05\\x94\\x80\\x00\\x00\\x00i\\xA6\\x82C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xAB\\xC0\\x11\\x80\\x00\\x00\\x00<\\x0B\\xBE\\xAE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDE\\x03\\xDD\\x80\\x00\\x00\\x00\\x89\\xF71o\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB4\\x19\\x80\\x00\\x00\\x00\\x89\\xE1_{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9Dc\\xDA\\x80\\x00\\x00\\x00i\\xC9\\xEB\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x12\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD9<\\x80\\x00\\x00\\x00oIQ\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YH;\\x0B\\x80\\x00\\x00\\x00\\x8A\\xB9\\x0B\\x01\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\xA4;\\x80\\x00\\x00\\x00fvo\\xC3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a$\\xD3\\xB9\\x80\\x00\\x00\\x00<&\\x98\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02X\\xB8J\\x18\\x80\\x00\\x00\\x00\\x8A\\x1B.2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH\\xBB\\x10\\x80\\x00\\x00\\x00\\x8A\\x1CK5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xAC\\xCF\\x80\\x00\\x00\\x00h\\xDF&V\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD6!^\\x80\\x00\\x00\\x00\\x8A\\x0E\\x08\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD7;\\x09\\x80\\x00\\x00\\x00\\x89\\xDC\\x868\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x05\\xD8\\xCBTABLET\\x00\\x80\\x00\\x00\\x02m\\x05\\x10\\xDC\\x80\\x00\\x00\\x00\\x98\\x9AJ\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x05\\xD8\\xD0MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\x93\\xD4\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x05\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xBCcx\\x86\\x80\\x00\\x00\\x00Wz\\xADo\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x83\\xA7\\x97\\x80\\x00\\x00\\x00qR\\x7Fd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B_\\xB4S\\x80\\x00\\x00\\x00s\\x81%\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xCD\\xAD\\x80\\x00\\x00\\x00qR\\x03\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14hQ\\xF8\\x80\\x00\\x00\\x00q\\x0CN\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BYk\"\\x80\\x00\\x00\\x00s|\\xE9U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x88\\xEE\\xC5\\x80\\x00\\x00\\x00q\\x12\\xDD\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00$\\xB5u\\x0D\\x80\\x00\\x00\\x00\\x02\\x97\\xAA\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00x{\\xB2\\xDB\\x80\\x00\\x00\\x00\\x0C\\xE3\\xB8\\x92\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xn\\xAFo\\x80\\x00\\x00\\x00\\x0C\\xE3\\x87\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&~\\x14b\\x80\\x00\\x00\\x00\\x99kw\\xD1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&+i\\xF9\\x80\\x00\\x00\\x00w\\x19\\xD6:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA3\\xF1\\xBA\\x80\\x00\\x00\\x00w\\x96S\\x1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\"z\\x80\\x00\\x00\\x00w\"\\xDF\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x06\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,s.}\\x80\\x00\\x00\\x00yL\\xBBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDE6\\xB2\\x80\\x00\\x00\\x00\\x0C\\xDB\\x99\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&`\\xBF7\\x80\\x00\\x00\\x00w!}\\xC9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\xB2\\xB7\\x80\\x00\\x00\\x00\\x0C\\xDB\\x8E\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC0{G\\x80\\x00\\x00\\x00\\x99l\\x11\\x9F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01p8kT\\x80\\x00\\x00\\x00sQ\\x05F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x82\\xEEN\\x97\\x80\\x00\\x00\\x00E{\\x11\\x08\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1\\x15\\xB3\\x80\\x00\\x00\\x00;\\xF4\\x82\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xE4m\\x80\\x00\\x00\\x00~J\\x9C~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8B9\\xA3\\x80\\x00\\x00\\x00b\\xF6R\\x9E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o\\xBCKV\\x80\\x00\\x00\\x00fo8$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`y\\xE2q\\x80\\x00\\x00\\x00<\\x02\\xDA\\xAC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xB9\\xA7\\x80\\x00\\x00\\x00y\"N\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02 \\x0E\\xEB\\x80\\x00\\x00\\x00kF\\x12\\xCA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC9qi\\x80\\x00\\x00\\x00E`\\xAE_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01zd\\xF0\\x1C\\x80\\x00\\x00\\x00B\\xED\\xFBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YKjG\\x80\\x00\\x00\\x00\\x8A\\xA07w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xC5{\\xD4\\x80\\x00\\x00\\x00\\x8A\\xFAMq\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xDF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02`wo\\xC8\\x80\\x00\\x00\\x00\\x8B\\xB7\\xA6\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x85\\x13\\xC1\\x80\\x00\\x00\\x00i\\xA97\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF3\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9F\\x1F<\\x80\\x00\\x00\\x00i\\xC9u\\x17\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFD\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01zd]\\x0B\\x80\\x00\\x00\\x00i\\xA97\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x07\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xE2\\x97\\x08\\x80\\x00\\x00\\x00\\x89\\xDAU\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01/\"6\\xC4\\x80\\x00\\x00\\x00+\\xB0(\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xF3\\x100+\\x80\\x00\\x00\\x00\\x02\\x92\\xF5[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xs\\xFFB\\x80\\x00\\x00\\x00\\x0C\\xE3\\x97\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&i\\x0C\\x00\\x80\\x00\\x00\\x00\\x9B\\xBFNN\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x1B=L\\x80\\x00\\x00\\x00w\\x18\\xE4\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\x8A\\xF7\\xC9\\x80\\x00\\x00\\x00w\\x92\\xA5R\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&x+\\x85\\x80\\x00\\x00\\x00w'\\x1C\\xAE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&sI\\x84\\x80\\x00\\x00\\x00w%\\xA0U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00r\\xD5\\xEE\\xBE\\x80\\x00\\x00\\x00\\x0C\\xDB\\x92R\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xB9\\xF1\\xB3K\\x80\\x00\\x00\\x00\\x0C\\xDB\\x959\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&6\\x9C\\xA4\\x80\\x00\\x00\\x00\\x99k\\x8C\\xF9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xCE\\x90J\\x80\\x00\\x00\\x00\\x99l\\x16w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC6\\xCAN\\x80\\x00\\x00\\x00x\\x95\\xD7\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]~\\xD6\\x80\\x00\\x00\\x00=\\x94\\xAC\\xD2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01a\\x03\\xB3\\x1E\\x80\\x00\\x00\\x00h\\xDF2~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xC9\\x88\\xE5\\x80\\x00\\x00\\x00}\\xA1\\xD1\\x9A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xE6\\x0D\\x80\\x00\\x00\\x00~J\\x86\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x87\\x7F@\\x80\\x00\\x00\\x00b\\xF6T\\xBA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x90D\\x80\\x80\\x00\\x00\\x00<\\x07)H\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x02C\\xE6\\x8E\\x80\\x00\\x00\\x00kKk\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x8B\\xF7\\x93\\x80\\x00\\x00\\x00<\\x06\"\\xAD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e\\x11dD\\x80\\x00\\x00\\x00=l\\x98y\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4R'\\x80\\x00\\x00\\x00o\\x1A\\x0E:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p5I-\\x80\\x00\\x00\\x00@\\x5C`k\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02W\\xE0l\\x84\\x80\\x00\\x00\\x00\\x8A\\xB36E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\xA1Q\\xDB\\x80\\x00\\x00\\x00i\\xCA\\xEF\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x02YJ\\x93B\\x80\\x00\\x00\\x00\\x89*\\xE4\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD5\\xB7\\xB6\\x80\\x00\\x00\\x00fol\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01a\\x03\\xB2\\xD2\\x80\\x00\\x00\\x00h\\xDE\\xBE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDD\\x8Dv\\x80\\x00\\x00\\x00\\x89\\xDFBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x08~\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x02&\\xBD\\x82\\x85\\x80\\x00\\x00\\x00we\\xBF\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x14\\x06.\\x80\\x00\\x00\\x00i\\x0D\\xE9=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02`v\\xD9g\\x80\\x00\\x00\\x00\\x8B\\xB8Pg\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xDD\\x84\\x80\\x00\\x00\\x00\\x8A\\x070\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02b\\xC7N\\xF7\\x80\\x00\\x00\\x00\\x96\\x06\\x1BW\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD2!\\x80\\x00\\x00\\x00oJ\\x14\\xF1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x04\\x7F1DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDE/\\xEA\\x80\\x00\\x00\\x00\\x89\\xF0F0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p,2b\\x80\\x00\\x00\\x00h\\xDE\\xEA\\xFA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01a\\x01%\\xB7\\x80\\x00\\x00\\x00hm-=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x81\\x1E\\xE1\\x80\\x00\\x00\\x00fo \\xB8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x02ArO\\xB3\\x80\\x00\\x00\\x00\\x80\\x0C\\x8F\\xDD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x05\\xD8\\xCCDESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xA0\\x80\\x00\\x00\\x00\\x9EHL\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBC`=T\\x80\\x00\\x00\\x00WzW\\xB1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x17c{$\\x80\\x00\\x00\\x00q\\xE2\\x0C\\x82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14e\\xF8z\\x80\\x00\\x00\\x00q\\x0B;I\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15oZ\\x10\\x80\\x00\\x00\\x00qM\\xDA\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B$1\\x08\\x80\\x00\\x00\\x00s`\\xFB\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x16\\xF5\\xCB\\xE5\\x80\\x00\\x00\\x00q\\xC6\\x94\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!e\\xBF\\xBC\\x80\\x00\\x00\\x00u\\xAC\\x9E(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CX\\xE7\\xAA\\x80\\x00\\x00\\x00\\x8A,\\xA1w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xF3\\x0D\\xB7V\\x80\\x00\\x00\\x00\\x02\\x92\\xF5a\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x02\\xC0\\x0CMOBILE\\x00\\x80\\x00\\x00\\x00xn\\xF7\\xCE\\x80\\x00\\x00\\x00\\x0C\\xE3\\x89\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,cs\\xD9\\x80\\x00\\x00\\x00\\x9B\\xC2_\\xFF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&iqb\\x80\\x00\\x00\\x00w#\\x17f\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02+\\xE3\\xB6\\xC5\\x80\\x00\\x00\\x00yL9\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+\\xD2\\xD1\\x93\\x80\\x00\\x00\\x00yL }\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&z*\\xE8\\x80\\x00\\x00\\x00w'\\x9EV\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&`\\xC0G\\x80\\x00\\x00\\x00w!}\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xD9\\x86\\x03\\x80\\x00\\x00\\x00\\x0C\\xDB\\x93\\xE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x02\\xE2\\x06\\x80\\x00\\x00\\x00\\x99k\\xF2\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\\x0B~\\x80\\x00\\x00\\x00w\"\\xDE\\x07\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&g*\\xD2\\x80\\x00\\x00\\x00w\"\\xA9\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\x97\\x5C$\\x80\\x00\\x00\\x00y\\x0B\\xDD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02%\\xDA\\x9DM\\x80\\x00\\x00\\x00,\\x8F\\xAE\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF3TABLET\\x00\\x80\\x00\\x00\\x00\\xF9v\\x14\\x07\\x80\\x00\\x00\\x00#g+\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xE1U\\xA3Y\\x80\\x00\\x00\\x00\\x80\\x04\\xCC\\xE6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00$[\\x80\\x00\\x0D\\xE4\\x80\\x04\\xF7\\x13DESKTOP\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2o\\x08\\x80\\x00\\x00\\x00N\\xFE\\xBA\\x9F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00$[\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2\\xED\\x08\\x80\\x00\\x00\\x00\\x98c\\x90\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDE\\x80\\x01i\\xF6DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC2g\\x19l\\x80\\x00\\x00\\x00\\x86\\x06\\xB4\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x07#[j\\x80\\x00\\x00\\x00Y\\x08u\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3U\\x88\\xFF\\x80\\x00\\x00\\x00\\x84\\xBFJ\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xCF3%\\x7F\\x80\\x00\\x00\\x00\\x98\\xFE\\xAA\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xCB\\xA6K\\x9C\\x80\\x00\\x00\\x00`\\xBAL\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E2\\x80\\x01i\\xF6MOBILE\\x00\\x80\\x00\\x00\\x00\\xD4\\x7Fu*\\x80\\x00\\x00\\x00a\\x0Er1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E?\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x8B\\x85\\x18E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x03\\x8B\\x0D\\xEC"), + Bytes.toBytesBinary( + 
"\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0EL\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xE1\\x7F\\xFD\\x03\\x80\\x00\\x00\\x00K\\x89\\xC74\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E]\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD6\\xC5\\x06\\xA9\\x80\\x00\\x00\\x00\\x9F\\xE2\\x84\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0D\\xE0\\x80\\x01i\\xECTABLET\\x00\\x80\\x00\\x00\\x01\\xC3\\x1D\\xE7~\\x80\\x00\\x00\\x00\\x87\\xD8\\x06\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E\\x09\\x80\\x01i\\xECDESKTOP\\x00\\x80\\x00\\x00\\x00\\x94\\xE2i2\\x80\\x00\\x00\\x00]:b\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xC62Z\\x80\\x00\\x00\\x00\\x98\\xD1\\x5C\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB5 r.\\x80\\x00\\x00\\x00\\x98\\xD0\\xCE\\x98\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E9\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x17\\xAE\\x05\\xA0\\x80\\x00\\x00\\x00\\x9E[n~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0EI\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x007~\\x0A\\x80\\x00\\x00\\x00\\x9F\\xA8\\xE5\\xF0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x94\\xD9k\\x13\\x80\\x00\\x00\\x00\\x9F\\xA8\\xF7N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02\\x00,\\xDC\\xC9\\x80\\x00\\x00\\x00\\x9EE\\xE3_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00=\\x96\\x80\\x00\\x0E!\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xF3\\xA9T\\x13\\x80\\x00\\x00\\x00\\x91\\x0F\\xD7\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0Du\\x80\\x02p DESKTOP\\x00\\x80\\x00\\x00\\x01yF\\xB19\\x80\\x00\\x00\\x00,\\xE6\\x8CZ\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$:\\x15\\x05\\x80\\x00\\x00\\x00\\x8B\\xED\\xB2\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01\\xD77n\\xFC\\x80\\x00\\x00\\x00\\x8B\\xEBP\\xEF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x08\\x80\\x00IN\\x80\\x00\\x0E3\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01\\xD77t.\\x80\\x00\\x00\\x00Y\\x0EJp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02;F\\xC9\\xE8\\x80\\x00\\x00\\x00\\x89\\xEA\\xF1\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01f4\\x03F\\x80\\x00\\x00\\x00\\x89\\xEA\\xD7\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC3B\\x98\\xBC\\x80\\x00\\x00\\x00X\\xB04[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00IN\\x80\\x00\\x0EP\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02CX\\xAA\\x80\\x80\\x00\\x00\\x00\\x90=3j\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x07\\x15\\x04TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xC6\\x092\\x80\\x00\\x00\\x00E^\\x134\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x80\\xBA\\xB9\\x80\\x00\\x00\\x00fp\\x17\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x8D\\x1C?\\x80\\x00\\x00\\x00i\\x0Ea3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC9\\x0F\\x80\\x00\\x00\\x00<\\x0C\\x1E\\x1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x99J_\\x80\\x00\\x00\\x00i\\xCAu\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y41\\xB9\\x80\\x00\\x00\\x00X\\xCC~\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x8B\\xEB\\x08\\x80\\x00\\x00\\x00i\\x0Er\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`1&\\x81\\x80\\x00\\x00\\x00;\\xF4\\x8Ea\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xFB\\x98\\x80\\x00\\x00\\x00o\\x1Dg\\xF2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{\\x1C\\x8C\\x80\\x00\\x00\\x00<\\x03\\x14d\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC5\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02J\\xD5\\xAE\\xDA\\x80\\x00\\x00\\x00o\\x13\\xCE\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCD\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0CXl\\x8D\\x80\\x00\\x00\\x00o\\x14\\xBBx\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDD\\xC3_\\x80\\x00\\x00\\x00\\x8A\\x07\\x061\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02`v\\xEB\\x9B\\x80\\x00\\x00\\x00\\x8B\\xB8R\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEA\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x19\\x00\\xF9\\x80\\x00\\x00\\x00oIJ\\xF9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02K.\\xEB\\x8E\\x80\\x00\\x00\\x00h\\xDF$\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xCC\\xF7\\x80\\x00\\x00\\x00\\x89\\xDF\\xA0N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01e\\x5C-g\\x80\\x00\\x00\\x00\\x8C\\xC5\\x10Z\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x12\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xDC\\xEB\\x80\\x00\\x00\\x00\\x89\\xE1ch\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x14\\x1A\\xE2\\x80\\x00\\x00\\x00i\\x0D\\xD3\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02s\\x14m\\xAD\\x80\\x00\\x00\\x00\\x95Q\\xEE\\x9E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x88\\xF7_\\x80\\x00\\x00\\x00i\\xA6\\x82#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0D\\x94\\x80\\x00\\x00\\x00\\x89\\xE9\\xB9*\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EG\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xF6+\\x80\\x00\\x00\\x00\\x89\\xEA\\x96X\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YM\\x9C\\x0A\\x80\\x00\\x00\\x00\\x8A\\xB9k\\x8B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EY\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02YFK4\\x80\\x00\\x00\\x00\\x8A\\x1Ed\\xFC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0D\\xE0\\x80\\x05\\xD8\\xCATABLET\\x00\\x80\\x00\\x00\\x01\\xB8\\xE93y\\x80\\x00\\x00\\x00V\\xB7)\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0E&\\x80\\x05\\xD8\\xD2DESKTOP\\x00\\x80\\x00\\x00\\x02Ar3\\x10\\x80\\x00\\x00\\x00\\x9B\\xF3\\xE5\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0EN\\x80\\x05\\xD3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\xA6\\xFFl\\x80\\x00\\x00\\x00x\\xEC\\xED\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x05\\xBDY\\xC4\\x80\\x00\\x00\\x00%\\xC7\\x96d\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xFATABLET\\x00\\x80\\x00\\x00\\x02+\\xD0Dg\\x80\\x00\\x00\\x00bn\\xEB\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01}\\xA1\\x89\\x11\\x80\\x00\\x00\\x00C\\xCC\\xDB\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00$[\\x80\\x00\\x0D\\xEB\\x80\\x04\\xF7\\x19TABLET\\x00\\x80\\x00\\x00\\x01\\x1E\\xF9\\xEE\\x12\\x80\\x00\\x00\\x00n\\xE5\\xCF\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00$[\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02t\\xEE\\xA8y\\x80\\x00\\x00\\x00\\x98\\x19X7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3UgN\\x80\\x00\\x00\\x00\\x8D\\x8A\\xBC\\x90\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xC3?\\xAC\\x13\\x80\\x00\\x00\\x00\\x8D]\\xE3\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xCE\\xEE\\x03\\xB0\\x80\\x00\\x00\\x00\\x97\\xE2W\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xB3B\\xB6\\x80\\x00\\x00\\x00ER?\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x7F*\\xFC\\x80\\x00\\x00\\x00<\\x03\\xE9\\x8F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x7F\\xFBo\\x80\\x00\\x00\\x00<\\x03\\xFA\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cd\\x04\\xA9\\x80\\x00\\x00\\x00~J\\xAE]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x028\\xCC:w\\x80\\x00\\x00\\x00}\\xA2$\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y\\x15\\x1E{\\x80\\x00\\x00\\x00PS\\xF9\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`|\\x0A\\xE8\\x80\\x00\\x00\\x00<\\x03:\\x02\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02J\\xD4FC\\x80\\x00\\x00\\x00o\\x15k\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02C\\xAD\\x86\\x80\\x00\\x00\\x00k\\x1E(t\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX][\\x80\\x00\\x00\\x00~J\\x83=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xB0\\x84>\\x80\\x00\\x00\\x00<\\x0C1\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4D\\x5C\\x80\\x00\\x00\\x00o\\x18n \\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02YJ\\xF25\\x80\\x00\\x00\\x00\\x89*\\xE8|\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xD6\\xDBa\\x80\\x00\\x00\\x00\\x89\\xE9\\xC4\\xE6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p*\\x8E\\xE3\\x80\\x00\\x00\\x00@Y\\x9D\\xCA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x87\\x09x\\x80\\x00\\x00\\x00fo\\x83\\xD5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e\\x5C<\\x14\\x80\\x00\\x00\\x00=\\x92\\xA6@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02YP\\x13\\xDF\\x80\\x00\\x00\\x00\\x89*\\xD0t\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F\"\\xE3\\x80\\x00\\x00\\x00i\\xC9\\x85c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE2\\x16&\\x80\\x00\\x00\\x00\\x89\\xE0\\xE7\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xBC\\xDC\\x80\\x00\\x00\\x00\\x8A\\x0F6\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD7\\x83\\x80\\x00\\x00\\x00o\\x1B\\xCE\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`y\\xFE*\\x80\\x00\\x00\\x00<\\x03\\x048\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x07\\x15\\x08DESKTOP\\x00\\x80\\x00\\x00\\x02Y\"\\x1B%\\x80\\x00\\x00\\x00\\x8A\\x1B\\xD0s\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02@\\xD9\\x9FC\\x80\\x00\\x00\\x00<\\x03\\x9CC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01d\\xE0#\\xD0\\x80\\x00\\x00\\x00h\\xDF\\x19\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x07\\x15\\x08DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x8D\\x9E\\x80\\x00\\x00\\x00yM\\xD8\\x1B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xD1TABLET\\x00\\x80\\x00\\x00\\x01\\xC9@\\xE7\\x99\\x80\\x00\\x00\\x00\\x98\\x9AJ\\xAA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + 
"\\x09\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x05\\xD8\\xCADESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xB2\\x80\\x00\\x00\\x00\\x9EHL\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBC[\\xC7\\x1E\\x80\\x00\\x00\\x00u\\xD7\\xC8\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x15lSk\\x80\\x00\\x00\\x00qM\\xC2\\x95\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x10\\x14!\\x80\\x00\\x00\\x00p\\x84\\x82\\xBD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15ZF\\xD4\\x80\\x00\\x00\\x00qLA\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B`\\x9Fw\\x80\\x00\\x00\\x00s\\x81d\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x1B\\x16m\\xF0\\x80\\x00\\x00\\x00s[\\xB0\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + Bytes.toBytesBinary( + "\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!e\\xD4\\x14\\x80\\x00\\x00\\x00u\\xAC\\xA1\\xF4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), }; - private static final byte[][] GUIDE_POSTS_MINIMAL = { - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x07#[j\\x80\\x00\\x00\\x00Y\\x08u\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3U\\x88\\xFF\\x80\\x00\\x00\\x00\\x84\\xBFJ\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - }; + @BeforeClass + @Shadower(classBeingShadowed = BaseConnectionlessQueryTest.class) + public static void doSetup() throws Exception { + Map props = Maps.newHashMapWithExpectedSize(1); + // enables manual splitting on salted tables + props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); + initDriver(new ReadOnlyProps(props.entrySet().iterator())); + } - - private static final byte[][] REGION_BOUNDARIES_ALL = { - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y3\\xF7P\\x80\\x00\\x00\\x00B\\xE7\\xF6F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oI\\x17B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xFA._\\xE2\\x80\\x00\\x00\\x00\\x98\\xFE2\\xF5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Da\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1%"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xB4]\\xE7\\x80\\x00\\x00\\x00ER\\xFE#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01p5R\\xD0\\x80\\x00\\x00\\x00@W\\xCC\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xC7U\\x80\\x00\\x00\\x00h\\xDF\"\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x8E\\xB0\\x80\\x00\\x00\\x00yM\\xD7\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xCA\\x85\\xFB\\x80\\x00\\x00\\x00}\\xA3*\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D^\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y\\x17\\x8B<\\x80\\x00\\x00\\x00i'\\xE8\\xC4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00oK\\x11_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D]\\x80\\x07\\x15\\x12MOBILE\\x00\\x80\\x00\\x00\\x01a\\x02js\\x80\\x00\\x00\\x00@Y\\xC7\\x0C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0E\\x04@\\x8C\\x80\\x00\\x00\\x00o>\\xB1\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - }; + @Test + public void testRangeIntersect() throws Exception { + testIntersect(REGION_BOUNDARIES_MINIMAL, GUIDE_POSTS_MINIMAL); + } - private static final byte[][] GUIDE_POSTS_ALL = { - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01!y\\xC3\\x80\\x80\\x00\\x00\\x00+\\xB0)u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90h\\xE8;\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE7x\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x14'_\\xF5\\x80\\x00\\x00\\x00(\\xF9\\xDD\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF1\\xD8d\\x80\\x00\\x00\\x00\\x9B\\xC2A\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&9JM\\x80\\x00\\x00\\x00w\\x1A\\xF5\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xAAKT\\x80\\x00\\x00\\x00w\\x98{@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&yD\\x10\\x80\\x00\\x00\\x00w'f\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\xA1\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&%U\\x1B\\x80\\x00\\x00\\x00w\\x19u\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDBV\\x5C\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xF2\\xE3\\xA1\\xD8\\x80\\x00\\x00\\x00\\x02\\x9DY\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xBA\\xDC\\xEF\\x80\\x00\\x00\\x00\\x99l\\x0D\\xD2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\\xC6\\x0C\\x80\\x00\\x00\\x00w\"\\xDE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xA516\\x80\\x00\\x00\\x00EL\\xE1\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`1!\\xA1\\x80\\x00\\x00\\x00;\\xF4\\x8B\\xD4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`~.@\\x80\\x00\\x00\\x00<\\x03\\x85\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\xAF\\x98\\x80\\x00\\x00\\x00o\\x17\\xB9\\x82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x80)\\xB7\\x80\\x00\\x00\\x00fo5]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y3a\\x7F\\x80\\x00\\x00\\x00X\\xC7\\xE3\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x81\\x0Bb\\x80\\x00\\x00\\x00<\\x04s\\xA9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x04\\x7F1DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026U\\x05\\x80\\x00\\x00\\x00kF\\x16(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xF1+\\x80\\x00\\x00\\x00~J\\x87\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Jg&\\xF4\\x80\\x00\\x00\\x00o\\x10\\xC8\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p5BN\\x80\\x00\\x00\\x00i\\x0El]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 
\\x10\\xC8t\\x80\\x00\\x00\\x00<\\x0C\\x10\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xD1\\xBE7\\x80\\x00\\x00\\x00\\x8A\\xFA_\\xDC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xD7\\xEE\\x19\\x80\\x00\\x00\\x00\\x89\\xEC\\xB4\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p3Ja\\x80\\x00\\x00\\x00tM{\\xBA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F#n\\x80\\x00\\x00\\x00i\\xC9f\\xB2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a\\x02b\\xAA\\x80\\x00\\x00\\x00h\\xDF9\\xDA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE3!\\xC8\\x80\\x00\\x00\\x00\\x89\\xFD\\x1D\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x99=\\x9F\\x80\\x00\\x00\\x00i\\xC2\\x9D\\x98\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDD\\xDC\\xB5\\x80\\x00\\x00\\x00\\x89\\xE5q=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xCB\\x80\\x00\\x00\\x00\\x89\\xE89\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02y\\xD1d+\\x80\\x00\\x00\\x00o\\x18\\xC7,\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x7F&\\x16\\x80\\x00\\x00\\x00<\\x03\\xE5l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xAE\\x1E\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6}\\xD7\\x80\\x00\\x00\\x00E^\\x83\\x8F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xCC\\x84\\x80\\x00\\x00\\x00o\\x1A\\xC6\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1DN#\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF5\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xC7TABLET\\x00\\x80\\x00\\x00\\x02Ar2q\\x80\\x00\\x00\\x00\\x98\\x9BF|\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x86.\\xF0\\xF4\\x80\\x00\\x00\\x00\\x98\\x9B`1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x91\\xD3w\\xB3\\x80\\x00\\x00\\x00\\xA00\\x5C\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x13D^:\\x80\\x00\\x00\\x00p\\x8F\\xA6\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0Fq\\xA5\\x80\\x00\\x00\\x00p\\x84w\\x8B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14a\\xB2\\xE6\\x80\\x00\\x00\\x00q\\x09\\x83\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x170f\\xF3\\x80\\x00\\x00\\x00q\\xD4u(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14\\x83\\x88l\\x80\\x00\\x00\\x00q\\x11\\xAB\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x00\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BZ\\xE7\\x9E\\x80\\x00\\x00\\x00s~\\xF8\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&~\\xE2\\xAB\\x80\\x00\\x00\\x00w(\\xD2N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91W\\xCD\\xBE\\x80\\x00\\x00\\x00\\x0E\\xAD\\x0A~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&4\\xE0\\x1A\\x80\\x00\\x00\\x00w\\x1A\\xA6\\x99\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+\\xF5\\xF1\\xDD\\x80\\x00\\x00\\x00\\x99m6q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&<\\xD6\\xB9\\x80\\x00\\x00\\x00w\\x1B4-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA7\\xA4\\xED\\x80\\x00\\x00\\x00w\\x97Lb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&q\"]\\x80\\x00\\x00\\x00w$\\xD6\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x05y\\xEF\\x80\\x00\\x00\\x00w\\x17\\x19c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x8A\\xBB\\xB1\\xDE\\x80\\x00\\x00\\x00\\x0EAU\\xE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&fS\\xE3\\x80\\x00\\x00\\x00w\"\\x8C\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\x99\\xF6\\x80\\x00\\x00\\x00\\x15,E\\xC7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&g.\\x93\\x80\\x00\\x00\\x00w\"\\xAA\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&d\\x15?\\x80\\x00\\x00\\x00w\")\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+ \\x16\\x19\\x80\\x00\\x00\\x00x\\x95\\xE4\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00\\xBB\\x92\\xA1\\x96\\x80\\x00\\x00\\x00\\x14J\\xAEd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2MOBILE\\x00\\x80\\x00\\x00\\x02n\\x95\\xD0N\\x80\\x00\\x00\\x00\\x92\\x0DF\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xE1]\\x1D\\x80\\x80\\x00\\x00\\x00}\\x1A\\xA8e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x80\\x00$[\\x80\\x00\\x0D\\xDB\\x80\\x058\\x0BDESKTOP\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00n\\xE5\\xBF\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00$[\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2\\x9F\\xFA\\x80\\x00\\x00\\x00\\x98c\\xD3D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDE\\x80\\x01i\\xF6DESKTOP\\x00\\x80\\x00\\x00\\x00\\xFEJ\\xDA\\x83\\x80\\x00\\x00\\x00\\x8D\\x8A\\xD1\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAE\\x95\\xC7\\x80\\x00\\x00\\x00\\x86\\x04\\xF7[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x06k\\xCB\\x13\\x80\\x00\\x00\\x00\\x84\\xC0N\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E*\\x80\\x01i\\xF5TABLET\\x00\\x80\\x00\\x00\\x01s\\xAE\\x98\\xF9\\x80\\x00\\x00\\x00\\x9A\\xD4\\xF0\\xED\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xC2fi\\x16\\x80\\x00\\x00\\x00\\x97\\xE1:Z\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E?\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01s\\xAEE\\x94\\x80\\x00\\x00\\x00\\x98\\xF4j\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0EL\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x002H\\x8C\\xF7\\x80\\x00\\x00\\x00\\x88\\xF6\\xC3F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x5C\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01s\\xAD\\xDE1\\x80\\x00\\x00\\x00\\x9F\\xE1`\\x02\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0D\\xDF\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x01\\xB7@\\x9C\\x89\\x80\\x00\\x00\\x00V\\x81\\x8E\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\\x08\\x80\\x01i\\xECOTHER\\x00\\x80\\x00\\x00\\x02\\x00!\\x9F\\xF3\\x80\\x00\\x00\\x00]9N\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E\"\\x80\\x01i\\xEDOTHER\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x98\\xD1]\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00)\\xEF\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x91\\xA6\\x80\\x00\\x00\\x00f\\x1A\\xB1\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0Dh\\x80\\x02p 
DESKTOP\\x00\\x80\\x00\\x00\\x01g\\x8B\\x81#\\x80\\x00\\x00\\x00?J\\xDC\\xA4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02p\\xE4 ~\\x80\\x00\\x00\\x00\\x8F\\x05\\xDA\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o(\\x97\\xCA\\x80\\x00\\x00\\x00V\\xDF\\xC8\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01Ud\\x1D\\xF2\\x80\\x00\\x00\\x00V\\xE0\\x95\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01I#\"6\\x80\\x00\\x00\\x00>\\x1E\\xDF\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$:\\x1BG\\x80\\x00\\x00\\x00V\\xDE\\xFD\\xBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01KpC\\x07\\x80\\x00\\x00\\x006\\xCE}5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EP\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\xDC<\\x02\\x80\\x00\\x00\\x00X\\xAB\\xC6\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0EV\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xC3B\\x82\\xA5\\x80\\x00\\x00\\x00\\x90\\x1B\\x8F-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0E]\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC0\\xDCBU\\x80\\x00\\x00\\x00\\x93K\\x86\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00IN\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01$\\xAE\\xD2\\x0A\\x80\\x00\\x00\\x00?J\\xDD\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x5C\\xBB\\x80\\x00\\x0D\\xFA\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02z*\\xDE2\\x80\\x00\\x00\\x00\\x92\\xFF\\xEEp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dj\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02.\\xD21\\x80\\x00\\x00\\x00kF\\x15b\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CIZ\\xF4\\x80\\x00\\x00\\x00o\\x1D<&\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x028\\xCDo\\xC5\\x80\\x00\\x00\\x00}\\xA3\\x88\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0Cb\\xDFn\\x80\\x00\\x00\\x00~J\\xA7n\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x87\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01y\\x18K\\x92\\x80\\x00\\x00\\x00B\\xEE\\xF2?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x8F\"Z\\x80\\x00\\x00\\x00<\\x06\\xDB!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x90B\\x12\\x80\\x00\\x00\\x00<\\x07(@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\x95\\xB6\\x80\\x00\\x00\\x00fo\\x84\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xB4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x903r\\x80\\x00\\x00\\x00i\\x0Ep\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01` \\xF0i\\x80\\x00\\x00\\x00;\\xF2=\\xBF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DRS\\x80\\x00\\x00\\x00i\\xC2c\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\x1DM\\xD8\\x80\\x00\\x00\\x00\\x8A\\x1Ak\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\"\\x12\\x9A\\x80\\x00\\x00\\x00\\x8A\\x1B\\xD0P\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDDQ\\xAD\\x80\\x00\\x00\\x00\\x89\\xDF\\x8D!\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y ,\\xDC\\x80\\x00\\x00\\x00\\x8A\\x1A\\x81\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x87\\x01\\x14\\x80\\x00\\x00\\x00fp<\\xDD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD8\\xD6j\\x80\\x00\\x00\\x00\\x89\\xF6#\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE4\\x04\\xE6\\x80\\x00\\x00\\x00\\x89\\xFA\\xC8\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE3\\x06\\xBC\\x80\\x00\\x00\\x00\\x8A\\x09p<\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\xED8\\xD8\\x80\\x00\\x00\\x00h\\xDE\\xD82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x07\\x15\\x04DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x91\\xBF\\x80\\x00\\x00\\x00E]\\x98\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAb\\x0A\\x80\\x00\\x00\\x00\\x89\\xDD\\xE2\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C.z\\xDF\\x80\\x00\\x00\\x00o\\x1B\\xC6#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0ED\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YL\\xB0\\xE1\\x80\\x00\\x00\\x00\\x8A\\xB9X\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x1C\\xD0\\x80\\x00\\x00\\x00<\\x03J_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0C?\\xA2#\\x80\\x00\\x00\\x00oKT#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01_\\xDA\\xD6\\xF1\\x80\\x00\\x00\\x00;\\xF2\\x08f\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5C\\xB1-\\x83\\x80\\x00\\x00\\x00\\x8Ap74\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xD3DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC1\\x80\\x00\\x00\\x00\\x9EHL\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCZiE\\x80\\x00\\x00\\x00u\\xD7\\xC8\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BtA\\xFE\\x80\\x00\\x00\\x00s\\x83\\xB5\\xC8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!'\\xA4\\x13\\x80\\x00\\x00\\x00u\\x9E\\xD7l\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14}~v\\x80\\x00\\x00\\x00q\\x10\\xE6\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x0AU\\xF5\\x80\\x00\\x00\\x00p\\x840\\x85\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17%\\x96\\x86\\x80\\x00\\x00\\x00q\\xD1v\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x01\\x80\\x00\\x8Aj\\x80\\x00\\x0E7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x86\\xC2\"\\x80\\x00\\x00\\x00q\\x12<\\x81\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY;{\\x80\\x00\\x00\\x00\\x8A-\\x0A/\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x14'l\\xA2\\x80\\x00\\x00\\x00(\\xF9\\xF1{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFCmU\\x80\\x00\\x00\\x00\\x02\\x93l\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,c\\xDE+\\x80\\x00\\x00\\x00\\x9B\\xC2e8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&]\\x9E\\x94\\x80\\x00\\x00\\x00w \\x90\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA8\\x91\\xD7\\x80\\x00\\x00\\x00w\\x97\\x8D>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x80/\\xCD\\x80\\x00\\x00\\x00w)4\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&]\\xA5\\xFC\\x80\\x00\\x00\\x00w \\xB2\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x18\\xCF\\x81\\x80\\x00\\x00\\x00w\\x18\\xBE\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDB>B\\x80\\x00\\x00\\x00\\x14\\xE5\\xA4\\x91\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x04\\xDF\\x9B\\x80\\x00\\x00\\x00\\x99k\\xF2\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC3\\x95*\\x80\\x00\\x00\\x00x\\x95\\xD5\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+!#!\\x80\\x00\\x00\\x00x\\x95\\xE5\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\xA1\\x873\\x80\\x00\\x00\\x00yf\\x12D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02}r[`\\x80\\x00\\x00\\x00\\x92\\xBA\\xF0\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF2DESKTOP\\x00\\x80\\x00\\x00\\x012\\xCD{\\xD9\\x80\\x00\\x00\\x000[\\xA1u\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x08x\\x80\\x00\\x00\\x00=\\x92\\xF2-\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x02DTk\\x80\\x00\\x00\\x00kK\\x88r\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cc\\x9Dd\\x80\\x00\\x00\\x00~J\\xAC\\xFC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CWE\\xAC\\x80\\x00\\x00\\x00o\\x10\\xB7\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y\\xA6\\xE9n\\x80\\x00\\x00\\x00X\\xCC\\xAF*\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x9DU\\x19\\x80\\x00\\x00\\x00i\\xC2^S\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x84&\\x91\\x1F\\x80\\x00\\x00\\x00O\\xF6\\xC7\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82f\\x02O\\x80\\x00\\x00\\x00E?\\xAF\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x02\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\x96)[\\x80\\x00\\x00\\x00EE\\xF5\\x96\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CY,O\\x80\\x00\\x00\\x00\\x8A-\\x09\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x90m\\xAB\\xC6\\x80\\x00\\x00\\x00\\x0E\\x9B\\xE9X\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xFF\\xBA\\xA2\\x80\\x00\\x00\\x00\\x02\\x93F\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,g\\x89+\\x80\\x00\\x00\\x00\\x9B\\xC2n'\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&`\\xABX\\x80\\x00\\x00\\x00w!\\x98m\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,f\\xF9\\x7F\\x80\\x00\\x00\\x00yL\\x83_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+\\xE2\\xC9\\xE6\\x80\\x00\\x00\\x00yL5\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x08\\x01\\x96\\x80\\x00\\x00\\x00w\\x17W\\x0D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&Q\\x143\\x80\\x00\\x00\\x00w\\x1E\\x0F\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD6\\x15\\xCB\\x80\\x00\\x00\\x00\\x0C\\xDB\\x92\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBE\\x83 9\\x80\\x00\\x00\\x00\\x0C\\xDB\\x93\\x80\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC4\\xD3\\xA7\\x80\\x00\\x00\\x00x\\x95\\xD6+\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x03t\\xC5OTHER\\x00\\x80\\x00\\x00\\x01p+I\\xE4\\x80\\x00\\x00\\x00h\\xDE\\xDA\\xF6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{\\x1D\\xF9\\x80\\x00\\x00\\x00<\\x03\\x17}\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x82\\xC7\\x85\\x80\\x00\\x00\\x00i\\xA97#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CbU\\xD0\\x80\\x00\\x00\\x00~J\\xA3\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x0CXM\\x87\\x80\\x00\\x00\\x00o\\x11TY\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01p7\\xDC\\x83\\x80\\x00\\x00\\x00@\\x5C`\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{%\\x14\\x80\\x00\\x00\\x00<\\x03\\x1F\\x87\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xC0\\xDE\\x1E\\x80\\x00\\x00\\x00EY\\xB2\\xF4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6\\x07\\xE8\\x80\\x00\\x00\\x00O\\xF6\\xE8\\xA6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC8\\x9F\\x80\\x00\\x00\\x00<\\x0C\\x08\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xB6/\\x80\\x00\\x00\\x00h\\xDF\"\\xEB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC9O\\x80\\x00\\x00\\x00<\\x0C\\x13\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xBFT*\\x80\\x00\\x00\\x00\\x8A\\xFAA\\x8C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xBD\\xBF\\x80\\x00\\x00\\x00y\"N\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p4:\\xDA\\x80\\x00\\x00\\x00u\\xAE\\x95q\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F*\\xA8\\x80\\x00\\x00\\x00i\\xC9F\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]\\x0D\\xAF\\x80\\x00\\x00\\x00=\\x94\\xC4)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xE02\\xE6\\x80\\x00\\x00\\x00\\x8A\\x0F6(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFF\\xD19\\x80\\x00\\x00\\x00h\\xDE\\x9Df\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB2\\xB5\\x80\\x00\\x00\\x00\\x89\\xE36\\xDB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDAW\\xC3\\x80\\x00\\x00\\x00\\x89\\xE13\\x0B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02`u&J\\x80\\x00\\x00\\x00\\x8B\\xB7)\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\x0D\\x80\\x00\\x00\\x00o\\x11@\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\x1E\\x9C1\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF6?\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e]\\xF8q\\x80\\x00\\x00\\x00i\\x0E\\xBE\\xD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\xC7\\xE8\\xB1\\x80\\x00\\x00\\x00o\\x12\\x1B\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01h\\x13I\\x9E\\x80\\x00\\x00\\x00i\\x0D\\xD6\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02m\\x07\\x90\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x03\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x08]LDESKTOP\\x00\\x80\\x00\\x00\\x02\\x86/\\xEE\\x1C\\x80\\x00\\x00\\x00\\x98\\x9B\\x5C\\x88\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02E\\xEC@\\x05\\x80\\x00\\x00\\x00Y\\x12\\xC4\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!\\x18T\\xAC\\x80\\x00\\x00\\x00u\\x9C\\xDA\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x14`\\x9C6\\x80\\x00\\x00\\x00q\\x09Hp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x1B\\x14o\"\\x80\\x00\\x00\\x00sZ\\xFFN\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x82t{\\x80\\x00\\x00\\x00qRS\\x1B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x16\\xBC\\x8C|\\x80\\x00\\x00\\x00q\\xAA\\x14\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x03\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x17\\x02O\\xE9\\x80\\x00\\x00\\x00q\\xC9E\\x10\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01y41[\\x80\\x00\\x00\\x00B\\xE7\\xEDE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01y6&\\x83\\x80\\x00\\x00\\x00B\\xE8\\x96\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`|\\x15<\\x80\\x00\\x00\\x00<\\x03D\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x8C\\xE1\\x80\\x00\\x00\\x00o\\x1C(\\xA7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8E'\\x99\\x80\\x00\\x00\\x00b\\xF3\\xABM\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01a\\x01\\x1E\\xB7\\x80\\x00\\x00\\x00h\\xDE\\x93\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x07\\x15\\x08OTHER\\x00\\x80\\x00\\x00\\x01\\x84!\\xA5\\x8F\\x80\\x00\\x00\\x00E\\xD7\\xA6)\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0C+!\\x0E\\x80\\x00\\x00\\x00oI\\xEC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x026l\\xA0\\x80\\x00\\x00\\x00EJ\\x18\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Jg$=\\x80\\x00\\x00\\x00;\\xF2d.\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE2\\xA7\\xE7\\x80\\x00\\x00\\x00\\x8A\\x07\\x07\\x05\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01e\\x5CF^\\x80\\x00\\x00\\x00=\\x92\\x87:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD7\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4Rm\\x80\\x00\\x00\\x00o\\x1C8\\x09\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD9\\xF2\\xCF\\x80\\x00\\x00\\x00\\x89\\xEE\"\\xAB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH1\\xB0\\x80\\x00\\x00\\x00\\x8A\\xB8\\xE3\\x8E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xDE\\xF0\\x80\\x00\\x00\\x00\\x903\\xAC\\xEC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YN/5\\x80\\x00\\x00\\x00\\x8A\\x1E)\\x1F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x09\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xCBo\\x80\\x00\\x00\\x00\\x89\\xEE&M\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0J\\x0B\\x80\\x00\\x00\\x00\\x8A\\x0An\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02Y\\x1D\\xFD\\xD0\\x80\\x00\\x00\\x00\\x8A\\x1B\\xF4\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02s\\x16\\xF1\\xF6\\x80\\x00\\x00\\x00\\x95R\\x03\\xD8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E-\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y,\\xF0\\xE1\\x80\\x00\\x00\\x00\\x8A\\x1D,\\x83\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e\\x5C8\\xE1\\x80\\x00\\x00\\x00i\\x0E\\x9D\\xE4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDD\\x08\\x8B\\x80\\x00\\x00\\x00\\x89\\xEA\\x05k\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02a\\xC9\\x1D\\x10\\x80\\x00\\x00\\x00\\x96\\x06\\x18\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x13T\\x04\\x80\\x00\\x00\\x00i\\x0D\\xE8\\xB6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0x\\xA4\\x80\\x00\\x00\\x00\\x89\\xDFT\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x1B\\x80\\x05\\xD8\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC9M\\xF4b\\x80\\x00\\x00\\x00\\x98\\x9AJy\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00w\\x9C\\x80\\x00\\x0E=\\x80\\x05\\xD8\\xC9DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xC5\\x80\\x00\\x00\\x00m\\x12\\x9DA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00w\\xD8\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xBCX8\"\\x80\\x00\\x00\\x00Wy\\x11\\xB3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE3\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B\\x1A\\xD6\\x99\\x80\\x00\\x00\\x00s]\\x01U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bg`\\xFF\\x80\\x00\\x00\\x00s\\x82B>\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x14\\x8B\\xA5\\x90\\x80\\x00\\x00\\x00q\\x13\\xF5c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xB7m\\x80\\x00\\x00\\x00qR\\x02\\xF1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E&\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x16}In\\x80\\x00\\x00\\x00q\\x9F\\xCF\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x04\\x80\\x00\\x8Aj\\x80\\x00\\x0E6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1Bq\\xC2\\xCB\\x80\\x00\\x00\\x00s\\x83s\\x1E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$\\xD6\\x5Cm\\x80\\x00\\x00\\x00,\\xBE\\x1A\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\x9D\\xE5\\x15\\xA1\\x80\\x00\\x00\\x00a\\xE9D\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1D\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00:\\xF9^%\\x80\\x00\\x00\\x00\\x02\\x93n\\xBC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x83\\xBD\\x95\\x80\\x00\\x00\\x00\\x99ki_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02%\\xF8\\xF5\\xDB\\x80\\x00\\x00\\x00w\\x16Fr\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'x\\x1D\\xA9\\x80\\x00\\x00\\x00x\\x00Fb\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&Z1\\xCB\\x80\\x00\\x00\\x00w\\x1F\\xB4\\xF2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02+\\xE3b*\\x80\\x00\\x00\\x00yL8p\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&b\\xA9H\\x80\\x00\\x00\\x00w!\\xA20\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&\\x00.\\xC9\\x80\\x00\\x00\\x00w\\x16\\x8F\\xB8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02*\\xCEzM\\x80\\x00\\x00\\x00\\x99l\\x16e\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x5C\\x0B\\xAA\\x80\\x00\\x00\\x00w \\x08\\xA8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x05\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x18\\xDD\\xFC\\x80\\x00\\x00\\x00w\\x18\\xBE\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e\\x5C-Y\\x80\\x00\\x00\\x00=\\x94\\xD7\\x8A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01zjc\\x9A\\x80\\x00\\x00\\x00i\\x0Eg$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\x87\\x01\\x80\\x00\\x00\\x00oI\\xA1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC6~\\x06\\x80\\x00\\x00\\x00O\\xF6\\xA9\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y1\\xD2\\xE8\\x80\\x00\\x00\\x00X\\xC6]1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x803}\\x80\\x00\\x00\\x00<\\x04*\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\x88\\xCF\\x83\\x80\\x00\\x00\\x00EA\\x0D\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Jg$o\\x80\\x00\\x00\\x00;\\xF2VF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xA4\\x16\\x80\\x00\\x00\\x00h\\xDF-\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cd\\x099\\x80\\x00\\x00\\x00~J\\xAF\\x7F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9DM\\xDB\\x80\\x00\\x00\\x00i\\xCAK\\x0F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01z\\x5C\\x1C\\x1D\\x80\\x00\\x00\\x00\\x7FX\\x85\\x07\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD4E\\xF4\\x80\\x00\\x00\\x00\\x89\\xE1\\xA5\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x85\\x05\\x94\\x80\\x00\\x00\\x00i\\xA6\\x82C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xAB\\xC0\\x11\\x80\\x00\\x00\\x00<\\x0B\\xBE\\xAE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDE\\x03\\xDD\\x80\\x00\\x00\\x00\\x89\\xF71o\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xDB\\xB4\\x19\\x80\\x00\\x00\\x00\\x89\\xE1_{\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9Dc\\xDA\\x80\\x00\\x00\\x00i\\xC9\\xEB\\x0A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x12\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD9<\\x80\\x00\\x00\\x00oIQ\\x11\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YH;\\x0B\\x80\\x00\\x00\\x00\\x8A\\xB9\\x0B\\x01\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD6\\xA4;\\x80\\x00\\x00\\x00fvo\\xC3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01a$\\xD3\\xB9\\x80\\x00\\x00\\x00<&\\x98\\xCC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02X\\xB8J\\x18\\x80\\x00\\x00\\x00\\x8A\\x1B.2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02YH\\xBB\\x10\\x80\\x00\\x00\\x00\\x8A\\x1CK5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xFE\\xAC\\xCF\\x80\\x00\\x00\\x00h\\xDF&V\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xD6!^\\x80\\x00\\x00\\x00\\x8A\\x0E\\x08\\xD6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xD7;\\x09\\x80\\x00\\x00\\x00\\x89\\xDC\\x868\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x18\\x80\\x05\\xD8\\xCBTABLET\\x00\\x80\\x00\\x00\\x02m\\x05\\x10\\xDC\\x80\\x00\\x00\\x00\\x98\\x9AJ\\x97\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00w\\x9C\\x80\\x00\\x0E:\\x80\\x05\\xD8\\xD0MOBILE\\x00\\x80\\x00\\x00\\x01\\xC0\\x93\\xD4\\xB4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\xBCcx\\x86\\x80\\x00\\x00\\x00Wz\\xADo\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15\\x83\\xA7\\x97\\x80\\x00\\x00\\x00qR\\x7Fd\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B_\\xB4S\\x80\\x00\\x00\\x00s\\x81%\\x04\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x05\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x15\\x81\\xCD\\xAD\\x80\\x00\\x00\\x00qR\\x03\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14hQ\\xF8\\x80\\x00\\x00\\x00q\\x0CN\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1BYk\"\\x80\\x00\\x00\\x00s|\\xE9U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x05\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14\\x88\\xEE\\xC5\\x80\\x00\\x00\\x00q\\x12\\xDD\\xDF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x00$\\xB5u\\x0D\\x80\\x00\\x00\\x00\\x02\\x97\\xAA\\xC5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00x{\\xB2\\xDB\\x80\\x00\\x00\\x00\\x0C\\xE3\\xB8\\x92\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xn\\xAFo\\x80\\x00\\x00\\x00\\x0C\\xE3\\x87\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&~\\x14b\\x80\\x00\\x00\\x00\\x99kw\\xD1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&+i\\xF9\\x80\\x00\\x00\\x00w\\x19\\xD6:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\xA3\\xF1\\xBA\\x80\\x00\\x00\\x00w\\x96S\\x1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\"z\\x80\\x00\\x00\\x00w\"\\xDF\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02,s.}\\x80\\x00\\x00\\x00yL\\xBBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xDE6\\xB2\\x80\\x00\\x00\\x00\\x0C\\xDB\\x99\\x19\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02&`\\xBF7\\x80\\x00\\x00\\x00w!}\\xC9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00r\\xD5\\xB2\\xB7\\x80\\x00\\x00\\x00\\x0C\\xDB\\x8E\\x9B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC0{G\\x80\\x00\\x00\\x00\\x99l\\x11\\x9F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01p8kT\\x80\\x00\\x00\\x00sQ\\x05F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x01\\x82\\xEEN\\x97\\x80\\x00\\x00\\x00E{\\x11\\x08\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`1\\x15\\xB3\\x80\\x00\\x00\\x00;\\xF4\\x82\\xCB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xE4m\\x80\\x00\\x00\\x00~J\\x9C~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x8B9\\xA3\\x80\\x00\\x00\\x00b\\xF6R\\x9E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01o\\xBCKV\\x80\\x00\\x00\\x00fo8$\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`y\\xE2q\\x80\\x00\\x00\\x00<\\x02\\xDA\\xAC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02+h\\xB9\\xA7\\x80\\x00\\x00\\x00y\"N\\x12\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02 \\x0E\\xEB\\x80\\x00\\x00\\x00kF\\x12\\xCA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC9qi\\x80\\x00\\x00\\x00E`\\xAE_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01zd\\xF0\\x1C\\x80\\x00\\x00\\x00B\\xED\\xFBn\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YKjG\\x80\\x00\\x00\\x00\\x8A\\xA07w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02]\\xC5{\\xD4\\x80\\x00\\x00\\x00\\x8A\\xFAMq\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xDF\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02`wo\\xC8\\x80\\x00\\x00\\x00\\x8B\\xB7\\xA6\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x85\\x13\\xC1\\x80\\x00\\x00\\x00i\\xA97\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF3\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x9F\\x1F<\\x80\\x00\\x00\\x00i\\xC9u\\x17\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFD\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01zd]\\x0B\\x80\\x00\\x00\\x00i\\xA97\\x89\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x06\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x07\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xE2\\x97\\x08\\x80\\x00\\x00\\x00\\x89\\xDAU\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01/\"6\\xC4\\x80\\x00\\x00\\x00+\\xB0(\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xF3\\x100+\\x80\\x00\\x00\\x00\\x02\\x92\\xF5[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00xs\\xFFB\\x80\\x00\\x00\\x00\\x0C\\xE3\\x97\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&i\\x0C\\x00\\x80\\x00\\x00\\x00\\x9B\\xBFNN\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&\\x1B=L\\x80\\x00\\x00\\x00w\\x18\\xE4\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02'\\x8A\\xF7\\xC9\\x80\\x00\\x00\\x00w\\x92\\xA5R\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&x+\\x85\\x80\\x00\\x00\\x00w'\\x1C\\xAE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&sI\\x84\\x80\\x00\\x00\\x00w%\\xA0U\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00r\\xD5\\xEE\\xBE\\x80\\x00\\x00\\x00\\x0C\\xDB\\x92R\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xB9\\xF1\\xB3K\\x80\\x00\\x00\\x00\\x0C\\xDB\\x959\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&6\\x9C\\xA4\\x80\\x00\\x00\\x00\\x99k\\x8C\\xF9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xCE\\x90J\\x80\\x00\\x00\\x00\\x99l\\x16w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02*\\xC6\\xCAN\\x80\\x00\\x00\\x00x\\x95\\xD7\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e]~\\xD6\\x80\\x00\\x00\\x00=\\x94\\xAC\\xD2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Dq\\x80\\x03t\\xC5DESKTOP\\x00\\x80\\x00\\x00\\x01a\\x03\\xB3\\x1E\\x80\\x00\\x00\\x00h\\xDF2~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xC9\\x88\\xE5\\x80\\x00\\x00\\x00}\\xA1\\xD1\\x9A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xE6\\x0D\\x80\\x00\\x00\\x00~J\\x86\\xA5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x82\\x87\\x7F@\\x80\\x00\\x00\\x00b\\xF6T\\xBA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x90D\\x80\\x80\\x00\\x00\\x00<\\x07)H\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x96\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x02C\\xE6\\x8E\\x80\\x00\\x00\\x00kKk\\xA1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x8B\\xF7\\x93\\x80\\x00\\x00\\x00<\\x06\"\\xAD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01e\\x11dD\\x80\\x00\\x00\\x00=l\\x98y\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4R'\\x80\\x00\\x00\\x00o\\x1A\\x0E:\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p5I-\\x80\\x00\\x00\\x00@\\x5C`k\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02W\\xE0l\\x84\\x80\\x00\\x00\\x00\\x8A\\xB36E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\xA1Q\\xDB\\x80\\x00\\x00\\x00i\\xCA\\xEF\\x15\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x07\\x15\\x05OTHER\\x00\\x80\\x00\\x00\\x02YJ\\x93B\\x80\\x00\\x00\\x00\\x89*\\xE4\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD5\\xB7\\xB6\\x80\\x00\\x00\\x00fol\\x13\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01a\\x03\\xB2\\xD2\\x80\\x00\\x00\\x00h\\xDE\\xBE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xDD\\x8Dv\\x80\\x00\\x00\\x00\\x89\\xDFBB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x08~\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x02&\\xBD\\x82\\x85\\x80\\x00\\x00\\x00we\\xBF\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x14\\x06.\\x80\\x00\\x00\\x00i\\x0D\\xE9=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02`v\\xD9g\\x80\\x00\\x00\\x00\\x8B\\xB8Pg\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xDD\\x84\\x80\\x00\\x00\\x00\\x8A\\x070\\xA3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02b\\xC7N\\xF7\\x80\\x00\\x00\\x00\\x96\\x06\\x1BW\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD2!\\x80\\x00\\x00\\x00oJ\\x14\\xF1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x04\\x7F1DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDE/\\xEA\\x80\\x00\\x00\\x00\\x89\\xF0F0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p,2b\\x80\\x00\\x00\\x00h\\xDE\\xEA\\xFA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01a\\x01%\\xB7\\x80\\x00\\x00\\x00hm-=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00b\\xB9\\x80\\x00\\x0Eb\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x81\\x1E\\xE1\\x80\\x00\\x00\\x00fo \\xB8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xD0DESKTOP\\x00\\x80\\x00\\x00\\x02ArO\\xB3\\x80\\x00\\x00\\x00\\x80\\x0C\\x8F\\xDD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x05\\xD8\\xCCDESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xA0\\x80\\x00\\x00\\x00\\x9EHL\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBC`=T\\x80\\x00\\x00\\x00WzW\\xB1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x17c{$\\x80\\x00\\x00\\x00q\\xE2\\x0C\\x82\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x14e\\xF8z\\x80\\x00\\x00\\x00q\\x0B;I\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15oZ\\x10\\x80\\x00\\x00\\x00qM\\xDA\\xCD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B$1\\x08\\x80\\x00\\x00\\x00s`\\xFB\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x16\\xF5\\xCB\\xE5\\x80\\x00\\x00\\x00q\\xC6\\x94\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x07\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!e\\xBF\\xBC\\x80\\x00\\x00\\x00u\\xAC\\x9E(\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0D\\xEF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x5CX\\xE7\\xAA\\x80\\x00\\x00\\x00\\x8A,\\xA1w\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xF3\\x0D\\xB7V\\x80\\x00\\x00\\x00\\x02\\x92\\xF5a\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E\\x1C\\x80\\x02\\xC0\\x0CMOBILE\\x00\\x80\\x00\\x00\\x00xn\\xF7\\xCE\\x80\\x00\\x00\\x00\\x0C\\xE3\\x89\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02,cs\\xD9\\x80\\x00\\x00\\x00\\x9B\\xC2_\\xFF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E.\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&iqb\\x80\\x00\\x00\\x00w#\\x17f\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02+\\xE3\\xB6\\xC5\\x80\\x00\\x00\\x00yL9\\xCF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02+\\xD2\\xD1\\x93\\x80\\x00\\x00\\x00yL }\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09J\\x80\\x00\\x0Ec\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&z*\\xE8\\x80\\x00\\x00\\x00w'\\x9EV\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&`\\xC0G\\x80\\x00\\x00\\x00w!}\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E\\x10\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00w\\xD9\\x86\\x03\\x80\\x00\\x00\\x00\\x0C\\xDB\\x93\\xE5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02&\\x02\\xE2\\x06\\x80\\x00\\x00\\x00\\x99k\\xF2\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E)\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&h\\x0B~\\x80\\x00\\x00\\x00w\"\\xDE\\x07\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E2\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02&g*\\xD2\\x80\\x00\\x00\\x00w\"\\xA9\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x09S\\x80\\x00\\x0E>\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\x97\\x5C$\\x80\\x00\\x00\\x00y\\x0B\\xDD0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02%\\xDA\\x9DM\\x80\\x00\\x00\\x00,\\x8F\\xAE\\xD9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xF3TABLET\\x00\\x80\\x00\\x00\\x00\\xF9v\\x14\\x07\\x80\\x00\\x00\\x00#g+\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xE1U\\xA3Y\\x80\\x00\\x00\\x00\\x80\\x04\\xCC\\xE6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00$[\\x80\\x00\\x0D\\xE4\\x80\\x04\\xF7\\x13DESKTOP\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2o\\x08\\x80\\x00\\x00\\x00N\\xFE\\xBA\\x9F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00$[\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x8A\\xC2\\xED\\x08\\x80\\x00\\x00\\x00\\x98c\\x90\\xC1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDE\\x80\\x01i\\xF6DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC2g\\x19l\\x80\\x00\\x00\\x00\\x86\\x06\\xB4\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\x07#[j\\x80\\x00\\x00\\x00Y\\x08u\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3U\\x88\\xFF\\x80\\x00\\x00\\x00\\x84\\xBFJ\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xCF3%\\x7F\\x80\\x00\\x00\\x00\\x98\\xFE\\xAA\\x16\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xCB\\xA6K\\x9C\\x80\\x00\\x00\\x00`\\xBAL\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E2\\x80\\x01i\\xF6MOBILE\\x00\\x80\\x00\\x00\\x00\\xD4\\x7Fu*\\x80\\x00\\x00\\x00a\\x0Er1\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E?\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x8B\\x85\\x18E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x03\\x8B\\x0D\\xEC"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0EL\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xE1\\x7F\\xFD\\x03\\x80\\x00\\x00\\x00K\\x89\\xC74\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xE4\\x80\\x00\\x0E]\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD6\\xC5\\x06\\xA9\\x80\\x00\\x00\\x00\\x9F\\xE2\\x84\\xF3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0D\\xE0\\x80\\x01i\\xECTABLET\\x00\\x80\\x00\\x00\\x01\\xC3\\x1D\\xE7~\\x80\\x00\\x00\\x00\\x87\\xD8\\x06\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E\\x09\\x80\\x01i\\xECDESKTOP\\x00\\x80\\x00\\x00\\x00\\x94\\xE2i2\\x80\\x00\\x00\\x00]:b\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x028\\xC62Z\\x80\\x00\\x00\\x00\\x98\\xD1\\x5C\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xB5 r.\\x80\\x00\\x00\\x00\\x98\\xD0\\xCE\\x98\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E9\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x17\\xAE\\x05\\xA0\\x80\\x00\\x00\\x00\\x9E[n~\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0EI\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02\\x007~\\x0A\\x80\\x00\\x00\\x00\\x9F\\xA8\\xE5\\xF0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0EU\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\x94\\xD9k\\x13\\x80\\x00\\x00\\x00\\x9F\\xA8\\xF7N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00)\\xEF\\x80\\x00\\x0E`\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02\\x00,\\xDC\\xC9\\x80\\x00\\x00\\x00\\x9EE\\xE3_\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00=\\x96\\x80\\x00\\x0E!\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xF3\\xA9T\\x13\\x80\\x00\\x00\\x00\\x91\\x0F\\xD7\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0Du\\x80\\x02p DESKTOP\\x00\\x80\\x00\\x00\\x01yF\\xB19\\x80\\x00\\x00\\x00,\\xE6\\x8CZ\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01$:\\x15\\x05\\x80\\x00\\x00\\x00\\x8B\\xED\\xB2\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0E+\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01\\xD77n\\xFC\\x80\\x00\\x00\\x00\\x8B\\xEBP\\xEF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0E3\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x01\\xD77t.\\x80\\x00\\x00\\x00Y\\x0EJp\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0E;\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02;F\\xC9\\xE8\\x80\\x00\\x00\\x00\\x89\\xEA\\xF1\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0EC\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01f4\\x03F\\x80\\x00\\x00\\x00\\x89\\xEA\\xD7\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0EJ\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01\\xC3B\\x98\\xBC\\x80\\x00\\x00\\x00X\\xB04[\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00IN\\x80\\x00\\x0EP\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02CX\\xAA\\x80\\x80\\x00\\x00\\x00\\x90=3j\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x07\\x15\\x04TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xC6\\x092\\x80\\x00\\x00\\x00E^\\x134\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x80\\xBA\\xB9\\x80\\x00\\x00\\x00fp\\x17\\xE9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x8D\\x1C?\\x80\\x00\\x00\\x00i\\x0Ea3\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02 \\x10\\xC9\\x0F\\x80\\x00\\x00\\x00<\\x0C\\x1E\\x1D\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`\\x99J_\\x80\\x00\\x00\\x00i\\xCAu\\xD7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y41\\xB9\\x80\\x00\\x00\\x00X\\xCC~\\xE8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x8B\\xEB\\x08\\x80\\x00\\x00\\x00i\\x0Er\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA1\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`1&\\x81\\x80\\x00\\x00\\x00;\\xF4\\x8Ea\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX\\xFB\\x98\\x80\\x00\\x00\\x00o\\x1Dg\\xF2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`{\\x1C\\x8C\\x80\\x00\\x00\\x00<\\x03\\x14d\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC5\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02J\\xD5\\xAE\\xDA\\x80\\x00\\x00\\x00o\\x13\\xCE\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCD\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x0CXl\\x8D\\x80\\x00\\x00\\x00o\\x14\\xBBx\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xDD\\xC3_\\x80\\x00\\x00\\x00\\x8A\\x07\\x061\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02`v\\xEB\\x9B\\x80\\x00\\x00\\x00\\x8B\\xB8R\\xF7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEA\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02y\\x19\\x00\\xF9\\x80\\x00\\x00\\x00oIJ\\xF9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF4\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02K.\\xEB\\x8E\\x80\\x00\\x00\\x00h\\xDF$\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xCC\\xF7\\x80\\x00\\x00\\x00\\x89\\xDF\\xA0N\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01e\\x5C-g\\x80\\x00\\x00\\x00\\x8C\\xC5\\x10Z\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x12\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xDC\\xEB\\x80\\x00\\x00\\x00\\x89\\xE1ch\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01h\\x14\\x1A\\xE2\\x80\\x00\\x00\\x00i\\x0D\\xD3\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E\"\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02s\\x14m\\xAD\\x80\\x00\\x00\\x00\\x95Q\\xEE\\x9E\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x88\\xF7_\\x80\\x00\\x00\\x00i\\xA6\\x82#\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0D\\x94\\x80\\x00\\x00\\x00\\x89\\xE9\\xB9*\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EG\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE1\\xF6+\\x80\\x00\\x00\\x00\\x89\\xEA\\x96X\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EN\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02YM\\x9C\\x0A\\x80\\x00\\x00\\x00\\x8A\\xB9k\\x8B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00b\\xB9\\x80\\x00\\x0EY\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02YFK4\\x80\\x00\\x00\\x00\\x8A\\x1Ed\\xFC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0D\\xE0\\x80\\x05\\xD8\\xCATABLET\\x00\\x80\\x00\\x00\\x01\\xB8\\xE93y\\x80\\x00\\x00\\x00V\\xB7)\\x1A\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0E&\\x80\\x05\\xD8\\xD2DESKTOP\\x00\\x80\\x00\\x00\\x02Ar3\\x10\\x80\\x00\\x00\\x00\\x9B\\xF3\\xE5\\xCE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x08\\x80\\x00w\\x9C\\x80\\x00\\x0EN\\x80\\x05\\xD3\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x02(\\xA6\\xFFl\\x80\\x00\\x00\\x00x\\xEC\\xED\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x1A}\\x80\\x00\\x0E^\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01\\x05\\xBDY\\xC4\\x80\\x00\\x00\\x00%\\xC7\\x96d\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x1A}\\x80\\x00\\x0Ec\\x80\\x03\\x8B\\xFATABLET\\x00\\x80\\x00\\x00\\x02+\\xD0Dg\\x80\\x00\\x00\\x00bn\\xEB\\x03\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00$\\x0F\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01}\\xA1\\x89\\x11\\x80\\x00\\x00\\x00C\\xCC\\xDB\\x1C\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00$[\\x80\\x00\\x0D\\xEB\\x80\\x04\\xF7\\x19TABLET\\x00\\x80\\x00\\x00\\x01\\x1E\\xF9\\xEE\\x12\\x80\\x00\\x00\\x00n\\xE5\\xCF\\xFD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00$[\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02t\\xEE\\xA8y\\x80\\x00\\x00\\x00\\x98\\x19X7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0D\\xDF\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x00\\xD3UgN\\x80\\x00\\x00\\x00\\x8D\\x8A\\xBC\\x90\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x00\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x00\\xC3?\\xAC\\x13\\x80\\x00\\x00\\x00\\x8D]\\xE3\\xE2\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00)\\xE4\\x80\\x00\\x0E\\x17\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x00\\xCE\\xEE\\x03\\xB0\\x80\\x00\\x00\\x00\\x97\\xE2W\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Di\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\x82\\xB3B\\xB6\\x80\\x00\\x00\\x00ER?\\xE0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Dr\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x7F*\\xFC\\x80\\x00\\x00\\x00<\\x03\\xE9\\x8F\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Dx\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01`\\x7F\\xFBo\\x80\\x00\\x00\\x00<\\x03\\xFA\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x7F\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0Cd\\x04\\xA9\\x80\\x00\\x00\\x00~J\\xAE]\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x86\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x028\\xCC:w\\x80\\x00\\x00\\x00}\\xA2$\\xB7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x8E\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01y\\x15\\x1E{\\x80\\x00\\x00\\x00PS\\xF9\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\x97\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`|\\x0A\\xE8\\x80\\x00\\x00\\x00<\\x03:\\x02\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xA0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02J\\xD4FC\\x80\\x00\\x00\\x00o\\x15k\\xFB\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xAA\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x02C\\xAD\\x86\\x80\\x00\\x00\\x00k\\x1E(t\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xBD\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x0CX][\\x80\\x00\\x00\\x00~J\\x83=\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xC6\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\xB0\\x84>\\x80\\x00\\x00\\x00<\\x0C1\\xE7\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xCE\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02J\\xD4D\\x5C\\x80\\x00\\x00\\x00o\\x18n \\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xD6\\x80\\x00\\x00\\x01OTHER\\x00\\x80\\x00\\x00\\x02YJ\\xF25\\x80\\x00\\x00\\x00\\x89*\\xE8|\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xE0\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02Y\\xD6\\xDBa\\x80\\x00\\x00\\x00\\x89\\xE9\\xC4\\xE6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xEB\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01p*\\x8E\\xE3\\x80\\x00\\x00\\x00@Y\\x9D\\xCA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x87\\x09x\\x80\\x00\\x00\\x00fo\\x83\\xD5\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0D\\xFE\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01e\\x5C<\\x14\\x80\\x00\\x00\\x00=\\x92\\xA6@\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x08\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02YP\\x13\\xDF\\x80\\x00\\x00\\x00\\x89*\\xD0t\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x13\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x01`\\x9F\"\\xE3\\x80\\x00\\x00\\x00i\\xC9\\x85c\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E\\x1A\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE2\\x16&\\x80\\x00\\x00\\x00\\x89\\xE0\\xE7\\xB9\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E#\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02Y\\xE0\\xBC\\xDC\\x80\\x00\\x00\\x00\\x8A\\x0F6\\xAF\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E,\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02y\\x8B\\xD7\\x83\\x80\\x00\\x00\\x00o\\x1B\\xCE\\xC0\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01`y\\xFE*\\x80\\x00\\x00\\x00<\\x03\\x048\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0EB\\x80\\x07\\x15\\x08DESKTOP\\x00\\x80\\x00\\x00\\x02Y\"\\x1B%\\x80\\x00\\x00\\x00\\x8A\\x1B\\xD0s\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0EK\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02@\\xD9\\x9FC\\x80\\x00\\x00\\x00<\\x03\\x9CC\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0ET\\x80\\x00\\x00\\x01MOBILE\\x00\\x80\\x00\\x00\\x01d\\xE0#\\xD0\\x80\\x00\\x00\\x00h\\xDF\\x19\\x18\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - 
Bytes.toBytesBinary("\\x09\\x80\\x00b\\xB9\\x80\\x00\\x0Ea\\x80\\x07\\x15\\x08DESKTOP\\x00\\x80\\x00\\x00\\x01\\x82\\xC5\\x8D\\x9E\\x80\\x00\\x00\\x00yM\\xD8\\x1B\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00w\\x9C\\x80\\x00\\x0E\\x19\\x80\\x05\\xD8\\xD1TABLET\\x00\\x80\\x00\\x00\\x01\\xC9@\\xE7\\x99\\x80\\x00\\x00\\x00\\x98\\x9AJ\\xAA\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00w\\x9C\\x80\\x00\\x0E;\\x80\\x05\\xD8\\xCADESKTOP\\x00\\x80\\x00\\x00\\x01\\xB8\\xC8\\x96\\xB2\\x80\\x00\\x00\\x00\\x9EHL\\x14\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00w\\xD8\\x80\\x00\\x0E_\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x01\\xBC[\\xC7\\x1E\\x80\\x00\\x00\\x00u\\xD7\\xC8\\xC6\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xE2\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x15lSk\\x80\\x00\\x00\\x00qM\\xC2\\x95\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0D\\xF5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x13\\x10\\x14!\\x80\\x00\\x00\\x00p\\x84\\x82\\xBD\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x06\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x15ZF\\xD4\\x80\\x00\\x00\\x00qLA\\xFE\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E\\x16\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02\\x1B`\\x9Fw\\x80\\x00\\x00\\x00s\\x81d\\x06\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E%\\x80\\x00\\x00\\x01TABLET\\x00\\x80\\x00\\x00\\x02\\x1B\\x16m\\xF0\\x80\\x00\\x00\\x00s[\\xB0\\xF8\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - Bytes.toBytesBinary("\\x09\\x80\\x00\\x8Aj\\x80\\x00\\x0E5\\x80\\x00\\x00\\x01DESKTOP\\x00\\x80\\x00\\x00\\x02!e\\xD4\\x14\\x80\\x00\\x00\\x00u\\xAC\\xA1\\xF4\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), - }; + @Test + public void testAllIntersect() throws Exception { + testIntersect(REGION_BOUNDARIES_ALL, GUIDE_POSTS_ALL); + } - - @BeforeClass - @Shadower(classBeingShadowed = BaseConnectionlessQueryTest.class) - public static void doSetup() throws Exception { - Map props = Maps.newHashMapWithExpectedSize(1); - // enables manual splitting on salted tables - props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); - initDriver(new ReadOnlyProps(props.entrySet().iterator())); - } - - @Test - public void testRangeIntersect() throws Exception { - testIntersect(REGION_BOUNDARIES_MINIMAL, GUIDE_POSTS_MINIMAL); + private void testIntersect(byte[][] regionBoundaries, byte[][] guidePosts) throws Exception { + String ddl = "create table PERF.BIG_OLAP_DOC (\n" + + "client_id integer not null\n" + + ",customer_id integer\n" + + ",time_id integer not null\n" + + ",conversion_type_id integer not null\n" + + ",device_type varchar(16)\n" + + ",keyword_id bigint not null\n" + + ",creative_id bigint not null\n" + + ",placement_id bigint not null\n" + + ",product_target_id bigint 
not null\n" + + ",network varchar(7)\n" + + ",impressions decimal(18, 4)\n" + + ",publisher_clicks decimal(18, 4)\n" + + ",publisher_cost decimal(18, 4)\n" + + ",conversions decimal(18, 4)\n" + + ",revenue decimal(18, 4)\n" + + " constraint perf_fact_pk primary key (client_id, time_id, conversion_type_id, device_type, keyword_id, creative_id, placement_id, product_target_id))SALT_BUCKETS=10"; + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + StringBuilder ddlBuf = new StringBuilder(ddl + " SPLIT ON ("); + for (int i = 0; i < regionBoundaries.length; i++) { + ddlBuf.append("?,"); } - - @Test - public void testAllIntersect() throws Exception { - testIntersect(REGION_BOUNDARIES_ALL, GUIDE_POSTS_ALL); + ddlBuf.setCharAt(ddlBuf.length() - 1, ')'); + ; + PreparedStatement stmt = conn.prepareStatement(ddlBuf.toString()); + int i = 1; + for (byte[] boundary : regionBoundaries) { + stmt.setBytes(i++, boundary); } - - private void testIntersect(byte[][] regionBoundaries, byte[][] guidePosts) throws Exception { - String ddl = "create table PERF.BIG_OLAP_DOC (\n" + - "client_id integer not null\n" + - ",customer_id integer\n" + - ",time_id integer not null\n" + - ",conversion_type_id integer not null\n" + - ",device_type varchar(16)\n" + - ",keyword_id bigint not null\n" + - ",creative_id bigint not null\n" + - ",placement_id bigint not null\n" + - ",product_target_id bigint not null\n" + - ",network varchar(7)\n" + - ",impressions decimal(18, 4)\n" + - ",publisher_clicks decimal(18, 4)\n" + - ",publisher_cost decimal(18, 4)\n" + - ",conversions decimal(18, 4)\n" + - ",revenue decimal(18, 4)\n" + - " constraint perf_fact_pk primary key (client_id, time_id, conversion_type_id, device_type, keyword_id, creative_id, placement_id, product_target_id))SALT_BUCKETS=10"; - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - StringBuilder ddlBuf = new StringBuilder(ddl + " SPLIT ON ("); - for (int i = 0; i < regionBoundaries.length; i++) { - ddlBuf.append("?,"); - } - ddlBuf.setCharAt(ddlBuf.length()-1, ')');; - PreparedStatement stmt = conn.prepareStatement(ddlBuf.toString()); - int i = 1; - for (byte[] boundary : regionBoundaries) { - stmt.setBytes(i++, boundary); - } - stmt.execute(); - - final PTable table = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, "PERF.BIG_OLAP_DOC")); - GuidePostsInfoBuilder gpWriter = new GuidePostsInfoBuilder(); - for (byte[] gp : guidePosts) { - gpWriter.trackGuidePost(new ImmutableBytesWritable(gp), 1000, 0, 0); - } - GuidePostsInfo info = gpWriter.build(); - PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); - pConn.addTable(table, System.currentTimeMillis()); - ((ConnectionlessQueryServicesImpl) pConn.getQueryServices()) - .addTableStats(new GuidePostsKey(table.getName().getBytes(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES), info); + stmt.execute(); - String query = "SELECT count(1) cnt,\n" + - " coalesce(SUM(impressions), 0.0) AS \"impressions\",\n" + - " coalesce(SUM(publisher_clicks), 0.0) AS \"pub_clicks\",\n" + - " coalesce(SUM(publisher_cost), 0.0) AS \"pub_cost\",\n" + - " coalesce(SUM(conversions), 0.0) AS \"conversions\",\n" + - " coalesce(SUM(revenue), 0.0) AS \"revenue\" \n" + - " FROM perf.big_olap_doc\n" + - " WHERE time_id between 3000 and 3700\n" + - " AND network in ('SEARCH')\n" + - " AND conversion_type_id = 1\n" + - " AND client_id = 10724\n" + - " AND device_type in 
('MOBILE','DESKTOP','OTHER','TABLET')\n" + - " AND keyword_id in (\n" + - "613214369, 613217307, 613247509, 613248897, 613250382, 613250387, 613252322, 613260252, 613261753, 613261754, 613261759, \n" + - "613261770, 613261873, 613261884, 613261885, 613261888, 613261889, 613261892, 613261897, 613261913, 613261919, 613261927, \n" + - "614496021, 843606367, 843606967, 843607021, 843607033, 843607089, 1038731600, 1038731672, 1038731673, 1038731675, \n" + - "1038731684, 1038731693, 1046990487, 1046990488, 1046990499, 1046990505, 1046990506, 1049724722, 1051109548, 1051311275, \n" + - "1051311904, 1060574377, 1060574395, 1060574506, 1060574562, 1115915938, 1115915939, 1115915941, 1116310571, 1367495544, \n" + - "1367495545, 1367497297, 1367497298, 1367497299, 1367497300, 1367497303, 1367497313, 1367497813, 1367497816, 1367497818, \n" + - "1367497821, 1367497822, 1367497823, 1624976423, 1624976451, 1624976457, 3275636061, 3275640505, 3275645765, 3275645807, \n" + - "3275649138, 3275651456, 3275651460, 3275651478, 3275651479, 3275654566, 3275654568, 3275654570, 3275654575, 3275659612, \n" + - "3275659616, 3275659620, 3275668880, 3275669693, 3275675627, 3275675634, 3275677479, 3275677504, 3275678855, 3275679524, \n" + - "3275679532, 3275680014, 3275682307, 3275682308, 3275682309, 3275682310, 3275682420, 3275682423, 3275682436, 3275682448, \n" + - "3275682460, 3275682462, 3275682474, 3275684831, 3275688903, 3275694023, 3275694025, 3275694027, 3275695054, 3275695056,\n" + - "3275695062, 3275699512, 3275699514, 3275699518, 3275701682, 3275701683, 3275701685, 3275701688, 3275703633, 3275703634, \n" + - "3275703635, 3275703636, 3275703638, 3275703639, 3275704860, 3275704861, 3275764577, 3275797149, 3275798566, 3275798567, \n" + - "3275798568, 3275798592, 3275931147, 3275942728, 3275945337, 3275945338, 3275945339, 3275945340, 3275945342, 3275945344, \n" + - "3275946319, 3275946322, 3275946324, 3275946643, 3275949495, 3275949498, 3275949500, 3275950250, 3275955128, 3275955129, \n" + - "3275955130, 3427017435, 3427017450, 3438304254, 3438304257, 3447068169, 3505227849, 3505227890, 3505556908, 3506351285, \n" + - "3506351389, 3506351398, 3506351468, 3510037138, 3510038610, 3545590644, 3545594378, 3545595073, 3545595318, 3545595506, \n" + - "3545597841, 3545598818, 3545599658, 3545599663, 3545601215, 3556080898, 3556080980, 3556080999, 3556081323, 3565122663, \n" + - "3565122679, 3565122801, 3565122858, 3565122908, 3565122929, 3565122952, 3565122984, 3565123028, 3565123047, 3565123048, \n" + - "3565123203, 3565123230, 3949988054, 3949988056, 3949988070, 3972992248, 3972992252, 3972992254, 3972992257, 3972992263, \n" + - "3972992267, 3972992268, 3972992269, 3972992270, 3972992274, 3972992275, 3972992277, 3972992281, 3972992293, 3972992298, \n" + - "3972992299, 3972992305, 3972992307, 3972992313, 3972992316, 3972992322, 3972992338, 3978471261, 3978471272, 4266318185, \n" + - "4298107404, 4308853119, 4308853123, 4308853500, 4451174646, 4451174656, 4451174701, 4569827278, 4569827284, 4569827287, \n" + - "4569827379, 4569827523, 4569827524, 4896589676, 4979049725, 5054587609, 5136433884, 5362640372, 5393109964, 5393405364, \n" + - "5393405365, 5393405620, 5393405625, 5393405675, 5393405677, 5393405858, 5393405970)"; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); - plan.iterator(); + final PTable table = + conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, "PERF.BIG_OLAP_DOC")); + GuidePostsInfoBuilder gpWriter = new GuidePostsInfoBuilder(); + for 
(byte[] gp : guidePosts) { + gpWriter.trackGuidePost(new ImmutableBytesWritable(gp), 1000, 0, 0); } + GuidePostsInfo info = gpWriter.build(); + PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); + pConn.addTable(table, System.currentTimeMillis()); + ((ConnectionlessQueryServicesImpl) pConn.getQueryServices()).addTableStats( + new GuidePostsKey(table.getName().getBytes(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES), + info); + + String query = "SELECT count(1) cnt,\n" + + " coalesce(SUM(impressions), 0.0) AS \"impressions\",\n" + + " coalesce(SUM(publisher_clicks), 0.0) AS \"pub_clicks\",\n" + + " coalesce(SUM(publisher_cost), 0.0) AS \"pub_cost\",\n" + + " coalesce(SUM(conversions), 0.0) AS \"conversions\",\n" + + " coalesce(SUM(revenue), 0.0) AS \"revenue\" \n" + " FROM perf.big_olap_doc\n" + + " WHERE time_id between 3000 and 3700\n" + " AND network in ('SEARCH')\n" + + " AND conversion_type_id = 1\n" + " AND client_id = 10724\n" + + " AND device_type in ('MOBILE','DESKTOP','OTHER','TABLET')\n" + " AND keyword_id in (\n" + + "613214369, 613217307, 613247509, 613248897, 613250382, 613250387, 613252322, 613260252, 613261753, 613261754, 613261759, \n" + + "613261770, 613261873, 613261884, 613261885, 613261888, 613261889, 613261892, 613261897, 613261913, 613261919, 613261927, \n" + + "614496021, 843606367, 843606967, 843607021, 843607033, 843607089, 1038731600, 1038731672, 1038731673, 1038731675, \n" + + "1038731684, 1038731693, 1046990487, 1046990488, 1046990499, 1046990505, 1046990506, 1049724722, 1051109548, 1051311275, \n" + + "1051311904, 1060574377, 1060574395, 1060574506, 1060574562, 1115915938, 1115915939, 1115915941, 1116310571, 1367495544, \n" + + "1367495545, 1367497297, 1367497298, 1367497299, 1367497300, 1367497303, 1367497313, 1367497813, 1367497816, 1367497818, \n" + + "1367497821, 1367497822, 1367497823, 1624976423, 1624976451, 1624976457, 3275636061, 3275640505, 3275645765, 3275645807, \n" + + "3275649138, 3275651456, 3275651460, 3275651478, 3275651479, 3275654566, 3275654568, 3275654570, 3275654575, 3275659612, \n" + + "3275659616, 3275659620, 3275668880, 3275669693, 3275675627, 3275675634, 3275677479, 3275677504, 3275678855, 3275679524, \n" + + "3275679532, 3275680014, 3275682307, 3275682308, 3275682309, 3275682310, 3275682420, 3275682423, 3275682436, 3275682448, \n" + + "3275682460, 3275682462, 3275682474, 3275684831, 3275688903, 3275694023, 3275694025, 3275694027, 3275695054, 3275695056,\n" + + "3275695062, 3275699512, 3275699514, 3275699518, 3275701682, 3275701683, 3275701685, 3275701688, 3275703633, 3275703634, \n" + + "3275703635, 3275703636, 3275703638, 3275703639, 3275704860, 3275704861, 3275764577, 3275797149, 3275798566, 3275798567, \n" + + "3275798568, 3275798592, 3275931147, 3275942728, 3275945337, 3275945338, 3275945339, 3275945340, 3275945342, 3275945344, \n" + + "3275946319, 3275946322, 3275946324, 3275946643, 3275949495, 3275949498, 3275949500, 3275950250, 3275955128, 3275955129, \n" + + "3275955130, 3427017435, 3427017450, 3438304254, 3438304257, 3447068169, 3505227849, 3505227890, 3505556908, 3506351285, \n" + + "3506351389, 3506351398, 3506351468, 3510037138, 3510038610, 3545590644, 3545594378, 3545595073, 3545595318, 3545595506, \n" + + "3545597841, 3545598818, 3545599658, 3545599663, 3545601215, 3556080898, 3556080980, 3556080999, 3556081323, 3565122663, \n" + + "3565122679, 3565122801, 3565122858, 3565122908, 3565122929, 3565122952, 3565122984, 3565123028, 3565123047, 3565123048, \n" + + "3565123203, 3565123230, 3949988054, 3949988056, 
3949988070, 3972992248, 3972992252, 3972992254, 3972992257, 3972992263, \n" + + "3972992267, 3972992268, 3972992269, 3972992270, 3972992274, 3972992275, 3972992277, 3972992281, 3972992293, 3972992298, \n" + + "3972992299, 3972992305, 3972992307, 3972992313, 3972992316, 3972992322, 3972992338, 3978471261, 3978471272, 4266318185, \n" + + "4298107404, 4308853119, 4308853123, 4308853500, 4451174646, 4451174656, 4451174701, 4569827278, 4569827284, 4569827287, \n" + + "4569827379, 4569827523, 4569827524, 4896589676, 4979049725, 5054587609, 5136433884, 5362640372, 5393109964, 5393405364, \n" + + "5393405365, 5393405620, 5393405625, 5393405675, 5393405677, 5393405858, 5393405970)"; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query); + plan.iterator(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java index 1e5314d264b..a2b9ddc0f41 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterIntersectTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,579 +33,584 @@ import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** * Test for intersect method in {@link SkipScanFilter} */ @RunWith(Parameterized.class) public class SkipScanFilterIntersectTest { - private final SkipScanFilter filter; - private final byte[] lowerInclusiveKey; - private final byte[] upperExclusiveKey; - private final List> expectedNewSlots; + private final SkipScanFilter filter; + private final byte[] lowerInclusiveKey; + private final byte[] upperExclusiveKey; + private final List> expectedNewSlots; + + public SkipScanFilterIntersectTest(List> slots, RowKeySchema schema, + byte[] lowerInclusiveKey, byte[] upperExclusiveKey, List> expectedNewSlots) { + this.filter = new SkipScanFilter(slots, schema, false); + this.lowerInclusiveKey = lowerInclusiveKey; + this.upperExclusiveKey = upperExclusiveKey; + this.expectedNewSlots = expectedNewSlots; + } - public SkipScanFilterIntersectTest(List> slots, RowKeySchema schema, byte[] lowerInclusiveKey, - byte[] upperExclusiveKey, List> expectedNewSlots) { - this.filter = new SkipScanFilter(slots, schema, false); - this.lowerInclusiveKey = lowerInclusiveKey; - this.upperExclusiveKey = upperExclusiveKey; - this.expectedNewSlots = expectedNewSlots; + @Test + public void test() { + SkipScanFilter intersectedFilter = filter.intersect(lowerInclusiveKey, upperExclusiveKey); + if (expectedNewSlots == null && intersectedFilter == null) { + return; } + 
assertNotNull("Intersected filter should not be null", intersectedFilter); + List> newSlots = intersectedFilter.getSlots(); + assertSameSlots(expectedNewSlots, newSlots); + } - @Test - public void test() { - SkipScanFilter intersectedFilter = filter.intersect(lowerInclusiveKey, upperExclusiveKey); - if (expectedNewSlots == null && intersectedFilter == null) { - return; - } - assertNotNull("Intersected filter should not be null", intersectedFilter); - List> newSlots = intersectedFilter.getSlots(); - assertSameSlots(expectedNewSlots, newSlots); + private void assertSameSlots(List> expectedSlots, List> slots) { + assertEquals(expectedSlots.size(), slots.size()); + for (int i = 0; i < expectedSlots.size(); i++) { + List expectedSlot = expectedSlots.get(i); + List slot = slots.get(i); + assertEquals("index: " + i, expectedSlot.size(), slot.size()); + for (int j = 0; j < expectedSlot.size(); j++) { + KeyRange expectedRange = expectedSlot.get(j); + KeyRange range = slot.get(j); + assertArrayEquals(expectedRange.getLowerRange(), range.getLowerRange()); + assertArrayEquals(expectedRange.getUpperRange(), range.getUpperRange()); + assertEquals(expectedRange.isLowerInclusive(), range.isLowerInclusive()); + assertEquals(expectedRange.isUpperInclusive(), range.isUpperInclusive()); + } } + } + + @Parameters(name = "{0} {4}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + // Both ranges in second slot are required b/c first slot contains range and upper/lower + // values differ in this slot position. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE + .getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("j3A"), Bytes.toBytes("k4C"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } })); + // Only second range in second slot is required b/c though first slot contains range, + // upper/lower values do not differ in this slot position. 
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE + .getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("j3A"), Bytes.toBytes("j4C"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } })); + // Test case exercising repositioning multiple times (initially to slot #2 and then again + // to slot #4). Because there's a range for slot #4 and the lower/upper values are + // different, + // all slot #5 ranges are part of the intersection. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("z"), true, Bytes.toBytes("z"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1, 1, 1 }, Bytes.toBytes("bkCpM"), Bytes.toBytes("bkCtD"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), true, + SortOrder.ASC), } })); + // Single matching in the first 2 slots. 
+ testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("b1B"), Bytes.toBytes("b1C"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } })); + // Single matching in the first slot. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("b1Z"), Bytes.toBytes("b3Z"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + // No overlap + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a1I"), Bytes.toBytes("a2A"), null)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), 
true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1B"), null)); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1C"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1D"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1D"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), 
true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("b1B"), Bytes.toBytes("b1D"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1F"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("b3Z"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("b9Z"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + // Multiple matching in all slot. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("c3Z"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE + .getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, 
Bytes.toBytes("a0A"), Bytes.toBytes("f4F"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + // VARCHAR as the last column, various cases. + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, -1 }, Bytes.toBytes("d3AA"), Bytes.toBytes("d4FF"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC) } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, -1 }, Bytes.toBytes("d0AA"), Bytes.toBytes("d4FF"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, -1 }, Bytes.toBytes("a0AA"), Bytes.toBytes("f4FF"), + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), true, + SortOrder.ASC), } })); + return testCases; + } - private void assertSameSlots(List> expectedSlots, List> slots) { - assertEquals(expectedSlots.size(), slots.size()); - for (int i=0; i expectedSlot = expectedSlots.get(i); - List slot = slots.get(i); - assertEquals("index: " + i, expectedSlot.size(), slot.size()); - for (int j=0; j foreach(KeyRange[][] ranges, int[] widths, byte[] lowerInclusive, + byte[] upperExclusive, KeyRange[][] expectedRanges) { + List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + List> expectedSlots = expectedRanges == null + ? null + : Lists.transform(Lists.newArrayList(expectedRanges), ARRAY_TO_LIST); + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); + for (final int width : widths) { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return width <= 0; } - } - @Parameters(name = "{0} {4}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // Both ranges in second slot are required b/c first slot contains range and upper/lower - // values differ in this slot position. - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), - false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("j3A"), Bytes.toBytes("k4C"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } })); - // Only second range in second slot is required b/c though first slot contains range, - // upper/lower values do not differ in this slot position. 
- testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("e"), - false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("j3A"), Bytes.toBytes("j4C"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } })); - // Test case exercising repositioning multiple times (initially to slot #2 and then again - // to slot #4). Because there's a range for slot #4 and the lower/upper values are - // different, - // all slot #5 ranges are part of the intersection. - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("d"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), - false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("z"), true, Bytes.toBytes("z"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1, 1, 1 }, Bytes.toBytes("bkCpM"), Bytes.toBytes("bkCtD"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("j"), true, Bytes.toBytes("m"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("C"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("m"), true, Bytes.toBytes("u"), - false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("D"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("M"), true, Bytes.toBytes("M"), - true, SortOrder.ASC), } })); - // Single matching in the first 2 slots. 
- testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("b1B"), Bytes.toBytes("b1C"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } })); - // Single matching in the first slot. - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("b1Z"), Bytes.toBytes("b3Z"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - // No overlap - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a1I"), Bytes.toBytes("a2A"), null)); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - 
true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1B"), null)); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1C"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1D"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1D"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, 
Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("b1B"), Bytes.toBytes("b1D"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("d"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("3"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0A"), Bytes.toBytes("b1F"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("b3Z"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("b9Z"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - // Multiple matching in all slot. - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, Bytes.toBytes("a0Z"), Bytes.toBytes("c3Z"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, 1 }, 
Bytes.toBytes("a0A"), Bytes.toBytes("f4F"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - // VARCHAR as the last column, various cases. - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, -1 }, Bytes.toBytes("d3AA"), Bytes.toBytes("d4FF"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC) } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, -1 }, Bytes.toBytes("d0AA"), Bytes.toBytes("d4FF"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), true, - SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - testCases.addAll(foreach( - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } }, - new int[] { 1, 1, -1 }, Bytes.toBytes("a0AA"), Bytes.toBytes("f4FF"), - new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("c"), true, Bytes.toBytes("e"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("4"), - true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("B"), true, - SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("E"), - true, SortOrder.ASC), } })); - return testCases; - } + @Override + public PDataType getDataType() { + return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; + } - private static Collection foreach(KeyRange[][] ranges, int[] widths, byte[] lowerInclusive, - byte[] upperExclusive, KeyRange[][] expectedRanges) { - List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - List> expectedSlots = expectedRanges == null ? null : Lists.transform(Lists.newArrayList(expectedRanges), ARRAY_TO_LIST); - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); - for (final int width: widths) { - builder.addField( - new PDatum() { - @Override - public boolean isNullable() { - return width <= 0; - } - @Override - public PDataType getDataType() { - return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width <= 0 ? null : width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, width <= 0, SortOrder.getDefault()); + @Override + public Integer getMaxLength() { + return width <= 0 ? null : width; } - List ret = Lists.newArrayList(); - ret.add(new Object[] {slots, builder.build(), lowerInclusive, upperExclusive, expectedSlots}); - return ret; - } - private static final Function> ARRAY_TO_LIST = new Function>() { - @Override public List apply(KeyRange[] input) { - return Lists.newArrayList(input); + @Override + public Integer getScale() { + return null; } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, width <= 0, SortOrder.getDefault()); + } + List ret = Lists.newArrayList(); + ret.add(new Object[] { slots, builder.build(), lowerInclusive, upperExclusive, expectedSlots }); + return ret; + } + + private static final Function> ARRAY_TO_LIST = + new Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); + } }; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java index 1e02c9de9d8..0dccfb47081 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.filter; import java.io.IOException; @@ -36,15 +35,14 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - import junit.framework.TestCase; //reset() @@ -55,652 +53,484 @@ //filterRow() -> last chance to drop entire row based on the sequence of filterValue() calls. Eg: filter a row if it doesn't contain a specified column. @RunWith(Parameterized.class) public class SkipScanFilterTest extends TestCase { - private final SkipScanFilter skipper; - private final List> cnf; - private final List expectations; - - public SkipScanFilterTest(List> cnf, int[] widths, int[] slotSpans,List expectations) { - this.expectations = expectations; - this.cnf = cnf; - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); - for (final int width : widths) { - builder.addField( - new PDatum() { - - @Override - public boolean isNullable() { - return width <= 0; - } - - @Override - public PDataType getDataType() { - return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return width <= 0 ? null : width; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - }, width <= 0, SortOrder.getDefault()); + private final SkipScanFilter skipper; + private final List> cnf; + private final List expectations; + + public SkipScanFilterTest(List> cnf, int[] widths, int[] slotSpans, + List expectations) { + this.expectations = expectations; + this.cnf = cnf; + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); + for (final int width : widths) { + builder.addField(new PDatum() { + + @Override + public boolean isNullable() { + return width <= 0; } - if(slotSpans==null) { - skipper = new SkipScanFilter(cnf, builder.build(), false); - } else { - skipper = new SkipScanFilter(cnf, slotSpans,builder.build(), false); + + @Override + public PDataType getDataType() { + return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE; } - } - @Test - public void test() throws IOException { - for (Expectation expectation : expectations) { - expectation.examine(skipper); + @Override + public Integer getMaxLength() { + return width <= 0 ? 
null : width; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); } - } - @Parameters(name="{0} {1} {3}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // Variable length tests - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("e"), true, Bytes.toBytes("e"), true, SortOrder.ASC), - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("f"), true, Bytes.toBytes("f"), true, SortOrder.ASC) - }, - { - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), - }, - { - KeyRange.EVERYTHING_RANGE, - }, - { - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), - }}, - new int[4], - null, - new Include(ByteUtil.concat(Bytes.toBytes("a"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("1") ) ), - new SeekNext(ByteUtil.concat(Bytes.toBytes("e.f"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("1") ), - ByteUtil.concat(Bytes.toBytes("f"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("b"),QueryConstants.SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("1") )), - new Include(ByteUtil.concat(Bytes.toBytes("f"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("1") ) ) ) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("2018-02-10"), true, Bytes.toBytes("2019-02-19"), true, SortOrder.ASC), - }, - { - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("channel"), true, Bytes.toBytes("channel"), true, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, SortOrder.ASC), - }, - { - KeyRange.EVERYTHING_RANGE, - }, - { - KeyRange.EVERYTHING_RANGE, - }, - { - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("A004"), true, Bytes.toBytes("A004"), true, SortOrder.ASC), - }, - }, - new int[] {0, 0, 1, 0, 0, 0, 0, 0}, - null, - new SeekNext( - ByteUtil.concat(Bytes.toBytes("2018-02-14"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("channel"), - QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2")), - ByteUtil.concat(Bytes.toBytes("2018-02-14"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("channel"), - QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2"), QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("A004"))), - new Include(ByteUtil.concat(Bytes.toBytes("2018-02-15"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("channel"), - QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2"), QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("A004"))) - ) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("20160116121006"), true, Bytes.toBytes("20160116181006"), true, SortOrder.ASC), - }, - { - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("2404787"), true, Bytes.toBytes("2404787"), true, SortOrder.ASC), - }/*, - { - KeyRange.EVERYTHING_RANGE, - }, - { - KeyRange.EVERYTHING_RANGE, - }*/}, - 
new int[4], - null, - new SeekNext(ByteUtil.concat(Bytes.toBytes("20160116141006"),QueryConstants.SEPARATOR_BYTE_ARRAY, - QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("servlet") ), - ByteUtil.concat(Bytes.toBytes("20160116141006"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("2404787") )), - new Include(ByteUtil.concat(Bytes.toBytes("20160116151006"),QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("2404787"), QueryConstants.SEPARATOR_BYTE_ARRAY, - Bytes.toBytes("jdbc"), QueryConstants.SEPARATOR_BYTE_ARRAY ) ) ) - ); - // Fixed length tests - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, SortOrder.ASC), - }}, - new int[]{3,2,2,2,2}, - null, - new SeekNext("defAAABABAB", "dzzAAAAAAAA"), - new Finished("xyyABABABAB")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("j"), false, Bytes.toBytes("k"), true, SortOrder.ASC), - }}, - new int[]{0}, - null, - new SeekNext(Bytes.toBytes("a"), ByteUtil.nextKey(new byte[] {'j',QueryConstants.SEPARATOR_BYTE})), - new Include("ja"), - new Include("jz"), - new Include("k"), - new Finished("ka"))); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aac"), true, Bytes.toBytes("aad"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - }}, - new int[]{3}, - null, - new SeekNext("aab", "aac"), - new SeekNext("abb", "abc"), - new Include("abc"), - new Include("abe"), - new Include("def"), - new Finished("deg"))); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), true, SortOrder.ASC) - }}, - new int[]{3}, - null, - new SeekNext("aba", "abd"), - new Include("abe"), - new Include("def"), - new Finished("deg"))); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), false, SortOrder.ASC) - }}, - new int[]{3}, - null, - new SeekNext("aba", "abd"), - new Finished("def")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, SortOrder.ASC), - }}, - new int[]{3}, - null, - new Include("def"), - new SeekNext("deg", "dzz"), - new Include("eee"), - new Finished("xyz")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, SortOrder.ASC), - 
PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, SortOrder.ASC), - }}, - new int[]{3,2}, - null, - new Include("abcAB"), - new SeekNext("abcAY","abcEB"), - new Include("abcEF"), - new SeekNext("abcPP","defAB"), - new SeekNext("defEZ","defPO"), - new Include("defPO"), - new Finished("defPP") - ) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - }}, - new int[]{2,3}, - null, - new Include("ABabc"), - new SeekNext("ABdeg","ACabc"), - new Include("AMabc"), - new SeekNext("AYabc","EBabc"), - new Include("EFabc"), - new SeekNext("EZdef","POabc"), - new SeekNext("POabd","POdef"), - new Include("POdef"), - new Finished("PPabc")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - }}, - new int[]{2,3}, - null, - new Include("POdef"), - new Finished("POdeg")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PO"), true, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - }}, - new int[]{2,3}, - null, - new Include("POdef")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AAA"), true, Bytes.toBytes("AAA"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, SortOrder.ASC), - }}, - new int[]{3,2}, - null, - new SeekNext("aaaAA", "abcAB"), - new SeekNext("abcZZ", "abdAB"), - new SeekNext("abdZZ", "abeAB"), - new SeekNext(new byte[]{'d','e','a',(byte)0xFF,(byte)0xFF}, new byte[]{'d','e','b','A','B'}), - new Include("defAB"), - new Include("defAC"), - new Include("defAW"), - new Include("defAX"), - new Include("defEB"), - new Include("defPO"), - new SeekNext("degAB", "dzzAB"), - new Include("dzzAX"), - new Include("dzzEY"), - new SeekNext("dzzEZ", "dzzPO"), - new Include("eeeAB"), - new Include("eeeAC"), - new SeekNext("eeeEA", "eeeEB"), - new Include("eeeEF"), - new SeekNext("eeeEZ","eeePO"), - 
new Finished("xyzAA")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzz"), true, Bytes.toBytes("xyz"), false, SortOrder.ASC), - }}, - new int[]{3}, - null, - new SeekNext("abb", "abc"), - new Include("abc"), - new Include("abe"), - new Finished("xyz")) - ); - testCases.addAll( - foreach(new KeyRange[][]{{ - PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, SortOrder.ASC), - }, - { - PChar.INSTANCE.getKeyRange(Bytes.toBytes("100"), true, Bytes.toBytes("250"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(Bytes.toBytes("700"), false, Bytes.toBytes("901"), false, SortOrder.ASC), - }}, - new int[]{3,2,3}, - null, - new SeekNext("abcEB700", "abcEB701"), - new Include("abcEB701"), - new SeekNext("dzzAB250", "dzzAB701"), - new Finished("zzzAA000")) - ); - //for PHOENIX-3705 - testCases.addAll( - foreach( - new KeyRange[][]{{ - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(1), true, PInteger.INSTANCE.toBytes(4), true, SortOrder.ASC) - }, - { - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) - }, - { - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), - }}, - new int[]{4,4,4}, - null, - new SeekNext( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(9))), - new Finished(ByteUtil.concat( - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)))) - ); - testCases.addAll( - foreach( - new KeyRange[][]{{ - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4)) - }, - { - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) - }, - { - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), - }}, - new int[]{4,4,4}, - null, - new SeekNext( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(9))), - new Finished(ByteUtil.concat( - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)))) - ); - //for RVC - testCases.addAll( - foreach( - new KeyRange[][]{ - { - KeyRange.getKeyRange( - ByteUtil.concat(PInteger.INSTANCE.toBytes(1),PInteger.INSTANCE.toBytes(2)), - true, - ByteUtil.concat(PInteger.INSTANCE.toBytes(3),PInteger.INSTANCE.toBytes(4)), - true) - }, - { - KeyRange.getKeyRange( - 
ByteUtil.concat(PInteger.INSTANCE.toBytes(5),PInteger.INSTANCE.toBytes(6)), - true, - ByteUtil.concat(PInteger.INSTANCE.toBytes(7),PInteger.INSTANCE.toBytes(8)), - true) - }}, - new int[]{4,4,4,4}, - new int[]{1,1}, - new Include( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(6), - PInteger.INSTANCE.toBytes(7))), - new SeekNext( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(9)), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(6))), - new Finished( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(9)))) - ); - testCases.addAll( - foreach( - new KeyRange[][]{ - { - KeyRange.getKeyRange( - ByteUtil.concat(PInteger.INSTANCE.toBytes(1),PInteger.INSTANCE.toBytes(2)), - true, - ByteUtil.concat(PInteger.INSTANCE.toBytes(3),PInteger.INSTANCE.toBytes(4)), - true) - }, - { - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) - }, - { - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), - }}, - new int[]{4,4,4,4}, - new int[]{1,0,0}, - new Include( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(1), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(9))), - new SeekNext( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(9))), - new Finished( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(11)))) - ); - testCases.addAll( - foreach( - new KeyRange[][]{ - { - KeyRange.getKeyRange( - ByteUtil.concat(PInteger.INSTANCE.toBytes(1),PInteger.INSTANCE.toBytes(2)), - true, - ByteUtil.concat(PInteger.INSTANCE.toBytes(3),PInteger.INSTANCE.toBytes(4)), - true) - }, - { - KeyRange.getKeyRange(ByteUtil.concat(PInteger.INSTANCE.toBytes(5),PInteger.INSTANCE.toBytes(6))), - KeyRange.getKeyRange(ByteUtil.concat(PInteger.INSTANCE.toBytes(7),PInteger.INSTANCE.toBytes(8))) - }, - { - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), - }}, - new int[]{4,4,4,4,4}, - new int[]{1,1,0}, - new Include( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(1), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(6), - PInteger.INSTANCE.toBytes(9))), - new SeekNext( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(8), - PInteger.INSTANCE.toBytes(11)), - ByteUtil.concat( - PInteger.INSTANCE.toBytes(2), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(5), - PInteger.INSTANCE.toBytes(6), - PInteger.INSTANCE.toBytes(9))), - new Finished( - ByteUtil.concat( - PInteger.INSTANCE.toBytes(3), - PInteger.INSTANCE.toBytes(4), - PInteger.INSTANCE.toBytes(7), - PInteger.INSTANCE.toBytes(8), - PInteger.INSTANCE.toBytes(11)))) - ); - return testCases; + }, width <= 0, SortOrder.getDefault()); } + if (slotSpans 
== null) { + skipper = new SkipScanFilter(cnf, builder.build(), false); + } else { + skipper = new SkipScanFilter(cnf, slotSpans, builder.build(), false); + } + } - private static Collection foreach(KeyRange[][] ranges, int[] widths, int[] slotSpans, Expectation... expectations) { - List> cnf = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - List ret = Lists.newArrayList(); - ret.add(new Object[] {cnf, widths, slotSpans, Arrays.asList(expectations)} ); - return ret; + @Test + public void test() throws IOException { + for (Expectation expectation : expectations) { + expectation.examine(skipper); } + } - private static final Function> ARRAY_TO_LIST = new Function>() { - @Override public List apply(KeyRange[] input) { - return Lists.newArrayList(input); - } + @Parameters(name = "{0} {1} {3}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + // Variable length tests + testCases.addAll(foreach( + new KeyRange[][] { + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("e"), true, Bytes.toBytes("e"), true, + SortOrder.ASC), + PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("f"), true, Bytes.toBytes("f"), true, + SortOrder.ASC) }, + { PVarchar.INSTANCE + .getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true, SortOrder.ASC), }, + { KeyRange.EVERYTHING_RANGE, }, + { PVarchar.INSTANCE + .getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), } }, + new int[4], null, + new Include(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("1"))), + new SeekNext( + ByteUtil.concat(Bytes.toBytes("e.f"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("1")), + ByteUtil.concat(Bytes.toBytes("f"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("b"), + QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("1"))), + new Include(ByteUtil.concat(Bytes.toBytes("f"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("1"))))); + testCases.addAll(foreach( + new KeyRange[][] { + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("2018-02-10"), true, + Bytes.toBytes("2019-02-19"), true, SortOrder.ASC), }, + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("channel"), true, Bytes.toBytes("channel"), + true, SortOrder.ASC), }, + { PChar.INSTANCE + .getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, + { KeyRange.EVERYTHING_RANGE, }, { KeyRange.EVERYTHING_RANGE, }, + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("A004"), true, Bytes.toBytes("A004"), true, + SortOrder.ASC), }, }, + new int[] { 0, 0, 1, 0, 0, 0, 0, 0 }, null, + new SeekNext( + ByteUtil.concat(Bytes.toBytes("2018-02-14"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("channel"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2")), + ByteUtil.concat(Bytes.toBytes("2018-02-14"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("channel"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2"), + QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("A004"))), + new Include(ByteUtil.concat(Bytes.toBytes("2018-02-15"), 
QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("channel"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("2"), + QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("A004"))))); + testCases.addAll(foreach( + new KeyRange[][] { + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("20160116121006"), true, + Bytes.toBytes("20160116181006"), true, SortOrder.ASC), }, + { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("2404787"), true, Bytes.toBytes("2404787"), + true, SortOrder.ASC), }/* + * , { KeyRange.EVERYTHING_RANGE, }, { KeyRange.EVERYTHING_RANGE, } + */ }, + new int[4], null, + new SeekNext( + ByteUtil.concat(Bytes.toBytes("20160116141006"), QueryConstants.SEPARATOR_BYTE_ARRAY, + QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("servlet")), + ByteUtil.concat(Bytes.toBytes("20160116141006"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("2404787"))), + new Include( + ByteUtil.concat(Bytes.toBytes("20160116151006"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes("2404787"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("jdbc"), + QueryConstants.SEPARATOR_BYTE_ARRAY)))); + // Fixed length tests + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false, + SortOrder.ASC), } }, + new int[] { 3, 2, 2, 2, 2 }, null, new SeekNext("defAAABABAB", "dzzAAAAAAAA"), + new Finished("xyyABABABAB"))); + testCases.addAll(foreach( + new KeyRange[][] { { PVarchar.INSTANCE + .getKeyRange(Bytes.toBytes("j"), false, Bytes.toBytes("k"), true, SortOrder.ASC), } }, + new int[] { 0 }, null, + new SeekNext(Bytes.toBytes("a"), + ByteUtil.nextKey(new byte[] { 'j', QueryConstants.SEPARATOR_BYTE })), + new Include("ja"), new Include("jz"), new Include("k"), new Finished("ka"))); + testCases.addAll(foreach( + new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("aac"), true, Bytes.toBytes("aad"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), } }, + new int[] { 3 }, null, new SeekNext("aab", "aac"), new SeekNext("abb", "abc"), + new Include("abc"), new Include("abe"), new Include("def"), new Finished("deg"))); + testCases.addAll(foreach( + new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), true, + SortOrder.ASC) } }, + new int[] { 3 }, null, new SeekNext("aba", "abd"), new Include("abe"), new Include("def"), + new Finished("deg"))); + testCases.addAll(foreach( + new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), false, Bytes.toBytes("def"), false, + SortOrder.ASC) } }, + new int[] { 3 }, null, new 
SeekNext("aba", "abd"), new Finished("def"))); + testCases.addAll(foreach( + new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, + SortOrder.ASC), } }, + new int[] { 3 }, null, new Include("def"), new SeekNext("deg", "dzz"), new Include("eee"), + new Finished("xyz"))); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, + SortOrder.ASC), } }, + new int[] { 3, 2 }, null, new Include("abcAB"), new SeekNext("abcAY", "abcEB"), + new Include("abcEF"), new SeekNext("abcPP", "defAB"), new SeekNext("defEZ", "defPO"), + new Include("defPO"), new Finished("defPP"))); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abc"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), } }, + new int[] { 2, 3 }, null, new Include("ABabc"), new SeekNext("ABdeg", "ACabc"), + new Include("AMabc"), new SeekNext("AYabc", "EBabc"), new Include("EFabc"), + new SeekNext("EZdef", "POabc"), new SeekNext("POabd", "POdef"), new Include("POdef"), + new Finished("PPabc"))); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), } }, + new int[] { 2, 3 }, null, new Include("POdef"), new Finished("POdeg"))); + testCases.addAll(foreach(new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PO"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("def"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), } }, + new int[] { 2, 3 }, null, new Include("POdef"))); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AAA"), true, Bytes.toBytes("AAA"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, + SortOrder.ASC), } }, + new int[] { 3, 2 }, null, new SeekNext("aaaAA", 
"abcAB"), new SeekNext("abcZZ", "abdAB"), + new SeekNext("abdZZ", "abeAB"), + new SeekNext(new byte[] { 'd', 'e', 'a', (byte) 0xFF, (byte) 0xFF }, + new byte[] { 'd', 'e', 'b', 'A', 'B' }), + new Include("defAB"), new Include("defAC"), new Include("defAW"), new Include("defAX"), + new Include("defEB"), new Include("defPO"), new SeekNext("degAB", "dzzAB"), + new Include("dzzAX"), new Include("dzzEY"), new SeekNext("dzzEZ", "dzzPO"), + new Include("eeeAB"), new Include("eeeAC"), new SeekNext("eeeEA", "eeeEB"), + new Include("eeeEF"), new SeekNext("eeeEZ", "eeePO"), new Finished("xyzAA"))); + testCases.addAll(foreach( + new KeyRange[][] { { + PChar.INSTANCE.getKeyRange(Bytes.toBytes("aaa"), true, Bytes.toBytes("aaa"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzz"), true, Bytes.toBytes("xyz"), false, + SortOrder.ASC), } }, + new int[] { 3 }, null, new SeekNext("abb", "abc"), new Include("abc"), new Include("abe"), + new Finished("xyz"))); + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("dzy"), false, Bytes.toBytes("xyz"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("100"), true, Bytes.toBytes("250"), false, + SortOrder.ASC), + PChar.INSTANCE.getKeyRange(Bytes.toBytes("700"), false, Bytes.toBytes("901"), false, + SortOrder.ASC), } }, + new int[] { 3, 2, 3 }, null, new SeekNext("abcEB700", "abcEB701"), new Include("abcEB701"), + new SeekNext("dzzAB250", "dzzAB701"), new Finished("zzzAA000"))); + // for PHOENIX-3705 + testCases.addAll(foreach( + new KeyRange[][] { + { PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(1), true, + PInteger.INSTANCE.toBytes(4), true, SortOrder.ASC) }, + { KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) }, + { PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, + PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), } }, + new int[] { 4, 4, 4 }, null, + new SeekNext( + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(7), + PInteger.INSTANCE.toBytes(11)), + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(5), + PInteger.INSTANCE.toBytes(9))), + new Finished(ByteUtil.concat(PInteger.INSTANCE.toBytes(4), PInteger.INSTANCE.toBytes(7), + PInteger.INSTANCE.toBytes(11))))); + testCases.addAll(foreach( + new KeyRange[][] { + { KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(1)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(3)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(4)) }, + { KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) }, + { PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, + PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), } }, + new int[] { 4, 4, 4 }, null, + new SeekNext( + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(7), + PInteger.INSTANCE.toBytes(11)), + ByteUtil.concat(PInteger.INSTANCE.toBytes(4), 
PInteger.INSTANCE.toBytes(5), + PInteger.INSTANCE.toBytes(9))), + new Finished(ByteUtil.concat(PInteger.INSTANCE.toBytes(4), PInteger.INSTANCE.toBytes(7), + PInteger.INSTANCE.toBytes(11))))); + // for RVC + testCases.addAll(foreach( + new KeyRange[][] { + { KeyRange.getKeyRange( + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(2)), true, + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4)), true) }, + { KeyRange.getKeyRange( + ByteUtil.concat(PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(6)), true, + ByteUtil.concat(PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(8)), true) } }, + new int[] { 4, 4, 4, 4 }, new int[] { 1, 1 }, + new Include(ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(6), PInteger.INSTANCE.toBytes(7))), + new SeekNext( + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(9)), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(6))), + new Finished(ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(9))))); + testCases.addAll(foreach( + new KeyRange[][] { + { KeyRange.getKeyRange( + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(2)), true, + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4)), true) }, + { KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(5)), + KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(7)) }, + { PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, + PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), } }, + new int[] { 4, 4, 4, 4 }, new int[] { 1, 0, 0 }, + new Include(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(9))), + new SeekNext( + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(11)), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(9))), + new Finished(ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(11))))); + testCases.addAll(foreach( + new KeyRange[][] { + { KeyRange.getKeyRange( + ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(2)), true, + ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4)), true) }, + { KeyRange + .getKeyRange(ByteUtil.concat(PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(6))), + KeyRange.getKeyRange( + ByteUtil.concat(PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(8))) }, + { PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(9), true, + PInteger.INSTANCE.toBytes(10), true, SortOrder.ASC), } }, + new int[] { 4, 4, 4, 4, 4 }, new int[] { 1, 1, 0 }, + new Include(ByteUtil.concat(PInteger.INSTANCE.toBytes(1), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(6), PInteger.INSTANCE.toBytes(9))), + new SeekNext(ByteUtil.concat(PInteger.INSTANCE.toBytes(2), PInteger.INSTANCE.toBytes(3), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(8), PInteger.INSTANCE.toBytes(11)), + ByteUtil.concat(PInteger.INSTANCE.toBytes(2), 
PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(5), PInteger.INSTANCE.toBytes(6), + PInteger.INSTANCE.toBytes(9))), + new Finished(ByteUtil.concat(PInteger.INSTANCE.toBytes(3), PInteger.INSTANCE.toBytes(4), + PInteger.INSTANCE.toBytes(7), PInteger.INSTANCE.toBytes(8), + PInteger.INSTANCE.toBytes(11))))); + return testCases; + } + + private static Collection foreach(KeyRange[][] ranges, int[] widths, int[] slotSpans, + Expectation... expectations) { + List> cnf = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + List ret = Lists.newArrayList(); + ret.add(new Object[] { cnf, widths, slotSpans, Arrays.asList(expectations) }); + return ret; + } + + private static final Function> ARRAY_TO_LIST = + new Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); + } }; - static interface Expectation { - void examine(SkipScanFilter skipper) throws IOException; + static interface Expectation { + void examine(SkipScanFilter skipper) throws IOException; + } + + private static final class SeekNext implements Expectation { + private final byte[] rowkey, hint; + + public SeekNext(String rowkey, String hint) { + this.rowkey = Bytes.toBytes(rowkey); + this.hint = Bytes.toBytes(hint); } - private static final class SeekNext implements Expectation { - private final byte[] rowkey, hint; - public SeekNext(String rowkey, String hint) { - this.rowkey = Bytes.toBytes(rowkey); - this.hint = Bytes.toBytes(hint); - } - public SeekNext(byte[] rowkey, byte[] hint) { - this.rowkey = rowkey; - this.hint = hint; - } - @SuppressWarnings("deprecation") - @Override public void examine(SkipScanFilter skipper) throws IOException { - KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); - skipper.reset(); - assertFalse(skipper.filterAllRemaining()); - assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); + public SeekNext(byte[] rowkey, byte[] hint) { + this.rowkey = rowkey; + this.hint = hint; + } - assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, skipper.filterCell(kv)); - assertEquals(KeyValueUtil.createFirstOnRow(hint), skipper.getNextCellHint(kv)); - } + @SuppressWarnings("deprecation") + @Override + public void examine(SkipScanFilter skipper) throws IOException { + KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); + skipper.reset(); + assertFalse(skipper.filterAllRemaining()); + assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); - @Override public String toString() { - return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected seek next using hint: " + Bytes.toStringBinary(hint); - } + assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, skipper.filterCell(kv)); + assertEquals(KeyValueUtil.createFirstOnRow(hint), skipper.getNextCellHint(kv)); } - private static final class Include implements Expectation { - private final byte[] rowkey; - - public Include(String rowkey) { - this.rowkey = Bytes.toBytes(rowkey); - } - - public Include(byte[] rowkey) { - this.rowkey = rowkey; - } - - @SuppressWarnings("deprecation") - @Override public void examine(SkipScanFilter skipper) throws IOException { - KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); - skipper.reset(); - assertFalse(skipper.filterAllRemaining()); - assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); - assertEquals(kv.toString(), ReturnCode.INCLUDE_AND_NEXT_COL, skipper.filterCell(kv)); - } - @Override public String toString() { - return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected include"; 
- } + @Override + public String toString() { + return "rowkey=" + Bytes.toStringBinary(rowkey) + ", expected seek next using hint: " + + Bytes.toStringBinary(hint); } + } - private static final class Finished implements Expectation { - private final byte[] rowkey; - public Finished(String rowkey) { - this.rowkey = Bytes.toBytes(rowkey); - } + private static final class Include implements Expectation { + private final byte[] rowkey; - public Finished(byte[] rowkey) { - this.rowkey = rowkey; - } + public Include(String rowkey) { + this.rowkey = Bytes.toBytes(rowkey); + } - @Override public void examine(SkipScanFilter skipper) throws IOException { - KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); - skipper.reset(); - assertEquals(ReturnCode.NEXT_ROW,skipper.filterCell(kv)); - skipper.reset(); - assertTrue(skipper.filterAllRemaining()); - } + public Include(byte[] rowkey) { + this.rowkey = rowkey; + } - @Override public String toString() { - return "rowkey=" + Bytes.toStringBinary(rowkey)+", expected finished"; - } + @SuppressWarnings("deprecation") + @Override + public void examine(SkipScanFilter skipper) throws IOException { + KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); + skipper.reset(); + assertFalse(skipper.filterAllRemaining()); + assertFalse(skipper.filterRowKey(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength())); + assertEquals(kv.toString(), ReturnCode.INCLUDE_AND_NEXT_COL, skipper.filterCell(kv)); + } + + @Override + public String toString() { + return "rowkey=" + Bytes.toStringBinary(rowkey) + ", expected include"; + } + } + + private static final class Finished implements Expectation { + private final byte[] rowkey; + + public Finished(String rowkey) { + this.rowkey = Bytes.toBytes(rowkey); + } + + public Finished(byte[] rowkey) { + this.rowkey = rowkey; + } + + @Override + public void examine(SkipScanFilter skipper) throws IOException { + KeyValue kv = KeyValueUtil.createFirstOnRow(rowkey); + skipper.reset(); + assertEquals(ReturnCode.NEXT_ROW, skipper.filterCell(kv)); + skipper.reset(); + assertTrue(skipper.filterAllRemaining()); + } + + @Override + public String toString() { + return "rowkey=" + Bytes.toStringBinary(rowkey) + ", expected finished"; } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java index 689a5ee031f..6b25d3fab6c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,4 +42,4 @@ public byte[] getTableName() { public String getTableNameString() { return this.tableName; } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java index faf27ea5835..fe0515d1b27 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTestingUtils.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,28 +45,28 @@ public class IndexTestingUtils { private static final Logger LOGGER = LoggerFactory.getLogger(IndexTestingUtils.class); private static final String MASTER_INFO_PORT_KEY = "hbase.master.info.port"; private static final String RS_INFO_PORT_KEY = "hbase.regionserver.info.port"; - + private IndexTestingUtils() { // private ctor for util class } public static void setupConfig(Configuration conf) { - conf.setInt(MASTER_INFO_PORT_KEY, -1); - conf.setInt(RS_INFO_PORT_KEY, -1); + conf.setInt(MASTER_INFO_PORT_KEY, -1); + conf.setInt(RS_INFO_PORT_KEY, -1); // setup our codec, so we get proper replay/write - conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName()); + conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, IndexedWALEditCodec.class.getName()); } + /** * Verify the state of the index table between the given key and time ranges against the list of * expected keyvalues. 
- * @throws IOException */ @SuppressWarnings("javadoc") - public static void verifyIndexTableAtTimestamp(Table index1, List expected, - long start, long end, byte[] startKey, byte[] endKey) throws IOException { - LOGGER.debug("Scanning " + index1.getName().getNameAsString() + " between times (" + start - + ", " + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey) - + "]."); + public static void verifyIndexTableAtTimestamp(Table index1, List expected, long start, + long end, byte[] startKey, byte[] endKey) throws IOException { + LOGGER + .debug("Scanning " + index1.getName().getNameAsString() + " between times (" + start + ", " + + end + "] and keys: [" + Bytes.toString(startKey) + ", " + Bytes.toString(endKey) + "]."); Scan s = new Scan().withStartRow(startKey).withStopRow(endKey); // s.setRaw(true); s.readAllVersions(); @@ -82,12 +82,13 @@ public static void verifyIndexTableAtTimestamp(Table index1, List expe } public static void verifyIndexTableAtTimestamp(Table index1, List expected, long ts, - byte[] startKey) throws IOException { - IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, startKey, HConstants.EMPTY_END_ROW); + byte[] startKey) throws IOException { + IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, startKey, + HConstants.EMPTY_END_ROW); } public static void verifyIndexTableAtTimestamp(Table index1, List expected, long start, - byte[] startKey, byte[] endKey) throws IOException { + byte[] startKey, byte[] endKey) throws IOException { verifyIndexTableAtTimestamp(index1, expected, start, start + 1, startKey, endKey); } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java index 90d2920d989..0f1912f663f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/StubAbortable.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/ColumnGroup.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/ColumnGroup.java index 81b58053ab2..888d03a6988 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/ColumnGroup.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/ColumnGroup.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.covered; import java.util.ArrayList; @@ -24,7 +23,6 @@ import org.apache.hadoop.hbase.util.Bytes; - /** * A collection of {@link CoveredColumn}s that should be included in a covered index. 
*/ @@ -62,9 +60,9 @@ public boolean matches(String family) { /** * Check to see if any column matches the family/qualifier pair - * @param family family to match against + * @param family family to match against * @param qualifier qualifier to match, can be null, in which case we match all - * qualifiers + * qualifiers * @return true if any column matches, false otherwise */ public boolean matches(byte[] family, byte[] qualifier) { @@ -73,17 +71,15 @@ public boolean matches(byte[] family, byte[] qualifier) { for (CoveredColumn column : columns) { if (column.matchesFamily(fam)) { // check the qualifier - if (column.matchesQualifier(qualifier)) { - return true; + if (column.matchesQualifier(qualifier)) { + return true; } } } return false; } - /** - * @return the number of columns in the group - */ + /** Returns the number of columns in the group */ public int size() { return this.columns.size(); } @@ -109,4 +105,4 @@ public String toString() { public List getColumns() { return this.columns; } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumn.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumn.java index bb03f562011..e49979d68b3 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumn.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.covered; import org.apache.hadoop.hbase.util.Bytes; @@ -31,7 +30,7 @@ public class CoveredColumn extends ColumnReference { String familyString; private final int hashCode; - public CoveredColumn(byte[] family, byte[] qualifier){ + public CoveredColumn(byte[] family, byte[] qualifier) { this(Bytes.toString(family), qualifier); } @@ -103,4 +102,4 @@ public String toString() { String qualString = getQualifier() == null ? "null" : Bytes.toString(getQualifier()); return "CoveredColumn:[" + familyString + ":" + qualString + "]"; } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java index 33ba053b072..1dd9a079b80 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexCodec.java @@ -1,11 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.hbase.index.covered; @@ -28,7 +36,6 @@ import org.apache.phoenix.hbase.index.BaseIndexCodec; import org.apache.phoenix.hbase.index.scanner.Scanner; import org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; /** @@ -36,341 +43,334 @@ */ public class CoveredColumnIndexCodec extends BaseIndexCodec { - private static final byte[] EMPTY_BYTES = new byte[0]; - public static final byte[] INDEX_ROW_COLUMN_FAMILY = Bytes.toBytes("INDEXED_COLUMNS"); - - private List groups; + private static final byte[] EMPTY_BYTES = new byte[0]; + public static final byte[] INDEX_ROW_COLUMN_FAMILY = Bytes.toBytes("INDEXED_COLUMNS"); - /** - * @param groups - * to initialize the codec with - * @return an instance that is initialized with the given {@link ColumnGroup}s, for testing purposes - */ - public static CoveredColumnIndexCodec getCodecForTesting(List groups) { - CoveredColumnIndexCodec codec = new CoveredColumnIndexCodec(); - codec.groups = Lists.newArrayList(groups); - return codec; - } + private List groups; - @Override - public void initialize(Configuration conf, byte[] tableName) { - groups = CoveredColumnIndexSpecifierBuilder.getColumns(conf); - } + /** + * to initialize the codec with + * @return an instance that is initialized with the given {@link ColumnGroup}s, for testing + * purposes + */ + public static CoveredColumnIndexCodec getCodecForTesting(List groups) { + CoveredColumnIndexCodec codec = new CoveredColumnIndexCodec(); + codec.groups = Lists.newArrayList(groups); + return codec; + } - @Override - public Iterable getIndexUpserts(TableState state, IndexMetaData indexMetaData, byte[] regionStartKey, byte[] regionEndKey, boolean verified) { - List updates = new ArrayList(groups.size()); - for (ColumnGroup group : groups) { - IndexUpdate update = getIndexUpdateForGroup(group, state, indexMetaData); - updates.add(update); - } - return updates; + @Override + public void initialize(Configuration conf, byte[] tableName) { + groups = CoveredColumnIndexSpecifierBuilder.getColumns(conf); } - /** - * @param group - * @param state - * @return the update that should be made to the table - */ - private IndexUpdate getIndexUpdateForGroup(ColumnGroup group, TableState state, IndexMetaData 
indexMetaData) { - List refs = group.getColumns(); - try { - Pair stateInfo = ((LocalTableState)state).getIndexedColumnsTableState(refs, false, false, indexMetaData); - Scanner kvs = stateInfo.getFirst(); - Pair> columns = getNextEntries(refs, kvs, state.getCurrentRowKey()); - // make sure we close the scanner - kvs.close(); - if (columns.getFirst().intValue() == 0) { return stateInfo.getSecond(); } - // have all the column entries, so just turn it into a Delete for the row - // convert the entries to the needed values - byte[] rowKey = composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond()); - Put p = new Put(rowKey, state.getCurrentTimestamp()); - // add the columns to the put - addColumnsToPut(p, columns.getSecond()); - - // update the index info - IndexUpdate update = stateInfo.getSecond(); - update.setTable(Bytes.toBytes(group.getTable())); - update.setUpdate(p); - return update; - } catch (IOException e) { - throw new RuntimeException("Unexpected exception when getting state for columns: " + refs); - } + @Override + public Iterable getIndexUpserts(TableState state, IndexMetaData indexMetaData, + byte[] regionStartKey, byte[] regionEndKey, boolean verified) { + List updates = new ArrayList(groups.size()); + for (ColumnGroup group : groups) { + IndexUpdate update = getIndexUpdateForGroup(group, state, indexMetaData); + updates.add(update); } + return updates; + } - private static void addColumnsToPut(Put indexInsert, List columns) { - // add each of the corresponding families to the put - int count = 0; - for (ColumnEntry column : columns) { - indexInsert.addColumn(INDEX_ROW_COLUMN_FAMILY, - ArrayUtils.addAll(Bytes.toBytes(count++), toIndexQualifier(column.ref)), null); - } + /** Returns the update that should be made to the table */ + private IndexUpdate getIndexUpdateForGroup(ColumnGroup group, TableState state, + IndexMetaData indexMetaData) { + List refs = group.getColumns(); + try { + Pair stateInfo = + ((LocalTableState) state).getIndexedColumnsTableState(refs, false, false, indexMetaData); + Scanner kvs = stateInfo.getFirst(); + Pair> columns = + getNextEntries(refs, kvs, state.getCurrentRowKey()); + // make sure we close the scanner + kvs.close(); + if (columns.getFirst().intValue() == 0) { + return stateInfo.getSecond(); + } + // have all the column entries, so just turn it into a Delete for the row + // convert the entries to the needed values + byte[] rowKey = + composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond()); + Put p = new Put(rowKey, state.getCurrentTimestamp()); + // add the columns to the put + addColumnsToPut(p, columns.getSecond()); + + // update the index info + IndexUpdate update = stateInfo.getSecond(); + update.setTable(Bytes.toBytes(group.getTable())); + update.setUpdate(p); + return update; + } catch (IOException e) { + throw new RuntimeException("Unexpected exception when getting state for columns: " + refs); } + } - private static byte[] toIndexQualifier(CoveredColumn column) { - return ArrayUtils.addAll(Bytes.toBytes(column.familyString + CoveredColumn.SEPARATOR), column.getQualifier()); + private static void addColumnsToPut(Put indexInsert, List columns) { + // add each of the corresponding families to the put + int count = 0; + for (ColumnEntry column : columns) { + indexInsert.addColumn(INDEX_ROW_COLUMN_FAMILY, + ArrayUtils.addAll(Bytes.toBytes(count++), toIndexQualifier(column.ref)), null); } + } - @Override - public Iterable getIndexDeletes(TableState state, IndexMetaData context, byte[] 
regionStartKey, byte[] regionEndKey) { - List deletes = new ArrayList(groups.size()); - for (ColumnGroup group : groups) { - deletes.add(getDeleteForGroup(group, state, context)); - } - return deletes; - } + private static byte[] toIndexQualifier(CoveredColumn column) { + return ArrayUtils.addAll(Bytes.toBytes(column.familyString + CoveredColumn.SEPARATOR), + column.getQualifier()); + } - /** - * Get all the deletes necessary for a group of columns - logically, the cleanup the index table for a given index. - * - * @param group - * index information - * @return the cleanup for the given index, or null if no cleanup is necessary - */ - private IndexUpdate getDeleteForGroup(ColumnGroup group, TableState state, IndexMetaData indexMetaData) { - List refs = group.getColumns(); - try { - Pair kvs = ((LocalTableState)state).getIndexedColumnsTableState(refs, false, false, indexMetaData); - Pair> columns = getNextEntries(refs, kvs.getFirst(), state.getCurrentRowKey()); - // make sure we close the scanner reference - kvs.getFirst().close(); - // no change, just return the passed update - if (columns.getFirst() == 0) { return kvs.getSecond(); } - // have all the column entries, so just turn it into a Delete for the row - // convert the entries to the needed values - byte[] rowKey = composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond()); - Delete d = new Delete(rowKey); - d.setTimestamp(state.getCurrentTimestamp()); - IndexUpdate update = kvs.getSecond(); - update.setUpdate(d); - update.setTable(Bytes.toBytes(group.getTable())); - return update; - } catch (IOException e) { - throw new RuntimeException("Unexpected exception when getting state for columns: " + refs); - } + @Override + public Iterable getIndexDeletes(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey) { + List deletes = new ArrayList(groups.size()); + for (ColumnGroup group : groups) { + deletes.add(getDeleteForGroup(group, state, context)); } + return deletes; + } - /** - * Get the next batch of primary table values for the given columns - * - * @param refs - * columns to match against - * @param state - * @return the total length of all values found and the entries to add for the index - */ - private Pair> getNextEntries(List refs, Scanner kvs, byte[] currentRow) - throws IOException { - int totalValueLength = 0; - List entries = new ArrayList(refs.size()); - - // pull out the latest state for each column reference, in order - for (CoveredColumn ref : refs) { - KeyValue first = ref.getFirstKeyValueForRow(currentRow); - if (!kvs.seek(first)) { - // no more keys, so add a null value - entries.add(new ColumnEntry(null, ref)); - continue; - } - // there is a next value - we only care about the current value, so we can just snag that - Cell next = kvs.next(); - if (ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(), - next.getFamilyLength()) - && ref.matchesQualifier(next.getQualifierArray(), next.getQualifierOffset(), - next.getQualifierLength())) { - byte[] v = CellUtil.cloneValue(next); - totalValueLength += v.length; - entries.add(new ColumnEntry(v, ref)); - } else { - // this first one didn't match at all, so we have to put in a null entry - entries.add(new ColumnEntry(null, ref)); - continue; - } - // here's where is gets a little tricky - we either need to decide if we should continue - // adding entries (matches all qualifiers) or if we are done (matches a single qualifier) - if (!ref.allColumns()) { - continue; - } - // matches all columns, so we need to 
iterate until we hit the next column with the same - // family as the current key - byte[] lastQual = CellUtil.cloneQualifier(next); - byte[] nextQual = null; - while ((next = kvs.next()) != null) { - // different family, done with this column - if (!ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(), next.getFamilyLength())) { - break; - } - nextQual = CellUtil.cloneQualifier(next); - // we are still on the same qualifier - skip it, since we already added a column for it - if (Arrays.equals(lastQual, nextQual)) { - continue; - } - // this must match the qualifier since its an all-qualifiers specifier, so we add it - byte[] v = CellUtil.cloneValue(next); - totalValueLength += v.length; - entries.add(new ColumnEntry(v, ref)); - // update the last qualifier to check against - lastQual = nextQual; - } - } - return new Pair>(totalValueLength, entries); + /** + * Get all the deletes necessary for a group of columns - logically, the cleanup the index table + * for a given index. index information + * @return the cleanup for the given index, or null if no cleanup is necessary + */ + private IndexUpdate getDeleteForGroup(ColumnGroup group, TableState state, + IndexMetaData indexMetaData) { + List refs = group.getColumns(); + try { + Pair kvs = + ((LocalTableState) state).getIndexedColumnsTableState(refs, false, false, indexMetaData); + Pair> columns = + getNextEntries(refs, kvs.getFirst(), state.getCurrentRowKey()); + // make sure we close the scanner reference + kvs.getFirst().close(); + // no change, just return the passed update + if (columns.getFirst() == 0) { + return kvs.getSecond(); + } + // have all the column entries, so just turn it into a Delete for the row + // convert the entries to the needed values + byte[] rowKey = + composeRowKey(state.getCurrentRowKey(), columns.getFirst(), columns.getSecond()); + Delete d = new Delete(rowKey); + d.setTimestamp(state.getCurrentTimestamp()); + IndexUpdate update = kvs.getSecond(); + update.setUpdate(d); + update.setTable(Bytes.toBytes(group.getTable())); + return update; + } catch (IOException e) { + throw new RuntimeException("Unexpected exception when getting state for columns: " + refs); } + } - public static class ColumnEntry { - byte[] value = EMPTY_BYTES; - CoveredColumn ref; - - public ColumnEntry(byte[] value, CoveredColumn ref) { - this.value = value == null ? 
EMPTY_BYTES : value; - this.ref = ref; + /** + * Get the next batch of primary table values for the given columns columns to match against + * @return the total length of all values found and the entries to add for the index + */ + private Pair> getNextEntries(List refs, Scanner kvs, + byte[] currentRow) throws IOException { + int totalValueLength = 0; + List entries = new ArrayList(refs.size()); + + // pull out the latest state for each column reference, in order + for (CoveredColumn ref : refs) { + KeyValue first = ref.getFirstKeyValueForRow(currentRow); + if (!kvs.seek(first)) { + // no more keys, so add a null value + entries.add(new ColumnEntry(null, ref)); + continue; + } + // there is a next value - we only care about the current value, so we can just snag that + Cell next = kvs.next(); + if ( + ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(), next.getFamilyLength()) + && ref.matchesQualifier(next.getQualifierArray(), next.getQualifierOffset(), + next.getQualifierLength()) + ) { + byte[] v = CellUtil.cloneValue(next); + totalValueLength += v.length; + entries.add(new ColumnEntry(v, ref)); + } else { + // this first one didn't match at all, so we have to put in a null entry + entries.add(new ColumnEntry(null, ref)); + continue; + } + // here's where is gets a little tricky - we either need to decide if we should continue + // adding entries (matches all qualifiers) or if we are done (matches a single qualifier) + if (!ref.allColumns()) { + continue; + } + // matches all columns, so we need to iterate until we hit the next column with the same + // family as the current key + byte[] lastQual = CellUtil.cloneQualifier(next); + byte[] nextQual = null; + while ((next = kvs.next()) != null) { + // different family, done with this column + if ( + !ref.matchesFamily(next.getFamilyArray(), next.getFamilyOffset(), next.getFamilyLength()) + ) { + break; } + nextQual = CellUtil.cloneQualifier(next); + // we are still on the same qualifier - skip it, since we already added a column for it + if (Arrays.equals(lastQual, nextQual)) { + continue; + } + // this must match the qualifier since its an all-qualifiers specifier, so we add it + byte[] v = CellUtil.cloneValue(next); + totalValueLength += v.length; + entries.add(new ColumnEntry(v, ref)); + // update the last qualifier to check against + lastQual = nextQual; + } } + return new Pair>(totalValueLength, entries); + } - /** - * Compose the final index row key. - *
    - * This is faster than adding each value independently as we can just build a single a array and copy everything - * over once. - * - * @param pk - * primary key of the original row - * @param length - * total number of bytes of all the values that should be added - * @param values - * to use when building the key - */ - public static byte[] composeRowKey(byte[] pk, int length, List values) { - final int numColumnEntries = values.size() * Bytes.SIZEOF_INT; - // now build up expected row key, each of the values, in order, followed by the PK and then some - // info about lengths so we can deserialize each value - // - // output = length of values + primary key + column entries + length of each column entry + number of column entries - byte[] output = new byte[length + pk.length + numColumnEntries + Bytes.SIZEOF_INT]; - int pos = 0; - int[] lengths = new int[values.size()]; - int i = 0; - for (ColumnEntry entry : values) { - byte[] v = entry.value; - // skip doing the copy attempt, if we don't need to - if (v.length != 0) { - System.arraycopy(v, 0, output, pos, v.length); - pos += v.length; - } - lengths[i++] = v.length; - } + public static class ColumnEntry { + byte[] value = EMPTY_BYTES; + CoveredColumn ref; - // add the primary key to the end of the row key - System.arraycopy(pk, 0, output, pos, pk.length); - pos += pk.length; + public ColumnEntry(byte[] value, CoveredColumn ref) { + this.value = value == null ? EMPTY_BYTES : value; + this.ref = ref; + } + } - // add the lengths as suffixes so we can deserialize the elements again - for (int l : lengths) { - byte[] serializedLength = Bytes.toBytes(l); - System.arraycopy(serializedLength, 0, output, pos, Bytes.SIZEOF_INT); - pos += Bytes.SIZEOF_INT; - } + /** + * Compose the final index row key. + *
    + * This is faster than adding each value independently as we can just build a single a array and + * copy everything over once. primary key of the original row total number of bytes of all the + * values that should be added to use when building the key + */ + public static byte[] composeRowKey(byte[] pk, int length, List values) { + final int numColumnEntries = values.size() * Bytes.SIZEOF_INT; + // now build up expected row key, each of the values, in order, followed by the PK and then some + // info about lengths so we can deserialize each value + // + // output = length of values + primary key + column entries + length of each column entry + + // number of column entries + byte[] output = new byte[length + pk.length + numColumnEntries + Bytes.SIZEOF_INT]; + int pos = 0; + int[] lengths = new int[values.size()]; + int i = 0; + for (ColumnEntry entry : values) { + byte[] v = entry.value; + // skip doing the copy attempt, if we don't need to + if (v.length != 0) { + System.arraycopy(v, 0, output, pos, v.length); + pos += v.length; + } + lengths[i++] = v.length; + } - // and the last integer is the number of values - byte[] serializedNumValues = Bytes.toBytes(values.size()); - System.arraycopy(serializedNumValues, 0, output, pos, Bytes.SIZEOF_INT); - // Just in case we serialize more in the rowkey in the future.. - pos += Bytes.SIZEOF_INT; + // add the primary key to the end of the row key + System.arraycopy(pk, 0, output, pos, pk.length); + pos += pk.length; - return output; + // add the lengths as suffixes so we can deserialize the elements again + for (int l : lengths) { + byte[] serializedLength = Bytes.toBytes(l); + System.arraycopy(serializedLength, 0, output, pos, Bytes.SIZEOF_INT); + pos += Bytes.SIZEOF_INT; } - /** - * Essentially a short-cut from building a {@link Put}. - * - * @param pk - * row key - * @param timestamp - * timestamp of all the keyvalues - * @param values - * expected value--column pair - * @return a keyvalues that the index contains for a given row at a timestamp with the given value -- column pairs. - */ - public static List getIndexKeyValueForTesting(byte[] pk, long timestamp, - List> values) { - - int length = 0; - List expected = new ArrayList(values.size()); - for (Pair value : values) { - ColumnEntry entry = new ColumnEntry(value.getFirst(), value.getSecond()); - length += value.getFirst().length; - expected.add(entry); - } + // and the last integer is the number of values + byte[] serializedNumValues = Bytes.toBytes(values.size()); + System.arraycopy(serializedNumValues, 0, output, pos, Bytes.SIZEOF_INT); + // Just in case we serialize more in the rowkey in the future.. + pos += Bytes.SIZEOF_INT; - byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected); - Put p = new Put(rowKey, timestamp); - CoveredColumnIndexCodec.addColumnsToPut(p, expected); - List kvs = new ArrayList(); - for (Entry> entry : p.getFamilyCellMap().entrySet()) { - kvs.addAll(entry.getValue()); - } + return output; + } - return kvs; + /** + * Essentially a short-cut from building a {@link Put}. row key timestamp of all the keyvalues + * expected value--column pair + * @return a keyvalues that the index contains for a given row at a timestamp with the given value + * -- column pairs. 
+ */ + public static List getIndexKeyValueForTesting(byte[] pk, long timestamp, + List> values) { + + int length = 0; + List expected = new ArrayList(values.size()); + for (Pair value : values) { + ColumnEntry entry = new ColumnEntry(value.getFirst(), value.getSecond()); + length += value.getFirst().length; + expected.add(entry); } - public static List getValues(byte[] bytes) { - // get the total number of keys in the bytes - int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length); - List keys = new ArrayList(keyCount); - int[] lengths = new int[keyCount]; - int lengthPos = keyCount - 1; - int pos = bytes.length - Bytes.SIZEOF_INT; - // figure out the length of each key - for (int i = 0; i < keyCount; i++) { - lengths[lengthPos--] = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos); - pos -= Bytes.SIZEOF_INT; - } + byte[] rowKey = CoveredColumnIndexCodec.composeRowKey(pk, length, expected); + Put p = new Put(rowKey, timestamp); + CoveredColumnIndexCodec.addColumnsToPut(p, expected); + List kvs = new ArrayList(); + for (Entry> entry : p.getFamilyCellMap().entrySet()) { + kvs.addAll(entry.getValue()); + } - int current = 0; - for (int length : lengths) { - byte[] key = Arrays.copyOfRange(bytes, current, current + length); - keys.add(key); - current += length; - } + return kvs; + } - return keys; + public static List getValues(byte[] bytes) { + // get the total number of keys in the bytes + int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length); + List keys = new ArrayList(keyCount); + int[] lengths = new int[keyCount]; + int lengthPos = keyCount - 1; + int pos = bytes.length - Bytes.SIZEOF_INT; + // figure out the length of each key + for (int i = 0; i < keyCount; i++) { + lengths[lengthPos--] = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos); + pos -= Bytes.SIZEOF_INT; } - /** - * Read an integer from the preceding {@value Bytes#SIZEOF_INT} bytes - * - * @param bytes - * array to read from - * @param start - * start point, backwards from which to read. For example, if specifying "25", we would try to read an - * integer from 21 -> 25 - * @return an integer from the proceeding {@value Bytes#SIZEOF_INT} bytes, if it exists. - */ - private static int getPreviousInteger(byte[] bytes, int start) { - return Bytes.toInt(bytes, start - Bytes.SIZEOF_INT); + int current = 0; + for (int length : lengths) { + byte[] key = Arrays.copyOfRange(bytes, current, current + length); + keys.add(key); + current += length; } - /** - * Check to see if an row key just contains a list of null values. - * - * @param bytes - * row key to examine - * @return true if all the values are zero-length, false otherwise - */ - public static boolean checkRowKeyForAllNulls(byte[] bytes) { - int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length); - int pos = bytes.length - Bytes.SIZEOF_INT; - for (int i = 0; i < keyCount; i++) { - int next = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos); - if (next > 0) { return false; } - pos -= Bytes.SIZEOF_INT; - } + return keys; + } - return true; - } + /** + * Read an integer from the preceding {@value Bytes#SIZEOF_INT} bytes array to read from start + * point, backwards from which to read. For example, if specifying "25", we would try to read an + * integer from 21 -> 25 + * @return an integer from the proceeding {@value Bytes#SIZEOF_INT} bytes, if it exists. 
+ */ + private static int getPreviousInteger(byte[] bytes, int start) { + return Bytes.toInt(bytes, start - Bytes.SIZEOF_INT); + } - @Override - public boolean isEnabled(Mutation m) { - // this could be a bit smarter, looking at the groups for the mutation, but we leave it at this - // simple check for the moment. - return groups.size() > 0; + /** + * Check to see if an row key just contains a list of null values. row key to examine + * @return true if all the values are zero-length, false otherwise + */ + public static boolean checkRowKeyForAllNulls(byte[] bytes) { + int keyCount = CoveredColumnIndexCodec.getPreviousInteger(bytes, bytes.length); + int pos = bytes.length - Bytes.SIZEOF_INT; + for (int i = 0; i < keyCount; i++) { + int next = CoveredColumnIndexCodec.getPreviousInteger(bytes, pos); + if (next > 0) { + return false; + } + pos -= Bytes.SIZEOF_INT; } + + return true; + } + + @Override + public boolean isEnabled(Mutation m) { + // this could be a bit smarter, looking at the groups for the mutation, but we leave it at this + // simple check for the moment. + return groups.size() > 0; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java index 3b3692d2ebc..c54a8013355 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.hbase.index.covered; import java.io.IOException; @@ -44,13 +43,14 @@ public class CoveredColumnIndexSpecifierBuilder { // each joined column are either just the columns in the group or all the most recent data in the // row (a fully covered index). private static final String COUNT = ".count"; - private static final String INDEX_GROUPS_COUNT_KEY = INDEX_TO_TABLE_CONF_PREFX + ".groups" + COUNT; + private static final String INDEX_GROUPS_COUNT_KEY = + INDEX_TO_TABLE_CONF_PREFX + ".groups" + COUNT; private static final String INDEX_GROUP_PREFIX = INDEX_TO_TABLE_CONF_PREFX + "group."; private static final String INDEX_GROUP_COVERAGE_SUFFIX = ".columns"; private static final String TABLE_SUFFIX = ".table"; - public static final String NON_TX_INDEX_BUILDER_CLASSNAME = "org.apache.phoenix.index.PhoenixIndexBuilder"; - + public static final String NON_TX_INDEX_BUILDER_CLASSNAME = + "org.apache.phoenix.index.PhoenixIndexBuilder"; // right now, we don't support this should be easy enough to add later // private static final String INDEX_GROUP_FULLY_COVERED = ".covered"; @@ -102,9 +102,6 @@ public Map convertToMap() { } /** - * @param specs - * @param columns - * @param index */ private void addIndexGroupToSpecs(Map specs, ColumnGroup columns, int index) { // hbase.index.covered.group. 
@@ -113,16 +110,16 @@ private void addIndexGroupToSpecs(Map specs, ColumnGroup columns // set the table to which the group writes // hbase.index.covered.group..table specs.put(prefix + TABLE_SUFFIX, columns.getTable()); - + // a different key for each column in the group // hbase.index.covered.group..columns String columnPrefix = prefix + INDEX_GROUP_COVERAGE_SUFFIX; // hbase.index.covered.group..columns.count = String columnsSizeKey = columnPrefix + COUNT; specs.put(columnsSizeKey, Integer.toString(columns.size())); - + // add each column in the group - int i=0; + int i = 0; for (CoveredColumn column : columns) { // hbase.index.covered.group..columns. String nextKey = columnPrefix + "." + Integer.toString(i); @@ -132,23 +129,24 @@ private void addIndexGroupToSpecs(Map specs, ColumnGroup columns } } - public TableDescriptor build(TableDescriptor desc) throws IOException { - return build(desc, CoveredColumnIndexCodec.class); - } + public TableDescriptor build(TableDescriptor desc) throws IOException { + return build(desc, CoveredColumnIndexCodec.class); + } - public TableDescriptor build(TableDescriptor desc, Class clazz) throws IOException { + public TableDescriptor build(TableDescriptor desc, Class clazz) + throws IOException { // add the codec for the index to the map of options Map opts = this.convertToMap(); opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName()); - TableDescriptorBuilder newBuilder = TableDescriptorBuilder.newBuilder(desc); - IndexUtil.enableIndexing(newBuilder, NonTxIndexBuilder.class.getName(), opts, - Coprocessor.PRIORITY_USER, QueryConstants.INDEXER_CLASSNAME); - return newBuilder.build(); + TableDescriptorBuilder newBuilder = TableDescriptorBuilder.newBuilder(desc); + IndexUtil.enableIndexing(newBuilder, NonTxIndexBuilder.class.getName(), opts, + Coprocessor.PRIORITY_USER, QueryConstants.INDEXER_CLASSNAME); + return newBuilder.build(); } public static List getColumns(Configuration conf) { - int count= conf.getInt(INDEX_GROUPS_COUNT_KEY, 0); - if (count ==0) { + int count = conf.getInt(INDEX_GROUPS_COUNT_KEY, 0); + if (count == 0) { return Collections.emptyList(); } @@ -168,7 +166,7 @@ public static List getColumns(Configuration conf) { // hbase.index.covered.group..columns.count = j String columnsSizeKey = columnPrefix + COUNT; int columnCount = conf.getInt(columnsSizeKey, 0); - for(int j=0; j< columnCount; j++){ + for (int j = 0; j < columnCount; j++) { String columnKey = columnPrefix + "." + j; CoveredColumn column = CoveredColumn.parse(conf.get(columnKey)); group.add(column); @@ -181,10 +179,8 @@ public static List getColumns(Configuration conf) { } /** - * @param key - * @param value */ public void addArbitraryConfigForTesting(String key, String value) { this.specs.put(key, value); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnsTest.java index db7d838a006..dbbe5d46a1e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,10 +22,8 @@ import java.util.Arrays; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Test; - -import org.apache.phoenix.hbase.index.covered.CoveredColumns; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; +import org.junit.Test; public class CoveredColumnsTest { @@ -36,11 +34,11 @@ public class CoveredColumnsTest { public void testCovering() { ColumnReference ref = new ColumnReference(fam, qual); CoveredColumns columns = new CoveredColumns(); - assertEquals("Should have only found a single column to cover", 1, columns - .findNonCoveredColumns(Arrays.asList(ref)).size()); + assertEquals("Should have only found a single column to cover", 1, + columns.findNonCoveredColumns(Arrays.asList(ref)).size()); columns.addColumn(ref); assertEquals("Shouldn't have any columns to cover", 0, columns.findNonCoveredColumns(Arrays.asList(ref)).size()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredIndexCodecForTesting.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredIndexCodecForTesting.java index c074fb56fa9..399aac7e3fa 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredIndexCodecForTesting.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredIndexCodecForTesting.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,44 +26,46 @@ import org.apache.phoenix.hbase.index.BaseIndexCodec; /** - * An {@link IndexCodec} for testing that allow you to specify the index updates/deletes, regardless of the current - * tables' state. + * An {@link IndexCodec} for testing that allow you to specify the index updates/deletes, regardless + * of the current tables' state. */ public class CoveredIndexCodecForTesting extends BaseIndexCodec { - private List deletes = new ArrayList(); - private List updates = new ArrayList(); + private List deletes = new ArrayList(); + private List updates = new ArrayList(); - public void addIndexDelete(IndexUpdate... deletes) { - this.deletes.addAll(Arrays.asList(deletes)); - } + public void addIndexDelete(IndexUpdate... deletes) { + this.deletes.addAll(Arrays.asList(deletes)); + } - public void addIndexUpserts(IndexUpdate... updates) { - this.updates.addAll(Arrays.asList(updates)); - } + public void addIndexUpserts(IndexUpdate... 
updates) { + this.updates.addAll(Arrays.asList(updates)); + } - public void clear() { - this.deletes.clear(); - this.updates.clear(); - } + public void clear() { + this.deletes.clear(); + this.updates.clear(); + } - @Override - public Iterable getIndexDeletes(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey) { - return this.deletes; - } + @Override + public Iterable getIndexDeletes(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey) { + return this.deletes; + } - @Override - public Iterable getIndexUpserts(TableState state, IndexMetaData context, byte[] regionStartKey, byte[] regionEndKey, boolean verified) { - return this.updates; - } + @Override + public Iterable getIndexUpserts(TableState state, IndexMetaData context, + byte[] regionStartKey, byte[] regionEndKey, boolean verified) { + return this.updates; + } - @Override - public void initialize(Configuration conf, byte[] tableName) { - // noop - } + @Override + public void initialize(Configuration conf, byte[] tableName) { + // noop + } - @Override - public boolean isEnabled(Mutation m) { - return true; - } -} \ No newline at end of file + @Override + public boolean isEnabled(Mutation m) { + return true; + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java index f403c1c480c..c674161f21a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,7 +45,6 @@ import org.junit.Test; import org.mockito.Mockito; - public class LocalTableStateTest { private static final byte[] row = Bytes.toBytes("row"); @@ -57,17 +56,17 @@ public class LocalTableStateTest { @Override public ReplayWrite getReplayWrite() { - return null; + return null; } @Override public boolean requiresPriorRowState(Mutation m) { - return true; + return true; } - + @Override public int getClientVersion() { - return ScanUtil.UNKNOWN_CLIENT_VERSION; + return ScanUtil.UNKNOWN_CLIENT_VERSION; } }; @@ -86,50 +85,51 @@ public void testCorrectOrderingWithLazyLoadingColumns() throws Exception { Mockito.when(env.getRegion()).thenReturn(region); final byte[] stored = Bytes.toBytes("stored-value"); - KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored); kv.setSequenceId(0); HashMap> rowKeyPtrToCells = - new HashMap>(); - rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell)kv)); + new HashMap>(); + rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell) kv)); CachedLocalTable cachedLocalTable = CachedLocalTable.build(rowKeyPtrToCells); LocalTableState table = new LocalTableState(cachedLocalTable, m); - //add the kvs from the mutation + // add the kvs from the mutation table.addPendingUpdates(m.get(fam, qual)); // setup the lookup ColumnReference col = new ColumnReference(fam, qual); table.setCurrentTimestamp(ts); - //check that our value still shows up first on scan, even though this is a lazy load - Pair p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); + // check that our value still shows up first on scan, even though this is a lazy load + Pair p = + table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); Scanner s = p.getFirst(); - assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next()); + assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), + s.next()); } public static final class ScannerCreatedException extends RuntimeException { - ScannerCreatedException(String msg) { - super(msg); - } + ScannerCreatedException(String msg) { + super(msg); + } } @Test public void testNoScannerForImmutableRows() throws Exception { - IndexMetaData indexMetaData = new IndexMetaData() { - - @Override - public ReplayWrite getReplayWrite() { - return null; - } - - @Override - public boolean requiresPriorRowState(Mutation m) { - return false; - } - - @Override - public int getClientVersion() { - return ScanUtil.UNKNOWN_CLIENT_VERSION; - } + IndexMetaData indexMetaData = new IndexMetaData() { + + @Override + public ReplayWrite getReplayWrite() { + return null; + } + + @Override + public boolean requiresPriorRowState(Mutation m) { + return false; + } + + @Override + public int getClientVersion() { + return ScanUtil.UNKNOWN_CLIENT_VERSION; + } }; Put m = new Put(row); @@ -144,21 +144,22 @@ public int getClientVersion() { CachedLocalTable cachedLocalTable = CachedLocalTable.build(null); LocalTableState table = new LocalTableState(cachedLocalTable, m); - //add the kvs from the mutation + // add the kvs from the mutation table.addPendingUpdates(m.get(fam, qual)); // setup the lookup ColumnReference col = new 
ColumnReference(fam, qual); table.setCurrentTimestamp(ts); - //check that our value still shows up first on scan, even though this is a lazy load - Pair p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); + // check that our value still shows up first on scan, even though this is a lazy load + Pair p = + table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); Scanner s = p.getFirst(); - assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next()); + assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), + s.next()); } /** * Test that we correctly rollback the state of keyvalue - * @throws Exception */ @Test @SuppressWarnings("unchecked") @@ -175,8 +176,8 @@ public void testCorrectRollback() throws Exception { storedKv.setSequenceId(2); HashMap> rowKeyPtrToCells = - new HashMap>(); - rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell)storedKv)); + new HashMap>(); + rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell) storedKv)); CachedLocalTable cachedLocalTable = CachedLocalTable.build(rowKeyPtrToCells); LocalTableState table = new LocalTableState(cachedLocalTable, m); @@ -189,7 +190,8 @@ public void testCorrectRollback() throws Exception { ColumnReference col = new ColumnReference(fam, qual); table.setCurrentTimestamp(ts); // check that the value is there - Pair p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); + Pair p = + table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); Scanner s = p.getFirst(); assertEquals("Didn't get the pending mutation's value first", kv, s.next()); @@ -209,24 +211,23 @@ public void testOnlyLoadsRequestedColumns() throws Exception { Region region = Mockito.mock(Region.class); Mockito.when(env.getRegion()).thenReturn(region); final KeyValue storedKv = - new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value")); + new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value")); storedKv.setSequenceId(2); - Put pendingUpdate = new Put(row); pendingUpdate.addColumn(fam, qual, ts, val); HashMap> rowKeyPtrToCells = - new HashMap>(); - rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell)storedKv)); + new HashMap>(); + rowKeyPtrToCells.put(new ImmutableBytesPtr(row), Collections.singletonList((Cell) storedKv)); CachedLocalTable cachedLocalTable = CachedLocalTable.build(rowKeyPtrToCells); LocalTableState table = new LocalTableState(cachedLocalTable, pendingUpdate); - // do the lookup for the given column ColumnReference col = new ColumnReference(fam, qual); table.setCurrentTimestamp(ts); // check that the value is there - Pair p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); + Pair p = + table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData); Scanner s = p.getFirst(); // make sure it read the table the one time assertEquals("Didn't get the stored keyvalue!", storedKv, s.next()); diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java index eaf4674b678..45e36c699e9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -66,6 +66,10 @@ import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableKey; +import org.apache.phoenix.thirdparty.com.google.common.base.Optional; +import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.TestUtil; @@ -75,329 +79,294 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.apache.phoenix.thirdparty.com.google.common.base.Optional; -import org.apache.phoenix.thirdparty.com.google.common.base.Predicate; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest { - private static final String TEST_TABLE_STRING = "TEST_TABLE"; - private static final String TEST_TABLE_DDL = "CREATE TABLE IF NOT EXISTS " + - TEST_TABLE_STRING + " (\n" + - " ORGANIZATION_ID CHAR(4) NOT NULL,\n" + - " ENTITY_ID CHAR(7) NOT NULL,\n" + - " SCORE INTEGER,\n" + - " LAST_UPDATE_TIME TIMESTAMP\n" + - " CONSTRAINT TEST_TABLE_PK PRIMARY KEY (\n" + - " ORGANIZATION_ID,\n" + - " ENTITY_ID\n" + - " )\n" + - ") VERSIONS=1, MULTI_TENANT=TRUE"; - private static final String TEST_TABLE_INDEX_STRING = "TEST_TABLE_SCORE"; - private static final String TEST_TABLE_INDEX_DDL = "CREATE INDEX IF NOT EXISTS " + - TEST_TABLE_INDEX_STRING - + " ON " + TEST_TABLE_STRING + " (SCORE DESC, ENTITY_ID DESC)"; - private static final byte[] ROW = Bytes.toBytes("org1entity1"); //length 4 + 7 (see ddl) - private static final String FAM_STRING = QueryConstants.DEFAULT_COLUMN_FAMILY; - private static final byte[] FAM = Bytes.toBytes(FAM_STRING); - private static final byte[] INDEXED_QUALIFIER = Bytes.toBytes("SCORE"); - private static final byte[] VALUE_1 = Bytes.toBytes(111); - private static final byte[] VALUE_2 = Bytes.toBytes(222); - private static final byte[] VALUE_3 = Bytes.toBytes(333); - private static final byte[] VALUE_4 = Bytes.toBytes(444); - - private NonTxIndexBuilder indexBuilder; - private PhoenixIndexMetaData mockIndexMetaData; - // Put your current row state in here - the index builder will read from this in LocalTable - // to determine whether the index has changed. - // Whatever we return here should match the table DDL (e.g. 
length of column value) - private List currentRowCells; - - /** - * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be - * called, where any read requests to - * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test - * field 'currentRowCells' - */ - @Before - public void setup() throws Exception { - RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf = new Configuration(false); - conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); - Mockito.when(env.getConfiguration()).thenReturn(conf); - - // the following is used by LocalTable#getCurrentRowState() - Region mockRegion = Mockito.mock(Region.class); - Mockito.when(env.getRegion()).thenReturn(mockRegion); - - Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))) - .thenAnswer(new Answer() { - @Override - public RegionScanner answer(InvocationOnMock invocation) throws Throwable { - Scan sArg = (Scan) invocation.getArguments()[0]; - TimeRange timeRange = sArg.getTimeRange(); - return getMockTimeRangeRegionScanner(timeRange); - } - }); - - // the following is called by PhoenixIndexCodec#getIndexUpserts() , getIndexDeletes() - RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class); - Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo); - Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo); - Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a")); - Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z")); - Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING)); - - mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class); - Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation)Mockito.any())).thenReturn(true); - Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(null); - Mockito.when(mockIndexMetaData.getIndexMaintainers()) - .thenReturn(Collections.singletonList(getTestIndexMaintainer())); - - indexBuilder = new NonTxIndexBuilder(); - indexBuilder.setup(env); - } - - // returns a RegionScanner which filters currentRowCells using the given TimeRange. 
- // This is called from LocalTable#getCurrentRowState() - // If testIndexMetaData.ignoreNewerMutations() is not set, default TimeRange is 0 to - // Long.MAX_VALUE - private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) { - return new BaseRegionScanner(Mockito.mock(RegionScanner.class)) { - @Override - public boolean next(List results) throws IOException { - for (Cell cell : currentRowCells) { - if (cell.getTimestamp() >= timeRange.getMin() - && cell.getTimestamp() < timeRange.getMax()) { - results.add(cell); - } - } - return false; // indicate no more results - } - - public boolean next(List result, ScannerContext scannerContext) throws IOException { - return next(result); - } - }; - } - - private IndexMaintainer getTestIndexMaintainer() throws Exception { - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - // disable column encoding, makes debugging easier - props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0"); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - conn.setAutoCommit(true); - conn.createStatement().execute(TEST_TABLE_DDL); - conn.createStatement().execute(TEST_TABLE_INDEX_DDL); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - table.getIndexMaintainers(ptr, pconn); - List indexMaintainerList = - IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true); - assertEquals(1, indexMaintainerList.size()); - IndexMaintainer indexMaintainer = indexMaintainerList.get(0); - return indexMaintainer; - } finally { - conn.close(); + private static final String TEST_TABLE_STRING = "TEST_TABLE"; + private static final String TEST_TABLE_DDL = "CREATE TABLE IF NOT EXISTS " + TEST_TABLE_STRING + + " (\n" + " ORGANIZATION_ID CHAR(4) NOT NULL,\n" + " ENTITY_ID CHAR(7) NOT NULL,\n" + + " SCORE INTEGER,\n" + " LAST_UPDATE_TIME TIMESTAMP\n" + + " CONSTRAINT TEST_TABLE_PK PRIMARY KEY (\n" + " ORGANIZATION_ID,\n" + + " ENTITY_ID\n" + " )\n" + ") VERSIONS=1, MULTI_TENANT=TRUE"; + private static final String TEST_TABLE_INDEX_STRING = "TEST_TABLE_SCORE"; + private static final String TEST_TABLE_INDEX_DDL = "CREATE INDEX IF NOT EXISTS " + + TEST_TABLE_INDEX_STRING + " ON " + TEST_TABLE_STRING + " (SCORE DESC, ENTITY_ID DESC)"; + private static final byte[] ROW = Bytes.toBytes("org1entity1"); // length 4 + 7 (see ddl) + private static final String FAM_STRING = QueryConstants.DEFAULT_COLUMN_FAMILY; + private static final byte[] FAM = Bytes.toBytes(FAM_STRING); + private static final byte[] INDEXED_QUALIFIER = Bytes.toBytes("SCORE"); + private static final byte[] VALUE_1 = Bytes.toBytes(111); + private static final byte[] VALUE_2 = Bytes.toBytes(222); + private static final byte[] VALUE_3 = Bytes.toBytes(333); + private static final byte[] VALUE_4 = Bytes.toBytes(444); + + private NonTxIndexBuilder indexBuilder; + private PhoenixIndexMetaData mockIndexMetaData; + // Put your current row state in here - the index builder will read from this in LocalTable + // to determine whether the index has changed. + // Whatever we return here should match the table DDL (e.g. 
length of column value) + private List currentRowCells; + + /** + * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be + * called, where any read requests to + * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test + * field 'currentRowCells' + */ + @Before + public void setup() throws Exception { + RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(false); + conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName()); + Mockito.when(env.getConfiguration()).thenReturn(conf); + + // the following is used by LocalTable#getCurrentRowState() + Region mockRegion = Mockito.mock(Region.class); + Mockito.when(env.getRegion()).thenReturn(mockRegion); + + Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))) + .thenAnswer(new Answer() { + @Override + public RegionScanner answer(InvocationOnMock invocation) throws Throwable { + Scan sArg = (Scan) invocation.getArguments()[0]; + TimeRange timeRange = sArg.getTimeRange(); + return getMockTimeRangeRegionScanner(timeRange); } - } - - /** - * Tests that updating an indexed column results in a DeleteFamily (prior index cell) and a Put - * (new index cell) - */ - @Test - public void testGetMutableIndexUpdate() throws IOException { - setCurrentRowState(FAM, INDEXED_QUALIFIER, 1, VALUE_1); - - // update ts and value - Put put = new Put(ROW); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(FAM) - .setType(Cell.Type.Put) - .setQualifier(INDEXED_QUALIFIER) - .setTimestamp(2).setValue(VALUE_2).build()); - MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); - mutation.addAll(put); - - CachedLocalTable cachedLocalTable = CachedLocalTable.build( - Collections.singletonList(mutation), - this.mockIndexMetaData, - this.indexBuilder.getEnv().getRegion()); - - Collection> indexUpdates = - indexBuilder.getIndexUpdate(mutation, mockIndexMetaData, cachedLocalTable); - assertEquals(2, indexUpdates.size()); - assertContains(indexUpdates, 2, ROW, Cell.Type.DeleteFamily, FAM, - new byte[0] /* qual not needed */, 2); - assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, - Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2); - } - - /** - * Tests a partial rebuild of a row with multiple versions. 3 versions of the row in data table, - * and we rebuild the index starting from time t=2 - * - * There should be one index row version per data row version. 
- */ - @Test - public void testRebuildMultipleVersionRow() throws IOException { - // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations - // see LocalTable#getCurrentRowState() - Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(ReplayWrite.INDEX_ONLY); - - // the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable - // should only return first - Cell currentCell1 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1); - Cell currentCell2 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 2, VALUE_2); - Cell currentCell3 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 3, VALUE_3); - Cell currentCell4 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 4, VALUE_4); - setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1)); - - // rebuilder replays mutations starting from t=2 - MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); - Put put = new Put(ROW); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(FAM) - .setType(Cell.Type.Put) - .setQualifier(INDEXED_QUALIFIER) - .setTimestamp(4).setValue(VALUE_4).build()); - mutation.addAll(put); - put = new Put(ROW); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(FAM) - .setType(Cell.Type.Put) - .setQualifier(INDEXED_QUALIFIER) - .setTimestamp(3).setValue(VALUE_3).build()); - mutation.addAll(put); - put = new Put(ROW); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(FAM) - .setType(Cell.Type.Put) - .setQualifier(INDEXED_QUALIFIER) - .setTimestamp(2).setValue(VALUE_2).build()); - mutation.addAll(put); - - Collection> indexUpdates = Lists.newArrayList(); - Collection mutations = - IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation)); - - CachedLocalTable cachedLocalTable = CachedLocalTable.build( - mutations, - this.mockIndexMetaData, - this.indexBuilder.getEnv().getRegion()); - - for (Mutation m : mutations) { - indexUpdates.addAll(indexBuilder.getIndexUpdate(m, mockIndexMetaData, cachedLocalTable)); - } - // 3 puts and 3 deletes (one to hide existing index row for VALUE_1, and two to hide index - // rows for VALUE_2, VALUE_3) - assertEquals(6, indexUpdates.size()); - - assertContains(indexUpdates, 2, ROW, Cell.Type.DeleteFamily, FAM, - new byte[0] /* qual not needed */, 2); - assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, - Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2); - assertContains(indexUpdates, 3, ROW, Cell.Type.DeleteFamily, FAM, - new byte[0] /* qual not needed */, 3); - assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, - Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 3); - assertContains(indexUpdates, 4, ROW, Cell.Type.DeleteFamily, FAM, - new byte[0] /* qual not needed */, 4); - assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, - Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 4); - } - - /** - * Tests getting an index update for a mutation with 200 versions Before, the issue PHOENIX-3807 - * was causing this test to take >90 seconds, so here we set a timeout of 5 seconds - */ - @Test(timeout = 10000) - public void testManyVersions() throws IOException { - // when doing a rebuild, we are replaying mutations so we want 
to ignore newer mutations - // see LocalTable#getCurrentRowState() - Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(ReplayWrite.INDEX_ONLY); - MultiMutation mutation = getMultipleVersionMutation(200); - currentRowCells = mutation.getFamilyCellMap().get(FAM); - - Collection mutations = - IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation)); - - CachedLocalTable cachedLocalTable = CachedLocalTable.build( - mutations, - this.mockIndexMetaData, - this.indexBuilder.getEnv().getRegion()); - - Collection> indexUpdates = Lists.newArrayList(); - for (Mutation m : IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation))) { - indexUpdates.addAll(indexBuilder.getIndexUpdate(m, mockIndexMetaData, cachedLocalTable)); + }); + + // the following is called by PhoenixIndexCodec#getIndexUpserts() , getIndexDeletes() + RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class); + Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo); + Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo); + Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a")); + Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z")); + Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING)); + + mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class); + Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation) Mockito.any())) + .thenReturn(true); + Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(null); + Mockito.when(mockIndexMetaData.getIndexMaintainers()) + .thenReturn(Collections.singletonList(getTestIndexMaintainer())); + + indexBuilder = new NonTxIndexBuilder(); + indexBuilder.setup(env); + } + + // returns a RegionScanner which filters currentRowCells using the given TimeRange. 
+ // This is called from LocalTable#getCurrentRowState() + // If testIndexMetaData.ignoreNewerMutations() is not set, default TimeRange is 0 to + // Long.MAX_VALUE + private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) { + return new BaseRegionScanner(Mockito.mock(RegionScanner.class)) { + @Override + public boolean next(List results) throws IOException { + for (Cell cell : currentRowCells) { + if ( + cell.getTimestamp() >= timeRange.getMin() && cell.getTimestamp() < timeRange.getMax() + ) { + results.add(cell); + } } - assertNotEquals(0, indexUpdates.size()); - } - - // Assert that the given collection of indexUpdates contains the given cell - private void assertContains(Collection> indexUpdates, - final long mutationTs, final byte[] row, final Cell.Type cellType, final byte[] fam, - final byte[] qual, final long cellTs) { - Predicate> hasCellPredicate = - new Predicate>() { - @Override - public boolean apply(Pair input) { - assertEquals(TEST_TABLE_INDEX_STRING, Bytes.toString(input.getSecond())); - Mutation mutation = input.getFirst(); - if (mutationTs == mutation.getTimestamp()) { - NavigableMap> familyCellMap = - mutation.getFamilyCellMap(); - Cell updateCell = familyCellMap.get(fam).get(0); - if (cellType == updateCell.getType() - && Bytes.compareTo(fam, CellUtil.cloneFamily(updateCell)) == 0 - && Bytes.compareTo(qual, - CellUtil.cloneQualifier(updateCell)) == 0 - && cellTs == updateCell.getTimestamp()) { - return true; - } - } - return false; - } - }; - Optional> tryFind = - Iterables.tryFind(indexUpdates, hasCellPredicate); - assertTrue(tryFind.isPresent()); + return false; // indicate no more results + } + + public boolean next(List result, ScannerContext scannerContext) throws IOException { + return next(result); + } + }; + } + + private IndexMaintainer getTestIndexMaintainer() throws Exception { + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + // disable column encoding, makes debugging easier + props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0"); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + conn.setAutoCommit(true); + conn.createStatement().execute(TEST_TABLE_DDL); + conn.createStatement().execute(TEST_TABLE_INDEX_DDL); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + table.getIndexMaintainers(ptr, pconn); + List indexMaintainerList = + IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true); + assertEquals(1, indexMaintainerList.size()); + IndexMaintainer indexMaintainer = indexMaintainerList.get(0); + return indexMaintainer; + } finally { + conn.close(); } - - private void setCurrentRowState(byte[] fam2, byte[] indexedQualifier, int i, byte[] value1) { - Cell cell = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1); - currentRowCells = Collections.singletonList(cell); + } + + /** + * Tests that updating an indexed column results in a DeleteFamily (prior index cell) and a Put + * (new index cell) + */ + @Test + public void testGetMutableIndexUpdate() throws IOException { + setCurrentRowState(FAM, INDEXED_QUALIFIER, 1, VALUE_1); + + // update ts and value + Put put = new Put(ROW); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(FAM).setType(Cell.Type.Put).setQualifier(INDEXED_QUALIFIER).setTimestamp(2) + 
.setValue(VALUE_2).build()); + MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); + mutation.addAll(put); + + CachedLocalTable cachedLocalTable = CachedLocalTable.build(Collections.singletonList(mutation), + this.mockIndexMetaData, this.indexBuilder.getEnv().getRegion()); + + Collection> indexUpdates = + indexBuilder.getIndexUpdate(mutation, mockIndexMetaData, cachedLocalTable); + assertEquals(2, indexUpdates.size()); + assertContains(indexUpdates, 2, ROW, Cell.Type.DeleteFamily, FAM, + new byte[0] /* qual not needed */, 2); + assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, + Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2); + } + + /** + * Tests a partial rebuild of a row with multiple versions. 3 versions of the row in data table, + * and we rebuild the index starting from time t=2 There should be one index row version per data + * row version. + */ + @Test + public void testRebuildMultipleVersionRow() throws IOException { + // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations + // see LocalTable#getCurrentRowState() + Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(ReplayWrite.INDEX_ONLY); + + // the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable + // should only return first + Cell currentCell1 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1); + Cell currentCell2 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 2, VALUE_2); + Cell currentCell3 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 3, VALUE_3); + Cell currentCell4 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 4, VALUE_4); + setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1)); + + // rebuilder replays mutations starting from t=2 + MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); + Put put = new Put(ROW); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(FAM).setType(Cell.Type.Put).setQualifier(INDEXED_QUALIFIER).setTimestamp(4) + .setValue(VALUE_4).build()); + mutation.addAll(put); + put = new Put(ROW); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(FAM).setType(Cell.Type.Put).setQualifier(INDEXED_QUALIFIER).setTimestamp(3) + .setValue(VALUE_3).build()); + mutation.addAll(put); + put = new Put(ROW); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(FAM).setType(Cell.Type.Put).setQualifier(INDEXED_QUALIFIER).setTimestamp(2) + .setValue(VALUE_2).build()); + mutation.addAll(put); + + Collection> indexUpdates = Lists.newArrayList(); + Collection mutations = + IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation)); + + CachedLocalTable cachedLocalTable = CachedLocalTable.build(mutations, this.mockIndexMetaData, + this.indexBuilder.getEnv().getRegion()); + + for (Mutation m : mutations) { + indexUpdates.addAll(indexBuilder.getIndexUpdate(m, mockIndexMetaData, cachedLocalTable)); } - - private void setCurrentRowState(List cells) { - currentRowCells = cells; + // 3 puts and 3 deletes (one to hide existing index row for VALUE_1, and two to hide index + // rows for VALUE_2, VALUE_3) + assertEquals(6, indexUpdates.size()); + + assertContains(indexUpdates, 2, ROW, Cell.Type.DeleteFamily, FAM, + new byte[0] /* qual not needed */, 2); + assertContains(indexUpdates, 
ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, + Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2); + assertContains(indexUpdates, 3, ROW, Cell.Type.DeleteFamily, FAM, + new byte[0] /* qual not needed */, 3); + assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, + Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 3); + assertContains(indexUpdates, 4, ROW, Cell.Type.DeleteFamily, FAM, + new byte[0] /* qual not needed */, 4); + assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, + Cell.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 4); + } + + /** + * Tests getting an index update for a mutation with 200 versions Before, the issue PHOENIX-3807 + * was causing this test to take >90 seconds, so here we set a timeout of 5 seconds + */ + @Test(timeout = 10000) + public void testManyVersions() throws IOException { + // when doing a rebuild, we are replaying mutations so we want to ignore newer mutations + // see LocalTable#getCurrentRowState() + Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(ReplayWrite.INDEX_ONLY); + MultiMutation mutation = getMultipleVersionMutation(200); + currentRowCells = mutation.getFamilyCellMap().get(FAM); + + Collection mutations = + IndexManagementUtil.flattenMutationsByTimestamp(Collections.singletonList(mutation)); + + CachedLocalTable cachedLocalTable = CachedLocalTable.build(mutations, this.mockIndexMetaData, + this.indexBuilder.getEnv().getRegion()); + + Collection> indexUpdates = Lists.newArrayList(); + for (Mutation m : IndexManagementUtil + .flattenMutationsByTimestamp(Collections.singletonList(mutation))) { + indexUpdates.addAll(indexBuilder.getIndexUpdate(m, mockIndexMetaData, cachedLocalTable)); } - - private MultiMutation getMultipleVersionMutation(int versions) { - MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); - for (int i = versions - 1; i >= 0; i--) { - Put put = new Put(ROW); - try { - put.add(CellBuilderFactory - .create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(FAM) - .setType(Cell.Type.Put) - .setQualifier(INDEXED_QUALIFIER) - .setTimestamp(i) - .setValue(Bytes.toBytes(i)).build()); - } catch (IOException e) { - - } - mutation.addAll(put); + assertNotEquals(0, indexUpdates.size()); + } + + // Assert that the given collection of indexUpdates contains the given cell + private void assertContains(Collection> indexUpdates, + final long mutationTs, final byte[] row, final Cell.Type cellType, final byte[] fam, + final byte[] qual, final long cellTs) { + Predicate> hasCellPredicate = new Predicate>() { + @Override + public boolean apply(Pair input) { + assertEquals(TEST_TABLE_INDEX_STRING, Bytes.toString(input.getSecond())); + Mutation mutation = input.getFirst(); + if (mutationTs == mutation.getTimestamp()) { + NavigableMap> familyCellMap = mutation.getFamilyCellMap(); + Cell updateCell = familyCellMap.get(fam).get(0); + if ( + cellType == updateCell.getType() + && Bytes.compareTo(fam, CellUtil.cloneFamily(updateCell)) == 0 + && Bytes.compareTo(qual, CellUtil.cloneQualifier(updateCell)) == 0 + && cellTs == updateCell.getTimestamp() + ) { + return true; + } } - return mutation; + return false; + } + }; + Optional> tryFind = Iterables.tryFind(indexUpdates, hasCellPredicate); + assertTrue(tryFind.isPresent()); + } + + private void setCurrentRowState(byte[] fam2, byte[] indexedQualifier, int i, byte[] value1) { + Cell cell = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1); + 
currentRowCells = Collections.singletonList(cell); + } + + private void setCurrentRowState(List cells) { + currentRowCells = cells; + } + + private MultiMutation getMultipleVersionMutation(int versions) { + MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW)); + for (int i = versions - 1; i >= 0; i--) { + Put put = new Put(ROW); + try { + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(FAM).setType(Cell.Type.Put).setQualifier(INDEXED_QUALIFIER).setTimestamp(i) + .setValue(Bytes.toBytes(i)).build()); + } catch (IOException e) { + + } + mutation.addAll(put); } + return mutation; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestColumnTracker.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestColumnTracker.java index 7c5de5ff976..b3639c259b0 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestColumnTracker.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestColumnTracker.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,10 +24,9 @@ import java.util.ArrayList; import java.util.Collection; -import org.junit.Test; - import org.apache.phoenix.hbase.index.covered.update.ColumnReference; import org.apache.phoenix.hbase.index.covered.update.ColumnTracker; +import org.junit.Test; public class TestColumnTracker { @@ -58,4 +57,4 @@ public void testHasNewerTimestamps() throws Exception { tracker.setTs(10); assertTrue("Tracker doesn't have newer timetamps with set ts", tracker.hasNewerTimestamps()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java index 3c0cf027af4..696a84b848e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredColumnIndexCodec.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,21 +41,20 @@ import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexCodec.ColumnEntry; import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState; import org.apache.phoenix.hbase.index.covered.update.ColumnReference; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.mockito.Mockito; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class TestCoveredColumnIndexCodec { private static final byte[] PK = new byte[] { 'a' }; private static final String FAMILY_STRING = "family"; private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STRING); private static final byte[] QUAL = Bytes.toBytes("qual"); private static final CoveredColumn COLUMN_REF = new CoveredColumn(FAMILY_STRING, QUAL); - private static final byte[] EMPTY_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0, - Arrays.asList(toColumnEntry(new byte[0]))); - private static final byte[] BLANK_INDEX_KEY = CoveredColumnIndexCodec.composeRowKey(PK, 0, - Collections. emptyList()); + private static final byte[] EMPTY_INDEX_KEY = + CoveredColumnIndexCodec.composeRowKey(PK, 0, Arrays.asList(toColumnEntry(new byte[0]))); + private static final byte[] BLANK_INDEX_KEY = + CoveredColumnIndexCodec.composeRowKey(PK, 0, Collections. emptyList()); private static ColumnEntry toColumnEntry(byte[] bytes) { return new ColumnEntry(bytes, COLUMN_REF); @@ -63,21 +62,20 @@ private static ColumnEntry toColumnEntry(byte[] bytes) { /** * Convert between an index and a bunch of values - * @throws Exception */ @Test public void toFromIndexKey() throws Exception { // start with empty values byte[] indexKey = BLANK_INDEX_KEY; List stored = CoveredColumnIndexCodec.getValues(indexKey); - assertEquals("Found some stored values in an index row key that wasn't created with values!", - 0, stored.size()); + assertEquals("Found some stored values in an index row key that wasn't created with values!", 0, + stored.size()); // a single, empty value indexKey = EMPTY_INDEX_KEY; stored = CoveredColumnIndexCodec.getValues(indexKey); - assertEquals("Found some stored values in an index row key that wasn't created with values!", - 1, stored.size()); + assertEquals("Found some stored values in an index row key that wasn't created with values!", 1, + stored.size()); assertEquals("Found a non-zero length value: " + Bytes.toString(stored.get(0)), 0, stored.get(0).length); @@ -86,9 +84,8 @@ public void toFromIndexKey() throws Exception { byte[] v2 = new byte[] { 'b' }; byte[] v3 = Bytes.toBytes("v3"); int len = v1.length + v2.length + v3.length; - indexKey = - CoveredColumnIndexCodec.composeRowKey(PK, len, - Arrays.asList(toColumnEntry(v1), toColumnEntry(v2), toColumnEntry(v3))); + indexKey = CoveredColumnIndexCodec.composeRowKey(PK, len, + Arrays.asList(toColumnEntry(v1), toColumnEntry(v2), toColumnEntry(v3))); stored = CoveredColumnIndexCodec.getValues(indexKey); assertEquals("Didn't find expected number of values in index key!", 3, stored.size()); assertTrue("First index keys don't match!", Bytes.equals(v1, stored.get(0))); @@ -106,21 +103,18 @@ public void testCheckRowKeyForAllNulls() { byte[] result = EMPTY_INDEX_KEY; assertTrue("Didn't correctly read single element as being null in row key", 
CoveredColumnIndexCodec.checkRowKeyForAllNulls(result)); - result = - CoveredColumnIndexCodec.composeRowKey(pk, 0, - Lists.newArrayList(toColumnEntry(new byte[0]), toColumnEntry(new byte[0]))); + result = CoveredColumnIndexCodec.composeRowKey(pk, 0, + Lists.newArrayList(toColumnEntry(new byte[0]), toColumnEntry(new byte[0]))); assertTrue("Didn't correctly read two elements as being null in row key", CoveredColumnIndexCodec.checkRowKeyForAllNulls(result)); // check cases where it isn't null - result = - CoveredColumnIndexCodec.composeRowKey(pk, 2, - Arrays.asList(toColumnEntry(new byte[] { 1, 2 }))); + result = CoveredColumnIndexCodec.composeRowKey(pk, 2, + Arrays.asList(toColumnEntry(new byte[] { 1, 2 }))); assertFalse("Found a null key, when it wasn't!", CoveredColumnIndexCodec.checkRowKeyForAllNulls(result)); - result = - CoveredColumnIndexCodec.composeRowKey(pk, 2, - Arrays.asList(toColumnEntry(new byte[] { 1, 2 }), toColumnEntry(new byte[0]))); + result = CoveredColumnIndexCodec.composeRowKey(pk, 2, + Arrays.asList(toColumnEntry(new byte[] { 1, 2 }), toColumnEntry(new byte[0]))); assertFalse("Found a null key, when it wasn't!", CoveredColumnIndexCodec.checkRowKeyForAllNulls(result)); } @@ -134,8 +128,8 @@ public SimpleTableState(Result r) { } @Override - public List getCurrentRowState(Mutation m, Collection toCover, boolean preMutationStateOnly) - throws IOException { + public List getCurrentRowState(Mutation m, Collection toCover, + boolean preMutationStateOnly) throws IOException { return r.listCells(); } @@ -151,7 +145,7 @@ public void testGeneratedIndexUpdates() throws Exception { group.add(COLUMN_REF); final Result emptyState = Result.create(Collections. emptyList()); - + // setup the state we expect for the codec RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class); Configuration conf = new Configuration(false); @@ -160,7 +154,7 @@ public void testGeneratedIndexUpdates() throws Exception { // make a new codec on those kvs CoveredColumnIndexCodec codec = - CoveredColumnIndexCodec.getCodecForTesting(Arrays.asList(group)); + CoveredColumnIndexCodec.getCodecForTesting(Arrays.asList(group)); // start with a basic put that has some keyvalues Put p = new Put(PK); @@ -177,22 +171,24 @@ public void testGeneratedIndexUpdates() throws Exception { // check the codec for deletes it should send LocalTableState state = new LocalTableState(table, p); - Iterable updates = codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA, null, null); - assertFalse("Found index updates without any existing kvs in table!", updates.iterator().next() - .isValid()); + Iterable updates = + codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA, null, null); + assertFalse("Found index updates without any existing kvs in table!", + updates.iterator().next().isValid()); // get the updates with the pending update state.setCurrentTimestamp(1); state.addPendingUpdates(kvs); updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA, null, null, false); - assertTrue("Didn't find index updates for pending primary table update!", updates.iterator() - .hasNext()); + assertTrue("Didn't find index updates for pending primary table update!", + updates.iterator().hasNext()); for (IndexUpdate update : updates) { - assertTrue("Update marked as invalid, but should be a pending index write!", update.isValid()); + assertTrue("Update marked as invalid, but should be a pending index write!", + update.isValid()); Put m = (Put) update.getUpdate(); // should just be the 
single update for the column reference byte[] expected = - CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1))); + CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1))); assertArrayEquals("Didn't get expected index value", expected, m.getRow()); } @@ -214,7 +210,7 @@ public void testGeneratedIndexUpdates() throws Exception { Delete m = (Delete) update.getUpdate(); // should just be the single update for the column reference byte[] expected = - CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1))); + CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1))); assertArrayEquals("Didn't get expected index value", expected, m.getRow()); } ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d); @@ -230,14 +226,15 @@ public void testGeneratedIndexUpdates() throws Exception { ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d); } - private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, IndexCodec codec, List currentState, - Delete d) throws IOException { + private void ensureNoUpdatesWhenCoveredByDelete(RegionCoprocessorEnvironment env, + IndexCodec codec, List currentState, Delete d) throws IOException { LocalHBaseState table = new SimpleTableState(Result.create(currentState)); LocalTableState state = new LocalTableState(table, d); state.setCurrentTimestamp(d.getTimestamp()); // now we shouldn't see anything when getting the index update state.addPendingUpdates(d.getFamilyCellMap().get(FAMILY)); - Iterable updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA, null, null, false); + Iterable updates = + codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA, null, null, false); for (IndexUpdate update : updates) { assertFalse("Had some index updates, though it should have been covered by the delete", update.isValid()); diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredIndexSpecifierBuilder.java index d8f3ea55d3c..63a621c7aee 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredIndexSpecifierBuilder.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestCoveredIndexSpecifierBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,12 +33,11 @@ public class TestCoveredIndexSpecifierBuilder { private static final String INDEX_TABLE = "INDEX_TABLE"; private static final String INDEX_TABLE2 = "INDEX_TABLE2"; - @Test public void testSimpleSerialziationDeserialization() throws Exception { byte[] indexed_qualifer = Bytes.toBytes("indexed_qual"); - //setup the index + // setup the index CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder(); ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE); // match a single family:qualifier pair @@ -53,20 +52,23 @@ public void testSimpleSerialziationDeserialization() throws Exception { CoveredColumn col3 = new CoveredColumn(FAMILY2, indexed_qualifer); fam2.add(col3); builder.addIndexGroup(fam2); - + Configuration conf = new Configuration(false); - //convert the map that HTableDescriptor gets into the conf the coprocessor receives + // convert the map that HTableDescriptor gets into the conf the coprocessor receives Map map = builder.convertToMap(); - for(Entry entry: map.entrySet()){ + for (Entry entry : map.entrySet()) { conf.set(entry.getKey(), entry.getValue()); } List columns = CoveredColumnIndexSpecifierBuilder.getColumns(conf); assertEquals("Didn't deserialize the expected number of column groups", 2, columns.size()); ColumnGroup group = columns.get(0); - assertEquals("Didn't deserialize expected column in first group", col1, group.getColumnForTesting(0)); - assertEquals("Didn't deserialize expected column in first group", col2, group.getColumnForTesting(1)); + assertEquals("Didn't deserialize expected column in first group", col1, + group.getColumnForTesting(0)); + assertEquals("Didn't deserialize expected column in first group", col2, + group.getColumnForTesting(1)); group = columns.get(1); - assertEquals("Didn't deserialize expected column in second group", col3, group.getColumnForTesting(0)); + assertEquals("Didn't deserialize expected column in second group", col3, + group.getColumnForTesting(0)); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java index e40cdd7f3c4..291fdef3a1d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,11 +39,11 @@ public class TestIndexMemStore { @Test public void testCorrectOverwritting() throws Exception { - IndexMemStore store = new IndexMemStore(new DelegateComparator(new CellComparatorImpl()){ - @Override - public int compare(Cell leftCell, Cell rightCell) { - return super.compare(leftCell, rightCell, true); - } + IndexMemStore store = new IndexMemStore(new DelegateComparator(new CellComparatorImpl()) { + @Override + public int compare(Cell leftCell, Cell rightCell) { + return super.compare(leftCell, rightCell, true); + } }); long ts = 10; KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val); @@ -70,7 +70,6 @@ public int compare(Cell leftCell, Cell rightCell) { /** * We don't expect custom KeyValue creation, so we can't get into weird situations, where a * {@link Type#DeleteFamily} has a column qualifier specified. - * @throws Exception */ @Test public void testExpectedOrdering() throws Exception { @@ -98,4 +97,4 @@ public void testExpectedOrdering() throws Exception { assertTrue("Didn't get smaller ts Put", kv2 == scanner.next()); assertNull("Have more data in the scanner", scanner.next()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java index 3d97906213c..ae3f3e353b7 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestApplyAndFilterDeletesFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; @@ -40,8 +40,8 @@ */ public class TestApplyAndFilterDeletesFilter { - private static final Set EMPTY_SET = Collections - . emptySet(); + private static final Set EMPTY_SET = + Collections. 
emptySet(); private byte[] row = Bytes.toBytes("row"); private byte[] family = Bytes.toBytes("family"); private byte[] qualifier = Bytes.toBytes("qualifier"); @@ -93,7 +93,8 @@ public void testHintCorrectlyToNextFamily() { byte[] laterFamily = Bytes.toBytes("zfamily"); filter = new ApplyAndFilterDeletesFilter(asSet(laterFamily)); assertEquals(ReturnCode.SKIP, filter.filterCell(kv)); - KeyValue expected = KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]); + KeyValue expected = + KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kv), laterFamily, new byte[0]); assertEquals("Didn't get a hint from a family delete", ReturnCode.SEEK_NEXT_USING_HINT, filter.filterCell(next)); assertEquals("Didn't get correct next key with a next family", expected, @@ -135,7 +136,6 @@ private KeyValue createKvForType(Type t, long timestamp) { /** * Test that when we do a column delete at a given timestamp that we delete the entire column. - * @throws Exception */ @Test public void testCoverForDeleteColumn() throws Exception { @@ -148,7 +148,7 @@ public void testCoverForDeleteColumn() throws Exception { // seek past the given put Cell seek = filter.getNextCellHint(put); assertTrue("Seeked key wasn't past the expected put - didn't skip the column", - CellComparator.getInstance().compare(seek, put) > 0); + CellComparator.getInstance().compare(seek, put) > 0); } /** diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestNewerTimestampFilter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestNewerTimestampFilter.java index b04a5fe459c..3ed0cc947b5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestNewerTimestampFilter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestNewerTimestampFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.hbase.index.covered.filter.NewerTimestampFilter; import org.junit.Test; public class TestNewerTimestampFilter { @@ -45,4 +44,4 @@ public void testOnlyAllowsOlderTimestamps() { kv = new KeyValue(row, fam, qual, ts - 1, val); assertEquals("Didn't accept kv with lower ts", ReturnCode.INCLUDE, filter.filterCell(kv)); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/update/TestIndexUpdateManager.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/update/TestIndexUpdateManager.java index b542368ac56..172ce17d8f9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/update/TestIndexUpdateManager.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/update/TestIndexUpdateManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -145,4 +145,4 @@ private void validate(IndexUpdateManager manager, List pending) { } assertTrue("Missing pending updates: " + pending, pending.isEmpty()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestTaskRunner.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestTaskRunner.java index af1e4b80176..e076a63497b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestTaskRunner.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestTaskRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,48 +32,47 @@ public class TestTaskRunner { - @Test - public void testWaitForCompletionTaskRunner() throws Exception { - TaskRunner tr = new WaitForCompletionTaskRunner(Executors.newFixedThreadPool(4)); - TaskBatch tasks = new TaskBatch(4); - for (int i = 0; i < 4; i++) { - tasks.add(new EvenNumberFailingTask(i)); - } - Pair, List>> resultAndFutures = - tr.submitUninterruptible(tasks); - List results = resultAndFutures.getFirst(); - List> futures = resultAndFutures.getSecond(); - for (int j = 0; j < 4; j++) { - if (j % 2 == 0) { - assertNull(results.get(j)); - try { - futures.get(j).get(); - fail("Should have received ExecutionException"); - } catch (Exception e) { - assertTrue(e instanceof ExecutionException); - assertTrue(e.getCause().getMessage().equals("Even number task")); - } - } else { - assertTrue(results.get(j)); - } + @Test + public void testWaitForCompletionTaskRunner() throws Exception { + TaskRunner tr = new WaitForCompletionTaskRunner(Executors.newFixedThreadPool(4)); + TaskBatch tasks = new TaskBatch(4); + for (int i = 0; i < 4; i++) { + tasks.add(new EvenNumberFailingTask(i)); + } + Pair, List>> resultAndFutures = tr.submitUninterruptible(tasks); + List results = resultAndFutures.getFirst(); + List> futures = resultAndFutures.getSecond(); + for (int j = 0; j < 4; j++) { + if (j % 2 == 0) { + assertNull(results.get(j)); + try { + futures.get(j).get(); + fail("Should have received ExecutionException"); + } catch (Exception e) { + assertTrue(e instanceof ExecutionException); + assertTrue(e.getCause().getMessage().equals("Even number task")); } + } else { + assertTrue(results.get(j)); + } } + } - private static class EvenNumberFailingTask extends Task { - private int num; - - public EvenNumberFailingTask(int i) { - this.num = i; - } + private static class EvenNumberFailingTask extends Task { + private int num; - @Override - public Boolean call() throws Exception { - if (num % 2 == 0) { - throw new IOException("Even number task"); - } - return Boolean.TRUE; - } + public EvenNumberFailingTask(int i) { + this.num = i; + } + @Override + public Boolean call() throws Exception { + if (num % 2 == 0) { + throw new IOException("Even number task"); + } + return Boolean.TRUE; } + } + } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java index f3a7201a402..da57a28581c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +32,7 @@ public class TestThreadPoolBuilder { @Test public void testCoreThreadTimeoutNonZero() { Configuration conf = new Configuration(false); - String key = name.getTableNameString()+"-key"; + String key = name.getTableNameString() + "-key"; ThreadPoolBuilder builder = new ThreadPoolBuilder(name.getTableNameString(), conf); assertTrue("core threads not set, but failed return", builder.getKeepAliveTime() > 0); // set an negative value @@ -45,11 +45,11 @@ public void testCoreThreadTimeoutNonZero() { builder.setCoreTimeout(key); assertTrue("core threads not set, but failed return", builder.getKeepAliveTime() > 0); } - + @Test public void testMaxThreadsNonZero() { Configuration conf = new Configuration(false); - String key = name.getTableNameString()+"-key"; + String key = name.getTableNameString() + "-key"; ThreadPoolBuilder builder = new ThreadPoolBuilder(name.getTableNameString(), conf); assertTrue("core threads not set, but failed return", builder.getMaxThreads() > 0); // set an negative value @@ -59,4 +59,4 @@ public void testMaxThreadsNonZero() { builder.setMaxThread(key, 1234); assertEquals("core threads not set, but failed return", 1234, builder.getMaxThreads()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java index 80602462773..4f999e90c36 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,12 +37,13 @@ public class TestThreadPoolManager { public IndexTableName name = new IndexTableName(); @Test - public void testShutdownGetsNewThreadPool() throws Exception{ + public void testShutdownGetsNewThreadPool() throws Exception { Map cache = new HashMap(); - ThreadPoolBuilder builder = new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); + ThreadPoolBuilder builder = + new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); ThreadPoolExecutor exec = ThreadPoolManager.getExecutor(builder, cache); assertNotNull("Got a null exector from the pool!", exec); - //shutdown the pool and ensure that it actually shutdown + // shutdown the pool and ensure that it actually shutdown exec.shutdown(); ThreadPoolExecutor exec2 = ThreadPoolManager.getExecutor(builder, cache); assertFalse("Got the same exectuor, even though the original shutdown", exec2 == exec); @@ -52,7 +53,7 @@ public void testShutdownGetsNewThreadPool() throws Exception{ public void testShutdownWithReferencesDoesNotStopExecutor() throws Exception { Map cache = new HashMap(); ThreadPoolBuilder builder = - new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); + new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); ThreadPoolExecutor exec = ThreadPoolManager.getExecutor(builder, cache); assertNotNull("Got a null exector from the pool!", exec); ThreadPoolExecutor exec2 = ThreadPoolManager.getExecutor(builder, cache); @@ -70,23 +71,21 @@ public void testShutdownWithReferencesDoesNotStopExecutor() throws Exception { public void testGetExpectedExecutorForName() throws Exception { Map cache = new HashMap(); ThreadPoolBuilder builder = - new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); + new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); ThreadPoolExecutor exec = ThreadPoolManager.getExecutor(builder, cache); assertNotNull("Got a null exector from the pool!", exec); ThreadPoolExecutor exec2 = ThreadPoolManager.getExecutor(builder, cache); assertTrue("Got a different exectuor, even though they have the same name", exec2 == exec); builder = new ThreadPoolBuilder(name.getTableNameString(), new Configuration(false)); exec2 = ThreadPoolManager.getExecutor(builder, cache); - assertTrue( - "Got a different exectuor, even though they have the same name, but different confs", + assertTrue("Got a different exectuor, even though they have the same name, but different confs", exec2 == exec); - builder = - new ThreadPoolBuilder(name.getTableNameString() + "-some-other-pool", new Configuration( - false)); + builder = new ThreadPoolBuilder(name.getTableNameString() + "-some-other-pool", + new Configuration(false)); exec2 = ThreadPoolManager.getExecutor(builder, cache); assertFalse( "Got a different exectuor, even though they have the same name, but different confs", exec2 == exec); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java index 7659c40abe7..fdb824cf91a 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -48,7 +48,6 @@ public void testUncompressedWal() throws Exception { /** * Compressed WALs are supported when we have the WALEditCodec installed - * @throws Exception */ @Test public void testCompressedWALWithCodec() throws Exception { @@ -61,7 +60,6 @@ public void testCompressedWALWithCodec() throws Exception { /** * We cannot support WAL Compression with the IndexedHLogReader - * @throws Exception */ @Test(expected = IllegalStateException.class) public void testCompressedWALWithHLogReader() throws Exception { @@ -74,9 +72,8 @@ public void testCompressedWALWithHLogReader() throws Exception { /** * Create the specified index table with the necessary columns - * @param admin {@link Admin} to use when creating the table + * @param admin {@link Admin} to use when creating the table * @param indexTable name of the index table. - * @throws IOException */ public static void createIndexTable(Admin admin, String indexTable) throws IOException { createIndexTable(admin, TableDescriptorBuilder.newBuilder(TableName.valueOf(indexTable))); @@ -86,10 +83,11 @@ public static void createIndexTable(Admin admin, String indexTable) throws IOExc * @param admin to create the table * @param index descriptor to update before creating table */ - public static void createIndexTable(Admin admin, TableDescriptorBuilder indexBuilder) throws IOException { - indexBuilder.setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY) - .setKeepDeletedCells(KeepDeletedCells.TRUE).build()); + public static void createIndexTable(Admin admin, TableDescriptorBuilder indexBuilder) + throws IOException { + indexBuilder.setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()); admin.createTable(indexBuilder.build()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java index 0e18f1f5349..1a3ebcce723 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.hbase.index.write; import java.io.IOException; @@ -50,14 +49,14 @@ public void shutdown() { shutdown = true; } - @Override - public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException { - return this.tables.get(tablename); - } + @Override + public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException { + return this.tables.get(tablename); + } - @Override - public Connection getConnection() throws IOException { - return null; - } + @Override + public Connection getConnection() throws IOException { + return null; + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java index d94c7f8b177..eb9223f80f6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; @@ -82,8 +81,8 @@ public void getDefaultFailurePolicy() throws Exception { Region region = Mockito.mock(Region.class); Mockito.when(env.getRegion()).thenReturn(region); Mockito.when(env.getConfiguration()).thenReturn(conf); - Mockito.when(region.getTableDescriptor()).thenReturn( - TableDescriptorBuilder.newBuilder(TableName.valueOf("dummy")).build()); + Mockito.when(region.getTableDescriptor()) + .thenReturn(TableDescriptorBuilder.newBuilder(TableName.valueOf("dummy")).build()); assertNotNull(IndexWriter.getFailurePolicy(env)); } @@ -98,10 +97,10 @@ public void testSynchronouslyCompletesAllWrites() throws Exception { LOGGER.info("Current thread is interrupted: " + Thread.interrupted()); Abortable abort = new StubAbortable(); Stoppable stop = Mockito.mock(Stoppable.class); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); Region mockRegion = Mockito.mock(Region.class); Mockito.when(e.getRegion()).thenReturn(mockRegion); TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class); @@ -117,25 +116,26 @@ public void testSynchronouslyCompletesAllWrites() throws Exception { byte[] tableName = this.testName.getTableName(); Put m = new Put(row); m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); - Collection> indexUpdates = Arrays.asList(new Pair(m, - tableName)); + Collection> indexUpdates = + Arrays.asList(new Pair(m, 
tableName)); Table table = Mockito.mock(Table.class); final boolean[] completed = new boolean[] { false }; - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - // just keep track that it was called - completed[0] = true; - return null; - } - }).when(table).batch(Mockito.anyList(), Mockito.any()); + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + // just keep track that it was called + completed[0] = true; + return null; + } + }).when(table).batch(Mockito.anyList(), Mockito.any()); Mockito.when(table.getName()).thenReturn(TableName.valueOf(testName.getTableName())); // add the table to the set of tables, so its returned to the writer tables.put(new ImmutableBytesPtr(tableName), table); // setup the writer and failure policy - TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + TrackingParallelWriterIndexCommitter committer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); committer.setup(factory, exec, stop, e); KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy(); policy.setup(stop, e); @@ -160,10 +160,10 @@ public void testShutdownInterruptsAsExpected() throws Exception { // single thread factory so the older request gets queued ExecutorService exec = Executors.newFixedThreadPool(1); Map tables = new HashMap(); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); Region mockRegion = Mockito.mock(Region.class); Mockito.when(e.getRegion()).thenReturn(mockRegion); TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class); @@ -179,21 +179,21 @@ public void testShutdownInterruptsAsExpected() throws Exception { final CountDownLatch writeStartedLatch = new CountDownLatch(1); // latch never gets counted down, so we wait forever final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1); - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - LOGGER.info("Write started"); - writeStartedLatch.countDown(); - // when we interrupt the thread for shutdown, we should see this throw an interrupt too - try { - waitOnAbortedLatch.await(); - } catch (InterruptedException e) { - LOGGER.info("Correctly interrupted while writing!"); - throw e; - } - return null; - } - }).when(table).batch(Mockito.anyList(), Mockito.any()); + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + LOGGER.info("Write started"); + writeStartedLatch.countDown(); + // when we interrupt the thread for shutdown, we should see this throw an interrupt too + try { + waitOnAbortedLatch.await(); + } catch (InterruptedException e) { + LOGGER.info("Correctly interrupted while writing!"); + throw e; + } + return null; + } + }).when(table).batch(Mockito.anyList(), Mockito.any()); // add the tables to the set of tables, so its returned to the writer tables.put(new ImmutableBytesPtr(tableName), table); @@ -204,8 +204,9 @@ public Void answer(InvocationOnMock invocation) throws Throwable { 
indexUpdates.add(new Pair(m, tableName)); // setup the writer - TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); - committer.setup(factory, exec, stop, e ); + TrackingParallelWriterIndexCommitter committer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + committer.setup(factory, exec, stop, e); KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy(); policy.setup(stop, e); final IndexWriter writer = new IndexWriter(committer, policy); @@ -231,4 +232,4 @@ public void run() { primaryWriter.join(); assertTrue("Writer should have failed because of the stop we issued", failedWrite[0]); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java index 30ea9ce291a..1cb1b625a1b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,9 +41,10 @@ import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.phoenix.hbase.index.IndexTableName; import org.apache.phoenix.hbase.index.StubAbortable; -import org.apache.phoenix.hbase.index.covered.IndexMetaData; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.util.ScanUtil; import org.junit.Rule; import org.junit.Test; @@ -53,9 +54,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - public class TestParalleIndexWriter { private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleIndexWriter.class); @@ -64,18 +62,19 @@ public class TestParalleIndexWriter { private final byte[] row = Bytes.toBytes("row"); @Test - public void testCorrectlyCleansUpResources() throws Exception{ + public void testCorrectlyCleansUpResources() throws Exception { ExecutorService exec = Executors.newFixedThreadPool(1); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); - FakeTableFactory factory = new FakeTableFactory( - Collections. emptyMap()); - TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + FakeTableFactory factory = + new FakeTableFactory(Collections. 
emptyMap()); + TrackingParallelWriterIndexCommitter writer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); Stoppable mockStop = Mockito.mock(Stoppable.class); // create a simple writer - writer.setup(factory, exec, mockStop,e); + writer.setup(factory, exec, mockStop, e); // stop the writer writer.stop(this.test.getTableNameString() + " finished"); assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown); @@ -91,13 +90,12 @@ public void testSynchronouslyCompletesAllWrites() throws Exception { Abortable abort = new StubAbortable(); Stoppable stop = Mockito.mock(Stoppable.class); ExecutorService exec = Executors.newFixedThreadPool(1); - Map tables = - new LinkedHashMap(); + Map tables = new LinkedHashMap(); FakeTableFactory factory = new FakeTableFactory(tables); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); Region mockRegion = Mockito.mock(Region.class); Mockito.when(e.getRegion()).thenReturn(mockRegion); TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class); @@ -109,7 +107,7 @@ public void testSynchronouslyCompletesAllWrites() throws Exception { Put m = new Put(row); m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); Multimap indexUpdates = - ArrayListMultimap. create(); + ArrayListMultimap. create(); indexUpdates.put(new HTableInterfaceReference(tableName), m); Table table = Mockito.mock(Table.class); @@ -122,13 +120,15 @@ public Void answer(InvocationOnMock invocation) throws Throwable { completed[0] = true; return null; } - }).when(table).batch(Mockito.anyList(),Mockito.any()); - Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName())); + }).when(table).batch(Mockito.anyList(), Mockito.any()); + Mockito.when(table.getName()) + .thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName())); // add the table to the set of tables, so its returned to the writer tables.put(tableName, table); // setup the writer and failure policy - TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + TrackingParallelWriterIndexCommitter writer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); writer.setup(factory, exec, stop, e); writer.write(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java index e169ce13ab4..95716b65732 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,6 +41,8 @@ import org.apache.phoenix.hbase.index.IndexTableName; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.apache.phoenix.util.ScanUtil; import org.junit.Rule; import org.junit.Test; @@ -50,27 +52,26 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.ArrayListMultimap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - public class TestParalleWriterIndexCommitter { - private static final Logger LOGGER = LoggerFactory.getLogger(TestParalleWriterIndexCommitter.class); + private static final Logger LOGGER = + LoggerFactory.getLogger(TestParalleWriterIndexCommitter.class); @Rule public IndexTableName test = new IndexTableName(); private final byte[] row = Bytes.toBytes("row"); @Test - public void testCorrectlyCleansUpResources() throws Exception{ + public void testCorrectlyCleansUpResources() throws Exception { ExecutorService exec = Executors.newFixedThreadPool(1); - FakeTableFactory factory = new FakeTableFactory( - Collections. emptyMap()); - TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + FakeTableFactory factory = + new FakeTableFactory(Collections. emptyMap()); + TrackingParallelWriterIndexCommitter writer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); Stoppable mockStop = Mockito.mock(Stoppable.class); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); // create a simple writer writer.setup(factory, exec, mockStop, e); // stop the writer @@ -80,15 +81,15 @@ public void testCorrectlyCleansUpResources() throws Exception{ Mockito.verifyNoMoreInteractions(mockStop); } - @SuppressWarnings({ "unchecked"}) + @SuppressWarnings({ "unchecked" }) @Test public void testSynchronouslyCompletesAllWrites() throws Exception { LOGGER.info("Starting " + test.getTableNameString()); LOGGER.info("Current thread is interrupted: " + Thread.interrupted()); - RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class); - Configuration conf =new Configuration(); + RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class); + Configuration conf = new Configuration(); Mockito.when(e.getConfiguration()).thenReturn(conf); - Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); + Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap()); Region mockRegion = Mockito.mock(Region.class); Mockito.when(e.getRegion()).thenReturn(mockRegion); TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class); @@ -98,33 +99,34 @@ public void 
testSynchronouslyCompletesAllWrites() throws Exception { Mockito.when(mockRegion.getTableDescriptor()).thenReturn(mockTableDesc); Stoppable stop = Mockito.mock(Stoppable.class); ExecutorService exec = Executors.newFixedThreadPool(1); - Map tables = - new LinkedHashMap(); + Map tables = new LinkedHashMap(); FakeTableFactory factory = new FakeTableFactory(tables); ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName()); Put m = new Put(row); m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null); Multimap indexUpdates = - ArrayListMultimap. create(); + ArrayListMultimap. create(); indexUpdates.put(new HTableInterfaceReference(tableName), m); Table table = Mockito.mock(Table.class); final boolean[] completed = new boolean[] { false }; - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - // just keep track that it was called - completed[0] = true; - return null; - } - }).when(table).batch(Mockito.anyList(), Mockito.any()); - Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName())); + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + // just keep track that it was called + completed[0] = true; + return null; + } + }).when(table).batch(Mockito.anyList(), Mockito.any()); + Mockito.when(table.getName()) + .thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName())); // add the table to the set of tables, so its returned to the writer tables.put(tableName, table); // setup the writer and failure policy - TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); + TrackingParallelWriterIndexCommitter writer = + new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion()); writer.setup(factory, exec, stop, e); writer.write(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION); assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", @@ -133,4 +135,4 @@ public Void answer(InvocationOnMock invocation) throws Throwable { assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown); assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown()); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java index ebea1f1effb..af0f7fe9c91 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -69,6 +69,7 @@ import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil; import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache; import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -76,8 +77,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - /** * When a regionserver crashes, its WAL is split and then replayed to the server. If the index * region was present on the same server, we have to make a best effort to not kill the server for @@ -105,7 +104,8 @@ private String getIndexTableName() { // ----------------------------------------------------------------------------------------------- private static CountDownLatch allowIndexTableToRecover; - public static class IndexTableBlockingReplayObserver implements RegionObserver, RegionCoprocessor { + public static class IndexTableBlockingReplayObserver + implements RegionObserver, RegionCoprocessor { @Override public Optional getRegionObserver() { @@ -113,10 +113,11 @@ public Optional getRegionObserver() { } @Override - public void preWALRestore( - org.apache.hadoop.hbase.coprocessor.ObserverContext ctx, - org.apache.hadoop.hbase.client.RegionInfo info, WALKey logKey, - org.apache.hadoop.hbase.wal.WALEdit logEdit) throws IOException { + public void preWALRestore( + org.apache.hadoop.hbase.coprocessor.ObserverContext< + ? 
extends RegionCoprocessorEnvironment> ctx, + org.apache.hadoop.hbase.client.RegionInfo info, WALKey logKey, + org.apache.hadoop.hbase.wal.WALEdit logEdit) throws IOException { try { LOGGER.debug("Restoring logs for index table"); if (allowIndexTableToRecover != null) { @@ -132,7 +133,6 @@ public void preWALRestore( public static class ReleaseLatchOnFailurePolicy extends StoreFailuresInCachePolicy { /** - * @param failedIndexEdits */ public ReleaseLatchOnFailurePolicy(PerRegionIndexWriteCache failedIndexEdits) { super(failedIndexEdits); @@ -140,7 +140,7 @@ public ReleaseLatchOnFailurePolicy(PerRegionIndexWriteCache failedIndexEdits) { @Override public void handleFailure(Multimap attempted, - Exception cause) throws IOException { + Exception cause) throws IOException { LOGGER.debug("Found index update failure!"); if (allowIndexTableToRecover != null) { LOGGER.info("failed index write on WAL recovery - allowing index table to be restored."); @@ -151,9 +151,9 @@ public void handleFailure(Multimap attempted } - //TODO: Jesse to fix + // TODO: Jesse to fix @SuppressWarnings("deprecation") -@Ignore("Configuration issue - valid test, just needs fixing") + @Ignore("Configuration issue - valid test, just needs fixing") @Test public void testWaitsOnIndexRegionToReload() throws Exception { HBaseTestingUtility util = new HBaseTestingUtility(); @@ -182,30 +182,31 @@ public void testWaitsOnIndexRegionToReload() throws Exception { builder.addIndexGroup(columns); // create the primary table w/ indexing enabled - TableDescriptor primaryTable = TableDescriptorBuilder.newBuilder(TableName.valueOf(testTable.getTableName())) - .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)) - .addColumnFamily(ColumnFamilyDescriptorBuilder.of(nonIndexedFamily)).build(); + TableDescriptor primaryTable = + TableDescriptorBuilder.newBuilder(TableName.valueOf(testTable.getTableName())) + .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)) + .addColumnFamily(ColumnFamilyDescriptorBuilder.of(nonIndexedFamily)).build(); builder.addArbitraryConfigForTesting(Indexer.RecoveryFailurePolicyKeyForTesting, ReleaseLatchOnFailurePolicy.class.getName()); builder.build(primaryTable); admin.createTable(primaryTable); // create the index table - TableDescriptorBuilder indexTableBuilder = TableDescriptorBuilder - .newBuilder(TableName.valueOf(Bytes.toBytes(getIndexTableName()))) - .addCoprocessor(IndexTableBlockingReplayObserver.class.getName()); + TableDescriptorBuilder indexTableBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(getIndexTableName()))) + .addCoprocessor(IndexTableBlockingReplayObserver.class.getName()); TestIndexManagementUtil.createIndexTable(admin, indexTableBuilder); // figure out where our tables live - ServerName shared = - ensureTablesLiveOnSameServer(util.getMiniHBaseCluster(), Bytes.toBytes(indexedTableName), - testTable.getTableName()); + ServerName shared = ensureTablesLiveOnSameServer(util.getMiniHBaseCluster(), + Bytes.toBytes(indexedTableName), testTable.getTableName()); // load some data into the table Put p = new Put(Bytes.toBytes("row")); p.addColumn(family, qual, Bytes.toBytes("value")); Connection hbaseConn = ConnectionFactory.createConnection(conf); - Table primary = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(testTable.getTableName())); + Table primary = + hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(testTable.getTableName())); primary.put(p); // turn on the recovery latch @@ -214,8 +215,8 @@ public void 
testWaitsOnIndexRegionToReload() throws Exception { // kill the server where the tables live - this should trigger distributed log splitting // find the regionserver that matches the passed server List online = new ArrayList(); - online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared, - testTable.getTableName())); + online.addAll( + getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared, testTable.getTableName())); online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared, Bytes.toBytes(indexedTableName))); @@ -228,7 +229,7 @@ public void testWaitsOnIndexRegionToReload() throws Exception { LOGGER.info("\t== Offline: " + server.getServerName()); continue; } - + List regions = server.getRegions(); LOGGER.info("\t" + server.getServerName() + " regions: " + regions); } @@ -247,7 +248,8 @@ public void testWaitsOnIndexRegionToReload() throws Exception { // make a second put that (1), isn't indexed, so we can be sure of the index state and (2) // ensures that our table is back up Put p2 = new Put(p.getRow()); - p2.addColumn(nonIndexedFamily, Bytes.toBytes("Not indexed"), Bytes.toBytes("non-indexed value")); + p2.addColumn(nonIndexedFamily, Bytes.toBytes("Not indexed"), + Bytes.toBytes("non-indexed value")); primary.put(p2); // make sure that we actually failed the write once (within a 5 minute window) @@ -257,7 +259,8 @@ public void testWaitsOnIndexRegionToReload() throws Exception { // scan the index to make sure it has the one entry, (that had to be replayed from the WAL, // since we hard killed the server) Scan s = new Scan(); - Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(getIndexTableName())); + Table index = + hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(getIndexTableName())); ResultScanner scanner = index.getScanner(s); int count = 0; for (Result r : scanner) { @@ -275,31 +278,24 @@ public void testWaitsOnIndexRegionToReload() throws Exception { } /** - * @param cluster - * @param server - * @param table - * @return */ private List getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server, - byte[] table) { + byte[] table) { List online = Collections.emptyList(); for (RegionServerThread rst : cluster.getRegionServerThreads()) { // if its the server we are going to kill, get the regions we want to reassign if (rst.getRegionServer().getServerName().equals(server)) { - online = rst.getRegionServer().getRegions(org.apache.hadoop.hbase.TableName.valueOf(table)); - break; + online = rst.getRegionServer().getRegions(org.apache.hadoop.hbase.TableName.valueOf(table)); + break; } } return online; } /** - * @param cluster - * @param indexTable - * @param primaryTable */ private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] indexTable, - byte[] primaryTable) throws Exception { + byte[] primaryTable) throws Exception { ServerName shared = getSharedServer(cluster, indexTable, primaryTable); boolean tryIndex = true; @@ -346,14 +342,9 @@ private ServerName ensureTablesLiveOnSameServer(MiniHBaseCluster cluster, byte[] } /** - * @param cluster - * @param indexTable - * @param primaryTable - * @return - * @throws Exception */ private ServerName getSharedServer(MiniHBaseCluster cluster, byte[] indexTable, - byte[] primaryTable) throws Exception { + byte[] primaryTable) throws Exception { Set indexServers = getServersForTable(cluster, indexTable); Set primaryServers = getServersForTable(cluster, primaryTable); @@ -368,18 +359,19 @@ private ServerName 
getSharedServer(MiniHBaseCluster cluster, byte[] indexTable, } } throw new RuntimeException( - "Couldn't find a matching server on which both the primary and index table live, " - + "even though they have overlapping server sets"); + "Couldn't find a matching server on which both the primary and index table live, " + + "even though they have overlapping server sets"); } return null; } private Set getServersForTable(MiniHBaseCluster cluster, byte[] table) - throws Exception { + throws Exception { Set indexServers = new HashSet(); for (Region region : cluster.getRegions(table)) { - indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionInfo().getRegionName())); + indexServers + .add(cluster.getServerHoldingRegion(null, region.getRegionInfo().getRegionName())); } return indexServers; } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java index 3328eefbc40..fadd45d0dc5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,6 +46,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.phoenix.hbase.index.table.HTableInterfaceReference; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -54,11 +56,8 @@ import org.junit.Test; import org.junit.rules.TestName; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Multimap; - public class TestPerRegionIndexWriteCache { - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static TableName tableName = TableName.valueOf("t1");; private static final byte[] row = Bytes.toBytes("row"); private static final byte[] family = Bytes.toBytes("family"); @@ -83,78 +82,78 @@ public class TestPerRegionIndexWriteCache { @BeforeClass public static synchronized void startDfs() throws Exception { - miniDfs = TEST_UTIL.startMiniDFSCluster(1); + miniDfs = TEST_UTIL.startMiniDFSCluster(1); } @AfterClass public static synchronized void stopDfs() throws Exception { - if (miniDfs != null) { - miniDfs.shutdown(); - miniDfs = null; - } + if (miniDfs != null) { + miniDfs.shutdown(); + miniDfs = null; + } } @SuppressWarnings("deprecation") @Before public void setUp() throws Exception { - Path hbaseRootDir = new Path(getClass().getSimpleName() + "_" + testName.getMethodName()); - TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString()); - - FileSystem newFS = miniDfs.getFileSystem(); - RegionInfo hri = 
RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).setSplit(false).build(); - Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); - Random rn = new Random(); - tableName = TableName.valueOf("TestPerRegion" + rn.nextInt()); - WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), getClass().getSimpleName()); - wal = walFactory.getWAL(RegionInfoBuilder.newBuilder(TableName.valueOf("logs")).build()); - TableDescriptor htd = - TableDescriptorBuilder - .newBuilder(tableName) - .addColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build()) - .build(); - - r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) { - @Override - public int hashCode() { - return 1; - } - - @Override - public String toString() { - return "testRegion1"; - } - }; - - r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) { - @Override - public int hashCode() { - return 2; - } - - @Override - public String toString() { - return "testRegion1"; - } - }; + Path hbaseRootDir = new Path(getClass().getSimpleName() + "_" + testName.getMethodName()); + TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString()); + + FileSystem newFS = miniDfs.getFileSystem(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null) + .setSplit(false).build(); + Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); + Random rn = new Random(); + tableName = TableName.valueOf("TestPerRegion" + rn.nextInt()); + WALFactory walFactory = + new WALFactory(TEST_UTIL.getConfiguration(), getClass().getSimpleName()); + wal = walFactory.getWAL(RegionInfoBuilder.newBuilder(TableName.valueOf("logs")).build()); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build()) + .build(); + + r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) { + @Override + public int hashCode() { + return 1; + } + + @Override + public String toString() { + return "testRegion1"; + } + }; + + r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) { + @Override + public int hashCode() { + return 2; + } + + @Override + public String toString() { + return "testRegion1"; + } + }; } - + @After public void cleanUp() throws Exception { - try{ - r1.close(); - r2.close(); - wal.close(); - } catch (Exception ignored) {} - FileSystem newFS = FileSystem.get(TEST_UTIL.getConfiguration()); - newFS.delete(TEST_UTIL.getDataTestDir(), true); + try { + r1.close(); + r2.close(); + wal.close(); + } catch (Exception ignored) { + } + FileSystem newFS = FileSystem.get(TEST_UTIL.getConfiguration()); + newFS.delete(TEST_UTIL.getDataTestDir(), true); } - @Test public void testAddRemoveSingleRegion() { PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache(); - HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); + HTableInterfaceReference t1 = + new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); List mutations = new ArrayList(); mutations.add(p); cache.addEdits(r1, t1, mutations); @@ -162,7 +161,8 @@ public void testAddRemoveSingleRegion() { Set>> entries = edits.asMap().entrySet(); assertEquals("Got more than one table in the the edit map!", 1, entries.size()); for (Entry> entry : entries) { - //ensure that we are still storing a list here - otherwise it breaks the parallel 
writer implementation + // ensure that we are still storing a list here - otherwise it breaks the parallel writer + // implementation final List stored = (List) entry.getValue(); assertEquals("Got an unexpected amount of mutations in the entry", 1, stored.size()); assertEquals("Got an unexpected mutation in the entry", p, stored.get(0)); @@ -177,7 +177,7 @@ public void testAddRemoveSingleRegion() { public void testMultipleAddsForSingleRegion() { PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache(); HTableInterfaceReference t1 = - new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); + new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); List mutations = Lists. newArrayList(p); cache.addEdits(r1, t1, mutations); @@ -202,7 +202,7 @@ public void testMultipleAddsForSingleRegion() { public void testMultipleRegions() { PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache(); HTableInterfaceReference t1 = - new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); + new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1"))); List mutations = Lists. newArrayList(p); List m2 = Lists. newArrayList(p2); // add each region @@ -235,7 +235,6 @@ public void testMultipleRegions() { assertEquals("Got an unexpected mutation in the entry for region2", p2, stored.get(0)); } - // ensure that a second get doesn't have any more edits. This ensures that we don't keep // references around to these edits and have a memory leak assertNull("Got an entry for a region we removed", cache.getEdits(r1)); diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java index 1a6d1dada49..b6d9446076e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexMaintainerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.index; import static org.junit.Assert.assertArrayEquals; @@ -52,274 +51,319 @@ import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableKey; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TestUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +public class IndexMaintainerTest extends BaseConnectionlessQueryTest { + private static final String DEFAULT_SCHEMA_NAME = ""; + private static final String DEFAULT_TABLE_NAME = "rkTest"; -public class IndexMaintainerTest extends BaseConnectionlessQueryTest { - private static final String DEFAULT_SCHEMA_NAME = ""; - private static final String DEFAULT_TABLE_NAME = "rkTest"; - - private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, Object[] values) throws Exception { - testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, values, "", "", ""); - } + private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, + Object[] values) throws Exception { + testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, + values, "", "", ""); + } - private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns) throws Exception { - testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, values, includeColumns, "", ""); - } + private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, + Object[] values, String includeColumns) throws Exception { + testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, + values, includeColumns, "", ""); + } - private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, String indexProps) throws Exception { - testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, values, "", dataProps, indexProps); - } + private void testIndexRowKeyBuilding(String dataColumns, String pk, String indexColumns, + Object[] values, String includeColumns, String dataProps, String indexProps) throws Exception { + testIndexRowKeyBuilding(DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME, dataColumns, pk, indexColumns, + values, "", dataProps, indexProps); + } - private static ValueGetter newValueGetter(final byte[] row, final Map valueMap) { - return new AbstractValueGetter() { + private static ValueGetter newValueGetter(final byte[] row, + final Map valueMap) { + return new AbstractValueGetter() { - @Override - public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { - return new ImmutableBytesPtr(valueMap.get(ref)); - } + @Override + public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) { + return new ImmutableBytesPtr(valueMap.get(ref)); + } - @Override - public byte[] getRowKey() { - return row; - } - - }; - } - - private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, String indexProps) throws Exception { - 
KeyValueBuilder builder = GenericKeyValueBuilder.INSTANCE; - testIndexRowKeyBuilding(schemaName, tableName, dataColumns, pk, indexColumns, values, includeColumns, dataProps, indexProps, builder); - } + @Override + public byte[] getRowKey() { + return row; + } - private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, - String pk, String indexColumns, Object[] values, String includeColumns, - String dataProps, String indexProps, KeyValueBuilder builder) throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier(tableName)); - String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier("idx")); - conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? "" : dataProps) ); - try { - conn.createStatement().execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + (indexProps.isEmpty() ? "" : indexProps)); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); - PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(),fullIndexName)); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - table.getIndexMaintainers(ptr, pconn); - List c1 = IndexMaintainer.deserialize(ptr, builder, true); - assertEquals(1,c1.size()); - IndexMaintainer im1 = c1.get(0); - - StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); - for (int i = 0; i < values.length; i++) { - buf.append("?,"); - } - buf.setCharAt(buf.length()-1, ')'); - PreparedStatement stmt = conn.prepareStatement(buf.toString()); - for (int i = 0; i < values.length; i++) { - stmt.setObject(i+1, values[i]); - } - stmt.execute(); - Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List dataKeyValues = iterator.next().getSecond(); - Map valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size()); - ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(dataKeyValues.get(0).getRowArray(), dataKeyValues.get(0).getRowOffset(), dataKeyValues.get(0).getRowLength()); - byte[] row = rowKeyPtr.copyBytes(); - Put dataMutation = new Put(row); - for (Cell kv : dataKeyValues) { - valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), CellUtil.cloneValue(kv)); - dataMutation.add(kv); - } - ValueGetter valueGetter = newValueGetter(row, valueMap); - - List indexMutations = IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder); - assertEquals(1,indexMutations.size()); - assertTrue(indexMutations.get(0) instanceof Put); - Mutation indexMutation = indexMutations.get(0); - ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow()); - ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength()); - byte[] mutablelndexRowKey = im1.buildRowKey(valueGetter, ptr, null, null, HConstants.LATEST_TIMESTAMP); - byte[] immutableIndexRowKey = indexKeyPtr.copyBytes(); - assertArrayEquals(immutableIndexRowKey, mutablelndexRowKey); - for (ColumnReference ref : im1.getCoveredColumns()) { - valueMap.get(ref); - } - byte[] 
dataRowKey = im1.buildDataRowKey(indexKeyPtr, null); - assertArrayEquals(dataRowKey, CellUtil.cloneRow(dataKeyValues.get(0))); - } finally { - try { - conn.rollback(); - conn.createStatement().execute("DROP TABLE " + fullTableName); - } finally { - conn.close(); - } - } - } + }; + } - @Test - public void testRowKeyVarOnlyIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL", "k1,k2", "k2, k1", new Object [] {"a",1.1}); - } - - @Test - public void testVarFixedndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2, k1", new Object [] {"a",1.1}); - } - - - @Test - public void testCompositeRowKeyVarFixedIndex() throws Exception { - // TODO: using 1.1 for INTEGER didn't give error - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2, k1", new Object [] {"a",1}); - } - - @Test - public void testCompositeRowKeyVarFixedAtEndIndex() throws Exception { - // Forces trailing zero in index key for fixed length - for (int i = 0; i < 10; i++) { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, k3 VARCHAR, v VARCHAR", "k1,k2,k3", "k1, k3, k2", new Object [] {"a",i, "b"}); - } - } - - @Test - public void testSingleKeyValueIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER, v VARCHAR", "k1", "v", new Object [] {"a",1,"b"}); - } - - @Test - public void testMultiKeyValueIndex() throws Exception { - testIndexRowKeyBuilding("k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT", "k1, k2", "v2, k2, v1", new Object [] {"a",1,2.2,"bb"}); - } - - @Test - public void testMultiKeyValueCoveredIndex() throws Exception { - testIndexRowKeyBuilding("k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", "v2, k2, v1", new Object [] {"a",1,2.2,"bb"}, "v3, v4"); - } - - @Test - public void testSingleKeyValueDescIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER, v VARCHAR", "k1", "v DESC", new Object [] {"a",1,"b"}); - } - - @Test - public void testCompositeRowKeyVarFixedDescIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2 DESC, k1", new Object [] {"a",1}); - } - - @Test - public void testCompositeRowKeyTimeIndex() throws Exception { - long timeInMillis = System.currentTimeMillis(); - long timeInNanos = System.nanoTime(); - Timestamp ts = new Timestamp(timeInMillis); - ts.setNanos((int) (timeInNanos % 1000000000)); - testIndexRowKeyBuilding("ts1 DATE NOT NULL, ts2 TIME NOT NULL, ts3 TIMESTAMP NOT NULL", "ts1,ts2,ts3", "ts2, ts1", new Object [] {new Date(timeInMillis), new Time(timeInMillis), ts}); - } - - @Test - public void testCompositeRowKeyBytesIndex() throws Exception { - long timeInMillis = System.currentTimeMillis(); - long timeInNanos = System.nanoTime(); - Timestamp ts = new Timestamp(timeInMillis); - ts.setNanos((int) (timeInNanos % 1000000000)); - testIndexRowKeyBuilding("b1 BINARY(3) NOT NULL, v VARCHAR", "b1,v", "v, b1", new Object [] {new byte[] {41,42,43}, "foo"}); - } - - @Test - public void testCompositeDescRowKeyVarFixedDescIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", "k2 DESC, k1", new Object [] {"a",1}); - } - - @Test - public void testCompositeDescRowKeyVarDescIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL NOT NULL, v VARCHAR", "k1, k2 DESC", "k2 DESC, k1", new Object [] {"a",1.1,"b"}); - } - - @Test - 
public void testCompositeDescRowKeyVarAscIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL NOT NULL, v VARCHAR", "k1, k2 DESC", "k2, k1", new Object [] {"a",1.1,"b"}); - } - - @Test - public void testCompositeDescRowKeyVarFixedDescSaltedIndex() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", "k2 DESC, k1", new Object [] {"a",1}, "", "", "SALT_BUCKETS=4"); - } - - @Test - public void testCompositeDescRowKeyVarFixedDescSaltedIndexSaltedTable() throws Exception { - testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", "k2 DESC, k1", new Object [] {"a",1}, "", "SALT_BUCKETS=3", "SALT_BUCKETS=3"); - } - - @Test - public void testMultiKeyValueCoveredSaltedIndex() throws Exception { - testIndexRowKeyBuilding("k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", "v2 DESC, k2 DESC, v1", new Object [] {"a",1,2.2,"bb"}, "v3, v4", "", "SALT_BUCKETS=4"); - } - - @Test - public void tesIndexWithBigInt() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BIGINT, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1 DESC, k2 DESC", new Object[] { "a", 1, 2.2, "bb" }); - } - - @Test - public void tesIndexWithAscBoolean() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1, k2 DESC", new Object[] { "a", 1, true, "bb" }); - } - - @Test - public void tesIndexWithAscNullBoolean() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1, k2 DESC", new Object[] { "a", 1, null, "bb" }); - } - - @Test - public void tesIndexWithAscFalseBoolean() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1, k2 DESC", new Object[] { "a", 1, false, "bb" }); - } - - @Test - public void tesIndexWithDescBoolean() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1 DESC, k2 DESC", new Object[] { "a", 1, true, "bb" }); + private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, + String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, + String indexProps) throws Exception { + KeyValueBuilder builder = GenericKeyValueBuilder.INSTANCE; + testIndexRowKeyBuilding(schemaName, tableName, dataColumns, pk, indexColumns, values, + includeColumns, dataProps, indexProps, builder); + } + + private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, + String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, + String indexProps, KeyValueBuilder builder) throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), + SchemaUtil.normalizeIdentifier(tableName)); + String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), + SchemaUtil.normalizeIdentifier("idx")); + conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? 
"" : dataProps)); + try { + conn.createStatement() + .execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + + (indexProps.isEmpty() ? "" : indexProps)); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); + PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName)); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + table.getIndexMaintainers(ptr, pconn); + List c1 = IndexMaintainer.deserialize(ptr, builder, true); + assertEquals(1, c1.size()); + IndexMaintainer im1 = c1.get(0); + + StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); + for (int i = 0; i < values.length; i++) { + buf.append("?,"); + } + buf.setCharAt(buf.length() - 1, ')'); + PreparedStatement stmt = conn.prepareStatement(buf.toString()); + for (int i = 0; i < values.length; i++) { + stmt.setObject(i + 1, values[i]); + } + stmt.execute(); + Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); + List dataKeyValues = iterator.next().getSecond(); + Map valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size()); + ImmutableBytesWritable rowKeyPtr = + new ImmutableBytesWritable(dataKeyValues.get(0).getRowArray(), + dataKeyValues.get(0).getRowOffset(), dataKeyValues.get(0).getRowLength()); + byte[] row = rowKeyPtr.copyBytes(); + Put dataMutation = new Put(row); + for (Cell kv : dataKeyValues) { + valueMap.put( + new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), + CellUtil.cloneValue(kv)); + dataMutation.add(kv); + } + ValueGetter valueGetter = newValueGetter(row, valueMap); + + List indexMutations = + IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder); + assertEquals(1, indexMutations.size()); + assertTrue(indexMutations.get(0) instanceof Put); + Mutation indexMutation = indexMutations.get(0); + ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow()); + ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength()); + byte[] mutablelndexRowKey = + im1.buildRowKey(valueGetter, ptr, null, null, HConstants.LATEST_TIMESTAMP); + byte[] immutableIndexRowKey = indexKeyPtr.copyBytes(); + assertArrayEquals(immutableIndexRowKey, mutablelndexRowKey); + for (ColumnReference ref : im1.getCoveredColumns()) { + valueMap.get(ref); + } + byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null); + assertArrayEquals(dataRowKey, CellUtil.cloneRow(dataKeyValues.get(0))); + } finally { + try { + conn.rollback(); + conn.createStatement().execute("DROP TABLE " + fullTableName); + } finally { + conn.close(); + } } - - @Test - public void tesIndexWithDescFalseBoolean() throws Exception { - testIndexRowKeyBuilding( - "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", "k1, k2", - "v1 DESC, k2 DESC", new Object[] { "a", 1, false, "bb" }); + } + + @Test + public void testRowKeyVarOnlyIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL", "k1,k2", "k2, k1", new Object[] { "a", 1.1 }); + } + + @Test + public void testVarFixedndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2, k1", + new Object[] { "a", 1.1 }); + } + + @Test + public void testCompositeRowKeyVarFixedIndex() throws 
Exception { + // TODO: using 1.1 for INTEGER didn't give error + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2, k1", + new Object[] { "a", 1 }); + } + + @Test + public void testCompositeRowKeyVarFixedAtEndIndex() throws Exception { + // Forces trailing zero in index key for fixed length + for (int i = 0; i < 10; i++) { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, k3 VARCHAR, v VARCHAR", "k1,k2,k3", + "k1, k3, k2", new Object[] { "a", i, "b" }); } - - @Test - public void tesIndexedExpressionSerialization() throws Exception { - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - conn.setAutoCommit(true); - conn.createStatement().execute("CREATE TABLE IF NOT EXISTS FHA (ORGANIZATION_ID CHAR(15) NOT NULL, PARENT_ID CHAR(15) NOT NULL, CREATED_DATE DATE NOT NULL, ENTITY_HISTORY_ID CHAR(15) NOT NULL, FIELD_HISTORY_ARCHIVE_ID CHAR(15), CREATED_BY_ID VARCHAR, FIELD VARCHAR, DATA_TYPE VARCHAR, OLDVAL_STRING VARCHAR, NEWVAL_STRING VARCHAR, OLDVAL_FIRST_NAME VARCHAR, NEWVAL_FIRST_NAME VARCHAR, OLDVAL_LAST_NAME VARCHAR, NEWVAL_LAST_NAME VARCHAR, OLDVAL_NUMBER DECIMAL, NEWVAL_NUMBER DECIMAL, OLDVAL_DATE DATE, NEWVAL_DATE DATE, ARCHIVE_PARENT_TYPE VARCHAR, ARCHIVE_FIELD_NAME VARCHAR, ARCHIVE_TIMESTAMP DATE, ARCHIVE_PARENT_NAME VARCHAR, DIVISION INTEGER, CONNECTION_ID VARCHAR CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID )) VERSIONS=1,MULTI_TENANT=true"); - conn.createStatement().execute("CREATE INDEX IDX ON FHA (FIELD_HISTORY_ARCHIVE_ID, UPPER(OLDVAL_STRING) || UPPER(NEWVAL_STRING), NEWVAL_DATE - NEWVAL_DATE)"); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), "FHA")); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - table.getIndexMaintainers(ptr, pconn); - List indexMaintainerList = IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true); - assertEquals(1,indexMaintainerList.size()); - IndexMaintainer indexMaintainer = indexMaintainerList.get(0); - Set indexedColumns = indexMaintainer.getIndexedColumns(); - assertEquals("Unexpected Number of indexed columns ", indexedColumns.size(), 4); - } finally { - conn.close(); - } + } + + @Test + public void testSingleKeyValueIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER, v VARCHAR", "k1", "v", + new Object[] { "a", 1, "b" }); + } + + @Test + public void testMultiKeyValueIndex() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT", "k1, k2", + "v2, k2, v1", new Object[] { "a", 1, 2.2, "bb" }); + } + + @Test + public void testMultiKeyValueCoveredIndex() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v2, k2, v1", new Object[] { "a", 1, 2.2, "bb" }, "v3, v4"); + } + + @Test + public void testSingleKeyValueDescIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER, v VARCHAR", "k1", "v DESC", + new Object[] { "a", 1, "b" }); + } + + @Test + public void testCompositeRowKeyVarFixedDescIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1,k2", "k2 DESC, k1", + new Object[] { "a", 1 }); + } + + @Test + public void testCompositeRowKeyTimeIndex() throws Exception { + long timeInMillis = 
System.currentTimeMillis(); + long timeInNanos = System.nanoTime(); + Timestamp ts = new Timestamp(timeInMillis); + ts.setNanos((int) (timeInNanos % 1000000000)); + testIndexRowKeyBuilding("ts1 DATE NOT NULL, ts2 TIME NOT NULL, ts3 TIMESTAMP NOT NULL", + "ts1,ts2,ts3", "ts2, ts1", + new Object[] { new Date(timeInMillis), new Time(timeInMillis), ts }); + } + + @Test + public void testCompositeRowKeyBytesIndex() throws Exception { + long timeInMillis = System.currentTimeMillis(); + long timeInNanos = System.nanoTime(); + Timestamp ts = new Timestamp(timeInMillis); + ts.setNanos((int) (timeInNanos % 1000000000)); + testIndexRowKeyBuilding("b1 BINARY(3) NOT NULL, v VARCHAR", "b1,v", "v, b1", + new Object[] { new byte[] { 41, 42, 43 }, "foo" }); + } + + @Test + public void testCompositeDescRowKeyVarFixedDescIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", + "k2 DESC, k1", new Object[] { "a", 1 }); + } + + @Test + public void testCompositeDescRowKeyVarDescIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL NOT NULL, v VARCHAR", "k1, k2 DESC", + "k2 DESC, k1", new Object[] { "a", 1.1, "b" }); + } + + @Test + public void testCompositeDescRowKeyVarAscIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 DECIMAL NOT NULL, v VARCHAR", "k1, k2 DESC", "k2, k1", + new Object[] { "a", 1.1, "b" }); + } + + @Test + public void testCompositeDescRowKeyVarFixedDescSaltedIndex() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", + "k2 DESC, k1", new Object[] { "a", 1 }, "", "", "SALT_BUCKETS=4"); + } + + @Test + public void testCompositeDescRowKeyVarFixedDescSaltedIndexSaltedTable() throws Exception { + testIndexRowKeyBuilding("k1 VARCHAR, k2 INTEGER NOT NULL, v VARCHAR", "k1, k2 DESC", + "k2 DESC, k1", new Object[] { "a", 1 }, "", "SALT_BUCKETS=3", "SALT_BUCKETS=3"); + } + + @Test + public void testMultiKeyValueCoveredSaltedIndex() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v2 DESC, k2 DESC, v1", new Object[] { "a", 1, 2.2, "bb" }, "v3, v4", "", + "SALT_BUCKETS=4"); + } + + @Test + public void tesIndexWithBigInt() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BIGINT, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1 DESC, k2 DESC", new Object[] { "a", 1, 2.2, "bb" }); + } + + @Test + public void tesIndexWithAscBoolean() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1, k2 DESC", new Object[] { "a", 1, true, "bb" }); + } + + @Test + public void tesIndexWithAscNullBoolean() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1, k2 DESC", new Object[] { "a", 1, null, "bb" }); + } + + @Test + public void tesIndexWithAscFalseBoolean() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1, k2 DESC", new Object[] { "a", 1, false, "bb" }); + } + + @Test + public void tesIndexWithDescBoolean() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1 DESC, k2 DESC", new Object[] { "a", 1, true, "bb" 
}); + } + + @Test + public void tesIndexWithDescFalseBoolean() throws Exception { + testIndexRowKeyBuilding( + "k1 CHAR(1) NOT NULL, k2 INTEGER NOT NULL, v1 BOOLEAN, v2 CHAR(2), v3 BIGINT, v4 CHAR(10)", + "k1, k2", "v1 DESC, k2 DESC", new Object[] { "a", 1, false, "bb" }); + } + + @Test + public void tesIndexedExpressionSerialization() throws Exception { + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + conn.setAutoCommit(true); + conn.createStatement().execute( + "CREATE TABLE IF NOT EXISTS FHA (ORGANIZATION_ID CHAR(15) NOT NULL, PARENT_ID CHAR(15) NOT NULL, CREATED_DATE DATE NOT NULL, ENTITY_HISTORY_ID CHAR(15) NOT NULL, FIELD_HISTORY_ARCHIVE_ID CHAR(15), CREATED_BY_ID VARCHAR, FIELD VARCHAR, DATA_TYPE VARCHAR, OLDVAL_STRING VARCHAR, NEWVAL_STRING VARCHAR, OLDVAL_FIRST_NAME VARCHAR, NEWVAL_FIRST_NAME VARCHAR, OLDVAL_LAST_NAME VARCHAR, NEWVAL_LAST_NAME VARCHAR, OLDVAL_NUMBER DECIMAL, NEWVAL_NUMBER DECIMAL, OLDVAL_DATE DATE, NEWVAL_DATE DATE, ARCHIVE_PARENT_TYPE VARCHAR, ARCHIVE_FIELD_NAME VARCHAR, ARCHIVE_TIMESTAMP DATE, ARCHIVE_PARENT_NAME VARCHAR, DIVISION INTEGER, CONNECTION_ID VARCHAR CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID )) VERSIONS=1,MULTI_TENANT=true"); + conn.createStatement().execute( + "CREATE INDEX IDX ON FHA (FIELD_HISTORY_ARCHIVE_ID, UPPER(OLDVAL_STRING) || UPPER(NEWVAL_STRING), NEWVAL_DATE - NEWVAL_DATE)"); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), "FHA")); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + table.getIndexMaintainers(ptr, pconn); + List indexMaintainerList = + IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true); + assertEquals(1, indexMaintainerList.size()); + IndexMaintainer indexMaintainer = indexMaintainerList.get(0); + Set indexedColumns = indexMaintainer.getIndexedColumns(); + assertEquals("Unexpected Number of indexed columns ", indexedColumns.size(), 4); + } finally { + conn.close(); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexRebuildRegionScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexRebuildRegionScannerTest.java index c59522c3134..c74cebd4183 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexRebuildRegionScannerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexRebuildRegionScannerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,63 +17,71 @@ */ package org.apache.phoenix.index; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.junit.Assert; -import org.junit.Test; +import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.getPerTaskIndexMutationMaps; + import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Random; import java.util.TreeMap; -import static org.apache.phoenix.coprocessor.IndexRebuildRegionScanner.getPerTaskIndexMutationMaps; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.junit.Assert; +import org.junit.Test; + +public class IndexRebuildRegionScannerTest { + private static final Random RAND = new Random(7); -public class IndexRebuildRegionScannerTest{ - private static final Random RAND = new Random(7); - @Test - public void testGetPerTaskIndexKeyToMutationMaps() { - TreeMap> indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - final int MAP_SIZE = 64*1024; - final int REPEAT = 32; - final int MAX_SPLIT_SIZE = 32; - for (int i = 0; i < MAP_SIZE; i++) { - byte[] indexKey = Bytes.toBytes(RAND.nextLong() % (MAP_SIZE * 11)); - indexKeyToMutationMap.put(indexKey, null); + @Test + public void testGetPerTaskIndexKeyToMutationMaps() { + TreeMap> indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + final int MAP_SIZE = 64 * 1024; + final int REPEAT = 32; + final int MAX_SPLIT_SIZE = 32; + for (int i = 0; i < MAP_SIZE; i++) { + byte[] indexKey = Bytes.toBytes(RAND.nextLong() % (MAP_SIZE * 11)); + indexKeyToMutationMap.put(indexKey, null); + } + for (int i = 1; i <= REPEAT; i++) { + // i is the number of regions and endKeys are the region end row keys + byte[][] endKeys = new byte[i][Long.SIZE / Byte.SIZE]; + for (int j = 0; j < i - 1; j++) { + endKeys[j] = Bytes.toBytes(RAND.nextLong() % (MAP_SIZE * 5)); // 5 vs 11 is to create some + // imbalance + } + // The last end key is always null + endKeys[i - 1] = null; + List>> mapList = + getPerTaskIndexMutationMaps(indexKeyToMutationMap, endKeys, MAX_SPLIT_SIZE); + int regionIndex = 0; + int regionCount = i; + for (Map> map : mapList) { + // Check map sizes + Assert.assertTrue(map.size() <= MAX_SPLIT_SIZE); + // Check map boundaries + NavigableMap> treeMap = (NavigableMap>) map; + byte[] firstKey = treeMap.firstKey(); + // Find the region including the first key of the of the map + while ( + regionIndex < regionCount - 1 + && Bytes.BYTES_COMPARATOR.compare(firstKey, endKeys[regionIndex]) > 0 + ) { + regionIndex++; } - for (int i = 1; i <= REPEAT; i++) { - // i is the number of regions and endKeys are the region end row keys - byte[][] endKeys = new byte[i][Long.SIZE / Byte.SIZE]; - for (int j = 0; j < i - 1; j++) { - endKeys[j] = Bytes.toBytes(RAND.nextLong() % (MAP_SIZE * 5)); // 5 vs 11 is to create some imbalance - } - // The last end key is always null - endKeys[i - 1] = null; - List>> mapList = getPerTaskIndexMutationMaps(indexKeyToMutationMap, endKeys, MAX_SPLIT_SIZE); - int regionIndex = 0; - int regionCount = i; - for (Map> map : mapList) { - // Check map 
sizes - Assert.assertTrue(map.size() <= MAX_SPLIT_SIZE); - // Check map boundaries - NavigableMap> treeMap = (NavigableMap>) map; - byte[] firstKey = treeMap.firstKey(); - // Find the region including the first key of the of the map - while (regionIndex < regionCount -1 && Bytes.BYTES_COMPARATOR.compare(firstKey, endKeys[regionIndex]) > 0) { - regionIndex++; - } - // The last key of the map also must fall into the same region - if (regionIndex != regionCount - 1) { - Assert.assertTrue(Bytes.BYTES_COMPARATOR.compare(treeMap.lastKey(), endKeys[regionIndex]) <= 0); - } - } - // The number of splits must be more than or equal to (map size / MAX_SPLIT_SIZE) and must be less than or - // equal to (map size / MAX_SPLIT_SIZE) + i - Assert.assertTrue(mapList.size() >= indexKeyToMutationMap.size() / MAX_SPLIT_SIZE); - Assert.assertTrue(mapList.size() <= (indexKeyToMutationMap.size() / MAX_SPLIT_SIZE) + i); + // The last key of the map also must fall into the same region + if (regionIndex != regionCount - 1) { + Assert.assertTrue( + Bytes.BYTES_COMPARATOR.compare(treeMap.lastKey(), endKeys[regionIndex]) <= 0); } + } + // The number of splits must be more than or equal to (map size / MAX_SPLIT_SIZE) and must be + // less than or + // equal to (map size / MAX_SPLIT_SIZE) + i + Assert.assertTrue(mapList.size() >= indexKeyToMutationMap.size() / MAX_SPLIT_SIZE); + Assert.assertTrue(mapList.size() <= (indexKeyToMutationMap.size() / MAX_SPLIT_SIZE) + i); } - + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexScrutinyMapperTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexScrutinyMapperTest.java index 863593be39e..6e7e71b05b4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexScrutinyMapperTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexScrutinyMapperTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,9 @@ */ package org.apache.phoenix.index; +import java.util.Arrays; +import java.util.Collection; + import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.mapreduce.index.IndexScrutinyMapper; import org.apache.phoenix.query.BaseConnectionlessQueryTest; @@ -33,100 +36,98 @@ import org.junit.runners.Parameterized; import org.mockito.Mockito; -import java.util.Arrays; -import java.util.Collection; - @RunWith(Parameterized.class) public class IndexScrutinyMapperTest extends BaseConnectionlessQueryTest { - String schema, tableName, indexName; - boolean isNamespaceEnabled; - PTable inputTable; + String schema, tableName, indexName; + boolean isNamespaceEnabled; + PTable inputTable; + + @Before + public void setup() { + schema = "S_" + generateUniqueName(); + tableName = "T_" + generateUniqueName(); + indexName = "I_" + generateUniqueName(); + inputTable = Mockito.mock(PTable.class); - @Before - public void setup() { - schema = "S_" + generateUniqueName(); - tableName = "T_" + generateUniqueName(); - indexName = "I_" + generateUniqueName(); - inputTable = Mockito.mock(PTable.class); + } - } + @Parameterized.Parameters(name = "IndexUpgradeToolTest_isNamespaceEnabled={0}") + public static synchronized Collection data() { + return Arrays.asList(false, true); + } - @Parameterized.Parameters(name ="IndexUpgradeToolTest_isNamespaceEnabled={0}") - public static synchronized Collection data() { - return Arrays.asList( false, true); - } + public IndexScrutinyMapperTest(boolean isNamespaceEnabled) { + this.isNamespaceEnabled = isNamespaceEnabled; + } - public IndexScrutinyMapperTest(boolean isNamespaceEnabled) { - this.isNamespaceEnabled = isNamespaceEnabled; - } - @Test - public void testGetSourceTableName_table() { - String fullTableName = SchemaUtil.getQualifiedTableName(schema, tableName); - PName sourcePhysicalName = SchemaUtil.getPhysicalHBaseTableName(schema, tableName, - isNamespaceEnabled); - String expectedName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), - isNamespaceEnabled).toString(); - //setup - Mockito.when(inputTable.getType()).thenReturn(PTableType.TABLE); - Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); - Mockito.when(inputTable.getTableName()).thenReturn(PNameFactory.newName(tableName)); - Mockito.when(inputTable.getSchemaName()).thenReturn(PNameFactory.newName(schema)); - //test - String output = IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); - //assert - Assert.assertEquals(expectedName, output); - } + @Test + public void testGetSourceTableName_table() { + String fullTableName = SchemaUtil.getQualifiedTableName(schema, tableName); + PName sourcePhysicalName = + SchemaUtil.getPhysicalHBaseTableName(schema, tableName, isNamespaceEnabled); + String expectedName = + SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), isNamespaceEnabled).toString(); + // setup + Mockito.when(inputTable.getType()).thenReturn(PTableType.TABLE); + Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); + Mockito.when(inputTable.getTableName()).thenReturn(PNameFactory.newName(tableName)); + Mockito.when(inputTable.getSchemaName()).thenReturn(PNameFactory.newName(schema)); + // test + String output = 
IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); + // assert + Assert.assertEquals(expectedName, output); + } - @Test - public void testGetSourceTableName_view() { - String fullTableName = SchemaUtil.getQualifiedTableName(schema, tableName); - PName sourcePhysicalName = SchemaUtil.getPhysicalHBaseTableName(schema, tableName, - isNamespaceEnabled); - String expectedName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), - isNamespaceEnabled).toString(); - //setup - Mockito.when(inputTable.getType()).thenReturn(PTableType.VIEW); - Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); - //test - String output = IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); - //assert - Assert.assertEquals(expectedName, output); - } + @Test + public void testGetSourceTableName_view() { + String fullTableName = SchemaUtil.getQualifiedTableName(schema, tableName); + PName sourcePhysicalName = + SchemaUtil.getPhysicalHBaseTableName(schema, tableName, isNamespaceEnabled); + String expectedName = + SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), isNamespaceEnabled).toString(); + // setup + Mockito.when(inputTable.getType()).thenReturn(PTableType.VIEW); + Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); + // test + String output = IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); + // assert + Assert.assertEquals(expectedName, output); + } - @Test - public void testGetSourceTableName_index() { - String fullTableName = SchemaUtil.getQualifiedTableName(schema, indexName); - PName sourcePhysicalName = SchemaUtil.getPhysicalHBaseTableName(schema, indexName, - isNamespaceEnabled); - String expectedName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), - isNamespaceEnabled).toString(); + @Test + public void testGetSourceTableName_index() { + String fullTableName = SchemaUtil.getQualifiedTableName(schema, indexName); + PName sourcePhysicalName = + SchemaUtil.getPhysicalHBaseTableName(schema, indexName, isNamespaceEnabled); + String expectedName = + SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), isNamespaceEnabled).toString(); - //setup - Mockito.when(inputTable.getType()).thenReturn(PTableType.INDEX); - Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); - Mockito.when(inputTable.getTableName()).thenReturn(PNameFactory.newName(indexName)); - Mockito.when(inputTable.getSchemaName()).thenReturn(PNameFactory.newName(schema)); + // setup + Mockito.when(inputTable.getType()).thenReturn(PTableType.INDEX); + Mockito.when(inputTable.getPhysicalName()).thenReturn(sourcePhysicalName); + Mockito.when(inputTable.getTableName()).thenReturn(PNameFactory.newName(indexName)); + Mockito.when(inputTable.getSchemaName()).thenReturn(PNameFactory.newName(schema)); - //test - String output = IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); - //assert - Assert.assertEquals(expectedName, output); - } + // test + String output = IndexScrutinyMapper.getSourceTableName(inputTable, isNamespaceEnabled); + // assert + Assert.assertEquals(expectedName, output); + } - @Test - public void testGetSourceTableName_viewIndex() { - PName physicalTableName = SchemaUtil.getPhysicalHBaseTableName(schema, tableName, - isNamespaceEnabled); - String expectedName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName.getString()); - PName physicalIndexTableName = PNameFactory - 
.newName(MetaDataUtil.getViewIndexPhysicalName(physicalTableName.getString())); + @Test + public void testGetSourceTableName_viewIndex() { + PName physicalTableName = + SchemaUtil.getPhysicalHBaseTableName(schema, tableName, isNamespaceEnabled); + String expectedName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName.getString()); + PName physicalIndexTableName = + PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(physicalTableName.getString())); - PTable pSourceTable = Mockito.mock(PTable.class); - //setup - Mockito.when(pSourceTable.getPhysicalName()).thenReturn(physicalIndexTableName); - //test - String output = IndexScrutinyMapper.getSourceTableName(pSourceTable, isNamespaceEnabled); - //assert - Assert.assertEquals(expectedName, output); - } + PTable pSourceTable = Mockito.mock(PTable.class); + // setup + Mockito.when(pSourceTable.getPhysicalName()).thenReturn(physicalIndexTableName); + // test + String output = IndexScrutinyMapper.getSourceTableName(pSourceTable, isNamespaceEnabled); + // assert + Assert.assertEquals(expectedName, output); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java index 74179011e6a..20cab1eca2b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +17,18 @@ */ package org.apache.phoenix.index; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import static org.apache.phoenix.mapreduce.index.IndexTool.FEATURE_NOT_APPLICABLE; +import static org.apache.phoenix.mapreduce.index.IndexTool.INVALID_TIME_RANGE_EXCEPTION_MESSAGE; +import static org.apache.phoenix.mapreduce.index.IndexTool.RETRY_VERIFY_NOT_APPLICABLE; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.when; + import org.apache.phoenix.end2end.IndexToolIT; import org.apache.phoenix.mapreduce.index.IndexScrutinyTool; import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.query.BaseTest; import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.junit.Assert; import org.junit.Before; @@ -33,337 +39,300 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import static org.apache.phoenix.mapreduce.index.IndexTool.FEATURE_NOT_APPLICABLE; -import static org.apache.phoenix.mapreduce.index.IndexTool.INVALID_TIME_RANGE_EXCEPTION_MESSAGE; -import static org.apache.phoenix.mapreduce.index.IndexTool.RETRY_VERIFY_NOT_APPLICABLE; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.when; - public class IndexToolTest extends BaseTest { - IndexTool it; - private String dataTable; - private String indexTable; - private String schema; - private String tenantId; - @Mock - PTable pDataTable; - boolean localIndex = true; - - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - - @Before - public void setup() { - it = new IndexTool(); - schema = 
generateUniqueName(); - dataTable = generateUniqueName(); - indexTable = generateUniqueName(); - tenantId = generateUniqueName(); - MockitoAnnotations.initMocks(this); - } - - @Test - public void testParseOptions_timeRange_timeRangeNotNull() throws Exception { - Long startTime = 10L; - Long endTime = 15L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(startTime, it.getStartTime()); - assertEquals(endTime, it.getEndTime()); - } - - @Test - public void testParseOptions_timeRange_null() throws Exception { - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - Assert.assertNull(it.getStartTime()); - Assert.assertNull(it.getEndTime()); - } - - @Test - public void testParseOptions_timeRange_startTimeNotNull() throws Exception { - Long startTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , null); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(startTime, it.getStartTime()); - assertEquals(null, it.getEndTime()); - } - - @Test - public void testParseOptions_timeRange_endTimeNotNull() throws Exception { - Long endTime = 15L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - null , endTime); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(null, it.getStartTime()); - assertEquals(endTime, it.getEndTime()); - } - - @Test - public void testParseOptions_timeRange_startTimeNullEndTimeInFuture() throws Exception { - Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - null , endTime); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - it.populateIndexToolAttributes(cmdLine); - } - - @Test - public void testParseOptions_timeRange_endTimeNullStartTimeInFuture() throws Exception { - Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , null); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - it.populateIndexToolAttributes(cmdLine); - } - - @Test(timeout = 10000 /* 10 secs */) - public void testParseOptions_timeRange_startTimeInFuture() throws Exception { - Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; - Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 200000; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - 
it.populateIndexToolAttributes(cmdLine); - } - - @Test(timeout = 10000 /* 10 secs */) - public void testParseOptions_timeRange_endTimeInFuture() throws Exception { - Long startTime = EnvironmentEdgeManager.currentTimeMillis(); - Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - it.populateIndexToolAttributes(cmdLine); - } - - @Test - public void testParseOptions_timeRange_startTimeEqEndTime() throws Exception { - Long startTime = 10L; - Long endTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - it.populateIndexToolAttributes(cmdLine); - } - - @Test - public void testParseOptions_timeRange_startTimeGtEndTime() throws Exception { - Long startTime = 10L; - Long endTime = 1L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); - it.populateIndexToolAttributes(cmdLine); - } - - @Test - public void testCheckTimeRangeFeature_timeRangeSet_transactionalTable_globalIndex() { - when(pDataTable.isTransactional()).thenReturn(true); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(FEATURE_NOT_APPLICABLE); - IndexTool.checkIfFeatureApplicable(1L, 3L, null, pDataTable, !localIndex); - } - - @Test - public void testIncrcementalVerifyOption() throws Exception { - IndexTool mockTool = Mockito.mock(IndexTool.class); - when(mockTool.getLastVerifyTime()).thenCallRealMethod(); - Long lastVerifyTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - lastVerifyTime, null, IndexTool.IndexDisableLoggingType.NONE, lastVerifyTime); - when(mockTool.parseOptions(args)).thenCallRealMethod(); - - CommandLine cmdLine = mockTool.parseOptions(args); - - when(mockTool.populateIndexToolAttributes(cmdLine)).thenCallRealMethod(); - when(mockTool.isValidLastVerifyTime(lastVerifyTime)).thenReturn(true); - - mockTool.populateIndexToolAttributes(cmdLine); - Assert.assertEquals(lastVerifyTime, mockTool.getLastVerifyTime()); - - when(pDataTable.isTransactional()).thenReturn(true); - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(FEATURE_NOT_APPLICABLE); - IndexTool.checkIfFeatureApplicable(null, null, lastVerifyTime, pDataTable, !localIndex); - } - - @Test - public void testIncrcementalVerifyOption_notApplicable() throws Exception { - IndexTool mockTool = Mockito.mock(IndexTool.class); - when(mockTool.getLastVerifyTime()).thenCallRealMethod(); - Long lastVerifyTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.AFTER, - lastVerifyTime, null, IndexTool.IndexDisableLoggingType.NONE, - lastVerifyTime); - when(mockTool.parseOptions(args)).thenCallRealMethod(); 
- - CommandLine cmdLine = mockTool.parseOptions(args); - - when(mockTool.populateIndexToolAttributes(cmdLine)).thenCallRealMethod(); - when(mockTool.validateLastVerifyTime()).thenCallRealMethod(); - when(mockTool.isValidLastVerifyTime(lastVerifyTime)).thenReturn(false); - - exceptionRule.expect(RuntimeException.class); - exceptionRule.expectMessage(RETRY_VERIFY_NOT_APPLICABLE); - mockTool.populateIndexToolAttributes(cmdLine); - } - - @Test - public void testCheckVerifyAndDisableLogging_defaultsNone() throws Exception { - Long startTime = null; - Long endTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(IndexTool.IndexDisableLoggingType.NONE, it.getDisableLoggingType()); - } - - @Test - public void testDisableLogging_allowsNone() throws Exception { - verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.NONE); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.ONLY); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.BEFORE); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.AFTER); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.BOTH); - } - - @Test - public void testDisableLogging_allowsBefore() throws Exception { - verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, IndexTool.IndexVerifyType.BEFORE); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, IndexTool.IndexVerifyType.ONLY); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, IndexTool.IndexVerifyType.BOTH); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BEFORE, - IndexTool.IndexVerifyType.AFTER); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BEFORE, - IndexTool.IndexVerifyType.NONE); - } - - @Test - public void testDisableLogging_allowsAfter() throws Exception { - verifyDisableLogging(IndexTool.IndexDisableLoggingType.AFTER, IndexTool.IndexVerifyType.BOTH); - verifyDisableLogging(IndexTool.IndexDisableLoggingType.AFTER, IndexTool.IndexVerifyType.AFTER); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.AFTER, - IndexTool.IndexVerifyType.NONE); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.AFTER, - IndexTool.IndexVerifyType.BEFORE); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, - IndexTool.IndexVerifyType.ONLY); - } - - @Test - public void testCheckVerifyAndDisableLogging_allowsBoth() throws Exception { - verifyDisableLogging(IndexTool.IndexDisableLoggingType.BOTH, IndexTool.IndexVerifyType.BOTH); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, - IndexTool.IndexVerifyType.NONE); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, - IndexTool.IndexVerifyType.ONLY); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, - IndexTool.IndexVerifyType.BEFORE); - verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, - IndexTool.IndexVerifyType.AFTER); - } - - public void verifyDisableLogging(IndexTool.IndexDisableLoggingType disableType, - IndexTool.IndexVerifyType verifyType) throws Exception { - Long startTime = null; - Long endTime = 10L; - String[] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, 
tenantId, verifyType, - startTime, endTime, disableType, null); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(disableType, it.getDisableLoggingType()); - } - - public void verifyDisableLoggingException(IndexTool.IndexDisableLoggingType disableType, - IndexTool.IndexVerifyType verifyType) { - Long startTime = null; - Long endTime = 10L; - String[] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, verifyType, - startTime, endTime, disableType, null); - exceptionRule.expect(IllegalStateException.class); - CommandLine cmdLine = it.parseOptions(args); - } - - @Test - public void testIndexToolDefaultSource() throws Exception { - Long startTime = 1L; - Long endTime = 10L; - String [] args = - IndexToolIT.getArgValues(true, schema, - dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.NONE, - startTime , endTime); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE, it.getSourceTable()); - } - - @Test - public void testIndexToolFromIndexSource() throws Exception { - verifyFromIndexOption(IndexTool.IndexVerifyType.ONLY); - verifyFromIndexOption(IndexTool.IndexVerifyType.BEFORE); - } - - private void verifyFromIndexOption(IndexTool.IndexVerifyType verifyType) throws Exception { - Long startTime = 1L; - Long endTime = 10L; - String[] args = - IndexToolIT.getArgValues(true, true, schema, - dataTable, indexTable, tenantId, verifyType, - startTime, endTime, IndexTool.IndexDisableLoggingType.BEFORE, null, true); - CommandLine cmdLine = it.parseOptions(args); - it.populateIndexToolAttributes(cmdLine); - assertEquals(IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE, it.getSourceTable()); - } + IndexTool it; + private String dataTable; + private String indexTable; + private String schema; + private String tenantId; + @Mock + PTable pDataTable; + boolean localIndex = true; + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + @Before + public void setup() { + it = new IndexTool(); + schema = generateUniqueName(); + dataTable = generateUniqueName(); + indexTable = generateUniqueName(); + tenantId = generateUniqueName(); + MockitoAnnotations.initMocks(this); + } + + @Test + public void testParseOptions_timeRange_timeRangeNotNull() throws Exception { + Long startTime = 10L; + Long endTime = 15L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(startTime, it.getStartTime()); + assertEquals(endTime, it.getEndTime()); + } + + @Test + public void testParseOptions_timeRange_null() throws Exception { + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + Assert.assertNull(it.getStartTime()); + Assert.assertNull(it.getEndTime()); + } + + @Test + public void testParseOptions_timeRange_startTimeNotNull() throws Exception { + Long startTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, null); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(startTime, it.getStartTime()); + assertEquals(null, 
it.getEndTime()); + } + + @Test + public void testParseOptions_timeRange_endTimeNotNull() throws Exception { + Long endTime = 15L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, null, endTime); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(null, it.getStartTime()); + assertEquals(endTime, it.getEndTime()); + } + + @Test + public void testParseOptions_timeRange_startTimeNullEndTimeInFuture() throws Exception { + Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, null, endTime); + CommandLine cmdLine = it.parseOptions(args); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test + public void testParseOptions_timeRange_endTimeNullStartTimeInFuture() throws Exception { + Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, null); + CommandLine cmdLine = it.parseOptions(args); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test(timeout = 10000 /* 10 secs */) + public void testParseOptions_timeRange_startTimeInFuture() throws Exception { + Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; + Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 200000; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test(timeout = 10000 /* 10 secs */) + public void testParseOptions_timeRange_endTimeInFuture() throws Exception { + Long startTime = EnvironmentEdgeManager.currentTimeMillis(); + Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 100000; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test + public void testParseOptions_timeRange_startTimeEqEndTime() throws Exception { + Long startTime = 10L; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test + public void testParseOptions_timeRange_startTimeGtEndTime() throws Exception { + Long startTime = 10L; + Long endTime = 1L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + 
exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(INVALID_TIME_RANGE_EXCEPTION_MESSAGE); + it.populateIndexToolAttributes(cmdLine); + } + + @Test + public void testCheckTimeRangeFeature_timeRangeSet_transactionalTable_globalIndex() { + when(pDataTable.isTransactional()).thenReturn(true); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(FEATURE_NOT_APPLICABLE); + IndexTool.checkIfFeatureApplicable(1L, 3L, null, pDataTable, !localIndex); + } + + @Test + public void testIncrcementalVerifyOption() throws Exception { + IndexTool mockTool = Mockito.mock(IndexTool.class); + when(mockTool.getLastVerifyTime()).thenCallRealMethod(); + Long lastVerifyTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, lastVerifyTime, null, IndexTool.IndexDisableLoggingType.NONE, + lastVerifyTime); + when(mockTool.parseOptions(args)).thenCallRealMethod(); + + CommandLine cmdLine = mockTool.parseOptions(args); + + when(mockTool.populateIndexToolAttributes(cmdLine)).thenCallRealMethod(); + when(mockTool.isValidLastVerifyTime(lastVerifyTime)).thenReturn(true); + + mockTool.populateIndexToolAttributes(cmdLine); + Assert.assertEquals(lastVerifyTime, mockTool.getLastVerifyTime()); + + when(pDataTable.isTransactional()).thenReturn(true); + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(FEATURE_NOT_APPLICABLE); + IndexTool.checkIfFeatureApplicable(null, null, lastVerifyTime, pDataTable, !localIndex); + } + + @Test + public void testIncrcementalVerifyOption_notApplicable() throws Exception { + IndexTool mockTool = Mockito.mock(IndexTool.class); + when(mockTool.getLastVerifyTime()).thenCallRealMethod(); + Long lastVerifyTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.AFTER, lastVerifyTime, null, IndexTool.IndexDisableLoggingType.NONE, + lastVerifyTime); + when(mockTool.parseOptions(args)).thenCallRealMethod(); + + CommandLine cmdLine = mockTool.parseOptions(args); + + when(mockTool.populateIndexToolAttributes(cmdLine)).thenCallRealMethod(); + when(mockTool.validateLastVerifyTime()).thenCallRealMethod(); + when(mockTool.isValidLastVerifyTime(lastVerifyTime)).thenReturn(false); + + exceptionRule.expect(RuntimeException.class); + exceptionRule.expectMessage(RETRY_VERIFY_NOT_APPLICABLE); + mockTool.populateIndexToolAttributes(cmdLine); + } + + @Test + public void testCheckVerifyAndDisableLogging_defaultsNone() throws Exception { + Long startTime = null; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(IndexTool.IndexDisableLoggingType.NONE, it.getDisableLoggingType()); + } + + @Test + public void testDisableLogging_allowsNone() throws Exception { + verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.NONE); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.ONLY); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.BEFORE); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.AFTER); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.NONE, IndexTool.IndexVerifyType.BOTH); + } + + @Test + public void 
testDisableLogging_allowsBefore() throws Exception { + verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, + IndexTool.IndexVerifyType.BEFORE); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, IndexTool.IndexVerifyType.ONLY); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.BEFORE, IndexTool.IndexVerifyType.BOTH); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BEFORE, + IndexTool.IndexVerifyType.AFTER); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BEFORE, + IndexTool.IndexVerifyType.NONE); + } + + @Test + public void testDisableLogging_allowsAfter() throws Exception { + verifyDisableLogging(IndexTool.IndexDisableLoggingType.AFTER, IndexTool.IndexVerifyType.BOTH); + verifyDisableLogging(IndexTool.IndexDisableLoggingType.AFTER, IndexTool.IndexVerifyType.AFTER); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.AFTER, + IndexTool.IndexVerifyType.NONE); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.AFTER, + IndexTool.IndexVerifyType.BEFORE); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, + IndexTool.IndexVerifyType.ONLY); + } + + @Test + public void testCheckVerifyAndDisableLogging_allowsBoth() throws Exception { + verifyDisableLogging(IndexTool.IndexDisableLoggingType.BOTH, IndexTool.IndexVerifyType.BOTH); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, + IndexTool.IndexVerifyType.NONE); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, + IndexTool.IndexVerifyType.ONLY); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, + IndexTool.IndexVerifyType.BEFORE); + verifyDisableLoggingException(IndexTool.IndexDisableLoggingType.BOTH, + IndexTool.IndexVerifyType.AFTER); + } + + public void verifyDisableLogging(IndexTool.IndexDisableLoggingType disableType, + IndexTool.IndexVerifyType verifyType) throws Exception { + Long startTime = null; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + verifyType, startTime, endTime, disableType, null); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(disableType, it.getDisableLoggingType()); + } + + public void verifyDisableLoggingException(IndexTool.IndexDisableLoggingType disableType, + IndexTool.IndexVerifyType verifyType) { + Long startTime = null; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + verifyType, startTime, endTime, disableType, null); + exceptionRule.expect(IllegalStateException.class); + CommandLine cmdLine = it.parseOptions(args); + } + + @Test + public void testIndexToolDefaultSource() throws Exception { + Long startTime = 1L; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, schema, dataTable, indexTable, tenantId, + IndexTool.IndexVerifyType.NONE, startTime, endTime); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(IndexScrutinyTool.SourceTable.DATA_TABLE_SOURCE, it.getSourceTable()); + } + + @Test + public void testIndexToolFromIndexSource() throws Exception { + verifyFromIndexOption(IndexTool.IndexVerifyType.ONLY); + verifyFromIndexOption(IndexTool.IndexVerifyType.BEFORE); + } + + private void verifyFromIndexOption(IndexTool.IndexVerifyType verifyType) throws Exception { + Long startTime = 1L; + Long endTime = 10L; + String[] args = IndexToolIT.getArgValues(true, true, schema, 
dataTable, indexTable, tenantId, + verifyType, startTime, endTime, IndexTool.IndexDisableLoggingType.BEFORE, null, true); + CommandLine cmdLine = it.parseOptions(args); + it.populateIndexToolAttributes(cmdLine); + assertEquals(IndexScrutinyTool.SourceTable.INDEX_TABLE_SOURCE, it.getSourceTable()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java index 52d4cf8f3dd..9d7aa1cfbbe 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,223 +30,215 @@ import java.util.List; import java.util.UUID; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; - import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.mapreduce.index.IndexUpgradeTool; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.apache.phoenix.query.ConnectionlessTest; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.phoenix.util.PhoenixRuntime; - import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; - @RunWith(Parameterized.class) -public class IndexUpgradeToolTest extends BaseConnectionlessQueryTest{ - private static final String INPUT_LIST = "TEST.MOCK1,TEST1.MOCK2,TEST.MOCK3"; - private final boolean upgrade; - private static final String DUMMY_STRING_VALUE = "anyValue"; - private static final String DUMMY_VERIFY_VALUE = "someVerifyValue"; - private static final String ONLY_VERIFY_VALUE = "ONLY"; - private IndexUpgradeTool indexUpgradeTool=null; - private String outputFile; - - public IndexUpgradeToolTest(boolean upgrade) { - this.upgrade = upgrade; - this.outputFile = "/tmp/index_upgrade_" + UUID.randomUUID().toString(); - } - - private void setup(String[] args) { - indexUpgradeTool = new IndexUpgradeTool(); - CommandLine cmd = indexUpgradeTool.parseOptions(args); - indexUpgradeTool.initializeTool(cmd); - } - - @Test - public void testCommandLineParsing() { - String [] args = {"-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", - INPUT_LIST, "-lf", outputFile, "-d"}; - setup(args); - Assert.assertEquals(indexUpgradeTool.getDryRun(),true); - Assert.assertEquals(indexUpgradeTool.getInputTables(), INPUT_LIST); - Assert.assertEquals(indexUpgradeTool.getOperation(), upgrade ? 
UPGRADE_OP : ROLLBACK_OP); - Assert.assertEquals(indexUpgradeTool.getLogFile(), outputFile); - // verify index rebuild is disabled by default - Assert.assertEquals(false, indexUpgradeTool.getIsRebuild()); - Assert.assertNull(indexUpgradeTool.getIndexToolOpts()); - } - - @Test - public void testRebuildOptionParsing() { - String [] args = {"-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", - INPUT_LIST, "-rb"}; - setup(args); - Assert.assertEquals(true, indexUpgradeTool.getIsRebuild()); - Assert.assertNull(indexUpgradeTool.getIndexToolOpts()); - } - - @Test(expected = IllegalStateException.class) - public void testIndexToolOptionsNoRebuild() { - String indexToolOpts = "-v " + DUMMY_VERIFY_VALUE; - String [] args = {"-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, - "-tool", indexToolOpts}; - setup(args); - } - - @Test - public void testIfOptionsArePassedToIndexTool() throws Exception { - if (!upgrade) { - return; - } - String [] indexToolOpts = {"-v", ONLY_VERIFY_VALUE, "-runfg", "-st", "100"}; - String indexToolarg = String.join(" ", indexToolOpts); - String [] args = {"-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", - INPUT_LIST, "-lf", outputFile, "-d", "-rb", "-tool", indexToolarg }; - setup(args); - - Assert.assertEquals("value passed to index tool option does not match with provided value", - indexToolarg, indexUpgradeTool.getIndexToolOpts()); - String [] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, - DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); - List argList = Arrays.asList(values); - Assert.assertTrue(argList.contains("-v")); - Assert.assertTrue(argList.contains(ONLY_VERIFY_VALUE)); - Assert.assertEquals("verify option and value are not passed consecutively", 1, - argList.indexOf(ONLY_VERIFY_VALUE) - argList.indexOf("-v")); - Assert.assertTrue(argList.contains("-runfg")); - Assert.assertTrue(argList.contains("-st")); - - // ensure that index tool can parse the options and raises no exceptions - IndexTool it = new IndexTool(); - CommandLine commandLine = it.parseOptions(values); - it.populateIndexToolAttributes(commandLine); - } - - @Test - public void testMalformedSpacingOptionsArePassedToIndexTool() throws Exception { - if (!upgrade) { - return; - } - String [] indexToolOpts = {"-v"+ONLY_VERIFY_VALUE, " -runfg", " -st ", "100 "}; - String indexToolarg = String.join(" ", indexToolOpts); - String [] args = {"-o", upgrade ? 
UPGRADE_OP : ROLLBACK_OP, "-tb", - INPUT_LIST, "-rb", "-tool", indexToolarg }; - setup(args); - - Assert.assertEquals("value passed to index tool option does not match with provided value", - indexToolarg, indexUpgradeTool.getIndexToolOpts()); - String [] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, - DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); - List argList = Arrays.asList(values); - Assert.assertTrue(argList.contains("-v" + ONLY_VERIFY_VALUE)); - Assert.assertTrue(argList.contains("-runfg")); - Assert.assertTrue(argList.contains("-st")); - - // ensure that index tool can parse the options and raises no exceptions - IndexTool it = new IndexTool(); - CommandLine commandLine = it.parseOptions(values); - it.populateIndexToolAttributes(commandLine); - } - - @Test(expected = IllegalStateException.class) - public void testBadIndexToolOptions() throws Exception { - String [] indexToolOpts = {"-v" + DUMMY_VERIFY_VALUE}; - String indexToolarg = String.join(" ", indexToolOpts); - String [] args = {"-o", UPGRADE_OP, "-tb", INPUT_LIST, "-rb", "-tool", indexToolarg }; - setup(args); - String [] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, - DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); - IndexTool it = new IndexTool(); - CommandLine commandLine = it.parseOptions(values); - it.populateIndexToolAttributes(commandLine); - } - - @Parameters(name ="IndexUpgradeToolTest_mutable={1}") - public static synchronized Collection data() { - return Arrays.asList( false, true); +public class IndexUpgradeToolTest extends BaseConnectionlessQueryTest { + private static final String INPUT_LIST = "TEST.MOCK1,TEST1.MOCK2,TEST.MOCK3"; + private final boolean upgrade; + private static final String DUMMY_STRING_VALUE = "anyValue"; + private static final String DUMMY_VERIFY_VALUE = "someVerifyValue"; + private static final String ONLY_VERIFY_VALUE = "ONLY"; + private IndexUpgradeTool indexUpgradeTool = null; + private String outputFile; + + public IndexUpgradeToolTest(boolean upgrade) { + this.upgrade = upgrade; + this.outputFile = "/tmp/index_upgrade_" + UUID.randomUUID().toString(); + } + + private void setup(String[] args) { + indexUpgradeTool = new IndexUpgradeTool(); + CommandLine cmd = indexUpgradeTool.parseOptions(args); + indexUpgradeTool.initializeTool(cmd); + } + + @Test + public void testCommandLineParsing() { + String[] args = + { "-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, "-lf", outputFile, "-d" }; + setup(args); + Assert.assertEquals(indexUpgradeTool.getDryRun(), true); + Assert.assertEquals(indexUpgradeTool.getInputTables(), INPUT_LIST); + Assert.assertEquals(indexUpgradeTool.getOperation(), upgrade ? UPGRADE_OP : ROLLBACK_OP); + Assert.assertEquals(indexUpgradeTool.getLogFile(), outputFile); + // verify index rebuild is disabled by default + Assert.assertEquals(false, indexUpgradeTool.getIsRebuild()); + Assert.assertNull(indexUpgradeTool.getIndexToolOpts()); + } + + @Test + public void testRebuildOptionParsing() { + String[] args = { "-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, "-rb" }; + setup(args); + Assert.assertEquals(true, indexUpgradeTool.getIsRebuild()); + Assert.assertNull(indexUpgradeTool.getIndexToolOpts()); + } + + @Test(expected = IllegalStateException.class) + public void testIndexToolOptionsNoRebuild() { + String indexToolOpts = "-v " + DUMMY_VERIFY_VALUE; + String[] args = + { "-o", upgrade ? 
UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, "-tool", indexToolOpts }; + setup(args); + } + + @Test + public void testIfOptionsArePassedToIndexTool() throws Exception { + if (!upgrade) { + return; } - - private void setupConfForConnectionlessQuery(Configuration conf) { - String connectionlessUrl = PhoenixRuntime.JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR - + CONNECTIONLESS + JDBC_PROTOCOL_TERMINATOR - + PHOENIX_TEST_DRIVER_URL_PARAM + JDBC_PROTOCOL_TERMINATOR; - PhoenixConfigurationUtil.setInputClusterUrl(conf, connectionlessUrl); - PhoenixConfigurationUtil.setOutputClusterUrl(conf, connectionlessUrl); - conf.unset(HConstants.ZOOKEEPER_CLIENT_PORT); - conf.unset(HConstants.ZOOKEEPER_ZNODE_PARENT); + String[] indexToolOpts = { "-v", ONLY_VERIFY_VALUE, "-runfg", "-st", "100" }; + String indexToolarg = String.join(" ", indexToolOpts); + String[] args = { "-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, "-lf", + outputFile, "-d", "-rb", "-tool", indexToolarg }; + setup(args); + + Assert.assertEquals("value passed to index tool option does not match with provided value", + indexToolarg, indexUpgradeTool.getIndexToolOpts()); + String[] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, + DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); + List argList = Arrays.asList(values); + Assert.assertTrue(argList.contains("-v")); + Assert.assertTrue(argList.contains(ONLY_VERIFY_VALUE)); + Assert.assertEquals("verify option and value are not passed consecutively", 1, + argList.indexOf(ONLY_VERIFY_VALUE) - argList.indexOf("-v")); + Assert.assertTrue(argList.contains("-runfg")); + Assert.assertTrue(argList.contains("-st")); + + // ensure that index tool can parse the options and raises no exceptions + IndexTool it = new IndexTool(); + CommandLine commandLine = it.parseOptions(values); + it.populateIndexToolAttributes(commandLine); + } + + @Test + public void testMalformedSpacingOptionsArePassedToIndexTool() throws Exception { + if (!upgrade) { + return; } - - @Test - public void testConnectionProperties() throws Exception { - Configuration conf = HBaseConfiguration.create(); - - long indexRebuildQueryTimeoutMs = 2000; - long indexRebuildRpcTimeoutMs = 3000; - long indexRebuildClientScannerTimeoutMs = 4000; - int indexRebuildRpcRetryCount = 10; - - conf.setLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, indexRebuildQueryTimeoutMs); - conf.setLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, indexRebuildRpcTimeoutMs); - conf.setLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - indexRebuildClientScannerTimeoutMs); - conf.setInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, indexRebuildRpcRetryCount); - - // prepare conf for connectionless query - setupConfForConnectionlessQuery(conf); - - try (Connection conn = IndexUpgradeTool.getConnection(conf)) { - // verify connection properties for phoenix, hbase timeouts and retries - Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB), - Long.toString(indexRebuildQueryTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY), - Long.toString(indexRebuildRpcTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD), - Long.toString(indexRebuildClientScannerTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER), - Long.toString(indexRebuildRpcRetryCount)); - } + String[] indexToolOpts = { "-v" + ONLY_VERIFY_VALUE, " -runfg", " -st ", "100 " }; + 
String indexToolarg = String.join(" ", indexToolOpts); + String[] args = + { "-o", upgrade ? UPGRADE_OP : ROLLBACK_OP, "-tb", INPUT_LIST, "-rb", "-tool", indexToolarg }; + setup(args); + + Assert.assertEquals("value passed to index tool option does not match with provided value", + indexToolarg, indexUpgradeTool.getIndexToolOpts()); + String[] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, + DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); + List argList = Arrays.asList(values); + Assert.assertTrue(argList.contains("-v" + ONLY_VERIFY_VALUE)); + Assert.assertTrue(argList.contains("-runfg")); + Assert.assertTrue(argList.contains("-st")); + + // ensure that index tool can parse the options and raises no exceptions + IndexTool it = new IndexTool(); + CommandLine commandLine = it.parseOptions(values); + it.populateIndexToolAttributes(commandLine); + } + + @Test(expected = IllegalStateException.class) + public void testBadIndexToolOptions() throws Exception { + String[] indexToolOpts = { "-v" + DUMMY_VERIFY_VALUE }; + String indexToolarg = String.join(" ", indexToolOpts); + String[] args = { "-o", UPGRADE_OP, "-tb", INPUT_LIST, "-rb", "-tool", indexToolarg }; + setup(args); + String[] values = indexUpgradeTool.getIndexToolArgValues(DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, + DUMMY_STRING_VALUE, DUMMY_STRING_VALUE, DUMMY_STRING_VALUE); + IndexTool it = new IndexTool(); + CommandLine commandLine = it.parseOptions(values); + it.populateIndexToolAttributes(commandLine); + } + + @Parameters(name = "IndexUpgradeToolTest_mutable={1}") + public static synchronized Collection data() { + return Arrays.asList(false, true); + } + + private void setupConfForConnectionlessQuery(Configuration conf) { + String connectionlessUrl = + PhoenixRuntime.JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS + + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM + JDBC_PROTOCOL_TERMINATOR; + PhoenixConfigurationUtil.setInputClusterUrl(conf, connectionlessUrl); + PhoenixConfigurationUtil.setOutputClusterUrl(conf, connectionlessUrl); + conf.unset(HConstants.ZOOKEEPER_CLIENT_PORT); + conf.unset(HConstants.ZOOKEEPER_ZNODE_PARENT); + } + + @Test + public void testConnectionProperties() throws Exception { + Configuration conf = HBaseConfiguration.create(); + + long indexRebuildQueryTimeoutMs = 2000; + long indexRebuildRpcTimeoutMs = 3000; + long indexRebuildClientScannerTimeoutMs = 4000; + int indexRebuildRpcRetryCount = 10; + + conf.setLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, indexRebuildQueryTimeoutMs); + conf.setLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, indexRebuildRpcTimeoutMs); + conf.setLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + indexRebuildClientScannerTimeoutMs); + conf.setInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, indexRebuildRpcRetryCount); + + // prepare conf for connectionless query + setupConfForConnectionlessQuery(conf); + + try (Connection conn = IndexUpgradeTool.getConnection(conf)) { + // verify connection properties for phoenix, hbase timeouts and retries + Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB), + Long.toString(indexRebuildQueryTimeoutMs)); + Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY), + Long.toString(indexRebuildRpcTimeoutMs)); + Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD), + Long.toString(indexRebuildClientScannerTimeoutMs)); + 
Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER), + Long.toString(indexRebuildRpcRetryCount)); } - - @Test - public void testConnectionDefaults() throws Exception { - Configuration conf = HBaseConfiguration.create(); - - long indexRebuildQueryTimeoutMs = conf.getLong( - QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); - long indexRebuildRpcTimeoutMs = conf.getLong( - QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); - long indexRebuildClientScannerTimeoutMs = conf.getLong( - QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); - long indexRebuildRpcRetryCount = conf.getInt( - QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, - QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); - - // prepare conf for connectionless query - setupConfForConnectionlessQuery(conf); - - try (Connection conn = IndexUpgradeTool.getConnection(conf)) { - // verify connection properties for phoenix, hbase timeouts and retries - Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB), - Long.toString(indexRebuildQueryTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY), - Long.toString(indexRebuildRpcTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD), - Long.toString(indexRebuildClientScannerTimeoutMs)); - Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER), - Long.toString(indexRebuildRpcRetryCount)); - } + } + + @Test + public void testConnectionDefaults() throws Exception { + Configuration conf = HBaseConfiguration.create(); + + long indexRebuildQueryTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT); + long indexRebuildRpcTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT); + long indexRebuildClientScannerTimeoutMs = + conf.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT); + long indexRebuildRpcRetryCount = conf.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, + QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER); + + // prepare conf for connectionless query + setupConfForConnectionlessQuery(conf); + + try (Connection conn = IndexUpgradeTool.getConnection(conf)) { + // verify connection properties for phoenix, hbase timeouts and retries + Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB), + Long.toString(indexRebuildQueryTimeoutMs)); + Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY), + Long.toString(indexRebuildRpcTimeoutMs)); + Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD), + Long.toString(indexRebuildClientScannerTimeoutMs)); + Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER), + Long.toString(indexRebuildRpcRetryCount)); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/PrepareIndexMutationsForRebuildTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/PrepareIndexMutationsForRebuildTest.java index dce37d33560..ada15eb4c50 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/index/PrepareIndexMutationsForRebuildTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/PrepareIndexMutationsForRebuildTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.index; +import static org.junit.Assert.assertEquals; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -35,840 +42,629 @@ import org.apache.phoenix.util.SchemaUtil; import org.junit.Assert; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.junit.Assert.assertEquals; public class PrepareIndexMutationsForRebuildTest extends BaseConnectionlessQueryTest { - private static String ROW_KEY = "k1"; - private static String TABLE_NAME = "dataTable"; - private static String INDEX_NAME = "idx"; - - class SetupInfo { - public IndexMaintainer indexMaintainer; - public PTable pDataTable; + private static String ROW_KEY = "k1"; + private static String TABLE_NAME = "dataTable"; + private static String INDEX_NAME = "idx"; + + class SetupInfo { + public IndexMaintainer indexMaintainer; + public PTable pDataTable; + } + + /** + * Get the index maintainer and phoenix table definition of data table. + */ + private SetupInfo setup(String tableName, String indexName, String columns, String indexColumns, + String pk, String includeColumns) throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + + String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(""), + SchemaUtil.normalizeIdentifier(tableName)); + String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(""), + SchemaUtil.normalizeIdentifier(indexName)); + + // construct the data table and index based from the parameters + String str1 = String.format( + "CREATE TABLE %1$s (%2$s CONSTRAINT pk PRIMARY KEY (%3$s)) COLUMN_ENCODED_BYTES=0", + fullTableName, columns, pk); + conn.createStatement().execute(str1); + + String str2 = String.format("CREATE INDEX %1$s ON %2$s (%3$s)", fullIndexName, fullTableName, + indexColumns); + if (!includeColumns.isEmpty()) str2 += " INCLUDE (" + includeColumns + ")"; + conn.createStatement().execute(str2); + + // Get the data table, index table and index maintainer reference from the client's + // ConnectionQueryServiceImpl + // In this way, we don't need to setup a local cluster. 
+ PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName)); + PTable pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); + IndexMaintainer im = pIndexTable.getIndexMaintainer(pDataTable, pconn); + + SetupInfo info = new SetupInfo(); + info.indexMaintainer = im; + info.pDataTable = pDataTable; + return info; } - - /** - * Get the index maintainer and phoenix table definition of data table. - * @param tableName - * @param indexName - * @param columns - * @param indexColumns - * @param pk - * @param includeColumns - * @return - * @throws Exception - */ - private SetupInfo setup(String tableName, - String indexName, - String columns, - String indexColumns, - String pk, - String includeColumns) throws Exception { - try(Connection conn = DriverManager.getConnection(getUrl())) { - - String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(""), SchemaUtil.normalizeIdentifier(tableName)); - String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(""), SchemaUtil.normalizeIdentifier(indexName)); - - // construct the data table and index based from the parameters - String str1 = String.format("CREATE TABLE %1$s (%2$s CONSTRAINT pk PRIMARY KEY (%3$s)) COLUMN_ENCODED_BYTES=0", - fullTableName, - columns, - pk); - conn.createStatement().execute(str1); - - String str2 = String.format("CREATE INDEX %1$s ON %2$s (%3$s)", - fullIndexName, - fullTableName, - indexColumns); - if (!includeColumns.isEmpty()) - str2 += " INCLUDE (" + includeColumns + ")"; - conn.createStatement().execute(str2); - - // Get the data table, index table and index maintainer reference from the client's ConnectionQueryServiceImpl - // In this way, we don't need to setup a local cluster. 
- PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName)); - PTable pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); - IndexMaintainer im = pIndexTable.getIndexMaintainer(pDataTable, pconn); - - SetupInfo info = new SetupInfo(); - info.indexMaintainer = im; - info.pDataTable = pDataTable; - return info; + } + + /** + * Simulate one put mutation on the indexed column + */ + @Test + public void testSinglePutOnIndexColumn() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", "ROW_KEY", ""); + + // insert a row + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, null); + + // Expect one row of index with row key "v1_k1" + Put idxPut1 = new Put(generateIndexRowKey("v1")); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + + assertEqualMutationList(Arrays.asList((Mutation) idxPut1), actualIndexMutations); + } + + /** + * Simulate one put mutation on the non-indexed column + */ + @Test + public void testSinglePutOnNonIndexColumn() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", "ROW_KEY", ""); + + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, null); + + // Expect one row of index with row key "_k1", as indexed column C1 is nullable. 
+ Put idxPut1 = new Put(generateIndexRowKey(null)); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + + assertEqualMutationList(Arrays.asList((Mutation) idxPut1), actualIndexMutations); + } + + /** + * Simulate the column delete on the index column + */ + @Test + public void testDelOnIndexColumn() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", "ROW_KEY", ""); + + // insert the row for deletion + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + // only delete the value of column C1 + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 2, + Cell.Type.DeleteColumn); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutation = new ArrayList<>(); + + // generate the index row key "v1_k1" + byte[] idxKeyBytes = generateIndexRowKey("v1"); + + Put idxPut1 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutation.add(idxPut1); + + // generate the index row key "_k1" + Put idxPut2 = new Put(generateIndexRowKey(null)); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutation.add(idxPut2); + + // This deletion is to remove the row added by the idxPut1, as idxPut2 has different row key as + // idxPut1. 
+ // Otherwise the row "v1_k1" will still be shown in the scan result + Delete idxDel = new Delete(idxKeyBytes); + addCellToDelMutation(idxDel, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, null, 2, + Cell.Type.DeleteFamily); + expectedIndexMutation.add(idxDel); + + assertEqualMutationList(expectedIndexMutation, actualIndexMutations); + } + + /** + * Simulate the column delete on the non-indexed column + */ + @Test + public void testDelOnNonIndexColumn() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", "ROW_KEY", ""); + + // insert the row for deletion + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + // delete the value of column C2 + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 2, + Cell.Type.DeleteColumn); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + + byte[] idxKeyBytes = generateIndexRowKey("v1"); + + // idxPut1 is the corresponding index mutation of dataPut + Put idxPut1 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + // idxPut2 is required to update the timestamp, so the index row will have the same life time as + // its corresponding data row. + // No delete mutation is expected on index table, as data mutation happens only on non-indexed + // column. 
+ Put idxPut2 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutations.add(idxPut2); + + assertEqualMutationList(expectedIndexMutations, actualIndexMutations); + } + + /** + * Simulate the data deletion of all version on the indexed row + */ + @Test + public void testDeleteAllVersions() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR", "C1", "ROW_KEY", ""); + + // insert two versions for a single row + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 2, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); + + // DeleteFamily will delete all versions of the columns in that family + // Since C1 is the only column of the default column family, so deleting the default family + // removes all version + // of column C1 + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), null, 3, + Cell.Type.DeleteFamily); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + + byte[] idxKeyBytes1 = generateIndexRowKey("v1"); + byte[] idxKeyBytes2 = generateIndexRowKey("v2"); + + // idxPut1 and idxPut2 are generated by two versions in dataPut + Put idxPut1 = new Put(idxKeyBytes1); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + Put idxPut2 = new Put(idxKeyBytes2); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutations.add(idxPut2); + + // idxDel1 is required to remove the row key "v1_k1" which is added by idxPut1. + // The ts of idxDel1 is same as idxPut2, because it is a result of idxPut2. + // Since C1 is the only index column, so it is translated to DeleteFamily mutation. 
+ Delete idxDel1 = new Delete(idxKeyBytes1); + addCellToDelMutation(idxDel1, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), null, 2, + Cell.Type.DeleteFamily); + expectedIndexMutations.add(idxDel1); + + // idxDel2 is corresponding index mutation of dataDel + Delete idxDel2 = new Delete(idxKeyBytes2); + addCellToDelMutation(idxDel2, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), null, 3, + Cell.Type.DeleteFamily); + expectedIndexMutations.add(idxDel2); + + assertEqualMutationList(expectedIndexMutations, actualIndexMutations); + } + + // Simulate the put and delete mutation with the same time stamp on the index + @Test + public void testPutDeleteOnSameTimeStamp() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR", "C1", "ROW_KEY", ""); + + // insert a row + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + // delete column of C1 from the inserted row + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Cell.Type.DeleteColumn); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + + // The dataDel will be applied on top of dataPut when we replay them for index rebuild, when + // they have the same time stamp. + // idxPut1 is expected as in data table we still see the row of k1 with empty C1, so we need a + // row in index table with row key "_k1" + Put idxPut1 = new Put(generateIndexRowKey(null)); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + assertEqualMutationList(Arrays.asList((Mutation) idxPut1), actualIndexMutations); + } + + // Simulate the put and delete mutation with the same timestamp and put mutation is empty + // after applied delete mutation + @Test + public void testPutDeleteOnSameTimeStampAndPutNullifiedByDelete() throws Exception { + SetupInfo info = setup(TABLE_NAME, INDEX_NAME, + "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", "CF2.C2", "ROW_KEY", ""); + + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, Bytes.toBytes("CF2"), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + addCellToPutMutation(dataPut, Bytes.toBytes("CF1"), Bytes.toBytes("C1"), 2, + Bytes.toBytes("v1")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); + + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, Bytes.toBytes("CF1"), null, 2, Cell.Type.DeleteFamily); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + byte[] idxKeyBytes = generateIndexRowKey("v2"); + + // idxPut1 is generated corresponding to dataPut of timestamp 1. 
+ // idxPut2 is generated corresponding to dataPut and dataDel of timestamp 2 + Put idxPut1 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + Put idxPut2 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutations.add(idxPut2); + + assertEqualMutationList(expectedIndexMutations, actualIndexMutations); + } + + // Simulate the put and delete mutation with the same timestamp and put mutation and current row + // state + // are empty after applied delete mutation + @Test + public void testPutDeleteOnSameTimeStampAndPutAndOldPutAllNullifiedByDelete() throws Exception { + SetupInfo info = setup(TABLE_NAME, INDEX_NAME, + "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", "CF2.C2", "ROW_KEY", ""); + + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, Bytes.toBytes("CF2"), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + addCellToPutMutation(dataPut, Bytes.toBytes("CF2"), Bytes.toBytes("C2"), 2, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); + + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, Bytes.toBytes("CF2"), null, 2, Cell.Type.DeleteFamily); + addCellToDelMutation(dataDel, SchemaUtil.getEmptyColumnFamily(info.pDataTable), null, 2, + Cell.Type.DeleteFamily); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + byte[] idxKeyBytes = generateIndexRowKey("v2"); + + // idxPut1 is generated corresponding to dataPut of timestamp 1. + // idxDel2 is generated because the dataDel of timestamp 2 deletes dataPut of timestamp 2 + // and current row state. 
+ Put idxPut1 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + Delete idxDel2 = new Delete(idxKeyBytes); + addCellToDelMutation(idxDel2, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), null, 2, + Cell.Type.DeleteFamily); + expectedIndexMutations.add(idxDel2); + + assertEqualMutationList(expectedIndexMutations, actualIndexMutations); + } + + // Simulate the put and delete mutation on the covered column of data table + @Test + public void testCoveredIndexColumns() throws Exception { + SetupInfo info = setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", + "ROW_KEY", "C2"); + + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C1"), 2, + Cell.Type.DeleteColumn); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutations = new ArrayList<>(); + byte[] idxKeyBytes = generateIndexRowKey("v1"); + + // idxPut1 is generated corresponding to dataPut. + // The column "0:C2" is generated from data table column family and column name, its family name + // is still default family name of index table + Put idxPut1 = new Put(idxKeyBytes); + addCellToPutMutation(idxPut1, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:C2"), + 1, Bytes.toBytes("v2")); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutations.add(idxPut1); + + // idxKey2 is required by dataDel, as dataDel change the corresponding row key of index table + List idxKey2 = new ArrayList<>(); + idxKey2.add(QueryConstants.SEPARATOR_BYTE); + idxKey2.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes + .asList(Bytes.toBytes(ROW_KEY))); + byte[] idxKeyBytes2 = + org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey2); + Put idxPut2 = new Put(idxKeyBytes2); + addCellToPutMutation(idxPut2, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:C2"), + 2, Bytes.toBytes("v2")); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutations.add(idxPut2); + + // idxDel is required to invalid the index row "v1_k1", dataDel removed the value of indexed + // column + Delete idxDel = new Delete(idxKeyBytes); + addCellToDelMutation(idxDel, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, null, 2, + Cell.Type.DeleteFamily); + expectedIndexMutations.add(idxDel); + + assertEqualMutationList(expectedIndexMutations, actualIndexMutations); + } + + // Simulate the scenario that index column, and covered column belong to different column families + @Test + public void testForMultipleFamilies() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", // define C1 + // and C2 + // with + // different + // families + "CF1.C1", "ROW_KEY", "CF2.C2"); + + // insert a row to the data table + Put dataPut = new 
Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, Bytes.toBytes("CF1"), Bytes.toBytes("C1"), 1, + Bytes.toBytes("v1")); + addCellToPutMutation(dataPut, Bytes.toBytes("CF2"), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + + // delete the indexed column CF1:C1 + Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); + addCellToDelMutation(dataDel, Bytes.toBytes("CF1"), Bytes.toBytes("C1"), 2, + Cell.Type.DeleteColumn); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, dataDel); + + List expectedIndexMutation = new ArrayList<>(); + + byte[] idxKeyBytes = generateIndexRowKey("v1"); + + // index table will use the family name of the first covered column, which is CF2 here. + Put idxPut1 = new Put(idxKeyBytes); + addCellToPutMutation(idxPut1, Bytes.toBytes("CF2"), Bytes.toBytes("CF2:C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + expectedIndexMutation.add(idxPut1); + + // idxPut2 and idxDel are the result of dataDel + // idxPut2 is to create the index row "_k1", idxDel is to invalid the index row "v1_k1". + Put idxPut2 = new Put(generateIndexRowKey(null)); + addCellToPutMutation(idxPut2, Bytes.toBytes("CF2"), Bytes.toBytes("CF2:C2"), 2, + Bytes.toBytes("v2")); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + expectedIndexMutation.add(idxPut2); + + Delete idxDel = new Delete(idxKeyBytes); + addCellToDelMutation(idxDel, Bytes.toBytes("CF2"), null, 2, Cell.Type.DeleteFamily); + expectedIndexMutation.add(idxDel); + + assertEqualMutationList(expectedIndexMutation, actualIndexMutations); + } + + // Simulate two data put with the same value but different time stamp. + // We expect to see 2 index mutations with same value but different time stamps. 
+ @Test + public void testSameTypeOfMutationWithSameValueButDifferentTimeStamp() throws Exception { + SetupInfo info = + setup(TABLE_NAME, INDEX_NAME, "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", "C1", "ROW_KEY", ""); + + Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v2")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); + addCellToPutMutation(dataPut, + info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), Bytes.toBytes("C2"), 1, + Bytes.toBytes("v3")); + addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); + + List actualIndexMutations = IndexRebuildRegionScanner + .prepareIndexMutationsForRebuild(info.indexMaintainer, dataPut, null); + + byte[] idxKeyBytes = generateIndexRowKey(null); + + // idxPut1 and idxPut2 have same value but different time stamp + Put idxPut1 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); + + Put idxPut2 = new Put(idxKeyBytes); + addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); + + assertEqualMutationList(Arrays.asList((Mutation) idxPut1, (Mutation) idxPut2), + actualIndexMutations); + } + + /** + * Generate the row key for index table by the value of indexed column + */ + byte[] generateIndexRowKey(String indexVal) { + List idxKey = new ArrayList<>(); + if (indexVal != null && !indexVal.isEmpty()) + idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes + .asList(Bytes.toBytes(indexVal))); + idxKey.add(QueryConstants.SEPARATOR_BYTE); + idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes + .asList(Bytes.toBytes(ROW_KEY))); + return org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey); + } + + void addCellToPutMutation(Put put, byte[] family, byte[] column, long ts, byte[] value) + throws Exception { + byte[] rowKey = put.getRow(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey).setFamily(family) + .setQualifier(column).setTimestamp(ts).setType(Cell.Type.Put).setValue(value).build(); + put.add(cell); + } + + void addCellToDelMutation(Delete del, byte[] family, byte[] column, long ts, Cell.Type type) + throws Exception { + byte[] rowKey = del.getRow(); + Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) + .setFamily(family).setQualifier(column).setTimestamp(ts).setType(type).setValue(null).build(); + del.add(cell); + } + + /** + * Add Empty column to the existing data put mutation + */ + void addEmptyColumnToDataPutMutation(Put put, PTable ptable, long ts) throws Exception { + addCellToPutMutation(put, SchemaUtil.getEmptyColumnFamily(ptable), + QueryConstants.EMPTY_COLUMN_BYTES, ts, QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + } + + /** + * Add the verified flag to the existing index put mutation + */ + void addEmptyColumnToIndexPutMutation(Put put, IndexMaintainer im, long ts) throws Exception { + addCellToPutMutation(put, im.getEmptyKeyValueFamily().copyBytesIfNecessary(), + QueryConstants.EMPTY_COLUMN_BYTES, ts, QueryConstants.VERIFIED_BYTES); + } + + /** + * Compare two mutation lists without worrying about the order of the mutations in the lists + */ + void assertEqualMutationList(List expectedMutations, List actualMutations) { + assertEquals(expectedMutations.size(), actualMutations.size()); + for (Mutation expected : expectedMutations) { + boolean found = false; + for 
(Mutation actual : actualMutations) { + if (isEqualMutation(expected, actual)) { + actualMutations.remove(actual); + found = true; + break; } + } + if (!found) Assert.fail(String.format("Cannot find mutation:%s", expected)); } - - /** - * Simulate one put mutation on the indexed column - * @throws Exception - */ - @Test - public void testSinglePutOnIndexColumn() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - ""); - - // insert a row - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - null); - - // Expect one row of index with row key "v1_k1" - Put idxPut1 = new Put(generateIndexRowKey("v1")); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - - assertEqualMutationList(Arrays.asList((Mutation)idxPut1), actualIndexMutations); - } - - /** - * Simulate one put mutation on the non-indexed column - * @throws Exception - */ - @Test - public void testSinglePutOnNonIndexColumn() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - ""); - - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - null); - - // Expect one row of index with row key "_k1", as indexed column C1 is nullable. 
- Put idxPut1 = new Put(generateIndexRowKey(null)); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - - assertEqualMutationList(Arrays.asList((Mutation)idxPut1), actualIndexMutations); - } - - /** - * Simulate the column delete on the index column - * @throws Exception - */ - @Test - public void testDelOnIndexColumn() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - ""); - - // insert the row for deletion - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - // only delete the value of column C1 - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 2, - Cell.Type.DeleteColumn); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutation = new ArrayList<>(); - - // generate the index row key "v1_k1" - byte[] idxKeyBytes = generateIndexRowKey("v1"); - - Put idxPut1 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutation.add(idxPut1); - - // generate the index row key "_k1" - Put idxPut2 = new Put(generateIndexRowKey(null)); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutation.add(idxPut2); - - // This deletion is to remove the row added by the idxPut1, as idxPut2 has different row key as idxPut1. 
- // Otherwise the row "v1_k1" will still be shown in the scan result - Delete idxDel = new Delete(idxKeyBytes); - addCellToDelMutation(idxDel, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - null, - 2, - Cell.Type.DeleteFamily); - expectedIndexMutation.add(idxDel); - - assertEqualMutationList(expectedIndexMutation, actualIndexMutations); - } - - /** - * Simulate the column delete on the non-indexed column - * @throws Exception - */ - @Test - public void testDelOnNonIndexColumn() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - ""); - - // insert the row for deletion - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - // delete the value of column C2 - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 2, - Cell.Type.DeleteColumn); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - - byte[] idxKeyBytes = generateIndexRowKey("v1"); - - // idxPut1 is the corresponding index mutation of dataPut - Put idxPut1 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - // idxPut2 is required to update the timestamp, so the index row will have the same life time as its corresponding data row. - // No delete mutation is expected on index table, as data mutation happens only on non-indexed column. 
- Put idxPut2 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutations.add(idxPut2); - - assertEqualMutationList(expectedIndexMutations, actualIndexMutations); - } - - /** - * Simulate the data deletion of all version on the indexed row - * @throws Exception - */ - @Test - public void testDeleteAllVersions() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR", - "C1", - "ROW_KEY", - ""); - - // insert two versions for a single row - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 2, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); - - // DeleteFamily will delete all versions of the columns in that family - // Since C1 is the only column of the default column family, so deleting the default family removes all version - // of column C1 - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - null, - 3, - Cell.Type.DeleteFamily); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - - byte[] idxKeyBytes1 = generateIndexRowKey("v1"); - byte[] idxKeyBytes2 = generateIndexRowKey("v2"); - - // idxPut1 and idxPut2 are generated by two versions in dataPut - Put idxPut1 = new Put(idxKeyBytes1); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - Put idxPut2 = new Put(idxKeyBytes2); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutations.add(idxPut2); - - // idxDel1 is required to remove the row key "v1_k1" which is added by idxPut1. - // The ts of idxDel1 is same as idxPut2, because it is a result of idxPut2. - // Since C1 is the only index column, so it is translated to DeleteFamily mutation. 
- Delete idxDel1 = new Delete(idxKeyBytes1); - addCellToDelMutation(idxDel1, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - null, - 2, - Cell.Type.DeleteFamily); - expectedIndexMutations.add(idxDel1); - - // idxDel2 is corresponding index mutation of dataDel - Delete idxDel2 = new Delete(idxKeyBytes2); - addCellToDelMutation(idxDel2, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - null, - 3, - Cell.Type.DeleteFamily); - expectedIndexMutations.add(idxDel2); - - assertEqualMutationList(expectedIndexMutations, actualIndexMutations); - } - - // Simulate the put and delete mutation with the same time stamp on the index - @Test - public void testPutDeleteOnSameTimeStamp() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR", - "C1", - "ROW_KEY", - ""); - - // insert a row - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable,1); - - // delete column of C1 from the inserted row - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Cell.Type.DeleteColumn); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - - // The dataDel will be applied on top of dataPut when we replay them for index rebuild, when they have the same time stamp. - // idxPut1 is expected as in data table we still see the row of k1 with empty C1, so we need a row in index table with row key "_k1" - Put idxPut1 = new Put(generateIndexRowKey(null)); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - assertEqualMutationList(Arrays.asList((Mutation)idxPut1), actualIndexMutations); - } - - // Simulate the put and delete mutation with the same timestamp and put mutation is empty - // after applied delete mutation - @Test - public void testPutDeleteOnSameTimeStampAndPutNullifiedByDelete() throws Exception { - SetupInfo info = setup( - TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", - "CF2.C2", - "ROW_KEY", - ""); - - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation( - dataPut, - Bytes.toBytes("CF2"), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - addCellToPutMutation( - dataPut, - Bytes.toBytes("CF1"), - Bytes.toBytes("C1"), - 2, - Bytes.toBytes("v1")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); - - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation( - dataDel, - Bytes.toBytes("CF1"), - null, - 2, - Cell.Type.DeleteFamily); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild( - info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - byte[] idxKeyBytes = generateIndexRowKey("v2"); - - // idxPut1 is generated corresponding to dataPut of timestamp 1. 
- // idxPut2 is generated corresponding to dataPut and dataDel of timestamp 2 - Put idxPut1 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - Put idxPut2 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutations.add(idxPut2); - - assertEqualMutationList(expectedIndexMutations, actualIndexMutations); - } - - // Simulate the put and delete mutation with the same timestamp and put mutation and current row state - // are empty after applied delete mutation - @Test - public void testPutDeleteOnSameTimeStampAndPutAndOldPutAllNullifiedByDelete() throws Exception { - SetupInfo info = setup( - TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", - "CF2.C2", - "ROW_KEY", - ""); - - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation( - dataPut, - Bytes.toBytes("CF2"), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - addCellToPutMutation( - dataPut, - Bytes.toBytes("CF2"), - Bytes.toBytes("C2"), - 2, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); - - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation( - dataDel, - Bytes.toBytes("CF2"), - null, - 2, - Cell.Type.DeleteFamily); - addCellToDelMutation( - dataDel, - SchemaUtil.getEmptyColumnFamily(info.pDataTable), - null, - 2, - Cell.Type.DeleteFamily); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild( - info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - byte[] idxKeyBytes = generateIndexRowKey("v2"); - - // idxPut1 is generated corresponding to dataPut of timestamp 1. - // idxDel2 is generated because the dataDel of timestamp 2 deletes dataPut of timestamp 2 - // and current row state. 
- Put idxPut1 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - Delete idxDel2 = new Delete(idxKeyBytes); - addCellToDelMutation( - idxDel2, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - null, - 2, - Cell.Type.DeleteFamily); - expectedIndexMutations.add(idxDel2); - - assertEqualMutationList(expectedIndexMutations, actualIndexMutations); - } - - // Simulate the put and delete mutation on the covered column of data table - @Test - public void testCoveredIndexColumns() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - "C2"); - - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C1"), - 2, - Cell.Type.DeleteColumn); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutations = new ArrayList<>(); - byte[] idxKeyBytes = generateIndexRowKey("v1"); - - // idxPut1 is generated corresponding to dataPut. - // The column "0:C2" is generated from data table column family and column name, its family name is still default family name of index table - Put idxPut1 = new Put(idxKeyBytes); - addCellToPutMutation(idxPut1, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - Bytes.toBytes("0:C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutations.add(idxPut1); - - // idxKey2 is required by dataDel, as dataDel change the corresponding row key of index table - List idxKey2 = new ArrayList<>(); - idxKey2.add(QueryConstants.SEPARATOR_BYTE); - idxKey2.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.asList(Bytes.toBytes(ROW_KEY))); - byte[] idxKeyBytes2 = org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey2); - Put idxPut2 = new Put(idxKeyBytes2); - addCellToPutMutation(idxPut2, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - Bytes.toBytes("0:C2"), - 2, - Bytes.toBytes("v2")); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutations.add(idxPut2); - - // idxDel is required to invalid the index row "v1_k1", dataDel removed the value of indexed column - Delete idxDel = new Delete(idxKeyBytes); - addCellToDelMutation(idxDel, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - null, - 2, - Cell.Type.DeleteFamily); - expectedIndexMutations.add(idxDel); - - assertEqualMutationList(expectedIndexMutations, actualIndexMutations); - } - - // Simulate the scenario that index column, and covered column belong to different column families - @Test - public void testForMultipleFamilies() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, CF1.C1 VARCHAR, CF2.C2 VARCHAR", //define C1 and C2 with different families - "CF1.C1", - "ROW_KEY", - "CF2.C2"); - - // insert a row to the data table - Put 
dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - Bytes.toBytes("CF1"), - Bytes.toBytes("C1"), - 1, - Bytes.toBytes("v1")); - addCellToPutMutation(dataPut, - Bytes.toBytes("CF2"), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - - // delete the indexed column CF1:C1 - Delete dataDel = new Delete(Bytes.toBytes(ROW_KEY)); - addCellToDelMutation(dataDel, - Bytes.toBytes("CF1"), - Bytes.toBytes("C1"), - 2, - Cell.Type.DeleteColumn); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - dataDel); - - List expectedIndexMutation = new ArrayList<>(); - - byte[] idxKeyBytes = generateIndexRowKey("v1"); - - // index table will use the family name of the first covered column, which is CF2 here. - Put idxPut1 = new Put(idxKeyBytes); - addCellToPutMutation(idxPut1, - Bytes.toBytes("CF2"), - Bytes.toBytes("CF2:C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - expectedIndexMutation.add(idxPut1); - - // idxPut2 and idxDel are the result of dataDel - // idxPut2 is to create the index row "_k1", idxDel is to invalid the index row "v1_k1". - Put idxPut2 = new Put(generateIndexRowKey(null)); - addCellToPutMutation(idxPut2, - Bytes.toBytes("CF2"), - Bytes.toBytes("CF2:C2"), - 2, - Bytes.toBytes("v2")); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - expectedIndexMutation.add(idxPut2); - - Delete idxDel = new Delete(idxKeyBytes); - addCellToDelMutation(idxDel, - Bytes.toBytes("CF2"), - null, - 2, - Cell.Type.DeleteFamily); - expectedIndexMutation.add(idxDel); - - assertEqualMutationList(expectedIndexMutation, actualIndexMutations); - } - - // Simulate two data put with the same value but different time stamp. - // We expect to see 2 index mutations with same value but different time stamps. 
- @Test - public void testSameTypeOfMutationWithSameValueButDifferentTimeStamp() throws Exception { - SetupInfo info = setup(TABLE_NAME, - INDEX_NAME, - "ROW_KEY VARCHAR, C1 VARCHAR, C2 VARCHAR", - "C1", - "ROW_KEY", - ""); - - Put dataPut = new Put(Bytes.toBytes(ROW_KEY)); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v2")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 1); - addCellToPutMutation(dataPut, - info.indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary(), - Bytes.toBytes("C2"), - 1, - Bytes.toBytes("v3")); - addEmptyColumnToDataPutMutation(dataPut, info.pDataTable, 2); - - List actualIndexMutations = IndexRebuildRegionScanner.prepareIndexMutationsForRebuild(info.indexMaintainer, - dataPut, - null); - - byte[] idxKeyBytes = generateIndexRowKey(null); - - // idxPut1 and idxPut2 have same value but different time stamp - Put idxPut1 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut1, info.indexMaintainer, 1); - - Put idxPut2 = new Put(idxKeyBytes); - addEmptyColumnToIndexPutMutation(idxPut2, info.indexMaintainer, 2); - - assertEqualMutationList(Arrays.asList((Mutation)idxPut1, (Mutation)idxPut2), actualIndexMutations); - } - - /** - * Generate the row key for index table by the value of indexed column - * @param indexVal - * @return - */ - byte[] generateIndexRowKey(String indexVal) { - List idxKey = new ArrayList<>(); - if (indexVal != null && !indexVal.isEmpty()) - idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.asList(Bytes.toBytes(indexVal))); - idxKey.add(QueryConstants.SEPARATOR_BYTE); - idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.asList(Bytes.toBytes(ROW_KEY))); - return org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey); + } + + /** + * Compare two mutations without worrying about the order of cells within each mutation + */ + boolean isEqualMutation(Mutation expectedMutation, Mutation actualMutation) { + List expectedCells = new ArrayList<>(); + for (List cells : expectedMutation.getFamilyCellMap().values()) { + expectedCells.addAll(cells); } - void addCellToPutMutation(Put put, byte[] family, byte[] column, long ts, byte[] value) throws Exception { - byte[] rowKey = put.getRow(); - Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowKey) - .setFamily(family) - .setQualifier(column) - .setTimestamp(ts) - .setType(Cell.Type.Put) - .setValue(value) - .build(); - put.add(cell); + List actualCells = new ArrayList<>(); + for (List cells : actualMutation.getFamilyCellMap().values()) { + actualCells.addAll(cells); } - void addCellToDelMutation(Delete del, byte[] family, byte[] column, long ts, Cell.Type type) throws Exception { - byte[] rowKey = del.getRow(); - Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowKey) - .setFamily(family) - .setQualifier(column) - .setTimestamp(ts) - .setType(type) - .setValue(null) - .build(); - del.add(cell); - } - - /** - * Add Empty column to the existing data put mutation - * @param put - * @param ptable - * @param ts - * @throws Exception - */ - void addEmptyColumnToDataPutMutation(Put put, PTable ptable, long ts) throws Exception { - addCellToPutMutation(put, - SchemaUtil.getEmptyColumnFamily(ptable), - QueryConstants.EMPTY_COLUMN_BYTES, - ts, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - } - - /** - * Add the verified flag to the existing index put 
mutation - * @param put - * @param im - * @param ts - * @throws Exception - */ - void addEmptyColumnToIndexPutMutation(Put put, IndexMaintainer im, long ts) throws Exception { - addCellToPutMutation(put, - im.getEmptyKeyValueFamily().copyBytesIfNecessary(), - QueryConstants.EMPTY_COLUMN_BYTES, - ts, - QueryConstants.VERIFIED_BYTES); - } - - /** - * Compare two mutation lists without worrying about the order of the mutations in the lists - * @param expectedMutations - * @param actualMutations - */ - void assertEqualMutationList(List expectedMutations, - List actualMutations) { - assertEquals(expectedMutations.size(), actualMutations.size()); - for (Mutation expected : expectedMutations) { - boolean found = false; - for (Mutation actual: actualMutations) { - if (isEqualMutation(expected, actual)) { - actualMutations.remove(actual); - found = true; - break; - } - } - if (!found) - Assert.fail(String.format("Cannot find mutation:%s", expected)); + if (expectedCells.size() != actualCells.size()) return false; + for (Cell expected : expectedCells) { + boolean found = false; + for (Cell actual : actualCells) { + if (isEqualCell(expected, actual)) { + actualCells.remove(actual); + found = true; + break; } + } + if (!found) return false; } - /** - * Compare two mutations without worrying about the order of cells within each mutation - * @param expectedMutation - * @param actualMutation - * @return - */ - boolean isEqualMutation(Mutation expectedMutation, Mutation actualMutation){ - List expectedCells = new ArrayList<>(); - for (List cells : expectedMutation.getFamilyCellMap().values()) { - expectedCells.addAll(cells); - } - - List actualCells = new ArrayList<>(); - for (List cells : actualMutation.getFamilyCellMap().values()) { - actualCells.addAll(cells); - } - - if (expectedCells.size() != actualCells.size()) - return false; - for(Cell expected : expectedCells) { - boolean found = false; - for(Cell actual: actualCells){ - if (isEqualCell(expected, actual)) { - actualCells.remove(actual); - found = true; - break; - } - } - if (!found) - return false; - } + return true; + } - return true; - } - - boolean isEqualCell(Cell a, Cell b) { - return CellUtil.matchingRows(a, b) - && CellUtil.matchingFamily(a, b) - && CellUtil.matchingQualifier(a, b) - && CellUtil.matchingTimestamp(a, b) - && a.getType() == b.getType() - && CellUtil.matchingValue(a, b); - } + boolean isEqualCell(Cell a, Cell b) { + return CellUtil.matchingRows(a, b) && CellUtil.matchingFamily(a, b) + && CellUtil.matchingQualifier(a, b) && CellUtil.matchingTimestamp(a, b) + && a.getType() == b.getType() && CellUtil.matchingValue(a, b); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/ShouldVerifyTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/ShouldVerifyTest.java index 903503f7ef6..c57de1308b9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/ShouldVerifyTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/ShouldVerifyTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +17,18 @@ */ package org.apache.phoenix.index; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.when; + +import java.io.IOException; + import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner; import org.apache.phoenix.coprocessor.IndexToolVerificationResult; +import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.mapreduce.index.IndexTool; import org.apache.phoenix.mapreduce.index.IndexVerificationResultRepository; import org.junit.Assert; @@ -32,69 +38,79 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import java.io.IOException; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.Mockito.when; - public class ShouldVerifyTest { - @Mock IndexRebuildRegionScanner scanner; - @Mock IndexMaintainer im; - @Mock Scan scan; - @Mock Region region; - @Mock IndexVerificationResultRepository resultRepository; - byte[] indexRowKey; - @Mock IndexToolVerificationResult verificationResult; + @Mock + IndexRebuildRegionScanner scanner; + @Mock + IndexMaintainer im; + @Mock + Scan scan; + @Mock + Region region; + @Mock + IndexVerificationResultRepository resultRepository; + byte[] indexRowKey; + @Mock + IndexToolVerificationResult verificationResult; - @Before - public void setup() throws IOException { - MockitoAnnotations.initMocks(this); - indexRowKey = null; - when(im.getIndexTableName()).thenReturn(Bytes.toBytes("indexName")); - when(scanner.shouldVerify(any(IndexTool.IndexVerifyType.class), ArgumentMatchers.any(), any(Scan.class), - any(Region.class), any(IndexMaintainer.class), - any(IndexVerificationResultRepository.class), anyBoolean())).thenCallRealMethod(); - when(scanner.shouldVerify()).thenCallRealMethod(); - } + @Before + public void setup() throws IOException { + MockitoAnnotations.initMocks(this); + indexRowKey = null; + when(im.getIndexTableName()).thenReturn(Bytes.toBytes("indexName")); + when(scanner.shouldVerify(any(IndexTool.IndexVerifyType.class), ArgumentMatchers. 
any(), + any(Scan.class), any(Region.class), any(IndexMaintainer.class), + any(IndexVerificationResultRepository.class), anyBoolean())).thenCallRealMethod(); + when(scanner.shouldVerify()).thenCallRealMethod(); + } - @Test - public void testShouldVerify_repair_true() throws IOException { - indexRowKey = new byte[5]; - Assert.assertTrue(scanner.shouldVerify(IndexTool.IndexVerifyType.ONLY, indexRowKey, scan, region, im, resultRepository, false)); - } + @Test + public void testShouldVerify_repair_true() throws IOException { + indexRowKey = new byte[5]; + Assert.assertTrue(scanner.shouldVerify(IndexTool.IndexVerifyType.ONLY, indexRowKey, scan, + region, im, resultRepository, false)); + } - @Test - public void testShouldVerify_repair_rebuild_true() throws IOException { - indexRowKey = new byte[5]; - when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)).thenReturn(Bytes.toBytes(1L)); - assertShouldVerify(true); - } + @Test + public void testShouldVerify_repair_rebuild_true() throws IOException { + indexRowKey = new byte[5]; + when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)) + .thenReturn(Bytes.toBytes(1L)); + assertShouldVerify(true); + } - private void assertShouldVerify(boolean assertion) throws IOException { - Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.NONE, indexRowKey, scan, region, im, resultRepository, false)); - Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.BEFORE, indexRowKey, scan, region, im, resultRepository, false)); - Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.AFTER, indexRowKey, scan, region, im, resultRepository, false)); - } + private void assertShouldVerify(boolean assertion) throws IOException { + Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.NONE, indexRowKey, + scan, region, im, resultRepository, false)); + Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.BEFORE, + indexRowKey, scan, region, im, resultRepository, false)); + Assert.assertEquals(assertion, scanner.shouldVerify(IndexTool.IndexVerifyType.AFTER, + indexRowKey, scan, region, im, resultRepository, false)); + } - @Test - public void testShouldVerify_false() throws IOException { - when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)).thenReturn(Bytes.toBytes(1L)); - when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())).thenReturn(verificationResult); - assertShouldVerify(false); - } + @Test + public void testShouldVerify_false() throws IOException { + when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)) + .thenReturn(Bytes.toBytes(1L)); + when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())) + .thenReturn(verificationResult); + assertShouldVerify(false); + } - @Test - public void testShouldVerify_rebuild_true() throws IOException { - when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)).thenReturn(Bytes.toBytes(1L)); - when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())).thenReturn(null); - assertShouldVerify(true); - } + @Test + public void testShouldVerify_rebuild_true() throws IOException { + when(scan.getAttribute(BaseScannerRegionObserverConstants.INDEX_RETRY_VERIFY)) + .thenReturn(Bytes.toBytes(1L)); + when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())) + .thenReturn(null); + 
assertShouldVerify(true); + } - @Test - public void testShouldVerify_noTime_true() throws IOException { - when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())).thenReturn(verificationResult); - assertShouldVerify(true); - } + @Test + public void testShouldVerify_noTime_true() throws IOException { + when(resultRepository.getVerificationResult(1L, scan, region, im.getIndexTableName())) + .thenReturn(verificationResult); + assertShouldVerify(true); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java index 2cdfc1a8f8f..416034f5cf8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,22 @@ */ package org.apache.phoenix.index; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import static org.apache.phoenix.coprocessor.GlobalIndexRegionScanner.MUTATION_TS_DESC_COMPARATOR; +import static org.apache.phoenix.query.QueryConstants.EMPTY_COLUMN_BYTES; +import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; +import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; +import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.*; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; @@ -31,7 +45,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.coprocessor.GlobalIndexRegionScanner; import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner; import org.apache.phoenix.coprocessor.IndexToolVerificationResult; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -40,6 +53,8 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.PTableKey; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.phoenix.util.*; import org.junit.After; import org.junit.Before; @@ -51,759 +66,728 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.io.IOException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.*; - -import static org.apache.phoenix.coprocessor.GlobalIndexRegionScanner.MUTATION_TS_DESC_COMPARATOR; -import static org.apache.phoenix.query.QueryConstants.UNVERIFIED_BYTES; -import static org.apache.phoenix.query.QueryConstants.VERIFIED_BYTES; -import 
static org.apache.phoenix.query.QueryConstants.EMPTY_COLUMN_BYTES; -import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.when; - public class VerifySingleIndexRowTest extends BaseConnectionlessQueryTest { - private static final int INDEX_TABLE_EXPIRY_SEC = 1; - private static final String UNEXPECTED_COLUMN = "0:UNEXPECTED_COLUMN"; - public static final String FIRST_ID = "FIRST_ID"; - public static final String SECOND_ID = "SECOND_ID"; - public static final String FIRST_VALUE = "FIRST_VALUE"; - public static final String SECOND_VALUE = "SECOND_VALUE"; - public static final String - CREATE_TABLE_DDL = "CREATE TABLE IF NOT EXISTS %s (FIRST_ID BIGINT NOT NULL, " - + "SECOND_ID BIGINT NOT NULL, FIRST_VALUE VARCHAR(20), " - + "SECOND_VALUE INTEGER " - + "CONSTRAINT PK PRIMARY KEY(FIRST_ID, SECOND_ID)) COLUMN_ENCODED_BYTES=0"; - - public static final String - CREATE_INDEX_DDL = "CREATE INDEX %s ON %s (SECOND_VALUE) INCLUDE (FIRST_VALUE)"; - public static final String COMPLETE_ROW_UPSERT = "UPSERT INTO %s VALUES (?,?,?,?)"; - public static final String PARTIAL_ROW_UPSERT = "UPSERT INTO %s (%s, %s, %s) VALUES (?,?,?)"; - public static final String DELETE_ROW_DML = "DELETE FROM %s WHERE %s = ? AND %s = ?"; - public static final String INCLUDED_COLUMN = "0:FIRST_VALUE"; - - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - - private enum TestType { - //set of mutations matching expected mutations - VALID_EXACT_MATCH, - //mix of delete and put mutations - VALID_MIX_MUTATIONS, - //only incoming unverified mutations - VALID_NEW_UNVERIFIED_MUTATIONS, - //extra mutations mimicking incoming mutations - VALID_MORE_MUTATIONS, - // mimicking the case where the data cells expired but index has them still - VALID_EXTRA_CELL, - EXPIRED, - INVALID_EXTRA_CELL, - INVALID_EMPTY_CELL, - INVALID_CELL_VALUE, - INVALID_COLUMN - } - - public static class UnitTestClock extends EnvironmentEdge { - long initialTime; - long delta; - - public UnitTestClock(long delta) { - initialTime = System.currentTimeMillis() + delta; - this.delta = delta; - } - - @Override - public long currentTime() { - return System.currentTimeMillis() + delta; + private static final int INDEX_TABLE_EXPIRY_SEC = 1; + private static final String UNEXPECTED_COLUMN = "0:UNEXPECTED_COLUMN"; + public static final String FIRST_ID = "FIRST_ID"; + public static final String SECOND_ID = "SECOND_ID"; + public static final String FIRST_VALUE = "FIRST_VALUE"; + public static final String SECOND_VALUE = "SECOND_VALUE"; + public static final String CREATE_TABLE_DDL = + "CREATE TABLE IF NOT EXISTS %s (FIRST_ID BIGINT NOT NULL, " + + "SECOND_ID BIGINT NOT NULL, FIRST_VALUE VARCHAR(20), " + "SECOND_VALUE INTEGER " + + "CONSTRAINT PK PRIMARY KEY(FIRST_ID, SECOND_ID)) COLUMN_ENCODED_BYTES=0"; + + public static final String CREATE_INDEX_DDL = + "CREATE INDEX %s ON %s (SECOND_VALUE) INCLUDE (FIRST_VALUE)"; + public static final String COMPLETE_ROW_UPSERT = "UPSERT INTO %s VALUES (?,?,?,?)"; + public static final String PARTIAL_ROW_UPSERT = "UPSERT INTO %s (%s, %s, %s) VALUES (?,?,?)"; + public static final String DELETE_ROW_DML = "DELETE FROM %s WHERE %s = ? 
AND %s = ?"; + public static final String INCLUDED_COLUMN = "0:FIRST_VALUE"; + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + private enum TestType { + // set of mutations matching expected mutations + VALID_EXACT_MATCH, + // mix of delete and put mutations + VALID_MIX_MUTATIONS, + // only incoming unverified mutations + VALID_NEW_UNVERIFIED_MUTATIONS, + // extra mutations mimicking incoming mutations + VALID_MORE_MUTATIONS, + // mimicking the case where the data cells expired but index has them still + VALID_EXTRA_CELL, + EXPIRED, + INVALID_EXTRA_CELL, + INVALID_EMPTY_CELL, + INVALID_CELL_VALUE, + INVALID_COLUMN + } + + public static class UnitTestClock extends EnvironmentEdge { + long initialTime; + long delta; + + public UnitTestClock(long delta) { + initialTime = System.currentTimeMillis() + delta; + this.delta = delta; + } + + @Override + public long currentTime() { + return System.currentTimeMillis() + delta; + } + } + + @Mock + Result indexRow; + @Mock + IndexRebuildRegionScanner rebuildScanner; + List actualMutationList; + String schema, table, dataTableFullName, index, indexTableFullName; + PTable pIndexTable, pDataTable; + Put put = null; + Delete delete = null; + PhoenixConnection pconn; + IndexToolVerificationResult.PhaseResult actualPR; + public Map> indexKeyToMutationMap = null; + Set mostRecentIndexRowKeys; + private IndexMaintainer indexMaintainer; + + @Before + public void setup() throws SQLException, IOException { + MockitoAnnotations.initMocks(this); + createDBObject(); + createMutationsWithUpserts(); + initializeRebuildScannerAttributes(); + initializeGlobalMockitoSetup(); + } + + @After + public void reset() { + EnvironmentEdgeManager.reset(); + } + + public void createDBObject() throws SQLException { + try (Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { + schema = generateUniqueName(); + table = generateUniqueName(); + index = generateUniqueName(); + dataTableFullName = SchemaUtil.getQualifiedTableName(schema, table); + indexTableFullName = SchemaUtil.getQualifiedTableName(schema, index); + + conn.createStatement().execute(String.format(CREATE_TABLE_DDL, dataTableFullName)); + conn.createStatement().execute(String.format(CREATE_INDEX_DDL, index, dataTableFullName)); + conn.commit(); + + pconn = conn.unwrap(PhoenixConnection.class); + pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), indexTableFullName)); + pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), dataTableFullName)); + } + } + + private void createMutationsWithUpserts() throws SQLException, IOException { + deleteRow(2, 3); + upsertPartialRow(2, 3, "abc"); + upsertCompleteRow(2, 3, "hik", 8); + upsertPartialRow(2, 3, 10); + upsertPartialRow(2, 3, 4); + deleteRow(2, 3); + upsertPartialRow(2, 3, "def"); + upsertCompleteRow(2, 3, null, 20); + upsertPartialRow(2, 3, "wert"); + } + + private void deleteRow(int key1, int key2) throws SQLException, IOException { + try (Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { + PreparedStatement ps = conn + .prepareStatement(String.format(DELETE_ROW_DML, dataTableFullName, FIRST_ID, SECOND_ID)); + ps.setInt(1, key1); + ps.setInt(2, key2); + ps.execute(); + convertUpsertToMutations(conn); + } + } + + private void upsertPartialRow(int key1, int key2, String val1) throws SQLException, IOException { + + try (Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { + PreparedStatement ps = conn.prepareStatement( + 
String.format(PARTIAL_ROW_UPSERT, dataTableFullName, FIRST_ID, SECOND_ID, FIRST_VALUE)); + ps.setInt(1, key1); + ps.setInt(2, key2); + ps.setString(3, val1); + ps.execute(); + convertUpsertToMutations(conn); + } + } + + private void upsertPartialRow(int key1, int key2, int value1) throws SQLException, IOException { + + try (Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { + PreparedStatement ps = conn.prepareStatement( + String.format(PARTIAL_ROW_UPSERT, dataTableFullName, FIRST_ID, SECOND_ID, SECOND_VALUE)); + ps.setInt(1, key1); + ps.setInt(2, key2); + ps.setInt(3, value1); + ps.execute(); + convertUpsertToMutations(conn); + } + } + + private void upsertCompleteRow(int key1, int key2, String val1, int val2) + throws SQLException, IOException { + try (Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { + PreparedStatement ps = + conn.prepareStatement(String.format(COMPLETE_ROW_UPSERT, dataTableFullName)); + ps.setInt(1, key1); + ps.setInt(2, key2); + ps.setString(3, val1); + ps.setInt(4, val2); + ps.execute(); + convertUpsertToMutations(conn); + } + } + + private void convertUpsertToMutations(Connection conn) throws SQLException, IOException { + Iterator>> dataTableNameAndMutationKeyValuesIter = + PhoenixRuntime.getUncommittedDataIterator(conn); + Pair> elem = dataTableNameAndMutationKeyValuesIter.next(); + byte[] key = CellUtil.cloneRow(elem.getSecond().get(0)); + long mutationTS = EnvironmentEdgeManager.currentTimeMillis(); + + for (Cell kv : elem.getSecond()) { + Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(CellUtil.cloneRow(kv)) + .setFamily(CellUtil.cloneFamily(kv)).setQualifier(CellUtil.cloneQualifier(kv)) + .setTimestamp(mutationTS).setType(kv.getType()).setValue(CellUtil.cloneValue(kv)).build(); + if (cell.getType().equals(Cell.Type.Put)) { + if (put == null) { + put = new Put(key); } - } - - @Mock - Result indexRow; - @Mock - IndexRebuildRegionScanner rebuildScanner; - List actualMutationList; - String schema, table, dataTableFullName, index, indexTableFullName; - PTable pIndexTable, pDataTable; - Put put = null; - Delete delete = null; - PhoenixConnection pconn; - IndexToolVerificationResult.PhaseResult actualPR; - public Map> indexKeyToMutationMap = null; - Set mostRecentIndexRowKeys; - private IndexMaintainer indexMaintainer; - - @Before - public void setup() throws SQLException, IOException { - MockitoAnnotations.initMocks(this); - createDBObject(); - createMutationsWithUpserts(); - initializeRebuildScannerAttributes(); - initializeGlobalMockitoSetup(); - } - - @After - public void reset() { - EnvironmentEdgeManager.reset(); - } - - public void createDBObject() throws SQLException { - try(Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { - schema = generateUniqueName(); - table = generateUniqueName(); - index = generateUniqueName(); - dataTableFullName = SchemaUtil.getQualifiedTableName(schema, table); - indexTableFullName = SchemaUtil.getQualifiedTableName(schema, index); - - conn.createStatement().execute(String.format(CREATE_TABLE_DDL, dataTableFullName)); - conn.createStatement().execute(String.format(CREATE_INDEX_DDL, index, dataTableFullName)); - conn.commit(); - - pconn = conn.unwrap(PhoenixConnection.class); - pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), indexTableFullName)); - pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), dataTableFullName)); - } - } - - private void createMutationsWithUpserts() throws SQLException, 
IOException { - deleteRow(2, 3); - upsertPartialRow(2, 3, "abc"); - upsertCompleteRow(2, 3, "hik", 8); - upsertPartialRow(2, 3, 10); - upsertPartialRow(2,3,4); - deleteRow(2, 3); - upsertPartialRow(2,3, "def"); - upsertCompleteRow(2, 3, null, 20); - upsertPartialRow(2,3, "wert"); - } - - private void deleteRow(int key1, int key2) throws SQLException, IOException { - try(Connection conn = DriverManager.getConnection(getUrl(), new Properties())){ - PreparedStatement ps = - conn.prepareStatement( - String.format(DELETE_ROW_DML, dataTableFullName, FIRST_ID, SECOND_ID)); - ps.setInt(1, key1); - ps.setInt(2, key2); - ps.execute(); - convertUpsertToMutations(conn); - } - } - - private void upsertPartialRow(int key1, int key2, String val1) - throws SQLException, IOException { - - try(Connection conn = DriverManager.getConnection(getUrl(), new Properties())){ - PreparedStatement ps = - conn.prepareStatement( - String.format(PARTIAL_ROW_UPSERT, dataTableFullName, FIRST_ID, SECOND_ID, - FIRST_VALUE)); - ps.setInt(1, key1); - ps.setInt(2, key2); - ps.setString(3, val1); - ps.execute(); - convertUpsertToMutations(conn); - } - } - - private void upsertPartialRow(int key1, int key2, int value1) - throws SQLException, IOException { - - try(Connection conn = DriverManager.getConnection(getUrl(), new Properties())){ - PreparedStatement - ps = - conn.prepareStatement( - String.format(PARTIAL_ROW_UPSERT, dataTableFullName, FIRST_ID, SECOND_ID, - SECOND_VALUE)); - ps.setInt(1, key1); - ps.setInt(2, key2); - ps.setInt(3, value1); - ps.execute(); - convertUpsertToMutations(conn); - } - } - - private void upsertCompleteRow(int key1, int key2, String val1 - , int val2) throws SQLException, IOException { - try(Connection conn = DriverManager.getConnection(getUrl(), new Properties())) { - PreparedStatement - ps = conn.prepareStatement(String.format(COMPLETE_ROW_UPSERT, dataTableFullName)); - ps.setInt(1, key1); - ps.setInt(2, key2); - ps.setString(3, val1); - ps.setInt(4, val2); - ps.execute(); - convertUpsertToMutations(conn); - } - } - - private void convertUpsertToMutations(Connection conn) throws SQLException, IOException { - Iterator>> - dataTableNameAndMutationKeyValuesIter = PhoenixRuntime.getUncommittedDataIterator(conn); - Pair> elem = dataTableNameAndMutationKeyValuesIter.next(); - byte[] key = CellUtil.cloneRow(elem.getSecond().get(0)); - long mutationTS = EnvironmentEdgeManager.currentTimeMillis(); - - for (Cell kv : elem.getSecond()) { - Cell cell = - CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(CellUtil.cloneRow(kv)). - setFamily(CellUtil.cloneFamily(kv)). - setQualifier(CellUtil.cloneQualifier(kv)). - setTimestamp(mutationTS). - setType(kv.getType()). 
- setValue(CellUtil.cloneValue(kv)).build(); - if (cell.getType().equals(Cell.Type.Put)) { - if (put == null ) { - put = new Put(key); - } - put.add(cell); - } else { - if (delete == null) { - delete = new Delete(key); - } - delete.add(cell); - } - } - } - - private void initializeRebuildScannerAttributes() throws SQLException { - when(rebuildScanner.setIndexTableTTL(ArgumentMatchers.anyInt())).thenCallRealMethod(); - when(rebuildScanner.setIndexMaintainer(ArgumentMatchers.any())).thenCallRealMethod(); - when(rebuildScanner.setMaxLookBackInMills(ArgumentMatchers.anyLong())).thenCallRealMethod(); - rebuildScanner.setIndexTableTTL(HConstants.FOREVER); - indexMaintainer = pIndexTable.getIndexMaintainer(pDataTable, pconn); - rebuildScanner.setIndexMaintainer(indexMaintainer); - // set the maxLookBack to infinite to avoid the compaction - rebuildScanner.setMaxLookBackInMills(Long.MAX_VALUE); - } - - private void initializeGlobalMockitoSetup() throws IOException { - //setup - when(indexMaintainer.getIndexRowKey(put)).thenCallRealMethod(); - when(rebuildScanner.prepareIndexMutations(put, delete, indexKeyToMutationMap, mostRecentIndexRowKeys)).thenCallRealMethod(); - when(rebuildScanner.verifySingleIndexRow(ArgumentMatchers.any(), ArgumentMatchers.any(),ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), - ArgumentMatchers.any(), ArgumentMatchers.anyBoolean())).thenCallRealMethod(); - doNothing().when(rebuildScanner) - .logToIndexToolOutputTable(ArgumentMatchers.any(),ArgumentMatchers.any(), - Mockito.anyLong(),Mockito.anyLong(), Mockito.anyString(), - ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), - Mockito.any()); - doNothing().when(rebuildScanner) - .logToIndexToolOutputTable(ArgumentMatchers.any(),ArgumentMatchers.any(), - Mockito.anyLong(),Mockito.anyLong(), Mockito.anyString(), - ArgumentMatchers.anyBoolean(), - Mockito.any()); - - //populate the local map to use to create actual mutations - indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); - rebuildScanner.prepareIndexMutations(put, delete, indexKeyToMutationMap, mostRecentIndexRowKeys); - } - - private byte[] getValidRowKey() { - return indexKeyToMutationMap.entrySet().iterator().next().getKey(); - } - - @Test - public void testVerifySingleIndexRow_validIndexRowCount_nonZero() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.VALID_EXACT_MATCH); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_validIndexRowCount_moreActual() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.VALID_MORE_MUTATIONS); - //test code - - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_allMix() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); - for 
(Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.VALID_MIX_MUTATIONS); - //test code - - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_allUnverified() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.VALID_NEW_UNVERIFIED_MUTATIONS); - //test code - - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_expiredIndexRowCount_nonZero() throws IOException { - IndexToolVerificationResult.PhaseResult - expectedPR = new IndexToolVerificationResult.PhaseResult(0, 1, 0, 0, 0, 0, 0, 0, 0, 0); - try { - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.EXPIRED); - expireThisRow(); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } finally { - EnvironmentEdgeManager.reset(); - } - } - - @Test - public void testVerifySingleIndexRow_invalidIndexRowCount_cellValue() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); - expectedPR.setIndexHasExtraCellsCount(1); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.INVALID_CELL_VALUE); - //test code - - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_invalidIndexRowCount_emptyCell() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.INVALID_EMPTY_CELL); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_invalidIndexRowCount_diffColumn() throws IOException { - IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); - expectedPR.setIndexHasExtraCellsCount(1); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.INVALID_COLUMN); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_invalidIndexRowCount_extraCell() throws IOException { - IndexToolVerificationResult.PhaseResult 
expectedPR = getInvalidPhaseResult(); - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.INVALID_EXTRA_CELL); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(actualPR, expectedPR); - } - } - - @Test - public void testVerifySingleIndexRow_expectedMutations_null() throws IOException { - when(indexRow.getRow()).thenReturn(Bytes.toBytes(1)); - exceptionRule.expect(DoNotRetryIOException.class); - exceptionRule.expectMessage(IndexRebuildRegionScanner.NO_EXPECTED_MUTATION); - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - } - - @Test - public void testVerifySingleIndexRow_validIndexRowCount_extraCell() throws IOException { - for (Map.Entry> - entry : indexKeyToMutationMap.entrySet()) { - initializeLocalMockitoSetup(entry, TestType.VALID_EXTRA_CELL); - //test code - rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, true); - - assertEquals(1, actualPR.getIndexHasExtraCellsCount()); - } - } - - // Test the major compaction on index table only. - // There is at least one expected mutation within maxLookBack that has its matching one in the actual list. - // However there are some expected mutations outside of maxLookBack, which matching ones in actual list may be compacted away. - // We will report such row as a valid row. - @Test - public void testVerifySingleIndexRow_compactionOnIndexTable_atLeastOneExpectedMutationWithinMaxLookBack() throws Exception { - String dataRowKey = "k1"; - byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1"); - ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge(); - injectEdge.setValue(1); - EnvironmentEdgeManager.injectEdge(injectEdge); - - List expectedMutations = new ArrayList<>(); - List actualMutations = new ArrayList<>(); - // change the maxLookBack from infinite to some interval, which allows to simulate the mutation beyond the maxLookBack window. - long maxLookbackInMills = 10 * 1000; - rebuildScanner.setMaxLookBackInMills(maxLookbackInMills); - - Put put = new Put(indexRowKey1Bytes); - Cell cell = getNewCell(indexRowKey1Bytes, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - EnvironmentEdgeManager.currentTimeMillis(), - Cell.Type.Put, - QueryConstants.VERIFIED_BYTES); - put.add(cell); - // This mutation is beyond maxLookBack, so add it to expectedMutations only. 
- expectedMutations.add(put); - - // advance the time of maxLookBack, so last mutation will be outside of maxLookBack, - // next mutation will be within maxLookBack - injectEdge.incrementValue(maxLookbackInMills); - put = new Put(indexRowKey1Bytes); - cell = getNewCell(indexRowKey1Bytes, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - EnvironmentEdgeManager.currentTimeMillis(), - Cell.Type.Put, - QueryConstants.VERIFIED_BYTES); put.add(cell); - // This mutation is in both expectedMutations and actualMutations, as it is within the maxLookBack, so it will not get chance to be compacted away - expectedMutations.add(put); - actualMutations.add(put); - Result actualMutationsScanResult = Result.create(Arrays.asList(cell)); - - Map> indexKeyToMutationMap = Maps.newTreeMap((Bytes.BYTES_COMPARATOR)); - indexKeyToMutationMap.put(indexRowKey1Bytes, expectedMutations); - when(rebuildScanner.prepareActualIndexMutations(any(Result.class))).thenReturn(actualMutations); - when(indexRow.getRow()).thenReturn(indexRowKey1Bytes); - injectEdge.incrementValue(1); - IndexToolVerificationResult.PhaseResult actualPR = new IndexToolVerificationResult.PhaseResult(); - Collections.sort(indexKeyToMutationMap.get(indexRow.getRow()), MUTATION_TS_DESC_COMPARATOR); - // Report this validation as a success - assertTrue(rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutations, - indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, actualPR, false)); - // validIndexRowCount = 1 - IndexToolVerificationResult.PhaseResult expectedPR = new IndexToolVerificationResult.PhaseResult(1, 0, 0, 0, 0, 0, 0, 0, 0, 0); - assertTrue(actualPR.equals(expectedPR)); - } - - // Test the major compaction on index table only. - // All expected mutations are beyond the maxLookBack, and there are no matching ones in the actual list because of major compaction. - // We will report such row as an invalid beyond maxLookBack row. - @Test - public void testVerifySingleIndexRow_compactionOnIndexTable_noExpectedMutationWithinMaxLookBack() throws Exception { - String dataRowKey = "k1"; - byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1"); - List expectedMutations = new ArrayList<>(); - List actualMutations = new ArrayList<>(); - // change the maxLookBack from infinite to some interval, which allows to simulate the mutation beyond the maxLookBack window. - long maxLookbackInMills = 10 * 1000; - rebuildScanner.setMaxLookBackInMills(maxLookbackInMills); - - ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge(); - injectEdge.setValue(1); - EnvironmentEdgeManager.injectEdge(injectEdge); - - Put put = new Put(indexRowKey1Bytes); - Cell cell = getNewCell(indexRowKey1Bytes, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - EnvironmentEdgeManager.currentTimeMillis(), - Cell.Type.Put, - VERIFIED_BYTES); - put.add(cell); - // This mutation is beyond maxLookBack, so add it to expectedMutations only. 
- expectedMutations.add(put); - - injectEdge.incrementValue(maxLookbackInMills); - put = new Put(indexRowKey1Bytes); - cell = getNewCell(indexRowKey1Bytes, - QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_BYTES, - EnvironmentEdgeManager.currentTimeMillis(), - Cell.Type.Put, - UNVERIFIED_BYTES); - put.add(cell); - // This mutation is actualMutations only, as it is an unverified put - actualMutations.add(put); - Result actualMutationsScanResult = Result.create(Arrays.asList(cell)); - - Map> indexKeyToMutationMap = Maps.newTreeMap((Bytes.BYTES_COMPARATOR)); - indexKeyToMutationMap.put(indexRowKey1Bytes, expectedMutations); - mostRecentIndexRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); - when(rebuildScanner.prepareActualIndexMutations(any(Result.class))).thenReturn(actualMutations); - when(indexRow.getRow()).thenReturn(indexRowKey1Bytes); - - injectEdge.incrementValue(1); - IndexToolVerificationResult.PhaseResult actualPR = new IndexToolVerificationResult.PhaseResult(); - // Report this validation as a failure - assertFalse(rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutations, expectedMutations, mostRecentIndexRowKeys, new ArrayList(), actualPR, true)); - // beyondMaxLookBackInvalidIndexRowCount = 1 - IndexToolVerificationResult.PhaseResult expectedPR = new IndexToolVerificationResult.PhaseResult(0, 0, 0, 0, 0, 1, 0, 0, 0, 0); - assertTrue(actualPR.equals(expectedPR)); - } - - private static byte[] generateIndexRowKey(String dataRowKey, String dataVal){ - List idxKey = new ArrayList<>(); - if (dataVal != null && !dataVal.isEmpty()) - idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.asList(Bytes.toBytes(dataVal))); - idxKey.add(QueryConstants.SEPARATOR_BYTE); - idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.asList(Bytes.toBytes(dataRowKey))); - return org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey); - } - - private IndexToolVerificationResult.PhaseResult getValidPhaseResult() { - return new IndexToolVerificationResult.PhaseResult(1, 0, 0, 0, 0, 0, 0, 0, 0, 0); - } - - private IndexToolVerificationResult.PhaseResult getInvalidPhaseResult() { - return new IndexToolVerificationResult.PhaseResult(0, 0, 0, 1, 0, 0, 0, 0, 0, 0); - } - - private void initializeLocalMockitoSetup(Map.Entry> entry, - TestType testType) - throws IOException { - actualPR = new IndexToolVerificationResult.PhaseResult(); - byte[] indexKey = entry.getKey(); - when(indexRow.getRow()).thenReturn(indexKey); - actualMutationList = buildActualIndexMutationsList(testType); - when(rebuildScanner.prepareActualIndexMutations(indexRow)).thenReturn(actualMutationList); - } - - private List buildActualIndexMutationsList(TestType testType) { - List actualMutations = new ArrayList<>(); - actualMutations.addAll(indexKeyToMutationMap.get(indexRow.getRow())); - if(testType.equals(TestType.EXPIRED)) { - return actualMutations; - } - if(testType.toString().startsWith("VALID")) { - return getValidActualMutations(testType, actualMutations); + } else { + if (delete == null) { + delete = new Delete(key); } - if(testType.toString().startsWith("INVALID")) { - return getInvalidActualMutations(testType, actualMutations); - } - return null; - } - - private List getValidActualMutations(TestType testType, - List actualMutations) { - List newActualMutations = new ArrayList<>(); - if(testType.equals(TestType.VALID_EXACT_MATCH)) { - return actualMutations; - } - if (testType.equals(TestType.VALID_MIX_MUTATIONS)) { - 
newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - newActualMutations.add(getDeleteMutation(actualMutations.get(0), new Long(1))); - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - } - if (testType.equals(TestType.VALID_NEW_UNVERIFIED_MUTATIONS)) { - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), new Long(1))); - } - newActualMutations.addAll(actualMutations); - if(testType.equals(TestType.VALID_MORE_MUTATIONS)) { - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); - newActualMutations.add(getDeleteMutation(actualMutations.get(0), null)); - newActualMutations.add(getDeleteMutation(actualMutations.get(0), new Long(1))); - newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), new Long(1))); - } - if(testType.equals(TestType.VALID_EXTRA_CELL)) { - for (Mutation m : newActualMutations) { - if (m instanceof Put) { - List origList = m.getFamilyCellMap().firstEntry().getValue(); - Cell newCell = getNewCell(m.getRow(), CellUtil.cloneFamily(origList.get(0)), - Bytes.toBytes("EXTRACOL"), m.getTimestamp(), Cell.Type.Put, - Bytes.toBytes("asdfg")); - byte[] fam = CellUtil.cloneFamily(origList.get(0)); - m.getFamilyCellMap().get(fam).add(newCell); - break; - } - } - } - return newActualMutations; - } - - private List getInvalidActualMutations(TestType testType, - List actualMutations) { - List newActualMutations = new ArrayList<>(); - newActualMutations.addAll(actualMutations); - for (Mutation m : actualMutations) { - newActualMutations.remove(m); - NavigableMap> familyCellMap = m.getFamilyCellMap(); - List cellList = familyCellMap.firstEntry().getValue(); - List newCellList = new ArrayList<>(); - byte[] fam = CellUtil.cloneFamily(cellList.get(0)); - for (Cell c : cellList) { - infiltrateCell(c, newCellList, testType); - } - familyCellMap.put(fam, newCellList); - Mutation newM; - if (m instanceof Put) { - newM = new Put(m.getRow(), m.getTimestamp(), familyCellMap); - } else { - newM = new Delete(m.getRow(), m.getTimestamp(), familyCellMap); - } - newActualMutations.add(newM); + delete.add(cell); + } + } + } + + private void initializeRebuildScannerAttributes() throws SQLException { + when(rebuildScanner.setIndexTableTTL(ArgumentMatchers.anyInt())).thenCallRealMethod(); + when(rebuildScanner.setIndexMaintainer(ArgumentMatchers. any())) + .thenCallRealMethod(); + when(rebuildScanner.setMaxLookBackInMills(ArgumentMatchers.anyLong())).thenCallRealMethod(); + rebuildScanner.setIndexTableTTL(HConstants.FOREVER); + indexMaintainer = pIndexTable.getIndexMaintainer(pDataTable, pconn); + rebuildScanner.setIndexMaintainer(indexMaintainer); + // set the maxLookBack to infinite to avoid the compaction + rebuildScanner.setMaxLookBackInMills(Long.MAX_VALUE); + } + + private void initializeGlobalMockitoSetup() throws IOException { + // setup + when(indexMaintainer.getIndexRowKey(put)).thenCallRealMethod(); + when(rebuildScanner.prepareIndexMutations(put, delete, indexKeyToMutationMap, + mostRecentIndexRowKeys)).thenCallRealMethod(); + when(rebuildScanner.verifySingleIndexRow(ArgumentMatchers. any(), + ArgumentMatchers. any(), ArgumentMatchers. any(), ArgumentMatchers. any(), + ArgumentMatchers. any(), + ArgumentMatchers. 
any(), + ArgumentMatchers.anyBoolean())).thenCallRealMethod(); + doNothing().when(rebuildScanner).logToIndexToolOutputTable(ArgumentMatchers. any(), + ArgumentMatchers. any(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), + ArgumentMatchers. any(), ArgumentMatchers. any(), + ArgumentMatchers.anyBoolean(), + Mockito. any()); + doNothing().when(rebuildScanner).logToIndexToolOutputTable(ArgumentMatchers. any(), + ArgumentMatchers. any(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), + ArgumentMatchers.anyBoolean(), + Mockito. any()); + + // populate the local map to use to create actual mutations + indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); + rebuildScanner.prepareIndexMutations(put, delete, indexKeyToMutationMap, + mostRecentIndexRowKeys); + } + + private byte[] getValidRowKey() { + return indexKeyToMutationMap.entrySet().iterator().next().getKey(); + } + + @Test + public void testVerifySingleIndexRow_validIndexRowCount_nonZero() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.VALID_EXACT_MATCH); + // test code + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_validIndexRowCount_moreActual() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.VALID_MORE_MUTATIONS); + // test code + + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_allMix() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.VALID_MIX_MUTATIONS); + // test code + + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_allUnverified() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getValidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.VALID_NEW_UNVERIFIED_MUTATIONS); + // test code + + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_expiredIndexRowCount_nonZero() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = + new IndexToolVerificationResult.PhaseResult(0, 1, 0, 0, 0, 0, 0, 0, 0, 0); + try { + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.EXPIRED); + expireThisRow(); + // test code + 
rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } finally { + EnvironmentEdgeManager.reset(); + } + } + + @Test + public void testVerifySingleIndexRow_invalidIndexRowCount_cellValue() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); + expectedPR.setIndexHasExtraCellsCount(1); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.INVALID_CELL_VALUE); + // test code + + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_invalidIndexRowCount_emptyCell() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.INVALID_EMPTY_CELL); + // test code + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_invalidIndexRowCount_diffColumn() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); + expectedPR.setIndexHasExtraCellsCount(1); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.INVALID_COLUMN); + // test code + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_invalidIndexRowCount_extraCell() throws IOException { + IndexToolVerificationResult.PhaseResult expectedPR = getInvalidPhaseResult(); + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.INVALID_EXTRA_CELL); + // test code + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, true); + + assertEquals(actualPR, expectedPR); + } + } + + @Test + public void testVerifySingleIndexRow_expectedMutations_null() throws IOException { + when(indexRow.getRow()).thenReturn(Bytes.toBytes(1)); + exceptionRule.expect(DoNotRetryIOException.class); + exceptionRule.expectMessage(IndexRebuildRegionScanner.NO_EXPECTED_MUTATION); + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, + actualPR, true); + } + + @Test + public void testVerifySingleIndexRow_validIndexRowCount_extraCell() throws IOException { + for (Map.Entry> entry : indexKeyToMutationMap.entrySet()) { + initializeLocalMockitoSetup(entry, TestType.VALID_EXTRA_CELL); + // test code + rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutationList, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, + Collections.EMPTY_LIST, actualPR, 
true); + + assertEquals(1, actualPR.getIndexHasExtraCellsCount()); + } + } + + // Test the major compaction on index table only. + // There is at least one expected mutation within maxLookBack that has its matching one in the + // actual list. + // However there are some expected mutations outside of maxLookBack, which matching ones in actual + // list may be compacted away. + // We will report such row as a valid row. + @Test + public void + testVerifySingleIndexRow_compactionOnIndexTable_atLeastOneExpectedMutationWithinMaxLookBack() + throws Exception { + String dataRowKey = "k1"; + byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1"); + ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge(); + injectEdge.setValue(1); + EnvironmentEdgeManager.injectEdge(injectEdge); + + List expectedMutations = new ArrayList<>(); + List actualMutations = new ArrayList<>(); + // change the maxLookBack from infinite to some interval, which allows to simulate the mutation + // beyond the maxLookBack window. + long maxLookbackInMills = 10 * 1000; + rebuildScanner.setMaxLookBackInMills(maxLookbackInMills); + + Put put = new Put(indexRowKey1Bytes); + Cell cell = getNewCell(indexRowKey1Bytes, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, EnvironmentEdgeManager.currentTimeMillis(), Cell.Type.Put, + QueryConstants.VERIFIED_BYTES); + put.add(cell); + // This mutation is beyond maxLookBack, so add it to expectedMutations only. + expectedMutations.add(put); + + // advance the time of maxLookBack, so last mutation will be outside of maxLookBack, + // next mutation will be within maxLookBack + injectEdge.incrementValue(maxLookbackInMills); + put = new Put(indexRowKey1Bytes); + cell = getNewCell(indexRowKey1Bytes, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, EnvironmentEdgeManager.currentTimeMillis(), Cell.Type.Put, + QueryConstants.VERIFIED_BYTES); + put.add(cell); + // This mutation is in both expectedMutations and actualMutations, as it is within the + // maxLookBack, so it will not get chance to be compacted away + expectedMutations.add(put); + actualMutations.add(put); + Result actualMutationsScanResult = Result.create(Arrays.asList(cell)); + + Map> indexKeyToMutationMap = Maps.newTreeMap((Bytes.BYTES_COMPARATOR)); + indexKeyToMutationMap.put(indexRowKey1Bytes, expectedMutations); + when(rebuildScanner.prepareActualIndexMutations(any(Result.class))).thenReturn(actualMutations); + when(indexRow.getRow()).thenReturn(indexRowKey1Bytes); + injectEdge.incrementValue(1); + IndexToolVerificationResult.PhaseResult actualPR = + new IndexToolVerificationResult.PhaseResult(); + Collections.sort(indexKeyToMutationMap.get(indexRow.getRow()), MUTATION_TS_DESC_COMPARATOR); + // Report this validation as a success + assertTrue(rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutations, + indexKeyToMutationMap.get(indexRow.getRow()), mostRecentIndexRowKeys, Collections.EMPTY_LIST, + actualPR, false)); + // validIndexRowCount = 1 + IndexToolVerificationResult.PhaseResult expectedPR = + new IndexToolVerificationResult.PhaseResult(1, 0, 0, 0, 0, 0, 0, 0, 0, 0); + assertTrue(actualPR.equals(expectedPR)); + } + + // Test the major compaction on index table only. + // All expected mutations are beyond the maxLookBack, and there are no matching ones in the actual + // list because of major compaction. + // We will report such row as an invalid beyond maxLookBack row. 
+ @Test + public void testVerifySingleIndexRow_compactionOnIndexTable_noExpectedMutationWithinMaxLookBack() + throws Exception { + String dataRowKey = "k1"; + byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1"); + List expectedMutations = new ArrayList<>(); + List actualMutations = new ArrayList<>(); + // change the maxLookBack from infinite to some interval, which allows to simulate the mutation + // beyond the maxLookBack window. + long maxLookbackInMills = 10 * 1000; + rebuildScanner.setMaxLookBackInMills(maxLookbackInMills); + + ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge(); + injectEdge.setValue(1); + EnvironmentEdgeManager.injectEdge(injectEdge); + + Put put = new Put(indexRowKey1Bytes); + Cell cell = getNewCell(indexRowKey1Bytes, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, EnvironmentEdgeManager.currentTimeMillis(), Cell.Type.Put, + VERIFIED_BYTES); + put.add(cell); + // This mutation is beyond maxLookBack, so add it to expectedMutations only. + expectedMutations.add(put); + + injectEdge.incrementValue(maxLookbackInMills); + put = new Put(indexRowKey1Bytes); + cell = getNewCell(indexRowKey1Bytes, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_BYTES, EnvironmentEdgeManager.currentTimeMillis(), Cell.Type.Put, + UNVERIFIED_BYTES); + put.add(cell); + // This mutation is actualMutations only, as it is an unverified put + actualMutations.add(put); + Result actualMutationsScanResult = Result.create(Arrays.asList(cell)); + + Map> indexKeyToMutationMap = Maps.newTreeMap((Bytes.BYTES_COMPARATOR)); + indexKeyToMutationMap.put(indexRowKey1Bytes, expectedMutations); + mostRecentIndexRowKeys = new TreeSet<>(Bytes.BYTES_COMPARATOR); + when(rebuildScanner.prepareActualIndexMutations(any(Result.class))).thenReturn(actualMutations); + when(indexRow.getRow()).thenReturn(indexRowKey1Bytes); + + injectEdge.incrementValue(1); + IndexToolVerificationResult.PhaseResult actualPR = + new IndexToolVerificationResult.PhaseResult(); + // Report this validation as a failure + assertFalse(rebuildScanner.verifySingleIndexRow(indexRow.getRow(), actualMutations, + expectedMutations, mostRecentIndexRowKeys, new ArrayList(), actualPR, true)); + // beyondMaxLookBackInvalidIndexRowCount = 1 + IndexToolVerificationResult.PhaseResult expectedPR = + new IndexToolVerificationResult.PhaseResult(0, 0, 0, 0, 0, 1, 0, 0, 0, 0); + assertTrue(actualPR.equals(expectedPR)); + } + + private static byte[] generateIndexRowKey(String dataRowKey, String dataVal) { + List idxKey = new ArrayList<>(); + if (dataVal != null && !dataVal.isEmpty()) + idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes + .asList(Bytes.toBytes(dataVal))); + idxKey.add(QueryConstants.SEPARATOR_BYTE); + idxKey.addAll(org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes + .asList(Bytes.toBytes(dataRowKey))); + return org.apache.phoenix.thirdparty.com.google.common.primitives.Bytes.toArray(idxKey); + } + + private IndexToolVerificationResult.PhaseResult getValidPhaseResult() { + return new IndexToolVerificationResult.PhaseResult(1, 0, 0, 0, 0, 0, 0, 0, 0, 0); + } + + private IndexToolVerificationResult.PhaseResult getInvalidPhaseResult() { + return new IndexToolVerificationResult.PhaseResult(0, 0, 0, 1, 0, 0, 0, 0, 0, 0); + } + + private void initializeLocalMockitoSetup(Map.Entry> entry, + TestType testType) throws IOException { + actualPR = new IndexToolVerificationResult.PhaseResult(); + byte[] indexKey = entry.getKey(); + 
when(indexRow.getRow()).thenReturn(indexKey); + actualMutationList = buildActualIndexMutationsList(testType); + when(rebuildScanner.prepareActualIndexMutations(indexRow)).thenReturn(actualMutationList); + } + + private List buildActualIndexMutationsList(TestType testType) { + List actualMutations = new ArrayList<>(); + actualMutations.addAll(indexKeyToMutationMap.get(indexRow.getRow())); + if (testType.equals(TestType.EXPIRED)) { + return actualMutations; + } + if (testType.toString().startsWith("VALID")) { + return getValidActualMutations(testType, actualMutations); + } + if (testType.toString().startsWith("INVALID")) { + return getInvalidActualMutations(testType, actualMutations); + } + return null; + } + + private List getValidActualMutations(TestType testType, + List actualMutations) { + List newActualMutations = new ArrayList<>(); + if (testType.equals(TestType.VALID_EXACT_MATCH)) { + return actualMutations; + } + if (testType.equals(TestType.VALID_MIX_MUTATIONS)) { + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + newActualMutations.add(getDeleteMutation(actualMutations.get(0), new Long(1))); + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + } + if (testType.equals(TestType.VALID_NEW_UNVERIFIED_MUTATIONS)) { + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), new Long(1))); + } + newActualMutations.addAll(actualMutations); + if (testType.equals(TestType.VALID_MORE_MUTATIONS)) { + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), null)); + newActualMutations.add(getDeleteMutation(actualMutations.get(0), null)); + newActualMutations.add(getDeleteMutation(actualMutations.get(0), new Long(1))); + newActualMutations.add(getUnverifiedPutMutation(actualMutations.get(0), new Long(1))); + } + if (testType.equals(TestType.VALID_EXTRA_CELL)) { + for (Mutation m : newActualMutations) { + if (m instanceof Put) { + List origList = m.getFamilyCellMap().firstEntry().getValue(); + Cell newCell = getNewCell(m.getRow(), CellUtil.cloneFamily(origList.get(0)), + Bytes.toBytes("EXTRACOL"), m.getTimestamp(), Cell.Type.Put, Bytes.toBytes("asdfg")); + byte[] fam = CellUtil.cloneFamily(origList.get(0)); + m.getFamilyCellMap().get(fam).add(newCell); + break; } - return newActualMutations; - } - - private void infiltrateCell(Cell c, List newCellList, TestType e) { - Cell newCell; - Cell emptyCell; - switch(e) { - case INVALID_COLUMN: - newCell = getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), - Bytes.toBytes(UNEXPECTED_COLUMN), - c.getTimestamp(), - Cell.Type.Put, Bytes.toBytes("zxcv")); - newCellList.add(newCell); - newCellList.add(c); - break; - case INVALID_CELL_VALUE: - if (CellUtil.matchingQualifier(c, EMPTY_COLUMN_BYTES)) { - newCell = getCellWithPut(c); - emptyCell = getVerifiedEmptyCell(c); - newCellList.add(newCell); - newCellList.add(emptyCell); - } else { - newCellList.add(c); - } - break; - case INVALID_EMPTY_CELL: - if (CellUtil.matchingQualifier(c, EMPTY_COLUMN_BYTES)) { - newCell = - getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), - CellUtil.cloneQualifier(c), c.getTimestamp(), - Cell.Type.Delete, VERIFIED_BYTES); - newCellList.add(newCell); - } else { - newCellList.add(c); - } - break; - case INVALID_EXTRA_CELL: - newCell = 
getCellWithPut(c); - emptyCell = getVerifiedEmptyCell(c); - newCellList.add(newCell); - newCellList.add(emptyCell); - newCellList.add(c); + } + } + return newActualMutations; + } + + private List getInvalidActualMutations(TestType testType, + List actualMutations) { + List newActualMutations = new ArrayList<>(); + newActualMutations.addAll(actualMutations); + for (Mutation m : actualMutations) { + newActualMutations.remove(m); + NavigableMap> familyCellMap = m.getFamilyCellMap(); + List cellList = familyCellMap.firstEntry().getValue(); + List newCellList = new ArrayList<>(); + byte[] fam = CellUtil.cloneFamily(cellList.get(0)); + for (Cell c : cellList) { + infiltrateCell(c, newCellList, testType); + } + familyCellMap.put(fam, newCellList); + Mutation newM; + if (m instanceof Put) { + newM = new Put(m.getRow(), m.getTimestamp(), familyCellMap); + } else { + newM = new Delete(m.getRow(), m.getTimestamp(), familyCellMap); + } + newActualMutations.add(newM); + } + return newActualMutations; + } + + private void infiltrateCell(Cell c, List newCellList, TestType e) { + Cell newCell; + Cell emptyCell; + switch (e) { + case INVALID_COLUMN: + newCell = getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), + Bytes.toBytes(UNEXPECTED_COLUMN), c.getTimestamp(), Cell.Type.Put, Bytes.toBytes("zxcv")); + newCellList.add(newCell); + newCellList.add(c); + break; + case INVALID_CELL_VALUE: + if (CellUtil.matchingQualifier(c, EMPTY_COLUMN_BYTES)) { + newCell = getCellWithPut(c); + emptyCell = getVerifiedEmptyCell(c); + newCellList.add(newCell); + newCellList.add(emptyCell); + } else { + newCellList.add(c); } - } - - private Cell getVerifiedEmptyCell(Cell c) { - return getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), - indexMaintainer.getEmptyKeyValueQualifier(), - c.getTimestamp(), - Cell.Type.Put, VERIFIED_BYTES); - } - - private Cell getCellWithPut(Cell c) { - return getNewCell(CellUtil.cloneRow(c), - CellUtil.cloneFamily(c), Bytes.toBytes(INCLUDED_COLUMN), - c.getTimestamp(), Cell.Type.Put, - Bytes.toBytes("zxcv")); - } - - private void expireThisRow() { - rebuildScanner.setIndexTableTTL(INDEX_TABLE_EXPIRY_SEC); - UnitTestClock expiryClock = new UnitTestClock(5000); - EnvironmentEdgeManager.injectEdge(expiryClock); - } - - private Mutation getDeleteMutation(Mutation orig, Long ts) { - Mutation m = new Delete(orig.getRow()); - List origList = orig.getFamilyCellMap().firstEntry().getValue(); - ts = ts == null ? EnvironmentEdgeManager.currentTimeMillis() : ts; - Cell c = getNewPutCell(orig, origList, ts, Cell.Type.DeleteFamilyVersion); - Cell empty = getEmptyCell(orig, origList, ts, Cell.Type.Put, true); - byte[] fam = CellUtil.cloneFamily(origList.get(0)); - List famCells = Lists.newArrayList(); - m.getFamilyCellMap().put(fam, famCells); - famCells.add(c); - famCells.add(empty); - return m; - } - - private Mutation getUnverifiedPutMutation(Mutation orig, Long ts) { - Mutation m = new Put(orig.getRow()); - if (orig.getAttributesMap() != null) { - for (Map.Entry entry : orig.getAttributesMap().entrySet()) { - m.setAttribute(entry.getKey(), entry.getValue()); - } + break; + case INVALID_EMPTY_CELL: + if (CellUtil.matchingQualifier(c, EMPTY_COLUMN_BYTES)) { + newCell = getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), + CellUtil.cloneQualifier(c), c.getTimestamp(), Cell.Type.Delete, VERIFIED_BYTES); + newCellList.add(newCell); + } else { + newCellList.add(c); } - List origList = orig.getFamilyCellMap().firstEntry().getValue(); - ts = ts == null ? 
EnvironmentEdgeManager.currentTimeMillis() : ts; - Cell c = getNewPutCell(orig, origList, ts, Cell.Type.Put); - Cell empty = getEmptyCell(orig, origList, ts, Cell.Type.Put, false); - byte[] fam = CellUtil.cloneFamily(origList.get(0)); - List famCells = Lists.newArrayList(); - m.getFamilyCellMap().put(fam, famCells); - famCells.add(c); - famCells.add(empty); - return m; - } - - private Cell getEmptyCell(Mutation orig, List origList, Long ts, Cell.Type type, - boolean verified) { - return getNewCell(orig.getRow(), CellUtil.cloneFamily(origList.get(0)), - indexMaintainer.getEmptyKeyValueQualifier(), - ts, type, verified ? VERIFIED_BYTES : UNVERIFIED_BYTES); - } - - private Cell getNewPutCell(Mutation orig, List origList, Long ts, Cell.Type type) { - return getNewCell(orig.getRow(), - CellUtil.cloneFamily(origList.get(0)), Bytes.toBytes(INCLUDED_COLUMN), - ts, type, Bytes.toBytes("asdfg")); - } - - private Cell getNewCell(byte[] row, byte[] family, byte[] qualifier, - long timestamp, Cell.Type type, byte[] value) { - return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row). - setFamily(family).setQualifier(qualifier). - setTimestamp(timestamp).setType(type). - setValue(value).build(); - } + break; + case INVALID_EXTRA_CELL: + newCell = getCellWithPut(c); + emptyCell = getVerifiedEmptyCell(c); + newCellList.add(newCell); + newCellList.add(emptyCell); + newCellList.add(c); + } + } + + private Cell getVerifiedEmptyCell(Cell c) { + return getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), + indexMaintainer.getEmptyKeyValueQualifier(), c.getTimestamp(), Cell.Type.Put, VERIFIED_BYTES); + } + + private Cell getCellWithPut(Cell c) { + return getNewCell(CellUtil.cloneRow(c), CellUtil.cloneFamily(c), Bytes.toBytes(INCLUDED_COLUMN), + c.getTimestamp(), Cell.Type.Put, Bytes.toBytes("zxcv")); + } + + private void expireThisRow() { + rebuildScanner.setIndexTableTTL(INDEX_TABLE_EXPIRY_SEC); + UnitTestClock expiryClock = new UnitTestClock(5000); + EnvironmentEdgeManager.injectEdge(expiryClock); + } + + private Mutation getDeleteMutation(Mutation orig, Long ts) { + Mutation m = new Delete(orig.getRow()); + List origList = orig.getFamilyCellMap().firstEntry().getValue(); + ts = ts == null ? EnvironmentEdgeManager.currentTimeMillis() : ts; + Cell c = getNewPutCell(orig, origList, ts, Cell.Type.DeleteFamilyVersion); + Cell empty = getEmptyCell(orig, origList, ts, Cell.Type.Put, true); + byte[] fam = CellUtil.cloneFamily(origList.get(0)); + List famCells = Lists.newArrayList(); + m.getFamilyCellMap().put(fam, famCells); + famCells.add(c); + famCells.add(empty); + return m; + } + + private Mutation getUnverifiedPutMutation(Mutation orig, Long ts) { + Mutation m = new Put(orig.getRow()); + if (orig.getAttributesMap() != null) { + for (Map.Entry entry : orig.getAttributesMap().entrySet()) { + m.setAttribute(entry.getKey(), entry.getValue()); + } + } + List origList = orig.getFamilyCellMap().firstEntry().getValue(); + ts = ts == null ? 
EnvironmentEdgeManager.currentTimeMillis() : ts; + Cell c = getNewPutCell(orig, origList, ts, Cell.Type.Put); + Cell empty = getEmptyCell(orig, origList, ts, Cell.Type.Put, false); + byte[] fam = CellUtil.cloneFamily(origList.get(0)); + List famCells = Lists.newArrayList(); + m.getFamilyCellMap().put(fam, famCells); + famCells.add(c); + famCells.add(empty); + return m; + } + + private Cell getEmptyCell(Mutation orig, List origList, Long ts, Cell.Type type, + boolean verified) { + return getNewCell(orig.getRow(), CellUtil.cloneFamily(origList.get(0)), + indexMaintainer.getEmptyKeyValueQualifier(), ts, type, + verified ? VERIFIED_BYTES : UNVERIFIED_BYTES); + } + + private Cell getNewPutCell(Mutation orig, List origList, Long ts, Cell.Type type) { + return getNewCell(orig.getRow(), CellUtil.cloneFamily(origList.get(0)), + Bytes.toBytes(INCLUDED_COLUMN), ts, type, Bytes.toBytes("asdfg")); + } + + private Cell getNewCell(byte[] row, byte[] family, byte[] qualifier, long timestamp, + Cell.Type type, byte[] value) { + return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier).setTimestamp(timestamp).setType(type).setValue(value).build(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/automated/MRJobSubmitterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/automated/MRJobSubmitterTest.java index 3a4de4c0cd5..87f2cf59494 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/index/automated/MRJobSubmitterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/index/automated/MRJobSubmitterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,120 +34,113 @@ public class MRJobSubmitterTest { - private Map candidateJobs = - new LinkedHashMap(); - private Set submittedJobs = new HashSet(); - - @Before - public void prepare() { - PhoenixAsyncIndex index1 = new PhoenixAsyncIndex(); - index1.setDataTableName("DT1"); - index1.setTableName("IT1"); - index1.setTableSchem("NEW_SCHEM1"); - index1.setIndexType(IndexType.LOCAL); - - candidateJobs.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - index1.getTableSchem(), index1.getDataTableName(), index1.getTableName()), index1); - - PhoenixAsyncIndex index2 = new PhoenixAsyncIndex(); - index2.setDataTableName("DT2"); - index2.setTableName("IT2"); - index2.setTableSchem("NEW_SCHEM2"); - index2.setIndexType(IndexType.LOCAL); - - candidateJobs.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - index2.getTableSchem(), index2.getDataTableName(), index2.getTableName()), index2); + private Map candidateJobs = + new LinkedHashMap(); + private Set submittedJobs = new HashSet(); + + @Before + public void prepare() { + PhoenixAsyncIndex index1 = new PhoenixAsyncIndex(); + index1.setDataTableName("DT1"); + index1.setTableName("IT1"); + index1.setTableSchem("NEW_SCHEM1"); + index1.setIndexType(IndexType.LOCAL); + + candidateJobs.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, index1.getTableSchem(), + index1.getDataTableName(), index1.getTableName()), index1); + + PhoenixAsyncIndex index2 = new PhoenixAsyncIndex(); + index2.setDataTableName("DT2"); + index2.setTableName("IT2"); + index2.setTableSchem("NEW_SCHEM2"); + 
index2.setIndexType(IndexType.LOCAL); + + candidateJobs.put(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, index2.getTableSchem(), + index2.getDataTableName(), index2.getTableName()), index2); + } + + @Test + public void testLocalIndexJobsSubmission() throws IOException { + + // Set the index type to LOCAL + for (String jobId : candidateJobs.keySet()) { + candidateJobs.get(jobId).setIndexType(IndexType.LOCAL); } - - @Test - public void testLocalIndexJobsSubmission() throws IOException { - - // Set the index type to LOCAL - for (String jobId : candidateJobs.keySet()) { - candidateJobs.get(jobId).setIndexType(IndexType.LOCAL); - } - PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); - Set jobsToSubmit = - submitter.getJobsToSubmit(candidateJobs, submittedJobs); - assertEquals(2, jobsToSubmit.size()); - } - - @Test - public void testIndexJobsName() throws IOException { - // Verify index job name contains schem name, not only table name. - PhoenixAsyncIndex index = new PhoenixAsyncIndex(); - index.setDataTableName("MyDataTable"); - index.setTableName("MyTableName"); - index.setTableSchem("MySchem"); - index.setIndexType(IndexType.LOCAL); - - String jobName = String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - index.getTableSchem(), index.getDataTableName(), index.getTableName()); - assertEquals("PHOENIX_MySchem.MyDataTable_INDX_MyTableName", jobName); - } - - @Test - public void testGlobalIndexJobsForSubmission() throws IOException { - - // Set the index type to GLOBAL - for (String jobId : candidateJobs.keySet()) { - candidateJobs.get(jobId).setIndexType(IndexType.GLOBAL); - } - PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); - Set jobsToSubmit = - submitter.getJobsToSubmit(candidateJobs, submittedJobs); - assertEquals(2, jobsToSubmit.size()); - assertEquals(true, jobsToSubmit.containsAll(candidateJobs.values())); - } - - @Test - public void testSkipSubmittedJob() throws IOException { - PhoenixAsyncIndex[] jobs = new PhoenixAsyncIndex[candidateJobs.size()]; - candidateJobs.values().toArray(jobs); - - // Mark one job as running - submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - jobs[0].getTableSchem(), jobs[0].getDataTableName(), jobs[0].getTableName())); - - PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); - Set jobsToSubmit = - submitter.getJobsToSubmit(candidateJobs, submittedJobs); - - // Should not contain the running job - assertEquals(1, jobsToSubmit.size()); - assertEquals(false, jobsToSubmit.containsAll(candidateJobs.values())); - assertEquals(true, jobsToSubmit.contains(jobs[1])); - } - - @Test - public void testSkipAllSubmittedJobs() throws IOException { - PhoenixAsyncIndex[] jobs = new PhoenixAsyncIndex[candidateJobs.size()]; - candidateJobs.values().toArray(jobs); - - // Mark all the candidate jobs as running/in-progress - submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - jobs[0].getTableSchem(), jobs[0].getDataTableName(), jobs[0].getTableName())); - submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - jobs[1].getTableSchem(), jobs[1].getDataTableName(), jobs[1].getTableName())); - - PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); - Set jobsToSubmit = - submitter.getJobsToSubmit(candidateJobs, submittedJobs); - assertEquals(0, jobsToSubmit.size()); - } - - @Test - public void testNoJobsToSubmit() throws IOException { - // Clear candidate jobs - candidateJobs.clear(); - // Add some dummy running jobs to the submitted list - 
submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - "s1", "d1", "i1")); - submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, - "s2", "d2", "i2")); - PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); - Set jobsToSubmit = - submitter.getJobsToSubmit(candidateJobs, submittedJobs); - assertEquals(0, jobsToSubmit.size()); + PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); + Set jobsToSubmit = submitter.getJobsToSubmit(candidateJobs, submittedJobs); + assertEquals(2, jobsToSubmit.size()); + } + + @Test + public void testIndexJobsName() throws IOException { + // Verify index job name contains schem name, not only table name. + PhoenixAsyncIndex index = new PhoenixAsyncIndex(); + index.setDataTableName("MyDataTable"); + index.setTableName("MyTableName"); + index.setTableSchem("MySchem"); + index.setIndexType(IndexType.LOCAL); + + String jobName = String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, index.getTableSchem(), + index.getDataTableName(), index.getTableName()); + assertEquals("PHOENIX_MySchem.MyDataTable_INDX_MyTableName", jobName); + } + + @Test + public void testGlobalIndexJobsForSubmission() throws IOException { + + // Set the index type to GLOBAL + for (String jobId : candidateJobs.keySet()) { + candidateJobs.get(jobId).setIndexType(IndexType.GLOBAL); } -} \ No newline at end of file + PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); + Set jobsToSubmit = submitter.getJobsToSubmit(candidateJobs, submittedJobs); + assertEquals(2, jobsToSubmit.size()); + assertEquals(true, jobsToSubmit.containsAll(candidateJobs.values())); + } + + @Test + public void testSkipSubmittedJob() throws IOException { + PhoenixAsyncIndex[] jobs = new PhoenixAsyncIndex[candidateJobs.size()]; + candidateJobs.values().toArray(jobs); + + // Mark one job as running + submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, jobs[0].getTableSchem(), + jobs[0].getDataTableName(), jobs[0].getTableName())); + + PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); + Set jobsToSubmit = submitter.getJobsToSubmit(candidateJobs, submittedJobs); + + // Should not contain the running job + assertEquals(1, jobsToSubmit.size()); + assertEquals(false, jobsToSubmit.containsAll(candidateJobs.values())); + assertEquals(true, jobsToSubmit.contains(jobs[1])); + } + + @Test + public void testSkipAllSubmittedJobs() throws IOException { + PhoenixAsyncIndex[] jobs = new PhoenixAsyncIndex[candidateJobs.size()]; + candidateJobs.values().toArray(jobs); + + // Mark all the candidate jobs as running/in-progress + submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, jobs[0].getTableSchem(), + jobs[0].getDataTableName(), jobs[0].getTableName())); + submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, jobs[1].getTableSchem(), + jobs[1].getDataTableName(), jobs[1].getTableName())); + + PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); + Set jobsToSubmit = submitter.getJobsToSubmit(candidateJobs, submittedJobs); + assertEquals(0, jobsToSubmit.size()); + } + + @Test + public void testNoJobsToSubmit() throws IOException { + // Clear candidate jobs + candidateJobs.clear(); + // Add some dummy running jobs to the submitted list + submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, "s1", "d1", "i1")); + submittedJobs.add(String.format(IndexTool.INDEX_JOB_NAME_TEMPLATE, "s2", "d2", "i2")); + PhoenixMRJobSubmitter submitter = new PhoenixMRJobSubmitter(); + Set jobsToSubmit = 
submitter.getJobsToSubmit(candidateJobs, submittedJobs); + assertEquals(0, jobsToSubmit.size()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java index ddd82417490..b1badadb0f2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/AggregateResultScannerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,41 +37,40 @@ import org.apache.phoenix.util.TestUtil; import org.junit.Test; - - public class AggregateResultScannerTest extends BaseConnectionlessQueryTest { - private final static byte[] A = Bytes.toBytes("a"); - private final static byte[] B = Bytes.toBytes("b"); + private final static byte[] A = Bytes.toBytes("a"); + private final static byte[] B = Bytes.toBytes("b"); - @Test - public void testAggregatingMergeSort() throws Throwable { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), - }; - Tuple[] results4 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), - }; - final Listresults = new ArrayList(Arrays.asList(new PeekingResultIterator[] { - new MaterializedResultIterator(Arrays.asList(results1)), - new MaterializedResultIterator(Arrays.asList(results2)), - new MaterializedResultIterator(Arrays.asList(results3)), - new MaterializedResultIterator(Arrays.asList(results4))})); + @Test + public void testAggregatingMergeSort() throws Throwable { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), }; + Tuple[] results4 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(1L))), }; + final List results = new ArrayList(Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)), + new MaterializedResultIterator(Arrays.asList(results4)) })); - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, 
SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(2L))), - }; + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, PLong.INSTANCE.toBytes(2L))), }; - ResultIterators iterators = new MaterializedResultIterators(results); - ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); - ResultIterator scanner = new GroupedAggregatingResultIterator(new MergeSortRowKeyResultIterator(iterators), aggregators); - AssertResults.assertResults(scanner, expectedResults); - } -} \ No newline at end of file + ResultIterators iterators = new MaterializedResultIterators(results); + ClientAggregators aggregators = + TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)); + ResultIterator scanner = new GroupedAggregatingResultIterator( + new MergeSortRowKeyResultIterator(iterators), aggregators); + AssertResults.assertResults(scanner, expectedResults); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ConcatResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ConcatResultIteratorTest.java index 106aa9d30aa..d39e53eb56b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ConcatResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ConcatResultIteratorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,106 +29,113 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.util.AssertResults; import org.junit.Test; - - public class ConcatResultIteratorTest { - private final static byte[] A = Bytes.toBytes("a"); - private final static byte[] B = Bytes.toBytes("b"); - private final static byte[] C = Bytes.toBytes("c"); - private final static byte[] D = Bytes.toBytes("d"); - - @Test - public void testConcat() throws Throwable { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), - }; - final Listresults = Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new 
MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))}); - ResultIterators iterators = new MaterializedResultIterators(results); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))), - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), - }; - - ResultIterator scanner = new ConcatResultIterator(iterators); - AssertResults.assertResults(scanner, expectedResults); - } - - @Test - public void testMergeSort() throws Throwable { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), - }; - final Listresults = new ArrayList(Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))})); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), - }; - - ResultIterators iterators = new ResultIterators() { - - @Override - public List getIterators() throws SQLException { - return results; - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - - } - - @Override - public int size() { - return results.size(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public void close() throws SQLException { - } - }; - ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators); - AssertResults.assertResults(scanner, expectedResults); - } + private final static byte[] A = Bytes.toBytes("a"); + private final static byte[] B = Bytes.toBytes("b"); + private final static byte[] C = Bytes.toBytes("c"); + private final static byte[] D = Bytes.toBytes("d"); + + @Test + public void testConcat() throws Throwable { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, 
Bytes.toBytes(4))), }; + final List results = Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) }); + ResultIterators iterators = new MaterializedResultIterators(results); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))), + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), }; + + ResultIterator scanner = new ConcatResultIterator(iterators); + AssertResults.assertResults(scanner, expectedResults); + } + + @Test + public void testMergeSort() throws Throwable { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), }; + final List results = new ArrayList(Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) })); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))), }; + + ResultIterators iterators = new ResultIterators() { + + @Override + public List getIterators() throws SQLException { + return results; + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + + } + + @Override + public int size() { + return results.size(); + } + + @Override + public void explain(List planSteps) { + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public void close() throws SQLException { + } + }; + ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators); + AssertResults.assertResults(scanner, expectedResults); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/DistinctAggregatingResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/DistinctAggregatingResultIteratorTest.java index 65b73820185..fb38bba165e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/DistinctAggregatingResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/DistinctAggregatingResultIteratorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,238 +36,204 @@ import org.mockito.stubbing.Answer; public class DistinctAggregatingResultIteratorTest { - private final static byte[] cf = Bytes.toBytes("cf"); - private final static byte[] cq1 = Bytes.toBytes("cq1"); - private final static byte[] cq2 = Bytes.toBytes("cq2"); - private final static byte[] cq3 = Bytes.toBytes("cq3"); - private final static byte[] rowKey1 = Bytes.toBytes("rowKey1"); - private final static byte[] rowKey2 = Bytes.toBytes("rowKey2"); - private final static byte[] rowKey3 = Bytes.toBytes("rowKey3"); - private final static byte[] rowKey4 = Bytes.toBytes("rowKey4"); - private final static byte[] rowKey5 = Bytes.toBytes("rowKey4"); - - @Test - public void testDistinctAggregatingResultIterator() throws Throwable { - //Test with duplicate - Tuple[] input1 = new Tuple[] { - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), - new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), - new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), - new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))), - null - - }; - - Tuple[] result1 = new Tuple[] { - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), - new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), - new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), - - new MultiKeyValueTuple( - Arrays. 
asList( - new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), - new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))) - - }; - RowProjector mockRowProjector = Mockito.mock(RowProjector.class); - Mockito.when(mockRowProjector.getColumnCount()).thenReturn(2); - - KeyValueColumnExpression columnExpression1 = new KeyValueColumnExpression(cf, cq1); - KeyValueColumnExpression columnExpression2 = new KeyValueColumnExpression(cf, cq2); - final ColumnProjector mockColumnProjector1 = Mockito.mock(ColumnProjector.class); - Mockito.when(mockColumnProjector1.getExpression()).thenReturn(columnExpression1); - final ColumnProjector mockColumnProjector2 = Mockito.mock(ColumnProjector.class); - Mockito.when(mockColumnProjector2.getExpression()).thenReturn(columnExpression2); - - Mockito.when(mockRowProjector.getColumnProjectors()).thenAnswer( - new Answer >() { - @Override - public List answer(InvocationOnMock invocation) throws Throwable { - return Arrays.asList(mockColumnProjector1,mockColumnProjector2); - } - }); - - assertResults( - input1, result1, mockRowProjector); - - //Test with duplicate and null - Tuple[] input2 = new Tuple[] { - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), - new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), - new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), - new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), - - null - - }; - - Tuple[] result2 = new Tuple[] { - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), - new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), - new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), - - new MultiKeyValueTuple( - Arrays. 
asList( - new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), - new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), - new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), - - new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), - new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))) - - }; - assertResults( - input2, result2, mockRowProjector); - - //Test with no duplicate - int n = 100; - Tuple[] input3 = new Tuple[n + 1]; - for(int i = 0; i <= n; i++) { - byte[] rowKey = PInteger.INSTANCE.toBytes(i); - input3[i] = new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey, cf, cq1, PInteger.INSTANCE.toBytes(i + 1)), - new KeyValue(rowKey, cf, cq2, PInteger.INSTANCE.toBytes(i + 2)))); + private final static byte[] cf = Bytes.toBytes("cf"); + private final static byte[] cq1 = Bytes.toBytes("cq1"); + private final static byte[] cq2 = Bytes.toBytes("cq2"); + private final static byte[] cq3 = Bytes.toBytes("cq3"); + private final static byte[] rowKey1 = Bytes.toBytes("rowKey1"); + private final static byte[] rowKey2 = Bytes.toBytes("rowKey2"); + private final static byte[] rowKey3 = Bytes.toBytes("rowKey3"); + private final static byte[] rowKey4 = Bytes.toBytes("rowKey4"); + private final static byte[] rowKey5 = Bytes.toBytes("rowKey4"); + + @Test + public void testDistinctAggregatingResultIterator() throws Throwable { + // Test with duplicate + Tuple[] input1 = new Tuple[] { + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), + new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), + new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), + new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))), + null + + }; + + Tuple[] result1 = new Tuple[] { + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), + new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. 
asList(new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), + new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), + new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))) + + }; + RowProjector mockRowProjector = Mockito.mock(RowProjector.class); + Mockito.when(mockRowProjector.getColumnCount()).thenReturn(2); + + KeyValueColumnExpression columnExpression1 = new KeyValueColumnExpression(cf, cq1); + KeyValueColumnExpression columnExpression2 = new KeyValueColumnExpression(cf, cq2); + final ColumnProjector mockColumnProjector1 = Mockito.mock(ColumnProjector.class); + Mockito.when(mockColumnProjector1.getExpression()).thenReturn(columnExpression1); + final ColumnProjector mockColumnProjector2 = Mockito.mock(ColumnProjector.class); + Mockito.when(mockColumnProjector2.getExpression()).thenReturn(columnExpression2); + + Mockito.when(mockRowProjector.getColumnProjectors()) + .thenAnswer(new Answer>() { + @Override + public List answer(InvocationOnMock invocation) throws Throwable { + return Arrays.asList(mockColumnProjector1, mockColumnProjector2); } - input3[n] = null; - Tuple[] result3 = Arrays.copyOfRange(input3, 0, n); - assertResults( - input3, result3, mockRowProjector); - - //Test with all duplicate - Tuple[] input4 = new Tuple[n + 1]; - for(int i = 0; i <= n; i++) { - byte[] rowKey = PInteger.INSTANCE.toBytes(1); - input4[i] = new MultiKeyValueTuple( - Arrays. asList( - new KeyValue(rowKey, cf, cq1, PInteger.INSTANCE.toBytes(2)), - new KeyValue(rowKey, cf, cq2, PInteger.INSTANCE.toBytes(3)))); - } - input4[n] = null; - Tuple[] result4 = new Tuple[] {input4[0]}; - assertResults( - input4, result4, mockRowProjector); + }); - } + assertResults(input1, result1, mockRowProjector); + + // Test with duplicate and null + Tuple[] input2 = new Tuple[] { + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), + new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), + new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), + new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))), + + new MultiKeyValueTuple( + Arrays. 
asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), - private void assertResults(Tuple[] input, Tuple[] result, RowProjector rowProjector) throws Exception { - AggregatingResultIterator mockAggregatingResultIterator = - Mockito.mock(AggregatingResultIterator.class); - Mockito.when(mockAggregatingResultIterator.next()).thenReturn( - input[0], Arrays.copyOfRange(input, 1, input.length)); + null - DistinctAggregatingResultIterator distinctAggregatingResultIterator = - new DistinctAggregatingResultIterator(mockAggregatingResultIterator, rowProjector); - AssertResults.assertResults( - distinctAggregatingResultIterator, result); + }; + + Tuple[] result2 = new Tuple[] { + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq2, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey3, cf, cq1, PInteger.INSTANCE.toBytes(4)), + new KeyValue(rowKey3, cf, cq2, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey1, cf, cq2, PInteger.INSTANCE.toBytes(1)), + new KeyValue(rowKey1, cf, cq1, PInteger.INSTANCE.toBytes(2)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey4, cf, cq1, PInteger.INSTANCE.toBytes(7)), + new KeyValue(rowKey4, cf, cq2, PInteger.INSTANCE.toBytes(8)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey2, cf, cq1, PInteger.INSTANCE.toBytes(11)), + new KeyValue(rowKey2, cf, cq3, PInteger.INSTANCE.toBytes(12)))), + + new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey5, cf, cq1, PInteger.INSTANCE.toBytes(90)), + new KeyValue(rowKey5, cf, cq2, PInteger.INSTANCE.toBytes(100)))) + + }; + assertResults(input2, result2, mockRowProjector); + + // Test with no duplicate + int n = 100; + Tuple[] input3 = new Tuple[n + 1]; + for (int i = 0; i <= n; i++) { + byte[] rowKey = PInteger.INSTANCE.toBytes(i); + input3[i] = new MultiKeyValueTuple( + Arrays. asList(new KeyValue(rowKey, cf, cq1, PInteger.INSTANCE.toBytes(i + 1)), + new KeyValue(rowKey, cf, cq2, PInteger.INSTANCE.toBytes(i + 2)))); + } + input3[n] = null; + Tuple[] result3 = Arrays.copyOfRange(input3, 0, n); + assertResults(input3, result3, mockRowProjector); + + // Test with all duplicate + Tuple[] input4 = new Tuple[n + 1]; + for (int i = 0; i <= n; i++) { + byte[] rowKey = PInteger.INSTANCE.toBytes(1); + input4[i] = new MultiKeyValueTuple( + Arrays. 
asList(new KeyValue(rowKey, cf, cq1, PInteger.INSTANCE.toBytes(2)), + new KeyValue(rowKey, cf, cq2, PInteger.INSTANCE.toBytes(3)))); } + input4[n] = null; + Tuple[] result4 = new Tuple[] { input4[0] }; + assertResults(input4, result4, mockRowProjector); + + } + + private void assertResults(Tuple[] input, Tuple[] result, RowProjector rowProjector) + throws Exception { + AggregatingResultIterator mockAggregatingResultIterator = + Mockito.mock(AggregatingResultIterator.class); + Mockito.when(mockAggregatingResultIterator.next()).thenReturn(input[0], + Arrays.copyOfRange(input, 1, input.length)); + + DistinctAggregatingResultIterator distinctAggregatingResultIterator = + new DistinctAggregatingResultIterator(mockAggregatingResultIterator, rowProjector); + AssertResults.assertResults(distinctAggregatingResultIterator, result); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/MaterializedResultIterators.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/MaterializedResultIterators.java index 6efc2119b8f..961c4fe01ef 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/MaterializedResultIterators.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/MaterializedResultIterators.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,52 +22,49 @@ import java.util.List; import org.apache.hadoop.hbase.client.Scan; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.query.KeyRange; /** - * * ResultIteraors implementation backed by in-memory list of PeekingResultIterator - * */ public class MaterializedResultIterators implements ResultIterators { - private final List results; + private final List results; - public MaterializedResultIterators(List results) { - this.results = results; - } + public MaterializedResultIterators(List results) { + this.results = results; + } - @Override - public List getIterators() throws SQLException { - return results; - } + @Override + public List getIterators() throws SQLException { + return results; + } - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } - @Override - public int size() { - return results.size(); - } + @Override + public int size() { + return results.size(); + } - @Override - public void explain(List planSteps) { - } + @Override + public void explain(List planSteps) { + } - @Override - public List getSplits() { - return Collections.emptyList(); - } + @Override + public List getSplits() { + return Collections.emptyList(); + } - @Override - public List> getScans() { - return Collections.emptyList(); - } + @Override + public List> getScans() { + return Collections.emptyList(); + } - @Override - public void close() throws SQLException { - } -} \ No newline at end of file + @Override + public void close() throws SQLException { + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/MergeSortResultIteratorTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/MergeSortResultIteratorTest.java index 387940553ee..ade2811d5a5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/MergeSortResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/MergeSortResultIteratorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,8 +29,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.compile.ExplainPlanAttributes - .ExplainPlanAttributesBuilder; +import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.tuple.SingleKeyValueTuple; import org.apache.phoenix.schema.tuple.Tuple; @@ -38,181 +37,171 @@ import org.junit.Test; public class MergeSortResultIteratorTest { - private final static byte[] A = Bytes.toBytes("a"); - private final static byte[] B = Bytes.toBytes("b"); - - @Test - public void testMergeSort() throws Throwable { - Tuple[] results1 = - new Tuple[] { new SingleKeyValueTuple( - new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; - Tuple[] results2 = - new Tuple[] { new SingleKeyValueTuple( - new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) }; - Tuple[] results3 = - new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), }; - - final List results = - new ArrayList(Arrays.asList(new PeekingResultIterator[] { - new MaterializedResultIterator(Arrays.asList(results1)), - new MaterializedResultIterator(Arrays.asList(results2)), - new MaterializedResultIterator(Arrays.asList(results3)) })); - - Tuple[] expectedResults = - new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), }; - - ResultIterators iterators = new ResultIterators() { - - @Override - public List getIterators() throws SQLException { - return results; - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - @Override - public int size() { - return results.size(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public void close() throws SQLException { - } - }; - ResultIterators reverseIterators = new ResultIterators() { - - @Override - public List getIterators() throws SQLException { - return results; - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - @Override - public 
int size() { - return results.size(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public void close() throws SQLException { - } - }; - ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators); - AssertResults.assertResults(scanner, expectedResults); - } - - @Test - public void testReverseMergeSort() throws Throwable { - Tuple[] results1 = - new Tuple[] { new SingleKeyValueTuple( - new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; - Tuple[] results2 = - new Tuple[] { new SingleKeyValueTuple( - new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) }; - Tuple[] results3 = - new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), }; - final List results = - new ArrayList(Arrays.asList(new PeekingResultIterator[] { - new MaterializedResultIterator(Arrays.asList(results1)), - new MaterializedResultIterator(Arrays.asList(results2)), - new MaterializedResultIterator(Arrays.asList(results3)) })); - Tuple[] expectedResults = - new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, - Bytes.toBytes(1))), }; - ResultIterators iterators = new ResultIterators() { - - @Override - public List getIterators() throws SQLException { - return results; - } - - @Override - public void explain(List planSteps, - ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { - } - - @Override - public int size() { - return results.size(); - } - - @Override - public void explain(List planSteps) { - } - - @Override - public List getSplits() { - return Collections.emptyList(); - } - - @Override - public List> getScans() { - return Collections.emptyList(); - } - - @Override - public void close() throws SQLException { - } - }; - ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators, 0, true); - AssertResults.assertResults(scanner, expectedResults); - } + private final static byte[] A = Bytes.toBytes("a"); + private final static byte[] B = Bytes.toBytes("b"); + + @Test + public void testMergeSort() throws Throwable { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + + final List results = new ArrayList(Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) })); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, 
SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + + ResultIterators iterators = new ResultIterators() { + + @Override + public List getIterators() throws SQLException { + return results; + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + + @Override + public int size() { + return results.size(); + } + + @Override + public void explain(List planSteps) { + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public void close() throws SQLException { + } + }; + ResultIterators reverseIterators = new ResultIterators() { + + @Override + public List getIterators() throws SQLException { + return results; + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + + @Override + public int size() { + return results.size(); + } + + @Override + public void explain(List planSteps) { + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return Collections.emptyList(); + } + + @Override + public void close() throws SQLException { + } + }; + ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators); + AssertResults.assertResults(scanner, expectedResults); + } + + @Test + public void testReverseMergeSort() throws Throwable { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + final List results = new ArrayList(Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) })); + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + ResultIterators iterators = new ResultIterators() { + + @Override + public List getIterators() throws SQLException { + return results; + } + + @Override + public void explain(List planSteps, + ExplainPlanAttributesBuilder explainPlanAttributesBuilder) { + } + + @Override + public int size() { + return results.size(); + } + + @Override + public void explain(List planSteps) { + } + + @Override + public List getSplits() { + return Collections.emptyList(); + } + + @Override + public List> getScans() { + return 
Collections.emptyList(); + } + + @Override + public void close() throws SQLException { + } + }; + ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators, 0, true); + AssertResults.assertResults(scanner, expectedResults); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/OrderedResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/OrderedResultIteratorTest.java index f3e30376ba3..14f4771a5d7 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/OrderedResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/OrderedResultIteratorTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,52 +43,52 @@ public class OrderedResultIteratorTest { @Test public void testNullIteratorOnClose() throws SQLException { - ResultIterator delegate = ResultIterator.EMPTY_ITERATOR; - List orderByExpressions = Collections.singletonList(null); - int thresholdBytes = Integer.MAX_VALUE; - boolean spoolingEnabled = true; - OrderedResultIterator iterator = - new OrderedResultIterator(delegate, orderByExpressions, spoolingEnabled, - thresholdBytes); - // Should not throw an exception - iterator.close(); - } - - @Test - public void testSpoolingBackwardCompatibility() { - RegionScanner s = Mockito.mock(RegionScanner.class); - RegionInfo regionInfo = Mockito.mock(RegionInfo.class); - Mockito.when(s.getRegionInfo()).thenReturn(regionInfo); - Scan scan = new Scan(); - Expression exp = LiteralExpression.newConstant(Boolean.TRUE); - OrderByExpression ex = OrderByExpression.createByCheckIfOrderByReverse(exp, false, false, false); - ScanPlan.serializeScanRegionObserverIntoScan(scan, 0, Arrays.asList(ex), 100); - // Check 5.1.0 & Check > 5.1.0 - ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.1.0")); - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + ResultIterator delegate = ResultIterator.EMPTY_ITERATOR; + List orderByExpressions = Collections.singletonList(null); + int thresholdBytes = Integer.MAX_VALUE; + boolean spoolingEnabled = true; + OrderedResultIterator iterator = + new OrderedResultIterator(delegate, orderByExpressions, spoolingEnabled, thresholdBytes); + // Should not throw an exception + iterator.close(); + } - ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.2.0")); - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); - // Check 4.15.0 Check > 4.15.0 - ScanUtil.setClientVersion(scan, 
VersionUtil.encodeVersion("4.15.0")); - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); - ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("4.15.1")); - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + @Test + public void testSpoolingBackwardCompatibility() { + RegionScanner s = Mockito.mock(RegionScanner.class); + RegionInfo regionInfo = Mockito.mock(RegionInfo.class); + Mockito.when(s.getRegionInfo()).thenReturn(regionInfo); + Scan scan = new Scan(); + Expression exp = LiteralExpression.newConstant(Boolean.TRUE); + OrderByExpression ex = + OrderByExpression.createByCheckIfOrderByReverse(exp, false, false, false); + ScanPlan.serializeScanRegionObserverIntoScan(scan, 0, Arrays.asList(ex), 100); + // Check 5.1.0 & Check > 5.1.0 + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.1.0")); + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); - // Check < 5.1 - ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.0.0")); - try { - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); - fail("Deserialize should fail for 5.0.0 since we didn't serialize thresholdBytes"); - } catch (IllegalArgumentException e) { - } - // Check < 4.15 - ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("4.14.0")); - try { - NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); - fail("Deserialize should fail for 4.14.0 since we didn't serialize thresholdBytes"); - } catch (IllegalArgumentException e) { - } + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.2.0")); + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + // Check 4.15.0 Check > 4.15.0 + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("4.15.0")); + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("4.15.1")); + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + // Check < 5.1 + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("5.0.0")); + try { + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + fail("Deserialize should fail for 5.0.0 since we didn't serialize thresholdBytes"); + } catch (IllegalArgumentException e) { } + // Check < 4.15 + ScanUtil.setClientVersion(scan, VersionUtil.encodeVersion("4.14.0")); + try { + NonAggregateRegionScannerFactory.deserializeFromScan(scan, s, false, 100); + fail("Deserialize should fail for 4.14.0 since we didn't serialize thresholdBytes"); + } catch (IllegalArgumentException e) { + } + + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIteratorTest.java index 347de78ca3c..d21e68f2f87 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/RowKeyOrderedAggregateResultIteratorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,115 +35,136 @@ import org.junit.Test; public class RowKeyOrderedAggregateResultIteratorTest extends BaseConnectionlessQueryTest { - private final static byte[] A = Bytes.toBytes("a"); - private final static byte[] B = Bytes.toBytes("b"); - private final static byte[] C = Bytes.toBytes("c"); - private final static byte[] D = Bytes.toBytes("d"); - - @Test - public void testNoSpan() throws Exception { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - }; - final Listresults = Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))}); - ResultIterators iterators = new MaterializedResultIterators(results); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - }; - - ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); - AssertResults.assertResults(scanner, expectedResults); - } - - @Test - public void testSpanThree() throws Exception { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(5L))), - }; - final Listresults = Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))}); - ResultIterators iterators = new MaterializedResultIterators(results); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(9L))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN, Bytes.toBytes(5L))), - }; - - ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); - AssertResults.assertResults(scanner, expectedResults); - } - - @Test - public void testSpanAll() throws Exception { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))) - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - }; - final Listresults = Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))}); - ResultIterators iterators = new MaterializedResultIterators(results); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(9L))), - }; - - ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); - AssertResults.assertResults(scanner, expectedResults); - } - - @Test - public void testSpanEnd() throws Exception { - Tuple[] results1 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), - }; - Tuple[] results2 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(5L))), - }; - Tuple[] results3 = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(6L))), - }; - final Listresults = Arrays.asList(new PeekingResultIterator[] {new MaterializedResultIterator(Arrays.asList(results1)), new MaterializedResultIterator(Arrays.asList(results2)), new MaterializedResultIterator(Arrays.asList(results3))}); - ResultIterators iterators = new MaterializedResultIterators(results); - - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), - new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), - new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(11L))), - }; - - ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); - AssertResults.assertResults(scanner, expectedResults); - } + private final static byte[] A = Bytes.toBytes("a"); + private final static byte[] B = Bytes.toBytes("b"); + private final static byte[] C = Bytes.toBytes("c"); + private final static 
byte[] D = Bytes.toBytes("d"); + + @Test + public void testNoSpan() throws Exception { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), }; + final List results = Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) }); + ResultIterators iterators = new MaterializedResultIterators(results); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), }; + + ClientAggregators aggregators = + TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); + AssertResults.assertResults(scanner, expectedResults); + } + + @Test + public void testSpanThree() throws Exception { + Tuple[] results1 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) }; + Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))) }; + Tuple[] results3 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(5L))), }; + final List results = Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) }); + ResultIterators iterators = new MaterializedResultIterators(results); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(9L))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(5L))), }; + + ClientAggregators aggregators = + TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); + AssertResults.assertResults(scanner, expectedResults); + } + + @Test + public void testSpanAll() throws Exception { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) }; + Tuple[] results2 = new Tuple[] { new 
SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))) }; + Tuple[] results3 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), }; + final List results = Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) }); + ResultIterators iterators = new MaterializedResultIterators(results); + + Tuple[] expectedResults = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(9L))), }; + + ClientAggregators aggregators = + TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); + AssertResults.assertResults(scanner, expectedResults); + } + + @Test + public void testSpanEnd() throws Exception { + Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))), }; + Tuple[] results2 = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(5L))), }; + Tuple[] results3 = new Tuple[] { new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(6L))), }; + final List results = Arrays + .asList(new PeekingResultIterator[] { new MaterializedResultIterator(Arrays.asList(results1)), + new MaterializedResultIterator(Arrays.asList(results2)), + new MaterializedResultIterator(Arrays.asList(results3)) }); + ResultIterators iterators = new MaterializedResultIterators(results); + + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))), + new SingleKeyValueTuple( + new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))), + new SingleKeyValueTuple( + new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(11L))), }; + + ClientAggregators aggregators = + TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators); + AssertResults.assertResults(scanner, expectedResults); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java index 0e5684cda0b..75cc7698287 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,43 +37,43 @@ import org.apache.phoenix.util.AssertResults; import org.junit.Test; - - public class SpoolingResultIteratorTest { - private final static byte[] A = Bytes.toBytes("a"); - private final static byte[] B = Bytes.toBytes("b"); + private final static byte[] A = Bytes.toBytes("a"); + private final static byte[] B = Bytes.toBytes("b"); + + private void testSpooling(int threshold, long maxSizeSpool) throws Throwable { + Tuple[] results = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; + PeekingResultIterator iterator = new MaterializedResultIterator(Arrays.asList(results)); - private void testSpooling(int threshold, long maxSizeSpool) throws Throwable { - Tuple[] results = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - }; - PeekingResultIterator iterator = new MaterializedResultIterator(Arrays.asList(results)); + Tuple[] expectedResults = new Tuple[] { + new SingleKeyValueTuple( + new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), + new SingleKeyValueTuple( + new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), }; - Tuple[] expectedResults = new Tuple[] { - new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))), - }; + MemoryManager memoryManager = new DelegatingMemoryManager(new GlobalMemoryManager(threshold)); + ResultIterator scanner = new SpoolingResultIterator(SpoolingMetricsHolder.NO_OP_INSTANCE, + new MemoryMetricsHolder(new ReadMetricQueue(false, LogLevel.OFF), ""), iterator, + memoryManager, threshold, maxSizeSpool, "/tmp"); + AssertResults.assertResults(scanner, expectedResults); + } - MemoryManager memoryManager = new DelegatingMemoryManager(new GlobalMemoryManager(threshold)); - ResultIterator scanner = new SpoolingResultIterator( - SpoolingMetricsHolder.NO_OP_INSTANCE, - new MemoryMetricsHolder(new ReadMetricQueue(false,LogLevel.OFF), ""), iterator, memoryManager, threshold, - maxSizeSpool, "/tmp"); - AssertResults.assertResults(scanner, expectedResults); - } + @Test + public void testInMemorySpooling() throws Throwable { + testSpooling(1024 * 1024, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES); + } - @Test - public void testInMemorySpooling() throws Throwable { - testSpooling(1024*1024, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES); - } - @Test - public void testOnDiskSpooling() throws Throwable { - testSpooling(1, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES); - } + @Test + public void testOnDiskSpooling() throws Throwable { + testSpooling(1, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES); + } - @Test(expected = SpoolTooBigToDiskException.class) - public void testFailToSpool() throws Throwable{ - testSpooling(1, 0L); - } + @Test(expected = SpoolTooBigToDiskException.class) + public void testFailToSpool() throws Throwable { + testSpooling(1, 0L); + } 
} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/TestingMapReduceParallelScanGrouper.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/TestingMapReduceParallelScanGrouper.java index 27e7c67e0a8..2df4eff6ea5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/TestingMapReduceParallelScanGrouper.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/TestingMapReduceParallelScanGrouper.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,48 +17,48 @@ */ package org.apache.phoenix.iterate; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.phoenix.compile.StatementContext; import java.sql.SQLException; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.phoenix.compile.StatementContext; + /** * ParallelScanGrouper implementation used for testing Phoenix-MapReduce Integration */ public class TestingMapReduceParallelScanGrouper extends MapReduceParallelScanGrouper { - private static final AtomicInteger numCallsToGetRegionBoundaries = new AtomicInteger(0); - private static final TestingMapReduceParallelScanGrouper INSTANCE = - new TestingMapReduceParallelScanGrouper(); + private static final AtomicInteger numCallsToGetRegionBoundaries = new AtomicInteger(0); + private static final TestingMapReduceParallelScanGrouper INSTANCE = + new TestingMapReduceParallelScanGrouper(); - public static TestingMapReduceParallelScanGrouper getInstance() { - return INSTANCE; - } + public static TestingMapReduceParallelScanGrouper getInstance() { + return INSTANCE; + } - @Override - public List getRegionBoundaries(StatementContext context, - byte[] tableName) throws SQLException { - List regionLocations = super.getRegionBoundaries(context, tableName); - numCallsToGetRegionBoundaries.incrementAndGet(); - return regionLocations; - } + @Override + public List getRegionBoundaries(StatementContext context, byte[] tableName) + throws SQLException { + List regionLocations = super.getRegionBoundaries(context, tableName); + numCallsToGetRegionBoundaries.incrementAndGet(); + return regionLocations; + } - @Override - public List getRegionBoundaries(StatementContext context, byte[] tableName, - byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { - List regionLocations = - super.getRegionBoundaries(context, tableName, startRegionBoundaryKey, - stopRegionBoundaryKey); - numCallsToGetRegionBoundaries.incrementAndGet(); - return regionLocations; - } + @Override + public List getRegionBoundaries(StatementContext context, byte[] tableName, + byte[] startRegionBoundaryKey, byte[] stopRegionBoundaryKey) throws SQLException { + List regionLocations = + super.getRegionBoundaries(context, tableName, startRegionBoundaryKey, stopRegionBoundaryKey); + numCallsToGetRegionBoundaries.incrementAndGet(); + return regionLocations; + } - public static int getNumCallsToGetRegionBoundaries() { - return numCallsToGetRegionBoundaries.get(); - } + public static int getNumCallsToGetRegionBoundaries() { + return numCallsToGetRegionBoundaries.get(); + } - public static void clearNumCallsToGetRegionBoundaries() { - 
numCallsToGetRegionBoundaries.set(0); - } + public static void clearNumCallsToGetRegionBoundaries() { + numCallsToGetRegionBoundaries.set(0); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorToolTest.java index f05467cb4f4..982a31ce70d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordGeneratorToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +32,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.junit.Before; import org.junit.Rule; @@ -43,64 +42,64 @@ /** * Unit test for {@link ClusterRoleRecordGeneratorTool}. - * * @see ClusterRoleRecordGeneratorToolIT */ public class ClusterRoleRecordGeneratorToolTest { - private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecordGeneratorToolTest.class); + private static final Logger LOG = + LoggerFactory.getLogger(ClusterRoleRecordGeneratorToolTest.class); - private final Configuration conf = HBaseConfiguration.create(); - private final ClusterRoleRecordGeneratorTool generator = new ClusterRoleRecordGeneratorTool(); + private final Configuration conf = HBaseConfiguration.create(); + private final ClusterRoleRecordGeneratorTool generator = new ClusterRoleRecordGeneratorTool(); - @Rule - public final TestName testName = new TestName(); + @Rule + public final TestName testName = new TestName(); - @Before - public void before() { - generator.setConf(conf); - } + @Before + public void before() { + generator.setConf(conf); + } - @Test - public void testGetPeerClusterKey() throws Exception { - String peerZk = "localhost:2188:/hbase"; - ReplicationPeerConfig replicationConfig = mock(ReplicationPeerConfig.class); - when(replicationConfig.getClusterKey()).thenReturn(peerZk); - Admin admin = mock(Admin.class); + @Test + public void testGetPeerClusterKey() throws Exception { + String peerZk = "localhost:2188:/hbase"; + ReplicationPeerConfig replicationConfig = mock(ReplicationPeerConfig.class); + when(replicationConfig.getClusterKey()).thenReturn(peerZk); + Admin admin = mock(Admin.class); - String id = PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT; - when(admin.getReplicationPeerConfig(eq(id))).thenReturn(replicationConfig); - assertEquals(peerZk, ClusterRoleRecordGeneratorTool.getPeerClusterKey(admin, id)); + String id = PHOENIX_HA_GROUP_STORE_PEER_ID_DEFAULT; + when(admin.getReplicationPeerConfig(eq(id))).thenReturn(replicationConfig); + assertEquals(peerZk, ClusterRoleRecordGeneratorTool.getPeerClusterKey(admin, id)); - id = "1984"; - when(admin.getReplicationPeerConfig(eq(id))).thenReturn(replicationConfig); - assertEquals(peerZk, ClusterRoleRecordGeneratorTool.getPeerClusterKey(admin, id)); - } + id = "1984"; + when(admin.getReplicationPeerConfig(eq(id))).thenReturn(replicationConfig); + assertEquals(peerZk, 
ClusterRoleRecordGeneratorTool.getPeerClusterKey(admin, id)); + } - @Test - public void testGetHaPolicy() throws IOException { - String haGroupName = testName.getMethodName(); - // default HA policy is PARALLEL used for 1P - assertEquals(HighAvailabilityPolicy.PARALLEL, generator.getHaPolicy(haGroupName)); + @Test + public void testGetHaPolicy() throws IOException { + String haGroupName = testName.getMethodName(); + // default HA policy is PARALLEL used for 1P + assertEquals(HighAvailabilityPolicy.PARALLEL, generator.getHaPolicy(haGroupName)); - // return explicit HA policy set in the config - conf.set(String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName), - HighAvailabilityPolicy.FAILOVER.name()); - assertEquals(HighAvailabilityPolicy.FAILOVER, generator.getHaPolicy(haGroupName)); + // return explicit HA policy set in the config + conf.set(String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName), + HighAvailabilityPolicy.FAILOVER.name()); + assertEquals(HighAvailabilityPolicy.FAILOVER, generator.getHaPolicy(haGroupName)); - // other HA group still has default HA policy - String haGroupName2 = haGroupName + 2; - assertEquals(HighAvailabilityPolicy.PARALLEL, generator.getHaPolicy(haGroupName2)); + // other HA group still has default HA policy + String haGroupName2 = haGroupName + 2; + assertEquals(HighAvailabilityPolicy.PARALLEL, generator.getHaPolicy(haGroupName2)); - // invalid HA policy name - String invalidHaPolicy = "foobar"; - conf.set(String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName), invalidHaPolicy); - try { - generator.getHaPolicy(haGroupName); - fail("Should have failed since no such HA policy named " + invalidHaPolicy); - } catch (IOException e) { - LOG.info("Got expected exception for invalid HA policy name {}", invalidHaPolicy, e); - assertNotNull(e.getCause()); - assertTrue(e.getCause() instanceof IllegalArgumentException); - } + // invalid HA policy name + String invalidHaPolicy = "foobar"; + conf.set(String.format(PHOENIX_HA_GROUP_POLICY_ATTR_FORMAT, haGroupName), invalidHaPolicy); + try { + generator.getHaPolicy(haGroupName); + fail("Should have failed since no such HA policy named " + invalidHaPolicy); + } catch (IOException e) { + LOG.info("Got expected exception for invalid HA policy name {}", invalidHaPolicy, e); + assertNotNull(e.getCause()); + assertTrue(e.getCause() instanceof IllegalArgumentException); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordTest.java index 3c2001205d9..dc64f197425 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ClusterRoleRecordTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,7 +29,6 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.phoenix.jdbc.ClusterRoleRecord.ClusterRole; -import org.apache.phoenix.util.JDBCUtil; import org.apache.phoenix.util.JacksonUtil; import org.junit.Rule; import org.junit.Test; @@ -41,211 +40,156 @@ * Unit test for {@link ClusterRoleRecord}. 
*/ public class ClusterRoleRecordTest { - private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecordTest.class); - private static final String ZK1 = "zk1-1\\:2181,zk1-2\\:2181::/hbase"; - private static final String ZK2 = "zk2-1\\:2181,zk2-2\\:2181::/hbase"; - - @Rule - public final TestName testName = new TestName(); - - /** - * Helper method to create a temp JSON file with the given array of cluster role records. - */ - public static String createJsonFileWithRecords(ClusterRoleRecord... records) - throws IOException { - File file = File.createTempFile("phoenix.ha.cluster.role.records", ".test.json"); - file.deleteOnExit(); - JacksonUtil.getObjectWriterPretty().writeValue(file, records); - LOG.info("Prepared the JSON file for testing, file:{}, content:\n{}", file, - FileUtils.readFileToString(file, "UTF-8")); - return file.getPath(); + private static final Logger LOG = LoggerFactory.getLogger(ClusterRoleRecordTest.class); + private static final String ZK1 = "zk1-1\\:2181,zk1-2\\:2181::/hbase"; + private static final String ZK2 = "zk2-1\\:2181,zk2-2\\:2181::/hbase"; + + @Rule + public final TestName testName = new TestName(); + + /** + * Helper method to create a temp JSON file with the given array of cluster role records. + */ + public static String createJsonFileWithRecords(ClusterRoleRecord... records) throws IOException { + File file = File.createTempFile("phoenix.ha.cluster.role.records", ".test.json"); + file.deleteOnExit(); + JacksonUtil.getObjectWriterPretty().writeValue(file, records); + LOG.info("Prepared the JSON file for testing, file:{}, content:\n{}", file, + FileUtils.readFileToString(file, "UTF-8")); + return file.getPath(); + } + + @Test + public void testReadWriteJsonToFile() throws IOException { + ClusterRoleRecord record = new ClusterRoleRecord(testName.getMethodName(), + HighAvailabilityPolicy.FAILOVER, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + String fileName = createJsonFileWithRecords(record); + String fileContent = FileUtils.readFileToString(new File(fileName), "UTF-8"); + assertTrue(fileContent.contains(record.getHaGroupName())); + } + + @Test + public void testToAndFromJson() throws IOException { + ClusterRoleRecord record = new ClusterRoleRecord(testName.getMethodName(), + HighAvailabilityPolicy.FAILOVER, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + byte[] bytes = ClusterRoleRecord.toJson(record); + Optional record2 = ClusterRoleRecord.fromJson(bytes); + assertTrue(record2.isPresent()); + assertEquals(record, record2.get()); + } + + @Test + public void testGetActiveUrl() { + String haGroupName = testName.getMethodName(); + { + ClusterRoleRecord record = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 0); + assertTrue(record.getActiveUrl().isPresent()); + assertEquals(ZK1, record.getActiveUrl().get()); } - - @Test - public void testReadWriteJsonToFile() throws IOException { - ClusterRoleRecord record = new ClusterRoleRecord( - testName.getMethodName(), HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2, ClusterRole.STANDBY, - 1); - String fileName = createJsonFileWithRecords(record); - String fileContent = FileUtils.readFileToString(new File(fileName), "UTF-8"); - assertTrue(fileContent.contains(record.getHaGroupName())); - } - - @Test - public void testToAndFromJson() throws IOException { - ClusterRoleRecord record = new ClusterRoleRecord( - testName.getMethodName(), HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2, 
ClusterRole.STANDBY, - 1); - byte[] bytes = ClusterRoleRecord.toJson(record); - Optional record2 = ClusterRoleRecord.fromJson(bytes); - assertTrue(record2.isPresent()); - assertEquals(record, record2.get()); - } - - @Test - public void testGetActiveUrl() { - String haGroupName = testName.getMethodName(); - { - ClusterRoleRecord record = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2, ClusterRole.STANDBY, - 0); - assertTrue(record.getActiveUrl().isPresent()); - assertEquals(ZK1, record.getActiveUrl().get()); - } - { - ClusterRoleRecord record = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2, ClusterRole.STANDBY, - 0); - assertFalse(record.getActiveUrl().isPresent()); - } - } - - @Test - public void testIsNewerThan() { - String haGroupName = testName.getMethodName(); - ClusterRoleRecord recordV0 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 0); - ClusterRoleRecord recordV1 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 2); - assertTrue(recordV1.isNewerThan(recordV0)); // v1 is indeed newer - assertFalse(recordV1.isNewerThan(recordV1)); // irreflexive - assertFalse(recordV0.isNewerThan(recordV1)); // antisymmetry - - // Create a new cluster role record for a new HA group name. - // Cluster role records for different HA groups can not compare in reality, - // so they are not newer than each other. - String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(2); - ClusterRoleRecord record2 = new ClusterRoleRecord( - haGroupName2, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 1); - assertFalse(recordV0.isNewerThan(record2)); - assertFalse(recordV1.isNewerThan(record2)); - assertFalse(record2.isNewerThan(recordV0)); - assertFalse(record2.isNewerThan(recordV1)); - } - - @Test - public void testHasSameInfo() { - String haGroupName = testName.getMethodName(); - ClusterRoleRecord recordV0 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 0); - ClusterRoleRecord recordV1 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 1); - assertTrue(recordV1.hasSameInfo(recordV0)); - assertTrue(recordV1.hasSameInfo(recordV1)); - assertTrue(recordV0.hasSameInfo(recordV1)); - } - - @Test - public void testHasSameInfoDifferentZKOrder() { - String haGroupName = testName.getMethodName(); - ClusterRoleRecord recordV0 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK2, ClusterRole.ACTIVE, - ZK1 , ClusterRole.STANDBY, - 0); - ClusterRoleRecord recordV1 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 1); - assertTrue(recordV1.hasSameInfo(recordV0)); - assertTrue(recordV1.hasSameInfo(recordV1)); - assertTrue(recordV0.hasSameInfo(recordV1)); - } - - @Test - public void testHasSameInfoDifferentHostOrder() { - String hostzk1ordered = "zk1-1,zk1-2:2181:/hbase"; - String hostzk1unordered = "zk1-2,zk1-1:2181:/hbase"; - String haGroupName = testName.getMethodName(); - ClusterRoleRecord recordV0 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK2, ClusterRole.ACTIVE, - hostzk1ordered , 
ClusterRole.STANDBY, - 0); - ClusterRoleRecord recordV1 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - hostzk1unordered, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 1); - assertTrue(recordV1.hasSameInfo(recordV0)); - assertTrue(recordV1.hasSameInfo(recordV1)); - assertTrue(recordV0.hasSameInfo(recordV1)); - } - - @Test - public void testHasSameInfoNegative() { - String haGroupName = testName.getMethodName(); - ClusterRoleRecord record = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.PARALLEL, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 0); - - ClusterRoleRecord recordFailover = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 1); - assertFalse(record.hasSameInfo(recordFailover)); - assertFalse(recordFailover.hasSameInfo(record)); - - String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(2); - ClusterRoleRecord record2 = new ClusterRoleRecord( - haGroupName2, HighAvailabilityPolicy.PARALLEL, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 1); - assertFalse(record.hasSameInfo(record2)); - assertFalse(record2.hasSameInfo(record)); - } - - @Test - public void testGetRole() { - ClusterRoleRecord record = new ClusterRoleRecord( - testName.getMethodName(), HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2 , ClusterRole.STANDBY, - 0); - assertEquals(ClusterRole.ACTIVE, record.getRole(ZK1)); - assertEquals(ClusterRole.ACTIVE, record.getRole(record.getZk1())); - assertEquals(ClusterRole.STANDBY, record.getRole(record.getZk2())); - assertEquals(ClusterRole.UNKNOWN, record.getRole(null)); - assertEquals(ClusterRole.UNKNOWN, record.getRole("foo")); - } - - @Test - public void testToPrettyString() { - ClusterRoleRecord record = new ClusterRoleRecord( - testName.getMethodName(), HighAvailabilityPolicy.PARALLEL, - ZK1, ClusterRole.ACTIVE, - ZK2, ClusterRole.STANDBY, - 1); - LOG.info("toString(): {}", record.toString()); - LOG.info("toPrettyString:\n{}", record.toPrettyString()); - assertNotEquals(record.toString(), record.toPrettyString()); + { + ClusterRoleRecord record = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 0); + assertFalse(record.getActiveUrl().isPresent()); } + } + + @Test + public void testIsNewerThan() { + String haGroupName = testName.getMethodName(); + ClusterRoleRecord recordV0 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 0); + ClusterRoleRecord recordV1 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 2); + assertTrue(recordV1.isNewerThan(recordV0)); // v1 is indeed newer + assertFalse(recordV1.isNewerThan(recordV1)); // irreflexive + assertFalse(recordV0.isNewerThan(recordV1)); // antisymmetry + + // Create a new cluster role record for a new HA group name. + // Cluster role records for different HA groups can not compare in reality, + // so they are not newer than each other. 
+ String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(2); + ClusterRoleRecord record2 = new ClusterRoleRecord(haGroupName2, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 1); + assertFalse(recordV0.isNewerThan(record2)); + assertFalse(recordV1.isNewerThan(record2)); + assertFalse(record2.isNewerThan(recordV0)); + assertFalse(record2.isNewerThan(recordV1)); + } + + @Test + public void testHasSameInfo() { + String haGroupName = testName.getMethodName(); + ClusterRoleRecord recordV0 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 0); + ClusterRoleRecord recordV1 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + assertTrue(recordV1.hasSameInfo(recordV0)); + assertTrue(recordV1.hasSameInfo(recordV1)); + assertTrue(recordV0.hasSameInfo(recordV1)); + } + + @Test + public void testHasSameInfoDifferentZKOrder() { + String haGroupName = testName.getMethodName(); + ClusterRoleRecord recordV0 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK2, ClusterRole.ACTIVE, ZK1, ClusterRole.STANDBY, 0); + ClusterRoleRecord recordV1 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + assertTrue(recordV1.hasSameInfo(recordV0)); + assertTrue(recordV1.hasSameInfo(recordV1)); + assertTrue(recordV0.hasSameInfo(recordV1)); + } + + @Test + public void testHasSameInfoDifferentHostOrder() { + String hostzk1ordered = "zk1-1,zk1-2:2181:/hbase"; + String hostzk1unordered = "zk1-2,zk1-1:2181:/hbase"; + String haGroupName = testName.getMethodName(); + ClusterRoleRecord recordV0 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK2, ClusterRole.ACTIVE, hostzk1ordered, ClusterRole.STANDBY, 0); + ClusterRoleRecord recordV1 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + hostzk1unordered, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + assertTrue(recordV1.hasSameInfo(recordV0)); + assertTrue(recordV1.hasSameInfo(recordV1)); + assertTrue(recordV0.hasSameInfo(recordV1)); + } + + @Test + public void testHasSameInfoNegative() { + String haGroupName = testName.getMethodName(); + ClusterRoleRecord record = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.PARALLEL, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 0); + + ClusterRoleRecord recordFailover = new ClusterRoleRecord(haGroupName, + HighAvailabilityPolicy.FAILOVER, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + assertFalse(record.hasSameInfo(recordFailover)); + assertFalse(recordFailover.hasSameInfo(record)); + + String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(2); + ClusterRoleRecord record2 = new ClusterRoleRecord(haGroupName2, HighAvailabilityPolicy.PARALLEL, + ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + assertFalse(record.hasSameInfo(record2)); + assertFalse(record2.hasSameInfo(record)); + } + + @Test + public void testGetRole() { + ClusterRoleRecord record = new ClusterRoleRecord(testName.getMethodName(), + HighAvailabilityPolicy.FAILOVER, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 0); + assertEquals(ClusterRole.ACTIVE, record.getRole(ZK1)); + assertEquals(ClusterRole.ACTIVE, record.getRole(record.getZk1())); + assertEquals(ClusterRole.STANDBY, record.getRole(record.getZk2())); + assertEquals(ClusterRole.UNKNOWN, record.getRole(null)); + 
assertEquals(ClusterRole.UNKNOWN, record.getRole("foo")); + } + + @Test + public void testToPrettyString() { + ClusterRoleRecord record = new ClusterRoleRecord(testName.getMethodName(), + HighAvailabilityPolicy.PARALLEL, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + LOG.info("toString(): {}", record.toString()); + LOG.info("toPrettyString:\n{}", record.toPrettyString()); + assertNotEquals(record.toString(), record.toPrettyString()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/FailoverPhoenixConnectionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/FailoverPhoenixConnectionTest.java index 8ee3d028bdd..e403d13b78f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/FailoverPhoenixConnectionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/FailoverPhoenixConnectionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,6 @@ package org.apache.phoenix.jdbc; import static org.apache.phoenix.exception.SQLExceptionCode.CLASS_NOT_UNWRAPPABLE; -import static org.apache.phoenix.exception.SQLExceptionCode.HA_CLOSED_AFTER_FAILOVER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -35,6 +34,9 @@ import java.util.Properties; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.phoenix.exception.FailoverSQLException; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.jdbc.HighAvailabilityGroup.HAGroupInfo; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; @@ -42,179 +44,176 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.exception.FailoverSQLException; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.jdbc.HighAvailabilityGroup.HAGroupInfo; - /** * Unit test for {@link FailoverPhoenixConnection}. - * * @see FailoverPhoenixConnectionIT */ public class FailoverPhoenixConnectionTest { - private static final Logger LOG = LoggerFactory.getLogger(FailoverPhoenixConnectionTest.class); - - @Mock PhoenixConnection connection1; - @Mock PhoenixConnection connection2; - @Mock HighAvailabilityGroup haGroup; - - final HAGroupInfo haGroupInfo = new HAGroupInfo("fake", "zk1", "zk2"); - FailoverPhoenixConnection failoverConnection; // this connection itself is not mocked or spied. - - @Before - public void init() throws SQLException { - MockitoAnnotations.initMocks(this); - when(haGroup.getGroupInfo()).thenReturn(haGroupInfo); - when(haGroup.connectActive(any(Properties.class))).thenReturn(connection1); - - failoverConnection = new FailoverPhoenixConnection(haGroup, new Properties()); - } - - /** - * Test helper method {@link FailoverPhoenixConnection#wrapActionDuringFailover}. 
- */ - @Test - public void testWrapActionDuringFailover() throws SQLException { - // Test SupplierWithSQLException which returns a value - String str = "Hello, World!"; - assertEquals(str, failoverConnection.wrapActionDuringFailover(() -> str)); - - // Test RunWithSQLException which does not return value - final AtomicInteger counter = new AtomicInteger(0); - failoverConnection.wrapActionDuringFailover(counter::incrementAndGet); - assertEquals(1, counter.get()); - } - - /** - * Test that after calling failover(), the old connection got closed with FailoverSQLException, - * and a new Phoenix connection is opened. - */ - @Test - public void testFailover() throws SQLException { - // Make HAGroup return a different phoenix connection when it gets called next time - when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); - - // explicit call failover - failoverConnection.failover(1000L); - - // The old connection should have been closed due to failover - verify(connection1, times(1)).close(any(FailoverSQLException.class)); - // A new Phoenix connection is wrapped underneath - assertEquals(connection2, failoverConnection.getWrappedConnection()); - } - - /** - * Test static {@link FailoverPhoenixConnection#failover(Connection, long)} method. - */ - @Test - public void testFailoverStatic() throws SQLException { - try { - FailoverPhoenixConnection.failover(connection1, 1000L); - fail("Should have failed since plain phoenix connection can not failover!"); - } catch (SQLException e) { - assertEquals(CLASS_NOT_UNWRAPPABLE.getErrorCode(),e.getErrorCode()); - LOG.info("Got expected exception when trying to failover on non-HA connection", e); - } - - FailoverPhoenixConnection.failover(failoverConnection, 1000L); - // The old connection should have been closed due to failover - verify(connection1, times(1)).close(any(FailoverSQLException.class)); - } - - /** - * Test that failover() is no-op when it is already pointing to active cluster. - */ - @Test - public void testActiveFailoverIsNoOp() throws SQLException { - when(haGroup.isActive(connection1)).thenReturn(true); - // Make HAGroup return a different phoenix connection when it gets called next time - when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); - - failoverConnection.failover(1000L); - - // The wrapped phoenix connection is not closed since it is already connecting to ACTIVE - verify(connection1, never()).close(any(FailoverSQLException.class)); - assertEquals(connection1, failoverConnection.getWrappedConnection()); - } - - /** - * Test that with {@link FailoverPolicy.FailoverToActivePolicy}, automatic failover happens. 
- */ - @Test - public void testFailoverToActivePolicy() throws SQLException { - Properties properties = new Properties(); - properties.setProperty(FailoverPolicy.PHOENIX_HA_FAILOVER_POLICY_ATTR, - FailoverPolicy.FailoverToActivePolicy.NAME); - failoverConnection = new FailoverPhoenixConnection(haGroup, properties); - - LOG.info("Close the wrapped phoenix connection due to failover..."); - // Make HAGroup return a different phoenix connection when it gets called next time - when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); - // Mimic wrapped phoenix connection gets closed by HA group - doThrow(new FailoverSQLException("", "", new Exception())).when(connection1).commit(); - - // During this operation, internal failover should have happened automatically - failoverConnection.commit(); - - verify(connection1, times(1)).close(any(SQLException.class)); - assertEquals(connection2, failoverConnection.getWrappedConnection()); - } - - /** - * Test that failover() will fail once the connection has been closed. - */ - @Test - public void testConnectionClosed() throws SQLException { - failoverConnection.close(); - - try { - failoverConnection.failover(1000L); - fail("failover should have failed after failover connection is closed!"); - } catch (SQLException e) { - LOG.info("Got expected exception", e); - assertEquals(SQLExceptionCode.CONNECTION_CLOSED.getErrorCode(), e.getErrorCode()); - } - - // Assert that no connection has been doubly closed - verify(connection1, never()).close(any(FailoverSQLException.class)); - verify(connection2, never()).close(any(FailoverSQLException.class)); + private static final Logger LOG = LoggerFactory.getLogger(FailoverPhoenixConnectionTest.class); + + @Mock + PhoenixConnection connection1; + @Mock + PhoenixConnection connection2; + @Mock + HighAvailabilityGroup haGroup; + + final HAGroupInfo haGroupInfo = new HAGroupInfo("fake", "zk1", "zk2"); + FailoverPhoenixConnection failoverConnection; // this connection itself is not mocked or spied. + + @Before + public void init() throws SQLException { + MockitoAnnotations.initMocks(this); + when(haGroup.getGroupInfo()).thenReturn(haGroupInfo); + when(haGroup.connectActive(any(Properties.class))).thenReturn(connection1); + + failoverConnection = new FailoverPhoenixConnection(haGroup, new Properties()); + } + + /** + * Test helper method {@link FailoverPhoenixConnection#wrapActionDuringFailover}. + */ + @Test + public void testWrapActionDuringFailover() throws SQLException { + // Test SupplierWithSQLException which returns a value + String str = "Hello, World!"; + assertEquals(str, failoverConnection.wrapActionDuringFailover(() -> str)); + + // Test RunWithSQLException which does not return value + final AtomicInteger counter = new AtomicInteger(0); + failoverConnection.wrapActionDuringFailover(counter::incrementAndGet); + assertEquals(1, counter.get()); + } + + /** + * Test that after calling failover(), the old connection got closed with FailoverSQLException, + * and a new Phoenix connection is opened. 
+ */ + @Test + public void testFailover() throws SQLException { + // Make HAGroup return a different phoenix connection when it gets called next time + when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); + + // explicit call failover + failoverConnection.failover(1000L); + + // The old connection should have been closed due to failover + verify(connection1, times(1)).close(any(FailoverSQLException.class)); + // A new Phoenix connection is wrapped underneath + assertEquals(connection2, failoverConnection.getWrappedConnection()); + } + + /** + * Test static {@link FailoverPhoenixConnection#failover(Connection, long)} method. + */ + @Test + public void testFailoverStatic() throws SQLException { + try { + FailoverPhoenixConnection.failover(connection1, 1000L); + fail("Should have failed since plain phoenix connection can not failover!"); + } catch (SQLException e) { + assertEquals(CLASS_NOT_UNWRAPPABLE.getErrorCode(), e.getErrorCode()); + LOG.info("Got expected exception when trying to failover on non-HA connection", e); } - /** - * Test that closing a closed failover connection is a no-op. - */ - @Test - public void testCloseOnceMore() throws SQLException { - failoverConnection.close(); - assertTrue(failoverConnection.isClosed()); - // connection got closed but not due to failover - verify(connection1, times(1)).close(); - verify(connection1, never()).close(any(SQLException.class)); - - // close connection once more - failoverConnection.close(); - verify(connection1, times(1)).close(); - verify(connection1, never()).close(any(SQLException.class)); + FailoverPhoenixConnection.failover(failoverConnection, 1000L); + // The old connection should have been closed due to failover + verify(connection1, times(1)).close(any(FailoverSQLException.class)); + } + + /** + * Test that failover() is no-op when it is already pointing to active cluster. + */ + @Test + public void testActiveFailoverIsNoOp() throws SQLException { + when(haGroup.isActive(connection1)).thenReturn(true); + // Make HAGroup return a different phoenix connection when it gets called next time + when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); + + failoverConnection.failover(1000L); + + // The wrapped phoenix connection is not closed since it is already connecting to ACTIVE + verify(connection1, never()).close(any(FailoverSQLException.class)); + assertEquals(connection1, failoverConnection.getWrappedConnection()); + } + + /** + * Test that with {@link FailoverPolicy.FailoverToActivePolicy}, automatic failover happens. 
+ */ + @Test + public void testFailoverToActivePolicy() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(FailoverPolicy.PHOENIX_HA_FAILOVER_POLICY_ATTR, + FailoverPolicy.FailoverToActivePolicy.NAME); + failoverConnection = new FailoverPhoenixConnection(haGroup, properties); + + LOG.info("Close the wrapped phoenix connection due to failover..."); + // Make HAGroup return a different phoenix connection when it gets called next time + when(haGroup.connectActive(any(Properties.class))).thenReturn(connection2); + // Mimic wrapped phoenix connection gets closed by HA group + doThrow(new FailoverSQLException("", "", new Exception())).when(connection1).commit(); + + // During this operation, internal failover should have happened automatically + failoverConnection.commit(); + + verify(connection1, times(1)).close(any(SQLException.class)); + assertEquals(connection2, failoverConnection.getWrappedConnection()); + } + + /** + * Test that failover() will fail once the connection has been closed. + */ + @Test + public void testConnectionClosed() throws SQLException { + failoverConnection.close(); + + try { + failoverConnection.failover(1000L); + fail("failover should have failed after failover connection is closed!"); + } catch (SQLException e) { + LOG.info("Got expected exception", e); + assertEquals(SQLExceptionCode.CONNECTION_CLOSED.getErrorCode(), e.getErrorCode()); } - /** - * Test that when HA group fails to create a connection, the failover connection will report - * back the connection establishing error instead of NullPointerException or other ones. - */ - @Test - public void testCheckConnection() throws SQLException { - // Make the wrapped phoenix connection null. This could happen if HAGroup is failing. - when(haGroup.connectActive(any(Properties.class))).thenReturn(null); - failoverConnection = new FailoverPhoenixConnection(haGroup, new Properties()); - assertNull(failoverConnection.getWrappedConnection()); - - try { - failoverConnection.commit(); - fail("Should have failed because the wrapped phoenix connection is null"); - } catch (SQLException e) { - LOG.info("Got expected exception", e); - assertEquals(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION.getErrorCode(), - e.getErrorCode()); - } + // Assert that no connection has been doubly closed + verify(connection1, never()).close(any(FailoverSQLException.class)); + verify(connection2, never()).close(any(FailoverSQLException.class)); + } + + /** + * Test that closing a closed failover connection is a no-op. + */ + @Test + public void testCloseOnceMore() throws SQLException { + failoverConnection.close(); + assertTrue(failoverConnection.isClosed()); + // connection got closed but not due to failover + verify(connection1, times(1)).close(); + verify(connection1, never()).close(any(SQLException.class)); + + // close connection once more + failoverConnection.close(); + verify(connection1, times(1)).close(); + verify(connection1, never()).close(any(SQLException.class)); + } + + /** + * Test that when HA group fails to create a connection, the failover connection will report back + * the connection establishing error instead of NullPointerException or other ones. + */ + @Test + public void testCheckConnection() throws SQLException { + // Make the wrapped phoenix connection null. This could happen if HAGroup is failing. 
+ when(haGroup.connectActive(any(Properties.class))).thenReturn(null); + failoverConnection = new FailoverPhoenixConnection(haGroup, new Properties()); + assertNull(failoverConnection.getWrappedConnection()); + + try { + failoverConnection.commit(); + fail("Should have failed because the wrapped phoenix connection is null"); + } catch (SQLException e) { + LOG.info("Got expected exception", e); + assertEquals(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION.getErrorCode(), e.getErrorCode()); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionFailureTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionFailureTest.java index 51d89728bfb..e7dc204a2f6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionFailureTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionFailureTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,66 +32,62 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.phoenix.query.BaseTest; -import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.util.PhoenixRuntime; import org.junit.Test; import org.mockito.Mockito; import org.mockito.stubbing.Answer; /** - * Test to make sure once an error is encountered on an underlying phoenix connection - * we don't use that connection during the entire lifecycle of client conenction + * Test to make sure once an error is encountered on an underlying phoenix connection we don't use + * that connection during the entire lifecycle of client conenction */ public class ParallelPhoenixConnectionFailureTest extends BaseTest { - private static String url = - JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS; + private static String url = + JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS; - private static int WAIT_MS = 30000; + private static int WAIT_MS = 30000; - @Test - public void testExecuteQueryChainFailure() throws SQLException { - HBaseTestingUtility hbaseTestingUtility = new HBaseTestingUtility(); - Properties props = new Properties(); - PhoenixConnection conn1 = (PhoenixConnection) DriverManager.getConnection(url, props); - PhoenixConnection conn2 = (PhoenixConnection) DriverManager.getConnection(url, props); - PhoenixConnection connSpy1 = Mockito.spy(conn1); - PhoenixConnection connSpy2 = Mockito.spy(conn2); - AtomicInteger numStatementsCreatedOnConn1 = new AtomicInteger(); - AtomicInteger numStatementsCreatedOnConn2 = new AtomicInteger(); - Answer answer1 = (i -> { - numStatementsCreatedOnConn1.getAndIncrement(); - return conn1.createStatement(); - }); - Answer answer2 = (i -> { - numStatementsCreatedOnConn2.getAndIncrement(); - return conn2.createStatement(); - }); - doAnswer(answer1).when(connSpy1).createStatement(); - doAnswer(answer2).when(connSpy2).createStatement(); - ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), - Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), - null); - ParallelPhoenixConnection parallelConn = - new ParallelPhoenixConnection(context, 
CompletableFuture.completedFuture(connSpy1), - CompletableFuture.completedFuture(connSpy2)); - parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); - parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); - // Verify successful execution on both connections - hbaseTestingUtility.waitFor(WAIT_MS, () -> (numStatementsCreatedOnConn1.get() == 2) - && (numStatementsCreatedOnConn2.get() == 2)); - // Error on conn1, we shouldn't use conn1 after that - doThrow(new SQLException()).when(connSpy1).createStatement(); - parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); - hbaseTestingUtility.waitFor(WAIT_MS, () -> numStatementsCreatedOnConn2.get() == 3); - doAnswer(answer1).when(connSpy1).createStatement(); - // Should still have a successful execution only from conn2 since conn1 errored before - parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); - hbaseTestingUtility.waitFor(WAIT_MS, () -> (numStatementsCreatedOnConn1.get() == 2) - && (numStatementsCreatedOnConn2.get() == 4)); - // Any task that we chain on conn1 should error out - assertTrue(context.chainOnConn1(() -> Boolean.TRUE).isCompletedExceptionally()); - } + @Test + public void testExecuteQueryChainFailure() throws SQLException { + HBaseTestingUtility hbaseTestingUtility = new HBaseTestingUtility(); + Properties props = new Properties(); + PhoenixConnection conn1 = (PhoenixConnection) DriverManager.getConnection(url, props); + PhoenixConnection conn2 = (PhoenixConnection) DriverManager.getConnection(url, props); + PhoenixConnection connSpy1 = Mockito.spy(conn1); + PhoenixConnection connSpy2 = Mockito.spy(conn2); + AtomicInteger numStatementsCreatedOnConn1 = new AtomicInteger(); + AtomicInteger numStatementsCreatedOnConn2 = new AtomicInteger(); + Answer answer1 = (i -> { + numStatementsCreatedOnConn1.getAndIncrement(); + return conn1.createStatement(); + }); + Answer answer2 = (i -> { + numStatementsCreatedOnConn2.getAndIncrement(); + return conn2.createStatement(); + }); + doAnswer(answer1).when(connSpy1).createStatement(); + doAnswer(answer2).when(connSpy2).createStatement(); + ParallelPhoenixContext context = + new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + ParallelPhoenixConnection parallelConn = new ParallelPhoenixConnection(context, + CompletableFuture.completedFuture(connSpy1), CompletableFuture.completedFuture(connSpy2)); + parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); + parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); + // Verify successful execution on both connections + hbaseTestingUtility.waitFor(WAIT_MS, + () -> (numStatementsCreatedOnConn1.get() == 2) && (numStatementsCreatedOnConn2.get() == 2)); + // Error on conn1, we shouldn't use conn1 after that + doThrow(new SQLException()).when(connSpy1).createStatement(); + parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); + hbaseTestingUtility.waitFor(WAIT_MS, () -> numStatementsCreatedOnConn2.get() == 3); + doAnswer(answer1).when(connSpy1).createStatement(); + // Should still have a successful execution only from conn2 since conn1 errored before + parallelConn.createStatement().execute("SELECT * FROM SYSTEM.CATALOG"); + hbaseTestingUtility.waitFor(WAIT_MS, + () -> (numStatementsCreatedOnConn1.get() == 2) && (numStatementsCreatedOnConn2.get() == 4)); + // Any task that we chain on conn1 should error out + 
assertTrue(context.chainOnConn1(() -> Boolean.TRUE).isCompletedExceptionally()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionTest.java index cdc8c2ddb5e..136e72e74f2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixConnectionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,394 +18,396 @@ package org.apache.phoenix.jdbc; import static org.apache.hadoop.test.GenericTestUtils.waitFor; - -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.monitoring.GlobalClientMetrics; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; -import org.mockito.exceptions.verification.WantedButNotInvoked; -import org.mockito.invocation.InvocationOnMock; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.sql.SQLException; import java.sql.SQLWarning; +import java.util.Properties; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; import java.util.function.Supplier; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.monitoring.GlobalClientMetrics; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.exceptions.verification.WantedButNotInvoked; +import org.mockito.invocation.InvocationOnMock; public class ParallelPhoenixConnectionTest { - ParallelPhoenixContext context; - - ParallelPhoenixConnection parallelPhoenixConnection; - PhoenixConnection connection1 = Mockito.mock(PhoenixConnection.class); - PhoenixConnection connection2 = Mockito.mock(PhoenixConnection.class); - - @Before - public void init() throws SQLException { - context = new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - parallelPhoenixConnection = new ParallelPhoenixConnection(context,CompletableFuture.completedFuture(connection1),CompletableFuture.completedFuture(connection2)); - } - - @Test - public void getWarningsBothWarnTest() throws Exception { - SQLWarning warning1 = new SQLWarning("warning1"); - SQLWarning warning2 = new SQLWarning("warning2"); - Mockito.when(connection1.getWarnings()).thenReturn(warning1); - Mockito.when(connection2.getWarnings()).thenReturn(warning2); - - SQLWarning result = parallelPhoenixConnection.getWarnings(); - assertEquals(warning1,result.getNextWarning()); - assertEquals(warning2,result.getNextWarning().getNextWarning()); - } - - @Test - public void getWarnings1WarnTest() 
throws Exception { - SQLWarning warning2 = new SQLWarning("warning2"); - Mockito.when(connection1.getWarnings()).thenReturn(null); - Mockito.when(connection2.getWarnings()).thenReturn(warning2); - - SQLWarning result = parallelPhoenixConnection.getWarnings(); - assertEquals(warning2,result); + ParallelPhoenixContext context; + + ParallelPhoenixConnection parallelPhoenixConnection; + PhoenixConnection connection1 = Mockito.mock(PhoenixConnection.class); + PhoenixConnection connection2 = Mockito.mock(PhoenixConnection.class); + + @Before + public void init() throws SQLException { + context = + new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, CompletableFuture.completedFuture(connection1), + CompletableFuture.completedFuture(connection2)); + } + + @Test + public void getWarningsBothWarnTest() throws Exception { + SQLWarning warning1 = new SQLWarning("warning1"); + SQLWarning warning2 = new SQLWarning("warning2"); + Mockito.when(connection1.getWarnings()).thenReturn(warning1); + Mockito.when(connection2.getWarnings()).thenReturn(warning2); + + SQLWarning result = parallelPhoenixConnection.getWarnings(); + assertEquals(warning1, result.getNextWarning()); + assertEquals(warning2, result.getNextWarning().getNextWarning()); + } + + @Test + public void getWarnings1WarnTest() throws Exception { + SQLWarning warning2 = new SQLWarning("warning2"); + Mockito.when(connection1.getWarnings()).thenReturn(null); + Mockito.when(connection2.getWarnings()).thenReturn(warning2); + + SQLWarning result = parallelPhoenixConnection.getWarnings(); + assertEquals(warning2, result); + } + + @Test + public void getWarnings0WarnTest() throws Exception { + Mockito.when(connection1.getWarnings()).thenReturn(null); + Mockito.when(connection2.getWarnings()).thenReturn(null); + + SQLWarning result = parallelPhoenixConnection.getWarnings(); + assertNull(result); + } + + @Test + public void isWrapperForPhoenixConnectionFalseTest() throws SQLException { + boolean result = parallelPhoenixConnection.isWrapperFor(PhoenixConnection.class); + assertFalse(result); + } + + @Test + public void isWrapperForPhoenixMonitoredConnectionTrueTest() throws SQLException { + boolean result = parallelPhoenixConnection.isWrapperFor(PhoenixMonitoredConnection.class); + assertTrue(result); + } + + @Test + public void unwrapPhoenixConnectionFailsTest() { + try { + parallelPhoenixConnection.unwrap(PhoenixConnection.class); + } catch (SQLException e) { + assertEquals(e.getErrorCode(), SQLExceptionCode.CLASS_NOT_UNWRAPPABLE.getErrorCode()); } - - @Test - public void getWarnings0WarnTest() throws Exception { - Mockito.when(connection1.getWarnings()).thenReturn(null); - Mockito.when(connection2.getWarnings()).thenReturn(null); - - SQLWarning result = parallelPhoenixConnection.getWarnings(); - assertNull(result); + } + + @Test + public void unwrapPhoenixMonitoredConnectionTest() throws SQLException { + PhoenixMonitoredConnection result = + parallelPhoenixConnection.unwrap(PhoenixMonitoredConnection.class); + assertEquals(parallelPhoenixConnection, result); + } + + @Test + public void testOpenConnection1Error() throws SQLException { + + CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(() -> { + throw new CompletionException(new Exception("Failed in completing future connection1")); + }); + + CompletableFuture futureConnection2 = + 
CompletableFuture.completedFuture(connection2); + + // Even if the connection open for one of the connections failed, since the other connection + // was initialized successfully - the ParallelPhoenixConnection object should be returned. Also, + // the + // close should be successful, as one of the connection closed successfully. + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + + parallelPhoenixConnection.close(); + Mockito.verify(connection2).close(); + } + + @Test + public void testOpenConnection2Error() throws SQLException { + + CompletableFuture futureConnection1 = + CompletableFuture.completedFuture(connection1); + CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(() -> { + throw new CompletionException(new Exception("Failed in completing future connection2")); + }); + + // Even if the connection open for one of the connections failed, since the other connection + // was initialized successfully - the ParallelPhoenixConnection object should be returned. Also, + // the + // close should be successful, as one of the connection closed successfully. + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + + parallelPhoenixConnection.close(); + Mockito.verify(connection1).close(); + } + + @Test + public void testOpenBothConnectionError() { + + CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(() -> { + throw new CompletionException(new Exception("Failed in completing future connection1")); + }); + CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(() -> { + throw new CompletionException(new Exception("Failed in completing future connection2")); + }); + + // Since there were failures in establishing both the connections, the + // initialization of ParallelPhoenixConnection itself should throw an exception. + try { + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + fail("Initialization should throw an exception if both the future connections fail."); + } catch (SQLException e) { } - - @Test - public void isWrapperForPhoenixConnectionFalseTest() throws SQLException { - boolean result = parallelPhoenixConnection.isWrapperFor(PhoenixConnection.class); - assertFalse(result); - } - - @Test - public void isWrapperForPhoenixMonitoredConnectionTrueTest() throws SQLException { - boolean result = parallelPhoenixConnection.isWrapperFor(PhoenixMonitoredConnection.class); - assertTrue(result); + } + + @Test + public void testOpenConnection1Delay() throws Exception { + Properties properties = new Properties(); + properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, + "1000"); + ParallelPhoenixContext context = + new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + + CountDownLatch cdl = new CountDownLatch(1); + CompletableFuture futureConnection1 = + CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl, connection1)); + CompletableFuture futureConnection2 = + CompletableFuture.completedFuture(connection2); + + // Even though there is delay in establishing connection1, the other connection + // should be established successfully. + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + + // One of the connections, i.e. connection2, should be closed successfully. 
+ parallelPhoenixConnection.close(); + Mockito.verify(connection2).close(); + cdl.countDown(); + waitForConnectionClose(connection1); + } + + @Test + public void testOpenConnection2Delay() throws Exception { + Properties properties = new Properties(); + properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, + "1000"); + ParallelPhoenixContext context = + new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + + CountDownLatch cdl = new CountDownLatch(1); + CompletableFuture futureConnection1 = + CompletableFuture.completedFuture(connection1); + CompletableFuture futureConnection2 = + CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl, connection2)); + + // Even though there is delay in establishing connection2, the other connection + // should be established successfully. + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + + // One of the connections, i.e. connection1, should be closed successfully. + parallelPhoenixConnection.close(); + Mockito.verify(connection1).close(); + cdl.countDown(); + waitForConnectionClose(connection2); + } + + @Test(timeout = 10000) + public void testOpenBothConnectionDelay() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, + "1000"); + ParallelPhoenixContext context = + new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + + CountDownLatch cdl1 = new CountDownLatch(1); + CompletableFuture futureConnection1 = + CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl1, connection1)); + + CountDownLatch cdl2 = new CountDownLatch(1); + CompletableFuture futureConnection2 = + CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl2, connection2)); + + long prevTimeoutCounter = + GlobalClientMetrics.GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.getMetric().getValue(); + + // Both the connections have a delay in establishing the connection, in such cases, + // the initialization of the ParallelPhoenixConnection should itself timeout. 
+ try { + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, futureConnection1, futureConnection2); + fail("Initialization should throw an exception if both the future connections timeout"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), e.getErrorCode()); + assertTrue(GlobalClientMetrics.GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.getMetric().getValue() + > prevTimeoutCounter); } - - @Test - public void unwrapPhoenixConnectionFailsTest() { - try { - parallelPhoenixConnection.unwrap(PhoenixConnection.class); - } catch (SQLException e) { - assertEquals(e.getErrorCode(), SQLExceptionCode.CLASS_NOT_UNWRAPPABLE.getErrorCode()); - } - } - - @Test - public void unwrapPhoenixMonitoredConnectionTest() throws SQLException { - PhoenixMonitoredConnection result = parallelPhoenixConnection.unwrap(PhoenixMonitoredConnection.class); - assertEquals(parallelPhoenixConnection,result); - } - - @Test - public void testOpenConnection1Error() throws SQLException { - - CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(() -> { - throw new CompletionException(new Exception("Failed in completing future connection1")); - }); - - CompletableFuture futureConnection2 = CompletableFuture.completedFuture(connection2); - - // Even if the connection open for one of the connections failed, since the other connection - // was initialized successfully - the ParallelPhoenixConnection object should be returned. Also, the - // close should be successful, as one of the connection closed successfully. - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - - parallelPhoenixConnection.close(); - Mockito.verify(connection2).close(); - } - - @Test - public void testOpenConnection2Error() throws SQLException { - - CompletableFuture futureConnection1 = CompletableFuture.completedFuture(connection1); - CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(() -> { - throw new CompletionException(new Exception("Failed in completing future connection2")); - }); - - // Even if the connection open for one of the connections failed, since the other connection - // was initialized successfully - the ParallelPhoenixConnection object should be returned. Also, the - // close should be successful, as one of the connection closed successfully. - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - - parallelPhoenixConnection.close(); - Mockito.verify(connection1).close(); - } - - @Test - public void testOpenBothConnectionError() { - - CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(() -> { - throw new CompletionException(new Exception("Failed in completing future connection1")); - }); - CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(() -> { - throw new CompletionException(new Exception("Failed in completing future connection2")); - }); - - // Since there were failures in establishing both the connections, the - // initialization of ParallelPhoenixConnection itself should throw an exception. 
- try { - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - fail("Initialization should throw an exception if both the future connections fail."); - } catch (SQLException e) { - } - } - - @Test - public void testOpenConnection1Delay() throws Exception { - Properties properties = new Properties(); - properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, - "1000"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - - CountDownLatch cdl = new CountDownLatch(1); - CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl, connection1)); - CompletableFuture futureConnection2 = CompletableFuture.completedFuture(connection2); - - // Even though there is delay in establishing connection1, the other connection - // should be established successfully. - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - - // One of the connections, i.e. connection2, should be closed successfully. - parallelPhoenixConnection.close(); - Mockito.verify(connection2).close(); - cdl.countDown(); - waitForConnectionClose(connection1); - } - - @Test - public void testOpenConnection2Delay() throws Exception { - Properties properties = new Properties(); - properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, - "1000"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - - CountDownLatch cdl = new CountDownLatch(1); - CompletableFuture futureConnection1 = CompletableFuture.completedFuture(connection1); - CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl, connection2)); - - // Even though there is delay in establishing connection2, the other connection - // should be established successfully. - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - - // One of the connections, i.e. connection1, should be closed successfully. 
- parallelPhoenixConnection.close(); - Mockito.verify(connection1).close(); - cdl.countDown(); - waitForConnectionClose(connection2); - } - - @Test(timeout = 10000) - public void testOpenBothConnectionDelay() throws SQLException { - Properties properties = new Properties(); - properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, - "1000"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - - CountDownLatch cdl1 = new CountDownLatch(1); - CompletableFuture futureConnection1 = CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl1, connection1)); - - CountDownLatch cdl2 = new CountDownLatch(1); - CompletableFuture futureConnection2 = CompletableFuture.supplyAsync(getDelayConnectionSupplier(cdl2, connection2)); - - long prevTimeoutCounter = GlobalClientMetrics.GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.getMetric() - .getValue(); - - // Both the connections have a delay in establishing the connection, in such cases, - // the initialization of the ParallelPhoenixConnection should itself timeout. - try { - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - futureConnection1, - futureConnection2); - fail("Initialization should throw an exception if both the future connections timeout"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), - e.getErrorCode()); - assertTrue(GlobalClientMetrics.GLOBAL_HA_PARALLEL_TASK_TIMEOUT_COUNTER.getMetric() - .getValue() > prevTimeoutCounter); - } - } - - @Test - public void testCloseConnection1Error() throws SQLException { - Mockito.doThrow(new SQLException()).when(connection1).close(); - parallelPhoenixConnection.close(); - Mockito.verify(connection2).close(); - } - - @Test - public void testCloseConnection2Error() throws SQLException { - Mockito.doThrow(new SQLException()).when(connection2).close(); - parallelPhoenixConnection.close(); - Mockito.verify(connection1).close(); - } - - @Test - public void testCloseBothConnectionError() throws SQLException { - Mockito.doThrow(new SQLException()).when(connection1).close(); - Mockito.doThrow(new SQLException()).when(connection2).close(); - try { - parallelPhoenixConnection.close(); - fail("Close should throw exception when both underlying close throw exceptions"); - } catch (SQLException e) { - } - Mockito.verify(connection1).close(); - Mockito.verify(connection2).close(); - } - - @Test - public void testConnection1CloseDelay() throws Exception { - CountDownLatch cdl = new CountDownLatch(1); - Supplier delaySupplier = getDelaySupplier(cdl); - context.chainOnConn1(delaySupplier); - - // Even though the chain on conn1 is lagging, the close is not - // chained and happens on a different executor pool. The close - // is async, and anyone of the connection can be closed first. - // We cannot deterministically determine which connection was - // closed first, hence we check on the count of close operation - // on both connection objects, and expect at least one to be called. - parallelPhoenixConnection.close(); - - long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream(). - map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream(). 
- map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - - assertTrue("Close should be called on at least one of the connections", countConnection1 > 0 || countConnection2 > 0); - } - - @Test - public void testConnection2CloseDelay() throws Exception { - CountDownLatch cdl = new CountDownLatch(1); - Supplier delaySupplier = getDelaySupplier(cdl); - context.chainOnConn2(delaySupplier); - // Chain on conn2 is lagging, we should return after closing conn1 - // Even though the chain on conn2 is lagging, the close is not - // chained and happens on a different executor pool. The close - // is async, and anyone of the connection can be closed first. - // We cannot deterministically determine which connection was - // closed first, hence we check on the count of close operation - // on both connection objects, and expect at least one to be called. - parallelPhoenixConnection.close(); - - long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream(). - map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream(). - map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - - assertTrue("Close should be called on at least one of the connections", countConnection1 > 0 || countConnection2 > 0); - - cdl.countDown(); - } - - @Test - public void testConnectionCloseNoTimeout() throws Exception { - Properties properties = new Properties(); - properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, - "1000"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - parallelPhoenixConnection = - new ParallelPhoenixConnection(context, - CompletableFuture.completedFuture(connection1), - CompletableFuture.completedFuture(connection2)); - CountDownLatch cdl1 = new CountDownLatch(1); - CountDownLatch cdl2 = new CountDownLatch(1); - Supplier delaySupplier1 = getDelaySupplier(cdl1); - Supplier delaySupplier2 = getDelaySupplier(cdl2); - context.chainOnConn1(delaySupplier1); - context.chainOnConn2(delaySupplier2); - // Even though the chain on both conn1 and conn2 is lagging, - // the close is not chained and happens on a different executor pool. - // The close is async, and anyone of the connection can be closed first. - // We cannot deterministically determine which connection was - // closed first, hence we check on the count of close operation - // on both connection objects, and expect at least one to be called. - parallelPhoenixConnection.close(); - - long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream(). - map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream(). 
- map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); - - assertTrue("Close should be called on at least one of the connections", countConnection1 > 0 || countConnection2 > 0); - - cdl1.countDown(); - cdl2.countDown(); - } - - private void waitForConnectionClose(PhoenixConnection connection) throws Exception { - waitFor(() -> { - try { - Mockito.verify(connection).close(); - } catch (SQLException | WantedButNotInvoked e) { - return false; - } - return true; - }, 1000, 30000); - } - - private Supplier getDelaySupplier(CountDownLatch cdl) { - return (() -> { - try { - cdl.await(); - } catch (InterruptedException e) { - throw new CompletionException(e); - } - return null; - }); - } - - private Supplier getDelayConnectionSupplier(CountDownLatch cdl, PhoenixConnection returnConnection) { - return (() -> { - try { - cdl.await(); - } catch (InterruptedException e) { - throw new CompletionException(e); - } - return returnConnection; - }); + } + + @Test + public void testCloseConnection1Error() throws SQLException { + Mockito.doThrow(new SQLException()).when(connection1).close(); + parallelPhoenixConnection.close(); + Mockito.verify(connection2).close(); + } + + @Test + public void testCloseConnection2Error() throws SQLException { + Mockito.doThrow(new SQLException()).when(connection2).close(); + parallelPhoenixConnection.close(); + Mockito.verify(connection1).close(); + } + + @Test + public void testCloseBothConnectionError() throws SQLException { + Mockito.doThrow(new SQLException()).when(connection1).close(); + Mockito.doThrow(new SQLException()).when(connection2).close(); + try { + parallelPhoenixConnection.close(); + fail("Close should throw exception when both underlying close throw exceptions"); + } catch (SQLException e) { } -} \ No newline at end of file + Mockito.verify(connection1).close(); + Mockito.verify(connection2).close(); + } + + @Test + public void testConnection1CloseDelay() throws Exception { + CountDownLatch cdl = new CountDownLatch(1); + Supplier delaySupplier = getDelaySupplier(cdl); + context.chainOnConn1(delaySupplier); + + // Even though the chain on conn1 is lagging, the close is not + // chained and happens on a different executor pool. The close + // is async, and anyone of the connection can be closed first. + // We cannot deterministically determine which connection was + // closed first, hence we check on the count of close operation + // on both connection objects, and expect at least one to be called. + parallelPhoenixConnection.close(); + + long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + + assertTrue("Close should be called on at least one of the connections", + countConnection1 > 0 || countConnection2 > 0); + } + + @Test + public void testConnection2CloseDelay() throws Exception { + CountDownLatch cdl = new CountDownLatch(1); + Supplier delaySupplier = getDelaySupplier(cdl); + context.chainOnConn2(delaySupplier); + // Chain on conn2 is lagging, we should return after closing conn1 + // Even though the chain on conn2 is lagging, the close is not + // chained and happens on a different executor pool. The close + // is async, and anyone of the connection can be closed first. 
+ // We cannot deterministically determine which connection was + // closed first, hence we check on the count of close operation + // on both connection objects, and expect at least one to be called. + parallelPhoenixConnection.close(); + + long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + + assertTrue("Close should be called on at least one of the connections", + countConnection1 > 0 || countConnection2 > 0); + + cdl.countDown(); + } + + @Test + public void testConnectionCloseNoTimeout() throws Exception { + Properties properties = new Properties(); + properties.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, + "1000"); + ParallelPhoenixContext context = + new ParallelPhoenixContext(properties, Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + parallelPhoenixConnection = + new ParallelPhoenixConnection(context, CompletableFuture.completedFuture(connection1), + CompletableFuture.completedFuture(connection2)); + CountDownLatch cdl1 = new CountDownLatch(1); + CountDownLatch cdl2 = new CountDownLatch(1); + Supplier delaySupplier1 = getDelaySupplier(cdl1); + Supplier delaySupplier2 = getDelaySupplier(cdl2); + context.chainOnConn1(delaySupplier1); + context.chainOnConn2(delaySupplier2); + // Even though the chain on both conn1 and conn2 is lagging, + // the close is not chained and happens on a different executor pool. + // The close is async, and anyone of the connection can be closed first. + // We cannot deterministically determine which connection was + // closed first, hence we check on the count of close operation + // on both connection objects, and expect at least one to be called. 
+ parallelPhoenixConnection.close(); + + long countConnection1 = Mockito.mockingDetails(connection1).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + long countConnection2 = Mockito.mockingDetails(connection2).getInvocations().stream() + .map(InvocationOnMock::getMethod).filter(s -> s.getName().equals("close")).count(); + + assertTrue("Close should be called on at least one of the connections", + countConnection1 > 0 || countConnection2 > 0); + + cdl1.countDown(); + cdl2.countDown(); + } + + private void waitForConnectionClose(PhoenixConnection connection) throws Exception { + waitFor(() -> { + try { + Mockito.verify(connection).close(); + } catch (SQLException | WantedButNotInvoked e) { + return false; + } + return true; + }, 1000, 30000); + } + + private Supplier getDelaySupplier(CountDownLatch cdl) { + return (() -> { + try { + cdl.await(); + } catch (InterruptedException e) { + throw new CompletionException(e); + } + return null; + }); + } + + private Supplier getDelayConnectionSupplier(CountDownLatch cdl, + PhoenixConnection returnConnection) { + return (() -> { + try { + cdl.await(); + } catch (InterruptedException e) { + throw new CompletionException(e); + } + return returnConnection; + }); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixContextTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixContextTest.java index 1d0bc2c898c..215dc7e5cd5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixContextTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixContextTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,111 +24,116 @@ import java.util.List; import java.util.Properties; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.phoenix.jdbc.HighAvailabilityGroup.HAGroupInfo; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public class ParallelPhoenixContextTest { - List executorList; - - @Before - public void init() { - executorList = - Lists.newArrayList(new PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices(new TrackingThreadPoolExecutor(),new TrackingThreadPoolExecutor()), - new PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices(new TrackingThreadPoolExecutor(),new TrackingThreadPoolExecutor())); - } + List executorList; - private static class TrackingThreadPoolExecutor extends ThreadPoolExecutor { + @Before + public void init() { + executorList = Lists.newArrayList( + new PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices( + new TrackingThreadPoolExecutor(), new TrackingThreadPoolExecutor()), + new PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices( + new TrackingThreadPoolExecutor(), new TrackingThreadPoolExecutor())); + } - AtomicInteger tasksExecuted = new AtomicInteger(); + private static class TrackingThreadPoolExecutor extends ThreadPoolExecutor { - public TrackingThreadPoolExecutor() { - super(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - } + AtomicInteger tasksExecuted = new AtomicInteger(); - @Override - public void execute(Runnable r) { - super.execute(r); - tasksExecuted.incrementAndGet(); - } + public TrackingThreadPoolExecutor() { + super(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); } - @Test - public void testContructionFailsWithLessThan2ThreadPools() { - try { - ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), - Mockito.mock(HighAvailabilityGroup.class), - Lists.newArrayList(Mockito.mock(PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices.class)), null); - fail("Should not construct with less than 2 ThreadPools"); - } catch (IllegalArgumentException e) { - } + @Override + public void execute(Runnable r) { + super.execute(r); + tasksExecuted.incrementAndGet(); } + } - @Test - public void testPool1OutOfCapacity() throws Exception { - HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), - new HighAvailabilityGroup(haGroupInfo, - Mockito.mock(Properties.class), - Mockito.mock(ClusterRoleRecord.class), - HighAvailabilityGroup.State.READY), - executorList, Lists.newArrayList(Boolean.FALSE, Boolean.TRUE)); - CompletableFuture future1 = context.chainOnConn1(() -> true); - assertTrue(future1.isCompletedExceptionally()); - assertEquals(0, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - 
assertEquals(0, ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); - CompletableFuture future2 = context.chainOnConn2(() -> true); - assertTrue(future2.get()); - assertEquals(0, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + @Test + public void testContructionFailsWithLessThan2ThreadPools() { + try { + ParallelPhoenixContext context = + new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), + Lists.newArrayList( + Mockito.mock(PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices.class)), + null); + fail("Should not construct with less than 2 ThreadPools"); + } catch (IllegalArgumentException e) { } + } - @Test - public void testPool2OutOfCapacity() throws Exception { - HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); - ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), - new HighAvailabilityGroup(haGroupInfo, - Mockito.mock(Properties.class), - Mockito.mock(ClusterRoleRecord.class), - HighAvailabilityGroup.State.READY), - executorList, Lists.newArrayList(Boolean.TRUE, Boolean.FALSE)); - CompletableFuture future1 = context.chainOnConn1(() -> true); - assertTrue(future1.get()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - assertEquals(0, ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); - CompletableFuture future2 = context.chainOnConn2(() -> true); - assertTrue(future2.isCompletedExceptionally()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - assertEquals(0, ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); - } + @Test + public void testPool1OutOfCapacity() throws Exception { + HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); + ParallelPhoenixContext context = new ParallelPhoenixContext(new Properties(), + new HighAvailabilityGroup(haGroupInfo, Mockito.mock(Properties.class), + Mockito.mock(ClusterRoleRecord.class), HighAvailabilityGroup.State.READY), + executorList, Lists.newArrayList(Boolean.FALSE, Boolean.TRUE)); + CompletableFuture future1 = context.chainOnConn1(() -> true); + assertTrue(future1.isCompletedExceptionally()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + CompletableFuture future2 = context.chainOnConn2(() -> true); + assertTrue(future2.get()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + } - @Test - public void testPoolsHaveCapacity() throws Exception { - ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), - Mockito.mock(HighAvailabilityGroup.class), executorList, - Lists.newArrayList(Boolean.TRUE, Boolean.TRUE)); - CompletableFuture future1 = context.chainOnConn1(() -> true); - assertTrue(future1.get()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - assertEquals(0, 
((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); - CompletableFuture future2 = context.chainOnConn2(() -> true); - assertTrue(future2.get()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); - assertEquals(1, ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); - } + @Test + public void testPool2OutOfCapacity() throws Exception { + HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); + ParallelPhoenixContext context = new ParallelPhoenixContext(new Properties(), + new HighAvailabilityGroup(haGroupInfo, Mockito.mock(Properties.class), + Mockito.mock(ClusterRoleRecord.class), HighAvailabilityGroup.State.READY), + executorList, Lists.newArrayList(Boolean.TRUE, Boolean.FALSE)); + CompletableFuture future1 = context.chainOnConn1(() -> true); + assertTrue(future1.get()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + CompletableFuture future2 = context.chainOnConn2(() -> true); + assertTrue(future2.isCompletedExceptionally()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + } + + @Test + public void testPoolsHaveCapacity() throws Exception { + ParallelPhoenixContext context = + new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), + executorList, Lists.newArrayList(Boolean.TRUE, Boolean.TRUE)); + CompletableFuture future1 = context.chainOnConn1(() -> true); + assertTrue(future1.get()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(0, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + CompletableFuture future2 = context.chainOnConn2(() -> true); + assertTrue(future2.get()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(0).getExecutorService()).tasksExecuted.get()); + assertEquals(1, + ((TrackingThreadPoolExecutor) executorList.get(1).getExecutorService()).tasksExecuted.get()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetTest.java index dd6ccb7a1dc..f1922a82a08 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSetTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,161 +39,159 @@ public class ParallelPhoenixNullComparingResultSetTest { - ParallelPhoenixContext context; - ResultSet rs1; - ResultSet rs2; - CompletableFuture completableRs1; - CompletableFuture completableRs2; - - ParallelPhoenixResultSet resultSet; - - @Before - public void init() { - HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); - context = new ParallelPhoenixContext(new Properties(), - new HighAvailabilityGroup(haGroupInfo, - Mockito.mock(Properties.class), - Mockito.mock(ClusterRoleRecord.class), - HighAvailabilityGroup.State.READY), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - rs1 = Mockito.mock(ResultSet.class); - rs2 = Mockito.mock(ResultSet.class); - completableRs1 = CompletableFuture.completedFuture(rs1); - completableRs2 = CompletableFuture.completedFuture(rs2); - resultSet = new ParallelPhoenixResultSet(context, completableRs1, completableRs2); + ParallelPhoenixContext context; + ResultSet rs1; + ResultSet rs2; + CompletableFuture completableRs1; + CompletableFuture completableRs2; + + ParallelPhoenixResultSet resultSet; + + @Before + public void init() { + HAGroupInfo haGroupInfo = new HAGroupInfo("test", "test1", "test2"); + context = new ParallelPhoenixContext(new Properties(), + new HighAvailabilityGroup(haGroupInfo, Mockito.mock(Properties.class), + Mockito.mock(ClusterRoleRecord.class), HighAvailabilityGroup.State.READY), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + rs1 = Mockito.mock(ResultSet.class); + rs2 = Mockito.mock(ResultSet.class); + completableRs1 = CompletableFuture.completedFuture(rs1); + completableRs2 = CompletableFuture.completedFuture(rs2); + resultSet = new ParallelPhoenixResultSet(context, completableRs1, completableRs2); + } + + @Test + public void testRs1Null() throws SQLException { + when(rs1.next()).thenReturn(false); + when(rs2.next()).thenReturn(true); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + assertTrue(ncrs.next()); + assertEquals(rs2, ncrs.getResultSet()); + Mockito.verify(rs2).next(); + } + + @Test + public void testRs2Null() throws SQLException { + when(rs1.next()).thenReturn(true); + when(rs2.next()).thenReturn(false); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + assertTrue(ncrs.next()); + assertEquals(rs1, ncrs.getResultSet()); + Mockito.verify(rs1).next(); + } + + @Test + public void testRs1Rs2Null() throws SQLException { + when(rs1.next()).thenReturn(false); + when(rs2.next()).thenReturn(false); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + assertFalse(ncrs.next()); + assertTrue(rs1 == ncrs.getResultSet() || rs2 == ncrs.getResultSet()); + Mockito.verify(rs1).next(); + Mockito.verify(rs2).next(); + } + + @Test + public void testRs1ExceptionRs2Null() throws SQLException { + when(rs1.next()).thenThrow(new RuntimeException()); + when(rs2.next()).thenReturn(false); + 
ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + assertFalse(ncrs.next()); + assertEquals(rs2, ncrs.getResultSet()); + Mockito.verify(rs1).next(); + Mockito.verify(rs2).next(); + } + + @Test + public void testRs2Exception() throws SQLException { + when(rs1.next()).thenReturn(true); + when(rs2.next()).thenThrow(new RuntimeException()); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + assertTrue(ncrs.next()); + assertEquals(rs1, ncrs.getResultSet()); + Mockito.verify(rs1).next(); + } + + @Test + public void testRs1Rs2Exception() throws SQLException { + when(rs1.next()).thenThrow(new SQLException()); + when(rs2.next()).thenThrow(new SQLException()); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + try { + ncrs.next(); + fail("RS should've thrown exception"); + } catch (SQLException e) { } - - @Test - public void testRs1Null() throws SQLException { - when(rs1.next()).thenReturn(false); - when(rs2.next()).thenReturn(true); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - assertTrue(ncrs.next()); - assertEquals(rs2, ncrs.getResultSet()); - Mockito.verify(rs2).next(); - } - - @Test - public void testRs2Null() throws SQLException { - when(rs1.next()).thenReturn(true); - when(rs2.next()).thenReturn(false); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - assertTrue(ncrs.next()); - assertEquals(rs1, ncrs.getResultSet()); - Mockito.verify(rs1).next(); - } - - @Test - public void testRs1Rs2Null() throws SQLException { - when(rs1.next()).thenReturn(false); - when(rs2.next()).thenReturn(false); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - assertFalse(ncrs.next()); - assertTrue(rs1 == ncrs.getResultSet() || rs2 == ncrs.getResultSet()); - Mockito.verify(rs1).next(); - Mockito.verify(rs2).next(); - } - - @Test - public void testRs1ExceptionRs2Null() throws SQLException { - when(rs1.next()).thenThrow(new RuntimeException()); - when(rs2.next()).thenReturn(false); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - assertFalse(ncrs.next()); - assertEquals(rs2, ncrs.getResultSet()); - Mockito.verify(rs1).next(); - Mockito.verify(rs2).next(); - } - - @Test - public void testRs2Exception() throws SQLException { - when(rs1.next()).thenReturn(true); - when(rs2.next()).thenThrow(new RuntimeException()); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - assertTrue(ncrs.next()); - assertEquals(rs1, ncrs.getResultSet()); - Mockito.verify(rs1).next(); - } - - @Test - public void testRs1Rs2Exception() throws SQLException { - when(rs1.next()).thenThrow(new SQLException()); - when(rs2.next()).thenThrow(new SQLException()); - ParallelPhoenixNullComparingResultSet ncrs = - new 
ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - try { - ncrs.next(); - fail("RS should've thrown exception"); - } catch (SQLException e) { - } - Mockito.verify(rs1).next(); - Mockito.verify(rs2).next(); - } - - @Test - public void testErrorOnSingleNullRs1Null() throws SQLException { - when(rs1.next()).thenReturn(false); - when(rs2.next()).thenThrow(new RuntimeException()); - context.getProperties().setProperty( - ParallelPhoenixNullComparingResultSet.ERROR_ON_SINGLE_NULL_ATTRIB, "true"); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - try { - ncrs.next(); - fail("RS should've thrown exception"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL.getErrorCode(), - e.getErrorCode()); - } - Mockito.verify(rs1).next(); - Mockito.verify(rs2).next(); - } - - @Test - public void testErrorOnSingleNullRs2Null() throws SQLException { - when(rs1.next()).thenThrow(new RuntimeException()); - when(rs2.next()).thenReturn(false); - context.getProperties().setProperty( - ParallelPhoenixNullComparingResultSet.ERROR_ON_SINGLE_NULL_ATTRIB, "true"); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertNull(ncrs.getResultSet()); - try { - ncrs.next(); - fail("RS should've thrown exception"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL.getErrorCode(), - e.getErrorCode()); - } - Mockito.verify(rs1).next(); - Mockito.verify(rs2).next(); + Mockito.verify(rs1).next(); + Mockito.verify(rs2).next(); + } + + @Test + public void testErrorOnSingleNullRs1Null() throws SQLException { + when(rs1.next()).thenReturn(false); + when(rs2.next()).thenThrow(new RuntimeException()); + context.getProperties() + .setProperty(ParallelPhoenixNullComparingResultSet.ERROR_ON_SINGLE_NULL_ATTRIB, "true"); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + try { + ncrs.next(); + fail("RS should've thrown exception"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL.getErrorCode(), + e.getErrorCode()); } - - @Test - public void testReadValueAfterWaitRs2Null() throws SQLException { - Answer answer = (i -> { - Thread.sleep(2000); - return true; - }); - doAnswer(answer).when(rs1).next(); - when(rs1.getString(0)).thenReturn("test"); - when(rs2.next()).thenReturn(false); - ParallelPhoenixNullComparingResultSet ncrs = - new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); - assertTrue(ncrs.next()); - assertEquals(rs1, ncrs.getResultSet()); - assertEquals("test", ncrs.getString(0)); + Mockito.verify(rs1).next(); + Mockito.verify(rs2).next(); + } + + @Test + public void testErrorOnSingleNullRs2Null() throws SQLException { + when(rs1.next()).thenThrow(new RuntimeException()); + when(rs2.next()).thenReturn(false); + context.getProperties() + .setProperty(ParallelPhoenixNullComparingResultSet.ERROR_ON_SINGLE_NULL_ATTRIB, "true"); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertNull(ncrs.getResultSet()); + try { + ncrs.next(); + fail("RS should've thrown exception"); + } catch (SQLException e) { + 
assertEquals(SQLExceptionCode.HA_READ_FROM_CLUSTER_FAILED_ON_NULL.getErrorCode(), + e.getErrorCode()); } -} \ No newline at end of file + Mockito.verify(rs1).next(); + Mockito.verify(rs2).next(); + } + + @Test + public void testReadValueAfterWaitRs2Null() throws SQLException { + Answer answer = (i -> { + Thread.sleep(2000); + return true; + }); + doAnswer(answer).when(rs1).next(); + when(rs1.getString(0)).thenReturn("test"); + when(rs2.next()).thenReturn(false); + ParallelPhoenixNullComparingResultSet ncrs = + new ParallelPhoenixNullComparingResultSet(context, completableRs1, completableRs2); + assertTrue(ncrs.next()); + assertEquals(rs1, ncrs.getResultSet()); + assertEquals("test", ncrs.getString(0)); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatementTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatementTest.java index 1c157d025bd..a4a097bbea5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatementTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixPreparedStatementTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,104 +15,100 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; +import static org.junit.Assert.assertEquals; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.Properties; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; - -import static org.junit.Assert.assertEquals; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; public class ParallelPhoenixPreparedStatementTest { - ParallelPhoenixContext context; - CompletableFuture future1; - CompletableFuture future2; - PhoenixMonitoredPreparedStatement statement1; - PhoenixMonitoredPreparedStatement statement2; - - - ParallelPhoenixPreparedStatement phoenixPreparedStatement; - - @Before - public void init() throws Exception { - context = new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - - statement1 = Mockito.mock(PhoenixMonitoredPreparedStatement.class); - statement2 = Mockito.mock(PhoenixMonitoredPreparedStatement.class); - - future1 = CompletableFuture.completedFuture(statement1); - future2 = CompletableFuture.completedFuture(statement2); - - phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context,future1,future2); - } - - @Test - public void getStatement1() throws SQLException { - future1 = Mockito.mock(CompletableFuture.class); - future2 = Mockito.mock(CompletableFuture.class); - phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context,future1,future2); - assertEquals(future1, phoenixPreparedStatement.getStatement1()); - } - - @Test - public void getStatement2() throws SQLException { - future1 = Mockito.mock(CompletableFuture.class); - future2 = 
Mockito.mock(CompletableFuture.class); - phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context,future1,future2); - assertEquals(future2, phoenixPreparedStatement.getStatement2()); - } - - @Test - public void executeQuery() throws SQLException, ExecutionException, InterruptedException { - ResultSet mockResultSet1 = Mockito.mock(ResultSet.class); - ResultSet mockResultSet2 = Mockito.mock(ResultSet.class); - - Mockito.when(statement1.executeQuery()).thenReturn(mockResultSet1); - Mockito.when(statement2.executeQuery()).thenReturn(mockResultSet2); - - ResultSet rs = phoenixPreparedStatement.executeQuery(); - - //TODO: make this less dependant on sleep - Thread.sleep(5000); - - Mockito.verify(statement1).executeQuery(); - Mockito.verify(statement2).executeQuery(); - ParallelPhoenixResultSet parallelRS = (ParallelPhoenixResultSet) rs; - assertEquals(mockResultSet1,parallelRS.getResultSetFuture1().get()); - assertEquals(mockResultSet2,parallelRS.getResultSetFuture2().get()); - } - - @Test - public void setInt() throws SQLException, ExecutionException, InterruptedException { - phoenixPreparedStatement.setInt(1,2); - - //TODO: make this less dependant on sleep - Thread.sleep(5000); - - Mockito.verify(statement1).setInt(1,2); - Mockito.verify(statement2).setInt(1,2); - } - - @Test - public void execute() throws SQLException, ExecutionException, InterruptedException { - phoenixPreparedStatement.execute(); - - //TODO: make this less dependant on sleep - Thread.sleep(5000); - - Mockito.verify(statement1).execute(); - Mockito.verify(statement2).execute(); - } -} \ No newline at end of file + ParallelPhoenixContext context; + CompletableFuture future1; + CompletableFuture future2; + PhoenixMonitoredPreparedStatement statement1; + PhoenixMonitoredPreparedStatement statement2; + + ParallelPhoenixPreparedStatement phoenixPreparedStatement; + + @Before + public void init() throws Exception { + context = + new ParallelPhoenixContext(new Properties(), Mockito.mock(HighAvailabilityGroup.class), + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + + statement1 = Mockito.mock(PhoenixMonitoredPreparedStatement.class); + statement2 = Mockito.mock(PhoenixMonitoredPreparedStatement.class); + + future1 = CompletableFuture.completedFuture(statement1); + future2 = CompletableFuture.completedFuture(statement2); + + phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context, future1, future2); + } + + @Test + public void getStatement1() throws SQLException { + future1 = Mockito.mock(CompletableFuture.class); + future2 = Mockito.mock(CompletableFuture.class); + phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context, future1, future2); + assertEquals(future1, phoenixPreparedStatement.getStatement1()); + } + + @Test + public void getStatement2() throws SQLException { + future1 = Mockito.mock(CompletableFuture.class); + future2 = Mockito.mock(CompletableFuture.class); + phoenixPreparedStatement = new ParallelPhoenixPreparedStatement(context, future1, future2); + assertEquals(future2, phoenixPreparedStatement.getStatement2()); + } + + @Test + public void executeQuery() throws SQLException, ExecutionException, InterruptedException { + ResultSet mockResultSet1 = Mockito.mock(ResultSet.class); + ResultSet mockResultSet2 = Mockito.mock(ResultSet.class); + + Mockito.when(statement1.executeQuery()).thenReturn(mockResultSet1); + Mockito.when(statement2.executeQuery()).thenReturn(mockResultSet2); + + ResultSet rs = phoenixPreparedStatement.executeQuery(); + + 
// TODO: make this less dependant on sleep + Thread.sleep(5000); + + Mockito.verify(statement1).executeQuery(); + Mockito.verify(statement2).executeQuery(); + ParallelPhoenixResultSet parallelRS = (ParallelPhoenixResultSet) rs; + assertEquals(mockResultSet1, parallelRS.getResultSetFuture1().get()); + assertEquals(mockResultSet2, parallelRS.getResultSetFuture2().get()); + } + + @Test + public void setInt() throws SQLException, ExecutionException, InterruptedException { + phoenixPreparedStatement.setInt(1, 2); + + // TODO: make this less dependant on sleep + Thread.sleep(5000); + + Mockito.verify(statement1).setInt(1, 2); + Mockito.verify(statement2).setInt(1, 2); + } + + @Test + public void execute() throws SQLException, ExecutionException, InterruptedException { + phoenixPreparedStatement.execute(); + + // TODO: make this less dependant on sleep + Thread.sleep(5000); + + Mockito.verify(statement1).execute(); + Mockito.verify(statement2).execute(); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetTest.java index 3e9a7e0f254..9a72ebab196 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixResultSetTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import java.sql.ResultSet; import java.sql.SQLException; @@ -33,251 +32,226 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; public class ParallelPhoenixResultSetTest { - CompletableFuture completableRs1; - CompletableFuture completableRs2; - - ParallelPhoenixResultSet resultSet; - - @Before - public void init() { - completableRs1 = Mockito.mock(CompletableFuture.class); - completableRs2 = Mockito.mock(CompletableFuture.class); - resultSet = - new ParallelPhoenixResultSet( - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility - .getListOfSingleThreadExecutorServices(), - null), - completableRs1, completableRs2); - } - - @Test - public void testUnbound() throws SQLException { - ResultSet rs = resultSet.getResultSet(); - assertNull(rs); - } - - @Test - public void testNextBound() throws SQLException { - ResultSet rs = Mockito.mock(ResultSet.class); - resultSet.setResultSet(rs); + CompletableFuture completableRs1; + CompletableFuture completableRs2; + + ParallelPhoenixResultSet resultSet; + + @Before + public void init() { + completableRs1 = Mockito.mock(CompletableFuture.class); + completableRs2 = Mockito.mock(CompletableFuture.class); + resultSet = new ParallelPhoenixResultSet( + new ParallelPhoenixContext(new Properties(), null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null), + completableRs1, completableRs2); + } + + @Test + public void testUnbound() throws SQLException { + ResultSet rs = resultSet.getResultSet(); + assertNull(rs); + } + + @Test + public void testNextBound() throws SQLException { + ResultSet rs = Mockito.mock(ResultSet.class); + resultSet.setResultSet(rs); + resultSet.next(); + Mockito.verify(rs).next(); + Mockito.verifyNoMoreInteractions(rs); + } + + @Test + public void testRS1WinsNext() throws Exception { + + ResultSet rs1 = Mockito.mock(ResultSet.class); + ResultSet rs2 = Mockito.mock(ResultSet.class); + + Executor rsExecutor2 = Mockito.mock(Executor.class); + + CountDownLatch latch = new CountDownLatch(1); + + // inject a sleep + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { + try { + // TODO: Remove this sleep + latch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); + } + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(rsExecutor2).execute(any(Runnable.class)); + + completableRs1 = CompletableFuture.completedFuture(rs1); + + completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); + + resultSet = new ParallelPhoenixResultSet( + new ParallelPhoenixContext(new Properties(), null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null), + completableRs1, completableRs2); + + resultSet.next(); + + assertEquals(rs1, resultSet.getResultSet()); + } + + 
@Test + public void testRS2WinsNext() throws Exception { + ResultSet rs1 = Mockito.mock(ResultSet.class); + ResultSet rs2 = Mockito.mock(ResultSet.class); + + Executor rsExecutor1 = Mockito.mock(Executor.class); + CountDownLatch latch = new CountDownLatch(1); + // inject a sleep + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { + try { + latch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); + } + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(rsExecutor1).execute(any(Runnable.class)); + + completableRs1 = CompletableFuture.supplyAsync(() -> rs1, rsExecutor1); + completableRs2 = CompletableFuture.completedFuture(rs2); + + resultSet = new ParallelPhoenixResultSet( + new ParallelPhoenixContext(new Properties(), null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null), + completableRs1, completableRs2); + + resultSet.next(); + + assertEquals(rs2, resultSet.getResultSet()); + } + + @Test + public void testRS1FailsImmediatelyNext() throws Exception { + ResultSet rs2 = Mockito.mock(ResultSet.class); + Executor rsExecutor2 = Mockito.mock(Executor.class); + CountDownLatch latch = new CountDownLatch(1); + // inject a sleep + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { + try { + latch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); + } + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(rsExecutor2).execute(any(Runnable.class)); + + completableRs1 = new CompletableFuture<>(); + completableRs1.completeExceptionally(new RuntimeException("Failure")); + + completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); + + resultSet = new ParallelPhoenixResultSet( + new ParallelPhoenixContext(new Properties(), null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null), + completableRs1, completableRs2); + + resultSet.next(); + + assertEquals(rs2, resultSet.getResultSet()); + } + + @Test + public void testRS1SucceedsDuringNext() throws Exception { + ResultSet rs1 = Mockito.mock(ResultSet.class); + ResultSet rs2 = Mockito.mock(ResultSet.class); + + Executor rsExecutor1 = Mockito.mock(Executor.class); + Executor rsExecutor2 = Mockito.mock(Executor.class); + CountDownLatch latch0 = new CountDownLatch(1); + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + CountDownLatch latch3 = new CountDownLatch(1); + // inject a sleep + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { + try { + latch1.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); + } + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(rsExecutor1).execute(any(Runnable.class)); + + // inject a sleep + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { + try { + latch2.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); + } + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(rsExecutor2).execute(any(Runnable.class)); + + completableRs1 = CompletableFuture.supplyAsync(() -> rs1, rsExecutor1); + completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); + + resultSet = new 
ParallelPhoenixResultSet( + new ParallelPhoenixContext(new Properties(), null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null), + completableRs1, completableRs2); + + // run next in the background + ExecutorService testService = Executors.newSingleThreadExecutor(); + testService.execute(() -> { + try { + latch0.countDown(); resultSet.next(); - Mockito.verify(rs).next(); - Mockito.verifyNoMoreInteractions(rs); - } - - @Test - public void testRS1WinsNext() throws Exception { - - ResultSet rs1 = Mockito.mock(ResultSet.class); - ResultSet rs2 = Mockito.mock(ResultSet.class); - - Executor rsExecutor2 = Mockito.mock(Executor.class); - - CountDownLatch latch = new CountDownLatch(1); - - //inject a sleep - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - //TODO: Remove this sleep - latch.await(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(rsExecutor2).execute(any(Runnable.class)); - - completableRs1 = CompletableFuture.completedFuture(rs1); - - completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); - - resultSet = - new ParallelPhoenixResultSet( - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility - .getListOfSingleThreadExecutorServices(), - null), - completableRs1, completableRs2); + } catch (SQLException e) { + throw new RuntimeException(e); + } finally { + latch3.countDown(); + } + }); - resultSet.next(); + // Wait for next to start + latch0.await(10, TimeUnit.SECONDS); - assertEquals(rs1, resultSet.getResultSet()); - } - - @Test - public void testRS2WinsNext() throws Exception { - ResultSet rs1 = Mockito.mock(ResultSet.class); - ResultSet rs2 = Mockito.mock(ResultSet.class); - - Executor rsExecutor1 = Mockito.mock(Executor.class); - CountDownLatch latch = new CountDownLatch(1); - //inject a sleep - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - latch.await(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(rsExecutor1).execute(any(Runnable.class)); - - completableRs1 = CompletableFuture.supplyAsync(() -> rs1, rsExecutor1); - completableRs2 = CompletableFuture.completedFuture(rs2); - - resultSet = - new ParallelPhoenixResultSet( - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility - .getListOfSingleThreadExecutorServices(), - null), - completableRs1, completableRs2); + // Start RS1 asynch + latch1.countDown(); - resultSet.next(); + // Wait for next to finish + latch3.await(10, TimeUnit.SECONDS); - assertEquals(rs2, resultSet.getResultSet()); - } - - @Test - public void testRS1FailsImmediatelyNext() throws Exception { - ResultSet rs2 = Mockito.mock(ResultSet.class); - Executor rsExecutor2 = Mockito.mock(Executor.class); - CountDownLatch latch = new CountDownLatch(1); - //inject a sleep - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - latch.await(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(rsExecutor2).execute(any(Runnable.class)); - - completableRs1 = new 
CompletableFuture<>(); - completableRs1.completeExceptionally(new RuntimeException("Failure")); - - completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); - - resultSet = - new ParallelPhoenixResultSet( - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility - .getListOfSingleThreadExecutorServices(), - null), - completableRs1, completableRs2); - - resultSet.next(); + assertEquals(rs1, resultSet.getResultSet()); - assertEquals(rs2, resultSet.getResultSet()); - } - - @Test - public void testRS1SucceedsDuringNext() throws Exception { - ResultSet rs1 = Mockito.mock(ResultSet.class); - ResultSet rs2 = Mockito.mock(ResultSet.class); - - Executor rsExecutor1 = Mockito.mock(Executor.class); - Executor rsExecutor2 = Mockito.mock(Executor.class); - CountDownLatch latch0 = new CountDownLatch(1); - CountDownLatch latch1 = new CountDownLatch(1); - CountDownLatch latch2 = new CountDownLatch(1); - CountDownLatch latch3 = new CountDownLatch(1); - //inject a sleep - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - latch1.await(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(rsExecutor1).execute(any(Runnable.class)); - - //inject a sleep - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - latch2.await(10, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(rsExecutor2).execute(any(Runnable.class)); - - completableRs1 = CompletableFuture.supplyAsync(() -> rs1, rsExecutor1); - completableRs2 = CompletableFuture.supplyAsync(() -> rs2, rsExecutor2); - - resultSet = - new ParallelPhoenixResultSet( - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility - .getListOfSingleThreadExecutorServices(), - null), - completableRs1, completableRs2); - - //run next in the background - ExecutorService testService = Executors.newSingleThreadExecutor(); - testService.execute(() -> { - try { - latch0.countDown(); - resultSet.next(); - } catch (SQLException e) { - throw new RuntimeException(e); - } finally { - latch3.countDown(); - } - }); - - //Wait for next to start - latch0.await(10, TimeUnit.SECONDS); - - //Start RS1 asynch - latch1.countDown(); - - //Wait for next to finish - latch3.await(10, TimeUnit.SECONDS); - - assertEquals(rs1, resultSet.getResultSet()); - - //Cleanup - latch2.countDown(); - } -} \ No newline at end of file + // Cleanup + latch2.countDown(); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixUtilTest.java index 29d672a4e23..ac78ff1ae7b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ParallelPhoenixUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.jdbc; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.junit.Test; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import java.sql.SQLException; import java.util.ArrayList; @@ -34,144 +33,145 @@ import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; public class ParallelPhoenixUtilTest { - ParallelPhoenixUtil util = ParallelPhoenixUtil.INSTANCE; - - private static final ParallelPhoenixContext context = - new ParallelPhoenixContext(new Properties(), null, - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); - - @Test - public void getAnyOfNonExceptionallySingleFutureTest() throws Exception { - String value = "done"; - CompletableFuture future = CompletableFuture.completedFuture(value); - - List> futures = new ArrayList<>(); - futures.add(future); - String result = (String) util.getAnyOfNonExceptionally(futures, context); - assertEquals(value,result); - } - - @Test - public void getAnyOfNonExceptionallyAllFailedFutureTest() throws Exception { - CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(new RuntimeException("Err")); - - List> futures = new ArrayList<>(); - futures.add(future); - try { - util.getAnyOfNonExceptionally(futures, context); - fail(); - } catch (SQLException e) { - } + ParallelPhoenixUtil util = ParallelPhoenixUtil.INSTANCE; + + private static final ParallelPhoenixContext context = new ParallelPhoenixContext(new Properties(), + null, HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + + @Test + public void getAnyOfNonExceptionallySingleFutureTest() throws Exception { + String value = "done"; + CompletableFuture future = CompletableFuture.completedFuture(value); + + List> futures = new ArrayList<>(); + futures.add(future); + String result = (String) util.getAnyOfNonExceptionally(futures, context); + assertEquals(value, result); + } + + @Test + public void getAnyOfNonExceptionallyAllFailedFutureTest() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + future.completeExceptionally(new RuntimeException("Err")); + + List> futures = new ArrayList<>(); + futures.add(future); + try { + util.getAnyOfNonExceptionally(futures, context); + fail(); + } catch (SQLException e) { } - - @Test - public void getAnyOfNonExceptionallyMultipleFuturesTest() throws Exception { - 
CountDownLatch latch = new CountDownLatch(1); - Executor delayedExecutor = getLatchedMockExecutor(latch); - - CompletableFuture future1 = CompletableFuture.supplyAsync(() -> "1", delayedExecutor); - CompletableFuture future2 = CompletableFuture.supplyAsync(() -> "2", delayedExecutor); - CompletableFuture future3 = CompletableFuture.supplyAsync(() -> "3"); //No delay - CompletableFuture future4 = CompletableFuture.supplyAsync(() -> "4", delayedExecutor); - - List> futures = new ArrayList<>(); - futures.add(future1); - futures.add(future2); - futures.add(future3); - futures.add(future4); - String result = (String) util.getAnyOfNonExceptionally(futures, context); - assertEquals("3",result); + } + + @Test + public void getAnyOfNonExceptionallyMultipleFuturesTest() throws Exception { + CountDownLatch latch = new CountDownLatch(1); + Executor delayedExecutor = getLatchedMockExecutor(latch); + + CompletableFuture future1 = CompletableFuture.supplyAsync(() -> "1", delayedExecutor); + CompletableFuture future2 = CompletableFuture.supplyAsync(() -> "2", delayedExecutor); + CompletableFuture future3 = CompletableFuture.supplyAsync(() -> "3"); // No delay + CompletableFuture future4 = CompletableFuture.supplyAsync(() -> "4", delayedExecutor); + + List> futures = new ArrayList<>(); + futures.add(future1); + futures.add(future2); + futures.add(future3); + futures.add(future4); + String result = (String) util.getAnyOfNonExceptionally(futures, context); + assertEquals("3", result); + } + + @Test + public void getAnyOfNonExceptionallyTimeoutTest() throws Exception { + CompletableFuture future1 = new CompletableFuture<>(); + future1.completeExceptionally(new RuntimeException("Err")); + + CompletableFuture future2 = CompletableFuture.supplyAsync(() -> { + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + throw new CompletionException(e); + } + return "Success"; + }); + List> futures = new ArrayList<>(); + futures.add(future1); + futures.add(future2); + + Properties props = new Properties(); + props.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, "2000"); + ParallelPhoenixContext ctx = new ParallelPhoenixContext(props, null, + HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), null); + long startTime = EnvironmentEdgeManager.currentTime(); + try { + util.getAnyOfNonExceptionally(futures, ctx); + fail("Should've timedout"); + } catch (SQLException e) { + long elapsedTime = EnvironmentEdgeManager.currentTime() - startTime; + assertTrue(elapsedTime >= 2000); + assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), e.getErrorCode()); } - - @Test - public void getAnyOfNonExceptionallyTimeoutTest() throws Exception { - CompletableFuture future1 = new CompletableFuture<>(); - future1.completeExceptionally(new RuntimeException("Err")); - - CompletableFuture future2 = CompletableFuture.supplyAsync(() -> { - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - throw new CompletionException(e); - } - return "Success"; - }); - List> futures = new ArrayList<>(); - futures.add(future1); - futures.add(future2); - - Properties props = new Properties(); - props.setProperty(ParallelPhoenixUtil.PHOENIX_HA_PARALLEL_OPERATION_TIMEOUT_ATTRIB, "2000"); - ParallelPhoenixContext ctx = - new ParallelPhoenixContext(props, null, - HighAvailabilityTestingUtility.getListOfSingleThreadExecutorServices(), - null); - long startTime = EnvironmentEdgeManager.currentTime(); + } + + @Test + public void 
getAnyOfNonExceptionallyFailedFuturesFinishFirstTest() throws Exception { + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + Executor executor1 = getLatchedMockExecutor(latch1); + Executor executor2 = getLatchedMockExecutor(latch2); + + CompletableFuture future1 = CompletableFuture.supplyAsync(() -> { + throw new RuntimeException(); + }, executor1); + CompletableFuture future2 = CompletableFuture.supplyAsync(() -> { + throw new RuntimeException(); + }, executor1); + CompletableFuture future3 = CompletableFuture.supplyAsync(() -> "3", executor2); + CompletableFuture future4 = CompletableFuture.supplyAsync(() -> { + throw new RuntimeException(); + }, executor1); + + List> futures = new ArrayList<>(); + futures.add(future1); + futures.add(future2); + futures.add(future3); + futures.add(future4); + + // Make sure the exceptions are first + latch1.countDown(); + Thread.sleep(1000); + latch2.countDown(); + + String result = (String) util.getAnyOfNonExceptionally(futures, context); + assertEquals("3", result); + } + + private Executor getLatchedMockExecutor(CountDownLatch latch) { + Executor delayedExecutor = Mockito.mock(Executor.class); + + doAnswer((InvocationOnMock invocation) -> { + Thread thread = new Thread(() -> { try { - util.getAnyOfNonExceptionally(futures, ctx); - fail("Should've timedout"); - } catch (SQLException e) { - long elapsedTime = EnvironmentEdgeManager.currentTime() - startTime; - assertTrue(elapsedTime >= 2000); - assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode(), e.getErrorCode()); + latch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(); } - } - - @Test - public void getAnyOfNonExceptionallyFailedFuturesFinishFirstTest() throws Exception { - CountDownLatch latch1 = new CountDownLatch(1); - CountDownLatch latch2 = new CountDownLatch(1); - Executor executor1 = getLatchedMockExecutor(latch1); - Executor executor2 = getLatchedMockExecutor(latch2); - - CompletableFuture future1 = CompletableFuture.supplyAsync(() -> {throw new RuntimeException();}, executor1); - CompletableFuture future2 = CompletableFuture.supplyAsync(() -> {throw new RuntimeException();}, executor1); - CompletableFuture future3 = CompletableFuture.supplyAsync(() -> "3",executor2); - CompletableFuture future4 = CompletableFuture.supplyAsync(() -> {throw new RuntimeException();}, executor1); - - List> futures = new ArrayList<>(); - futures.add(future1); - futures.add(future2); - futures.add(future3); - futures.add(future4); - - //Make sure the exceptions are first - latch1.countDown(); - Thread.sleep(1000); - latch2.countDown(); - - String result = (String) util.getAnyOfNonExceptionally(futures, context); - assertEquals("3",result); - } - - private Executor getLatchedMockExecutor(CountDownLatch latch) { - Executor delayedExecutor = Mockito.mock(Executor.class); - - doAnswer( - (InvocationOnMock invocation) -> { - Thread thread = new Thread(() -> { - try { - latch.await(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - throw new RuntimeException(); - } - ((Runnable) invocation.getArguments()[0]).run(); - return; - }); - thread.start(); - return null; - } - ).when(delayedExecutor).execute(any(Runnable.class)); - - return delayedExecutor; - } -} \ No newline at end of file + ((Runnable) invocation.getArguments()[0]).run(); + return; + }); + thread.start(); + return null; + }).when(delayedExecutor).execute(any(Runnable.class)); + + return delayedExecutor; + } +} diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixDriverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixDriverTest.java index e7afb30ee29..dcc49cbd49c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixDriverTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixDriverTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,106 +39,109 @@ public class PhoenixDriverTest extends BaseConnectionlessQueryTest { + @Test + public void testFirstConnectionWhenPropsHasTenantId() throws Exception { + Properties props = new Properties(); + final String tenantId = "00Dxx0000001234"; + props.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - @Test - public void testFirstConnectionWhenPropsHasTenantId() throws Exception { - Properties props = new Properties(); - final String tenantId = "00Dxx0000001234"; - props.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + Connection connection = new PhoenixTestDriver().connect(getUrl(), props); + assertEquals(tenantId, connection.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB)); + } - Connection connection = new PhoenixTestDriver().connect(getUrl(), props); - assertEquals(tenantId, connection.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB)); - } + @Test + public void testFirstConnectionWhenUrlHasTenantId() throws Exception { + final String tenantId = "00Dxx0000001234"; + String url = getUrl() + ";" + PhoenixRuntime.TENANT_ID_ATTRIB + "=" + tenantId; + Driver driver = new PhoenixTestDriver(); + + driver.connect(url, new Properties()); + } - @Test - public void testFirstConnectionWhenUrlHasTenantId() throws Exception { - final String tenantId = "00Dxx0000001234"; - String url = getUrl() + ";" + PhoenixRuntime.TENANT_ID_ATTRIB + "=" + tenantId; - Driver driver = new PhoenixTestDriver(); + @Test + public void testMaxMutationSizeSetCorrectly() throws SQLException { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "100"); + connectionProperties.setProperty(QueryServices.IMMUTABLE_ROWS_ATTRIB, "100"); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - driver.connect(url, new Properties()); + PreparedStatement stmt = connection.prepareStatement( + "upsert into " + ATABLE + " (organization_id, entity_id, a_integer) values (?,?,?)"); + try { + for (int i = 0; i < 200; i++) { + stmt.setString(1, "AAAA" + i); + stmt.setString(2, "BBBB" + i); + stmt.setInt(3, 1); + stmt.execute(); + } + fail( + "Upsert should have failed since the number of upserts (200) is greater than the MAX_MUTATION_SIZE_ATTRIB (100)"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testMaxMutationSizeSetCorrectly() throws SQLException { - Properties connectionProperties = new Properties(); - connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,"100"); - connectionProperties.setProperty(QueryServices.IMMUTABLE_ROWS_ATTRIB,"100"); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + @Test + public void testMaxMutationSizeInBytesSetCorrectly() 
throws Exception { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, "100"); + PhoenixConnection connection = + (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties); + assertEquals(100L, connection.getMutateBatchSizeBytes()); + assertEquals(100L, connection.getMutationState().getBatchSizeBytes()); + } - PreparedStatement stmt = connection.prepareStatement("upsert into " + ATABLE + " (organization_id, entity_id, a_integer) values (?,?,?)"); - try { - for (int i = 0; i < 200; i++) { - stmt.setString(1, "AAAA" + i); - stmt.setString(2, "BBBB" + i); - stmt.setInt(3, 1); - stmt.execute(); - } - fail("Upsert should have failed since the number of upserts (200) is greater than the MAX_MUTATION_SIZE_ATTRIB (100)"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), e.getErrorCode()); - } + @Test + public void testDisallowNegativeScn() { + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, String.valueOf(-100)); + try { + DriverManager.getConnection(getUrl(), props); + fail("Creating a phoenix connection with negative scn is not allowed"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.INVALID_SCN.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testMaxMutationSizeInBytesSetCorrectly() throws Exception { - Properties connectionProperties = new Properties(); - connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB,"100"); - PhoenixConnection connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties); - assertEquals(100L, connection.getMutateBatchSizeBytes()); - assertEquals(100L, connection.getMutationState().getBatchSizeBytes()); + @Ignore + @Test + public void testDisallowIsolationLevel() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.setTransactionIsolation(Connection.TRANSACTION_NONE); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + try { + conn = DriverManager.getConnection(getUrl()); + conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL.getErrorCode(), + e.getErrorCode()); } - - @Test - public void testDisallowNegativeScn() { - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, String.valueOf(-100)); - try { - DriverManager.getConnection(getUrl(), props); - fail("Creating a phoenix connection with negative scn is not allowed"); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.INVALID_SCN.getErrorCode(), e.getErrorCode()); - } + try { + conn = DriverManager.getConnection(getUrl()); + conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + fail(); + } catch (SQLFeatureNotSupportedException e) { } - - @Ignore - @Test - public void testDisallowIsolationLevel() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.setTransactionIsolation(Connection.TRANSACTION_NONE); - conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED); - conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - try { - conn = DriverManager.getConnection(getUrl()); - 
conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_ISOLATION_LEVEL.getErrorCode(), e.getErrorCode()); - } - try { - conn = DriverManager.getConnection(getUrl()); - conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - fail(); - } catch(SQLFeatureNotSupportedException e) { - } - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - props.setProperty(QueryServices.TRANSACTIONS_ENABLED, Boolean.toString(true)); - conn = DriverManager.getConnection(getUrl(), props); - conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); - try { - conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); - fail(); - } catch(SQLFeatureNotSupportedException e) { - } + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + props.setProperty(QueryServices.TRANSACTIONS_ENABLED, Boolean.toString(true)); + conn = DriverManager.getConnection(getUrl(), props); + conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + try { + conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + fail(); + } catch (SQLFeatureNotSupportedException e) { } + } - @Test - public void testInvalidURL() throws Exception { - Class.forName(PhoenixDriver.class.getName()); - try { + @Test + public void testInvalidURL() throws Exception { + Class.forName(PhoenixDriver.class.getName()); + try { DriverManager.getConnection("any text whatever you want to put here"); fail("Should have failed due to invalid driver"); - } catch(Exception e) { - } + } catch (Exception e) { } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java index fe439d7462a..0d936eec6b9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.jdbc; import static org.junit.Assert.assertEquals; @@ -38,530 +37,419 @@ public class PhoenixEmbeddedDriverTest { - @Test - public void testGetZKConnectionInfo() throws SQLException { - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - String defaultQuorum = config.get(HConstants.ZOOKEEPER_QUORUM); - - for (String protocol : new String[] { "phoenix", "phoenix+zk" }) { - String[] urls = - new String[] { null, - "", - "jdbc:" + protocol + "", - "jdbc:" + protocol + ";test=true", - "jdbc:" + protocol + ":localhost", - "localhost", - "localhost;", - "jdbc:" + protocol + ":localhost:123", - "jdbc:" + protocol + ":localhost:123;foo=bar", - "localhost:123", - "jdbc:" + protocol + ":localhost:123:/hbase", - "jdbc:" + protocol + ":localhost:123:/foo-bar", - "jdbc:" + protocol + ":localhost:123:/foo-bar;foo=bas", - "localhost:123:/foo-bar", - "jdbc:" + protocol + ":localhost:/hbase", - "jdbc:" + protocol + ":localhost:/foo-bar", - "jdbc:" + protocol + ":localhost:/foo-bar;test=true", - "localhost:/foo-bar", - "jdbc:" + protocol + ":v1,v2,v3", - "jdbc:" + protocol + ":v1,v2,v3;", - "jdbc:" + protocol + ":v1,v2,v3;test=true", - "v1,v2,v3", - "jdbc:" + protocol + ":v1,v2,v3:/hbase", - "jdbc:" + protocol + ":v1,v2,v3:/hbase;test=true", - "v1,v2,v3:/foo-bar", - "jdbc:" + protocol + ":v1,v2,v3:123:/hbase", - "v1,v2,v3:123:/hbase", - "jdbc:" + protocol + ":v1,v2,v3:123:/hbase;test=false", - "jdbc:" + protocol + ":v1,v2,v3:123:/hbase:user/principal:/user.keytab;test=false", - "jdbc:" + protocol + ":v1,v2,v3:123:/foo-bar:user/principal:/user.keytab;test=false", - "jdbc:" + protocol + ":v1,v2,v3:123:user/principal:/user.keytab;test=false", - "jdbc:" + protocol + ":v1,v2,v3:user/principal:/user.keytab;test=false", - "jdbc:" + protocol + ":v1,v2,v3:/hbase:user/principal:/user.keytab;test=false", - "jdbc:" + protocol + ":v1,v2,v3:LongRunningQueries;test=false", - "jdbc:" + protocol + ":v1,v2,v3:345:LongRunningQueries;test=false", - "jdbc:" + protocol + ":localhost:1234:user:C:\\user.keytab", - "jdbc:" + protocol + ":v1,v2,v3:345:/hbase:user1:C:\\Documents and Settings\\user1\\user1.keytab;test=false", }; - String[][] partsList = - new String[][] { { defaultQuorum + ":2181", null, "/hbase" }, - { defaultQuorum + ":2181", null, "/hbase" }, - { defaultQuorum + ":2181", null, "/hbase" }, {}, - { "localhost:2181", null, "/hbase" }, - { "localhost:2181", null, "/hbase" }, - { "localhost:2181", null, "/hbase" }, - { "localhost:123", null, "/hbase" }, - { "localhost:123", null, "/hbase" }, - { "localhost:123", null, "/hbase" }, - { "localhost:123", null, "/hbase" }, - { "localhost:123", null, "/foo-bar" }, - { "localhost:123", null, "/foo-bar" }, - { "localhost:123", null, "/foo-bar" }, - { "localhost:2181", null, "/hbase" }, - { "localhost:2181", null, "/foo-bar" }, - { "localhost:2181", null, "/foo-bar" }, - { "localhost:2181", null, "/foo-bar" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, - { "v1:2181,v2:2181,v3:2181", null, "/foo-bar" }, - { "v1:123,v2:123,v3:123", null, "/hbase" }, - { "v1:123,v2:123,v3:123", null, "/hbase" }, - { "v1:123,v2:123,v3:123", null, "/hbase" }, - { "v1:123,v2:123,v3:123", null, "/hbase", "user/principal", - "/user.keytab" }, - { "v1:123,v2:123,v3:123", null, 
"/foo-bar", "user/principal", - "/user.keytab" }, - { "v1:123,v2:123,v3:123", null, "/hbase", "user/principal", - "/user.keytab" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase", "user/principal", - "/user.keytab" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase", "user/principal", - "/user.keytab" }, - { "v1:2181,v2:2181,v3:2181", null, "/hbase", "LongRunningQueries" }, - { "v1:345,v2:345,v3:345", null, "/hbase", "LongRunningQueries" }, - { "localhost:1234", null, "/hbase", "user", "C:\\user.keytab" }, - { "v1:345,v2:345,v3:345", null, "/hbase", "user1", - "C:\\Documents and Settings\\user1\\user1.keytab" }, }; - assertEquals(urls.length, partsList.length); - for (int i = 0; i < urls.length; i++) { - int pos = 0; - try { - ZKConnectionInfo info = - (ZKConnectionInfo) ConnectionInfo.create(urls[i], null, null); - String[] parts = partsList[i]; - if (parts.length > pos) { - assertEquals(parts[pos], info.getZkHosts()); - } - if (parts.length > ++pos) { - assertEquals(parts[pos], info.getZkPort()); - } - if (parts.length > ++pos) { - assertEquals(parts[pos], info.getZkRootNode()); - } - if (parts.length > ++pos) { - assertEquals(parts[pos], info.getPrincipal()); - } - if (parts.length > ++pos) { - assertEquals(parts[pos], info.getKeytab()); - } - } catch (AssertionError e) { - throw new AssertionError( - "For \"" + urls[i] + " at position: " + pos + "\": " + e.getMessage()); - } - } + @Test + public void testGetZKConnectionInfo() throws SQLException { + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + String defaultQuorum = config.get(HConstants.ZOOKEEPER_QUORUM); + + for (String protocol : new String[] { "phoenix", "phoenix+zk" }) { + String[] urls = new String[] { null, "", "jdbc:" + protocol + "", + "jdbc:" + protocol + ";test=true", "jdbc:" + protocol + ":localhost", "localhost", + "localhost;", "jdbc:" + protocol + ":localhost:123", + "jdbc:" + protocol + ":localhost:123;foo=bar", "localhost:123", + "jdbc:" + protocol + ":localhost:123:/hbase", + "jdbc:" + protocol + ":localhost:123:/foo-bar", + "jdbc:" + protocol + ":localhost:123:/foo-bar;foo=bas", "localhost:123:/foo-bar", + "jdbc:" + protocol + ":localhost:/hbase", "jdbc:" + protocol + ":localhost:/foo-bar", + "jdbc:" + protocol + ":localhost:/foo-bar;test=true", "localhost:/foo-bar", + "jdbc:" + protocol + ":v1,v2,v3", "jdbc:" + protocol + ":v1,v2,v3;", + "jdbc:" + protocol + ":v1,v2,v3;test=true", "v1,v2,v3", + "jdbc:" + protocol + ":v1,v2,v3:/hbase", "jdbc:" + protocol + ":v1,v2,v3:/hbase;test=true", + "v1,v2,v3:/foo-bar", "jdbc:" + protocol + ":v1,v2,v3:123:/hbase", "v1,v2,v3:123:/hbase", + "jdbc:" + protocol + ":v1,v2,v3:123:/hbase;test=false", + "jdbc:" + protocol + ":v1,v2,v3:123:/hbase:user/principal:/user.keytab;test=false", + "jdbc:" + protocol + ":v1,v2,v3:123:/foo-bar:user/principal:/user.keytab;test=false", + "jdbc:" + protocol + ":v1,v2,v3:123:user/principal:/user.keytab;test=false", + "jdbc:" + protocol + ":v1,v2,v3:user/principal:/user.keytab;test=false", + "jdbc:" + protocol + ":v1,v2,v3:/hbase:user/principal:/user.keytab;test=false", + "jdbc:" + protocol + ":v1,v2,v3:LongRunningQueries;test=false", + "jdbc:" + protocol + ":v1,v2,v3:345:LongRunningQueries;test=false", + "jdbc:" + protocol + ":localhost:1234:user:C:\\user.keytab", "jdbc:" + protocol + + ":v1,v2,v3:345:/hbase:user1:C:\\Documents and Settings\\user1\\user1.keytab;test=false", }; + String[][] partsList = new String[][] { { defaultQuorum + ":2181", null, "/hbase" }, + { defaultQuorum + ":2181", null, "/hbase" }, { 
defaultQuorum + ":2181", null, "/hbase" }, + {}, { "localhost:2181", null, "/hbase" }, { "localhost:2181", null, "/hbase" }, + { "localhost:2181", null, "/hbase" }, { "localhost:123", null, "/hbase" }, + { "localhost:123", null, "/hbase" }, { "localhost:123", null, "/hbase" }, + { "localhost:123", null, "/hbase" }, { "localhost:123", null, "/foo-bar" }, + { "localhost:123", null, "/foo-bar" }, { "localhost:123", null, "/foo-bar" }, + { "localhost:2181", null, "/hbase" }, { "localhost:2181", null, "/foo-bar" }, + { "localhost:2181", null, "/foo-bar" }, { "localhost:2181", null, "/foo-bar" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase" }, + { "v1:2181,v2:2181,v3:2181", null, "/foo-bar" }, { "v1:123,v2:123,v3:123", null, "/hbase" }, + { "v1:123,v2:123,v3:123", null, "/hbase" }, { "v1:123,v2:123,v3:123", null, "/hbase" }, + { "v1:123,v2:123,v3:123", null, "/hbase", "user/principal", "/user.keytab" }, + { "v1:123,v2:123,v3:123", null, "/foo-bar", "user/principal", "/user.keytab" }, + { "v1:123,v2:123,v3:123", null, "/hbase", "user/principal", "/user.keytab" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase", "user/principal", "/user.keytab" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase", "user/principal", "/user.keytab" }, + { "v1:2181,v2:2181,v3:2181", null, "/hbase", "LongRunningQueries" }, + { "v1:345,v2:345,v3:345", null, "/hbase", "LongRunningQueries" }, + { "localhost:1234", null, "/hbase", "user", "C:\\user.keytab" }, { "v1:345,v2:345,v3:345", + null, "/hbase", "user1", "C:\\Documents and Settings\\user1\\user1.keytab" }, }; + assertEquals(urls.length, partsList.length); + for (int i = 0; i < urls.length; i++) { + int pos = 0; + try { + ZKConnectionInfo info = (ZKConnectionInfo) ConnectionInfo.create(urls[i], null, null); + String[] parts = partsList[i]; + if (parts.length > pos) { + assertEquals(parts[pos], info.getZkHosts()); + } + if (parts.length > ++pos) { + assertEquals(parts[pos], info.getZkPort()); + } + if (parts.length > ++pos) { + assertEquals(parts[pos], info.getZkRootNode()); + } + if (parts.length > ++pos) { + assertEquals(parts[pos], info.getPrincipal()); + } + if (parts.length > ++pos) { + assertEquals(parts[pos], info.getKeytab()); + } + } catch (AssertionError e) { + throw new AssertionError( + "For \"" + urls[i] + " at position: " + pos + "\": " + e.getMessage()); } - + } } - @Test - public void testGetMasterConnectionInfo() throws SQLException { - assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0")>=0); - Configuration config = - HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - String defaultMasters = "defaultmaster1:1243,defaultmaster2:2345"; - config.set("hbase.masters", defaultMasters); - - String[] urls = new String[] { - null, - "", - "jdbc:phoenix+master", - "jdbc:phoenix+master;test=true", - "jdbc:phoenix", - "jdbc:phoenix+master:localhost", - "localhost", - "localhost;", - "localhost:123", - "localhost,localhost2:123;", - "localhost\\:123", - "localhost\\:123:", - "localhost\\:123::", - "localhost\\:123:::", - "localhost\\:123::::", - "localhost\\:123:::::", - "localhost\\:123:345::::", - "localhost,localhost2\\:123;", - "localhost,localhost2\\:123:456", - 
"localhost,localhost2\\:123:456;test=false", - "localhost\\:123:::user/principal:/user.keytab", - "localhost\\:123:::LongRunningQueries", - "localhost\\:123:::LongRunningQueries:", - "localhost\\:123:::LongRunningQueries::", - "localhost\\:123:::user/principal:C:\\user.keytab", - "localhost\\:123:::user/principal:C:\\Documents and Settings\\user1\\user1.keytab", - }; - String[][] partsList = new String[][] { - {defaultMasters}, - {defaultMasters}, - {defaultMasters}, - {defaultMasters}, - {defaultMasters}, - {"localhost:"+HConstants.DEFAULT_MASTER_PORT}, - {"localhost:"+HConstants.DEFAULT_MASTER_PORT}, - {"localhost:"+HConstants.DEFAULT_MASTER_PORT}, - {"localhost:123"}, - {"localhost2:123,localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost2:123,localhost:16000"}, - {"localhost2:123,localhost:456"}, - {"localhost2:123,localhost:456"}, - {"localhost:123","user/principal","/user.keytab"}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","user/principal","C:\\user.keytab"}, - {"localhost:123","user/principal","C:\\Documents and Settings\\user1\\user1.keytab"}, - }; - assertEquals(urls.length,partsList.length); - for (int i = 0; i < urls.length; i++) { - try { - Configuration testConfig = new Configuration(config); - MasterConnectionInfo info = (MasterConnectionInfo)ConnectionInfo.create(urls[i], testConfig, null, null); - String[] parts = partsList[i]; - assertEquals(parts[0], info.getBoostrapServers()); - if(parts.length>1) { - assertEquals(parts[1], info.getPrincipal()); - } else { - assertNull(info.getPrincipal()); - } - if(parts.length>2) { - assertEquals(parts[2], info.getKeytab()); - } else { - assertNull(info.getKeytab()); - } - } catch (AssertionError e) { - throw new AssertionError("For \"" + urls[i] + ": " + e.getMessage()); - } + } + + @Test + public void testGetMasterConnectionInfo() throws SQLException { + assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0") >= 0); + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + String defaultMasters = "defaultmaster1:1243,defaultmaster2:2345"; + config.set("hbase.masters", defaultMasters); + + String[] urls = new String[] { null, "", "jdbc:phoenix+master", "jdbc:phoenix+master;test=true", + "jdbc:phoenix", "jdbc:phoenix+master:localhost", "localhost", "localhost;", "localhost:123", + "localhost,localhost2:123;", "localhost\\:123", "localhost\\:123:", "localhost\\:123::", + "localhost\\:123:::", "localhost\\:123::::", "localhost\\:123:::::", + "localhost\\:123:345::::", "localhost,localhost2\\:123;", "localhost,localhost2\\:123:456", + "localhost,localhost2\\:123:456;test=false", "localhost\\:123:::user/principal:/user.keytab", + "localhost\\:123:::LongRunningQueries", "localhost\\:123:::LongRunningQueries:", + "localhost\\:123:::LongRunningQueries::", "localhost\\:123:::user/principal:C:\\user.keytab", + "localhost\\:123:::user/principal:C:\\Documents and Settings\\user1\\user1.keytab", }; + String[][] partsList = new String[][] { { defaultMasters }, { defaultMasters }, + { defaultMasters }, { defaultMasters }, { defaultMasters }, + { "localhost:" + HConstants.DEFAULT_MASTER_PORT }, + { "localhost:" + HConstants.DEFAULT_MASTER_PORT }, + { "localhost:" + 
HConstants.DEFAULT_MASTER_PORT }, { "localhost:123" }, + { "localhost2:123,localhost:123" }, { "localhost:123" }, { "localhost:123" }, + { "localhost:123" }, { "localhost:123" }, { "localhost:123" }, { "localhost:123" }, + { "localhost:123" }, { "localhost2:123,localhost:16000" }, { "localhost2:123,localhost:456" }, + { "localhost2:123,localhost:456" }, { "localhost:123", "user/principal", "/user.keytab" }, + { "localhost:123", "LongRunningQueries", null }, + { "localhost:123", "LongRunningQueries", null }, + { "localhost:123", "LongRunningQueries", null }, + { "localhost:123", "user/principal", "C:\\user.keytab" }, + { "localhost:123", "user/principal", "C:\\Documents and Settings\\user1\\user1.keytab" }, }; + assertEquals(urls.length, partsList.length); + for (int i = 0; i < urls.length; i++) { + try { + Configuration testConfig = new Configuration(config); + MasterConnectionInfo info = + (MasterConnectionInfo) ConnectionInfo.create(urls[i], testConfig, null, null); + String[] parts = partsList[i]; + assertEquals(parts[0], info.getBoostrapServers()); + if (parts.length > 1) { + assertEquals(parts[1], info.getPrincipal()); + } else { + assertNull(info.getPrincipal()); } - } - - @Test - public void testGetRPCConnectionInfo() throws SQLException { - assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0")>=0); - Configuration config = - HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - String defaultBoostraps = "defaultmaster1:1243,defaultmaster2:2345"; - config.set("hbase.client.bootstrap.servers", defaultBoostraps); - - String[] urls = new String[] { - null, - "", - "jdbc:phoenix+rpc", - "jdbc:phoenix+rpc\";test=true", - "jdbc:phoenix", - "jdbc:phoenix+rpc\":localhost", - "localhost", - "localhost;", - "localhost:123", - "localhost,localhost2:123;", - "localhost\\:123", - "localhost\\:123:", - "localhost\\:123::", - "localhost\\:123:::", - "localhost\\:123::::", - "localhost\\:123:::::", - "localhost\\:123:345::::", - "localhost,localhost2\\:123;", - "localhost,localhost2\\:123:456", - "localhost,localhost2\\:123:456;test=false", - "localhost\\:123:::user/principal:/user.keytab", - "localhost\\:123:::LongRunningQueries", - "localhost\\:123:::LongRunningQueries:", - "localhost\\:123:::LongRunningQueries::", - "localhost\\:123:::user/principal:C:\\user.keytab", - "localhost\\:123:::user/principal:C:\\Documents and Settings\\user1\\user1.keytab", - }; - String[][] partsList = new String[][] { - {defaultBoostraps}, - {defaultBoostraps}, - {defaultBoostraps}, - {defaultBoostraps}, - {defaultBoostraps}, - {"localhost"}, - {"localhost"}, - {"localhost"}, - {"localhost:123"}, - {"localhost2:123,localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - {"localhost:123"}, - //No default port - {"localhost,localhost2:123"}, - {"localhost2:123,localhost:456"}, - {"localhost2:123,localhost:456"}, - {"localhost:123","user/principal","/user.keytab"}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","LongRunningQueries",null}, - {"localhost:123","user/principal","C:\\user.keytab"}, - {"localhost:123","user/principal","C:\\Documents and Settings\\user1\\user1.keytab"}, - }; - assertEquals(urls.length,partsList.length); - for (int i = 0; i < urls.length; i++) { - try { - Configuration testConfig = new Configuration(config); - 
RPCConnectionInfo info = (RPCConnectionInfo)ConnectionInfo.create(urls[i], testConfig, null, null); - String[] parts = partsList[i]; - assertEquals(parts[0], info.getBoostrapServers()); - if(parts.length>1) { - assertEquals(parts[1], info.getPrincipal()); - } else { - assertNull(info.getPrincipal()); - } - if(parts.length>2) { - assertEquals(parts[2], info.getKeytab()); - } else { - assertNull(info.getKeytab()); - } - } catch (AssertionError e) { - throw new AssertionError("For \"" + urls[i] + ": " + e.getMessage()); - } + if (parts.length > 2) { + assertEquals(parts[2], info.getKeytab()); + } else { + assertNull(info.getKeytab()); } + } catch (AssertionError e) { + throw new AssertionError("For \"" + urls[i] + ": " + e.getMessage()); + } } - - @Test - public void testNegativeGetConnectionInfo() throws SQLException { - String[] urls = new String[] { - //Reject unescaped ports in quorum string - "jdbc:phoenix:v1:1,v2:2,v3:3", - "jdbc:phoenix:v1:1,v2:2,v3:3;test=true", - "jdbc:phoenix:v1,v2,v3:-1:/hbase;test=true", - "jdbc:phoenix:v1,v2,v3:-1", - "jdbc:phoenix+zk:v1:1,v2:2,v3:3", - "jdbc:phoenix+zk:v1:1,v2:2,v3:3;test=true", - "jdbc:phoenix+zk:v1,v2,v3:-1:/hbase;test=true", - "jdbc:phoenix+zk:v1,v2,v3:-1" - }; - for (String url : urls) { - try { - ConnectionInfo.create(url, null, null); - throw new AssertionError("Expected exception for \"" + url + "\""); - } catch (SQLException e) { - try { - assertEquals(SQLExceptionCode.MALFORMED_CONNECTION_URL.getSQLState(), e.getSQLState()); - } catch (AssertionError ae) { - throw new AssertionError("For \"" + url + "\": " + ae.getMessage()); - } - } + } + + @Test + public void testGetRPCConnectionInfo() throws SQLException { + assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") >= 0); + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + String defaultBoostraps = "defaultmaster1:1243,defaultmaster2:2345"; + config.set("hbase.client.bootstrap.servers", defaultBoostraps); + + String[] urls = new String[] { null, "", "jdbc:phoenix+rpc", "jdbc:phoenix+rpc\";test=true", + "jdbc:phoenix", "jdbc:phoenix+rpc\":localhost", "localhost", "localhost;", "localhost:123", + "localhost,localhost2:123;", "localhost\\:123", "localhost\\:123:", "localhost\\:123::", + "localhost\\:123:::", "localhost\\:123::::", "localhost\\:123:::::", + "localhost\\:123:345::::", "localhost,localhost2\\:123;", "localhost,localhost2\\:123:456", + "localhost,localhost2\\:123:456;test=false", "localhost\\:123:::user/principal:/user.keytab", + "localhost\\:123:::LongRunningQueries", "localhost\\:123:::LongRunningQueries:", + "localhost\\:123:::LongRunningQueries::", "localhost\\:123:::user/principal:C:\\user.keytab", + "localhost\\:123:::user/principal:C:\\Documents and Settings\\user1\\user1.keytab", }; + String[][] partsList = new String[][] { { defaultBoostraps }, { defaultBoostraps }, + { defaultBoostraps }, { defaultBoostraps }, { defaultBoostraps }, { "localhost" }, + { "localhost" }, { "localhost" }, { "localhost:123" }, { "localhost2:123,localhost:123" }, + { "localhost:123" }, { "localhost:123" }, { "localhost:123" }, { "localhost:123" }, + { "localhost:123" }, { "localhost:123" }, { "localhost:123" }, + // No default port + { "localhost,localhost2:123" }, { "localhost2:123,localhost:456" }, + { "localhost2:123,localhost:456" }, { "localhost:123", "user/principal", "/user.keytab" }, + { "localhost:123", 
"LongRunningQueries", null }, + { "localhost:123", "LongRunningQueries", null }, + { "localhost:123", "LongRunningQueries", null }, + { "localhost:123", "user/principal", "C:\\user.keytab" }, + { "localhost:123", "user/principal", "C:\\Documents and Settings\\user1\\user1.keytab" }, }; + assertEquals(urls.length, partsList.length); + for (int i = 0; i < urls.length; i++) { + try { + Configuration testConfig = new Configuration(config); + RPCConnectionInfo info = + (RPCConnectionInfo) ConnectionInfo.create(urls[i], testConfig, null, null); + String[] parts = partsList[i]; + assertEquals(parts[0], info.getBoostrapServers()); + if (parts.length > 1) { + assertEquals(parts[1], info.getPrincipal()); + } else { + assertNull(info.getPrincipal()); } - } - - @Test - public void testRPCNegativeGetConnectionInfo() throws SQLException { - assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0")>=0); - String[] urls = new String[] { - //Reject unescaped and invalid ports in quorum string - "jdbc:phoenix+rpc:v1:1,v2:2,v3:3", - "jdbc:phoenix+rpc:v1:1,v2:2,v3:3;test=true", - "jdbc:phoenix+rpc:v1,v2,v3:-1:/hbase;test=true", - "jdbc:phoenix+rpc:v1,v2,v3:-1", - "jdbc:phoenix+master:v1:1,v2:2,v3:3", - "jdbc:phoenix+master:v1:1,v2:2,v3:3;test=true", - "jdbc:phoenix+master:v1,v2,v3:-1:/hbase;test=true", - "jdbc:phoenix+master:v1,v2,v3:-1", - //Reject rootnode and missing empty rootnode field - "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:rootNode", - "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:rootNode:prinicpial:keystore", - "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:prinicpial", - "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:prinicpial:keystore", - "jdbc:phoenix+master:localhost,localhost2\\:123:456:rootNode", - "jdbc:phoenix+master:localhost,localhost2\\:123:456:rootNode:prinicpial:keystore", - "jdbc:phoenix+master:localhost,localhost2\\:123:456:prinicpial", - "jdbc:phoenix+master:localhost,localhost2\\:123:456:prinicpial:keystore", - - }; - for (String url : urls) { - try { - ConnectionInfo.create(url, null, null); - throw new AssertionError("Expected exception for \"" + url + "\""); - } catch (SQLException e) { - try { - assertEquals(SQLExceptionCode.MALFORMED_CONNECTION_URL.getSQLState(), e.getSQLState()); - } catch (AssertionError ae) { - throw new AssertionError("For \"" + url + "\": " + ae.getMessage()); - } - } + if (parts.length > 2) { + assertEquals(parts[2], info.getKeytab()); + } else { + assertNull(info.getKeytab()); } + } catch (AssertionError e) { + throw new AssertionError("For \"" + urls[i] + ": " + e.getMessage()); + } } - - @Test - public void testMasterDefaults() throws SQLException { - assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0") >= 0); + } + + @Test + public void testNegativeGetConnectionInfo() throws SQLException { + String[] urls = new String[] { + // Reject unescaped ports in quorum string + "jdbc:phoenix:v1:1,v2:2,v3:3", "jdbc:phoenix:v1:1,v2:2,v3:3;test=true", + "jdbc:phoenix:v1,v2,v3:-1:/hbase;test=true", "jdbc:phoenix:v1,v2,v3:-1", + "jdbc:phoenix+zk:v1:1,v2:2,v3:3", "jdbc:phoenix+zk:v1:1,v2:2,v3:3;test=true", + "jdbc:phoenix+zk:v1,v2,v3:-1:/hbase;test=true", "jdbc:phoenix+zk:v1,v2,v3:-1" }; + for (String url : urls) { + try { + ConnectionInfo.create(url, null, null); + throw new AssertionError("Expected exception for \"" + url + "\""); + } catch (SQLException e) { try { - Configuration config = - HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - 
"org.apache.hadoop.hbase.client.MasterRegistry"); - ConnectionInfo.create("jdbc:phoenix+master", config, null, null); - fail("Should have thrown exception"); - } catch (SQLException e) { + assertEquals(SQLExceptionCode.MALFORMED_CONNECTION_URL.getSQLState(), e.getSQLState()); + } catch (AssertionError ae) { + throw new AssertionError("For \"" + url + "\": " + ae.getMessage()); } - - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.hostname", "master.hostname"); - MasterConnectionInfo info = - (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix+master", config, null, - null); - assertEquals(info.getBoostrapServers(), "master.hostname:16000"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.hostname", "master.hostname"); - config.set("hbase.master.port", "17000"); - info = (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix", config, null, null); - assertEquals(info.getBoostrapServers(), "master.hostname:17000"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.hostname", "master.hostname"); - config.set("hbase.master.port", "17000"); - config.set("hbase.masters", "master1:123,master2:234,master3:345"); - info = (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix", config, null, null); - assertEquals(info.getBoostrapServers(), "master1:123,master2:234,master3:345"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.port", "17000"); - info = - (MasterConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix+master:master1.from.url,master2.from.url", config, null, null); - assertEquals(info.getBoostrapServers(), "master1.from.url:17000,master2.from.url:17000"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.port", "17000"); - info = - (MasterConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix+master:master1.from.url\\:123,master2.from.url", config, null, - null); - assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:17000"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.hostname", "master.hostname"); - config.set("hbase.master.port", "17000"); - config.set("hbase.masters", "master1:123,master2:234,master3:345"); - info = - (MasterConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix:master1.from.url\\:123,master2.from.url:18000", config, null, - null); - assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:18000"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); - config.set("hbase.master.hostname", "master.hostname"); - config.set("hbase.master.port", "17000"); - config.set("hbase.masters", 
"master1:123,master2:234,master3:345"); - info = - (MasterConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix:master1.from.url\\:123,master2.from.url\\:234:18000", config, - null, null); - assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:234"); + } } - - @Test - public void testRPCDefaults() throws SQLException { - assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") >= 0); + } + + @Test + public void testRPCNegativeGetConnectionInfo() throws SQLException { + assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") >= 0); + String[] urls = new String[] { + // Reject unescaped and invalid ports in quorum string + "jdbc:phoenix+rpc:v1:1,v2:2,v3:3", "jdbc:phoenix+rpc:v1:1,v2:2,v3:3;test=true", + "jdbc:phoenix+rpc:v1,v2,v3:-1:/hbase;test=true", "jdbc:phoenix+rpc:v1,v2,v3:-1", + "jdbc:phoenix+master:v1:1,v2:2,v3:3", "jdbc:phoenix+master:v1:1,v2:2,v3:3;test=true", + "jdbc:phoenix+master:v1,v2,v3:-1:/hbase;test=true", "jdbc:phoenix+master:v1,v2,v3:-1", + // Reject rootnode and missing empty rootnode field + "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:rootNode", + "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:rootNode:prinicpial:keystore", + "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:prinicpial", + "jdbc:phoenix+rpc:localhost,localhost2\\:123:456:prinicpial:keystore", + "jdbc:phoenix+master:localhost,localhost2\\:123:456:rootNode", + "jdbc:phoenix+master:localhost,localhost2\\:123:456:rootNode:prinicpial:keystore", + "jdbc:phoenix+master:localhost,localhost2\\:123:456:prinicpial", + "jdbc:phoenix+master:localhost,localhost2\\:123:456:prinicpial:keystore", + + }; + for (String url : urls) { + try { + ConnectionInfo.create(url, null, null); + throw new AssertionError("Expected exception for \"" + url + "\""); + } catch (SQLException e) { try { - Configuration config = - HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); - fail("Should have thrown exception"); - } catch (SQLException e) { + assertEquals(SQLExceptionCode.MALFORMED_CONNECTION_URL.getSQLState(), e.getSQLState()); + } catch (AssertionError ae) { + throw new AssertionError("For \"" + url + "\": " + ae.getMessage()); } - - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - config.set("hbase.client.bootstrap.servers", "bootstrap1\\:123,boostrap2\\:234"); - RPCConnectionInfo info = - (RPCConnectionInfo) ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); - assertEquals(info.getBoostrapServers(), "bootstrap1\\:123,boostrap2\\:234"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - info = - (RPCConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix+rpc:bootstrap1.from.url,bootstrap2.from.url", config, null, null); - // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? 
- assertEquals(info.getBoostrapServers(), "bootstrap1.from.url,bootstrap2.from.url"); - - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - info = - (RPCConnectionInfo) ConnectionInfo.create( - "jdbc:phoenix+rpc:bootstrap1.from.url\\:123,bootstrap2.from.url\\::234", config, - null, null); - // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? - assertEquals(info.getBoostrapServers(), "bootstrap1.from.url:123,bootstrap2.from.url:234"); - - // Check fallback to master properties - config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.set("hbase.client.registry.impl", - "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); - config.set("hbase.masters", "master1:123,master2:234,master3:345"); - info = (RPCConnectionInfo) ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); - // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? - assertEquals(info.getBoostrapServers(), "master1:123,master2:234,master3:345"); + } } - - @Test - public void testNotAccept() throws Exception { - Driver driver = new PhoenixDriver(); - assertFalse(driver.acceptsURL("jdbc:phoenix://localhost")); - assertFalse(driver.acceptsURL("jdbc:phoenix:localhost;test=true;bar=foo")); - assertFalse(driver.acceptsURL("jdbc:phoenix:localhost;test=true")); - assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123")); - assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123;untest=true")); - assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123;untest=true;foo=bar")); - DriverManager.deregisterDriver(driver); + } + + @Test + public void testMasterDefaults() throws SQLException { + assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.3.0") >= 0); + try { + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + ConnectionInfo.create("jdbc:phoenix+master", config, null, null); + fail("Should have thrown exception"); + } catch (SQLException e) { } - @Test - public void testPrincipalsMatching() throws Exception { - assertTrue(ConnectionInfo.isSameName("user@EXAMPLE.COM", "user@EXAMPLE.COM")); - assertTrue(ConnectionInfo.isSameName("user/localhost@EXAMPLE.COM", "user/localhost@EXAMPLE.COM")); - // the user provided name might have a _HOST in it, which should be replaced by the hostname - assertTrue(ConnectionInfo.isSameName("user/localhost@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", "localhost")); - assertFalse(ConnectionInfo.isSameName("user/foobar@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", "localhost")); - assertFalse(ConnectionInfo.isSameName("user@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", "localhost")); - assertFalse(ConnectionInfo.isSameName("user@FOO", "user@BAR")); - - // NB: We _should_ be able to provide our or krb5.conf for this test to use, but this doesn't - // seem to want to play nicely with the rest of the tests. Instead, we can just provide a default realm - // by hand. - - // For an implied default realm, we should also match that. Users might provide a shortname - // whereas UGI would provide the "full" name. 
- assertTrue(ConnectionInfo.isSameName("user@APACHE.ORG", "user", null, "APACHE.ORG")); - assertTrue(ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/localhost", null, "APACHE.ORG")); - assertFalse(ConnectionInfo.isSameName("user@APACHE.NET", "user", null, "APACHE.ORG")); - assertFalse(ConnectionInfo.isSameName("user/localhost@APACHE.NET", "user/localhost", null, "APACHE.ORG")); - assertTrue(ConnectionInfo.isSameName("user@APACHE.ORG", "user@APACHE.ORG", null, "APACHE.ORG")); - assertTrue(ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/localhost@APACHE.ORG", null, "APACHE.ORG")); - - assertTrue(ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/_HOST", "localhost", "APACHE.ORG")); - assertTrue(ConnectionInfo.isSameName("user/foobar@APACHE.ORG", "user/_HOST", "foobar", "APACHE.ORG")); - assertFalse(ConnectionInfo.isSameName("user/localhost@APACHE.NET", "user/_HOST", "localhost", "APACHE.ORG")); - assertFalse(ConnectionInfo.isSameName("user/foobar@APACHE.NET", "user/_HOST", "foobar", "APACHE.ORG")); + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.hostname", "master.hostname"); + MasterConnectionInfo info = + (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix+master", config, null, null); + assertEquals(info.getBoostrapServers(), "master.hostname:16000"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.hostname", "master.hostname"); + config.set("hbase.master.port", "17000"); + info = (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix", config, null, null); + assertEquals(info.getBoostrapServers(), "master.hostname:17000"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.hostname", "master.hostname"); + config.set("hbase.master.port", "17000"); + config.set("hbase.masters", "master1:123,master2:234,master3:345"); + info = (MasterConnectionInfo) ConnectionInfo.create("jdbc:phoenix", config, null, null); + assertEquals(info.getBoostrapServers(), "master1:123,master2:234,master3:345"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.port", "17000"); + info = (MasterConnectionInfo) ConnectionInfo + .create("jdbc:phoenix+master:master1.from.url,master2.from.url", config, null, null); + assertEquals(info.getBoostrapServers(), "master1.from.url:17000,master2.from.url:17000"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.port", "17000"); + info = (MasterConnectionInfo) ConnectionInfo + .create("jdbc:phoenix+master:master1.from.url\\:123,master2.from.url", config, null, null); + assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:17000"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.hostname", "master.hostname"); 
+ config.set("hbase.master.port", "17000"); + config.set("hbase.masters", "master1:123,master2:234,master3:345"); + info = (MasterConnectionInfo) ConnectionInfo + .create("jdbc:phoenix:master1.from.url\\:123,master2.from.url:18000", config, null, null); + assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:18000"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.MasterRegistry"); + config.set("hbase.master.hostname", "master.hostname"); + config.set("hbase.master.port", "17000"); + config.set("hbase.masters", "master1:123,master2:234,master3:345"); + info = (MasterConnectionInfo) ConnectionInfo.create( + "jdbc:phoenix:master1.from.url\\:123,master2.from.url\\:234:18000", config, null, null); + assertEquals(info.getBoostrapServers(), "master1.from.url:123,master2.from.url:234"); + } + + @Test + public void testRPCDefaults() throws SQLException { + assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.5.0") >= 0); + try { + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); + fail("Should have thrown exception"); + } catch (SQLException e) { } + + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + config.set("hbase.client.bootstrap.servers", "bootstrap1\\:123,boostrap2\\:234"); + RPCConnectionInfo info = + (RPCConnectionInfo) ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); + assertEquals(info.getBoostrapServers(), "bootstrap1\\:123,boostrap2\\:234"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + info = (RPCConnectionInfo) ConnectionInfo + .create("jdbc:phoenix+rpc:bootstrap1.from.url,bootstrap2.from.url", config, null, null); + // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? + assertEquals(info.getBoostrapServers(), "bootstrap1.from.url,bootstrap2.from.url"); + + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + info = (RPCConnectionInfo) ConnectionInfo.create( + "jdbc:phoenix+rpc:bootstrap1.from.url\\:123,bootstrap2.from.url\\::234", config, null, null); + // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? + assertEquals(info.getBoostrapServers(), "bootstrap1.from.url:123,bootstrap2.from.url:234"); + + // Check fallback to master properties + config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.set("hbase.client.registry.impl", + "org.apache.hadoop.hbase.client.RpcConnectionRegistry"); + config.set("hbase.masters", "master1:123,master2:234,master3:345"); + info = (RPCConnectionInfo) ConnectionInfo.create("jdbc:phoenix+rpc", config, null, null); + // TODO looks like HBase doesn't do port replacement/check for RPC servers either ? 
+ assertEquals(info.getBoostrapServers(), "master1:123,master2:234,master3:345"); + } + + @Test + public void testNotAccept() throws Exception { + Driver driver = new PhoenixDriver(); + assertFalse(driver.acceptsURL("jdbc:phoenix://localhost")); + assertFalse(driver.acceptsURL("jdbc:phoenix:localhost;test=true;bar=foo")); + assertFalse(driver.acceptsURL("jdbc:phoenix:localhost;test=true")); + assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123")); + assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123;untest=true")); + assertTrue(driver.acceptsURL("jdbc:phoenix:localhost:123;untest=true;foo=bar")); + DriverManager.deregisterDriver(driver); + } + + @Test + public void testPrincipalsMatching() throws Exception { + assertTrue(ConnectionInfo.isSameName("user@EXAMPLE.COM", "user@EXAMPLE.COM")); + assertTrue( + ConnectionInfo.isSameName("user/localhost@EXAMPLE.COM", "user/localhost@EXAMPLE.COM")); + // the user provided name might have a _HOST in it, which should be replaced by the hostname + assertTrue(ConnectionInfo.isSameName("user/localhost@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", + "localhost")); + assertFalse( + ConnectionInfo.isSameName("user/foobar@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", "localhost")); + assertFalse( + ConnectionInfo.isSameName("user@EXAMPLE.COM", "user/_HOST@EXAMPLE.COM", "localhost")); + assertFalse(ConnectionInfo.isSameName("user@FOO", "user@BAR")); + + // NB: We _should_ be able to provide our or krb5.conf for this test to use, but this doesn't + // seem to want to play nicely with the rest of the tests. Instead, we can just provide a + // default realm + // by hand. + + // For an implied default realm, we should also match that. Users might provide a shortname + // whereas UGI would provide the "full" name. + assertTrue(ConnectionInfo.isSameName("user@APACHE.ORG", "user", null, "APACHE.ORG")); + assertTrue( + ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/localhost", null, "APACHE.ORG")); + assertFalse(ConnectionInfo.isSameName("user@APACHE.NET", "user", null, "APACHE.ORG")); + assertFalse( + ConnectionInfo.isSameName("user/localhost@APACHE.NET", "user/localhost", null, "APACHE.ORG")); + assertTrue(ConnectionInfo.isSameName("user@APACHE.ORG", "user@APACHE.ORG", null, "APACHE.ORG")); + assertTrue(ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/localhost@APACHE.ORG", + null, "APACHE.ORG")); + + assertTrue(ConnectionInfo.isSameName("user/localhost@APACHE.ORG", "user/_HOST", "localhost", + "APACHE.ORG")); + assertTrue( + ConnectionInfo.isSameName("user/foobar@APACHE.ORG", "user/_HOST", "foobar", "APACHE.ORG")); + assertFalse(ConnectionInfo.isSameName("user/localhost@APACHE.NET", "user/_HOST", "localhost", + "APACHE.ORG")); + assertFalse( + ConnectionInfo.isSameName("user/foobar@APACHE.NET", "user/_HOST", "foobar", "APACHE.ORG")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolTest.java index 9b463551715..419cdf2583f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAAdminToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,7 +37,6 @@ import java.util.List; import java.util.Properties; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; import org.apache.commons.lang3.RandomStringUtils; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.GetDataBuilder; @@ -47,6 +46,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.phoenix.jdbc.ClusterRoleRecord.ClusterRole; import org.apache.phoenix.jdbc.PhoenixHAAdminTool.PhoenixHAAdminHelper; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option; import org.apache.zookeeper.KeeperException.NoNodeException; import org.junit.After; import org.junit.Before; @@ -60,269 +60,255 @@ /** * Unit test for {@link PhoenixHAAdminTool} including the helper class {@link PhoenixHAAdminHelper}. - * * @see PhoenixHAAdminToolIT */ public class PhoenixHAAdminToolTest { - private static final Logger LOG = LoggerFactory.getLogger(PhoenixHAAdminToolTest.class); - private static final String ZK1 = "zk1:2181:/hbase"; - private static final String ZK2 = "zk2:2181:/hbase"; - private static final PrintStream STDOUT = System.out; - private static final ByteArrayOutputStream STDOUT_CAPTURE = new ByteArrayOutputStream(); + private static final Logger LOG = LoggerFactory.getLogger(PhoenixHAAdminToolTest.class); + private static final String ZK1 = "zk1:2181:/hbase"; + private static final String ZK2 = "zk2:2181:/hbase"; + private static final PrintStream STDOUT = System.out; + private static final ByteArrayOutputStream STDOUT_CAPTURE = new ByteArrayOutputStream(); - private final PhoenixHAAdminTool.HighAvailibilityCuratorProvider mockHighAvailibilityCuratorProvider = Mockito.mock(PhoenixHAAdminTool.HighAvailibilityCuratorProvider.class); + private final PhoenixHAAdminTool.HighAvailibilityCuratorProvider mockHighAvailibilityCuratorProvider = + Mockito.mock(PhoenixHAAdminTool.HighAvailibilityCuratorProvider.class); - /** Use mocked curator since there is no mini-ZK cluster. */ - private final CuratorFramework curator = Mockito.mock(CuratorFramework.class); - /** HA admin to test for one test case. */ - private final PhoenixHAAdminHelper admin = new PhoenixHAAdminHelper(ZK1, new Configuration(), mockHighAvailibilityCuratorProvider); + /** Use mocked curator since there is no mini-ZK cluster. */ + private final CuratorFramework curator = Mockito.mock(CuratorFramework.class); + /** HA admin to test for one test case. 
*/ + private final PhoenixHAAdminHelper admin = + new PhoenixHAAdminHelper(ZK1, new Configuration(), mockHighAvailibilityCuratorProvider); - private String haGroupName; - private ClusterRoleRecord recordV1; + private String haGroupName; + private ClusterRoleRecord recordV1; - @Rule - public final TestName testName = new TestName(); + @Rule + public final TestName testName = new TestName(); - @Before - public void setup() throws Exception { - when(mockHighAvailibilityCuratorProvider.getCurator(Mockito.anyString(), any(Properties.class))).thenReturn(curator); - haGroupName = testName.getMethodName(); - recordV1 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2, ClusterRole.STANDBY, - 1); - saveRecordV1ToZk(); - } + @Before + public void setup() throws Exception { + when(mockHighAvailibilityCuratorProvider.getCurator(Mockito.anyString(), any(Properties.class))) + .thenReturn(curator); + haGroupName = testName.getMethodName(); + recordV1 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, ZK1, + ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + saveRecordV1ToZk(); + } - @After - public void after() { - // reset STDOUT in case it was captured for testing - System.setOut(STDOUT); - } + @After + public void after() { + // reset STDOUT in case it was captured for testing + System.setOut(STDOUT); + } - /** - * Test command line options. - * - * In the test body, we split sections by {} to make sure no variable is reused in mistakenly. - */ - @Test - public void testCommandLineOption() throws Exception { - { // no value for -m option - String[] args = { "-m" }; - int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); - assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); - } - { // -l does not work with -m option - String[] args = { "-l", "-m", "cluster-role-records.yaml" }; - int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); - assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); - } - { // -l does not work with -F/--forceful option - String[] args = { "-l", "-F"}; - int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); - assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); - } - { // -l does not work with --repair option - String[] args = { "-l", "-r"}; - int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); - assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); - } - { // -m does not work with --repair option - String[] args = { "-m", "cluster-role-records.yaml", "-r"}; - int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); - assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); - } + /** + * Test command line options. In the test body, we split sections by {} to make sure no variable + * is reused in mistakenly. 
+ */ + @Test + public void testCommandLineOption() throws Exception { + { // no value for -m option + String[] args = { "-m" }; + int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); + assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); + } + { // -l does not work with -m option + String[] args = { "-l", "-m", "cluster-role-records.yaml" }; + int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); + assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); + } + { // -l does not work with -F/--forceful option + String[] args = { "-l", "-F" }; + int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); + assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); } + { // -l does not work with --repair option + String[] args = { "-l", "-r" }; + int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); + assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); + } + { // -m does not work with --repair option + String[] args = { "-m", "cluster-role-records.yaml", "-r" }; + int ret = ToolRunner.run(new PhoenixHAAdminTool(), args); + assertEquals(PhoenixHAAdminTool.RET_ARGUMENT_ERROR, ret); + } + } - /** - * Test that helper method works for reading cluster role records from JSON file. - */ - @Test - public void testReadRecordsFromFileJson() throws Exception { - { // one record in JSON file - String fileName = ClusterRoleRecordTest.createJsonFileWithRecords(recordV1); - List records = new PhoenixHAAdminTool().readRecordsFromFile(fileName); - assertEquals(1, records.size()); - assertTrue(records.contains(recordV1)); - } + /** + * Test that helper method works for reading cluster role records from JSON file. + */ + @Test + public void testReadRecordsFromFileJson() throws Exception { + { // one record in JSON file + String fileName = ClusterRoleRecordTest.createJsonFileWithRecords(recordV1); + List records = new PhoenixHAAdminTool().readRecordsFromFile(fileName); + assertEquals(1, records.size()); + assertTrue(records.contains(recordV1)); + } - { // two records in JSON file - String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(3); - ClusterRoleRecord record2 = new ClusterRoleRecord( - haGroupName2, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.ACTIVE, - ZK2, ClusterRole.STANDBY, - 1); - String fileName = ClusterRoleRecordTest.createJsonFileWithRecords(recordV1, record2); - List records = new PhoenixHAAdminTool().readRecordsFromFile(fileName); - assertEquals(2, records.size()); - assertTrue(records.contains(recordV1)); - assertTrue(records.contains(record2)); - } + { // two records in JSON file + String haGroupName2 = haGroupName + RandomStringUtils.randomAlphabetic(3); + ClusterRoleRecord record2 = new ClusterRoleRecord(haGroupName2, + HighAvailabilityPolicy.FAILOVER, ZK1, ClusterRole.ACTIVE, ZK2, ClusterRole.STANDBY, 1); + String fileName = ClusterRoleRecordTest.createJsonFileWithRecords(recordV1, record2); + List records = new PhoenixHAAdminTool().readRecordsFromFile(fileName); + assertEquals(2, records.size()); + assertTrue(records.contains(recordV1)); + assertTrue(records.contains(record2)); } + } - /** - * Test that agent will try to create znode if it does not exist. - */ - @Test - public void testCreateIfNotExist() throws Exception { - GetDataBuilder getDataBuilder = Mockito.mock(GetDataBuilder.class); - when(getDataBuilder.forPath(anyString())).thenThrow(new NoNodeException()); - when(curator.getData()).thenReturn(getDataBuilder); + /** + * Test that agent will try to create znode if it does not exist. 
+ */ + @Test + public void testCreateIfNotExist() throws Exception { + GetDataBuilder getDataBuilder = Mockito.mock(GetDataBuilder.class); + when(getDataBuilder.forPath(anyString())).thenThrow(new NoNodeException()); + when(curator.getData()).thenReturn(getDataBuilder); - try { - admin.createOrUpdateDataOnZookeeper(recordV1); - } catch (Exception e) { - LOG.info("Got expected exception when creating the node without mocking it fully", e); - } - verify(curator, atLeastOnce()).create(); + try { + admin.createOrUpdateDataOnZookeeper(recordV1); + } catch (Exception e) { + LOG.info("Got expected exception when creating the node without mocking it fully", e); } + verify(curator, atLeastOnce()).create(); + } - /** - * Test that agent will try to update znode if given record has a newer version. - */ - //Ignored as the updates to curator framework made the verify fail test is not stable interfaces - @Ignore - @Test - public void testUpdate() throws Exception { - boolean result = false; - saveRecordV1ToZk(); - ClusterRoleRecord recordV2 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 2); // higher version than recordV1 so update should be tried - try { - result = admin.createOrUpdateDataOnZookeeper(recordV2); - } catch (Exception e) { - LOG.info("Got expected exception when creating the node without mocking it fully", e); - } - verify(curator, never()).create(); - // to update data, internally curator is used this way by DistributedAtomicValue - verify(curator, atLeastOnce()).newNamespaceAwareEnsurePath(contains(haGroupName)); + /** + * Test that agent will try to update znode if given record has a newer version. + */ + // Ignored as the updates to curator framework made the verify fail test is not stable interfaces + @Ignore + @Test + public void testUpdate() throws Exception { + boolean result = false; + saveRecordV1ToZk(); + ClusterRoleRecord recordV2 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 2); // higher version than recordV1 so + // update should be tried + try { + result = admin.createOrUpdateDataOnZookeeper(recordV2); + } catch (Exception e) { + LOG.info("Got expected exception when creating the node without mocking it fully", e); } + verify(curator, never()).create(); + // to update data, internally curator is used this way by DistributedAtomicValue + verify(curator, atLeastOnce()).newNamespaceAwareEnsurePath(contains(haGroupName)); + } - /** - * Test that agent rejects to deal with the record if it is not associated to this ZK. - */ - @Test - public void testFailWithUnrelatedRecord() { - ClusterRoleRecord record2 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1 + RandomStringUtils.random(3), ClusterRole.ACTIVE, // unrelated ZK - ZK2 + RandomStringUtils.random(3), ClusterRole.STANDBY, // unrelated ZK - 1); - try { - admin.createOrUpdateDataOnZookeeper(record2); - } catch (IOException e) { - LOG.info("Got expected exception since the record is not totally related to this ZK"); - assertTrue(e.getMessage().contains("INTERNAL ERROR")); - } - verify(curator, never()).getData(); // not even try to read the znode + /** + * Test that agent rejects to deal with the record if it is not associated to this ZK. 
+ */ + @Test + public void testFailWithUnrelatedRecord() { + ClusterRoleRecord record2 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1 + RandomStringUtils.random(3), ClusterRole.ACTIVE, // unrelated ZK + ZK2 + RandomStringUtils.random(3), ClusterRole.STANDBY, // unrelated ZK + 1); + try { + admin.createOrUpdateDataOnZookeeper(record2); + } catch (IOException e) { + LOG.info("Got expected exception since the record is not totally related to this ZK"); + assertTrue(e.getMessage().contains("INTERNAL ERROR")); } + verify(curator, never()).getData(); // not even try to read the znode + } - /** - * Test that agent rejects to update the record if its version is lower. - */ - @Test - public void testRejectLowerVersionRecord() throws Exception { - saveRecordV1ToZk(); - ClusterRoleRecord recordV0 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 0); // lower version than recordV1 - assertFalse(admin.createOrUpdateDataOnZookeeper(recordV0)); + /** + * Test that agent rejects to update the record if its version is lower. + */ + @Test + public void testRejectLowerVersionRecord() throws Exception { + saveRecordV1ToZk(); + ClusterRoleRecord recordV0 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 0); // lower version than recordV1 + assertFalse(admin.createOrUpdateDataOnZookeeper(recordV0)); - verify(curator, never()).setData(); - verify(curator, never()).create(); - } + verify(curator, never()).setData(); + verify(curator, never()).create(); + } - /** - * Test that agent rejects to update the record if it is inconsistent with existing data. - */ - @Test - public void testRejectInconsistentData() throws Exception { - saveRecordV1ToZk(); - ClusterRoleRecord record2 = new ClusterRoleRecord( - haGroupName, HighAvailabilityPolicy.FAILOVER, - ZK1, ClusterRole.STANDBY, - ZK2 , ClusterRole.STANDBY, - 1); // same version but different role1 - try { - admin.createOrUpdateDataOnZookeeper(record2); - } catch (IOException e) { - LOG.info("Got expected exception in case of inconsistent record data", e); - assertTrue(e.getMessage().contains("inconsistent")); - } + /** + * Test that agent rejects to update the record if it is inconsistent with existing data. + */ + @Test + public void testRejectInconsistentData() throws Exception { + saveRecordV1ToZk(); + ClusterRoleRecord record2 = new ClusterRoleRecord(haGroupName, HighAvailabilityPolicy.FAILOVER, + ZK1, ClusterRole.STANDBY, ZK2, ClusterRole.STANDBY, 1); // same version but different role1 + try { + admin.createOrUpdateDataOnZookeeper(record2); + } catch (IOException e) { + LOG.info("Got expected exception in case of inconsistent record data", e); + assertTrue(e.getMessage().contains("inconsistent")); } + } - /** - * Test that the help message is comprehensive enough because this is our operation tool. 
- */ - @SuppressWarnings("unchecked") - @Test - public void testHelpMessage() throws Exception { - System.setOut(new PrintStream(STDOUT_CAPTURE)); - int ret = new PhoenixHAAdminTool().run(new String[]{"-h"}); - assertEquals(RET_SUCCESS, ret); - PhoenixHAAdminTool.OPTIONS.getOptions().forEach( - (o) -> assertTrue(STDOUT_CAPTURE.toString().contains(((Option) o).getLongOpt()))); - } - - @Test - public void testGetZookeeperQuorum() { - Configuration conf = HBaseConfiguration.create(); - // default local ZK is 127.0.0.1:2181:/hbase - final String localZk = String.format("127.0.0.1:%d:%s", - HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - assertEquals(localZk, getLocalZkUrl(conf)); + /** + * Test that the help message is comprehensive enough because this is our operation tool. + */ + @SuppressWarnings("unchecked") + @Test + public void testHelpMessage() throws Exception { + System.setOut(new PrintStream(STDOUT_CAPTURE)); + int ret = new PhoenixHAAdminTool().run(new String[] { "-h" }); + assertEquals(RET_SUCCESS, ret); + PhoenixHAAdminTool.OPTIONS.getOptions() + .forEach((o) -> assertTrue(STDOUT_CAPTURE.toString().contains(((Option) o).getLongOpt()))); + } - // set host name only; use default port and znode parent - final String host = "foobar"; - conf.set(HConstants.ZOOKEEPER_QUORUM, "foobar"); - final String expectedLocalZk = String.format("%s:%d:%s", host, - HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - assertEquals(expectedLocalZk, getLocalZkUrl(conf)); + @Test + public void testGetZookeeperQuorum() { + Configuration conf = HBaseConfiguration.create(); + // default local ZK is 127.0.0.1:2181:/hbase + final String localZk = String.format("127.0.0.1:%d:%s", HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + assertEquals(localZk, getLocalZkUrl(conf)); - // set host name and port; use default znode parent - final int port = 21810; - conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, port); - final String expectedLocalZk2 = String.format("%s:%d:%s", host, port, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - assertEquals(expectedLocalZk2, getLocalZkUrl(conf)); + // set host name only; use default port and znode parent + final String host = "foobar"; + conf.set(HConstants.ZOOKEEPER_QUORUM, "foobar"); + final String expectedLocalZk = String.format("%s:%d:%s", host, + HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + assertEquals(expectedLocalZk, getLocalZkUrl(conf)); - // set host name, port and znode parent - final String znode = "/hbase2"; - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, znode); - final String expectedLocalZk3 = String.format("%s:%d:%s", host, port, znode); - assertEquals(expectedLocalZk3, getLocalZkUrl(conf)); + // set host name and port; use default znode parent + final int port = 21810; + conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, port); + final String expectedLocalZk2 = + String.format("%s:%d:%s", host, port, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + assertEquals(expectedLocalZk2, getLocalZkUrl(conf)); - // empty hostname is invalid - conf.set(HConstants.ZOOKEEPER_QUORUM, ""); - try { - getLocalZkUrl(conf); - fail("Should have failed because " + HConstants.ZOOKEEPER_QUORUM + " is not set"); - } catch (IllegalArgumentException e) { - LOG.info("Got expected exception when no ZK quorum is set", e); - } + // set host name, port and znode parent + final String znode = "/hbase2"; + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, 
znode); + final String expectedLocalZk3 = String.format("%s:%d:%s", host, port, znode); + assertEquals(expectedLocalZk3, getLocalZkUrl(conf)); - // invalid port - String invalidPort = "invalidPort"; - conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "invalidPort"); - try { - getLocalZkUrl(conf); - fail("Should have failed because port " + invalidPort + " is invalid"); - } catch (IllegalArgumentException e) { - LOG.info("Got expected exception because port {} is invalid", invalidPort, e); - } + // empty hostname is invalid + conf.set(HConstants.ZOOKEEPER_QUORUM, ""); + try { + getLocalZkUrl(conf); + fail("Should have failed because " + HConstants.ZOOKEEPER_QUORUM + " is not set"); + } catch (IllegalArgumentException e) { + LOG.info("Got expected exception when no ZK quorum is set", e); } - /** Helper method to make curator return V1 record when agent reads data. */ - private void saveRecordV1ToZk() throws Exception { - GetDataBuilder getDataBuilder = Mockito.mock(GetDataBuilder.class); - when(getDataBuilder.forPath(anyString())).thenReturn(ClusterRoleRecord.toJson(recordV1)); - when(curator.getData()).thenReturn(getDataBuilder); + // invalid port + String invalidPort = "invalidPort"; + conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "invalidPort"); + try { + getLocalZkUrl(conf); + fail("Should have failed because port " + invalidPort + " is invalid"); + } catch (IllegalArgumentException e) { + LOG.info("Got expected exception because port {} is invalid", invalidPort, e); } + } + + /** Helper method to make curator return V1 record when agent reads data. */ + private void saveRecordV1ToZk() throws Exception { + GetDataBuilder getDataBuilder = Mockito.mock(GetDataBuilder.class); + when(getDataBuilder.forPath(anyString())).thenReturn(ClusterRoleRecord.toJson(recordV1)); + when(curator.getData()).thenReturn(getDataBuilder); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProviderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProviderTest.java index 6a402b392ac..6ec10879c92 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProviderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixHAExecutorServiceProviderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +24,6 @@ import java.util.Properties; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; import org.junit.After; @@ -35,98 +34,102 @@ public class PhoenixHAExecutorServiceProviderTest { - private static final Properties properties = new Properties(); + private static final Properties properties = new Properties(); - @BeforeClass - public static void setupBeforeClass() { - properties.setProperty(PhoenixHAExecutorServiceProvider.HA_MAX_POOL_SIZE, "2"); - properties.setProperty(PhoenixHAExecutorServiceProvider.HA_MAX_QUEUE_SIZE, "5"); - } + @BeforeClass + public static void setupBeforeClass() { + properties.setProperty(PhoenixHAExecutorServiceProvider.HA_MAX_POOL_SIZE, "2"); + properties.setProperty(PhoenixHAExecutorServiceProvider.HA_MAX_QUEUE_SIZE, "5"); + } - @AfterClass - public static void afterClass() { - PhoenixHAExecutorServiceProvider.resetExecutor(); - } + @AfterClass + public static void afterClass() { + PhoenixHAExecutorServiceProvider.resetExecutor(); + } - @Before - public void beforeTest() { - PhoenixHAExecutorServiceProvider.resetExecutor(); - PhoenixHAExecutorServiceProvider.get(properties); - } + @Before + public void beforeTest() { + PhoenixHAExecutorServiceProvider.resetExecutor(); + PhoenixHAExecutorServiceProvider.get(properties); + } - @After - public void afterTest() { - for (PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices c : PhoenixHAExecutorServiceProvider.get(properties)) { - c.getExecutorService().shutdownNow(); - c.getCloseExecutorService().shutdownNow(); - } + @After + public void afterTest() { + for (PhoenixHAExecutorServiceProvider.PhoenixHAClusterExecutorServices c : PhoenixHAExecutorServiceProvider + .get(properties)) { + c.getExecutorService().shutdownNow(); + c.getCloseExecutorService().shutdownNow(); } + } - @Test - public void testHAExecutorService1Capacity() { - testHAExecutorServiceCapacity(0); - } + @Test + public void testHAExecutorService1Capacity() { + testHAExecutorServiceCapacity(0); + } - @Test - public void testHAExecutorService2Capacity() { - testHAExecutorServiceCapacity(1); - } + @Test + public void testHAExecutorService2Capacity() { + testHAExecutorServiceCapacity(1); + } - private void testHAExecutorServiceCapacity(int index) { - Properties props = new Properties(); - props.setProperty(PhoenixHAExecutorServiceProvider.HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD, - "0.5"); - ThreadPoolExecutor es = - (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties).get(index).getExecutorService(); - Object obj = new Object(); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertEquals(es.getQueue().size(), 0); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertTrue(PhoenixHAExecutorServiceProvider.hasCapacity(props).get(index)); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertFalse(PhoenixHAExecutorServiceProvider.hasCapacity(props).get(index)); - synchronized (obj) { - obj.notifyAll(); - } + private void testHAExecutorServiceCapacity(int index) { + Properties props = new Properties(); + 
props.setProperty(PhoenixHAExecutorServiceProvider.HA_THREADPOOL_QUEUE_BACKOFF_THRESHOLD, + "0.5"); + ThreadPoolExecutor es = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties) + .get(index).getExecutorService(); + Object obj = new Object(); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertEquals(es.getQueue().size(), 0); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertTrue(PhoenixHAExecutorServiceProvider.hasCapacity(props).get(index)); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertFalse(PhoenixHAExecutorServiceProvider.hasCapacity(props).get(index)); + synchronized (obj) { + obj.notifyAll(); } + } - @Test - public void testHAExecutorServiceQueuing() { - ThreadPoolExecutor es = - (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties).get(0).getExecutorService(); - Object obj = new Object(); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertEquals(es.getQueue().size(), 0); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertEquals(es.getQueue().size(), 0); - CompletableFuture.runAsync(getWaitingRunnable(obj), es); - assertEquals(es.getQueue().size(), 1); - synchronized (obj) { - obj.notifyAll(); - } + @Test + public void testHAExecutorServiceQueuing() { + ThreadPoolExecutor es = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties) + .get(0).getExecutorService(); + Object obj = new Object(); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertEquals(es.getQueue().size(), 0); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertEquals(es.getQueue().size(), 0); + CompletableFuture.runAsync(getWaitingRunnable(obj), es); + assertEquals(es.getQueue().size(), 1); + synchronized (obj) { + obj.notifyAll(); } + } - @Test - public void testHAExecutorServiceCloserConfigured() { - ThreadPoolExecutor es1 = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties).get(0).getCloseExecutorService(); - ThreadPoolExecutor es2 = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties).get(1).getCloseExecutorService(); - int expectedPoolSize = Integer.valueOf(PhoenixHAExecutorServiceProvider.DEFAULT_HA_CLOSE_MAX_POOL_SIZE); - assertEquals(expectedPoolSize, es1.getMaximumPoolSize()); - assertEquals(expectedPoolSize, es2.getMaximumPoolSize()); - assertNotEquals(es1, es2); - } + @Test + public void testHAExecutorServiceCloserConfigured() { + ThreadPoolExecutor es1 = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties) + .get(0).getCloseExecutorService(); + ThreadPoolExecutor es2 = (ThreadPoolExecutor) PhoenixHAExecutorServiceProvider.get(properties) + .get(1).getCloseExecutorService(); + int expectedPoolSize = + Integer.valueOf(PhoenixHAExecutorServiceProvider.DEFAULT_HA_CLOSE_MAX_POOL_SIZE); + assertEquals(expectedPoolSize, es1.getMaximumPoolSize()); + assertEquals(expectedPoolSize, es2.getMaximumPoolSize()); + assertNotEquals(es1, es2); + } - private Runnable getWaitingRunnable(Object obj) { - return (() -> { - synchronized (obj) { - try { - obj.wait(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - }); - } + private Runnable getWaitingRunnable(Object obj) { + return (() -> { + synchronized (obj) { + try { + obj.wait(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + }); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixPreparedStatementTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixPreparedStatementTest.java index 5d1d09922c8..2b897dd2de7 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixPreparedStatementTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixPreparedStatementTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,63 +25,60 @@ import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; import org.junit.Test; public class PhoenixPreparedStatementTest extends BaseConnectionlessQueryTest { - @Test - public void testSetParameter_InvalidIndex() throws Exception { - Properties connectionProperties = new Properties(); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + @Test + public void testSetParameter_InvalidIndex() throws Exception { + Properties connectionProperties = new Properties(); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - PreparedStatement stmt = connection.prepareStatement( - "UPSERT INTO " + ATABLE + " (organization_id, entity_id, a_integer) " + - "VALUES (?,?,?)"); + PreparedStatement stmt = connection.prepareStatement( + "UPSERT INTO " + ATABLE + " (organization_id, entity_id, a_integer) " + "VALUES (?,?,?)"); - stmt.setString(1, "AAA"); - stmt.setString(2, "BBB"); - stmt.setInt(3, 1); + stmt.setString(1, "AAA"); + stmt.setString(2, "BBB"); + stmt.setInt(3, 1); - try { - stmt.setString(4, "Invalid bind column"); - fail("Setting a value for a column that doesn't exist should throw SQLException"); - } catch (SQLException e) { - // Expected exception - } + try { + stmt.setString(4, "Invalid bind column"); + fail("Setting a value for a column that doesn't exist should throw SQLException"); + } catch (SQLException e) { + // Expected exception + } - try { - stmt.setString(-1, "Invalid bind column"); - fail("Setting a value for a column that doesn't exist should throw SQLException"); - } catch (SQLException e) { - // Expected exception - } + try { + stmt.setString(-1, "Invalid bind column"); + fail("Setting a value for a column that doesn't exist should throw SQLException"); + } catch (SQLException e) { + // Expected exception } - - @Test - public void testMutationUsingExecuteQueryShouldFail() throws Exception { - Properties connectionProperties = new Properties(); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - PreparedStatement stmt = connection.prepareStatement("DELETE FROM " + ATABLE); - try { - stmt.executeQuery(); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); - } + } + + @Test + public void testMutationUsingExecuteQueryShouldFail() throws Exception { + Properties connectionProperties = new Properties(); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + PreparedStatement stmt = connection.prepareStatement("DELETE FROM " + ATABLE); + 
try { + stmt.executeQuery(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); } - - @Test - public void testQueriesUsingExecuteUpdateShouldFail() throws Exception { - Properties connectionProperties = new Properties(); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - PreparedStatement stmt = connection.prepareStatement("SELECT * FROM " + ATABLE); - try { - stmt.executeUpdate(); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); - } + } + + @Test + public void testQueriesUsingExecuteUpdateShouldFail() throws Exception { + Properties connectionProperties = new Properties(); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + PreparedStatement stmt = connection.prepareStatement("SELECT * FROM " + ATABLE); + try { + stmt.executeUpdate(); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java index 6e9b256db98..a04a3fef401 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,129 +29,132 @@ import org.junit.Test; public class PhoenixResultSetMetadataTest extends BaseConnectionlessQueryTest { - - @Test - public void testColumnDisplaySize() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - ResultSet rs = conn.createStatement().executeQuery("SELECT pk1, pk2, v1, v2, CAST(null AS varchar) FROM T"); - assertEquals(15, rs.getMetaData().getColumnDisplaySize(1)); - assertEquals(PhoenixResultSetMetaData.DEFAULT_DISPLAY_WIDTH, rs.getMetaData().getColumnDisplaySize(2)); - assertEquals(15, rs.getMetaData().getColumnDisplaySize(3)); - assertEquals(conn.unwrap(PhoenixConnection.class).getDatePattern().length(), rs.getMetaData().getColumnDisplaySize(4)); - assertEquals(40, rs.getMetaData().getColumnDisplaySize(5)); - } - - @Test - public void testNullTypeName() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - ResultSet rs = conn.createStatement().executeQuery("select null"); - - assertEquals("NULL", rs.getMetaData().getColumnTypeName(1)); - } - - @Test - public void testCaseSensitiveExpression() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, \"v1\" VARCHAR(15), v2 DATE, \"v3\" VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - ResultSet rs = conn.createStatement().executeQuery("SELECT pk1 AS testalias1, pk2 AS 
\"testalias2\", " + - "\"v1\" AS \"testalias3\", v2, \"v3\" FROM T"); - - assertEquals("PK1", rs.getMetaData().getColumnName(1)); - assertEquals("TESTALIAS1", rs.getMetaData().getColumnLabel(1)); - assertFalse(rs.getMetaData().isCaseSensitive(1)); - - assertEquals("PK2", rs.getMetaData().getColumnName(2)); - assertEquals("testalias2", rs.getMetaData().getColumnLabel(2)); - assertTrue(rs.getMetaData().isCaseSensitive(2)); - - assertEquals("v1", rs.getMetaData().getColumnName(3)); - assertEquals("testalias3", rs.getMetaData().getColumnLabel(3)); - assertTrue(rs.getMetaData().isCaseSensitive(3)); - - assertEquals("V2", rs.getMetaData().getColumnName(4)); - assertEquals("V2", rs.getMetaData().getColumnLabel(4)); - assertFalse(rs.getMetaData().isCaseSensitive(4)); - - assertEquals("v3", rs.getMetaData().getColumnName(5)); - assertEquals("v3", rs.getMetaData().getColumnLabel(5)); - assertTrue(rs.getMetaData().isCaseSensitive(5)); - } - - @Test - public void testLabel() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - ResultSet rs = conn.createStatement().executeQuery("SELECT pk1 AS testalias1, pk2, " + - "v1 AS testalias2, v2 FROM T"); - assertEquals("PK1", rs.getMetaData().getColumnName(1)); - assertEquals("TESTALIAS1", rs.getMetaData().getColumnLabel(1)); - assertEquals("PK2", rs.getMetaData().getColumnName(2)); - assertEquals("PK2", rs.getMetaData().getColumnLabel(2)); - assertEquals("V1", rs.getMetaData().getColumnName(3)); - assertEquals("TESTALIAS2", rs.getMetaData().getColumnLabel(3)); - assertEquals("V2", rs.getMetaData().getColumnName(4)); - assertEquals("V2", rs.getMetaData().getColumnLabel(4)); - } - - @Test - public void testSummandExpression() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T (pk1 CHAR(15) not null, pk2 INTEGER not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - ResultSet rs = conn.createStatement().executeQuery("SELECT 3+pk2 FROM T"); - assertEquals("(3 + PK2)", rs.getMetaData().getColumnName(1)); - assertEquals("(3 + PK2)", rs.getMetaData().getColumnLabel(1)); - rs = conn.createStatement().executeQuery("SELECT 3+pk2 AS sum FROM T"); - assertEquals("(3 + PK2)", rs.getMetaData().getColumnName(1)); - assertEquals("SUM", rs.getMetaData().getColumnLabel(1)); - } - - @Test - public void testSqrtExpression() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T (pk1 CHAR(15) not null, pk2 INTEGER not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - ResultSet rs = conn.createStatement().executeQuery("SELECT SQRT(3+pk2) FROM T"); - assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnName(1)); - assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnLabel(1)); - rs = conn.createStatement().executeQuery("SELECT SQRT(3+pk2) AS \"sqrt\" FROM T"); - assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnName(1)); - assertEquals("sqrt", rs.getMetaData().getColumnLabel(1)); - } - - @Test - public void testView() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE IF NOT EXISTS S.T (A INTEGER PRIMARY KEY, B INTEGER, C VARCHAR, D INTEGER)"); - 
conn.createStatement().execute( - "CREATE VIEW IF NOT EXISTS S.V (VA INTEGER, VB INTEGER) AS SELECT * FROM S.T WHERE A=2"); - conn.createStatement().execute( - "UPSERT INTO S.V (A, B, C, D, VA, VB) VALUES (2, 200, 'def', -20, 91, 101)"); - conn.createStatement().execute( - "ALTER VIEW S.V DROP COLUMN C"); - - ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM S.V"); - assertEquals("A", rs.getMetaData().getColumnName(1)); - assertEquals("A", rs.getMetaData().getColumnLabel(1)); - assertEquals("B", rs.getMetaData().getColumnName(2)); - assertEquals("B", rs.getMetaData().getColumnLabel(2)); - assertEquals("C", rs.getMetaData().getColumnName(3)); - assertEquals("C", rs.getMetaData().getColumnLabel(3)); - assertEquals("D", rs.getMetaData().getColumnName(4)); - assertEquals("D", rs.getMetaData().getColumnLabel(4)); - assertEquals("VA", rs.getMetaData().getColumnName(5)); - assertEquals("VA", rs.getMetaData().getColumnLabel(5)); - assertEquals("VB", rs.getMetaData().getColumnName(6)); - assertEquals("VB", rs.getMetaData().getColumnLabel(6)); - } + + @Test + public void testColumnDisplaySize() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + ResultSet rs = + conn.createStatement().executeQuery("SELECT pk1, pk2, v1, v2, CAST(null AS varchar) FROM T"); + assertEquals(15, rs.getMetaData().getColumnDisplaySize(1)); + assertEquals(PhoenixResultSetMetaData.DEFAULT_DISPLAY_WIDTH, + rs.getMetaData().getColumnDisplaySize(2)); + assertEquals(15, rs.getMetaData().getColumnDisplaySize(3)); + assertEquals(conn.unwrap(PhoenixConnection.class).getDatePattern().length(), + rs.getMetaData().getColumnDisplaySize(4)); + assertEquals(40, rs.getMetaData().getColumnDisplaySize(5)); + } + + @Test + public void testNullTypeName() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + ResultSet rs = conn.createStatement().executeQuery("select null"); + + assertEquals("NULL", rs.getMetaData().getColumnTypeName(1)); + } + + @Test + public void testCaseSensitiveExpression() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, \"v1\" VARCHAR(15), v2 DATE, \"v3\" VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + ResultSet rs = + conn.createStatement().executeQuery("SELECT pk1 AS testalias1, pk2 AS \"testalias2\", " + + "\"v1\" AS \"testalias3\", v2, \"v3\" FROM T"); + + assertEquals("PK1", rs.getMetaData().getColumnName(1)); + assertEquals("TESTALIAS1", rs.getMetaData().getColumnLabel(1)); + assertFalse(rs.getMetaData().isCaseSensitive(1)); + + assertEquals("PK2", rs.getMetaData().getColumnName(2)); + assertEquals("testalias2", rs.getMetaData().getColumnLabel(2)); + assertTrue(rs.getMetaData().isCaseSensitive(2)); + + assertEquals("v1", rs.getMetaData().getColumnName(3)); + assertEquals("testalias3", rs.getMetaData().getColumnLabel(3)); + assertTrue(rs.getMetaData().isCaseSensitive(3)); + + assertEquals("V2", rs.getMetaData().getColumnName(4)); + assertEquals("V2", rs.getMetaData().getColumnLabel(4)); + assertFalse(rs.getMetaData().isCaseSensitive(4)); + + assertEquals("v3", rs.getMetaData().getColumnName(5)); + assertEquals("v3", rs.getMetaData().getColumnLabel(5)); + assertTrue(rs.getMetaData().isCaseSensitive(5)); + } + + @Test + public void 
testLabel() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + ResultSet rs = conn.createStatement() + .executeQuery("SELECT pk1 AS testalias1, pk2, " + "v1 AS testalias2, v2 FROM T"); + assertEquals("PK1", rs.getMetaData().getColumnName(1)); + assertEquals("TESTALIAS1", rs.getMetaData().getColumnLabel(1)); + assertEquals("PK2", rs.getMetaData().getColumnName(2)); + assertEquals("PK2", rs.getMetaData().getColumnLabel(2)); + assertEquals("V1", rs.getMetaData().getColumnName(3)); + assertEquals("TESTALIAS2", rs.getMetaData().getColumnLabel(3)); + assertEquals("V2", rs.getMetaData().getColumnName(4)); + assertEquals("V2", rs.getMetaData().getColumnLabel(4)); + } + + @Test + public void testSummandExpression() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T (pk1 CHAR(15) not null, pk2 INTEGER not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + ResultSet rs = conn.createStatement().executeQuery("SELECT 3+pk2 FROM T"); + assertEquals("(3 + PK2)", rs.getMetaData().getColumnName(1)); + assertEquals("(3 + PK2)", rs.getMetaData().getColumnLabel(1)); + rs = conn.createStatement().executeQuery("SELECT 3+pk2 AS sum FROM T"); + assertEquals("(3 + PK2)", rs.getMetaData().getColumnName(1)); + assertEquals("SUM", rs.getMetaData().getColumnLabel(1)); + } + + @Test + public void testSqrtExpression() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T (pk1 CHAR(15) not null, pk2 INTEGER not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + ResultSet rs = conn.createStatement().executeQuery("SELECT SQRT(3+pk2) FROM T"); + assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnName(1)); + assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnLabel(1)); + rs = conn.createStatement().executeQuery("SELECT SQRT(3+pk2) AS \"sqrt\" FROM T"); + assertEquals("SQRT((3 + PK2))", rs.getMetaData().getColumnName(1)); + assertEquals("sqrt", rs.getMetaData().getColumnLabel(1)); + } + + @Test + public void testView() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE IF NOT EXISTS S.T (A INTEGER PRIMARY KEY, B INTEGER, C VARCHAR, D INTEGER)"); + conn.createStatement().execute( + "CREATE VIEW IF NOT EXISTS S.V (VA INTEGER, VB INTEGER) AS SELECT * FROM S.T WHERE A=2"); + conn.createStatement() + .execute("UPSERT INTO S.V (A, B, C, D, VA, VB) VALUES (2, 200, 'def', -20, 91, 101)"); + conn.createStatement().execute("ALTER VIEW S.V DROP COLUMN C"); + + ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM S.V"); + assertEquals("A", rs.getMetaData().getColumnName(1)); + assertEquals("A", rs.getMetaData().getColumnLabel(1)); + assertEquals("B", rs.getMetaData().getColumnName(2)); + assertEquals("B", rs.getMetaData().getColumnLabel(2)); + assertEquals("C", rs.getMetaData().getColumnName(3)); + assertEquals("C", rs.getMetaData().getColumnLabel(3)); + assertEquals("D", rs.getMetaData().getColumnName(4)); + assertEquals("D", rs.getMetaData().getColumnLabel(4)); + assertEquals("VA", rs.getMetaData().getColumnName(5)); + assertEquals("VA", rs.getMetaData().getColumnLabel(5)); + 
assertEquals("VB", rs.getMetaData().getColumnName(6)); + assertEquals("VB", rs.getMetaData().getColumnLabel(6)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixStatementTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixStatementTest.java index 026c46b9e9c..d51231a2c43 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixStatementTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixStatementTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,226 +43,222 @@ public class PhoenixStatementTest extends BaseConnectionlessQueryTest { - private static Field connectionField; - private static Field batchField; + private static Field connectionField; + private static Field batchField; - static { - try { - connectionField = PhoenixStatement.class.getDeclaredField("connection"); - connectionField.setAccessible(true); - batchField = PhoenixStatement.class.getDeclaredField("batch"); - batchField.setAccessible(true); - } catch (NoSuchFieldException | SecurityException e) { - //Test would fail - } + static { + try { + connectionField = PhoenixStatement.class.getDeclaredField("connection"); + connectionField.setAccessible(true); + batchField = PhoenixStatement.class.getDeclaredField("batch"); + batchField.setAccessible(true); + } catch (NoSuchFieldException | SecurityException e) { + // Test would fail } + } - @Test - public void testMutationUsingExecuteQueryShouldFail() throws Exception { - Properties connectionProperties = new Properties(); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - try { - stmt.executeQuery("DELETE FROM " + ATABLE); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); - } + @Test + public void testMutationUsingExecuteQueryShouldFail() throws Exception { + Properties connectionProperties = new Properties(); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + try { + stmt.executeQuery("DELETE FROM " + ATABLE); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.EXECUTE_QUERY_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testQueriesUsingExecuteUpdateShouldFail() throws Exception { - Properties connectionProperties = new Properties(); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - try { - stmt.executeUpdate("SELECT * FROM " + ATABLE); - fail(); - } catch(SQLException e) { - assertEquals(SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); - } + @Test + public void testQueriesUsingExecuteUpdateShouldFail() throws Exception { + Properties connectionProperties = new Properties(); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + try { + stmt.executeUpdate("SELECT * FROM " + ATABLE); + fail(); + } catch (SQLException e) { + 
assertEquals(SQLExceptionCode.EXECUTE_UPDATE_NOT_APPLICABLE.getErrorCode(), e.getErrorCode()); } + } - @Test - /** - * Validates that if a user sets the query timeout via the - * stmt.setQueryTimeout() JDBC method, we correctly store the timeout - * in both milliseconds and seconds. - */ - public void testSettingQueryTimeoutViaJdbc() throws Exception { - // Arrange - Connection connection = DriverManager.getConnection(getUrl()); - Statement stmt = connection.createStatement(); - PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); + @Test + /** + * Validates that if a user sets the query timeout via the stmt.setQueryTimeout() JDBC method, we + * correctly store the timeout in both milliseconds and seconds. + */ + public void testSettingQueryTimeoutViaJdbc() throws Exception { + // Arrange + Connection connection = DriverManager.getConnection(getUrl()); + Statement stmt = connection.createStatement(); + PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); - // Act - stmt.setQueryTimeout(3); + // Act + stmt.setQueryTimeout(3); - // Assert - assertEquals(3, stmt.getQueryTimeout()); - assertEquals(3000, phoenixStmt.getQueryTimeoutInMillis()); - } + // Assert + assertEquals(3, stmt.getQueryTimeout()); + assertEquals(3000, phoenixStmt.getQueryTimeoutInMillis()); + } - @Test - /** - * Validates if a user sets the timeout to zero that we store the timeout - * in millis as the Integer.MAX_VALUE. - */ - public void testSettingZeroQueryTimeoutViaJdbc() throws Exception { - // Arrange - Connection connection = DriverManager.getConnection(getUrl()); - Statement stmt = connection.createStatement(); - PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); + @Test + /** + * Validates if a user sets the timeout to zero that we store the timeout in millis as the + * Integer.MAX_VALUE. + */ + public void testSettingZeroQueryTimeoutViaJdbc() throws Exception { + // Arrange + Connection connection = DriverManager.getConnection(getUrl()); + Statement stmt = connection.createStatement(); + PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); - // Act - stmt.setQueryTimeout(0); + // Act + stmt.setQueryTimeout(0); - // Assert - assertEquals(Integer.MAX_VALUE / 1000, stmt.getQueryTimeout()); - assertEquals(Integer.MAX_VALUE, phoenixStmt.getQueryTimeoutInMillis()); - } + // Assert + assertEquals(Integer.MAX_VALUE / 1000, stmt.getQueryTimeout()); + assertEquals(Integer.MAX_VALUE, phoenixStmt.getQueryTimeoutInMillis()); + } - @Test - /** - * Validates that is negative value is supplied we set the timeout to the default. - */ - public void testSettingNegativeQueryTimeoutViaJdbc() throws Exception { - // Arrange - Connection connection = DriverManager.getConnection(getUrl()); - Statement stmt = connection.createStatement(); - PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); - PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); - int defaultQueryTimeout = phoenixConnection.getQueryServices().getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, - QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); + @Test + /** + * Validates that is negative value is supplied we set the timeout to the default. 
+ */ + public void testSettingNegativeQueryTimeoutViaJdbc() throws Exception { + // Arrange + Connection connection = DriverManager.getConnection(getUrl()); + Statement stmt = connection.createStatement(); + PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); + PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class); + int defaultQueryTimeout = phoenixConnection.getQueryServices().getProps().getInt( + QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS); - // Act - stmt.setQueryTimeout(-1); + // Act + stmt.setQueryTimeout(-1); - // Assert - assertEquals(defaultQueryTimeout / 1000, stmt.getQueryTimeout()); - assertEquals(defaultQueryTimeout, phoenixStmt.getQueryTimeoutInMillis()); - } + // Assert + assertEquals(defaultQueryTimeout / 1000, stmt.getQueryTimeout()); + assertEquals(defaultQueryTimeout, phoenixStmt.getQueryTimeoutInMillis()); + } - @Test - /** - * Validates that setting custom phoenix query timeout using - * the phoenix.query.timeoutMs config property is honored. - */ - public void testCustomQueryTimeout() throws Exception { - // Arrange - Properties connectionProperties = new Properties(); - connectionProperties.setProperty("phoenix.query.timeoutMs", "2350"); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); + @Test + /** + * Validates that setting custom phoenix query timeout using the phoenix.query.timeoutMs config + * property is honored. + */ + public void testCustomQueryTimeout() throws Exception { + // Arrange + Properties connectionProperties = new Properties(); + connectionProperties.setProperty("phoenix.query.timeoutMs", "2350"); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); - // Assert - assertEquals(3, stmt.getQueryTimeout()); - assertEquals(2350, phoenixStmt.getQueryTimeoutInMillis()); - } + // Assert + assertEquals(3, stmt.getQueryTimeout()); + assertEquals(2350, phoenixStmt.getQueryTimeoutInMillis()); + } - @Test - public void testZeroCustomQueryTimeout() throws Exception { - // Arrange - Properties connectionProperties = new Properties(); - connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); + @Test + public void testZeroCustomQueryTimeout() throws Exception { + // Arrange + Properties connectionProperties = new Properties(); + connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + PhoenixStatement phoenixStmt = stmt.unwrap(PhoenixStatement.class); - // Assert - assertEquals(0, stmt.getQueryTimeout()); - assertEquals(0, phoenixStmt.getQueryTimeoutInMillis()); - } + // Assert + assertEquals(0, stmt.getQueryTimeout()); + assertEquals(0, phoenixStmt.getQueryTimeoutInMillis()); + } - @Test - public void testExecuteBatchWithFailedStatement() throws Exception { - // Arrange - Properties connectionProperties = new Properties(); - connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); - Connection connection = 
DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - PhoenixConnection connSpy = spy(connection.unwrap(PhoenixConnection.class)); - connectionField.set(stmt, connSpy); - List batch = Lists.newArrayList( - mock(PhoenixPreparedStatement.class), - mock(PhoenixPreparedStatement.class), - mock(PhoenixPreparedStatement.class)); - batchField.set(stmt, batch); - final String exMsg = "TEST"; - when(batch.get(0).getUpdateCount()).thenReturn(1); - doThrow(new SQLException(exMsg)).when(batch.get(1)).executeForBatch(); - // However, we don't expect this to be called. - when(batch.get(1).getUpdateCount()).thenReturn(1); + @Test + public void testExecuteBatchWithFailedStatement() throws Exception { + // Arrange + Properties connectionProperties = new Properties(); + connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + PhoenixConnection connSpy = spy(connection.unwrap(PhoenixConnection.class)); + connectionField.set(stmt, connSpy); + List batch = Lists.newArrayList(mock(PhoenixPreparedStatement.class), + mock(PhoenixPreparedStatement.class), mock(PhoenixPreparedStatement.class)); + batchField.set(stmt, batch); + final String exMsg = "TEST"; + when(batch.get(0).getUpdateCount()).thenReturn(1); + doThrow(new SQLException(exMsg)).when(batch.get(1)).executeForBatch(); + // However, we don't expect this to be called. + when(batch.get(1).getUpdateCount()).thenReturn(1); - // Act & Assert - BatchUpdateException ex = assertThrows(BatchUpdateException.class, () -> stmt.executeBatch()); - assertEquals(exMsg, ex.getCause().getMessage()); - int[] updateCounts = ex.getUpdateCounts(); - assertEquals(3, updateCounts.length); - assertEquals(1, updateCounts[0]); - assertEquals(Statement.EXECUTE_FAILED, updateCounts[1]); - assertEquals(-1, updateCounts[2]); - verify(connSpy, never()).commit(); // Ensure commit was never called. - } + // Act & Assert + BatchUpdateException ex = assertThrows(BatchUpdateException.class, () -> stmt.executeBatch()); + assertEquals(exMsg, ex.getCause().getMessage()); + int[] updateCounts = ex.getUpdateCounts(); + assertEquals(3, updateCounts.length); + assertEquals(1, updateCounts[0]); + assertEquals(Statement.EXECUTE_FAILED, updateCounts[1]); + assertEquals(-1, updateCounts[2]); + verify(connSpy, never()).commit(); // Ensure commit was never called. 
+ } - @Test - public void testExecuteBatchWithCommitFailure() throws Exception { - // Arrange - Properties connectionProperties = new Properties(); - connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); - Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); - Statement stmt = connection.createStatement(); - PhoenixConnection connSpy = spy(connection.unwrap(PhoenixConnection.class)); - connectionField.set(stmt, connSpy); - List batch = Lists.newArrayList( - mock(PhoenixPreparedStatement.class)); - batchField.set(stmt, batch); - final String exMsg = "TEST"; - doThrow(new SQLException(exMsg)).when(connSpy).commit(); - when(connSpy.getAutoCommit()).thenReturn(true); + @Test + public void testExecuteBatchWithCommitFailure() throws Exception { + // Arrange + Properties connectionProperties = new Properties(); + connectionProperties.setProperty("phoenix.query.timeoutMs", "0"); + Connection connection = DriverManager.getConnection(getUrl(), connectionProperties); + Statement stmt = connection.createStatement(); + PhoenixConnection connSpy = spy(connection.unwrap(PhoenixConnection.class)); + connectionField.set(stmt, connSpy); + List batch = Lists.newArrayList(mock(PhoenixPreparedStatement.class)); + batchField.set(stmt, batch); + final String exMsg = "TEST"; + doThrow(new SQLException(exMsg)).when(connSpy).commit(); + when(connSpy.getAutoCommit()).thenReturn(true); - // Act & Assert - BatchUpdateException ex = assertThrows(BatchUpdateException.class, () -> stmt.executeBatch()); - assertEquals(exMsg, ex.getCause().getMessage()); - assertNull(ex.getUpdateCounts()); - } + // Act & Assert + BatchUpdateException ex = assertThrows(BatchUpdateException.class, () -> stmt.executeBatch()); + assertEquals(exMsg, ex.getCause().getMessage()); + assertNull(ex.getUpdateCounts()); + } - @Test - public void testRecursiveClose() throws SQLException { - Connection connection = DriverManager.getConnection(getUrl()); - Statement stmt1 = connection.createStatement(); - ResultSet rs11 = stmt1.executeQuery("select * from atable"); - rs11.close(); - assertTrue(rs11.isClosed()); - ResultSet rs12 = stmt1.executeQuery("select * from atable"); - stmt1.close(); - assertTrue(stmt1.isClosed()); - assertTrue(rs12.isClosed()); + @Test + public void testRecursiveClose() throws SQLException { + Connection connection = DriverManager.getConnection(getUrl()); + Statement stmt1 = connection.createStatement(); + ResultSet rs11 = stmt1.executeQuery("select * from atable"); + rs11.close(); + assertTrue(rs11.isClosed()); + ResultSet rs12 = stmt1.executeQuery("select * from atable"); + stmt1.close(); + assertTrue(stmt1.isClosed()); + assertTrue(rs12.isClosed()); - Statement stmt2 = connection.createStatement(); - stmt2.closeOnCompletion(); - ResultSet rs21 = stmt2.executeQuery("select * from atable"); - rs21.close(); - assertTrue(stmt2.isClosed()); + Statement stmt2 = connection.createStatement(); + stmt2.closeOnCompletion(); + ResultSet rs21 = stmt2.executeQuery("select * from atable"); + rs21.close(); + assertTrue(stmt2.isClosed()); - Statement stmt3 = connection.createStatement(); - ResultSet rs31 = stmt3.executeQuery("select * from atable"); - stmt3.executeUpdate("upsert into ATABLE VALUES ('1', '2', '3')"); - assertTrue(rs31.isClosed()); - ResultSet rs32 = stmt3.executeQuery("select * from atable"); - ResultSet rs33 = stmt3.executeQuery("select * from atable"); - assertTrue(rs32.isClosed()); + Statement stmt3 = connection.createStatement(); + ResultSet rs31 = stmt3.executeQuery("select * from 
atable"); + stmt3.executeUpdate("upsert into ATABLE VALUES ('1', '2', '3')"); + assertTrue(rs31.isClosed()); + ResultSet rs32 = stmt3.executeQuery("select * from atable"); + ResultSet rs33 = stmt3.executeQuery("select * from atable"); + assertTrue(rs32.isClosed()); - Statement stmt4 = connection.createStatement(); - Statement stmt5 = connection.createStatement(); - ResultSet rs41 = stmt3.executeQuery("select * from atable"); - ResultSet rs51 = stmt3.executeQuery("select * from atable"); - connection.close(); - assertTrue(connection.isClosed()); - assertTrue(stmt4.isClosed()); - assertTrue(stmt5.isClosed()); - assertTrue(rs41.isClosed()); - assertTrue(rs51.isClosed()); - } + Statement stmt4 = connection.createStatement(); + Statement stmt5 = connection.createStatement(); + ResultSet rs41 = stmt3.executeQuery("select * from atable"); + ResultSet rs51 = stmt3.executeQuery("select * from atable"); + connection.close(); + assertTrue(connection.isClosed()); + assertTrue(stmt4.isClosed()); + assertTrue(stmt5.isClosed()); + assertTrue(rs41.isClosed()); + assertTrue(rs51.isClosed()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java index 368a9c52a46..d1be6eebf5f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixTestDriver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,103 +35,99 @@ import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.ReadOnlyProps; - - /** - * - * JDBC Driver implementation of Phoenix for testing. - * To use this driver, specify test=true in url. - * - * + * JDBC Driver implementation of Phoenix for testing. To use this driver, specify test=true in url. 
* @since 0.1 */ @ThreadSafe public class PhoenixTestDriver extends PhoenixEmbeddedDriver { - private final ReadOnlyProps overrideProps; - - @GuardedBy("this") - private final QueryServices queryServices; - - @GuardedBy("this") - private boolean closed = false; + private final ReadOnlyProps overrideProps; - @GuardedBy("this") - private final Map - connectionQueryServicesMap = new HashMap<>(); + @GuardedBy("this") + private final QueryServices queryServices; - public PhoenixTestDriver() { - this(ReadOnlyProps.EMPTY_PROPS); - } + @GuardedBy("this") + private boolean closed = false; - // For tests to override the default configuration - public PhoenixTestDriver(ReadOnlyProps props) { - overrideProps = props; - queryServices = new QueryServicesTestImpl(getDefaultProps(), overrideProps); - } + @GuardedBy("this") + private final Map connectionQueryServicesMap = + new HashMap<>(); - @Override - public synchronized QueryServices getQueryServices() { - checkClosed(); - return queryServices; - } + public PhoenixTestDriver() { + this(ReadOnlyProps.EMPTY_PROPS); + } + + // For tests to override the default configuration + public PhoenixTestDriver(ReadOnlyProps props) { + overrideProps = props; + queryServices = new QueryServicesTestImpl(getDefaultProps(), overrideProps); + } - @Override - public boolean acceptsURL(String url) throws SQLException { - // Accept the url only if test=true attribute set - return super.acceptsURL(url) && isTestUrl(url); + @Override + public synchronized QueryServices getQueryServices() { + checkClosed(); + return queryServices; + } + + @Override + public boolean acceptsURL(String url) throws SQLException { + // Accept the url only if test=true attribute set + return super.acceptsURL(url) && isTestUrl(url); + } + + @Override + public synchronized Connection connect(String url, Properties info) throws SQLException { + checkClosed(); + return super.connect(url, info); + } + + @Override // public for testing + public synchronized ConnectionQueryServices getConnectionQueryServices(String url, + Properties infoIn) throws SQLException { + checkClosed(); + final Properties info = PropertiesUtil.deepCopy(infoIn); + ConnectionInfo connInfo = ConnectionInfo.create(url, null, info); + ConnectionQueryServices connectionQueryServices = connectionQueryServicesMap.get(connInfo); + if (connectionQueryServices != null) { + return connectionQueryServices; } - - @Override - public synchronized Connection connect(String url, Properties info) throws SQLException { - checkClosed(); - return super.connect(url, info); + info.putAll(connInfo.asProps().asMap()); + if (connInfo.isConnectionless()) { + connectionQueryServices = new ConnectionlessQueryServicesImpl(queryServices, connInfo, info); + } else { + connectionQueryServices = new ConnectionQueryServicesTestImpl(queryServices, connInfo, info); } - - @Override // public for testing - public synchronized ConnectionQueryServices getConnectionQueryServices(String url, Properties infoIn) throws SQLException { - checkClosed(); - final Properties info = PropertiesUtil.deepCopy(infoIn); - ConnectionInfo connInfo = ConnectionInfo.create(url, null, info); - ConnectionQueryServices connectionQueryServices = connectionQueryServicesMap.get(connInfo); - if (connectionQueryServices != null) { - return connectionQueryServices; - } - info.putAll(connInfo.asProps().asMap()); - if (connInfo.isConnectionless()) { - connectionQueryServices = new ConnectionlessQueryServicesImpl(queryServices, connInfo, info); - } else { - connectionQueryServices = new 
ConnectionQueryServicesTestImpl(queryServices, connInfo, info); - } - connectionQueryServices.init(url, info); - connectionQueryServicesMap.put(connInfo, connectionQueryServices); - return connectionQueryServices; + connectionQueryServices.init(url, info); + connectionQueryServicesMap.put(connInfo, connectionQueryServices); + return connectionQueryServices; + } + + private synchronized void checkClosed() { + if (closed) { + throw new IllegalStateException("The Phoenix jdbc test driver has been closed."); } - - private synchronized void checkClosed() { - if (closed) { - throw new IllegalStateException("The Phoenix jdbc test driver has been closed."); - } + } + + @Override + public synchronized void close() throws SQLException { + if (closed) { + return; } - - @Override - public synchronized void close() throws SQLException { - if (closed) { - return; - } - closed = true; - try { - for (ConnectionQueryServices cqs : connectionQueryServicesMap.values()) { - cqs.close(); - } - } finally { - ThreadPoolExecutor executor = queryServices.getExecutor(); - try { - queryServices.close(); - } finally { - if (executor != null) executor.shutdownNow(); - connectionQueryServicesMap.clear();; - } - } + closed = true; + try { + for (ConnectionQueryServices cqs : connectionQueryServicesMap.values()) { + cqs.close(); + } + } finally { + ThreadPoolExecutor executor = queryServices.getExecutor(); + try { + queryServices.close(); + } finally { + if (executor != null) executor.shutdownNow(); + connectionQueryServicesMap.clear(); + ; + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ReadOnlyPropertiesTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ReadOnlyPropertiesTest.java index f0b9f736fe3..be93696e44f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ReadOnlyPropertiesTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/ReadOnlyPropertiesTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,62 +23,65 @@ import java.util.Map; import java.util.Properties; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.phoenix.util.ReadOnlyProps; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; - public class ReadOnlyPropertiesTest { - - private static final String PROPERTY_NAME_1 = "property1"; - private static final String DEFAULT_VALUE_1 = "default1"; - private static final String OVERRIDEN_VALUE_1 = "override1"; - private static final String OVERRIDEN_VALUE_2 = "override2"; - - private static final String PROPERTY_NAME_2 = "property2"; - private static final String DEFAULT_VALUE_2 = "default2"; - - private static final Map EMPTY_OVERRIDE_MAP = Collections.emptyMap(); - private static final Map DEFAULT_PROPS_MAP = ImmutableMap.of(PROPERTY_NAME_1, DEFAULT_VALUE_1, PROPERTY_NAME_2, DEFAULT_VALUE_2); - private static final Map OVERRIDE_MAP = ImmutableMap.of(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); - - @Test - public void testDefaultProperties() { - ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(defaultProps, EMPTY_OVERRIDE_MAP.entrySet().iterator()); - assertEquals(DEFAULT_VALUE_1, readOnlyProps.get(PROPERTY_NAME_1)); - assertEquals(DEFAULT_VALUE_2, readOnlyProps.get(PROPERTY_NAME_2)); - } - - @Test - public void testOverrideProperties() { - ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(defaultProps, OVERRIDE_MAP.entrySet().iterator()); - assertEquals(OVERRIDEN_VALUE_1, readOnlyProps.get(PROPERTY_NAME_1)); - assertEquals(DEFAULT_VALUE_2, readOnlyProps.get(PROPERTY_NAME_2)); - } - - @Test - public void testAddAllOverrideProperties() { - ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); - Properties overrideProps = new Properties(); - overrideProps.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); - ReadOnlyProps newProps = defaultProps.addAll(overrideProps); - assertEquals(OVERRIDEN_VALUE_1, newProps.get(PROPERTY_NAME_1)); - assertEquals(DEFAULT_VALUE_2, newProps.get(PROPERTY_NAME_2)); - } - - @Test - public void testOverridingNonDefaultProperties() { - ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); - Properties props = new Properties(); - props.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); - ReadOnlyProps nonDefaultProps = defaultProps.addAll(props); - - Properties overrideProps = new Properties(); - overrideProps.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_2); - ReadOnlyProps newProps = nonDefaultProps.addAll(overrideProps); - assertEquals(OVERRIDEN_VALUE_2, newProps.get(PROPERTY_NAME_1)); - assertEquals(DEFAULT_VALUE_2, newProps.get(PROPERTY_NAME_2)); - } + + private static final String PROPERTY_NAME_1 = "property1"; + private static final String DEFAULT_VALUE_1 = "default1"; + private static final String OVERRIDEN_VALUE_1 = "override1"; + private static final String OVERRIDEN_VALUE_2 = "override2"; + + private static final String PROPERTY_NAME_2 = "property2"; + private static final String DEFAULT_VALUE_2 = "default2"; + + private static final Map EMPTY_OVERRIDE_MAP = Collections.emptyMap(); + private static final Map DEFAULT_PROPS_MAP = + 
ImmutableMap.of(PROPERTY_NAME_1, DEFAULT_VALUE_1, PROPERTY_NAME_2, DEFAULT_VALUE_2); + private static final Map OVERRIDE_MAP = + ImmutableMap.of(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); + + @Test + public void testDefaultProperties() { + ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); + ReadOnlyProps readOnlyProps = + new ReadOnlyProps(defaultProps, EMPTY_OVERRIDE_MAP.entrySet().iterator()); + assertEquals(DEFAULT_VALUE_1, readOnlyProps.get(PROPERTY_NAME_1)); + assertEquals(DEFAULT_VALUE_2, readOnlyProps.get(PROPERTY_NAME_2)); + } + + @Test + public void testOverrideProperties() { + ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); + ReadOnlyProps readOnlyProps = + new ReadOnlyProps(defaultProps, OVERRIDE_MAP.entrySet().iterator()); + assertEquals(OVERRIDEN_VALUE_1, readOnlyProps.get(PROPERTY_NAME_1)); + assertEquals(DEFAULT_VALUE_2, readOnlyProps.get(PROPERTY_NAME_2)); + } + + @Test + public void testAddAllOverrideProperties() { + ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); + Properties overrideProps = new Properties(); + overrideProps.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); + ReadOnlyProps newProps = defaultProps.addAll(overrideProps); + assertEquals(OVERRIDEN_VALUE_1, newProps.get(PROPERTY_NAME_1)); + assertEquals(DEFAULT_VALUE_2, newProps.get(PROPERTY_NAME_2)); + } + + @Test + public void testOverridingNonDefaultProperties() { + ReadOnlyProps defaultProps = new ReadOnlyProps(DEFAULT_PROPS_MAP); + Properties props = new Properties(); + props.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_1); + ReadOnlyProps nonDefaultProps = defaultProps.addAll(props); + + Properties overrideProps = new Properties(); + overrideProps.setProperty(PROPERTY_NAME_1, OVERRIDEN_VALUE_2); + ReadOnlyProps newProps = nonDefaultProps.addAll(overrideProps); + assertEquals(OVERRIDEN_VALUE_2, newProps.get(PROPERTY_NAME_1)); + assertEquals(DEFAULT_VALUE_2, newProps.get(PROPERTY_NAME_2)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/BulkLoadToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/BulkLoadToolTest.java index d015f3d8e89..0f4e21b2019 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/BulkLoadToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/BulkLoadToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,8 @@ */ package org.apache.phoenix.mapreduce; +import static org.junit.Assert.assertEquals; + import java.util.Arrays; import java.util.Collection; @@ -26,54 +28,49 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static org.junit.Assert.assertEquals; - @RunWith(Parameterized.class) public class BulkLoadToolTest { - @Parameterized.Parameters - public static Collection params() { - return Arrays.asList(new Object[][]{ - { new CsvBulkLoadTool() }, - { new JsonBulkLoadTool() }, - }); - } + @Parameterized.Parameters + public static Collection params() { + return Arrays.asList(new Object[][] { { new CsvBulkLoadTool() }, { new JsonBulkLoadTool() }, }); + } - @Parameterized.Parameter(value = 0) - public AbstractBulkLoadTool bulkLoadTool; + @Parameterized.Parameter(value = 0) + public AbstractBulkLoadTool bulkLoadTool; - @Test - public void testParseOptions() { - CommandLine cmdLine = bulkLoadTool.parseOptions(new String[] { "--input", "/input", - "--table", "mytable" }); + @Test + public void testParseOptions() { + CommandLine cmdLine = + bulkLoadTool.parseOptions(new String[] { "--input", "/input", "--table", "mytable" }); - assertEquals("mytable", cmdLine.getOptionValue(CsvBulkLoadTool.TABLE_NAME_OPT.getOpt())); - assertEquals("/input", cmdLine.getOptionValue(CsvBulkLoadTool.INPUT_PATH_OPT.getOpt())); - } + assertEquals("mytable", cmdLine.getOptionValue(CsvBulkLoadTool.TABLE_NAME_OPT.getOpt())); + assertEquals("/input", cmdLine.getOptionValue(CsvBulkLoadTool.INPUT_PATH_OPT.getOpt())); + } - @Test(expected=IllegalStateException.class) - public void testParseOptions_ExtraArguments() { - bulkLoadTool.parseOptions(new String[] { "--input", "/input", - "--table", "mytable", "these", "shouldnt", "be", "here" }); - } + @Test(expected = IllegalStateException.class) + public void testParseOptions_ExtraArguments() { + bulkLoadTool.parseOptions(new String[] { "--input", "/input", "--table", "mytable", "these", + "shouldnt", "be", "here" }); + } - @Test(expected=IllegalStateException.class) - public void testParseOptions_NoInput() { - bulkLoadTool.parseOptions(new String[] { "--table", "mytable" }); - } + @Test(expected = IllegalStateException.class) + public void testParseOptions_NoInput() { + bulkLoadTool.parseOptions(new String[] { "--table", "mytable" }); + } - @Test(expected=IllegalStateException.class) - public void testParseOptions_NoTable() { - bulkLoadTool.parseOptions(new String[] { "--input", "/input" }); - } + @Test(expected = IllegalStateException.class) + public void testParseOptions_NoTable() { + bulkLoadTool.parseOptions(new String[] { "--input", "/input" }); + } - @Test - public void testGetQualifiedTableName() { - assertEquals("MYSCHEMA.MYTABLE", SchemaUtil.getQualifiedTableName("mySchema", "myTable")); - } + @Test + public void testGetQualifiedTableName() { + assertEquals("MYSCHEMA.MYTABLE", SchemaUtil.getQualifiedTableName("mySchema", "myTable")); + } - @Test - public void testGetQualifiedTableName_NullSchema() { - assertEquals("MYTABLE", SchemaUtil.getQualifiedTableName(null, "myTable")); - } + @Test + public void testGetQualifiedTableName_NullSchema() { + assertEquals("MYTABLE", SchemaUtil.getQualifiedTableName(null, "myTable")); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java index 761aa23730e..c46db14fd1a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvBulkImportUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,69 +33,70 @@ public class CsvBulkImportUtilTest { - @Test - public void testInitCsvImportJob() throws IOException { - Configuration conf = new Configuration(); - - char delimiter = '\001'; - char quote = '\002'; - char escape = '!'; - - CsvBulkImportUtil.initCsvImportJob(conf, delimiter, quote, escape, null, null); - - // Serialize and deserialize the config to ensure that there aren't any issues - // with non-printable characters as delimiters - File tempFile = File.createTempFile("test-config", ".xml"); - FileOutputStream fileOutputStream = new FileOutputStream(tempFile); - conf.writeXml(fileOutputStream); - fileOutputStream.close(); - Configuration deserialized = new Configuration(); - deserialized.addResource(new FileInputStream(tempFile)); - - assertEquals(Character.valueOf('\001'), - CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY)); - assertEquals(Character.valueOf('\002'), - CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY)); - assertEquals(Character.valueOf('!'), - CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY)); - assertNull(deserialized.get(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY)); - - tempFile.delete(); - } - - @Test - public void testConfigurePreUpsertProcessor() { - Configuration conf = new Configuration(); - CsvBulkImportUtil.configurePreUpsertProcessor(conf, MockProcessor.class); - ImportPreUpsertKeyValueProcessor processor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); - assertEquals(MockProcessor.class, processor.getClass()); - } - - @Test - public void testGetAndSetChar_BasicChar() { - Configuration conf = new Configuration(); - CsvBulkImportUtil.setChar(conf, "conf.key", '|'); - assertEquals(Character.valueOf('|'), CsvBulkImportUtil.getCharacter(conf, "conf.key")); - } - - @Test - public void testGetAndSetChar_NonPrintableChar() { - Configuration conf = new Configuration(); - CsvBulkImportUtil.setChar(conf, "conf.key", '\001'); - assertEquals(Character.valueOf('\001'), CsvBulkImportUtil.getCharacter(conf, "conf.key")); - } - - @Test - public void testGetChar_NotPresent() { - Configuration conf = new Configuration(); - assertNull(CsvBulkImportUtil.getCharacter(conf, "conf.key")); - } - - public static class MockProcessor implements ImportPreUpsertKeyValueProcessor { - - @Override - public List preUpsert(byte[] rowKey, List keyValues) { - throw new UnsupportedOperationException("Not yet implemented"); - } + @Test + public void testInitCsvImportJob() throws IOException { + Configuration conf = new Configuration(); + + char delimiter = '\001'; + char quote = '\002'; + char escape = '!'; + + CsvBulkImportUtil.initCsvImportJob(conf, delimiter, quote, escape, null, null); + + // Serialize and 
deserialize the config to ensure that there aren't any issues + // with non-printable characters as delimiters + File tempFile = File.createTempFile("test-config", ".xml"); + FileOutputStream fileOutputStream = new FileOutputStream(tempFile); + conf.writeXml(fileOutputStream); + fileOutputStream.close(); + Configuration deserialized = new Configuration(); + deserialized.addResource(new FileInputStream(tempFile)); + + assertEquals(Character.valueOf('\001'), + CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY)); + assertEquals(Character.valueOf('\002'), + CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY)); + assertEquals(Character.valueOf('!'), + CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY)); + assertNull(deserialized.get(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY)); + + tempFile.delete(); + } + + @Test + public void testConfigurePreUpsertProcessor() { + Configuration conf = new Configuration(); + CsvBulkImportUtil.configurePreUpsertProcessor(conf, MockProcessor.class); + ImportPreUpsertKeyValueProcessor processor = + PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + assertEquals(MockProcessor.class, processor.getClass()); + } + + @Test + public void testGetAndSetChar_BasicChar() { + Configuration conf = new Configuration(); + CsvBulkImportUtil.setChar(conf, "conf.key", '|'); + assertEquals(Character.valueOf('|'), CsvBulkImportUtil.getCharacter(conf, "conf.key")); + } + + @Test + public void testGetAndSetChar_NonPrintableChar() { + Configuration conf = new Configuration(); + CsvBulkImportUtil.setChar(conf, "conf.key", '\001'); + assertEquals(Character.valueOf('\001'), CsvBulkImportUtil.getCharacter(conf, "conf.key")); + } + + @Test + public void testGetChar_NotPresent() { + Configuration conf = new Configuration(); + assertNull(CsvBulkImportUtil.getCharacter(conf, "conf.key")); + } + + public static class MockProcessor implements ImportPreUpsertKeyValueProcessor { + + @Override + public List preUpsert(byte[] rowKey, List keyValues) { + throw new UnsupportedOperationException("Not yet implemented"); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java index fe4e068693d..74ec70dc7c8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/CsvToKeyValueMapperTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,37 +17,37 @@ */ package org.apache.phoenix.mapreduce; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.commons.csv.CSVRecord; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - public class CsvToKeyValueMapperTest { - @Test - public void testCsvLineParser() throws IOException { - CsvToKeyValueMapper.CsvLineParser lineParser = - new CsvToKeyValueMapper.CsvLineParser(';', '"', '\\'); - CSVRecord parsed = lineParser.parse("one;two"); - - assertEquals("one", parsed.get(0)); - assertEquals("two", parsed.get(1)); - assertTrue(parsed.isConsistent()); - assertEquals(1, parsed.getRecordNumber()); - } - - @Test - public void testCsvLineParserWithQuoting() throws IOException { - CsvToKeyValueMapper.CsvLineParser lineParser = - new CsvToKeyValueMapper.CsvLineParser(';', '"', '\\'); - CSVRecord parsed = lineParser.parse("\"\\\"one\";\"\\;two\\\\\""); - - assertEquals("\"one", parsed.get(0)); - assertEquals(";two\\", parsed.get(1)); - assertTrue(parsed.isConsistent()); - assertEquals(1, parsed.getRecordNumber()); - } + @Test + public void testCsvLineParser() throws IOException { + CsvToKeyValueMapper.CsvLineParser lineParser = + new CsvToKeyValueMapper.CsvLineParser(';', '"', '\\'); + CSVRecord parsed = lineParser.parse("one;two"); + + assertEquals("one", parsed.get(0)); + assertEquals("two", parsed.get(1)); + assertTrue(parsed.isConsistent()); + assertEquals(1, parsed.getRecordNumber()); + } + + @Test + public void testCsvLineParserWithQuoting() throws IOException { + CsvToKeyValueMapper.CsvLineParser lineParser = + new CsvToKeyValueMapper.CsvLineParser(';', '"', '\\'); + CSVRecord parsed = lineParser.parse("\"\\\"one\";\"\\;two\\\\\""); + + assertEquals("\"one", parsed.get(0)); + assertEquals(";two\\", parsed.get(1)); + assertTrue(parsed.isConsistent()); + assertEquals(1, parsed.getRecordNumber()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/DefaultMultiViewSplitStrategyTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/DefaultMultiViewSplitStrategyTest.java index cf17271155f..41957f0105c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/DefaultMultiViewSplitStrategyTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/DefaultMultiViewSplitStrategyTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,12 @@ */ package org.apache.phoenix.mapreduce; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE; +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.mapreduce.InputSplit; import org.apache.phoenix.mapreduce.util.DefaultMultiViewSplitStrategy; import org.apache.phoenix.mapreduce.util.ViewInfoTracker; @@ -24,87 +30,72 @@ import org.apache.phoenix.query.BaseTest; import org.junit.Test; -import java.util.ArrayList; -import java.util.List; - -import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE; -import static org.junit.Assert.assertEquals; - public class DefaultMultiViewSplitStrategyTest extends BaseTest { - DefaultMultiViewSplitStrategy defaultMultiViewSplitStrategy = - new DefaultMultiViewSplitStrategy(); - @Test - public void testGetUpperBound() { - // given split policy to be 10 with view size 12 - // we expect 2 mappers with range [0,10) and [10,12) - assertEquals(10, - defaultMultiViewSplitStrategy.getUpperBound(10, 0, 12)); - assertEquals(12, - defaultMultiViewSplitStrategy.getUpperBound(10, 1, 12)); - - // given split policy to be 8 with view size 12 - // we expect 2 mappers with range [0,8) and [8,12) - assertEquals(8, - defaultMultiViewSplitStrategy.getUpperBound(8, 0, 12)); - assertEquals(12, - defaultMultiViewSplitStrategy.getUpperBound(8, 1, 12)); - - // given split policy to be 5 with view size 12 - // we expect 1 mappers with range [0,1) - assertEquals(1, - defaultMultiViewSplitStrategy.getUpperBound(5, 0, 1)); - } - - @Test - public void testGetNumberOfMappers() { - int viewSize = 0; - int numViewsInSplit = 10; - - // test empty cluster, which is view size is 0 - assertEquals(0, - defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize,numViewsInSplit)); - - viewSize = 9; - // test viewSize is less than numViewsInSplit - assertEquals(1, - defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize,numViewsInSplit)); - - // test viewSize is equal to numViewsInSplit - viewSize = 10; - assertEquals(1, - defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize,numViewsInSplit)); - - // test viewSize is greater than numViewsInSplit - viewSize = 11; - assertEquals(2, - defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize,numViewsInSplit)); - } - - @Test - public void testGenerateSplits() { - // test number of views greater than split policy - testGenerateSplits(11, 10, 2); - - // test number of views equal to split policy - testGenerateSplits(10, 10, 1); - - // test number of views equal to split policy - testGenerateSplits(8, 10, 1); - - // test number of views is 0 - testGenerateSplits(0, 10, 0); - - // test split policy is 0 - testGenerateSplits(8, 0, 1); - } - - private void testGenerateSplits(int numberOfViews, int splitPolicy, int expectedResultSize) { - List views = new ArrayList<>(); - for (int i = 0; i < numberOfViews; i++) { - views.add(new ViewInfoTracker()); - } - config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, String.valueOf(splitPolicy)); - List result = defaultMultiViewSplitStrategy.generateSplits(views, config); - assertEquals(expectedResultSize, result.size()); + 
DefaultMultiViewSplitStrategy defaultMultiViewSplitStrategy = new DefaultMultiViewSplitStrategy(); + + @Test + public void testGetUpperBound() { + // given split policy to be 10 with view size 12 + // we expect 2 mappers with range [0,10) and [10,12) + assertEquals(10, defaultMultiViewSplitStrategy.getUpperBound(10, 0, 12)); + assertEquals(12, defaultMultiViewSplitStrategy.getUpperBound(10, 1, 12)); + + // given split policy to be 8 with view size 12 + // we expect 2 mappers with range [0,8) and [8,12) + assertEquals(8, defaultMultiViewSplitStrategy.getUpperBound(8, 0, 12)); + assertEquals(12, defaultMultiViewSplitStrategy.getUpperBound(8, 1, 12)); + + // given split policy to be 5 with view size 12 + // we expect 1 mappers with range [0,1) + assertEquals(1, defaultMultiViewSplitStrategy.getUpperBound(5, 0, 1)); + } + + @Test + public void testGetNumberOfMappers() { + int viewSize = 0; + int numViewsInSplit = 10; + + // test empty cluster, which is view size is 0 + assertEquals(0, defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize, numViewsInSplit)); + + viewSize = 9; + // test viewSize is less than numViewsInSplit + assertEquals(1, defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize, numViewsInSplit)); + + // test viewSize is equal to numViewsInSplit + viewSize = 10; + assertEquals(1, defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize, numViewsInSplit)); + + // test viewSize is greater than numViewsInSplit + viewSize = 11; + assertEquals(2, defaultMultiViewSplitStrategy.getNumberOfMappers(viewSize, numViewsInSplit)); + } + + @Test + public void testGenerateSplits() { + // test number of views greater than split policy + testGenerateSplits(11, 10, 2); + + // test number of views equal to split policy + testGenerateSplits(10, 10, 1); + + // test number of views equal to split policy + testGenerateSplits(8, 10, 1); + + // test number of views is 0 + testGenerateSplits(0, 10, 0); + + // test split policy is 0 + testGenerateSplits(8, 0, 1); + } + + private void testGenerateSplits(int numberOfViews, int splitPolicy, int expectedResultSize) { + List views = new ArrayList<>(); + for (int i = 0; i < numberOfViews; i++) { + views.add(new ViewInfoTracker()); } -} \ No newline at end of file + config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, String.valueOf(splitPolicy)); + List result = defaultMultiViewSplitStrategy.generateSplits(views, config); + assertEquals(expectedResultSize, result.size()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java index 170ed567280..9c4b394ec41 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapperTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,87 +17,86 @@ */ package org.apache.phoenix.mapreduce; +import static org.junit.Assert.assertEquals; + import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PIntegerArray; import org.apache.phoenix.schema.types.PUnsignedInt; -import org.apache.phoenix.util.ColumnInfo; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import static org.junit.Assert.assertEquals; +import org.apache.phoenix.util.ColumnInfo; +import org.junit.Test; public class FormatToBytesWritableMapperTest { - @Test - public void testBuildColumnInfoList() { - List columnInfoList = ImmutableList.of( - new ColumnInfo("idCol", PInteger.INSTANCE.getSqlType()), - new ColumnInfo("unsignedIntCol", PUnsignedInt.INSTANCE.getSqlType()), - new ColumnInfo("stringArrayCol", PIntegerArray.INSTANCE.getSqlType())); - - Configuration conf = new Configuration(); - FormatToBytesWritableMapper.configureColumnInfoList(conf, columnInfoList); - List fromConfig = FormatToBytesWritableMapper.buildColumnInfoList(conf); - - assertEquals(columnInfoList, fromConfig); - } - - @Test - public void testBuildColumnInfoList_ContainingNulls() { - // A null value in the column info list means "skip that column in the input" - List columnInfoListWithNull = Lists.newArrayList( - new ColumnInfo("idCol", PInteger.INSTANCE.getSqlType()), - null, - new ColumnInfo("unsignedIntCol", PUnsignedInt.INSTANCE.getSqlType()), - new ColumnInfo("stringArrayCol", PIntegerArray.INSTANCE.getSqlType())); - - Configuration conf = new Configuration(); - FormatToBytesWritableMapper.configureColumnInfoList(conf, columnInfoListWithNull); - List fromConfig = FormatToBytesWritableMapper.buildColumnInfoList(conf); - - assertEquals(columnInfoListWithNull, fromConfig); - } - - @Test - public void testLoadPreUpdateProcessor() { - Configuration conf = new Configuration(); - conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, MockUpsertProcessor.class, - ImportPreUpsertKeyValueProcessor.class); - - ImportPreUpsertKeyValueProcessor processor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); - assertEquals(MockUpsertProcessor.class, processor.getClass()); - } - - @Test - public void testLoadPreUpdateProcessor_NotConfigured() { - - Configuration conf = new Configuration(); - ImportPreUpsertKeyValueProcessor processor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); - - assertEquals(FormatToBytesWritableMapper.DefaultImportPreUpsertKeyValueProcessor.class, - processor.getClass()); - } - - @Test(expected=IllegalStateException.class) - public void testLoadPreUpdateProcessor_ClassNotFound() { - Configuration conf = new Configuration(); - conf.set(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, "MyUndefinedClass"); - - PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); - } - - static class MockUpsertProcessor implements ImportPreUpsertKeyValueProcessor { - @Override - public List preUpsert(byte[] rowKey, List keyValues) 
{ - throw new UnsupportedOperationException("Not yet implemented"); - } + @Test + public void testBuildColumnInfoList() { + List columnInfoList = + ImmutableList.of(new ColumnInfo("idCol", PInteger.INSTANCE.getSqlType()), + new ColumnInfo("unsignedIntCol", PUnsignedInt.INSTANCE.getSqlType()), + new ColumnInfo("stringArrayCol", PIntegerArray.INSTANCE.getSqlType())); + + Configuration conf = new Configuration(); + FormatToBytesWritableMapper.configureColumnInfoList(conf, columnInfoList); + List fromConfig = FormatToBytesWritableMapper.buildColumnInfoList(conf); + + assertEquals(columnInfoList, fromConfig); + } + + @Test + public void testBuildColumnInfoList_ContainingNulls() { + // A null value in the column info list means "skip that column in the input" + List columnInfoListWithNull = + Lists.newArrayList(new ColumnInfo("idCol", PInteger.INSTANCE.getSqlType()), null, + new ColumnInfo("unsignedIntCol", PUnsignedInt.INSTANCE.getSqlType()), + new ColumnInfo("stringArrayCol", PIntegerArray.INSTANCE.getSqlType())); + + Configuration conf = new Configuration(); + FormatToBytesWritableMapper.configureColumnInfoList(conf, columnInfoListWithNull); + List fromConfig = FormatToBytesWritableMapper.buildColumnInfoList(conf); + + assertEquals(columnInfoListWithNull, fromConfig); + } + + @Test + public void testLoadPreUpdateProcessor() { + Configuration conf = new Configuration(); + conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, MockUpsertProcessor.class, + ImportPreUpsertKeyValueProcessor.class); + + ImportPreUpsertKeyValueProcessor processor = + PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + assertEquals(MockUpsertProcessor.class, processor.getClass()); + } + + @Test + public void testLoadPreUpdateProcessor_NotConfigured() { + + Configuration conf = new Configuration(); + ImportPreUpsertKeyValueProcessor processor = + PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + + assertEquals(FormatToBytesWritableMapper.DefaultImportPreUpsertKeyValueProcessor.class, + processor.getClass()); + } + + @Test(expected = IllegalStateException.class) + public void testLoadPreUpdateProcessor_ClassNotFound() { + Configuration conf = new Configuration(); + conf.set(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, "MyUndefinedClass"); + + PhoenixConfigurationUtil.loadPreUpsertProcessor(conf); + } + + static class MockUpsertProcessor implements ImportPreUpsertKeyValueProcessor { + @Override + public List preUpsert(byte[] rowKey, List keyValues) { + throw new UnsupportedOperationException("Not yet implemented"); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormatTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormatTest.java index e8f2864e8d0..3044479c30e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormatTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewInputFormatTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,12 +19,9 @@ import static junit.framework.TestCase.assertTrue; import static junit.framework.TestCase.fail; -import static org.apache.phoenix.mapreduce.util. 
- PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE; -import static org.apache.phoenix.mapreduce.util. - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ; -import static org.apache.phoenix.mapreduce.util. - PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ; +import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ; import static org.apache.phoenix.util.PhoenixRuntime.CONNECTIONLESS; import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR; @@ -38,63 +35,61 @@ import org.junit.Test; import org.mockito.Mockito; - public class PhoenixMultiViewInputFormatTest { - private static String CONNECTIONLESS_URL = - JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS + JDBC_PROTOCOL_TERMINATOR - + PHOENIX_TEST_DRIVER_URL_PARAM + JDBC_PROTOCOL_TERMINATOR; - - @Test - public void testDefaultConfig() throws Exception { - PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); + private static String CONNECTIONLESS_URL = + JDBC_PROTOCOL_ZK + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS + JDBC_PROTOCOL_TERMINATOR + + PHOENIX_TEST_DRIVER_URL_PARAM + JDBC_PROTOCOL_TERMINATOR; - Configuration config = new Configuration(); - config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); - PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); - JobContext mockContext = Mockito.mock(JobContext.class); - when(mockContext.getConfiguration()).thenReturn(config); + @Test + public void testDefaultConfig() throws Exception { + PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); - // default run should not raise error - multiViewInputFormat.getSplits(mockContext); - } + Configuration config = new Configuration(); + config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); + PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); + JobContext mockContext = Mockito.mock(JobContext.class); + when(mockContext.getConfiguration()).thenReturn(config); + // default run should not raise error + multiViewInputFormat.getSplits(mockContext); + } - @Test - public void testCustomizedInputStrategyClassNotExists() { - PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); + @Test + public void testCustomizedInputStrategyClassNotExists() { + PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); - Configuration config = new Configuration(); - config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); - config.set(MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ, "dummy.path"); - PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); - JobContext mockContext = Mockito.mock(JobContext.class); - when(mockContext.getConfiguration()).thenReturn(config); + Configuration config = new Configuration(); + config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); + config.set(MAPREDUCE_MULTI_INPUT_STRATEGY_CLAZZ, "dummy.path"); + PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); + JobContext mockContext = Mockito.mock(JobContext.class); + when(mockContext.getConfiguration()).thenReturn(config); - try { - 
multiViewInputFormat.getSplits(mockContext); - fail(); - } catch (Exception e) { - assertTrue(e.getMessage().contains("ClassNotFoundException")); - } + try { + multiViewInputFormat.getSplits(mockContext); + fail(); + } catch (Exception e) { + assertTrue(e.getMessage().contains("ClassNotFoundException")); } + } - @Test - public void testCustomizedInputSplitClassNotExists() { - PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); + @Test + public void testCustomizedInputSplitClassNotExists() { + PhoenixMultiViewInputFormat multiViewInputFormat = new PhoenixMultiViewInputFormat(); - Configuration config = new Configuration(); - config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); - config.set(MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ, "dummy.path"); - PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); - JobContext mockContext = Mockito.mock(JobContext.class); - when(mockContext.getConfiguration()).thenReturn(config); + Configuration config = new Configuration(); + config.set(MAPREDUCE_MULTI_INPUT_MAPPER_SPLIT_SIZE, "10"); + config.set(MAPREDUCE_MULTI_INPUT_SPLIT_STRATEGY_CLAZZ, "dummy.path"); + PhoenixConfigurationUtil.setInputClusterUrl(config, CONNECTIONLESS_URL); + JobContext mockContext = Mockito.mock(JobContext.class); + when(mockContext.getConfiguration()).thenReturn(config); - try { - multiViewInputFormat.getSplits(mockContext); - fail(); - } catch (Exception e) { - assertTrue(e.getMessage().contains("ClassNotFoundException")); - } + try { + multiViewInputFormat.getSplits(mockContext); + fail(); + } catch (Exception e) { + assertTrue(e.getMessage().contains("ClassNotFoundException")); } -} \ No newline at end of file + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReaderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReaderTest.java index 342a75d35cd..902ff616eda 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReaderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixMultiViewReaderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,70 +17,58 @@ */ package org.apache.phoenix.mapreduce; -import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.phoenix.mapreduce.util.ViewInfoTracker; -import org.apache.phoenix.mapreduce.util.ViewInfoWritable; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.ArrayList; -import java.util.List; - import static junit.framework.TestCase.assertTrue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.mockito.Mockito.when; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.phoenix.mapreduce.util.ViewInfoTracker; +import org.apache.phoenix.mapreduce.util.ViewInfoWritable; +import org.junit.Test; +import org.mockito.Mockito; + public class PhoenixMultiViewReaderTest { - @Test - public void test() throws Exception { - String tenantId = "Tenant1"; - String viewName = "viewName1"; - long ttl = 1; - String indexTable = "indexTable"; - String globalView = "globalView"; + @Test + public void test() throws Exception { + String tenantId = "Tenant1"; + String viewName = "viewName1"; + long ttl = 1; + String indexTable = "indexTable"; + String globalView = "globalView"; - PhoenixMultiViewInputSplit mockInput = Mockito.mock(PhoenixMultiViewInputSplit.class); - TaskAttemptContext mockContext = Mockito.mock(TaskAttemptContext.class); - List viewInfoTracker = new ArrayList<>(); - viewInfoTracker.add(new ViewInfoTracker( - tenantId, - viewName, - ttl, - globalView, - false - )); + PhoenixMultiViewInputSplit mockInput = Mockito.mock(PhoenixMultiViewInputSplit.class); + TaskAttemptContext mockContext = Mockito.mock(TaskAttemptContext.class); + List viewInfoTracker = new ArrayList<>(); + viewInfoTracker.add(new ViewInfoTracker(tenantId, viewName, ttl, globalView, false)); - viewInfoTracker.add(new ViewInfoTracker( - tenantId, - viewName, - ttl, - indexTable, - true - )); - when(mockInput.getViewInfoTrackerList()).thenReturn(viewInfoTracker); - PhoenixMultiViewReader phoenixMultiViewReader = new PhoenixMultiViewReader(); - phoenixMultiViewReader.initialize(mockInput, mockContext); + viewInfoTracker.add(new ViewInfoTracker(tenantId, viewName, ttl, indexTable, true)); + when(mockInput.getViewInfoTrackerList()).thenReturn(viewInfoTracker); + PhoenixMultiViewReader phoenixMultiViewReader = new PhoenixMultiViewReader(); + phoenixMultiViewReader.initialize(mockInput, mockContext); - ViewInfoTracker viewInfoWritable; - assertTrue(phoenixMultiViewReader.nextKeyValue()); - viewInfoWritable = (ViewInfoTracker)phoenixMultiViewReader.getCurrentValue(); - assertEquals(tenantId, viewInfoWritable.getTenantId()); - assertEquals(viewName, viewInfoWritable.getViewName()); - assertEquals(ttl, viewInfoWritable.getTTL()); - assertEquals(false, viewInfoWritable.isIndexRelation()); + ViewInfoTracker viewInfoWritable; + assertTrue(phoenixMultiViewReader.nextKeyValue()); + viewInfoWritable = (ViewInfoTracker) phoenixMultiViewReader.getCurrentValue(); + assertEquals(tenantId, viewInfoWritable.getTenantId()); + assertEquals(viewName, viewInfoWritable.getViewName()); + assertEquals(ttl, viewInfoWritable.getTTL()); + assertEquals(false, 
viewInfoWritable.isIndexRelation()); - assertTrue(phoenixMultiViewReader.nextKeyValue()); - viewInfoWritable = (ViewInfoTracker)phoenixMultiViewReader.getCurrentValue(); - assertEquals(tenantId, viewInfoWritable.getTenantId()); - assertEquals(viewName, viewInfoWritable.getViewName()); - assertEquals(ttl, viewInfoWritable.getTTL()); - assertEquals(true, viewInfoWritable.isIndexRelation()); + assertTrue(phoenixMultiViewReader.nextKeyValue()); + viewInfoWritable = (ViewInfoTracker) phoenixMultiViewReader.getCurrentValue(); + assertEquals(tenantId, viewInfoWritable.getTenantId()); + assertEquals(viewName, viewInfoWritable.getViewName()); + assertEquals(ttl, viewInfoWritable.getTTL()); + assertEquals(true, viewInfoWritable.isIndexRelation()); - assertFalse(phoenixMultiViewReader.nextKeyValue()); - viewInfoWritable = (ViewInfoTracker)phoenixMultiViewReader.getCurrentValue(); - assertNull(phoenixMultiViewReader.getCurrentValue()); - } -} \ No newline at end of file + assertFalse(phoenixMultiViewReader.nextKeyValue()); + viewInfoWritable = (ViewInfoTracker) phoenixMultiViewReader.getCurrentValue(); + assertNull(phoenixMultiViewReader.getCurrentValue()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTTLToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTTLToolTest.java index a4532833cbf..83b078928ac 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTTLToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTTLToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,65 +17,65 @@ */ package org.apache.phoenix.mapreduce; +import static org.junit.Assert.assertEquals; + import org.apache.phoenix.query.BaseTest; import org.junit.Test; -import static org.junit.Assert.assertEquals; - public class PhoenixTTLToolTest extends BaseTest { - String viewName = generateUniqueName(); - String tenantId = generateUniqueName(); + String viewName = generateUniqueName(); + String tenantId = generateUniqueName(); - @Test - public void testParseInput() { - PhoenixTTLTool tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-a"}); + @Test + public void testParseInput() { + PhoenixTTLTool tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-a" }); - assertEquals("NORMAL", tool.getJobPriority()); - assertEquals(true, tool.isDeletingAllViews()); - assertEquals(null, tool.getViewName()); - assertEquals(null, tool.getTenantId()); + assertEquals("NORMAL", tool.getJobPriority()); + assertEquals(true, tool.isDeletingAllViews()); + assertEquals(null, tool.getViewName()); + assertEquals(null, tool.getTenantId()); - tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-v", viewName, "-i",tenantId }); - assertEquals("NORMAL", tool.getJobPriority()); - assertEquals(false, tool.isDeletingAllViews()); - assertEquals(viewName, tool.getViewName()); - assertEquals(tenantId, tool.getTenantId()); + tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-v", viewName, "-i", tenantId }); + assertEquals("NORMAL", tool.getJobPriority()); + assertEquals(false, tool.isDeletingAllViews()); + assertEquals(viewName, tool.getViewName()); + assertEquals(tenantId, 
tool.getTenantId()); - tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-v", viewName, "-p", "0"}); - assertEquals("VERY_HIGH", tool.getJobPriority()); - assertEquals(false, tool.isDeletingAllViews()); - assertEquals(viewName, tool.getViewName()); - assertEquals(null, tool.getTenantId()); + tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-v", viewName, "-p", "0" }); + assertEquals("VERY_HIGH", tool.getJobPriority()); + assertEquals(false, tool.isDeletingAllViews()); + assertEquals(viewName, tool.getViewName()); + assertEquals(null, tool.getTenantId()); - tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-v", viewName, "-p", "-1"}); - assertEquals("NORMAL", tool.getJobPriority()); - assertEquals(false, tool.isDeletingAllViews()); - assertEquals(viewName, tool.getViewName()); - assertEquals(null, tool.getTenantId()); + tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-v", viewName, "-p", "-1" }); + assertEquals("NORMAL", tool.getJobPriority()); + assertEquals(false, tool.isDeletingAllViews()); + assertEquals(viewName, tool.getViewName()); + assertEquals(null, tool.getTenantId()); - tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-v", viewName, "-p", "DSAFDAS"}); - assertEquals("NORMAL", tool.getJobPriority()); - assertEquals(false, tool.isDeletingAllViews()); - assertEquals(viewName, tool.getViewName()); - assertEquals(null, tool.getTenantId()); + tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-v", viewName, "-p", "DSAFDAS" }); + assertEquals("NORMAL", tool.getJobPriority()); + assertEquals(false, tool.isDeletingAllViews()); + assertEquals(viewName, tool.getViewName()); + assertEquals(null, tool.getTenantId()); - tool = new PhoenixTTLTool(); - tool.parseArgs(new String[] {"-i", tenantId}); - assertEquals("NORMAL", tool.getJobPriority()); - assertEquals(false, tool.isDeletingAllViews()); - assertEquals(null, tool.getViewName()); - assertEquals(tenantId, tool.getTenantId()); - } + tool = new PhoenixTTLTool(); + tool.parseArgs(new String[] { "-i", tenantId }); + assertEquals("NORMAL", tool.getJobPriority()); + assertEquals(false, tool.isDeletingAllViews()); + assertEquals(null, tool.getViewName()); + assertEquals(tenantId, tool.getTenantId()); + } - @Test (expected = IllegalStateException.class) - public void testNoInputParam() { - PhoenixTTLTool tool; - tool = new PhoenixTTLTool(); - tool.parseOptions(new String[] {}); - } -} \ No newline at end of file + @Test(expected = IllegalStateException.class) + public void testNoInputParam() { + PhoenixTTLTool tool; + tool = new PhoenixTTLTool(); + tool.parseOptions(new String[] {}); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTestingInputFormat.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTestingInputFormat.java index c0b4bea259b..3643f4fc80d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTestingInputFormat.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/PhoenixTestingInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.mapreduce; import org.apache.hadoop.conf.Configuration; @@ -30,17 +29,16 @@ */ public class PhoenixTestingInputFormat extends PhoenixInputFormat { - @Override - void setupParallelScansFromQueryPlan(QueryPlan queryPlan) { - setupParallelScansWithScanGrouper(queryPlan, - TestingMapReduceParallelScanGrouper.getInstance()); - } + @Override + void setupParallelScansFromQueryPlan(QueryPlan queryPlan) { + setupParallelScansWithScanGrouper(queryPlan, TestingMapReduceParallelScanGrouper.getInstance()); + } - @Override - RecordReader getPhoenixRecordReader(Class inputClass, - Configuration configuration, QueryPlan queryPlan) { - return new PhoenixRecordReader<>(inputClass , configuration, queryPlan, - TestingMapReduceParallelScanGrouper.getInstance()); - } + @Override + RecordReader getPhoenixRecordReader(Class inputClass, + Configuration configuration, QueryPlan queryPlan) { + return new PhoenixRecordReader<>(inputClass, configuration, queryPlan, + TestingMapReduceParallelScanGrouper.getInstance()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java index 2a29c003bd5..5c59e575d33 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/bulkload/TestTableRowkeyPair.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,41 +27,44 @@ import org.junit.Test; /** - * Tests for {@linkplain TableRowkeyPair} + * Tests for {@linkplain TableRowkeyPair} */ public class TestTableRowkeyPair { - @Test - public void testRowkeyPair() throws IOException { - testsRowsKeys("first", "aa", "first", "aa", 0); - testsRowsKeys("first", "aa", "first", "ab", -1); - testsRowsKeys("second", "aa", "first", "aa", 1); - testsRowsKeys("first", "aa", "first", "aaa", -1); - testsRowsKeys("first","bb", "first", "aaaa", 1); - } + @Test + public void testRowkeyPair() throws IOException { + testsRowsKeys("first", "aa", "first", "aa", 0); + testsRowsKeys("first", "aa", "first", "ab", -1); + testsRowsKeys("second", "aa", "first", "aa", 1); + testsRowsKeys("first", "aa", "first", "aaa", -1); + testsRowsKeys("first", "bb", "first", "aaaa", 1); + } - private void testsRowsKeys(String aTable, String akey, String bTable, String bkey, int expectedSignum) throws IOException { - - final ImmutableBytesWritable arowkey = new ImmutableBytesWritable(Bytes.toBytes(akey)); - TableRowkeyPair pair1 = new TableRowkeyPair(aTable, arowkey); - - ImmutableBytesWritable browkey = new ImmutableBytesWritable(Bytes.toBytes(bkey)); - TableRowkeyPair pair2 = new TableRowkeyPair(bTable, browkey); - - TableRowkeyPair.Comparator comparator = new TableRowkeyPair.Comparator(); - try( ByteArrayOutputStream baosA = new ByteArrayOutputStream(); - ByteArrayOutputStream baosB = new ByteArrayOutputStream()) { - - pair1.write(new DataOutputStream(baosA)); - pair2.write(new DataOutputStream(baosB)); - Assert.assertEquals(expectedSignum , signum(pair1.compareTo(pair2))); - Assert.assertEquals(expectedSignum , signum(comparator.compare(baosA.toByteArray(), 0, baosA.size(), baosB.toByteArray(), 0, baosB.size()))); - Assert.assertEquals(expectedSignum, -signum(comparator.compare(baosB.toByteArray(), 0, baosB.size(), baosA.toByteArray(), 0, baosA.size()))); - } + private void testsRowsKeys(String aTable, String akey, String bTable, String bkey, + int expectedSignum) throws IOException { + final ImmutableBytesWritable arowkey = new ImmutableBytesWritable(Bytes.toBytes(akey)); + TableRowkeyPair pair1 = new TableRowkeyPair(aTable, arowkey); + + ImmutableBytesWritable browkey = new ImmutableBytesWritable(Bytes.toBytes(bkey)); + TableRowkeyPair pair2 = new TableRowkeyPair(bTable, browkey); + + TableRowkeyPair.Comparator comparator = new TableRowkeyPair.Comparator(); + try (ByteArrayOutputStream baosA = new ByteArrayOutputStream(); + ByteArrayOutputStream baosB = new ByteArrayOutputStream()) { + + pair1.write(new DataOutputStream(baosA)); + pair2.write(new DataOutputStream(baosB)); + Assert.assertEquals(expectedSignum, signum(pair1.compareTo(pair2))); + Assert.assertEquals(expectedSignum, signum(comparator.compare(baosA.toByteArray(), 0, + baosA.size(), baosB.toByteArray(), 0, baosB.size()))); + Assert.assertEquals(expectedSignum, -signum(comparator.compare(baosB.toByteArray(), 0, + baosB.size(), baosA.toByteArray(), 0, baosA.size()))); } - - private int signum(int i) { - return i > 0 ? 1: (i == 0 ? 0: -1); - } + + } + + private int signum(int i) { + return i > 0 ? 1 : (i == 0 ? 
0 : -1); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/BaseIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/BaseIndexTest.java index b5411a5cc19..86c131f8b3b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/BaseIndexTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/BaseIndexTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,66 +32,53 @@ import org.junit.BeforeClass; /** - * * Creates a simple data table and index table - * */ public class BaseIndexTest extends BaseConnectionlessQueryTest { - protected static final String SCHEMA_NAME = "TEST_SCHEMA"; - protected static final String DATA_TABLE_NAME = "TEST_INDEX_COLUMN_NAMES_UTIL"; - protected static final String INDEX_TABLE_NAME = "TEST_ICN_INDEX"; - protected static final String DATA_TABLE_FULL_NAME = SCHEMA_NAME + "." + DATA_TABLE_NAME; - protected static final String INDEX_TABLE_FULL_NAME = SCHEMA_NAME + "." + INDEX_TABLE_NAME; + protected static final String SCHEMA_NAME = "TEST_SCHEMA"; + protected static final String DATA_TABLE_NAME = "TEST_INDEX_COLUMN_NAMES_UTIL"; + protected static final String INDEX_TABLE_NAME = "TEST_ICN_INDEX"; + protected static final String DATA_TABLE_FULL_NAME = SCHEMA_NAME + "." + DATA_TABLE_NAME; + protected static final String INDEX_TABLE_FULL_NAME = SCHEMA_NAME + "." + INDEX_TABLE_NAME; - private static final String DATA_TABLE_DDL = - "CREATE TABLE IF NOT EXISTS " + DATA_TABLE_FULL_NAME + "\n" + - "(\n" + - " ID INTEGER NOT NULL,\n" + - " PK_PART2 TINYINT NOT NULL,\n" + - " NAME VARCHAR,\n" + - " ZIP BIGINT,\n" + - " EMPLOYER CHAR(20),\n" + - " CONSTRAINT PK PRIMARY KEY\n" + - " (\n" + - " ID,\n" + - " PK_PART2\n" + - " \n" + - " )\n" + - ")"; + private static final String DATA_TABLE_DDL = + "CREATE TABLE IF NOT EXISTS " + DATA_TABLE_FULL_NAME + "\n" + "(\n" + + " ID INTEGER NOT NULL,\n" + " PK_PART2 TINYINT NOT NULL,\n" + " NAME VARCHAR,\n" + + " ZIP BIGINT,\n" + " EMPLOYER CHAR(20),\n" + " CONSTRAINT PK PRIMARY KEY\n" + + " (\n" + " ID,\n" + " PK_PART2\n" + " \n" + " )\n" + ")"; - private static final String INDEX_TABLE_DDL = - "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME - + " (NAME) INCLUDE (ZIP)"; - protected PTable pDataTable; - protected PTable pIndexTable; - protected Connection conn; + private static final String INDEX_TABLE_DDL = + "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (NAME) INCLUDE (ZIP)"; + protected PTable pDataTable; + protected PTable pIndexTable; + protected Connection conn; - @BeforeClass - public static synchronized void setupClass() throws Exception { - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - conn.setAutoCommit(true); - conn.createStatement().execute(DATA_TABLE_DDL); - conn.createStatement().execute(INDEX_TABLE_DDL); - } finally { - conn.close(); - } + @BeforeClass + public static synchronized void setupClass() throws Exception { + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + 
conn.setAutoCommit(true); + conn.createStatement().execute(DATA_TABLE_DDL); + conn.createStatement().execute(INDEX_TABLE_DDL); + } finally { + conn.close(); } + } - @Before - public void setup() throws Exception { - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - conn = DriverManager.getConnection(getUrl(), props); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), DATA_TABLE_FULL_NAME)); - pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), INDEX_TABLE_FULL_NAME)); - } + @Before + public void setup() throws Exception { + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + conn = DriverManager.getConnection(getUrl(), props); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), DATA_TABLE_FULL_NAME)); + pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), INDEX_TABLE_FULL_NAME)); + } - @After - public void tearDown() throws Exception { - if (conn != null) { - conn.close(); - } + @After + public void tearDown() throws Exception { + if (conn != null) { + conn.close(); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutputTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutputTest.java index 9955fec31bd..b8942881c4b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutputTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTableOutputTest.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.mapreduce.index; @@ -21,78 +28,77 @@ public class IndexScrutinyTableOutputTest extends BaseIndexTest { - private static final long SCRUTINY_TIME_MILLIS = 1502908914193L; + private static final long SCRUTINY_TIME_MILLIS = 1502908914193L; - @Before - public void setup() throws Exception { - super.setup(); - conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_TABLE_DDL); - conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_METADATA_DDL); - } + @Before + public void setup() throws Exception { + super.setup(); + conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_TABLE_DDL); + conn.createStatement().execute(IndexScrutinyTableOutput.OUTPUT_METADATA_DDL); + } - @Test - public void testConstructMetadataParamQuery() { - String metadataParamQuery = - IndexScrutinyTableOutput - .constructMetadataParamQuery(Arrays.asList("INVALID_ROWS_QUERY_ALL")); - assertEquals( - "SELECT \"INVALID_ROWS_QUERY_ALL\" FROM PHOENIX_INDEX_SCRUTINY_METADATA WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\") IN ((?,?,?))", - metadataParamQuery); - } + @Test + public void testConstructMetadataParamQuery() { + String metadataParamQuery = + IndexScrutinyTableOutput.constructMetadataParamQuery(Arrays.asList("INVALID_ROWS_QUERY_ALL")); + assertEquals( + "SELECT \"INVALID_ROWS_QUERY_ALL\" FROM PHOENIX_INDEX_SCRUTINY_METADATA WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\") IN ((?,?,?))", + metadataParamQuery); + } - @Test - public void testGetSqlQueryAllInvalidRows() throws SQLException { - SourceTargetColumnNames columnNames = - new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); - String sqlStr = - IndexScrutinyTableOutput.getSqlQueryAllInvalidRows(conn, columnNames, - SCRUTINY_TIME_MILLIS); - assertEquals("SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193))", - sqlStr); - } + @Test + public void testGetSqlQueryAllInvalidRows() throws SQLException { + SourceTargetColumnNames columnNames = + new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); + String sqlStr = + IndexScrutinyTableOutput.getSqlQueryAllInvalidRows(conn, columnNames, SCRUTINY_TIME_MILLIS); + assertEquals( + "SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193))", + sqlStr); + } - @Test - public void testGetSqlQueryMissingTargetRows() throws SQLException { - SourceTargetColumnNames columnNames = - new 
SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); - String query = - IndexScrutinyTableOutput.getSqlQueryMissingTargetRows(conn, columnNames, - SCRUTINY_TIME_MILLIS); - assertEquals("SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,false))", - query); - } + @Test + public void testGetSqlQueryMissingTargetRows() throws SQLException { + SourceTargetColumnNames columnNames = + new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); + String query = IndexScrutinyTableOutput.getSqlQueryMissingTargetRows(conn, columnNames, + SCRUTINY_TIME_MILLIS); + assertEquals( + "SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,false))", + query); + } - @Test - public void testGetSqlQueryBadCoveredColVal() throws SQLException { - SourceTargetColumnNames columnNames = - new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); - String query = - IndexScrutinyTableOutput.getSqlQueryBadCoveredColVal(conn, columnNames, - SCRUTINY_TIME_MILLIS); - assertEquals("SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,true))", - query); - } + @Test + public void testGetSqlQueryBadCoveredColVal() throws SQLException { + SourceTargetColumnNames columnNames = + new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); + String query = + IndexScrutinyTableOutput.getSqlQueryBadCoveredColVal(conn, columnNames, SCRUTINY_TIME_MILLIS); + assertEquals( + "SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" 
BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,true))", + query); + } - @Test - public void testGetSqlQueryBeyondMaxLookback() throws SQLException { - SourceTargetColumnNames columnNames = - new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); - String query = - IndexScrutinyTableOutput.getSqlQueryBeyondMaxLookback(conn, columnNames, - SCRUTINY_TIME_MILLIS); - assertEquals("SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\", \"BEYOND_MAX_LOOKBACK\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,false,true))", - query); - } + @Test + public void testGetSqlQueryBeyondMaxLookback() throws SQLException { + SourceTargetColumnNames columnNames = + new SourceTargetColumnNames.DataSourceColNames(pDataTable, pIndexTable); + String query = IndexScrutinyTableOutput.getSqlQueryBeyondMaxLookback(conn, columnNames, + SCRUTINY_TIME_MILLIS); + assertEquals( + "SELECT \"SOURCE_TABLE\" , \"TARGET_TABLE\" , \"SCRUTINY_EXECUTE_TIME\" , \"SOURCE_ROW_PK_HASH\" , \"SOURCE_TS\" , \"TARGET_TS\" , \"HAS_TARGET_ROW\" , \"BEYOND_MAX_LOOKBACK\" , \"ID\" , \"PK_PART2\" , \"NAME\" , \"ZIP\" , \":ID\" , \":PK_PART2\" , \"0:NAME\" , \"0:ZIP\" FROM PHOENIX_INDEX_SCRUTINY(\"ID\" INTEGER,\"PK_PART2\" TINYINT,\"NAME\" VARCHAR,\"ZIP\" BIGINT,\":ID\" INTEGER,\":PK_PART2\" TINYINT,\"0:NAME\" VARCHAR,\"0:ZIP\" BIGINT) WHERE (\"SOURCE_TABLE\",\"TARGET_TABLE\",\"SCRUTINY_EXECUTE_TIME\", \"HAS_TARGET_ROW\", \"BEYOND_MAX_LOOKBACK\") IN (('TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL','TEST_SCHEMA.TEST_ICN_INDEX',1502908914193,false,true))", + query); + } - @Test - public void testGetOutputTableUpsert() throws Exception { - IndexColumnNames columnNames = new IndexColumnNames(pDataTable, pIndexTable); - String outputTableUpsert = - IndexScrutinyTableOutput.constructOutputTableUpsert( - columnNames.getDynamicDataCols(), columnNames.getDynamicIndexCols(), conn); - conn.prepareStatement(outputTableUpsert); // shouldn't throw - assertEquals("UPSERT INTO PHOENIX_INDEX_SCRUTINY (\"SOURCE_TABLE\", \"TARGET_TABLE\", \"SCRUTINY_EXECUTE_TIME\", \"SOURCE_ROW_PK_HASH\", \"SOURCE_TS\", \"TARGET_TS\", \"HAS_TARGET_ROW\", \"BEYOND_MAX_LOOKBACK\", \"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT, \":ID\" INTEGER, \":PK_PART2\" TINYINT, \"0:NAME\" VARCHAR, \"0:ZIP\" BIGINT) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", - outputTableUpsert); - } + @Test + public void testGetOutputTableUpsert() throws Exception { + IndexColumnNames columnNames = new IndexColumnNames(pDataTable, pIndexTable); + String outputTableUpsert = IndexScrutinyTableOutput.constructOutputTableUpsert( + columnNames.getDynamicDataCols(), columnNames.getDynamicIndexCols(), conn); + conn.prepareStatement(outputTableUpsert); // shouldn't throw + assertEquals( + "UPSERT INTO PHOENIX_INDEX_SCRUTINY (\"SOURCE_TABLE\", \"TARGET_TABLE\", \"SCRUTINY_EXECUTE_TIME\", \"SOURCE_ROW_PK_HASH\", 
\"SOURCE_TS\", \"TARGET_TS\", \"HAS_TARGET_ROW\", \"BEYOND_MAX_LOOKBACK\", \"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT, \":ID\" INTEGER, \":PK_PART2\" TINYINT, \"0:NAME\" VARCHAR, \"0:ZIP\" BIGINT) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + outputTableUpsert); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoderTest.java index 42bcb9775cd..2dfaab12709 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/ColumnInfoToStringEncoderDecoderTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -25,46 +25,48 @@ import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.schema.types.PDate; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ColumnInfo; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * Tests methods on {@link ColumnInfoToStringEncoderDecoder} */ public class ColumnInfoToStringEncoderDecoderTest { - @Test - public void testEncodeDecode() { - final Configuration configuration = new Configuration (); - final ColumnInfo columnInfo1 = new ColumnInfo("col1", PVarchar.INSTANCE.getSqlType()); - final ColumnInfo columnInfo2 = new ColumnInfo("col2", PDate.INSTANCE.getSqlType()); - ArrayList expectedColInfos = Lists.newArrayList(columnInfo1,columnInfo2); - ColumnInfoToStringEncoderDecoder.encode(configuration, expectedColInfos); - - //verify the configuration has the correct values - assertEquals(2, configuration.getInt(ColumnInfoToStringEncoderDecoder.CONFIGURATION_COUNT, 0)); - assertEquals(columnInfo1.toString(), configuration.get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 0))); - assertEquals(columnInfo2.toString(), configuration.get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 1))); - - List actualColInfos = ColumnInfoToStringEncoderDecoder.decode(configuration); - assertEquals(expectedColInfos, actualColInfos); - } - - @Test - public void testEncodeDecodeWithNulls() { - final Configuration configuration = new Configuration (); - final ColumnInfo columnInfo1 = new ColumnInfo("col1", PVarchar.INSTANCE.getSqlType()); - ArrayList expectedColInfos = Lists.newArrayList(columnInfo1); - ColumnInfoToStringEncoderDecoder.encode(configuration, Lists.newArrayList(columnInfo1, null)); - - //verify the configuration has the correct values - assertEquals(1, 
configuration.getInt(ColumnInfoToStringEncoderDecoder.CONFIGURATION_COUNT, 0)); - assertEquals(columnInfo1.toString(), configuration.get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 0))); - - List actualColInfos = ColumnInfoToStringEncoderDecoder.decode(configuration); - assertEquals(expectedColInfos, actualColInfos); - } + @Test + public void testEncodeDecode() { + final Configuration configuration = new Configuration(); + final ColumnInfo columnInfo1 = new ColumnInfo("col1", PVarchar.INSTANCE.getSqlType()); + final ColumnInfo columnInfo2 = new ColumnInfo("col2", PDate.INSTANCE.getSqlType()); + ArrayList expectedColInfos = Lists.newArrayList(columnInfo1, columnInfo2); + ColumnInfoToStringEncoderDecoder.encode(configuration, expectedColInfos); + + // verify the configuration has the correct values + assertEquals(2, configuration.getInt(ColumnInfoToStringEncoderDecoder.CONFIGURATION_COUNT, 0)); + assertEquals(columnInfo1.toString(), configuration + .get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 0))); + assertEquals(columnInfo2.toString(), configuration + .get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 1))); + + List actualColInfos = ColumnInfoToStringEncoderDecoder.decode(configuration); + assertEquals(expectedColInfos, actualColInfos); + } + + @Test + public void testEncodeDecodeWithNulls() { + final Configuration configuration = new Configuration(); + final ColumnInfo columnInfo1 = new ColumnInfo("col1", PVarchar.INSTANCE.getSqlType()); + ArrayList expectedColInfos = Lists.newArrayList(columnInfo1); + ColumnInfoToStringEncoderDecoder.encode(configuration, Lists.newArrayList(columnInfo1, null)); + + // verify the configuration has the correct values + assertEquals(1, configuration.getInt(ColumnInfoToStringEncoderDecoder.CONFIGURATION_COUNT, 0)); + assertEquals(columnInfo1.toString(), configuration + .get(String.format("%s_%d", ColumnInfoToStringEncoderDecoder.CONFIGURATION_VALUE_PREFIX, 0))); + + List actualColInfos = ColumnInfoToStringEncoderDecoder.decode(configuration); + assertEquals(expectedColInfos, actualColInfos); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/IndexColumnNamesTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/IndexColumnNamesTest.java index 48c688f553c..0fb74b50a3f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/IndexColumnNamesTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/IndexColumnNamesTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,45 +30,57 @@ public class IndexColumnNamesTest extends BaseIndexTest { - private static final String DYNAMIC_COL_DDL = - "CREATE TABLE IF NOT EXISTS PRECISION_NAME_TEST\n" + "(\n" - + " CHAR_TEST CHAR(15) NOT NULL primary key,\n" - + " VARCHAR_TEST VARCHAR(1),\n" + " DECIMAL_TEST DECIMAL(10,2),\n" - + " BINARY_TEST BINARY(11),\n" - + " VARCHAR_UNSPEC VARCHAR,\n" - + " DEC_UNSPEC DECIMAL\n" + ")"; + private static final String DYNAMIC_COL_DDL = "CREATE TABLE IF NOT EXISTS PRECISION_NAME_TEST\n" + + "(\n" + " CHAR_TEST CHAR(15) NOT NULL primary key,\n" + " VARCHAR_TEST VARCHAR(1),\n" + + " DECIMAL_TEST DECIMAL(10,2),\n" + " BINARY_TEST BINARY(11),\n" + + " VARCHAR_UNSPEC VARCHAR,\n" + " DEC_UNSPEC DECIMAL\n" + ")"; - private static final String DYNAMIC_COL_IDX_DDL = - "CREATE INDEX PRECISION_NAME_IDX_TEST ON PRECISION_NAME_TEST(VARCHAR_TEST) INCLUDE (CHAR_TEST,DECIMAL_TEST,BINARY_TEST,VARCHAR_UNSPEC,DEC_UNSPEC)"; + private static final String DYNAMIC_COL_IDX_DDL = + "CREATE INDEX PRECISION_NAME_IDX_TEST ON PRECISION_NAME_TEST(VARCHAR_TEST) INCLUDE (CHAR_TEST,DECIMAL_TEST,BINARY_TEST,VARCHAR_UNSPEC,DEC_UNSPEC)"; - @Test - public void testGetColumnNames() { - IndexColumnNames indexColumnNames = new IndexColumnNames(pDataTable, pIndexTable); - assertEquals("[ID, PK_PART2, 0.NAME, 0.ZIP]", indexColumnNames.getDataColNames().toString()); - assertEquals("[:ID, :PK_PART2, 0:NAME, 0:ZIP]", indexColumnNames.getIndexColNames().toString()); //index column names, leading with the data table pk - assertEquals("[:ID, :PK_PART2, 0:NAME]", indexColumnNames.getIndexPkColNames().toString()); - assertEquals("[ID, PK_PART2]", indexColumnNames.getDataPkColNames().toString()); - assertEquals("[0.NAME, 0.ZIP]", indexColumnNames.getDataNonPkColNames().toString()); + @Test + public void testGetColumnNames() { + IndexColumnNames indexColumnNames = new IndexColumnNames(pDataTable, pIndexTable); + assertEquals("[ID, PK_PART2, 0.NAME, 0.ZIP]", indexColumnNames.getDataColNames().toString()); + assertEquals("[:ID, :PK_PART2, 0:NAME, 0:ZIP]", indexColumnNames.getIndexColNames().toString()); // index + // column + // names, + // leading + // with + // the + // data + // table + // pk + assertEquals("[:ID, :PK_PART2, 0:NAME]", indexColumnNames.getIndexPkColNames().toString()); + assertEquals("[ID, PK_PART2]", indexColumnNames.getDataPkColNames().toString()); + assertEquals("[0.NAME, 0.ZIP]", indexColumnNames.getDataNonPkColNames().toString()); - assertEquals("[\"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT]", indexColumnNames.getDynamicDataCols().toString()); - assertEquals("[\":ID\" INTEGER, \":PK_PART2\" TINYINT, \"0:NAME\" VARCHAR, \"0:ZIP\" BIGINT]", indexColumnNames.getDynamicIndexCols().toString()); - assertEquals("UPSERT /*+ NO_INDEX */ INTO TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL (\"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT) VALUES (?, ?, ?, ?)", QueryUtil.constructUpsertStatement(DATA_TABLE_FULL_NAME, indexColumnNames.getDynamicDataCols(), Hint.NO_INDEX)); - } + assertEquals("[\"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT]", + indexColumnNames.getDynamicDataCols().toString()); + assertEquals("[\":ID\" INTEGER, \":PK_PART2\" TINYINT, \"0:NAME\" VARCHAR, \"0:ZIP\" 
BIGINT]", + indexColumnNames.getDynamicIndexCols().toString()); + assertEquals( + "UPSERT /*+ NO_INDEX */ INTO TEST_SCHEMA.TEST_INDEX_COLUMN_NAMES_UTIL (\"ID\" INTEGER, \"PK_PART2\" TINYINT, \"NAME\" VARCHAR, \"ZIP\" BIGINT) VALUES (?, ?, ?, ?)", + QueryUtil.constructUpsertStatement(DATA_TABLE_FULL_NAME, + indexColumnNames.getDynamicDataCols(), Hint.NO_INDEX)); + } - /** - * Tests that col types with a precision are outputted correctly in the dynamic columns - * @throws SQLException - */ - @Test - public void testGetDynamicColPrecision() throws SQLException { - conn.createStatement().execute(DYNAMIC_COL_DDL); - conn.createStatement().execute(DYNAMIC_COL_IDX_DDL); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), "PRECISION_NAME_TEST")); - pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), "PRECISION_NAME_IDX_TEST")); - IndexColumnNames indexColumnNames = new IndexColumnNames(pDataTable, pIndexTable); - assertEquals("[\"CHAR_TEST\" CHAR(15), \"VARCHAR_TEST\" VARCHAR(1), \"DECIMAL_TEST\" DECIMAL(10,2), \"BINARY_TEST\" BINARY(11), \"VARCHAR_UNSPEC\" VARCHAR, \"DEC_UNSPEC\" DECIMAL]", indexColumnNames.getDynamicDataCols().toString()); - assertEquals("[\":CHAR_TEST\" CHAR(15), \"0:VARCHAR_TEST\" VARCHAR(1), \"0:DECIMAL_TEST\" DECIMAL(10,2), \"0:BINARY_TEST\" BINARY(11), \"0:VARCHAR_UNSPEC\" VARCHAR, \"0:DEC_UNSPEC\" DECIMAL]", - indexColumnNames.getDynamicIndexCols().toString()); - } + /** + * Tests that col types with a precision are outputted correctly in the dynamic columns + */ + @Test + public void testGetDynamicColPrecision() throws SQLException { + conn.createStatement().execute(DYNAMIC_COL_DDL); + conn.createStatement().execute(DYNAMIC_COL_IDX_DDL); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + pDataTable = pconn.getTable(new PTableKey(pconn.getTenantId(), "PRECISION_NAME_TEST")); + pIndexTable = pconn.getTable(new PTableKey(pconn.getTenantId(), "PRECISION_NAME_IDX_TEST")); + IndexColumnNames indexColumnNames = new IndexColumnNames(pDataTable, pIndexTable); + assertEquals( + "[\"CHAR_TEST\" CHAR(15), \"VARCHAR_TEST\" VARCHAR(1), \"DECIMAL_TEST\" DECIMAL(10,2), \"BINARY_TEST\" BINARY(11), \"VARCHAR_UNSPEC\" VARCHAR, \"DEC_UNSPEC\" DECIMAL]", + indexColumnNames.getDynamicDataCols().toString()); + assertEquals( + "[\":CHAR_TEST\" CHAR(15), \"0:VARCHAR_TEST\" VARCHAR(1), \"0:DECIMAL_TEST\" DECIMAL(10,2), \"0:BINARY_TEST\" BINARY(11), \"0:VARCHAR_UNSPEC\" VARCHAR, \"0:DEC_UNSPEC\" DECIMAL]", + indexColumnNames.getDynamicIndexCols().toString()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java index b02c10e741f..1dd384dedd4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtilTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -25,12 +25,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.mapreduce.Job; -import org.apache.phoenix.jdbc.ZKConnectionInfo; import org.apache.phoenix.mapreduce.index.IndexScrutinyTool.SourceTable; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MRJobType; import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.SchemaType; import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.PropertiesUtil; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.TestUtil; @@ -41,297 +39,303 @@ * Test for {@link PhoenixConfigurationUtil} */ public class PhoenixConfigurationUtilTest extends BaseConnectionlessQueryTest { - private static final String ORIGINAL_CLUSTER_QUORUM = "myzookeeperhost"; - private static final String OVERRIDE_CLUSTER_QUORUM = "myoverridezookeeperhost"; - - protected static String TEST_URL = TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL; - - @Test - /** - * This test reproduces the bug filed in PHOENIX-2310. - * - * When invoking PhoenixConfigurationUtil.getUpsertStatement(), - * if upserting into a Phoenix View and the View DDL had recently been issued such that MetdataClient cache had - * been updated as a result of the create table versus from data in SYSTEM.CATALOG, the Upsert statement - * would contain the Object.toString() classname + hashcode instead of the correct cf.column_name representation - * which would cause the calling Pig script to fail. 
- */ - public void testUpsertStatementOnNewViewWithReferencedCols() throws Exception { - - // Arrange - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - - try { - final String tableName = "TEST_TABLE_WITH_VIEW"; - final String viewName = "TEST_VIEW"; - String ddl = "CREATE TABLE "+ tableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - String viewDdl = "CREATE VIEW "+ viewName + - " AS SELECT * FROM " + tableName + "\n"; - conn.createStatement().execute(viewDdl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setOutputTableName(configuration, viewName); - PhoenixConfigurationUtil.setPhysicalTableName(configuration, viewName); - PhoenixConfigurationUtil.setUpsertColumnNames(configuration, new String[] {"A_STRING", "A_BINARY", "COL1"}); - - // Act - final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); - - // Assert - final String expectedUpsertStatement = "UPSERT INTO " + viewName + " (\"A_STRING\", \"A_BINARY\", \"0\".\"COL1\") VALUES (?, ?, ?)"; - assertEquals(expectedUpsertStatement, upserStatement); - } finally { - conn.close(); - } - } - - @Test - public void testUpsertStatementOnNewTableWithReferencedCols() throws Exception { - - // Arrange - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - - try { - final String tableName = "TEST_TABLE_WITH_REF_COLS"; - String ddl = "CREATE TABLE "+ tableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); - PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName); - PhoenixConfigurationUtil.setUpsertColumnNames(configuration, new String[] {"A_STRING", "A_BINARY", "COL1"}); - - // Act - final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); - - // Assert - final String expectedUpsertStatement = "UPSERT INTO " + tableName + " (\"A_STRING\", \"A_BINARY\", \"0\".\"COL1\") VALUES (?, ?, ?)"; - assertEquals(expectedUpsertStatement, upserStatement); - } finally { - conn.close(); - } - } - - - @Test - public void testUpsertStatement() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - final String tableName = "TEST_TABLE"; - try { - String ddl = "CREATE TABLE "+ tableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); - PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName); - final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); - final String expectedUpsertStatement = "UPSERT INTO " + tableName + " VALUES (?, ?, ?)"; - 
assertEquals(expectedUpsertStatement, upserStatement); - } finally { - conn.close(); - } - } - - @Test - public void testSelectStatement() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - final String tableName = "TEST_TABLE"; - try { - String ddl = "CREATE TABLE "+ tableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); - final String expectedSelectStatement = "SELECT \"A_STRING\" , \"A_BINARY\" , \"0\".\"COL1\" FROM " + tableName ; - assertEquals(expectedSelectStatement, selectStatement); - } finally { - conn.close(); - } + private static final String ORIGINAL_CLUSTER_QUORUM = "myzookeeperhost"; + private static final String OVERRIDE_CLUSTER_QUORUM = "myoverridezookeeperhost"; + + protected static String TEST_URL = TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL; + + @Test + /** + * This test reproduces the bug filed in PHOENIX-2310. When invoking + * PhoenixConfigurationUtil.getUpsertStatement(), if upserting into a Phoenix View and the View + * DDL had recently been issued such that MetdataClient cache had been updated as a result of the + * create table versus from data in SYSTEM.CATALOG, the Upsert statement would contain the + * Object.toString() classname + hashcode instead of the correct cf.column_name representation + * which would cause the calling Pig script to fail. 
+ */ + public void testUpsertStatementOnNewViewWithReferencedCols() throws Exception { + + // Arrange + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + + try { + final String tableName = "TEST_TABLE_WITH_VIEW"; + final String viewName = "TEST_VIEW"; + String ddl = "CREATE TABLE " + tableName + + " (a_string varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + String viewDdl = "CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName + "\n"; + conn.createStatement().execute(viewDdl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setOutputTableName(configuration, viewName); + PhoenixConfigurationUtil.setPhysicalTableName(configuration, viewName); + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, + new String[] { "A_STRING", "A_BINARY", "COL1" }); + + // Act + final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); + + // Assert + final String expectedUpsertStatement = "UPSERT INTO " + viewName + + " (\"A_STRING\", \"A_BINARY\", \"0\".\"COL1\") VALUES (?, ?, ?)"; + assertEquals(expectedUpsertStatement, upserStatement); + } finally { + conn.close(); } - - @Test - public void testSelectStatementWithSchema() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - final String tableName = "TEST_TABLE"; - final String schemaName = SchemaUtil.getEscapedArgument("schema"); - final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - try { - String ddl = "CREATE TABLE "+ fullTableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setInputTableName(configuration, fullTableName); - final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); - final String expectedSelectStatement = "SELECT \"A_STRING\" , \"A_BINARY\" , \"0\".\"COL1\" FROM " + fullTableName; - assertEquals(expectedSelectStatement, selectStatement); - } finally { - conn.close(); - } + } + + @Test + public void testUpsertStatementOnNewTableWithReferencedCols() throws Exception { + + // Arrange + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + + try { + final String tableName = "TEST_TABLE_WITH_REF_COLS"; + String ddl = "CREATE TABLE " + tableName + + " (a_string varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); + PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName); + PhoenixConfigurationUtil.setUpsertColumnNames(configuration, + new String[] { "A_STRING", "A_BINARY", "COL1" }); + + // Act + final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); + + // Assert + final String 
expectedUpsertStatement = "UPSERT INTO " + tableName + + " (\"A_STRING\", \"A_BINARY\", \"0\".\"COL1\") VALUES (?, ?, ?)"; + assertEquals(expectedUpsertStatement, upserStatement); + } finally { + conn.close(); } - - @Test - public void testSelectStatementForSpecificColumns() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - final String tableName = "TEST_TABLE"; - try { - String ddl = "CREATE TABLE "+ tableName + - " (a_string varchar not null, a_binary varbinary not null, col1 integer" + - " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - PhoenixConfigurationUtil.setSelectColumnNames(configuration, new String[]{"A_BINARY"}); - final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); - final String expectedSelectStatement = "SELECT \"A_BINARY\" FROM " + tableName ; - assertEquals(expectedSelectStatement, selectStatement); - } finally { - conn.close(); - } + } + + @Test + public void testUpsertStatement() throws Exception { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + final String tableName = "TEST_TABLE"; + try { + String ddl = "CREATE TABLE " + tableName + + " (a_string varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setOutputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setOutputTableName(configuration, tableName); + PhoenixConfigurationUtil.setPhysicalTableName(configuration, tableName); + final String upserStatement = PhoenixConfigurationUtil.getUpsertStatement(configuration); + final String expectedUpsertStatement = "UPSERT INTO " + tableName + " VALUES (?, ?, ?)"; + assertEquals(expectedUpsertStatement, upserStatement); + } finally { + conn.close(); } - - @Test - public void testSelectStatementForArrayTypes() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); - final String tableName = "TEST_TABLE"; - try { - String ddl = "CREATE TABLE "+ tableName + - " (ID BIGINT NOT NULL PRIMARY KEY, VCARRAY VARCHAR[])\n"; - conn.createStatement().execute(ddl); - final Configuration configuration = new Configuration (); - PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); - PhoenixConfigurationUtil.setSelectColumnNames(configuration,new String[]{"ID","VCARRAY"}); - PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); - PhoenixConfigurationUtil.setInputTableName(configuration, tableName); - final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); - final String expectedSelectStatement = "SELECT \"ID\" , \"0\".\"VCARRAY\" FROM " + tableName ; - assertEquals(expectedSelectStatement, selectStatement); - } finally { - conn.close(); - } + } + + @Test + public void testSelectStatement() throws Exception { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + final String tableName = "TEST_TABLE"; + try { + String ddl = "CREATE TABLE " + tableName + + " (a_string 
varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); + final String expectedSelectStatement = + "SELECT \"A_STRING\" , \"A_BINARY\" , \"0\".\"COL1\" FROM " + tableName; + assertEquals(expectedSelectStatement, selectStatement); + } finally { + conn.close(); } - - @Test - public void testInputClusterOverride() throws Exception { - final Configuration configuration = new Configuration(); - configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM); - String zkQuorum = PhoenixConfigurationUtilHelper.getInputCluster(configuration); - assertEquals(zkQuorum, ORIGINAL_CLUSTER_QUORUM); - - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_QUORUM, - OVERRIDE_CLUSTER_QUORUM); - String zkQuorumOverride = PhoenixConfigurationUtilHelper.getInputCluster(configuration); - assertEquals(zkQuorumOverride, OVERRIDE_CLUSTER_QUORUM); - - final Configuration configuration2 = new Configuration(); - PhoenixConfigurationUtil.setInputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM); - String zkQuorumOverride2 = - PhoenixConfigurationUtilHelper.getInputCluster(configuration2); - assertEquals(zkQuorumOverride2, OVERRIDE_CLUSTER_QUORUM); - - final Job job = Job.getInstance(); - PhoenixMapReduceUtil.setInputCluster(job, OVERRIDE_CLUSTER_QUORUM); - Configuration configuration3 = job.getConfiguration(); - String zkQuorumOverride3 = - PhoenixConfigurationUtilHelper.getInputCluster(configuration3); - assertEquals(zkQuorumOverride3, OVERRIDE_CLUSTER_QUORUM); - + } + + @Test + public void testSelectStatementWithSchema() throws Exception { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + final String tableName = "TEST_TABLE"; + final String schemaName = SchemaUtil.getEscapedArgument("schema"); + final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + try { + String ddl = "CREATE TABLE " + fullTableName + + " (a_string varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setInputTableName(configuration, fullTableName); + final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); + final String expectedSelectStatement = + "SELECT \"A_STRING\" , \"A_BINARY\" , \"0\".\"COL1\" FROM " + fullTableName; + assertEquals(expectedSelectStatement, selectStatement); + } finally { + conn.close(); } - - @Test - public void testOutputClusterOverride() throws Exception { - final Configuration configuration = new Configuration(); - configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM); - String zkQuorum = PhoenixConfigurationUtilHelper.getOutputCluster(configuration); - assertEquals(zkQuorum, ORIGINAL_CLUSTER_QUORUM); - - configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM, - OVERRIDE_CLUSTER_QUORUM); - String zkQuorumOverride = PhoenixConfigurationUtilHelper.getOutputCluster(configuration); - 
assertEquals(zkQuorumOverride, OVERRIDE_CLUSTER_QUORUM); - - final Configuration configuration2 = new Configuration(); - PhoenixConfigurationUtil.setOutputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM); - String zkQuorumOverride2 = - PhoenixConfigurationUtilHelper.getOutputCluster(configuration2); - assertEquals(zkQuorumOverride2, OVERRIDE_CLUSTER_QUORUM); - - final Job job = Job.getInstance(); - PhoenixMapReduceUtil.setOutputCluster(job, OVERRIDE_CLUSTER_QUORUM); - Configuration configuration3 = job.getConfiguration(); - String zkQuorumOverride3 = - PhoenixConfigurationUtilHelper.getOutputCluster(configuration3); - assertEquals(zkQuorumOverride3, OVERRIDE_CLUSTER_QUORUM); - - } - - @Test - public void testMrJobTypeOverride() throws Exception { - final Job job = Job.getInstance(); - Configuration configuration = job.getConfiguration(); - MRJobType mrJobType = PhoenixConfigurationUtil.getMRJobType(configuration, - MRJobType.QUERY.name()); - assertEquals(MRJobType.QUERY.name(), mrJobType.name()); - - PhoenixConfigurationUtil.setMRJobType(configuration, MRJobType.UPDATE_STATS); - mrJobType = PhoenixConfigurationUtil.getMRJobType(configuration, - MRJobType.QUERY.name()); - assertEquals(MRJobType.UPDATE_STATS.name(), mrJobType.name()); - + } + + @Test + public void testSelectStatementForSpecificColumns() throws Exception { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + final String tableName = "TEST_TABLE"; + try { + String ddl = "CREATE TABLE " + tableName + + " (a_string varchar not null, a_binary varbinary not null, col1 integer" + + " CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + PhoenixConfigurationUtil.setSelectColumnNames(configuration, new String[] { "A_BINARY" }); + final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); + final String expectedSelectStatement = "SELECT \"A_BINARY\" FROM " + tableName; + assertEquals(expectedSelectStatement, selectStatement); + } finally { + conn.close(); } - - @Test - public void testTimeRangeOverride() { - final Configuration configuration = new Configuration(); - Long startTime = 1L; - Long endTime = 2L; - - PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); - PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); - Assert.assertEquals(startTime.longValue(), - Long.parseLong(PhoenixConfigurationUtil.getIndexToolStartTime(configuration))); - Assert.assertEquals(endTime.longValue(), - Long.parseLong(PhoenixConfigurationUtil.getCurrentScnValue(configuration))); - - } - - @Test - public void testLastVerifyTimeConfig() { - final Configuration configuration = new Configuration(); - Long lastVerifyTime = 2L; - - PhoenixConfigurationUtil.setIndexToolLastVerifyTime(configuration, lastVerifyTime); - Assert.assertEquals(lastVerifyTime.longValue(), - Long.parseLong(PhoenixConfigurationUtil.getIndexToolLastVerifyTime(configuration))); - - } - - @Test - public void testIndexToolSourceConfig() { - final Configuration conf = new Configuration(); - - // by default source is data table - SourceTable sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); - Assert.assertEquals(sourceTable, SourceTable.DATA_TABLE_SOURCE); - - 
PhoenixConfigurationUtil.setIndexToolSourceTable(conf, SourceTable.INDEX_TABLE_SOURCE); - sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); - Assert.assertEquals(sourceTable, SourceTable.INDEX_TABLE_SOURCE); - - PhoenixConfigurationUtil.setIndexToolSourceTable(conf, SourceTable.DATA_TABLE_SOURCE); - sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); - Assert.assertEquals(sourceTable, SourceTable.DATA_TABLE_SOURCE); + } + + @Test + public void testSelectStatementForArrayTypes() throws Exception { + Connection conn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES)); + final String tableName = "TEST_TABLE"; + try { + String ddl = + "CREATE TABLE " + tableName + " (ID BIGINT NOT NULL PRIMARY KEY, VCARRAY VARCHAR[])\n"; + conn.createStatement().execute(ddl); + final Configuration configuration = new Configuration(); + PhoenixConfigurationUtil.setInputClusterUrl(configuration, TEST_URL); + PhoenixConfigurationUtil.setSelectColumnNames(configuration, + new String[] { "ID", "VCARRAY" }); + PhoenixConfigurationUtil.setSchemaType(configuration, SchemaType.QUERY); + PhoenixConfigurationUtil.setInputTableName(configuration, tableName); + final String selectStatement = PhoenixConfigurationUtil.getSelectStatement(configuration); + final String expectedSelectStatement = "SELECT \"ID\" , \"0\".\"VCARRAY\" FROM " + tableName; + assertEquals(expectedSelectStatement, selectStatement); + } finally { + conn.close(); } + } + + @Test + public void testInputClusterOverride() throws Exception { + final Configuration configuration = new Configuration(); + configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM); + String zkQuorum = PhoenixConfigurationUtilHelper.getInputCluster(configuration); + assertEquals(zkQuorum, ORIGINAL_CLUSTER_QUORUM); + + configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_INPUT_CLUSTER_QUORUM, + OVERRIDE_CLUSTER_QUORUM); + String zkQuorumOverride = PhoenixConfigurationUtilHelper.getInputCluster(configuration); + assertEquals(zkQuorumOverride, OVERRIDE_CLUSTER_QUORUM); + + final Configuration configuration2 = new Configuration(); + PhoenixConfigurationUtil.setInputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM); + String zkQuorumOverride2 = PhoenixConfigurationUtilHelper.getInputCluster(configuration2); + assertEquals(zkQuorumOverride2, OVERRIDE_CLUSTER_QUORUM); + + final Job job = Job.getInstance(); + PhoenixMapReduceUtil.setInputCluster(job, OVERRIDE_CLUSTER_QUORUM); + Configuration configuration3 = job.getConfiguration(); + String zkQuorumOverride3 = PhoenixConfigurationUtilHelper.getInputCluster(configuration3); + assertEquals(zkQuorumOverride3, OVERRIDE_CLUSTER_QUORUM); + + } + + @Test + public void testOutputClusterOverride() throws Exception { + final Configuration configuration = new Configuration(); + configuration.set(HConstants.ZOOKEEPER_QUORUM, ORIGINAL_CLUSTER_QUORUM); + String zkQuorum = PhoenixConfigurationUtilHelper.getOutputCluster(configuration); + assertEquals(zkQuorum, ORIGINAL_CLUSTER_QUORUM); + + configuration.set(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM, + OVERRIDE_CLUSTER_QUORUM); + String zkQuorumOverride = PhoenixConfigurationUtilHelper.getOutputCluster(configuration); + assertEquals(zkQuorumOverride, OVERRIDE_CLUSTER_QUORUM); + + final Configuration configuration2 = new Configuration(); + PhoenixConfigurationUtil.setOutputCluster(configuration2, OVERRIDE_CLUSTER_QUORUM); + String zkQuorumOverride2 = 
PhoenixConfigurationUtilHelper.getOutputCluster(configuration2); + assertEquals(zkQuorumOverride2, OVERRIDE_CLUSTER_QUORUM); + + final Job job = Job.getInstance(); + PhoenixMapReduceUtil.setOutputCluster(job, OVERRIDE_CLUSTER_QUORUM); + Configuration configuration3 = job.getConfiguration(); + String zkQuorumOverride3 = PhoenixConfigurationUtilHelper.getOutputCluster(configuration3); + assertEquals(zkQuorumOverride3, OVERRIDE_CLUSTER_QUORUM); + + } + + @Test + public void testMrJobTypeOverride() throws Exception { + final Job job = Job.getInstance(); + Configuration configuration = job.getConfiguration(); + MRJobType mrJobType = + PhoenixConfigurationUtil.getMRJobType(configuration, MRJobType.QUERY.name()); + assertEquals(MRJobType.QUERY.name(), mrJobType.name()); + + PhoenixConfigurationUtil.setMRJobType(configuration, MRJobType.UPDATE_STATS); + mrJobType = PhoenixConfigurationUtil.getMRJobType(configuration, MRJobType.QUERY.name()); + assertEquals(MRJobType.UPDATE_STATS.name(), mrJobType.name()); + + } + + @Test + public void testTimeRangeOverride() { + final Configuration configuration = new Configuration(); + Long startTime = 1L; + Long endTime = 2L; + + PhoenixConfigurationUtil.setIndexToolStartTime(configuration, startTime); + PhoenixConfigurationUtil.setCurrentScnValue(configuration, endTime); + Assert.assertEquals(startTime.longValue(), + Long.parseLong(PhoenixConfigurationUtil.getIndexToolStartTime(configuration))); + Assert.assertEquals(endTime.longValue(), + Long.parseLong(PhoenixConfigurationUtil.getCurrentScnValue(configuration))); + + } + + @Test + public void testLastVerifyTimeConfig() { + final Configuration configuration = new Configuration(); + Long lastVerifyTime = 2L; + + PhoenixConfigurationUtil.setIndexToolLastVerifyTime(configuration, lastVerifyTime); + Assert.assertEquals(lastVerifyTime.longValue(), + Long.parseLong(PhoenixConfigurationUtil.getIndexToolLastVerifyTime(configuration))); + + } + + @Test + public void testIndexToolSourceConfig() { + final Configuration conf = new Configuration(); + + // by default source is data table + SourceTable sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); + Assert.assertEquals(sourceTable, SourceTable.DATA_TABLE_SOURCE); + + PhoenixConfigurationUtil.setIndexToolSourceTable(conf, SourceTable.INDEX_TABLE_SOURCE); + sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); + Assert.assertEquals(sourceTable, SourceTable.INDEX_TABLE_SOURCE); + + PhoenixConfigurationUtil.setIndexToolSourceTable(conf, SourceTable.DATA_TABLE_SOURCE); + sourceTable = PhoenixConfigurationUtil.getIndexToolSourceTable(conf); + Assert.assertEquals(sourceTable, SourceTable.DATA_TABLE_SOURCE); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/memory/MemoryManagerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/memory/MemoryManagerTest.java index 809702a59b6..5e2504f7d32 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/memory/MemoryManagerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/memory/MemoryManagerTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,169 +33,173 @@ import org.junit.Test; /** - * - * Tests for GlobalMemoryManager and ChildMemoryManager - * TODO: use our own time keeper so these tests don't flap - * - * + * Tests for GlobalMemoryManager and ChildMemoryManager TODO: use our own time keeper so these tests + * don't flap * @since 0.1 */ public class MemoryManagerTest { - @ClassRule - public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); - - @Test - public void testOverGlobalMemoryLimit() throws Exception { - GlobalMemoryManager gmm = new GlobalMemoryManager(250); - try { - gmm.allocate(300); - fail(); - } catch (InsufficientMemoryException e) { // expected - } + @ClassRule + public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); - ChildMemoryManager rmm1 = new ChildMemoryManager(gmm,100); - ChildMemoryManager rmm2 = new ChildMemoryManager(gmm,100); - MemoryChunk c1 = rmm1.allocate(100); - MemoryChunk c2 = rmm2.allocate(100); - try { - rmm2.allocate(100); - fail(); - } catch (InsufficientMemoryException e) { // expected - } - - c1.close(); - c2.close(); - assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); + @Test + public void testOverGlobalMemoryLimit() throws Exception { + GlobalMemoryManager gmm = new GlobalMemoryManager(250); + try { + gmm.allocate(300); + fail(); + } catch (InsufficientMemoryException e) { // expected } - @Test - public void testChildDecreaseAllocation() throws Exception { - MemoryManager gmm = spy(new GlobalMemoryManager(100)); - ChildMemoryManager rmm1 = new ChildMemoryManager(gmm,100); - ChildMemoryManager rmm2 = new ChildMemoryManager(gmm,10); - MemoryChunk c1 = rmm1.allocate(50); - MemoryChunk c2 = rmm2.allocate(5,50); - assertTrue(c2.getSize() == 10); - c1.close(); - assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); - c2.close(); - assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory()); - assertTrue(gmm.getAvailableMemory() == gmm.getMaxMemory()); + ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 100); + ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 100); + MemoryChunk c1 = rmm1.allocate(100); + MemoryChunk c2 = rmm2.allocate(100); + try { + rmm2.allocate(100); + fail(); + } catch (InsufficientMemoryException e) { // expected } - @Test - public void testOverChildMemoryLimit() throws Exception { - MemoryManager gmm = new GlobalMemoryManager(100); - ChildMemoryManager rmm1 = new ChildMemoryManager(gmm,25); - ChildMemoryManager rmm2 = new ChildMemoryManager(gmm,25); - ChildMemoryManager rmm3 = new ChildMemoryManager(gmm,25); - ChildMemoryManager rmm4 = new ChildMemoryManager(gmm,35); - MemoryChunk c1 = rmm1.allocate(20); - MemoryChunk c2 = rmm2.allocate(20); - try { - rmm1.allocate(10); - fail(); - } catch (InsufficientMemoryException e) { // expected - } - MemoryChunk c3 = rmm3.allocate(25); - c1.close(); - // Ensure that you can get back to max for rmn1 after failure - MemoryChunk c4 = rmm1.allocate(10); - MemoryChunk c5 = rmm1.allocate(15); - - MemoryChunk c6 = rmm4.allocate(25); - try { - // This passes % test, but fails the next total memory usage test - rmm4.allocate(10); - fail(); - } catch (InsufficientMemoryException e) { // expected - } - c2.close(); - // Tests that % test passes (confirming that the 10 above was subtracted back 
from request memory usage, - // since we'd be at the max of 35% now - MemoryChunk c7 = rmm4.allocate(10); - - try { - rmm4.allocate(1); - fail(); - } catch (InsufficientMemoryException e) { // expected - } - - try { - rmm2.allocate(25); - fail(); - } catch (InsufficientMemoryException e) { // expected - } - - c3.close(); - c4.close(); - c5.close(); - c6.close(); - c7.close(); - assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); - assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory()); - assertTrue(rmm3.getAvailableMemory() == rmm3.getMaxMemory()); - assertTrue(rmm4.getAvailableMemory() == rmm4.getMaxMemory()); + c1.close(); + c2.close(); + assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); + } + + @Test + public void testChildDecreaseAllocation() throws Exception { + MemoryManager gmm = spy(new GlobalMemoryManager(100)); + ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 100); + ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 10); + MemoryChunk c1 = rmm1.allocate(50); + MemoryChunk c2 = rmm2.allocate(5, 50); + assertTrue(c2.getSize() == 10); + c1.close(); + assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); + c2.close(); + assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory()); + assertTrue(gmm.getAvailableMemory() == gmm.getMaxMemory()); + } + + @Test + public void testOverChildMemoryLimit() throws Exception { + MemoryManager gmm = new GlobalMemoryManager(100); + ChildMemoryManager rmm1 = new ChildMemoryManager(gmm, 25); + ChildMemoryManager rmm2 = new ChildMemoryManager(gmm, 25); + ChildMemoryManager rmm3 = new ChildMemoryManager(gmm, 25); + ChildMemoryManager rmm4 = new ChildMemoryManager(gmm, 35); + MemoryChunk c1 = rmm1.allocate(20); + MemoryChunk c2 = rmm2.allocate(20); + try { + rmm1.allocate(10); + fail(); + } catch (InsufficientMemoryException e) { // expected + } + MemoryChunk c3 = rmm3.allocate(25); + c1.close(); + // Ensure that you can get back to max for rmn1 after failure + MemoryChunk c4 = rmm1.allocate(10); + MemoryChunk c5 = rmm1.allocate(15); + + MemoryChunk c6 = rmm4.allocate(25); + try { + // This passes % test, but fails the next total memory usage test + rmm4.allocate(10); + fail(); + } catch (InsufficientMemoryException e) { // expected + } + c2.close(); + // Tests that % test passes (confirming that the 10 above was subtracted back from request + // memory usage, + // since we'd be at the max of 35% now + MemoryChunk c7 = rmm4.allocate(10); + + try { + rmm4.allocate(1); + fail(); + } catch (InsufficientMemoryException e) { // expected } - @Test - public void testConcurrentAllocation() throws Exception { - int THREADS = 100; - - // each thread will attempt up to 100 allocations on average. - final GlobalMemoryManager gmm = new GlobalMemoryManager(THREADS * 1000); - final AtomicInteger count = new AtomicInteger(0); - final CountDownLatch barrier = new CountDownLatch(THREADS); - final CountDownLatch barrier2 = new CountDownLatch(THREADS); - final CountDownLatch signal = new CountDownLatch(1); - /* - * each thread will allocate chunks of 10 bytes, until no more memory is available. 
- */ - for (int i = 0; i < THREADS; i++) { - new Thread(new Runnable() { - List chunks = new ArrayList<>(); - @Override - public void run() { - try { - while(true) { - Thread.sleep(1); - chunks.add(gmm.allocate(10)); - count.incrementAndGet(); - } - } catch (InsufficientMemoryException e) { - barrier.countDown(); - // wait for the signal to go ahead - try {signal.await();} catch (InterruptedException ix) {} - for (MemoryChunk chunk : chunks) { - chunk.close(); - } - barrier2.countDown(); - } catch (InterruptedException ix) {} - } - }).start(); - } - // wait until all threads failed an allocation - barrier.await(); - // make sure all memory was used - assertTrue(gmm.getAvailableMemory() == 0); - // let the threads end, and free their memory - signal.countDown(); barrier2.await(); - // make sure all memory is freed - assertTrue(gmm.getAvailableMemory() == gmm.getMaxMemory()); + try { + rmm2.allocate(25); + fail(); + } catch (InsufficientMemoryException e) { // expected } - /** - * Test for SpillableGroupByCache which is using MemoryManager to allocate chunks for GroupBy execution - * @throws Exception + c3.close(); + c4.close(); + c5.close(); + c6.close(); + c7.close(); + assertTrue(rmm1.getAvailableMemory() == rmm1.getMaxMemory()); + assertTrue(rmm2.getAvailableMemory() == rmm2.getMaxMemory()); + assertTrue(rmm3.getAvailableMemory() == rmm3.getMaxMemory()); + assertTrue(rmm4.getAvailableMemory() == rmm4.getMaxMemory()); + } + + @Test + public void testConcurrentAllocation() throws Exception { + int THREADS = 100; + + // each thread will attempt up to 100 allocations on average. + final GlobalMemoryManager gmm = new GlobalMemoryManager(THREADS * 1000); + final AtomicInteger count = new AtomicInteger(0); + final CountDownLatch barrier = new CountDownLatch(THREADS); + final CountDownLatch barrier2 = new CountDownLatch(THREADS); + final CountDownLatch signal = new CountDownLatch(1); + /* + * each thread will allocate chunks of 10 bytes, until no more memory is available. 
*/ - @Test - public void testCorrectnessOfChunkAllocation() throws Exception { - for(int i = 1000;i < Integer.MAX_VALUE;) { - i *=1.5f; - long result = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(i, 100); - assertTrue("Size for GroupByMap is negative" , result > 0); + for (int i = 0; i < THREADS; i++) { + new Thread(new Runnable() { + List chunks = new ArrayList<>(); + + @Override + public void run() { + try { + while (true) { + Thread.sleep(1); + chunks.add(gmm.allocate(10)); + count.incrementAndGet(); + } + } catch (InsufficientMemoryException e) { + barrier.countDown(); + // wait for the signal to go ahead + try { + signal.await(); + } catch (InterruptedException ix) { + } + for (MemoryChunk chunk : chunks) { + chunk.close(); + } + barrier2.countDown(); + } catch (InterruptedException ix) { + } } + }).start(); + } + // wait until all threads failed an allocation + barrier.await(); + // make sure all memory was used + assertTrue(gmm.getAvailableMemory() == 0); + // let the threads end, and free their memory + signal.countDown(); + barrier2.await(); + // make sure all memory is freed + assertTrue(gmm.getAvailableMemory() == gmm.getMaxMemory()); + } + + /** + * Test for SpillableGroupByCache which is using MemoryManager to allocate chunks for GroupBy + * execution + */ + @Test + public void testCorrectnessOfChunkAllocation() throws Exception { + for (int i = 1000; i < Integer.MAX_VALUE;) { + i *= 1.5f; + long result = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(i, 100); + assertTrue("Size for GroupByMap is negative", result > 0); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java index d2bccb78cb0..ee9b3db0ec7 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,30 +31,30 @@ */ public class LoggingSink implements MetricsSink { - private static final Logger LOGGER = LoggerFactory.getLogger(LoggingSink.class); + private static final Logger LOGGER = LoggerFactory.getLogger(LoggingSink.class); - @Override - public void init(SubsetConfiguration config) { - } + @Override + public void init(SubsetConfiguration config) { + } - @Override - public void putMetrics(MetricsRecord record) { - // we could wait until flush, but this is a really lightweight process, so we just write - // them - // as soon as we get them - if (!LOGGER.isDebugEnabled()) { - return; - } - LOGGER.debug("Found record:" + record.name()); - for (AbstractMetric metric : record.metrics()) { - // just print the metric we care about - if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { - LOGGER.debug("\t metric:" + metric); - } - } + @Override + public void putMetrics(MetricsRecord record) { + // we could wait until flush, but this is a really lightweight process, so we just write + // them + // as soon as we get them + if (!LOGGER.isDebugEnabled()) { + return; } - - @Override - public void flush() { + LOGGER.debug("Found record:" + record.name()); + for (AbstractMetric metric : record.metrics()) { + // just print the metric we care about + if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { + LOGGER.debug("\t metric:" + metric); + } } -} \ No newline at end of file + } + + @Override + public void flush() { + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/MetricTypeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/MetricTypeTest.java index 62b5de5fae0..251cdf6d669 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/MetricTypeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/MetricTypeTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.phoenix.metrics; import static org.junit.Assert.fail; @@ -5,9 +22,8 @@ import java.util.Map; import org.apache.phoenix.monitoring.MetricType; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.junit.Test; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -28,15 +44,16 @@ */ public class MetricTypeTest { - @Test - public void testUniqueShortNames() throws Exception { - Map shortNameMap = Maps.newHashMapWithExpectedSize(MetricType.values().length); - for (MetricType type : MetricType.values()) { - MetricType oldMetricType = shortNameMap.put(type.shortName(), type); - if (oldMetricType!=null) { - fail("Metric short names should be unique found duplicates for " + type.name() + " and " - + oldMetricType.name()); - } - } + @Test + public void testUniqueShortNames() throws Exception { + Map shortNameMap = + Maps.newHashMapWithExpectedSize(MetricType.values().length); + for (MetricType type : MetricType.values()) { + MetricType oldMetricType = shortNameMap.put(type.shortName(), type); + if (oldMetricType != null) { + fail("Metric short names should be unique found duplicates for " + type.name() + " and " + + oldMetricType.name()); + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/LatencyHistogramTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/LatencyHistogramTest.java index e65185e0cea..f09fdd4db23 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/LatencyHistogramTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/LatencyHistogramTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,77 +19,79 @@ import java.util.HashMap; import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.query.QueryServices; import org.junit.Assert; import org.junit.Test; /** - Test for {@link LatencyHistogram} + * Test for {@link LatencyHistogram} **/ public class LatencyHistogramTest { - @Test - public void testLatencyHistogramRangeOverride() { - String histoName = "PhoenixGetLatencyHisto"; - Configuration conf = new Configuration(); - conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2, 5, 8"); - LatencyHistogram histogram = new LatencyHistogram(histoName, - "histogram for GET operation latency", conf); - Assert.assertEquals(histoName, histogram.getName()); - long[] ranges = histogram.getRanges(); - Assert.assertNotNull(ranges); - Assert.assertEquals(3, ranges.length); - Assert.assertEquals(2, ranges[0]); - Assert.assertEquals(5, ranges[1]); - Assert.assertEquals(8, ranges[2]); - } + @Test + public void testLatencyHistogramRangeOverride() { + String histoName = "PhoenixGetLatencyHisto"; + Configuration conf = new Configuration(); + conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2, 5, 8"); + LatencyHistogram histogram = + new LatencyHistogram(histoName, "histogram for GET operation latency", conf); + Assert.assertEquals(histoName, histogram.getName()); + long[] ranges = histogram.getRanges(); + Assert.assertNotNull(ranges); + Assert.assertEquals(3, ranges.length); + Assert.assertEquals(2, ranges[0]); + Assert.assertEquals(5, ranges[1]); + 
Assert.assertEquals(8, ranges[2]); + } - @Test - public void testEveryRangeInDefaultRange() { - //1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 - Configuration conf = new Configuration(); - String histoName = "PhoenixGetLatencyHisto"; - conf.unset(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES); - LatencyHistogram histogram = new LatencyHistogram(histoName, - "histogram for GET operation latency", conf); - Assert.assertEquals(histoName, histogram.getName()); - Assert.assertEquals(LatencyHistogram.DEFAULT_RANGE, histogram.getRanges()); + @Test + public void testEveryRangeInDefaultRange() { + // 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 + Configuration conf = new Configuration(); + String histoName = "PhoenixGetLatencyHisto"; + conf.unset(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES); + LatencyHistogram histogram = + new LatencyHistogram(histoName, "histogram for GET operation latency", conf); + Assert.assertEquals(histoName, histogram.getName()); + Assert.assertEquals(LatencyHistogram.DEFAULT_RANGE, histogram.getRanges()); - histogram.add(1); - histogram.add(2); - histogram.add(3); - histogram.add(5); - histogram.add(20); - histogram.add(60); - histogram.add(200); - histogram.add(600); - histogram.add(2000); - histogram.add(6000); - histogram.add(20000); - histogram.add(45000); - histogram.add(90000); - histogram.add(200000); - histogram.add(450000); - histogram.add(900000); + histogram.add(1); + histogram.add(2); + histogram.add(3); + histogram.add(5); + histogram.add(20); + histogram.add(60); + histogram.add(200); + histogram.add(600); + histogram.add(2000); + histogram.add(6000); + histogram.add(20000); + histogram.add(45000); + histogram.add(90000); + histogram.add(200000); + histogram.add(450000); + histogram.add(900000); - Map distribution = histogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map expectedMap = new HashMap<>(); - expectedMap.put("0,1", 1l); - expectedMap.put("1,3", 2l); - expectedMap.put("3,10", 1l); - expectedMap.put("10,30", 1l); - expectedMap.put("30,100", 1l); - expectedMap.put("100,300", 1l); - expectedMap.put("300,1000", 1l); - expectedMap.put("1000,3000", 1l); - expectedMap.put("3000,10000", 1l); - expectedMap.put("10000,30000", 1l); - expectedMap.put("30000,60000", 1l); - expectedMap.put("60000,120000", 1l); - expectedMap.put("120000,300000", 1l); - expectedMap.put("300000,600000", 1l); - Assert.assertEquals(expectedMap, distribution); - } + Map distribution = + histogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map expectedMap = new HashMap<>(); + expectedMap.put("0,1", 1l); + expectedMap.put("1,3", 2l); + expectedMap.put("3,10", 1l); + expectedMap.put("10,30", 1l); + expectedMap.put("30,100", 1l); + expectedMap.put("100,300", 1l); + expectedMap.put("300,1000", 1l); + expectedMap.put("1000,3000", 1l); + expectedMap.put("3000,10000", 1l); + expectedMap.put("10000,30000", 1l); + expectedMap.put("30000,60000", 1l); + expectedMap.put("60000,120000", 1l); + expectedMap.put("120000,300000", 1l); + expectedMap.put("300000,600000", 1l); + Assert.assertEquals(expectedMap, distribution); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/MetricUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/MetricUtilTest.java index 28e85dfa904..ff5a3ed5043 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/MetricUtilTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/MetricUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,6 @@ */ package org.apache.phoenix.monitoring; -import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.phoenix.log.LogLevel; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.junit.MockitoJUnitRunner; - import static org.apache.phoenix.monitoring.MetricType.RESULT_SET_TIME_MS; import static org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS; import static org.junit.Assert.assertFalse; @@ -31,38 +24,44 @@ import java.lang.reflect.Field; +import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.phoenix.log.LogLevel; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + @RunWith(MockitoJUnitRunner.class) public class MetricUtilTest { - @Test - public void testGetMetricsStopWatchWithMetricsTrue() throws Exception { - MetricsStopWatch metricsStopWatch = MetricUtil.getMetricsStopWatch(true, - LogLevel.OFF, WALL_CLOCK_TIME_MS); - assertTrue(metricsStopWatch.getMetricsEnabled()); - metricsStopWatch.start(); + @Test + public void testGetMetricsStopWatchWithMetricsTrue() throws Exception { + MetricsStopWatch metricsStopWatch = + MetricUtil.getMetricsStopWatch(true, LogLevel.OFF, WALL_CLOCK_TIME_MS); + assertTrue(metricsStopWatch.getMetricsEnabled()); + metricsStopWatch.start(); - metricsStopWatch = MetricUtil.getMetricsStopWatch(false, - LogLevel.INFO, RESULT_SET_TIME_MS); - assertTrue(metricsStopWatch.getMetricsEnabled()); - } + metricsStopWatch = MetricUtil.getMetricsStopWatch(false, LogLevel.INFO, RESULT_SET_TIME_MS); + assertTrue(metricsStopWatch.getMetricsEnabled()); + } - @Test - public void testGetMetricsStopWatchWithMetricsFalse() throws Exception { - MetricsStopWatch metricsStopWatch = MetricUtil.getMetricsStopWatch(false, - LogLevel.OFF, WALL_CLOCK_TIME_MS); - assertFalse(metricsStopWatch.getMetricsEnabled()); - } + @Test + public void testGetMetricsStopWatchWithMetricsFalse() throws Exception { + MetricsStopWatch metricsStopWatch = + MetricUtil.getMetricsStopWatch(false, LogLevel.OFF, WALL_CLOCK_TIME_MS); + assertFalse(metricsStopWatch.getMetricsEnabled()); + } - @Test - //Check that MetricsSystemImpl has a String "prefix" field in the Hadoop version we test with - public void testInternalMetricsField() throws NoSuchFieldException, - SecurityException, IllegalArgumentException, IllegalAccessException { - MetricsSystemImpl metrics = (MetricsSystemImpl) DefaultMetricsSystem.instance(); - Field prefixField = MetricsSystemImpl.class.getDeclaredField("prefix"); - prefixField.setAccessible(true); - String oldValue = (String)prefixField.get(metrics); - prefixField.set(metrics, "dummy"); - prefixField.set(metrics, oldValue); - prefixField.setAccessible(false); - } + @Test + // Check that MetricsSystemImpl has a String "prefix" field in the Hadoop version we test with + public void testInternalMetricsField() throws NoSuchFieldException, SecurityException, + IllegalArgumentException, 
IllegalAccessException { + MetricsSystemImpl metrics = (MetricsSystemImpl) DefaultMetricsSystem.instance(); + Field prefixField = MetricsSystemImpl.class.getDeclaredField("prefix"); + prefixField.setAccessible(true); + String oldValue = (String) prefixField.get(metrics); + prefixField.set(metrics, "dummy"); + prefixField.set(metrics, oldValue); + prefixField.setAccessible(false); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java index f97731d3e38..b473ee33f4e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/OverAllQueryMetricsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,15 +17,6 @@ */ package org.apache.phoenix.monitoring; -import org.apache.phoenix.log.LogLevel; -import org.apache.phoenix.util.EnvironmentEdge; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.CACHE_REFRESH_SPLITS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.NO_OP_METRIC; import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS; @@ -35,145 +26,151 @@ import static org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS; import static org.junit.Assert.assertEquals; -public class OverAllQueryMetricsTest { - - private OverAllQueryMetrics overAllQueryMetrics; - private static final long numParallelScans = 10L; - private static final long delta = 1000L; - private static final int queryTimeouts = 5; - private static final int queryFailures = 8; - private static final int cacheRefreshesDueToSplits = 15; - - @Before - public void getFreshMetricsObject() { - overAllQueryMetrics = new OverAllQueryMetrics(true, LogLevel.TRACE); - populateMetrics(overAllQueryMetrics, numParallelScans, queryTimeouts, queryFailures, - cacheRefreshesDueToSplits); - } - - @After - public void reset() { - EnvironmentEdgeManager.reset(); - } - - private static class MyClock extends EnvironmentEdge { - private long time; - private long delta; - - public MyClock(long time, long delta) { - this.time = time; - this.delta = delta; - } - - @Override public long currentTime() { - long prevTime = this.time; - this.time += this.delta; - return prevTime; - } - } - - @Test - public void testQueryWatchTimer() { - assertEquals(0L, overAllQueryMetrics.getWallClockTimeMs()); - MyClock clock = new MyClock(10L, delta); - EnvironmentEdgeManager.injectEdge(clock); - overAllQueryMetrics.startQuery(); - overAllQueryMetrics.endQuery(); - assertEquals(delta, overAllQueryMetrics.getWallClockTimeMs()); - // Ensure that calling endQuery() again doesn't change the wallClockTimeMs - overAllQueryMetrics.endQuery(); - assertEquals(delta, overAllQueryMetrics.getWallClockTimeMs()); - } +import java.util.Map; - @Test - public void testResultSetWatch() { - assertEquals(0L, overAllQueryMetrics.getResultSetTimeMs()); - MyClock clock = new MyClock(10L, delta); - EnvironmentEdgeManager.injectEdge(clock); - 
overAllQueryMetrics.startResultSetWatch(); - overAllQueryMetrics.stopResultSetWatch(); - assertEquals(delta, overAllQueryMetrics.getResultSetTimeMs()); - // Ensure that calling stopResultSetWatch() again doesn't change the resultSetTimeMs - overAllQueryMetrics.stopResultSetWatch(); - assertEquals(delta, overAllQueryMetrics.getResultSetTimeMs()); - } +import org.apache.phoenix.log.LogLevel; +import org.apache.phoenix.util.EnvironmentEdge; +import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; - @Test - public void testPublish() { - MyClock clock = new MyClock(10L, delta); - EnvironmentEdgeManager.injectEdge(clock); - overAllQueryMetrics.startQuery(); - overAllQueryMetrics.startResultSetWatch(); - assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, - queryFailures, cacheRefreshesDueToSplits, 0L); - overAllQueryMetrics.endQuery(); - overAllQueryMetrics.stopResultSetWatch(); - // expect 2 * delta since we call both endQuery() and stopResultSetWatch() - assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, - queryFailures, cacheRefreshesDueToSplits, 2*delta); - } +public class OverAllQueryMetricsTest { - @Test - public void testReset() { - assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, - queryFailures, cacheRefreshesDueToSplits, 0L); - overAllQueryMetrics.reset(); - assertPublishedMetrics(overAllQueryMetrics.publish(), 0L, 0L, 0L, 0L, 0L); + private OverAllQueryMetrics overAllQueryMetrics; + private static final long numParallelScans = 10L; + private static final long delta = 1000L; + private static final int queryTimeouts = 5; + private static final int queryFailures = 8; + private static final int cacheRefreshesDueToSplits = 15; + + @Before + public void getFreshMetricsObject() { + overAllQueryMetrics = new OverAllQueryMetrics(true, LogLevel.TRACE); + populateMetrics(overAllQueryMetrics, numParallelScans, queryTimeouts, queryFailures, + cacheRefreshesDueToSplits); + } + + @After + public void reset() { + EnvironmentEdgeManager.reset(); + } + + private static class MyClock extends EnvironmentEdge { + private long time; + private long delta; + + public MyClock(long time, long delta) { + this.time = time; + this.delta = delta; } - @Test - public void testCombine() { - OverAllQueryMetrics otherMetrics = new OverAllQueryMetrics(true, LogLevel.TRACE); - final long otherNumParallelScans = 9L; - final int otherQueryTimeouts = 8; - final int otherQueryFailures = 7; - final int otherCacheRefreshes = 6; - populateMetrics(otherMetrics, otherNumParallelScans, otherQueryTimeouts, otherQueryFailures, - otherCacheRefreshes); - OverAllQueryMetrics finalMetricObj = this.overAllQueryMetrics.combine(otherMetrics); - assertPublishedMetrics(finalMetricObj.publish(), numParallelScans + otherNumParallelScans, - queryTimeouts + otherQueryTimeouts, queryFailures + otherQueryFailures, - cacheRefreshesDueToSplits + otherCacheRefreshes, 0L); + @Override + public long currentTime() { + long prevTime = this.time; + this.time += this.delta; + return prevTime; } - - @Test - public void testNoOpRequestMetricsIfRequestMetricsDisabled() { - OverAllQueryMetrics noOpMetrics = new OverAllQueryMetrics(false, LogLevel.OFF); - populateMetrics(noOpMetrics, numParallelScans, queryTimeouts, queryFailures, - cacheRefreshesDueToSplits); - Map noOpMap = noOpMetrics.publish(); - assertEquals(1, noOpMap.size()); - assertEquals(0L, 
(long)noOpMap.get(NO_OP_METRIC)); + } + + @Test + public void testQueryWatchTimer() { + assertEquals(0L, overAllQueryMetrics.getWallClockTimeMs()); + MyClock clock = new MyClock(10L, delta); + EnvironmentEdgeManager.injectEdge(clock); + overAllQueryMetrics.startQuery(); + overAllQueryMetrics.endQuery(); + assertEquals(delta, overAllQueryMetrics.getWallClockTimeMs()); + // Ensure that calling endQuery() again doesn't change the wallClockTimeMs + overAllQueryMetrics.endQuery(); + assertEquals(delta, overAllQueryMetrics.getWallClockTimeMs()); + } + + @Test + public void testResultSetWatch() { + assertEquals(0L, overAllQueryMetrics.getResultSetTimeMs()); + MyClock clock = new MyClock(10L, delta); + EnvironmentEdgeManager.injectEdge(clock); + overAllQueryMetrics.startResultSetWatch(); + overAllQueryMetrics.stopResultSetWatch(); + assertEquals(delta, overAllQueryMetrics.getResultSetTimeMs()); + // Ensure that calling stopResultSetWatch() again doesn't change the resultSetTimeMs + overAllQueryMetrics.stopResultSetWatch(); + assertEquals(delta, overAllQueryMetrics.getResultSetTimeMs()); + } + + @Test + public void testPublish() { + MyClock clock = new MyClock(10L, delta); + EnvironmentEdgeManager.injectEdge(clock); + overAllQueryMetrics.startQuery(); + overAllQueryMetrics.startResultSetWatch(); + assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, + queryFailures, cacheRefreshesDueToSplits, 0L); + overAllQueryMetrics.endQuery(); + overAllQueryMetrics.stopResultSetWatch(); + // expect 2 * delta since we call both endQuery() and stopResultSetWatch() + assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, + queryFailures, cacheRefreshesDueToSplits, 2 * delta); + } + + @Test + public void testReset() { + assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts, + queryFailures, cacheRefreshesDueToSplits, 0L); + overAllQueryMetrics.reset(); + assertPublishedMetrics(overAllQueryMetrics.publish(), 0L, 0L, 0L, 0L, 0L); + } + + @Test + public void testCombine() { + OverAllQueryMetrics otherMetrics = new OverAllQueryMetrics(true, LogLevel.TRACE); + final long otherNumParallelScans = 9L; + final int otherQueryTimeouts = 8; + final int otherQueryFailures = 7; + final int otherCacheRefreshes = 6; + populateMetrics(otherMetrics, otherNumParallelScans, otherQueryTimeouts, otherQueryFailures, + otherCacheRefreshes); + OverAllQueryMetrics finalMetricObj = this.overAllQueryMetrics.combine(otherMetrics); + assertPublishedMetrics(finalMetricObj.publish(), numParallelScans + otherNumParallelScans, + queryTimeouts + otherQueryTimeouts, queryFailures + otherQueryFailures, + cacheRefreshesDueToSplits + otherCacheRefreshes, 0L); + } + + @Test + public void testNoOpRequestMetricsIfRequestMetricsDisabled() { + OverAllQueryMetrics noOpMetrics = new OverAllQueryMetrics(false, LogLevel.OFF); + populateMetrics(noOpMetrics, numParallelScans, queryTimeouts, queryFailures, + cacheRefreshesDueToSplits); + Map noOpMap = noOpMetrics.publish(); + assertEquals(1, noOpMap.size()); + assertEquals(0L, (long) noOpMap.get(NO_OP_METRIC)); + } + + private void populateMetrics(OverAllQueryMetrics metricsObj, long numParallelScansSetting, + int queryTimeoutsSetting, int queryFailuresSetting, int cacheRefreshesDueToSplitsSetting) { + metricsObj.updateNumParallelScans(numParallelScansSetting); + for (int i = 0; i < queryTimeoutsSetting; i++) { + metricsObj.queryTimedOut(); } - - private void populateMetrics(OverAllQueryMetrics metricsObj, long 
numParallelScansSetting, - int queryTimeoutsSetting, int queryFailuresSetting, - int cacheRefreshesDueToSplitsSetting) { - metricsObj.updateNumParallelScans(numParallelScansSetting); - for (int i = 0; i < queryTimeoutsSetting; i++) { - metricsObj.queryTimedOut(); - } - for (int i = 0; i < queryFailuresSetting; i++) { - metricsObj.queryFailed(); - } - for (int i = 0; i < cacheRefreshesDueToSplitsSetting; i++) { - metricsObj.cacheRefreshedDueToSplits(); - } + for (int i = 0; i < queryFailuresSetting; i++) { + metricsObj.queryFailed(); } - - private void assertPublishedMetrics( - final Map metrics, - final long expectedNumParallelScans, - final long expectedQueryTimeouts, - final long expectedQueryFailures, - final long expectedCacheRefreshes, - final long expectedElapsedTime) { - assertEquals(expectedNumParallelScans, (long)metrics.get(NUM_PARALLEL_SCANS)); - assertEquals(expectedQueryTimeouts, (long)metrics.get(QUERY_TIMEOUT_COUNTER)); - assertEquals(expectedQueryFailures, (long)metrics.get(QUERY_FAILED_COUNTER)); - assertEquals(expectedCacheRefreshes, (long)metrics.get(CACHE_REFRESH_SPLITS_COUNTER)); - assertEquals(expectedElapsedTime, (long)metrics.get(WALL_CLOCK_TIME_MS)); - assertEquals(expectedElapsedTime, (long)metrics.get(RESULT_SET_TIME_MS)); + for (int i = 0; i < cacheRefreshesDueToSplitsSetting; i++) { + metricsObj.cacheRefreshedDueToSplits(); } + } + + private void assertPublishedMetrics(final Map metrics, + final long expectedNumParallelScans, final long expectedQueryTimeouts, + final long expectedQueryFailures, final long expectedCacheRefreshes, + final long expectedElapsedTime) { + assertEquals(expectedNumParallelScans, (long) metrics.get(NUM_PARALLEL_SCANS)); + assertEquals(expectedQueryTimeouts, (long) metrics.get(QUERY_TIMEOUT_COUNTER)); + assertEquals(expectedQueryFailures, (long) metrics.get(QUERY_FAILED_COUNTER)); + assertEquals(expectedCacheRefreshes, (long) metrics.get(CACHE_REFRESH_SPLITS_COUNTER)); + assertEquals(expectedElapsedTime, (long) metrics.get(WALL_CLOCK_TIME_MS)); + assertEquals(expectedElapsedTime, (long) metrics.get(RESULT_SET_TIME_MS)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/PhoenixTableMetricImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/PhoenixTableMetricImplTest.java index 28611e7f0f2..95e9608f44e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/PhoenixTableMetricImplTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/PhoenixTableMetricImplTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,83 +17,85 @@ */ package org.apache.phoenix.monitoring; -import org.junit.Test; - import static org.junit.Assert.assertEquals; +import org.junit.Test; + public class PhoenixTableMetricImplTest { - /* - Tests the functionality of the TableMetricImpl methods which are exposed - 1. change() - 2. increment() - 3. decrement() - 4. reset() - 5. getValue() - */ - @Test public void testTableMetricImplAvailableMethods() { + /* + * Tests the functionality of the TableMetricImpl methods which are exposed 1. change() 2. + * increment() 3. decrement() 4. reset() 5. 
getValue() + */ + @Test + public void testTableMetricImplAvailableMethods() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - for (int i = 0; i < 10; i++) { - metric.increment(); - } - assertEquals(10, metric.getValue()); - metric.reset(); - assertEquals(0, metric.getValue()); + for (int i = 0; i < 10; i++) { + metric.increment(); + } + assertEquals(10, metric.getValue()); + metric.reset(); + assertEquals(0, metric.getValue()); - for (int i = 0; i < 5; i++) { - metric.change(i); - } - assertEquals(10, metric.getValue()); - metric.reset(); - assertEquals(0, metric.getValue()); + for (int i = 0; i < 5; i++) { + metric.change(i); + } + assertEquals(10, metric.getValue()); + metric.reset(); + assertEquals(0, metric.getValue()); - metric.change(10); - assertEquals(10, metric.getValue()); - for (int i = 0; i < 5; i++) { - metric.decrement(); - } - assertEquals(5, metric.getValue()); + metric.change(10); + assertEquals(10, metric.getValue()); + for (int i = 0; i < 5; i++) { + metric.decrement(); } + assertEquals(5, metric.getValue()); + } - @Test public void testPhoenixImplchange() { + @Test + public void testPhoenixImplchange() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - for (int i = 0; i < 5; i++) { - metric.change(i); - } - assertEquals(10, metric.getValue()); + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + for (int i = 0; i < 5; i++) { + metric.change(i); } + assertEquals(10, metric.getValue()); + } - @Test public void testPhoenixImplIncrement() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - for (int i = 0; i < 10; i++) { - metric.increment(); - } - assertEquals(10, metric.getValue()); + @Test + public void testPhoenixImplIncrement() { + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + for (int i = 0; i < 10; i++) { + metric.increment(); } + assertEquals(10, metric.getValue()); + } - @Test public void testPhoenixImplDecrement() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - metric.change(10); - for (int i = 0; i < 5; i++) { - metric.decrement(); - } - assertEquals(5, metric.getValue()); + @Test + public void testPhoenixImplDecrement() { + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + metric.change(10); + for (int i = 0; i < 5; i++) { + metric.decrement(); } + assertEquals(5, metric.getValue()); + } - @Test public void testPhoenixImplReset() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - metric.change(10); - metric.reset(); - assertEquals(0, metric.getValue()); - } + @Test + public void testPhoenixImplReset() { + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + metric.change(10); + metric.reset(); + assertEquals(0, metric.getValue()); + } - @Test public void testPhoenixImplGetValue() { - PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); - metric.change(10); - assertEquals(10, metric.getValue()); - } + @Test + public void testPhoenixImplGetValue() { + PhoenixTableMetric metric = new PhoenixTableMetricImpl(MetricType.SELECT_SQL_COUNTER); + metric.change(10); + assertEquals(10, metric.getValue()); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/SizeHistogramTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/SizeHistogramTest.java index 616d8dd7028..e40b55d4b32 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/SizeHistogramTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/SizeHistogramTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -19,62 +19,61 @@ import java.util.HashMap; import java.util.Map; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.metrics2.lib.MutableSizeHistogram; import org.apache.phoenix.query.QueryServices; import org.junit.Assert; import org.junit.Test; /** - Test for {@link SizeHistogram} + * Test for {@link SizeHistogram} **/ public class SizeHistogramTest { - @Test - public void testSizeHistogramRangeOverride() { - Configuration conf = new Configuration(); - conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "1, 100, 1000"); - SizeHistogram histogram = new SizeHistogram("PhoenixReadBytesHisto", - "histogram for read bytes", conf); - long[] ranges = histogram.getRanges(); - Assert.assertNotNull(ranges); - Assert.assertEquals(3, ranges.length); - Assert.assertEquals(1, ranges[0]); - Assert.assertEquals(100, ranges[1]); - Assert.assertEquals(1000, ranges[2]); - } + @Test + public void testSizeHistogramRangeOverride() { + Configuration conf = new Configuration(); + conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "1, 100, 1000"); + SizeHistogram histogram = + new SizeHistogram("PhoenixReadBytesHisto", "histogram for read bytes", conf); + long[] ranges = histogram.getRanges(); + Assert.assertNotNull(ranges); + Assert.assertEquals(3, ranges.length); + Assert.assertEquals(1, ranges[0]); + Assert.assertEquals(100, ranges[1]); + Assert.assertEquals(1000, ranges[2]); + } - @Test - public void testEveryRangeInDefaultRange() { - // {10,100,1000,10000,100000,1000000,10000000,100000000}; - Configuration conf = new Configuration(); - String histoName = "PhoenixReadBytesHisto"; - conf.unset(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES); - SizeHistogram histogram = new SizeHistogram(histoName, - "histogram for read bytes", conf); - Assert.assertEquals(histoName, histogram.getName()); - Assert.assertEquals(SizeHistogram.DEFAULT_RANGE, histogram.getRanges()); + @Test + public void testEveryRangeInDefaultRange() { + // {10,100,1000,10000,100000,1000000,10000000,100000000}; + Configuration conf = new Configuration(); + String histoName = "PhoenixReadBytesHisto"; + conf.unset(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES); + SizeHistogram histogram = new SizeHistogram(histoName, "histogram for read bytes", conf); + Assert.assertEquals(histoName, histogram.getName()); + 
Assert.assertEquals(SizeHistogram.DEFAULT_RANGE, histogram.getRanges()); - histogram.add(5); - histogram.add(50); - histogram.add(500); - histogram.add(5000); - histogram.add(50000); - histogram.add(500000); - histogram.add(5000000); - histogram.add(50000000); - Map - distribution = histogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map expectedMap = new HashMap<>(); - expectedMap.put("0,10", 1l); - expectedMap.put("10,100", 1l); - expectedMap.put("100,1000", 1l); - expectedMap.put("1000,10000", 1l); - expectedMap.put("10000,100000", 1l); - expectedMap.put("100000,1000000", 1l); - expectedMap.put("1000000,10000000", 1l); - expectedMap.put("10000000,100000000", 1l); - Assert.assertEquals(expectedMap, distribution); - } + histogram.add(5); + histogram.add(50); + histogram.add(500); + histogram.add(5000); + histogram.add(50000); + histogram.add(500000); + histogram.add(5000000); + histogram.add(50000000); + Map distribution = + histogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map expectedMap = new HashMap<>(); + expectedMap.put("0,10", 1l); + expectedMap.put("10,100", 1l); + expectedMap.put("100,1000", 1l); + expectedMap.put("1000,10000", 1l); + expectedMap.put("10000,100000", 1l); + expectedMap.put("100000,1000000", 1l); + expectedMap.put("1000000,10000000", 1l); + expectedMap.put("10000000,100000000", 1l); + Assert.assertEquals(expectedMap, distribution); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableClientMetricsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableClientMetricsTest.java index 52e7793a8ae..fb98cfdd74f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableClientMetricsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableClientMetricsTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.monitoring; -import static org.junit.Assert.assertTrue; - -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; -import java.util.HashMap; -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_BYTES; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_SQL_COUNTER; @@ -58,161 +48,155 @@ import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectScanFailedCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectScanSuccessCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectSqlQueryTimeCounter; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.tableNames; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.taskEndToEndTimeCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.upsertMutationBytesCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.upsertMutationSqlCounter; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.tableNames; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; /** - * This test does UT for TableClientMetrics class - * This class has following API - * 1. changeMetricValue - * 2.getTableName - * 3.getMetricMap + * This test does UT for TableClientMetrics class This class has following API 1. changeMetricValue + * 2.getTableName 3.getMetricMap */ public class TableClientMetricsTest { - private static Map tableMetricsSet = new HashMap<>(); + private static Map tableMetricsSet = new HashMap<>(); - public void verifyMetricsFromTableClientMetrics() { - assertFalse(tableMetricsSet.isEmpty()); - for (int i = 0; i < tableNames.length; i++) { - TableClientMetrics instance = tableMetricsSet.get(tableNames[i]); - assertEquals(instance.getTableName(), tableNames[i]); - List metricList = instance.getMetricMap(); - for (PhoenixTableMetric metric : metricList) { + public void verifyMetricsFromTableClientMetrics() { + assertFalse(tableMetricsSet.isEmpty()); + for (int i = 0; i < tableNames.length; i++) { + TableClientMetrics instance = tableMetricsSet.get(tableNames[i]); + assertEquals(instance.getTableName(), tableNames[i]); + List metricList = instance.getMetricMap(); + for (PhoenixTableMetric metric : metricList) { - if (metric.getMetricType().equals(MUTATION_BATCH_SIZE)) { - assertEquals(mutationBatchSizeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(UPSERT_MUTATION_BYTES)) { - assertEquals(upsertMutationBytesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(UPSERT_MUTATION_SQL_COUNTER)) { - assertEquals(upsertMutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(DELETE_MUTATION_BYTES)) { - assertEquals(deleteMutationByesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(DELETE_MUTATION_SQL_COUNTER)) { - assertEquals(deleteMutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(MUTATION_SQL_COUNTER)) { - assertEquals(mutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(MUTATION_COMMIT_TIME)) { - 
assertEquals(mutationSqlCommitTimeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(TASK_END_TO_END_TIME)) { - assertEquals(taskEndToEndTimeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(COUNT_ROWS_SCANNED)) { - assertEquals(countRowsScannedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(QUERY_FAILED_COUNTER)) { - assertEquals(queryFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(QUERY_TIMEOUT_COUNTER)) { - assertEquals(queryTimeOutCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SCAN_BYTES)) { - assertEquals(scanBytesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER)) { - assertEquals(selectPointLookUpSuccessCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER)) { - assertEquals(selectPointLookUpFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SQL_QUERY_TIME)) { - assertEquals(selectSqlQueryTimeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SCAN_FAILED_SQL_COUNTER)) { - assertEquals(selectScanFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SCAN_SUCCESS_SQL_COUNTER)) { - assertEquals(selectScanSuccessCounter[i], metric.getValue()); - } - } + if (metric.getMetricType().equals(MUTATION_BATCH_SIZE)) { + assertEquals(mutationBatchSizeCounter[i], metric.getValue()); } - } - - public boolean verifyTableName() { - - if (tableMetricsSet.isEmpty()) { - return false; + if (metric.getMetricType().equals(UPSERT_MUTATION_BYTES)) { + assertEquals(upsertMutationBytesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(UPSERT_MUTATION_SQL_COUNTER)) { + assertEquals(upsertMutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(DELETE_MUTATION_BYTES)) { + assertEquals(deleteMutationByesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(DELETE_MUTATION_SQL_COUNTER)) { + assertEquals(deleteMutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(MUTATION_SQL_COUNTER)) { + assertEquals(mutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(MUTATION_COMMIT_TIME)) { + assertEquals(mutationSqlCommitTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(TASK_END_TO_END_TIME)) { + assertEquals(taskEndToEndTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(COUNT_ROWS_SCANNED)) { + assertEquals(countRowsScannedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(QUERY_FAILED_COUNTER)) { + assertEquals(queryFailedCounter[i], metric.getValue()); } - for (String tableName : tableNames) { - TableClientMetrics instance = tableMetricsSet.get(tableName); - if (!instance.getTableName().equals(tableName)) { - return false; - } + if (metric.getMetricType().equals(QUERY_TIMEOUT_COUNTER)) { + assertEquals(queryTimeOutCounter[i], metric.getValue()); } - return true; + if (metric.getMetricType().equals(SCAN_BYTES)) { + assertEquals(scanBytesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER)) { + assertEquals(selectPointLookUpSuccessCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER)) { + assertEquals(selectPointLookUpFailedCounter[i], metric.getValue()); + } + if 
(metric.getMetricType().equals(SELECT_SQL_QUERY_TIME)) { + assertEquals(selectSqlQueryTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_SCAN_FAILED_SQL_COUNTER)) { + assertEquals(selectScanFailedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_SCAN_SUCCESS_SQL_COUNTER)) { + assertEquals(selectScanSuccessCounter[i], metric.getValue()); + } + } } + } + public boolean verifyTableName() { - /** - * This test is for changeMetricValue() Method and getMetricMap() - */ - @Test - public void testTableClientMetrics() { - Configuration conf = new Configuration(); - for (int i = 0; i < tableNames.length; i++) { - TableClientMetrics tableClientMetrics = new TableClientMetrics(tableNames[i], conf); - tableMetricsSet.put(tableNames[i], tableClientMetrics); - - tableClientMetrics.changeMetricValue(MUTATION_BATCH_SIZE, - mutationBatchSizeCounter[i]); - tableClientMetrics.changeMetricValue(UPSERT_MUTATION_BYTES, - upsertMutationBytesCounter[i]); - tableClientMetrics.changeMetricValue(UPSERT_MUTATION_SQL_COUNTER, - upsertMutationSqlCounter[i]); - tableClientMetrics.changeMetricValue(DELETE_MUTATION_BYTES, - deleteMutationByesCounter[i]); - tableClientMetrics.changeMetricValue(DELETE_MUTATION_SQL_COUNTER, - deleteMutationSqlCounter[i]); - tableClientMetrics.changeMetricValue(MUTATION_SQL_COUNTER, - mutationSqlCounter[i]); - tableClientMetrics.changeMetricValue(MUTATION_COMMIT_TIME, - mutationSqlCommitTimeCounter[i]); - tableClientMetrics.changeMetricValue(TASK_END_TO_END_TIME, - taskEndToEndTimeCounter[i]); - tableClientMetrics.changeMetricValue(COUNT_ROWS_SCANNED, - countRowsScannedCounter[i]); - tableClientMetrics.changeMetricValue(QUERY_FAILED_COUNTER, - queryFailedCounter[i]); - tableClientMetrics.changeMetricValue(QUERY_TIMEOUT_COUNTER, - queryTimeOutCounter[i]); - tableClientMetrics.changeMetricValue(SCAN_BYTES, scanBytesCounter[i]); - tableClientMetrics.changeMetricValue(MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, - selectPointLookUpSuccessCounter[i]); - tableClientMetrics.changeMetricValue(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, - selectPointLookUpFailedCounter[i]); - tableClientMetrics.changeMetricValue(SELECT_SQL_QUERY_TIME, - selectSqlQueryTimeCounter[i]); - tableClientMetrics.changeMetricValue(SELECT_SCAN_SUCCESS_SQL_COUNTER, - selectScanSuccessCounter[i]); - tableClientMetrics.changeMetricValue(SELECT_SCAN_FAILED_SQL_COUNTER, - selectScanFailedCounter[i]); - } - verifyMetricsFromTableClientMetrics(); - tableMetricsSet.clear(); + if (tableMetricsSet.isEmpty()) { + return false; } - - /** - * This test is for getTableName() - */ - @Test - public void testTableClientMetricsforTableName() { - Configuration conf = new Configuration(); - for (int i = 0; i < tableNames.length; i++) { - TableClientMetrics tableClientMetrics = new TableClientMetrics(tableNames[i], conf); - tableMetricsSet.put(tableNames[i], tableClientMetrics); - } - assertTrue(verifyTableName()); + for (String tableName : tableNames) { + TableClientMetrics instance = tableMetricsSet.get(tableName); + if (!instance.getTableName().equals(tableName)) { + return false; + } + } + return true; + } + + /** + * This test is for changeMetricValue() Method and getMetricMap() + */ + @Test + public void testTableClientMetrics() { + Configuration conf = new Configuration(); + for (int i = 0; i < tableNames.length; i++) { + TableClientMetrics tableClientMetrics = new TableClientMetrics(tableNames[i], conf); + tableMetricsSet.put(tableNames[i], tableClientMetrics); + + 
tableClientMetrics.changeMetricValue(MUTATION_BATCH_SIZE, mutationBatchSizeCounter[i]); + tableClientMetrics.changeMetricValue(UPSERT_MUTATION_BYTES, upsertMutationBytesCounter[i]); + tableClientMetrics.changeMetricValue(UPSERT_MUTATION_SQL_COUNTER, + upsertMutationSqlCounter[i]); + tableClientMetrics.changeMetricValue(DELETE_MUTATION_BYTES, deleteMutationByesCounter[i]); + tableClientMetrics.changeMetricValue(DELETE_MUTATION_SQL_COUNTER, + deleteMutationSqlCounter[i]); + tableClientMetrics.changeMetricValue(MUTATION_SQL_COUNTER, mutationSqlCounter[i]); + tableClientMetrics.changeMetricValue(MUTATION_COMMIT_TIME, mutationSqlCommitTimeCounter[i]); + tableClientMetrics.changeMetricValue(TASK_END_TO_END_TIME, taskEndToEndTimeCounter[i]); + tableClientMetrics.changeMetricValue(COUNT_ROWS_SCANNED, countRowsScannedCounter[i]); + tableClientMetrics.changeMetricValue(QUERY_FAILED_COUNTER, queryFailedCounter[i]); + tableClientMetrics.changeMetricValue(QUERY_TIMEOUT_COUNTER, queryTimeOutCounter[i]); + tableClientMetrics.changeMetricValue(SCAN_BYTES, scanBytesCounter[i]); + tableClientMetrics.changeMetricValue(MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, + selectPointLookUpSuccessCounter[i]); + tableClientMetrics.changeMetricValue(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, + selectPointLookUpFailedCounter[i]); + tableClientMetrics.changeMetricValue(SELECT_SQL_QUERY_TIME, selectSqlQueryTimeCounter[i]); + tableClientMetrics.changeMetricValue(SELECT_SCAN_SUCCESS_SQL_COUNTER, + selectScanSuccessCounter[i]); + tableClientMetrics.changeMetricValue(SELECT_SCAN_FAILED_SQL_COUNTER, + selectScanFailedCounter[i]); + } + verifyMetricsFromTableClientMetrics(); + tableMetricsSet.clear(); + } + + /** + * This test is for getTableName() + */ + @Test + public void testTableClientMetricsforTableName() { + Configuration conf = new Configuration(); + for (int i = 0; i < tableNames.length; i++) { + TableClientMetrics tableClientMetrics = new TableClientMetrics(tableNames[i], conf); + tableMetricsSet.put(tableNames[i], tableClientMetrics); } + assertTrue(verifyTableName()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableHistogramsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableHistogramsTest.java index 2d0e52c6fff..ad460fcc783 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableHistogramsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableHistogramsTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -23,25 +23,25 @@ public class TableHistogramsTest { - @Test - public void testTableHistograms() { - String table = "TEST_TABLE"; - Configuration conf = new Configuration(); - TableHistograms tableHistograms = new TableHistograms(table, conf); - Assert.assertEquals(table, tableHistograms.getTableName()); - Assert.assertNotNull(tableHistograms.getUpsertLatencyHisto()); - Assert.assertNotNull(tableHistograms.getUpsertSizeHisto()); - Assert.assertNotNull(tableHistograms.getDeleteLatencyHisto()); - Assert.assertNotNull(tableHistograms.getDeleteSizeHisto()); - Assert.assertNotNull(tableHistograms.getQueryLatencyHisto()); - Assert.assertNotNull(tableHistograms.getQuerySizeHisto()); - Assert.assertNotNull(tableHistograms.getPointLookupLatencyHisto()); - Assert.assertNotNull(tableHistograms.getPointLookupSizeHisto()); - Assert.assertNotNull(tableHistograms.getRangeScanLatencyHisto()); - Assert.assertNotNull(tableHistograms.getRangeScanSizeHisto()); + @Test + public void testTableHistograms() { + String table = "TEST_TABLE"; + Configuration conf = new Configuration(); + TableHistograms tableHistograms = new TableHistograms(table, conf); + Assert.assertEquals(table, tableHistograms.getTableName()); + Assert.assertNotNull(tableHistograms.getUpsertLatencyHisto()); + Assert.assertNotNull(tableHistograms.getUpsertSizeHisto()); + Assert.assertNotNull(tableHistograms.getDeleteLatencyHisto()); + Assert.assertNotNull(tableHistograms.getDeleteSizeHisto()); + Assert.assertNotNull(tableHistograms.getQueryLatencyHisto()); + Assert.assertNotNull(tableHistograms.getQuerySizeHisto()); + Assert.assertNotNull(tableHistograms.getPointLookupLatencyHisto()); + Assert.assertNotNull(tableHistograms.getPointLookupSizeHisto()); + Assert.assertNotNull(tableHistograms.getRangeScanLatencyHisto()); + Assert.assertNotNull(tableHistograms.getRangeScanSizeHisto()); - Assert.assertEquals(5, tableHistograms.getTableLatencyHistogramsDistribution().size()); - Assert.assertEquals(5, tableHistograms.getTableSizeHistogramsDistribution().size()); - } + Assert.assertEquals(5, tableHistograms.getTableLatencyHistogramsDistribution().size()); + Assert.assertEquals(5, tableHistograms.getTableSizeHistogramsDistribution().size()); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableLevelMetricsTestData.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableLevelMetricsTestData.java index 10fae8f042b..8282feb496a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableLevelMetricsTestData.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableLevelMetricsTestData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,160 +15,157 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.monitoring; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.COUNT_ROWS_SCANNED; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_BYTES; import static org.apache.phoenix.monitoring.MetricType.DELETE_MUTATION_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE; import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME; import static org.apache.phoenix.monitoring.MetricType.MUTATION_SQL_COUNTER; +import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES; +import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS; import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER; import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES; -import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES; -import static org.apache.phoenix.monitoring.MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS; import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_FAILED_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_FAILED_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_SCAN_SUCCESS_SQL_COUNTER; import static org.apache.phoenix.monitoring.MetricType.SELECT_SQL_QUERY_TIME; import static org.apache.phoenix.monitoring.MetricType.TASK_END_TO_END_TIME; +import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS; import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_BYTES; import static org.apache.phoenix.monitoring.MetricType.UPSERT_MUTATION_SQL_COUNTER; -import static org.apache.phoenix.monitoring.MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + /** - * This class is used primarily to populate data and - * verification methods + * This class is used primarily to populate data and verification methods */ public class TableLevelMetricsTestData { - public static final String[] tableNames = { "T0001", "T0002", "T0003" }; - public static Map>[] tableMetricsMap = new Map[tableNames.length]; - public static final long[] mutationBatchSizeCounter = { 90, 100, 150 }; - public static final long[] upsertMutationBytesCounter = { 100, 200, 250 }; - public static final long[] upsertMutationSqlCounter = { 100, 200, 300 }; - public static final long[] deleteMutationByesCounter = { 100, 200, 150 }; - public static final long[] deleteMutationSqlCounter = { 100, 200, 140 }; - public static final long[] mutationSqlCounter = { 200, 400, 600 }; - public static final long[] mutationSqlCommitTimeCounter = { 150, 300, 100 }; - public static final long[] taskEndToEndTimeCounter = { 10, 20, 30 }; - public static final long[] countRowsScannedCounter = { 500, 600, 400 }; - public static final long[] queryFailedCounter = { 10, 20, 30 }; - public static final long[] queryTimeOutCounter = { 30, 40, 40 }; - public static final long[] scanBytesCounter = { 500, 600, 400 }; - public static final long[] selectPointLookUpFailedCounter = { 10, 29, 49 }; - public static final long[] 
selectSqlQueryTimeCounter = { 30, 40, 55 }; - public static final long[] selectPointLookUpSuccessCounter = { 10, 20, 55 }; - public static final long[] selectScanSuccessCounter = { 200000, 300000, 4444 }; - public static final long[] selectScanFailedCounter = { 1000000, 20000000, 3455 }; - public static final long[] numRpcSuccessCallsSystemCatalog = {200, 100, 300}; - public static final long[] numRpcFailureCallsSystemCatalog = {100, 200, 300}; - public static final long[] timeTakenForRpcCallsSystemCatalog = {500, 600, 370}; + public static final String[] tableNames = { "T0001", "T0002", "T0003" }; + public static Map>[] tableMetricsMap = new Map[tableNames.length]; + public static final long[] mutationBatchSizeCounter = { 90, 100, 150 }; + public static final long[] upsertMutationBytesCounter = { 100, 200, 250 }; + public static final long[] upsertMutationSqlCounter = { 100, 200, 300 }; + public static final long[] deleteMutationByesCounter = { 100, 200, 150 }; + public static final long[] deleteMutationSqlCounter = { 100, 200, 140 }; + public static final long[] mutationSqlCounter = { 200, 400, 600 }; + public static final long[] mutationSqlCommitTimeCounter = { 150, 300, 100 }; + public static final long[] taskEndToEndTimeCounter = { 10, 20, 30 }; + public static final long[] countRowsScannedCounter = { 500, 600, 400 }; + public static final long[] queryFailedCounter = { 10, 20, 30 }; + public static final long[] queryTimeOutCounter = { 30, 40, 40 }; + public static final long[] scanBytesCounter = { 500, 600, 400 }; + public static final long[] selectPointLookUpFailedCounter = { 10, 29, 49 }; + public static final long[] selectSqlQueryTimeCounter = { 30, 40, 55 }; + public static final long[] selectPointLookUpSuccessCounter = { 10, 20, 55 }; + public static final long[] selectScanSuccessCounter = { 200000, 300000, 4444 }; + public static final long[] selectScanFailedCounter = { 1000000, 20000000, 3455 }; + public static final long[] numRpcSuccessCallsSystemCatalog = { 200, 100, 300 }; + public static final long[] numRpcFailureCallsSystemCatalog = { 100, 200, 300 }; + public static final long[] timeTakenForRpcCallsSystemCatalog = { 500, 600, 370 }; - public static void populateMetrics() { - for (int i = 0; i < tableMetricsMap.length; i++) { - tableMetricsMap[i] = new HashMap<>(); - } - for (int i = 0; i < tableNames.length; i++) { - Map metrics = new HashMap<>(); - metrics.put(MUTATION_BATCH_SIZE, mutationBatchSizeCounter[i]); - metrics.put(UPSERT_MUTATION_BYTES, upsertMutationBytesCounter[i]); - metrics.put(UPSERT_MUTATION_SQL_COUNTER, upsertMutationSqlCounter[i]); - metrics.put(DELETE_MUTATION_BYTES, deleteMutationByesCounter[i]); - metrics.put(DELETE_MUTATION_SQL_COUNTER, deleteMutationSqlCounter[i]); - metrics.put(MUTATION_SQL_COUNTER, mutationSqlCounter[i]); - metrics.put(MUTATION_COMMIT_TIME, mutationSqlCommitTimeCounter[i]); - metrics.put(TASK_END_TO_END_TIME, taskEndToEndTimeCounter[i]); - metrics.put(COUNT_ROWS_SCANNED, countRowsScannedCounter[i]); - metrics.put(QUERY_FAILED_COUNTER, queryFailedCounter[i]); - metrics.put(QUERY_TIMEOUT_COUNTER, queryTimeOutCounter[i]); - metrics.put(SCAN_BYTES, scanBytesCounter[i]); - metrics.put(MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, - selectPointLookUpSuccessCounter[i]); - metrics.put(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, - selectPointLookUpFailedCounter[i]); - metrics.put(MetricType.SELECT_SQL_QUERY_TIME, selectSqlQueryTimeCounter[i]); - metrics.put(SELECT_SCAN_SUCCESS_SQL_COUNTER, selectScanSuccessCounter[i]); - 
metrics.put(MetricType.SELECT_SCAN_FAILED_SQL_COUNTER, selectScanFailedCounter[i]); - tableMetricsMap[i].put(tableNames[i], metrics); - metrics.put(NUM_SYSTEM_TABLE_RPC_SUCCESS, numRpcSuccessCallsSystemCatalog[i]); - metrics.put(NUM_SYSTEM_TABLE_RPC_FAILURES, numRpcFailureCallsSystemCatalog[i]); - metrics.put(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, timeTakenForRpcCallsSystemCatalog[i]); - } + public static void populateMetrics() { + for (int i = 0; i < tableMetricsMap.length; i++) { + tableMetricsMap[i] = new HashMap<>(); } + for (int i = 0; i < tableNames.length; i++) { + Map metrics = new HashMap<>(); + metrics.put(MUTATION_BATCH_SIZE, mutationBatchSizeCounter[i]); + metrics.put(UPSERT_MUTATION_BYTES, upsertMutationBytesCounter[i]); + metrics.put(UPSERT_MUTATION_SQL_COUNTER, upsertMutationSqlCounter[i]); + metrics.put(DELETE_MUTATION_BYTES, deleteMutationByesCounter[i]); + metrics.put(DELETE_MUTATION_SQL_COUNTER, deleteMutationSqlCounter[i]); + metrics.put(MUTATION_SQL_COUNTER, mutationSqlCounter[i]); + metrics.put(MUTATION_COMMIT_TIME, mutationSqlCommitTimeCounter[i]); + metrics.put(TASK_END_TO_END_TIME, taskEndToEndTimeCounter[i]); + metrics.put(COUNT_ROWS_SCANNED, countRowsScannedCounter[i]); + metrics.put(QUERY_FAILED_COUNTER, queryFailedCounter[i]); + metrics.put(QUERY_TIMEOUT_COUNTER, queryTimeOutCounter[i]); + metrics.put(SCAN_BYTES, scanBytesCounter[i]); + metrics.put(MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, + selectPointLookUpSuccessCounter[i]); + metrics.put(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, selectPointLookUpFailedCounter[i]); + metrics.put(MetricType.SELECT_SQL_QUERY_TIME, selectSqlQueryTimeCounter[i]); + metrics.put(SELECT_SCAN_SUCCESS_SQL_COUNTER, selectScanSuccessCounter[i]); + metrics.put(MetricType.SELECT_SCAN_FAILED_SQL_COUNTER, selectScanFailedCounter[i]); + tableMetricsMap[i].put(tableNames[i], metrics); + metrics.put(NUM_SYSTEM_TABLE_RPC_SUCCESS, numRpcSuccessCallsSystemCatalog[i]); + metrics.put(NUM_SYSTEM_TABLE_RPC_FAILURES, numRpcFailureCallsSystemCatalog[i]); + metrics.put(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, timeTakenForRpcCallsSystemCatalog[i]); + } + } - public void verifyMetricsInjection(int noOfTables) { - Map> map = TableMetricsManager.getTableMetricsMethod(); - assertFalse(map == null || map.isEmpty()); - for (int i = 0; i < noOfTables; i++) { - System.out.println("CURRENTLY ON: " + tableNames[i]); - assertTrue(map.containsKey(tableNames[i])); - List tableMetric = map.get(tableNames[i]); - for (PhoenixTableMetric metric : tableMetric) { - if (metric.getMetricType().equals(MUTATION_BATCH_SIZE)) { - assertEquals(mutationBatchSizeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(UPSERT_MUTATION_BYTES)) { - assertEquals(upsertMutationBytesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(UPSERT_MUTATION_SQL_COUNTER)) { - assertEquals(upsertMutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(DELETE_MUTATION_BYTES)) { - assertEquals(deleteMutationByesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(DELETE_MUTATION_SQL_COUNTER)) { - assertEquals(deleteMutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(MUTATION_SQL_COUNTER)) { - assertEquals(mutationSqlCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(MUTATION_COMMIT_TIME)) { - assertEquals(mutationSqlCommitTimeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(TASK_END_TO_END_TIME)) { - assertEquals(taskEndToEndTimeCounter[i], 
metric.getValue()); - } - if (metric.getMetricType().equals(COUNT_ROWS_SCANNED)) { - assertEquals(countRowsScannedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(QUERY_FAILED_COUNTER)) { - assertEquals(queryFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(QUERY_TIMEOUT_COUNTER)) { - assertEquals(queryTimeOutCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SCAN_BYTES)) { - assertEquals(scanBytesCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER)) { - assertEquals(selectPointLookUpSuccessCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER)) { - assertEquals(selectPointLookUpFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SQL_QUERY_TIME)) { - assertEquals(selectSqlQueryTimeCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SCAN_FAILED_SQL_COUNTER)) { - assertEquals(selectScanFailedCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(SELECT_SCAN_SUCCESS_SQL_COUNTER)) { - assertEquals(selectScanSuccessCounter[i], metric.getValue()); - } - } + public void verifyMetricsInjection(int noOfTables) { + Map> map = TableMetricsManager.getTableMetricsMethod(); + assertFalse(map == null || map.isEmpty()); + for (int i = 0; i < noOfTables; i++) { + System.out.println("CURRENTLY ON: " + tableNames[i]); + assertTrue(map.containsKey(tableNames[i])); + List tableMetric = map.get(tableNames[i]); + for (PhoenixTableMetric metric : tableMetric) { + if (metric.getMetricType().equals(MUTATION_BATCH_SIZE)) { + assertEquals(mutationBatchSizeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(UPSERT_MUTATION_BYTES)) { + assertEquals(upsertMutationBytesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(UPSERT_MUTATION_SQL_COUNTER)) { + assertEquals(upsertMutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(DELETE_MUTATION_BYTES)) { + assertEquals(deleteMutationByesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(DELETE_MUTATION_SQL_COUNTER)) { + assertEquals(deleteMutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(MUTATION_SQL_COUNTER)) { + assertEquals(mutationSqlCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(MUTATION_COMMIT_TIME)) { + assertEquals(mutationSqlCommitTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(TASK_END_TO_END_TIME)) { + assertEquals(taskEndToEndTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(COUNT_ROWS_SCANNED)) { + assertEquals(countRowsScannedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(QUERY_FAILED_COUNTER)) { + assertEquals(queryFailedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(QUERY_TIMEOUT_COUNTER)) { + assertEquals(queryTimeOutCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SCAN_BYTES)) { + assertEquals(scanBytesCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER)) { + assertEquals(selectPointLookUpSuccessCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_POINTLOOKUP_FAILED_SQL_COUNTER)) { + assertEquals(selectPointLookUpFailedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_SQL_QUERY_TIME)) { + 
assertEquals(selectSqlQueryTimeCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_SCAN_FAILED_SQL_COUNTER)) { + assertEquals(selectScanFailedCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(SELECT_SCAN_SUCCESS_SQL_COUNTER)) { + assertEquals(selectScanSuccessCounter[i], metric.getValue()); } + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableMetricsManagerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableMetricsManagerTest.java index 54f4c6c6137..6b397f98a7e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableMetricsManagerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/TableMetricsManagerTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,428 +15,458 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.monitoring; -import org.apache.hadoop.conf.Configuration; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; - import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.countRowsScannedCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.deleteMutationByesCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.deleteMutationSqlCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.mutationBatchSizeCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.mutationSqlCommitTimeCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.mutationSqlCounter; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.queryFailedCounter; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.queryTimeOutCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.numRpcFailureCallsSystemCatalog; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.numRpcSuccessCallsSystemCatalog; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.populateMetrics; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.queryFailedCounter; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.queryTimeOutCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.scanBytesCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectPointLookUpFailedCounter; import static 
org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectPointLookUpSuccessCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectScanFailedCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectScanSuccessCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.selectSqlQueryTimeCounter; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.tableNames; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.tableMetricsMap; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.populateMetrics; -import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.timeTakenForRpcCallsSystemCatalog; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.tableNames; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.taskEndToEndTimeCounter; +import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.timeTakenForRpcCallsSystemCatalog; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.upsertMutationBytesCounter; import static org.apache.phoenix.monitoring.TableLevelMetricsTestData.upsertMutationSqlCounter; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + /** - * This method is for UT TableMetricManager class. - * This class exposes 4 static functions - * 1.pushMetricsFromConnInstanceMethod - * 2.getTableMetricsMethod - * 3.clearTableLevelMetricsMethod + * This method is for UT TableMetricManager class. 
This class exposes 4 static functions + * 1.pushMetricsFromConnInstanceMethod 2.getTableMetricsMethod 3.clearTableLevelMetricsMethod * 4.updateMetricsMethod */ public class TableMetricsManagerTest { - public boolean verifyMetricsReset(){ - Map>map = TableMetricsManager.getTableMetricsMethod(); - return map != null && map.isEmpty(); - } - - private static class PushMetrics implements Runnable { - private final Map> map; + public boolean verifyMetricsReset() { + Map> map = TableMetricsManager.getTableMetricsMethod(); + return map != null && map.isEmpty(); + } - public PushMetrics(Map> map) { - this.map = map; - } + private static class PushMetrics implements Runnable { + private final Map> map; - @Override public void run() { - TableMetricsManager.pushMetricsFromConnInstanceMethod(map); - } + public PushMetrics(Map> map) { + this.map = map; } - public boolean verifyTableNamesExists(String tableName){ - Map>map = TableMetricsManager.getTableMetricsMethod(); - return map != null && map.containsKey(tableName); + @Override + public void run() { + TableMetricsManager.pushMetricsFromConnInstanceMethod(map); } + } - /** - * Injecting data parallely to TableMetricsManager using pushMetricsFromConnInstanceMethod() - */ - @Test - public void testVerifyTableLevelMetricsMutilpleThreads() throws Exception { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - options.setTableLevelMetricsEnabled(); - String tableNamesList = tableNames[0] + "," + tableNames[1] + "," + tableNames[2]; - options.setAllowedListForTableLevelMetrics(tableNamesList); - TableMetricsManager tableMetricsManager = new TableMetricsManager(options); - TableMetricsManager.setInstance(tableMetricsManager); - TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); - populateMetrics(); - - ExecutorService executorService = - Executors.newFixedThreadPool(tableNames.length, new ThreadFactory() { - @Override public Thread newThread(Runnable r) { - Thread t = Executors.defaultThreadFactory().newThread(r); - t.setDaemon(true); - t.setPriority(Thread.MIN_PRIORITY); - return t; - } - }); - List> futureList = Lists.newArrayListWithExpectedSize(tableNames.length); - for (int i = 0; i < tableNames.length; ++i) { - futureList.add(executorService.submit(new PushMetrics(tableMetricsMap[i]))); - } - executorService.shutdown(); - executorService.awaitTermination(10, TimeUnit.SECONDS); - testData.verifyMetricsInjection(tableNames.length); - TableMetricsManager.clearTableLevelMetricsMethod(); - assertTrue(verifyMetricsReset()); - } + public boolean verifyTableNamesExists(String tableName) { + Map> map = TableMetricsManager.getTableMetricsMethod(); + return map != null && map.containsKey(tableName); + } - /** - * test for pushMetricsFromConnInstanceMethod() , getTableMetricsMethod() - * and clearTableLevelMetrics(); - */ - @Test - public void testTableMetricsForPushMetricsFromConnInstanceMethod() { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - options.setTableLevelMetricsEnabled(); - String tableNamesList = tableNames[0] + "," + tableNames[1] + "," + tableNames[2]; - options.setAllowedListForTableLevelMetrics(tableNamesList); - TableMetricsManager tableMetricsManager = new TableMetricsManager(options); - TableMetricsManager.setInstance(tableMetricsManager); - - TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); - populateMetrics(); - for(int i = 0; i < tableNames.length ; i++){ - TableMetricsManager.pushMetricsFromConnInstanceMethod(tableMetricsMap[i]); + /** + * Injecting data 
parallely to TableMetricsManager using pushMetricsFromConnInstanceMethod() + */ + @Test + public void testVerifyTableLevelMetricsMutilpleThreads() throws Exception { + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + options.setTableLevelMetricsEnabled(); + String tableNamesList = tableNames[0] + "," + tableNames[1] + "," + tableNames[2]; + options.setAllowedListForTableLevelMetrics(tableNamesList); + TableMetricsManager tableMetricsManager = new TableMetricsManager(options); + TableMetricsManager.setInstance(tableMetricsManager); + TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); + populateMetrics(); + + ExecutorService executorService = + Executors.newFixedThreadPool(tableNames.length, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setDaemon(true); + t.setPriority(Thread.MIN_PRIORITY); + return t; } - testData.verifyMetricsInjection(3); - TableMetricsManager.clearTableLevelMetricsMethod(); - assertTrue(verifyMetricsReset()); + }); + List> futureList = Lists.newArrayListWithExpectedSize(tableNames.length); + for (int i = 0; i < tableNames.length; ++i) { + futureList.add(executorService.submit(new PushMetrics(tableMetricsMap[i]))); } - - /** - * test for updateMetricsMethod() , getTableMetricsMethod() - * and clearTableLevelMetrics(); - */ - @Test - public void testTableMetricsForUpdateMetricsMethod() { - - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - options.setTableLevelMetricsEnabled(); - TableMetricsManager tableMetricsManager = new TableMetricsManager(options); - TableMetricsManager.setInstance(tableMetricsManager); - - TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); - for(int i = 0; i < tableNames.length; i++) { - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.MUTATION_BATCH_SIZE, mutationBatchSizeCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.UPSERT_MUTATION_SQL_COUNTER, upsertMutationSqlCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.UPSERT_MUTATION_BYTES, upsertMutationBytesCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.DELETE_MUTATION_SQL_COUNTER, deleteMutationSqlCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.DELETE_MUTATION_BYTES, deleteMutationByesCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.MUTATION_SQL_COUNTER, mutationSqlCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.MUTATION_COMMIT_TIME, mutationSqlCommitTimeCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.TASK_END_TO_END_TIME, taskEndToEndTimeCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.COUNT_ROWS_SCANNED, countRowsScannedCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.QUERY_FAILED_COUNTER, queryFailedCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.QUERY_TIMEOUT_COUNTER, queryTimeOutCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SCAN_BYTES, scanBytesCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, selectPointLookUpSuccessCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, selectPointLookUpFailedCounter[i]); - 
TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SELECT_SQL_QUERY_TIME, selectSqlQueryTimeCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SELECT_SCAN_FAILED_SQL_COUNTER, selectScanFailedCounter[i]); - TableMetricsManager.updateMetricsMethod(tableNames[i],MetricType.SELECT_SCAN_SUCCESS_SQL_COUNTER, selectScanSuccessCounter[i]); - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], - MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS, numRpcSuccessCallsSystemCatalog[i]); - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], - MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES, numRpcFailureCallsSystemCatalog[i]); - TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], - MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, timeTakenForRpcCallsSystemCatalog[i]); - } - testData.verifyMetricsInjection(3); - TableMetricsManager.clearTableLevelMetricsMethod(); - assertTrue(verifyMetricsReset()); + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + testData.verifyMetricsInjection(tableNames.length); + TableMetricsManager.clearTableLevelMetricsMethod(); + assertTrue(verifyMetricsReset()); + } + + /** + * test for pushMetricsFromConnInstanceMethod() , getTableMetricsMethod() and + * clearTableLevelMetrics(); + */ + @Test + public void testTableMetricsForPushMetricsFromConnInstanceMethod() { + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + options.setTableLevelMetricsEnabled(); + String tableNamesList = tableNames[0] + "," + tableNames[1] + "," + tableNames[2]; + options.setAllowedListForTableLevelMetrics(tableNamesList); + TableMetricsManager tableMetricsManager = new TableMetricsManager(options); + TableMetricsManager.setInstance(tableMetricsManager); + + TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); + populateMetrics(); + for (int i = 0; i < tableNames.length; i++) { + TableMetricsManager.pushMetricsFromConnInstanceMethod(tableMetricsMap[i]); } + testData.verifyMetricsInjection(3); + TableMetricsManager.clearTableLevelMetricsMethod(); + assertTrue(verifyMetricsReset()); + } + /** + * test for updateMetricsMethod() , getTableMetricsMethod() and clearTableLevelMetrics(); + */ + @Test + public void testTableMetricsForUpdateMetricsMethod() { + + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + options.setTableLevelMetricsEnabled(); + TableMetricsManager tableMetricsManager = new TableMetricsManager(options); + TableMetricsManager.setInstance(tableMetricsManager); + + TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); + for (int i = 0; i < tableNames.length; i++) { + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.MUTATION_BATCH_SIZE, + mutationBatchSizeCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.UPSERT_MUTATION_SQL_COUNTER, + upsertMutationSqlCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.UPSERT_MUTATION_BYTES, + upsertMutationBytesCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.DELETE_MUTATION_SQL_COUNTER, + deleteMutationSqlCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.DELETE_MUTATION_BYTES, + deleteMutationByesCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.MUTATION_SQL_COUNTER, + mutationSqlCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.MUTATION_COMMIT_TIME, + 
mutationSqlCommitTimeCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.TASK_END_TO_END_TIME, + taskEndToEndTimeCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.COUNT_ROWS_SCANNED, + countRowsScannedCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.QUERY_FAILED_COUNTER, + queryFailedCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.QUERY_TIMEOUT_COUNTER, + queryTimeOutCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.SCAN_BYTES, + scanBytesCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], + MetricType.SELECT_POINTLOOKUP_SUCCESS_SQL_COUNTER, selectPointLookUpSuccessCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], + MetricType.SELECT_POINTLOOKUP_FAILED_SQL_COUNTER, selectPointLookUpFailedCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], MetricType.SELECT_SQL_QUERY_TIME, + selectSqlQueryTimeCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], + MetricType.SELECT_SCAN_FAILED_SQL_COUNTER, selectScanFailedCounter[i]); + TableMetricsManager.updateMetricsMethod(tableNames[i], + MetricType.SELECT_SCAN_SUCCESS_SQL_COUNTER, selectScanSuccessCounter[i]); + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], + MetricType.NUM_SYSTEM_TABLE_RPC_SUCCESS, numRpcSuccessCallsSystemCatalog[i]); + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], + MetricType.NUM_SYSTEM_TABLE_RPC_FAILURES, numRpcFailureCallsSystemCatalog[i]); + TableMetricsManager.updateMetricsForSystemCatalogTableMethod(tableNames[i], + MetricType.TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS, timeTakenForRpcCallsSystemCatalog[i]); + } + testData.verifyMetricsInjection(3); + TableMetricsManager.clearTableLevelMetricsMethod(); + assertTrue(verifyMetricsReset()); + } + + // This test puts T0001, T0002 in allowed list and verifies the existence of metrics + // and blocking the T0003. + @Test + public void testTableMetricsForPushMetricsFromConnInstanceMethodWithAllowedTables() { + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + options.setTableLevelMetricsEnabled(); + String tableNamesList = tableNames[0] + "," + tableNames[1]; + options.setAllowedListForTableLevelMetrics(tableNamesList); + + TableMetricsManager tableMetricsManager = new TableMetricsManager(options); + TableMetricsManager.setInstance(tableMetricsManager); + + TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); + populateMetrics(); + for (int i = 0; i < tableNames.length; i++) { + TableMetricsManager.pushMetricsFromConnInstanceMethod(tableMetricsMap[i]); + } + testData.verifyMetricsInjection(2); + assertFalse(verifyTableNamesExists(tableNames[2])); + } - //This test puts T0001, T0002 in allowed list and verifies the existence of metrics - //and blocking the T0003. 
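For context on the allowed-list test in this hunk: metrics are pushed for all three tables, but only T0001 and T0002 are expected to be retained, and T0003 must be absent. A minimal standalone sketch of that kind of gating, assuming a simple comma-separated allow-list (the class and method names below are hypothetical illustrations, not Phoenix APIs, and this is not the TableMetricsManager implementation):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class AllowListSketch {
  private final Set<String> allowedTables;

  AllowListSketch(String commaSeparatedAllowList) {
    // e.g. "T0001,T0002" -> {T0001, T0002}
    this.allowedTables = new HashSet<>(Arrays.asList(commaSeparatedAllowList.split(",")));
  }

  boolean shouldRecord(String tableName) {
    // Metrics are only kept for tables that were explicitly allowed.
    return allowedTables.contains(tableName);
  }

  public static void main(String[] args) {
    AllowListSketch allowList = new AllowListSketch("T0001,T0002");
    System.out.println(allowList.shouldRecord("T0001")); // true
    System.out.println(allowList.shouldRecord("T0003")); // false, as the test asserts for the third table
  }
}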
- @Test - public void testTableMetricsForPushMetricsFromConnInstanceMethodWithAllowedTables() { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - options.setTableLevelMetricsEnabled(); - String tableNamesList = tableNames[0] + "," + tableNames[1]; - options.setAllowedListForTableLevelMetrics(tableNamesList); - - TableMetricsManager tableMetricsManager = new TableMetricsManager(options); - TableMetricsManager.setInstance(tableMetricsManager); - - TableLevelMetricsTestData testData = new TableLevelMetricsTestData(); - populateMetrics(); - for(int i = 0; i < tableNames.length ; i++){ - TableMetricsManager.pushMetricsFromConnInstanceMethod(tableMetricsMap[i]); - } - - testData.verifyMetricsInjection(2); - assertFalse(verifyTableNamesExists(tableNames[2])); + /* + * Tests histogram metrics for upsert mutations. + */ + @Test + public void testHistogramMetricsForUpsertMutations() { + String tableName = "TEST-TABLE"; + Configuration conf = new Configuration(); + conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); + conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); + + QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); + Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); + Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); + Mockito.doReturn(conf).when(mockOptions).getConfiguration(); + TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); + TableMetricsManager.setInstance(tableMetricsManager); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 1, true); + MutationMetricQueue.MutationMetric metric = new MutationMetricQueue.MutationMetric(0L, 5L, 0L, + 0L, 0L, 0L, 0L, 1L, 0L, 5L, 0L, 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 2, true); + metric = new MutationMetricQueue.MutationMetric(0L, 10L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 10L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 4, true); + metric = new MutationMetricQueue.MutationMetric(0L, 50L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 50L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 5, true); + metric = new MutationMetricQueue.MutationMetric(0L, 100L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 100L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 6, true); + metric = new MutationMetricQueue.MutationMetric(0L, 500L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 500L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 8, true); + metric = new MutationMetricQueue.MutationMetric(0L, 1000L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1000L, + 0L, 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), true); + + // 
Generate distribution map from histogram snapshots. + LatencyHistogram latencyHistogram = + TableMetricsManager.getUpsertLatencyHistogramForTable(tableName); + SizeHistogram sizeHistogram = TableMetricsManager.getUpsertSizeHistogramForTable(tableName); + + Map latencyMap = + latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map sizeMap = + sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : latencyMap.values()) { + Assert.assertEquals(new Long(2), count); + } + for (Long count : sizeMap.values()) { + Assert.assertEquals(new Long(2), count); } + } - /* - Tests histogram metrics for upsert mutations. + /* + * Tests histogram metrics for delete mutations. */ - @Test - public void testHistogramMetricsForUpsertMutations() { - String tableName = "TEST-TABLE"; - Configuration conf = new Configuration(); - conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); - conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); - - QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); - Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); - Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); - Mockito.doReturn(conf).when(mockOptions).getConfiguration(); - TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); - TableMetricsManager.setInstance(tableMetricsManager); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 1, true); - MutationMetricQueue.MutationMetric metric = new MutationMetricQueue.MutationMetric( - 0L, 5L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 5L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 2, true); - metric = new MutationMetricQueue.MutationMetric(0L, 10L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 10L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 4, true); - metric = new MutationMetricQueue.MutationMetric(0L, 50L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 50L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 5, true); - metric = new MutationMetricQueue.MutationMetric(0L, 100L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 100L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 6, true); - metric = new MutationMetricQueue.MutationMetric(0L, 500L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 500L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 8, true); - metric = new MutationMetricQueue.MutationMetric(0L, 1000L, 0L, 0L, 0L,0L, - 0L, 1L, 0L, 1000L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), true); - - - // Generate distribution map from histogram snapshots. 
- LatencyHistogram latencyHistogram = - TableMetricsManager.getUpsertLatencyHistogramForTable(tableName); - SizeHistogram sizeHistogram = TableMetricsManager.getUpsertSizeHistogramForTable(tableName); - - Map latencyMap = latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map sizeMap = sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: latencyMap.values()) { - Assert.assertEquals(new Long(2), count); - } - for (Long count: sizeMap.values()) { - Assert.assertEquals(new Long(2), count); - } + @Test + public void testHistogramMetricsForDeleteMutations() { + String tableName = "TEST-TABLE"; + Configuration conf = new Configuration(); + conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); + conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); + + QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); + Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); + Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); + Mockito.doReturn(conf).when(mockOptions).getConfiguration(); + TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); + TableMetricsManager.setInstance(tableMetricsManager); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 1, false); + MutationMetricQueue.MutationMetric metric = new MutationMetricQueue.MutationMetric(0L, 0L, 5L, + 0L, 0L, 0L, 0L, 0L, 1L, 5L, 0L, 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 2, false); + metric = new MutationMetricQueue.MutationMetric(0L, 0L, 10L, 0L, 0L, 0L, 0L, 0L, 1L, 10L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 4, false); + metric = new MutationMetricQueue.MutationMetric(0L, 0L, 50L, 0L, 0L, 0L, 0L, 0L, 1L, 50L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 5, false); + metric = new MutationMetricQueue.MutationMetric(0L, 0L, 100L, 0L, 0L, 0L, 0L, 0L, 1L, 100L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 6, false); + metric = new MutationMetricQueue.MutationMetric(0L, 0L, 500L, 0L, 0L, 0L, 0L, 0L, 1L, 500L, 0L, + 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + TableMetricsManager.updateLatencyHistogramForMutations(tableName, 8, false); + metric = new MutationMetricQueue.MutationMetric(0L, 0L, 1000L, 0L, 0L, 0L, 0L, 0L, 1L, 1000L, + 0L, 0L, 0L, 0L, 0L); + TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, + metric.getTotalMutationsSizeBytes().getValue(), false); + + // Generate distribution map from histogram snapshots. 
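Both the upsert and delete histogram tests in these hunks set PHOENIX_HISTOGRAM_LATENCY_RANGES to "2,5,8", push the latencies 1, 2, 4, 5, 6 and 8, and then assert exactly two observations per bucket. Assuming the configured values act as inclusive upper bucket boundaries (an assumption about the bucketing semantics, not something the patch states), the expected distribution can be reproduced with a small standalone sketch (not the Phoenix histogram implementation):

import java.util.LinkedHashMap;
import java.util.Map;

public class RangeBucketSketch {
  public static void main(String[] args) {
    long[] upperBounds = { 2, 5, 8 };       // from PHOENIX_HISTOGRAM_LATENCY_RANGES in the test
    long[] observed = { 1, 2, 4, 5, 6, 8 }; // latencies pushed by the test
    Map<Long, Long> counts = new LinkedHashMap<>();
    for (long bound : upperBounds) {
      counts.put(bound, 0L);
    }
    for (long value : observed) {
      for (long bound : upperBounds) {
        if (value <= bound) {
          // The value lands in the first bucket whose upper bound covers it.
          counts.merge(bound, 1L, Long::sum);
          break;
        }
      }
    }
    System.out.println(counts); // {2=2, 5=2, 8=2} -- two observations per bucket, as asserted
  }
}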
+ LatencyHistogram latencyHistogram = + TableMetricsManager.getDeleteLatencyHistogramForTable(tableName); + SizeHistogram sizeHistogram = TableMetricsManager.getDeleteSizeHistogramForTable(tableName); + + Map latencyMap = + latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map sizeMap = + sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : latencyMap.values()) { + Assert.assertEquals(new Long(2), count); } - - /* - Tests histogram metrics for delete mutations. - */ - @Test - public void testHistogramMetricsForDeleteMutations() { - String tableName = "TEST-TABLE"; - Configuration conf = new Configuration(); - conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); - conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); - - QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); - Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); - Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); - Mockito.doReturn(conf).when(mockOptions).getConfiguration(); - TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); - TableMetricsManager.setInstance(tableMetricsManager); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 1, false); - MutationMetricQueue.MutationMetric metric = new MutationMetricQueue.MutationMetric( - 0L, 0L, 5L, 0L, 0L, 0L, - 0L, 0L, 1L, 5L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 2, false); - metric = new MutationMetricQueue.MutationMetric(0L, 0L, 10L, 0L, 0L, 0L, - 0L, 0L, 1L, 10L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 4, false); - metric = new MutationMetricQueue.MutationMetric(0L, 0L, 50L, 0L, 0L, 0L, - 0L, 0L, 1L, 50L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 5,false); - metric = new MutationMetricQueue.MutationMetric(0L, 0L, 100L, 0L, 0L, 0L, - 0L, 0L, 1L, 100L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 6,false); - metric = new MutationMetricQueue.MutationMetric(0L, 0L, 500L, 0L, 0L, 0L, - 0L, 0L, 1L, 500L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - TableMetricsManager.updateLatencyHistogramForMutations(tableName, 8, false); - metric = new MutationMetricQueue.MutationMetric(0L, 0L, 1000L, 0L, 0L, 0L, - 0L, 0L, 1L, 1000L, 0L, 0L, 0L, 0L, 0L); - TableMetricsManager.updateSizeHistogramMetricsForMutations(tableName, metric.getTotalMutationsSizeBytes().getValue(), false); - - - // Generate distribution map from histogram snapshots. 
- LatencyHistogram latencyHistogram = - TableMetricsManager.getDeleteLatencyHistogramForTable(tableName); - SizeHistogram sizeHistogram = TableMetricsManager.getDeleteSizeHistogramForTable(tableName); - - Map latencyMap = latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map sizeMap = sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: latencyMap.values()) { - Assert.assertEquals(new Long(2), count); - } - for (Long count: sizeMap.values()) { - Assert.assertEquals(new Long(2), count); - } + for (Long count : sizeMap.values()) { + Assert.assertEquals(new Long(2), count); } + } - /* - Tests histogram metrics for select query, point lookup query and range scan query. - */ - @Test - public void testHistogramMetricsForQuery() { - String tableName = "TEST-TABLE"; - Configuration conf = new Configuration(); - conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); - conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); - - QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); - Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); - Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); - Mockito.doReturn(conf).when(mockOptions).getConfiguration(); - TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); - TableMetricsManager.setInstance(tableMetricsManager); - - //Generate 2 read metrics in each bucket, one with point lookup and other with range scan. - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 1, true); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(5l, tableName, true); - - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 2, false); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(10l, tableName, false); - - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 4, true); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(50l, tableName, true); - - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 5, false); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(100l, tableName, false); - - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 7, true); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(500l, tableName, true); - - TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 8, false); - TableMetricsManager.updateHistogramMetricsForQueryScanBytes(1000l, tableName, false); - - // Generate distribution map from histogram snapshots. - LatencyHistogram latencyHistogram = - TableMetricsManager.getQueryLatencyHistogramForTable(tableName); - SizeHistogram sizeHistogram = TableMetricsManager.getQuerySizeHistogramForTable(tableName); - - Map latencyMap = latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map sizeMap = sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: latencyMap.values()) { - Assert.assertEquals(new Long(2), count); - } - for (Long count: sizeMap.values()) { - Assert.assertEquals(new Long(2), count); - } - - // Verify there is 1 entry in each bucket for point lookup query. 
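The query histogram test in this hunk additionally distinguishes point-lookup from range-scan traffic: each query updates the overall latency and size histograms plus exactly one per-type histogram, which is why the overall buckets hold two entries while the point-lookup and range-scan buckets hold one each. A rough standalone sketch of that split (hypothetical names, not Phoenix classes):

import java.util.HashMap;
import java.util.Map;

public class QueryHistogramSplitSketch {
  private final Map<String, Long> overall = new HashMap<>();
  private final Map<String, Long> pointLookup = new HashMap<>();
  private final Map<String, Long> rangeScan = new HashMap<>();

  void record(String bucket, boolean isPointLookup) {
    overall.merge(bucket, 1L, Long::sum);
    // The same observation also lands in exactly one per-type histogram.
    (isPointLookup ? pointLookup : rangeScan).merge(bucket, 1L, Long::sum);
  }

  public static void main(String[] args) {
    QueryHistogramSplitSketch histograms = new QueryHistogramSplitSketch();
    histograms.record("0-2", true);  // point lookup falling in the first bucket
    histograms.record("0-2", false); // range scan falling in the same bucket
    System.out.println(histograms.overall);     // {0-2=2}
    System.out.println(histograms.pointLookup); // {0-2=1}
    System.out.println(histograms.rangeScan);   // {0-2=1}
  }
}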
- LatencyHistogram pointLookupLtHisto = - TableMetricsManager.getPointLookupLatencyHistogramForTable(tableName); - SizeHistogram pointLookupSizeHisto = - TableMetricsManager.getPointLookupSizeHistogramForTable(tableName); - - Map pointLookupLtMap = pointLookupLtHisto.getRangeHistogramDistribution().getRangeDistributionMap(); - Map pointLookupSizeMap = pointLookupSizeHisto.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: pointLookupLtMap.values()) { - Assert.assertEquals(new Long(1), count); - } - for (Long count: pointLookupSizeMap.values()) { - Assert.assertEquals(new Long(1), count); - } + /* + * Tests histogram metrics for select query, point lookup query and range scan query. + */ + @Test + public void testHistogramMetricsForQuery() { + String tableName = "TEST-TABLE"; + Configuration conf = new Configuration(); + conf.set(QueryServices.PHOENIX_HISTOGRAM_LATENCY_RANGES, "2,5,8"); + conf.set(QueryServices.PHOENIX_HISTOGRAM_SIZE_RANGES, "10, 100, 1000"); + + QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); + Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); + Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); + Mockito.doReturn(conf).when(mockOptions).getConfiguration(); + TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); + TableMetricsManager.setInstance(tableMetricsManager); + + // Generate 2 read metrics in each bucket, one with point lookup and other with range scan. + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 1, true); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(5l, tableName, true); + + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 2, false); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(10l, tableName, false); + + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 4, true); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(50l, tableName, true); + + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 5, false); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(100l, tableName, false); + + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 7, true); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(500l, tableName, true); + + TableMetricsManager.updateHistogramMetricsForQueryLatency(tableName, 8, false); + TableMetricsManager.updateHistogramMetricsForQueryScanBytes(1000l, tableName, false); + + // Generate distribution map from histogram snapshots. + LatencyHistogram latencyHistogram = + TableMetricsManager.getQueryLatencyHistogramForTable(tableName); + SizeHistogram sizeHistogram = TableMetricsManager.getQuerySizeHistogramForTable(tableName); + + Map latencyMap = + latencyHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map sizeMap = + sizeHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : latencyMap.values()) { + Assert.assertEquals(new Long(2), count); + } + for (Long count : sizeMap.values()) { + Assert.assertEquals(new Long(2), count); + } - // Verify there is 1 entry in each bucket for range scan query. - LatencyHistogram rangeScanLtHisto = - TableMetricsManager.getRangeScanLatencyHistogramForTable(tableName); - SizeHistogram rangeScanSizeHisto = - TableMetricsManager.getRangeScanSizeHistogramForTable(tableName); + // Verify there is 1 entry in each bucket for point lookup query. 
+ LatencyHistogram pointLookupLtHisto = + TableMetricsManager.getPointLookupLatencyHistogramForTable(tableName); + SizeHistogram pointLookupSizeHisto = + TableMetricsManager.getPointLookupSizeHistogramForTable(tableName); + + Map pointLookupLtMap = + pointLookupLtHisto.getRangeHistogramDistribution().getRangeDistributionMap(); + Map pointLookupSizeMap = + pointLookupSizeHisto.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : pointLookupLtMap.values()) { + Assert.assertEquals(new Long(1), count); + } + for (Long count : pointLookupSizeMap.values()) { + Assert.assertEquals(new Long(1), count); + } - Map rangeScanLtMap = rangeScanLtHisto.getRangeHistogramDistribution().getRangeDistributionMap(); - Map rangeScanSizeMap = rangeScanSizeHisto.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: rangeScanLtMap.values()) { - Assert.assertEquals(new Long(1), count); - } - for (Long count: rangeScanSizeMap.values()) { - Assert.assertEquals(new Long(1), count); - } + // Verify there is 1 entry in each bucket for range scan query. + LatencyHistogram rangeScanLtHisto = + TableMetricsManager.getRangeScanLatencyHistogramForTable(tableName); + SizeHistogram rangeScanSizeHisto = + TableMetricsManager.getRangeScanSizeHistogramForTable(tableName); + + Map rangeScanLtMap = + rangeScanLtHisto.getRangeHistogramDistribution().getRangeDistributionMap(); + Map rangeScanSizeMap = + rangeScanSizeHisto.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : rangeScanLtMap.values()) { + Assert.assertEquals(new Long(1), count); } + for (Long count : rangeScanSizeMap.values()) { + Assert.assertEquals(new Long(1), count); + } + } - @Test - public void testTableMetricsNull() { - String tableName = "TEST-TABLE"; - String badTableName = "NOT-ALLOWED-TABLE"; + @Test + public void testTableMetricsNull() { + String tableName = "TEST-TABLE"; + String badTableName = "NOT-ALLOWED-TABLE"; - QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); - Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); - Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); + QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); + Mockito.doReturn(true).when(mockOptions).isTableLevelMetricsEnabled(); + Mockito.doReturn(tableName).when(mockOptions).getAllowedListTableNames(); - TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); - TableMetricsManager.setInstance(tableMetricsManager); - Assert.assertNull(TableMetricsManager.getQueryLatencyHistogramForTable(badTableName)); - } + TableMetricsManager tableMetricsManager = new TableMetricsManager(mockOptions); + TableMetricsManager.setInstance(tableMetricsManager); + Assert.assertNull(TableMetricsManager.getQueryLatencyHistogramForTable(badTableName)); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogramTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogramTest.java index 5b3b107df78..3f11c515e83 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogramTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesHistogramTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,57 +17,58 @@ */ package org.apache.phoenix.monitoring.connectionqueryservice; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.query.QueryServices; import org.junit.Assert; import org.junit.Test; -import java.util.HashMap; -import java.util.Map; - public class ConnectionQueryServicesHistogramTest { - @Test - public void testConnectionQueryServiceHistogramRangeOverride() { - String histoName = "PhoenixInternalOpenConn"; - Configuration conf = new Configuration(); - conf.set(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, "2, 5, 8"); - ConnectionQueryServicesHistogram histogram = new ConnectionQueryServicesHistogram(histoName, - "histogram for Number of open internal phoenix connections", conf); - Assert.assertEquals(histoName, histogram.getName()); - long[] ranges = histogram.getRanges(); - Assert.assertNotNull(ranges); - long[] expectRanges = {2,5,8}; - Assert.assertArrayEquals(expectRanges, ranges); - } + @Test + public void testConnectionQueryServiceHistogramRangeOverride() { + String histoName = "PhoenixInternalOpenConn"; + Configuration conf = new Configuration(); + conf.set(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, "2, 5, 8"); + ConnectionQueryServicesHistogram histogram = new ConnectionQueryServicesHistogram(histoName, + "histogram for Number of open internal phoenix connections", conf); + Assert.assertEquals(histoName, histogram.getName()); + long[] ranges = histogram.getRanges(); + Assert.assertNotNull(ranges); + long[] expectRanges = { 2, 5, 8 }; + Assert.assertArrayEquals(expectRanges, ranges); + } - @Test - public void testEveryRangeInDefaultRange() { - //1, 3, 7, 9, 15, 30, 120, 600 - Configuration conf = new Configuration(); - String histoName = "PhoenixInternalOpenConn"; - conf.unset(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES); - ConnectionQueryServicesHistogram histogram = new ConnectionQueryServicesHistogram(histoName, - "histogram for Number of open internal phoenix connections", conf); - Assert.assertEquals(histoName, histogram.getName()); - Assert.assertEquals(ConnectionQueryServicesHistogram.DEFAULT_RANGE, histogram.getRanges()); + @Test + public void testEveryRangeInDefaultRange() { + // 1, 3, 7, 9, 15, 30, 120, 600 + Configuration conf = new Configuration(); + String histoName = "PhoenixInternalOpenConn"; + conf.unset(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES); + ConnectionQueryServicesHistogram histogram = new ConnectionQueryServicesHistogram(histoName, + "histogram for Number of open internal phoenix connections", conf); + Assert.assertEquals(histoName, histogram.getName()); + Assert.assertEquals(ConnectionQueryServicesHistogram.DEFAULT_RANGE, histogram.getRanges()); - histogram.add(1); - histogram.add(3); - histogram.add(7); - histogram.add(9); - histogram.add(15); - histogram.add(30); - histogram.add(120); - histogram.add(600); + histogram.add(1); + histogram.add(3); + histogram.add(7); + histogram.add(9); + histogram.add(15); + 
histogram.add(30); + histogram.add(120); + histogram.add(600); - Map distribution = histogram.getRangeHistogramDistribution().getRangeDistributionMap(); - Map expectedMap = new HashMap<>(); - expectedMap.put("0,1", 1l); - expectedMap.put("1,10", 3l); - expectedMap.put("10,100", 2l); - expectedMap.put("100,500", 1l); - expectedMap.put("500,1000", 1l); - Assert.assertEquals(expectedMap, distribution); - } + Map distribution = + histogram.getRangeHistogramDistribution().getRangeDistributionMap(); + Map expectedMap = new HashMap<>(); + expectedMap.put("0,1", 1l); + expectedMap.put("1,10", 3l); + expectedMap.put("10,100", 2l); + expectedMap.put("100,500", 1l); + expectedMap.put("500,1000", 1l); + Assert.assertEquals(expectedMap, distribution); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistogramsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistogramsTest.java index f9fa36375dc..c8654b824e5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistogramsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsHistogramsTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,16 +22,20 @@ import org.junit.Test; public class ConnectionQueryServicesMetricsHistogramsTest { - @Test - public void testConnectionQueryServiceMetricsHistograms() { - String connectionQueryServiceName = "USE_CASE_1"; - Configuration conf = new Configuration(); - ConnectionQueryServicesMetricsHistograms - connectionQueryServiceMetricsHistograms = new ConnectionQueryServicesMetricsHistograms(connectionQueryServiceName, conf); - Assert.assertEquals(connectionQueryServiceName, connectionQueryServiceMetricsHistograms.getConnectionQueryServicesName()); - Assert.assertNotNull(connectionQueryServiceMetricsHistograms.getConnectionQueryServicesOpenConnHisto()); - Assert.assertNotNull(connectionQueryServiceMetricsHistograms.getConnectionQueryServicesInternalOpenConnHisto()); + @Test + public void testConnectionQueryServiceMetricsHistograms() { + String connectionQueryServiceName = "USE_CASE_1"; + Configuration conf = new Configuration(); + ConnectionQueryServicesMetricsHistograms connectionQueryServiceMetricsHistograms = + new ConnectionQueryServicesMetricsHistograms(connectionQueryServiceName, conf); + Assert.assertEquals(connectionQueryServiceName, + connectionQueryServiceMetricsHistograms.getConnectionQueryServicesName()); + Assert.assertNotNull( + connectionQueryServiceMetricsHistograms.getConnectionQueryServicesOpenConnHisto()); + Assert.assertNotNull( + connectionQueryServiceMetricsHistograms.getConnectionQueryServicesInternalOpenConnHisto()); - Assert.assertEquals(2, connectionQueryServiceMetricsHistograms.getConnectionQueryServicesHistogramsDistribution().size()); - } + Assert.assertEquals(2, 
connectionQueryServiceMetricsHistograms + .getConnectionQueryServicesHistogramsDistribution().size()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManagerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManagerTest.java index b2073433c4d..a41deddcf85 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManagerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsManagerTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,17 +17,6 @@ */ package org.apache.phoenix.monitoring.connectionqueryservice; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.List; -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; @@ -37,76 +26,86 @@ import static org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesNameMetricsTest.phoenixConnThrottledCounter; import static org.junit.Assert.assertTrue; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; +import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.query.QueryServicesOptions; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + public class ConnectionQueryServicesMetricsManagerTest { - public boolean verifyMetricsReset(){ - Map<String, List<ConnectionQueryServicesMetric>> map = - ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); - return map != null && map.isEmpty(); - } + public boolean verifyMetricsReset() { + Map<String, List<ConnectionQueryServicesMetric>> map = + ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); + return map != null && map.isEmpty(); + } - public boolean verifyConnectionQueryServiceNamesExists(String connectionQueryServiceName){ - Map<String, List<ConnectionQueryServicesMetric>>map = - ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); - return map != null && map.containsKey(connectionQueryServiceName); - } + public boolean verifyConnectionQueryServiceNamesExists(String connectionQueryServiceName) { + Map<String, List<ConnectionQueryServicesMetric>> map = + ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); + return map != null && map.containsKey(connectionQueryServiceName); + } - @Test - public void testConnectionQueryServiceMetricsForUpdateMetricsMethod() { + @Test + public void
testConnectionQueryServiceMetricsForUpdateMetricsMethod() { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - options.setConnectionQueryServiceMetricsEnabled(); - ConnectionQueryServicesMetricsManager connectionQueryServicesMetricsManager = - new ConnectionQueryServicesMetricsManager(options); - ConnectionQueryServicesMetricsManager.setInstance(connectionQueryServicesMetricsManager); + QueryServicesOptions options = QueryServicesOptions.withDefaults(); + options.setConnectionQueryServiceMetricsEnabled(); + ConnectionQueryServicesMetricsManager connectionQueryServicesMetricsManager = + new ConnectionQueryServicesMetricsManager(options); + ConnectionQueryServicesMetricsManager.setInstance(connectionQueryServicesMetricsManager); - ConnectionQueryServicesNameMetricsTest - testData = new ConnectionQueryServicesNameMetricsTest(); - testData.populateMetrics(); - for(int i = 0; i < connectionQueryServiceNames.length; i++) { - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], - OPEN_PHOENIX_CONNECTIONS_COUNTER, openPhoenixConnCounter[i]); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, openInternalPhoenixConnCounter[i]); - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], - PHOENIX_CONNECTIONS_THROTTLED_COUNTER, phoenixConnThrottledCounter[i]); - } - testData.verfiyCountOfConnectionQueryServices(connectionQueryServiceNames.length); - ConnectionQueryServicesMetricsManager.clearAllConnectionQueryServiceMetrics(); - assertTrue(verifyMetricsReset()); + ConnectionQueryServicesNameMetricsTest testData = new ConnectionQueryServicesNameMetricsTest(); + testData.populateMetrics(); + for (int i = 0; i < connectionQueryServiceNames.length; i++) { + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], + OPEN_PHOENIX_CONNECTIONS_COUNTER, openPhoenixConnCounter[i]); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], + OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, openInternalPhoenixConnCounter[i]); + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceNames[i], + PHOENIX_CONNECTIONS_THROTTLED_COUNTER, phoenixConnThrottledCounter[i]); } + testData.verfiyCountOfConnectionQueryServices(connectionQueryServiceNames.length); + ConnectionQueryServicesMetricsManager.clearAllConnectionQueryServiceMetrics(); + assertTrue(verifyMetricsReset()); + } - @Test - public void testHistogramMetricsForOpenPhoenixConnectionCounter() { - String connectionQueryServiceName = "USE_CASE_1"; - Configuration conf = new Configuration(); - conf.set(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, "3, 6, 9"); - - QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); - Mockito.doReturn(true).when(mockOptions) - .isConnectionQueryServiceMetricsEnabled(); - Mockito.doReturn(conf).when(mockOptions).getConfiguration(); - ConnectionQueryServicesMetricsManager connectionQueryServicesMetricsManager = - new ConnectionQueryServicesMetricsManager(mockOptions); - ConnectionQueryServicesMetricsManager.setInstance(connectionQueryServicesMetricsManager); - for (int i=0; i<9; i++) { - updateMetricsAndHistogram(i+1, connectionQueryServiceName); - } + @Test + public void testHistogramMetricsForOpenPhoenixConnectionCounter() { + String connectionQueryServiceName = "USE_CASE_1"; + Configuration conf = new Configuration(); + 
conf.set(QueryServices.CONNECTION_QUERY_SERVICE_HISTOGRAM_SIZE_RANGES, "3, 6, 9"); + QueryServicesOptions mockOptions = Mockito.mock(QueryServicesOptions.class); + Mockito.doReturn(true).when(mockOptions).isConnectionQueryServiceMetricsEnabled(); + Mockito.doReturn(conf).when(mockOptions).getConfiguration(); + ConnectionQueryServicesMetricsManager connectionQueryServicesMetricsManager = + new ConnectionQueryServicesMetricsManager(mockOptions); + ConnectionQueryServicesMetricsManager.setInstance(connectionQueryServicesMetricsManager); + for (int i = 0; i < 9; i++) { + updateMetricsAndHistogram(i + 1, connectionQueryServiceName); + } - // Generate distribution map from histogram snapshots. - ConnectionQueryServicesHistogram connectionQueryServicesHistogram = - ConnectionQueryServicesMetricsManager.getConnectionQueryServiceOpenConnectionHistogram(connectionQueryServiceName); + // Generate distribution map from histogram snapshots. + ConnectionQueryServicesHistogram connectionQueryServicesHistogram = + ConnectionQueryServicesMetricsManager + .getConnectionQueryServiceOpenConnectionHistogram(connectionQueryServiceName); - Map openPhoenixConnMap = connectionQueryServicesHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); - for (Long count: openPhoenixConnMap.values()) { - Assert.assertEquals(new Long(3), count); - } + Map openPhoenixConnMap = + connectionQueryServicesHistogram.getRangeHistogramDistribution().getRangeDistributionMap(); + for (Long count : openPhoenixConnMap.values()) { + Assert.assertEquals(new Long(3), count); } + } - private void updateMetricsAndHistogram (long counter, String connectionQueryServiceName) { - ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, - OPEN_PHOENIX_CONNECTIONS_COUNTER, counter); - ConnectionQueryServicesMetricsManager.updateConnectionQueryServiceOpenConnectionHistogram(counter, - connectionQueryServiceName); - } + private void updateMetricsAndHistogram(long counter, String connectionQueryServiceName) { + ConnectionQueryServicesMetricsManager.updateMetrics(connectionQueryServiceName, + OPEN_PHOENIX_CONNECTIONS_COUNTER, counter); + ConnectionQueryServicesMetricsManager + .updateConnectionQueryServiceOpenConnectionHistogram(counter, connectionQueryServiceName); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsTest.java index d05a6824b3b..fdbf33e4bea 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesMetricsTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,90 +17,88 @@ */ package org.apache.phoenix.monitoring.connectionqueryservice; -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; -import org.apache.phoenix.monitoring.MetricType; -import org.junit.Test; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - +import static org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesNameMetricsTest.connectionQueryServiceNames; import static org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesNameMetricsTest.openInternalPhoenixConnCounter; import static org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesNameMetricsTest.openPhoenixConnCounter; -import static org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesNameMetricsTest.connectionQueryServiceNames; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; +import org.apache.phoenix.monitoring.MetricType; +import org.junit.Test; + public class ConnectionQueryServicesMetricsTest { - static Map phoenixConnectionQueryServiceSet = - new HashMap<>(); + static Map phoenixConnectionQueryServiceSet = + new HashMap<>(); - public boolean verifyConnectionQueryServiceName() { + public boolean verifyConnectionQueryServiceName() { - if (phoenixConnectionQueryServiceSet.isEmpty()) { - return false; - } - for (String connectionQueryServiceName : connectionQueryServiceNames) { - ConnectionQueryServicesMetrics instance = - phoenixConnectionQueryServiceSet.get(connectionQueryServiceName); - if (!instance.getConnectionQueryServiceName().equals(connectionQueryServiceName)) { - return false; - } - } - return true; + if (phoenixConnectionQueryServiceSet.isEmpty()) { + return false; + } + for (String connectionQueryServiceName : connectionQueryServiceNames) { + ConnectionQueryServicesMetrics instance = + phoenixConnectionQueryServiceSet.get(connectionQueryServiceName); + if (!instance.getConnectionQueryServiceName().equals(connectionQueryServiceName)) { + return false; + } } + return true; + } - public void verifyMetricsFromPhoenixConnectionQueryServiceMetrics() { - assertFalse(phoenixConnectionQueryServiceSet.isEmpty()); - for (int i = 0; i < connectionQueryServiceNames.length; i++) { - ConnectionQueryServicesMetrics instance = - phoenixConnectionQueryServiceSet.get(connectionQueryServiceNames[i]); - assertEquals(instance.getConnectionQueryServiceName(), connectionQueryServiceNames[i]); - List metricList = instance.getAllMetrics(); - for (ConnectionQueryServicesMetric metric : metricList) { + public void verifyMetricsFromPhoenixConnectionQueryServiceMetrics() { + assertFalse(phoenixConnectionQueryServiceSet.isEmpty()); + for (int i = 0; i < connectionQueryServiceNames.length; i++) { + ConnectionQueryServicesMetrics instance = + phoenixConnectionQueryServiceSet.get(connectionQueryServiceNames[i]); + assertEquals(instance.getConnectionQueryServiceName(), connectionQueryServiceNames[i]); + List 
metricList = instance.getAllMetrics(); + for (ConnectionQueryServicesMetric metric : metricList) { - if (metric.getMetricType() - .equals(MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER)) { - assertEquals(openInternalPhoenixConnCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER)) { - assertEquals(openPhoenixConnCounter[i], metric.getValue()); - } - } + if (metric.getMetricType().equals(MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER)) { + assertEquals(openInternalPhoenixConnCounter[i], metric.getValue()); } + if (metric.getMetricType().equals(MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER)) { + assertEquals(openPhoenixConnCounter[i], metric.getValue()); + } + } } + } - @Test - public void testPhoenixConnectionQueryServiceMetricsForPhoenixConnectionQueryServiceName() { - Configuration conf = new Configuration(); - for (int i = 0; i < connectionQueryServiceNames.length; i++) { - ConnectionQueryServicesMetrics instance = - new ConnectionQueryServicesMetrics(connectionQueryServiceNames[i], conf); - phoenixConnectionQueryServiceSet.put(connectionQueryServiceNames[i], instance); - } - assertTrue(verifyConnectionQueryServiceName()); + @Test + public void testPhoenixConnectionQueryServiceMetricsForPhoenixConnectionQueryServiceName() { + Configuration conf = new Configuration(); + for (int i = 0; i < connectionQueryServiceNames.length; i++) { + ConnectionQueryServicesMetrics instance = + new ConnectionQueryServicesMetrics(connectionQueryServiceNames[i], conf); + phoenixConnectionQueryServiceSet.put(connectionQueryServiceNames[i], instance); } + assertTrue(verifyConnectionQueryServiceName()); + } - /** - * This test is for changeMetricValue() Method and getMetricMap() - */ - @Test - public void testPhoenixConnectionQueryServiceMetrics() { - Configuration conf = new Configuration(); - for (int i = 0; i < connectionQueryServiceNames.length; i++) { - ConnectionQueryServicesMetrics instance = - new ConnectionQueryServicesMetrics(connectionQueryServiceNames[i], conf); - phoenixConnectionQueryServiceSet.put(connectionQueryServiceNames[i], instance); + /** + * This test is for changeMetricValue() Method and getMetricMap() + */ + @Test + public void testPhoenixConnectionQueryServiceMetrics() { + Configuration conf = new Configuration(); + for (int i = 0; i < connectionQueryServiceNames.length; i++) { + ConnectionQueryServicesMetrics instance = + new ConnectionQueryServicesMetrics(connectionQueryServiceNames[i], conf); + phoenixConnectionQueryServiceSet.put(connectionQueryServiceNames[i], instance); - instance.setMetricValue(MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, - openInternalPhoenixConnCounter[i]); - instance.setMetricValue( - MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER, openPhoenixConnCounter[i]); - } - verifyMetricsFromPhoenixConnectionQueryServiceMetrics(); - phoenixConnectionQueryServiceSet.clear(); + instance.setMetricValue(MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, + openInternalPhoenixConnCounter[i]); + instance.setMetricValue(MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER, + openPhoenixConnCounter[i]); } + verifyMetricsFromPhoenixConnectionQueryServiceMetrics(); + phoenixConnectionQueryServiceSet.clear(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesNameMetricsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesNameMetricsTest.java index 6a1d26e05fc..b755883cc06 
100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesNameMetricsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/monitoring/connectionqueryservice/ConnectionQueryServicesNameMetricsTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,6 @@ */ package org.apache.phoenix.monitoring.connectionqueryservice; -import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; -import org.apache.phoenix.monitoring.MetricType; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.apache.phoenix.monitoring.MetricType.OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.OPEN_PHOENIX_CONNECTIONS_COUNTER; import static org.apache.phoenix.monitoring.MetricType.PHOENIX_CONNECTIONS_THROTTLED_COUNTER; @@ -31,57 +24,61 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric; +import org.apache.phoenix.monitoring.MetricType; + /** - * This class is used primarily to populate data and - * verification methods + * This class is used primarily to populate data and verification methods */ public class ConnectionQueryServicesNameMetricsTest { - public static final String[] connectionQueryServiceNames = - { "USE_CASE_1", "USE_CASE_2", "USE_CASE_3" }; - public static Map<String, Map<MetricType, Long>>[] connectionQueryServiceNameMetricMap = - new Map[connectionQueryServiceNames.length]; - public static final long[] openPhoenixConnCounter = { 1, 1, 1 }; - public static final long[] openInternalPhoenixConnCounter = { 1, 1, 1 }; - public static final long[] phoenixConnThrottledCounter = { 1, 2, 3 }; - + public static final String[] connectionQueryServiceNames = + { "USE_CASE_1", "USE_CASE_2", "USE_CASE_3" }; + public static Map<String, Map<MetricType, Long>>[] connectionQueryServiceNameMetricMap = + new Map[connectionQueryServiceNames.length]; + public static final long[] openPhoenixConnCounter = { 1, 1, 1 }; + public static final long[] openInternalPhoenixConnCounter = { 1, 1, 1 }; + public static final long[] phoenixConnThrottledCounter = { 1, 2, 3 }; - public void populateMetrics() { - for (int i = 0; i < connectionQueryServiceNameMetricMap.length; i++) { - connectionQueryServiceNameMetricMap[i] = new HashMap<>(); - } - for (int i = 0; i < connectionQueryServiceNames.length; i++) { - Map<MetricType, Long> metrics = new HashMap<>(); - metrics.put(OPEN_PHOENIX_CONNECTIONS_COUNTER, openPhoenixConnCounter[i]); - metrics.put( - OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, openInternalPhoenixConnCounter[i]); - metrics.put(PHOENIX_CONNECTIONS_THROTTLED_COUNTER, phoenixConnThrottledCounter[i]); + public void populateMetrics() { + for (int i = 0; i < connectionQueryServiceNameMetricMap.length; i++) { + connectionQueryServiceNameMetricMap[i] = new HashMap<>(); + } + for (int i = 0; i < connectionQueryServiceNames.length; i++) { +
Map metrics = new HashMap<>(); + metrics.put(OPEN_PHOENIX_CONNECTIONS_COUNTER, openPhoenixConnCounter[i]); + metrics.put(OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER, openInternalPhoenixConnCounter[i]); + metrics.put(PHOENIX_CONNECTIONS_THROTTLED_COUNTER, phoenixConnThrottledCounter[i]); - connectionQueryServiceNameMetricMap[i].put(connectionQueryServiceNames[i], metrics); - } + connectionQueryServiceNameMetricMap[i].put(connectionQueryServiceNames[i], metrics); } + } - public void verfiyCountOfConnectionQueryServices(int noOfConnectionQueryServiceName) { - Map> map = - ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); - assertFalse(map == null || map.isEmpty()); - for (int i = 0; i < noOfConnectionQueryServiceName; i++) { - assertTrue(map.containsKey(connectionQueryServiceNames[i])); - List connectionQueryServiceNameMetric = - map.get(connectionQueryServiceNames[i]); - for (ConnectionQueryServicesMetric metric : connectionQueryServiceNameMetric) { - if (metric.getMetricType().equals(OPEN_PHOENIX_CONNECTIONS_COUNTER)) { - assertEquals(openPhoenixConnCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER)) { - assertEquals(openInternalPhoenixConnCounter[i], metric.getValue()); - } - if (metric.getMetricType().equals(PHOENIX_CONNECTIONS_THROTTLED_COUNTER)) { - assertEquals(phoenixConnThrottledCounter[i], metric.getValue()); - } - } + public void verfiyCountOfConnectionQueryServices(int noOfConnectionQueryServiceName) { + Map> map = + ConnectionQueryServicesMetricsManager.getAllConnectionQueryServicesMetrics(); + assertFalse(map == null || map.isEmpty()); + for (int i = 0; i < noOfConnectionQueryServiceName; i++) { + assertTrue(map.containsKey(connectionQueryServiceNames[i])); + List connectionQueryServiceNameMetric = + map.get(connectionQueryServiceNames[i]); + for (ConnectionQueryServicesMetric metric : connectionQueryServiceNameMetric) { + if (metric.getMetricType().equals(OPEN_PHOENIX_CONNECTIONS_COUNTER)) { + assertEquals(openPhoenixConnCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER)) { + assertEquals(openInternalPhoenixConnCounter[i], metric.getValue()); + } + if (metric.getMetricType().equals(PHOENIX_CONNECTIONS_THROTTLED_COUNTER)) { + assertEquals(phoenixConnThrottledCounter[i], metric.getValue()); } + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/BuiltInFunctionInfoTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/BuiltInFunctionInfoTest.java index 5cf5de0c3ab..2a0fe9b95b8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/parse/BuiltInFunctionInfoTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/BuiltInFunctionInfoTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,89 +34,88 @@ public class BuiltInFunctionInfoTest { - private static BuiltInFunctionInfo getBuiltInFunctionInfo(Class funcClass) { - return new BuiltInFunctionInfo(funcClass, funcClass.getAnnotation(BuiltInFunction.class)); + private static BuiltInFunctionInfo + getBuiltInFunctionInfo(Class funcClass) { + return new BuiltInFunctionInfo(funcClass, funcClass.getAnnotation(BuiltInFunction.class)); + } + + @Test + public void testConstruct_NoDefaultArgs() { + BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(NoDefaultArgsFunction.class); + assertEquals(2, funcInfo.getArgs().length); + assertEquals(2, funcInfo.getRequiredArgCount()); + assertEquals("NO_DEFAULT_ARGS", funcInfo.getName()); + } + + @Test + public void testConstruct_WithOneDefaultArg() { + BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(WithOneDefaultArg.class); + assertEquals(3, funcInfo.getArgs().length); + assertEquals(2, funcInfo.getRequiredArgCount()); + assertEquals("WITH_ONE_DEFAULT_ARG", funcInfo.getName()); + } + + @Test + public void testConstruct_WithMultipleDefaultArgs() { + BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(WithMultipleDefaultArgs.class); + assertEquals(3, funcInfo.getArgs().length); + assertEquals(1, funcInfo.getRequiredArgCount()); + assertEquals("WITH_MULTIPLE_DEFAULT_ARGS", funcInfo.getName()); + } + + private static class BaseFunctionAdapter extends ScalarFunction { + + private final String name; + + BaseFunctionAdapter(String name) { + this.name = name; } - @Test - public void testConstruct_NoDefaultArgs() { - BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(NoDefaultArgsFunction.class); - assertEquals(2, funcInfo.getArgs().length); - assertEquals(2, funcInfo.getRequiredArgCount()); - assertEquals("NO_DEFAULT_ARGS", funcInfo.getName()); + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + throw new UnsupportedOperationException("Can't evalulate a BaseTestFunction"); } - @Test - public void testConstruct_WithOneDefaultArg() { - BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(WithOneDefaultArg.class); - assertEquals(3, funcInfo.getArgs().length); - assertEquals(2, funcInfo.getRequiredArgCount()); - assertEquals("WITH_ONE_DEFAULT_ARG", funcInfo.getName()); + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; } - @Test - public void testConstruct_WithMultipleDefaultArgs() { - BuiltInFunctionInfo funcInfo = getBuiltInFunctionInfo(WithMultipleDefaultArgs.class); - assertEquals(3, funcInfo.getArgs().length); - assertEquals(1, funcInfo.getRequiredArgCount()); - assertEquals("WITH_MULTIPLE_DEFAULT_ARGS", funcInfo.getName()); + @Override + public String getName() { + return name; } + } - private static class BaseFunctionAdapter extends ScalarFunction { + @BuiltInFunction(name = "NO_DEFAULT_ARGS", args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }) }) + static class NoDefaultArgsFunction extends BaseFunctionAdapter { - - private final String name; - - BaseFunctionAdapter(String name) { - this.name = name; - } - - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - throw new UnsupportedOperationException("Can't evalulate a BaseTestFunction"); - } - - @Override - public PDataType 
getDataType() { - return PVarchar.INSTANCE; - } - - @Override - public String getName() { - return name; - } + public NoDefaultArgsFunction(List ignoreChildren) { + super("NO_DEFAULT_ARGS"); } - @BuiltInFunction(name="NO_DEFAULT_ARGS", args={ - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class})}) - static class NoDefaultArgsFunction extends BaseFunctionAdapter { - - public NoDefaultArgsFunction(List ignoreChildren) { - super("NO_DEFAULT_ARGS"); - } - - } + } - @BuiltInFunction(name="WITH_ONE_DEFAULT_ARG", args={ - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}, defaultValue = "'a'") }) - static class WithOneDefaultArg extends BaseFunctionAdapter { + @BuiltInFunction(name = "WITH_ONE_DEFAULT_ARG", + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "'a'") }) + static class WithOneDefaultArg extends BaseFunctionAdapter { - public WithOneDefaultArg(List ignoreChildren) { - super("WITH_ONE_DEFAULT_ARG"); - } + public WithOneDefaultArg(List ignoreChildren) { + super("WITH_ONE_DEFAULT_ARG"); } + } - @BuiltInFunction(name="WITH_MULTIPLE_DEFAULT_ARGS", args={ - @Argument(allowedTypes={PVarchar.class}), - @Argument(allowedTypes={PVarchar.class}, defaultValue = "'a'"), - @Argument(allowedTypes={PVarchar.class}, defaultValue = "'b'") }) - static class WithMultipleDefaultArgs extends BaseFunctionAdapter { + @BuiltInFunction(name = "WITH_MULTIPLE_DEFAULT_ARGS", + args = { @Argument(allowedTypes = { PVarchar.class }), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "'a'"), + @Argument(allowedTypes = { PVarchar.class }, defaultValue = "'b'") }) + static class WithMultipleDefaultArgs extends BaseFunctionAdapter { - public WithMultipleDefaultArgs(List ignoreChildren) { - super("WITH_MULTIPLE_DEFAULT_ARGS"); - } + public WithMultipleDefaultArgs(List ignoreChildren) { + super("WITH_MULTIPLE_DEFAULT_ARGS"); } -} \ No newline at end of file + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/CastParseNodeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/CastParseNodeTest.java index b62d9a94d24..8daa63357d0 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/parse/CastParseNodeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/CastParseNodeTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,43 +15,46 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.parse; -import org.apache.phoenix.schema.types.PDataType; +import static org.junit.Assert.*; + import org.apache.phoenix.schema.types.PDecimal; -import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PLong; import org.junit.Test; -import static org.junit.Assert.*; - public class CastParseNodeTest { - @Test - public void testToSQL() { - ColumnParseNode columnParseNode = new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); - CastParseNode castParseNode = new CastParseNode(columnParseNode, PLong.INSTANCE, null, null, false); - StringBuilder stringBuilder = new StringBuilder(); - castParseNode.toSQL(null, stringBuilder); - assertEquals(" CAST(TABLE1.V AS BIGINT)", stringBuilder.toString()); - } + @Test + public void testToSQL() { + ColumnParseNode columnParseNode = + new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); + CastParseNode castParseNode = + new CastParseNode(columnParseNode, PLong.INSTANCE, null, null, false); + StringBuilder stringBuilder = new StringBuilder(); + castParseNode.toSQL(null, stringBuilder); + assertEquals(" CAST(TABLE1.V AS BIGINT)", stringBuilder.toString()); + } - @Test - public void testToSQL_WithLengthAndScale() { - ColumnParseNode columnParseNode = new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); - CastParseNode castParseNode = new CastParseNode(columnParseNode, PDecimal.INSTANCE, 5, 3, false); - StringBuilder stringBuilder = new StringBuilder(); - castParseNode.toSQL(null, stringBuilder); - assertEquals(" CAST(TABLE1.V AS DECIMAL(5,3))", stringBuilder.toString()); - } + @Test + public void testToSQL_WithLengthAndScale() { + ColumnParseNode columnParseNode = + new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); + CastParseNode castParseNode = + new CastParseNode(columnParseNode, PDecimal.INSTANCE, 5, 3, false); + StringBuilder stringBuilder = new StringBuilder(); + castParseNode.toSQL(null, stringBuilder); + assertEquals(" CAST(TABLE1.V AS DECIMAL(5,3))", stringBuilder.toString()); + } - @Test - public void testToSQL_ArrayType() { - ColumnParseNode columnParseNode = new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); - CastParseNode castParseNode = new CastParseNode(columnParseNode, PLong.INSTANCE, null, null, true); - StringBuilder stringBuilder = new StringBuilder(); - castParseNode.toSQL(null, stringBuilder); - assertEquals(" CAST(TABLE1.V AS BIGINT ARRAY)", stringBuilder.toString()); - } -} \ No newline at end of file + @Test + public void testToSQL_ArrayType() { + ColumnParseNode columnParseNode = + new ColumnParseNode(TableName.create("SCHEMA1", "TABLE1"), "V"); + CastParseNode castParseNode = + new CastParseNode(columnParseNode, PLong.INSTANCE, null, null, true); + StringBuilder stringBuilder = new StringBuilder(); + castParseNode.toSQL(null, stringBuilder); + assertEquals(" CAST(TABLE1.V AS BIGINT ARRAY)", stringBuilder.toString()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/CursorParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/CursorParserTest.java index 247ee44a534..cb65081d755 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/parse/CursorParserTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/CursorParserTest.java @@ -1,4 +1,4 @@ - /* +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,351 +17,310 @@ */ package org.apache.phoenix.parse; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.jdbc.PhoenixStatement.Operation; -import org.apache.phoenix.schema.SortOrder; -import org.junit.Test; +import static org.junit.Assert.*; import java.io.IOException; import java.io.StringReader; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; -import java.util.List; - -import static org.junit.Assert.*; +import org.junit.Test; public class CursorParserTest { - private void parseCursor(String sql) throws IOException, SQLException { - SQLParser parser = new SQLParser(new StringReader(sql)); - BindableStatement stmt = null; - try{ - stmt = parser.parseDeclareCursor(); - } catch (SQLException e){ - fail("Unable to parse:\n" + sql); - } - } - - private void parseFetch(String sql) throws IOException, SQLException { - SQLParser parser = new SQLParser(new StringReader(sql)); - BindableStatement stmt = null; - try{ - stmt = parser.parseFetch(); - } catch (SQLException e){ - fail("Unable to parse:\n" + sql); - } - } - - private void parseOpen(String sql) throws IOException, SQLException { - SQLParser parser = new SQLParser(new StringReader(sql)); - BindableStatement stmt = null; - try{ - stmt = parser.parseOpen(); - } catch (SQLException e){ - fail("Unable to parse:\n" + sql); - } - } - - @Test - public void testParseCursor0() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select a from b\n" + - "where ((ind.name = 'X')" + - "and rownum <= (1000 + 1000))\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - } - - @Test - public void testParseCursor1() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + - "where( (ind.name = 'X'\n" + - "and rownum <= 1 + 2)\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't'))"; - - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - } - - @Test - public void testParseCursor2() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + - "where (ind.string_value in ('a', 'b', 'c', 'd'))\n" + - "and rownum <= ( 3 + 1 )\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.deleted = '0')\n" + - "and (ind.index_num = 1)"; - - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testParseCursor3() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + - "where (ind.number_value > 3)\n" + - "and 
rownum <= 1000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '001'\n" + - "and (ind.deleted = '0'))\n" + - "and (ind.index_num = 2)"; - - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testParseCursor4() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*+ index(t iecustom_entity_data_created) */ /*gatherSlowStats*/ count(1) from core.custom_entity_data t\n" + - "where (t.created_date > to_date('01/01/2001'))\n" + - "and rownum <= 4500\n" + - "and (t.organization_id = '000000000000000')\n" + - "and (t.key_prefix = '001')"; - - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testCountDistinctCursor() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select count(distinct foo) from core.custom_entity_data t\n" - + "where (t.created_date > to_date('01/01/2001'))\n" - + "and (t.organization_id = '000000000000000')\n" - + "and (t.key_prefix = '001')\n" + "limit 4500"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testIsNullCursor() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select count(foo) from core.custom_entity_data t\n" + - "where (t.created_date is null)\n" + - "and (t.organization_id is not null)\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testAsInColumnAlias() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select count(foo) AS c from core.custom_entity_data t\n" + - "where (t.created_date is null)\n" + - "and (t.organization_id is not null)\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - + private void parseCursor(String sql) throws IOException, SQLException { + SQLParser parser = new SQLParser(new StringReader(sql)); + BindableStatement stmt = null; + try { + stmt = parser.parseDeclareCursor(); + } catch (SQLException e) { + fail("Unable to parse:\n" + sql); } - - @Test - public void testParseJoin1() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*SOQL*/ \"Id\"\n" + - "from (select /*+ ordered index(cft) */\n" + - "cft.val188 \"Marketing_Offer_Code__c\",\n" + - "t.account_id \"Id\"\n" + - "from sales.account_cfdata cft,\n" + - "sales.account t\n" + - "where (cft.account_cfdata_id = t.account_id)\n" + - "and (cft.organization_id = '00D300000000XHP')\n" + - "and (t.organization_id = '00D300000000XHP')\n" + - "and (t.deleted = '0')\n" + - "and (t.account_id != '000000000000000'))\n" + - "where (\"Marketing_Offer_Code__c\" = 'FSCR')"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testParseJoin2() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select /*rptacctlist 00O40000002C3of*/ \"00N40000001M8VK\",\n" + - "\"00N40000001M8VK.ID\",\n" + - "\"00N30000000r0K2\",\n" + - "\"00N30000000jgjo\"\n" + - "from (select /*+ ordered use_hash(aval368) index(cfa) */\n" + - "a.record_type_id \"RECORDTYPE\",\n" + - 
"aval368.last_name,aval368.first_name || ' ' || aval368.last_name,aval368.name \"00N40000001M8VK\",\n" + - "a.last_update \"LAST_UPDATE\",\n" + - "cfa.val368 \"00N40000001M8VK.ID\",\n" + - "TO_DATE(cfa.val282) \"00N30000000r0K2\",\n" + - "cfa.val252 \"00N30000000jgjo\"\n" + - "from sales.account a,\n" + - "sales.account_cfdata cfa,\n" + - "core.name_denorm aval368\n" + - "where (cfa.account_cfdata_id = a.account_id)\n" + - "and (aval368.entity_id = cfa.val368)\n" + - "and (a.deleted = '0')\n" + - "and (a.organization_id = '00D300000000EaE')\n" + - "and (a.account_id <> '000000000000000')\n" + - "and (cfa.organization_id = '00D300000000EaE')\n" + - "and (aval368.organization_id = '00D300000000EaE')\n" + - "and (aval368.entity_id like '005%'))\n" + - "where (\"RECORDTYPE\" = '0123000000002Gv')\n" + - "AND (\"00N40000001M8VK\" is null or \"00N40000001M8VK\" in ('BRIAN IRWIN', 'BRIAN MILLER', 'COLLEEN HORNYAK', 'ERNIE ZAVORAL JR', 'JAMIE TRIMBUR', 'JOE ANTESBERGER', 'MICHAEL HYTLA', 'NATHAN DELSIGNORE', 'SANJAY GANDHI', 'TOM BASHIOUM'))\n" + - "AND (\"LAST_UPDATE\" >= to_date('2009-08-01 07:00:00'))"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - + } + + private void parseFetch(String sql) throws IOException, SQLException { + SQLParser parser = new SQLParser(new StringReader(sql)); + BindableStatement stmt = null; + try { + stmt = parser.parseFetch(); + } catch (SQLException e) { + fail("Unable to parse:\n" + sql); } - - @Test - public void testCommentCursor() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select a from b -- here we come\n" + - "where ((ind.name = 'X') // to save the day\n" + - "and rownum /* won't run */ <= (1000 + 1000))\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - + } + + private void parseOpen(String sql) throws IOException, SQLException { + SQLParser parser = new SQLParser(new StringReader(sql)); + BindableStatement stmt = null; + try { + stmt = parser.parseOpen(); + } catch (SQLException e) { + fail("Unable to parse:\n" + sql); } + } + + @Test + public void testParseCursor0() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select a from b\n" + "where ((ind.name = 'X')" + "and rownum <= (1000 + 1000))\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + } + + @Test + public void testParseCursor1() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + + "where( (ind.name = 'X'\n" + "and rownum <= 1 + 2)\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't'))"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + } + + @Test + public void testParseCursor2() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + + "where (ind.string_value in ('a', 'b', 'c', 'd'))\n" + "and rownum <= ( 3 + 1 )\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.deleted = '0')\n" + "and (ind.index_num = 1)"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + 
expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testParseCursor3() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + + "where (ind.number_value > 3)\n" + "and rownum <= 1000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '001'\n" + + "and (ind.deleted = '0'))\n" + "and (ind.index_num = 2)"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testParseCursor4() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select /*+ index(t iecustom_entity_data_created) */ /*gatherSlowStats*/ count(1) from core.custom_entity_data t\n" + + "where (t.created_date > to_date('01/01/2001'))\n" + "and rownum <= 4500\n" + + "and (t.organization_id = '000000000000000')\n" + "and (t.key_prefix = '001')"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testCountDistinctCursor() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select count(distinct foo) from core.custom_entity_data t\n" + + "where (t.created_date > to_date('01/01/2001'))\n" + + "and (t.organization_id = '000000000000000')\n" + "and (t.key_prefix = '001')\n" + + "limit 4500"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testIsNullCursor() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select count(foo) from core.custom_entity_data t\n" + + "where (t.created_date is null)\n" + "and (t.organization_id is not null)\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testAsInColumnAlias() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select count(foo) AS c from core.custom_entity_data t\n" + + "where (t.created_date is null)\n" + "and (t.organization_id is not null)\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testParseJoin1() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select /*SOQL*/ \"Id\"\n" + + "from (select /*+ ordered index(cft) */\n" + "cft.val188 \"Marketing_Offer_Code__c\",\n" + + "t.account_id \"Id\"\n" + "from sales.account_cfdata cft,\n" + "sales.account t\n" + + "where (cft.account_cfdata_id = t.account_id)\n" + + "and (cft.organization_id = '00D300000000XHP')\n" + + "and (t.organization_id = '00D300000000XHP')\n" + "and (t.deleted = '0')\n" + + "and (t.account_id != '000000000000000'))\n" + + "where (\"Marketing_Offer_Code__c\" = 'FSCR')"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testParseJoin2() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select /*rptacctlist 00O40000002C3of*/ \"00N40000001M8VK\",\n" + + "\"00N40000001M8VK.ID\",\n" + "\"00N30000000r0K2\",\n" + "\"00N30000000jgjo\"\n" + + "from (select /*+ ordered use_hash(aval368) index(cfa) */\n" + + "a.record_type_id 
\"RECORDTYPE\",\n" + + "aval368.last_name,aval368.first_name || ' ' || aval368.last_name,aval368.name \"00N40000001M8VK\",\n" + + "a.last_update \"LAST_UPDATE\",\n" + "cfa.val368 \"00N40000001M8VK.ID\",\n" + + "TO_DATE(cfa.val282) \"00N30000000r0K2\",\n" + "cfa.val252 \"00N30000000jgjo\"\n" + + "from sales.account a,\n" + "sales.account_cfdata cfa,\n" + "core.name_denorm aval368\n" + + "where (cfa.account_cfdata_id = a.account_id)\n" + "and (aval368.entity_id = cfa.val368)\n" + + "and (a.deleted = '0')\n" + "and (a.organization_id = '00D300000000EaE')\n" + + "and (a.account_id <> '000000000000000')\n" + + "and (cfa.organization_id = '00D300000000EaE')\n" + + "and (aval368.organization_id = '00D300000000EaE')\n" + + "and (aval368.entity_id like '005%'))\n" + "where (\"RECORDTYPE\" = '0123000000002Gv')\n" + + "AND (\"00N40000001M8VK\" is null or \"00N40000001M8VK\" in ('BRIAN IRWIN', 'BRIAN MILLER', 'COLLEEN HORNYAK', 'ERNIE ZAVORAL JR', 'JAMIE TRIMBUR', 'JOE ANTESBERGER', 'MICHAEL HYTLA', 'NATHAN DELSIGNORE', 'SANJAY GANDHI', 'TOM BASHIOUM'))\n" + + "AND (\"LAST_UPDATE\" >= to_date('2009-08-01 07:00:00'))"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testCommentCursor() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select a from b -- here we come\n" + "where ((ind.name = 'X') // to save the day\n" + + "and rownum /* won't run */ <= (1000 + 1000))\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testQuoteEscapeCursor() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select a from b\n" + "where ind.name = 'X''Y'\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testSubtractionInSelect() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select a, 3-1-2, -4- -1-1 from b\n" + "where d = c - 1\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testNextValueForSelect() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select next value for foo.bar \n" + "from core.custom_entity_data\n"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testPercentileQuery1() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY salary DESC) from core.custom_index_value ind"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testPercentileQuery2() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY mark ASC) from core.custom_index_value ind"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testRowValueConstructorQuery() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = + "select a_integer FROM aTable where (x_integer, 
y_integer) > (3, 4)"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + + } + + @Test + public void testSingleTopLevelNot() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select * from t where not c = 5"; + + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); + } + + @Test + public void testHavingWithNot() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "select\n" + "\"WEB_STAT_ALIAS\".\"DOMAIN\" as \"c0\"\n" + + "from \"WEB_STAT\" \"WEB_STAT_ALIAS\"\n" + "group by \"WEB_STAT_ALIAS\".\"DOMAIN\" having\n" + + "(\n" + "(\n" + "NOT\n" + "(\n" + "(sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null)\n" + + ")\n" + "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + ")\n" + + "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + ")\n" + + "order by CASE WHEN \"WEB_STAT_ALIAS\".\"DOMAIN\" IS NULL THEN 1 ELSE 0 END,\n" + + "\"WEB_STAT_ALIAS\".\"DOMAIN\" ASC"; - @Test - public void testQuoteEscapeCursor() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select a from b\n" + - "where ind.name = 'X''Y'\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); - @Test - public void testSubtractionInSelect() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select a, 3-1-2, -4- -1-1 from b\n" + - "where d = c - 1\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } + } + + @Test + public void testDoubleBackslash() throws Exception { + String expectedNameToken = "testCursor"; + String expectedSelectStatement = "SELECT * FROM T WHERE A LIKE 'a\\(d'"; - @Test - public void testNextValueForSelect() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select next value for foo.bar \n" + - "from core.custom_entity_data\n"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } + String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; + parseCursor(sql); - @Test - public void testPercentileQuery1() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY salary DESC) from core.custom_index_value ind"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } + } - @Test - public void testPercentileQuery2() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY mark ASC) from core.custom_index_value ind"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } + @Test + public void testOpenCursor() throws Exception { + String expectedNameToken = "testCursor"; + String sql = "OPEN " + expectedNameToken; + parseOpen(sql); + } - @Test - public void testRowValueConstructorQuery() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select a_integer FROM 
aTable where (x_integer, y_integer) > (3, 4)"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testSingleTopLevelNot() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select * from t where not c = 5"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - } - - @Test - public void testHavingWithNot() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "select\n" + - "\"WEB_STAT_ALIAS\".\"DOMAIN\" as \"c0\"\n" + - "from \"WEB_STAT\" \"WEB_STAT_ALIAS\"\n" + - "group by \"WEB_STAT_ALIAS\".\"DOMAIN\" having\n" + - "(\n" + - "(\n" + - "NOT\n" + - "(\n" + - "(sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null)\n" + - ")\n" + - "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + - ")\n" + - "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + - ")\n" + - "order by CASE WHEN \"WEB_STAT_ALIAS\".\"DOMAIN\" IS NULL THEN 1 ELSE 0 END,\n" + - "\"WEB_STAT_ALIAS\".\"DOMAIN\" ASC"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testDoubleBackslash() throws Exception { - String expectedNameToken = "testCursor"; - String expectedSelectStatement = "SELECT * FROM T WHERE A LIKE 'a\\(d'"; - - String sql = "DECLARE " + expectedNameToken + " CURSOR FOR " + expectedSelectStatement; - parseCursor(sql); - - } - - @Test - public void testOpenCursor() throws Exception { - String expectedNameToken = "testCursor"; - String sql = "OPEN " + expectedNameToken; - parseOpen(sql); - } - - @Test - public void testFetchNext() throws Exception { - String expectedNameToken = "testCursor"; - String sql = "FETCH NEXT FROM " + expectedNameToken; - parseFetch(sql); - } + @Test + public void testFetchNext() throws Exception { + String expectedNameToken = "testCursor"; + String sql = "FETCH NEXT FROM " + expectedNameToken; + parseFetch(sql); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/PhoenixRowTimestampFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/PhoenixRowTimestampFunctionTest.java index aa9531e345f..254743ef72c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/parse/PhoenixRowTimestampFunctionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/PhoenixRowTimestampFunctionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,47 +17,46 @@ */ package org.apache.phoenix.parse; -import org.junit.Test; - -import java.util.List; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -public class PhoenixRowTimestampFunctionTest { +import java.util.List; - @Test - public void testExpressionWithPhoenixRowTimestamp() throws Exception { - ParseNode parseNode = SQLParser.parseCondition("(PHOENIX_ROW_TIMESTAMP() = PK2)"); - boolean hasPhoenixRowTimestampParseNode = false; - for (ParseNode childNode : parseNode.getChildren()) { - if (childNode.getClass().isAssignableFrom(PhoenixRowTimestampParseNode.class)) { - assertEquals(0, childNode.getChildren().size()); - hasPhoenixRowTimestampParseNode = true; - } - } - assertTrue(hasPhoenixRowTimestampParseNode); - } +import org.junit.Test; - @Test - public void testExpressionWithPhoenixRowTimestampWithParams() throws Exception { - ParseNode parseNode = SQLParser.parseCondition("(PHOENIX_ROW_TIMESTAMP(COL1) = PK2)"); - for (ParseNode childNode : parseNode.getChildren()) { - assertFalse("PhoenixRowTimestampFunction does not take any parameters", - childNode.getClass().isAssignableFrom(PhoenixRowTimestampParseNode.class)); - } - } +public class PhoenixRowTimestampFunctionTest { - @Test - public void testSelectWithPhoenixRowTimestamp() throws Exception { - SQLParser parser = new SQLParser("SELECT PHOENIX_ROW_TIMESTAMP() FROM xyz"); - List nodes = parser.parseQuery().getSelect(); - assertEquals(1, nodes.size()); - assertTrue("PHOENIX_ROW_TIMESTAMP() should parse to PhoenixRowTimestampParseNode", - nodes.get(0).getNode().getClass() - .isAssignableFrom(PhoenixRowTimestampParseNode.class)); - assertEquals(0, nodes.get(0).getNode().getChildren().size()); + @Test + public void testExpressionWithPhoenixRowTimestamp() throws Exception { + ParseNode parseNode = SQLParser.parseCondition("(PHOENIX_ROW_TIMESTAMP() = PK2)"); + boolean hasPhoenixRowTimestampParseNode = false; + for (ParseNode childNode : parseNode.getChildren()) { + if (childNode.getClass().isAssignableFrom(PhoenixRowTimestampParseNode.class)) { + assertEquals(0, childNode.getChildren().size()); + hasPhoenixRowTimestampParseNode = true; + } + } + assertTrue(hasPhoenixRowTimestampParseNode); + } + + @Test + public void testExpressionWithPhoenixRowTimestampWithParams() throws Exception { + ParseNode parseNode = SQLParser.parseCondition("(PHOENIX_ROW_TIMESTAMP(COL1) = PK2)"); + for (ParseNode childNode : parseNode.getChildren()) { + assertFalse("PhoenixRowTimestampFunction does not take any parameters", + childNode.getClass().isAssignableFrom(PhoenixRowTimestampParseNode.class)); } + } + + @Test + public void testSelectWithPhoenixRowTimestamp() throws Exception { + SQLParser parser = new SQLParser("SELECT PHOENIX_ROW_TIMESTAMP() FROM xyz"); + List nodes = parser.parseQuery().getSelect(); + assertEquals(1, nodes.size()); + assertTrue("PHOENIX_ROW_TIMESTAMP() should parse to PhoenixRowTimestampParseNode", + nodes.get(0).getNode().getClass().isAssignableFrom(PhoenixRowTimestampParseNode.class)); + assertEquals(0, nodes.get(0).getNode().getChildren().size()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java index 91c798abf25..08020b27e84 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,7 +19,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -37,1062 +36,932 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.SortOrder; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; - +import org.junit.Test; public class QueryParserTest { - private T parseQuery(String sql, Class type) throws IOException, SQLException { - SQLParser parser = new SQLParser(new StringReader(sql)); - BindableStatement stmt = null; - stmt = parser.parseStatement(); - if (stmt.getOperation() != Operation.QUERY) { - return type != null ? type.cast(stmt) : null; - } - String newSQL = stmt.toString(); - SQLParser newParser = new SQLParser(new StringReader(newSQL)); - BindableStatement newStmt = null; - try { - newStmt = newParser.parseStatement(); - } catch (SQLException e) { - fail("Unable to parse new:\n" + newSQL); - } - assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt); - return type != null ? 
type.cast(stmt) : null; - } - - private T parseQuery(String sql) throws IOException, SQLException { - return parseQuery(sql, null); - } - - private void parseQueryThatShouldFail(String sql) throws Exception { - try { - parseQuery(sql); - fail("Query should throw a PhoenixParserException \n " + sql); - } - catch (PhoenixParserException e){ - } - } - - private void parseQueryThatShouldFailWithSQLException(String sql) throws Exception { - try { - parseQuery(sql); - fail("Query should throw a PhoenixParserException \n " + sql); - } - catch (SQLException e){ - } - } - - @Test - public void testParseGrantQuery() throws Exception { - - String sql0 = "GRANT 'RX' ON SYSTEM.\"SEQUENCE\" TO 'user'"; - parseQuery(sql0); - String sql1 = "GRANT 'RWXCA' ON TABLE some_table0 TO 'user0'"; - parseQuery(sql1); - String sql2 = "GRANT 'RWX' ON some_table1 TO 'user1'"; - parseQuery(sql2); - String sql3 = "GRANT 'CA' ON SCHEMA some_schema2 TO 'user2'"; - parseQuery(sql3); - String sql4 = "GRANT 'RXW' ON some_table3 TO GROUP 'group3'"; - parseQuery(sql4); - String sql5 = "GRANT 'RXW' ON \"some_schema5\".\"some_table5\" TO GROUP 'group5'"; - parseQuery(sql5); - String sql6 = "GRANT 'RWA' TO 'user6'"; - parseQuery(sql6); - String sql7 = "GRANT 'A' TO GROUP 'group7'"; - parseQuery(sql7); - String sql8 = "GRANT 'ARXRRRRR' TO GROUP 'group8'"; - parseQueryThatShouldFail(sql8); - } - - @Test - public void testParseRevokeQuery() throws Exception { - - String sql0 = "REVOKE ON SCHEMA SYSTEM FROM 'user0'"; - parseQuery(sql0); - String sql1 = "REVOKE ON SYSTEM.\"SEQUENCE\" FROM 'user1'"; - parseQuery(sql1); - String sql2 = "REVOKE ON TABLE some_table2 FROM GROUP 'group2'"; - parseQuery(sql2); - String sql3 = "REVOKE ON some_table3 FROM GROUP 'group2'"; - parseQuery(sql3); - String sql4 = "REVOKE FROM 'user4'"; - parseQuery(sql4); - String sql5 = "REVOKE FROM GROUP 'group5'"; - parseQuery(sql5); - String sql6 = "REVOKE 'RRWWXAAA' FROM GROUP 'group6'"; - parseQueryThatShouldFail(sql6); - } - - @Test - public void testParsePreQuery0() throws Exception { - String sql = (( - "select a from b\n" + - "where ((ind.name = 'X')" + - "and rownum <= (1000 + 1000))\n" - )); - parseQuery(sql); - } - - @Test - public void testParsePreQuery1() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + - "where( (ind.name = 'X'\n" + - "and rownum <= 1 + 2)\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't'))" - )); - parseQuery(sql); - } - - @Test - public void testParsePreQuery2() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + - "where (ind.string_value in ('a', 'b', 'c', 'd'))\n" + - "and rownum <= ( 3 + 1 )\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.deleted = '0')\n" + - "and (ind.index_num = 1)" - )); - parseQuery(sql); - } - - @Test - public void testParsePreQuery3() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + - "where (ind.number_value > 3)\n" + - "and rownum <= 1000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '001'\n" + - "and (ind.deleted = '0'))\n" + - "and (ind.index_num = 2)" - )); - parseQuery(sql); - } - - @Test - public void testParsePreQuery4() throws Exception { - String sql = (( - "select /*+ index(t iecustom_entity_data_created) */ /*gatherSlowStats*/ count(1) 
from core.custom_entity_data t\n" + - "where (t.created_date > to_date('01/01/2001'))\n" + - "and rownum <= 4500\n" + - "and (t.organization_id = '000000000000000')\n" + - "and (t.key_prefix = '001')" - )); - parseQuery(sql); - } - - @Test - public void testCountDistinctQuery() throws Exception { - String sql = (( - "select count(distinct foo) from core.custom_entity_data t\n" - + "where (t.created_date > to_date('01/01/2001'))\n" - + "and (t.organization_id = '000000000000000')\n" - + "and (t.key_prefix = '001')\n" + "limit 4500")); - parseQuery(sql); - } - - @Test - public void testIsNullQuery() throws Exception { - String sql = (( - "select count(foo) from core.custom_entity_data t\n" + - "where (t.created_date is null)\n" + - "and (t.organization_id is not null)\n" - )); - parseQuery(sql); - } - - @Test - public void testAsInColumnAlias() throws Exception { - String sql = (( - "select count(foo) AS c from core.custom_entity_data t\n" + - "where (t.created_date is null)\n" + - "and (t.organization_id is not null)\n" - )); - parseQuery(sql); - } - - @Test - public void testParseJoin1() throws Exception { - String sql = (( - "select /*SOQL*/ \"Id\"\n" + - "from (select /*+ ordered index(cft) */\n" + - "cft.val188 \"Marketing_Offer_Code__c\",\n" + - "t.account_id \"Id\"\n" + - "from sales.account_cfdata cft,\n" + - "sales.account t\n" + - "where (cft.account_cfdata_id = t.account_id)\n" + - "and (cft.organization_id = '00D300000000XHP')\n" + - "and (t.organization_id = '00D300000000XHP')\n" + - "and (t.deleted = '0')\n" + - "and (t.account_id != '000000000000000'))\n" + - "where (\"Marketing_Offer_Code__c\" = 'FSCR')" - )); - parseQuery(sql); - } - - @Test - public void testParseJoin2() throws Exception { - String sql = (( - "select /*rptacctlist 00O40000002C3of*/ \"00N40000001M8VK\",\n" + - "\"00N40000001M8VK.ID\",\n" + - "\"00N30000000r0K2\",\n" + - "\"00N30000000jgjo\"\n" + - "from (select /*+ ordered use_hash(aval368) index(cfa) */\n" + - "a.record_type_id \"RECORDTYPE\",\n" + - "aval368.last_name,aval368.first_name || ' ' || aval368.last_name,aval368.name \"00N40000001M8VK\",\n" + - "a.last_update \"LAST_UPDATE\",\n" + - "cfa.val368 \"00N40000001M8VK.ID\",\n" + - "TO_DATE(cfa.val282) \"00N30000000r0K2\",\n" + - "cfa.val252 \"00N30000000jgjo\"\n" + - "from sales.account a,\n" + - "sales.account_cfdata cfa,\n" + - "core.name_denorm aval368\n" + - "where (cfa.account_cfdata_id = a.account_id)\n" + - "and (aval368.entity_id = cfa.val368)\n" + - "and (a.deleted = '0')\n" + - "and (a.organization_id = '00D300000000EaE')\n" + - "and (a.account_id <> '000000000000000')\n" + - "and (cfa.organization_id = '00D300000000EaE')\n" + - "and (aval368.organization_id = '00D300000000EaE')\n" + - "and (aval368.entity_id like '005%'))\n" + - "where (\"RECORDTYPE\" = '0123000000002Gv')\n" + - "AND (\"00N40000001M8VK\" is null or \"00N40000001M8VK\" in ('BRIAN IRWIN', 'BRIAN MILLER', 'COLLEEN HORNYAK', 'ERNIE ZAVORAL JR', 'JAMIE TRIMBUR', 'JOE ANTESBERGER', 'MICHAEL HYTLA', 'NATHAN DELSIGNORE', 'SANJAY GANDHI', 'TOM BASHIOUM'))\n" + - "AND (\"LAST_UPDATE\" >= to_date('2009-08-01 07:00:00'))" - )); - parseQuery(sql); - } - - @Test - public void testNegative1() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ count(1) core.search_name_lookup ind\n" + - "where (ind.name = 'X')\n" + - "and rownum <= 2000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't')" - )); - try { - parseQuery(sql); - fail(); - } catch 
(SQLException e) { - assertEquals(SQLExceptionCode.MISSING_TOKEN.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testNegative2() throws Exception { - String sql = (( - "seelect /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + - "where (ind.name = 'X')\n" + - "and rownum <= 2000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't')" - )); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"seelect\" at line 1, column 1.")); - } - } - - @Test - public void testNegative3() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + - "where (ind.name = 'X')\n" + - "and rownum <= 2000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't'))" - )); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 603 (42P00): Syntax error. Unexpected input. Expecting \"EOF\", got \")\" at line 6, column 26.")); - } - } - - @Test - public void testNegativeCountDistinct() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" + - "where (ind.name = 'X')\n" + - "and rownum <= 2000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't')" - )); - try { - parseQuery(sql); - fail(); - } catch (SQLFeatureNotSupportedException e) { - // expected - } - } - - @Test - public void testNegativeCountStar() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ max(*) from core.search_name_lookup ind\n" + - "where (ind.name = 'X')\n" + - "and rownum <= 2000\n" + - "and (ind.organization_id = '000000000000000')\n" + - "and (ind.key_prefix = '00T')\n" + - "and (ind.name_type = 't')" - )); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"*\" at line 1, column 32.")); - } - } - - @Test - public void testNegativeNonBooleanWhere() throws Exception { - String sql = (( - "select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" + - "where 1" - )); - try { - parseQuery(sql); - fail(); - } catch (SQLFeatureNotSupportedException e) { - // expected - } - } - - @Test - public void testCommentQuery() throws Exception { - String sql = (( - "select a from b -- here we come\n" + - "where ((ind.name = 'X') // to save the day\n" + - "and rownum /* won't run */ <= (1000 + 1000))\n" - )); - parseQuery(sql); - } - - @Test - public void testQuoteEscapeQuery() throws Exception { - String sql = (( - "select a from b\n" + - "where ind.name = 'X''Y'\n" - )); - parseQuery(sql); - } - - @Test - public void testSubtractionInSelect() throws Exception { - String sql = (( - "select a, 3-1-2, -4- -1-1 from b\n" + - "where d = c - 1\n" - )); - parseQuery(sql); - } - - @Test - public void testParsingStatementWithMispellToken() throws Exception { - try { - String sql = (( - "selects a from b\n" + - "where e = d\n")); - parseQuery(sql); - fail("Should have caught exception."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. 
Encountered \"selects\" at line 1, column 1.")); - } - try { - String sql = (( - "select a froms b\n" + - "where e = d\n")); - parseQuery(sql); - fail("Should have caught exception."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 602 (42P00): Syntax error. Missing \"EOF\" at line 1, column 16.")); - } - } - - @Test - public void testParsingStatementWithExtraToken() throws Exception { - try { - String sql = (( - "select a,, from b\n" + - "where e = d\n")); - parseQuery(sql); - fail("Should have caught exception."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \",\" at line 1, column 10.")); - } - try { - String sql = (( - "select a from from b\n" + - "where e = d\n")); - parseQuery(sql); - fail("Should have caught exception."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 601 (42P00): Syntax error. Encountered \"from\" at line 1, column 15.")); - } - } - - @Test - public void testParseCreateTableInlinePrimaryKeyWithOrder() throws Exception { - for (String order : new String[]{"asc", "desc"}) { - String s = "create table core.entity_history_archive (id char(15) primary key ${o})".replace("${o}", order); - CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement(); - List columnDefs = stmt.getColumnDefs(); - assertEquals(1, columnDefs.size()); - assertEquals(SortOrder.fromDDLValue(order), columnDefs.iterator().next().getSortOrder()); - } - } - - @Test - public void testParseCreateTableOrderWithoutPrimaryKeyFails() throws Exception { - for (String order : new String[]{"asc", "desc"}) { - String stmt = "create table core.entity_history_archive (id varchar(20) ${o})".replace("${o}", order); - try { - new SQLParser((stmt)).parseStatement(); - fail("Expected parse exception to be thrown"); - } catch (SQLException e) { - String errorMsg = "ERROR 603 (42P00): Syntax error. Unexpected input. 
Expecting \"RPAREN\", got \"${o}\"".replace("${o}", order); - assertTrue("Expected message to contain \"" + errorMsg + "\" but got \"" + e.getMessage() + "\"", e.getMessage().contains(errorMsg)); - } - } - } - - @Test - public void testParseCreateTablePrimaryKeyConstraintWithOrder() throws Exception { - for (String order : new String[]{"asc", "desc"}) { - String s = "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150), constraint pk primary key (id ${o}, name ${o}))".replace("${o}", order); - CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement(); - PrimaryKeyConstraint pkConstraint = stmt.getPrimaryKeyConstraint(); - List> columns = pkConstraint.getColumnNames(); - assertEquals(2, columns.size()); - for (Pair pair : columns) { - assertEquals(SortOrder.fromDDLValue(order), pkConstraint.getColumnWithSortOrder(pair.getFirst()).getSecond()); - } - } - } - - @Test - public void testParseCreateTableCommaBeforePrimaryKeyConstraint() throws Exception { - for (String leadingComma : new String[]{",", ""}) { - String s = "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150)${o} constraint pk primary key (id))".replace("${o}", leadingComma); - - CreateTableStatement stmt = (CreateTableStatement)new SQLParser((s)).parseStatement(); - - assertEquals(2, stmt.getColumnDefs().size()); - assertNotNull(stmt.getPrimaryKeyConstraint()); - } - } - - private CreateCDCStatement parseCreateCDCSimple(String sql, boolean ifNotExists) - throws Exception { - CreateCDCStatement stmt = parseQuery(sql, CreateCDCStatement.class); - assertEquals("FOO", stmt.getCdcObjName().getName()); - assertEquals("BAR", stmt.getDataTable().getTableName()); - assertEquals(ifNotExists, stmt.isIfNotExists()); - return stmt; - } - - @Test - public void testCreateCDCSimple() throws Exception { - parseCreateCDCSimple("create cdc foo on bar", false); - parseCreateCDCSimple("create cdc foo on s.bar", false); - parseCreateCDCSimple("create cdc if not exists foo on bar", true); - parseCreateCDCSimple("create cdc foo on bar", false); - CreateCDCStatement stmt = null; - stmt = parseCreateCDCSimple("create cdc foo on bar TTL=100", false); - assertEquals(Arrays.asList(new Pair("TTL", 100)), - stmt.getProps().get("")); - stmt = parseCreateCDCSimple("create cdc foo on bar include (pre)", false); - assertEquals(new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE)), - stmt.getIncludeScopes()); - stmt = parseCreateCDCSimple("create cdc foo on bar include (pre, post, change)", false); - assertEquals(new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE, - PTable.CDCChangeScope.POST, PTable.CDCChangeScope.CHANGE)), - stmt.getIncludeScopes()); - stmt = parseCreateCDCSimple("create cdc foo on bar include (pre, pre, post)", - false); - assertEquals(new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE, - PTable.CDCChangeScope.POST)), stmt.getIncludeScopes()); - stmt = parseCreateCDCSimple("create cdc if not exists foo on bar abc=def", - true); - assertEquals(Arrays.asList(new Pair("ABC", "def")), stmt.getProps().get("")); - stmt = parseCreateCDCSimple("create cdc if not exists foo on bar abc=def, prop=val", - true); - assertEquals(Arrays.asList(new Pair("ABC", "def"), new Pair("PROP", "val")), - stmt.getProps().get("")); - } - - @Test - public void testCreateCDCWithErrors() throws Exception { - parseQueryThatShouldFail("create cdc foo"); - parseQueryThatShouldFail("create cdc foo on bar include (abc)"); - } - - private void parseInvalidCreateCDC(String sql, int expRrrorCode) throws 
IOException { - try { - parseQuery(sql); - fail(); - } - catch (SQLException e) { - assertEquals(expRrrorCode, e.getErrorCode()); - } - } - - @Test - public void testInvalidCreateCDC() throws Exception { - parseInvalidCreateCDC("create cdc foo bar", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - parseInvalidCreateCDC("create cdc foo bar ts", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - parseInvalidCreateCDC("create cdc foo bar(ts)", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - parseInvalidCreateCDC("create cdc s.foo on bar(ts)", SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode()); - parseInvalidCreateCDC("create cdc foo bar(ts1, ts2)", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - } - - @Test - public void testInvalidTrailingCommaOnCreateTable() throws Exception { - String sql = ( - ( - "create table foo (c1 varchar primary key, c2 varchar,)")); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testCreateSequence() throws Exception { - String sql = (( - "create sequence foo.bar\n" + - "start with 0\n" + - "increment by 1\n")); - parseQuery(sql); - } - - private DropCDCStatement parseDropCDCSimple(String sql, boolean ifNotExists) throws Exception { - DropCDCStatement stmt = parseQuery(sql, DropCDCStatement.class); - assertEquals("FOO", stmt.getCdcObjName().getName()); - assertEquals("BAR", stmt.getTableName().getTableName()); - assertEquals(ifNotExists, stmt.ifExists()); - return stmt; - } - @Test - public void testDropCDCSimple() throws Exception { - DropCDCStatement stmt = null; - parseDropCDCSimple("drop cdc foo on bar", false); - parseDropCDCSimple("drop cdc if exists foo on bar", true); - parseDropCDCSimple("drop cdc if exists foo on s.bar", true); - } - - private void parseInvalidDropCDC(String sql, int expRrrorCode) throws IOException { - try { - parseQuery(sql); - fail(); - } - catch (SQLException e) { - assertEquals(expRrrorCode, e.getErrorCode()); - } - } - - @Test - public void testInvalidDropCDC() throws Exception { - parseInvalidDropCDC("drop cdc foo bar", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - parseInvalidDropCDC("drop cdc s.foo on bar", SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode()); - parseInvalidDropCDC("drop cdc foo on bar(ts)", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); - } - - private void parseInvalidAlterCDC(String sql, int expRrrorCode) throws IOException { - try { - parseQuery(sql); - fail(); - } - catch (SQLException e) { - assertEquals(expRrrorCode, e.getErrorCode()); - } - } - - @Test - public void testNextValueForSelect() throws Exception { - String sql = (( - "select next value for foo.bar \n" + - "from core.custom_entity_data\n")); - parseQuery(sql); - } - - @Test - public void testNextValueForWhere() throws Exception { - String sql = (( - "upsert into core.custom_entity_data\n" + - "select next value for foo.bar from core.custom_entity_data\n")); - parseQuery(sql); - } - - @Test - public void testBadCharDef() throws Exception { - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR(0))"); - parseQuery(sql); - fail("Should have caught bad char definition."); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); - } - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR)"); - parseQuery(sql); - fail("Should 
have caught bad char definition."); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.MISSING_MAX_LENGTH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testBadVarcharDef() throws Exception { - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col VARCHAR(0))"); - parseQuery(sql); - fail("Should have caught bad varchar definition."); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testBadDecimalDef() throws Exception { - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(0, 5))"); - parseQuery(sql); - fail("Should have caught bad decimal definition."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. columnName=COL")); - } - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(40, 5))"); - parseQuery(sql); - fail("Should have caught bad decimal definition."); - } catch (SQLException e) { - assertTrue(e.getMessage(), e.getMessage().contains("ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. columnName=COL")); - } - } - - @Test - public void testBadBinaryDef() throws Exception { - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadBinaryDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY(0))"); - parseQuery(sql); - fail("Should have caught bad binary definition."); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); - } - try { - String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + - " (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY)"); - parseQuery(sql); - fail("Should have caught bad char definition."); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.MISSING_MAX_LENGTH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testPercentileQuery1() throws Exception { - String sql = ( - ( - "select PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY salary DESC) from core.custom_index_value ind")); - parseQuery(sql); - } - - @Test - public void testPercentileQuery2() throws Exception { - String sql = ( - ( - "select PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY mark ASC) from core.custom_index_value ind")); - parseQuery(sql); - } - - @Test - public void testRowValueConstructorQuery() throws Exception { - String sql = ( - ( - "select a_integer FROM aTable where (x_integer, y_integer) > (3, 4)")); - parseQuery(sql); - } - - @Test - public void testSingleTopLevelNot() throws Exception { - String sql = ( - ( - "select * from t where not c = 5")); - parseQuery(sql); - } - - @Test - public void testTopLevelNot() throws Exception { - String sql = ( - ( - "select * from t where not c")); - parseQuery(sql); - } - - @Test - public void testRVCInList() throws Exception { - String sql = ( - ( - "select * from t where k in ( (1,2), (3,4) )")); - parseQuery(sql); - } - - @Test - public void testInList() throws Exception { - String sql = ( - ( - "select * from t where k in ( 1,2 )")); - parseQuery(sql); - } - - @Test - public void testInvalidSelectStar() throws Exception { - String sql = ( - ( - "select *,k from t where k in ( 1,2 )")); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - 
assertEquals(SQLExceptionCode.MISSING_TOKEN.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testTableNameStartsWithUnderscore() throws Exception { - String sql = ( - ( - "select* from _t where k in ( 1,2 )")); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testValidUpsertSelectHint() throws Exception { - String sql = ( - ( - "upsert /*+ NO_INDEX */ into t select k from t where k in ( 1,2 )")); - parseQuery(sql); - } - - @Test - public void testHavingWithNot() throws Exception { - String sql = ( - ( - "select\n" + - "\"WEB_STAT_ALIAS\".\"DOMAIN\" as \"c0\"\n" + - "from \"WEB_STAT\" \"WEB_STAT_ALIAS\"\n" + - "group by \"WEB_STAT_ALIAS\".\"DOMAIN\" having\n" + - "(\n" + - "(\n" + - "NOT\n" + - "(\n" + - "(sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null)\n" + - ")\n" + - "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + - ")\n" + - "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + - ")\n" + - "order by CASE WHEN \"WEB_STAT_ALIAS\".\"DOMAIN\" IS NULL THEN 1 ELSE 0 END,\n" + - "\"WEB_STAT_ALIAS\".\"DOMAIN\" ASC")); - parseQuery(sql); - } - - @Test - public void testToDateInList() throws Exception { - String sql = ( - ("select * from date_test where d in (to_date('2013-11-04 09:12:00'))")); - parseQuery(sql); - } - - @Test - public void testDateLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = DATE '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - @Test - public void testTimeLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = TIME '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - - @Test - public void testTimestampLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = TIMESTAMP '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - @Test - public void testUnsignedDateLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = UNSIGNED_DATE '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - @Test - public void testUnsignedTimeLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = UNSIGNED_TIME '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - - @Test - public void testUnsignedTimestampLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = UNSIGNED_TIMESTAMP '2013-11-04 09:12:00'")); - parseQuery(sql); - } - - @Test - public void testParseDateEquality() throws Exception { - SQLParser parser = new SQLParser(new StringReader( - "select a from b\n" + - "where date '2014-01-04' = date '2014-01-04'" - )); - parser.parseStatement(); - } - - @Test - public void testParseDateIn() throws Exception { - SQLParser parser = new SQLParser(new StringReader( - "select a from b\n" + - "where date '2014-01-04' in (date '2014-01-04')" - )); - parser.parseStatement(); - } - - @Test - public void testUnknownLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = FOO '2013-11-04 09:12:00'")); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode()); - } - } - - @Test - public void testUnsupportedLiteral() throws Exception { - String sql = ( - ( - "select * from t where d = DECIMAL '2013-11-04 09:12:00'")); - try { - parseQuery(sql); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); - } - } - - @Test - 
public void testAnyElementExpression1() throws Exception { - String sql = "select * from t where 'a' = ANY(a)"; - parseQuery(sql); - } - - @Test - public void testAnyElementExpression2() throws Exception { - String sql = "select * from t where 'a' <= ANY(a-b+1)"; - parseQuery(sql); - } - - @Test - public void testAllElementExpression() throws Exception { - String sql = "select * from t where 'a' <= ALL(a-b+1)"; - parseQuery(sql); - } - - @Test - public void testDoubleBackslash() throws Exception { - String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'"; - parseQuery(sql); + private T parseQuery(String sql, Class type) + throws IOException, SQLException { + SQLParser parser = new SQLParser(new StringReader(sql)); + BindableStatement stmt = null; + stmt = parser.parseStatement(); + if (stmt.getOperation() != Operation.QUERY) { + return type != null ? type.cast(stmt) : null; + } + String newSQL = stmt.toString(); + SQLParser newParser = new SQLParser(new StringReader(newSQL)); + BindableStatement newStmt = null; + try { + newStmt = newParser.parseStatement(); + } catch (SQLException e) { + fail("Unable to parse new:\n" + newSQL); + } + assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt); + return type != null ? type.cast(stmt) : null; + } + + private T parseQuery(String sql) throws IOException, SQLException { + return parseQuery(sql, null); + } + + private void parseQueryThatShouldFail(String sql) throws Exception { + try { + parseQuery(sql); + fail("Query should throw a PhoenixParserException \n " + sql); + } catch (PhoenixParserException e) { } + } - @Test - public void testUnicodeSpace() throws Exception { - // U+2002 (8194) is a "EN Space" which looks just like a normal space (0x20 in ascii) - String unicodeEnSpace = String.valueOf(Character.toChars(8194)); - String sql = Joiner.on(unicodeEnSpace).join(new String[] {"SELECT", "*", "FROM", "T"}); - parseQuery(sql); + private void parseQueryThatShouldFailWithSQLException(String sql) throws Exception { + try { + parseQuery(sql); + fail("Query should throw a PhoenixParserException \n " + sql); + } catch (SQLException e) { + } + } + + @Test + public void testParseGrantQuery() throws Exception { + + String sql0 = "GRANT 'RX' ON SYSTEM.\"SEQUENCE\" TO 'user'"; + parseQuery(sql0); + String sql1 = "GRANT 'RWXCA' ON TABLE some_table0 TO 'user0'"; + parseQuery(sql1); + String sql2 = "GRANT 'RWX' ON some_table1 TO 'user1'"; + parseQuery(sql2); + String sql3 = "GRANT 'CA' ON SCHEMA some_schema2 TO 'user2'"; + parseQuery(sql3); + String sql4 = "GRANT 'RXW' ON some_table3 TO GROUP 'group3'"; + parseQuery(sql4); + String sql5 = "GRANT 'RXW' ON \"some_schema5\".\"some_table5\" TO GROUP 'group5'"; + parseQuery(sql5); + String sql6 = "GRANT 'RWA' TO 'user6'"; + parseQuery(sql6); + String sql7 = "GRANT 'A' TO GROUP 'group7'"; + parseQuery(sql7); + String sql8 = "GRANT 'ARXRRRRR' TO GROUP 'group8'"; + parseQueryThatShouldFail(sql8); + } + + @Test + public void testParseRevokeQuery() throws Exception { + + String sql0 = "REVOKE ON SCHEMA SYSTEM FROM 'user0'"; + parseQuery(sql0); + String sql1 = "REVOKE ON SYSTEM.\"SEQUENCE\" FROM 'user1'"; + parseQuery(sql1); + String sql2 = "REVOKE ON TABLE some_table2 FROM GROUP 'group2'"; + parseQuery(sql2); + String sql3 = "REVOKE ON some_table3 FROM GROUP 'group2'"; + parseQuery(sql3); + String sql4 = "REVOKE FROM 'user4'"; + parseQuery(sql4); + String sql5 = "REVOKE FROM GROUP 'group5'"; + parseQuery(sql5); + String sql6 = "REVOKE 'RRWWXAAA' FROM GROUP 'group6'"; + parseQueryThatShouldFail(sql6); + } 
+ + @Test + public void testParsePreQuery0() throws Exception { + String sql = + (("select a from b\n" + "where ((ind.name = 'X')" + "and rownum <= (1000 + 1000))\n")); + parseQuery(sql); + } + + @Test + public void testParsePreQuery1() throws Exception { + String sql = (("select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + + "where( (ind.name = 'X'\n" + "and rownum <= 1 + 2)\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't'))")); + parseQuery(sql); + } + + @Test + public void testParsePreQuery2() throws Exception { + String sql = (("select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + + "where (ind.string_value in ('a', 'b', 'c', 'd'))\n" + "and rownum <= ( 3 + 1 )\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.deleted = '0')\n" + "and (ind.index_num = 1)")); + parseQuery(sql); + } + + @Test + public void testParsePreQuery3() throws Exception { + String sql = (("select /*gatherSlowStats*/ count(1) from core.custom_index_value ind\n" + + "where (ind.number_value > 3)\n" + "and rownum <= 1000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '001'\n" + + "and (ind.deleted = '0'))\n" + "and (ind.index_num = 2)")); + parseQuery(sql); + } + + @Test + public void testParsePreQuery4() throws Exception { + String sql = + (("select /*+ index(t iecustom_entity_data_created) */ /*gatherSlowStats*/ count(1) from core.custom_entity_data t\n" + + "where (t.created_date > to_date('01/01/2001'))\n" + "and rownum <= 4500\n" + + "and (t.organization_id = '000000000000000')\n" + "and (t.key_prefix = '001')")); + parseQuery(sql); + } + + @Test + public void testCountDistinctQuery() throws Exception { + String sql = (("select count(distinct foo) from core.custom_entity_data t\n" + + "where (t.created_date > to_date('01/01/2001'))\n" + + "and (t.organization_id = '000000000000000')\n" + "and (t.key_prefix = '001')\n" + + "limit 4500")); + parseQuery(sql); + } + + @Test + public void testIsNullQuery() throws Exception { + String sql = (("select count(foo) from core.custom_entity_data t\n" + + "where (t.created_date is null)\n" + "and (t.organization_id is not null)\n")); + parseQuery(sql); + } + + @Test + public void testAsInColumnAlias() throws Exception { + String sql = (("select count(foo) AS c from core.custom_entity_data t\n" + + "where (t.created_date is null)\n" + "and (t.organization_id is not null)\n")); + parseQuery(sql); + } + + @Test + public void testParseJoin1() throws Exception { + String sql = (("select /*SOQL*/ \"Id\"\n" + "from (select /*+ ordered index(cft) */\n" + + "cft.val188 \"Marketing_Offer_Code__c\",\n" + "t.account_id \"Id\"\n" + + "from sales.account_cfdata cft,\n" + "sales.account t\n" + + "where (cft.account_cfdata_id = t.account_id)\n" + + "and (cft.organization_id = '00D300000000XHP')\n" + + "and (t.organization_id = '00D300000000XHP')\n" + "and (t.deleted = '0')\n" + + "and (t.account_id != '000000000000000'))\n" + + "where (\"Marketing_Offer_Code__c\" = 'FSCR')")); + parseQuery(sql); + } + + @Test + public void testParseJoin2() throws Exception { + String sql = (("select /*rptacctlist 00O40000002C3of*/ \"00N40000001M8VK\",\n" + + "\"00N40000001M8VK.ID\",\n" + "\"00N30000000r0K2\",\n" + "\"00N30000000jgjo\"\n" + + "from (select /*+ ordered use_hash(aval368) index(cfa) */\n" + + "a.record_type_id \"RECORDTYPE\",\n" + + "aval368.last_name,aval368.first_name || ' ' || 
aval368.last_name,aval368.name \"00N40000001M8VK\",\n" + + "a.last_update \"LAST_UPDATE\",\n" + "cfa.val368 \"00N40000001M8VK.ID\",\n" + + "TO_DATE(cfa.val282) \"00N30000000r0K2\",\n" + "cfa.val252 \"00N30000000jgjo\"\n" + + "from sales.account a,\n" + "sales.account_cfdata cfa,\n" + "core.name_denorm aval368\n" + + "where (cfa.account_cfdata_id = a.account_id)\n" + "and (aval368.entity_id = cfa.val368)\n" + + "and (a.deleted = '0')\n" + "and (a.organization_id = '00D300000000EaE')\n" + + "and (a.account_id <> '000000000000000')\n" + + "and (cfa.organization_id = '00D300000000EaE')\n" + + "and (aval368.organization_id = '00D300000000EaE')\n" + + "and (aval368.entity_id like '005%'))\n" + "where (\"RECORDTYPE\" = '0123000000002Gv')\n" + + "AND (\"00N40000001M8VK\" is null or \"00N40000001M8VK\" in ('BRIAN IRWIN', 'BRIAN MILLER', 'COLLEEN HORNYAK', 'ERNIE ZAVORAL JR', 'JAMIE TRIMBUR', 'JOE ANTESBERGER', 'MICHAEL HYTLA', 'NATHAN DELSIGNORE', 'SANJAY GANDHI', 'TOM BASHIOUM'))\n" + + "AND (\"LAST_UPDATE\" >= to_date('2009-08-01 07:00:00'))")); + parseQuery(sql); + } + + @Test + public void testNegative1() throws Exception { + String sql = (("select /*gatherSlowStats*/ count(1) core.search_name_lookup ind\n" + + "where (ind.name = 'X')\n" + "and rownum <= 2000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't')")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISSING_TOKEN.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testNegative2() throws Exception { + String sql = (("seelect /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + + "where (ind.name = 'X')\n" + "and rownum <= 2000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't')")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 601 (42P00): Syntax error. Encountered \"seelect\" at line 1, column 1.")); + } + } + + @Test + public void testNegative3() throws Exception { + String sql = (("select /*gatherSlowStats*/ count(1) from core.search_name_lookup ind\n" + + "where (ind.name = 'X')\n" + "and rownum <= 2000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't'))")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 603 (42P00): Syntax error. Unexpected input. 
Expecting \"EOF\", got \")\" at line 6, column 26.")); + } + } + + @Test + public void testNegativeCountDistinct() throws Exception { + String sql = (("select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" + + "where (ind.name = 'X')\n" + "and rownum <= 2000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't')")); + try { + parseQuery(sql); + fail(); + } catch (SQLFeatureNotSupportedException e) { + // expected + } + } + + @Test + public void testNegativeCountStar() throws Exception { + String sql = (("select /*gatherSlowStats*/ max(*) from core.search_name_lookup ind\n" + + "where (ind.name = 'X')\n" + "and rownum <= 2000\n" + + "and (ind.organization_id = '000000000000000')\n" + "and (ind.key_prefix = '00T')\n" + + "and (ind.name_type = 't')")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 601 (42P00): Syntax error. Encountered \"*\" at line 1, column 32.")); + } + } + + @Test + public void testNegativeNonBooleanWhere() throws Exception { + String sql = (("select /*gatherSlowStats*/ max( distinct 1) from core.search_name_lookup ind\n" + + "where 1")); + try { + parseQuery(sql); + fail(); + } catch (SQLFeatureNotSupportedException e) { + // expected + } + } + + @Test + public void testCommentQuery() throws Exception { + String sql = + (("select a from b -- here we come\n" + "where ((ind.name = 'X') // to save the day\n" + + "and rownum /* won't run */ <= (1000 + 1000))\n")); + parseQuery(sql); + } + + @Test + public void testQuoteEscapeQuery() throws Exception { + String sql = (("select a from b\n" + "where ind.name = 'X''Y'\n")); + parseQuery(sql); + } + + @Test + public void testSubtractionInSelect() throws Exception { + String sql = (("select a, 3-1-2, -4- -1-1 from b\n" + "where d = c - 1\n")); + parseQuery(sql); + } + + @Test + public void testParsingStatementWithMispellToken() throws Exception { + try { + String sql = (("selects a from b\n" + "where e = d\n")); + parseQuery(sql); + fail("Should have caught exception."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 601 (42P00): Syntax error. Encountered \"selects\" at line 1, column 1.")); } - - @Test - public void testInvalidTableOrSchemaName() throws Exception { - // namespace separator (:) cannot be used - parseQueryThatShouldFail("create table a:b (id varchar not null primary key)"); - parseQueryThatShouldFail("create table \"a:b\" (id varchar not null primary key)"); - // name separator (.) cannot be used without double quotes - parseQueryThatShouldFail("create table a.b.c.d (id varchar not null primary key)"); - parseQuery("create table \"a.b\".\"c.d\" (id varchar not null primary key)"); - parseQuery("create table \"a.b.c.d\" (id varchar not null primary key)"); + try { + String sql = (("select a froms b\n" + "where e = d\n")); + parseQuery(sql); + fail("Should have caught exception."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 602 (42P00): Syntax error. 
Missing \"EOF\" at line 1, column 16.")); } + } - @Test - public void testIntegerInOffsetSelect() throws Exception { - String sql = "SELECT * FROM T OFFSET 1"; - parseQuery(sql); + @Test + public void testParsingStatementWithExtraToken() throws Exception { + try { + String sql = (("select a,, from b\n" + "where e = d\n")); + parseQuery(sql); + fail("Should have caught exception."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 601 (42P00): Syntax error. Encountered \",\" at line 1, column 10.")); } - - @Test - public void testRVCInOffsetSelect() throws Exception { - String sql = "SELECT * FROM T OFFSET (A,B,C)=('a','b','c')"; - parseQuery(sql); + try { + String sql = (("select a from from b\n" + "where e = d\n")); + parseQuery(sql); + fail("Should have caught exception."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage() + .contains("ERROR 601 (42P00): Syntax error. Encountered \"from\" at line 1, column 15.")); + } + } + + @Test + public void testParseCreateTableInlinePrimaryKeyWithOrder() throws Exception { + for (String order : new String[] { "asc", "desc" }) { + String s = "create table core.entity_history_archive (id char(15) primary key ${o})" + .replace("${o}", order); + CreateTableStatement stmt = (CreateTableStatement) new SQLParser((s)).parseStatement(); + List columnDefs = stmt.getColumnDefs(); + assertEquals(1, columnDefs.size()); + assertEquals(SortOrder.fromDDLValue(order), columnDefs.iterator().next().getSortOrder()); + } + } + + @Test + public void testParseCreateTableOrderWithoutPrimaryKeyFails() throws Exception { + for (String order : new String[] { "asc", "desc" }) { + String stmt = + "create table core.entity_history_archive (id varchar(20) ${o})".replace("${o}", order); + try { + new SQLParser((stmt)).parseStatement(); + fail("Expected parse exception to be thrown"); + } catch (SQLException e) { + String errorMsg = + "ERROR 603 (42P00): Syntax error. Unexpected input. 
Expecting \"RPAREN\", got \"${o}\"" + .replace("${o}", order); + assertTrue( + "Expected message to contain \"" + errorMsg + "\" but got \"" + e.getMessage() + "\"", + e.getMessage().contains(errorMsg)); + } + } + } + + @Test + public void testParseCreateTablePrimaryKeyConstraintWithOrder() throws Exception { + for (String order : new String[] { "asc", "desc" }) { + String s = + "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150), constraint pk primary key (id ${o}, name ${o}))" + .replace("${o}", order); + CreateTableStatement stmt = (CreateTableStatement) new SQLParser((s)).parseStatement(); + PrimaryKeyConstraint pkConstraint = stmt.getPrimaryKeyConstraint(); + List> columns = pkConstraint.getColumnNames(); + assertEquals(2, columns.size()); + for (Pair pair : columns) { + assertEquals(SortOrder.fromDDLValue(order), + pkConstraint.getColumnWithSortOrder(pair.getFirst()).getSecond()); + } + } + } + + @Test + public void testParseCreateTableCommaBeforePrimaryKeyConstraint() throws Exception { + for (String leadingComma : new String[] { ",", "" }) { + String s = + "create table core.entity_history_archive (id CHAR(15), name VARCHAR(150)${o} constraint pk primary key (id))" + .replace("${o}", leadingComma); + + CreateTableStatement stmt = (CreateTableStatement) new SQLParser((s)).parseStatement(); + + assertEquals(2, stmt.getColumnDefs().size()); + assertNotNull(stmt.getPrimaryKeyConstraint()); + } + } + + private CreateCDCStatement parseCreateCDCSimple(String sql, boolean ifNotExists) + throws Exception { + CreateCDCStatement stmt = parseQuery(sql, CreateCDCStatement.class); + assertEquals("FOO", stmt.getCdcObjName().getName()); + assertEquals("BAR", stmt.getDataTable().getTableName()); + assertEquals(ifNotExists, stmt.isIfNotExists()); + return stmt; + } + + @Test + public void testCreateCDCSimple() throws Exception { + parseCreateCDCSimple("create cdc foo on bar", false); + parseCreateCDCSimple("create cdc foo on s.bar", false); + parseCreateCDCSimple("create cdc if not exists foo on bar", true); + parseCreateCDCSimple("create cdc foo on bar", false); + CreateCDCStatement stmt = null; + stmt = parseCreateCDCSimple("create cdc foo on bar TTL=100", false); + assertEquals(Arrays.asList(new Pair("TTL", 100)), stmt.getProps().get("")); + stmt = parseCreateCDCSimple("create cdc foo on bar include (pre)", false); + assertEquals(new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE)), stmt.getIncludeScopes()); + stmt = parseCreateCDCSimple("create cdc foo on bar include (pre, post, change)", false); + assertEquals(new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE, PTable.CDCChangeScope.POST, + PTable.CDCChangeScope.CHANGE)), stmt.getIncludeScopes()); + stmt = parseCreateCDCSimple("create cdc foo on bar include (pre, pre, post)", false); + assertEquals( + new HashSet<>(Arrays.asList(PTable.CDCChangeScope.PRE, PTable.CDCChangeScope.POST)), + stmt.getIncludeScopes()); + stmt = parseCreateCDCSimple("create cdc if not exists foo on bar abc=def", true); + assertEquals(Arrays.asList(new Pair("ABC", "def")), stmt.getProps().get("")); + stmt = parseCreateCDCSimple("create cdc if not exists foo on bar abc=def, prop=val", true); + assertEquals(Arrays.asList(new Pair("ABC", "def"), new Pair("PROP", "val")), + stmt.getProps().get("")); + } + + @Test + public void testCreateCDCWithErrors() throws Exception { + parseQueryThatShouldFail("create cdc foo"); + parseQueryThatShouldFail("create cdc foo on bar include (abc)"); + } + + private void parseInvalidCreateCDC(String sql, int 
expRrrorCode) throws IOException { + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(expRrrorCode, e.getErrorCode()); + } + } + + @Test + public void testInvalidCreateCDC() throws Exception { + parseInvalidCreateCDC("create cdc foo bar", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + parseInvalidCreateCDC("create cdc foo bar ts", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + parseInvalidCreateCDC("create cdc foo bar(ts)", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + parseInvalidCreateCDC("create cdc s.foo on bar(ts)", + SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode()); + parseInvalidCreateCDC("create cdc foo bar(ts1, ts2)", + SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + } + + @Test + public void testInvalidTrailingCommaOnCreateTable() throws Exception { + String sql = (("create table foo (c1 varchar primary key, c2 varchar,)")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testCreateSequence() throws Exception { + String sql = (("create sequence foo.bar\n" + "start with 0\n" + "increment by 1\n")); + parseQuery(sql); + } + + private DropCDCStatement parseDropCDCSimple(String sql, boolean ifNotExists) throws Exception { + DropCDCStatement stmt = parseQuery(sql, DropCDCStatement.class); + assertEquals("FOO", stmt.getCdcObjName().getName()); + assertEquals("BAR", stmt.getTableName().getTableName()); + assertEquals(ifNotExists, stmt.ifExists()); + return stmt; + } + + @Test + public void testDropCDCSimple() throws Exception { + DropCDCStatement stmt = null; + parseDropCDCSimple("drop cdc foo on bar", false); + parseDropCDCSimple("drop cdc if exists foo on bar", true); + parseDropCDCSimple("drop cdc if exists foo on s.bar", true); + } + + private void parseInvalidDropCDC(String sql, int expRrrorCode) throws IOException { + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(expRrrorCode, e.getErrorCode()); } + } - @Test - public void testBindInOffsetSelect() throws Exception { - String sql = "SELECT * FROM T OFFSET ?"; - parseQuery(sql); - } + @Test + public void testInvalidDropCDC() throws Exception { + parseInvalidDropCDC("drop cdc foo bar", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + parseInvalidDropCDC("drop cdc s.foo on bar", SQLExceptionCode.MISMATCHED_TOKEN.getErrorCode()); + parseInvalidDropCDC("drop cdc foo on bar(ts)", SQLExceptionCode.MISSING_TOKEN.getErrorCode()); + } - @Test - public void testLongQuery() throws Exception { - String sql = "SELECT * FROM T WHERE a IN (1) OFFSET 1"; - parseQuery(sql); + private void parseInvalidAlterCDC(String sql, int expRrrorCode) throws IOException { + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(expRrrorCode, e.getErrorCode()); + } + } + + @Test + public void testNextValueForSelect() throws Exception { + String sql = (("select next value for foo.bar \n" + "from core.custom_entity_data\n")); + parseQuery(sql); + } + + @Test + public void testNextValueForWhere() throws Exception { + String sql = (("upsert into core.custom_entity_data\n" + + "select next value for foo.bar from core.custom_entity_data\n")); + parseQuery(sql); + } + + @Test + public void testBadCharDef() throws Exception { + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR(0))"); + parseQuery(sql); + fail("Should have caught bad char definition."); + } catch 
(SQLException e) { + assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); } - - @Test - public void testLimitOffsetQuery() throws Exception { - String sql = "SELECT * FROM T LIMIT 10 OFFSET 1"; - parseQuery(sql); + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col CHAR)"); + parseQuery(sql); + fail("Should have caught bad char definition."); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISSING_MAX_LENGTH.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testLimitRVCOffsetQuery() throws Exception { - String sql = "SELECT * FROM T LIMIT 10 OFFSET (A,B,C)=('a','b','c')"; + @Test + public void testBadVarcharDef() throws Exception { + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col VARCHAR(0))"); parseQuery(sql); + fail("Should have caught bad varchar definition."); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testShowStmt() throws Exception { - // Happy paths - parseQuery("show schemas"); - parseQuery("show schemas like 'foo%'"); - parseQuery("show tables"); - parseQuery("show tables in foo"); - parseQuery("show tables in foo like 'bar%'"); - parseQuery("show tables like 'bar%'"); - - // Expected failures. - parseQueryThatShouldFail("show schemas like foo"); - parseQueryThatShouldFail("show schemas in foo"); - parseQueryThatShouldFail("show tables 'foo'"); - parseQueryThatShouldFail("show tables in 'foo'"); - parseQueryThatShouldFail("show tables like foo"); + @Test + public void testBadDecimalDef() throws Exception { + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(0, 5))"); + parseQuery(sql); + fail("Should have caught bad decimal definition."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. columnName=COL")); + } + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadDecimalDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col DECIMAL(40, 5))"); + parseQuery(sql); + fail("Should have caught bad decimal definition."); + } catch (SQLException e) { + assertTrue(e.getMessage(), e.getMessage().contains( + "ERROR 209 (22003): Decimal precision outside of range. Should be within 1 and 38. 
columnName=COL")); + } + } + + @Test + public void testBadBinaryDef() throws Exception { + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadBinaryDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY(0))"); + parseQuery(sql); + fail("Should have caught bad binary definition."); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.NONPOSITIVE_MAX_LENGTH.getErrorCode(), e.getErrorCode()); } - - @Test - public void testCreateSchema() throws Exception { - String sql0 = "create schema \"schema1\""; - parseQuery(sql0); - String sql1 = "create schema schema1"; - parseQuery(sql1); - String sql2 = "create schema \"default\""; - parseQuery(sql2); - String sql3 = "create schema \"DEFAULT\""; - parseQuery(sql3); + try { + String sql = ("CREATE TABLE IF NOT EXISTS testBadVarcharDef" + + " (pk VARCHAR NOT NULL PRIMARY KEY, col BINARY)"); + parseQuery(sql); + fail("Should have caught bad char definition."); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISSING_MAX_LENGTH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testPercentileQuery1() throws Exception { + String sql = + (("select PERCENTILE_CONT(0.9) WITHIN GROUP (ORDER BY salary DESC) from core.custom_index_value ind")); + parseQuery(sql); + } + + @Test + public void testPercentileQuery2() throws Exception { + String sql = + (("select PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY mark ASC) from core.custom_index_value ind")); + parseQuery(sql); + } + + @Test + public void testRowValueConstructorQuery() throws Exception { + String sql = (("select a_integer FROM aTable where (x_integer, y_integer) > (3, 4)")); + parseQuery(sql); + } + + @Test + public void testSingleTopLevelNot() throws Exception { + String sql = (("select * from t where not c = 5")); + parseQuery(sql); + } + + @Test + public void testTopLevelNot() throws Exception { + String sql = (("select * from t where not c")); + parseQuery(sql); + } + + @Test + public void testRVCInList() throws Exception { + String sql = (("select * from t where k in ( (1,2), (3,4) )")); + parseQuery(sql); + } + + @Test + public void testInList() throws Exception { + String sql = (("select * from t where k in ( 1,2 )")); + parseQuery(sql); + } + + @Test + public void testInvalidSelectStar() throws Exception { + String sql = (("select *,k from t where k in ( 1,2 )")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.MISSING_TOKEN.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testShowCreateTable() throws Exception { - // Happy paths - parseQuery("SHOW CREATE TABLE FOO"); - parseQuery("show create table FOO"); - parseQuery("SHOW CREATE TABLE s.FOO"); - parseQuery("SHOW CREATE TABLE \"foo\""); - parseQuery("SHOW CREATE TABLE s.\"foo\""); - parseQuery("SHOW CREATE TABLE \"s\".FOO"); - - // Expected failures. 
- parseQueryThatShouldFail("SHOW CREATE VIEW foo"); - parseQueryThatShouldFail("SHOW CREATE TABLE 'foo'"); + @Test + public void testTableNameStartsWithUnderscore() throws Exception { + String sql = (("select* from _t where k in ( 1,2 )")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.PARSER_ERROR.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testValidUpsertSelectHint() throws Exception { + String sql = (("upsert /*+ NO_INDEX */ into t select k from t where k in ( 1,2 )")); + parseQuery(sql); + } + + @Test + public void testHavingWithNot() throws Exception { + String sql = (("select\n" + "\"WEB_STAT_ALIAS\".\"DOMAIN\" as \"c0\"\n" + + "from \"WEB_STAT\" \"WEB_STAT_ALIAS\"\n" + "group by \"WEB_STAT_ALIAS\".\"DOMAIN\" having\n" + + "(\n" + "(\n" + "NOT\n" + "(\n" + "(sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null)\n" + + ")\n" + "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + ")\n" + + "OR NOT((sum(\"WEB_STAT_ALIAS\".\"ACTIVE_VISITOR\") is null))\n" + ")\n" + + "order by CASE WHEN \"WEB_STAT_ALIAS\".\"DOMAIN\" IS NULL THEN 1 ELSE 0 END,\n" + + "\"WEB_STAT_ALIAS\".\"DOMAIN\" ASC")); + parseQuery(sql); + } + + @Test + public void testToDateInList() throws Exception { + String sql = (("select * from date_test where d in (to_date('2013-11-04 09:12:00'))")); + parseQuery(sql); + } + + @Test + public void testDateLiteral() throws Exception { + String sql = (("select * from t where d = DATE '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testTimeLiteral() throws Exception { + String sql = (("select * from t where d = TIME '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testTimestampLiteral() throws Exception { + String sql = (("select * from t where d = TIMESTAMP '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testUnsignedDateLiteral() throws Exception { + String sql = (("select * from t where d = UNSIGNED_DATE '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testUnsignedTimeLiteral() throws Exception { + String sql = (("select * from t where d = UNSIGNED_TIME '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testUnsignedTimestampLiteral() throws Exception { + String sql = (("select * from t where d = UNSIGNED_TIMESTAMP '2013-11-04 09:12:00'")); + parseQuery(sql); + } + + @Test + public void testParseDateEquality() throws Exception { + SQLParser parser = new SQLParser( + new StringReader("select a from b\n" + "where date '2014-01-04' = date '2014-01-04'")); + parser.parseStatement(); + } + + @Test + public void testParseDateIn() throws Exception { + SQLParser parser = new SQLParser( + new StringReader("select a from b\n" + "where date '2014-01-04' in (date '2014-01-04')")); + parser.parseStatement(); + } + + @Test + public void testUnknownLiteral() throws Exception { + String sql = (("select * from t where d = FOO '2013-11-04 09:12:00'")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), e.getErrorCode()); } + } - @Test - public void testBinaryLiteral() throws Exception { - // As per ISO/IEC 9075-2:2011(E) 5.3 page 163 - // The literal syntax is: - // - // ::= - // X [ ... ] [ { [ ... ] [ ... ] }... ] - // [ { [ ... ] [ { [ ... ] - // [ ... ] }... ] }... 
] - // - // With the current grammar we only approximate the multi-line syntax, but - // we're close enough for most practical purposes - // (We do not enforce a new line before continuation lines) - - // Happy paths - parseQuery("SELECT b, x from x WHERE x = x'00'"); - parseQuery("SELECT b, x from x WHERE x = " - + "x'0 12 ' --comment \n /* comment */ '34 567' \n \n 'aA'"); - parseQuery("SELECT b, x from x WHERE x = " - + "b'0 10 ' --comment \n /* comment */ '10 101' \n \n '00000000'"); - - // Expected failures. - // Space after 'x' - parseQueryThatShouldFailWithSQLException("SELECT b, x from x WHERE x = x '00'"); - parseQueryThatShouldFailWithSQLException("SELECT b, x from x WHERE b = b '00'"); - - // Illegal digit character in first line - parseQueryThatShouldFail("SELECT b, x from x WHERE x = " - + "x'X0 12 ' --comment \n /* comment */ '34 5670' \n \n 'aA'"); - parseQueryThatShouldFail("SELECT b, x from b WHERE b = " - + "b'B0 10 ' --comment \n /* comment */ '10 101' \n \n '000000000'"); - - // Illegal digit character in continuation line - parseQueryThatShouldFail("SELECT b, x from x WHERE x = " - + "x'0 12 ' --comment \n /* comment */ '34 5670' \n \n 'aA_'"); - parseQueryThatShouldFail("SELECT b, x from x WHERE x = " - + "b'0 10 ' --comment \n /* comment */ '00 0000' \n \n '00_'"); - - // No digit between quotes in continuation line - parseQueryThatShouldFail("SELECT b, x from x WHERE x = " - + "x'0 12 ' --comment \n /* comment */ '34 5670' \n \n ''"); - parseQueryThatShouldFail("SELECT b, x from x WHERE x = " - + "b'0 10 ' --comment \n /* comment */ '00 000' \n \n ''"); - } + @Test + public void testUnsupportedLiteral() throws Exception { + String sql = (("select * from t where d = DECIMAL '2013-11-04 09:12:00'")); + try { + parseQuery(sql); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.TYPE_MISMATCH.getErrorCode(), e.getErrorCode()); + } + } + + @Test + public void testAnyElementExpression1() throws Exception { + String sql = "select * from t where 'a' = ANY(a)"; + parseQuery(sql); + } + + @Test + public void testAnyElementExpression2() throws Exception { + String sql = "select * from t where 'a' <= ANY(a-b+1)"; + parseQuery(sql); + } + + @Test + public void testAllElementExpression() throws Exception { + String sql = "select * from t where 'a' <= ALL(a-b+1)"; + parseQuery(sql); + } + + @Test + public void testDoubleBackslash() throws Exception { + String sql = "SELECT * FROM T WHERE A LIKE 'a\\(d'"; + parseQuery(sql); + } + + @Test + public void testUnicodeSpace() throws Exception { + // U+2002 (8194) is a "EN Space" which looks just like a normal space (0x20 in ascii) + String unicodeEnSpace = String.valueOf(Character.toChars(8194)); + String sql = Joiner.on(unicodeEnSpace).join(new String[] { "SELECT", "*", "FROM", "T" }); + parseQuery(sql); + } + + @Test + public void testInvalidTableOrSchemaName() throws Exception { + // namespace separator (:) cannot be used + parseQueryThatShouldFail("create table a:b (id varchar not null primary key)"); + parseQueryThatShouldFail("create table \"a:b\" (id varchar not null primary key)"); + // name separator (.) 
cannot be used without double quotes + parseQueryThatShouldFail("create table a.b.c.d (id varchar not null primary key)"); + parseQuery("create table \"a.b\".\"c.d\" (id varchar not null primary key)"); + parseQuery("create table \"a.b.c.d\" (id varchar not null primary key)"); + } + + @Test + public void testIntegerInOffsetSelect() throws Exception { + String sql = "SELECT * FROM T OFFSET 1"; + parseQuery(sql); + } + + @Test + public void testRVCInOffsetSelect() throws Exception { + String sql = "SELECT * FROM T OFFSET (A,B,C)=('a','b','c')"; + parseQuery(sql); + } + + @Test + public void testBindInOffsetSelect() throws Exception { + String sql = "SELECT * FROM T OFFSET ?"; + parseQuery(sql); + } + + @Test + public void testLongQuery() throws Exception { + String sql = "SELECT * FROM T WHERE a IN (1) OFFSET 1"; + parseQuery(sql); + } + + @Test + public void testLimitOffsetQuery() throws Exception { + String sql = "SELECT * FROM T LIMIT 10 OFFSET 1"; + parseQuery(sql); + } + + @Test + public void testLimitRVCOffsetQuery() throws Exception { + String sql = "SELECT * FROM T LIMIT 10 OFFSET (A,B,C)=('a','b','c')"; + parseQuery(sql); + } + + @Test + public void testShowStmt() throws Exception { + // Happy paths + parseQuery("show schemas"); + parseQuery("show schemas like 'foo%'"); + parseQuery("show tables"); + parseQuery("show tables in foo"); + parseQuery("show tables in foo like 'bar%'"); + parseQuery("show tables like 'bar%'"); + + // Expected failures. + parseQueryThatShouldFail("show schemas like foo"); + parseQueryThatShouldFail("show schemas in foo"); + parseQueryThatShouldFail("show tables 'foo'"); + parseQueryThatShouldFail("show tables in 'foo'"); + parseQueryThatShouldFail("show tables like foo"); + } + + @Test + public void testCreateSchema() throws Exception { + String sql0 = "create schema \"schema1\""; + parseQuery(sql0); + String sql1 = "create schema schema1"; + parseQuery(sql1); + String sql2 = "create schema \"default\""; + parseQuery(sql2); + String sql3 = "create schema \"DEFAULT\""; + parseQuery(sql3); + } + + @Test + public void testShowCreateTable() throws Exception { + // Happy paths + parseQuery("SHOW CREATE TABLE FOO"); + parseQuery("show create table FOO"); + parseQuery("SHOW CREATE TABLE s.FOO"); + parseQuery("SHOW CREATE TABLE \"foo\""); + parseQuery("SHOW CREATE TABLE s.\"foo\""); + parseQuery("SHOW CREATE TABLE \"s\".FOO"); + + // Expected failures. + parseQueryThatShouldFail("SHOW CREATE VIEW foo"); + parseQueryThatShouldFail("SHOW CREATE TABLE 'foo'"); + } + + @Test + public void testBinaryLiteral() throws Exception { + // As per ISO/IEC 9075-2:2011(E) 5.3 page 163 + // The literal syntax is: + // + // ::= + // X [ ... ] [ { [ ... ] [ ... ] }... ] + // [ { [ ... ] [ { [ ... ] + // [ ... ] }... ] }... ] + // + // With the current grammar we only approximate the multi-line syntax, but + // we're close enough for most practical purposes + // (We do not enforce a new line before continuation lines) + + // Happy paths + parseQuery("SELECT b, x from x WHERE x = x'00'"); + parseQuery( + "SELECT b, x from x WHERE x = " + "x'0 12 ' --comment \n /* comment */ '34 567' \n \n 'aA'"); + parseQuery("SELECT b, x from x WHERE x = " + + "b'0 10 ' --comment \n /* comment */ '10 101' \n \n '00000000'"); + + // Expected failures. 
+ // Space after 'x' + parseQueryThatShouldFailWithSQLException("SELECT b, x from x WHERE x = x '00'"); + parseQueryThatShouldFailWithSQLException("SELECT b, x from x WHERE b = b '00'"); + + // Illegal digit character in first line + parseQueryThatShouldFail("SELECT b, x from x WHERE x = " + + "x'X0 12 ' --comment \n /* comment */ '34 5670' \n \n 'aA'"); + parseQueryThatShouldFail("SELECT b, x from b WHERE b = " + + "b'B0 10 ' --comment \n /* comment */ '10 101' \n \n '000000000'"); + + // Illegal digit character in continuation line + parseQueryThatShouldFail("SELECT b, x from x WHERE x = " + + "x'0 12 ' --comment \n /* comment */ '34 5670' \n \n 'aA_'"); + parseQueryThatShouldFail("SELECT b, x from x WHERE x = " + + "b'0 10 ' --comment \n /* comment */ '00 0000' \n \n '00_'"); + + // No digit between quotes in continuation line + parseQueryThatShouldFail( + "SELECT b, x from x WHERE x = " + "x'0 12 ' --comment \n /* comment */ '34 5670' \n \n ''"); + parseQueryThatShouldFail( + "SELECT b, x from x WHERE x = " + "b'0 10 ' --comment \n /* comment */ '00 000' \n \n ''"); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java index 6a9eb7b8a33..c46365cc024 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -58,112 +58,127 @@ import org.junit.AfterClass; import org.junit.BeforeClass; +public class BaseConnectionlessQueryTest extends BaseTest { + public static PTable ATABLE; + public static Expression ORGANIZATION_ID; + public static Expression ENTITY_ID; + public static Expression A_INTEGER; + public static Expression A_STRING; + public static Expression B_STRING; + public static Expression A_DATE; + public static Expression A_TIME; + public static Expression A_TIMESTAMP; + public static Expression X_DECIMAL; -public class BaseConnectionlessQueryTest extends BaseTest { + protected static String getUrl() { + return TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL; + } - public static PTable ATABLE; - public static Expression ORGANIZATION_ID; - public static Expression ENTITY_ID; - public static Expression A_INTEGER; - public static Expression A_STRING; - public static Expression B_STRING; - public static Expression A_DATE; - public static Expression A_TIME; - public static Expression A_TIMESTAMP; - public static Expression X_DECIMAL; - - protected static String getUrl() { - return TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL; - } - - protected static String getUrl(String tenantId) { - return getUrl() + ';' + TENANT_ID_ATTRIB + '=' + tenantId; - } - - protected static PhoenixTestDriver driver; - - private static void startServer(String url) throws Exception { - assertNull(driver); - // only load the test driver if we are testing locally - for integration tests, we want to - // test on a wider scale - if (PhoenixEmbeddedDriver.isTestUrl(url)) { - Map props = Maps.newHashMapWithExpectedSize(1); - driver = initDriver(new ReadOnlyProps(props)); - 
assertTrue(DriverManager.getDriver(url) == driver); - driver.connect(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)); - } + protected static String getUrl(String tenantId) { + return getUrl() + ';' + TENANT_ID_ATTRIB + '=' + tenantId; + } + + protected static PhoenixTestDriver driver; + + private static void startServer(String url) throws Exception { + assertNull(driver); + // only load the test driver if we are testing locally - for integration tests, we want to + // test on a wider scale + if (PhoenixEmbeddedDriver.isTestUrl(url)) { + Map props = Maps.newHashMapWithExpectedSize(1); + driver = initDriver(new ReadOnlyProps(props)); + assertTrue(DriverManager.getDriver(url) == driver); + driver.connect(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)); } - - protected static synchronized PhoenixTestDriver initDriver(ReadOnlyProps props) throws Exception { - if (driver == null) { - driver = new PhoenixTestDriver(props); - DriverManager.registerDriver(driver); - } - return driver; + } + + protected static synchronized PhoenixTestDriver initDriver(ReadOnlyProps props) throws Exception { + if (driver == null) { + driver = new PhoenixTestDriver(props); + DriverManager.registerDriver(driver); } - - @BeforeClass - public static synchronized void doSetup() throws Exception { - startServer(getUrl()); - ensureTableCreated(getUrl(), ATABLE_NAME); - ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME); - ensureTableCreated(getUrl(), FUNKY_NAME); - ensureTableCreated(getUrl(), PTSDB_NAME); - ensureTableCreated(getUrl(), PTSDB2_NAME); - ensureTableCreated(getUrl(), PTSDB3_NAME); - ensureTableCreated(getUrl(), MULTI_CF_NAME); - ensureTableCreated(getUrl(), TABLE_WITH_ARRAY); - - - Properties props = new Properties(); - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP)); - PhoenixConnection conn = DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props).unwrap(PhoenixConnection.class); - try { - PTable table = conn.getTable(new PTableKey(null, ATABLE_NAME)); - ATABLE = table; - ORGANIZATION_ID = new ColumnRef(new TableRef(table), table.getColumnForColumnName("ORGANIZATION_ID").getPosition()).newColumnExpression(); - ENTITY_ID = new ColumnRef(new TableRef(table), table.getColumnForColumnName("ENTITY_ID").getPosition()).newColumnExpression(); - A_INTEGER = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression(); - A_STRING = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression(); - B_STRING = new ColumnRef(new TableRef(table), table.getColumnForColumnName("B_STRING").getPosition()).newColumnExpression(); - A_DATE = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_DATE").getPosition()).newColumnExpression(); - A_TIME = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_TIME").getPosition()).newColumnExpression(); - A_TIMESTAMP = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_TIMESTAMP").getPosition()).newColumnExpression(); - X_DECIMAL = new ColumnRef(new TableRef(table), table.getColumnForColumnName("X_DECIMAL").getPosition()).newColumnExpression(); - } finally { - conn.close(); - } + return driver; + } + + @BeforeClass + public static synchronized void doSetup() throws Exception { + startServer(getUrl()); + ensureTableCreated(getUrl(), ATABLE_NAME); + ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME); + ensureTableCreated(getUrl(), FUNKY_NAME); + ensureTableCreated(getUrl(), 
PTSDB_NAME); + ensureTableCreated(getUrl(), PTSDB2_NAME); + ensureTableCreated(getUrl(), PTSDB3_NAME); + ensureTableCreated(getUrl(), MULTI_CF_NAME); + ensureTableCreated(getUrl(), TABLE_WITH_ARRAY); + + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, + Long.toString(HConstants.LATEST_TIMESTAMP)); + PhoenixConnection conn = DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props) + .unwrap(PhoenixConnection.class); + try { + PTable table = conn.getTable(new PTableKey(null, ATABLE_NAME)); + ATABLE = table; + ORGANIZATION_ID = new ColumnRef(new TableRef(table), + table.getColumnForColumnName("ORGANIZATION_ID").getPosition()).newColumnExpression(); + ENTITY_ID = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("ENTITY_ID").getPosition()) + .newColumnExpression(); + A_INTEGER = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()) + .newColumnExpression(); + A_STRING = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()) + .newColumnExpression(); + B_STRING = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("B_STRING").getPosition()) + .newColumnExpression(); + A_DATE = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_DATE").getPosition()) + .newColumnExpression(); + A_TIME = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_TIME").getPosition()) + .newColumnExpression(); + A_TIMESTAMP = new ColumnRef(new TableRef(table), + table.getColumnForColumnName("A_TIMESTAMP").getPosition()).newColumnExpression(); + X_DECIMAL = + new ColumnRef(new TableRef(table), table.getColumnForColumnName("X_DECIMAL").getPosition()) + .newColumnExpression(); + } finally { + conn.close(); } - - @AfterClass - public static synchronized void doTeardown() throws Exception { - if (driver != null) { - try { - driver.close(); - } finally { - PhoenixTestDriver driver = BaseConnectionlessQueryTest.driver; - BaseConnectionlessQueryTest.driver = null; - DriverManager.deregisterDriver(driver); - } - } + } + + @AfterClass + public static synchronized void doTeardown() throws Exception { + if (driver != null) { + try { + driver.close(); + } finally { + PhoenixTestDriver driver = BaseConnectionlessQueryTest.driver; + BaseConnectionlessQueryTest.driver = null; + DriverManager.deregisterDriver(driver); + } } + } - protected static void assertRoundtrip(String sql) throws SQLException { - SQLParser parser = new SQLParser(sql); - BindableStatement stmt = null; - stmt = parser.parseStatement(); - if (stmt.getOperation() != Operation.QUERY) { - return; - } - String newSQL = stmt.toString(); - SQLParser newParser = new SQLParser(newSQL); - BindableStatement newStmt = null; - try { - newStmt = newParser.parseStatement(); - } catch (SQLException e) { - fail("Unable to parse new:\n" + newSQL); - } - assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt); + protected static void assertRoundtrip(String sql) throws SQLException { + SQLParser parser = new SQLParser(sql); + BindableStatement stmt = null; + stmt = parser.parseStatement(); + if (stmt.getOperation() != Operation.QUERY) { + return; + } + String newSQL = stmt.toString(); + SQLParser newParser = new SQLParser(newSQL); + BindableStatement newStmt = null; + try { + newStmt = newParser.parseStatement(); + } catch (SQLException e) { + fail("Unable to parse new:\n" + newSQL); } + assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt); + } } diff 
--git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java index 1405ad6355c..37b26569dc2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -126,13 +126,13 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -161,6 +161,11 @@ import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableAlreadyExistsException; import org.apache.phoenix.schema.TableNotFoundException; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.phoenix.util.ConfigUtil; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.PhoenixRuntime; @@ -176,2042 +181,1948 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** - * - * Base class that contains all the methods needed by - * client-time and hbase-time managed tests. - * - * Tests using a mini cluster need to be classified either - * as {@link ParallelStatsDisabledTest} or {@link ParallelStatsEnabledTest} - * or {@link NeedsOwnMiniClusterTest} otherwise they won't be run - * when one runs mvn verify or mvn install. - * - * For tests needing connectivity to a cluster, please use - * {@link ParallelStatsDisabledIT} or {@link ParallelStatsEnabledIT}. - * - * In the case when a test can't share the same mini cluster as the - * ones used by {@link ParallelStatsDisabledIT} or {@link ParallelStatsEnabledIT}, - * one could extend this class and spin up your own mini cluster. Please - * make sure to annotate such classes with {@link NeedsOwnMiniClusterTest} and - * shutdown the mini cluster in a method annotated by @AfterClass. 
- * + * Base class that contains all the methods needed by client-time and hbase-time managed tests. + * Tests using a mini cluster need to be classified either as {@link ParallelStatsDisabledTest} or + * {@link ParallelStatsEnabledTest} or {@link NeedsOwnMiniClusterTest} otherwise they won't be run + * when one runs mvn verify or mvn install. For tests needing connectivity to a cluster, please use + * {@link ParallelStatsDisabledIT} or {@link ParallelStatsEnabledIT}. In the case when a test can't + * share the same mini cluster as the ones used by {@link ParallelStatsDisabledIT} or + * {@link ParallelStatsEnabledIT}, one could extend this class and spin up your own mini cluster. + * Please make sure to annotate such classes with {@link NeedsOwnMiniClusterTest} and shutdown the + * mini cluster in a method annotated by @AfterClass. */ public abstract class BaseTest { - public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name"; - protected static final String NULL_STRING="NULL"; - private static final double ZERO = 1e-9; - private static final Map tableDDLMap; - private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class); - @ClassRule - public static TemporaryFolder tmpFolder = new TemporaryFolder(); - private static final int dropTableTimeout = 120; // 2 mins should be long enough. - private static final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("DROP-TABLE-BASETEST" + "-thread-%s").build(); - private static final ExecutorService dropHTableService = Executors - .newSingleThreadExecutor(factory); - - @ClassRule - public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); - - static { - ImmutableMap.Builder builder = ImmutableMap.builder(); - builder.put(ENTITY_HISTORY_TABLE_NAME,"create table " + ENTITY_HISTORY_TABLE_NAME + - " (organization_id char(15) not null,\n" + - " parent_id char(15) not null,\n" + - " created_date date not null,\n" + - " entity_history_id char(15) not null,\n" + - " old_value varchar,\n" + - " new_value varchar,\n" + //create table shouldn't blow up if the last column definition ends with a comma. 
- " CONSTRAINT pk PRIMARY KEY (organization_id, parent_id, created_date, entity_history_id)\n" + - ")"); - builder.put(ENTITY_HISTORY_SALTED_TABLE_NAME,"create table " + ENTITY_HISTORY_SALTED_TABLE_NAME + - " (organization_id char(15) not null,\n" + - " parent_id char(15) not null,\n" + - " created_date date not null,\n" + - " entity_history_id char(15) not null,\n" + - " old_value varchar,\n" + - " new_value varchar\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, parent_id, created_date, entity_history_id))\n" + - " SALT_BUCKETS = 4"); - builder.put(ATABLE_NAME,"create table " + ATABLE_NAME + - " (organization_id char(15) not null, \n" + - " entity_id char(15) not null,\n" + - " a_string varchar(100),\n" + - " b_string varchar(100),\n" + - " a_integer integer,\n" + - " a_date date,\n" + - " a_time time,\n" + - " a_timestamp timestamp,\n" + - " x_decimal decimal(31,10),\n" + - " x_long bigint,\n" + - " x_integer integer,\n" + - " y_integer integer,\n" + - " a_byte tinyint,\n" + - " a_short smallint,\n" + - " a_float float,\n" + - " a_double double,\n" + - " a_unsigned_float unsigned_float,\n" + - " a_unsigned_double unsigned_double\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" + - ") "); - builder.put(TABLE_WITH_ARRAY, "create table " - + TABLE_WITH_ARRAY - + " (organization_id char(15) not null, \n" - + " entity_id char(15) not null,\n" - + " a_string_array varchar(100) array[],\n" - + " b_string varchar(100),\n" - + " a_integer integer,\n" - + " a_date date,\n" - + " a_time time,\n" - + " a_timestamp timestamp,\n" - + " x_decimal decimal(31,10),\n" - + " x_long_array bigint array[],\n" - + " x_integer integer,\n" - + " a_byte_array tinyint array[],\n" - + " a_short smallint,\n" - + " a_float float,\n" - + " a_double_array double array[],\n" - + " a_unsigned_float unsigned_float,\n" - + " a_unsigned_double unsigned_double \n" - + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" - + ")"); - builder.put(BTABLE_NAME,"create table " + BTABLE_NAME + - " (a_string varchar not null, \n" + - " a_id char(3) not null,\n" + - " b_string varchar not null, \n" + - " a_integer integer not null, \n" + - " c_string varchar(2) null,\n" + - " b_integer integer,\n" + - " c_integer integer,\n" + - " d_string varchar(3),\n" + - " e_string char(10)\n" + - " CONSTRAINT my_pk PRIMARY KEY (a_string,a_id,b_string,a_integer,c_string))"); - builder.put(TABLE_WITH_SALTING,"create table " + TABLE_WITH_SALTING + - " (a_integer integer not null, \n" + - " a_string varchar not null, \n" + - " a_id char(3) not null,\n" + - " b_string varchar, \n" + - " b_integer integer \n" + - " CONSTRAINT pk PRIMARY KEY (a_integer, a_string, a_id))\n" + - " SALT_BUCKETS = 4"); - builder.put(STABLE_NAME,"create table " + STABLE_NAME + - " (id char(1) not null primary key,\n" + - " \"value\" integer)"); - builder.put(PTSDB_NAME,"create table " + PTSDB_NAME + - " (inst varchar null,\n" + - " host varchar null,\n" + - " date date not null,\n" + - " val decimal(31,10)\n" + - " CONSTRAINT pk PRIMARY KEY (inst, host, date))"); - builder.put(PTSDB2_NAME,"create table " + PTSDB2_NAME + - " (inst varchar(10) not null,\n" + - " date date not null,\n" + - " val1 decimal,\n" + - " val2 decimal(31,10),\n" + - " val3 decimal\n" + - " CONSTRAINT pk PRIMARY KEY (inst, date))"); - builder.put(PTSDB3_NAME,"create table " + PTSDB3_NAME + - " (host varchar(10) not null,\n" + - " date date not null,\n" + - " val1 decimal,\n" + - " val2 decimal(31,10),\n" + - " val3 decimal\n" + - " CONSTRAINT pk PRIMARY KEY (host DESC, 
date DESC))"); - builder.put(FUNKY_NAME,"create table " + FUNKY_NAME + - " (\"foo!\" varchar not null primary key,\n" + - " \"1\".\"#@$\" varchar, \n" + - " \"1\".\"foo.bar-bas\" varchar, \n" + - " \"1\".\"Value\" integer,\n" + - " \"1\".\"VALUE\" integer,\n" + - " \"1\".\"value\" integer,\n" + - " \"1\".\"_blah^\" varchar)" - ); - builder.put(MULTI_CF_NAME,"create table " + MULTI_CF_NAME + - " (id char(15) not null primary key,\n" + - " a.unique_user_count integer,\n" + - " b.unique_org_count integer,\n" + - " c.db_cpu_utilization decimal(31,10),\n" + - " d.transaction_count bigint,\n" + - " e.cpu_utilization decimal(31,10),\n" + - " f.response_time bigint,\n" + - " g.response_time bigint)"); - builder.put(HBASE_DYNAMIC_COLUMNS,"create table " + HBASE_DYNAMIC_COLUMNS + - " (entry varchar not null," + - " F varchar," + - " A.F1v1 varchar," + - " A.F1v2 varchar," + - " B.F2v1 varchar" + - " CONSTRAINT pk PRIMARY KEY (entry))\n"); - builder.put(PRODUCT_METRICS_NAME,"create table " + PRODUCT_METRICS_NAME + - " (organization_id char(15) not null," + - " date date not null," + - " feature char(1) not null," + - " unique_users integer not null,\n" + - " db_utilization decimal(31,10),\n" + - " transactions bigint,\n" + - " cpu_utilization decimal(31,10),\n" + - " response_time bigint,\n" + - " io_time bigint,\n" + - " region varchar,\n" + - " unset_column decimal(31,10)\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, \"DATE\", feature, UNIQUE_USERS))"); - builder.put(CUSTOM_ENTITY_DATA_FULL_NAME,"create table " + CUSTOM_ENTITY_DATA_FULL_NAME + - " (organization_id char(15) not null, \n" + - " key_prefix char(3) not null,\n" + - " custom_entity_data_id char(12) not null,\n" + - " created_by varchar,\n" + - " created_date date,\n" + - " currency_iso_code char(3),\n" + - " deleted char(1),\n" + - " division decimal(31,10),\n" + - " last_activity date,\n" + - " last_update date,\n" + - " last_update_by varchar,\n" + - " name varchar(240),\n" + - " owner varchar,\n" + - " record_type_id char(15),\n" + - " setup_owner varchar,\n" + - " system_modstamp date,\n" + - " b.val0 varchar,\n" + - " b.val1 varchar,\n" + - " b.val2 varchar,\n" + - " b.val3 varchar,\n" + - " b.val4 varchar,\n" + - " b.val5 varchar,\n" + - " b.val6 varchar,\n" + - " b.val7 varchar,\n" + - " b.val8 varchar,\n" + - " b.val9 varchar\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, key_prefix, custom_entity_data_id))"); - builder.put("IntKeyTest","create table IntKeyTest" + - " (i integer not null primary key)"); - builder.put("IntIntKeyTest","create table IntIntKeyTest" + - " (i integer not null primary key, j integer)"); - builder.put("PKIntValueTest", "create table PKIntValueTest" + - " (pk integer not null primary key)"); - builder.put("PKBigIntValueTest", "create table PKBigIntValueTest" + - " (pk bigint not null primary key)"); - builder.put("PKUnsignedIntValueTest", "create table PKUnsignedIntValueTest" + - " (pk unsigned_int not null primary key)"); - builder.put("PKUnsignedLongValueTest", "create table PKUnsignedLongValueTest" + - " (pk unsigned_long not null\n" + - " CONSTRAINT pk PRIMARY KEY (pk))"); - builder.put("KVIntValueTest", "create table KVIntValueTest" + - " (pk integer not null primary key,\n" + - " kv integer)\n"); - builder.put("KVBigIntValueTest", "create table KVBigIntValueTest" + - " (pk integer not null primary key,\n" + - " kv bigint)\n"); - builder.put(SUM_DOUBLE_NAME,"create table SumDoubleTest" + - " (id varchar not null primary key, d DOUBLE, f FLOAT, ud UNSIGNED_DOUBLE, uf UNSIGNED_FLOAT, i 
integer, de decimal)"); - builder.put(BINARY_NAME,"create table " + BINARY_NAME + - " (a_binary BINARY(16) not null, \n" + - " b_binary BINARY(16), \n" + - " a_varbinary VARBINARY, \n" + - " b_varbinary VARBINARY, \n" + - " CONSTRAINT pk PRIMARY KEY (a_binary)\n" + - ") "); - tableDDLMap = builder.build(); - } - - private static final String ORG_ID = "00D300000000XHP"; - protected static int NUM_SLAVES_BASE = 1; - private static final String DEFAULT_RPC_SCHEDULER_FACTORY = PhoenixRpcSchedulerFactory.class.getName(); - - protected static String getZKClientPort(Configuration conf) { - return conf.get(QueryServices.ZOOKEEPER_PORT_ATTRIB); - } - - protected static String url; - protected static PhoenixTestDriver driver; - protected static boolean clusterInitialized = false; - protected static HBaseTestingUtility utility; - protected static final Configuration config = HBaseConfiguration.create(); - - protected static String getUrl() { - if (!clusterInitialized) { - throw new IllegalStateException("Cluster must be initialized before attempting to get the URL"); - } - return url; - } - - protected static String checkClusterInitialized(ReadOnlyProps serverProps) throws Exception { - if (!clusterInitialized) { - url = setUpTestCluster(config, serverProps); - clusterInitialized = true; - } - return url; - } - - /** - * Set up the test hbase cluster. - * @return url to be used by clients to connect to the cluster. - * @throws IOException - */ - protected static String setUpTestCluster(@Nonnull Configuration conf, ReadOnlyProps overrideProps) throws Exception { - boolean isDistributedCluster = isDistributedClusterModeEnabled(conf); - if (!isDistributedCluster) { - return initMiniCluster(conf, overrideProps); - } else { - return initClusterDistributedMode(conf, overrideProps); - } - } - - protected static void destroyDriver() { - if (driver != null) { - try { - assertTrue(destroyDriver(driver)); - } catch (Throwable t) { - LOGGER.error("Exception caught when destroying phoenix test driver", t); - } finally { - driver = null; - } - } - } - - protected synchronized static void dropNonSystemTables() throws Exception { - try { - disableAndDropNonSystemTables(); - } finally { - destroyDriver(); - } - } - - //Note that newer miniCluster versions will overwrite "java.io.tmpdir" system property. - //After you shut down the minicluster, it will point to a non-existent directory - //You will need to save the original "java.io.tmpdir" before starting the miniCluster, and - //restore it after shutting it down, if you want to keep using the JVM. - public static synchronized void tearDownMiniCluster(final int numTables) { - long startTime = System.currentTimeMillis(); - try { - ConnectionFactory.shutdown(); - destroyDriver(); - utility.shutdownMiniMapReduceCluster(); - } catch (Throwable t) { - LOGGER.error("Exception caught when shutting down mini map reduce cluster", t); - } finally { - try { - // Clear ServerMetadataCache. 
- ServerMetadataCacheTestImpl.resetCache(); - utility.shutdownMiniCluster(); - } catch (Throwable t) { - LOGGER.error("Exception caught when shutting down mini cluster", t); - } finally { - clusterInitialized = false; - utility = null; - LOGGER.info("Time in seconds spent in shutting down mini cluster with " + numTables - + " tables: " + (System.currentTimeMillis() - startTime) / 1000); - } - } - } - - public static synchronized void resetHbase() { - try { - ConnectionFactory.shutdown(); - destroyDriver(); - disableAndDropAllTables(); - } catch (Exception e) { - LOGGER.error("Error resetting HBase"); - } - } - - protected static synchronized void setUpTestDriver(ReadOnlyProps props) throws Exception { - setUpTestDriver(props, props); - } - - protected static synchronized void setUpTestDriver(ReadOnlyProps serverProps, ReadOnlyProps clientProps) throws Exception { - if (driver == null) { - String url = checkClusterInitialized(serverProps); - driver = initAndRegisterTestDriver(url, clientProps); - } - } - - private static boolean isDistributedClusterModeEnabled(Configuration conf) { - boolean isDistributedCluster = false; - //check if the distributed mode was specified as a system property. - isDistributedCluster = Boolean.parseBoolean(System.getProperty(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, "false")); - if (!isDistributedCluster) { - //fall back on hbase-default.xml or hbase-site.xml to check for distributed mode - isDistributedCluster = conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false); - } - return isDistributedCluster; - } - - /** - * Initialize the mini cluster using phoenix-test specific configuration. - * @param overrideProps TODO - * @return url to be used by clients to connect to the mini cluster. - * @throws Exception - */ - private static synchronized String initMiniCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception { - setUpConfigForMiniCluster(conf, overrideProps); - utility = new HBaseTestingUtility(conf); - try { - long startTime = System.currentTimeMillis(); - utility.startMiniCluster(overrideProps.getInt( - QueryServices.TESTS_MINI_CLUSTER_NUM_REGION_SERVERS, NUM_SLAVES_BASE)); - long startupTime = System.currentTimeMillis()-startTime; - LOGGER.info("HBase minicluster startup complete in {} ms", startupTime); - return getLocalClusterUrl(utility); - } catch (Throwable t) { - throw new RuntimeException(t); - } - } - - protected static String getLocalClusterUrl(HBaseTestingUtility util) throws Exception { - String url = QueryUtil.getConnectionUrl(new Properties(), util.getConfiguration()); - return url + PHOENIX_TEST_DRIVER_URL_PARAM; - } - - /** - * Initialize the cluster in distributed mode - * @param overrideProps TODO - * @return url to be used by clients to connect to the mini cluster. 
- * @throws Exception - */ - private static String initClusterDistributedMode(Configuration conf, ReadOnlyProps overrideProps) throws Exception { - setTestConfigForDistribuedCluster(conf, overrideProps); - try { - IntegrationTestingUtility util = new IntegrationTestingUtility(conf); - utility = util; - util.initializeCluster(NUM_SLAVES_BASE); - } catch (Exception e) { - throw new RuntimeException(e); - } - return JDBC_PROTOCOL + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; - } - - private static void setTestConfigForDistribuedCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception { - setDefaultTestConfig(conf, overrideProps); - } - - private static void setDefaultTestConfig(Configuration conf, ReadOnlyProps overrideProps) throws Exception { - ConfigUtil.setReplicationConfigIfAbsent(conf); - QueryServices services = newTestDriver(overrideProps).getQueryServices(); - for (Entry entry : services.getProps()) { - conf.set(entry.getKey(), entry.getValue()); - } - //no point doing sanity checks when running tests. - conf.setBoolean("hbase.table.sanity.checks", false); - // set the server rpc controller and rpc scheduler factory, used to configure the cluster - conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, DEFAULT_RPC_SCHEDULER_FACTORY); - conf.setLong(HConstants.ZK_SESSION_TIMEOUT, 10 * HConstants.DEFAULT_ZK_SESSION_TIMEOUT); - conf.setLong(HConstants.ZOOKEEPER_TICK_TIME, 6 * 1000); - - // override any defaults based on overrideProps - for (Entry entry : overrideProps) { - conf.set(entry.getKey(), entry.getValue()); - } - } - - public static Configuration setUpConfigForMiniCluster(Configuration conf) throws Exception { - return setUpConfigForMiniCluster(conf, ReadOnlyProps.EMPTY_PROPS); - } - - public static Configuration setUpConfigForMiniCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception { - assertNotNull(conf); - setDefaultTestConfig(conf, overrideProps); - /* - * The default configuration of mini cluster ends up spawning a lot of threads - * that are not really needed by phoenix for test purposes. Limiting these threads - * helps us in running several mini clusters at the same time without hitting - * the threads limit imposed by the OS. - */ - conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5); - conf.setInt("hbase.regionserver.metahandler.count", 2); - conf.setInt("dfs.namenode.handler.count", 2); - conf.setInt("dfs.namenode.service.handler.count", 2); - conf.setInt("dfs.datanode.handler.count", 2); - conf.setInt("ipc.server.read.threadpool.size", 2); - conf.setInt("ipc.server.handler.threadpool.size", 2); - conf.setInt("hbase.regionserver.hlog.syncer.count", 2); - conf.setInt("hbase.hfile.compaction.discharger.interval", 5000); - conf.setInt("hbase.hlog.asyncer.number", 2); - conf.setInt("hbase.assignment.zkevent.workers", 5); - conf.setInt("hbase.assignment.threads.max", 5); - conf.setInt("hbase.catalogjanitor.interval", 5000); - conf.setInt(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, 10000); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); - conf.setInt(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, 1); - conf.setInt(GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, 0); - // This results in processing one row at a time in each next operation of the aggregate region - // scanner, i.e., one row pages. 
In other words, 0ms page allows only one row to be processed - // within one page; 0ms page is equivalent to one-row page - if (conf.getLong(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, 0) == 0) { - conf.setLong(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, 0); - } - setPhoenixRegionServerEndpoint(conf); - return conf; - } - + public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name"; + protected static final String NULL_STRING = "NULL"; + private static final double ZERO = 1e-9; + private static final Map tableDDLMap; + private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class); + @ClassRule + public static TemporaryFolder tmpFolder = new TemporaryFolder(); + private static final int dropTableTimeout = 120; // 2 mins should be long enough. + private static final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("DROP-TABLE-BASETEST" + "-thread-%s").build(); + private static final ExecutorService dropHTableService = + Executors.newSingleThreadExecutor(factory); + + @ClassRule + public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); + + static { + ImmutableMap.Builder builder = ImmutableMap.builder(); + builder.put(ENTITY_HISTORY_TABLE_NAME, + "create table " + ENTITY_HISTORY_TABLE_NAME + " (organization_id char(15) not null,\n" + + " parent_id char(15) not null,\n" + " created_date date not null,\n" + + " entity_history_id char(15) not null,\n" + " old_value varchar,\n" + + " new_value varchar,\n" + // create table shouldn't blow up if the last column + // definition ends with a comma. + " CONSTRAINT pk PRIMARY KEY (organization_id, parent_id, created_date, entity_history_id)\n" + + ")"); + builder.put(ENTITY_HISTORY_SALTED_TABLE_NAME, "create table " + ENTITY_HISTORY_SALTED_TABLE_NAME + + " (organization_id char(15) not null,\n" + " parent_id char(15) not null,\n" + + " created_date date not null,\n" + " entity_history_id char(15) not null,\n" + + " old_value varchar,\n" + " new_value varchar\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, parent_id, created_date, entity_history_id))\n" + + " SALT_BUCKETS = 4"); + builder.put(ATABLE_NAME, + "create table " + ATABLE_NAME + " (organization_id char(15) not null, \n" + + " entity_id char(15) not null,\n" + " a_string varchar(100),\n" + + " b_string varchar(100),\n" + " a_integer integer,\n" + " a_date date,\n" + + " a_time time,\n" + " a_timestamp timestamp,\n" + " x_decimal decimal(31,10),\n" + + " x_long bigint,\n" + " x_integer integer,\n" + " y_integer integer,\n" + + " a_byte tinyint,\n" + " a_short smallint,\n" + " a_float float,\n" + + " a_double double,\n" + " a_unsigned_float unsigned_float,\n" + + " a_unsigned_double unsigned_double\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" + ") "); + builder.put(TABLE_WITH_ARRAY, + "create table " + TABLE_WITH_ARRAY + " (organization_id char(15) not null, \n" + + " entity_id char(15) not null,\n" + " a_string_array varchar(100) array[],\n" + + " b_string varchar(100),\n" + " a_integer integer,\n" + " a_date date,\n" + + " a_time time,\n" + " a_timestamp timestamp,\n" + " x_decimal decimal(31,10),\n" + + " x_long_array bigint array[],\n" + " x_integer integer,\n" + + " a_byte_array tinyint array[],\n" + " a_short smallint,\n" + " a_float float,\n" + + " a_double_array double array[],\n" + " a_unsigned_float unsigned_float,\n" + + " a_unsigned_double unsigned_double \n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" + ")"); + builder.put(BTABLE_NAME, + "create 
table " + BTABLE_NAME + " (a_string varchar not null, \n" + + " a_id char(3) not null,\n" + " b_string varchar not null, \n" + + " a_integer integer not null, \n" + " c_string varchar(2) null,\n" + + " b_integer integer,\n" + " c_integer integer,\n" + " d_string varchar(3),\n" + + " e_string char(10)\n" + + " CONSTRAINT my_pk PRIMARY KEY (a_string,a_id,b_string,a_integer,c_string))"); + builder.put(TABLE_WITH_SALTING, + "create table " + TABLE_WITH_SALTING + " (a_integer integer not null, \n" + + " a_string varchar not null, \n" + " a_id char(3) not null,\n" + + " b_string varchar, \n" + " b_integer integer \n" + + " CONSTRAINT pk PRIMARY KEY (a_integer, a_string, a_id))\n" + " SALT_BUCKETS = 4"); + builder.put(STABLE_NAME, "create table " + STABLE_NAME + + " (id char(1) not null primary key,\n" + " \"value\" integer)"); + builder.put(PTSDB_NAME, + "create table " + PTSDB_NAME + " (inst varchar null,\n" + " host varchar null,\n" + + " date date not null,\n" + " val decimal(31,10)\n" + + " CONSTRAINT pk PRIMARY KEY (inst, host, date))"); + builder.put(PTSDB2_NAME, + "create table " + PTSDB2_NAME + " (inst varchar(10) not null,\n" + + " date date not null,\n" + " val1 decimal,\n" + " val2 decimal(31,10),\n" + + " val3 decimal\n" + " CONSTRAINT pk PRIMARY KEY (inst, date))"); + builder.put(PTSDB3_NAME, + "create table " + PTSDB3_NAME + " (host varchar(10) not null,\n" + + " date date not null,\n" + " val1 decimal,\n" + " val2 decimal(31,10),\n" + + " val3 decimal\n" + " CONSTRAINT pk PRIMARY KEY (host DESC, date DESC))"); + builder.put(FUNKY_NAME, + "create table " + FUNKY_NAME + " (\"foo!\" varchar not null primary key,\n" + + " \"1\".\"#@$\" varchar, \n" + " \"1\".\"foo.bar-bas\" varchar, \n" + + " \"1\".\"Value\" integer,\n" + " \"1\".\"VALUE\" integer,\n" + + " \"1\".\"value\" integer,\n" + " \"1\".\"_blah^\" varchar)"); + builder.put(MULTI_CF_NAME, + "create table " + MULTI_CF_NAME + " (id char(15) not null primary key,\n" + + " a.unique_user_count integer,\n" + " b.unique_org_count integer,\n" + + " c.db_cpu_utilization decimal(31,10),\n" + " d.transaction_count bigint,\n" + + " e.cpu_utilization decimal(31,10),\n" + " f.response_time bigint,\n" + + " g.response_time bigint)"); + builder.put(HBASE_DYNAMIC_COLUMNS, + "create table " + HBASE_DYNAMIC_COLUMNS + " (entry varchar not null," + " F varchar," + + " A.F1v1 varchar," + " A.F1v2 varchar," + " B.F2v1 varchar" + + " CONSTRAINT pk PRIMARY KEY (entry))\n"); + builder.put(PRODUCT_METRICS_NAME, + "create table " + PRODUCT_METRICS_NAME + " (organization_id char(15) not null," + + " date date not null," + " feature char(1) not null," + + " unique_users integer not null,\n" + " db_utilization decimal(31,10),\n" + + " transactions bigint,\n" + " cpu_utilization decimal(31,10),\n" + + " response_time bigint,\n" + " io_time bigint,\n" + " region varchar,\n" + + " unset_column decimal(31,10)\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, \"DATE\", feature, UNIQUE_USERS))"); + builder.put(CUSTOM_ENTITY_DATA_FULL_NAME, + "create table " + CUSTOM_ENTITY_DATA_FULL_NAME + " (organization_id char(15) not null, \n" + + " key_prefix char(3) not null,\n" + " custom_entity_data_id char(12) not null,\n" + + " created_by varchar,\n" + " created_date date,\n" + + " currency_iso_code char(3),\n" + " deleted char(1),\n" + + " division decimal(31,10),\n" + " last_activity date,\n" + " last_update date,\n" + + " last_update_by varchar,\n" + " name varchar(240),\n" + " owner varchar,\n" + + " record_type_id char(15),\n" + " setup_owner varchar,\n" + + " 
system_modstamp date,\n" + " b.val0 varchar,\n" + " b.val1 varchar,\n" + + " b.val2 varchar,\n" + " b.val3 varchar,\n" + " b.val4 varchar,\n" + + " b.val5 varchar,\n" + " b.val6 varchar,\n" + " b.val7 varchar,\n" + + " b.val8 varchar,\n" + " b.val9 varchar\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, key_prefix, custom_entity_data_id))"); + builder.put("IntKeyTest", "create table IntKeyTest" + " (i integer not null primary key)"); + builder.put("IntIntKeyTest", + "create table IntIntKeyTest" + " (i integer not null primary key, j integer)"); + builder.put("PKIntValueTest", + "create table PKIntValueTest" + " (pk integer not null primary key)"); + builder.put("PKBigIntValueTest", + "create table PKBigIntValueTest" + " (pk bigint not null primary key)"); + builder.put("PKUnsignedIntValueTest", + "create table PKUnsignedIntValueTest" + " (pk unsigned_int not null primary key)"); + builder.put("PKUnsignedLongValueTest", "create table PKUnsignedLongValueTest" + + " (pk unsigned_long not null\n" + " CONSTRAINT pk PRIMARY KEY (pk))"); + builder.put("KVIntValueTest", "create table KVIntValueTest" + + " (pk integer not null primary key,\n" + " kv integer)\n"); + builder.put("KVBigIntValueTest", "create table KVBigIntValueTest" + + " (pk integer not null primary key,\n" + " kv bigint)\n"); + builder.put(SUM_DOUBLE_NAME, "create table SumDoubleTest" + + " (id varchar not null primary key, d DOUBLE, f FLOAT, ud UNSIGNED_DOUBLE, uf UNSIGNED_FLOAT, i integer, de decimal)"); + builder.put(BINARY_NAME, + "create table " + BINARY_NAME + " (a_binary BINARY(16) not null, \n" + + " b_binary BINARY(16), \n" + " a_varbinary VARBINARY, \n" + + " b_varbinary VARBINARY, \n" + " CONSTRAINT pk PRIMARY KEY (a_binary)\n" + ") "); + tableDDLMap = builder.build(); + } + + private static final String ORG_ID = "00D300000000XHP"; + protected static int NUM_SLAVES_BASE = 1; + private static final String DEFAULT_RPC_SCHEDULER_FACTORY = + PhoenixRpcSchedulerFactory.class.getName(); + + protected static String getZKClientPort(Configuration conf) { + return conf.get(QueryServices.ZOOKEEPER_PORT_ATTRIB); + } + + protected static String url; + protected static PhoenixTestDriver driver; + protected static boolean clusterInitialized = false; + protected static HBaseTestingUtility utility; + protected static final Configuration config = HBaseConfiguration.create(); + + protected static String getUrl() { + if (!clusterInitialized) { + throw new IllegalStateException( + "Cluster must be initialized before attempting to get the URL"); + } + return url; + } + + protected static String checkClusterInitialized(ReadOnlyProps serverProps) throws Exception { + if (!clusterInitialized) { + url = setUpTestCluster(config, serverProps); + clusterInitialized = true; + } + return url; + } + + /** + * Set up the test hbase cluster. + * @return url to be used by clients to connect to the cluster. 
+ */ + protected static String setUpTestCluster(@Nonnull Configuration conf, ReadOnlyProps overrideProps) + throws Exception { + boolean isDistributedCluster = isDistributedClusterModeEnabled(conf); + if (!isDistributedCluster) { + return initMiniCluster(conf, overrideProps); + } else { + return initClusterDistributedMode(conf, overrideProps); + } + } + + protected static void destroyDriver() { + if (driver != null) { + try { + assertTrue(destroyDriver(driver)); + } catch (Throwable t) { + LOGGER.error("Exception caught when destroying phoenix test driver", t); + } finally { + driver = null; + } + } + } + + protected synchronized static void dropNonSystemTables() throws Exception { + try { + disableAndDropNonSystemTables(); + } finally { + destroyDriver(); + } + } + + // Note that newer miniCluster versions will overwrite "java.io.tmpdir" system property. + // After you shut down the minicluster, it will point to a non-existent directory + // You will need to save the original "java.io.tmpdir" before starting the miniCluster, and + // restore it after shutting it down, if you want to keep using the JVM. + public static synchronized void tearDownMiniCluster(final int numTables) { + long startTime = System.currentTimeMillis(); + try { + ConnectionFactory.shutdown(); + destroyDriver(); + utility.shutdownMiniMapReduceCluster(); + } catch (Throwable t) { + LOGGER.error("Exception caught when shutting down mini map reduce cluster", t); + } finally { + try { + // Clear ServerMetadataCache. + ServerMetadataCacheTestImpl.resetCache(); + utility.shutdownMiniCluster(); + } catch (Throwable t) { + LOGGER.error("Exception caught when shutting down mini cluster", t); + } finally { + clusterInitialized = false; + utility = null; + LOGGER.info("Time in seconds spent in shutting down mini cluster with " + numTables + + " tables: " + (System.currentTimeMillis() - startTime) / 1000); + } + } + } + + public static synchronized void resetHbase() { + try { + ConnectionFactory.shutdown(); + destroyDriver(); + disableAndDropAllTables(); + } catch (Exception e) { + LOGGER.error("Error resetting HBase"); + } + } + + protected static synchronized void setUpTestDriver(ReadOnlyProps props) throws Exception { + setUpTestDriver(props, props); + } + + protected static synchronized void setUpTestDriver(ReadOnlyProps serverProps, + ReadOnlyProps clientProps) throws Exception { + if (driver == null) { + String url = checkClusterInitialized(serverProps); + driver = initAndRegisterTestDriver(url, clientProps); + } + } + + private static boolean isDistributedClusterModeEnabled(Configuration conf) { + boolean isDistributedCluster = false; + // check if the distributed mode was specified as a system property. + isDistributedCluster = Boolean + .parseBoolean(System.getProperty(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, "false")); + if (!isDistributedCluster) { + // fall back on hbase-default.xml or hbase-site.xml to check for distributed mode + isDistributedCluster = + conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false); + } + return isDistributedCluster; + } + + /** + * Initialize the mini cluster using phoenix-test specific configuration. + * @param overrideProps TODO + * @return url to be used by clients to connect to the mini cluster. 
+ */ + private static synchronized String initMiniCluster(Configuration conf, + ReadOnlyProps overrideProps) throws Exception { + setUpConfigForMiniCluster(conf, overrideProps); + utility = new HBaseTestingUtility(conf); + try { + long startTime = System.currentTimeMillis(); + utility.startMiniCluster( + overrideProps.getInt(QueryServices.TESTS_MINI_CLUSTER_NUM_REGION_SERVERS, NUM_SLAVES_BASE)); + long startupTime = System.currentTimeMillis() - startTime; + LOGGER.info("HBase minicluster startup complete in {} ms", startupTime); + return getLocalClusterUrl(utility); + } catch (Throwable t) { + throw new RuntimeException(t); + } + } + + protected static String getLocalClusterUrl(HBaseTestingUtility util) throws Exception { + String url = QueryUtil.getConnectionUrl(new Properties(), util.getConfiguration()); + return url + PHOENIX_TEST_DRIVER_URL_PARAM; + } + + /** + * Initialize the cluster in distributed mode + * @param overrideProps TODO + * @return url to be used by clients to connect to the mini cluster. + */ + private static String initClusterDistributedMode(Configuration conf, ReadOnlyProps overrideProps) + throws Exception { + setTestConfigForDistribuedCluster(conf, overrideProps); + try { + IntegrationTestingUtility util = new IntegrationTestingUtility(conf); + utility = util; + util.initializeCluster(NUM_SLAVES_BASE); + } catch (Exception e) { + throw new RuntimeException(e); + } + return JDBC_PROTOCOL + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; + } + + private static void setTestConfigForDistribuedCluster(Configuration conf, + ReadOnlyProps overrideProps) throws Exception { + setDefaultTestConfig(conf, overrideProps); + } + + private static void setDefaultTestConfig(Configuration conf, ReadOnlyProps overrideProps) + throws Exception { + ConfigUtil.setReplicationConfigIfAbsent(conf); + QueryServices services = newTestDriver(overrideProps).getQueryServices(); + for (Entry entry : services.getProps()) { + conf.set(entry.getKey(), entry.getValue()); + } + // no point doing sanity checks when running tests. + conf.setBoolean("hbase.table.sanity.checks", false); + // set the server rpc controller and rpc scheduler factory, used to configure the cluster + conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, + DEFAULT_RPC_SCHEDULER_FACTORY); + conf.setLong(HConstants.ZK_SESSION_TIMEOUT, 10 * HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + conf.setLong(HConstants.ZOOKEEPER_TICK_TIME, 6 * 1000); + + // override any defaults based on overrideProps + for (Entry entry : overrideProps) { + conf.set(entry.getKey(), entry.getValue()); + } + } + + public static Configuration setUpConfigForMiniCluster(Configuration conf) throws Exception { + return setUpConfigForMiniCluster(conf, ReadOnlyProps.EMPTY_PROPS); + } + + public static Configuration setUpConfigForMiniCluster(Configuration conf, + ReadOnlyProps overrideProps) throws Exception { + assertNotNull(conf); + setDefaultTestConfig(conf, overrideProps); /* - Set property hbase.coprocessor.regionserver.classes to include test implementation of - PhoenixRegionServerEndpoint by default, if some other regionserver coprocs - are not already present. + * The default configuration of mini cluster ends up spawning a lot of threads that are not + * really needed by phoenix for test purposes. Limiting these threads helps us in running + * several mini clusters at the same time without hitting the threads limit imposed by the OS. 
*/ - protected static void setPhoenixRegionServerEndpoint(Configuration conf) { - String value = conf.get(REGIONSERVER_COPROCESSOR_CONF_KEY); - if (value == null) { - value = PhoenixRegionServerEndpointTestImpl.class.getName(); - } - else { - value = value + "," + PhoenixRegionServerEndpointTestImpl.class.getName(); - } - conf.set(REGIONSERVER_COPROCESSOR_CONF_KEY, value); - } - private static PhoenixTestDriver newTestDriver(ReadOnlyProps props) throws Exception { - PhoenixTestDriver newDriver; - String driverClassName = props.get(DRIVER_CLASS_NAME_ATTRIB); - if(isDistributedClusterModeEnabled(config)) { - HashMap distPropMap = new HashMap<>(1); - distPropMap.put(DROP_METADATA_ATTRIB, Boolean.TRUE.toString()); - props = new ReadOnlyProps(props, distPropMap.entrySet().iterator()); - } - if (driverClassName == null) { - newDriver = new PhoenixTestDriver(props); - } else { - Class clazz = Class.forName(driverClassName); - Constructor constr = clazz.getConstructor(ReadOnlyProps.class); - newDriver = (PhoenixTestDriver)constr.newInstance(props); - } - return newDriver; - } - /** - * Create a {@link PhoenixTestDriver} and register it. - * @return an initialized and registered {@link PhoenixTestDriver} - */ - public static synchronized PhoenixTestDriver initAndRegisterTestDriver(String url, ReadOnlyProps props) throws Exception { - PhoenixTestDriver newDriver = newTestDriver(props); - DriverManager.registerDriver(newDriver); - Driver oldDriver = DriverManager.getDriver(url); - if (oldDriver != newDriver) { - destroyDriver(oldDriver); - } - Properties driverProps = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = newDriver.connect(url, driverProps); - conn.close(); - return newDriver; - } - - //Close and unregister the driver. - protected static synchronized boolean destroyDriver(Driver driver) { - if (driver != null) { - assert(driver instanceof PhoenixEmbeddedDriver); - PhoenixEmbeddedDriver pdriver = (PhoenixEmbeddedDriver)driver; - try { - try { - pdriver.close(); - return true; - } finally { - DriverManager.deregisterDriver(driver); - } - } catch (Exception e) { - LOGGER.warn("Unable to close registered driver: " + driver, e); - } - } - return false; - } - - protected static String getOrganizationId() { - return ORG_ID; - } - - private static long timestamp; - - public static synchronized long nextTimestamp() { - timestamp += 100; - return timestamp; - } - - public static boolean twoDoubleEquals(double a, double b) { - if (Double.isNaN(a) ^ Double.isNaN(b)) return false; - if (Double.isNaN(a)) return true; - if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false; - if (Double.isInfinite(a)) { - if ((a > 0) ^ (b > 0)) return false; - else return true; - } - if (Math.abs(a - b) <= ZERO) { - return true; - } else { - return false; - } - } - - protected static void ensureTableCreated(String url, String tableName) throws SQLException { - ensureTableCreated(url, tableName, tableName, null, null, null); - } - - protected static void ensureTableCreated(String url, String tableName, String tableDDLType) throws SQLException { - ensureTableCreated(url, tableName, tableDDLType, null, null, null); - } - - public static void ensureTableCreated(String url, String tableName, String tableDDLType, byte[][] splits, String tableDDLOptions) throws SQLException { - ensureTableCreated(url, tableName, tableDDLType, splits, null, tableDDLOptions); - } - - protected static void ensureTableCreated(String url, String tableName, String tableDDLType, Long ts) throws SQLException { - 
ensureTableCreated(url, tableName, tableDDLType, null, ts, null); - } - - protected static void ensureTableCreated(String url, String tableName, String tableDDLType, byte[][] splits, Long ts, String tableDDLOptions) throws SQLException { - String ddl = tableDDLMap.get(tableDDLType); - if(!tableDDLType.equals(tableName)) { - ddl = ddl.replace(tableDDLType, tableName); - } - if (tableDDLOptions!=null) { - ddl += tableDDLOptions; - } - createSchema(url,tableName, ts); - createTestTable(url, ddl, splits, ts); - } - - protected ResultSet executeQuery(Connection conn, QueryBuilder queryBuilder) throws SQLException { - PreparedStatement statement = conn.prepareStatement(queryBuilder.build()); - ResultSet rs = statement.executeQuery(); - return rs; - } - - private static AtomicInteger NAME_SUFFIX = new AtomicInteger(0); - private static final int MAX_SUFFIX_VALUE = 1000000; - - /** - * Counter to track number of tables we have created. This isn't really accurate since this - * counter will be incremented when we call {@link #generateUniqueName()}for getting unique - * schema and sequence names too. But this will have to do. - */ - private static final AtomicInteger TABLE_COUNTER = new AtomicInteger(0); - /* - * Threshold to monitor if we need to restart mini-cluster since we created too many tables. - * Note, we can't have this value too high since we don't want the shutdown to take too - * long a time either. - */ - private static final int TEARDOWN_THRESHOLD = 30; - - public static String generateUniqueName() { - int nextName = NAME_SUFFIX.incrementAndGet(); - if (nextName >= MAX_SUFFIX_VALUE) { - throw new IllegalStateException("Used up all unique names"); - } - TABLE_COUNTER.incrementAndGet(); - return "N" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1); - } - - private static AtomicInteger SEQ_NAME_SUFFIX = new AtomicInteger(0); - private static final int MAX_SEQ_SUFFIX_VALUE = 1000000; - - private static final AtomicInteger SEQ_COUNTER = new AtomicInteger(0); - - public static String generateUniqueSequenceName() { - int nextName = SEQ_NAME_SUFFIX.incrementAndGet(); - if (nextName >= MAX_SEQ_SUFFIX_VALUE) { - throw new IllegalStateException("Used up all unique sequence names"); - } - SEQ_COUNTER.incrementAndGet(); - return "S" + Integer.toString(MAX_SEQ_SUFFIX_VALUE + nextName).substring(1); - } - - public static void assertMetadata(Connection conn, PTable.ImmutableStorageScheme expectedStorageScheme, PTable.QualifierEncodingScheme - expectedColumnEncoding, String tableName) - throws Exception { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - PTable table = phxConn.getTableNoCache(tableName); - assertEquals(expectedStorageScheme, table.getImmutableStorageScheme()); - assertEquals(expectedColumnEncoding, table.getEncodingScheme()); - } - - public static synchronized void freeResourcesIfBeyondThreshold() throws Exception { - if (TABLE_COUNTER.get() > TEARDOWN_THRESHOLD) { - int numTables = TABLE_COUNTER.get(); - TABLE_COUNTER.set(0); - if (isDistributedClusterModeEnabled(config)) { - LOGGER.info("Deleting old tables on distributed cluster because " - + "number of tables is likely greater than {}", - TEARDOWN_THRESHOLD); - deletePriorMetaData(HConstants.LATEST_TIMESTAMP, url); - } else { - LOGGER.info("Shutting down mini cluster because number of tables" - + " on this mini cluster is likely greater than {}", - TEARDOWN_THRESHOLD); - resetHbase(); - } - } - } - - protected static void createTestTable(String url, String ddl) throws SQLException { - 
createTestTable(url, ddl, null, null); - } - - protected static void createTestTable(String url, String ddl, byte[][] splits, Long ts) throws SQLException { - createTestTable(url, ddl, splits, ts, true); - } - - public static void createSchema(String url, String tableName, Long ts) throws SQLException { - String schema = SchemaUtil.getSchemaNameFromFullName(tableName); - if (!schema.equals("")) { - Properties props = new Properties(); - if (ts != null) { - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts)); - } - try (Connection conn = DriverManager.getConnection(url, props);) { - if (SchemaUtil.isNamespaceMappingEnabled(null, - conn.unwrap(PhoenixConnection.class).getQueryServices().getProps())) { - conn.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS " + schema); - } - } - } - } - - protected static void createTestTable(String url, String ddl, byte[][] splits, Long ts, boolean swallowTableAlreadyExistsException) throws SQLException { - assertNotNull(ddl); - StringBuilder buf = new StringBuilder(ddl); - if (splits != null) { - buf.append(" SPLIT ON ("); - for (int i = 0; i < splits.length; i++) { - buf.append("'").append(Bytes.toString(splits[i])).append("'").append(","); - } - buf.setCharAt(buf.length()-1, ')'); - } - ddl = buf.toString(); - Properties props = new Properties(); - if (ts != null) { - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts)); - } - Connection conn = DriverManager.getConnection(url, props); + conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5); + conf.setInt("hbase.regionserver.metahandler.count", 2); + conf.setInt("dfs.namenode.handler.count", 2); + conf.setInt("dfs.namenode.service.handler.count", 2); + conf.setInt("dfs.datanode.handler.count", 2); + conf.setInt("ipc.server.read.threadpool.size", 2); + conf.setInt("ipc.server.handler.threadpool.size", 2); + conf.setInt("hbase.regionserver.hlog.syncer.count", 2); + conf.setInt("hbase.hfile.compaction.discharger.interval", 5000); + conf.setInt("hbase.hlog.asyncer.number", 2); + conf.setInt("hbase.assignment.zkevent.workers", 5); + conf.setInt("hbase.assignment.threads.max", 5); + conf.setInt("hbase.catalogjanitor.interval", 5000); + conf.setInt(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB, 10000); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); + conf.setInt(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY, 1); + conf.setInt(GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, 0); + // This results in processing one row at a time in each next operation of the aggregate region + // scanner, i.e., one row pages. In other words, 0ms page allows only one row to be processed + // within one page; 0ms page is equivalent to one-row page + if (conf.getLong(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, 0) == 0) { + conf.setLong(QueryServices.PHOENIX_SERVER_PAGE_SIZE_MS, 0); + } + setPhoenixRegionServerEndpoint(conf); + return conf; + } + + /* + * Set property hbase.coprocessor.regionserver.classes to include test implementation of + * PhoenixRegionServerEndpoint by default, if some other regionserver coprocs are not already + * present. 
+ */ + protected static void setPhoenixRegionServerEndpoint(Configuration conf) { + String value = conf.get(REGIONSERVER_COPROCESSOR_CONF_KEY); + if (value == null) { + value = PhoenixRegionServerEndpointTestImpl.class.getName(); + } else { + value = value + "," + PhoenixRegionServerEndpointTestImpl.class.getName(); + } + conf.set(REGIONSERVER_COPROCESSOR_CONF_KEY, value); + } + + private static PhoenixTestDriver newTestDriver(ReadOnlyProps props) throws Exception { + PhoenixTestDriver newDriver; + String driverClassName = props.get(DRIVER_CLASS_NAME_ATTRIB); + if (isDistributedClusterModeEnabled(config)) { + HashMap distPropMap = new HashMap<>(1); + distPropMap.put(DROP_METADATA_ATTRIB, Boolean.TRUE.toString()); + props = new ReadOnlyProps(props, distPropMap.entrySet().iterator()); + } + if (driverClassName == null) { + newDriver = new PhoenixTestDriver(props); + } else { + Class clazz = Class.forName(driverClassName); + Constructor constr = clazz.getConstructor(ReadOnlyProps.class); + newDriver = (PhoenixTestDriver) constr.newInstance(props); + } + return newDriver; + } + + /** + * Create a {@link PhoenixTestDriver} and register it. + * @return an initialized and registered {@link PhoenixTestDriver} + */ + public static synchronized PhoenixTestDriver initAndRegisterTestDriver(String url, + ReadOnlyProps props) throws Exception { + PhoenixTestDriver newDriver = newTestDriver(props); + DriverManager.registerDriver(newDriver); + Driver oldDriver = DriverManager.getDriver(url); + if (oldDriver != newDriver) { + destroyDriver(oldDriver); + } + Properties driverProps = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = newDriver.connect(url, driverProps); + conn.close(); + return newDriver; + } + + // Close and unregister the driver. + protected static synchronized boolean destroyDriver(Driver driver) { + if (driver != null) { + assert (driver instanceof PhoenixEmbeddedDriver); + PhoenixEmbeddedDriver pdriver = (PhoenixEmbeddedDriver) driver; + try { try { - conn.createStatement().execute(ddl); - } catch (TableAlreadyExistsException e) { - if (! 
swallowTableAlreadyExistsException) { - throw e; - } + pdriver.close(); + return true; } finally { - conn.close(); - } - } - - protected static byte[][] getDefaultSplits(String tenantId) { - return new byte[][] { - Bytes.toBytes(tenantId + "00A"), - Bytes.toBytes(tenantId + "00B"), - Bytes.toBytes(tenantId + "00C"), - }; - } - - private static void deletePriorSchemas(long ts, String url) throws Exception { - Properties props = new Properties(); - props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024)); - if (ts != HConstants.LATEST_TIMESTAMP) { - props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts)); - } - try (Connection conn = DriverManager.getConnection(url, props)) { - DatabaseMetaData dbmd = conn.getMetaData(); - ResultSet rs = dbmd.getSchemas(); - while (rs.next()) { - String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM); - if (schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) { - continue; - } - schemaName = SchemaUtil.getEscapedArgument(schemaName); - - String ddl = "DROP SCHEMA " + schemaName; - conn.createStatement().executeUpdate(ddl); - } - rs.close(); - } - // Make sure all schemas have been dropped - props.remove(CURRENT_SCN_ATTRIB); - try (Connection seeLatestConn = DriverManager.getConnection(url, props)) { - DatabaseMetaData dbmd = seeLatestConn.getMetaData(); - ResultSet rs = dbmd.getSchemas(); - boolean hasSchemas = rs.next(); - if (hasSchemas) { - String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM); - if (schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) { - hasSchemas = rs.next(); - } - } - if (hasSchemas) { - fail("The following schemas are not dropped that should be:" + getSchemaNames(rs)); - } - } - } - - protected static synchronized void deletePriorMetaData(long ts, String url) throws Exception { - deletePriorTables(ts, url); - if (ts != HConstants.LATEST_TIMESTAMP) { - ts = nextTimestamp() - 1; - } - deletePriorSchemas(ts, url); - } - - private static void deletePriorTables(long ts, String url) throws Exception { - deletePriorTables(ts, (String)null, url); - } - - private static void deletePriorTables(long ts, String tenantId, String url) throws Exception { - Properties props = new Properties(); - props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024)); - if (ts != HConstants.LATEST_TIMESTAMP) { - props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts)); - } - Connection conn = DriverManager.getConnection(url, props); - try { - deletePriorTables(ts, conn, url); - deletePriorSequences(ts, conn); - - // Make sure all tables and views have been dropped - props.remove(CURRENT_SCN_ATTRIB); - try (Connection seeLatestConn = DriverManager.getConnection(url, props)) { - DatabaseMetaData dbmd = seeLatestConn.getMetaData(); - ResultSet rs = dbmd.getTables(null, null, null, new String[]{PTableType.VIEW.toString(), PTableType.TABLE.toString()}); - while (rs.next()) { - String fullTableName = SchemaUtil.getEscapedTableName( - rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), - rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); - try { - conn.unwrap(PhoenixConnection.class).getTable(fullTableName); - fail("The following tables are not deleted that should be:" + getTableNames(rs)); - } catch (TableNotFoundException e) { - } - } - } - } - finally { - conn.close(); - } - } - - private static void deletePriorTables(long ts, Connection globalConn, String url) throws Exception { - DatabaseMetaData dbmd = globalConn.getMetaData(); - // Drop VIEWs first, as we don't allow a TABLE with views to be 
dropped - // Tables are sorted by TENANT_ID - List tableTypesList = Arrays.asList(new String[] {PTableType.VIEW.toString()}, new String[] {PTableType.TABLE.toString()}); - for (String[] tableTypes: tableTypesList) { - ResultSet rs = dbmd.getTables(null, null, null, tableTypes); - String lastTenantId = null; - Connection conn = globalConn; - while (rs.next()) { - String fullTableName = SchemaUtil.getEscapedTableName( - rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), - rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); - String ddl = "DROP " + rs.getString(PhoenixDatabaseMetaData.TABLE_TYPE) + " " + fullTableName + " CASCADE"; - String tenantId = rs.getString(1); - if (tenantId != null && !tenantId.equals(lastTenantId)) { - if (lastTenantId != null) { - conn.close(); - } - // Open tenant-specific connection when we find a new one - Properties props = PropertiesUtil.deepCopy(globalConn.getClientInfo()); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - conn = DriverManager.getConnection(url, props); - lastTenantId = tenantId; - } - try { - conn.createStatement().executeUpdate(ddl); - } catch (NewerTableAlreadyExistsException ex) { - LOGGER.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion"); - } catch (TableNotFoundException ex) { - LOGGER.info("Table " + fullTableName + " is already deleted."); - } - } - rs.close(); - if (lastTenantId != null) { - conn.close(); - } - } - } - - private static String getTableNames(ResultSet rs) throws SQLException { - StringBuilder buf = new StringBuilder(); - do { - buf.append(" "); - buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME))); - } while (rs.next()); - return buf.toString(); - } - - private static String getSchemaNames(ResultSet rs) throws SQLException { - StringBuilder buf = new StringBuilder(); - do { - buf.append(" "); - buf.append(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); - } while (rs.next()); - return buf.toString(); - } - - private static void deletePriorSequences(long ts, Connection globalConn) throws Exception { - // TODO: drop tenant-specific sequences too - ResultSet rs = globalConn.createStatement().executeQuery("SELECT " - + PhoenixDatabaseMetaData.TENANT_ID + "," - + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," - + PhoenixDatabaseMetaData.SEQUENCE_NAME - + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE); - String lastTenantId = null; - Connection conn = globalConn; + DriverManager.deregisterDriver(driver); + } + } catch (Exception e) { + LOGGER.warn("Unable to close registered driver: " + driver, e); + } + } + return false; + } + + protected static String getOrganizationId() { + return ORG_ID; + } + + private static long timestamp; + + public static synchronized long nextTimestamp() { + timestamp += 100; + return timestamp; + } + + public static boolean twoDoubleEquals(double a, double b) { + if (Double.isNaN(a) ^ Double.isNaN(b)) return false; + if (Double.isNaN(a)) return true; + if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false; + if (Double.isInfinite(a)) { + if ((a > 0) ^ (b > 0)) return false; + else return true; + } + if (Math.abs(a - b) <= ZERO) { + return true; + } else { + return false; + } + } + + protected static void ensureTableCreated(String url, String tableName) throws SQLException { + ensureTableCreated(url, tableName, tableName, null, null, null); + } + + protected static void ensureTableCreated(String url, String tableName, String tableDDLType) + 
throws SQLException { + ensureTableCreated(url, tableName, tableDDLType, null, null, null); + } + + public static void ensureTableCreated(String url, String tableName, String tableDDLType, + byte[][] splits, String tableDDLOptions) throws SQLException { + ensureTableCreated(url, tableName, tableDDLType, splits, null, tableDDLOptions); + } + + protected static void ensureTableCreated(String url, String tableName, String tableDDLType, + Long ts) throws SQLException { + ensureTableCreated(url, tableName, tableDDLType, null, ts, null); + } + + protected static void ensureTableCreated(String url, String tableName, String tableDDLType, + byte[][] splits, Long ts, String tableDDLOptions) throws SQLException { + String ddl = tableDDLMap.get(tableDDLType); + if (!tableDDLType.equals(tableName)) { + ddl = ddl.replace(tableDDLType, tableName); + } + if (tableDDLOptions != null) { + ddl += tableDDLOptions; + } + createSchema(url, tableName, ts); + createTestTable(url, ddl, splits, ts); + } + + protected ResultSet executeQuery(Connection conn, QueryBuilder queryBuilder) throws SQLException { + PreparedStatement statement = conn.prepareStatement(queryBuilder.build()); + ResultSet rs = statement.executeQuery(); + return rs; + } + + private static AtomicInteger NAME_SUFFIX = new AtomicInteger(0); + private static final int MAX_SUFFIX_VALUE = 1000000; + + /** + * Counter to track number of tables we have created. This isn't really accurate since this + * counter will be incremented when we call {@link #generateUniqueName()}for getting unique schema + * and sequence names too. But this will have to do. + */ + private static final AtomicInteger TABLE_COUNTER = new AtomicInteger(0); + /* + * Threshold to monitor if we need to restart mini-cluster since we created too many tables. Note, + * we can't have this value too high since we don't want the shutdown to take too long a time + * either. 
+ */ + private static final int TEARDOWN_THRESHOLD = 30; + + public static String generateUniqueName() { + int nextName = NAME_SUFFIX.incrementAndGet(); + if (nextName >= MAX_SUFFIX_VALUE) { + throw new IllegalStateException("Used up all unique names"); + } + TABLE_COUNTER.incrementAndGet(); + return "N" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1); + } + + private static AtomicInteger SEQ_NAME_SUFFIX = new AtomicInteger(0); + private static final int MAX_SEQ_SUFFIX_VALUE = 1000000; + + private static final AtomicInteger SEQ_COUNTER = new AtomicInteger(0); + + public static String generateUniqueSequenceName() { + int nextName = SEQ_NAME_SUFFIX.incrementAndGet(); + if (nextName >= MAX_SEQ_SUFFIX_VALUE) { + throw new IllegalStateException("Used up all unique sequence names"); + } + SEQ_COUNTER.incrementAndGet(); + return "S" + Integer.toString(MAX_SEQ_SUFFIX_VALUE + nextName).substring(1); + } + + public static void assertMetadata(Connection conn, + PTable.ImmutableStorageScheme expectedStorageScheme, + PTable.QualifierEncodingScheme expectedColumnEncoding, String tableName) throws Exception { + PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); + PTable table = phxConn.getTableNoCache(tableName); + assertEquals(expectedStorageScheme, table.getImmutableStorageScheme()); + assertEquals(expectedColumnEncoding, table.getEncodingScheme()); + } + + public static synchronized void freeResourcesIfBeyondThreshold() throws Exception { + if (TABLE_COUNTER.get() > TEARDOWN_THRESHOLD) { + int numTables = TABLE_COUNTER.get(); + TABLE_COUNTER.set(0); + if (isDistributedClusterModeEnabled(config)) { + LOGGER.info("Deleting old tables on distributed cluster because " + + "number of tables is likely greater than {}", TEARDOWN_THRESHOLD); + deletePriorMetaData(HConstants.LATEST_TIMESTAMP, url); + } else { + LOGGER.info("Shutting down mini cluster because number of tables" + + " on this mini cluster is likely greater than {}", TEARDOWN_THRESHOLD); + resetHbase(); + } + } + } + + protected static void createTestTable(String url, String ddl) throws SQLException { + createTestTable(url, ddl, null, null); + } + + protected static void createTestTable(String url, String ddl, byte[][] splits, Long ts) + throws SQLException { + createTestTable(url, ddl, splits, ts, true); + } + + public static void createSchema(String url, String tableName, Long ts) throws SQLException { + String schema = SchemaUtil.getSchemaNameFromFullName(tableName); + if (!schema.equals("")) { + Properties props = new Properties(); + if (ts != null) { + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts)); + } + try (Connection conn = DriverManager.getConnection(url, props);) { + if ( + SchemaUtil.isNamespaceMappingEnabled(null, + conn.unwrap(PhoenixConnection.class).getQueryServices().getProps()) + ) { + conn.createStatement().executeUpdate("CREATE SCHEMA IF NOT EXISTS " + schema); + } + } + } + } + + protected static void createTestTable(String url, String ddl, byte[][] splits, Long ts, + boolean swallowTableAlreadyExistsException) throws SQLException { + assertNotNull(ddl); + StringBuilder buf = new StringBuilder(ddl); + if (splits != null) { + buf.append(" SPLIT ON ("); + for (int i = 0; i < splits.length; i++) { + buf.append("'").append(Bytes.toString(splits[i])).append("'").append(","); + } + buf.setCharAt(buf.length() - 1, ')'); + } + ddl = buf.toString(); + Properties props = new Properties(); + if (ts != null) { + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts)); + 
} + Connection conn = DriverManager.getConnection(url, props); + try { + conn.createStatement().execute(ddl); + } catch (TableAlreadyExistsException e) { + if (!swallowTableAlreadyExistsException) { + throw e; + } + } finally { + conn.close(); + } + } + + protected static byte[][] getDefaultSplits(String tenantId) { + return new byte[][] { Bytes.toBytes(tenantId + "00A"), Bytes.toBytes(tenantId + "00B"), + Bytes.toBytes(tenantId + "00C"), }; + } + + private static void deletePriorSchemas(long ts, String url) throws Exception { + Properties props = new Properties(); + props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024)); + if (ts != HConstants.LATEST_TIMESTAMP) { + props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts)); + } + try (Connection conn = DriverManager.getConnection(url, props)) { + DatabaseMetaData dbmd = conn.getMetaData(); + ResultSet rs = dbmd.getSchemas(); + while (rs.next()) { + String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM); + if (schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) { + continue; + } + schemaName = SchemaUtil.getEscapedArgument(schemaName); + + String ddl = "DROP SCHEMA " + schemaName; + conn.createStatement().executeUpdate(ddl); + } + rs.close(); + } + // Make sure all schemas have been dropped + props.remove(CURRENT_SCN_ATTRIB); + try (Connection seeLatestConn = DriverManager.getConnection(url, props)) { + DatabaseMetaData dbmd = seeLatestConn.getMetaData(); + ResultSet rs = dbmd.getSchemas(); + boolean hasSchemas = rs.next(); + if (hasSchemas) { + String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM); + if (schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) { + hasSchemas = rs.next(); + } + } + if (hasSchemas) { + fail("The following schemas are not dropped that should be:" + getSchemaNames(rs)); + } + } + } + + protected static synchronized void deletePriorMetaData(long ts, String url) throws Exception { + deletePriorTables(ts, url); + if (ts != HConstants.LATEST_TIMESTAMP) { + ts = nextTimestamp() - 1; + } + deletePriorSchemas(ts, url); + } + + private static void deletePriorTables(long ts, String url) throws Exception { + deletePriorTables(ts, (String) null, url); + } + + private static void deletePriorTables(long ts, String tenantId, String url) throws Exception { + Properties props = new Properties(); + props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024)); + if (ts != HConstants.LATEST_TIMESTAMP) { + props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts)); + } + Connection conn = DriverManager.getConnection(url, props); + try { + deletePriorTables(ts, conn, url); + deletePriorSequences(ts, conn); + + // Make sure all tables and views have been dropped + props.remove(CURRENT_SCN_ATTRIB); + try (Connection seeLatestConn = DriverManager.getConnection(url, props)) { + DatabaseMetaData dbmd = seeLatestConn.getMetaData(); + ResultSet rs = dbmd.getTables(null, null, null, + new String[] { PTableType.VIEW.toString(), PTableType.TABLE.toString() }); while (rs.next()) { - String tenantId = rs.getString(1); - if (tenantId != null && !tenantId.equals(lastTenantId)) { - if (lastTenantId != null) { - conn.close(); - } - // Open tenant-specific connection when we find a new one - Properties props = new Properties(globalConn.getClientInfo()); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - conn = DriverManager.getConnection(url, props); - lastTenantId = tenantId; - } - - LOGGER.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + 
SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3))); - conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3))); - } - rs.close(); - } - - protected static void initSumDoubleValues(byte[][] splits, String url) throws Exception { - initSumDoubleValues(SUM_DOUBLE_NAME, splits, url); - } - - protected static void initSumDoubleValues(String tableName, byte[][] splits, String url) throws Exception { - ensureTableCreated(url, tableName, SUM_DOUBLE_NAME, splits, null); - Properties props = new Properties(); - Connection conn = DriverManager.getConnection(url, props); - try { - // Insert all rows at ts - PreparedStatement stmt = conn.prepareStatement( - "upsert into " + tableName + - "(" + - " id, " + - " d, " + - " f, " + - " ud, " + - " uf) " + - "VALUES (?, ?, ?, ?, ?)"); - stmt.setString(1, "1"); - stmt.setDouble(2, 0.001); - stmt.setFloat(3, 0.01f); - stmt.setDouble(4, 0.001); - stmt.setFloat(5, 0.01f); - stmt.execute(); - - stmt.setString(1, "2"); - stmt.setDouble(2, 0.002); - stmt.setFloat(3, 0.02f); - stmt.setDouble(4, 0.002); - stmt.setFloat(5, 0.02f); - stmt.execute(); - - stmt.setString(1, "3"); - stmt.setDouble(2, 0.003); - stmt.setFloat(3, 0.03f); - stmt.setDouble(4, 0.003); - stmt.setFloat(5, 0.03f); - stmt.execute(); - - stmt.setString(1, "4"); - stmt.setDouble(2, 0.004); - stmt.setFloat(3, 0.04f); - stmt.setDouble(4, 0.004); - stmt.setFloat(5, 0.04f); - stmt.execute(); - - stmt.setString(1, "5"); - stmt.setDouble(2, 0.005); - stmt.setFloat(3, 0.05f); - stmt.setDouble(4, 0.005); - stmt.setFloat(5, 0.05f); - stmt.execute(); - - conn.commit(); - } finally { - conn.close(); - } - } - - protected static String initATableValues(String tenantId, byte[][] splits) throws Exception { - return initATableValues(tenantId, splits, null, null, getUrl()); - } - - protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception { - return initATableValues(tenantId, splits, date, ts, getUrl()); - } - - protected static String initATableValues(String tenantId, byte[][] splits, String url) throws Exception { - return initATableValues(tenantId, splits, null, url); - } - - protected static String initATableValues(String tenantId, byte[][] splits, Date date, String url) throws Exception { - return initATableValues(tenantId, splits, date, null, url); - } - - protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception { - return initATableValues(null, tenantId, splits, date, ts, url, null); - } - - protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts, String url, String tableDDLOptions) throws Exception { - return initATableValues(null, tenantId, splits, date, ts, url, tableDDLOptions); - } - - protected static String initATableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url, String tableDDLOptions) throws Exception { - if(tableName == null) { - tableName = generateUniqueName(); - } - String tableDDLType = ATABLE_NAME; - if (ts == null) { - ensureTableCreated(url, tableName, tableDDLType, splits, null, tableDDLOptions); - } else { - ensureTableCreated(url, tableName, tableDDLType, splits, ts-5, tableDDLOptions); - } - - Properties props = new Properties(); - if (ts != null) { - props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts-3)); - } - - try (Connection conn = DriverManager.getConnection(url, props)) { - // Insert all rows at ts - 
PreparedStatement stmt = conn.prepareStatement( - "upsert into " + tableName + - "(" + - " ORGANIZATION_ID, " + - " ENTITY_ID, " + - " A_STRING, " + - " B_STRING, " + - " A_INTEGER, " + - " A_DATE, " + - " X_DECIMAL, " + - " X_LONG, " + - " X_INTEGER," + - " Y_INTEGER," + - " A_BYTE," + - " A_SHORT," + - " A_FLOAT," + - " A_DOUBLE," + - " A_UNSIGNED_FLOAT," + - " A_UNSIGNED_DOUBLE)" + - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); - stmt.setString(1, tenantId); - stmt.setString(2, ROW1); - stmt.setString(3, A_VALUE); - stmt.setString(4, B_VALUE); - stmt.setInt(5, 1); - stmt.setDate(6, date); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)1); - stmt.setShort(12, (short) 128); - stmt.setFloat(13, 0.01f); - stmt.setDouble(14, 0.0001); - stmt.setFloat(15, 0.01f); - stmt.setDouble(16, 0.0001); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW2); - stmt.setString(3, A_VALUE); - stmt.setString(4, C_VALUE); - stmt.setInt(5, 2); - stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)2); - stmt.setShort(12, (short) 129); - stmt.setFloat(13, 0.02f); - stmt.setDouble(14, 0.0002); - stmt.setFloat(15, 0.02f); - stmt.setDouble(16, 0.0002); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW3); - stmt.setString(3, A_VALUE); - stmt.setString(4, E_VALUE); - stmt.setInt(5, 3); - stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)3); - stmt.setShort(12, (short) 130); - stmt.setFloat(13, 0.03f); - stmt.setDouble(14, 0.0003); - stmt.setFloat(15, 0.03f); - stmt.setDouble(16, 0.0003); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW4); - stmt.setString(3, A_VALUE); - stmt.setString(4, B_VALUE); - stmt.setInt(5, 4); - stmt.setDate(6, date == null ? null : date); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)4); - stmt.setShort(12, (short) 131); - stmt.setFloat(13, 0.04f); - stmt.setDouble(14, 0.0004); - stmt.setFloat(15, 0.04f); - stmt.setDouble(16, 0.0004); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW5); - stmt.setString(3, B_VALUE); - stmt.setString(4, C_VALUE); - stmt.setInt(5, 5); - stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)5); - stmt.setShort(12, (short) 132); - stmt.setFloat(13, 0.05f); - stmt.setDouble(14, 0.0005); - stmt.setFloat(15, 0.05f); - stmt.setDouble(16, 0.0005); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW6); - stmt.setString(3, B_VALUE); - stmt.setString(4, E_VALUE); - stmt.setInt(5, 6); - stmt.setDate(6, date == null ? 
null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); - stmt.setBigDecimal(7, null); - stmt.setNull(8, Types.BIGINT); - stmt.setNull(9, Types.INTEGER); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)6); - stmt.setShort(12, (short) 133); - stmt.setFloat(13, 0.06f); - stmt.setDouble(14, 0.0006); - stmt.setFloat(15, 0.06f); - stmt.setDouble(16, 0.0006); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW7); - stmt.setString(3, B_VALUE); - stmt.setString(4, B_VALUE); - stmt.setInt(5, 7); - stmt.setDate(6, date == null ? null : date); - stmt.setBigDecimal(7, BigDecimal.valueOf(0.1)); - stmt.setLong(8, 5L); - stmt.setInt(9, 5); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)7); - stmt.setShort(12, (short) 134); - stmt.setFloat(13, 0.07f); - stmt.setDouble(14, 0.0007); - stmt.setFloat(15, 0.07f); - stmt.setDouble(16, 0.0007); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW8); - stmt.setString(3, B_VALUE); - stmt.setString(4, C_VALUE); - stmt.setInt(5, 8); - stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); - stmt.setBigDecimal(7, BigDecimal.valueOf(3.9)); - long l = Integer.MIN_VALUE - 1L; - assert(l < Integer.MIN_VALUE); - stmt.setLong(8, l); - stmt.setInt(9, 4); - stmt.setNull(10, Types.INTEGER); - stmt.setByte(11, (byte)8); - stmt.setShort(12, (short) 135); - stmt.setFloat(13, 0.08f); - stmt.setDouble(14, 0.0008); - stmt.setFloat(15, 0.08f); - stmt.setDouble(16, 0.0008); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, ROW9); - stmt.setString(3, C_VALUE); - stmt.setString(4, E_VALUE); - stmt.setInt(5, 9); - stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); - stmt.setBigDecimal(7, BigDecimal.valueOf(3.3)); - l = Integer.MAX_VALUE + 1L; - assert(l > Integer.MAX_VALUE); - stmt.setLong(8, l); - stmt.setInt(9, 3); - stmt.setInt(10, 300); - stmt.setByte(11, (byte)9); - stmt.setShort(12, (short) 0); - stmt.setFloat(13, 0.09f); - stmt.setDouble(14, 0.0009); - stmt.setFloat(15, 0.09f); - stmt.setDouble(16, 0.0009); - stmt.execute(); - conn.commit(); - } - return tableName; - } - - - protected static String initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, Long ts) throws Exception { - return initEntityHistoryTableValues(ENTITY_HISTORY_TABLE_NAME, tenantId, splits, date, ts, getUrl()); - } - - protected static String initEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts) throws Exception { - return initEntityHistoryTableValues(tableName, tenantId, splits, date, ts, getUrl()); - } - - protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts) throws Exception { - return initSaltedEntityHistoryTableValues(tableName, tenantId, splits, date, ts, getUrl()); - } - - protected static String initEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, String url) throws Exception { - return initEntityHistoryTableValues(tableName, tenantId, splits, null, null, url); - } - - private static String initEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception { - if (tableName == null) { - tableName = generateUniqueName(); - } - - if (ts == null) { - ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, null); - } else { - ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, ts-2, null); - } 
- - Properties props = new Properties(); - if (ts != null) { - props.setProperty(CURRENT_SCN_ATTRIB, ts.toString()); - } - Connection conn = DriverManager.getConnection(url, props); - try { - // Insert all rows at ts - PreparedStatement stmt = conn.prepareStatement( - "upsert into " + - tableName + - "(" + - " ORGANIZATION_ID, " + - " PARENT_ID, " + - " CREATED_DATE, " + - " ENTITY_HISTORY_ID, " + - " OLD_VALUE, " + - " NEW_VALUE) " + - "VALUES (?, ?, ?, ?, ?, ?)"); - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID1); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID1); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID2); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID2); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID3); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID3); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID4); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID4); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID5); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID5); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID6); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID6); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID7); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID7); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID8); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID8); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID9); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID9); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - conn.commit(); - } finally { + String fullTableName = + SchemaUtil.getEscapedTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), + rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); + try { + conn.unwrap(PhoenixConnection.class).getTable(fullTableName); + fail("The following tables are not deleted that should be:" + getTableNames(rs)); + } catch (TableNotFoundException e) { + } + } + } + } finally { + conn.close(); + } + } + + private static void deletePriorTables(long ts, Connection globalConn, String url) + throws Exception { + DatabaseMetaData dbmd = globalConn.getMetaData(); + // Drop VIEWs first, as we don't allow a TABLE with views to be dropped + // Tables are sorted by TENANT_ID + List tableTypesList = Arrays.asList(new String[] { PTableType.VIEW.toString() }, + new String[] { PTableType.TABLE.toString() }); + for (String[] tableTypes : tableTypesList) { + ResultSet rs = dbmd.getTables(null, null, null, tableTypes); + String lastTenantId = null; + Connection conn = globalConn; + while (rs.next()) { + String fullTableName = + SchemaUtil.getEscapedTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), + rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)); + String ddl = "DROP " + 
rs.getString(PhoenixDatabaseMetaData.TABLE_TYPE) + " " + + fullTableName + " CASCADE"; + String tenantId = rs.getString(1); + if (tenantId != null && !tenantId.equals(lastTenantId)) { + if (lastTenantId != null) { conn.close(); + } + // Open tenant-specific connection when we find a new one + Properties props = PropertiesUtil.deepCopy(globalConn.getClientInfo()); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + conn = DriverManager.getConnection(url, props); + lastTenantId = tenantId; } - - return tableName; - } - - protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception { - if (tableName == null) { - tableName = generateUniqueName(); - } - - if (ts == null) { - ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, null); - } else { - ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, ts-2, null); - } - - Properties props = new Properties(); - if (ts != null) { - props.setProperty(CURRENT_SCN_ATTRIB, ts.toString()); - } - Connection conn = DriverManager.getConnection(url, props); try { - // Insert all rows at ts - PreparedStatement stmt = conn.prepareStatement( - "upsert into " + - tableName + - "(" + - " ORGANIZATION_ID, " + - " PARENT_ID, " + - " CREATED_DATE, " + - " ENTITY_HISTORY_ID, " + - " OLD_VALUE, " + - " NEW_VALUE) " + - "VALUES (?, ?, ?, ?, ?, ?)"); - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID1); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID1); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID2); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID2); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID3); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID3); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID4); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID4); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID5); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID5); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID6); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID6); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID7); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID7); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID8); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID8); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - stmt.setString(1, tenantId); - stmt.setString(2, PARENTID9); - stmt.setDate(3, date); - stmt.setString(4, ENTITYHISTID9); - stmt.setString(5, A_VALUE); - stmt.setString(6, B_VALUE); - stmt.execute(); - - conn.commit(); - } finally { - conn.close(); - } - - return tableName; - } - - /** - * Disable and drop all non system tables - */ - protected static synchronized void disableAndDropNonSystemTables() throws Exception { - if (driver == null) return; - Admin admin 
= driver.getConnectionQueryServices(getUrl(), new Properties()).getAdmin(); - try { - List tables = admin.listTableDescriptors(); - for (TableDescriptor table : tables) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getTableName().getName()); - if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) { - disableAndDropTable(admin, table.getTableName()); - } + conn.createStatement().executeUpdate(ddl); + } catch (NewerTableAlreadyExistsException ex) { + LOGGER.info("Newer table " + fullTableName + + " or its delete marker exists. Ignore current deletion"); + } catch (TableNotFoundException ex) { + LOGGER.info("Table " + fullTableName + " is already deleted."); + } + } + rs.close(); + if (lastTenantId != null) { + conn.close(); + } + } + } + + private static String getTableNames(ResultSet rs) throws SQLException { + StringBuilder buf = new StringBuilder(); + do { + buf.append(" "); + buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), + rs.getString(PhoenixDatabaseMetaData.TABLE_NAME))); + } while (rs.next()); + return buf.toString(); + } + + private static String getSchemaNames(ResultSet rs) throws SQLException { + StringBuilder buf = new StringBuilder(); + do { + buf.append(" "); + buf.append(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM)); + } while (rs.next()); + return buf.toString(); + } + + private static void deletePriorSequences(long ts, Connection globalConn) throws Exception { + // TODO: drop tenant-specific sequences too + ResultSet rs = globalConn.createStatement() + .executeQuery("SELECT " + PhoenixDatabaseMetaData.TENANT_ID + "," + + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," + PhoenixDatabaseMetaData.SEQUENCE_NAME + + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE); + String lastTenantId = null; + Connection conn = globalConn; + while (rs.next()) { + String tenantId = rs.getString(1); + if (tenantId != null && !tenantId.equals(lastTenantId)) { + if (lastTenantId != null) { + conn.close(); + } + // Open tenant-specific connection when we find a new one + Properties props = new Properties(globalConn.getClientInfo()); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + conn = DriverManager.getConnection(url, props); + lastTenantId = tenantId; + } + + LOGGER.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3))); + conn.createStatement().execute( + "DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3))); + } + rs.close(); + } + + protected static void initSumDoubleValues(byte[][] splits, String url) throws Exception { + initSumDoubleValues(SUM_DOUBLE_NAME, splits, url); + } + + protected static void initSumDoubleValues(String tableName, byte[][] splits, String url) + throws Exception { + ensureTableCreated(url, tableName, SUM_DOUBLE_NAME, splits, null); + Properties props = new Properties(); + Connection conn = DriverManager.getConnection(url, props); + try { + // Insert all rows at ts + PreparedStatement stmt = conn.prepareStatement("upsert into " + tableName + "(" + " id, " + + " d, " + " f, " + " ud, " + " uf) " + "VALUES (?, ?, ?, ?, ?)"); + stmt.setString(1, "1"); + stmt.setDouble(2, 0.001); + stmt.setFloat(3, 0.01f); + stmt.setDouble(4, 0.001); + stmt.setFloat(5, 0.01f); + stmt.execute(); + + stmt.setString(1, "2"); + stmt.setDouble(2, 0.002); + stmt.setFloat(3, 0.02f); + stmt.setDouble(4, 0.002); + stmt.setFloat(5, 0.02f); + stmt.execute(); + + stmt.setString(1, "3"); + stmt.setDouble(2, 0.003); + 
stmt.setFloat(3, 0.03f); + stmt.setDouble(4, 0.003); + stmt.setFloat(5, 0.03f); + stmt.execute(); + + stmt.setString(1, "4"); + stmt.setDouble(2, 0.004); + stmt.setFloat(3, 0.04f); + stmt.setDouble(4, 0.004); + stmt.setFloat(5, 0.04f); + stmt.execute(); + + stmt.setString(1, "5"); + stmt.setDouble(2, 0.005); + stmt.setFloat(3, 0.05f); + stmt.setDouble(4, 0.005); + stmt.setFloat(5, 0.05f); + stmt.execute(); + + conn.commit(); + } finally { + conn.close(); + } + } + + protected static String initATableValues(String tenantId, byte[][] splits) throws Exception { + return initATableValues(tenantId, splits, null, null, getUrl()); + } + + protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts) + throws Exception { + return initATableValues(tenantId, splits, date, ts, getUrl()); + } + + protected static String initATableValues(String tenantId, byte[][] splits, String url) + throws Exception { + return initATableValues(tenantId, splits, null, url); + } + + protected static String initATableValues(String tenantId, byte[][] splits, Date date, String url) + throws Exception { + return initATableValues(tenantId, splits, date, null, url); + } + + protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts, + String url) throws Exception { + return initATableValues(null, tenantId, splits, date, ts, url, null); + } + + protected static String initATableValues(String tenantId, byte[][] splits, Date date, Long ts, + String url, String tableDDLOptions) throws Exception { + return initATableValues(null, tenantId, splits, date, ts, url, tableDDLOptions); + } + + protected static String initATableValues(String tableName, String tenantId, byte[][] splits, + Date date, Long ts, String url, String tableDDLOptions) throws Exception { + if (tableName == null) { + tableName = generateUniqueName(); + } + String tableDDLType = ATABLE_NAME; + if (ts == null) { + ensureTableCreated(url, tableName, tableDDLType, splits, null, tableDDLOptions); + } else { + ensureTableCreated(url, tableName, tableDDLType, splits, ts - 5, tableDDLOptions); + } + + Properties props = new Properties(); + if (ts != null) { + props.setProperty(CURRENT_SCN_ATTRIB, Long.toString(ts - 3)); + } + + try (Connection conn = DriverManager.getConnection(url, props)) { + // Insert all rows at ts + PreparedStatement stmt = conn.prepareStatement( + "upsert into " + tableName + "(" + " ORGANIZATION_ID, " + " ENTITY_ID, " + + " A_STRING, " + " B_STRING, " + " A_INTEGER, " + " A_DATE, " + + " X_DECIMAL, " + " X_LONG, " + " X_INTEGER," + " Y_INTEGER," + " A_BYTE," + + " A_SHORT," + " A_FLOAT," + " A_DOUBLE," + " A_UNSIGNED_FLOAT," + + " A_UNSIGNED_DOUBLE)" + "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); + stmt.setString(1, tenantId); + stmt.setString(2, ROW1); + stmt.setString(3, A_VALUE); + stmt.setString(4, B_VALUE); + stmt.setInt(5, 1); + stmt.setDate(6, date); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 1); + stmt.setShort(12, (short) 128); + stmt.setFloat(13, 0.01f); + stmt.setDouble(14, 0.0001); + stmt.setFloat(15, 0.01f); + stmt.setDouble(16, 0.0001); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW2); + stmt.setString(3, A_VALUE); + stmt.setString(4, C_VALUE); + stmt.setInt(5, 2); + stmt.setDate(6, date == null ? 
null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 2); + stmt.setShort(12, (short) 129); + stmt.setFloat(13, 0.02f); + stmt.setDouble(14, 0.0002); + stmt.setFloat(15, 0.02f); + stmt.setDouble(16, 0.0002); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW3); + stmt.setString(3, A_VALUE); + stmt.setString(4, E_VALUE); + stmt.setInt(5, 3); + stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 3); + stmt.setShort(12, (short) 130); + stmt.setFloat(13, 0.03f); + stmt.setDouble(14, 0.0003); + stmt.setFloat(15, 0.03f); + stmt.setDouble(16, 0.0003); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW4); + stmt.setString(3, A_VALUE); + stmt.setString(4, B_VALUE); + stmt.setInt(5, 4); + stmt.setDate(6, date == null ? null : date); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 4); + stmt.setShort(12, (short) 131); + stmt.setFloat(13, 0.04f); + stmt.setDouble(14, 0.0004); + stmt.setFloat(15, 0.04f); + stmt.setDouble(16, 0.0004); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW5); + stmt.setString(3, B_VALUE); + stmt.setString(4, C_VALUE); + stmt.setInt(5, 5); + stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 5); + stmt.setShort(12, (short) 132); + stmt.setFloat(13, 0.05f); + stmt.setDouble(14, 0.0005); + stmt.setFloat(15, 0.05f); + stmt.setDouble(16, 0.0005); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW6); + stmt.setString(3, B_VALUE); + stmt.setString(4, E_VALUE); + stmt.setInt(5, 6); + stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); + stmt.setBigDecimal(7, null); + stmt.setNull(8, Types.BIGINT); + stmt.setNull(9, Types.INTEGER); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 6); + stmt.setShort(12, (short) 133); + stmt.setFloat(13, 0.06f); + stmt.setDouble(14, 0.0006); + stmt.setFloat(15, 0.06f); + stmt.setDouble(16, 0.0006); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW7); + stmt.setString(3, B_VALUE); + stmt.setString(4, B_VALUE); + stmt.setInt(5, 7); + stmt.setDate(6, date == null ? null : date); + stmt.setBigDecimal(7, BigDecimal.valueOf(0.1)); + stmt.setLong(8, 5L); + stmt.setInt(9, 5); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 7); + stmt.setShort(12, (short) 134); + stmt.setFloat(13, 0.07f); + stmt.setDouble(14, 0.0007); + stmt.setFloat(15, 0.07f); + stmt.setDouble(16, 0.0007); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW8); + stmt.setString(3, B_VALUE); + stmt.setString(4, C_VALUE); + stmt.setInt(5, 8); + stmt.setDate(6, date == null ? 
null : new Date(date.getTime() + MILLIS_IN_DAY * 1)); + stmt.setBigDecimal(7, BigDecimal.valueOf(3.9)); + long l = Integer.MIN_VALUE - 1L; + assert (l < Integer.MIN_VALUE); + stmt.setLong(8, l); + stmt.setInt(9, 4); + stmt.setNull(10, Types.INTEGER); + stmt.setByte(11, (byte) 8); + stmt.setShort(12, (short) 135); + stmt.setFloat(13, 0.08f); + stmt.setDouble(14, 0.0008); + stmt.setFloat(15, 0.08f); + stmt.setDouble(16, 0.0008); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, ROW9); + stmt.setString(3, C_VALUE); + stmt.setString(4, E_VALUE); + stmt.setInt(5, 9); + stmt.setDate(6, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY * 2)); + stmt.setBigDecimal(7, BigDecimal.valueOf(3.3)); + l = Integer.MAX_VALUE + 1L; + assert (l > Integer.MAX_VALUE); + stmt.setLong(8, l); + stmt.setInt(9, 3); + stmt.setInt(10, 300); + stmt.setByte(11, (byte) 9); + stmt.setShort(12, (short) 0); + stmt.setFloat(13, 0.09f); + stmt.setDouble(14, 0.0009); + stmt.setFloat(15, 0.09f); + stmt.setDouble(16, 0.0009); + stmt.execute(); + conn.commit(); + } + return tableName; + } + + protected static String initEntityHistoryTableValues(String tenantId, byte[][] splits, Date date, + Long ts) throws Exception { + return initEntityHistoryTableValues(ENTITY_HISTORY_TABLE_NAME, tenantId, splits, date, ts, + getUrl()); + } + + protected static String initEntityHistoryTableValues(String tableName, String tenantId, + byte[][] splits, Date date, Long ts) throws Exception { + return initEntityHistoryTableValues(tableName, tenantId, splits, date, ts, getUrl()); + } + + protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, + byte[][] splits, Date date, Long ts) throws Exception { + return initSaltedEntityHistoryTableValues(tableName, tenantId, splits, date, ts, getUrl()); + } + + protected static String initEntityHistoryTableValues(String tableName, String tenantId, + byte[][] splits, String url) throws Exception { + return initEntityHistoryTableValues(tableName, tenantId, splits, null, null, url); + } + + private static String initEntityHistoryTableValues(String tableName, String tenantId, + byte[][] splits, Date date, Long ts, String url) throws Exception { + if (tableName == null) { + tableName = generateUniqueName(); + } + + if (ts == null) { + ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, null); + } else { + ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, ts - 2, null); + } + + Properties props = new Properties(); + if (ts != null) { + props.setProperty(CURRENT_SCN_ATTRIB, ts.toString()); + } + Connection conn = DriverManager.getConnection(url, props); + try { + // Insert all rows at ts + PreparedStatement stmt = + conn.prepareStatement("upsert into " + tableName + "(" + " ORGANIZATION_ID, " + + " PARENT_ID, " + " CREATED_DATE, " + " ENTITY_HISTORY_ID, " + " OLD_VALUE, " + + " NEW_VALUE) " + "VALUES (?, ?, ?, ?, ?, ?)"); + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID1); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID1); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID2); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID2); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID3); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID3); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + 
stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID4); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID4); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID5); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID5); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID6); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID6); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID7); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID7); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID8); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID8); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID9); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID9); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + conn.commit(); + } finally { + conn.close(); + } + + return tableName; + } + + protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, + byte[][] splits, Date date, Long ts, String url) throws Exception { + if (tableName == null) { + tableName = generateUniqueName(); + } + + if (ts == null) { + ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, null); + } else { + ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, ts - 2, null); + } + + Properties props = new Properties(); + if (ts != null) { + props.setProperty(CURRENT_SCN_ATTRIB, ts.toString()); + } + Connection conn = DriverManager.getConnection(url, props); + try { + // Insert all rows at ts + PreparedStatement stmt = + conn.prepareStatement("upsert into " + tableName + "(" + " ORGANIZATION_ID, " + + " PARENT_ID, " + " CREATED_DATE, " + " ENTITY_HISTORY_ID, " + " OLD_VALUE, " + + " NEW_VALUE) " + "VALUES (?, ?, ?, ?, ?, ?)"); + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID1); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID1); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID2); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID2); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID3); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID3); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID4); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID4); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID5); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID5); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID6); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID6); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID7); + stmt.setDate(3, date); + 
stmt.setString(4, ENTITYHISTID7); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID8); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID8); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + stmt.setString(1, tenantId); + stmt.setString(2, PARENTID9); + stmt.setDate(3, date); + stmt.setString(4, ENTITYHISTID9); + stmt.setString(5, A_VALUE); + stmt.setString(6, B_VALUE); + stmt.execute(); + + conn.commit(); + } finally { + conn.close(); + } + + return tableName; + } + + /** + * Disable and drop all non system tables + */ + protected static synchronized void disableAndDropNonSystemTables() throws Exception { + if (driver == null) return; + Admin admin = driver.getConnectionQueryServices(getUrl(), new Properties()).getAdmin(); + try { + List tables = admin.listTableDescriptors(); + for (TableDescriptor table : tables) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getTableName().getName()); + if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) { + disableAndDropTable(admin, table.getTableName()); + } + } + } finally { + admin.close(); + } + } + + private static synchronized void disableAndDropTable(final Admin admin, final TableName tableName) + throws Exception { + Future future = null; + boolean success = false; + try { + try { + future = dropHTableService.submit(new Callable() { + @Override + public Void call() throws Exception { + if (admin.isTableEnabled(tableName)) { + admin.disableTable(tableName); + admin.deleteTable(tableName); } - } finally { - admin.close(); - } - } - - private static synchronized void disableAndDropTable(final Admin admin, final TableName tableName) - throws Exception { - Future future = null; - boolean success = false; - try { + return null; + } + }); + future.get(dropTableTimeout, TimeUnit.SECONDS); + success = true; + } catch (TimeoutException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) + .setMessage("Not able to disable and delete table " + tableName.getNameAsString() + " in " + + dropTableTimeout + " seconds.") + .build().buildException(); + } catch (Exception e) { + throw e; + } + } finally { + if (future != null && !success) { + future.cancel(true); + } + } + } + + private static synchronized void disableAndDropAllTables() throws IOException { + long startTime = System.currentTimeMillis(); + long deadline = System.currentTimeMillis() + 15 * 60 * 1000; + final Admin admin = utility.getAdmin(); + + List tableDescriptors = admin.listTableDescriptors(); + int tableCount = tableDescriptors.size(); + + while (!(tableDescriptors = admin.listTableDescriptors()).isEmpty()) { + List> futures = new ArrayList<>(); + ExecutorService dropHTableExecutor = Executors.newFixedThreadPool(10, factory); + + for (final TableDescriptor tableDescriptor : tableDescriptors) { + futures.add(dropHTableExecutor.submit(new Callable() { + @Override + public Void call() throws Exception { + final TableName tableName = tableDescriptor.getTableName(); + String table = tableName.toString(); + Future disableFuture = null; try { - future = dropHTableService.submit(new Callable() { - @Override - public Void call() throws Exception { - if (admin.isTableEnabled(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } - return null; - } - }); - future.get(dropTableTimeout, TimeUnit.SECONDS); - success = true; - } catch (TimeoutException e) { - throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) - .setMessage( - "Not able to disable and delete table " + tableName.getNameAsString() - + " in " + dropTableTimeout + " seconds.").build().buildException(); + LOGGER.info("Calling disable table on: {} ", table); + disableFuture = admin.disableTableAsync(tableName); + disableFuture.get(dropTableTimeout, TimeUnit.SECONDS); + LOGGER.info("Table disabled: {}", table); } catch (Exception e) { - throw e; - } - } finally { - if (future != null && !success) { - future.cancel(true); - } - } - } - - private static synchronized void disableAndDropAllTables() throws IOException { - long startTime = System.currentTimeMillis(); - long deadline = System.currentTimeMillis() + 15 * 60 * 1000; - final Admin admin = utility.getAdmin(); - - List tableDescriptors = admin.listTableDescriptors(); - int tableCount = tableDescriptors.size(); - - while (!(tableDescriptors = admin.listTableDescriptors()).isEmpty()) { - List> futures = new ArrayList<>(); - ExecutorService dropHTableExecutor = Executors.newFixedThreadPool(10, factory); - - for(final TableDescriptor tableDescriptor : tableDescriptors) { - futures.add(dropHTableExecutor.submit(new Callable() { - @Override - public Void call() throws Exception { - final TableName tableName = tableDescriptor.getTableName(); - String table = tableName.toString(); - Future disableFuture = null; - try { - LOGGER.info("Calling disable table on: {} ", table); - disableFuture = admin.disableTableAsync(tableName); - disableFuture.get(dropTableTimeout, TimeUnit.SECONDS); - LOGGER.info("Table disabled: {}", table); - } catch (Exception e) { - LOGGER.warn("Could not disable table {}", table, e); - try { - disableFuture.cancel(true); - } catch (Exception f) { - //fall through - } - //fall through - } - Future deleteFuture = null; - try { - LOGGER.info("Calling delete table on: {}", table); - deleteFuture = admin.deleteTableAsync(tableName); - deleteFuture.get(dropTableTimeout, TimeUnit.SECONDS); - LOGGER.info("Table deleted: {}", table); - } catch (Exception e) { - LOGGER.warn("Could not delete table {}", table, e); - try { - deleteFuture.cancel(true); - } catch (Exception f) { - //fall through - } - //fall through - } - return null; - } - })); - } - - try { - dropHTableExecutor.shutdown(); - dropHTableExecutor.awaitTermination(600, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOGGER.error("dropHTableExecutor didn't shut down in 10 minutes, calling shutdownNow()"); - dropHTableExecutor.shutdownNow(); - } - - if (System.currentTimeMillis() > deadline) { - LOGGER.error("Could not clean up HBase tables in 15 minutes, killing JVM"); - System.exit(-1); - } - } - - long endTime = System.currentTimeMillis(); - - LOGGER.info("Disabled and dropped {} tables in {} ms", tableCount, endTime-startTime); - } - - public static void assertOneOfValuesEqualsResultSet(ResultSet rs, List>... 
expectedResultsArray) throws SQLException { - List> results = Lists.newArrayList(); - while (rs.next()) { - List result = Lists.newArrayList(); - for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { - result.add(rs.getObject(i+1)); - } - results.add(result); - } - for (int j = 0; j < expectedResultsArray.length; j++) { - List> expectedResults = expectedResultsArray[j]; - Set> expectedResultsSet = Sets.newHashSet(expectedResults); - Iterator> iterator = results.iterator(); - while (iterator.hasNext()) { - if (expectedResultsSet.contains(iterator.next())) { - iterator.remove(); - } - } - } - if (results.isEmpty()) return; - fail("Unable to find " + results + " in " + Arrays.asList(expectedResultsArray)); - } - - protected void assertValueEqualsResultSet(ResultSet rs, List expectedResults) throws SQLException { - List> nestedExpectedResults = Lists.newArrayListWithExpectedSize(expectedResults.size()); - for (Object expectedResult : expectedResults) { - nestedExpectedResults.add(Arrays.asList(expectedResult)); - } - assertValuesEqualsResultSet(rs, nestedExpectedResults); - } - - /** - * Asserts that we find the expected values in the result set. We don't know the order, since we don't always - * have an order by and we're going through indexes, but we assert that each expected result occurs once as - * expected (in any order). - */ - public static void assertValuesEqualsResultSet(ResultSet rs, List> expectedResults) throws SQLException { - int expectedCount = expectedResults.size(); - int count = 0; - List> actualResults = Lists.newArrayList(); - List errorResult = null; - while (rs.next() && errorResult == null) { - List result = Lists.newArrayList(); - for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { - result.add(rs.getObject(i+1)); - } - if (!expectedResults.contains(result)) { - errorResult = result; - } - actualResults.add(result); - count++; - } - assertTrue("Could not find " + errorResult + " in expected results: " + expectedResults + " with actual results: " + actualResults, errorResult == null); - assertEquals(expectedCount, count); - } - - public static HBaseTestingUtility getUtility() { - return utility; - } - - public static void upsertRows(Connection conn, String fullTableName, int numRows) throws SQLException { - for (int i=1; i<=numRows; ++i) { - upsertRow(conn, fullTableName, i, false); - } - } - - public static void upsertRow(Connection conn, String fullTableName, int index, boolean firstRowInBatch) throws SQLException { - String upsert = "UPSERT INTO " + fullTableName - + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - PreparedStatement stmt = conn.prepareStatement(upsert); - //varchar_pk - stmt.setString(1, firstRowInBatch ? 
"firstRowInBatch_" : "" + "varchar"+index); - stmt.setString(2, "char"+index); // char_pk - stmt.setInt(3, index); // int_pk - stmt.setLong(4, index); // long_pk - stmt.setBigDecimal(5, new BigDecimal(index)); // decimal_pk - Date date = DateUtil.parseDate("2015-01-01 00:00:00"); - stmt.setDate(6, date); // date_pk - stmt.setString(7, "varchar_a"); // a.varchar_col1 - stmt.setString(8, "chara"); // a.char_col1 - stmt.setInt(9, index+1); // a.int_col1 - stmt.setLong(10, index+1); // a.long_col1 - stmt.setBigDecimal(11, new BigDecimal(index+1)); // a.decimal_col1 - stmt.setDate(12, date); // a.date1 - stmt.setString(13, "varchar_b"); // b.varchar_col2 - stmt.setString(14, "charb"); // b.char_col2 - stmt.setInt(15, index+2); // b.int_col2 - stmt.setLong(16, index+2); // b.long_col2 - stmt.setBigDecimal(17, new BigDecimal(index+2)); // b.decimal_col2 - stmt.setDate(18, date); // b.date2 - stmt.executeUpdate(); - } - - // Populate the test table with data. - public static synchronized void populateTestTable(String fullTableName) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = DriverManager.getConnection(getUrl(), props)) { - upsertRows(conn, fullTableName, 3); - conn.commit(); - } - } - - // Populate the test table with data. - protected static void populateMultiCFTestTable(String tableName) throws SQLException { - populateMultiCFTestTable(tableName, null); - } - - // Populate the test table with data. - protected static void populateMultiCFTestTable(String tableName, Date date) throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - String upsert = "UPSERT INTO " + tableName - + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - PreparedStatement stmt = conn.prepareStatement(upsert); - stmt.setString(1, "varchar1"); - stmt.setString(2, "char1"); - stmt.setInt(3, 1); - stmt.setLong(4, 1L); - stmt.setBigDecimal(5, new BigDecimal("1.1")); - stmt.setString(6, "varchar_a"); - stmt.setString(7, "chara"); - stmt.setInt(8, 2); - stmt.setLong(9, 2L); - stmt.setBigDecimal(10, new BigDecimal("2.1")); - stmt.setString(11, "varchar_b"); - stmt.setString(12, "charb"); - stmt.setInt(13, 3); - stmt.setLong(14, 3L); - stmt.setBigDecimal(15, new BigDecimal("3.1")); - stmt.setDate(16, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY)); - stmt.executeUpdate(); - - stmt.setString(1, "varchar2"); - stmt.setString(2, "char2"); - stmt.setInt(3, 2); - stmt.setLong(4, 2L); - stmt.setBigDecimal(5, new BigDecimal("2.2")); - stmt.setString(6, "varchar_a"); - stmt.setString(7, "chara"); - stmt.setInt(8, 3); - stmt.setLong(9, 3L); - stmt.setBigDecimal(10, new BigDecimal("3.2")); - stmt.setString(11, "varchar_b"); - stmt.setString(12, "charb"); - stmt.setInt(13, 4); - stmt.setLong(14, 4L); - stmt.setBigDecimal(15, new BigDecimal("4.2")); - stmt.setDate(16, date); - stmt.executeUpdate(); - - stmt.setString(1, "varchar3"); - stmt.setString(2, "char3"); - stmt.setInt(3, 3); - stmt.setLong(4, 3L); - stmt.setBigDecimal(5, new BigDecimal("3.3")); - stmt.setString(6, "varchar_a"); - stmt.setString(7, "chara"); - stmt.setInt(8, 4); - stmt.setLong(9, 4L); - stmt.setBigDecimal(10, new BigDecimal("4.3")); - stmt.setString(11, "varchar_b"); - stmt.setString(12, "charb"); - stmt.setInt(13, 5); - stmt.setLong(14, 5L); - stmt.setBigDecimal(15, new BigDecimal("5.3")); - stmt.setDate(16, date == null ? 
null : new Date(date.getTime() + 2 * MILLIS_IN_DAY)); - stmt.executeUpdate(); - - conn.commit(); - } finally { - conn.close(); - } - } - protected static void verifySequenceNotExists(String tenantID, String sequenceName, String sequenceSchemaName) throws SQLException { - verifySequence(tenantID, sequenceName, sequenceSchemaName, false, 0); - } - - protected static void verifySequenceValue(String tenantID, String sequenceName, String sequenceSchemaName, long value) throws SQLException { - verifySequence(tenantID, sequenceName, sequenceSchemaName, true, value); - } - - private static void verifySequence(String tenantID, String sequenceName, String sequenceSchemaName, boolean exists, long value) throws SQLException { - - PhoenixConnection phxConn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); - String ddl = "SELECT " - + PhoenixDatabaseMetaData.TENANT_ID + "," - + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," - + PhoenixDatabaseMetaData.SEQUENCE_NAME + "," - + PhoenixDatabaseMetaData.CURRENT_VALUE - + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE - + " WHERE "; - - ddl += " TENANT_ID " + ((tenantID == null ) ? "IS NULL " : " = '" + tenantID + "'"); - ddl += " AND SEQUENCE_NAME " + ((sequenceName == null) ? "IS NULL " : " = '" + sequenceName + "'"); - ddl += " AND SEQUENCE_SCHEMA " + ((sequenceSchemaName == null) ? "IS NULL " : " = '" + sequenceSchemaName + "'" ); - - ResultSet rs = phxConn.createStatement().executeQuery(ddl); - - if(exists) { - assertTrue(rs.next()); - assertEquals(value, rs.getLong(4)); - } else { - assertFalse(rs.next()); - } - phxConn.close(); - } - - /** - * Synchronously split table at the given split point - */ - protected static void splitTableSync(Admin admin, TableName hbaseTableName, byte[] splitPoint, - int expectedRegions) throws IOException, InterruptedException { - admin.split(hbaseTableName, splitPoint); - for (int i = 0; i < 30; i++) { - List regions = getUtility().getHBaseCluster().getRegions(hbaseTableName); - if (regions.size() >= expectedRegions) { - boolean splitSuccessful = true; - for (HRegion region : regions) { - if (!region.isSplittable()) { - splitSuccessful = false; - } - } - if(splitSuccessful) { - return; - } + LOGGER.warn("Could not disable table {}", table, e); + try { + disableFuture.cancel(true); + } catch (Exception f) { + // fall through + } + // fall through } - LOGGER.info( - "Sleeping for 1000 ms while waiting for {} to split and all regions to come online", - hbaseTableName.getNameAsString()); - Thread.sleep(1000); - } - throw new IOException("Split did not succeed for table: " + hbaseTableName.getNameAsString() - + " , expected regions after split: " + expectedRegions); - } - - /** - * Returns true if the region contains atleast one of the metadata rows we are interested in - */ - protected static boolean regionContainsMetadataRows(RegionInfo regionInfo, - List metadataRowKeys) { - for (byte[] rowKey : metadataRowKeys) { - if (regionInfo.containsRow(rowKey)) { - return true; - } - } - return false; - } - - protected static void splitTable(TableName fullTableName, List splitPoints) throws Exception { - Admin admin = - driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); - assertTrue("Needs at least two split points ", splitPoints.size() > 1); - assertTrue( - "Number of split points should be less than or equal to the number of region servers ", - splitPoints.size() <= NUM_SLAVES_BASE); - HBaseTestingUtility util = getUtility(); - MiniHBaseCluster cluster = 
util.getHBaseCluster(); - HMaster master = cluster.getMaster(); - //We don't want BalancerChore to undo our hard work - assertFalse("Balancer must be off", master.isBalancerOn()); - AssignmentManager am = master.getAssignmentManager(); - // No need to split on the first splitPoint since the end key of region boundaries are exclusive - for (int i=1; i regionInfoList = admin.getRegions(fullTableName); - assertEquals(splitPoints.size(), regionInfoList.size()); - HashMap> serverToRegionsList = Maps.newHashMapWithExpectedSize(NUM_SLAVES_BASE); - Deque availableRegionServers = new ArrayDeque(NUM_SLAVES_BASE); - for (int i=0; i tableRegions = - admin.getRegions(fullTableName); - for (RegionInfo hRegionInfo : tableRegions) { - // filter on regions we are interested in - if (regionContainsMetadataRows(hRegionInfo, splitPoints)) { - ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo); - if (!serverToRegionsList.containsKey(serverName)) { - serverToRegionsList.put(serverName, new ArrayList()); - } - serverToRegionsList.get(serverName).add(hRegionInfo); - availableRegionServers.remove(serverName); - } - } - assertFalse("No region servers available to move regions on to ", - availableRegionServers.isEmpty()); - for (Entry> entry : serverToRegionsList.entrySet()) { - List regions = entry.getValue(); - if (regions.size()>1) { - for (int i=1; i< regions.size(); ++i) { - moveRegion(regions.get(i), entry.getKey(), availableRegionServers.pop()); - } - } - } - - // verify each region is on its own region server - tableRegions = - admin.getRegions(fullTableName); - Set serverNames = Sets.newHashSet(); - for (RegionInfo regionInfo : tableRegions) { - // filter on regions we are interested in - if (regionContainsMetadataRows(regionInfo, splitPoints)) { - ServerName serverName = am.getRegionStates().getRegionServerOfRegion(regionInfo); - if (!serverNames.contains(serverName)) { - serverNames.add(serverName); - } - else { - fail("Multiple regions on "+serverName.getServerName()); - } - } - } - } - - /** - * Splits SYSTEM.CATALOG into multiple regions based on the table or view names passed in. - * Metadata for each table or view is moved to a separate region, - * @param tenantToTableAndViewMap map from tenant to tables and views owned by the tenant - */ - protected static void splitSystemCatalog(Map> tenantToTableAndViewMap) throws Exception { - List splitPoints = Lists.newArrayListWithExpectedSize(5); - // add the rows keys of the table or view metadata rows - Set schemaNameSet=Sets.newHashSetWithExpectedSize(15); - for (Entry> entrySet : tenantToTableAndViewMap.entrySet()) { - String tenantId = entrySet.getKey(); - for (String fullName : entrySet.getValue()) { - String schemaName = SchemaUtil.getSchemaNameFromFullName(fullName); - // we don't allow SYSTEM.CATALOG to split within a schema, so to ensure each table - // or view is on a separate region they need to have a unique tenant and schema name - assertTrue("Schema names of tables/view must be unique ", schemaNameSet.add(tenantId+"."+schemaName)); - String tableName = SchemaUtil.getTableNameFromFullName(fullName); - splitPoints.add( - SchemaUtil.getTableKey(tenantId, "".equals(schemaName) ? 
null : schemaName, tableName)); - } - } - Collections.sort(splitPoints, Bytes.BYTES_COMPARATOR); - - splitTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME, splitPoints); - } - - /** - * Ensures each region of SYSTEM.CATALOG is on a different region server - */ - private static void moveRegion(RegionInfo regionInfo, ServerName srcServerName, ServerName dstServerName) throws Exception { - Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); - HBaseTestingUtility util = getUtility(); - MiniHBaseCluster cluster = util.getHBaseCluster(); - HMaster master = cluster.getMaster(); - AssignmentManager am = master.getAssignmentManager(); - - HRegionServer dstServer = util.getHBaseCluster().getRegionServer(dstServerName); - HRegionServer srcServer = util.getHBaseCluster().getRegionServer(srcServerName); - byte[] encodedRegionNameInBytes = regionInfo.getEncodedNameAsBytes(); - admin.move(encodedRegionNameInBytes, dstServer.getServerName()); - while (dstServer.getOnlineRegion(regionInfo.getRegionName()) == null - || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes) - || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)) { - // wait for the move to be finished - Thread.sleep(100); - } - } - - /** - * It always unassign first region of table. - * @param tableName move region of table. - * @throws IOException - */ - protected static void unassignRegionAsync(final String tableName) throws IOException { - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - try { - final Admin admin = utility.getAdmin(); - final RegionInfo tableRegion = - admin.getRegions(TableName.valueOf(tableName)).get(0); - admin.unassign(tableRegion.getEncodedNameAsBytes(), false); - admin.assign(tableRegion.getEncodedNameAsBytes()); - } catch (IOException e) { - e.printStackTrace(); - } - } - }); - thread.setDaemon(true); - thread.start(); - } - - /** - * Confirms that no storeFile under any region has refCount leakage - * - * @return true if any region has refCount leakage - * @throws IOException caused by - * {@link CompatUtil#isAnyStoreRefCountLeaked(Admin)} - */ - protected synchronized static boolean isAnyStoreRefCountLeaked() - throws IOException { - if (getUtility() != null) { - return isAnyStoreRefCountLeaked(getUtility().getAdmin()); - } - return false; - } - - /** - * HBase 2.3+ has storeRefCount available in RegionMetrics - * - * @param admin Admin instance - * @return true if any region has refCount leakage - * @throws IOException if something went wrong while connecting to Admin - */ - public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin) - throws IOException { - int retries = 5; - while (retries > 0) { - boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin); - if (!isStoreRefCountLeaked) { - return false; - } - retries--; + Future deleteFuture = null; try { - Thread.sleep(1000); - } catch (InterruptedException e) { - LOGGER.error("Interrupted while sleeping", e); - break; + LOGGER.info("Calling delete table on: {}", table); + deleteFuture = admin.deleteTableAsync(tableName); + deleteFuture.get(dropTableTimeout, TimeUnit.SECONDS); + LOGGER.info("Table deleted: {}", table); + } catch (Exception e) { + LOGGER.warn("Could not delete table {}", table, e); + try { + deleteFuture.cancel(true); + } catch (Exception f) { + // fall through + } + // fall through } - } + return null; + } + })); + } + + try { + dropHTableExecutor.shutdown(); + 
dropHTableExecutor.awaitTermination(600, TimeUnit.SECONDS); + } catch (InterruptedException e) { + LOGGER.error("dropHTableExecutor didn't shut down in 10 minutes, calling shutdownNow()"); + dropHTableExecutor.shutdownNow(); + } + + if (System.currentTimeMillis() > deadline) { + LOGGER.error("Could not clean up HBase tables in 15 minutes, killing JVM"); + System.exit(-1); + } + } + + long endTime = System.currentTimeMillis(); + + LOGGER.info("Disabled and dropped {} tables in {} ms", tableCount, endTime - startTime); + } + + public static void assertOneOfValuesEqualsResultSet(ResultSet rs, + List>... expectedResultsArray) throws SQLException { + List> results = Lists.newArrayList(); + while (rs.next()) { + List result = Lists.newArrayList(); + for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { + result.add(rs.getObject(i + 1)); + } + results.add(result); + } + for (int j = 0; j < expectedResultsArray.length; j++) { + List> expectedResults = expectedResultsArray[j]; + Set> expectedResultsSet = Sets.newHashSet(expectedResults); + Iterator> iterator = results.iterator(); + while (iterator.hasNext()) { + if (expectedResultsSet.contains(iterator.next())) { + iterator.remove(); + } + } + } + if (results.isEmpty()) return; + fail("Unable to find " + results + " in " + Arrays.asList(expectedResultsArray)); + } + + protected void assertValueEqualsResultSet(ResultSet rs, List expectedResults) + throws SQLException { + List> nestedExpectedResults = + Lists.newArrayListWithExpectedSize(expectedResults.size()); + for (Object expectedResult : expectedResults) { + nestedExpectedResults.add(Arrays.asList(expectedResult)); + } + assertValuesEqualsResultSet(rs, nestedExpectedResults); + } + + /** + * Asserts that we find the expected values in the result set. We don't know the order, since we + * don't always have an order by and we're going through indexes, but we assert that each expected + * result occurs once as expected (in any order). + */ + public static void assertValuesEqualsResultSet(ResultSet rs, List> expectedResults) + throws SQLException { + int expectedCount = expectedResults.size(); + int count = 0; + List> actualResults = Lists.newArrayList(); + List errorResult = null; + while (rs.next() && errorResult == null) { + List result = Lists.newArrayList(); + for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) { + result.add(rs.getObject(i + 1)); + } + if (!expectedResults.contains(result)) { + errorResult = result; + } + actualResults.add(result); + count++; + } + assertTrue("Could not find " + errorResult + " in expected results: " + expectedResults + + " with actual results: " + actualResults, errorResult == null); + assertEquals(expectedCount, count); + } + + public static HBaseTestingUtility getUtility() { + return utility; + } + + public static void upsertRows(Connection conn, String fullTableName, int numRows) + throws SQLException { + for (int i = 1; i <= numRows; ++i) { + upsertRow(conn, fullTableName, i, false); + } + } + + public static void upsertRow(Connection conn, String fullTableName, int index, + boolean firstRowInBatch) throws SQLException { + String upsert = "UPSERT INTO " + fullTableName + + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + PreparedStatement stmt = conn.prepareStatement(upsert); + // varchar_pk + stmt.setString(1, firstRowInBatch ? 
"firstRowInBatch_" : "" + "varchar" + index); + stmt.setString(2, "char" + index); // char_pk + stmt.setInt(3, index); // int_pk + stmt.setLong(4, index); // long_pk + stmt.setBigDecimal(5, new BigDecimal(index)); // decimal_pk + Date date = DateUtil.parseDate("2015-01-01 00:00:00"); + stmt.setDate(6, date); // date_pk + stmt.setString(7, "varchar_a"); // a.varchar_col1 + stmt.setString(8, "chara"); // a.char_col1 + stmt.setInt(9, index + 1); // a.int_col1 + stmt.setLong(10, index + 1); // a.long_col1 + stmt.setBigDecimal(11, new BigDecimal(index + 1)); // a.decimal_col1 + stmt.setDate(12, date); // a.date1 + stmt.setString(13, "varchar_b"); // b.varchar_col2 + stmt.setString(14, "charb"); // b.char_col2 + stmt.setInt(15, index + 2); // b.int_col2 + stmt.setLong(16, index + 2); // b.long_col2 + stmt.setBigDecimal(17, new BigDecimal(index + 2)); // b.decimal_col2 + stmt.setDate(18, date); // b.date2 + stmt.executeUpdate(); + } + + // Populate the test table with data. + public static synchronized void populateTestTable(String fullTableName) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = DriverManager.getConnection(getUrl(), props)) { + upsertRows(conn, fullTableName, 3); + conn.commit(); + } + } + + // Populate the test table with data. + protected static void populateMultiCFTestTable(String tableName) throws SQLException { + populateMultiCFTestTable(tableName, null); + } + + // Populate the test table with data. + protected static void populateMultiCFTestTable(String tableName, Date date) throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + String upsert = + "UPSERT INTO " + tableName + " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + PreparedStatement stmt = conn.prepareStatement(upsert); + stmt.setString(1, "varchar1"); + stmt.setString(2, "char1"); + stmt.setInt(3, 1); + stmt.setLong(4, 1L); + stmt.setBigDecimal(5, new BigDecimal("1.1")); + stmt.setString(6, "varchar_a"); + stmt.setString(7, "chara"); + stmt.setInt(8, 2); + stmt.setLong(9, 2L); + stmt.setBigDecimal(10, new BigDecimal("2.1")); + stmt.setString(11, "varchar_b"); + stmt.setString(12, "charb"); + stmt.setInt(13, 3); + stmt.setLong(14, 3L); + stmt.setBigDecimal(15, new BigDecimal("3.1")); + stmt.setDate(16, date == null ? null : new Date(date.getTime() + MILLIS_IN_DAY)); + stmt.executeUpdate(); + + stmt.setString(1, "varchar2"); + stmt.setString(2, "char2"); + stmt.setInt(3, 2); + stmt.setLong(4, 2L); + stmt.setBigDecimal(5, new BigDecimal("2.2")); + stmt.setString(6, "varchar_a"); + stmt.setString(7, "chara"); + stmt.setInt(8, 3); + stmt.setLong(9, 3L); + stmt.setBigDecimal(10, new BigDecimal("3.2")); + stmt.setString(11, "varchar_b"); + stmt.setString(12, "charb"); + stmt.setInt(13, 4); + stmt.setLong(14, 4L); + stmt.setBigDecimal(15, new BigDecimal("4.2")); + stmt.setDate(16, date); + stmt.executeUpdate(); + + stmt.setString(1, "varchar3"); + stmt.setString(2, "char3"); + stmt.setInt(3, 3); + stmt.setLong(4, 3L); + stmt.setBigDecimal(5, new BigDecimal("3.3")); + stmt.setString(6, "varchar_a"); + stmt.setString(7, "chara"); + stmt.setInt(8, 4); + stmt.setLong(9, 4L); + stmt.setBigDecimal(10, new BigDecimal("4.3")); + stmt.setString(11, "varchar_b"); + stmt.setString(12, "charb"); + stmt.setInt(13, 5); + stmt.setLong(14, 5L); + stmt.setBigDecimal(15, new BigDecimal("5.3")); + stmt.setDate(16, date == null ? 
null : new Date(date.getTime() + 2 * MILLIS_IN_DAY)); + stmt.executeUpdate(); + + conn.commit(); + } finally { + conn.close(); + } + } + + protected static void verifySequenceNotExists(String tenantID, String sequenceName, + String sequenceSchemaName) throws SQLException { + verifySequence(tenantID, sequenceName, sequenceSchemaName, false, 0); + } + + protected static void verifySequenceValue(String tenantID, String sequenceName, + String sequenceSchemaName, long value) throws SQLException { + verifySequence(tenantID, sequenceName, sequenceSchemaName, true, value); + } + + private static void verifySequence(String tenantID, String sequenceName, + String sequenceSchemaName, boolean exists, long value) throws SQLException { + + PhoenixConnection phxConn = + DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); + String ddl = + "SELECT " + PhoenixDatabaseMetaData.TENANT_ID + "," + PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + + "," + PhoenixDatabaseMetaData.SEQUENCE_NAME + "," + PhoenixDatabaseMetaData.CURRENT_VALUE + + " FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE "; + + ddl += " TENANT_ID " + ((tenantID == null) ? "IS NULL " : " = '" + tenantID + "'"); + ddl += + " AND SEQUENCE_NAME " + ((sequenceName == null) ? "IS NULL " : " = '" + sequenceName + "'"); + ddl += " AND SEQUENCE_SCHEMA " + + ((sequenceSchemaName == null) ? "IS NULL " : " = '" + sequenceSchemaName + "'"); + + ResultSet rs = phxConn.createStatement().executeQuery(ddl); + + if (exists) { + assertTrue(rs.next()); + assertEquals(value, rs.getLong(4)); + } else { + assertFalse(rs.next()); + } + phxConn.close(); + } + + /** + * Synchronously split table at the given split point + */ + protected static void splitTableSync(Admin admin, TableName hbaseTableName, byte[] splitPoint, + int expectedRegions) throws IOException, InterruptedException { + admin.split(hbaseTableName, splitPoint); + for (int i = 0; i < 30; i++) { + List regions = getUtility().getHBaseCluster().getRegions(hbaseTableName); + if (regions.size() >= expectedRegions) { + boolean splitSuccessful = true; + for (HRegion region : regions) { + if (!region.isSplittable()) { + splitSuccessful = false; + } + } + if (splitSuccessful) { + return; + } + } + LOGGER.info( + "Sleeping for 1000 ms while waiting for {} to split and all regions to come online", + hbaseTableName.getNameAsString()); + Thread.sleep(1000); + } + throw new IOException("Split did not succeed for table: " + hbaseTableName.getNameAsString() + + " , expected regions after split: " + expectedRegions); + } + + /** + * Returns true if the region contains atleast one of the metadata rows we are interested in + */ + protected static boolean regionContainsMetadataRows(RegionInfo regionInfo, + List metadataRowKeys) { + for (byte[] rowKey : metadataRowKeys) { + if (regionInfo.containsRow(rowKey)) { return true; - } - - private static boolean isStoreRefCountLeaked(Admin admin) - throws IOException { - for (ServerName serverName : admin.getRegionServers()) { - for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) { - if (regionMetrics.getNameAsString(). - contains(TableName.META_TABLE_NAME.getNameAsString())) { - // Just because something is trying to read from hbase:meta in the background - // doesn't mean we leaked a scanner, so skip this - continue; - } - int regionTotalRefCount = regionMetrics.getStoreRefCount(); - if (regionTotalRefCount > 0) { - LOGGER.error("Region {} has refCount leak. 
Total refCount" - + " of all storeFiles combined for the region: {}", - regionMetrics.getNameAsString(), regionTotalRefCount); - return true; - } - } - } + } + } + return false; + } + + protected static void splitTable(TableName fullTableName, List splitPoints) + throws Exception { + Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); + assertTrue("Needs at least two split points ", splitPoints.size() > 1); + assertTrue( + "Number of split points should be less than or equal to the number of region servers ", + splitPoints.size() <= NUM_SLAVES_BASE); + HBaseTestingUtility util = getUtility(); + MiniHBaseCluster cluster = util.getHBaseCluster(); + HMaster master = cluster.getMaster(); + // We don't want BalancerChore to undo our hard work + assertFalse("Balancer must be off", master.isBalancerOn()); + AssignmentManager am = master.getAssignmentManager(); + // No need to split on the first splitPoint since the end key of region boundaries are exclusive + for (int i = 1; i < splitPoints.size(); ++i) { + splitTableSync(admin, fullTableName, splitPoints.get(i), i + 1); + } + List regionInfoList = admin.getRegions(fullTableName); + assertEquals(splitPoints.size(), regionInfoList.size()); + HashMap> serverToRegionsList = + Maps.newHashMapWithExpectedSize(NUM_SLAVES_BASE); + Deque availableRegionServers = new ArrayDeque(NUM_SLAVES_BASE); + for (int i = 0; i < NUM_SLAVES_BASE; ++i) { + availableRegionServers.push(util.getHBaseCluster().getRegionServer(i).getServerName()); + } + List tableRegions = admin.getRegions(fullTableName); + for (RegionInfo hRegionInfo : tableRegions) { + // filter on regions we are interested in + if (regionContainsMetadataRows(hRegionInfo, splitPoints)) { + ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo); + if (!serverToRegionsList.containsKey(serverName)) { + serverToRegionsList.put(serverName, new ArrayList()); + } + serverToRegionsList.get(serverName).add(hRegionInfo); + availableRegionServers.remove(serverName); + } + } + assertFalse("No region servers available to move regions on to ", + availableRegionServers.isEmpty()); + for (Entry> entry : serverToRegionsList.entrySet()) { + List regions = entry.getValue(); + if (regions.size() > 1) { + for (int i = 1; i < regions.size(); ++i) { + moveRegion(regions.get(i), entry.getKey(), availableRegionServers.pop()); + } + } + } + + // verify each region is on its own region server + tableRegions = admin.getRegions(fullTableName); + Set serverNames = Sets.newHashSet(); + for (RegionInfo regionInfo : tableRegions) { + // filter on regions we are interested in + if (regionContainsMetadataRows(regionInfo, splitPoints)) { + ServerName serverName = am.getRegionStates().getRegionServerOfRegion(regionInfo); + if (!serverNames.contains(serverName)) { + serverNames.add(serverName); + } else { + fail("Multiple regions on " + serverName.getServerName()); + } + } + } + } + + /** + * Splits SYSTEM.CATALOG into multiple regions based on the table or view names passed in. 
+ * Metadata for each table or view is moved to a separate region, + * @param tenantToTableAndViewMap map from tenant to tables and views owned by the tenant + */ + protected static void splitSystemCatalog(Map> tenantToTableAndViewMap) + throws Exception { + List splitPoints = Lists.newArrayListWithExpectedSize(5); + // add the rows keys of the table or view metadata rows + Set schemaNameSet = Sets.newHashSetWithExpectedSize(15); + for (Entry> entrySet : tenantToTableAndViewMap.entrySet()) { + String tenantId = entrySet.getKey(); + for (String fullName : entrySet.getValue()) { + String schemaName = SchemaUtil.getSchemaNameFromFullName(fullName); + // we don't allow SYSTEM.CATALOG to split within a schema, so to ensure each table + // or view is on a separate region they need to have a unique tenant and schema name + assertTrue("Schema names of tables/view must be unique ", + schemaNameSet.add(tenantId + "." + schemaName)); + String tableName = SchemaUtil.getTableNameFromFullName(fullName); + splitPoints.add( + SchemaUtil.getTableKey(tenantId, "".equals(schemaName) ? null : schemaName, tableName)); + } + } + Collections.sort(splitPoints, Bytes.BYTES_COMPARATOR); + + splitTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME, splitPoints); + } + + /** + * Ensures each region of SYSTEM.CATALOG is on a different region server + */ + private static void moveRegion(RegionInfo regionInfo, ServerName srcServerName, + ServerName dstServerName) throws Exception { + Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin(); + HBaseTestingUtility util = getUtility(); + MiniHBaseCluster cluster = util.getHBaseCluster(); + HMaster master = cluster.getMaster(); + AssignmentManager am = master.getAssignmentManager(); + + HRegionServer dstServer = util.getHBaseCluster().getRegionServer(dstServerName); + HRegionServer srcServer = util.getHBaseCluster().getRegionServer(srcServerName); + byte[] encodedRegionNameInBytes = regionInfo.getEncodedNameAsBytes(); + admin.move(encodedRegionNameInBytes, dstServer.getServerName()); + while ( + dstServer.getOnlineRegion(regionInfo.getRegionName()) == null + || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes) + || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes) + ) { + // wait for the move to be finished + Thread.sleep(100); + } + } + + /** + * It always unassign first region of table. + * @param tableName move region of table. 
+ */ + protected static void unassignRegionAsync(final String tableName) throws IOException { + Thread thread = new Thread(new Runnable() { + @Override + public void run() { + try { + final Admin admin = utility.getAdmin(); + final RegionInfo tableRegion = admin.getRegions(TableName.valueOf(tableName)).get(0); + admin.unassign(tableRegion.getEncodedNameAsBytes(), false); + admin.assign(tableRegion.getEncodedNameAsBytes()); + } catch (IOException e) { + e.printStackTrace(); + } + } + }); + thread.setDaemon(true); + thread.start(); + } + + /** + * Confirms that no storeFile under any region has refCount leakage + * @return true if any region has refCount leakage + * @throws IOException caused by {@link CompatUtil#isAnyStoreRefCountLeaked(Admin)} + */ + protected synchronized static boolean isAnyStoreRefCountLeaked() throws IOException { + if (getUtility() != null) { + return isAnyStoreRefCountLeaked(getUtility().getAdmin()); + } + return false; + } + + /** + * HBase 2.3+ has storeRefCount available in RegionMetrics + * @param admin Admin instance + * @return true if any region has refCount leakage + * @throws IOException if something went wrong while connecting to Admin + */ + public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin) throws IOException { + int retries = 5; + while (retries > 0) { + boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin); + if (!isStoreRefCountLeaked) { return false; - } - - protected Long queryTableLevelMaxLookbackAge(String fullTableName) throws Exception { - try(Connection conn = DriverManager.getConnection(getUrl())) { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - return pconn.getTableNoCache(fullTableName).getMaxLookbackAge(); - } - } - - public void deleteAllRows(Connection conn, TableName tableName) throws SQLException, - IOException, InterruptedException { - Scan scan = new Scan(); - Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices(). - getAdmin(); - org.apache.hadoop.hbase.client.Connection hbaseConn = admin.getConnection(); - Table table = hbaseConn.getTable(tableName); - boolean deletedRows = false; - try (ResultScanner scanner = table.getScanner(scan)) { - for (Result r : scanner) { - Delete del = new Delete(r.getRow()); - table.delete(del); - deletedRows = true; - } - } catch (Exception e) { - //if the table doesn't exist, we have no rows to delete. 
Easier to catch - //than to pre-check for existence - } - //don't flush/compact if we didn't write anything, because we'll hang forever - if (deletedRows) { - getUtility().getAdmin().flush(tableName); - TestUtil.majorCompact(getUtility(), tableName); - } - } - - static public void resetIndexRegionObserverFailPoints() { - IndexRegionObserver.setFailPreIndexUpdatesForTesting(false); - IndexRegionObserver.setFailDataTableUpdatesForTesting(false); - IndexRegionObserver.setFailPostIndexUpdatesForTesting(false); - } + } + retries--; + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOGGER.error("Interrupted while sleeping", e); + break; + } + } + return true; + } + + private static boolean isStoreRefCountLeaked(Admin admin) throws IOException { + for (ServerName serverName : admin.getRegionServers()) { + for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) { + if (regionMetrics.getNameAsString().contains(TableName.META_TABLE_NAME.getNameAsString())) { + // Just because something is trying to read from hbase:meta in the background + // doesn't mean we leaked a scanner, so skip this + continue; + } + int regionTotalRefCount = regionMetrics.getStoreRefCount(); + if (regionTotalRefCount > 0) { + LOGGER.error( + "Region {} has refCount leak. Total refCount" + + " of all storeFiles combined for the region: {}", + regionMetrics.getNameAsString(), regionTotalRefCount); + return true; + } + } + } + return false; + } + + protected Long queryTableLevelMaxLookbackAge(String fullTableName) throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + return pconn.getTableNoCache(fullTableName).getMaxLookbackAge(); + } + } + + public void deleteAllRows(Connection conn, TableName tableName) + throws SQLException, IOException, InterruptedException { + Scan scan = new Scan(); + Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); + org.apache.hadoop.hbase.client.Connection hbaseConn = admin.getConnection(); + Table table = hbaseConn.getTable(tableName); + boolean deletedRows = false; + try (ResultScanner scanner = table.getScanner(scan)) { + for (Result r : scanner) { + Delete del = new Delete(r.getRow()); + table.delete(del); + deletedRows = true; + } + } catch (Exception e) { + // if the table doesn't exist, we have no rows to delete. Easier to catch + // than to pre-check for existence + } + // don't flush/compact if we didn't write anything, because we'll hang forever + if (deletedRows) { + getUtility().getAdmin().flush(tableName); + TestUtil.majorCompact(getUtility(), tableName); + } + } + + static public void resetIndexRegionObserverFailPoints() { + IndexRegionObserver.setFailPreIndexUpdatesForTesting(false); + IndexRegionObserver.setFailDataTableUpdatesForTesting(false); + IndexRegionObserver.setFailPostIndexUpdatesForTesting(false); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java index 048bf250aa5..777a70980b4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,16 +45,16 @@ import java.util.Map; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.phoenix.SystemExitRule; @@ -69,309 +70,292 @@ import org.mockito.MockitoAnnotations; public class ConnectionQueryServicesImplTest { - private static final PhoenixIOException PHOENIX_IO_EXCEPTION = - new PhoenixIOException(new Exception("Test exception")); - private TableDescriptor sysMutexTableDescCorrectTTL = TableDescriptorBuilder - .newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME)) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES) - .setTimeToLive(TTL_FOR_MUTEX) - .build()) - .build(); - - @ClassRule - public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); - - @Mock - private ConnectionQueryServicesImpl mockCqs; - - @Mock - private Admin mockAdmin; - - @Mock - private ReadOnlyProps readOnlyProps; - - @Mock - private Connection mockConn; - - @Mock - private Table mockTable; - - @Mock - private GuidePostsCacheWrapper mockTableStatsCache; - - public static final TableDescriptorBuilder SYS_TASK_TDB = TableDescriptorBuilder - .newBuilder(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)); - public static final TableDescriptorBuilder SYS_TASK_TDB_SP = TableDescriptorBuilder - .newBuilder(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)) - .setRegionSplitPolicyClassName("abc"); - - - @Before - public void setup() throws IOException, NoSuchFieldException, - IllegalAccessException, SQLException { - MockitoAnnotations.initMocks(this); - Field props = ConnectionQueryServicesImpl.class - .getDeclaredField("props"); - props.setAccessible(true); - props.set(mockCqs, readOnlyProps); - props = ConnectionQueryServicesImpl.class.getDeclaredField("connection"); - props.setAccessible(true); - props.set(mockCqs, mockConn); - props = 
ConnectionQueryServicesImpl.class.getDeclaredField("tableStatsCache"); - props.setAccessible(true); - props.set(mockCqs, mockTableStatsCache); - when(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)) - .thenCallRealMethod(); - when(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)) - .thenCallRealMethod(); - when(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB_SP)) - .thenCallRealMethod(); - when(mockCqs.getSysMutexTable()).thenCallRealMethod(); - when(mockCqs.getAdmin()).thenCallRealMethod(); - when(mockCqs.getTable(Mockito.any())).thenCallRealMethod(); - when(mockCqs.getTableIfExists(Mockito.any())).thenCallRealMethod(); - doCallRealMethod().when(mockCqs).dropTables(Mockito.any()); - } - - @SuppressWarnings("unchecked") - @Test - public void testExceptionHandlingOnSystemNamespaceCreation() throws Exception { - // Invoke the real methods for these two calls - when(mockCqs.createSchema(any(List.class), anyString())).thenCallRealMethod(); - doCallRealMethod().when(mockCqs).ensureSystemTablesMigratedToSystemNamespace(); - // Do nothing for this method, just check that it was invoked later - doNothing().when(mockCqs).createSysMutexTableIfNotExists(any(Admin.class)); - - // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast. - when(mockCqs.getSystemTableNamesInDefaultNamespace(any(Admin.class))) - .thenReturn(Collections. emptyList()); - - // Throw a special exception to check on later - doThrow(PHOENIX_IO_EXCEPTION).when(mockCqs).ensureNamespaceCreated(anyString()); - - // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate - // the system tables. - Map props = new HashMap<>(); - props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true"); - when(mockCqs.getProps()).thenReturn(new ReadOnlyProps(props)); - mockCqs.ensureSystemTablesMigratedToSystemNamespace(); - - // Should be called after upgradeSystemTables() - // Proves that execution proceeded - verify(mockCqs).getSystemTableNamesInDefaultNamespace(any()); - - try { - // Verifies that the exception is propagated back to the caller - mockCqs.createSchema(Collections. 
emptyList(), ""); - } catch (PhoenixIOException e) { - assertEquals(PHOENIX_IO_EXCEPTION, e); - } - } - - @Test - public void testGetNextRegionStartKey() { - RegionInfo mockHRegionInfo = org.mockito.Mockito.mock(RegionInfo.class); - RegionInfo mockPrevHRegionInfo = org.mockito.Mockito.mock(RegionInfo.class); - HRegionLocation mockRegionLocation = org.mockito.Mockito.mock(HRegionLocation.class); - HRegionLocation mockPrevRegionLocation = org.mockito.Mockito.mock(HRegionLocation.class); - ConnectionQueryServicesImpl mockCqsi = - org.mockito.Mockito.mock(ConnectionQueryServicesImpl.class, - org.mockito.Mockito.CALLS_REAL_METHODS); - byte[] corruptedStartAndEndKey = "0x3000".getBytes(); - byte[] corruptedDecreasingKey = "0x2999".getBytes(); - byte[] corruptedNewEndKey = "0x3001".getBytes(); - byte[] notCorruptedStartKey = "0x2999".getBytes(); - byte[] notCorruptedEndKey = "0x3000".getBytes(); - byte[] notCorruptedNewKey = "0x3001".getBytes(); - byte[] mockTableName = "dummyTable".getBytes(); - when(mockRegionLocation.getRegion()).thenReturn(mockHRegionInfo); - when(mockHRegionInfo.getRegionName()).thenReturn(mockTableName); - when(mockPrevRegionLocation.getRegion()).thenReturn(mockPrevHRegionInfo); - when(mockPrevHRegionInfo.getRegionName()).thenReturn(mockTableName); - - // comparing the current regionInfo endKey is equal to the previous endKey - // [0x3000, Ox3000) vs 0x3000 - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); - when(mockHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, - mockPrevRegionLocation); - - // comparing the current regionInfo endKey is less than previous endKey - // [0x3000,0x2999) vs 0x3000 - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); - when(mockHRegionInfo.getEndKey()).thenReturn(corruptedDecreasingKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, - mockPrevRegionLocation); - - // comparing the current regionInfo endKey is greater than the previous endKey - // [0x2999,0x3001) vs 0x3000. 
- GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedDecreasingKey); - when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, - mockPrevRegionLocation); - - // comparing the current regionInfo startKey is greater than the previous endKey leading to a hole - // [0x3000,0x3001) vs 0x2999 - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); - when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedDecreasingKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedDecreasingKey, true, - mockPrevRegionLocation); - - // comparing the current regionInfo startKey is less than the previous endKey leading to an overlap - // [0x2999,0x3001) vs 0x3000. - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedDecreasingKey); - when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, - mockPrevRegionLocation); - - // comparing the current regionInfo startKey is equal to the previous endKey - // [0x3000,0x3001) vs 0x3000 - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); - when(mockHRegionInfo.getEndKey()).thenReturn(notCorruptedNewKey); - when(mockPrevHRegionInfo.getEndKey()).thenReturn(notCorruptedEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, notCorruptedEndKey, false, - mockPrevRegionLocation); - - // test EMPTY_START_ROW - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(HConstants.EMPTY_START_ROW); - when(mockHRegionInfo.getEndKey()).thenReturn(notCorruptedEndKey); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, HConstants.EMPTY_START_ROW, false, - null); - - //test EMPTY_END_ROW - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); - when(mockHRegionInfo.getStartKey()).thenReturn(notCorruptedStartKey); - when(mockHRegionInfo.getEndKey()).thenReturn(HConstants.EMPTY_END_ROW); - testGetNextRegionStartKey(mockCqsi, mockRegionLocation, notCorruptedStartKey, false, null); - } - - private void testGetNextRegionStartKey(ConnectionQueryServicesImpl mockCqsi, - HRegionLocation mockRegionLocation, byte[] key, boolean isCorrupted, - HRegionLocation mockPrevRegionLocation) { - mockCqsi.getNextRegionStartKey(mockRegionLocation, key, mockPrevRegionLocation); - - assertEquals(isCorrupted ? 
1 : 0, - GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric() - .getValue()); - } - - @Test - public void testSysMutexCheckReturnsFalseWhenTableAbsent() throws Exception { - // Override the getDescriptor() call to throw instead - doThrow(new TableNotFoundException()) - .when(mockAdmin) - .getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)); - doThrow(new TableNotFoundException()) - .when(mockAdmin) - .getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME)); - assertFalse(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); - } - - @Test - public void testSysMutexCheckModifiesTTLWhenWrong() throws Exception { - // Set the wrong TTL - TableDescriptor sysMutexTableDescWrongTTL = TableDescriptorBuilder - .newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME)) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES) - .setTimeToLive(HConstants.FOREVER) - .build()) - .build(); - when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME))) - .thenReturn(sysMutexTableDescWrongTTL); - - assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); - verify(mockAdmin, Mockito.times(1)).modifyTable(sysMutexTableDescCorrectTTL); + private static final PhoenixIOException PHOENIX_IO_EXCEPTION = + new PhoenixIOException(new Exception("Test exception")); + private TableDescriptor sysMutexTableDescCorrectTTL = TableDescriptorBuilder + .newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME)).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES).setTimeToLive(TTL_FOR_MUTEX).build()) + .build(); + + @ClassRule + public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); + + @Mock + private ConnectionQueryServicesImpl mockCqs; + + @Mock + private Admin mockAdmin; + + @Mock + private ReadOnlyProps readOnlyProps; + + @Mock + private Connection mockConn; + + @Mock + private Table mockTable; + + @Mock + private GuidePostsCacheWrapper mockTableStatsCache; + + public static final TableDescriptorBuilder SYS_TASK_TDB = + TableDescriptorBuilder.newBuilder(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)); + public static final TableDescriptorBuilder SYS_TASK_TDB_SP = + TableDescriptorBuilder.newBuilder(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_TASK_NAME)) + .setRegionSplitPolicyClassName("abc"); + + @Before + public void setup() + throws IOException, NoSuchFieldException, IllegalAccessException, SQLException { + MockitoAnnotations.initMocks(this); + Field props = ConnectionQueryServicesImpl.class.getDeclaredField("props"); + props.setAccessible(true); + props.set(mockCqs, readOnlyProps); + props = ConnectionQueryServicesImpl.class.getDeclaredField("connection"); + props.setAccessible(true); + props.set(mockCqs, mockConn); + props = ConnectionQueryServicesImpl.class.getDeclaredField("tableStatsCache"); + props.setAccessible(true); + props.set(mockCqs, mockTableStatsCache); + when(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)).thenCallRealMethod(); + when(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)).thenCallRealMethod(); + when(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB_SP)).thenCallRealMethod(); + when(mockCqs.getSysMutexTable()).thenCallRealMethod(); + when(mockCqs.getAdmin()).thenCallRealMethod(); + when(mockCqs.getTable(Mockito.any())).thenCallRealMethod(); + when(mockCqs.getTableIfExists(Mockito.any())).thenCallRealMethod(); + doCallRealMethod().when(mockCqs).dropTables(Mockito.any()); + } + + 
@SuppressWarnings("unchecked") + @Test + public void testExceptionHandlingOnSystemNamespaceCreation() throws Exception { + // Invoke the real methods for these two calls + when(mockCqs.createSchema(any(List.class), anyString())).thenCallRealMethod(); + doCallRealMethod().when(mockCqs).ensureSystemTablesMigratedToSystemNamespace(); + // Do nothing for this method, just check that it was invoked later + doNothing().when(mockCqs).createSysMutexTableIfNotExists(any(Admin.class)); + + // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast. + when(mockCqs.getSystemTableNamesInDefaultNamespace(any(Admin.class))) + .thenReturn(Collections. emptyList()); + + // Throw a special exception to check on later + doThrow(PHOENIX_IO_EXCEPTION).when(mockCqs).ensureNamespaceCreated(anyString()); + + // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate + // the system tables. + Map props = new HashMap<>(); + props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true"); + when(mockCqs.getProps()).thenReturn(new ReadOnlyProps(props)); + mockCqs.ensureSystemTablesMigratedToSystemNamespace(); + + // Should be called after upgradeSystemTables() + // Proves that execution proceeded + verify(mockCqs).getSystemTableNamesInDefaultNamespace(any()); + + try { + // Verifies that the exception is propagated back to the caller + mockCqs.createSchema(Collections. emptyList(), ""); + } catch (PhoenixIOException e) { + assertEquals(PHOENIX_IO_EXCEPTION, e); } - - @Test - public void testSysMutexCheckDoesNotModifyTableDescWhenTTLCorrect() throws Exception { - when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME))) - .thenReturn(sysMutexTableDescCorrectTTL); - - assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); - verify(mockAdmin, Mockito.times(0)).modifyTable(any(TableDescriptor.class)); - } - - @Test - public void testSysTaskSplitPolicy() throws Exception { - assertTrue(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)); - assertFalse(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)); - } - - @Test - public void testSysTaskSplitPolicyWithError() { - try { - mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB_SP); - fail("Split policy for SYSTEM.TASK cannot be updated"); - } catch (SQLException e) { - assertEquals("ERROR 908 (43M19): REGION SPLIT POLICY is incorrect." 
- + " Region split policy for table TASK is expected to be " - + "among: [null, org.apache.phoenix.schema.SystemTaskSplitPolicy]" - + " , actual split policy: abc tableName=SYSTEM.TASK", - e.getMessage()); - } - } - - @Test - public void testGetSysMutexTableWithName() throws Exception { - when(mockAdmin.tableExists(any())).thenReturn(true); - when(mockConn.getAdmin()).thenReturn(mockAdmin); - when(mockConn.getTable(TableName.valueOf("SYSTEM.MUTEX"))) - .thenReturn(mockTable); - assertSame(mockCqs.getSysMutexTable(), mockTable); - verify(mockAdmin, Mockito.times(1)).tableExists(any()); - verify(mockConn, Mockito.times(1)).getAdmin(); - verify(mockConn, Mockito.times(1)) - .getTable(TableName.valueOf("SYSTEM.MUTEX")); - } - - @Test - public void testGetSysMutexTableWithNamespace() throws Exception { - when(mockAdmin.tableExists(any())).thenReturn(false); - when(mockConn.getAdmin()).thenReturn(mockAdmin); - when(mockConn.getTable(TableName.valueOf("SYSTEM:MUTEX"))) - .thenReturn(mockTable); - assertSame(mockCqs.getSysMutexTable(), mockTable); - verify(mockAdmin, Mockito.times(1)).tableExists(any()); - verify(mockConn, Mockito.times(1)).getAdmin(); - verify(mockConn, Mockito.times(1)) - .getTable(TableName.valueOf("SYSTEM:MUTEX")); - } - - @Test - public void testDropTablesAlreadyDisabled() throws Exception { - when(mockConn.getAdmin()).thenReturn(mockAdmin); - doThrow(new TableNotEnabledException()).when(mockAdmin).disableTable(any()); - doNothing().when(mockAdmin).deleteTable(any()); - mockCqs.dropTables(Collections.singletonList("TEST_TABLE".getBytes(StandardCharsets.UTF_8))); - verify(mockAdmin, Mockito.times(1)).disableTable(TableName.valueOf("TEST_TABLE")); - verify(mockAdmin, Mockito.times(1)).deleteTable(TableName.valueOf("TEST_TABLE")); - verify(mockConn).getAdmin(); - } - - @Test - public void testDropTablesTableEnabled() throws Exception { - when(mockConn.getAdmin()).thenReturn(mockAdmin); - doNothing().when(mockAdmin).disableTable(any()); - doNothing().when(mockAdmin).deleteTable(any()); - doNothing().when(mockTableStatsCache).invalidateAll(); - mockCqs.dropTables(Collections.singletonList("TEST_TABLE".getBytes(StandardCharsets.UTF_8))); - verify(mockAdmin, Mockito.times(1)).disableTable(TableName.valueOf("TEST_TABLE")); - verify(mockAdmin, Mockito.times(1)).deleteTable(TableName.valueOf("TEST_TABLE")); - verify(mockConn).getAdmin(); + } + + @Test + public void testGetNextRegionStartKey() { + RegionInfo mockHRegionInfo = org.mockito.Mockito.mock(RegionInfo.class); + RegionInfo mockPrevHRegionInfo = org.mockito.Mockito.mock(RegionInfo.class); + HRegionLocation mockRegionLocation = org.mockito.Mockito.mock(HRegionLocation.class); + HRegionLocation mockPrevRegionLocation = org.mockito.Mockito.mock(HRegionLocation.class); + ConnectionQueryServicesImpl mockCqsi = org.mockito.Mockito + .mock(ConnectionQueryServicesImpl.class, org.mockito.Mockito.CALLS_REAL_METHODS); + byte[] corruptedStartAndEndKey = "0x3000".getBytes(); + byte[] corruptedDecreasingKey = "0x2999".getBytes(); + byte[] corruptedNewEndKey = "0x3001".getBytes(); + byte[] notCorruptedStartKey = "0x2999".getBytes(); + byte[] notCorruptedEndKey = "0x3000".getBytes(); + byte[] notCorruptedNewKey = "0x3001".getBytes(); + byte[] mockTableName = "dummyTable".getBytes(); + when(mockRegionLocation.getRegion()).thenReturn(mockHRegionInfo); + when(mockHRegionInfo.getRegionName()).thenReturn(mockTableName); + when(mockPrevRegionLocation.getRegion()).thenReturn(mockPrevHRegionInfo); + 
when(mockPrevHRegionInfo.getRegionName()).thenReturn(mockTableName); + + // comparing the current regionInfo endKey is equal to the previous endKey + // [0x3000, Ox3000) vs 0x3000 + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); + when(mockHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, + mockPrevRegionLocation); + + // comparing the current regionInfo endKey is less than previous endKey + // [0x3000,0x2999) vs 0x3000 + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); + when(mockHRegionInfo.getEndKey()).thenReturn(corruptedDecreasingKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, + mockPrevRegionLocation); + + // comparing the current regionInfo endKey is greater than the previous endKey + // [0x2999,0x3001) vs 0x3000. + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedDecreasingKey); + when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, + mockPrevRegionLocation); + + // comparing the current regionInfo startKey is greater than the previous endKey leading to a + // hole + // [0x3000,0x3001) vs 0x2999 + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); + when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedDecreasingKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedDecreasingKey, true, + mockPrevRegionLocation); + + // comparing the current regionInfo startKey is less than the previous endKey leading to an + // overlap + // [0x2999,0x3001) vs 0x3000. 
+ GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedDecreasingKey); + when(mockHRegionInfo.getEndKey()).thenReturn(corruptedNewEndKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(corruptedStartAndEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, corruptedStartAndEndKey, true, + mockPrevRegionLocation); + + // comparing the current regionInfo startKey is equal to the previous endKey + // [0x3000,0x3001) vs 0x3000 + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(corruptedStartAndEndKey); + when(mockHRegionInfo.getEndKey()).thenReturn(notCorruptedNewKey); + when(mockPrevHRegionInfo.getEndKey()).thenReturn(notCorruptedEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, notCorruptedEndKey, false, + mockPrevRegionLocation); + + // test EMPTY_START_ROW + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(HConstants.EMPTY_START_ROW); + when(mockHRegionInfo.getEndKey()).thenReturn(notCorruptedEndKey); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, HConstants.EMPTY_START_ROW, false, + null); + + // test EMPTY_END_ROW + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().reset(); + when(mockHRegionInfo.getStartKey()).thenReturn(notCorruptedStartKey); + when(mockHRegionInfo.getEndKey()).thenReturn(HConstants.EMPTY_END_ROW); + testGetNextRegionStartKey(mockCqsi, mockRegionLocation, notCorruptedStartKey, false, null); + } + + private void testGetNextRegionStartKey(ConnectionQueryServicesImpl mockCqsi, + HRegionLocation mockRegionLocation, byte[] key, boolean isCorrupted, + HRegionLocation mockPrevRegionLocation) { + mockCqsi.getNextRegionStartKey(mockRegionLocation, key, mockPrevRegionLocation); + + assertEquals(isCorrupted ? 
1 : 0, + GlobalClientMetrics.GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY.getMetric().getValue()); + } + + @Test + public void testSysMutexCheckReturnsFalseWhenTableAbsent() throws Exception { + // Override the getDescriptor() call to throw instead + doThrow(new TableNotFoundException()).when(mockAdmin) + .getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME)); + doThrow(new TableNotFoundException()).when(mockAdmin) + .getDescriptor(TableName.valueOf(SYSTEM_SCHEMA_NAME, SYSTEM_MUTEX_TABLE_NAME)); + assertFalse(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); + } + + @Test + public void testSysMutexCheckModifiesTTLWhenWrong() throws Exception { + // Set the wrong TTL + TableDescriptor sysMutexTableDescWrongTTL = + TableDescriptorBuilder.newBuilder(TableName.valueOf(SYSTEM_MUTEX_NAME)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(SYSTEM_MUTEX_FAMILY_NAME_BYTES) + .setTimeToLive(HConstants.FOREVER).build()) + .build(); + when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME))) + .thenReturn(sysMutexTableDescWrongTTL); + + assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); + verify(mockAdmin, Mockito.times(1)).modifyTable(sysMutexTableDescCorrectTTL); + } + + @Test + public void testSysMutexCheckDoesNotModifyTableDescWhenTTLCorrect() throws Exception { + when(mockAdmin.getDescriptor(TableName.valueOf(SYSTEM_MUTEX_NAME))) + .thenReturn(sysMutexTableDescCorrectTTL); + + assertTrue(mockCqs.checkIfSysMutexExistsAndModifyTTLIfRequired(mockAdmin)); + verify(mockAdmin, Mockito.times(0)).modifyTable(any(TableDescriptor.class)); + } + + @Test + public void testSysTaskSplitPolicy() throws Exception { + assertTrue(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)); + assertFalse(mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB)); + } + + @Test + public void testSysTaskSplitPolicyWithError() { + try { + mockCqs.updateAndConfirmSplitPolicyForTask(SYS_TASK_TDB_SP); + fail("Split policy for SYSTEM.TASK cannot be updated"); + } catch (SQLException e) { + assertEquals("ERROR 908 (43M19): REGION SPLIT POLICY is incorrect." 
+ + " Region split policy for table TASK is expected to be " + + "among: [null, org.apache.phoenix.schema.SystemTaskSplitPolicy]" + + " , actual split policy: abc tableName=SYSTEM.TASK", e.getMessage()); } + } + + @Test + public void testGetSysMutexTableWithName() throws Exception { + when(mockAdmin.tableExists(any())).thenReturn(true); + when(mockConn.getAdmin()).thenReturn(mockAdmin); + when(mockConn.getTable(TableName.valueOf("SYSTEM.MUTEX"))).thenReturn(mockTable); + assertSame(mockCqs.getSysMutexTable(), mockTable); + verify(mockAdmin, Mockito.times(1)).tableExists(any()); + verify(mockConn, Mockito.times(1)).getAdmin(); + verify(mockConn, Mockito.times(1)).getTable(TableName.valueOf("SYSTEM.MUTEX")); + } + + @Test + public void testGetSysMutexTableWithNamespace() throws Exception { + when(mockAdmin.tableExists(any())).thenReturn(false); + when(mockConn.getAdmin()).thenReturn(mockAdmin); + when(mockConn.getTable(TableName.valueOf("SYSTEM:MUTEX"))).thenReturn(mockTable); + assertSame(mockCqs.getSysMutexTable(), mockTable); + verify(mockAdmin, Mockito.times(1)).tableExists(any()); + verify(mockConn, Mockito.times(1)).getAdmin(); + verify(mockConn, Mockito.times(1)).getTable(TableName.valueOf("SYSTEM:MUTEX")); + } + + @Test + public void testDropTablesAlreadyDisabled() throws Exception { + when(mockConn.getAdmin()).thenReturn(mockAdmin); + doThrow(new TableNotEnabledException()).when(mockAdmin).disableTable(any()); + doNothing().when(mockAdmin).deleteTable(any()); + mockCqs.dropTables(Collections.singletonList("TEST_TABLE".getBytes(StandardCharsets.UTF_8))); + verify(mockAdmin, Mockito.times(1)).disableTable(TableName.valueOf("TEST_TABLE")); + verify(mockAdmin, Mockito.times(1)).deleteTable(TableName.valueOf("TEST_TABLE")); + verify(mockConn).getAdmin(); + } + + @Test + public void testDropTablesTableEnabled() throws Exception { + when(mockConn.getAdmin()).thenReturn(mockAdmin); + doNothing().when(mockAdmin).disableTable(any()); + doNothing().when(mockAdmin).deleteTable(any()); + doNothing().when(mockTableStatsCache).invalidateAll(); + mockCqs.dropTables(Collections.singletonList("TEST_TABLE".getBytes(StandardCharsets.UTF_8))); + verify(mockAdmin, Mockito.times(1)).disableTable(TableName.valueOf("TEST_TABLE")); + verify(mockAdmin, Mockito.times(1)).deleteTable(TableName.valueOf("TEST_TABLE")); + verify(mockConn).getAdmin(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java index 01f87167da7..c4ccc879d13 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -50,166 +50,171 @@ import org.junit.ClassRule; import org.junit.Test; - public class ConnectionlessTest { - private static final int saltBuckets = 200; - private static final String orgId = "00D300000000XHP"; - private static final String keyPrefix1 = "111"; - private static final String keyPrefix2 = "112"; - private static final String entityHistoryId1 = "123456789012"; - private static final String entityHistoryId2 = "987654321098"; - private static final String name1 = "Eli"; - private static final String name2 = "Simon"; - private static final Date now = new Date(System.currentTimeMillis()); - private static final byte[] unsaltedRowKey1 = ByteUtil.concat( - PChar.INSTANCE.toBytes(orgId), PChar.INSTANCE.toBytes(keyPrefix1), PChar.INSTANCE.toBytes(entityHistoryId1)); - private static final byte[] unsaltedRowKey2 = ByteUtil.concat( - PChar.INSTANCE.toBytes(orgId), PChar.INSTANCE.toBytes(keyPrefix2), PChar.INSTANCE.toBytes(entityHistoryId2)); - private static final byte[] saltedRowKey1 = ByteUtil.concat( - new byte[] {SaltingUtil.getSaltingByte(unsaltedRowKey1, 0, unsaltedRowKey1.length, saltBuckets)}, - unsaltedRowKey1); - private static final byte[] saltedRowKey2 = ByteUtil.concat( - new byte[] {SaltingUtil.getSaltingByte(unsaltedRowKey2, 0, unsaltedRowKey2.length, saltBuckets)}, - unsaltedRowKey2); - - private static String getUrl() { - return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS; - } + private static final int saltBuckets = 200; + private static final String orgId = "00D300000000XHP"; + private static final String keyPrefix1 = "111"; + private static final String keyPrefix2 = "112"; + private static final String entityHistoryId1 = "123456789012"; + private static final String entityHistoryId2 = "987654321098"; + private static final String name1 = "Eli"; + private static final String name2 = "Simon"; + private static final Date now = new Date(System.currentTimeMillis()); + private static final byte[] unsaltedRowKey1 = ByteUtil.concat(PChar.INSTANCE.toBytes(orgId), + PChar.INSTANCE.toBytes(keyPrefix1), PChar.INSTANCE.toBytes(entityHistoryId1)); + private static final byte[] unsaltedRowKey2 = ByteUtil.concat(PChar.INSTANCE.toBytes(orgId), + PChar.INSTANCE.toBytes(keyPrefix2), PChar.INSTANCE.toBytes(entityHistoryId2)); + private static final byte[] saltedRowKey1 = ByteUtil.concat( + new byte[] { + SaltingUtil.getSaltingByte(unsaltedRowKey1, 0, unsaltedRowKey1.length, saltBuckets) }, + unsaltedRowKey1); + private static final byte[] saltedRowKey2 = ByteUtil.concat( + new byte[] { + SaltingUtil.getSaltingByte(unsaltedRowKey2, 0, unsaltedRowKey2.length, saltBuckets) }, + unsaltedRowKey2); - @ClassRule - public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); + private static String getUrl() { + return PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + PhoenixRuntime.CONNECTIONLESS; + } - @BeforeClass - public static synchronized void verifyDriverRegistered() throws SQLException { - assertTrue(DriverManager.getDriver(getUrl()) == PhoenixDriver.INSTANCE); - } - - @Test - public void testConnectionlessUpsert() throws Exception { - testConnectionlessUpsert(null); - } - - @Test - public void 
testSaltedConnectionlessUpsert() throws Exception { - testConnectionlessUpsert(saltBuckets); - } - - private void testConnectionlessUpsert(Integer saltBuckets) throws Exception { - String dmlStmt = "create table core.entity_history(\n" + - " organization_id char(15) not null, \n" + - " key_prefix char(3) not null,\n" + - " entity_history_id char(12) not null,\n" + - " created_by varchar,\n" + - " created_date date\n" + - " CONSTRAINT pk PRIMARY KEY (organization_id, key_prefix, entity_history_id) ) COLUMN_ENCODED_BYTES=4 " + - (saltBuckets == null ? "" : " , " + (PhoenixDatabaseMetaData.SALT_BUCKETS + "=" + saltBuckets)); - Properties props = new Properties(); - Connection conn = DriverManager.getConnection(getUrl(), props); - PreparedStatement statement = conn.prepareStatement(dmlStmt); - statement.execute(); - - String upsertStmt = "upsert into core.entity_history(organization_id,key_prefix,entity_history_id, created_by, created_date)\n" + - "values(?,?,?,?,?)"; - statement = conn.prepareStatement(upsertStmt); - statement.setString(1, orgId); - statement.setString(2, keyPrefix2); - statement.setString(3, entityHistoryId2); - statement.setString(4, name2); - statement.setDate(5,now); - statement.execute(); - statement.setString(1, orgId); - statement.setString(2, keyPrefix1); - statement.setString(3, entityHistoryId1); - statement.setString(4, name1); - statement.setDate(5,now); - statement.execute(); - - Iterator>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn); - Iterator iterator = dataIterator.next().getSecond().iterator(); - - byte[] expectedRowKey1 = saltBuckets == null ? unsaltedRowKey1 : saltedRowKey1; - byte[] expectedRowKey2 = saltBuckets == null ? unsaltedRowKey2 : saltedRowKey2; - if (Bytes.compareTo(expectedRowKey1, expectedRowKey2) < 0) { - assertRow1(iterator, expectedRowKey1); - assertRow2(iterator, expectedRowKey2); - } else { - assertRow2(iterator, expectedRowKey2); - assertRow1(iterator, expectedRowKey1); - } - - assertFalse(iterator.hasNext()); - assertFalse(dataIterator.hasNext()); - conn.rollback(); // to clear the list of mutations for the next - } - - private static void assertRow1(Iterator iterator, byte[] expectedRowKey1) { - Cell kv; - assertTrue(iterator.hasNext()); - kv = iterator.next(); - assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); - assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); - kv = iterator.next(); - assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); - assertEquals(name1, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); - assertTrue(iterator.hasNext()); - kv = iterator.next(); - assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); - assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv))); - } + @ClassRule + public static final SystemExitRule SYSTEM_EXIT_RULE = new SystemExitRule(); + + @BeforeClass + public static synchronized void verifyDriverRegistered() throws SQLException { + assertTrue(DriverManager.getDriver(getUrl()) == PhoenixDriver.INSTANCE); + } + + @Test + public void testConnectionlessUpsert() throws Exception { + testConnectionlessUpsert(null); + } - private static void assertRow2(Iterator iterator, byte[] expectedRowKey2) { - Cell kv; - kv = iterator.next(); - assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv)); - assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); - assertTrue(iterator.hasNext()); - kv = iterator.next(); - assertArrayEquals(expectedRowKey2, 
CellUtil.cloneRow(kv)); - assertEquals(name2, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); - assertTrue(iterator.hasNext()); - kv = iterator.next(); - assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv)); - assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv))); + @Test + public void testSaltedConnectionlessUpsert() throws Exception { + testConnectionlessUpsert(saltBuckets); + } + + private void testConnectionlessUpsert(Integer saltBuckets) throws Exception { + String dmlStmt = "create table core.entity_history(\n" + + " organization_id char(15) not null, \n" + " key_prefix char(3) not null,\n" + + " entity_history_id char(12) not null,\n" + " created_by varchar,\n" + + " created_date date\n" + + " CONSTRAINT pk PRIMARY KEY (organization_id, key_prefix, entity_history_id) ) COLUMN_ENCODED_BYTES=4 " + + (saltBuckets == null + ? "" + : " , " + (PhoenixDatabaseMetaData.SALT_BUCKETS + "=" + saltBuckets)); + Properties props = new Properties(); + Connection conn = DriverManager.getConnection(getUrl(), props); + PreparedStatement statement = conn.prepareStatement(dmlStmt); + statement.execute(); + + String upsertStmt = + "upsert into core.entity_history(organization_id,key_prefix,entity_history_id, created_by, created_date)\n" + + "values(?,?,?,?,?)"; + statement = conn.prepareStatement(upsertStmt); + statement.setString(1, orgId); + statement.setString(2, keyPrefix2); + statement.setString(3, entityHistoryId2); + statement.setString(4, name2); + statement.setDate(5, now); + statement.execute(); + statement.setString(1, orgId); + statement.setString(2, keyPrefix1); + statement.setString(3, entityHistoryId1); + statement.setString(4, name1); + statement.setDate(5, now); + statement.execute(); + + Iterator>> dataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn); + Iterator iterator = dataIterator.next().getSecond().iterator(); + + byte[] expectedRowKey1 = saltBuckets == null ? unsaltedRowKey1 : saltedRowKey1; + byte[] expectedRowKey2 = saltBuckets == null ? 
unsaltedRowKey2 : saltedRowKey2; + if (Bytes.compareTo(expectedRowKey1, expectedRowKey2) < 0) { + assertRow1(iterator, expectedRowKey1); + assertRow2(iterator, expectedRowKey2); + } else { + assertRow2(iterator, expectedRowKey2); + assertRow1(iterator, expectedRowKey1); } - - @Test - public void testMultipleConnectionQueryServices() throws Exception { - String url1 = getUrl(); - // Non-ZK registries don't have heuristics to handle missing URL elements - String url2 = - url1 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR - + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries"; - Connection conn1 = DriverManager.getConnection(url1); + + assertFalse(iterator.hasNext()); + assertFalse(dataIterator.hasNext()); + conn.rollback(); // to clear the list of mutations for the next + } + + private static void assertRow1(Iterator iterator, byte[] expectedRowKey1) { + Cell kv; + assertTrue(iterator.hasNext()); + kv = iterator.next(); + assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); + assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, + PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); + kv = iterator.next(); + assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); + assertEquals(name1, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); + assertTrue(iterator.hasNext()); + kv = iterator.next(); + assertArrayEquals(expectedRowKey1, CellUtil.cloneRow(kv)); + assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv))); + } + + private static void assertRow2(Iterator iterator, byte[] expectedRowKey2) { + Cell kv; + kv = iterator.next(); + assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv)); + assertEquals(QueryConstants.EMPTY_COLUMN_VALUE, + PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); + assertTrue(iterator.hasNext()); + kv = iterator.next(); + assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv)); + assertEquals(name2, PVarchar.INSTANCE.toObject(CellUtil.cloneValue(kv))); + assertTrue(iterator.hasNext()); + kv = iterator.next(); + assertArrayEquals(expectedRowKey2, CellUtil.cloneRow(kv)); + assertEquals(now, PDate.INSTANCE.toObject(CellUtil.cloneValue(kv))); + } + + @Test + public void testMultipleConnectionQueryServices() throws Exception { + String url1 = getUrl(); + // Non-ZK registries don't have heuristics to handle missing URL elements + String url2 = + url1 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries"; + Connection conn1 = DriverManager.getConnection(url1); + try { + assertEquals(StringUtil.EMPTY_STRING, conn1.getMetaData().getUserName()); + Connection conn2 = DriverManager.getConnection(url2); + try { + assertEquals("LongRunningQueries", conn2.getMetaData().getUserName()); + ConnectionQueryServices cqs1 = conn1.unwrap(PhoenixConnection.class).getQueryServices(); + ConnectionQueryServices cqs2 = conn2.unwrap(PhoenixConnection.class).getQueryServices(); + assertTrue(cqs1 != cqs2); + Connection conn3 = DriverManager.getConnection(url1); try { - assertEquals(StringUtil.EMPTY_STRING, conn1.getMetaData().getUserName()); - Connection conn2 = DriverManager.getConnection(url2); - try { - assertEquals("LongRunningQueries", conn2.getMetaData().getUserName()); - ConnectionQueryServices cqs1 = conn1.unwrap(PhoenixConnection.class).getQueryServices(); - ConnectionQueryServices cqs2 = conn2.unwrap(PhoenixConnection.class).getQueryServices(); - assertTrue(cqs1 != cqs2); - Connection conn3 = 
DriverManager.getConnection(url1); - try { - ConnectionQueryServices cqs3 = conn3.unwrap(PhoenixConnection.class).getQueryServices(); - assertTrue(cqs1 == cqs3); - Connection conn4 = DriverManager.getConnection(url2); - try { - ConnectionQueryServices cqs4 = conn4.unwrap(PhoenixConnection.class).getQueryServices(); - assertTrue(cqs2 == cqs4); - } finally { - conn4.close(); - } - } finally { - conn3.close(); - } - } finally { - conn2.close(); - } + ConnectionQueryServices cqs3 = conn3.unwrap(PhoenixConnection.class).getQueryServices(); + assertTrue(cqs1 == cqs3); + Connection conn4 = DriverManager.getConnection(url2); + try { + ConnectionQueryServices cqs4 = conn4.unwrap(PhoenixConnection.class).getQueryServices(); + assertTrue(cqs2 == cqs4); + } finally { + conn4.close(); + } } finally { - conn1.close(); + conn3.close(); } - + } finally { + conn2.close(); + } + } finally { + conn1.close(); } + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java index cd258060466..2aad2f5733b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,120 +20,121 @@ import org.apache.hadoop.hbase.Cell; public class DelegateCell implements Cell { - private final Cell delegate; - private final String name; - public DelegateCell(Cell delegate, String name) { - this.delegate = delegate; - this.name = name; - } - - @Override - public int getValueOffset() { - return delegate.getValueOffset(); - } - - @Override - public int getValueLength() { - return delegate.getValueLength(); - } - - @Override - public byte[] getValueArray() { - return delegate.getValueArray(); - } - - @Override - public byte getTypeByte() { - return delegate.getTypeByte(); - } - - @Override - public long getTimestamp() { - return delegate.getTimestamp(); - } - - @Override - public int getTagsOffset() { - return delegate.getTagsOffset(); - } - - @Override - public byte[] getTagsArray() { - return delegate.getTagsArray(); - } - - @Override - public int getRowOffset() { - return delegate.getRowOffset(); - } - - @Override - public short getRowLength() { - return delegate.getRowLength(); - } - - @Override - public byte[] getRowArray() { - return delegate.getRowArray(); - } - - @Override - public int getQualifierOffset() { - return delegate.getQualifierOffset(); - } - - @Override - public int getQualifierLength() { - return delegate.getQualifierLength(); - } - - @Override - public byte[] getQualifierArray() { - return delegate.getQualifierArray(); - } - - @Override - public int getFamilyOffset() { - return delegate.getFamilyOffset(); - } - - @Override - public byte getFamilyLength() { - return delegate.getFamilyLength(); - } - - @Override - public byte[] getFamilyArray() { - return delegate.getFamilyArray(); - } - - @Override - public String toString() { - return name; - } - - @Override - public long getSequenceId() { - return delegate.getSequenceId(); - } - - @Override - public int getTagsLength() { - return delegate.getTagsLength(); - } - - @Override - public Type 
getType() { - return delegate.getType(); - } - - @Override - public long heapSize() { - return delegate.heapSize(); - } - - @Override - public int getSerializedSize() { - return delegate.getSerializedSize(); - } + private final Cell delegate; + private final String name; + + public DelegateCell(Cell delegate, String name) { + this.delegate = delegate; + this.name = name; + } + + @Override + public int getValueOffset() { + return delegate.getValueOffset(); + } + + @Override + public int getValueLength() { + return delegate.getValueLength(); + } + + @Override + public byte[] getValueArray() { + return delegate.getValueArray(); + } + + @Override + public byte getTypeByte() { + return delegate.getTypeByte(); + } + + @Override + public long getTimestamp() { + return delegate.getTimestamp(); + } + + @Override + public int getTagsOffset() { + return delegate.getTagsOffset(); + } + + @Override + public byte[] getTagsArray() { + return delegate.getTagsArray(); + } + + @Override + public int getRowOffset() { + return delegate.getRowOffset(); + } + + @Override + public short getRowLength() { + return delegate.getRowLength(); + } + + @Override + public byte[] getRowArray() { + return delegate.getRowArray(); + } + + @Override + public int getQualifierOffset() { + return delegate.getQualifierOffset(); + } + + @Override + public int getQualifierLength() { + return delegate.getQualifierLength(); + } + + @Override + public byte[] getQualifierArray() { + return delegate.getQualifierArray(); + } + + @Override + public int getFamilyOffset() { + return delegate.getFamilyOffset(); + } + + @Override + public byte getFamilyLength() { + return delegate.getFamilyLength(); + } + + @Override + public byte[] getFamilyArray() { + return delegate.getFamilyArray(); + } + + @Override + public String toString() { + return name; + } + + @Override + public long getSequenceId() { + return delegate.getSequenceId(); + } + + @Override + public int getTagsLength() { + return delegate.getTagsLength(); + } + + @Override + public Type getType() { + return delegate.getType(); + } + + @Override + public long heapSize() { + return delegate.heapSize(); + } + + @Override + public int getSerializedSize() { + return delegate.getSerializedSize(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java index 93e8d3e059f..0c3e72354a3 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,439 +39,471 @@ import org.junit.Test; public class EncodedColumnQualifierCellsListTest { - - private static final byte[] row = Bytes.toBytes("row"); - private static final byte[] cf = Bytes.toBytes("cf"); - - - @Test - public void testIterator() { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - int i = 0; - populateListAndArray(list, cells); - Iterator itr = list.iterator(); - assertTrue(itr.hasNext()); - - // test itr.next() - i = 0; - while (itr.hasNext()) { - assertEquals(cells[i++], itr.next()); - } - - assertEquals(7, list.size()); - - // test itr.remove() - itr = list.iterator(); - i = 0; - int numRemoved = 0; - try { - itr.remove(); - fail("Remove not allowed till next() is called"); - } catch (IllegalStateException expected) {} - - while (itr.hasNext()) { - assertEquals(cells[i++], itr.next()); - itr.remove(); - numRemoved++; - } - assertEquals("Number of elements removed should have been the size of the list", 7, numRemoved); + + private static final byte[] row = Bytes.toBytes("row"); + private static final byte[] cf = Bytes.toBytes("cf"); + + @Test + public void testIterator() { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + int i = 0; + populateListAndArray(list, cells); + Iterator itr = list.iterator(); + assertTrue(itr.hasNext()); + + // test itr.next() + i = 0; + while (itr.hasNext()) { + assertEquals(cells[i++], itr.next()); } - - @Test - public void testSize() { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - assertEquals(0, list.size()); - - populateList(list); - - assertEquals(7, list.size()); - int originalSize = list.size(); - - Iterator itr = list.iterator(); - while (itr.hasNext()) { - itr.next(); - itr.remove(); - assertEquals(--originalSize, list.size()); - } + + assertEquals(7, list.size()); + + // test itr.remove() + itr = list.iterator(); + i = 0; + int numRemoved = 0; + try { + itr.remove(); + fail("Remove not allowed till next() is called"); + } catch (IllegalStateException expected) { } - - @Test - public void testIsEmpty() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - assertTrue(list.isEmpty()); - populateList(list); - assertFalse(list.isEmpty()); - Iterator itr = list.iterator(); - while (itr.hasNext()) { - itr.next(); - itr.remove(); - if (itr.hasNext()) { - assertFalse(list.isEmpty()); - } - } - assertTrue(list.isEmpty()); + + while (itr.hasNext()) { + assertEquals(cells[i++], itr.next()); + itr.remove(); + numRemoved++; } - - @Test - public void testContains() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - - for (Cell c : cells) { - assertTrue(list.contains(c)); - } - assertFalse(list.contains(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)))); + assertEquals("Number of elements removed should have been the size of the list", 7, numRemoved); + } + + @Test + public void testSize() { + 
EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + assertEquals(0, list.size()); + + populateList(list); + + assertEquals(7, list.size()); + int originalSize = list.size(); + + Iterator itr = list.iterator(); + while (itr.hasNext()) { + itr.next(); + itr.remove(); + assertEquals(--originalSize, list.size()); } - - @Test - public void testToArrayWithParam() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - Cell[] array = list.toArray(new Cell[0]); - assertTrue(Arrays.equals(cells, array)); + } + + @Test + public void testIsEmpty() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + assertTrue(list.isEmpty()); + populateList(list); + assertFalse(list.isEmpty()); + Iterator itr = list.iterator(); + while (itr.hasNext()) { + itr.next(); + itr.remove(); + if (itr.hasNext()) { + assertFalse(list.isEmpty()); + } } - - @Test - public void testToArrayWithoutParam() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - Object[] array = list.toArray(); - assertTrue(Arrays.equals(cells, array)); + assertTrue(list.isEmpty()); + } + + @Test + public void testContains() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + + for (Cell c : cells) { + assertTrue(list.contains(c)); } - - @Test - public void testRemove() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - assertTrue(list.remove(cells[0])); - assertEquals(6, list.size()); - assertTrue(list.remove(cells[6])); - assertEquals(5, list.size()); - assertTrue(list.remove(cells[3])); - assertEquals(4, list.size()); - assertFalse(list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)))); - assertEquals(4, list.size()); + assertFalse( + list.contains(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)))); + } + + @Test + public void testToArrayWithParam() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + Cell[] array = list.toArray(new Cell[0]); + assertTrue(Arrays.equals(cells, array)); + } + + @Test + public void testToArrayWithoutParam() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + Object[] array = list.toArray(); + assertTrue(Arrays.equals(cells, array)); + } + + @Test + public void testRemove() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + assertTrue(list.remove(cells[0])); + assertEquals(6, list.size()); + assertTrue(list.remove(cells[6])); + assertEquals(5, list.size()); + assertTrue(list.remove(cells[3])); + assertEquals(4, list.size()); + assertFalse( + 
list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13)))); + assertEquals(4, list.size()); + } + + @Test + public void testContainsAll() throws Exception { + EncodedColumnQualiferCellsList list1 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list1); + EncodedColumnQualiferCellsList list2 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list2); + assertTrue(list1.containsAll(list2)); + list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); + assertTrue(list1.containsAll(list2)); + assertFalse(list2.containsAll(list1)); + list2.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))); + assertFalse(list1.containsAll(list2)); + assertFalse(list2.containsAll(list1)); + List arrayList = new ArrayList<>(); + populateList(arrayList); + assertTrue(list1.containsAll(arrayList)); + } + + @Test + public void testAddAll() throws Exception { + EncodedColumnQualiferCellsList list1 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list1); + EncodedColumnQualiferCellsList list2 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list2); + /* + * Note that we don't care about equality of the element being added with the element already + * present at the index. + */ + assertTrue(list1.addAll(list2)); + } + + @Test + public void testAddAllAtIndexFails() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list); + try { + list.addAll(0, new ArrayList()); + } catch (UnsupportedOperationException expected) { } - - @Test - public void testContainsAll() throws Exception { - EncodedColumnQualiferCellsList list1 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list1); - EncodedColumnQualiferCellsList list2 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list2); - assertTrue(list1.containsAll(list2)); - list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); - assertTrue(list1.containsAll(list2)); - assertFalse(list2.containsAll(list1)); - list2.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(13))); - assertFalse(list1.containsAll(list2)); - assertFalse(list2.containsAll(list1)); - List arrayList = new ArrayList<>(); - populateList(arrayList); - assertTrue(list1.containsAll(arrayList)); + } + + @Test + public void testRemoveAll() throws Exception { + EncodedColumnQualiferCellsList list1 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list1); + ArrayList list2 = new ArrayList<>(); + populateList(list2); + assertTrue(list1.removeAll(list2)); + assertTrue(list1.isEmpty()); + assertFalse(list2.isEmpty()); + } + + @Test + public void testRetainAll() throws Exception { + EncodedColumnQualiferCellsList list1 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list1); + EncodedColumnQualiferCellsList list2 = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list2); + // retainAll won't be modifying the list1 since they both have the same elements equality wise + assertFalse(list1.retainAll(list2)); + list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); + assertTrue(list1.retainAll(list2)); + assertEquals(list1.size(), list2.size()); + for (Cell c : list1) { + 
assertTrue(list2.contains(c)); } - - @Test - public void testAddAll() throws Exception { - EncodedColumnQualiferCellsList list1 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list1); - EncodedColumnQualiferCellsList list2 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list2); - /* - * Note that we don't care about equality of the element being added with the element already - * present at the index. - */ - assertTrue(list1.addAll(list2)); + } + + @Test + public void testClear() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list); + list.clear(); + assertTrue(list.isEmpty()); + assertEquals(0, list.size()); + } + + @Test + public void testGetIndex() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + for (int i = 0; i < cells.length; i++) { + assertEquals(cells[i], list.get(i)); } - - @Test - public void testAddAllAtIndexFails() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list); - try { - list.addAll(0, new ArrayList()); - } catch (UnsupportedOperationException expected) { - } + } + + @Test + public void testIndexOf() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + for (int i = 0; i < cells.length; i++) { + assertEquals(i, list.indexOf(cells[i])); } - - @Test - public void testRemoveAll() throws Exception { - EncodedColumnQualiferCellsList list1 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list1); - ArrayList list2 = new ArrayList<>(); - populateList(list2); - assertTrue(list1.removeAll(list2)); - assertTrue(list1.isEmpty()); - assertFalse(list2.isEmpty()); + } + + @Test + public void testLastIndexOf() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + populateListAndArray(list, cells); + for (int i = 0; i < cells.length; i++) { + assertEquals(i, list.lastIndexOf(cells[i])); } - - @Test - public void testRetainAll() throws Exception { - EncodedColumnQualiferCellsList list1 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list1); - EncodedColumnQualiferCellsList list2 = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list2); - // retainAll won't be modifying the list1 since they both have the same elements equality wise - assertFalse(list1.retainAll(list2)); - list2.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); - assertTrue(list1.retainAll(list2)); - assertEquals(list1.size(), list2.size()); - for (Cell c : list1) { - assertTrue(list2.contains(c)); - } + } + + @Test + public void testListIterator() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] cells = new Cell[7]; + int i = 0; + populateListAndArray(list, cells); + ListIterator itr = list.listIterator(); + assertTrue(itr.hasNext()); + + // test itr.next() + i = 0; + while (itr.hasNext()) { + assertEquals(cells[i++], itr.next()); } - - @Test - public void 
testClear() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list); - list.clear(); - assertTrue(list.isEmpty()); - assertEquals(0, list.size()); + + assertEquals(7, list.size()); + + // test itr.remove() + itr = list.listIterator(); + i = 0; + int numRemoved = 0; + try { + itr.remove(); + fail("Remove not allowed till next() is called"); + } catch (IllegalStateException expected) { } - - @Test - public void testGetIndex() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - for (int i = 0; i < cells.length; i++) { - assertEquals(cells[i], list.get(i)); - } + + while (itr.hasNext()) { + assertEquals(cells[i++], itr.next()); + itr.remove(); + numRemoved++; } - - @Test - public void testIndexOf() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - for (int i = 0; i < cells.length; i++) { - assertEquals(i, list.indexOf(cells[i])); + assertEquals("Number of elements removed should have been the size of the list", 7, numRemoved); + assertTrue(list.isEmpty()); + } + + @Test + public void testListIteratorSet() { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] array = new Cell[7]; + populateListAndArray(list, array); + ListIterator itr = list.listIterator(); + // This cell is KeyValue.createFirstOnRow(row, cf, getEncodedColumnQualifier(12)) + final Cell validCell = array[4]; + // This cell is KeyValue.createFirstOnRow(row, cf, getEncodedColumnQualifier(14)) + final Cell invalidCell = array[5]; + String validCellName = "Valid Cell"; + String invalidCellName = "Invalid Cell"; + Cell validReplacementCell = new DelegateCell(validCell, validCellName); + Cell invalidReplacementCell = new DelegateCell(invalidCell, invalidCellName); + int i = 0; + while (itr.hasNext()) { + Cell c = itr.next(); + if (i == 4) { + itr.set(validReplacementCell); + } + if (i == 6) { + try { + itr.set(invalidReplacementCell); + fail("This should have failed since " + invalidReplacementCell + " cannot be added where " + + c + " is."); + } catch (IllegalArgumentException expected) { } + } + i++; } - - @Test - public void testLastIndexOf() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - populateListAndArray(list, cells); - for (int i = 0; i < cells.length; i++) { - assertEquals(i, list.lastIndexOf(cells[i])); - } + itr = list.listIterator(); + i = 0; + // Assert that the valid cell was added and invalid cell wasn't. 
+ while (itr.hasNext()) { + Cell c = itr.next(); + if (i == 4) { + assertEquals(validCellName, c.toString()); + } + if (i == 6) { + assertNotEquals(invalidCellName, c.toString()); + } + i++; } - - @Test - public void testListIterator() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] cells = new Cell[7]; - int i = 0; - populateListAndArray(list, cells); - ListIterator itr = list.listIterator(); - assertTrue(itr.hasNext()); - - // test itr.next() - i = 0; - while (itr.hasNext()) { - assertEquals(cells[i++], itr.next()); - } - - assertEquals(7, list.size()); - - // test itr.remove() - itr = list.listIterator(); - i = 0; - int numRemoved = 0; - try { - itr.remove(); - fail("Remove not allowed till next() is called"); - } catch (IllegalStateException expected) {} - - while (itr.hasNext()) { - assertEquals(cells[i++], itr.next()); - itr.remove(); - numRemoved++; - } - assertEquals("Number of elements removed should have been the size of the list", 7, numRemoved); - assertTrue(list.isEmpty()); + } + + @Test + public void testListIteratorNextAndPrevious() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + Cell[] array = new Cell[7]; + populateListAndArray(list, array); + ListIterator itr = list.listIterator(); + try { + itr.previous(); + fail( + "Call to itr.previous() should have failed since the iterator hasn't been moved forward yet"); + } catch (NoSuchElementException expected) { + } - - @Test - public void testListIteratorSet() { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] array = new Cell[7]; - populateListAndArray(list, array); - ListIterator itr = list.listIterator(); - // This cell is KeyValue.createFirstOnRow(row, cf, getEncodedColumnQualifier(12)) - final Cell validCell = array[4]; - // This cell is KeyValue.createFirstOnRow(row, cf, getEncodedColumnQualifier(14)) - final Cell invalidCell = array[5]; - String validCellName = "Valid Cell"; - String invalidCellName = "Invalid Cell"; - Cell validReplacementCell = new DelegateCell(validCell, validCellName); - Cell invalidReplacementCell = new DelegateCell(invalidCell, invalidCellName); - int i = 0; - while (itr.hasNext()) { - Cell c = itr.next(); - if (i == 4) { - itr.set(validReplacementCell); - } - if (i == 6) { - try { - itr.set(invalidReplacementCell); - fail("This should have failed since " + invalidReplacementCell + " cannot be added where " + c + " is."); - } catch (IllegalArgumentException expected) { - } - } - i++; - } - itr = list.listIterator(); - i = 0; - // Assert that the valid cell was added and invalid cell wasn't. 
- while (itr.hasNext()) { - Cell c = itr.next(); - if (i == 4) { - assertEquals(validCellName, c.toString()); - } - if (i == 6) { - assertNotEquals(invalidCellName, c.toString()); - } - i++; - } + Cell c = itr.next(); + Cell d = itr.previous(); + Cell e = itr.next(); + Cell f = itr.previous(); + assertTrue(c.equals(d) && c.equals(f) && c.equals(e)); + itr = list.listIterator(); + int i = 0; + assertEquals(array[i++], itr.next()); + assertEquals(array[i++], itr.next()); + assertEquals(array[i++], itr.next()); + assertEquals(array[--i], itr.previous()); + assertEquals(array[--i], itr.previous()); + assertEquals(array[i++], itr.next()); + + // move itr forward till next() is exhausted + while (itr.hasNext()) { + itr.next(); } - - @Test - public void testListIteratorNextAndPrevious() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - Cell[] array = new Cell[7]; - populateListAndArray(list, array); - ListIterator itr = list.listIterator(); - try { - itr.previous(); - fail("Call to itr.previous() should have failed since the iterator hasn't been moved forward yet"); - } catch (NoSuchElementException expected) { - - } - Cell c = itr.next(); - Cell d = itr.previous(); - Cell e = itr.next(); - Cell f = itr.previous(); - assertTrue(c.equals(d) && c.equals(f) && c.equals(e)); - itr = list.listIterator(); - int i = 0; - assertEquals(array[i++], itr.next()); - assertEquals(array[i++], itr.next()); - assertEquals(array[i++], itr.next()); - assertEquals(array[--i], itr.previous()); - assertEquals(array[--i], itr.previous()); - assertEquals(array[i++], itr.next()); - - // move itr forward till next() is exhausted - while (itr.hasNext()) { - itr.next(); - } - i = 6; - while (itr.hasPrevious()) { - assertEquals(array[i--], itr.previous()); - } - assertEquals("Not all elements navigated using previous()", -1, i); - // now that previous is exhausted, move itr() forward till next() is exhausted - i = 0; - while (itr.hasNext()) { - assertEquals(array[i++], itr.next()); - } - assertEquals("Not all elements navigated using next()", 7, i); + i = 6; + while (itr.hasPrevious()) { + assertEquals(array[i--], itr.previous()); } - - @Test - public void testSetNull() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - try { - list.add(null); - fail("Adding null elements to the list is not allowed"); - } catch (NullPointerException expected) { - - } + assertEquals("Not all elements navigated using previous()", -1, i); + // now that previous is exhausted, move itr() forward till next() is exhausted + i = 0; + while (itr.hasNext()) { + assertEquals(array[i++], itr.next()); } - - @Test - public void testFailFastIterator() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list); - int i = 0; - Iterator itr = list.iterator(); - while (itr.hasNext()) { - i++; - try { - itr.next(); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); - if (i == 2) { - fail("ConcurrentModificationException should have been thrown as the list is being modified while being iterated through"); - } - } catch (ConcurrentModificationException expected) { - assertEquals("Exception should have been thrown when getting the second element", - 2, i); - break; - } - } + assertEquals("Not all elements navigated using next()", 7, i); + } + + @Test + public void testSetNull() throws Exception { 
+ EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + try { + list.add(null); + fail("Adding null elements to the list is not allowed"); + } catch (NullPointerException expected) { + } - - @Test - public void testFailFastListIterator() throws Exception { - EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list); - ListIterator itr = list.listIterator(); - itr.next(); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); - try { - itr.next(); - fail("ConcurrentModificationException should have been thrown as the list was modified without using iterator"); - } catch (ConcurrentModificationException expected) { + } - } - list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); - populateList(list); - itr = list.listIterator(); - itr.next(); + @Test + public void testFailFastIterator() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list); + int i = 0; + Iterator itr = list.iterator(); + while (itr.hasNext()) { + i++; + try { itr.next(); - itr.remove(); - itr.next(); - list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); - try { - itr.next(); - fail("ConcurrentModificationException should have been thrown as the list was modified without using iterator"); - } catch (ConcurrentModificationException expected) { - + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); + if (i == 2) { + fail( + "ConcurrentModificationException should have been thrown as the list is being modified while being iterated through"); } + } catch (ConcurrentModificationException expected) { + assertEquals("Exception should have been thrown when getting the second element", 2, i); + break; + } } - - private void populateListAndArray(List list, Cell[] cells) { - // add elements in reserved range - list.add(cells[0] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); - list.add(cells[1] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5))); - list.add(cells[2] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10))); - - // add elements in qualifier range - list.add(cells[6] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16))); - list.add(cells[4] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); - list.add(cells[5] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14))); - list.add(cells[3] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); + } + + @Test + public void testFailFastListIterator() throws Exception { + EncodedColumnQualiferCellsList list = + new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list); + ListIterator itr = list.listIterator(); + itr.next(); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); + try { + itr.next(); + fail( + "ConcurrentModificationException should have been thrown as the list was modified without using iterator"); + } catch (ConcurrentModificationException expected) { + } + list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS); + populateList(list); + itr = list.listIterator(); + itr.next(); + itr.next(); + itr.remove(); + itr.next(); + list.remove(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); + try { + 
itr.next(); + fail( + "ConcurrentModificationException should have been thrown as the list was modified without using iterator"); + } catch (ConcurrentModificationException expected) { - private void populateList(List list) { - // add elements in reserved range - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5))); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10))); - - // add elements in qualifier range - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16))); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14))); - list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); } + } + + private void populateListAndArray(List list, Cell[] cells) { + // add elements in reserved range + list.add(cells[0] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); + list.add(cells[1] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5))); + list.add(cells[2] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10))); + + // add elements in qualifier range + list.add(cells[6] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16))); + list.add(cells[4] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); + list.add(cells[5] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14))); + list.add(cells[3] = KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); + } + + private void populateList(List list) { + // add elements in reserved range + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(0))); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(5))); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(10))); + + // add elements in qualifier range + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(16))); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(12))); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(14))); + list.add(KeyValueUtil.createFirstOnRow(row, cf, FOUR_BYTE_QUALIFIERS.encode(11))); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ExplainPlanTextTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ExplainPlanTextTest.java index 828751f43a1..6a0bbf82713 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/ExplainPlanTextTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ExplainPlanTextTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,10 @@ */ package org.apache.phoenix.query; -import org.apache.phoenix.util.PropertiesUtil; -import org.junit.Test; +import static org.apache.phoenix.query.QueryServices.AUTO_COMMIT_ATTRIB; +import static org.apache.phoenix.util.TestUtil.ATABLE_NAME; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; +import static org.junit.Assert.assertEquals; import java.sql.Connection; import java.sql.DriverManager; @@ -29,41 +31,38 @@ import java.util.List; import java.util.Properties; -import static org.apache.phoenix.query.QueryServices.AUTO_COMMIT_ATTRIB; -import static org.apache.phoenix.util.TestUtil.ATABLE_NAME; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; -import static org.junit.Assert.assertEquals; +import org.apache.phoenix.util.PropertiesUtil; +import org.junit.Test; -public class ExplainPlanTextTest extends BaseConnectionlessQueryTest{ +public class ExplainPlanTextTest extends BaseConnectionlessQueryTest { - String defaultDeleteStatement = "DELETE FROM " + ATABLE_NAME + " WHERE entity_id='abc'"; + String defaultDeleteStatement = "DELETE FROM " + ATABLE_NAME + " WHERE entity_id='abc'"; - @Test - public void explainDeleteClientTest() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - List plan = getExplain(defaultDeleteStatement, props); - assertEquals("DELETE ROWS CLIENT SELECT", plan.get(0)); - } + @Test + public void explainDeleteClientTest() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + List plan = getExplain(defaultDeleteStatement, props); + assertEquals("DELETE ROWS CLIENT SELECT", plan.get(0)); + } - @Test - public void explainDeleteServerTest() throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - props.setProperty(AUTO_COMMIT_ATTRIB,"true"); //need autocommit for server today - List plan = getExplain(defaultDeleteStatement, props); - assertEquals("DELETE ROWS SERVER SELECT", plan.get(0)); - } + @Test + public void explainDeleteServerTest() throws Exception { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(AUTO_COMMIT_ATTRIB, "true"); // need autocommit for server today + List plan = getExplain(defaultDeleteStatement, props); + assertEquals("DELETE ROWS SERVER SELECT", plan.get(0)); + } - private List getExplain(String query, Properties props) throws SQLException { - List explainPlan = new ArrayList<>(); - try(Connection conn = DriverManager.getConnection(getUrl(), props); - PreparedStatement statement = conn.prepareStatement("EXPLAIN " + query); - ResultSet rs = statement.executeQuery()) { - while(rs.next()) { - String plan = rs.getString(1); - explainPlan.add(plan); - } - } - return explainPlan; + private List getExplain(String query, Properties props) throws SQLException { + List explainPlan = new ArrayList<>(); + try (Connection conn = DriverManager.getConnection(getUrl(), props); + PreparedStatement statement = conn.prepareStatement("EXPLAIN " + query); + ResultSet rs = statement.executeQuery()) { + while (rs.next()) { + String plan = rs.getString(1); + explainPlan.add(plan); + } } + return explainPlan; + } } - diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheProviderTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheProviderTest.java index f3c1e27b5ca..975e2fc7a6a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheProviderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheProviderTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,17 +20,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.util.ServiceLoader; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.phoenix.exception.PhoenixNonRetryableRuntimeException; -import org.apache.phoenix.util.InstanceResolver; import org.apache.phoenix.util.ReadOnlyProps; import org.junit.Before; import org.junit.Test; @@ -38,85 +29,83 @@ public class GuidePostsCacheProviderTest { - static GuidePostsCache testCache = null; - static PhoenixStatsLoader phoenixStatsLoader = null; - - public static class TestGuidePostsCacheFactory implements GuidePostsCacheFactory { - - public static volatile int count=0; - - public TestGuidePostsCacheFactory() { - count++; - } - - @Override public PhoenixStatsLoader getPhoenixStatsLoader( - ConnectionQueryServices clientConnectionQueryServices, ReadOnlyProps readOnlyProps, - Configuration config) { - return phoenixStatsLoader; - } - - @Override - public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, - Configuration config) { - return testCache; - } - } - - private GuidePostsCacheProvider helper; - - @Before public void init(){ - TestGuidePostsCacheFactory.count = 0; - helper = new GuidePostsCacheProvider(); - } + static GuidePostsCache testCache = null; + static PhoenixStatsLoader phoenixStatsLoader = null; + public static class TestGuidePostsCacheFactory implements GuidePostsCacheFactory { - @Test(expected = java.lang.NullPointerException.class) - public void loadAndGetGuidePostsCacheFactoryNullStringFailure(){ - helper.loadAndGetGuidePostsCacheFactory(null); - } + public static volatile int count = 0; - @Test(expected = PhoenixNonRetryableRuntimeException.class) - public void loadAndGetGuidePostsCacheFactoryBadStringFailure(){ - helper.loadAndGetGuidePostsCacheFactory("not a class"); + public TestGuidePostsCacheFactory() { + count++; } - @Test(expected = PhoenixNonRetryableRuntimeException.class) - public void loadAndGetGuidePostsCacheFactoryNonImplementingClassFailure(){ - helper.loadAndGetGuidePostsCacheFactory(Object.class.getTypeName()); - } - - @Test - public void loadAndGetGuidePostsCacheFactoryTestFactory(){ - GuidePostsCacheFactory factory = helper.loadAndGetGuidePostsCacheFactory( - TestGuidePostsCacheFactory.class.getTypeName()); - assertTrue(factory instanceof TestGuidePostsCacheFactory); - } - - - @Test - public 
void getSingletonSimpleTest(){ - GuidePostsCacheFactory factory1 = helper.loadAndGetGuidePostsCacheFactory( - TestGuidePostsCacheFactory.class.getTypeName()); - assertTrue(factory1 instanceof TestGuidePostsCacheFactory); - - GuidePostsCacheFactory factory2 = helper.loadAndGetGuidePostsCacheFactory( - TestGuidePostsCacheFactory.class.getTypeName()); - assertTrue(factory2 instanceof TestGuidePostsCacheFactory); - - assertEquals(factory1,factory2); - assertEquals(1,TestGuidePostsCacheFactory.count); + @Override + public PhoenixStatsLoader getPhoenixStatsLoader( + ConnectionQueryServices clientConnectionQueryServices, ReadOnlyProps readOnlyProps, + Configuration config) { + return phoenixStatsLoader; } - @Test - public void getGuidePostsCacheWrapper(){ - testCache = Mockito.mock(GuidePostsCache.class); - ConnectionQueryServices mockQueryServices = Mockito.mock(ConnectionQueryServices.class); - Configuration mockConfiguration = Mockito.mock(Configuration.class); - GuidePostsCacheWrapper - value = - helper.getGuidePostsCache(TestGuidePostsCacheFactory.class.getTypeName(), - mockQueryServices, mockConfiguration); - value.invalidateAll(); - Mockito.verify(testCache,Mockito.atLeastOnce()).invalidateAll(); + @Override + public GuidePostsCache getGuidePostsCache(PhoenixStatsLoader phoenixStatsLoader, + Configuration config) { + return testCache; } + } + + private GuidePostsCacheProvider helper; + + @Before + public void init() { + TestGuidePostsCacheFactory.count = 0; + helper = new GuidePostsCacheProvider(); + } + + @Test(expected = java.lang.NullPointerException.class) + public void loadAndGetGuidePostsCacheFactoryNullStringFailure() { + helper.loadAndGetGuidePostsCacheFactory(null); + } + + @Test(expected = PhoenixNonRetryableRuntimeException.class) + public void loadAndGetGuidePostsCacheFactoryBadStringFailure() { + helper.loadAndGetGuidePostsCacheFactory("not a class"); + } + + @Test(expected = PhoenixNonRetryableRuntimeException.class) + public void loadAndGetGuidePostsCacheFactoryNonImplementingClassFailure() { + helper.loadAndGetGuidePostsCacheFactory(Object.class.getTypeName()); + } + + @Test + public void loadAndGetGuidePostsCacheFactoryTestFactory() { + GuidePostsCacheFactory factory = + helper.loadAndGetGuidePostsCacheFactory(TestGuidePostsCacheFactory.class.getTypeName()); + assertTrue(factory instanceof TestGuidePostsCacheFactory); + } + + @Test + public void getSingletonSimpleTest() { + GuidePostsCacheFactory factory1 = + helper.loadAndGetGuidePostsCacheFactory(TestGuidePostsCacheFactory.class.getTypeName()); + assertTrue(factory1 instanceof TestGuidePostsCacheFactory); + + GuidePostsCacheFactory factory2 = + helper.loadAndGetGuidePostsCacheFactory(TestGuidePostsCacheFactory.class.getTypeName()); + assertTrue(factory2 instanceof TestGuidePostsCacheFactory); + + assertEquals(factory1, factory2); + assertEquals(1, TestGuidePostsCacheFactory.count); + } + + @Test + public void getGuidePostsCacheWrapper() { + testCache = Mockito.mock(GuidePostsCache.class); + ConnectionQueryServices mockQueryServices = Mockito.mock(ConnectionQueryServices.class); + Configuration mockConfiguration = Mockito.mock(Configuration.class); + GuidePostsCacheWrapper value = helper.getGuidePostsCache( + TestGuidePostsCacheFactory.class.getTypeName(), mockQueryServices, mockConfiguration); + value.invalidateAll(); + Mockito.verify(testCache, Mockito.atLeastOnce()).invalidateAll(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheWrapperTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheWrapperTest.java index 4f1b11400c6..3ae0235f752 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheWrapperTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/GuidePostsCacheWrapperTest.java @@ -1,16 +1,26 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language - * governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.query; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.util.Bytes; @@ -18,89 +28,84 @@ import org.apache.phoenix.schema.PName; import org.apache.phoenix.schema.PTable; import org.apache.phoenix.schema.stats.GuidePostsKey; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - public class GuidePostsCacheWrapperTest { - @Mock - GuidePostsCache cache; - - GuidePostsCacheWrapper wrapper; - - byte[] table = org.apache.hadoop.hbase.util.Bytes.toBytes("tableName"); - byte[] columnFamily1 = Bytes.toBytesBinary("cf1"); - byte[] columnFamily2 = Bytes.toBytesBinary("cf2"); + @Mock + GuidePostsCache cache; - @Before - public void init() { - MockitoAnnotations.initMocks(this); + GuidePostsCacheWrapper wrapper; - wrapper = new GuidePostsCacheWrapper(cache); - } + byte[] table = org.apache.hadoop.hbase.util.Bytes.toBytes("tableName"); + byte[] columnFamily1 = Bytes.toBytesBinary("cf1"); + byte[] columnFamily2 = Bytes.toBytesBinary("cf2"); - @Test - public void invalidateAllTableDescriptor() { - Set cfSet = new HashSet<>(); - cfSet.add(columnFamily1); - cfSet.add(columnFamily2); + @Before + public void init() { + MockitoAnnotations.initMocks(this); + wrapper = new GuidePostsCacheWrapper(cache); + } + @Test + public void invalidateAllTableDescriptor() { + Set cfSet = new HashSet<>(); + cfSet.add(columnFamily1); + cfSet.add(columnFamily2); - TableDescriptor tableDesc = Mockito.mock(TableDescriptor.class); - TableName tableName = TableName.valueOf(table); + TableDescriptor tableDesc = Mockito.mock(TableDescriptor.class); + TableName tableName = TableName.valueOf(table); - Mockito.when(tableDesc.getColumnFamilyNames()).thenReturn(cfSet); - Mockito.when(tableDesc.getTableName()).thenReturn(tableName); + Mockito.when(tableDesc.getColumnFamilyNames()).thenReturn(cfSet); + Mockito.when(tableDesc.getTableName()).thenReturn(tableName); - wrapper.invalidateAll(tableDesc); - Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily1)); - Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily2)); - } + wrapper.invalidateAll(tableDesc); + Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily1)); + Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily2)); + } - @Test - public void invalidateAllPTable(){ - PTable ptable = Mockito.mock(PTable.class); - PName pname = Mockito.mock(PName.class); - PName pnamecf1 = Mockito.mock(PName.class); - PName pnamecf2 = Mockito.mock(PName.class); + @Test + public void invalidateAllPTable() { + PTable ptable = Mockito.mock(PTable.class); + PName pname = Mockito.mock(PName.class); + PName pnamecf1 = Mockito.mock(PName.class); + PName pnamecf2 = Mockito.mock(PName.class); - Mockito.when(ptable.getPhysicalName()).thenReturn(pname); - Mockito.when(pname.getBytes()).thenReturn(table); + Mockito.when(ptable.getPhysicalName()).thenReturn(pname); + Mockito.when(pname.getBytes()).thenReturn(table); - PColumnFamily cf1 = Mockito.mock(PColumnFamily.class); - PColumnFamily cf2 = 
Mockito.mock(PColumnFamily.class); - Mockito.when(cf1.getName()).thenReturn(pnamecf1); - Mockito.when(cf2.getName()).thenReturn(pnamecf2); - Mockito.when(pnamecf1.getBytes()).thenReturn(columnFamily1); - Mockito.when(pnamecf2.getBytes()).thenReturn(columnFamily2); + PColumnFamily cf1 = Mockito.mock(PColumnFamily.class); + PColumnFamily cf2 = Mockito.mock(PColumnFamily.class); + Mockito.when(cf1.getName()).thenReturn(pnamecf1); + Mockito.when(cf2.getName()).thenReturn(pnamecf2); + Mockito.when(pnamecf1.getBytes()).thenReturn(columnFamily1); + Mockito.when(pnamecf2.getBytes()).thenReturn(columnFamily2); - List cfList = Lists.newArrayList(cf1,cf2); - Mockito.when(ptable.getColumnFamilies()).thenReturn(cfList); + List cfList = Lists.newArrayList(cf1, cf2); + Mockito.when(ptable.getColumnFamilies()).thenReturn(cfList); - wrapper.invalidateAll(ptable); + wrapper.invalidateAll(ptable); - Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily1)); - Mockito.verify(cache,Mockito.times(1)).invalidate(new GuidePostsKey(table,columnFamily2)); - } + Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily1)); + Mockito.verify(cache, Mockito.times(1)).invalidate(new GuidePostsKey(table, columnFamily2)); + } - @Test(expected = NullPointerException.class) - public void invalidateAllTableDescriptorNull() { - TableDescriptor tableDesc = null; - wrapper.invalidateAll(tableDesc); - } + @Test(expected = NullPointerException.class) + public void invalidateAllTableDescriptorNull() { + TableDescriptor tableDesc = null; + wrapper.invalidateAll(tableDesc); + } - @Test(expected = NullPointerException.class) - public void invalidateAllPTableNull(){ - PTable ptable = null; - wrapper.invalidateAll(ptable); - } + @Test(expected = NullPointerException.class) + public void invalidateAllPTableNull() { + PTable ptable = null; + wrapper.invalidateAll(ptable); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/HBaseFactoryProviderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/HBaseFactoryProviderTest.java index 0a5942742cb..1b613abaa4c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/HBaseFactoryProviderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/HBaseFactoryProviderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,26 +17,26 @@ */ package org.apache.phoenix.query; -import org.junit.Test; - import static org.junit.Assert.assertTrue; +import org.junit.Test; + public class HBaseFactoryProviderTest { - @Test - public void testDefaultHTableFactory() { - HTableFactory provided = HBaseFactoryProvider.getHTableFactory(); - assertTrue(provided instanceof HTableFactory.HTableFactoryImpl); - } + @Test + public void testDefaultHTableFactory() { + HTableFactory provided = HBaseFactoryProvider.getHTableFactory(); + assertTrue(provided instanceof HTableFactory.HTableFactoryImpl); + } - @Test - public void testDefaultConfigurationFactory() { - ConfigurationFactory provided = HBaseFactoryProvider.getConfigurationFactory(); - assertTrue(provided instanceof ConfigurationFactory.ConfigurationFactoryImpl); - } + @Test + public void testDefaultConfigurationFactory() { + ConfigurationFactory provided = HBaseFactoryProvider.getConfigurationFactory(); + assertTrue(provided instanceof ConfigurationFactory.ConfigurationFactoryImpl); + } - @Test - public void testDefaultHConnectionFactory() { - HConnectionFactory provided = HBaseFactoryProvider.getHConnectionFactory(); - assertTrue(provided instanceof HConnectionFactory.HConnectionFactoryImpl); - } + @Test + public void testDefaultHConnectionFactory() { + HConnectionFactory provided = HBaseFactoryProvider.getHConnectionFactory(); + assertTrue(provided instanceof HConnectionFactory.HConnectionFactoryImpl); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeClipTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeClipTest.java index f7b891f2bd9..caa3462d9c6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeClipTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeClipTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,6 +44,7 @@ import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PSmallint; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.junit.After; @@ -52,104 +53,122 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - /** * Test for intersect method in {@link SkipScanFilter} */ @RunWith(Parameterized.class) public class KeyRangeClipTest extends BaseConnectionlessQueryTest { - private final RowKeySchema schema; - private final KeyRange input; - private final KeyRange expectedOutput; - private final int clipTo; + private final RowKeySchema schema; + private final KeyRange input; + private final KeyRange expectedOutput; + private final int clipTo; - private static byte[] getRange(PhoenixConnection pconn, List startValues) throws SQLException { - byte[] lowerRange; - if (startValues == null) { - lowerRange = KeyRange.UNBOUND; - } else { - String upsertValues = StringUtils.repeat("?,", startValues.size()).substring(0,startValues.size() * 2 - 1); - String upsertStmt = "UPSERT INTO T VALUES(" + upsertValues + ")"; - PreparedStatement stmt = pconn.prepareStatement(upsertStmt); - for (int i = 0; i < startValues.size(); i++) { - stmt.setObject(i+1, startValues.get(i)); - } - stmt.execute(); - Cell startCell = PhoenixRuntime.getUncommittedDataIterator(pconn).next().getSecond().get(0); - lowerRange = CellUtil.cloneRow(startCell); - pconn.rollback(); - } - return lowerRange; - } - - public KeyRangeClipTest(String tableDef, List startValues, List endValues, int clipTo, KeyRange expectedOutput) throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); - pconn.createStatement().execute("CREATE TABLE T(" + tableDef+ ")"); - PTable table = pconn.getMetaDataCache().getTableRef(new PTableKey(null,"T")).getTable(); - this.schema = table.getRowKeySchema(); - byte[] lowerRange = getRange(pconn, startValues); - byte[] upperRange = getRange(pconn, endValues); - this.input = KeyRange.getKeyRange(lowerRange, upperRange); - this.expectedOutput = expectedOutput; - this.clipTo = clipTo; + private static byte[] getRange(PhoenixConnection pconn, List startValues) + throws SQLException { + byte[] lowerRange; + if (startValues == null) { + lowerRange = KeyRange.UNBOUND; + } else { + String upsertValues = + StringUtils.repeat("?,", startValues.size()).substring(0, startValues.size() * 2 - 1); + String upsertStmt = "UPSERT INTO T VALUES(" + upsertValues + ")"; + PreparedStatement stmt = pconn.prepareStatement(upsertStmt); + for (int i = 0; i < startValues.size(); i++) { + stmt.setObject(i + 1, startValues.get(i)); + } + stmt.execute(); + Cell startCell = PhoenixRuntime.getUncommittedDataIterator(pconn).next().getSecond().get(0); + lowerRange = CellUtil.cloneRow(startCell); + pconn.rollback(); } + return lowerRange; + } - @After - public void cleanup() throws SQLException { - PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); - 
pconn.createStatement().execute("DROP TABLE T"); - } - - @Test - public void test() { - ScanRanges scanRanges = ScanRanges.create(schema, Collections.>singletonList(Collections.singletonList(input)), new int[] {schema.getFieldCount()-1}, null, false, -1); - ScanRanges clippedRange = BaseResultIterators.computePrefixScanRanges(scanRanges, clipTo); - assertEquals(expectedOutput, clippedRange.getScanRange()); - } + public KeyRangeClipTest(String tableDef, List startValues, List endValues, + int clipTo, KeyRange expectedOutput) throws SQLException { + PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); + pconn.createStatement().execute("CREATE TABLE T(" + tableDef + ")"); + PTable table = pconn.getMetaDataCache().getTableRef(new PTableKey(null, "T")).getTable(); + this.schema = table.getRowKeySchema(); + byte[] lowerRange = getRange(pconn, startValues); + byte[] upperRange = getRange(pconn, endValues); + this.input = KeyRange.getKeyRange(lowerRange, upperRange); + this.expectedOutput = expectedOutput; + this.clipTo = clipTo; + } - @Parameters(name="KeyRangeClipTest_{0}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - testCases.add(Lists.newArrayList( // [XY - *] - "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C)", - Lists.newArrayList("XY",null,"Z"), null, 2, - KeyRange.getKeyRange(Bytes.toBytes("XY"), true, UNBOUND, false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C)", - null, Lists.newArrayList("XY",null,"Z"), 2, - KeyRange.getKeyRange( - ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for unbound lower - ByteUtil.nextKey(ByteUtil.concat(Bytes.toBytes("XY"),SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY)), false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", - Lists.newArrayList("XY",null,null,"Z"), null, 3, - KeyRange.getKeyRange(Bytes.toBytes("XY"), true, UNBOUND, false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", - null, Lists.newArrayList("XY",null,null,"Z"), 3, - KeyRange.getKeyRange( - ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for unbound lower - ByteUtil.nextKey(ByteUtil.concat(Bytes.toBytes("XY"),SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY)), false)).toArray()); - testCases.add(Lists.newArrayList( - "A CHAR(1) NOT NULL, B CHAR(1) NOT NULL, C CHAR(1) NOT NULL, CONSTRAINT PK PRIMARY KEY (A,B,C)", - Lists.newArrayList("A","B","C"), Lists.newArrayList("C","D","E"), 2, - KeyRange.getKeyRange(Bytes.toBytes("AB"), true, ByteUtil.nextKey(Bytes.toBytes("CD")), false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B VARCHAR, C SMALLINT NOT NULL, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", - Lists.newArrayList("XY",null,1,"Z"), null, 3, - KeyRange.getKeyRange(ByteUtil.concat(Bytes.toBytes("XY"), SEPARATOR_BYTE_ARRAY, SEPARATOR_BYTE_ARRAY, PSmallint.INSTANCE.toBytes(1)), true, UNBOUND, false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B BIGINT NOT NULL, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B DESC,C)", - Lists.newArrayList("XYZ",1,"Z"), null, 2, - KeyRange.getKeyRange(ByteUtil.concat(Bytes.toBytes("XYZ"), SEPARATOR_BYTE_ARRAY, 
PLong.INSTANCE.toBytes(1, SortOrder.DESC)), true, UNBOUND, false)).toArray()); - testCases.add(Lists.newArrayList( - "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A DESC,B,C)", - null, Lists.newArrayList("XY",null,"Z"), 3, - KeyRange.getKeyRange( - ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for unbound lower - (ByteUtil.concat(PVarchar.INSTANCE.toBytes("XY",SortOrder.DESC),DESC_SEPARATOR_BYTE_ARRAY,SEPARATOR_BYTE_ARRAY,Bytes.toBytes("Z"))), false)).toArray()); - return testCases; - } + @After + public void cleanup() throws SQLException { + PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class); + pconn.createStatement().execute("DROP TABLE T"); + } + + @Test + public void test() { + ScanRanges scanRanges = ScanRanges.create(schema, + Collections.> singletonList(Collections. singletonList(input)), + new int[] { schema.getFieldCount() - 1 }, null, false, -1); + ScanRanges clippedRange = BaseResultIterators.computePrefixScanRanges(scanRanges, clipTo); + assertEquals(expectedOutput, clippedRange.getScanRange()); + } + + @Parameters(name = "KeyRangeClipTest_{0}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + testCases.add(Lists.newArrayList( // [XY - *] + "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C)", + Lists.newArrayList("XY", null, "Z"), null, 2, + KeyRange.getKeyRange(Bytes.toBytes("XY"), true, UNBOUND, false)).toArray()); + testCases.add(Lists + .newArrayList("A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C)", + null, Lists.newArrayList("XY", null, "Z"), 2, + KeyRange.getKeyRange(ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for + // unbound lower + ByteUtil.nextKey(ByteUtil.concat(Bytes.toBytes("XY"), SEPARATOR_BYTE_ARRAY, + SEPARATOR_BYTE_ARRAY, SEPARATOR_BYTE_ARRAY)), + false)) + .toArray()); + testCases.add(Lists.newArrayList( + "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", + Lists.newArrayList("XY", null, null, "Z"), null, 3, + KeyRange.getKeyRange(Bytes.toBytes("XY"), true, UNBOUND, false)).toArray()); + testCases.add(Lists + .newArrayList( + "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", + null, Lists.newArrayList("XY", null, null, "Z"), 3, KeyRange + .getKeyRange(ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for + // unbound lower + ByteUtil.nextKey(ByteUtil.concat(Bytes.toBytes("XY"), SEPARATOR_BYTE_ARRAY, + SEPARATOR_BYTE_ARRAY, SEPARATOR_BYTE_ARRAY, SEPARATOR_BYTE_ARRAY)), + false)) + .toArray()); + testCases.add(Lists.newArrayList( + "A CHAR(1) NOT NULL, B CHAR(1) NOT NULL, C CHAR(1) NOT NULL, CONSTRAINT PK PRIMARY KEY (A,B,C)", + Lists.newArrayList("A", "B", "C"), Lists.newArrayList("C", "D", "E"), 2, + KeyRange.getKeyRange(Bytes.toBytes("AB"), true, ByteUtil.nextKey(Bytes.toBytes("CD")), false)) + .toArray()); + testCases.add(Lists.newArrayList( + "A VARCHAR NOT NULL, B VARCHAR, C SMALLINT NOT NULL, D VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B,C,D)", + Lists. newArrayList("XY", null, 1, "Z"), null, 3, + KeyRange.getKeyRange(ByteUtil.concat(Bytes.toBytes("XY"), SEPARATOR_BYTE_ARRAY, + SEPARATOR_BYTE_ARRAY, PSmallint.INSTANCE.toBytes(1)), true, UNBOUND, false)) + .toArray()); + testCases.add(Lists.newArrayList( + "A VARCHAR NOT NULL, B BIGINT NOT NULL, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A,B DESC,C)", + Lists. 
newArrayList("XYZ", 1, "Z"), null, 2, + KeyRange.getKeyRange(ByteUtil.concat(Bytes.toBytes("XYZ"), SEPARATOR_BYTE_ARRAY, + PLong.INSTANCE.toBytes(1, SortOrder.DESC)), true, UNBOUND, false)) + .toArray()); + testCases.add(Lists + .newArrayList( + "A VARCHAR NOT NULL, B VARCHAR, C VARCHAR, CONSTRAINT PK PRIMARY KEY (A DESC,B,C)", null, + Lists.newArrayList("XY", null, "Z"), 3, + KeyRange.getKeyRange(ByteUtil.nextKey(SEPARATOR_BYTE_ARRAY), true, // skips null values for + // unbound lower + (ByteUtil.concat(PVarchar.INSTANCE.toBytes("XY", SortOrder.DESC), + DESC_SEPARATOR_BYTE_ARRAY, SEPARATOR_BYTE_ARRAY, Bytes.toBytes("Z"))), + false)) + .toArray()); + return testCases; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeCoalesceTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeCoalesceTest.java index f3a53b3103c..0fc0353e2a6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeCoalesceTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeCoalesceTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; -import static org.apache.phoenix.query.KeyRange.EMPTY_RANGE; -import static org.apache.phoenix.query.KeyRange.EVERYTHING_RANGE; import static java.util.Arrays.asList; import static org.apache.hadoop.hbase.util.Bytes.toBytes; +import static org.apache.phoenix.query.KeyRange.EMPTY_RANGE; +import static org.apache.phoenix.query.KeyRange.EVERYTHING_RANGE; import java.util.*; -import junit.framework.TestCase; - import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PChar; import org.junit.Test; @@ -34,128 +31,76 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import junit.framework.TestCase; + @RunWith(Parameterized.class) public class KeyRangeCoalesceTest extends TestCase { - private static final Random RANDOM = new Random(1); - private final List expected, input; + private static final Random RANDOM = new Random(1); + private final List expected, input; + + public KeyRangeCoalesceTest(List expected, List input) { + this.expected = expected; + this.input = input; + } + + @Parameters(name = "{0} coalesces to {1}") + public static synchronized Collection data() { + return Arrays + .asList(new Object[][] { + { expect(EMPTY_RANGE), input() }, { + expect(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC)), + input( + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC)), + input( + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("Z"), true, SortOrder.ASC)), + input(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, 
SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), true, SortOrder.ASC)), + input(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), true, SortOrder.ASC)), + input(PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("A"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC)), + input(PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("A"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC)) }, + { expect(PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), false, toBytes("Z"), false, SortOrder.ASC)), + input( + PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), false, toBytes("Z"), false, SortOrder.ASC)) }, + { expect( + PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("Z"), false, SortOrder.ASC)), + input(PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC)) }, + { expect(EVERYTHING_RANGE), input(EVERYTHING_RANGE, EVERYTHING_RANGE) }, + { expect(EVERYTHING_RANGE), input(EVERYTHING_RANGE) }, + { expect(EVERYTHING_RANGE), input(EMPTY_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE) }, + { expect(EMPTY_RANGE), input(EMPTY_RANGE) } }); + } + + @Test + public void coalesce() { + assertEquals(expected, KeyRange.coalesce(input)); + List tmp = new ArrayList(input); + Collections.reverse(tmp); + assertEquals(expected, KeyRange.coalesce(input)); + Collections.shuffle(tmp, RANDOM); + assertEquals(expected, KeyRange.coalesce(input)); + } - public KeyRangeCoalesceTest(List expected, List input) { - this.expected = expected; - this.input = input; - } + private static final List expect(KeyRange... 
kr) { + return asList(kr); + } - @Parameters(name="{0} coalesces to {1}") - public static synchronized Collection data() { - return Arrays.asList(new Object[][] { - {expect( - EMPTY_RANGE - ), - input( - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("Z"), true, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), true, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), true, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("D"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("Z"), true, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("A"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("A"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), false, toBytes("Z"), false, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), false, toBytes("Z"), false, SortOrder.ASC) - )}, - {expect( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("Z"), false, SortOrder.ASC) - ), - input( - PChar.INSTANCE.getKeyRange(toBytes("A"), true, toBytes("B"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("B"), true, toBytes("Z"), false, SortOrder.ASC) - )}, - {expect( - EVERYTHING_RANGE - ), - input( - EVERYTHING_RANGE, - EVERYTHING_RANGE - )}, - {expect( - EVERYTHING_RANGE - ), - input( - EVERYTHING_RANGE - )}, - {expect( - EVERYTHING_RANGE - ), - input( - EMPTY_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE - )}, - {expect( - EMPTY_RANGE - ), - input( - EMPTY_RANGE - )} - }); - } - @Test - public void coalesce() { - assertEquals(expected, KeyRange.coalesce(input)); - List tmp = new ArrayList(input); - Collections.reverse(tmp); - assertEquals(expected, KeyRange.coalesce(input)); - Collections.shuffle(tmp, RANDOM); - assertEquals(expected, KeyRange.coalesce(input)); - } - - private static final List expect(KeyRange... 
kr) { - return asList(kr); - } - - private static final List input(KeyRange... kr) { - return asList(kr); - } + private static final List input(KeyRange... kr) { + return asList(kr); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeIntersectTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeIntersectTest.java index d8ae8a6e951..0c253ae44b2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeIntersectTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeIntersectTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; import static org.apache.hadoop.hbase.util.Bytes.toBytes; @@ -25,8 +24,6 @@ import java.util.Arrays; import java.util.Collection; -import junit.framework.TestCase; - import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDate; @@ -36,69 +33,52 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import junit.framework.TestCase; + @RunWith(Parameterized.class) public class KeyRangeIntersectTest extends TestCase { - private final KeyRange a, b, intersection; + private final KeyRange a, b, intersection; + + public KeyRangeIntersectTest(KeyRange a, KeyRange b, KeyRange intersection) { + this.a = a; + this.b = b; + this.intersection = intersection; + } - public KeyRangeIntersectTest(KeyRange a, KeyRange b, KeyRange intersection) { - this.a = a; - this.b = b; - this.intersection = intersection; - } + @Parameters(name = "intersection of {0} and {1} is {2}") + public static synchronized Collection data() { + return Arrays.asList(new Object[][] { + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("E"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("E"), false, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), + EMPTY_RANGE }, + { EVERYTHING_RANGE, + PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), }, + { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE, }, + { EMPTY_RANGE, EVERYTHING_RANGE, EMPTY_RANGE }, + { EMPTY_RANGE, + PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, 
SortOrder.ASC), + EMPTY_RANGE }, + { PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2011-01-01")), true, + PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-01-01")), true, SortOrder.ASC), + PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2012-10-21")), false, + PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-10-31")), false, SortOrder.ASC), + PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2012-10-21")), false, + PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-01-01")), true, SortOrder.ASC) } }); + } - @Parameters(name="intersection of {0} and {1} is {2}") - public static synchronized Collection data() { - return Arrays.asList(new Object[][] { - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("E"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), false, toBytes("E"), false, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), - EMPTY_RANGE - }, - { - EVERYTHING_RANGE, - PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), - }, - { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - }, - { - EMPTY_RANGE, - EVERYTHING_RANGE, - EMPTY_RANGE - }, - { - EMPTY_RANGE, - PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), - EMPTY_RANGE - }, - { - PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2011-01-01")), true, PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-01-01")), true, SortOrder.ASC), - PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2012-10-21")), false, PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-10-31")), false, SortOrder.ASC), - PDate.INSTANCE.getKeyRange(PDate.INSTANCE.toBytes(DateUtil.parseDate("2012-10-21")), false, PDate.INSTANCE.toBytes(DateUtil.parseDate("2016-01-01")), true, SortOrder.ASC) - } - }); - } - @Test - public void intersect() { - assertEquals(intersection, a.intersect(b)); - assertEquals(intersection, b.intersect(a)); - } + @Test + public void intersect() { + assertEquals(intersection, a.intersect(b)); + assertEquals(intersection, b.intersect(a)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeMoreTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeMoreTest.java index 6ff133542a6..0f38ef30478 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeMoreTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeMoreTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.query; import java.util.ArrayList; @@ -23,256 +22,247 @@ import java.util.Collections; import java.util.List; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PInteger; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import junit.framework.TestCase; public class KeyRangeMoreTest extends TestCase { - @Test - public void testListIntersectWithOneResultRange() throws Exception { - for(boolean addEmptyRange : new boolean[]{true,false}) { - doTestListIntersectWithOneResultRange(0,200,3,1,180,2,addEmptyRange); - doTestListIntersectWithOneResultRange(1,180,2,0,200,3,addEmptyRange); + @Test + public void testListIntersectWithOneResultRange() throws Exception { + for (boolean addEmptyRange : new boolean[] { true, false }) { + doTestListIntersectWithOneResultRange(0, 200, 3, 1, 180, 2, addEmptyRange); + doTestListIntersectWithOneResultRange(1, 180, 2, 0, 200, 3, addEmptyRange); - doTestListIntersectWithOneResultRange(1,180,3,0,200,2,addEmptyRange); - doTestListIntersectWithOneResultRange(0,200,2,1,180,3,addEmptyRange); + doTestListIntersectWithOneResultRange(1, 180, 3, 0, 200, 2, addEmptyRange); + doTestListIntersectWithOneResultRange(0, 200, 2, 1, 180, 3, addEmptyRange); - doTestListIntersectWithOneResultRange(0, 200, 3, 1, 180, 100,addEmptyRange); - doTestListIntersectWithOneResultRange(1, 180, 100,0, 200, 3,addEmptyRange); + doTestListIntersectWithOneResultRange(0, 200, 3, 1, 180, 100, addEmptyRange); + doTestListIntersectWithOneResultRange(1, 180, 100, 0, 200, 3, addEmptyRange); - doTestListIntersectWithOneResultRange(1, 180, 3, 0, 200, 100,addEmptyRange); - doTestListIntersectWithOneResultRange(0, 200, 100,1, 180, 3,addEmptyRange); - } + doTestListIntersectWithOneResultRange(1, 180, 3, 0, 200, 100, addEmptyRange); + doTestListIntersectWithOneResultRange(0, 200, 100, 1, 180, 3, addEmptyRange); } + } - private void doTestListIntersectWithOneResultRange(int start1,int end1,int step1,int start2,int end2,int step2,boolean addEmptyRange) throws Exception { - List rowKeyRanges1=new ArrayList(); - List rowKeyRanges2=new ArrayList(); - for(int i=start1;i<=end1;i++) { - rowKeyRanges1.add( - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(i), true, PInteger.INSTANCE.toBytes(i+step1), true, SortOrder.ASC)); - - } - if(addEmptyRange) { - rowKeyRanges1.add(KeyRange.EMPTY_RANGE); - } - for(int i=start2;i<=end2;i++) { - rowKeyRanges2.add( - PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(i), true, PInteger.INSTANCE.toBytes(i+step2), true, SortOrder.ASC)); - } - if(addEmptyRange) { - rowKeyRanges2.add(KeyRange.EMPTY_RANGE); - } - int maxStart=Math.max(start1, start2); - int minEnd=Math.min(end1+step1, end2+step2); - - List expected=Arrays.asList(KeyRange.getKeyRange( - PInteger.INSTANCE.toBytes(maxStart), - true, - PInteger.INSTANCE.toBytes(minEnd), - true)); - - listIntersectAndAssert(rowKeyRanges1,rowKeyRanges2,expected); - } - - @Test - public void 
testListIntersectWithMultiResultRange() throws Exception { - for(boolean addEmptyRange : new boolean[]{true,false}) { - doTestListIntersectWithMultiResultRange(1, 100, 3, 4, 120, 6,addEmptyRange); - doTestListIntersectWithMultiResultRange(4, 120, 6,1, 100, 3,addEmptyRange); - - doTestListIntersectWithMultiResultRange(1, 200, 3, 5, 240, 10,addEmptyRange); - doTestListIntersectWithMultiResultRange(5, 240, 10,1, 200, 3,addEmptyRange); - } + private void doTestListIntersectWithOneResultRange(int start1, int end1, int step1, int start2, + int end2, int step2, boolean addEmptyRange) throws Exception { + List rowKeyRanges1 = new ArrayList(); + List rowKeyRanges2 = new ArrayList(); + for (int i = start1; i <= end1; i++) { + rowKeyRanges1.add(PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(i), true, + PInteger.INSTANCE.toBytes(i + step1), true, SortOrder.ASC)); } - - private void doTestListIntersectWithMultiResultRange(int start1,int count1,int step1,int start2,int count2,int step2,boolean addEmptyRange) throws Exception { - List rowKeyRanges1=new ArrayList(); - List rowKeyRanges2=new ArrayList(); - for(int i=1;i<=count1;i++) { - rowKeyRanges1.add( - PInteger.INSTANCE.getKeyRange( - PInteger.INSTANCE.toBytes(start1+(i-1)*(step1+1)), - true, - PInteger.INSTANCE.toBytes(start1+i*(step1+1)-1), - true, SortOrder.ASC)); - - } - if(addEmptyRange) { - rowKeyRanges1.add(KeyRange.EMPTY_RANGE); - } - for(int i=1;i<=count2;i++) { - rowKeyRanges2.add( - PInteger.INSTANCE.getKeyRange( - PInteger.INSTANCE.toBytes(start2+(i-1)*(step2+1)), - true, - PInteger.INSTANCE.toBytes(start2+i*(step2+1)-1), - true, SortOrder.ASC)); - } - if(addEmptyRange) { - rowKeyRanges2.add(KeyRange.EMPTY_RANGE); - } - int maxStart=Math.max(start1, start2); - int minEnd=Math.min(start1+count1*(step1+1)-1, start2+count2*(step2+1)-1); - - for(int i=0;i<200;i++) { - List result=KeyRange.intersect(rowKeyRanges1, rowKeyRanges2); - assertResult(result, maxStart,minEnd); - result=KeyRange.intersect(rowKeyRanges2, rowKeyRanges1); - assertResult(result, maxStart,minEnd); - Collections.shuffle(rowKeyRanges1); - Collections.shuffle(rowKeyRanges2); - } + if (addEmptyRange) { + rowKeyRanges1.add(KeyRange.EMPTY_RANGE); } - - private void assertResult(List result,int start,int end) { - int expectStart=start; - for(KeyRange rowKeyRange : result) { - byte[] lowerRange=rowKeyRange.getLowerRange(); - assertTrue(Bytes.equals(lowerRange, PInteger.INSTANCE.toBytes(expectStart))); - byte[] upperRange=rowKeyRange.getUpperRange(); - expectStart=((Integer)PInteger.INSTANCE.toObject(upperRange)).intValue()+1; - } - assertTrue(expectStart-1==end); + for (int i = start2; i <= end2; i++) { + rowKeyRanges2.add(PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(i), true, + PInteger.INSTANCE.toBytes(i + step2), true, SortOrder.ASC)); } - - @Test - public void testListIntersectForPoint() throws Exception { - for(boolean addEmptyRange : new boolean[]{true,false}) { - List rowKeyRanges1=new ArrayList(); - List rowKeyRanges2=new ArrayList(); - for(int i=0;i<=300;i+=2) { - rowKeyRanges1.add( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); - } - if(addEmptyRange) { - rowKeyRanges1.add(KeyRange.EMPTY_RANGE); - } - for(int i=0;i<=300;i+=3) { - rowKeyRanges2.add( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); - } - if(addEmptyRange) { - rowKeyRanges2.add(KeyRange.EMPTY_RANGE); - } - - List expected=new ArrayList(); - for(int i=0;i<=300;i+=6) { - expected.add( - KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); - } - 
listIntersectAndAssert(rowKeyRanges1,rowKeyRanges2,expected); - } - } - - @Test - public void testListIntersectForBoundary() throws Exception { - List rowKeyRanges1=Arrays.asList(KeyRange.EVERYTHING_RANGE); - List rowKeyRanges2=new ArrayList(); - for(int i=0;i<=100;) { - rowKeyRanges2.add( - PInteger.INSTANCE.getKeyRange( - PInteger.INSTANCE.toBytes(i), - true, - PInteger.INSTANCE.toBytes(i+2), - true, SortOrder.ASC)); - i+=4; - } - List expected=new ArrayList(rowKeyRanges2); - listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); - - rowKeyRanges1=Arrays.asList(KeyRange.EMPTY_RANGE); - rowKeyRanges2=new ArrayList(expected); - listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, Arrays.asList(KeyRange.EMPTY_RANGE)); - - listIntersectAndAssert(Arrays.asList(KeyRange.EMPTY_RANGE),Arrays.asList(KeyRange.EVERYTHING_RANGE),Arrays.asList(KeyRange.EMPTY_RANGE)); - - rowKeyRanges1 = createKeyRangeList( - Arrays.asList(2, 5, 8, Integer.MAX_VALUE), - Arrays.asList(true, true, true, false)); - rowKeyRanges2 = createKeyRangeList( - Arrays.asList(Integer.MIN_VALUE, 4, 7, 10, 13, 14, 19, Integer.MAX_VALUE), - Arrays.asList(false, true, true, true, true, true, true, false)); - expected = createKeyRangeList( - Arrays.asList(2, 4, 8, 10, 13, 14, 19, Integer.MAX_VALUE), - Arrays.asList(true, true, true, true, true, true, true, false)); - listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); - - rowKeyRanges1 = createKeyRangeList( - Arrays.asList(3, 5, 5, 6), - Arrays.asList(true, false, true, false)); - rowKeyRanges2 = createKeyRangeList( - Arrays.asList(3, 5, 6, 7), - Arrays.asList(true, true, true, true)); - expected = createKeyRangeList( - Arrays.asList(3, 5), - Arrays.asList(true, true)); - listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + if (addEmptyRange) { + rowKeyRanges2.add(KeyRange.EMPTY_RANGE); } + int maxStart = Math.max(start1, start2); + int minEnd = Math.min(end1 + step1, end2 + step2); - @Test - public void testKeyRangeCompareUpperRange() throws Exception { - List rowKeyRanges1 = createKeyRangeListWithFixedLowerRange( - Arrays.asList(Integer.MAX_VALUE, Integer.MAX_VALUE, 10000, 1001, 1000, 1000, 1000, 1000, 1000), - Arrays.asList(false, false, true, true, true, true, false, true, false)); - List rowKeyRanges2 = createKeyRangeListWithFixedLowerRange( - Arrays.asList(Integer.MAX_VALUE, 10000, Integer.MAX_VALUE, 1000, 1001, 1000, 1000, 1000, 1000), - Arrays.asList(false, false, false, true, true, true, false, false, true)); - List expectedResults = Arrays.asList(0, 1, -1, 1, -1, 0, 0, 1, -1); - assertEquals(rowKeyRanges1.size(), rowKeyRanges2.size()); - assertEquals(rowKeyRanges1.size(), expectedResults.size()); - - for (int i = 0; i < expectedResults.size(); i++) { - int compareResult = KeyRange.compareUpperRange(rowKeyRanges1.get(i), rowKeyRanges2.get(i)); - assertEquals(expectedResults.get(i).intValue(), compareResult); - } - } + List expected = + Arrays.asList(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(maxStart), true, + PInteger.INSTANCE.toBytes(minEnd), true)); - private static List createKeyRangeListWithFixedLowerRange(List keys, List boundaryConditions) { - assertEquals(keys.size(), boundaryConditions.size()); - List newKeys = Lists.newArrayListWithCapacity(keys.size() * 2); - List newBoundaryConditions = Lists.newArrayListWithCapacity(boundaryConditions.size() * 2); + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + } - for (int i = 0; i < keys.size(); i++) { - newKeys.add(0); - newBoundaryConditions.add(true); - 
newKeys.add(keys.get(i)); - newBoundaryConditions.add(boundaryConditions.get(i)); - } + @Test + public void testListIntersectWithMultiResultRange() throws Exception { + for (boolean addEmptyRange : new boolean[] { true, false }) { + doTestListIntersectWithMultiResultRange(1, 100, 3, 4, 120, 6, addEmptyRange); + doTestListIntersectWithMultiResultRange(4, 120, 6, 1, 100, 3, addEmptyRange); - return createKeyRangeList(newKeys, newBoundaryConditions); + doTestListIntersectWithMultiResultRange(1, 200, 3, 5, 240, 10, addEmptyRange); + doTestListIntersectWithMultiResultRange(5, 240, 10, 1, 200, 3, addEmptyRange); } - private static List createKeyRangeList(List keys, List boundaryConditions) { - assertEquals(keys.size(), boundaryConditions.size()); - assertTrue(keys.size() % 2 == 0); + } - int size = keys.size() / 2; - List keyRangeList = Lists.newArrayListWithCapacity(size); + private void doTestListIntersectWithMultiResultRange(int start1, int count1, int step1, + int start2, int count2, int step2, boolean addEmptyRange) throws Exception { + List rowKeyRanges1 = new ArrayList(); + List rowKeyRanges2 = new ArrayList(); + for (int i = 1; i <= count1; i++) { + rowKeyRanges1.add( + PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(start1 + (i - 1) * (step1 + 1)), + true, PInteger.INSTANCE.toBytes(start1 + i * (step1 + 1) - 1), true, SortOrder.ASC)); - for (int i = 0; i < size; i++) { - byte[] startKey = keys.get(2*i).equals(Integer.MIN_VALUE) ? KeyRange.UNBOUND : PInteger.INSTANCE.toBytes(keys.get(2*i)); - byte[] endKey = keys.get(2*i + 1).equals(Integer.MAX_VALUE) ? KeyRange.UNBOUND : PInteger.INSTANCE.toBytes(keys.get(2*i + 1)); - keyRangeList.add(PInteger.INSTANCE.getKeyRange(startKey, boundaryConditions.get(2*i), endKey, boundaryConditions.get(2*i+1), SortOrder.ASC)); - } + } + if (addEmptyRange) { + rowKeyRanges1.add(KeyRange.EMPTY_RANGE); + } + for (int i = 1; i <= count2; i++) { + rowKeyRanges2.add( + PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(start2 + (i - 1) * (step2 + 1)), + true, PInteger.INSTANCE.toBytes(start2 + i * (step2 + 1) - 1), true, SortOrder.ASC)); + } + if (addEmptyRange) { + rowKeyRanges2.add(KeyRange.EMPTY_RANGE); + } + int maxStart = Math.max(start1, start2); + int minEnd = Math.min(start1 + count1 * (step1 + 1) - 1, start2 + count2 * (step2 + 1) - 1); + + for (int i = 0; i < 200; i++) { + List result = KeyRange.intersect(rowKeyRanges1, rowKeyRanges2); + assertResult(result, maxStart, minEnd); + result = KeyRange.intersect(rowKeyRanges2, rowKeyRanges1); + assertResult(result, maxStart, minEnd); + Collections.shuffle(rowKeyRanges1); + Collections.shuffle(rowKeyRanges2); + } + } + + private void assertResult(List result, int start, int end) { + int expectStart = start; + for (KeyRange rowKeyRange : result) { + byte[] lowerRange = rowKeyRange.getLowerRange(); + assertTrue(Bytes.equals(lowerRange, PInteger.INSTANCE.toBytes(expectStart))); + byte[] upperRange = rowKeyRange.getUpperRange(); + expectStart = ((Integer) PInteger.INSTANCE.toObject(upperRange)).intValue() + 1; + } + assertTrue(expectStart - 1 == end); + } + + @Test + public void testListIntersectForPoint() throws Exception { + for (boolean addEmptyRange : new boolean[] { true, false }) { + List rowKeyRanges1 = new ArrayList(); + List rowKeyRanges2 = new ArrayList(); + for (int i = 0; i <= 300; i += 2) { + rowKeyRanges1.add(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); + } + if (addEmptyRange) { + rowKeyRanges1.add(KeyRange.EMPTY_RANGE); + } + for (int i = 0; i <= 300; i += 3) { + 
rowKeyRanges2.add(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); + } + if (addEmptyRange) { + rowKeyRanges2.add(KeyRange.EMPTY_RANGE); + } + + List expected = new ArrayList(); + for (int i = 0; i <= 300; i += 6) { + expected.add(KeyRange.getKeyRange(PInteger.INSTANCE.toBytes(i))); + } + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + } + } + + @Test + public void testListIntersectForBoundary() throws Exception { + List rowKeyRanges1 = Arrays.asList(KeyRange.EVERYTHING_RANGE); + List rowKeyRanges2 = new ArrayList(); + for (int i = 0; i <= 100;) { + rowKeyRanges2.add(PInteger.INSTANCE.getKeyRange(PInteger.INSTANCE.toBytes(i), true, + PInteger.INSTANCE.toBytes(i + 2), true, SortOrder.ASC)); + i += 4; + } + List expected = new ArrayList(rowKeyRanges2); + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + + rowKeyRanges1 = Arrays.asList(KeyRange.EMPTY_RANGE); + rowKeyRanges2 = new ArrayList(expected); + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, Arrays.asList(KeyRange.EMPTY_RANGE)); + + listIntersectAndAssert(Arrays.asList(KeyRange.EMPTY_RANGE), + Arrays.asList(KeyRange.EVERYTHING_RANGE), Arrays.asList(KeyRange.EMPTY_RANGE)); + + rowKeyRanges1 = createKeyRangeList(Arrays.asList(2, 5, 8, Integer.MAX_VALUE), + Arrays.asList(true, true, true, false)); + rowKeyRanges2 = + createKeyRangeList(Arrays.asList(Integer.MIN_VALUE, 4, 7, 10, 13, 14, 19, Integer.MAX_VALUE), + Arrays.asList(false, true, true, true, true, true, true, false)); + expected = createKeyRangeList(Arrays.asList(2, 4, 8, 10, 13, 14, 19, Integer.MAX_VALUE), + Arrays.asList(true, true, true, true, true, true, true, false)); + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + + rowKeyRanges1 = + createKeyRangeList(Arrays.asList(3, 5, 5, 6), Arrays.asList(true, false, true, false)); + rowKeyRanges2 = + createKeyRangeList(Arrays.asList(3, 5, 6, 7), Arrays.asList(true, true, true, true)); + expected = createKeyRangeList(Arrays.asList(3, 5), Arrays.asList(true, true)); + listIntersectAndAssert(rowKeyRanges1, rowKeyRanges2, expected); + } + + @Test + public void testKeyRangeCompareUpperRange() throws Exception { + List rowKeyRanges1 = createKeyRangeListWithFixedLowerRange(Arrays + .asList(Integer.MAX_VALUE, Integer.MAX_VALUE, 10000, 1001, 1000, 1000, 1000, 1000, 1000), + Arrays.asList(false, false, true, true, true, true, false, true, false)); + List rowKeyRanges2 = createKeyRangeListWithFixedLowerRange(Arrays + .asList(Integer.MAX_VALUE, 10000, Integer.MAX_VALUE, 1000, 1001, 1000, 1000, 1000, 1000), + Arrays.asList(false, false, false, true, true, true, false, false, true)); + List expectedResults = Arrays.asList(0, 1, -1, 1, -1, 0, 0, 1, -1); + assertEquals(rowKeyRanges1.size(), rowKeyRanges2.size()); + assertEquals(rowKeyRanges1.size(), expectedResults.size()); + + for (int i = 0; i < expectedResults.size(); i++) { + int compareResult = KeyRange.compareUpperRange(rowKeyRanges1.get(i), rowKeyRanges2.get(i)); + assertEquals(expectedResults.get(i).intValue(), compareResult); + } + } + + private static List createKeyRangeListWithFixedLowerRange(List keys, + List boundaryConditions) { + assertEquals(keys.size(), boundaryConditions.size()); + List newKeys = Lists.newArrayListWithCapacity(keys.size() * 2); + List newBoundaryConditions = + Lists.newArrayListWithCapacity(boundaryConditions.size() * 2); + + for (int i = 0; i < keys.size(); i++) { + newKeys.add(0); + newBoundaryConditions.add(true); + newKeys.add(keys.get(i)); + newBoundaryConditions.add(boundaryConditions.get(i)); + } 
- return keyRangeList; + return createKeyRangeList(newKeys, newBoundaryConditions); + } + + private static List createKeyRangeList(List keys, + List boundaryConditions) { + assertEquals(keys.size(), boundaryConditions.size()); + assertTrue(keys.size() % 2 == 0); + + int size = keys.size() / 2; + List keyRangeList = Lists.newArrayListWithCapacity(size); + + for (int i = 0; i < size; i++) { + byte[] startKey = keys.get(2 * i).equals(Integer.MIN_VALUE) + ? KeyRange.UNBOUND + : PInteger.INSTANCE.toBytes(keys.get(2 * i)); + byte[] endKey = keys.get(2 * i + 1).equals(Integer.MAX_VALUE) + ? KeyRange.UNBOUND + : PInteger.INSTANCE.toBytes(keys.get(2 * i + 1)); + keyRangeList.add(PInteger.INSTANCE.getKeyRange(startKey, boundaryConditions.get(2 * i), + endKey, boundaryConditions.get(2 * i + 1), SortOrder.ASC)); } - private static void listIntersectAndAssert(List rowKeyRanges1,List rowKeyRanges2,List expected) { - for (int i = 0; i < 200; i++) { - List result = KeyRange.intersect(rowKeyRanges1, rowKeyRanges2); - assertEquals(expected, result); - result = KeyRange.intersect(rowKeyRanges2, rowKeyRanges1); - assertEquals(expected, result); - Collections.shuffle(rowKeyRanges1); - Collections.shuffle(rowKeyRanges2); - } + return keyRangeList; + } + + private static void listIntersectAndAssert(List rowKeyRanges1, + List rowKeyRanges2, List expected) { + for (int i = 0; i < 200; i++) { + List result = KeyRange.intersect(rowKeyRanges1, rowKeyRanges2); + assertEquals(expected, result); + result = KeyRange.intersect(rowKeyRanges2, rowKeyRanges1); + assertEquals(expected, result); + Collections.shuffle(rowKeyRanges1); + Collections.shuffle(rowKeyRanges2); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeUnionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeUnionTest.java index 174843fed06..79bdc06859d 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeUnionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/KeyRangeUnionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.query; +import static org.apache.hadoop.hbase.util.Bytes.toBytes; import static org.apache.phoenix.query.KeyRange.EMPTY_RANGE; import static org.apache.phoenix.query.KeyRange.EVERYTHING_RANGE; -import static org.apache.hadoop.hbase.util.Bytes.toBytes; import java.util.Arrays; import java.util.Collection; -import junit.framework.TestCase; - import org.apache.phoenix.schema.SortOrder; import org.apache.phoenix.schema.types.PChar; import org.junit.Test; @@ -34,64 +31,46 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import junit.framework.TestCase; + @RunWith(Parameterized.class) public class KeyRangeUnionTest extends TestCase { - private final KeyRange a, b, union; + private final KeyRange a, b, union; + + public KeyRangeUnionTest(KeyRange a, KeyRange b, KeyRange union) { + this.a = a; + this.b = b; + this.union = union; + } - public KeyRangeUnionTest(KeyRange a, KeyRange b, KeyRange union) { - this.a = a; - this.b = b; - this.union = union; - } + @Parameters(name = "union of {0} and {1} is {2}") + public static synchronized Collection data() { + return Arrays.asList(new Object[][] { + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("F"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("F"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), + EMPTY_RANGE, + PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC) }, + { EVERYTHING_RANGE, + PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), + EVERYTHING_RANGE, }, + { EVERYTHING_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE, }, + { EMPTY_RANGE, EVERYTHING_RANGE, EVERYTHING_RANGE, }, }); + } - @Parameters(name="union of {0} and {1} is {2}") - public static synchronized Collection data() { - return Arrays.asList(new Object[][] { - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("F"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("F"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("F"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("D"), true, toBytes("E"), true, SortOrder.ASC), - 
PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), false, toBytes("E"), false, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC), - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), true, SortOrder.ASC) - }, - { - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC), - EMPTY_RANGE, - PChar.INSTANCE.getKeyRange(toBytes("C"), true, toBytes("E"), false, SortOrder.ASC) - }, - { - EVERYTHING_RANGE, - PChar.INSTANCE.getKeyRange(toBytes("E"), false, toBytes("F"), true, SortOrder.ASC), - EVERYTHING_RANGE, - }, - { - EVERYTHING_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - }, - { - EMPTY_RANGE, - EVERYTHING_RANGE, - EVERYTHING_RANGE, - }, - }); - } - @Test - public void union() { - assertEquals(union, a.union(b)); - assertEquals(union, b.union(a)); - } + @Test + public void union() { + assertEquals(union, a.union(b)); + assertEquals(union, b.union(a)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java index c200c40d811..dcdec78b712 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -36,67 +36,78 @@ import org.junit.Test; public class OrderByTest extends BaseConnectionlessQueryTest { - @Test - public void testSortOrderForSingleDescVarLengthCol() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY DESC)"); - conn.createStatement().execute("UPSERT INTO t VALUES ('a')"); - conn.createStatement().execute("UPSERT INTO t VALUES ('ab')"); + @Test + public void testSortOrderForSingleDescVarLengthCol() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY DESC)"); + conn.createStatement().execute("UPSERT INTO t VALUES ('a')"); + conn.createStatement().execute("UPSERT INTO t VALUES ('ab')"); - Iterator>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List kvs = dataIterator.next().getSecond(); - Collections.sort(kvs, CellComparatorImpl.COMPARATOR); - Cell first = kvs.get(0); - assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1))); - Cell second = kvs.get(1); - assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1))); - } + Iterator>> dataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn); + List kvs = dataIterator.next().getSecond(); + Collections.sort(kvs, CellComparatorImpl.COMPARATOR); + Cell first = kvs.get(0); + assertEquals("ab", Bytes.toString( + SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength() - 1))); + Cell second = kvs.get(1); + assertEquals("a", Bytes.toString( + SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength() - 1))); + } - 
@Test - public void testSortOrderForLeadingDescVarLengthColWithNullFollowing() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR, k2 VARCHAR, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))"); - conn.createStatement().execute("UPSERT INTO t VALUES ('a')"); - conn.createStatement().execute("UPSERT INTO t VALUES ('ab')"); + @Test + public void testSortOrderForLeadingDescVarLengthColWithNullFollowing() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement() + .execute("CREATE TABLE t (k1 VARCHAR, k2 VARCHAR, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))"); + conn.createStatement().execute("UPSERT INTO t VALUES ('a')"); + conn.createStatement().execute("UPSERT INTO t VALUES ('ab')"); - Iterator>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List kvs = dataIterator.next().getSecond(); - Collections.sort(kvs, CellComparatorImpl.COMPARATOR); - Cell first = kvs.get(0); - assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1))); - Cell second = kvs.get(1); - assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1))); - } + Iterator>> dataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn); + List kvs = dataIterator.next().getSecond(); + Collections.sort(kvs, CellComparatorImpl.COMPARATOR); + Cell first = kvs.get(0); + assertEquals("ab", Bytes.toString( + SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength() - 1))); + Cell second = kvs.get(1); + assertEquals("a", Bytes.toString( + SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength() - 1))); + } - @Test - public void testSortOrderForLeadingDescVarLengthColWithNonNullFollowing() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR, k2 VARCHAR NOT NULL, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))"); - conn.createStatement().execute("UPSERT INTO t VALUES ('a','x')"); - conn.createStatement().execute("UPSERT INTO t VALUES ('ab', 'x')"); + @Test + public void testSortOrderForLeadingDescVarLengthColWithNonNullFollowing() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE t (k1 VARCHAR, k2 VARCHAR NOT NULL, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))"); + conn.createStatement().execute("UPSERT INTO t VALUES ('a','x')"); + conn.createStatement().execute("UPSERT INTO t VALUES ('ab', 'x')"); - Iterator>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List kvs = dataIterator.next().getSecond(); - Collections.sort(kvs, CellComparatorImpl.COMPARATOR); - Cell first = kvs.get(0); - assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), 2))); - Cell second = kvs.get(1); - assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), 1))); - } + Iterator>> dataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn); + List kvs = dataIterator.next().getSecond(); + Collections.sort(kvs, CellComparatorImpl.COMPARATOR); + Cell first = kvs.get(0); + assertEquals("ab", + Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), 2))); + Cell second = kvs.get(1); + assertEquals("a", + Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), 1))); + } 
- @Test - public void testSortOrderForSingleDescTimestampCol() throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE t (k TIMESTAMP PRIMARY KEY DESC)"); - conn.createStatement().execute("UPSERT INTO t VALUES ('2016-01-04 13:11:51.631')"); + @Test + public void testSortOrderForSingleDescTimestampCol() throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute("CREATE TABLE t (k TIMESTAMP PRIMARY KEY DESC)"); + conn.createStatement().execute("UPSERT INTO t VALUES ('2016-01-04 13:11:51.631')"); - Iterator>> dataIterator = PhoenixRuntime - .getUncommittedDataIterator(conn); - List kvs = dataIterator.next().getSecond(); - Collections.sort(kvs, CellComparatorImpl.COMPARATOR); - Cell first = kvs.get(0); - long millisDeserialized = PDate.INSTANCE.getCodec().decodeLong(first.getRowArray(), - first.getRowOffset(), SortOrder.DESC); - assertEquals(1451913111631L, millisDeserialized); + Iterator>> dataIterator = + PhoenixRuntime.getUncommittedDataIterator(conn); + List kvs = dataIterator.next().getSecond(); + Collections.sort(kvs, CellComparatorImpl.COMPARATOR); + Cell first = kvs.get(0); + long millisDeserialized = PDate.INSTANCE.getCodec().decodeLong(first.getRowArray(), + first.getRowOffset(), SortOrder.DESC); + assertEquals(1451913111631L, millisDeserialized); } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java index b6e367a77b4..435d6709fe2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -68,6 +68,9 @@ import org.apache.phoenix.schema.TableRef; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PropertiesUtil; import org.junit.Test; @@ -75,442 +78,402 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - @RunWith(Parameterized.class) public class ParallelIteratorsSplitTest extends BaseConnectionlessQueryTest { - private static final String TABLE_NAME = "TEST_SKIP_RANGE_PARALLEL_ITERATOR"; - private static final String DDL = "CREATE TABLE " + TABLE_NAME + " (id char(3) NOT NULL PRIMARY KEY, \"value\" integer)"; - private static final byte[] Ka1A = Bytes.toBytes("a1A"); - private static final byte[] Ka1B = Bytes.toBytes("a1B"); - private static final byte[] Ka1E = Bytes.toBytes("a1E"); - private static final byte[] Ka1G = Bytes.toBytes("a1G"); - private static final byte[] Ka1I = Bytes.toBytes("a1I"); - private static final byte[] Ka2A = Bytes.toBytes("a2A"); - - private final Scan scan; - private final ScanRanges scanRanges; - private final List expectedSplits; - - public ParallelIteratorsSplitTest(Scan scan, ScanRanges scanRanges, List expectedSplits) { - this.scan = scan; - this.scanRanges = scanRanges; - this.expectedSplits = expectedSplits; + private static final String TABLE_NAME = "TEST_SKIP_RANGE_PARALLEL_ITERATOR"; + private static final String DDL = + "CREATE TABLE " + TABLE_NAME + " (id char(3) NOT NULL PRIMARY KEY, \"value\" integer)"; + private static final byte[] Ka1A = Bytes.toBytes("a1A"); + private static final byte[] Ka1B = Bytes.toBytes("a1B"); + private static final byte[] Ka1E = Bytes.toBytes("a1E"); + private static final byte[] Ka1G = Bytes.toBytes("a1G"); + private static final byte[] Ka1I = Bytes.toBytes("a1I"); + private static final byte[] Ka2A = Bytes.toBytes("a2A"); + + private final Scan scan; + private final ScanRanges scanRanges; + private final List expectedSplits; + + public ParallelIteratorsSplitTest(Scan scan, ScanRanges scanRanges, + List expectedSplits) { + this.scan = scan; + this.scanRanges = scanRanges; + this.expectedSplits = expectedSplits; + } + + @Test + public void testGetSplitsWithSkipScanFilter() throws Exception { + byte[][] splits = new byte[][] { Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A }; + createTestTable(getUrl(), DDL, splits, null); + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + Connection conn = DriverManager.getConnection(getUrl(), props); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME)); + TableRef tableRef = new TableRef(table); + List regions = pconn.getQueryServices() + .getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes(), 
60000); + List ranges = getSplits(tableRef, scan, regions, scanRanges); + assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), + ranges.size()); + for (int i = 0; i < expectedSplits.size(); i++) { + assertEquals(expectedSplits.get(i), ranges.get(i)); } - - @Test - public void testGetSplitsWithSkipScanFilter() throws Exception { - byte[][] splits = new byte[][] {Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A}; - createTestTable(getUrl(),DDL,splits, null); - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME)); - TableRef tableRef = new TableRef(table); - List regions = - pconn.getQueryServices() - .getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes(), - 60000); - List ranges = getSplits(tableRef, scan, regions, scanRanges); - assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size()); - for (int i=0; i data() { + List testCases = Lists.newArrayList(); + // Scan range is empty. + testCases.addAll(foreach(ScanRanges.NOTHING, new int[] { 1, 1, 1 }, new KeyRange[] {})); + // Scan range is everything. + testCases.addAll(foreach(ScanRanges.EVERYTHING, new int[] { 1, 1, 1 }, + new KeyRange[] { getKeyRange(KeyRange.UNBOUND, true, Ka1A, false), + getKeyRange(Ka1A, true, Ka1B, false), getKeyRange(Ka1B, true, Ka1E, false), + getKeyRange(Ka1E, true, Ka1G, false), getKeyRange(Ka1G, true, Ka1I, false), + getKeyRange(Ka1I, true, Ka2A, false), getKeyRange(Ka2A, true, KeyRange.UNBOUND, false) })); + // Scan range lies inside first region. + testCases.addAll(foreach( + new KeyRange[][] { { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) }, + { getKeyRange(Bytes.toBytes("0"), true, Bytes.toBytes("0"), true) }, + { getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("Z"), true) } }, + new int[] { 1, 1, 1 }, new KeyRange[] { getKeyRange("a0A", true, nextKey("a0Z"), false) })); + // Scan range lies in between first and second, intersecting bound on second. + testCases.addAll(foreach( + new KeyRange[][] { { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) }, + { getKeyRange(Bytes.toBytes("0"), true, Bytes.toBytes("0"), true), + getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) }, + { getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true) } }, + new int[] { 1, 1, 1 }, new KeyRange[] { getKeyRange("a0A", true, Ka1A, false), + getKeyRange(Ka1A, true, Ka1B, false), })); + // Scan range spans third, split into 3 due to concurrency config. + testCases.addAll(foreach( + new KeyRange[][] { { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) }, + { getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) }, + { getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), false) } }, + new int[] { 1, 1, 1 }, new KeyRange[] { getKeyRange(Ka1B, true, Ka1E, false) })); + // Scan range spans third, split into 3 due to concurrency config. + testCases.addAll(foreach( + new KeyRange[][] { { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) }, + { getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) }, + { getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), false) } }, + new int[] { 1, 1, 1 }, new KeyRange[] { getKeyRange(Ka1B, true, Ka1E, false), })); + // Scan range spans 2 ranges, split into 4 due to concurrency config. 
+ testCases.addAll(foreach( + new KeyRange[][] { { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) }, + { getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) }, + { getKeyRange(Bytes.toBytes("F"), true, Bytes.toBytes("H"), false) } }, + new int[] { 1, 1, 1 }, new KeyRange[] { getKeyRange("a1F", true, Ka1G, false), + getKeyRange(Ka1G, true, "a1H", false), })); + // Scan range spans more than 3 range, no split. + testCases.addAll(foreach( + new KeyRange[][] { + { getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true), + getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true) }, + { getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true), + getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true), }, + { getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true), + getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true), + getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), true) } }, + new int[] { 1, 1, 1 }, + new KeyRange[] { getKeyRange(Ka1A, true, Ka1B, false), getKeyRange(Ka1B, true, Ka1E, false), + getKeyRange(Ka1G, true, Ka1I, false), getKeyRange(Ka2A, true, nextKey("b2G"), false) })); + return testCases; + } + + private static RowKeySchema buildSchema(int[] widths) { + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); + for (final int width : widths) { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; } - } - private static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, boolean upperInclusive) { - return PChar.INSTANCE.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive, SortOrder.ASC); - } - - private static KeyRange getKeyRange(String lowerRange, boolean lowerInclusive, String upperRange, boolean upperInclusive) { - return PChar.INSTANCE.getKeyRange(Bytes.toBytes(lowerRange), lowerInclusive, Bytes.toBytes(upperRange), upperInclusive, SortOrder.ASC); - } - - private static KeyRange getKeyRange(String lowerRange, boolean lowerInclusive, byte[] upperRange, boolean upperInclusive) { - return PChar.INSTANCE.getKeyRange(Bytes.toBytes(lowerRange), lowerInclusive, upperRange, upperInclusive, SortOrder.ASC); - } - - private static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, String upperRange, boolean upperInclusive) { - return PChar.INSTANCE.getKeyRange(lowerRange, lowerInclusive, Bytes.toBytes(upperRange), upperInclusive, SortOrder.ASC); - } - - private static String nextKey(String s) { - return Bytes.toString(ByteUtil.nextKey(Bytes.toBytes(s))); - } + @Override + public PDataType getDataType() { + return PChar.INSTANCE; + } - @Parameters(name="{1} {2}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // Scan range is empty. - testCases.addAll( - foreach(ScanRanges.NOTHING, - new int[] {1,1,1}, - new KeyRange[] { })); - // Scan range is everything. - testCases.addAll( - foreach(ScanRanges.EVERYTHING, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange(KeyRange.UNBOUND, true, Ka1A, false), - getKeyRange(Ka1A, true, Ka1B, false), - getKeyRange(Ka1B, true, Ka1E, false), - getKeyRange(Ka1E, true, Ka1G, false), - getKeyRange(Ka1G, true, Ka1I, false), - getKeyRange(Ka1I, true, Ka2A, false), - getKeyRange(Ka2A, true, KeyRange.UNBOUND, false) - })); - // Scan range lies inside first region. 
- testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) - },{ - getKeyRange(Bytes.toBytes("0"), true, Bytes.toBytes("0"), true) - },{ - getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("Z"), true) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange("a0A", true, nextKey("a0Z"), false) - })); - // Scan range lies in between first and second, intersecting bound on second. - testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) - },{ - getKeyRange(Bytes.toBytes("0"), true, Bytes.toBytes("0"), true), - getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) - },{ - getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange("a0A", true, Ka1A, false), - getKeyRange(Ka1A, true, Ka1B, false), - })); - // Scan range spans third, split into 3 due to concurrency config. - testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) - },{ - getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) - },{ - getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), false) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange(Ka1B, true, Ka1E, false) - })); - // Scan range spans third, split into 3 due to concurrency config. - testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) - },{ - getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) - },{ - getKeyRange(Bytes.toBytes("B"), true, Bytes.toBytes("E"), false) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange(Ka1B, true, Ka1E, false), - })); - // Scan range spans 2 ranges, split into 4 due to concurrency config. - testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true) - },{ - getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true) - },{ - getKeyRange(Bytes.toBytes("F"), true, Bytes.toBytes("H"), false) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange("a1F", true, Ka1G, false), - getKeyRange(Ka1G, true, "a1H", false), - })); - // Scan range spans more than 3 range, no split. 
- testCases.addAll( - foreach(new KeyRange[][]{ - { - getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true), - getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true) - },{ - getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true), - getKeyRange(Bytes.toBytes("2"), true, Bytes.toBytes("2"), true), - },{ - getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true), - getKeyRange(Bytes.toBytes("C"), true, Bytes.toBytes("D"), true), - getKeyRange(Bytes.toBytes("G"), true, Bytes.toBytes("G"), true) - }}, - new int[] {1,1,1}, - new KeyRange[] { - getKeyRange(Ka1A, true, Ka1B, false), - getKeyRange(Ka1B, true, Ka1E, false), - getKeyRange(Ka1G, true, Ka1I, false), - getKeyRange(Ka2A, true, nextKey("b2G"), false) - })); - return testCases; - } + @Override + public Integer getMaxLength() { + return width; + } - private static RowKeySchema buildSchema(int[] widths) { - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10); - for (final int width : widths) { - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - @Override - public PDataType getDataType() { - return PChar.INSTANCE; - } - @Override - public Integer getMaxLength() { - return width; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); + @Override + public Integer getScale() { + return null; } - return builder.build(); - } - - private static Collection foreach(ScanRanges scanRanges, int[] widths, KeyRange[] expectedSplits) { - SkipScanFilter filter = new SkipScanFilter(scanRanges.getRanges(), buildSchema(widths), false); - Scan scan = new Scan().setFilter(filter).withStartRow(KeyRange.UNBOUND).withStopRow(KeyRange.UNBOUND, true); - List ret = Lists.newArrayList(); - ret.add(new Object[] {scan, scanRanges, Arrays.asList(expectedSplits)}); - return ret; - } - private static Collection foreach(KeyRange[][] ranges, int[] widths, KeyRange[] expectedSplits) { - RowKeySchema schema = buildSchema(widths); - List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - SkipScanFilter filter = new SkipScanFilter(slots, schema, false); - // Always set start and stop key to max to verify we are using the information in skipscan - // filter over the scan's KMIN and KMAX. - Scan scan = new Scan().setFilter(filter); - ScanRanges scanRanges = ScanRanges.createSingleSpan(schema, slots); - List ret = Lists.newArrayList(); - ret.add(new Object[] {scan, scanRanges, Arrays.asList(expectedSplits)}); - return ret; + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); } - - private static final Function> ARRAY_TO_LIST = - new Function>() { - @Override - public List apply(KeyRange[] input) { - return Lists.newArrayList(input); - } + return builder.build(); + } + + private static Collection foreach(ScanRanges scanRanges, int[] widths, + KeyRange[] expectedSplits) { + SkipScanFilter filter = new SkipScanFilter(scanRanges.getRanges(), buildSchema(widths), false); + Scan scan = new Scan().setFilter(filter).withStartRow(KeyRange.UNBOUND) + .withStopRow(KeyRange.UNBOUND, true); + List ret = Lists.newArrayList(); + ret.add(new Object[] { scan, scanRanges, Arrays. 
asList(expectedSplits) }); + return ret; + } + + private static Collection foreach(KeyRange[][] ranges, int[] widths, + KeyRange[] expectedSplits) { + RowKeySchema schema = buildSchema(widths); + List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + SkipScanFilter filter = new SkipScanFilter(slots, schema, false); + // Always set start and stop key to max to verify we are using the information in skipscan + // filter over the scan's KMIN and KMAX. + Scan scan = new Scan().setFilter(filter); + ScanRanges scanRanges = ScanRanges.createSingleSpan(schema, slots); + List ret = Lists.newArrayList(); + ret.add(new Object[] { scan, scanRanges, Arrays. asList(expectedSplits) }); + return ret; + } + + private static final Function> ARRAY_TO_LIST = + new Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); + } }; - private static List getSplits(final TableRef tableRef, final Scan scan, final List regions, - final ScanRanges scanRanges) throws SQLException { - final List tableRefs = Collections.singletonList(tableRef); - ColumnResolver resolver = new ColumnResolver() { - - @Override - public List getFunctions() { - return Collections.emptyList(); - } - - @Override - public List getTables() { - return tableRefs; - } - - @Override - public TableRef resolveTable(String schemaName, String tableName) - throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public PFunction resolveFunction(String functionName) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasUDFs() { - return false; - } - - @Override - public PSchema resolveSchema(String schemaName) throws SQLException { - return null; - } - - @Override - public List getSchemas() { - return null; - } - }; - PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - final PhoenixStatement statement = new PhoenixStatement(connection); - final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement)); - context.setScanRanges(scanRanges); - ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() { - private final Set tableRefs = ImmutableSet.of(tableRef); - - @Override - public StatementContext getContext() { - return context; - } - - @Override - public ParameterMetaData getParameterMetaData() { - return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; - } - - @Override - public ExplainPlan getExplainPlan() throws SQLException { - return ExplainPlan.EMPTY_PLAN; - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - return ResultIterator.EMPTY_ITERATOR; - } - - @Override - public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException { - return ResultIterator.EMPTY_ITERATOR; - } - - @Override - public ResultIterator iterator() throws SQLException { - return ResultIterator.EMPTY_ITERATOR; - } - - @Override - public long getEstimatedSize() { - return 0; - } - - @Override - public Set getSourceRefs() { - return tableRefs; - } - - @Override - public TableRef getTableRef() { - return tableRef; - } - - @Override - public RowProjector getProjector() { - return RowProjector.EMPTY_PROJECTOR; - } - - 
@Override - public Integer getLimit() { - return null; - } - - @Override - public Integer getOffset() { - return null; - } - - @Override - public OrderBy getOrderBy() { - return OrderBy.EMPTY_ORDER_BY; - } - - @Override - public GroupBy getGroupBy() { - return GroupBy.EMPTY_GROUP_BY; - } - - @Override - public List getSplits() { - return null; - } - - @Override - public FilterableStatement getStatement() { - return SelectStatement.SELECT_ONE; - } - - @Override - public boolean isDegenerate() { - return false; - } - - @Override - public boolean isRowKeyOrdered() { - return true; - } - - @Override - public List> getScans() { - return null; - } - - @Override - public Operation getOperation() { - return Operation.QUERY; - } - - @Override - public boolean useRoundRobinIterator() { - return false; - } - - @Override - public T accept(QueryPlanVisitor visitor) { - return visitor.defaultReturn(this); - } - - @Override - public Long getEstimatedRowsToScan() { - return null; - } - - @Override - public Long getEstimatedBytesToScan() { - return null; - } - - @Override - public Long getEstimateInfoTimestamp() throws SQLException { - return null; - } - - @Override - public Cost getCost() { - return Cost.ZERO; - } - - @Override - public List getOutputOrderBys() { - return Collections. emptyList(); - } - - @Override - public boolean isApplicable() { return true; } - }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false, null, null); - List keyRanges = parallelIterators.getSplits(); - return keyRanges; - } -} \ No newline at end of file + private static List getSplits(final TableRef tableRef, final Scan scan, + final List regions, final ScanRanges scanRanges) throws SQLException { + final List tableRefs = Collections.singletonList(tableRef); + ColumnResolver resolver = new ColumnResolver() { + + @Override + public List getFunctions() { + return Collections.emptyList(); + } + + @Override + public List getTables() { + return tableRefs; + } + + @Override + public TableRef resolveTable(String schemaName, String tableName) throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public ColumnRef resolveColumn(String schemaName, String tableName, String colName) + throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public PFunction resolveFunction(String functionName) throws SQLException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasUDFs() { + return false; + } + + @Override + public PSchema resolveSchema(String schemaName) throws SQLException { + return null; + } + + @Override + public List getSchemas() { + return null; + } + }; + PhoenixConnection connection = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + final PhoenixStatement statement = new PhoenixStatement(connection); + final StatementContext context = + new StatementContext(statement, resolver, scan, new SequenceManager(statement)); + context.setScanRanges(scanRanges); + ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() { + private final Set tableRefs = ImmutableSet.of(tableRef); + + @Override + public StatementContext getContext() { + return context; + } + + @Override + public ParameterMetaData getParameterMetaData() { + return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA; + } + + @Override + public ExplainPlan getExplainPlan() throws SQLException { + return 
ExplainPlan.EMPTY_PLAN; + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { + return ResultIterator.EMPTY_ITERATOR; + } + + @Override + public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) + throws SQLException { + return ResultIterator.EMPTY_ITERATOR; + } + + @Override + public ResultIterator iterator() throws SQLException { + return ResultIterator.EMPTY_ITERATOR; + } + + @Override + public long getEstimatedSize() { + return 0; + } + + @Override + public Set getSourceRefs() { + return tableRefs; + } + + @Override + public TableRef getTableRef() { + return tableRef; + } + + @Override + public RowProjector getProjector() { + return RowProjector.EMPTY_PROJECTOR; + } + + @Override + public Integer getLimit() { + return null; + } + + @Override + public Integer getOffset() { + return null; + } + + @Override + public OrderBy getOrderBy() { + return OrderBy.EMPTY_ORDER_BY; + } + + @Override + public GroupBy getGroupBy() { + return GroupBy.EMPTY_GROUP_BY; + } + + @Override + public List getSplits() { + return null; + } + + @Override + public FilterableStatement getStatement() { + return SelectStatement.SELECT_ONE; + } + + @Override + public boolean isDegenerate() { + return false; + } + + @Override + public boolean isRowKeyOrdered() { + return true; + } + + @Override + public List> getScans() { + return null; + } + + @Override + public Operation getOperation() { + return Operation.QUERY; + } + + @Override + public boolean useRoundRobinIterator() { + return false; + } + + @Override + public T accept(QueryPlanVisitor visitor) { + return visitor.defaultReturn(this); + } + + @Override + public Long getEstimatedRowsToScan() { + return null; + } + + @Override + public Long getEstimatedBytesToScan() { + return null; + } + + @Override + public Long getEstimateInfoTimestamp() throws SQLException { + return null; + } + + @Override + public Cost getCost() { + return Cost.ZERO; + } + + @Override + public List getOutputOrderBys() { + return Collections. emptyList(); + } + + @Override + public boolean isApplicable() { + return true; + } + }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory( + context.getConnection().getQueryServices()), context.getScan(), false, null, null); + List keyRanges = parallelIterators.getSplits(); + return keyRanges; + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheLoaderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheLoaderTest.java index d129670c200..1859e391a6c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheLoaderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheLoaderTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,138 +20,143 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; -import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache; -import org.apache.phoenix.thirdparty.com.google.common.cache.Weigher; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.phoenix.schema.stats.GuidePostsInfo; import org.apache.phoenix.schema.stats.GuidePostsKey; +import org.apache.phoenix.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.phoenix.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.phoenix.thirdparty.com.google.common.cache.Weigher; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; -import java.lang.Thread; -import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ExecutorService; - /** * Test class around the PhoenixStatsCacheLoader. */ public class PhoenixStatsCacheLoaderTest { - /** - * {@link PhoenixStatsLoader} test implementation for the Stats Loader. - */ - protected class TestStatsLoaderImpl implements PhoenixStatsLoader { - private int maxLength = 1; - private final CountDownLatch firstTimeRefreshedSignal; - private final CountDownLatch secondTimeRefreshedSignal; - - public TestStatsLoaderImpl(CountDownLatch firstTimeRefreshedSignal, CountDownLatch secondTimeRefreshedSignal) { - this.firstTimeRefreshedSignal = firstTimeRefreshedSignal; - this.secondTimeRefreshedSignal = secondTimeRefreshedSignal; - } + /** + * {@link PhoenixStatsLoader} test implementation for the Stats Loader. + */ + protected class TestStatsLoaderImpl implements PhoenixStatsLoader { + private int maxLength = 1; + private final CountDownLatch firstTimeRefreshedSignal; + private final CountDownLatch secondTimeRefreshedSignal; + + public TestStatsLoaderImpl(CountDownLatch firstTimeRefreshedSignal, + CountDownLatch secondTimeRefreshedSignal) { + this.firstTimeRefreshedSignal = firstTimeRefreshedSignal; + this.secondTimeRefreshedSignal = secondTimeRefreshedSignal; + } - @Override - public boolean needsLoad() { - // Whenever it's called, we try to load stats from stats table - // no matter it has been updated or not. - return true; - } + @Override + public boolean needsLoad() { + // Whenever it's called, we try to load stats from stats table + // no matter it has been updated or not. + return true; + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { - return new GuidePostsInfo(Collections. emptyList(), - new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), - Collections. 
emptyList(), maxLength++, 0, Collections. emptyList()); - } + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey) throws Exception { + return new GuidePostsInfo(Collections. emptyList(), + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), Collections. emptyList(), + maxLength++, 0, Collections. emptyList()); + } - @Override - public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) throws Exception { - firstTimeRefreshedSignal.countDown(); - secondTimeRefreshedSignal.countDown(); + @Override + public GuidePostsInfo loadStats(GuidePostsKey statsKey, GuidePostsInfo prevGuidepostInfo) + throws Exception { + firstTimeRefreshedSignal.countDown(); + secondTimeRefreshedSignal.countDown(); - return new GuidePostsInfo(Collections. emptyList(), - new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), - Collections. emptyList(), maxLength++, 0, Collections. emptyList()); - } + return new GuidePostsInfo(Collections. emptyList(), + new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY), Collections. emptyList(), + maxLength++, 0, Collections. emptyList()); + } + } + + GuidePostsInfo getStats(LoadingCache cache, + GuidePostsKey guidePostsKey) { + GuidePostsInfo guidePostsInfo; + try { + guidePostsInfo = cache.get(guidePostsKey); + } catch (ExecutionException e) { + assertFalse(true); + return GuidePostsInfo.NO_GUIDEPOST; } - GuidePostsInfo getStats(LoadingCache cache, GuidePostsKey guidePostsKey) { - GuidePostsInfo guidePostsInfo; - try { - guidePostsInfo = cache.get(guidePostsKey); - } catch (ExecutionException e) { - assertFalse(true); - return GuidePostsInfo.NO_GUIDEPOST; - } + return guidePostsInfo; + } - return guidePostsInfo; + void sleep(int x) { + try { + Thread.sleep(x); + } catch (InterruptedException e) { + assertFalse(true); } + } - void sleep(int x) { - try { - Thread.sleep(x); - } - catch (InterruptedException e) { - assertFalse(true); - } - } + @Test + public void testStatsBeingAutomaticallyRefreshed() { + ExecutorService executor = Executors.newFixedThreadPool(4); - @Test - public void testStatsBeingAutomaticallyRefreshed() { - ExecutorService executor = Executors.newFixedThreadPool(4); - - CountDownLatch firstTimeRefreshedSignal = new CountDownLatch(1); - CountDownLatch secondTimeRefreshedSignal = new CountDownLatch(2); - - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - - LoadingCache cache = CacheBuilder.newBuilder() - // Refresh entries a given amount of time after they were written - .refreshAfterWrite(100, TimeUnit.MILLISECONDS) - // Maximum total weight (size in bytes) of stats entries - .maximumWeight(QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE) - // Defer actual size to the PTableStats.getEstimatedSize() - .weigher(new Weigher() { - @Override public int weigh(GuidePostsKey key, GuidePostsInfo info) { - return info.getEstimatedSize(); - } - }) - // Log removals at TRACE for debugging - .removalListener(new GuidePostsCacheImpl.PhoenixStatsCacheRemovalListener()) - // Automatically load the cache when entries are missing - .build(new PhoenixStatsCacheLoader(new TestStatsLoaderImpl( - firstTimeRefreshedSignal, secondTimeRefreshedSignal), config)); - - try { - GuidePostsKey guidePostsKey = new GuidePostsKey(new byte[4], new byte[4]); - GuidePostsInfo guidePostsInfo = getStats(cache, guidePostsKey); - assertTrue(guidePostsInfo.getMaxLength() == 1); - - // Note: With Guava cache, automatic refreshes are performed when the first stale request for an entry occurs. 
- - // After we sleep here for any time which is larger than the refresh cycle, the refresh of cache entry will be - // triggered for its first time by the call of getStats(). This is deterministic behavior, and it won't cause - // randomized test failures. - sleep(150); - guidePostsInfo = getStats(cache, guidePostsKey); - // Refresh has been triggered for its first time, but still could get the old value - assertTrue(guidePostsInfo.getMaxLength() >= 1); - firstTimeRefreshedSignal.await(); - - sleep(150); - guidePostsInfo = getStats(cache, guidePostsKey); - // Now the second time refresh has been triggered by the above getStats() call, the first time Refresh has completed - // and the cache entry has been updated for sure. - assertTrue(guidePostsInfo.getMaxLength() >= 2); - secondTimeRefreshedSignal.await(); - } - catch (InterruptedException e) { - assertFalse(true); + CountDownLatch firstTimeRefreshedSignal = new CountDownLatch(1); + CountDownLatch secondTimeRefreshedSignal = new CountDownLatch(2); + + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + + LoadingCache cache = CacheBuilder.newBuilder() + // Refresh entries a given amount of time after they were written + .refreshAfterWrite(100, TimeUnit.MILLISECONDS) + // Maximum total weight (size in bytes) of stats entries + .maximumWeight(QueryServicesOptions.DEFAULT_STATS_MAX_CACHE_SIZE) + // Defer actual size to the PTableStats.getEstimatedSize() + .weigher(new Weigher() { + @Override + public int weigh(GuidePostsKey key, GuidePostsInfo info) { + return info.getEstimatedSize(); } + }) + // Log removals at TRACE for debugging + .removalListener(new GuidePostsCacheImpl.PhoenixStatsCacheRemovalListener()) + // Automatically load the cache when entries are missing + .build(new PhoenixStatsCacheLoader( + new TestStatsLoaderImpl(firstTimeRefreshedSignal, secondTimeRefreshedSignal), config)); + + try { + GuidePostsKey guidePostsKey = new GuidePostsKey(new byte[4], new byte[4]); + GuidePostsInfo guidePostsInfo = getStats(cache, guidePostsKey); + assertTrue(guidePostsInfo.getMaxLength() == 1); + + // Note: With Guava cache, automatic refreshes are performed when the first stale request for + // an entry occurs. + + // After we sleep here for any time which is larger than the refresh cycle, the refresh of + // cache entry will be + // triggered for its first time by the call of getStats(). This is deterministic behavior, and + // it won't cause + // randomized test failures. + sleep(150); + guidePostsInfo = getStats(cache, guidePostsKey); + // Refresh has been triggered for its first time, but still could get the old value + assertTrue(guidePostsInfo.getMaxLength() >= 1); + firstTimeRefreshedSignal.await(); + + sleep(150); + guidePostsInfo = getStats(cache, guidePostsKey); + // Now the second time refresh has been triggered by the above getStats() call, the first time + // Refresh has completed + // and the cache entry has been updated for sure. 
+ assertTrue(guidePostsInfo.getMaxLength() >= 2); + secondTimeRefreshedSignal.await(); + } catch (InterruptedException e) { + assertFalse(true); } -} \ No newline at end of file + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheRemovalListenerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheRemovalListenerTest.java index a0b8f060a16..3a085d5ea8c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheRemovalListenerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixStatsCacheRemovalListenerTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,26 +21,25 @@ import static org.junit.Assert.assertTrue; import org.apache.phoenix.query.GuidePostsCacheImpl.PhoenixStatsCacheRemovalListener; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.cache.RemovalCause; +import org.junit.Test; /** * Test class around the PhoenixStatsCacheRemovalListener. 
*/ public class PhoenixStatsCacheRemovalListenerTest { - @Test - public void nonEvictionsAreIgnored() { - // We don't care so much about cases where we trigger a removal or update of the stats - // for a table in the cache, but we would want to know about updates happening automatically - PhoenixStatsCacheRemovalListener listener = new PhoenixStatsCacheRemovalListener(); - // User-driven removals or updates - assertFalse(listener.wasEvicted(RemovalCause.EXPLICIT)); - assertFalse(listener.wasEvicted(RemovalCause.REPLACED)); - // Automatic removals by the cache itself (per configuration) - assertTrue(listener.wasEvicted(RemovalCause.COLLECTED)); - assertTrue(listener.wasEvicted(RemovalCause.EXPIRED)); - assertTrue(listener.wasEvicted(RemovalCause.SIZE)); - } -} \ No newline at end of file + @Test + public void nonEvictionsAreIgnored() { + // We don't care so much about cases where we trigger a removal or update of the stats + // for a table in the cache, but we would want to know about updates happening automatically + PhoenixStatsCacheRemovalListener listener = new PhoenixStatsCacheRemovalListener(); + // User-driven removals or updates + assertFalse(listener.wasEvicted(RemovalCause.EXPLICIT)); + assertFalse(listener.wasEvicted(RemovalCause.REPLACED)); + // Automatic removals by the cache itself (per configuration) + assertTrue(listener.wasEvicted(RemovalCause.COLLECTED)); + assertTrue(listener.wasEvicted(RemovalCause.EXPIRED)); + assertTrue(listener.wasEvicted(RemovalCause.SIZE)); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java index f3c2f58f8e9..4bbecf202af 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/PhoenixTestBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,23 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.query; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.jdbc.PhoenixStatement; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.thirdparty.com.google.common.collect.Table; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTableKey; -import org.apache.phoenix.thirdparty.com.google.common.collect.TreeBasedTable; -import org.apache.phoenix.util.PropertiesUtil; -import org.apache.phoenix.util.SchemaUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static java.util.Arrays.asList; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; +import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; import java.sql.Connection; import java.sql.DriverManager; @@ -44,2114 +34,2053 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import static java.util.Arrays.asList; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS; -import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.jdbc.PhoenixStatement; +import org.apache.phoenix.schema.PTable; +import org.apache.phoenix.schema.PTableKey; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.apache.phoenix.thirdparty.com.google.common.collect.Table; +import org.apache.phoenix.thirdparty.com.google.common.collect.TreeBasedTable; +import org.apache.phoenix.util.PropertiesUtil; +import org.apache.phoenix.util.SchemaUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * PhoenixTestBuilder is a utility class using a Builder pattern. - * Facilitates the following creation patterns - * 1. Simple tables. - * 2. Global Views on tables. - * 3. Tenant Views on tables or global views. - * 4. Indexes global or local on all of the above. - * 5. Create multiple tenants - * 6. Multiple views for a tenant. - * Typical usage pattern when using this class is - - * 1. Create schema for the test. - * 2. Provide a DataSupplier for the above schema. - * 3. Write validations for your tests. + * PhoenixTestBuilder is a utility class using a Builder pattern. Facilitates the following creation + * patterns 1. Simple tables. 2. Global Views on tables. 3. Tenant Views on tables or global views. + * 4. Indexes global or local on all of the above. 5. Create multiple tenants 6. Multiple views for + * a tenant. Typical usage pattern when using this class is - 1. Create schema for the test. 2. + * Provide a DataSupplier for the above schema. 3. Write validations for your tests. * PhoenixTestBuilder facilitates steps 1 and 2. 
*/ public class PhoenixTestBuilder { - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTestBuilder.class); - private static final int MAX_SUFFIX_VALUE = 1000000; - private static AtomicInteger NAME_SUFFIX = new AtomicInteger(0); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTestBuilder.class); + private static final int MAX_SUFFIX_VALUE = 1000000; + private static AtomicInteger NAME_SUFFIX = new AtomicInteger(0); - private static String generateUniqueName() { - int nextName = NAME_SUFFIX.incrementAndGet(); - if (nextName >= MAX_SUFFIX_VALUE) { - throw new IllegalStateException("Used up all unique names"); - } - return "T" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1); + private static String generateUniqueName() { + int nextName = NAME_SUFFIX.incrementAndGet(); + if (nextName >= MAX_SUFFIX_VALUE) { + throw new IllegalStateException("Used up all unique names"); } - - /** - * @return a formatted string with nullable info - * for e.g "COL1 VARCHAR (NOT NULL), COL2 VARCHAR (NOT NULL), COL3 VARCHAR (NOT NULL)" - */ - private static String getColumnsAsString(List columns, List types, - boolean isPK) { - assert (columns.size() == types.size()); - - Joiner columnJoiner = Joiner.on(","); - Joiner typeJoiner = Joiner.on(" "); - List columnDefinitions = Lists.newArrayList(); - for (int colIndex = 0; colIndex < columns.size(); colIndex++) { - String column = columns.get(colIndex); - String datatype = types.get(colIndex); - if ((column != null) && (!column.isEmpty())) { - String - columnWithType = - isPK ? - typeJoiner.join(column, datatype, "NOT NULL") : - typeJoiner.join(column, datatype); - columnDefinitions.add(columnWithType); - } - } - return columnJoiner.join(columnDefinitions); + return "T" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1); + } + + /** + * @return a formatted string with nullable info for e.g "COL1 VARCHAR (NOT NULL), COL2 VARCHAR + * (NOT NULL), COL3 VARCHAR (NOT NULL)" + */ + private static String getColumnsAsString(List columns, List types, boolean isPK) { + assert (columns.size() == types.size()); + + Joiner columnJoiner = Joiner.on(","); + Joiner typeJoiner = Joiner.on(" "); + List columnDefinitions = Lists.newArrayList(); + for (int colIndex = 0; colIndex < columns.size(); colIndex++) { + String column = columns.get(colIndex); + String datatype = types.get(colIndex); + if ((column != null) && (!column.isEmpty())) { + String columnWithType = + isPK ? typeJoiner.join(column, datatype, "NOT NULL") : typeJoiner.join(column, datatype); + columnDefinitions.add(columnWithType); + } } - - /** - * @return a formatted string with sort info - * for e.g "PK_COL1, PK_COL2 , PK_COL3 DESC" - */ - private static String getPKColumnsWithSort(List pkColumns, List sortTypes) { - assert (sortTypes == null || sortTypes.size() == pkColumns.size()); - - Joiner pkColumnJoiner = Joiner.on(","); - Joiner sortTypeJoiner = Joiner.on(" "); - List pkColumnDefinitions = Lists.newArrayList(); - for (int colIndex = 0; colIndex < pkColumns.size(); colIndex++) { - String column = pkColumns.get(colIndex); - String sorttype = sortTypes == null ? null : sortTypes.get(colIndex); - if ((column != null) && (!column.isEmpty())) { - String - columnWithSortType = - sorttype == null || sorttype.isEmpty() ? 
- column : - sortTypeJoiner.join(column, sorttype); - pkColumnDefinitions.add(columnWithSortType); - } - } - return pkColumnJoiner.join(pkColumnDefinitions); + return columnJoiner.join(columnDefinitions); + } + + /** Returns a formatted string with sort info for e.g "PK_COL1, PK_COL2 , PK_COL3 DESC" */ + private static String getPKColumnsWithSort(List pkColumns, List sortTypes) { + assert (sortTypes == null || sortTypes.size() == pkColumns.size()); + + Joiner pkColumnJoiner = Joiner.on(","); + Joiner sortTypeJoiner = Joiner.on(" "); + List pkColumnDefinitions = Lists.newArrayList(); + for (int colIndex = 0; colIndex < pkColumns.size(); colIndex++) { + String column = pkColumns.get(colIndex); + String sorttype = sortTypes == null ? null : sortTypes.get(colIndex); + if ((column != null) && (!column.isEmpty())) { + String columnWithSortType = + sorttype == null || sorttype.isEmpty() ? column : sortTypeJoiner.join(column, sorttype); + pkColumnDefinitions.add(columnWithSortType); + } } - - /** - * @return a formatted string with CFs - * for e.g "A.COL1, B.COL2, C.COl3" - */ - private static String getFQColumnsAsString(List columns, List families) { - Joiner columnJoiner = Joiner.on(","); - return columnJoiner.join(getFQColumnsAsList(columns, families)); - } - - private static List getFQColumnsAsList(List columns, List families) { - assert (columns.size() == families.size()); - - Joiner familyJoiner = Joiner.on("."); - List columnDefinitions = Lists.newArrayList(); - int colIndex = 0; - for (String family : families) { - String column = columns.get(colIndex++); - if ((column != null) && (!column.isEmpty())) { - columnDefinitions.add(((family != null) && (!family.isEmpty())) ? - familyJoiner.join(family, column) : - column); - } - } - return columnDefinitions; + return pkColumnJoiner.join(pkColumnDefinitions); + } + + /** Returns a formatted string with CFs for e.g "A.COL1, B.COL2, C.COl3" */ + private static String getFQColumnsAsString(List columns, List families) { + Joiner columnJoiner = Joiner.on(","); + return columnJoiner.join(getFQColumnsAsList(columns, families)); + } + + private static List getFQColumnsAsList(List columns, List families) { + assert (columns.size() == families.size()); + + Joiner familyJoiner = Joiner.on("."); + List columnDefinitions = Lists.newArrayList(); + int colIndex = 0; + for (String family : families) { + String column = columns.get(colIndex++); + if ((column != null) && (!column.isEmpty())) { + columnDefinitions.add( + ((family != null) && (!family.isEmpty())) ? 
familyJoiner.join(family, column) : column); + } } - - /** - * @return a formatted string with data types - * for e.g => A.COL1 VARCHAR, A.COL2 VARCHAR, B.COL3 VARCHAR - */ - private static String getFQColumnsAsString(List columns, List families, - List types) { - Joiner columnJoiner = Joiner.on(","); - return columnJoiner.join(getFQColumnsAsList(columns, families, types)); + return columnDefinitions; + } + + /** + * @return a formatted string with data types for e.g => A.COL1 VARCHAR, A.COL2 VARCHAR, B.COL3 + * VARCHAR + */ + private static String getFQColumnsAsString(List columns, List families, + List types) { + Joiner columnJoiner = Joiner.on(","); + return columnJoiner.join(getFQColumnsAsList(columns, families, types)); + } + + /* + * ----------------- Helper methods ----------------- + */ + + private static List getFQColumnsAsList(List columns, List families, + List types) { + assert (columns.size() == families.size()); + + Joiner familyJoiner = Joiner.on("."); + Joiner typeJoiner = Joiner.on(" "); + List columnDefinitions = Lists.newArrayList(); + int colIndex = 0; + for (String family : families) { + String column = columns.get(colIndex); + String datatype = types.get(colIndex); + colIndex++; + if ((column != null) && (!column.isEmpty())) { + String columnWithType = typeJoiner.join(column, datatype); + columnDefinitions.add(((family != null) && (!family.isEmpty())) + ? familyJoiner.join(family, columnWithType) + : columnWithType); + } } + return columnDefinitions; + } - /* - * ----------------- - * Helper methods - * ----------------- - */ + // Test Data supplier interface for test writers to provide custom data. + public interface DataSupplier { + // return the values to be used for upserting data into the underlying entity. + List getValues(int rowIndex) throws Exception; + } - private static List getFQColumnsAsList(List columns, List families, - List types) { - assert (columns.size() == families.size()); - - Joiner familyJoiner = Joiner.on("."); - Joiner typeJoiner = Joiner.on(" "); - List columnDefinitions = Lists.newArrayList(); - int colIndex = 0; - for (String family : families) { - String column = columns.get(colIndex); - String datatype = types.get(colIndex); - colIndex++; - if ((column != null) && (!column.isEmpty())) { - String columnWithType = typeJoiner.join(column, datatype); - columnDefinitions.add(((family != null) && (!family.isEmpty())) ? - familyJoiner.join(family, columnWithType) : - columnWithType); - } - } - return columnDefinitions; - } + // A Data Reader to be used in tests to read test data from test db. + public interface DataReader { + // returns the columns that need to be projected during DML queries, + List getValidationColumns(); - // Test Data supplier interface for test writers to provide custom data. - public interface DataSupplier { - // return the values to be used for upserting data into the underlying entity. - List getValues(int rowIndex) throws Exception; - } + void setValidationColumns(List validationColumns); - // A Data Reader to be used in tests to read test data from test db. 
- public interface DataReader { - // returns the columns that need to be projected during DML queries, - List getValidationColumns(); + // returns the columns that represent the pk/unique key for this data set, + List getRowKeyColumns(); - void setValidationColumns(List validationColumns); + void setRowKeyColumns(List rowKeyColumns); - // returns the columns that represent the pk/unique key for this data set, - List getRowKeyColumns(); + // returns the connection to be used for DML queries. + Connection getConnection(); - void setRowKeyColumns(List rowKeyColumns); + // The connection to be used for the Reader. + // Please make sure the connection is closed after use or called with try-with resources. + void setConnection(Connection connection); - // returns the connection to be used for DML queries. - Connection getConnection(); + // returns the target entity - whether to use the table, global-view, the tenant-view or + // an index table. + String getTargetEntity(); - // The connection to be used for the Reader. - // Please make sure the connection is closed after use or called with try-with resources. - void setConnection(Connection connection); + void setTargetEntity(String targetEntity); - // returns the target entity - whether to use the table, global-view, the tenant-view or - // an index table. - String getTargetEntity(); + // Build the DML statement and return the SQL string. + String getDML(); - void setTargetEntity(String targetEntity); + String setDML(String dmlStatement); - // Build the DML statement and return the SQL string. - String getDML(); + // template method to read a batch of rows using the above sql. + void readRows() throws SQLException; - String setDML(String dmlStatement); + // Get the data that was read as a Table. + Table getDataTable(); + } - // template method to read a batch of rows using the above sql. - void readRows() throws SQLException; + // A Data Writer to be used in tests to upsert sample data (@see TestDataSupplier) into the sample + // schema. + public interface DataWriter { + // returns the columns that need to be upserted, + // should match the #columns in TestDataSupplier::getValues(). + List getUpsertColumns(); - // Get the data that was read as a Table. - Table getDataTable(); - } + void setUpsertColumns(List upsertColumns); - // A Data Writer to be used in tests to upsert sample data (@see TestDataSupplier) into the sample schema. - public interface DataWriter { - // returns the columns that need to be upserted, - // should match the #columns in TestDataSupplier::getValues(). - List getUpsertColumns(); + // returns the partial/overridden set of columns to be used for upserts. + List getColumnPositionsToUpdate(); - void setUpsertColumns(List upsertColumns); + void setColumnPositionsToUpdate(List columnPositionsToUpdate); - // returns the partial/overridden set of columns to be used for upserts. - List getColumnPositionsToUpdate(); + // returns the connection to be used for upserting rows. + Connection getConnection(); - void setColumnPositionsToUpdate(List columnPositionsToUpdate); + // The connection to be used for the Writer. + // Please make sure the connection is closed after use or called with try-with resources. + void setConnection(Connection connection); - // returns the connection to be used for upserting rows. - Connection getConnection(); + // returns the target entity - whether to use the table, global-view or the tenant-view. + String getTargetEntity(); - // The connection to be used for the Writer. 
- // Please make sure the connection is closed after use or called with try-with resources. - void setConnection(Connection connection); + void setTargetEntity(String targetEntity); - // returns the target entity - whether to use the table, global-view or the tenant-view. - String getTargetEntity(); + // returns the columns that is set asthe pk/unique key for this data set, + List getRowKeyColumns(); - void setTargetEntity(String targetEntity); + void setRowKeyColumns(List rowKeyColumns); - // returns the columns that is set asthe pk/unique key for this data set, - List getRowKeyColumns(); + // return the data provider for this writer + DataSupplier getTestDataSupplier(); - void setRowKeyColumns(List rowKeyColumns); + void setDataSupplier(DataSupplier dataSupplier); - // return the data provider for this writer - DataSupplier getTestDataSupplier(); + // template method to upsert a single row using the above info. + List upsertRow(int rowIndex) throws Exception; - void setDataSupplier(DataSupplier dataSupplier); + // template method to upsert a batch of rows using the above info. + void upsertRows(int startRowIndex, int numRows) throws Exception; - // template method to upsert a single row using the above info. - List upsertRow(int rowIndex) throws Exception; + // Get the data that was written as a Table + Table getDataTable(); + } - // template method to upsert a batch of rows using the above info. - void upsertRows(int startRowIndex, int numRows) throws Exception; + // Provides template method for returning result set + public static abstract class AbstractDataReader implements DataReader { + Table dataTable = TreeBasedTable.create(); - // Get the data that was written as a Table - Table getDataTable(); + public Table getDataTable() { + return dataTable; } - // Provides template method for returning result set - public static abstract class AbstractDataReader implements DataReader { - Table dataTable = TreeBasedTable.create(); + // Read batch of rows + public void readRows() throws SQLException { + dataTable.clear(); + dataTable = TreeBasedTable.create(); + String sql = getDML(); + Connection connection = getConnection(); + try (Statement stmt = connection.createStatement()) { + + final PhoenixStatement pstmt = stmt.unwrap(PhoenixStatement.class); + ResultSet rs = pstmt.executeQuery(sql); + List cols = getValidationColumns(); + List values = Lists.newArrayList(); + Set rowKeys = getRowKeyColumns() == null || getRowKeyColumns().isEmpty() + ? Sets. newHashSet() + : Sets.newHashSet(getRowKeyColumns()); + List rowKeyParts = Lists.newArrayList(); + while (rs.next()) { + for (String col : cols) { + Object val = rs.getObject(col); + values.add(val); + if (rowKeys.isEmpty()) { + rowKeyParts.add(val.toString()); + } else if (rowKeys.contains(col)) { + rowKeyParts.add(val.toString()); + } + } + + String rowKey = Joiner.on("-").join(rowKeyParts); + for (int v = 0; v < values.size(); v++) { + dataTable.put(rowKey, cols.get(v), values.get(v)); + } + values.clear(); + rowKeyParts.clear(); + } + LOGGER + .info(String.format("########## sql: %s => rows: %d", sql, dataTable.rowKeySet().size())); + + } catch (SQLException e) { + LOGGER.error(String.format(" Error [%s] initializing Reader. ", e.getMessage())); + throw e; + } + } + } - public Table getDataTable() { - return dataTable; - } + // An implementation of the DataReader. 
+ public static class BasicDataReader extends AbstractDataReader { - // Read batch of rows - public void readRows() throws SQLException { - dataTable.clear(); - dataTable = TreeBasedTable.create(); - String sql = getDML(); - Connection connection = getConnection(); - try (Statement stmt = connection.createStatement()) { - - final PhoenixStatement pstmt = stmt.unwrap(PhoenixStatement.class); - ResultSet rs = pstmt.executeQuery(sql); - List cols = getValidationColumns(); - List values = Lists.newArrayList(); - Set rowKeys = getRowKeyColumns() == null || getRowKeyColumns().isEmpty() ? - Sets.newHashSet() : - Sets.newHashSet(getRowKeyColumns()); - List rowKeyParts = Lists.newArrayList(); - while (rs.next()) { - for (String col : cols) { - Object val = rs.getObject(col); - values.add(val); - if (rowKeys.isEmpty()) { - rowKeyParts.add(val.toString()); - } - else if (rowKeys.contains(col)) { - rowKeyParts.add(val.toString()); - } - } - - String rowKey = Joiner.on("-").join(rowKeyParts); - for (int v = 0; v < values.size(); v++) { - dataTable.put(rowKey,cols.get(v), values.get(v)); - } - values.clear(); - rowKeyParts.clear(); - } - LOGGER.info(String.format("########## sql: %s => rows: %d", - sql, dataTable.rowKeySet().size())); - - } catch (SQLException e) { - LOGGER.error(String.format(" Error [%s] initializing Reader. ", - e.getMessage())); - throw e; - } - } + Connection connection; + String targetEntity; + String dmlStatement; + List validationColumns; + List rowKeyColumns; + + @Override + public String getDML() { + return this.dmlStatement; } - // An implementation of the DataReader. - public static class BasicDataReader extends AbstractDataReader { + @Override + public String setDML(String dmlStatement) { + return this.dmlStatement = dmlStatement; + } - Connection connection; - String targetEntity; - String dmlStatement; - List validationColumns; - List rowKeyColumns; + // returns the columns that need to be projected during DML queries, + @Override + public List getValidationColumns() { + return this.validationColumns; + } + @Override + public void setValidationColumns(List validationColumns) { + this.validationColumns = validationColumns; + } - @Override public String getDML() { - return this.dmlStatement; - } + // returns the columns that is set as the pk/unique key for this data set, + @Override + public List getRowKeyColumns() { + return this.rowKeyColumns; + } - @Override public String setDML(String dmlStatement) { - return this.dmlStatement = dmlStatement; - } + @Override + public void setRowKeyColumns(List rowKeyColumns) { + this.rowKeyColumns = rowKeyColumns; + } - // returns the columns that need to be projected during DML queries, - @Override public List getValidationColumns() { - return this.validationColumns; - } + @Override + public Connection getConnection() { + return connection; + } - @Override public void setValidationColumns(List validationColumns) { - this.validationColumns = validationColumns; - } + @Override + public void setConnection(Connection connection) { + this.connection = connection; + } - // returns the columns that is set as the pk/unique key for this data set, - @Override public List getRowKeyColumns() { - return this.rowKeyColumns; - } + @Override + public String getTargetEntity() { + return targetEntity; + } - @Override public void setRowKeyColumns(List rowKeyColumns) { - this.rowKeyColumns = rowKeyColumns; - } + @Override + public void setTargetEntity(String targetEntity) { + this.targetEntity = targetEntity; + } + } - @Override public Connection 
getConnection() { - return connection; - } + // Provides template method for upserting rows + public static abstract class AbstractDataWriter implements DataWriter { + Table dataTable = TreeBasedTable.create(); - @Override public void setConnection(Connection connection) { - this.connection = connection; - } + public Table getDataTable() { + return dataTable; + } - @Override public String getTargetEntity() { - return targetEntity; - } + // Upsert one row. + public List upsertRow(int rowIndex) throws Exception { + List upsertColumns = Lists.newArrayList(); + List upsertValues = Lists.newArrayList(); + + List rowValues = null; + rowValues = getTestDataSupplier().getValues(rowIndex); + if (getColumnPositionsToUpdate().isEmpty()) { + upsertColumns.addAll(getUpsertColumns()); + upsertValues.addAll(rowValues); + } else { + List columnsToUpdate = getUpsertColumns(); + for (int i : getColumnPositionsToUpdate()) { + upsertColumns.add(columnsToUpdate.get(i)); + upsertValues.add(rowValues.get(i)); + } + } + StringBuilder buf = new StringBuilder("UPSERT INTO "); + buf.append(getTargetEntity()); + buf.append(" (").append(Joiner.on(",").join(upsertColumns)).append(") VALUES("); + + for (int i = 0; i < upsertValues.size(); i++) { + buf.append("?,"); + } + buf.setCharAt(buf.length() - 1, ')'); + LOGGER.debug(buf.toString()); + + Connection connection = getConnection(); + try (PreparedStatement stmt = connection.prepareStatement(buf.toString())) { + for (int i = 0; i < upsertValues.size(); i++) { + // TODO : handle null values + stmt.setObject(i + 1, upsertValues.get(i)); + } + stmt.execute(); + connection.commit(); + } + return upsertValues; + } - @Override public void setTargetEntity(String targetEntity) { - this.targetEntity = targetEntity; - } + // Upsert batch of rows. + public void upsertRows(int startRowIndex, int numRows) throws Exception { + dataTable.clear(); + dataTable = TreeBasedTable.create(); + List upsertColumns = Lists.newArrayList(); + List rowKeyPositions = Lists.newArrayList(); + + // Figure out the upsert columns based on whether this is a full or partial row update. + boolean isFullRowUpdate = getColumnPositionsToUpdate().isEmpty(); + if (isFullRowUpdate) { + upsertColumns.addAll(getUpsertColumns()); + } else { + List tmpColumns = getUpsertColumns(); + for (int i : getColumnPositionsToUpdate()) { + upsertColumns.add(tmpColumns.get(i)); + } + } + + Set rowKeys = getRowKeyColumns() == null || getRowKeyColumns().isEmpty() + ? Sets. 
newHashSet(getUpsertColumns()) + : Sets.newHashSet(getRowKeyColumns()); + + StringBuilder buf = new StringBuilder("UPSERT INTO "); + buf.append(getTargetEntity()); + buf.append(" (").append(Joiner.on(",").join(upsertColumns)).append(") VALUES("); + for (int i = 0; i < upsertColumns.size(); i++) { + buf.append("?,"); + if (rowKeys.contains(upsertColumns.get(i))) { + rowKeyPositions.add(i); + } + } + buf.setCharAt(buf.length() - 1, ')'); + LOGGER.debug(buf.toString()); + + Connection connection = getConnection(); + try (PreparedStatement stmt = connection.prepareStatement(buf.toString())) { + + for (int r = startRowIndex; r < startRowIndex + numRows; r++) { + List upsertValues = Lists.newArrayList(); + List rowValues = null; + rowValues = getTestDataSupplier().getValues(r); + if (isFullRowUpdate) { + upsertValues.addAll(rowValues); + } else { + for (int c : getColumnPositionsToUpdate()) { + upsertValues.add(rowValues.get(c)); + } + } + + List rowKeyParts = Lists.newArrayList(); + for (int position : rowKeyPositions) { + if (upsertValues.get(position) != null) { + rowKeyParts.add(upsertValues.get(position).toString()); + } + } + String rowKey = Joiner.on("-").join(rowKeyParts); + + for (int v = 0; v < upsertValues.size(); v++) { + // TODO : handle null values + stmt.setObject(v + 1, upsertValues.get(v)); + if (upsertValues.get(v) != null) { + dataTable.put(rowKey, upsertColumns.get(v), upsertValues.get(v)); + } + } + stmt.addBatch(); + } + stmt.executeBatch(); + connection.commit(); + } } + } - // Provides template method for upserting rows - public static abstract class AbstractDataWriter implements DataWriter { - Table dataTable = TreeBasedTable.create(); + // An implementation of the DataWriter. + public static class BasicDataWriter extends AbstractDataWriter { + List upsertColumns = Lists.newArrayList(); + List columnPositionsToUpdate = Lists.newArrayList(); + DataSupplier dataSupplier; + Connection connection; + String targetEntity; + List rowKeyColumns; - public Table getDataTable() { - return dataTable; - } + @Override + public List getUpsertColumns() { + return upsertColumns; + } - // Upsert one row. 
- public List upsertRow(int rowIndex) throws Exception { - List upsertColumns = Lists.newArrayList(); - List upsertValues = Lists.newArrayList(); - - List rowValues = null; - rowValues = getTestDataSupplier().getValues(rowIndex); - if (getColumnPositionsToUpdate().isEmpty()) { - upsertColumns.addAll(getUpsertColumns()); - upsertValues.addAll(rowValues); - } else { - List columnsToUpdate = getUpsertColumns(); - for (int i : getColumnPositionsToUpdate()) { - upsertColumns.add(columnsToUpdate.get(i)); - upsertValues.add(rowValues.get(i)); - } - } - StringBuilder buf = new StringBuilder("UPSERT INTO "); - buf.append(getTargetEntity()); - buf.append(" (").append(Joiner.on(",").join(upsertColumns)).append(") VALUES("); + @Override + public void setUpsertColumns(List upsertColumns) { + this.upsertColumns = upsertColumns; + } - for (int i = 0; i < upsertValues.size(); i++) { - buf.append("?,"); - } - buf.setCharAt(buf.length() - 1, ')'); - LOGGER.debug(buf.toString()); - - Connection connection = getConnection(); - try (PreparedStatement stmt = connection.prepareStatement(buf.toString())) { - for (int i = 0; i < upsertValues.size(); i++) { - //TODO : handle null values - stmt.setObject(i + 1, upsertValues.get(i)); - } - stmt.execute(); - connection.commit(); - } - return upsertValues; - } + @Override + public List getColumnPositionsToUpdate() { + return columnPositionsToUpdate; + } - // Upsert batch of rows. - public void upsertRows(int startRowIndex, int numRows) throws Exception { - dataTable.clear(); - dataTable = TreeBasedTable.create(); - List upsertColumns = Lists.newArrayList(); - List rowKeyPositions = Lists.newArrayList(); - - // Figure out the upsert columns based on whether this is a full or partial row update. - boolean isFullRowUpdate = getColumnPositionsToUpdate().isEmpty(); - if (isFullRowUpdate) { - upsertColumns.addAll(getUpsertColumns()); - } else { - List tmpColumns = getUpsertColumns(); - for (int i : getColumnPositionsToUpdate()) { - upsertColumns.add(tmpColumns.get(i)); - } - } + @Override + public void setColumnPositionsToUpdate(List columnPositionsToUpdate) { + this.columnPositionsToUpdate = columnPositionsToUpdate; + } - Set rowKeys = getRowKeyColumns() == null || getRowKeyColumns().isEmpty() ? 
- Sets.newHashSet(getUpsertColumns()) : - Sets.newHashSet(getRowKeyColumns()); - - StringBuilder buf = new StringBuilder("UPSERT INTO "); - buf.append(getTargetEntity()); - buf.append(" (").append(Joiner.on(",").join(upsertColumns)).append(") VALUES("); - for (int i = 0; i < upsertColumns.size(); i++) { - buf.append("?,"); - if (rowKeys.contains(upsertColumns.get(i))) { - rowKeyPositions.add(i); - } - } - buf.setCharAt(buf.length() - 1, ')'); - LOGGER.debug (buf.toString()); - - Connection connection = getConnection(); - try (PreparedStatement stmt = connection.prepareStatement(buf.toString())) { - - for (int r = startRowIndex; r < startRowIndex + numRows; r++) { - List upsertValues = Lists.newArrayList(); - List rowValues = null; - rowValues = getTestDataSupplier().getValues(r); - if (isFullRowUpdate) { - upsertValues.addAll(rowValues); - } else { - for (int c : getColumnPositionsToUpdate()) { - upsertValues.add(rowValues.get(c)); - } - } - - List rowKeyParts = Lists.newArrayList(); - for (int position : rowKeyPositions) { - if (upsertValues.get(position) != null) { - rowKeyParts.add(upsertValues.get(position).toString()); - } - } - String rowKey = Joiner.on("-").join(rowKeyParts); - - for (int v = 0; v < upsertValues.size(); v++) { - //TODO : handle null values - stmt.setObject(v + 1, upsertValues.get(v)); - if (upsertValues.get(v) != null) { - dataTable.put(rowKey,upsertColumns.get(v), upsertValues.get(v)); - } - } - stmt.addBatch(); - } - stmt.executeBatch(); - connection.commit(); - } - } + @Override + public Connection getConnection() { + return connection; + } + @Override + public void setConnection(Connection connection) { + this.connection = connection; } - // An implementation of the DataWriter. - public static class BasicDataWriter extends AbstractDataWriter { - List upsertColumns = Lists.newArrayList(); - List columnPositionsToUpdate = Lists.newArrayList(); - DataSupplier dataSupplier; - Connection connection; - String targetEntity; - List rowKeyColumns; + @Override + public String getTargetEntity() { + return targetEntity; + } - @Override public List getUpsertColumns() { - return upsertColumns; - } + @Override + public void setTargetEntity(String targetEntity) { + this.targetEntity = targetEntity; + } - @Override public void setUpsertColumns(List upsertColumns) { - this.upsertColumns = upsertColumns; - } + // returns the columns that is set as the pk/unique key for this data set, + @Override + public List getRowKeyColumns() { + return this.rowKeyColumns; + } - @Override public List getColumnPositionsToUpdate() { - return columnPositionsToUpdate; - } + @Override + public void setRowKeyColumns(List rowKeyColumns) { + this.rowKeyColumns = rowKeyColumns; + } - @Override public void setColumnPositionsToUpdate(List columnPositionsToUpdate) { - this.columnPositionsToUpdate = columnPositionsToUpdate; - } + @Override + public DataSupplier getTestDataSupplier() { + return dataSupplier; + } - @Override public Connection getConnection() { - return connection; - } + @Override + public void setDataSupplier(DataSupplier dataSupplier) { + this.dataSupplier = dataSupplier; + } - @Override public void setConnection(Connection connection) { - this.connection = connection; - } + } + + /** + * Test SchemaBuilder defaults. 
+ */ + public static class DDLDefaults { + public static final int MAX_ROWS = 10000; + public static final List TABLE_PK_TYPES = asList("CHAR(15)", "CHAR(3)"); + public static final List GLOBAL_VIEW_PK_TYPES = asList("CHAR(15)"); + public static final List TENANT_VIEW_PK_TYPES = asList("CHAR(15)"); + + public static final List COLUMN_TYPES = asList("VARCHAR", "VARCHAR", "VARCHAR"); + public static final List TABLE_COLUMNS = asList("COL1", "COL2", "COL3"); + public static final List GLOBAL_VIEW_COLUMNS = asList("COL4", "COL5", "COL6"); + public static final List TENANT_VIEW_COLUMNS = asList("COL7", "COL8", "COL9"); + + public static final List TABLE_COLUMN_FAMILIES = asList(null, null, null); + public static final List GLOBAL_VIEW_COLUMN_FAMILIES = asList(null, null, null); + public static final List TENANT_VIEW_COLUMN_FAMILIES = asList(null, null, null); + + public static final List TABLE_PK_COLUMNS = asList("OID", "KP"); + public static final List GLOBAL_VIEW_PK_COLUMNS = asList("ID"); + public static final List TENANT_VIEW_PK_COLUMNS = asList("ZID"); + + public static final List TABLE_INDEX_COLUMNS = asList("COL1"); + public static final List TABLE_INCLUDE_COLUMNS = asList("COL3"); + + public static final List GLOBAL_VIEW_INDEX_COLUMNS = asList("COL4"); + public static final List GLOBAL_VIEW_INCLUDE_COLUMNS = asList("COL6"); + + public static final List TENANT_VIEW_INDEX_COLUMNS = asList("COL9"); + public static final List TENANT_VIEW_INCLUDE_COLUMNS = asList("COL7"); + + public static final String DEFAULT_MUTABLE_TABLE_PROPS = + "COLUMN_ENCODED_BYTES=0,DEFAULT_COLUMN_FAMILY='Z'"; + public static final String DEFAULT_IMMUTABLE_TABLE_PROPS = "DEFAULT_COLUMN_FAMILY='Z'"; + public static final String DEFAULT_TABLE_INDEX_PROPS = ""; + public static final String DEFAULT_GLOBAL_VIEW_PROPS = ""; + public static final String DEFAULT_GLOBAL_VIEW_INDEX_PROPS = ""; + public static final String DEFAULT_TENANT_VIEW_PROPS = ""; + public static final String DEFAULT_TENANT_VIEW_INDEX_PROPS = ""; + public static final String DEFAULT_KP = "ECZ"; + public static final String DEFAULT_SCHEMA_NAME = "TEST_ENTITY"; + public static final String DEFAULT_TENANT_ID_FMT = "00D0t%04d%s"; + public static final String DEFAULT_ALT_TENANT_ID_FMT = "00T0t%04d%s"; + public static final String DEFAULT_UNIQUE_PREFIX_TABLE_NAME_FMT = "T_%s_%s"; + public static final String DEFAULT_UNIQUE_PREFIX_GLOBAL_VIEW_NAME_FMT = "GV_%s_%s"; + public static final String DEFAULT_UNIQUE_TABLE_NAME_FMT = "T_%s"; + public static final String DEFAULT_UNIQUE_GLOBAL_VIEW_NAME_FMT = "GV_%s"; + + public static final String DEFAULT_CONNECT_URL = "jdbc:phoenix:localhost"; + + } + + /** + * Schema builder for test writers to prepare various test scenarios. It can be used to define the + * following type of schemas - 1. Simple Table. 2. Table with Global and Tenant Views. 3. Table + * with Tenant Views The above entities can be supplemented with indexes (global or local) The + * builder also provides some reasonable defaults, but can be customized/overridden for specific + * test requirements. + */ + public static class SchemaBuilder { + private static final AtomicInteger TENANT_COUNTER = new AtomicInteger(0); + // variables holding the various options. 
+ boolean tableEnabled = false; + boolean globalViewEnabled = false; + boolean tenantViewEnabled = false; + boolean tableIndexEnabled = false; + boolean globalViewIndexEnabled = false; + boolean tenantViewIndexEnabled = false; + boolean tableCreated = false; + boolean globalViewCreated = false; + boolean tenantViewCreated = false; + boolean tableIndexCreated = false; + boolean globalViewIndexCreated = false; + boolean tenantViewIndexCreated = false; + String url; + String entityKeyPrefix; + private String entityTableName; + private String entityGlobalViewName; + private String entityTenantViewName; + private String entityTableIndexName; + private String entityGlobalViewIndexName; + private String entityTenantViewIndexName; + PTable baseTable; + ConnectOptions connectOptions; + TableOptions tableOptions; + GlobalViewOptions globalViewOptions; + TenantViewOptions tenantViewOptions; + TableIndexOptions tableIndexOptions; + GlobalViewIndexOptions globalViewIndexOptions; + TenantViewIndexOptions tenantViewIndexOptions; + OtherOptions otherOptions; + DataOptions dataOptions; + + public SchemaBuilder(String url) { + this.url = url; + } - @Override public String getTargetEntity() { - return targetEntity; - } + public PTable getBaseTable() { + return baseTable; + } - @Override public void setTargetEntity(String targetEntity) { - this.targetEntity = targetEntity; - } + public void setBaseTable(PTable baseTable) { + this.baseTable = baseTable; + } - // returns the columns that is set as the pk/unique key for this data set, - @Override public List getRowKeyColumns() { - return this.rowKeyColumns; - } + public String getUrl() { + return this.url; + } - @Override public void setRowKeyColumns(List rowKeyColumns) { - this.rowKeyColumns = rowKeyColumns; - } + public boolean isTableEnabled() { + return tableEnabled; + } - @Override public DataSupplier getTestDataSupplier() { - return dataSupplier; - } + public boolean isGlobalViewEnabled() { + return globalViewEnabled; + } - @Override public void setDataSupplier(DataSupplier dataSupplier) { - this.dataSupplier = dataSupplier; - } + public boolean isTenantViewEnabled() { + return tenantViewEnabled; + } + public boolean isTableIndexEnabled() { + return tableIndexEnabled; } - /** - * Test SchemaBuilder defaults. 
- */ - public static class DDLDefaults { - public static final int MAX_ROWS = 10000; - public static final List TABLE_PK_TYPES = asList("CHAR(15)", "CHAR(3)"); - public static final List GLOBAL_VIEW_PK_TYPES = asList("CHAR(15)"); - public static final List TENANT_VIEW_PK_TYPES = asList("CHAR(15)"); - - public static final List COLUMN_TYPES = asList("VARCHAR", "VARCHAR", "VARCHAR"); - public static final List TABLE_COLUMNS = asList("COL1", "COL2", "COL3"); - public static final List GLOBAL_VIEW_COLUMNS = asList("COL4", "COL5", "COL6"); - public static final List TENANT_VIEW_COLUMNS = asList("COL7", "COL8", "COL9"); - - public static final List TABLE_COLUMN_FAMILIES = asList(null, null, null); - public static final List GLOBAL_VIEW_COLUMN_FAMILIES = asList(null, null, null); - public static final List TENANT_VIEW_COLUMN_FAMILIES = asList(null, null, null); - - public static final List TABLE_PK_COLUMNS = asList("OID", "KP"); - public static final List GLOBAL_VIEW_PK_COLUMNS = asList("ID"); - public static final List TENANT_VIEW_PK_COLUMNS = asList("ZID"); - - public static final List TABLE_INDEX_COLUMNS = asList("COL1"); - public static final List TABLE_INCLUDE_COLUMNS = asList("COL3"); - - public static final List GLOBAL_VIEW_INDEX_COLUMNS = asList("COL4"); - public static final List GLOBAL_VIEW_INCLUDE_COLUMNS = asList("COL6"); - - public static final List TENANT_VIEW_INDEX_COLUMNS = asList("COL9"); - public static final List TENANT_VIEW_INCLUDE_COLUMNS = asList("COL7"); - - public static final String - DEFAULT_MUTABLE_TABLE_PROPS = - "COLUMN_ENCODED_BYTES=0,DEFAULT_COLUMN_FAMILY='Z'"; - public static final String DEFAULT_IMMUTABLE_TABLE_PROPS = "DEFAULT_COLUMN_FAMILY='Z'"; - public static final String DEFAULT_TABLE_INDEX_PROPS = ""; - public static final String DEFAULT_GLOBAL_VIEW_PROPS = ""; - public static final String DEFAULT_GLOBAL_VIEW_INDEX_PROPS = ""; - public static final String DEFAULT_TENANT_VIEW_PROPS = ""; - public static final String DEFAULT_TENANT_VIEW_INDEX_PROPS = ""; - public static final String DEFAULT_KP = "ECZ"; - public static final String DEFAULT_SCHEMA_NAME = "TEST_ENTITY"; - public static final String DEFAULT_TENANT_ID_FMT = "00D0t%04d%s"; - public static final String DEFAULT_ALT_TENANT_ID_FMT = "00T0t%04d%s"; - public static final String DEFAULT_UNIQUE_PREFIX_TABLE_NAME_FMT = "T_%s_%s"; - public static final String DEFAULT_UNIQUE_PREFIX_GLOBAL_VIEW_NAME_FMT = "GV_%s_%s"; - public static final String DEFAULT_UNIQUE_TABLE_NAME_FMT = "T_%s"; - public static final String DEFAULT_UNIQUE_GLOBAL_VIEW_NAME_FMT = "GV_%s"; - - public static final String DEFAULT_CONNECT_URL = "jdbc:phoenix:localhost"; + public boolean isGlobalViewIndexEnabled() { + return globalViewIndexEnabled; + } + public boolean isTenantViewIndexEnabled() { + return tenantViewIndexEnabled; } - /** - * Schema builder for test writers to prepare various test scenarios. - * It can be used to define the following type of schemas - - * 1. Simple Table. - * 2. Table with Global and Tenant Views. - * 3. Table with Tenant Views - * The above entities can be supplemented with indexes (global or local) - * The builder also provides some reasonable defaults, but can be customized/overridden - * for specific test requirements. + /* + ***************************** Setters and Getters */ - public static class SchemaBuilder { - private static final AtomicInteger TENANT_COUNTER = new AtomicInteger(0); - // variables holding the various options. 
- boolean tableEnabled = false; - boolean globalViewEnabled = false; - boolean tenantViewEnabled = false; - boolean tableIndexEnabled = false; - boolean globalViewIndexEnabled = false; - boolean tenantViewIndexEnabled = false; - boolean tableCreated = false; - boolean globalViewCreated = false; - boolean tenantViewCreated = false; - boolean tableIndexCreated = false; - boolean globalViewIndexCreated = false; - boolean tenantViewIndexCreated = false; - String url; - String entityKeyPrefix; - private String entityTableName; - private String entityGlobalViewName; - private String entityTenantViewName; - private String entityTableIndexName; - private String entityGlobalViewIndexName; - private String entityTenantViewIndexName; - PTable baseTable; - ConnectOptions connectOptions; - TableOptions tableOptions; - GlobalViewOptions globalViewOptions; - TenantViewOptions tenantViewOptions; - TableIndexOptions tableIndexOptions; - GlobalViewIndexOptions globalViewIndexOptions; - TenantViewIndexOptions tenantViewIndexOptions; - OtherOptions otherOptions; - DataOptions dataOptions; - - public SchemaBuilder(String url) { - this.url = url; - } + public void setTableCreated() { + tableCreated = true; + } - public PTable getBaseTable() { - return baseTable; - } + public boolean isTableCreated() { + return tableCreated; + } - public void setBaseTable(PTable baseTable) { - this.baseTable = baseTable; - } + public boolean isGlobalViewCreated() { + return globalViewCreated; + } - public String getUrl() { - return this.url; - } + public boolean isTenantViewCreated() { + return tenantViewCreated; + } - public boolean isTableEnabled() { - return tableEnabled; - } + public boolean isTableIndexCreated() { + return tableIndexCreated; + } - public boolean isGlobalViewEnabled() { - return globalViewEnabled; - } + public boolean isGlobalViewIndexCreated() { + return globalViewIndexCreated; + } - public boolean isTenantViewEnabled() { - return tenantViewEnabled; - } + public boolean isTenantViewIndexCreated() { + return tenantViewIndexCreated; + } - public boolean isTableIndexEnabled() { - return tableIndexEnabled; - } + public String getEntityKeyPrefix() { + return entityKeyPrefix; + } - public boolean isGlobalViewIndexEnabled() { - return globalViewIndexEnabled; - } + public String getEntityTableName() { + return entityTableName; + } - public boolean isTenantViewIndexEnabled() { - return tenantViewIndexEnabled; - } + public String getEntityGlobalViewName() { + return entityGlobalViewName; + } - /* - ***************************** - * Setters and Getters - ***************************** - */ - public void setTableCreated() { - tableCreated = true; - } + public String getEntityTenantViewName() { + return entityTenantViewName; + } - public boolean isTableCreated() { - return tableCreated; - } + public String getEntityTableIndexName() { + return entityTableIndexName; + } - public boolean isGlobalViewCreated() { - return globalViewCreated; - } + public String getEntityGlobalViewIndexName() { + return entityGlobalViewIndexName; + } - public boolean isTenantViewCreated() { - return tenantViewCreated; - } + public String getEntityTenantViewIndexName() { + return entityTenantViewIndexName; + } - public boolean isTableIndexCreated() { - return tableIndexCreated; - } + public String getPhysicalTableName(boolean isNamespaceEnabled) { + return SchemaUtil + .getPhysicalTableName(Bytes.toBytes(getEntityTableName()), isNamespaceEnabled) + .getNameAsString(); + } - public boolean isGlobalViewIndexCreated() { - return 
globalViewIndexCreated; - } + public String getPhysicalTableIndexName(boolean isNamespaceEnabled) { + return SchemaUtil + .getPhysicalTableName(Bytes.toBytes(getEntityTableIndexName()), isNamespaceEnabled) + .getNameAsString(); + } - public boolean isTenantViewIndexCreated() { - return tenantViewIndexCreated; - } + public ConnectOptions getConnectOptions() { + return connectOptions; + } - public String getEntityKeyPrefix() { - return entityKeyPrefix; - } + public TableOptions getTableOptions() { + return tableOptions; + } - public String getEntityTableName() { - return entityTableName; - } + public GlobalViewOptions getGlobalViewOptions() { + return globalViewOptions; + } - public String getEntityGlobalViewName() { - return entityGlobalViewName; - } + public TenantViewOptions getTenantViewOptions() { + return tenantViewOptions; + } - public String getEntityTenantViewName() { - return entityTenantViewName; - } + public TableIndexOptions getTableIndexOptions() { + return tableIndexOptions; + } - public String getEntityTableIndexName() { - return entityTableIndexName; - } + public GlobalViewIndexOptions getGlobalViewIndexOptions() { + return globalViewIndexOptions; + } - public String getEntityGlobalViewIndexName() { - return entityGlobalViewIndexName; - } + public TenantViewIndexOptions getTenantViewIndexOptions() { + return tenantViewIndexOptions; + } - public String getEntityTenantViewIndexName() { - return entityTenantViewIndexName; - } + public OtherOptions getOtherOptions() { + return otherOptions; + } - public String getPhysicalTableName(boolean isNamespaceEnabled) { - return SchemaUtil.getPhysicalTableName(Bytes.toBytes(getEntityTableName()), - isNamespaceEnabled).getNameAsString(); - } + public DataOptions getDataOptions() { + return dataOptions; + } - public String getPhysicalTableIndexName(boolean isNamespaceEnabled) { - return SchemaUtil.getPhysicalTableName(Bytes.toBytes(getEntityTableIndexName()), - isNamespaceEnabled).getNameAsString(); - } + // "CREATE TABLE IF NOT EXISTS " + + // tableName + + // "(" + + // dataColumns + + // " CONSTRAINT pk PRIMARY KEY (" + pk + ") + // ) " + + // (dataProps.isEmpty() ? "" : dataProps; + public SchemaBuilder withTableDefaults() { + tableEnabled = true; + tableCreated = false; + tableOptions = TableOptions.withDefaults(); + return this; + } - public ConnectOptions getConnectOptions() { - return connectOptions; - } + // "CREATE TABLE IF NOT EXISTS " + + // tableName + + // "(" + + // dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ") + // ) " + + // (dataProps.isEmpty() ? 
"" : dataProps; + public SchemaBuilder withTableOptions(TableOptions options) { + tableEnabled = true; + tableCreated = false; + tableOptions = options; + return this; + } - public TableOptions getTableOptions() { - return tableOptions; - } + // "CREATE VIEW IF NOT EXISTS " + + // globalViewName + + // AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; + public SchemaBuilder withSimpleGlobalView() { + globalViewEnabled = true; + globalViewCreated = false; + globalViewOptions = new GlobalViewOptions(); + return this; + } - public GlobalViewOptions getGlobalViewOptions() { - return globalViewOptions; - } + // "CREATE VIEW IF NOT EXISTS " + + // globalViewName + + // "(" + + // globalViewColumns + " CONSTRAINT pk PRIMARY KEY (" + globalViewPK + ") + // ) AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; + public SchemaBuilder withGlobalViewDefaults() { + globalViewEnabled = true; + globalViewCreated = false; + globalViewOptions = GlobalViewOptions.withDefaults(); + return this; + } - public TenantViewOptions getTenantViewOptions() { - return tenantViewOptions; - } + // "CREATE VIEW IF NOT EXISTS " + + // globalViewName + + // "(" + + // globalViewColumns + " CONSTRAINT pk PRIMARY KEY (" + globalViewPK + ") + // ) AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; + public SchemaBuilder withGlobalViewOptions(GlobalViewOptions options) { + globalViewEnabled = true; + globalViewCreated = false; + globalViewOptions = options; + return this; + } - public TableIndexOptions getTableIndexOptions() { - return tableIndexOptions; - } + // "CREATE VIEW IF NOT EXISTS " + tenantViewName + AS SELECT * FROM " + globalViewName; + public SchemaBuilder withSimpleTenantView() { + tenantViewEnabled = true; + tenantViewCreated = false; + tenantViewOptions = new TenantViewOptions(); + return this; + } - public GlobalViewIndexOptions getGlobalViewIndexOptions() { - return globalViewIndexOptions; - } + // "CREATE VIEW IF NOT EXISTS " + + // tenantViewName + + // "(" + + // tenantViewColumns + " CONSTRAINT pk PRIMARY KEY (" + tenantViewPK + ") + // ) AS SELECT * FROM " + globalViewName; + public SchemaBuilder withTenantViewDefaults() { + tenantViewEnabled = true; + tenantViewCreated = false; + tenantViewOptions = TenantViewOptions.withDefaults(); + return this; + } - public TenantViewIndexOptions getTenantViewIndexOptions() { - return tenantViewIndexOptions; - } + // "CREATE VIEW IF NOT EXISTS " + + // tenantViewName + + // "(" + + // tenantViewColumns + " CONSTRAINT pk PRIMARY KEY (" + tenantViewPK + ") + // ) AS SELECT * FROM " + globalViewName; + public SchemaBuilder withTenantViewOptions(TenantViewOptions options) { + tenantViewEnabled = true; + tenantViewCreated = false; + tenantViewOptions = options; + return this; + } - public OtherOptions getOtherOptions() { - return otherOptions; - } + // "CREATE INDEX IF NOT EXISTS + // "IDX_T_T000001" + // ON "TEST_ENTITY"."T_T000001"(COL1) INCLUDE (COL3)" + public SchemaBuilder withTableIndexDefaults() { + tableIndexEnabled = true; + tableIndexCreated = false; + tableIndexOptions = TableIndexOptions.withDefaults(); + return this; + } - public DataOptions getDataOptions() { - return dataOptions; - } + public SchemaBuilder withTableIndexOptions(TableIndexOptions options) { + tableIndexEnabled = true; + tableIndexCreated = false; + tableIndexOptions = options; + return this; + } - // "CREATE TABLE IF NOT EXISTS " + - // tableName + - // "(" + - // dataColumns + - // " CONSTRAINT pk PRIMARY KEY (" + pk + ") - // ) " + - // 
(dataProps.isEmpty() ? "" : dataProps; - public SchemaBuilder withTableDefaults() { - tableEnabled = true; - tableCreated = false; - tableOptions = TableOptions.withDefaults(); - return this; - } + public SchemaBuilder withGlobalViewIndexDefaults() { + globalViewIndexEnabled = true; + globalViewIndexCreated = false; + globalViewIndexOptions = GlobalViewIndexOptions.withDefaults(); + return this; + } - // "CREATE TABLE IF NOT EXISTS " + - // tableName + - // "(" + - // dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ") - // ) " + - // (dataProps.isEmpty() ? "" : dataProps; - public SchemaBuilder withTableOptions(TableOptions options) { - tableEnabled = true; - tableCreated = false; - tableOptions = options; - return this; - } + public SchemaBuilder withGlobalViewIndexOptions(GlobalViewIndexOptions options) { + globalViewIndexEnabled = true; + globalViewIndexCreated = false; + globalViewIndexOptions = options; + return this; + } - // "CREATE VIEW IF NOT EXISTS " + - // globalViewName + - // AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; - public SchemaBuilder withSimpleGlobalView() { - globalViewEnabled = true; - globalViewCreated = false; - globalViewOptions = new GlobalViewOptions(); - return this; - } + public SchemaBuilder withTenantViewIndexDefaults() { + tenantViewIndexEnabled = true; + tenantViewIndexCreated = false; + tenantViewIndexOptions = TenantViewIndexOptions.withDefaults(); + return this; + } - // "CREATE VIEW IF NOT EXISTS " + - // globalViewName + - // "(" + - // globalViewColumns + " CONSTRAINT pk PRIMARY KEY (" + globalViewPK + ") - // ) AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; - public SchemaBuilder withGlobalViewDefaults() { - globalViewEnabled = true; - globalViewCreated = false; - globalViewOptions = GlobalViewOptions.withDefaults(); - return this; - } + public SchemaBuilder withTenantViewIndexOptions(TenantViewIndexOptions options) { + tenantViewIndexEnabled = true; + tenantViewIndexCreated = false; + tenantViewIndexOptions = options; + return this; + } - // "CREATE VIEW IF NOT EXISTS " + - // globalViewName + - // "(" + - // globalViewColumns + " CONSTRAINT pk PRIMARY KEY (" + globalViewPK + ") - // ) AS SELECT * FROM " + tableName + " WHERE " + globalViewCondition; - public SchemaBuilder withGlobalViewOptions(GlobalViewOptions options) { - globalViewEnabled = true; - globalViewCreated = false; - globalViewOptions = options; - return this; - } + public SchemaBuilder withOtherDefaults() { + this.otherOptions = OtherOptions.withDefaults(); + return this; + } - // "CREATE VIEW IF NOT EXISTS " + tenantViewName + AS SELECT * FROM " + globalViewName; - public SchemaBuilder withSimpleTenantView() { - tenantViewEnabled = true; - tenantViewCreated = false; - tenantViewOptions = new TenantViewOptions(); - return this; - } + public SchemaBuilder withOtherOptions(OtherOptions otherOptions) { + this.otherOptions = otherOptions; + return this; + } - // "CREATE VIEW IF NOT EXISTS " + - // tenantViewName + - // "(" + - // tenantViewColumns + " CONSTRAINT pk PRIMARY KEY (" + tenantViewPK + ") - // ) AS SELECT * FROM " + globalViewName; - public SchemaBuilder withTenantViewDefaults() { - tenantViewEnabled = true; - tenantViewCreated = false; - tenantViewOptions = TenantViewOptions.withDefaults(); - return this; - } + public SchemaBuilder withDataOptionsDefaults() { + this.dataOptions = DataOptions.withDefaults(); + return this; + } - // "CREATE VIEW IF NOT EXISTS " + - // tenantViewName + - // "(" + - // tenantViewColumns + " CONSTRAINT 
pk PRIMARY KEY (" + tenantViewPK + ") - // ) AS SELECT * FROM " + globalViewName; - public SchemaBuilder withTenantViewOptions(TenantViewOptions options) { - tenantViewEnabled = true; - tenantViewCreated = false; - tenantViewOptions = options; - return this; - } + public SchemaBuilder withDataOptions(DataOptions dataOptions) { + this.dataOptions = dataOptions; + return this; + } - // "CREATE INDEX IF NOT EXISTS - // "IDX_T_T000001" - // ON "TEST_ENTITY"."T_T000001"(COL1) INCLUDE (COL3)" - public SchemaBuilder withTableIndexDefaults() { - tableIndexEnabled = true; - tableIndexCreated = false; - tableIndexOptions = TableIndexOptions.withDefaults(); - return this; - } + public SchemaBuilder withConnectOptions(ConnectOptions connectOptions) { + this.connectOptions = connectOptions; + return this; + } - public SchemaBuilder withTableIndexOptions(TableIndexOptions options) { - tableIndexEnabled = true; - tableIndexCreated = false; - tableIndexOptions = options; - return this; - } + public SchemaBuilder withConnectDefaults() { + this.connectOptions = new ConnectOptions(); + return this; + } - public SchemaBuilder withGlobalViewIndexDefaults() { - globalViewIndexEnabled = true; - globalViewIndexCreated = false; - globalViewIndexOptions = GlobalViewIndexOptions.withDefaults(); - return this; - } + // Build method for creating new tenants with existing table, + // global and tenant view definitions. + // If the tenant view definition is not changed then + // the same view is created with different names for different tenants. + public void buildWithNewTenant() throws Exception { + tenantViewCreated = false; + tenantViewIndexCreated = false; + if (this.dataOptions == null) { + this.dataOptions = DataOptions.withDefaults(); + } + this.dataOptions.tenantId = + (this.dataOptions.tenantId == null || this.dataOptions.tenantId.isEmpty()) + ? String.format(dataOptions.tenantIdFormat, TENANT_COUNTER.incrementAndGet(), + dataOptions.uniqueName) + : this.dataOptions.tenantId; + + build(); + } - public SchemaBuilder withGlobalViewIndexOptions(GlobalViewIndexOptions options) { - globalViewIndexEnabled = true; - globalViewIndexCreated = false; - globalViewIndexOptions = options; - return this; - } + // Build method for creating new tenant views with existing table, + // global and tenant view definitions. + // If the tenant view definition is not changed then + // the same view is created with different names. + public void buildNewView() throws Exception { + tenantViewCreated = false; + tenantViewIndexCreated = false; + if (this.dataOptions == null) { + this.dataOptions = DataOptions.withDefaults(); + } + dataOptions.viewNumber = this.getDataOptions().getNextViewNumber(); + build(); + } - public SchemaBuilder withTenantViewIndexDefaults() { - tenantViewIndexEnabled = true; - tenantViewIndexCreated = false; - tenantViewIndexOptions = TenantViewIndexOptions.withDefaults(); - return this; - } + // The main build method for the builder. 
+ public void build() throws Exception { + + // Set defaults if not specified + if (this.otherOptions == null) { + this.otherOptions = OtherOptions.withDefaults(); + } + + if (this.dataOptions == null) { + this.dataOptions = DataOptions.withDefaults(); + } + + if (this.connectOptions == null) { + this.connectOptions = new ConnectOptions(); + } + + if (this.globalViewOptions == null) { + this.globalViewOptions = new GlobalViewOptions(); + } + + if (this.globalViewIndexOptions == null) { + this.globalViewIndexOptions = new GlobalViewIndexOptions(); + } + + if (this.tenantViewOptions == null) { + this.tenantViewOptions = new TenantViewOptions(); + } + + if (this.tenantViewIndexOptions == null) { + this.tenantViewIndexOptions = new TenantViewIndexOptions(); + } + + if ( + connectOptions.useGlobalConnectionOnly && connectOptions.useTenantConnectionForGlobalView + ) { + throw new IllegalArgumentException( + "useTenantConnectionForGlobalView and useGlobalConnectionOnly both cannot be true"); + } + + String tableName = SchemaUtil.normalizeIdentifier(dataOptions.getTableName()); + String globalViewName = SchemaUtil.normalizeIdentifier(dataOptions.getGlobalViewName()); + String tableSchemaNameToUse = ""; + String globalViewSchemaNameToUse = globalViewOptions.getSchemaName(); + String tenantViewSchemaNameToUse = tenantViewOptions.getSchemaName(); + + // If schema name is overridden by specifying it in data options then use it. + if ((dataOptions.getSchemaName() != null) && (!dataOptions.getSchemaName().isEmpty())) { + tableSchemaNameToUse = dataOptions.getSchemaName(); + globalViewSchemaNameToUse = dataOptions.getSchemaName(); + tenantViewSchemaNameToUse = dataOptions.getSchemaName(); + } else { + tableSchemaNameToUse = tableOptions.getSchemaName(); + } + + String tableSchemaName = + tableEnabled ? SchemaUtil.normalizeIdentifier(tableSchemaNameToUse) : ""; + String globalViewSchemaName = + globalViewEnabled ? SchemaUtil.normalizeIdentifier(globalViewSchemaNameToUse) : ""; + String tenantViewSchemaName = + tenantViewEnabled ? SchemaUtil.normalizeIdentifier(tenantViewSchemaNameToUse) : ""; + entityTableName = SchemaUtil.getTableName(tableSchemaName, tableName); + entityGlobalViewName = SchemaUtil.getTableName(globalViewSchemaName, globalViewName); + + // Derive the keyPrefix to use. + entityKeyPrefix = dataOptions.getKeyPrefix() != null && !dataOptions.getKeyPrefix().isEmpty() + ? dataOptions.getKeyPrefix() + : connectOptions.useGlobalConnectionOnly + ? (String.format("Z%02d", dataOptions.getViewNumber())) + : (tenantViewEnabled && !globalViewEnabled + ? (String.format("Z%02d", dataOptions.getViewNumber())) + : DDLDefaults.DEFAULT_KP); + + String tenantViewName = + dataOptions.getTenantViewName() != null && !dataOptions.getTenantViewName().isEmpty() + ? dataOptions.getTenantViewName() + : SchemaUtil.normalizeIdentifier(entityKeyPrefix); + entityTenantViewName = SchemaUtil.getTableName(tenantViewSchemaName, tenantViewName); + String globalViewCondition = globalViewOptions.globalViewCondition != null + && !globalViewOptions.globalViewCondition.isEmpty() + ? globalViewOptions.getGlobalViewCondition() + : String.format("SELECT * FROM %s WHERE %s = '%s'", entityTableName, + tableOptions.getTablePKColumns().get(1), entityKeyPrefix); + String schemaName = SchemaUtil.getSchemaNameFromFullName(entityTableName); + + // Table and Table Index creation. 
+ try (Connection globalConnection = getGlobalConnection()) { + if (tableEnabled && !tableCreated) { + globalConnection.createStatement().execute(buildCreateTableStmt(entityTableName)); + tableCreated = true; + PTableKey tableKey = + new PTableKey(null, SchemaUtil.normalizeFullTableName(entityTableName)); + setBaseTable(globalConnection.unwrap(PhoenixConnection.class).getTable(tableKey)); + } + // Index on Table + if (tableIndexEnabled && !tableIndexCreated) { + String indexOnTableName = SchemaUtil.normalizeIdentifier( + String.format("IDX_%s", SchemaUtil.normalizeIdentifier(tableName))); + globalConnection.createStatement() + .execute(buildCreateIndexStmt(indexOnTableName, entityTableName, + tableIndexOptions.isLocal, tableIndexOptions.tableIndexColumns, + tableIndexOptions.tableIncludeColumns, tableIndexOptions.indexProps)); + tableIndexCreated = true; + entityTableIndexName = SchemaUtil.getTableName(schemaName, indexOnTableName); + } + } + + // Global View and View Index creation. + try (Connection globalViewConnection = getGlobalViewConnection()) { + if (globalViewEnabled && !globalViewCreated) { + globalViewConnection.createStatement() + .execute(buildCreateGlobalViewStmt(entityGlobalViewName, globalViewCondition)); + globalViewCreated = true; + } + // Index on GlobalView + if (globalViewIndexEnabled && !globalViewIndexCreated) { + String indexOnGlobalViewName = + String.format("IDX_%s", SchemaUtil.normalizeIdentifier(globalViewName)); + globalViewConnection.createStatement() + .execute(buildCreateIndexStmt(indexOnGlobalViewName, entityGlobalViewName, + globalViewIndexOptions.isLocal, globalViewIndexOptions.globalViewIndexColumns, + globalViewIndexOptions.globalViewIncludeColumns, globalViewIndexOptions.indexProps)); + globalViewIndexCreated = true; + entityGlobalViewIndexName = SchemaUtil.getTableName(schemaName, indexOnGlobalViewName); + } + } + + // Tenant View and View Index creation. + try (Connection tenantConnection = getTenantConnection()) { + // Build tenant related views if any + if (tenantViewEnabled && !tenantViewCreated) { + boolean hasTenantViewCondition = tenantViewOptions.getTenantViewCondition() != null + && !tenantViewOptions.getTenantViewCondition().isEmpty(); + String tenantViewCondition; + if (globalViewEnabled) { + tenantViewCondition = hasTenantViewCondition + ? tenantViewOptions.getTenantViewCondition() + : String.format("SELECT * FROM %s", entityGlobalViewName); + } else if (tableEnabled) { + tenantViewCondition = hasTenantViewCondition + ? 
tenantViewOptions.getTenantViewCondition() + : String.format("SELECT * FROM %s WHERE KP = '%s'", entityTableName, entityKeyPrefix); + } else { + throw new IllegalStateException("Tenant View must be based on tables or global view"); + } + tenantConnection.createStatement() + .execute(buildCreateTenantViewStmt(entityTenantViewName, tenantViewCondition)); + tenantViewCreated = true; + } + // Index on TenantView + if (tenantViewIndexEnabled && !tenantViewIndexCreated) { + String indexOnTenantViewName = String.format("IDX_%s", entityKeyPrefix); + tenantConnection.createStatement() + .execute(buildCreateIndexStmt(indexOnTenantViewName, entityTenantViewName, + tenantViewIndexOptions.isLocal, tenantViewIndexOptions.tenantViewIndexColumns, + tenantViewIndexOptions.tenantViewIncludeColumns, tenantViewIndexOptions.indexProps)); + tenantViewIndexCreated = true; + entityTenantViewIndexName = SchemaUtil.getTableName(schemaName, indexOnTenantViewName); + } + } + } - public SchemaBuilder withTenantViewIndexOptions(TenantViewIndexOptions options) { - tenantViewIndexEnabled = true; - tenantViewIndexCreated = false; - tenantViewIndexOptions = options; - return this; - } + // Helper method for CREATE INDEX stmt builder. + private String buildCreateIndexStmt(String indexName, String onEntityName, boolean isLocal, + List indexColumns, List includeColumns, String indexProps) { + StringBuilder statement = new StringBuilder(); + statement + .append(isLocal ? "CREATE LOCAL INDEX IF NOT EXISTS " : "CREATE INDEX IF NOT EXISTS ") + .append(indexName).append(" ON ").append(onEntityName).append("(") + .append(Joiner.on(",").join(indexColumns)).append(") ") + .append( + includeColumns.isEmpty() ? "" : "INCLUDE (" + Joiner.on(",").join(includeColumns) + ") ") + .append((indexProps.isEmpty() ? "" : indexProps)); + + LOGGER.info(statement.toString()); + return statement.toString(); - public SchemaBuilder withOtherDefaults() { - this.otherOptions = OtherOptions.withDefaults(); - return this; - } + } - public SchemaBuilder withOtherOptions(OtherOptions otherOptions) { - this.otherOptions = otherOptions; - return this; - } + // Helper method for CREATE TABLE stmt builder. + private String buildCreateTableStmt(String fullTableName) { + StringBuilder statement = new StringBuilder(); + StringBuilder tableDefinition = new StringBuilder(); + + if (!tableOptions.tablePKColumns.isEmpty() || !tableOptions.tableColumns.isEmpty()) { + tableDefinition.append(("(")); + if (!tableOptions.tablePKColumns.isEmpty()) { + tableDefinition.append( + getColumnsAsString(tableOptions.tablePKColumns, tableOptions.tablePKColumnTypes, true)); + } + if (!tableOptions.tableColumns.isEmpty()) { + tableDefinition.append(tableOptions.tablePKColumns.isEmpty() ? "" : ",") + .append(getFQColumnsAsString(tableOptions.tableColumns, otherOptions.tableCFs, + tableOptions.tableColumnTypes)); + } + + if (!tableOptions.tablePKColumns.isEmpty()) { + tableDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") + .append( + getPKColumnsWithSort(tableOptions.tablePKColumns, tableOptions.tablePKColumnSort)) + .append(")"); + } + tableDefinition.append((")")); + } + + statement.append("CREATE TABLE IF NOT EXISTS ").append(fullTableName) + .append(tableDefinition.toString()).append(" ") + .append((tableOptions.tableProps.isEmpty() ? "" : tableOptions.tableProps)); + boolean hasAppendedTableProps = !tableOptions.tableProps.isEmpty(); + if (tableOptions.isMultiTenant()) { + statement.append(hasAppendedTableProps ? 
", MULTI_TENANT=true" : "MULTI_TENANT" + "=true"); + hasAppendedTableProps = true; + } + if (tableOptions.getSaltBuckets() != null) { + String prop = SALT_BUCKETS + "=" + tableOptions.getSaltBuckets(); + statement.append(hasAppendedTableProps ? ", " + prop : prop); + hasAppendedTableProps = true; + } + if (tableOptions.isImmutable()) { + String prop = IMMUTABLE_ROWS + "=true"; + statement.append(hasAppendedTableProps ? ", " + prop : prop); + hasAppendedTableProps = true; + } + if (tableOptions.isChangeDetectionEnabled()) { + String prop = CHANGE_DETECTION_ENABLED + "=true"; + statement.append(hasAppendedTableProps ? ", " + prop : prop); + hasAppendedTableProps = true; + } + LOGGER.info(statement.toString()); + return statement.toString(); + } - public SchemaBuilder withDataOptionsDefaults() { - this.dataOptions = DataOptions.withDefaults(); - return this; - } + // Helper method for CREATE VIEW (GLOBAL) stmt builder. + private String buildCreateGlobalViewStmt(String fullGlobalViewName, + String globalViewCondition) { + StringBuilder statement = new StringBuilder(); + StringBuilder viewDefinition = new StringBuilder(); + + if ( + !globalViewOptions.globalViewPKColumns.isEmpty() + || !globalViewOptions.globalViewColumns.isEmpty() + ) { + viewDefinition.append(("(")); + if (!globalViewOptions.globalViewPKColumns.isEmpty()) { + viewDefinition.append(getColumnsAsString(globalViewOptions.globalViewPKColumns, + globalViewOptions.globalViewPKColumnTypes, true)); + } + if (!globalViewOptions.globalViewColumns.isEmpty()) { + viewDefinition.append(globalViewOptions.globalViewPKColumns.isEmpty() ? "" : ",") + .append(getFQColumnsAsString(globalViewOptions.globalViewColumns, + otherOptions.globalViewCFs, globalViewOptions.globalViewColumnTypes)); + } + + if (!globalViewOptions.globalViewPKColumns.isEmpty()) { + viewDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") + .append(getPKColumnsWithSort(globalViewOptions.globalViewPKColumns, + globalViewOptions.globalViewPKColumnSort)) + .append(")"); + } + viewDefinition.append((")")); + } + + statement.append("CREATE VIEW IF NOT EXISTS ").append(fullGlobalViewName) + .append(viewDefinition.toString()).append(" AS ").append(globalViewCondition).append(" ") + .append((globalViewOptions.tableProps.isEmpty() ? "" : globalViewOptions.tableProps)); + if (globalViewOptions.isChangeDetectionEnabled()) { + if (!globalViewOptions.tableProps.isEmpty()) { + statement.append(", "); + } + statement.append(CHANGE_DETECTION_ENABLED + "=true"); + } + LOGGER.info(statement.toString()); + return statement.toString(); + } - public SchemaBuilder withDataOptions(DataOptions dataOptions) { - this.dataOptions = dataOptions; - return this; - } + // Helper method for CREATE VIEW (TENANT) stmt builder. + private String buildCreateTenantViewStmt(String fullTenantViewName, + String tenantViewCondition) { + StringBuilder statement = new StringBuilder(); + StringBuilder viewDefinition = new StringBuilder(); + + if ( + !tenantViewOptions.tenantViewPKColumns.isEmpty() + || !tenantViewOptions.tenantViewColumns.isEmpty() + ) { + viewDefinition.append(("(")); + if (!tenantViewOptions.tenantViewPKColumns.isEmpty()) { + viewDefinition.append(getColumnsAsString(tenantViewOptions.tenantViewPKColumns, + tenantViewOptions.tenantViewPKColumnTypes, true)); + } + if (!tenantViewOptions.tenantViewColumns.isEmpty()) { + viewDefinition.append(tenantViewOptions.tenantViewPKColumns.isEmpty() ? 
"" : ",") + .append(getFQColumnsAsString(tenantViewOptions.tenantViewColumns, + otherOptions.tenantViewCFs, tenantViewOptions.tenantViewColumnTypes)); + } + + if (!tenantViewOptions.tenantViewPKColumns.isEmpty()) { + viewDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") + .append(getPKColumnsWithSort(tenantViewOptions.tenantViewPKColumns, + tenantViewOptions.tenantViewPKColumnSort)) + .append(")"); + } + viewDefinition.append((")")); + } + + statement.append("CREATE VIEW IF NOT EXISTS ").append(fullTenantViewName) + .append(viewDefinition.toString()).append(" AS ").append(tenantViewCondition).append(" ") + .append((tenantViewOptions.tableProps.isEmpty() ? "" : tenantViewOptions.tableProps)); + if (tenantViewOptions.isChangeDetectionEnabled()) { + if (!tenantViewOptions.tableProps.isEmpty()) { + statement.append(", "); + } + statement.append(CHANGE_DETECTION_ENABLED + "=true"); + } + LOGGER.info(statement.toString()); + return statement.toString(); + } - public SchemaBuilder withConnectOptions(ConnectOptions connectOptions) { - this.connectOptions = connectOptions; - return this; - } + Connection getGlobalConnection() throws SQLException { + return getPhoenixConnection(getUrl()); + } - public SchemaBuilder withConnectDefaults() { - this.connectOptions = new ConnectOptions(); - return this; - } + Connection getGlobalViewConnection() throws SQLException { + return getPhoenixConnection(connectOptions.useTenantConnectionForGlobalView + ? getUrl() + ';' + TENANT_ID_ATTRIB + '=' + dataOptions.getTenantId() + : getUrl()); + } - // Build method for creating new tenants with existing table, - // global and tenant view definitions. - // If the tenant view definition is not changed then - // the same view is created with different names for different tenants. - public void buildWithNewTenant() throws Exception { - tenantViewCreated = false; - tenantViewIndexCreated = false; - if (this.dataOptions == null) { - this.dataOptions = DataOptions.withDefaults(); - } - this.dataOptions.tenantId = - (this.dataOptions.tenantId == null || this.dataOptions.tenantId.isEmpty()) - ? String.format(dataOptions.tenantIdFormat, TENANT_COUNTER.incrementAndGet(), dataOptions.uniqueName) - : this.dataOptions.tenantId; + Connection getTenantConnection() throws SQLException { + return getPhoenixConnection(connectOptions.useGlobalConnectionOnly + ? getUrl() + : getUrl() + ';' + TENANT_ID_ATTRIB + '=' + dataOptions.getTenantId()); + } - build(); - } + Connection getPhoenixConnection(String url) throws SQLException { + return getPhoenixConnection(url, connectOptions.connectProps); + } - // Build method for creating new tenant views with existing table, - // global and tenant view definitions. - // If the tenant view definition is not changed then - // the same view is created with different names. 
- public void buildNewView() throws Exception { - tenantViewCreated = false; - tenantViewIndexCreated = false; - if (this.dataOptions == null) { - this.dataOptions = DataOptions.withDefaults(); - } - dataOptions.viewNumber = this.getDataOptions().getNextViewNumber(); - build(); - } + Connection getPhoenixConnection(String url, Properties props) throws SQLException { + Connection phoenixConnection; + if (props == null) { + Properties connProps = PropertiesUtil.deepCopy(connectOptions.connectProps); + phoenixConnection = DriverManager.getConnection(url, connProps); + } else { + phoenixConnection = DriverManager.getConnection(url, props); + } + phoenixConnection.setAutoCommit(true); + return phoenixConnection; + } - // The main build method for the builder. - public void build() throws Exception { + /** + * Option holders for various statement generation. + */ - // Set defaults if not specified - if (this.otherOptions == null) { - this.otherOptions = OtherOptions.withDefaults(); - } + // Connect options. + public static class ConnectOptions { + Properties connectProps = new Properties(); + boolean useGlobalConnectionOnly = false; + boolean useTenantConnectionForGlobalView = false; - if (this.dataOptions == null) { - this.dataOptions = DataOptions.withDefaults(); - } + /* + ***************************** Setters and Getters + */ - if (this.connectOptions == null) { - this.connectOptions = new ConnectOptions(); - } + public Properties getConnectProps() { + return connectProps; + } - if (this.globalViewOptions == null) { - this.globalViewOptions = new GlobalViewOptions(); - } - - if (this.globalViewIndexOptions == null) { - this.globalViewIndexOptions = new GlobalViewIndexOptions(); - } - - if (this.tenantViewOptions == null) { - this.tenantViewOptions = new TenantViewOptions(); - } - - if (this.tenantViewIndexOptions == null) { - this.tenantViewIndexOptions = new TenantViewIndexOptions(); - } - - if (connectOptions.useGlobalConnectionOnly - && connectOptions.useTenantConnectionForGlobalView) { - throw new IllegalArgumentException( - "useTenantConnectionForGlobalView and useGlobalConnectionOnly both cannot be true"); - } - - String tableName = SchemaUtil.normalizeIdentifier(dataOptions.getTableName()); - String globalViewName = SchemaUtil.normalizeIdentifier(dataOptions.getGlobalViewName()); - String tableSchemaNameToUse = ""; - String globalViewSchemaNameToUse = globalViewOptions.getSchemaName(); - String tenantViewSchemaNameToUse = tenantViewOptions.getSchemaName(); - - // If schema name is overridden by specifying it in data options then use it. - if ((dataOptions.getSchemaName() != null) && (!dataOptions.getSchemaName().isEmpty())) { - tableSchemaNameToUse = dataOptions.getSchemaName(); - globalViewSchemaNameToUse = dataOptions.getSchemaName(); - tenantViewSchemaNameToUse = dataOptions.getSchemaName(); - } else { - tableSchemaNameToUse = tableOptions.getSchemaName(); - } - - String - tableSchemaName = - tableEnabled ? SchemaUtil.normalizeIdentifier(tableSchemaNameToUse) : ""; - String - globalViewSchemaName = - globalViewEnabled ? - SchemaUtil.normalizeIdentifier(globalViewSchemaNameToUse) : - ""; - String - tenantViewSchemaName = - tenantViewEnabled ? - SchemaUtil.normalizeIdentifier(tenantViewSchemaNameToUse) : - ""; - entityTableName = SchemaUtil.getTableName(tableSchemaName, tableName); - entityGlobalViewName = SchemaUtil.getTableName(globalViewSchemaName, globalViewName); - - // Derive the keyPrefix to use. 
- entityKeyPrefix = - dataOptions.getKeyPrefix() != null && !dataOptions.getKeyPrefix().isEmpty()? - dataOptions.getKeyPrefix() : - connectOptions.useGlobalConnectionOnly ? - (String.format("Z%02d", dataOptions.getViewNumber())) : - (tenantViewEnabled && !globalViewEnabled ? - (String.format("Z%02d", dataOptions.getViewNumber())) : - DDLDefaults.DEFAULT_KP); - - String tenantViewName = - dataOptions.getTenantViewName() != null && - !dataOptions.getTenantViewName().isEmpty() ? - dataOptions.getTenantViewName() : - SchemaUtil.normalizeIdentifier(entityKeyPrefix); - entityTenantViewName = SchemaUtil.getTableName(tenantViewSchemaName, tenantViewName); - String globalViewCondition = globalViewOptions.globalViewCondition != null && - !globalViewOptions.globalViewCondition.isEmpty() ? - globalViewOptions.getGlobalViewCondition() : - String.format("SELECT * FROM %s WHERE %s = '%s'", - entityTableName, - tableOptions.getTablePKColumns().get(1), - entityKeyPrefix); - String schemaName = SchemaUtil.getSchemaNameFromFullName(entityTableName); - - // Table and Table Index creation. - try (Connection globalConnection = getGlobalConnection()) { - if (tableEnabled && !tableCreated) { - globalConnection.createStatement() - .execute(buildCreateTableStmt(entityTableName)); - tableCreated = true; - PTableKey - tableKey = - new PTableKey(null, SchemaUtil.normalizeFullTableName(entityTableName)); - setBaseTable( - globalConnection.unwrap(PhoenixConnection.class).getTable(tableKey)); - } - // Index on Table - if (tableIndexEnabled && !tableIndexCreated) { - String - indexOnTableName = - SchemaUtil.normalizeIdentifier(String.format("IDX_%s", - SchemaUtil.normalizeIdentifier(tableName))); - globalConnection.createStatement().execute( - buildCreateIndexStmt(indexOnTableName, entityTableName, - tableIndexOptions.isLocal, tableIndexOptions.tableIndexColumns, - tableIndexOptions.tableIncludeColumns, - tableIndexOptions.indexProps)); - tableIndexCreated = true; - entityTableIndexName = SchemaUtil.getTableName(schemaName, indexOnTableName); - } - } - - // Global View and View Index creation. - try (Connection globalViewConnection = getGlobalViewConnection()) { - if (globalViewEnabled && !globalViewCreated) { - globalViewConnection.createStatement().execute( - buildCreateGlobalViewStmt(entityGlobalViewName, globalViewCondition)); - globalViewCreated = true; - } - // Index on GlobalView - if (globalViewIndexEnabled && !globalViewIndexCreated) { - String - indexOnGlobalViewName = - String.format("IDX_%s", SchemaUtil.normalizeIdentifier(globalViewName)); - globalViewConnection.createStatement().execute( - buildCreateIndexStmt(indexOnGlobalViewName, entityGlobalViewName, - globalViewIndexOptions.isLocal, - globalViewIndexOptions.globalViewIndexColumns, - globalViewIndexOptions.globalViewIncludeColumns, - globalViewIndexOptions.indexProps)); - globalViewIndexCreated = true; - entityGlobalViewIndexName = - SchemaUtil.getTableName(schemaName, indexOnGlobalViewName); - } - } - - // Tenant View and View Index creation. - try (Connection tenantConnection = getTenantConnection()) { - // Build tenant related views if any - if (tenantViewEnabled && !tenantViewCreated) { - boolean hasTenantViewCondition = - tenantViewOptions.getTenantViewCondition() != null && - !tenantViewOptions.getTenantViewCondition().isEmpty(); - String tenantViewCondition; - if (globalViewEnabled) { - tenantViewCondition = hasTenantViewCondition ? 
- tenantViewOptions.getTenantViewCondition() : - String.format("SELECT * FROM %s", entityGlobalViewName); - } else if (tableEnabled) { - tenantViewCondition = hasTenantViewCondition ? - tenantViewOptions.getTenantViewCondition() : - String.format("SELECT * FROM %s WHERE KP = '%s'", entityTableName, - entityKeyPrefix); - } else { - throw new IllegalStateException( - "Tenant View must be based on tables or global view"); - } - tenantConnection.createStatement().execute( - buildCreateTenantViewStmt(entityTenantViewName, tenantViewCondition)); - tenantViewCreated = true; - } - // Index on TenantView - if (tenantViewIndexEnabled && !tenantViewIndexCreated) { - String indexOnTenantViewName = String.format("IDX_%s", entityKeyPrefix); - tenantConnection.createStatement().execute( - buildCreateIndexStmt(indexOnTenantViewName, entityTenantViewName, - tenantViewIndexOptions.isLocal, - tenantViewIndexOptions.tenantViewIndexColumns, - tenantViewIndexOptions.tenantViewIncludeColumns, - tenantViewIndexOptions.indexProps)); - tenantViewIndexCreated = true; - entityTenantViewIndexName = - SchemaUtil.getTableName(schemaName, indexOnTenantViewName); - } - } - } - - // Helper method for CREATE INDEX stmt builder. - private String buildCreateIndexStmt(String indexName, String onEntityName, boolean isLocal, - List indexColumns, List includeColumns, String indexProps) { - StringBuilder statement = new StringBuilder(); - statement.append(isLocal ? - "CREATE LOCAL INDEX IF NOT EXISTS " : - "CREATE INDEX IF NOT EXISTS ") - .append(indexName) - .append(" ON ") - .append(onEntityName) - .append("(") - .append(Joiner.on(",").join(indexColumns)) - .append(") ") - .append(includeColumns.isEmpty() ? - "" : - "INCLUDE (" + Joiner.on(",").join(includeColumns) + ") ") - .append((indexProps.isEmpty() ? "" : indexProps)); - - LOGGER.info(statement.toString()); - return statement.toString(); - - } - - // Helper method for CREATE TABLE stmt builder. - private String buildCreateTableStmt(String fullTableName) { - StringBuilder statement = new StringBuilder(); - StringBuilder tableDefinition = new StringBuilder(); - - if (!tableOptions.tablePKColumns.isEmpty() || !tableOptions.tableColumns.isEmpty()) { - tableDefinition.append(("(")); - if (!tableOptions.tablePKColumns.isEmpty()) { - tableDefinition.append(getColumnsAsString(tableOptions.tablePKColumns, - tableOptions.tablePKColumnTypes, true)); - } - if (!tableOptions.tableColumns.isEmpty()) { - tableDefinition.append(tableOptions.tablePKColumns.isEmpty() ? "" : ",") - .append(getFQColumnsAsString(tableOptions.tableColumns, - otherOptions.tableCFs, tableOptions.tableColumnTypes)); - } - - if (!tableOptions.tablePKColumns.isEmpty()) { - tableDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") - .append(getPKColumnsWithSort(tableOptions.tablePKColumns, - tableOptions.tablePKColumnSort)).append(")"); - } - tableDefinition.append((")")); - } - - - statement.append("CREATE TABLE IF NOT EXISTS ").append(fullTableName) - .append(tableDefinition.toString()).append(" ") - .append((tableOptions.tableProps.isEmpty() ? "" : tableOptions.tableProps)); - boolean hasAppendedTableProps = !tableOptions.tableProps.isEmpty(); - if (tableOptions.isMultiTenant()) { - statement.append(hasAppendedTableProps ? ", MULTI_TENANT=true" : "MULTI_TENANT" + - "=true"); - hasAppendedTableProps = true; - } - if (tableOptions.getSaltBuckets() != null) { - String prop = SALT_BUCKETS +"=" + tableOptions.getSaltBuckets(); - statement.append(hasAppendedTableProps ? 
", " + prop : prop); - hasAppendedTableProps = true; - } - if (tableOptions.isImmutable()) { - String prop = IMMUTABLE_ROWS + "=true"; - statement.append(hasAppendedTableProps ? ", " + prop : prop); - hasAppendedTableProps = true; - } - if (tableOptions.isChangeDetectionEnabled()) { - String prop = CHANGE_DETECTION_ENABLED + "=true"; - statement.append(hasAppendedTableProps ? ", " + prop : prop); - hasAppendedTableProps = true; - } - LOGGER.info(statement.toString()); - return statement.toString(); - } - - // Helper method for CREATE VIEW (GLOBAL) stmt builder. - private String buildCreateGlobalViewStmt(String fullGlobalViewName, String globalViewCondition) { - StringBuilder statement = new StringBuilder(); - StringBuilder viewDefinition = new StringBuilder(); - - if (!globalViewOptions.globalViewPKColumns.isEmpty() - || !globalViewOptions.globalViewColumns.isEmpty()) { - viewDefinition.append(("(")); - if (!globalViewOptions.globalViewPKColumns.isEmpty()) { - viewDefinition.append(getColumnsAsString(globalViewOptions.globalViewPKColumns, - globalViewOptions.globalViewPKColumnTypes, true)); - } - if (!globalViewOptions.globalViewColumns.isEmpty()) { - viewDefinition - .append(globalViewOptions.globalViewPKColumns.isEmpty() ? "" : ",") - .append(getFQColumnsAsString(globalViewOptions.globalViewColumns, - otherOptions.globalViewCFs, - globalViewOptions.globalViewColumnTypes)); - } - - if (!globalViewOptions.globalViewPKColumns.isEmpty()) { - viewDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") - .append(getPKColumnsWithSort(globalViewOptions.globalViewPKColumns, - globalViewOptions.globalViewPKColumnSort)).append(")"); - } - viewDefinition.append((")")); - } - - statement.append("CREATE VIEW IF NOT EXISTS ").append(fullGlobalViewName) - .append(viewDefinition.toString()).append(" AS ") - .append(globalViewCondition).append(" ") - .append((globalViewOptions.tableProps.isEmpty() ? - "" : - globalViewOptions.tableProps)); - if (globalViewOptions.isChangeDetectionEnabled()) { - if (!globalViewOptions.tableProps.isEmpty()) { - statement.append(", "); - } - statement.append(CHANGE_DETECTION_ENABLED + "=true"); - } - LOGGER.info(statement.toString()); - return statement.toString(); - } - - // Helper method for CREATE VIEW (TENANT) stmt builder. - private String buildCreateTenantViewStmt(String fullTenantViewName, - String tenantViewCondition) { - StringBuilder statement = new StringBuilder(); - StringBuilder viewDefinition = new StringBuilder(); - - if (!tenantViewOptions.tenantViewPKColumns.isEmpty() - || !tenantViewOptions.tenantViewColumns.isEmpty()) { - viewDefinition.append(("(")); - if (!tenantViewOptions.tenantViewPKColumns.isEmpty()) { - viewDefinition.append(getColumnsAsString(tenantViewOptions.tenantViewPKColumns, - tenantViewOptions.tenantViewPKColumnTypes, true)); - } - if (!tenantViewOptions.tenantViewColumns.isEmpty()) { - viewDefinition - .append(tenantViewOptions.tenantViewPKColumns.isEmpty() ? 
"" : ",") - .append(getFQColumnsAsString(tenantViewOptions.tenantViewColumns, - otherOptions.tenantViewCFs, - tenantViewOptions.tenantViewColumnTypes)); - } - - if (!tenantViewOptions.tenantViewPKColumns.isEmpty()) { - viewDefinition.append(" CONSTRAINT pk PRIMARY KEY ").append("(") - .append(getPKColumnsWithSort(tenantViewOptions.tenantViewPKColumns, - tenantViewOptions.tenantViewPKColumnSort)).append(")"); - } - viewDefinition.append((")")); - } - - statement.append("CREATE VIEW IF NOT EXISTS ").append(fullTenantViewName) - .append(viewDefinition.toString()).append(" AS ").append(tenantViewCondition) - .append(" ").append((tenantViewOptions.tableProps.isEmpty() ? - "" : - tenantViewOptions.tableProps)); - if (tenantViewOptions.isChangeDetectionEnabled()) { - if (!tenantViewOptions.tableProps.isEmpty()) { - statement.append(", "); - } - statement.append(CHANGE_DETECTION_ENABLED + "=true"); - } - LOGGER.info(statement.toString()); - return statement.toString(); - } + public void setConnectProps(Properties connectProps) { + this.connectProps = connectProps; + } - Connection getGlobalConnection() throws SQLException { - return getPhoenixConnection(getUrl()); - } - - Connection getGlobalViewConnection() throws SQLException { - return getPhoenixConnection(connectOptions.useTenantConnectionForGlobalView ? - getUrl() + ';' + TENANT_ID_ATTRIB + '=' + dataOptions.getTenantId() : - getUrl()); - } - - Connection getTenantConnection() throws SQLException { - return getPhoenixConnection(connectOptions.useGlobalConnectionOnly ? - getUrl() : - getUrl() + ';' + TENANT_ID_ATTRIB + '=' + dataOptions.getTenantId()); - } - - Connection getPhoenixConnection(String url) throws SQLException { - return getPhoenixConnection(url, connectOptions.connectProps); - } - - Connection getPhoenixConnection(String url, Properties props) throws SQLException { - Connection phoenixConnection; - if (props == null) { - Properties connProps = PropertiesUtil.deepCopy(connectOptions.connectProps); - phoenixConnection = DriverManager.getConnection(url, connProps); - } else { - phoenixConnection = DriverManager.getConnection(url, props); - } - phoenixConnection.setAutoCommit(true); - return phoenixConnection; - } - - /** - * Option holders for various statement generation. - */ - - // Connect options. - public static class ConnectOptions { - Properties connectProps = new Properties(); - boolean useGlobalConnectionOnly = false; - boolean useTenantConnectionForGlobalView = false; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public Properties getConnectProps() { - return connectProps; - } - - public void setConnectProps(Properties connectProps) { - this.connectProps = connectProps; - } - - public boolean isUseGlobalConnectionOnly() { - return useGlobalConnectionOnly; - } - - public void setUseGlobalConnectionOnly(boolean useGlobalConnectionOnly) { - this.useGlobalConnectionOnly = useGlobalConnectionOnly; - } - - public boolean isUseTenantConnectionForGlobalView() { - return useTenantConnectionForGlobalView; - } - - public void setUseTenantConnectionForGlobalView( - boolean useTenantConnectionForGlobalView) { - this.useTenantConnectionForGlobalView = useTenantConnectionForGlobalView; - } - - } - - // Table statement generation. 
- public static class TableOptions { - String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - List tableColumns = Lists.newArrayList(); - List tableColumnTypes = Lists.newArrayList(); - List tablePKColumns = Lists.newArrayList(); - List tablePKColumnTypes = Lists.newArrayList(); - List tablePKColumnSort; - String tableProps = DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS; - boolean isMultiTenant = true; - Integer saltBuckets = null; - boolean isImmutable = false; - boolean isChangeDetectionEnabled = false; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static TableOptions withDefaults() { - TableOptions options = new TableOptions(); - options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - options.tableColumns = Lists.newArrayList(DDLDefaults.TABLE_COLUMNS); - options.tableColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); - options.tablePKColumns = Lists.newArrayList(DDLDefaults.TABLE_PK_COLUMNS); - options.tablePKColumnTypes = Lists.newArrayList(DDLDefaults.TABLE_PK_TYPES); - options.tableProps = DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS; - return options; - } - - public String getSchemaName() { - return schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public List getTableColumns() { - return tableColumns; - } + public boolean isUseGlobalConnectionOnly() { + return useGlobalConnectionOnly; + } - public void setTableColumns(List tableColumns) { - this.tableColumns = tableColumns; - } + public void setUseGlobalConnectionOnly(boolean useGlobalConnectionOnly) { + this.useGlobalConnectionOnly = useGlobalConnectionOnly; + } - public List getTableColumnTypes() { - return tableColumnTypes; - } + public boolean isUseTenantConnectionForGlobalView() { + return useTenantConnectionForGlobalView; + } - public void setTableColumnTypes(List tableColumnTypes) { - this.tableColumnTypes = tableColumnTypes; - } - - public List getTablePKColumns() { - return tablePKColumns; - } - - public void setTablePKColumns(List tablePKColumns) { - this.tablePKColumns = tablePKColumns; - } - - public List getTablePKColumnTypes() { - return tablePKColumnTypes; - } - - public void setTablePKColumnTypes(List tablePKColumnTypes) { - this.tablePKColumnTypes = tablePKColumnTypes; - } - - public List getTablePKColumnSort() { - return tablePKColumnSort; - } - - public void setTablePKColumnSort(List tablePKColumnSort) { - this.tablePKColumnSort = tablePKColumnSort; - } - - public String getTableProps() { - return tableProps; - } - - public void setTableProps(String tableProps) { - this.tableProps = tableProps; - } - - public boolean isMultiTenant() { - return this.isMultiTenant; - } - - public void setMultiTenant(boolean isMultiTenant) { - this.isMultiTenant = isMultiTenant; - } - - public Integer getSaltBuckets() { - return this.saltBuckets; - } - public void setSaltBuckets(Integer saltBuckets) { - this.saltBuckets = saltBuckets; - } - - public boolean isImmutable() { - return isImmutable; - } - - public void setImmutable(boolean immutable) { - isImmutable = immutable; - //default props includes a column encoding not supported in immutable tables - if (this.tableProps.equals(DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS)) { - this.tableProps = DDLDefaults.DEFAULT_IMMUTABLE_TABLE_PROPS; - } - } - - public boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } - - public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { - isChangeDetectionEnabled = changeDetectionEnabled; - } - } 
- - // Global View statement generation. - public static class GlobalViewOptions { - String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - List globalViewColumns = Lists.newArrayList(); - List globalViewColumnTypes = Lists.newArrayList(); - List globalViewPKColumns = Lists.newArrayList(); - List globalViewPKColumnTypes = Lists.newArrayList(); - List globalViewPKColumnSort; - String tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; - String globalViewCondition; - boolean isChangeDetectionEnabled = false; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static GlobalViewOptions withDefaults() { - GlobalViewOptions options = new GlobalViewOptions(); - options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - options.globalViewColumns = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_COLUMNS); - options.globalViewColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); - options.globalViewPKColumns = - Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_PK_COLUMNS); - options.globalViewPKColumnTypes = - Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_PK_TYPES); - options.tableProps = DDLDefaults.DEFAULT_GLOBAL_VIEW_PROPS; - options.globalViewCondition = ""; - return options; - } - - public String getSchemaName() { - return schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public List getGlobalViewColumns() { - return globalViewColumns; - } - - public void setGlobalViewColumns(List globalViewColumns) { - this.globalViewColumns = globalViewColumns; - } - - public List getGlobalViewColumnTypes() { - return globalViewColumnTypes; - } - - public void setGlobalViewColumnTypes(List globalViewColumnTypes) { - this.globalViewColumnTypes = globalViewColumnTypes; - } - - public List getGlobalViewPKColumns() { - return globalViewPKColumns; - } - - public void setGlobalViewPKColumns(List globalViewPKColumns) { - this.globalViewPKColumns = globalViewPKColumns; - } - - public List getGlobalViewPKColumnTypes() { - return globalViewPKColumnTypes; - } - - public void setGlobalViewPKColumnTypes(List globalViewPKColumnTypes) { - this.globalViewPKColumnTypes = globalViewPKColumnTypes; - } - - public List getGlobalViewPKColumnSort() { - return globalViewPKColumnSort; - } - - public void setGlobalViewPKColumnSort(List globalViewPKColumnSort) { - this.globalViewPKColumnSort = globalViewPKColumnSort; - } - - public String getTableProps() { - return tableProps; - } - - public void setTableProps(String tableProps) { - this.tableProps = tableProps; - } - - public String getGlobalViewCondition() { - return globalViewCondition; - } - - public void setGlobalViewCondition(String globalViewCondition) { - this.globalViewCondition = globalViewCondition; - } - - public boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } - - public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { - this.isChangeDetectionEnabled = changeDetectionEnabled; - } - } - - // Tenant View statement generation. 
- public static class TenantViewOptions { - String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - List tenantViewColumns = Lists.newArrayList(); - List tenantViewColumnTypes = Lists.newArrayList(); - List tenantViewPKColumns = Lists.newArrayList(); - List tenantViewPKColumnTypes = Lists.newArrayList(); - List tenantViewPKColumnSort; - String tenantViewCondition; - String tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; - boolean isChangeDetectionEnabled = false; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static TenantViewOptions withDefaults() { - TenantViewOptions options = new TenantViewOptions(); - options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - options.tenantViewColumns = Lists.newArrayList(DDLDefaults.TENANT_VIEW_COLUMNS); - options.tenantViewColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); - options.tenantViewPKColumns = - Lists.newArrayList(DDLDefaults.TENANT_VIEW_PK_COLUMNS); - options.tenantViewPKColumnTypes = - Lists.newArrayList(DDLDefaults.TENANT_VIEW_PK_TYPES); - options.tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; - options.tenantViewCondition = ""; - return options; - } - - public String getSchemaName() { - return schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public List getTenantViewColumns() { - return tenantViewColumns; - } - - public void setTenantViewColumns(List tenantViewColumns) { - this.tenantViewColumns = tenantViewColumns; - } + public void setUseTenantConnectionForGlobalView(boolean useTenantConnectionForGlobalView) { + this.useTenantConnectionForGlobalView = useTenantConnectionForGlobalView; + } - public List getTenantViewColumnTypes() { - return tenantViewColumnTypes; - } - - public void setTenantViewColumnTypes(List tenantViewColumnTypes) { - this.tenantViewColumnTypes = tenantViewColumnTypes; - } - - public List getTenantViewPKColumns() { - return tenantViewPKColumns; - } - - public void setTenantViewPKColumns(List tenantViewPKColumns) { - this.tenantViewPKColumns = tenantViewPKColumns; - } - - public List getTenantViewPKColumnTypes() { - return tenantViewPKColumnTypes; - } - - public void setTenantViewPKColumnTypes(List tenantViewPKColumnTypes) { - this.tenantViewPKColumnTypes = tenantViewPKColumnTypes; - } - - public List getTenantViewPKColumnSort() { - return tenantViewPKColumnSort; - } - - public void setTenantViewPKColumnSort(List tenantViewPKColumnSort) { - this.tenantViewPKColumnSort = tenantViewPKColumnSort; - } - - public String getTableProps() { - return tableProps; - } - - public void setTableProps(String tableProps) { - this.tableProps = tableProps; - } - - public String getTenantViewCondition() { - return tenantViewCondition; - } - public void setTenantViewCondition(String tenantViewCondition) { - this.tenantViewCondition = tenantViewCondition; - } - - public boolean isChangeDetectionEnabled() { - return isChangeDetectionEnabled; - } - - public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { - this.isChangeDetectionEnabled = changeDetectionEnabled; - } - } - - // Table Index statement generation. 
- public static class TableIndexOptions { - List tableIndexColumns = Lists.newArrayList(); - List tableIncludeColumns = Lists.newArrayList(); - boolean isLocal = false; - String indexProps = ""; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static TableIndexOptions withDefaults() { - TableIndexOptions options = new TableIndexOptions(); - options.tableIndexColumns = Lists.newArrayList(DDLDefaults.TABLE_INDEX_COLUMNS); - options.tableIncludeColumns = Lists.newArrayList(DDLDefaults.TABLE_INCLUDE_COLUMNS); - options.indexProps = DDLDefaults.DEFAULT_TABLE_INDEX_PROPS; - return options; - } - - public List getTableIndexColumns() { - return tableIndexColumns; - } - - public void setTableIndexColumns(List tableIndexColumns) { - this.tableIndexColumns = tableIndexColumns; - } - - public List getTableIncludeColumns() { - return tableIncludeColumns; - } - - public void setTableIncludeColumns(List tableIncludeColumns) { - this.tableIncludeColumns = tableIncludeColumns; - } - - public boolean isLocal() { - return isLocal; - } - - public void setLocal(boolean local) { - isLocal = local; - } - - public String getIndexProps() { - return indexProps; - } - - public void setIndexProps(String indexProps) { - this.indexProps = indexProps; - } - } - - // Global View Index statement generation. - public static class GlobalViewIndexOptions { - List globalViewIndexColumns = Lists.newArrayList(); - List globalViewIncludeColumns = Lists.newArrayList(); - boolean isLocal = false; - String indexProps = ""; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static GlobalViewIndexOptions withDefaults() { - GlobalViewIndexOptions options = new GlobalViewIndexOptions(); - options.globalViewIndexColumns = - Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_INDEX_COLUMNS); - options.globalViewIncludeColumns = - Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_INCLUDE_COLUMNS); - options.indexProps = DDLDefaults.DEFAULT_GLOBAL_VIEW_INDEX_PROPS; - return options; - } - - public List getGlobalViewIndexColumns() { - return globalViewIndexColumns; - } - - public void setGlobalViewIndexColumns(List globalViewIndexColumns) { - this.globalViewIndexColumns = globalViewIndexColumns; - } - - public List getGlobalViewIncludeColumns() { - return globalViewIncludeColumns; - } - - public void setGlobalViewIncludeColumns(List globalViewIncludeColumns) { - this.globalViewIncludeColumns = globalViewIncludeColumns; - } - - public boolean isLocal() { - return isLocal; - } - - public void setLocal(boolean local) { - isLocal = local; - } - - public String getIndexProps() { - return indexProps; - } - - public void setIndexProps(String indexProps) { - this.indexProps = indexProps; - } - } - - // Tenant View Index statement generation. 
- public static class TenantViewIndexOptions { - List tenantViewIndexColumns = Lists.newArrayList(); - List tenantViewIncludeColumns = Lists.newArrayList(); - boolean isLocal = false; - String indexProps = ""; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static TenantViewIndexOptions withDefaults() { - TenantViewIndexOptions options = new TenantViewIndexOptions(); - options.tenantViewIndexColumns = - Lists.newArrayList(DDLDefaults.TENANT_VIEW_INDEX_COLUMNS); - options.tenantViewIncludeColumns = - Lists.newArrayList(DDLDefaults.TENANT_VIEW_INCLUDE_COLUMNS); - options.indexProps = DDLDefaults.DEFAULT_TENANT_VIEW_INDEX_PROPS; - return options; - } - - public List getTenantViewIndexColumns() { - return tenantViewIndexColumns; - } - - public void setTenantViewIndexColumns(List tenantViewIndexColumns) { - this.tenantViewIndexColumns = tenantViewIndexColumns; - } - - public List getTenantViewIncludeColumns() { - return tenantViewIncludeColumns; - } - - public void setTenantViewIncludeColumns(List tenantViewIncludeColumns) { - this.tenantViewIncludeColumns = tenantViewIncludeColumns; - } - - public boolean isLocal() { - return isLocal; - } - - public void setLocal(boolean local) { - isLocal = local; - } - - public String getIndexProps() { - return indexProps; - } - - public void setIndexProps(String indexProps) { - this.indexProps = indexProps; - } - } - - public static class OtherOptions { - String testName; - List tableCFs = Lists.newArrayList(); - List globalViewCFs = Lists.newArrayList(); - List tenantViewCFs = Lists.newArrayList(); - - /* - ***************************** - * Setters and Getters - ***************************** - */ - - public static OtherOptions withDefaults() { - OtherOptions options = new OtherOptions(); - options.tableCFs = Lists.newArrayList(DDLDefaults.TABLE_COLUMN_FAMILIES); - options.globalViewCFs = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_COLUMN_FAMILIES); - options.tenantViewCFs = Lists.newArrayList(DDLDefaults.TENANT_VIEW_COLUMN_FAMILIES); - return options; - } - - public String getTestName() { - return testName; - } - - public void setTestName(String testName) { - this.testName = testName; - } - - public List getTableCFs() { - return tableCFs; - } - - public void setTableCFs(List tableCFs) { - this.tableCFs = tableCFs; - } - - public List getGlobalViewCFs() { - return globalViewCFs; - } - - public void setGlobalViewCFs(List globalViewCFs) { - this.globalViewCFs = globalViewCFs; - } - - public List getTenantViewCFs() { - return tenantViewCFs; - } - - public void setTenantViewCFs(List tenantViewCFs) { - this.tenantViewCFs = tenantViewCFs; - } - } - - public static class DataOptions { - String uniqueName = ""; - String uniqueNamePrefix = ""; - String tenantIdFormat = DDLDefaults.DEFAULT_TENANT_ID_FMT; - String keyPrefix = ""; - int viewNumber = 0; - AtomicInteger viewCounter = new AtomicInteger(0); - String tenantId = ""; - String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; - String tableName = ""; - String globalViewName = ""; - String tenantViewName = ""; - - /* - ***************************** - * Setters and Getters - ***************************** - */ - public static DataOptions withPrefix(String prefix) { - DataOptions options = new DataOptions(); - options.uniqueNamePrefix = prefix; - options.uniqueName = generateUniqueName().substring(1); - options.viewCounter = new AtomicInteger(0); -// options.tenantId = -// String.format(options.tenantIdFormat, TENANT_COUNTER.get(), -// options.uniqueName); 
- options.tableName = - String.format(DDLDefaults.DEFAULT_UNIQUE_PREFIX_TABLE_NAME_FMT, - options.uniqueNamePrefix, - options.uniqueName); - - options.globalViewName = - String.format(DDLDefaults.DEFAULT_UNIQUE_PREFIX_GLOBAL_VIEW_NAME_FMT, - options.uniqueNamePrefix, - options.uniqueName); - return options; - } - - public static DataOptions withDefaults() { - DataOptions options = new DataOptions(); - options.uniqueName = generateUniqueName().substring(1); - options.viewCounter = new AtomicInteger(0); - options.tableName = - String.format(DDLDefaults.DEFAULT_UNIQUE_TABLE_NAME_FMT, - options.uniqueName); - - options.globalViewName = - String.format(DDLDefaults.DEFAULT_UNIQUE_GLOBAL_VIEW_NAME_FMT, - options.uniqueName); - return options; - } - - public int getNextViewNumber() { - return viewNumber = viewCounter.incrementAndGet(); - } - - public String getNextTenantId() { - return tenantId = String.format( - tenantIdFormat, TENANT_COUNTER.incrementAndGet(), uniqueName); - } - - public int getTenantNumber() { - return TENANT_COUNTER.get(); - } - - public int getNextTenantNumber() { - return TENANT_COUNTER.incrementAndGet(); - } - - public int getViewNumber() { - return viewNumber; - } - - public String getTenantIdFormat() { - return tenantIdFormat; - } - - public void setTenantIdFormat(String tenantIdFormat) { - this.tenantIdFormat = tenantIdFormat; - } - - public String getUniqueName() { - return uniqueName; - } - - public void setUniqueName(String uniqueName) { - this.uniqueName = uniqueName; - } - - public String getTenantId() { - if (tenantId == null || tenantId.isEmpty()) { - return getNextTenantId(); - } - return tenantId; - } - - public void setTenantId(String tenantId) { - this.tenantId = tenantId; - } - - public String getUniqueNamePrefix() { - return uniqueNamePrefix; - } - - public void setUniqueNamePrefix(String uniqueNamePrefix) { - this.uniqueNamePrefix = uniqueNamePrefix; - } - - public String getKeyPrefix() { - return keyPrefix; - } - - public void setKeyPrefix(String keyPrefix) { - this.keyPrefix = keyPrefix; - } - - public void setViewNumber(int viewNumber) { - this.viewNumber = viewNumber; - } + } - public AtomicInteger getViewCounter() { - return viewCounter; - } + // Table statement generation. 
+ public static class TableOptions { + String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + List tableColumns = Lists.newArrayList(); + List tableColumnTypes = Lists.newArrayList(); + List tablePKColumns = Lists.newArrayList(); + List tablePKColumnTypes = Lists.newArrayList(); + List tablePKColumnSort; + String tableProps = DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS; + boolean isMultiTenant = true; + Integer saltBuckets = null; + boolean isImmutable = false; + boolean isChangeDetectionEnabled = false; + + /* + ***************************** Setters and Getters + */ + + public static TableOptions withDefaults() { + TableOptions options = new TableOptions(); + options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + options.tableColumns = Lists.newArrayList(DDLDefaults.TABLE_COLUMNS); + options.tableColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); + options.tablePKColumns = Lists.newArrayList(DDLDefaults.TABLE_PK_COLUMNS); + options.tablePKColumnTypes = Lists.newArrayList(DDLDefaults.TABLE_PK_TYPES); + options.tableProps = DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS; + return options; + } + + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public List getTableColumns() { + return tableColumns; + } + + public void setTableColumns(List tableColumns) { + this.tableColumns = tableColumns; + } + + public List getTableColumnTypes() { + return tableColumnTypes; + } + + public void setTableColumnTypes(List tableColumnTypes) { + this.tableColumnTypes = tableColumnTypes; + } + + public List getTablePKColumns() { + return tablePKColumns; + } + + public void setTablePKColumns(List tablePKColumns) { + this.tablePKColumns = tablePKColumns; + } + + public List getTablePKColumnTypes() { + return tablePKColumnTypes; + } + + public void setTablePKColumnTypes(List tablePKColumnTypes) { + this.tablePKColumnTypes = tablePKColumnTypes; + } + + public List getTablePKColumnSort() { + return tablePKColumnSort; + } + + public void setTablePKColumnSort(List tablePKColumnSort) { + this.tablePKColumnSort = tablePKColumnSort; + } + + public String getTableProps() { + return tableProps; + } + + public void setTableProps(String tableProps) { + this.tableProps = tableProps; + } + + public boolean isMultiTenant() { + return this.isMultiTenant; + } + + public void setMultiTenant(boolean isMultiTenant) { + this.isMultiTenant = isMultiTenant; + } + + public Integer getSaltBuckets() { + return this.saltBuckets; + } + + public void setSaltBuckets(Integer saltBuckets) { + this.saltBuckets = saltBuckets; + } + + public boolean isImmutable() { + return isImmutable; + } + + public void setImmutable(boolean immutable) { + isImmutable = immutable; + // default props includes a column encoding not supported in immutable tables + if (this.tableProps.equals(DDLDefaults.DEFAULT_MUTABLE_TABLE_PROPS)) { + this.tableProps = DDLDefaults.DEFAULT_IMMUTABLE_TABLE_PROPS; + } + } + + public boolean isChangeDetectionEnabled() { + return isChangeDetectionEnabled; + } + + public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { + isChangeDetectionEnabled = changeDetectionEnabled; + } + } - public void setViewCounter(AtomicInteger viewCounter) { - this.viewCounter = viewCounter; - } + // Global View statement generation. 
+ public static class GlobalViewOptions { + String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + List globalViewColumns = Lists.newArrayList(); + List globalViewColumnTypes = Lists.newArrayList(); + List globalViewPKColumns = Lists.newArrayList(); + List globalViewPKColumnTypes = Lists.newArrayList(); + List globalViewPKColumnSort; + String tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; + String globalViewCondition; + boolean isChangeDetectionEnabled = false; + + /* + ***************************** Setters and Getters + */ + + public static GlobalViewOptions withDefaults() { + GlobalViewOptions options = new GlobalViewOptions(); + options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + options.globalViewColumns = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_COLUMNS); + options.globalViewColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); + options.globalViewPKColumns = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_PK_COLUMNS); + options.globalViewPKColumnTypes = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_PK_TYPES); + options.tableProps = DDLDefaults.DEFAULT_GLOBAL_VIEW_PROPS; + options.globalViewCondition = ""; + return options; + } + + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public List getGlobalViewColumns() { + return globalViewColumns; + } + + public void setGlobalViewColumns(List globalViewColumns) { + this.globalViewColumns = globalViewColumns; + } + + public List getGlobalViewColumnTypes() { + return globalViewColumnTypes; + } + + public void setGlobalViewColumnTypes(List globalViewColumnTypes) { + this.globalViewColumnTypes = globalViewColumnTypes; + } + + public List getGlobalViewPKColumns() { + return globalViewPKColumns; + } + + public void setGlobalViewPKColumns(List globalViewPKColumns) { + this.globalViewPKColumns = globalViewPKColumns; + } + + public List getGlobalViewPKColumnTypes() { + return globalViewPKColumnTypes; + } + + public void setGlobalViewPKColumnTypes(List globalViewPKColumnTypes) { + this.globalViewPKColumnTypes = globalViewPKColumnTypes; + } + + public List getGlobalViewPKColumnSort() { + return globalViewPKColumnSort; + } + + public void setGlobalViewPKColumnSort(List globalViewPKColumnSort) { + this.globalViewPKColumnSort = globalViewPKColumnSort; + } + + public String getTableProps() { + return tableProps; + } + + public void setTableProps(String tableProps) { + this.tableProps = tableProps; + } + + public String getGlobalViewCondition() { + return globalViewCondition; + } + + public void setGlobalViewCondition(String globalViewCondition) { + this.globalViewCondition = globalViewCondition; + } + + public boolean isChangeDetectionEnabled() { + return isChangeDetectionEnabled; + } + + public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { + this.isChangeDetectionEnabled = changeDetectionEnabled; + } + } - public String getTableName() { - return tableName; - } + // Tenant View statement generation. 
+ public static class TenantViewOptions { + String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + List tenantViewColumns = Lists.newArrayList(); + List tenantViewColumnTypes = Lists.newArrayList(); + List tenantViewPKColumns = Lists.newArrayList(); + List tenantViewPKColumnTypes = Lists.newArrayList(); + List tenantViewPKColumnSort; + String tenantViewCondition; + String tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; + boolean isChangeDetectionEnabled = false; + + /* + ***************************** Setters and Getters + */ + + public static TenantViewOptions withDefaults() { + TenantViewOptions options = new TenantViewOptions(); + options.schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + options.tenantViewColumns = Lists.newArrayList(DDLDefaults.TENANT_VIEW_COLUMNS); + options.tenantViewColumnTypes = Lists.newArrayList(DDLDefaults.COLUMN_TYPES); + options.tenantViewPKColumns = Lists.newArrayList(DDLDefaults.TENANT_VIEW_PK_COLUMNS); + options.tenantViewPKColumnTypes = Lists.newArrayList(DDLDefaults.TENANT_VIEW_PK_TYPES); + options.tableProps = DDLDefaults.DEFAULT_TENANT_VIEW_PROPS; + options.tenantViewCondition = ""; + return options; + } + + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public List getTenantViewColumns() { + return tenantViewColumns; + } + + public void setTenantViewColumns(List tenantViewColumns) { + this.tenantViewColumns = tenantViewColumns; + } + + public List getTenantViewColumnTypes() { + return tenantViewColumnTypes; + } + + public void setTenantViewColumnTypes(List tenantViewColumnTypes) { + this.tenantViewColumnTypes = tenantViewColumnTypes; + } + + public List getTenantViewPKColumns() { + return tenantViewPKColumns; + } + + public void setTenantViewPKColumns(List tenantViewPKColumns) { + this.tenantViewPKColumns = tenantViewPKColumns; + } + + public List getTenantViewPKColumnTypes() { + return tenantViewPKColumnTypes; + } + + public void setTenantViewPKColumnTypes(List tenantViewPKColumnTypes) { + this.tenantViewPKColumnTypes = tenantViewPKColumnTypes; + } + + public List getTenantViewPKColumnSort() { + return tenantViewPKColumnSort; + } + + public void setTenantViewPKColumnSort(List tenantViewPKColumnSort) { + this.tenantViewPKColumnSort = tenantViewPKColumnSort; + } + + public String getTableProps() { + return tableProps; + } + + public void setTableProps(String tableProps) { + this.tableProps = tableProps; + } + + public String getTenantViewCondition() { + return tenantViewCondition; + } + + public void setTenantViewCondition(String tenantViewCondition) { + this.tenantViewCondition = tenantViewCondition; + } + + public boolean isChangeDetectionEnabled() { + return isChangeDetectionEnabled; + } + + public void setChangeDetectionEnabled(boolean changeDetectionEnabled) { + this.isChangeDetectionEnabled = changeDetectionEnabled; + } + } - public void setTableName(String tableName) { - this.tableName = tableName; - } + // Table Index statement generation. 
+ public static class TableIndexOptions { + List tableIndexColumns = Lists.newArrayList(); + List tableIncludeColumns = Lists.newArrayList(); + boolean isLocal = false; + String indexProps = ""; + + /* + ***************************** Setters and Getters + */ + + public static TableIndexOptions withDefaults() { + TableIndexOptions options = new TableIndexOptions(); + options.tableIndexColumns = Lists.newArrayList(DDLDefaults.TABLE_INDEX_COLUMNS); + options.tableIncludeColumns = Lists.newArrayList(DDLDefaults.TABLE_INCLUDE_COLUMNS); + options.indexProps = DDLDefaults.DEFAULT_TABLE_INDEX_PROPS; + return options; + } + + public List getTableIndexColumns() { + return tableIndexColumns; + } + + public void setTableIndexColumns(List tableIndexColumns) { + this.tableIndexColumns = tableIndexColumns; + } + + public List getTableIncludeColumns() { + return tableIncludeColumns; + } + + public void setTableIncludeColumns(List tableIncludeColumns) { + this.tableIncludeColumns = tableIncludeColumns; + } + + public boolean isLocal() { + return isLocal; + } + + public void setLocal(boolean local) { + isLocal = local; + } + + public String getIndexProps() { + return indexProps; + } + + public void setIndexProps(String indexProps) { + this.indexProps = indexProps; + } + } - public String getGlobalViewName() { - return globalViewName; - } + // Global View Index statement generation. + public static class GlobalViewIndexOptions { + List globalViewIndexColumns = Lists.newArrayList(); + List globalViewIncludeColumns = Lists.newArrayList(); + boolean isLocal = false; + String indexProps = ""; + + /* + ***************************** Setters and Getters + */ + + public static GlobalViewIndexOptions withDefaults() { + GlobalViewIndexOptions options = new GlobalViewIndexOptions(); + options.globalViewIndexColumns = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_INDEX_COLUMNS); + options.globalViewIncludeColumns = + Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_INCLUDE_COLUMNS); + options.indexProps = DDLDefaults.DEFAULT_GLOBAL_VIEW_INDEX_PROPS; + return options; + } + + public List getGlobalViewIndexColumns() { + return globalViewIndexColumns; + } + + public void setGlobalViewIndexColumns(List globalViewIndexColumns) { + this.globalViewIndexColumns = globalViewIndexColumns; + } + + public List getGlobalViewIncludeColumns() { + return globalViewIncludeColumns; + } + + public void setGlobalViewIncludeColumns(List globalViewIncludeColumns) { + this.globalViewIncludeColumns = globalViewIncludeColumns; + } + + public boolean isLocal() { + return isLocal; + } + + public void setLocal(boolean local) { + isLocal = local; + } + + public String getIndexProps() { + return indexProps; + } + + public void setIndexProps(String indexProps) { + this.indexProps = indexProps; + } + } - public void setGlobalViewName(String globalViewName) { - this.globalViewName = globalViewName; - } + // Tenant View Index statement generation. 
+ public static class TenantViewIndexOptions { + List tenantViewIndexColumns = Lists.newArrayList(); + List tenantViewIncludeColumns = Lists.newArrayList(); + boolean isLocal = false; + String indexProps = ""; + + /* + ***************************** Setters and Getters + */ + + public static TenantViewIndexOptions withDefaults() { + TenantViewIndexOptions options = new TenantViewIndexOptions(); + options.tenantViewIndexColumns = Lists.newArrayList(DDLDefaults.TENANT_VIEW_INDEX_COLUMNS); + options.tenantViewIncludeColumns = + Lists.newArrayList(DDLDefaults.TENANT_VIEW_INCLUDE_COLUMNS); + options.indexProps = DDLDefaults.DEFAULT_TENANT_VIEW_INDEX_PROPS; + return options; + } + + public List getTenantViewIndexColumns() { + return tenantViewIndexColumns; + } + + public void setTenantViewIndexColumns(List tenantViewIndexColumns) { + this.tenantViewIndexColumns = tenantViewIndexColumns; + } + + public List getTenantViewIncludeColumns() { + return tenantViewIncludeColumns; + } + + public void setTenantViewIncludeColumns(List tenantViewIncludeColumns) { + this.tenantViewIncludeColumns = tenantViewIncludeColumns; + } + + public boolean isLocal() { + return isLocal; + } + + public void setLocal(boolean local) { + isLocal = local; + } + + public String getIndexProps() { + return indexProps; + } + + public void setIndexProps(String indexProps) { + this.indexProps = indexProps; + } + } - public String getTenantViewName() { - return tenantViewName; - } + public static class OtherOptions { + String testName; + List tableCFs = Lists.newArrayList(); + List globalViewCFs = Lists.newArrayList(); + List tenantViewCFs = Lists.newArrayList(); + + /* + ***************************** Setters and Getters + */ + + public static OtherOptions withDefaults() { + OtherOptions options = new OtherOptions(); + options.tableCFs = Lists.newArrayList(DDLDefaults.TABLE_COLUMN_FAMILIES); + options.globalViewCFs = Lists.newArrayList(DDLDefaults.GLOBAL_VIEW_COLUMN_FAMILIES); + options.tenantViewCFs = Lists.newArrayList(DDLDefaults.TENANT_VIEW_COLUMN_FAMILIES); + return options; + } + + public String getTestName() { + return testName; + } + + public void setTestName(String testName) { + this.testName = testName; + } + + public List getTableCFs() { + return tableCFs; + } + + public void setTableCFs(List tableCFs) { + this.tableCFs = tableCFs; + } + + public List getGlobalViewCFs() { + return globalViewCFs; + } + + public void setGlobalViewCFs(List globalViewCFs) { + this.globalViewCFs = globalViewCFs; + } + + public List getTenantViewCFs() { + return tenantViewCFs; + } + + public void setTenantViewCFs(List tenantViewCFs) { + this.tenantViewCFs = tenantViewCFs; + } + } - public void setTenantViewName(String tenantViewName) { - this.tenantViewName = tenantViewName; - } + public static class DataOptions { + String uniqueName = ""; + String uniqueNamePrefix = ""; + String tenantIdFormat = DDLDefaults.DEFAULT_TENANT_ID_FMT; + String keyPrefix = ""; + int viewNumber = 0; + AtomicInteger viewCounter = new AtomicInteger(0); + String tenantId = ""; + String schemaName = DDLDefaults.DEFAULT_SCHEMA_NAME; + String tableName = ""; + String globalViewName = ""; + String tenantViewName = ""; + + /* + ***************************** Setters and Getters + */ + public static DataOptions withPrefix(String prefix) { + DataOptions options = new DataOptions(); + options.uniqueNamePrefix = prefix; + options.uniqueName = generateUniqueName().substring(1); + options.viewCounter = new AtomicInteger(0); + // options.tenantId = + // 
String.format(options.tenantIdFormat, TENANT_COUNTER.get(), + // options.uniqueName); + options.tableName = String.format(DDLDefaults.DEFAULT_UNIQUE_PREFIX_TABLE_NAME_FMT, + options.uniqueNamePrefix, options.uniqueName); + + options.globalViewName = + String.format(DDLDefaults.DEFAULT_UNIQUE_PREFIX_GLOBAL_VIEW_NAME_FMT, + options.uniqueNamePrefix, options.uniqueName); + return options; + } + + public static DataOptions withDefaults() { + DataOptions options = new DataOptions(); + options.uniqueName = generateUniqueName().substring(1); + options.viewCounter = new AtomicInteger(0); + options.tableName = + String.format(DDLDefaults.DEFAULT_UNIQUE_TABLE_NAME_FMT, options.uniqueName); + + options.globalViewName = + String.format(DDLDefaults.DEFAULT_UNIQUE_GLOBAL_VIEW_NAME_FMT, options.uniqueName); + return options; + } + + public int getNextViewNumber() { + return viewNumber = viewCounter.incrementAndGet(); + } + + public String getNextTenantId() { + return tenantId = + String.format(tenantIdFormat, TENANT_COUNTER.incrementAndGet(), uniqueName); + } + + public int getTenantNumber() { + return TENANT_COUNTER.get(); + } + + public int getNextTenantNumber() { + return TENANT_COUNTER.incrementAndGet(); + } + + public int getViewNumber() { + return viewNumber; + } + + public String getTenantIdFormat() { + return tenantIdFormat; + } + + public void setTenantIdFormat(String tenantIdFormat) { + this.tenantIdFormat = tenantIdFormat; + } + + public String getUniqueName() { + return uniqueName; + } + + public void setUniqueName(String uniqueName) { + this.uniqueName = uniqueName; + } + + public String getTenantId() { + if (tenantId == null || tenantId.isEmpty()) { + return getNextTenantId(); + } + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + public String getUniqueNamePrefix() { + return uniqueNamePrefix; + } + + public void setUniqueNamePrefix(String uniqueNamePrefix) { + this.uniqueNamePrefix = uniqueNamePrefix; + } + + public String getKeyPrefix() { + return keyPrefix; + } + + public void setKeyPrefix(String keyPrefix) { + this.keyPrefix = keyPrefix; + } + + public void setViewNumber(int viewNumber) { + this.viewNumber = viewNumber; + } + + public AtomicInteger getViewCounter() { + return viewCounter; + } + + public void setViewCounter(AtomicInteger viewCounter) { + this.viewCounter = viewCounter; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public String getGlobalViewName() { + return globalViewName; + } + + public void setGlobalViewName(String globalViewName) { + this.globalViewName = globalViewName; + } + + public String getTenantViewName() { + return tenantViewName; + } + + public void setTenantViewName(String tenantViewName) { + this.tenantViewName = tenantViewName; + } - public String getSchemaName() { - return schemaName; - } + public String getSchemaName() { + return schemaName; + } - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - } + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/PropertyPolicyProviderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/PropertyPolicyProviderTest.java index a8e7fd763d9..f0c987d44aa 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/PropertyPolicyProviderTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/PropertyPolicyProviderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,46 +17,44 @@ */ package org.apache.phoenix.query; -import org.junit.Test; +import static org.junit.Assert.assertTrue; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.Properties; -import static org.junit.Assert.assertTrue; +import org.junit.Test; -public class PropertyPolicyProviderTest extends BaseConnectionlessQueryTest{ - @Test - public void testPropertyPolicyProvider() { - PropertyPolicy provided = PropertyPolicyProvider.getPropertyPolicy(); - assertTrue(provided instanceof TestPropertyPolicy); - } +public class PropertyPolicyProviderTest extends BaseConnectionlessQueryTest { + @Test + public void testPropertyPolicyProvider() { + PropertyPolicy provided = PropertyPolicyProvider.getPropertyPolicy(); + assertTrue(provided instanceof TestPropertyPolicy); + } - @Test(expected = PropertyNotAllowedException.class) - public void testPropertyPolicyBlacklisted() throws SQLException { - Properties properties=new Properties(); - properties.put("DisallowedProperty","value"); - try(Connection conn = DriverManager.getConnection(getUrl(),properties); - ){} + @Test(expected = PropertyNotAllowedException.class) + public void testPropertyPolicyBlacklisted() throws SQLException { + Properties properties = new Properties(); + properties.put("DisallowedProperty", "value"); + try (Connection conn = DriverManager.getConnection(getUrl(), properties);) { } + } - @Test - public void testPropertyPolicyWhitelisted() throws SQLException { - Properties properties=new Properties(); - properties.put("allowedProperty","value"); - try( - Connection conn = DriverManager.getConnection(getUrl(),properties); - ){} + @Test + public void testPropertyPolicyWhitelisted() throws SQLException { + Properties properties = new Properties(); + properties.put("allowedProperty", "value"); + try (Connection conn = DriverManager.getConnection(getUrl(), properties);) { } + } - @Test - public void testDisablePropertyPolicyProvider() throws SQLException { - Properties properties=new Properties(); - properties.put("DisallowedProperty","value"); - properties.put(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false"); - try( - Connection conn = DriverManager.getConnection(getUrl(), properties) - ){} + @Test + public void testDisablePropertyPolicyProvider() throws SQLException { + Properties properties = new Properties(); + properties.put("DisallowedProperty", "value"); + properties.put(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false"); + try (Connection conn = DriverManager.getConnection(getUrl(), properties)) { } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java index 684af6cad19..40f2bb32e52 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,288 +31,264 @@ import org.junit.Test; public class QueryPlanTest extends BaseConnectionlessQueryTest { - - @Test - public void testExplainPlan() throws Exception { - String[] queryPlans = new String[] { - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) <= ('000000000000001','000000000000005') ", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000003'] - ['000000000000001','000000000000005']", - - "SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND \"DATE\" >= to_date('2013-01-01')", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" + - " SERVER FILTER BY FIRST KEY ONLY AND \"DATE\" >= DATE '2013-01-01 00:00:00.000'", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000001','000000000000005') ", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000005'] - ['000000000000001','000000000000008']", - - "SELECT host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')", - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + - " SERVER FILTER BY FIRST KEY ONLY", - - "SELECT /*+ SMALL*/ host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')", - "CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + - " SERVER FILTER BY FIRST KEY ONLY", - - "SELECT inst,\"DATE\" FROM PTSDB2 WHERE inst = 'na1' ORDER BY inst DESC, \"DATE\" DESC", - "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER PTSDB2 ['na1']\n" + - " SERVER FILTER BY FIRST KEY ONLY", - - // Since inst IS NOT NULL is unbounded, we won't continue optimizing - "SELECT host FROM PTSDB WHERE inst IS NOT NULL AND host IS NULL AND \"DATE\" >= to_date('2013-01-01')", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [not null]\n" + - " SERVER FILTER BY FIRST KEY ONLY AND (HOST IS NULL AND \"DATE\" >= DATE '2013-01-01 00:00:00.000')", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id = '000000000000002' AND x_integer = 2 AND a_integer < 5 ", - "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER ATABLE\n" + - " SERVER FILTER BY (X_INTEGER = 2 AND A_INTEGER < 5)", - - "SELECT a_string,b_string FROM atable WHERE organization_id > '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000003','000000000000005') ", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000003000000000000005'] - [*]\n" + - " SERVER FILTER BY (ENTITY_ID > '000000000000002' AND ENTITY_ID < '000000000000008')", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id >= '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000000','000000000000005') ", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000002'] - ['000000000000001','000000000000008']", - - "SELECT * FROM atable", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE", - - "SELECT inst,host FROM PTSDB 
WHERE inst IN ('na1', 'na2','na3') AND host IN ('a','b') AND \"DATE\" >= to_date('2013-01-01') AND \"DATE\" < to_date('2013-01-02')", - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 6 RANGES OVER PTSDB ['na1','a','2013-01-01'] - ['na3','b','2013-01-02']\n" + - " SERVER FILTER BY FIRST KEY ONLY", - - "SELECT inst,host FROM PTSDB WHERE inst LIKE 'na%' AND host IN ('a','b') AND \"DATE\" >= to_date('2013-01-01') AND \"DATE\" < to_date('2013-01-02')", - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 RANGES OVER PTSDB ['na','a','2013-01-01'] - ['nb','b','2013-01-02']\n" + - " SERVER FILTER BY FIRST KEY ONLY", - - "SELECT count(*) FROM atable", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " SERVER AGGREGATE INTO SINGLE ROW", - - "SELECT count(*) FROM atable WHERE organization_id='000000000000001' AND SUBSTR(entity_id,1,3) > '002' AND SUBSTR(entity_id,1,3) <= '003'", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','003 '] - ['000000000000001','004 ']\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " SERVER AGGREGATE INTO SINGLE ROW", - - "SELECT a_string FROM atable WHERE organization_id='000000000000001' AND SUBSTR(entity_id,1,3) > '002' AND SUBSTR(entity_id,1,3) <= '003'", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','003 '] - ['000000000000001','004 ']", - - "SELECT count(1) FROM atable GROUP BY a_string", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING]\n" + - "CLIENT MERGE SORT", - - "SELECT count(1) FROM atable GROUP BY a_string LIMIT 5", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT 5 ROW LIMIT", - - "SELECT a_string FROM atable ORDER BY a_string DESC LIMIT 3", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER TOP 3 ROWS SORTED BY [A_STRING DESC]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT LIMIT 3" , - - "SELECT count(1) FROM atable GROUP BY a_string,b_string HAVING max(a_string) = 'a'", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT FILTER BY MAX(A_STRING) = 'a'", - - "SELECT count(1) FROM atable WHERE a_integer = 1 GROUP BY ROUND(a_time,'HOUR',2),entity_id HAVING max(a_string) = 'a'", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER FILTER BY A_INTEGER = 1\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [ENTITY_ID, ROUND(A_TIME)]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT FILTER BY MAX(A_STRING) = 'a'", - - "SELECT count(1) FROM atable WHERE a_integer = 1 GROUP BY a_string,b_string HAVING max(a_string) = 'a' ORDER BY b_string", - "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + - " SERVER FILTER BY A_INTEGER = 1\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT FILTER BY MAX(A_STRING) = 'a'\n" + - "CLIENT SORTED BY [B_STRING]", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id != '000000000000002' AND x_integer = 2 AND a_integer < 5 LIMIT 10", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + - " SERVER FILTER BY (ENTITY_ID != '000000000000002' AND X_INTEGER = 2 AND A_INTEGER < 5)\n" + - " SERVER 10 ROW LIMIT\n" + - "CLIENT 10 ROW LIMIT", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' ORDER BY a_string ASC NULLS FIRST LIMIT 10", - "CLIENT PARALLEL 1-WAY RANGE SCAN 
OVER ATABLE ['000000000000001']\n" + - " SERVER TOP 10 ROWS SORTED BY [A_STRING]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT LIMIT 10", - - "SELECT max(a_integer) FROM atable WHERE organization_id = '000000000000001' GROUP BY organization_id,entity_id,ROUND(a_date,'HOUR') ORDER BY entity_id NULLS LAST LIMIT 10", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + - " SERVER AGGREGATE INTO DISTINCT ROWS BY [ORGANIZATION_ID, ENTITY_ID, ROUND(A_DATE)]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT 10 ROW LIMIT", - - "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' ORDER BY a_string DESC NULLS LAST LIMIT 10", - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + - " SERVER TOP 10 ROWS SORTED BY [A_STRING DESC NULLS LAST]\n" + - "CLIENT MERGE SORT\n" + - "CLIENT LIMIT 10", - - "SELECT a_string,b_string FROM atable WHERE organization_id IN ('000000000000001', '000000000000005')", - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER ATABLE ['000000000000001'] - ['000000000000005']", - - "SELECT a_string,b_string FROM atable WHERE organization_id IN ('00D000000000001', '00D000000000005') AND entity_id IN('00E00000000000X','00E00000000000Z')", - "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 4 KEYS OVER ATABLE", - - "SELECT inst,host FROM PTSDB WHERE REGEXP_SUBSTR(INST, '[^-]+', 1) IN ('na1', 'na2','na3')", - "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 RANGES OVER PTSDB ['na1'] - ['na4']\n" + - " SERVER FILTER BY FIRST KEY ONLY AND REGEXP_SUBSTR(INST, '[^-]+', 1) IN ('na1','na2','na3')", - - }; - for (int i = 0; i < queryPlans.length; i+=2) { - String query = queryPlans[i]; - String plan = queryPlans[i+1]; - Properties props = new Properties(); - // Override date format so we don't have a bunch of zeros - props.setProperty(QueryServices.DATE_FORMAT_ATTRIB, "yyyy-MM-dd"); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - Statement statement = conn.createStatement(); - ResultSet rs = statement.executeQuery("EXPLAIN " + query); - // TODO: figure out a way of verifying that query isn't run during explain execution - assertEquals((i/2+1) + ") " + query, plan, QueryUtil.getExplainPlan(rs)); - } finally { - conn.close(); - } - } - } - - @Test - public void testTenantSpecificConnWithLimit() throws Exception { - String baseTableDDL = "CREATE TABLE BASE_MULTI_TENANT_TABLE(\n " + - " tenant_id VARCHAR(5) NOT NULL,\n" + - " userid INTEGER NOT NULL,\n" + - " username VARCHAR NOT NULL,\n" + - " col VARCHAR\n " + - " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username)) MULTI_TENANT=true"; - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute(baseTableDDL); + + @Test + public void testExplainPlan() throws Exception { + String[] queryPlans = new String[] { + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) <= ('000000000000001','000000000000005') ", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000003'] - ['000000000000001','000000000000005']", + + "SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND \"DATE\" >= to_date('2013-01-01')", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" + + " SERVER FILTER BY FIRST KEY ONLY AND \"DATE\" >= DATE '2013-01-01 00:00:00.000'", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id > '000000000000002' AND entity_id < 
'000000000000008' AND (organization_id,entity_id) >= ('000000000000001','000000000000005') ", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000005'] - ['000000000000001','000000000000008']", + + "SELECT host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')", + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + + " SERVER FILTER BY FIRST KEY ONLY", + + "SELECT /*+ SMALL*/ host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')", + "CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + + " SERVER FILTER BY FIRST KEY ONLY", + + "SELECT inst,\"DATE\" FROM PTSDB2 WHERE inst = 'na1' ORDER BY inst DESC, \"DATE\" DESC", + "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER PTSDB2 ['na1']\n" + + " SERVER FILTER BY FIRST KEY ONLY", + + // Since inst IS NOT NULL is unbounded, we won't continue optimizing + "SELECT host FROM PTSDB WHERE inst IS NOT NULL AND host IS NULL AND \"DATE\" >= to_date('2013-01-01')", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [not null]\n" + + " SERVER FILTER BY FIRST KEY ONLY AND (HOST IS NULL AND \"DATE\" >= DATE '2013-01-01 00:00:00.000')", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id = '000000000000002' AND x_integer = 2 AND a_integer < 5 ", + "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER ATABLE\n" + + " SERVER FILTER BY (X_INTEGER = 2 AND A_INTEGER < 5)", + + "SELECT a_string,b_string FROM atable WHERE organization_id > '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000003','000000000000005') ", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000003000000000000005'] - [*]\n" + + " SERVER FILTER BY (ENTITY_ID > '000000000000002' AND ENTITY_ID < '000000000000008')", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id >= '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000000','000000000000005') ", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000002'] - ['000000000000001','000000000000008']", + + "SELECT * FROM atable", "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE", + + "SELECT inst,host FROM PTSDB WHERE inst IN ('na1', 'na2','na3') AND host IN ('a','b') AND \"DATE\" >= to_date('2013-01-01') AND \"DATE\" < to_date('2013-01-02')", + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 6 RANGES OVER PTSDB ['na1','a','2013-01-01'] - ['na3','b','2013-01-02']\n" + + " SERVER FILTER BY FIRST KEY ONLY", + + "SELECT inst,host FROM PTSDB WHERE inst LIKE 'na%' AND host IN ('a','b') AND \"DATE\" >= to_date('2013-01-01') AND \"DATE\" < to_date('2013-01-02')", + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 RANGES OVER PTSDB ['na','a','2013-01-01'] - ['nb','b','2013-01-02']\n" + + " SERVER FILTER BY FIRST KEY ONLY", + + "SELECT count(*) FROM atable", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + " SERVER FILTER BY FIRST KEY ONLY\n" + + " SERVER AGGREGATE INTO SINGLE ROW", + + "SELECT count(*) FROM atable WHERE organization_id='000000000000001' AND SUBSTR(entity_id,1,3) > '002' AND SUBSTR(entity_id,1,3) <= '003'", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','003 '] - ['000000000000001','004 ']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + " SERVER AGGREGATE INTO SINGLE ROW", + + "SELECT a_string FROM atable WHERE organization_id='000000000000001' AND SUBSTR(entity_id,1,3) > '002' AND SUBSTR(entity_id,1,3) <= '003'", + "CLIENT 
PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','003 '] - ['000000000000001','004 ']", + + "SELECT count(1) FROM atable GROUP BY a_string", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING]\n" + "CLIENT MERGE SORT", + + "SELECT count(1) FROM atable GROUP BY a_string LIMIT 5", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING]\n" + "CLIENT MERGE SORT\n" + + "CLIENT 5 ROW LIMIT", + + "SELECT a_string FROM atable ORDER BY a_string DESC LIMIT 3", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + + " SERVER TOP 3 ROWS SORTED BY [A_STRING DESC]\n" + "CLIENT MERGE SORT\n" + + "CLIENT LIMIT 3", + + "SELECT count(1) FROM atable GROUP BY a_string,b_string HAVING max(a_string) = 'a'", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" + + "CLIENT MERGE SORT\n" + "CLIENT FILTER BY MAX(A_STRING) = 'a'", + + "SELECT count(1) FROM atable WHERE a_integer = 1 GROUP BY ROUND(a_time,'HOUR',2),entity_id HAVING max(a_string) = 'a'", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + " SERVER FILTER BY A_INTEGER = 1\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [ENTITY_ID, ROUND(A_TIME)]\n" + + "CLIENT MERGE SORT\n" + "CLIENT FILTER BY MAX(A_STRING) = 'a'", + + "SELECT count(1) FROM atable WHERE a_integer = 1 GROUP BY a_string,b_string HAVING max(a_string) = 'a' ORDER BY b_string", + "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE\n" + " SERVER FILTER BY A_INTEGER = 1\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" + + "CLIENT MERGE SORT\n" + "CLIENT FILTER BY MAX(A_STRING) = 'a'\n" + + "CLIENT SORTED BY [B_STRING]", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id != '000000000000002' AND x_integer = 2 AND a_integer < 5 LIMIT 10", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + + " SERVER FILTER BY (ENTITY_ID != '000000000000002' AND X_INTEGER = 2 AND A_INTEGER < 5)\n" + + " SERVER 10 ROW LIMIT\n" + "CLIENT 10 ROW LIMIT", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' ORDER BY a_string ASC NULLS FIRST LIMIT 10", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + + " SERVER TOP 10 ROWS SORTED BY [A_STRING]\n" + "CLIENT MERGE SORT\n" + + "CLIENT LIMIT 10", + + "SELECT max(a_integer) FROM atable WHERE organization_id = '000000000000001' GROUP BY organization_id,entity_id,ROUND(a_date,'HOUR') ORDER BY entity_id NULLS LAST LIMIT 10", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + + " SERVER AGGREGATE INTO DISTINCT ROWS BY [ORGANIZATION_ID, ENTITY_ID, ROUND(A_DATE)]\n" + + "CLIENT MERGE SORT\n" + "CLIENT 10 ROW LIMIT", + + "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' ORDER BY a_string DESC NULLS LAST LIMIT 10", + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001']\n" + + " SERVER TOP 10 ROWS SORTED BY [A_STRING DESC NULLS LAST]\n" + "CLIENT MERGE SORT\n" + + "CLIENT LIMIT 10", + + "SELECT a_string,b_string FROM atable WHERE organization_id IN ('000000000000001', '000000000000005')", + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER ATABLE ['000000000000001'] - ['000000000000005']", + + "SELECT a_string,b_string FROM atable WHERE organization_id IN ('00D000000000001', '00D000000000005') AND entity_id IN('00E00000000000X','00E00000000000Z')", + "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 4 KEYS OVER ATABLE", 
+ + "SELECT inst,host FROM PTSDB WHERE REGEXP_SUBSTR(INST, '[^-]+', 1) IN ('na1', 'na2','na3')", + "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 RANGES OVER PTSDB ['na1'] - ['na4']\n" + + " SERVER FILTER BY FIRST KEY ONLY AND REGEXP_SUBSTR(INST, '[^-]+', 1) IN ('na1','na2','na3')", + + }; + for (int i = 0; i < queryPlans.length; i += 2) { + String query = queryPlans[i]; + String plan = queryPlans[i + 1]; + Properties props = new Properties(); + // Override date format so we don't have a bunch of zeros + props.setProperty(QueryServices.DATE_FORMAT_ATTRIB, "yyyy-MM-dd"); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + Statement statement = conn.createStatement(); + ResultSet rs = statement.executeQuery("EXPLAIN " + query); + // TODO: figure out a way of verifying that query isn't run during explain execution + assertEquals((i / 2 + 1) + ") " + query, plan, QueryUtil.getExplainPlan(rs)); + } finally { conn.close(); - - String tenantId = "tenantId"; - String tenantViewDDL = "CREATE VIEW TENANT_VIEW AS SELECT * FROM BASE_MULTI_TENANT_TABLE"; - Properties tenantProps = new Properties(); - tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - conn = DriverManager.getConnection(getUrl(), tenantProps); - conn.createStatement().execute(tenantViewDDL); - - String query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT 1"; - ResultSet rs = conn.createStatement().executeQuery(query); - assertEquals("CLIENT SERIAL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + - " SERVER 1 ROW LIMIT\n" + - "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs)); - query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT " + Integer.MAX_VALUE; - rs = conn.createStatement().executeQuery(query); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + - " SERVER " + Integer.MAX_VALUE + " ROW LIMIT\n" + - "CLIENT " + Integer.MAX_VALUE + " ROW LIMIT", QueryUtil.getExplainPlan(rs)); - query = "EXPLAIN SELECT * FROM TENANT_VIEW WHERE username = 'Joe' LIMIT 1"; - rs = conn.createStatement().executeQuery(query); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + - " SERVER FILTER BY USERNAME = 'Joe'\n" + - " SERVER 1 ROW LIMIT\n" + - "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs)); - query = "EXPLAIN SELECT * FROM TENANT_VIEW WHERE col = 'Joe' LIMIT 1"; - rs = conn.createStatement().executeQuery(query); - assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + - " SERVER FILTER BY COL = 'Joe'\n" + - " SERVER 1 ROW LIMIT\n" + - "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs)); + } } - - @Test - public void testDescTimestampAtBoundary() throws Exception { - Properties props = PropertiesUtil.deepCopy(new Properties()); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - conn.createStatement().execute("CREATE TABLE FOO(\n" + - " a VARCHAR NOT NULL,\n" + - " b TIMESTAMP NOT NULL,\n" + - " c VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + - " ) IMMUTABLE_ROWS=true\n" + - " ,SALT_BUCKETS=20"); - String query = "select * from foo where a = 'a' and b >= timestamp '2016-01-28 00:00:00' and b < timestamp '2016-01-29 00:00:00'"; - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); - String queryPlan = QueryUtil.getExplainPlan(rs); - // For real connection CQSI, the result is supposed to be 20-WAY RANGE SCAN, however - // for connection-less impl, since we retrieve region locations for 20 splits and each - // time we get all 
region locations due to connection-less specific impl, we get - // 20*20 = 400-WAY RANGE SCAN. - assertEquals( - "CLIENT PARALLEL 400-WAY RANGE SCAN OVER FOO [X'00','a',~'2016-01-28 23:59:59.999'] - [X'13','a',~'2016-01-28 00:00:00.000']\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - "CLIENT MERGE SORT", queryPlan); - } finally { - conn.close(); - } + } + + @Test + public void testTenantSpecificConnWithLimit() throws Exception { + String baseTableDDL = + "CREATE TABLE BASE_MULTI_TENANT_TABLE(\n " + " tenant_id VARCHAR(5) NOT NULL,\n" + + " userid INTEGER NOT NULL,\n" + " username VARCHAR NOT NULL,\n" + " col VARCHAR\n " + + " CONSTRAINT pk PRIMARY KEY (tenant_id, userid, username)) MULTI_TENANT=true"; + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute(baseTableDDL); + conn.close(); + + String tenantId = "tenantId"; + String tenantViewDDL = "CREATE VIEW TENANT_VIEW AS SELECT * FROM BASE_MULTI_TENANT_TABLE"; + Properties tenantProps = new Properties(); + tenantProps.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); + conn = DriverManager.getConnection(getUrl(), tenantProps); + conn.createStatement().execute(tenantViewDDL); + + String query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT 1"; + ResultSet rs = conn.createStatement().executeQuery(query); + assertEquals("CLIENT SERIAL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + + " SERVER 1 ROW LIMIT\n" + "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs)); + query = "EXPLAIN SELECT * FROM TENANT_VIEW LIMIT " + Integer.MAX_VALUE; + rs = conn.createStatement().executeQuery(query); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + " SERVER " + + Integer.MAX_VALUE + " ROW LIMIT\n" + "CLIENT " + Integer.MAX_VALUE + " ROW LIMIT", + QueryUtil.getExplainPlan(rs)); + query = "EXPLAIN SELECT * FROM TENANT_VIEW WHERE username = 'Joe' LIMIT 1"; + rs = conn.createStatement().executeQuery(query); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + + " SERVER FILTER BY USERNAME = 'Joe'\n" + " SERVER 1 ROW LIMIT\n" + + "CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs)); + query = "EXPLAIN SELECT * FROM TENANT_VIEW WHERE col = 'Joe' LIMIT 1"; + rs = conn.createStatement().executeQuery(query); + assertEquals( + "CLIENT PARALLEL 1-WAY RANGE SCAN OVER BASE_MULTI_TENANT_TABLE ['tenantId']\n" + + " SERVER FILTER BY COL = 'Joe'\n" + " SERVER 1 ROW LIMIT\n" + "CLIENT 1 ROW LIMIT", + QueryUtil.getExplainPlan(rs)); + } + + @Test + public void testDescTimestampAtBoundary() throws Exception { + Properties props = PropertiesUtil.deepCopy(new Properties()); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + conn.createStatement() + .execute("CREATE TABLE FOO(\n" + " a VARCHAR NOT NULL,\n" + + " b TIMESTAMP NOT NULL,\n" + " c VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + + " ) IMMUTABLE_ROWS=true\n" + " ,SALT_BUCKETS=20"); + String query = + "select * from foo where a = 'a' and b >= timestamp '2016-01-28 00:00:00' and b < timestamp '2016-01-29 00:00:00'"; + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); + String queryPlan = QueryUtil.getExplainPlan(rs); + // For real connection CQSI, the result is supposed to be 20-WAY RANGE SCAN, however + // for connection-less impl, since we retrieve region locations for 20 splits and each + // time we get all region locations due to connection-less specific impl, we get + // 20*20 = 400-WAY RANGE SCAN. 
+ assertEquals( + "CLIENT PARALLEL 400-WAY RANGE SCAN OVER FOO [X'00','a',~'2016-01-28 23:59:59.999'] - [X'13','a',~'2016-01-28 00:00:00.000']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + "CLIENT MERGE SORT", + queryPlan); + } finally { + conn.close(); } - - @Test - public void testUseOfRoundRobinIteratorSurfaced() throws Exception { - Properties props = PropertiesUtil.deepCopy(new Properties()); - props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); - Connection conn = DriverManager.getConnection(getUrl(), props); - String tableName = "testUseOfRoundRobinIteratorSurfaced".toUpperCase(); - try { - conn.createStatement().execute("CREATE TABLE " + tableName + "(\n" + - " a VARCHAR NOT NULL,\n" + - " b TIMESTAMP NOT NULL,\n" + - " c VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + - " ) IMMUTABLE_ROWS=true\n" + - " ,SALT_BUCKETS=20"); - String query = "select * from " + tableName + " where a = 'a' and b >= timestamp '2016-01-28 00:00:00' and b < timestamp '2016-01-29 00:00:00'"; - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); - String queryPlan = QueryUtil.getExplainPlan(rs); - // For real connection CQSI, the result is supposed to be 20-WAY RANGE SCAN, however - // for connection-less impl, since we retrieve region locations for 20 splits and each - // time we get all region locations due to connection-less specific impl, we get - // 20*20 = 400-WAY RANGE SCAN. - assertEquals( - "CLIENT PARALLEL 400-WAY ROUND ROBIN RANGE SCAN OVER " + tableName + " [X'00','a',~'2016-01-28 23:59:59.999'] - [X'13','a',~'2016-01-28 00:00:00.000']\n" + - " SERVER FILTER BY FIRST KEY ONLY", queryPlan); - } finally { - conn.close(); - } + } + + @Test + public void testUseOfRoundRobinIteratorSurfaced() throws Exception { + Properties props = PropertiesUtil.deepCopy(new Properties()); + props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false)); + Connection conn = DriverManager.getConnection(getUrl(), props); + String tableName = "testUseOfRoundRobinIteratorSurfaced".toUpperCase(); + try { + conn.createStatement() + .execute("CREATE TABLE " + tableName + "(\n" + " a VARCHAR NOT NULL,\n" + + " b TIMESTAMP NOT NULL,\n" + " c VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + + " ) IMMUTABLE_ROWS=true\n" + " ,SALT_BUCKETS=20"); + String query = "select * from " + tableName + + " where a = 'a' and b >= timestamp '2016-01-28 00:00:00' and b < timestamp '2016-01-29 00:00:00'"; + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); + String queryPlan = QueryUtil.getExplainPlan(rs); + // For real connection CQSI, the result is supposed to be 20-WAY RANGE SCAN, however + // for connection-less impl, since we retrieve region locations for 20 splits and each + // time we get all region locations due to connection-less specific impl, we get + // 20*20 = 400-WAY RANGE SCAN. 
+ assertEquals("CLIENT PARALLEL 400-WAY ROUND ROBIN RANGE SCAN OVER " + tableName + + " [X'00','a',~'2016-01-28 23:59:59.999'] - [X'13','a',~'2016-01-28 00:00:00.000']\n" + + " SERVER FILTER BY FIRST KEY ONLY", queryPlan); + } finally { + conn.close(); } - - @Test - public void testSerialHintIgnoredForNonRowkeyOrderBy() throws Exception { - - Properties props = PropertiesUtil.deepCopy(new Properties()); - Connection conn = DriverManager.getConnection(getUrl(), props); - try { - conn.createStatement().execute("CREATE TABLE FOO(\n" + - " a VARCHAR NOT NULL,\n" + - " b TIMESTAMP NOT NULL,\n" + - " c VARCHAR,\n" + - " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + - " )"); - String query = "select /*+ SERIAL*/ * from foo where a = 'a' ORDER BY b, c"; - ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); - String queryPlan = QueryUtil.getExplainPlan(rs); - assertEquals( - "CLIENT PARALLEL 1-WAY RANGE SCAN OVER FOO ['a']\n" + - " SERVER FILTER BY FIRST KEY ONLY\n" + - " SERVER SORTED BY [B, C]\n" + - "CLIENT MERGE SORT", queryPlan); - } finally { - conn.close(); - } - + } + + @Test + public void testSerialHintIgnoredForNonRowkeyOrderBy() throws Exception { + + Properties props = PropertiesUtil.deepCopy(new Properties()); + Connection conn = DriverManager.getConnection(getUrl(), props); + try { + conn.createStatement() + .execute("CREATE TABLE FOO(\n" + " a VARCHAR NOT NULL,\n" + + " b TIMESTAMP NOT NULL,\n" + " c VARCHAR,\n" + + " CONSTRAINT pk PRIMARY KEY (a, b DESC, c)\n" + " )"); + String query = "select /*+ SERIAL*/ * from foo where a = 'a' ORDER BY b, c"; + ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query); + String queryPlan = QueryUtil.getExplainPlan(rs); + assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER FOO ['a']\n" + + " SERVER FILTER BY FIRST KEY ONLY\n" + " SERVER SORTED BY [B, C]\n" + + "CLIENT MERGE SORT", queryPlan); + } finally { + conn.close(); } + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java index 1b9352f37d0..0779a72e0d4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,111 +27,109 @@ /** * QueryServices implementation to use for tests that do not execute queries - * - * * @since 0.1 */ public final class QueryServicesTestImpl extends BaseQueryServicesImpl { - private static final int DEFAULT_THREAD_POOL_SIZE = 10; - // TODO: setting this down to 5mb causes insufficient memory exceptions. 
Need to investigate why - private static final int DEFAULT_MAX_MEMORY_PERC = 30; // 30% of heap - private static final int DEFAULT_THREAD_TIMEOUT_MS = 60000*5; //5min - private static final int DEFAULT_SPOOL_THRESHOLD_BYTES = 1024 * 1024; // 1m - private static final int DEFAULT_MAX_TENANT_MEMORY_PERC = 100; - private static final int DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS = 60000 * 60; // 1HR (to prevent age-out of hash cache during debugging) - private static final long DEFAULT_MAX_HASH_CACHE_SIZE = 1024*1024*10; // 10 Mb - private static final boolean DEFAULT_DROP_METADATA = false; - - private static final int DEFAULT_MASTER_INFO_PORT = -1; - private static final int DEFAULT_REGIONSERVER_INFO_PORT = -1; - private static final int DEFAULT_REGIONSERVER_LEASE_PERIOD_MS = 9000000; - private static final int DEFAULT_RPC_TIMEOUT_MS = 9000000; - private static final String DEFAULT_WAL_EDIT_CODEC = IndexedWALEditCodec.class.getName(); - public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L*1024L*4L; // 4 Mb - public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L*1024L*2L; // 2 Mb - public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0; - public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = false; // TODO: update explain plans in test and set to true - public static final boolean DEFAULT_EXPLAIN_ROW_COUNT = false; // TODO: update explain plans in test and set to true - public static final String DEFAULT_EXTRA_JDBC_ARGUMENTS = PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM; - private static final boolean DEFAULT_RUN_UPDATE_STATS_ASYNC = false; - private static final boolean DEFAULT_COMMIT_STATS_ASYNC = false; - public static final int DEFAULT_INDEX_HANDLER_COUNT = 5; - public static final int DEFAULT_METADATA_HANDLER_COUNT = 5; - public static final int DEFAULT_HCONNECTION_POOL_CORE_SIZE = 10; - public static final int DEFAULT_HCONNECTION_POOL_MAX_SIZE = 10; - public static final int DEFAULT_HTABLE_MAX_THREADS = 10; - public static final long DEFAULT_INDEX_POPULATION_WAIT_TIME = 0; - public static final long DEFAULT_SEQUENCE_CACHE_SIZE = 3; - public static final boolean DEFAULT_TRANSACTIONS_ENABLED = true; - public static final int DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE = 1000; - /* - * Effectively disable running the index rebuild task by having an infinite delay - * because we want to control it's execution ourselves - */ - public static final long DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY = Long.MAX_VALUE; - public static final int DEFAULT_TXN_TIMEOUT_SECONDS = 30; + private static final int DEFAULT_THREAD_POOL_SIZE = 10; + // TODO: setting this down to 5mb causes insufficient memory exceptions. Need to investigate why + private static final int DEFAULT_MAX_MEMORY_PERC = 30; // 30% of heap + private static final int DEFAULT_THREAD_TIMEOUT_MS = 60000 * 5; // 5min + private static final int DEFAULT_SPOOL_THRESHOLD_BYTES = 1024 * 1024; // 1m + private static final int DEFAULT_MAX_TENANT_MEMORY_PERC = 100; + private static final int DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS = 60000 * 60; // 1HR (to prevent + // age-out of hash + // cache during + // debugging) + private static final long DEFAULT_MAX_HASH_CACHE_SIZE = 1024 * 1024 * 10; // 10 Mb + private static final boolean DEFAULT_DROP_METADATA = false; - - /** - * Set number of salt buckets lower for sequence table during testing, as a high - * value overwhelms our mini clusters. 
- */ - public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 4; - public static final boolean DEFAULT_FORCE_ROWKEY_ORDER = true; + private static final int DEFAULT_MASTER_INFO_PORT = -1; + private static final int DEFAULT_REGIONSERVER_INFO_PORT = -1; + private static final int DEFAULT_REGIONSERVER_LEASE_PERIOD_MS = 9000000; + private static final int DEFAULT_RPC_TIMEOUT_MS = 9000000; + private static final String DEFAULT_WAL_EDIT_CODEC = IndexedWALEditCodec.class.getName(); + public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L * 1024L * 4L; // 4 Mb + public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L * 1024L * 2L; // 2 Mb + public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0; + public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = false; // TODO: update explain plans in + // test and set to true + public static final boolean DEFAULT_EXPLAIN_ROW_COUNT = false; // TODO: update explain plans in + // test and set to true + public static final String DEFAULT_EXTRA_JDBC_ARGUMENTS = + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM; + private static final boolean DEFAULT_RUN_UPDATE_STATS_ASYNC = false; + private static final boolean DEFAULT_COMMIT_STATS_ASYNC = false; + public static final int DEFAULT_INDEX_HANDLER_COUNT = 5; + public static final int DEFAULT_METADATA_HANDLER_COUNT = 5; + public static final int DEFAULT_HCONNECTION_POOL_CORE_SIZE = 10; + public static final int DEFAULT_HCONNECTION_POOL_MAX_SIZE = 10; + public static final int DEFAULT_HTABLE_MAX_THREADS = 10; + public static final long DEFAULT_INDEX_POPULATION_WAIT_TIME = 0; + public static final long DEFAULT_SEQUENCE_CACHE_SIZE = 3; + public static final boolean DEFAULT_TRANSACTIONS_ENABLED = true; + public static final int DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE = 1000; + /* + * Effectively disable running the index rebuild task by having an infinite delay because we want + * to control it's execution ourselves + */ + public static final long DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY = Long.MAX_VALUE; + public static final int DEFAULT_TXN_TIMEOUT_SECONDS = 30; - - public QueryServicesTestImpl(ReadOnlyProps defaultProps) { - this(defaultProps, ReadOnlyProps.EMPTY_PROPS); - } - - private static QueryServicesOptions getDefaultServicesOptions() { - String txSnapshotDir; - try { - txSnapshotDir = TestUtil.createTempDirectory().toString(); - } catch (Exception e) { - throw new RuntimeException("Could not create tx snapshot directory", e); - } - return withDefaults() - .setSequenceCacheSize(DEFAULT_SEQUENCE_CACHE_SIZE) - .setTransactionsEnabled(DEFAULT_TRANSACTIONS_ENABLED) - .setExplainChunkCount(DEFAULT_EXPLAIN_CHUNK_COUNT) - .setExplainRowCount(DEFAULT_EXPLAIN_ROW_COUNT) - .setSequenceSaltBuckets(DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS) - .setMinStatsUpdateFrequencyMs(DEFAULT_MIN_STATS_UPDATE_FREQ_MS) - .setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE) - .setMaxMemoryPerc(DEFAULT_MAX_MEMORY_PERC) - .setThreadTimeoutMs(DEFAULT_THREAD_TIMEOUT_MS) - .setClientSpoolThresholdBytes(DEFAULT_SPOOL_THRESHOLD_BYTES) - .setServerSpoolThresholdBytes(DEFAULT_SPOOL_THRESHOLD_BYTES) - .setSpoolDirectory(DEFAULT_SPOOL_DIRECTORY) - .setMaxTenantMemoryPerc(DEFAULT_MAX_TENANT_MEMORY_PERC) - .setMaxServerCacheSize(DEFAULT_MAX_HASH_CACHE_SIZE) - .setMaxServerCacheTTLMs(DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS) - .setMasterInfoPort(DEFAULT_MASTER_INFO_PORT) - .setRegionServerInfoPort(DEFAULT_REGIONSERVER_INFO_PORT) - .setRegionServerLeasePeriodMs(DEFAULT_REGIONSERVER_LEASE_PERIOD_MS) - 
.setRpcTimeoutMs(DEFAULT_RPC_TIMEOUT_MS) - .setWALEditCodec(DEFAULT_WAL_EDIT_CODEC) - .setDropMetaData(DEFAULT_DROP_METADATA) - .setMaxClientMetaDataCacheSize(DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE) - .setMaxServerMetaDataCacheSize(DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE) - .setForceRowKeyOrder(DEFAULT_FORCE_ROWKEY_ORDER) - .setExtraJDBCArguments(DEFAULT_EXTRA_JDBC_ARGUMENTS) - .setRunUpdateStatsAsync(DEFAULT_RUN_UPDATE_STATS_ASYNC) - .setCommitStatsAsync(DEFAULT_COMMIT_STATS_ASYNC) - .setIndexHandlerCount(DEFAULT_INDEX_HANDLER_COUNT) - .setMetadataHandlerCount(DEFAULT_METADATA_HANDLER_COUNT) - .setHConnectionPoolCoreSize(DEFAULT_HCONNECTION_POOL_CORE_SIZE) - .setHConnectionPoolMaxSize(DEFAULT_HCONNECTION_POOL_MAX_SIZE) - .setMaxThreadsPerHTable(DEFAULT_HTABLE_MAX_THREADS) - .setDefaultIndexPopulationWaitTime(DEFAULT_INDEX_POPULATION_WAIT_TIME) - .setIndexRebuildTaskInitialDelay(DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY) - .set(AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB, DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE) - ; + /** + * Set number of salt buckets lower for sequence table during testing, as a high value overwhelms + * our mini clusters. + */ + public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 4; + public static final boolean DEFAULT_FORCE_ROWKEY_ORDER = true; + + public QueryServicesTestImpl(ReadOnlyProps defaultProps) { + this(defaultProps, ReadOnlyProps.EMPTY_PROPS); + } + + private static QueryServicesOptions getDefaultServicesOptions() { + String txSnapshotDir; + try { + txSnapshotDir = TestUtil.createTempDirectory().toString(); + } catch (Exception e) { + throw new RuntimeException("Could not create tx snapshot directory", e); } - - public QueryServicesTestImpl(ReadOnlyProps defaultProps, ReadOnlyProps overrideProps) { - super(defaultProps, getDefaultServicesOptions().setAll(overrideProps)); - } + return withDefaults().setSequenceCacheSize(DEFAULT_SEQUENCE_CACHE_SIZE) + .setTransactionsEnabled(DEFAULT_TRANSACTIONS_ENABLED) + .setExplainChunkCount(DEFAULT_EXPLAIN_CHUNK_COUNT) + .setExplainRowCount(DEFAULT_EXPLAIN_ROW_COUNT) + .setSequenceSaltBuckets(DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS) + .setMinStatsUpdateFrequencyMs(DEFAULT_MIN_STATS_UPDATE_FREQ_MS) + .setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE).setMaxMemoryPerc(DEFAULT_MAX_MEMORY_PERC) + .setThreadTimeoutMs(DEFAULT_THREAD_TIMEOUT_MS) + .setClientSpoolThresholdBytes(DEFAULT_SPOOL_THRESHOLD_BYTES) + .setServerSpoolThresholdBytes(DEFAULT_SPOOL_THRESHOLD_BYTES) + .setSpoolDirectory(DEFAULT_SPOOL_DIRECTORY) + .setMaxTenantMemoryPerc(DEFAULT_MAX_TENANT_MEMORY_PERC) + .setMaxServerCacheSize(DEFAULT_MAX_HASH_CACHE_SIZE) + .setMaxServerCacheTTLMs(DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS) + .setMasterInfoPort(DEFAULT_MASTER_INFO_PORT) + .setRegionServerInfoPort(DEFAULT_REGIONSERVER_INFO_PORT) + .setRegionServerLeasePeriodMs(DEFAULT_REGIONSERVER_LEASE_PERIOD_MS) + .setRpcTimeoutMs(DEFAULT_RPC_TIMEOUT_MS).setWALEditCodec(DEFAULT_WAL_EDIT_CODEC) + .setDropMetaData(DEFAULT_DROP_METADATA) + .setMaxClientMetaDataCacheSize(DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE) + .setMaxServerMetaDataCacheSize(DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE) + .setForceRowKeyOrder(DEFAULT_FORCE_ROWKEY_ORDER) + .setExtraJDBCArguments(DEFAULT_EXTRA_JDBC_ARGUMENTS) + .setRunUpdateStatsAsync(DEFAULT_RUN_UPDATE_STATS_ASYNC) + .setCommitStatsAsync(DEFAULT_COMMIT_STATS_ASYNC) + .setIndexHandlerCount(DEFAULT_INDEX_HANDLER_COUNT) + .setMetadataHandlerCount(DEFAULT_METADATA_HANDLER_COUNT) + .setHConnectionPoolCoreSize(DEFAULT_HCONNECTION_POOL_CORE_SIZE) + 
.setHConnectionPoolMaxSize(DEFAULT_HCONNECTION_POOL_MAX_SIZE) + .setMaxThreadsPerHTable(DEFAULT_HTABLE_MAX_THREADS) + .setDefaultIndexPopulationWaitTime(DEFAULT_INDEX_POPULATION_WAIT_TIME) + .setIndexRebuildTaskInitialDelay(DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY) + .set(AGGREGATE_CHUNK_SIZE_INCREASE_ATTRIB, DEFAULT_AGGREGATE_CHUNK_SIZE_INCREASE); + } + + public QueryServicesTestImpl(ReadOnlyProps defaultProps, ReadOnlyProps overrideProps) { + super(defaultProps, getDefaultServicesOptions().setAll(overrideProps)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ScannerLeaseRenewalTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ScannerLeaseRenewalTest.java index 2969fdc31a9..7295c469c51 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/ScannerLeaseRenewalTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ScannerLeaseRenewalTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,95 +37,108 @@ import org.junit.Test; public class ScannerLeaseRenewalTest extends BaseConnectionlessQueryTest { - - @Test - public void testRenewLeaseTaskBehavior() throws Exception { - // add connection to the queue - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - LinkedBlockingQueue> connectionsQueue = new LinkedBlockingQueue<>(); - connectionsQueue.add(new WeakReference(pconn)); - - // create a scanner and add it to the queue - int numLeaseRenewals = 4; - int skipRenewLeaseCount = 2; - int failToAcquireLockAt = 3; - RenewLeaseOnlyTableIterator itr = new RenewLeaseOnlyTableIterator(numLeaseRenewals, skipRenewLeaseCount, failToAcquireLockAt, -1); - LinkedBlockingQueue> scannerQueue = pconn.getScanners(); - scannerQueue.add(new WeakReference(itr)); - - RenewLeaseTask task = new RenewLeaseTask(connectionsQueue); - assertTrue(connectionsQueue.size() == 1); - assertTrue(scannerQueue.size() == 1); - - task.run(); - assertTrue(connectionsQueue.size() == 1); - assertTrue(scannerQueue.size() == 1); // lease renewed - assertEquals(RENEWED, itr.getLastRenewLeaseStatus()); - - task.run(); - assertTrue(scannerQueue.size() == 1); - assertTrue(connectionsQueue.size() == 1); // renew lease skipped but scanner still in the queue - assertEquals(THRESHOLD_NOT_REACHED, itr.getLastRenewLeaseStatus()); - - task.run(); - assertTrue(scannerQueue.size() == 1); - assertTrue(connectionsQueue.size() == 1); - assertEquals(LOCK_NOT_ACQUIRED, itr.getLastRenewLeaseStatus()); // lock couldn't be acquired - - task.run(); - assertTrue(scannerQueue.size() == 1); - assertTrue(connectionsQueue.size() == 1); - assertEquals(RENEWED, itr.getLastRenewLeaseStatus()); // lease renewed - - task.run(); - assertTrue(scannerQueue.size() == 0); - assertTrue(connectionsQueue.size() == 1); - assertEquals(CLOSED, itr.getLastRenewLeaseStatus()); // scanner closed and removed from the queue - - pconn.close(); - task.run(); - assertTrue(scannerQueue.size() == 0); - assertTrue("Closing the connection should have removed it from the queue", connectionsQueue.size() == 0); - } - - @Test - public void testRenewLeaseTaskBehaviorOnError() throws Exception { - // add connection to the 
queue - PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class); - LinkedBlockingQueue> connectionsQueue = new LinkedBlockingQueue<>(); - connectionsQueue.add(new WeakReference(pconn)); - - // create a scanner and add it to the queue - int numLeaseRenewals = 4; - int lockNotAcquiredAt = 1; - int thresholdNotReachedCount = 2; - int failLeaseRenewalAt = 3; - RenewLeaseOnlyTableIterator itr = new RenewLeaseOnlyTableIterator(numLeaseRenewals, thresholdNotReachedCount, lockNotAcquiredAt, failLeaseRenewalAt); - LinkedBlockingQueue> scannerQueue = pconn.getScanners(); - scannerQueue.add(new WeakReference(itr)); - - RenewLeaseTask task = new RenewLeaseTask(connectionsQueue); - assertTrue(connectionsQueue.size() == 1); - assertTrue(scannerQueue.size() == 1); - - task.run(); - assertTrue(connectionsQueue.size() == 1); - assertTrue(scannerQueue.size() == 1); // lock not acquired - assertEquals(LOCK_NOT_ACQUIRED, itr.getLastRenewLeaseStatus()); - - task.run(); - assertTrue(scannerQueue.size() == 1); - assertTrue(connectionsQueue.size() == 1); // renew lease skipped but scanner still in the queue - assertEquals(THRESHOLD_NOT_REACHED, itr.getLastRenewLeaseStatus()); - - task.run(); - assertTrue(scannerQueue.size() == 0); - assertTrue(connectionsQueue.size() == 0); // there was only one connection in the connectionsQueue and it wasn't added back because of error - - pconn.close(); - task.run(); - assertTrue(scannerQueue.size() == 0); - assertTrue("Closing the connection should have removed it from the queue", connectionsQueue.size() == 0); - } - -} \ No newline at end of file + + @Test + public void testRenewLeaseTaskBehavior() throws Exception { + // add connection to the queue + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + LinkedBlockingQueue> connectionsQueue = + new LinkedBlockingQueue<>(); + connectionsQueue.add(new WeakReference(pconn)); + + // create a scanner and add it to the queue + int numLeaseRenewals = 4; + int skipRenewLeaseCount = 2; + int failToAcquireLockAt = 3; + RenewLeaseOnlyTableIterator itr = new RenewLeaseOnlyTableIterator(numLeaseRenewals, + skipRenewLeaseCount, failToAcquireLockAt, -1); + LinkedBlockingQueue> scannerQueue = pconn.getScanners(); + scannerQueue.add(new WeakReference(itr)); + + RenewLeaseTask task = new RenewLeaseTask(connectionsQueue); + assertTrue(connectionsQueue.size() == 1); + assertTrue(scannerQueue.size() == 1); + + task.run(); + assertTrue(connectionsQueue.size() == 1); + assertTrue(scannerQueue.size() == 1); // lease renewed + assertEquals(RENEWED, itr.getLastRenewLeaseStatus()); + + task.run(); + assertTrue(scannerQueue.size() == 1); + assertTrue(connectionsQueue.size() == 1); // renew lease skipped but scanner still in the queue + assertEquals(THRESHOLD_NOT_REACHED, itr.getLastRenewLeaseStatus()); + + task.run(); + assertTrue(scannerQueue.size() == 1); + assertTrue(connectionsQueue.size() == 1); + assertEquals(LOCK_NOT_ACQUIRED, itr.getLastRenewLeaseStatus()); // lock couldn't be acquired + + task.run(); + assertTrue(scannerQueue.size() == 1); + assertTrue(connectionsQueue.size() == 1); + assertEquals(RENEWED, itr.getLastRenewLeaseStatus()); // lease renewed + + task.run(); + assertTrue(scannerQueue.size() == 0); + assertTrue(connectionsQueue.size() == 1); + assertEquals(CLOSED, itr.getLastRenewLeaseStatus()); // scanner closed and removed from the + // queue + + 
pconn.close(); + task.run(); + assertTrue(scannerQueue.size() == 0); + assertTrue("Closing the connection should have removed it from the queue", + connectionsQueue.size() == 0); + } + + @Test + public void testRenewLeaseTaskBehaviorOnError() throws Exception { + // add connection to the queue + PhoenixConnection pconn = + DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)) + .unwrap(PhoenixConnection.class); + LinkedBlockingQueue> connectionsQueue = + new LinkedBlockingQueue<>(); + connectionsQueue.add(new WeakReference(pconn)); + + // create a scanner and add it to the queue + int numLeaseRenewals = 4; + int lockNotAcquiredAt = 1; + int thresholdNotReachedCount = 2; + int failLeaseRenewalAt = 3; + RenewLeaseOnlyTableIterator itr = new RenewLeaseOnlyTableIterator(numLeaseRenewals, + thresholdNotReachedCount, lockNotAcquiredAt, failLeaseRenewalAt); + LinkedBlockingQueue> scannerQueue = pconn.getScanners(); + scannerQueue.add(new WeakReference(itr)); + + RenewLeaseTask task = new RenewLeaseTask(connectionsQueue); + assertTrue(connectionsQueue.size() == 1); + assertTrue(scannerQueue.size() == 1); + + task.run(); + assertTrue(connectionsQueue.size() == 1); + assertTrue(scannerQueue.size() == 1); // lock not acquired + assertEquals(LOCK_NOT_ACQUIRED, itr.getLastRenewLeaseStatus()); + + task.run(); + assertTrue(scannerQueue.size() == 1); + assertTrue(connectionsQueue.size() == 1); // renew lease skipped but scanner still in the queue + assertEquals(THRESHOLD_NOT_REACHED, itr.getLastRenewLeaseStatus()); + + task.run(); + assertTrue(scannerQueue.size() == 0); + assertTrue(connectionsQueue.size() == 0); // there was only one connection in the + // connectionsQueue and it wasn't added back because + // of error + + pconn.close(); + task.run(); + assertTrue(scannerQueue.size() == 0); + assertTrue("Closing the connection should have removed it from the queue", + connectionsQueue.size() == 0); + } + +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/TestPropertyPolicy.java b/phoenix-core/src/test/java/org/apache/phoenix/query/TestPropertyPolicy.java index 1835437382d..b52ab91bd85 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/query/TestPropertyPolicy.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/query/TestPropertyPolicy.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,9 +32,10 @@ */ public class TestPropertyPolicy implements PropertyPolicy { final static Set propertiesKeyDisAllowed = - Collections.unmodifiableSet(new HashSet<>(asList("DisallowedProperty"))); + Collections.unmodifiableSet(new HashSet<>(asList("DisallowedProperty"))); - @Override public void evaluate(Properties properties) throws PropertyNotAllowedException { + @Override + public void evaluate(Properties properties) throws PropertyNotAllowedException { final Properties offendingProperties = new Properties(); for (Object k : properties.keySet()) { @@ -44,4 +45,4 @@ public class TestPropertyPolicy implements PropertyPolicy { if (offendingProperties.size() > 0) throw new PropertyNotAllowedException(offendingProperties); } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/ImmutableStorageSchemeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/ImmutableStorageSchemeTest.java index da14e2d0916..6206a89ca26 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/ImmutableStorageSchemeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/ImmutableStorageSchemeTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -45,331 +45,344 @@ import org.apache.phoenix.schema.types.PTinyint; import org.apache.phoenix.schema.types.PUnsignedTinyint; import org.apache.phoenix.schema.types.PVarbinary; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - @RunWith(Parameterized.class) public class ImmutableStorageSchemeTest { - - protected static final LiteralExpression CONSTANT_EXPRESSION = LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - protected static final byte[] BYTE_ARRAY1 = new byte[]{1,2,3,4,5}; - protected static final byte[] BYTE_ARRAY2 = new byte[]{6,7,8}; - protected Expression FALSE_EVAL_EXPRESSION = new DelegateExpression(LiteralExpression.newConstant(null)) { - @Override - public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { - return false; - } + + protected static final LiteralExpression CONSTANT_EXPRESSION = + LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + protected static final byte[] BYTE_ARRAY1 = new byte[] { 1, 2, 3, 4, 5 }; + protected static final byte[] BYTE_ARRAY2 = new byte[] { 6, 7, 8 }; + protected Expression FALSE_EVAL_EXPRESSION = + new DelegateExpression(LiteralExpression.newConstant(null)) { + @Override + public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) { + return false; + } }; - private ImmutableStorageScheme immutableStorageScheme; - byte serializationVersion; - - 
@Parameters(name="ImmutableStorageSchemeTest_immutableStorageScheme={0},serializationVersion={1}}") // name is used by failsafe as file name in reports - public static synchronized List data() { - return Arrays.asList(new Object[][] { - { SINGLE_CELL_ARRAY_WITH_OFFSETS, - IMMUTABLE_SERIALIZATION_VERSION }, - { SINGLE_CELL_ARRAY_WITH_OFFSETS, - IMMUTABLE_SERIALIZATION_V2 } - }); - } - - public ImmutableStorageSchemeTest(ImmutableStorageScheme immutableStorageScheme, byte serializationVersion) { - this.immutableStorageScheme = immutableStorageScheme; - this.immutableStorageScheme.setSerializationVersion(serializationVersion); - this.serializationVersion = serializationVersion; - } + private ImmutableStorageScheme immutableStorageScheme; + byte serializationVersion; - @Test - public void testWithExpressionsThatEvaluatetoFalse() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - children.add(CONSTANT_EXPRESSION); - children.add(FALSE_EVAL_EXPRESSION); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(FALSE_EVAL_EXPRESSION); - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - ImmutableBytesPtr ptr = evaluate(children); - - ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); - ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); - assertTrue(decoder.decode(ptrCopy, 0)); - assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - ptrCopy = new ImmutableBytesPtr(ptr); - assertFalse(decoder.decode(ptrCopy, 1)); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); - ptrCopy = new ImmutableBytesPtr(ptr); - assertTrue(decoder.decode(ptrCopy, 2)); - assertArrayEquals(BYTE_ARRAY1, ptrCopy.copyBytesIfNecessary()); - ptrCopy = new ImmutableBytesPtr(ptr); - assertFalse(decoder.decode(ptrCopy, 3)); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); - ptrCopy = new ImmutableBytesPtr(ptr); - assertTrue(decoder.decode(ptrCopy, 4)); - assertArrayEquals(BYTE_ARRAY2, ptrCopy.copyBytesIfNecessary()); - } - - @Test - public void testWithMaxOffsetLargerThanShortMax() throws Exception { - int numElements = Short.MAX_VALUE+2; - List children = Lists.newArrayListWithExpectedSize(numElements); - for (int i=0; i children = Lists.newArrayListWithExpectedSize(numElements); - for (int i=0; i<=numElements; i+=2) { - children.add(CONSTANT_EXPRESSION); - children.add(FALSE_EVAL_EXPRESSION); - } - SingleCellConstructorExpression singleCellConstructorExpression = new SingleCellConstructorExpression(immutableStorageScheme, children); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - singleCellConstructorExpression.evaluate(null, ptr); - - ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); - ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); - assertTrue(decoder.decode(ptrCopy, 0)); - assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - - ptrCopy = new ImmutableBytesPtr(ptr); - assertFalse(decoder.decode(ptrCopy, 1)); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); - - ptrCopy = new ImmutableBytesPtr(ptr); - assertTrue(decoder.decode(ptrCopy, numElements-1)); - assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - - ptrCopy = new ImmutableBytesPtr(ptr); - assertFalse(decoder.decode(ptrCopy, numElements)); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); - } 
- - @Test - public void testLeadingNulls() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(nullExpression); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - ImmutableBytesPtr ptr = evaluate(children); - - assertDecodedContents(ptr, new byte[][] {EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY, BYTE_ARRAY1, BYTE_ARRAY2}); - } + @Parameters( + name = "ImmutableStorageSchemeTest_immutableStorageScheme={0},serializationVersion={1}}") // name + // is + // used + // by + // failsafe + // as + // file + // name + // in + // reports + public static synchronized List data() { + return Arrays + .asList(new Object[][] { { SINGLE_CELL_ARRAY_WITH_OFFSETS, IMMUTABLE_SERIALIZATION_VERSION }, + { SINGLE_CELL_ARRAY_WITH_OFFSETS, IMMUTABLE_SERIALIZATION_V2 } }); + } - @Test - public void testTrailingNulls() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - children.add(nullExpression); - children.add(nullExpression); - ImmutableBytesPtr ptr = evaluate(children); - - assertDecodedContents(ptr, new byte[][] {BYTE_ARRAY1, BYTE_ARRAY2, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY}); - } + public ImmutableStorageSchemeTest(ImmutableStorageScheme immutableStorageScheme, + byte serializationVersion) { + this.immutableStorageScheme = immutableStorageScheme; + this.immutableStorageScheme.setSerializationVersion(serializationVersion); + this.serializationVersion = serializationVersion; + } - @Test - public void testManyNulls() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - byte[][] testData = new byte[300][]; - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - testData[0] = BYTE_ARRAY1; - for (int i = 1; i < testData.length - 1; i++) { - children.add(nullExpression); - testData[i] = EMPTY_BYTE_ARRAY; - } - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - testData[299] = BYTE_ARRAY2; - ImmutableBytesPtr ptr = evaluate(children); + @Test + public void testWithExpressionsThatEvaluatetoFalse() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + children.add(CONSTANT_EXPRESSION); + children.add(FALSE_EVAL_EXPRESSION); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(FALSE_EVAL_EXPRESSION); + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + ImmutableBytesPtr ptr = evaluate(children); - assertDecodedContents(ptr, testData); - } + ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + assertTrue(decoder.decode(ptrCopy, 0)); + assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); + ptrCopy = new ImmutableBytesPtr(ptr); + assertFalse(decoder.decode(ptrCopy, 1)); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); + ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, 2)); + assertArrayEquals(BYTE_ARRAY1, 
ptrCopy.copyBytesIfNecessary()); + ptrCopy = new ImmutableBytesPtr(ptr); + assertFalse(decoder.decode(ptrCopy, 3)); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); + ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, 4)); + assertArrayEquals(BYTE_ARRAY2, ptrCopy.copyBytesIfNecessary()); + } - @Test - public void testSingleLeadingTrailingNull() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(nullExpression); - ImmutableBytesPtr ptr = evaluate(children); - - assertDecodedContents(ptr, - new byte[][] { EMPTY_BYTE_ARRAY, BYTE_ARRAY1, EMPTY_BYTE_ARRAY }); + @Test + public void testWithMaxOffsetLargerThanShortMax() throws Exception { + int numElements = Short.MAX_VALUE + 2; + List children = Lists.newArrayListWithExpectedSize(numElements); + for (int i = 0; i < numElements; ++i) { + children.add(CONSTANT_EXPRESSION); } + SingleCellConstructorExpression singleCellConstructorExpression = + new SingleCellConstructorExpression(immutableStorageScheme, children); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + singleCellConstructorExpression.evaluate(null, ptr); - @Test - public void testSingleMiddleNull() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); - ImmutableBytesPtr ptr = evaluate(children); + ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + assertTrue(decoder.decode(ptrCopy, 0)); + assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - assertDecodedContents(ptr, new byte[][] { BYTE_ARRAY1, EMPTY_BYTE_ARRAY, BYTE_ARRAY2 }); - } + ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, 14999)); + assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - @Test - public void testAllShortValues() throws Exception { - int curr = Short.MIN_VALUE; - List children = Lists.newArrayListWithExpectedSize(1); - List failedValues = Lists.newArrayList(); - while (curr <= Short.MAX_VALUE) { - children.add(LiteralExpression.newConstant(curr, PSmallint.INSTANCE)); - ImmutableBytesPtr ptr = evaluate(children); - ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); - assertTrue(decoder.decode(ptr, 0)); - if (ptr.getLength() == 0) { - failedValues.add(curr); - } else { - if (curr != PSmallint.INSTANCE.getCodec().decodeShort(ptr.copyBytesIfNecessary(), 0, - SortOrder.ASC)) { - failedValues.add(curr); - } - } - children.remove(0); - curr++; - } - // in v1, we can't distinguish a null from two short values - if (serializationVersion == IMMUTABLE_SERIALIZATION_VERSION) { - assertTrue(failedValues.size() + " values were not properly decoded: " + failedValues, - failedValues.size() == 2); - } else { - assertTrue(failedValues.size() + " values were not properly decoded: " + failedValues, - failedValues.size() == 0); - } + ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, numElements - 1)); + 
assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); + } + + @Test + public void testWithMaxOffsetSmallerThanShortMin() throws Exception { + int numElements = Short.MAX_VALUE + 2; + List children = Lists.newArrayListWithExpectedSize(numElements); + for (int i = 0; i <= numElements; i += 2) { + children.add(CONSTANT_EXPRESSION); + children.add(FALSE_EVAL_EXPRESSION); } + SingleCellConstructorExpression singleCellConstructorExpression = + new SingleCellConstructorExpression(immutableStorageScheme, children); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + singleCellConstructorExpression.evaluate(null, ptr); + + ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + assertTrue(decoder.decode(ptrCopy, 0)); + assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); + + ptrCopy = new ImmutableBytesPtr(ptr); + assertFalse(decoder.decode(ptrCopy, 1)); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); + + ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, numElements - 1)); + assertArrayEquals(QueryConstants.EMPTY_COLUMN_VALUE_BYTES, ptrCopy.copyBytesIfNecessary()); - @Test - public void testSingleByteValues() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((byte) -128, PTinyint.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((byte) 0, PUnsignedTinyint.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((byte) 127, PUnsignedTinyint.INSTANCE)); - ImmutableBytesPtr ptr = evaluate(children); - - assertNullAtIndex(ptr, 0); - assertValueAtIndex(ptr, 1, (byte) -128, PTinyint.INSTANCE); - assertNullAtIndex(ptr, 2); - assertValueAtIndex(ptr, 3, (byte) 0, PUnsignedTinyint.INSTANCE); - assertNullAtIndex(ptr, 4); - assertValueAtIndex(ptr, 5, (byte) 127, PUnsignedTinyint.INSTANCE); + ptrCopy = new ImmutableBytesPtr(ptr); + assertFalse(decoder.decode(ptrCopy, numElements)); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); + } + + @Test + public void testLeadingNulls() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(nullExpression); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + ImmutableBytesPtr ptr = evaluate(children); + + assertDecodedContents(ptr, + new byte[][] { EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY, BYTE_ARRAY1, BYTE_ARRAY2 }); + } + + @Test + public void testTrailingNulls() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + children.add(nullExpression); + children.add(nullExpression); + ImmutableBytesPtr ptr = evaluate(children); + + assertDecodedContents(ptr, + new byte[][] { BYTE_ARRAY1, BYTE_ARRAY2, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY }); + } + + @Test + public void testManyNulls() throws Exception { + 
List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + byte[][] testData = new byte[300][]; + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + testData[0] = BYTE_ARRAY1; + for (int i = 1; i < testData.length - 1; i++) { + children.add(nullExpression); + testData[i] = EMPTY_BYTE_ARRAY; } + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + testData[299] = BYTE_ARRAY2; + ImmutableBytesPtr ptr = evaluate(children); - @Test - public void testSeparatorByteValues() throws Exception { - List children = Lists.newArrayListWithExpectedSize(4); - LiteralExpression nullExpression = LiteralExpression.newConstant(null); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((short) -32513, PSmallint.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((short) 32767, PSmallint.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(Integer.MAX_VALUE, PInteger.INSTANCE)); - children.add(nullExpression); - children.add(LiteralExpression.newConstant(Integer.MIN_VALUE, PInteger.INSTANCE)); - // see if we can differentiate two nulls and {separatorByte, 2} - children.add(nullExpression); - children.add(nullExpression); - children.add(LiteralExpression.newConstant((short) -32514, PSmallint.INSTANCE)); - - ImmutableBytesPtr ptr = evaluate(children); - - assertNullAtIndex(ptr, 0); - try { - assertValueAtIndex(ptr, 1, (short) -32513, PSmallint.INSTANCE); - } catch (Exception e) { - if (serializationVersion != IMMUTABLE_SERIALIZATION_VERSION) { - fail("Failed on exception " + e); - } - } - assertNullAtIndex(ptr, 2); - try { - assertValueAtIndex(ptr, 3, (short) 32767, PSmallint.INSTANCE); - } catch (Exception e) { - if (serializationVersion != IMMUTABLE_SERIALIZATION_VERSION) { - fail("Failed on exception " + e); - } + assertDecodedContents(ptr, testData); + } + + @Test + public void testSingleLeadingTrailingNull() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(nullExpression); + ImmutableBytesPtr ptr = evaluate(children); + + assertDecodedContents(ptr, new byte[][] { EMPTY_BYTE_ARRAY, BYTE_ARRAY1, EMPTY_BYTE_ARRAY }); + } + + @Test + public void testSingleMiddleNull() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(LiteralExpression.newConstant(BYTE_ARRAY1, PVarbinary.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(BYTE_ARRAY2, PVarbinary.INSTANCE)); + ImmutableBytesPtr ptr = evaluate(children); + + assertDecodedContents(ptr, new byte[][] { BYTE_ARRAY1, EMPTY_BYTE_ARRAY, BYTE_ARRAY2 }); + } + + @Test + public void testAllShortValues() throws Exception { + int curr = Short.MIN_VALUE; + List children = Lists.newArrayListWithExpectedSize(1); + List failedValues = Lists.newArrayList(); + while (curr <= Short.MAX_VALUE) { + children.add(LiteralExpression.newConstant(curr, PSmallint.INSTANCE)); + ImmutableBytesPtr ptr = evaluate(children); + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + assertTrue(decoder.decode(ptr, 0)); + if (ptr.getLength() == 0) { + failedValues.add(curr); + } else { + if 
( + curr != PSmallint.INSTANCE.getCodec().decodeShort(ptr.copyBytesIfNecessary(), 0, + SortOrder.ASC) + ) { + failedValues.add(curr); } - assertNullAtIndex(ptr, 4); - assertValueAtIndex(ptr, 5, Integer.MAX_VALUE, PInteger.INSTANCE); - assertNullAtIndex(ptr, 6); - assertValueAtIndex(ptr, 7, Integer.MIN_VALUE, PInteger.INSTANCE); - assertNullAtIndex(ptr, 8); - assertNullAtIndex(ptr, 9); - assertValueAtIndex(ptr, 10, (short) -32514, PSmallint.INSTANCE); + } + children.remove(0); + curr++; } - - private void assertNullAtIndex(ImmutableBytesPtr ptr, int index) { - assertValueAtIndex(ptr, index, null, null); + // in v1, we can't distinguish a null from two short values + if (serializationVersion == IMMUTABLE_SERIALIZATION_VERSION) { + assertTrue(failedValues.size() + " values were not properly decoded: " + failedValues, + failedValues.size() == 2); + } else { + assertTrue(failedValues.size() + " values were not properly decoded: " + failedValues, + failedValues.size() == 0); } + } - private void assertValueAtIndex(ImmutableBytesPtr ptr, int index, Object value, - PDataType type) { - ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); - ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); - assertTrue(decoder.decode(ptrCopy, index)); - if (value == null) { - assertArrayEquals(EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); - return; - } - Object decoded; - if (type.equals(PSmallint.INSTANCE)) { - decoded = type.getCodec().decodeShort(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); - } else if (type.equals(PInteger.INSTANCE)) { - decoded = type.getCodec().decodeInt(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); - } else { // assume byte for all other types - decoded = type.getCodec().decodeByte(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); - } - assertEquals(value, decoded); + @Test + public void testSingleByteValues() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((byte) -128, PTinyint.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((byte) 0, PUnsignedTinyint.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((byte) 127, PUnsignedTinyint.INSTANCE)); + ImmutableBytesPtr ptr = evaluate(children); + + assertNullAtIndex(ptr, 0); + assertValueAtIndex(ptr, 1, (byte) -128, PTinyint.INSTANCE); + assertNullAtIndex(ptr, 2); + assertValueAtIndex(ptr, 3, (byte) 0, PUnsignedTinyint.INSTANCE); + assertNullAtIndex(ptr, 4); + assertValueAtIndex(ptr, 5, (byte) 127, PUnsignedTinyint.INSTANCE); + } + + @Test + public void testSeparatorByteValues() throws Exception { + List children = Lists.newArrayListWithExpectedSize(4); + LiteralExpression nullExpression = LiteralExpression.newConstant(null); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((short) -32513, PSmallint.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((short) 32767, PSmallint.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(Integer.MAX_VALUE, PInteger.INSTANCE)); + children.add(nullExpression); + children.add(LiteralExpression.newConstant(Integer.MIN_VALUE, PInteger.INSTANCE)); + // see if we can differentiate two nulls and {separatorByte, 2} + children.add(nullExpression); + children.add(nullExpression); + children.add(LiteralExpression.newConstant((short) 
-32514, PSmallint.INSTANCE)); + + ImmutableBytesPtr ptr = evaluate(children); + + assertNullAtIndex(ptr, 0); + try { + assertValueAtIndex(ptr, 1, (short) -32513, PSmallint.INSTANCE); + } catch (Exception e) { + if (serializationVersion != IMMUTABLE_SERIALIZATION_VERSION) { + fail("Failed on exception " + e); + } } + assertNullAtIndex(ptr, 2); + try { + assertValueAtIndex(ptr, 3, (short) 32767, PSmallint.INSTANCE); + } catch (Exception e) { + if (serializationVersion != IMMUTABLE_SERIALIZATION_VERSION) { + fail("Failed on exception " + e); + } + } + assertNullAtIndex(ptr, 4); + assertValueAtIndex(ptr, 5, Integer.MAX_VALUE, PInteger.INSTANCE); + assertNullAtIndex(ptr, 6); + assertValueAtIndex(ptr, 7, Integer.MIN_VALUE, PInteger.INSTANCE); + assertNullAtIndex(ptr, 8); + assertNullAtIndex(ptr, 9); + assertValueAtIndex(ptr, 10, (short) -32514, PSmallint.INSTANCE); + } + + private void assertNullAtIndex(ImmutableBytesPtr ptr, int index) { + assertValueAtIndex(ptr, index, null, null); + } - private ImmutableBytesPtr evaluate(List children) { - SingleCellConstructorExpression singleCellConstructorExpression = - new SingleCellConstructorExpression(immutableStorageScheme, children); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(); - singleCellConstructorExpression.evaluate(null, ptr); - return ptr; + private void assertValueAtIndex(ImmutableBytesPtr ptr, int index, Object value, PDataType type) { + ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + assertTrue(decoder.decode(ptrCopy, index)); + if (value == null) { + assertArrayEquals(EMPTY_BYTE_ARRAY, ptrCopy.copyBytesIfNecessary()); + return; + } + Object decoded; + if (type.equals(PSmallint.INSTANCE)) { + decoded = type.getCodec().decodeShort(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); + } else if (type.equals(PInteger.INSTANCE)) { + decoded = type.getCodec().decodeInt(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); + } else { // assume byte for all other types + decoded = type.getCodec().decodeByte(ptrCopy.copyBytesIfNecessary(), 0, SortOrder.ASC); } + assertEquals(value, decoded); + } - private void assertDecodedContents(ImmutableBytesPtr ptr, byte[]... contents) { - ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); - for (int i = 0; i < contents.length; i++) { - ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); - assertTrue(decoder.decode(ptrCopy, i)); - assertArrayEquals(contents[i], ptrCopy.copyBytesIfNecessary()); - } + private ImmutableBytesPtr evaluate(List children) { + SingleCellConstructorExpression singleCellConstructorExpression = + new SingleCellConstructorExpression(immutableStorageScheme, children); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(); + singleCellConstructorExpression.evaluate(null, ptr); + return ptr; + } + + private void assertDecodedContents(ImmutableBytesPtr ptr, byte[]... 
contents) { + ColumnValueDecoder decoder = immutableStorageScheme.getDecoder(); + for (int i = 0; i < contents.length; i++) { + ImmutableBytesPtr ptrCopy = new ImmutableBytesPtr(ptr); + assertTrue(decoder.decode(ptrCopy, i)); + assertArrayEquals(contents[i], ptrCopy.copyBytesIfNecessary()); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/MetaDataClientTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/MetaDataClientTest.java index 914f8cd8dd1..ccb502cf8e9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/MetaDataClientTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/MetaDataClientTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,13 @@ */ package org.apache.phoenix.schema; +import static junit.framework.TestCase.assertEquals; +import static junit.framework.TestCase.assertTrue; +import static junit.framework.TestCase.fail; + +import java.sql.DriverManager; +import java.sql.SQLException; + import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.jdbc.PhoenixConnection; @@ -27,60 +34,54 @@ import org.apache.phoenix.util.EnvironmentEdgeManager; import org.junit.BeforeClass; import org.junit.Test; -import java.sql.DriverManager; -import java.sql.SQLException; -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertTrue; -import static junit.framework.TestCase.fail; public class MetaDataClientTest extends BaseConnectionlessQueryTest { - private static String schema; - private static String baseTable; - private static PhoenixConnection phxConn; - private static MetaDataClient mockClient; - private static String ddlFormat; - private static CreateTableStatement stmt; + private static String schema; + private static String baseTable; + private static PhoenixConnection phxConn; + private static MetaDataClient mockClient; + private static String ddlFormat; + private static CreateTableStatement stmt; - @BeforeClass - public static synchronized void setupTest() throws SQLException { - schema = generateUniqueName(); - baseTable = generateUniqueName(); - phxConn = (PhoenixConnection) DriverManager.getConnection(getUrl()); - mockClient = new MetaDataClient(phxConn); - ddlFormat = "CREATE TABLE " + schema + "." + baseTable + " " + - "(A VARCHAR PRIMARY KEY, B BIGINT, C VARCHAR)"; - stmt = (CreateTableStatement)new SQLParser((ddlFormat)).parseStatement(); - } + @BeforeClass + public static synchronized void setupTest() throws SQLException { + schema = generateUniqueName(); + baseTable = generateUniqueName(); + phxConn = (PhoenixConnection) DriverManager.getConnection(getUrl()); + mockClient = new MetaDataClient(phxConn); + ddlFormat = "CREATE TABLE " + schema + "." 
+ baseTable + " " + + "(A VARCHAR PRIMARY KEY, B BIGINT, C VARCHAR)"; + stmt = (CreateTableStatement) new SQLParser((ddlFormat)).parseStatement(); + } - @Test - public void testHandleCreateTableMutationCode() throws SQLException { - MetaDataProtocol.MetaDataMutationResult result = new MetaDataProtocol.MetaDataMutationResult - (MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION ,new PSchema(schema), - EnvironmentEdgeManager.currentTimeMillis()); - try { - mockClient.handleCreateTableMutationCode(result, result.getMutationCode(), stmt, - schema, baseTable, null); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode()); - } + @Test + public void testHandleCreateTableMutationCode() throws SQLException { + MetaDataProtocol.MetaDataMutationResult result = new MetaDataProtocol.MetaDataMutationResult( + MetaDataProtocol.MutationCode.UNALLOWED_TABLE_MUTATION, new PSchema(schema), + EnvironmentEdgeManager.currentTimeMillis()); + try { + mockClient.handleCreateTableMutationCode(result, result.getMutationCode(), stmt, schema, + baseTable, null); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode()); } + } - @Test - //Testing the case when Mutation code thrown from sever is not handled by MetaDataClient - public void testHandleCreateTableMutationCodeWithNewCode() throws SQLException { - MetaDataProtocol.MetaDataMutationResult result = new MetaDataProtocol - .MetaDataMutationResult(MetaDataProtocol.MutationCode.NO_PK_COLUMNS, - new PSchema(schema), EnvironmentEdgeManager.currentTimeMillis()); - try { - mockClient.handleCreateTableMutationCode(result, result.getMutationCode(), stmt, - schema, baseTable, null); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.UNEXPECTED_MUTATION_CODE.getErrorCode(), - e.getErrorCode()); - assertTrue(e.getMessage().contains("NO_PK_COLUMNS")); - } + @Test + // Testing the case when Mutation code thrown from sever is not handled by MetaDataClient + public void testHandleCreateTableMutationCodeWithNewCode() throws SQLException { + MetaDataProtocol.MetaDataMutationResult result = + new MetaDataProtocol.MetaDataMutationResult(MetaDataProtocol.MutationCode.NO_PK_COLUMNS, + new PSchema(schema), EnvironmentEdgeManager.currentTimeMillis()); + try { + mockClient.handleCreateTableMutationCode(result, result.getMutationCode(), stmt, schema, + baseTable, null); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNEXPECTED_MUTATION_CODE.getErrorCode(), e.getErrorCode()); + assertTrue(e.getMessage().contains("NO_PK_COLUMNS")); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/MutationTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/MutationTest.java index 04fd5709776..5ad1f7c117f 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/MutationTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/MutationTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,107 +38,112 @@ import org.junit.Test; public class MutationTest extends BaseConnectionlessQueryTest { - @Test - public void testDurability() throws Exception { - testDurability(true); - testDurability(false); - } + @Test + public void testDurability() throws Exception { + testDurability(true); + testDurability(false); + } - private void testDurability(boolean disableWAL) throws Exception { - try(Connection conn = DriverManager.getConnection(getUrl())) { - Durability expectedDurability = disableWAL ? Durability.SKIP_WAL : Durability.USE_DEFAULT; - conn.setAutoCommit(false); - conn.createStatement().execute("CREATE TABLE t1 (k integer not null primary key, a.k varchar, b.k varchar) " + (disableWAL ? "DISABLE_WAL=true" : "")); - conn.createStatement().execute("UPSERT INTO t1 VALUES(1,'a','b')"); - conn.createStatement().execute("DELETE FROM t1 WHERE k=2"); - assertDurability(conn,expectedDurability); - conn.createStatement().execute("DELETE FROM t1 WHERE k=1"); - assertDurability(conn,expectedDurability); - conn.rollback(); - conn.createStatement().execute("DROP TABLE t1"); - } + private void testDurability(boolean disableWAL) throws Exception { + try (Connection conn = DriverManager.getConnection(getUrl())) { + Durability expectedDurability = disableWAL ? Durability.SKIP_WAL : Durability.USE_DEFAULT; + conn.setAutoCommit(false); + conn.createStatement() + .execute("CREATE TABLE t1 (k integer not null primary key, a.k varchar, b.k varchar) " + + (disableWAL ? "DISABLE_WAL=true" : "")); + conn.createStatement().execute("UPSERT INTO t1 VALUES(1,'a','b')"); + conn.createStatement().execute("DELETE FROM t1 WHERE k=2"); + assertDurability(conn, expectedDurability); + conn.createStatement().execute("DELETE FROM t1 WHERE k=1"); + assertDurability(conn, expectedDurability); + conn.rollback(); + conn.createStatement().execute("DROP TABLE t1"); } - - private void assertDurability(Connection conn, Durability durability) throws SQLException { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - Iterator>> it = pconn.getMutationState().toMutations(); - assertTrue(it.hasNext()); - while (it.hasNext()) { - Pair> pair = it.next(); - assertFalse(pair.getSecond().isEmpty()); - for (Mutation m : pair.getSecond()) { - assertEquals(durability, m.getDurability()); - } - } + } + + private void assertDurability(Connection conn, Durability durability) throws SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + Iterator>> it = pconn.getMutationState().toMutations(); + assertTrue(it.hasNext()); + while (it.hasNext()) { + Pair> pair = it.next(); + assertFalse(pair.getSecond().isEmpty()); + for (Mutation m : pair.getSecond()) { + assertEquals(durability, m.getDurability()); + } } - - @Test - public void testSizeConstraint() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - try { - int maxLength1 = 3; - int maxLength2 = 20; - conn.setAutoCommit(false); - String bvalue = "01234567890123456789"; - assertEquals(20,PVarchar.INSTANCE.toBytes(bvalue).length); - String value = "澴粖蟤य褻酃岤豦팑薰鄩脼ժ끦碉碉碉碉碉"; - String validValue = "abcd"; - String columnTypeInfo1 = "CHAR(3)"; - String columnTypeInto2 = "VARBINARY(20)"; - String columnTypeInto3 = "BINARY(20)"; - String columnTypeInto4 = 
"VARCHAR(20)"; + } + + @Test + public void testSizeConstraint() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + try { + int maxLength1 = 3; + int maxLength2 = 20; + conn.setAutoCommit(false); + String bvalue = "01234567890123456789"; + assertEquals(20, PVarchar.INSTANCE.toBytes(bvalue).length); + String value = "澴粖蟤य褻酃岤豦팑薰鄩脼ժ끦碉碉碉碉碉"; + String validValue = "abcd"; + String columnTypeInfo1 = "CHAR(3)"; + String columnTypeInto2 = "VARBINARY(20)"; + String columnTypeInto3 = "BINARY(20)"; + String columnTypeInto4 = "VARCHAR(20)"; - assertTrue(value.length() <= maxLength2); - assertTrue(PVarchar.INSTANCE.toBytes(value).length > maxLength2); - conn.createStatement().execute("CREATE TABLE t1 (k1 char(" + maxLength1 + ") not null, k2 varchar(" + maxLength2 + "), " - + "v1 varchar(" + maxLength2 + "), v2 varbinary(" + maxLength2 + "), v3 binary(" + maxLength2 + "), constraint pk primary key (k1, k2))"); - conn.createStatement().execute("UPSERT INTO t1 VALUES('a','" + value + "', '" + value + "','" + bvalue + "','" + bvalue + "')"); - try { - conn.createStatement().execute("UPSERT INTO t1(k1,v1) VALUES('" - + validValue+ "','" + value + "')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(),e.getErrorCode()); - assertFalse(e.getMessage().contains(validValue)); - assertTrue(e.getMessage().contains(columnTypeInfo1)); - } - try { - conn.createStatement().execute("UPSERT INTO t1(k1,v2) VALUES('b','" + value + "')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(),e.getErrorCode()); - assertFalse(e.getMessage().contains(value)); - assertTrue(e.getMessage().contains(columnTypeInto2)); - } - try { - conn.createStatement().execute("UPSERT INTO t1(k1,v3) VALUES('b','" + value + "')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(),e.getErrorCode()); - assertFalse(e.getMessage().contains(value)); - assertTrue(e.getMessage().contains(columnTypeInto3)); - } - value = "澴粖蟤य褻酃岤豦팑薰鄩脼ժ끦碉碉碉碉碉碉碉碉碉"; - assertTrue(value.length() > maxLength2); - try { - conn.createStatement().execute("UPSERT INTO t1(k1,k2) VALUES('a','" + value + "')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(),e.getErrorCode()); - assertFalse(e.getMessage().contains(value)); - assertTrue(e.getMessage().contains(columnTypeInto4)); - } - try { - conn.createStatement().execute("UPSERT INTO t1(k1,v1) VALUES('a','" + value + "')"); - fail(); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(),e.getErrorCode()); - assertFalse(e.getMessage().contains(value)); - assertTrue(e.getMessage().contains(columnTypeInto4)); - } - } finally { - conn.close(); - } + assertTrue(value.length() <= maxLength2); + assertTrue(PVarchar.INSTANCE.toBytes(value).length > maxLength2); + conn.createStatement() + .execute("CREATE TABLE t1 (k1 char(" + maxLength1 + ") not null, k2 varchar(" + maxLength2 + + "), " + "v1 varchar(" + maxLength2 + "), v2 varbinary(" + maxLength2 + "), v3 binary(" + + maxLength2 + "), constraint pk primary key (k1, k2))"); + conn.createStatement().execute("UPSERT INTO t1 VALUES('a','" + value + "', '" + value + "','" + + bvalue + "','" + bvalue + "')"); + try { + conn.createStatement() + .execute("UPSERT INTO t1(k1,v1) VALUES('" + validValue + "','" + value + "')"); + fail(); + } catch (SQLException e) { + 
assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + assertFalse(e.getMessage().contains(validValue)); + assertTrue(e.getMessage().contains(columnTypeInfo1)); + } + try { + conn.createStatement().execute("UPSERT INTO t1(k1,v2) VALUES('b','" + value + "')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + assertFalse(e.getMessage().contains(value)); + assertTrue(e.getMessage().contains(columnTypeInto2)); + } + try { + conn.createStatement().execute("UPSERT INTO t1(k1,v3) VALUES('b','" + value + "')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + assertFalse(e.getMessage().contains(value)); + assertTrue(e.getMessage().contains(columnTypeInto3)); + } + value = "澴粖蟤य褻酃岤豦팑薰鄩脼ժ끦碉碉碉碉碉碉碉碉碉"; + assertTrue(value.length() > maxLength2); + try { + conn.createStatement().execute("UPSERT INTO t1(k1,k2) VALUES('a','" + value + "')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + assertFalse(e.getMessage().contains(value)); + assertTrue(e.getMessage().contains(columnTypeInto4)); + } + try { + conn.createStatement().execute("UPSERT INTO t1(k1,v1) VALUES('a','" + value + "')"); + fail(); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode()); + assertFalse(e.getMessage().contains(value)); + assertTrue(e.getMessage().contains(columnTypeInto4)); + } + } finally { + conn.close(); } + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PBaseColumn.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PBaseColumn.java index 59c3ccbb4fa..226c6b6ff90 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PBaseColumn.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PBaseColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,32 +19,29 @@ import org.apache.phoenix.util.SizedUtil; - /** - * Base class for PColumn implementors that provides - * some reasonable default implementations. - * + * Base class for PColumn implementors that provides some reasonable default implementations. 
* @since 0.1 */ public abstract class PBaseColumn implements PColumn { - @Override - public final Integer getMaxLength() { - return null; - } + @Override + public final Integer getMaxLength() { + return null; + } - @Override - public final Integer getScale() { - return null; - } + @Override + public final Integer getScale() { + return null; + } - @Override - public boolean isNullable() { - return false; - } + @Override + public boolean isNullable() { + return false; + } - @Override - public int getEstimatedSize() { - return SizedUtil.OBJECT_SIZE; // Not really interested in sized of these - } + @Override + public int getEstimatedSize() { + return SizedUtil.OBJECT_SIZE; // Not really interested in sized of these + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PCharPadTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PCharPadTest.java index 6f511e99193..0b3dabd4d92 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PCharPadTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PCharPadTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,11 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.schema; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.sql.SQLException; import java.util.ArrayList; @@ -36,112 +35,116 @@ public class PCharPadTest { - public void test(String value, PDataType dataType, int length, SortOrder sortOrder, byte[] result) throws SQLException { - LiteralExpression expr = LiteralExpression.newConstant(value, dataType, sortOrder); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); - dataType.pad(ptr, length, sortOrder); - String resultValue = (String) dataType.toObject(ptr, dataType, sortOrder); - assertTrue(Arrays.equals(result, ptr.get())); - assertEquals(value, resultValue); - } - - @Test - public void testCharPaddingAsc1() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "hellow"; - byte[] result = new byte[]{104, 101, 108, 108, 111, 119, 32, 32, 32, 32}; - test(str, dataType, 10, SortOrder.ASC, result); - } - - @Test - public void testCharPaddingAsc2() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "phoenix"; - byte[] result = new byte[]{112, 104, 111, 101, 110, 105, 120, 32, 32, 32, 32, 32, 32, 32}; - test(str, dataType, 14, SortOrder.ASC, result); - } - - @Test - public void testCharPaddingAsc3() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "phoenix"; - byte[] result = new byte[]{112, 104, 111, 101, 110, 105, 120}; - test(str, dataType, 7, SortOrder.ASC, result); + public void test(String value, PDataType dataType, int length, SortOrder sortOrder, byte[] result) + throws SQLException { + LiteralExpression expr = LiteralExpression.newConstant(value, dataType, sortOrder); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); + dataType.pad(ptr, length, sortOrder); + String resultValue = (String) dataType.toObject(ptr, dataType, sortOrder); + assertTrue(Arrays.equals(result, 
ptr.get())); + assertEquals(value, resultValue); + } + + @Test + public void testCharPaddingAsc1() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "hellow"; + byte[] result = new byte[] { 104, 101, 108, 108, 111, 119, 32, 32, 32, 32 }; + test(str, dataType, 10, SortOrder.ASC, result); + } + + @Test + public void testCharPaddingAsc2() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "phoenix"; + byte[] result = new byte[] { 112, 104, 111, 101, 110, 105, 120, 32, 32, 32, 32, 32, 32, 32 }; + test(str, dataType, 14, SortOrder.ASC, result); + } + + @Test + public void testCharPaddingAsc3() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "phoenix"; + byte[] result = new byte[] { 112, 104, 111, 101, 110, 105, 120 }; + test(str, dataType, 7, SortOrder.ASC, result); + } + + @Test + public void testCharPaddingAsc4() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "hello world"; + byte[] result = + new byte[] { 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 32, 32, 32, 32, 32 }; + test(str, dataType, 16, SortOrder.ASC, result); + } + + @Test + public void testCharPaddingDesc1() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "hellow"; + byte[] result = new byte[] { -105, -102, -109, -109, -112, -120, -33, -33, -33, -33 }; + test(str, dataType, 10, SortOrder.DESC, result); + } + + @Test + public void testCharPaddingDesc2() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "phoenix"; + byte[] result = new byte[] { -113, -105, -112, -102, -111, -106, -121, -33, -33, -33, -33, -33, + -33, -33, -33 }; + test(str, dataType, 15, SortOrder.DESC, result); + } + + @Test + public void testCharPaddingDesc3() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "phoenix"; + byte[] result = new byte[] { -113, -105, -112, -102, -111, -106, -121 }; + test(str, dataType, 7, SortOrder.DESC, result); + } + + @Test + public void testCharPaddingDesc4() throws SQLException { + PDataType dataType = PChar.INSTANCE; + String str = "hello world"; + byte[] result = new byte[] { -105, -102, -109, -109, -112, -33, -120, -112, -115, -109, -101, + -33, -33, -33, -33, -33 }; + test(str, dataType, 16, SortOrder.DESC, result); + } + + @Test + public void testRelativeByteArrayOrder() throws SQLException { + String[] inputs = { "foo", "foo!", "fooA", "foo~" }; + PDataType dataType = PChar.INSTANCE; + Arrays.sort(inputs); + List ascOrderedInputs = new ArrayList<>(inputs.length); + SortOrder sortOrder = SortOrder.ASC; + for (String input : inputs) { + LiteralExpression expr = LiteralExpression.newConstant(input, dataType, sortOrder); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); + dataType.pad(ptr, 8, sortOrder); + ascOrderedInputs.add(ptr.copyBytes()); } - - @Test - public void testCharPaddingAsc4() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "hello world"; - byte[] result = new byte[]{104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 32, 32, 32, 32, 32}; - test(str, dataType, 16, SortOrder.ASC, result); - } - - @Test - public void testCharPaddingDesc1() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "hellow"; - byte[] result = new byte[]{-105, -102, -109, -109, -112, -120, -33, -33, -33, -33}; - test(str, dataType, 10, SortOrder.DESC, result); - } - - @Test - public void testCharPaddingDesc2() throws SQLException { - PDataType dataType = PChar.INSTANCE; - 
String str = "phoenix"; - byte[] result = new byte[]{-113, -105, -112, -102, -111, -106, -121, -33, -33, -33, -33, -33, -33, -33, -33}; - test(str, dataType, 15, SortOrder.DESC, result); + Collections.sort(ascOrderedInputs, Bytes.BYTES_COMPARATOR); + for (int i = 0; i < inputs.length; i++) { + byte[] bytes = ascOrderedInputs.get(i); + String resultValue = (String) dataType.toObject(bytes, 0, bytes.length, dataType, sortOrder); + assertEquals(inputs[i], resultValue); } - @Test - public void testCharPaddingDesc3() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "phoenix"; - byte[] result = new byte[]{-113, -105, -112, -102, -111, -106, -121}; - test(str, dataType, 7, SortOrder.DESC, result); + List descOrderedInputs = new ArrayList<>(inputs.length); + sortOrder = SortOrder.DESC; + for (String input : inputs) { + LiteralExpression expr = LiteralExpression.newConstant(input, dataType, sortOrder); + ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); + dataType.pad(ptr, 8, sortOrder); + descOrderedInputs.add(ptr.copyBytes()); } - - @Test - public void testCharPaddingDesc4() throws SQLException { - PDataType dataType = PChar.INSTANCE; - String str = "hello world"; - byte[] result = new byte[]{-105, -102, -109, -109, -112, -33, -120, -112, -115, -109, -101, -33, -33, -33, -33, -33}; - test(str, dataType, 16, SortOrder.DESC, result); - } - - @Test - public void testRelativeByteArrayOrder() throws SQLException { - String[] inputs = {"foo", "foo!", "fooA", "foo~"}; - PDataType dataType = PChar.INSTANCE; - Arrays.sort(inputs); - List ascOrderedInputs = new ArrayList<>(inputs.length); - SortOrder sortOrder = SortOrder.ASC; - for (String input : inputs) { - LiteralExpression expr = LiteralExpression.newConstant(input, dataType, sortOrder); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); - dataType.pad(ptr, 8, sortOrder); - ascOrderedInputs.add(ptr.copyBytes()); - } - Collections.sort(ascOrderedInputs, Bytes.BYTES_COMPARATOR); - for (int i = 0; i < inputs.length; i++) { - byte[] bytes = ascOrderedInputs.get(i); - String resultValue = (String) dataType.toObject(bytes, 0, bytes.length, dataType, sortOrder); - assertEquals(inputs[i], resultValue); - } - - List descOrderedInputs = new ArrayList<>(inputs.length); - sortOrder = SortOrder.DESC; - for (String input : inputs) { - LiteralExpression expr = LiteralExpression.newConstant(input, dataType, sortOrder); - ImmutableBytesPtr ptr = new ImmutableBytesPtr(expr.getBytes()); - dataType.pad(ptr, 8, sortOrder); - descOrderedInputs.add(ptr.copyBytes()); - } - Collections.sort(descOrderedInputs, Bytes.BYTES_COMPARATOR); - for (int i = 0; i < inputs.length; i++) { - byte[] bytes = descOrderedInputs.get(i); - String resultValue = (String) dataType.toObject(bytes, 0, bytes.length, dataType, sortOrder); - assertEquals(inputs[inputs.length - 1 - i], resultValue); - } + Collections.sort(descOrderedInputs, Bytes.BYTES_COMPARATOR); + for (int i = 0; i < inputs.length; i++) { + byte[] bytes = descOrderedInputs.get(i); + String resultValue = (String) dataType.toObject(bytes, 0, bytes.length, dataType, sortOrder); + assertEquals(inputs[inputs.length - 1 - i], resultValue); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PLongColumn.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PLongColumn.java index 67c919071d7..11fdf280f85 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PLongColumn.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/schema/PLongColumn.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,8 +25,8 @@ * @since 0.1 */ public abstract class PLongColumn extends PBaseColumn { - @Override - public final PDataType getDataType() { - return PLong.INSTANCE; - } + @Override + public final PDataType getDataType() { + return PLong.INSTANCE; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java index a5cbfe2f7f6..53fd391c19c 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/PMetaDataImplTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,192 +29,194 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.phoenix.parse.PSchema; import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.TimeKeeper; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; - public class PMetaDataImplTest { - - private static void addToTable(PMetaData metaData, String name, int size, TestTimeKeeper timeKeeper) throws SQLException { - PTable table = new PSizedTable(new PTableKey(null,name), size); - metaData.addTable(table, System.currentTimeMillis()); - timeKeeper.incrementTime(); - } - - private static void removeFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) throws SQLException { - metaData.removeTable(null, name, null, HConstants.LATEST_TIMESTAMP); - timeKeeper.incrementTime(); - } - - private static PTable getFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) throws TableNotFoundException { - PTable table = metaData.getTableRef(new PTableKey(null,name)).getTable(); - timeKeeper.incrementTime(); - return table; - } - - private static void assertNames(PMetaData metaData, String... 
names) { - Set actualTables = Sets.newHashSet(); - for (PTable table : metaData) { - actualTables.add(table.getKey().getName()); - } - Set expectedTables = Sets.newHashSet(names); - assertEquals(expectedTables,actualTables); - } - - private static class TestTimeKeeper implements TimeKeeper { - private long time = 0; - - @Override - public long getCurrentTime() { - return time; - } - - public void incrementTime() { - time++; - } + + private static void addToTable(PMetaData metaData, String name, int size, + TestTimeKeeper timeKeeper) throws SQLException { + PTable table = new PSizedTable(new PTableKey(null, name), size); + metaData.addTable(table, System.currentTimeMillis()); + timeKeeper.incrementTime(); + } + + private static void removeFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) + throws SQLException { + metaData.removeTable(null, name, null, HConstants.LATEST_TIMESTAMP); + timeKeeper.incrementTime(); + } + + private static PTable getFromTable(PMetaData metaData, String name, TestTimeKeeper timeKeeper) + throws TableNotFoundException { + PTable table = metaData.getTableRef(new PTableKey(null, name)).getTable(); + timeKeeper.incrementTime(); + return table; + } + + private static void assertNames(PMetaData metaData, String... names) { + Set actualTables = Sets.newHashSet(); + for (PTable table : metaData) { + actualTables.add(table.getKey().getName()); } - - @Test - public void testEviction() throws Exception { - TestTimeKeeper timeKeeper = new TestTimeKeeper(); - Map props = Maps.newHashMapWithExpectedSize(2); - props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10"); - props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); - PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); - addToTable(metaData, "a", 5, timeKeeper); - assertEquals(1, metaData.size()); - addToTable(metaData, "b", 4, timeKeeper); - assertEquals(2, metaData.size()); - addToTable(metaData, "c", 3, timeKeeper); - assertEquals(2, metaData.size()); - assertNames(metaData, "b","c"); - - addToTable(metaData, "b", 8, timeKeeper); - assertEquals(1, metaData.size()); - assertNames(metaData, "b"); - - addToTable(metaData, "d", 11, timeKeeper); - assertEquals(1, metaData.size()); - assertNames(metaData, "b"); - - removeFromTable(metaData, "b", timeKeeper); - assertNames(metaData); - - addToTable(metaData, "a", 4, timeKeeper); - assertEquals(1, metaData.size()); - addToTable(metaData, "b", 3, timeKeeper); - assertEquals(2, metaData.size()); - addToTable(metaData, "c", 2, timeKeeper); - assertEquals(3, metaData.size()); - assertNames(metaData, "a", "b","c"); - - getFromTable(metaData, "a", timeKeeper); - addToTable(metaData, "d", 3, timeKeeper); - assertEquals(3, metaData.size()); - assertNames(metaData, "c", "a","d"); + Set expectedTables = Sets.newHashSet(names); + assertEquals(expectedTables, actualTables); + } + + private static class TestTimeKeeper implements TimeKeeper { + private long time = 0; + + @Override + public long getCurrentTime() { + return time; } - @Test - public void shouldNotEvictMoreEntriesThanNecessary() throws Exception { - TestTimeKeeper timeKeeper = new TestTimeKeeper(); - Map props = Maps.newHashMapWithExpectedSize(2); - props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5"); - props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); - PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); - addToTable(metaData, "a", 1, timeKeeper); - assertEquals(1, 
metaData.size()); - addToTable(metaData, "b", 1, timeKeeper); - assertEquals(2, metaData.size()); - assertNames(metaData, "a", "b"); - addToTable(metaData, "c", 3, timeKeeper); - assertEquals(3, metaData.size()); - assertNames(metaData, "a", "b", "c"); - getFromTable(metaData, "a", timeKeeper); - getFromTable(metaData, "b", timeKeeper); - addToTable(metaData, "d", 3, timeKeeper); - assertEquals(3, metaData.size()); - assertNames(metaData, "a", "b", "d"); + public void incrementTime() { + time++; } + } + + @Test + public void testEviction() throws Exception { + TestTimeKeeper timeKeeper = new TestTimeKeeper(); + Map props = Maps.newHashMapWithExpectedSize(2); + props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10"); + props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); + PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); + addToTable(metaData, "a", 5, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "b", 4, timeKeeper); + assertEquals(2, metaData.size()); + addToTable(metaData, "c", 3, timeKeeper); + assertEquals(2, metaData.size()); + assertNames(metaData, "b", "c"); + + addToTable(metaData, "b", 8, timeKeeper); + assertEquals(1, metaData.size()); + assertNames(metaData, "b"); + + addToTable(metaData, "d", 11, timeKeeper); + assertEquals(1, metaData.size()); + assertNames(metaData, "b"); + + removeFromTable(metaData, "b", timeKeeper); + assertNames(metaData); - @Test - public void shouldAlwaysKeepAtLeastOneEntryEvenIfTooLarge() throws Exception { - TestTimeKeeper timeKeeper = new TestTimeKeeper(); - Map props = Maps.newHashMapWithExpectedSize(2); - props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5"); - props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); - PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); - addToTable(metaData, "a", 1, timeKeeper); - assertEquals(1, metaData.size()); - addToTable(metaData, "b", 1, timeKeeper); - assertEquals(2, metaData.size()); - addToTable(metaData, "c", 5, timeKeeper); - assertEquals(1, metaData.size()); - addToTable(metaData, "d", 20, timeKeeper); - assertEquals(1, metaData.size()); - assertNames(metaData, "c"); - addToTable(metaData, "e", 1, timeKeeper); - assertEquals(1, metaData.size()); - addToTable(metaData, "f", 2, timeKeeper); - assertEquals(2, metaData.size()); - assertNames(metaData, "e", "f"); + addToTable(metaData, "a", 4, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "b", 3, timeKeeper); + assertEquals(2, metaData.size()); + addToTable(metaData, "c", 2, timeKeeper); + assertEquals(3, metaData.size()); + assertNames(metaData, "a", "b", "c"); + + getFromTable(metaData, "a", timeKeeper); + addToTable(metaData, "d", 3, timeKeeper); + assertEquals(3, metaData.size()); + assertNames(metaData, "c", "a", "d"); + } + + @Test + public void shouldNotEvictMoreEntriesThanNecessary() throws Exception { + TestTimeKeeper timeKeeper = new TestTimeKeeper(); + Map props = Maps.newHashMapWithExpectedSize(2); + props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5"); + props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); + PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); + addToTable(metaData, "a", 1, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "b", 1, timeKeeper); + assertEquals(2, metaData.size()); + assertNames(metaData, "a", "b"); + addToTable(metaData, "c", 3, timeKeeper); + 
assertEquals(3, metaData.size()); + assertNames(metaData, "a", "b", "c"); + getFromTable(metaData, "a", timeKeeper); + getFromTable(metaData, "b", timeKeeper); + addToTable(metaData, "d", 3, timeKeeper); + assertEquals(3, metaData.size()); + assertNames(metaData, "a", "b", "d"); + } + + @Test + public void shouldAlwaysKeepAtLeastOneEntryEvenIfTooLarge() throws Exception { + TestTimeKeeper timeKeeper = new TestTimeKeeper(); + Map props = Maps.newHashMapWithExpectedSize(2); + props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "5"); + props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); + PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); + addToTable(metaData, "a", 1, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "b", 1, timeKeeper); + assertEquals(2, metaData.size()); + addToTable(metaData, "c", 5, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "d", 20, timeKeeper); + assertEquals(1, metaData.size()); + assertNames(metaData, "c"); + addToTable(metaData, "e", 1, timeKeeper); + assertEquals(1, metaData.size()); + addToTable(metaData, "f", 2, timeKeeper); + assertEquals(2, metaData.size()); + assertNames(metaData, "e", "f"); + } + + @Test + public void testAge() throws Exception { + TestTimeKeeper timeKeeper = new TestTimeKeeper(); + Map props = Maps.newHashMapWithExpectedSize(2); + props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10"); + props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); + PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); + String tableName = "a"; + addToTable(metaData, tableName, 1, timeKeeper); + PTableRef aTableRef = metaData.getTableRef(new PTableKey(null, tableName)); + assertNotNull(aTableRef); + assertEquals(1, metaData.getAge(aTableRef)); + tableName = "b"; + addToTable(metaData, tableName, 1, timeKeeper); + PTableRef bTableRef = metaData.getTableRef(new PTableKey(null, tableName)); + assertNotNull(bTableRef); + assertEquals(1, metaData.getAge(bTableRef)); + assertEquals(2, metaData.getAge(aTableRef)); + } + + @Test + public void testSchema() throws Exception { + TestTimeKeeper timeKeeper = new TestTimeKeeper(); + PMetaData metaData = + new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(Collections.EMPTY_MAP)); + PSchema schema = new PSchema("testSchema"); + metaData.addSchema(schema); + assertEquals(schema, metaData.getSchema(schema.getSchemaKey())); + metaData.removeSchema(schema, schema.getTimeStamp()); + try { + metaData.getSchema(schema.getSchemaKey()); + fail("the schema should be removed"); + } catch (SchemaNotFoundException e) { } + } + + private static class PSizedTable extends PTableImpl { + private final int size; + private final PTableKey key; - @Test - public void testAge() throws Exception { - TestTimeKeeper timeKeeper = new TestTimeKeeper(); - Map props = Maps.newHashMapWithExpectedSize(2); - props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10"); - props.put(QueryServices.CLIENT_CACHE_ENCODING, "object"); - PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, new ReadOnlyProps(props)); - String tableName = "a"; - addToTable(metaData, tableName, 1, timeKeeper); - PTableRef aTableRef = metaData.getTableRef(new PTableKey(null,tableName)); - assertNotNull(aTableRef); - assertEquals(1, metaData.getAge(aTableRef)); - tableName = "b"; - addToTable(metaData, tableName, 1, timeKeeper); - PTableRef bTableRef = metaData.getTableRef(new 
PTableKey(null,tableName)); - assertNotNull(bTableRef); - assertEquals(1, metaData.getAge(bTableRef)); - assertEquals(2, metaData.getAge(aTableRef)); + public PSizedTable(PTableKey key, int size) { + this.key = key; + this.size = size; } - @Test - public void testSchema() throws Exception { - TestTimeKeeper timeKeeper = new TestTimeKeeper(); - PMetaData metaData = new PMetaDataImpl(5, Long.MAX_VALUE, timeKeeper, - new ReadOnlyProps(Collections.EMPTY_MAP)); - PSchema schema = new PSchema("testSchema"); - metaData.addSchema(schema); - assertEquals(schema, metaData.getSchema(schema.getSchemaKey())); - metaData.removeSchema(schema, schema.getTimeStamp()); - try { - metaData.getSchema(schema.getSchemaKey()); - fail("the schema should be removed"); - } catch (SchemaNotFoundException e) { - } + @Override + public int getEstimatedSize() { + return size; } - private static class PSizedTable extends PTableImpl { - private final int size; - private final PTableKey key; - - public PSizedTable(PTableKey key, int size) { - this.key = key; - this.size = size; - } - - @Override - public int getEstimatedSize() { - return size; - } - - @Override - public PTableKey getKey() { - return key; - } + @Override + public PTableKey getKey() { + return key; } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java index 29bf542e94b..17781a5e50b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeySchemaTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema; import static org.junit.Assert.assertEquals; @@ -37,163 +36,181 @@ import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +public class RowKeySchemaTest extends BaseConnectionlessQueryTest { -public class RowKeySchemaTest extends BaseConnectionlessQueryTest { + public RowKeySchemaTest() { + } - public RowKeySchemaTest() { - } + private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values) + throws Exception { + assertIteration(dataColumns, pk, values, ""); + } - private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values) throws Exception { - assertIteration(dataColumns,pk,values,""); + private void assertIteration(String dataColumns, String pk, Object[] values, String dataProps) + throws Exception { + String schemaName = ""; + String tableName = "T"; + Connection conn = DriverManager.getConnection(getUrl()); + String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), + SchemaUtil.normalizeIdentifier(tableName)); + conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? "" : dataProps)); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); + StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); + for (int i = 0; i < values.length; i++) { + buf.append("?,"); } - - private void assertIteration(String dataColumns, String pk, Object[] values, String dataProps) throws Exception { - String schemaName = ""; - String tableName = "T"; - Connection conn = DriverManager.getConnection(getUrl()); - String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier(tableName)); - conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? 
"" : dataProps) ); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); - StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); - for (int i = 0; i < values.length; i++) { - buf.append("?,"); - } - buf.setCharAt(buf.length()-1, ')'); - PreparedStatement stmt = conn.prepareStatement(buf.toString()); - for (int i = 0; i < values.length; i++) { - stmt.setObject(i+1, values[i]); - } - stmt.execute(); - Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List dataKeyValues = iterator.next().getSecond(); - Cell keyValue = dataKeyValues.get(0); - - List sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size()); - for (PColumn col : table.getPKColumns()) { - sortOrders.add(col.getSortOrder()); - } - RowKeySchema schema = table.getRowKeySchema(); - int minOffset = keyValue.getRowOffset(); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int nExpectedValues = values.length; - for (int i = values.length-1; i >=0; i--) { - if (values[i] == null) { - nExpectedValues--; - } else { - break; - } - } - int i = 0; - int maxOffset = schema.iterator(keyValue.getRowArray(), minOffset, keyValue.getRowLength(), ptr); - for (i = 0; i < schema.getFieldCount(); i++) { - Boolean hasValue = schema.next(ptr, i, maxOffset); - if (hasValue == null) { - break; - } - assertTrue(hasValue); - PDataType type = PDataType.fromLiteral(values[i]); - SortOrder sortOrder = sortOrders.get(i); - Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder); - assertEquals(values[i], value); - } - assertEquals(nExpectedValues, i); - assertNull(schema.next(ptr, i, maxOffset)); - - for (i--; i >= 0; i--) { - Boolean hasValue = schema.previous(ptr, i, minOffset); - if (hasValue == null) { - break; - } - assertTrue(hasValue); - PDataType type = PDataType.fromLiteral(values[i]); - SortOrder sortOrder = sortOrders.get(i); - Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder); - assertEquals(values[i], value); - } - assertEquals(-1, i); - assertNull(schema.previous(ptr, i, minOffset)); - conn.close(); - } - - @Test - public void testFixedLengthValueAtEnd() throws Exception { - assertExpectedRowKeyValue("n VARCHAR NOT NULL, s CHAR(1) NOT NULL, y SMALLINT NOT NULL, o BIGINT NOT NULL", "n,s,y DESC,o DESC", new Object[] {"Abbey","F",2012,253}); + buf.setCharAt(buf.length() - 1, ')'); + PreparedStatement stmt = conn.prepareStatement(buf.toString()); + for (int i = 0; i < values.length; i++) { + stmt.setObject(i + 1, values[i]); } - - @Test - public void testFixedVarVar() throws Exception { - assertExpectedRowKeyValue("i INTEGER NOT NULL, v1 VARCHAR, v2 VARCHAR", "i, v1, v2", new Object[] {1, "a", "b"}); + stmt.execute(); + Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); + List dataKeyValues = iterator.next().getSecond(); + Cell keyValue = dataKeyValues.get(0); + + List sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size()); + for (PColumn col : table.getPKColumns()) { + sortOrders.add(col.getSortOrder()); } - - @Test - public void testFixedFixedVar() throws Exception { - assertExpectedRowKeyValue("c1 INTEGER NOT NULL, c2 BIGINT NOT NULL, c3 VARCHAR", "c1, c2, c3", new Object[] {1, 2, "abc"}); + RowKeySchema schema = table.getRowKeySchema(); + int minOffset = keyValue.getRowOffset(); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int nExpectedValues = values.length; 
+ for (int i = values.length - 1; i >= 0; i--) { + if (values[i] == null) { + nExpectedValues--; + } else { + break; + } } - - @Test - public void testVarNullNull() throws Exception { - assertExpectedRowKeyValue("c1 VARCHAR, c2 VARCHAR, c3 VARCHAR", "c1, c2, c3", new Object[] {"abc", null, null}); + int i = 0; + int maxOffset = + schema.iterator(keyValue.getRowArray(), minOffset, keyValue.getRowLength(), ptr); + for (i = 0; i < schema.getFieldCount(); i++) { + Boolean hasValue = schema.next(ptr, i, maxOffset); + if (hasValue == null) { + break; + } + assertTrue(hasValue); + PDataType type = PDataType.fromLiteral(values[i]); + SortOrder sortOrder = sortOrders.get(i); + Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder); + assertEquals(values[i], value); } + assertEquals(nExpectedValues, i); + assertNull(schema.next(ptr, i, maxOffset)); - @Test - public void testVarFixedVar() throws Exception { - assertExpectedRowKeyValue("c1 VARCHAR, c2 CHAR(1) NOT NULL, c3 VARCHAR", "c1, c2, c3", new Object[] {"abc", "z", "de"}); + for (i--; i >= 0; i--) { + Boolean hasValue = schema.previous(ptr, i, minOffset); + if (hasValue == null) { + break; + } + assertTrue(hasValue); + PDataType type = PDataType.fromLiteral(values[i]); + SortOrder sortOrder = sortOrders.get(i); + Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder); + assertEquals(values[i], value); } - - @Test - public void testVarFixedFixed() throws Exception { - assertExpectedRowKeyValue("c1 VARCHAR, c2 CHAR(1) NOT NULL, c3 INTEGER NOT NULL", "c1, c2, c3", new Object[] {"abc", "z", 5}); - } - - private static byte[] getKeyPart(PTable t, String... keys) throws SQLException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - byte[][] keyByteArray = new byte[keys.length][]; - int i = 0; - for (String key : keys) { - keyByteArray[i++] = key == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(key); - } - t.newKey(ptr, keyByteArray); - return ptr.copyBytes(); - } - - @Test - public void testClipLeft() throws Exception { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute("CREATE TABLE T1(K1 CHAR(1) NOT NULL, K2 VARCHAR, K3 VARCHAR, CONSTRAINT pk PRIMARY KEY (K1,K2,K3)) "); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table; - RowKeySchema schema; - table = pconn.getTable(new PTableKey(pconn.getTenantId(), "T1")); - schema = table.getRowKeySchema(); - KeyRange r, rLeft, expectedResult; - r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, getKeyPart(table, "B", "C"), true); - rLeft = schema.clipLeft(0, r, 1, ptr); - expectedResult = KeyRange.getKeyRange(getKeyPart(table, "A"), true, getKeyPart(table, "B"), true); - r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, getKeyPart(table, "B"), true); - rLeft = schema.clipLeft(0, r, 1, ptr); - expectedResult = KeyRange.getKeyRange(getKeyPart(table, "A"), true, getKeyPart(table, "B"), true); - assertEquals(expectedResult, rLeft); - rLeft = schema.clipLeft(0, r, 2, ptr); - expectedResult = KeyRange.getKeyRange(getKeyPart(table, "A", "B"), true, getKeyPart(table, "B"), true); - assertEquals(expectedResult, rLeft); - - r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, KeyRange.UNBOUND, true); - rLeft = schema.clipLeft(0, r, 2, ptr); - expectedResult = KeyRange.getKeyRange(getKeyPart(table, "A", "B"), true, KeyRange.UNBOUND, false); - assertEquals(expectedResult, rLeft); - - r = KeyRange.getKeyRange(KeyRange.UNBOUND, false, getKeyPart(table, "A", "B", "C"), true); - rLeft = schema.clipLeft(0, r, 2, ptr); - expectedResult = KeyRange.getKeyRange(KeyRange.UNBOUND, false, getKeyPart(table, "A", "B"), true); - assertEquals(expectedResult, rLeft); + assertEquals(-1, i); + assertNull(schema.previous(ptr, i, minOffset)); + conn.close(); + } + + @Test + public void testFixedLengthValueAtEnd() throws Exception { + assertExpectedRowKeyValue( + "n VARCHAR NOT NULL, s CHAR(1) NOT NULL, y SMALLINT NOT NULL, o BIGINT NOT NULL", + "n,s,y DESC,o DESC", new Object[] { "Abbey", "F", 2012, 253 }); + } + + @Test + public void testFixedVarVar() throws Exception { + assertExpectedRowKeyValue("i INTEGER NOT NULL, v1 VARCHAR, v2 VARCHAR", "i, v1, v2", + new Object[] { 1, "a", "b" }); + } + + @Test + public void testFixedFixedVar() throws Exception { + assertExpectedRowKeyValue("c1 INTEGER NOT NULL, c2 BIGINT NOT NULL, c3 VARCHAR", "c1, c2, c3", + new Object[] { 1, 2, "abc" }); + } + + @Test + public void testVarNullNull() throws Exception { + assertExpectedRowKeyValue("c1 VARCHAR, c2 VARCHAR, c3 VARCHAR", "c1, c2, c3", + new Object[] { "abc", null, null }); + } + + @Test + public void testVarFixedVar() throws Exception { + assertExpectedRowKeyValue("c1 VARCHAR, c2 CHAR(1) NOT NULL, c3 VARCHAR", "c1, c2, c3", + new Object[] { "abc", "z", "de" }); + } + + @Test + public void testVarFixedFixed() throws Exception { + assertExpectedRowKeyValue("c1 VARCHAR, c2 CHAR(1) NOT NULL, c3 INTEGER NOT NULL", "c1, c2, c3", + new Object[] { "abc", "z", 5 }); + } + + private static byte[] getKeyPart(PTable t, String... keys) throws SQLException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + byte[][] keyByteArray = new byte[keys.length][]; + int i = 0; + for (String key : keys) { + keyByteArray[i++] = key == null ? 
ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(key); } - + t.newKey(ptr, keyByteArray); + return ptr.copyBytes(); + } + + @Test + public void testClipLeft() throws Exception { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T1(K1 CHAR(1) NOT NULL, K2 VARCHAR, K3 VARCHAR, CONSTRAINT pk PRIMARY KEY (K1,K2,K3)) "); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table; + RowKeySchema schema; + table = pconn.getTable(new PTableKey(pconn.getTenantId(), "T1")); + schema = table.getRowKeySchema(); + KeyRange r, rLeft, expectedResult; + r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, getKeyPart(table, "B", "C"), + true); + rLeft = schema.clipLeft(0, r, 1, ptr); + expectedResult = + KeyRange.getKeyRange(getKeyPart(table, "A"), true, getKeyPart(table, "B"), true); + r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, getKeyPart(table, "B"), true); + rLeft = schema.clipLeft(0, r, 1, ptr); + expectedResult = + KeyRange.getKeyRange(getKeyPart(table, "A"), true, getKeyPart(table, "B"), true); + assertEquals(expectedResult, rLeft); + rLeft = schema.clipLeft(0, r, 2, ptr); + expectedResult = + KeyRange.getKeyRange(getKeyPart(table, "A", "B"), true, getKeyPart(table, "B"), true); + assertEquals(expectedResult, rLeft); + + r = KeyRange.getKeyRange(getKeyPart(table, "A", "B", "C"), true, KeyRange.UNBOUND, true); + rLeft = schema.clipLeft(0, r, 2, ptr); + expectedResult = + KeyRange.getKeyRange(getKeyPart(table, "A", "B"), true, KeyRange.UNBOUND, false); + assertEquals(expectedResult, rLeft); + + r = KeyRange.getKeyRange(KeyRange.UNBOUND, false, getKeyPart(table, "A", "B", "C"), true); + rLeft = schema.clipLeft(0, r, 2, ptr); + expectedResult = + KeyRange.getKeyRange(KeyRange.UNBOUND, false, getKeyPart(table, "A", "B"), true); + assertEquals(expectedResult, rLeft); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java index b144d90dbfa..ca74759e9a5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/RowKeyValueAccessorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.schema; import static org.junit.Assert.assertEquals; @@ -38,55 +37,61 @@ import org.apache.phoenix.util.SchemaUtil; import org.junit.Test; -public class RowKeyValueAccessorTest extends BaseConnectionlessQueryTest { +public class RowKeyValueAccessorTest extends BaseConnectionlessQueryTest { - public RowKeyValueAccessorTest() { - } + public RowKeyValueAccessorTest() { + } - private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values, int index) throws Exception { - assertExpectedRowKeyValue(dataColumns,pk,values,index,""); - } - - private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values, int index, String dataProps) throws Exception { - String schemaName = ""; - String tableName = "T"; - Connection conn = DriverManager.getConnection(getUrl()); - String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName),SchemaUtil.normalizeIdentifier(tableName)); - conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? "" : dataProps) ); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); - StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); - for (int i = 0; i < values.length; i++) { - buf.append("?,"); - } - buf.setCharAt(buf.length()-1, ')'); - PreparedStatement stmt = conn.prepareStatement(buf.toString()); - for (int i = 0; i < values.length; i++) { - stmt.setObject(i+1, values[i]); - } - stmt.execute(); - Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); - List dataKeyValues = iterator.next().getSecond(); - KeyValue keyValue = PhoenixKeyValueUtil.maybeCopyCell(dataKeyValues.get(0)); - - List pkColumns = table.getPKColumns(); - RowKeyValueAccessor accessor = new RowKeyValueAccessor(pkColumns, 3); - int offset = - accessor.getOffset(keyValue.getRowArray(), keyValue.getRowOffset()); - int length = accessor.getLength(keyValue.getRowArray(), offset, - keyValue.getOffset() + keyValue.getLength(), null, null); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(keyValue.getRowArray(), offset, length); - - PDataType dataType = pkColumns.get(index).getDataType(); - Object expectedObject = dataType.toObject(values[index], PDataType.fromLiteral(values[index])); - dataType.coerceBytes(ptr, dataType, pkColumns.get(index).getSortOrder(), SortOrder.getDefault()); - Object actualObject = dataType.toObject(ptr); - assertEquals(expectedObject, actualObject); - conn.close(); + private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values, int index) + throws Exception { + assertExpectedRowKeyValue(dataColumns, pk, values, index, ""); + } + + private void assertExpectedRowKeyValue(String dataColumns, String pk, Object[] values, int index, + String dataProps) throws Exception { + String schemaName = ""; + String tableName = "T"; + Connection conn = DriverManager.getConnection(getUrl()); + String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), + SchemaUtil.normalizeIdentifier(tableName)); + conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? 
"" : dataProps)); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); + StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES("); + for (int i = 0; i < values.length; i++) { + buf.append("?,"); } - - @Test - public void testFixedLengthValueAtEnd() throws Exception { - assertExpectedRowKeyValue("n VARCHAR NOT NULL, s CHAR(1) NOT NULL, y SMALLINT NOT NULL, o BIGINT NOT NULL", "n,s,y DESC,o DESC", new Object[] {"Abbey","F",2012,253}, 3); + buf.setCharAt(buf.length() - 1, ')'); + PreparedStatement stmt = conn.prepareStatement(buf.toString()); + for (int i = 0; i < values.length; i++) { + stmt.setObject(i + 1, values[i]); } + stmt.execute(); + Iterator>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn); + List dataKeyValues = iterator.next().getSecond(); + KeyValue keyValue = PhoenixKeyValueUtil.maybeCopyCell(dataKeyValues.get(0)); + + List pkColumns = table.getPKColumns(); + RowKeyValueAccessor accessor = new RowKeyValueAccessor(pkColumns, 3); + int offset = accessor.getOffset(keyValue.getRowArray(), keyValue.getRowOffset()); + int length = accessor.getLength(keyValue.getRowArray(), offset, + keyValue.getOffset() + keyValue.getLength(), null, null); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(keyValue.getRowArray(), offset, length); + + PDataType dataType = pkColumns.get(index).getDataType(); + Object expectedObject = dataType.toObject(values[index], PDataType.fromLiteral(values[index])); + dataType.coerceBytes(ptr, dataType, pkColumns.get(index).getSortOrder(), + SortOrder.getDefault()); + Object actualObject = dataType.toObject(ptr); + assertEquals(expectedObject, actualObject); + conn.close(); + } + + @Test + public void testFixedLengthValueAtEnd() throws Exception { + assertExpectedRowKeyValue( + "n VARCHAR NOT NULL, s CHAR(1) NOT NULL, y SMALLINT NOT NULL, o BIGINT NOT NULL", + "n,s,y DESC,o DESC", new Object[] { "Abbey", "F", 2012, 253 }, 3); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SaltingUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SaltingUtilTest.java index 8a28acbb2e5..f33f615f7ae 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/SaltingUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SaltingUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,39 +17,38 @@ */ package org.apache.phoenix.schema; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.Test; - -import java.util.Set; - import static org.junit.Assert.*; import static org.junit.Assert.assertEquals; -public class SaltingUtilTest { - - @Test - public void testGetSaltingByte() { - Set saltBytes = Sets.newHashSet(); - for (int i = 0; i < 100; i++) { - saltBytes.add(SaltingUtil.getSaltingByte(Bytes.toBytes(i), 0, Bytes.SIZEOF_INT, 3)); - } - assertEquals(ImmutableSet.of((byte)0, (byte)1, (byte)2), saltBytes); - } - +import java.util.Set; - /** - * Check an edge case where a row key's hash code is equal to Integer.MIN_VALUE. - */ - @Test - public void testGetSaltingByte_EdgeCaseHashCode() { - // This array has a hashCode of Integer.MIN_VALUE based on the hashing in SaltingUtil - byte[] rowKey = new byte[] { -106, 0, -10, 0, 19, -2 }; - byte saltingByte = SaltingUtil.getSaltingByte(rowKey, 0, rowKey.length, 3); +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; +import org.junit.Test; - assertTrue("Salting byte should be 0 or 1 or 2 but was " + saltingByte, - ImmutableSet.of((byte)0, (byte)1, (byte)2).contains(saltingByte)); +public class SaltingUtilTest { + @Test + public void testGetSaltingByte() { + Set saltBytes = Sets.newHashSet(); + for (int i = 0; i < 100; i++) { + saltBytes.add(SaltingUtil.getSaltingByte(Bytes.toBytes(i), 0, Bytes.SIZEOF_INT, 3)); } -} \ No newline at end of file + assertEquals(ImmutableSet.of((byte) 0, (byte) 1, (byte) 2), saltBytes); + } + + /** + * Check an edge case where a row key's hash code is equal to Integer.MIN_VALUE. + */ + @Test + public void testGetSaltingByte_EdgeCaseHashCode() { + // This array has a hashCode of Integer.MIN_VALUE based on the hashing in SaltingUtil + byte[] rowKey = new byte[] { -106, 0, -10, 0, 19, -2 }; + byte saltingByte = SaltingUtil.getSaltingByte(rowKey, 0, rowKey.length, 3); + + assertTrue("Salting byte should be 0 or 1 or 2 but was " + saltingByte, + ImmutableSet.of((byte) 0, (byte) 1, (byte) 2).contains(saltingByte)); + + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SchemaUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SchemaUtilTest.java index 10d7129fe81..d03a588b707 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/SchemaUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SchemaUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,51 +26,56 @@ public class SchemaUtilTest { - @Test - public void testExceptionCode() throws Exception { - SQLExceptionCode code = SQLExceptionCode.fromErrorCode(SQLExceptionCode.AGGREGATE_IN_GROUP_BY.getErrorCode()); - assertEquals(SQLExceptionCode.AGGREGATE_IN_GROUP_BY, code); - } - - @Test - public void testGetTableName() { - String tableDisplayName = SchemaUtil.getTableName("schemaName", "tableName"); - assertEquals(tableDisplayName, "schemaName.tableName"); - tableDisplayName = SchemaUtil.getTableName(null, "tableName"); - assertEquals(tableDisplayName, "tableName"); - } + @Test + public void testExceptionCode() throws Exception { + SQLExceptionCode code = + SQLExceptionCode.fromErrorCode(SQLExceptionCode.AGGREGATE_IN_GROUP_BY.getErrorCode()); + assertEquals(SQLExceptionCode.AGGREGATE_IN_GROUP_BY, code); + } - @Test - public void testGetColumnName() { - String columnDisplayName; - columnDisplayName = SchemaUtil.getMetaDataEntityName("schemaName", "tableName", "familyName", "columnName"); - assertEquals(columnDisplayName, "schemaName.tableName.familyName.columnName"); - columnDisplayName = SchemaUtil.getMetaDataEntityName(null, "tableName", "familyName", "columnName"); - assertEquals(columnDisplayName, "tableName.familyName.columnName"); - columnDisplayName = SchemaUtil.getMetaDataEntityName("schemaName", "tableName", null, "columnName"); - assertEquals(columnDisplayName, "schemaName.tableName.columnName"); - columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, "familyName", "columnName"); - assertEquals(columnDisplayName, "familyName.columnName"); - columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, null, "columnName"); - assertEquals(columnDisplayName, "columnName"); - } - - @Test - public void testEscapingColumnName() { - assertEquals("\"ID\"", SchemaUtil.getEscapedFullColumnName("ID")); - assertEquals("\"0\".\"NAME\"", SchemaUtil.getEscapedFullColumnName("0.NAME")); - assertEquals("\"CF1\".\"LOCATION\"", SchemaUtil.getEscapedFullColumnName("CF1.LOCATION")); - } + @Test + public void testGetTableName() { + String tableDisplayName = SchemaUtil.getTableName("schemaName", "tableName"); + assertEquals(tableDisplayName, "schemaName.tableName"); + tableDisplayName = SchemaUtil.getTableName(null, "tableName"); + assertEquals(tableDisplayName, "tableName"); + } - @Test - public void testGetTableNameFromFullNameByte() { - String tableDisplayName = SchemaUtil.getTableNameFromFullName(Bytes.toBytes("schemaName.tableName")); - assertEquals(tableDisplayName, "tableName"); - } + @Test + public void testGetColumnName() { + String columnDisplayName; + columnDisplayName = + SchemaUtil.getMetaDataEntityName("schemaName", "tableName", "familyName", "columnName"); + assertEquals(columnDisplayName, "schemaName.tableName.familyName.columnName"); + columnDisplayName = + SchemaUtil.getMetaDataEntityName(null, "tableName", "familyName", "columnName"); + assertEquals(columnDisplayName, "tableName.familyName.columnName"); + columnDisplayName = + SchemaUtil.getMetaDataEntityName("schemaName", "tableName", null, "columnName"); + assertEquals(columnDisplayName, "schemaName.tableName.columnName"); + columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, "familyName", "columnName"); + 
assertEquals(columnDisplayName, "familyName.columnName"); + columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, null, "columnName"); + assertEquals(columnDisplayName, "columnName"); + } - @Test - public void testGetTableNameFromFullName() { - String tableDisplayName = SchemaUtil.getTableNameFromFullName("schemaName.tableName"); - assertEquals(tableDisplayName, "tableName"); - } + @Test + public void testEscapingColumnName() { + assertEquals("\"ID\"", SchemaUtil.getEscapedFullColumnName("ID")); + assertEquals("\"0\".\"NAME\"", SchemaUtil.getEscapedFullColumnName("0.NAME")); + assertEquals("\"CF1\".\"LOCATION\"", SchemaUtil.getEscapedFullColumnName("CF1.LOCATION")); + } + + @Test + public void testGetTableNameFromFullNameByte() { + String tableDisplayName = + SchemaUtil.getTableNameFromFullName(Bytes.toBytes("schemaName.tableName")); + assertEquals(tableDisplayName, "tableName"); + } + + @Test + public void testGetTableNameFromFullName() { + String tableDisplayName = SchemaUtil.getTableNameFromFullName("schemaName.tableName"); + assertEquals(tableDisplayName, "tableName"); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SequenceAllocationTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SequenceAllocationTest.java index 84269447801..af3b5911ebe 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/SequenceAllocationTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SequenceAllocationTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,54 +23,63 @@ import java.util.List; import org.apache.phoenix.query.QueryServicesOptions; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class SequenceAllocationTest { - @Test - /** - * Validates that sorting a List of SequenceAllocation instances - * results in the same sort order as sorting SequenceKey instances. 
- */ - public void testSortingSequenceAllocation() { - - // Arrange - SequenceKey sequenceKey1 = new SequenceKey(null, "seqalloc", "sequenceC",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - SequenceKey sequenceKey2 = new SequenceKey(null, "seqalloc", "sequenceB",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - SequenceKey sequenceKey3 = new SequenceKey(null, "seqalloc", "sequenceA",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - List<SequenceKey> sequenceKeys = Lists.newArrayList(sequenceKey1, sequenceKey2, sequenceKey3); - List<SequenceAllocation> sequenceAllocations = Lists.newArrayList(new SequenceAllocation(sequenceKey2, 1), new SequenceAllocation(sequenceKey1, 1), new SequenceAllocation(sequenceKey3, 1)); - - // Act - Collections.sort(sequenceKeys); - Collections.sort(sequenceAllocations); - - // Assert - int i = 0; - for (SequenceKey sequenceKey : sequenceKeys) { - assertEquals(sequenceKey, sequenceAllocations.get(i).getSequenceKey()); - i++; - } - } - - @Test - public void testSortingSequenceAllocationPreservesAllocations() { - - // Arrange - SequenceKey sequenceKeyC = new SequenceKey(null, "seqalloc", "sequenceC",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - SequenceKey sequenceKeyB = new SequenceKey(null, "seqalloc", "sequenceB",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - SequenceKey sequenceKeyA = new SequenceKey(null, "seqalloc", "sequenceA",QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); - List<SequenceAllocation> sequenceAllocations = Lists.newArrayList(new SequenceAllocation(sequenceKeyB, 15), new SequenceAllocation(sequenceKeyC, 11), new SequenceAllocation(sequenceKeyA, 1000)); - - // Act - Collections.sort(sequenceAllocations); - - // Assert - assertEquals("sequenceA",sequenceAllocations.get(0).getSequenceKey().getSequenceName()); - assertEquals(1000,sequenceAllocations.get(0).getNumAllocations()); - assertEquals(15,sequenceAllocations.get(1).getNumAllocations()); - assertEquals(11,sequenceAllocations.get(2).getNumAllocations()); + @Test + /** + * Validates that sorting a List of SequenceAllocation instances results in the same sort order as + * sorting SequenceKey instances.
+ */ + public void testSortingSequenceAllocation() { + + // Arrange + SequenceKey sequenceKey1 = new SequenceKey(null, "seqalloc", "sequenceC", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + SequenceKey sequenceKey2 = new SequenceKey(null, "seqalloc", "sequenceB", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + SequenceKey sequenceKey3 = new SequenceKey(null, "seqalloc", "sequenceA", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + List<SequenceKey> sequenceKeys = Lists.newArrayList(sequenceKey1, sequenceKey2, sequenceKey3); + List<SequenceAllocation> sequenceAllocations = + Lists.newArrayList(new SequenceAllocation(sequenceKey2, 1), + new SequenceAllocation(sequenceKey1, 1), new SequenceAllocation(sequenceKey3, 1)); + + // Act + Collections.sort(sequenceKeys); + Collections.sort(sequenceAllocations); + + // Assert + int i = 0; + for (SequenceKey sequenceKey : sequenceKeys) { + assertEquals(sequenceKey, sequenceAllocations.get(i).getSequenceKey()); + i++; } + } + + @Test + public void testSortingSequenceAllocationPreservesAllocations() { + + // Arrange + SequenceKey sequenceKeyC = new SequenceKey(null, "seqalloc", "sequenceC", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + SequenceKey sequenceKeyB = new SequenceKey(null, "seqalloc", "sequenceB", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + SequenceKey sequenceKeyA = new SequenceKey(null, "seqalloc", "sequenceA", + QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS); + List<SequenceAllocation> sequenceAllocations = + Lists.newArrayList(new SequenceAllocation(sequenceKeyB, 15), + new SequenceAllocation(sequenceKeyC, 11), new SequenceAllocation(sequenceKeyA, 1000)); + + // Act + Collections.sort(sequenceAllocations); + + // Assert + assertEquals("sequenceA", sequenceAllocations.get(0).getSequenceKey().getSequenceName()); + assertEquals(1000, sequenceAllocations.get(0).getNumAllocations()); + assertEquals(15, sequenceAllocations.get(1).getNumAllocations()); + assertEquals(11, sequenceAllocations.get(2).getNumAllocations()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SortOrderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SortOrderTest.java index 8eedeef29dc..b22bbff57c4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/SortOrderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SortOrderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License.
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,102 +33,99 @@ * @since 3.0 */ public class SortOrderTest { - - @Test - public void ascSortOrderDoesNotTransformOp() { - for (CompareOperator op : CompareOperator.values()) { - assertSame(op, SortOrder.ASC.transform(op)); - } - } - - @Test - public void booleanLogic() { - assertTrue(PBoolean.INSTANCE.toObject(PDataType.TRUE_BYTES, SortOrder.ASC) == PBoolean.INSTANCE.toObject( - PDataType.FALSE_BYTES, SortOrder.DESC)); - assertTrue( - PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(true), SortOrder.ASC) == PBoolean.INSTANCE.toObject( - PBoolean.INSTANCE.toBytes(false), SortOrder.DESC)); - assertTrue( - PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(true,SortOrder.ASC)) == PBoolean.INSTANCE.toObject( - PBoolean.INSTANCE.toBytes(false,SortOrder.DESC))); - assertFalse(PBoolean.INSTANCE.toObject(PDataType.FALSE_BYTES, SortOrder.ASC) == PBoolean.INSTANCE.toObject(PDataType.FALSE_BYTES, SortOrder.DESC)); - assertFalse( - PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false), SortOrder.ASC) == PBoolean.INSTANCE.toObject( - PBoolean.INSTANCE.toBytes(false), SortOrder.DESC)); - assertFalse( - PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false,SortOrder.ASC)) == PBoolean.INSTANCE.toObject( - PBoolean.INSTANCE.toBytes(false,SortOrder.DESC))); - } + @Test + public void ascSortOrderDoesNotTransformOp() { + for (CompareOperator op : CompareOperator.values()) { + assertSame(op, SortOrder.ASC.transform(op)); + } + } - @Test - public void descSortOrderTransformsOp() { - for (CompareOperator op : CompareOperator.values()) { - CompareOperator oppositeOp = SortOrder.DESC.transform(op); - switch (op) { - case EQUAL: - assertSame(CompareOperator.EQUAL, oppositeOp); - break; - case GREATER: - assertSame(CompareOperator.LESS, oppositeOp); - break; - case GREATER_OR_EQUAL: - assertSame(CompareOperator.LESS_OR_EQUAL, oppositeOp); - break; - case LESS: - assertSame(CompareOperator.GREATER, oppositeOp); - break; - case LESS_OR_EQUAL: - assertSame(CompareOperator.GREATER_OR_EQUAL, oppositeOp); - break; - case NOT_EQUAL: - assertSame(CompareOperator.NOT_EQUAL, oppositeOp); - break; - case NO_OP: - assertSame(CompareOperator.NO_OP, oppositeOp); - break; - } - } - } - - @Test - public void defaultIsAsc() { - assertSame(SortOrder.ASC, SortOrder.getDefault()); - } - - @Test - public void ddlValue() { - assertSame(SortOrder.ASC, SortOrder.fromDDLValue("ASC")); - assertSame(SortOrder.ASC, SortOrder.fromDDLValue("asc")); - assertSame(SortOrder.ASC, SortOrder.fromDDLValue("aSc")); - assertSame(SortOrder.DESC, SortOrder.fromDDLValue("DESC")); - assertSame(SortOrder.DESC, SortOrder.fromDDLValue("desc")); - assertSame(SortOrder.DESC, SortOrder.fromDDLValue("DesC")); - - try { - SortOrder.fromDDLValue("foo"); - } catch (IllegalArgumentException expected) { - - } - } - - @Test - public void systemValue() { - assertSame(SortOrder.ASC, SortOrder.fromSystemValue(SortOrder.ASC.getSystemValue())); - assertSame(SortOrder.DESC, SortOrder.fromSystemValue(SortOrder.DESC.getSystemValue())); - assertSame(SortOrder.ASC, SortOrder.fromSystemValue(0)); - } - - @Test - public void invertByte() { - byte b = 42; - assertNotEquals(b, SortOrder.invert(b)); - assertEquals(b, SortOrder.invert(SortOrder.invert(b))); - } - - @Test - public void 
invertByteArray() { - byte[] b = new byte[]{1, 2, 3, 4}; - assertArrayEquals(b, SortOrder.invert(SortOrder.invert(b, 0, b.length), 0, b.length)); - } + @Test + public void booleanLogic() { + assertTrue(PBoolean.INSTANCE.toObject(PDataType.TRUE_BYTES, SortOrder.ASC) + == PBoolean.INSTANCE.toObject(PDataType.FALSE_BYTES, SortOrder.DESC)); + assertTrue(PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(true), SortOrder.ASC) + == PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false), SortOrder.DESC)); + assertTrue(PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(true, SortOrder.ASC)) + == PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false, SortOrder.DESC))); + + assertFalse(PBoolean.INSTANCE.toObject(PDataType.FALSE_BYTES, SortOrder.ASC) + == PBoolean.INSTANCE.toObject(PDataType.FALSE_BYTES, SortOrder.DESC)); + assertFalse(PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false), SortOrder.ASC) + == PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false), SortOrder.DESC)); + assertFalse(PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false, SortOrder.ASC)) + == PBoolean.INSTANCE.toObject(PBoolean.INSTANCE.toBytes(false, SortOrder.DESC))); + } + + @Test + public void descSortOrderTransformsOp() { + for (CompareOperator op : CompareOperator.values()) { + CompareOperator oppositeOp = SortOrder.DESC.transform(op); + switch (op) { + case EQUAL: + assertSame(CompareOperator.EQUAL, oppositeOp); + break; + case GREATER: + assertSame(CompareOperator.LESS, oppositeOp); + break; + case GREATER_OR_EQUAL: + assertSame(CompareOperator.LESS_OR_EQUAL, oppositeOp); + break; + case LESS: + assertSame(CompareOperator.GREATER, oppositeOp); + break; + case LESS_OR_EQUAL: + assertSame(CompareOperator.GREATER_OR_EQUAL, oppositeOp); + break; + case NOT_EQUAL: + assertSame(CompareOperator.NOT_EQUAL, oppositeOp); + break; + case NO_OP: + assertSame(CompareOperator.NO_OP, oppositeOp); + break; + } + } + } + + @Test + public void defaultIsAsc() { + assertSame(SortOrder.ASC, SortOrder.getDefault()); + } + + @Test + public void ddlValue() { + assertSame(SortOrder.ASC, SortOrder.fromDDLValue("ASC")); + assertSame(SortOrder.ASC, SortOrder.fromDDLValue("asc")); + assertSame(SortOrder.ASC, SortOrder.fromDDLValue("aSc")); + assertSame(SortOrder.DESC, SortOrder.fromDDLValue("DESC")); + assertSame(SortOrder.DESC, SortOrder.fromDDLValue("desc")); + assertSame(SortOrder.DESC, SortOrder.fromDDLValue("DesC")); + + try { + SortOrder.fromDDLValue("foo"); + } catch (IllegalArgumentException expected) { + + } + } + + @Test + public void systemValue() { + assertSame(SortOrder.ASC, SortOrder.fromSystemValue(SortOrder.ASC.getSystemValue())); + assertSame(SortOrder.DESC, SortOrder.fromSystemValue(SortOrder.DESC.getSystemValue())); + assertSame(SortOrder.ASC, SortOrder.fromSystemValue(0)); + } + + @Test + public void invertByte() { + byte b = 42; + assertNotEquals(b, SortOrder.invert(b)); + assertEquals(b, SortOrder.invert(SortOrder.invert(b))); + } + + @Test + public void invertByteArray() { + byte[] b = new byte[] { 1, 2, 3, 4 }; + assertArrayEquals(b, SortOrder.invert(SortOrder.invert(b, 0, b.length), 0, b.length)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java index 01074b4bc87..5cdcc89cb5e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java @@ -7,7 
+7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,68 +30,56 @@ import org.junit.Test; public class SystemSplitPolicyTest { - @Test - public void testStatsSplitPolicy() { - SplitOnLeadingVarCharColumnsPolicy policy = new SystemStatsSplitPolicy(); - byte[] splitOn; - byte[] rowKey; - byte[] table; - ImmutableBytesWritable family; - table = PVarchar.INSTANCE.toBytes("FOO.BAR"); - family = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR; - rowKey = ByteUtil.concat( - PLong.INSTANCE.toBytes(20L), - PVarchar.INSTANCE.toBytes("BAS"), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PInteger.INSTANCE.toBytes(100)); - splitOn = StatisticsUtil.getRowKey(table, family, rowKey); - splitOn = policy.getSplitPoint(splitOn); - assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn); - - table = PVarchar.INSTANCE.toBytes("MY_TABLE"); - family = new ImmutableBytesWritable(Bytes.toBytes("ABC")); - rowKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("BAS"), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PInteger.INSTANCE.toBytes(100), - PLong.INSTANCE.toBytes(20L)); - splitOn = StatisticsUtil.getRowKey(table, family, rowKey); - splitOn = policy.getSplitPoint(splitOn); - assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn); - } - - private static byte[] getSystemFunctionRowKey(String tenantId, String funcName, String typeName, byte[] argPos) { - return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(funcName), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(typeName), - QueryConstants.SEPARATOR_BYTE_ARRAY, - argPos - ); - } - - private static byte[] getSystemFunctionSplitKey(String tenantId, String funcName) { - return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes(funcName), - QueryConstants.SEPARATOR_BYTE_ARRAY); - } - - @Test - public void testFunctionSplitPolicy() { - SplitOnLeadingVarCharColumnsPolicy policy = new SystemFunctionSplitPolicy(); - byte[] splitPoint; - byte[] rowKey; - byte[] expectedSplitPoint; - rowKey = getSystemFunctionRowKey("","MY_FUNC", "VARCHAR", Bytes.toBytes(3)); - expectedSplitPoint = getSystemFunctionSplitKey("","MY_FUNC"); - splitPoint = policy.getSplitPoint(rowKey); - assertArrayEquals(expectedSplitPoint, splitPoint); - - rowKey = getSystemFunctionRowKey("TENANT1","F", "", Bytes.toBytes(3)); - expectedSplitPoint = getSystemFunctionSplitKey("TENANT1","F"); - splitPoint = policy.getSplitPoint(rowKey); - assertArrayEquals(expectedSplitPoint, splitPoint); - } + @Test + public void testStatsSplitPolicy() { + SplitOnLeadingVarCharColumnsPolicy policy = new SystemStatsSplitPolicy(); + byte[] splitOn; + byte[] rowKey; + byte[] table; + ImmutableBytesWritable family; + table = PVarchar.INSTANCE.toBytes("FOO.BAR"); + family = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR; + rowKey = ByteUtil.concat(PLong.INSTANCE.toBytes(20L), PVarchar.INSTANCE.toBytes("BAS"), + QueryConstants.SEPARATOR_BYTE_ARRAY, PInteger.INSTANCE.toBytes(100)); + splitOn = StatisticsUtil.getRowKey(table, family, rowKey); + splitOn = policy.getSplitPoint(splitOn); + 
assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn); + + table = PVarchar.INSTANCE.toBytes("MY_TABLE"); + family = new ImmutableBytesWritable(Bytes.toBytes("ABC")); + rowKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("BAS"), QueryConstants.SEPARATOR_BYTE_ARRAY, + PInteger.INSTANCE.toBytes(100), PLong.INSTANCE.toBytes(20L)); + splitOn = StatisticsUtil.getRowKey(table, family, rowKey); + splitOn = policy.getSplitPoint(splitOn); + assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn); + } + + private static byte[] getSystemFunctionRowKey(String tenantId, String funcName, String typeName, + byte[] argPos) { + return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(funcName), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(typeName), QueryConstants.SEPARATOR_BYTE_ARRAY, argPos); + } + + private static byte[] getSystemFunctionSplitKey(String tenantId, String funcName) { + return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId), QueryConstants.SEPARATOR_BYTE_ARRAY, + PVarchar.INSTANCE.toBytes(funcName), QueryConstants.SEPARATOR_BYTE_ARRAY); + } + + @Test + public void testFunctionSplitPolicy() { + SplitOnLeadingVarCharColumnsPolicy policy = new SystemFunctionSplitPolicy(); + byte[] splitPoint; + byte[] rowKey; + byte[] expectedSplitPoint; + rowKey = getSystemFunctionRowKey("", "MY_FUNC", "VARCHAR", Bytes.toBytes(3)); + expectedSplitPoint = getSystemFunctionSplitKey("", "MY_FUNC"); + splitPoint = policy.getSplitPoint(rowKey); + assertArrayEquals(expectedSplitPoint, splitPoint); + + rowKey = getSystemFunctionRowKey("TENANT1", "F", "", Bytes.toBytes(3)); + expectedSplitPoint = getSystemFunctionSplitKey("TENANT1", "F"); + splitPoint = policy.getSplitPoint(rowKey); + assertArrayEquals(expectedSplitPoint, splitPoint); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/ValueBitSetTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/ValueBitSetTest.java index 54565326b60..4476481a2ca 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/ValueBitSetTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/ValueBitSetTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,176 +27,179 @@ import org.apache.phoenix.schema.types.PDataType; import org.junit.Test; - public class ValueBitSetTest { - private KeyValueSchema generateSchema(int nFields, int nRepeating, final int nNotNull) { - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(nNotNull); - for (int i = 0; i < nFields; i++) { - final int fieldIndex = i; - for (int j = 0; j < nRepeating; j++) { - PDatum datum = new PDatum() { - @Override - public boolean isNullable() { - return fieldIndex <= nNotNull; - } - @Override - public PDataType getDataType() { - return PDataType.values()[fieldIndex % PDataType.values().length]; - } - @Override - public Integer getMaxLength() { - return null; - } - @Override - public Integer getScale() { - return null; - } - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }; - builder.addField(datum); - } - } - KeyValueSchema schema = builder.build(); - return schema; + private KeyValueSchema generateSchema(int nFields, int nRepeating, final int nNotNull) { + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(nNotNull); + for (int i = 0; i < nFields; i++) { + final int fieldIndex = i; + for (int j = 0; j < nRepeating; j++) { + PDatum datum = new PDatum() { + @Override + public boolean isNullable() { + return fieldIndex <= nNotNull; + } + + @Override + public PDataType getDataType() { + return PDataType.values()[fieldIndex % PDataType.values().length]; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }; + builder.addField(datum); + } } - - private static void setValueBitSet(KeyValueSchema schema, ValueBitSet valueSet) { - for (int i = 0; i < schema.getFieldCount() - schema.getMinNullable(); i++) { - if ((i & 1) == 1) { - valueSet.set(i); - } - } + KeyValueSchema schema = builder.build(); + return schema; + } + + private static void setValueBitSet(KeyValueSchema schema, ValueBitSet valueSet) { + for (int i = 0; i < schema.getFieldCount() - schema.getMinNullable(); i++) { + if ((i & 1) == 1) { + valueSet.set(i); + } } - - @Test - public void testMinNullableIndex() { - final int minNullableIndex = 4; // first 4 fields are not nullable. - int numFields = 6; - KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(minNullableIndex); - for (int i = 0; i < numFields; i++) { - final int fieldIndex = i; - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - // not nullable till index reaches minNullableIndex - return fieldIndex < minNullableIndex; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public PDataType getDataType() { - return PDataType.values()[fieldIndex % PDataType.values().length]; - } - }); + } + + @Test + public void testMinNullableIndex() { + final int minNullableIndex = 4; // first 4 fields are not nullable. 
+ int numFields = 6; + KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(minNullableIndex); + for (int i = 0; i < numFields; i++) { + final int fieldIndex = i; + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + // not nullable till index reaches minNullableIndex + return fieldIndex < minNullableIndex; } - KeyValueSchema kvSchema = builder.build(); - assertFalse(kvSchema.getFields().get(0).isNullable()); - assertFalse(kvSchema.getFields().get(minNullableIndex - 1).isNullable()); - assertTrue(kvSchema.getFields().get(minNullableIndex).isNullable()); - assertTrue(kvSchema.getFields().get(minNullableIndex + 1).isNullable()); - } - - @Test - public void testNullCount() { - int nFields = 32; - int nRepeating = 5; - int nNotNull = 8; - KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); - ValueBitSet valueSet = ValueBitSet.newInstance(schema); - setValueBitSet(schema, valueSet); - - // From beginning, not spanning longs - assertEquals(5, valueSet.getNullCount(0, 10)); - // From middle, not spanning longs - assertEquals(5, valueSet.getNullCount(10, 10)); - // From middle, spanning to middle of next long - assertEquals(10, valueSet.getNullCount(64 - 5, 20)); - // from end, not spanning longs - assertEquals(5, valueSet.getNullCount(nFields*nRepeating-nNotNull-10, 10)); - // from beginning, spanning long entirely into middle of next long - assertEquals(64, valueSet.getNullCount(2, 128)); - } - - @Test - public void testSizing() { - int nFields = 32; - int nRepeating = 5; - int nNotNull = 8; - KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); - ValueBitSet valueSet = ValueBitSet.newInstance(schema); - // Since no bits are set, it stores the long array length only - assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); - setValueBitSet(schema, valueSet); - assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 3, valueSet.getEstimatedLength()); - - nFields = 18; - nRepeating = 1; - nNotNull = 2; - schema = generateSchema(nFields, nRepeating, nNotNull); - valueSet = ValueBitSet.newInstance(schema); - assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); - setValueBitSet(schema, valueSet); - assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); - - nFields = 19; - nRepeating = 1; - nNotNull = 2; - schema = generateSchema(nFields, nRepeating, nNotNull); - valueSet = ValueBitSet.newInstance(schema); - assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); - setValueBitSet(schema, valueSet); - assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG, valueSet.getEstimatedLength()); - - nFields = 19; - nRepeating = 1; - nNotNull = 19; - schema = generateSchema(nFields, nRepeating, nNotNull); - valueSet = ValueBitSet.newInstance(schema); - assertEquals(0, valueSet.getEstimatedLength()); - - nFields = 129; - nRepeating = 1; - nNotNull = 0; - schema = generateSchema(nFields, nRepeating, nNotNull); - valueSet = ValueBitSet.newInstance(schema); - assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); - setValueBitSet(schema, valueSet); - assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 2, valueSet.getEstimatedLength()); - valueSet.set(128); - assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 3, valueSet.getEstimatedLength()); - } - - @Test - public void testMaxSetBit() { - int nFields = 19; - int nRepeating = 1; - int nNotNull = 2; - KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); - ValueBitSet valueSet = ValueBitSet.newInstance(schema); - 
setValueBitSet(schema, valueSet); - int length = valueSet.getEstimatedLength(); - byte[] buf = new byte[length]; - valueSet.toBytes(buf, 0); - ValueBitSet copyValueSet = ValueBitSet.newInstance(schema); - copyValueSet.or(new ImmutableBytesWritable(buf)); - assertTrue(copyValueSet.getMaxSetBit() >= valueSet.getMaxSetBit()); + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public PDataType getDataType() { + return PDataType.values()[fieldIndex % PDataType.values().length]; + } + }); } + KeyValueSchema kvSchema = builder.build(); + assertFalse(kvSchema.getFields().get(0).isNullable()); + assertFalse(kvSchema.getFields().get(minNullableIndex - 1).isNullable()); + assertTrue(kvSchema.getFields().get(minNullableIndex).isNullable()); + assertTrue(kvSchema.getFields().get(minNullableIndex + 1).isNullable()); + } + + @Test + public void testNullCount() { + int nFields = 32; + int nRepeating = 5; + int nNotNull = 8; + KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); + ValueBitSet valueSet = ValueBitSet.newInstance(schema); + setValueBitSet(schema, valueSet); + + // From beginning, not spanning longs + assertEquals(5, valueSet.getNullCount(0, 10)); + // From middle, not spanning longs + assertEquals(5, valueSet.getNullCount(10, 10)); + // From middle, spanning to middle of next long + assertEquals(10, valueSet.getNullCount(64 - 5, 20)); + // from end, not spanning longs + assertEquals(5, valueSet.getNullCount(nFields * nRepeating - nNotNull - 10, 10)); + // from beginning, spanning long entirely into middle of next long + assertEquals(64, valueSet.getNullCount(2, 128)); + } + + @Test + public void testSizing() { + int nFields = 32; + int nRepeating = 5; + int nNotNull = 8; + KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); + ValueBitSet valueSet = ValueBitSet.newInstance(schema); + // Since no bits are set, it stores the long array length only + assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); + setValueBitSet(schema, valueSet); + assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 3, valueSet.getEstimatedLength()); + + nFields = 18; + nRepeating = 1; + nNotNull = 2; + schema = generateSchema(nFields, nRepeating, nNotNull); + valueSet = ValueBitSet.newInstance(schema); + assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); + setValueBitSet(schema, valueSet); + assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); + + nFields = 19; + nRepeating = 1; + nNotNull = 2; + schema = generateSchema(nFields, nRepeating, nNotNull); + valueSet = ValueBitSet.newInstance(schema); + assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); + setValueBitSet(schema, valueSet); + assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG, valueSet.getEstimatedLength()); + + nFields = 19; + nRepeating = 1; + nNotNull = 19; + schema = generateSchema(nFields, nRepeating, nNotNull); + valueSet = ValueBitSet.newInstance(schema); + assertEquals(0, valueSet.getEstimatedLength()); + + nFields = 129; + nRepeating = 1; + nNotNull = 0; + schema = generateSchema(nFields, nRepeating, nNotNull); + valueSet = ValueBitSet.newInstance(schema); + assertEquals(Bytes.SIZEOF_SHORT, valueSet.getEstimatedLength()); + setValueBitSet(schema, valueSet); + assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 2, valueSet.getEstimatedLength()); + valueSet.set(128); + 
assertEquals(Bytes.SIZEOF_SHORT + Bytes.SIZEOF_LONG * 3, valueSet.getEstimatedLength()); + } + + @Test + public void testMaxSetBit() { + int nFields = 19; + int nRepeating = 1; + int nNotNull = 2; + KeyValueSchema schema = generateSchema(nFields, nRepeating, nNotNull); + ValueBitSet valueSet = ValueBitSet.newInstance(schema); + setValueBitSet(schema, valueSet); + int length = valueSet.getEstimatedLength(); + byte[] buf = new byte[length]; + valueSet.toBytes(buf, 0); + ValueBitSet copyValueSet = ValueBitSet.newInstance(schema); + copyValueSet.or(new ImmutableBytesWritable(buf)); + assertTrue(copyValueSet.getMaxSetBit() >= valueSet.getMaxSetBit()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java index fa1523749d4..5144bf1684e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +26,6 @@ import static org.mockito.Mockito.when; import java.io.IOException; -import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; @@ -34,119 +34,115 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.schema.stats.StatisticsScanner.StatisticsScannerCallable; import org.junit.Before; import org.junit.Test; - /** * Test to verify that we don't try to update stats when a RS is stopping. 
*/ public class StatisticsScannerTest { - private Region region; - private RegionServerServices rsServices; - private StatisticsWriter statsWriter; - private StatisticsScannerCallable callable; - private StatisticsCollectionRunTracker runTracker; - private StatisticsScanner mockScanner; - private StatisticsCollector tracker; - private InternalScanner delegate; - private RegionInfo regionInfo; - - private Configuration config; - private RegionCoprocessorEnvironment env; - private Connection conn; - - @Before - public void setupMocks() throws Exception { - this.config = new Configuration(false); - - // Create all of the mocks - this.region = mock(Region.class); - this.rsServices = mock(RegionServerServices.class); - this.statsWriter = mock(StatisticsWriter.class); - this.callable = mock(StatisticsScannerCallable.class); - this.runTracker = mock(StatisticsCollectionRunTracker.class); - this.mockScanner = mock(StatisticsScanner.class); - this.tracker = mock(StatisticsCollector.class); - this.delegate = mock(InternalScanner.class); - this.regionInfo = mock(RegionInfo.class); - this.env = mock(RegionCoprocessorEnvironment.class); - this.conn = mock(Connection.class); - - // Wire up the mocks to the mock StatisticsScanner - when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter); - when(mockScanner.createCallable()).thenReturn(callable); - when(mockScanner.getStatsCollectionRunTracker(any())).thenReturn(runTracker); - when(mockScanner.getRegion()).thenReturn(region); - when(mockScanner.getConfig()).thenReturn(config); - when(mockScanner.getTracker()).thenReturn(tracker); - when(mockScanner.getDelegate()).thenReturn(delegate); - when(env.getConnection()).thenReturn(conn); - when(mockScanner.getConnection()).thenReturn(conn); - - // Wire up the HRegionInfo mock to the Region mock - when(region.getRegionInfo()).thenReturn(regionInfo); - - // Always call close() on the mock StatisticsScanner - doCallRealMethod().when(mockScanner).close(); - } - - @Test - public void testCheckRegionServerStoppingOnClose() throws Exception { - when(conn.isClosed()).thenReturn(true); - when(conn.isAborted()).thenReturn(false); - - mockScanner.close(); - - verify(conn).isClosed(); - verify(callable, never()).call(); - verify(runTracker, never()).runTask(callable); - } - - @Test - public void testCheckRegionServerStoppedOnClose() throws Exception { - when(conn.isClosed()).thenReturn(false); - when(conn.isAborted()).thenReturn(true); - - mockScanner.close(); - - verify(conn).isClosed(); - verify(conn).isAborted(); - verify(callable, never()).call(); - verify(runTracker, never()).runTask(callable); - } - - @SuppressWarnings("unchecked") - @Test - public void testCheckRegionServerStoppingOnException() throws Exception { - StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable(); - doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(), any(), - any(), any()); - when(conn.isClosed()).thenReturn(true); - when(conn.isAborted()).thenReturn(false); - - // Should not throw an exception - realCallable.call(); - - verify(conn).isClosed(); - } - - @SuppressWarnings("unchecked") - @Test - public void testCheckRegionServerStoppedOnException() throws Exception { - StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable(); - doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(), any(), - any(), any()); - when(conn.isClosed()).thenReturn(false); - when(conn.isAborted()).thenReturn(true); - - // Should not throw an exception - 
realCallable.call(); - - verify(conn).isClosed(); - verify(conn).isAborted(); - } + private Region region; + private RegionServerServices rsServices; + private StatisticsWriter statsWriter; + private StatisticsScannerCallable callable; + private StatisticsCollectionRunTracker runTracker; + private StatisticsScanner mockScanner; + private StatisticsCollector tracker; + private InternalScanner delegate; + private RegionInfo regionInfo; + + private Configuration config; + private RegionCoprocessorEnvironment env; + private Connection conn; + + @Before + public void setupMocks() throws Exception { + this.config = new Configuration(false); + + // Create all of the mocks + this.region = mock(Region.class); + this.rsServices = mock(RegionServerServices.class); + this.statsWriter = mock(StatisticsWriter.class); + this.callable = mock(StatisticsScannerCallable.class); + this.runTracker = mock(StatisticsCollectionRunTracker.class); + this.mockScanner = mock(StatisticsScanner.class); + this.tracker = mock(StatisticsCollector.class); + this.delegate = mock(InternalScanner.class); + this.regionInfo = mock(RegionInfo.class); + this.env = mock(RegionCoprocessorEnvironment.class); + this.conn = mock(Connection.class); + + // Wire up the mocks to the mock StatisticsScanner + when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter); + when(mockScanner.createCallable()).thenReturn(callable); + when(mockScanner.getStatsCollectionRunTracker(any())).thenReturn(runTracker); + when(mockScanner.getRegion()).thenReturn(region); + when(mockScanner.getConfig()).thenReturn(config); + when(mockScanner.getTracker()).thenReturn(tracker); + when(mockScanner.getDelegate()).thenReturn(delegate); + when(env.getConnection()).thenReturn(conn); + when(mockScanner.getConnection()).thenReturn(conn); + + // Wire up the HRegionInfo mock to the Region mock + when(region.getRegionInfo()).thenReturn(regionInfo); + + // Always call close() on the mock StatisticsScanner + doCallRealMethod().when(mockScanner).close(); + } + + @Test + public void testCheckRegionServerStoppingOnClose() throws Exception { + when(conn.isClosed()).thenReturn(true); + when(conn.isAborted()).thenReturn(false); + + mockScanner.close(); + + verify(conn).isClosed(); + verify(callable, never()).call(); + verify(runTracker, never()).runTask(callable); + } + + @Test + public void testCheckRegionServerStoppedOnClose() throws Exception { + when(conn.isClosed()).thenReturn(false); + when(conn.isAborted()).thenReturn(true); + + mockScanner.close(); + + verify(conn).isClosed(); + verify(conn).isAborted(); + verify(callable, never()).call(); + verify(runTracker, never()).runTask(callable); + } + + @SuppressWarnings("unchecked") + @Test + public void testCheckRegionServerStoppingOnException() throws Exception { + StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable(); + doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(), any(), any(), any()); + when(conn.isClosed()).thenReturn(true); + when(conn.isAborted()).thenReturn(false); + + // Should not throw an exception + realCallable.call(); + + verify(conn).isClosed(); + } + + @SuppressWarnings("unchecked") + @Test + public void testCheckRegionServerStoppedOnException() throws Exception { + StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable(); + doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(), any(), any(), any()); + when(conn.isClosed()).thenReturn(false); + when(conn.isAborted()).thenReturn(true); + + // Should not 
throw an exception + realCallable.call(); + + verify(conn).isClosed(); + verify(conn).isAborted(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/UpdateStatisticsToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/UpdateStatisticsToolTest.java index 5c0a4889ae6..1ac7ffb3ca5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/UpdateStatisticsToolTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/UpdateStatisticsToolTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,92 +17,91 @@ */ package org.apache.phoenix.schema.stats; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.junit.Assert; -import org.junit.Test; - import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.junit.Test; + public class UpdateStatisticsToolTest { - @Test (expected = IllegalStateException.class) - public void testTableNameIsMandatory() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseOptions(new String[] {}); - } + @Test(expected = IllegalStateException.class) + public void testTableNameIsMandatory() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseOptions(new String[] {}); + } - @Test (expected = IllegalStateException.class) - public void testManageSnapshotAndRunFgOption1() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseOptions(new String[] {"-t", "table1", "-ms"}); - } + @Test(expected = IllegalStateException.class) + public void testManageSnapshotAndRunFgOption1() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseOptions(new String[] { "-t", "table1", "-ms" }); + } - @Test - public void testManageSnapshotAndRunFgOption2() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - try { - tool.parseOptions(new String[] {"-t", "table1", "-ms", "-runfg"}); - } catch (IllegalStateException e) { - fail("IllegalStateException is not expected " + - "since all required parameters are provided."); - } + @Test + public void testManageSnapshotAndRunFgOption2() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + try { + tool.parseOptions(new String[] { "-t", "table1", "-ms", "-runfg" }); + } catch (IllegalStateException e) { + fail( + "IllegalStateException is not expected " + "since all required parameters are provided."); } + } - @Test - public void testSnapshotNameInput() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg", "-s", "snap1"}); - assertEquals("snap1", tool.getSnapshotName()); - } + @Test + public void testSnapshotNameInput() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseArgs(new String[] { "-t", "table1", "-ms", "-runfg", "-s", "snap1" }); + assertEquals("snap1", tool.getSnapshotName()); + } - @Test - public void testSnapshotNameDefault() { - UpdateStatisticsTool tool 
= new UpdateStatisticsTool(); - tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg"}); - assertTrue(tool.getSnapshotName().startsWith("UpdateStatisticsTool_table1_")); - } + @Test + public void testSnapshotNameDefault() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseArgs(new String[] { "-t", "table1", "-ms", "-runfg" }); + assertTrue(tool.getSnapshotName().startsWith("UpdateStatisticsTool_table1_")); + } - @Test - public void testRestoreDirDefault() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg"}); - assertEquals("file:/tmp", tool.getRestoreDir().toString()); - } + @Test + public void testRestoreDirDefault() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseArgs(new String[] { "-t", "table1", "-ms", "-runfg" }); + assertEquals("file:/tmp", tool.getRestoreDir().toString()); + } - @Test - public void testRestoreDirInput() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseArgs(new String[] {"-t", "table1", "-d", "fs:/path"}); - assertEquals("fs:/path", tool.getRestoreDir().toString()); - } + @Test + public void testRestoreDirInput() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseArgs(new String[] { "-t", "table1", "-d", "fs:/path" }); + assertEquals("fs:/path", tool.getRestoreDir().toString()); + } - @Test - public void testRestoreDirFromConfig() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - Configuration configuration = HBaseConfiguration.create(); - configuration.set(FS_DEFAULT_NAME_KEY, "hdfs://base-dir"); - tool.setConf(configuration); - tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg"}); - assertEquals("hdfs://base-dir/tmp", tool.getRestoreDir().toString()); - } + @Test + public void testRestoreDirFromConfig() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + Configuration configuration = HBaseConfiguration.create(); + configuration.set(FS_DEFAULT_NAME_KEY, "hdfs://base-dir"); + tool.setConf(configuration); + tool.parseArgs(new String[] { "-t", "table1", "-ms", "-runfg" }); + assertEquals("hdfs://base-dir/tmp", tool.getRestoreDir().toString()); + } - @Test - public void testJobPriorityInput() { - UpdateStatisticsTool tool = new UpdateStatisticsTool(); - tool.parseArgs(new String[] {"-t", "table1"}); - assertEquals("NORMAL", tool.getJobPriority()); + @Test + public void testJobPriorityInput() { + UpdateStatisticsTool tool = new UpdateStatisticsTool(); + tool.parseArgs(new String[] { "-t", "table1" }); + assertEquals("NORMAL", tool.getJobPriority()); - tool.parseArgs(new String[] {"-t", "table1", "-p", "0"}); - assertEquals("VERY_HIGH", tool.getJobPriority()); + tool.parseArgs(new String[] { "-t", "table1", "-p", "0" }); + assertEquals("VERY_HIGH", tool.getJobPriority()); - tool.parseArgs(new String[] {"-t", "table1", "-p", "-1"}); - assertEquals("NORMAL", tool.getJobPriority()); + tool.parseArgs(new String[] { "-t", "table1", "-p", "-1" }); + assertEquals("NORMAL", tool.getJobPriority()); - tool.parseArgs(new String[] {"-t", "table1", "-p", "DSAFDAS"}); - assertEquals("NORMAL", tool.getJobPriority()); - } -} \ No newline at end of file + tool.parseArgs(new String[] { "-t", "table1", "-p", "DSAFDAS" }); + assertEquals("NORMAL", tool.getJobPriority()); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/tuple/SingleKeyValueTupleTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/tuple/SingleKeyValueTupleTest.java index 
42df766e6ed..0383fccb60b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/tuple/SingleKeyValueTupleTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/tuple/SingleKeyValueTupleTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,31 +17,32 @@ */ package org.apache.phoenix.schema.tuple; -import org.junit.Test; import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; public class SingleKeyValueTupleTest { - @Test - public void testToString() { + @Test + public void testToString() { - SingleKeyValueTuple singleKeyValueTuple = new SingleKeyValueTuple(); - assertTrue(singleKeyValueTuple.toString().equals("SingleKeyValueTuple[null]")); - final byte [] rowKey = Bytes.toBytes("aaa"); - singleKeyValueTuple.setKey(new ImmutableBytesWritable(rowKey)); - assertTrue(singleKeyValueTuple.toString().equals("SingleKeyValueTuple[aaa]")); + SingleKeyValueTuple singleKeyValueTuple = new SingleKeyValueTuple(); + assertTrue(singleKeyValueTuple.toString().equals("SingleKeyValueTuple[null]")); + final byte[] rowKey = Bytes.toBytes("aaa"); + singleKeyValueTuple.setKey(new ImmutableBytesWritable(rowKey)); + assertTrue(singleKeyValueTuple.toString().equals("SingleKeyValueTuple[aaa]")); - byte [] family1 = Bytes.toBytes("abc"); - byte [] qualifier1 = Bytes.toBytes("def"); - KeyValue keyValue = new KeyValue(rowKey, family1, qualifier1, 0L, Type.Put, rowKey); - singleKeyValueTuple = new SingleKeyValueTuple(keyValue); - assertTrue(singleKeyValueTuple.toString().startsWith("SingleKeyValueTuple[aaa/abc:def/0/Put/vlen=3")); - assertTrue(singleKeyValueTuple.toString().endsWith("]")); - } + byte[] family1 = Bytes.toBytes("abc"); + byte[] qualifier1 = Bytes.toBytes("def"); + KeyValue keyValue = new KeyValue(rowKey, family1, qualifier1, 0L, Type.Put, rowKey); + singleKeyValueTuple = new SingleKeyValueTuple(keyValue); + assertTrue( + singleKeyValueTuple.toString().startsWith("SingleKeyValueTuple[aaa/abc:def/0/Put/vlen=3")); + assertTrue(singleKeyValueTuple.toString().endsWith("]")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePhoenixArrayToStringTest.java index 1ec77424a3c..6a5a9e9b94e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePhoenixArrayToStringTest.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; @@ -16,69 +23,68 @@ public abstract class BasePhoenixArrayToStringTest { - @Test - public void testEmptyArray() { - helpTestToString(getBaseType(), new Object[] {}, "[]"); - } + @Test + public void testEmptyArray() { + helpTestToString(getBaseType(), new Object[] {}, "[]"); + } - @Test - public void testSingleObjectArray() { - helpTestToString(getBaseType(), new Object[] { getElement1() }, "[" + getString1() + "]"); - } + @Test + public void testSingleObjectArray() { + helpTestToString(getBaseType(), new Object[] { getElement1() }, "[" + getString1() + "]"); + } - @Test - public void testMultipleObjectArray() { - helpTestToString(getBaseType(), - new Object[] { getElement1(), getElement2(), getElement3() }, "[" + getString1() + ", " - + getString2() + ", " + getString3() + "]"); - } + @Test + public void testMultipleObjectArray() { + helpTestToString(getBaseType(), new Object[] { getElement1(), getElement2(), getElement3() }, + "[" + getString1() + ", " + getString2() + ", " + getString3() + "]"); + } - @Test - public void testSingleNullObjectArray() { - helpTestToString(getBaseType(), new Object[] { null }, "[" + getNullString() + "]"); - } + @Test + public void testSingleNullObjectArray() { + helpTestToString(getBaseType(), new Object[] { null }, "[" + getNullString() + "]"); + } - @Test - public void testMultipleNullObjectArray() { - helpTestToString(getBaseType(), new Object[] { null, null }, "[" + getNullString() + ", " - + getNullString() + "]"); - } + @Test + public void testMultipleNullObjectArray() { + helpTestToString(getBaseType(), new Object[] { null, null }, + "[" + getNullString() + ", " + getNullString() + "]"); + } - @Test - public void testNormalAndNullObjectArray() { - helpTestToString(getBaseType(), new Object[] { null, getElement1(), null, getElement2() }, - "[" + getNullString() + ", " + getString1() + ", " + getNullString() + ", " - + getString2() + "]"); - } + @Test + public void testNormalAndNullObjectArray() { + helpTestToString(getBaseType(), new Object[] { null, getElement1(), null, getElement2() }, "[" + + getNullString() + ", " + getString1() + ", " + getNullString() + ", " + getString2() + "]"); + } - protected abstract PDataType getBaseType(); + protected abstract PDataType getBaseType(); - protected abstract Object 
getElement1(); + protected abstract Object getElement1(); - protected abstract String getString1(); + protected abstract String getString1(); - protected abstract Object getElement2(); + protected abstract Object getElement2(); - protected abstract String getString2(); + protected abstract String getString2(); - protected abstract Object getElement3(); + protected abstract Object getElement3(); - protected abstract String getString3(); + protected abstract String getString3(); - protected String getNullString() { - return "null"; - } + protected String getNullString() { + return "null"; + } - protected void helpTestToString(PDataType type, Object[] array, String expected) { - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(type, array); - boolean isPrimitive = isPrimitive(arr); - assertEquals("Expected " + getBaseType() + " array to be " + (isPrimitive ? "" : "not ") - + "primitive.", isPrimitive, !arr.getClass().equals(PhoenixArray.class)); - assertEquals(expected, arr.toString()); - } + protected void helpTestToString(PDataType type, Object[] array, String expected) { + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(type, array); + boolean isPrimitive = isPrimitive(arr); + assertEquals( + "Expected " + getBaseType() + " array to be " + (isPrimitive ? "" : "not ") + "primitive.", + isPrimitive, !arr.getClass().equals(PhoenixArray.class)); + assertEquals(expected, arr.toString()); + } - protected boolean isPrimitive(PhoenixArray arr) { - return arr.isPrimitiveType(); - } + protected boolean isPrimitive(PhoenixArray arr) { + return arr.isPrimitiveType(); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveDoublePhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveDoublePhoenixArrayToStringTest.java index 7ee74286710..14f00c00843 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveDoublePhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveDoublePhoenixArrayToStringTest.java @@ -1,39 +1,46 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; -public abstract class BasePrimitiveDoublePhoenixArrayToStringTest extends - BasePhoenixArrayToStringTest { +public abstract class BasePrimitiveDoublePhoenixArrayToStringTest + extends BasePhoenixArrayToStringTest { - public BasePrimitiveDoublePhoenixArrayToStringTest() { - super(); - } + public BasePrimitiveDoublePhoenixArrayToStringTest() { + super(); + } - @Override - protected String getString1() { - return "1.1"; - } + @Override + protected String getString1() { + return "1.1"; + } - @Override - protected String getString2() { - return "2.2"; - } + @Override + protected String getString2() { + return "2.2"; + } - @Override - protected String getString3() { - return "3.3"; - } + @Override + protected String getString3() { + return "3.3"; + } - @Override - protected String getNullString() { - return "0.0"; - } -} \ No newline at end of file + @Override + protected String getNullString() { + return "0.0"; + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveIntPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveIntPhoenixArrayToStringTest.java index 0f105bcf248..73553742bac 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveIntPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/BasePrimitiveIntPhoenixArrayToStringTest.java @@ -1,50 +1,58 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.schema.types; -public abstract class BasePrimitiveIntPhoenixArrayToStringTest extends BasePhoenixArrayToStringTest { - - @Override - protected Number getElement1() { - return 1; - } - - @Override - protected String getString1() { - return "1"; - } - - @Override - protected Number getElement2() { - return 2; - } - - @Override - protected String getString2() { - return "2"; - } - - @Override - protected Number getElement3() { - return 3; - } - - @Override - protected String getString3() { - return "3"; - } - - @Override - protected String getNullString() { - return "0"; - } - -} \ No newline at end of file +public abstract class BasePrimitiveIntPhoenixArrayToStringTest + extends BasePhoenixArrayToStringTest { + + @Override + protected Number getElement1() { + return 1; + } + + @Override + protected String getString1() { + return "1"; + } + + @Override + protected Number getElement2() { + return 2; + } + + @Override + protected String getString2() { + return "2"; + } + + @Override + protected Number getElement3() { + return 3; + } + + @Override + protected String getString3() { + return "3"; + } + + @Override + protected String getNullString() { + return "0"; + } + +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeForArraysTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeForArraysTest.java index 792ec6b054f..3c17d828637 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeForArraysTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeForArraysTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -38,1204 +38,1152 @@ import org.junit.Test; public class PDataTypeForArraysTest { - @Test - public void testForIntegerArray() { - Integer[] intArr = new Integer[2]; - intArr[0] = 1; - intArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PInteger.INSTANCE, intArr); - PIntegerArray.INSTANCE.toObject(arr, PIntegerArray.INSTANCE); - byte[] bytes = PIntegerArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PIntegerArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForBooleanArray() { - Boolean[] boolArr = new Boolean[2]; - boolArr[0] = true; - boolArr[1] = false; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PBoolean.INSTANCE, boolArr); - PBooleanArray.INSTANCE.toObject(arr, PBooleanArray.INSTANCE); - byte[] bytes = PBooleanArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PBooleanArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForVarCharArray() { - String[] strArr = new String[2]; - strArr[0] = "abc"; - strArr[1] = "klmnop"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testVarCharArrayWithNullValues1() { - String[] strArr = new String[6]; - strArr[0] = "abc"; - 
strArr[1] = null; - strArr[2] = "bcd"; - strArr[3] = null; - strArr[4] = null; - strArr[5] = "b"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testVarCharArrayWithNullValues2() { - String[] strArr = new String[6]; - strArr[0] = "abc"; - strArr[1] = null; - strArr[2] = "bcd"; - strArr[3] = null; - strArr[4] = "cde"; - strArr[5] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); + @Test + public void testForIntegerArray() { + Integer[] intArr = new Integer[2]; + intArr[0] = 1; + intArr[1] = 2; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, intArr); + PIntegerArray.INSTANCE.toObject(arr, PIntegerArray.INSTANCE); + byte[] bytes = PIntegerArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PIntegerArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForBooleanArray() { + Boolean[] boolArr = new Boolean[2]; + boolArr[0] = true; + boolArr[1] = false; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PBoolean.INSTANCE, boolArr); + PBooleanArray.INSTANCE.toObject(arr, PBooleanArray.INSTANCE); + byte[] bytes = PBooleanArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PBooleanArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForVarCharArray() { + String[] strArr = new String[2]; + strArr[0] = "abc"; + strArr[1] = "klmnop"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues1() { + String[] strArr = new String[6]; + strArr[0] = "abc"; + strArr[1] = null; + strArr[2] = "bcd"; + strArr[3] = null; + strArr[4] = null; + strArr[5] = "b"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues2() { + String[] strArr = new String[6]; + strArr[0] = "abc"; + strArr[1] = null; + strArr[2] = "bcd"; + strArr[3] = null; + strArr[4] = "cde"; + strArr[5] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues3() { + String[] strArr = new String[6]; + strArr[0] = "abc"; + strArr[1] = null; + strArr[2] = null; + strArr[3] = null; + strArr[4] = null; + strArr[5] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + 
PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues4() { + String[] strArr = new String[7]; + strArr[0] = "abc"; + strArr[1] = null; + strArr[2] = null; + strArr[3] = null; + strArr[4] = null; + strArr[5] = null; + strArr[6] = "xys"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues5() { + String[] strArr = new String[6]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = "cde"; + strArr[3] = null; + strArr[4] = null; + strArr[5] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues6() { + String[] strArr = new String[6]; + strArr[0] = "abc"; + strArr[1] = null; + strArr[2] = "cde"; + strArr[3] = "bcd"; + strArr[4] = null; + strArr[5] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayWithNullValues7() { + String[] strArr = new String[6]; + strArr[0] = null; + strArr[1] = "abc"; + strArr[2] = null; + strArr[3] = "bcd"; + strArr[4] = null; + strArr[5] = "cde"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForCharArray() { + String[] strArr = new String[2]; + strArr[0] = "a"; + strArr[1] = "d"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PChar.INSTANCE, strArr); + byte[] bytes = PCharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PCharArray.INSTANCE.toObject(bytes, 0, bytes.length, + PCharArray.INSTANCE, null, 1, null); + assertEquals(arr, resultArr); + } + + @Test + public void testForLongArray() { + Long[] longArr = new Long[2]; + longArr[0] = 1l; + longArr[1] = 2l; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PLong.INSTANCE, longArr); + PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); + byte[] bytes = PLongArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PLongArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForSmallIntArray() { + Short[] shortArr = new Short[2]; + shortArr[0] = 1; + shortArr[1] = 2; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PSmallint.INSTANCE, shortArr); + PSmallintArray.INSTANCE.toObject(arr, PSmallintArray.INSTANCE); + byte[] bytes = PSmallintArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PSmallintArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForVarCharArrayForOddNumber() { + String[] strArr = new 
String[3]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForVarCharArrayOneElement() { + String[] strArr = new String[1]; + strArr[0] = "ereref"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForVarcharArrayWith1ElementInLargerBuffer() { + String[] strArr = new String[1]; + strArr[0] = "abx"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + byte[] moreBytes = new byte[bytes.length + 20]; + // Generate some garbage + for (int i = 0; i < moreBytes.length; i++) { + moreBytes[i] = (byte) -i; } - - @Test - public void testVarCharArrayWithNullValues3() { - String[] strArr = new String[6]; - strArr[0] = "abc"; - strArr[1] = null; - strArr[2] = null; - strArr[3] = null; - strArr[4] = null; - strArr[5] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); + System.arraycopy(bytes, 0, moreBytes, 10, bytes.length); + PhoenixArray resultArr = + (PhoenixArray) PVarcharArray.INSTANCE.toObject(moreBytes, 10, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForVarCharArrayForEvenNumberWithIndex() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random12"; + strArr[4] = "ranzzz"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("ranzzz", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayWithOneElementIndex() { + String[] strArr = new String[1]; + strArr[0] = "abx"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("abx", Bytes.toString(res)); + } + + public void testVariableLengthArrayWithElementsMoreThanShortMax() { + String[] strArr = new String[(2 * Short.MAX_VALUE) + 100]; + for (int i = 0; i < (2 * Short.MAX_VALUE) + 100; i++) { + String str = "abc"; + for (int j = 0; j <= i; j++) { + str += "-"; + } + 
strArr[i] = str; } - - @Test - public void testVarCharArrayWithNullValues4() { - String[] strArr = new String[7]; - strArr[0] = "abc"; - strArr[1] = null; - strArr[2] = null; - strArr[3] = null; - strArr[4] = null; - strArr[5] = null; - strArr[6] = "xys"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("abc---", Bytes.toString(res)); + } + + @Test + public void testGetArrayLengthForVariableLengthArray() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random12"; + strArr[4] = "ranzzz"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + int result = PArrayDataType.getArrayLength(ptr, PVarchar.INSTANCE, null); + assertEquals(5, result); + } + + @Test + public void testForVarCharArrayForOddNumberWithIndex() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random12"; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("random12", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValue1() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("random", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValue2() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random12"; + strArr[4] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new 
ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("random", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForOddNumberWithIndex3() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random12"; + strArr[4] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForOddNumberWithIndex4() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForOddNumberWithIndex5() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "random12"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("random12", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForOddNumberWithIndex6() { + String[] strArr = new String[6]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "random12"; + strArr[5] = "random17"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("random12", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValue5() { + String[] strArr = new String[5]; + strArr[0] = "abx"; + 
strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart1() { + String[] strArr = new String[5]; + strArr[0] = null; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart2() { + String[] strArr = new String[5]; + strArr[0] = null; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart3() { + String[] strArr = new String[5]; + strArr[0] = null; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("ran", Bytes.toString(res)); + } + + @Test + public void testPositionSearchWithVarLengthArrayWithAllNulls() { + String[] strArr = new String[5]; + strArr[0] = null; + strArr[1] = null; + strArr[2] = null; + strArr[3] = null; + strArr[4] = null; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = 
new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForOneElementArrayWithIndex() { + String[] strArr = new String[1]; + strArr[0] = "abx"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("abx", Bytes.toString(res)); + } + + @Test + public void testForVarCharArrayForWithTwoelementsElementArrayWithIndex() { + String[] strArr = new String[2]; + strArr[0] = "abx"; + strArr[1] = "ereref"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 1, PVarchar.INSTANCE, + PVarchar.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + assertEquals("ereref", Bytes.toString(res)); + } + + @Test + public void testLongArrayWithIndex() { + Long[] longArr = new Long[4]; + longArr[0] = 1l; + longArr[1] = 2l; + longArr[2] = 4l; + longArr[3] = 5l; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PLong.INSTANCE, longArr); + PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); + byte[] bytes = PLongArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PLong.INSTANCE, + PLong.INSTANCE.getByteSize()); + int offset = ptr.getOffset(); + int length = ptr.getLength(); + byte[] bs = ptr.get(); + byte[] res = new byte[length]; + System.arraycopy(bs, offset, res, 0, length); + long result = (Long) PLong.INSTANCE.toObject(res); + assertEquals(4l, result); + } + + @Test + public void testGetArrayLengthForFixedLengthArray() { + Long[] longArr = new Long[4]; + longArr[0] = 1l; + longArr[1] = 2l; + longArr[2] = 4l; + longArr[3] = 5l; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PLong.INSTANCE, longArr); + PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); + byte[] bytes = PLongArray.INSTANCE.toBytes(arr); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); + int length = PArrayDataType.getArrayLength(ptr, PLong.INSTANCE, null); + assertEquals(4, length); + } + + @Test + public void testForVarcharArrayBiggerArraysNumber() { + String[] strArr = new String[101]; + for (int i = 0; i <= 100; i++) { + strArr[i] = "abc" + i; } - - - @Test - public void testVarCharArrayWithNullValues5() { - String[] strArr = new String[6]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = "cde"; - strArr[3] = null; - strArr[4] = null; - strArr[5] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + 
byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForTinyIntArray() { + Byte[] byteArr = new Byte[2]; + byteArr[0] = 1; + byteArr[1] = 2; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PTinyint.INSTANCE, byteArr); + PTinyintArray.INSTANCE.toObject(arr, PTinyintArray.INSTANCE); + byte[] bytes = PTinyintArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PTinyintArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForFloatArray() { + Float[] floatArr = new Float[2]; + floatArr[0] = 1.06f; + floatArr[1] = 2.89f; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PFloat.INSTANCE, floatArr); + PFloatArray.INSTANCE.toObject(arr, PFloatArray.INSTANCE); + byte[] bytes = PFloatArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PFloatArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForDoubleArray() { + Double[] doubleArr = new Double[2]; + doubleArr[0] = 1.06; + doubleArr[1] = 2.89; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PDouble.INSTANCE, doubleArr); + PDoubleArray.INSTANCE.toObject(arr, PDoubleArray.INSTANCE); + byte[] bytes = PDoubleArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PDoubleArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForDecimalArray() { + BigDecimal[] bigDecimalArr = new BigDecimal[2]; + bigDecimalArr[0] = new BigDecimal(89997); + bigDecimalArr[1] = new BigDecimal(8999.995f); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PDecimal.INSTANCE, bigDecimalArr); + PDecimalArray.INSTANCE.toObject(arr, PDecimalArray.INSTANCE); + byte[] bytes = PDecimalArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PDecimalArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForTimeStampArray() { + Timestamp[] timeStampArr = new Timestamp[2]; + timeStampArr[0] = new Timestamp(System.currentTimeMillis()); + timeStampArr[1] = new Timestamp(900000l); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PTimestamp.INSTANCE, timeStampArr); + PTimestampArray.INSTANCE.toObject(arr, PTimestampArray.INSTANCE); + byte[] bytes = PTimestampArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PTimestampArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedTimeStampArray() { + Timestamp[] timeStampArr = new Timestamp[2]; + timeStampArr[0] = new Timestamp(System.currentTimeMillis()); + timeStampArr[1] = new Timestamp(900000l); + PhoenixArray arr = + PArrayDataType.instantiatePhoenixArray(PUnsignedTimestamp.INSTANCE, timeStampArr); + PUnsignedTimestampArray.INSTANCE.toObject(arr, PUnsignedTimestampArray.INSTANCE); + byte[] bytes = PUnsignedTimestampArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedTimestampArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForTimeArray() { + Time[] timeArr = new Time[2]; + timeArr[0] = new Time(System.currentTimeMillis()); + timeArr[1] = new Time(900000l); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PTime.INSTANCE, timeArr); + 
PTimeArray.INSTANCE.toObject(arr, PTimeArray.INSTANCE); + byte[] bytes = PTimeArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PTimeArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnsignedTimeArray() { + Time[] timeArr = new Time[2]; + timeArr[0] = new Time(System.currentTimeMillis()); + timeArr[1] = new Time(900000l); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedTime.INSTANCE, timeArr); + PUnsignedTimeArray.INSTANCE.toObject(arr, PUnsignedTimeArray.INSTANCE); + byte[] bytes = PUnsignedTimeArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedTimeArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForDateArray() { + Date[] dateArr = new Date[2]; + dateArr[0] = new Date(System.currentTimeMillis()); + dateArr[1] = new Date(System.currentTimeMillis() + System.currentTimeMillis()); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PDate.INSTANCE, dateArr); + PDateArray.INSTANCE.toObject(arr, PDateArray.INSTANCE); + byte[] bytes = PDateArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PDateArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedDateArray() { + Date[] dateArr = new Date[2]; + dateArr[0] = new Date(System.currentTimeMillis()); + dateArr[1] = new Date(System.currentTimeMillis() + System.currentTimeMillis()); + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDate.INSTANCE, dateArr); + PUnsignedDateArray.INSTANCE.toObject(arr, PUnsignedDateArray.INSTANCE); + byte[] bytes = PUnsignedDateArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedDateArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedLongArray() { + Long[] longArr = new Long[2]; + longArr[0] = 1l; + longArr[1] = 2l; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedLong.INSTANCE, longArr); + PUnsignedLongArray.INSTANCE.toObject(arr, PUnsignedLongArray.INSTANCE); + byte[] bytes = PUnsignedLongArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedLongArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedIntArray() { + Integer[] intArr = new Integer[2]; + intArr[0] = 1; + intArr[1] = 2; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedInt.INSTANCE, intArr); + PUnsignedIntArray.INSTANCE.toObject(arr, PUnsignedIntArray.INSTANCE); + byte[] bytes = PUnsignedIntArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedIntArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedSmallIntArray() { + Short[] shortArr = new Short[2]; + shortArr[0] = 1; + shortArr[1] = 2; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedSmallint.INSTANCE, shortArr); + PUnsignedSmallintArray.INSTANCE.toObject(arr, PUnsignedSmallintArray.INSTANCE); + byte[] bytes = PUnsignedSmallintArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedSmallintArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedTinyIntArray() { + Byte[] byteArr = new Byte[2]; + byteArr[0] = 1; + byteArr[1] = 2; + PhoenixArray arr = 
PArrayDataType.instantiatePhoenixArray(PUnsignedTinyint.INSTANCE, byteArr); + PUnsignedTinyintArray.INSTANCE.toObject(arr, PUnsignedTinyintArray.INSTANCE); + byte[] bytes = PUnsignedTinyintArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedTinyintArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedFloatArray() { + Float[] floatArr = new Float[2]; + floatArr[0] = 1.9993f; + floatArr[1] = 2.786f; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedFloat.INSTANCE, floatArr); + PUnsignedFloatArray.INSTANCE.toObject(arr, PUnsignedFloatArray.INSTANCE); + byte[] bytes = PUnsignedFloatArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedFloatArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForUnSignedDoubleArray() { + Double[] doubleArr = new Double[2]; + doubleArr[0] = 1.9993; + doubleArr[1] = 2.786; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); + PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); + byte[] bytes = PUnsignedDoubleArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = + (PhoenixArray) PUnsignedDoubleArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testForArrayComparisionsForFixedWidth() { + Double[] doubleArr = new Double[2]; + doubleArr[0] = 1.9993; + doubleArr[1] = 2.786; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); + PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); + byte[] bytes1 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); + + doubleArr = new Double[2]; + doubleArr[0] = 1.9993; + doubleArr[1] = 2.786; + arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); + PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); + byte[] bytes2 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.equals(bytes1, bytes2)); + } + + @Test + public void testForArrayComparisionsWithInEqualityForFixedWidth() { + Double[] doubleArr = new Double[2]; + doubleArr[0] = 1.9993; + doubleArr[1] = 2.786; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); + PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); + byte[] bytes1 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); + + doubleArr = new Double[3]; + doubleArr[0] = 1.9993; + doubleArr[1] = 2.786; + doubleArr[2] = 6.3; + arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); + PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); + byte[] bytes2 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); + } + + @Test + public void testForArrayComparisonsForVarWidthArrays() { + String[] strArr = new String[5]; + strArr[0] = "abc"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random1"; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); + + strArr = new String[5]; + strArr[0] = "abc"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random1"; + strArr[4] = "ran"; + arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes2 = 
PVarcharArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.equals(bytes1, bytes2)); + } + + @Test + public void testForArrayComparisonsInEqualityForVarWidthArrays() { + String[] strArr = new String[5]; + strArr[0] = "abc"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random1"; + strArr[4] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); + + strArr = new String[5]; + strArr[0] = "abc"; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = "random1"; + arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.compareTo(bytes1, bytes2) > 0); + } + + @Test + public void testForArrayComparsionInEqualityWithNullsRepeatingInTheMiddle() { + String[] strArr = new String[6]; + strArr[0] = null; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = "ran"; + strArr[5] = "ran"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); + + strArr = new String[6]; + strArr[0] = null; + strArr[1] = "ereref"; + strArr[2] = "random"; + strArr[3] = null; + strArr[4] = null; + strArr[5] = "ran"; + arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.compareTo(bytes1, bytes2) > 0); + } + + @Test + public void testVarCharArrayWithGreatherThan255NullsInMiddle() { + String strArr[] = new String[300]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = null; + strArr[3] = null; + strArr[4] = "bcd"; + for (int i = 5; i < strArr.length - 2; i++) { + strArr[i] = null; } - - @Test - public void testVarCharArrayWithNullValues6() { - String[] strArr = new String[6]; - strArr[0] = "abc"; - strArr[1] = null; - strArr[2] = "cde"; - strArr[3] = "bcd"; - strArr[4] = null; - strArr[5] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); + strArr[strArr.length - 1] = "abc"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); + PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); + assertEquals(arr, resultArr); + } + + @Test + public void testVarCharArrayComparisonWithGreaterThan255NullsinMiddle() { + String strArr[] = new String[240]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = null; + strArr[3] = null; + strArr[4] = "bcd"; + strArr[strArr.length - 1] = "abc"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); + + strArr = new String[16]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = null; + strArr[3] = null; + strArr[4] = "bcd"; + strArr[strArr.length - 1] = "abc"; + arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); + } + + @Test + public void testVarCharArrayComparisonWithGreaterThan255NullsinMiddle1() { + String strArr[] = new String[500]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = null; + strArr[3] 
= null; + strArr[4] = "bcd"; + strArr[strArr.length - 1] = "abc"; + PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); + + strArr = new String[500]; + strArr[0] = "abc"; + strArr[1] = "bcd"; + strArr[2] = null; + strArr[3] = null; + strArr[4] = "bcd"; + strArr[strArr.length - 1] = "abc"; + arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); + byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); + assertTrue(Bytes.compareTo(bytes1, bytes2) == 0); + } + + @Test + public void testIsRowKeyOrderOptimized1() { + Object[] objects = new Object[] { "a", "b", "c" }; + PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.ASC); + assertTrue(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.ASC, bytes, + 0, bytes.length)); + } + + @Test + public void testIsRowKeyOrderOptimized2() { + Object[] objects = new Object[] { "a", "b", "c" }; + PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, SortOrder.DESC); + assertTrue(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, bytes, + 0, bytes.length)); + } + + @Test + public void testIsRowKeyOrderOptimized3() { + Object[] objects = new Object[] { "a", "b", "c" }; + PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); + byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, SortOrder.DESC); + for (int i = 0; i < bytes.length; i++) { + if (bytes[i] == QueryConstants.DESC_SEPARATOR_BYTE) { + bytes[i] = QueryConstants.SEPARATOR_BYTE; + } } - - @Test - public void testVarCharArrayWithNullValues7() { - String[] strArr = new String[6]; - strArr[0] = null; - strArr[1] = "abc"; - strArr[2] = null; - strArr[3] = "bcd"; - strArr[4] = null; - strArr[5] = "cde"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForCharArray() { - String[] strArr = new String[2]; - strArr[0] = "a"; - strArr[1] = "d"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PChar.INSTANCE, strArr); - byte[] bytes = PCharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PCharArray.INSTANCE.toObject( - bytes, 0, bytes.length, PCharArray.INSTANCE, null, 1, null); - assertEquals(arr, resultArr); - } - - @Test - public void testForLongArray() { - Long[] longArr = new Long[2]; - longArr[0] = 1l; - longArr[1] = 2l; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PLong.INSTANCE, longArr); - PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); - byte[] bytes = PLongArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PLongArray.INSTANCE.toObject( - bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForSmallIntArray() { - Short[] shortArr = new Short[2]; - shortArr[0] = 1; - shortArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PSmallint.INSTANCE, shortArr); - PSmallintArray.INSTANCE.toObject(arr, PSmallintArray.INSTANCE); - byte[] bytes = PSmallintArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PSmallintArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, 
resultArr); - } - - @Test - public void testForVarCharArrayForOddNumber() { - String[] strArr = new String[3]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForVarCharArrayOneElement() { - String[] strArr = new String[1]; - strArr[0] = "ereref"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForVarcharArrayWith1ElementInLargerBuffer() { - String[] strArr = new String[1]; - strArr[0] = "abx"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - byte[] moreBytes = new byte[bytes.length + 20]; - // Generate some garbage - for (int i = 0; i < moreBytes.length; i++) { - moreBytes[i] = (byte)-i; - } - System.arraycopy(bytes, 0, moreBytes, 10, bytes.length); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE.toObject(moreBytes, 10, bytes.length); - assertEquals(arr, resultArr); - } - - - @Test - public void testForVarCharArrayForEvenNumberWithIndex() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random12"; - strArr[4] = "ranzzz"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("ranzzz", Bytes.toString(res)); - } - - - @Test - public void testForVarCharArrayWithOneElementIndex() { - String[] strArr = new String[1]; - strArr[0] = "abx"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("abx", Bytes.toString(res)); - } - - public void testVariableLengthArrayWithElementsMoreThanShortMax() { - String[] strArr = new String[(2 * Short.MAX_VALUE) + 100]; - for(int i = 0 ; i < (2 * Short.MAX_VALUE) + 100; i++ ) { - String str = "abc"; - for(int j = 0 ; j <= i ;j++) { - str += "-"; - } - strArr[i] = str; - } - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = 
ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("abc---", Bytes.toString(res)); - } - - @Test - public void testGetArrayLengthForVariableLengthArray() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random12"; - strArr[4] = "ranzzz"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - int result = PArrayDataType.getArrayLength(ptr, PVarchar.INSTANCE, null); - assertEquals(5, result); - } - - @Test - public void testForVarCharArrayForOddNumberWithIndex() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random12"; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("random12", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithNullValue1() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("random", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithNullValue2() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random12"; - strArr[4] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("random", Bytes.toString(res)); - } - @Test - public void testForVarCharArrayForOddNumberWithIndex3() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random12"; - strArr[4] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, 
PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testForVarCharArrayForOddNumberWithIndex4() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testForVarCharArrayForOddNumberWithIndex5() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "random12"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("random12", Bytes.toString(res)); - } - - @Test - public void testForVarCharArrayForOddNumberWithIndex6() { - String[] strArr = new String[6]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "random12"; - strArr[5] = "random17"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("random12", Bytes.toString(res)); - } - @Test - public void testPositionSearchWithVarLengthArrayWithNullValue5() { - String[] strArr = new String[5]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart1() { - String[] strArr = new String[5]; - strArr[0] = null; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - PhoenixArray arr 
= PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 3, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart2() { - String[] strArr = new String[5]; - strArr[0] = null; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithNullValueAtTheStart3() { - String[] strArr = new String[5]; - strArr[0] = null; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("ran", Bytes.toString(res)); - } - - @Test - public void testPositionSearchWithVarLengthArrayWithAllNulls() { - String[] strArr = new String[5]; - strArr[0] = null; - strArr[1] = null; - strArr[2] = null; - strArr[3] = null; - strArr[4] = null; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 4, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("", Bytes.toString(res)); - } - - @Test - public void testForVarCharArrayForOneElementArrayWithIndex() { - String[] strArr = new String[1]; - strArr[0] = "abx"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 0, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("abx", Bytes.toString(res)); - } - - @Test - public void testForVarCharArrayForWithTwoelementsElementArrayWithIndex() { - 
String[] strArr = new String[2]; - strArr[0] = "abx"; - strArr[1] = "ereref"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 1, PVarchar.INSTANCE, PVarchar.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - assertEquals("ereref", Bytes.toString(res)); - } - - @Test - public void testLongArrayWithIndex() { - Long[] longArr = new Long[4]; - longArr[0] = 1l; - longArr[1] = 2l; - longArr[2] = 4l; - longArr[3] = 5l; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PLong.INSTANCE, longArr); - PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); - byte[] bytes = PLongArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PLong.INSTANCE, PLong.INSTANCE.getByteSize()); - int offset = ptr.getOffset(); - int length = ptr.getLength(); - byte[] bs = ptr.get(); - byte[] res = new byte[length]; - System.arraycopy(bs, offset, res, 0, length); - long result = (Long) PLong.INSTANCE.toObject(res); - assertEquals(4l, result); - } - - @Test - public void testGetArrayLengthForFixedLengthArray() { - Long[] longArr = new Long[4]; - longArr[0] = 1l; - longArr[1] = 2l; - longArr[2] = 4l; - longArr[3] = 5l; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PLong.INSTANCE, longArr); - PLongArray.INSTANCE.toObject(arr, PLongArray.INSTANCE); - byte[] bytes = PLongArray.INSTANCE.toBytes(arr); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes); - int length = PArrayDataType.getArrayLength(ptr, PLong.INSTANCE, null); - assertEquals(4, length); - } - - @Test - public void testForVarcharArrayBiggerArraysNumber() { - String[] strArr = new String[101]; - for (int i = 0; i <= 100; i++) { - strArr[i] = "abc" + i; - } - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForTinyIntArray() { - Byte[] byteArr = new Byte[2]; - byteArr[0] = 1; - byteArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PTinyint.INSTANCE, byteArr); - PTinyintArray.INSTANCE.toObject(arr, PTinyintArray.INSTANCE); - byte[] bytes = PTinyintArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PTinyintArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForFloatArray() { - Float[] floatArr = new Float[2]; - floatArr[0] = 1.06f; - floatArr[1] = 2.89f; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PFloat.INSTANCE, floatArr); - PFloatArray.INSTANCE.toObject(arr, PFloatArray.INSTANCE); - byte[] bytes = PFloatArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PFloatArray.INSTANCE.toObject( - bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForDoubleArray() { - Double[] doubleArr = new Double[2]; - doubleArr[0] = 1.06; - doubleArr[1] = 2.89; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PDouble.INSTANCE, doubleArr); - 
PDoubleArray.INSTANCE.toObject(arr, PDoubleArray.INSTANCE); - byte[] bytes = PDoubleArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PDoubleArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForDecimalArray() { - BigDecimal[] bigDecimalArr = new BigDecimal[2]; - bigDecimalArr[0] = new BigDecimal(89997); - bigDecimalArr[1] = new BigDecimal(8999.995f); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PDecimal.INSTANCE, bigDecimalArr); - PDecimalArray.INSTANCE.toObject(arr, PDecimalArray.INSTANCE); - byte[] bytes = PDecimalArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PDecimalArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForTimeStampArray() { - Timestamp[] timeStampArr = new Timestamp[2]; - timeStampArr[0] = new Timestamp(System.currentTimeMillis()); - timeStampArr[1] = new Timestamp(900000l); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PTimestamp.INSTANCE, timeStampArr); - PTimestampArray.INSTANCE.toObject(arr, PTimestampArray.INSTANCE); - byte[] bytes = PTimestampArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PTimestampArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedTimeStampArray() { - Timestamp[] timeStampArr = new Timestamp[2]; - timeStampArr[0] = new Timestamp(System.currentTimeMillis()); - timeStampArr[1] = new Timestamp(900000l); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedTimestamp.INSTANCE, timeStampArr); - PUnsignedTimestampArray.INSTANCE.toObject(arr, - PUnsignedTimestampArray.INSTANCE); - byte[] bytes = PUnsignedTimestampArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedTimestampArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForTimeArray() { - Time[] timeArr = new Time[2]; - timeArr[0] = new Time(System.currentTimeMillis()); - timeArr[1] = new Time(900000l); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PTime.INSTANCE, timeArr); - PTimeArray.INSTANCE.toObject(arr, PTimeArray.INSTANCE); - byte[] bytes = PTimeArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PTimeArray.INSTANCE.toObject( - bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnsignedTimeArray() { - Time[] timeArr = new Time[2]; - timeArr[0] = new Time(System.currentTimeMillis()); - timeArr[1] = new Time(900000l); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedTime.INSTANCE, timeArr); - PUnsignedTimeArray.INSTANCE.toObject(arr, - PUnsignedTimeArray.INSTANCE); - byte[] bytes = PUnsignedTimeArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedTimeArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForDateArray() { - Date[] dateArr = new Date[2]; - dateArr[0] = new Date(System.currentTimeMillis()); - dateArr[1] = new Date(System.currentTimeMillis() - + System.currentTimeMillis()); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PDate.INSTANCE, dateArr); - PDateArray.INSTANCE.toObject(arr, PDateArray.INSTANCE); - byte[] bytes = PDateArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PDateArray.INSTANCE.toObject( - bytes, 0, bytes.length); - assertEquals(arr, 
resultArr); - } - - @Test - public void testForUnSignedDateArray() { - Date[] dateArr = new Date[2]; - dateArr[0] = new Date(System.currentTimeMillis()); - dateArr[1] = new Date(System.currentTimeMillis() - + System.currentTimeMillis()); - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedDate.INSTANCE, dateArr); - PUnsignedDateArray.INSTANCE.toObject(arr, - PUnsignedDateArray.INSTANCE); - byte[] bytes = PUnsignedDateArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedDateArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedLongArray() { - Long[] longArr = new Long[2]; - longArr[0] = 1l; - longArr[1] = 2l; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedLong.INSTANCE, longArr); - PUnsignedLongArray.INSTANCE.toObject(arr, - PUnsignedLongArray.INSTANCE); - byte[] bytes = PUnsignedLongArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedLongArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedIntArray() { - Integer[] intArr = new Integer[2]; - intArr[0] = 1; - intArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedInt.INSTANCE, intArr); - PUnsignedIntArray.INSTANCE - .toObject(arr, PUnsignedIntArray.INSTANCE); - byte[] bytes = PUnsignedIntArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedIntArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedSmallIntArray() { - Short[] shortArr = new Short[2]; - shortArr[0] = 1; - shortArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedSmallint.INSTANCE, shortArr); - PUnsignedSmallintArray.INSTANCE.toObject(arr, - PUnsignedSmallintArray.INSTANCE); - byte[] bytes = PUnsignedSmallintArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedSmallintArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedTinyIntArray() { - Byte[] byteArr = new Byte[2]; - byteArr[0] = 1; - byteArr[1] = 2; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedTinyint.INSTANCE, byteArr); - PUnsignedTinyintArray.INSTANCE.toObject(arr, - PUnsignedTinyintArray.INSTANCE); - byte[] bytes = PUnsignedTinyintArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedTinyintArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedFloatArray() { - Float[] floatArr = new Float[2]; - floatArr[0] = 1.9993f; - floatArr[1] = 2.786f; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedFloat.INSTANCE, floatArr); - PUnsignedFloatArray.INSTANCE.toObject(arr, - PUnsignedFloatArray.INSTANCE); - byte[] bytes = PUnsignedFloatArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PUnsignedFloatArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForUnSignedDoubleArray() { - Double[] doubleArr = new Double[2]; - doubleArr[0] = 1.9993; - doubleArr[1] = 2.786; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PUnsignedDouble.INSTANCE, doubleArr); - PUnsignedDoubleArray.INSTANCE.toObject(arr, - PUnsignedDoubleArray.INSTANCE); - byte[] bytes = PUnsignedDoubleArray.INSTANCE.toBytes(arr); - 
PhoenixArray resultArr = (PhoenixArray) PUnsignedDoubleArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testForArrayComparisionsForFixedWidth() { - Double[] doubleArr = new Double[2]; - doubleArr[0] = 1.9993; - doubleArr[1] = 2.786; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); - PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); - byte[] bytes1 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); - - doubleArr = new Double[2]; - doubleArr[0] = 1.9993; - doubleArr[1] = 2.786; - arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); - PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); - byte[] bytes2 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.equals(bytes1, bytes2)); - } - - @Test - public void testForArrayComparisionsWithInEqualityForFixedWidth() { - Double[] doubleArr = new Double[2]; - doubleArr[0] = 1.9993; - doubleArr[1] = 2.786; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); - PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); - byte[] bytes1 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); - - doubleArr = new Double[3]; - doubleArr[0] = 1.9993; - doubleArr[1] = 2.786; - doubleArr[2] = 6.3; - arr = PArrayDataType.instantiatePhoenixArray(PUnsignedDouble.INSTANCE, doubleArr); - PUnsignedDoubleArray.INSTANCE.toObject(arr, PUnsignedDoubleArray.INSTANCE); - byte[] bytes2 = PUnsignedDoubleArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); - } - - @Test - public void testForArrayComparisonsForVarWidthArrays() { - String[] strArr = new String[5]; - strArr[0] = "abc"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random1"; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); - - strArr = new String[5]; - strArr[0] = "abc"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random1"; - strArr[4] = "ran"; - arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.equals(bytes1, bytes2)); - } - - @Test - public void testForArrayComparisonsInEqualityForVarWidthArrays() { - String[] strArr = new String[5]; - strArr[0] = "abc"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random1"; - strArr[4] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); - - strArr = new String[5]; - strArr[0] = "abc"; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = "random1"; - arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.compareTo(bytes1, bytes2) > 0); - } - - @Test - public void testForArrayComparsionInEqualityWithNullsRepeatingInTheMiddle() { - String[] strArr = new String[6]; - strArr[0] = null; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - strArr[4] = "ran"; - strArr[5] = "ran"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); - - strArr = new String[6]; - strArr[0] = null; - strArr[1] = "ereref"; - strArr[2] = "random"; - strArr[3] = null; - 
strArr[4] = null; - strArr[5] = "ran"; - arr = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, strArr); - byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.compareTo(bytes1, bytes2) > 0); - } - - @Test - public void testVarCharArrayWithGreatherThan255NullsInMiddle() { - String strArr[] = new String[300]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = null; - strArr[3] = null; - strArr[4] = "bcd"; - for(int i = 5; i < strArr.length - 2; i++) { - strArr[i] = null; - } - strArr[strArr.length - 1] = "abc"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr); - PhoenixArray resultArr = (PhoenixArray) PVarcharArray.INSTANCE - .toObject(bytes, 0, bytes.length); - assertEquals(arr, resultArr); - } - - @Test - public void testVarCharArrayComparisonWithGreaterThan255NullsinMiddle() { - String strArr[] = new String[240]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = null; - strArr[3] = null; - strArr[4] = "bcd"; - strArr[strArr.length - 1] = "abc"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); - - strArr = new String[16]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = null; - strArr[3] = null; - strArr[4] = "bcd"; - strArr[strArr.length - 1] = "abc"; - arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); - } - - @Test - public void testVarCharArrayComparisonWithGreaterThan255NullsinMiddle1() { - String strArr[] = new String[500]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = null; - strArr[3] = null; - strArr[4] = "bcd"; - strArr[strArr.length - 1] = "abc"; - PhoenixArray arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes1 = PVarcharArray.INSTANCE.toBytes(arr); - - strArr = new String[500]; - strArr[0] = "abc"; - strArr[1] = "bcd"; - strArr[2] = null; - strArr[3] = null; - strArr[4] = "bcd"; - strArr[strArr.length - 1] = "abc"; - arr = PArrayDataType.instantiatePhoenixArray( - PVarchar.INSTANCE, strArr); - byte[] bytes2 = PVarcharArray.INSTANCE.toBytes(arr); - assertTrue(Bytes.compareTo(bytes1, bytes2) == 0); - } - - @Test - public void testIsRowKeyOrderOptimized1() { - Object[] objects = new Object[]{"a", "b", "c"}; - PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.ASC); - assertTrue(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.ASC, bytes, 0, bytes.length)); - } - - @Test - public void testIsRowKeyOrderOptimized2() { - Object[] objects = new Object[]{"a", "b", "c"}; - PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, SortOrder.DESC); - assertTrue(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, bytes, 0, bytes.length)); - } - - @Test - public void testIsRowKeyOrderOptimized3() { - Object[] objects = new Object[]{"a", "b", "c"}; - PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects); - byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, SortOrder.DESC); - for (int i = 0; i < bytes.length; i++) { - if (bytes[i] == QueryConstants.DESC_SEPARATOR_BYTE) { - bytes[i] = QueryConstants.SEPARATOR_BYTE; - } - } - 
assertFalse(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, bytes, 0, bytes.length));
-    }
-
-    @Test
-    public void testIsRowKeyOrderOptimized4() {
-        assertTrue(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, null, 0, 0));
-    }
-
-    @Test
-    public void testIsRowKeyOrderOptimized5() {
-        Object[] objects = new Object[]{1, 2, 3};
-        PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(PInteger.INSTANCE, objects);
-        byte[] bytes = PIntegerArray.INSTANCE.toBytes(arr, PInteger.INSTANCE, SortOrder.ASC);
-        assertTrue(PArrayDataType.isRowKeyOrderOptimized(PIntegerArray.INSTANCE, SortOrder.ASC, bytes, 0, bytes.length));
-    }
-
-    @Test
-    public void testVarcharArrayDesc(){
-        Object[] objects = new Object[]{"a", "b", null};
-        PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects);
-        byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.DESC);
-        PhoenixArray arr2 = (PhoenixArray)PVarcharArray.INSTANCE.toObject(bytes, SortOrder.DESC);
-        assertEquals(arr, arr2);
-    }
-
-    @Test
-    public void testPositionAtArrayElementWithDescArray(){
-        Object[] objects = new Object[]{"a", "b", null};
-        PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects);
-        byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.DESC);
-        ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes);
-        PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, null);
-        String value = (String)PVarchar.INSTANCE.toObject(ptr, SortOrder.DESC);
-        assertEquals(null, value);
-    }
-
-    @Test
-    public void testIsCoercibleTo() {
-        PDataTypeFactory typeFactory = PDataTypeFactory.getInstance();
-        for (PDataType type : typeFactory.getTypes()) {
-            if (type.isArrayType()) {
-                Object arr = type.getSampleValue();
-                assertTrue(type.isCoercibleTo(type, arr));
-            }
-        }
-    }
-
-    @Test
-    public void testArrayConversion() {
-        final String[] data = new String[] {"asdf", "qwerty"};
-        PhoenixArray phxArray = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, data);
-        assertTrue("Converting a PhoenixArray to a PhoenixArray should return the same object",
-            phxArray == PVarcharArray.INSTANCE.toPhoenixArray(phxArray, PVarchar.INSTANCE));
-        // Create a skeleton of an Array which isn't a PhoenixArray. Make sure we can convert that.
-        Array customArray = new Array() {
-
-            @Override
-            public String getBaseTypeName() throws SQLException {
-                return "VARCHAR";
-            }
-
-            @Override
-            public int getBaseType() throws SQLException {
-                return Types.VARCHAR;
-            }
-
-            @Override
-            public Object getArray() throws SQLException {
-                return data;
-            }
-
-            @Override
-            public Object getArray(Map<String, Class<?>> map) throws SQLException {
-                return null;
-            }
-
-            @Override
-            public Object getArray(long index, int count) throws SQLException {
-                return null;
-            }
-
-            @Override
-            public Object getArray(long index, int count, Map<String, Class<?>> map)
-                    throws SQLException {
-                return null;
-            }
-
-            @Override
-            public ResultSet getResultSet() throws SQLException {
-                return null;
-            }
-
-            @Override
-            public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
-                return null;
-            }
-
-            @Override
-            public ResultSet getResultSet(long index, int count) throws SQLException {
-                return null;
-            }
-
-            @Override
-            public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map)
-                    throws SQLException {
-                return null;
-            }
-
-            @Override public void free() throws SQLException {}
-        };
-
-        PhoenixArray copy = PVarcharArray.INSTANCE.toPhoenixArray(customArray, PVarchar.INSTANCE);
-        assertEquals(phxArray, copy);
+    assertFalse(PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, bytes,
+      0, bytes.length));
+  }
+
+  @Test
+  public void testIsRowKeyOrderOptimized4() {
+    assertTrue(
+      PArrayDataType.isRowKeyOrderOptimized(PVarcharArray.INSTANCE, SortOrder.DESC, null, 0, 0));
+  }
+
+  @Test
+  public void testIsRowKeyOrderOptimized5() {
+    Object[] objects = new Object[] { 1, 2, 3 };
+    PhoenixArray arr = new PhoenixArray.PrimitiveIntPhoenixArray(PInteger.INSTANCE, objects);
+    byte[] bytes = PIntegerArray.INSTANCE.toBytes(arr, PInteger.INSTANCE, SortOrder.ASC);
+    assertTrue(PArrayDataType.isRowKeyOrderOptimized(PIntegerArray.INSTANCE, SortOrder.ASC, bytes,
+      0, bytes.length));
+  }
+
+  @Test
+  public void testVarcharArrayDesc() {
+    Object[] objects = new Object[] { "a", "b", null };
+    PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects);
+    byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.DESC);
+    PhoenixArray arr2 = (PhoenixArray) PVarcharArray.INSTANCE.toObject(bytes, SortOrder.DESC);
+    assertEquals(arr, arr2);
+  }
+
+  @Test
+  public void testPositionAtArrayElementWithDescArray() {
+    Object[] objects = new Object[] { "a", "b", null };
+    PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, objects);
+    byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.DESC);
+    ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes);
+    PArrayDataTypeDecoder.positionAtArrayElement(ptr, 2, PVarchar.INSTANCE, null);
+    String value = (String) PVarchar.INSTANCE.toObject(ptr, SortOrder.DESC);
+    assertEquals(null, value);
+  }
+
+  @Test
+  public void testIsCoercibleTo() {
+    PDataTypeFactory typeFactory = PDataTypeFactory.getInstance();
+    for (PDataType type : typeFactory.getTypes()) {
+      if (type.isArrayType()) {
+        Object arr = type.getSampleValue();
+        assertTrue(type.isCoercibleTo(type, arr));
+      }
+    }
+  }
+
+  @Test
+  public void testArrayConversion() {
+    final String[] data = new String[] { "asdf", "qwerty" };
+    PhoenixArray phxArray = PArrayDataType.instantiatePhoenixArray(PVarchar.INSTANCE, data);
+    assertTrue("Converting a PhoenixArray to a PhoenixArray should return the same object",
+      phxArray == PVarcharArray.INSTANCE.toPhoenixArray(phxArray, PVarchar.INSTANCE));
+    // Create a skeleton of an Array which isn't a PhoenixArray.
Make sure we can convert that. + Array customArray = new Array() { + + @Override + public String getBaseTypeName() throws SQLException { + return "VARCHAR"; + } + + @Override + public int getBaseType() throws SQLException { + return Types.VARCHAR; + } + + @Override + public Object getArray() throws SQLException { + return data; + } + + @Override + public Object getArray(Map> map) throws SQLException { + return null; + } + + @Override + public Object getArray(long index, int count) throws SQLException { + return null; + } + + @Override + public Object getArray(long index, int count, Map> map) throws SQLException { + return null; + } + + @Override + public ResultSet getResultSet() throws SQLException { + return null; + } + + @Override + public ResultSet getResultSet(Map> map) throws SQLException { + return null; + } + + @Override + public ResultSet getResultSet(long index, int count) throws SQLException { + return null; + } + + @Override + public ResultSet getResultSet(long index, int count, Map> map) + throws SQLException { + return null; + } + + @Override + public void free() throws SQLException { + } + }; + + PhoenixArray copy = PVarcharArray.INSTANCE.toPhoenixArray(customArray, PVarchar.INSTANCE); + assertEquals(phxArray, copy); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java index 1cdb183b811..9c217998292 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -43,1919 +43,1958 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.ConstraintViolationException; import org.apache.phoenix.schema.SortOrder; +import org.apache.phoenix.thirdparty.com.google.common.collect.TreeMultimap; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.ScanUtil; import org.apache.phoenix.util.TestUtil; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.TreeMultimap; - - public class PDataTypeTest { - @Test - public void testFloatToLongComparison() { - // Basic tests - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(1e100), 0, PFloat.INSTANCE.getByteSize(), SortOrder - .getDefault(), - PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(0.001), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - - // Edge tests - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MAX_VALUE - 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MIN_VALUE + 1), 0, - 
PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 1.0F), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); // Passes due to rounding - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 129.0F), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 128.0F), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 129.0F), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - - float f1 = 9111111111111111.0F; - float f2 = 9111111111111112.0F; - assertTrue(f1 == f2); - long la = 9111111111111111L; - assertTrue(f1 > Integer.MAX_VALUE); - assertTrue(la == f1); - assertTrue(la == f2); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(f1), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(f2), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - - // Same as above, but reversing LHS and RHS - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE.toBytes(1e100), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) < 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE.toBytes(0.001), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) > 0); - - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE - 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MAX_VALUE), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) < 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE + 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, - PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) > 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE) == 0); - 
assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 1.0F), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE) < 0); // Passes due to rounding - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 129.0F), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE) < 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 128.0F), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE) == 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 129.0F), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE) > 0); - - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE.toBytes(f1), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) == 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PFloat.INSTANCE.toBytes(f2), 0, PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PFloat.INSTANCE) == 0); - } - - @Test - public void testDoubleToDecimalComparison() { - // Basic tests - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(1.23), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDecimal.INSTANCE.toBytes(BigDecimal.valueOf(1.24)), 0, PDecimal.INSTANCE.getByteSize(), SortOrder.getDefault(), PDecimal.INSTANCE) < 0); - } - - @Test - public void testDoubleToLongComparison() { - // Basic tests - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(-1e100), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(0.001), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MAX_VALUE - 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MIN_VALUE + 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1024.0), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, - PLong.INSTANCE.getByteSize(), 
SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1025.0), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1024.0), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); - assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1025.0), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); - - // Same as above, but reversing LHS and RHS - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE.toBytes(-1e100), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE) > 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE.toBytes(0.001), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE) > 0); - - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE - 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MAX_VALUE), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE) < 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE + 1), 0, - PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, - PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PDouble.INSTANCE) > 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE) == 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1024.0), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE) == 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1025.0), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE) < 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1024.0), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE) == 0); - assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), - SortOrder.getDefault(), PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1025.0), 0, PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), - PDouble.INSTANCE) > 0); - - long i = 10; - long maxl = (1L << 62); - try { - for (; i < 100; i++) { - double d = Math.pow(2, i); - if ((long)d > maxl) { - assertTrue(i > 62); - continue; - } - long l = (1L << i) - 1; - assertTrue(l + 1L == (long)d); - assertTrue(l < (long)d); - } - } 
catch (AssertionError t) { - throw t; - } - double d = 0.0; - try { - while (d <= 1024) { - double d1 = Long.MAX_VALUE; - double d2 = Long.MAX_VALUE + d; - assertTrue(d2 == d1); - d++; - } - } catch (AssertionError t) { - throw t; - } - d = 0.0; - try { - while (d >= -1024) { - double d1 = Long.MIN_VALUE; - double d2 = Long.MIN_VALUE + d; - assertTrue(d2 == d1); - d--; - } - } catch (AssertionError t) { - throw t; + @Test + public void testFloatToLongComparison() { + // Basic tests + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(1e100), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) > 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(0.001), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); + + // Edge tests + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MAX_VALUE - 1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) > 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MIN_VALUE + 1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) < 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 1.0F), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) > 0); // Passes due to rounding + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 129.0F), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) > 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 128.0F), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 129.0F), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) < 0); + + float f1 = 9111111111111111.0F; + float f2 = 9111111111111112.0F; + assertTrue(f1 == f2); + long la = 9111111111111111L; + assertTrue(f1 > Integer.MAX_VALUE); + assertTrue(la == f1); + assertTrue(la == f2); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(f1), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(la), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PFloat.INSTANCE.compareTo(PFloat.INSTANCE.toBytes(f2), 0, + PFloat.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(la), 0, + 
PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) == 0); + + // Same as above, but reversing LHS and RHS + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE.toBytes(1e100), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) < 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE.toBytes(0.001), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) > 0); + + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE - 1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MAX_VALUE), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) < 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE + 1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) > 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MIN_VALUE), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 1.0F), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) < 0); // Passes due to rounding + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MAX_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MAX_VALUE + 129.0F), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) < 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 128.0F), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Integer.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PFloat.INSTANCE.toBytes(Integer.MIN_VALUE - 129.0F), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) > 0); + + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE.toBytes(f1), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(la), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE.toBytes(f2), 0, PFloat.INSTANCE.getByteSize(), + SortOrder.getDefault(), PFloat.INSTANCE) == 0); + } + + @Test + public void testDoubleToDecimalComparison() { + // Basic tests + assertTrue( + PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(1.23), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDecimal.INSTANCE.toBytes(BigDecimal.valueOf(1.24)), 0, + PDecimal.INSTANCE.getByteSize(), SortOrder.getDefault(), PDecimal.INSTANCE) < 0); + } + + @Test + public void testDoubleToLongComparison() { + // Basic tests + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(-1e100), 0, + 
PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(0.001), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE.toBytes(1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), PLong.INSTANCE) < 0); + + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MAX_VALUE - 1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) > 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MIN_VALUE + 1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) < 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1024.0), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1025.0), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) > 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1024.0), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) == 0); + assertTrue(PDouble.INSTANCE.compareTo(PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1025.0), 0, + PDouble.INSTANCE.getByteSize(), SortOrder.getDefault(), + PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PLong.INSTANCE) < 0); + + // Same as above, but reversing LHS and RHS + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE.toBytes(-1e100), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) > 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(1), 0, PLong.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE.toBytes(0.001), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) > 0); + + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE - 1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MAX_VALUE), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) < 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE + 1), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) > 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + 
PDouble.INSTANCE.toBytes(Long.MIN_VALUE), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1024.0), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MAX_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MAX_VALUE + 1025.0), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) < 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1024.0), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) == 0); + assertTrue(PLong.INSTANCE.compareTo(PLong.INSTANCE.toBytes(Long.MIN_VALUE), 0, + PLong.INSTANCE.getByteSize(), SortOrder.getDefault(), + PDouble.INSTANCE.toBytes(Long.MIN_VALUE - 1025.0), 0, PDouble.INSTANCE.getByteSize(), + SortOrder.getDefault(), PDouble.INSTANCE) > 0); + + long i = 10; + long maxl = (1L << 62); + try { + for (; i < 100; i++) { + double d = Math.pow(2, i); + if ((long) d > maxl) { + assertTrue(i > 62); + continue; } - double d1 = Long.MAX_VALUE; - double d2 = Long.MAX_VALUE + 1024.0; - double d3 = Long.MAX_VALUE + 1025.0; - assertTrue(d1 == d2); - assertTrue(d3 > d1); - long l1 = Long.MAX_VALUE - 1; - assertTrue((long)d1 > l1); - } - - @Test - public void testLong() { - Long la = 4L; - byte[] b = PLong.INSTANCE.toBytes(la); - Long lb = (Long) PLong.INSTANCE.toObject(b); - assertEquals(la,lb); - - Long na = 1L; - Long nb = -1L; - byte[] ba = PLong.INSTANCE.toBytes(na); - byte[] bb = PLong.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Integer value = 100; - Object obj = PLong.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Long); - assertEquals(100, ((Long)obj).longValue()); - - Long longValue = 100l; - Object longObj = PLong.INSTANCE.toObject(longValue, PLong.INSTANCE); - assertTrue(longObj instanceof Long); - assertEquals(100, ((Long)longObj).longValue()); - - assertEquals(0, PLong.INSTANCE.compareTo(Long.MAX_VALUE, Float.valueOf(Long.MAX_VALUE), PFloat.INSTANCE)); - assertEquals(0, PLong.INSTANCE.compareTo(Long.MAX_VALUE, Double.valueOf(Long.MAX_VALUE), PDouble.INSTANCE)); - assertEquals(-1, PLong.INSTANCE.compareTo(99, Float.valueOf(100), PFloat.INSTANCE)); - assertEquals(1, PLong.INSTANCE.compareTo(101, Float.valueOf(100), PFloat.INSTANCE)); - - Double d = -2.0; - Object lo = PLong.INSTANCE.toObject(d, PDouble.INSTANCE); - assertEquals(-2L, ((Long)lo).longValue()); - - byte[] bytes = PDouble.INSTANCE.toBytes(d); - lo = PLong.INSTANCE.toObject(bytes,0, bytes.length, PDouble.INSTANCE); - assertEquals(-2L, ((Long)lo).longValue()); - - Float f = -2.0f; - lo = PLong.INSTANCE.toObject(f, PFloat.INSTANCE); - assertEquals(-2L, ((Long)lo).longValue()); - - bytes = PFloat.INSTANCE.toBytes(f); - lo = PLong.INSTANCE.toObject(bytes,0, bytes.length, PFloat.INSTANCE); - assertEquals(-2L, ((Long)lo).longValue()); - - // Checks for unsignedlong - d = 2.0; - lo = PUnsignedLong.INSTANCE.toObject(d, PDouble.INSTANCE); - assertEquals(2L, ((Long)lo).longValue()); - - bytes = PDouble.INSTANCE.toBytes(d); - lo = PUnsignedLong.INSTANCE.toObject(bytes,0, bytes.length, PDouble.INSTANCE); - assertEquals(2L, 
((Long)lo).longValue()); - - f = 2.0f; - lo = PUnsignedLong.INSTANCE.toObject(f, PFloat.INSTANCE); - assertEquals(2L, ((Long)lo).longValue()); - - bytes = PFloat.INSTANCE.toBytes(f); - lo = PUnsignedLong.INSTANCE.toObject(bytes,0, bytes.length, PFloat.INSTANCE); - assertEquals(2L, ((Long)lo).longValue()); - - } - - @Test - public void testInt() { - Integer na = 4; - byte[] b = PInteger.INSTANCE.toBytes(na); - Integer nb = (Integer) PInteger.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 1; - nb = -1; - byte[] ba = PInteger.INSTANCE.toBytes(na); - byte[] bb = PInteger.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -1; - nb = -3; - ba = PInteger.INSTANCE.toBytes(na); - bb = PInteger.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -3; - nb = -100000000; - ba = PInteger.INSTANCE.toBytes(na); - bb = PInteger.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Long value = 100l; - Object obj = PInteger.INSTANCE.toObject(value, PLong.INSTANCE); - assertTrue(obj instanceof Integer); - assertEquals(100, ((Integer)obj).intValue()); - - Float unsignedFloatValue = 100f; - Object unsignedFloatObj = PInteger.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); - assertTrue(unsignedFloatObj instanceof Integer); - assertEquals(100, ((Integer)unsignedFloatObj).intValue()); - - Double unsignedDoubleValue = 100d; - Object unsignedDoubleObj = PInteger.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); - assertTrue(unsignedDoubleObj instanceof Integer); - assertEquals(100, ((Integer)unsignedDoubleObj).intValue()); - - Float floatValue = 100f; - Object floatObj = PInteger.INSTANCE.toObject(floatValue, PFloat.INSTANCE); - assertTrue(floatObj instanceof Integer); - assertEquals(100, ((Integer)floatObj).intValue()); - - Double doubleValue = 100d; - Object doubleObj = PInteger.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); - assertTrue(doubleObj instanceof Integer); - assertEquals(100, ((Integer)doubleObj).intValue()); - - Short shortValue = 100; - Object shortObj = PInteger.INSTANCE.toObject(shortValue, PSmallint.INSTANCE); - assertTrue(shortObj instanceof Integer); - assertEquals(100, ((Integer)shortObj).intValue()); - } - - @Test - public void testSmallInt() { - Short na = 4; - byte[] b = PSmallint.INSTANCE.toBytes(na); - Short nb = (Short) PSmallint.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 4; - b = PSmallint.INSTANCE.toBytes(na, SortOrder.DESC); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(b); - nb = PSmallint.INSTANCE.getCodec().decodeShort(ptr, SortOrder.DESC); - assertEquals(na,nb); - - na = 1; - nb = -1; - byte[] ba = PSmallint.INSTANCE.toBytes(na); - byte[] bb = PSmallint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -1; - nb = -3; - ba = PSmallint.INSTANCE.toBytes(na); - bb = PSmallint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -3; - nb = -10000; - ba = PSmallint.INSTANCE.toBytes(na); - bb = PSmallint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Integer value = 100; - Object obj = PSmallint.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Short); - assertEquals(100, ((Short)obj).shortValue()); - - Float unsignedFloatValue = 100f; - Object unsignedFloatObj = PSmallint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); - assertTrue(unsignedFloatObj instanceof Short); - assertEquals(100, ((Short)unsignedFloatObj).shortValue()); - - Double 
unsignedDoubleValue = 100d; - Object unsignedDoubleObj = PSmallint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); - assertTrue(unsignedDoubleObj instanceof Short); - assertEquals(100, ((Short)unsignedDoubleObj).shortValue()); - - Float floatValue = 100f; - Object floatObj = PSmallint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); - assertTrue(floatObj instanceof Short); - assertEquals(100, ((Short)floatObj).shortValue()); - - Double doubleValue = 100d; - Object doubleObj = PSmallint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); - assertTrue(doubleObj instanceof Short); - assertEquals(100, ((Short)doubleObj).shortValue()); - } - - @Test - public void testTinyInt() { - Byte na = 4; - byte[] b = PTinyint.INSTANCE.toBytes(na); - Byte nb = (Byte) PTinyint.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 1; - nb = -1; - byte[] ba = PTinyint.INSTANCE.toBytes(na); - byte[] bb = PTinyint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -1; - nb = -3; - ba = PTinyint.INSTANCE.toBytes(na); - bb = PTinyint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -3; - nb = -100; - ba = PTinyint.INSTANCE.toBytes(na); - bb = PTinyint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Integer value = 100; - Object obj = PTinyint.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Byte); - assertEquals(100, ((Byte)obj).byteValue()); - - Float floatValue = 100f; - Object floatObj = PTinyint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); - assertTrue(floatObj instanceof Byte); - assertEquals(100, ((Byte)floatObj).byteValue()); - - Float unsignedFloatValue = 100f; - Object unsignedFloatObj = PTinyint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); - assertTrue(unsignedFloatObj instanceof Byte); - assertEquals(100, ((Byte)unsignedFloatObj).byteValue()); - - Double unsignedDoubleValue = 100d; - Object unsignedDoubleObj = PTinyint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); - assertTrue(unsignedDoubleObj instanceof Byte); - assertEquals(100, ((Byte)unsignedDoubleObj).byteValue()); - - Double doubleValue = 100d; - Object doubleObj = PTinyint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); - assertTrue(doubleObj instanceof Byte); - assertEquals(100, ((Byte)doubleObj).byteValue()); - - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte) -1)); - } - - @Test - public void testUnsignedSmallInt() { - Short na = 4; - byte[] b = PUnsignedSmallint.INSTANCE.toBytes(na); - Short nb = (Short) PUnsignedSmallint.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10; - nb = 8; - byte[] ba = PUnsignedSmallint.INSTANCE.toBytes(na); - byte[] bb = PUnsignedSmallint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Integer value = 100; - Object obj = PUnsignedSmallint.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Short); - assertEquals(100, ((Short)obj).shortValue()); - - Float floatValue = 100f; - Object floatObj = PUnsignedSmallint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); - assertTrue(floatObj instanceof Short); - assertEquals(100, ((Short)floatObj).shortValue()); - - Float unsignedFloatValue = 100f; - Object unsignedFloatObj = PUnsignedSmallint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); - assertTrue(unsignedFloatObj instanceof Short); - assertEquals(100, ((Short)unsignedFloatObj).shortValue()); - - Double unsignedDoubleValue = 100d; - Object unsignedDoubleObj = 
PUnsignedSmallint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); - assertTrue(unsignedDoubleObj instanceof Short); - assertEquals(100, ((Short)unsignedDoubleObj).shortValue()); - - Double doubleValue = 100d; - Object doubleObj = PUnsignedSmallint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); - assertTrue(doubleObj instanceof Short); - assertEquals(100, ((Short)doubleObj).shortValue()); + long l = (1L << i) - 1; + assertTrue(l + 1L == (long) d); + assertTrue(l < (long) d); + } + } catch (AssertionError t) { + throw t; } - - @Test - public void testUnsignedTinyInt() { - Byte na = 4; - byte[] b = PUnsignedTinyint.INSTANCE.toBytes(na); - Byte nb = (Byte) PUnsignedTinyint.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10; - nb = 8; - byte[] ba = PUnsignedTinyint.INSTANCE.toBytes(na); - byte[] bb = PUnsignedTinyint.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - Integer value = 100; - Object obj = PUnsignedTinyint.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Byte); - assertEquals(100, ((Byte)obj).byteValue()); - - Float floatValue = 100f; - Object floatObj = PUnsignedTinyint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); - assertTrue(floatObj instanceof Byte); - assertEquals(100, ((Byte)floatObj).byteValue()); - - Float unsignedFloatValue = 100f; - Object unsignedFloatObj = PUnsignedTinyint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); - assertTrue(unsignedFloatObj instanceof Byte); - assertEquals(100, ((Byte)unsignedFloatObj).byteValue()); - - Double unsignedDoubleValue = 100d; - Object unsignedDoubleObj = PUnsignedTinyint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); - assertTrue(unsignedDoubleObj instanceof Byte); - assertEquals(100, ((Byte)unsignedDoubleObj).byteValue()); - - Double doubleValue = 100d; - Object doubleObj = PUnsignedTinyint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); - assertTrue(doubleObj instanceof Byte); - assertEquals(100, ((Byte)doubleObj).byteValue()); - } - - @Test - public void testUnsignedFloat() { - Float na = 0.005f; - byte[] b = PUnsignedFloat.INSTANCE.toBytes(na); - Float nb = (Float) PUnsignedFloat.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10.0f; - b = PUnsignedFloat.INSTANCE.toBytes(na, SortOrder.DESC); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(b); - nb = PUnsignedFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC); - assertEquals(na,nb); - - na = 2.0f; - nb = 1.0f; - byte[] ba = PUnsignedFloat.INSTANCE.toBytes(na); - byte[] bb = PUnsignedFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = 0.0f; - nb = Float.MIN_VALUE; - ba = PUnsignedFloat.INSTANCE.toBytes(na); - bb = PUnsignedFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.MIN_VALUE; - nb = Float.MAX_VALUE; - ba = PUnsignedFloat.INSTANCE.toBytes(na); - bb = PUnsignedFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.MAX_VALUE; - nb = Float.POSITIVE_INFINITY; - ba = PUnsignedFloat.INSTANCE.toBytes(na); - bb = PUnsignedFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.POSITIVE_INFINITY; - nb = Float.NaN; - ba = PUnsignedFloat.INSTANCE.toBytes(na); - bb = PUnsignedFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - Integer value = 100; - Object obj = PUnsignedFloat.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Float); - } - - @Test - public void 
testUnsignedDouble() { - Double na = 0.005; - byte[] b = PUnsignedDouble.INSTANCE.toBytes(na); - Double nb = (Double) PUnsignedDouble.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10.0; - b = PUnsignedDouble.INSTANCE.toBytes(na, SortOrder.DESC); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(b); - nb = PUnsignedDouble.INSTANCE.getCodec().decodeDouble(ptr, SortOrder.DESC); - assertEquals(na,nb); - - na = 2.0; - nb = 1.0; - byte[] ba = PUnsignedDouble.INSTANCE.toBytes(na); - byte[] bb = PUnsignedDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = 0.0; - nb = Double.MIN_VALUE; - ba = PUnsignedDouble.INSTANCE.toBytes(na); - bb = PUnsignedDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.MIN_VALUE; - nb = Double.MAX_VALUE; - ba = PUnsignedDouble.INSTANCE.toBytes(na); - bb = PUnsignedDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.MAX_VALUE; - nb = Double.POSITIVE_INFINITY; - ba = PUnsignedDouble.INSTANCE.toBytes(na); - bb = PUnsignedDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.POSITIVE_INFINITY; - nb = Double.NaN; - ba = PUnsignedDouble.INSTANCE.toBytes(na); - bb = PUnsignedDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - Integer value = 100; - Object obj = PUnsignedDouble.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Double); - - assertEquals(1, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(101), Long.valueOf(100), PLong.INSTANCE)); - assertEquals(0, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(Long.MAX_VALUE), Long.MAX_VALUE, PLong.INSTANCE)); - assertEquals(-1, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(1), Long.valueOf(100), PLong.INSTANCE)); - - assertEquals(0, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(101), BigDecimal.valueOf(101.0), PDecimal.INSTANCE)); - } - - @Test - public void testFloat() { - Float na = 0.005f; - byte[] b = PFloat.INSTANCE.toBytes(na); - Float nb = (Float) PFloat.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10.0f; - b = PFloat.INSTANCE.toBytes(na, SortOrder.DESC); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(b); - nb = PFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC); - assertEquals(na,nb); - - na = 1.0f; - nb = -1.0f; - byte[] ba = PFloat.INSTANCE.toBytes(na); - byte[] bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -1f; - nb = -3f; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = Float.NEGATIVE_INFINITY; - nb = -Float.MAX_VALUE; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -Float.MAX_VALUE; - nb = -Float.MIN_VALUE; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -Float.MIN_VALUE; - nb = -0.0f; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -0.0f; - nb = 0.0f; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = 0.0f; - nb = Float.MIN_VALUE; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.MIN_VALUE; - nb = Float.MAX_VALUE; - ba = PFloat.INSTANCE.toBytes(na); - bb = 
PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.MAX_VALUE; - nb = Float.POSITIVE_INFINITY; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Float.POSITIVE_INFINITY; - nb = Float.NaN; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - Integer value = 100; - Object obj = PFloat.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Float); - - Double dvalue = Double.NEGATIVE_INFINITY; - obj = PFloat.INSTANCE.toObject(dvalue, PDouble.INSTANCE); - assertTrue(obj instanceof Float); - assertEquals(Float.NEGATIVE_INFINITY, obj); - - na = 1.0f; - nb = -1.0f; - ba = PFloat.INSTANCE.toBytes(na); - bb = PFloat.INSTANCE.toBytes(nb); - float nna = PFloat.INSTANCE.getCodec().decodeFloat(ba, 0, SortOrder.DESC); - float nnb = PFloat.INSTANCE.getCodec().decodeFloat(bb, 0, SortOrder.DESC); - assertTrue(Float.compare(nna, nnb) < 0); - } - - @Test - public void testDoubleComparison() { - testRealNumberComparison(PDouble.INSTANCE, new Double[] {0.99, 1.0, 1.001, 1.01, 2.0}); - } - - @Test - public void testFloatComparison() { - testRealNumberComparison(PFloat.INSTANCE, new Float[] {0.99f, 1.0f, 1.001f, 1.01f, 2.0f}); + double d = 0.0; + try { + while (d <= 1024) { + double d1 = Long.MAX_VALUE; + double d2 = Long.MAX_VALUE + d; + assertTrue(d2 == d1); + d++; + } + } catch (AssertionError t) { + throw t; } - - @Test - public void testDecimalComparison() { - testRealNumberComparison(PDecimal.INSTANCE, new BigDecimal[] {BigDecimal.valueOf(0.99), BigDecimal.valueOf(1.0), BigDecimal.valueOf(1.001), BigDecimal.valueOf(1.01), BigDecimal.valueOf(2.0)}); + d = 0.0; + try { + while (d >= -1024) { + double d1 = Long.MIN_VALUE; + double d2 = Long.MIN_VALUE + d; + assertTrue(d2 == d1); + d--; + } + } catch (AssertionError t) { + throw t; } - - private static void testRealNumberComparison(PDataType type, Object[] a) { - - for (SortOrder sortOrder : SortOrder.values()) { - int factor = (sortOrder == SortOrder.ASC ? 
1 : -1); - byte[] prev_b = null; - Object prev_o = null; - for (Object o : a) { - byte[] b = type.toBytes(o, sortOrder); - if (prev_b != null) { - assertTrue("Compare of " + o + " with " + prev_o + " " + sortOrder + " failed.", ScanUtil.getComparator(type.isFixedWidth(), sortOrder).compare(prev_b, 0, prev_b.length, b, 0, b.length) * factor < 0); - } - prev_b = b; - prev_o = o; - } + double d1 = Long.MAX_VALUE; + double d2 = Long.MAX_VALUE + 1024.0; + double d3 = Long.MAX_VALUE + 1025.0; + assertTrue(d1 == d2); + assertTrue(d3 > d1); + long l1 = Long.MAX_VALUE - 1; + assertTrue((long) d1 > l1); + } + + @Test + public void testLong() { + Long la = 4L; + byte[] b = PLong.INSTANCE.toBytes(la); + Long lb = (Long) PLong.INSTANCE.toObject(b); + assertEquals(la, lb); + + Long na = 1L; + Long nb = -1L; + byte[] ba = PLong.INSTANCE.toBytes(na); + byte[] bb = PLong.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Integer value = 100; + Object obj = PLong.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Long); + assertEquals(100, ((Long) obj).longValue()); + + Long longValue = 100l; + Object longObj = PLong.INSTANCE.toObject(longValue, PLong.INSTANCE); + assertTrue(longObj instanceof Long); + assertEquals(100, ((Long) longObj).longValue()); + + assertEquals(0, + PLong.INSTANCE.compareTo(Long.MAX_VALUE, Float.valueOf(Long.MAX_VALUE), PFloat.INSTANCE)); + assertEquals(0, + PLong.INSTANCE.compareTo(Long.MAX_VALUE, Double.valueOf(Long.MAX_VALUE), PDouble.INSTANCE)); + assertEquals(-1, PLong.INSTANCE.compareTo(99, Float.valueOf(100), PFloat.INSTANCE)); + assertEquals(1, PLong.INSTANCE.compareTo(101, Float.valueOf(100), PFloat.INSTANCE)); + + Double d = -2.0; + Object lo = PLong.INSTANCE.toObject(d, PDouble.INSTANCE); + assertEquals(-2L, ((Long) lo).longValue()); + + byte[] bytes = PDouble.INSTANCE.toBytes(d); + lo = PLong.INSTANCE.toObject(bytes, 0, bytes.length, PDouble.INSTANCE); + assertEquals(-2L, ((Long) lo).longValue()); + + Float f = -2.0f; + lo = PLong.INSTANCE.toObject(f, PFloat.INSTANCE); + assertEquals(-2L, ((Long) lo).longValue()); + + bytes = PFloat.INSTANCE.toBytes(f); + lo = PLong.INSTANCE.toObject(bytes, 0, bytes.length, PFloat.INSTANCE); + assertEquals(-2L, ((Long) lo).longValue()); + + // Checks for unsignedlong + d = 2.0; + lo = PUnsignedLong.INSTANCE.toObject(d, PDouble.INSTANCE); + assertEquals(2L, ((Long) lo).longValue()); + + bytes = PDouble.INSTANCE.toBytes(d); + lo = PUnsignedLong.INSTANCE.toObject(bytes, 0, bytes.length, PDouble.INSTANCE); + assertEquals(2L, ((Long) lo).longValue()); + + f = 2.0f; + lo = PUnsignedLong.INSTANCE.toObject(f, PFloat.INSTANCE); + assertEquals(2L, ((Long) lo).longValue()); + + bytes = PFloat.INSTANCE.toBytes(f); + lo = PUnsignedLong.INSTANCE.toObject(bytes, 0, bytes.length, PFloat.INSTANCE); + assertEquals(2L, ((Long) lo).longValue()); + + } + + @Test + public void testInt() { + Integer na = 4; + byte[] b = PInteger.INSTANCE.toBytes(na); + Integer nb = (Integer) PInteger.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 1; + nb = -1; + byte[] ba = PInteger.INSTANCE.toBytes(na); + byte[] bb = PInteger.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -1; + nb = -3; + ba = PInteger.INSTANCE.toBytes(na); + bb = PInteger.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -3; + nb = -100000000; + ba = PInteger.INSTANCE.toBytes(na); + bb = PInteger.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Long value = 100l; + Object obj = 
PInteger.INSTANCE.toObject(value, PLong.INSTANCE); + assertTrue(obj instanceof Integer); + assertEquals(100, ((Integer) obj).intValue()); + + Float unsignedFloatValue = 100f; + Object unsignedFloatObj = + PInteger.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); + assertTrue(unsignedFloatObj instanceof Integer); + assertEquals(100, ((Integer) unsignedFloatObj).intValue()); + + Double unsignedDoubleValue = 100d; + Object unsignedDoubleObj = + PInteger.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); + assertTrue(unsignedDoubleObj instanceof Integer); + assertEquals(100, ((Integer) unsignedDoubleObj).intValue()); + + Float floatValue = 100f; + Object floatObj = PInteger.INSTANCE.toObject(floatValue, PFloat.INSTANCE); + assertTrue(floatObj instanceof Integer); + assertEquals(100, ((Integer) floatObj).intValue()); + + Double doubleValue = 100d; + Object doubleObj = PInteger.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); + assertTrue(doubleObj instanceof Integer); + assertEquals(100, ((Integer) doubleObj).intValue()); + + Short shortValue = 100; + Object shortObj = PInteger.INSTANCE.toObject(shortValue, PSmallint.INSTANCE); + assertTrue(shortObj instanceof Integer); + assertEquals(100, ((Integer) shortObj).intValue()); + } + + @Test + public void testSmallInt() { + Short na = 4; + byte[] b = PSmallint.INSTANCE.toBytes(na); + Short nb = (Short) PSmallint.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 4; + b = PSmallint.INSTANCE.toBytes(na, SortOrder.DESC); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(b); + nb = PSmallint.INSTANCE.getCodec().decodeShort(ptr, SortOrder.DESC); + assertEquals(na, nb); + + na = 1; + nb = -1; + byte[] ba = PSmallint.INSTANCE.toBytes(na); + byte[] bb = PSmallint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -1; + nb = -3; + ba = PSmallint.INSTANCE.toBytes(na); + bb = PSmallint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -3; + nb = -10000; + ba = PSmallint.INSTANCE.toBytes(na); + bb = PSmallint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Integer value = 100; + Object obj = PSmallint.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Short); + assertEquals(100, ((Short) obj).shortValue()); + + Float unsignedFloatValue = 100f; + Object unsignedFloatObj = + PSmallint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); + assertTrue(unsignedFloatObj instanceof Short); + assertEquals(100, ((Short) unsignedFloatObj).shortValue()); + + Double unsignedDoubleValue = 100d; + Object unsignedDoubleObj = + PSmallint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); + assertTrue(unsignedDoubleObj instanceof Short); + assertEquals(100, ((Short) unsignedDoubleObj).shortValue()); + + Float floatValue = 100f; + Object floatObj = PSmallint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); + assertTrue(floatObj instanceof Short); + assertEquals(100, ((Short) floatObj).shortValue()); + + Double doubleValue = 100d; + Object doubleObj = PSmallint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); + assertTrue(doubleObj instanceof Short); + assertEquals(100, ((Short) doubleObj).shortValue()); + } + + @Test + public void testTinyInt() { + Byte na = 4; + byte[] b = PTinyint.INSTANCE.toBytes(na); + Byte nb = (Byte) PTinyint.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 1; + nb = -1; + byte[] ba = PTinyint.INSTANCE.toBytes(na); + byte[] bb = PTinyint.INSTANCE.toBytes(nb); + 
assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -1; + nb = -3; + ba = PTinyint.INSTANCE.toBytes(na); + bb = PTinyint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -3; + nb = -100; + ba = PTinyint.INSTANCE.toBytes(na); + bb = PTinyint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Integer value = 100; + Object obj = PTinyint.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Byte); + assertEquals(100, ((Byte) obj).byteValue()); + + Float floatValue = 100f; + Object floatObj = PTinyint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); + assertTrue(floatObj instanceof Byte); + assertEquals(100, ((Byte) floatObj).byteValue()); + + Float unsignedFloatValue = 100f; + Object unsignedFloatObj = + PTinyint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); + assertTrue(unsignedFloatObj instanceof Byte); + assertEquals(100, ((Byte) unsignedFloatObj).byteValue()); + + Double unsignedDoubleValue = 100d; + Object unsignedDoubleObj = + PTinyint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); + assertTrue(unsignedDoubleObj instanceof Byte); + assertEquals(100, ((Byte) unsignedDoubleObj).byteValue()); + + Double doubleValue = 100d; + Object doubleObj = PTinyint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); + assertTrue(doubleObj instanceof Byte); + assertEquals(100, ((Byte) doubleObj).byteValue()); + + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte) -1)); + } + + @Test + public void testUnsignedSmallInt() { + Short na = 4; + byte[] b = PUnsignedSmallint.INSTANCE.toBytes(na); + Short nb = (Short) PUnsignedSmallint.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10; + nb = 8; + byte[] ba = PUnsignedSmallint.INSTANCE.toBytes(na); + byte[] bb = PUnsignedSmallint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Integer value = 100; + Object obj = PUnsignedSmallint.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Short); + assertEquals(100, ((Short) obj).shortValue()); + + Float floatValue = 100f; + Object floatObj = PUnsignedSmallint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); + assertTrue(floatObj instanceof Short); + assertEquals(100, ((Short) floatObj).shortValue()); + + Float unsignedFloatValue = 100f; + Object unsignedFloatObj = + PUnsignedSmallint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); + assertTrue(unsignedFloatObj instanceof Short); + assertEquals(100, ((Short) unsignedFloatObj).shortValue()); + + Double unsignedDoubleValue = 100d; + Object unsignedDoubleObj = + PUnsignedSmallint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); + assertTrue(unsignedDoubleObj instanceof Short); + assertEquals(100, ((Short) unsignedDoubleObj).shortValue()); + + Double doubleValue = 100d; + Object doubleObj = PUnsignedSmallint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); + assertTrue(doubleObj instanceof Short); + assertEquals(100, ((Short) doubleObj).shortValue()); + } + + @Test + public void testUnsignedTinyInt() { + Byte na = 4; + byte[] b = PUnsignedTinyint.INSTANCE.toBytes(na); + Byte nb = (Byte) PUnsignedTinyint.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10; + nb = 8; + byte[] ba = PUnsignedTinyint.INSTANCE.toBytes(na); + byte[] bb = PUnsignedTinyint.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + Integer value = 100; + Object obj = PUnsignedTinyint.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Byte); + assertEquals(100, ((Byte) 
obj).byteValue()); + + Float floatValue = 100f; + Object floatObj = PUnsignedTinyint.INSTANCE.toObject(floatValue, PFloat.INSTANCE); + assertTrue(floatObj instanceof Byte); + assertEquals(100, ((Byte) floatObj).byteValue()); + + Float unsignedFloatValue = 100f; + Object unsignedFloatObj = + PUnsignedTinyint.INSTANCE.toObject(unsignedFloatValue, PUnsignedFloat.INSTANCE); + assertTrue(unsignedFloatObj instanceof Byte); + assertEquals(100, ((Byte) unsignedFloatObj).byteValue()); + + Double unsignedDoubleValue = 100d; + Object unsignedDoubleObj = + PUnsignedTinyint.INSTANCE.toObject(unsignedDoubleValue, PUnsignedDouble.INSTANCE); + assertTrue(unsignedDoubleObj instanceof Byte); + assertEquals(100, ((Byte) unsignedDoubleObj).byteValue()); + + Double doubleValue = 100d; + Object doubleObj = PUnsignedTinyint.INSTANCE.toObject(doubleValue, PDouble.INSTANCE); + assertTrue(doubleObj instanceof Byte); + assertEquals(100, ((Byte) doubleObj).byteValue()); + } + + @Test + public void testUnsignedFloat() { + Float na = 0.005f; + byte[] b = PUnsignedFloat.INSTANCE.toBytes(na); + Float nb = (Float) PUnsignedFloat.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10.0f; + b = PUnsignedFloat.INSTANCE.toBytes(na, SortOrder.DESC); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(b); + nb = PUnsignedFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC); + assertEquals(na, nb); + + na = 2.0f; + nb = 1.0f; + byte[] ba = PUnsignedFloat.INSTANCE.toBytes(na); + byte[] bb = PUnsignedFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = 0.0f; + nb = Float.MIN_VALUE; + ba = PUnsignedFloat.INSTANCE.toBytes(na); + bb = PUnsignedFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.MIN_VALUE; + nb = Float.MAX_VALUE; + ba = PUnsignedFloat.INSTANCE.toBytes(na); + bb = PUnsignedFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.MAX_VALUE; + nb = Float.POSITIVE_INFINITY; + ba = PUnsignedFloat.INSTANCE.toBytes(na); + bb = PUnsignedFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.POSITIVE_INFINITY; + nb = Float.NaN; + ba = PUnsignedFloat.INSTANCE.toBytes(na); + bb = PUnsignedFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + Integer value = 100; + Object obj = PUnsignedFloat.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Float); + } + + @Test + public void testUnsignedDouble() { + Double na = 0.005; + byte[] b = PUnsignedDouble.INSTANCE.toBytes(na); + Double nb = (Double) PUnsignedDouble.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10.0; + b = PUnsignedDouble.INSTANCE.toBytes(na, SortOrder.DESC); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(b); + nb = PUnsignedDouble.INSTANCE.getCodec().decodeDouble(ptr, SortOrder.DESC); + assertEquals(na, nb); + + na = 2.0; + nb = 1.0; + byte[] ba = PUnsignedDouble.INSTANCE.toBytes(na); + byte[] bb = PUnsignedDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = 0.0; + nb = Double.MIN_VALUE; + ba = PUnsignedDouble.INSTANCE.toBytes(na); + bb = PUnsignedDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.MIN_VALUE; + nb = Double.MAX_VALUE; + ba = PUnsignedDouble.INSTANCE.toBytes(na); + bb = PUnsignedDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.MAX_VALUE; + nb = Double.POSITIVE_INFINITY; + ba = PUnsignedDouble.INSTANCE.toBytes(na); + bb = 
PUnsignedDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.POSITIVE_INFINITY; + nb = Double.NaN; + ba = PUnsignedDouble.INSTANCE.toBytes(na); + bb = PUnsignedDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + Integer value = 100; + Object obj = PUnsignedDouble.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Double); + + assertEquals(1, + PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(101), Long.valueOf(100), PLong.INSTANCE)); + assertEquals(0, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(Long.MAX_VALUE), + Long.MAX_VALUE, PLong.INSTANCE)); + assertEquals(-1, + PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(1), Long.valueOf(100), PLong.INSTANCE)); + + assertEquals(0, PUnsignedDouble.INSTANCE.compareTo(Double.valueOf(101), + BigDecimal.valueOf(101.0), PDecimal.INSTANCE)); + } + + @Test + public void testFloat() { + Float na = 0.005f; + byte[] b = PFloat.INSTANCE.toBytes(na); + Float nb = (Float) PFloat.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10.0f; + b = PFloat.INSTANCE.toBytes(na, SortOrder.DESC); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(b); + nb = PFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC); + assertEquals(na, nb); + + na = 1.0f; + nb = -1.0f; + byte[] ba = PFloat.INSTANCE.toBytes(na); + byte[] bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -1f; + nb = -3f; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = Float.NEGATIVE_INFINITY; + nb = -Float.MAX_VALUE; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -Float.MAX_VALUE; + nb = -Float.MIN_VALUE; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -Float.MIN_VALUE; + nb = -0.0f; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -0.0f; + nb = 0.0f; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = 0.0f; + nb = Float.MIN_VALUE; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.MIN_VALUE; + nb = Float.MAX_VALUE; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.MAX_VALUE; + nb = Float.POSITIVE_INFINITY; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Float.POSITIVE_INFINITY; + nb = Float.NaN; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + Integer value = 100; + Object obj = PFloat.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Float); + + Double dvalue = Double.NEGATIVE_INFINITY; + obj = PFloat.INSTANCE.toObject(dvalue, PDouble.INSTANCE); + assertTrue(obj instanceof Float); + assertEquals(Float.NEGATIVE_INFINITY, obj); + + na = 1.0f; + nb = -1.0f; + ba = PFloat.INSTANCE.toBytes(na); + bb = PFloat.INSTANCE.toBytes(nb); + float nna = PFloat.INSTANCE.getCodec().decodeFloat(ba, 0, SortOrder.DESC); + float nnb = PFloat.INSTANCE.getCodec().decodeFloat(bb, 0, SortOrder.DESC); + assertTrue(Float.compare(nna, nnb) < 0); + } + + @Test + public void 
testDoubleComparison() { + testRealNumberComparison(PDouble.INSTANCE, new Double[] { 0.99, 1.0, 1.001, 1.01, 2.0 }); + } + + @Test + public void testFloatComparison() { + testRealNumberComparison(PFloat.INSTANCE, new Float[] { 0.99f, 1.0f, 1.001f, 1.01f, 2.0f }); + } + + @Test + public void testDecimalComparison() { + testRealNumberComparison(PDecimal.INSTANCE, + new BigDecimal[] { BigDecimal.valueOf(0.99), BigDecimal.valueOf(1.0), + BigDecimal.valueOf(1.001), BigDecimal.valueOf(1.01), BigDecimal.valueOf(2.0) }); + } + + private static void testRealNumberComparison(PDataType type, Object[] a) { + + for (SortOrder sortOrder : SortOrder.values()) { + int factor = (sortOrder == SortOrder.ASC ? 1 : -1); + byte[] prev_b = null; + Object prev_o = null; + for (Object o : a) { + byte[] b = type.toBytes(o, sortOrder); + if (prev_b != null) { + assertTrue("Compare of " + o + " with " + prev_o + " " + sortOrder + " failed.", + ScanUtil.getComparator(type.isFixedWidth(), sortOrder).compare(prev_b, 0, prev_b.length, + b, 0, b.length) * factor < 0); } + prev_b = b; + prev_o = o; + } } - - @Test - public void testDouble() { - Double na = 0.005; - byte[] b = PDouble.INSTANCE.toBytes(na); - Double nb = (Double) PDouble.INSTANCE.toObject(b); - assertEquals(na,nb); - - na = 10.0; - b = PDouble.INSTANCE.toBytes(na, SortOrder.DESC); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - ptr.set(b); - nb = PDouble.INSTANCE.getCodec().decodeDouble(ptr, SortOrder.DESC); - assertEquals(na,nb); - - na = 1.0; - nb = -1.0; - byte[] ba = PDouble.INSTANCE.toBytes(na); - byte[] bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = -1.0; - nb = -3.0; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - - na = Double.NEGATIVE_INFINITY; - nb = -Double.MAX_VALUE; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -Double.MAX_VALUE; - nb = -Double.MIN_VALUE; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -Double.MIN_VALUE; - nb = -0.0; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = -0.0; - nb = 0.0; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = 0.0; - nb = Double.MIN_VALUE; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.MIN_VALUE; - nb = Double.MAX_VALUE; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.MAX_VALUE; - nb = Double.POSITIVE_INFINITY; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - na = Double.POSITIVE_INFINITY; - nb = Double.NaN; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) < 0); - - Integer value = 100; - Object obj = PDouble.INSTANCE.toObject(value, PInteger.INSTANCE); - assertTrue(obj instanceof Double); - - na = 1.0; - nb = -1.0; - ba = PDouble.INSTANCE.toBytes(na); - bb = PDouble.INSTANCE.toBytes(nb); - double nna = PDouble.INSTANCE.getCodec().decodeDouble(ba, 0, SortOrder.DESC); - double nnb = PDouble.INSTANCE.getCodec().decodeDouble(bb, 0, SortOrder.DESC); - assertTrue(Double.compare(nna, 
nnb) < 0); - - assertEquals(1, PDouble.INSTANCE.compareTo(Double.valueOf(101), Long.valueOf(100), PLong.INSTANCE)); - assertEquals(0, PDouble.INSTANCE.compareTo(Double.valueOf(Long.MAX_VALUE), Long.MAX_VALUE, PLong.INSTANCE)); - assertEquals(-1, PDouble.INSTANCE.compareTo(Double.valueOf(1), Long.valueOf(100), PLong.INSTANCE)); - - assertEquals(0, PDouble.INSTANCE.compareTo(Double.valueOf(101), BigDecimal.valueOf(101.0), PDecimal.INSTANCE)); + } + + @Test + public void testDouble() { + Double na = 0.005; + byte[] b = PDouble.INSTANCE.toBytes(na); + Double nb = (Double) PDouble.INSTANCE.toObject(b); + assertEquals(na, nb); + + na = 10.0; + b = PDouble.INSTANCE.toBytes(na, SortOrder.DESC); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + ptr.set(b); + nb = PDouble.INSTANCE.getCodec().decodeDouble(ptr, SortOrder.DESC); + assertEquals(na, nb); + + na = 1.0; + nb = -1.0; + byte[] ba = PDouble.INSTANCE.toBytes(na); + byte[] bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = -1.0; + nb = -3.0; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + + na = Double.NEGATIVE_INFINITY; + nb = -Double.MAX_VALUE; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -Double.MAX_VALUE; + nb = -Double.MIN_VALUE; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -Double.MIN_VALUE; + nb = -0.0; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = -0.0; + nb = 0.0; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = 0.0; + nb = Double.MIN_VALUE; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.MIN_VALUE; + nb = Double.MAX_VALUE; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.MAX_VALUE; + nb = Double.POSITIVE_INFINITY; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + na = Double.POSITIVE_INFINITY; + nb = Double.NaN; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) < 0); + + Integer value = 100; + Object obj = PDouble.INSTANCE.toObject(value, PInteger.INSTANCE); + assertTrue(obj instanceof Double); + + na = 1.0; + nb = -1.0; + ba = PDouble.INSTANCE.toBytes(na); + bb = PDouble.INSTANCE.toBytes(nb); + double nna = PDouble.INSTANCE.getCodec().decodeDouble(ba, 0, SortOrder.DESC); + double nnb = PDouble.INSTANCE.getCodec().decodeDouble(bb, 0, SortOrder.DESC); + assertTrue(Double.compare(nna, nnb) < 0); + + assertEquals(1, + PDouble.INSTANCE.compareTo(Double.valueOf(101), Long.valueOf(100), PLong.INSTANCE)); + assertEquals(0, + PDouble.INSTANCE.compareTo(Double.valueOf(Long.MAX_VALUE), Long.MAX_VALUE, PLong.INSTANCE)); + assertEquals(-1, + PDouble.INSTANCE.compareTo(Double.valueOf(1), Long.valueOf(100), PLong.INSTANCE)); + + assertEquals(0, PDouble.INSTANCE.compareTo(Double.valueOf(101), BigDecimal.valueOf(101.0), + PDecimal.INSTANCE)); + } + + @Test + public void testBigDecimal() { + byte[] b; + BigDecimal na, nb; + + b = new byte[] { (byte) 0xc2, 0x02, 0x10, 0x36, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x0f, 0x27, + 
0x38, 0x1c, 0x05, 0x40, 0x62, 0x21, 0x54, 0x4d, 0x4e, 0x01, 0x14, 0x36, 0x0d, 0x33 }; + BigDecimal decodedBytes = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(decodedBytes.compareTo(BigDecimal.ZERO) > 0); + + na = new BigDecimal(new BigInteger("12345678901239998123456789"), 2); + // [-52, 13, 35, 57, 79, 91, 13, 40, 100, 82, 24, 46, 68, 90] + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + TestUtil.assertRoundEquals(na, nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = new BigDecimal("115.533333333333331438552704639732837677001953125"); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + TestUtil.assertRoundEquals(na, nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + // test for negative serialization using biginteger + na = new BigDecimal("-5.00000000000000000000000001"); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + TestUtil.assertRoundEquals(na, nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + // test for serialization of 38 digits + na = new BigDecimal("-2.4999999999999999999999999999999999999"); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + TestUtil.assertRoundEquals(na, nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + // test for serialization of 39 digits, should round to -2.5 + na = new BigDecimal("-2.499999999999999999999999999999999999999"); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(nb.compareTo(new BigDecimal("-2.5")) == 0); + assertEquals(new BigDecimal("-2.5"), nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = new BigDecimal(2.5); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(na.compareTo(nb) == 0); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + // If we don't remove trailing zeros, this fails + na = new BigDecimal(Double.parseDouble("96.45238095238095")); + String naStr = na.toString(); + assertTrue(naStr != null); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + TestUtil.assertRoundEquals(na, nb); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + // If we don't remove trailing zeros, this fails + na = new BigDecimal(-1000); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(na.compareTo(nb) == 0); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = new BigDecimal("1000.5829999999999913"); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(na.compareTo(nb) == 0); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = TestUtil.computeAverage(11000, 3); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(na.compareTo(nb) == 0); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = new BigDecimal(new BigInteger("12345678901239999"), 2); + b = PDecimal.INSTANCE.toBytes(na); + nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); + assertTrue(na.compareTo(nb) == 0); + assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); + + na = new BigDecimal(1); + nb = new BigDecimal(-1); + byte[] ba = PDecimal.INSTANCE.toBytes(na); + byte[] bb = PDecimal.INSTANCE.toBytes(nb); 
+ assertTrue(Bytes.compareTo(ba, bb) > 0); + assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); + assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); + + na = new BigDecimal(-1); + nb = new BigDecimal(-2); + ba = PDecimal.INSTANCE.toBytes(na); + bb = PDecimal.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); + assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); + + na = new BigDecimal(-3); + nb = new BigDecimal(-1000); + assertTrue(na.compareTo(nb) > 0); + ba = PDecimal.INSTANCE.toBytes(na); + bb = PDecimal.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); + assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); + + na = new BigDecimal(BigInteger.valueOf(12345678901239998L), 2); + nb = new BigDecimal(97); + assertTrue(na.compareTo(nb) > 0); + ba = PDecimal.INSTANCE.toBytes(na); + bb = PDecimal.INSTANCE.toBytes(nb); + assertTrue(Bytes.compareTo(ba, bb) > 0); + assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); + assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); + + List values = Arrays.asList(new BigDecimal[] { new BigDecimal(-1000), + new BigDecimal(-100000000), new BigDecimal(1000), new BigDecimal("-0.001"), + new BigDecimal("0.001"), new BigDecimal(new BigInteger("12345678901239999"), 2), + new BigDecimal(new BigInteger("12345678901239998"), 2), + new BigDecimal(new BigInteger("12345678901239998123456789"), 2), // bigger than long + new BigDecimal(new BigInteger("-1000"), 3), new BigDecimal(new BigInteger("-1000"), 10), + new BigDecimal(99), new BigDecimal(97), new BigDecimal(-3) }); + + List byteValues = new ArrayList(); + for (int i = 0; i < values.size(); i++) { + byteValues.add(PDecimal.INSTANCE.toBytes(values.get(i))); } - @Test - public void testBigDecimal() { - byte[] b; - BigDecimal na, nb; - - b = new byte[] { - (byte)0xc2,0x02,0x10,0x36,0x22,0x22,0x22,0x22,0x22,0x22,0x0f,0x27,0x38,0x1c,0x05,0x40,0x62,0x21,0x54,0x4d,0x4e,0x01,0x14,0x36,0x0d,0x33 - }; - BigDecimal decodedBytes = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(decodedBytes.compareTo(BigDecimal.ZERO) > 0); - - na = new BigDecimal(new BigInteger("12345678901239998123456789"), 2); - //[-52, 13, 35, 57, 79, 91, 13, 40, 100, 82, 24, 46, 68, 90] - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - TestUtil.assertRoundEquals(na,nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = new BigDecimal("115.533333333333331438552704639732837677001953125"); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - TestUtil.assertRoundEquals(na,nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - // test for negative serialization using biginteger - na = new BigDecimal("-5.00000000000000000000000001"); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - TestUtil.assertRoundEquals(na,nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - // test for serialization of 38 digits - na = new BigDecimal("-2.4999999999999999999999999999999999999"); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - TestUtil.assertRoundEquals(na,nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - // test for serialization of 39 digits, should round to -2.5 - na = new 
BigDecimal("-2.499999999999999999999999999999999999999"); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(nb.compareTo(new BigDecimal("-2.5")) == 0); - assertEquals(new BigDecimal("-2.5"), nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = new BigDecimal(2.5); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(na.compareTo(nb) == 0); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - // If we don't remove trailing zeros, this fails - na = new BigDecimal(Double.parseDouble("96.45238095238095")); - String naStr = na.toString(); - assertTrue(naStr != null); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - TestUtil.assertRoundEquals(na,nb); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - // If we don't remove trailing zeros, this fails - na = new BigDecimal(-1000); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(na.compareTo(nb) == 0); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = new BigDecimal("1000.5829999999999913"); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(na.compareTo(nb) == 0); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = TestUtil.computeAverage(11000, 3); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(na.compareTo(nb) == 0); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = new BigDecimal(new BigInteger("12345678901239999"), 2); - b = PDecimal.INSTANCE.toBytes(na); - nb = (BigDecimal) PDecimal.INSTANCE.toObject(b); - assertTrue(na.compareTo(nb) == 0); - assertTrue(b.length <= PDecimal.INSTANCE.estimateByteSize(na)); - - na = new BigDecimal(1); - nb = new BigDecimal(-1); - byte[] ba = PDecimal.INSTANCE.toBytes(na); - byte[] bb = PDecimal.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); - assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); - - na = new BigDecimal(-1); - nb = new BigDecimal(-2); - ba = PDecimal.INSTANCE.toBytes(na); - bb = PDecimal.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); - assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); - - na = new BigDecimal(-3); - nb = new BigDecimal(-1000); - assertTrue(na.compareTo(nb) > 0); - ba = PDecimal.INSTANCE.toBytes(na); - bb = PDecimal.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); - assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); - - na = new BigDecimal(BigInteger.valueOf(12345678901239998L), 2); - nb = new BigDecimal(97); - assertTrue(na.compareTo(nb) > 0); - ba = PDecimal.INSTANCE.toBytes(na); - bb = PDecimal.INSTANCE.toBytes(nb); - assertTrue(Bytes.compareTo(ba, bb) > 0); - assertTrue(ba.length <= PDecimal.INSTANCE.estimateByteSize(na)); - assertTrue(bb.length <= PDecimal.INSTANCE.estimateByteSize(nb)); - - List values = Arrays.asList(new BigDecimal[] { - new BigDecimal(-1000), - new BigDecimal(-100000000), - new BigDecimal(1000), - new BigDecimal("-0.001"), - new BigDecimal("0.001"), - new BigDecimal(new BigInteger("12345678901239999"), 2), - new BigDecimal(new 
BigInteger("12345678901239998"), 2), - new BigDecimal(new BigInteger("12345678901239998123456789"), 2), // bigger than long - new BigDecimal(new BigInteger("-1000"),3), - new BigDecimal(new BigInteger("-1000"),10), - new BigDecimal(99), - new BigDecimal(97), - new BigDecimal(-3) - }); - - List byteValues = new ArrayList(); - for (int i = 0; i < values.size(); i++) { - byteValues.add(PDecimal.INSTANCE.toBytes(values.get(i))); - } - - for (int i = 0; i < values.size(); i++) { - BigDecimal expected = values.get(i); - BigDecimal actual = (BigDecimal) PDecimal.INSTANCE.toObject(byteValues.get(i)); - assertTrue("For " + i + " expected " + expected + " but got " + actual,expected.round( - PDataType.DEFAULT_MATH_CONTEXT).compareTo(actual.round( - PDataType.DEFAULT_MATH_CONTEXT)) == 0); - assertTrue(byteValues.get(i).length <= PDecimal.INSTANCE.estimateByteSize(expected)); - } - - Collections.sort(values); - Collections.sort(byteValues, Bytes.BYTES_COMPARATOR); - - for (int i = 0; i < values.size(); i++) { - BigDecimal expected = values.get(i); - byte[] bytes = PDecimal.INSTANCE.toBytes(values.get(i)); - assertNotNull("bytes converted from values should not be null!", bytes); - BigDecimal actual = (BigDecimal) PDecimal.INSTANCE.toObject(byteValues.get(i)); - assertTrue("For " + i + " expected " + expected + " but got " + actual,expected.round(PDataType.DEFAULT_MATH_CONTEXT).compareTo(actual.round(PDataType.DEFAULT_MATH_CONTEXT))==0); - } - - - { - String[] strs ={ - "\\xC2\\x03\\x0C\\x10\\x01\\x01\\x01\\x01\\x01\\x019U#\\x13W\\x09\\x09" - ,"\\xC2\\x03<,ddddddN\\x1B\\x1B!.9N" - ,"\\xC2\\x039" - ,"\\xC2\\x03\\x16,\\x01\\x01\\x01\\x01\\x01\\x01E\\x16\\x16\\x03@\\x1EG" - ,"\\xC2\\x02d6dddddd\\x15*]\\x0E<1F" - ,"\\xC2\\x04 3" - ,"\\xC2\\x03$Ldddddd\\x0A\\x06\\x06\\x1ES\\x1C\\x08" - ,"\\xC2\\x03\\x1E\\x0A\\x01\\x01\\x01\\x01\\x01\\x01#\\x0B=4 AV" - ,"\\xC2\\x02\\\\x04dddddd\\x15*]\\x0E<1F" - ,"\\xC2\\x02V\"\\x01\\x01\\x01\\x01\\x01\\x02\\x1A\\x068\\x162&O" - }; - for (String str : strs) { - byte[] bytes = Bytes.toBytesBinary(str); - Object o = PDecimal.INSTANCE.toObject(bytes); - assertNotNull(o); - } - } - } - public static String bytesToHex(byte[] bytes) { - final char[] hexArray = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - char[] hexChars = new char[bytes.length * 2]; - int v; - for ( int j = 0; j < bytes.length; j++ ) { - v = bytes[j] & 0xFF; - hexChars[j * 2] = hexArray[v >>> 4]; - hexChars[j * 2 + 1] = hexArray[v & 0x0F]; - } - return new String(hexChars); + for (int i = 0; i < values.size(); i++) { + BigDecimal expected = values.get(i); + BigDecimal actual = (BigDecimal) PDecimal.INSTANCE.toObject(byteValues.get(i)); + assertTrue("For " + i + " expected " + expected + " but got " + actual, + expected.round(PDataType.DEFAULT_MATH_CONTEXT) + .compareTo(actual.round(PDataType.DEFAULT_MATH_CONTEXT)) == 0); + assertTrue(byteValues.get(i).length <= PDecimal.INSTANCE.estimateByteSize(expected)); } - @Test - public void testEmptyString() throws Throwable { - byte[] b1 = PVarchar.INSTANCE.toBytes(""); - byte[] b2 = PVarchar.INSTANCE.toBytes(null); - assert (b1.length == 0 && Bytes.compareTo(b1, b2) == 0); + Collections.sort(values); + Collections.sort(byteValues, Bytes.BYTES_COMPARATOR); + + for (int i = 0; i < values.size(); i++) { + BigDecimal expected = values.get(i); + byte[] bytes = PDecimal.INSTANCE.toBytes(values.get(i)); + assertNotNull("bytes converted from values should not be null!", bytes); + BigDecimal actual = (BigDecimal) 
PDecimal.INSTANCE.toObject(byteValues.get(i)); + assertTrue("For " + i + " expected " + expected + " but got " + actual, + expected.round(PDataType.DEFAULT_MATH_CONTEXT) + .compareTo(actual.round(PDataType.DEFAULT_MATH_CONTEXT)) == 0); } - @Test - public void testNull() throws Throwable { - byte[] b = new byte[8]; - for (PDataType type : PDataType.values()) { - try { - type.toBytes(null); - type.toBytes(null, b, 0); - type.toObject(new byte[0], 0, 0); - type.toObject(new byte[0], 0, 0, type); - if (type.isArrayType()) { - type.toBytes(new PhoenixArray()); - type.toBytes(new PhoenixArray(), b, 0); - } - } catch (ConstraintViolationException e) { - if (!type.isArrayType() && ! ( type.isFixedWidth() && e.getMessage().contains("may not be null"))) { - // Fixed width types do not support the concept of a "null" value. - fail(type + ":" + e); - } - } - } + { + String[] strs = { "\\xC2\\x03\\x0C\\x10\\x01\\x01\\x01\\x01\\x01\\x019U#\\x13W\\x09\\x09", + "\\xC2\\x03<,ddddddN\\x1B\\x1B!.9N", "\\xC2\\x039", + "\\xC2\\x03\\x16,\\x01\\x01\\x01\\x01\\x01\\x01E\\x16\\x16\\x03@\\x1EG", + "\\xC2\\x02d6dddddd\\x15*]\\x0E<1F", "\\xC2\\x04 3", + "\\xC2\\x03$Ldddddd\\x0A\\x06\\x06\\x1ES\\x1C\\x08", + "\\xC2\\x03\\x1E\\x0A\\x01\\x01\\x01\\x01\\x01\\x01#\\x0B=4 AV", + "\\xC2\\x02\\\\x04dddddd\\x15*]\\x0E<1F", + "\\xC2\\x02V\"\\x01\\x01\\x01\\x01\\x01\\x02\\x1A\\x068\\x162&O" }; + for (String str : strs) { + byte[] bytes = Bytes.toBytesBinary(str); + Object o = PDecimal.INSTANCE.toObject(bytes); + assertNotNull(o); + } } - - @Test - public void testValueCoersion() throws Exception { - // Testing coercing integer to other values. - assertFalse(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 0.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, Double.valueOf(Float.MAX_VALUE) + Double.valueOf(Float.MAX_VALUE))); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, Double.valueOf(Long.MAX_VALUE) + Double.valueOf(Long.MAX_VALUE))); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0)); - 
assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0)); - assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10.0)); - assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, Double.MAX_VALUE)); - - assertTrue(PFloat.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, Float.valueOf(Long.MAX_VALUE) + Float.valueOf(Long.MAX_VALUE))); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - 
assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0f)); - assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0f)); - assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10.0f)); - - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, Double.MAX_VALUE)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, Double.MAX_VALUE)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0)); - assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0)); - assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0)); - 
assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, Double.MAX_VALUE)); - - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, Float.MAX_VALUE)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0f)); - assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0f)); - assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - - // Testing coercing integer to other values. 
- assertTrue(PInteger.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0)); - assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, -10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0)); - assertTrue(PInteger.INSTANCE.isCoercibleTo(PVarbinary.INSTANCE, 0)); - - // Testing coercing long to other values. 
- assertTrue(PLong.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Long.MAX_VALUE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MAX_VALUE + 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (long)Integer.MAX_VALUE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MAX_VALUE - 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, -10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MIN_VALUE + 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (long)Integer.MIN_VALUE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MIN_VALUE - 10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Long.MIN_VALUE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, Long.MAX_VALUE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, Long.MIN_VALUE)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10L)); - assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0L)); - assertFalse(PLong.INSTANCE - .isCoercibleTo(PUnsignedDouble.INSTANCE, -1L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10L)); - assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0L)); - 
assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -1L)); - - // Testing coercing smallint to other values. - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short)0)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short)-10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short)0)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)0)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)1000)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)-10)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)1000)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short)-1)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short)10)); - assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short)0)); - assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short)-1)); - - // Testing coercing tinyint to other values. 
- assertTrue(PTinyint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte)0)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte)-10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte)0)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte)-10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte)100)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte)0)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte)-10)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte)-10)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte)-10)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte)-10)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte)-10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte)-1)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte)10)); - assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte)0)); - assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte)-1)); - - // Testing coercing unsigned_int to other values. 
- assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 100000)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 1000)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - - // Testing coercing unsigned_long to other values. 
- assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0L)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0L)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0L)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 100000L)); - assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0L)); - assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 1000L)); - assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - - // Testing coercing unsigned_smallint to other values. - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short)0)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short)0)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short)0)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short)0)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (short)0)); - assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)0)); - assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short)1000)); - assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); - 
assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)10)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)0)); - assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short)1000)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - - // Testing coercing unsigned_tinyint to other values. - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte)10)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte)0)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); - assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); - - // Testing coercing Date types - assertTrue(PDate.INSTANCE.isCoercibleTo(PTimestamp.INSTANCE)); - assertTrue(PDate.INSTANCE.isCoercibleTo(PTime.INSTANCE)); - assertFalse(PTimestamp.INSTANCE.isCoercibleTo(PDate.INSTANCE)); - assertFalse(PTimestamp.INSTANCE.isCoercibleTo(PTime.INSTANCE)); - assertTrue(PTime.INSTANCE.isCoercibleTo(PTimestamp.INSTANCE)); - assertTrue(PTime.INSTANCE.isCoercibleTo(PDate.INSTANCE)); + } + + public static String bytesToHex(byte[] bytes) { + final char[] hexArray = + { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + char[] hexChars = new char[bytes.length * 2]; + int v; + for (int j = 0; j < bytes.length; j++) { + v = bytes[j] & 0xFF; + hexChars[j * 2] = hexArray[v >>> 4]; + hexChars[j * 2 + 1] = hexArray[v & 0x0F]; } - - @Test - public void testGetDeicmalPrecisionAndScaleFromRawBytes() throws Exception { - // Special case for 0. 
- BigDecimal bd = new BigDecimal("0"); - byte[] b = PDecimal.INSTANCE.toBytes(bd); - int[] v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, SortOrder.getDefault()); - assertEquals(0, v[0]); - assertEquals(0, v[1]); - - // Special case for 0 descending - bd = new BigDecimal("0"); - b = PDecimal.INSTANCE.toBytes(bd, SortOrder.DESC); - v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, SortOrder.DESC); - assertEquals(0, v[0]); - assertEquals(0, v[1]); - - BigDecimal[] bds = new BigDecimal[] { - new BigDecimal("1"), - new BigDecimal("0.11"), - new BigDecimal("1.1"), - new BigDecimal("11"), - new BigDecimal("101"), - new BigDecimal("10.1"), - new BigDecimal("1.01"), - new BigDecimal("0.101"), - new BigDecimal("1001"), - new BigDecimal("100.1"), - new BigDecimal("10.01"), - new BigDecimal("1.001"), - new BigDecimal("0.1001"), - new BigDecimal("10001"), - new BigDecimal("1000.1"), - new BigDecimal("100.01"), - new BigDecimal("10.001"), - new BigDecimal("1.0001"), - new BigDecimal("0.10001"), - new BigDecimal("100000000000000000000000000000"), - new BigDecimal("1000000000000000000000000000000"), - new BigDecimal("0.000000000000000000000000000001"), - new BigDecimal("0.0000000000000000000000000000001"), - new BigDecimal("111111111111111111111111111111"), - new BigDecimal("1111111111111111111111111111111"), - new BigDecimal("0.111111111111111111111111111111"), - new BigDecimal("0.1111111111111111111111111111111"), - }; - - for (int i=0; i 0); - - byte[] b1 = PDate.INSTANCE.toBytes(date1); - byte[] b2 = PDate.INSTANCE.toBytes(date2); - assertTrue(Bytes.compareTo(b1, b2) > 0); - - } - - @Test - public void testIllegalUnsignedDateTime() { - Date date1 = new Date(-1000); - try { - PUnsignedDate.INSTANCE.toBytes(date1); - fail(); - } catch (RuntimeException e) { - assertTrue(e.getCause() instanceof SQLException); - SQLException sqlE = (SQLException)e.getCause(); - assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), sqlE.getErrorCode()); + } catch (ConstraintViolationException e) { + if ( + !type.isArrayType() + && !(type.isFixedWidth() && e.getMessage().contains("may not be null")) + ) { + // Fixed width types do not support the concept of a "null" value. 
+ fail(type + ":" + e); } + } } - - @Test - public void testGetResultSetSqlType() { - assertEquals(Types.INTEGER, PInteger.INSTANCE.getResultSetSqlType()); - assertEquals(Types.INTEGER, PUnsignedInt.INSTANCE.getResultSetSqlType()); - assertEquals(Types.BIGINT, PLong.INSTANCE.getResultSetSqlType()); - assertEquals(Types.BIGINT, PUnsignedLong.INSTANCE.getResultSetSqlType()); - assertEquals(Types.SMALLINT, PSmallint.INSTANCE.getResultSetSqlType()); - assertEquals(Types.SMALLINT, PUnsignedSmallint.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TINYINT, PTinyint.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TINYINT, PUnsignedTinyint.INSTANCE.getResultSetSqlType()); - assertEquals(Types.FLOAT, PFloat.INSTANCE.getResultSetSqlType()); - assertEquals(Types.FLOAT, PUnsignedFloat.INSTANCE.getResultSetSqlType()); - assertEquals(Types.DOUBLE, PDouble.INSTANCE.getResultSetSqlType()); - assertEquals(Types.DOUBLE, PUnsignedDouble.INSTANCE.getResultSetSqlType()); - assertEquals(Types.DATE, PDate.INSTANCE.getResultSetSqlType()); - assertEquals(Types.DATE, PUnsignedDate.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TIME, PTime.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TIME, PUnsignedTime.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TIMESTAMP, PTimestamp.INSTANCE.getResultSetSqlType()); - assertEquals(Types.TIMESTAMP, PUnsignedTimestamp.INSTANCE.getResultSetSqlType()); - - // Check that all array types are defined as java.sql.Types.ARRAY - for (PDataType dataType : PDataType.values()) { - if (dataType.isArrayType()) { - assertEquals("Wrong datatype for " + dataType, - Types.ARRAY, - dataType.getResultSetSqlType()); - } - } + } + + @Test + public void testValueCoersion() throws Exception { + // Testing coercing integer to other values. 
+ assertFalse(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 0.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, + Double.valueOf(Float.MAX_VALUE) + Double.valueOf(Float.MAX_VALUE))); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, + Double.valueOf(Long.MAX_VALUE) + Double.valueOf(Long.MAX_VALUE))); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, -10.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0)); + assertTrue(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0)); + assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10.0)); + 
assertFalse(PDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, Double.MAX_VALUE)); + + assertTrue(PFloat.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, + Float.valueOf(Long.MAX_VALUE) + Float.valueOf(Long.MAX_VALUE))); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0f)); + assertTrue(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0f)); + assertFalse(PFloat.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10.0f)); + + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PFloat.INSTANCE, Double.MAX_VALUE)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 
10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PLong.INSTANCE, Double.MAX_VALUE)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10.0)); + assertTrue(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0.0)); + assertFalse(PUnsignedDouble.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, Double.MAX_VALUE)); + + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PLong.INSTANCE, Float.MAX_VALUE)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 
10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0.0f)); + assertFalse(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0.0f)); + assertTrue(PUnsignedFloat.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + + // Testing coercing integer to other values. + assertTrue(PInteger.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PLong.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0)); + assertFalse(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, -10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0)); + assertTrue(PInteger.INSTANCE.isCoercibleTo(PVarbinary.INSTANCE, 0)); + + // Testing coercing long to other values. 
+ assertTrue(PLong.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Long.MAX_VALUE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MAX_VALUE + 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (long) Integer.MAX_VALUE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MAX_VALUE - 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, -10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MIN_VALUE + 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (long) Integer.MIN_VALUE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Integer.MIN_VALUE - 10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, Long.MIN_VALUE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, 0L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, -10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, Long.MAX_VALUE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, -10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, Long.MIN_VALUE)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, -100000L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, -1000L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, -100000L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -10L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, -1000L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, 0L)); + assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, -1L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 10L)); + assertTrue(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, 0L)); + 
assertFalse(PLong.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, -1L)); + + // Testing coercing smallint to other values. + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short) 0)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short) -10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short) 0)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 0)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 1000)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) -10)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 1000)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (short) -1)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short) 10)); + assertTrue(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short) 0)); + assertFalse(PSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (short) -1)); + + // Testing coercing tinyint to other values. 
+ assertTrue(PTinyint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte) 0)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte) -10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte) 0)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte) -10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte) 100)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte) 0)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte) -10)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte) -10)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte) -10)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte) -10)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (byte) -10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE, (byte) -1)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte) 10)); + assertTrue(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte) 0)); + assertFalse(PTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE, (byte) -1)); + + // Testing coercing unsigned_int to other values. 
+ assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PLong.INSTANCE, 0)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, 0)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 100000)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 1000)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + assertTrue(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + + // Testing coercing unsigned_long to other values. 
+ assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 10L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PInteger.INSTANCE, 0L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 10L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, 0L)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 10L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, 0L)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 10L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 0L)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, 100000L)); + assertFalse(PUnsignedInt.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 10L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 0L)); + assertFalse(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, 1000L)); + assertTrue(PUnsignedLong.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + + // Testing coercing unsigned_smallint to other values. + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (short) 0)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (short) 0)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (short) 0)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (short) 0)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (short) 0)); + assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 0)); + assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (short) 1000)); + assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE)); + 
assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 10)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 0)); + assertFalse(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedTinyint.INSTANCE, (short) 1000)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + assertTrue(PUnsignedSmallint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + + // Testing coercing unsigned_tinyint to other values. + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PDouble.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PFloat.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PInteger.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PLong.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedLong.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedInt.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PSmallint.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PTinyint.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte) 10)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedSmallint.INSTANCE, (byte) 0)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedDouble.INSTANCE)); + assertTrue(PUnsignedTinyint.INSTANCE.isCoercibleTo(PUnsignedFloat.INSTANCE)); + + // Testing coercing Date types + assertTrue(PDate.INSTANCE.isCoercibleTo(PTimestamp.INSTANCE)); + assertTrue(PDate.INSTANCE.isCoercibleTo(PTime.INSTANCE)); + assertFalse(PTimestamp.INSTANCE.isCoercibleTo(PDate.INSTANCE)); + assertFalse(PTimestamp.INSTANCE.isCoercibleTo(PTime.INSTANCE)); + assertTrue(PTime.INSTANCE.isCoercibleTo(PTimestamp.INSTANCE)); + assertTrue(PTime.INSTANCE.isCoercibleTo(PDate.INSTANCE)); + } + + @Test + public void testGetDeicmalPrecisionAndScaleFromRawBytes() throws Exception { + // Special case for 0. 
+ BigDecimal bd = new BigDecimal("0"); + byte[] b = PDecimal.INSTANCE.toBytes(bd); + int[] v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, SortOrder.getDefault()); + assertEquals(0, v[0]); + assertEquals(0, v[1]); + + // Special case for 0 descending + bd = new BigDecimal("0"); + b = PDecimal.INSTANCE.toBytes(bd, SortOrder.DESC); + v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, SortOrder.DESC); + assertEquals(0, v[0]); + assertEquals(0, v[1]); + + BigDecimal[] bds = new BigDecimal[] { new BigDecimal("1"), new BigDecimal("0.11"), + new BigDecimal("1.1"), new BigDecimal("11"), new BigDecimal("101"), new BigDecimal("10.1"), + new BigDecimal("1.01"), new BigDecimal("0.101"), new BigDecimal("1001"), + new BigDecimal("100.1"), new BigDecimal("10.01"), new BigDecimal("1.001"), + new BigDecimal("0.1001"), new BigDecimal("10001"), new BigDecimal("1000.1"), + new BigDecimal("100.01"), new BigDecimal("10.001"), new BigDecimal("1.0001"), + new BigDecimal("0.10001"), new BigDecimal("100000000000000000000000000000"), + new BigDecimal("1000000000000000000000000000000"), + new BigDecimal("0.000000000000000000000000000001"), + new BigDecimal("0.0000000000000000000000000000001"), + new BigDecimal("111111111111111111111111111111"), + new BigDecimal("1111111111111111111111111111111"), + new BigDecimal("0.111111111111111111111111111111"), + new BigDecimal("0.1111111111111111111111111111111"), }; + + for (int i = 0; i < bds.length; i++) { + testReadDecimalPrecisionAndScaleFromRawBytes(bds[i], SortOrder.ASC); + testReadDecimalPrecisionAndScaleFromRawBytes(bds[i], SortOrder.DESC); + testReadDecimalPrecisionAndScaleFromRawBytes(bds[i].negate(), SortOrder.getDefault()); } - private void testReadDecimalPrecisionAndScaleFromRawBytes(BigDecimal bd, SortOrder sortOrder) { - byte[] b = PDecimal.INSTANCE.toBytes(bd, sortOrder); - int[] v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, sortOrder); - assertEquals(bd.toString(), bd.precision(), v[0]); - assertEquals(bd.toString(), bd.scale(), v[1]); + assertTrue(new BigDecimal("5").remainder(BigDecimal.ONE).equals(BigDecimal.ZERO)); + assertTrue(new BigDecimal("5.0").remainder(BigDecimal.ONE).compareTo(BigDecimal.ZERO) == 0); + assertTrue(new BigDecimal("5.00").remainder(BigDecimal.ONE).compareTo(BigDecimal.ZERO) == 0); + assertFalse(new BigDecimal("5.01").remainder(BigDecimal.ONE).equals(BigDecimal.ZERO)); + assertFalse(new BigDecimal("-5.1").remainder(BigDecimal.ONE).equals(BigDecimal.ZERO)); + } + + @Test + public void testDateConversions() { + long now = System.currentTimeMillis(); + Date date = new Date(now); + Time t = new Time(now); + Timestamp ts = new Timestamp(now); + + Object o = PDate.INSTANCE.toObject(ts, PTimestamp.INSTANCE); + assertEquals(o.getClass(), java.sql.Date.class); + o = PDate.INSTANCE.toObject(t, PTime.INSTANCE); + assertEquals(o.getClass(), java.sql.Date.class); + + o = PTime.INSTANCE.toObject(date, PDate.INSTANCE); + assertEquals(o.getClass(), java.sql.Time.class); + o = PTime.INSTANCE.toObject(ts, PTimestamp.INSTANCE); + assertEquals(o.getClass(), java.sql.Time.class); + + o = PTimestamp.INSTANCE.toObject(date, PDate.INSTANCE); + assertEquals(o.getClass(), java.sql.Timestamp.class); + o = PTimestamp.INSTANCE.toObject(t, PTime.INSTANCE); + assertEquals(o.getClass(), java.sql.Timestamp.class); + } + + @Test + public void testNegativeDateTime() { + Date date1 = new Date(-1000); + Date date2 = new Date(-2000); + assertTrue(date1.compareTo(date2) > 0); + + byte[] b1 = PDate.INSTANCE.toBytes(date1); + byte[] b2 = 
PDate.INSTANCE.toBytes(date2); + assertTrue(Bytes.compareTo(b1, b2) > 0); + + } + + @Test + public void testIllegalUnsignedDateTime() { + Date date1 = new Date(-1000); + try { + PUnsignedDate.INSTANCE.toBytes(date1); + fail(); + } catch (RuntimeException e) { + assertTrue(e.getCause() instanceof SQLException); + SQLException sqlE = (SQLException) e.getCause(); + assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), sqlE.getErrorCode()); } - - @Test - public void testArithmeticOnLong() { - long startWith = -5; - long incrementBy = 1; - for (int i = 0; i < 10; i++) { - long next = nextValueFor(startWith, incrementBy); - assertEquals(startWith + incrementBy, next); - startWith = next; - } - startWith = 5; - incrementBy = -1; - for (int i = 0; i < 10; i++) { - long next = nextValueFor(startWith, incrementBy); - assertEquals(startWith + incrementBy, next); - startWith = next; - } - startWith = 0; - incrementBy = 100; - for (int i = 0; i < 10; i++) { - long next = nextValueFor(startWith, incrementBy); - assertEquals(startWith + incrementBy, next); - startWith = next; - } + } + + @Test + public void testGetResultSetSqlType() { + assertEquals(Types.INTEGER, PInteger.INSTANCE.getResultSetSqlType()); + assertEquals(Types.INTEGER, PUnsignedInt.INSTANCE.getResultSetSqlType()); + assertEquals(Types.BIGINT, PLong.INSTANCE.getResultSetSqlType()); + assertEquals(Types.BIGINT, PUnsignedLong.INSTANCE.getResultSetSqlType()); + assertEquals(Types.SMALLINT, PSmallint.INSTANCE.getResultSetSqlType()); + assertEquals(Types.SMALLINT, PUnsignedSmallint.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TINYINT, PTinyint.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TINYINT, PUnsignedTinyint.INSTANCE.getResultSetSqlType()); + assertEquals(Types.FLOAT, PFloat.INSTANCE.getResultSetSqlType()); + assertEquals(Types.FLOAT, PUnsignedFloat.INSTANCE.getResultSetSqlType()); + assertEquals(Types.DOUBLE, PDouble.INSTANCE.getResultSetSqlType()); + assertEquals(Types.DOUBLE, PUnsignedDouble.INSTANCE.getResultSetSqlType()); + assertEquals(Types.DATE, PDate.INSTANCE.getResultSetSqlType()); + assertEquals(Types.DATE, PUnsignedDate.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TIME, PTime.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TIME, PUnsignedTime.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TIMESTAMP, PTimestamp.INSTANCE.getResultSetSqlType()); + assertEquals(Types.TIMESTAMP, PUnsignedTimestamp.INSTANCE.getResultSetSqlType()); + + // Check that all array types are defined as java.sql.Types.ARRAY + for (PDataType dataType : PDataType.values()) { + if (dataType.isArrayType()) { + assertEquals("Wrong datatype for " + dataType, Types.ARRAY, dataType.getResultSetSqlType()); + } } - - @Test - public void testGetSampleValue() { - PDataType[] types = PDataType.values(); - // Test validity of 10 sample values for each type - for (int i = 0; i < 10; i++) { - for (PDataType type : types) { - Integer maxLength = - (type == PChar.INSTANCE - || type == PBinary.INSTANCE - || type == PCharArray.INSTANCE - || type == PBinaryArray.INSTANCE) ? 
10 : null; - int arrayLength = 10; - Object sampleValue = type.getSampleValue(maxLength, arrayLength); - byte[] b = type.toBytes(sampleValue); - type.toObject(b, 0, b.length, type, SortOrder.getDefault(), maxLength, null); - } - } + } + + private void testReadDecimalPrecisionAndScaleFromRawBytes(BigDecimal bd, SortOrder sortOrder) { + byte[] b = PDecimal.INSTANCE.toBytes(bd, sortOrder); + int[] v = PDataType.getDecimalPrecisionAndScale(b, 0, b.length, sortOrder); + assertEquals(bd.toString(), bd.precision(), v[0]); + assertEquals(bd.toString(), bd.scale(), v[1]); + } + + @Test + public void testArithmeticOnLong() { + long startWith = -5; + long incrementBy = 1; + for (int i = 0; i < 10; i++) { + long next = nextValueFor(startWith, incrementBy); + assertEquals(startWith + incrementBy, next); + startWith = next; } - - // Simulate what an HBase Increment does with the value encoded as a long - private long nextValueFor(long startWith, long incrementBy) { - long hstartWith = Bytes.toLong(PLong.INSTANCE.toBytes(startWith)); - hstartWith += incrementBy; - return (Long) PLong.INSTANCE.toObject(Bytes.toBytes(hstartWith)); + startWith = 5; + incrementBy = -1; + for (int i = 0; i < 10; i++) { + long next = nextValueFor(startWith, incrementBy); + assertEquals(startWith + incrementBy, next); + startWith = next; } - - - @Test - public void testCoercibleGoldfile() { - TreeMultimap coercibleToMap = TreeMultimap.create(); - PDataType[] orderedTypes = PDataTypeFactory.getInstance().getOrderedTypes(); - for (PDataType fromType : orderedTypes) { - for (PDataType targetType : orderedTypes) { - if (fromType.isCoercibleTo(targetType)) { - coercibleToMap.put(fromType.toString(), targetType.toString()); - } - } - } - assertEquals( - "{BIGINT=[BIGINT, BINARY, DECIMAL, DOUBLE, VARBINARY], " - + "BIGINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, VARBINARY ARRAY], " - + "BINARY=[BINARY, VARBINARY], " - + "BINARY ARRAY=[BINARY ARRAY, VARBINARY ARRAY], " - + "BOOLEAN=[BINARY, BOOLEAN, VARBINARY, VARBINARY_ENCODED], " - + "BOOLEAN ARRAY=[BINARY ARRAY, BOOLEAN ARRAY, VARBINARY ARRAY], " - + "BSON=[BINARY, BSON, VARBINARY], " - + "CHAR=[BINARY, CHAR, VARBINARY, VARCHAR], " - + "CHAR ARRAY=[BINARY ARRAY, CHAR ARRAY, VARBINARY ARRAY, VARCHAR ARRAY], " - + "DATE=[BINARY, DATE, TIME, TIMESTAMP, VARBINARY], " - + "DATE ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, VARBINARY ARRAY], " - + "DECIMAL=[DECIMAL, VARBINARY, VARBINARY_ENCODED], " - + "DECIMAL ARRAY=[DECIMAL ARRAY, VARBINARY ARRAY], " - + "DOUBLE=[BINARY, DECIMAL, DOUBLE, VARBINARY], " - + "DOUBLE ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, VARBINARY ARRAY], " - + "FLOAT=[BINARY, DECIMAL, DOUBLE, FLOAT, VARBINARY], " - + "FLOAT ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, VARBINARY ARRAY], " - + "INTEGER=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, VARBINARY], " - + "INTEGER ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, VARBINARY ARRAY], " - + "JSON=[BINARY, JSON, VARBINARY], " - + "SMALLINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, VARBINARY], " - + "SMALLINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, VARBINARY ARRAY], " - + "TIME=[BINARY, DATE, TIME, TIMESTAMP, VARBINARY], " - + "TIME ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, VARBINARY ARRAY], " - + "TIMESTAMP=[BINARY, TIMESTAMP, VARBINARY], " - + "TIMESTAMP ARRAY=[BINARY ARRAY, TIMESTAMP ARRAY, VARBINARY 
ARRAY], " - + "TINYINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, TINYINT, VARBINARY], " - + "TINYINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, TINYINT ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_DATE=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " - + "UNSIGNED_DATE ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_DOUBLE=[BINARY, DECIMAL, DOUBLE, UNSIGNED_DOUBLE, VARBINARY], " - + "UNSIGNED_DOUBLE ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, UNSIGNED_DOUBLE ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_FLOAT=[BINARY, DECIMAL, DOUBLE, FLOAT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, VARBINARY], " - + "UNSIGNED_FLOAT ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_INT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, VARBINARY], " - + "UNSIGNED_INT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_LONG=[BIGINT, BINARY, DECIMAL, DOUBLE, UNSIGNED_DOUBLE, UNSIGNED_LONG, VARBINARY], " - + "UNSIGNED_LONG ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_LONG ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_SMALLINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, UNSIGNED_SMALLINT, VARBINARY], " - + "UNSIGNED_SMALLINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, UNSIGNED_SMALLINT ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_TIME=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " - + "UNSIGNED_TIME ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_TIMESTAMP=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " - + "UNSIGNED_TIMESTAMP ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " - + "UNSIGNED_TINYINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, TINYINT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, UNSIGNED_SMALLINT, UNSIGNED_TINYINT, VARBINARY], " - + "UNSIGNED_TINYINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, TINYINT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, UNSIGNED_SMALLINT ARRAY, UNSIGNED_TINYINT ARRAY, VARBINARY ARRAY], " - + "VARBINARY=[BINARY, VARBINARY, VARBINARY_ENCODED], " - + "VARBINARY ARRAY=[BINARY ARRAY, VARBINARY ARRAY], " - + "VARBINARY_ENCODED=[BINARY, VARBINARY, VARBINARY_ENCODED], " - + "VARCHAR=[BINARY, CHAR, VARBINARY, VARBINARY_ENCODED, VARCHAR], " - + "VARCHAR ARRAY=[BINARY ARRAY, CHAR ARRAY, VARBINARY ARRAY, VARCHAR ARRAY]}", - coercibleToMap.toString()); + startWith = 0; + incrementBy = 100; + for (int i = 0; i < 10; i++) { + long 
next = nextValueFor(startWith, incrementBy); + assertEquals(startWith + incrementBy, next); + startWith = next; } - - @Test - public void testIntVersusLong() { - long l = -1L; - int i = -1; - assertTrue(PLong.INSTANCE.compareTo(l, i, PInteger.INSTANCE)==0); - assertTrue(PInteger.INSTANCE.compareTo(i, l, PLong.INSTANCE)==0); + } + + @Test + public void testGetSampleValue() { + PDataType[] types = PDataType.values(); + // Test validity of 10 sample values for each type + for (int i = 0; i < 10; i++) { + for (PDataType type : types) { + Integer maxLength = (type == PChar.INSTANCE || type == PBinary.INSTANCE + || type == PCharArray.INSTANCE || type == PBinaryArray.INSTANCE) ? 10 : null; + int arrayLength = 10; + Object sampleValue = type.getSampleValue(maxLength, arrayLength); + byte[] b = type.toBytes(sampleValue); + type.toObject(b, 0, b.length, type, SortOrder.getDefault(), maxLength, null); + } } - - @Test - public void testSeparatorBytes() { - byte biggest = (byte) 0xFF; - assertEquals(biggest, QueryConstants.DESC_SEPARATOR_BYTE); - byte[] array = new byte[1]; - for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) { - array[0] = (byte) i; - assertTrue(Bytes.compareTo(array, QueryConstants.DESC_SEPARATOR_BYTE_ARRAY) <= 0); + } + + // Simulate what an HBase Increment does with the value encoded as a long + private long nextValueFor(long startWith, long incrementBy) { + long hstartWith = Bytes.toLong(PLong.INSTANCE.toBytes(startWith)); + hstartWith += incrementBy; + return (Long) PLong.INSTANCE.toObject(Bytes.toBytes(hstartWith)); + } + + @Test + public void testCoercibleGoldfile() { + TreeMultimap coercibleToMap = TreeMultimap.create(); + PDataType[] orderedTypes = PDataTypeFactory.getInstance().getOrderedTypes(); + for (PDataType fromType : orderedTypes) { + for (PDataType targetType : orderedTypes) { + if (fromType.isCoercibleTo(targetType)) { + coercibleToMap.put(fromType.toString(), targetType.toString()); } + } } - - @Test - public void testBoolean() { - byte[] bytes = PBoolean.INSTANCE.toBytes(Boolean.TRUE); - assertEquals(1, bytes[0]); - bytes = PBoolean.INSTANCE.toBytes(Boolean.FALSE); - assertEquals(0, bytes[0]); - - bytes = PBoolean.INSTANCE.toBytes(Boolean.TRUE, SortOrder.DESC); - assertEquals(0, bytes[0]); - bytes = PBoolean.INSTANCE.toBytes(Boolean.FALSE, SortOrder.DESC); - assertEquals(1, bytes[0]); - - Object dec = PDecimal.INSTANCE.toObject(Boolean.TRUE, PBoolean.INSTANCE); - bytes = PDecimal.INSTANCE.toBytes(dec); - Object b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.ASC); - assertEquals(true, b); - - dec = PDecimal.INSTANCE.toObject(Boolean.FALSE, PBoolean.INSTANCE); - bytes = PDecimal.INSTANCE.toBytes(dec); - b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.ASC); - assertEquals(false, b); - - dec = PDecimal.INSTANCE.toObject(Boolean.TRUE, PBoolean.INSTANCE); - bytes = PDecimal.INSTANCE.toBytes(dec, SortOrder.DESC); - b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.DESC); - assertEquals(true, b); - - dec = PDecimal.INSTANCE.toObject(Boolean.FALSE, PBoolean.INSTANCE); - bytes = PDecimal.INSTANCE.toBytes(dec, SortOrder.DESC); - b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.DESC); - assertEquals(false, b); - } - - @Test - public void testTimestampToDateComparison() { - long now = System.currentTimeMillis(); - Timestamp ts1 = DateUtil.getTimestamp(now, 1111); - final byte[] bytes1 = PTimestamp.INSTANCE.toBytes(ts1); - Date ts2 = 
new Date(now); - final byte[] bytes2 = PDate.INSTANCE.toBytes(ts2); - assertTrue(PTimestamp.INSTANCE.compareTo(bytes1, 0, bytes1.length, SortOrder.getDefault(), bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) > 0); - - Timestamp ts3 = DateUtil.getTimestamp(now, 0); - final byte[] bytes3 = PTimestamp.INSTANCE.toBytes(ts3); - assertTrue(PTimestamp.INSTANCE.compareTo(bytes3, 0, bytes3.length, SortOrder.getDefault(), bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) == 0); - - Timestamp ts4 = DateUtil.getTimestamp(now, 0); - final byte[] bytes4 = PUnsignedTimestamp.INSTANCE.toBytes(ts4); - assertTrue(PUnsignedTimestamp.INSTANCE.compareTo(bytes4, 0, bytes4.length, SortOrder.getDefault(), bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) == 0); - } - - @Test - public void testTimestamp() { - long now = System.currentTimeMillis(); - Timestamp ts1 = DateUtil.getTimestamp(now, 1111); - final byte[] bytes1 = PTimestamp.INSTANCE.toBytes(ts1); - Timestamp ts2 = DateUtil.getTimestamp(now, 1112); - final byte[] bytes2 = PTimestamp.INSTANCE.toBytes(ts2); - assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); - - final byte[] ibytes1 = SortOrder.invert(bytes1, 0, bytes1.length); - final byte[] ibytes2 = SortOrder.invert(bytes2, 0, bytes2.length); - assertTrue(Bytes.compareTo(ibytes1, ibytes2) > 0); - - Timestamp ts3 = new Timestamp(now+1); - final byte[] bytes3 = PTimestamp.INSTANCE.toBytes(ts3); - assertTrue(Bytes.compareTo(bytes3, bytes2) > 0); - final byte[] ibytes3 = SortOrder.invert(bytes3, 0, bytes3.length); - assertTrue(Bytes.compareTo(ibytes3, ibytes2) < 0); - - Timestamp ts4 = new Timestamp(now-1); - byte[] bytes4 = PTimestamp.INSTANCE.toBytes(ts4); - assertTrue(Bytes.compareTo(bytes4, bytes1) < 0); - byte[] ibytes4 = SortOrder.invert(bytes4, 0, bytes4.length); - assertTrue(Bytes.compareTo(ibytes4, ibytes1) > 0); + assertEquals("{BIGINT=[BIGINT, BINARY, DECIMAL, DOUBLE, VARBINARY], " + + "BIGINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, VARBINARY ARRAY], " + + "BINARY=[BINARY, VARBINARY], " + "BINARY ARRAY=[BINARY ARRAY, VARBINARY ARRAY], " + + "BOOLEAN=[BINARY, BOOLEAN, VARBINARY, VARBINARY_ENCODED], " + + "BOOLEAN ARRAY=[BINARY ARRAY, BOOLEAN ARRAY, VARBINARY ARRAY], " + + "BSON=[BINARY, BSON, VARBINARY], " + "CHAR=[BINARY, CHAR, VARBINARY, VARCHAR], " + + "CHAR ARRAY=[BINARY ARRAY, CHAR ARRAY, VARBINARY ARRAY, VARCHAR ARRAY], " + + "DATE=[BINARY, DATE, TIME, TIMESTAMP, VARBINARY], " + + "DATE ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "DECIMAL=[DECIMAL, VARBINARY, VARBINARY_ENCODED], " + + "DECIMAL ARRAY=[DECIMAL ARRAY, VARBINARY ARRAY], " + + "DOUBLE=[BINARY, DECIMAL, DOUBLE, VARBINARY], " + + "DOUBLE ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, VARBINARY ARRAY], " + + "FLOAT=[BINARY, DECIMAL, DOUBLE, FLOAT, VARBINARY], " + + "FLOAT ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, VARBINARY ARRAY], " + + "INTEGER=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, VARBINARY], " + + "INTEGER ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, VARBINARY ARRAY], " + + "JSON=[BINARY, JSON, VARBINARY], " + + "SMALLINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, VARBINARY], " + + "SMALLINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, VARBINARY ARRAY], " + + "TIME=[BINARY, DATE, TIME, TIMESTAMP, VARBINARY], " + + "TIME ARRAY=[BINARY ARRAY, DATE ARRAY, TIME 
ARRAY, TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "TIMESTAMP=[BINARY, TIMESTAMP, VARBINARY], " + + "TIMESTAMP ARRAY=[BINARY ARRAY, TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "TINYINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, TINYINT, VARBINARY], " + + "TINYINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, TINYINT ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_DATE=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " + + "UNSIGNED_DATE ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_DOUBLE=[BINARY, DECIMAL, DOUBLE, UNSIGNED_DOUBLE, VARBINARY], " + + "UNSIGNED_DOUBLE ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, UNSIGNED_DOUBLE ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_FLOAT=[BINARY, DECIMAL, DOUBLE, FLOAT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, VARBINARY], " + + "UNSIGNED_FLOAT ARRAY=[BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_INT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, VARBINARY], " + + "UNSIGNED_INT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_LONG=[BIGINT, BINARY, DECIMAL, DOUBLE, UNSIGNED_DOUBLE, UNSIGNED_LONG, VARBINARY], " + + "UNSIGNED_LONG ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_LONG ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_SMALLINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, UNSIGNED_SMALLINT, VARBINARY], " + + "UNSIGNED_SMALLINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, UNSIGNED_SMALLINT ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_TIME=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " + + "UNSIGNED_TIME ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_TIMESTAMP=[BINARY, DATE, TIME, TIMESTAMP, UNSIGNED_DATE, UNSIGNED_TIME, UNSIGNED_TIMESTAMP, VARBINARY], " + + "UNSIGNED_TIMESTAMP ARRAY=[BINARY ARRAY, DATE ARRAY, TIME ARRAY, TIMESTAMP ARRAY, UNSIGNED_DATE ARRAY, UNSIGNED_TIME ARRAY, UNSIGNED_TIMESTAMP ARRAY, VARBINARY ARRAY], " + + "UNSIGNED_TINYINT=[BIGINT, BINARY, DECIMAL, DOUBLE, FLOAT, INTEGER, SMALLINT, TINYINT, UNSIGNED_DOUBLE, UNSIGNED_FLOAT, UNSIGNED_INT, UNSIGNED_LONG, UNSIGNED_SMALLINT, UNSIGNED_TINYINT, VARBINARY], " + + "UNSIGNED_TINYINT ARRAY=[BIGINT ARRAY, BINARY ARRAY, DECIMAL ARRAY, DOUBLE ARRAY, FLOAT ARRAY, INTEGER ARRAY, SMALLINT ARRAY, TINYINT ARRAY, UNSIGNED_DOUBLE ARRAY, UNSIGNED_FLOAT ARRAY, UNSIGNED_INT ARRAY, UNSIGNED_LONG ARRAY, UNSIGNED_SMALLINT ARRAY, UNSIGNED_TINYINT ARRAY, VARBINARY ARRAY], " + + "VARBINARY=[BINARY, VARBINARY, VARBINARY_ENCODED], " + + "VARBINARY ARRAY=[BINARY ARRAY, VARBINARY ARRAY], " + + "VARBINARY_ENCODED=[BINARY, VARBINARY, VARBINARY_ENCODED], " + + "VARCHAR=[BINARY, CHAR, VARBINARY, VARBINARY_ENCODED, VARCHAR], " + + "VARCHAR ARRAY=[BINARY ARRAY, 
CHAR ARRAY, VARBINARY ARRAY, VARCHAR ARRAY]}", + coercibleToMap.toString()); + } + + @Test + public void testIntVersusLong() { + long l = -1L; + int i = -1; + assertTrue(PLong.INSTANCE.compareTo(l, i, PInteger.INSTANCE) == 0); + assertTrue(PInteger.INSTANCE.compareTo(i, l, PLong.INSTANCE) == 0); + } + + @Test + public void testSeparatorBytes() { + byte biggest = (byte) 0xFF; + assertEquals(biggest, QueryConstants.DESC_SEPARATOR_BYTE); + byte[] array = new byte[1]; + for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) { + array[0] = (byte) i; + assertTrue(Bytes.compareTo(array, QueryConstants.DESC_SEPARATOR_BYTE_ARRAY) <= 0); } - - @Test - public void testAscExclusiveTimestampRange() { - long now = System.currentTimeMillis(); - Timestamp ts1 = DateUtil.getTimestamp(now, 999999); - final byte[] lowerRange = PTimestamp.INSTANCE.toBytes(ts1); - Timestamp ts2 = new Timestamp(now + MILLIS_IN_DAY); - final byte[] upperRange = PTimestamp.INSTANCE.toBytes(ts2); - KeyRange range = PTimestamp.INSTANCE.getKeyRange(lowerRange, false, upperRange, false, SortOrder.ASC); - Timestamp ts3 = new Timestamp(now + 1); - // Rolled up to next millis - final byte[] expectedLowerRange = PTimestamp.INSTANCE.toBytes(ts3); - assertTrue(Bytes.compareTo(expectedLowerRange, range.getLowerRange()) == 0); - assertTrue(Bytes.compareTo(upperRange, range.getUpperRange()) == 0); - } - - - @Test - public void testDescExclusiveTimestampRange() { - long now = System.currentTimeMillis(); - Timestamp ts1 = new Timestamp(now + MILLIS_IN_DAY); - final byte[] lowerRange = PTimestamp.INSTANCE.toBytes(ts1, SortOrder.DESC); - Timestamp ts2 = new Timestamp(now); - final byte[] upperRange = PTimestamp.INSTANCE.toBytes(ts2, SortOrder.DESC); - KeyRange range = PTimestamp.INSTANCE.getKeyRange(lowerRange, false, upperRange, false, SortOrder.DESC); - Timestamp ts3 = DateUtil.getTimestamp(now + MILLIS_IN_DAY - 1, 999999); - // Rolled up to next millis - final byte[] expectedLowerRange = PTimestamp.INSTANCE.toBytes(ts3, SortOrder.DESC); - assertTrue(Bytes.compareTo(expectedLowerRange, range.getLowerRange()) == 0); - assertTrue(Bytes.compareTo(upperRange, range.getUpperRange()) == 0); - } - - @Test - public void testCompareToNull() { - for (PDataType type1 : PDataType.values()) { - Object value1 = type1.getSampleValue(); - for (PDataType type2 : PDataType.values()) { - Object value2 = null; - if (type1.isComparableTo(type2)) { - assertTrue(type1.compareTo(value1, value2, type2) > 0); - } - } + } + + @Test + public void testBoolean() { + byte[] bytes = PBoolean.INSTANCE.toBytes(Boolean.TRUE); + assertEquals(1, bytes[0]); + bytes = PBoolean.INSTANCE.toBytes(Boolean.FALSE); + assertEquals(0, bytes[0]); + + bytes = PBoolean.INSTANCE.toBytes(Boolean.TRUE, SortOrder.DESC); + assertEquals(0, bytes[0]); + bytes = PBoolean.INSTANCE.toBytes(Boolean.FALSE, SortOrder.DESC); + assertEquals(1, bytes[0]); + + Object dec = PDecimal.INSTANCE.toObject(Boolean.TRUE, PBoolean.INSTANCE); + bytes = PDecimal.INSTANCE.toBytes(dec); + Object b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.ASC); + assertEquals(true, b); + + dec = PDecimal.INSTANCE.toObject(Boolean.FALSE, PBoolean.INSTANCE); + bytes = PDecimal.INSTANCE.toBytes(dec); + b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.ASC); + assertEquals(false, b); + + dec = PDecimal.INSTANCE.toObject(Boolean.TRUE, PBoolean.INSTANCE); + bytes = PDecimal.INSTANCE.toBytes(dec, SortOrder.DESC); + b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, 
PDecimal.INSTANCE, SortOrder.DESC); + assertEquals(true, b); + + dec = PDecimal.INSTANCE.toObject(Boolean.FALSE, PBoolean.INSTANCE); + bytes = PDecimal.INSTANCE.toBytes(dec, SortOrder.DESC); + b = PBoolean.INSTANCE.toObject(bytes, 0, bytes.length, PDecimal.INSTANCE, SortOrder.DESC); + assertEquals(false, b); + } + + @Test + public void testTimestampToDateComparison() { + long now = System.currentTimeMillis(); + Timestamp ts1 = DateUtil.getTimestamp(now, 1111); + final byte[] bytes1 = PTimestamp.INSTANCE.toBytes(ts1); + Date ts2 = new Date(now); + final byte[] bytes2 = PDate.INSTANCE.toBytes(ts2); + assertTrue(PTimestamp.INSTANCE.compareTo(bytes1, 0, bytes1.length, SortOrder.getDefault(), + bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) > 0); + + Timestamp ts3 = DateUtil.getTimestamp(now, 0); + final byte[] bytes3 = PTimestamp.INSTANCE.toBytes(ts3); + assertTrue(PTimestamp.INSTANCE.compareTo(bytes3, 0, bytes3.length, SortOrder.getDefault(), + bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) == 0); + + Timestamp ts4 = DateUtil.getTimestamp(now, 0); + final byte[] bytes4 = PUnsignedTimestamp.INSTANCE.toBytes(ts4); + assertTrue( + PUnsignedTimestamp.INSTANCE.compareTo(bytes4, 0, bytes4.length, SortOrder.getDefault(), + bytes2, 0, bytes2.length, SortOrder.getDefault(), PDate.INSTANCE) == 0); + } + + @Test + public void testTimestamp() { + long now = System.currentTimeMillis(); + Timestamp ts1 = DateUtil.getTimestamp(now, 1111); + final byte[] bytes1 = PTimestamp.INSTANCE.toBytes(ts1); + Timestamp ts2 = DateUtil.getTimestamp(now, 1112); + final byte[] bytes2 = PTimestamp.INSTANCE.toBytes(ts2); + assertTrue(Bytes.compareTo(bytes1, bytes2) < 0); + + final byte[] ibytes1 = SortOrder.invert(bytes1, 0, bytes1.length); + final byte[] ibytes2 = SortOrder.invert(bytes2, 0, bytes2.length); + assertTrue(Bytes.compareTo(ibytes1, ibytes2) > 0); + + Timestamp ts3 = new Timestamp(now + 1); + final byte[] bytes3 = PTimestamp.INSTANCE.toBytes(ts3); + assertTrue(Bytes.compareTo(bytes3, bytes2) > 0); + final byte[] ibytes3 = SortOrder.invert(bytes3, 0, bytes3.length); + assertTrue(Bytes.compareTo(ibytes3, ibytes2) < 0); + + Timestamp ts4 = new Timestamp(now - 1); + byte[] bytes4 = PTimestamp.INSTANCE.toBytes(ts4); + assertTrue(Bytes.compareTo(bytes4, bytes1) < 0); + byte[] ibytes4 = SortOrder.invert(bytes4, 0, bytes4.length); + assertTrue(Bytes.compareTo(ibytes4, ibytes1) > 0); + } + + @Test + public void testAscExclusiveTimestampRange() { + long now = System.currentTimeMillis(); + Timestamp ts1 = DateUtil.getTimestamp(now, 999999); + final byte[] lowerRange = PTimestamp.INSTANCE.toBytes(ts1); + Timestamp ts2 = new Timestamp(now + MILLIS_IN_DAY); + final byte[] upperRange = PTimestamp.INSTANCE.toBytes(ts2); + KeyRange range = + PTimestamp.INSTANCE.getKeyRange(lowerRange, false, upperRange, false, SortOrder.ASC); + Timestamp ts3 = new Timestamp(now + 1); + // Rolled up to next millis + final byte[] expectedLowerRange = PTimestamp.INSTANCE.toBytes(ts3); + assertTrue(Bytes.compareTo(expectedLowerRange, range.getLowerRange()) == 0); + assertTrue(Bytes.compareTo(upperRange, range.getUpperRange()) == 0); + } + + @Test + public void testDescExclusiveTimestampRange() { + long now = System.currentTimeMillis(); + Timestamp ts1 = new Timestamp(now + MILLIS_IN_DAY); + final byte[] lowerRange = PTimestamp.INSTANCE.toBytes(ts1, SortOrder.DESC); + Timestamp ts2 = new Timestamp(now); + final byte[] upperRange = PTimestamp.INSTANCE.toBytes(ts2, SortOrder.DESC); + KeyRange range = + 
PTimestamp.INSTANCE.getKeyRange(lowerRange, false, upperRange, false, SortOrder.DESC); + Timestamp ts3 = DateUtil.getTimestamp(now + MILLIS_IN_DAY - 1, 999999); + // Rolled up to next millis + final byte[] expectedLowerRange = PTimestamp.INSTANCE.toBytes(ts3, SortOrder.DESC); + assertTrue(Bytes.compareTo(expectedLowerRange, range.getLowerRange()) == 0); + assertTrue(Bytes.compareTo(upperRange, range.getUpperRange()) == 0); + } + + @Test + public void testCompareToNull() { + for (PDataType type1 : PDataType.values()) { + Object value1 = type1.getSampleValue(); + for (PDataType type2 : PDataType.values()) { + Object value2 = null; + if (type1.isComparableTo(type2)) { + assertTrue(type1.compareTo(value1, value2, type2) > 0); } - for (PDataType type1 : PDataType.values()) { - Object value1 = null; - for (PDataType type2 : PDataType.values()) { - Object value2 = type2.getSampleValue(); - if (type1.isComparableTo(type2)) { - assertTrue(type1.compareTo(value1, value2, type2) < 0); - } - } + } + } + for (PDataType type1 : PDataType.values()) { + Object value1 = null; + for (PDataType type2 : PDataType.values()) { + Object value2 = type2.getSampleValue(); + if (type1.isComparableTo(type2)) { + assertTrue(type1.compareTo(value1, value2, type2) < 0); } - for (PDataType type1 : PDataType.values()) { - Object value1 = null; - for (PDataType type2 : PDataType.values()) { - Object value2 = null; - if (type1.isComparableTo(type2)) { - assertTrue(type1.compareTo(value1, value2, type2) == 0); - } - } + } + } + for (PDataType type1 : PDataType.values()) { + Object value1 = null; + for (PDataType type2 : PDataType.values()) { + Object value2 = null; + if (type1.isComparableTo(type2)) { + assertTrue(type1.compareTo(value1, value2, type2) == 0); } + } } + } - @Test - public void testFromSqlTypeName() { - assertEquals(PVarchar.INSTANCE, PDataType.fromSqlTypeName("varchar")); - } + @Test + public void testFromSqlTypeName() { + assertEquals(PVarchar.INSTANCE, PDataType.fromSqlTypeName("varchar")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDateArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDateArrayToStringTest.java index 4ae210b6bf6..1aa4eca3066 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDateArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDateArrayToStringTest.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; @@ -18,63 +25,62 @@ import org.apache.phoenix.util.DateUtil; public class PDateArrayToStringTest extends BasePhoenixArrayToStringTest { - private static final String DATE1 = "2001-01-01 12:15:15.123"; - private static final String DATE2 = "2002-02-02 14:30:30.456"; - private static final String DATE3 = "2003-03-03 16:45:45.789"; + private static final String DATE1 = "2001-01-01 12:15:15.123"; + private static final String DATE2 = "2002-02-02 14:30:30.456"; + private static final String DATE3 = "2003-03-03 16:45:45.789"; - @Override - protected PDataType getBaseType() { - return PDate.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PDate.INSTANCE; + } - @Override - protected boolean isPrimitive(PhoenixArray arr) { - // dates have codec like primitive times but date is not primitive/scalar && there's no - // primitive date array - return false; - } + @Override + protected boolean isPrimitive(PhoenixArray arr) { + // dates have codec like primitive times but date is not primitive/scalar && there's no + // primitive date array + return false; + } - @Override - protected Object getElement1() { - return parseDate(DATE1); - } + @Override + protected Object getElement1() { + return parseDate(DATE1); + } - @Override - protected String getString1() { - return "'" + DATE1 + "'"; - } + @Override + protected String getString1() { + return "'" + DATE1 + "'"; + } - @Override - protected Object getElement2() { - return parseDate(DATE2); - } + @Override + protected Object getElement2() { + return parseDate(DATE2); + } - @Override - protected String getString2() { - return "'" + DATE2 + "'"; - } + @Override + protected String getString2() { + return "'" + DATE2 + "'"; + } - @Override - protected Object getElement3() { - return parseDate(DATE3); - } + @Override + protected Object getElement3() { + return parseDate(DATE3); + } - @Override - protected String getString3() { - return "'" + DATE3 + "'"; - } + @Override + protected String getString3() { + return "'" + DATE3 + "'"; + } - private Object parseDate(String dateString) { - try { - java.util.Date date = - new SimpleDateFormat(DateUtil.DEFAULT_DATE_FORMAT).parse(dateString); - Calendar cal = Calendar.getInstance(); - cal.setTimeInMillis(date.getTime()); - cal.add(Calendar.MILLISECOND, TimeZone.getDefault().getOffset(date.getTime())); - return cal.getTime(); - } catch (ParseException e) { - throw new RuntimeException(e); - } + private Object parseDate(String dateString) { + try { + java.util.Date date = new SimpleDateFormat(DateUtil.DEFAULT_DATE_FORMAT).parse(dateString); + Calendar cal = Calendar.getInstance(); + cal.setTimeInMillis(date.getTime()); + cal.add(Calendar.MILLISECOND, TimeZone.getDefault().getOffset(date.getTime())); + return cal.getTime(); + } catch (ParseException e) { + throw new RuntimeException(e); } + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PVarcharArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PVarcharArrayToStringTest.java index 9a8e9ab0a7d..2d820a839dc 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PVarcharArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PVarcharArrayToStringTest.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.schema.types; @@ -17,55 +24,52 @@ */ public class PVarcharArrayToStringTest extends BasePhoenixArrayToStringTest { - @Test - public void testUnicodeString() { - helpTestToString( - getBaseType(), // - new String[] { "a" + "\u00ea" + "\u00f1" + "b", "c" + "\u00a0" + "\u00ff" + "d" }, - "['aêñb', 'c ÿd']"); - } + @Test + public void testUnicodeString() { + helpTestToString(getBaseType(), // + new String[] { "a" + "\u00ea" + "\u00f1" + "b", "c" + "\u00a0" + "\u00ff" + "d" }, + "['aêñb', 'c ÿd']"); + } - @Test - public void testStringWithSeparators() { - helpTestToString( - getBaseType(), // - new String[] { "a,b,c", "d\"e\"f\"", "'g'h'i'" }, - "['a,b,c', 'd\"e\"f\"', '''g''h''i''']"); - } + @Test + public void testStringWithSeparators() { + helpTestToString(getBaseType(), // + new String[] { "a,b,c", "d\"e\"f\"", "'g'h'i'" }, "['a,b,c', 'd\"e\"f\"', '''g''h''i''']"); + } - @Override - protected PVarchar getBaseType() { - return PVarchar.INSTANCE; - } + @Override + protected PVarchar getBaseType() { + return PVarchar.INSTANCE; + } - @Override - protected String getString1() { - return "'string1'"; - } + @Override + protected String getString1() { + return "'string1'"; + } - @Override - protected String getElement1() { - return "string1"; - } + @Override + protected String getElement1() { + return "string1"; + } - @Override - protected String getString2() { - return "'string2'"; - } + @Override + protected String getString2() { + return "'string2'"; + } - @Override - protected String getElement2() { - return "string2"; - } + @Override + protected String getElement2() { + return "string2"; + } - @Override - protected String getString3() { - return "'string3'"; - } + @Override + protected String getString3() { + return "'string3'"; + } - @Override - protected String getElement3() { - return "string3"; - } + @Override + protected String getElement3() { + return "string3"; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBooleanPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBooleanPhoenixArrayToStringTest.java index 1ec47521769..6cb18068885 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBooleanPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBooleanPhoenixArrayToStringTest.java @@ -1,56 +1,63 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; public class PrimitiveBooleanPhoenixArrayToStringTest extends BasePhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PBoolean.INSTANCE; - } - - @Override - protected String getNullString() { - // primitive arrays don't use null - return "false"; - } - - @Override - protected Object getElement1() { - return true; - } - - @Override - protected String getString1() { - return "true"; - } - - @Override - protected Object getElement2() { - return false; - } - - @Override - protected String getString2() { - return "false"; - } - - @Override - protected Object getElement3() { - return getElement1(); - } - - @Override - protected String getString3() { - return getString1(); - } + @Override + protected PDataType getBaseType() { + return PBoolean.INSTANCE; + } + + @Override + protected String getNullString() { + // primitive arrays don't use null + return "false"; + } + + @Override + protected Object getElement1() { + return true; + } + + @Override + protected String getString1() { + return "true"; + } + + @Override + protected Object getElement2() { + return false; + } + + @Override + protected String getString2() { + return "false"; + } + + @Override + protected Object getElement3() { + return getElement1(); + } + + @Override + protected String getString3() { + return getString1(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBytePhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBytePhoenixArrayToStringTest.java index 12a1bb0924e..bff9601a8ae 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBytePhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveBytePhoenixArrayToStringTest.java @@ -1,34 +1,42 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; -public class PrimitiveBytePhoenixArrayToStringTest extends BasePrimitiveIntPhoenixArrayToStringTest { +public class PrimitiveBytePhoenixArrayToStringTest + extends BasePrimitiveIntPhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PTinyint.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PTinyint.INSTANCE; + } - @Override - protected Number getElement1() { - return (byte) 1; - } + @Override + protected Number getElement1() { + return (byte) 1; + } - @Override - protected Number getElement2() { - return (byte) 2; - } + @Override + protected Number getElement2() { + return (byte) 2; + } - @Override - protected Number getElement3() { - return (byte) 3; - } + @Override + protected Number getElement3() { + return (byte) 3; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveDoublePhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveDoublePhoenixArrayToStringTest.java index 62e9a828a42..dd7da0eb991 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveDoublePhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveDoublePhoenixArrayToStringTest.java @@ -1,36 +1,43 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.schema.types; -public class PrimitiveDoublePhoenixArrayToStringTest extends - BasePrimitiveDoublePhoenixArrayToStringTest { +public class PrimitiveDoublePhoenixArrayToStringTest + extends BasePrimitiveDoublePhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PDouble.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PDouble.INSTANCE; + } - @Override - protected Object getElement1() { - return 1.1; - } + @Override + protected Object getElement1() { + return 1.1; + } - @Override - protected Object getElement2() { - return 2.2; - } + @Override + protected Object getElement2() { + return 2.2; + } - @Override - protected Object getElement3() { - return 3.3; - } + @Override + protected Object getElement3() { + return 3.3; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveFloatPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveFloatPhoenixArrayToStringTest.java index 92a3e87fb38..b3f0933dfc7 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveFloatPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveFloatPhoenixArrayToStringTest.java @@ -1,36 +1,44 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.schema.types; -public class PrimitiveFloatPhoenixArrayToStringTest extends BasePrimitiveDoublePhoenixArrayToStringTest { +public class PrimitiveFloatPhoenixArrayToStringTest + extends BasePrimitiveDoublePhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PFloat.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PFloat.INSTANCE; + } - @Override - protected Object getElement1() { - return 1.1f; - } + @Override + protected Object getElement1() { + return 1.1f; + } - @Override - protected Object getElement2() { - return 2.2f; - - } + @Override + protected Object getElement2() { + return 2.2f; - @Override - protected Object getElement3() { - return 3.3f; - } + } + + @Override + protected Object getElement3() { + return 3.3f; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveIntPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveIntPhoenixArrayToStringTest.java index 770469015da..d1ad3bebc75 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveIntPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveIntPhoenixArrayToStringTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,9 +19,9 @@ public class PrimitiveIntPhoenixArrayToStringTest extends BasePrimitiveIntPhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PInteger.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PInteger.INSTANCE; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveLongPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveLongPhoenixArrayToStringTest.java index 35db369cede..814c6123f08 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveLongPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveLongPhoenixArrayToStringTest.java @@ -1,35 +1,43 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.schema.types; -public class PrimitiveLongPhoenixArrayToStringTest extends BasePrimitiveIntPhoenixArrayToStringTest { +public class PrimitiveLongPhoenixArrayToStringTest + extends BasePrimitiveIntPhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PLong.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PLong.INSTANCE; + } - @Override - protected Number getElement1() { - return 1L; - } + @Override + protected Number getElement1() { + return 1L; + } - @Override - protected Number getElement2() { - return 2L; - } + @Override + protected Number getElement2() { + return 2L; + } - @Override - protected Number getElement3() { - return 3L; - } + @Override + protected Number getElement3() { + return 3L; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveShortPhoenixArrayToStringTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveShortPhoenixArrayToStringTest.java index 4958c637cbe..525642bb1a1 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveShortPhoenixArrayToStringTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PrimitiveShortPhoenixArrayToStringTest.java @@ -1,36 +1,43 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.schema.types; -public class PrimitiveShortPhoenixArrayToStringTest extends - BasePrimitiveIntPhoenixArrayToStringTest { +public class PrimitiveShortPhoenixArrayToStringTest + extends BasePrimitiveIntPhoenixArrayToStringTest { - @Override - protected PDataType getBaseType() { - return PSmallint.INSTANCE; - } + @Override + protected PDataType getBaseType() { + return PSmallint.INSTANCE; + } - @Override - protected Short getElement1() { - return (short) 1; - } + @Override + protected Short getElement1() { + return (short) 1; + } - @Override - protected Short getElement2() { - return (short) 2; - } + @Override + protected Short getElement2() { + return (short) 2; + } - @Override - protected Short getElement3() { - return (short) 3; - } + @Override + protected Short getElement3() { + return (short) 3; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java index bba1dd847fc..b81b5fd7f59 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,15 +33,15 @@ public class TraceSpanReceiverTest { @BeforeClass - public static synchronized void setup() throws Exception{ + public static synchronized void setup() throws Exception { } /** - * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers, - * but HBase writes some strings as well, so we need to be able to handle that too + * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers, but HBase + * writes some strings as well, so we need to be able to handle that too */ @Test - public void testNonIntegerAnnotations(){ + public void testNonIntegerAnnotations() { Span span = getSpan(); // make sure its less than the length of an integer @@ -62,7 +62,7 @@ public void testNonIntegerAnnotations(){ } @Test - public void testIntegerAnnotations(){ + public void testIntegerAnnotations() { Span span = getSpan(); // add annotation through the phoenix interfaces @@ -75,8 +75,8 @@ public void testIntegerAnnotations(){ assertTrue(source.getNumSpans() == 1); } - private Span getSpan(){ + private Span getSpan() { // Spans with Trace Id as 0 will be rejected (See PHOENIX-3767 for details) - return new MilliSpan("test span", 1, 1 , 2, "pid"); + return new MilliSpan("test span", 1, 1, 2, "pid"); } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/transaction/NotAvailableTransactionService.java b/phoenix-core/src/test/java/org/apache/phoenix/transaction/NotAvailableTransactionService.java index 9ee927172e8..38fc0a82953 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/transaction/NotAvailableTransactionService.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/transaction/NotAvailableTransactionService.java @@ -7,7 +7,7 @@ * "License"); you may 
not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,10 +21,11 @@ public class NotAvailableTransactionService implements PhoenixTransactionService { - private static final NotAvailableTransactionService INSTANCE = new NotAvailableTransactionService(); + private static final NotAvailableTransactionService INSTANCE = + new NotAvailableTransactionService(); public static NotAvailableTransactionService getInstance() { - return INSTANCE; + return INSTANCE; } @Override diff --git a/phoenix-core/src/test/java/org/apache/phoenix/transaction/OmidTransactionService.java b/phoenix-core/src/test/java/org/apache/phoenix/transaction/OmidTransactionService.java index 02531faf770..ab849176d35 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/transaction/OmidTransactionService.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/transaction/OmidTransactionService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,9 +22,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.omid.committable.CommitTable; -import org.apache.omid.committable.InMemoryCommitTable; import org.apache.omid.committable.CommitTable.Client; import org.apache.omid.committable.CommitTable.Writer; +import org.apache.omid.committable.InMemoryCommitTable; import org.apache.omid.transaction.HBaseOmidClientConfiguration; import org.apache.omid.transaction.HBaseTransactionManager; import org.apache.omid.tso.TSOMockModule; @@ -42,83 +42,82 @@ public class OmidTransactionService implements PhoenixTransactionService { - private static final String OMID_TSO_PORT = "phoenix.omid.tso.port"; - private static final String OMID_TSO_CONFLICT_MAP_SIZE = "phoenix.omid.tso.conflict.map.size"; - private static final String OMID_TSO_TIMESTAMP_TYPE = "phoenix.omid.tso.timestamp.type"; - private static final int DEFAULT_OMID_TSO_CONFLICT_MAP_SIZE = 1000; - private static final String DEFAULT_OMID_TSO_TIMESTAMP_TYPE = "WORLD_TIME"; + private static final String OMID_TSO_PORT = "phoenix.omid.tso.port"; + private static final String OMID_TSO_CONFLICT_MAP_SIZE = "phoenix.omid.tso.conflict.map.size"; + private static final String OMID_TSO_TIMESTAMP_TYPE = "phoenix.omid.tso.timestamp.type"; + private static final int DEFAULT_OMID_TSO_CONFLICT_MAP_SIZE = 1000; + private static final String DEFAULT_OMID_TSO_TIMESTAMP_TYPE = "WORLD_TIME"; + + private final HBaseTransactionManager transactionManager; + private TSOServer tso; + + public OmidTransactionService(TSOServer tso, HBaseTransactionManager transactionManager) { + this.tso = tso; + this.transactionManager = transactionManager; + } + + public static OmidTransactionService startAndInjectOmidTransactionService( + OmidTransactionProvider transactionProvider, Configuration config, + ConnectionInfo connectionInfo, int port) throws SQLException { + TSOServerConfig tsoConfig = new TSOServerConfig(); + TSOServer tso; + + tsoConfig.setPort(port); + tsoConfig.setConflictMapSize( + 
config.getInt(OMID_TSO_CONFLICT_MAP_SIZE, DEFAULT_OMID_TSO_CONFLICT_MAP_SIZE)); + tsoConfig + .setTimestampType(config.get(OMID_TSO_TIMESTAMP_TYPE, DEFAULT_OMID_TSO_TIMESTAMP_TYPE)); + tsoConfig.setWaitStrategy(WAIT_STRATEGY.LOW_CPU.toString()); + + Injector injector = Guice.createInjector(new TSOMockModule(tsoConfig)); + tso = injector.getInstance(TSOServer.class); + tso.startAsync(); + tso.awaitRunning(); + + OmidClientConfiguration clientConfig = new OmidClientConfiguration(); + clientConfig.setConnectionString("localhost:" + port); + clientConfig.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); + + InMemoryCommitTable commitTable = (InMemoryCommitTable) injector.getInstance(CommitTable.class); + + HBaseTransactionManager transactionManager; + Client commitTableClient; + Writer commitTableWriter; + try { + // Create the associated Handler + TSOClient client = TSOClient.newInstance(clientConfig); + + HBaseOmidClientConfiguration clientConf = new HBaseOmidClientConfiguration(); + clientConf.setConnectionString("localhost:" + port); + clientConf.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); + clientConf.setHBaseConfiguration(config); + commitTableClient = commitTable.getClient(); + commitTableWriter = commitTable.getWriter(); + transactionManager = + HBaseTransactionManager.builder(clientConf).commitTableClient(commitTableClient) + .commitTableWriter(commitTableWriter).tsoClient(client).build(); + } catch (IOException | InterruptedException e) { + throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) + .setMessage(e.getMessage()).setRootCause(e).build().buildException(); + } - private final HBaseTransactionManager transactionManager; - private TSOServer tso; + transactionProvider.injectTestService(transactionManager, commitTableClient); - public OmidTransactionService(TSOServer tso, HBaseTransactionManager transactionManager) { - this.tso = tso; - this.transactionManager = transactionManager; - } + return new OmidTransactionService(tso, transactionManager); + } - public static OmidTransactionService startAndInjectOmidTransactionService( - OmidTransactionProvider transactionProvider, Configuration config, - ConnectionInfo connectionInfo, int port) throws SQLException { - TSOServerConfig tsoConfig = new TSOServerConfig(); - TSOServer tso; - - tsoConfig.setPort(port); - tsoConfig.setConflictMapSize( - config.getInt(OMID_TSO_CONFLICT_MAP_SIZE, DEFAULT_OMID_TSO_CONFLICT_MAP_SIZE)); - tsoConfig.setTimestampType( - config.get(OMID_TSO_TIMESTAMP_TYPE, DEFAULT_OMID_TSO_TIMESTAMP_TYPE)); - tsoConfig.setWaitStrategy(WAIT_STRATEGY.LOW_CPU.toString()); - - Injector injector = Guice.createInjector(new TSOMockModule(tsoConfig)); - tso = injector.getInstance(TSOServer.class); - tso.startAsync(); - tso.awaitRunning(); - - OmidClientConfiguration clientConfig = new OmidClientConfiguration(); - clientConfig.setConnectionString("localhost:" + port); - clientConfig.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); - - InMemoryCommitTable commitTable = - (InMemoryCommitTable) injector.getInstance(CommitTable.class); - - HBaseTransactionManager transactionManager; - Client commitTableClient; - Writer commitTableWriter; - try { - // Create the associated Handler - TSOClient client = TSOClient.newInstance(clientConfig); - - HBaseOmidClientConfiguration clientConf = new HBaseOmidClientConfiguration(); - clientConf.setConnectionString("localhost:" + port); - 
clientConf.setConflictAnalysisLevel(OmidClientConfiguration.ConflictDetectionLevel.ROW); - clientConf.setHBaseConfiguration(config); - commitTableClient = commitTable.getClient(); - commitTableWriter = commitTable.getWriter(); - transactionManager = - HBaseTransactionManager.builder(clientConf).commitTableClient(commitTableClient) - .commitTableWriter(commitTableWriter).tsoClient(client).build(); - } catch (IOException | InterruptedException e) { - throw new SQLExceptionInfo.Builder(SQLExceptionCode.TRANSACTION_FAILED) - .setMessage(e.getMessage()).setRootCause(e).build().buildException(); - } - - transactionProvider.injectTestService(transactionManager, commitTableClient); - - return new OmidTransactionService(tso, transactionManager); - } + public void start() { - public void start() { + } + @Override + public void close() throws IOException { + if (transactionManager != null) { + transactionManager.close(); } - - @Override - public void close() throws IOException { - if (transactionManager != null) { - transactionManager.close(); - } - if (tso != null) { - tso.stopAsync(); - tso.awaitTerminated(); - } + if (tso != null) { + tso.stopAsync(); + tso.awaitTerminated(); } -} \ No newline at end of file + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/transaction/PhoenixTransactionService.java b/phoenix-core/src/test/java/org/apache/phoenix/transaction/PhoenixTransactionService.java index 10c46e1e9b6..ebbe24fce79 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/transaction/PhoenixTransactionService.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/transaction/PhoenixTransactionService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/phoenix-core/src/test/java/org/apache/phoenix/transaction/TransactionServiceManager.java b/phoenix-core/src/test/java/org/apache/phoenix/transaction/TransactionServiceManager.java index ebb892101ed..7fa70dc0ea5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/transaction/TransactionServiceManager.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/transaction/TransactionServiceManager.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,14 +25,17 @@ public class TransactionServiceManager { - public static PhoenixTransactionService startTransactionService(TransactionFactory.Provider provider, Configuration config, ConnectionInfo connInfo, int port) throws SQLException { - PhoenixTransactionProvider transactionProvider = provider.getTransactionProvider(); - if (provider == Provider.NOTAVAILABLE) { - return NotAvailableTransactionService.getInstance(); - } else if (provider == Provider.OMID) { - return OmidTransactionService.startAndInjectOmidTransactionService((OmidTransactionProvider)transactionProvider, config, connInfo, port); - } - throw new UnsupportedOperationException("Unknown transaction provider"); + public static PhoenixTransactionService startTransactionService( + TransactionFactory.Provider provider, Configuration config, ConnectionInfo connInfo, int port) + throws SQLException { + PhoenixTransactionProvider transactionProvider = provider.getTransactionProvider(); + if (provider == Provider.NOTAVAILABLE) { + return NotAvailableTransactionService.getInstance(); + } else if (provider == Provider.OMID) { + return OmidTransactionService.startAndInjectOmidTransactionService( + (OmidTransactionProvider) transactionProvider, config, connInfo, port); } + throw new UnsupportedOperationException("Unknown transaction provider"); + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java index 24ee5c90c4b..f5f6d5aa7b3 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,181 +42,177 @@ import org.apache.phoenix.schema.types.PBinary; import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PIntegerArray; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; - public abstract class AbstractUpsertExecutorTest extends BaseConnectionlessQueryTest { - protected Connection conn; - protected List columnInfoList; - protected PreparedStatement preparedStatement; - protected UpsertExecutor.UpsertListener upsertListener; - - protected abstract UpsertExecutor getUpsertExecutor(); - protected abstract R createRecord(Object... 
columnValues) throws IOException; - protected abstract UpsertExecutor getUpsertExecutor(Connection conn); - - private static String TIMESTAMP_WITH_NANOS = "2006-11-03 00:00:00.001003000"; - - @Before - public void setUp() throws SQLException { - columnInfoList = ImmutableList.of( - new ColumnInfo("ID", Types.BIGINT), - new ColumnInfo("NAME", Types.VARCHAR), - new ColumnInfo("AGE", Types.INTEGER), - new ColumnInfo("VALUES", PIntegerArray.INSTANCE.getSqlType()), - new ColumnInfo("BEARD", Types.BOOLEAN), - new ColumnInfo("PIC", Types.BINARY), - new ColumnInfo("T", Types.TIMESTAMP)); - - preparedStatement = mock(PreparedStatement.class); - upsertListener = mock(UpsertExecutor.UpsertListener.class); - Properties properties = new Properties(); - properties.setProperty("phoenix.query.dateFormatTimeZone", DateUtil.DEFAULT_TIME_ZONE_ID); - conn = DriverManager.getConnection(getUrl(), properties); - } - - @After - public void tearDown() throws SQLException { - conn.close(); - } - - @Test - public void testExecute() throws Exception { - byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); - getUpsertExecutor().execute( - createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, encodedBinaryData, - Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); - - verify(upsertListener).upsertDone(1L); - verifyNoMoreInteractions(upsertListener); - - verify(preparedStatement).setObject(1, Long.valueOf(123L)); - verify(preparedStatement).setObject(2, "NameValue"); - verify(preparedStatement).setObject(3, Integer.valueOf(42)); - verify(preparedStatement).setObject(4, PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[]{1,2,3})); - verify(preparedStatement).setObject(5, Boolean.TRUE); - verify(preparedStatement).setObject(6, binaryData); - verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); - verify(preparedStatement).execute(); - verifyNoMoreInteractions(preparedStatement); - } - - @Test - public void testExecute_TooFewFields() throws Exception { - R recordWithTooFewFields = createRecord(123L, "NameValue"); - getUpsertExecutor().execute(recordWithTooFewFields); - - verify(upsertListener).errorOnRecord(eq(recordWithTooFewFields), any(Throwable.class)); - verifyNoMoreInteractions(upsertListener); - } - - @Test - public void testExecute_TooManyFields() throws Exception { - byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); - R recordWithTooManyFields = createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), - true, encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS), "garbage"); - getUpsertExecutor().execute(recordWithTooManyFields); - - verify(upsertListener).upsertDone(1L); - verifyNoMoreInteractions(upsertListener); - - verify(preparedStatement).setObject(1, Long.valueOf(123L)); - verify(preparedStatement).setObject(2, "NameValue"); - verify(preparedStatement).setObject(3, Integer.valueOf(42)); - verify(preparedStatement).setObject(4, PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[]{1,2,3})); - verify(preparedStatement).setObject(5, Boolean.TRUE); - verify(preparedStatement).setObject(6, binaryData); - verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); - verify(preparedStatement).execute(); - verifyNoMoreInteractions(preparedStatement); - } - - @Test - public void testExecute_NullField() throws Exception { - 
byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); - getUpsertExecutor().execute( - createRecord(123L, "NameValue", null, Arrays.asList(1, 2, 3), false, encodedBinaryData, - Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); - - verify(upsertListener).upsertDone(1L); - verifyNoMoreInteractions(upsertListener); - - verify(preparedStatement).setObject(1, Long.valueOf(123L)); - verify(preparedStatement).setObject(2, "NameValue"); - verify(preparedStatement).setNull(3, columnInfoList.get(2).getSqlType()); - verify(preparedStatement).setObject(4, PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[]{1,2,3})); - verify(preparedStatement).setObject(5, Boolean.FALSE); - verify(preparedStatement).setObject(6, binaryData); - verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); - verify(preparedStatement).execute(); - verifyNoMoreInteractions(preparedStatement); - } - - @Test - public void testExecute_InvalidType() throws Exception { - byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); - R recordWithInvalidType = - createRecord(123L, "NameValue", "ThisIsNotANumber", Arrays.asList(1, 2, 3), true, - encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS)); - getUpsertExecutor().execute(recordWithInvalidType); - - verify(upsertListener).errorOnRecord(eq(recordWithInvalidType), any(Throwable.class)); - verifyNoMoreInteractions(upsertListener); - } - - @Test - public void testExecute_InvalidBoolean() throws Exception { - byte[] binaryData=(byte[])PBinary.INSTANCE.getSampleValue(); - String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); - R csvRecordWithInvalidType = - createRecord("123,NameValue,42,1:2:3,NotABoolean," + encodedBinaryData + "," - + TIMESTAMP_WITH_NANOS); - getUpsertExecutor().execute(csvRecordWithInvalidType); - - verify(upsertListener).errorOnRecord(eq(csvRecordWithInvalidType), any(Throwable.class)); - } - - @Test - public void testExecute_InvalidBinary() throws Exception { - String notBase64Encoded="#@$df"; - R csvRecordWithInvalidType = - createRecord("123,NameValue,42,1:2:3,true," + notBase64Encoded + "," - + TIMESTAMP_WITH_NANOS); - getUpsertExecutor().execute(csvRecordWithInvalidType); - - verify(upsertListener).errorOnRecord(eq(csvRecordWithInvalidType), any(Throwable.class)); - } - - @Test - public void testExecute_AsciiEncoded() throws Exception { - String asciiValue="#@$df"; - Properties info=new Properties(); - info.setProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,"ASCII"); - getUpsertExecutor(DriverManager.getConnection(getUrl(), info)).execute( - createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, asciiValue, - Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); - - verify(upsertListener).upsertDone(1L); - verifyNoMoreInteractions(upsertListener); - - verify(preparedStatement).setObject(1, Long.valueOf(123L)); - verify(preparedStatement).setObject(2, "NameValue"); - verify(preparedStatement).setObject(3, Integer.valueOf(42)); - verify(preparedStatement).setObject(4, PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[]{1,2,3})); - verify(preparedStatement).setObject(5, Boolean.TRUE); - verify(preparedStatement).setObject(6, Bytes.toBytes(asciiValue)); - verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); - verify(preparedStatement).execute(); - 
verifyNoMoreInteractions(preparedStatement); - } - - + protected Connection conn; + protected List columnInfoList; + protected PreparedStatement preparedStatement; + protected UpsertExecutor.UpsertListener upsertListener; + + protected abstract UpsertExecutor getUpsertExecutor(); + + protected abstract R createRecord(Object... columnValues) throws IOException; + + protected abstract UpsertExecutor getUpsertExecutor(Connection conn); + + private static String TIMESTAMP_WITH_NANOS = "2006-11-03 00:00:00.001003000"; + + @Before + public void setUp() throws SQLException { + columnInfoList = ImmutableList.of(new ColumnInfo("ID", Types.BIGINT), + new ColumnInfo("NAME", Types.VARCHAR), new ColumnInfo("AGE", Types.INTEGER), + new ColumnInfo("VALUES", PIntegerArray.INSTANCE.getSqlType()), + new ColumnInfo("BEARD", Types.BOOLEAN), new ColumnInfo("PIC", Types.BINARY), + new ColumnInfo("T", Types.TIMESTAMP)); + + preparedStatement = mock(PreparedStatement.class); + upsertListener = mock(UpsertExecutor.UpsertListener.class); + Properties properties = new Properties(); + properties.setProperty("phoenix.query.dateFormatTimeZone", DateUtil.DEFAULT_TIME_ZONE_ID); + conn = DriverManager.getConnection(getUrl(), properties); + } + + @After + public void tearDown() throws SQLException { + conn.close(); + } + + @Test + public void testExecute() throws Exception { + byte[] binaryData = (byte[]) PBinary.INSTANCE.getSampleValue(); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); + getUpsertExecutor().execute(createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, + encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); + + verify(upsertListener).upsertDone(1L); + verifyNoMoreInteractions(upsertListener); + + verify(preparedStatement).setObject(1, Long.valueOf(123L)); + verify(preparedStatement).setObject(2, "NameValue"); + verify(preparedStatement).setObject(3, Integer.valueOf(42)); + verify(preparedStatement).setObject(4, + PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[] { 1, 2, 3 })); + verify(preparedStatement).setObject(5, Boolean.TRUE); + verify(preparedStatement).setObject(6, binaryData); + verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); + verify(preparedStatement).execute(); + verifyNoMoreInteractions(preparedStatement); + } + + @Test + public void testExecute_TooFewFields() throws Exception { + R recordWithTooFewFields = createRecord(123L, "NameValue"); + getUpsertExecutor().execute(recordWithTooFewFields); + + verify(upsertListener).errorOnRecord(eq(recordWithTooFewFields), any(Throwable.class)); + verifyNoMoreInteractions(upsertListener); + } + + @Test + public void testExecute_TooManyFields() throws Exception { + byte[] binaryData = (byte[]) PBinary.INSTANCE.getSampleValue(); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); + R recordWithTooManyFields = createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, + encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS), "garbage"); + getUpsertExecutor().execute(recordWithTooManyFields); + + verify(upsertListener).upsertDone(1L); + verifyNoMoreInteractions(upsertListener); + + verify(preparedStatement).setObject(1, Long.valueOf(123L)); + verify(preparedStatement).setObject(2, "NameValue"); + verify(preparedStatement).setObject(3, Integer.valueOf(42)); + verify(preparedStatement).setObject(4, + PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[] { 1, 2, 3 })); + 
verify(preparedStatement).setObject(5, Boolean.TRUE); + verify(preparedStatement).setObject(6, binaryData); + verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); + verify(preparedStatement).execute(); + verifyNoMoreInteractions(preparedStatement); + } + + @Test + public void testExecute_NullField() throws Exception { + byte[] binaryData = (byte[]) PBinary.INSTANCE.getSampleValue(); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); + getUpsertExecutor().execute(createRecord(123L, "NameValue", null, Arrays.asList(1, 2, 3), false, + encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); + + verify(upsertListener).upsertDone(1L); + verifyNoMoreInteractions(upsertListener); + + verify(preparedStatement).setObject(1, Long.valueOf(123L)); + verify(preparedStatement).setObject(2, "NameValue"); + verify(preparedStatement).setNull(3, columnInfoList.get(2).getSqlType()); + verify(preparedStatement).setObject(4, + PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[] { 1, 2, 3 })); + verify(preparedStatement).setObject(5, Boolean.FALSE); + verify(preparedStatement).setObject(6, binaryData); + verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); + verify(preparedStatement).execute(); + verifyNoMoreInteractions(preparedStatement); + } + + @Test + public void testExecute_InvalidType() throws Exception { + byte[] binaryData = (byte[]) PBinary.INSTANCE.getSampleValue(); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); + R recordWithInvalidType = createRecord(123L, "NameValue", "ThisIsNotANumber", + Arrays.asList(1, 2, 3), true, encodedBinaryData, Timestamp.valueOf(TIMESTAMP_WITH_NANOS)); + getUpsertExecutor().execute(recordWithInvalidType); + + verify(upsertListener).errorOnRecord(eq(recordWithInvalidType), any(Throwable.class)); + verifyNoMoreInteractions(upsertListener); + } + + @Test + public void testExecute_InvalidBoolean() throws Exception { + byte[] binaryData = (byte[]) PBinary.INSTANCE.getSampleValue(); + String encodedBinaryData = Bytes.toString(Base64.getEncoder().encode(binaryData)); + R csvRecordWithInvalidType = createRecord( + "123,NameValue,42,1:2:3,NotABoolean," + encodedBinaryData + "," + TIMESTAMP_WITH_NANOS); + getUpsertExecutor().execute(csvRecordWithInvalidType); + + verify(upsertListener).errorOnRecord(eq(csvRecordWithInvalidType), any(Throwable.class)); + } + + @Test + public void testExecute_InvalidBinary() throws Exception { + String notBase64Encoded = "#@$df"; + R csvRecordWithInvalidType = + createRecord("123,NameValue,42,1:2:3,true," + notBase64Encoded + "," + TIMESTAMP_WITH_NANOS); + getUpsertExecutor().execute(csvRecordWithInvalidType); + + verify(upsertListener).errorOnRecord(eq(csvRecordWithInvalidType), any(Throwable.class)); + } + + @Test + public void testExecute_AsciiEncoded() throws Exception { + String asciiValue = "#@$df"; + Properties info = new Properties(); + info.setProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, "ASCII"); + getUpsertExecutor(DriverManager.getConnection(getUrl(), info)) + .execute(createRecord(123L, "NameValue", 42, Arrays.asList(1, 2, 3), true, asciiValue, + Timestamp.valueOf(TIMESTAMP_WITH_NANOS))); + + verify(upsertListener).upsertDone(1L); + verifyNoMoreInteractions(upsertListener); + + verify(preparedStatement).setObject(1, Long.valueOf(123L)); + verify(preparedStatement).setObject(2, "NameValue"); + verify(preparedStatement).setObject(3, Integer.valueOf(42)); + 
verify(preparedStatement).setObject(4, + PArrayDataType.instantiatePhoenixArray(PInteger.INSTANCE, new Object[] { 1, 2, 3 })); + verify(preparedStatement).setObject(5, Boolean.TRUE); + verify(preparedStatement).setObject(6, Bytes.toBytes(asciiValue)); + verify(preparedStatement).setObject(7, DateUtil.parseTimestamp(TIMESTAMP_WITH_NANOS)); + verify(preparedStatement).execute(); + verifyNoMoreInteractions(preparedStatement); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/AssertResults.java b/phoenix-core/src/test/java/org/apache/phoenix/util/AssertResults.java index eafd903f8e5..24f1a1c6a0b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/AssertResults.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/AssertResults.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,109 +22,104 @@ import java.util.*; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; - import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.schema.tuple.Tuple; - /** - * * Utility class to assert that a scan returns the expected results - * - * * @since 0.1 */ public class AssertResults { - public static final AssertingIterator NONE = new NoopAssertingIterator(); - - private AssertResults() { + public static final AssertingIterator NONE = new NoopAssertingIterator(); + + private AssertResults() { + } + + public static void assertResults(ResultIterator scanner, Tuple[] results) throws Exception { + assertResults(scanner, new ResultAssertingIterator(Arrays.asList(results).iterator())); + } + + public static void assertUnorderedResults(ResultIterator scanner, Tuple[] results) + throws Exception { + assertResults(scanner, new UnorderedResultAssertingIterator(Arrays.asList(results))); + } + + public static void assertResults(ResultIterator scanner, AssertingIterator iterator) + throws Exception { + try { + for (Tuple result = scanner.next(); result != null; result = scanner.next()) { + iterator.assertNext(result); + } + iterator.assertDone(); + } finally { + scanner.close(); + } + } + + public static interface AssertingIterator { + public void assertNext(Tuple result) throws Exception; + + public void assertDone() throws Exception; + } + + /** + * Use to iterate through results without checking the values against + * @since 0.1 + */ + private static final class NoopAssertingIterator implements AssertingIterator { + @Override + public void assertDone() throws Exception { } - - public static void assertResults(ResultIterator scanner, Tuple[] results) throws Exception { - assertResults(scanner,new ResultAssertingIterator(Arrays.asList(results).iterator())); + + @Override + public void assertNext(Tuple result) throws Exception { } - - public static void assertUnorderedResults(ResultIterator scanner, Tuple[] results) throws Exception { - assertResults(scanner,new UnorderedResultAssertingIterator(Arrays.asList(results))); + } + + public static class ResultAssertingIterator implements AssertingIterator { + private final Iterator expectedResults; + + public ResultAssertingIterator(Iterator expectedResults) { + this.expectedResults = expectedResults; } - - public static void assertResults(ResultIterator scanner, AssertingIterator 
iterator) throws Exception { - try { - for (Tuple result = scanner.next(); result != null; result = scanner.next()) { - iterator.assertNext(result); - } - iterator.assertDone(); - } finally { - scanner.close(); - } + + @Override + public void assertDone() { + assertTrue(!expectedResults.hasNext()); } - - public static interface AssertingIterator { - public void assertNext(Tuple result) throws Exception; - public void assertDone() throws Exception; + + @Override + public void assertNext(Tuple result) throws Exception { + assertTrue(expectedResults.hasNext()); + Tuple expected = expectedResults.next(); + TestUtil.compareTuples(expected, result); } - - /** - * - * Use to iterate through results without checking the values against - * - * - * @since 0.1 - */ - private static final class NoopAssertingIterator implements AssertingIterator { - @Override - public void assertDone() throws Exception { - } - - @Override - public void assertNext(Tuple result) throws Exception { - } + } + + public static class UnorderedResultAssertingIterator implements AssertingIterator { + private final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); + private final Map expectedResults; + + public UnorderedResultAssertingIterator(Collection expectedResults) { + this.expectedResults = new HashMap(expectedResults.size()); + for (Tuple result : expectedResults) { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + result.getKey(ptr); + this.expectedResults.put(ptr, result); + } } - - public static class ResultAssertingIterator implements AssertingIterator { - private final Iterator expectedResults; - - public ResultAssertingIterator(Iterator expectedResults) { - this.expectedResults = expectedResults; - } - - @Override - public void assertDone() { - assertTrue(!expectedResults.hasNext()); - } - - @Override - public void assertNext(Tuple result) throws Exception { - assertTrue(expectedResults.hasNext()); - Tuple expected = expectedResults.next(); - TestUtil.compareTuples(expected, result); - } + + @Override + public void assertDone() { + assertTrue(expectedResults.isEmpty()); } - public static class UnorderedResultAssertingIterator implements AssertingIterator { - private final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable(); - private final Map expectedResults; - - public UnorderedResultAssertingIterator(Collection expectedResults) { - this.expectedResults = new HashMap(expectedResults.size()); - for (Tuple result : expectedResults) { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - result.getKey(ptr); - this.expectedResults.put(ptr,result); - } - } - - @Override - public void assertDone() { - assertTrue(expectedResults.isEmpty()); - } - - @Override - public void assertNext(Tuple result) throws Exception { - result.getKey(tempPtr); - Tuple expected = expectedResults.remove(tempPtr); - TestUtil.compareTuples(expected, result); - } + @Override + public void assertNext(Tuple result) throws Exception { + result.getKey(tempPtr); + Tuple expected = expectedResults.remove(tempPtr); + TestUtil.compareTuples(expected, result); } - + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/Base62EncoderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/Base62EncoderTest.java index d29b5584f43..029c908505a 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/Base62EncoderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/Base62EncoderTest.java @@ -1,18 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under 
one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.util; @@ -22,24 +23,25 @@ public class Base62EncoderTest { - @Test - public final void testPowersBase62() { - // add 1 since pow62 doesn't contain it - long[] pow62 = new long[Base62Encoder.pow62.length + 1]; - pow62[0] = 1; - System.arraycopy(Base62Encoder.pow62, 0, pow62, 1, Base62Encoder.pow62.length); - long input = 0l; - // test 0 - assertEquals("Base 62 encoded value for " + input + " is incorrect ", "0", Base62Encoder.toString(input)); - StringBuilder expectedValBuilder = new StringBuilder("1"); - for (int i = 0; i < pow62.length; ++i) { - input = pow62[i]; - assertEquals("Base 62 encoded value for " + input + " is incorrect ", expectedValBuilder.toString(), - Base62Encoder.toString(input)); - assertEquals("Base 62 encoded value for " + input + " is incorrect ", "-" + expectedValBuilder.toString(), - Base62Encoder.toString(-input)); - expectedValBuilder.append("0"); - } + @Test + public final void testPowersBase62() { + // add 1 since pow62 doesn't contain it + long[] pow62 = new long[Base62Encoder.pow62.length + 1]; + pow62[0] = 1; + System.arraycopy(Base62Encoder.pow62, 0, pow62, 1, Base62Encoder.pow62.length); + long input = 0l; + // test 0 + assertEquals("Base 62 encoded value for " + input + " is incorrect ", "0", + Base62Encoder.toString(input)); + StringBuilder expectedValBuilder = new StringBuilder("1"); + for (int i = 0; i < pow62.length; ++i) { + input = pow62[i]; + assertEquals("Base 62 encoded value for " + input + " is incorrect ", + expectedValBuilder.toString(), Base62Encoder.toString(input)); + assertEquals("Base 62 encoded value for " + input + " is incorrect ", + "-" + expectedValBuilder.toString(), Base62Encoder.toString(-input)); + expectedValBuilder.append("0"); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/ByteUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/ByteUtilTest.java index 13e3f5e1068..4f79e173528 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/ByteUtilTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/util/ByteUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,42 +25,44 @@ public class ByteUtilTest { - @Test - public void testSplitBytes() { - byte[] startRow = Bytes.toBytes("EA"); - byte[] stopRow = Bytes.toBytes("EZ"); - byte[][] splitPoints = Bytes.split(startRow, stopRow, 10); - for (byte[] splitPoint : splitPoints) { - assertTrue(Bytes.toStringBinary(splitPoint), Bytes.compareTo(startRow, splitPoint) <= 0); - assertTrue(Bytes.toStringBinary(splitPoint), Bytes.compareTo(stopRow, splitPoint) >= 0); - } + @Test + public void testSplitBytes() { + byte[] startRow = Bytes.toBytes("EA"); + byte[] stopRow = Bytes.toBytes("EZ"); + byte[][] splitPoints = Bytes.split(startRow, stopRow, 10); + for (byte[] splitPoint : splitPoints) { + assertTrue(Bytes.toStringBinary(splitPoint), Bytes.compareTo(startRow, splitPoint) <= 0); + assertTrue(Bytes.toStringBinary(splitPoint), Bytes.compareTo(stopRow, splitPoint) >= 0); } - - @Test - public void testVIntToBytes() { - for (int i = -10000; i <= 10000; i++) { - byte[] vint = Bytes.vintToBytes(i); - int vintSize = vint.length; - byte[] vint2 = new byte[vint.length]; - assertEquals(vintSize, ByteUtil.vintToBytes(vint2, 0, i)); - assertTrue(Bytes.BYTES_COMPARATOR.compare(vint,vint2) == 0); - } - } - - @Test - public void testNextKey() { - byte[] key = new byte[] {1}; - assertEquals((byte)2, ByteUtil.nextKey(key)[0]); - key = new byte[] {1, (byte)255}; - byte[] nextKey = ByteUtil.nextKey(key); - byte[] expectedKey = new byte[] {2,(byte)0}; - assertArrayEquals(expectedKey, nextKey); - key = ByteUtil.concat(Bytes.toBytes("00D300000000XHP"), PInteger.INSTANCE.toBytes(Integer.MAX_VALUE)); - nextKey = ByteUtil.nextKey(key); - expectedKey = ByteUtil.concat(Bytes.toBytes("00D300000000XHQ"), PInteger.INSTANCE.toBytes(Integer.MIN_VALUE)); - assertArrayEquals(expectedKey, nextKey); - - key = new byte[] {(byte)255}; - assertNull(ByteUtil.nextKey(key)); + } + + @Test + public void testVIntToBytes() { + for (int i = -10000; i <= 10000; i++) { + byte[] vint = Bytes.vintToBytes(i); + int vintSize = vint.length; + byte[] vint2 = new byte[vint.length]; + assertEquals(vintSize, ByteUtil.vintToBytes(vint2, 0, i)); + assertTrue(Bytes.BYTES_COMPARATOR.compare(vint, vint2) == 0); } + } + + @Test + public void testNextKey() { + byte[] key = new byte[] { 1 }; + assertEquals((byte) 2, ByteUtil.nextKey(key)[0]); + key = new byte[] { 1, (byte) 255 }; + byte[] nextKey = ByteUtil.nextKey(key); + byte[] expectedKey = new byte[] { 2, (byte) 0 }; + assertArrayEquals(expectedKey, nextKey); + key = ByteUtil.concat(Bytes.toBytes("00D300000000XHP"), + PInteger.INSTANCE.toBytes(Integer.MAX_VALUE)); + nextKey = ByteUtil.nextKey(key); + expectedKey = ByteUtil.concat(Bytes.toBytes("00D300000000XHQ"), + PInteger.INSTANCE.toBytes(Integer.MIN_VALUE)); + assertArrayEquals(expectedKey, nextKey); + + key = new byte[] { (byte) 255 }; + assertNull(ByteUtil.nextKey(key)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CDCUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CDCUtilTest.java index 7feb261dc81..a70bb419739 100644 --- 
a/phoenix-core/src/test/java/org/apache/phoenix/util/CDCUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CDCUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,48 +15,42 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; -import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.schema.PTable; -import org.junit.Test; +import static org.apache.phoenix.schema.PTable.CDCChangeScope.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.sql.SQLException; import java.util.Arrays; import java.util.HashSet; -import static org.apache.phoenix.schema.PTable.CDCChangeScope.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import org.apache.phoenix.exception.SQLExceptionCode; +import org.apache.phoenix.schema.PTable; +import org.junit.Test; public class CDCUtilTest { - @Test - public void testScopeSetConstruction() throws Exception { - assertEquals(new HashSet<>(Arrays.asList(PRE)), CDCUtil.makeChangeScopeEnumsFromString( - "PRE")); - assertEquals(new HashSet<>(Arrays.asList(PRE)), - CDCUtil.makeChangeScopeEnumsFromString("PRE,")); - assertEquals(new HashSet<>(Arrays.asList(PRE)), - CDCUtil.makeChangeScopeEnumsFromString("PRE, PRE")); - assertEquals(new HashSet<>(Arrays.asList(CHANGE, PRE, POST)), - CDCUtil.makeChangeScopeEnumsFromString("POST,PRE,CHANGE")); - try { - CDCUtil.makeChangeScopeEnumsFromString("DUMMY"); - } catch (SQLException e) { - assertEquals(SQLExceptionCode.UNKNOWN_INCLUDE_CHANGE_SCOPE.getErrorCode(), - e.getErrorCode()); - assertTrue(e.getMessage().endsWith("DUMMY")); - } + @Test + public void testScopeSetConstruction() throws Exception { + assertEquals(new HashSet<>(Arrays.asList(PRE)), CDCUtil.makeChangeScopeEnumsFromString("PRE")); + assertEquals(new HashSet<>(Arrays.asList(PRE)), CDCUtil.makeChangeScopeEnumsFromString("PRE,")); + assertEquals(new HashSet<>(Arrays.asList(PRE)), + CDCUtil.makeChangeScopeEnumsFromString("PRE, PRE")); + assertEquals(new HashSet<>(Arrays.asList(CHANGE, PRE, POST)), + CDCUtil.makeChangeScopeEnumsFromString("POST,PRE,CHANGE")); + try { + CDCUtil.makeChangeScopeEnumsFromString("DUMMY"); + } catch (SQLException e) { + assertEquals(SQLExceptionCode.UNKNOWN_INCLUDE_CHANGE_SCOPE.getErrorCode(), e.getErrorCode()); + assertTrue(e.getMessage().endsWith("DUMMY")); } + } - @Test - public void testScopeStringConstruction() throws Exception { - assertEquals(null, CDCUtil.makeChangeScopeStringFromEnums(null)); - assertEquals("", CDCUtil.makeChangeScopeStringFromEnums( - new HashSet())); - assertEquals("CHANGE,PRE,POST", CDCUtil.makeChangeScopeStringFromEnums( - new HashSet<>(Arrays.asList(CHANGE, PRE, POST)))); - } + @Test + public void testScopeStringConstruction() throws Exception { + assertEquals(null, CDCUtil.makeChangeScopeStringFromEnums(null)); + assertEquals("", CDCUtil.makeChangeScopeStringFromEnums(new HashSet())); + assertEquals("CHANGE,PRE,POST", + CDCUtil.makeChangeScopeStringFromEnums(new HashSet<>(Arrays.asList(CHANGE, PRE, POST)))); + } } 
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/ClientUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/ClientUtilTest.java index 868daebfb8f..bf05280f440 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/ClientUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/ClientUtilTest.java @@ -1,24 +1,31 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.util; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; + +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.hbase.index.table.HTableFactory; @@ -28,66 +35,60 @@ import org.junit.Test; import org.mockito.Mockito; -import java.io.IOException; -import java.util.concurrent.ExecutorService; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - public class ClientUtilTest { - String existingNamespaceOne = "existingNamespaceOne"; - String existingNamespaceTwo = "existingNamespaceTwo"; - String nonExistingNamespace = "nonExistingNamespace"; + String existingNamespaceOne = "existingNamespaceOne"; + String existingNamespaceTwo = "existingNamespaceTwo"; + String nonExistingNamespace = "nonExistingNamespace"; - String[] namespaces = { existingNamespaceOne, existingNamespaceTwo }; + String[] namespaces = { existingNamespaceOne, existingNamespaceTwo }; - @Test - public void testIsHbaseNamespaceAvailableWithExistingNamespace() throws Exception { - Admin mockAdmin = getMockedAdmin(); - assertTrue(ClientUtil.isHBaseNamespaceAvailable(mockAdmin, existingNamespaceOne)); - } + @Test + public void testIsHbaseNamespaceAvailableWithExistingNamespace() throws Exception { + Admin mockAdmin = getMockedAdmin(); + assertTrue(ClientUtil.isHBaseNamespaceAvailable(mockAdmin, existingNamespaceOne)); + } - @Test - public void testIsHbaseNamespaceAvailableWithNonExistingNamespace() throws Exception{ - Admin mockAdmin = getMockedAdmin(); - assertFalse(ClientUtil.isHBaseNamespaceAvailable(mockAdmin,nonExistingNamespace)); - } + @Test + public void testIsHbaseNamespaceAvailableWithNonExistingNamespace() throws Exception { + Admin mockAdmin = getMockedAdmin(); + assertFalse(ClientUtil.isHBaseNamespaceAvailable(mockAdmin, nonExistingNamespace)); + } - private Admin getMockedAdmin() throws Exception { - Admin mockAdmin = Mockito.mock(Admin.class); - Mockito.when(mockAdmin.listNamespaces()).thenReturn(namespaces); - return mockAdmin; - } + private Admin getMockedAdmin() throws Exception { + Admin mockAdmin = Mockito.mock(Admin.class); + Mockito.when(mockAdmin.listNamespaces()).thenReturn(namespaces); + return mockAdmin; + } - @Test - public void testCoprocessorHConnectionGetTableWithClosedConnection() throws Exception { - // Mock Connection object to throw IllegalArgumentException. - Connection connection = Mockito.mock(Connection.class); - Mockito.doThrow(new IllegalArgumentException()).when(connection).getTable(Mockito.any()); - Mockito.doThrow(new IllegalArgumentException()).when(connection).getTable( - Mockito.any(), Mockito.any()); - Mockito.doReturn(true).when(connection).isClosed(); + @Test + public void testCoprocessorHConnectionGetTableWithClosedConnection() throws Exception { + // Mock Connection object to throw IllegalArgumentException. + Connection connection = Mockito.mock(Connection.class); + Mockito.doThrow(new IllegalArgumentException()).when(connection).getTable(Mockito.any()); + Mockito.doThrow(new IllegalArgumentException()).when(connection).getTable(Mockito.any(), + Mockito. 
any()); + Mockito.doReturn(true).when(connection).isClosed(); - // Spy CoprocessorHConnectionTableFactory - RegionCoprocessorEnvironment mockEnv = Mockito.mock(RegionCoprocessorEnvironment.class); - HTableFactory hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(mockEnv); - IndexWriterUtils.CoprocessorHConnectionTableFactory spyedObj = (IndexWriterUtils. - CoprocessorHConnectionTableFactory)Mockito.spy(hTableFactory); - Mockito.doReturn(connection).when(spyedObj).getConnection(); + // Spy CoprocessorHConnectionTableFactory + RegionCoprocessorEnvironment mockEnv = Mockito.mock(RegionCoprocessorEnvironment.class); + HTableFactory hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(mockEnv); + IndexWriterUtils.CoprocessorHConnectionTableFactory spyedObj = + (IndexWriterUtils.CoprocessorHConnectionTableFactory) Mockito.spy(hTableFactory); + Mockito.doReturn(connection).when(spyedObj).getConnection(); - try { - spyedObj.getTable(new ImmutableBytesPtr(Bytes.toBytes("test_table"))); - Assert.fail("IOException exception expected as connection was closed"); - } catch(DoNotRetryIOException e) { - Assert.fail("DoNotRetryIOException not expected instead should throw IOException"); - }catch (IOException e1) { - try { - spyedObj.getTable(new ImmutableBytesPtr(Bytes.toBytes("test_table")), null); - Assert.fail("IOException exception expected as connection was closed"); - } catch (IOException e2) { - // IO Exception is expected. Should fail is any other exception. - } - } + try { + spyedObj.getTable(new ImmutableBytesPtr(Bytes.toBytes("test_table"))); + Assert.fail("IOException exception expected as connection was closed"); + } catch (DoNotRetryIOException e) { + Assert.fail("DoNotRetryIOException not expected instead should throw IOException"); + } catch (IOException e1) { + try { + spyedObj.getTable(new ImmutableBytesPtr(Bytes.toBytes("test_table")), null); + Assert.fail("IOException exception expected as connection was closed"); + } catch (IOException e2) { + // IO Exception is expected. Should fail is any other exception. + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/ColumnInfoTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/ColumnInfoTest.java index 3bc26f2dc64..a0016f0d717 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/ColumnInfoTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/ColumnInfoTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,57 +29,61 @@ public class ColumnInfoTest { - @Test - public void testToFromStringRoundTrip() { - ColumnInfo columnInfo = new ColumnInfo("a.myColumn", Types.INTEGER); - assertEquals(columnInfo, ColumnInfo.fromString(columnInfo.toString())); - } + @Test + public void testToFromStringRoundTrip() { + ColumnInfo columnInfo = new ColumnInfo("a.myColumn", Types.INTEGER); + assertEquals(columnInfo, ColumnInfo.fromString(columnInfo.toString())); + } - @Test(expected=IllegalArgumentException.class) - public void testFromString_InvalidString() { - ColumnInfo.fromString("invalid"); - } + @Test(expected = IllegalArgumentException.class) + public void testFromString_InvalidString() { + ColumnInfo.fromString("invalid"); + } - @Test - public void testFromString_InvalidDataType() { - try { - ColumnInfo.fromString("COLNAME:badType"); - } catch (RuntimeException e) { - assertTrue(e.getCause() instanceof SQLException); - SQLException sqlE = (SQLException)e.getCause(); - assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), sqlE.getErrorCode()); - } + @Test + public void testFromString_InvalidDataType() { + try { + ColumnInfo.fromString("COLNAME:badType"); + } catch (RuntimeException e) { + assertTrue(e.getCause() instanceof SQLException); + SQLException sqlE = (SQLException) e.getCause(); + assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), sqlE.getErrorCode()); } - - @Test - public void testToFromColonInColumnName() { - ColumnInfo columnInfo = new ColumnInfo(":myColumn", Types.INTEGER); - assertEquals(columnInfo, ColumnInfo.fromString(columnInfo.toString())); - } - - @Test - public void testOptionalDescriptionType() { - testType(new ColumnInfo("a.myColumn", Types.CHAR), "CHAR:\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.CHAR, 100), "CHAR(100):\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.VARCHAR), "VARCHAR:\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.VARCHAR, 100), "VARCHAR(100):\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.DECIMAL), "DECIMAL:\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.DECIMAL, 100, 10), "DECIMAL(100,10):\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", Types.BINARY, 5), "BINARY(5):\"a\".\"myColumn\""); + } - // Array types - testType(new ColumnInfo("a.myColumn", PCharArray.INSTANCE.getSqlType(), 3), "CHAR(3) ARRAY:\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", PDecimalArray.INSTANCE.getSqlType(), 10, 2), "DECIMAL(10,2) ARRAY:\"a\".\"myColumn\""); - testType(new ColumnInfo("a.myColumn", PVarcharArray.INSTANCE.getSqlType(), 4), "VARCHAR(4) ARRAY:\"a\".\"myColumn\""); - } + @Test + public void testToFromColonInColumnName() { + ColumnInfo columnInfo = new ColumnInfo(":myColumn", Types.INTEGER); + assertEquals(columnInfo, ColumnInfo.fromString(columnInfo.toString())); + } - private void testType(ColumnInfo columnInfo, String expected) { - assertEquals(expected, columnInfo.toString()); - ColumnInfo reverted = ColumnInfo.fromString(columnInfo.toString()); - assertEquals(reverted.getColumnName(), columnInfo.getColumnName()); - assertEquals(reverted.getDisplayName(), columnInfo.getDisplayName()); - assertEquals(reverted.getSqlType(), columnInfo.getSqlType()); - 
assertEquals(reverted.getMaxLength(), columnInfo.getMaxLength()); - assertEquals(reverted.getScale(), columnInfo.getScale()); - } + @Test + public void testOptionalDescriptionType() { + testType(new ColumnInfo("a.myColumn", Types.CHAR), "CHAR:\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.CHAR, 100), "CHAR(100):\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.VARCHAR), "VARCHAR:\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.VARCHAR, 100), "VARCHAR(100):\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.DECIMAL), "DECIMAL:\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.DECIMAL, 100, 10), + "DECIMAL(100,10):\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", Types.BINARY, 5), "BINARY(5):\"a\".\"myColumn\""); + + // Array types + testType(new ColumnInfo("a.myColumn", PCharArray.INSTANCE.getSqlType(), 3), + "CHAR(3) ARRAY:\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", PDecimalArray.INSTANCE.getSqlType(), 10, 2), + "DECIMAL(10,2) ARRAY:\"a\".\"myColumn\""); + testType(new ColumnInfo("a.myColumn", PVarcharArray.INSTANCE.getSqlType(), 4), + "VARCHAR(4) ARRAY:\"a\".\"myColumn\""); + } + + private void testType(ColumnInfo columnInfo, String expected) { + assertEquals(expected, columnInfo.toString()); + ColumnInfo reverted = ColumnInfo.fromString(columnInfo.toString()); + assertEquals(reverted.getColumnName(), columnInfo.getColumnName()); + assertEquals(reverted.getDisplayName(), columnInfo.getDisplayName()); + assertEquals(reverted.getSqlType(), columnInfo.getSqlType()); + assertEquals(reverted.getMaxLength(), columnInfo.getMaxLength()); + assertEquals(reverted.getScale(), columnInfo.getScale()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java index 9a9408157ae..e8ae524c495 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/DateUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -40,331 +40,317 @@ /** * Test class for {@link DateUtil} - * * @since 2.1.3 */ public class DateUtilTest { - private static final long ONE_HOUR_IN_MILLIS = 1000L * 60L * 60L; - - @Test - public void testDemonstrateSetNanosOnTimestampLosingMillis() { - Timestamp ts1 = new Timestamp(120055); - ts1.setNanos(60); - - Timestamp ts2 = new Timestamp(120100); - ts2.setNanos(60); - - /* - * This really should have been assertFalse() because we started with timestamps that - * had different milliseconds 120055 and 120100. THe problem is that the timestamp's - * constructor converts the milliseconds passed into seconds and assigns the left-over - * milliseconds to the nanos part of the timestamp. If setNanos() is called after that - * then the previous value of nanos gets overwritten resulting in loss of milliseconds. - */ - assertTrue(ts1.equals(ts2)); - - /* - * The right way to deal with timestamps when you have both milliseconds and nanos to assign - * is to use the DateUtil.getTimestamp(long millis, int nanos). 
- */ - ts1 = DateUtil.getTimestamp(120055, 60); - ts2 = DateUtil.getTimestamp(120100, 60); - assertFalse(ts1.equals(ts2)); - assertTrue(ts2.after(ts1)); - } - - @Test - public void testGetDateParser_DefaultTimeZone() throws ParseException { - Date date = new Date(DateUtil.getDateTimeParser("yyyy-MM-dd", PDate.INSTANCE).parseDateTime("1970-01-01")); - assertEquals(0, date.getTime()); - } - - @Test - public void testGetDateParser_CustomTimeZone() throws ParseException { - Date date = new Date(DateUtil.getDateTimeParser( - "yyyy-MM-dd", PDate.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()).parseDateTime("1970-01-01")); - assertEquals(-ONE_HOUR_IN_MILLIS, date.getTime()); - } - - @Test - public void testGetDateParser_LocalTimeZone() throws ParseException { - Date date = new Date(DateUtil.getDateTimeParser( - "yyyy-MM-dd", PDate.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("1970-01-01")); - assertEquals(Date.valueOf("1970-01-01"), date); - } - - @Test - public void testGetTimestampParser_DefaultTimeZone() throws ParseException { - Timestamp ts = new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE) - .parseDateTime("1970-01-01 00:00:00")); - assertEquals(0, ts.getTime()); - } - - @Test - public void testGetTimestampParser_CustomTimeZone() throws ParseException { - Timestamp ts = new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()) - .parseDateTime("1970-01-01 00:00:00")); - assertEquals(-ONE_HOUR_IN_MILLIS, ts.getTime()); - } - - @Test - public void testGetTimestampParser_LocalTimeZone() throws ParseException { - Timestamp ts = new Timestamp(DateUtil.getDateTimeParser( - "yyyy-MM-dd HH:mm:ss", - PTimestamp.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("1970-01-01 00:00:00")); - assertEquals(Timestamp.valueOf("1970-01-01 00:00:00"), ts); - } - - @Test - public void testGetTimeParser_DefaultTimeZone() throws ParseException { - Time time = new Time(DateUtil.getDateTimeParser("HH:mm:ss", PTime.INSTANCE).parseDateTime("00:00:00")); - assertEquals(0, time.getTime()); - } - - @Test - public void testGetTimeParser_CustomTimeZone() throws ParseException { - Time time = new Time(DateUtil.getDateTimeParser( - "HH:mm:ss", - PTime.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()).parseDateTime("00:00:00")); - assertEquals(-ONE_HOUR_IN_MILLIS, time.getTime()); - } - - @Test - public void testGetTimeParser_LocalTimeZone() throws ParseException { - Time time = new Time(DateUtil.getDateTimeParser( - "HH:mm:ss", PTime.INSTANCE, TimeZone.getDefault().getID()).parseDateTime("00:00:00")); - assertEquals(Time.valueOf("00:00:00"), time); - } - - @Test - public void testParseDate() { - assertEquals(10000L, DateUtil.parseDate("1970-01-01 00:00:10").getTime()); - } - - @Test - public void testParseDate_PureDate() { - assertEquals(0L, DateUtil.parseDate("1970-01-01").getTime()); - } - - @Test(expected = IllegalDataException.class) - public void testParseDate_InvalidDate() { - DateUtil.parseDate("not-a-date"); - } - - @Test - public void testParseTime() { - assertEquals(10000L, DateUtil.parseTime("1970-01-01 00:00:10").getTime()); - } - - @Test(expected=IllegalDataException.class) - public void testParseTime_InvalidTime() { - DateUtil.parseDate("not-a-time"); - } - - @Test - public void testParseTimestamp() { - assertEquals(10000L, DateUtil.parseTimestamp("1970-01-01 00:00:10").getTime()); - } - - @Test - public void testParseTimestamp_WithMillis() { - assertEquals(10123L, 
DateUtil.parseTimestamp("1970-01-01 00:00:10.123").getTime()); - } - - @Test - public void testParseTimestamp_WithNanos() { - assertEquals(123000000, DateUtil.parseTimestamp("1970-01-01 00:00:10.123").getNanos()); - - assertEquals(123456780, DateUtil.parseTimestamp("1970-01-01 00:00:10.12345678").getNanos - ()); - assertEquals(999999999, DateUtil.parseTimestamp("1970-01-01 00:00:10.999999999").getNanos - ()); - - } - - @Test(expected=IllegalDataException.class) - public void testParseTimestamp_tooLargeNanos() { - DateUtil.parseTimestamp("1970-01-01 00:00:10.9999999999"); - } - - @Test(expected=IllegalDataException.class) - public void testParseTimestamp_missingNanos() { - DateUtil.parseTimestamp("1970-01-01 00:00:10."); - } - @Test(expected=IllegalDataException.class) - public void testParseTimestamp_negativeNanos() { - DateUtil.parseTimestamp("1970-01-01 00:00:10.-1"); - } - - @Test(expected=IllegalDataException.class) - public void testParseTimestamp_InvalidTimestamp() { - DateUtil.parseTimestamp("not-a-timestamp"); - } - - // This test absolutely relies on JVM TZ being set to America/Los_Angeles, - // and is going to fail otherwise. Maven already sets this. - @Test - public void testTZCorrection() { - - TimeZone tz = TimeZone.getDefault(); - - // First with the current time - LocalDateTime nowLDT = LocalDateTime.now(); - Instant nowInstantLocal = nowLDT.atZone(ZoneId.systemDefault()).toInstant(); - Instant nowInstantGMT = nowLDT.atZone(ZoneOffset.UTC).toInstant(); - - java.sql.Date sqlDateNowLocal = new java.sql.Date(nowInstantLocal.toEpochMilli()); - java.sql.Time sqlTimeNowLocal = new java.sql.Time(nowInstantLocal.toEpochMilli()); - java.sql.Timestamp sqlTimestampNowLocal = - new java.sql.Timestamp(nowInstantLocal.toEpochMilli()); - - java.sql.Date sqlDateNowGMT = new java.sql.Date(nowInstantGMT.toEpochMilli()); - java.sql.Time sqlTimeNowGMT = new java.sql.Time(nowInstantGMT.toEpochMilli()); - java.sql.Timestamp sqlTimestampNowGMT = - new java.sql.Timestamp(nowInstantGMT.toEpochMilli()); - - assertEquals(DateUtil.applyInputDisplacement(sqlDateNowLocal, tz), sqlDateNowGMT); - assertEquals(DateUtil.applyInputDisplacement(sqlTimeNowLocal, tz), sqlTimeNowGMT); - assertEquals(DateUtil.applyInputDisplacement(sqlTimestampNowLocal, tz), sqlTimestampNowGMT); - - assertEquals(DateUtil.applyOutputDisplacement(sqlDateNowGMT, tz), sqlDateNowLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimeNowGMT, tz), sqlTimeNowLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampNowGMT, tz), - sqlTimestampNowLocal); - - // Make sure that we don't use a fixed offset - - LocalDateTime summerLDT = LocalDateTime.of(2023, 6, 01, 10, 10, 10); - LocalDateTime winterLDT = LocalDateTime.of(2023, 1, 01, 10, 10, 10); - - Instant summerInstantLocal = summerLDT.atZone(ZoneId.systemDefault()).toInstant(); - Instant summerInstantDisplaced = summerLDT.atZone(ZoneOffset.UTC).toInstant(); - - Instant winterInstantLocal = winterLDT.atZone(ZoneId.systemDefault()).toInstant(); - Instant winterInstantDisplaced = winterLDT.atZone(ZoneOffset.UTC).toInstant(); - - java.sql.Date sqlDateSummerLocal = new java.sql.Date(summerInstantLocal.toEpochMilli()); - java.sql.Time sqlTimeSummerLocal = new java.sql.Time(summerInstantLocal.toEpochMilli()); - java.sql.Timestamp sqlTimestampSummerLocal = - new java.sql.Timestamp(summerInstantLocal.toEpochMilli()); - - java.sql.Date sqlDateSummerDisplaced = - new java.sql.Date(summerInstantDisplaced.toEpochMilli()); - java.sql.Time sqlTimeSummerDisplaced = - new 
java.sql.Time(summerInstantDisplaced.toEpochMilli()); - java.sql.Timestamp sqlTimestampSummerDisplaced = - new java.sql.Timestamp(summerInstantDisplaced.toEpochMilli()); - - java.sql.Date sqlDateWinterLocal = new java.sql.Date(winterInstantLocal.toEpochMilli()); - java.sql.Time sqlTimeWinterLocal = new java.sql.Time(winterInstantLocal.toEpochMilli()); - java.sql.Timestamp sqlTimestampWinterLocal = - new java.sql.Timestamp(winterInstantLocal.toEpochMilli()); - - java.sql.Date sqlDateWinterDisplaced = - new java.sql.Date(winterInstantDisplaced.toEpochMilli()); - java.sql.Time sqlTimeWinterDisplaced = - new java.sql.Time(winterInstantDisplaced.toEpochMilli()); - java.sql.Timestamp sqlTimestampWinterDisplaced = - new java.sql.Timestamp(winterInstantDisplaced.toEpochMilli()); - - assertEquals(DateUtil.applyInputDisplacement(sqlDateSummerLocal, tz), - sqlDateSummerDisplaced); - assertEquals(DateUtil.applyInputDisplacement(sqlTimeSummerLocal, tz), - sqlTimeSummerDisplaced); - assertEquals(DateUtil.applyInputDisplacement(sqlTimestampSummerLocal, tz), - sqlTimestampSummerDisplaced); - - assertEquals(DateUtil.applyOutputDisplacement(sqlDateSummerDisplaced, tz), - sqlDateSummerLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimeSummerDisplaced, tz), - sqlTimeSummerLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampSummerDisplaced, tz), - sqlTimestampSummerLocal); - - assertEquals(DateUtil.applyInputDisplacement(sqlDateWinterLocal, tz), - sqlDateWinterDisplaced); - assertEquals(DateUtil.applyInputDisplacement(sqlTimeWinterLocal, tz), - sqlTimeWinterDisplaced); - assertEquals(DateUtil.applyInputDisplacement(sqlTimestampWinterLocal, tz), - sqlTimestampWinterDisplaced); - - assertEquals(DateUtil.applyOutputDisplacement(sqlDateWinterDisplaced, tz), - sqlDateWinterLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimeWinterDisplaced, tz), - sqlTimeWinterLocal); - assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampWinterDisplaced, tz), - sqlTimestampWinterLocal); - - // This also demonstrates why you SHOULD NOT use the java.sql. temporal types with - // WITHOUT TIMEZONE types. 
- - // Check the dates around DST switch - ZoneId pacific = ZoneId.of("America/Los_Angeles"); - assertEquals("Test must be run in America/Los_Angeles time zone", ZoneId.systemDefault(), - pacific); - LocalDateTime endOfWinter = LocalDateTime.of(2023, 3, 12, 1, 59, 59); - // There is no 2:00, the next time is 3:00 - LocalDateTime nonExistent = LocalDateTime.of(2023, 3, 12, 2, 0, 0); - LocalDateTime startOfSummer = LocalDateTime.of(2023, 3, 12, 3, 0, 0); - LocalDateTime endOfSummer = LocalDateTime.of(2023, 1, 05, 00, 59, 59); - // Time warps back to 1:00 instead of reaching 2:00 the first time - LocalDateTime ambiguous = LocalDateTime.of(2023, 1, 05, 1, 30, 0); - LocalDateTime startOfWinter = LocalDateTime.of(2023, 1, 05, 2, 0, 0); - - java.sql.Timestamp endOfWinterLocal = - java.sql.Timestamp.from(endOfWinter.atZone(pacific).toInstant()); - java.sql.Timestamp endOfWinterDisplaced = - java.sql.Timestamp.from(endOfWinter.atZone(ZoneOffset.UTC).toInstant()); - assertEquals(DateUtil.applyInputDisplacement(endOfWinterLocal, tz), endOfWinterDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(endOfWinterDisplaced, tz), endOfWinterLocal); - - java.sql.Timestamp startOfSummerLocal = - java.sql.Timestamp.from(startOfSummer.atZone(pacific).toInstant()); - java.sql.Timestamp startOfSummerDisplaced = - java.sql.Timestamp.from(startOfSummer.atZone(ZoneOffset.UTC).toInstant()); - assertEquals(DateUtil.applyInputDisplacement(startOfSummerLocal, tz), - startOfSummerDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(startOfSummerDisplaced, tz), - startOfSummerLocal); - - // This just gives us 3:00 - java.sql.Timestamp nonExistentLocal = - java.sql.Timestamp.from(nonExistent.atZone(pacific).toInstant()); - assertEquals(nonExistentLocal, startOfSummerLocal); - java.sql.Timestamp nonExistentDisplaced = - java.sql.Timestamp.from(nonExistent.atZone(ZoneOffset.UTC).toInstant()); - // we get a valid date - assertEquals(DateUtil.applyInputDisplacement(nonExistentLocal, tz), startOfSummerDisplaced); - // This conversion is ambigiuous, but in this direction we get one Local date for two - // different displaced dates - assertNotEquals(nonExistentDisplaced, startOfSummerDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(nonExistentDisplaced, tz), nonExistentLocal); - assertEquals(DateUtil.applyOutputDisplacement(startOfSummerDisplaced, tz), - nonExistentLocal); - - java.sql.Timestamp endOfSummerLocal = - java.sql.Timestamp.from(endOfSummer.atZone(pacific).toInstant()); - java.sql.Timestamp endOfSummerDisplaced = - java.sql.Timestamp.from(endOfSummer.atZone(ZoneOffset.UTC).toInstant()); - assertEquals(DateUtil.applyInputDisplacement(endOfSummerLocal, tz), endOfSummerDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(endOfSummerDisplaced, tz), endOfSummerLocal); - - // Confirm that we do the same thing as Java - java.sql.Timestamp ambiguousLocal = - java.sql.Timestamp.from(ambiguous.atZone(pacific).toInstant()); - java.sql.Timestamp ambiguousDisplaced = - java.sql.Timestamp.from(ambiguous.atZone(ZoneOffset.UTC).toInstant()); - assertEquals(DateUtil.applyInputDisplacement(ambiguousLocal, tz), ambiguousDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(ambiguousDisplaced, tz), ambiguousLocal); - - java.sql.Timestamp startOfWinterLocal = - java.sql.Timestamp.from(startOfWinter.atZone(pacific).toInstant()); - java.sql.Timestamp startOfWinterDisplaced = - java.sql.Timestamp.from(startOfWinter.atZone(ZoneOffset.UTC).toInstant()); - 
assertEquals(DateUtil.applyInputDisplacement(startOfWinterLocal, tz), - startOfWinterDisplaced); - assertEquals(DateUtil.applyOutputDisplacement(startOfWinterDisplaced, tz), - startOfWinterLocal); - } + private static final long ONE_HOUR_IN_MILLIS = 1000L * 60L * 60L; + + @Test + public void testDemonstrateSetNanosOnTimestampLosingMillis() { + Timestamp ts1 = new Timestamp(120055); + ts1.setNanos(60); + + Timestamp ts2 = new Timestamp(120100); + ts2.setNanos(60); + + /* + * This really should have been assertFalse() because we started with timestamps that had + * different milliseconds 120055 and 120100. THe problem is that the timestamp's constructor + * converts the milliseconds passed into seconds and assigns the left-over milliseconds to the + * nanos part of the timestamp. If setNanos() is called after that then the previous value of + * nanos gets overwritten resulting in loss of milliseconds. + */ + assertTrue(ts1.equals(ts2)); + + /* + * The right way to deal with timestamps when you have both milliseconds and nanos to assign is + * to use the DateUtil.getTimestamp(long millis, int nanos). + */ + ts1 = DateUtil.getTimestamp(120055, 60); + ts2 = DateUtil.getTimestamp(120100, 60); + assertFalse(ts1.equals(ts2)); + assertTrue(ts2.after(ts1)); + } + + @Test + public void testGetDateParser_DefaultTimeZone() throws ParseException { + Date date = new Date( + DateUtil.getDateTimeParser("yyyy-MM-dd", PDate.INSTANCE).parseDateTime("1970-01-01")); + assertEquals(0, date.getTime()); + } + + @Test + public void testGetDateParser_CustomTimeZone() throws ParseException { + Date date = new Date(DateUtil + .getDateTimeParser("yyyy-MM-dd", PDate.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()) + .parseDateTime("1970-01-01")); + assertEquals(-ONE_HOUR_IN_MILLIS, date.getTime()); + } + + @Test + public void testGetDateParser_LocalTimeZone() throws ParseException { + Date date = new Date( + DateUtil.getDateTimeParser("yyyy-MM-dd", PDate.INSTANCE, TimeZone.getDefault().getID()) + .parseDateTime("1970-01-01")); + assertEquals(Date.valueOf("1970-01-01"), date); + } + + @Test + public void testGetTimestampParser_DefaultTimeZone() throws ParseException { + Timestamp ts = + new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE) + .parseDateTime("1970-01-01 00:00:00")); + assertEquals(0, ts.getTime()); + } + + @Test + public void testGetTimestampParser_CustomTimeZone() throws ParseException { + Timestamp ts = + new Timestamp(DateUtil.getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE, + TimeZone.getTimeZone("GMT+1").getID()).parseDateTime("1970-01-01 00:00:00")); + assertEquals(-ONE_HOUR_IN_MILLIS, ts.getTime()); + } + + @Test + public void testGetTimestampParser_LocalTimeZone() throws ParseException { + Timestamp ts = new Timestamp(DateUtil + .getDateTimeParser("yyyy-MM-dd HH:mm:ss", PTimestamp.INSTANCE, TimeZone.getDefault().getID()) + .parseDateTime("1970-01-01 00:00:00")); + assertEquals(Timestamp.valueOf("1970-01-01 00:00:00"), ts); + } + + @Test + public void testGetTimeParser_DefaultTimeZone() throws ParseException { + Time time = + new Time(DateUtil.getDateTimeParser("HH:mm:ss", PTime.INSTANCE).parseDateTime("00:00:00")); + assertEquals(0, time.getTime()); + } + + @Test + public void testGetTimeParser_CustomTimeZone() throws ParseException { + Time time = new Time( + DateUtil.getDateTimeParser("HH:mm:ss", PTime.INSTANCE, TimeZone.getTimeZone("GMT+1").getID()) + .parseDateTime("00:00:00")); + assertEquals(-ONE_HOUR_IN_MILLIS, time.getTime()); + } + + @Test + 
public void testGetTimeParser_LocalTimeZone() throws ParseException { + Time time = + new Time(DateUtil.getDateTimeParser("HH:mm:ss", PTime.INSTANCE, TimeZone.getDefault().getID()) + .parseDateTime("00:00:00")); + assertEquals(Time.valueOf("00:00:00"), time); + } + + @Test + public void testParseDate() { + assertEquals(10000L, DateUtil.parseDate("1970-01-01 00:00:10").getTime()); + } + + @Test + public void testParseDate_PureDate() { + assertEquals(0L, DateUtil.parseDate("1970-01-01").getTime()); + } + + @Test(expected = IllegalDataException.class) + public void testParseDate_InvalidDate() { + DateUtil.parseDate("not-a-date"); + } + + @Test + public void testParseTime() { + assertEquals(10000L, DateUtil.parseTime("1970-01-01 00:00:10").getTime()); + } + + @Test(expected = IllegalDataException.class) + public void testParseTime_InvalidTime() { + DateUtil.parseDate("not-a-time"); + } + + @Test + public void testParseTimestamp() { + assertEquals(10000L, DateUtil.parseTimestamp("1970-01-01 00:00:10").getTime()); + } + + @Test + public void testParseTimestamp_WithMillis() { + assertEquals(10123L, DateUtil.parseTimestamp("1970-01-01 00:00:10.123").getTime()); + } + + @Test + public void testParseTimestamp_WithNanos() { + assertEquals(123000000, DateUtil.parseTimestamp("1970-01-01 00:00:10.123").getNanos()); + + assertEquals(123456780, DateUtil.parseTimestamp("1970-01-01 00:00:10.12345678").getNanos()); + assertEquals(999999999, DateUtil.parseTimestamp("1970-01-01 00:00:10.999999999").getNanos()); + + } + + @Test(expected = IllegalDataException.class) + public void testParseTimestamp_tooLargeNanos() { + DateUtil.parseTimestamp("1970-01-01 00:00:10.9999999999"); + } + + @Test(expected = IllegalDataException.class) + public void testParseTimestamp_missingNanos() { + DateUtil.parseTimestamp("1970-01-01 00:00:10."); + } + + @Test(expected = IllegalDataException.class) + public void testParseTimestamp_negativeNanos() { + DateUtil.parseTimestamp("1970-01-01 00:00:10.-1"); + } + + @Test(expected = IllegalDataException.class) + public void testParseTimestamp_InvalidTimestamp() { + DateUtil.parseTimestamp("not-a-timestamp"); + } + + // This test absolutely relies on JVM TZ being set to America/Los_Angeles, + // and is going to fail otherwise. Maven already sets this. 
+ @Test + public void testTZCorrection() { + + TimeZone tz = TimeZone.getDefault(); + + // First with the current time + LocalDateTime nowLDT = LocalDateTime.now(); + Instant nowInstantLocal = nowLDT.atZone(ZoneId.systemDefault()).toInstant(); + Instant nowInstantGMT = nowLDT.atZone(ZoneOffset.UTC).toInstant(); + + java.sql.Date sqlDateNowLocal = new java.sql.Date(nowInstantLocal.toEpochMilli()); + java.sql.Time sqlTimeNowLocal = new java.sql.Time(nowInstantLocal.toEpochMilli()); + java.sql.Timestamp sqlTimestampNowLocal = + new java.sql.Timestamp(nowInstantLocal.toEpochMilli()); + + java.sql.Date sqlDateNowGMT = new java.sql.Date(nowInstantGMT.toEpochMilli()); + java.sql.Time sqlTimeNowGMT = new java.sql.Time(nowInstantGMT.toEpochMilli()); + java.sql.Timestamp sqlTimestampNowGMT = new java.sql.Timestamp(nowInstantGMT.toEpochMilli()); + + assertEquals(DateUtil.applyInputDisplacement(sqlDateNowLocal, tz), sqlDateNowGMT); + assertEquals(DateUtil.applyInputDisplacement(sqlTimeNowLocal, tz), sqlTimeNowGMT); + assertEquals(DateUtil.applyInputDisplacement(sqlTimestampNowLocal, tz), sqlTimestampNowGMT); + + assertEquals(DateUtil.applyOutputDisplacement(sqlDateNowGMT, tz), sqlDateNowLocal); + assertEquals(DateUtil.applyOutputDisplacement(sqlTimeNowGMT, tz), sqlTimeNowLocal); + assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampNowGMT, tz), sqlTimestampNowLocal); + + // Make sure that we don't use a fixed offset + + LocalDateTime summerLDT = LocalDateTime.of(2023, 6, 01, 10, 10, 10); + LocalDateTime winterLDT = LocalDateTime.of(2023, 1, 01, 10, 10, 10); + + Instant summerInstantLocal = summerLDT.atZone(ZoneId.systemDefault()).toInstant(); + Instant summerInstantDisplaced = summerLDT.atZone(ZoneOffset.UTC).toInstant(); + + Instant winterInstantLocal = winterLDT.atZone(ZoneId.systemDefault()).toInstant(); + Instant winterInstantDisplaced = winterLDT.atZone(ZoneOffset.UTC).toInstant(); + + java.sql.Date sqlDateSummerLocal = new java.sql.Date(summerInstantLocal.toEpochMilli()); + java.sql.Time sqlTimeSummerLocal = new java.sql.Time(summerInstantLocal.toEpochMilli()); + java.sql.Timestamp sqlTimestampSummerLocal = + new java.sql.Timestamp(summerInstantLocal.toEpochMilli()); + + java.sql.Date sqlDateSummerDisplaced = new java.sql.Date(summerInstantDisplaced.toEpochMilli()); + java.sql.Time sqlTimeSummerDisplaced = new java.sql.Time(summerInstantDisplaced.toEpochMilli()); + java.sql.Timestamp sqlTimestampSummerDisplaced = + new java.sql.Timestamp(summerInstantDisplaced.toEpochMilli()); + + java.sql.Date sqlDateWinterLocal = new java.sql.Date(winterInstantLocal.toEpochMilli()); + java.sql.Time sqlTimeWinterLocal = new java.sql.Time(winterInstantLocal.toEpochMilli()); + java.sql.Timestamp sqlTimestampWinterLocal = + new java.sql.Timestamp(winterInstantLocal.toEpochMilli()); + + java.sql.Date sqlDateWinterDisplaced = new java.sql.Date(winterInstantDisplaced.toEpochMilli()); + java.sql.Time sqlTimeWinterDisplaced = new java.sql.Time(winterInstantDisplaced.toEpochMilli()); + java.sql.Timestamp sqlTimestampWinterDisplaced = + new java.sql.Timestamp(winterInstantDisplaced.toEpochMilli()); + + assertEquals(DateUtil.applyInputDisplacement(sqlDateSummerLocal, tz), sqlDateSummerDisplaced); + assertEquals(DateUtil.applyInputDisplacement(sqlTimeSummerLocal, tz), sqlTimeSummerDisplaced); + assertEquals(DateUtil.applyInputDisplacement(sqlTimestampSummerLocal, tz), + sqlTimestampSummerDisplaced); + + assertEquals(DateUtil.applyOutputDisplacement(sqlDateSummerDisplaced, tz), sqlDateSummerLocal); + 
assertEquals(DateUtil.applyOutputDisplacement(sqlTimeSummerDisplaced, tz), sqlTimeSummerLocal); + assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampSummerDisplaced, tz), + sqlTimestampSummerLocal); + + assertEquals(DateUtil.applyInputDisplacement(sqlDateWinterLocal, tz), sqlDateWinterDisplaced); + assertEquals(DateUtil.applyInputDisplacement(sqlTimeWinterLocal, tz), sqlTimeWinterDisplaced); + assertEquals(DateUtil.applyInputDisplacement(sqlTimestampWinterLocal, tz), + sqlTimestampWinterDisplaced); + + assertEquals(DateUtil.applyOutputDisplacement(sqlDateWinterDisplaced, tz), sqlDateWinterLocal); + assertEquals(DateUtil.applyOutputDisplacement(sqlTimeWinterDisplaced, tz), sqlTimeWinterLocal); + assertEquals(DateUtil.applyOutputDisplacement(sqlTimestampWinterDisplaced, tz), + sqlTimestampWinterLocal); + + // This also demonstrates why you SHOULD NOT use the java.sql. temporal types with + // WITHOUT TIMEZONE types. + + // Check the dates around DST switch + ZoneId pacific = ZoneId.of("America/Los_Angeles"); + assertEquals("Test must be run in America/Los_Angeles time zone", ZoneId.systemDefault(), + pacific); + LocalDateTime endOfWinter = LocalDateTime.of(2023, 3, 12, 1, 59, 59); + // There is no 2:00, the next time is 3:00 + LocalDateTime nonExistent = LocalDateTime.of(2023, 3, 12, 2, 0, 0); + LocalDateTime startOfSummer = LocalDateTime.of(2023, 3, 12, 3, 0, 0); + LocalDateTime endOfSummer = LocalDateTime.of(2023, 1, 05, 00, 59, 59); + // Time warps back to 1:00 instead of reaching 2:00 the first time + LocalDateTime ambiguous = LocalDateTime.of(2023, 1, 05, 1, 30, 0); + LocalDateTime startOfWinter = LocalDateTime.of(2023, 1, 05, 2, 0, 0); + + java.sql.Timestamp endOfWinterLocal = + java.sql.Timestamp.from(endOfWinter.atZone(pacific).toInstant()); + java.sql.Timestamp endOfWinterDisplaced = + java.sql.Timestamp.from(endOfWinter.atZone(ZoneOffset.UTC).toInstant()); + assertEquals(DateUtil.applyInputDisplacement(endOfWinterLocal, tz), endOfWinterDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(endOfWinterDisplaced, tz), endOfWinterLocal); + + java.sql.Timestamp startOfSummerLocal = + java.sql.Timestamp.from(startOfSummer.atZone(pacific).toInstant()); + java.sql.Timestamp startOfSummerDisplaced = + java.sql.Timestamp.from(startOfSummer.atZone(ZoneOffset.UTC).toInstant()); + assertEquals(DateUtil.applyInputDisplacement(startOfSummerLocal, tz), startOfSummerDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(startOfSummerDisplaced, tz), startOfSummerLocal); + + // This just gives us 3:00 + java.sql.Timestamp nonExistentLocal = + java.sql.Timestamp.from(nonExistent.atZone(pacific).toInstant()); + assertEquals(nonExistentLocal, startOfSummerLocal); + java.sql.Timestamp nonExistentDisplaced = + java.sql.Timestamp.from(nonExistent.atZone(ZoneOffset.UTC).toInstant()); + // we get a valid date + assertEquals(DateUtil.applyInputDisplacement(nonExistentLocal, tz), startOfSummerDisplaced); + // This conversion is ambigiuous, but in this direction we get one Local date for two + // different displaced dates + assertNotEquals(nonExistentDisplaced, startOfSummerDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(nonExistentDisplaced, tz), nonExistentLocal); + assertEquals(DateUtil.applyOutputDisplacement(startOfSummerDisplaced, tz), nonExistentLocal); + + java.sql.Timestamp endOfSummerLocal = + java.sql.Timestamp.from(endOfSummer.atZone(pacific).toInstant()); + java.sql.Timestamp endOfSummerDisplaced = + 
java.sql.Timestamp.from(endOfSummer.atZone(ZoneOffset.UTC).toInstant()); + assertEquals(DateUtil.applyInputDisplacement(endOfSummerLocal, tz), endOfSummerDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(endOfSummerDisplaced, tz), endOfSummerLocal); + + // Confirm that we do the same thing as Java + java.sql.Timestamp ambiguousLocal = + java.sql.Timestamp.from(ambiguous.atZone(pacific).toInstant()); + java.sql.Timestamp ambiguousDisplaced = + java.sql.Timestamp.from(ambiguous.atZone(ZoneOffset.UTC).toInstant()); + assertEquals(DateUtil.applyInputDisplacement(ambiguousLocal, tz), ambiguousDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(ambiguousDisplaced, tz), ambiguousLocal); + + java.sql.Timestamp startOfWinterLocal = + java.sql.Timestamp.from(startOfWinter.atZone(pacific).toInstant()); + java.sql.Timestamp startOfWinterDisplaced = + java.sql.Timestamp.from(startOfWinter.atZone(ZoneOffset.UTC).toInstant()); + assertEquals(DateUtil.applyInputDisplacement(startOfWinterLocal, tz), startOfWinterDisplaced); + assertEquals(DateUtil.applyOutputDisplacement(startOfWinterDisplaced, tz), startOfWinterLocal); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/EquiDepthStreamHistogramTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/EquiDepthStreamHistogramTest.java index 9cf1f43c128..3ff78cdf948 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/EquiDepthStreamHistogramTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/EquiDepthStreamHistogramTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,269 +35,270 @@ import org.junit.Test; public class EquiDepthStreamHistogramTest { - byte[] bytesA = Bytes.toBytes("a"); - byte[] bytesB = Bytes.toBytes("b"); - byte[] bytesC = Bytes.toBytes("c"); - byte[] bytesD = Bytes.toBytes("d"); - byte[] bytesE = Bytes.toBytes("e"); - Bar a_b; - Bar b_c; - Bar c_d; - Bar d_e; + byte[] bytesA = Bytes.toBytes("a"); + byte[] bytesB = Bytes.toBytes("b"); + byte[] bytesC = Bytes.toBytes("c"); + byte[] bytesD = Bytes.toBytes("d"); + byte[] bytesE = Bytes.toBytes("e"); + Bar a_b; + Bar b_c; + Bar c_d; + Bar d_e; - @Before - public void resetBars() { - a_b = new Bar(bytesA, bytesB); - b_c = new Bar(bytesB, bytesC); - c_d = new Bar(bytesC, bytesD); - d_e = new Bar(bytesD, bytesE); - } + @Before + public void resetBars() { + a_b = new Bar(bytesA, bytesB); + b_c = new Bar(bytesB, bytesC); + c_d = new Bar(bytesC, bytesD); + d_e = new Bar(bytesD, bytesE); + } - @Test - public void testComparator() { - // test ordering - List barList = new ArrayList<>(); - barList.add(b_c); - barList.add(c_d); - barList.add(a_b); + @Test + public void testComparator() { + // test ordering + List barList = new ArrayList<>(); + barList.add(b_c); + barList.add(c_d); + barList.add(a_b); - Collections.sort(barList); - assertEquals(a_b, barList.get(0)); - assertEquals(b_c, barList.get(1)); - assertEquals(c_d, barList.get(2)); + Collections.sort(barList); + assertEquals(a_b, barList.get(0)); + assertEquals(b_c, barList.get(1)); + assertEquals(c_d, barList.get(2)); - // test when a bar fully contains another - Bar a_a = new Bar(bytesA, bytesA); - assertEquals(0, a_b.compareTo(a_a)); - assertEquals(0, a_a.compareTo(a_b)); - assertEquals(1, b_c.compareTo(a_a)); - assertEquals(-1, a_a.compareTo(b_c)); - assertEquals(0, Collections.binarySearch(barList, a_a)); - 
assertEquals(1, Collections.binarySearch(barList, new Bar(bytesB, bytesB))); - assertEquals(-4, Collections.binarySearch(barList, new Bar(Bytes.toBytes("e"), Bytes.toBytes("e")))); - assertEquals(0, a_a.compareTo(a_a)); - } + // test when a bar fully contains another + Bar a_a = new Bar(bytesA, bytesA); + assertEquals(0, a_b.compareTo(a_a)); + assertEquals(0, a_a.compareTo(a_b)); + assertEquals(1, b_c.compareTo(a_a)); + assertEquals(-1, a_a.compareTo(b_c)); + assertEquals(0, Collections.binarySearch(barList, a_a)); + assertEquals(1, Collections.binarySearch(barList, new Bar(bytesB, bytesB))); + assertEquals(-4, + Collections.binarySearch(barList, new Bar(Bytes.toBytes("e"), Bytes.toBytes("e")))); + assertEquals(0, a_a.compareTo(a_a)); + } - @Test - public void testGetBar() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(10); - Bar bar = histo.getBar(bytesB); - assertTrue(Arrays.equals(bytesB, bar.getLeftBoundInclusive())); - assertEquals(1, histo.bars.size()); - assertTrue(bar == histo.getBar(bytesB)); - assertTrue(bar == histo.getBar(bytesA)); - assertTrue(bar == histo.getBar(bytesC)); - assertEquals(1, histo.bars.size()); - assertArrayEquals(bytesA, bar.getLeftBoundInclusive()); - assertArrayEquals(bytesC, bar.getRightBoundExclusive()); + @Test + public void testGetBar() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(10); + Bar bar = histo.getBar(bytesB); + assertTrue(Arrays.equals(bytesB, bar.getLeftBoundInclusive())); + assertEquals(1, histo.bars.size()); + assertTrue(bar == histo.getBar(bytesB)); + assertTrue(bar == histo.getBar(bytesA)); + assertTrue(bar == histo.getBar(bytesC)); + assertEquals(1, histo.bars.size()); + assertArrayEquals(bytesA, bar.getLeftBoundInclusive()); + assertArrayEquals(bytesC, bar.getRightBoundExclusive()); - histo.bars = new ArrayList(); - histo.bars.add(b_c); - histo.bars.add(c_d); - assertEquals(b_c, histo.getBar(bytesB)); - assertEquals(c_d, histo.getBar(bytesC)); + histo.bars = new ArrayList(); + histo.bars.add(b_c); + histo.bars.add(c_d); + assertEquals(b_c, histo.getBar(bytesB)); + assertEquals(c_d, histo.getBar(bytesC)); - assertTrue(histo.getBar(bytesA) == b_c); - assertTrue(histo.getBar(bytesE) == c_d); - assertArrayEquals(bytesA, b_c.getLeftBoundInclusive()); - assertArrayEquals(bytesE, c_d.getRightBoundExclusive()); - } + assertTrue(histo.getBar(bytesA) == b_c); + assertTrue(histo.getBar(bytesE) == c_d); + assertArrayEquals(bytesA, b_c.getLeftBoundInclusive()); + assertArrayEquals(bytesE, c_d.getRightBoundExclusive()); + } - @Test - public void testMergeBars() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(2, 1); - // test merge of two bars - histo.bars.add(a_b); - histo.bars.add(b_c); - histo.bars.add(c_d); - histo.bars.add(d_e); - histo.totalCount = 20; // maxBarCount of 1.7 * (10/2) = 17 - a_b.incrementCount(3); - b_c.incrementCount(2); - c_d.incrementCount(10); - d_e.incrementCount(5); - histo.mergeBars(); - assertEquals(3, histo.bars.size()); - Bar mergedBar = histo.bars.get(0); - assertEquals(5, mergedBar.getSize()); - assertArrayEquals(bytesA, mergedBar.getLeftBoundInclusive()); - assertArrayEquals(bytesC, mergedBar.getRightBoundExclusive()); + @Test + public void testMergeBars() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(2, 1); + // test merge of two bars + histo.bars.add(a_b); + histo.bars.add(b_c); + histo.bars.add(c_d); + histo.bars.add(d_e); + histo.totalCount = 20; // maxBarCount of 1.7 * (10/2) = 17 + a_b.incrementCount(3); + b_c.incrementCount(2); + 
c_d.incrementCount(10); + d_e.incrementCount(5); + histo.mergeBars(); + assertEquals(3, histo.bars.size()); + Bar mergedBar = histo.bars.get(0); + assertEquals(5, mergedBar.getSize()); + assertArrayEquals(bytesA, mergedBar.getLeftBoundInclusive()); + assertArrayEquals(bytesC, mergedBar.getRightBoundExclusive()); - // merge again a_c=5 c_d=10 d_e=5 - histo.mergeBars(); - assertEquals(2, histo.bars.size()); - mergedBar = histo.bars.get(0); - assertEquals(15, mergedBar.getSize()); - assertArrayEquals(bytesA, mergedBar.getLeftBoundInclusive()); - assertArrayEquals(bytesD, mergedBar.getRightBoundExclusive()); + // merge again a_c=5 c_d=10 d_e=5 + histo.mergeBars(); + assertEquals(2, histo.bars.size()); + mergedBar = histo.bars.get(0); + assertEquals(15, mergedBar.getSize()); + assertArrayEquals(bytesA, mergedBar.getLeftBoundInclusive()); + assertArrayEquals(bytesD, mergedBar.getRightBoundExclusive()); - // a_d=15 d_e=5 , 20 > 17 so merge shouldn't happen - histo.mergeBars(); - assertEquals(2, histo.bars.size()); - } + // a_d=15 d_e=5 , 20 > 17 so merge shouldn't happen + histo.mergeBars(); + assertEquals(2, histo.bars.size()); + } - @Test - public void testSplitBar() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(10); - Bar targetBar = new Bar(bytesA, bytesC); - targetBar.incrementCount(31); - histo.bars.add(targetBar); - histo.splitBar(targetBar); - assertEquals(2, histo.bars.size()); - Bar newLeft = histo.bars.get(0); - assertArrayEquals(bytesA, newLeft.getLeftBoundInclusive()); - assertArrayEquals(bytesB, newLeft.getRightBoundExclusive()); - assertEquals(15, newLeft.getSize()); - Bar newRight = histo.bars.get(1); - assertArrayEquals(bytesB, newRight.getLeftBoundInclusive()); - assertArrayEquals(bytesC, newRight.getRightBoundExclusive()); - assertEquals(16, newRight.getSize()); + @Test + public void testSplitBar() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(10); + Bar targetBar = new Bar(bytesA, bytesC); + targetBar.incrementCount(31); + histo.bars.add(targetBar); + histo.splitBar(targetBar); + assertEquals(2, histo.bars.size()); + Bar newLeft = histo.bars.get(0); + assertArrayEquals(bytesA, newLeft.getLeftBoundInclusive()); + assertArrayEquals(bytesB, newLeft.getRightBoundExclusive()); + assertEquals(15, newLeft.getSize()); + Bar newRight = histo.bars.get(1); + assertArrayEquals(bytesB, newRight.getLeftBoundInclusive()); + assertArrayEquals(bytesC, newRight.getRightBoundExclusive()); + assertEquals(16, newRight.getSize()); - // test blocked bars are distributed correctly - histo.bars.clear(); - targetBar = new Bar(bytesA, bytesE); - targetBar.incrementCount(10); - a_b.incrementCount(3); - targetBar.addBlockedBar(a_b); - b_c.incrementCount(4); - targetBar.addBlockedBar(b_c); - c_d.incrementCount(2); - targetBar.addBlockedBar(c_d); - d_e.incrementCount(1); - targetBar.addBlockedBar(d_e); - histo.bars.add(targetBar); - histo.splitBar(targetBar); - newLeft = histo.bars.get(0); - newRight = histo.bars.get(1); - assertEquals(10, newLeft.getSize()); - assertEquals(a_b, newLeft.getBlockedBars().get(0)); - assertEquals(d_e, newLeft.getBlockedBars().get(1)); - assertEquals(10, newRight.getSize()); - assertEquals(b_c, newRight.getBlockedBars().get(0)); - assertEquals(c_d, newRight.getBlockedBars().get(1)); - } + // test blocked bars are distributed correctly + histo.bars.clear(); + targetBar = new Bar(bytesA, bytesE); + targetBar.incrementCount(10); + a_b.incrementCount(3); + targetBar.addBlockedBar(a_b); + b_c.incrementCount(4); + targetBar.addBlockedBar(b_c); 
+ c_d.incrementCount(2); + targetBar.addBlockedBar(c_d); + d_e.incrementCount(1); + targetBar.addBlockedBar(d_e); + histo.bars.add(targetBar); + histo.splitBar(targetBar); + newLeft = histo.bars.get(0); + newRight = histo.bars.get(1); + assertEquals(10, newLeft.getSize()); + assertEquals(a_b, newLeft.getBlockedBars().get(0)); + assertEquals(d_e, newLeft.getBlockedBars().get(1)); + assertEquals(10, newRight.getSize()); + assertEquals(b_c, newRight.getBlockedBars().get(0)); + assertEquals(c_d, newRight.getBlockedBars().get(1)); + } - @Test - public void testAddValues() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(3); - for (int i = 0; i < 100; i++) { - histo.addValue(Bytes.toBytes(i + "")); - } - // (expansion factor 7) * (3 buckets) - assertEquals(21, histo.bars.size()); - long total = 0; - for (Bar b : histo.bars) { - total += b.getSize(); - } - assertEquals(100, total); + @Test + public void testAddValues() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(3); + for (int i = 0; i < 100; i++) { + histo.addValue(Bytes.toBytes(i + "")); } + // (expansion factor 7) * (3 buckets) + assertEquals(21, histo.bars.size()); + long total = 0; + for (Bar b : histo.bars) { + total += b.getSize(); + } + assertEquals(100, total); + } - @Test - public void testComputeBuckets() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(3); - histo.addValue(bytesA); - histo.addValue(bytesB); - histo.addValue(bytesC); - histo.addValue(bytesD); - histo.addValue(bytesE); - List buckets = histo.computeBuckets(); - assertEquals(3, buckets.size()); - Bucket bucket = buckets.get(0); - assertEquals(2, bucket.getCountEstimate()); - assertInBucket(bucket, bytesA); - assertInBucket(bucket, bytesB); - bucket = buckets.get(1); - assertEquals(2, bucket.getCountEstimate()); - assertInBucket(bucket, bytesC); - assertInBucket(bucket, bytesD); - bucket = buckets.get(2); - assertEquals(1, bucket.getCountEstimate()); - assertInBucketInclusive(bucket, bytesE); + @Test + public void testComputeBuckets() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(3); + histo.addValue(bytesA); + histo.addValue(bytesB); + histo.addValue(bytesC); + histo.addValue(bytesD); + histo.addValue(bytesE); + List buckets = histo.computeBuckets(); + assertEquals(3, buckets.size()); + Bucket bucket = buckets.get(0); + assertEquals(2, bucket.getCountEstimate()); + assertInBucket(bucket, bytesA); + assertInBucket(bucket, bytesB); + bucket = buckets.get(1); + assertEquals(2, bucket.getCountEstimate()); + assertInBucket(bucket, bytesC); + assertInBucket(bucket, bytesD); + bucket = buckets.get(2); + assertEquals(1, bucket.getCountEstimate()); + assertInBucketInclusive(bucket, bytesE); - // test closestSplitIdx - total count is currently 5, idealBuckSize=2 - histo.bars.clear(); - a_b.incrementCount(); - histo.bars.add(a_b); - Bar b_d = new Bar(bytesB, bytesD); - b_d.incrementCount(3); // use 1/3 of this bar's count for first bucket - histo.bars.add(b_d); - histo.bars.add(d_e); - buckets = histo.computeBuckets(); - bucket = buckets.get(0); - // bound should be 1/3 of [bytesB, bytesD), - // since we used 1/3 of b_d's count for first bucket - byte[][] splits = Bytes.split(bytesB, bytesD, 8); - assertArrayEquals(splits[3], bucket.getRightBoundExclusive()); - bucket = buckets.get(1); - assertArrayEquals(splits[3], bucket.leftBoundInclusive); - } + // test closestSplitIdx - total count is currently 5, idealBuckSize=2 + histo.bars.clear(); + a_b.incrementCount(); + histo.bars.add(a_b); + Bar b_d = new 
Bar(bytesB, bytesD); + b_d.incrementCount(3); // use 1/3 of this bar's count for first bucket + histo.bars.add(b_d); + histo.bars.add(d_e); + buckets = histo.computeBuckets(); + bucket = buckets.get(0); + // bound should be 1/3 of [bytesB, bytesD), + // since we used 1/3 of b_d's count for first bucket + byte[][] splits = Bytes.split(bytesB, bytesD, 8); + assertArrayEquals(splits[3], bucket.getRightBoundExclusive()); + bucket = buckets.get(1); + assertArrayEquals(splits[3], bucket.leftBoundInclusive); + } - // check if the value lies in the bucket range - private void assertInBucket(Bucket bucket, byte[] value) { - assertTrue(Bytes.compareTo(value, bucket.getLeftBoundInclusive()) >= 0); - assertTrue(Bytes.compareTo(value, bucket.getRightBoundExclusive()) < 0); - } + // check if the value lies in the bucket range + private void assertInBucket(Bucket bucket, byte[] value) { + assertTrue(Bytes.compareTo(value, bucket.getLeftBoundInclusive()) >= 0); + assertTrue(Bytes.compareTo(value, bucket.getRightBoundExclusive()) < 0); + } - // right bound is inclusive - private void assertInBucketInclusive(Bucket bucket, byte[] value) { - assertTrue(Bytes.compareTo(value, bucket.getLeftBoundInclusive()) >= 0); - assertTrue(Bytes.compareTo(value, bucket.getRightBoundExclusive()) <= 0); - } + // right bound is inclusive + private void assertInBucketInclusive(Bucket bucket, byte[] value) { + assertTrue(Bytes.compareTo(value, bucket.getLeftBoundInclusive()) >= 0); + assertTrue(Bytes.compareTo(value, bucket.getRightBoundExclusive()) <= 0); + } - /** - * Stream of data is has uniformly distributed values - */ - @Test - public void testUniformDistribution() { - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(4); - for (int i = 0; i < 100000; i++) { - histo.addValue(Bytes.toBytes((i % 8) + "")); - } - Iterator buckets = histo.computeBuckets().iterator(); - Bucket bucket = buckets.next(); - assertEquals(25000, bucket.getCountEstimate()); - assertInBucket(bucket, Bytes.toBytes("0")); - assertInBucket(bucket, Bytes.toBytes("1")); - bucket = buckets.next(); - assertEquals(25000, bucket.getCountEstimate()); - assertInBucket(bucket, Bytes.toBytes("2")); - assertInBucket(bucket, Bytes.toBytes("3")); - bucket = buckets.next(); - assertEquals(25000, bucket.getCountEstimate()); - assertInBucket(bucket, Bytes.toBytes("4")); - assertInBucket(bucket, Bytes.toBytes("5")); - bucket = buckets.next(); - assertEquals(25000, bucket.getCountEstimate()); - assertInBucket(bucket, Bytes.toBytes("6")); - assertInBucket(bucket, Bytes.toBytes("7")); + /** + * Stream of data is has uniformly distributed values + */ + @Test + public void testUniformDistribution() { + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(4); + for (int i = 0; i < 100000; i++) { + histo.addValue(Bytes.toBytes((i % 8) + "")); } + Iterator buckets = histo.computeBuckets().iterator(); + Bucket bucket = buckets.next(); + assertEquals(25000, bucket.getCountEstimate()); + assertInBucket(bucket, Bytes.toBytes("0")); + assertInBucket(bucket, Bytes.toBytes("1")); + bucket = buckets.next(); + assertEquals(25000, bucket.getCountEstimate()); + assertInBucket(bucket, Bytes.toBytes("2")); + assertInBucket(bucket, Bytes.toBytes("3")); + bucket = buckets.next(); + assertEquals(25000, bucket.getCountEstimate()); + assertInBucket(bucket, Bytes.toBytes("4")); + assertInBucket(bucket, Bytes.toBytes("5")); + bucket = buckets.next(); + assertEquals(25000, bucket.getCountEstimate()); + assertInBucket(bucket, Bytes.toBytes("6")); + assertInBucket(bucket, 
Bytes.toBytes("7")); + } - /** - * Stream of data is skewed Gaussian distribution with mean of 100 and standard deviation of 25 - */ - @Test - public void testSkewedDistribution() { - Random random = new Random(); - EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(5); - for (int i = 0; i < 100000; i++) { - int value = (int) Math.round(random.nextGaussian() * 25 + 100); - histo.addValue(Bytes.toBytes(value)); - } - // our middle bucket should have a smaller length than the end buckets, - // since we have more values clustered in the middle - List buckets = histo.computeBuckets(); - Bucket first = buckets.get(0); - int firstLength = getLength(first); - Bucket last = buckets.get(4); - int lastLength = getLength(last); - Bucket middle = buckets.get(2); - int middleLength = getLength(middle); - assertTrue(firstLength - middleLength > 25); - assertTrue(lastLength - middleLength > 25); + /** + * Stream of data is skewed Gaussian distribution with mean of 100 and standard deviation of 25 + */ + @Test + public void testSkewedDistribution() { + Random random = new Random(); + EquiDepthStreamHistogram histo = new EquiDepthStreamHistogram(5); + for (int i = 0; i < 100000; i++) { + int value = (int) Math.round(random.nextGaussian() * 25 + 100); + histo.addValue(Bytes.toBytes(value)); } + // our middle bucket should have a smaller length than the end buckets, + // since we have more values clustered in the middle + List buckets = histo.computeBuckets(); + Bucket first = buckets.get(0); + int firstLength = getLength(first); + Bucket last = buckets.get(4); + int lastLength = getLength(last); + Bucket middle = buckets.get(2); + int middleLength = getLength(middle); + assertTrue(firstLength - middleLength > 25); + assertTrue(lastLength - middleLength > 25); + } - private int getLength(Bucket last) { - return Math.abs( - Bytes.toInt(last.getLeftBoundInclusive()) - Bytes.toInt(last.getRightBoundExclusive())); - } + private int getLength(Bucket last) { + return Math + .abs(Bytes.toInt(last.getLeftBoundInclusive()) - Bytes.toInt(last.getRightBoundExclusive())); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/GeneratePerformanceData.java b/phoenix-core/src/test/java/org/apache/phoenix/util/GeneratePerformanceData.java index 53a1d018fe7..083dc566fc6 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/GeneratePerformanceData.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/GeneratePerformanceData.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,38 +26,35 @@ import java.util.Random; public class GeneratePerformanceData { - public static void main(String[] args) throws FileNotFoundException, IOException { - String[] host = {"NA","CS","EU"}; - String[] domain = {"Salesforce.com","Apple.com","Google.com"}; - String[] feature = {"Login","Report","Dashboard"}; - Calendar now = GregorianCalendar.getInstance(); - Random random = new Random(); - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - if (args.length != 2) { - System.out.println("Data file name and row count must be specified as arguments"); - return; - } - String dataFile = args[0]; - int rowCount = Integer.parseInt(args[1]); - FileOutputStream fostream = null; - try { - fostream = new FileOutputStream(dataFile); - for (int i=0; i tableColumns = ptable.getColumns(); - List tablePKColumns = ptable.getPKColumns(); - if (ptable.getBucketNum() != null) { - tableColumnOffset = 1; - tableColumns = tableColumns.subList(tableColumnOffset, tableColumns.size()); - tablePKColumns = tablePKColumns.subList(tableColumnOffset, tablePKColumns.size()); - } - PTable pindex = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName)); - List indexColumns = pindex.getColumns(); - int indexColumnOffset = 0; - if (pindex.getBucketNum() != null) { - indexColumnOffset = 1; - } - if (pindex.getViewIndexId() != null) { - indexColumnOffset++; - } - if (indexColumnOffset > 0) { - indexColumns = indexColumns.subList(indexColumnOffset, indexColumns.size()); - } - StringBuilder indexQueryBuf = new StringBuilder("SELECT "); - for (PColumn dcol : tablePKColumns) { - indexQueryBuf.append("CAST(\"" + IndexUtil.getIndexColumnName(dcol) + "\" AS " + dcol.getDataType().getSqlTypeName() + ")"); - indexQueryBuf.append(","); - } - for (PColumn icol :indexColumns) { - PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); - if (SchemaUtil.isPKColumn(icol) && !SchemaUtil.isPKColumn(dcol)) { - indexQueryBuf.append("CAST (\"" + icol.getName().getString() + "\" AS " + dcol.getDataType().getSqlTypeName() + ")"); - indexQueryBuf.append(","); - } - } - for (PColumn icol : indexColumns) { - if (!SchemaUtil.isPKColumn(icol)) { - PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); - indexQueryBuf.append("CAST (\"" + icol.getName().getString() + "\" AS " + dcol.getDataType().getSqlTypeName() + ")"); - indexQueryBuf.append(","); - } - } - indexQueryBuf.setLength(indexQueryBuf.length()-1); - indexQueryBuf.append("\nFROM " + fullIndexName); - - StringBuilder tableQueryBuf = new StringBuilder("SELECT "); - for (PColumn dcol : tablePKColumns) { - tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); - tableQueryBuf.append(","); - } - for (PColumn icol : indexColumns) { - PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); - if (SchemaUtil.isPKColumn(icol) && !SchemaUtil.isPKColumn(dcol)) { - if (dcol.getFamilyName() != null) { - tableQueryBuf.append("\"" + dcol.getFamilyName().getString() + "\""); - tableQueryBuf.append("."); - } - tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); - tableQueryBuf.append(","); - } - } - for (PColumn icol : indexColumns) { - if (!SchemaUtil.isPKColumn(icol)) { - PColumn dcol = IndexUtil.getDataColumn(ptable, 
icol.getName().getString()); - if (dcol.getFamilyName() != null) { - tableQueryBuf.append("\"" + dcol.getFamilyName().getString() + "\""); - tableQueryBuf.append("."); - } - tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); - tableQueryBuf.append(","); - } - } - tableQueryBuf.setLength(tableQueryBuf.length()-1); - tableQueryBuf.append("\nFROM " + fullTableName + "\nWHERE ("); - for (PColumn dcol : tablePKColumns) { - tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); - tableQueryBuf.append(","); - } - tableQueryBuf.setLength(tableQueryBuf.length()-1); - tableQueryBuf.append(") = (("); - for (int i = 0; i < tablePKColumns.size(); i++) { - tableQueryBuf.append("?"); - tableQueryBuf.append(","); + public static long scrutinizeIndex(Connection conn, String fullTableName, String fullIndexName) + throws SQLException { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable ptable = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName)); + int tableColumnOffset = 0; + List tableColumns = ptable.getColumns(); + List tablePKColumns = ptable.getPKColumns(); + if (ptable.getBucketNum() != null) { + tableColumnOffset = 1; + tableColumns = tableColumns.subList(tableColumnOffset, tableColumns.size()); + tablePKColumns = tablePKColumns.subList(tableColumnOffset, tablePKColumns.size()); + } + PTable pindex = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName)); + List indexColumns = pindex.getColumns(); + int indexColumnOffset = 0; + if (pindex.getBucketNum() != null) { + indexColumnOffset = 1; + } + if (pindex.getViewIndexId() != null) { + indexColumnOffset++; + } + if (indexColumnOffset > 0) { + indexColumns = indexColumns.subList(indexColumnOffset, indexColumns.size()); + } + StringBuilder indexQueryBuf = new StringBuilder("SELECT "); + for (PColumn dcol : tablePKColumns) { + indexQueryBuf.append("CAST(\"" + IndexUtil.getIndexColumnName(dcol) + "\" AS " + + dcol.getDataType().getSqlTypeName() + ")"); + indexQueryBuf.append(","); + } + for (PColumn icol : indexColumns) { + PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); + if (SchemaUtil.isPKColumn(icol) && !SchemaUtil.isPKColumn(dcol)) { + indexQueryBuf.append("CAST (\"" + icol.getName().getString() + "\" AS " + + dcol.getDataType().getSqlTypeName() + ")"); + indexQueryBuf.append(","); + } + } + for (PColumn icol : indexColumns) { + if (!SchemaUtil.isPKColumn(icol)) { + PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); + indexQueryBuf.append("CAST (\"" + icol.getName().getString() + "\" AS " + + dcol.getDataType().getSqlTypeName() + ")"); + indexQueryBuf.append(","); + } + } + indexQueryBuf.setLength(indexQueryBuf.length() - 1); + indexQueryBuf.append("\nFROM " + fullIndexName); + + StringBuilder tableQueryBuf = new StringBuilder("SELECT "); + for (PColumn dcol : tablePKColumns) { + tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); + tableQueryBuf.append(","); + } + for (PColumn icol : indexColumns) { + PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); + if (SchemaUtil.isPKColumn(icol) && !SchemaUtil.isPKColumn(dcol)) { + if (dcol.getFamilyName() != null) { + tableQueryBuf.append("\"" + dcol.getFamilyName().getString() + "\""); + tableQueryBuf.append("."); } - tableQueryBuf.setLength(tableQueryBuf.length()-1); - tableQueryBuf.append("))"); - - String tableQuery = tableQueryBuf.toString(); - PreparedStatement istmt = conn.prepareStatement(tableQuery); - - String indexQuery = 
indexQueryBuf.toString(); - ResultSet irs = conn.createStatement().executeQuery(indexQuery); - ResultSetMetaData irsmd = irs.getMetaData(); - long icount = 0; - while (irs.next()) { - icount++; - StringBuilder pkBuf = new StringBuilder("("); - for (int i = 0; i < tablePKColumns.size(); i++) { - PColumn dcol = tablePKColumns.get(i); - int offset = i+1; - Object pkVal = irs.getObject(offset); - PDataType pkType = PDataType.fromTypeId(irsmd.getColumnType(offset)); - istmt.setObject(offset, pkVal, dcol.getDataType().getSqlType()); - pkBuf.append(pkType.toStringLiteral(pkVal)); - pkBuf.append(","); - } - pkBuf.setLength(pkBuf.length()-1); - pkBuf.append(")"); - ResultSet drs = istmt.executeQuery(); - ResultSetMetaData drsmd = drs.getMetaData(); - assertTrue("Expected to find PK in data table: " + pkBuf, drs.next()); - for (int i = 0; i < irsmd.getColumnCount(); i++) { - Object iVal = irs.getObject(i + 1); - PDataType iType = PDataType.fromTypeId(irsmd.getColumnType(i + 1)); - Object dVal = drs.getObject(i + 1); - PDataType dType = PDataType.fromTypeId(drsmd.getColumnType(i + 1)); - assertTrue("Expected equality for " + drsmd.getColumnName(i + 1) + ", but " + iType.toStringLiteral(iVal) + "!=" + dType.toStringLiteral(dVal), Objects.equal(iVal, dVal)); - } + tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); + tableQueryBuf.append(","); + } + } + for (PColumn icol : indexColumns) { + if (!SchemaUtil.isPKColumn(icol)) { + PColumn dcol = IndexUtil.getDataColumn(ptable, icol.getName().getString()); + if (dcol.getFamilyName() != null) { + tableQueryBuf.append("\"" + dcol.getFamilyName().getString() + "\""); + tableQueryBuf.append("."); } - - long dcount = TestUtil.getRowCount(conn, fullTableName); - assertEquals("Expected data table row count to match", dcount, icount); - return dcount; + tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); + tableQueryBuf.append(","); + } + } + tableQueryBuf.setLength(tableQueryBuf.length() - 1); + tableQueryBuf.append("\nFROM " + fullTableName + "\nWHERE ("); + for (PColumn dcol : tablePKColumns) { + tableQueryBuf.append("\"" + dcol.getName().getString() + "\""); + tableQueryBuf.append(","); } + tableQueryBuf.setLength(tableQueryBuf.length() - 1); + tableQueryBuf.append(") = (("); + for (int i = 0; i < tablePKColumns.size(); i++) { + tableQueryBuf.append("?"); + tableQueryBuf.append(","); + } + tableQueryBuf.setLength(tableQueryBuf.length() - 1); + tableQueryBuf.append("))"); + + String tableQuery = tableQueryBuf.toString(); + PreparedStatement istmt = conn.prepareStatement(tableQuery); + + String indexQuery = indexQueryBuf.toString(); + ResultSet irs = conn.createStatement().executeQuery(indexQuery); + ResultSetMetaData irsmd = irs.getMetaData(); + long icount = 0; + while (irs.next()) { + icount++; + StringBuilder pkBuf = new StringBuilder("("); + for (int i = 0; i < tablePKColumns.size(); i++) { + PColumn dcol = tablePKColumns.get(i); + int offset = i + 1; + Object pkVal = irs.getObject(offset); + PDataType pkType = PDataType.fromTypeId(irsmd.getColumnType(offset)); + istmt.setObject(offset, pkVal, dcol.getDataType().getSqlType()); + pkBuf.append(pkType.toStringLiteral(pkVal)); + pkBuf.append(","); + } + pkBuf.setLength(pkBuf.length() - 1); + pkBuf.append(")"); + ResultSet drs = istmt.executeQuery(); + ResultSetMetaData drsmd = drs.getMetaData(); + assertTrue("Expected to find PK in data table: " + pkBuf, drs.next()); + for (int i = 0; i < irsmd.getColumnCount(); i++) { + Object iVal = irs.getObject(i + 1); + PDataType iType = 
PDataType.fromTypeId(irsmd.getColumnType(i + 1)); + Object dVal = drs.getObject(i + 1); + PDataType dType = PDataType.fromTypeId(drsmd.getColumnType(i + 1)); + assertTrue( + "Expected equality for " + drsmd.getColumnName(i + 1) + ", but " + + iType.toStringLiteral(iVal) + "!=" + dType.toStringLiteral(dVal), + Objects.equal(iVal, dVal)); + } + } + + long dcount = TestUtil.getRowCount(conn, fullTableName); + assertEquals("Expected data table row count to match", dcount, icount); + return dcount; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/IndexUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/IndexUtilTest.java index 77a3b7b3252..39269a5a7b5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/IndexUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/IndexUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,134 +23,76 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDataTypeFactory; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.junit.Test; public class IndexUtilTest { - - @Test - public void testIndexNonNullableColumnDataType() { - verifyIndexColumnDataTypes(false, - "{BIGINT=BIGINT, " - + "BIGINT ARRAY=BIGINT ARRAY, " - + "BINARY=BINARY, " - + "BINARY ARRAY=BINARY ARRAY, " - + "BOOLEAN=BOOLEAN, " - + "BOOLEAN ARRAY=BOOLEAN ARRAY, " - + "CHAR=CHAR, " - + "CHAR ARRAY=CHAR ARRAY, " - + "DATE=DATE, " - + "DATE ARRAY=DATE ARRAY, " - + "DECIMAL=DECIMAL, " - + "DECIMAL ARRAY=DECIMAL ARRAY, " - + "DOUBLE=DOUBLE, " - + "DOUBLE ARRAY=DOUBLE ARRAY, " - + "FLOAT=FLOAT, " - + "FLOAT ARRAY=FLOAT ARRAY, " - + "INTEGER=INTEGER, " - + "INTEGER ARRAY=INTEGER ARRAY, " - + "SMALLINT=SMALLINT, " - + "SMALLINT ARRAY=SMALLINT ARRAY, " - + "TIME=TIME, " - + "TIME ARRAY=TIME ARRAY, " - + "TIMESTAMP=TIMESTAMP, " - + "TIMESTAMP ARRAY=TIMESTAMP ARRAY, " - + "TINYINT=TINYINT, " - + "TINYINT ARRAY=TINYINT ARRAY, " - + "UNSIGNED_DATE=UNSIGNED_DATE, " - + "UNSIGNED_DATE ARRAY=UNSIGNED_DATE ARRAY, " - + "UNSIGNED_DOUBLE=UNSIGNED_DOUBLE, " - + "UNSIGNED_DOUBLE ARRAY=UNSIGNED_DOUBLE ARRAY, " - + "UNSIGNED_FLOAT=UNSIGNED_FLOAT, " - + "UNSIGNED_FLOAT ARRAY=UNSIGNED_FLOAT ARRAY, " - + "UNSIGNED_INT=UNSIGNED_INT, " - + "UNSIGNED_INT ARRAY=UNSIGNED_INT ARRAY, " - + "UNSIGNED_LONG=UNSIGNED_LONG, " - + "UNSIGNED_LONG ARRAY=UNSIGNED_LONG ARRAY, " - + "UNSIGNED_SMALLINT=UNSIGNED_SMALLINT, " - + "UNSIGNED_SMALLINT ARRAY=UNSIGNED_SMALLINT ARRAY, " - + "UNSIGNED_TIME=UNSIGNED_TIME, " - + "UNSIGNED_TIME ARRAY=UNSIGNED_TIME ARRAY, " - + "UNSIGNED_TIMESTAMP=UNSIGNED_TIMESTAMP, " - + "UNSIGNED_TIMESTAMP ARRAY=UNSIGNED_TIMESTAMP ARRAY, " - + "UNSIGNED_TINYINT=UNSIGNED_TINYINT, " - + "UNSIGNED_TINYINT ARRAY=UNSIGNED_TINYINT ARRAY, " - + "VARBINARY=VARBINARY, " - + "VARBINARY ARRAY=VARBINARY ARRAY, " - + "VARBINARY_ENCODED=VARBINARY_ENCODED, " - + "VARCHAR=VARCHAR, " - + "VARCHAR ARRAY=VARCHAR ARRAY}"); - } - @Test - public void testIndexNullableColumnDataType() { - verifyIndexColumnDataTypes(true, - "{BIGINT=DECIMAL, " - + "BIGINT ARRAY=BIGINT ARRAY, " - + "BINARY=VARBINARY, " - + "BINARY ARRAY=BINARY ARRAY, " - + "BOOLEAN=DECIMAL, " 
- + "BOOLEAN ARRAY=BOOLEAN ARRAY, " - + "CHAR=VARCHAR, " - + "CHAR ARRAY=CHAR ARRAY, " - + "DATE=DECIMAL, " - + "DATE ARRAY=DATE ARRAY, " - + "DECIMAL=DECIMAL, " - + "DECIMAL ARRAY=DECIMAL ARRAY, " - + "DOUBLE=DECIMAL, " - + "DOUBLE ARRAY=DOUBLE ARRAY, " - + "FLOAT=DECIMAL, " - + "FLOAT ARRAY=FLOAT ARRAY, " - + "INTEGER=DECIMAL, " - + "INTEGER ARRAY=INTEGER ARRAY, " - + "SMALLINT=DECIMAL, " - + "SMALLINT ARRAY=SMALLINT ARRAY, " - + "TIME=DECIMAL, " - + "TIME ARRAY=TIME ARRAY, " - + "TIMESTAMP=DECIMAL, " - + "TIMESTAMP ARRAY=TIMESTAMP ARRAY, " - + "TINYINT=DECIMAL, " - + "TINYINT ARRAY=TINYINT ARRAY, " - + "UNSIGNED_DATE=DECIMAL, " - + "UNSIGNED_DATE ARRAY=UNSIGNED_DATE ARRAY, " - + "UNSIGNED_DOUBLE=DECIMAL, " - + "UNSIGNED_DOUBLE ARRAY=UNSIGNED_DOUBLE ARRAY, " - + "UNSIGNED_FLOAT=DECIMAL, " - + "UNSIGNED_FLOAT ARRAY=UNSIGNED_FLOAT ARRAY, " - + "UNSIGNED_INT=DECIMAL, " - + "UNSIGNED_INT ARRAY=UNSIGNED_INT ARRAY, " - + "UNSIGNED_LONG=DECIMAL, " - + "UNSIGNED_LONG ARRAY=UNSIGNED_LONG ARRAY, " - + "UNSIGNED_SMALLINT=DECIMAL, " - + "UNSIGNED_SMALLINT ARRAY=UNSIGNED_SMALLINT ARRAY, " - + "UNSIGNED_TIME=DECIMAL, " - + "UNSIGNED_TIME ARRAY=UNSIGNED_TIME ARRAY, " - + "UNSIGNED_TIMESTAMP=DECIMAL, " - + "UNSIGNED_TIMESTAMP ARRAY=UNSIGNED_TIMESTAMP ARRAY, " - + "UNSIGNED_TINYINT=DECIMAL, " - + "UNSIGNED_TINYINT ARRAY=UNSIGNED_TINYINT ARRAY, " - + "VARBINARY=VARBINARY, " - + "VARBINARY ARRAY=VARBINARY ARRAY, " - + "VARBINARY_ENCODED=VARBINARY_ENCODED, " - + "VARCHAR=VARCHAR, " - + "VARCHAR ARRAY=VARCHAR ARRAY}"); - } + @Test + public void testIndexNonNullableColumnDataType() { + verifyIndexColumnDataTypes(false, + "{BIGINT=BIGINT, " + "BIGINT ARRAY=BIGINT ARRAY, " + "BINARY=BINARY, " + + "BINARY ARRAY=BINARY ARRAY, " + "BOOLEAN=BOOLEAN, " + "BOOLEAN ARRAY=BOOLEAN ARRAY, " + + "CHAR=CHAR, " + "CHAR ARRAY=CHAR ARRAY, " + "DATE=DATE, " + "DATE ARRAY=DATE ARRAY, " + + "DECIMAL=DECIMAL, " + "DECIMAL ARRAY=DECIMAL ARRAY, " + "DOUBLE=DOUBLE, " + + "DOUBLE ARRAY=DOUBLE ARRAY, " + "FLOAT=FLOAT, " + "FLOAT ARRAY=FLOAT ARRAY, " + + "INTEGER=INTEGER, " + "INTEGER ARRAY=INTEGER ARRAY, " + "SMALLINT=SMALLINT, " + + "SMALLINT ARRAY=SMALLINT ARRAY, " + "TIME=TIME, " + "TIME ARRAY=TIME ARRAY, " + + "TIMESTAMP=TIMESTAMP, " + "TIMESTAMP ARRAY=TIMESTAMP ARRAY, " + "TINYINT=TINYINT, " + + "TINYINT ARRAY=TINYINT ARRAY, " + "UNSIGNED_DATE=UNSIGNED_DATE, " + + "UNSIGNED_DATE ARRAY=UNSIGNED_DATE ARRAY, " + "UNSIGNED_DOUBLE=UNSIGNED_DOUBLE, " + + "UNSIGNED_DOUBLE ARRAY=UNSIGNED_DOUBLE ARRAY, " + "UNSIGNED_FLOAT=UNSIGNED_FLOAT, " + + "UNSIGNED_FLOAT ARRAY=UNSIGNED_FLOAT ARRAY, " + "UNSIGNED_INT=UNSIGNED_INT, " + + "UNSIGNED_INT ARRAY=UNSIGNED_INT ARRAY, " + "UNSIGNED_LONG=UNSIGNED_LONG, " + + "UNSIGNED_LONG ARRAY=UNSIGNED_LONG ARRAY, " + "UNSIGNED_SMALLINT=UNSIGNED_SMALLINT, " + + "UNSIGNED_SMALLINT ARRAY=UNSIGNED_SMALLINT ARRAY, " + "UNSIGNED_TIME=UNSIGNED_TIME, " + + "UNSIGNED_TIME ARRAY=UNSIGNED_TIME ARRAY, " + "UNSIGNED_TIMESTAMP=UNSIGNED_TIMESTAMP, " + + "UNSIGNED_TIMESTAMP ARRAY=UNSIGNED_TIMESTAMP ARRAY, " + + "UNSIGNED_TINYINT=UNSIGNED_TINYINT, " + "UNSIGNED_TINYINT ARRAY=UNSIGNED_TINYINT ARRAY, " + + "VARBINARY=VARBINARY, " + "VARBINARY ARRAY=VARBINARY ARRAY, " + + "VARBINARY_ENCODED=VARBINARY_ENCODED, " + "VARCHAR=VARCHAR, " + + "VARCHAR ARRAY=VARCHAR ARRAY}"); + } + + @Test + public void testIndexNullableColumnDataType() { + verifyIndexColumnDataTypes(true, + "{BIGINT=DECIMAL, " + "BIGINT ARRAY=BIGINT ARRAY, " + "BINARY=VARBINARY, " + + "BINARY ARRAY=BINARY ARRAY, " + "BOOLEAN=DECIMAL, " + "BOOLEAN ARRAY=BOOLEAN ARRAY, 
" + + "CHAR=VARCHAR, " + "CHAR ARRAY=CHAR ARRAY, " + "DATE=DECIMAL, " + + "DATE ARRAY=DATE ARRAY, " + "DECIMAL=DECIMAL, " + "DECIMAL ARRAY=DECIMAL ARRAY, " + + "DOUBLE=DECIMAL, " + "DOUBLE ARRAY=DOUBLE ARRAY, " + "FLOAT=DECIMAL, " + + "FLOAT ARRAY=FLOAT ARRAY, " + "INTEGER=DECIMAL, " + "INTEGER ARRAY=INTEGER ARRAY, " + + "SMALLINT=DECIMAL, " + "SMALLINT ARRAY=SMALLINT ARRAY, " + "TIME=DECIMAL, " + + "TIME ARRAY=TIME ARRAY, " + "TIMESTAMP=DECIMAL, " + "TIMESTAMP ARRAY=TIMESTAMP ARRAY, " + + "TINYINT=DECIMAL, " + "TINYINT ARRAY=TINYINT ARRAY, " + "UNSIGNED_DATE=DECIMAL, " + + "UNSIGNED_DATE ARRAY=UNSIGNED_DATE ARRAY, " + "UNSIGNED_DOUBLE=DECIMAL, " + + "UNSIGNED_DOUBLE ARRAY=UNSIGNED_DOUBLE ARRAY, " + "UNSIGNED_FLOAT=DECIMAL, " + + "UNSIGNED_FLOAT ARRAY=UNSIGNED_FLOAT ARRAY, " + "UNSIGNED_INT=DECIMAL, " + + "UNSIGNED_INT ARRAY=UNSIGNED_INT ARRAY, " + "UNSIGNED_LONG=DECIMAL, " + + "UNSIGNED_LONG ARRAY=UNSIGNED_LONG ARRAY, " + "UNSIGNED_SMALLINT=DECIMAL, " + + "UNSIGNED_SMALLINT ARRAY=UNSIGNED_SMALLINT ARRAY, " + "UNSIGNED_TIME=DECIMAL, " + + "UNSIGNED_TIME ARRAY=UNSIGNED_TIME ARRAY, " + "UNSIGNED_TIMESTAMP=DECIMAL, " + + "UNSIGNED_TIMESTAMP ARRAY=UNSIGNED_TIMESTAMP ARRAY, " + "UNSIGNED_TINYINT=DECIMAL, " + + "UNSIGNED_TINYINT ARRAY=UNSIGNED_TINYINT ARRAY, " + "VARBINARY=VARBINARY, " + + "VARBINARY ARRAY=VARBINARY ARRAY, " + "VARBINARY_ENCODED=VARBINARY_ENCODED, " + + "VARCHAR=VARCHAR, " + "VARCHAR ARRAY=VARCHAR ARRAY}"); + } - private void verifyIndexColumnDataTypes(boolean isNullable, String expected) { - Map indexColumnDataTypes = Maps.newTreeMap(); - for (PDataType dataType : PDataTypeFactory.getInstance().getTypes()) { - if (!dataType.isComparisonSupported()) { - // JSON Datatype can't be an IndexColumn - continue; - } - String indexColumnDataType = "unsupported"; - try { - indexColumnDataType = IndexUtil.getIndexColumnDataType(isNullable, dataType).toString(); - } catch (IllegalArgumentException e) { - } - indexColumnDataTypes.put(dataType.toString(), indexColumnDataType); - } - assertEquals(expected, indexColumnDataTypes.toString()); + private void verifyIndexColumnDataTypes(boolean isNullable, String expected) { + Map indexColumnDataTypes = Maps.newTreeMap(); + for (PDataType dataType : PDataTypeFactory.getInstance().getTypes()) { + if (!dataType.isComparisonSupported()) { + // JSON Datatype can't be an IndexColumn + continue; + } + String indexColumnDataType = "unsupported"; + try { + indexColumnDataType = IndexUtil.getIndexColumnDataType(isNullable, dataType).toString(); + } catch (IllegalArgumentException e) { + } + indexColumnDataTypes.put(dataType.toString(), indexColumnDataType); } + assertEquals(expected, indexColumnDataTypes.toString()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java index 11add1ffcb2..b653133e477 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/JDBCUtilTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,148 +27,154 @@ import java.util.Map; import java.util.Properties; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.hbase.client.Consistency; import org.apache.phoenix.query.QueryServices; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; import org.junit.Test; public class JDBCUtilTest { - - @Test - public void testGetCustomTracingAnnotationsWithNone() { - String url = "localhost;TenantId=abc;"; - Map customAnnotations = JDBCUtil.getAnnotations(url, new Properties()); - assertTrue(customAnnotations.isEmpty()); - } - - @Test - public void testGetCustomTracingAnnotationInBothPropertiesAndURL() { - String annotKey1 = "key1"; - String annotVal1 = "val1"; - String annotKey2 = "key2"; - String annotVal2 = "val2"; - String annotKey3 = "key3"; - String annotVal3 = "val3"; - - String url= "localhost;" + ANNOTATION_ATTRIB_PREFIX + annotKey1 + '=' + annotVal1; - - Properties prop = new Properties(); - prop.put(ANNOTATION_ATTRIB_PREFIX + annotKey2, annotVal2); - prop.put(ANNOTATION_ATTRIB_PREFIX + annotKey3, annotVal3); - - Map customAnnotations = JDBCUtil.getAnnotations(url, prop); - assertEquals(3, customAnnotations.size()); - assertEquals(annotVal1, customAnnotations.get(annotKey1)); - assertEquals(annotVal2, customAnnotations.get(annotKey2)); - assertEquals(annotVal3, customAnnotations.get(annotKey3)); - } - - @Test - public void testRemoveProperty() { - assertEquals("localhost;", JDBCUtil.removeProperty("localhost;TenantId=abc;", TENANT_ID_ATTRIB)); - assertEquals("localhost;foo=bar", JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", TENANT_ID_ATTRIB)); - assertEquals("localhost;TenantId=abc", JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", "foo")); - assertEquals("localhost;TenantId=abc;foo=bar", JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", "bar")); - } - - @Test - public void testGetAutoCommit_NotSpecified_DefaultTrue() { - assertTrue(JDBCUtil.getAutoCommit("localhost", new Properties(), true)); - } - - - @Test - public void testGetAutoCommit_NotSpecified_DefaultFalse() { - assertFalse(JDBCUtil.getAutoCommit("localhost", new Properties(), false)); - } - - @Test - public void testGetAutoCommit_TrueInUrl() { - assertTrue(JDBCUtil.getAutoCommit("localhost;AutoCommit=TrUe", new Properties(), false)); - } - - @Test - public void testGetAutoCommit_FalseInUrl() { - assertFalse(JDBCUtil.getAutoCommit("localhost;AutoCommit=FaLse", new Properties(), false)); - } - - @Test - public void testGetAutoCommit_TrueInProperties() { - Properties props = new Properties(); - props.setProperty("AutoCommit", "true"); - assertTrue(JDBCUtil.getAutoCommit("localhost", props, false)); - } - - @Test - public void testGetAutoCommit_FalseInProperties() { - Properties props = new Properties(); - props.setProperty("AutoCommit", "false"); - assertFalse(JDBCUtil.getAutoCommit("localhost", props, false)); - } - - @Test - public void testGetConsistency_TIMELINE_InUrl() { - assertTrue(JDBCUtil.getConsistencyLevel("localhost;Consistency=TIMELINE", new Properties(), - Consistency.STRONG.toString()) == Consistency.TIMELINE); - } - - @Test - public void testSchema() { - assertTrue(JDBCUtil.getSchema("localhost;schema=TEST", new Properties(), null).equals("TEST")); - assertNull(JDBCUtil.getSchema("localhost;schema=", new Properties(), null)); - assertNull(JDBCUtil.getSchema("localhost;", new Properties(), null)); - } - - @Test - public void 
testGetConsistency_TIMELINE_InProperties() { - Properties props = new Properties(); - props.setProperty(PhoenixRuntime.CONSISTENCY_ATTRIB, "TIMELINE"); - assertTrue(JDBCUtil.getConsistencyLevel("localhost", props, Consistency.STRONG.toString()) - == Consistency.TIMELINE); - } - - @Test - public void testGetMaxMutateBytes() throws Exception { - assertEquals(1000L, JDBCUtil.getMutateBatchSizeBytes("localhost;" + PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB + - "=1000", new Properties(), ReadOnlyProps.EMPTY_PROPS)); - - Properties props = new Properties(); - props.setProperty(PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB, "2000"); - assertEquals(2000L, JDBCUtil.getMutateBatchSizeBytes("localhost", props, ReadOnlyProps.EMPTY_PROPS)); - - Map propMap = Maps.newHashMap(); - propMap.put(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, "3000"); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(propMap); - assertEquals(3000L, JDBCUtil.getMutateBatchSizeBytes("localhost", new Properties(), readOnlyProps)); - } - - @Test - public void formatZookeeperUrlSameOrderTest() { - String zk1 = "zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase"; - String result = JDBCUtil.formatZookeeperUrl(zk1); - assertEquals(zk1,result); - } - - - @Test - public void formatZookeeperUrlDifferentOrderTest() { - String zk1 = "zk3.net,zk2.net,zk1.net:2181:/hbase"; - String result = JDBCUtil.formatZookeeperUrl(zk1); - assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase",result); - } - - @Test - public void formatZookeeperUrlNoTrailersTest() { - String zk1 = "zk1.net,zk2.net,zk3.net"; - String result = JDBCUtil.formatZookeeperUrl(zk1); - assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase",result); - } - - @Test - public void formatZookeeperUrlToLowercaseTest() { - String zk1 = "MYHOST1.NET,MYHOST2.NET"; - String result = JDBCUtil.formatZookeeperUrl(zk1); - assertEquals("myhost1.net\\:2181,myhost2.net\\:2181::/hbase",result); - } + + @Test + public void testGetCustomTracingAnnotationsWithNone() { + String url = "localhost;TenantId=abc;"; + Map customAnnotations = JDBCUtil.getAnnotations(url, new Properties()); + assertTrue(customAnnotations.isEmpty()); + } + + @Test + public void testGetCustomTracingAnnotationInBothPropertiesAndURL() { + String annotKey1 = "key1"; + String annotVal1 = "val1"; + String annotKey2 = "key2"; + String annotVal2 = "val2"; + String annotKey3 = "key3"; + String annotVal3 = "val3"; + + String url = "localhost;" + ANNOTATION_ATTRIB_PREFIX + annotKey1 + '=' + annotVal1; + + Properties prop = new Properties(); + prop.put(ANNOTATION_ATTRIB_PREFIX + annotKey2, annotVal2); + prop.put(ANNOTATION_ATTRIB_PREFIX + annotKey3, annotVal3); + + Map customAnnotations = JDBCUtil.getAnnotations(url, prop); + assertEquals(3, customAnnotations.size()); + assertEquals(annotVal1, customAnnotations.get(annotKey1)); + assertEquals(annotVal2, customAnnotations.get(annotKey2)); + assertEquals(annotVal3, customAnnotations.get(annotKey3)); + } + + @Test + public void testRemoveProperty() { + assertEquals("localhost;", + JDBCUtil.removeProperty("localhost;TenantId=abc;", TENANT_ID_ATTRIB)); + assertEquals("localhost;foo=bar", + JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", TENANT_ID_ATTRIB)); + assertEquals("localhost;TenantId=abc", + JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", "foo")); + assertEquals("localhost;TenantId=abc;foo=bar", + JDBCUtil.removeProperty("localhost;TenantId=abc;foo=bar", "bar")); + } + + @Test + public void testGetAutoCommit_NotSpecified_DefaultTrue() { 
+ assertTrue(JDBCUtil.getAutoCommit("localhost", new Properties(), true)); + } + + @Test + public void testGetAutoCommit_NotSpecified_DefaultFalse() { + assertFalse(JDBCUtil.getAutoCommit("localhost", new Properties(), false)); + } + + @Test + public void testGetAutoCommit_TrueInUrl() { + assertTrue(JDBCUtil.getAutoCommit("localhost;AutoCommit=TrUe", new Properties(), false)); + } + + @Test + public void testGetAutoCommit_FalseInUrl() { + assertFalse(JDBCUtil.getAutoCommit("localhost;AutoCommit=FaLse", new Properties(), false)); + } + + @Test + public void testGetAutoCommit_TrueInProperties() { + Properties props = new Properties(); + props.setProperty("AutoCommit", "true"); + assertTrue(JDBCUtil.getAutoCommit("localhost", props, false)); + } + + @Test + public void testGetAutoCommit_FalseInProperties() { + Properties props = new Properties(); + props.setProperty("AutoCommit", "false"); + assertFalse(JDBCUtil.getAutoCommit("localhost", props, false)); + } + + @Test + public void testGetConsistency_TIMELINE_InUrl() { + assertTrue(JDBCUtil.getConsistencyLevel("localhost;Consistency=TIMELINE", new Properties(), + Consistency.STRONG.toString()) == Consistency.TIMELINE); + } + + @Test + public void testSchema() { + assertTrue(JDBCUtil.getSchema("localhost;schema=TEST", new Properties(), null).equals("TEST")); + assertNull(JDBCUtil.getSchema("localhost;schema=", new Properties(), null)); + assertNull(JDBCUtil.getSchema("localhost;", new Properties(), null)); + } + + @Test + public void testGetConsistency_TIMELINE_InProperties() { + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.CONSISTENCY_ATTRIB, "TIMELINE"); + assertTrue(JDBCUtil.getConsistencyLevel("localhost", props, Consistency.STRONG.toString()) + == Consistency.TIMELINE); + } + + @Test + public void testGetMaxMutateBytes() throws Exception { + assertEquals(1000L, + JDBCUtil.getMutateBatchSizeBytes( + "localhost;" + PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB + "=1000", new Properties(), + ReadOnlyProps.EMPTY_PROPS)); + + Properties props = new Properties(); + props.setProperty(PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB, "2000"); + assertEquals(2000L, + JDBCUtil.getMutateBatchSizeBytes("localhost", props, ReadOnlyProps.EMPTY_PROPS)); + + Map propMap = Maps.newHashMap(); + propMap.put(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, "3000"); + ReadOnlyProps readOnlyProps = new ReadOnlyProps(propMap); + assertEquals(3000L, + JDBCUtil.getMutateBatchSizeBytes("localhost", new Properties(), readOnlyProps)); + } + + @Test + public void formatZookeeperUrlSameOrderTest() { + String zk1 = "zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase"; + String result = JDBCUtil.formatZookeeperUrl(zk1); + assertEquals(zk1, result); + } + + @Test + public void formatZookeeperUrlDifferentOrderTest() { + String zk1 = "zk3.net,zk2.net,zk1.net:2181:/hbase"; + String result = JDBCUtil.formatZookeeperUrl(zk1); + assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase", result); + } + + @Test + public void formatZookeeperUrlNoTrailersTest() { + String zk1 = "zk1.net,zk2.net,zk3.net"; + String result = JDBCUtil.formatZookeeperUrl(zk1); + assertEquals("zk1.net\\:2181,zk2.net\\:2181,zk3.net\\:2181::/hbase", result); + } + + @Test + public void formatZookeeperUrlToLowercaseTest() { + String zk1 = "MYHOST1.NET,MYHOST2.NET"; + String result = JDBCUtil.formatZookeeperUrl(zk1); + assertEquals("myhost1.net\\:2181,myhost2.net\\:2181::/hbase", result); + } } diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/util/LikeExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/LikeExpressionTest.java index d481ade6246..9569e15d4ee 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/LikeExpressionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/LikeExpressionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,21 +20,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import org.junit.Test; - import org.apache.phoenix.expression.LikeExpression; - +import org.junit.Test; public class LikeExpressionTest { - @Test - public void testWildcardToLikeExpression() { - String tableName = "PTSDB"; - assertTrue(tableName==LikeExpression.wildCardToLike(tableName)); - assertEquals("PRODUCT\\_METRICS", LikeExpression.wildCardToLike("PRODUCT_METRICS")); - assertEquals("PRODUCT%", LikeExpression.wildCardToLike("PRODUCT*")); - assertEquals("PRODUCT_METRICS", LikeExpression.wildCardToLike("PRODUCT?METRICS")); - assertEquals("PR?O%UCT%M*TRI_S", LikeExpression.wildCardToLike("PR\\?O*UCT*M\\*TRI?S")); - } - + @Test + public void testWildcardToLikeExpression() { + String tableName = "PTSDB"; + assertTrue(tableName == LikeExpression.wildCardToLike(tableName)); + assertEquals("PRODUCT\\_METRICS", LikeExpression.wildCardToLike("PRODUCT_METRICS")); + assertEquals("PRODUCT%", LikeExpression.wildCardToLike("PRODUCT*")); + assertEquals("PRODUCT_METRICS", LikeExpression.wildCardToLike("PRODUCT?METRICS")); + assertEquals("PR?O%UCT%M*TRI_S", LikeExpression.wildCardToLike("PR\\?O*UCT*M\\*TRI?S")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/LogUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/LogUtilTest.java index ee913d1aa07..0b3ab2571cf 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/LogUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/LogUtilTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,39 +22,39 @@ import static org.mockito.Mockito.when; import org.apache.phoenix.jdbc.PhoenixConnection; +import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; - @RunWith(MockitoJUnitRunner.class) public class LogUtilTest { - - @Mock PhoenixConnection con; - - @Test - public void testAddCustomAnnotationsWithNullConnection() { - String logLine = LogUtil.addCustomAnnotations("log line", (PhoenixConnection)null); - assertEquals(logLine, "log line"); - } - - @Test - public void testAddCustomAnnotationsWithNullAnnotations() { - when(con.getCustomTracingAnnotations()).thenReturn(null); - - String logLine = LogUtil.addCustomAnnotations("log line", con); - assertEquals(logLine, "log line"); - } - - @Test - public void testAddCustomAnnotations() { - when(con.getCustomTracingAnnotations()).thenReturn(ImmutableMap.of("a1", "v1", "a2", "v2")); - - String logLine = LogUtil.addCustomAnnotations("log line", con); - assertTrue(logLine.contains("log line")); - assertTrue(logLine.contains("a1=v1")); - assertTrue(logLine.contains("a2=v2")); - } + + @Mock + PhoenixConnection con; + + @Test + public void testAddCustomAnnotationsWithNullConnection() { + String logLine = LogUtil.addCustomAnnotations("log line", (PhoenixConnection) null); + assertEquals(logLine, "log line"); + } + + @Test + public void testAddCustomAnnotationsWithNullAnnotations() { + when(con.getCustomTracingAnnotations()).thenReturn(null); + + String logLine = LogUtil.addCustomAnnotations("log line", con); + assertEquals(logLine, "log line"); + } + + @Test + public void testAddCustomAnnotations() { + when(con.getCustomTracingAnnotations()).thenReturn(ImmutableMap.of("a1", "v1", "a2", "v2")); + + String logLine = LogUtil.addCustomAnnotations("log line", con); + assertTrue(logLine.contains("log line")); + assertTrue(logLine.contains("a1=v1")); + assertTrue(logLine.contains("a2=v2")); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java index 1f1525e3f27..9c7061d3ac9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,8 +17,10 @@ */ package org.apache.phoenix.util; +import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY; import static org.apache.phoenix.coprocessorclient.MetaDataEndpointImplConstants.VIEW_MODIFIED_PROPERTY_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_DDL_TIMESTAMP_BYTES; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY_BYTES; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -26,6 +28,12 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.util.Iterator; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -55,156 +63,162 @@ import org.junit.Test; import org.mockito.Mockito; -import java.util.Iterator; -import java.util.List; - -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES; - public class MetaDataUtilTest { - private static final byte[] ROW = Bytes.toBytes("row"); - private static final byte[] QUALIFIER = Bytes.toBytes("qual"); - private static final byte[] ORIGINAL_VALUE = Bytes.toBytes("generic-value"); - private static final byte[] DUMMY_TAGS = Bytes.toBytes("tags"); - private final ExtendedCellBuilder mockBuilder = Mockito.mock(ExtendedCellBuilder.class); - private final ExtendedCell mockCellWithTags = Mockito.mock(ExtendedCell.class); - - @Before - public void setupMockCellBuilder() { - Mockito.when(mockBuilder.setRow(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt())).thenReturn(mockBuilder); - Mockito.when(mockBuilder.setFamily(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt())).thenReturn(mockBuilder); - Mockito.when(mockBuilder.setQualifier(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt())).thenReturn(mockBuilder); - Mockito.when(mockBuilder.setValue(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt())).thenReturn(mockBuilder); - Mockito.when(mockBuilder.setTimestamp(Mockito.anyLong())).thenReturn(mockBuilder); - Mockito.when(mockBuilder.setType(Mockito.any(Cell.Type.class))) - .thenReturn(mockBuilder); - Mockito.when(mockBuilder.setTags(Mockito.any(byte[].class))) - .thenReturn(mockBuilder); - Mockito.when(mockBuilder.build()).thenReturn(mockCellWithTags); - } - - @Test - public void testEncode() { - assertEquals(VersionUtil.encodeVersion("0.94.5"),VersionUtil.encodeVersion("0.94.5-mapR")); - assertTrue(VersionUtil.encodeVersion("0.94.6")>VersionUtil.encodeVersion("0.94.5-mapR")); - assertTrue(VersionUtil.encodeVersion("0.94.6")>VersionUtil.encodeVersion("0.94.5")); - assertTrue(VersionUtil.encodeVersion("0.94.1-mapR")>VersionUtil.encodeVersion("0.94")); - assertTrue(VersionUtil.encodeVersion("1", "1", "3")>VersionUtil.encodeVersion("1", 
"1", "1")); - } - - @Test - public void testDecode() { - int encodedVersion = VersionUtil.encodeVersion("4.15.5"); - assertEquals(VersionUtil.decodeMajorVersion(encodedVersion), 4); - assertEquals(VersionUtil.decodeMinorVersion(encodedVersion), 15); - assertEquals(VersionUtil.decodePatchVersion(encodedVersion), 5); - } + private static final byte[] ROW = Bytes.toBytes("row"); + private static final byte[] QUALIFIER = Bytes.toBytes("qual"); + private static final byte[] ORIGINAL_VALUE = Bytes.toBytes("generic-value"); + private static final byte[] DUMMY_TAGS = Bytes.toBytes("tags"); + private final ExtendedCellBuilder mockBuilder = Mockito.mock(ExtendedCellBuilder.class); + private final ExtendedCell mockCellWithTags = Mockito.mock(ExtendedCell.class); + + @Before + public void setupMockCellBuilder() { + Mockito.when(mockBuilder.setRow(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(mockBuilder); + Mockito + .when(mockBuilder.setFamily(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(mockBuilder); + Mockito + .when(mockBuilder.setQualifier(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(mockBuilder); + Mockito + .when(mockBuilder.setValue(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(mockBuilder); + Mockito.when(mockBuilder.setTimestamp(Mockito.anyLong())).thenReturn(mockBuilder); + Mockito.when(mockBuilder.setType(Mockito.any(Cell.Type.class))).thenReturn(mockBuilder); + Mockito.when(mockBuilder.setTags(Mockito.any(byte[].class))).thenReturn(mockBuilder); + Mockito.when(mockBuilder.build()).thenReturn(mockCellWithTags); + } - @Test - public void testCompatibility() { - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,1), 1, 2).getIsCompatible()); - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,10), 1, 1).getIsCompatible()); - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,0), 1, 2).getIsCompatible()); - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1,2,255), 1, 2).getIsCompatible()); - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,2,0), 2, 0).getIsCompatible()); - assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2,10,36), 2, 9).getIsCompatible()); - assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 4, 0).getIsCompatible()); - assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 2, 0).getIsCompatible()); - assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 2).getIsCompatible()); - assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 5).getIsCompatible()); - } + @Test + public void testEncode() { + assertEquals(VersionUtil.encodeVersion("0.94.5"), VersionUtil.encodeVersion("0.94.5-mapR")); + assertTrue(VersionUtil.encodeVersion("0.94.6") > VersionUtil.encodeVersion("0.94.5-mapR")); + assertTrue(VersionUtil.encodeVersion("0.94.6") > VersionUtil.encodeVersion("0.94.5")); + assertTrue(VersionUtil.encodeVersion("0.94.1-mapR") > VersionUtil.encodeVersion("0.94")); + assertTrue(VersionUtil.encodeVersion("1", "1", "3") > VersionUtil.encodeVersion("1", "1", "1")); + } - @Test - public void testCompatibilityNewerClient() { - MetaDataUtil.ClientServerCompatibility compatibility1 = 
MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 4, 0); - assertFalse(compatibility1.getIsCompatible()); - assertEquals(compatibility1.getErrorCode(), SQLExceptionCode.OUTDATED_JARS.getErrorCode()); - MetaDataUtil.ClientServerCompatibility compatibility2 = MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 2); - assertFalse(compatibility2.getIsCompatible()); - assertEquals(compatibility2.getErrorCode(), SQLExceptionCode.OUTDATED_JARS.getErrorCode()); - } + @Test + public void testDecode() { + int encodedVersion = VersionUtil.encodeVersion("4.15.5"); + assertEquals(VersionUtil.decodeMajorVersion(encodedVersion), 4); + assertEquals(VersionUtil.decodeMinorVersion(encodedVersion), 15); + assertEquals(VersionUtil.decodePatchVersion(encodedVersion), 5); + } - @Test - public void testCompatibilityMismatchedMajorVersions() { - MetaDataUtil.ClientServerCompatibility compatibility = MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 2, 0); - assertFalse(compatibility.getIsCompatible()); - assertEquals(compatibility.getErrorCode(), SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()); - } + @Test + public void testCompatibility() { + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1, 2, 1), 1, 2) + .getIsCompatible()); + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1, 2, 10), 1, 1) + .getIsCompatible()); + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1, 2, 0), 1, 2) + .getIsCompatible()); + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(1, 2, 255), 1, 2) + .getIsCompatible()); + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2, 2, 0), 2, 0) + .getIsCompatible()); + assertTrue(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(2, 10, 36), 2, 9) + .getIsCompatible()); + assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 4, 0) + .getIsCompatible()); + assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 2, 0) + .getIsCompatible()); + assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 3, 2) + .getIsCompatible()); + assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 3, 5) + .getIsCompatible()); + } - @Test - public void testMutatingAPut() throws Exception { - Put put = generateOriginalPut(); - byte[] newValue = Bytes.toBytes("new-value"); - Cell cell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); - assertEquals(Bytes.toString(ORIGINAL_VALUE), - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); - MetaDataUtil.mutatePutValue(put, TABLE_FAMILY_BYTES, QUALIFIER, newValue); - cell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); - assertEquals(Bytes.toString(newValue), - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); - } + @Test + public void testCompatibilityNewerClient() { + MetaDataUtil.ClientServerCompatibility compatibility1 = + MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 4, 0); + assertFalse(compatibility1.getIsCompatible()); + assertEquals(compatibility1.getErrorCode(), SQLExceptionCode.OUTDATED_JARS.getErrorCode()); + MetaDataUtil.ClientServerCompatibility compatibility2 = + MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 3, 2); + 
assertFalse(compatibility2.getIsCompatible()); + assertEquals(compatibility2.getErrorCode(), SQLExceptionCode.OUTDATED_JARS.getErrorCode()); + } - @Test - public void testTaggingAPutWrongQualifier() throws Exception { - Put put = generateOriginalPut(); - Cell initialCell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); - - // Different qualifier, so no tags should be set - MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, EMPTY_BYTE_ARRAY, - mockBuilder, EMPTY_BYTE_ARRAY, DUMMY_TAGS); - verify(mockBuilder, never()).setTags(Mockito.any(byte[].class)); - Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); - assertEquals(initialCell, newCell); - assertNull(TagUtil.carryForwardTags(newCell)); - } + @Test + public void testCompatibilityMismatchedMajorVersions() { + MetaDataUtil.ClientServerCompatibility compatibility = + MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3, 1, 10), 2, 0); + assertFalse(compatibility.getIsCompatible()); + assertEquals(compatibility.getErrorCode(), + SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR.getErrorCode()); + } - @Test - public void testTaggingAPutUnconditionally() throws Exception { - Put put = generateOriginalPut(); + @Test + public void testMutatingAPut() throws Exception { + Put put = generateOriginalPut(); + byte[] newValue = Bytes.toBytes("new-value"); + Cell cell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); + assertEquals(Bytes.toString(ORIGINAL_VALUE), + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + MetaDataUtil.mutatePutValue(put, TABLE_FAMILY_BYTES, QUALIFIER, newValue); + cell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); + assertEquals(Bytes.toString(newValue), + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + } - // valueArray is null so we always set tags - MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, - mockBuilder, null, DUMMY_TAGS); - verify(mockBuilder, times(1)).setTags(Mockito.any(byte[].class)); - Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); - assertEquals(mockCellWithTags, newCell); - } + @Test + public void testTaggingAPutWrongQualifier() throws Exception { + Put put = generateOriginalPut(); + Cell initialCell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); + + // Different qualifier, so no tags should be set + MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, EMPTY_BYTE_ARRAY, + mockBuilder, EMPTY_BYTE_ARRAY, DUMMY_TAGS); + verify(mockBuilder, never()).setTags(Mockito.any(byte[].class)); + Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); + assertEquals(initialCell, newCell); + assertNull(TagUtil.carryForwardTags(newCell)); + } - @Test - public void testSkipTaggingAPutDueToSameCellValue() throws Exception { - Put put = generateOriginalPut(); - Cell initialCell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); - - // valueArray is set as the value stored in the cell, so we skip tagging the cell - MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, - mockBuilder, ORIGINAL_VALUE, DUMMY_TAGS); - verify(mockBuilder, never()).setTags(Mockito.any(byte[].class)); - Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); - assertEquals(initialCell, newCell); - assertNull(TagUtil.carryForwardTags(newCell)); - } + @Test + public void testTaggingAPutUnconditionally() throws Exception { + Put put = generateOriginalPut(); + + // valueArray is null so we always set tags + 
MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, mockBuilder, + null, DUMMY_TAGS); + verify(mockBuilder, times(1)).setTags(Mockito.any(byte[].class)); + Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); + assertEquals(mockCellWithTags, newCell); + } - @Test - public void testTaggingAPutDueToDifferentCellValue() throws Exception { - Put put = generateOriginalPut(); + @Test + public void testSkipTaggingAPutDueToSameCellValue() throws Exception { + Put put = generateOriginalPut(); + Cell initialCell = put.get(TABLE_FAMILY_BYTES, QUALIFIER).get(0); + + // valueArray is set as the value stored in the cell, so we skip tagging the cell + MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, mockBuilder, + ORIGINAL_VALUE, DUMMY_TAGS); + verify(mockBuilder, never()).setTags(Mockito.any(byte[].class)); + Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); + assertEquals(initialCell, newCell); + assertNull(TagUtil.carryForwardTags(newCell)); + } - // valueArray is set to a value different than the one in the cell, so we tag the cell - MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, - mockBuilder, EMPTY_BYTE_ARRAY, DUMMY_TAGS); - verify(mockBuilder, times(1)).setTags(Mockito.any(byte[].class)); - Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); - assertEquals(mockCellWithTags, newCell); - } + @Test + public void testTaggingAPutDueToDifferentCellValue() throws Exception { + Put put = generateOriginalPut(); + + // valueArray is set to a value different than the one in the cell, so we tag the cell + MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, QUALIFIER, mockBuilder, + EMPTY_BYTE_ARRAY, DUMMY_TAGS); + verify(mockBuilder, times(1)).setTags(Mockito.any(byte[].class)); + Cell newCell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); + assertEquals(mockCellWithTags, newCell); + } - /** + /** * Ensure it supports {@link GenericKeyValueBuilder} * @throws Exception on failure */ @@ -212,34 +226,34 @@ public void testTaggingAPutDueToDifferentCellValue() throws Exception { public void testGetMutationKeyValue() throws Exception { String version = VersionInfo.getVersion(); KeyValueBuilder builder = KeyValueBuilder.get(version); - KeyValue kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), - wrap(ORIGINAL_VALUE)); + KeyValue kv = + builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), wrap(ORIGINAL_VALUE)); Put put = new Put(ROW); KeyValueBuilder.addQuietly(put, kv); // read back out the value ImmutableBytesPtr ptr = new ImmutableBytesPtr(); assertTrue(MetaDataUtil.getMutationValue(put, QUALIFIER, builder, ptr)); - assertEquals("Value returned doesn't match stored value for " + builder.getClass().getName() - + "!", 0, + assertEquals( + "Value returned doesn't match stored value for " + builder.getClass().getName() + "!", 0, ByteUtil.BYTES_PTR_COMPARATOR.compare(ptr, wrap(ORIGINAL_VALUE))); // try again, this time with the clientkeyvalue builder if (builder != GenericKeyValueBuilder.INSTANCE) { - builder = GenericKeyValueBuilder.INSTANCE; - byte[] value = Bytes.toBytes("client-value"); - kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), wrap(value)); - put = new Put(ROW); - KeyValueBuilder.addQuietly(put, kv); - - // read back out the value - assertTrue(MetaDataUtil.getMutationValue(put, QUALIFIER, builder, ptr)); - assertEquals("Value returned doesn't match stored value for 
" + builder.getClass().getName() - + "!", 0, - ByteUtil.BYTES_PTR_COMPARATOR.compare(ptr, wrap(value))); - - // ensure that we don't get matches for qualifiers that don't match - assertFalse(MetaDataUtil.getMutationValue(put, Bytes.toBytes("not a match"), builder, ptr)); + builder = GenericKeyValueBuilder.INSTANCE; + byte[] value = Bytes.toBytes("client-value"); + kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), wrap(value)); + put = new Put(ROW); + KeyValueBuilder.addQuietly(put, kv); + + // read back out the value + assertTrue(MetaDataUtil.getMutationValue(put, QUALIFIER, builder, ptr)); + assertEquals( + "Value returned doesn't match stored value for " + builder.getClass().getName() + "!", 0, + ByteUtil.BYTES_PTR_COMPARATOR.compare(ptr, wrap(value))); + + // ensure that we don't get matches for qualifiers that don't match + assertFalse(MetaDataUtil.getMutationValue(put, Bytes.toBytes("not a match"), builder, ptr)); } } @@ -247,99 +261,105 @@ private static ImmutableBytesPtr wrap(byte[] bytes) { return new ImmutableBytesPtr(bytes); } - @Test - public void testEncodeDecode() { - String hbaseVersionStr = "0.98.14"; - Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); - config.setBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, false); - config.setBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, false); - - long version = MetaDataUtil.encodeVersion(hbaseVersionStr, config); - int hbaseVersion = MetaDataUtil.decodeHBaseVersion(version); - int expectedHBaseVersion = VersionUtil.encodeVersion(0, 98, 14); - assertEquals(expectedHBaseVersion, hbaseVersion); - boolean isTableNamespaceMappingEnabled = MetaDataUtil.decodeTableNamespaceMappingEnabled(version); - assertFalse(isTableNamespaceMappingEnabled); - int phoenixVersion = MetaDataUtil.decodePhoenixVersion(version); - int expectedPhoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, - MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); - assertEquals(expectedPhoenixVersion, phoenixVersion); - - config.setBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, true); - - version = MetaDataUtil.encodeVersion(hbaseVersionStr, config); - hbaseVersion = MetaDataUtil.decodeHBaseVersion(version); - expectedHBaseVersion = VersionUtil.encodeVersion(0, 98, 14); - assertEquals(expectedHBaseVersion, hbaseVersion); - isTableNamespaceMappingEnabled = MetaDataUtil.decodeTableNamespaceMappingEnabled(version); - assertTrue(isTableNamespaceMappingEnabled); - phoenixVersion = MetaDataUtil.decodePhoenixVersion(version); - expectedPhoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, - MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); - assertEquals(expectedPhoenixVersion, phoenixVersion); - } + @Test + public void testEncodeDecode() { + String hbaseVersionStr = "0.98.14"; + Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration(); + config.setBoolean(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, false); + config.setBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, false); + + long version = MetaDataUtil.encodeVersion(hbaseVersionStr, config); + int hbaseVersion = MetaDataUtil.decodeHBaseVersion(version); + int expectedHBaseVersion = VersionUtil.encodeVersion(0, 98, 14); + assertEquals(expectedHBaseVersion, hbaseVersion); + boolean isTableNamespaceMappingEnabled = + MetaDataUtil.decodeTableNamespaceMappingEnabled(version); + 
assertFalse(isTableNamespaceMappingEnabled); + int phoenixVersion = MetaDataUtil.decodePhoenixVersion(version); + int expectedPhoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, + MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); + assertEquals(expectedPhoenixVersion, phoenixVersion); + + config.setBoolean(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, true); + + version = MetaDataUtil.encodeVersion(hbaseVersionStr, config); + hbaseVersion = MetaDataUtil.decodeHBaseVersion(version); + expectedHBaseVersion = VersionUtil.encodeVersion(0, 98, 14); + assertEquals(expectedHBaseVersion, hbaseVersion); + isTableNamespaceMappingEnabled = MetaDataUtil.decodeTableNamespaceMappingEnabled(version); + assertTrue(isTableNamespaceMappingEnabled); + phoenixVersion = MetaDataUtil.decodePhoenixVersion(version); + expectedPhoenixVersion = VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, + MetaDataProtocol.PHOENIX_MINOR_VERSION, MetaDataProtocol.PHOENIX_PATCH_NUMBER); + assertEquals(expectedPhoenixVersion, phoenixVersion); + } - private Put generateOriginalPut() { - String version = VersionInfo.getVersion(); - KeyValueBuilder builder = KeyValueBuilder.get(version); - KeyValue kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), - wrap(ORIGINAL_VALUE)); - Put put = new Put(ROW); - KeyValueBuilder.addQuietly(put, kv); - return put; - } + private Put generateOriginalPut() { + String version = VersionInfo.getVersion(); + KeyValueBuilder builder = KeyValueBuilder.get(version); + KeyValue kv = + builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(QUALIFIER), wrap(ORIGINAL_VALUE)); + Put put = new Put(ROW); + KeyValueBuilder.addQuietly(put, kv); + return put; + } - @Test - public void testConditionallyAddTagsToPutCells( ) { - List tags = TagUtil.asList(VIEW_MODIFIED_PROPERTY_BYTES, 0, VIEW_MODIFIED_PROPERTY_BYTES.length); - assertEquals(tags.size(), 1); - Tag expectedTag = tags.get(0); - - String version = VersionInfo.getVersion(); - KeyValueBuilder builder = KeyValueBuilder.get(version); - KeyValue kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), wrap(UPDATE_CACHE_FREQUENCY_BYTES), wrap( - PLong.INSTANCE.toBytes(0))); - Put put = new Put(ROW); - KeyValueBuilder.addQuietly(put, kv); - - ExtendedCellBuilder cellBuilder = (ExtendedCellBuilder) RawCellBuilderFactory.create(); - MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES, cellBuilder, - PInteger.INSTANCE.toBytes(1), VIEW_MODIFIED_PROPERTY_BYTES); - - Cell cell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); - - // To check the cell tag whether view has modified this property - assertTrue(Bytes.compareTo(expectedTag.getValueArray(), TagUtil.concatTags(EMPTY_BYTE_ARRAY, cell)) == 0); - assertTrue(Bytes.contains(TagUtil.concatTags(EMPTY_BYTE_ARRAY, cell), expectedTag.getValueArray())); - - // To check tag data can be correctly deserialized - Iterator tagIterator = PrivateCellUtil.tagsIterator(cell); - assertTrue(tagIterator.hasNext()); - Tag actualTag = tagIterator.next(); - assertTrue(Bytes.compareTo(actualTag.getValueArray(), actualTag.getValueOffset(), actualTag.getValueLength(), - expectedTag.getValueArray(), expectedTag.getValueOffset(), expectedTag.getValueLength()) == 0); - assertFalse(tagIterator.hasNext()); - } + @Test + public void testConditionallyAddTagsToPutCells() { + List tags = + TagUtil.asList(VIEW_MODIFIED_PROPERTY_BYTES, 0, VIEW_MODIFIED_PROPERTY_BYTES.length); + 
assertEquals(tags.size(), 1); + Tag expectedTag = tags.get(0); - @Test - public void testGetLastDDLTimestampUpdate() throws Exception { - byte[] tableHeaderRowKey = SchemaUtil.getTableKey("TenantId", "schema", "table"); - long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); - long clientTimestamp = serverTimestamp - 1000L; - Put p = MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, clientTimestamp, - serverTimestamp); - assertNotNull(p); - assertFalse("Mutation is empty!", p.isEmpty()); - assertArrayEquals(tableHeaderRowKey, p.getRow()); - assertEquals(clientTimestamp, p.getTimestamp()); - assertTrue(p.cellScanner().advance()); - List cells = p.get(TABLE_FAMILY_BYTES, LAST_DDL_TIMESTAMP_BYTES); - assertNotNull(cells); - assertTrue(cells.size() > 0); - Cell c = cells.get(0); - assertNotNull("Cell is null!", c); - assertEquals(serverTimestamp, PLong.INSTANCE.getCodec().decodeLong(CellUtil.cloneValue(c), - 0, SortOrder.ASC)); - } -} + String version = VersionInfo.getVersion(); + KeyValueBuilder builder = KeyValueBuilder.get(version); + KeyValue kv = builder.buildPut(wrap(ROW), wrap(TABLE_FAMILY_BYTES), + wrap(UPDATE_CACHE_FREQUENCY_BYTES), wrap(PLong.INSTANCE.toBytes(0))); + Put put = new Put(ROW); + KeyValueBuilder.addQuietly(put, kv); + ExtendedCellBuilder cellBuilder = (ExtendedCellBuilder) RawCellBuilderFactory.create(); + MetaDataUtil.conditionallyAddTagsToPutCells(put, TABLE_FAMILY_BYTES, + UPDATE_CACHE_FREQUENCY_BYTES, cellBuilder, PInteger.INSTANCE.toBytes(1), + VIEW_MODIFIED_PROPERTY_BYTES); + + Cell cell = put.getFamilyCellMap().get(TABLE_FAMILY_BYTES).get(0); + + // To check the cell tag whether view has modified this property + assertTrue( + Bytes.compareTo(expectedTag.getValueArray(), TagUtil.concatTags(EMPTY_BYTE_ARRAY, cell)) + == 0); + assertTrue( + Bytes.contains(TagUtil.concatTags(EMPTY_BYTE_ARRAY, cell), expectedTag.getValueArray())); + + // To check tag data can be correctly deserialized + Iterator tagIterator = PrivateCellUtil.tagsIterator(cell); + assertTrue(tagIterator.hasNext()); + Tag actualTag = tagIterator.next(); + assertTrue(Bytes.compareTo(actualTag.getValueArray(), actualTag.getValueOffset(), + actualTag.getValueLength(), expectedTag.getValueArray(), expectedTag.getValueOffset(), + expectedTag.getValueLength()) == 0); + assertFalse(tagIterator.hasNext()); + } + + @Test + public void testGetLastDDLTimestampUpdate() throws Exception { + byte[] tableHeaderRowKey = SchemaUtil.getTableKey("TenantId", "schema", "table"); + long serverTimestamp = EnvironmentEdgeManager.currentTimeMillis(); + long clientTimestamp = serverTimestamp - 1000L; + Put p = + MetaDataUtil.getLastDDLTimestampUpdate(tableHeaderRowKey, clientTimestamp, serverTimestamp); + assertNotNull(p); + assertFalse("Mutation is empty!", p.isEmpty()); + assertArrayEquals(tableHeaderRowKey, p.getRow()); + assertEquals(clientTimestamp, p.getTimestamp()); + assertTrue(p.cellScanner().advance()); + List cells = p.get(TABLE_FAMILY_BYTES, LAST_DDL_TIMESTAMP_BYTES); + assertNotNull(cells); + assertTrue(cells.size() > 0); + Cell c = cells.get(0); + assertNotNull("Cell is null!", c); + assertEquals(serverTimestamp, + PLong.INSTANCE.getCodec().decodeLong(CellUtil.cloneValue(c), 0, SortOrder.ASC)); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MinVersionTestRunner.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MinVersionTestRunner.java index 8106f04cbf0..8f83d1ffa64 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/MinVersionTestRunner.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/util/MinVersionTestRunner.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,35 +30,35 @@ import org.junit.runners.model.InitializationError; /** - * * Conditionally run tests based on HBase version. Uses a - * @minVersion(versionStr) annotation on either the test class - * or the test method. - * + * @minVersion(versionStr) annotation on either the test class or the test method. */ public class MinVersionTestRunner extends BlockJUnit4ClassRunner { - - public MinVersionTestRunner(Class klass) throws InitializationError { - super(klass); - } - @Override - public void runChild(FrameworkMethod method, RunNotifier notifier) { - MinVersion methodCondition = method.getAnnotation(MinVersion.class); - MinVersion classCondition = this.getTestClass().getJavaClass().getAnnotation(MinVersion.class); - String versionStr = VersionInfo.getVersion(); - int version = VersionUtil.encodeVersion(versionStr); - if ( (methodCondition == null || version >= VersionUtil.encodeVersion(methodCondition.value())) - && (classCondition == null || version >= VersionUtil.encodeVersion(classCondition.value()))) { - super.runChild(method, notifier); - } else { - notifier.fireTestIgnored(describeChild(method)); - } + + public MinVersionTestRunner(Class klass) throws InitializationError { + super(klass); + } + + @Override + public void runChild(FrameworkMethod method, RunNotifier notifier) { + MinVersion methodCondition = method.getAnnotation(MinVersion.class); + MinVersion classCondition = this.getTestClass().getJavaClass().getAnnotation(MinVersion.class); + String versionStr = VersionInfo.getVersion(); + int version = VersionUtil.encodeVersion(versionStr); + if ( + (methodCondition == null || version >= VersionUtil.encodeVersion(methodCondition.value())) + && (classCondition == null || version >= VersionUtil.encodeVersion(classCondition.value())) + ) { + super.runChild(method, notifier); + } else { + notifier.fireTestIgnored(describeChild(method)); } - - @Target( {ElementType.TYPE, ElementType.METHOD} ) - @Retention(RetentionPolicy.RUNTIME) - public @interface MinVersion { - /** The minimum version supported for this test class or test method */ - String value(); - }} + } + @Target({ ElementType.TYPE, ElementType.METHOD }) + @Retention(RetentionPolicy.RUNTIME) + public @interface MinVersion { + /** The minimum version supported for this test class or test method */ + String value(); + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixContextExecutorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixContextExecutorTest.java index 4c85c85a6df..6e8272eb4da 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixContextExecutorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixContextExecutorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,33 +17,32 @@ */ package org.apache.phoenix.util; +import static org.junit.Assert.assertEquals; + import java.net.URL; import java.net.URLClassLoader; import java.util.concurrent.Callable; import org.junit.Test; -import static org.junit.Assert.assertEquals; - public class PhoenixContextExecutorTest { - @Test - public void testCall() { - URLClassLoader customerClassLoader = new URLClassLoader(new URL[]{}); - ClassLoader saveCcl = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(customerClassLoader); - try { - PhoenixContextExecutor.callWithoutPropagation(new Callable() { - @Override - public Object call() { - assertEquals( - PhoenixContextExecutor.class.getClassLoader(), - Thread.currentThread().getContextClassLoader()); - return null; - } - }); - } finally { - Thread.currentThread().setContextClassLoader(saveCcl); + @Test + public void testCall() { + URLClassLoader customerClassLoader = new URLClassLoader(new URL[] {}); + ClassLoader saveCcl = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(customerClassLoader); + try { + PhoenixContextExecutor.callWithoutPropagation(new Callable() { + @Override + public Object call() { + assertEquals(PhoenixContextExecutor.class.getClassLoader(), + Thread.currentThread().getContextClassLoader()); + return null; } - + }); + } finally { + Thread.currentThread().setContextClassLoader(saveCcl); } + + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixEncodeDecodeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixEncodeDecodeTest.java index a06d5e75506..75df67a2cbc 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixEncodeDecodeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixEncodeDecodeTest.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -26,45 +26,51 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.query.BaseConnectionlessQueryTest; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class PhoenixEncodeDecodeTest extends BaseConnectionlessQueryTest { - - @Test - public void testDecodeValues1() throws Exception { - testDecodeValues(false, false); - } - - @Test - public void testDecodeValues2() throws Exception { - testDecodeValues(true, false); - } - - @Test - public void testDecodeValues3() throws Exception { - testDecodeValues(true, true); - } - - @Test - public void testDecodeValues4() throws Exception { - testDecodeValues(false, true); - } - - @SuppressWarnings("unchecked") - private void testDecodeValues(boolean nullFixedWidth, boolean nullVariableWidth) throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - conn.createStatement().execute( - "CREATE TABLE T(pk1 CHAR(15) not null, pk2 VARCHAR not null, CF1.v1 DATE, CF2.v2 VARCHAR, CF2.v1 VARCHAR " + - "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); - - Date d = nullFixedWidth ? null : new Date(100); - String s = nullVariableWidth ? null : "foo"; - Object[] values = new Object[] {"def", "eid", d, s, s}; - byte[] bytes = PhoenixRuntime.encodeColumnValues(conn, "T", values, Lists.newArrayList(new Pair(null, "pk1"), new Pair(null, "pk2"), new Pair("cf1", "v1"), new Pair("cf2", "v2"), new Pair("cf2", "v1"))); - Object[] decodedValues = PhoenixRuntime.decodeColumnValues(conn, "T", bytes, Lists.newArrayList(new Pair(null, "pk1"), new Pair(null, "pk2"), new Pair("cf1", "v1"), new Pair("cf2", "v2"), new Pair("cf2", "v1"))); - assertEquals(Lists.newArrayList("def", "eid", d, s, s), Arrays.asList(decodedValues)); - } - + + @Test + public void testDecodeValues1() throws Exception { + testDecodeValues(false, false); + } + + @Test + public void testDecodeValues2() throws Exception { + testDecodeValues(true, false); + } + + @Test + public void testDecodeValues3() throws Exception { + testDecodeValues(true, true); + } + + @Test + public void testDecodeValues4() throws Exception { + testDecodeValues(false, true); + } + + @SuppressWarnings("unchecked") + private void testDecodeValues(boolean nullFixedWidth, boolean nullVariableWidth) + throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + conn.createStatement().execute( + "CREATE TABLE T(pk1 CHAR(15) not null, pk2 VARCHAR not null, CF1.v1 DATE, CF2.v2 VARCHAR, CF2.v1 VARCHAR " + + "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) "); + + Date d = nullFixedWidth ? null : new Date(100); + String s = nullVariableWidth ? 
null : "foo"; + Object[] values = new Object[] { "def", "eid", d, s, s }; + byte[] bytes = PhoenixRuntime.encodeColumnValues(conn, "T", values, + Lists.newArrayList(new Pair(null, "pk1"), + new Pair(null, "pk2"), new Pair("cf1", "v1"), + new Pair("cf2", "v2"), new Pair("cf2", "v1"))); + Object[] decodedValues = PhoenixRuntime.decodeColumnValues(conn, "T", bytes, + Lists.newArrayList(new Pair(null, "pk1"), + new Pair(null, "pk2"), new Pair("cf1", "v1"), + new Pair("cf2", "v2"), new Pair("cf2", "v1"))); + assertEquals(Lists.newArrayList("def", "eid", d, s, s), Arrays.asList(decodedValues)); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixMRJobUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixMRJobUtilTest.java index 27d66a226f5..831bf78d9f9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixMRJobUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixMRJobUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,32 +17,32 @@ */ package org.apache.phoenix.util; +import static org.junit.Assert.assertEquals; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Test; -import static org.junit.Assert.assertEquals; - public class PhoenixMRJobUtilTest { - @Test - public void testGetRMHostName(){ - Configuration config = HBaseConfiguration.create(); - String testRMAddress = "testRMhostName:portnumber"; + @Test + public void testGetRMHostName() { + Configuration config = HBaseConfiguration.create(); + String testRMAddress = "testRMhostName:portnumber"; - String address = PhoenixMRJobUtil.getRMWebAddress(config); - assertEquals(YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, address); + String address = PhoenixMRJobUtil.getRMWebAddress(config); + assertEquals(YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, address); - config.set(YarnConfiguration.RM_WEBAPP_ADDRESS, testRMAddress); - address = PhoenixMRJobUtil.getRMWebAddress(config); - assertEquals(testRMAddress, address); + config.set(YarnConfiguration.RM_WEBAPP_ADDRESS, testRMAddress); + address = PhoenixMRJobUtil.getRMWebAddress(config); + assertEquals(testRMAddress, address); - //HA mode - address = PhoenixMRJobUtil.getRMWebAddress(config, "rm11"); - assertEquals(YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, address); + // HA mode + address = PhoenixMRJobUtil.getRMWebAddress(config, "rm11"); + assertEquals(YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, address); - config.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm11", testRMAddress); - address = PhoenixMRJobUtil.getRMWebAddress(config, "rm11"); - assertEquals(testRMAddress, address); - } + config.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm11", testRMAddress); + address = PhoenixMRJobUtil.getRMWebAddress(config, "rm11"); + assertEquals(testRMAddress, address); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java index 5460e086bc3..e5ed3b8e798 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java +++ 
b/phoenix-core/src/test/java/org/apache/phoenix/util/PhoenixRuntimeTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import static org.junit.Assert.assertArrayEquals; @@ -26,8 +25,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; import java.sql.Connection; import java.sql.Date; import java.sql.DriverManager; @@ -39,7 +36,6 @@ import java.util.Properties; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.expression.Expression; @@ -48,308 +44,313 @@ import org.apache.phoenix.query.BaseConnectionlessQueryTest; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.PTable; -import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; import org.apache.phoenix.schema.TableNotFoundException; import org.apache.phoenix.schema.types.PDataType; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; +import org.junit.Test; public class PhoenixRuntimeTest extends BaseConnectionlessQueryTest { - @Test - public void testParseArguments_MinimalCase() { - PhoenixRuntime.ExecutionCommand execCmd = PhoenixRuntime.ExecutionCommand.parseArgs( - new String[] { "localhost", "test.csv" }); + @Test + public void testParseArguments_MinimalCase() { + PhoenixRuntime.ExecutionCommand execCmd = + PhoenixRuntime.ExecutionCommand.parseArgs(new String[] { "localhost", "test.csv" }); + assertEquals("localhost", execCmd.getConnectionString()); - assertEquals( - "localhost", - execCmd.getConnectionString()); + assertEquals(ImmutableList.of("test.csv"), execCmd.getInputFiles()); - assertEquals( - ImmutableList.of("test.csv"), - execCmd.getInputFiles()); + assertEquals(',', execCmd.getFieldDelimiter()); + assertEquals('"', execCmd.getQuoteCharacter()); + assertNull(execCmd.getEscapeCharacter()); - assertEquals(',', execCmd.getFieldDelimiter()); - assertEquals('"', execCmd.getQuoteCharacter()); - assertNull(execCmd.getEscapeCharacter()); + assertNull(execCmd.getTableName()); - assertNull(execCmd.getTableName()); + assertNull(execCmd.getColumns()); - assertNull(execCmd.getColumns()); + assertFalse(execCmd.isStrict()); - assertFalse(execCmd.isStrict()); + assertEquals(CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR, + execCmd.getArrayElementSeparator()); + } - assertEquals( - CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR, - execCmd.getArrayElementSeparator()); - } + @Test + public void testParseArguments_FullOption() { + PhoenixRuntime.ExecutionCommand execCmd = PhoenixRuntime.ExecutionCommand.parseArgs( + new String[] { "-t", "mytable", "myzkhost:2181", "--strict", "file1.sql", "test.csv", + "file2.sql", "--header", "one, two,three", "-a", "!", "-d", ":", "-q", "3", "-e", "4" }); - @Test - public void testParseArguments_FullOption() { - PhoenixRuntime.ExecutionCommand execCmd = 
PhoenixRuntime.ExecutionCommand.parseArgs( - new String[] { "-t", "mytable", "myzkhost:2181", "--strict", "file1.sql", - "test.csv", "file2.sql", "--header", "one, two,three", "-a", "!", "-d", - ":", "-q", "3", "-e", "4" }); + assertEquals("myzkhost:2181", execCmd.getConnectionString()); - assertEquals("myzkhost:2181", execCmd.getConnectionString()); + assertEquals(ImmutableList.of("file1.sql", "test.csv", "file2.sql"), execCmd.getInputFiles()); - assertEquals(ImmutableList.of("file1.sql", "test.csv", "file2.sql"), - execCmd.getInputFiles()); + assertEquals(':', execCmd.getFieldDelimiter()); + assertEquals('3', execCmd.getQuoteCharacter()); + assertEquals(Character.valueOf('4'), execCmd.getEscapeCharacter()); - assertEquals(':', execCmd.getFieldDelimiter()); - assertEquals('3', execCmd.getQuoteCharacter()); - assertEquals(Character.valueOf('4'), execCmd.getEscapeCharacter()); + assertEquals("mytable", execCmd.getTableName()); - assertEquals("mytable", execCmd.getTableName()); + assertEquals(ImmutableList.of("one", "two", "three"), execCmd.getColumns()); + assertTrue(execCmd.isStrict()); + assertEquals("!", execCmd.getArrayElementSeparator()); + } - assertEquals(ImmutableList.of("one", "two", "three"), execCmd.getColumns()); - assertTrue(execCmd.isStrict()); - assertEquals("!", execCmd.getArrayElementSeparator()); - } - - @Test - public void testGetPkColsEncodeDecode() throws Exception { - Connection conn = DriverManager.getConnection(getUrl()); - String ddl = "CREATE TABLE t (\n" + - "TENANT_ID VARCHAR NOT NULL,\n" + - "PARENT_ID CHAR(15) NOT NULL,\n" + - "CREATED_DATE DATE NOT NULL,\n" + - "ENTITY_HISTORY_ID CHAR(15) NOT NULL,\n" + - "DATA_TYPE VARCHAR,\n" + - "OLDVAL_STRING VARCHAR,\n" + - "NEWVAL_STRING VARCHAR\n" + - "CONSTRAINT PK PRIMARY KEY(TENANT_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID))" - + " MULTI_TENANT = true, IMMUTABLE_ROWS = true"; - conn.createStatement().execute(ddl); - String indexDDL = "CREATE INDEX i ON t (CREATED_DATE, PARENT_ID) INCLUDE (DATA_TYPE, OLDVAL_STRING, NEWVAL_STRING)"; - conn.createStatement().execute(indexDDL); - - String tenantId = "111111111111111"; - String parentId = "222222222222222"; - Date createdDate = new Date(System.currentTimeMillis()); - String ehId = "333333333333333"; - - Object[] values = new Object[] {tenantId, createdDate, parentId, ehId}; - QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).optimizeQuery("SELECT PARENT_ID FROM T WHERE CREATED_DATE > CURRENT_DATE()-1 AND TENANT_ID = '111111111111111'"); - List> pkColumns = PhoenixRuntime.getPkColsForSql(conn, plan); - String fullTableName = plan.getTableRef().getTable().getName().getString(); - assertEquals("I", fullTableName); - byte[] encodedValues = PhoenixRuntime.encodeColumnValues(conn, fullTableName, values, pkColumns); - Object[] decodedValues = PhoenixRuntime.decodeColumnValues(conn, fullTableName, encodedValues, pkColumns); - assertArrayEquals(values, decodedValues); - - plan = conn.createStatement().unwrap(PhoenixStatement.class).optimizeQuery("SELECT /*+ NO_INDEX */ ENTITY_HISTORY_ID FROM T"); - pkColumns = PhoenixRuntime.getPkColsForSql(conn, plan); - values = new Object[] {tenantId, parentId, createdDate, ehId}; - fullTableName = plan.getTableRef().getTable().getName().getString(); - assertEquals("T", fullTableName); - encodedValues = PhoenixRuntime.encodeColumnValues(conn, fullTableName, values, pkColumns); - decodedValues = PhoenixRuntime.decodeColumnValues(conn, fullTableName, encodedValues, pkColumns); - assertArrayEquals(values, 
decodedValues); - } - - @Test - public void testGetPkColsDataTypes() throws Exception { - Connection conn = DriverManager.getConnection(getUrl(), new Properties()); - int i = 0; - PDataType[] pTypes = PDataType.values(); - int size = pTypes.length; - StringBuilder sb = null; - try { - for (i = 0 ; i < size; i++) { - PDataType pType = pTypes[i]; - String sqlTypeName = pType.getSqlTypeName(); - if (sqlTypeName.equalsIgnoreCase("VARBINARY ARRAY")) { - // we don't support VARBINARY ARRAYS yet - // JIRA - https://issues.apache.org/jira/browse/PHOENIX-1329 - continue; - } - // Condition to check if a type can be used as a primary key. - if (!pType.canBePrimaryKey()) { - continue; - } - if (pType.isArrayType() && PDataType.arrayBaseType(pType).isFixedWidth() && PDataType.arrayBaseType(pType).getByteSize() == null) { - // Need to treat array type whose base type is of fixed width whose byte size is not known as a special case. - // Cannot just use the sql type name returned by PDataType.getSqlTypeName(). - String baseTypeName = PDataType.arrayBaseType(pType).getSqlTypeName(); - sqlTypeName = baseTypeName + "(15)" + " " + PDataType.ARRAY_TYPE_SUFFIX; - } else if (pType.isFixedWidth() && pType.getByteSize() == null) { - sqlTypeName = sqlTypeName + "(15)"; - } - String columnName = "col" + i; - String tableName = "t" + i; - - sb = new StringBuilder(100); - - // create a table by using the type name as returned by PDataType - sb.append("CREATE TABLE " + tableName + " ("); - sb.append(columnName + " " + sqlTypeName + " NOT NULL PRIMARY KEY, V1 VARCHAR)"); - conn.createStatement().execute(sb.toString()); - - // generate the optimized query plan by going through the pk of the table. - PreparedStatement stmt = conn.prepareStatement("SELECT * FROM " + tableName + " WHERE " + columnName + " = ?"); - Integer maxLength = pType.isFixedWidth() && pType.getByteSize() == null ? 15 : null; - stmt.setObject(1, pType.getSampleValue(maxLength)); - QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); - - // now go through the utility method, get column name and type name and - // try creating another table with the returned info. Use the query plan generated above. - // If table can be created with the returned sql type name, then great! - // It would mean "Roundtrip" of column data type name works. 
- List> pkCols = new ArrayList>(); - List dataTypes = new ArrayList(); - PhoenixRuntime.getPkColsDataTypesForSql(pkCols, dataTypes, plan, conn, true); - - tableName = "newt" + i; - columnName = "newCol" + i; - String roundTripSqlTypeName = dataTypes.get(0); - - // create a table by using the type name as returned by the utility method - sb = new StringBuilder(100); - sb.append("CREATE TABLE " + tableName + " ("); - sb.append(columnName + " " + roundTripSqlTypeName + " NOT NULL PRIMARY KEY)"); - conn.createStatement().execute(sb.toString()); - } - } catch (Exception e) { - fail("Failed sql: " + sb.toString() + ExceptionUtils.getStackTrace(e)); + @Test + public void testGetPkColsEncodeDecode() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + String ddl = + "CREATE TABLE t (\n" + "TENANT_ID VARCHAR NOT NULL,\n" + "PARENT_ID CHAR(15) NOT NULL,\n" + + "CREATED_DATE DATE NOT NULL,\n" + "ENTITY_HISTORY_ID CHAR(15) NOT NULL,\n" + + "DATA_TYPE VARCHAR,\n" + "OLDVAL_STRING VARCHAR,\n" + "NEWVAL_STRING VARCHAR\n" + + "CONSTRAINT PK PRIMARY KEY(TENANT_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID))" + + " MULTI_TENANT = true, IMMUTABLE_ROWS = true"; + conn.createStatement().execute(ddl); + String indexDDL = + "CREATE INDEX i ON t (CREATED_DATE, PARENT_ID) INCLUDE (DATA_TYPE, OLDVAL_STRING, NEWVAL_STRING)"; + conn.createStatement().execute(indexDDL); + + String tenantId = "111111111111111"; + String parentId = "222222222222222"; + Date createdDate = new Date(System.currentTimeMillis()); + String ehId = "333333333333333"; + + Object[] values = new Object[] { tenantId, createdDate, parentId, ehId }; + QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).optimizeQuery( + "SELECT PARENT_ID FROM T WHERE CREATED_DATE > CURRENT_DATE()-1 AND TENANT_ID = '111111111111111'"); + List> pkColumns = PhoenixRuntime.getPkColsForSql(conn, plan); + String fullTableName = plan.getTableRef().getTable().getName().getString(); + assertEquals("I", fullTableName); + byte[] encodedValues = + PhoenixRuntime.encodeColumnValues(conn, fullTableName, values, pkColumns); + Object[] decodedValues = + PhoenixRuntime.decodeColumnValues(conn, fullTableName, encodedValues, pkColumns); + assertArrayEquals(values, decodedValues); + + plan = conn.createStatement().unwrap(PhoenixStatement.class) + .optimizeQuery("SELECT /*+ NO_INDEX */ ENTITY_HISTORY_ID FROM T"); + pkColumns = PhoenixRuntime.getPkColsForSql(conn, plan); + values = new Object[] { tenantId, parentId, createdDate, ehId }; + fullTableName = plan.getTableRef().getTable().getName().getString(); + assertEquals("T", fullTableName); + encodedValues = PhoenixRuntime.encodeColumnValues(conn, fullTableName, values, pkColumns); + decodedValues = + PhoenixRuntime.decodeColumnValues(conn, fullTableName, encodedValues, pkColumns); + assertArrayEquals(values, decodedValues); + } + + @Test + public void testGetPkColsDataTypes() throws Exception { + Connection conn = DriverManager.getConnection(getUrl(), new Properties()); + int i = 0; + PDataType[] pTypes = PDataType.values(); + int size = pTypes.length; + StringBuilder sb = null; + try { + for (i = 0; i < size; i++) { + PDataType pType = pTypes[i]; + String sqlTypeName = pType.getSqlTypeName(); + if (sqlTypeName.equalsIgnoreCase("VARBINARY ARRAY")) { + // we don't support VARBINARY ARRAYS yet + // JIRA - https://issues.apache.org/jira/browse/PHOENIX-1329 + continue; } - } - - @Test - public void testGetTenantIdExpression() throws Exception { - Connection conn = 
DriverManager.getConnection(getUrl()); - Expression e1 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME); - assertNull(e1); - Expression e2 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME); - assertNotNull(e2); - - Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME); - assertNotNull(e3); - - conn.createStatement().execute("CREATE TABLE FOO (k VARCHAR PRIMARY KEY)"); - Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, "FOO"); - assertNull(e4); - - conn.createStatement().execute("CREATE TABLE A.BAR (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true"); - Expression e5 = PhoenixRuntime.getTenantIdExpression(conn, "A.BAR"); - assertNotNull(e5); - - conn.createStatement().execute("CREATE INDEX I1 ON A.BAR (K2)"); - Expression e5A = PhoenixRuntime.getTenantIdExpression(conn, "A.I1"); - assertNotNull(e5A); - - conn.createStatement().execute("CREATE TABLE BAS (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true, SALT_BUCKETS=3"); - Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, "BAS"); - assertNotNull(e6); - - conn.createStatement().execute("CREATE INDEX I2 ON BAS (K2)"); - Expression e6A = PhoenixRuntime.getTenantIdExpression(conn, "I2"); - assertNotNull(e6A); - - try { - PhoenixRuntime.getTenantIdExpression(conn, "NOT.ATABLE"); - fail(); - } catch (TableNotFoundException e) { - // Expected + // Condition to check if a type can be used as a primary key. + if (!pType.canBePrimaryKey()) { + continue; } - - Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); - props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1"); - Connection tsconn = DriverManager.getConnection(getUrl(), props); - tsconn.createStatement().execute("CREATE VIEW V(V1 VARCHAR) AS SELECT * FROM BAS"); - Expression e7 = PhoenixRuntime.getTenantIdExpression(tsconn, "V"); - assertNotNull(e7); - tsconn.createStatement().execute("CREATE LOCAL INDEX I3 ON V (V1)"); - try { - PhoenixRuntime.getTenantIdExpression(tsconn, "I3"); - fail(); - } catch (SQLFeatureNotSupportedException e) { - // Expected + if ( + pType.isArrayType() && PDataType.arrayBaseType(pType).isFixedWidth() + && PDataType.arrayBaseType(pType).getByteSize() == null + ) { + // Need to treat array type whose base type is of fixed width whose byte size is not known + // as a special case. + // Cannot just use the sql type name returned by PDataType.getSqlTypeName(). + String baseTypeName = PDataType.arrayBaseType(pType).getSqlTypeName(); + sqlTypeName = baseTypeName + "(15)" + " " + PDataType.ARRAY_TYPE_SUFFIX; + } else if (pType.isFixedWidth() && pType.getByteSize() == null) { + sqlTypeName = sqlTypeName + "(15)"; } - } - - @Test - public void testTableNameWithoutSchema() throws Exception { - String tableName = "tableName"; - String tableNameNormalized = tableName.toUpperCase(); - - getTableTester(tableNameNormalized, tableName); - } + String columnName = "col" + i; + String tableName = "t" + i; - @Test - public void testTableNameWithSchema() throws Exception { - String tableName = "tableName"; - String schemaName = "schemaName"; - String fullName = schemaName + "." 
+ tableName; - String fullNameNormalized = fullName.toUpperCase(); - - getTableTester(fullNameNormalized, fullName); - } - - @Test - public void testCaseSensitiveTableNameWithoutSchema() throws Exception { - String caseSensitiveTableName = "tableName"; - - getTableTester(caseSensitiveTableName, quoteString(caseSensitiveTableName)); - } - - @Test - public void testCaseSensitiveTableNameWithSchema() throws Exception { - String caseSensitiveTableName = "tableName"; - String schemaName = "schemaName"; - String fullNameNormalized = schemaName.toUpperCase() + "." + caseSensitiveTableName; - String fullNameQuoted = schemaName + "." + quoteString(caseSensitiveTableName); - - getTableTester(fullNameNormalized, fullNameQuoted); - } - - @Test - public void testCaseSensitiveTableNameWithCaseSensitiveSchema() throws Exception { - String caseSensitiveTableName = "tableName"; - String caseSensitiveSchemaName = "schemaName"; - String fullName = caseSensitiveSchemaName + "." + caseSensitiveTableName; - String fullNameQuoted = quoteString(caseSensitiveSchemaName) + "." + quoteString(caseSensitiveTableName); - - getTableTester(fullName, fullNameQuoted); - } + sb = new StringBuilder(100); - @Test - public void testCaseSensitiveTableNameWithCaseSensitiveSchemaWithPeriod() throws Exception { - String caseSensitiveTableName = "tableName"; - String caseSensitiveSchemaName = "schema.Name"; - String fullName = caseSensitiveSchemaName + "." + caseSensitiveTableName; - String fullNameQuoted = quoteString(caseSensitiveSchemaName) + "." + quoteString(caseSensitiveTableName); - - getTableTester(fullName, fullNameQuoted); + // create a table by using the type name as returned by PDataType + sb.append("CREATE TABLE " + tableName + " ("); + sb.append(columnName + " " + sqlTypeName + " NOT NULL PRIMARY KEY, V1 VARCHAR)"); + conn.createStatement().execute(sb.toString()); + + // generate the optimized query plan by going through the pk of the table. + PreparedStatement stmt = + conn.prepareStatement("SELECT * FROM " + tableName + " WHERE " + columnName + " = ?"); + Integer maxLength = pType.isFixedWidth() && pType.getByteSize() == null ? 15 : null; + stmt.setObject(1, pType.getSampleValue(maxLength)); + QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt); + + // now go through the utility method, get column name and type name and + // try creating another table with the returned info. Use the query plan generated above. + // If table can be created with the returned sql type name, then great! + // It would mean "Roundtrip" of column data type name works. 
+ List> pkCols = new ArrayList>(); + List dataTypes = new ArrayList(); + PhoenixRuntime.getPkColsDataTypesForSql(pkCols, dataTypes, plan, conn, true); + + tableName = "newt" + i; + columnName = "newCol" + i; + String roundTripSqlTypeName = dataTypes.get(0); + + // create a table by using the type name as returned by the utility method + sb = new StringBuilder(100); + sb.append("CREATE TABLE " + tableName + " ("); + sb.append(columnName + " " + roundTripSqlTypeName + " NOT NULL PRIMARY KEY)"); + conn.createStatement().execute(sb.toString()); + } + } catch (Exception e) { + fail("Failed sql: " + sb.toString() + ExceptionUtils.getStackTrace(e)); } - - private void getTableTester(String normalizedName, String sqlStatementName) throws SQLException { - Connection conn = DriverManager.getConnection(getUrl()); - try { - conn.createStatement().execute("CREATE TABLE " + sqlStatementName + " (k VARCHAR PRIMARY KEY)"); - PTable aTable = PhoenixRuntime.getTable(conn, normalizedName); - assertNotNull(aTable); - } finally { - if (null != conn) { - conn.createStatement().execute("DROP TABLE IF EXISTS " + sqlStatementName); - } - } + } + + @Test + public void testGetTenantIdExpression() throws Exception { + Connection conn = DriverManager.getConnection(getUrl()); + Expression e1 = + PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME); + assertNull(e1); + Expression e2 = + PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME); + assertNotNull(e2); + + Expression e3 = + PhoenixRuntime.getTenantIdExpression(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME); + assertNotNull(e3); + + conn.createStatement().execute("CREATE TABLE FOO (k VARCHAR PRIMARY KEY)"); + Expression e4 = PhoenixRuntime.getTenantIdExpression(conn, "FOO"); + assertNull(e4); + + conn.createStatement().execute( + "CREATE TABLE A.BAR (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true"); + Expression e5 = PhoenixRuntime.getTenantIdExpression(conn, "A.BAR"); + assertNotNull(e5); + + conn.createStatement().execute("CREATE INDEX I1 ON A.BAR (K2)"); + Expression e5A = PhoenixRuntime.getTenantIdExpression(conn, "A.I1"); + assertNotNull(e5A); + + conn.createStatement().execute( + "CREATE TABLE BAS (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true, SALT_BUCKETS=3"); + Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, "BAS"); + assertNotNull(e6); + + conn.createStatement().execute("CREATE INDEX I2 ON BAS (K2)"); + Expression e6A = PhoenixRuntime.getTenantIdExpression(conn, "I2"); + assertNotNull(e6A); + + try { + PhoenixRuntime.getTenantIdExpression(conn, "NOT.ATABLE"); + fail(); + } catch (TableNotFoundException e) { + // Expected } - - private String quoteString(String string) { - return "\"" + string + "\""; + + Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES); + props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1"); + Connection tsconn = DriverManager.getConnection(getUrl(), props); + tsconn.createStatement().execute("CREATE VIEW V(V1 VARCHAR) AS SELECT * FROM BAS"); + Expression e7 = PhoenixRuntime.getTenantIdExpression(tsconn, "V"); + assertNotNull(e7); + tsconn.createStatement().execute("CREATE LOCAL INDEX I3 ON V (V1)"); + try { + PhoenixRuntime.getTenantIdExpression(tsconn, "I3"); + fail(); + } catch (SQLFeatureNotSupportedException e) { + // Expected } + } + + @Test + public void testTableNameWithoutSchema() throws Exception { + String tableName = "tableName"; + String 
tableNameNormalized = tableName.toUpperCase(); + + getTableTester(tableNameNormalized, tableName); + } + + @Test + public void testTableNameWithSchema() throws Exception { + String tableName = "tableName"; + String schemaName = "schemaName"; + String fullName = schemaName + "." + tableName; + String fullNameNormalized = fullName.toUpperCase(); - @Test - public void testGetWallClockTimeFromCellTimeStamp() { - long ts = System.currentTimeMillis(); - assertEquals(ts, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(ts)); - long nanoTs = TransactionUtil.convertToNanoseconds(ts); - assertEquals(ts, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(nanoTs)); - long skewedTs = ts + QueryConstants.MILLIS_IN_DAY; // skew of a day - // Even with a day of skew, we won't consider the ts a nanos timestamp - assertEquals(skewedTs, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(skewedTs)); + getTableTester(fullNameNormalized, fullName); + } + + @Test + public void testCaseSensitiveTableNameWithoutSchema() throws Exception { + String caseSensitiveTableName = "tableName"; + + getTableTester(caseSensitiveTableName, quoteString(caseSensitiveTableName)); + } + + @Test + public void testCaseSensitiveTableNameWithSchema() throws Exception { + String caseSensitiveTableName = "tableName"; + String schemaName = "schemaName"; + String fullNameNormalized = schemaName.toUpperCase() + "." + caseSensitiveTableName; + String fullNameQuoted = schemaName + "." + quoteString(caseSensitiveTableName); + + getTableTester(fullNameNormalized, fullNameQuoted); + } + + @Test + public void testCaseSensitiveTableNameWithCaseSensitiveSchema() throws Exception { + String caseSensitiveTableName = "tableName"; + String caseSensitiveSchemaName = "schemaName"; + String fullName = caseSensitiveSchemaName + "." + caseSensitiveTableName; + String fullNameQuoted = + quoteString(caseSensitiveSchemaName) + "." + quoteString(caseSensitiveTableName); + + getTableTester(fullName, fullNameQuoted); + } + + @Test + public void testCaseSensitiveTableNameWithCaseSensitiveSchemaWithPeriod() throws Exception { + String caseSensitiveTableName = "tableName"; + String caseSensitiveSchemaName = "schema.Name"; + String fullName = caseSensitiveSchemaName + "." + caseSensitiveTableName; + String fullNameQuoted = + quoteString(caseSensitiveSchemaName) + "." 
+ quoteString(caseSensitiveTableName); + + getTableTester(fullName, fullNameQuoted); + } + + private void getTableTester(String normalizedName, String sqlStatementName) throws SQLException { + Connection conn = DriverManager.getConnection(getUrl()); + try { + conn.createStatement() + .execute("CREATE TABLE " + sqlStatementName + " (k VARCHAR PRIMARY KEY)"); + PTable aTable = PhoenixRuntime.getTable(conn, normalizedName); + assertNotNull(aTable); + } finally { + if (null != conn) { + conn.createStatement().execute("DROP TABLE IF EXISTS " + sqlStatementName); + } } + } + + private String quoteString(String string) { + return "\"" + string + "\""; + } + + @Test + public void testGetWallClockTimeFromCellTimeStamp() { + long ts = System.currentTimeMillis(); + assertEquals(ts, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(ts)); + long nanoTs = TransactionUtil.convertToNanoseconds(ts); + assertEquals(ts, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(nanoTs)); + long skewedTs = ts + QueryConstants.MILLIS_IN_DAY; // skew of a day + // Even with a day of skew, we won't consider the ts a nanos timestamp + assertEquals(skewedTs, PhoenixRuntime.getWallClockTimeFromCellTimeStamp(skewedTs)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PrefixByteEncoderDecoderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PrefixByteEncoderDecoderTest.java index f8aa7dbf022..655a0d044a2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PrefixByteEncoderDecoderTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PrefixByteEncoderDecoderTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,64 +33,74 @@ import org.apache.phoenix.query.QueryConstants; import org.junit.Test; - public class PrefixByteEncoderDecoderTest { - static final List guideposts = Arrays.asList( - ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbccccc")), - ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(2000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(2000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(2000L), Bytes.toBytes("c")), - ByteUtil.concat(Bytes.toBytes("bbbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("d"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), - ByteUtil.concat(Bytes.toBytes("d"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbbc")), - ByteUtil.concat(Bytes.toBytes("e"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")) - ); - - @Test - public void testEncode() throws 
IOException { - List listOfBytes = Arrays.asList(Bytes.toBytes("aaaaa"), Bytes.toBytes("aaaabb")); - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int maxLength = PrefixByteCodec.encodeBytes(listOfBytes, ptr); - assertEquals(6, maxLength); - TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(PrefixByteCodec.calculateSize(listOfBytes)); - DataOutput output = new DataOutputStream(stream); - WritableUtils.writeVInt(output, 0); - WritableUtils.writeVInt(output, 5); - output.write(Bytes.toBytes("aaaaa")); // No space savings on first key - WritableUtils.writeVInt(output, 4); - WritableUtils.writeVInt(output, 2); - output.write(Bytes.toBytes("bb")); // Only writes part of second key that's different - assertArrayEquals(stream.toByteArray(), ptr.copyBytes()); - } - - @Test - public void testEncodeDecodeWithSingleBuffer() throws IOException { - testEncodeDecode(true); - } - - @Test - public void testEncodeDecodeWithNewBuffer() throws IOException { - testEncodeDecode(false); - } - - private void testEncodeDecode(boolean useSingleBuffer) throws IOException { - ImmutableBytesWritable ptr = new ImmutableBytesWritable(); - int maxLength = PrefixByteCodec.encodeBytes(guideposts, ptr); - int encodedSize = ptr.getLength(); - int unencodedSize = PrefixByteCodec.calculateSize(guideposts); - assertTrue(encodedSize < unencodedSize); - List listOfBytes = PrefixByteCodec.decodeBytes(ptr, useSingleBuffer ? maxLength : -1); - assertListByteArraysEquals(guideposts, listOfBytes); - } - - private static void assertListByteArraysEquals(List listOfBytes1, List listOfBytes2) { - assertEquals(listOfBytes1.size(), listOfBytes2.size()); - for (int i = 0; i < listOfBytes1.size(); i++) { - assertArrayEquals(listOfBytes1.get(i), listOfBytes2.get(i)); - } + static final List guideposts = Arrays.asList( + ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(1000L), Bytes.toBytes("bbbbbccccc")), + ByteUtil.concat(Bytes.toBytes("aaaaaaaaaa"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(2000L), Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(2000L), Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("bbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(2000L), Bytes.toBytes("c")), + ByteUtil.concat(Bytes.toBytes("bbbbbbbbbbb"), QueryConstants.SEPARATOR_BYTE_ARRAY, + Bytes.toBytes(1000L), Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("d"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), + Bytes.toBytes("bbbbbbbbbb")), + ByteUtil.concat(Bytes.toBytes("d"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), + Bytes.toBytes("bbbbbbbbbbc")), + ByteUtil.concat(Bytes.toBytes("e"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes(1000L), + Bytes.toBytes("bbbbbbbbbb"))); + + @Test + public void testEncode() throws IOException { + List listOfBytes = Arrays.asList(Bytes.toBytes("aaaaa"), Bytes.toBytes("aaaabb")); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int maxLength = PrefixByteCodec.encodeBytes(listOfBytes, ptr); + assertEquals(6, maxLength); + TrustedByteArrayOutputStream stream = + new 
TrustedByteArrayOutputStream(PrefixByteCodec.calculateSize(listOfBytes)); + DataOutput output = new DataOutputStream(stream); + WritableUtils.writeVInt(output, 0); + WritableUtils.writeVInt(output, 5); + output.write(Bytes.toBytes("aaaaa")); // No space savings on first key + WritableUtils.writeVInt(output, 4); + WritableUtils.writeVInt(output, 2); + output.write(Bytes.toBytes("bb")); // Only writes part of second key that's different + assertArrayEquals(stream.toByteArray(), ptr.copyBytes()); + } + + @Test + public void testEncodeDecodeWithSingleBuffer() throws IOException { + testEncodeDecode(true); + } + + @Test + public void testEncodeDecodeWithNewBuffer() throws IOException { + testEncodeDecode(false); + } + + private void testEncodeDecode(boolean useSingleBuffer) throws IOException { + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + int maxLength = PrefixByteCodec.encodeBytes(guideposts, ptr); + int encodedSize = ptr.getLength(); + int unencodedSize = PrefixByteCodec.calculateSize(guideposts); + assertTrue(encodedSize < unencodedSize); + List listOfBytes = PrefixByteCodec.decodeBytes(ptr, useSingleBuffer ? maxLength : -1); + assertListByteArraysEquals(guideposts, listOfBytes); + } + + private static void assertListByteArraysEquals(List listOfBytes1, + List listOfBytes2) { + assertEquals(listOfBytes1.size(), listOfBytes2.size()); + for (int i = 0; i < listOfBytes1.size(); i++) { + assertArrayEquals(listOfBytes1.get(i), listOfBytes2.get(i)); } + } -} \ No newline at end of file +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java index 6a33986c887..559834974ab 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/PropertiesUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,76 +21,77 @@ import java.sql.SQLException; import java.util.Properties; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; - import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtilHelper; import org.junit.Test; public class PropertiesUtilTest { - private static final String SOME_TENANT_ID = "00Dxx0000001234"; - private static final String SOME_OTHER_PROPERTY_KEY = "some_other_property"; - private static final String SOME_OTHER_PROPERTY_VALUE = "some_other_value"; - - @Test - public void testCopy() throws Exception{ - final Properties propsWithTenant = new Properties(); - propsWithTenant.put(PhoenixRuntime.TENANT_ID_ATTRIB, SOME_TENANT_ID); + private static final String SOME_TENANT_ID = "00Dxx0000001234"; + private static final String SOME_OTHER_PROPERTY_KEY = "some_other_property"; + private static final String SOME_OTHER_PROPERTY_VALUE = "some_other_value"; + + @Test + public void testCopy() throws Exception { + final Properties propsWithTenant = new Properties(); + propsWithTenant.put(PhoenixRuntime.TENANT_ID_ATTRIB, SOME_TENANT_ID); + + verifyValidCopy(propsWithTenant); + } - verifyValidCopy(propsWithTenant); - } + @Test + public void testCopyOnWrappedProperties() throws Exception { + final Properties propsWithTenant = new Properties(); + propsWithTenant.put(PhoenixRuntime.TENANT_ID_ATTRIB, SOME_TENANT_ID); - @Test - public void testCopyOnWrappedProperties() throws Exception{ - final Properties propsWithTenant = new Properties(); - propsWithTenant.put(PhoenixRuntime.TENANT_ID_ATTRIB, SOME_TENANT_ID); + verifyValidCopy(new Properties(propsWithTenant)); + } - verifyValidCopy(new Properties(propsWithTenant)); - } + @Test + public void testCopyFromConfiguration() throws Exception { + // make sure that we don't only copy the ZK quorum, but all + // properties + final Configuration conf = HBaseConfiguration.create(); + final Properties props = new Properties(); - @Test - public void testCopyFromConfiguration() throws Exception{ - //make sure that we don't only copy the ZK quorum, but all - //properties - final Configuration conf = HBaseConfiguration.create(); - final Properties props = new Properties(); - - conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); - conf.set(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY, - PropertiesUtilTest.SOME_OTHER_PROPERTY_VALUE); - Properties combinedProps = PropertiesUtil.combineProperties(props, conf); - assertEquals(combinedProps.getProperty(HConstants.ZOOKEEPER_QUORUM), - conf.get(HConstants.ZOOKEEPER_QUORUM)); - assertEquals(combinedProps.getProperty(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY), - conf.get(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY)); - } + conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); + conf.set(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY, + PropertiesUtilTest.SOME_OTHER_PROPERTY_VALUE); + Properties combinedProps = PropertiesUtil.combineProperties(props, conf); + assertEquals(combinedProps.getProperty(HConstants.ZOOKEEPER_QUORUM), + conf.get(HConstants.ZOOKEEPER_QUORUM)); + assertEquals(combinedProps.getProperty(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY), + conf.get(PropertiesUtilTest.SOME_OTHER_PROPERTY_KEY)); + } - @Test - public void 
testPropertyOverrideRespected() throws Exception { - final Configuration conf = HBaseConfiguration.create(); - final Properties props = new Properties(); - props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY, - Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10)); - Properties combinedProps = PropertiesUtil.combineProperties(props, conf); - assertEquals(combinedProps.getProperty(HConstants.HBASE_RPC_TIMEOUT_KEY), - Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10)); - } + @Test + public void testPropertyOverrideRespected() throws Exception { + final Configuration conf = HBaseConfiguration.create(); + final Properties props = new Properties(); + props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY, + Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10)); + Properties combinedProps = PropertiesUtil.combineProperties(props, conf); + assertEquals(combinedProps.getProperty(HConstants.HBASE_RPC_TIMEOUT_KEY), + Long.toString(HConstants.DEFAULT_HBASE_RPC_TIMEOUT * 10)); + } - @Test - public void testDeprecatedProperties() throws Exception { - final Configuration conf = HBaseConfiguration.create(); - conf.set("phoneix.mapreduce.output.cluster.quorum", "myoverridezookeeperhost"); - String test = PhoenixConfigurationUtilHelper.getOutputCluster(conf); - assertEquals("myoverridezookeeperhost", test); - assertEquals("myoverridezookeeperhost", - conf.get(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM)); - } + @Test + public void testDeprecatedProperties() throws Exception { + final Configuration conf = HBaseConfiguration.create(); + conf.set("phoneix.mapreduce.output.cluster.quorum", "myoverridezookeeperhost"); + String test = PhoenixConfigurationUtilHelper.getOutputCluster(conf); + assertEquals("myoverridezookeeperhost", test); + assertEquals("myoverridezookeeperhost", + conf.get(PhoenixConfigurationUtilHelper.MAPREDUCE_OUTPUT_CLUSTER_QUORUM)); + } - private void verifyValidCopy(Properties props) throws SQLException { - Properties copy = PropertiesUtil.deepCopy(props); - copy.containsKey(PhoenixRuntime.TENANT_ID_ATTRIB); //This checks the map and NOT the defaults in java.util.Properties - assertEquals(SOME_TENANT_ID, copy.getProperty(PhoenixRuntime.TENANT_ID_ATTRIB)); - } + private void verifyValidCopy(Properties props) throws SQLException { + Properties copy = PropertiesUtil.deepCopy(props); + copy.containsKey(PhoenixRuntime.TENANT_ID_ATTRIB); // This checks the map and NOT the defaults + // in java.util.Properties + assertEquals(SOME_TENANT_ID, copy.getProperty(PhoenixRuntime.TENANT_ID_ATTRIB)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/QualifierEncodingSchemeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/QualifierEncodingSchemeTest.java index 2b08d7d7ec0..74df4e59928 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/QualifierEncodingSchemeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/QualifierEncodingSchemeTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,92 +28,99 @@ import org.junit.Test; public class QualifierEncodingSchemeTest { - - @Test - public void testOneByteQualifierEncodeDecode() { - assertEquals(1, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(1))); - assertEquals(127, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(127))); - assertEquals(63, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(63))); - assertEquals(130, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(130))); - assertEquals(255, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(255))); - byte[] arr1 = ONE_BYTE_QUALIFIERS.encode(255); - byte[] arr2 = new byte[] {-128, arr1[0]}; - assertEquals(255, ONE_BYTE_QUALIFIERS.decode(arr2, 1, 1)); - try { - ONE_BYTE_QUALIFIERS.decode(arr2); - fail(); - } catch (InvalidQualifierBytesException expected) {} - try { - ONE_BYTE_QUALIFIERS.decode(arr2, 0, 2); - fail(); - } catch (InvalidQualifierBytesException expected) {} - + + @Test + public void testOneByteQualifierEncodeDecode() { + assertEquals(1, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(1))); + assertEquals(127, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(127))); + assertEquals(63, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(63))); + assertEquals(130, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(130))); + assertEquals(255, ONE_BYTE_QUALIFIERS.decode(ONE_BYTE_QUALIFIERS.encode(255))); + byte[] arr1 = ONE_BYTE_QUALIFIERS.encode(255); + byte[] arr2 = new byte[] { -128, arr1[0] }; + assertEquals(255, ONE_BYTE_QUALIFIERS.decode(arr2, 1, 1)); + try { + ONE_BYTE_QUALIFIERS.decode(arr2); + fail(); + } catch (InvalidQualifierBytesException expected) { + } + try { + ONE_BYTE_QUALIFIERS.decode(arr2, 0, 2); + fail(); + } catch (InvalidQualifierBytesException expected) { + } + + } + + @Test + public void testTwoByteQualifierEncodeDecode() { + assertEquals(1, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(1))); + assertEquals(127, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(127))); + assertEquals(63, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(63))); + assertEquals(130, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(130))); + assertEquals(128, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(128))); + assertEquals(129, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(129))); + assertEquals(32767, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(32767))); + assertEquals(32768, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(32768))); + assertEquals(65535, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(65535))); + byte[] arr1 = TWO_BYTE_QUALIFIERS.encode(65535); + byte[] arr2 = new byte[] { -128, arr1[0], arr1[1] }; + assertEquals(65535, TWO_BYTE_QUALIFIERS.decode(arr2, 1, 2)); + try { + TWO_BYTE_QUALIFIERS.decode(arr2); + fail(); + } catch (InvalidQualifierBytesException expected) { } - - @Test - public void testTwoByteQualifierEncodeDecode() { - assertEquals(1, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(1))); - assertEquals(127, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(127))); - assertEquals(63, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(63))); - assertEquals(130, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(130))); - assertEquals(128, 
TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(128))); - assertEquals(129, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(129))); - assertEquals(32767, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(32767))); - assertEquals(32768, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(32768))); - assertEquals(65535, TWO_BYTE_QUALIFIERS.decode(TWO_BYTE_QUALIFIERS.encode(65535))); - byte[] arr1 = TWO_BYTE_QUALIFIERS.encode(65535); - byte[] arr2 = new byte[] {-128, arr1[0], arr1[1]}; - assertEquals(65535, TWO_BYTE_QUALIFIERS.decode(arr2, 1, 2)); - try { - TWO_BYTE_QUALIFIERS.decode(arr2); - fail(); - } catch (InvalidQualifierBytesException expected) {} + } + + @Test + public void testThreeByteQualifierEncodeDecode() { + assertEquals(1, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(1))); + assertEquals(127, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(127))); + assertEquals(63, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(63))); + assertEquals(130, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(130))); + assertEquals(128, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(128))); + assertEquals(129, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(129))); + assertEquals(32767, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(32767))); + assertEquals(32768, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(32768))); + assertEquals(65535, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(65535))); + assertEquals(16777215, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(16777215))); + byte[] arr1 = THREE_BYTE_QUALIFIERS.encode(16777215); + byte[] arr2 = new byte[] { -128, arr1[0], arr1[1], arr1[2] }; + assertEquals(16777215, THREE_BYTE_QUALIFIERS.decode(arr2, 1, 3)); + try { + THREE_BYTE_QUALIFIERS.decode(arr2, 0, 2); + fail(); + } catch (InvalidQualifierBytesException expected) { } - - @Test - public void testThreeByteQualifierEncodeDecode() { - assertEquals(1, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(1))); - assertEquals(127, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(127))); - assertEquals(63, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(63))); - assertEquals(130, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(130))); - assertEquals(128, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(128))); - assertEquals(129, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(129))); - assertEquals(32767, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(32767))); - assertEquals(32768, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(32768))); - assertEquals(65535, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(65535))); - assertEquals(16777215, THREE_BYTE_QUALIFIERS.decode(THREE_BYTE_QUALIFIERS.encode(16777215))); - byte[] arr1 = THREE_BYTE_QUALIFIERS.encode(16777215); - byte[] arr2 = new byte[] {-128, arr1[0], arr1[1], arr1[2]}; - assertEquals(16777215, THREE_BYTE_QUALIFIERS.decode(arr2, 1, 3)); - try { - THREE_BYTE_QUALIFIERS.decode(arr2, 0, 2); - fail(); - } catch (InvalidQualifierBytesException expected) {} + } + + @Test + public void testFourByteQualifierEncodeDecode() { + assertEquals(1, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(1))); + assertEquals(127, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(127))); + assertEquals(63, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(63))); + assertEquals(130, 
FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(130))); + assertEquals(128, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(128))); + assertEquals(129, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(129))); + assertEquals(32767, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(32767))); + assertEquals(32768, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(32768))); + assertEquals(65535, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(65535))); + assertEquals(Integer.MAX_VALUE, + FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(Integer.MAX_VALUE))); + byte[] arr1 = FOUR_BYTE_QUALIFIERS.encode(Integer.MAX_VALUE); + byte[] arr2 = new byte[] { -128, arr1[0], arr1[1], arr1[2], arr1[3] }; + assertEquals(Integer.MAX_VALUE, FOUR_BYTE_QUALIFIERS.decode(arr2, 1, 4)); + try { + FOUR_BYTE_QUALIFIERS.decode(arr2); + fail(); + } catch (InvalidQualifierBytesException expected) { } - - @Test - public void testFourByteQualifierEncodeDecode() { - assertEquals(1, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(1))); - assertEquals(127, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(127))); - assertEquals(63, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(63))); - assertEquals(130, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(130))); - assertEquals(128, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(128))); - assertEquals(129, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(129))); - assertEquals(32767, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(32767))); - assertEquals(32768, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(32768))); - assertEquals(65535, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(65535))); - assertEquals(Integer.MAX_VALUE, FOUR_BYTE_QUALIFIERS.decode(FOUR_BYTE_QUALIFIERS.encode(Integer.MAX_VALUE))); - byte[] arr1 = FOUR_BYTE_QUALIFIERS.encode(Integer.MAX_VALUE); - byte[] arr2 = new byte[] {-128, arr1[0], arr1[1], arr1[2], arr1[3]}; - assertEquals(Integer.MAX_VALUE, FOUR_BYTE_QUALIFIERS.decode(arr2, 1, 4)); - try { - FOUR_BYTE_QUALIFIERS.decode(arr2); - fail(); - } catch (InvalidQualifierBytesException expected) {} - try { - FOUR_BYTE_QUALIFIERS.decode(arr2, 0, 3); - fail(); - } catch (InvalidQualifierBytesException expected) {} + try { + FOUR_BYTE_QUALIFIERS.decode(arr2, 0, 3); + fail(); + } catch (InvalidQualifierBytesException expected) { } - + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java index 945f3e474a9..427d6e590bd 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/QueryUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,153 +28,145 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.TestZKConnectionRegistry; import org.apache.phoenix.jdbc.ZKConnectionInfo; import org.apache.phoenix.parse.HintNode.Hint; -import org.junit.Test; - import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.junit.Test; public class QueryUtilTest { - private static final ColumnInfo ID_COLUMN = new ColumnInfo("ID", Types.BIGINT); - private static final ColumnInfo NAME_COLUMN = new ColumnInfo("NAME", Types.VARCHAR); - - @Test - public void testConstructUpsertStatement_ColumnInfos() { - assertEquals( - "UPSERT INTO MYTAB (\"ID\", \"NAME\") VALUES (?, ?)", - QueryUtil.constructUpsertStatement("MYTAB", ImmutableList.of(ID_COLUMN, NAME_COLUMN))); - - } - - @Test(expected=IllegalArgumentException.class) - public void testConstructUpsertStatement_ColumnInfos_NoColumns() { - QueryUtil.constructUpsertStatement("MYTAB", ImmutableList.of()); - } - - @Test - public void testConstructGenericUpsertStatement() { - assertEquals( - "UPSERT INTO MYTAB VALUES (?, ?)", - QueryUtil.constructGenericUpsertStatement("MYTAB", 2)); - } - - @Test(expected=IllegalArgumentException.class) - public void testConstructGenericUpsertStatement_NoColumns() { - QueryUtil.constructGenericUpsertStatement("MYTAB", 0); + private static final ColumnInfo ID_COLUMN = new ColumnInfo("ID", Types.BIGINT); + private static final ColumnInfo NAME_COLUMN = new ColumnInfo("NAME", Types.VARCHAR); + + @Test + public void testConstructUpsertStatement_ColumnInfos() { + assertEquals("UPSERT INTO MYTAB (\"ID\", \"NAME\") VALUES (?, ?)", + QueryUtil.constructUpsertStatement("MYTAB", ImmutableList.of(ID_COLUMN, NAME_COLUMN))); + + } + + @Test(expected = IllegalArgumentException.class) + public void testConstructUpsertStatement_ColumnInfos_NoColumns() { + QueryUtil.constructUpsertStatement("MYTAB", ImmutableList. 
of()); + } + + @Test + public void testConstructGenericUpsertStatement() { + assertEquals("UPSERT INTO MYTAB VALUES (?, ?)", + QueryUtil.constructGenericUpsertStatement("MYTAB", 2)); + } + + @Test(expected = IllegalArgumentException.class) + public void testConstructGenericUpsertStatement_NoColumns() { + QueryUtil.constructGenericUpsertStatement("MYTAB", 0); + } + + @Test + public void testConstructSelectStatement() { + assertEquals("SELECT \"ID\" , \"NAME\" FROM MYTAB", + QueryUtil.constructSelectStatement("MYTAB", ImmutableList.of(ID_COLUMN, NAME_COLUMN), null)); + } + + @Test + public void testConstructSelectStatementWithSchema() { + assertEquals("SELECT \"ID\" , \"NAME\" FROM A.MYTAB", QueryUtil + .constructSelectStatement("A.MYTAB", ImmutableList.of(ID_COLUMN, NAME_COLUMN), null)); + } + + @Test + public void testConstructSelectStatementWithCaseSensitiveSchema() { + final String tableName = "MYTAB"; + final String schemaName = SchemaUtil.getEscapedArgument("a"); + final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + assertEquals("SELECT \"ID\" , \"NAME\" FROM \"a\".MYTAB", QueryUtil + .constructSelectStatement(fullTableName, ImmutableList.of(ID_COLUMN, NAME_COLUMN), null)); + } + + @Test + public void testConstructSelectStatementWithCaseSensitiveTable() { + final String tableName = SchemaUtil.getEscapedArgument("mytab"); + final String schemaName = SchemaUtil.getEscapedArgument("a"); + final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + assertEquals("SELECT \"ID\" , \"NAME\" FROM \"a\".\"mytab\"", QueryUtil + .constructSelectStatement(fullTableName, ImmutableList.of(ID_COLUMN, NAME_COLUMN), null)); + } + + @Test + public void testConstructSelectWithHint() { + assertEquals( + "SELECT /*+ NO_INDEX */ \"col1\" , \"col2\" FROM MYTAB WHERE (\"col2\"=? and \"col3\" is null)", + QueryUtil.constructSelectStatement("MYTAB", Lists.newArrayList("col1", "col2"), + "\"col2\"=? and \"col3\" is null", Hint.NO_INDEX, true)); + } + + @Test + public void testConstructParameterizedInClause() { + assertEquals("((?,?,?),(?,?,?))", QueryUtil.constructParameterizedInClause(3, 2)); + assertEquals("((?))", QueryUtil.constructParameterizedInClause(1, 1)); + } + + /** + * Test that we create connection strings from the HBase Configuration that match the expected + * syntax. Expected to log exceptions as it uses ZK host names that don't exist + * @throws Exception on failure + */ + @Test + public void testCreateConnectionFromConfiguration() throws Exception { + Properties props = new Properties(); + // standard lookup. 
this already checks if we set hbase.zookeeper.clientPort + Configuration conf = new Configuration(false); + // Need this for HBase 3 where ZK is not the default + conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + ZKConnectionInfo.ZK_REGISTRY_NAME); + conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost"); + conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181"); + String conn = QueryUtil.getConnectionUrl(props, conf); + validateUrl(conn); + + // set the zks to a few hosts, some of which are no online + conf.set(HConstants.ZOOKEEPER_QUORUM, + "host.at.some.domain.1,localhost," + "host.at.other.domain.3"); + conn = QueryUtil.getConnectionUrl(props, conf); + validateUrl(conn); + + // and try with different leader/peer ports + conf.set("hbase.zookeeper.peerport", "3338"); + conf.set("hbase.zookeeper.leaderport", "3339"); + conn = QueryUtil.getConnectionUrl(props, conf); + validateUrl(conn); + } + + private void validateUrl(String url) { + String prefix = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + String zkPrefix = PhoenixRuntime.JDBC_PROTOCOL_ZK + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + String masterPrefix = + PhoenixRuntime.JDBC_PROTOCOL_MASTER + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + String rpcPrefix = PhoenixRuntime.JDBC_PROTOCOL_RPC + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; + + assertTrue("JDBC URL missing jdbc protocol prefix", url.startsWith(prefix) + || url.startsWith(zkPrefix) || url.startsWith(masterPrefix) || url.startsWith(rpcPrefix)); + assertTrue("JDBC URL missing jdbc terminator suffix", url.endsWith(";")); + url = url.replaceAll("\\\\:", "="); + // remove the prefix, should only be left with server[,server...]:port:/znode + String[] splits = url.split(":"); + splits = Arrays.copyOfRange(splits, 2, splits.length); + assertTrue("zk details should contain at least server component", splits.length >= 1); + // make sure that each server is comma separated + String[] servers = splits[0].replaceAll("=", "\\\\:").split(","); + for (String server : servers) { + assertFalse("Found whitespace in server names for url: " + url, server.contains(" ")); } - - @Test - public void testConstructSelectStatement() { - assertEquals( - "SELECT \"ID\" , \"NAME\" FROM MYTAB", - QueryUtil.constructSelectStatement("MYTAB", ImmutableList.of(ID_COLUMN,NAME_COLUMN),null)); + if (splits.length >= 2 && !splits[1].isEmpty()) { + // second bit is a port number, should not through + try { + Integer.parseInt(splits[1]); + } catch (NumberFormatException e) { + fail(e.getMessage()); + } } - - @Test - public void testConstructSelectStatementWithSchema() { - assertEquals( - "SELECT \"ID\" , \"NAME\" FROM A.MYTAB", - QueryUtil.constructSelectStatement("A.MYTAB", ImmutableList.of(ID_COLUMN,NAME_COLUMN),null)); - } - - @Test - public void testConstructSelectStatementWithCaseSensitiveSchema() { - final String tableName = "MYTAB"; - final String schemaName = SchemaUtil.getEscapedArgument("a"); - final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - assertEquals( - "SELECT \"ID\" , \"NAME\" FROM \"a\".MYTAB", - QueryUtil.constructSelectStatement(fullTableName, ImmutableList.of(ID_COLUMN,NAME_COLUMN),null)); - } - - @Test - public void testConstructSelectStatementWithCaseSensitiveTable() { - final String tableName = SchemaUtil.getEscapedArgument("mytab"); - final String schemaName = SchemaUtil.getEscapedArgument("a"); - final String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - assertEquals( - "SELECT \"ID\" , \"NAME\" FROM 
\"a\".\"mytab\"", - QueryUtil.constructSelectStatement(fullTableName, ImmutableList.of(ID_COLUMN,NAME_COLUMN),null)); - } - - @Test - public void testConstructSelectWithHint() { - assertEquals( - "SELECT /*+ NO_INDEX */ \"col1\" , \"col2\" FROM MYTAB WHERE (\"col2\"=? and \"col3\" is null)", - QueryUtil.constructSelectStatement("MYTAB", Lists.newArrayList("col1", "col2"), - "\"col2\"=? and \"col3\" is null", Hint.NO_INDEX, true)); - } - - @Test - public void testConstructParameterizedInClause() { - assertEquals("((?,?,?),(?,?,?))", QueryUtil.constructParameterizedInClause(3, 2)); - assertEquals("((?))", QueryUtil.constructParameterizedInClause(1, 1)); - } - - /** - * Test that we create connection strings from the HBase Configuration that match the - * expected syntax. Expected to log exceptions as it uses ZK host names that don't exist - * @throws Exception on failure - */ - @Test - public void testCreateConnectionFromConfiguration() throws Exception { - Properties props = new Properties(); - // standard lookup. this already checks if we set hbase.zookeeper.clientPort - Configuration conf = new Configuration(false); - // Need this for HBase 3 where ZK is not the default - conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ZKConnectionInfo.ZK_REGISTRY_NAME); - conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost"); - conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181"); - String conn = QueryUtil.getConnectionUrl(props, conf); - validateUrl(conn); - - // set the zks to a few hosts, some of which are no online - conf.set(HConstants.ZOOKEEPER_QUORUM, "host.at.some.domain.1,localhost," + - "host.at.other.domain.3"); - conn = QueryUtil.getConnectionUrl(props, conf); - validateUrl(conn); - - // and try with different leader/peer ports - conf.set("hbase.zookeeper.peerport", "3338"); - conf.set("hbase.zookeeper.leaderport", "3339"); - conn = QueryUtil.getConnectionUrl(props, conf); - validateUrl(conn); - } - - private void validateUrl(String url) { - String prefix = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - String zkPrefix = PhoenixRuntime.JDBC_PROTOCOL_ZK+ PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - String masterPrefix = PhoenixRuntime.JDBC_PROTOCOL_MASTER + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - String rpcPrefix = PhoenixRuntime.JDBC_PROTOCOL_RPC + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR; - - assertTrue("JDBC URL missing jdbc protocol prefix", - url.startsWith(prefix) || url.startsWith(zkPrefix) || url.startsWith(masterPrefix) - || url.startsWith(rpcPrefix)); - assertTrue("JDBC URL missing jdbc terminator suffix", url.endsWith(";")); - url = url.replaceAll("\\\\:", "="); - // remove the prefix, should only be left with server[,server...]:port:/znode - String[] splits = url.split(":"); - splits = Arrays.copyOfRange(splits, 2, splits.length); - assertTrue("zk details should contain at least server component", splits.length >= 1); - // make sure that each server is comma separated - String[] servers = splits[0].replaceAll("=", "\\\\:").split(","); - for(String server: servers){ - assertFalse("Found whitespace in server names for url: " + url, server.contains(" ")); - } - if (splits.length >= 2 && !splits[1].isEmpty()) { - // second bit is a port number, should not through - try { - Integer.parseInt(splits[1]); - } catch (NumberFormatException e) { - fail(e.getMessage()); - } - } - if (splits.length >= 3) { - assertTrue("znode parent is not an absolute path", splits[2].startsWith("/")); - } + if (splits.length >= 3) { + assertTrue("znode parent is not an 
absolute path", splits[2].startsWith("/")); } -} \ No newline at end of file + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/ReadOnlyPropsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/ReadOnlyPropsTest.java index 5c5c28759ce..67de05497e4 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/ReadOnlyPropsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/ReadOnlyPropsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,38 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; -import org.junit.Test; +import static org.junit.Assert.assertEquals; import java.util.HashMap; import java.util.Map; -import static org.junit.Assert.assertEquals; +import org.junit.Test; public class ReadOnlyPropsTest { - @Test - public void testGetLongBytesMissingProperty() { - Map props = new HashMap<>(); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); - assertEquals(1L, readOnlyProps.getLongBytes("missing-prop", 1L)); - } - - @Test - public void testGetLongBytesValidValue() { - Map props = new HashMap<>(); - props.put("prop1", "1K"); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); - assertEquals(1024, readOnlyProps.getLongBytes("prop1", 0L)); - } - - @Test(expected = IllegalArgumentException.class) - public void testGetLongBytesInvalidValue() { - Map props = new HashMap<>(); - props.put("prop1", "1KY"); - ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); - assertEquals(1024, readOnlyProps.getLongBytes("prop1", 0L)); - } + @Test + public void testGetLongBytesMissingProperty() { + Map props = new HashMap<>(); + ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); + assertEquals(1L, readOnlyProps.getLongBytes("missing-prop", 1L)); + } + + @Test + public void testGetLongBytesValidValue() { + Map props = new HashMap<>(); + props.put("prop1", "1K"); + ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); + assertEquals(1024, readOnlyProps.getLongBytes("prop1", 0L)); + } + + @Test(expected = IllegalArgumentException.class) + public void testGetLongBytesInvalidValue() { + Map props = new HashMap<>(); + props.put("prop1", "1KY"); + ReadOnlyProps readOnlyProps = new ReadOnlyProps(props); + assertEquals(1024, readOnlyProps.getLongBytes("prop1", 0L)); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/Repeat.java b/phoenix-core/src/test/java/org/apache/phoenix/util/Repeat.java index 7c7c013969a..e18c7b75fe2 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/Repeat.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/Repeat.java @@ -4,12 +4,12 @@ * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the - * "License"); you maynot use this file except in compliance + * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicablelaw or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -22,9 +22,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.METHOD}) -public @interface Repeat { - int value(); -} - +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.METHOD }) +public @interface Repeat { + int value(); +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/RowKeyMatcherTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/RowKeyMatcherTest.java index 2ff2e145172..62667e5a1c9 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/RowKeyMatcherTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/RowKeyMatcherTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,6 +17,16 @@ */ package org.apache.phoenix.util; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.coprocessorclient.RowKeyMatcher; @@ -28,260 +38,244 @@ import org.junit.BeforeClass; import org.junit.Test; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - public class RowKeyMatcherTest { - public List getSampleData() { - - List tableList = new ArrayList(); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000001", "001", 30000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000002", "002", 60000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000003", "003", 60000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0001000001", "TEST_ENTITY.Z01", "00D0t0001000001Z01", 60000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0002000001", "TEST_ENTITY.Z01","00D0t0002000001Z01", 120000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0003000001", "TEST_ENTITY.Z01","00D0t0003000001Z01", 180000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0004000001", "TEST_ENTITY.Z01","00D0t0004000001Z01", 300000)); - tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0005000001", "TEST_ENTITY.Z01","00D0t0005000001Z01", 6000)); - return tableList; - } - - @BeforeClass - public static void 
setUpBeforeClass() throws Exception { - - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - } - - @Before - public void setUp() throws Exception { - } - - @After - public void tearDown() throws Exception { - } - - - @Test - public void testOverlappingPrefixes() { - RowKeyMatcher globalRowKeyMatcher = new RowKeyMatcher(); - RowKeyMatcher tenantRowKeyMatcher = new RowKeyMatcher(); - TableTTLInfoCache cache = new TableTTLInfoCache(); - List sampleRows = new ArrayList<>(); - sampleRows.add("0010t0001000001001Z01#12348"); - sampleRows.add("0010t0001000001002Z01#7832438"); - - TableTTLInfo table1 = new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000001", "001", 30000); - TableTTLInfo table2 = new TableTTLInfo("TEST_ENTITY.T_000001", - "0010t0001000001", "TEST_ENTITY.Z01", - "0010t0001000001002Z01", 60000); - - Integer tableId1 = cache.addTable(table1); - Integer tableId2 = cache.addTable(table2); - globalRowKeyMatcher.put(table1.getMatchPattern(), tableId1); - tenantRowKeyMatcher.put(table2.getMatchPattern(), tableId2); - - int tenantOffset = 0; - int globalOffset = 15; - - Integer row0GlobalMatch = globalRowKeyMatcher.get(sampleRows.get(0).getBytes(), globalOffset); - assertTrue(String.format("row-%d, matched = %s, row = %s", - 0, row0GlobalMatch != null, sampleRows.get(0)), row0GlobalMatch != null); - Integer row0TenantMatch = tenantRowKeyMatcher.get(sampleRows.get(0).getBytes(), tenantOffset); - assertTrue(String.format("row-%d, matched = %s, row = %s", - 0, row0TenantMatch != null, sampleRows.get(0)), row0TenantMatch == null); - - Integer row1GlobalMatch = globalRowKeyMatcher.get(sampleRows.get(1).getBytes(), globalOffset); - assertTrue(String.format("row-%d, matched = %s, row = %s", - 0, row1GlobalMatch != null, sampleRows.get(1)), row1GlobalMatch == null); - Integer row1TenantMatch = tenantRowKeyMatcher.get(sampleRows.get(1).getBytes(), tenantOffset); - assertTrue(String.format("row-%d, matched = %s, row = %s", - 0, row1TenantMatch != null, sampleRows.get(1)), row1TenantMatch != null); + public List getSampleData() { + + List tableList = new ArrayList(); + tableList + .add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000001", "001", 30000)); + tableList + .add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000002", "002", 60000)); + tableList + .add(new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000003", "003", 60000)); + tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0001000001", "TEST_ENTITY.Z01", + "00D0t0001000001Z01", 60000)); + tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0002000001", "TEST_ENTITY.Z01", + "00D0t0002000001Z01", 120000)); + tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0003000001", "TEST_ENTITY.Z01", + "00D0t0003000001Z01", 180000)); + tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0004000001", "TEST_ENTITY.Z01", + "00D0t0004000001Z01", 300000)); + tableList.add(new TableTTLInfo("TEST_ENTITY.T_000001", "00D0t0005000001", "TEST_ENTITY.Z01", + "00D0t0005000001Z01", 6000)); + return tableList; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + @Before + public void setUp() throws Exception { + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testOverlappingPrefixes() { + RowKeyMatcher globalRowKeyMatcher = new RowKeyMatcher(); + RowKeyMatcher tenantRowKeyMatcher = new 
RowKeyMatcher(); + TableTTLInfoCache cache = new TableTTLInfoCache(); + List sampleRows = new ArrayList<>(); + sampleRows.add("0010t0001000001001Z01#12348"); + sampleRows.add("0010t0001000001002Z01#7832438"); + + TableTTLInfo table1 = + new TableTTLInfo("TEST_ENTITY.T_000001", "", "TEST_ENTITY.GV_000001", "001", 30000); + TableTTLInfo table2 = new TableTTLInfo("TEST_ENTITY.T_000001", "0010t0001000001", + "TEST_ENTITY.Z01", "0010t0001000001002Z01", 60000); + + Integer tableId1 = cache.addTable(table1); + Integer tableId2 = cache.addTable(table2); + globalRowKeyMatcher.put(table1.getMatchPattern(), tableId1); + tenantRowKeyMatcher.put(table2.getMatchPattern(), tableId2); + + int tenantOffset = 0; + int globalOffset = 15; + + Integer row0GlobalMatch = globalRowKeyMatcher.get(sampleRows.get(0).getBytes(), globalOffset); + assertTrue(String.format("row-%d, matched = %s, row = %s", 0, row0GlobalMatch != null, + sampleRows.get(0)), row0GlobalMatch != null); + Integer row0TenantMatch = tenantRowKeyMatcher.get(sampleRows.get(0).getBytes(), tenantOffset); + assertTrue(String.format("row-%d, matched = %s, row = %s", 0, row0TenantMatch != null, + sampleRows.get(0)), row0TenantMatch == null); + + Integer row1GlobalMatch = globalRowKeyMatcher.get(sampleRows.get(1).getBytes(), globalOffset); + assertTrue(String.format("row-%d, matched = %s, row = %s", 0, row1GlobalMatch != null, + sampleRows.get(1)), row1GlobalMatch == null); + Integer row1TenantMatch = tenantRowKeyMatcher.get(sampleRows.get(1).getBytes(), tenantOffset); + assertTrue(String.format("row-%d, matched = %s, row = %s", 0, row1TenantMatch != null, + sampleRows.get(1)), row1TenantMatch != null); + } + + @Test + public void testSamplePrefixes() { + RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); + TableTTLInfoCache cache = new TableTTLInfoCache(); + List sampleTableList = new ArrayList(); + + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000001".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x00"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000002".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x01"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000003".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x02"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000004".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x03"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000005".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x04"), 300)); + + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000006".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x8F"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000007".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x9F"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000008".getBytes(), + 
Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xAF"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000009".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xBF"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_0000010".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xCF"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000011".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xDF"), 300)); + sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), + ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000012".getBytes(), + Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xEF"), 300)); + + for (int i = 0; i < sampleTableList.size(); i++) { + Integer tableId = cache.addTable(sampleTableList.get(i)); + rowKeyMatcher.put(sampleTableList.get(i).getMatchPattern(), tableId); + assertEquals(tableId.intValue(), i); } - - @Test - public void testSamplePrefixes() { - RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); - TableTTLInfoCache cache = new TableTTLInfoCache(); - List sampleTableList = new ArrayList(); - - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000001".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x00"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000002".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x01"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000003".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x02"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000004".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x03"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000005".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x04"), - 300)); - - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000006".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x8F"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000007".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\x9F"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000008".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xAF"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000009".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xBF"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_0000010".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xCF"), - 300)); - sampleTableList.add(new 
TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000011".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xDF"), - 300)); - sampleTableList.add(new TableTTLInfo("TEST_ENTITY.T_000001".getBytes(), - ByteUtil.EMPTY_BYTE_ARRAY, "TEST_ENTITY.GV_000012".getBytes(), - Bytes.toBytesBinary("\\x7F\\xFF\\xFF\\xFF\\xFF\\xFF\\x80\\xEF"), - 300)); - - - for (int i = 0; i < sampleTableList.size(); i++) { - Integer tableId = cache.addTable(sampleTableList.get(i)); - rowKeyMatcher.put(sampleTableList.get(i).getMatchPattern(), tableId); - assertEquals(tableId.intValue(), i); - } - int offset = 0; - for (int i = 0; i sampleTableList = getSampleData(); - runTest(rowKeyMatcher, cache, sampleTableList, 1, 1); - - //Assert results - assertResults(rowKeyMatcher, sampleTableList); - assertResults(cache, sampleTableList); - - } - - @Test - public void testRepeatingSampleDataCount() { - RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); - TableTTLInfoCache cache = new TableTTLInfoCache(); - List sampleTableList = getSampleData(); - runTest(rowKeyMatcher, cache, sampleTableList, 1, 25); - - //Assert results - assertResults(rowKeyMatcher, sampleTableList); - assertResults(cache, sampleTableList); - + int offset = 0; + for (int i = 0; i < sampleTableList.size(); i++) { + assertEquals(rowKeyMatcher.get(sampleTableList.get(i).getMatchPattern(), offset).intValue(), + i); } - @Test - public void testConcurrentSampleDataCount() { - RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); - TableTTLInfoCache cache = new TableTTLInfoCache(); - List sampleTableList = getSampleData(); - runTest(rowKeyMatcher, cache, sampleTableList, 5, 5); - - //Assert results - assertResults(rowKeyMatcher, sampleTableList); - assertResults(cache, sampleTableList); - } - - private void assertResults(RowKeyMatcher rowKeyMatcher, List sampleTableList) { - //Assert results - int tableCountExpected = sampleTableList.size(); - int prefixCountActual = rowKeyMatcher.getNumEntries(); - String message = String.format("expected = %d, actual = %d", tableCountExpected, prefixCountActual); - assertTrue(message, tableCountExpected == prefixCountActual); - } - - private void assertResults(TableTTLInfoCache cache, List sampleTableList) { -// //Assert results - Set dedupedTables = new HashSet(); - dedupedTables.addAll(sampleTableList); - - int tableCountExpected = sampleTableList.size(); - int dedupeTableCountExpected = dedupedTables.size(); - int tableCountActual = cache.getNumTablesInCache(); - String message = String.format("expected = %d, actual = %d", tableCountExpected, tableCountActual); - assertTrue(message, dedupeTableCountExpected == tableCountActual); - assertTrue(message, tableCountExpected == tableCountActual); - } - - private void runTest(RowKeyMatcher targetRowKeyMatcher, TableTTLInfoCache cache, - List sampleData, int numThreads, int numRepeats) { - - try { - Thread[] threads = new Thread[numThreads]; - final CountDownLatch latch = new CountDownLatch(threads.length); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(new Runnable() { - public void run() { - try { - for (int repeats = 0; repeats < numRepeats; repeats++) { - addTablesToPrefixIndex(sampleData, targetRowKeyMatcher); - addTablesToCache(sampleData, cache); - } - } finally { - latch.countDown(); - } - } - }, "data-generator-" + i); - threads[i].setDaemon(true); + } + + @Test + public void testSingleSampleDataCount() { + RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); + TableTTLInfoCache cache = new 
TableTTLInfoCache(); + + List sampleTableList = getSampleData(); + runTest(rowKeyMatcher, cache, sampleTableList, 1, 1); + + // Assert results + assertResults(rowKeyMatcher, sampleTableList); + assertResults(cache, sampleTableList); + + } + + @Test + public void testRepeatingSampleDataCount() { + RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); + TableTTLInfoCache cache = new TableTTLInfoCache(); + List sampleTableList = getSampleData(); + runTest(rowKeyMatcher, cache, sampleTableList, 1, 25); + + // Assert results + assertResults(rowKeyMatcher, sampleTableList); + assertResults(cache, sampleTableList); + + } + + @Test + public void testConcurrentSampleDataCount() { + RowKeyMatcher rowKeyMatcher = new RowKeyMatcher(); + TableTTLInfoCache cache = new TableTTLInfoCache(); + List sampleTableList = getSampleData(); + runTest(rowKeyMatcher, cache, sampleTableList, 5, 5); + + // Assert results + assertResults(rowKeyMatcher, sampleTableList); + assertResults(cache, sampleTableList); + } + + private void assertResults(RowKeyMatcher rowKeyMatcher, List sampleTableList) { + // Assert results + int tableCountExpected = sampleTableList.size(); + int prefixCountActual = rowKeyMatcher.getNumEntries(); + String message = + String.format("expected = %d, actual = %d", tableCountExpected, prefixCountActual); + assertTrue(message, tableCountExpected == prefixCountActual); + } + + private void assertResults(TableTTLInfoCache cache, List sampleTableList) { + // //Assert results + Set dedupedTables = new HashSet(); + dedupedTables.addAll(sampleTableList); + + int tableCountExpected = sampleTableList.size(); + int dedupeTableCountExpected = dedupedTables.size(); + int tableCountActual = cache.getNumTablesInCache(); + String message = + String.format("expected = %d, actual = %d", tableCountExpected, tableCountActual); + assertTrue(message, dedupeTableCountExpected == tableCountActual); + assertTrue(message, tableCountExpected == tableCountActual); + } + + private void runTest(RowKeyMatcher targetRowKeyMatcher, TableTTLInfoCache cache, + List sampleData, int numThreads, int numRepeats) { + + try { + Thread[] threads = new Thread[numThreads]; + final CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(new Runnable() { + public void run() { + try { + for (int repeats = 0; repeats < numRepeats; repeats++) { + addTablesToPrefixIndex(sampleData, targetRowKeyMatcher); + addTablesToCache(sampleData, cache); + } + } finally { + latch.countDown(); } - for (int i = 0; i < threads.length; i++) { - threads[i].start(); - } - latch.await(); - } - catch (InterruptedException ie) { - fail(ie.getMessage()); - } - } - - - private void addTablesToPrefixIndex(List tableList, RowKeyMatcher rowKeyMatcher) { - AtomicInteger tableId = new AtomicInteger(0); - tableList.forEach(m -> { - rowKeyMatcher.put(m.getMatchPattern(), tableId.incrementAndGet()); - }); + } + }, "data-generator-" + i); + threads[i].setDaemon(true); + } + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + latch.await(); + } catch (InterruptedException ie) { + fail(ie.getMessage()); } - - private void addTablesToCache(List tableList, TableTTLInfoCache cache) { - tableList.forEach(m -> { - cache.addTable(m); - }); - } - -} \ No newline at end of file + } + + private void addTablesToPrefixIndex(List tableList, RowKeyMatcher rowKeyMatcher) { + AtomicInteger tableId = new AtomicInteger(0); + tableList.forEach(m -> { + rowKeyMatcher.put(m.getMatchPattern(), 
tableId.incrementAndGet()); + }); + } + + private void addTablesToCache(List tableList, TableTTLInfoCache cache) { + tableList.forEach(m -> { + cache.addTable(m); + }); + } + +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/RunUntilFailure.java b/phoenix-core/src/test/java/org/apache/phoenix/util/RunUntilFailure.java index ed0a8393e32..b21efa475f8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/RunUntilFailure.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/RunUntilFailure.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,64 +27,58 @@ import org.junit.runners.model.InitializationError; import org.junit.runners.model.Statement; -public class RunUntilFailure extends BlockJUnit4ClassRunner { - private boolean hasFailure; +public class RunUntilFailure extends BlockJUnit4ClassRunner { + private boolean hasFailure; - public RunUntilFailure(Class klass) throws InitializationError { - super(klass); - } + public RunUntilFailure(Class klass) throws InitializationError { + super(klass); + } - @Override - protected Description describeChild(FrameworkMethod method) { - if (method.getAnnotation(Repeat.class) != null && - method.getAnnotation(Ignore.class) == null) { - return describeRepeatTest(method); - } - return super.describeChild(method); - } + @Override + protected Description describeChild(FrameworkMethod method) { + if (method.getAnnotation(Repeat.class) != null && method.getAnnotation(Ignore.class) == null) { + return describeRepeatTest(method); + } + return super.describeChild(method); + } - private Description describeRepeatTest(FrameworkMethod method) { - int times = method.getAnnotation(Repeat.class).value(); + private Description describeRepeatTest(FrameworkMethod method) { + int times = method.getAnnotation(Repeat.class).value(); - Description description = Description.createSuiteDescription( - testName(method) + " [" + times + " times]", - method.getAnnotations()); + Description description = Description + .createSuiteDescription(testName(method) + " [" + times + " times]", method.getAnnotations()); - for (int i = 1; i <= times; i++) { - description.addChild(Description.createTestDescription( - getTestClass().getJavaClass(), - testName(method) + "-" + i)); - } - return description; - } + for (int i = 1; i <= times; i++) { + description.addChild(Description.createTestDescription(getTestClass().getJavaClass(), + testName(method) + "-" + i)); + } + return description; + } - @Override - protected void runChild(final FrameworkMethod method, RunNotifier notifier) { - Description description = describeChild(method); + @Override + protected void runChild(final FrameworkMethod method, RunNotifier notifier) { + Description description = describeChild(method); - if (method.getAnnotation(Repeat.class) != null && - method.getAnnotation(Ignore.class) == null) { - runRepeatedly(methodBlock(method), description, notifier); - } - super.runChild(method, notifier); - } + if (method.getAnnotation(Repeat.class) != null && method.getAnnotation(Ignore.class) == null) { + runRepeatedly(methodBlock(method), description, notifier); + } + super.runChild(method, notifier); + } - private void runRepeatedly(Statement statement, Description 
description, - RunNotifier notifier) { - notifier.addListener(new RunListener() { - @Override - public void testFailure(Failure failure) { - hasFailure = true; - } - }); - for (Description desc : description.getChildren()) { - if (hasFailure) { - notifier.fireTestIgnored(desc); - } else if(!desc.isSuite()) { - runLeaf(statement, desc, notifier); - } - } - } - -} + private void runRepeatedly(Statement statement, Description description, RunNotifier notifier) { + notifier.addListener(new RunListener() { + @Override + public void testFailure(Failure failure) { + hasFailure = true; + } + }); + for (Description desc : description.getChildren()) { + if (hasFailure) { + notifier.fireTestIgnored(desc); + } else if (!desc.isSuite()) { + runLeaf(statement, desc, notifier); + } + } + } +} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/SQLExceptionCodeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/SQLExceptionCodeTest.java index 87696c49844..8355de06055 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/SQLExceptionCodeTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/SQLExceptionCodeTest.java @@ -1,58 +1,52 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.util; +import java.sql.SQLException; + import org.apache.phoenix.exception.SQLExceptionCode; import org.apache.phoenix.exception.SQLExceptionInfo; import org.junit.Assert; import org.junit.Test; -import java.sql.SQLException; - public class SQLExceptionCodeTest { - @Test - public void testOperationTimedOutTest() { - SQLException sqlException = new SQLExceptionInfo - .Builder(SQLExceptionCode.OPERATION_TIMED_OUT) - .setMessage("Test Operation Timedout") - .setRootCause(new IllegalArgumentException("TestOpsTimeout1")) - .build().buildException(); - Assert.assertEquals("Test Operation Timedout", - sqlException.getMessage()); - Assert.assertEquals("TestOpsTimeout1", - sqlException.getCause().getMessage()); - Assert.assertTrue(sqlException.getCause() instanceof - IllegalArgumentException); - Assert.assertEquals(sqlException.getErrorCode(), - SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode()); - Assert.assertEquals(sqlException.getSQLState(), - SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState()); - sqlException = new SQLExceptionInfo - .Builder(SQLExceptionCode.OPERATION_TIMED_OUT) - .build().buildException(); - Assert.assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getMessage(), - sqlException.getMessage()); - Assert.assertNull(sqlException.getCause()); - Assert.assertEquals(sqlException.getErrorCode(), - SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode()); - Assert.assertEquals(sqlException.getSQLState(), - SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState()); - } + @Test + public void testOperationTimedOutTest() { + SQLException sqlException = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT) + .setMessage("Test Operation Timedout") + .setRootCause(new IllegalArgumentException("TestOpsTimeout1")).build().buildException(); + Assert.assertEquals("Test Operation Timedout", sqlException.getMessage()); + Assert.assertEquals("TestOpsTimeout1", sqlException.getCause().getMessage()); + Assert.assertTrue(sqlException.getCause() instanceof IllegalArgumentException); + Assert.assertEquals(sqlException.getErrorCode(), + SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode()); + Assert.assertEquals(sqlException.getSQLState(), + SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState()); + sqlException = + new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT).build().buildException(); + Assert.assertEquals(SQLExceptionCode.OPERATION_TIMED_OUT.getMessage(), + sqlException.getMessage()); + Assert.assertNull(sqlException.getCause()); + Assert.assertEquals(sqlException.getErrorCode(), + SQLExceptionCode.OPERATION_TIMED_OUT.getErrorCode()); + Assert.assertEquals(sqlException.getSQLState(), + SQLExceptionCode.OPERATION_TIMED_OUT.getSQLState()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/ScanUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/ScanUtilTest.java index e088bbfab53..40cbc8dca63 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/ScanUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/ScanUtilTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,10 +16,18 @@ * limitations under the License. 
*/ package org.apache.phoenix.util; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; + import static org.apache.phoenix.util.TestUtil.ATABLE_NAME; +import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; import static org.junit.Assert.assertArrayEquals; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Properties; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; @@ -45,6 +53,8 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.runners.Enclosed; @@ -52,520 +62,553 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Properties; - /** * Test the SetKey method in ScanUtil. */ @RunWith(Enclosed.class) public class ScanUtilTest { - @RunWith(Parameterized.class) - public static class ParameterizedScanUtilTest { - private final List> slots; - private final byte[] expectedKey; - private final RowKeySchema schema; - private final Bound bound; - - public ParameterizedScanUtilTest(List> slots, int[] widths, byte[] expectedKey, Bound bound) - throws Exception { - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); - for (final int width : widths) { - if (width > 0) { - builder.addField(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public PDataType getDataType() { - return PChar.INSTANCE; - } - - @Override public Integer getMaxLength() { - return width; - } - - @Override public Integer getScale() { - return null; - } - - @Override public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } else { - builder.addField(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override public Integer getMaxLength() { - return null; - } - - @Override public Integer getScale() { - return null; - } - - @Override public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - } + @RunWith(Parameterized.class) + public static class ParameterizedScanUtilTest { + private final List> slots; + private final byte[] expectedKey; + private final RowKeySchema schema; + private final Bound bound; + + public ParameterizedScanUtilTest(List> slots, int[] widths, byte[] expectedKey, + Bound bound) throws Exception { + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(widths.length); + for (final int width : widths) { + if (width > 0) { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; } - this.schema = builder.build(); - this.slots = slots; - this.expectedKey = expectedKey; - this.bound = bound; + + @Override + public PDataType getDataType() 
{ + return PChar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return width; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + } else { + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); } + } + this.schema = builder.build(); + this.slots = slots; + this.expectedKey = expectedKey; + this.bound = bound; + } + + @Test + public void test() { + byte[] key = new byte[1024]; + int[] position = new int[slots.size()]; + int offset = ScanUtil.setKey(schema, slots, ScanUtil.getDefaultSlotSpans(slots.size()), + position, bound, key, 0, 0, slots.size()); + byte[] actualKey = new byte[offset]; + System.arraycopy(key, 0, actualKey, 0, offset); + assertArrayEquals(expectedKey, actualKey); + } + + @Parameters(name = "{0} {1} {2} {3} {4}") + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + // 1, Lower bound, all single keys, all inclusive. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), Bound.LOWER)); + // 2, Lower bound, all range keys, all inclusive. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), Bound.LOWER)); + // 3, Lower bound, mixed single and range keys, all inclusive. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), Bound.LOWER)); + // 4, Lower bound, all range key, all exclusive on lower bound. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), false, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), false, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b2B"), Bound.LOWER)); + // 5, Lower bound, all range key, some exclusive. 
+ testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), false, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b1B"), Bound.LOWER)); + // 6, Lower bound, mixed single and range key, mixed inclusive and exclusive. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1B"), Bound.LOWER)); + // 7, Lower bound, unbound key in the middle, fixed length. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE + .getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, + { KeyRange.EVERYTHING_RANGE, }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a"), Bound.LOWER)); + // 8, Lower bound, unbound key in the middle, variable length. + testCases.addAll(foreach( + new KeyRange[][] { { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, + Bytes.toBytes("a"), true, SortOrder.ASC), }, { KeyRange.EVERYTHING_RANGE, } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a"), Bound.LOWER)); + // 9, Lower bound, unbound key at end, variable length. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE + .getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, + { KeyRange.EVERYTHING_RANGE, }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a"), Bound.LOWER)); + // 10, Upper bound, all single keys, all inclusive, increment at end. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1B"), Bound.UPPER)); + // 11, Upper bound, all range keys, all inclusive, increment at end. + testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b2C"), Bound.UPPER)); + // 12, Upper bound, all range keys, all exclusive, no increment at end. 
+ testCases + .addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), false, + SortOrder.ASC), } }, + new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b"), Bound.UPPER)); + // 13, Upper bound, single inclusive, range inclusive, increment at end. + testCases + .addAll( + foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, + SortOrder.ASC), } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a3"), Bound.UPPER)); + // 14, Upper bound, range exclusive, single inclusive, increment at end. + testCases.addAll(foreach(new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), Bound.UPPER)); + // 15, Upper bound, range inclusive, single inclusive, increment at end. + testCases + .addAll( + foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, + SortOrder.ASC), } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b2"), Bound.UPPER)); + // 16, Upper bound, single inclusive, range exclusive, no increment at end. + testCases + .addAll( + foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), false, + SortOrder.ASC), } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a2"), Bound.UPPER)); + // 17, Upper bound, unbound key, fixed length; + testCases.addAll(foreach( + new KeyRange[][] { { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, + Bytes.toBytes("a"), true, SortOrder.ASC), }, { KeyRange.EVERYTHING_RANGE, } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), Bound.UPPER)); + // 18, Upper bound, unbound key, variable length; + testCases.addAll(foreach( + new KeyRange[][] { { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, + Bytes.toBytes("a"), true, SortOrder.ASC), }, { KeyRange.EVERYTHING_RANGE, } }, + new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), Bound.UPPER)); + // 19, Upper bound, keys wrapped around when incrementing. 
+ testCases.addAll(foreach(new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(new byte[] { -1 }, true, new byte[] { -1 }, true, + SortOrder.ASC) }, + { PChar.INSTANCE.getKeyRange(new byte[] { -1 }, true, new byte[] { -1 }, true, + SortOrder.ASC) } }, + new int[] { 1, 1 }, ByteUtil.EMPTY_BYTE_ARRAY, Bound.UPPER)); + // 20, Variable length + testCases.addAll(foreach( + new KeyRange[][] { + { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, + SortOrder.ASC), }, + { PVarchar.INSTANCE + .getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), true, SortOrder.ASC), } }, + new int[] { 1, 0 }, + ByteUtil.nextKey( + ByteUtil.concat(PVarchar.INSTANCE.toBytes("aB"), QueryConstants.SEPARATOR_BYTE_ARRAY)), + Bound.UPPER)); + return testCases; + } - @Test - public void test() { - byte[] key = new byte[1024]; - int[] position = new int[slots.size()]; - int - offset = - ScanUtil.setKey(schema, slots, ScanUtil.getDefaultSlotSpans(slots.size()), position, - bound, key, 0, 0, slots.size()); - byte[] actualKey = new byte[offset]; - System.arraycopy(key, 0, actualKey, 0, offset); - assertArrayEquals(expectedKey, actualKey); + private static Collection foreach(KeyRange[][] ranges, int[] widths, byte[] expectedKey, + Bound bound) { + List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); + List ret = Lists.newArrayList(); + ret.add(new Object[] { slots, widths, expectedKey, bound }); + return ret; + } + + private static final Function> ARRAY_TO_LIST = + new Function>() { + @Override + public List apply(KeyRange[] input) { + return Lists.newArrayList(input); } + }; + } + + public static class NonParameterizedScanUtilTest { - @Parameters(name = "{0} {1} {2} {3} {4}") - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // 1, Lower bound, all single keys, all inclusive. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), - Bound.LOWER)); - // 2, Lower bound, all range keys, all inclusive. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), - Bound.LOWER)); - // 3, Lower bound, mixed single and range keys, all inclusive. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1A"), - Bound.LOWER)); - // 4, Lower bound, all range key, all exclusive on lower bound. 
- testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), false, Bytes.toBytes("b"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), false, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b2B"), - Bound.LOWER)); - // 5, Lower bound, all range key, some exclusive. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), false, Bytes.toBytes("b"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b1B"), - Bound.LOWER)); - // 6, Lower bound, mixed single and range key, mixed inclusive and exclusive. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1B"), - Bound.LOWER)); - // 7, Lower bound, unbound key in the middle, fixed length. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { KeyRange.EVERYTHING_RANGE, }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), false, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a"), - Bound.LOWER)); - // 8, Lower bound, unbound key in the middle, variable length. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { KeyRange.EVERYTHING_RANGE, } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a"), - Bound.LOWER)); - // 9, Lower bound, unbound key at end, variable length. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { KeyRange.EVERYTHING_RANGE, }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a"), - Bound.LOWER)); - // 10, Upper bound, all single keys, all inclusive, increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("A"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("a1B"), - Bound.UPPER)); - // 11, Upper bound, all range keys, all inclusive, increment at end. 
- testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b2C"), - Bound.UPPER)); - // 12, Upper bound, all range keys, all exclusive, no increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), - false, SortOrder.ASC), } }, new int[] { 1, 1, 1 }, PChar.INSTANCE.toBytes("b"), - Bound.UPPER)); - // 13, Upper bound, single inclusive, range inclusive, increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), - true, SortOrder.ASC), } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a3"), Bound.UPPER)); - // 14, Upper bound, range exclusive, single inclusive, increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), - true, SortOrder.ASC), } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), Bound.UPPER)); - // 15, Upper bound, range inclusive, single inclusive, increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), - true, SortOrder.ASC), } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b2"), Bound.UPPER)); - // 16, Upper bound, single inclusive, range exclusive, no increment at end. - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("2"), - false, SortOrder.ASC), } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("a2"), - Bound.UPPER)); - // 17, Upper bound, unbound key, fixed length; - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { KeyRange.EVERYTHING_RANGE, } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), - Bound.UPPER)); - // 18, Upper bound, unbound key, variable length; - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { KeyRange.EVERYTHING_RANGE, } }, new int[] { 1, 1 }, PChar.INSTANCE.toBytes("b"), - Bound.UPPER)); - // 19, Upper bound, keys wrapped around when incrementing. 
- testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(new byte[] { -1 }, true, new byte[] { -1 }, true, SortOrder.ASC) }, - { PChar.INSTANCE.getKeyRange(new byte[] { -1 }, true, new byte[] { -1 }, true, SortOrder.ASC) } }, - new int[] { 1, 1 }, ByteUtil.EMPTY_BYTE_ARRAY, Bound.UPPER)); - // 20, Variable length - testCases.addAll(foreach(new KeyRange[][] { - { PChar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true, SortOrder.ASC), }, - { PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("A"), true, Bytes.toBytes("B"), - true, SortOrder.ASC), } }, new int[] { 1, 0 }, - ByteUtil.nextKey(ByteUtil.concat(PVarchar.INSTANCE.toBytes("aB"), QueryConstants.SEPARATOR_BYTE_ARRAY)), - Bound.UPPER)); - return testCases; + @Test + public void testSlotsSaltedVarbinaryPk() { + byte[] key = new byte[1024]; + + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(2); + + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; } - private static Collection foreach(KeyRange[][] ranges, int[] widths, byte[] expectedKey, - Bound bound) { - List> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST); - List ret = Lists.newArrayList(); - ret.add(new Object[] { slots, widths, expectedKey, bound }); - return ret; + @Override + public PDataType getDataType() { + return PBinary.INSTANCE; } - private static final Function> ARRAY_TO_LIST = new Function>() { - @Override public List apply(KeyRange[] input) { - return Lists.newArrayList(input); - } - }; - } + @Override + public Integer getMaxLength() { + return 1; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } - public static class NonParameterizedScanUtilTest { - - @Test - public void testSlotsSaltedVarbinaryPk() { - byte[] key = new byte[1024]; - - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(2); - - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PBinary.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return 1; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - - builder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PVarbinary.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return 60; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - - List ranges = Lists.newArrayList(KeyRange.getKeyRange(new byte[] { 0, 5 })); - List> pkKeyRanges = Lists.newArrayList(); - pkKeyRanges.add(ranges); - - // For this case slots the salt bucket and key are one span - int[] slotSpans = new int[] { 1 }; - - int offset = ScanUtil.setKey(builder.build(), pkKeyRanges, slotSpans, new int[] { 0 }, Bound.UPPER, key, 0, - 0, slotSpans.length); - byte[] actualKey = new byte[offset]; - System.arraycopy(key, 0, actualKey, 0, offset); - assertArrayEquals(new byte[] { 0, 5, 0 }, actualKey); + @Override + public PDataType getDataType() { + return 
PVarbinary.INSTANCE; } - @Test - public void testLastPkColumnIsVariableLengthAndDescBug5307() throws Exception{ - RowKeySchemaBuilder rowKeySchemaBuilder = new RowKeySchemaBuilder(2); - - rowKeySchemaBuilder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - }, false, SortOrder.getDefault()); - - rowKeySchemaBuilder.addField(new PDatum() { - @Override - public boolean isNullable() { - return false; - } - - @Override - public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override - public Integer getMaxLength() { - return null; - } - - @Override - public Integer getScale() { - return null; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.DESC; - } - }, false, SortOrder.DESC); - - rowKeySchemaBuilder.rowKeyOrderOptimizable(true); - RowKeySchema rowKeySchema = rowKeySchemaBuilder.build(); - //it is [[obj1, obj2, obj3], [\xCD\xCD\xCD\xCD, \xCE\xCE\xCE\xCE, \xCE\xCE\xCE\xCE]] - List> rowKeySlotRangesList = Arrays.asList( - Arrays.asList( - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC)), - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj2", SortOrder.ASC)), - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC))), - Arrays.asList( - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC)), - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC)), - KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC)))); - - int[] rowKeySlotSpans = new int[]{0,0}; - byte[] rowKey = new byte[1024]; - int[] rowKeySlotRangesIndexes = new int[]{0,0}; - int rowKeyLength = ScanUtil.setKey( - rowKeySchema, - rowKeySlotRangesList, - rowKeySlotSpans, - rowKeySlotRangesIndexes, - Bound.LOWER, - rowKey, - 0, - 0, - 2); - byte[] startKey = Arrays.copyOf(rowKey, rowKeyLength); - - rowKeySlotRangesIndexes = new int[]{2,2}; - rowKey = new byte[1024]; - rowKeyLength = ScanUtil.setKey( - rowKeySchema, - rowKeySlotRangesList, - rowKeySlotSpans, - rowKeySlotRangesIndexes, - Bound.UPPER, - rowKey, - 0, - 0, - 2); - byte[] endKey = Arrays.copyOf(rowKey, rowKeyLength); - - byte[] expectedStartKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - byte[] expectedEndKey = ByteUtil.concat( - PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC), - QueryConstants.SEPARATOR_BYTE_ARRAY, - PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), - QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); - ByteUtil.nextKey(expectedEndKey, expectedEndKey.length); - - assertArrayEquals(expectedStartKey, startKey); - assertArrayEquals(expectedEndKey, endKey); + @Override + public Integer getMaxLength() { + return 60; } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + + List ranges = Lists.newArrayList(KeyRange.getKeyRange(new byte[] { 0, 5 })); + List> pkKeyRanges = Lists.newArrayList(); + pkKeyRanges.add(ranges); + + // For this case slots the salt bucket and key are one span + int[] slotSpans = new int[] { 1 }; 
+ + int offset = ScanUtil.setKey(builder.build(), pkKeyRanges, slotSpans, new int[] { 0 }, + Bound.UPPER, key, 0, 0, slotSpans.length); + byte[] actualKey = new byte[offset]; + System.arraycopy(key, 0, actualKey, 0, offset); + assertArrayEquals(new byte[] { 0, 5, 0 }, actualKey); } - public static class PhoenixTTLScanUtilTest extends BaseConnectionlessQueryTest { - - @Test - public void testPhoenixTTLUtilMethods() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - try (Connection conn = driver.connect(getUrl(), props)) { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - PTable table = phxConn.getTable(new PTableKey(null, ATABLE_NAME)); - - byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); - byte[] emptyColumnName = table.getEncodingScheme() - == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS ? - QueryConstants.EMPTY_COLUMN_BYTES : - table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); - - String row = "test.row"; - long timestamp42 = 42L; - KeyValue.Type type42 = KeyValue.Type.Put; - String value42 = "test.value.42"; - long seqId42 = 1042L; - - List cellList = Lists.newArrayList(); - Cell cell42 = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes(row)) - .setFamily(emptyColumnFamilyName) - .setQualifier(emptyColumnName) - .setTimestamp(timestamp42) - .setType(type42.getCode()) - .setValue(Bytes.toBytes(value42)) - .setSequenceId(seqId42) - .build(); - // Add cell to the cell list - cellList.add(cell42); - - long timestamp43 = 43L; - String columnName = "test_column"; - KeyValue.Type type43 = KeyValue.Type.Put; - String value43 = "test.value.43"; - long seqId43 = 1043L; - Cell cell43 = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes(row)) - .setFamily(emptyColumnFamilyName) - .setQualifier(Bytes.toBytes(columnName)) - .setTimestamp(timestamp43) - .setType(type43.getCode()) - .setValue(Bytes.toBytes(value43)) - .setSequenceId(seqId43) - .build(); - // Add cell to the cell list - cellList.add(cell43); - - long timestamp44 = 44L; - Scan testScan = new Scan(); - testScan.setAttribute(BaseScannerRegionObserverConstants.TTL, Bytes.toBytes(1L)); - // Test isTTLExpired - Assert.assertTrue(ScanUtil.isTTLExpired(cell42, testScan, timestamp44)); - Assert.assertFalse(ScanUtil.isTTLExpired(cell43, testScan, timestamp44)); - // Test isEmptyColumn - Assert.assertTrue(ScanUtil.isEmptyColumn(cell42, emptyColumnFamilyName, emptyColumnName)); - Assert.assertFalse(ScanUtil.isEmptyColumn(cell43, emptyColumnFamilyName, emptyColumnName)); - // Test getMaxTimestamp - Assert.assertEquals(timestamp43, ScanUtil.getMaxTimestamp(cellList)); - } + @Test + public void testLastPkColumnIsVariableLengthAndDescBug5307() throws Exception { + RowKeySchemaBuilder rowKeySchemaBuilder = new RowKeySchemaBuilder(2); + + rowKeySchemaBuilder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; } - @Test - public void testIsServerSideMaskingPropertySet() throws Exception { - // Test property is not set - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - props.setProperty(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, "false"); - PhoenixTestDriver driver1 = new PhoenixTestDriver(ReadOnlyProps.EMPTY_PROPS.addAll(props)); - try (Connection conn = driver1.connect(getUrl(), props)) { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - 
Assert.assertFalse(ScanUtil.isServerSideMaskingEnabled(phxConn)); - } + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } - // Test property is set - props.setProperty(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, "true"); - PhoenixTestDriver driver2 = new PhoenixTestDriver(ReadOnlyProps.EMPTY_PROPS.addAll(props)); - try (Connection conn = driver2.connect(getUrl(), props)) { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - Assert.assertTrue(ScanUtil.isServerSideMaskingEnabled(phxConn)); - } + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; } + + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } + }, false, SortOrder.getDefault()); + + rowKeySchemaBuilder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return null; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return SortOrder.DESC; + } + }, false, SortOrder.DESC); + + rowKeySchemaBuilder.rowKeyOrderOptimizable(true); + RowKeySchema rowKeySchema = rowKeySchemaBuilder.build(); + // it is [[obj1, obj2, obj3], [\xCD\xCD\xCD\xCD, \xCE\xCE\xCE\xCE, \xCE\xCE\xCE\xCE]] + List> rowKeySlotRangesList = Arrays.asList( + Arrays.asList(KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC)), + KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj2", SortOrder.ASC)), + KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC))), + Arrays.asList(KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC)), + KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC)), + KeyRange.getKeyRange(PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC)))); + + int[] rowKeySlotSpans = new int[] { 0, 0 }; + byte[] rowKey = new byte[1024]; + int[] rowKeySlotRangesIndexes = new int[] { 0, 0 }; + int rowKeyLength = ScanUtil.setKey(rowKeySchema, rowKeySlotRangesList, rowKeySlotSpans, + rowKeySlotRangesIndexes, Bound.LOWER, rowKey, 0, 0, 2); + byte[] startKey = Arrays.copyOf(rowKey, rowKeyLength); + + rowKeySlotRangesIndexes = new int[] { 2, 2 }; + rowKey = new byte[1024]; + rowKeyLength = ScanUtil.setKey(rowKeySchema, rowKeySlotRangesList, rowKeySlotSpans, + rowKeySlotRangesIndexes, Bound.UPPER, rowKey, 0, 0, 2); + byte[] endKey = Arrays.copyOf(rowKey, rowKeyLength); + + byte[] expectedStartKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("obj1", SortOrder.ASC), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes("2222", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + byte[] expectedEndKey = ByteUtil.concat(PVarchar.INSTANCE.toBytes("obj3", SortOrder.ASC), + QueryConstants.SEPARATOR_BYTE_ARRAY, PVarchar.INSTANCE.toBytes("1111", SortOrder.DESC), + QueryConstants.DESC_SEPARATOR_BYTE_ARRAY); + ByteUtil.nextKey(expectedEndKey, expectedEndKey.length); + + assertArrayEquals(expectedStartKey, startKey); + assertArrayEquals(expectedEndKey, endKey); + } + } + + public static class PhoenixTTLScanUtilTest extends BaseConnectionlessQueryTest { + + @Test + public void testPhoenixTTLUtilMethods() throws SQLException { + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + try (Connection conn = driver.connect(getUrl(), props)) { + PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); + 
PTable table = phxConn.getTable(new PTableKey(null, ATABLE_NAME)); + + byte[] emptyColumnFamilyName = SchemaUtil.getEmptyColumnFamily(table); + byte[] emptyColumnName = + table.getEncodingScheme() == PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS + ? QueryConstants.EMPTY_COLUMN_BYTES + : table.getEncodingScheme().encode(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + + String row = "test.row"; + long timestamp42 = 42L; + KeyValue.Type type42 = KeyValue.Type.Put; + String value42 = "test.value.42"; + long seqId42 = 1042L; + + List cellList = Lists.newArrayList(); + Cell cell42 = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) + .setRow(Bytes.toBytes(row)).setFamily(emptyColumnFamilyName).setQualifier(emptyColumnName) + .setTimestamp(timestamp42).setType(type42.getCode()).setValue(Bytes.toBytes(value42)) + .setSequenceId(seqId42).build(); + // Add cell to the cell list + cellList.add(cell42); + + long timestamp43 = 43L; + String columnName = "test_column"; + KeyValue.Type type43 = KeyValue.Type.Put; + String value43 = "test.value.43"; + long seqId43 = 1043L; + Cell cell43 = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes(row)) + .setFamily(emptyColumnFamilyName).setQualifier(Bytes.toBytes(columnName)) + .setTimestamp(timestamp43).setType(type43.getCode()).setValue(Bytes.toBytes(value43)) + .setSequenceId(seqId43).build(); + // Add cell to the cell list + cellList.add(cell43); + + long timestamp44 = 44L; + Scan testScan = new Scan(); + testScan.setAttribute(BaseScannerRegionObserverConstants.TTL, Bytes.toBytes(1L)); + // Test isTTLExpired + Assert.assertTrue(ScanUtil.isTTLExpired(cell42, testScan, timestamp44)); + Assert.assertFalse(ScanUtil.isTTLExpired(cell43, testScan, timestamp44)); + // Test isEmptyColumn + Assert.assertTrue(ScanUtil.isEmptyColumn(cell42, emptyColumnFamilyName, emptyColumnName)); + Assert.assertFalse(ScanUtil.isEmptyColumn(cell43, emptyColumnFamilyName, emptyColumnName)); + // Test getMaxTimestamp + Assert.assertEquals(timestamp43, ScanUtil.getMaxTimestamp(cellList)); + } + } + + @Test + public void testIsServerSideMaskingPropertySet() throws Exception { + // Test property is not set + Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); + props.setProperty(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, "false"); + PhoenixTestDriver driver1 = new PhoenixTestDriver(ReadOnlyProps.EMPTY_PROPS.addAll(props)); + try (Connection conn = driver1.connect(getUrl(), props)) { + PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); + Assert.assertFalse(ScanUtil.isServerSideMaskingEnabled(phxConn)); + } + + // Test property is set + props.setProperty(QueryServices.PHOENIX_TTL_SERVER_SIDE_MASKING_ENABLED, "true"); + PhoenixTestDriver driver2 = new PhoenixTestDriver(ReadOnlyProps.EMPTY_PROPS.addAll(props)); + try (Connection conn = driver2.connect(getUrl(), props)) { + PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); + Assert.assertTrue(ScanUtil.isServerSideMaskingEnabled(phxConn)); + } } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/SequenceUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/SequenceUtilTest.java index 2abc4821c85..931abc9ef19 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/SequenceUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/SequenceUtilTest.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. 
See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.util; @@ -19,101 +26,116 @@ public class SequenceUtilTest { - private static long MIN_VALUE = 1; - private static long MAX_VALUE = 10; - private static long CACHE_SIZE = 2; - - @Test - public void testAscendingNextValueWithinLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testAscendingNextValueReachLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testAscendingNextValueGreaterThanMaxValue() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(MAX_VALUE, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testAscendingOverflow() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(Long.MAX_VALUE, 0, Long.MAX_VALUE, 1/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testDescendingNextValueWithinLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testDescendingNextValueReachLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testDescendingNextValueLessThanMinValue() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(2, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testDescendingOverflowCycle() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(Long.MIN_VALUE, Long.MIN_VALUE, 0, -1/* incrementBy */, CACHE_SIZE)); - } - - @Test - public void testBulkAllocationAscendingNextValueGreaterThanMax() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(MAX_VALUE, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE, 1)); - } - - @Test - public void testBulkAllocationAscendingNextValueReachLimit() throws 
SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE, 2)); - } - - @Test - public void testBulkAllocationAscendingNextValueWithinLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE, 2)); - - } - - @Test - public void testBulkAllocationAscendingOverflow() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(Long.MAX_VALUE, 0, Long.MAX_VALUE, 1/* incrementBy */, CACHE_SIZE, 100)); - } - - - @Test - public void testBulkAllocationDescendingNextValueLessThanMax() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(10, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE, 5)); - } - - @Test - public void testBulkAllocationDescendingNextValueReachLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(7, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE, 3)); - } - - @Test - public void testBulkAllocationDescendingNextValueWithinLimit() throws SQLException { - assertFalse(SequenceUtil.checkIfLimitReached(8, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE, 2)); - - } - - @Test - public void testBulkAllocationDescendingOverflowCycle() throws SQLException { - assertTrue(SequenceUtil.checkIfLimitReached(Long.MIN_VALUE, Long.MIN_VALUE, 0, -1/* incrementBy */, CACHE_SIZE, 100)); - } - - @Test - public void testIsCycleAllowedForBulkAllocation() { - assertFalse(SequenceUtil.isCycleAllowed(2)); - } - - @Test - public void testIsCycleAllowedForStandardAllocation() { - assertTrue(SequenceUtil.isCycleAllowed(1)); - } - + private static long MIN_VALUE = 1; + private static long MAX_VALUE = 10; + private static long CACHE_SIZE = 2; + + @Test + public void testAscendingNextValueWithinLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testAscendingNextValueReachLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testAscendingNextValueGreaterThanMaxValue() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(MAX_VALUE, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, + CACHE_SIZE)); + } + + @Test + public void testAscendingOverflow() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(Long.MAX_VALUE, 0, Long.MAX_VALUE, + 1/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testDescendingNextValueWithinLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testDescendingNextValueReachLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testDescendingNextValueLessThanMinValue() throws SQLException { + assertTrue( + SequenceUtil.checkIfLimitReached(2, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testDescendingOverflowCycle() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(Long.MIN_VALUE, Long.MIN_VALUE, 0, + -1/* incrementBy */, CACHE_SIZE)); + } + + @Test + public void testBulkAllocationAscendingNextValueGreaterThanMax() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(MAX_VALUE, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, + CACHE_SIZE, 1)); + } + + 
@Test + public void testBulkAllocationAscendingNextValueReachLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(6, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE, 2)); + } + + @Test + public void testBulkAllocationAscendingNextValueWithinLimit() throws SQLException { + assertFalse( + SequenceUtil.checkIfLimitReached(5, MIN_VALUE, MAX_VALUE, 2/* incrementBy */, CACHE_SIZE, 2)); + + } + + @Test + public void testBulkAllocationAscendingOverflow() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(Long.MAX_VALUE, 0, Long.MAX_VALUE, + 1/* incrementBy */, CACHE_SIZE, 100)); + } + + @Test + public void testBulkAllocationDescendingNextValueLessThanMax() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(10, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, + CACHE_SIZE, 5)); + } + + @Test + public void testBulkAllocationDescendingNextValueReachLimit() throws SQLException { + assertFalse(SequenceUtil.checkIfLimitReached(7, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, + CACHE_SIZE, 3)); + } + + @Test + public void testBulkAllocationDescendingNextValueWithinLimit() throws SQLException { + assertFalse(SequenceUtil.checkIfLimitReached(8, MIN_VALUE, MAX_VALUE, -2/* incrementBy */, + CACHE_SIZE, 2)); + + } + + @Test + public void testBulkAllocationDescendingOverflowCycle() throws SQLException { + assertTrue(SequenceUtil.checkIfLimitReached(Long.MIN_VALUE, Long.MIN_VALUE, 0, + -1/* incrementBy */, CACHE_SIZE, 100)); + } + + @Test + public void testIsCycleAllowedForBulkAllocation() { + assertFalse(SequenceUtil.isCycleAllowed(2)); + } + + @Test + public void testIsCycleAllowedForStandardAllocation() { + assertTrue(SequenceUtil.isCycleAllowed(1)); + } + } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java index 6d005626c8e..1ae9e880c9b 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/StringUtilTest.java @@ -1,18 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.util; @@ -24,59 +25,60 @@ public class StringUtilTest { - private void testLpad(String inputString, int length, String fillString, String expectedOutput) throws Exception { - byte[] input = inputString.getBytes(); - byte[] fill = fillString.getBytes(); - byte[] output = StringUtil.lpad(input, 0, input.length, fill, 0, fill.length, false, length); - assertArrayEquals("Incorrect output of lpad", expectedOutput.getBytes(), output); - } + private void testLpad(String inputString, int length, String fillString, String expectedOutput) + throws Exception { + byte[] input = inputString.getBytes(); + byte[] fill = fillString.getBytes(); + byte[] output = StringUtil.lpad(input, 0, input.length, fill, 0, fill.length, false, length); + assertArrayEquals("Incorrect output of lpad", expectedOutput.getBytes(), output); + } - @Test - public void testLpadFillLengthLessThanPadLength() throws Exception { - testLpad("ABCD", 8, "12", "1212ABCD"); - } + @Test + public void testLpadFillLengthLessThanPadLength() throws Exception { + testLpad("ABCD", 8, "12", "1212ABCD"); + } - @Test - public void testLpadFillLengthEqualPadLength() throws Exception { - testLpad("ABCD", 8, "1234", "1234ABCD"); - } - - @Test - public void testLpadFillLengthGreaterThanPadLength() throws Exception { - testLpad("ABCD", 8, "12345", "1234ABCD"); - } + @Test + public void testLpadFillLengthEqualPadLength() throws Exception { + testLpad("ABCD", 8, "1234", "1234ABCD"); + } - @Test - public void testLpadZeroPadding() throws Exception { - testLpad("ABCD", 4, "1234", "ABCD"); - } + @Test + public void testLpadFillLengthGreaterThanPadLength() throws Exception { + testLpad("ABCD", 8, "12345", "1234ABCD"); + } - @Test - public void testCalculateUTF8Offset() throws Exception { - String tmp, padding = "padding", data = "零一二三四五六七八九", trailing = "trailing"; - byte[] bytes = (padding + data + trailing).getBytes(); - int ret, offset = padding.getBytes().length, length = data.getBytes().length; + @Test + public void testLpadZeroPadding() throws Exception { + testLpad("ABCD", 4, "1234", "ABCD"); + } - tmp = padding; - for (int i = 0; i < data.length(); ++i) { - ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); - assertEquals(tmp.getBytes().length, ret); - tmp = tmp + data.charAt(i); - } - for (int i = data.length(); i < data.length() + 10; ++i) { - ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); - assertEquals(-1, ret); - } + @Test + public void testCalculateUTF8Offset() throws Exception { + String tmp, padding = "padding", data = "零一二三四五六七八九", trailing = "trailing"; + byte[] bytes = (padding + data + trailing).getBytes(); + int ret, offset = padding.getBytes().length, length = data.getBytes().length; - for (int i = -data.length() - 10; i < -data.length(); ++i) { - ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); - assertEquals(-1, ret); - } - tmp = padding; - for (int i = -data.length(); i <= -1; ++i) { - ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); - assertEquals("i=" + i, tmp.getBytes().length, ret); - tmp = tmp + data.charAt(i + data.length()); - } + tmp = padding; + for (int i = 0; i < data.length(); ++i) { + ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); + assertEquals(tmp.getBytes().length, ret); + tmp = tmp + data.charAt(i); + } + for (int i = data.length(); 
i < data.length() + 10; ++i) { + ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); + assertEquals(-1, ret); + } + + for (int i = -data.length() - 10; i < -data.length(); ++i) { + ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); + assertEquals(-1, ret); + } + tmp = padding; + for (int i = -data.length(); i <= -1; ++i) { + ret = StringUtil.calculateUTF8Offset(bytes, offset, length, SortOrder.ASC, i); + assertEquals("i=" + i, tmp.getBytes().length, ret); + tmp = tmp + data.charAt(i + data.length()); } + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java index 75af95da3ec..c15d65ceb88 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TenantIdByteConversionTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.util; import static org.junit.Assert.assertArrayEquals; @@ -50,265 +49,218 @@ import org.apache.phoenix.schema.types.PUnsignedSmallint; import org.apache.phoenix.schema.types.PUnsignedTinyint; import org.apache.phoenix.schema.types.PVarchar; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /*Test the getTenantIdBytes method in ScanUtil*/ @RunWith(Parameterized.class) public class TenantIdByteConversionTest { - private RowKeySchema schema; - private boolean isSalted; - private PName tenantId; - private byte[] expectedTenantIdBytes; - - - public TenantIdByteConversionTest( - RowKeySchema schema, - boolean isSalted, - PName tenantId, - byte[] expectedTenantIdBytes ) { - this.schema = schema; - this.isSalted = isSalted; - this.tenantId = tenantId; - this.expectedTenantIdBytes = expectedTenantIdBytes; - } - - @Test - public void test() { - try { - byte[] actualTenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, false); - assertArrayEquals(expectedTenantIdBytes, actualTenantIdBytes); - } catch (SQLException ex) { - fail(ex.getMessage()); - } - } - - @Parameters - public static synchronized Collection data() { - List testCases = Lists.newArrayList(); - // Varchar - testCases.add(new Object[] { - getDataSchema(PVarchar.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("NameOfTenant"), - PVarchar.INSTANCE.toBytes("NameOfTenant") - }); - - // Char - testCases.add(new Object[] { - getDataSchema(PChar.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("N"), - PChar.INSTANCE.toBytes(PChar.INSTANCE.toObject("N")) - }); - - //Int - testCases.add(new Object[] { - getDataSchema(PInteger.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("2147483646"), - PInteger.INSTANCE.toBytes(PInteger.INSTANCE.toObject("2147483646")) - }); - - // 
UnsignedInt - testCases.add(new Object[] { - getDataSchema(PUnsignedInt.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("2147483646"), - PUnsignedInt.INSTANCE.toBytes(PUnsignedInt.INSTANCE.toObject("2147483646")) - }); - - //BigInt - testCases.add(new Object[] { - getDataSchema(PLong.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("9223372036854775806"), - PLong.INSTANCE.toBytes(PLong.INSTANCE.toObject("9223372036854775806")) - }); - - //UnsignedLong - testCases.add(new Object[] { - getDataSchema(PUnsignedLong.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("9223372036854775806"), - PUnsignedLong.INSTANCE.toBytes(PUnsignedLong.INSTANCE.toObject("9223372036854775806")) - }); - - //TinyInt - testCases.add(new Object[] { - getDataSchema(PTinyint.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("126"), - PTinyint.INSTANCE.toBytes(PTinyint.INSTANCE.toObject("126")) - }); - - //UnsignedTinyInt - testCases.add(new Object[] { - getDataSchema(PUnsignedTinyint.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("126"), - PUnsignedTinyint.INSTANCE.toBytes(PUnsignedTinyint.INSTANCE.toObject("126")) - }); - - //SmallInt - testCases.add(new Object[] { - getDataSchema(PSmallint.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("32766"), - PSmallint.INSTANCE.toBytes(PSmallint.INSTANCE.toObject("32766")) - }); - - //UnsignedSmallInt - testCases.add(new Object[] { - getDataSchema(PUnsignedSmallint.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("32766"), - PUnsignedSmallint.INSTANCE.toBytes(PUnsignedSmallint.INSTANCE.toObject("32766")) - }); - - //Float - testCases.add(new Object[] { - getDataSchema(PFloat.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("3.402823466"), - PFloat.INSTANCE.toBytes(PFloat.INSTANCE.toObject("3.402823466")) - }); - - //UnsignedFloat - testCases.add(new Object[] { - getDataSchema(PUnsignedFloat.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("3.402823466"), - PUnsignedFloat.INSTANCE.toBytes(PUnsignedFloat.INSTANCE.toObject("3.402823466")) - }); - - //Double - testCases.add(new Object[] { - getDataSchema(PDouble.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("1.7976931348623158"), - PDouble.INSTANCE.toBytes(PDouble.INSTANCE.toObject("1.7976931348623158")) - }); - - //UnsignedDouble - testCases.add(new Object[] { - getDataSchema(PUnsignedDouble.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("1.7976931348623158"), - PUnsignedDouble.INSTANCE.toBytes(PUnsignedDouble.INSTANCE.toObject("1.7976931348623158")) - }); - - //UnsignedDecimal - testCases.add(new Object[] { - getDataSchema(PDecimal.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("3.402823466"), - PDecimal.INSTANCE.toBytes(PDecimal.INSTANCE.toObject("3.402823466")) - }); - - //Boolean - testCases.add(new Object[] { - getDataSchema(PBoolean.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName("true"), - PBoolean.INSTANCE.toBytes(PBoolean.INSTANCE.toObject("true")) - }); - - //Binary - byte[] bytes = new byte[] {0, 1, 2, 3}; - String byteString = new String( Base64.getEncoder().encode(bytes) ); - testCases.add(new Object[] { - getDataSchema(PBinary.INSTANCE, SortOrder.getDefault()), - false, - PNameFactory.newName(byteString), - PBinary.INSTANCE.toBytes(PBinary.INSTANCE.toObject(byteString)) - }); - - //Descending TenantId - testCases.add(new Object[] { - getDataSchema(PUnsignedInt.INSTANCE, 
SortOrder.DESC), - false, - PNameFactory.newName("2147483646"), - PUnsignedInt.INSTANCE.toBytes(PUnsignedInt.INSTANCE.toObject("2147483646")) - }); - - return testCases; - } - - public static RowKeySchema getDataSchema (final PDataType data, final SortOrder sortOrder) { - RowKeySchemaBuilder builder = new RowKeySchemaBuilder(3); - - builder.addField(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public PDataType getDataType() { - return data; - } - - @Override public Integer getMaxLength() { - return 1; - } - - @Override public Integer getScale() { - return null; - } - - @Override public SortOrder getSortOrder() { - return sortOrder; - } - }, false, sortOrder); - - builder.addField(new PDatum() { - @Override public boolean isNullable() { - return false; - } - - @Override public PDataType getDataType() { - return PUnsignedInt.INSTANCE; - } - - @Override public Integer getMaxLength() { - return 3; - } - - @Override public Integer getScale() { - return null; - } - - @Override public SortOrder getSortOrder() { - return sortOrder; - } - }, false, sortOrder); - - builder.addField(new PDatum() { - @Override public boolean isNullable() { - return true; - } - - @Override public PDataType getDataType() { - return PVarchar.INSTANCE; - } - - @Override public Integer getMaxLength() { - return 3; - } - - @Override public Integer getScale() { - return null; - } - - @Override public SortOrder getSortOrder() { - return sortOrder; - } - }, false, sortOrder); - - return builder.build(); + private RowKeySchema schema; + private boolean isSalted; + private PName tenantId; + private byte[] expectedTenantIdBytes; + + public TenantIdByteConversionTest(RowKeySchema schema, boolean isSalted, PName tenantId, + byte[] expectedTenantIdBytes) { + this.schema = schema; + this.isSalted = isSalted; + this.tenantId = tenantId; + this.expectedTenantIdBytes = expectedTenantIdBytes; + } + + @Test + public void test() { + try { + byte[] actualTenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, false); + assertArrayEquals(expectedTenantIdBytes, actualTenantIdBytes); + } catch (SQLException ex) { + fail(ex.getMessage()); } + } + + @Parameters + public static synchronized Collection data() { + List testCases = Lists.newArrayList(); + // Varchar + testCases.add(new Object[] { getDataSchema(PVarchar.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("NameOfTenant"), PVarchar.INSTANCE.toBytes("NameOfTenant") }); + + // Char + testCases.add(new Object[] { getDataSchema(PChar.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("N"), PChar.INSTANCE.toBytes(PChar.INSTANCE.toObject("N")) }); + + // Int + testCases.add(new Object[] { getDataSchema(PInteger.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("2147483646"), + PInteger.INSTANCE.toBytes(PInteger.INSTANCE.toObject("2147483646")) }); + + // UnsignedInt + testCases.add(new Object[] { getDataSchema(PUnsignedInt.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("2147483646"), + PUnsignedInt.INSTANCE.toBytes(PUnsignedInt.INSTANCE.toObject("2147483646")) }); + + // BigInt + testCases.add(new Object[] { getDataSchema(PLong.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("9223372036854775806"), + PLong.INSTANCE.toBytes(PLong.INSTANCE.toObject("9223372036854775806")) }); + + // UnsignedLong + testCases.add(new Object[] { getDataSchema(PUnsignedLong.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("9223372036854775806"), + 
PUnsignedLong.INSTANCE.toBytes(PUnsignedLong.INSTANCE.toObject("9223372036854775806")) }); + + // TinyInt + testCases.add(new Object[] { getDataSchema(PTinyint.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("126"), PTinyint.INSTANCE.toBytes(PTinyint.INSTANCE.toObject("126")) }); + + // UnsignedTinyInt + testCases.add(new Object[] { getDataSchema(PUnsignedTinyint.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("126"), + PUnsignedTinyint.INSTANCE.toBytes(PUnsignedTinyint.INSTANCE.toObject("126")) }); + + // SmallInt + testCases.add(new Object[] { getDataSchema(PSmallint.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("32766"), + PSmallint.INSTANCE.toBytes(PSmallint.INSTANCE.toObject("32766")) }); + + // UnsignedSmallInt + testCases.add(new Object[] { getDataSchema(PUnsignedSmallint.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("32766"), + PUnsignedSmallint.INSTANCE.toBytes(PUnsignedSmallint.INSTANCE.toObject("32766")) }); + + // Float + testCases.add(new Object[] { getDataSchema(PFloat.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("3.402823466"), + PFloat.INSTANCE.toBytes(PFloat.INSTANCE.toObject("3.402823466")) }); + + // UnsignedFloat + testCases.add(new Object[] { getDataSchema(PUnsignedFloat.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("3.402823466"), + PUnsignedFloat.INSTANCE.toBytes(PUnsignedFloat.INSTANCE.toObject("3.402823466")) }); + + // Double + testCases.add(new Object[] { getDataSchema(PDouble.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("1.7976931348623158"), + PDouble.INSTANCE.toBytes(PDouble.INSTANCE.toObject("1.7976931348623158")) }); + + // UnsignedDouble + testCases.add(new Object[] { getDataSchema(PUnsignedDouble.INSTANCE, SortOrder.getDefault()), + false, PNameFactory.newName("1.7976931348623158"), + PUnsignedDouble.INSTANCE.toBytes(PUnsignedDouble.INSTANCE.toObject("1.7976931348623158")) }); + + // UnsignedDecimal + testCases.add(new Object[] { getDataSchema(PDecimal.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("3.402823466"), + PDecimal.INSTANCE.toBytes(PDecimal.INSTANCE.toObject("3.402823466")) }); + + // Boolean + testCases.add(new Object[] { getDataSchema(PBoolean.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName("true"), + PBoolean.INSTANCE.toBytes(PBoolean.INSTANCE.toObject("true")) }); + + // Binary + byte[] bytes = new byte[] { 0, 1, 2, 3 }; + String byteString = new String(Base64.getEncoder().encode(bytes)); + testCases.add(new Object[] { getDataSchema(PBinary.INSTANCE, SortOrder.getDefault()), false, + PNameFactory.newName(byteString), + PBinary.INSTANCE.toBytes(PBinary.INSTANCE.toObject(byteString)) }); + + // Descending TenantId + testCases.add(new Object[] { getDataSchema(PUnsignedInt.INSTANCE, SortOrder.DESC), false, + PNameFactory.newName("2147483646"), + PUnsignedInt.INSTANCE.toBytes(PUnsignedInt.INSTANCE.toObject("2147483646")) }); + + return testCases; + } + + public static RowKeySchema getDataSchema(final PDataType data, final SortOrder sortOrder) { + RowKeySchemaBuilder builder = new RowKeySchemaBuilder(3); + + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return data; + } + + @Override + public Integer getMaxLength() { + return 1; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return sortOrder; + } + }, false, 
sortOrder); + + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return false; + } + + @Override + public PDataType getDataType() { + return PUnsignedInt.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return 3; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return sortOrder; + } + }, false, sortOrder); + + builder.addField(new PDatum() { + @Override + public boolean isNullable() { + return true; + } + + @Override + public PDataType getDataType() { + return PVarchar.INSTANCE; + } + + @Override + public Integer getMaxLength() { + return 3; + } + + @Override + public Integer getScale() { + return null; + } + + @Override + public SortOrder getSortOrder() { + return sortOrder; + } + }, false, sortOrder); + + return builder.build(); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestDDLUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestDDLUtil.java index e0989883fcb..d42e88dcd00 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestDDLUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestDDLUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,91 +17,81 @@ */ package org.apache.phoenix.util; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; - import java.sql.Connection; import java.sql.SQLException; +import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; + public class TestDDLUtil { - private boolean isNamespaceMapped; - private boolean isChangeDetectionEnabled; + private boolean isNamespaceMapped; + private boolean isChangeDetectionEnabled; + + public TestDDLUtil(boolean isNamespaceMapped) { + this.isNamespaceMapped = isNamespaceMapped; + } - public TestDDLUtil(boolean isNamespaceMapped) { - this.isNamespaceMapped = isNamespaceMapped; + public void createBaseTable(Connection conn, String schemaName, String tableName, + boolean multiTenant, Integer saltBuckets, String splits, boolean immutable) + throws SQLException { + if (isNamespaceMapped) { + conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); } + String ddl = "CREATE " + (immutable ? "IMMUTABLE" : "") + " TABLE " + + SchemaUtil.getTableName(schemaName, tableName) + " (t_id VARCHAR NOT NULL,\n" + + "k1 VARCHAR NOT NULL,\n" + "k2 INTEGER NOT NULL,\n" + "v1 VARCHAR,\n" + "v2 INTEGER,\n" + + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2))\n"; + String ddlOptions = multiTenant ? "MULTI_TENANT=true" : ""; + if (saltBuckets != null) { + ddlOptions = ddlOptions + (ddlOptions.isEmpty() ? "" : ", ") + "salt_buckets=" + saltBuckets; + } + if (isChangeDetectionEnabled) { + ddlOptions = + ddlOptions + (ddlOptions.isEmpty() ? "" : ", ") + "CHANGE_DETECTION_ENABLED=TRUE"; + } + if (splits != null) { + ddlOptions = ddlOptions + (ddlOptions.isEmpty() ? 
"" : ", ") + "splits=" + splits; + } + conn.createStatement().execute(ddl + ddlOptions); + } - public void createBaseTable(Connection conn, String schemaName, String tableName, - boolean multiTenant, - Integer saltBuckets, String splits, boolean immutable) - throws SQLException { - if (isNamespaceMapped) { - conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); - } - String ddl = "CREATE " + (immutable ? "IMMUTABLE" : "") + - " TABLE " + SchemaUtil.getTableName(schemaName, tableName) + - " (t_id VARCHAR NOT NULL,\n" + - "k1 VARCHAR NOT NULL,\n" + - "k2 INTEGER NOT NULL,\n" + - "v1 VARCHAR,\n" + - "v2 INTEGER,\n" + - "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2))\n"; - String ddlOptions = multiTenant ? "MULTI_TENANT=true" : ""; - if (saltBuckets != null) { - ddlOptions = ddlOptions - + (ddlOptions.isEmpty() ? "" : ", ") - + "salt_buckets=" + saltBuckets; - } - if (isChangeDetectionEnabled) { - ddlOptions = ddlOptions + (ddlOptions.isEmpty() ? "" : ", ") + - "CHANGE_DETECTION_ENABLED=TRUE"; - } - if (splits != null) { - ddlOptions = ddlOptions - + (ddlOptions.isEmpty() ? "" : ", ") - + "splits=" + splits; - } - conn.createStatement().execute(ddl + ddlOptions); + public void createIndex(Connection conn, String schemaName, String indexName, String tableName, + String indexedColumnName, boolean isLocal, boolean isAsync) throws SQLException { + if (isNamespaceMapped) { + conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); } + String fullTableName = SchemaUtil.getTableName(schemaName, tableName); + String local = isLocal ? " LOCAL " : ""; + String async = isAsync ? " ASYNC " : ""; + String sql = "CREATE " + local + " INDEX " + indexName + " ON " + fullTableName + "(" + + indexedColumnName + ")" + async; + conn.createStatement().execute(sql); + } - public void createIndex(Connection conn, String schemaName, String indexName, - String tableName, String indexedColumnName, boolean isLocal, - boolean isAsync) throws SQLException { - if (isNamespaceMapped) { - conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); - } - String fullTableName = SchemaUtil.getTableName(schemaName, tableName); - String local = isLocal ? " LOCAL " : ""; - String async = isAsync ? 
" ASYNC " : ""; - String sql = - "CREATE " + local + " INDEX " + indexName + " ON " + fullTableName + "(" + - indexedColumnName + ")" + async; - conn.createStatement().execute(sql); + public void createView(Connection conn, String schemaName, String viewName, String baseTableName) + throws SQLException { + if (isNamespaceMapped) { + conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); } - public void createView(Connection conn, String schemaName, String viewName, - String baseTableName) throws SQLException { - if (isNamespaceMapped) { - conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); - } - String fullViewName = SchemaUtil.getTableName(schemaName, viewName); - String fullTableName = SchemaUtil.getTableName(schemaName, baseTableName); - String viewSql = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName; - if (isChangeDetectionEnabled) { - viewSql = viewSql + " " + PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED + "=TRUE"; - } - conn.createStatement().execute(viewSql); + String fullViewName = SchemaUtil.getTableName(schemaName, viewName); + String fullTableName = SchemaUtil.getTableName(schemaName, baseTableName); + String viewSql = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName; + if (isChangeDetectionEnabled) { + viewSql = viewSql + " " + PhoenixDatabaseMetaData.CHANGE_DETECTION_ENABLED + "=TRUE"; } + conn.createStatement().execute(viewSql); + } - public void createViewIndex(Connection conn, String schemaName, String indexName, - String viewName, - String indexColumn) throws SQLException { - if (isNamespaceMapped) { - conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); - } - String fullViewName = SchemaUtil.getTableName(schemaName, viewName); - conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullViewName + "(" + indexColumn + ")"); + public void createViewIndex(Connection conn, String schemaName, String indexName, String viewName, + String indexColumn) throws SQLException { + if (isNamespaceMapped) { + conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName); } + String fullViewName = SchemaUtil.getTableName(schemaName, viewName); + conn.createStatement() + .execute("CREATE INDEX " + indexName + " ON " + fullViewName + "(" + indexColumn + ")"); + } - public void setChangeDetectionEnabled(boolean isChangeDetectionEnabled) { - this.isChangeDetectionEnabled = isChangeDetectionEnabled; - } + public void setChangeDetectionEnabled(boolean isChangeDetectionEnabled) { + this.isChangeDetectionEnabled = isChangeDetectionEnabled; + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java index 6ea2a2eb656..74550217525 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -72,7 +72,6 @@ import org.apache.hadoop.hbase.client.CoprocessorDescriptor; import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; - import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -91,13 +90,13 @@ import org.apache.phoenix.compile.ColumnResolver; import org.apache.phoenix.compile.FromCompiler; import org.apache.phoenix.compile.JoinCompiler; +import org.apache.phoenix.compile.JoinCompiler.JoinTable; import org.apache.phoenix.compile.QueryPlan; import org.apache.phoenix.compile.SequenceManager; import org.apache.phoenix.compile.StatementContext; import org.apache.phoenix.compile.StatementNormalizer; import org.apache.phoenix.compile.SubqueryRewriter; import org.apache.phoenix.compile.SubselectRewriter; -import org.apache.phoenix.compile.JoinCompiler.JoinTable; import org.apache.phoenix.coprocessor.CompactionScanner; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest; import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse; @@ -130,9 +129,9 @@ import org.apache.phoenix.jdbc.PhoenixPreparedStatement; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.parse.FilterableStatement; +import org.apache.phoenix.parse.LikeParseNode.LikeType; import org.apache.phoenix.parse.SQLParser; import org.apache.phoenix.parse.SelectStatement; -import org.apache.phoenix.parse.LikeParseNode.LikeType; import org.apache.phoenix.query.ConnectionQueryServices; import org.apache.phoenix.query.KeyRange; import org.apache.phoenix.query.QueryConstants; @@ -152,1302 +151,1354 @@ import org.apache.phoenix.schema.stats.GuidePostsKey; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PDataType; +import org.apache.phoenix.thirdparty.com.google.common.base.Objects; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - - - public class TestUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(TestUtil.class); - - private static final Long ZERO = new Long(0); - public static final String DEFAULT_SCHEMA_NAME = "S"; - public static final String DEFAULT_DATA_TABLE_NAME = "T"; - public static final String DEFAULT_INDEX_TABLE_NAME = "I"; - public static final String DEFAULT_DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "T"); - public static final String DEFAULT_INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "I"); - - public static final String TEST_TABLE_SCHEMA = "(" + - " varchar_pk VARCHAR NOT NULL, " + - " char_pk CHAR(10) NOT NULL, " + - " int_pk INTEGER NOT NULL, " + - " long_pk BIGINT NOT NULL, " + - " decimal_pk DECIMAL(31, 10) NOT NULL, " + - " date_pk DATE NOT NULL, " + - " a.varchar_col1 VARCHAR, " + - " a.char_col1 CHAR(10), " + - " a.int_col1 INTEGER, " + - " a.long_col1 BIGINT, " + - " a.decimal_col1 DECIMAL(31, 10), " + - " a.date1 DATE, " + - " b.varchar_col2 VARCHAR, " + - " b.char_col2 
CHAR(10), " + - " b.int_col2 INTEGER, " + - " b.long_col2 BIGINT, " + - " b.decimal_col2 DECIMAL(31, 10), " + - " b.date2 DATE " + - " CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk, date_pk)) "; - - private TestUtil() { - } - - public static final String CF_NAME = "a"; - public static final byte[] CF = Bytes.toBytes(CF_NAME); - - public static final String CF2_NAME = "b"; - - public final static String A_VALUE = "a"; - public final static byte[] A = Bytes.toBytes(A_VALUE); - public final static String B_VALUE = "b"; - public final static byte[] B = Bytes.toBytes(B_VALUE); - public final static String C_VALUE = "c"; - public final static byte[] C = Bytes.toBytes(C_VALUE); - public final static String D_VALUE = "d"; - public final static byte[] D = Bytes.toBytes(D_VALUE); - public final static String E_VALUE = "e"; - public final static byte[] E = Bytes.toBytes(E_VALUE); - - public final static String ROW1 = "00A123122312312"; - public final static String ROW2 = "00A223122312312"; - public final static String ROW3 = "00A323122312312"; - public final static String ROW4 = "00A423122312312"; - public final static String ROW5 = "00B523122312312"; - public final static String ROW6 = "00B623122312312"; - public final static String ROW7 = "00B723122312312"; - public final static String ROW8 = "00B823122312312"; - public final static String ROW9 = "00C923122312312"; - - public final static String PARENTID1 = "0500x0000000001"; - public final static String PARENTID2 = "0500x0000000002"; - public final static String PARENTID3 = "0500x0000000003"; - public final static String PARENTID4 = "0500x0000000004"; - public final static String PARENTID5 = "0500x0000000005"; - public final static String PARENTID6 = "0500x0000000006"; - public final static String PARENTID7 = "0500x0000000007"; - public final static String PARENTID8 = "0500x0000000008"; - public final static String PARENTID9 = "0500x0000000009"; - - public final static List PARENTIDS = Lists.newArrayList(PARENTID1, PARENTID2, PARENTID3, PARENTID4, PARENTID5, PARENTID6, PARENTID7, PARENTID8, PARENTID9); - - public final static String ENTITYHISTID1 = "017x00000000001"; - public final static String ENTITYHISTID2 = "017x00000000002"; - public final static String ENTITYHISTID3 = "017x00000000003"; - public final static String ENTITYHISTID4 = "017x00000000004"; - public final static String ENTITYHISTID5 = "017x00000000005"; - public final static String ENTITYHISTID6 = "017x00000000006"; - public final static String ENTITYHISTID7 = "017x00000000007"; - public final static String ENTITYHISTID8 = "017x00000000008"; - public final static String ENTITYHISTID9 = "017x00000000009"; - - public final static List ENTITYHISTIDS = Lists.newArrayList(ENTITYHISTID1, ENTITYHISTID2, ENTITYHISTID3, ENTITYHISTID4, ENTITYHISTID5, ENTITYHISTID6, ENTITYHISTID7, ENTITYHISTID8, ENTITYHISTID9); - - public static final String LOCALHOST = "localhost"; - public static final String PHOENIX_JDBC_URL = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; - public static final String PHOENIX_CONNECTIONLESS_JDBC_URL = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; - - public static final String TEST_SCHEMA_FILE_NAME = "config" + File.separator + "test-schema.xml"; - public static final String CED_SCHEMA_FILE_NAME = "config" + File.separator + "schema.xml"; - public static final String ENTITY_HISTORY_TABLE_NAME = 
"ENTITY_HISTORY"; - public static final String ENTITY_HISTORY_SALTED_TABLE_NAME = "ENTITY_HISTORY_SALTED"; - public static final String ATABLE_NAME = "ATABLE"; - public static final String TABLE_WITH_ARRAY = "TABLE_WITH_ARRAY"; - public static final String SUM_DOUBLE_NAME = "SumDoubleTest"; - public static final String ATABLE_SCHEMA_NAME = ""; - public static final String BTABLE_NAME = "BTABLE"; - public static final String STABLE_NAME = "STABLE"; - public static final String STABLE_PK_NAME = "ID"; - public static final String STABLE_SCHEMA_NAME = ""; - public static final String CUSTOM_ENTITY_DATA_FULL_NAME = "CORE.CUSTOM_ENTITY_DATA"; - public static final String CUSTOM_ENTITY_DATA_NAME = "CUSTOM_ENTITY_DATA"; - public static final String CUSTOM_ENTITY_DATA_SCHEMA_NAME = "CORE"; - public static final String HBASE_NATIVE = "HBASE_NATIVE"; - public static final String HBASE_NATIVE_SCHEMA_NAME = ""; - public static final String HBASE_DYNAMIC_COLUMNS = "HBASE_DYNAMIC_COLUMNS"; - public static final String HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME = ""; - public static final String PRODUCT_METRICS_NAME = "PRODUCT_METRICS"; - public static final String PTSDB_NAME = "PTSDB"; - public static final String PTSDB2_NAME = "PTSDB2"; - public static final String PTSDB3_NAME = "PTSDB3"; - public static final String PTSDB_SCHEMA_NAME = ""; - public static final String FUNKY_NAME = "FUNKY_NAMES"; - public static final String MULTI_CF_NAME = "MULTI_CF"; - public static final String MDTEST_NAME = "MDTEST"; - public static final String MDTEST_SCHEMA_NAME = ""; - public static final String KEYONLY_NAME = "KEYONLY"; - public static final String TABLE_WITH_SALTING = "TABLE_WITH_SALTING"; - public static final String INDEX_DATA_SCHEMA = "INDEX_TEST"; - public static final String INDEX_DATA_TABLE = "INDEX_DATA_TABLE"; - public static final String MUTABLE_INDEX_DATA_TABLE = "MUTABLE_INDEX_DATA_TABLE"; - public static final String TRANSACTIONAL_DATA_TABLE = "TRANSACTIONAL_DATA_TABLE"; - public static final String JOIN_SCHEMA = "Join"; - public static final String JOIN_ORDER_TABLE = "OrderTable"; - public static final String JOIN_CUSTOMER_TABLE = "CustomerTable"; - public static final String JOIN_ITEM_TABLE = "ItemTable"; - public static final String JOIN_SUPPLIER_TABLE = "SupplierTable"; - public static final String JOIN_COITEM_TABLE = "CoitemTable"; - public static final String JOIN_ORDER_TABLE_FULL_NAME = '"' + JOIN_SCHEMA + "\".\"" + JOIN_ORDER_TABLE + '"'; - public static final String JOIN_CUSTOMER_TABLE_FULL_NAME = '"' + JOIN_SCHEMA + "\".\"" + JOIN_CUSTOMER_TABLE + '"'; - public static final String JOIN_ITEM_TABLE_FULL_NAME = '"' + JOIN_SCHEMA + "\".\"" + JOIN_ITEM_TABLE + '"'; - public static final String JOIN_SUPPLIER_TABLE_FULL_NAME = '"' + JOIN_SCHEMA + "\".\"" + JOIN_SUPPLIER_TABLE + '"'; - public static final String JOIN_COITEM_TABLE_FULL_NAME = '"' + JOIN_SCHEMA + "\".\"" + JOIN_COITEM_TABLE + '"'; - public static final String JOIN_ORDER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_ORDER_TABLE; - public static final String JOIN_CUSTOMER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_CUSTOMER_TABLE; - public static final String JOIN_ITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_ITEM_TABLE; - public static final String JOIN_SUPPLIER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_SUPPLIER_TABLE; - public static final String JOIN_COITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." 
+ JOIN_COITEM_TABLE; - public static final String BINARY_NAME = "BinaryTable"; - - /** - * Read-only properties used by all tests - */ - public static final Properties TEST_PROPERTIES = new Properties() { + private static final Logger LOGGER = LoggerFactory.getLogger(TestUtil.class); + + private static final Long ZERO = new Long(0); + public static final String DEFAULT_SCHEMA_NAME = "S"; + public static final String DEFAULT_DATA_TABLE_NAME = "T"; + public static final String DEFAULT_INDEX_TABLE_NAME = "I"; + public static final String DEFAULT_DATA_TABLE_FULL_NAME = + SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "T"); + public static final String DEFAULT_INDEX_TABLE_FULL_NAME = + SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "I"); + + public static final String TEST_TABLE_SCHEMA = "(" + " varchar_pk VARCHAR NOT NULL, " + + " char_pk CHAR(10) NOT NULL, " + " int_pk INTEGER NOT NULL, " + + " long_pk BIGINT NOT NULL, " + " decimal_pk DECIMAL(31, 10) NOT NULL, " + + " date_pk DATE NOT NULL, " + " a.varchar_col1 VARCHAR, " + " a.char_col1 CHAR(10), " + + " a.int_col1 INTEGER, " + " a.long_col1 BIGINT, " + " a.decimal_col1 DECIMAL(31, 10), " + + " a.date1 DATE, " + " b.varchar_col2 VARCHAR, " + " b.char_col2 CHAR(10), " + + " b.int_col2 INTEGER, " + " b.long_col2 BIGINT, " + " b.decimal_col2 DECIMAL(31, 10), " + + " b.date2 DATE " + + " CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk, date_pk)) "; + + private TestUtil() { + } + + public static final String CF_NAME = "a"; + public static final byte[] CF = Bytes.toBytes(CF_NAME); + + public static final String CF2_NAME = "b"; + + public final static String A_VALUE = "a"; + public final static byte[] A = Bytes.toBytes(A_VALUE); + public final static String B_VALUE = "b"; + public final static byte[] B = Bytes.toBytes(B_VALUE); + public final static String C_VALUE = "c"; + public final static byte[] C = Bytes.toBytes(C_VALUE); + public final static String D_VALUE = "d"; + public final static byte[] D = Bytes.toBytes(D_VALUE); + public final static String E_VALUE = "e"; + public final static byte[] E = Bytes.toBytes(E_VALUE); + + public final static String ROW1 = "00A123122312312"; + public final static String ROW2 = "00A223122312312"; + public final static String ROW3 = "00A323122312312"; + public final static String ROW4 = "00A423122312312"; + public final static String ROW5 = "00B523122312312"; + public final static String ROW6 = "00B623122312312"; + public final static String ROW7 = "00B723122312312"; + public final static String ROW8 = "00B823122312312"; + public final static String ROW9 = "00C923122312312"; + + public final static String PARENTID1 = "0500x0000000001"; + public final static String PARENTID2 = "0500x0000000002"; + public final static String PARENTID3 = "0500x0000000003"; + public final static String PARENTID4 = "0500x0000000004"; + public final static String PARENTID5 = "0500x0000000005"; + public final static String PARENTID6 = "0500x0000000006"; + public final static String PARENTID7 = "0500x0000000007"; + public final static String PARENTID8 = "0500x0000000008"; + public final static String PARENTID9 = "0500x0000000009"; + + public final static List PARENTIDS = Lists.newArrayList(PARENTID1, PARENTID2, PARENTID3, + PARENTID4, PARENTID5, PARENTID6, PARENTID7, PARENTID8, PARENTID9); + + public final static String ENTITYHISTID1 = "017x00000000001"; + public final static String ENTITYHISTID2 = "017x00000000002"; + public final static String ENTITYHISTID3 = "017x00000000003"; + public final static 
String ENTITYHISTID4 = "017x00000000004"; + public final static String ENTITYHISTID5 = "017x00000000005"; + public final static String ENTITYHISTID6 = "017x00000000006"; + public final static String ENTITYHISTID7 = "017x00000000007"; + public final static String ENTITYHISTID8 = "017x00000000008"; + public final static String ENTITYHISTID9 = "017x00000000009"; + + public final static List ENTITYHISTIDS = + Lists.newArrayList(ENTITYHISTID1, ENTITYHISTID2, ENTITYHISTID3, ENTITYHISTID4, ENTITYHISTID5, + ENTITYHISTID6, ENTITYHISTID7, ENTITYHISTID8, ENTITYHISTID9); + + public static final String LOCALHOST = "localhost"; + public static final String PHOENIX_JDBC_URL = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM; + public static final String PHOENIX_CONNECTIONLESS_JDBC_URL = + JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + CONNECTIONLESS + JDBC_PROTOCOL_TERMINATOR + + PHOENIX_TEST_DRIVER_URL_PARAM; + + public static final String TEST_SCHEMA_FILE_NAME = "config" + File.separator + "test-schema.xml"; + public static final String CED_SCHEMA_FILE_NAME = "config" + File.separator + "schema.xml"; + public static final String ENTITY_HISTORY_TABLE_NAME = "ENTITY_HISTORY"; + public static final String ENTITY_HISTORY_SALTED_TABLE_NAME = "ENTITY_HISTORY_SALTED"; + public static final String ATABLE_NAME = "ATABLE"; + public static final String TABLE_WITH_ARRAY = "TABLE_WITH_ARRAY"; + public static final String SUM_DOUBLE_NAME = "SumDoubleTest"; + public static final String ATABLE_SCHEMA_NAME = ""; + public static final String BTABLE_NAME = "BTABLE"; + public static final String STABLE_NAME = "STABLE"; + public static final String STABLE_PK_NAME = "ID"; + public static final String STABLE_SCHEMA_NAME = ""; + public static final String CUSTOM_ENTITY_DATA_FULL_NAME = "CORE.CUSTOM_ENTITY_DATA"; + public static final String CUSTOM_ENTITY_DATA_NAME = "CUSTOM_ENTITY_DATA"; + public static final String CUSTOM_ENTITY_DATA_SCHEMA_NAME = "CORE"; + public static final String HBASE_NATIVE = "HBASE_NATIVE"; + public static final String HBASE_NATIVE_SCHEMA_NAME = ""; + public static final String HBASE_DYNAMIC_COLUMNS = "HBASE_DYNAMIC_COLUMNS"; + public static final String HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME = ""; + public static final String PRODUCT_METRICS_NAME = "PRODUCT_METRICS"; + public static final String PTSDB_NAME = "PTSDB"; + public static final String PTSDB2_NAME = "PTSDB2"; + public static final String PTSDB3_NAME = "PTSDB3"; + public static final String PTSDB_SCHEMA_NAME = ""; + public static final String FUNKY_NAME = "FUNKY_NAMES"; + public static final String MULTI_CF_NAME = "MULTI_CF"; + public static final String MDTEST_NAME = "MDTEST"; + public static final String MDTEST_SCHEMA_NAME = ""; + public static final String KEYONLY_NAME = "KEYONLY"; + public static final String TABLE_WITH_SALTING = "TABLE_WITH_SALTING"; + public static final String INDEX_DATA_SCHEMA = "INDEX_TEST"; + public static final String INDEX_DATA_TABLE = "INDEX_DATA_TABLE"; + public static final String MUTABLE_INDEX_DATA_TABLE = "MUTABLE_INDEX_DATA_TABLE"; + public static final String TRANSACTIONAL_DATA_TABLE = "TRANSACTIONAL_DATA_TABLE"; + public static final String JOIN_SCHEMA = "Join"; + public static final String JOIN_ORDER_TABLE = "OrderTable"; + public static final String JOIN_CUSTOMER_TABLE = "CustomerTable"; + public static final String JOIN_ITEM_TABLE = "ItemTable"; + public static final String JOIN_SUPPLIER_TABLE = "SupplierTable"; + public static final String 
JOIN_COITEM_TABLE = "CoitemTable"; + public static final String JOIN_ORDER_TABLE_FULL_NAME = + '"' + JOIN_SCHEMA + "\".\"" + JOIN_ORDER_TABLE + '"'; + public static final String JOIN_CUSTOMER_TABLE_FULL_NAME = + '"' + JOIN_SCHEMA + "\".\"" + JOIN_CUSTOMER_TABLE + '"'; + public static final String JOIN_ITEM_TABLE_FULL_NAME = + '"' + JOIN_SCHEMA + "\".\"" + JOIN_ITEM_TABLE + '"'; + public static final String JOIN_SUPPLIER_TABLE_FULL_NAME = + '"' + JOIN_SCHEMA + "\".\"" + JOIN_SUPPLIER_TABLE + '"'; + public static final String JOIN_COITEM_TABLE_FULL_NAME = + '"' + JOIN_SCHEMA + "\".\"" + JOIN_COITEM_TABLE + '"'; + public static final String JOIN_ORDER_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_ORDER_TABLE; + public static final String JOIN_CUSTOMER_TABLE_DISPLAY_NAME = + JOIN_SCHEMA + "." + JOIN_CUSTOMER_TABLE; + public static final String JOIN_ITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_ITEM_TABLE; + public static final String JOIN_SUPPLIER_TABLE_DISPLAY_NAME = + JOIN_SCHEMA + "." + JOIN_SUPPLIER_TABLE; + public static final String JOIN_COITEM_TABLE_DISPLAY_NAME = JOIN_SCHEMA + "." + JOIN_COITEM_TABLE; + public static final String BINARY_NAME = "BinaryTable"; + + /** + * Read-only properties used by all tests + */ + public static final Properties TEST_PROPERTIES = new Properties() { + @Override + public String put(Object key, Object value) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public Object remove(Object key) { + throw new UnsupportedOperationException(); + } + }; + + public static byte[][] getSplits(String tenantId) { + return new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes(tenantId + "00A"), + Bytes.toBytes(tenantId + "00B"), Bytes.toBytes(tenantId + "00C"), }; + } + + public static void assertRoundEquals(BigDecimal bd1, BigDecimal bd2) { + bd1 = bd1.round(PDataType.DEFAULT_MATH_CONTEXT); + bd2 = bd2.round(PDataType.DEFAULT_MATH_CONTEXT); + if (bd1.compareTo(bd2) != 0) { + fail("expected:<" + bd1 + "> but was:<" + bd2 + ">"); + } + } + + public static BigDecimal computeAverage(double sum, long count) { + return BigDecimal.valueOf(sum).divide(BigDecimal.valueOf(count), + PDataType.DEFAULT_MATH_CONTEXT); + } + + public static BigDecimal computeAverage(long sum, long count) { + return BigDecimal.valueOf(sum).divide(BigDecimal.valueOf(count), + PDataType.DEFAULT_MATH_CONTEXT); + } + + public static Expression constantComparison(CompareOperator op, PColumn c, Object o) { + return new ComparisonExpression( + Arrays. asList(new KeyValueColumnExpression(c), LiteralExpression.newConstant(o)), + op); + } + + public static Expression kvColumn(PColumn c) { + return new KeyValueColumnExpression(c); + } + + public static Expression pkColumn(PColumn c, List columns) { + return new RowKeyColumnExpression(c, new RowKeyValueAccessor(columns, columns.indexOf(c))); + } + + public static Expression constantComparison(CompareOperator op, Expression e, Object o) { + return new ComparisonExpression(Arrays.asList(e, LiteralExpression.newConstant(o)), op); + } + + private static boolean useByteBasedRegex(StatementContext context) { + return context.getConnection().getQueryServices().getProps().getBoolean( + QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); + } + + public static Expression like(Expression e, Object o, StatementContext context) { + return useByteBasedRegex(context) + ? 
ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), + LikeType.CASE_SENSITIVE) + : StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), + LikeType.CASE_SENSITIVE); + } + + public static Expression ilike(Expression e, Object o, StatementContext context) { + return useByteBasedRegex(context) + ? ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), + LikeType.CASE_INSENSITIVE) + : StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), + LikeType.CASE_INSENSITIVE); + } + + public static Expression substr(Expression e, Object offset, Object length) { + return new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), + LiteralExpression.newConstant(length))); + } + + public static Expression substr2(Expression e, Object offset) { + + return new SubstrFunction( + Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(null))); + } + + public static Expression columnComparison(CompareOperator op, Expression c1, Expression c2) { + return new ComparisonExpression(Arrays. asList(c1, c2), op); + } + + public static SingleKeyValueComparisonFilter singleKVFilter(Expression e) { + return new SingleCQKeyValueComparisonFilter(e); + } + + public static RowKeyComparisonFilter rowKeyFilter(Expression e) { + return new RowKeyComparisonFilter(e, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); + } + + public static MultiKeyValueComparisonFilter multiKVFilter(Expression e) { + return new MultiCQKeyValueComparisonFilter(e, false, ByteUtil.EMPTY_BYTE_ARRAY); + } + + public static MultiEncodedCQKeyValueComparisonFilter multiEncodedKVFilter(Expression e, + QualifierEncodingScheme encodingScheme) { + return new MultiEncodedCQKeyValueComparisonFilter(e, encodingScheme, false, null); + } + + public static Expression and(Expression... expressions) { + return new AndExpression(Arrays.asList(expressions)); + } + + public static Expression not(Expression expression) { + return new NotExpression(expression); + } + + public static Expression or(Expression... expressions) { + return new OrExpression(Arrays.asList(expressions)); + } + + public static Expression in(Expression... expressions) throws SQLException { + return InListExpression.create(Arrays.asList(expressions), false, new ImmutableBytesWritable(), + true); + } + + public static Expression in(Expression e, Object... 
literals) throws SQLException { + PDataType childType = e.getDataType(); + List expressions = new ArrayList(literals.length + 1); + expressions.add(e); + for (Object o : literals) { + expressions.add(LiteralExpression.newConstant(o, childType)); + } + return InListExpression.create(expressions, false, new ImmutableBytesWritable(), true); + } + + public static void assertDegenerate(StatementContext context) { + Scan scan = context.getScan(); + assertDegenerate(scan); + } + + public static void assertDegenerate(Scan scan) { + assertNull(scan.getFilter()); + assertArrayEquals(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStartRow()); + assertArrayEquals(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStopRow()); + assertEquals(null, scan.getFilter()); + } + + public static void assertNotDegenerate(Scan scan) { + assertFalse(Bytes.compareTo(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStartRow()) == 0 + && Bytes.compareTo(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStopRow()) == 0); + } + + public static void assertEmptyScanKey(Scan scan) { + assertNull(scan.getFilter()); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, scan.getStartRow()); + assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, scan.getStopRow()); + assertEquals(null, scan.getFilter()); + } + + /** + * Does a deep comparison of two Results, down to the byte arrays. + * @param res1 first result to compare + * @param res2 second result to compare + * @throws Exception Every difference is throwing an exception + */ + public static void compareTuples(Tuple res1, Tuple res2) throws Exception { + if (res2 == null) { + throw new Exception("There wasn't enough rows, we stopped at " + res1); + } + if (res1.size() != res2.size()) { + throw new Exception("This row doesn't have the same number of KVs: " + res1.toString() + + " compared to " + res2.toString()); + } + for (int i = 0; i < res1.size(); i++) { + Cell ourKV = res1.getValue(i); + Cell replicatedKV = res2.getValue(i); + if (!ourKV.equals(replicatedKV)) { + throw new Exception( + "This result was different: " + res1.toString() + " compared to " + res2.toString()); + } + } + } + + public static void clearMetaDataCache(Connection conn) throws Throwable { + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + Table htable = + pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); + htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW, new Batch.Call() { @Override - public String put(Object key, Object value) { - throw new UnsupportedOperationException(); - } - - @Override - public void clear() { - throw new UnsupportedOperationException(); - } - - @Override - public Object remove(Object key) { - throw new UnsupportedOperationException(); - } - }; - - public static byte[][] getSplits(String tenantId) { - return new byte[][]{ - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes(tenantId + "00A"), - Bytes.toBytes(tenantId + "00B"), - Bytes.toBytes(tenantId + "00C"), - }; - } - - public static void assertRoundEquals(BigDecimal bd1, BigDecimal bd2) { - bd1 = bd1.round(PDataType.DEFAULT_MATH_CONTEXT); - bd2 = bd2.round(PDataType.DEFAULT_MATH_CONTEXT); - if (bd1.compareTo(bd2) != 0) { - fail("expected:<" + bd1 + "> but was:<" + bd2 + ">"); - } - } - - public static BigDecimal computeAverage(double sum, long count) { - return BigDecimal.valueOf(sum).divide(BigDecimal.valueOf(count), PDataType.DEFAULT_MATH_CONTEXT); - } - - public static BigDecimal computeAverage(long sum, long count) { - return 
BigDecimal.valueOf(sum).divide(BigDecimal.valueOf(count), PDataType.DEFAULT_MATH_CONTEXT); - } - - public static Expression constantComparison(CompareOperator op, PColumn c, Object o) { - return new ComparisonExpression(Arrays.asList(new KeyValueColumnExpression(c), LiteralExpression.newConstant(o)), op); - } - - public static Expression kvColumn(PColumn c) { - return new KeyValueColumnExpression(c); - } - - public static Expression pkColumn(PColumn c, List columns) { - return new RowKeyColumnExpression(c, new RowKeyValueAccessor(columns, columns.indexOf(c))); - } - - public static Expression constantComparison(CompareOperator op, Expression e, Object o) { - return new ComparisonExpression(Arrays.asList(e, LiteralExpression.newConstant(o)), op); - } - - private static boolean useByteBasedRegex(StatementContext context) { - return context - .getConnection() - .getQueryServices() - .getProps() - .getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB, - QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX); - } - - public static Expression like(Expression e, Object o, StatementContext context) { - return useByteBasedRegex(context) ? - ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_SENSITIVE) : - StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_SENSITIVE); - } - - public static Expression ilike(Expression e, Object o, StatementContext context) { - return useByteBasedRegex(context) ? - ByteBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_INSENSITIVE) : - StringBasedLikeExpression.create(Arrays.asList(e, LiteralExpression.newConstant(o)), LikeType.CASE_INSENSITIVE); - } - - public static Expression substr(Expression e, Object offset, Object length) { - return new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(length))); - } - - public static Expression substr2(Expression e, Object offset) { - - return new SubstrFunction(Arrays.asList(e, LiteralExpression.newConstant(offset), LiteralExpression.newConstant(null))); - } - - public static Expression columnComparison(CompareOperator op, Expression c1, Expression c2) { - return new ComparisonExpression(Arrays.asList(c1, c2), op); - } - - public static SingleKeyValueComparisonFilter singleKVFilter(Expression e) { - return new SingleCQKeyValueComparisonFilter(e); - } - - public static RowKeyComparisonFilter rowKeyFilter(Expression e) { - return new RowKeyComparisonFilter(e, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES); - } - - public static MultiKeyValueComparisonFilter multiKVFilter(Expression e) { - return new MultiCQKeyValueComparisonFilter(e, false, ByteUtil.EMPTY_BYTE_ARRAY); - } - - public static MultiEncodedCQKeyValueComparisonFilter multiEncodedKVFilter(Expression e, QualifierEncodingScheme encodingScheme) { - return new MultiEncodedCQKeyValueComparisonFilter(e, encodingScheme, false, null); - } - - public static Expression and(Expression... expressions) { - return new AndExpression(Arrays.asList(expressions)); - } - - public static Expression not(Expression expression) { - return new NotExpression(expression); - } - - public static Expression or(Expression... expressions) { - return new OrExpression(Arrays.asList(expressions)); - } - - public static Expression in(Expression... 
expressions) throws SQLException { - return InListExpression.create(Arrays.asList(expressions), false, new ImmutableBytesWritable(), true); - } - - public static Expression in(Expression e, Object... literals) throws SQLException { - PDataType childType = e.getDataType(); - List expressions = new ArrayList(literals.length + 1); - expressions.add(e); - for (Object o : literals) { - expressions.add(LiteralExpression.newConstant(o, childType)); - } - return InListExpression.create(expressions, false, new ImmutableBytesWritable(), true); - } - - public static void assertDegenerate(StatementContext context) { - Scan scan = context.getScan(); - assertDegenerate(scan); - } - - public static void assertDegenerate(Scan scan) { - assertNull(scan.getFilter()); - assertArrayEquals(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStartRow()); - assertArrayEquals(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStopRow()); - assertEquals(null, scan.getFilter()); - } - - public static void assertNotDegenerate(Scan scan) { - assertFalse( - Bytes.compareTo(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStartRow()) == 0 && - Bytes.compareTo(KeyRange.EMPTY_RANGE.getLowerRange(), scan.getStopRow()) == 0); - } - - public static void assertEmptyScanKey(Scan scan) { - assertNull(scan.getFilter()); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, scan.getStartRow()); - assertArrayEquals(ByteUtil.EMPTY_BYTE_ARRAY, scan.getStopRow()); - assertEquals(null, scan.getFilter()); - } - - /** - * Does a deep comparison of two Results, down to the byte arrays. - * - * @param res1 first result to compare - * @param res2 second result to compare - * @throws Exception Every difference is throwing an exception - */ - public static void compareTuples(Tuple res1, Tuple res2) - throws Exception { - if (res2 == null) { - throw new Exception("There wasn't enough rows, we stopped at " - + res1); - } - if (res1.size() != res2.size()) { - throw new Exception("This row doesn't have the same number of KVs: " - + res1.toString() + " compared to " + res2.toString()); - } - for (int i = 0; i < res1.size(); i++) { - Cell ourKV = res1.getValue(i); - Cell replicatedKV = res2.getValue(i); - if (!ourKV.equals(replicatedKV)) { - throw new Exception("This result was different: " - + res1.toString() + " compared to " + res2.toString()); - } - } - } - - public static void clearMetaDataCache(Connection conn) throws Throwable { - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - Table htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES); - htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, new Batch.Call() { - @Override - public ClearCacheResponse call(MetaDataService instance) throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); - ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder(); - instance.clearCache(controller, builder.build(), rpcCallback); - if (controller.getFailedOn() != null) { - throw controller.getFailedOn(); - } - return rpcCallback.get(); - } - }); - } - - public static void closeStatement(Statement stmt) { - try { - stmt.close(); - } catch (Throwable ignore) { - } - } - - public static void closeConnection(Connection conn) { - try { - conn.close(); - } catch (Throwable ignore) { - } - } - - public static void closeStmtAndConn(Statement stmt, Connection conn) { - closeStatement(stmt); - closeConnection(conn); - } - - public 
static void bindParams(PhoenixPreparedStatement stmt, List binds) throws SQLException { - for (int i = 0; i < binds.size(); i++) { - stmt.setObject(i + 1, binds.get(i)); - } - } - - /** - * @param conn connection to be used - * @param sortOrder sort order of column contain input values - * @param id id of the row being inserted - * @param input input to be inserted - */ - public static void upsertRow(Connection conn, String tableName, String sortOrder, int id, Object input) throws SQLException { - String dml = String.format("UPSERT INTO " + tableName + "_%s VALUES(?,?)", sortOrder); - PreparedStatement stmt = conn.prepareStatement(dml); - stmt.setInt(1, id); - if (input instanceof String) - stmt.setString(2, (String) input); - else if (input instanceof Integer) - stmt.setInt(2, (Integer) input); - else if (input instanceof Double) - stmt.setDouble(2, (Double) input); - else if (input instanceof Float) - stmt.setFloat(2, (Float) input); - else if (input instanceof Boolean) - stmt.setBoolean(2, (Boolean) input); - else if (input instanceof Long) - stmt.setLong(2, (Long) input); - else - throw new UnsupportedOperationException("" + input.getClass() + " is not supported by upsertRow"); - stmt.execute(); - conn.commit(); - } - - public static void createGroupByTestTable(Connection conn, String tableName) throws SQLException { - conn.createStatement().execute("create table " + tableName + - " (id varchar not null primary key,\n" + - " uri varchar, appcpu integer)"); - } - - private static void createTable(Connection conn, String inputSqlType, String tableName, String sortOrder) throws SQLException { - String dmlFormat = - "CREATE TABLE " + tableName + "_%s (id INTEGER NOT NULL, pk %s NOT NULL, " + "kv %s " - + "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (id, pk %s))"; - String ddl = String.format(dmlFormat, sortOrder, inputSqlType, inputSqlType, sortOrder); - conn.createStatement().execute(ddl); - conn.commit(); - } - - /** - * Creates a table to be used for testing. It contains one id column, one varchar column to be used as input, and - * one column which will contain null values - * - * @param conn connection to be used - * @param inputSqlType sql type of input - * @param inputList list of values to be inserted into the pk column - */ - public static String initTables(Connection conn, String inputSqlType, List inputList) throws Exception { - String tableName = generateUniqueName(); - createTable(conn, inputSqlType, tableName, "ASC"); - createTable(conn, inputSqlType, tableName, "DESC"); - for (int i = 0; i < inputList.size(); ++i) { - upsertRow(conn, tableName, "ASC", i, inputList.get(i)); - upsertRow(conn, tableName, "DESC", i, inputList.get(i)); - } - return tableName; - } - - public static List getAllSplits(Connection conn, String tableName) throws SQLException { - return getSplits(conn, tableName, null, null, null, null, null); - } - - public static List getAllSplits(Connection conn, String tableName, String where, String selectClause) throws SQLException { - return getSplits(conn, tableName, null, null, null, where, selectClause); - } - - public static List getSplits(Connection conn, String tableName, String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix, String selectClause) throws SQLException { - String whereClauseStart = - (lowerRange == null && upperRange == null ? "" : - " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") - + (upperRange != null ? 
(pkCol + " < ?") : ""))); - String whereClause = whereClauseSuffix == null ? whereClauseStart : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) : (" AND " + whereClauseSuffix); - String query = "SELECT /*+ NO_INDEX */ " + selectClause + " FROM " + tableName + whereClause; - PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); - if (lowerRange != null) { - pstmt.setBytes(1, lowerRange); - } - if (upperRange != null) { - pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange); - } - pstmt.execute(); - List keyRanges = pstmt.getQueryPlan().getSplits(); - return keyRanges; - } - - public static Collection getGuidePostsList(Connection conn, String tableName) throws SQLException { - return getGuidePostsList(conn, tableName, null, null, null, null); - } - - public static Collection getGuidePostsList(Connection conn, String tableName, String where) - throws SQLException { - return getGuidePostsList(conn, tableName, null, null, null, where); - } - - public static Collection getGuidePostsList(Connection conn, String tableName, String pkCol, - byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) throws SQLException { - String whereClauseStart = (lowerRange == null && upperRange == null ? "" - : " WHERE " - + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + (upperRange != null ? (pkCol + " < ?") - : ""))); - String whereClause = whereClauseSuffix == null ? whereClauseStart - : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) : (" AND " + whereClauseSuffix); - String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause; - PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); - if (lowerRange != null) { - pstmt.setBytes(1, lowerRange); - } - if (upperRange != null) { - pstmt.setBytes(lowerRange != null ? 
2 : 1, upperRange); - } - pstmt.execute(); - TableRef tableRef = pstmt.getQueryPlan().getTableRef(); - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = tableRef.getTable(); - GuidePostsInfo info = pconn.getQueryServices().getTableStats(new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table))); - return Collections.singletonList(info); - } - - public static void analyzeTable(Connection conn, String tableName) throws IOException, SQLException { - analyzeTable(conn, tableName, false); - } - - public static void analyzeTable(Connection conn, String tableName, boolean transactional) throws IOException, SQLException { - String query = "UPDATE STATISTICS " + tableName; - conn.createStatement().execute(query); - // if the table is transactional burn a txn in order to make sure the next txn read pointer is close to wall clock time - conn.commit(); - } - - public static void analyzeTableIndex(Connection conn, String tableName) throws IOException, SQLException { - String query = "UPDATE STATISTICS " + tableName + " INDEX"; - conn.createStatement().execute(query); - } - - public static void analyzeTableColumns(Connection conn, String tableName) throws IOException, SQLException { - String query = "UPDATE STATISTICS " + tableName + " COLUMNS"; - conn.createStatement().execute(query); - } - - public static void analyzeTable(String url, Properties props, String tableName) throws IOException, SQLException { - Connection conn = DriverManager.getConnection(url, props); - analyzeTable(conn, tableName); - conn.close(); - } - - public static void setRowKeyColumns(PreparedStatement stmt, int i) throws SQLException { - // insert row - stmt.setString(1, "varchar" + String.valueOf(i)); - stmt.setString(2, "char" + String.valueOf(i)); - stmt.setInt(3, i); - stmt.setLong(4, i); - stmt.setBigDecimal(5, new BigDecimal(i * 0.5d)); - Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * MILLIS_IN_DAY); - stmt.setDate(6, date); - } - - public static void validateRowKeyColumns(ResultSet rs, int i) throws SQLException { - assertTrue(rs.next()); - assertEquals(rs.getString(1), "varchar" + String.valueOf(i)); - assertEquals(rs.getString(2), "char" + String.valueOf(i)); - assertEquals(rs.getInt(3), i); - assertEquals(rs.getInt(4), i); - assertEquals(rs.getBigDecimal(5), new BigDecimal(i * 0.5d)); - Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * MILLIS_IN_DAY); - assertEquals(rs.getDate(6), date); - } - - public static ClientAggregators getSingleSumAggregator(String url, Properties props) throws SQLException { - try (PhoenixConnection pconn = DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class)) { - PhoenixStatement statement = new PhoenixStatement(pconn); - StatementContext context = new StatementContext(statement, null, new Scan(), new SequenceManager(statement)); - AggregationManager aggregationManager = context.getAggregationManager(); - SumAggregateFunction func = new SumAggregateFunction(Arrays.asList(new KeyValueColumnExpression(new PLongColumn() { - @Override - public PName getName() { - return SINGLE_COLUMN_NAME; - } - - @Override - public PName getFamilyName() { - return SINGLE_COLUMN_FAMILY_NAME; - } - - @Override - public int getPosition() { - return 0; - } - - @Override - public SortOrder getSortOrder() { - return SortOrder.getDefault(); - } - - @Override - public Integer getArraySize() { - return 0; - } - - @Override - public byte[] getViewConstant() { - return null; 
- } - - @Override - public boolean isViewReferenced() { - return false; - } - - @Override - public String getExpressionStr() { - return null; - } - - @Override - public long getTimestamp() { - return HConstants.LATEST_TIMESTAMP; - } - - @Override - public boolean isDerived() { - return false; - } - - @Override - public boolean isExcluded() { - return false; - } - - @Override - public boolean isRowTimestamp() { - return false; - } - - @Override - public boolean isDynamic() { - return false; - } - - @Override - public byte[] getColumnQualifierBytes() { - return SINGLE_COLUMN_NAME.getBytes(); - } - })), null); - aggregationManager.setAggregators(new ClientAggregators(Collections.singletonList(func), 1)); - ClientAggregators aggregators = aggregationManager.getAggregators(); - return aggregators; - } - } - - public static void createMultiCFTestTable(Connection conn, String tableName, String options) throws SQLException { - String ddl = "create table if not exists " + tableName + "(" + - " varchar_pk VARCHAR NOT NULL, " + - " char_pk CHAR(5) NOT NULL, " + - " int_pk INTEGER NOT NULL, " + - " long_pk BIGINT NOT NULL, " + - " decimal_pk DECIMAL(31, 10) NOT NULL, " + - " a.varchar_col1 VARCHAR, " + - " a.char_col1 CHAR(5), " + - " a.int_col1 INTEGER, " + - " a.long_col1 BIGINT, " + - " a.decimal_col1 DECIMAL(31, 10), " + - " b.varchar_col2 VARCHAR, " + - " b.char_col2 CHAR(5), " + - " b.int_col2 INTEGER, " + - " b.long_col2 BIGINT, " + - " b.decimal_col2 DECIMAL, " + - " b.date_col DATE " + - " CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk)) " - + (options != null ? options : ""); - conn.createStatement().execute(ddl); - } - - public static void flush(HBaseTestingUtility utility, TableName table) throws IOException { - Admin admin = utility.getAdmin(); - admin.flush(table); - } - - public static void minorCompact(HBaseTestingUtility utility, TableName table) - throws IOException, InterruptedException { - try { - CompactionScanner.setForceMinorCompaction(true); - Admin admin = utility.getAdmin(); - admin.compact(table); - int waitForCompactionToCompleteCounter = 0; - while (CompactionScanner.getForceMinorCompaction()) { - waitForCompactionToCompleteCounter++; - if (waitForCompactionToCompleteCounter > 20) { - Assert.fail(); - } - Thread.sleep(1000); - } - } - finally { - CompactionScanner.setForceMinorCompaction(false); - } - } - - public static void majorCompact(HBaseTestingUtility utility, TableName table) - throws IOException, InterruptedException { - long compactionRequestedSCN = EnvironmentEdgeManager.currentTimeMillis(); - Admin admin = utility.getAdmin(); - admin.majorCompact(table); - long lastCompactionTimestamp; - CompactionState state = null; - CompactionState previousState = null; - while ((state = admin.getCompactionState(table)).equals(CompactionState.MAJOR) - || state.equals(CompactionState.MAJOR_AND_MINOR) - || (lastCompactionTimestamp = - admin.getLastMajorCompactionTimestamp(table)) < compactionRequestedSCN) { - // In HBase 2.5 getLastMajorCompactionTimestamp doesn't seem to get updated when the - // clock is stopped, so check for the state going to NONE instead - if (state.equals(CompactionState.NONE) && (previousState != null - && (previousState.equals(CompactionState.MAJOR_AND_MINOR) - || previousState.equals(CompactionState.MAJOR)))) { - break; - } - previousState = state; - Thread.sleep(100); - } - } - - /** - * Runs a major compaction, and then waits until the compaction is complete before returning. 
- * - * @param tableName name of the table to be compacted - */ - public static void doMajorCompaction(Connection conn, String tableName) throws Exception { - - tableName = SchemaUtil.normalizeIdentifier(tableName); - - // We simply write a marker row, request a major compaction, and then wait until the marker - // row is gone - PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); - PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName)); - ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - MutationState mutationState = pconn.getMutationState(); - if (table.isTransactional()) { - mutationState.startTransaction(table.getTransactionProvider()); - } - try (Table htable = mutationState.getHTable(table)) { - byte[] markerRowKey = Bytes.toBytes("TO_DELETE"); - - Put put = new Put(markerRowKey); - long timestamp = 0L; - // We do not want to wait an hour because of PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY - // So set the timestamp of the put and delete as early as possible - put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES); - htable.put(put); - Delete delete = new Delete(markerRowKey); - delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp); - htable.delete(delete); - htable.close(); - if (table.isTransactional()) { - mutationState.commit(); - } - - Admin hbaseAdmin = services.getAdmin(); - hbaseAdmin.flush(TableName.valueOf(tableName)); - hbaseAdmin.majorCompact(TableName.valueOf(tableName)); - hbaseAdmin.close(); - - boolean compactionDone = false; - while (!compactionDone) { - Thread.sleep(6000L); - Scan scan = new Scan(); - scan.withStartRow(markerRowKey); - scan.withStopRow(Bytes.add(markerRowKey, new byte[]{0})); - scan.setRaw(true); - - try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) { - ResultScanner scanner = htableForRawScan.getScanner(scan); - List results = Lists.newArrayList(scanner); - LOGGER.info("Results: " + results); - compactionDone = results.isEmpty(); - scanner.close(); - } - LOGGER.info("Compaction done: " + compactionDone); - - // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows - if (!compactionDone && table.isTransactional()) { - hbaseAdmin = services.getAdmin(); - hbaseAdmin.flush(TableName.valueOf(tableName)); - hbaseAdmin.majorCompact(TableName.valueOf(tableName)); - hbaseAdmin.close(); - } - } - } - } - - public static void createTransactionalTable(Connection conn, String tableName) throws SQLException { - createTransactionalTable(conn, tableName, ""); - } - - public static void createTransactionalTable(Connection conn, String tableName, String extraProps) throws SQLException { - conn.createStatement().execute("create table " + tableName + TestUtil.TEST_TABLE_SCHEMA + "TRANSACTIONAL=true" + (extraProps.length() == 0 ? 
"" : ("," + extraProps))); - } - - public static void dumpTable(Connection conn, TableName tableName) - throws SQLException, IOException { - ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); - Table table = cqs.getTable(tableName.getName()); - dumpTable(table); - } - - public static void dumpTable(Table table) throws IOException { - System.out.println("************ dumping " + table + " **************"); - Scan s = new Scan(); - s.setRaw(true); - s.readAllVersions(); - int cellCount = 0; - int rowCount = 0; - try (ResultScanner scanner = table.getScanner(s)) { - Result result; - while ((result = scanner.next()) != null) { - rowCount++; - System.out.println("Row count: " + rowCount); - CellScanner cellScanner = result.cellScanner(); - Cell current; - while (cellScanner.advance()) { - current = cellScanner.current(); - System.out.println(current + " column= " + - Bytes.toStringBinary(CellUtil.cloneQualifier(current)) + - " val=" + Bytes.toStringBinary(CellUtil.cloneValue(current))); - cellCount++; - } - } - } - System.out.println("----- Row count: " + rowCount + " Cell count: " + cellCount + " -----"); - } - - public static int getRawRowCount(Table table) throws IOException { - dumpTable(table); - return getRowCount(table, true); - } - - public static int getRowCount(Table table, boolean isRaw) throws IOException { - Scan s = new Scan(); - s.setRaw(isRaw); - int rows = 0; - try (ResultScanner scanner = table.getScanner(s)) { - while (scanner.next() != null) { - rows++; - } - } - return rows; - } - - public static CellCount getCellCount(Table table, boolean isRaw) throws IOException { - Scan s = new Scan(); - s.setRaw(isRaw); - s.readAllVersions(); - - CellCount cellCount = new CellCount(); - try (ResultScanner scanner = table.getScanner(s)) { - Result result = null; - while ((result = scanner.next()) != null) { - CellScanner cellScanner = result.cellScanner(); - Cell current = null; - while (cellScanner.advance()) { - current = cellScanner.current(); - cellCount.addCell(Bytes.toString(CellUtil.cloneRow(current))); - } - } - } - return cellCount; - } - - static class CellCount { - private Map rowCountMap = new HashMap(); - - void addCell(String key) { - if (rowCountMap.containsKey(key)) { - rowCountMap.put(key, rowCountMap.get(key) + 1); - } else { - rowCountMap.put(key, 1); - } - } - - int getCellCount(String key) { - if (rowCountMap.containsKey(key)) { - return rowCountMap.get(key); - } else { - return 0; - } - } - } - - - public static void dumpIndexStatus(Connection conn, String indexName) throws IOException, SQLException { - try (Table table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) { - System.out.println("************ dumping index status for " + indexName + " **************"); - Scan s = new Scan(); - s.setRaw(true); - s.readAllVersions(); - byte[] startRow = SchemaUtil.getTableKeyFromFullName(indexName); - s.withStartRow(startRow); - s.withStopRow(ByteUtil.nextKey(ByteUtil.concat(startRow, QueryConstants.SEPARATOR_BYTE_ARRAY))); - try (ResultScanner scanner = table.getScanner(s)) { - Result result = null; - while ((result = scanner.next()) != null) { - CellScanner cellScanner = result.cellScanner(); - Cell current = null; - while (cellScanner.advance()) { - current = cellScanner.current(); - if (Bytes.compareTo(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), PhoenixDatabaseMetaData.INDEX_STATE_BYTES, 0, 
PhoenixDatabaseMetaData.INDEX_STATE_BYTES.length) == 0) { - System.out.println(current.getTimestamp() + "/INDEX_STATE=" + PIndexState.fromSerializedValue(current.getValueArray()[current.getValueOffset()])); - } - } - } - } - System.out.println("-----------------------------------------------"); - } - } - - public static void printResultSet(ResultSet rs) throws SQLException { - while (rs.next()) { - printResult(rs, false); - } - } - - public static void printResult(ResultSet rs, boolean multiLine) throws SQLException { - StringBuilder builder = new StringBuilder(); - int columnCount = rs.getMetaData().getColumnCount(); - for (int i = 0; i < columnCount; i++) { - Object value = rs.getObject(i + 1); - String output = value == null ? "null" : value.toString(); - builder.append(output); - if (i + 1 < columnCount) { - builder.append(","); - if (multiLine) { - builder.append("\n"); - } - } - System.out.println(builder.toString()); - } - } - - public static void waitForIndexRebuild(Connection conn, String fullIndexName, PIndexState indexState) throws InterruptedException, SQLException { - waitForIndexState(conn, fullIndexName, indexState, 0L); - } - - private static class IndexStateCheck { - public final PIndexState indexState; - public final Long indexDisableTimestamp; - public final Boolean success; - - public IndexStateCheck(PIndexState indexState, Long indexDisableTimestamp, Boolean success) { - this.indexState = indexState; - this.indexDisableTimestamp = indexDisableTimestamp; - this.success = success; - } - } - - public static void waitForIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState) throws InterruptedException, SQLException { - int maxTries = 120, nTries = 0; - PIndexState actualIndexState = null; - do { - String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName); - String index = SchemaUtil.getTableNameFromFullName(fullIndexName); - Thread.sleep(1000); // sleep 1 sec - String query = "SELECT " + PhoenixDatabaseMetaData.INDEX_STATE + " FROM " + - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE (" + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME - + ") = (" + "'" + schema + "','" + index + "') " - + "AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL AND " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"; - ResultSet rs = conn.createStatement().executeQuery(query); - if (rs.next()) { - actualIndexState = PIndexState.fromSerializedValue(rs.getString(1)); - boolean matchesExpected = (actualIndexState == expectedIndexState); - if (matchesExpected) { - return; - } - } - } while (++nTries < maxTries); - fail("Ran out of time waiting for index state to become " + expectedIndexState + " last seen actual state is " + - (actualIndexState == null ? 
"Unknown" : actualIndexState.toString())); - } - - public static void waitForIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws InterruptedException, SQLException { - int maxTries = 60, nTries = 0; - do { - Thread.sleep(1000); // sleep 1 sec - IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, expectedIndexState, expectedIndexDisableTimestamp); - if (state.success != null) { - if (Boolean.TRUE.equals(state.success)) { - return; - } - fail("Index state will not become " + expectedIndexState); - } - } while (++nTries < maxTries); - fail("Ran out of time waiting for index state to become " + expectedIndexState); - } - - public static boolean checkIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { - return Boolean.TRUE.equals(checkIndexStateInternal(conn, fullIndexName, expectedIndexState, expectedIndexDisableTimestamp).success); - } - - public static void assertIndexState(Connection conn, String fullIndexName, PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { - IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, expectedIndexState, expectedIndexDisableTimestamp); - if (!Boolean.TRUE.equals(state.success)) { - if (expectedIndexState != null) { - assertEquals(expectedIndexState, state.indexState); - } - if (expectedIndexDisableTimestamp != null) { - assertEquals(expectedIndexDisableTimestamp, state.indexDisableTimestamp); - } + public ClearCacheResponse call(MetaDataService instance) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder(); + instance.clearCache(controller, builder.build(), rpcCallback); + if (controller.getFailedOn() != null) { + throw controller.getFailedOn(); + } + return rpcCallback.get(); } - } + }); + } + + public static void closeStatement(Statement stmt) { + try { + stmt.close(); + } catch (Throwable ignore) { + } + } + + public static void closeConnection(Connection conn) { + try { + conn.close(); + } catch (Throwable ignore) { + } + } + + public static void closeStmtAndConn(Statement stmt, Connection conn) { + closeStatement(stmt); + closeConnection(conn); + } + + public static void bindParams(PhoenixPreparedStatement stmt, List binds) + throws SQLException { + for (int i = 0; i < binds.size(); i++) { + stmt.setObject(i + 1, binds.get(i)); + } + } + + /** + * @param conn connection to be used + * @param sortOrder sort order of column contain input values + * @param id id of the row being inserted + * @param input input to be inserted + */ + public static void upsertRow(Connection conn, String tableName, String sortOrder, int id, + Object input) throws SQLException { + String dml = String.format("UPSERT INTO " + tableName + "_%s VALUES(?,?)", sortOrder); + PreparedStatement stmt = conn.prepareStatement(dml); + stmt.setInt(1, id); + if (input instanceof String) stmt.setString(2, (String) input); + else if (input instanceof Integer) stmt.setInt(2, (Integer) input); + else if (input instanceof Double) stmt.setDouble(2, (Double) input); + else if (input instanceof Float) stmt.setFloat(2, (Float) input); + else if (input instanceof Boolean) stmt.setBoolean(2, (Boolean) input); + else if (input instanceof Long) stmt.setLong(2, (Long) input); + else throw new 
UnsupportedOperationException( + "" + input.getClass() + " is not supported by upsertRow"); + stmt.execute(); + conn.commit(); + } + + public static void createGroupByTestTable(Connection conn, String tableName) throws SQLException { + conn.createStatement().execute("create table " + tableName + + " (id varchar not null primary key,\n" + " uri varchar, appcpu integer)"); + } + + private static void createTable(Connection conn, String inputSqlType, String tableName, + String sortOrder) throws SQLException { + String dmlFormat = "CREATE TABLE " + tableName + "_%s (id INTEGER NOT NULL, pk %s NOT NULL, " + + "kv %s " + "CONSTRAINT PK_CONSTRAINT PRIMARY KEY (id, pk %s))"; + String ddl = String.format(dmlFormat, sortOrder, inputSqlType, inputSqlType, sortOrder); + conn.createStatement().execute(ddl); + conn.commit(); + } + + /** + * Creates a table to be used for testing. It contains one id column, one varchar column to be + * used as input, and one column which will contain null values + * @param conn connection to be used + * @param inputSqlType sql type of input + * @param inputList list of values to be inserted into the pk column + */ + public static String initTables(Connection conn, String inputSqlType, List inputList) + throws Exception { + String tableName = generateUniqueName(); + createTable(conn, inputSqlType, tableName, "ASC"); + createTable(conn, inputSqlType, tableName, "DESC"); + for (int i = 0; i < inputList.size(); ++i) { + upsertRow(conn, tableName, "ASC", i, inputList.get(i)); + upsertRow(conn, tableName, "DESC", i, inputList.get(i)); + } + return tableName; + } + + public static List getAllSplits(Connection conn, String tableName) throws SQLException { + return getSplits(conn, tableName, null, null, null, null, null); + } + + public static List getAllSplits(Connection conn, String tableName, String where, + String selectClause) throws SQLException { + return getSplits(conn, tableName, null, null, null, where, selectClause); + } + + public static List getSplits(Connection conn, String tableName, String pkCol, + byte[] lowerRange, byte[] upperRange, String whereClauseSuffix, String selectClause) + throws SQLException { + String whereClauseStart = (lowerRange == null && upperRange == null + ? "" + : " WHERE " + + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + + (upperRange != null ? (pkCol + " < ?") : ""))); + String whereClause = whereClauseSuffix == null ? whereClauseStart + : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) + : (" AND " + whereClauseSuffix); + String query = "SELECT /*+ NO_INDEX */ " + selectClause + " FROM " + tableName + whereClause; + PhoenixPreparedStatement pstmt = + conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); + if (lowerRange != null) { + pstmt.setBytes(1, lowerRange); + } + if (upperRange != null) { + pstmt.setBytes(lowerRange != null ? 
2 : 1, upperRange); + } + pstmt.execute(); + List keyRanges = pstmt.getQueryPlan().getSplits(); + return keyRanges; + } + + public static Collection getGuidePostsList(Connection conn, String tableName) + throws SQLException { + return getGuidePostsList(conn, tableName, null, null, null, null); + } + + public static Collection getGuidePostsList(Connection conn, String tableName, + String where) throws SQLException { + return getGuidePostsList(conn, tableName, null, null, null, where); + } + + public static Collection getGuidePostsList(Connection conn, String tableName, + String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) + throws SQLException { + String whereClauseStart = (lowerRange == null && upperRange == null + ? "" + : " WHERE " + + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + + (upperRange != null ? (pkCol + " < ?") : ""))); + String whereClause = whereClauseSuffix == null ? whereClauseStart + : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) + : (" AND " + whereClauseSuffix); + String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause; + PhoenixPreparedStatement pstmt = + conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class); + if (lowerRange != null) { + pstmt.setBytes(1, lowerRange); + } + if (upperRange != null) { + pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange); + } + pstmt.execute(); + TableRef tableRef = pstmt.getQueryPlan().getTableRef(); + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = tableRef.getTable(); + GuidePostsInfo info = pconn.getQueryServices().getTableStats( + new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table))); + return Collections.singletonList(info); + } + + public static void analyzeTable(Connection conn, String tableName) + throws IOException, SQLException { + analyzeTable(conn, tableName, false); + } + + public static void analyzeTable(Connection conn, String tableName, boolean transactional) + throws IOException, SQLException { + String query = "UPDATE STATISTICS " + tableName; + conn.createStatement().execute(query); + // if the table is transactional burn a txn in order to make sure the next txn read pointer is + // close to wall clock time + conn.commit(); + } + + public static void analyzeTableIndex(Connection conn, String tableName) + throws IOException, SQLException { + String query = "UPDATE STATISTICS " + tableName + " INDEX"; + conn.createStatement().execute(query); + } + + public static void analyzeTableColumns(Connection conn, String tableName) + throws IOException, SQLException { + String query = "UPDATE STATISTICS " + tableName + " COLUMNS"; + conn.createStatement().execute(query); + } + + public static void analyzeTable(String url, Properties props, String tableName) + throws IOException, SQLException { + Connection conn = DriverManager.getConnection(url, props); + analyzeTable(conn, tableName); + conn.close(); + } + + public static void setRowKeyColumns(PreparedStatement stmt, int i) throws SQLException { + // insert row + stmt.setString(1, "varchar" + String.valueOf(i)); + stmt.setString(2, "char" + String.valueOf(i)); + stmt.setInt(3, i); + stmt.setLong(4, i); + stmt.setBigDecimal(5, new BigDecimal(i * 0.5d)); + Date date = + new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * MILLIS_IN_DAY); + stmt.setDate(6, date); + } + + public static void validateRowKeyColumns(ResultSet rs, int i) throws SQLException { + 
assertTrue(rs.next()); + assertEquals(rs.getString(1), "varchar" + String.valueOf(i)); + assertEquals(rs.getString(2), "char" + String.valueOf(i)); + assertEquals(rs.getInt(3), i); + assertEquals(rs.getInt(4), i); + assertEquals(rs.getBigDecimal(5), new BigDecimal(i * 0.5d)); + Date date = + new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * MILLIS_IN_DAY); + assertEquals(rs.getDate(6), date); + } + + public static ClientAggregators getSingleSumAggregator(String url, Properties props) + throws SQLException { + try (PhoenixConnection pconn = + DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class)) { + PhoenixStatement statement = new PhoenixStatement(pconn); + StatementContext context = + new StatementContext(statement, null, new Scan(), new SequenceManager(statement)); + AggregationManager aggregationManager = context.getAggregationManager(); + SumAggregateFunction func = new SumAggregateFunction( + Arrays. asList(new KeyValueColumnExpression(new PLongColumn() { + @Override + public PName getName() { + return SINGLE_COLUMN_NAME; + } + + @Override + public PName getFamilyName() { + return SINGLE_COLUMN_FAMILY_NAME; + } + + @Override + public int getPosition() { + return 0; + } - public static PIndexState getIndexState(Connection conn, String fullIndexName) throws SQLException { - IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, null, null); - return state.indexState; - } + @Override + public SortOrder getSortOrder() { + return SortOrder.getDefault(); + } - public static long getPendingDisableCount(PhoenixConnection conn, String indexTableName) { - byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); - Get get = new Get(indexTableKey); - get.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); - - try { - Result pendingDisableCountResult = - conn.getQueryServices() - .getTable(SchemaUtil.getPhysicalTableName( - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, - conn.getQueryServices().getProps()).getName()) - .get(get); - return Bytes.toLong(pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES, - PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES)); - } catch (Exception e) { - LOGGER.error("Exception in getPendingDisableCount: " + e); + @Override + public Integer getArraySize() { return 0; + } + + @Override + public byte[] getViewConstant() { + return null; + } + + @Override + public boolean isViewReferenced() { + return false; + } + + @Override + public String getExpressionStr() { + return null; + } + + @Override + public long getTimestamp() { + return HConstants.LATEST_TIMESTAMP; + } + + @Override + public boolean isDerived() { + return false; + } + + @Override + public boolean isExcluded() { + return false; + } + + @Override + public boolean isRowTimestamp() { + return false; + } + + @Override + public boolean isDynamic() { + return false; + } + + @Override + public byte[] getColumnQualifierBytes() { + return SINGLE_COLUMN_NAME.getBytes(); + } + })), null); + aggregationManager.setAggregators( + new ClientAggregators(Collections. 
singletonList(func), 1)); + ClientAggregators aggregators = aggregationManager.getAggregators(); + return aggregators; + } + } + + public static void createMultiCFTestTable(Connection conn, String tableName, String options) + throws SQLException { + String ddl = + "create table if not exists " + tableName + "(" + " varchar_pk VARCHAR NOT NULL, " + + " char_pk CHAR(5) NOT NULL, " + " int_pk INTEGER NOT NULL, " + + " long_pk BIGINT NOT NULL, " + " decimal_pk DECIMAL(31, 10) NOT NULL, " + + " a.varchar_col1 VARCHAR, " + " a.char_col1 CHAR(5), " + " a.int_col1 INTEGER, " + + " a.long_col1 BIGINT, " + " a.decimal_col1 DECIMAL(31, 10), " + + " b.varchar_col2 VARCHAR, " + " b.char_col2 CHAR(5), " + " b.int_col2 INTEGER, " + + " b.long_col2 BIGINT, " + " b.decimal_col2 DECIMAL, " + " b.date_col DATE " + + " CONSTRAINT pk PRIMARY KEY (varchar_pk, char_pk, int_pk, long_pk DESC, decimal_pk)) " + + (options != null ? options : ""); + conn.createStatement().execute(ddl); + } + + public static void flush(HBaseTestingUtility utility, TableName table) throws IOException { + Admin admin = utility.getAdmin(); + admin.flush(table); + } + + public static void minorCompact(HBaseTestingUtility utility, TableName table) + throws IOException, InterruptedException { + try { + CompactionScanner.setForceMinorCompaction(true); + Admin admin = utility.getAdmin(); + admin.compact(table); + int waitForCompactionToCompleteCounter = 0; + while (CompactionScanner.getForceMinorCompaction()) { + waitForCompactionToCompleteCounter++; + if (waitForCompactionToCompleteCounter > 20) { + Assert.fail(); } - } - - private static IndexStateCheck checkIndexStateInternal(Connection conn, String fullIndexName, PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { - String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName); - String index = SchemaUtil.getTableNameFromFullName(fullIndexName); - String query = "SELECT CAST(" + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " AS BIGINT)," + PhoenixDatabaseMetaData.INDEX_STATE + " FROM " + - PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE (" + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME - + ") = (" + "'" + schema + "','" + index + "') " - + "AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL AND " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"; - ResultSet rs = conn.createStatement().executeQuery(query); - Long actualIndexDisableTimestamp = null; - PIndexState actualIndexState = null; - if (rs.next()) { - actualIndexDisableTimestamp = rs.getLong(1); - actualIndexState = PIndexState.fromSerializedValue(rs.getString(2)); - boolean matchesExpected = (expectedIndexDisableTimestamp == null || Objects.equal(actualIndexDisableTimestamp, expectedIndexDisableTimestamp)) - && (expectedIndexState == null || actualIndexState == expectedIndexState); - if (matchesExpected) { - return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, Boolean.TRUE); - } - if (ZERO.equals(actualIndexDisableTimestamp)) { - return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, Boolean.FALSE); - } + Thread.sleep(1000); + } + } finally { + CompactionScanner.setForceMinorCompaction(false); + } + } + + public static void majorCompact(HBaseTestingUtility utility, TableName table) + throws IOException, InterruptedException { + long compactionRequestedSCN = EnvironmentEdgeManager.currentTimeMillis(); + Admin admin = utility.getAdmin(); + admin.majorCompact(table); + long lastCompactionTimestamp; + 
CompactionState state = null; + CompactionState previousState = null; + while ( + (state = admin.getCompactionState(table)).equals(CompactionState.MAJOR) + || state.equals(CompactionState.MAJOR_AND_MINOR) + || (lastCompactionTimestamp = admin.getLastMajorCompactionTimestamp(table)) + < compactionRequestedSCN + ) { + // In HBase 2.5 getLastMajorCompactionTimestamp doesn't seem to get updated when the + // clock is stopped, so check for the state going to NONE instead + if ( + state.equals(CompactionState.NONE) + && (previousState != null && (previousState.equals(CompactionState.MAJOR_AND_MINOR) + || previousState.equals(CompactionState.MAJOR))) + ) { + break; + } + previousState = state; + Thread.sleep(100); + } + } + + /** + * Runs a major compaction, and then waits until the compaction is complete before returning. + * @param tableName name of the table to be compacted + */ + public static void doMajorCompaction(Connection conn, String tableName) throws Exception { + + tableName = SchemaUtil.normalizeIdentifier(tableName); + + // We simply write a marker row, request a major compaction, and then wait until the marker + // row is gone + PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class); + PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName)); + ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); + MutationState mutationState = pconn.getMutationState(); + if (table.isTransactional()) { + mutationState.startTransaction(table.getTransactionProvider()); + } + try (Table htable = mutationState.getHTable(table)) { + byte[] markerRowKey = Bytes.toBytes("TO_DELETE"); + + Put put = new Put(markerRowKey); + long timestamp = 0L; + // We do not want to wait an hour because of PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY + // So set the timestamp of the put and delete as early as possible + put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp, + QueryConstants.EMPTY_COLUMN_VALUE_BYTES); + htable.put(put); + Delete delete = new Delete(markerRowKey); + delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, + QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp); + htable.delete(delete); + htable.close(); + if (table.isTransactional()) { + mutationState.commit(); + } + + Admin hbaseAdmin = services.getAdmin(); + hbaseAdmin.flush(TableName.valueOf(tableName)); + hbaseAdmin.majorCompact(TableName.valueOf(tableName)); + hbaseAdmin.close(); + + boolean compactionDone = false; + while (!compactionDone) { + Thread.sleep(6000L); + Scan scan = new Scan(); + scan.withStartRow(markerRowKey); + scan.withStopRow(Bytes.add(markerRowKey, new byte[] { 0 })); + scan.setRaw(true); + + try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) { + ResultScanner scanner = htableForRawScan.getScanner(scan); + List results = Lists.newArrayList(scanner); + LOGGER.info("Results: " + results); + compactionDone = results.isEmpty(); + scanner.close(); } - return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, null); - } - - public static long getRowCount(Connection conn, String tableName) throws SQLException { - ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName); - assertTrue(rs.next()); - return rs.getLong(1); - } - - public static void addCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception { - int priority = QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY + 100; - 
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName)); - TableDescriptorBuilder descriptorBuilder = null; - if (!descriptor.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .collect(Collectors.toList()).contains(coprocessorClass.getName())) { - descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor); - descriptorBuilder.setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(coprocessorClass.getName()).setPriority(priority).build()); - }else{ - return; - } - final int retries = 10; - int numTries = 10; - descriptor = descriptorBuilder.build(); - try (Admin admin = services.getAdmin()) { - admin.modifyTable(descriptor); - while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor) - && numTries > 0) { - numTries--; - if (numTries == 0) { - throw new Exception( - "Failed to add " + coprocessorClass.getName() + " after " - + retries + " retries."); - } - Thread.sleep(1000); - } + LOGGER.info("Compaction done: " + compactionDone); + + // need to run compaction after the next txn snapshot has been written so that compaction + // can remove deleted rows + if (!compactionDone && table.isTransactional()) { + hbaseAdmin = services.getAdmin(); + hbaseAdmin.flush(TableName.valueOf(tableName)); + hbaseAdmin.majorCompact(TableName.valueOf(tableName)); + hbaseAdmin.close(); } - } - - public static void removeCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception { - ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); - TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName)); - TableDescriptorBuilder descriptorBuilder = null; - if (descriptor.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .collect(Collectors.toList()).contains(coprocessorClass.getName())) { - descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor); - descriptorBuilder.removeCoprocessor(coprocessorClass.getName()); - }else{ - return; + } + } + } + + public static void createTransactionalTable(Connection conn, String tableName) + throws SQLException { + createTransactionalTable(conn, tableName, ""); + } + + public static void createTransactionalTable(Connection conn, String tableName, String extraProps) + throws SQLException { + conn.createStatement().execute("create table " + tableName + TestUtil.TEST_TABLE_SCHEMA + + "TRANSACTIONAL=true" + (extraProps.length() == 0 ? 
"" : ("," + extraProps))); + } + + public static void dumpTable(Connection conn, TableName tableName) + throws SQLException, IOException { + ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); + Table table = cqs.getTable(tableName.getName()); + dumpTable(table); + } + + public static void dumpTable(Table table) throws IOException { + System.out.println("************ dumping " + table + " **************"); + Scan s = new Scan(); + s.setRaw(true); + s.readAllVersions(); + int cellCount = 0; + int rowCount = 0; + try (ResultScanner scanner = table.getScanner(s)) { + Result result; + while ((result = scanner.next()) != null) { + rowCount++; + System.out.println("Row count: " + rowCount); + CellScanner cellScanner = result.cellScanner(); + Cell current; + while (cellScanner.advance()) { + current = cellScanner.current(); + System.out + .println(current + " column= " + Bytes.toStringBinary(CellUtil.cloneQualifier(current)) + + " val=" + Bytes.toStringBinary(CellUtil.cloneValue(current))); + cellCount++; } - final int retries = 10; - int numTries = retries; - descriptor = descriptorBuilder.build(); - try (Admin admin = services.getAdmin()) { - admin.modifyTable(descriptor); - while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor) - && numTries > 0) { - numTries--; - if (numTries == 0) { - throw new Exception( - "Failed to remove " + coprocessorClass.getName() + " after " - + retries + " retries."); - } - Thread.sleep(1000); - } + } + } + System.out.println("----- Row count: " + rowCount + " Cell count: " + cellCount + " -----"); + } + + public static int getRawRowCount(Table table) throws IOException { + dumpTable(table); + return getRowCount(table, true); + } + + public static int getRowCount(Table table, boolean isRaw) throws IOException { + Scan s = new Scan(); + s.setRaw(isRaw); + int rows = 0; + try (ResultScanner scanner = table.getScanner(s)) { + while (scanner.next() != null) { + rows++; + } + } + return rows; + } + + public static CellCount getCellCount(Table table, boolean isRaw) throws IOException { + Scan s = new Scan(); + s.setRaw(isRaw); + s.readAllVersions(); + + CellCount cellCount = new CellCount(); + try (ResultScanner scanner = table.getScanner(s)) { + Result result = null; + while ((result = scanner.next()) != null) { + CellScanner cellScanner = result.cellScanner(); + Cell current = null; + while (cellScanner.advance()) { + current = cellScanner.current(); + cellCount.addCell(Bytes.toString(CellUtil.cloneRow(current))); } - } - - public static boolean compare(CompareOperator op, ImmutableBytesWritable lhsOutPtr, ImmutableBytesWritable rhsOutPtr) { - int compareResult = Bytes.compareTo(lhsOutPtr.get(), lhsOutPtr.getOffset(), lhsOutPtr.getLength(), rhsOutPtr.get(), rhsOutPtr.getOffset(), rhsOutPtr.getLength()); - return ByteUtil.compare(op, compareResult); - } - - public static QueryPlan getOptimizeQueryPlan(Connection conn, String sql) throws SQLException { - PhoenixPreparedStatement statement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - QueryPlan queryPlan = statement.optimizeQuery(sql); - queryPlan.iterator(); - return queryPlan; - } - - public static QueryPlan getOptimizeQueryPlanNoIterator(Connection conn, String sql) throws SQLException { - PhoenixPreparedStatement statement = conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); - QueryPlan queryPlan = statement.optimizeQuery(sql); - return queryPlan; - } - - public static void assertResultSet(ResultSet rs, Object[][] rows) 
throws Exception { - for (int rowIndex = 0; rowIndex < rows.length; rowIndex++) { - assertTrue("rowIndex:[" + rowIndex + "] rs.next error!", rs.next()); - for (int columnIndex = 1; columnIndex <= rows[rowIndex].length; columnIndex++) { - Object realValue = rs.getObject(columnIndex); - Object expectedValue = rows[rowIndex][columnIndex - 1]; - if (realValue == null) { - assertNull("rowIndex:[" + rowIndex + "],columnIndex:[" + columnIndex + "]", expectedValue); - } else { - assertEquals("rowIndex:[" + rowIndex + "],columnIndex:[" + columnIndex + "]", - expectedValue, - realValue - ); - } + } + } + return cellCount; + } + + static class CellCount { + private Map rowCountMap = new HashMap(); + + void addCell(String key) { + if (rowCountMap.containsKey(key)) { + rowCountMap.put(key, rowCountMap.get(key) + 1); + } else { + rowCountMap.put(key, 1); + } + } + + int getCellCount(String key) { + if (rowCountMap.containsKey(key)) { + return rowCountMap.get(key); + } else { + return 0; + } + } + } + + public static void dumpIndexStatus(Connection conn, String indexName) + throws IOException, SQLException { + try (Table table = conn.unwrap(PhoenixConnection.class).getQueryServices() + .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) { + System.out.println("************ dumping index status for " + indexName + " **************"); + Scan s = new Scan(); + s.setRaw(true); + s.readAllVersions(); + byte[] startRow = SchemaUtil.getTableKeyFromFullName(indexName); + s.withStartRow(startRow); + s.withStopRow( + ByteUtil.nextKey(ByteUtil.concat(startRow, QueryConstants.SEPARATOR_BYTE_ARRAY))); + try (ResultScanner scanner = table.getScanner(s)) { + Result result = null; + while ((result = scanner.next()) != null) { + CellScanner cellScanner = result.cellScanner(); + Cell current = null; + while (cellScanner.advance()) { + current = cellScanner.current(); + if ( + Bytes.compareTo(current.getQualifierArray(), current.getQualifierOffset(), + current.getQualifierLength(), PhoenixDatabaseMetaData.INDEX_STATE_BYTES, 0, + PhoenixDatabaseMetaData.INDEX_STATE_BYTES.length) == 0 + ) { + System.out.println(current.getTimestamp() + "/INDEX_STATE=" + PIndexState + .fromSerializedValue(current.getValueArray()[current.getValueOffset()])); } + } } - assertTrue(!rs.next()); - } - - /** - * Find a random free port in localhost for binding. - * - * @return A port number or -1 for failure. - */ - public static int getRandomPort() { - try (ServerSocket socket = new ServerSocket(0)) { - socket.setReuseAddress(true); - return socket.getLocalPort(); - } catch (IOException e) { - return -1; + } + System.out.println("-----------------------------------------------"); + } + } + + public static void printResultSet(ResultSet rs) throws SQLException { + while (rs.next()) { + printResult(rs, false); + } + } + + public static void printResult(ResultSet rs, boolean multiLine) throws SQLException { + StringBuilder builder = new StringBuilder(); + int columnCount = rs.getMetaData().getColumnCount(); + for (int i = 0; i < columnCount; i++) { + Object value = rs.getObject(i + 1); + String output = value == null ? 
"null" : value.toString(); + builder.append(output); + if (i + 1 < columnCount) { + builder.append(","); + if (multiLine) { + builder.append("\n"); } - } - - public static boolean hasFilter(Scan scan, Class filterClass) { - Iterator filterIter = ScanUtil.getFilterIterator(scan); - while (filterIter.hasNext()) { - Filter filter = filterIter.next(); - if (filterClass.isInstance(filter)) { - return true; - } + } + System.out.println(builder.toString()); + } + } + + public static void waitForIndexRebuild(Connection conn, String fullIndexName, + PIndexState indexState) throws InterruptedException, SQLException { + waitForIndexState(conn, fullIndexName, indexState, 0L); + } + + private static class IndexStateCheck { + public final PIndexState indexState; + public final Long indexDisableTimestamp; + public final Boolean success; + + public IndexStateCheck(PIndexState indexState, Long indexDisableTimestamp, Boolean success) { + this.indexState = indexState; + this.indexDisableTimestamp = indexDisableTimestamp; + this.success = success; + } + } + + public static void waitForIndexState(Connection conn, String fullIndexName, + PIndexState expectedIndexState) throws InterruptedException, SQLException { + int maxTries = 120, nTries = 0; + PIndexState actualIndexState = null; + do { + String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName); + String index = SchemaUtil.getTableNameFromFullName(fullIndexName); + Thread.sleep(1000); // sleep 1 sec + String query = "SELECT " + PhoenixDatabaseMetaData.INDEX_STATE + " FROM " + + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE (" + + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + ") = (" + + "'" + schema + "','" + index + "') " + "AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + + " IS NULL AND " + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"; + ResultSet rs = conn.createStatement().executeQuery(query); + if (rs.next()) { + actualIndexState = PIndexState.fromSerializedValue(rs.getString(1)); + boolean matchesExpected = (actualIndexState == expectedIndexState); + if (matchesExpected) { + return; } - return false; - } - - public static JoinTable getJoinTable(String query, PhoenixConnection connection) throws SQLException { - SQLParser parser = new SQLParser(query); - SelectStatement select = SubselectRewriter.flatten(parser.parseQuery(), connection); - ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection); - select = StatementNormalizer.normalize(select, resolver); - SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolver, connection); - if (transformedSelect != select) { - resolver = FromCompiler.getResolverForQuery(transformedSelect, connection); - select = StatementNormalizer.normalize(transformedSelect, resolver); + } + } while (++nTries < maxTries); + fail("Ran out of time waiting for index state to become " + expectedIndexState + + " last seen actual state is " + + (actualIndexState == null ? 
"Unknown" : actualIndexState.toString())); + } + + public static void waitForIndexState(Connection conn, String fullIndexName, + PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) + throws InterruptedException, SQLException { + int maxTries = 60, nTries = 0; + do { + Thread.sleep(1000); // sleep 1 sec + IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, expectedIndexState, + expectedIndexDisableTimestamp); + if (state.success != null) { + if (Boolean.TRUE.equals(state.success)) { + return; } - PhoenixStatement stmt = connection.createStatement().unwrap(PhoenixStatement.class); - return JoinCompiler.compile(stmt, select, resolver); - } - - public static void assertSelectStatement(FilterableStatement selectStatement, String sql) { - assertTrue(selectStatement.toString().trim().equals(sql)); - } - - public static void assertSqlExceptionCode(SQLExceptionCode code, SQLException se) { - assertEquals(code.getErrorCode(), se.getErrorCode()); - assertTrue("Wrong error message", se.getMessage().contains(code.getMessage())); - assertEquals(code.getSQLState(), se.getSQLState()); - } - - public static void assertTableHasTtl(Connection conn, TableName tableName, int ttl, boolean phoenixTTLEnabled) - throws SQLException, IOException { - long tableTTL = -1; - if (phoenixTTLEnabled) { - tableTTL = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, - tableName.getNameAsString())).getTTL(); - } else { - tableTTL = getColumnDescriptor(conn, tableName).getTimeToLive(); - } - Assert.assertEquals(ttl, tableTTL); - } - - public static void assertTableHasVersions(Connection conn, TableName tableName, int versions) - throws SQLException, IOException { - ColumnFamilyDescriptor cd = getColumnDescriptor(conn, tableName); - Assert.assertEquals(versions, cd.getMaxVersions()); - } - - public static ColumnFamilyDescriptor getColumnDescriptor(Connection conn, TableName tableName) - throws SQLException, IOException { - Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); - TableDescriptor td = admin.getDescriptor(tableName); - return td.getColumnFamilies()[0]; - } - - public static void assertRawRowCount(Connection conn, TableName table, int expectedRowCount) - throws SQLException, IOException { - ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); - int count = TestUtil.getRawRowCount(cqs.getTable(table.getName())); - assertEquals(expectedRowCount, count); - } - - public static int getRawRowCount(Connection conn, TableName table) - throws SQLException, IOException { - ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); - return TestUtil.getRawRowCount(cqs.getTable(table.getName())); - } - - public static int getRawCellCount(Connection conn, TableName tableName, byte[] row) - throws SQLException, IOException { - ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); - Table table = cqs.getTable(tableName.getName()); - CellCount cellCount = getCellCount(table, true); - return cellCount.getCellCount(Bytes.toString(row)); - } - public static void assertRawCellCount(Connection conn, TableName tableName, - byte[] row, int expectedCellCount) - throws SQLException, IOException { - int count = getRawCellCount(conn, tableName, row); - assertEquals(expectedCellCount, count); - } - - public static void assertRowExistsAtSCN(String url, String sql, long scn, boolean shouldExist) - throws SQLException { - boolean rowExists = false; - Properties props = new 
Properties(); - ResultSet rs; - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); - try (Connection conn = DriverManager.getConnection(url, props)) { - rs = conn.createStatement().executeQuery(sql); - rowExists = rs.next(); - if (shouldExist) { - Assert.assertTrue("Row was not found at time " + scn + - " when it should have been", - rowExists); - } else { - Assert.assertFalse("Row was found at time " + scn + - " when it should not have been", rowExists); - } + fail("Index state will not become " + expectedIndexState); + } + } while (++nTries < maxTries); + fail("Ran out of time waiting for index state to become " + expectedIndexState); + } + + public static boolean checkIndexState(Connection conn, String fullIndexName, + PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { + return Boolean.TRUE.equals(checkIndexStateInternal(conn, fullIndexName, expectedIndexState, + expectedIndexDisableTimestamp).success); + } + + public static void assertIndexState(Connection conn, String fullIndexName, + PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { + IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, expectedIndexState, + expectedIndexDisableTimestamp); + if (!Boolean.TRUE.equals(state.success)) { + if (expectedIndexState != null) { + assertEquals(expectedIndexState, state.indexState); + } + if (expectedIndexDisableTimestamp != null) { + assertEquals(expectedIndexDisableTimestamp, state.indexDisableTimestamp); + } + } + } + + public static PIndexState getIndexState(Connection conn, String fullIndexName) + throws SQLException { + IndexStateCheck state = checkIndexStateInternal(conn, fullIndexName, null, null); + return state.indexState; + } + + public static long getPendingDisableCount(PhoenixConnection conn, String indexTableName) { + byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName); + Get get = new Get(indexTableKey); + get.addColumn(TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES); + + try { + Result pendingDisableCountResult = conn.getQueryServices() + .getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, + conn.getQueryServices().getProps()).getName()) + .get(get); + return Bytes.toLong(pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES, + PhoenixDatabaseMetaData.PENDING_DISABLE_COUNT_BYTES)); + } catch (Exception e) { + LOGGER.error("Exception in getPendingDisableCount: " + e); + return 0; + } + } + + private static IndexStateCheck checkIndexStateInternal(Connection conn, String fullIndexName, + PIndexState expectedIndexState, Long expectedIndexDisableTimestamp) throws SQLException { + String schema = SchemaUtil.getSchemaNameFromFullName(fullIndexName); + String index = SchemaUtil.getTableNameFromFullName(fullIndexName); + String query = "SELECT CAST(" + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " AS BIGINT)," + + PhoenixDatabaseMetaData.INDEX_STATE + " FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + + " WHERE (" + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + + ") = (" + "'" + schema + "','" + index + "') " + "AND " + + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NULL AND " + + PhoenixDatabaseMetaData.COLUMN_NAME + " IS NULL"; + ResultSet rs = conn.createStatement().executeQuery(query); + Long actualIndexDisableTimestamp = null; + PIndexState actualIndexState = null; + if (rs.next()) { + actualIndexDisableTimestamp = rs.getLong(1); + 
actualIndexState = PIndexState.fromSerializedValue(rs.getString(2)); + boolean matchesExpected = (expectedIndexDisableTimestamp == null + || Objects.equal(actualIndexDisableTimestamp, expectedIndexDisableTimestamp)) + && (expectedIndexState == null || actualIndexState == expectedIndexState); + if (matchesExpected) { + return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, Boolean.TRUE); + } + if (ZERO.equals(actualIndexDisableTimestamp)) { + return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, Boolean.FALSE); + } + } + return new IndexStateCheck(actualIndexState, actualIndexDisableTimestamp, null); + } + + public static long getRowCount(Connection conn, String tableName) throws SQLException { + ResultSet rs = + conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName); + assertTrue(rs.next()); + return rs.getLong(1); + } + + public static void addCoprocessor(Connection conn, String tableName, Class coprocessorClass) + throws Exception { + int priority = QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY + 100; + ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); + TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName)); + TableDescriptorBuilder descriptorBuilder = null; + if ( + !descriptor.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) + .collect(Collectors.toList()).contains(coprocessorClass.getName()) + ) { + descriptorBuilder = TableDescriptorBuilder.newBuilder(descriptor); + descriptorBuilder.setCoprocessor(CoprocessorDescriptorBuilder + .newBuilder(coprocessorClass.getName()).setPriority(priority).build()); + } else { + return; + } + final int retries = 10; + int numTries = 10; + descriptor = descriptorBuilder.build(); + try (Admin admin = services.getAdmin()) { + admin.modifyTable(descriptor); + while ( + !admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor) && numTries > 0 + ) { + numTries--; + if (numTries == 0) { + throw new Exception( + "Failed to add " + coprocessorClass.getName() + " after " + retries + " retries."); } - - } - - public static void assertRowHasExpectedValueAtSCN(String url, String sql, - long scn, String value) throws SQLException { - Properties props = new Properties(); - ResultSet rs; - props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); - try (Connection conn = DriverManager.getConnection(url, props)) { - rs = conn.createStatement().executeQuery(sql); - Assert.assertTrue("Value " + value + " does not exist at scn " + scn, rs.next()); - Assert.assertEquals(value, rs.getString(1)); + Thread.sleep(1000); + } + } + } + + public static void removeCoprocessor(Connection conn, String tableName, Class coprocessorClass) + throws Exception { + ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices(); + TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName)); + TableDescriptorBuilder descriptorBuilder = null; + if ( + descriptor.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) + .collect(Collectors.toList()).contains(coprocessorClass.getName()) + ) { + descriptorBuilder = TableDescriptorBuilder.newBuilder(descriptor); + descriptorBuilder.removeCoprocessor(coprocessorClass.getName()); + } else { + return; + } + final int retries = 10; + int numTries = retries; + descriptor = descriptorBuilder.build(); + try (Admin admin = services.getAdmin()) { + admin.modifyTable(descriptor); + 
while ( + !admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor) && numTries > 0 + ) { + numTries--; + if (numTries == 0) { + throw new Exception( + "Failed to remove " + coprocessorClass.getName() + " after " + retries + " retries."); } - - } - - public static String getExplainPlan(Connection conn, String sql) throws SQLException { - try (ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql)){ - return QueryUtil.getExplainPlan(rs); + Thread.sleep(1000); + } + } + } + + public static boolean compare(CompareOperator op, ImmutableBytesWritable lhsOutPtr, + ImmutableBytesWritable rhsOutPtr) { + int compareResult = Bytes.compareTo(lhsOutPtr.get(), lhsOutPtr.getOffset(), + lhsOutPtr.getLength(), rhsOutPtr.get(), rhsOutPtr.getOffset(), rhsOutPtr.getLength()); + return ByteUtil.compare(op, compareResult); + } + + public static QueryPlan getOptimizeQueryPlan(Connection conn, String sql) throws SQLException { + PhoenixPreparedStatement statement = + conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + QueryPlan queryPlan = statement.optimizeQuery(sql); + queryPlan.iterator(); + return queryPlan; + } + + public static QueryPlan getOptimizeQueryPlanNoIterator(Connection conn, String sql) + throws SQLException { + PhoenixPreparedStatement statement = + conn.prepareStatement(sql).unwrap(PhoenixPreparedStatement.class); + QueryPlan queryPlan = statement.optimizeQuery(sql); + return queryPlan; + } + + public static void assertResultSet(ResultSet rs, Object[][] rows) throws Exception { + for (int rowIndex = 0; rowIndex < rows.length; rowIndex++) { + assertTrue("rowIndex:[" + rowIndex + "] rs.next error!", rs.next()); + for (int columnIndex = 1; columnIndex <= rows[rowIndex].length; columnIndex++) { + Object realValue = rs.getObject(columnIndex); + Object expectedValue = rows[rowIndex][columnIndex - 1]; + if (realValue == null) { + assertNull("rowIndex:[" + rowIndex + "],columnIndex:[" + columnIndex + "]", + expectedValue); + } else { + assertEquals("rowIndex:[" + rowIndex + "],columnIndex:[" + columnIndex + "]", + expectedValue, realValue); } - } - - public static Path createTempDirectory() throws IOException { - // We cannot use java.nio.file.Files.createTempDirectory(null), - // because that caches the value of "java.io.tmpdir" on class load. - return Files.createTempDirectory(Paths.get(System.getProperty("java.io.tmpdir")), null); - } + } + } + assertTrue(!rs.next()); + } + + /** + * Find a random free port in localhost for binding. + * @return A port number or -1 for failure. 
+ */ + public static int getRandomPort() { + try (ServerSocket socket = new ServerSocket(0)) { + socket.setReuseAddress(true); + return socket.getLocalPort(); + } catch (IOException e) { + return -1; + } + } + + public static boolean hasFilter(Scan scan, Class filterClass) { + Iterator filterIter = ScanUtil.getFilterIterator(scan); + while (filterIter.hasNext()) { + Filter filter = filterIter.next(); + if (filterClass.isInstance(filter)) { + return true; + } + } + return false; + } + + public static JoinTable getJoinTable(String query, PhoenixConnection connection) + throws SQLException { + SQLParser parser = new SQLParser(query); + SelectStatement select = SubselectRewriter.flatten(parser.parseQuery(), connection); + ColumnResolver resolver = FromCompiler.getResolverForQuery(select, connection); + select = StatementNormalizer.normalize(select, resolver); + SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolver, connection); + if (transformedSelect != select) { + resolver = FromCompiler.getResolverForQuery(transformedSelect, connection); + select = StatementNormalizer.normalize(transformedSelect, resolver); + } + PhoenixStatement stmt = connection.createStatement().unwrap(PhoenixStatement.class); + return JoinCompiler.compile(stmt, select, resolver); + } + + public static void assertSelectStatement(FilterableStatement selectStatement, String sql) { + assertTrue(selectStatement.toString().trim().equals(sql)); + } + + public static void assertSqlExceptionCode(SQLExceptionCode code, SQLException se) { + assertEquals(code.getErrorCode(), se.getErrorCode()); + assertTrue("Wrong error message", se.getMessage().contains(code.getMessage())); + assertEquals(code.getSQLState(), se.getSQLState()); + } + + public static void assertTableHasTtl(Connection conn, TableName tableName, int ttl, + boolean phoenixTTLEnabled) throws SQLException, IOException { + long tableTTL = -1; + if (phoenixTTLEnabled) { + tableTTL = conn.unwrap(PhoenixConnection.class) + .getTable(new PTableKey(null, tableName.getNameAsString())).getTTL(); + } else { + tableTTL = getColumnDescriptor(conn, tableName).getTimeToLive(); + } + Assert.assertEquals(ttl, tableTTL); + } + + public static void assertTableHasVersions(Connection conn, TableName tableName, int versions) + throws SQLException, IOException { + ColumnFamilyDescriptor cd = getColumnDescriptor(conn, tableName); + Assert.assertEquals(versions, cd.getMaxVersions()); + } + + public static ColumnFamilyDescriptor getColumnDescriptor(Connection conn, TableName tableName) + throws SQLException, IOException { + Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin(); + TableDescriptor td = admin.getDescriptor(tableName); + return td.getColumnFamilies()[0]; + } + + public static void assertRawRowCount(Connection conn, TableName table, int expectedRowCount) + throws SQLException, IOException { + ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); + int count = TestUtil.getRawRowCount(cqs.getTable(table.getName())); + assertEquals(expectedRowCount, count); + } + + public static int getRawRowCount(Connection conn, TableName table) + throws SQLException, IOException { + ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices(); + return TestUtil.getRawRowCount(cqs.getTable(table.getName())); + } + + public static int getRawCellCount(Connection conn, TableName tableName, byte[] row) + throws SQLException, IOException { + ConnectionQueryServices cqs = 
conn.unwrap(PhoenixConnection.class).getQueryServices(); + Table table = cqs.getTable(tableName.getName()); + CellCount cellCount = getCellCount(table, true); + return cellCount.getCellCount(Bytes.toString(row)); + } + + public static void assertRawCellCount(Connection conn, TableName tableName, byte[] row, + int expectedCellCount) throws SQLException, IOException { + int count = getRawCellCount(conn, tableName, row); + assertEquals(expectedCellCount, count); + } + + public static void assertRowExistsAtSCN(String url, String sql, long scn, boolean shouldExist) + throws SQLException { + boolean rowExists = false; + Properties props = new Properties(); + ResultSet rs; + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); + try (Connection conn = DriverManager.getConnection(url, props)) { + rs = conn.createStatement().executeQuery(sql); + rowExists = rs.next(); + if (shouldExist) { + Assert.assertTrue("Row was not found at time " + scn + " when it should have been", + rowExists); + } else { + Assert.assertFalse("Row was found at time " + scn + " when it should not have been", + rowExists); + } + } + + } + + public static void assertRowHasExpectedValueAtSCN(String url, String sql, long scn, String value) + throws SQLException { + Properties props = new Properties(); + ResultSet rs; + props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn)); + try (Connection conn = DriverManager.getConnection(url, props)) { + rs = conn.createStatement().executeQuery(sql); + Assert.assertTrue("Value " + value + " does not exist at scn " + scn, rs.next()); + Assert.assertEquals(value, rs.getString(1)); + } + + } + + public static String getExplainPlan(Connection conn, String sql) throws SQLException { + try (ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql)) { + return QueryUtil.getExplainPlan(rs); + } + } + + public static Path createTempDirectory() throws IOException { + // We cannot use java.nio.file.Files.createTempDirectory(null), + // because that caches the value of "java.io.tmpdir" on class load. + return Files.createTempDirectory(Paths.get(System.getProperty("java.io.tmpdir")), null); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/bson/ComparisonExpressionUtilsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/bson/ComparisonExpressionUtilsTest.java index 50879dceb46..8a89029f364 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/bson/ComparisonExpressionUtilsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/bson/ComparisonExpressionUtilsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util.bson; +import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.expression.util.bson.DocumentComparisonExpressionUtils; +import org.apache.phoenix.expression.util.bson.SQLComparisonExpressionUtils; import org.bson.BsonArray; import org.bson.BsonBinary; import org.bson.BsonBoolean; @@ -35,12 +39,6 @@ import org.bson.RawBsonDocument; import org.junit.Test; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.expression.util.bson.DocumentComparisonExpressionUtils; -import org.apache.phoenix.expression.util.bson.SQLComparisonExpressionUtils; - -import static org.junit.Assert.assertTrue; - /** * Tests for BSON Condition Expression Utility. */ @@ -52,97 +50,97 @@ public void testSQLComparisonExpression1() { TestFieldsMap compareValMap1 = getCompareValMap1(); RawBsonDocument rawBsonDocument = TestUtil.getRawBsonDocument(testFieldsMap1); - //{ - // "$Id20": 101.011, - // "$Id2": 12, - // "#NestedList1_10": "1234abce", - // "$Id1": 120, - // "$Id10": 101, - // "$Ids1": "12", - // ":ISBN": "111-1111111111", - // "#NestedList1_xyz0123": "xyz0123", - // "$NestedList1_485": -485.33, - // "$NestedMap1_NList1_30": { - // "$binary": { - // "base64": "V2hpdGVl", - // "subType": "00" - // } - // }, - // "InPublication": false, - // "$Ids10": "100", - // "#NestedMap1_NList1_3": { - // "$binary": { - // "base64": "V2hpdA==", - // "subType": "00" - // } - // }, - // "#NestedList1_1": "1234abcc", - // "#NMap1_NList1": "NListVal01", - // "$NestedList1_4850": -485.35, - // "$Id": 101.01, - // "#Title": "Book 101 Title" - //} + // { + // "$Id20": 101.011, + // "$Id2": 12, + // "#NestedList1_10": "1234abce", + // "$Id1": 120, + // "$Id10": 101, + // "$Ids1": "12", + // ":ISBN": "111-1111111111", + // "#NestedList1_xyz0123": "xyz0123", + // "$NestedList1_485": -485.33, + // "$NestedMap1_NList1_30": { + // "$binary": { + // "base64": "V2hpdGVl", + // "subType": "00" + // } + // }, + // "InPublication": false, + // "$Ids10": "100", + // "#NestedMap1_NList1_3": { + // "$binary": { + // "base64": "V2hpdA==", + // "subType": "00" + // } + // }, + // "#NestedList1_1": "1234abcc", + // "#NMap1_NList1": "NListVal01", + // "$NestedList1_4850": -485.35, + // "$Id": 101.01, + // "#Title": "Book 101 Title" + // } RawBsonDocument compareValues = TestUtil.getRawBsonDocument(compareValMap1); SQLComparisonExpressionUtils SQLComparisonExpressionUtils = - new SQLComparisonExpressionUtils(rawBsonDocument, compareValues); + new SQLComparisonExpressionUtils(rawBsonDocument, compareValues); - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "(field_exists(Id) OR field_not_exists(Title))")); + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("(field_exists(Id) OR field_not_exists(Title))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((field_not_exists(Id) AND field_not_exists(Title1)) OR field_exists(ISBN2))" - + " OR ((Id <> #Title) AND ((InPublication = InPublication) OR ((ISBN = :ISBN)" - + " AND (Title = #Title))))")); + "((field_not_exists(Id) AND field_not_exists(Title1)) OR field_exists(ISBN2))" + + " OR ((Id <> #Title) AND ((InPublication = InPublication) OR ((ISBN = :ISBN)" + + " AND (Title = #Title))))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((field_exists(NestedMap1.ISBN) AND 
field_not_exists(NestedMap1.NList1[3])))")); + "((field_exists(NestedMap1.ISBN) AND field_not_exists(NestedMap1.NList1[3])))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedMap1.Id = $Id AND (NestedMap1.InPublication = InPublication)")); + "NestedMap1.Id = $Id AND (NestedMap1.InPublication = InPublication)")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((NestedMap1.Id = $Id) AND ((NestedMap1.InPublication[0] = InPublication) OR " - + "((ISBN[0] = :ISBN) AND (Title = #Title))) OR " - + "(NestedMap1.NList1[0] = #NMap1_NList1))")); + "((NestedMap1.Id = $Id) AND ((NestedMap1.InPublication[0] = InPublication) OR " + + "((ISBN[0] = :ISBN) AND (Title = #Title))) OR " + + "(NestedMap1.NList1[0] = #NMap1_NList1))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((field_not_exists(Id) AND field_not_exists(Title1)) OR field_exists(ISBN2))" - + " OR ((NestedMap1.Id = $Id) AND ((NestedMap1.InPublication = InPublication) OR " - + "((ISBN = :ISBN) AND (Title = #Title))))")); + "((field_not_exists(Id) AND field_not_exists(Title1)) OR field_exists(ISBN2))" + + " OR ((NestedMap1.Id = $Id) AND ((NestedMap1.InPublication = InPublication) OR " + + "((ISBN = :ISBN) AND (Title = #Title))))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] <= $NestedList1_485 AND NestedList1[1] > #NestedList1_1 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedList1[2][1].Id < $Id1 AND IdS < $Ids1 AND Id2 > $Id2 AND NestedMap1.NList1[2] > #NestedMap1_NList1_3")); + "NestedList1[0] <= $NestedList1_485 AND NestedList1[1] > #NestedList1_1 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedList1[2][1].Id < $Id1 AND IdS < $Ids1 AND Id2 > $Id2 AND NestedMap1.NList1[2] > #NestedMap1_NList1_3")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] <= $NestedList1_485 AND NestedList1[1] >= #NestedList1_1 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedList1[2][1].Id <= $Id1 AND IdS <= $Ids1 AND Id2 >= $Id2 AND NestedMap1.NList1[2] >= #NestedMap1_NList1_3")); + "NestedList1[0] <= $NestedList1_485 AND NestedList1[1] >= #NestedList1_1 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedList1[2][1].Id <= $Id1 AND IdS <= $Ids1 AND Id2 >= $Id2 AND NestedMap1.NList1[2] >= #NestedMap1_NList1_3")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] < #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedList1[2][1].Id > $Id10 AND IdS > $Ids10 AND Id2 < $Id20 AND NestedMap1.NList1[2] < $NestedMap1_NList1_30")); + "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] < #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedList1[2][1].Id > $Id10 AND IdS > $Ids10 AND Id2 < $Id20 AND NestedMap1.NList1[2] < $NestedMap1_NList1_30")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedList1[2][1].Id >= $Id10 AND IdS >= $Ids10 AND Id2 <= $Id20 AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " - + "NestedMap1.NList1[2] <> $NestedMap1_NList1_30")); + "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedList1[2][1].Id >= $Id10 AND IdS >= $Ids10 AND Id2 <= $Id20 AND 
NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " + + "NestedMap1.NList1[2] <> $NestedMap1_NList1_30")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedList1[2][1].Id >= $Id10 AND IdS >= $Ids10 AND Id2 <= $Id20 AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " - + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)")); + "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedList1[2][1].Id >= $Id10 AND IdS >= $Ids10 AND Id2 <= $Id20 AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " + + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedMap1.NList1[0] IN ($Id, $Id1, $Id20, #NMap1_NList1) AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " - + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)")); + "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedMap1.NList1[0] IN ($Id, $Id1, $Id20, #NMap1_NList1) AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " + + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " - + "NestedMap1.NList1[0] IN ($Id, $Id1, $Id20, #NMap1_NList1) AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " - + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)" - + " AND NOT NestedMap1.InPublication IN ($Id, $Id1, $Id20, $Id21)")); + "NestedList1[0] >= $NestedList1_4850 AND NestedList1[1] <= #NestedList1_10 AND NestedList1[2][0] >= #NestedList1_xyz0123 AND " + + "NestedMap1.NList1[0] IN ($Id, $Id1, $Id20, #NMap1_NList1) AND NestedMap1.NList1[2] <= $NestedMap1_NList1_30 AND " + + "(NestedMap1.NList1[2] = $NestedMap1_NList1_30 OR NestedList1[0] BETWEEN $NestedList1_4850 AND $Id2)" + + " AND NOT NestedMap1.InPublication IN ($Id, $Id1, $Id20, $Id21)")); } @@ -155,132 +153,129 @@ public void testSQLComparisonExpression2() { TestFieldsMap compareValMap1 = getCompareValMap1(); RawBsonDocument rawBsonDocument = TestUtil.getRawBsonDocument(testFieldsMap1); - //{ - // "$Id20": 101.011, - // "$Id2": 12, - // "#NestedList1_10": "1234abce", - // "$Id1": 120, - // "$Id10": 101, - // "$Ids1": "12", - // ":ISBN": "111-1111111111", - // "#NestedList1_xyz0123": "xyz0123", - // "$NestedList1_485": -485.33, - // "$NestedMap1_NList1_30": { - // "$binary": { - // "base64": "V2hpdGVl", - // "subType": "00" - // } - // }, - // "InPublication": false, - // "$Ids10": "100", - // "#NestedMap1_NList1_3": { - // "$binary": { - // "base64": "V2hpdA==", - // "subType": "00" - // } - // }, - // "#NestedList1_1": "1234abcc", - // "#NMap1_NList1": "NListVal01", - // "$NestedList1_4850": -485.35, - // "$Id": 101.01, - // "#Title": "Book 101 Title" - //} + // { + // "$Id20": 101.011, + // "$Id2": 12, + // 
"#NestedList1_10": "1234abce", + // "$Id1": 120, + // "$Id10": 101, + // "$Ids1": "12", + // ":ISBN": "111-1111111111", + // "#NestedList1_xyz0123": "xyz0123", + // "$NestedList1_485": -485.33, + // "$NestedMap1_NList1_30": { + // "$binary": { + // "base64": "V2hpdGVl", + // "subType": "00" + // } + // }, + // "InPublication": false, + // "$Ids10": "100", + // "#NestedMap1_NList1_3": { + // "$binary": { + // "base64": "V2hpdA==", + // "subType": "00" + // } + // }, + // "#NestedList1_1": "1234abcc", + // "#NMap1_NList1": "NListVal01", + // "$NestedList1_4850": -485.35, + // "$Id": 101.01, + // "#Title": "Book 101 Title" + // } RawBsonDocument compareValues = TestUtil.getRawBsonDocument(compareValMap1); SQLComparisonExpressionUtils SQLComparisonExpressionUtils = - new SQLComparisonExpressionUtils(rawBsonDocument, compareValues); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "(exists('Id') || !exists('Title'))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((!exists('Id') && !exists('Title1')) || exists('ISBN2')) || " + - "((!isEquals('Id', '#Title'))" + - " && ((isEquals('InPublication', 'InPublication'))" + - " || ((isEquals('ISBN', ':ISBN')) && (isEquals('Title', '#Title')))))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((exists('NestedMap1.ISBN') && !exists('NestedMap1.NList1[3]')))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "isEquals('NestedMap1.Id', '$Id')" + - " && (isEquals('NestedMap1.InPublication', 'InPublication'))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((isEquals('NestedMap1.Id', '$Id'))" + - " && ((isEquals('NestedMap1.InPublication[0]', 'InPublication'))" + - " || ((isEquals('ISBN[0]', ':ISBN')) && (isEquals('Title', '#Title'))))" + - " || (isEquals('NestedMap1.NList1[0]', '#NMap1_NList1')))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "((!exists('Id') && !exists('Title1')) || exists('ISBN2')) ||" + - " ((isEquals('NestedMap1.Id', '$Id'))" + - " && ((isEquals('NestedMap1.InPublication', 'InPublication'))" + - " || ((isEquals('ISBN', ':ISBN')) && (isEquals('Title', '#Title')))))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "lessThanOrEquals('NestedList1[0]', '$NestedList1_485')" + - " && greaterThan('NestedList1[1]', '#NestedList1_1')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && lessThan('NestedList1[2][1].Id', '$Id1') && lessThan('IdS', '$Ids1')" + - " && greaterThan('Id2', '$Id2')" + - " && greaterThan('NestedMap1.NList1[2]', '#NestedMap1_NList1_3')")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "lessThanOrEquals('NestedList1[0]', '$NestedList1_485')" + - " && greaterThanOrEquals('NestedList1[1]', '#NestedList1_1')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && lessThanOrEquals('NestedList1[2][1].Id', '$Id1')" + - " && lessThanOrEquals('IdS', '$Ids1')" + - " && greaterThanOrEquals('Id2', '$Id2')" + - " && greaterThanOrEquals('NestedMap1.NList1[2]', '#NestedMap1_NList1_3')")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + - " && lessThan('NestedList1[1]', '#NestedList1_10')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && greaterThan('NestedList1[2][1].Id', '$Id10')" + - " && greaterThan('IdS', '$Ids10') 
&& lessThan('Id2', '$Id20')" + - " && lessThan('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')")); + new SQLComparisonExpressionUtils(rawBsonDocument, compareValues); - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + - " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && greaterThanOrEquals('NestedList1[2][1].Id', '$Id10')" + - " && greaterThanOrEquals('IdS', '$Ids10') && lessThanOrEquals('Id2', '$Id20')" + - " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " && !isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')")); + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("(exists('Id') || !exists('Title'))")); - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + - " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && greaterThanOrEquals('NestedList1[2][1].Id', '$Id10')" + - " && greaterThanOrEquals('IdS', '$Ids10')" + - " && lessThanOrEquals('Id2', '$Id20')" + - " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))")); - - assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + - " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && in('NestedMap1.NList1[0]', '$Id, $Id1, $Id20, #NMap1_NList1')" + - " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))")); + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("((!exists('Id') && !exists('Title1')) || exists('ISBN2')) || " + + "((!isEquals('Id', '#Title'))" + " && ((isEquals('InPublication', 'InPublication'))" + + " || ((isEquals('ISBN', ':ISBN')) && (isEquals('Title', '#Title')))))")); assertTrue(SQLComparisonExpressionUtils.evaluateConditionExpression( - "greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + - " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + - " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + - " && in('NestedMap1.NList1[0]', '$Id, $Id1, $Id20, #NMap1_NList1')" + - " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + - " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))" + - " && !in('NestedMap1.InPublication', '$Id, $Id1, $Id20, $Id21')")); + "((exists('NestedMap1.ISBN') && !exists('NestedMap1.NList1[3]')))")); + + assertTrue( + SQLComparisonExpressionUtils.evaluateConditionExpression("isEquals('NestedMap1.Id', '$Id')" + + " && (isEquals('NestedMap1.InPublication', 'InPublication'))")); + + assertTrue( + SQLComparisonExpressionUtils.evaluateConditionExpression("((isEquals('NestedMap1.Id', '$Id'))" + + " && ((isEquals('NestedMap1.InPublication[0]', 'InPublication'))" + + " || ((isEquals('ISBN[0]', ':ISBN')) && (isEquals('Title', '#Title'))))" + + " || (isEquals('NestedMap1.NList1[0]', 
'#NMap1_NList1')))")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("((!exists('Id') && !exists('Title1')) || exists('ISBN2')) ||" + + " ((isEquals('NestedMap1.Id', '$Id'))" + + " && ((isEquals('NestedMap1.InPublication', 'InPublication'))" + + " || ((isEquals('ISBN', ':ISBN')) && (isEquals('Title', '#Title')))))")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("lessThanOrEquals('NestedList1[0]', '$NestedList1_485')" + + " && greaterThan('NestedList1[1]', '#NestedList1_1')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && lessThan('NestedList1[2][1].Id', '$Id1') && lessThan('IdS', '$Ids1')" + + " && greaterThan('Id2', '$Id2')" + + " && greaterThan('NestedMap1.NList1[2]', '#NestedMap1_NList1_3')")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("lessThanOrEquals('NestedList1[0]', '$NestedList1_485')" + + " && greaterThanOrEquals('NestedList1[1]', '#NestedList1_1')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && lessThanOrEquals('NestedList1[2][1].Id', '$Id1')" + + " && lessThanOrEquals('IdS', '$Ids1')" + " && greaterThanOrEquals('Id2', '$Id2')" + + " && greaterThanOrEquals('NestedMap1.NList1[2]', '#NestedMap1_NList1_3')")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + + " && lessThan('NestedList1[1]', '#NestedList1_10')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && greaterThan('NestedList1[2][1].Id', '$Id10')" + + " && greaterThan('IdS', '$Ids10') && lessThan('Id2', '$Id20')" + + " && lessThan('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + + " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && greaterThanOrEquals('NestedList1[2][1].Id', '$Id10')" + + " && greaterThanOrEquals('IdS', '$Ids10') && lessThanOrEquals('Id2', '$Id20')" + + " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " && !isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + + " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && greaterThanOrEquals('NestedList1[2][1].Id', '$Id10')" + + " && greaterThanOrEquals('IdS', '$Ids10')" + " && lessThanOrEquals('Id2', '$Id20')" + + " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))")); + + assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + + " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && in('NestedMap1.NList1[0]', '$Id, $Id1, $Id20, #NMap1_NList1')" + + " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))")); + + 
assertTrue(SQLComparisonExpressionUtils + .evaluateConditionExpression("greaterThanOrEquals('NestedList1[0]', '$NestedList1_4850')" + + " && lessThanOrEquals('NestedList1[1]', '#NestedList1_10')" + + " && greaterThanOrEquals('NestedList1[2][0]', '#NestedList1_xyz0123')" + + " && in('NestedMap1.NList1[0]', '$Id, $Id1, $Id20, #NMap1_NList1')" + + " && lessThanOrEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " && (isEquals('NestedMap1.NList1[2]', '$NestedMap1_NList1_30')" + + " || between('NestedList1[0]', '$NestedList1_4850', '$Id2'))" + + " && !in('NestedMap1.InPublication', '$Id, $Id1, $Id20, $Id21')")); } @@ -291,30 +286,30 @@ public void testDocumentComparisonExpression() { RawBsonDocument rawBsonDocument = TestUtil.getRawBsonDocument(testFieldsMap1); BsonDocument expressionDocument = new BsonDocument(); List orList = new ArrayList<>(); - orList.add(new BsonDocument().append("Id", - new BsonDocument().append("$exists", new BsonBoolean(true)))); + orList.add( + new BsonDocument().append("Id", new BsonDocument().append("$exists", new BsonBoolean(true)))); orList.add(new BsonDocument().append("Title", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); expressionDocument.append("$or", new BsonArray(orList)); // Condition Expression: - //{ - // "$or": [ - // { - // "Id": { - // "$exists": true - // } - // }, - // { - // "Title": { - // "$exists": false - // } - // } - // ] - //} + // { + // "$or": [ + // { + // "Id": { + // "$exists": true + // } + // }, + // { + // "Title": { + // "$exists": false + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); BsonArray orListArray = new BsonArray(); @@ -323,32 +318,32 @@ public void testDocumentComparisonExpression() { BsonDocument andDoc1 = new BsonDocument(); BsonArray andList1 = new BsonArray(); andList1.add(new BsonDocument().append("Id", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); andList1.add(new BsonDocument().append("Title1", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); andDoc1.append("$and", andList1); orList1.add(andDoc1); orList1.add(new BsonDocument().append("ISBN2", - new BsonDocument().append("$exists", new BsonBoolean(true)))); + new BsonDocument().append("$exists", new BsonBoolean(true)))); orDoc1.append("$or", orList1); orListArray.add(orDoc1); BsonArray andList2 = new BsonArray(); BsonDocument andDoc2 = new BsonDocument(); andList2.add(new BsonDocument().append("Id", - new BsonDocument().append("$ne", new BsonString("Book 101 Title")))); + new BsonDocument().append("$ne", new BsonString("Book 101 Title")))); BsonArray orList2 = new BsonArray(); BsonDocument orDoc2 = new BsonDocument(); orList2.add(new BsonDocument().append("InPublication", - new BsonDocument().append("$eq", new BsonBoolean(false)))); + new BsonDocument().append("$eq", new BsonBoolean(false)))); BsonArray andList3 = new BsonArray(); BsonDocument andDoc3 = new BsonDocument(); andList3.add(new BsonDocument().append("ISBN", - new BsonDocument().append("$eq", new BsonString("111-1111111111")))); + new BsonDocument().append("$eq", new BsonString("111-1111111111")))); andList3.add(new BsonDocument().append("Title", - new BsonDocument().append("$eq", new 
BsonString("Book 101 Title")))); + new BsonDocument().append("$eq", new BsonString("Book 101 Title")))); andDoc3.append("$and", andList3); orList2.add(andDoc3); @@ -361,140 +356,140 @@ public void testDocumentComparisonExpression() { expressionDocument.append("$or", orListArray); // Condition Expression: - //{ - // "$or": [ - // { - // "$or": [ - // { - // "$and": [ - // { - // "Id": { - // "$exists": false - // } - // }, - // { - // "Title1": { - // "$exists": false - // } - // } - // ] - // }, - // { - // "ISBN2": { - // "$exists": true - // } - // } - // ] - // }, - // { - // "$and": [ - // { - // "Id": { - // "$ne": "Book 101 Title" - // } - // }, - // { - // "$or": [ - // { - // "InPublication": { - // "$eq": false - // } - // }, - // { - // "$and": [ - // { - // "ISBN": { - // "$eq": "111-1111111111" - // } - // }, - // { - // "Title": { - // "$eq": "Book 101 Title" - // } - // } - // ] - // } - // ] - // } - // ] - // } - // ] - //} + // { + // "$or": [ + // { + // "$or": [ + // { + // "$and": [ + // { + // "Id": { + // "$exists": false + // } + // }, + // { + // "Title1": { + // "$exists": false + // } + // } + // ] + // }, + // { + // "ISBN2": { + // "$exists": true + // } + // } + // ] + // }, + // { + // "$and": [ + // { + // "Id": { + // "$ne": "Book 101 Title" + // } + // }, + // { + // "$or": [ + // { + // "InPublication": { + // "$eq": false + // } + // }, + // { + // "$and": [ + // { + // "ISBN": { + // "$eq": "111-1111111111" + // } + // }, + // { + // "Title": { + // "$eq": "Book 101 Title" + // } + // } + // ] + // } + // ] + // } + // ] + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedMap1.ISBN", - new BsonDocument().append("$exists", new BsonBoolean(true)))); + new BsonDocument().append("$exists", new BsonBoolean(true)))); andList1.add(new BsonDocument().append("NestedMap1.NList1[3]", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); expressionDocument.append("$and", andList1); // Condition Expression: - //{ - // "$and": [ - // { - // "NestedMap1.ISBN": { - // "$exists": true - // } - // }, - // { - // "NestedMap1.NList1[3]": { - // "$exists": false - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedMap1.ISBN": { + // "$exists": true + // } + // }, + // { + // "NestedMap1.NList1[3]": { + // "$exists": false + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedMap1.Id", - new BsonDocument().append("$eq", new BsonDouble(101.01)))); + new BsonDocument().append("$eq", new BsonDouble(101.01)))); andList1.add(new BsonDocument().append("NestedMap1.InPublication", - new BsonDocument().append("$eq", new BsonBoolean(false)))); + new BsonDocument().append("$eq", new BsonBoolean(false)))); expressionDocument.append("$and", andList1); // Condition Expression: - //{ - // "$and": [ - // { - // "NestedMap1.Id": { - // "$eq": 101.01 - // } - // }, - // { - // "NestedMap1.InPublication": { - // "$eq": false - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedMap1.Id": { + // "$eq": 101.01 + // } + // }, + // { + // 
"NestedMap1.InPublication": { + // "$eq": false + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andDoc1 = new BsonDocument(); andList1.add(new BsonDocument().append("NestedMap1.Id", - new BsonDocument().append("$eq", new BsonDouble(101.01)))); + new BsonDocument().append("$eq", new BsonDouble(101.01)))); orList1 = new BsonArray(); orDoc1 = new BsonDocument(); orList1.add(new BsonDocument().append("NestedMap1.InPublication[0]", - new BsonDocument().append("$eq", new BsonBoolean(false)))); + new BsonDocument().append("$eq", new BsonBoolean(false)))); andList2 = new BsonArray(); andDoc2 = new BsonDocument(); andList2.add(new BsonDocument().append("ISBN[0]", - new BsonDocument().append("$eq", new BsonString("111-1111111111")))); + new BsonDocument().append("$eq", new BsonString("111-1111111111")))); andList2.add(new BsonDocument().append("Title", - new BsonDocument().append("$eq", new BsonString("Book 101 Title")))); + new BsonDocument().append("$eq", new BsonString("Book 101 Title")))); andDoc2.append("$and", andList2); orList1.add(andDoc2); @@ -508,54 +503,54 @@ public void testDocumentComparisonExpression() { orList2.add(andDoc1); orList2.add(new BsonDocument().append("NestedMap1.NList1[0]", - new BsonDocument().append("$eq", new BsonString("NListVal01")))); + new BsonDocument().append("$eq", new BsonString("NListVal01")))); expressionDocument.append("$or", orList2); // Condition Expression: - //{ - // "$or": [ - // { - // "$and": [ - // { - // "NestedMap1.Id": { - // "$eq": 101.01 - // } - // }, - // { - // "$or": [ - // { - // "NestedMap1.InPublication[0]": { - // "$eq": false - // } - // }, - // { - // "$and": [ - // { - // "ISBN[0]": { - // "$eq": "111-1111111111" - // } - // }, - // { - // "Title": { - // "$eq": "Book 101 Title" - // } - // } - // ] - // } - // ] - // } - // ] - // }, - // { - // "NestedMap1.NList1[0]": { - // "$eq": "NListVal01" - // } - // } - // ] - //} + // { + // "$or": [ + // { + // "$and": [ + // { + // "NestedMap1.Id": { + // "$eq": 101.01 + // } + // }, + // { + // "$or": [ + // { + // "NestedMap1.InPublication[0]": { + // "$eq": false + // } + // }, + // { + // "$and": [ + // { + // "ISBN[0]": { + // "$eq": "111-1111111111" + // } + // }, + // { + // "Title": { + // "$eq": "Book 101 Title" + // } + // } + // ] + // } + // ] + // } + // ] + // }, + // { + // "NestedMap1.NList1[0]": { + // "$eq": "NListVal01" + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); orListArray = new BsonArray(); @@ -563,9 +558,9 @@ public void testDocumentComparisonExpression() { andList1 = new BsonArray(); andDoc1 = new BsonDocument(); andList1.add(new BsonDocument().append("Id", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); andList1.add(new BsonDocument().append("Title1", - new BsonDocument().append("$exists", new BsonBoolean(false)))); + new BsonDocument().append("$exists", new BsonBoolean(false)))); andDoc1.append("$and", andList1); orList1 = new BsonArray(); @@ -573,7 +568,7 @@ public void testDocumentComparisonExpression() { orList1.add(andDoc1); orList1.add(new BsonDocument().append("ISBN2", - new BsonDocument().append("$exists", new BsonBoolean(true)))); 
+ new BsonDocument().append("$exists", new BsonBoolean(true)))); orDoc1.append("$or", orList1); orListArray.add(orDoc1); @@ -581,20 +576,20 @@ public void testDocumentComparisonExpression() { andList2 = new BsonArray(); andDoc2 = new BsonDocument(); andList2.add(new BsonDocument().append("NestedMap1.Id", - new BsonDocument().append("$eq", new BsonDouble(101.01)))); + new BsonDocument().append("$eq", new BsonDouble(101.01)))); orList2 = new BsonArray(); orDoc2 = new BsonDocument(); orList2.add(new BsonDocument().append("NestedMap1.InPublication", - new BsonDocument().append("$eq", new BsonBoolean(false)))); + new BsonDocument().append("$eq", new BsonBoolean(false)))); andList3 = new BsonArray(); andDoc3 = new BsonDocument(); andList3.add(new BsonDocument().append("ISBN", - new BsonDocument().append("$eq", new BsonString("111-1111111111")))); + new BsonDocument().append("$eq", new BsonString("111-1111111111")))); andList3.add(new BsonDocument().append("Title", - new BsonDocument().append("$eq", new BsonString("Book 101 Title")))); + new BsonDocument().append("$eq", new BsonString("Book 101 Title")))); andDoc3.append("$and", andList3); orList2.add(andDoc3); @@ -607,352 +602,352 @@ public void testDocumentComparisonExpression() { expressionDocument.append("$or", orListArray); // Condition Expression: - //{ - // "$or": [ - // { - // "$or": [ - // { - // "$and": [ - // { - // "Id": { - // "$exists": false - // } - // }, - // { - // "Title1": { - // "$exists": false - // } - // } - // ] - // }, - // { - // "ISBN2": { - // "$exists": true - // } - // } - // ] - // }, - // { - // "$and": [ - // { - // "NestedMap1.Id": { - // "$eq": 101.01 - // } - // }, - // { - // "$or": [ - // { - // "NestedMap1.InPublication": { - // "$eq": false - // } - // }, - // { - // "$and": [ - // { - // "ISBN": { - // "$eq": "111-1111111111" - // } - // }, - // { - // "Title": { - // "$eq": "Book 101 Title" - // } - // } - // ] - // } - // ] - // } - // ] - // } - // ] - //} + // { + // "$or": [ + // { + // "$or": [ + // { + // "$and": [ + // { + // "Id": { + // "$exists": false + // } + // }, + // { + // "Title1": { + // "$exists": false + // } + // } + // ] + // }, + // { + // "ISBN2": { + // "$exists": true + // } + // } + // ] + // }, + // { + // "$and": [ + // { + // "NestedMap1.Id": { + // "$eq": 101.01 + // } + // }, + // { + // "$or": [ + // { + // "NestedMap1.InPublication": { + // "$eq": false + // } + // }, + // { + // "$and": [ + // { + // "ISBN": { + // "$eq": "111-1111111111" + // } + // }, + // { + // "Title": { + // "$eq": "Book 101 Title" + // } + // } + // ] + // } + // ] + // } + // ] + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedList1[0]", - new BsonDocument().append("$lte", new BsonDouble(-485.33)))); + new BsonDocument().append("$lte", new BsonDouble(-485.33)))); andList1.add(new BsonDocument().append("NestedList1[1]", - new BsonDocument().append("$gt", new BsonString("1234abcc")))); + new BsonDocument().append("$gt", new BsonString("1234abcc")))); andList1.add(new BsonDocument().append("NestedList1[2][0]", - new BsonDocument().append("$gte", new BsonString("xyz0123")))); + new BsonDocument().append("$gte", new BsonString("xyz0123")))); andList1.add(new BsonDocument().append("NestedList1[2][1].Id", - new BsonDocument().append("$lt", new BsonInt32(120)))); - andList1.add(new 
BsonDocument().append("IdS", - new BsonDocument().append("$lt", new BsonString("12")))); - andList1.add(new BsonDocument().append("Id2", - new BsonDocument().append("$gt", new BsonInt32(12)))); + new BsonDocument().append("$lt", new BsonInt32(120)))); + andList1.add( + new BsonDocument().append("IdS", new BsonDocument().append("$lt", new BsonString("12")))); + andList1 + .add(new BsonDocument().append("Id2", new BsonDocument().append("$gt", new BsonInt32(12)))); andList1.add(new BsonDocument().append("NestedMap1.NList1[2]", - new BsonDocument().append("$gt", new BsonBinary(Bytes.toBytes("Whit"))))); + new BsonDocument().append("$gt", new BsonBinary(Bytes.toBytes("Whit"))))); expressionDocument.append("$and", andList1); // Condition Expression: - //{ - // "$and": [ - // { - // "NestedList1[0]": { - // "$lte": -485.33 - // } - // }, - // { - // "NestedList1[1]": { - // "$gt": "1234abcc" - // } - // }, - // { - // "NestedList1[2][0]": { - // "$gte": "xyz0123" - // } - // }, - // { - // "NestedList1[2][1].Id": { - // "$lt": 120 - // } - // }, - // { - // "IdS": { - // "$lt": "12" - // } - // }, - // { - // "Id2": { - // "$gt": 12 - // } - // }, - // { - // "NestedMap1.NList1[2]": { - // "$gt": { - // "$binary": { - // "base64": "V2hpdA==", - // "subType": "00" - // } - // } - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedList1[0]": { + // "$lte": -485.33 + // } + // }, + // { + // "NestedList1[1]": { + // "$gt": "1234abcc" + // } + // }, + // { + // "NestedList1[2][0]": { + // "$gte": "xyz0123" + // } + // }, + // { + // "NestedList1[2][1].Id": { + // "$lt": 120 + // } + // }, + // { + // "IdS": { + // "$lt": "12" + // } + // }, + // { + // "Id2": { + // "$gt": 12 + // } + // }, + // { + // "NestedMap1.NList1[2]": { + // "$gt": { + // "$binary": { + // "base64": "V2hpdA==", + // "subType": "00" + // } + // } + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedList1[0]", - new BsonDocument().append("$lte", new BsonDouble(-485.33)))); + new BsonDocument().append("$lte", new BsonDouble(-485.33)))); andList1.add(new BsonDocument().append("NestedList1[1]", - new BsonDocument().append("$gte", new BsonString("1234abcc")))); + new BsonDocument().append("$gte", new BsonString("1234abcc")))); andList1.add(new BsonDocument().append("NestedList1[2][0]", - new BsonDocument().append("$gte", new BsonString("xyz0123")))); + new BsonDocument().append("$gte", new BsonString("xyz0123")))); andList1.add(new BsonDocument().append("NestedList1[2][1].Id", - new BsonDocument().append("$lte", new BsonInt32(120)))); - andList1.add(new BsonDocument().append("IdS", - new BsonDocument().append("$lte", new BsonString("12")))); - andList1.add(new BsonDocument().append("Id2", - new BsonDocument().append("$gte", new BsonInt32(12)))); + new BsonDocument().append("$lte", new BsonInt32(120)))); + andList1.add( + new BsonDocument().append("IdS", new BsonDocument().append("$lte", new BsonString("12")))); + andList1 + .add(new BsonDocument().append("Id2", new BsonDocument().append("$gte", new BsonInt32(12)))); andList1.add(new BsonDocument().append("NestedMap1.NList1[2]", - new BsonDocument().append("$gte", new BsonBinary(Bytes.toBytes("Whit"))))); + new BsonDocument().append("$gte", new BsonBinary(Bytes.toBytes("Whit"))))); expressionDocument.append("$and", andList1); // Condition 
Expression: - //{ - // "$and": [ - // { - // "NestedList1[0]": { - // "$lte": -485.33 - // } - // }, - // { - // "NestedList1[1]": { - // "$gte": "1234abcc" - // } - // }, - // { - // "NestedList1[2][0]": { - // "$gte": "xyz0123" - // } - // }, - // { - // "NestedList1[2][1].Id": { - // "$lte": 120 - // } - // }, - // { - // "IdS": { - // "$lte": "12" - // } - // }, - // { - // "Id2": { - // "$gte": 12 - // } - // }, - // { - // "NestedMap1.NList1[2]": { - // "$gte": { - // "$binary": { - // "base64": "V2hpdA==", - // "subType": "00" - // } - // } - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedList1[0]": { + // "$lte": -485.33 + // } + // }, + // { + // "NestedList1[1]": { + // "$gte": "1234abcc" + // } + // }, + // { + // "NestedList1[2][0]": { + // "$gte": "xyz0123" + // } + // }, + // { + // "NestedList1[2][1].Id": { + // "$lte": 120 + // } + // }, + // { + // "IdS": { + // "$lte": "12" + // } + // }, + // { + // "Id2": { + // "$gte": 12 + // } + // }, + // { + // "NestedMap1.NList1[2]": { + // "$gte": { + // "$binary": { + // "base64": "V2hpdA==", + // "subType": "00" + // } + // } + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedList1[0]", - new BsonDocument().append("$gte", new BsonDouble(-485.35)))); + new BsonDocument().append("$gte", new BsonDouble(-485.35)))); andList1.add(new BsonDocument().append("NestedList1[1]", - new BsonDocument().append("$lt", new BsonString("1234abce")))); + new BsonDocument().append("$lt", new BsonString("1234abce")))); andList1.add(new BsonDocument().append("NestedList1[2][0]", - new BsonDocument().append("$gte", new BsonString("xyz0123")))); + new BsonDocument().append("$gte", new BsonString("xyz0123")))); andList1.add(new BsonDocument().append("NestedList1[2][1].Id", - new BsonDocument().append("$gt", new BsonInt64(101)))); + new BsonDocument().append("$gt", new BsonInt64(101)))); + andList1.add( + new BsonDocument().append("IdS", new BsonDocument().append("$gt", new BsonString("100")))); andList1.add( - new BsonDocument().append("IdS", new BsonDocument().append("$gt", new BsonString("100")))); - andList1.add(new BsonDocument().append("Id2", - new BsonDocument().append("$lt", new BsonDouble(101.011)))); + new BsonDocument().append("Id2", new BsonDocument().append("$lt", new BsonDouble(101.011)))); andList1.add(new BsonDocument().append("NestedMap1.NList1[2]", - new BsonDocument().append("$lt", new BsonBinary(Bytes.toBytes("Whitee"))))); + new BsonDocument().append("$lt", new BsonBinary(Bytes.toBytes("Whitee"))))); expressionDocument.append("$and", andList1); // Condition Expression: - //{ - // "$and": [ - // { - // "NestedList1[0]": { - // "$gte": -485.35 - // } - // }, - // { - // "NestedList1[1]": { - // "$lt": "1234abce" - // } - // }, - // { - // "NestedList1[2][0]": { - // "$gte": "xyz0123" - // } - // }, - // { - // "NestedList1[2][1].Id": { - // "$gt": 101 - // } - // }, - // { - // "IdS": { - // "$gt": "100" - // } - // }, - // { - // "Id2": { - // "$lt": 101.011 - // } - // }, - // { - // "NestedMap1.NList1[2]": { - // "$lt": { - // "$binary": { - // "base64": "V2hpdGVl", - // "subType": "00" - // } - // } - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedList1[0]": { + // "$gte": -485.35 + // } + // }, + // { + // "NestedList1[1]": { + // "$lt": "1234abce" + // } + // }, + // 
{ + // "NestedList1[2][0]": { + // "$gte": "xyz0123" + // } + // }, + // { + // "NestedList1[2][1].Id": { + // "$gt": 101 + // } + // }, + // { + // "IdS": { + // "$gt": "100" + // } + // }, + // { + // "Id2": { + // "$lt": 101.011 + // } + // }, + // { + // "NestedMap1.NList1[2]": { + // "$lt": { + // "$binary": { + // "base64": "V2hpdGVl", + // "subType": "00" + // } + // } + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); expressionDocument = new BsonDocument(); andList1 = new BsonArray(); andList1.add(new BsonDocument().append("NestedList1[0]", - new BsonDocument().append("$gte", new BsonDouble(-485.35)))); + new BsonDocument().append("$gte", new BsonDouble(-485.35)))); andList1.add(new BsonDocument().append("NestedList1[1]", - new BsonDocument().append("$lte", new BsonString("1234abce")))); + new BsonDocument().append("$lte", new BsonString("1234abce")))); andList1.add(new BsonDocument().append("NestedList1[2][0]", - new BsonDocument().append("$gte", new BsonString("xyz0123")))); + new BsonDocument().append("$gte", new BsonString("xyz0123")))); andList1.add(new BsonDocument().append("NestedList1[2][1].Id", - new BsonDocument().append("$gte", new BsonInt64(101)))); - andList1.add(new BsonDocument().append("IdS", - new BsonDocument().append("$gte", new BsonString("100")))); - andList1.add(new BsonDocument().append("Id2", - new BsonDocument().append("$lte", new BsonDouble(101.011)))); + new BsonDocument().append("$gte", new BsonInt64(101)))); + andList1.add( + new BsonDocument().append("IdS", new BsonDocument().append("$gte", new BsonString("100")))); + andList1.add( + new BsonDocument().append("Id2", new BsonDocument().append("$lte", new BsonDouble(101.011)))); andList1.add(new BsonDocument().append("NestedMap1.NList1[2]", - new BsonDocument().append("$lte", new BsonBinary(Bytes.toBytes("Whitee"))))); + new BsonDocument().append("$lte", new BsonBinary(Bytes.toBytes("Whitee"))))); andList1.add(new BsonDocument().append("NestedMap1.NList1[2]", - new BsonDocument().append("$ne", new BsonBinary(Bytes.toBytes("Whitee"))))); + new BsonDocument().append("$ne", new BsonBinary(Bytes.toBytes("Whitee"))))); expressionDocument.append("$and", andList1); // Condition Expression: - //{ - // "$and": [ - // { - // "NestedList1[0]": { - // "$gte": -485.35 - // } - // }, - // { - // "NestedList1[1]": { - // "$lte": "1234abce" - // } - // }, - // { - // "NestedList1[2][0]": { - // "$gte": "xyz0123" - // } - // }, - // { - // "NestedList1[2][1].Id": { - // "$gte": 101 - // } - // }, - // { - // "IdS": { - // "$gte": "100" - // } - // }, - // { - // "Id2": { - // "$lte": 101.011 - // } - // }, - // { - // "NestedMap1.NList1[2]": { - // "$lte": { - // "$binary": { - // "base64": "V2hpdGVl", - // "subType": "00" - // } - // } - // } - // }, - // { - // "NestedMap1.NList1[2]": { - // "$ne": { - // "$binary": { - // "base64": "V2hpdGVl", - // "subType": "00" - // } - // } - // } - // } - // ] - //} + // { + // "$and": [ + // { + // "NestedList1[0]": { + // "$gte": -485.35 + // } + // }, + // { + // "NestedList1[1]": { + // "$lte": "1234abce" + // } + // }, + // { + // "NestedList1[2][0]": { + // "$gte": "xyz0123" + // } + // }, + // { + // "NestedList1[2][1].Id": { + // "$gte": 101 + // } + // }, + // { + // "IdS": { + // "$gte": "100" + // } + // }, + // { + // "Id2": { + // "$lte": 101.011 + // } + // }, + // { + // "NestedMap1.NList1[2]": { + // "$lte": { + // "$binary": { + // "base64": 
"V2hpdGVl", + // "subType": "00" + // } + // } + // } + // }, + // { + // "NestedMap1.NList1[2]": { + // "$ne": { + // "$binary": { + // "base64": "V2hpdGVl", + // "subType": "00" + // } + // } + // } + // } + // ] + // } assertTrue(DocumentComparisonExpressionUtils.evaluateConditionExpression(rawBsonDocument, - expressionDocument)); + expressionDocument)); } @@ -976,9 +971,9 @@ private static TestFieldsMap getCompareValMap1() { map2.put("#NestedList1_1", new TestFieldValue().withS("1234abcc")); map2.put("#NestedList1_10", new TestFieldValue().withS("1234abce")); map2.put("#NestedMap1_NList1_3", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Whit")))); + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Whit")))); map2.put("$NestedMap1_NList1_30", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Whitee")))); + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Whitee")))); compareValMap.setMap(map2); return compareValMap; } @@ -998,10 +993,9 @@ private static TestFieldsMap getPhoenixFieldMap1() { nestedMap1.put("ISBN", new TestFieldValue().withS("111-1111111111")); nestedMap1.put("InPublication", new TestFieldValue().withBOOL(false)); nestedMap1.put("NList1", - new TestFieldValue().withL( - new TestFieldValue().withS("NListVal01"), - new TestFieldValue().withN(-0023.4), - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("White"))))); + new TestFieldValue().withL(new TestFieldValue().withS("NListVal01"), + new TestFieldValue().withN(-0023.4), + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("White"))))); map.put("NestedMap1", new TestFieldValue().withM(nestedMap1)); Map nestedList1Map1 = new HashMap<>(); nestedList1Map1.put("Id", new TestFieldValue().withN(101.01)); @@ -1009,10 +1003,9 @@ private static TestFieldsMap getPhoenixFieldMap1() { nestedList1Map1.put("ISBN", new TestFieldValue().withS("111-1111111111")); nestedList1Map1.put("InPublication", new TestFieldValue().withBOOL(false)); map.put("NestedList1", - new TestFieldValue().withL(new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL(new TestFieldValue().withS("xyz0123"), - new TestFieldValue().withM(nestedList1Map1)))); + new TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), new TestFieldValue().withL( + new TestFieldValue().withS("xyz0123"), new TestFieldValue().withM(nestedList1Map1)))); testFieldsMap1.setMap(map); return testFieldsMap1; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/bson/UpdateExpressionUtilsTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/bson/UpdateExpressionUtilsTest.java index 4f09946b408..ba3893e581e 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/bson/UpdateExpressionUtilsTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/bson/UpdateExpressionUtilsTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.util.bson; import java.util.HashMap; @@ -23,6 +22,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.expression.util.bson.UpdateExpressionUtils; import org.bson.BsonBinaryReader; import org.bson.BsonDocument; import org.bson.BsonNull; @@ -36,9 +37,6 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.phoenix.expression.util.bson.UpdateExpressionUtils; - /** * Tests for BSON Update Expression Utility. */ @@ -49,329 +47,235 @@ public void testUpdateExpression() { TestFieldsMap map = getTestFieldsMap1(); TestFieldsMap comparisonMap = getComparisonValuesMap(); - Assert.assertEquals(TestUtil.getPhoenixFieldMap( - TestUtil.getRawBsonBytes(map)), map); - Assert.assertEquals(TestUtil.getPhoenixFieldMap( - map.toString()), map); + Assert.assertEquals(TestUtil.getPhoenixFieldMap(TestUtil.getRawBsonBytes(map)), map); + Assert.assertEquals(TestUtil.getPhoenixFieldMap(map.toString()), map); BsonDocument bsonDocument = TestUtil.getBsonDocument(map); assertDeserializedBsonDoc(bsonDocument); -/* BsonDocument expressionDoc = getBsonDocument("SET Title = :newTitle, Id = :newId , " + - "NestedMap1.ColorList = :ColorList , " - + "Id1 = :Id1 , NestedMap1.NList1[0] = :NList1_0 , " - + "NestedList1[2][1].ISBN = :NestedList1_ISBN , " - + "NestedMap1.NestedMap2.NewID = :newId , " - + "NestedMap1.NestedMap2.NList[2] = :NList003 , " - + "NestedMap1.NestedMap2.NList[0] = :NList001 ADD AddedId :attr5_0 , " - + "NestedMap1.AddedId :attr5_0, NestedMap1.NestedMap2.Id :newIdNeg , " - + "NestedList12[2][0] :NestedList12_00 , NestedList12[2][1] :NestedList12_01 ," - + " Pictures :AddedPics " - + "REMOVE IdS, Id2, NestedMap1.Title , " - + "NestedMap1.NestedMap2.InPublication , NestedList1[2][1].TitleSet1 " - + "DELETE PictureBinarySet :PictureBinarySet01 , NestedMap1.NSet1 :NSet01 ," - + " NestedList1[2][1].TitleSet2 :NestedList1TitleSet01", TestUtil.getRawBsonDocument(comparisonMap)); - */ - - String updateExpression = "{\n" + - " \"$SET\": {\n" + - " \"Title\": \"Cycle_1234_new\",\n" + - " \"Id\": \"12345\",\n" + - " \"NestedMap1.ColorList\": [\n" + - " \"Black\",\n" + - " {\n" + - " \"$binary\": {\n" + - " \"base64\": \"V2hpdGU=\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " },\n" + - " \"Silver\"\n" + - " ],\n" + - " \"Id1\": {\n" + - " \"$binary\": {\n" + - " \"base64\": \"SURfMTAx\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " },\n" + - " \"NestedMap1.NList1[0]\": {\n" + - " \"$set\": [\n" + - " \"Updated_set_01\",\n" + - " \"Updated_set_02\"\n" + - " ]\n" + - " },\n" + - " \"NestedList1[2][1].ISBN\": \"111-1111111122\",\n" + - " \"NestedMap1.NestedMap2.NewID\": \"12345\",\n" + - " \"NestedMap1.NestedMap2.NList[2]\": null,\n" + - " \"NestedMap1.NestedMap2.NList[0]\": 12.22\n" + - " },\n" + - " \"$UNSET\": {\n" + - " \"IdS\": null,\n" + - " \"Id2\": null,\n" + - " \"NestedMap1.Title\": null,\n" + - " \"NestedMap1.NestedMap2.InPublication\": null,\n" + - " \"NestedList1[2][1].TitleSet1\": null,\n" + - " \"NestedList1[2][10]\": null,\n" + - " \"NestedMap1.NList1[2]\": null\n" + - " },\n" + - " \"$ADD\": {\n" + - " \"AddedId\": 10,\n" + - " \"NestedMap1.AddedId\": 10,\n" + - " \"NestedMap1.NestedMap2.Id\": -12345,\n" + - " \"NestedList12[2][0]\": {\n" + - " \"$set\": [\n" + - " \"xyz01234\",\n" + - " \"abc01234\"\n" + - " ]\n" + - " },\n" + - " \"NestedList12[2][1]\": {\n" + - " \"$set\": [\n" + - " {\n" + - " \"$binary\": {\n" + - " 
\"base64\": \"dmFsMDM=\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " },\n" + - " {\n" + - " \"$binary\": {\n" + - " \"base64\": \"dmFsMDQ=\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " }\n" + - " ]\n" + - " },\n" + - " \"NestedList12[2][2]\": {\n" + - " \"$set\": [\n" + - " -234.56,\n" + - " 123,\n" + - " 93756.93475960549,\n" + - " 293755723028458.6\n" + - " ]\n" + - " },\n" + - " \"Pictures\": {\n" + - " \"$set\": [\n" + - " \"xyz5@_rear.jpg\",\n" + - " \"1235@_rear.jpg\"\n" + - " ]\n" + - " }\n" + - " },\n" + - " \"$DELETE_FROM_SET\": {\n" + - " \"PictureBinarySet\": {\n" + - " \"$set\": [\n" + - " {\n" + - " \"$binary\": {\n" + - " \"base64\": \"MTIzX3JlYXIuanBn\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " },\n" + - " {\n" + - " \"$binary\": {\n" + - " \"base64\": \"eHl6X2Zyb250LmpwZ19ubw==\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " },\n" + - " {\n" + - " \"$binary\": {\n" + - " \"base64\": \"eHl6X2Zyb250LmpwZw==\",\n" + - " \"subType\": \"00\"\n" + - " }\n" + - " }\n" + - " ]\n" + - " },\n" + - " \"NestedMap1.NSet1\": {\n" + - " \"$set\": [\n" + - " -6830.5555,\n" + - " -48695\n" + - " ]\n" + - " },\n" + - " \"NestedList1[2][1].TitleSet2\": {\n" + - " \"$set\": [\n" + - " \"Book 1011 Title\",\n" + - " \"Book 1010 Title\"\n" + - " ]\n" + - " }\n" + - " }\n" + - "}"; - - //{ - // "$SET": { - // "Title": "Cycle_1234_new", - // "Id": "12345", - // "NestedMap1.ColorList": [ - // "Black", - // { - // "$binary": { - // "base64": "V2hpdGU=", - // "subType": "00" - // } - // }, - // "Silver" - // ], - // "Id1": { - // "$binary": { - // "base64": "SURfMTAx", - // "subType": "00" - // } - // }, - // "NestedMap1.NList1[0]": { - // "$set": [ - // "Updated_set_01", - // "Updated_set_02" - // ] - // }, - // "NestedList1[2][1].ISBN": "111-1111111122", - // "NestedMap1.NestedMap2.NewID": "12345", - // "NestedMap1.NestedMap2.NList[2]": null, - // "NestedMap1.NestedMap2.NList[0]": 12.22 - // }, - // "$UNSET": { - // "IdS": null, - // "Id2": null, - // "NestedMap1.Title": null, - // "NestedMap1.NestedMap2.InPublication": null, - // "NestedList1[2][1].TitleSet1": null, - // "NestedList1[2][10]": null, - // "NestedMap1.NList1[2]": null - // }, - // "$ADD": { - // "AddedId": 10, - // "NestedMap1.AddedId": 10, - // "NestedMap1.NestedMap2.Id": -12345, - // "NestedList12[2][0]": { - // "$set": [ - // "xyz01234", - // "abc01234" - // ] - // }, - // "NestedList12[2][1]": { - // "$set": [ - // { - // "$binary": { - // "base64": "dmFsMDM=", - // "subType": "00" - // } - // }, - // { - // "$binary": { - // "base64": "dmFsMDQ=", - // "subType": "00" - // } - // } - // ] - // }, - // "NestedList12[2][2]": { - // "$set": [ - // -234.56, - // 123, - // 93756.93475960549, - // 293755723028458.6 - // ] - // }, - // "Pictures": { - // "$set": [ - // "xyz5@_rear.jpg", - // "1235@_rear.jpg" - // ] - // } - // }, - // "$DELETE_FROM_SET": { - // "PictureBinarySet": { - // "$set": [ - // { - // "$binary": { - // "base64": "MTIzX3JlYXIuanBn", - // "subType": "00" - // } - // }, - // { - // "$binary": { - // "base64": "eHl6X2Zyb250LmpwZ19ubw==", - // "subType": "00" - // } - // }, - // { - // "$binary": { - // "base64": "eHl6X2Zyb250LmpwZw==", - // "subType": "00" - // } - // } - // ] - // }, - // "NestedMap1.NSet1": { - // "$set": [ - // -6830.5555, - // -48695 - // ] - // }, - // "NestedList1[2][1].TitleSet2": { - // "$set": [ - // "Book 1011 Title", - // "Book 1010 Title" - // ] - // } - // } - //} + /* + * BsonDocument expressionDoc = getBsonDocument("SET Title = :newTitle, Id = :newId , " + + * 
"NestedMap1.ColorList = :ColorList , " + "Id1 = :Id1 , NestedMap1.NList1[0] = :NList1_0 , " + + * "NestedList1[2][1].ISBN = :NestedList1_ISBN , " + "NestedMap1.NestedMap2.NewID = :newId , " + + * "NestedMap1.NestedMap2.NList[2] = :NList003 , " + + * "NestedMap1.NestedMap2.NList[0] = :NList001 ADD AddedId :attr5_0 , " + + * "NestedMap1.AddedId :attr5_0, NestedMap1.NestedMap2.Id :newIdNeg , " + + * "NestedList12[2][0] :NestedList12_00 , NestedList12[2][1] :NestedList12_01 ," + + * " Pictures :AddedPics " + "REMOVE IdS, Id2, NestedMap1.Title , " + + * "NestedMap1.NestedMap2.InPublication , NestedList1[2][1].TitleSet1 " + + * "DELETE PictureBinarySet :PictureBinarySet01 , NestedMap1.NSet1 :NSet01 ," + + * " NestedList1[2][1].TitleSet2 :NestedList1TitleSet01", + * TestUtil.getRawBsonDocument(comparisonMap)); + */ + + String updateExpression = "{\n" + " \"$SET\": {\n" + " \"Title\": \"Cycle_1234_new\",\n" + + " \"Id\": \"12345\",\n" + " \"NestedMap1.ColorList\": [\n" + " \"Black\",\n" + + " {\n" + " \"$binary\": {\n" + " \"base64\": \"V2hpdGU=\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " },\n" + " \"Silver\"\n" + + " ],\n" + " \"Id1\": {\n" + " \"$binary\": {\n" + + " \"base64\": \"SURfMTAx\",\n" + " \"subType\": \"00\"\n" + " }\n" + + " },\n" + " \"NestedMap1.NList1[0]\": {\n" + " \"$set\": [\n" + + " \"Updated_set_01\",\n" + " \"Updated_set_02\"\n" + " ]\n" + " },\n" + + " \"NestedList1[2][1].ISBN\": \"111-1111111122\",\n" + + " \"NestedMap1.NestedMap2.NewID\": \"12345\",\n" + + " \"NestedMap1.NestedMap2.NList[2]\": null,\n" + + " \"NestedMap1.NestedMap2.NList[0]\": 12.22\n" + " },\n" + " \"$UNSET\": {\n" + + " \"IdS\": null,\n" + " \"Id2\": null,\n" + " \"NestedMap1.Title\": null,\n" + + " \"NestedMap1.NestedMap2.InPublication\": null,\n" + + " \"NestedList1[2][1].TitleSet1\": null,\n" + " \"NestedList1[2][10]\": null,\n" + + " \"NestedMap1.NList1[2]\": null\n" + " },\n" + " \"$ADD\": {\n" + + " \"AddedId\": 10,\n" + " \"NestedMap1.AddedId\": 10,\n" + + " \"NestedMap1.NestedMap2.Id\": -12345,\n" + " \"NestedList12[2][0]\": {\n" + + " \"$set\": [\n" + " \"xyz01234\",\n" + " \"abc01234\"\n" + " ]\n" + + " },\n" + " \"NestedList12[2][1]\": {\n" + " \"$set\": [\n" + " {\n" + + " \"$binary\": {\n" + " \"base64\": \"dmFsMDM=\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " },\n" + " {\n" + + " \"$binary\": {\n" + " \"base64\": \"dmFsMDQ=\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " }\n" + " ]\n" + + " },\n" + " \"NestedList12[2][2]\": {\n" + " \"$set\": [\n" + + " -234.56,\n" + " 123,\n" + " 93756.93475960549,\n" + + " 293755723028458.6\n" + " ]\n" + " },\n" + " \"Pictures\": {\n" + + " \"$set\": [\n" + " \"xyz5@_rear.jpg\",\n" + " \"1235@_rear.jpg\"\n" + + " ]\n" + " }\n" + " },\n" + " \"$DELETE_FROM_SET\": {\n" + + " \"PictureBinarySet\": {\n" + " \"$set\": [\n" + " {\n" + + " \"$binary\": {\n" + " \"base64\": \"MTIzX3JlYXIuanBn\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " },\n" + " {\n" + + " \"$binary\": {\n" + " \"base64\": \"eHl6X2Zyb250LmpwZ19ubw==\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " },\n" + " {\n" + + " \"$binary\": {\n" + " \"base64\": \"eHl6X2Zyb250LmpwZw==\",\n" + + " \"subType\": \"00\"\n" + " }\n" + " }\n" + " ]\n" + + " },\n" + " \"NestedMap1.NSet1\": {\n" + " \"$set\": [\n" + + " -6830.5555,\n" + " -48695\n" + " ]\n" + " },\n" + + " \"NestedList1[2][1].TitleSet2\": {\n" + " \"$set\": [\n" + + " \"Book 1011 Title\",\n" + " \"Book 1010 Title\"\n" + " ]\n" + " }\n" + + " }\n" + "}"; + + // { + // "$SET": { + // "Title": "Cycle_1234_new", + // "Id": "12345", + // 
"NestedMap1.ColorList": [ + // "Black", + // { + // "$binary": { + // "base64": "V2hpdGU=", + // "subType": "00" + // } + // }, + // "Silver" + // ], + // "Id1": { + // "$binary": { + // "base64": "SURfMTAx", + // "subType": "00" + // } + // }, + // "NestedMap1.NList1[0]": { + // "$set": [ + // "Updated_set_01", + // "Updated_set_02" + // ] + // }, + // "NestedList1[2][1].ISBN": "111-1111111122", + // "NestedMap1.NestedMap2.NewID": "12345", + // "NestedMap1.NestedMap2.NList[2]": null, + // "NestedMap1.NestedMap2.NList[0]": 12.22 + // }, + // "$UNSET": { + // "IdS": null, + // "Id2": null, + // "NestedMap1.Title": null, + // "NestedMap1.NestedMap2.InPublication": null, + // "NestedList1[2][1].TitleSet1": null, + // "NestedList1[2][10]": null, + // "NestedMap1.NList1[2]": null + // }, + // "$ADD": { + // "AddedId": 10, + // "NestedMap1.AddedId": 10, + // "NestedMap1.NestedMap2.Id": -12345, + // "NestedList12[2][0]": { + // "$set": [ + // "xyz01234", + // "abc01234" + // ] + // }, + // "NestedList12[2][1]": { + // "$set": [ + // { + // "$binary": { + // "base64": "dmFsMDM=", + // "subType": "00" + // } + // }, + // { + // "$binary": { + // "base64": "dmFsMDQ=", + // "subType": "00" + // } + // } + // ] + // }, + // "NestedList12[2][2]": { + // "$set": [ + // -234.56, + // 123, + // 93756.93475960549, + // 293755723028458.6 + // ] + // }, + // "Pictures": { + // "$set": [ + // "xyz5@_rear.jpg", + // "1235@_rear.jpg" + // ] + // } + // }, + // "$DELETE_FROM_SET": { + // "PictureBinarySet": { + // "$set": [ + // { + // "$binary": { + // "base64": "MTIzX3JlYXIuanBn", + // "subType": "00" + // } + // }, + // { + // "$binary": { + // "base64": "eHl6X2Zyb250LmpwZ19ubw==", + // "subType": "00" + // } + // }, + // { + // "$binary": { + // "base64": "eHl6X2Zyb250LmpwZw==", + // "subType": "00" + // } + // } + // ] + // }, + // "NestedMap1.NSet1": { + // "$set": [ + // -6830.5555, + // -48695 + // ] + // }, + // "NestedList1[2][1].TitleSet2": { + // "$set": [ + // "Book 1011 Title", + // "Book 1010 Title" + // ] + // } + // } + // } RawBsonDocument expressionDoc = RawBsonDocument.parse(updateExpression); UpdateExpressionUtils.updateExpression(expressionDoc, bsonDocument); - Assert.assertEquals("Update expression could not update the map", - getTestFieldsMap2(), - TestUtil.getPhoenixFieldMap(bsonDocument)); - - //{ - // "$SET": { - // "NestedList1[0]": "NestedList1[0] + 12.22", - // "NestedList1[3]": null, - // "NestedList1[4]": true, - // "attr_5[0]": "attr_5[0] - 10", - // "Id1": "12345" - // }, - // "$DELETE_FROM_SET": { - // "NestedList12[2][2]": { - // "$set": [ - // -234.56, - // 123, - // 93756.93475960549, - // 293755723028458.6 - // ] - // } - // } - //} - - updateExpression = "{\n" + - " \"$SET\": {\n" + - " \"NestedList1[0]\": \"NestedList1[0] + 12.22\",\n" + - " \"NestedList1[3]\": null,\n" + - " \"NestedList1[4]\": true,\n" + - " \"attr_5[0]\": \"attr_5[0] - 10\",\n" + - " \"Id1\": \"12345\"\n" + - " },\n" + - " \"$DELETE_FROM_SET\": {\n" + - " \"NestedList12[2][2]\": {\n" + - " \"$set\": [\n" + - " -234.56,\n" + - " 123,\n" + - " 93756.93475960549,\n" + - " 293755723028458.6\n" + - " ]\n" + - " }\n" + - " }\n" + - "}"; - -/* UpdateExpressionUtils.updateExpression( - getBsonDocument("SET NestedList1[0] = NestedList1[0] + :NList001 , " - + "NestedList1[3] = :NList003 , NestedList1[4] = :NList004, " - + "attr_5[0] = attr_5[0] - :attr5_0, Id1 = :newId", - TestUtil.getRawBsonDocument(comparisonMap)), bsonDocument); - */ + Assert.assertEquals("Update expression could not update the map", 
getTestFieldsMap2(), + TestUtil.getPhoenixFieldMap(bsonDocument)); + + // { + // "$SET": { + // "NestedList1[0]": "NestedList1[0] + 12.22", + // "NestedList1[3]": null, + // "NestedList1[4]": true, + // "attr_5[0]": "attr_5[0] - 10", + // "Id1": "12345" + // }, + // "$DELETE_FROM_SET": { + // "NestedList12[2][2]": { + // "$set": [ + // -234.56, + // 123, + // 93756.93475960549, + // 293755723028458.6 + // ] + // } + // } + // } + + updateExpression = + "{\n" + " \"$SET\": {\n" + " \"NestedList1[0]\": \"NestedList1[0] + 12.22\",\n" + + " \"NestedList1[3]\": null,\n" + " \"NestedList1[4]\": true,\n" + + " \"attr_5[0]\": \"attr_5[0] - 10\",\n" + " \"Id1\": \"12345\"\n" + " },\n" + + " \"$DELETE_FROM_SET\": {\n" + " \"NestedList12[2][2]\": {\n" + " \"$set\": [\n" + + " -234.56,\n" + " 123,\n" + " 93756.93475960549,\n" + + " 293755723028458.6\n" + " ]\n" + " }\n" + " }\n" + "}"; + + /* + * UpdateExpressionUtils.updateExpression( + * getBsonDocument("SET NestedList1[0] = NestedList1[0] + :NList001 , " + + * "NestedList1[3] = :NList003 , NestedList1[4] = :NList004, " + + * "attr_5[0] = attr_5[0] - :attr5_0, Id1 = :newId", + * TestUtil.getRawBsonDocument(comparisonMap)), bsonDocument); + */ expressionDoc = RawBsonDocument.parse(updateExpression); UpdateExpressionUtils.updateExpression(expressionDoc, bsonDocument); Assert.assertEquals("Update expression could not update the map after second update", - getTestFieldsMap3(), - TestUtil.getPhoenixFieldMap(bsonDocument)); + getTestFieldsMap3(), TestUtil.getPhoenixFieldMap(bsonDocument)); - Assert.assertEquals(TestUtil.getPhoenixFieldMap( - TestUtil.getRawBsonBytes(map)), map); + Assert.assertEquals(TestUtil.getPhoenixFieldMap(TestUtil.getRawBsonBytes(map)), map); } private static void assertDeserializedBsonDoc(BsonDocument bsonDocument) { @@ -379,16 +283,16 @@ private static void assertDeserializedBsonDoc(BsonDocument bsonDocument) { byte[] serializedBytes = Bytes.toBytes((rawBsonDocument).getByteBuffer().asNIO()); RawBsonDocument desRawBsonDocument = new RawBsonDocument(serializedBytes); BsonDocument deserializedBsonDocument; - try (BsonBinaryReader bsonReader = new BsonBinaryReader( - new ByteBufferBsonInput(desRawBsonDocument.getByteBuffer()))) { + try (BsonBinaryReader bsonReader = + new BsonBinaryReader(new ByteBufferBsonInput(desRawBsonDocument.getByteBuffer()))) { deserializedBsonDocument = - new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); + new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); } Assert.assertEquals(bsonDocument, deserializedBsonDocument); } public static BsonDocument getBsonDocument(String updateExpression, - BsonDocument comparisonValue) { + BsonDocument comparisonValue) { String setRegExPattern = "SET\\s+(.+?)(?=\\s+(REMOVE|ADD|DELETE)\\b|$)"; String removeRegExPattern = "REMOVE\\s+(.+?)(?=\\s+(SET|ADD|DELETE)\\b|$)"; @@ -440,7 +344,7 @@ public static BsonDocument getBsonDocument(String updateExpression, } } else { throw new RuntimeException( - "SET Expression " + setString + " does not include key value pairs separated by ="); + "SET Expression " + setString + " does not include key value pairs separated by ="); } } bsonDocument.put("$SET", setBsonDoc); @@ -465,8 +369,8 @@ public static BsonDocument getBsonDocument(String updateExpression, String attributeVal = keyVal[1].trim(); addBsonDoc.put(attributeKey, comparisonValue.get(attributeVal)); } else { - throw new RuntimeException("ADD Expression " + addString - + " does not include key value pairs separated by space"); + 
throw new RuntimeException( + "ADD Expression " + addString + " does not include key value pairs separated by space"); } } bsonDocument.put("$ADD", addBsonDoc); @@ -483,7 +387,7 @@ public static BsonDocument getBsonDocument(String updateExpression, delBsonDoc.put(attributeKey, comparisonValue.get(attributeVal)); } else { throw new RuntimeException("DELETE Expression " + deleteString - + " does not include key value pairs separated by space"); + + " does not include key value pairs separated by space"); } } bsonDocument.put("$DELETE_FROM_SET", delBsonDoc); @@ -492,9 +396,9 @@ public static BsonDocument getBsonDocument(String updateExpression, } private static BsonString getArithmeticExpVal(String attributeVal, - BsonDocument comparisonValuesDocument) { + BsonDocument comparisonValuesDocument) { String[] tokens = attributeVal.split("\\s+"); - // Pattern pattern = Pattern.compile(":?[a-zA-Z0-9]+"); + // Pattern pattern = Pattern.compile(":?[a-zA-Z0-9]+"); Pattern pattern = Pattern.compile("[#:$]?[^\\s\\n]+"); Number newNum = null; StringBuilder val = new StringBuilder(); @@ -513,7 +417,7 @@ private static BsonString getArithmeticExpVal(String attributeVal, BsonValue bsonValue = comparisonValuesDocument.get(operand); if (!bsonValue.isNumber() && !bsonValue.isDecimal128()) { throw new IllegalArgumentException( - "Operand " + operand + " is not provided as number type"); + "Operand " + operand + " is not provided as number type"); } Number numVal = UpdateExpressionUtils.getNumberFromBsonNumber((BsonNumber) bsonValue); val.append(numVal); @@ -531,13 +435,11 @@ private static TestFieldsMap getComparisonValuesMap() { map2.put(":newTitle", new TestFieldValue().withS("Cycle_1234_new")); map2.put(":newId", new TestFieldValue().withS("12345")); map2.put(":newIdNeg", new TestFieldValue().withN(-12345)); - map2.put(":ColorList", new TestFieldValue().withL( - new TestFieldValue().withS("Black"), + map2.put(":ColorList", + new TestFieldValue().withL(new TestFieldValue().withS("Black"), new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("White"))), - new TestFieldValue().withS("Silver") - )); - map2.put(":Id1", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("ID_101")))); + new TestFieldValue().withS("Silver"))); + map2.put(":Id1", new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("ID_101")))); map2.put(":NList001", new TestFieldValue().withN(12.22)); map2.put(":NList003", new TestFieldValue().withNULL(true)); map2.put(":NList004", new TestFieldValue().withBOOL(true)); @@ -545,21 +447,17 @@ private static TestFieldsMap getComparisonValuesMap() { map2.put(":NList1_0", new TestFieldValue().withSS("Updated_set_01", "Updated_set_02")); map2.put(":NestedList1_ISBN", new TestFieldValue().withS("111-1111111122")); map2.put(":NestedList12_00", new TestFieldValue().withSS("xyz01234", "abc01234")); - map2.put(":NestedList12_01", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("val03")), - new SerializableBytesPtr(Bytes.toBytes("val04")) - )); - map2.put(":AddedPics", new TestFieldValue().withSS( - "1235@_rear.jpg", - "xyz5@_rear.jpg")); - map2.put(":PictureBinarySet01", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("123_rear.jpg")), + map2.put(":NestedList12_01", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("val03")), + new SerializableBytesPtr(Bytes.toBytes("val04")))); + map2.put(":AddedPics", new TestFieldValue().withSS("1235@_rear.jpg", "xyz5@_rear.jpg")); + map2.put(":PictureBinarySet01", + 
new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("123_rear.jpg")), new SerializableBytesPtr(Bytes.toBytes("xyz_front.jpg")), - new SerializableBytesPtr(Bytes.toBytes("xyz_front.jpg_no")) - )); + new SerializableBytesPtr(Bytes.toBytes("xyz_front.jpg_no")))); map2.put(":NSet01", new TestFieldValue().withNS(-6830.5555, -48695)); - map2.put(":NestedList1TitleSet01", new TestFieldValue().withSS("Book 1010 Title", - "Book 1011 Title")); + map2.put(":NestedList1TitleSet01", + new TestFieldValue().withSS("Book 1010 Title", "Book 1011 Title")); comparisonMap.setMap(map2); return comparisonMap; } @@ -569,18 +467,16 @@ private static TestFieldsMap getTestFieldsMap1() { Map map = new HashMap<>(); map.put("attr_0", new TestFieldValue().withS("str_val_0")); map.put("attr_1", new TestFieldValue().withN(1295.03)); - map.put("attr_5", new TestFieldValue().withL( - new TestFieldValue().withN(1234), + map.put("attr_5", + new TestFieldValue().withL(new TestFieldValue().withN(1234), new TestFieldValue().withS("str001"), - new TestFieldValue().withB(new SerializableBytesPtr( - new byte[] {0, 1, 2, 3, 4, 5})))); + new TestFieldValue().withB(new SerializableBytesPtr(new byte[] { 0, 1, 2, 3, 4, 5 })))); Map nMap1 = new HashMap<>(); nMap1.put("n_attr_0", new TestFieldValue().withS("str_val_0")); nMap1.put("n_attr_1", new TestFieldValue().withN(1295.03)); String bytesAttributeVal1 = "2048u5nblwjeiWFGTH(4bf930"; byte[] bytesAttrVal1 = bytesAttributeVal1.getBytes(); - nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr( - bytesAttrVal1))); + nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr(bytesAttrVal1))); nMap1.put("n_attr_3", new TestFieldValue().withBOOL(true)); nMap1.put("n_attr_4", new TestFieldValue().withNULL(true)); map.put("attr_6", new TestFieldValue().withM(nMap1)); @@ -588,23 +484,17 @@ private static TestFieldsMap getTestFieldsMap1() { map.put("IdS", new TestFieldValue().withS("101.01")); map.put("Id2", new TestFieldValue().withN(101.01)); map.put("ColorBytes", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Black")))); - map.put("RelatedItems", - new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); - map.put("Pictures", new TestFieldValue().withSS( - "123_rear.jpg", - "xyz_rear.jpg", - "123_front.jpg", - "xyz_front.jpg" - )); - map.put("PictureBinarySet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("123_rear.jpg")), + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Black")))); + map.put("RelatedItems", new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); + map.put("Pictures", new TestFieldValue().withSS("123_rear.jpg", "xyz_rear.jpg", "123_front.jpg", + "xyz_front.jpg")); + map.put("PictureBinarySet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("123_rear.jpg")), new SerializableBytesPtr(Bytes.toBytes("xyz_rear.jpg")), new SerializableBytesPtr(Bytes.toBytes("123_front.jpg")), new SerializableBytesPtr(Bytes.toBytes("xyz_front.jpg")), new SerializableBytesPtr(Bytes.toBytes("123abc_rear.jpg")), - new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")) - )); + new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")))); map.put("Title", new TestFieldValue().withS("Book 101 Title")); map.put("ISBN", new TestFieldValue().withS("111-1111111111")); map.put("InPublication", new TestFieldValue().withBOOL(false)); @@ -614,56 +504,46 @@ private static TestFieldsMap getTestFieldsMap1() { nestedMap1.put("ISBN", new 
TestFieldValue().withS("111-1111111111")); nestedMap1.put("InPublication", new TestFieldValue().withBOOL(false)); nestedMap1.put("NList1", - new TestFieldValue().withL(new TestFieldValue().withS("NListVal01"), - new TestFieldValue().withN(-0.00234), - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("to_be_removed"))))); + new TestFieldValue().withL(new TestFieldValue().withS("NListVal01"), + new TestFieldValue().withN(-0.00234), + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("to_be_removed"))))); Map nestedMap2 = new HashMap<>(); nestedMap2.put("Id", new TestFieldValue().withN(101.22)); nestedMap2.put("Title", new TestFieldValue().withS("Book 10122 Title")); nestedMap2.put("ISBN", new TestFieldValue().withS("111-1111111111999")); nestedMap2.put("InPublication", new TestFieldValue().withBOOL(true)); - nestedMap2.put("NList", - new TestFieldValue().withL(new TestFieldValue().withS("NListVal01"), - new TestFieldValue().withN(-0.00234))); - nestedMap1.put("NestedMap2", - new TestFieldValue().withM(nestedMap2)); + nestedMap2.put("NList", new TestFieldValue().withL(new TestFieldValue().withS("NListVal01"), + new TestFieldValue().withN(-0.00234))); + nestedMap1.put("NestedMap2", new TestFieldValue().withM(nestedMap2)); nestedMap1.put("NSet1", - new TestFieldValue().withNS(123.45, 9586.7778, -124, -6830.5555, 10238, - -48695)); + new TestFieldValue().withNS(123.45, 9586.7778, -124, -6830.5555, 10238, -48695)); map.put("NestedMap1", new TestFieldValue().withM(nestedMap1)); Map nestedList1Map1 = new HashMap<>(); nestedList1Map1.put("Id", new TestFieldValue().withN(101.01)); nestedList1Map1.put("Title", new TestFieldValue().withS("Book 101 Title")); nestedList1Map1.put("ISBN", new TestFieldValue().withS("111-1111111111")); nestedList1Map1.put("InPublication", new TestFieldValue().withBOOL(false)); - nestedList1Map1.put("IdSet", - new TestFieldValue().withNS(204850.69703847596, -39457860.486939476, 20576024, - 19306873, 4869067048362749590684d)); - nestedList1Map1.put("TitleSet1", - new TestFieldValue().withSS("Book 1010 Title", "Book 1011 Title", - "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); - nestedList1Map1.put("TitleSet2", - new TestFieldValue().withSS("Book 1010 Title", "Book 1011 Title", - "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); - nestedList1Map1.put("BinaryTitleSet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), + nestedList1Map1.put("IdSet", new TestFieldValue().withNS(204850.69703847596, + -39457860.486939476, 20576024, 19306873, 4869067048362749590684d)); + nestedList1Map1.put("TitleSet1", new TestFieldValue().withSS("Book 1010 Title", + "Book 1011 Title", "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); + nestedList1Map1.put("TitleSet2", new TestFieldValue().withSS("Book 1010 Title", + "Book 1011 Title", "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); + nestedList1Map1.put("BinaryTitleSet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), new SerializableBytesPtr(Bytes.toBytes("Book 1011 Title Binary")), - new SerializableBytesPtr(Bytes.toBytes("Book 1111 Title Binary")) - )); + new SerializableBytesPtr(Bytes.toBytes("Book 1111 Title Binary")))); map.put("NestedList1", - new TestFieldValue().withL(new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL(new TestFieldValue().withS("xyz0123"), - new TestFieldValue().withM(nestedList1Map1)))); + new 
TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), new TestFieldValue().withL( + new TestFieldValue().withS("xyz0123"), new TestFieldValue().withM(nestedList1Map1)))); map.put("NestedList12", - new TestFieldValue().withL(new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL( - new TestFieldValue().withSS("xyz0123"), - new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("val01")), - new SerializableBytesPtr(Bytes.toBytes("val02")), - new SerializableBytesPtr(Bytes.toBytes("val03")))))); + new TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), + new TestFieldValue().withL(new TestFieldValue().withSS("xyz0123"), + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("val01")), + new SerializableBytesPtr(Bytes.toBytes("val02")), + new SerializableBytesPtr(Bytes.toBytes("val03")))))); testFieldsMap.setMap(map); return testFieldsMap; } @@ -674,42 +554,31 @@ private static TestFieldsMap getTestFieldsMap2() { map.put("attr_0", new TestFieldValue().withS("str_val_0")); map.put("AddedId", new TestFieldValue().withN(10)); map.put("attr_1", new TestFieldValue().withN(1295.03)); - map.put("Id1", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("ID_101")))); - map.put("attr_5", new TestFieldValue().withL( - new TestFieldValue().withN(1234), + map.put("Id1", new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("ID_101")))); + map.put("attr_5", + new TestFieldValue().withL(new TestFieldValue().withN(1234), new TestFieldValue().withS("str001"), - new TestFieldValue().withB(new SerializableBytesPtr( - new byte[] {0, 1, 2, 3, 4, 5})))); + new TestFieldValue().withB(new SerializableBytesPtr(new byte[] { 0, 1, 2, 3, 4, 5 })))); Map nMap1 = new HashMap<>(); nMap1.put("n_attr_0", new TestFieldValue().withS("str_val_0")); nMap1.put("n_attr_1", new TestFieldValue().withN(1295.03)); String bytesAttributeVal1 = "2048u5nblwjeiWFGTH(4bf930"; byte[] bytesAttrVal1 = bytesAttributeVal1.getBytes(); - nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr( - bytesAttrVal1))); + nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr(bytesAttrVal1))); nMap1.put("n_attr_3", new TestFieldValue().withBOOL(true)); nMap1.put("n_attr_4", new TestFieldValue().withNULL(true)); map.put("attr_6", new TestFieldValue().withM(nMap1)); map.put("Id", new TestFieldValue().withS("12345")); map.put("ColorBytes", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Black")))); - map.put("RelatedItems", - new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); - map.put("Pictures", new TestFieldValue().withSS( - "123_rear.jpg", - "1235@_rear.jpg", - "xyz5@_rear.jpg", - "xyz_rear.jpg", - "123_front.jpg", - "xyz_front.jpg" - )); - map.put("PictureBinarySet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("xyz_rear.jpg")), + new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Black")))); + map.put("RelatedItems", new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); + map.put("Pictures", new TestFieldValue().withSS("123_rear.jpg", "1235@_rear.jpg", + "xyz5@_rear.jpg", "xyz_rear.jpg", "123_front.jpg", "xyz_front.jpg")); + map.put("PictureBinarySet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("xyz_rear.jpg")), new SerializableBytesPtr(Bytes.toBytes("123_front.jpg")), new 
SerializableBytesPtr(Bytes.toBytes("123abc_rear.jpg")), - new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")) - )); + new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")))); map.put("Title", new TestFieldValue().withS("Cycle_1234_new")); map.put("ISBN", new TestFieldValue().withS("111-1111111111")); map.put("InPublication", new TestFieldValue().withBOOL(false)); @@ -718,66 +587,49 @@ private static TestFieldsMap getTestFieldsMap2() { nestedMap1.put("AddedId", new TestFieldValue().withN(10)); nestedMap1.put("ISBN", new TestFieldValue().withS("111-1111111111")); nestedMap1.put("InPublication", new TestFieldValue().withBOOL(false)); - nestedMap1.put("NList1", new TestFieldValue().withL( - new TestFieldValue().withSS("Updated_set_01", "Updated_set_02"), + nestedMap1.put("NList1", + new TestFieldValue().withL(new TestFieldValue().withSS("Updated_set_01", "Updated_set_02"), new TestFieldValue().withN(-0.00234))); - nestedMap1.put("ColorList", new TestFieldValue().withL( - new TestFieldValue().withS("Black"), + nestedMap1.put("ColorList", + new TestFieldValue().withL(new TestFieldValue().withS("Black"), new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("White"))), - new TestFieldValue().withS("Silver") - )); + new TestFieldValue().withS("Silver"))); Map nestedMap2 = new HashMap<>(); nestedMap2.put("Id", new TestFieldValue().withN(-12243.78)); nestedMap2.put("NewID", new TestFieldValue().withS("12345")); nestedMap2.put("Title", new TestFieldValue().withS("Book 10122 Title")); nestedMap2.put("ISBN", new TestFieldValue().withS("111-1111111111999")); - nestedMap2.put("NList", - new TestFieldValue().withL( - new TestFieldValue().withN(12.22), - new TestFieldValue().withN(-0.00234), - new TestFieldValue().withNULL(true))); - nestedMap1.put("NestedMap2", - new TestFieldValue().withM(nestedMap2)); - nestedMap1.put("NSet1", - new TestFieldValue().withNS(123.45, 9586.7778, -124, 10238)); + nestedMap2.put("NList", new TestFieldValue().withL(new TestFieldValue().withN(12.22), + new TestFieldValue().withN(-0.00234), new TestFieldValue().withNULL(true))); + nestedMap1.put("NestedMap2", new TestFieldValue().withM(nestedMap2)); + nestedMap1.put("NSet1", new TestFieldValue().withNS(123.45, 9586.7778, -124, 10238)); map.put("NestedMap1", new TestFieldValue().withM(nestedMap1)); Map nestedList1Map1 = new HashMap<>(); nestedList1Map1.put("Id", new TestFieldValue().withN(101.01)); nestedList1Map1.put("Title", new TestFieldValue().withS("Book 101 Title")); nestedList1Map1.put("ISBN", new TestFieldValue().withS("111-1111111122")); nestedList1Map1.put("InPublication", new TestFieldValue().withBOOL(false)); - nestedList1Map1.put("IdSet", - new TestFieldValue().withNS(204850.69703847596, -39457860.486939476, 20576024, - 19306873, 4869067048362749590684D)); + nestedList1Map1.put("IdSet", new TestFieldValue().withNS(204850.69703847596, + -39457860.486939476, 20576024, 19306873, 4869067048362749590684D)); nestedList1Map1.put("TitleSet2", - new TestFieldValue().withSS( - "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); - nestedList1Map1.put("BinaryTitleSet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), + new TestFieldValue().withSS("Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); + nestedList1Map1.put("BinaryTitleSet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), new SerializableBytesPtr(Bytes.toBytes("Book 1011 Title Binary")), - new SerializableBytesPtr(Bytes.toBytes("Book 
1111 Title Binary")) - )); + new SerializableBytesPtr(Bytes.toBytes("Book 1111 Title Binary")))); map.put("NestedList1", - new TestFieldValue().withL(new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL(new TestFieldValue().withS("xyz0123"), - new TestFieldValue().withM(nestedList1Map1)))); + new TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), new TestFieldValue().withL( + new TestFieldValue().withS("xyz0123"), new TestFieldValue().withM(nestedList1Map1)))); map.put("NestedList12", - new TestFieldValue().withL( - new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL( - new TestFieldValue().withSS("xyz0123", "xyz01234", "abc01234"), - new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("val01")), - new SerializableBytesPtr(Bytes.toBytes("val02")), - new SerializableBytesPtr(Bytes.toBytes("val03")), - new SerializableBytesPtr(Bytes.toBytes("val04"))), - new TestFieldValue().withNS( - -234.56, - 123, - 93756.93475960549, - 293755723028458.6)))); + new TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), + new TestFieldValue().withL(new TestFieldValue().withSS("xyz0123", "xyz01234", "abc01234"), + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("val01")), + new SerializableBytesPtr(Bytes.toBytes("val02")), + new SerializableBytesPtr(Bytes.toBytes("val03")), + new SerializableBytesPtr(Bytes.toBytes("val04"))), + new TestFieldValue().withNS(-234.56, 123, 93756.93475960549, 293755723028458.6)))); testFieldsMap.setMap(map); return testFieldsMap; } @@ -789,40 +641,30 @@ private static TestFieldsMap getTestFieldsMap3() { map.put("AddedId", new TestFieldValue().withN(10)); map.put("attr_1", new TestFieldValue().withN(1295.03)); map.put("Id1", new TestFieldValue().withS("12345")); - map.put("attr_5", new TestFieldValue().withL( - new TestFieldValue().withN(1224), + map.put("attr_5", + new TestFieldValue().withL(new TestFieldValue().withN(1224), new TestFieldValue().withS("str001"), - new TestFieldValue().withB(new SerializableBytesPtr( - new byte[] {0, 1, 2, 3, 4, 5})))); + new TestFieldValue().withB(new SerializableBytesPtr(new byte[] { 0, 1, 2, 3, 4, 5 })))); Map nMap1 = new HashMap<>(); nMap1.put("n_attr_0", new TestFieldValue().withS("str_val_0")); nMap1.put("n_attr_1", new TestFieldValue().withN(1295.03)); String bytesAttributeVal1 = "2048u5nblwjeiWFGTH(4bf930"; byte[] bytesAttrVal1 = bytesAttributeVal1.getBytes(); - nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr( - bytesAttrVal1))); + nMap1.put("n_attr_2", new TestFieldValue().withB(new SerializableBytesPtr(bytesAttrVal1))); nMap1.put("n_attr_3", new TestFieldValue().withBOOL(true)); nMap1.put("n_attr_4", new TestFieldValue().withNULL(true)); map.put("attr_6", new TestFieldValue().withM(nMap1)); map.put("Id", new TestFieldValue().withS("12345")); map.put("ColorBytes", - new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("Black")))); - map.put("RelatedItems", - new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); - map.put("Pictures", new TestFieldValue().withSS( - "123_rear.jpg", - "1235@_rear.jpg", - "xyz5@_rear.jpg", - "xyz_rear.jpg", - "123_front.jpg", - "xyz_front.jpg" - )); - map.put("PictureBinarySet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("xyz_rear.jpg")), + new TestFieldValue().withB(new 
SerializableBytesPtr(Bytes.toBytes("Black")))); + map.put("RelatedItems", new TestFieldValue().withNS(1234, -485.45582904, 123.0948, 0.111)); + map.put("Pictures", new TestFieldValue().withSS("123_rear.jpg", "1235@_rear.jpg", + "xyz5@_rear.jpg", "xyz_rear.jpg", "123_front.jpg", "xyz_front.jpg")); + map.put("PictureBinarySet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("xyz_rear.jpg")), new SerializableBytesPtr(Bytes.toBytes("123_front.jpg")), new SerializableBytesPtr(Bytes.toBytes("123abc_rear.jpg")), - new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")) - )); + new SerializableBytesPtr(Bytes.toBytes("xyzabc_rear.jpg")))); map.put("Title", new TestFieldValue().withS("Cycle_1234_new")); map.put("ISBN", new TestFieldValue().withS("111-1111111111")); map.put("InPublication", new TestFieldValue().withBOOL(false)); @@ -831,63 +673,50 @@ private static TestFieldsMap getTestFieldsMap3() { nestedMap1.put("AddedId", new TestFieldValue().withN(10)); nestedMap1.put("ISBN", new TestFieldValue().withS("111-1111111111")); nestedMap1.put("InPublication", new TestFieldValue().withBOOL(false)); - nestedMap1.put("NList1", new TestFieldValue().withL( - new TestFieldValue().withSS("Updated_set_01", "Updated_set_02"), + nestedMap1.put("NList1", + new TestFieldValue().withL(new TestFieldValue().withSS("Updated_set_01", "Updated_set_02"), new TestFieldValue().withN(-0.00234))); - nestedMap1.put("ColorList", new TestFieldValue().withL( - new TestFieldValue().withS("Black"), + nestedMap1.put("ColorList", + new TestFieldValue().withL(new TestFieldValue().withS("Black"), new TestFieldValue().withB(new SerializableBytesPtr(Bytes.toBytes("White"))), - new TestFieldValue().withS("Silver") - )); + new TestFieldValue().withS("Silver"))); Map nestedMap2 = new HashMap<>(); nestedMap2.put("Id", new TestFieldValue().withN(-12243.78)); nestedMap2.put("NewID", new TestFieldValue().withS("12345")); nestedMap2.put("Title", new TestFieldValue().withS("Book 10122 Title")); nestedMap2.put("ISBN", new TestFieldValue().withS("111-1111111111999")); - nestedMap2.put("NList", - new TestFieldValue().withL( - new TestFieldValue().withN(12.22), - new TestFieldValue().withN(-0.00234), - new TestFieldValue().withNULL(true))); - nestedMap1.put("NestedMap2", - new TestFieldValue().withM(nestedMap2)); - nestedMap1.put("NSet1", - new TestFieldValue().withNS(123.45, 9586.7778, -124, 10238)); + nestedMap2.put("NList", new TestFieldValue().withL(new TestFieldValue().withN(12.22), + new TestFieldValue().withN(-0.00234), new TestFieldValue().withNULL(true))); + nestedMap1.put("NestedMap2", new TestFieldValue().withM(nestedMap2)); + nestedMap1.put("NSet1", new TestFieldValue().withNS(123.45, 9586.7778, -124, 10238)); map.put("NestedMap1", new TestFieldValue().withM(nestedMap1)); Map nestedList1Map1 = new HashMap<>(); nestedList1Map1.put("Id", new TestFieldValue().withN(101.01)); nestedList1Map1.put("Title", new TestFieldValue().withS("Book 101 Title")); nestedList1Map1.put("ISBN", new TestFieldValue().withS("111-1111111122")); nestedList1Map1.put("InPublication", new TestFieldValue().withBOOL(false)); - nestedList1Map1.put("IdSet", - new TestFieldValue().withNS(204850.69703847596, -39457860.486939476, 20576024, - 19306873, 4869067048362749590684d)); + nestedList1Map1.put("IdSet", new TestFieldValue().withNS(204850.69703847596, + -39457860.486939476, 20576024, 19306873, 4869067048362749590684d)); nestedList1Map1.put("TitleSet2", - new TestFieldValue().withSS( - "Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); 
- nestedList1Map1.put("BinaryTitleSet", new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), + new TestFieldValue().withSS("Book 1111 Title", "Book 1200 Title", "Book 1201 Title")); + nestedList1Map1.put("BinaryTitleSet", + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("Book 1010 Title Binary")), new SerializableBytesPtr(Bytes.toBytes("Book 1011 Title Binary")), - new SerializableBytesPtr(Bytes.toBytes("Book 1111 Title Binary")) - )); + new SerializableBytesPtr(Bytes.toBytes("Book 1111 Title Binary")))); map.put("NestedList1", - new TestFieldValue().withL(new TestFieldValue().withN(-473.11999999999995), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL(new TestFieldValue().withS("xyz0123"), - new TestFieldValue().withM(nestedList1Map1)), - new TestFieldValue().withNULL(true), - new TestFieldValue().withBOOL(true))); + new TestFieldValue().withL(new TestFieldValue().withN(-473.11999999999995), + new TestFieldValue().withS("1234abcd"), + new TestFieldValue().withL(new TestFieldValue().withS("xyz0123"), + new TestFieldValue().withM(nestedList1Map1)), + new TestFieldValue().withNULL(true), new TestFieldValue().withBOOL(true))); map.put("NestedList12", - new TestFieldValue().withL( - new TestFieldValue().withN(-485.34), - new TestFieldValue().withS("1234abcd"), - new TestFieldValue().withL( - new TestFieldValue().withSS("xyz0123", "xyz01234", "abc01234"), - new TestFieldValue().withBS( - new SerializableBytesPtr(Bytes.toBytes("val01")), - new SerializableBytesPtr(Bytes.toBytes("val02")), - new SerializableBytesPtr(Bytes.toBytes("val03")), - new SerializableBytesPtr(Bytes.toBytes("val04")))))); + new TestFieldValue().withL(new TestFieldValue().withN(-485.34), + new TestFieldValue().withS("1234abcd"), + new TestFieldValue().withL(new TestFieldValue().withSS("xyz0123", "xyz01234", "abc01234"), + new TestFieldValue().withBS(new SerializableBytesPtr(Bytes.toBytes("val01")), + new SerializableBytesPtr(Bytes.toBytes("val02")), + new SerializableBytesPtr(Bytes.toBytes("val03")), + new SerializableBytesPtr(Bytes.toBytes("val04")))))); testFieldsMap.setMap(map); return testFieldsMap; } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/csv/CsvUpsertExecutorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/csv/CsvUpsertExecutorTest.java index 85147c353a5..55377d2c750 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/csv/CsvUpsertExecutorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/csv/CsvUpsertExecutorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,51 +25,49 @@ import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.CSVParser; import org.apache.commons.csv.CSVRecord; +import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; +import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; import org.apache.phoenix.util.AbstractUpsertExecutorTest; import org.apache.phoenix.util.UpsertExecutor; import org.junit.Before; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.collect.Iterables; - public class CsvUpsertExecutorTest extends AbstractUpsertExecutorTest { - private static final String ARRAY_SEP = ":"; + private static final String ARRAY_SEP = ":"; - private UpsertExecutor upsertExecutor; + private UpsertExecutor upsertExecutor; - @Override - public UpsertExecutor getUpsertExecutor() { - return upsertExecutor; - } - - @Override - public UpsertExecutor getUpsertExecutor(Connection conn) { - return new CsvUpsertExecutor(conn, columnInfoList, preparedStatement, - upsertListener, ARRAY_SEP); - } - - @Override - public CSVRecord createRecord(Object... columnValues) throws IOException { - for (int i = 0; i < columnValues.length; i++) { - if (columnValues[i] == null) { - // Joiner.join throws on nulls, replace with empty string. - columnValues[i] = ""; - } - if (columnValues[i] instanceof List) { - columnValues[i] = Joiner.on(ARRAY_SEP).join((List) columnValues[i]); - } - } - String inputRecord = Joiner.on(',').join(columnValues); - return Iterables.getFirst(CSVParser.parse(inputRecord, CSVFormat.DEFAULT), null); - } + @Override + public UpsertExecutor getUpsertExecutor() { + return upsertExecutor; + } - @Before - public void setUp() throws SQLException { - super.setUp(); - upsertExecutor = new CsvUpsertExecutor(conn, columnInfoList, preparedStatement, - upsertListener, ARRAY_SEP); + @Override + public UpsertExecutor getUpsertExecutor(Connection conn) { + return new CsvUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener, + ARRAY_SEP); + } + + @Override + public CSVRecord createRecord(Object... columnValues) throws IOException { + for (int i = 0; i < columnValues.length; i++) { + if (columnValues[i] == null) { + // Joiner.join throws on nulls, replace with empty string. 
+ columnValues[i] = ""; + } + if (columnValues[i] instanceof List) { + columnValues[i] = Joiner.on(ARRAY_SEP).join((List) columnValues[i]); + } } + String inputRecord = Joiner.on(',').join(columnValues); + return Iterables.getFirst(CSVParser.parse(inputRecord, CSVFormat.DEFAULT), null); + } + + @Before + public void setUp() throws SQLException { + super.setUp(); + upsertExecutor = + new CsvUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener, ARRAY_SEP); + } - } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/csv/StringToArrayConverterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/csv/StringToArrayConverterTest.java index 25d9d80199f..2360ea13308 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/csv/StringToArrayConverterTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/csv/StringToArrayConverterTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,50 +34,43 @@ public class StringToArrayConverterTest extends BaseConnectionlessQueryTest { - private Connection conn; - private StringToArrayConverter converter; + private Connection conn; + private StringToArrayConverter converter; - @Before - public void setUp() throws SQLException { - conn = DriverManager.getConnection(getUrl()); - converter = new StringToArrayConverter(conn, ":", PVarchar.INSTANCE); - } + @Before + public void setUp() throws SQLException { + conn = DriverManager.getConnection(getUrl()); + converter = new StringToArrayConverter(conn, ":", PVarchar.INSTANCE); + } - @After - public void tearDown() throws SQLException { - conn.close(); - } + @After + public void tearDown() throws SQLException { + conn.close(); + } - @Test - public void testToArray_EmptyString() throws SQLException { - Array emptyArray = converter.toArray(""); - assertEquals(0, ((Object[]) emptyArray.getArray()).length); - } + @Test + public void testToArray_EmptyString() throws SQLException { + Array emptyArray = converter.toArray(""); + assertEquals(0, ((Object[]) emptyArray.getArray()).length); + } + @Test + public void testToArray_SingleElement() throws SQLException { + Array singleElementArray = converter.toArray("value"); + assertArrayEquals(new Object[] { "value" }, (Object[]) singleElementArray.getArray()); + } - @Test - public void testToArray_SingleElement() throws SQLException { - Array singleElementArray = converter.toArray("value"); - assertArrayEquals( - new Object[]{"value"}, - (Object[]) singleElementArray.getArray()); - } + @Test + public void testToArray_MultipleElements() throws SQLException { + Array multiElementArray = converter.toArray("one:two"); + assertArrayEquals(new Object[] { "one", "two" }, (Object[]) multiElementArray.getArray()); + } - @Test - public void testToArray_MultipleElements() throws SQLException { - Array multiElementArray = converter.toArray("one:two"); - assertArrayEquals( - new Object[]{"one", "two"}, - (Object[]) multiElementArray.getArray()); - } - - @Test - public void testToArray_IntegerValues() throws SQLException { - StringToArrayConverter intArrayConverter = new StringToArrayConverter( - conn, ":", PInteger.INSTANCE); - Array intArray = intArrayConverter.toArray("1:2:3"); - assertArrayEquals( - new 
int[]{1, 2, 3}, - (int[]) intArray.getArray()); - } + @Test + public void testToArray_IntegerValues() throws SQLException { + StringToArrayConverter intArrayConverter = + new StringToArrayConverter(conn, ":", PInteger.INSTANCE); + Array intArray = intArrayConverter.toArray("1:2:3"); + assertArrayEquals(new int[] { 1, 2, 3 }, (int[]) intArray.getArray()); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/LinguisticSortTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/LinguisticSortTest.java index 7603b4d5b7c..19527a10eea 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/LinguisticSortTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/LinguisticSortTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,6 +28,7 @@ import static org.apache.phoenix.util.i18n.LinguisticSort.CHINESE_TW; import static org.apache.phoenix.util.i18n.LinguisticSort.CHINESE_TW_STROKE; import static org.apache.phoenix.util.i18n.LinguisticSort.CROATIAN; +import static org.apache.phoenix.util.i18n.LinguisticSort.ESPERANTO; import static org.apache.phoenix.util.i18n.LinguisticSort.ESTONIAN; import static org.apache.phoenix.util.i18n.LinguisticSort.FINNISH; import static org.apache.phoenix.util.i18n.LinguisticSort.HUNGARIAN; @@ -35,20 +37,17 @@ import static org.apache.phoenix.util.i18n.LinguisticSort.KOREAN; import static org.apache.phoenix.util.i18n.LinguisticSort.LATVIAN; import static org.apache.phoenix.util.i18n.LinguisticSort.LITHUANIAN; +import static org.apache.phoenix.util.i18n.LinguisticSort.LUXEMBOURGISH; import static org.apache.phoenix.util.i18n.LinguisticSort.ROMANIAN; import static org.apache.phoenix.util.i18n.LinguisticSort.SERBIAN_LATIN; import static org.apache.phoenix.util.i18n.LinguisticSort.SLOVAK; import static org.apache.phoenix.util.i18n.LinguisticSort.SLOVENE; import static org.apache.phoenix.util.i18n.LinguisticSort.TAJIK; +import static org.apache.phoenix.util.i18n.LinguisticSort.TAMIL; import static org.apache.phoenix.util.i18n.LinguisticSort.TURKISH; import static org.apache.phoenix.util.i18n.LinguisticSort.TURKMEN; -import static org.apache.phoenix.util.i18n.LinguisticSort.VIETNAMESE; -import static org.apache.phoenix.util.i18n.LinguisticSort.LUXEMBOURGISH; import static org.apache.phoenix.util.i18n.LinguisticSort.URDU; -import static org.apache.phoenix.util.i18n.LinguisticSort.TAMIL; -import static org.apache.phoenix.util.i18n.LinguisticSort.ESPERANTO; - -import 
com.ibm.icu.text.Normalizer2; +import static org.apache.phoenix.util.i18n.LinguisticSort.VIETNAMESE; import java.text.CollationKey; import java.text.Collator; @@ -65,586 +64,584 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList; import org.apache.phoenix.thirdparty.com.google.common.collect.Ordering; +import com.ibm.icu.text.Normalizer2; + import junit.framework.TestCase; /** * This test class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * This could be expanded significantly. + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 This could be expanded + * significantly. */ public class LinguisticSortTest extends TestCase { - public LinguisticSortTest(String name) { - super(name); - } - - public void testThaiBasicSorting() { - Locale thaiLoc = new Locale("th"); - - LinguisticSort thaiSort = LinguisticSort.get(thaiLoc); - - // basic sanity check on thai collator comparisons - ImmutableList unsorted = - ImmutableList.of("azw", "Ac", "ab", "21", "zaa", "b\u0e40k", "bk"); - ImmutableList sorted = - ImmutableList.of("21", "ab", "Ac", "azw", "bk", "b\u0e40k", "zaa"); - - assertEquals(sorted, - Ordering.from(thaiSort.getNonCachingComparator()).sortedCopy(unsorted)); - assertEquals(sorted, - Ordering.from(thaiSort.getComparator(16)).sortedCopy(unsorted)); + public LinguisticSortTest(String name) { + super(name); + } + + public void testThaiBasicSorting() { + Locale thaiLoc = new Locale("th"); + + LinguisticSort thaiSort = LinguisticSort.get(thaiLoc); + + // basic sanity check on thai collator comparisons + ImmutableList unsorted = + ImmutableList.of("azw", "Ac", "ab", "21", "zaa", "b\u0e40k", "bk"); + ImmutableList sorted = + ImmutableList.of("21", "ab", "Ac", "azw", "bk", "b\u0e40k", "zaa"); + + assertEquals(sorted, Ordering.from(thaiSort.getNonCachingComparator()).sortedCopy(unsorted)); + assertEquals(sorted, Ordering.from(thaiSort.getComparator(16)).sortedCopy(unsorted)); + } + + public void testThaiCharactersOfDeath() { + // This is the original bug report + Collator c = Collator.getInstance(new Locale("th")); + String s = "\u0e40"; + // any one of \u0e40, \u0e41, \u0e42, \u0e43, or \u0e44 will do + System.out.println(c.compare(s, s)); // In JDK6: runs forever + + // Here's the "real" test + Locale thaiLoc = new Locale("th"); + + LinguisticSort thaiSort = LinguisticSort.get(thaiLoc); + Collator thaiColl = thaiSort.getCollator(); + + String[] oomStrings = + { "\u0e3f", "\u0e45", "\u0e40k", "\u0e44", "\u0e43", "\u0e42", "\u0e41", "\u0e40" }; + String[] srcStrings = oomStrings; + // Deprecated Patched collator adds space after problematic characters at end of string + // (because of http://bugs.sun.com/view_bug.do?bug_id=5047314) + // Otherwise unpatched collator would OOM on these strings + // String [] srcStrings = { + // "\u0e3f", "\u0e45", "\u0e40k", "\u0e44 ", "\u0e43 ", "\u0e42 ", "\u0e41 ", "\u0e40 " + // }; + + for (int i = 0; i < oomStrings.length; i++) { + String oomString = oomStrings[i]; + CollationKey key = thaiColl.getCollationKey(oomString); + 
assertEquals("string #" + i, srcStrings[i], key.getSourceString()); } - - public void testThaiCharactersOfDeath() { - // This is the original bug report - Collator c = Collator.getInstance(new Locale("th")); - String s = "\u0e40"; - // any one of \u0e40, \u0e41, \u0e42, \u0e43, or \u0e44 will do - System.out.println(c.compare(s, s)); // In JDK6: runs forever - - - // Here's the "real" test - Locale thaiLoc = new Locale("th"); - - LinguisticSort thaiSort = LinguisticSort.get(thaiLoc); - Collator thaiColl = thaiSort.getCollator(); - - String [] oomStrings = { - "\u0e3f", "\u0e45", "\u0e40k", "\u0e44", "\u0e43", "\u0e42", "\u0e41", "\u0e40" - }; - String [] srcStrings = oomStrings; - // Deprecated Patched collator adds space after problematic characters at end of string - // (because of http://bugs.sun.com/view_bug.do?bug_id=5047314) - // Otherwise unpatched collator would OOM on these strings - // String [] srcStrings = { - // "\u0e3f", "\u0e45", "\u0e40k", "\u0e44 ", "\u0e43 ", "\u0e42 ", "\u0e41 ", "\u0e40 " - // }; - - for (int i=0; i knownDifferences = + EnumSet.of(CATALAN, FINNISH, TURKISH, CHINESE_HK, CHINESE_HK_STROKE, CHINESE_TW, + CHINESE_TW_STROKE, JAPANESE, KOREAN, BULGARIAN, ROMANIAN, VIETNAMESE, HUNGARIAN, SLOVAK, + SERBIAN_LATIN, BOSNIAN, BASQUE, LUXEMBOURGISH, SLOVENE, CROATIAN, ESTONIAN, ICELANDIC, + LATVIAN, LITHUANIAN, TAJIK, TURKMEN, AZERBAIJANI, URDU, BENGALI, TAMIL, ESPERANTO); + + for (LinguisticSort sort : LinguisticSort.values()) { + if (knownDifferences.contains(sort)) { + continue; + } + + String[] alphabet = sort.getAlphabet(); + String[] icuAlphabet = LinguisticSort.getAlphabetFromICU(sort.getLocale()); + String alphaAsString = Arrays.toString(alphabet); + String icuAlphaAsString = Arrays.toString(icuAlphabet); + + assertEquals("LinguisticSort for " + sort + " doesn't match", icuAlphaAsString, + alphaAsString); + if (!icuAlphaAsString.equals(alphaAsString)) { + System.out.println(sort + "\n" + icuAlphaAsString + "\n" + alphaAsString); + } else { + // System.out.println(sort + ":SAME"); + } } + } - public void testRolodexIndexByChar() throws Exception{ - LinguisticSort englishSort = LinguisticSort.ENGLISH; - - assertEquals(0, englishSort.getRolodexIndexForChar("a")); - assertEquals(0, englishSort.getRolodexIndexForChar("Á")); - assertEquals(1, englishSort.getRolodexIndexForChar("b")); - assertEquals(13, englishSort.getRolodexIndexForChar("N")); - assertEquals(13, englishSort.getRolodexIndexForChar("Ñ")); - assertEquals(25, englishSort.getRolodexIndexForChar("z")); - //А below is the Cyrillic А - assertOther(Arrays.asList("А", "こ"), englishSort); - - //Spanish - LinguisticSort spanishSort = LinguisticSort.SPANISH; - assertEquals(0, spanishSort.getRolodexIndexForChar("a")); - assertEquals(0, spanishSort.getRolodexIndexForChar("Á")); - assertEquals(1, spanishSort.getRolodexIndexForChar("b")); - assertEquals(13, spanishSort.getRolodexIndexForChar("N")); - assertEquals(14, spanishSort.getRolodexIndexForChar("Ñ")); - assertEquals(26, spanishSort.getRolodexIndexForChar("z")); - //А below is the Cyrillic А - assertOther(Arrays.asList("А", "こ"), spanishSort); - - //Japanese - LinguisticSort japaneseSort = LinguisticSort.JAPANESE; - assertEquals(0, japaneseSort.getRolodexIndexForChar("a")); - assertEquals(0, japaneseSort.getRolodexIndexForChar("Á")); - assertEquals(1, japaneseSort.getRolodexIndexForChar("b")); - assertEquals(13, japaneseSort.getRolodexIndexForChar("N")); - assertEquals(13, japaneseSort.getRolodexIndexForChar("Ñ")); - assertEquals(25, 
japaneseSort.getRolodexIndexForChar("z")); - assertEquals(27, japaneseSort.getRolodexIndexForChar("こ")); - assertEquals(27, japaneseSort.getRolodexIndexForChar("く")); - assertEquals(31, japaneseSort.getRolodexIndexForChar("ふ")); - //А below is the Cyrillic А - assertOther(Arrays.asList("\u0410"), spanishSort); // А - - //Malay has a rolodex - LinguisticSort malaySort = LinguisticSort.MALAY; - assertEquals(0, malaySort.getRolodexIndexForChar("a")); - assertEquals(25, malaySort.getRolodexIndexForChar("z")); - assertOther(Arrays.asList("\u0410", "\u304f"), malaySort); // "А", "く" - - // Thai has a rolodex, all of these should be "other" - // (Thai has 44 chars, so other is 46) - LinguisticSort thaiSort = LinguisticSort.THAI; - assertConstant(Arrays.asList("A", "Á", "b", "\u304f", "\u0410"), - thaiSort, 46, "had a rolodex index."); + private void assertOther(Collection chars, LinguisticSort sort) { + assertConstant(chars, sort, sort.getAlphabetLength(), "wasn't in 'Other' category"); + } + private void assertConstant(Collection chars, LinguisticSort sort, int constant, + String message) { + for (String c : chars) { + assertEquals(c + " " + message, constant, sort.getRolodexIndexForChar(c)); } - - public void testRolodexComparedToIcu() { - Set knownDifferences = EnumSet.of( - CATALAN, FINNISH, TURKISH, CHINESE_HK, CHINESE_HK_STROKE, CHINESE_TW, - CHINESE_TW_STROKE, JAPANESE, KOREAN, BULGARIAN, ROMANIAN, VIETNAMESE, - HUNGARIAN, SLOVAK, SERBIAN_LATIN, BOSNIAN, BASQUE, LUXEMBOURGISH, SLOVENE, - CROATIAN, ESTONIAN, ICELANDIC, LATVIAN, LITHUANIAN, TAJIK, TURKMEN, AZERBAIJANI, - URDU, BENGALI, TAMIL, ESPERANTO); - - for (LinguisticSort sort : LinguisticSort.values()) { - if (knownDifferences.contains(sort)) { - continue; - } - - String[] alphabet = sort.getAlphabet(); - String[] icuAlphabet = LinguisticSort.getAlphabetFromICU(sort.getLocale()); - String alphaAsString = Arrays.toString(alphabet); - String icuAlphaAsString = Arrays.toString(icuAlphabet); - - assertEquals("LinguisticSort for " + sort + " doesn't match", - icuAlphaAsString, alphaAsString); - if (!icuAlphaAsString.equals(alphaAsString)) { - System.out.println(sort + "\n" + icuAlphaAsString + "\n" + alphaAsString); - } else { - //System.out.println(sort + ":SAME"); - } + } + + /** + * Make sure the upper case collator works equivalently to upper-casing then collating + */ + public void testUpperCaseCollator() { + // bump these up for performance testing + final int repeatTimes = 1; + final int testSize = 1000; + + testUpperCaseCollator(true, repeatTimes, testSize); + testUpperCaseCollator(false, repeatTimes, testSize); + } + + /** + * Implementation of the testUpperCaseCollator that allows breaking out an ascii only test from a + * general string test + */ + private void testUpperCaseCollator(boolean asciiOnly, int repeatTimes, int testSize) { + final LinguisticSort sort = LinguisticSort.ENGLISH; + final Collator collator = sort.getCollator(); + + final Collator ucCollator = sort.getUpperCaseCollator(false); + + final Random r = new Random(); + final int maxLength = 100; + for (int iteration = 0; iteration < repeatTimes; iteration++) { + final boolean lastTime = iteration == repeatTimes - 1; + final String[] originals = new String[testSize]; + for (int i = 0; i < testSize; i++) { + switch (i) { + case 0: + originals[i] = "abß"; + break; + case 1: + originals[i] = "abSS"; + break; + case 2: + originals[i] = "abß"; + break; + case 3: + originals[i] = "ffo"; + break; + case 4: + originals[i] = "ffi"; + break; + case 5: + originals[i] = 
"FFI"; + break; + case 6: + originals[i] = "fred"; + break; + case 7: + originals[i] = "FRED"; + break; + case 8: + originals[i] = "FREE"; + break; + case 9: + originals[i] = "剫"; + break; + case 10: + originals[i] = "뻎"; + break; + case 11: + originals[i] = "\u1fe3"; + break; + case 12: + originals[i] = "\u05d7"; + break; + case 13: + originals[i] = "\u1fd3"; + break; + case 14: + originals[i] = "\u1441"; + break; + case 15: + originals[i] = "\ub9fe"; + break; + case 16: + originals[i] = "\u0398"; + break; + case 17: + originals[i] = "\u0399"; + break; + case 18: + originals[i] = "\u039a"; + break; + case 19: + originals[i] = "\u4371"; + break; + case 20: + originals[i] = "\ufb06"; + break; + default: + originals[i] = randomString(r, maxLength, asciiOnly); } - } - - private void assertOther(Collection chars, LinguisticSort sort){ - assertConstant(chars, sort, sort.getAlphabetLength(), "wasn't in 'Other' category"); - } - - private void assertConstant(Collection chars, LinguisticSort sort, - int constant, String message) { - for (String c : chars){ - assertEquals(c + " " + message, constant, sort.getRolodexIndexForChar(c)); + } + + final int[] upperResults = new int[testSize]; + { + final long start = System.currentTimeMillis(); + for (int i = 0; i < testSize; i++) { + final int next = i + 1 == testSize ? 0 : i + 1; + upperResults[i] = collator.compare(sort.getUpperCaseValue(originals[i], false), + sort.getUpperCaseValue(originals[next], false)); } - } - - /** - * Make sure the upper case collator works equivalently to upper-casing then collating - */ - public void testUpperCaseCollator() { - // bump these up for performance testing - final int repeatTimes = 1; - final int testSize = 1000; - - testUpperCaseCollator(true, repeatTimes, testSize); - testUpperCaseCollator(false, repeatTimes, testSize); - } - - /** - * Implementation of the testUpperCaseCollator that allows breaking out an ascii only - * test from a general string test - */ - private void testUpperCaseCollator(boolean asciiOnly, int repeatTimes, int testSize) { - final LinguisticSort sort = LinguisticSort.ENGLISH; - final Collator collator = sort.getCollator(); - - final Collator ucCollator = sort.getUpperCaseCollator(false); - - final Random r = new Random(); - final int maxLength = 100; - for (int iteration = 0; iteration < repeatTimes; iteration++) { - final boolean lastTime = iteration == repeatTimes - 1; - final String[] originals = new String[testSize]; - for (int i = 0; i < testSize; i++) { - switch (i) { - case 0: - originals[i] = "abß"; - break; - case 1: - originals[i] = "abSS"; - break; - case 2: - originals[i] = "abß"; - break; - case 3: - originals[i] = "ffo"; - break; - case 4: - originals[i] = "ffi"; - break; - case 5: - originals[i] = "FFI"; - break; - case 6: - originals[i] = "fred"; - break; - case 7: - originals[i] = "FRED"; - break; - case 8: - originals[i] = "FREE"; - break; - case 9: - originals[i] = "剫"; - break; - case 10: - originals[i] = "뻎"; - break; - case 11: - originals[i] = "\u1fe3"; - break; - case 12: - originals[i] = "\u05d7"; - break; - case 13: - originals[i] = "\u1fd3"; - break; - case 14: - originals[i] = "\u1441"; - break; - case 15: - originals[i] = "\ub9fe"; - break; - case 16: - originals[i] = "\u0398"; - break; - case 17: - originals[i] = "\u0399"; - break; - case 18: - originals[i] = "\u039a"; - break; - case 19: - originals[i] = "\u4371"; - break; - case 20: - originals[i] = "\ufb06"; - break; - default : - originals[i] = randomString(r, maxLength, asciiOnly); - } - } - - final int[] 
upperResults = new int[testSize]; - { - final long start = System.currentTimeMillis(); - for (int i = 0; i < testSize; i++) { - final int next = i + 1 == testSize ? 0 : i + 1; - upperResults[i] = collator.compare(sort.getUpperCaseValue(originals[i], false), - sort.getUpperCaseValue(originals[next], false)); - } - if (lastTime) { - final long time = System.currentTimeMillis() - start; - System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + - "strings with upper casing in " + time + "ms"); - } - } - - final int[] caseResults = new int[testSize]; - { - final long start = System.currentTimeMillis(); - for (int i = 0; i < testSize; i++) { - final int next = i + 1 == testSize ? 0 : i + 1; - caseResults[i] = ucCollator.compare(originals[i], originals[next]); - } - if (lastTime) { - final long time = System.currentTimeMillis() - start; - System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + - "strings with upper case collator comparison in " + time + "ms"); - } - } - - final int[] keyResults = new int[testSize]; - { - final long start = System.currentTimeMillis(); - for (int i = 0; i < testSize; i++) { - final int next = i + 1 == testSize ? 0 : i + 1; - keyResults[i] = ucCollator.getCollationKey(originals[i]) - .compareTo(ucCollator.getCollationKey(originals[next])); - } - if (lastTime) { - final long time = System.currentTimeMillis() - start; - System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + - "strings with collation keys in " + time + "ms"); - } - } - - if (lastTime) { - System.out.println(); - } - - if (lastTime) { - // normalizing helps see why strings don't compare the same when upper-cased - final Normalizer2 normalizer = Normalizer2.getNFKDInstance(); - for (int i = 0; i < testSize; i++) { - final int next = i + 1 == testSize ? 0 : i + 1; - final boolean caseOk = upperResults[i] == caseResults[i]; - final boolean keyOk = upperResults[i] == keyResults[i]; - if (!caseOk || !keyOk) { - final String message = - "Did not get expected result when comparing string " + i + " " + - (caseOk ? "" : "using upper case collator comparison ") + - (caseOk || keyOk ? "" : "or ") + - (keyOk ? "" : "using collation key comparison ") + - "\n" + - "'" + escape(originals[i]) + "'\n" + - "(" + escape(sort.getUpperCaseValue(originals[i], false)) + ")\n" + - "<" + escape(normalizer.normalize(originals[i])) + "> " + - "with string " + next + " \n" + - "'" + escape(originals[next]) + "'\n" + - "(" + escape(sort.getUpperCaseValue(originals[next], false)) + - ")\n " + - "<" + escape(normalizer.normalize(originals[next])) + ">"; - assertEquals(message, upperResults[i], caseResults[i]); - } - } - } + if (lastTime) { + final long time = System.currentTimeMillis() - start; + System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + + "strings with upper casing in " + time + "ms"); } - } - - /** - * For diagnosis of mismatched strings, dumps a string using standard Java notation - * for escaping non-printable or non-ascii characters - */ - private String escape(String string) { - final StringBuilder sb = new StringBuilder(string.length() * 2); - int index = 0; - while (index < string.length()) { - final int ch = string.codePointAt(index); - index += Character.charCount(ch); - - escapeCodePoint(sb, ch); + } + + final int[] caseResults = new int[testSize]; + { + final long start = System.currentTimeMillis(); + for (int i = 0; i < testSize; i++) { + final int next = i + 1 == testSize ? 
0 : i + 1; + caseResults[i] = ucCollator.compare(originals[i], originals[next]); } - return sb.toString(); - } - - /** - * Escapes a single code point so that non-ascii and non-printable characters use - * their standard Java escape - */ - private void escapeCodePoint(final StringBuilder sb, final int ch) { - switch(ch) { - case '\b' : sb.append("\\b"); - break; - case '\t' : sb.append("\\t"); - break; - case '\n' : sb.append("\\n"); - break; - case '\r' : sb.append("\\r"); - break; - case '\f' : sb.append("\\f"); - break; - case '\"' : sb.append("\\\""); - break; - case '\\' : sb.append("\\\\"); - break; - default: - if (ch < 0x20 || ch > 0x7E) { - sb.append(String.format("\\u%04x", ch)); - } else { - sb.appendCodePoint(ch); - } + if (lastTime) { + final long time = System.currentTimeMillis() - start; + System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + + "strings with upper case collator comparison in " + time + "ms"); } - } - - /** - * Generates a random string with between 0 and maxLength characters - */ - private String randomString(Random r, int maxLength, boolean asciiOnly) { - final int length = r.nextInt(maxLength); - return randomFixedLengthString(r, length, asciiOnly); - } - - - /** - * Generates a random string of the given length - */ - private String randomFixedLengthString(Random r, int length, boolean asciiOnly) { - final StringBuilder sb = new StringBuilder(); - for (int i = 0; i < length; i++) { - char c = 0; - while (!Character.isDefined(c) || Character.isISOControl(c)) { - c = (char)(asciiOnly ? r.nextInt(128) : r.nextInt()); - } - sb.append(c); + } + + final int[] keyResults = new int[testSize]; + { + final long start = System.currentTimeMillis(); + for (int i = 0; i < testSize; i++) { + final int next = i + 1 == testSize ? 0 : i + 1; + keyResults[i] = ucCollator.getCollationKey(originals[i]) + .compareTo(ucCollator.getCollationKey(originals[next])); } - return sb.toString(); - } - - public void testUpperCaseExceptionChars() { - // Sharp s in English - String[][] enCases = new String[][] { - // { input, expected output } - new String[] { "ß", "ß" }, - new String[] { "ßß", "ßß" }, - new String[] { "ßßß", "ßßß" }, - new String[] { "aß", "Aß" }, - new String[] { "aaaß", "AAAß" }, - new String[] { "ßa", "ßA" }, - new String[] { "ßaaa", "ßAAA" }, - new String[] { "aßb", "AßB" }, - new String[] { "aaaßbbb", "AAAßBBB" }, - new String[] { "ßaß", "ßAß" }, - new String[] { "ßaaaß", "ßAAAß" }, - new String[] { "aßbßc", "AßBßC" }, - new String[] { "aaaßbbbßccc", "AAAßBBBßCCC" }, - new String[] { "aßßc", "AßßC" }, - new String[] { "aaaßßccc", "AAAßßCCC" }, - }; - - for (String[] c : enCases) { - assertEquals(c[1], LinguisticSort.ENGLISH.getUpperCaseValue(c[0], false)); + if (lastTime) { + final long time = System.currentTimeMillis() - start; + System.out.println("Compared " + testSize + " " + (asciiOnly ? "ascii " : "") + + "strings with collation keys in " + time + "ms"); } - - // Omicron in Greek - String[][] greekCases = new String[][] { - new String[] { "\u039f", "\u039f" }, // capital omicron - new String[] { "Ό", "\u039f" } - - }; - - for (String[] c : greekCases) { - assertEquals(c[1], LinguisticSort.GREEK.getUpperCaseValue(c[0], false)); + } + + if (lastTime) { + System.out.println(); + } + + if (lastTime) { + // normalizing helps see why strings don't compare the same when upper-cased + final Normalizer2 normalizer = Normalizer2.getNFKDInstance(); + for (int i = 0; i < testSize; i++) { + final int next = i + 1 == testSize ? 
0 : i + 1; + final boolean caseOk = upperResults[i] == caseResults[i]; + final boolean keyOk = upperResults[i] == keyResults[i]; + if (!caseOk || !keyOk) { + final String message = "Did not get expected result when comparing string " + i + " " + + (caseOk ? "" : "using upper case collator comparison ") + + (caseOk || keyOk ? "" : "or ") + (keyOk ? "" : "using collation key comparison ") + + "\n" + "'" + escape(originals[i]) + "'\n" + "(" + + escape(sort.getUpperCaseValue(originals[i], false)) + ")\n" + "<" + + escape(normalizer.normalize(originals[i])) + "> " + "with string " + next + " \n" + + "'" + escape(originals[next]) + "'\n" + "(" + + escape(sort.getUpperCaseValue(originals[next], false)) + ")\n " + "<" + + escape(normalizer.normalize(originals[next])) + ">"; + assertEquals(message, upperResults[i], caseResults[i]); + } } + } } - - public void testUsesUpper() { - assertTrue(LinguisticSort.ENGLISH.usesUpperToGetUpperCase(false)); - assertTrue(LinguisticSort.ESPERANTO.usesUpperToGetUpperCase(false)); - assertTrue(!LinguisticSort.GERMAN.usesUpperToGetUpperCase(false)); + } + + /** + * For diagnosis of mismatched strings, dumps a string using standard Java notation for escaping + * non-printable or non-ascii characters + */ + private String escape(String string) { + final StringBuilder sb = new StringBuilder(string.length() * 2); + int index = 0; + while (index < string.length()) { + final int ch = string.codePointAt(index); + index += Character.charCount(ch); + + escapeCodePoint(sb, ch); } - - public void testGetUpperCaseCollationKey() { - assertEquals(LinguisticSort.ENGLISH.getUpperCaseSql("x", false), - LinguisticSort.ENGLISH.getUpperCollationKeySql("x", false)); + return sb.toString(); + } + + /** + * Escapes a single code point so that non-ascii and non-printable characters use their standard + * Java escape + */ + private void escapeCodePoint(final StringBuilder sb, final int ch) { + switch (ch) { + case '\b': + sb.append("\\b"); + break; + case '\t': + sb.append("\\t"); + break; + case '\n': + sb.append("\\n"); + break; + case '\r': + sb.append("\\r"); + break; + case '\f': + sb.append("\\f"); + break; + case '\"': + sb.append("\\\""); + break; + case '\\': + sb.append("\\\\"); + break; + default: + if (ch < 0x20 || ch > 0x7E) { + sb.append(String.format("\\u%04x", ch)); + } else { + sb.appendCodePoint(ch); + } } - - /** - * I wanted to see the perf impact of doing special-case logic in the EN locale for the German - * sharp s, ß. Rename this test (remove the leading _) to run it, e.g. in Eclipse. - *

    - * This method generates two sets of 1000 randomish Strings, one with sharp s and one without. - * Then it runs 1 million uppercase operations on each bank of strings, using the EN locale - * (with the special-case logic) and a test locale -- EO, Esperanto -- which does not have - * any special-case logic. - *

    - * For posterity, when I run this on my machine, I see results like this - * (averages rounded to nearest 10ms): - *

    - *

    - * - * - * - *
    ENGLSIHESPERANTOGREEK
    with sharp s330ms260ms370ms
    without sharp s150ms130ms213ms
    - */ - public void _testUpperCasePerf() { - String[] withSharpS = genStrings(1000, true); - String[] withoutSharpS = genStrings(1000, false); - - System.out.println("ENGLISH, with ß:"); - runUpperCase(LinguisticSort.ENGLISH, withSharpS); - System.out.println("ENGLISH, without ß:"); - runUpperCase(LinguisticSort.ENGLISH, withoutSharpS); - - System.out.println("ESPERANTO, with ß:"); - runUpperCase(LinguisticSort.ESPERANTO, withSharpS); - System.out.println("ESPERANTO, without ß:"); - runUpperCase(LinguisticSort.ESPERANTO, withoutSharpS); - - // Interesting for having a lot of exceptions. - System.out.println("GREEK, with ß:"); - runUpperCase(LinguisticSort.GREEK, withSharpS); - System.out.println("GREEK, without ß:"); - runUpperCase(LinguisticSort.GREEK, withoutSharpS); + } + + /** + * Generates a random string with between 0 and maxLength characters + */ + private String randomString(Random r, int maxLength, boolean asciiOnly) { + final int length = r.nextInt(maxLength); + return randomFixedLengthString(r, length, asciiOnly); + } + + /** + * Generates a random string of the given length + */ + private String randomFixedLengthString(Random r, int length, boolean asciiOnly) { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + char c = 0; + while (!Character.isDefined(c) || Character.isISOControl(c)) { + c = (char) (asciiOnly ? r.nextInt(128) : r.nextInt()); + } + sb.append(c); + } + return sb.toString(); + } + + public void testUpperCaseExceptionChars() { + // Sharp s in English + String[][] enCases = new String[][] { + // { input, expected output } + new String[] { "ß", "ß" }, new String[] { "ßß", "ßß" }, new String[] { "ßßß", "ßßß" }, + new String[] { "aß", "Aß" }, new String[] { "aaaß", "AAAß" }, new String[] { "ßa", "ßA" }, + new String[] { "ßaaa", "ßAAA" }, new String[] { "aßb", "AßB" }, + new String[] { "aaaßbbb", "AAAßBBB" }, new String[] { "ßaß", "ßAß" }, + new String[] { "ßaaaß", "ßAAAß" }, new String[] { "aßbßc", "AßBßC" }, + new String[] { "aaaßbbbßccc", "AAAßBBBßCCC" }, new String[] { "aßßc", "AßßC" }, + new String[] { "aaaßßccc", "AAAßßCCC" }, }; + + for (String[] c : enCases) { + assertEquals(c[1], LinguisticSort.ENGLISH.getUpperCaseValue(c[0], false)); } - private void runUpperCase(LinguisticSort sort, String[] inputs) { - // Warm up - for (int i = 0; i < 10000; i++) { - sort.getUpperCaseValue(inputs[i % inputs.length], false); - } + // Omicron in Greek + String[][] greekCases = new String[][] { new String[] { "\u039f", "\u039f" }, // capital omicron + new String[] { "Ό", "\u039f" } - // Run experiment - for (int i = 0; i < 3; i++) { - long start = System.currentTimeMillis(); - for (int j = 0; j < 1000000; j++) { - sort.getUpperCaseValue(inputs[j % inputs.length], false); - } + }; - System.out.println("[" + (i + 1) + "] Complete in " + - (System.currentTimeMillis() - start) + "ms."); - } + for (String[] c : greekCases) { + assertEquals(c[1], LinguisticSort.GREEK.getUpperCaseValue(c[0], false)); } - - /** - * Return n randomly generated strings, each containing at least - * one sharp s if useSharpS is true. - * */ - private String[] genStrings(int n, boolean useSharpS) { - Random r = new Random(); - - String[] inputs = new String[n]; - for (int i = 0; i < inputs.length; i++) { - inputs[i] = randomString(r, r.nextInt(12) + 1, r.nextBoolean()) - + (useSharpS? "ß" : "") - + (r.nextBoolean() ? - randomString(r, r.nextInt(12) + 1, r.nextBoolean()) + (useSharpS? 
"ß" : "") - : "") - + (randomString(r, r.nextInt(12) + 1, r.nextBoolean())); - - if (!useSharpS) assertFalse(inputs[i].contains("ß")); - } - return inputs; + } + + public void testUsesUpper() { + assertTrue(LinguisticSort.ENGLISH.usesUpperToGetUpperCase(false)); + assertTrue(LinguisticSort.ESPERANTO.usesUpperToGetUpperCase(false)); + assertTrue(!LinguisticSort.GERMAN.usesUpperToGetUpperCase(false)); + } + + public void testGetUpperCaseCollationKey() { + assertEquals(LinguisticSort.ENGLISH.getUpperCaseSql("x", false), + LinguisticSort.ENGLISH.getUpperCollationKeySql("x", false)); + } + + /** + * I wanted to see the perf impact of doing special-case logic in the EN locale for the German + * sharp s, ß. Rename this test (remove the leading _) to run it, e.g. in Eclipse. + *

    + * This method generates two sets of 1000 randomish Strings, one with sharp s and one without. + * Then it runs 1 million uppercase operations on each bank of strings, using the EN locale (with + * the special-case logic) and a test locale -- EO, Esperanto -- which does not have any + * special-case logic. + *

    + * For posterity, when I run this on my machine, I see results like this (averages rounded to + * nearest 10ms): + *

    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    ENGLISHESPERANTOGREEK
    with sharp s330ms260ms370ms
    without sharp s150ms130ms213ms
    + */ + public void _testUpperCasePerf() { + String[] withSharpS = genStrings(1000, true); + String[] withoutSharpS = genStrings(1000, false); + + System.out.println("ENGLISH, with ß:"); + runUpperCase(LinguisticSort.ENGLISH, withSharpS); + System.out.println("ENGLISH, without ß:"); + runUpperCase(LinguisticSort.ENGLISH, withoutSharpS); + + System.out.println("ESPERANTO, with ß:"); + runUpperCase(LinguisticSort.ESPERANTO, withSharpS); + System.out.println("ESPERANTO, without ß:"); + runUpperCase(LinguisticSort.ESPERANTO, withoutSharpS); + + // Interesting for having a lot of exceptions. + System.out.println("GREEK, with ß:"); + runUpperCase(LinguisticSort.GREEK, withSharpS); + System.out.println("GREEK, without ß:"); + runUpperCase(LinguisticSort.GREEK, withoutSharpS); + } + + private void runUpperCase(LinguisticSort sort, String[] inputs) { + // Warm up + for (int i = 0; i < 10000; i++) { + sort.getUpperCaseValue(inputs[i % inputs.length], false); } - private List cloneAndSort(LinguisticSort sort, List source) { - List result = new ArrayList(source); - Collections.sort(result, sort.getCollator()); - return result; - } + // Run experiment + for (int i = 0; i < 3; i++) { + long start = System.currentTimeMillis(); + for (int j = 0; j < 1000000; j++) { + sort.getUpperCaseValue(inputs[j % inputs.length], false); + } - /** - * Validate that the sorting of the linguistic sorts for various locales is "correct" - * The toSort below is in this order. - * 阿嗄阾啊 : āáǎa - * 仈㶚 : bā bà - * 齑: ji - */ - public void testChineseSorting() { - final List toSort = ImmutableList.of("\u963f", "\u55c4", "\u963e", - "\u554a", "\u4ec8", "\u3d9a", "\u9f51"); - assertEquals(ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", - "\u963f", "\u9f51"), cloneAndSort(LinguisticSort.CHINESE, toSort)); - assertEquals(ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", - "\u963f", "\u9f51"), cloneAndSort(LinguisticSort.CHINESE_HK, toSort)); - assertEquals(ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", - "\u963f", "\u9f51"), cloneAndSort(LinguisticSort.CHINESE_TW, toSort)); - assertEquals(ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", - "\u9f51", "\u3d9a"), cloneAndSort(LinguisticSort.CHINESE_STROKE, toSort)); - assertEquals(ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", - "\u9f51", "\u3d9a"), cloneAndSort(LinguisticSort.CHINESE_HK_STROKE, toSort)); - assertEquals(ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", - "\u9f51", "\u3d9a"), cloneAndSort(LinguisticSort.CHINESE_TW_STROKE, toSort)); - assertEquals(ImmutableList.of("\u963f", "\u55c4", "\u554a", "\u4ec8", "\u9f51", - "\u963e", "\u3d9a"), cloneAndSort(LinguisticSort.CHINESE_PINYIN, toSort)); + System.out + .println("[" + (i + 1) + "] Complete in " + (System.currentTimeMillis() - start) + "ms."); } - - public void testChineseLocaleMapping() { - assertEquals(LinguisticSort.CHINESE, - LinguisticSort.get(new Locale("zh"))); - assertEquals(LinguisticSort.CHINESE_TW, - LinguisticSort.get(new Locale("zh","TW"))); - assertEquals(LinguisticSort.CHINESE, - LinguisticSort.get(new Locale("zh","SG"))); - assertEquals(LinguisticSort.CHINESE_HK, - LinguisticSort.get(new Locale("zh","HK"))); - assertEquals(LinguisticSort.CHINESE_TW_STROKE, - LinguisticSort.get(new Locale("zh","TW","STROKE"))); - assertEquals(LinguisticSort.CHINESE_HK_STROKE, - LinguisticSort.get(new Locale("zh","HK","STROKE"))); - assertEquals(LinguisticSort.CHINESE_STROKE, - LinguisticSort.get(new 
Locale("zh","CN","STROKE"))); - assertEquals(LinguisticSort.CHINESE_STROKE, - LinguisticSort.get(new Locale("zh","SG","STROKE"))); - assertEquals(LinguisticSort.CHINESE_STROKE, - LinguisticSort.get(new Locale("zh","","STROKE"))); - assertEquals(LinguisticSort.CHINESE_PINYIN, - LinguisticSort.get(new Locale("zh","CN","PINYIN"))); - assertEquals(LinguisticSort.CHINESE_PINYIN, - LinguisticSort.get(new Locale("zh","SG","PINYIN"))); - assertEquals(LinguisticSort.CHINESE_PINYIN, - LinguisticSort.get(new Locale("zh","","PINYIN"))); + } + + /** + * Return n randomly generated strings, each containing at least one sharp s if useSharpS is true. + */ + private String[] genStrings(int n, boolean useSharpS) { + Random r = new Random(); + + String[] inputs = new String[n]; + for (int i = 0; i < inputs.length; i++) { + inputs[i] = randomString(r, r.nextInt(12) + 1, r.nextBoolean()) + (useSharpS ? "ß" : "") + + (r.nextBoolean() + ? randomString(r, r.nextInt(12) + 1, r.nextBoolean()) + (useSharpS ? "ß" : "") + : "") + + (randomString(r, r.nextInt(12) + 1, r.nextBoolean())); + + if (!useSharpS) assertFalse(inputs[i].contains("ß")); } + return inputs; + } + + private List cloneAndSort(LinguisticSort sort, List source) { + List result = new ArrayList(source); + Collections.sort(result, sort.getCollator()); + return result; + } + + /** + * Validate that the sorting of the linguistic sorts for various locales is "correct" The toSort + * below is in this order. 阿嗄阾啊 : āáǎa 仈㶚 : bā bà 齑: ji + */ + public void testChineseSorting() { + final List toSort = + ImmutableList.of("\u963f", "\u55c4", "\u963e", "\u554a", "\u4ec8", "\u3d9a", "\u9f51"); + assertEquals( + ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", "\u963f", "\u9f51"), + cloneAndSort(LinguisticSort.CHINESE, toSort)); + assertEquals( + ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", "\u963f", "\u9f51"), + cloneAndSort(LinguisticSort.CHINESE_HK, toSort)); + assertEquals( + ImmutableList.of("\u4ec8", "\u554a", "\u55c4", "\u3d9a", "\u963e", "\u963f", "\u9f51"), + cloneAndSort(LinguisticSort.CHINESE_TW, toSort)); + assertEquals( + ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", "\u9f51", "\u3d9a"), + cloneAndSort(LinguisticSort.CHINESE_STROKE, toSort)); + assertEquals( + ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", "\u9f51", "\u3d9a"), + cloneAndSort(LinguisticSort.CHINESE_HK_STROKE, toSort)); + assertEquals( + ImmutableList.of("\u4ec8", "\u963e", "\u963f", "\u554a", "\u55c4", "\u9f51", "\u3d9a"), + cloneAndSort(LinguisticSort.CHINESE_TW_STROKE, toSort)); + assertEquals( + ImmutableList.of("\u963f", "\u55c4", "\u554a", "\u4ec8", "\u9f51", "\u963e", "\u3d9a"), + cloneAndSort(LinguisticSort.CHINESE_PINYIN, toSort)); + } + + public void testChineseLocaleMapping() { + assertEquals(LinguisticSort.CHINESE, LinguisticSort.get(new Locale("zh"))); + assertEquals(LinguisticSort.CHINESE_TW, LinguisticSort.get(new Locale("zh", "TW"))); + assertEquals(LinguisticSort.CHINESE, LinguisticSort.get(new Locale("zh", "SG"))); + assertEquals(LinguisticSort.CHINESE_HK, LinguisticSort.get(new Locale("zh", "HK"))); + assertEquals(LinguisticSort.CHINESE_TW_STROKE, + LinguisticSort.get(new Locale("zh", "TW", "STROKE"))); + assertEquals(LinguisticSort.CHINESE_HK_STROKE, + LinguisticSort.get(new Locale("zh", "HK", "STROKE"))); + assertEquals(LinguisticSort.CHINESE_STROKE, + LinguisticSort.get(new Locale("zh", "CN", "STROKE"))); + assertEquals(LinguisticSort.CHINESE_STROKE, + LinguisticSort.get(new Locale("zh", 
"SG", "STROKE"))); + assertEquals(LinguisticSort.CHINESE_STROKE, LinguisticSort.get(new Locale("zh", "", "STROKE"))); + assertEquals(LinguisticSort.CHINESE_PINYIN, + LinguisticSort.get(new Locale("zh", "CN", "PINYIN"))); + assertEquals(LinguisticSort.CHINESE_PINYIN, + LinguisticSort.get(new Locale("zh", "SG", "PINYIN"))); + assertEquals(LinguisticSort.CHINESE_PINYIN, LinguisticSort.get(new Locale("zh", "", "PINYIN"))); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/OracleUpperTableGeneratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/OracleUpperTableGeneratorTest.java index 2e101cf78d6..ddf16a1da92 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/OracleUpperTableGeneratorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/i18n/OracleUpperTableGeneratorTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,376 +17,370 @@ */ package org.apache.phoenix.util.i18n; -import junit.framework.TestCase; - import java.io.PrintWriter; import java.io.StringWriter; import java.util.Locale; +import junit.framework.TestCase; + /** * This test class was partially copied from Salesforce's internationalization utility library - * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. - * The i18n-util library is not maintained anymore, and it was using vulnerable dependencies. - * For more info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 - * - * A generator for OracleUpperTable.java. This generator creates an OracleUpperTable for each - * of a number of {@link UpperExpr PL/SQL expressions}, which simply tabulates the these - * differences, allowing them to be compensated for. + * (com.salesforce.i18n:i18n-util:1.0.4), which was released under the 3-clause BSD License. The + * i18n-util library is not maintained anymore, and it was using vulnerable dependencies. For more + * info, see: https://issues.apache.org/jira/browse/PHOENIX-6818 A generator for + * OracleUpperTable.java. This generator creates an OracleUpperTable for each of a number of + * {@link UpperExpr PL/SQL expressions}, which simply tabulates the these differences, allowing them + * to be compensated for. *

    - * May be run as a JUnit test or as a stand-alone Java application. Run the output in Oracle - * to generate the source for OracleUpperTable.java. - * + * May be run as a JUnit test or as a stand-alone Java application. Run the output in Oracle to + * generate the source for OracleUpperTable.java. * @see OracleUpper * @see OracleUpperTable */ public class OracleUpperTableGeneratorTest extends TestCase { - private static final char[] charsToTest = new char[] { - // i may be messed up for Turkic languages where it's supposed to upper-case - // to dotted I. - 'i', - // Sharp s may upper-case to SS or itself, depending on the details. - 'ß', - // Oracle removes tonos from all of these when upper-casing. - 'Ά', 'Έ', 'Ή', 'Ί', 'Ό', 'Ύ','Ώ','ά','έ','ή','ί','ό','ύ','ώ' - }; + private static final char[] charsToTest = new char[] { + // i may be messed up for Turkic languages where it's supposed to upper-case + // to dotted I. + 'i', + // Sharp s may upper-case to SS or itself, depending on the details. + 'ß', + // Oracle removes tonos from all of these when upper-casing. + 'Ά', 'Έ', 'Ή', 'Ί', 'Ό', 'Ύ', 'Ώ', 'ά', 'έ', 'ή', 'ί', 'ό', 'ύ', 'ώ' }; + + /** + * Most of these were just generated from the LinguisticSort enum: + * + *

    +   * 
    +   *     public static void generateValuesFromLinguisticSort() {
    +   *         for (LinguisticSort s : LinguisticSort.values()) {
    +   *             System.out.println(String.format("%1$s(\"%2$s\", \"%3$s\"),",
    +   *                 s.name(), s.getUpperSqlFormatString(), s.getLocale().getLanguage()));
    +   *         }
    +   *     }
    +   * 
    +   * 
    + * + * Each value is a PL/SQL upper case expression that may return different results than Java's + * String.toUpperCase method for the given language. + */ + private enum UpperExpr { + ENGLISH("upper(%s)", "en"), + GERMAN("nls_upper(%s, 'nls_sort=xgerman')", "de"), + FRENCH("nls_upper(%s, 'nls_sort=xfrench')", "fr"), + ITALIAN("nls_upper(%s, 'nls_sort=italian')", "it"), + SPANISH("nls_upper(%s, 'nls_sort=spanish')", "es"), + CATALAN("nls_upper(%s, 'nls_sort=catalan')", "ca"), + DUTCH("nls_upper(%s, 'nls_sort=dutch')", "nl"), + PORTUGUESE("nls_upper(%s, 'nls_sort=west_european')", "pt"), + DANISH("nls_upper(%s, 'nls_sort=danish')", "da"), + NORWEGIAN("nls_upper(%s, 'nls_sort=norwegian')", "no"), + SWEDISH("nls_upper(%s, 'nls_sort=swedish')", "sv"), + FINNISH("nls_upper(%s, 'nls_sort=finnish')", "fi"), + CZECH("nls_upper(%s, 'nls_sort=xczech')", "cs"), + POLISH("nls_upper(%s, 'nls_sort=polish')", "pl"), + TURKISH("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tr"), + CHINESE_HK("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh"), + CHINESE_TW("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh"), + CHINESE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_radical_m')", "zh"), + JAPANESE("nls_upper(to_single_byte(%s), 'nls_sort=japanese_m')", "ja"), + KOREAN("nls_upper(to_single_byte(%s), 'nls_sort=korean_m')", "ko"), + RUSSIAN("nls_upper(%s, 'nls_sort=russian')", "ru"), + BULGARIAN("nls_upper(%s, 'nls_sort=bulgarian')", "bg"), + INDONESIAN("nls_upper(%s, 'nls_sort=indonesian')", "in"), + ROMANIAN("nls_upper(%s, 'nls_sort=romanian')", "ro"), + VIETNAMESE("nls_upper(%s, 'nls_sort=vietnamese')", "vi"), + UKRAINIAN("nls_upper(%s, 'nls_sort=ukrainian')", "uk"), + HUNGARIAN("nls_upper(%s, 'nls_sort=xhungarian')", "hu"), + GREEK("nls_upper(%s, 'nls_sort=greek')", "el"), + HEBREW("nls_upper(%s, 'nls_sort=hebrew')", "iw"), + SLOVAK("nls_upper(%s, 'nls_sort=slovak')", "sk"), + SERBIAN_CYRILLIC("nls_upper(%s, 'nls_sort=generic_m')", "sr"), + SERBIAN_LATIN("nls_upper(%s, 'nls_sort=xcroatian')", "sh"), + BOSNIAN("nls_upper(%s, 'nls_sort=xcroatian')", "bs"), + GEORGIAN("nls_upper(%s, 'nls_sort=binary')", "ka"), + BASQUE("nls_upper(%s, 'nls_sort=west_european')", "eu"), + MALTESE("nls_upper(%s, 'nls_sort=west_european')", "mt"), + ROMANSH("nls_upper(%s, 'nls_sort=west_european')", "rm"), + LUXEMBOURGISH("nls_upper(%s, 'nls_sort=west_european')", "lb"), + IRISH("nls_upper(%s, 'nls_sort=west_european')", "ga"), + SLOVENE("nls_upper(%s, 'nls_sort=xslovenian')", "sl"), + CROATIAN("nls_upper(%s, 'nls_sort=xcroatian')", "hr"), + MALAY("nls_upper(%s, 'nls_sort=malay')", "ms"), + ARABIC("nls_upper(%s, 'nls_sort=arabic')", "ar"), + ESTONIAN("nls_upper(%s, 'nls_sort=estonian')", "et"), + ICELANDIC("nls_upper(%s, 'nls_sort=icelandic')", "is"), + LATVIAN("nls_upper(%s, 'nls_sort=latvian')", "lv"), + LITHUANIAN("nls_upper(%s, 'nls_sort=lithuanian')", "lt"), + KYRGYZ("nls_upper(%s, 'nls_sort=binary')", "ky"), + KAZAKH("nls_upper(%s, 'nls_sort=binary')", "kk"), + TAJIK("nls_upper(%s, 'nls_sort=russian')", "tg"), + BELARUSIAN("nls_upper(%s, 'nls_sort=russian')", "be"), + TURKMEN("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tk"), + AZERBAIJANI("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "az"), + ARMENIAN("nls_upper(%s, 'nls_sort=binary')", "hy"), + THAI("nls_upper(%s, 'nls_sort=thai_dictionary')", "th"), + HINDI("nls_upper(%s, 'nls_sort=binary')", "hi"), + URDU("nls_upper(%s, 'nls_sort=arabic')", "ur"), + BENGALI("nls_upper(%s, 
'nls_sort=bengali')", "bn"), + TAMIL("nls_upper(%s, 'nls_sort=binary')", "ta"), + ESPERANTO("upper(%s)", "eo"), + + // for formulas + XWEST_EUROPEAN("NLS_UPPER(%s,'NLS_SORT=xwest_european')", "en"); + + private final String expr; + private final Locale locale; /** - * Most of these were just generated from the LinguisticSort enum: - * - *
    
    -     *     public static void generateValuesFromLinguisticSort() {
    -     *         for (LinguisticSort s : LinguisticSort.values()) {
    -     *             System.out.println(String.format("%1$s(\"%2$s\", \"%3$s\"),",
    -     *                 s.name(), s.getUpperSqlFormatString(), s.getLocale().getLanguage()));
    -     *         }
    -     *     }
    -     * 
    - * - * Each value is a PL/SQL upper case expression that may return different results than - * Java's String.toUpperCase method for the given language. + * @param expr the PL/SQL expression with %s wildcards for the single string input. + * @param langCode ISO code for the language to use, as in str.toUpperCase(new + * Locale(langCode)). */ - private enum UpperExpr { - ENGLISH("upper(%s)", "en"), - GERMAN("nls_upper(%s, 'nls_sort=xgerman')", "de"), - FRENCH("nls_upper(%s, 'nls_sort=xfrench')", "fr"), - ITALIAN("nls_upper(%s, 'nls_sort=italian')", "it"), - SPANISH("nls_upper(%s, 'nls_sort=spanish')", "es"), - CATALAN("nls_upper(%s, 'nls_sort=catalan')", "ca"), - DUTCH("nls_upper(%s, 'nls_sort=dutch')", "nl"), - PORTUGUESE("nls_upper(%s, 'nls_sort=west_european')", "pt"), - DANISH("nls_upper(%s, 'nls_sort=danish')", "da"), - NORWEGIAN("nls_upper(%s, 'nls_sort=norwegian')", "no"), - SWEDISH("nls_upper(%s, 'nls_sort=swedish')", "sv"), - FINNISH("nls_upper(%s, 'nls_sort=finnish')", "fi"), - CZECH("nls_upper(%s, 'nls_sort=xczech')", "cs"), - POLISH("nls_upper(%s, 'nls_sort=polish')", "pl"), - TURKISH("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tr"), - CHINESE_HK("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh"), - CHINESE_TW("nls_upper(to_single_byte(%s), 'nls_sort=tchinese_radical_m')", "zh"), - CHINESE("nls_upper(to_single_byte(%s), 'nls_sort=schinese_radical_m')", "zh"), - JAPANESE("nls_upper(to_single_byte(%s), 'nls_sort=japanese_m')", "ja"), - KOREAN("nls_upper(to_single_byte(%s), 'nls_sort=korean_m')", "ko"), - RUSSIAN("nls_upper(%s, 'nls_sort=russian')", "ru"), - BULGARIAN("nls_upper(%s, 'nls_sort=bulgarian')", "bg"), - INDONESIAN("nls_upper(%s, 'nls_sort=indonesian')", "in"), - ROMANIAN("nls_upper(%s, 'nls_sort=romanian')", "ro"), - VIETNAMESE("nls_upper(%s, 'nls_sort=vietnamese')", "vi"), - UKRAINIAN("nls_upper(%s, 'nls_sort=ukrainian')", "uk"), - HUNGARIAN("nls_upper(%s, 'nls_sort=xhungarian')", "hu"), - GREEK("nls_upper(%s, 'nls_sort=greek')", "el"), - HEBREW("nls_upper(%s, 'nls_sort=hebrew')", "iw"), - SLOVAK("nls_upper(%s, 'nls_sort=slovak')", "sk"), - SERBIAN_CYRILLIC("nls_upper(%s, 'nls_sort=generic_m')", "sr"), - SERBIAN_LATIN("nls_upper(%s, 'nls_sort=xcroatian')", "sh"), - BOSNIAN("nls_upper(%s, 'nls_sort=xcroatian')", "bs"), - GEORGIAN("nls_upper(%s, 'nls_sort=binary')", "ka"), - BASQUE("nls_upper(%s, 'nls_sort=west_european')", "eu"), - MALTESE("nls_upper(%s, 'nls_sort=west_european')", "mt"), - ROMANSH("nls_upper(%s, 'nls_sort=west_european')", "rm"), - LUXEMBOURGISH("nls_upper(%s, 'nls_sort=west_european')", "lb"), - IRISH("nls_upper(%s, 'nls_sort=west_european')", "ga"), - SLOVENE("nls_upper(%s, 'nls_sort=xslovenian')", "sl"), - CROATIAN("nls_upper(%s, 'nls_sort=xcroatian')", "hr"), - MALAY("nls_upper(%s, 'nls_sort=malay')", "ms"), - ARABIC("nls_upper(%s, 'nls_sort=arabic')", "ar"), - ESTONIAN("nls_upper(%s, 'nls_sort=estonian')", "et"), - ICELANDIC("nls_upper(%s, 'nls_sort=icelandic')", "is"), - LATVIAN("nls_upper(%s, 'nls_sort=latvian')", "lv"), - LITHUANIAN("nls_upper(%s, 'nls_sort=lithuanian')", "lt"), - KYRGYZ("nls_upper(%s, 'nls_sort=binary')", "ky"), - KAZAKH("nls_upper(%s, 'nls_sort=binary')", "kk"), - TAJIK("nls_upper(%s, 'nls_sort=russian')", "tg"), - BELARUSIAN("nls_upper(%s, 'nls_sort=russian')", "be"), - TURKMEN("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "tk"), - AZERBAIJANI("nls_upper(translate(%s,'i','İ'), 'nls_sort=xturkish')", "az"), - ARMENIAN("nls_upper(%s, 'nls_sort=binary')", "hy"), - 
THAI("nls_upper(%s, 'nls_sort=thai_dictionary')", "th"), - HINDI("nls_upper(%s, 'nls_sort=binary')", "hi"), - URDU("nls_upper(%s, 'nls_sort=arabic')", "ur"), - BENGALI("nls_upper(%s, 'nls_sort=bengali')", "bn"), - TAMIL("nls_upper(%s, 'nls_sort=binary')", "ta"), - ESPERANTO("upper(%s)", "eo"), - - // for formulas - XWEST_EUROPEAN("NLS_UPPER(%s,'NLS_SORT=xwest_european')", "en"); - - - private final String expr; - private final Locale locale; - - /** - * @param expr the PL/SQL expression with %s wildcards for the single string input. - * @param langCode ISO code for the language to use, as in - * str.toUpperCase(new Locale(langCode)). - */ - private UpperExpr(String expr, String langCode) { - this.expr = expr; - this.locale = new Locale(langCode); - } - - private String getSql(char value) { - return String.format(expr, "unistr('\\" + hexCodePoint(value) + "')"); - } + private UpperExpr(String expr, String langCode) { + this.expr = expr; + this.locale = new Locale(langCode); + } - private String getJava(char value) { - return Character.toString(value).toUpperCase(locale); - } + private String getSql(char value) { + return String.format(expr, "unistr('\\" + hexCodePoint(value) + "')"); } - /** - * This method generates some anonymous PL/SQL routines which, when run, will generate an - * OracleUpperTable value for each {@code UpperExpr}. Each table is created by comparing - * the result of {@link String#toUpperCase(Locale)} against a - * {@link UpperExpr#getSql(char) PL/SQL expression}. The table contains all deviations from - * Oracle for each character in a {@link #charsToTest given set} that we know are fussy. - */ - public static void generateUpperCaseExceptions(PrintWriter out) { - - out.println("set serveroutput on;"); - out.println("set define off;"); // So we don't have to escape ampersands. - out.println("/"); - out.println("BEGIN"); - - putLine(out, "/*"); - putLine(out, " * Licensed to the Apache Software Foundation (ASF) under one or more"); - putLine(out, " * contributor license agreements. See the NOTICE file distributed with"); - putLine(out, " * this work for additional information regarding copyright ownership."); - putLine(out, " * The ASF licenses this file to you under the Apache License, Version 2.0"); - putLine(out, " * (the \"License\"); you may not use this file except in compliance with"); - putLine(out, " * the License. You may obtain a copy of the License at"); - putLine(out, " *"); - putLine(out, " * http://www.apache.org/licenses/LICENSE-2.0"); - putLine(out, " *"); - putLine(out, " * Unless required by applicable law or agreed to in writing, software"); - putLine(out, " * distributed under the License is distributed on an \"AS IS\" BASIS,"); - putLine(out, " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied."); - putLine(out, " * See the License for the specific language governing permissions and"); - putLine(out, " * limitations under the License."); - putLine(out, " */"); - - putLine(out, "package i18n;"); - putLine(out, ""); - putLine(out, "import java.util.Locale;"); - putLine(out, "import edu.umd.cs.findbugs.annotations.NonNull;"); - putLine(out, ""); - putLine(out, "/**"); - putLine(out, " * Generated by " + OracleUpperTableGeneratorTest.class.getCanonicalName()); - putLine(out, " *

    "); - putLine(out, " * An instance of this enum codifies the difference between executing a " + - "{@link #getSqlFormatString() particular PL/SQL"); - putLine(out, " * expression} in Oracle and executing {@link String#toUpperCase(Locale)} " + - "for a {@link #getLocale() particular locale}"); - putLine(out, " * in Java. These differences (also called exceptions) are expressed by " + - "the output of {@link #getUpperCaseExceptions()}"); - putLine(out, " * and {@link #getUpperCaseExceptionMapping(char)}."); - putLine(out, " *

    "); - putLine(out, " * The tables are generated by testing a particular set of characters " + - "that are known to contain exceptions and"); - putLine(out, " * {@link #toUpperCase(String) may be used} to compensate for exceptions " + - "found and generate output in Java that will be"); - putLine(out, " * consistent with Oracle for the given (sql expression, locale) pair " + - "over all tested values."); - putLine(out, " *

    "); - putLine(out, " * Characters tested:"); - putLine(out, " *

      "); + private String getJava(char value) { + return Character.toString(value).toUpperCase(locale); + } + } + + /** + * This method generates some anonymous PL/SQL routines which, when run, will generate an + * OracleUpperTable value for each {@code UpperExpr}. Each table is created by comparing the + * result of {@link String#toUpperCase(Locale)} against a {@link UpperExpr#getSql(char) PL/SQL + * expression}. The table contains all deviations from Oracle for each character in a + * {@link #charsToTest given set} that we know are fussy. + */ + public static void generateUpperCaseExceptions(PrintWriter out) { + + out.println("set serveroutput on;"); + out.println("set define off;"); // So we don't have to escape ampersands. + out.println("/"); + out.println("BEGIN"); + + putLine(out, "/*"); + putLine(out, " * Licensed to the Apache Software Foundation (ASF) under one or more"); + putLine(out, " * contributor license agreements. See the NOTICE file distributed with"); + putLine(out, " * this work for additional information regarding copyright ownership."); + putLine(out, " * The ASF licenses this file to you under the Apache License, Version 2.0"); + putLine(out, " * (the \"License\"); you may not use this file except in compliance with"); + putLine(out, " * the License. You may obtain a copy of the License at"); + putLine(out, " *"); + putLine(out, " * http://www.apache.org/licenses/LICENSE-2.0"); + putLine(out, " *"); + putLine(out, " * Unless required by applicable law or agreed to in writing, software"); + putLine(out, " * distributed under the License is distributed on an \"AS IS\" BASIS,"); + putLine(out, " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied."); + putLine(out, " * See the License for the specific language governing permissions and"); + putLine(out, " * limitations under the License."); + putLine(out, " */"); + + putLine(out, "package i18n;"); + putLine(out, ""); + putLine(out, "import java.util.Locale;"); + putLine(out, "import edu.umd.cs.findbugs.annotations.NonNull;"); + putLine(out, ""); + putLine(out, "/**"); + putLine(out, " * Generated by " + OracleUpperTableGeneratorTest.class.getCanonicalName()); + putLine(out, " *

      "); + putLine(out, " * An instance of this enum codifies the difference between executing a " + + "{@link #getSqlFormatString() particular PL/SQL"); + putLine(out, " * expression} in Oracle and executing {@link String#toUpperCase(Locale)} " + + "for a {@link #getLocale() particular locale}"); + putLine(out, " * in Java. These differences (also called exceptions) are expressed by " + + "the output of {@link #getUpperCaseExceptions()}"); + putLine(out, " * and {@link #getUpperCaseExceptionMapping(char)}."); + putLine(out, " *

      "); + putLine(out, " * The tables are generated by testing a particular set of characters " + + "that are known to contain exceptions and"); + putLine(out, " * {@link #toUpperCase(String) may be used} to compensate for exceptions " + + "found and generate output in Java that will be"); + putLine(out, " * consistent with Oracle for the given (sql expression, locale) pair " + + "over all tested values."); + putLine(out, " *

      "); + putLine(out, " * Characters tested:"); + putLine(out, " *

        "); + for (char c : charsToTest) { + putLine(out, " *
      • U+%1$s &#x%1$s
      • ", hexCodePoint(c)); + } + putLine(out, " *
      "); + putLine(out, " *"); + putLine(out, " * @see OracleUpper"); + putLine(out, " */"); + putLine(out, "public enum OracleUpperTable {"); + + for (UpperExpr u : UpperExpr.values()) { + put(out, " %s(\"%s\", \"%s\", \"", u.name(), u.expr, u.locale.getLanguage()); + + // Don't generate any exceptions for EO, it's a test value and + // I wanna use it as a baseline. + if (u != UpperExpr.ESPERANTO) { for (char c : charsToTest) { - putLine(out, " *
    • U+%1$s &#x%1$s
    • ", hexCodePoint(c)); + String template = "IF %1$s <> '%2$s' THEN dbms_output.put(unistr('\\%3$s')); END IF;"; + out.println(String.format(template, u.getSql(c), u.getJava(c), hexCodePoint(c))); } - putLine(out, " *
    "); - putLine(out, " *"); - putLine(out, " * @see OracleUpper"); - putLine(out, " */"); - putLine(out, "public enum OracleUpperTable {"); - - for (UpperExpr u : UpperExpr.values()) { - put(out, " %s(\"%s\", \"%s\", \"", u.name(), u.expr, u.locale.getLanguage()); - - // Don't generate any exceptions for EO, it's a test value and - // I wanna use it as a baseline. - if (u != UpperExpr.ESPERANTO) { - for (char c : charsToTest) { - String template = "IF %1$s <> '%2$s' THEN dbms_output.put(unistr('\\%3$s')); END IF;"; - out.println(String.format(template, u.getSql(c), u.getJava(c), hexCodePoint(c))); - } - } - - putLine(out, "\"),"); - } - - putLine(out, " ;"); - putLine(out, ""); - putLine(out, " private final String sql;"); - putLine(out, " private final Locale locale;"); - putLine(out, " private final char[] exceptionChars;"); - putLine(out, ""); - putLine(out, " private OracleUpperTable(String sql, String lang, " + - "String exceptionChars) {"); - putLine(out, " this.sql = sql;"); - putLine(out, " this.locale = new Locale(lang);"); - putLine(out, " this.exceptionChars = exceptionChars.toCharArray();"); - putLine(out, " }"); - putLine(out, ""); - putLine(out, " /**"); - putLine(out, " * Return an array containing characters for which Java's " + - "String.toUpperCase method is known to"); - putLine(out, " * deviate from the result of Oracle evaluating {@link #getSql(String) " + - "this expression}."); - putLine(out, " *"); - putLine(out, " * @return an array containing all exceptional characters."); - putLine(out, " */"); - putLine(out, " final @NonNull char[] getUpperCaseExceptions() {"); - putLine(out, " return exceptionChars;"); - putLine(out, " }"); - putLine(out, ""); - putLine(out, " /**"); - putLine(out, " * For a character, {@code exception}, contained in the String " + - "returned from"); - putLine(out, " * {@link #getUpperCaseExceptions()}, this method returns the " + - "anticipated result of upper-casing"); - putLine(out, " * the character in Oracle when evaluating {@link #getSql(String) " + - "this expression}."); - putLine(out, " *"); - putLine(out, " * @return the upper case of {@code exception}, according to what " + - "Oracle would do."); - putLine(out, " * @throws IllegalArgumentException"); - putLine(out, " * if the character is not contained in the String returned"); - putLine(out, " * by {@link #getUpperCaseExceptions()}."); - putLine(out, " */"); - putLine(out, " final String getUpperCaseExceptionMapping(char exception) {"); - - putLine(out, " switch (exception) {"); - for (char c : charsToTest){ - putLine(out, " case '%s':", "" + c); - putLine(out, " switch (this) {"); - for (UpperExpr u : UpperExpr.values()) { - if (u == UpperExpr.ESPERANTO) { - continue; - } - String template = "IF %1$s <> '%2$s' THEN dbms_output.put_line(' " + - "case %3$s: return ' || '\"' || %1$s || '\"; // %2$s'); END IF;"; - out.println(String.format(template, - u.getSql(c), - u.getJava(c), - u.name())); - } - putLine(out, " default: // fall out"); - putLine(out, " }"); - putLine(out, " break;"); - } - putLine(out, " }"); - - putLine(out, " throw new IllegalArgumentException("); - putLine(out, " \"No upper case mapping for char=\" + exception"); - putLine(out, " + \" and this=\" + this);"); - putLine(out, " }"); - putLine(out, ""); - - putLine(out, " public final Locale getLocale() {"); - putLine(out, " return locale;"); - putLine(out, " }"); - putLine(out, ""); - - putLine(out, " public String getSqlFormatString() {"); - putLine(out, " return sql;"); - putLine(out, " }"); - putLine(out, ""); 
- - putLine(out, " public String getSql(String expr) {"); - putLine(out, " return String.format(sql, expr);"); - putLine(out, " }"); - putLine(out, ""); - - putLine(out, " public String toUpperCase(String value) {"); - putLine(out, " return OracleUpper.toUpperCase(this, value);"); - putLine(out, " }"); - putLine(out, ""); - - putLine(out, " public static final OracleUpperTable forLinguisticSort(String sort) {"); - putLine(out, " return Enum.valueOf(OracleUpperTable.class, sort);"); - putLine(out, " }"); - putLine(out, "}"); - - out.println("END;"); - } + } - /** Escape single quotes by doubling them up (i.e. two single quotes in a row). */ - private static String sqlEscape(String str) { - //return TextUtil.replaceChar(str, '\'', "''"); - return str.replace("'", "''"); + putLine(out, "\"),"); } - /** Return four hex digits of the character's codepoint. */ - private static String hexCodePoint(char c) { - String cp = Integer.toHexString(c); - while (cp.length() < 4) { - cp = "0" + cp; + putLine(out, " ;"); + putLine(out, ""); + putLine(out, " private final String sql;"); + putLine(out, " private final Locale locale;"); + putLine(out, " private final char[] exceptionChars;"); + putLine(out, ""); + putLine(out, + " private OracleUpperTable(String sql, String lang, " + "String exceptionChars) {"); + putLine(out, " this.sql = sql;"); + putLine(out, " this.locale = new Locale(lang);"); + putLine(out, " this.exceptionChars = exceptionChars.toCharArray();"); + putLine(out, " }"); + putLine(out, ""); + putLine(out, " /**"); + putLine(out, " * Return an array containing characters for which Java's " + + "String.toUpperCase method is known to"); + putLine(out, " * deviate from the result of Oracle evaluating {@link #getSql(String) " + + "this expression}."); + putLine(out, " *"); + putLine(out, " * @return an array containing all exceptional characters."); + putLine(out, " */"); + putLine(out, " final @NonNull char[] getUpperCaseExceptions() {"); + putLine(out, " return exceptionChars;"); + putLine(out, " }"); + putLine(out, ""); + putLine(out, " /**"); + putLine(out, + " * For a character, {@code exception}, contained in the String " + "returned from"); + putLine(out, " * {@link #getUpperCaseExceptions()}, this method returns the " + + "anticipated result of upper-casing"); + putLine(out, " * the character in Oracle when evaluating {@link #getSql(String) " + + "this expression}."); + putLine(out, " *"); + putLine(out, + " * @return the upper case of {@code exception}, according to what " + "Oracle would do."); + putLine(out, " * @throws IllegalArgumentException"); + putLine(out, " * if the character is not contained in the String returned"); + putLine(out, " * by {@link #getUpperCaseExceptions()}."); + putLine(out, " */"); + putLine(out, " final String getUpperCaseExceptionMapping(char exception) {"); + + putLine(out, " switch (exception) {"); + for (char c : charsToTest) { + putLine(out, " case '%s':", "" + c); + putLine(out, " switch (this) {"); + for (UpperExpr u : UpperExpr.values()) { + if (u == UpperExpr.ESPERANTO) { + continue; } - return cp; + String template = "IF %1$s <> '%2$s' THEN dbms_output.put_line(' " + + "case %3$s: return ' || '\"' || %1$s || '\"; // %2$s'); END IF;"; + out.println(String.format(template, u.getSql(c), u.getJava(c), u.name())); + } + putLine(out, " default: // fall out"); + putLine(out, " }"); + putLine(out, " break;"); } - - /** Send to standard output a dbms_output.put_line call that will emit the result of - * {@link String#format(String, Object...) 
formatting} {@code str} with {@code args}. - * - * @param str a format string - * @param args optional format arguments. - */ - private static void put(PrintWriter out, String str, String... args) { - out.println("dbms_output.put('" + format(str, args) + "');"); + putLine(out, " }"); + + putLine(out, " throw new IllegalArgumentException("); + putLine(out, " \"No upper case mapping for char=\" + exception"); + putLine(out, " + \" and this=\" + this);"); + putLine(out, " }"); + putLine(out, ""); + + putLine(out, " public final Locale getLocale() {"); + putLine(out, " return locale;"); + putLine(out, " }"); + putLine(out, ""); + + putLine(out, " public String getSqlFormatString() {"); + putLine(out, " return sql;"); + putLine(out, " }"); + putLine(out, ""); + + putLine(out, " public String getSql(String expr) {"); + putLine(out, " return String.format(sql, expr);"); + putLine(out, " }"); + putLine(out, ""); + + putLine(out, " public String toUpperCase(String value) {"); + putLine(out, " return OracleUpper.toUpperCase(this, value);"); + putLine(out, " }"); + putLine(out, ""); + + putLine(out, " public static final OracleUpperTable forLinguisticSort(String sort) {"); + putLine(out, " return Enum.valueOf(OracleUpperTable.class, sort);"); + putLine(out, " }"); + putLine(out, "}"); + + out.println("END;"); + } + + /** Escape single quotes by doubling them up (i.e. two single quotes in a row). */ + private static String sqlEscape(String str) { + // return TextUtil.replaceChar(str, '\'', "''"); + return str.replace("'", "''"); + } + + /** Return four hex digits of the character's codepoint. */ + private static String hexCodePoint(char c) { + String cp = Integer.toHexString(c); + while (cp.length() < 4) { + cp = "0" + cp; } - - /** Send to standard output a dbms_output.put call that will emit the result of - * {@link #format(String, String...) formatting} {@code str} with {@code args}. - * - * @param str a format string - * @param args optional format arguments. - */ - private static void putLine(PrintWriter out, String str, String... args) { - out.println("dbms_output.put_line('" + format(str, args) + "');"); + return cp; + } + + /** + * Send to standard output a dbms_output.put_line call that will emit the result of + * {@link String#format(String, Object...) formatting} {@code str} with {@code args}. + * @param str a format string + * @param args optional format arguments. + */ + private static void put(PrintWriter out, String str, String... args) { + out.println("dbms_output.put('" + format(str, args) + "');"); + } + + /** + * Send to standard output a dbms_output.put call that will emit the result of + * {@link #format(String, String...) formatting} {@code str} with {@code args}. + * @param str a format string + * @param args optional format arguments. + */ + private static void putLine(PrintWriter out, String str, String... args) { + out.println("dbms_output.put_line('" + format(str, args) + "');"); + } + + /** + * Both {@code str} and {@code args} will be {@link #sqlEscape(String) sql escaped}, and then + * {@code str} will be {@link String#format(String, Object...) formatted} using {@code args}. + */ + private static String format(String str, String... 
args) { + str = sqlEscape(str); + if (args != null && args.length > 0) { + for (int i = 0; i < args.length; i++) { + args[i] = sqlEscape(args[i]); + } + str = String.format(str, (Object[]) args); } + return str; + } - /** - * Both {@code str} and {@code args} will be {@link #sqlEscape(String) sql escaped}, - * and then {@code str} will be {@link String#format(String, Object...) formatted} - * using {@code args}. - */ - private static String format(String str, String... args) { - str = sqlEscape(str); - if (args != null && args.length > 0) { - for (int i = 0; i < args.length; i++) { - args[i] = sqlEscape(args[i]); - } - str = String.format(str, (Object[])args); - } - return str; - } + public static void main(String[] args) { + generateUpperCaseExceptions(new PrintWriter(System.out)); + } - public static void main(String[] args) { - generateUpperCaseExceptions(new PrintWriter(System.out)); - } - - public void testGenerateUpperCaseExceptions() { - // Don't bother logging it, just see if there's an exception - generateUpperCaseExceptions(new PrintWriter(new StringWriter())); - } + public void testGenerateUpperCaseExceptions() { + // Don't bother logging it, just see if there's an exception + generateUpperCaseExceptions(new PrintWriter(new StringWriter())); + } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/json/JsonUpsertExecutorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/json/JsonUpsertExecutorTest.java index 6ac9cf9b40d..c2921773de5 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/util/json/JsonUpsertExecutorTest.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/util/json/JsonUpsertExecutorTest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,31 +29,33 @@ public class JsonUpsertExecutorTest extends AbstractUpsertExecutorTest, Object> { - private UpsertExecutor, Object> upsertExecutor; + private UpsertExecutor, Object> upsertExecutor; - @Override - protected UpsertExecutor, Object> getUpsertExecutor() { - return upsertExecutor; - } - - @Override - protected Map createRecord(Object... columnValues) throws IOException { - Map ret = new HashMap(columnValues.length); - int min = Math.min(columnInfoList.size(), columnValues.length); - for (int i = 0; i < min; i++) { - ret.put(columnInfoList.get(i).getColumnName().replace("\"", "").toLowerCase(), columnValues[i]); - } - return ret; - } - - @Before - public void setUp() throws SQLException { - super.setUp(); - upsertExecutor = new JsonUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener); - } + @Override + protected UpsertExecutor, Object> getUpsertExecutor() { + return upsertExecutor; + } - @Override - protected UpsertExecutor, Object> getUpsertExecutor(Connection conn) { - return new JsonUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener); + @Override + protected Map createRecord(Object... 
columnValues) throws IOException { + Map ret = new HashMap(columnValues.length); + int min = Math.min(columnInfoList.size(), columnValues.length); + for (int i = 0; i < min; i++) { + ret.put(columnInfoList.get(i).getColumnName().replace("\"", "").toLowerCase(), + columnValues[i]); } + return ret; + } + + @Before + public void setUp() throws SQLException { + super.setUp(); + upsertExecutor = + new JsonUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener); + } + + @Override + protected UpsertExecutor, Object> getUpsertExecutor(Connection conn) { + return new JsonUpsertExecutor(conn, columnInfoList, preparedStatement, upsertListener); + } } diff --git a/phoenix-hbase-compat-2.4.1/pom.xml b/phoenix-hbase-compat-2.4.1/pom.xml index a41269ffa7e..fdf5899a250 100644 --- a/phoenix-hbase-compat-2.4.1/pom.xml +++ b/phoenix-hbase-compat-2.4.1/pom.xml @@ -15,10 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -35,7 +32,7 @@ - + org.apache.hbase hbase-client @@ -67,7 +64,7 @@ ${hbase24.compat.version} provided - + org.apache.hbase hbase-protocol ${hbase24.compat.version} diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java index 47cf21d4cba..1c6507b0611 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,14 +25,14 @@ public abstract class CompatDelegateHTable implements Table { - protected final Table delegate; + protected final Table delegate; - public CompatDelegateHTable(Table delegate) { - this.delegate = delegate; - } + public CompatDelegateHTable(Table delegate) { + this.delegate = delegate; + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - return delegate.mutateRow(rm); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + return delegate.mutateRow(rm); + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java index 20d0eaae6d1..a79d128f2bf 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,12 +30,10 @@ public class CompatIndexHalfStoreFileReader extends StoreFileReader { - public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, - final Configuration conf, - final ReaderContext readerContext, - final HFileInfo hFileInfo, Path p) throws IOException { - super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); - } - + public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, + final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p) + throws IOException { + super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java index 45681c45231..5372e0e7c0e 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,10 +23,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; public abstract class CompatIndexedHLogReader extends ProtobufLogReader { - @Override - protected void initAfterCompression() throws IOException { - conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, - "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); - super.initAfterCompression(); - } + @Override + protected void initAfterCompression() throws IOException { + conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, + "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); + super.initAfterCompression(); + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java index 4939fdb4ca6..a9f9427c6ab 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,16 +19,13 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner; - public class CompatLocalIndexStoreFileScanner extends StoreFileScanner { - public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, - boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, reader - .getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); - } - + public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, + boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, + boolean canOptimizeForNonNullColumn) { + super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java index c4c98c68925..f228a954d81 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,44 +30,42 @@ public abstract class CompatOmidTransactionTable implements Table { - protected Table hTable; + protected Table hTable; - public CompatOmidTransactionTable(Table hTable) { - this.hTable = hTable; - } + public CompatOmidTransactionTable(Table hTable) { + this.hTable = hTable; + } - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return hTable.getTableDescriptor(); - } + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return hTable.getTableDescriptor(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, RowMutations mutation) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations mutation) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { - throw new UnsupportedOperationException(); - } + @Override + public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java index 45dab147713..e138b3f9730 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,14 +27,12 @@ * passing off the call to the delegate {@link RpcScheduler}. */ public abstract class CompatPhoenixRpcScheduler extends RpcScheduler { - protected RpcScheduler delegate; + protected RpcScheduler delegate; - @Override - public boolean dispatch(CallRunner task) throws IOException, InterruptedException { - return compatDispatch(task); - } + @Override + public boolean dispatch(CallRunner task) throws IOException, InterruptedException { + return compatDispatch(task); + } - public abstract boolean compatDispatch(CallRunner task) - throws IOException, InterruptedException; + public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException; } - diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java index 671bed3dadf..b475e8eb70a 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -35,50 +36,45 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class CompatUtil { - private static final Logger LOGGER = LoggerFactory.getLogger( - CompatUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class); - private static boolean hasFixedShortCircuitConnection = - VersionInfo.compareVersion(VersionInfo.getVersion(), "2.4.12") >= 0; + private static boolean hasFixedShortCircuitConnection = + VersionInfo.compareVersion(VersionInfo.getVersion(), "2.4.12") >= 0; - private CompatUtil() { - //Not to be instantiated - } + private CompatUtil() { + // Not to be instantiated + } - public static HFileContext createHFileContext(Configuration conf, Algorithm compression, - Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { + public static HFileContext createHFileContext(Configuration conf, Algorithm compression, + Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { - return new HFileContextBuilder() - .withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(blockSize) - .withDataBlockEncoding(encoding) - .build(); - } + return new HFileContextBuilder().withCompression(compression) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).build(); + } - public static List getMergeRegions(Connection conn, RegionInfo regionInfo) - throws IOException { - return MetaTableAccessor.getMergeRegions(conn, regionInfo.getRegionName()); - } + public static List getMergeRegions(Connection conn, RegionInfo regionInfo) + throws IOException { + return MetaTableAccessor.getMergeRegions(conn, regionInfo.getRegionName()); + } - public static ChecksumType getChecksumType(Configuration conf) { - return StoreUtils.getChecksumType(conf); - } + public static ChecksumType getChecksumType(Configuration conf) { + return StoreUtils.getChecksumType(conf); + } - public static int getBytesPerChecksum(Configuration conf) { - return StoreUtils.getBytesPerChecksum(conf); - } + public static int getBytesPerChecksum(Configuration conf) { + return StoreUtils.getBytesPerChecksum(conf); + } - public static Connection createShortCircuitConnection(final Configuration configuration, - final RegionCoprocessorEnvironment env) throws IOException { - if (hasFixedShortCircuitConnection) { - return env.createConnection(configuration); - } else { - return org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(configuration); - } + public static Connection createShortCircuitConnection(final Configuration configuration, + final RegionCoprocessorEnvironment env) throws IOException { + if (hasFixedShortCircuitConnection) { + return env.createConnection(configuration); + } else { + return org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(configuration); } + } } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java index ed892093dc4..ccd416a9143 100644 --- 
a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; public class HbaseCompatCapabilities { - // Currently every supported HBase version has the same capabilities, so there is - // nothing in here. + // Currently every supported HBase version has the same capabilities, so there is + // nothing in here. } diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java index 05ca87b9f7a..cc23c83e5ea 100644 --- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java +++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.compat.hbase; import java.io.IOException; @@ -30,33 +30,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; /** - * Replication Sink compat endpoint that helps attach WAL attributes to - * mutation. In order to do so, this endpoint utilizes regionserver hook + * Replication Sink compat endpoint that helps attach WAL attributes to mutation. In order to do so, + * this endpoint utilizes regionserver hook * {@link #preReplicationSinkBatchMutate(ObserverContext, AdminProtos.WALEntry, Mutation)} */ public class ReplicationSinkCompatEndpoint - implements RegionServerCoprocessor, RegionServerObserver { + implements RegionServerCoprocessor, RegionServerObserver { - @Override - public Optional getRegionServerObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } - @Override - public void preReplicationSinkBatchMutate( - ObserverContext ctx, AdminProtos.WALEntry walEntry, - Mutation mutation) throws IOException { - RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); - List attributeList = walEntry.getKey().getExtendedAttributesList(); - attachWALExtendedAttributesToMutation(mutation, attributeList); - } + @Override + public void preReplicationSinkBatchMutate(ObserverContext ctx, + AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException { + RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); + List attributeList = walEntry.getKey().getExtendedAttributesList(); + attachWALExtendedAttributesToMutation(mutation, attributeList); + } - private void attachWALExtendedAttributesToMutation(Mutation mutation, - List attributeList) { - if (attributeList != null) { - for (WALProtos.Attribute attribute : attributeList) { - mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); - } - } + private void attachWALExtendedAttributesToMutation(Mutation mutation, + List attributeList) { + if (attributeList != null) { + for (WALProtos.Attribute attribute : attributeList) { + mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); + } } + } } diff --git a/phoenix-hbase-compat-2.5.0/pom.xml b/phoenix-hbase-compat-2.5.0/pom.xml index 11d25f997fd..bdc3d92b15d 100644 --- a/phoenix-hbase-compat-2.5.0/pom.xml +++ b/phoenix-hbase-compat-2.5.0/pom.xml @@ -15,10 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -35,7 +32,7 @@ - + org.apache.hbase hbase-client @@ -67,7 +64,7 @@ ${hbase25.compat.version} provided - + org.apache.hbase hbase-protocol ${hbase25.compat.version} diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java index 8e6dca1b79e..c580275868f 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,19 +26,19 @@ public abstract class CompatDelegateHTable implements Table { - protected final Table delegate; + protected final Table delegate; - public CompatDelegateHTable(Table delegate) { - this.delegate = delegate; - } + public CompatDelegateHTable(Table delegate) { + this.delegate = delegate; + } - @Override - public RegionLocator getRegionLocator() throws IOException { - return delegate.getRegionLocator(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + return delegate.getRegionLocator(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - return delegate.mutateRow(rm); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + return delegate.mutateRow(rm); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java index 20d0eaae6d1..a79d128f2bf 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,12 +30,10 @@ public class CompatIndexHalfStoreFileReader extends StoreFileReader { - public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, - final Configuration conf, - final ReaderContext readerContext, - final HFileInfo hFileInfo, Path p) throws IOException { - super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); - } - + public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, + final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p) + throws IOException { + super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java index 45681c45231..5372e0e7c0e 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,10 +23,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; public abstract class CompatIndexedHLogReader extends ProtobufLogReader { - @Override - protected void initAfterCompression() throws IOException { - conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, - "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); - super.initAfterCompression(); - } + @Override + protected void initAfterCompression() throws IOException { + conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, + "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); + super.initAfterCompression(); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java index 780e841dee7..a9f9427c6ab 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,17 +19,13 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner; - public class CompatLocalIndexStoreFileScanner extends StoreFileScanner { - public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, - boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, - reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, - canOptimizeForNonNullColumn); - } - + public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, + boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, + boolean canOptimizeForNonNullColumn) { + super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java index cfbfe407756..7f8f3429d30 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,49 +31,47 @@ public abstract class CompatOmidTransactionTable implements Table { - protected Table hTable; + protected Table hTable; - public CompatOmidTransactionTable(Table hTable) { - this.hTable = hTable; - } + public CompatOmidTransactionTable(Table hTable) { + this.hTable = hTable; + } - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return hTable.getTableDescriptor(); - } + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return hTable.getTableDescriptor(); + } - @Override - public RegionLocator getRegionLocator() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, RowMutations mutation) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations mutation) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { - throw new UnsupportedOperationException(); - } + @Override + public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java index 22c792b6fe2..b495f7e03bd 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,18 +27,17 @@ * passing off the call to the delegate {@link RpcScheduler}. */ public abstract class CompatPhoenixRpcScheduler extends RpcScheduler { - protected RpcScheduler delegate; + protected RpcScheduler delegate; - @Override - public boolean dispatch(CallRunner task) { - try { - return compatDispatch(task); - } catch (Exception e) { - //This never happens with Hbase 2.5 - throw new RuntimeException(e); - } + @Override + public boolean dispatch(CallRunner task) { + try { + return compatDispatch(task); + } catch (Exception e) { + // This never happens with Hbase 2.5 + throw new RuntimeException(e); } + } - public abstract boolean compatDispatch(CallRunner task) - throws IOException, InterruptedException; + public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException; } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java index 51f681ab027..d493dee9b24 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,43 +35,38 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class CompatUtil { - private static final Logger LOGGER = LoggerFactory.getLogger( - CompatUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class); - private CompatUtil() { - //Not to be instantiated - } + private CompatUtil() { + // Not to be instantiated + } - public static HFileContext createHFileContext(Configuration conf, Algorithm compression, - Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { + public static HFileContext createHFileContext(Configuration conf, Algorithm compression, + Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { - return new HFileContextBuilder() - .withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(blockSize) - .withDataBlockEncoding(encoding) - .build(); - } + return new HFileContextBuilder().withCompression(compression) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).build(); + } - public static List getMergeRegions(Connection conn, RegionInfo regionInfo) - throws IOException { - return MetaTableAccessor.getMergeRegions(conn, regionInfo.getRegionName()); - } + public static List getMergeRegions(Connection conn, RegionInfo regionInfo) + throws IOException { + return MetaTableAccessor.getMergeRegions(conn, regionInfo.getRegionName()); + } - public static ChecksumType getChecksumType(Configuration conf) { - return StoreUtils.getChecksumType(conf); - } + public static ChecksumType getChecksumType(Configuration conf) { + return StoreUtils.getChecksumType(conf); + } - public static int getBytesPerChecksum(Configuration conf) { - return StoreUtils.getBytesPerChecksum(conf); - } + public static int getBytesPerChecksum(Configuration conf) { + return StoreUtils.getBytesPerChecksum(conf); + } - public static Connection createShortCircuitConnection(final Configuration configuration, - final RegionCoprocessorEnvironment env) throws IOException { - return env.createConnection(configuration); - } + public static Connection createShortCircuitConnection(final Configuration configuration, + final RegionCoprocessorEnvironment env) throws IOException { + return env.createConnection(configuration); + } } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java index ed892093dc4..ccd416a9143 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; public class HbaseCompatCapabilities { - // Currently every supported HBase version has the same capabilities, so there is - // nothing in here. + // Currently every supported HBase version has the same capabilities, so there is + // nothing in here. } diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java index 05ca87b9f7a..cc23c83e5ea 100644 --- a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java +++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; import java.io.IOException; @@ -30,33 +30,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; /** - * Replication Sink compat endpoint that helps attach WAL attributes to - * mutation. In order to do so, this endpoint utilizes regionserver hook + * Replication Sink compat endpoint that helps attach WAL attributes to mutation. 
In order to do so, + * this endpoint utilizes regionserver hook * {@link #preReplicationSinkBatchMutate(ObserverContext, AdminProtos.WALEntry, Mutation)} */ public class ReplicationSinkCompatEndpoint - implements RegionServerCoprocessor, RegionServerObserver { + implements RegionServerCoprocessor, RegionServerObserver { - @Override - public Optional getRegionServerObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } - @Override - public void preReplicationSinkBatchMutate( - ObserverContext ctx, AdminProtos.WALEntry walEntry, - Mutation mutation) throws IOException { - RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); - List attributeList = walEntry.getKey().getExtendedAttributesList(); - attachWALExtendedAttributesToMutation(mutation, attributeList); - } + @Override + public void preReplicationSinkBatchMutate(ObserverContext ctx, + AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException { + RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); + List attributeList = walEntry.getKey().getExtendedAttributesList(); + attachWALExtendedAttributesToMutation(mutation, attributeList); + } - private void attachWALExtendedAttributesToMutation(Mutation mutation, - List attributeList) { - if (attributeList != null) { - for (WALProtos.Attribute attribute : attributeList) { - mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); - } - } + private void attachWALExtendedAttributesToMutation(Mutation mutation, + List attributeList) { + if (attributeList != null) { + for (WALProtos.Attribute attribute : attributeList) { + mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); + } } + } } diff --git a/phoenix-hbase-compat-2.5.4/pom.xml b/phoenix-hbase-compat-2.5.4/pom.xml index 4cfd5847016..3b8291567b3 100644 --- a/phoenix-hbase-compat-2.5.4/pom.xml +++ b/phoenix-hbase-compat-2.5.4/pom.xml @@ -15,10 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -35,7 +32,7 @@ - + org.apache.hbase hbase-client @@ -67,7 +64,7 @@ ${hbase25.compat.version} provided - + org.apache.hbase hbase-protocol ${hbase25.compat.version} diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java index 8e6dca1b79e..c580275868f 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
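The ReplicationSinkCompatEndpoint hunks above are likewise formatting-only; the endpoint's job is to copy WAL extended attributes onto each replicated mutation from the preReplicationSinkBatchMutate hook. A minimal self-contained sketch of that pattern follows. It is illustrative only and not part of this patch; the class name is assumed, and the generic type parameters are written out per the HBase 2.5+ coprocessor API.

// Illustrative sketch only; not part of this patch.
import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;

public class WalAttributeCopySketch implements RegionServerCoprocessor, RegionServerObserver {

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void preReplicationSinkBatchMutate(
    ObserverContext<RegionServerCoprocessorEnvironment> ctx, AdminProtos.WALEntry walEntry,
    Mutation mutation) throws IOException {
    // Copy every extended attribute carried on the WAL key onto the sink-side mutation,
    // mirroring attachWALExtendedAttributesToMutation in the endpoint above.
    List<WALProtos.Attribute> attributes = walEntry.getKey().getExtendedAttributesList();
    if (attributes != null) {
      for (WALProtos.Attribute attribute : attributes) {
        mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray());
      }
    }
  }
}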
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,19 +26,19 @@ public abstract class CompatDelegateHTable implements Table { - protected final Table delegate; + protected final Table delegate; - public CompatDelegateHTable(Table delegate) { - this.delegate = delegate; - } + public CompatDelegateHTable(Table delegate) { + this.delegate = delegate; + } - @Override - public RegionLocator getRegionLocator() throws IOException { - return delegate.getRegionLocator(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + return delegate.getRegionLocator(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - return delegate.mutateRow(rm); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + return delegate.mutateRow(rm); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java index 20d0eaae6d1..a79d128f2bf 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -29,12 +30,10 @@ public class CompatIndexHalfStoreFileReader extends StoreFileReader { - public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, - final Configuration conf, - final ReaderContext readerContext, - final HFileInfo hFileInfo, Path p) throws IOException { - super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); - } - + public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, + final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p) + throws IOException { + super(readerContext, hFileInfo, cacheConf, new AtomicInteger(0), conf); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java index 14ab6c1c276..5372e0e7c0e 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,12 +22,11 @@ import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; - public abstract class CompatIndexedHLogReader extends ProtobufLogReader { - @Override - protected void initAfterCompression() throws IOException { - conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, - "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); - super.initAfterCompression(); - } + @Override + protected void initAfterCompression() throws IOException { + conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, + "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"); + super.initAfterCompression(); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java index 4939fdb4ca6..a9f9427c6ab 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,16 +19,13 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner; - public class CompatLocalIndexStoreFileScanner extends StoreFileScanner { - public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, - boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, reader - .getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); - } - + public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, + boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, + boolean canOptimizeForNonNullColumn) { + super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java index cfbfe407756..7f8f3429d30 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,49 +31,47 @@ public abstract class CompatOmidTransactionTable implements Table { - protected Table hTable; + protected Table hTable; - public CompatOmidTransactionTable(Table hTable) { - this.hTable = hTable; - } + public CompatOmidTransactionTable(Table hTable) { + this.hTable = hTable; + } - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return hTable.getTableDescriptor(); - } + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return hTable.getTableDescriptor(); + } - @Override - public RegionLocator getRegionLocator() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, RowMutations mutation) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations mutation) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { - throw new UnsupportedOperationException(); - } + @Override + public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java index 22c792b6fe2..b495f7e03bd 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,18 +27,17 @@ * passing off the call to the delegate {@link RpcScheduler}. */ public abstract class CompatPhoenixRpcScheduler extends RpcScheduler { - protected RpcScheduler delegate; + protected RpcScheduler delegate; - @Override - public boolean dispatch(CallRunner task) { - try { - return compatDispatch(task); - } catch (Exception e) { - //This never happens with Hbase 2.5 - throw new RuntimeException(e); - } + @Override + public boolean dispatch(CallRunner task) { + try { + return compatDispatch(task); + } catch (Exception e) { + // This never happens with Hbase 2.5 + throw new RuntimeException(e); } + } - public abstract boolean compatDispatch(CallRunner task) - throws IOException, InterruptedException; + public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException; } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java index 0182a7162ed..6db6f24a32e 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,43 +35,38 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class CompatUtil { - private static final Logger LOGGER = LoggerFactory.getLogger( - CompatUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class); - private CompatUtil() { - //Not to be instantiated - } + private CompatUtil() { + // Not to be instantiated + } - public static HFileContext createHFileContext(Configuration conf, Algorithm compression, - Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { + public static HFileContext createHFileContext(Configuration conf, Algorithm compression, + Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { - return new HFileContextBuilder() - .withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(blockSize) - .withDataBlockEncoding(encoding) - .build(); - } + return new HFileContextBuilder().withCompression(compression) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).build(); + } - public static List getMergeRegions(Connection conn, RegionInfo regionInfo) - throws IOException { - return MetaTableAccessor.getMergeRegions(conn, regionInfo); - } + public static List getMergeRegions(Connection conn, RegionInfo regionInfo) + throws IOException { + return MetaTableAccessor.getMergeRegions(conn, regionInfo); + } - public static ChecksumType getChecksumType(Configuration conf) { - return StoreUtils.getChecksumType(conf); - } + public static ChecksumType getChecksumType(Configuration conf) { + return StoreUtils.getChecksumType(conf); + } - public static int getBytesPerChecksum(Configuration conf) { - return StoreUtils.getBytesPerChecksum(conf); - } + public static int getBytesPerChecksum(Configuration conf) { + return StoreUtils.getBytesPerChecksum(conf); + } - public static Connection createShortCircuitConnection(final Configuration configuration, - final RegionCoprocessorEnvironment env) throws IOException { - return env.createConnection(configuration); - } + public static Connection createShortCircuitConnection(final Configuration configuration, + final RegionCoprocessorEnvironment env) throws IOException { + return env.createConnection(configuration); + } } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java index ed892093dc4..ccd416a9143 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
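One difference worth noting between the two CompatUtil variants in this patch: the one above passes the RegionInfo straight to MetaTableAccessor.getMergeRegions, while the earlier variant passes regionInfo.getRegionName(), presumably bridging a MetaTableAccessor signature difference between HBase lines. Callers stay version-agnostic by always going through the compat layer, roughly as in the sketch below (illustrative only, not part of this patch; the method and variable names are assumed).

// Illustrative sketch only; not part of this patch.
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.phoenix.compat.hbase.CompatUtil;

public class MergeRegionLookupSketch {
  // Resolve the parent regions of a merged region without caring which HBase line is on
  // the classpath; each phoenix-hbase-compat-* module adapts the MetaTableAccessor call.
  static List<RegionInfo> mergeParentsOf(Connection conn, RegionInfo region) throws IOException {
    return CompatUtil.getMergeRegions(conn, region);
  }
}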
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; public class HbaseCompatCapabilities { - // Currently every supported HBase version has the same capabilities, so there is - // nothing in here. + // Currently every supported HBase version has the same capabilities, so there is + // nothing in here. } diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java index 05ca87b9f7a..cc23c83e5ea 100644 --- a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java +++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; import java.io.IOException; @@ -30,33 +30,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; /** - * Replication Sink compat endpoint that helps attach WAL attributes to - * mutation. In order to do so, this endpoint utilizes regionserver hook + * Replication Sink compat endpoint that helps attach WAL attributes to mutation. 
In order to do so, + * this endpoint utilizes regionserver hook * {@link #preReplicationSinkBatchMutate(ObserverContext, AdminProtos.WALEntry, Mutation)} */ public class ReplicationSinkCompatEndpoint - implements RegionServerCoprocessor, RegionServerObserver { + implements RegionServerCoprocessor, RegionServerObserver { - @Override - public Optional getRegionServerObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } - @Override - public void preReplicationSinkBatchMutate( - ObserverContext ctx, AdminProtos.WALEntry walEntry, - Mutation mutation) throws IOException { - RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); - List attributeList = walEntry.getKey().getExtendedAttributesList(); - attachWALExtendedAttributesToMutation(mutation, attributeList); - } + @Override + public void preReplicationSinkBatchMutate(ObserverContext ctx, + AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException { + RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); + List attributeList = walEntry.getKey().getExtendedAttributesList(); + attachWALExtendedAttributesToMutation(mutation, attributeList); + } - private void attachWALExtendedAttributesToMutation(Mutation mutation, - List attributeList) { - if (attributeList != null) { - for (WALProtos.Attribute attribute : attributeList) { - mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); - } - } + private void attachWALExtendedAttributesToMutation(Mutation mutation, + List attributeList) { + if (attributeList != null) { + for (WALProtos.Attribute attribute : attributeList) { + mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); + } } + } } diff --git a/phoenix-hbase-compat-2.6.0/pom.xml b/phoenix-hbase-compat-2.6.0/pom.xml index da0ac4ab489..7df076a5274 100644 --- a/phoenix-hbase-compat-2.6.0/pom.xml +++ b/phoenix-hbase-compat-2.6.0/pom.xml @@ -15,10 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -35,7 +32,7 @@ - + org.apache.hbase hbase-client @@ -67,7 +64,7 @@ ${hbase26.compat.version} provided - + org.apache.hbase hbase-protocol ${hbase26.compat.version} diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java index 8e6dca1b79e..c580275868f 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,19 +26,19 @@ public abstract class CompatDelegateHTable implements Table { - protected final Table delegate; + protected final Table delegate; - public CompatDelegateHTable(Table delegate) { - this.delegate = delegate; - } + public CompatDelegateHTable(Table delegate) { + this.delegate = delegate; + } - @Override - public RegionLocator getRegionLocator() throws IOException { - return delegate.getRegionLocator(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + return delegate.getRegionLocator(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - return delegate.mutateRow(rm); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + return delegate.mutateRow(rm); + } } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java index d046c3df8e5..72664ad8b92 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,16 +28,12 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.StoreFileReader; +public class CompatIndexHalfStoreFileReader extends StoreFileReader { -public class CompatIndexHalfStoreFileReader extends StoreFileReader { - - public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, - final Configuration conf, - final ReaderContext readerContext, - final HFileInfo hFileInfo, Path p) throws IOException { - super(readerContext, hFileInfo, cacheConf, - new StoreFileInfo(conf, fs, p, true), conf); - } - + public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf, + final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p) + throws IOException { + super(readerContext, hFileInfo, cacheConf, new StoreFileInfo(conf, fs, p, true), conf); + } } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java index 6bf1602ec6b..c23b0be5d87 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java index 5e4e3497a7b..62e328a54f9 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,17 +20,14 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; - public class CompatLocalIndexStoreFileScanner extends StoreFileScanner { - public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, - boolean cacheBlocks, boolean pread, - boolean isCompaction, long readPt, long scannerOrder, - boolean canOptimizeForNonNullColumn) { - super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, reader - .getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn, - reader.getHFileReader().getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1); - } - + public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader, + boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, + boolean canOptimizeForNonNullColumn) { + super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn, + reader.getHFileReader().getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1); + } } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java index 81e4a1873f4..7f8f3429d30 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -30,49 +31,47 @@ public abstract class CompatOmidTransactionTable implements Table { - protected Table hTable; + protected Table hTable; - public CompatOmidTransactionTable(Table hTable) { - this.hTable = hTable; - } + public CompatOmidTransactionTable(Table hTable) { + this.hTable = hTable; + } - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return hTable.getTableDescriptor(); - } + @Override + public HTableDescriptor getTableDescriptor() throws IOException { + return hTable.getTableDescriptor(); + } - @Override - public RegionLocator getRegionLocator() throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public RegionLocator getRegionLocator() throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public Result mutateRow(RowMutations rm) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public Result mutateRow(RowMutations rm) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Put put) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, Delete delete) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, RowMutations mutation) - throws IOException { - throw new UnsupportedOperationException(); - } + @Override + public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, + byte[] value, RowMutations mutation) throws IOException { + throw new UnsupportedOperationException(); + } - @Override - public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { - throw new UnsupportedOperationException(); - } + @Override + public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { + throw new UnsupportedOperationException(); + } } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java index 4303301bed2..53380065865 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,32 +27,31 @@ * passing off the call to the delegate {@link RpcScheduler}. */ public abstract class CompatPhoenixRpcScheduler extends RpcScheduler { - protected RpcScheduler delegate; - - @Override - public boolean dispatch(CallRunner task) { - try { - return compatDispatch(task); - } catch (Exception e) { - //This never happens with Hbase 2.5 - throw new RuntimeException(e); - } + protected RpcScheduler delegate; + + @Override + public boolean dispatch(CallRunner task) { + try { + return compatDispatch(task); + } catch (Exception e) { + // This never happens with Hbase 2.5 + throw new RuntimeException(e); } + } - public int getActiveRpcHandlerCount() { - return delegate.getActiveRpcHandlerCount(); - } + public int getActiveRpcHandlerCount() { + return delegate.getActiveRpcHandlerCount(); + } - @Override - public int getActiveBulkLoadRpcHandlerCount() { - return delegate.getActiveBulkLoadRpcHandlerCount(); - } + @Override + public int getActiveBulkLoadRpcHandlerCount() { + return delegate.getActiveBulkLoadRpcHandlerCount(); + } - @Override - public int getBulkLoadQueueLength() { - return delegate.getBulkLoadQueueLength(); - } + @Override + public int getBulkLoadQueueLength() { + return delegate.getBulkLoadQueueLength(); + } - public abstract boolean compatDispatch(CallRunner task) - throws IOException, InterruptedException; + public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException; } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java index 0182a7162ed..6db6f24a32e 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,43 +35,38 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - public class CompatUtil { - private static final Logger LOGGER = LoggerFactory.getLogger( - CompatUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class); - private CompatUtil() { - //Not to be instantiated - } + private CompatUtil() { + // Not to be instantiated + } - public static HFileContext createHFileContext(Configuration conf, Algorithm compression, - Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { + public static HFileContext createHFileContext(Configuration conf, Algorithm compression, + Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) { - return new HFileContextBuilder() - .withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(blockSize) - .withDataBlockEncoding(encoding) - .build(); - } + return new HFileContextBuilder().withCompression(compression) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withDataBlockEncoding(encoding).build(); + } - public static List getMergeRegions(Connection conn, RegionInfo regionInfo) - throws IOException { - return MetaTableAccessor.getMergeRegions(conn, regionInfo); - } + public static List getMergeRegions(Connection conn, RegionInfo regionInfo) + throws IOException { + return MetaTableAccessor.getMergeRegions(conn, regionInfo); + } - public static ChecksumType getChecksumType(Configuration conf) { - return StoreUtils.getChecksumType(conf); - } + public static ChecksumType getChecksumType(Configuration conf) { + return StoreUtils.getChecksumType(conf); + } - public static int getBytesPerChecksum(Configuration conf) { - return StoreUtils.getBytesPerChecksum(conf); - } + public static int getBytesPerChecksum(Configuration conf) { + return StoreUtils.getBytesPerChecksum(conf); + } - public static Connection createShortCircuitConnection(final Configuration configuration, - final RegionCoprocessorEnvironment env) throws IOException { - return env.createConnection(configuration); - } + public static Connection createShortCircuitConnection(final Configuration configuration, + final RegionCoprocessorEnvironment env) throws IOException { + return env.createConnection(configuration); + } } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java index ed892093dc4..ccd416a9143 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; public class HbaseCompatCapabilities { - // Currently every supported HBase version has the same capabilities, so there is - // nothing in here. + // Currently every supported HBase version has the same capabilities, so there is + // nothing in here. } diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java index 05ca87b9f7a..cc23c83e5ea 100644 --- a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java +++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/ReplicationSinkCompatEndpoint.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.compat.hbase; import java.io.IOException; @@ -30,33 +30,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; /** - * Replication Sink compat endpoint that helps attach WAL attributes to - * mutation. In order to do so, this endpoint utilizes regionserver hook + * Replication Sink compat endpoint that helps attach WAL attributes to mutation. 
In order to do so, + * this endpoint utilizes regionserver hook * {@link #preReplicationSinkBatchMutate(ObserverContext, AdminProtos.WALEntry, Mutation)} */ public class ReplicationSinkCompatEndpoint - implements RegionServerCoprocessor, RegionServerObserver { + implements RegionServerCoprocessor, RegionServerObserver { - @Override - public Optional getRegionServerObserver() { - return Optional.of(this); - } + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } - @Override - public void preReplicationSinkBatchMutate( - ObserverContext ctx, AdminProtos.WALEntry walEntry, - Mutation mutation) throws IOException { - RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); - List attributeList = walEntry.getKey().getExtendedAttributesList(); - attachWALExtendedAttributesToMutation(mutation, attributeList); - } + @Override + public void preReplicationSinkBatchMutate(ObserverContext ctx, + AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException { + RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation); + List attributeList = walEntry.getKey().getExtendedAttributesList(); + attachWALExtendedAttributesToMutation(mutation, attributeList); + } - private void attachWALExtendedAttributesToMutation(Mutation mutation, - List attributeList) { - if (attributeList != null) { - for (WALProtos.Attribute attribute : attributeList) { - mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); - } - } + private void attachWALExtendedAttributesToMutation(Mutation mutation, + List attributeList) { + if (attributeList != null) { + for (WALProtos.Attribute attribute : attributeList) { + mutation.setAttribute(attribute.getKey(), attribute.getValue().toByteArray()); + } } + } } diff --git a/phoenix-mapreduce-byo-shaded-hbase/pom.xml b/phoenix-mapreduce-byo-shaded-hbase/pom.xml index 7a20de505b9..02dbb48f380 100644 --- a/phoenix-mapreduce-byo-shaded-hbase/pom.xml +++ b/phoenix-mapreduce-byo-shaded-hbase/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - + 4.0.0 org.apache.phoenix @@ -25,9 +23,9 @@ 5.3.0-SNAPSHOT phoenix-mapreduce-byo-shaded-hbase-${hbase.suffix} + jar Phoenix Mapreduce Phoenix Mapreduce JAR for use with the "hbase mapredcp" classpath - jar true @@ -35,6 +33,253 @@ true true + + + + + org.apache.phoenix + phoenix-core-server + + + + org.slf4j + slf4j-reload4j + + + ch.qos.reload4j + reload4j + + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + + + org.apache.phoenix + phoenix-hbase-compat-${hbase.compat.version} + false + + + + + org.eclipse.jetty + jetty-server + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-util + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-util-ajax + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-servlet + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-webapp + ${jetty.version} + provided + + + javax.servlet + javax.servlet-api + ${javax.servlet-api.version} + provided + + + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.hadoop + hadoop-auth + provided + + + org.apache.hadoop + hadoop-yarn-api + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-hdfs-client + provided + + + org.apache.hadoop + hadoop-distcp + provided + + + org.apache.hadoop + hadoop-client + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-mapreduce-client-common + provided + + + + + org.apache.hbase + hbase-client + provided + + + org.apache.hbase + hbase-common + provided + + + org.apache.hbase + hbase-mapreduce + provided + + + org.apache.hbase + hbase-replication + provided + + + org.apache.hbase + hbase-endpoint + provided + + + org.apache.hbase + hbase-metrics-api + provided + + + org.apache.hbase + hbase-metrics + provided + + + org.apache.hbase + hbase-protocol + provided + + + org.apache.hbase + hbase-protocol-shaded + provided + + + org.apache.hbase + hbase-server + provided + + + org.apache.hbase + hbase-hadoop-compat + provided + + + org.apache.hbase + hbase-hadoop2-compat + provided + + + org.apache.hbase + hbase-zookeeper + provided + + + org.apache.hbase.thirdparty + hbase-shaded-netty + ${hbase-thirdparty.excludeonly.version} + provided + + + org.apache.hbase.thirdparty + hbase-shaded-miscellaneous + ${hbase-thirdparty.excludeonly.version} + provided + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + ${hbase-thirdparty.excludeonly.version} + provided + + + + org.apache.commons + commons-configuration2 + ${commons-configuration2.excludeonly.version} + provided + + + + + + org.slf4j + slf4j-api + provided + + + com.google.guava + guava + + 11.0.2 + provided + + + com.github.stephenc.findbugs + findbugs-annotations + provided + + + com.google.protobuf + protobuf-java + provided + + @@ -66,7 +311,7 @@ * - + org.apache.maven.plugins @@ -88,7 +333,7 @@ README* - + org.apache.hadoop:hadoop-yarn-common org/apache/hadoop/yarn/factories/package-info.class @@ -122,27 +367,20 @@ - - + + csv-bulk-load-config.properties - - ${project.basedir}/../config/csv-bulk-load-config.properties - + ${project.basedir}/../config/csv-bulk-load-config.properties - + README.md ${project.basedir}/../README.md - + LICENSE.txt ${project.basedir}/../LICENSE - + NOTICE ${project.basedir}/../NOTICE @@ -175,7 +413,7 @@ - org/apache/commons/configuration2/** + org/apache/commons/configuration2/** @@ -335,10 +573,10 @@ lite-shaded - package 
shade + package true false @@ -359,252 +597,4 @@ - - - - - org.apache.phoenix - phoenix-core-server - - - - org.slf4j - slf4j-reload4j - - - ch.qos.reload4j - reload4j - - - - org.slf4j - slf4j-log4j12 - - - log4j - log4j - - - - - org.apache.phoenix - phoenix-hbase-compat-${hbase.compat.version} - false - - - - - org.eclipse.jetty - jetty-server - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-util - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-util-ajax - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-servlet - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-webapp - provided - ${jetty.version} - - - javax.servlet - javax.servlet-api - provided - ${javax.servlet-api.version} - - - - - org.apache.hadoop - hadoop-common - provided - - - org.apache.hadoop - hadoop-mapreduce-client-core - provided - - - org.apache.hadoop - hadoop-annotations - provided - - - org.apache.hadoop - hadoop-auth - provided - - - org.apache.hadoop - hadoop-yarn-api - provided - - - org.apache.hadoop - hadoop-hdfs - provided - - - org.apache.hadoop - hadoop-hdfs-client - provided - - - org.apache.hadoop - hadoop-distcp - provided - - - org.apache.hadoop - hadoop-client - provided - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - provided - - - org.apache.hadoop - hadoop-mapreduce-client-common - provided - - - - - org.apache.hbase - hbase-client - provided - - - org.apache.hbase - hbase-common - provided - - - org.apache.hbase - hbase-mapreduce - provided - - - org.apache.hbase - hbase-replication - provided - - - org.apache.hbase - hbase-endpoint - provided - - - org.apache.hbase - hbase-metrics-api - provided - - - org.apache.hbase - hbase-metrics - provided - - - org.apache.hbase - hbase-protocol - provided - - - org.apache.hbase - hbase-protocol-shaded - provided - - - org.apache.hbase - hbase-server - provided - - - org.apache.hbase - hbase-hadoop-compat - provided - - - org.apache.hbase - hbase-hadoop2-compat - provided - - - org.apache.hbase - hbase-zookeeper - provided - - - org.apache.hbase.thirdparty - hbase-shaded-netty - ${hbase-thirdparty.excludeonly.version} - provided - - - org.apache.hbase.thirdparty - hbase-shaded-miscellaneous - ${hbase-thirdparty.excludeonly.version} - provided - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - ${hbase-thirdparty.excludeonly.version} - provided - - - - org.apache.commons - commons-configuration2 - ${commons-configuration2.excludeonly.version} - provided - - - - - - - org.slf4j - slf4j-api - provided - - - com.google.guava - guava - - 11.0.2 - provided - - - com.github.stephenc.findbugs - findbugs-annotations - provided - - - com.google.protobuf - protobuf-java - provided - - diff --git a/phoenix-pherf/README.md b/phoenix-pherf/README.md index ecc8434057b..c489cb4294e 100644 --- a/phoenix-pherf/README.md +++ b/phoenix-pherf/README.md @@ -17,7 +17,7 @@ limitations under the License. Pherf is a performance test framework that exercises HBase through Apache Phoenix, a SQL layer interface. -## Build +## Build mvn clean package -DskipTests ## Important arguments: @@ -38,23 +38,23 @@ mvn clean package -DskipTests - -rowCountOverride [number of rows] _Specify number of rows to be upserted rather than using row count specified in schema_
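These flags are ultimately consumed by `Pherf.main(String[])`, whose reformatted source appears in the `Pherf.java` hunk further down in this patch: `main` simply constructs a `Pherf` from the argument array and calls `run()`. Below is a minimal, illustrative sketch of driving that same entry point programmatically; the flag values (ZooKeeper host, scenario-file regex) are placeholders, not values taken from this patch.

```java
import org.apache.phoenix.pherf.Pherf;

public class PherfLauncherExample {
  public static void main(String[] args) throws Exception {
    String[] pherfArgs = {
      "-l",                                // pre-load data according to the scenario
      "-q",                                // then execute the multi-threaded query sets
      "-z", "localhost",                   // HBase ZooKeeper address (placeholder)
      "-scenarioFile", ".*user_defined.*", // regex for the scenario .xml file (placeholder)
      "-stats"                             // update Phoenix statistics after the load
    };
    // Same entry point Pherf.main(String[]) uses internally.
    new Pherf(pherfArgs).run();
  }
}
```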
## Running from IDE
-Ex. Load data and execute queries. Specify the following as your IDE debug arguments:
+Ex. Load data and execute queries. Specify the following as your IDE debug arguments:
`-drop -l -q -z localhost`
## Running from command line
-Ex. Drop existing tables, load data, and execute queries:
+Ex. Drop existing tables, load data, and execute queries:
`java -jar pherf-1.0-SNAPSHOT-jar-with-dependencies.jar -drop -l -q -z localhost`
## Adding Rules for Data Creation
-Review [test_scenario.xml](/src/test/resources/scenario/test_scenario.xml)
+Review [test_scenario.xml](/src/test/resources/scenario/test_scenario.xml) for syntax examples.
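The rules listed below map directly onto the `Column` configuration bean that this patch also reformats (see the `Column.java` hunk further down in the diff). As a rough illustration only, a single rule could be expressed through that bean's setters as follows; the enum constant names `VARCHAR` and `RANDOM`, and the column name and prefix, are assumptions for the sketch and are not confirmed by this patch.

```java
import org.apache.phoenix.pherf.configuration.Column;
import org.apache.phoenix.pherf.configuration.DataSequence;
import org.apache.phoenix.pherf.configuration.DataTypeMapping;

public class ColumnRuleSketch {
  // Hypothetical rule: a random VARCHAR of length 15 with a constant prefix,
  // applied only when both name and type match the Phoenix column (userDefined).
  public static Column varcharRule() {
    Column rule = new Column();
    rule.setName("FIELD");                     // placeholder column name
    rule.setType(DataTypeMapping.VARCHAR);     // enum constant name assumed
    rule.setDataSequence(DataSequence.RANDOM); // enum constant name assumed
    rule.setLength(15);
    rule.setPrefix("MYPRFX");                  // constant prefix prepended to generated values
    rule.setUserDefined(true);
    return rule;
  }
}
```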
    * Rules are defined as `` and are applied in the order they appear in file. -* Rules of the same type override the values of a prior rule of the same type. If `true` is +* Rules of the same type override the values of a prior rule of the same type. If `true` is set, rule will only apply override when type and name match the column name in Phoenix. -* `` tag is set at the column level. It can be used to define a constant string appended to the beginning of -CHAR and VARCHAR data type values. +* `` tag is set at the column level. It can be used to define a constant string appended to the beginning of +CHAR and VARCHAR data type values. * **Required field** Supported Phoenix types: VARCHAR, CHAR, DATE, DECIMAL, INTEGER * denoted by the `` tag * User defined true changes rule matching to use both name and type fields to determine equivalence. @@ -66,8 +66,8 @@ CHAR and VARCHAR data type values. * LIST: Means pick values from predefined list of values * **Required field** Length defines boundary for random values for CHAR and VARCHAR types. * denoted by the `` tag -* Column level Min/Max value defines boundaries for numerical values. For DATES, these values supply a range between -which values are generated. At the column level the granularity is a year. At a specific data value level, the +* Column level Min/Max value defines boundaries for numerical values. For DATES, these values supply a range between +which values are generated. At the column level the granularity is a year. At a specific data value level, the granularity is down to the Ms. * denoted by the `` tag * denoted by the `` tag @@ -76,19 +76,19 @@ the value will be null. * denoted by `` * Name can either be any text or the actual column name in the Phoenix table. * denoted by the `` -* Value List is used in conjunction with LIST data sequences. Each entry is a DataValue with a specified value to be -used when generating data. +* Value List is used in conjunction with LIST data sequences. Each entry is a DataValue with a specified value to be +used when generating data. * Denoted by the `` tags * If the distribution attribute on the datavalue is set, values will be created according to -that probability. - * When distribution is used, values must add up to 100%. +that probability. + * When distribution is used, values must add up to 100%. * If distribution is not used, values will be randomly picked from the list with equal distribution. ## Defining Scenario -Scenario can have multiple querySets. Consider following example, concurrency of 1-4 means that each query will be -executed starting with concurrency level of 1 and reach up to maximum concurrency of 4. Per thread, query would be +Scenario can have multiple querySets. Consider following example, concurrency of 1-4 means that each query will be +executed starting with concurrency level of 1 and reach up to maximum concurrency of 4. Per thread, query would be executed to a minimum of 10 times or 10 seconds (whichever comes first). QuerySet by defult is executed serially but you - can change executionType to PARALLEL so queries are executed concurrently. Scenarios are defined in XMLs stored + can change executionType to PARALLEL so queries are executed concurrently. Scenarios are defined in XMLs stored in the resource directory. ``` @@ -97,7 +97,7 @@ executed to a minimum of 10 times or 10 seconds (whichever comes first). QuerySe - @@ -105,11 +105,11 @@ executed to a minimum of 10 times or 10 seconds (whichever comes first). 
QuerySe - + ``` ## Results -Results are written real time in _results_ directory. Open the result that is saved in .jpg format for real time +Results are written real time in _results_ directory. Open the result that is saved in .jpg format for real time visualization. ## Testing diff --git a/phoenix-pherf/config/scenario/user_defined_scenario.xml b/phoenix-pherf/config/scenario/user_defined_scenario.xml index 6435e29ad74..0ca36017603 100644 --- a/phoenix-pherf/config/scenario/user_defined_scenario.xml +++ b/phoenix-pherf/config/scenario/user_defined_scenario.xml @@ -120,7 +120,7 @@ DO_NOT_USE - diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml index 7c3e56982f8..df0a34132b8 100644 --- a/phoenix-pherf/pom.xml +++ b/phoenix-pherf/pom.xml @@ -15,9 +15,7 @@ See the License for the specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -123,8 +121,8 @@ commons-lang3
    - commons-io - commons-io + commons-io + commons-io org.apache.commons @@ -162,24 +160,24 @@ - org.apache.logging.log4j - log4j-api - test + org.apache.logging.log4j + log4j-api + test - org.apache.logging.log4j - log4j-core - test + org.apache.logging.log4j + log4j-core + test - org.apache.logging.log4j - log4j-slf4j-impl - test + org.apache.logging.log4j + log4j-slf4j-impl + test - org.apache.logging.log4j - log4j-1.2-api - test + org.apache.logging.log4j + log4j-1.2-api + test org.apache.phoenix @@ -190,8 +188,8 @@ org.apache.phoenix phoenix-hbase-compat-${hbase.compat.version} - false test + false org.apache.hbase @@ -278,44 +276,20 @@ maven-dependency-plugin - - org.apache.hbase.thirdparty:hbase-shaded-miscellaneous - - - javax.activation:activation - - - javax.xml.bind:jaxb-api - - - org.glassfish.jaxb:jax-runtime - - - org.apache.hbase:hbase-it - - - org.apache.phoenix:phoenix-hbase-compat-${hbase.compat.version} - - - org.apache.hbase:hbase-testing-util - - - org.apache.logging.log4j:log4j-api - - - org.apache.logging.log4j:log4j-core - - - org.apache.logging.log4j:log4j-slf4j-impl - - - org.apache.logging.log4j:log4j-1.2-api - + org.apache.hbase.thirdparty:hbase-shaded-miscellaneous + javax.activation:activation + javax.xml.bind:jaxb-api + org.glassfish.jaxb:jax-runtime + org.apache.hbase:hbase-it + org.apache.phoenix:phoenix-hbase-compat-${hbase.compat.version} + org.apache.hbase:hbase-testing-util + org.apache.logging.log4j:log4j-api + org.apache.logging.log4j:log4j-core + org.apache.logging.log4j:log4j-slf4j-impl + org.apache.logging.log4j:log4j-1.2-api - - jakarta.xml.bind:jakarta.xml.bind-api - + jakarta.xml.bind:jakarta.xml.bind-api @@ -341,29 +315,25 @@ maven-shade-plugin - package shade + package false false ${basedir}/target/pom.xml - - + + README.md ${project.basedir}/../README.md - + LICENSE.txt ${project.basedir}/../LICENSE - + NOTICE ${project.basedir}/../NOTICE @@ -372,7 +342,7 @@ org.apache.phoenix:phoenix-pherf com.google.code.gson:gson - org.apache.hbase.thirdparty:hbase-shaded-miscellaneous + org.apache.hbase.thirdparty:hbase-shaded-miscellaneous org.apache.phoenix.thirdparty:phoenix-shaded-guava com.googlecode.java-diff-utils:diffutils org.apache.commons:commons-lang3 diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java index cafd6b16f0c..c011e497779 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; import java.nio.file.Path; @@ -24,14 +23,6 @@ import java.util.List; import java.util.Properties; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; -import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.apache.phoenix.pherf.PherfConstants.CompareType; import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats; import org.apache.phoenix.pherf.configuration.DataModel; @@ -43,355 +34,346 @@ import org.apache.phoenix.pherf.util.GoogleChartGenerator; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.util.ResourceList; -import org.apache.phoenix.pherf.workload.mt.MultiTenantWorkload; import org.apache.phoenix.pherf.workload.QueryExecutor; import org.apache.phoenix.pherf.workload.Workload; import org.apache.phoenix.pherf.workload.WorkloadExecutor; import org.apache.phoenix.pherf.workload.WriteWorkload; +import org.apache.phoenix.pherf.workload.mt.MultiTenantWorkload; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options; +import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class Pherf { - private static final Logger LOGGER = LoggerFactory.getLogger(Pherf.class); - private static final Options options = new Options(); - private final PhoenixUtil phoenixUtil = PhoenixUtil.create(); - - static { - options.addOption("disableSchemaApply", "disableSchemaApply", false, - "Set to disable schema from being applied."); - options.addOption("disableRuntimeResult", "disableRuntimeResult", false, - "Set to disable writing detailed CSV file during query execution. 
Those will eventually get written at the end of query execution."); - options.addOption("z", "zookeeper", true, - "HBase Zookeeper address for connection. Default: localhost"); - options.addOption("q", "query", false, "Executes multi-threaded query sets"); - options.addOption("listFiles", "listFiles", false, "List available resource files"); - options.addOption("mt", "multi-tenant", false, - "Multi tenanted workloads based on load profiles."); - options.addOption("l", "load", false, - "Pre-loads data according to specified configuration values."); - options.addOption("scenarioFile", "scenarioFile", true, - "Regex or file name for the Test Scenario configuration .xml file to use."); - options.addOption("scenarioName", "scenarioName", true, - "Regex or scenario name from the Test Scenario configuration .xml file to use."); - options.addOption("drop", "drop", true, "Regex drop all tables with schema name as PHERF. " - + "\nExample drop Event tables: -drop .*(EVENT).* Drop all: -drop .* or -drop all"); - options.addOption("schemaFile", "schemaFile", true, - "Regex or file name for the Test phoenix table schema .sql to use."); - options.addOption("m", "monitor", false, "Launch the stats profilers"); - options.addOption("monitorFrequency", "monitorFrequency", true, - "Override for frequency in Ms for which monitor should log stats. " - + "\n See pherf.default.monitorFrequency in pherf.properties"); - options.addOption("rowCountOverride", "rowCountOverride", true, - "Row count override to use instead of one specified in scenario."); - options.addOption("hint", "hint", true, - "Executes all queries with specified hint. Example SMALL"); - options.addOption("log_per_nrows", "log_per_nrows", true, - "Default value to display log line after every 'N' row load"); - options.addOption("diff", "diff", false, - "Run pherf in verification mode and diff with exported results"); - options.addOption("export", "export", false, - "Exports query results to CSV files in " + PherfConstants.EXPORT_DIR - + " directory"); - options.addOption("writerThreadSize", "writerThreadSize", true, - "Override the default number of writer threads. " - + "See pherf.default.dataloader.threadpool in Pherf.properties."); - options.addOption("h", "help", false, "Get help on using this utility."); - options.addOption("d", "debug", false, "Put tool in debug mode"); - options.addOption("stats", "stats", false, - "Update Phoenix Statistics after data is loaded with -l argument"); - options.addOption("label", "label", true, - "Label a run. Result file name will be suffixed with specified label"); - options.addOption("compare", "compare", true, "Specify labeled run(s) to compare"); - options.addOption("useAverageCompareType", "useAverageCompareType", false, - "Compare results with Average query time instead of default is Minimum query time."); - options.addOption("t", "thin", false, "Use the Phoenix Thin Driver"); - options.addOption("s", "server", true, "The URL for the Phoenix QueryServer"); - options.addOption("b", "batchApi", false, "Use JDBC Batch API for writes"); + private static final Logger LOGGER = LoggerFactory.getLogger(Pherf.class); + private static final Options options = new Options(); + private final PhoenixUtil phoenixUtil = PhoenixUtil.create(); + + static { + options.addOption("disableSchemaApply", "disableSchemaApply", false, + "Set to disable schema from being applied."); + options.addOption("disableRuntimeResult", "disableRuntimeResult", false, + "Set to disable writing detailed CSV file during query execution. 
Those will eventually get written at the end of query execution."); + options.addOption("z", "zookeeper", true, + "HBase Zookeeper address for connection. Default: localhost"); + options.addOption("q", "query", false, "Executes multi-threaded query sets"); + options.addOption("listFiles", "listFiles", false, "List available resource files"); + options.addOption("mt", "multi-tenant", false, + "Multi tenanted workloads based on load profiles."); + options.addOption("l", "load", false, + "Pre-loads data according to specified configuration values."); + options.addOption("scenarioFile", "scenarioFile", true, + "Regex or file name for the Test Scenario configuration .xml file to use."); + options.addOption("scenarioName", "scenarioName", true, + "Regex or scenario name from the Test Scenario configuration .xml file to use."); + options.addOption("drop", "drop", true, "Regex drop all tables with schema name as PHERF. " + + "\nExample drop Event tables: -drop .*(EVENT).* Drop all: -drop .* or -drop all"); + options.addOption("schemaFile", "schemaFile", true, + "Regex or file name for the Test phoenix table schema .sql to use."); + options.addOption("m", "monitor", false, "Launch the stats profilers"); + options.addOption("monitorFrequency", "monitorFrequency", true, + "Override for frequency in Ms for which monitor should log stats. " + + "\n See pherf.default.monitorFrequency in pherf.properties"); + options.addOption("rowCountOverride", "rowCountOverride", true, + "Row count override to use instead of one specified in scenario."); + options.addOption("hint", "hint", true, + "Executes all queries with specified hint. Example SMALL"); + options.addOption("log_per_nrows", "log_per_nrows", true, + "Default value to display log line after every 'N' row load"); + options.addOption("diff", "diff", false, + "Run pherf in verification mode and diff with exported results"); + options.addOption("export", "export", false, + "Exports query results to CSV files in " + PherfConstants.EXPORT_DIR + " directory"); + options.addOption("writerThreadSize", "writerThreadSize", true, + "Override the default number of writer threads. " + + "See pherf.default.dataloader.threadpool in Pherf.properties."); + options.addOption("h", "help", false, "Get help on using this utility."); + options.addOption("d", "debug", false, "Put tool in debug mode"); + options.addOption("stats", "stats", false, + "Update Phoenix Statistics after data is loaded with -l argument"); + options.addOption("label", "label", true, + "Label a run. 
Result file name will be suffixed with specified label"); + options.addOption("compare", "compare", true, "Specify labeled run(s) to compare"); + options.addOption("useAverageCompareType", "useAverageCompareType", false, + "Compare results with Average query time instead of default is Minimum query time."); + options.addOption("t", "thin", false, "Use the Phoenix Thin Driver"); + options.addOption("s", "server", true, "The URL for the Phoenix QueryServer"); + options.addOption("b", "batchApi", false, "Use JDBC Batch API for writes"); + } + + private final String zookeeper; + private final String scenarioFile; + private final String scenarioName; + private final String schemaFile; + private final String queryHint; + private final Properties globalProperties; + private final boolean preLoadData; + private final boolean multiTenantWorkload; + private final String dropPherfTablesRegEx; + private final boolean executeQuerySets; + private final boolean isFunctional; + private final boolean monitor; + private final int rowCountOverride; + private final boolean listFiles; + private final boolean applySchema; + private final boolean writeRuntimeResults; + private final GeneratePhoenixStats generateStatistics; + private final String label; + private final String compareResults; + private final CompareType compareType; + private final boolean thinDriver; + private final String queryServerUrl; + private Properties properties = new Properties(); + + @VisibleForTesting + WorkloadExecutor workloadExecutor; + + public Pherf(String[] args, Properties connProperties) throws Exception { + this(args); + // merging global and connection properties into properties. + if (connProperties != null) this.properties.putAll(connProperties); + } + + public Pherf(String[] args) throws Exception { + CommandLineParser parser = DefaultParser.builder().setAllowPartialMatching(false) + .setStripLeadingAndTrailingQuotes(false).build(); + CommandLine command = null; + HelpFormatter hf = new HelpFormatter(); + + try { + command = parser.parse(options, args); + } catch (ParseException e) { + hf.printHelp("Pherf", options); + LOGGER.error("Something went wrong while parsing.", e); + System.exit(1); } - private final String zookeeper; - private final String scenarioFile; - private final String scenarioName; - private final String schemaFile; - private final String queryHint; - private final Properties globalProperties; - private final boolean preLoadData; - private final boolean multiTenantWorkload; - private final String dropPherfTablesRegEx; - private final boolean executeQuerySets; - private final boolean isFunctional; - private final boolean monitor; - private final int rowCountOverride; - private final boolean listFiles; - private final boolean applySchema; - private final boolean writeRuntimeResults; - private final GeneratePhoenixStats generateStatistics; - private final String label; - private final String compareResults; - private final CompareType compareType; - private final boolean thinDriver; - private final String queryServerUrl; - private Properties properties = new Properties(); - - @VisibleForTesting - WorkloadExecutor workloadExecutor; - - public Pherf(String[] args, Properties connProperties) throws Exception { - this(args); - //merging global and connection properties into properties. 
- if (connProperties != null) - this.properties.putAll(connProperties); + globalProperties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + dropPherfTablesRegEx = command.getOptionValue("drop", null); + monitor = command.hasOption("m"); + String monitorFrequency = (command.hasOption("m") && command.hasOption("monitorFrequency")) + ? command.getOptionValue("monitorFrequency") + : globalProperties.getProperty("pherf.default.monitorFrequency"); + globalProperties.setProperty("pherf.default.monitorFrequency", monitorFrequency); + LOGGER.debug("Using Monitor: " + monitor); + LOGGER.debug("Monitor Frequency Ms:" + monitorFrequency); + globalProperties.setProperty(PherfConstants.LOG_PER_NROWS_NAME, getLogPerNRow(command)); + + preLoadData = command.hasOption("l"); + multiTenantWorkload = command.hasOption("mt"); + executeQuerySets = command.hasOption("q"); + zookeeper = command.getOptionValue("z", "localhost"); + queryHint = command.getOptionValue("hint", null); + isFunctional = command.hasOption("diff"); + listFiles = command.hasOption("listFiles"); + applySchema = !command.hasOption("disableSchemaApply"); + writeRuntimeResults = !command.hasOption("disableRuntimeResult"); + scenarioFile = + command.hasOption("scenarioFile") ? command.getOptionValue("scenarioFile") : null; + scenarioName = + command.hasOption("scenarioName") ? command.getOptionValue("scenarioName") : null; + schemaFile = command.hasOption("schemaFile") ? command.getOptionValue("schemaFile") : null; + rowCountOverride = Integer.parseInt(command.getOptionValue("rowCountOverride", "0")); + generateStatistics = + command.hasOption("stats") ? GeneratePhoenixStats.YES : GeneratePhoenixStats.NO; + String writerThreadPoolSize = command.getOptionValue("writerThreadSize", + globalProperties.getProperty("pherf.default.dataloader.threadpool")); + globalProperties.setProperty("pherf.default.dataloader.threadpool", writerThreadPoolSize); + label = command.getOptionValue("label", null); + compareResults = command.getOptionValue("compare", null); + compareType = + command.hasOption("useAverageCompareType") ? CompareType.AVERAGE : CompareType.MINIMUM; + thinDriver = command.hasOption("thin"); + if (thinDriver) { + queryServerUrl = command.getOptionValue("server", "http://localhost:8765"); + } else { + queryServerUrl = null; } - public Pherf(String[] args) throws Exception { - CommandLineParser parser = DefaultParser.builder(). - setAllowPartialMatching(false). - setStripLeadingAndTrailingQuotes(false). - build(); - CommandLine command = null; - HelpFormatter hf = new HelpFormatter(); - - try { - command = parser.parse(options, args); - } catch (ParseException e) { - hf.printHelp("Pherf", options); - LOGGER.error("Something went wrong while parsing.", e); - System.exit(1); - } - - globalProperties = PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - dropPherfTablesRegEx = command.getOptionValue("drop", null); - monitor = command.hasOption("m"); - String - monitorFrequency = - (command.hasOption("m") && command.hasOption("monitorFrequency")) ? 
- command.getOptionValue("monitorFrequency") : - globalProperties.getProperty("pherf.default.monitorFrequency"); - globalProperties.setProperty("pherf.default.monitorFrequency", monitorFrequency); - LOGGER.debug("Using Monitor: " + monitor); - LOGGER.debug("Monitor Frequency Ms:" + monitorFrequency); - globalProperties.setProperty(PherfConstants.LOG_PER_NROWS_NAME, getLogPerNRow(command)); - - preLoadData = command.hasOption("l"); - multiTenantWorkload = command.hasOption("mt"); - executeQuerySets = command.hasOption("q"); - zookeeper = command.getOptionValue("z", "localhost"); - queryHint = command.getOptionValue("hint", null); - isFunctional = command.hasOption("diff"); - listFiles = command.hasOption("listFiles"); - applySchema = !command.hasOption("disableSchemaApply"); - writeRuntimeResults = !command.hasOption("disableRuntimeResult"); - scenarioFile = - command.hasOption("scenarioFile") ? command.getOptionValue("scenarioFile") : null; - scenarioName = - command.hasOption("scenarioName") ? command.getOptionValue("scenarioName") : null; - schemaFile = command.hasOption("schemaFile") ? command.getOptionValue("schemaFile") : null; - rowCountOverride = Integer.parseInt(command.getOptionValue("rowCountOverride", "0")); - generateStatistics = command.hasOption("stats") ? GeneratePhoenixStats.YES : GeneratePhoenixStats.NO; - String - writerThreadPoolSize = - command.getOptionValue("writerThreadSize", - globalProperties.getProperty("pherf.default.dataloader.threadpool")); - globalProperties.setProperty("pherf.default.dataloader.threadpool", writerThreadPoolSize); - label = command.getOptionValue("label", null); - compareResults = command.getOptionValue("compare", null); - compareType = command.hasOption("useAverageCompareType") ? CompareType.AVERAGE : CompareType.MINIMUM; - thinDriver = command.hasOption("thin"); - if (thinDriver) { - queryServerUrl = command.getOptionValue("server", "http://localhost:8765"); - } else { - queryServerUrl = null; - } - - if (command.hasOption('b')) { - // If the '-b' option was provided, set the system property for WriteWorkload to pick up. - System.setProperty(WriteWorkload.USE_BATCH_API_PROPERTY, Boolean.TRUE.toString()); - } - - if ((command.hasOption("h") || (args == null || args.length == 0)) && !command - .hasOption("listFiles")) { - hf.printHelp("Pherf", options); - System.exit(1); - } - PhoenixUtil.setRowCountOverride(rowCountOverride); - if (!thinDriver) { - LOGGER.info("Using thick driver with ZooKeepers '{}'", zookeeper); - PhoenixUtil.setZookeeper(zookeeper); - } else { - LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl); - // Enables the thin-driver and sets the PQS URL - PhoenixUtil.useThinDriver(queryServerUrl); - } - ResultUtil.setFileSuffix(label); - this.properties.putAll(globalProperties); + if (command.hasOption('b')) { + // If the '-b' option was provided, set the system property for WriteWorkload to pick up. + System.setProperty(WriteWorkload.USE_BATCH_API_PROPERTY, Boolean.TRUE.toString()); } - private String getLogPerNRow(CommandLine command) { - try { - String logPerNRows = (command.hasOption("log_per_nrows")) ? - command.getOptionValue("log_per_nrows") : - globalProperties.getProperty( - PherfConstants.LOG_PER_NROWS_NAME, - String.valueOf(PherfConstants.LOG_PER_NROWS) - ); - if (Integer.valueOf(logPerNRows) > 0) { - return logPerNRows; - } - } catch (Exception e) { - LOGGER.warn("Invalid Log per N rows value. 
Phoenix will pick the default value."); - } - - return String.valueOf(PherfConstants.LOG_PER_NROWS); + if ( + (command.hasOption("h") || (args == null || args.length == 0)) + && !command.hasOption("listFiles") + ) { + hf.printHelp("Pherf", options); + System.exit(1); } - - public Properties getProperties() { - return this.properties; + PhoenixUtil.setRowCountOverride(rowCountOverride); + if (!thinDriver) { + LOGGER.info("Using thick driver with ZooKeepers '{}'", zookeeper); + PhoenixUtil.setZookeeper(zookeeper); + } else { + LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl); + // Enables the thin-driver and sets the PQS URL + PhoenixUtil.useThinDriver(queryServerUrl); } - - public static void main(String[] args) { - try { - new Pherf(args).run(); - } catch (Exception e) { - e.printStackTrace(); - LOGGER.error("Something went wrong.", e); - System.exit(1); - } + ResultUtil.setFileSuffix(label); + this.properties.putAll(globalProperties); + } + + private String getLogPerNRow(CommandLine command) { + try { + String logPerNRows = (command.hasOption("log_per_nrows")) + ? command.getOptionValue("log_per_nrows") + : globalProperties.getProperty(PherfConstants.LOG_PER_NROWS_NAME, + String.valueOf(PherfConstants.LOG_PER_NROWS)); + if (Integer.valueOf(logPerNRows) > 0) { + return logPerNRows; + } + } catch (Exception e) { + LOGGER.warn("Invalid Log per N rows value. Phoenix will pick the default value."); } - public void run() throws Exception { - MonitorManager monitorManager = null; - List workloads = new ArrayList<>(); - workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional); - try { - if (listFiles) { - ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL); - Collection - schemaFiles = - list.getResourceList(PherfConstants.SCHEMA_ROOT_PATTERN + ".sql"); - System.out.println("Schema Files:"); - for (Path path : schemaFiles) { - System.out.println(path); - } - list = new ResourceList(PherfConstants.RESOURCE_SCENARIO); - Collection - scenarioFiles = - list.getResourceList(PherfConstants.SCENARIO_ROOT_PATTERN + ".xml"); - System.out.println("Scenario Files:"); - for (Path path : scenarioFiles) { - System.out.println(path); - } - return; - } - - // Compare results and exit - if (null != compareResults) { - LOGGER.info("\nStarting to compare results and exiting for " + compareResults); - new GoogleChartGenerator(compareResults, compareType).readAndRender(); - return; - } - - // Drop tables with PHERF schema and regex comparison - if (null != dropPherfTablesRegEx) { - LOGGER.info( - "\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx - + " regex expression."); - phoenixUtil.deleteTables(dropPherfTablesRegEx); - } + return String.valueOf(PherfConstants.LOG_PER_NROWS); + } + public Properties getProperties() { + return this.properties; + } - if (applySchema) { - LOGGER.info("\nStarting to apply schema..."); - SchemaReader - reader = - (schemaFile == null) ? - new SchemaReader(".*.sql") : - new SchemaReader(schemaFile); - reader.applySchema(); - } - - // If no scenario file specified then we are done. 
- if (scenarioFile == null) { - return; - } - - XMLConfigParser parser = new XMLConfigParser(scenarioFile); - if (monitor) { - monitorManager = - new MonitorManager(Integer.parseInt( - globalProperties.getProperty("pherf.default.monitorFrequency"))); - workloadExecutor.add(monitorManager); - } - - // Schema and Data Load - if (preLoadData || multiTenantWorkload) { - LOGGER.info("\nStarting Data Load..."); - List newWorkloads = Lists.newArrayList(); - try { - if (multiTenantWorkload) { - for (DataModel model : parser.getDataModels()) { - for (Scenario scenario : model.getScenarios()) { - if ((scenarioName != null) && (scenarioName.compareTo(scenario.getName()) != 0)) { - continue; - } - Workload workload = new MultiTenantWorkload(phoenixUtil, - model, scenario, properties); - newWorkloads.add(workload); - } - } - } else { - newWorkloads.add(new WriteWorkload(parser, properties, - generateStatistics)); - } - - if (newWorkloads.isEmpty()) { - throw new IllegalArgumentException("Found no new workload"); - } - - for (Workload workload : newWorkloads) { - workloadExecutor.add(workload); - } - - // Wait for dataLoad to complete - workloadExecutor.get(); - } finally { - if (!newWorkloads.isEmpty()) { - for (Workload workload : newWorkloads) { - workload.complete(); - } - } + public static void main(String[] args) { + try { + new Pherf(args).run(); + } catch (Exception e) { + e.printStackTrace(); + LOGGER.error("Something went wrong.", e); + System.exit(1); + } + } + + public void run() throws Exception { + MonitorManager monitorManager = null; + List workloads = new ArrayList<>(); + workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional); + try { + if (listFiles) { + ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL); + Collection schemaFiles = + list.getResourceList(PherfConstants.SCHEMA_ROOT_PATTERN + ".sql"); + System.out.println("Schema Files:"); + for (Path path : schemaFiles) { + System.out.println(path); + } + list = new ResourceList(PherfConstants.RESOURCE_SCENARIO); + Collection scenarioFiles = + list.getResourceList(PherfConstants.SCENARIO_ROOT_PATTERN + ".xml"); + System.out.println("Scenario Files:"); + for (Path path : scenarioFiles) { + System.out.println(path); + } + return; + } + + // Compare results and exit + if (null != compareResults) { + LOGGER.info("\nStarting to compare results and exiting for " + compareResults); + new GoogleChartGenerator(compareResults, compareType).readAndRender(); + return; + } + + // Drop tables with PHERF schema and regex comparison + if (null != dropPherfTablesRegEx) { + LOGGER.info("\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx + + " regex expression."); + phoenixUtil.deleteTables(dropPherfTablesRegEx); + } + + if (applySchema) { + LOGGER.info("\nStarting to apply schema..."); + SchemaReader reader = + (schemaFile == null) ? new SchemaReader(".*.sql") : new SchemaReader(schemaFile); + reader.applySchema(); + } + + // If no scenario file specified then we are done. 
+ if (scenarioFile == null) { + return; + } + + XMLConfigParser parser = new XMLConfigParser(scenarioFile); + if (monitor) { + monitorManager = new MonitorManager( + Integer.parseInt(globalProperties.getProperty("pherf.default.monitorFrequency"))); + workloadExecutor.add(monitorManager); + } + + // Schema and Data Load + if (preLoadData || multiTenantWorkload) { + LOGGER.info("\nStarting Data Load..."); + List newWorkloads = Lists.newArrayList(); + try { + if (multiTenantWorkload) { + for (DataModel model : parser.getDataModels()) { + for (Scenario scenario : model.getScenarios()) { + if ((scenarioName != null) && (scenarioName.compareTo(scenario.getName()) != 0)) { + continue; } - } else { - LOGGER.info( - "\nSKIPPED: Data Load and schema creation as -l argument not specified"); + Workload workload = + new MultiTenantWorkload(phoenixUtil, model, scenario, properties); + newWorkloads.add(workload); + } } + } else { + newWorkloads.add(new WriteWorkload(parser, properties, generateStatistics)); + } - // Execute multi-threaded query sets - if (executeQuerySets) { - LOGGER.info("\nStarting to apply Execute Queries..."); - - workloadExecutor - .add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, parser.getDataModels(), queryHint, - isFunctional, writeRuntimeResults)); - - } else { - LOGGER.info( - "\nSKIPPED: Multithreaded query set execution as -q argument not specified"); - } - - // Clean up the monitor explicitly - if (monitorManager != null) { - LOGGER.info("Run completed. Shutting down Monitor."); - monitorManager.complete(); - } + if (newWorkloads.isEmpty()) { + throw new IllegalArgumentException("Found no new workload"); + } - // Collect any final jobs - workloadExecutor.get(); + for (Workload workload : newWorkloads) { + workloadExecutor.add(workload); + } + // Wait for dataLoad to complete + workloadExecutor.get(); } finally { - if (workloadExecutor != null) { - LOGGER.info("Run completed. Shutting down thread pool."); - workloadExecutor.shutdown(); + if (!newWorkloads.isEmpty()) { + for (Workload workload : newWorkloads) { + workload.complete(); } + } } + } else { + LOGGER.info("\nSKIPPED: Data Load and schema creation as -l argument not specified"); + } + + // Execute multi-threaded query sets + if (executeQuerySets) { + LOGGER.info("\nStarting to apply Execute Queries..."); + + workloadExecutor.add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, + parser.getDataModels(), queryHint, isFunctional, writeRuntimeResults)); + + } else { + LOGGER.info("\nSKIPPED: Multithreaded query set execution as -q argument not specified"); + } + + // Clean up the monitor explicitly + if (monitorManager != null) { + LOGGER.info("Run completed. Shutting down Monitor."); + monitorManager.complete(); + } + + // Collect any final jobs + workloadExecutor.get(); + + } finally { + if (workloadExecutor != null) { + LOGGER.info("Run completed. Shutting down thread pool."); + workloadExecutor.shutdown(); + } } -} \ No newline at end of file + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java index caba0d1e4a3..615a0f1163a 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; import java.io.IOException; @@ -24,121 +23,123 @@ import java.util.Properties; public class PherfConstants { - public static enum GeneratePhoenixStats { - YES, - NO - } - - public static enum CompareType { - MINIMUM, - AVERAGE - } - - private static PherfConstants instance = null; - private static Properties instanceProperties = null; - - public static final int DEFAULT_THREAD_POOL_SIZE = 10; - public static final int DEFAULT_BATCH_SIZE = 1000; - public static final String DEFAULT_DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS"; - public static final ZoneId DEFAULT_TIME_ZONE = ZoneId.of("UTC"); - public static final String RESOURCE_SCENARIO = "/scenario"; - public static final String - SCENARIO_ROOT_PATTERN = - ".*" + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*" + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*"; - public static final String SCHEMA_ROOT_PATTERN = ".*"; - public static final String TEST_SCENARIO_ROOT_PATTERN = ".*" + "test" + "_" + PherfConstants.RESOURCE_SCENARIO.substring(1); - public static final String PHERF_PROPERTIES = "pherf.properties"; - - public static final String EXPORT_DIR = "CSV_EXPORT"; - public static final String RESULT_PREFIX = "RESULT_"; - public static final String PATH_SEPARATOR = "/"; - public static final String RESULT_FILE_DELIMETER = ","; - public static final String NEW_LINE = "\n"; - - public static final long DEFAULT_NUMBER_OF_EXECUTIONS = 10; - public static final long DEFAULT_THREAD_DURATION_IN_MS = 10000; - public static final String DEFAULT_CONCURRENCY = "1"; - - public static final String DIFF_PASS = "VERIFIED_DIFF"; - public static final String DIFF_FAIL = "FAILED_DIFF"; - - public static final String PHERF_SCHEMA_NAME = "PHERF"; - - // TODO MOve to properties - // log out data load per n rows - public static final String LOG_PER_NROWS_NAME = "pherf.default.log_per_nrows"; - public static final int LOG_PER_NROWS = 1000000; - public static final String COMBINED_FILE_NAME = "COMBINED"; - - public static final String EXPORT_TMP = EXPORT_DIR + "_TMP"; - public static final String 
RESOURCE_DATAMODEL = "/datamodel"; - - // Default frequency in ms in which to log out monitor stats - public static final int MONITOR_FREQUENCY = 5000; - public static final String MONITOR_FILE_NAME = "STATS_MONITOR"; - - public static final String NUM_SEQUENTIAL_ITERATIONS_PROP_KEY = "pherf.mt.sequential.iterations"; - public static final String NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY = "pherf.mt.sequential.type"; - public static final String HANDLERS_PER_SCENARIO_PROP_KEY = "pherf.mt.handlers_per_scenario"; - public static final String MT_HANDLER_START_RENDEZVOUS_PROP_KEY = "pherf.mt.handlers_start_rendezvous"; - public static final String MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY = "pherf.mt.handlers_results_rendezvous"; - - private PherfConstants() { - } - - public static PherfConstants create() { - if (instance == null) { - instance = new PherfConstants(); - } - return instance; + public static enum GeneratePhoenixStats { + YES, + NO + } + + public static enum CompareType { + MINIMUM, + AVERAGE + } + + private static PherfConstants instance = null; + private static Properties instanceProperties = null; + + public static final int DEFAULT_THREAD_POOL_SIZE = 10; + public static final int DEFAULT_BATCH_SIZE = 1000; + public static final String DEFAULT_DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS"; + public static final ZoneId DEFAULT_TIME_ZONE = ZoneId.of("UTC"); + public static final String RESOURCE_SCENARIO = "/scenario"; + public static final String SCENARIO_ROOT_PATTERN = + ".*" + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*" + + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*"; + public static final String SCHEMA_ROOT_PATTERN = ".*"; + public static final String TEST_SCENARIO_ROOT_PATTERN = + ".*" + "test" + "_" + PherfConstants.RESOURCE_SCENARIO.substring(1); + public static final String PHERF_PROPERTIES = "pherf.properties"; + + public static final String EXPORT_DIR = "CSV_EXPORT"; + public static final String RESULT_PREFIX = "RESULT_"; + public static final String PATH_SEPARATOR = "/"; + public static final String RESULT_FILE_DELIMETER = ","; + public static final String NEW_LINE = "\n"; + + public static final long DEFAULT_NUMBER_OF_EXECUTIONS = 10; + public static final long DEFAULT_THREAD_DURATION_IN_MS = 10000; + public static final String DEFAULT_CONCURRENCY = "1"; + + public static final String DIFF_PASS = "VERIFIED_DIFF"; + public static final String DIFF_FAIL = "FAILED_DIFF"; + + public static final String PHERF_SCHEMA_NAME = "PHERF"; + + // TODO MOve to properties + // log out data load per n rows + public static final String LOG_PER_NROWS_NAME = "pherf.default.log_per_nrows"; + public static final int LOG_PER_NROWS = 1000000; + public static final String COMBINED_FILE_NAME = "COMBINED"; + + public static final String EXPORT_TMP = EXPORT_DIR + "_TMP"; + public static final String RESOURCE_DATAMODEL = "/datamodel"; + + // Default frequency in ms in which to log out monitor stats + public static final int MONITOR_FREQUENCY = 5000; + public static final String MONITOR_FILE_NAME = "STATS_MONITOR"; + + public static final String NUM_SEQUENTIAL_ITERATIONS_PROP_KEY = "pherf.mt.sequential.iterations"; + public static final String NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY = "pherf.mt.sequential.type"; + public static final String HANDLERS_PER_SCENARIO_PROP_KEY = "pherf.mt.handlers_per_scenario"; + public static final String MT_HANDLER_START_RENDEZVOUS_PROP_KEY = + "pherf.mt.handlers_start_rendezvous"; + public static final String MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY = + 
"pherf.mt.handlers_results_rendezvous"; + + private PherfConstants() { + } + + public static PherfConstants create() { + if (instance == null) { + instance = new PherfConstants(); } - - /** - * Get a {@link Properties} object based on the file name - * @param fileName Name of the file - * @param getDefault True if you want to use the properties that may have been loaded into - * the instance. use false if you want to reload the passed file. - * @return {@link Properties} - * @throws Exception - */ - public Properties getProperties(final String fileName, boolean getDefault) throws Exception { - - if (instanceProperties == null) { - instanceProperties = loadProperties(fileName); - } else { - return getDefault ? instanceProperties : loadProperties(fileName); - } - - return instanceProperties; + return instance; + } + + /** + * Get a {@link Properties} object based on the file name + * @param fileName Name of the file + * @param getDefault True if you want to use the properties that may have been loaded into the + * instance. use false if you want to reload the passed file. + * @return {@link Properties} + */ + public Properties getProperties(final String fileName, boolean getDefault) throws Exception { + + if (instanceProperties == null) { + instanceProperties = loadProperties(fileName); + } else { + return getDefault ? instanceProperties : loadProperties(fileName); } - private Properties loadProperties(String fileName) throws IOException{ - Properties properties = new Properties(); - InputStream is = null; - try { - is = getClass().getClassLoader().getResourceAsStream(fileName); - if (is != null) { - properties.load(is); - } - } finally { - if (is != null) { - is.close(); - } - } - return properties; + return instanceProperties; + } + + private Properties loadProperties(String fileName) throws IOException { + Properties properties = new Properties(); + InputStream is = null; + try { + is = getClass().getClassLoader().getResourceAsStream(fileName); + if (is != null) { + properties.load(is); + } + } finally { + if (is != null) { + is.close(); + } } - - public String getProperty(String property) { - return getProperty(PherfConstants.PHERF_PROPERTIES, property); - } - - public String getProperty(final String fileName, String property) { - String value = null; - try { - value = getProperties(fileName, false).getProperty(property); - } catch (Exception e) { - e.printStackTrace(); - } - return value; + return properties; + } + + public String getProperty(String property) { + return getProperty(PherfConstants.PHERF_PROPERTIES, property); + } + + public String getProperty(final String fileName, String property) { + String value = null; + try { + value = getProperties(fileName, false).getProperty(property); + } catch (Exception e) { + e.printStackTrace(); } + return value; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java index ee5768581a3..adf198e70b4 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Column.java @@ -1,229 +1,228 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; -import org.apache.phoenix.pherf.rules.DataValue; +import java.util.List; +import java.util.Objects; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; -import java.util.List; -import java.util.Objects; + +import org.apache.phoenix.pherf.rules.DataValue; public class Column { - private String name; - private String prefix; - private DataSequence dataSequence; - private int length, precision; - private long minValue, maxValue; - private int nullChance; - private boolean userDefined; - private List dataValues; - private DataTypeMapping type; - private boolean useCurrentDate; - - public Column() { - super(); - // Initialize int to negative value so we can distinguish 0 in mutations - // Object fields can be detected with null - this.length = Integer.MIN_VALUE; - this.minValue = Long.MIN_VALUE; - this.maxValue = Long.MIN_VALUE; - this.precision = Integer.MIN_VALUE; - this.nullChance = Integer.MIN_VALUE; - this.userDefined = false; - this.useCurrentDate = false; - } + private String name; + private String prefix; + private DataSequence dataSequence; + private int length, precision; + private long minValue, maxValue; + private int nullChance; + private boolean userDefined; + private List dataValues; + private DataTypeMapping type; + private boolean useCurrentDate; + + public Column() { + super(); + // Initialize int to negative value so we can distinguish 0 in mutations + // Object fields can be detected with null + this.length = Integer.MIN_VALUE; + this.minValue = Long.MIN_VALUE; + this.maxValue = Long.MIN_VALUE; + this.precision = Integer.MIN_VALUE; + this.nullChance = Integer.MIN_VALUE; + this.userDefined = false; + this.useCurrentDate = false; + } + + public Column(Column column) { + this(); + this.type = column.type; + this.mutate(column); + } + + @Override + public int hashCode() { + return Objects.hash(this.type); + } + + /** + * Equal if column name and type match + */ + @Override + public boolean equals(Object column) { + if (!(column instanceof Column)) { + return false; + } + Column col = (Column) column; + return (getType() == col.getType()); + } + + public String getName() { + return name; + 
} - public Column(Column column) { - this(); - this.type = column.type; - this.mutate(column); - } + public void setName(String name) { + this.name = name; + } - @Override - public int hashCode() { - return Objects.hash(this.type); - } + public DataSequence getDataSequence() { + return dataSequence; + } - /** - * Equal if column name and type match - * @param column - * @return - */ - @Override - public boolean equals(Object column) { - if (!(column instanceof Column)) { - return false; - } - Column col = (Column)column; - return (getType() == col.getType()); - } + public void setDataSequence(DataSequence dataSequence) { + this.dataSequence = dataSequence; + } - public String getName() { - return name; - } + public int getLength() { + return length; + } - public void setName(String name) { - this.name = name; - } + public int getLengthExcludingPrefix() { + return (this.getPrefix() == null) ? this.length : this.length - this.getPrefix().length(); + } - public DataSequence getDataSequence() { - return dataSequence; - } + public void setLength(int length) { + this.length = length; + } - public void setDataSequence(DataSequence dataSequence) { - this.dataSequence = dataSequence; - } + public DataTypeMapping getType() { + return type; + } - public int getLength() { - return length; - } - - public int getLengthExcludingPrefix() { - return (this.getPrefix() == null) ? this.length : this.length - this.getPrefix().length(); - } + public void setType(DataTypeMapping type) { + this.type = type; + } - public void setLength(int length) { - this.length = length; - } + public long getMinValue() { + return minValue; + } - public DataTypeMapping getType() { - return type; - } + public void setMinValue(long minValue) { + this.minValue = minValue; + } - public void setType(DataTypeMapping type) { - this.type = type; - } + public long getMaxValue() { + return maxValue; + } - public long getMinValue() { - return minValue; - } + public void setMaxValue(long maxValue) { + this.maxValue = maxValue; + } - public void setMinValue(long minValue) { - this.minValue = minValue; - } + public int getPrecision() { + return precision; + } - public long getMaxValue() { - return maxValue; - } + public void setPrecision(int precision) { + this.precision = precision; + } - public void setMaxValue(long maxValue) { - this.maxValue = maxValue; - } + public void setUseCurrentDate(boolean useCurrentDate) { + this.useCurrentDate = useCurrentDate; + } - public int getPrecision() { - return precision; + public boolean getUseCurrentDate() { + return useCurrentDate; + } + + /** + * Changes fields of this object to match existing fields from the passed Column null object + * members are ignored. Field type cannot be mutated. + * @param column {@link Column} obj contains only the fields you want to mutate this object into. + */ + public void mutate(Column column) { + if (column.getMinValue() != Long.MIN_VALUE) { + setMinValue(column.getMinValue()); } - public void setPrecision(int precision) { - this.precision = precision; + if (column.getMaxValue() != Long.MIN_VALUE) { + setMaxValue(column.getMaxValue()); } - public void setUseCurrentDate(boolean useCurrentDate) { this.useCurrentDate = useCurrentDate; } - - public boolean getUseCurrentDate() { return useCurrentDate; } - - /** - * Changes fields of this object to match existing fields from the passed Column - * null object members are ignored. - * - * Field type cannot be mutated. - * @param column {@link Column} - * obj contains only the fields you want to mutate this object into. 
- */ - public void mutate(Column column) { - if (column.getMinValue() != Long.MIN_VALUE) { - setMinValue(column.getMinValue()); - } - - if (column.getMaxValue() != Long.MIN_VALUE) { - setMaxValue(column.getMaxValue()); - } - - if (column.getLength() != Integer.MIN_VALUE) { - setLength(column.getLength()); - } - - if (column.getName() != null) { - setName(column.getName()); - } - - if (column.getPrefix() != null) { - setPrefix(column.getPrefix()); - } - - if (column.getDataSequence() != null) { - setDataSequence(column.getDataSequence()); - } - - if (column.getNullChance() != Integer.MIN_VALUE) { - setNullChance(column.getNullChance()); - } - - if (column.getPrecision() != Integer.MIN_VALUE) { - setPrecision(column.getPrecision()); - } - - if (column.isUserDefined()) { - setUserDefined(column.isUserDefined()); - } - - if (column.dataValues != null) { - setDataValues(column.getDataValues()); - } - - if(column.getUseCurrentDate()) { - setUseCurrentDate(column.getUseCurrentDate()); - } + if (column.getLength() != Integer.MIN_VALUE) { + setLength(column.getLength()); } - public int getNullChance() { - return nullChance; + if (column.getName() != null) { + setName(column.getName()); } - public void setNullChance(int nullChance) { - this.nullChance = nullChance; + if (column.getPrefix() != null) { + setPrefix(column.getPrefix()); } - public boolean isUserDefined() { - return userDefined; + if (column.getDataSequence() != null) { + setDataSequence(column.getDataSequence()); } - public void setUserDefined(boolean userDefined) { - this.userDefined = userDefined; + if (column.getNullChance() != Integer.MIN_VALUE) { + setNullChance(column.getNullChance()); } - public List getDataValues() { - return dataValues; + if (column.getPrecision() != Integer.MIN_VALUE) { + setPrecision(column.getPrecision()); } - @XmlElementWrapper(name = "valuelist") - @XmlElement(name = "datavalue") - public void setDataValues(List dataValues) { - this.dataValues = dataValues; + if (column.isUserDefined()) { + setUserDefined(column.isUserDefined()); } - public String getPrefix() { - return prefix; + if (column.dataValues != null) { + setDataValues(column.getDataValues()); } - public void setPrefix(String prefix) { - this.prefix = prefix; + if (column.getUseCurrentDate()) { + setUseCurrentDate(column.getUseCurrentDate()); } -} \ No newline at end of file + } + + public int getNullChance() { + return nullChance; + } + + public void setNullChance(int nullChance) { + this.nullChance = nullChance; + } + + public boolean isUserDefined() { + return userDefined; + } + + public void setUserDefined(boolean userDefined) { + this.userDefined = userDefined; + } + + public List getDataValues() { + return dataValues; + } + + @XmlElementWrapper(name = "valuelist") + @XmlElement(name = "datavalue") + public void setDataValues(List dataValues) { + this.dataValues = dataValues; + } + + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java index 4c99ddd7e95..f4fae64b2a2 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java @@ -1,75 +1,75 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; +import java.util.List; + import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; -import java.util.List; @XmlRootElement(name = "datamodel") public class DataModel { - private String name; - private List scenarios; - private List dataMappingColumns; + private String name; + private List scenarios; + private List dataMappingColumns; - public DataModel() { - } + public DataModel() { + } - public List getScenarios() { - return scenarios; - } + public List getScenarios() { + return scenarios; + } - @XmlElementWrapper(name = "datamapping") - @XmlElement(name = "column") - public void setDataMappingColumns(List dataMappingColumns) { - this.dataMappingColumns = dataMappingColumns; - } + @XmlElementWrapper(name = "datamapping") + @XmlElement(name = "column") + public void setDataMappingColumns(List dataMappingColumns) { + this.dataMappingColumns = dataMappingColumns; + } - public List getDataMappingColumns() { - return dataMappingColumns; - } + public List getDataMappingColumns() { + return dataMappingColumns; + } - @XmlElementWrapper(name = "scenarios") - @XmlElement(name = "scenario") - public void setScenarios(List scenarios) { - this.scenarios = scenarios; - } + @XmlElementWrapper(name = "scenarios") + @XmlElement(name = "scenario") + public void setScenarios(List scenarios) { + this.scenarios = scenarios; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - @XmlAttribute() - public void setName(String name) { - this.name = name; - } + @XmlAttribute() + public void setName(String name) { + this.name = name; + } - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(); - for (Scenario scenario : getScenarios()) { - stringBuilder.append("Scenario: " + scenario.getName()); - stringBuilder.append("[" + scenario + "]"); - } - return stringBuilder.toString(); + @Override + public String toString() { + StringBuilder 
stringBuilder = new StringBuilder(); + for (Scenario scenario : getScenarios()) { + stringBuilder.append("Scenario: " + scenario.getName()); + stringBuilder.append("[" + scenario + "]"); } + return stringBuilder.toString(); + } -} \ No newline at end of file +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataOverride.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataOverride.java index 47faa62dcf8..680b38fbb6f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataOverride.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataOverride.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import java.util.List; @@ -24,14 +23,14 @@ @XmlType public class DataOverride { - private List column; + private List column; - public List getColumn() { - return column; - } + public List getColumn() { + return column; + } - @SuppressWarnings("unused") - public void setColumn(List column) { - this.column = column; - } + @SuppressWarnings("unused") + public void setColumn(List column) { + this.column = column; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataSequence.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataSequence.java index 056a913c1f6..6d1f3e4f10e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataSequence.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataSequence.java @@ -1,23 +1,24 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; public enum DataSequence { - RANDOM, SEQUENTIAL,LIST; -} \ No newline at end of file + RANDOM, + SEQUENTIAL, + LIST; +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java index dea8130b769..79e9dfa90bc 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java @@ -1,57 +1,56 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.configuration; import java.sql.Types; public enum DataTypeMapping { - VARCHAR("VARCHAR", Types.VARCHAR), - CHAR("CHAR", Types.CHAR), - DECIMAL("DECIMAL", Types.DECIMAL), - INTEGER("INTEGER", Types.INTEGER), - DATE("DATE", Types.DATE), - UNSIGNED_LONG("UNSIGNED_LONG", Types.LONGVARCHAR), - VARCHAR_ARRAY("VARCHAR ARRAY", Types.ARRAY), - VARBINARY("VARBINARY", Types.VARBINARY), - TIMESTAMP("TIMESTAMP", Types.TIMESTAMP), - BOOLEAN("BOOLEAN", Types.BOOLEAN), - BIGINT("BIGINT", Types.BIGINT), - UNSIGNED_INT("UNSIGNED_INT", Types.INTEGER), - TINYINT("TINYINT", Types.TINYINT), - JSON("JSON", Types.VARBINARY), - BSON("BSON", Types.VARBINARY); - - private final String sType; - - private final int dType; - - private DataTypeMapping(String sType, int dType) { - this.dType = dType; - this.sType = sType; - } - - @Override - public String toString() { - return this.sType; - } - - public int getdType() { - return dType; - } + VARCHAR("VARCHAR", Types.VARCHAR), + CHAR("CHAR", Types.CHAR), + DECIMAL("DECIMAL", Types.DECIMAL), + INTEGER("INTEGER", Types.INTEGER), + DATE("DATE", Types.DATE), + UNSIGNED_LONG("UNSIGNED_LONG", Types.LONGVARCHAR), + VARCHAR_ARRAY("VARCHAR ARRAY", Types.ARRAY), + VARBINARY("VARBINARY", Types.VARBINARY), + TIMESTAMP("TIMESTAMP", Types.TIMESTAMP), + BOOLEAN("BOOLEAN", Types.BOOLEAN), + BIGINT("BIGINT", Types.BIGINT), + UNSIGNED_INT("UNSIGNED_INT", Types.INTEGER), + TINYINT("TINYINT", Types.TINYINT), + JSON("JSON", Types.VARBINARY), + BSON("BSON", Types.VARBINARY); + + private final String sType; + + private final int dType; + + private DataTypeMapping(String sType, int dType) { + this.dType = dType; + this.sType = sType; + } + + @Override + public String toString() { + return this.sType; + } + + public int getdType() { + return dType; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Ddl.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Ddl.java index b60508d2d98..9c9a5edf16c 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Ddl.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Ddl.java @@ -1,78 +1,77 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import javax.xml.bind.annotation.XmlAttribute; public class Ddl { - private String statement; - private String tableName; - private boolean useGlobalConnection; + private String statement; + private String tableName; + private boolean useGlobalConnection; + + public Ddl() { + } + + public Ddl(String statement, String tableName) { + this.statement = statement; + this.tableName = tableName; + } + + /** + * DDL + */ + @XmlAttribute + public String getStatement() { + return statement; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + /** + * Table name used in the DDL + */ + @XmlAttribute + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + @XmlAttribute + public boolean isUseGlobalConnection() { + return useGlobalConnection; + } - public Ddl() { - } - - public Ddl(String statement, String tableName) { - this.statement = statement; - this.tableName = tableName; - } - - /** - * DDL - * @return - */ - @XmlAttribute - public String getStatement() { - return statement; - } - public void setStatement(String statement) { - this.statement = statement; - } - - /** - * Table name used in the DDL - * @return - */ - @XmlAttribute - public String getTableName() { - return tableName; - } - public void setTableName(String tableName) { - this.tableName = tableName; - } + public void setUseGlobalConnection(boolean useGlobalConnection) { + this.useGlobalConnection = useGlobalConnection; + } - @XmlAttribute - public boolean isUseGlobalConnection() { - return useGlobalConnection; - } + public String toString() { + if (statement.contains("?")) { + return statement.replace("?", tableName); + } else { + return statement; + } - public void setUseGlobalConnection(boolean useGlobalConnection) { - this.useGlobalConnection = useGlobalConnection; - } + } - public String toString(){ - if (statement.contains("?")) { - return statement.replace("?", tableName); - } else { - return statement; - } - - } - } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/ExecutionType.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/ExecutionType.java index 998aa717b80..6e7e1f3534b 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/ExecutionType.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/ExecutionType.java @@ -1,23 +1,23 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; public enum ExecutionType { - SERIAL, PARALLEL; + SERIAL, + PARALLEL; } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/IdleTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/IdleTime.java index 37d6e15b847..6ac410d02b8 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/IdleTime.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/IdleTime.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import javax.xml.bind.annotation.XmlAttribute; @@ -24,24 +23,24 @@ @XmlType public class IdleTime { - private String id; - private long idleTime = 0; + private String id; + private long idleTime = 0; - @XmlAttribute - public String getId() { - return id; - } + @XmlAttribute + public String getId() { + return id; + } - public void setId(String id) { - this.id = id; - } + public void setId(String id) { + this.id = id; + } - @XmlAttribute - public long getIdleTime() { - return idleTime; - } + @XmlAttribute + public long getIdleTime() { + return idleTime; + } - public void setIdleTime(long idleTime) { - this.idleTime = idleTime; - } + public void setIdleTime(long idleTime) { + this.idleTime = idleTime; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/LoadProfile.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/LoadProfile.java index c66bbee9fed..789de910f85 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/LoadProfile.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/LoadProfile.java @@ -1,121 +1,115 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; -import javax.xml.bind.annotation.XmlType; import java.util.List; +import javax.xml.bind.annotation.XmlType; + @XmlType public class LoadProfile { - private static final int MIN_BATCH_SIZE = 1; - private static final String DEFAULT_TENANT_ID_FMT = "T%s%08d"; - private static final int DEFAULT_GROUP_ID_LEN = 6; - private static final int DEFAULT_TENANT_ID_LEN = 15; - - // Holds the batch size to be used in upserts. - private int batchSize; - // Holds the number of operations to be generated. - private long numOperations; - /** - * Holds the format to be used when generating tenantIds. - * TenantId format should typically have 2 parts - - * 1. string fmt - that hold the tenant group id. - * 2. int fmt - that holds a random number between 1 and max tenants - * for e.g DEFAULT_TENANT_ID_FMT = "T%s%08d"; - * - * When the Tenant Group is configured to use a global connection, - * for now this is modelled as a special tenant whose id will translate to "TGLOBAL00000001" - * since the group id => "GLOBAL" and num tenants = 1. - * For now this is a hack/temporary workaround. - * - * TODO : - * Ideally it needs to be built into the framework and injected during event generation. - */ - private String tenantIdFormat; - private int groupIdLength; - private int tenantIdLength; - // Holds the desired tenant distribution for this load. - private List tenantDistribution; - // Holds the desired operation distribution for this load. 
- private List opDistribution; - - public LoadProfile() { - this.batchSize = MIN_BATCH_SIZE; - this.numOperations = Long.MAX_VALUE; - this.tenantIdFormat = DEFAULT_TENANT_ID_FMT; - this.tenantIdLength = DEFAULT_TENANT_ID_LEN; - this.groupIdLength = DEFAULT_GROUP_ID_LEN; - } - - public String getTenantIdFormat() { - return tenantIdFormat; - } - - public void setTenantIdFormat(String tenantIdFormat) { - this.tenantIdFormat = tenantIdFormat; - } - - public int getTenantIdLength() { - return tenantIdLength; - } - - public void setTenantIdLength(int tenantIdLength) { - this.tenantIdLength = tenantIdLength; - } - - public int getGroupIdLength() { - return groupIdLength; - } - - public void setGroupIdLength(int groupIdLength) { - this.groupIdLength = groupIdLength; - } - - public int getBatchSize() { - return batchSize; - } - - public void setBatchSize(int batchSize) { - this.batchSize = batchSize; - } - - public long getNumOperations() { - return numOperations; - } - - public void setNumOperations(long numOperations) { - this.numOperations = numOperations; - } - - public List getTenantDistribution() { - return tenantDistribution; - } - - public void setTenantDistribution(List tenantDistribution) { - this.tenantDistribution = tenantDistribution; - } - - public List getOpDistribution() { - return opDistribution; - } - - public void setOpDistribution(List opDistribution) { - this.opDistribution = opDistribution; - } + private static final int MIN_BATCH_SIZE = 1; + private static final String DEFAULT_TENANT_ID_FMT = "T%s%08d"; + private static final int DEFAULT_GROUP_ID_LEN = 6; + private static final int DEFAULT_TENANT_ID_LEN = 15; + + // Holds the batch size to be used in upserts. + private int batchSize; + // Holds the number of operations to be generated. + private long numOperations; + /** + * Holds the format to be used when generating tenantIds. TenantId format should typically have 2 + * parts - 1. string fmt - that hold the tenant group id. 2. int fmt - that holds a random number + * between 1 and max tenants for e.g DEFAULT_TENANT_ID_FMT = "T%s%08d"; When the Tenant Group is + * configured to use a global connection, for now this is modelled as a special tenant whose id + * will translate to "TGLOBAL00000001" since the group id => "GLOBAL" and num tenants = 1. For now + * this is a hack/temporary workaround. TODO : Ideally it needs to be built into the framework and + * injected during event generation. + */ + private String tenantIdFormat; + private int groupIdLength; + private int tenantIdLength; + // Holds the desired tenant distribution for this load. + private List tenantDistribution; + // Holds the desired operation distribution for this load. 
+ private List opDistribution; + + public LoadProfile() { + this.batchSize = MIN_BATCH_SIZE; + this.numOperations = Long.MAX_VALUE; + this.tenantIdFormat = DEFAULT_TENANT_ID_FMT; + this.tenantIdLength = DEFAULT_TENANT_ID_LEN; + this.groupIdLength = DEFAULT_GROUP_ID_LEN; + } + + public String getTenantIdFormat() { + return tenantIdFormat; + } + + public void setTenantIdFormat(String tenantIdFormat) { + this.tenantIdFormat = tenantIdFormat; + } + + public int getTenantIdLength() { + return tenantIdLength; + } + + public void setTenantIdLength(int tenantIdLength) { + this.tenantIdLength = tenantIdLength; + } + + public int getGroupIdLength() { + return groupIdLength; + } + + public void setGroupIdLength(int groupIdLength) { + this.groupIdLength = groupIdLength; + } + + public int getBatchSize() { + return batchSize; + } + + public void setBatchSize(int batchSize) { + this.batchSize = batchSize; + } + + public long getNumOperations() { + return numOperations; + } + + public void setNumOperations(long numOperations) { + this.numOperations = numOperations; + } + + public List getTenantDistribution() { + return tenantDistribution; + } + + public void setTenantDistribution(List tenantDistribution) { + this.tenantDistribution = tenantDistribution; + } + + public List getOpDistribution() { + return opDistribution; + } + + public void setOpDistribution(List opDistribution) { + this.opDistribution = opDistribution; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/OperationGroup.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/OperationGroup.java index 31545b279dc..e5c4dc2f423 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/OperationGroup.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/OperationGroup.java @@ -15,30 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import javax.xml.bind.annotation.XmlAttribute; public class OperationGroup { - private String id; - private int weight; + private String id; + private int weight; - @XmlAttribute - public String getId() { - return id; - } + @XmlAttribute + public String getId() { + return id; + } - public void setId(String id) { - this.id = id; - } + public void setId(String id) { + this.id = id; + } - @XmlAttribute - public int getWeight() { - return weight; - } + @XmlAttribute + public int getWeight() { + return weight; + } - public void setWeight(int weight) { - this.weight = weight; - } + public void setWeight(int weight) { + this.weight = weight; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java index 8172d2c8305..4cce0fc0a05 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Query.java @@ -1,185 +1,169 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; -import org.apache.phoenix.pherf.rules.RulesApplier; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlType; -import java.util.regex.Matcher; -import java.util.regex.Pattern; + +import org.apache.phoenix.pherf.rules.RulesApplier; @XmlType public class Query { - private String id; - private String queryGroup; - private String tenantId; - private String statement; - private Long expectedAggregateRowCount; - private String ddl; - private boolean useGlobalConnection; - private Pattern pattern; - private long timeoutDuration = Long.MAX_VALUE; - - public Query() { - pattern = Pattern.compile("\\[.*?\\]"); - } - - /** - * SQL statement - * - * @return - */ - @XmlAttribute - public String getStatement() { - return statement; - } - - public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario) - throws Exception { - String ret = this.statement; - String needQuotes = ""; - Matcher m = pattern.matcher(ret); - while (m.find()) { - String dynamicField = m.group(0).replace("[", "").replace("]", ""); - Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario); - // For Json we can have queries like info[5].name and it should not match - if (dynamicColumn != null && dynamicColumn.getType() != null) { - needQuotes = (dynamicColumn.getType() == DataTypeMapping.CHAR - || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? "'" : ""; - ret = ret.replace("[" + dynamicField + "]", - needQuotes + ruleApplier.getDataValue(dynamicColumn) - .getValue() + needQuotes); - } - } - return ret; - } - - public void setStatement(String statement) { - // normalize statement - merge all consecutive spaces into one - this.statement = statement.replaceAll("\\s+", " "); - } - - /** - * Tenant Id used by connection of this query - * - * @return - */ - @XmlAttribute - public String getTenantId() { - return tenantId; - } - - public void setTenantId(String tenantId) { - this.tenantId = tenantId; - } - - /** - * Expected aggregate row count is matched if specified - * - * @return - */ - @XmlAttribute - public Long getExpectedAggregateRowCount() { - return expectedAggregateRowCount; - } - - public void setExpectedAggregateRowCount(Long expectedAggregateRowCount) { - this.expectedAggregateRowCount = expectedAggregateRowCount; - } - - /** - * DDL is executed only once. 
If tenantId is specified then DDL is executed with tenant - * specific connection. - * - * @return - */ - @XmlAttribute - public String getDdl() { - return ddl; - } - - public void setDdl(String ddl) { - this.ddl = ddl; - } - - /** - * queryGroup attribute is just a string value to help correlate queries across sets or files. - * This helps to make sense of reporting results. - * - * @return the group id - */ - @XmlAttribute - public String getQueryGroup() { - return queryGroup; - } - - public void setQueryGroup(String queryGroup) { - this.queryGroup = queryGroup; - } - - /** - * Set hint to query - * - * @param queryHint - */ - public void setHint(String queryHint) { - if (null != queryHint) { - this.statement = - this.statement.toUpperCase() - .replace("SELECT ", "SELECT /*+ " + queryHint + "*/ "); - } - } - - /** - * Query ID, Use UUID if none specified - * - * @return - */ - @XmlAttribute - public String getId() { - if (null == this.id) { - this.id = java.util.UUID.randomUUID().toString(); - } - return id; - } - - public void setId(String id) { - this.id = id; - } - - @XmlAttribute - public boolean isUseGlobalConnection() { - return useGlobalConnection; - } - - public void setUseGlobalConnection(boolean useGlobalConnection) { - this.useGlobalConnection = useGlobalConnection; - } - - @XmlAttribute - public long getTimeoutDuration() { - return this.timeoutDuration; - } - - public void setTimeoutDuration(long timeoutDuration) { - this.timeoutDuration = timeoutDuration; - } + private String id; + private String queryGroup; + private String tenantId; + private String statement; + private Long expectedAggregateRowCount; + private String ddl; + private boolean useGlobalConnection; + private Pattern pattern; + private long timeoutDuration = Long.MAX_VALUE; + + public Query() { + pattern = Pattern.compile("\\[.*?\\]"); + } + + /** + * SQL statement + */ + @XmlAttribute + public String getStatement() { + return statement; + } + + public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario) throws Exception { + String ret = this.statement; + String needQuotes = ""; + Matcher m = pattern.matcher(ret); + while (m.find()) { + String dynamicField = m.group(0).replace("[", "").replace("]", ""); + Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario); + // For Json we can have queries like info[5].name and it should not match + if (dynamicColumn != null && dynamicColumn.getType() != null) { + needQuotes = (dynamicColumn.getType() == DataTypeMapping.CHAR + || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? "'" : ""; + ret = ret.replace("[" + dynamicField + "]", + needQuotes + ruleApplier.getDataValue(dynamicColumn).getValue() + needQuotes); + } + } + return ret; + } + + public void setStatement(String statement) { + // normalize statement - merge all consecutive spaces into one + this.statement = statement.replaceAll("\\s+", " "); + } + + /** + * Tenant Id used by connection of this query + */ + @XmlAttribute + public String getTenantId() { + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + /** + * Expected aggregate row count is matched if specified + */ + @XmlAttribute + public Long getExpectedAggregateRowCount() { + return expectedAggregateRowCount; + } + + public void setExpectedAggregateRowCount(Long expectedAggregateRowCount) { + this.expectedAggregateRowCount = expectedAggregateRowCount; + } + + /** + * DDL is executed only once. 
If tenantId is specified then DDL is executed with tenant specific + * connection. + */ + @XmlAttribute + public String getDdl() { + return ddl; + } + + public void setDdl(String ddl) { + this.ddl = ddl; + } + + /** + * queryGroup attribute is just a string value to help correlate queries across sets or files. + * This helps to make sense of reporting results. + * @return the group id + */ + @XmlAttribute + public String getQueryGroup() { + return queryGroup; + } + + public void setQueryGroup(String queryGroup) { + this.queryGroup = queryGroup; + } + + /** + * Set hint to query + */ + public void setHint(String queryHint) { + if (null != queryHint) { + this.statement = + this.statement.toUpperCase().replace("SELECT ", "SELECT /*+ " + queryHint + "*/ "); + } + } + + /** + * Query ID, Use UUID if none specified + */ + @XmlAttribute + public String getId() { + if (null == this.id) { + this.id = java.util.UUID.randomUUID().toString(); + } + return id; + } + + public void setId(String id) { + this.id = id; + } + + @XmlAttribute + public boolean isUseGlobalConnection() { + return useGlobalConnection; + } + + public void setUseGlobalConnection(boolean useGlobalConnection) { + this.useGlobalConnection = useGlobalConnection; + } + + @XmlAttribute + public long getTimeoutDuration() { + return this.timeoutDuration; + } + + public void setTimeoutDuration(long timeoutDuration) { + this.timeoutDuration = timeoutDuration; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/QuerySet.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/QuerySet.java index 17d415354c4..f34f9028a3a 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/QuerySet.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/QuerySet.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.configuration; import java.util.ArrayList; @@ -26,115 +25,105 @@ import org.apache.phoenix.pherf.PherfConstants; public class QuerySet { - private List query = new ArrayList(); - private String concurrency = PherfConstants.DEFAULT_CONCURRENCY; - private long numberOfExecutions = PherfConstants.DEFAULT_NUMBER_OF_EXECUTIONS; - private long executionDurationInMs = PherfConstants.DEFAULT_THREAD_DURATION_IN_MS; - private ExecutionType executionType = ExecutionType.SERIAL; - - /** - * List of queries in each query set - * @return - */ - public List getQuery() { - return query; - } - - @SuppressWarnings("unused") - public void setQuery(List query) { - this.query = query; - } - - /** - * Target concurrency. - * This can be set as a range. Example: - * 3 - * 1-4 - * @return - */ - @XmlAttribute - public String getConcurrency() { - return concurrency; - } - - public void setConcurrency(String concurrency) { - this.concurrency = concurrency; - } - - /** - * Number of execution of query per thread. Minimum of either number of executions - * or execution duration is taken for each thread run - * @return - */ - @XmlAttribute - public long getNumberOfExecutions() { - return numberOfExecutions; - } - - public void setNumberOfExecutions(long numberOfExecutions) { - this.numberOfExecutions = numberOfExecutions; - } - - /** - * Minimum concurrency level for a query set - * @return - */ - public int getMinConcurrency() { - return getConcurrencyMinMax(0); - } - - /** - * Maximum concurrency for a query set - * @return - */ - public int getMaxConcurrency() { - return getConcurrencyMinMax(1); - } - - private int getConcurrencyMinMax(int idx) { - if (null == getConcurrency()) { - return 1; - } - String[] concurrencySplit = getConcurrency().split("-"); - if (concurrencySplit.length == 2) { - return Integer.parseInt(concurrencySplit[idx]); - } - return Integer.parseInt(getConcurrency()); - } - - /** - * This can be either SERIAL or PARALLEL - * @return - */ - @XmlAttribute - public ExecutionType getExecutionType() { - return executionType; - } - - public void setExecutionType(ExecutionType executionType) { - this.executionType = executionType; - } - - /** - * Execution duration of query per thread. Minimum of either number of executions - * or execution duration is taken for each thread run - * @return - */ - @XmlAttribute - public long getExecutionDurationInMs() { - return executionDurationInMs; - } - - public void setExecutionDurationInMs(long executionDurationInMs) { - this.executionDurationInMs = executionDurationInMs; - } - - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(); - for (Query q : query) { - stringBuilder.append(q.getStatement() + ","); - } - return stringBuilder.toString(); + private List query = new ArrayList(); + private String concurrency = PherfConstants.DEFAULT_CONCURRENCY; + private long numberOfExecutions = PherfConstants.DEFAULT_NUMBER_OF_EXECUTIONS; + private long executionDurationInMs = PherfConstants.DEFAULT_THREAD_DURATION_IN_MS; + private ExecutionType executionType = ExecutionType.SERIAL; + + /** + * List of queries in each query set + */ + public List getQuery() { + return query; + } + + @SuppressWarnings("unused") + public void setQuery(List query) { + this.query = query; + } + + /** + * Target concurrency. This can be set as a range. 
Example: 3 1-4 + */ + @XmlAttribute + public String getConcurrency() { + return concurrency; + } + + public void setConcurrency(String concurrency) { + this.concurrency = concurrency; + } + + /** + * Number of execution of query per thread. Minimum of either number of executions or execution + * duration is taken for each thread run + */ + @XmlAttribute + public long getNumberOfExecutions() { + return numberOfExecutions; + } + + public void setNumberOfExecutions(long numberOfExecutions) { + this.numberOfExecutions = numberOfExecutions; + } + + /** + * Minimum concurrency level for a query set + */ + public int getMinConcurrency() { + return getConcurrencyMinMax(0); + } + + /** + * Maximum concurrency for a query set + */ + public int getMaxConcurrency() { + return getConcurrencyMinMax(1); + } + + private int getConcurrencyMinMax(int idx) { + if (null == getConcurrency()) { + return 1; + } + String[] concurrencySplit = getConcurrency().split("-"); + if (concurrencySplit.length == 2) { + return Integer.parseInt(concurrencySplit[idx]); + } + return Integer.parseInt(getConcurrency()); + } + + /** + * This can be either SERIAL or PARALLEL + */ + @XmlAttribute + public ExecutionType getExecutionType() { + return executionType; + } + + public void setExecutionType(ExecutionType executionType) { + this.executionType = executionType; + } + + /** + * Execution duration of query per thread. Minimum of either number of executions or execution + * duration is taken for each thread run + */ + @XmlAttribute + public long getExecutionDurationInMs() { + return executionDurationInMs; + } + + public void setExecutionDurationInMs(long executionDurationInMs) { + this.executionDurationInMs = executionDurationInMs; + } + + @Override + public String toString() { + StringBuilder stringBuilder = new StringBuilder(); + for (Query q : query) { + stringBuilder.append(q.getStatement() + ","); } + return stringBuilder.toString(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java index 3834de5187a..69a9cded5ee 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import java.util.ArrayList; @@ -27,268 +26,241 @@ import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; @XmlRootElement(namespace = "org.apache.phoenix.pherf.configuration.DataModel") public class Scenario { - private String tableName; - private int rowCount; - private Map phoenixProperties; - private WriteParams writeParams = null; - private DataOverride dataOverride; - private List querySet = new ArrayList<>(); - private List upsertSet = new ArrayList<>(); - private List idleTimes = new ArrayList<>(); - private List udfs = new ArrayList<>(); - private LoadProfile loadProfile = null; - - private String name; - private String generatorName; - private String tenantId; - private List preScenarioDdls; - private List postScenarioDdls; - - public Scenario() { - } - - /** - * Scenarios have to have unique table names - * - * @param object - * @return - */ - @Override - public boolean equals(Object object) { - if (!(object instanceof Scenario)) { - return false; - } - Scenario scenario = (Scenario) object; - return (this.tableName.equals(scenario.getTableName())); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(11, 38).appendSuper(super.hashCode()) - .append(tableName) - .toHashCode(); - } - - /** - * Table name for a scenario - * - * @return - */ - @XmlAttribute() - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * Generator name for a scenario - * - * @return - */ - @XmlAttribute() - public String getGeneratorName() { - return generatorName; - } - - public void setGeneratorName(String name) { - this.generatorName = name; - } - - /** - * Row count for a table - * - * @return - */ - @XmlAttribute() - public int getRowCount() { - return PhoenixUtil.getRowCountOverride() == 0 ? 
- rowCount : PhoenixUtil.getRowCountOverride(); - } - - public void setRowCount(int rowCount) { - this.rowCount = rowCount; - } - - /** - * Phoenix properties - * - * @return - */ - public Map getPhoenixProperties() { - return phoenixProperties; - } - - public void setPhoenixProperties(Map phoenixProperty) { - this.phoenixProperties = phoenixProperty; - } - - /** - * Data override - * - * @return - */ - @XmlElement() - public DataOverride getDataOverride() { - return dataOverride; - } - - public void setDataOverride(DataOverride dataOverride) { - this.dataOverride = dataOverride; - } - - /** - * List of Query Set - * - * @return - */ - public List getQuerySet() { - return querySet; - } - - @SuppressWarnings("unused") - public void setQuerySet(List querySet) { - this.querySet = querySet; - } - - /** - * Extract schema name from table name - * - * @return - */ - public String getSchemaName() { - return XMLConfigParser.parseSchemaName(this.tableName); - } - - /** - * Extract table name without schema name - * - * @return - */ - public String getTableNameWithoutSchemaName() { - return XMLConfigParser.parseTableName(this.tableName); - } - - /** - * Name of scenario - * - * @return - */ - @XmlAttribute() - public String getName() { - Preconditions.checkNotNull(name); - return name; - } - - public void setName(String name) { - this.name = name; - } - - /** - * Tenant Id used by connection of this query - * @return - */ - @XmlAttribute - public String getTenantId() { - return tenantId; - } - - public void setTenantId(String tenantId) { - this.tenantId = tenantId; - } - - public WriteParams getWriteParams() { - return writeParams; - } - - public void setWriteParams(WriteParams writeParams) { - this.writeParams = writeParams; - } - - - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("Name:" + name); - stringBuilder.append("Table Name:" + tableName); - stringBuilder.append("Row Count:" + rowCount); - stringBuilder.append("Data Override:" + dataOverride); - for (QuerySet query : querySet) { - stringBuilder.append(query + ";"); - } - return stringBuilder.toString(); - } - - public List getPreScenarioDdls() { - return preScenarioDdls; - } - - /** - * Scenario level DDLs (for views/index/async) that are executed before data load - */ - @XmlElementWrapper(name = "preScenarioDdls") - @XmlElement(name = "ddl") - public void setPreScenarioDdls(List preScenarioDdls) { - this.preScenarioDdls = preScenarioDdls; - } - - public List getPostScenarioDdls() { - return postScenarioDdls; - } - - /** - * Scenario level DDLs (for views/index/async) that are executed after data load - */ - @XmlElementWrapper(name = "postScenarioDdls") - @XmlElement(name = "ddl") - public void setPostScenarioDdls(List postScenarioDdls) { - this.postScenarioDdls = postScenarioDdls; - } - - public List getUpserts() { - return upsertSet; - } - - @XmlElementWrapper(name = "upserts") - @XmlElement(name = "upsert") - public void setUpserts(List upsertSet) { - this.upsertSet = upsertSet; - } - - public List getIdleTimes() { - return idleTimes; - } - - @XmlElementWrapper(name = "idleTimes") - @XmlElement(name = "idleTime") - public void setIdleTimes(List idleTimes) { - this.idleTimes = idleTimes; - } - - public List getUdfs() { - return udfs; - } - - @XmlElementWrapper(name = "udfs") - @XmlElement(name = "udf") - public void setUdfs(List udfs) { - this.udfs = udfs; - } - - - public LoadProfile getLoadProfile() { - return loadProfile; - } - - public void setLoadProfile(LoadProfile 
loadProfile) { - this.loadProfile = loadProfile; - } + private String tableName; + private int rowCount; + private Map phoenixProperties; + private WriteParams writeParams = null; + private DataOverride dataOverride; + private List querySet = new ArrayList<>(); + private List upsertSet = new ArrayList<>(); + private List idleTimes = new ArrayList<>(); + private List udfs = new ArrayList<>(); + private LoadProfile loadProfile = null; + + private String name; + private String generatorName; + private String tenantId; + private List preScenarioDdls; + private List postScenarioDdls; + + public Scenario() { + } + + /** + * Scenarios have to have unique table names + */ + @Override + public boolean equals(Object object) { + if (!(object instanceof Scenario)) { + return false; + } + Scenario scenario = (Scenario) object; + return (this.tableName.equals(scenario.getTableName())); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(11, 38).appendSuper(super.hashCode()).append(tableName).toHashCode(); + } + + /** + * Table name for a scenario + */ + @XmlAttribute() + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * Generator name for a scenario + */ + @XmlAttribute() + public String getGeneratorName() { + return generatorName; + } + + public void setGeneratorName(String name) { + this.generatorName = name; + } + + /** + * Row count for a table + */ + @XmlAttribute() + public int getRowCount() { + return PhoenixUtil.getRowCountOverride() == 0 ? rowCount : PhoenixUtil.getRowCountOverride(); + } + + public void setRowCount(int rowCount) { + this.rowCount = rowCount; + } + + /** + * Phoenix properties + */ + public Map getPhoenixProperties() { + return phoenixProperties; + } + + public void setPhoenixProperties(Map phoenixProperty) { + this.phoenixProperties = phoenixProperty; + } + + /** + * Data override + */ + @XmlElement() + public DataOverride getDataOverride() { + return dataOverride; + } + + public void setDataOverride(DataOverride dataOverride) { + this.dataOverride = dataOverride; + } + + /** + * List of Query Set + */ + public List getQuerySet() { + return querySet; + } + + @SuppressWarnings("unused") + public void setQuerySet(List querySet) { + this.querySet = querySet; + } + + /** + * Extract schema name from table name + */ + public String getSchemaName() { + return XMLConfigParser.parseSchemaName(this.tableName); + } + + /** + * Extract table name without schema name + */ + public String getTableNameWithoutSchemaName() { + return XMLConfigParser.parseTableName(this.tableName); + } + + /** + * Name of scenario + */ + @XmlAttribute() + public String getName() { + Preconditions.checkNotNull(name); + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * Tenant Id used by connection of this query + */ + @XmlAttribute + public String getTenantId() { + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + public WriteParams getWriteParams() { + return writeParams; + } + + public void setWriteParams(WriteParams writeParams) { + this.writeParams = writeParams; + } + + @Override + public String toString() { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("Name:" + name); + stringBuilder.append("Table Name:" + tableName); + stringBuilder.append("Row Count:" + rowCount); + stringBuilder.append("Data Override:" + dataOverride); + for (QuerySet query : querySet) { + 
stringBuilder.append(query + ";"); + } + return stringBuilder.toString(); + } + + public List getPreScenarioDdls() { + return preScenarioDdls; + } + + /** + * Scenario level DDLs (for views/index/async) that are executed before data load + */ + @XmlElementWrapper(name = "preScenarioDdls") + @XmlElement(name = "ddl") + public void setPreScenarioDdls(List preScenarioDdls) { + this.preScenarioDdls = preScenarioDdls; + } + + public List getPostScenarioDdls() { + return postScenarioDdls; + } + + /** + * Scenario level DDLs (for views/index/async) that are executed after data load + */ + @XmlElementWrapper(name = "postScenarioDdls") + @XmlElement(name = "ddl") + public void setPostScenarioDdls(List postScenarioDdls) { + this.postScenarioDdls = postScenarioDdls; + } + + public List getUpserts() { + return upsertSet; + } + + @XmlElementWrapper(name = "upserts") + @XmlElement(name = "upsert") + public void setUpserts(List upsertSet) { + this.upsertSet = upsertSet; + } + + public List getIdleTimes() { + return idleTimes; + } + + @XmlElementWrapper(name = "idleTimes") + @XmlElement(name = "idleTime") + public void setIdleTimes(List idleTimes) { + this.idleTimes = idleTimes; + } + + public List getUdfs() { + return udfs; + } + + @XmlElementWrapper(name = "udfs") + @XmlElement(name = "udf") + public void setUdfs(List udfs) { + this.udfs = udfs; + } + + public LoadProfile getLoadProfile() { + return loadProfile; + } + + public void setLoadProfile(LoadProfile loadProfile) { + this.loadProfile = loadProfile; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/TenantGroup.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/TenantGroup.java index d066cd5da21..a08ee7ecab7 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/TenantGroup.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/TenantGroup.java @@ -15,48 +15,51 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import javax.xml.bind.annotation.XmlAttribute; public class TenantGroup { - public static final String DEFAULT_GLOBAL_ID = "GLOBAL"; - private String id; - private int weight; - private int numTenants; - private boolean useGlobalConnection; - - @XmlAttribute - public String getId() { - return useGlobalConnection ? DEFAULT_GLOBAL_ID: id; - } - - public void setId(String id) { - this.id = id; - } - - @XmlAttribute - public int getWeight() { - return useGlobalConnection ? 100 : weight; - } - - public void setWeight(int weight) { - this.weight = weight; - } - - @XmlAttribute - public int getNumTenants() { return useGlobalConnection ? 1 : numTenants; } - - public void setNumTenants(int numTenants) { this.numTenants = numTenants; } - - @XmlAttribute - public boolean isUseGlobalConnection() { - return useGlobalConnection; - } - - public void setUseGlobalConnection(boolean useGlobalConnection) { - this.useGlobalConnection = useGlobalConnection; - } + public static final String DEFAULT_GLOBAL_ID = "GLOBAL"; + private String id; + private int weight; + private int numTenants; + private boolean useGlobalConnection; + + @XmlAttribute + public String getId() { + return useGlobalConnection ? DEFAULT_GLOBAL_ID : id; + } + + public void setId(String id) { + this.id = id; + } + + @XmlAttribute + public int getWeight() { + return useGlobalConnection ? 
100 : weight; + } + + public void setWeight(int weight) { + this.weight = weight; + } + + @XmlAttribute + public int getNumTenants() { + return useGlobalConnection ? 1 : numTenants; + } + + public void setNumTenants(int numTenants) { + this.numTenants = numTenants; + } + + @XmlAttribute + public boolean isUseGlobalConnection() { + return useGlobalConnection; + } + + public void setUseGlobalConnection(boolean useGlobalConnection) { + this.useGlobalConnection = useGlobalConnection; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java index b2a7766492b..50dcdf605b4 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Upsert.java @@ -1,129 +1,122 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; -import org.apache.phoenix.pherf.rules.RulesApplier; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - -import javax.xml.bind.annotation.XmlAttribute; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -public class Upsert { - - private String id; - private String upsertGroup; - private String statement; - private List column; - private boolean useGlobalConnection; - private Pattern pattern; - private long timeoutDuration = Long.MAX_VALUE; - - public Upsert() { - pattern = Pattern.compile("\\[.*?\\]"); - } - - public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario) - throws Exception { - String ret = this.statement; - String needQuotes = ""; - Matcher m = pattern.matcher(ret); - while (m.find()) { - String dynamicField = m.group(0).replace("[", "").replace("]", ""); - Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario); - needQuotes = - (dynamicColumn.getType() == DataTypeMapping.CHAR - || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? 
"'" : ""; - ret = ret.replace("[" + dynamicField + "]", - needQuotes + ruleApplier.getDataValue(dynamicColumn).getValue() - + needQuotes); - } - return ret; - } - - /** - * upsertGroup attribute is just a string value to help correlate upserts across sets or files. - * This helps to make sense of reporting results. - * - * @return the group id - */ - @XmlAttribute - public String getUpsertGroup() { - return upsertGroup; - } - - public void setUpsertGroup(String upsertGroup) { - this.upsertGroup = upsertGroup; - } - - - /** - * Upsert ID, Use UUID if none specified - * - * @return - */ - @XmlAttribute - public String getId() { - if (null == this.id) { - this.id = java.util.UUID.randomUUID().toString(); - } - return id; - } - - public void setId(String id) { - this.id = id; - } - - public List getColumn() { - if (column == null) return Lists.newArrayList(); - return column; - } - - public void setColumn(List column) { - this.column = column; - } - - @XmlAttribute - public boolean isUseGlobalConnection() { - return useGlobalConnection; - } - - public void setUseGlobalConnection(boolean useGlobalConnection) { - this.useGlobalConnection = useGlobalConnection; - } +import javax.xml.bind.annotation.XmlAttribute; - @XmlAttribute - public long getTimeoutDuration() { - return this.timeoutDuration; - } +import org.apache.phoenix.pherf.rules.RulesApplier; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - public void setTimeoutDuration(long timeoutDuration) { - this.timeoutDuration = timeoutDuration; - } +public class Upsert { - public String getStatement() { - return statement; + private String id; + private String upsertGroup; + private String statement; + private List column; + private boolean useGlobalConnection; + private Pattern pattern; + private long timeoutDuration = Long.MAX_VALUE; + + public Upsert() { + pattern = Pattern.compile("\\[.*?\\]"); + } + + public String getDynamicStatement(RulesApplier ruleApplier, Scenario scenario) throws Exception { + String ret = this.statement; + String needQuotes = ""; + Matcher m = pattern.matcher(ret); + while (m.find()) { + String dynamicField = m.group(0).replace("[", "").replace("]", ""); + Column dynamicColumn = ruleApplier.getRule(dynamicField, scenario); + needQuotes = (dynamicColumn.getType() == DataTypeMapping.CHAR + || dynamicColumn.getType() == DataTypeMapping.VARCHAR) ? "'" : ""; + ret = ret.replace("[" + dynamicField + "]", + needQuotes + ruleApplier.getDataValue(dynamicColumn).getValue() + needQuotes); } - - public void setStatement(String statement) { - // normalize statement - merge all consecutive spaces into one - this.statement = statement.replaceAll("\\s+", " "); + return ret; + } + + /** + * upsertGroup attribute is just a string value to help correlate upserts across sets or files. + * This helps to make sense of reporting results. 
+ * @return the group id + */ + @XmlAttribute + public String getUpsertGroup() { + return upsertGroup; + } + + public void setUpsertGroup(String upsertGroup) { + this.upsertGroup = upsertGroup; + } + + /** + * Upsert ID, Use UUID if none specified + */ + @XmlAttribute + public String getId() { + if (null == this.id) { + this.id = java.util.UUID.randomUUID().toString(); } + return id; + } + + public void setId(String id) { + this.id = id; + } + + public List getColumn() { + if (column == null) return Lists.newArrayList(); + return column; + } + + public void setColumn(List column) { + this.column = column; + } + + @XmlAttribute + public boolean isUseGlobalConnection() { + return useGlobalConnection; + } + + public void setUseGlobalConnection(boolean useGlobalConnection) { + this.useGlobalConnection = useGlobalConnection; + } + + @XmlAttribute + public long getTimeoutDuration() { + return this.timeoutDuration; + } + + public void setTimeoutDuration(long timeoutDuration) { + this.timeoutDuration = timeoutDuration; + } + + public String getStatement() { + return statement; + } + + public void setStatement(String statement) { + // normalize statement - merge all consecutive spaces into one + this.statement = statement.replaceAll("\\s+", " "); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/UserDefined.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/UserDefined.java index 8350d576037..27317248921 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/UserDefined.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/UserDefined.java @@ -15,41 +15,41 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; +import java.util.List; + import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlType; -import java.util.List; @XmlType public class UserDefined { - String id; - String clazzName; - List args; - - @XmlAttribute - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getClazzName() { - return clazzName; - } - - public void setClazzName(String clazzName) { - this.clazzName = clazzName; - } - - public List getArgs() { - return args; - } - - public void setArgs(List args) { - this.args = args; - } + String id; + String clazzName; + List args; + + @XmlAttribute + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getClazzName() { + return clazzName; + } + + public void setClazzName(String clazzName) { + this.clazzName = clazzName; + } + + public List getArgs() { + return args; + } + + public void setArgs(List args) { + this.args = args; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java index 04be2391a84..197b28e8836 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java @@ -1,72 +1,71 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.configuration; import javax.xml.bind.annotation.XmlAttribute; public class WriteParams { - private int writerThreadCount; - private long threadSleepDuration; - private long batchSize; - private long executionDurationInMs; + private int writerThreadCount; + private long threadSleepDuration; + private long batchSize; + private long executionDurationInMs; - public WriteParams() { - this.batchSize = Long.MIN_VALUE; - this.writerThreadCount = Integer.MIN_VALUE; - this.threadSleepDuration = Long.MIN_VALUE; - this.executionDurationInMs = Long.MAX_VALUE; - } + public WriteParams() { + this.batchSize = Long.MIN_VALUE; + this.writerThreadCount = Integer.MIN_VALUE; + this.threadSleepDuration = Long.MIN_VALUE; + this.executionDurationInMs = Long.MAX_VALUE; + } - public long getThreadSleepDuration() { - return threadSleepDuration; - } + public long getThreadSleepDuration() { + return threadSleepDuration; + } - @SuppressWarnings("unused") - public void setThreadSleepDuration(long threadSleepDuration) { - this.threadSleepDuration = threadSleepDuration; - } + @SuppressWarnings("unused") + public void setThreadSleepDuration(long threadSleepDuration) { + this.threadSleepDuration = threadSleepDuration; + } - public long getBatchSize() { - return batchSize; - } + public long getBatchSize() { + return batchSize; + } - @SuppressWarnings("unused") - public void setBatchSize(long batchSize) { - this.batchSize = batchSize; - } + @SuppressWarnings("unused") + public void setBatchSize(long batchSize) { + this.batchSize = batchSize; + } - public int getWriterThreadCount() { - return writerThreadCount; - } + public int getWriterThreadCount() { + return writerThreadCount; + } - @SuppressWarnings("unused") - public void setWriterThreadCount(int writerThreadCount) { - this.writerThreadCount = writerThreadCount; - } + @SuppressWarnings("unused") + public void setWriterThreadCount(int writerThreadCount) { + this.writerThreadCount = writerThreadCount; + } - @XmlAttribute() - public long getExecutionDurationInMs() { - return executionDurationInMs; - } + @XmlAttribute() + public long getExecutionDurationInMs() { + return executionDurationInMs; + } - @SuppressWarnings("unused") - public void 
setExecutionDurationInMs(long executionDurationInMs) { - this.executionDurationInMs = executionDurationInMs; - } + @SuppressWarnings("unused") + public void setExecutionDurationInMs(long executionDurationInMs) { + this.executionDurationInMs = executionDurationInMs; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java index 104fab5f8c4..9b4ea7f8c56 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.configuration; import java.io.OutputStream; @@ -43,158 +42,157 @@ public class XMLConfigParser { - private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParser.class); - private String filePattern; - private List dataModels; - private List scenarios = null; - private ResourceList resourceList; - private Collection paths = null; - - public XMLConfigParser(String pattern) throws Exception { - init(pattern); + private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParser.class); + private String filePattern; + private List dataModels; + private List scenarios = null; + private ResourceList resourceList; + private Collection paths = null; + + public XMLConfigParser(String pattern) throws Exception { + init(pattern); + } + + public List getDataModels() { + return dataModels; + } + + public DataModel getDataModelByName(String name) { + for (DataModel dataModel : getDataModels()) { + if (dataModel.getName().equals(name)) { + return dataModel; + } } - - public List getDataModels() { - return dataModels; + return null; + } + + public Scenario getScenarioByName(String name) throws Exception { + for (Scenario scenario : getScenarios()) { + if (scenario.getName().equals(name)) { + return scenario; + } } + return null; + } - public DataModel getDataModelByName(String name) { - for (DataModel dataModel : getDataModels()) { - if (dataModel.getName().equals(name)) { - return dataModel; - } - } - return null; + public synchronized Collection getPaths(String strPattern) throws Exception { + if (paths != null) { + return paths; } + paths = getResources(strPattern); + return paths; + } - public Scenario getScenarioByName(String name) throws Exception { - for (Scenario scenario : getScenarios()) { - if (scenario.getName().equals(name)) { - return scenario; - } - } - return null; + public synchronized List getScenarios() throws Exception { + if (scenarios != null) { + return scenarios; } - public synchronized Collection getPaths(String strPattern) throws Exception { - if (paths != null) { - return paths; + scenarios = Collections.synchronizedList(new ArrayList()); + for (Path path : getPaths(getFilePattern())) { + try { + List scenarioList = XMLConfigParser.readDataModel(path).getScenarios(); + for (Scenario scenario : scenarioList) { + scenarios.add(scenario); } - paths = getResources(strPattern); - return paths; + } catch (JAXBException e) { + LOGGER.error("Unable to parse scenario file " + path, e); + throw e; + } } - - public synchronized List getScenarios() throws Exception { - if (scenarios != null) { - return scenarios; - } - - scenarios = Collections.synchronizedList(new ArrayList()); - for (Path path : getPaths(getFilePattern())) { - try { - List scenarioList = XMLConfigParser.readDataModel(path).getScenarios(); - for (Scenario scenario : scenarioList) { - scenarios.add(scenario); - } - } catch (JAXBException e) { - LOGGER.error("Unable to parse scenario file "+path, e); - throw e; - } - } - return scenarios; + return scenarios; + } + + public String getFilePattern() { + return filePattern; + } + + /** + * Unmarshall an XML data file + * @param file Name of File + * @return {@link org.apache.phoenix.pherf.configuration.DataModel} Returns DataModel from XML + * configuration + */ + // TODO Remove static calls + public static DataModel readDataModel(Path file) throws JAXBException, XMLStreamException { + XMLInputFactory xif = XMLInputFactory.newInstance(); + xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); + 
xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); + Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller(); + String fName = PherfConstants.RESOURCE_SCENARIO + "/" + file.getFileName().toString(); + LOGGER.info("Open config file: " + fName); + XMLStreamReader xmlReader = + xif.createXMLStreamReader(new StreamSource(XMLConfigParser.class.getResourceAsStream(fName))); + return (DataModel) jaxbUnmarshaller.unmarshal(xmlReader); + } + + // TODO Remove static calls + public static String parseSchemaName(String fullTableName) { + String ret = null; + if (fullTableName.contains(".")) { + ret = fullTableName.substring(0, fullTableName.indexOf(".")); } - - public String getFilePattern() { - return filePattern; + return ret; + } + + // TODO Remove static calls + public static String parseTableName(String fullTableName) { + String ret = fullTableName; + if (fullTableName.contains(".")) { + ret = fullTableName.substring(fullTableName.indexOf(".") + 1, fullTableName.length()); } - - /** - * Unmarshall an XML data file - * - * @param file Name of File - * @return {@link org.apache.phoenix.pherf.configuration.DataModel} Returns DataModel from - * XML configuration - */ - // TODO Remove static calls - public static DataModel readDataModel(Path file) throws JAXBException, XMLStreamException { - XMLInputFactory xif = XMLInputFactory.newInstance(); - xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); - xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); - JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); - Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller(); - String fName = PherfConstants.RESOURCE_SCENARIO + "/" + file.getFileName().toString(); - LOGGER.info("Open config file: " + fName); - XMLStreamReader xmlReader = xif.createXMLStreamReader( - new StreamSource(XMLConfigParser.class.getResourceAsStream(fName))); - return (DataModel) jaxbUnmarshaller.unmarshal(xmlReader); + // Remove any quotes that may be needed for multi-tenant tables + ret = ret.replaceAll("\"", ""); + return ret; + } + + // TODO Remove static calls + @SuppressWarnings("unused") + public static void writeDataModel(DataModel data, OutputStream output) throws JAXBException { + // create JAXB context and initializing Marshaller + JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); + Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); + + // for getting nice formatted output + jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); + + // Writing to console + jaxbMarshaller.marshal(data, output); + } + + private void init(String pattern) throws Exception { + if (dataModels != null) { + return; } - - // TODO Remove static calls - public static String parseSchemaName(String fullTableName) { - String ret = null; - if (fullTableName.contains(".")) { - ret = fullTableName.substring(0, fullTableName.indexOf(".")); - } - return ret; + this.filePattern = pattern; + this.dataModels = new ArrayList<>(); + this.resourceList = new ResourceList(PherfConstants.RESOURCE_SCENARIO); + this.paths = getResources(this.filePattern); + if (this.paths.isEmpty()) { + throw new FileLoaderException( + "Could not load the resource files using the pattern: " + pattern); } - - // TODO Remove static calls - public static String parseTableName(String fullTableName) { - String ret = fullTableName; - if (fullTableName.contains(".")) { - ret = fullTableName.substring(fullTableName.indexOf(".") + 
1, fullTableName.length()); - } - // Remove any quotes that may be needed for multi-tenant tables - ret = ret.replaceAll("\"", ""); - return ret; - } - - // TODO Remove static calls - @SuppressWarnings("unused") - public static void writeDataModel(DataModel data, OutputStream output) throws JAXBException { - // create JAXB context and initializing Marshaller - JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); - Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); - - // for getting nice formatted output - jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); - - // Writing to console - jaxbMarshaller.marshal(data, output); + for (Path path : this.paths) { + System.out.println("Adding model for path:" + path.toString()); + DataModel dataModel = XMLConfigParser.readDataModel(path); + updateDataValueType(dataModel); + this.dataModels.add(dataModel); } - - private void init(String pattern) throws Exception { - if (dataModels != null) { - return; - } - this.filePattern = pattern; - this.dataModels = new ArrayList<>(); - this.resourceList = new ResourceList(PherfConstants.RESOURCE_SCENARIO); - this.paths = getResources(this.filePattern); - if (this.paths.isEmpty()) { - throw new FileLoaderException( - "Could not load the resource files using the pattern: " + pattern); - } - for (Path path : this.paths) { - System.out.println("Adding model for path:" + path.toString()); - DataModel dataModel = XMLConfigParser.readDataModel(path); - updateDataValueType(dataModel); - this.dataModels.add(dataModel); - } - } - - private void updateDataValueType(DataModel dataModel) { - for (Column column : dataModel.getDataMappingColumns()) { - if (column.getDataValues() != null) { - // DataValue type is inherited from the column - for (DataValue value : column.getDataValues()) { - value.setType(column.getType()); - } - } + } + + private void updateDataValueType(DataModel dataModel) { + for (Column column : dataModel.getDataMappingColumns()) { + if (column.getDataValues() != null) { + // DataValue type is inherited from the column + for (DataValue value : column.getDataValues()) { + value.setType(column.getType()); } + } } + } - private Collection getResources(String pattern) throws Exception { - return resourceList.getResourceList(pattern); - } + private Collection getResources(String pattern) throws Exception { + return resourceList.getResourceList(pattern); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderException.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderException.java index 99bbb0db365..3c7725000ba 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderException.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderException.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.exception; public class FileLoaderException extends PherfException { - public FileLoaderException(String message) throws Exception { - super(message); - } + public FileLoaderException(String message) throws Exception { + super(message); + } - @SuppressWarnings("unused") - public FileLoaderException(String message, Exception e) { - super(message, e); - } -} \ No newline at end of file + @SuppressWarnings("unused") + public FileLoaderException(String message, Exception e) { + super(message, e); + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderRuntimeException.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderRuntimeException.java index e82e8850d84..de5d85cdf2e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderRuntimeException.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/FileLoaderRuntimeException.java @@ -1,29 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.exception; public class FileLoaderRuntimeException extends PherfRuntimeException { - @SuppressWarnings("unused") - public FileLoaderRuntimeException(String message) throws Exception { - super(message); - } - public FileLoaderRuntimeException(String message, Exception e) { - super(message, e); - } + @SuppressWarnings("unused") + public FileLoaderRuntimeException(String message) throws Exception { + super(message); + } + + public FileLoaderRuntimeException(String message, Exception e) { + super(message, e); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfException.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfException.java index 1748b26b397..ccef25a88e9 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfException.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfException.java @@ -1,30 +1,28 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.exception; public class PherfException extends Exception { - public PherfException(String message) throws Exception{ - super(message); - } + public PherfException(String message) throws Exception { + super(message); + } - public PherfException(String message, Exception e) { - super(message, e); - } + public PherfException(String message, Exception e) { + super(message, e); + } } - diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfRuntimeException.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfRuntimeException.java index e12feba3313..9f14da9e1d4 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfRuntimeException.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/exception/PherfRuntimeException.java @@ -1,30 +1,28 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.exception; public class PherfRuntimeException extends RuntimeException { - public PherfRuntimeException(String message) throws Exception{ - super(message); - } + public PherfRuntimeException(String message) throws Exception { + super(message); + } - public PherfRuntimeException(String message, Exception e) { - super(message, e); - } + public PherfRuntimeException(String message, Exception e) { + super(message, e); + } } - diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorDetails.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorDetails.java index 0a25ca41e6c..8f697b1e1a8 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorDetails.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorDetails.java @@ -1,50 +1,52 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx; import org.apache.phoenix.pherf.jmx.monitors.*; public enum MonitorDetails { - FREE_MEMORY("org.apache.phoenix.pherf:type=RuntimeFreeMemory", new FreeMemoryMonitor()), - TOTAL_MEMORY("org.apache.phoenix.pherf:type=RuntimeTotalMemory", new TotalMemoryMonitor()), - MAX_MEMORY("org.apache.phoenix.pherf:type=RuntimeMaxMemory", new MaxMemoryMonitor()), - HEAP_MEMORY_USAGE("org.apache.phoenix.pherf:type=HeapMemoryUsage", new HeapMemoryMonitor()), - NON_HEAP_MEMORY_USAGE("org.apache.phoenix.pherf:type=NonHeapMemoryUsage", new NonHeapMemoryMonitor()), - OBJECT_PENDING_FINALIZATION("org.apache.phoenix.pherf:type=ObjectPendingFinalizationCount", new ObjectPendingFinalizationCountMonitor()), - GARBAGE_COLLECTOR_ELAPSED_TIME("org.apache.phoenix.pherf:type=GarbageCollectorElapsedTime", new GarbageCollectorElapsedTimeMonitor()), - CPU_LOAD_AVERAGE("org.apache.phoenix.pherf:type=CPULoadAverage", new CPULoadAverageMonitor()), - THREAD_COUNT("org.apache.phoenix.pherf:type=PherfThreads",new ThreadMonitor()); + FREE_MEMORY("org.apache.phoenix.pherf:type=RuntimeFreeMemory", new FreeMemoryMonitor()), + TOTAL_MEMORY("org.apache.phoenix.pherf:type=RuntimeTotalMemory", new TotalMemoryMonitor()), + MAX_MEMORY("org.apache.phoenix.pherf:type=RuntimeMaxMemory", new MaxMemoryMonitor()), + HEAP_MEMORY_USAGE("org.apache.phoenix.pherf:type=HeapMemoryUsage", new HeapMemoryMonitor()), + NON_HEAP_MEMORY_USAGE("org.apache.phoenix.pherf:type=NonHeapMemoryUsage", + new NonHeapMemoryMonitor()), + OBJECT_PENDING_FINALIZATION("org.apache.phoenix.pherf:type=ObjectPendingFinalizationCount", + new ObjectPendingFinalizationCountMonitor()), + GARBAGE_COLLECTOR_ELAPSED_TIME("org.apache.phoenix.pherf:type=GarbageCollectorElapsedTime", + new GarbageCollectorElapsedTimeMonitor()), + CPU_LOAD_AVERAGE("org.apache.phoenix.pherf:type=CPULoadAverage", new CPULoadAverageMonitor()), + THREAD_COUNT("org.apache.phoenix.pherf:type=PherfThreads", new ThreadMonitor()); - private final String monitorName; - private final Monitor monitor; + private final String monitorName; + private final Monitor monitor; - private MonitorDetails(String monitorName, Monitor monitor) { - this.monitorName = monitorName; - this.monitor = monitor; - } + private MonitorDetails(String monitorName, Monitor monitor) { + this.monitorName = monitorName; + this.monitor = monitor; + } - @Override - public String toString() { - return monitorName; - } + @Override + public String toString() { + return monitorName; + } - public Monitor getMonitor() { - return monitor; - } -} \ No newline at end of file + public Monitor getMonitor() { + return monitor; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java index 5800a2060e3..2e84e62bf72 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java @@ -1,23 +1,33 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx; +import java.lang.management.ManagementFactory; +import java.util.*; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanServer; +import javax.management.ObjectName; +import javax.management.StandardMBean; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.exception.FileLoaderRuntimeException; import org.apache.phoenix.pherf.jmx.monitors.Monitor; @@ -28,170 +38,151 @@ import org.apache.phoenix.pherf.workload.Workload; import org.apache.phoenix.util.DateUtil; -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanServer; -import javax.management.ObjectName; -import javax.management.StandardMBean; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.*; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - /** - * This class starts JMX stats for the configured monitors. - * Monitors should be configured in MonitorDetails Enum. - * Each stat implements {@link org.apache.phoenix.pherf.jmx.monitors.Monitor}. + * This class starts JMX stats for the configured monitors. Monitors should be configured in + * MonitorDetails Enum. Each stat implements {@link org.apache.phoenix.pherf.jmx.monitors.Monitor}. * For the duration of any Pherf run, when the configured - * {@link org.apache.phoenix.pherf.PherfConstants#MONITOR_FREQUENCY} is reached a snapshot of - * each monitor is taken and dumped out to a log file. + * {@link org.apache.phoenix.pherf.PherfConstants#MONITOR_FREQUENCY} is reached a snapshot of each + * monitor is taken and dumped out to a log file. */ public class MonitorManager implements Workload { - // List of MonitorDetails for all the running monitors. - // TODO Move this out to config. Possible use Guice and use IOC to inject it in. 
- private static final List - MONITOR_DETAILS_LIST = - Arrays.asList(MonitorDetails.values()); - private final ResultHandler resultHandler; - private final AtomicLong monitorFrequency; - private final AtomicLong rowCount; - private final AtomicBoolean shouldStop = new AtomicBoolean(false); - private final AtomicBoolean isRunning = new AtomicBoolean(false); - - @SuppressWarnings("unused") public MonitorManager() throws Exception { - this(PherfConstants.MONITOR_FREQUENCY); + // List of MonitorDetails for all the running monitors. + // TODO Move this out to config. Possible use Guice and use IOC to inject it in. + private static final List MONITOR_DETAILS_LIST = + Arrays.asList(MonitorDetails.values()); + private final ResultHandler resultHandler; + private final AtomicLong monitorFrequency; + private final AtomicLong rowCount; + private final AtomicBoolean shouldStop = new AtomicBoolean(false); + private final AtomicBoolean isRunning = new AtomicBoolean(false); + + @SuppressWarnings("unused") + public MonitorManager() throws Exception { + this(PherfConstants.MONITOR_FREQUENCY); + } + + /** + * @param monitorFrequency Frequency at which monitor stats are written to a log file. + */ + public MonitorManager(long monitorFrequency) throws Exception { + this.monitorFrequency = new AtomicLong(monitorFrequency); + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + + // Register all the monitors to JMX + for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) { + StandardMBean bean = new StandardMBean(monitorDetails.getMonitor(), Monitor.class); + ObjectName monitorThreadStatName = new ObjectName(monitorDetails.toString()); + try { + mbs.registerMBean(bean, monitorThreadStatName); + } catch (InstanceAlreadyExistsException e) { + mbs.unregisterMBean(monitorThreadStatName); + mbs.registerMBean(bean, monitorThreadStatName); + } } - - /** - * @param monitorFrequency Frequency at which monitor stats are written to a log file. 
- * @throws Exception - */ - public MonitorManager(long monitorFrequency) throws Exception { - this.monitorFrequency = new AtomicLong(monitorFrequency); - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - - // Register all the monitors to JMX - for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) { - StandardMBean bean = new StandardMBean(monitorDetails.getMonitor(), Monitor.class); - ObjectName monitorThreadStatName = new ObjectName(monitorDetails.toString()); - try { - mbs.registerMBean(bean, monitorThreadStatName); - } catch (InstanceAlreadyExistsException e) { - mbs.unregisterMBean(monitorThreadStatName); - mbs.registerMBean(bean, monitorThreadStatName); - } - } - rowCount = new AtomicLong(0); - this.resultHandler = new CSVFileResultHandler(); - this.resultHandler.setResultFileDetails(ResultFileDetails.CSV); - this.resultHandler.setResultFileName(PherfConstants.MONITOR_FILE_NAME); - } - - @Override public synchronized void complete() { - this.shouldStop.set(true); - } - - @Override public Callable execute() { - return new Callable() { - @Override public Void call() throws Exception { + rowCount = new AtomicLong(0); + this.resultHandler = new CSVFileResultHandler(); + this.resultHandler.setResultFileDetails(ResultFileDetails.CSV); + this.resultHandler.setResultFileName(PherfConstants.MONITOR_FILE_NAME); + } + + @Override + public synchronized void complete() { + this.shouldStop.set(true); + } + + @Override + public Callable execute() { + return new Callable() { + @Override + public Void call() throws Exception { + try { + while (!shouldStop()) { + isRunning.set(true); + List rowValues = new ArrayList(); + synchronized (resultHandler) { + for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) { + rowValues.clear(); try { - while (!shouldStop()) { - isRunning.set(true); - List rowValues = new ArrayList(); - synchronized (resultHandler) { - for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) { - rowValues.clear(); - try { - StandardMBean - bean = - new StandardMBean(monitorDetails.getMonitor(), - Monitor.class); - - Calendar calendar = new GregorianCalendar(); - rowValues.add(monitorDetails); - - rowValues.add(((Monitor) bean.getImplementation()).getStat()); - rowValues.add(DateUtil.DEFAULT_MS_DATE_FORMATTER - .format(calendar.getTime())); - Result - result = - new Result(ResultFileDetails.CSV, - ResultFileDetails.CSV_MONITOR.getHeader() - .toString(), rowValues); - resultHandler.write(result); - } catch (Exception e) { - throw new FileLoaderRuntimeException( - "Could not log monitor result.", e); - } - rowCount.getAndIncrement(); - } - try { - resultHandler.flush(); - Thread.sleep(getMonitorFrequency()); - } catch (Exception e) { - Thread.currentThread().interrupt(); - e.printStackTrace(); - throw e; - } - } - } - } finally { - try { - isRunning.set(false); - if (resultHandler != null) { - resultHandler.close(); - } - } catch (Exception e) { - throw new FileLoaderRuntimeException("Could not close monitor results.", e); - } + StandardMBean bean = + new StandardMBean(monitorDetails.getMonitor(), Monitor.class); + + Calendar calendar = new GregorianCalendar(); + rowValues.add(monitorDetails); + + rowValues.add(((Monitor) bean.getImplementation()).getStat()); + rowValues.add(DateUtil.DEFAULT_MS_DATE_FORMATTER.format(calendar.getTime())); + Result result = new Result(ResultFileDetails.CSV, + ResultFileDetails.CSV_MONITOR.getHeader().toString(), rowValues); + resultHandler.write(result); + } catch (Exception e) { + throw new FileLoaderRuntimeException("Could not 
log monitor result.", e); } - return null; - } - }; - } - - public long getMonitorFrequency() { - return monitorFrequency.get(); - } - - public boolean shouldStop() { - return shouldStop.get(); - } - - // Convenience method for testing. - @SuppressWarnings("unused") - public long getRowCount() { - return rowCount.get(); - } - - public boolean isRunning() { - return isRunning.get(); - } - - /** - * This method should really only be used for testing - * - * @return {@code List < org.apache.phoenix.pherf.result.Result > } - * @throws Exception - */ - public synchronized List readResults() throws Exception { - ResultHandler handler = null; - try { - if (resultHandler.isClosed()) { - handler = new CSVFileResultHandler(); - handler.setResultFileDetails(ResultFileDetails.CSV); - handler.setResultFileName(PherfConstants.MONITOR_FILE_NAME); - return handler.read(); - } else { - return resultHandler.read(); + rowCount.getAndIncrement(); + } + try { + resultHandler.flush(); + Thread.sleep(getMonitorFrequency()); + } catch (Exception e) { + Thread.currentThread().interrupt(); + e.printStackTrace(); + throw e; + } } - } catch (Exception e) { - throw new FileLoaderRuntimeException("Could not close monitor results.", e); + } } finally { - if (handler != null) { - handler.close(); + try { + isRunning.set(false); + if (resultHandler != null) { + resultHandler.close(); } + } catch (Exception e) { + throw new FileLoaderRuntimeException("Could not close monitor results.", e); + } } + return null; + } + }; + } + + public long getMonitorFrequency() { + return monitorFrequency.get(); + } + + public boolean shouldStop() { + return shouldStop.get(); + } + + // Convenience method for testing. + @SuppressWarnings("unused") + public long getRowCount() { + return rowCount.get(); + } + + public boolean isRunning() { + return isRunning.get(); + } + + /** + * This method should really only be used for testing + * @return {@code List < org.apache.phoenix.pherf.result.Result > } + */ + public synchronized List readResults() throws Exception { + ResultHandler handler = null; + try { + if (resultHandler.isClosed()) { + handler = new CSVFileResultHandler(); + handler.setResultFileDetails(ResultFileDetails.CSV); + handler.setResultFileName(PherfConstants.MONITOR_FILE_NAME); + return handler.read(); + } else { + return resultHandler.read(); + } + } catch (Exception e) { + throw new FileLoaderRuntimeException("Could not close monitor results.", e); + } finally { + if (handler != null) { + handler.close(); + } } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/Stat.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/Stat.java index ef3703c1466..b4d039fc454 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/Stat.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/Stat.java @@ -1,32 +1,31 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx; -public class Stat { - private final T stat; +public class Stat { + private final T stat; - public Stat(T stat) { - this.stat = stat; - } + public Stat(T stat) { + this.stat = stat; + } - @Override - public String toString() { - return stat.toString(); - } + @Override + public String toString() { + return stat.toString(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/CPULoadAverageMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/CPULoadAverageMonitor.java index 0823d43dacf..a6a5103e097 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/CPULoadAverageMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/CPULoadAverageMonitor.java @@ -1,33 +1,33 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; +import org.apache.phoenix.pherf.jmx.Stat; + public class CPULoadAverageMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage()); - return stat; - } + @Override + public Stat getStat() { + Stat stat = + new Stat(ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage()); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/FreeMemoryMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/FreeMemoryMonitor.java index 848863cbf08..9faeee7a06b 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/FreeMemoryMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/FreeMemoryMonitor.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; import org.apache.phoenix.pherf.jmx.Stat; public class FreeMemoryMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(new Long(Runtime.getRuntime().freeMemory())); - return stat; - } + @Override + public Stat getStat() { + Stat stat = new Stat(new Long(Runtime.getRuntime().freeMemory())); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/GarbageCollectorElapsedTimeMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/GarbageCollectorElapsedTimeMonitor.java index 7dc6798d06c..9563ec39f7a 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/GarbageCollectorElapsedTimeMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/GarbageCollectorElapsedTimeMonitor.java @@ -1,44 +1,43 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.util.List; +import org.apache.phoenix.pherf.jmx.Stat; + public class GarbageCollectorElapsedTimeMonitor implements Monitor { - @Override - public Stat getStat() { - List beans = ManagementFactory.getGarbageCollectorMXBeans(); - long average = 0; - Stat stat = null; - if (beans.size() > 0) { - for (GarbageCollectorMXBean bean : beans) { - average += bean.getCollectionTime(); - } - stat = new Stat(average / beans.size()); - } else { - stat = new Stat(0); - } - return stat; + @Override + public Stat getStat() { + List beans = ManagementFactory.getGarbageCollectorMXBeans(); + long average = 0; + Stat stat = null; + if (beans.size() > 0) { + for (GarbageCollectorMXBean bean : beans) { + average += bean.getCollectionTime(); + } + stat = new Stat(average / beans.size()); + } else { + stat = new Stat(0); } + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/HeapMemoryMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/HeapMemoryMonitor.java index 41f4746a84c..aea5e98a72c 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/HeapMemoryMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/HeapMemoryMonitor.java @@ -1,32 +1,31 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.ManagementFactory; +import org.apache.phoenix.pherf.jmx.Stat; + public class HeapMemoryMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed()); - return stat; - } + @Override + public Stat getStat() { + Stat stat = new Stat(ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed()); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/MaxMemoryMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/MaxMemoryMonitor.java index d53e552e81c..6b262d4d54e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/MaxMemoryMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/MaxMemoryMonitor.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx.monitors; import org.apache.phoenix.pherf.jmx.Stat; public class MaxMemoryMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(new Long(Runtime.getRuntime().maxMemory())); - return stat; - } + @Override + public Stat getStat() { + Stat stat = new Stat(new Long(Runtime.getRuntime().maxMemory())); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/Monitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/Monitor.java index d8563980a4b..ceff01af510 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/Monitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/Monitor.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import javax.management.MXBean; +import org.apache.phoenix.pherf.jmx.Stat; + @MXBean public interface Monitor { - public Stat getStat(); + public Stat getStat(); -} \ No newline at end of file +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/NonHeapMemoryMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/NonHeapMemoryMonitor.java index 4f0a67b5e64..9420378dfeb 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/NonHeapMemoryMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/NonHeapMemoryMonitor.java @@ -1,32 +1,32 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.ManagementFactory; +import org.apache.phoenix.pherf.jmx.Stat; + public class NonHeapMemoryMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getUsed()); - return stat; - } + @Override + public Stat getStat() { + Stat stat = + new Stat(ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getUsed()); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ObjectPendingFinalizationCountMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ObjectPendingFinalizationCountMonitor.java index 254bf8cebf0..62738aca603 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ObjectPendingFinalizationCountMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ObjectPendingFinalizationCountMonitor.java @@ -1,33 +1,33 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; +import org.apache.phoenix.pherf.jmx.Stat; + public class ObjectPendingFinalizationCountMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(ManagementFactory.getMemoryMXBean().getObjectPendingFinalizationCount()); - return stat; - } + @Override + public Stat getStat() { + Stat stat = + new Stat(ManagementFactory.getMemoryMXBean().getObjectPendingFinalizationCount()); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ThreadMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ThreadMonitor.java index 260af710beb..8f2aeaca6e6 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ThreadMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/ThreadMonitor.java @@ -1,32 +1,32 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.jmx.monitors; -import org.apache.phoenix.pherf.jmx.Stat; - import java.lang.management.ManagementFactory; +import org.apache.phoenix.pherf.jmx.Stat; + public class ThreadMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(new Integer(ManagementFactory.getThreadMXBean().getThreadCount())); - return stat; - } + @Override + public Stat getStat() { + Stat stat = + new Stat(new Integer(ManagementFactory.getThreadMXBean().getThreadCount())); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/TotalMemoryMonitor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/TotalMemoryMonitor.java index 6d7336ace0a..dfcc991a7cf 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/TotalMemoryMonitor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/monitors/TotalMemoryMonitor.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.jmx.monitors; import org.apache.phoenix.pherf.jmx.Stat; public class TotalMemoryMonitor implements Monitor { - @Override - public Stat getStat() { - Stat stat = new Stat(new Long(Runtime.getRuntime().totalMemory())); - return stat; - } + @Override + public Stat getStat() { + Stat stat = new Stat(new Long(Runtime.getRuntime().totalMemory())); + return stat; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java index e5553cc01c3..574d03deea6 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java @@ -1,85 +1,83 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.PherfConstants; - import java.util.ArrayList; import java.util.List; +import org.apache.phoenix.pherf.PherfConstants; + public class DataLoadThreadTime { - private List threadTime = new ArrayList(); + private List threadTime = new ArrayList(); - public List getThreadTime() { - return threadTime; - } + public List getThreadTime() { + return threadTime; + } - public void add(String tableName, String threadName, long rowsUpserted, - long timeInMsPerMillionRows) { - threadTime.add(new WriteThreadTime(tableName, threadName, rowsUpserted, - timeInMsPerMillionRows)); - } + public void add(String tableName, String threadName, long rowsUpserted, + long timeInMsPerMillionRows) { + threadTime + .add(new WriteThreadTime(tableName, threadName, rowsUpserted, timeInMsPerMillionRows)); + } - public String getCsvTitle() { - return "TABLE_NAME,THREAD_NAME,ROWS_UPSERTED,TIME_IN_MS_PER_" + PherfConstants.LOG_PER_NROWS - + "_ROWS\n"; - } + public String getCsvTitle() { + return "TABLE_NAME,THREAD_NAME,ROWS_UPSERTED,TIME_IN_MS_PER_" + PherfConstants.LOG_PER_NROWS + + "_ROWS\n"; + } } class WriteThreadTime { - private String tableName; - private String threadName; - private long rowsUpserted; - private long timeInMsPerMillionRows; + private String tableName; + private String threadName; + private long rowsUpserted; + private long timeInMsPerMillionRows; - public WriteThreadTime(String tableName, String threadName, long rowsUpserted, - long timeInMsPerMillionRows) { - this.tableName = tableName; - this.threadName = threadName; - this.rowsUpserted = rowsUpserted; - this.timeInMsPerMillionRows = timeInMsPerMillionRows; - } + public WriteThreadTime(String tableName, String threadName, long rowsUpserted, + long timeInMsPerMillionRows) { + this.tableName = tableName; + this.threadName = threadName; + this.rowsUpserted = rowsUpserted; + this.timeInMsPerMillionRows = timeInMsPerMillionRows; + } - public String getTableName() { - return tableName; - } + public String getTableName() { + return tableName; + } - public String getThreadName() 
{ - return threadName; - } + public String getThreadName() { + return threadName; + } - public long getTimeInMsPerMillionRows() { - return timeInMsPerMillionRows; - } + public long getTimeInMsPerMillionRows() { + return timeInMsPerMillionRows; + } - public List getCsvRepresentation(ResultUtil util) { - List rowValues = new ArrayList<>(); - rowValues.add(new ResultValue(util.convertNull(getTableName()))); - rowValues.add(new ResultValue(util.convertNull(getThreadName()))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowsUpserted())))); - rowValues.add(new ResultValue( - util.convertNull(String.valueOf(getTimeInMsPerMillionRows())))); + public List getCsvRepresentation(ResultUtil util) { + List rowValues = new ArrayList<>(); + rowValues.add(new ResultValue(util.convertNull(getTableName()))); + rowValues.add(new ResultValue(util.convertNull(getThreadName()))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowsUpserted())))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getTimeInMsPerMillionRows())))); - return rowValues; - } + return rowValues; + } - public long getRowsUpserted() { - return rowsUpserted; - } -} \ No newline at end of file + public long getRowsUpserted() { + return rowsUpserted; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java index 0ff5c590c37..1baa3c14011 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java @@ -1,67 +1,66 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.result; import java.util.ArrayList; import java.util.List; public class DataLoadTimeSummary { - private List tableLoadTime = new ArrayList(); + private List tableLoadTime = new ArrayList(); - public List getTableLoadTime() { - return tableLoadTime; - } + public List getTableLoadTime() { + return tableLoadTime; + } - public void add(String tableName, int rowCount, int durationInMs) { - tableLoadTime.add(new TableLoadTime(tableName, rowCount, durationInMs)); - } + public void add(String tableName, int rowCount, int durationInMs) { + tableLoadTime.add(new TableLoadTime(tableName, rowCount, durationInMs)); + } } class TableLoadTime { - private int durationInMs; - private String tableName; - private int rowCount; + private int durationInMs; + private String tableName; + private int rowCount; - public TableLoadTime(String tableName, int rowCount, int durationInMs) { - this.tableName = tableName; - this.rowCount = rowCount; - this.durationInMs = durationInMs; - } + public TableLoadTime(String tableName, int rowCount, int durationInMs) { + this.tableName = tableName; + this.rowCount = rowCount; + this.durationInMs = durationInMs; + } - public List getCsvRepresentation(ResultUtil util) { - List rowValues = new ArrayList<>(); - rowValues.add(new ResultValue(util.convertNull(getTableName()))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowCount())))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getDurationInMs())))); + public List getCsvRepresentation(ResultUtil util) { + List rowValues = new ArrayList<>(); + rowValues.add(new ResultValue(util.convertNull(getTableName()))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowCount())))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getDurationInMs())))); - return rowValues; - } + return rowValues; + } - public int getDurationInMs() { - return durationInMs; - } + public int getDurationInMs() { + return durationInMs; + } - public String getTableName() { - return tableName; - } + public String getTableName() { + return tableName; + } - public int getRowCount() { - return rowCount; - } + public int getRowCount() { + return rowCount; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java index 5c07ffe3e92..12f58c3d371 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java @@ -1,74 +1,74 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.configuration.DataModel; +import java.util.ArrayList; +import java.util.List; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; -import java.util.List; -@XmlRootElement(namespace = "org.apache.phoenix.pherf.result") public class DataModelResult - extends DataModel { - private List scenarioResult = new ArrayList(); - private String zookeeper; +import org.apache.phoenix.pherf.configuration.DataModel; + +@XmlRootElement(namespace = "org.apache.phoenix.pherf.result") +public class DataModelResult extends DataModel { + private List scenarioResult = new ArrayList(); + private String zookeeper; - public List getScenarioResult() { - return scenarioResult; - } + public List getScenarioResult() { + return scenarioResult; + } - @SuppressWarnings("unused") public void setScenarioResult(List scenarioResult) { - this.scenarioResult = scenarioResult; - } + @SuppressWarnings("unused") + public void setScenarioResult(List scenarioResult) { + this.scenarioResult = scenarioResult; + } - public DataModelResult() { - } + public DataModelResult() { + } - private DataModelResult(String name, String zookeeper) { - this.setName(name); - this.zookeeper = zookeeper; - } + private DataModelResult(String name, String zookeeper) { + this.setName(name); + this.zookeeper = zookeeper; + } - /** - * Copy constructor - * - * @param dataModelResult - */ - public DataModelResult(DataModelResult dataModelResult) { - this(dataModelResult.getName(), dataModelResult.getZookeeper()); - this.scenarioResult = dataModelResult.getScenarioResult(); - } + /** + * Copy constructor + */ + public DataModelResult(DataModelResult dataModelResult) { + this(dataModelResult.getName(), dataModelResult.getZookeeper()); + this.scenarioResult = dataModelResult.getScenarioResult(); + } - public DataModelResult(DataModel dataModel, String zookeeper) { - this(dataModel.getName(), zookeeper); - } + public DataModelResult(DataModel dataModel, String zookeeper) { + this(dataModel.getName(), zookeeper); + } - public DataModelResult(DataModel dataModel) { - this(dataModel, null); - } + public DataModelResult(DataModel dataModel) { + this(dataModel, null); + } - @XmlAttribute() public String getZookeeper() { - return zookeeper; - } + @XmlAttribute() + public String getZookeeper() { + return zookeeper; + } - public void setZookeeper(String zookeeper) { - this.zookeeper = zookeeper; - } + public void setZookeeper(String zookeeper) { + this.zookeeper = zookeeper; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java 
index 228d0030289..487daa66c47 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java @@ -1,23 +1,27 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.result.file.ResultFileDetails; @@ -25,153 +29,151 @@ import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.util.DateUtil; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; - public class QueryResult extends Query { - private List threadTimes = new ArrayList<>(); - private static PhoenixUtil pUtil = PhoenixUtil.create(); - - public QueryResult() { - super(); - } + private List threadTimes = new ArrayList<>(); + private static PhoenixUtil pUtil = PhoenixUtil.create(); - public synchronized List getThreadTimes() { - return this.threadTimes; - } + public QueryResult() { + super(); + } + + public synchronized List getThreadTimes() { + return this.threadTimes; + } + + @SuppressWarnings("unused") + public synchronized void setThreadTimes(List threadTimes) { + this.threadTimes = threadTimes; + } - @SuppressWarnings("unused") - public synchronized void setThreadTimes(List threadTimes) { - this.threadTimes = threadTimes; + public QueryResult(Query query) { + this.setStatement(query.getStatement()); + this.setExpectedAggregateRowCount(query.getExpectedAggregateRowCount()); + this.setTenantId(query.getTenantId()); + this.setDdl(query.getDdl()); + this.setQueryGroup(query.getQueryGroup()); + this.setId(query.getId()); + this.setTimeoutDuration(query.getTimeoutDuration()); + } + + public Date getStartTime() { + Date startTime = null; + for (ThreadTime tt : getThreadTimes()) { + Date currStartTime = 
tt.getStartTime(); + if (null != currStartTime) { + if (null == startTime) { + startTime = currStartTime; + } else if (currStartTime.compareTo(startTime) < 0) { + startTime = currStartTime; + } + } } + return startTime; + } - public QueryResult(Query query) { - this.setStatement(query.getStatement()); - this.setExpectedAggregateRowCount(query.getExpectedAggregateRowCount()); - this.setTenantId(query.getTenantId()); - this.setDdl(query.getDdl()); - this.setQueryGroup(query.getQueryGroup()); - this.setId(query.getId()); - this.setTimeoutDuration(query.getTimeoutDuration()); + public int getAvgMaxRunTimeInMs() { + int totalRunTime = 0; + for (ThreadTime tt : getThreadTimes()) { + if (null != tt.getMaxTimeInMs()) { + totalRunTime += tt.getMaxTimeInMs().getElapsedDurationInMs(); + } } + return totalRunTime / getThreadTimes().size(); + } - public Date getStartTime() { - Date startTime = null; - for (ThreadTime tt : getThreadTimes()) { - Date currStartTime = tt.getStartTime(); - if (null != currStartTime) { - if (null == startTime) { - startTime = currStartTime; - } else if (currStartTime.compareTo(startTime) < 0) { - startTime = currStartTime; - } - } - } - return startTime; + public int getAvgMinRunTimeInMs() { + int totalRunTime = 0; + for (ThreadTime tt : getThreadTimes()) { + if (null != tt.getMinTimeInMs()) { + totalRunTime += tt.getMinTimeInMs().getElapsedDurationInMs(); + } } + return totalRunTime / getThreadTimes().size(); + } - public int getAvgMaxRunTimeInMs() { - int totalRunTime = 0; - for (ThreadTime tt : getThreadTimes()) { - if (null != tt.getMaxTimeInMs()) { - totalRunTime += tt.getMaxTimeInMs().getElapsedDurationInMs(); - } - } - return totalRunTime / getThreadTimes().size(); + public int getAvgRunTimeInMs() { + int totalRunTime = 0; + for (ThreadTime tt : getThreadTimes()) { + if (null != tt.getAvgTimeInMs()) { + totalRunTime += tt.getAvgTimeInMs(); + } } + return totalRunTime / getThreadTimes().size(); + } - public int getAvgMinRunTimeInMs() { - int totalRunTime = 0; - for (ThreadTime tt : getThreadTimes()) { - if (null != tt.getMinTimeInMs()) { - totalRunTime += tt.getMinTimeInMs().getElapsedDurationInMs(); - } - } - return totalRunTime / getThreadTimes().size(); + public List getCsvRepresentation(ResultUtil util, Scenario scenario, + RulesApplier ruleApplier) { + List rowValues = new ArrayList<>(); + rowValues.add(new ResultValue(util.convertNull(getStartTimeText()))); + rowValues.add(new ResultValue(util.convertNull(this.getQueryGroup()))); + rowValues.add(new ResultValue(util.convertNull(this.getStatement()))); + rowValues.add(new ResultValue(util.convertNull(this.getTenantId()))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgMaxRunTimeInMs())))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgRunTimeInMs())))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgMinRunTimeInMs())))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRunCount())))); + rowValues.add( + new ResultValue(util.convertNull(String.valueOf(getExplainPlan(scenario, ruleApplier))))); + rowValues.add(new ResultValue(util.convertNull(String.valueOf(getResultRowCount())))); + return rowValues; + } + + private String getExplainPlan(Scenario scenario, RulesApplier ruleApplier) { + try { + return pUtil.getExplainPlan(this, scenario, ruleApplier); + } catch (SQLException e) { + e.printStackTrace(); } + return null; + } - public int getAvgRunTimeInMs() { - int totalRunTime = 0; - for (ThreadTime tt : getThreadTimes()) { - if 
(null != tt.getAvgTimeInMs()) { - totalRunTime += tt.getAvgTimeInMs(); - } + private long getResultRowCount() { + long resultRowCount = -1; + for (ThreadTime tt : getThreadTimes()) { + for (int i = 0; i < tt.getRunTimesInMs().size(); i++) { + if (resultRowCount == -1) { + resultRowCount = tt.getRunTimesInMs().get(i).getResultRowCount(); + } else { + if (resultRowCount != tt.getRunTimesInMs().get(i).getResultRowCount()) { + return -1; + } } - return totalRunTime / getThreadTimes().size(); + } } + return resultRowCount; + } - public List getCsvRepresentation(ResultUtil util, Scenario scenario, RulesApplier ruleApplier) { + public List> getCsvDetailedRepresentation(ResultUtil util, + ResultFileDetails details) { + List> rows = new ArrayList<>(); + for (ThreadTime tt : getThreadTimes()) { + for (List runTime : details.isPerformance() + ? tt.getCsvPerformanceRepresentation(util) + : tt.getCsvFunctionalRepresentation(util)) { List rowValues = new ArrayList<>(); rowValues.add(new ResultValue(util.convertNull(getStartTimeText()))); rowValues.add(new ResultValue(util.convertNull(this.getQueryGroup()))); + rowValues.add(new ResultValue(util.convertNull(this.getId()))); rowValues.add(new ResultValue(util.convertNull(this.getStatement()))); rowValues.add(new ResultValue(util.convertNull(this.getTenantId()))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgMaxRunTimeInMs())))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgRunTimeInMs())))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getAvgMinRunTimeInMs())))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRunCount())))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getExplainPlan(scenario, ruleApplier))))); - rowValues.add(new ResultValue(util.convertNull(String.valueOf(getResultRowCount())))); - return rowValues; - } - - private String getExplainPlan(Scenario scenario, RulesApplier ruleApplier) { - try { - return pUtil.getExplainPlan(this, scenario, ruleApplier); - } catch (SQLException e) { - e.printStackTrace(); - } - return null; - } - - private long getResultRowCount() { - long resultRowCount = -1; - for (ThreadTime tt : getThreadTimes()) { - for (int i = 0; i < tt.getRunTimesInMs().size(); i++) { - if (resultRowCount == -1) { - resultRowCount = tt.getRunTimesInMs().get(i).getResultRowCount(); - } else { - if (resultRowCount != tt.getRunTimesInMs().get(i).getResultRowCount()) { - return -1; - } - } - } - } - return resultRowCount; + rowValues.addAll(runTime); + rows.add(rowValues); + } } + return rows; + } - public List> getCsvDetailedRepresentation(ResultUtil util, ResultFileDetails details) { - List> rows = new ArrayList<>(); - for (ThreadTime tt : getThreadTimes()) { - for (List runTime : details.isPerformance() ? 
- tt.getCsvPerformanceRepresentation(util) : - tt.getCsvFunctionalRepresentation(util)) { - List rowValues = new ArrayList<>(); - rowValues.add(new ResultValue(util.convertNull(getStartTimeText()))); - rowValues.add(new ResultValue(util.convertNull(this.getQueryGroup()))); - rowValues.add(new ResultValue(util.convertNull(this.getId()))); - rowValues.add(new ResultValue(util.convertNull(this.getStatement()))); - rowValues.add(new ResultValue(util.convertNull(this.getTenantId()))); - rowValues.addAll(runTime); - rows.add(rowValues); - } - } - return rows; + private int getRunCount() { + int totalRunCount = 0; + for (ThreadTime tt : getThreadTimes()) { + totalRunCount += tt.getRunCount(); } + return totalRunCount; + } - private int getRunCount() { - int totalRunCount = 0; - for (ThreadTime tt : getThreadTimes()) { - totalRunCount += tt.getRunCount(); - } - return totalRunCount; - } - - private String getStartTimeText() { - return (null == this.getStartTime()) ? - "" : - DateUtil.DEFAULT_MS_DATE_FORMATTER.format(this.getStartTime()); - } -} \ No newline at end of file + private String getStartTimeText() { + return (null == this.getStartTime()) + ? "" + : DateUtil.DEFAULT_MS_DATE_FORMATTER.format(this.getStartTime()); + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java index c2be5a316e9..11b0413f0e9 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java @@ -1,48 +1,47 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.configuration.QuerySet; - import java.util.ArrayList; import java.util.List; +import org.apache.phoenix.pherf.configuration.QuerySet; + public class QuerySetResult extends QuerySet { - private List queryResults = new ArrayList<>(); + private List queryResults = new ArrayList<>(); - public QuerySetResult(QuerySet querySet) { - this.setConcurrency(querySet.getConcurrency()); - this.setNumberOfExecutions(querySet.getNumberOfExecutions()); - this.setExecutionDurationInMs(querySet.getExecutionDurationInMs()); - this.setExecutionType(querySet.getExecutionType()); - } + public QuerySetResult(QuerySet querySet) { + this.setConcurrency(querySet.getConcurrency()); + this.setNumberOfExecutions(querySet.getNumberOfExecutions()); + this.setExecutionDurationInMs(querySet.getExecutionDurationInMs()); + this.setExecutionType(querySet.getExecutionType()); + } - public QuerySetResult() { - } + public QuerySetResult() { + } - public List getQueryResults() { - return queryResults; - } + public List getQueryResults() { + return queryResults; + } - @SuppressWarnings("unused") - public void setQueryResults(List queryResults) { - this.queryResults = queryResults; - } + @SuppressWarnings("unused") + public void setQueryResults(List queryResults) { + this.queryResults = queryResults; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java index 93225d7b071..9faac76fd35 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java @@ -1,59 +1,59 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.result.file.ResultFileDetails; - import java.util.List; +import org.apache.phoenix.pherf.result.file.ResultFileDetails; + /** * Common container for Pherf results. 
*/ public class Result { - private final List resultValues; - - // Placeholder for future work - private final ResultFileDetails type; - private final String header; - - /** - * @param type {@link org.apache.phoenix.pherf.result.file.ResultFileDetails} Currently unused, but gives metadata about the - * contents of the result. - * @param header Used for CSV, otherwise pass null. For CSV pass comma separated string of header fields. - * @param messageValues {@code List } All fields combined represent the data - * for a row to be written. - */ - public Result(ResultFileDetails type, String header, List messageValues) { - this.resultValues = messageValues; - this.header = header; - this.type = type; - } - - public List getResultValues() { - return resultValues; - } - - public String getHeader() { - return header; - } - - public ResultFileDetails getType() { - return type; - } + private final List resultValues; + + // Placeholder for future work + private final ResultFileDetails type; + private final String header; + + /** + * @param type {@link org.apache.phoenix.pherf.result.file.ResultFileDetails} Currently + * unused, but gives metadata about the contents of the result. + * @param header Used for CSV, otherwise pass null. For CSV pass comma separated string of + * header fields. + * @param messageValues {@code List } All fields combined represent the data for a + * row to be written. + */ + public Result(ResultFileDetails type, String header, List messageValues) { + this.resultValues = messageValues; + this.header = header; + this.type = type; + } + + public List getResultValues() { + return resultValues; + } + + public String getHeader() { + return header; + } + + public ResultFileDetails getType() { + return type; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java index 2d2acf70ee3..09e97378b32 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java @@ -1,48 +1,47 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.result.file.ResultFileDetails; - import java.util.List; +import org.apache.phoenix.pherf.result.file.ResultFileDetails; + /** - * This is a common interface for working with Pherf results in various output formats. Implementations of this - * interface can deal with particular details for that format while giving callers to output a simple API to report - * against. + * This is a common interface for working with Pherf results in various output formats. + * Implementations of this interface can deal with particular details for that format while giving + * callers to output a simple API to report against. */ public interface ResultHandler { - public void write(Result result) throws Exception; + public void write(Result result) throws Exception; - public void flush() throws Exception; + public void flush() throws Exception; - public void close() throws Exception; + public void close() throws Exception; - public List read() throws Exception; + public List read() throws Exception; - public boolean isClosed(); + public boolean isClosed(); - public ResultFileDetails getResultFileDetails(); + public ResultFileDetails getResultFileDetails(); - public String getResultFileName(); + public String getResultFileName(); - public void setResultFileDetails(ResultFileDetails details); + public void setResultFileDetails(ResultFileDetails details); - public void setResultFileName(String resultFileName); + public void setResultFileName(String resultFileName); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java index 91db782ded9..efae863848e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java @@ -1,23 +1,25 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
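For illustration only (not part of the patch), the ResultHandler contract above is the interface ResultManager drives; a hedged sketch using the concrete CSVFileResultHandler that appears later in this patch, with a hypothetical file name seed:

    import org.apache.phoenix.pherf.result.Result;
    import org.apache.phoenix.pherf.result.ResultHandler;
    import org.apache.phoenix.pherf.result.file.ResultFileDetails;
    import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler;

    static void writeOne(Result result) throws Exception {
      ResultHandler handler = new CSVFileResultHandler();
      handler.setResultFileDetails(ResultFileDetails.CSV_AGGREGATE_PERFORMANCE);
      handler.setResultFileName("my_perf_run"); // hypothetical seed; normally assigned by ResultManager
      try {
        handler.write(result);
      } finally {
        handler.flush();
        handler.close();
      }
    }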
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; +import java.util.ArrayList; +import java.util.List; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.result.file.ResultFileDetails; import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler; @@ -27,138 +29,133 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; - public class ResultManager { - private static final Logger LOGGER = LoggerFactory.getLogger(ResultManager.class); - - private final List resultHandlers; - private final ResultUtil util; - private static final List defaultHandlers; - private static final List minimalHandlers; - - static { - defaultHandlers = new ArrayList<>(); - XMLResultHandler xmlResultHandler = new XMLResultHandler(); - xmlResultHandler.setResultFileDetails(ResultFileDetails.XML); - defaultHandlers.add(xmlResultHandler); - - ResultHandler handlerAgg = new CSVFileResultHandler(); - handlerAgg.setResultFileDetails(ResultFileDetails.CSV_AGGREGATE_PERFORMANCE); - defaultHandlers.add(handlerAgg); - - ResultHandler handlerDet = new CSVFileResultHandler(); - handlerDet.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE); - defaultHandlers.add(handlerDet); - } - - static { - minimalHandlers = new ArrayList<>(); - ResultHandler cvsHandler = new CSVFileResultHandler(); - cvsHandler.setResultFileDetails(ResultFileDetails.CSV_AGGREGATE_PERFORMANCE); - minimalHandlers.add(cvsHandler); - } + private static final Logger LOGGER = LoggerFactory.getLogger(ResultManager.class); - public ResultManager(String fileNameSeed) { - this(fileNameSeed, true); - } - - @SuppressWarnings("unchecked") - public ResultManager(String fileNameSeed, boolean writeRuntimeResults) { - this(fileNameSeed, writeRuntimeResults ? 
- InstanceResolver.get(ResultHandler.class, defaultHandlers) : - InstanceResolver.get(ResultHandler.class, minimalHandlers)); - } + private final List resultHandlers; + private final ResultUtil util; + private static final List defaultHandlers; + private static final List minimalHandlers; - public ResultManager(String fileNameSeed, List resultHandlers) { - this.resultHandlers = resultHandlers; - util = new ResultUtil(); + static { + defaultHandlers = new ArrayList<>(); + XMLResultHandler xmlResultHandler = new XMLResultHandler(); + xmlResultHandler.setResultFileDetails(ResultFileDetails.XML); + defaultHandlers.add(xmlResultHandler); - for (ResultHandler resultHandler : resultHandlers) { - if (resultHandler.getResultFileName() == null) { - resultHandler.setResultFileName(fileNameSeed); - } - } - } + ResultHandler handlerAgg = new CSVFileResultHandler(); + handlerAgg.setResultFileDetails(ResultFileDetails.CSV_AGGREGATE_PERFORMANCE); + defaultHandlers.add(handlerAgg); - - public synchronized void write(DataModelResult result) throws Exception { - write(result, null); - } - - /** - * Write out the result to each writer in the pool - * - * @param result {@link DataModelResult} - * @throws Exception - */ - public synchronized void write(DataModelResult result, RulesApplier ruleApplier) throws Exception { - try { - util.ensureBaseResultDirExists(); - final DataModelResult dataModelResultCopy = new DataModelResult(result); - for (ResultHandler handler : resultHandlers) { - util.write(handler, dataModelResultCopy, ruleApplier); - } - } finally { - for (ResultHandler handler : resultHandlers) { - try { - if (handler != null) { - handler.flush(); - handler.close(); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - } - } + ResultHandler handlerDet = new CSVFileResultHandler(); + handlerDet.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE); + defaultHandlers.add(handlerDet); + } + + static { + minimalHandlers = new ArrayList<>(); + ResultHandler cvsHandler = new CSVFileResultHandler(); + cvsHandler.setResultFileDetails(ResultFileDetails.CSV_AGGREGATE_PERFORMANCE); + minimalHandlers.add(cvsHandler); + } + + public ResultManager(String fileNameSeed) { + this(fileNameSeed, true); + } + + @SuppressWarnings("unchecked") + public ResultManager(String fileNameSeed, boolean writeRuntimeResults) { + this(fileNameSeed, + writeRuntimeResults + ? InstanceResolver.get(ResultHandler.class, defaultHandlers) + : InstanceResolver.get(ResultHandler.class, minimalHandlers)); + } + + public ResultManager(String fileNameSeed, List resultHandlers) { + this.resultHandlers = resultHandlers; + util = new ResultUtil(); - public synchronized void write(List dataModelResults) throws Exception { - write(dataModelResults, null); + for (ResultHandler resultHandler : resultHandlers) { + if (resultHandler.getResultFileName() == null) { + resultHandler.setResultFileName(fileNameSeed); + } } - - /** - * Write a combined set of results for each result in the list. 
- * - * @param dataModelResults {@code List } - * @throws Exception - */ - public synchronized void write(List dataModelResults, RulesApplier rulesApplier) throws Exception { - util.ensureBaseResultDirExists(); - - CSVFileResultHandler detailsCSVWriter = null; + } + + public synchronized void write(DataModelResult result) throws Exception { + write(result, null); + } + + /** + * Write out the result to each writer in the pool + * @param result {@link DataModelResult} + */ + public synchronized void write(DataModelResult result, RulesApplier ruleApplier) + throws Exception { + try { + util.ensureBaseResultDirExists(); + final DataModelResult dataModelResultCopy = new DataModelResult(result); + for (ResultHandler handler : resultHandlers) { + util.write(handler, dataModelResultCopy, ruleApplier); + } + } finally { + for (ResultHandler handler : resultHandlers) { try { - detailsCSVWriter = new CSVFileResultHandler(); - detailsCSVWriter.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE); - detailsCSVWriter.setResultFileName(PherfConstants.COMBINED_FILE_NAME); - for (DataModelResult dataModelResult : dataModelResults) { - util.write(detailsCSVWriter, dataModelResult, rulesApplier); - } - } finally { - if (detailsCSVWriter != null) { - detailsCSVWriter.flush(); - detailsCSVWriter.close(); - } + if (handler != null) { + handler.flush(); + handler.close(); + } + } catch (Exception e) { + e.printStackTrace(); } + } } + } - /** - * Allows for flushing all the {@link org.apache.phoenix.pherf.result.ResultHandler} - */ - public synchronized void flush(){ - for (ResultHandler handler : resultHandlers) { - try { - handler.flush(); - } catch (Exception e) { - e.printStackTrace(); - LOGGER.warn("Could not flush handler: " - + handler.getResultFileName() + " : " + e.getMessage()); - } - } + public synchronized void write(List dataModelResults) throws Exception { + write(dataModelResults, null); + } + + /** + * Write a combined set of results for each result in the list. 
+ * @param dataModelResults {@code List } + */ + public synchronized void write(List dataModelResults, RulesApplier rulesApplier) + throws Exception { + util.ensureBaseResultDirExists(); + + CSVFileResultHandler detailsCSVWriter = null; + try { + detailsCSVWriter = new CSVFileResultHandler(); + detailsCSVWriter.setResultFileDetails(ResultFileDetails.CSV_DETAILED_PERFORMANCE); + detailsCSVWriter.setResultFileName(PherfConstants.COMBINED_FILE_NAME); + for (DataModelResult dataModelResult : dataModelResults) { + util.write(detailsCSVWriter, dataModelResult, rulesApplier); + } + } finally { + if (detailsCSVWriter != null) { + detailsCSVWriter.flush(); + detailsCSVWriter.close(); + } } + } - public List getResultHandlers() { - return resultHandlers; + /** + * Allows for flushing all the {@link org.apache.phoenix.pherf.result.ResultHandler} + */ + public synchronized void flush() { + for (ResultHandler handler : resultHandlers) { + try { + handler.flush(); + } catch (Exception e) { + e.printStackTrace(); + LOGGER + .warn("Could not flush handler: " + handler.getResultFileName() + " : " + e.getMessage()); + } } + } + + public List getResultHandlers() { + return resultHandlers; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java index a0ddc0cf5ca..f91e9cb48fd 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java @@ -1,31 +1,22 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
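For illustration only (not part of the patch), the ResultManager methods above can be driven roughly as follows, assuming a populated DataModelResult is already in hand and using a hypothetical file name seed:

    import org.apache.phoenix.pherf.result.DataModelResult;
    import org.apache.phoenix.pherf.result.ResultManager;

    static void report(DataModelResult dataModelResult) throws Exception {
      // "perf_run" is a hypothetical seed; handlers without a file name inherit it.
      ResultManager manager = new ResultManager("perf_run");
      // Writes to every configured handler, then flushes and closes them in its finally block.
      manager.write(dataModelResult);
    }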
*/ - package org.apache.phoenix.pherf.result; -import org.apache.commons.io.FileUtils; -import org.apache.phoenix.pherf.PherfConstants; -import org.apache.phoenix.pherf.result.file.ResultFileDetails; -import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler; -import org.apache.phoenix.pherf.result.impl.CSVResultHandler; -import org.apache.phoenix.pherf.rules.RulesApplier; -import org.apache.phoenix.pherf.util.PhoenixUtil; - import java.io.File; import java.io.IOException; import java.text.Format; @@ -35,237 +26,228 @@ import java.util.List; import java.util.Map; -public class ResultUtil { - - /*This variable needs to be static - Otherwise multiple result files will be generated*/ - private static String FILE_SUFFIX = null; - - /** - * Write data load time details - * - * @param dataLoadThreadTime {@link DataLoadThreadTime} - * @throws IOException - */ - public synchronized void write(DataLoadThreadTime dataLoadThreadTime) throws IOException { - ensureBaseResultDirExists(); +import org.apache.commons.io.FileUtils; +import org.apache.phoenix.pherf.PherfConstants; +import org.apache.phoenix.pherf.result.file.ResultFileDetails; +import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler; +import org.apache.phoenix.pherf.result.impl.CSVResultHandler; +import org.apache.phoenix.pherf.rules.RulesApplier; +import org.apache.phoenix.pherf.util.PhoenixUtil; - CSVResultHandler writer = null; - try { - if (!dataLoadThreadTime.getThreadTime().isEmpty()) { - writer = new CSVFileResultHandler(); - writer.setResultFileDetails(ResultFileDetails.CSV); - writer.setResultFileName("Data_Load_Details"); +public class ResultUtil { - for (WriteThreadTime writeThreadTime : dataLoadThreadTime.getThreadTime()) { - List rowValues = new ArrayList<>(); - rowValues.add(new ResultValue(PhoenixUtil.getZookeeper())); - rowValues.addAll(writeThreadTime.getCsvRepresentation(this)); - Result - result = - new Result(ResultFileDetails.CSV_DETAILED_PERFORMANCE, - "ZK," + dataLoadThreadTime.getCsvTitle(), rowValues); - writer.write(result); - } - } - } finally { - if (writer != null) { - writer.flush(); - writer.close(); - } + /* This variable needs to be static - Otherwise multiple result files will be generated */ + private static String FILE_SUFFIX = null; + + /** + * Write data load time details + * @param dataLoadThreadTime {@link DataLoadThreadTime} + */ + public synchronized void write(DataLoadThreadTime dataLoadThreadTime) throws IOException { + ensureBaseResultDirExists(); + + CSVResultHandler writer = null; + try { + if (!dataLoadThreadTime.getThreadTime().isEmpty()) { + writer = new CSVFileResultHandler(); + writer.setResultFileDetails(ResultFileDetails.CSV); + writer.setResultFileName("Data_Load_Details"); + + for (WriteThreadTime writeThreadTime : dataLoadThreadTime.getThreadTime()) { + List rowValues = new ArrayList<>(); + rowValues.add(new ResultValue(PhoenixUtil.getZookeeper())); + rowValues.addAll(writeThreadTime.getCsvRepresentation(this)); + Result result = new Result(ResultFileDetails.CSV_DETAILED_PERFORMANCE, + "ZK," + dataLoadThreadTime.getCsvTitle(), rowValues); + writer.write(result); } + } + } finally { + if (writer != null) { + writer.flush(); + writer.close(); + } } + } + + /** + * Write data load time summary + */ + public synchronized void write(DataLoadTimeSummary dataLoadTime) throws IOException { + ensureBaseResultDirExists(); + + CSVResultHandler writer = null; + ResultFileDetails resultFileDetails; + if (PhoenixUtil.isThinDriver()) { + resultFileDetails = 
ResultFileDetails.CSV_THIN_AGGREGATE_DATA_LOAD; + } else { + resultFileDetails = ResultFileDetails.CSV_AGGREGATE_DATA_LOAD; + } + try { + writer = new CSVFileResultHandler(); + writer.setResultFileDetails(resultFileDetails); + writer.setResultFileName("Data_Load_Summary"); - /** - * Write data load time summary - * - * @param dataLoadTime - * @throws IOException - */ - public synchronized void write(DataLoadTimeSummary dataLoadTime) throws IOException { - ensureBaseResultDirExists(); - - CSVResultHandler writer = null; - ResultFileDetails resultFileDetails; + for (TableLoadTime loadTime : dataLoadTime.getTableLoadTime()) { + List rowValues = new ArrayList<>(); if (PhoenixUtil.isThinDriver()) { - resultFileDetails = ResultFileDetails.CSV_THIN_AGGREGATE_DATA_LOAD; + rowValues.add(new ResultValue(PhoenixUtil.getQueryServerUrl())); } else { - resultFileDetails = ResultFileDetails.CSV_AGGREGATE_DATA_LOAD; - } - try { - writer = new CSVFileResultHandler(); - writer.setResultFileDetails(resultFileDetails); - writer.setResultFileName("Data_Load_Summary"); - - for (TableLoadTime loadTime : dataLoadTime.getTableLoadTime()) { - List rowValues = new ArrayList<>(); - if (PhoenixUtil.isThinDriver()) { - rowValues.add(new ResultValue(PhoenixUtil.getQueryServerUrl())); - } else { - rowValues.add(new ResultValue(PhoenixUtil.getZookeeper())); - } - rowValues.addAll(loadTime.getCsvRepresentation(this)); - Result - result = - new Result(resultFileDetails, resultFileDetails.getHeader().toString(), - rowValues); - writer.write(result); - } - } finally { - if (writer != null) { - writer.flush(); - writer.close(); - } + rowValues.add(new ResultValue(PhoenixUtil.getZookeeper())); } + rowValues.addAll(loadTime.getCsvRepresentation(this)); + Result result = + new Result(resultFileDetails, resultFileDetails.getHeader().toString(), rowValues); + writer.write(result); + } + } finally { + if (writer != null) { + writer.flush(); + writer.close(); + } } - - public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult, RulesApplier ruleApplier) - throws Exception { - ResultFileDetails resultFileDetails = resultHandler.getResultFileDetails(); - switch (resultFileDetails) { - case CSV_AGGREGATE_PERFORMANCE: - case CSV_DETAILED_PERFORMANCE: - case CSV_DETAILED_FUNCTIONAL: - List> - rowDetails = - getCSVResults(dataModelResult, resultFileDetails, ruleApplier); - for (List row : rowDetails) { - Result - result = - new Result(resultFileDetails, resultFileDetails.getHeader().toString(), - row); - resultHandler.write(result); - } - break; - default: - List resultValue = new ArrayList(); - resultValue.add(new ResultValue<>(dataModelResult)); - resultHandler.write(new Result(resultFileDetails, null, resultValue)); - break; + } + + public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult, + RulesApplier ruleApplier) throws Exception { + ResultFileDetails resultFileDetails = resultHandler.getResultFileDetails(); + switch (resultFileDetails) { + case CSV_AGGREGATE_PERFORMANCE: + case CSV_DETAILED_PERFORMANCE: + case CSV_DETAILED_FUNCTIONAL: + List> rowDetails = + getCSVResults(dataModelResult, resultFileDetails, ruleApplier); + for (List row : rowDetails) { + Result result = + new Result(resultFileDetails, resultFileDetails.getHeader().toString(), row); + resultHandler.write(result); } + break; + default: + List resultValue = new ArrayList(); + resultValue.add(new ResultValue<>(dataModelResult)); + resultHandler.write(new Result(resultFileDetails, null, resultValue)); + 
break; } - - public void ensureBaseResultDirExists() { - PherfConstants constants = PherfConstants.create(); - String resultDir = constants.getProperty("pherf.default.results.dir"); - ensureBaseDirExists(resultDir); + } + + public void ensureBaseResultDirExists() { + PherfConstants constants = PherfConstants.create(); + String resultDir = constants.getProperty("pherf.default.results.dir"); + ensureBaseDirExists(resultDir); + } + + /** + * Utility method to check if base result dir exists + */ + public void ensureBaseDirExists(String directory) { + File baseDir = new File(directory); + if (!baseDir.exists()) { + baseDir.mkdir(); } - - /** - * Utility method to check if base result dir exists - */ - public void ensureBaseDirExists(String directory) { - File baseDir = new File(directory); - if (!baseDir.exists()) { - baseDir.mkdir(); - } - } - - /** - * Utility method to delete directory - * @throws IOException - */ - public void deleteDir(String directory) throws IOException { - File baseDir = new File(directory); - if (baseDir.exists()) { - //deleteDirectory seems to have a problem with MacOs tmp dir symlinks - FileUtils.cleanDirectory(baseDir); - FileUtils.deleteDirectory(baseDir); - } + } + + /** + * Utility method to delete directory + */ + public void deleteDir(String directory) throws IOException { + File baseDir = new File(directory); + if (baseDir.exists()) { + // deleteDirectory seems to have a problem with MacOs tmp dir symlinks + FileUtils.cleanDirectory(baseDir); + FileUtils.deleteDirectory(baseDir); } + } - public String getSuffix() { - if (null == FILE_SUFFIX) { - Date date = new Date(); - Format formatter = new SimpleDateFormat("YYYY-MM-dd_hh-mm-ss"); - FILE_SUFFIX = formatter.format(date); - } - return "_" + FILE_SUFFIX; - } - - public String convertNull(String str) { - if ((str == null) || str.equals("")) { - return "null"; - } - return str; + public String getSuffix() { + if (null == FILE_SUFFIX) { + Date date = new Date(); + Format formatter = new SimpleDateFormat("YYYY-MM-dd_hh-mm-ss"); + FILE_SUFFIX = formatter.format(date); } + return "_" + FILE_SUFFIX; + } - /** - * Used by custom ResultWriter out Pherf's normal code base - * - * @return Header field as a {@link Result} - */ - @SuppressWarnings("unused") - public Result getCSVHeaderAsResult(String row) { - List resultValues = new ArrayList<>(); - resultValues.add(new ResultValue(row)); - return new Result(ResultFileDetails.CSV, row, resultValues); + public String convertNull(String str) { + if ((str == null) || str.equals("")) { + return "null"; } - - private List> getCSVResults(DataModelResult dataModelResult, - ResultFileDetails resultFileDetails, RulesApplier ruleApplier) { - List> rowList = new ArrayList<>(); - - for (ScenarioResult result : dataModelResult.getScenarioResult()) { - for (QuerySetResult querySetResult : result.getQuerySetResult()) { - for (QueryResult queryResult : querySetResult.getQueryResults()) { - switch (resultFileDetails) { - case CSV_AGGREGATE_PERFORMANCE: - List csvResult = queryResult.getCsvRepresentation(this, result, ruleApplier); - rowList.add(csvResult); - break; - case CSV_DETAILED_PERFORMANCE: - case CSV_DETAILED_FUNCTIONAL: - List> - detailedRows = - queryResult.getCsvDetailedRepresentation(this, resultFileDetails); - for (List detailedRowList : detailedRows) { - List valueList = new ArrayList<>(); - valueList.add(new ResultValue(convertNull(result.getTableName()))); - valueList.add(new ResultValue(convertNull(result.getName()))); - valueList.add(new ResultValue( - 
convertNull(dataModelResult.getZookeeper()))); - valueList.add(new ResultValue( - convertNull(String.valueOf(result.getRowCount())))); - valueList.add(new ResultValue(convertNull( - String.valueOf(querySetResult.getNumberOfExecutions())))); - valueList.add(new ResultValue(convertNull( - String.valueOf(querySetResult.getExecutionType())))); - if (result.getPhoenixProperties() != null) { - String props = buildProperty(result); - valueList.add(new ResultValue(convertNull(props))); - } else { - valueList.add(new ResultValue("null")); - } - valueList.addAll(detailedRowList); - rowList.add(valueList); - } - break; - default: - break; - } + return str; + } + + /** + * Used by custom ResultWriter out Pherf's normal code base + * @return Header field as a {@link Result} + */ + @SuppressWarnings("unused") + public Result getCSVHeaderAsResult(String row) { + List resultValues = new ArrayList<>(); + resultValues.add(new ResultValue(row)); + return new Result(ResultFileDetails.CSV, row, resultValues); + } + + private List> getCSVResults(DataModelResult dataModelResult, + ResultFileDetails resultFileDetails, RulesApplier ruleApplier) { + List> rowList = new ArrayList<>(); + + for (ScenarioResult result : dataModelResult.getScenarioResult()) { + for (QuerySetResult querySetResult : result.getQuerySetResult()) { + for (QueryResult queryResult : querySetResult.getQueryResults()) { + switch (resultFileDetails) { + case CSV_AGGREGATE_PERFORMANCE: + List csvResult = + queryResult.getCsvRepresentation(this, result, ruleApplier); + rowList.add(csvResult); + break; + case CSV_DETAILED_PERFORMANCE: + case CSV_DETAILED_FUNCTIONAL: + List> detailedRows = + queryResult.getCsvDetailedRepresentation(this, resultFileDetails); + for (List detailedRowList : detailedRows) { + List valueList = new ArrayList<>(); + valueList.add(new ResultValue(convertNull(result.getTableName()))); + valueList.add(new ResultValue(convertNull(result.getName()))); + valueList.add(new ResultValue(convertNull(dataModelResult.getZookeeper()))); + valueList.add(new ResultValue(convertNull(String.valueOf(result.getRowCount())))); + valueList.add(new ResultValue( + convertNull(String.valueOf(querySetResult.getNumberOfExecutions())))); + valueList.add( + new ResultValue(convertNull(String.valueOf(querySetResult.getExecutionType())))); + if (result.getPhoenixProperties() != null) { + String props = buildProperty(result); + valueList.add(new ResultValue(convertNull(props))); + } else { + valueList.add(new ResultValue("null")); } - } + valueList.addAll(detailedRowList); + rowList.add(valueList); + } + break; + default: + break; + } } - return rowList; + } } + return rowList; + } - private String buildProperty(ScenarioResult result) { - StringBuffer sb = new StringBuffer(); - boolean firstPartialSeparator = true; + private String buildProperty(ScenarioResult result) { + StringBuffer sb = new StringBuffer(); + boolean firstPartialSeparator = true; - for (Map.Entry entry : result.getPhoenixProperties().entrySet()) { - if (!firstPartialSeparator) sb.append("|"); - firstPartialSeparator = false; - sb.append(entry.getKey() + "=" + entry.getValue()); - } - return sb.toString(); - } - - /** - * Set the file suffix - * @param suffix - */ - public static void setFileSuffix(String suffix) { - FILE_SUFFIX = suffix; + for (Map.Entry entry : result.getPhoenixProperties().entrySet()) { + if (!firstPartialSeparator) sb.append("|"); + firstPartialSeparator = false; + sb.append(entry.getKey() + "=" + entry.getValue()); } -} \ No newline at end of file + return 
sb.toString(); + } + + /** + * Set the file suffix + */ + public static void setFileSuffix(String suffix) { + FILE_SUFFIX = suffix; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java index 78364d9ddb0..0fd94f32e7e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java @@ -1,40 +1,40 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; /** - * Generic box container for a result value. This class allows for writing results of any type easily - * + * Generic box container for a result value. This class allows for writing results of any type + * easily * @param */ public class ResultValue { - T resultValue; + T resultValue; - public ResultValue(T resultValue) { - this.resultValue = resultValue; - } + public ResultValue(T resultValue) { + this.resultValue = resultValue; + } - public T getResultValue() { - return resultValue; - } + public T getResultValue() { + return resultValue; + } - @Override public String toString() { - return resultValue.toString(); - } + @Override + public String toString() { + return resultValue.toString(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java index 59bd265f1c2..892b5c9a306 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java @@ -1,122 +1,138 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result; -import javax.xml.bind.annotation.XmlAttribute; import java.util.Comparator; import java.util.Date; +import javax.xml.bind.annotation.XmlAttribute; + public class RunTime implements Comparator, Comparable { - private Date startTime; - private Long elapsedDurationInMs; - private String message; - private Long resultRowCount; - private String explainPlan; - private boolean timedOut; - - @SuppressWarnings("unused") public RunTime() { - } - - @SuppressWarnings("unused") public RunTime(Long elapsedDurationInMs) { - this(null, elapsedDurationInMs); - } - - public RunTime(Long resultRowCount, Long elapsedDurationInMs) { - this(null, resultRowCount, elapsedDurationInMs); - } - - public RunTime(Date startTime, Long resultRowCount, Long elapsedDurationInMs) { - this(null, null, startTime, resultRowCount, elapsedDurationInMs, false); - } - - public RunTime(Date startTime, Long elapsedDurationInMs, boolean timedOut) { - this(null, startTime, null, elapsedDurationInMs, timedOut); - } - - public RunTime(String message, Date startTime, Long resultRowCount, - Long elapsedDurationInMs, boolean timedOut) { - this(message, null, startTime, resultRowCount, elapsedDurationInMs, timedOut); - } - - public RunTime(String message, String explainPlan, Date startTime, Long resultRowCount, - Long elapsedDurationInMs, boolean timedOut) { - this.elapsedDurationInMs = elapsedDurationInMs; - this.startTime = startTime; - this.resultRowCount = resultRowCount; - this.message = message; - this.explainPlan = explainPlan; - this.timedOut = timedOut; - } - - @XmlAttribute() public Date getStartTime() { - return startTime; - } - - @SuppressWarnings("unused") public void setStartTime(Date startTime) { - this.startTime = startTime; - } - - @XmlAttribute() public Long getElapsedDurationInMs() { - return elapsedDurationInMs; - } - - @SuppressWarnings("unused") public void setElapsedDurationInMs(Long elapsedDurationInMs) { - this.elapsedDurationInMs = elapsedDurationInMs; - } - - @Override public int compare(RunTime r1, RunTime r2) { - return r1.getElapsedDurationInMs().compareTo(r2.getElapsedDurationInMs()); - } - - @Override public int compareTo(RunTime o) { - return compare(this, o); - } - - 
@XmlAttribute() public String getMessage() { - return message; - } - - @SuppressWarnings("unused") public void setMessage(String message) { - this.message = message; - } - - @XmlAttribute() public String getExplainPlan() { - return explainPlan; - } - - @SuppressWarnings("unused") public void setExplainPlan(String explainPlan) { - this.explainPlan = explainPlan; - } - - @XmlAttribute() public Long getResultRowCount() { - return resultRowCount; - } - - @SuppressWarnings("unused") public void setResultRowCount(Long resultRowCount) { - this.resultRowCount = resultRowCount; - } - - @SuppressWarnings("unused") public void setTimedOut(boolean timedOut) { - this.timedOut = timedOut; - } - - @XmlAttribute() public boolean getTimedOut() { - return this.timedOut; - } -} \ No newline at end of file + private Date startTime; + private Long elapsedDurationInMs; + private String message; + private Long resultRowCount; + private String explainPlan; + private boolean timedOut; + + @SuppressWarnings("unused") + public RunTime() { + } + + @SuppressWarnings("unused") + public RunTime(Long elapsedDurationInMs) { + this(null, elapsedDurationInMs); + } + + public RunTime(Long resultRowCount, Long elapsedDurationInMs) { + this(null, resultRowCount, elapsedDurationInMs); + } + + public RunTime(Date startTime, Long resultRowCount, Long elapsedDurationInMs) { + this(null, null, startTime, resultRowCount, elapsedDurationInMs, false); + } + + public RunTime(Date startTime, Long elapsedDurationInMs, boolean timedOut) { + this(null, startTime, null, elapsedDurationInMs, timedOut); + } + + public RunTime(String message, Date startTime, Long resultRowCount, Long elapsedDurationInMs, + boolean timedOut) { + this(message, null, startTime, resultRowCount, elapsedDurationInMs, timedOut); + } + + public RunTime(String message, String explainPlan, Date startTime, Long resultRowCount, + Long elapsedDurationInMs, boolean timedOut) { + this.elapsedDurationInMs = elapsedDurationInMs; + this.startTime = startTime; + this.resultRowCount = resultRowCount; + this.message = message; + this.explainPlan = explainPlan; + this.timedOut = timedOut; + } + + @XmlAttribute() + public Date getStartTime() { + return startTime; + } + + @SuppressWarnings("unused") + public void setStartTime(Date startTime) { + this.startTime = startTime; + } + + @XmlAttribute() + public Long getElapsedDurationInMs() { + return elapsedDurationInMs; + } + + @SuppressWarnings("unused") + public void setElapsedDurationInMs(Long elapsedDurationInMs) { + this.elapsedDurationInMs = elapsedDurationInMs; + } + + @Override + public int compare(RunTime r1, RunTime r2) { + return r1.getElapsedDurationInMs().compareTo(r2.getElapsedDurationInMs()); + } + + @Override + public int compareTo(RunTime o) { + return compare(this, o); + } + + @XmlAttribute() + public String getMessage() { + return message; + } + + @SuppressWarnings("unused") + public void setMessage(String message) { + this.message = message; + } + + @XmlAttribute() + public String getExplainPlan() { + return explainPlan; + } + + @SuppressWarnings("unused") + public void setExplainPlan(String explainPlan) { + this.explainPlan = explainPlan; + } + + @XmlAttribute() + public Long getResultRowCount() { + return resultRowCount; + } + + @SuppressWarnings("unused") + public void setResultRowCount(Long resultRowCount) { + this.resultRowCount = resultRowCount; + } + + @SuppressWarnings("unused") + public void setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + } + + @XmlAttribute() + public boolean getTimedOut() { + 
return this.timedOut; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java index 9cac1c73e55..1904c884e11 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java @@ -1,48 +1,48 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
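For illustration only (not part of the patch), RunTime orders itself by elapsed duration through the compare/compareTo pair shown above, so a collection of runs can be reduced directly; the values below are hypothetical:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Date;
    import java.util.List;
    import org.apache.phoenix.pherf.result.RunTime;

    List<RunTime> runs = Arrays.asList(
      new RunTime(new Date(), 100L, 250L),  // startTime, resultRowCount, elapsedDurationInMs
      new RunTime(new Date(), 100L, 175L));
    RunTime fastest = Collections.min(runs); // the 175 ms run
    RunTime slowest = Collections.max(runs); // the 250 ms run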
*/ - package org.apache.phoenix.pherf.result; -import org.apache.phoenix.pherf.configuration.Scenario; - import java.util.ArrayList; import java.util.List; +import org.apache.phoenix.pherf.configuration.Scenario; + public class ScenarioResult extends Scenario { - private List querySetResult = new ArrayList<>(); + private List querySetResult = new ArrayList<>(); - public List getQuerySetResult() { - return querySetResult; - } + public List getQuerySetResult() { + return querySetResult; + } - @SuppressWarnings("unused") public void setQuerySetResult(List querySetResult) { - this.querySetResult = querySetResult; - } + @SuppressWarnings("unused") + public void setQuerySetResult(List querySetResult) { + this.querySetResult = querySetResult; + } - public ScenarioResult() { - } + public ScenarioResult() { + } - public ScenarioResult(Scenario scenario) { - this.setDataOverride(scenario.getDataOverride()); - this.setPhoenixProperties(scenario.getPhoenixProperties()); - this.setRowCount(scenario.getRowCount()); - this.setTableName(scenario.getTableName()); - this.setName(scenario.getName()); - } + public ScenarioResult(Scenario scenario) { + this.setDataOverride(scenario.getDataOverride()); + this.setPhoenixProperties(scenario.getPhoenixProperties()); + this.setRowCount(scenario.getRowCount()); + this.setTableName(scenario.getTableName()); + this.setName(scenario.getName()); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java index e1e76528cad..0a5e9b1e7a5 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java @@ -1,145 +1,141 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.result; -import javax.xml.bind.annotation.XmlAttribute; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.List; -public class ThreadTime { - private List runTimesInMs = Collections.synchronizedList(new ArrayList()); - private String threadName; - - public synchronized List getRunTimesInMs() { - return this.runTimesInMs; - } - - @SuppressWarnings("unused") - public synchronized void setRunTimesInMs(List runTimesInMs) { - this.runTimesInMs = runTimesInMs; - } - - /** - * @return The earliest start time out of collected run times. - */ - public Date getStartTime() { - if (getRunTimesInMs().isEmpty()) return new Date(0); - - Date startTime = null; - synchronized (getRunTimesInMs()) { - for (RunTime runTime : getRunTimesInMs()) { - if (null != runTime.getStartTime()) { - Date currStartTime = new Date(runTime.getStartTime().getTime()); - if (null == startTime) { - startTime = currStartTime; - } else if (currStartTime.compareTo(startTime) < 0) { - startTime = currStartTime; - } - } else { - startTime = new Date(0); - } - } - } - return startTime; - } - - public RunTime getMinTimeInMs() { - if (getRunTimesInMs().isEmpty()) return null; - return Collections.min(getRunTimesInMs()); - } - - public Integer getAvgTimeInMs() { - if (getRunTimesInMs().isEmpty()) return null; +import javax.xml.bind.annotation.XmlAttribute; - Long totalTimeInMs = new Long(0); - for (RunTime runTime : getRunTimesInMs()) { - if (null != runTime.getElapsedDurationInMs()) { - totalTimeInMs += runTime.getElapsedDurationInMs(); - } +public class ThreadTime { + private List runTimesInMs = Collections.synchronizedList(new ArrayList()); + private String threadName; + + public synchronized List getRunTimesInMs() { + return this.runTimesInMs; + } + + @SuppressWarnings("unused") + public synchronized void setRunTimesInMs(List runTimesInMs) { + this.runTimesInMs = runTimesInMs; + } + + /** Returns The earliest start time out of collected run times. 
*/ + public Date getStartTime() { + if (getRunTimesInMs().isEmpty()) return new Date(0); + + Date startTime = null; + synchronized (getRunTimesInMs()) { + for (RunTime runTime : getRunTimesInMs()) { + if (null != runTime.getStartTime()) { + Date currStartTime = new Date(runTime.getStartTime().getTime()); + if (null == startTime) { + startTime = currStartTime; + } else if (currStartTime.compareTo(startTime) < 0) { + startTime = currStartTime; + } + } else { + startTime = new Date(0); } - return (int) (totalTimeInMs / getRunTimesInMs().size()); - } - - public RunTime getMaxTimeInMs() { - if (getRunTimesInMs().isEmpty()) return null; - return Collections.max(getRunTimesInMs()); + } } - - @XmlAttribute() public String getThreadName() { - return threadName; + return startTime; + } + + public RunTime getMinTimeInMs() { + if (getRunTimesInMs().isEmpty()) return null; + return Collections.min(getRunTimesInMs()); + } + + public Integer getAvgTimeInMs() { + if (getRunTimesInMs().isEmpty()) return null; + + Long totalTimeInMs = new Long(0); + for (RunTime runTime : getRunTimesInMs()) { + if (null != runTime.getElapsedDurationInMs()) { + totalTimeInMs += runTime.getElapsedDurationInMs(); + } } - - public void setThreadName(String threadName) { - this.threadName = threadName; - } - - private String parseThreadName(boolean getConcurrency) { - if (getThreadName() == null || !getThreadName().contains(",")) return null; - String[] threadNameSet = getThreadName().split(","); - if (getConcurrency) { - return threadNameSet[1]; - } else { - return threadNameSet[0]; - } + return (int) (totalTimeInMs / getRunTimesInMs().size()); + } + + public RunTime getMaxTimeInMs() { + if (getRunTimesInMs().isEmpty()) return null; + return Collections.max(getRunTimesInMs()); + } + + @XmlAttribute() + public String getThreadName() { + return threadName; + } + + public void setThreadName(String threadName) { + this.threadName = threadName; + } + + private String parseThreadName(boolean getConcurrency) { + if (getThreadName() == null || !getThreadName().contains(",")) return null; + String[] threadNameSet = getThreadName().split(","); + if (getConcurrency) { + return threadNameSet[1]; + } else { + return threadNameSet[0]; } - - public List> getCsvPerformanceRepresentation(ResultUtil util) { - List> rows = new ArrayList<>(); - - for (int i = 0; i < getRunTimesInMs().size(); i++) { - List rowValues = new ArrayList(getRunTimesInMs().size()); - rowValues.add(new ResultValue(util.convertNull(parseThreadName(false)))); - rowValues.add(new ResultValue(util.convertNull(parseThreadName(true)))); - rowValues.add(new ResultValue( - String.valueOf(getRunTimesInMs().get(i).getResultRowCount()))); - if (getRunTimesInMs().get(i).getMessage() == null) { - rowValues.add(new ResultValue(util.convertNull( - String.valueOf(getRunTimesInMs().get(i).getElapsedDurationInMs())))); - } else { - rowValues.add(new ResultValue( - util.convertNull(getRunTimesInMs().get(i).getMessage()))); - } - rowValues.add(new ResultValue(getRunTimesInMs().get(i).getTimedOut())); - rows.add(rowValues); - } - return rows; + } + + public List> getCsvPerformanceRepresentation(ResultUtil util) { + List> rows = new ArrayList<>(); + + for (int i = 0; i < getRunTimesInMs().size(); i++) { + List rowValues = new ArrayList(getRunTimesInMs().size()); + rowValues.add(new ResultValue(util.convertNull(parseThreadName(false)))); + rowValues.add(new ResultValue(util.convertNull(parseThreadName(true)))); + rowValues.add(new 
ResultValue(String.valueOf(getRunTimesInMs().get(i).getResultRowCount()))); + if (getRunTimesInMs().get(i).getMessage() == null) { + rowValues.add(new ResultValue( + util.convertNull(String.valueOf(getRunTimesInMs().get(i).getElapsedDurationInMs())))); + } else { + rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getMessage()))); + } + rowValues.add(new ResultValue(getRunTimesInMs().get(i).getTimedOut())); + rows.add(rowValues); } - - public List> getCsvFunctionalRepresentation(ResultUtil util) { - List> rows = new ArrayList<>(); - - for (int i = 0; i < getRunTimesInMs().size(); i++) { - List rowValues = new ArrayList<>(getRunTimesInMs().size()); - rowValues.add(new ResultValue(util.convertNull(parseThreadName(false)))); - rowValues.add(new ResultValue(util.convertNull(parseThreadName(true)))); - rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getMessage()))); - rowValues.add(new ResultValue( - util.convertNull(getRunTimesInMs().get(i).getExplainPlan()))); - rows.add(rowValues); - } - return rows; + return rows; + } + + public List> getCsvFunctionalRepresentation(ResultUtil util) { + List> rows = new ArrayList<>(); + + for (int i = 0; i < getRunTimesInMs().size(); i++) { + List rowValues = new ArrayList<>(getRunTimesInMs().size()); + rowValues.add(new ResultValue(util.convertNull(parseThreadName(false)))); + rowValues.add(new ResultValue(util.convertNull(parseThreadName(true)))); + rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getMessage()))); + rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getExplainPlan()))); + rows.add(rowValues); } + return rows; + } - public int getRunCount() { - if (getRunTimesInMs().isEmpty()) return 0; - return getRunTimesInMs().size(); - } + public int getRunCount() { + if (getRunTimesInMs().isEmpty()) return 0; + return getRunTimesInMs().size(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java index e6a7308d797..4949977c749 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java @@ -1,37 +1,37 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result.file; public enum Extension { - CSV(".csv"), - XML(".xml"), - VISUALIZATION(".jpg"), - AGGREGATE_CSV("_aggregate" + CSV), - DETAILED_CSV("_detail" + CSV); + CSV(".csv"), + XML(".xml"), + VISUALIZATION(".jpg"), + AGGREGATE_CSV("_aggregate" + CSV), + DETAILED_CSV("_detail" + CSV); - private String extension; + private String extension; - private Extension(String extension) { - this.extension = extension; - } + private Extension(String extension) { + this.extension = extension; + } - @Override public String toString() { - return extension; - } + @Override + public String toString() { + return extension; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java index c888199055c..171e78f7653 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java @@ -1,43 +1,43 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.result.file; public enum Header { - EMPTY(""), - AGGREGATE_PERFORMANCE( - "START_TIME,QUERY_GROUP,QUERY,TENANT_ID,AVG_MAX_TIME_MS,AVG_TIME_MS,AVG_MIN_TIME_MS,RUN_COUNT,EXPLAIN_PLAN,RESULT_ROW_COUNT"), - DETAILED_BASE( - "BASE_TABLE_NAME,SCENARIO_NAME,ZOOKEEPER,ROW_COUNT,EXECUTION_COUNT,EXECUTION_TYPE,PHOENIX_PROPERTIES" - + ",START_TIME,QUERY_GROUP,QUERY_ID,QUERY,TENANT_ID,THREAD_NUMBER,CONCURRENCY_LEVEL"), - DETAILED_PERFORMANCE(DETAILED_BASE + ",RESULT_ROW_COUNT,RUN_TIME_MS,TIMED_OUT"), - DETAILED_FUNCTIONAL(DETAILED_BASE + ",DIFF_STATUS,EXPLAIN_PLAN"), - AGGREGATE_DATA_LOAD("ZK,TABLE_NAME,ROW_COUNT,LOAD_DURATION_IN_MS"), - THIN_AGGREGATE_DATA_LOAD("QUERYSERVER,TABLE_NAME,ROW_COUNT,LOAD_DURATION_IN_MS"), - MONITOR("STAT_NAME,STAT_VALUE,TIME_STAMP"); + EMPTY(""), + AGGREGATE_PERFORMANCE( + "START_TIME,QUERY_GROUP,QUERY,TENANT_ID,AVG_MAX_TIME_MS,AVG_TIME_MS,AVG_MIN_TIME_MS,RUN_COUNT,EXPLAIN_PLAN,RESULT_ROW_COUNT"), + DETAILED_BASE( + "BASE_TABLE_NAME,SCENARIO_NAME,ZOOKEEPER,ROW_COUNT,EXECUTION_COUNT,EXECUTION_TYPE,PHOENIX_PROPERTIES" + + ",START_TIME,QUERY_GROUP,QUERY_ID,QUERY,TENANT_ID,THREAD_NUMBER,CONCURRENCY_LEVEL"), + DETAILED_PERFORMANCE(DETAILED_BASE + ",RESULT_ROW_COUNT,RUN_TIME_MS,TIMED_OUT"), + DETAILED_FUNCTIONAL(DETAILED_BASE + ",DIFF_STATUS,EXPLAIN_PLAN"), + AGGREGATE_DATA_LOAD("ZK,TABLE_NAME,ROW_COUNT,LOAD_DURATION_IN_MS"), + THIN_AGGREGATE_DATA_LOAD("QUERYSERVER,TABLE_NAME,ROW_COUNT,LOAD_DURATION_IN_MS"), + MONITOR("STAT_NAME,STAT_VALUE,TIME_STAMP"); - private String header; + private String header; - private Header(String header) { - this.header = header; - } + private Header(String header) { + this.header = header; + } - @Override public String toString() { - return header; - } -} \ No newline at end of file + @Override + public String toString() { + return header; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/ResultFileDetails.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/ResultFileDetails.java index 51aa407df8a..8ad7bf22a4e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/ResultFileDetails.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/ResultFileDetails.java @@ -1,52 +1,51 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result.file; public enum ResultFileDetails { - CSV(Header.EMPTY, Extension.CSV), - CSV_AGGREGATE_PERFORMANCE(Header.AGGREGATE_PERFORMANCE, Extension.AGGREGATE_CSV), - CSV_DETAILED_PERFORMANCE(Header.DETAILED_PERFORMANCE, Extension.DETAILED_CSV), - CSV_DETAILED_FUNCTIONAL(Header.DETAILED_FUNCTIONAL, Extension.DETAILED_CSV), - CSV_AGGREGATE_DATA_LOAD(Header.AGGREGATE_DATA_LOAD, Extension.CSV), - CSV_THIN_AGGREGATE_DATA_LOAD(Header.THIN_AGGREGATE_DATA_LOAD, Extension.CSV), - CSV_MONITOR(Header.MONITOR, Extension.CSV), - XML(Header.EMPTY, Extension.XML), - IMAGE(Header.EMPTY, Extension.VISUALIZATION); + CSV(Header.EMPTY, Extension.CSV), + CSV_AGGREGATE_PERFORMANCE(Header.AGGREGATE_PERFORMANCE, Extension.AGGREGATE_CSV), + CSV_DETAILED_PERFORMANCE(Header.DETAILED_PERFORMANCE, Extension.DETAILED_CSV), + CSV_DETAILED_FUNCTIONAL(Header.DETAILED_FUNCTIONAL, Extension.DETAILED_CSV), + CSV_AGGREGATE_DATA_LOAD(Header.AGGREGATE_DATA_LOAD, Extension.CSV), + CSV_THIN_AGGREGATE_DATA_LOAD(Header.THIN_AGGREGATE_DATA_LOAD, Extension.CSV), + CSV_MONITOR(Header.MONITOR, Extension.CSV), + XML(Header.EMPTY, Extension.XML), + IMAGE(Header.EMPTY, Extension.VISUALIZATION); - private Header header; - private Extension extension; + private Header header; + private Extension extension; - private ResultFileDetails(Header header, Extension extension) { - this.header = header; - this.extension = extension; - } + private ResultFileDetails(Header header, Extension extension) { + this.header = header; + this.extension = extension; + } - public Extension getExtension() { - return extension; - } + public Extension getExtension() { + return extension; + } - public Header getHeader() { - return header; - } + public Header getHeader() { + return header; + } - public boolean isPerformance() { - return (this == ResultFileDetails.CSV_AGGREGATE_PERFORMANCE) - || (this == CSV_DETAILED_PERFORMANCE); - } + public boolean isPerformance() { + return (this == ResultFileDetails.CSV_AGGREGATE_PERFORMANCE) + || (this == CSV_DETAILED_PERFORMANCE); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVFileResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVFileResultHandler.java index a2dcab33961..dfa176c953b 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVFileResultHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVFileResultHandler.java @@ -1,23 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.result.impl; +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.List; + import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.CSVParser; import org.apache.commons.csv.CSVPrinter; @@ -26,61 +32,53 @@ import org.apache.phoenix.pherf.result.Result; import org.apache.phoenix.pherf.result.ResultValue; -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; - public class CSVFileResultHandler extends CSVResultHandler { - public CSVFileResultHandler() { - super(); - } + public CSVFileResultHandler() { + super(); + } - @Override - public synchronized void write(Result result) throws IOException{ - util.ensureBaseResultDirExists(); - open(result.getHeader()); - super.write(result); - } + @Override + public synchronized void write(Result result) throws IOException { + util.ensureBaseResultDirExists(); + open(result.getHeader()); + super.write(result); + } - public synchronized List read() throws IOException { - util.ensureBaseResultDirExists(); - File file = new File(resultFileName); - try (CSVParser parser = CSVParser - .parse(file, Charset.defaultCharset(), CSVFormat.DEFAULT)) { - List records = parser.getRecords(); - List results = new ArrayList<>(); - String header = null; - for (CSVRecord record : records) { + public synchronized List read() throws IOException { + util.ensureBaseResultDirExists(); + File file = new File(resultFileName); + try (CSVParser parser = CSVParser.parse(file, Charset.defaultCharset(), CSVFormat.DEFAULT)) { + List records = parser.getRecords(); + List results = new ArrayList<>(); + String header = null; + for (CSVRecord record : records) { - // First record is the CSV Header - if (record.getRecordNumber() == 1) { - header = record.toString(); - continue; - } - List resultValues = new ArrayList<>(); - for (String val : record.toString().split(PherfConstants.RESULT_FILE_DELIMETER)) { - resultValues.add(new ResultValue(val)); - } - Result result = new Result(resultFileDetails, header, resultValues); - results.add(result); - } - return results; + // First record is the CSV Header + if (record.getRecordNumber() == 1) { + header = record.toString(); + continue; } + List resultValues = new ArrayList<>(); + for (String val : record.toString().split(PherfConstants.RESULT_FILE_DELIMETER)) { + resultValues.add(new ResultValue(val)); + } + Result result = new 
Result(resultFileDetails, header, resultValues); + results.add(result); + } + return results; } + } - @Override - protected void open(String header) throws IOException { - // Check if already so we only open one writer - if (csvPrinter != null) { - return; - } - csvPrinter = new CSVPrinter(new PrintWriter(resultFileName), CSVFormat.DEFAULT); - Object[] records = header.split(PherfConstants.RESULT_FILE_DELIMETER); - csvPrinter.printRecord(records); - isClosed = false; + @Override + protected void open(String header) throws IOException { + // Check if already so we only open one writer + if (csvPrinter != null) { + return; } + csvPrinter = new CSVPrinter(new PrintWriter(resultFileName), CSVFormat.DEFAULT); + Object[] records = header.split(PherfConstants.RESULT_FILE_DELIMETER); + csvPrinter.printRecord(records); + isClosed = false; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java index 41fadb1de3c..bc0337e370d 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java @@ -1,68 +1,67 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.phoenix.pherf.result.impl; +import java.io.IOException; + import org.apache.commons.csv.CSVPrinter; import org.apache.phoenix.pherf.result.Result; import org.apache.phoenix.pherf.result.ResultUtil; -import java.io.IOException; - public abstract class CSVResultHandler extends DefaultResultHandler { - protected final ResultUtil util; - protected volatile CSVPrinter csvPrinter = null; - protected volatile boolean isClosed = true; + protected final ResultUtil util; + protected volatile CSVPrinter csvPrinter = null; + protected volatile boolean isClosed = true; - public CSVResultHandler() { - this.util = new ResultUtil(); - } + public CSVResultHandler() { + this.util = new ResultUtil(); + } - @Override - public synchronized void write(Result result) throws IOException { - csvPrinter.printRecord(result.getResultValues()); - flush(); - } + @Override + public synchronized void write(Result result) throws IOException { + csvPrinter.printRecord(result.getResultValues()); + flush(); + } - @Override - public synchronized void flush() throws IOException { - if (csvPrinter != null) { - csvPrinter.flush(); - } + @Override + public synchronized void flush() throws IOException { + if (csvPrinter != null) { + csvPrinter.flush(); } + } - @Override - public synchronized void close() throws IOException { - if (csvPrinter != null) { - csvPrinter.flush(); - csvPrinter.close(); - isClosed = true; - } + @Override + public synchronized void close() throws IOException { + if (csvPrinter != null) { + csvPrinter.flush(); + csvPrinter.close(); + isClosed = true; } + } - @Override - public synchronized boolean isClosed() { - return isClosed; - } + @Override + public synchronized boolean isClosed() { + return isClosed; + } - /** - * This method is meant to open the connection to the target CSV location - * @param header {@link String} Comma separated list of header values for CSV - * @throws IOException - */ - protected abstract void open(String header) throws IOException; + /** + * This method is meant to open the connection to the target CSV location + * @param header {@link String} Comma separated list of header values for CSV + */ + protected abstract void open(String header) throws IOException; } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/DefaultResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/DefaultResultHandler.java index 22fb625da62..d26d3f10584 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/DefaultResultHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/DefaultResultHandler.java @@ -1,19 +1,19 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.pherf.result.impl; @@ -22,46 +22,42 @@ import org.apache.phoenix.pherf.result.ResultUtil; import org.apache.phoenix.pherf.result.file.ResultFileDetails; -public abstract class DefaultResultHandler implements ResultHandler{ - protected String resultFileName; - protected ResultFileDetails resultFileDetails; - protected final String resultDir; - protected final ResultUtil util; +public abstract class DefaultResultHandler implements ResultHandler { + protected String resultFileName; + protected ResultFileDetails resultFileDetails; + protected final String resultDir; + protected final ResultUtil util; - public DefaultResultHandler() { - util = new ResultUtil(); - PherfConstants constants = PherfConstants.create(); - this.resultDir = constants.getProperty("pherf.default.results.dir"); - } + public DefaultResultHandler() { + util = new ResultUtil(); + PherfConstants constants = PherfConstants.create(); + this.resultDir = constants.getProperty("pherf.default.results.dir"); + } - /** - * {@link DefaultResultHandler#setResultFileDetails(ResultFileDetails)} Must be called prior to - * setting the file name. Otherwise you will get a NPE. - * - * TODO Change this so NPE is not possible. Needs a bit of refactoring here - * - * @param resultFileName Base name of file - */ - @Override - public void setResultFileName(String resultFileName) { - this.resultFileName = - resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX - + resultFileName + util.getSuffix() + getResultFileDetails() - .getExtension().toString(); - } + /** + * {@link DefaultResultHandler#setResultFileDetails(ResultFileDetails)} Must be called prior to + * setting the file name. Otherwise you will get a NPE. TODO Change this so NPE is not possible. 
+ * Needs a bit of refactoring here + * @param resultFileName Base name of file + */ + @Override + public void setResultFileName(String resultFileName) { + this.resultFileName = resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX + + resultFileName + util.getSuffix() + getResultFileDetails().getExtension().toString(); + } - @Override - public void setResultFileDetails(ResultFileDetails details) { - this.resultFileDetails = details; - } + @Override + public void setResultFileDetails(ResultFileDetails details) { + this.resultFileDetails = details; + } - @Override - public String getResultFileName() { - return resultFileName; - } + @Override + public String getResultFileName() { + return resultFileName; + } - @Override - public ResultFileDetails getResultFileDetails() { - return resultFileDetails; - } + @Override + public ResultFileDetails getResultFileDetails() { + return resultFileDetails; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java index 87d68066e57..a99ba33338d 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.result.impl; import java.io.File; @@ -36,67 +35,67 @@ import org.apache.phoenix.pherf.result.ResultValue; import org.apache.phoenix.pherf.result.file.ResultFileDetails; -public class XMLResultHandler extends DefaultResultHandler{ +public class XMLResultHandler extends DefaultResultHandler { - public XMLResultHandler() { - super(); - } + public XMLResultHandler() { + super(); + } - @Override - public synchronized void write(Result result) throws Exception { - FileOutputStream os = null; - JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class); - Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); - jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); - try { - os = new FileOutputStream(resultFileName); - ResultValue resultValue = result.getResultValues().get(0); - jaxbMarshaller.marshal(resultValue.getResultValue(), os); - } finally { - if (os != null) { - os.flush(); - os.close(); - } - } + @Override + public synchronized void write(Result result) throws Exception { + FileOutputStream os = null; + JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class); + Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); + jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); + try { + os = new FileOutputStream(resultFileName); + ResultValue resultValue = result.getResultValues().get(0); + jaxbMarshaller.marshal(resultValue.getResultValue(), os); + } finally { + if (os != null) { + os.flush(); + os.close(); + } } + } - @Override - public synchronized void flush() throws IOException { - return; - } + @Override + public synchronized void flush() throws IOException { + return; + } - @Override - public synchronized void close() throws IOException { - return; - } + @Override + public synchronized void close() throws IOException { + return; + } - @Override - public synchronized List read() throws Exception { - return readFromResultFile(new File(resultFileName)); - } + @Override + public synchronized List read() throws Exception { + return readFromResultFile(new File(resultFileName)); + } - List readFromResultFile(File resultsFile) throws Exception { - XMLInputFactory xif = XMLInputFactory.newInstance(); - xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); - xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); - JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class); - Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller(); - @SuppressWarnings("rawtypes") - List resultValue = new ArrayList<>(); - XMLStreamReader xmlReader = xif.createXMLStreamReader(new StreamSource(resultsFile)); - resultValue.add(new ResultValue<>(jaxbUnmarshaller.unmarshal(xmlReader))); - List results = new ArrayList<>(); - results.add(new Result(ResultFileDetails.XML, null, resultValue)); - return results; - } + List readFromResultFile(File resultsFile) throws Exception { + XMLInputFactory xif = XMLInputFactory.newInstance(); + xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); + xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class); + Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller(); + @SuppressWarnings("rawtypes") + List resultValue = new ArrayList<>(); + XMLStreamReader xmlReader = xif.createXMLStreamReader(new StreamSource(resultsFile)); + resultValue.add(new ResultValue<>(jaxbUnmarshaller.unmarshal(xmlReader))); + List results = new 
ArrayList<>(); + results.add(new Result(ResultFileDetails.XML, null, resultValue)); + return results; + } - @Override - public boolean isClosed() { - return true; - } + @Override + public boolean isClosed() { + return true; + } - @Override - public void setResultFileDetails(ResultFileDetails details) { - super.setResultFileDetails(ResultFileDetails.XML); - } + @Override + public void setResultFileDetails(ResultFileDetails details) { + super.setResultFileDetails(ResultFileDetails.XML); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/DataValue.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/DataValue.java index 8f5506f30e1..e57d06bdd03 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/DataValue.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/DataValue.java @@ -1,100 +1,99 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.pherf.configuration.DataTypeMapping; - import javax.xml.bind.annotation.*; +import org.apache.phoenix.pherf.configuration.DataTypeMapping; + public class DataValue { - private DataTypeMapping type; - private String value; - private String maxValue; - private String minValue; - private int distribution; - private boolean useCurrentDate; - - public DataValue() { - super(); - } - - public DataValue(DataTypeMapping type, String value) { - this.type = type; - this.value = value; - this.distribution = Integer.MIN_VALUE; - this.useCurrentDate = false; - } - - public DataValue(DataValue dataValue) { - this(dataValue.getType(), dataValue.getValue()); - this.setDistribution(dataValue.getDistribution()); - this.setMinValue(dataValue.getMinValue()); - this.setMaxValue(dataValue.getMaxValue()); - this.setUseCurrentDate(dataValue.getUseCurrentDate()); - } - - public String getValue() { - return value; - } - - public DataTypeMapping getType() { - return type; - } - - public int getDistribution() { - return distribution; - } - - @XmlAttribute() - public void setDistribution(int distribution) { - this.distribution = distribution; - } - - public void setType(DataTypeMapping type) { - this.type = type; - } - - public void setValue(String value) { - this.value = value; - } - - public String getMinValue() { - return minValue; - } - - public void setMinValue(String minValue) { - this.minValue = minValue; - } - - public String getMaxValue() { - return maxValue; - } - - public void setMaxValue(String maxValue) { - this.maxValue = maxValue; - } - - public boolean getUseCurrentDate() { - return useCurrentDate; - } - - public void setUseCurrentDate(boolean useCurrentDate) { - this.useCurrentDate = useCurrentDate; - } + private DataTypeMapping type; + private String value; + private String maxValue; + private String minValue; + private int distribution; + private boolean useCurrentDate; + + public DataValue() { + super(); + } + + public DataValue(DataTypeMapping type, String value) { + this.type = type; + this.value = value; + this.distribution = Integer.MIN_VALUE; + this.useCurrentDate = false; + } + + public DataValue(DataValue dataValue) { + this(dataValue.getType(), dataValue.getValue()); + this.setDistribution(dataValue.getDistribution()); + this.setMinValue(dataValue.getMinValue()); + this.setMaxValue(dataValue.getMaxValue()); + this.setUseCurrentDate(dataValue.getUseCurrentDate()); + } + + public String getValue() { + return value; + } + + public DataTypeMapping getType() { + return type; + } + + public int getDistribution() { + return distribution; + } + + @XmlAttribute() + public void setDistribution(int distribution) { + this.distribution = distribution; + } + + public void setType(DataTypeMapping type) { + this.type = type; + } + + public void setValue(String value) { + this.value = value; + } + + public String getMinValue() { + return minValue; + } + + public void setMinValue(String minValue) { + this.minValue = minValue; + } + + public String getMaxValue() { + return maxValue; + } + + public void setMaxValue(String maxValue) { + this.maxValue = maxValue; + } + + public boolean getUseCurrentDate() { + return useCurrentDate; + } + + public void setUseCurrentDate(boolean useCurrentDate) { + this.useCurrentDate = useCurrentDate; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java index 
d68e4682889..d1269587ad5 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RuleBasedDataGenerator.java @@ -1,30 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.rules; public interface RuleBasedDataGenerator { - /** - * Get data value based on the rules - * Implementations should be thread safe as multiple theads will call it in parallel - * - * @return {@link org.apache.phoenix.pherf.rules.DataValue} {@code Container Type --> Value } mapping - */ - DataValue getDataValue(); + /** + * Get data value based on the rules Implementations should be thread safe as multiple theads will + * call it in parallel + * @return {@link org.apache.phoenix.pherf.rules.DataValue} {@code Container Type --> Value } + * mapping + */ + DataValue getDataValue(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java index 09de9ddb387..af0cf438cb1 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.rules; import java.time.Instant; @@ -51,566 +50,569 @@ import org.slf4j.LoggerFactory; public class RulesApplier { - private static final Logger LOGGER = LoggerFactory.getLogger(RulesApplier.class); - - // Used to bail out of random distribution if it takes too long - // This should never happen when distributions add up to 100 - private static final int OH_SHIT_LIMIT = 1000; - - private final Random rndNull; - private final Random rndVal; - private final RandomDataGenerator randomDataGenerator; - - private final DataModel dataModel; - private final XMLConfigParser parser; - private final List modelList; - private final Map columnMap; - private String cachedScenarioOverrideName; - private Map scenarioOverrideMap; - - private ConcurrentHashMap columnRuleBasedDataGeneratorMap = new ConcurrentHashMap<>(); + private static final Logger LOGGER = LoggerFactory.getLogger(RulesApplier.class); + + // Used to bail out of random distribution if it takes too long + // This should never happen when distributions add up to 100 + private static final int OH_SHIT_LIMIT = 1000; + + private final Random rndNull; + private final Random rndVal; + private final RandomDataGenerator randomDataGenerator; + + private final DataModel dataModel; + private final XMLConfigParser parser; + private final List modelList; + private final Map columnMap; + private String cachedScenarioOverrideName; + private Map scenarioOverrideMap; + + private ConcurrentHashMap columnRuleBasedDataGeneratorMap = + new ConcurrentHashMap<>(); + + // Since rules are only relevant for a given data model, + // added a constructor to support a single data model => RulesApplier(DataModel model) + + // We should deprecate the RulesApplier(XMLConfigParser parser) constructor, + // since a parser can have multiple data models (all the models found on the classpath) + // it implies that the rules apply to all the data models the parser holds + // which can be confusing to the user of this class. 
+ // + + public RulesApplier(DataModel model) { + this(model, EnvironmentEdgeManager.currentTimeMillis()); + } + + public RulesApplier(DataModel model, long seed) { + this.parser = null; + this.dataModel = model; + this.modelList = new ArrayList(); + this.columnMap = new HashMap(); + this.rndNull = new Random(seed); + this.rndVal = new Random(seed); + this.randomDataGenerator = new RandomDataGenerator(); + this.cachedScenarioOverrideName = null; + populateModelList(); + } + + public RulesApplier(XMLConfigParser parser) { + this(parser, EnvironmentEdgeManager.currentTimeMillis()); + } + + public RulesApplier(XMLConfigParser parser, long seed) { + this.parser = parser; + this.dataModel = null; + this.modelList = new ArrayList(); + this.columnMap = new HashMap(); + this.rndNull = new Random(seed); + this.rndVal = new Random(seed); + this.randomDataGenerator = new RandomDataGenerator(); + this.cachedScenarioOverrideName = null; + populateModelList(); + } + + public List getModelList() { + return Collections.unmodifiableList(this.modelList); + } + + private Map getCachedScenarioOverrides(Scenario scenario) { + if ( + this.cachedScenarioOverrideName == null + || this.cachedScenarioOverrideName != scenario.getName() + ) { + this.cachedScenarioOverrideName = scenario.getName(); + this.scenarioOverrideMap = new HashMap(); + + if (scenario.getDataOverride() != null) { + for (Column column : scenario.getDataOverride().getColumn()) { + List cols; + DataTypeMapping type = column.getType(); + if (this.scenarioOverrideMap.containsKey(type)) { + this.scenarioOverrideMap.get(type).add(column); + } else { + cols = new LinkedList(); + cols.add(column); + this.scenarioOverrideMap.put(type, cols); + } + } + } + } + return scenarioOverrideMap; + } + + /** + * Get a data value based on rules. + * @param scenario {@link org.apache.phoenix.pherf.configuration.Scenario} We are getting + * data for + * @param phxMetaColumn {@link org.apache.phoenix.pherf.configuration.Column} From Phoenix + * MetaData that are generating data for. It defines the type we are trying + * to match. + */ + public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception { + // TODO Make a Set of Rules that have already been applied so that so we don't generate for + // every value + + List scenarios = dataModel != null ? 
dataModel.getScenarios() : parser.getScenarios(); + DataValue value = null; + if (scenarios.contains(scenario)) { + LOGGER.debug("We found a correct Scenario" + scenario.getName() + "column " + + phxMetaColumn.getName() + " " + phxMetaColumn.getType()); + + Map overrideRuleMap = this.getCachedScenarioOverrides(scenario); + + if (overrideRuleMap != null) { + List overrideRuleList = + this.getCachedScenarioOverrides(scenario).get(phxMetaColumn.getType()); + + if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) { + LOGGER.debug("We found a correct override column rule" + overrideRuleList); + Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn); + if (columnRule != null) { + return getDataValue(columnRule); + } + } + } - // Since rules are only relevant for a given data model, - // added a constructor to support a single data model => RulesApplier(DataModel model) + // Assume the first rule map + Map ruleMap = modelList.get(0); + List ruleList = ruleMap.get(phxMetaColumn.getType()); - // We should deprecate the RulesApplier(XMLConfigParser parser) constructor, - // since a parser can have multiple data models (all the models found on the classpath) - // it implies that the rules apply to all the data models the parser holds - // which can be confusing to the user of this class. - // + // Make sure Column from Phoenix Metadata matches a rule column + if (ruleList != null && ruleList.contains(phxMetaColumn)) { + // Generate some random data based on this rule + LOGGER.debug("We found a correct column rule" + ruleList); + Column columnRule = getColumnForRule(ruleList, phxMetaColumn); - public RulesApplier(DataModel model) { - this(model, EnvironmentEdgeManager.currentTimeMillis()); - } + value = getDataValue(columnRule); + } else { + LOGGER.warn(String.format( + "Attempted to apply rule to data, " + "but could not find a rule to match type %s on %s", + phxMetaColumn.getType(), phxMetaColumn.getName())); + } - public RulesApplier(DataModel model, long seed) { - this.parser = null; - this.dataModel = model; - this.modelList = new ArrayList(); - this.columnMap = new HashMap(); - this.rndNull = new Random(seed); - this.rndVal = new Random(seed); - this.randomDataGenerator = new RandomDataGenerator(); - this.cachedScenarioOverrideName = null; - populateModelList(); } - public RulesApplier(XMLConfigParser parser) { - this(parser, EnvironmentEdgeManager.currentTimeMillis()); + return value; + } + + /** + * Get data value based on the supplied rule + * @param column {@link org.apache.phoenix.pherf.configuration.Column} Column rule to get data for + * @return {@link org.apache.phoenix.pherf.rules.DataValue} + * {@code Container Type --> Value mapping } + */ + public DataValue getDataValue(Column column) throws Exception { + DataValue data = null; + String prefix = ""; + int length = column.getLength(); + int nullChance = column.getNullChance(); + List dataValues = column.getDataValues(); + + // Return an empty value if we fall within the configured probability of null + if ((nullChance != Integer.MIN_VALUE) && (isValueNull(nullChance))) { + return new DataValue(column.getType(), ""); } - public RulesApplier(XMLConfigParser parser, long seed) { - this.parser = parser; - this.dataModel = null; - this.modelList = new ArrayList(); - this.columnMap = new HashMap(); - this.rndNull = new Random(seed); - this.rndVal = new Random(seed); - this.randomDataGenerator = new RandomDataGenerator(); - this.cachedScenarioOverrideName = null; - populateModelList(); + if 
(column.getPrefix() != null) { + prefix = column.getPrefix(); } - public List getModelList() { - return Collections.unmodifiableList(this.modelList); + if ((prefix.length() >= length) && (length > 0)) { + LOGGER.warn("You are attempting to generate data with a prefix (" + prefix + ") " + + "That is longer than expected overall field length (" + length + "). " + + "This will certainly lead to unexpected data values."); } - - private Map getCachedScenarioOverrides(Scenario scenario) { - if (this.cachedScenarioOverrideName == null || this.cachedScenarioOverrideName != scenario.getName()) { - this.cachedScenarioOverrideName = scenario.getName(); - this.scenarioOverrideMap = new HashMap(); - - if (scenario.getDataOverride() != null) { - for (Column column : scenario.getDataOverride().getColumn()) { - List cols; - DataTypeMapping type = column.getType(); - if (this.scenarioOverrideMap.containsKey(type)) { - this.scenarioOverrideMap.get(type).add(column); - } else { - cols = new LinkedList(); - cols.add(column); - this.scenarioOverrideMap.put(type, cols); - } - } - } - } - return scenarioOverrideMap; - } - - - /** - * Get a data value based on rules. - * - * @param scenario {@link org.apache.phoenix.pherf.configuration.Scenario} We are getting data for - * @param phxMetaColumn {@link org.apache.phoenix.pherf.configuration.Column} - * From Phoenix MetaData that are - * generating data for. It defines the - * type we are trying to match. - * @return - * @throws Exception - */ - public DataValue getDataForRule(Scenario scenario, Column phxMetaColumn) throws Exception { - // TODO Make a Set of Rules that have already been applied so that so we don't generate for every value - - List scenarios = dataModel != null ? dataModel.getScenarios() : parser.getScenarios(); - DataValue value = null; - if (scenarios.contains(scenario)) { - LOGGER.debug("We found a correct Scenario" + scenario.getName() + - "column " + phxMetaColumn.getName() + " " + phxMetaColumn.getType()); - - Map overrideRuleMap = this.getCachedScenarioOverrides(scenario); - - if (overrideRuleMap != null) { - List overrideRuleList = this.getCachedScenarioOverrides(scenario).get(phxMetaColumn.getType()); - - if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) { - LOGGER.debug("We found a correct override column rule" + overrideRuleList); - Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn); - if (columnRule != null) { - return getDataValue(columnRule); - } - } - } - - // Assume the first rule map - Map ruleMap = modelList.get(0); - List ruleList = ruleMap.get(phxMetaColumn.getType()); - - // Make sure Column from Phoenix Metadata matches a rule column - if (ruleList != null && ruleList.contains(phxMetaColumn)) { - // Generate some random data based on this rule - LOGGER.debug("We found a correct column rule" + ruleList); - Column columnRule = getColumnForRule(ruleList, phxMetaColumn); - - value = getDataValue(columnRule); - } else { - LOGGER.warn(String.format("Attempted to apply rule to data, " - + "but could not find a rule to match type %s on %s", - phxMetaColumn.getType(), phxMetaColumn.getName())); - } + switch (column.getType()) { + case VARCHAR: + case VARBINARY: + case JSON: + case BSON: + case CHAR: + // Use the specified data values from configs if they exist + if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { + RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); + data = generator.getDataValue(); + } else if ((column.getDataValues() != null) && 
(column.getDataValues().size() > 0)) { + data = pickDataValueFromList(dataValues); + } else { + Preconditions.checkArgument(length > 0, "length needs to be > 0"); + data = getRandomDataValue(column); } - - return value; - } - - /** - * Get data value based on the supplied rule - * - * @param column {@link org.apache.phoenix.pherf.configuration.Column} Column rule to get data for - * @return {@link org.apache.phoenix.pherf.rules.DataValue} {@code Container Type --> Value mapping } - */ - public DataValue getDataValue(Column column) throws Exception { - DataValue data = null; - String prefix = ""; - int length = column.getLength(); - int nullChance = column.getNullChance(); - List dataValues = column.getDataValues(); - - // Return an empty value if we fall within the configured probability of null - if ((nullChance != Integer.MIN_VALUE) && (isValueNull(nullChance))) { - return new DataValue(column.getType(), ""); + break; + case VARCHAR_ARRAY: + // only list datavalues are supported + String arr = ""; + for (DataValue dv : dataValues) { + arr += "," + dv.getValue(); } - - if (column.getPrefix() != null) { - prefix = column.getPrefix(); + if (arr.startsWith(",")) { + arr = arr.replaceFirst(",", ""); } - - if ((prefix.length() >= length) && (length > 0)) { - LOGGER.warn("You are attempting to generate data with a prefix (" + prefix + ") " - + "That is longer than expected overall field length (" + length + "). " - + "This will certainly lead to unexpected data values."); + data = new DataValue(column.getType(), arr); + break; + case DECIMAL: + if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { + data = pickDataValueFromList(dataValues); + } else { + int precision = column.getPrecision(); + double minDbl = column.getMinValue(); + Preconditions.checkArgument((precision > 0) && (precision <= 18), + "Precision must be between 0 and 18"); + Preconditions.checkArgument(minDbl >= 0, + "minvalue must be set in configuration for decimal"); + Preconditions.checkArgument(column.getMaxValue() > 0, + "maxValue must be set in configuration decimal"); + StringBuilder maxValueStr = new StringBuilder(); + + for (int i = 0; i < precision; i++) { + maxValueStr.append(9); + } + + double maxDbl = + Math.min(column.getMaxValue(), Double.parseDouble(maxValueStr.toString())); + final double dbl = RandomUtils.nextDouble(minDbl, maxDbl); + data = new DataValue(column.getType(), String.valueOf(dbl)); } - - switch (column.getType()) { - case VARCHAR: - case VARBINARY: - case JSON: - case BSON: - case CHAR: - // Use the specified data values from configs if they exist - if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { - RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); - data = generator.getDataValue(); - } else if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - data = pickDataValueFromList(dataValues); - } else { - Preconditions.checkArgument(length > 0, "length needs to be > 0"); - data = getRandomDataValue(column); - } - break; - case VARCHAR_ARRAY: - //only list datavalues are supported - String arr = ""; - for (DataValue dv : dataValues) { - arr += "," + dv.getValue(); - } - if (arr.startsWith(",")) { - arr = arr.replaceFirst(",", ""); - } - data = new DataValue(column.getType(), arr); - break; - case DECIMAL: - if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - data = pickDataValueFromList(dataValues); - } else { - int precision = column.getPrecision(); - double minDbl = 
column.getMinValue(); - Preconditions.checkArgument((precision > 0) && (precision <= 18), - "Precision must be between 0 and 18"); - Preconditions.checkArgument(minDbl >= 0, - "minvalue must be set in configuration for decimal"); - Preconditions.checkArgument(column.getMaxValue() > 0, - "maxValue must be set in configuration decimal"); - StringBuilder maxValueStr = new StringBuilder(); - - for (int i = 0; i < precision; i++) { - maxValueStr.append(9); - } - - double maxDbl = - Math.min(column.getMaxValue(), Double.parseDouble(maxValueStr.toString())); - final double dbl = RandomUtils.nextDouble(minDbl, maxDbl); - data = new DataValue(column.getType(), String.valueOf(dbl)); - } - break; - case TINYINT: - case INTEGER: - if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - data = pickDataValueFromList(dataValues); - } else if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { - RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); - data = generator.getDataValue(); - } else { - int minInt = (int) column.getMinValue(); - int maxInt = (int) column.getMaxValue(); - if (column.getType() == DataTypeMapping.TINYINT) { - Preconditions.checkArgument((minInt >= -128) && (minInt <= 128), - "min value need to be set in configuration for tinyints " + column.getName()); - Preconditions.checkArgument((maxInt >= -128) && (maxInt <= 128), - "max value need to be set in configuration for tinyints " + column.getName()); - } - int intVal = ThreadLocalRandom.current().nextInt(minInt, maxInt + 1); - data = new DataValue(column.getType(), String.valueOf(intVal)); - } - break; - case BIGINT: - case UNSIGNED_LONG: - if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - data = pickDataValueFromList(dataValues); - } else { - long minLong = column.getMinValue(); - long maxLong = column.getMaxValue(); - if (column.getType() == DataTypeMapping.UNSIGNED_LONG) - Preconditions.checkArgument((minLong > 0) && (maxLong > 0), - "min and max values need to be set in configuration for unsigned_longs " + column.getName()); - long longVal = RandomUtils.nextLong(minLong, maxLong); - data = new DataValue(column.getType(), String.valueOf(longVal)); - } - break; - case DATE: - case TIMESTAMP: - if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - data = pickDataValueFromList(dataValues); - // Check if date has right format or not - data.setValue(checkDatePattern(data.getValue())); - } else if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { - RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); - data = generator.getDataValue(); - } else if (column.getUseCurrentDate() != true) { - int minYear = (int) column.getMinValue(); - int maxYear = (int) column.getMaxValue(); - Preconditions.checkArgument((minYear > 0) && (maxYear > 0), - "min and max values need to be set in configuration for date/timestamps " + column.getName()); - - String dt = generateRandomDate(minYear, maxYear); - data = new DataValue(column.getType(), dt); - data.setMaxValue(String.valueOf(minYear)); - data.setMinValue(String.valueOf(maxYear)); - } else { - String dt = getCurrentDate(); - data = new DataValue(column.getType(), dt); - } - break; - default: - break; + break; + case TINYINT: + case INTEGER: + if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { + data = pickDataValueFromList(dataValues); + } else if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { + 
RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); + data = generator.getDataValue(); + } else { + int minInt = (int) column.getMinValue(); + int maxInt = (int) column.getMaxValue(); + if (column.getType() == DataTypeMapping.TINYINT) { + Preconditions.checkArgument((minInt >= -128) && (minInt <= 128), + "min value need to be set in configuration for tinyints " + column.getName()); + Preconditions.checkArgument((maxInt >= -128) && (maxInt <= 128), + "max value need to be set in configuration for tinyints " + column.getName()); + } + int intVal = ThreadLocalRandom.current().nextInt(minInt, maxInt + 1); + data = new DataValue(column.getType(), String.valueOf(intVal)); } - Preconditions.checkArgument(data != null, - "Data value could not be generated for some reason. Please check configs"); - return data; - } - - // Convert years into standard date format yyyy-MM-dd HH:mm:ss.SSS z - public String generateRandomDate(int min, int max) throws Exception { - String mindt = min + "-01-01 00:00:00.000"; // set min date as starting of min year - String maxdt = max + "-12-31 23:59:59.999"; // set max date as end of max year - return generateRandomDate(mindt, maxdt); - } - - public String generateRandomDate(String min, String max) throws Exception { - DateTimeFormatter fmtr = - DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN) - .withZone(ZoneId.of("UTC")); - Instant minDt; - Instant maxDt; - Instant dt; - - minDt = ZonedDateTime.parse(checkDatePattern(min), fmtr).toInstant(); - maxDt = ZonedDateTime.parse(checkDatePattern(max), fmtr).toInstant(); - - // Get Ms Date between min and max - synchronized (randomDataGenerator) { - //Make sure date generated is exactly between the passed limits - long rndLong = randomDataGenerator.nextLong(minDt.toEpochMilli()+1, maxDt.toEpochMilli()-1); - dt = Instant.ofEpochMilli(rndLong); + break; + case BIGINT: + case UNSIGNED_LONG: + if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { + data = pickDataValueFromList(dataValues); + } else { + long minLong = column.getMinValue(); + long maxLong = column.getMaxValue(); + if ( + column.getType() == DataTypeMapping.UNSIGNED_LONG + ) Preconditions.checkArgument((minLong > 0) && (maxLong > 0), + "min and max values need to be set in configuration for unsigned_longs " + + column.getName()); + long longVal = RandomUtils.nextLong(minLong, maxLong); + data = new DataValue(column.getType(), String.valueOf(longVal)); } - - return fmtr.format(dt); + break; + case DATE: + case TIMESTAMP: + if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { + data = pickDataValueFromList(dataValues); + // Check if date has right format or not + data.setValue(checkDatePattern(data.getValue())); + } else if (DataSequence.SEQUENTIAL.equals(column.getDataSequence())) { + RuleBasedDataGenerator generator = getRuleBasedDataGeneratorForColumn(column); + data = generator.getDataValue(); + } else if (column.getUseCurrentDate() != true) { + int minYear = (int) column.getMinValue(); + int maxYear = (int) column.getMaxValue(); + Preconditions.checkArgument((minYear > 0) && (maxYear > 0), + "min and max values need to be set in configuration for date/timestamps " + + column.getName()); + + String dt = generateRandomDate(minYear, maxYear); + data = new DataValue(column.getType(), dt); + data.setMaxValue(String.valueOf(minYear)); + data.setMinValue(String.valueOf(maxYear)); + } else { + String dt = getCurrentDate(); + data = new DataValue(column.getType(), dt); + } + 
break; + default: + break; } - - public String getCurrentDate() { - DateTimeFormatter fmtr = - DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN) - .withZone(ZoneId.of("UTC")); - LocalDateTime dt = LocalDateTime.now(); - return fmtr.format(dt); + Preconditions.checkArgument(data != null, + "Data value could not be generated for some reason. Please check configs"); + return data; + } + + // Convert years into standard date format yyyy-MM-dd HH:mm:ss.SSS z + public String generateRandomDate(int min, int max) throws Exception { + String mindt = min + "-01-01 00:00:00.000"; // set min date as starting of min year + String maxdt = max + "-12-31 23:59:59.999"; // set max date as end of max year + return generateRandomDate(mindt, maxdt); + } + + public String generateRandomDate(String min, String max) throws Exception { + DateTimeFormatter fmtr = + DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN).withZone(ZoneId.of("UTC")); + Instant minDt; + Instant maxDt; + Instant dt; + + minDt = ZonedDateTime.parse(checkDatePattern(min), fmtr).toInstant(); + maxDt = ZonedDateTime.parse(checkDatePattern(max), fmtr).toInstant(); + + // Get Ms Date between min and max + synchronized (randomDataGenerator) { + // Make sure date generated is exactly between the passed limits + long rndLong = + randomDataGenerator.nextLong(minDt.toEpochMilli() + 1, maxDt.toEpochMilli() - 1); + dt = Instant.ofEpochMilli(rndLong); } - /** - * Given an int chance [0-100] inclusive, this method will return true if a winner is selected, otherwise false. - * - * @param chance Percentage as an int while number. - * @return boolean if we pick a number within range - */ - private boolean isValueNull(int chance) { - return (rndNull.nextInt(100) < chance); + return fmtr.format(dt); + } + + public String getCurrentDate() { + DateTimeFormatter fmtr = + DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN).withZone(ZoneId.of("UTC")); + LocalDateTime dt = LocalDateTime.now(); + return fmtr.format(dt); + } + + /** + * Given an int chance [0-100] inclusive, this method will return true if a winner is selected, + * otherwise false. + * @param chance Percentage as an int while number. + * @return boolean if we pick a number within range + */ + private boolean isValueNull(int chance) { + return (rndNull.nextInt(100) < chance); + } + + private DataValue pickDataValueFromList(List values) throws Exception { + DataValue generatedDataValue = null; + int sum = 0, count = 0; + + // Verify distributions add up to 100 if they exist + for (DataValue value : values) { + int dist = value.getDistribution(); + sum += dist; } + Preconditions.checkArgument((sum == 100) || (sum == 0), + "Distributions need to add up to 100 or not exist."); - private DataValue pickDataValueFromList(List values) throws Exception{ - DataValue generatedDataValue = null; - int sum = 0, count = 0; + // Spin the wheel until we get a value. + while (generatedDataValue == null) { - // Verify distributions add up to 100 if they exist - for (DataValue value : values) { - int dist = value.getDistribution(); - sum += dist; - } - Preconditions.checkArgument((sum == 100) || (sum == 0), - "Distributions need to add up to 100 or not exist."); + // Give an equal chance at picking any one rule to test + // This prevents rules at the beginning of the list from getting more chances to get picked + int rndIndex = rndVal.nextInt(values.size()); + DataValue valueRule = values.get(rndIndex); - // Spin the wheel until we get a value. 
- while (generatedDataValue == null) { + generatedDataValue = pickDataValueFromList(valueRule); - // Give an equal chance at picking any one rule to test - // This prevents rules at the beginning of the list from getting more chances to get picked - int rndIndex = rndVal.nextInt(values.size()); - DataValue valueRule = values.get(rndIndex); + // While it's possible to get here if you have a bunch of really small distributions, + // It's just really unlikely. This is just a safety just so we actually pick a value. + if (count++ == OH_SHIT_LIMIT) { + LOGGER.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT); + generatedDataValue = valueRule; + } - generatedDataValue = pickDataValueFromList(valueRule); + } + return generatedDataValue; + } - // While it's possible to get here if you have a bunch of really small distributions, - // It's just really unlikely. This is just a safety just so we actually pick a value. - if(count++ == OH_SHIT_LIMIT){ - LOGGER.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT); - generatedDataValue = valueRule; - } + private DataValue pickDataValueFromList(final DataValue valueRule) throws Exception { + DataValue retValue = new DataValue(valueRule); - } - return generatedDataValue; + // Path taken when configuration specifies a specific value to be taken with the tag + if (valueRule.getValue() != null) { + int chance = (valueRule.getDistribution() == 0) ? 100 : valueRule.getDistribution(); + return (rndVal.nextInt(100) <= chance) ? retValue : null; } - private DataValue pickDataValueFromList(final DataValue valueRule) throws Exception{ - DataValue retValue = new DataValue(valueRule); + // Path taken when configuration specifies to use current date + if (valueRule.getUseCurrentDate() == true) { + int chance = (valueRule.getDistribution() == 0) ? 100 : valueRule.getDistribution(); + retValue.setValue(getCurrentDate()); + return (rndVal.nextInt(100) <= chance) ? retValue : null; + } - // Path taken when configuration specifies a specific value to be taken with the tag - if (valueRule.getValue() != null) { - int chance = (valueRule.getDistribution() == 0) ? 100 : valueRule.getDistribution(); - return (rndVal.nextInt(100) <= chance) ? retValue : null; - } + // Later we can add support fo other data types if needed.Right now, we just do this for dates + Preconditions.checkArgument( + (retValue.getMinValue() != null) || (retValue.getMaxValue() != null), + "Both min/maxValue tags must be set if value tag is not used"); + Preconditions.checkArgument((retValue.getType() == DataTypeMapping.DATE), + "Currently on DATE is supported for ranged random values"); + + retValue.setValue(generateRandomDate(retValue.getMinValue(), retValue.getMaxValue())); + + retValue.setValue(generateRandomDate(retValue.getMinValue(), retValue.getMaxValue())); + retValue.setMinValue(checkDatePattern(valueRule.getMinValue())); + retValue.setMaxValue(checkDatePattern(valueRule.getMaxValue())); + return retValue; + } + + // Checks if date is in defult pattern + public String checkDatePattern(String date) { + DateTimeFormatter fmtr = + DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN).withZone(ZoneId.of("UTC")); + Instant parsedDate = ZonedDateTime.parse(date, fmtr).toInstant(); + return fmtr.format(parsedDate); + } + + /** + * Top level {@link java.util.List} {@link java.util.Map}. This will likely only have one entry + * until we have multiple files. + *
+ *
    + * Each Map entry in the List is: {@link java.util.Map} of + * {@link org.apache.phoenix.pherf.configuration.DataTypeMapping} --> List of + * {@link org.apache.phoenix.pherf.configuration.Column Build the initial Map with all the general + * rules. These are contained in: + * ... + *
+ *
    + * Unsupported until V2 Build the overrides by appending them to the list of rules that match the + * column type + */ + private void populateModelList() { + if (!modelList.isEmpty()) { + return; + } - // Path taken when configuration specifies to use current date - if (valueRule.getUseCurrentDate() == true) { - int chance = (valueRule.getDistribution() == 0) ? 100 : valueRule.getDistribution(); - retValue.setValue(getCurrentDate()); - return (rndVal.nextInt(100) <= chance) ? retValue : null; - } + // Since rules are only relevant for a given data model, + // added a constructor to support a single data model => RulesApplier(DataModel model) - // Later we can add support fo other data types if needed.Right now, we just do this for dates - Preconditions.checkArgument((retValue.getMinValue() != null) || (retValue.getMaxValue() != null), "Both min/maxValue tags must be set if value tag is not used"); - Preconditions.checkArgument((retValue.getType() == DataTypeMapping.DATE), "Currently on DATE is supported for ranged random values"); + // We should deprecate the RulesApplier(XMLConfigParser parser) constructor, + // since a parser can have multiple data models (all the models found on the classpath) + // it implies that the rules apply to all the data models the parser holds + // which can be confusing to the user of this class. - retValue.setValue(generateRandomDate(retValue.getMinValue(), retValue.getMaxValue())); + List models = + dataModel != null ? Lists.newArrayList(dataModel) : parser.getDataModels(); + for (DataModel model : models) { + + // Step 1 + final Map ruleMap = new HashMap(); + for (Column column : model.getDataMappingColumns()) { + columnMap.put(column.getName(), column); + + List cols; + DataTypeMapping type = column.getType(); + if (ruleMap.containsKey(type)) { + ruleMap.get(type).add(column); + } else { + cols = new LinkedList(); + cols.add(column); + ruleMap.put(type, cols); + } + } - retValue.setValue(generateRandomDate(retValue.getMinValue(), retValue.getMaxValue())); - retValue.setMinValue(checkDatePattern(valueRule.getMinValue())); - retValue.setMaxValue(checkDatePattern(valueRule.getMaxValue())); - return retValue; + this.modelList.add(ruleMap); } + } - // Checks if date is in defult pattern - public String checkDatePattern(String date) { - DateTimeFormatter fmtr = - DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN) - .withZone(ZoneId.of("UTC")); - Instant parsedDate = ZonedDateTime.parse(date, fmtr).toInstant(); - return fmtr.format(parsedDate); - } + public Column getRule(Column phxMetaColumn) { + // Assume the first rule map + Map ruleMap = modelList.get(0); - /** - * Top level {@link java.util.List} {@link java.util.Map}. This will likely only have one entry until we have - * multiple files. - *
- *
    - * Each Map entry in the List is: - * {@link java.util.Map} of - * {@link org.apache.phoenix.pherf.configuration.DataTypeMapping} --> - * List of {@link org.apache.phoenix.pherf.configuration.Column - * Build the initial Map with all the general rules. - * These are contained in: - * ... - *
- *
    - * Unsupported until V2 - * Build the overrides by appending them to the list of rules that match the column type - */ - private void populateModelList() { - if (!modelList.isEmpty()) { - return; - } + List ruleList = ruleMap.get(phxMetaColumn.getType()); + return getColumnForRule(ruleList, phxMetaColumn); + } - // Since rules are only relevant for a given data model, - // added a constructor to support a single data model => RulesApplier(DataModel model) - - // We should deprecate the RulesApplier(XMLConfigParser parser) constructor, - // since a parser can have multiple data models (all the models found on the classpath) - // it implies that the rules apply to all the data models the parser holds - // which can be confusing to the user of this class. - - List models = dataModel != null ? - Lists.newArrayList(dataModel) : parser.getDataModels(); - for (DataModel model : models) { - - // Step 1 - final Map ruleMap = new HashMap(); - for (Column column : model.getDataMappingColumns()) { - columnMap.put(column.getName(), column); - - List cols; - DataTypeMapping type = column.getType(); - if (ruleMap.containsKey(type)) { - ruleMap.get(type).add(column); - } else { - cols = new LinkedList(); - cols.add(column); - ruleMap.put(type, cols); - } - } - - this.modelList.add(ruleMap); + public Column getRule(String columnName) { + return getRule(columnName, null); + } + + public Column getRule(String columnName, Scenario scenario) { + if (null != scenario && null != scenario.getDataOverride()) { + for (Column column : scenario.getDataOverride().getColumn()) { + if (column.getName().equals(columnName)) { + return column; } + } } - public Column getRule(Column phxMetaColumn) { - // Assume the first rule map - Map ruleMap = modelList.get(0); + return columnMap.get(columnName); + } - List ruleList = ruleMap.get(phxMetaColumn.getType()); - return getColumnForRule(ruleList, phxMetaColumn); - } - - public Column getRule(String columnName) { - return getRule(columnName, null); - } - - public Column getRule(String columnName, Scenario scenario) { - if (null != scenario && null != scenario.getDataOverride()) { - for (Column column: scenario.getDataOverride().getColumn()) { - if (column.getName().equals(columnName)) { - return column; - } - } - } - - return columnMap.get(columnName); + private Column getColumnForRuleOverride(List ruleList, Column phxMetaColumn) { + for (Column columnRule : ruleList) { + if (columnRule.getName().equals(phxMetaColumn.getName())) { + return new Column(columnRule); + } } - private Column getColumnForRuleOverride(List ruleList, Column phxMetaColumn) { - for (Column columnRule : ruleList) { - if (columnRule.getName().equals(phxMetaColumn.getName())) { - return new Column(columnRule); - } - } + return null; + } - return null; - } - - private Column getColumnForRule(List ruleList, Column phxMetaColumn) { - - // Column pointer to head of list - Column ruleAppliedColumn = new Column(ruleList.get(0)); - - // Then we apply each rule override as a mutation to the column - for (Column columnRule : ruleList) { - - // Check if user defined column rules match the column data type we are generating - // We don't want to apply the rule if name doesn't match the column from Phoenix - if (columnRule.isUserDefined() - && !columnRule.getName().equals(phxMetaColumn.getName())) { - continue; - } - ruleAppliedColumn.mutate(columnRule); - } + private Column getColumnForRule(List ruleList, Column phxMetaColumn) { + + // Column pointer to head of list + Column ruleAppliedColumn = new 
Column(ruleList.get(0)); - return ruleAppliedColumn; + // Then we apply each rule override as a mutation to the column + for (Column columnRule : ruleList) { + + // Check if user defined column rules match the column data type we are generating + // We don't want to apply the rule if name doesn't match the column from Phoenix + if (columnRule.isUserDefined() && !columnRule.getName().equals(phxMetaColumn.getName())) { + continue; + } + ruleAppliedColumn.mutate(columnRule); } + return ruleAppliedColumn; + } - private DataValue getRandomDataValue(Column column) { - String varchar = RandomStringUtils.randomAlphanumeric(column.getLength()); - varchar = (column.getPrefix() != null) ? column.getPrefix() + varchar : varchar; + private DataValue getRandomDataValue(Column column) { + String varchar = RandomStringUtils.randomAlphanumeric(column.getLength()); + varchar = (column.getPrefix() != null) ? column.getPrefix() + varchar : varchar; - // Truncate string back down if it exceeds length - varchar = StringUtils.left(varchar, column.getLength()); - return new DataValue(column.getType(), varchar); - } + // Truncate string back down if it exceeds length + varchar = StringUtils.left(varchar, column.getLength()); + return new DataValue(column.getType(), varchar); + } - private RuleBasedDataGenerator getRuleBasedDataGeneratorForColumn(Column column) { - RuleBasedDataGenerator generator = columnRuleBasedDataGeneratorMap.get(column.getName()); - if(generator == null) { - //For now we only have couple of these, likely this should replace for all the methods - switch (column.getType()) { - case VARCHAR: - case JSON: - case BSON: - case VARBINARY: - case CHAR: - if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { - generator = new SequentialListDataGenerator(column); - } else { - generator = new SequentialVarcharDataGenerator(column); - } - break; - case DATE: - case TIMESTAMP: - generator = new SequentialDateDataGenerator(column); - break; - case BIGINT: - case INTEGER: - case TINYINT: - case UNSIGNED_LONG: - generator = new SequentialIntegerDataGenerator(column); - break; - default: - throw new IllegalArgumentException( - String.format("No rule based generator supported for column type %s on %s", - column.getType(), column.getName())); - } - RuleBasedDataGenerator oldGenerator = columnRuleBasedDataGeneratorMap.putIfAbsent(column.getName(),generator); - if (oldGenerator != null) { - // Another thread succeeded in registering their generator first, so let's use that. 
- generator = oldGenerator; - } - } - return generator; + private RuleBasedDataGenerator getRuleBasedDataGeneratorForColumn(Column column) { + RuleBasedDataGenerator generator = columnRuleBasedDataGeneratorMap.get(column.getName()); + if (generator == null) { + // For now we only have couple of these, likely this should replace for all the methods + switch (column.getType()) { + case VARCHAR: + case JSON: + case BSON: + case VARBINARY: + case CHAR: + if ((column.getDataValues() != null) && (column.getDataValues().size() > 0)) { + generator = new SequentialListDataGenerator(column); + } else { + generator = new SequentialVarcharDataGenerator(column); + } + break; + case DATE: + case TIMESTAMP: + generator = new SequentialDateDataGenerator(column); + break; + case BIGINT: + case INTEGER: + case TINYINT: + case UNSIGNED_LONG: + generator = new SequentialIntegerDataGenerator(column); + break; + default: + throw new IllegalArgumentException( + String.format("No rule based generator supported for column type %s on %s", + column.getType(), column.getName())); + } + RuleBasedDataGenerator oldGenerator = + columnRuleBasedDataGeneratorMap.putIfAbsent(column.getName(), generator); + if (oldGenerator != null) { + // Another thread succeeded in registering their generator first, so let's use that. + generator = oldGenerator; + } } + return generator; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialDateDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialDateDataGenerator.java index 9a398c1c847..b8305881822 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialDateDataGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialDateDataGenerator.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.rules; import java.time.LocalDateTime; @@ -29,47 +28,46 @@ import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** - * A generator for sequentially increasing dates. - * For now the increments are fixed at 1 second. + * A generator for sequentially increasing dates. For now the increments are fixed at 1 second. */ public class SequentialDateDataGenerator implements RuleBasedDataGenerator { - private static DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); - private final Column columnRule; - private final AtomicInteger counter; - //Make sure we truncate to milliseconds - private final LocalDateTime startDateTime = - LocalDateTime.parse(LocalDateTime.now().format(FMT), FMT); + private static DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); + private final Column columnRule; + private final AtomicInteger counter; + // Make sure we truncate to milliseconds + private final LocalDateTime startDateTime = + LocalDateTime.parse(LocalDateTime.now().format(FMT), FMT); - public SequentialDateDataGenerator(Column columnRule) { - Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); - Preconditions.checkArgument(isDateType(columnRule.getType())); - this.columnRule = columnRule; - counter = new AtomicInteger(0); - } + public SequentialDateDataGenerator(Column columnRule) { + Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); + Preconditions.checkArgument(isDateType(columnRule.getType())); + this.columnRule = columnRule; + counter = new AtomicInteger(0); + } - @VisibleForTesting - public LocalDateTime getStartDateTime() { - return startDateTime; - } + @VisibleForTesting + public LocalDateTime getStartDateTime() { + return startDateTime; + } - /** - * Note that this method rolls over for attempts to get larger than maxValue - * @return new DataValue - */ - @Override - public DataValue getDataValue() { - LocalDateTime newDateTime = startDateTime.plusSeconds(counter.getAndIncrement()); - String formattedDateTime = newDateTime.format(FMT); - return new DataValue(columnRule.getType(), formattedDateTime); - } + /** + * Note that this method rolls over for attempts to get larger than maxValue + * @return new DataValue + */ + @Override + public DataValue getDataValue() { + LocalDateTime newDateTime = startDateTime.plusSeconds(counter.getAndIncrement()); + String formattedDateTime = newDateTime.format(FMT); + return new DataValue(columnRule.getType(), formattedDateTime); + } - boolean isDateType(DataTypeMapping mapping) { - switch (mapping) { - case DATE: - case TIMESTAMP: - return true; - default: - return false; - } + boolean isDateType(DataTypeMapping mapping) { + switch (mapping) { + case DATE: + case TIMESTAMP: + return true; + default: + return false; } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGenerator.java index 125e0d780b2..88514f2e7d4 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGenerator.java @@ -1,64 +1,64 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataSequence; import org.apache.phoenix.pherf.configuration.DataTypeMapping; - -import java.util.concurrent.atomic.AtomicLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; public class SequentialIntegerDataGenerator implements RuleBasedDataGenerator { - private final Column columnRule; - private final AtomicLong counter; - private final long minValue; - private final long maxValue; + private final Column columnRule; + private final AtomicLong counter; + private final long minValue; + private final long maxValue; - public SequentialIntegerDataGenerator(Column columnRule) { - Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); - Preconditions.checkArgument(isIntegerType(columnRule.getType())); - this.columnRule = columnRule; - minValue = columnRule.getMinValue(); - maxValue = columnRule.getMaxValue(); - counter = new AtomicLong(0); - } + public SequentialIntegerDataGenerator(Column columnRule) { + Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); + Preconditions.checkArgument(isIntegerType(columnRule.getType())); + this.columnRule = columnRule; + minValue = columnRule.getMinValue(); + maxValue = columnRule.getMaxValue(); + counter = new AtomicLong(0); + } - /** - * Note that this method rolls over for attempts to get larger than maxValue - * @return new DataValue - */ - @Override - public DataValue getDataValue() { - return new DataValue(columnRule.getType(), String.valueOf((counter.getAndIncrement() % (maxValue - minValue + 1)) + minValue)); - } + /** + * Note that this method rolls over for attempts to get larger than maxValue + * @return new DataValue + */ + @Override + public DataValue getDataValue() { + return new DataValue(columnRule.getType(), + String.valueOf((counter.getAndIncrement() % (maxValue - minValue + 1)) 
+ minValue)); + } - // Probably could go into a util class in the future - boolean isIntegerType(DataTypeMapping mapping) { - switch (mapping) { - case BIGINT: - case INTEGER: - case TINYINT: - case UNSIGNED_LONG: - return true; - default: - return false; - } + // Probably could go into a util class in the future + boolean isIntegerType(DataTypeMapping mapping) { + switch (mapping) { + case BIGINT: + case INTEGER: + case TINYINT: + case UNSIGNED_LONG: + return true; + default: + return false; } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialListDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialListDataGenerator.java index 293d437f8e6..e972b05dc42 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialListDataGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialListDataGenerator.java @@ -1,68 +1,67 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataSequence; import org.apache.phoenix.pherf.configuration.DataTypeMapping; - -import java.util.concurrent.atomic.AtomicLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** * A generator to round robin thru a list of values. 
*/ public class SequentialListDataGenerator implements RuleBasedDataGenerator { - private final Column columnRule; - private final AtomicLong counter; + private final Column columnRule; + private final AtomicLong counter; - public SequentialListDataGenerator(Column columnRule) { - Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); - Preconditions.checkArgument(columnRule.getDataValues().size() > 0); - Preconditions.checkArgument(isAllowedType(columnRule.getType())); - this.columnRule = columnRule; - counter = new AtomicLong(0); - } + public SequentialListDataGenerator(Column columnRule) { + Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); + Preconditions.checkArgument(columnRule.getDataValues().size() > 0); + Preconditions.checkArgument(isAllowedType(columnRule.getType())); + this.columnRule = columnRule; + counter = new AtomicLong(0); + } - /** - * Note that this method rolls over for attempts to get larger than maxValue - * @return new DataValue - */ - @Override - public DataValue getDataValue() { - long pos = counter.getAndIncrement(); - int index = (int) pos % columnRule.getDataValues().size(); - return columnRule.getDataValues().get(index); - } + /** + * Note that this method rolls over for attempts to get larger than maxValue + * @return new DataValue + */ + @Override + public DataValue getDataValue() { + long pos = counter.getAndIncrement(); + int index = (int) pos % columnRule.getDataValues().size(); + return columnRule.getDataValues().get(index); + } - boolean isAllowedType(DataTypeMapping mapping) { - // For now only varchar list are supported - switch (mapping) { - case VARCHAR: - case VARBINARY: - case JSON: - case BSON: - case CHAR: - return true; - default: - return false; - } + boolean isAllowedType(DataTypeMapping mapping) { + // For now only varchar list are supported + switch (mapping) { + case VARCHAR: + case VARBINARY: + case JSON: + case BSON: + case CHAR: + return true; + default: + return false; } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGenerator.java index 449802de201..8fc9808558d 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGenerator.java @@ -1,77 +1,75 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.commons.lang3.StringUtils; import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataSequence; import org.apache.phoenix.pherf.configuration.DataTypeMapping; -import java.util.concurrent.atomic.AtomicLong; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; /** * A generator for sequentially increasing varchar values. */ public class SequentialVarcharDataGenerator implements RuleBasedDataGenerator { - private final Column columnRule; - private final AtomicLong counter; + private final Column columnRule; + private final AtomicLong counter; - public SequentialVarcharDataGenerator(Column columnRule) { - Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); - Preconditions.checkArgument(isVarcharType(columnRule.getType())); - this.columnRule = columnRule; - counter = new AtomicLong(0); - } + public SequentialVarcharDataGenerator(Column columnRule) { + Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL); + Preconditions.checkArgument(isVarcharType(columnRule.getType())); + this.columnRule = columnRule; + counter = new AtomicLong(0); + } - /** - * Add a numerically increasing counter onto the and of a random string. - * Incremented counter should be thread safe. - * - * @return {@link org.apache.phoenix.pherf.rules.DataValue} - */ - @Override - public DataValue getDataValue() { - DataValue data = null; - long inc = counter.getAndIncrement(); - String strInc = String.valueOf(inc); - int paddedLength = columnRule.getLengthExcludingPrefix(); - String strInc1 = StringUtils.leftPad(strInc, paddedLength, "x"); - String strInc2 = StringUtils.right(strInc1, columnRule.getLengthExcludingPrefix()); - String varchar = (columnRule.getPrefix() != null) ? columnRule.getPrefix() + strInc2: - strInc2; + /** + * Add a numerically increasing counter onto the and of a random string. Incremented counter + * should be thread safe. + * @return {@link org.apache.phoenix.pherf.rules.DataValue} + */ + @Override + public DataValue getDataValue() { + DataValue data = null; + long inc = counter.getAndIncrement(); + String strInc = String.valueOf(inc); + int paddedLength = columnRule.getLengthExcludingPrefix(); + String strInc1 = StringUtils.leftPad(strInc, paddedLength, "x"); + String strInc2 = StringUtils.right(strInc1, columnRule.getLengthExcludingPrefix()); + String varchar = (columnRule.getPrefix() != null) ? 
columnRule.getPrefix() + strInc2 : strInc2; - // Truncate string back down if it exceeds length - varchar = StringUtils.left(varchar,columnRule.getLength()); - data = new DataValue(columnRule.getType(), varchar); - return data; - } + // Truncate string back down if it exceeds length + varchar = StringUtils.left(varchar, columnRule.getLength()); + data = new DataValue(columnRule.getType(), varchar); + return data; + } - boolean isVarcharType(DataTypeMapping mapping) { - switch (mapping) { - case VARCHAR: - case VARBINARY: - case JSON: - case BSON: - case CHAR: - return true; - default: - return false; - } + boolean isVarcharType(DataTypeMapping mapping) { + switch (mapping) { + case VARCHAR: + case VARBINARY: + case JSON: + case BSON: + case CHAR: + return true; + default: + return false; } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java index 53c4408c566..cd395172cbd 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java @@ -1,102 +1,100 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.schema; +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.nio.file.Path; +import java.sql.Connection; +import java.util.Collection; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.exception.FileLoaderException; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.util.ResourceList; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.nio.file.Path; -import java.sql.Connection; -import java.util.Collection; - public class SchemaReader { - private static final Logger LOGGER = LoggerFactory.getLogger(SchemaReader.class); - private final PhoenixUtil pUtil; - private Collection resourceList; - private final String searchPattern; - private final ResourceList resourceUtil; + private static final Logger LOGGER = LoggerFactory.getLogger(SchemaReader.class); + private final PhoenixUtil pUtil; + private Collection resourceList; + private final String searchPattern; + private final ResourceList resourceUtil; - /** - * Used for testing search Pattern - * @param searchPattern {@link java.util.regex.Pattern} that matches a resource on the CP - * @throws Exception - */ - public SchemaReader(final String searchPattern) throws Exception { - this(PhoenixUtil.create(), searchPattern); - } + /** + * Used for testing search Pattern + * @param searchPattern {@link java.util.regex.Pattern} that matches a resource on the CP + */ + public SchemaReader(final String searchPattern) throws Exception { + this(PhoenixUtil.create(), searchPattern); + } - public SchemaReader(PhoenixUtil util, final String searchPattern) throws Exception { - this.pUtil = util; - this.searchPattern = searchPattern; - this.resourceUtil = new ResourceList(PherfConstants.RESOURCE_DATAMODEL); - read(); - } + public SchemaReader(PhoenixUtil util, final String searchPattern) throws Exception { + this.pUtil = util; + this.searchPattern = searchPattern; + this.resourceUtil = new ResourceList(PherfConstants.RESOURCE_DATAMODEL); + read(); + } - public Collection getResourceList() { - return resourceList; - } + public Collection getResourceList() { + return resourceList; + } - public void applySchema() throws Exception { - Connection connection = null; - try { - connection = pUtil.getConnection(null); - for (Path file : resourceList) { - LOGGER.info("\nApplying schema to file: " + file); - pUtil.executeStatement(resourceToString(file), connection); - } - } finally { - if (connection != null) { - connection.close(); - } - } + public void applySchema() throws Exception { + Connection connection = null; + try { + connection = pUtil.getConnection(null); + for (Path file : resourceList) { + LOGGER.info("\nApplying schema to file: " + file); + pUtil.executeStatement(resourceToString(file), connection); + } + } finally { + if (connection != null) { + connection.close(); + } } + } - public String resourceToString(final Path file) throws Exception { - String fName = PherfConstants.RESOURCE_DATAMODEL + "/" + file.getFileName().toString(); - BufferedReader br = new BufferedReader(new InputStreamReader(this.getClass().getResourceAsStream(fName))); - StringBuffer sb = new StringBuffer(); + public String resourceToString(final Path file) throws Exception { + String fName = PherfConstants.RESOURCE_DATAMODEL + "/" + file.getFileName().toString(); + BufferedReader br = + new BufferedReader(new InputStreamReader(this.getClass().getResourceAsStream(fName))); 
+ StringBuffer sb = new StringBuffer(); - String line; - while ((line = br.readLine()) != null) { - sb.append(line); - } - - return sb.toString(); + String line; + while ((line = br.readLine()) != null) { + sb.append(line); } - private void read() throws Exception { - LOGGER.debug("Trying to match resource pattern: " + searchPattern); - System.out.println("Trying to match resource pattern: " + searchPattern); + return sb.toString(); + } + + private void read() throws Exception { + LOGGER.debug("Trying to match resource pattern: " + searchPattern); + System.out.println("Trying to match resource pattern: " + searchPattern); - resourceList = null; - resourceList = resourceUtil.getResourceList(searchPattern); - LOGGER.info("File resourceList Loaded: " + resourceList); - System.out.println("File resourceList Loaded: " + resourceList); - if (resourceList.isEmpty()) { - throw new FileLoaderException("Could not load Schema Files"); - } + resourceList = null; + resourceList = resourceUtil.getResourceList(searchPattern); + LOGGER.info("File resourceList Loaded: " + resourceList); + System.out.println("File resourceList Loaded: " + resourceList); + if (resourceList.isEmpty()) { + throw new FileLoaderException("Could not load Schema Files"); } -} \ No newline at end of file + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/GoogleChartGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/GoogleChartGenerator.java index 577c0d94a0b..e5e8e1aa90f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/GoogleChartGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/GoogleChartGenerator.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.util; import java.io.FileNotFoundException; @@ -41,354 +40,340 @@ */ public class GoogleChartGenerator { - private String[] labels; - private CompareType compareType; - private final Map datanodes = new TreeMap(); - private final PherfConstants constants = PherfConstants.create(); - private final String resultDir = constants.getProperty("pherf.default.results.dir"); - private final double threshold = Double.parseDouble(constants.getProperty("pherf.default.comparison.threshold")); - - public GoogleChartGenerator(String labels, CompareType compareType) { - this.setLabels(labels); - this.setCompareType(compareType); - } - - String[] getLabels() { - return labels; - } - - void setLabels(String[] labels) { - this.labels = labels; - } - - void setLabels(String labels) { - this.labels = labels.split(","); - } - - CompareType getCompareType() { - return this.compareType; - } - - void setCompareType(CompareType compareType) { - this.compareType = compareType; - } - - public void readAndRender() { - try { - for (String label : labels) { - read(label); - } - renderAsGoogleChartsHTML(); - - } catch (Exception e) { - e.printStackTrace(); - } - } - - /** - * Reads aggregate file and convert it to DataNode - * @param label - * @throws Exception - */ - private void read(String label) throws Exception { - String resultFileName = resultDir - + PherfConstants.PATH_SEPARATOR - + PherfConstants.RESULT_PREFIX - + label - + ResultFileDetails.CSV_AGGREGATE_PERFORMANCE.getExtension(); - - FileReader in = new FileReader(resultFileName); - final CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader()); - - for (CSVRecord record : parser) { - String group = record.get("QUERY_GROUP"); - String query = record.get("QUERY"); - String explain = record.get("EXPLAIN_PLAN"); - String tenantId = record.get("TENANT_ID"); - long avgTime = Long.parseLong(record.get("AVG_TIME_MS")); - long minTime = Long.parseLong(record.get("AVG_MIN_TIME_MS")); - long numRuns = Long.parseLong(record.get("RUN_COUNT")); - long rowCount = Long.parseLong(record.get("RESULT_ROW_COUNT")); - Node node = new Node(minTime, avgTime, numRuns, explain, query, tenantId, label, rowCount); - - if (datanodes.containsKey(group)) { - datanodes.get(group).getDataSet().put(label, node); - } else { - datanodes.put(group, new DataNode(label, node)); - } + private String[] labels; + private CompareType compareType; + private final Map datanodes = new TreeMap(); + private final PherfConstants constants = PherfConstants.create(); + private final String resultDir = constants.getProperty("pherf.default.results.dir"); + private final double threshold = + Double.parseDouble(constants.getProperty("pherf.default.comparison.threshold")); + + public GoogleChartGenerator(String labels, CompareType compareType) { + this.setLabels(labels); + this.setCompareType(compareType); + } + + String[] getLabels() { + return labels; + } + + void setLabels(String[] labels) { + this.labels = labels; + } + + void setLabels(String labels) { + this.labels = labels.split(","); + } + + CompareType getCompareType() { + return this.compareType; + } + + void setCompareType(CompareType compareType) { + this.compareType = compareType; + } + + public void readAndRender() { + try { + for (String label : labels) { + read(label); + } + renderAsGoogleChartsHTML(); + + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * Reads aggregate file and convert it to DataNode + */ + private void read(String label) throws Exception { + String 
resultFileName = resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX + + label + ResultFileDetails.CSV_AGGREGATE_PERFORMANCE.getExtension(); + + FileReader in = new FileReader(resultFileName); + final CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader()); + + for (CSVRecord record : parser) { + String group = record.get("QUERY_GROUP"); + String query = record.get("QUERY"); + String explain = record.get("EXPLAIN_PLAN"); + String tenantId = record.get("TENANT_ID"); + long avgTime = Long.parseLong(record.get("AVG_TIME_MS")); + long minTime = Long.parseLong(record.get("AVG_MIN_TIME_MS")); + long numRuns = Long.parseLong(record.get("RUN_COUNT")); + long rowCount = Long.parseLong(record.get("RESULT_ROW_COUNT")); + Node node = new Node(minTime, avgTime, numRuns, explain, query, tenantId, label, rowCount); + + if (datanodes.containsKey(group)) { + datanodes.get(group).getDataSet().put(label, node); + } else { + datanodes.put(group, new DataNode(label, node)); + } + } + parser.close(); + } + + /** + * Verifies if the first result is within the set threshold of pherf.default.comparison.threshold + * set in pherf.properties files + */ + private boolean verifyWithinThreshold(double threshold) { + long resetTimeToCompare = -1; + long timeToCompare = resetTimeToCompare; + for (Map.Entry dn : datanodes.entrySet()) { + for (Map.Entry node : dn.getValue().getDataSet().entrySet()) { + if (timeToCompare == -1) { + timeToCompare = node.getValue().getTime(getCompareType()); + if (timeToCompare < 10) { // extremely small query time in ms therefore don't compare + return true; + } } - parser.close(); - } - - /** - * Verifies if the first result is within the set - * threshold of pherf.default.comparison.threshold - * set in pherf.properties files - * @param threshold - * @return - */ - private boolean verifyWithinThreshold(double threshold) { - long resetTimeToCompare = -1; - long timeToCompare = resetTimeToCompare; - for (Map.Entry dn : datanodes.entrySet()) { - for (Map.Entry node : dn.getValue().getDataSet().entrySet()) { - if (timeToCompare == -1) { - timeToCompare = node.getValue().getTime(getCompareType()); - if (timeToCompare < 10) { // extremely small query time in ms therefore don't compare - return true; - } - } - if ((((double) (timeToCompare - node.getValue().getTime(getCompareType()))) / (double) node - .getValue().getTime(getCompareType())) > threshold) { - return false; - } - } - timeToCompare = resetTimeToCompare; - } - return true; - } - - /** - * Render results as Google charts - * @throws FileNotFoundException - * @throws UnsupportedEncodingException - */ - private void renderAsGoogleChartsHTML() throws FileNotFoundException, UnsupportedEncodingException { - String lastKeyPrefix = ""; - StringBuffer sb = new StringBuffer(); - for (String label : labels) { - sb.append("dataTable.addColumn('number', '" + label + "');\n"); - sb.append("dataTable.addColumn({type: 'string', role: 'tooltip', 'p': {'html': true}});\n"); - } - sb.append("dataTable.addRows([\n"); - for (Map.Entry dn : datanodes.entrySet()) { - String currentKeyPrefix = dn.getKey().substring(0, dn.getKey().indexOf('|')); - if (!lastKeyPrefix.equalsIgnoreCase(currentKeyPrefix) && lastKeyPrefix != "") { - sb.append(getBlankRow()); - } - lastKeyPrefix = currentKeyPrefix; - sb.append("['" + dn.getKey() + "'"); - for (Map.Entry nodeSet : dn.getValue().getDataSet().entrySet()) { - sb.append (", " + nodeSet.getValue().getTime(getCompareType())); - sb.append (",'" + 
getToolTipAsHTML(dn.getValue().getDataSet()) + "'"); - } - sb.append("],\n"); + if ( + (((double) (timeToCompare - node.getValue().getTime(getCompareType()))) + / (double) node.getValue().getTime(getCompareType())) > threshold + ) { + return false; } - String summaryFile = PherfConstants.create().getProperty("pherf.default.summary.file"); - String title = labels[0]; - PrintWriter writer = new PrintWriter(summaryFile, "UTF-8"); - - writer.println(StaticGoogleChartsRenderingData.HEADER.replace("[title]", title)); - writer.println(sb.substring(0, sb.length() - 2) + "\n]);"); - String thresholdString = Math.round((threshold*100)) + "%"; - String footer = StaticGoogleChartsRenderingData.FOOTER - .replace("[summary]", - ((verifyWithinThreshold(threshold) == true ? "PASSED | Results are within ": - "FAILED | Results are outside ")) - + "set threshold of " + thresholdString + "
    " - + new SimpleDateFormat("yyyy/MM/dd ha z").format(new Date())); - footer = footer.replace("[title]", title); - writer.println(footer); - writer.close(); - } - - /** - * Render a blank Google charts row - * @return - */ - private String getBlankRow() { - String ret = "['" + new String(new char[60]).replace("\0", ".") + "'"; - for (int i=0; i nodeDataSet) { - String ret = ""; - for (Map.Entry nodeSet : nodeDataSet.entrySet()) - ret += ""; - return ret + "
    " + getToolText(nodeSet.getValue()) + "
    "; - } - - /** - * Get tooltip for node - * @param node - * @return - */ - private String getToolText(Node node) { - return node.getLabelAsHTML() - + node.getAvgTimeAsHTML() - + node.getMinTimeAsHTML() - + node.getNumRunsAsHTML() - + node.getRowCountAsHTML() - + node.getExplainPlanAsHTML() - + node.getQueryAsHTML(); - } - - /** - * DataNode to store results to render and compare - */ - class DataNode { - private Map dataSet = new LinkedHashMap(); - - public DataNode(String label, Node node) { - this.getDataSet().put(label, node); - } - - public Map getDataSet() { - return dataSet; - } - public void setDataSet(Map dataSet) { - this.dataSet = dataSet; - } - } - - class Node { - private String explainPlan; - private String query; - private String tenantId; - private long minTime; - private long avgTime; - private long numRuns; - private long rowCount; - private String label; - private DecimalFormat df = new DecimalFormat("#.#"); - - public Node(long minTime, long avgTime, long numRuns, String explainPlan, String query, String tenantId, String label, long rowCount) { - this.setMinTime(minTime); - this.setAvgTime(avgTime); - this.setNumRuns(numRuns); - this.setExplainPlan(explainPlan); - this.setQuery(query); - this.setTenantId(tenantId); - this.setLabel(label); - this.setRowCount(rowCount); - } - - String getExplainPlan() { - return explainPlan; - } - String getExplainPlanAsHTML() { - return "
    EXPLAIN PLAN " - + explainPlan.replace("'", "") + "
    "; - } - - void setExplainPlan(String explainPlan) { - this.explainPlan = explainPlan; - } - long getTime(CompareType compareType) { - return (compareType == CompareType.AVERAGE ? getAvgTime() : getMinTime()); - } - - long getMinTime() { - if (minTime <= 2) - return 2; - else - return minTime; - } - public String getMinTimeAsHTML() { - return "MIN TIME " - + minTime - + " ms (" - + df.format((double) minTime / 1000) - + " sec)
    "; - } - void setMinTime(long minTime) { - this.minTime = minTime; - } - long getAvgTime() { - return avgTime; - } - public String getAvgTimeAsHTML() { - return "AVERAGE TIME " - + avgTime - + " ms (" - + df.format((double) avgTime / 1000) - + " sec)
    "; - } - void setAvgTime(long avgTime) { - this.avgTime = avgTime; - } - - public long getNumRuns() { - return numRuns; - } - public String getNumRunsAsHTML() { - return "NUMBER OF RUNS " - + numRuns + "
    "; - } - - public void setNumRuns(long numRuns) { - this.numRuns = numRuns; - } - - public String getQuery() { - return query; - } - - public String getQueryAsHTML() { - return "
    QUERY " - + query.replace("'", "") + " (TENANT ID: " + getTenantId() - + ")
    "; - } - - public void setQuery(String query) { - this.query = query; - } - - public String getTenantId() { - return tenantId; - } - - public void setTenantId(String tenantId) { - this.tenantId = tenantId; - } - - public String getLabel() { - return label; - } - - public String getLabelAsHTML() { - return "" + label - + "
    "; - } - - public void setLabel(String label) { - this.label = label; - } - - public long getRowCount() { - return rowCount; - } - public String getRowCountAsHTML() { - return "RESULT ROW COUNT " - + rowCount + "
    "; - } - - public void setRowCount(long rowCount) { - this.rowCount = rowCount; - } - } - - static class StaticGoogleChartsRenderingData { - public static String HEADER = "[title]" - + "" - + "PHERFED [title]
    [summary]

    "; + } + timeToCompare = resetTimeToCompare; + } + return true; + } + + /** + * Render results as Google charts + */ + private void renderAsGoogleChartsHTML() + throws FileNotFoundException, UnsupportedEncodingException { + String lastKeyPrefix = ""; + StringBuffer sb = new StringBuffer(); + for (String label : labels) { + sb.append("dataTable.addColumn('number', '" + label + "');\n"); + sb.append("dataTable.addColumn({type: 'string', role: 'tooltip', 'p': {'html': true}});\n"); + } + sb.append("dataTable.addRows([\n"); + for (Map.Entry dn : datanodes.entrySet()) { + String currentKeyPrefix = dn.getKey().substring(0, dn.getKey().indexOf('|')); + if (!lastKeyPrefix.equalsIgnoreCase(currentKeyPrefix) && lastKeyPrefix != "") { + sb.append(getBlankRow()); + } + lastKeyPrefix = currentKeyPrefix; + sb.append("['" + dn.getKey() + "'"); + for (Map.Entry nodeSet : dn.getValue().getDataSet().entrySet()) { + sb.append(", " + nodeSet.getValue().getTime(getCompareType())); + sb.append(",'" + getToolTipAsHTML(dn.getValue().getDataSet()) + "'"); + } + sb.append("],\n"); + } + String summaryFile = PherfConstants.create().getProperty("pherf.default.summary.file"); + String title = labels[0]; + PrintWriter writer = new PrintWriter(summaryFile, "UTF-8"); + + writer.println(StaticGoogleChartsRenderingData.HEADER.replace("[title]", title)); + writer.println(sb.substring(0, sb.length() - 2) + "\n]);"); + String thresholdString = Math.round((threshold * 100)) + "%"; + String footer = StaticGoogleChartsRenderingData.FOOTER.replace("[summary]", + ((verifyWithinThreshold(threshold) == true + ? "PASSED | Results are within " + : "FAILED | Results are outside ")) + "set threshold of " + thresholdString + + "
    " + new SimpleDateFormat("yyyy/MM/dd ha z").format(new Date())); + footer = footer.replace("[title]", title); + writer.println(footer); + writer.close(); + } + + /** + * Render a blank Google charts row + */ + private String getBlankRow() { + String ret = "['" + new String(new char[60]).replace("\0", ".") + "'"; + for (int i = 0; i < labels.length; i++) + ret += ",0,''"; + ret += "],"; + return ret; + } + + /** + * Render tooltip as HTML table + */ + private String getToolTipAsHTML(Map nodeDataSet) { + String ret = ""; + for (Map.Entry nodeSet : nodeDataSet.entrySet()) + ret += ""; + return ret + "
    " + getToolText(nodeSet.getValue()) + "
    "; + } + + /** + * Get tooltip for node + */ + private String getToolText(Node node) { + return node.getLabelAsHTML() + node.getAvgTimeAsHTML() + node.getMinTimeAsHTML() + + node.getNumRunsAsHTML() + node.getRowCountAsHTML() + node.getExplainPlanAsHTML() + + node.getQueryAsHTML(); + } + + /** + * DataNode to store results to render and compare + */ + class DataNode { + private Map dataSet = new LinkedHashMap(); + + public DataNode(String label, Node node) { + this.getDataSet().put(label, node); + } + + public Map getDataSet() { + return dataSet; + } + + public void setDataSet(Map dataSet) { + this.dataSet = dataSet; + } + } + + class Node { + private String explainPlan; + private String query; + private String tenantId; + private long minTime; + private long avgTime; + private long numRuns; + private long rowCount; + private String label; + private DecimalFormat df = new DecimalFormat("#.#"); + + public Node(long minTime, long avgTime, long numRuns, String explainPlan, String query, + String tenantId, String label, long rowCount) { + this.setMinTime(minTime); + this.setAvgTime(avgTime); + this.setNumRuns(numRuns); + this.setExplainPlan(explainPlan); + this.setQuery(query); + this.setTenantId(tenantId); + this.setLabel(label); + this.setRowCount(rowCount); + } + + String getExplainPlan() { + return explainPlan; + } + + String getExplainPlanAsHTML() { + return "
    EXPLAIN PLAN " + explainPlan.replace("'", "") + + "
    "; + } + + void setExplainPlan(String explainPlan) { + this.explainPlan = explainPlan; + } + + long getTime(CompareType compareType) { + return (compareType == CompareType.AVERAGE ? getAvgTime() : getMinTime()); } + + long getMinTime() { + if (minTime <= 2) return 2; + else return minTime; + } + + public String getMinTimeAsHTML() { + return "MIN TIME " + minTime + + " ms (" + df.format((double) minTime / 1000) + " sec)
    "; + } + + void setMinTime(long minTime) { + this.minTime = minTime; + } + + long getAvgTime() { + return avgTime; + } + + public String getAvgTimeAsHTML() { + return "AVERAGE TIME " + avgTime + + " ms (" + df.format((double) avgTime / 1000) + " sec)
    "; + } + + void setAvgTime(long avgTime) { + this.avgTime = avgTime; + } + + public long getNumRuns() { + return numRuns; + } + + public String getNumRunsAsHTML() { + return "NUMBER OF RUNS " + + numRuns + "
    "; + } + + public void setNumRuns(long numRuns) { + this.numRuns = numRuns; + } + + public String getQuery() { + return query; + } + + public String getQueryAsHTML() { + return "
    QUERY " + query.replace("'", "") + " (TENANT ID: " + + getTenantId() + ")
    "; + } + + public void setQuery(String query) { + this.query = query; + } + + public String getTenantId() { + return tenantId; + } + + public void setTenantId(String tenantId) { + this.tenantId = tenantId; + } + + public String getLabel() { + return label; + } + + public String getLabelAsHTML() { + return "" + label + "
    "; + } + + public void setLabel(String label) { + this.label = label; + } + + public long getRowCount() { + return rowCount; + } + + public String getRowCountAsHTML() { + return "RESULT ROW COUNT " + + rowCount + "
    "; + } + + public void setRowCount(long rowCount) { + this.rowCount = rowCount; + } + } + + static class StaticGoogleChartsRenderingData { + public static String HEADER = "[title]" + + "" + + "PHERFED [title]
    [summary]
    "; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java index 5125611e8ff..ae7563d368d 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java @@ -1,41 +1,24 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.util; -import com.google.gson.Gson; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.phoenix.coprocessor.TaskRegionObserver; -import org.apache.phoenix.mapreduce.index.automation.PhoenixMRJobSubmitter; -import org.apache.phoenix.pherf.PherfConstants; -import org.apache.phoenix.pherf.configuration.Column; -import org.apache.phoenix.pherf.configuration.DataTypeMapping; -import org.apache.phoenix.pherf.configuration.Ddl; -import org.apache.phoenix.pherf.configuration.Query; -import org.apache.phoenix.pherf.configuration.QuerySet; -import org.apache.phoenix.pherf.configuration.Scenario; -import org.apache.phoenix.pherf.result.DataLoadTimeSummary; -import org.apache.phoenix.pherf.rules.DataValue; -import org.apache.phoenix.pherf.rules.RulesApplier; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.util.EnvironmentEdgeManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; +import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; import java.math.BigDecimal; import java.sql.Array; @@ -57,609 +40,584 @@ import java.util.Properties; import java.util.Set; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME; -import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM; - -public class PhoenixUtil { - public static final String ASYNC_KEYWORD = "ASYNC"; - public static final Gson GSON = new Gson(); - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixUtil.class); - private static String zookeeper; - private static int rowCountOverride = 0; - private boolean testEnabled; - private static PhoenixUtil instance; - private static boolean useThinDriver; - private static String queryServerUrl; - private static final int ONE_MIN_IN_MS = 60000; - private static String CurrentSCN = null; - - private PhoenixUtil() { - this(false); - } - - private PhoenixUtil(final boolean testEnabled) { - this.testEnabled = testEnabled; - } - - public static PhoenixUtil create() { - return create(false); - } - - public static PhoenixUtil create(final boolean testEnabled) { - instance = instance != null ? 
instance : new PhoenixUtil(testEnabled); - return instance; - } - - public static void useThinDriver(String queryServerUrl) { - PhoenixUtil.useThinDriver = true; - PhoenixUtil.queryServerUrl = Objects.requireNonNull(queryServerUrl); - } - - public static String getQueryServerUrl() { - return PhoenixUtil.queryServerUrl; - } - - public static boolean isThinDriver() { - return PhoenixUtil.useThinDriver; - } - - public static Gson getGSON() { - return GSON; - } - - public Connection getConnection() throws Exception { - return getConnection(null); - } - - public Connection getConnection(String tenantId) throws Exception { - return getConnection(tenantId, testEnabled, null); - } - - public Connection getConnection(String tenantId, - Properties properties) throws Exception { - Map propertyHashMap = getPropertyHashMap(properties); - return getConnection(tenantId, testEnabled, propertyHashMap); - } - - public Connection getConnection(String tenantId, - Map propertyHashMap) throws Exception { - return getConnection(tenantId, testEnabled, propertyHashMap); - } - - public Connection getConnection(String tenantId, boolean testEnabled, - Map propertyHashMap) throws Exception { - if (useThinDriver) { - if (null == queryServerUrl) { - throw new IllegalArgumentException("QueryServer URL must be set before" + - " initializing connection"); - } - Properties props = new Properties(); - if (null != tenantId) { - props.setProperty("TenantId", tenantId); - LOGGER.debug("\nSetting tenantId to " + tenantId); - } - String url = "jdbc:phoenix:thin:url=" + queryServerUrl + ";serialization=PROTOBUF"; - return DriverManager.getConnection(url, props); - } else { - if (null == zookeeper) { - throw new IllegalArgumentException( - "Zookeeper must be set before initializing connection!"); - } - Properties props = new Properties(); - if (null != tenantId) { - props.setProperty("TenantId", tenantId); - LOGGER.debug("\nSetting tenantId to " + tenantId); - } - - if (propertyHashMap != null) { - for (Map.Entry phxProperty: propertyHashMap.entrySet()) { - props.setProperty(phxProperty.getKey(), phxProperty.getValue()); - LOGGER.debug("Setting connection property " - + phxProperty.getKey() + " to " - + phxProperty.getValue()); - } - } - - String url = "jdbc:phoenix:" + zookeeper + (testEnabled ? 
";test=true" : ""); - return DriverManager.getConnection(url, props); - } - } - - private Map getPropertyHashMap(Properties props) { - Map propsMaps = new HashMap<>(); - for (String prop : props.stringPropertyNames()) { - propsMaps.put(prop, props.getProperty(prop)); - } - return propsMaps; - } - - public boolean executeStatement(String sql, Scenario scenario) throws Exception { - Connection connection = null; - boolean result = false; - try { - connection = getConnection(scenario.getTenantId()); - result = executeStatement(sql, connection); - } finally { - if (connection != null) { - connection.close(); - } - } - return result; - } - - /** - * Execute statement - * - * @param sql - * @param connection - * @return - * @throws SQLException - */ - public boolean executeStatementThrowException(String sql, Connection connection) - throws SQLException { - boolean result = false; - PreparedStatement preparedStatement = null; - try { - preparedStatement = connection.prepareStatement(sql); - result = preparedStatement.execute(); - connection.commit(); - } finally { - if(preparedStatement != null) { - preparedStatement.close(); - } - } - return result; - } - - public boolean executeStatement(String sql, Connection connection) throws SQLException{ - boolean result = false; - PreparedStatement preparedStatement = null; - try { - preparedStatement = connection.prepareStatement(sql); - result = preparedStatement.execute(); - connection.commit(); - } finally { - try { - if (preparedStatement != null) { - preparedStatement.close(); - } - } catch (SQLException e) { - e.printStackTrace(); - } - } - return result; - } - - @SuppressWarnings("unused") - public boolean executeStatement(PreparedStatement preparedStatement, Connection connection) { - boolean result = false; - try { - result = preparedStatement.execute(); - connection.commit(); - } catch (SQLException e) { - e.printStackTrace(); - } - return result; - } - - /** - * Delete existing tables with schema name set as {@link PherfConstants#PHERF_SCHEMA_NAME} with regex comparison - * - * @param regexMatch - * @throws SQLException - * @throws Exception - */ - public void deleteTables(String regexMatch) throws Exception { - regexMatch = regexMatch.toUpperCase().replace("ALL", ".*"); - Connection conn = getConnection(); - try { - ResultSet resultSet = getTableMetaData(PherfConstants.PHERF_SCHEMA_NAME, null, conn); - while (resultSet.next()) { - String tableName = resultSet.getString(TABLE_SCHEM) == null ? resultSet - .getString(TABLE_NAME) : resultSet - .getString(TABLE_SCHEM) - + "." - + resultSet.getString(TABLE_NAME); - if (tableName.matches(regexMatch)) { - LOGGER.info("\nDropping " + tableName); - try { - executeStatementThrowException("DROP TABLE " - + tableName + " CASCADE", conn); - } catch (org.apache.phoenix.schema.TableNotFoundException tnf) { - LOGGER.error("Table might be already be deleted via cascade. 
Schema: " - + tnf.getSchemaName() - + " Table: " - + tnf.getTableName()); - } - } - } - } finally { - conn.close(); - } - } - - public void dropChildView(RegionCoprocessorEnvironment taskRegionEnvironment, int depth) { - TaskRegionObserver.SelfHealingTask task = - new TaskRegionObserver.SelfHealingTask( - taskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS); - for (int i = 0; i < depth; i++) { - task.run(); - } - } - - public ResultSet getTableMetaData(String schemaName, String tableName, Connection connection) - throws SQLException { - DatabaseMetaData dbmd = connection.getMetaData(); - ResultSet resultSet = dbmd.getTables(null, schemaName, tableName, null); - return resultSet; - } +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.phoenix.coprocessor.TaskRegionObserver; +import org.apache.phoenix.mapreduce.index.automation.PhoenixMRJobSubmitter; +import org.apache.phoenix.pherf.PherfConstants; +import org.apache.phoenix.pherf.configuration.Column; +import org.apache.phoenix.pherf.configuration.DataTypeMapping; +import org.apache.phoenix.pherf.configuration.Ddl; +import org.apache.phoenix.pherf.configuration.Query; +import org.apache.phoenix.pherf.configuration.QuerySet; +import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.result.DataLoadTimeSummary; +import org.apache.phoenix.pherf.rules.DataValue; +import org.apache.phoenix.pherf.rules.RulesApplier; +import org.apache.phoenix.query.QueryServicesOptions; +import org.apache.phoenix.util.EnvironmentEdgeManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; - public ResultSet getColumnsMetaData(String schemaName, String tableName, Connection connection) - throws SQLException { - DatabaseMetaData dbmd = connection.getMetaData(); - ResultSet resultSet = dbmd.getColumns(null, schemaName.toUpperCase(), tableName.toUpperCase(), null); - return resultSet; - } +import com.google.gson.Gson; - public synchronized List getColumnsFromPhoenix(String schemaName, String tableName, - Connection connection) throws SQLException { - List columnList = new ArrayList<>(); - ResultSet resultSet = null; - try { - resultSet = getColumnsMetaData(schemaName, tableName, connection); - while (resultSet.next()) { - Column column = new Column(); - column.setName(resultSet.getString("COLUMN_NAME")); - column.setType(DataTypeMapping.valueOf(resultSet.getString("TYPE_NAME").replace(" ", "_"))); - column.setLength(resultSet.getInt("COLUMN_SIZE")); - columnList.add(column); - LOGGER.debug(String.format("getColumnsMetaData for column name : %s", column.getName())); - } - } finally { - if (null != resultSet) { - resultSet.close(); - } +public class PhoenixUtil { + public static final String ASYNC_KEYWORD = "ASYNC"; + public static final Gson GSON = new Gson(); + private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixUtil.class); + private static String zookeeper; + private static int rowCountOverride = 0; + private boolean testEnabled; + private static PhoenixUtil instance; + private static boolean useThinDriver; + private static String queryServerUrl; + private static final int ONE_MIN_IN_MS = 60000; + private static String CurrentSCN = null; + + private PhoenixUtil() { + this(false); + } + + private PhoenixUtil(final boolean testEnabled) { + this.testEnabled = testEnabled; + } + + public static PhoenixUtil create() { + return create(false); + } + + public static PhoenixUtil create(final boolean testEnabled) { + instance = instance != null ? 
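The reordered import block above reflects the import order the formatter applies to this file: static imports first, then java.* packages, then org.* packages, and finally other groups such as com.*, each group usually separated by a blank line. As a rough illustration (an invented class, not part of this patch), a file laid out in that order looks like:

import static java.util.Objects.requireNonNull;

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.gson.Gson;

public class ImportOrderExample {
  // Invented class used only to illustrate the import grouping seen in the diff above.
  private static final Logger LOG = LoggerFactory.getLogger(ImportOrderExample.class);
  private final Gson gson = new Gson();
  private final List<String> names;

  public ImportOrderExample(List<String> names) {
    this.names = requireNonNull(names);
  }

  public String toJson() {
    LOG.debug("Serializing {} names", names.size());
    return gson.toJson(names);
  }
}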
instance : new PhoenixUtil(testEnabled); + return instance; + } + + public static void useThinDriver(String queryServerUrl) { + PhoenixUtil.useThinDriver = true; + PhoenixUtil.queryServerUrl = Objects.requireNonNull(queryServerUrl); + } + + public static String getQueryServerUrl() { + return PhoenixUtil.queryServerUrl; + } + + public static boolean isThinDriver() { + return PhoenixUtil.useThinDriver; + } + + public static Gson getGSON() { + return GSON; + } + + public Connection getConnection() throws Exception { + return getConnection(null); + } + + public Connection getConnection(String tenantId) throws Exception { + return getConnection(tenantId, testEnabled, null); + } + + public Connection getConnection(String tenantId, Properties properties) throws Exception { + Map propertyHashMap = getPropertyHashMap(properties); + return getConnection(tenantId, testEnabled, propertyHashMap); + } + + public Connection getConnection(String tenantId, Map propertyHashMap) + throws Exception { + return getConnection(tenantId, testEnabled, propertyHashMap); + } + + public Connection getConnection(String tenantId, boolean testEnabled, + Map propertyHashMap) throws Exception { + if (useThinDriver) { + if (null == queryServerUrl) { + throw new IllegalArgumentException( + "QueryServer URL must be set before" + " initializing connection"); + } + Properties props = new Properties(); + if (null != tenantId) { + props.setProperty("TenantId", tenantId); + LOGGER.debug("\nSetting tenantId to " + tenantId); + } + String url = "jdbc:phoenix:thin:url=" + queryServerUrl + ";serialization=PROTOBUF"; + return DriverManager.getConnection(url, props); + } else { + if (null == zookeeper) { + throw new IllegalArgumentException("Zookeeper must be set before initializing connection!"); + } + Properties props = new Properties(); + if (null != tenantId) { + props.setProperty("TenantId", tenantId); + LOGGER.debug("\nSetting tenantId to " + tenantId); + } + + if (propertyHashMap != null) { + for (Map.Entry phxProperty : propertyHashMap.entrySet()) { + props.setProperty(phxProperty.getKey(), phxProperty.getValue()); + LOGGER.debug("Setting connection property " + phxProperty.getKey() + " to " + + phxProperty.getValue()); } - - return Collections.unmodifiableList(columnList); - } - - /** - * Execute all querySet DDLs first based on tenantId if specified. This is executed - * first since we don't want to run DDLs in parallel to executing queries. - * - * @param querySet - * @throws Exception - */ - public void executeQuerySetDdls(QuerySet querySet) throws Exception { - for (Query query : querySet.getQuery()) { - if (null != query.getDdl()) { - Connection conn = null; - try { - LOGGER.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query - .getTenantId()); - executeStatement(query.getDdl(), - conn = getConnection(query.getTenantId())); - } finally { - if (null != conn) { - conn.close(); - } - } - } + } + + String url = "jdbc:phoenix:" + zookeeper + (testEnabled ? 
";test=true" : ""); + return DriverManager.getConnection(url, props); + } + } + + private Map getPropertyHashMap(Properties props) { + Map propsMaps = new HashMap<>(); + for (String prop : props.stringPropertyNames()) { + propsMaps.put(prop, props.getProperty(prop)); + } + return propsMaps; + } + + public boolean executeStatement(String sql, Scenario scenario) throws Exception { + Connection connection = null; + boolean result = false; + try { + connection = getConnection(scenario.getTenantId()); + result = executeStatement(sql, connection); + } finally { + if (connection != null) { + connection.close(); + } + } + return result; + } + + /** + * Execute statement + */ + public boolean executeStatementThrowException(String sql, Connection connection) + throws SQLException { + boolean result = false; + PreparedStatement preparedStatement = null; + try { + preparedStatement = connection.prepareStatement(sql); + result = preparedStatement.execute(); + connection.commit(); + } finally { + if (preparedStatement != null) { + preparedStatement.close(); + } + } + return result; + } + + public boolean executeStatement(String sql, Connection connection) throws SQLException { + boolean result = false; + PreparedStatement preparedStatement = null; + try { + preparedStatement = connection.prepareStatement(sql); + result = preparedStatement.execute(); + connection.commit(); + } finally { + try { + if (preparedStatement != null) { + preparedStatement.close(); } - } - - /** - * Executes any ddl defined at the scenario level. This is executed before we commence - * the data load. - * - * @throws Exception - */ - public void executeScenarioDdl(List ddls, String tenantId, DataLoadTimeSummary dataLoadTimeSummary) throws Exception { - if (null != ddls) { - Connection conn = null; - try { - for (Ddl ddl : ddls) { - LOGGER.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - executeStatement(ddl.toString(), conn = getConnection(tenantId)); - if (ddl.getStatement().toUpperCase().contains(ASYNC_KEYWORD)) { - waitForAsyncIndexToFinish(ddl.getTableName()); - } - dataLoadTimeSummary.add(ddl.getTableName(), 0, - (int)(EnvironmentEdgeManager.currentTimeMillis() - startTime)); - } - } finally { - if (null != conn) { - conn.close(); - } - } + } catch (SQLException e) { + e.printStackTrace(); + } + } + return result; + } + + @SuppressWarnings("unused") + public boolean executeStatement(PreparedStatement preparedStatement, Connection connection) { + boolean result = false; + try { + result = preparedStatement.execute(); + connection.commit(); + } catch (SQLException e) { + e.printStackTrace(); + } + return result; + } + + /** + * Delete existing tables with schema name set as {@link PherfConstants#PHERF_SCHEMA_NAME} with + * regex comparison + */ + public void deleteTables(String regexMatch) throws Exception { + regexMatch = regexMatch.toUpperCase().replace("ALL", ".*"); + Connection conn = getConnection(); + try { + ResultSet resultSet = getTableMetaData(PherfConstants.PHERF_SCHEMA_NAME, null, conn); + while (resultSet.next()) { + String tableName = resultSet.getString(TABLE_SCHEM) == null + ? resultSet.getString(TABLE_NAME) + : resultSet.getString(TABLE_SCHEM) + "." 
+ resultSet.getString(TABLE_NAME); + if (tableName.matches(regexMatch)) { + LOGGER.info("\nDropping " + tableName); + try { + executeStatementThrowException("DROP TABLE " + tableName + " CASCADE", conn); + } catch (org.apache.phoenix.schema.TableNotFoundException tnf) { + LOGGER.error("Table might be already be deleted via cascade. Schema: " + + tnf.getSchemaName() + " Table: " + tnf.getTableName()); + } } - } - - /** - * Waits for ASYNC index to build - * @param tableName - * @throws InterruptedException - */ - public void waitForAsyncIndexToFinish(String tableName) throws InterruptedException { - //Wait for up to 15 mins for ASYNC index build to start - boolean jobStarted = false; - for (int i=0; i<15; i++) { - if (isYarnJobInProgress(tableName)) { - jobStarted = true; - break; - } - Thread.sleep(ONE_MIN_IN_MS); - } - if (jobStarted == false) { - throw new IllegalStateException("ASYNC index build did not start within 15 mins"); - } - - // Wait till ASYNC index job finishes to get approximate job E2E time - for (;;) { - if (!isYarnJobInProgress(tableName)) - break; - Thread.sleep(ONE_MIN_IN_MS); - } - } - - /** - * Checks if a YARN job with the specific table name is in progress - * @param tableName - * @return - */ - boolean isYarnJobInProgress(String tableName) { - try { - LOGGER.info("Fetching YARN apps..."); - Set response = new PhoenixMRJobSubmitter().getSubmittedYarnApps(); - for (String str : response) { - LOGGER.info("Runnng YARN app: " + str); - if (str.toUpperCase().contains(tableName.toUpperCase())) { - return true; - } - } - } catch (Exception e) { - e.printStackTrace(); - } - - return false; - } - - public static String getZookeeper() { - return zookeeper; - } - - public static void setZookeeper(String zookeeper) { - LOGGER.info("Setting zookeeper: " + zookeeper); - useThickDriver(zookeeper); - } - - public static void useThickDriver(String zookeeper) { - PhoenixUtil.useThinDriver = false; - PhoenixUtil.zookeeper = Objects.requireNonNull(zookeeper); - } - - public static int getRowCountOverride() { - return rowCountOverride; - } - - public static void setRowCountOverride(int rowCountOverride) { - PhoenixUtil.rowCountOverride = rowCountOverride; - } - - /** - * Update Phoenix table stats - * - * @param tableName - * @throws Exception - */ - public void updatePhoenixStats(String tableName, Scenario scenario) throws Exception { - LOGGER.info("Updating stats for " + tableName); - executeStatement("UPDATE STATISTICS " + tableName, scenario); - } - - public String getExplainPlan(Query query) throws SQLException { - return getExplainPlan(query, null, null); - } - - /** - * Get explain plan for a query - * - * @param query - * @param ruleApplier - * @param scenario - * @return - * @throws SQLException - */ - public String getExplainPlan(Query query, Scenario scenario, RulesApplier ruleApplier) throws SQLException { + } + } finally { + conn.close(); + } + } + + public void dropChildView(RegionCoprocessorEnvironment taskRegionEnvironment, int depth) { + TaskRegionObserver.SelfHealingTask task = new TaskRegionObserver.SelfHealingTask( + taskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS); + for (int i = 0; i < depth; i++) { + task.run(); + } + } + + public ResultSet getTableMetaData(String schemaName, String tableName, Connection connection) + throws SQLException { + DatabaseMetaData dbmd = connection.getMetaData(); + ResultSet resultSet = dbmd.getTables(null, schemaName, tableName, null); + return resultSet; + } + + public ResultSet 
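The getConnection(...) overloads reformatted a little above pick between the thick driver (a jdbc:phoenix:<zookeeper> URL) and the thin driver (jdbc:phoenix:thin:url=<queryserver>;serialization=PROTOBUF), depending on whether setZookeeper(...) or useThinDriver(...) was called first. A minimal caller might look like the sketch below; the quorum address and the query are placeholders, and this class is not part of the patch.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

import org.apache.phoenix.pherf.util.PhoenixUtil;

public class PhoenixUtilConnectionSketch {
  public static void main(String[] args) throws Exception {
    // Thick driver: point the utility at a ZooKeeper quorum (placeholder address).
    PhoenixUtil.setZookeeper("localhost:2181");
    // Thin driver alternative (placeholder Query Server address):
    // PhoenixUtil.useThinDriver("localhost:8765");

    PhoenixUtil util = PhoenixUtil.create();
    try (Connection conn = util.getConnection();
      Statement stmt = conn.createStatement();
      // Arbitrary example query against a Phoenix system table.
      ResultSet rs = stmt.executeQuery("SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5")) {
      while (rs.next()) {
        System.out.println(rs.getString(1));
      }
    }
  }
}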
getColumnsMetaData(String schemaName, String tableName, Connection connection) + throws SQLException { + DatabaseMetaData dbmd = connection.getMetaData(); + ResultSet resultSet = + dbmd.getColumns(null, schemaName.toUpperCase(), tableName.toUpperCase(), null); + return resultSet; + } + + public synchronized List getColumnsFromPhoenix(String schemaName, String tableName, + Connection connection) throws SQLException { + List columnList = new ArrayList<>(); + ResultSet resultSet = null; + try { + resultSet = getColumnsMetaData(schemaName, tableName, connection); + while (resultSet.next()) { + Column column = new Column(); + column.setName(resultSet.getString("COLUMN_NAME")); + column.setType(DataTypeMapping.valueOf(resultSet.getString("TYPE_NAME").replace(" ", "_"))); + column.setLength(resultSet.getInt("COLUMN_SIZE")); + columnList.add(column); + LOGGER.debug(String.format("getColumnsMetaData for column name : %s", column.getName())); + } + } finally { + if (null != resultSet) { + resultSet.close(); + } + } + + return Collections.unmodifiableList(columnList); + } + + /** + * Execute all querySet DDLs first based on tenantId if specified. This is executed first since we + * don't want to run DDLs in parallel to executing queries. + */ + public void executeQuerySetDdls(QuerySet querySet) throws Exception { + for (Query query : querySet.getQuery()) { + if (null != query.getDdl()) { Connection conn = null; - ResultSet rs = null; - PreparedStatement statement = null; - StringBuilder buf = new StringBuilder(); try { - conn = getConnection(query.getTenantId()); - String explainQuery; - if (scenario != null && ruleApplier != null) { - explainQuery = query.getDynamicStatement(ruleApplier, scenario); - } - else { - explainQuery = query.getStatement(); - } - - statement = conn.prepareStatement("EXPLAIN " + explainQuery); - rs = statement.executeQuery(); - while (rs.next()) { - buf.append(rs.getString(1).trim().replace(",", "-")); - } - statement.close(); - } catch (Exception e) { - e.printStackTrace(); + LOGGER.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query.getTenantId()); + executeStatement(query.getDdl(), conn = getConnection(query.getTenantId())); } finally { - if (rs != null) rs.close(); - if (statement != null) statement.close(); - if (conn != null) conn.close(); - } - return buf.toString(); - } - - public PreparedStatement buildStatement(RulesApplier rulesApplier, Scenario scenario, List columns, - PreparedStatement statement, SimpleDateFormat simpleDateFormat) throws Exception { - - int count = 1; - for (Column column : columns) { - DataValue dataValue = rulesApplier.getDataForRule(scenario, column); - switch (column.getType()) { - case VARCHAR: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.VARCHAR); - } else { - statement.setString(count, dataValue.getValue()); - } - break; - case JSON: - case BSON: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.VARBINARY); - } else { - statement.setString(count, dataValue.getValue()); - } - break; - case CHAR: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.CHAR); - } else { - statement.setString(count, dataValue.getValue()); - } - break; - case DECIMAL: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.DECIMAL); - } else { - statement.setBigDecimal(count, new BigDecimal(dataValue.getValue())); - } - break; - case INTEGER: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.INTEGER); - } else { - 
statement.setInt(count, Integer.parseInt(dataValue.getValue())); - } - break; - case UNSIGNED_LONG: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.OTHER); - } else { - statement.setLong(count, Long.parseLong(dataValue.getValue())); - } - break; - case BIGINT: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.BIGINT); - } else { - statement.setLong(count, Long.parseLong(dataValue.getValue())); - } - break; - case TINYINT: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.TINYINT); - } else { - statement.setLong(count, Integer.parseInt(dataValue.getValue())); - } - break; - case DATE: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.DATE); - } else { - Date - date = - new java.sql.Date(simpleDateFormat.parse(dataValue.getValue()).getTime()); - statement.setDate(count, date); - } - break; - case VARCHAR_ARRAY: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.ARRAY); - } else { - Array - arr = - statement.getConnection().createArrayOf("VARCHAR", dataValue.getValue().split(",")); - statement.setArray(count, arr); - } - break; - case VARBINARY: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.VARBINARY); - } else { - statement.setBytes(count, dataValue.getValue().getBytes()); - } - break; - case TIMESTAMP: - if (dataValue.getValue().equals("")) { - statement.setNull(count, Types.TIMESTAMP); - } else { - java.sql.Timestamp - ts = - new java.sql.Timestamp(simpleDateFormat.parse(dataValue.getValue()).getTime()); - statement.setTimestamp(count, ts); - } - break; - default: - break; - } - count++; + if (null != conn) { + conn.close(); + } } - return statement; - } - - public String buildSql(final List columns, final String tableName) { - StringBuilder builder = new StringBuilder(); - builder.append("upsert into "); - builder.append(tableName); - builder.append(" ("); - int count = 1; - for (Column column : columns) { - builder.append(column.getName()); - if (count < columns.size()) { - builder.append(","); - } else { - builder.append(")"); - } - count++; + } + } + } + + /** + * Executes any ddl defined at the scenario level. This is executed before we commence the data + * load. 
+ */ + public void executeScenarioDdl(List ddls, String tenantId, + DataLoadTimeSummary dataLoadTimeSummary) throws Exception { + if (null != ddls) { + Connection conn = null; + try { + for (Ddl ddl : ddls) { + LOGGER.info("\nExecuting DDL:" + ddl + " on tenantId:" + tenantId); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + executeStatement(ddl.toString(), conn = getConnection(tenantId)); + if (ddl.getStatement().toUpperCase().contains(ASYNC_KEYWORD)) { + waitForAsyncIndexToFinish(ddl.getTableName()); + } + dataLoadTimeSummary.add(ddl.getTableName(), 0, + (int) (EnvironmentEdgeManager.currentTimeMillis() - startTime)); } - builder.append(" VALUES ("); - for (int i = 0; i < columns.size(); i++) { - if (i < columns.size() - 1) { - builder.append("?,"); - } else { - builder.append("?)"); - } + } finally { + if (null != conn) { + conn.close(); } - return builder.toString(); - } - - public org.apache.hadoop.hbase.util.Pair getResults( - Query query, - ResultSet rs, - String queryIteration, - boolean isSelectCountStatement, - Long queryStartTime) throws Exception { - - Long resultRowCount = 0L; - while (rs.next()) { - if (isSelectCountStatement) { - resultRowCount = rs.getLong(1); - } else { - resultRowCount++; - } - long queryElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - queryStartTime; - if (queryElapsedTime >= query.getTimeoutDuration()) { - LOGGER.error("Query " + queryIteration + " exceeded timeout of " - + query.getTimeoutDuration() + " ms at " + queryElapsedTime + " ms."); - return new org.apache.hadoop.hbase.util.Pair(resultRowCount, queryElapsedTime); - } + } + } + } + + /** + * Waits for ASYNC index to build + */ + public void waitForAsyncIndexToFinish(String tableName) throws InterruptedException { + // Wait for up to 15 mins for ASYNC index build to start + boolean jobStarted = false; + for (int i = 0; i < 15; i++) { + if (isYarnJobInProgress(tableName)) { + jobStarted = true; + break; + } + Thread.sleep(ONE_MIN_IN_MS); + } + if (jobStarted == false) { + throw new IllegalStateException("ASYNC index build did not start within 15 mins"); + } + + // Wait till ASYNC index job finishes to get approximate job E2E time + for (;;) { + if (!isYarnJobInProgress(tableName)) break; + Thread.sleep(ONE_MIN_IN_MS); + } + } + + /** + * Checks if a YARN job with the specific table name is in progress + */ + boolean isYarnJobInProgress(String tableName) { + try { + LOGGER.info("Fetching YARN apps..."); + Set response = new PhoenixMRJobSubmitter().getSubmittedYarnApps(); + for (String str : response) { + LOGGER.info("Runnng YARN app: " + str); + if (str.toUpperCase().contains(tableName.toUpperCase())) { + return true; } - return new org.apache.hadoop.hbase.util.Pair(resultRowCount, EnvironmentEdgeManager.currentTimeMillis() - queryStartTime); - } + } + } catch (Exception e) { + e.printStackTrace(); + } + + return false; + } + + public static String getZookeeper() { + return zookeeper; + } + + public static void setZookeeper(String zookeeper) { + LOGGER.info("Setting zookeeper: " + zookeeper); + useThickDriver(zookeeper); + } + + public static void useThickDriver(String zookeeper) { + PhoenixUtil.useThinDriver = false; + PhoenixUtil.zookeeper = Objects.requireNonNull(zookeeper); + } + + public static int getRowCountOverride() { + return rowCountOverride; + } + + public static void setRowCountOverride(int rowCountOverride) { + PhoenixUtil.rowCountOverride = rowCountOverride; + } + + /** + * Update Phoenix table stats + */ + public void updatePhoenixStats(String tableName, 
Scenario scenario) throws Exception { + LOGGER.info("Updating stats for " + tableName); + executeStatement("UPDATE STATISTICS " + tableName, scenario); + } + + public String getExplainPlan(Query query) throws SQLException { + return getExplainPlan(query, null, null); + } + + /** + * Get explain plan for a query + */ + public String getExplainPlan(Query query, Scenario scenario, RulesApplier ruleApplier) + throws SQLException { + Connection conn = null; + ResultSet rs = null; + PreparedStatement statement = null; + StringBuilder buf = new StringBuilder(); + try { + conn = getConnection(query.getTenantId()); + String explainQuery; + if (scenario != null && ruleApplier != null) { + explainQuery = query.getDynamicStatement(ruleApplier, scenario); + } else { + explainQuery = query.getStatement(); + } + + statement = conn.prepareStatement("EXPLAIN " + explainQuery); + rs = statement.executeQuery(); + while (rs.next()) { + buf.append(rs.getString(1).trim().replace(",", "-")); + } + statement.close(); + } catch (Exception e) { + e.printStackTrace(); + } finally { + if (rs != null) rs.close(); + if (statement != null) statement.close(); + if (conn != null) conn.close(); + } + return buf.toString(); + } + + public PreparedStatement buildStatement(RulesApplier rulesApplier, Scenario scenario, + List columns, PreparedStatement statement, SimpleDateFormat simpleDateFormat) + throws Exception { + + int count = 1; + for (Column column : columns) { + DataValue dataValue = rulesApplier.getDataForRule(scenario, column); + switch (column.getType()) { + case VARCHAR: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.VARCHAR); + } else { + statement.setString(count, dataValue.getValue()); + } + break; + case JSON: + case BSON: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.VARBINARY); + } else { + statement.setString(count, dataValue.getValue()); + } + break; + case CHAR: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.CHAR); + } else { + statement.setString(count, dataValue.getValue()); + } + break; + case DECIMAL: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.DECIMAL); + } else { + statement.setBigDecimal(count, new BigDecimal(dataValue.getValue())); + } + break; + case INTEGER: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.INTEGER); + } else { + statement.setInt(count, Integer.parseInt(dataValue.getValue())); + } + break; + case UNSIGNED_LONG: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.OTHER); + } else { + statement.setLong(count, Long.parseLong(dataValue.getValue())); + } + break; + case BIGINT: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.BIGINT); + } else { + statement.setLong(count, Long.parseLong(dataValue.getValue())); + } + break; + case TINYINT: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.TINYINT); + } else { + statement.setLong(count, Integer.parseInt(dataValue.getValue())); + } + break; + case DATE: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.DATE); + } else { + Date date = new java.sql.Date(simpleDateFormat.parse(dataValue.getValue()).getTime()); + statement.setDate(count, date); + } + break; + case VARCHAR_ARRAY: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.ARRAY); + } else { + Array arr = + statement.getConnection().createArrayOf("VARCHAR", dataValue.getValue().split(",")); + statement.setArray(count, arr); + } + 
break; + case VARBINARY: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.VARBINARY); + } else { + statement.setBytes(count, dataValue.getValue().getBytes()); + } + break; + case TIMESTAMP: + if (dataValue.getValue().equals("")) { + statement.setNull(count, Types.TIMESTAMP); + } else { + java.sql.Timestamp ts = + new java.sql.Timestamp(simpleDateFormat.parse(dataValue.getValue()).getTime()); + statement.setTimestamp(count, ts); + } + break; + default: + break; + } + count++; + } + return statement; + } + + public String buildSql(final List columns, final String tableName) { + StringBuilder builder = new StringBuilder(); + builder.append("upsert into "); + builder.append(tableName); + builder.append(" ("); + int count = 1; + for (Column column : columns) { + builder.append(column.getName()); + if (count < columns.size()) { + builder.append(","); + } else { + builder.append(")"); + } + count++; + } + builder.append(" VALUES ("); + for (int i = 0; i < columns.size(); i++) { + if (i < columns.size() - 1) { + builder.append("?,"); + } else { + builder.append("?)"); + } + } + return builder.toString(); + } + + public org.apache.hadoop.hbase.util.Pair getResults(Query query, ResultSet rs, + String queryIteration, boolean isSelectCountStatement, Long queryStartTime) throws Exception { + + Long resultRowCount = 0L; + while (rs.next()) { + if (isSelectCountStatement) { + resultRowCount = rs.getLong(1); + } else { + resultRowCount++; + } + long queryElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - queryStartTime; + if (queryElapsedTime >= query.getTimeoutDuration()) { + LOGGER.error("Query " + queryIteration + " exceeded timeout of " + + query.getTimeoutDuration() + " ms at " + queryElapsedTime + " ms."); + return new org.apache.hadoop.hbase.util.Pair(resultRowCount, queryElapsedTime); + } + } + return new org.apache.hadoop.hbase.util.Pair(resultRowCount, + EnvironmentEdgeManager.currentTimeMillis() - queryStartTime); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java index ea261d70a32..1a2315b558f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
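Stepping back to the PhoenixUtil helpers reformatted above: buildSql(columns, tableName) produces a parameterized UPSERT of the form "upsert into <table> (c1,c2,...) VALUES (?,?,...)", and buildStatement(...) then binds one value per column according to its declared type. A rough usage sketch for the SQL-building half (the column names and table name are invented):

import java.util.Arrays;
import java.util.List;

import org.apache.phoenix.pherf.configuration.Column;
import org.apache.phoenix.pherf.util.PhoenixUtil;

public class BuildSqlSketch {
  public static void main(String[] args) {
    PhoenixUtil util = PhoenixUtil.create();

    Column id = new Column();
    id.setName("ID");
    Column host = new Column();
    host.setName("HOST");
    List<Column> columns = Arrays.asList(id, host);

    // Expected output: upsert into PHERF.EXAMPLE_TABLE (ID,HOST) VALUES (?,?)
    System.out.println(util.buildSql(columns, "PHERF.EXAMPLE_TABLE"));
  }
}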
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.util; import java.io.File; @@ -40,163 +39,154 @@ import org.apache.commons.lang3.StringUtils; import org.apache.phoenix.pherf.exception.PherfException; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; - /** * list resources available from the classpath @ * */ public class ResourceList { - private static final Logger LOGGER = LoggerFactory.getLogger(ResourceList.class); - private final String rootResourceDir; - // Lists the directories to ignore meant for testing something else - // when getting the resources from classpath - private List dirsToIgnore = Lists.newArrayList("sql_files"); - - public ResourceList(String rootResourceDir) { - this.rootResourceDir = rootResourceDir; + private static final Logger LOGGER = LoggerFactory.getLogger(ResourceList.class); + private final String rootResourceDir; + // Lists the directories to ignore meant for testing something else + // when getting the resources from classpath + private List dirsToIgnore = Lists.newArrayList("sql_files"); + + public ResourceList(String rootResourceDir) { + this.rootResourceDir = rootResourceDir; + } + + public Collection getResourceList(final String pattern) throws Exception { + // Include files from config directory + Collection paths = getResourcesPaths(Pattern.compile(pattern)); + + return paths; + } + + /** + * for all elements of java.class.path get a Collection of resources Pattern pattern = + * Pattern.compile(".*"); gets all resources + * @param pattern the pattern to match + * @return the resources in the order they are found + */ + private Collection getResourcesPaths(final Pattern pattern) throws Exception { + + final String classPath = System.getProperty("java.class.path", "."); + final String[] classPathElements = classPath.split(":"); + Set strResources = new HashSet<>(); + Collection paths = new ArrayList<>(); + + // TODO Make getResourcesPaths() return the URLs directly instead of converting them + // Get resources as strings. + for (final String element : classPathElements) { + strResources.addAll(getResources(element, pattern)); } - public Collection getResourceList(final String pattern) throws Exception { - // Include files from config directory - Collection paths = getResourcesPaths(Pattern.compile(pattern)); + // Convert resources to URL + for (String resource : strResources) { + URL url = null; + URI uri = null; + Path path = null; + String rName = rootResourceDir + resource; - return paths; - } + LOGGER.debug("Trying with the root append."); + url = ResourceList.class.getResource(rName); + if (url == null) { + LOGGER.debug("Failed! Must be using a jar. 
Trying without the root append."); + url = ResourceList.class.getResource(resource); - /** - * for all elements of java.class.path get a Collection of resources Pattern - * pattern = Pattern.compile(".*"); gets all resources - * - * @param pattern the pattern to match - * @return the resources in the order they are found - */ - private Collection getResourcesPaths( - final Pattern pattern) throws Exception { - - final String classPath = System.getProperty("java.class.path", "."); - final String[] classPathElements = classPath.split(":"); - Set strResources = new HashSet<>(); - Collection paths = new ArrayList<>(); - - // TODO Make getResourcesPaths() return the URLs directly instead of converting them - // Get resources as strings. - for (final String element : classPathElements) { - strResources.addAll(getResources(element, pattern)); + if (url == null) { + throw new PherfException("Could not load resources: " + rName); } + final String[] splits = url.toString().split("!"); + uri = URI.create(splits[0]); + path = (splits.length < 2) ? Paths.get(uri) : Paths.get(splits[1]); + } else { + path = Paths.get(url.toURI()); + } + LOGGER.debug("Found the correct resource: " + path.toString()); + paths.add(path); + } - // Convert resources to URL - for (String resource : strResources) { - URL url = null; - URI uri = null; - Path path = null; - - String rName = rootResourceDir + resource; - - LOGGER.debug("Trying with the root append."); - url = ResourceList.class.getResource(rName); - if (url == null) { - LOGGER.debug("Failed! Must be using a jar. Trying without the root append."); - url = ResourceList.class.getResource(resource); - - if (url == null) { - throw new PherfException("Could not load resources: " + rName); - } - final String[] splits = url.toString().split("!"); - uri = URI.create(splits[0]); - path = (splits.length < 2) ? Paths.get(uri) : Paths.get(splits[1]); - } else { - path = Paths.get(url.toURI()); - } - LOGGER.debug("Found the correct resource: " + path.toString()); - paths.add(path); - } + Collections.sort((List) paths); + return paths; + } - Collections.sort((List)paths); - return paths; + private Collection getResources(final String element, final Pattern pattern) { + final List retVal = new ArrayList<>(); + if (StringUtils.isBlank(element)) { + return retVal; } - - private Collection getResources( - final String element, - final Pattern pattern) { - final List retVal = new ArrayList<>(); - if (StringUtils.isBlank(element)) { - return retVal; - } - final File file = new File(element); - if (file.isDirectory()) { - retVal.addAll(getResourcesFromDirectory(file, pattern)); - } else { - retVal.addAll(getResourcesFromJarFile(file, pattern)); - } - return retVal; + final File file = new File(element); + if (file.isDirectory()) { + retVal.addAll(getResourcesFromDirectory(file, pattern)); + } else { + retVal.addAll(getResourcesFromJarFile(file, pattern)); } - - // Visible for testing - Collection getResourcesFromJarFile( - final File file, - final Pattern pattern) { - final List retVal = new ArrayList<>(); - ZipFile zf; - try { - zf = new ZipFile(file); - } catch (FileNotFoundException|NoSuchFileException e) { - // Gracefully handle a jar listed on the classpath that doesn't actually exist. 
- return Collections.emptyList(); - } catch (final ZipException e) { - throw new Error(e); - } catch (final IOException e) { - throw new Error(e); - } - final Enumeration e = zf.entries(); - while (e.hasMoreElements()) { - final ZipEntry ze = (ZipEntry) e.nextElement(); - final String fileName = ze.getName(); - final boolean accept = pattern.matcher(fileName).matches(); - LOGGER.trace("fileName:" + fileName); - LOGGER.trace("File:" + file.toString()); - LOGGER.trace("Match:" + accept); - if (accept) { - LOGGER.trace("Adding File from Jar: " + fileName); - retVal.add("/" + fileName); - } - } - try { - zf.close(); - } catch (final IOException e1) { - throw new Error(e1); - } - return retVal; + return retVal; + } + + // Visible for testing + Collection getResourcesFromJarFile(final File file, final Pattern pattern) { + final List retVal = new ArrayList<>(); + ZipFile zf; + try { + zf = new ZipFile(file); + } catch (FileNotFoundException | NoSuchFileException e) { + // Gracefully handle a jar listed on the classpath that doesn't actually exist. + return Collections.emptyList(); + } catch (final ZipException e) { + throw new Error(e); + } catch (final IOException e) { + throw new Error(e); } - - private Collection getResourcesFromDirectory( - final File directory, - final Pattern pattern) { - final ArrayList retval = new ArrayList(); - final File[] fileList = directory.listFiles(); - for (final File file : fileList) { - if (isIgnoredDir(file.getAbsolutePath())) continue; - if (file.isDirectory()) { - retval.addAll(getResourcesFromDirectory(file, pattern)); - } else { - final String fileName = file.getName(); - final boolean accept = pattern.matcher(file.toString()).matches(); - if (accept) { - LOGGER.debug("Adding File from directory: " + fileName); - retval.add("/" + fileName); - } - } - } - return retval; + final Enumeration e = zf.entries(); + while (e.hasMoreElements()) { + final ZipEntry ze = (ZipEntry) e.nextElement(); + final String fileName = ze.getName(); + final boolean accept = pattern.matcher(fileName).matches(); + LOGGER.trace("fileName:" + fileName); + LOGGER.trace("File:" + file.toString()); + LOGGER.trace("Match:" + accept); + if (accept) { + LOGGER.trace("Adding File from Jar: " + fileName); + retVal.add("/" + fileName); + } + } + try { + zf.close(); + } catch (final IOException e1) { + throw new Error(e1); } - - private boolean isIgnoredDir(String path) { - for (String dir : dirsToIgnore) { - if (path.contains(dir)) return true; + return retVal; + } + + private Collection getResourcesFromDirectory(final File directory, + final Pattern pattern) { + final ArrayList retval = new ArrayList(); + final File[] fileList = directory.listFiles(); + for (final File file : fileList) { + if (isIgnoredDir(file.getAbsolutePath())) continue; + if (file.isDirectory()) { + retval.addAll(getResourcesFromDirectory(file, pattern)); + } else { + final String fileName = file.getName(); + final boolean accept = pattern.matcher(file.toString()).matches(); + if (accept) { + LOGGER.debug("Adding File from directory: " + fileName); + retval.add("/" + fileName); } - return false; + } + } + return retval; + } + + private boolean isIgnoredDir(String path) { + for (String dir : dirsToIgnore) { + if (path.contains(dir)) return true; } -} \ No newline at end of file + return false; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/RowCalculator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/RowCalculator.java index d61297ad21b..e3d6b3e3454 100644 --- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/RowCalculator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/RowCalculator.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.util; import java.util.ArrayList; @@ -23,56 +22,55 @@ import java.util.List; public class RowCalculator { - private final int buckets; - private final int rows; - private final List rowCountList; + private final int buckets; + private final int rows; + private final List rowCountList; - public RowCalculator(int buckets, int rows) { - this.buckets = buckets; - this.rows = rows; - this.rowCountList = Collections.synchronizedList(new ArrayList(buckets)); - init(); - } + public RowCalculator(int buckets, int rows) { + this.buckets = buckets; + this.rows = rows; + this.rowCountList = Collections.synchronizedList(new ArrayList(buckets)); + init(); + } - public synchronized int size() { - return rowCountList.size(); - } + public synchronized int size() { + return rowCountList.size(); + } - public synchronized int getNext() { - return rowCountList.remove(0); - } + public synchronized int getNext() { + return rowCountList.remove(0); + } - /** - * Get the number of row that should fit into each bucket. - * @return - */ - public int getRowsPerBucket() { - return rows / buckets; - } + /** + * Get the number of row that should fit into each bucket. + */ + public int getRowsPerBucket() { + return rows / buckets; + } - /** - * Get the number of extra rows that need to be added if rows don't divide evenly among the buckets. - * @return - */ - public int getExtraRowCount() { - return rows % buckets; - } + /** + * Get the number of extra rows that need to be added if rows don't divide evenly among the + * buckets. 
+ */ + public int getExtraRowCount() { + return rows % buckets; + } - private void init() { - for (int i = 0; i < buckets; i++) { - synchronized (rowCountList) { - // On the first row count we tack on the extra rows if they exist - if (i == 0) { - // When row count is small we just put them all in the first bucket - if (rows < buckets) { - rowCountList.add(getExtraRowCount()); - } else { - rowCountList.add(getRowsPerBucket() + getExtraRowCount()); - } - } else { - rowCountList.add(getRowsPerBucket()); - } - } + private void init() { + for (int i = 0; i < buckets; i++) { + synchronized (rowCountList) { + // On the first row count we tack on the extra rows if they exist + if (i == 0) { + // When row count is small we just put them all in the first bucket + if (rows < buckets) { + rowCountList.add(getExtraRowCount()); + } else { + rowCountList.add(getRowsPerBucket() + getExtraRowCount()); + } + } else { + rowCountList.add(getRowsPerBucket()); } + } } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java index 522225dc0ea..ce38b9413d3 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
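The RowCalculator reformatted above splits a total row count across writer buckets: every bucket gets rows / buckets rows, and the remainder (rows % buckets) is folded into the first bucket. A small sketch of the resulting distribution, with arbitrary numbers:

import org.apache.phoenix.pherf.util.RowCalculator;

public class RowCalculatorSketch {
  public static void main(String[] args) {
    // 10 rows over 3 buckets: 3 rows per bucket, plus 1 extra row added to the first bucket.
    RowCalculator calc = new RowCalculator(3, 10);
    System.out.println(calc.getRowsPerBucket()); // 3
    System.out.println(calc.getExtraRowCount()); // 1
    while (calc.size() > 0) {
      System.out.println(calc.getNext()); // 4, then 3, then 3
    }
  }
}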
*/ - package org.apache.phoenix.pherf.workload; import java.sql.Connection; @@ -25,204 +24,197 @@ import java.util.Date; import java.util.concurrent.Callable; -import org.apache.phoenix.pherf.PherfConstants; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.util.Pair; +import org.apache.phoenix.pherf.PherfConstants; +import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats; +import org.apache.phoenix.pherf.configuration.Query; +import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.configuration.XMLConfigParser; import org.apache.phoenix.pherf.result.DataModelResult; import org.apache.phoenix.pherf.result.ResultManager; import org.apache.phoenix.pherf.result.RunTime; import org.apache.phoenix.pherf.result.ThreadTime; import org.apache.phoenix.pherf.rules.RulesApplier; +import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats; -import org.apache.phoenix.pherf.configuration.Query; -import org.apache.phoenix.pherf.configuration.Scenario; -import org.apache.phoenix.pherf.configuration.WriteParams; -import org.apache.phoenix.pherf.configuration.XMLConfigParser; -import org.apache.phoenix.pherf.util.PhoenixUtil; class MultiThreadedRunner implements Callable { - private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedRunner.class); - private Query query; - private ThreadTime threadTime; - private PhoenixUtil pUtil = PhoenixUtil.create(); - private String threadName; - private DataModelResult dataModelResult; - private long numberOfExecutions; - private long executionDurationInMs; - private static long lastResultWritten = EnvironmentEdgeManager.currentTimeMillis() - 1000; - private final ResultManager resultManager; - private final RulesApplier ruleApplier; - private final Scenario scenario; - private final WorkloadExecutor workloadExecutor; - private final XMLConfigParser parser; - private final boolean writeRuntimeResults; - - /** - * MultiThreadedRunner - * - * @param threadName - * @param query - * @param dataModelResult - * @param threadTime - * @param numberOfExecutions - * @param executionDurationInMs - * @param ruleRunner - */ - MultiThreadedRunner(String threadName, Query query, DataModelResult dataModelResult, - ThreadTime threadTime, long numberOfExecutions, long executionDurationInMs, boolean writeRuntimeResults, RulesApplier ruleApplier, Scenario scenario, WorkloadExecutor workloadExecutor, XMLConfigParser parser) { - this.query = query; - this.threadName = threadName; - this.threadTime = threadTime; - this.dataModelResult = dataModelResult; - this.numberOfExecutions = numberOfExecutions; - this.executionDurationInMs = executionDurationInMs; - this.ruleApplier = ruleApplier; - this.scenario = scenario; - this.resultManager = new ResultManager(dataModelResult.getName(), writeRuntimeResults); - this.workloadExecutor = workloadExecutor; - this.parser = parser; - this.writeRuntimeResults = writeRuntimeResults; - } - - /** - * Executes run for a minimum of number of execution or execution duration - */ - @Override - public Void call() throws Exception { - LOGGER.info("\n\nThread Starting " + threadName + " ; '" + query.getStatement() + "' for " - + numberOfExecutions + " times\n\n"); - long 
threadStartTime = EnvironmentEdgeManager.currentTimeMillis(); - for (long i = 0; i < numberOfExecutions; i++) { - long threadElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - threadStartTime; - if (threadElapsedTime >= executionDurationInMs) { - LOGGER.info("Queryset timeout of " + executionDurationInMs + " ms reached; current time is " + threadElapsedTime + " ms." - + "\nStopping queryset execution for query " + query.getId() + " on thread " + threadName + "..."); - break; - } - - synchronized (workloadExecutor) { - if (!timedQuery(i+1)) { - break; - } - if (writeRuntimeResults && - (EnvironmentEdgeManager.currentTimeMillis() - lastResultWritten) > 1000) { - resultManager.write(dataModelResult, ruleApplier); - lastResultWritten = EnvironmentEdgeManager.currentTimeMillis(); - } - } - } - - if (!writeRuntimeResults) { - long duration = EnvironmentEdgeManager.currentTimeMillis() - threadStartTime; - LOGGER.info("The read query " + query.getStatement() + " for this thread in (" - + duration + ") Ms"); + private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedRunner.class); + private Query query; + private ThreadTime threadTime; + private PhoenixUtil pUtil = PhoenixUtil.create(); + private String threadName; + private DataModelResult dataModelResult; + private long numberOfExecutions; + private long executionDurationInMs; + private static long lastResultWritten = EnvironmentEdgeManager.currentTimeMillis() - 1000; + private final ResultManager resultManager; + private final RulesApplier ruleApplier; + private final Scenario scenario; + private final WorkloadExecutor workloadExecutor; + private final XMLConfigParser parser; + private final boolean writeRuntimeResults; + + /** + * MultiThreadedRunner + */ + MultiThreadedRunner(String threadName, Query query, DataModelResult dataModelResult, + ThreadTime threadTime, long numberOfExecutions, long executionDurationInMs, + boolean writeRuntimeResults, RulesApplier ruleApplier, Scenario scenario, + WorkloadExecutor workloadExecutor, XMLConfigParser parser) { + this.query = query; + this.threadName = threadName; + this.threadTime = threadTime; + this.dataModelResult = dataModelResult; + this.numberOfExecutions = numberOfExecutions; + this.executionDurationInMs = executionDurationInMs; + this.ruleApplier = ruleApplier; + this.scenario = scenario; + this.resultManager = new ResultManager(dataModelResult.getName(), writeRuntimeResults); + this.workloadExecutor = workloadExecutor; + this.parser = parser; + this.writeRuntimeResults = writeRuntimeResults; + } + + /** + * Executes run for a minimum of number of execution or execution duration + */ + @Override + public Void call() throws Exception { + LOGGER.info("\n\nThread Starting " + threadName + " ; '" + query.getStatement() + "' for " + + numberOfExecutions + " times\n\n"); + long threadStartTime = EnvironmentEdgeManager.currentTimeMillis(); + for (long i = 0; i < numberOfExecutions; i++) { + long threadElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - threadStartTime; + if (threadElapsedTime >= executionDurationInMs) { + LOGGER.info("Queryset timeout of " + executionDurationInMs + " ms reached; current time is " + + threadElapsedTime + " ms." 
+ "\nStopping queryset execution for query " + query.getId() + + " on thread " + threadName + "..."); + break; + } + + synchronized (workloadExecutor) { + if (!timedQuery(i + 1)) { + break; } - - // Make sure all result have been dumped before exiting - if (writeRuntimeResults) { - synchronized (workloadExecutor) { - resultManager.flush(); - } + if ( + writeRuntimeResults + && (EnvironmentEdgeManager.currentTimeMillis() - lastResultWritten) > 1000 + ) { + resultManager.write(dataModelResult, ruleApplier); + lastResultWritten = EnvironmentEdgeManager.currentTimeMillis(); } - - LOGGER.info("\n\nThread exiting." + threadName + "\n\n"); - return null; + } } - private synchronized ThreadTime getThreadTime() { - return threadTime; + if (!writeRuntimeResults) { + long duration = EnvironmentEdgeManager.currentTimeMillis() - threadStartTime; + LOGGER.info( + "The read query " + query.getStatement() + " for this thread in (" + duration + ") Ms"); } - /** - * Timed query execution - * - * @throws Exception - * @returns boolean true if query finished without timing out; false otherwise - */ - private boolean timedQuery(long iterationNumber) throws Exception { - boolean - isSelectCountStatement = - query.getStatement().toUpperCase().trim().contains("COUNT(") ? true : false; - - Connection conn = null; - PreparedStatement statement = null; - ResultSet rs = null; - Long queryStartTime = EnvironmentEdgeManager.currentTimeMillis(); - Date startDate = Calendar.getInstance().getTime(); - String exception = null; - Long resultRowCount = 0L; - String queryIteration = threadName + ":" + iterationNumber; - Long queryElapsedTime = 0L; - - try { - conn = pUtil.getConnection(query.getTenantId(), scenario.getPhoenixProperties()); - conn.setAutoCommit(true); - final String statementString = query.getDynamicStatement(ruleApplier, scenario); - statement = conn.prepareStatement(statementString); - LOGGER.debug("Executing iteration: " + queryIteration + ": " + statementString); - - if (scenario.getWriteParams() != null) { - Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, - PherfConstants.create(). - getProperties(PherfConstants.PHERF_PROPERTIES, true), - scenario, GeneratePhoenixStats.NO); - workloadExecutor.add(writes); - } - - boolean isQuery = statement.execute(); - if (isQuery) { - rs = statement.getResultSet(); - Pair r = getResults(rs, queryIteration, isSelectCountStatement, queryStartTime); - resultRowCount = r.getFirst(); - queryElapsedTime = r.getSecond(); - } else { - conn.commit(); - } - } catch (Exception e) { - LOGGER.error("Exception while executing query iteration " + queryIteration, e); - exception = e.getMessage(); - throw e; - } finally { - getThreadTime().getRunTimesInMs().add(new RunTime(exception, startDate, resultRowCount, - queryElapsedTime, queryElapsedTime > query.getTimeoutDuration())); - - if (rs != null) rs.close(); - if (statement != null) statement.close(); - if (conn != null) conn.close(); - } - return true; + // Make sure all result have been dumped before exiting + if (writeRuntimeResults) { + synchronized (workloadExecutor) { + resultManager.flush(); + } } - @VisibleForTesting - /** - * @return a Pair whose first value is the resultRowCount, and whose second value is whether the query timed out. 
- */ - Pair getResults(ResultSet rs, String queryIteration, boolean isSelectCountStatement, Long queryStartTime) throws Exception { - Long resultRowCount = 0L; - while (rs.next()) { - if (null != query.getExpectedAggregateRowCount()) { - if (rs.getLong(1) != query.getExpectedAggregateRowCount()) - throw new RuntimeException( - "Aggregate count " + rs.getLong(1) + " does not match expected " - + query.getExpectedAggregateRowCount()); - } - - if (isSelectCountStatement) { - resultRowCount = rs.getLong(1); - } else { - resultRowCount++; - } - long queryElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - queryStartTime; - if (queryElapsedTime >= query.getTimeoutDuration()) { - LOGGER.error("Query " + queryIteration + " exceeded timeout of " - + query.getTimeoutDuration() + " ms at " + queryElapsedTime + " ms."); - return new Pair(resultRowCount, queryElapsedTime); - } - } - return new Pair(resultRowCount, EnvironmentEdgeManager.currentTimeMillis() - queryStartTime); + LOGGER.info("\n\nThread exiting." + threadName + "\n\n"); + return null; + } + + private synchronized ThreadTime getThreadTime() { + return threadTime; + } + + /** + * Timed query execution + * @returns boolean true if query finished without timing out; false otherwise + */ + private boolean timedQuery(long iterationNumber) throws Exception { + boolean isSelectCountStatement = + query.getStatement().toUpperCase().trim().contains("COUNT(") ? true : false; + + Connection conn = null; + PreparedStatement statement = null; + ResultSet rs = null; + Long queryStartTime = EnvironmentEdgeManager.currentTimeMillis(); + Date startDate = Calendar.getInstance().getTime(); + String exception = null; + Long resultRowCount = 0L; + String queryIteration = threadName + ":" + iterationNumber; + Long queryElapsedTime = 0L; + + try { + conn = pUtil.getConnection(query.getTenantId(), scenario.getPhoenixProperties()); + conn.setAutoCommit(true); + final String statementString = query.getDynamicStatement(ruleApplier, scenario); + statement = conn.prepareStatement(statementString); + LOGGER.debug("Executing iteration: " + queryIteration + ": " + statementString); + + if (scenario.getWriteParams() != null) { + Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, true), scenario, + GeneratePhoenixStats.NO); + workloadExecutor.add(writes); + } + + boolean isQuery = statement.execute(); + if (isQuery) { + rs = statement.getResultSet(); + Pair r = getResults(rs, queryIteration, isSelectCountStatement, queryStartTime); + resultRowCount = r.getFirst(); + queryElapsedTime = r.getSecond(); + } else { + conn.commit(); + } + } catch (Exception e) { + LOGGER.error("Exception while executing query iteration " + queryIteration, e); + exception = e.getMessage(); + throw e; + } finally { + getThreadTime().getRunTimesInMs().add(new RunTime(exception, startDate, resultRowCount, + queryElapsedTime, queryElapsedTime > query.getTimeoutDuration())); + + if (rs != null) rs.close(); + if (statement != null) statement.close(); + if (conn != null) conn.close(); + } + return true; + } + + @VisibleForTesting + /** + * Returns a Pair whose first value is the resultRowCount, and whose second value is whether the + * query timed out. 
+ */ + Pair getResults(ResultSet rs, String queryIteration, boolean isSelectCountStatement, + Long queryStartTime) throws Exception { + Long resultRowCount = 0L; + while (rs.next()) { + if (null != query.getExpectedAggregateRowCount()) { + if (rs.getLong(1) != query.getExpectedAggregateRowCount()) + throw new RuntimeException("Aggregate count " + rs.getLong(1) + + " does not match expected " + query.getExpectedAggregateRowCount()); + } + + if (isSelectCountStatement) { + resultRowCount = rs.getLong(1); + } else { + resultRowCount++; + } + long queryElapsedTime = EnvironmentEdgeManager.currentTimeMillis() - queryStartTime; + if (queryElapsedTime >= query.getTimeoutDuration()) { + LOGGER.error("Query " + queryIteration + " exceeded timeout of " + + query.getTimeoutDuration() + " ms at " + queryElapsedTime + " ms."); + return new Pair(resultRowCount, queryElapsedTime); + } } + return new Pair(resultRowCount, EnvironmentEdgeManager.currentTimeMillis() - queryStartTime); + } -} \ No newline at end of file +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java index d30f4dc0001..c7a38dc7b21 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java @@ -1,28 +1,26 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload; import java.util.Calendar; import java.util.Date; import java.util.concurrent.Callable; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.result.RunTime; @@ -33,72 +31,61 @@ import org.slf4j.LoggerFactory; class MultithreadedDiffer implements Callable { - private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class); - private Thread t; - private Query query; - private ThreadTime threadTime; - private long numberOfExecutions; - private long executionDurationInMs; - private QueryVerifier queryVerifier = new QueryVerifier(true); - private static PhoenixUtil pUtil = PhoenixUtil.create(); + private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class); + private Thread t; + private Query query; + private ThreadTime threadTime; + private long numberOfExecutions; + private long executionDurationInMs; + private QueryVerifier queryVerifier = new QueryVerifier(true); + private static PhoenixUtil pUtil = PhoenixUtil.create(); - private synchronized ThreadTime getThreadTime() { - return threadTime; - } + private synchronized ThreadTime getThreadTime() { + return threadTime; + } - /** - * Query Verification - * - * @throws Exception - */ - private void diffQuery() throws Exception { - Long start = EnvironmentEdgeManager.currentTimeMillis(); - Date startDate = Calendar.getInstance().getTime(); - String newCSV = queryVerifier.exportCSV(query); - boolean verifyResult = queryVerifier.doDiff(query, newCSV); - String explainPlan = pUtil.getExplainPlan(query); - long elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - start; - getThreadTime().getRunTimesInMs().add(new RunTime( - verifyResult == true ? PherfConstants.DIFF_PASS : PherfConstants.DIFF_FAIL, - explainPlan, startDate, -1L, - elapsedTime, !(elapsedTime >= executionDurationInMs))); - } + /** + * Query Verification + */ + private void diffQuery() throws Exception { + Long start = EnvironmentEdgeManager.currentTimeMillis(); + Date startDate = Calendar.getInstance().getTime(); + String newCSV = queryVerifier.exportCSV(query); + boolean verifyResult = queryVerifier.doDiff(query, newCSV); + String explainPlan = pUtil.getExplainPlan(query); + long elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - start; + getThreadTime().getRunTimesInMs() + .add(new RunTime(verifyResult == true ? 
PherfConstants.DIFF_PASS : PherfConstants.DIFF_FAIL, + explainPlan, startDate, -1L, elapsedTime, !(elapsedTime >= executionDurationInMs))); + } - /** - * Multithreaded Differ - * - * @param threadName - * @param query - * @param threadName - * @param threadTime - * @param numberOfExecutions - * @param executionDurationInMs - */ - MultithreadedDiffer(String threadName, Query query, ThreadTime threadTime, - long numberOfExecutions, long executionDurationInMs) { - this.query = query; - this.threadTime = threadTime; - this.numberOfExecutions = numberOfExecutions; - this.executionDurationInMs = executionDurationInMs; - } + /** + * Multithreaded Differ + */ + MultithreadedDiffer(String threadName, Query query, ThreadTime threadTime, + long numberOfExecutions, long executionDurationInMs) { + this.query = query; + this.threadTime = threadTime; + this.numberOfExecutions = numberOfExecutions; + this.executionDurationInMs = executionDurationInMs; + } - /** - * Executes verification runs for a minimum of number of execution or execution duration - */ - public Void call() throws Exception { - LOGGER.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for " - + numberOfExecutions + "times\n\n"); - Long start = EnvironmentEdgeManager.currentTimeMillis(); - for (long i = numberOfExecutions; (i > 0 && - ((EnvironmentEdgeManager.currentTimeMillis() - start) - < executionDurationInMs)); i--) { - try { - diffQuery(); - } catch (Exception e) { - e.printStackTrace(); - } - } - LOGGER.info("\n\nThread exiting." + t.getName() + "\n\n"); - return null; + /** + * Executes verification runs for a minimum of number of execution or execution duration + */ + public Void call() throws Exception { + LOGGER.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for " + + numberOfExecutions + "times\n\n"); + Long start = EnvironmentEdgeManager.currentTimeMillis(); + for (long i = numberOfExecutions; (i > 0 + && ((EnvironmentEdgeManager.currentTimeMillis() - start) < executionDurationInMs)); i--) { + try { + diffQuery(); + } catch (Exception e) { + e.printStackTrace(); + } } -} \ No newline at end of file + LOGGER.info("\n\nThread exiting." + t.getName() + "\n\n"); + return null; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java index c15cf1acbaa..fa17ae99d9f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java @@ -1,23 +1,29 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.workload; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.phoenix.pherf.configuration.DataModel; @@ -37,246 +43,205 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - public class QueryExecutor implements Workload { - private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class); - private List dataModels; - private String queryHint; - private final boolean exportCSV; - private final XMLConfigParser parser; - private final PhoenixUtil util; - private final WorkloadExecutor workloadExecutor; - private final boolean writeRuntimeResults; - private RulesApplier ruleApplier; - - public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, - WorkloadExecutor workloadExecutor) { - this(parser, util, workloadExecutor, parser.getDataModels(), null, false, true); - } - - public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, - WorkloadExecutor workloadExecutor, List dataModels, String queryHint, - boolean exportCSV) { - this(parser, util, workloadExecutor, dataModels, queryHint, exportCSV, true); + private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class); + private List dataModels; + private String queryHint; + private final boolean exportCSV; + private final XMLConfigParser parser; + private final PhoenixUtil util; + private final WorkloadExecutor workloadExecutor; + private final boolean writeRuntimeResults; + private RulesApplier ruleApplier; + + public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, + WorkloadExecutor workloadExecutor) { + this(parser, util, workloadExecutor, parser.getDataModels(), null, false, true); + } + + public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, WorkloadExecutor workloadExecutor, + List dataModels, String queryHint, boolean exportCSV) { + this(parser, util, workloadExecutor, dataModels, queryHint, exportCSV, true); + } + + public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, WorkloadExecutor workloadExecutor, + List dataModels, String queryHint, boolean exportCSV, boolean writeRuntimeResults) { + this.parser = parser; + this.queryHint = queryHint; + this.exportCSV = exportCSV; + this.dataModels = dataModels; + this.util = util; + this.workloadExecutor = workloadExecutor; + this.writeRuntimeResults 
= writeRuntimeResults; + this.ruleApplier = new RulesApplier(parser); + } + + @Override + public void complete() { + } + + /** + * Calls in Multithreaded Query Executor for all datamodels + */ + @Override + public Callable execute() throws Exception { + Callable callable = null; + for (DataModel dataModel : dataModels) { + if (exportCSV) { + callable = exportAllScenarios(dataModel); + } else { + callable = executeAllScenarios(dataModel); + } } - - public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, - WorkloadExecutor workloadExecutor, List dataModels, String queryHint, - boolean exportCSV, boolean writeRuntimeResults) { - this.parser = parser; - this.queryHint = queryHint; - this.exportCSV = exportCSV; - this.dataModels = dataModels; - this.util = util; - this.workloadExecutor = workloadExecutor; - this.writeRuntimeResults = writeRuntimeResults; - this.ruleApplier = new RulesApplier(parser); - } - - @Override - public void complete() {} - - /** - * Calls in Multithreaded Query Executor for all datamodels - * - * @throws Exception - */ - @Override - public Callable execute() throws Exception { - Callable callable = null; - for (DataModel dataModel : dataModels) { - if (exportCSV) { - callable = exportAllScenarios(dataModel); - } else { - callable = executeAllScenarios(dataModel); + return callable; + } + + /** + * Export all queries results to CSV + */ + protected Callable exportAllScenarios(final DataModel dataModel) throws Exception { + return new Callable() { + @Override + public Void call() throws Exception { + try { + List scenarios = dataModel.getScenarios(); + QueryVerifier exportRunner = new QueryVerifier(false); + for (Scenario scenario : scenarios) { + for (QuerySet querySet : scenario.getQuerySet()) { + util.executeQuerySetDdls(querySet); + for (Query query : querySet.getQuery()) { + exportRunner.exportCSV(query); + } } + } + } catch (Exception e) { + LOGGER.error("Scenario throws exception", e); + throw e; } - return callable; - } - - - /** - * Export all queries results to CSV - * - * @param dataModel - * @throws Exception - */ - protected Callable exportAllScenarios(final DataModel dataModel) throws Exception { - return new Callable() { - @Override - public Void call() throws Exception { - try { - List scenarios = dataModel.getScenarios(); - QueryVerifier exportRunner = new QueryVerifier(false); - for (Scenario scenario : scenarios) { - for (QuerySet querySet : scenario.getQuerySet()) { - util.executeQuerySetDdls(querySet); - for (Query query : querySet.getQuery()) { - exportRunner.exportCSV(query); - } - } - } - } catch (Exception e) { - LOGGER.error("Scenario throws exception", e); - throw e; - } - return null; + return null; + } + }; + } + + /** + * Execute all scenarios + */ + protected Callable executeAllScenarios(final DataModel dataModel) throws Exception { + return new Callable() { + @Override + public Void call() throws Exception { + List dataModelResults = new ArrayList<>(); + DataModelResult dataModelResult = + new DataModelResult(dataModel, PhoenixUtil.getZookeeper()); + ResultManager resultManager = new ResultManager(dataModelResult.getName()); + + dataModelResults.add(dataModelResult); + List scenarios = dataModel.getScenarios(); + Configuration conf = HBaseConfiguration.create(); + Map phoenixProperty = conf.getValByRegex("phoenix"); + try { + + for (Scenario scenario : scenarios) { + ScenarioResult scenarioResult = new ScenarioResult(scenario); + scenarioResult.setPhoenixProperties(phoenixProperty); + 
dataModelResult.getScenarioResult().add(scenarioResult); + + for (QuerySet querySet : scenario.getQuerySet()) { + QuerySetResult querySetResult = new QuerySetResult(querySet); + scenarioResult.getQuerySetResult().add(querySetResult); + + util.executeQuerySetDdls(querySet); + if (querySet.getExecutionType() == ExecutionType.SERIAL) { + executeQuerySetSerial(dataModelResult, querySet, querySetResult, scenario); + } else { + executeQuerySetParallel(dataModelResult, querySet, querySetResult, scenario); + } } - }; - } - - /** - * Execute all scenarios - * - * @param dataModel - * @throws Exception - */ - protected Callable executeAllScenarios(final DataModel dataModel) throws Exception { - return new Callable() { - @Override public Void call() throws Exception { - List dataModelResults = new ArrayList<>(); - DataModelResult - dataModelResult = - new DataModelResult(dataModel, PhoenixUtil.getZookeeper()); - ResultManager - resultManager = - new ResultManager(dataModelResult.getName()); - - dataModelResults.add(dataModelResult); - List scenarios = dataModel.getScenarios(); - Configuration conf = HBaseConfiguration.create(); - Map phoenixProperty = conf.getValByRegex("phoenix"); - try { - - for (Scenario scenario : scenarios) { - ScenarioResult scenarioResult = new ScenarioResult(scenario); - scenarioResult.setPhoenixProperties(phoenixProperty); - dataModelResult.getScenarioResult().add(scenarioResult); - - for (QuerySet querySet : scenario.getQuerySet()) { - QuerySetResult querySetResult = new QuerySetResult(querySet); - scenarioResult.getQuerySetResult().add(querySetResult); + resultManager.write(dataModelResult, ruleApplier); + } + resultManager.write(dataModelResults, ruleApplier); + resultManager.flush(); + } catch (Exception e) { + LOGGER.error("Scenario throws exception", e); + throw e; + } + return null; + } + }; + } + + /** + * Execute query set serially + */ + protected void executeQuerySetSerial(DataModelResult dataModelResult, QuerySet querySet, + QuerySetResult querySetResult, Scenario scenario) + throws ExecutionException, InterruptedException { + for (Query query : querySet.getQuery()) { + QueryResult queryResult = new QueryResult(query); + querySetResult.getQueryResults().add(queryResult); + + for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) { + + List threads = new ArrayList<>(); + + for (int i = 0; i < cr; i++) { + + Callable thread = executeRunner((i + 1) + "," + cr, dataModelResult, queryResult, + querySetResult, scenario); + threads.add(workloadExecutor.getPool().submit(thread)); + } - util.executeQuerySetDdls(querySet); - if (querySet.getExecutionType() == ExecutionType.SERIAL) { - executeQuerySetSerial(dataModelResult, querySet, querySetResult, scenario); - } else { - executeQuerySetParallel(dataModelResult, querySet, querySetResult, scenario); - } - } - resultManager.write(dataModelResult, ruleApplier); - } - resultManager.write(dataModelResults, ruleApplier); - resultManager.flush(); - } catch (Exception e) { - LOGGER.error("Scenario throws exception", e); - throw e; - } - return null; - } - }; + for (Future thread : threads) { + thread.get(); + } + } } - - /** - * Execute query set serially - * - * @param dataModelResult - * @param querySet - * @param querySetResult - * @param scenario - * @throws InterruptedException - */ - protected void executeQuerySetSerial(DataModelResult dataModelResult, QuerySet querySet, - QuerySetResult querySetResult, Scenario scenario) throws ExecutionException, InterruptedException { + } + + /** + * Execute 
query set in parallel + */ + protected void executeQuerySetParallel(DataModelResult dataModelResult, QuerySet querySet, + QuerySetResult querySetResult, Scenario scenario) + throws ExecutionException, InterruptedException { + for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) { + List threads = new ArrayList<>(); + for (int i = 0; i < cr; i++) { for (Query query : querySet.getQuery()) { - QueryResult queryResult = new QueryResult(query); - querySetResult.getQueryResults().add(queryResult); + QueryResult queryResult = new QueryResult(query); + querySetResult.getQueryResults().add(queryResult); - for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) { - - List threads = new ArrayList<>(); - - for (int i = 0; i < cr; i++) { - - Callable - thread = - executeRunner((i + 1) + "," + cr, dataModelResult, queryResult, - querySetResult, scenario); - threads.add(workloadExecutor.getPool().submit(thread)); - } - - for (Future thread : threads) { - thread.get(); - } - } + Callable thread = executeRunner((i + 1) + "," + cr, dataModelResult, queryResult, + querySetResult, scenario); + threads.add(workloadExecutor.getPool().submit(thread)); } - } - /** - * Execute query set in parallel - * - * @param dataModelResult - * @param querySet - * @param querySetResult - * @throws InterruptedException - */ - protected void executeQuerySetParallel(DataModelResult dataModelResult, QuerySet querySet, - QuerySetResult querySetResult, Scenario scenario) throws ExecutionException, InterruptedException { - for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) { - List threads = new ArrayList<>(); - for (int i = 0; i < cr; i++) { - for (Query query : querySet.getQuery()) { - QueryResult queryResult = new QueryResult(query); - querySetResult.getQueryResults().add(queryResult); - - Callable - thread = - executeRunner((i + 1) + "," + cr, dataModelResult, queryResult, - querySetResult, scenario); - threads.add(workloadExecutor.getPool().submit(thread)); - } - - for (Future thread : threads) { - thread.get(); - } - } + for (Future thread : threads) { + thread.get(); } + } } - - /** - * Execute multi-thread runner - * - * @param name - * @param dataModelResult - * @param queryResult - * @param querySet - * @param scenario - * @return - */ - protected Callable executeRunner(String name, DataModelResult dataModelResult, - QueryResult queryResult, QuerySet querySet, Scenario scenario) { - ThreadTime threadTime = new ThreadTime(); - queryResult.getThreadTimes().add(threadTime); - threadTime.setThreadName(name); - queryResult.setHint(this.queryHint); - LOGGER.info("\nExecuting query " + queryResult.getStatement()); - Callable thread; - if (workloadExecutor.isPerformance()) { - thread = - new MultiThreadedRunner(threadTime.getThreadName(), queryResult, - dataModelResult, threadTime, querySet.getNumberOfExecutions(), - querySet.getExecutionDurationInMs(), writeRuntimeResults, ruleApplier, scenario, workloadExecutor, parser); - } else { - thread = - new MultithreadedDiffer(threadTime.getThreadName(), queryResult, threadTime, - querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs()); - } - return thread; + } + + /** + * Execute multi-thread runner + */ + protected Callable executeRunner(String name, DataModelResult dataModelResult, + QueryResult queryResult, QuerySet querySet, Scenario scenario) { + ThreadTime threadTime = new ThreadTime(); + queryResult.getThreadTimes().add(threadTime); + threadTime.setThreadName(name); + 
queryResult.setHint(this.queryHint); + LOGGER.info("\nExecuting query " + queryResult.getStatement()); + Callable thread; + if (workloadExecutor.isPerformance()) { + thread = new MultiThreadedRunner(threadTime.getThreadName(), queryResult, dataModelResult, + threadTime, querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs(), + writeRuntimeResults, ruleApplier, scenario, workloadExecutor, parser); + } else { + thread = new MultithreadedDiffer(threadTime.getThreadName(), queryResult, threadTime, + querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs()); } -} \ No newline at end of file + return thread; + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java index 786f778336d..4eca75358b6 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload; import java.io.BufferedReader; @@ -27,146 +26,127 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.SQLException; import java.util.LinkedList; import java.util.List; -import org.apache.phoenix.pherf.result.file.Extension; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.configuration.Query; +import org.apache.phoenix.pherf.result.file.Extension; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import difflib.DiffUtils; import difflib.Patch; public class QueryVerifier { - private PhoenixUtil pUtil = PhoenixUtil.create(); - private static final Logger LOGGER = LoggerFactory.getLogger(QueryVerifier.class); - private boolean useTemporaryOutput; - private String directoryLocation; - - public QueryVerifier(boolean useTemporaryOutput) { - this.useTemporaryOutput = useTemporaryOutput; - this.directoryLocation = - this.useTemporaryOutput ? PherfConstants.EXPORT_TMP : PherfConstants.EXPORT_DIR; - - ensureBaseDirExists(); - } - - /** - * Export query resultSet to CSV file - * - * @param query - * @throws Exception - */ - public String exportCSV(Query query) throws Exception { - Connection conn = null; - PreparedStatement statement = null; - ResultSet rs = null; - String fileName = getFileName(query); - FileOutputStream fos = new FileOutputStream(fileName); - try { - conn = pUtil.getConnection(query.getTenantId()); - statement = conn.prepareStatement(query.getStatement()); - boolean isQuery = statement.execute(); - if (isQuery) { - rs = statement.executeQuery(); - int columnCount = rs.getMetaData().getColumnCount(); - while (rs.next()) { - for (int columnNum = 1; columnNum <= columnCount; columnNum++) { - fos.write((rs.getString(columnNum) + PherfConstants.RESULT_FILE_DELIMETER) - .getBytes()); - } - fos.write(PherfConstants.NEW_LINE.getBytes()); - } - } else { - conn.commit(); - } - } catch (Exception e) { - e.printStackTrace(); - } finally { - if (rs != null) rs.close(); - if (statement != null) statement.close(); - if (conn != null) conn.close(); - fos.flush(); - fos.close(); + private PhoenixUtil pUtil = PhoenixUtil.create(); + private static final Logger LOGGER = LoggerFactory.getLogger(QueryVerifier.class); + private boolean useTemporaryOutput; + private String directoryLocation; + + public QueryVerifier(boolean useTemporaryOutput) { + this.useTemporaryOutput = useTemporaryOutput; + this.directoryLocation = + this.useTemporaryOutput ? 
PherfConstants.EXPORT_TMP : PherfConstants.EXPORT_DIR; + + ensureBaseDirExists(); + } + + /** + * Export query resultSet to CSV file + */ + public String exportCSV(Query query) throws Exception { + Connection conn = null; + PreparedStatement statement = null; + ResultSet rs = null; + String fileName = getFileName(query); + FileOutputStream fos = new FileOutputStream(fileName); + try { + conn = pUtil.getConnection(query.getTenantId()); + statement = conn.prepareStatement(query.getStatement()); + boolean isQuery = statement.execute(); + if (isQuery) { + rs = statement.executeQuery(); + int columnCount = rs.getMetaData().getColumnCount(); + while (rs.next()) { + for (int columnNum = 1; columnNum <= columnCount; columnNum++) { + fos.write((rs.getString(columnNum) + PherfConstants.RESULT_FILE_DELIMETER).getBytes()); + } + fos.write(PherfConstants.NEW_LINE.getBytes()); } - return fileName; + } else { + conn.commit(); + } + } catch (Exception e) { + e.printStackTrace(); + } finally { + if (rs != null) rs.close(); + if (statement != null) statement.close(); + if (conn != null) conn.close(); + fos.flush(); + fos.close(); } - - /** - * Do a diff between exported query results and temporary CSV file - * - * @param query - * @param newCSV - * @return - */ - public boolean doDiff(Query query, String newCSV) { - List original = fileToLines(getCSVName(query, PherfConstants.EXPORT_DIR, "")); - List newLines = fileToLines(newCSV); - - Patch patch = DiffUtils.diff(original, newLines); - if (patch.getDeltas().isEmpty()) { - LOGGER.info("Match: " + query.getId() + " with " + newCSV); - return true; - } else { - LOGGER.error("DIFF FAILED: " + query.getId() + " with " + newCSV); - return false; - } + return fileName; + } + + /** + * Do a diff between exported query results and temporary CSV file + */ + public boolean doDiff(Query query, String newCSV) { + List original = fileToLines(getCSVName(query, PherfConstants.EXPORT_DIR, "")); + List newLines = fileToLines(newCSV); + + Patch patch = DiffUtils.diff(original, newLines); + if (patch.getDeltas().isEmpty()) { + LOGGER.info("Match: " + query.getId() + " with " + newCSV); + return true; + } else { + LOGGER.error("DIFF FAILED: " + query.getId() + " with " + newCSV); + return false; } - - /** - * Helper method to load file - * - * @param filename - * @return - */ - private static List fileToLines(String filename) { - List lines = new LinkedList(); - String line = ""; - try { - BufferedReader in = new BufferedReader(new FileReader(filename)); - while ((line = in.readLine()) != null) { - lines.add(line); - } - in.close(); - } catch (IOException e) { - e.printStackTrace(); - } - - return lines; + } + + /** + * Helper method to load file + */ + private static List fileToLines(String filename) { + List lines = new LinkedList(); + String line = ""; + try { + BufferedReader in = new BufferedReader(new FileReader(filename)); + while ((line = in.readLine()) != null) { + lines.add(line); + } + in.close(); + } catch (IOException e) { + e.printStackTrace(); } - /** - * Helper method to generate CSV file name - * - * @param query - * @return - * @throws FileNotFoundException - */ - private String getFileName(Query query) throws FileNotFoundException { - String tempExt = ""; - if (this.useTemporaryOutput) { - tempExt = "_" + java.util.UUID.randomUUID().toString(); - } - return getCSVName(query, this.directoryLocation, tempExt); - } + return lines; + } - private String getCSVName(Query query, String directory, String tempExt) { - String - csvFile = - directory + 
PherfConstants.PATH_SEPARATOR + query.getId() + tempExt + Extension.CSV - .toString(); - return csvFile; + /** + * Helper method to generate CSV file name + */ + private String getFileName(Query query) throws FileNotFoundException { + String tempExt = ""; + if (this.useTemporaryOutput) { + tempExt = "_" + java.util.UUID.randomUUID().toString(); } - - private void ensureBaseDirExists() { - File baseDir = new File(this.directoryLocation); - if (!baseDir.exists()) { - baseDir.mkdir(); - } + return getCSVName(query, this.directoryLocation, tempExt); + } + + private String getCSVName(Query query, String directory, String tempExt) { + String csvFile = directory + PherfConstants.PATH_SEPARATOR + query.getId() + tempExt + + Extension.CSV.toString(); + return csvFile; + } + + private void ensureBaseDirExists() { + File baseDir = new File(this.directoryLocation); + if (!baseDir.exists()) { + baseDir.mkdir(); } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java index 8dbda913bcc..5c38d5a89bc 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ public interface Workload { - Callable execute() throws Exception; /** diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java index 381751d7b96..56bc1941b08 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java @@ -1,127 +1,125 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload; -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.pherf.PherfConstants; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.*; -public class WorkloadExecutor { - private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExecutor.class); - private final int poolSize; - private final boolean isPerformance; - - // Jobs can be accessed by multiple threads - @VisibleForTesting - public final Map jobs = new ConcurrentHashMap<>(); - - private final ExecutorService pool; - - public WorkloadExecutor() throws Exception { - this(PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false)); - } - - public WorkloadExecutor(Properties properties) throws Exception { - this(properties, new ArrayList(), true); - } - - public WorkloadExecutor(Properties properties, List workloads, boolean isPerformance) throws Exception { - this.isPerformance = isPerformance; - this.poolSize = - (properties.getProperty("pherf.default.threadpool") == null) ? - PherfConstants.DEFAULT_THREAD_POOL_SIZE : - Integer.parseInt(properties.getProperty("pherf.default.threadpool")); - - this.pool = Executors.newFixedThreadPool(this.poolSize); - init(workloads); - } - - public void add(Workload workload) throws Exception { - this.jobs.put(workload, pool.submit(workload.execute())); - } - - /** - * Blocks on waiting for all workloads to finish. If a - * {@link org.apache.phoenix.pherf.workload.Workload} Requires complete() to be called, it must - * be called prior to using this method. Otherwise it will block infinitely. - */ - public void get() { - for (Workload workload : jobs.keySet()) { - get(workload); - } - } - - /** - * Calls the {@link java.util.concurrent.Future#get()} method pertaining to this workflow. - * Once the Future competes, the workflow is removed from the list. - * - * @param workload Key entry in the HashMap - */ - public void get(Workload workload) { - try { - Future future = jobs.get(workload); - future.get(); - jobs.remove(workload); - } catch (InterruptedException | ExecutionException e) { - LOGGER.error("", e); - } - } - - /** - * Complete all workloads in the list. 
- * Entries in the job Map will persist until {#link WorkloadExecutorNew#get()} is called - */ - public void complete() { - for (Workload workload : jobs.keySet()) { - workload.complete(); - } - } +import org.apache.phoenix.pherf.PherfConstants; +import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; - public void shutdown() { - // Make sure any Workloads still on pool have been properly shutdown - complete(); - pool.shutdownNow(); +public class WorkloadExecutor { + private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExecutor.class); + private final int poolSize; + private final boolean isPerformance; + + // Jobs can be accessed by multiple threads + @VisibleForTesting + public final Map jobs = new ConcurrentHashMap<>(); + + private final ExecutorService pool; + + public WorkloadExecutor() throws Exception { + this(PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false)); + } + + public WorkloadExecutor(Properties properties) throws Exception { + this(properties, new ArrayList(), true); + } + + public WorkloadExecutor(Properties properties, List workloads, boolean isPerformance) + throws Exception { + this.isPerformance = isPerformance; + this.poolSize = (properties.getProperty("pherf.default.threadpool") == null) + ? PherfConstants.DEFAULT_THREAD_POOL_SIZE + : Integer.parseInt(properties.getProperty("pherf.default.threadpool")); + + this.pool = Executors.newFixedThreadPool(this.poolSize); + init(workloads); + } + + public void add(Workload workload) throws Exception { + this.jobs.put(workload, pool.submit(workload.execute())); + } + + /** + * Blocks on waiting for all workloads to finish. If a + * {@link org.apache.phoenix.pherf.workload.Workload} Requires complete() to be called, it must be + * called prior to using this method. Otherwise it will block infinitely. + */ + public void get() { + for (Workload workload : jobs.keySet()) { + get(workload); } - - /** - * TODO This should be removed, Access to the pool should be restriced and callers should Workflows - * - * @return {@link ExecutorService} Exposes the underlying thread pool - */ - public ExecutorService getPool() { - return pool; + } + + /** + * Calls the {@link java.util.concurrent.Future#get()} method pertaining to this workflow. Once + * the Future competes, the workflow is removed from the list. + * @param workload Key entry in the HashMap + */ + public void get(Workload workload) { + try { + Future future = jobs.get(workload); + future.get(); + jobs.remove(workload); + } catch (InterruptedException | ExecutionException e) { + LOGGER.error("", e); } - - public boolean isPerformance() { - return isPerformance; + } + + /** + * Complete all workloads in the list. 
Entries in the job Map will persist until {#link + * WorkloadExecutorNew#get()} is called + */ + public void complete() { + for (Workload workload : jobs.keySet()) { + workload.complete(); } - - private void init(List workloads) throws Exception { - for (Workload workload : workloads) { - this.jobs.put(workload, pool.submit(workload.execute())); - } + } + + public void shutdown() { + // Make sure any Workloads still on pool have been properly shutdown + complete(); + pool.shutdownNow(); + } + + /** + * TODO This should be removed, Access to the pool should be restriced and callers should + * Workflows + * @return {@link ExecutorService} Exposes the underlying thread pool + */ + public ExecutorService getPool() { + return pool; + } + + public boolean isPerformance() { + return isPerformance; + } + + private void init(List workloads) throws Exception { + for (Workload workload : workloads) { + this.jobs.put(workload, pool.submit(workload.execute())); } -} \ No newline at end of file + } +} diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java index b27fe3252a9..7672e48e9bc 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload; import java.sql.Connection; @@ -48,346 +47,334 @@ import org.slf4j.LoggerFactory; public class WriteWorkload implements Workload { - private static final Logger LOGGER = LoggerFactory.getLogger(WriteWorkload.class); - - public static final String USE_BATCH_API_PROPERTY = "pherf.default.dataloader.batchApi"; - - private final PhoenixUtil pUtil; - private final XMLConfigParser parser; - private final RulesApplier rulesApplier; - private final ResultUtil resultUtil; - private final ExecutorService pool; - private final WriteParams writeParams; - private final Scenario scenario; - private final long threadSleepDuration; - - private final int threadPoolSize; - private final int batchSize; - private final GeneratePhoenixStats generateStatistics; - private final boolean useBatchApi; - private final Properties properties; - - public WriteWorkload(XMLConfigParser parser) throws Exception { - this(PhoenixUtil.create(), parser, PherfConstants.create(). - getProperties(PherfConstants.PHERF_PROPERTIES, true), GeneratePhoenixStats.NO); - } - public WriteWorkload(XMLConfigParser parser, Properties properties, - GeneratePhoenixStats generateStatistics) throws Exception { - this(PhoenixUtil.create(), parser, properties, generateStatistics); + private static final Logger LOGGER = LoggerFactory.getLogger(WriteWorkload.class); + + public static final String USE_BATCH_API_PROPERTY = "pherf.default.dataloader.batchApi"; + + private final PhoenixUtil pUtil; + private final XMLConfigParser parser; + private final RulesApplier rulesApplier; + private final ResultUtil resultUtil; + private final ExecutorService pool; + private final WriteParams writeParams; + private final Scenario scenario; + private final long threadSleepDuration; + + private final int threadPoolSize; + private final int batchSize; + private final GeneratePhoenixStats generateStatistics; + private final boolean useBatchApi; + private final Properties properties; + + public WriteWorkload(XMLConfigParser parser) throws Exception { + this(PhoenixUtil.create(), parser, + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, true), + GeneratePhoenixStats.NO); + } + + public WriteWorkload(XMLConfigParser parser, Properties properties, + GeneratePhoenixStats generateStatistics) throws Exception { + this(PhoenixUtil.create(), parser, properties, generateStatistics); + } + + public WriteWorkload(PhoenixUtil util, XMLConfigParser parser, Properties properties, + GeneratePhoenixStats generateStatistics) throws Exception { + this(util, parser, properties, null, generateStatistics); + } + + /** + * Default the writers to use up all available cores for threads. If writeParams are used in the + * config files, they will override the defaults. writeParams are used for read/write mixed + * workloads. TODO extract notion of the scenario list and have 1 write workload per scenario + * @param phoenixUtil {@link org.apache.phoenix.pherf.util.PhoenixUtil} Query helper + * @param parser {@link org.apache.phoenix.pherf.configuration.XMLConfigParser} + * @param properties {@link java.util.Properties} default properties to use + * @param scenario {@link org.apache.phoenix.pherf.configuration.Scenario} If null is passed it + * will run against all scenarios in the parsers list. 
+ */ + public WriteWorkload(PhoenixUtil phoenixUtil, XMLConfigParser parser, Properties properties, + Scenario scenario, GeneratePhoenixStats generateStatistics) throws Exception { + this.pUtil = phoenixUtil; + this.parser = parser; + this.rulesApplier = new RulesApplier(parser); + this.resultUtil = new ResultUtil(); + this.generateStatistics = generateStatistics; + this.properties = properties; + int size = Integer.parseInt(this.properties.getProperty("pherf.default.dataloader.threadpool")); + + // Overwrite defaults properties with those given in the configuration. This indicates the + // scenario is a R/W mixed workload. + if (scenario != null) { + this.scenario = scenario; + writeParams = scenario.getWriteParams(); + if (writeParams != null) { + threadSleepDuration = writeParams.getThreadSleepDuration(); + size = writeParams.getWriterThreadCount(); + } else { + threadSleepDuration = 0; + } + + } else { + writeParams = null; + this.scenario = null; + threadSleepDuration = 0; } - public WriteWorkload(PhoenixUtil util, XMLConfigParser parser, Properties properties, - GeneratePhoenixStats generateStatistics) throws Exception { - this(util, parser, properties, null, generateStatistics); - } + // Should addBatch/executeBatch be used? Default: false + this.useBatchApi = Boolean.getBoolean(USE_BATCH_API_PROPERTY); + this.threadPoolSize = (size == 0) ? Runtime.getRuntime().availableProcessors() : size; - /** - * Default the writers to use up all available cores for threads. If writeParams are used in - * the config files, they will override the defaults. writeParams are used for read/write mixed - * workloads. - * TODO extract notion of the scenario list and have 1 write workload per scenario - * - * @param phoenixUtil {@link org.apache.phoenix.pherf.util.PhoenixUtil} Query helper - * @param parser {@link org.apache.phoenix.pherf.configuration.XMLConfigParser} - * @param properties {@link java.util.Properties} default properties to use - * @param scenario {@link org.apache.phoenix.pherf.configuration.Scenario} If null is passed - * it will run against all scenarios in the parsers list. - * @throws Exception - */ - public WriteWorkload(PhoenixUtil phoenixUtil, XMLConfigParser parser, - Properties properties, Scenario scenario, - GeneratePhoenixStats generateStatistics) throws Exception { - this.pUtil = phoenixUtil; - this.parser = parser; - this.rulesApplier = new RulesApplier(parser); - this.resultUtil = new ResultUtil(); - this.generateStatistics = generateStatistics; - this.properties = properties; - int size = Integer.parseInt(this.properties.getProperty("pherf.default.dataloader.threadpool")); - - // Overwrite defaults properties with those given in the configuration. This indicates the - // scenario is a R/W mixed workload. - if (scenario != null) { - this.scenario = scenario; - writeParams = scenario.getWriteParams(); - if (writeParams != null) { - threadSleepDuration = writeParams.getThreadSleepDuration(); - size = writeParams.getWriterThreadCount(); - } - else { - threadSleepDuration = 0; - } - - } else { - writeParams = null; - this.scenario = null; - threadSleepDuration = 0; - } + // TODO Move pool management up to WorkloadExecutor + this.pool = Executors.newFixedThreadPool(this.threadPoolSize); - // Should addBatch/executeBatch be used? Default: false - this.useBatchApi = Boolean.getBoolean(USE_BATCH_API_PROPERTY); + String bSize = (writeParams == null) || (writeParams.getBatchSize() == Long.MIN_VALUE) + ? 
this.properties.getProperty("pherf.default.dataloader.batchsize") + : String.valueOf(writeParams.getBatchSize()); + this.batchSize = (bSize == null) ? PherfConstants.DEFAULT_BATCH_SIZE : Integer.parseInt(bSize); + } - this.threadPoolSize = (size == 0) ? Runtime.getRuntime().availableProcessors() : size; + @Override + public void complete() { + pool.shutdownNow(); + } - // TODO Move pool management up to WorkloadExecutor - this.pool = Executors.newFixedThreadPool(this.threadPoolSize); + public Callable execute() throws Exception { + return new Callable() { + @Override + public Void call() throws Exception { + try { + DataLoadTimeSummary dataLoadTimeSummary = new DataLoadTimeSummary(); + DataLoadThreadTime dataLoadThreadTime = new DataLoadThreadTime(); - String - bSize = - (writeParams == null) || (writeParams.getBatchSize() == Long.MIN_VALUE) ? - this.properties.getProperty("pherf.default.dataloader.batchsize") : - String.valueOf(writeParams.getBatchSize()); - this.batchSize = - (bSize == null) ? PherfConstants.DEFAULT_BATCH_SIZE : Integer.parseInt(bSize); + if (WriteWorkload.this.scenario == null) { + for (Scenario scenario : getParser().getScenarios()) { + exec(dataLoadTimeSummary, dataLoadThreadTime, scenario); + } + } else { + exec(dataLoadTimeSummary, dataLoadThreadTime, WriteWorkload.this.scenario); + } + resultUtil.write(dataLoadTimeSummary); + resultUtil.write(dataLoadThreadTime); + + } catch (Exception e) { + LOGGER.error("WriteWorkLoad failed", e); + throw e; + } + return null; + } + }; + } + + private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary, + DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception { + LOGGER.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName()); + + // Execute any pre dataload scenario DDLs + pUtil.executeScenarioDdl(scenario.getPreScenarioDdls(), scenario.getTenantId(), + dataLoadTimeSummary); + + // Write data + List> writeBatches = getBatches(dataLoadThreadTime, scenario); + waitForBatches(dataLoadTimeSummary, scenario, EnvironmentEdgeManager.currentTimeMillis(), + writeBatches); + + // Update Phoenix Statistics + if (this.generateStatistics == GeneratePhoenixStats.YES) { + LOGGER.info("Updating Phoenix table statistics..."); + pUtil.updatePhoenixStats(scenario.getTableName(), scenario); + LOGGER.info("Stats update done!"); + } else { + LOGGER.info("Phoenix table stats update not requested."); } - @Override public void complete() { - pool.shutdownNow(); + // Execute any post data load scenario DDLs before starting query workload + pUtil.executeScenarioDdl(scenario.getPostScenarioDdls(), scenario.getTenantId(), + dataLoadTimeSummary); + } + + private List> getBatches(DataLoadThreadTime dataLoadThreadTime, Scenario scenario) + throws Exception { + RowCalculator rowCalculator = new RowCalculator(getThreadPoolSize(), scenario.getRowCount()); + List> writeBatches = new ArrayList<>(); + + for (int i = 0; i < getThreadPoolSize(); i++) { + List phxMetaCols = pUtil.getColumnsFromPhoenix(scenario.getSchemaName(), + scenario.getTableNameWithoutSchemaName(), pUtil.getConnection(scenario.getTenantId())); + int threadRowCount = rowCalculator.getNext(); + LOGGER.info("Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows."); + Future write = upsertData(scenario, phxMetaCols, scenario.getTableName(), + threadRowCount, dataLoadThreadTime, this.useBatchApi); + writeBatches.add(write); + } + if (writeBatches.isEmpty()) { + throw new PherfException( + "Holy shit snacks! 
Throwing up hands in disbelief and exiting. Could not write data for some unknown reason."); } - public Callable execute() throws Exception { - return new Callable() { - @Override public Void call() throws Exception { - try { - DataLoadTimeSummary dataLoadTimeSummary = new DataLoadTimeSummary(); - DataLoadThreadTime dataLoadThreadTime = new DataLoadThreadTime(); - - if (WriteWorkload.this.scenario == null) { - for (Scenario scenario : getParser().getScenarios()) { - exec(dataLoadTimeSummary, dataLoadThreadTime, scenario); - } - } else { - exec(dataLoadTimeSummary, dataLoadThreadTime, WriteWorkload.this.scenario); - } - resultUtil.write(dataLoadTimeSummary); - resultUtil.write(dataLoadThreadTime); - - } catch (Exception e) { - LOGGER.error("WriteWorkLoad failed", e); - throw e; + return writeBatches; + } + + private void waitForBatches(DataLoadTimeSummary dataLoadTimeSummary, Scenario scenario, + long start, List> writeBatches) + throws InterruptedException, java.util.concurrent.ExecutionException { + int sumRows = 0, sumDuration = 0; + // Wait for all the batch threads to complete + for (Future write : writeBatches) { + Info writeInfo = write.get(); + sumRows += writeInfo.getRowCount(); + sumDuration += writeInfo.getDuration(); + LOGGER.info("Executor (" + this.hashCode() + ") writes complete with row count (" + + writeInfo.getRowCount() + ") in Ms (" + writeInfo.getDuration() + ")"); + } + long testDuration = EnvironmentEdgeManager.currentTimeMillis() - start; + LOGGER + .info("Writes completed with total row count (" + sumRows + ") with total elapsed time of (" + + testDuration + ") ms and total CPU execution time of (" + sumDuration + ") ms"); + dataLoadTimeSummary.add(scenario.getTableName(), sumRows, (int) testDuration); + } + + public Future upsertData(final Scenario scenario, final List columns, + final String tableName, final int rowCount, final DataLoadThreadTime dataLoadThreadTime, + final boolean useBatchApi) { + Future future = pool.submit(new Callable() { + @Override + public Info call() throws Exception { + int rowsCreated = 0; + long start = 0, last = 0, duration, totalDuration; + SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + Connection connection = null; + PreparedStatement stmt = null; + try { + connection = pUtil.getConnection(scenario.getTenantId(), properties); + long logStartTime = EnvironmentEdgeManager.currentTimeMillis(); + long maxDuration = (WriteWorkload.this.writeParams == null) + ? 
Long.MAX_VALUE + : WriteWorkload.this.writeParams.getExecutionDurationInMs(); + + int logPerNRows = PherfConstants.LOG_PER_NROWS; + String customizedLogPerNRows = + connection.getClientInfo().getProperty(PherfConstants.LOG_PER_NROWS_NAME); + if (customizedLogPerNRows != null) { + logPerNRows = Integer.valueOf(customizedLogPerNRows); + } + last = start = EnvironmentEdgeManager.currentTimeMillis(); + String sql = pUtil.buildSql(columns, tableName); + stmt = connection.prepareStatement(sql); + for (long i = rowCount; (i > 0) + && ((EnvironmentEdgeManager.currentTimeMillis() - logStartTime) < maxDuration); i--) { + stmt = pUtil.buildStatement(rulesApplier, scenario, columns, stmt, simpleDateFormat); + if (useBatchApi) { + stmt.addBatch(); + } else { + rowsCreated += stmt.executeUpdate(); + } + if ((i % getBatchSize()) == 0) { + if (useBatchApi) { + int[] results = stmt.executeBatch(); + for (int x = 0; x < results.length; x++) { + int result = results[x]; + if (result < 1) { + final String msg = + "Failed to write update in batch (update count=" + result + ")"; + throw new RuntimeException(msg); + } + rowsCreated += result; } - return null; + } + connection.commit(); + duration = EnvironmentEdgeManager.currentTimeMillis() - last; + LOGGER.info("Writer (" + Thread.currentThread().getName() + + ") committed Batch. Total " + getBatchSize() + " rows for this thread (" + + this.hashCode() + ") in (" + duration + ") Ms"); + + if (i % logPerNRows == 0 && i != 0) { + dataLoadThreadTime.add(tableName, Thread.currentThread().getName(), i, + EnvironmentEdgeManager.currentTimeMillis() - logStartTime); + } + + logStartTime = EnvironmentEdgeManager.currentTimeMillis(); + // Pause for throttling if configured to do so + Thread.sleep(threadSleepDuration); + // Re-compute the start time for the next batch + last = EnvironmentEdgeManager.currentTimeMillis(); + } + } + } catch (SQLException e) { + LOGGER.error("Scenario " + scenario.getName() + " failed with exception ", e); + throw e; + } finally { + // Need to keep the statement open to send the remaining batch of updates + if (!useBatchApi && stmt != null) { + stmt.close(); + } + if (connection != null) { + if (useBatchApi && stmt != null) { + int[] results = stmt.executeBatch(); + for (int x = 0; x < results.length; x++) { + int result = results[x]; + if (result < 1) { + final String msg = + "Failed to write update in batch (update count=" + result + ")"; + throw new RuntimeException(msg); + } + rowsCreated += result; + } + // Close the statement after our last batch execution. 
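[Editorial aside, not part of the patch: upsertData() above switches between per-row executeUpdate() and the JDBC batch API (addBatch()/executeBatch()) and commits every batchSize rows. The standalone sketch below shows that batching pattern with plain JDBC; the table name, columns, and connection URL parameter are placeholders, and UPSERT is Phoenix's insert syntax.]

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public final class BatchUpsertSketch {
  public static int load(String url, int rowCount, int batchSize, boolean useBatchApi)
      throws Exception {
    int rowsCreated = 0;
    try (Connection conn = DriverManager.getConnection(url);
        PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO T (ID, VAL) VALUES (?, ?)")) {
      conn.setAutoCommit(false); // commits are issued explicitly, once per batch
      for (int i = 1; i <= rowCount; i++) {
        stmt.setInt(1, i);
        stmt.setString(2, "row-" + i);
        if (useBatchApi) {
          stmt.addBatch();                  // buffer the row client-side
        } else {
          rowsCreated += stmt.executeUpdate();
        }
        if (i % batchSize == 0) {
          if (useBatchApi) {
            for (int result : stmt.executeBatch()) {
              rowsCreated += result;        // each entry is one buffered row's update count
            }
          }
          conn.commit();                    // flush the batch to the server
        }
      }
      if (useBatchApi) {
        for (int result : stmt.executeBatch()) {
          rowsCreated += result;            // send whatever is left after the loop
        }
      }
      conn.commit();
    }
    return rowsCreated;
  }
}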
+ stmt.close(); } - }; - } - - private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary, - DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception { - LOGGER.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName()); - - // Execute any pre dataload scenario DDLs - pUtil.executeScenarioDdl(scenario.getPreScenarioDdls(), scenario.getTenantId(), dataLoadTimeSummary); - - // Write data - List> writeBatches = getBatches(dataLoadThreadTime, scenario); - waitForBatches(dataLoadTimeSummary, scenario, EnvironmentEdgeManager.currentTimeMillis(), writeBatches); - // Update Phoenix Statistics - if (this.generateStatistics == GeneratePhoenixStats.YES) { - LOGGER.info("Updating Phoenix table statistics..."); - pUtil.updatePhoenixStats(scenario.getTableName(), scenario); - LOGGER.info("Stats update done!"); - } else { - LOGGER.info("Phoenix table stats update not requested."); + try { + connection.commit(); + duration = EnvironmentEdgeManager.currentTimeMillis() - start; + LOGGER.info("Writer ( " + Thread.currentThread().getName() + + ") committed Final Batch. Duration (" + duration + ") Ms"); + connection.close(); + } catch (SQLException e) { + // Swallow since we are closing anyway + e.printStackTrace(); + } + } } - - // Execute any post data load scenario DDLs before starting query workload - pUtil.executeScenarioDdl(scenario.getPostScenarioDdls(), scenario.getTenantId(), dataLoadTimeSummary); - } + totalDuration = EnvironmentEdgeManager.currentTimeMillis() - start; + return new Info(totalDuration, rowsCreated); + } + }); + return future; + } - private List> getBatches(DataLoadThreadTime dataLoadThreadTime, Scenario scenario) - throws Exception { - RowCalculator - rowCalculator = - new RowCalculator(getThreadPoolSize(), scenario.getRowCount()); - List> writeBatches = new ArrayList<>(); - - for (int i = 0; i < getThreadPoolSize(); i++) { - List - phxMetaCols = - pUtil.getColumnsFromPhoenix(scenario.getSchemaName(), - scenario.getTableNameWithoutSchemaName(), pUtil.getConnection(scenario.getTenantId())); - int threadRowCount = rowCalculator.getNext(); - LOGGER.info( - "Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows."); - Future - write = - upsertData(scenario, phxMetaCols, scenario.getTableName(), threadRowCount, - dataLoadThreadTime, this.useBatchApi); - writeBatches.add(write); - } - if (writeBatches.isEmpty()) { - throw new PherfException( - "Holy shit snacks! Throwing up hands in disbelief and exiting. 
Could not write data for some unknown reason."); - } + public XMLConfigParser getParser() { + return parser; + } - return writeBatches; - } + public RulesApplier getRulesApplier() { + return rulesApplier; + } - private void waitForBatches(DataLoadTimeSummary dataLoadTimeSummary, Scenario scenario, - long start, List> writeBatches) - throws InterruptedException, java.util.concurrent.ExecutionException { - int sumRows = 0, sumDuration = 0; - // Wait for all the batch threads to complete - for (Future write : writeBatches) { - Info writeInfo = write.get(); - sumRows += writeInfo.getRowCount(); - sumDuration += writeInfo.getDuration(); - LOGGER.info("Executor (" + this.hashCode() + ") writes complete with row count (" - + writeInfo.getRowCount() + ") in Ms (" + writeInfo.getDuration() + ")"); - } - long testDuration = EnvironmentEdgeManager.currentTimeMillis() - start; - LOGGER.info("Writes completed with total row count (" + sumRows - + ") with total elapsed time of (" + testDuration - + ") ms and total CPU execution time of (" + sumDuration + ") ms"); - dataLoadTimeSummary - .add(scenario.getTableName(), sumRows, (int) testDuration); - } + public int getBatchSize() { + return batchSize; + } - public Future upsertData(final Scenario scenario, final List columns, - final String tableName, final int rowCount, - final DataLoadThreadTime dataLoadThreadTime, final boolean useBatchApi) { - Future future = pool.submit(new Callable() { - @Override public Info call() throws Exception { - int rowsCreated = 0; - long start = 0, last = 0, duration, totalDuration; - SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - Connection connection = null; - PreparedStatement stmt = null; - try { - connection = pUtil.getConnection(scenario.getTenantId(), properties); - long logStartTime = EnvironmentEdgeManager.currentTimeMillis(); - long maxDuration = (WriteWorkload.this.writeParams == null) ? Long.MAX_VALUE : - WriteWorkload.this.writeParams.getExecutionDurationInMs(); - - int logPerNRows = PherfConstants.LOG_PER_NROWS; - String customizedLogPerNRows = connection.getClientInfo(). - getProperty(PherfConstants.LOG_PER_NROWS_NAME); - if (customizedLogPerNRows!= null) { - logPerNRows = Integer.valueOf(customizedLogPerNRows); - } - last = start = EnvironmentEdgeManager.currentTimeMillis(); - String sql = pUtil.buildSql(columns, tableName); - stmt = connection.prepareStatement(sql); - for (long i = rowCount; (i > 0) && ((EnvironmentEdgeManager.currentTimeMillis() - logStartTime) - < maxDuration); i--) { - stmt = pUtil.buildStatement(rulesApplier, scenario, columns, stmt, simpleDateFormat); - if (useBatchApi) { - stmt.addBatch(); - } else { - rowsCreated += stmt.executeUpdate(); - } - if ((i % getBatchSize()) == 0) { - if (useBatchApi) { - int[] results = stmt.executeBatch(); - for (int x = 0; x < results.length; x++) { - int result = results[x]; - if (result < 1) { - final String msg = - "Failed to write update in batch (update count=" - + result + ")"; - throw new RuntimeException(msg); - } - rowsCreated += result; - } - } - connection.commit(); - duration = EnvironmentEdgeManager.currentTimeMillis() - last; - LOGGER.info("Writer (" + Thread.currentThread().getName() - + ") committed Batch. 
Total " + getBatchSize() - + " rows for this thread (" + this.hashCode() + ") in (" - + duration + ") Ms"); - - if (i % logPerNRows == 0 && i != 0) { - dataLoadThreadTime.add(tableName, - Thread.currentThread().getName(), i, - EnvironmentEdgeManager.currentTimeMillis() - logStartTime); - } - - logStartTime = EnvironmentEdgeManager.currentTimeMillis(); - // Pause for throttling if configured to do so - Thread.sleep(threadSleepDuration); - // Re-compute the start time for the next batch - last = EnvironmentEdgeManager.currentTimeMillis(); - } - } - } catch (SQLException e) { - LOGGER.error("Scenario " + scenario.getName() + " failed with exception ", e); - throw e; - } finally { - // Need to keep the statement open to send the remaining batch of updates - if (!useBatchApi && stmt != null) { - stmt.close(); - } - if (connection != null) { - if (useBatchApi && stmt != null) { - int[] results = stmt.executeBatch(); - for (int x = 0; x < results.length; x++) { - int result = results[x]; - if (result < 1) { - final String msg = - "Failed to write update in batch (update count=" - + result + ")"; - throw new RuntimeException(msg); - } - rowsCreated += result; - } - // Close the statement after our last batch execution. - stmt.close(); - } - - try { - connection.commit(); - duration = EnvironmentEdgeManager.currentTimeMillis() - start; - LOGGER.info("Writer ( " + Thread.currentThread().getName() - + ") committed Final Batch. Duration (" + duration + ") Ms"); - connection.close(); - } catch (SQLException e) { - // Swallow since we are closing anyway - e.printStackTrace(); - } - } - } - totalDuration = EnvironmentEdgeManager.currentTimeMillis() - start; - return new Info(totalDuration, rowsCreated); - } - }); - return future; - } + public int getThreadPoolSize() { + return threadPoolSize; + } - public XMLConfigParser getParser() { - return parser; - } + private class Info { - public RulesApplier getRulesApplier() { - return rulesApplier; - } + private final int rowCount; + private final long duration; - public int getBatchSize() { - return batchSize; + public Info(long duration, int rows) { + this.duration = duration; + this.rowCount = rows; } - public int getThreadPoolSize() { - return threadPoolSize; + public long getDuration() { + return duration; } - private class Info { - - private final int rowCount; - private final long duration; - - public Info(long duration, int rows) { - this.duration = duration; - this.rowCount = rows; - } - - public long getDuration() { - return duration; - } - - public int getRowCount() { - return rowCount; - } + public int getRowCount() { + return rowCount; } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/MultiTenantWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/MultiTenantWorkload.java index c8fa6992719..ed989cece70 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/MultiTenantWorkload.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/MultiTenantWorkload.java @@ -15,67 +15,68 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt; + +import java.util.List; +import java.util.Properties; +import java.util.concurrent.Callable; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.workload.Workload; import org.apache.phoenix.pherf.workload.mt.generators.LoadEventGenerator; +import org.apache.phoenix.pherf.workload.mt.generators.TenantLoadEventGeneratorFactory; import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; -import org.apache.phoenix.pherf.workload.mt.generators.TenantLoadEventGeneratorFactory; -import org.apache.phoenix.pherf.workload.mt.handlers.TenantOperationWorkHandler; import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.Callable; - /** - * This class creates workload for tenant based load profiles. - * It uses @see {@link TenantOperationFactory} in conjunction with - * @see {@link LoadEventGenerator} to generate the load events. - * It then publishes these events onto a RingBuffer based queue. - * The @see {@link TenantOperationWorkHandler} drains the events from the queue and executes them. - * Reference for RingBuffer based queue http://lmax-exchange.github.io/disruptor/ + * This class creates workload for tenant based load profiles. It uses @see + * {@link TenantOperationFactory} in conjunction with + * @see {@link LoadEventGenerator} to generate the load events. It then publishes these events onto + * a RingBuffer based queue. The @see {@link TenantOperationWorkHandler} drains the events from + * the queue and executes them. 
Reference for RingBuffer based queue + * http://lmax-exchange.github.io/disruptor/ */ public class MultiTenantWorkload implements Workload { - private static final Logger LOGGER = LoggerFactory.getLogger(MultiTenantWorkload.class); - private final TenantLoadEventGeneratorFactory evtGeneratorFactory - = new TenantLoadEventGeneratorFactory(); - private final LoadEventGenerator generator; + private static final Logger LOGGER = LoggerFactory.getLogger(MultiTenantWorkload.class); + private final TenantLoadEventGeneratorFactory evtGeneratorFactory = + new TenantLoadEventGeneratorFactory(); + private final LoadEventGenerator generator; + public MultiTenantWorkload(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + Properties properties) { + this.generator = + evtGeneratorFactory.newLoadEventGenerator(phoenixUtil, model, scenario, properties); + } - public MultiTenantWorkload(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - Properties properties) { - this.generator = evtGeneratorFactory.newLoadEventGenerator(phoenixUtil, - model, scenario, properties); - } - - public MultiTenantWorkload(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - List workHandlers, Properties properties) throws Exception { - this.generator = evtGeneratorFactory.newLoadEventGenerator(phoenixUtil, - model, scenario, workHandlers, properties); - } + public MultiTenantWorkload(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + List workHandlers, Properties properties) throws Exception { + this.generator = evtGeneratorFactory.newLoadEventGenerator(phoenixUtil, model, scenario, + workHandlers, properties); + } - @Override public Callable execute() throws Exception { - return new Callable() { - @Override public Void call() throws Exception { - generator.start(); - return null; - } - }; - } + @Override + public Callable execute() throws Exception { + return new Callable() { + @Override + public Void call() throws Exception { + generator.start(); + return null; + } + }; + } - @Override public void complete() { - try { - generator.stop(); - } catch (Exception e) { - LOGGER.error(e.getMessage()); - } + @Override + public void complete() { + try { + generator.stop(); + } catch (Exception e) { + LOGGER.error(e.getMessage()); } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/BaseLoadEventGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/BaseLoadEventGenerator.java index 02980555772..3d23f805a33 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/BaseLoadEventGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/BaseLoadEventGenerator.java @@ -15,203 +15,205 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.generators; -import com.lmax.disruptor.BlockingWaitStrategy; -import com.lmax.disruptor.EventFactory; -import com.lmax.disruptor.ExceptionHandler; -import com.lmax.disruptor.RingBuffer; -import com.lmax.disruptor.WorkHandler; -import com.lmax.disruptor.dsl.Disruptor; -import com.lmax.disruptor.dsl.ProducerType; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Properties; + import org.apache.hadoop.hbase.util.Threads; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; -import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; import org.apache.phoenix.pherf.workload.mt.handlers.TenantOperationWorkHandler; +import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.List; -import java.util.Properties; +import com.lmax.disruptor.BlockingWaitStrategy; +import com.lmax.disruptor.EventFactory; +import com.lmax.disruptor.ExceptionHandler; +import com.lmax.disruptor.RingBuffer; +import com.lmax.disruptor.WorkHandler; +import com.lmax.disruptor.dsl.Disruptor; +import com.lmax.disruptor.dsl.ProducerType; /** * A base class for all load event generators. */ -public abstract class BaseLoadEventGenerator - implements LoadEventGenerator { - protected static final int DEFAULT_NUM_HANDLER_PER_SCENARIO = 4; - protected static final int DEFAULT_BUFFER_SIZE = 8192; - protected static final Logger LOGGER = LoggerFactory.getLogger( - BaseLoadEventGenerator.class); - - protected Disruptor disruptor; - protected List handlers; - protected final Properties properties; +public abstract class BaseLoadEventGenerator implements LoadEventGenerator { + protected static final int DEFAULT_NUM_HANDLER_PER_SCENARIO = 4; + protected static final int DEFAULT_BUFFER_SIZE = 8192; + protected static final Logger LOGGER = LoggerFactory.getLogger(BaseLoadEventGenerator.class); - protected final TenantOperationFactory operationFactory; - protected final ExceptionHandler exceptionHandler; + protected Disruptor disruptor; + protected List handlers; + protected final Properties properties; + protected final TenantOperationFactory operationFactory; + protected final ExceptionHandler exceptionHandler; - private static class WorkloadExceptionHandler implements ExceptionHandler { - private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExceptionHandler.class); + private static class WorkloadExceptionHandler implements ExceptionHandler { + private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExceptionHandler.class); - @Override public void handleEventException(Throwable ex, long sequence, Object event) { - LOGGER.error("Sequence=" + sequence + ", event=" + event, ex); - throw new RuntimeException(ex); - } - - @Override public void handleOnStartException(Throwable ex) { - LOGGER.error("On Start", ex); - throw new RuntimeException(ex); - } - - @Override public void handleOnShutdownException(Throwable ex) { - LOGGER.error("On Shutdown", ex); - throw new RuntimeException(ex); - } + 
@Override + public void handleEventException(Throwable ex, long sequence, Object event) { + LOGGER.error("Sequence=" + sequence + ", event=" + event, ex); + throw new RuntimeException(ex); } - public static class TenantOperationEvent { - TenantOperationInfo tenantOperationInfo; - - public TenantOperationInfo getTenantOperationInfo() { - return tenantOperationInfo; - } - - public void setTenantOperationInfo(TenantOperationInfo tenantOperationInfo) { - this.tenantOperationInfo = tenantOperationInfo; - } - - public static final EventFactory EVENT_FACTORY = new EventFactory() { - public TenantOperationEvent newInstance() { - return new TenantOperationEvent(); - } - }; + @Override + public void handleOnStartException(Throwable ex) { + LOGGER.error("On Start", ex); + throw new RuntimeException(ex); } - public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - List workers, Properties properties) { - this(phoenixUtil, model, scenario, workers, new WorkloadExceptionHandler(), properties); + @Override + public void handleOnShutdownException(Throwable ex) { + LOGGER.error("On Shutdown", ex); + throw new RuntimeException(ex); } + } - public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, - Scenario scenario, Properties properties) { - this(phoenixUtil, model, scenario, null, new WorkloadExceptionHandler(), properties); - } + public static class TenantOperationEvent { + TenantOperationInfo tenantOperationInfo; - public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, - Scenario scenario, - List workers, - ExceptionHandler exceptionHandler, - Properties properties) { + public TenantOperationInfo getTenantOperationInfo() { + return tenantOperationInfo; + } + public void setTenantOperationInfo(TenantOperationInfo tenantOperationInfo) { + this.tenantOperationInfo = tenantOperationInfo; + } - operationFactory = new TenantOperationFactory(phoenixUtil, model, scenario); - if (scenario.getPhoenixProperties() != null) { - properties.putAll(scenario.getPhoenixProperties()); + public static final EventFactory EVENT_FACTORY = + new EventFactory() { + public TenantOperationEvent newInstance() { + return new TenantOperationEvent(); } - this.properties = properties; + }; + } - if (workers == null || workers.isEmpty()) { - workers = getWorkHandlers(properties); - } - this.handlers = workers; - this.exceptionHandler = exceptionHandler; - } + public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + List workers, Properties properties) { + this(phoenixUtil, model, scenario, workers, new WorkloadExceptionHandler(), properties); + } + public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + Properties properties) { + this(phoenixUtil, model, scenario, null, new WorkloadExceptionHandler(), properties); + } - @Override public PhoenixUtil getPhoenixUtil() { return operationFactory.getPhoenixUtil(); } + public BaseLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + List workers, ExceptionHandler exceptionHandler, Properties properties) { - @Override public Scenario getScenario() { - return operationFactory.getScenario(); + operationFactory = new TenantOperationFactory(phoenixUtil, model, scenario); + if (scenario.getPhoenixProperties() != null) { + properties.putAll(scenario.getPhoenixProperties()); } + this.properties = properties; - @Override public DataModel getModel() { - return operationFactory.getModel(); + if (workers == null || workers.isEmpty()) { + 
workers = getWorkHandlers(properties); } - - @Override public Properties getProperties() { - return this.properties; + this.handlers = workers; + this.exceptionHandler = exceptionHandler; + } + + @Override + public PhoenixUtil getPhoenixUtil() { + return operationFactory.getPhoenixUtil(); + } + + @Override + public Scenario getScenario() { + return operationFactory.getScenario(); + } + + @Override + public DataModel getModel() { + return operationFactory.getModel(); + } + + @Override + public Properties getProperties() { + return this.properties; + } + + @Override + public TenantOperationFactory getOperationFactory() { + return operationFactory; + } + + @Override + public void start() throws Exception { + Scenario scenario = operationFactory.getScenario(); + String currentThreadName = Thread.currentThread().getName(); + int bufferSize = DEFAULT_BUFFER_SIZE; + if (properties.containsKey("pherf.mt.buffer_size_per_scenario")) { + bufferSize = Integer.parseInt((String) properties.get("pherf.mt.buffer_size_per_scenario")); } - @Override public TenantOperationFactory getOperationFactory() { - return operationFactory; + disruptor = new Disruptor<>(TenantOperationEvent.EVENT_FACTORY, bufferSize, + new ThreadFactoryBuilder().setNameFormat(currentThreadName + "." + scenario.getName()) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + ProducerType.SINGLE, new BlockingWaitStrategy()); + + this.disruptor.setDefaultExceptionHandler(this.exceptionHandler); + this.disruptor.handleEventsWithWorkerPool(this.handlers.toArray(new WorkHandler[] {})); + RingBuffer ringBuffer = this.disruptor.start(); + long numOperations = scenario.getLoadProfile().getNumOperations(); + while (numOperations > 0) { + TenantOperationInfo sample = next(); + operationFactory.initializeTenant(sample); + --numOperations; + // Publishers claim events in sequence + long sequence = ringBuffer.next(); + TenantOperationEvent event = ringBuffer.get(sequence); + event.setTenantOperationInfo(sample); + // make the event available to EventProcessors + ringBuffer.publish(sequence); + LOGGER.info(String.format("published : %s:%s:%d, %d, %d", scenario.getName(), + scenario.getTableName(), numOperations, ringBuffer.getCursor(), sequence)); } + } - @Override public void start() throws Exception { - Scenario scenario = operationFactory.getScenario(); - String currentThreadName = Thread.currentThread().getName(); - int bufferSize = DEFAULT_BUFFER_SIZE; - if (properties.containsKey("pherf.mt.buffer_size_per_scenario")) { - bufferSize = Integer.parseInt((String)properties.get("pherf.mt.buffer_size_per_scenario")); - } - - disruptor = new Disruptor<>(TenantOperationEvent.EVENT_FACTORY, bufferSize, - new ThreadFactoryBuilder() - .setNameFormat(currentThreadName + "." 
+ scenario.getName()) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) - .build(), - ProducerType.SINGLE, new BlockingWaitStrategy()); - - this.disruptor.setDefaultExceptionHandler(this.exceptionHandler); - this.disruptor.handleEventsWithWorkerPool(this.handlers.toArray(new WorkHandler[] {})); - RingBuffer ringBuffer = this.disruptor.start(); - long numOperations = scenario.getLoadProfile().getNumOperations(); - while (numOperations > 0) { - TenantOperationInfo sample = next(); - operationFactory.initializeTenant(sample); - --numOperations; - // Publishers claim events in sequence - long sequence = ringBuffer.next(); - TenantOperationEvent event = ringBuffer.get(sequence); - event.setTenantOperationInfo(sample); - // make the event available to EventProcessors - ringBuffer.publish(sequence); - LOGGER.info(String.format("published : %s:%s:%d, %d, %d", - scenario.getName(), scenario.getTableName(), - numOperations, ringBuffer.getCursor(), sequence)); - } + @Override + public void stop() throws Exception { + // Wait for the handlers to finish the jobs + if (disruptor != null) { + disruptor.shutdown(); } - @Override public void stop() throws Exception { - // Wait for the handlers to finish the jobs - if (disruptor != null) { - disruptor.shutdown(); - } - - // TODO need to handle asynchronous result publishing - } + // TODO need to handle asynchronous result publishing + } - @Override public List getWorkHandlers(Properties properties) { + @Override + public List getWorkHandlers(Properties properties) { - String handlerName = ""; - try { - handlerName = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - - int handlerCount = DEFAULT_NUM_HANDLER_PER_SCENARIO; - if (properties.containsKey("pherf.mt.handlers_per_scenario")) { - handlerCount = Integer.parseInt((String)properties.get("pherf.mt.handlers_per_scenario")); - } - List workers = Lists.newArrayListWithCapacity(handlerCount); - for (int i = 0; i < handlerCount; i++) { - String handlerId = String.format("%s.%d", handlerName, i + 1); - workers.add(new TenantOperationWorkHandler(operationFactory, handlerId)); - } - return workers; + String handlerName = ""; + try { + handlerName = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); } - abstract public TenantOperationInfo next(); + int handlerCount = DEFAULT_NUM_HANDLER_PER_SCENARIO; + if (properties.containsKey("pherf.mt.handlers_per_scenario")) { + handlerCount = Integer.parseInt((String) properties.get("pherf.mt.handlers_per_scenario")); + } + List workers = Lists.newArrayListWithCapacity(handlerCount); + for (int i = 0; i < handlerCount; i++) { + String handlerId = String.format("%s.%d", handlerName, i + 1); + workers.add(new TenantOperationWorkHandler(operationFactory, handlerId)); + } + return workers; + } + abstract public TenantOperationInfo next(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGenerator.java index 61f8fe709f2..e8f3d65e9df 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGenerator.java @@ -15,48 +15,45 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.generators; +import java.util.List; +import java.util.Properties; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; -import java.util.List; -import java.util.Properties; - /** * An interface that implementers can use to generate load events that can be consumed by - * @see {@link com.lmax.disruptor.WorkHandler} which provide event handling functionality for - * a given event. - * + * @see {@link com.lmax.disruptor.WorkHandler} which provide event handling functionality for a + * given event. * @param load event object */ public interface LoadEventGenerator { - /** - * Initializes and readies the generator for queue based workloads - */ - void start() throws Exception; + /** + * Initializes and readies the generator for queue based workloads + */ + void start() throws Exception; - /** - * Stop the generator and waits for the queues to drain. - */ - void stop() throws Exception; + /** + * Stop the generator and waits for the queues to drain. + */ + void stop() throws Exception; - PhoenixUtil getPhoenixUtil(); + PhoenixUtil getPhoenixUtil(); - Scenario getScenario(); + Scenario getScenario(); - DataModel getModel(); + DataModel getModel(); - Properties getProperties(); + Properties getProperties(); - TenantOperationFactory getOperationFactory(); + TenantOperationFactory getOperationFactory(); - List getWorkHandlers(Properties properties); + List getWorkHandlers(Properties properties); - TenantOperationInfo next(); + TenantOperationInfo next(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGeneratorFactory.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGeneratorFactory.java index 63e482c3cdd..a1748c6d5a2 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGeneratorFactory.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/LoadEventGeneratorFactory.java @@ -15,29 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.generators; +import java.util.List; +import java.util.Properties; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; -import java.util.List; -import java.util.Properties; - /** - * An interface that factory implementers need to implement - * for creating various supported load generators {@link LoadEventGenerator} + * An interface that factory implementers need to implement for creating various supported load + * generators {@link LoadEventGenerator} * @param load event object */ public interface LoadEventGeneratorFactory { - LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, - DataModel model, Scenario scenario, - Properties properties) ; + LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, Properties properties); - LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, - DataModel model, Scenario scenario, - List workHandlers, Properties properties) ; + LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, List workHandlers, Properties properties); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/SequentialLoadEventGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/SequentialLoadEventGenerator.java index 78fc78d9577..b031707852c 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/SequentialLoadEventGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/SequentialLoadEventGenerator.java @@ -15,9 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.generators; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.CyclicBarrier; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.ExecutionType; @@ -33,155 +39,153 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.CyclicBarrier; - /** - * A load generator that generates tenant operation events in the order specified in the - * scenario file. - * The scenario file can also specify on how many iterations to be executed per operation, + * A load generator that generates tenant operation events in the order specified in the scenario + * file. The scenario file can also specify on how many iterations to be executed per operation, * whether the iterations be run in parallel or serially. 
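[Editorial aside, not part of the patch: the class javadoc above says operations are emitted in scenario order, with the handler count controlling how often each operation repeats before the sampler advances. The sketch below reproduces just that ordering logic from SequentialSampler.nextSample() with string stand-ins for Operation ids; the class name and the printed output are illustrative assumptions.]

import java.util.Arrays;
import java.util.List;

public final class SequentialOrderSketch {
  public static void main(String[] args) {
    List<String> operations = Arrays.asList("op1", "op2", "op3"); // stand-ins for Operation ids
    int numHandlers = 2;
    long iteration = 0;
    int opIndex = 0;
    for (int sample = 0; sample < 8; sample++) {
      String op = operations.get(opIndex % operations.size());
      System.out.println("sample " + sample + " -> " + op);
      iteration++;
      if (iteration % numHandlers == 0) {
        opIndex++; // advance only after every handler has seen the current operation
      }
    }
    // Prints: op1, op1, op2, op2, op3, op3, op1, op1
  }
}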
*/ public class SequentialLoadEventGenerator extends BaseLoadEventGenerator { - private static class SequentialSampler { - private final LoadProfile loadProfile; - private final String modelName; - private final String scenarioName; - private final String tableName; - private long iteration; - private int opIndex; - private int numHandlers; - - private final TenantGroup tenantGroup; - private final List operationList; - - public SequentialSampler(List operationList, DataModel model, - Scenario scenario, Properties properties) { - this.modelName = model.getName(); - this.scenarioName = scenario.getName(); - this.tableName = scenario.getTableName(); - this.loadProfile = scenario.getLoadProfile(); - this.operationList = operationList; - - // Track the individual tenant group with single tenant or global connection, - // so that given a generated sample we can use the supplied tenant. - // NOTE : Not sure if there is a case for multiple tenants in a uniform distribution. - // For now keeping it simple. - Preconditions.checkArgument(loadProfile.getTenantDistribution() != null, - "Tenant distribution cannot be null"); - Preconditions.checkArgument(!loadProfile.getTenantDistribution().isEmpty(), - "Tenant group cannot be empty"); - Preconditions.checkArgument(loadProfile.getTenantDistribution().size() == 1, - "Tenant group cannot be more than 1"); - tenantGroup = loadProfile.getTenantDistribution().get(0); - } - - public TenantOperationInfo nextSample() { - Operation op = operationList.get(opIndex % operationList.size()); - String tenantGroupId = tenantGroup.getId(); - String tenantIdPrefix = Strings - .padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); - String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), - tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), 1); - String paddedTenantId = Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); - String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); - - TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, - tenantGroupId, op.getId(), tenantId, op); - - iteration++; - if (iteration % numHandlers == 0) { - opIndex++; - } - return sample; - } - - public int getNumHandlers() { - return numHandlers; - } - - public void setNumHandlers(int handlers) { - numHandlers = handlers; - } + private static class SequentialSampler { + private final LoadProfile loadProfile; + private final String modelName; + private final String scenarioName; + private final String tableName; + private long iteration; + private int opIndex; + private int numHandlers; + private final TenantGroup tenantGroup; + private final List operationList; + + public SequentialSampler(List operationList, DataModel model, Scenario scenario, + Properties properties) { + this.modelName = model.getName(); + this.scenarioName = scenario.getName(); + this.tableName = scenario.getTableName(); + this.loadProfile = scenario.getLoadProfile(); + this.operationList = operationList; + + // Track the individual tenant group with single tenant or global connection, + // so that given a generated sample we can use the supplied tenant. + // NOTE : Not sure if there is a case for multiple tenants in a uniform distribution. + // For now keeping it simple. 
+ Preconditions.checkArgument(loadProfile.getTenantDistribution() != null, + "Tenant distribution cannot be null"); + Preconditions.checkArgument(!loadProfile.getTenantDistribution().isEmpty(), + "Tenant group cannot be empty"); + Preconditions.checkArgument(loadProfile.getTenantDistribution().size() == 1, + "Tenant group cannot be more than 1"); + tenantGroup = loadProfile.getTenantDistribution().get(0); } - protected static final int DEFAULT_NUM_ITERATIONS = 1; - protected static final ExecutionType DEFAULT_EXECUTION_TYPE = ExecutionType.SERIAL; - private final SequentialSampler sampler; - private int numHandlers; - private int numIterations = DEFAULT_NUM_ITERATIONS; - private ExecutionType executionType = DEFAULT_EXECUTION_TYPE; - - public SequentialLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - Properties properties) { - super(phoenixUtil, model, scenario, properties); - this.sampler = new SequentialSampler(operationFactory.getOperations(), model, scenario, properties); - this.sampler.setNumHandlers(this.numHandlers); + public TenantOperationInfo nextSample() { + Operation op = operationList.get(opIndex % operationList.size()); + String tenantGroupId = tenantGroup.getId(); + String tenantIdPrefix = Strings.padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); + String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), + tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), 1); + String paddedTenantId = + Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); + String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); + + TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, + tenantGroupId, op.getId(), tenantId, op); + + iteration++; + if (iteration % numHandlers == 0) { + opIndex++; + } + return sample; } - public SequentialLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - List workHandlers, Properties properties) { - super(phoenixUtil, model, scenario, workHandlers, properties); - this.sampler = new SequentialSampler(operationFactory.getOperations(), model, scenario, properties); - this.sampler.setNumHandlers(this.numHandlers); + public int getNumHandlers() { + return numHandlers; } - public List getWorkHandlers(Properties properties) { - - String handlerName = ""; - try { - handlerName = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - - this.numHandlers = DEFAULT_NUM_HANDLER_PER_SCENARIO; - if (properties.containsKey(PherfConstants.HANDLERS_PER_SCENARIO_PROP_KEY)) { - this.numHandlers = Integer.parseInt((String)properties.get(PherfConstants.HANDLERS_PER_SCENARIO_PROP_KEY)); - } - - if (properties.containsKey(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY)) { - this.numIterations = Integer.parseInt((String)properties.get(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY)); - } - - if (properties.containsKey(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY)) { - this.executionType = ExecutionType.valueOf((String)properties.get(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY)); - switch (executionType) { - case SERIAL: - this.numHandlers = DEFAULT_NUM_ITERATIONS; - break; - case PARALLEL: - this.numHandlers = numIterations; - break; - default: - // Just accepts the defaults, nothing to do here - } - } - - Map rendezvousPoints = Maps.newHashMap(); - CyclicBarrier startBarrier = new CyclicBarrier(numHandlers, new Runnable() { - 
@Override public void run() { - LOGGER.info("Rendezvoused for start of operation execution"); - } - }); - rendezvousPoints.put(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY, startBarrier); - - List workers = Lists.newArrayListWithCapacity(numHandlers); - for (int i = 0; i < numHandlers; i++) { - String handlerId = String.format("%s.%d", handlerName, i + 1); - workers.add(new RendezvousingWorkHandler(operationFactory, handlerId, - rendezvousPoints)); - } - return workers; + public void setNumHandlers(int handlers) { + numHandlers = handlers; } - @Override public TenantOperationInfo next() { - return this.sampler.nextSample(); + } + + protected static final int DEFAULT_NUM_ITERATIONS = 1; + protected static final ExecutionType DEFAULT_EXECUTION_TYPE = ExecutionType.SERIAL; + private final SequentialSampler sampler; + private int numHandlers; + private int numIterations = DEFAULT_NUM_ITERATIONS; + private ExecutionType executionType = DEFAULT_EXECUTION_TYPE; + + public SequentialLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + Properties properties) { + super(phoenixUtil, model, scenario, properties); + this.sampler = + new SequentialSampler(operationFactory.getOperations(), model, scenario, properties); + this.sampler.setNumHandlers(this.numHandlers); + } + + public SequentialLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, + List workHandlers, Properties properties) { + super(phoenixUtil, model, scenario, workHandlers, properties); + this.sampler = + new SequentialSampler(operationFactory.getOperations(), model, scenario, properties); + this.sampler.setNumHandlers(this.numHandlers); + } + + public List getWorkHandlers(Properties properties) { + + String handlerName = ""; + try { + handlerName = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); } + + this.numHandlers = DEFAULT_NUM_HANDLER_PER_SCENARIO; + if (properties.containsKey(PherfConstants.HANDLERS_PER_SCENARIO_PROP_KEY)) { + this.numHandlers = + Integer.parseInt((String) properties.get(PherfConstants.HANDLERS_PER_SCENARIO_PROP_KEY)); + } + + if (properties.containsKey(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY)) { + this.numIterations = Integer + .parseInt((String) properties.get(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY)); + } + + if (properties.containsKey(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY)) { + this.executionType = ExecutionType + .valueOf((String) properties.get(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY)); + switch (executionType) { + case SERIAL: + this.numHandlers = DEFAULT_NUM_ITERATIONS; + break; + case PARALLEL: + this.numHandlers = numIterations; + break; + default: + // Just accepts the defaults, nothing to do here + } + } + + Map rendezvousPoints = Maps.newHashMap(); + CyclicBarrier startBarrier = new CyclicBarrier(numHandlers, new Runnable() { + @Override + public void run() { + LOGGER.info("Rendezvoused for start of operation execution"); + } + }); + rendezvousPoints.put(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY, startBarrier); + + List workers = Lists.newArrayListWithCapacity(numHandlers); + for (int i = 0; i < numHandlers; i++) { + String handlerId = String.format("%s.%d", handlerName, i + 1); + workers.add(new RendezvousingWorkHandler(operationFactory, handlerId, rendezvousPoints)); + } + return workers; + } + + @Override + public TenantOperationInfo next() { + return this.sampler.nextSample(); + } } diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantLoadEventGeneratorFactory.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantLoadEventGeneratorFactory.java index 31c1033ee13..5ae4c97bc88 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantLoadEventGeneratorFactory.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantLoadEventGeneratorFactory.java @@ -15,56 +15,62 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.generators; +import java.util.List; +import java.util.Properties; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; -import java.util.List; -import java.util.Properties; - /** * A factory class for creating various supported load generators {@link LoadEventGenerator} */ -public class TenantLoadEventGeneratorFactory implements - LoadEventGeneratorFactory { - public enum GeneratorType { - WEIGHTED, UNIFORM, SEQUENTIAL - } - @Override public LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, - DataModel model, Scenario scenario, - Properties properties) { - GeneratorType type = GeneratorType.valueOf(scenario.getGeneratorName()); - switch (type) { - case WEIGHTED: - return new WeightedRandomLoadEventGenerator(phoenixUtil, model, scenario, properties); - case UNIFORM: - return new UniformDistributionLoadEventGenerator(phoenixUtil, model, scenario, properties); - case SEQUENTIAL: - return new SequentialLoadEventGenerator(phoenixUtil, model, scenario, properties); - default: - throw new IllegalArgumentException("Unknown generator type"); - } - } +public class TenantLoadEventGeneratorFactory + implements LoadEventGeneratorFactory { + public enum GeneratorType { + WEIGHTED, + UNIFORM, + SEQUENTIAL + } - @Override public LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, - DataModel model, Scenario scenario, - List workHandlers, Properties properties) { - GeneratorType type = GeneratorType.valueOf(scenario.getGeneratorName()); - switch (type) { - case WEIGHTED: - return new WeightedRandomLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, properties); - case UNIFORM: - return new UniformDistributionLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, properties); - case SEQUENTIAL: - return new SequentialLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, properties); - default: - throw new IllegalArgumentException("Unknown generator type"); - } + @Override + public LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, + DataModel model, Scenario scenario, Properties properties) { + GeneratorType type = GeneratorType.valueOf(scenario.getGeneratorName()); + switch (type) { + case WEIGHTED: + return new WeightedRandomLoadEventGenerator(phoenixUtil, model, scenario, properties); + case UNIFORM: + return new UniformDistributionLoadEventGenerator(phoenixUtil, model, scenario, properties); + case SEQUENTIAL: + return new SequentialLoadEventGenerator(phoenixUtil, model, scenario, properties); + default: + throw new IllegalArgumentException("Unknown generator type"); + } + } + @Override + public LoadEventGenerator newLoadEventGenerator(PhoenixUtil phoenixUtil, + DataModel model, 
Scenario scenario, List workHandlers, + Properties properties) { + GeneratorType type = GeneratorType.valueOf(scenario.getGeneratorName()); + switch (type) { + case WEIGHTED: + return new WeightedRandomLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, + properties); + case UNIFORM: + return new UniformDistributionLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, + properties); + case SEQUENTIAL: + return new SequentialLoadEventGenerator(phoenixUtil, model, scenario, workHandlers, + properties); + default: + throw new IllegalArgumentException("Unknown generator type"); } + } + } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantOperationInfo.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantOperationInfo.java index 3b862f325e6..8bcb38b3418 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantOperationInfo.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/TenantOperationInfo.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.generators; import org.apache.phoenix.pherf.workload.mt.operations.Operation; @@ -24,47 +23,50 @@ * Holds information on the tenant operation details. */ public class TenantOperationInfo { - private final String modelName; - private final String scenarioName; - private final String tableName; - private final String tenantId; - private final String tenantGroupId; - private final String operationGroupId; - private final Operation operation; + private final String modelName; + private final String scenarioName; + private final String tableName; + private final String tenantId; + private final String tenantGroupId; + private final String operationGroupId; + private final Operation operation; - public TenantOperationInfo(String modelName, String scenarioName, String tableName, - String tenantGroupId, String operationGroupId, - String tenantId, Operation operation) { - this.modelName = modelName; - this.scenarioName = scenarioName; - this.tableName = tableName; - this.tenantGroupId = tenantGroupId; - this.operationGroupId = operationGroupId; - this.tenantId = tenantId; - this.operation = operation; - } + public TenantOperationInfo(String modelName, String scenarioName, String tableName, + String tenantGroupId, String operationGroupId, String tenantId, Operation operation) { + this.modelName = modelName; + this.scenarioName = scenarioName; + this.tableName = tableName; + this.tenantGroupId = tenantGroupId; + this.operationGroupId = operationGroupId; + this.tenantId = tenantId; + this.operation = operation; + } - public String getModelName() { return modelName; } + public String getModelName() { + return modelName; + } - public String getScenarioName() { return scenarioName; } + public String getScenarioName() { + return scenarioName; + } - public String getTableName() { - return tableName; - } + public String getTableName() { + return tableName; + } - public String getTenantGroupId() { - return tenantGroupId; - } + public String getTenantGroupId() { + return tenantGroupId; + } - public String getOperationGroupId() { - return operationGroupId; - } + public String getOperationGroupId() { + return operationGroupId; + } - public Operation getOperation() { - return operation; - } + public Operation getOperation() { + return operation; + } - public String getTenantId() { - 
return tenantId; - } + public String getTenantId() { + return tenantId; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/UniformDistributionLoadEventGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/UniformDistributionLoadEventGenerator.java index 3dac8bb3c5a..6f054fdddaa 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/UniformDistributionLoadEventGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/UniformDistributionLoadEventGenerator.java @@ -15,106 +15,107 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.generators; +import java.util.List; +import java.util.Properties; +import java.util.Random; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.LoadProfile; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.configuration.TenantGroup; import org.apache.phoenix.pherf.util.PhoenixUtil; -import org.apache.phoenix.pherf.workload.mt.operations.Operation; import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; +import org.apache.phoenix.pherf.workload.mt.operations.Operation; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import java.util.List; -import java.util.Properties; -import java.util.Random; /** - * A load generator that generates a uniform distribution of operations among the given tenant group. + * A load generator that generates a uniform distribution of operations among the given tenant + * group. */ public class UniformDistributionLoadEventGenerator extends BaseLoadEventGenerator { - private static class UniformDistributionSampler { - - private final Random RANDOM = new Random(); - - - private final LoadProfile loadProfile; - private final String modelName; - private final String scenarioName; - private final String tableName; - private final Random distribution; - - private final TenantGroup tenantGroup; - private final List operationList; - - public UniformDistributionSampler(List operationList, DataModel model, - Scenario scenario) { - this.modelName = model.getName(); - this.scenarioName = scenario.getName(); - this.tableName = scenario.getTableName(); - this.loadProfile = scenario.getLoadProfile(); - this.operationList = operationList; - - // Track the individual tenant group with single tenant or global connection, - // so that given a generated sample we can use the supplied tenant. - // NOTE : Not sure if there is a case for multiple tenants in a uniform distribution. - // For now keeping it simple. 
- Preconditions.checkArgument(loadProfile.getTenantDistribution() != null, - "Tenant distribution cannot be null"); - Preconditions.checkArgument(!loadProfile.getTenantDistribution().isEmpty(), - "Tenant group cannot be empty"); - Preconditions.checkArgument(loadProfile.getTenantDistribution().size() == 1, - "Tenant group cannot be more than 1"); - tenantGroup = loadProfile.getTenantDistribution().get(0); - - this.distribution = new Random(); - } - - public TenantOperationInfo nextSample() { - int sampleIndex = this.distribution.nextInt(operationList.size()); - Operation op = operationList.get(sampleIndex); - int numTenants = 1; - - if(tenantGroup.getNumTenants() != 0){ - numTenants = tenantGroup.getNumTenants(); - } - - String tenantGroupId = tenantGroup.getId(); - String tenantIdPrefix = Strings - .padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); - - String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), - tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), RANDOM.nextInt(numTenants)); - - String paddedTenantId = Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); - String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); - - TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, - tenantGroupId, op.getId(), tenantId, op); - return sample; - } + private static class UniformDistributionSampler { + + private final Random RANDOM = new Random(); + + private final LoadProfile loadProfile; + private final String modelName; + private final String scenarioName; + private final String tableName; + private final Random distribution; + + private final TenantGroup tenantGroup; + private final List operationList; + + public UniformDistributionSampler(List operationList, DataModel model, + Scenario scenario) { + this.modelName = model.getName(); + this.scenarioName = scenario.getName(); + this.tableName = scenario.getTableName(); + this.loadProfile = scenario.getLoadProfile(); + this.operationList = operationList; + + // Track the individual tenant group with single tenant or global connection, + // so that given a generated sample we can use the supplied tenant. + // NOTE : Not sure if there is a case for multiple tenants in a uniform distribution. + // For now keeping it simple. 
+ Preconditions.checkArgument(loadProfile.getTenantDistribution() != null, + "Tenant distribution cannot be null"); + Preconditions.checkArgument(!loadProfile.getTenantDistribution().isEmpty(), + "Tenant group cannot be empty"); + Preconditions.checkArgument(loadProfile.getTenantDistribution().size() == 1, + "Tenant group cannot be more than 1"); + tenantGroup = loadProfile.getTenantDistribution().get(0); + + this.distribution = new Random(); } - private final UniformDistributionSampler sampler; + public TenantOperationInfo nextSample() { + int sampleIndex = this.distribution.nextInt(operationList.size()); + Operation op = operationList.get(sampleIndex); + int numTenants = 1; + if (tenantGroup.getNumTenants() != 0) { + numTenants = tenantGroup.getNumTenants(); + } - public UniformDistributionLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - Properties properties) { - super(phoenixUtil, model, scenario, properties); - this.sampler = new UniformDistributionSampler(operationFactory.getOperations(), model, scenario); - } + String tenantGroupId = tenantGroup.getId(); + String tenantIdPrefix = Strings.padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); - public UniformDistributionLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - List workHandlers, Properties properties) { - super(phoenixUtil, model, scenario, workHandlers, properties); - this.sampler = new UniformDistributionSampler(operationFactory.getOperations(), model, scenario); - } + String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), + tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), RANDOM.nextInt(numTenants)); + String paddedTenantId = + Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); + String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); - @Override public TenantOperationInfo next() { - return this.sampler.nextSample(); + TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, + tenantGroupId, op.getId(), tenantId, op); + return sample; } + } + + private final UniformDistributionSampler sampler; + + public UniformDistributionLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, Properties properties) { + super(phoenixUtil, model, scenario, properties); + this.sampler = + new UniformDistributionSampler(operationFactory.getOperations(), model, scenario); + } + + public UniformDistributionLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, List workHandlers, Properties properties) { + super(phoenixUtil, model, scenario, workHandlers, properties); + this.sampler = + new UniformDistributionSampler(operationFactory.getOperations(), model, scenario); + } + + @Override + public TenantOperationInfo next() { + return this.sampler.nextSample(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/WeightedRandomLoadEventGenerator.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/WeightedRandomLoadEventGenerator.java index 5f366bc50f5..2e599cbeeb8 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/WeightedRandomLoadEventGenerator.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/generators/WeightedRandomLoadEventGenerator.java @@ -15,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
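For reference, the tenant-id construction used by the sampler above (pad the group id, format in a tenant index, then pad and clip to the configured length) can be illustrated with a small standalone sketch. The profile values and the fixed tenant index below are hypothetical, and the sketch uses plain Guava Strings.padStart rather than the shaded phoenix-thirdparty copy:

import com.google.common.base.Strings;

// Standalone illustration of the sampler's tenant-id construction.
// All profile values here are made up; the real ones come from LoadProfile.
public class TenantIdSketch {
  public static void main(String[] args) {
    String tenantGroupId = "G1";      // hypothetical tenant group id
    int groupIdLength = 4;            // hypothetical loadProfile.getGroupIdLength()
    int tenantIdLength = 15;          // hypothetical loadProfile.getTenantIdLength()
    String tenantIdFormat = "%s%08d"; // hypothetical loadProfile.getTenantIdFormat()
    int tenantIndex = 7;              // stands in for RANDOM.nextInt(numTenants)

    // Left-pad the group id so the prefix always fills groupIdLength characters.
    String prefix = Strings.padStart(tenantGroupId, groupIdLength, 'x'); // "xxG1"
    // Combine the prefix and the tenant index using the configured format.
    String formatted =
      String.format(tenantIdFormat, prefix.substring(0, groupIdLength), tenantIndex); // "xxG100000007"
    // Pad and clip to the fixed tenant-id length so every generated id is uniform.
    String tenantId =
      Strings.padStart(formatted, tenantIdLength, 'x').substring(0, tenantIdLength);
    System.out.println(tenantId); // prints "xxxxxG100000007"
  }
}

The final substring clips any id that comes out longer than tenantIdLength, so the format and lengths in a real load profile need to be chosen consistently.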
*/ - package org.apache.phoenix.pherf.workload.mt.generators; -import org.apache.phoenix.pherf.util.PhoenixUtil; -import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; -import org.apache.phoenix.thirdparty.com.google.common.base.Strings; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; + import org.apache.commons.math3.distribution.EnumeratedDistribution; import org.apache.commons.math3.util.Pair; import org.apache.phoenix.pherf.configuration.DataModel; @@ -31,156 +29,159 @@ import org.apache.phoenix.pherf.configuration.OperationGroup; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.configuration.TenantGroup; +import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.handlers.PherfWorkHandler; import org.apache.phoenix.pherf.workload.mt.operations.Operation; - -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Random; +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; +import org.apache.phoenix.thirdparty.com.google.common.base.Strings; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; /** - * A perf load event generator based on the supplied load profile. - * The load profile enumerates the distribution of operation among the different tenant group - * which is used by the load generator to generate the operation events for the various tenants. + * A perf load event generator based on the supplied load profile. The load profile enumerates the + * distribution of operation among the different tenant group which is used by the load generator to + * generate the operation events for the various tenants. */ public class WeightedRandomLoadEventGenerator extends BaseLoadEventGenerator { - private static class WeightedRandomSampler { - private static String AUTO_WEIGHTED_OPERATION_ID = "xxxxxx"; - private final Random RANDOM = new Random(); - private final LoadProfile loadProfile; - private final String modelName; - private final String scenarioName; - private final String tableName; - private final EnumeratedDistribution distribution; - - private final Map tenantGroupMap = Maps.newHashMap(); - private final Map operationMap = Maps.newHashMap(); - private final List autoWeightedOperations = Lists.newArrayList(); - private final int numAutoWeightedOperations; - - public WeightedRandomSampler(List operationList, DataModel model, Scenario scenario) { - this.modelName = model.getName(); - this.scenarioName = scenario.getName(); - this.tableName = scenario.getTableName(); - this.loadProfile = scenario.getLoadProfile(); - - // Track the individual tenant group sizes, - // so that given a generated sample we can get a random tenant for a group. 
- for (TenantGroup tg : loadProfile.getTenantDistribution()) { - tenantGroupMap.put(tg.getId(), tg); - } - Preconditions.checkArgument(!tenantGroupMap.isEmpty(), - "Tenant group cannot be empty"); - - for (Operation op : operationList) { - for (OperationGroup loadOp : loadProfile.getOpDistribution()) { - if (op.getId().compareTo(loadOp.getId()) == 0) { - operationMap.put(op.getId(), op); - } - } - } - Preconditions.checkArgument(!operationMap.isEmpty(), - "Operation list and load profile operation do not match"); - this.distribution = initProbabilityDistribution(scenario.getLoadProfile()); - this.numAutoWeightedOperations = autoWeightedOperations.size(); - + private static class WeightedRandomSampler { + private static String AUTO_WEIGHTED_OPERATION_ID = "xxxxxx"; + private final Random RANDOM = new Random(); + private final LoadProfile loadProfile; + private final String modelName; + private final String scenarioName; + private final String tableName; + private final EnumeratedDistribution distribution; + + private final Map tenantGroupMap = Maps.newHashMap(); + private final Map operationMap = Maps.newHashMap(); + private final List autoWeightedOperations = Lists.newArrayList(); + private final int numAutoWeightedOperations; + + public WeightedRandomSampler(List operationList, DataModel model, + Scenario scenario) { + this.modelName = model.getName(); + this.scenarioName = scenario.getName(); + this.tableName = scenario.getTableName(); + this.loadProfile = scenario.getLoadProfile(); + + // Track the individual tenant group sizes, + // so that given a generated sample we can get a random tenant for a group. + for (TenantGroup tg : loadProfile.getTenantDistribution()) { + tenantGroupMap.put(tg.getId(), tg); + } + Preconditions.checkArgument(!tenantGroupMap.isEmpty(), "Tenant group cannot be empty"); + + for (Operation op : operationList) { + for (OperationGroup loadOp : loadProfile.getOpDistribution()) { + if (op.getId().compareTo(loadOp.getId()) == 0) { + operationMap.put(op.getId(), op); + } } + } + Preconditions.checkArgument(!operationMap.isEmpty(), + "Operation list and load profile operation do not match"); + this.distribution = initProbabilityDistribution(scenario.getLoadProfile()); + this.numAutoWeightedOperations = autoWeightedOperations.size(); - public TenantOperationInfo nextSample() { - String sampleIndex = this.distribution.sample(); - String[] parts = sampleIndex.split(":"); - String tenantGroupId = parts[0]; - String opId = parts[1]; - - Operation op = operationMap.get(opId); - if (op == null && opId.compareTo(AUTO_WEIGHTED_OPERATION_ID) == 0) { - opId = autoWeightedOperations.get(RANDOM.nextInt(numAutoWeightedOperations)); - op = operationMap.get(opId); - } - int numTenants = tenantGroupMap.get(tenantGroupId).getNumTenants(); - String tenantIdPrefix = Strings.padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); - String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), - tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), RANDOM.nextInt(numTenants)); - String paddedTenantId = Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); - String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); - - TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, - tenantGroupId, opId, tenantId, op); - return sample; - } - - private EnumeratedDistribution initProbabilityDistribution(LoadProfile loadProfile) { - double totalTenantGroupWeight = 0.0f; - double totalOperationWeight = 
0.0f; - double remainingOperationWeight = 0.0f; - - // Sum the weights to find the total weight, - // so that the weights can be used in the total probability distribution. - for (TenantGroup tg : loadProfile.getTenantDistribution()) { - Preconditions.checkArgument(tg.getWeight() > 0.0f, - "Tenant group weight cannot be less than zero"); - totalTenantGroupWeight += tg.getWeight(); - } - for (OperationGroup op : loadProfile.getOpDistribution()) { - if (op.getWeight() > 0.0f) { - totalOperationWeight += op.getWeight(); - } else { - autoWeightedOperations.add(op.getId()); - } - } - - if (!autoWeightedOperations.isEmpty()) { - remainingOperationWeight = 100.0f - totalOperationWeight; - totalOperationWeight = 100.0f; - } - - Preconditions.checkArgument(totalTenantGroupWeight == 100.0f, - "Total tenant group weight cannot be <> 100.0"); - Preconditions.checkArgument(totalOperationWeight == 100.0f, - "Total operation group weight cannot be <> 100.0"); - - // Initialize the sample probability distribution - List> pmf = Lists.newArrayList(); - double totalWeight = totalTenantGroupWeight * totalOperationWeight; - for (TenantGroup tg : loadProfile.getTenantDistribution()) { - for (OperationGroup op : loadProfile.getOpDistribution()) { - int opWeight = op.getWeight(); - if (opWeight > 0.0f) { - String sampleName = String.format("%s:%s", tg.getId(), op.getId()); - double probability = (tg.getWeight() * opWeight)/totalWeight; - pmf.add(new Pair(sampleName, probability)); - } - } - - if (!autoWeightedOperations.isEmpty()) { - String sampleName = String.format("%s:%s", tg.getId(), AUTO_WEIGHTED_OPERATION_ID); - double probability = (tg.getWeight() * remainingOperationWeight)/totalWeight; - pmf.add(new Pair(sampleName, probability)); - } - } - EnumeratedDistribution distribution = new EnumeratedDistribution(pmf); - return distribution; - } } - private final WeightedRandomSampler sampler; - - public WeightedRandomLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - Properties properties) { - super(phoenixUtil, model, scenario, properties); - this.sampler = new WeightedRandomSampler(operationFactory.getOperations(), model, scenario); + public TenantOperationInfo nextSample() { + String sampleIndex = this.distribution.sample(); + String[] parts = sampleIndex.split(":"); + String tenantGroupId = parts[0]; + String opId = parts[1]; + + Operation op = operationMap.get(opId); + if (op == null && opId.compareTo(AUTO_WEIGHTED_OPERATION_ID) == 0) { + opId = autoWeightedOperations.get(RANDOM.nextInt(numAutoWeightedOperations)); + op = operationMap.get(opId); + } + int numTenants = tenantGroupMap.get(tenantGroupId).getNumTenants(); + String tenantIdPrefix = Strings.padStart(tenantGroupId, loadProfile.getGroupIdLength(), 'x'); + String formattedTenantId = String.format(loadProfile.getTenantIdFormat(), + tenantIdPrefix.substring(0, loadProfile.getGroupIdLength()), RANDOM.nextInt(numTenants)); + String paddedTenantId = + Strings.padStart(formattedTenantId, loadProfile.getTenantIdLength(), 'x'); + String tenantId = paddedTenantId.substring(0, loadProfile.getTenantIdLength()); + + TenantOperationInfo sample = new TenantOperationInfo(modelName, scenarioName, tableName, + tenantGroupId, opId, tenantId, op); + return sample; } - public WeightedRandomLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario, - List workHandlers, Properties properties) { - super(phoenixUtil, model, scenario, workHandlers, properties); - this.sampler = new 
WeightedRandomSampler(operationFactory.getOperations(), model, scenario); - } + private EnumeratedDistribution initProbabilityDistribution(LoadProfile loadProfile) { + double totalTenantGroupWeight = 0.0f; + double totalOperationWeight = 0.0f; + double remainingOperationWeight = 0.0f; + + // Sum the weights to find the total weight, + // so that the weights can be used in the total probability distribution. + for (TenantGroup tg : loadProfile.getTenantDistribution()) { + Preconditions.checkArgument(tg.getWeight() > 0.0f, + "Tenant group weight cannot be less than zero"); + totalTenantGroupWeight += tg.getWeight(); + } + for (OperationGroup op : loadProfile.getOpDistribution()) { + if (op.getWeight() > 0.0f) { + totalOperationWeight += op.getWeight(); + } else { + autoWeightedOperations.add(op.getId()); + } + } + + if (!autoWeightedOperations.isEmpty()) { + remainingOperationWeight = 100.0f - totalOperationWeight; + totalOperationWeight = 100.0f; + } + + Preconditions.checkArgument(totalTenantGroupWeight == 100.0f, + "Total tenant group weight cannot be <> 100.0"); + Preconditions.checkArgument(totalOperationWeight == 100.0f, + "Total operation group weight cannot be <> 100.0"); + + // Initialize the sample probability distribution + List> pmf = Lists.newArrayList(); + double totalWeight = totalTenantGroupWeight * totalOperationWeight; + for (TenantGroup tg : loadProfile.getTenantDistribution()) { + for (OperationGroup op : loadProfile.getOpDistribution()) { + int opWeight = op.getWeight(); + if (opWeight > 0.0f) { + String sampleName = String.format("%s:%s", tg.getId(), op.getId()); + double probability = (tg.getWeight() * opWeight) / totalWeight; + pmf.add(new Pair(sampleName, probability)); + } + } - @Override public TenantOperationInfo next() { - return this.sampler.nextSample(); + if (!autoWeightedOperations.isEmpty()) { + String sampleName = String.format("%s:%s", tg.getId(), AUTO_WEIGHTED_OPERATION_ID); + double probability = (tg.getWeight() * remainingOperationWeight) / totalWeight; + pmf.add(new Pair(sampleName, probability)); + } + } + EnumeratedDistribution distribution = new EnumeratedDistribution(pmf); + return distribution; } + } + + private final WeightedRandomSampler sampler; + + public WeightedRandomLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, Properties properties) { + super(phoenixUtil, model, scenario, properties); + this.sampler = new WeightedRandomSampler(operationFactory.getOperations(), model, scenario); + } + + public WeightedRandomLoadEventGenerator(PhoenixUtil phoenixUtil, DataModel model, + Scenario scenario, List workHandlers, Properties properties) { + super(phoenixUtil, model, scenario, workHandlers, properties); + this.sampler = new WeightedRandomSampler(operationFactory.getOperations(), model, scenario); + } + + @Override + public TenantOperationInfo next() { + return this.sampler.nextSample(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/PherfWorkHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/PherfWorkHandler.java index fc82b2e7d83..77192162058 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/PherfWorkHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/PherfWorkHandler.java @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
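The weighted sampler above keys its probability mass function by "tenantGroupId:operationGroupId" and picks both in a single draw. A minimal standalone sketch of that idea with made-up weights (the real weights come from the load profile, where each set must sum to 100):

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.math3.distribution.EnumeratedDistribution;
import org.apache.commons.math3.util.Pair;

// Minimal sketch of the joint "tenantGroup:operation" distribution with
// hypothetical weights; the real PMF is built from the scenario's load profile.
public class WeightedSampleSketch {
  public static void main(String[] args) {
    double[] tenantWeights = { 60.0, 40.0 };          // hypothetical tenant-group weights
    String[] tenantGroups = { "tg1", "tg2" };
    double[] opWeights = { 70.0, 30.0 };              // hypothetical operation-group weights
    String[] opGroups = { "upsertOp", "queryOp" };

    double totalWeight = 100.0 * 100.0;
    List<Pair<String, Double>> pmf = new ArrayList<>();
    for (int t = 0; t < tenantGroups.length; t++) {
      for (int o = 0; o < opGroups.length; o++) {
        String key = tenantGroups[t] + ":" + opGroups[o];
        pmf.add(new Pair<>(key, (tenantWeights[t] * opWeights[o]) / totalWeight));
      }
    }

    EnumeratedDistribution<String> distribution = new EnumeratedDistribution<>(pmf);
    // Each sample yields a tenant group and an operation group together,
    // e.g. "tg1:upsertOp" roughly 42% of the time (0.6 * 0.7).
    System.out.println(distribution.sample());
  }
}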
*/ - package org.apache.phoenix.pherf.workload.mt.handlers; -import com.lmax.disruptor.WorkHandler; +import java.util.List; + import org.apache.phoenix.pherf.result.ResultValue; import org.apache.phoenix.pherf.workload.mt.operations.OperationStats; -import java.util.List; +import com.lmax.disruptor.WorkHandler; public interface PherfWorkHandler extends WorkHandler { - List> getResults(); + List> getResults(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/RendezvousingWorkHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/RendezvousingWorkHandler.java index c70e0db4df6..575eb405555 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/RendezvousingWorkHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/RendezvousingWorkHandler.java @@ -15,11 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.handlers; -import com.lmax.disruptor.LifecycleAware; -import com.lmax.disruptor.WorkHandler; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CyclicBarrier; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.result.ResultValue; @@ -34,70 +36,70 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CyclicBarrier; +import com.lmax.disruptor.LifecycleAware; +import com.lmax.disruptor.WorkHandler; /** - * A handler {@link WorkHandler} for simple orchestrations using the supplied rendezvous points - * The handler will wait for the rendezvous to happen before executing the operations {@link Operation} + * A handler {@link WorkHandler} for simple orchestrations using the supplied rendezvous points The + * handler will wait for the rendezvous to happen before executing the operations {@link Operation} * The handlers as in the basic {@link TenantOperationWorkHandler} will pick up the operation events - * as and when they become available on the {@link com.lmax.disruptor.RingBuffer} - * when published by the workload generator {@link MultiTenantWorkload} + * as and when they become available on the {@link com.lmax.disruptor.RingBuffer} when published by + * the workload generator {@link MultiTenantWorkload} */ -public class RendezvousingWorkHandler implements PherfWorkHandler, - LifecycleAware { - private static final Logger LOGGER = LoggerFactory.getLogger(RendezvousingWorkHandler.class); - private final String handlerId; - private final TenantOperationFactory operationFactory; - private final Map rendezvousPoints; +public class RendezvousingWorkHandler + implements PherfWorkHandler, LifecycleAware { + private static final Logger LOGGER = LoggerFactory.getLogger(RendezvousingWorkHandler.class); + private final String handlerId; + private final TenantOperationFactory operationFactory; + private final Map rendezvousPoints; - public RendezvousingWorkHandler(TenantOperationFactory operationFactory, - String handlerId, Map rendezvousPoints) { - this.handlerId = handlerId; - this.operationFactory = operationFactory; - this.rendezvousPoints = rendezvousPoints; - } + public RendezvousingWorkHandler(TenantOperationFactory operationFactory, String handlerId, + Map rendezvousPoints) { + this.handlerId = 
handlerId; + this.operationFactory = operationFactory; + this.rendezvousPoints = rendezvousPoints; + } - @Override - public void onEvent(TenantOperationEvent event) - throws Exception { - TenantOperationInfo input = event.getTenantOperationInfo(); - Supplier> opSupplier = - operationFactory.getOperationSupplier(input); + @Override + public void onEvent(TenantOperationEvent event) throws Exception { + TenantOperationInfo input = event.getTenantOperationInfo(); + Supplier> opSupplier = + operationFactory.getOperationSupplier(input); - boolean startRendezvousEnabled = rendezvousPoints.containsKey(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY); - if (startRendezvousEnabled) { - rendezvousPoints.get(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY).await(); - } - OperationStats stats = opSupplier.get().apply(input); - stats.setHandlerId(handlerId); - - // TODO need to handle asynchronous result publishing - boolean resultsRendezvousEnabled = rendezvousPoints.containsKey(PherfConstants.MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY); - if (resultsRendezvousEnabled) { - rendezvousPoints.get(PherfConstants.MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY).await(); - } - LOGGER.info(operationFactory.getPhoenixUtil().getGSON().toJson(stats)); + boolean startRendezvousEnabled = + rendezvousPoints.containsKey(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY); + if (startRendezvousEnabled) { + rendezvousPoints.get(PherfConstants.MT_HANDLER_START_RENDEZVOUS_PROP_KEY).await(); } + OperationStats stats = opSupplier.get().apply(input); + stats.setHandlerId(handlerId); - @Override - public void onStart() { - Scenario scenario = operationFactory.getScenario(); - LOGGER.info(String.format("TenantOperationWorkHandler started for %s:%s", - scenario.getName(), scenario.getTableName())); + // TODO need to handle asynchronous result publishing + boolean resultsRendezvousEnabled = + rendezvousPoints.containsKey(PherfConstants.MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY); + if (resultsRendezvousEnabled) { + rendezvousPoints.get(PherfConstants.MT_HANDLER_RESULTS_RENDEZVOUS_PROP_KEY).await(); } + LOGGER.info(operationFactory.getPhoenixUtil().getGSON().toJson(stats)); + } - @Override - public void onShutdown() { - Scenario scenario = operationFactory.getScenario(); - LOGGER.info(String.format("TenantOperationWorkHandler stopped for %s:%s", - scenario.getName(), scenario.getTableName())); - } + @Override + public void onStart() { + Scenario scenario = operationFactory.getScenario(); + LOGGER.info(String.format("TenantOperationWorkHandler started for %s:%s", scenario.getName(), + scenario.getTableName())); + } - @Override public List> getResults() { - return new ArrayList<>(); - } + @Override + public void onShutdown() { + Scenario scenario = operationFactory.getScenario(); + LOGGER.info(String.format("TenantOperationWorkHandler stopped for %s:%s", scenario.getName(), + scenario.getTableName())); + } + + @Override + public List> getResults() { + return new ArrayList<>(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/TenantOperationWorkHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/TenantOperationWorkHandler.java index b3c9824ad7e..ab2b5909e5a 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/TenantOperationWorkHandler.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/handlers/TenantOperationWorkHandler.java @@ -15,73 +15,71 @@ * See the License for the specific language governing 
permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.handlers; +import java.util.ArrayList; +import java.util.List; + +import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.result.ResultValue; import org.apache.phoenix.pherf.workload.mt.MultiTenantWorkload; -import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; +import org.apache.phoenix.pherf.workload.mt.generators.BaseLoadEventGenerator.TenantOperationEvent; import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; import org.apache.phoenix.pherf.workload.mt.operations.Operation; +import org.apache.phoenix.pherf.workload.mt.operations.OperationStats; +import org.apache.phoenix.pherf.workload.mt.operations.TenantOperationFactory; import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Supplier; -import com.lmax.disruptor.LifecycleAware; -import com.lmax.disruptor.WorkHandler; -import org.apache.phoenix.pherf.configuration.Scenario; -import org.apache.phoenix.pherf.result.ResultValue; -import org.apache.phoenix.pherf.workload.mt.operations.OperationStats; -import org.apache.phoenix.pherf.workload.mt.generators.BaseLoadEventGenerator.TenantOperationEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; +import com.lmax.disruptor.LifecycleAware; +import com.lmax.disruptor.WorkHandler; /** - * A handler {@link WorkHandler} for - * executing the operations {@link Operation} - * as and when they become available on the {@link com.lmax.disruptor.RingBuffer} - * when published by the workload generator {@link MultiTenantWorkload} + * A handler {@link WorkHandler} for executing the operations {@link Operation} as and when they + * become available on the {@link com.lmax.disruptor.RingBuffer} when published by the workload + * generator {@link MultiTenantWorkload} */ -public class TenantOperationWorkHandler implements PherfWorkHandler, - LifecycleAware { - private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationWorkHandler.class); - private final String handlerId; - private final TenantOperationFactory operationFactory; +public class TenantOperationWorkHandler + implements PherfWorkHandler, LifecycleAware { + private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationWorkHandler.class); + private final String handlerId; + private final TenantOperationFactory operationFactory; - public TenantOperationWorkHandler(TenantOperationFactory operationFactory, - String handlerId) { - this.handlerId = handlerId; - this.operationFactory = operationFactory; - } + public TenantOperationWorkHandler(TenantOperationFactory operationFactory, String handlerId) { + this.handlerId = handlerId; + this.operationFactory = operationFactory; + } - @Override - public void onEvent(TenantOperationEvent event) - throws Exception { - TenantOperationInfo input = event.getTenantOperationInfo(); - Supplier> opSupplier = - operationFactory.getOperationSupplier(input); - OperationStats stats = opSupplier.get().apply(input); - stats.setHandlerId(handlerId); - // TODO need to handle asynchronous result publishing - LOGGER.info(operationFactory.getPhoenixUtil().getGSON().toJson(stats)); - } + @Override + public void onEvent(TenantOperationEvent event) throws Exception { + TenantOperationInfo input = event.getTenantOperationInfo(); + Supplier> opSupplier = + 
operationFactory.getOperationSupplier(input); + OperationStats stats = opSupplier.get().apply(input); + stats.setHandlerId(handlerId); + // TODO need to handle asynchronous result publishing + LOGGER.info(operationFactory.getPhoenixUtil().getGSON().toJson(stats)); + } - @Override - public void onStart() { - Scenario scenario = operationFactory.getScenario(); - LOGGER.info(String.format("TenantOperationWorkHandler started for %s:%s", - scenario.getName(), scenario.getTableName())); - } + @Override + public void onStart() { + Scenario scenario = operationFactory.getScenario(); + LOGGER.info(String.format("TenantOperationWorkHandler started for %s:%s", scenario.getName(), + scenario.getTableName())); + } - @Override - public void onShutdown() { - Scenario scenario = operationFactory.getScenario(); - LOGGER.info(String.format("TenantOperationWorkHandler stopped for %s:%s", - scenario.getName(), scenario.getTableName())); - } + @Override + public void onShutdown() { + Scenario scenario = operationFactory.getScenario(); + LOGGER.info(String.format("TenantOperationWorkHandler stopped for %s:%s", scenario.getName(), + scenario.getTableName())); + } - @Override public List> getResults() { - return new ArrayList<>(); - } + @Override + public List> getResults() { + return new ArrayList<>(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/BaseOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/BaseOperationSupplier.java index 7ac2f419f78..0aeec8a9e22 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/BaseOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/BaseOperationSupplier.java @@ -15,34 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
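The start rendezvous used by RendezvousingWorkHandler and the sequential generator boils down to every handler parking on one shared CyclicBarrier before any operation runs, with a barrier action that logs once all parties arrive. A small standalone sketch of that pattern (the thread pool and handler ids are hypothetical; the real handlers are driven by the Disruptor ring buffer):

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the start-rendezvous pattern: no handler proceeds until all
// handlers have reached the barrier, then the barrier action fires once.
public class RendezvousSketch {
  public static void main(String[] args) throws InterruptedException {
    int numHandlers = 3; // hypothetical handlers-per-scenario setting
    CyclicBarrier startBarrier = new CyclicBarrier(numHandlers,
        () -> System.out.println("Rendezvoused for start of operation execution"));

    ExecutorService pool = Executors.newFixedThreadPool(numHandlers);
    for (int i = 0; i < numHandlers; i++) {
      final String handlerId = "host." + (i + 1);
      pool.submit(() -> {
        startBarrier.await();                       // block until all handlers arrive
        System.out.println(handlerId + " executing operation");
        return null;                                // Callable so await() may throw
      });
    }
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}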
*/ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Supplier; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.LoadProfile; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.rules.RulesApplier; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.base.Supplier; /** * An abstract base class for all OperationSuppliers */ -public abstract class BaseOperationSupplier implements Supplier> { +public abstract class BaseOperationSupplier + implements Supplier> { - final PhoenixUtil phoenixUtil; - final DataModel model; - final Scenario scenario; - final RulesApplier rulesApplier; - final LoadProfile loadProfile; + final PhoenixUtil phoenixUtil; + final DataModel model; + final Scenario scenario; + final RulesApplier rulesApplier; + final LoadProfile loadProfile; - public BaseOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - this.phoenixUtil = phoenixUtil; - this.model = model; - this.scenario = scenario; - this.rulesApplier = new RulesApplier(model); - this.loadProfile = this.scenario.getLoadProfile(); - } + public BaseOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + this.phoenixUtil = phoenixUtil; + this.model = model; + this.scenario = scenario; + this.rulesApplier = new RulesApplier(model); + this.loadProfile = this.scenario.getLoadProfile(); + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperation.java index 6dd9262a5b5..ed268c77f2a 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; import org.apache.phoenix.pherf.configuration.IdleTime; @@ -25,5 +24,5 @@ * @see {@link OperationType#IDLE_TIME} */ public interface IdleTimeOperation extends Operation { - IdleTime getIdleTime(); + IdleTime getIdleTime(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperationSupplier.java index 8bfca0090cb..71b121d8354 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/IdleTimeOperationSupplier.java @@ -15,64 +15,63 @@ * See the License for the specific language governing permissions and * limitations under the License. 
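Each operation supplier above is a Supplier that hands out a Function from the sampled operation input to an OperationStats. A simplified stand-in for that shape, using java.util.function instead of the shaded Guava Function/Supplier and plain strings instead of the Pherf types:

import java.util.function.Function;
import java.util.function.Supplier;

// Shape of the operation-supplier pattern: a Supplier hands out a Function,
// and each event becomes a stats line by applying that Function.
// Types and timing here are simplified stand-ins, not the Pherf classes.
public class OperationSupplierSketch {

  static Supplier<Function<String, String>> idleTimeSupplier(long idleMillis) {
    return () -> tenantId -> {
      long start = System.currentTimeMillis();
      int status = 0;
      try {
        Thread.sleep(idleMillis);        // stand-in for the configured idle time
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        status = -1;
      }
      long duration = System.currentTimeMillis() - start;
      return String.format("tenant=%s status=%d durationMs=%d", tenantId, status, duration);
    };
  }

  public static void main(String[] args) {
    Function<String, String> op = idleTimeSupplier(50L).get();
    System.out.println(op.apply("xxG100000001"));
  }
}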
*/ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import java.util.concurrent.TimeUnit; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.IdleTime; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.TimeUnit; - /** * A supplier of {@link Function} that takes {@link IdleTimeOperation} as an input. */ public class IdleTimeOperationSupplier extends BaseOperationSupplier { - private static final Logger LOGGER = LoggerFactory.getLogger(IdleTimeOperationSupplier.class); + private static final Logger LOGGER = LoggerFactory.getLogger(IdleTimeOperationSupplier.class); - public IdleTimeOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - super(phoenixUtil, model, scenario); - } + public IdleTimeOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + super(phoenixUtil, model, scenario); + } - @Override - public Function get() { + @Override + public Function get() { - return new Function() { + return new Function() { - @Override - public OperationStats apply(final TenantOperationInfo input) { - Preconditions.checkNotNull(input); - final IdleTimeOperation operation = (IdleTimeOperation) input.getOperation(); - final IdleTime idleTime = operation.getIdleTime(); + @Override + public OperationStats apply(final TenantOperationInfo input) { + Preconditions.checkNotNull(input); + final IdleTimeOperation operation = (IdleTimeOperation) input.getOperation(); + final IdleTime idleTime = operation.getIdleTime(); - final String tenantId = input.getTenantId(); - final String tenantGroup = input.getTenantGroupId(); - final String opGroup = input.getOperationGroupId(); - final String tableName = input.getTableName(); - final String scenarioName = input.getScenarioName(); - final String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName, - opGroup, tenantGroup, tenantId); + final String tenantId = input.getTenantId(); + final String tenantGroup = input.getTenantGroupId(); + final String opGroup = input.getOperationGroupId(); + final String tableName = input.getTableName(); + final String scenarioName = input.getScenarioName(); + final String opName = + String.format("%s:%s:%s:%s:%s", scenarioName, tableName, opGroup, tenantGroup, tenantId); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - int status = 0; + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + int status = 0; - // Sleep for the specified time to simulate idle time. - try { - TimeUnit.MILLISECONDS.sleep(idleTime.getIdleTime()); - } catch (InterruptedException ie) { - LOGGER.error("Operation " + opName + " failed with exception ", ie); - status = -1; - } - long duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; - return new OperationStats(input, startTime, status, 0, duration); - } - }; - } + // Sleep for the specified time to simulate idle time. 
+ try { + TimeUnit.MILLISECONDS.sleep(idleTime.getIdleTime()); + } catch (InterruptedException ie) { + LOGGER.error("Operation " + opName + " failed with exception ", ie); + status = -1; + } + long duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; + return new OperationStats(input, startTime, status, 0, duration); + } + }; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/Operation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/Operation.java index 65567846487..aecdf448daf 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/Operation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/Operation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; /** @@ -23,9 +22,15 @@ * @see {@link org.apache.phoenix.pherf.configuration.LoadProfile} */ public interface Operation { - enum OperationType { - PRE_RUN, UPSERT, SELECT, IDLE_TIME, USER_DEFINED - } - String getId(); - OperationType getType(); + enum OperationType { + PRE_RUN, + UPSERT, + SELECT, + IDLE_TIME, + USER_DEFINED + } + + String getId(); + + OperationType getType(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/OperationStats.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/OperationStats.java index 8e4a44d0ab5..2542a02e66f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/OperationStats.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/OperationStats.java @@ -15,94 +15,100 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.result.ResultValue; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.pherf.workload.mt.operations.Operation; - import java.util.ArrayList; import java.util.List; +import org.apache.phoenix.pherf.result.ResultValue; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; + /** * Holds metrics + contextual info on the operation run. 
*/ public class OperationStats { - private final TenantOperationInfo input; - private String handlerId; - private final int status; - private final long rowCount; - private final long durationInMs; - private final long startTime; - - public OperationStats( - TenantOperationInfo input, - long startTime, - int status, - long rowCount, - long durationInMs) { - this.input = input; - this.startTime = startTime; - this.status = status; - this.rowCount = rowCount; - this.durationInMs = durationInMs; - } - - public String getModelName() { return this.input.getModelName(); } - - public String getScenarioName() { return this.input.getScenarioName(); } - - public String getTenantId() { return this.input.getTenantId(); } - - public Operation.OperationType getOpType() { return this.input.getOperation().getType(); } - - public String getTableName() { - return this.input.getTableName(); - } - - public String getTenantGroup() { - return this.input.getTenantGroupId(); - } - - public String getOperationGroup() { - return this.input.getOperationGroupId(); - } - - public int getStatus() { - return status; - } - - public long getRowCount() { - return rowCount; - } - - public String getHandlerId() { return handlerId; } - - public long getStartTime() { return startTime; } - - public long getDurationInMs() { - return durationInMs; - } - - public List getCsvRepresentation() { - List rowValues = new ArrayList<>(); - rowValues.add(new ResultValue(getModelName())); - rowValues.add(new ResultValue(getScenarioName())); - rowValues.add(new ResultValue(getTableName())); - rowValues.add(new ResultValue(getTenantId())); - rowValues.add(new ResultValue(handlerId)); - rowValues.add(new ResultValue(getTenantGroup())); - rowValues.add(new ResultValue(getOperationGroup())); - rowValues.add(new ResultValue(getOpType().name())); - rowValues.add(new ResultValue(String.valueOf(startTime))); - rowValues.add(new ResultValue(String.valueOf(status))); - rowValues.add(new ResultValue(String.valueOf(rowCount))); - rowValues.add(new ResultValue(String.valueOf(durationInMs))); - return rowValues; - } - - public void setHandlerId(String handlerId) { - this.handlerId = handlerId; - } + private final TenantOperationInfo input; + private String handlerId; + private final int status; + private final long rowCount; + private final long durationInMs; + private final long startTime; + + public OperationStats(TenantOperationInfo input, long startTime, int status, long rowCount, + long durationInMs) { + this.input = input; + this.startTime = startTime; + this.status = status; + this.rowCount = rowCount; + this.durationInMs = durationInMs; + } + + public String getModelName() { + return this.input.getModelName(); + } + + public String getScenarioName() { + return this.input.getScenarioName(); + } + + public String getTenantId() { + return this.input.getTenantId(); + } + + public Operation.OperationType getOpType() { + return this.input.getOperation().getType(); + } + + public String getTableName() { + return this.input.getTableName(); + } + + public String getTenantGroup() { + return this.input.getTenantGroupId(); + } + + public String getOperationGroup() { + return this.input.getOperationGroupId(); + } + + public int getStatus() { + return status; + } + + public long getRowCount() { + return rowCount; + } + + public String getHandlerId() { + return handlerId; + } + + public long getStartTime() { + return startTime; + } + + public long getDurationInMs() { + return durationInMs; + } + + public List getCsvRepresentation() { + List rowValues = new 
ArrayList<>(); + rowValues.add(new ResultValue(getModelName())); + rowValues.add(new ResultValue(getScenarioName())); + rowValues.add(new ResultValue(getTableName())); + rowValues.add(new ResultValue(getTenantId())); + rowValues.add(new ResultValue(handlerId)); + rowValues.add(new ResultValue(getTenantGroup())); + rowValues.add(new ResultValue(getOperationGroup())); + rowValues.add(new ResultValue(getOpType().name())); + rowValues.add(new ResultValue(String.valueOf(startTime))); + rowValues.add(new ResultValue(String.valueOf(status))); + rowValues.add(new ResultValue(String.valueOf(rowCount))); + rowValues.add(new ResultValue(String.valueOf(durationInMs))); + return rowValues; + } + + public void setHandlerId(String handlerId) { + this.handlerId = handlerId; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperation.java index 649485953c7..e9f5310d96e 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperation.java @@ -15,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.configuration.Ddl; - import java.util.List; +import org.apache.phoenix.pherf.configuration.Ddl; + /** * Defines a pre scenario operation. * @see {@link OperationType#PRE_RUN} */ public interface PreScenarioOperation extends Operation { - List getPreScenarioDdls(); + List getPreScenarioDdls(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperationSupplier.java index 94e20f56666..7a8d642e74f 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/PreScenarioOperationSupplier.java @@ -15,75 +15,77 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.configuration.TenantGroup; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import java.sql.Connection; + import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Ddl; import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.configuration.TenantGroup; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; - /** * A supplier of {@link Function} that takes {@link PreScenarioOperation} as an input */ public class PreScenarioOperationSupplier extends BaseOperationSupplier { - private static final Logger LOGGER = LoggerFactory.getLogger(PreScenarioOperationSupplier.class); + private static final Logger LOGGER = LoggerFactory.getLogger(PreScenarioOperationSupplier.class); - public PreScenarioOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - super(phoenixUtil, model, scenario); - } + public PreScenarioOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + super(phoenixUtil, model, scenario); + } - @Override - public Function get() { - return new Function() { + @Override + public Function get() { + return new Function() { - @Override - public OperationStats apply(final TenantOperationInfo input) { - Preconditions.checkNotNull(input); - final PreScenarioOperation operation = (PreScenarioOperation) input.getOperation(); - final String tenantGroup = input.getTenantGroupId(); - final String opGroup = input.getOperationGroupId(); - final String tableName = input.getTableName(); - final String scenarioName = input.getScenarioName(); - final boolean isTenantGroupGlobal = (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); + @Override + public OperationStats apply(final TenantOperationInfo input) { + Preconditions.checkNotNull(input); + final PreScenarioOperation operation = (PreScenarioOperation) input.getOperation(); + final String tenantGroup = input.getTenantGroupId(); + final String opGroup = input.getOperationGroupId(); + final String tableName = input.getTableName(); + final String scenarioName = input.getScenarioName(); + final boolean isTenantGroupGlobal = + (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - int status = 0; - if (!operation.getPreScenarioDdls().isEmpty()) { - for (Ddl ddl : operation.getPreScenarioDdls()) { - // TODO: - // Ideally the fact that the op needs to executed using global connection - // needs to be built into the framework and injected during event generation. - // For now a special tenant whose id = "TGLOBAL00000001" will be logged. - final String tenantId = isTenantGroupGlobal || ddl.isUseGlobalConnection() ? 
null : input.getTenantId(); - final String opName = String.format("%s:%s:%s:%s:%s", - scenarioName, tableName, opGroup, tenantGroup, input.getTenantId()); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + int status = 0; + if (!operation.getPreScenarioDdls().isEmpty()) { + for (Ddl ddl : operation.getPreScenarioDdls()) { + // TODO: + // Ideally the fact that the op needs to executed using global connection + // needs to be built into the framework and injected during event generation. + // For now a special tenant whose id = "TGLOBAL00000001" will be logged. + final String tenantId = + isTenantGroupGlobal || ddl.isUseGlobalConnection() ? null : input.getTenantId(); + final String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName, opGroup, + tenantGroup, input.getTenantId()); - try (Connection conn = phoenixUtil.getConnection(tenantId)) { - LOGGER.info("\nExecuting DDL:" + ddl + ", OPERATION:" + opName); - String sql = ddl.toString(); - phoenixUtil.executeStatement(sql, conn); - if (ddl.getStatement().toUpperCase().contains(phoenixUtil.ASYNC_KEYWORD)) { - phoenixUtil.waitForAsyncIndexToFinish(ddl.getTableName()); - } - } catch (Exception e) { - LOGGER.error("Operation " + opName + " failed with exception ", e); - status = -1; - } - } - } - long totalDuration = EnvironmentEdgeManager.currentTimeMillis() - startTime; - return new OperationStats(input, startTime, status, operation.getPreScenarioDdls().size(), totalDuration); + try (Connection conn = phoenixUtil.getConnection(tenantId)) { + LOGGER.info("\nExecuting DDL:" + ddl + ", OPERATION:" + opName); + String sql = ddl.toString(); + phoenixUtil.executeStatement(sql, conn); + if (ddl.getStatement().toUpperCase().contains(phoenixUtil.ASYNC_KEYWORD)) { + phoenixUtil.waitForAsyncIndexToFinish(ddl.getTableName()); + } + } catch (Exception e) { + LOGGER.error("Operation " + opName + " failed with exception ", e); + status = -1; } - }; - } + } + } + long totalDuration = EnvironmentEdgeManager.currentTimeMillis() - startTime; + return new OperationStats(input, startTime, status, operation.getPreScenarioDdls().size(), + totalDuration); + } + }; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperation.java index db32567f1a5..c3ee7dcd6a0 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
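The tenantId-or-null decision above reflects how Phoenix scopes connections: a null tenant id yields a global connection, while setting the TenantId connection property yields a tenant-specific one (the Pherf code routes this through PhoenixUtil.getConnection(tenantId)). A rough sketch of that choice, assuming a hypothetical ZooKeeper quorum URL and tenant id:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

// Sketch of the global-vs-tenant connection choice; URL and tenant id are
// hypothetical, and a real run needs a reachable Phoenix/HBase cluster.
public class ConnectionChoiceSketch {
  static Connection getConnection(String tenantId) throws Exception {
    Properties props = new Properties();
    if (tenantId != null) {
      props.setProperty("TenantId", tenantId);  // tenant-specific connection
    }
    return DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);
  }

  public static void main(String[] args) throws Exception {
    boolean useGlobalConnection = true;          // e.g. ddl.isUseGlobalConnection()
    String tenantId = useGlobalConnection ? null : "xxG100000001";
    try (Connection conn = getConnection(tenantId)) {
      System.out.println("autocommit=" + conn.getAutoCommit());
    }
  }
}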
*/ - package org.apache.phoenix.pherf.workload.mt.operations; import org.apache.phoenix.pherf.configuration.Query; @@ -25,5 +24,5 @@ * @see {@link OperationType#SELECT} */ public interface QueryOperation extends Operation { - Query getQuery(); + Query getQuery(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperationSupplier.java index 10a54d9474e..1d047849da9 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/QueryOperationSupplier.java @@ -15,84 +15,86 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.configuration.TenantGroup; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; + import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.configuration.TenantGroup; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; - /** * A supplier of {@link Function} that takes {@link QueryOperation} as an input. 
*/ public class QueryOperationSupplier extends BaseOperationSupplier { - private static final Logger LOGGER = LoggerFactory.getLogger(QueryOperationSupplier.class); + private static final Logger LOGGER = LoggerFactory.getLogger(QueryOperationSupplier.class); - public QueryOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - super(phoenixUtil, model, scenario); - } + public QueryOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + super(phoenixUtil, model, scenario); + } - @Override - public Function get() { - return new Function() { + @Override + public Function get() { + return new Function() { - @Override - public OperationStats apply(final TenantOperationInfo input) { - Preconditions.checkNotNull(input); - final QueryOperation operation = (QueryOperation) input.getOperation(); - final Query query = operation.getQuery(); - final String tenantGroup = input.getTenantGroupId(); - final String opGroup = input.getOperationGroupId(); - final String scenarioName = input.getScenarioName(); - final String tableName = input.getTableName(); + @Override + public OperationStats apply(final TenantOperationInfo input) { + Preconditions.checkNotNull(input); + final QueryOperation operation = (QueryOperation) input.getOperation(); + final Query query = operation.getQuery(); + final String tenantGroup = input.getTenantGroupId(); + final String opGroup = input.getOperationGroupId(); + final String scenarioName = input.getScenarioName(); + final String tableName = input.getTableName(); - // TODO: - // Ideally the fact that the op needs to executed using global connection - // needs to be built into the framework and injected during event generation. - // For now a special tenant whose id = "TGLOBAL00000001" will be logged. - final boolean isTenantGroupGlobal = (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); - final String tenantId = isTenantGroupGlobal || query.isUseGlobalConnection() ? null : input.getTenantId(); + // TODO: + // Ideally the fact that the op needs to executed using global connection + // needs to be built into the framework and injected during event generation. + // For now a special tenant whose id = "TGLOBAL00000001" will be logged. + final boolean isTenantGroupGlobal = + (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); + final String tenantId = + isTenantGroupGlobal || query.isUseGlobalConnection() ? null : input.getTenantId(); - String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName, - opGroup, tenantGroup, input.getTenantId()); - LOGGER.debug("\nExecuting query " + query.getStatement()); + String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName, opGroup, + tenantGroup, input.getTenantId()); + LOGGER.debug("\nExecuting query " + query.getStatement()); - long startTime = 0; - int status = 0; - Long resultRowCount = 0L; - Long queryElapsedTime = 0L; - try (Connection connection = phoenixUtil.getConnection(tenantId)) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = 0; + int status = 0; + Long resultRowCount = 0L; + Long queryElapsedTime = 0L; + try (Connection connection = phoenixUtil.getConnection(tenantId)) { + startTime = EnvironmentEdgeManager.currentTimeMillis(); - // TODO handle dynamic statements - try (PreparedStatement statement = connection.prepareStatement(query.getStatement())) { - try (ResultSet rs = statement.executeQuery()) { - boolean isSelectCountStatement = query.getStatement().toUpperCase().trim().contains("COUNT(") ? 
true : false; - Pair r = phoenixUtil.getResults(query, rs, opName, - isSelectCountStatement, startTime); - resultRowCount = r.getFirst(); - queryElapsedTime = r.getSecond(); - } - } - } catch (Exception e) { - LOGGER.error("Operation " + opName + " failed with exception ", e); - status = -1; - } - return new OperationStats(input, startTime, status, resultRowCount, queryElapsedTime); + // TODO handle dynamic statements + try (PreparedStatement statement = connection.prepareStatement(query.getStatement())) { + try (ResultSet rs = statement.executeQuery()) { + boolean isSelectCountStatement = + query.getStatement().toUpperCase().trim().contains("COUNT(") ? true : false; + Pair r = + phoenixUtil.getResults(query, rs, opName, isSelectCountStatement, startTime); + resultRowCount = r.getFirst(); + queryElapsedTime = r.getSecond(); } - }; - } + } + } catch (Exception e) { + LOGGER.error("Operation " + opName + " failed with exception ", e); + status = -1; + } + return new OperationStats(input, startTime, status, resultRowCount, queryElapsedTime); + } + }; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/TenantOperationFactory.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/TenantOperationFactory.java index f9e262e0380..4823048ddfd 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/TenantOperationFactory.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/TenantOperationFactory.java @@ -15,22 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.locks.ReentrantReadWriteLock; + import org.apache.phoenix.exception.SQLExceptionCode; -import org.apache.phoenix.pherf.workload.mt.generators.LoadEventGenerator; -import org.apache.phoenix.pherf.workload.mt.MultiTenantWorkload; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.pherf.workload.mt.handlers.TenantOperationWorkHandler; -import org.apache.phoenix.thirdparty.com.google.common.base.Charsets; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Supplier; -import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; -import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; -import org.apache.phoenix.thirdparty.com.google.common.hash.BloomFilter; -import org.apache.phoenix.thirdparty.com.google.common.hash.Funnel; -import org.apache.phoenix.thirdparty.com.google.common.hash.PrimitiveSink; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Ddl; import org.apache.phoenix.pherf.configuration.IdleTime; @@ -44,322 +37,338 @@ import org.apache.phoenix.pherf.configuration.XMLConfigParser; import org.apache.phoenix.pherf.rules.RulesApplier; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.MultiTenantWorkload; +import org.apache.phoenix.pherf.workload.mt.generators.LoadEventGenerator; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.pherf.workload.mt.handlers.TenantOperationWorkHandler; +import org.apache.phoenix.thirdparty.com.google.common.base.Charsets; 
+import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import org.apache.phoenix.thirdparty.com.google.common.base.Supplier; +import org.apache.phoenix.thirdparty.com.google.common.collect.Lists; +import org.apache.phoenix.thirdparty.com.google.common.collect.Maps; +import org.apache.phoenix.thirdparty.com.google.common.hash.BloomFilter; +import org.apache.phoenix.thirdparty.com.google.common.hash.Funnel; +import org.apache.phoenix.thirdparty.com.google.common.hash.PrimitiveSink; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.SQLException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.locks.ReentrantReadWriteLock; - /** - * Factory class for operation suppliers. - * The class is responsible for creating new instances of suppliers {@link Supplier} - * for operations {@link Operation} - * - * Operations that need to be executed for a given {@link Scenario} and {@link DataModel} - * are generated by {@link LoadEventGenerator} - * - * These operation events are then published on to the {@link com.lmax.disruptor.RingBuffer} - * by the {@link MultiTenantWorkload} workload generator and - * handled by the {@link com.lmax.disruptor.WorkHandler} for eg {@link TenantOperationWorkHandler} + * Factory class for operation suppliers. The class is responsible for creating new instances of + * suppliers {@link Supplier} for operations {@link Operation} Operations that need to be executed + * for a given {@link Scenario} and {@link DataModel} are generated by {@link LoadEventGenerator} + * These operation events are then published on to the {@link com.lmax.disruptor.RingBuffer} by the + * {@link MultiTenantWorkload} workload generator and handled by the + * {@link com.lmax.disruptor.WorkHandler} for eg {@link TenantOperationWorkHandler} */ public class TenantOperationFactory { - private static class TenantView { - private final String tenantId; - private final String viewName; + private static class TenantView { + private final String tenantId; + private final String viewName; - public TenantView(String tenantId, String viewName) { - this.tenantId = tenantId; - this.viewName = viewName; - } - - public String getTenantId() { - return tenantId; - } + public TenantView(String tenantId, String viewName) { + this.tenantId = tenantId; + this.viewName = viewName; + } - public String getViewName() { - return viewName; - } + public String getTenantId() { + return tenantId; + } - @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - TenantView that = (TenantView) o; - return getTenantId().equals(that.getTenantId()) && getViewName() - .equals(that.getViewName()); - } + public String getViewName() { + return viewName; + } - @Override public int hashCode() { - return Objects.hash(getTenantId(), getViewName()); - } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TenantView that = (TenantView) o; + return getTenantId().equals(that.getTenantId()) && getViewName().equals(that.getViewName()); } - private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationFactory.class); - private final PhoenixUtil phoenixUtil; - private final DataModel model; - private final Scenario scenario; - private final XMLConfigParser parser; - - private final RulesApplier rulesApplier; - private final LoadProfile loadProfile; - private final List operationList 
= Lists.newArrayList(); - private final Map>> operationSuppliers = - Maps.newEnumMap(Operation.OperationType.class); - - private final BloomFilter tenantsLoaded; - private final ReentrantReadWriteLock viewCreationLock = new ReentrantReadWriteLock(); - - public TenantOperationFactory(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - this.phoenixUtil = phoenixUtil; - this.model = model; - this.scenario = scenario; - this.parser = null; - this.rulesApplier = new RulesApplier(model); - this.loadProfile = this.scenario.getLoadProfile(); - this.tenantsLoaded = createTenantsLoadedFilter(loadProfile); - - // Read the scenario definition and load the various operations. - // Case : Operation.OperationType.PRE_RUN - if (scenario.getPreScenarioDdls() != null && scenario.getPreScenarioDdls().size() > 0) { - operationSuppliers.put(Operation.OperationType.PRE_RUN, - new PreScenarioOperationSupplier(phoenixUtil, model, scenario)); - } - // Case : Operation.OperationType.UPSERT - List upsertOperations = getUpsertOperationsForScenario(scenario); - if (upsertOperations.size() > 0) { - operationList.addAll(upsertOperations); - operationSuppliers.put(Operation.OperationType.UPSERT, - new UpsertOperationSupplier(phoenixUtil, model, scenario)); - } + @Override + public int hashCode() { + return Objects.hash(getTenantId(), getViewName()); + } + } + + private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationFactory.class); + private final PhoenixUtil phoenixUtil; + private final DataModel model; + private final Scenario scenario; + private final XMLConfigParser parser; + + private final RulesApplier rulesApplier; + private final LoadProfile loadProfile; + private final List operationList = Lists.newArrayList(); + private final Map>> operationSuppliers = + Maps.newEnumMap(Operation.OperationType.class); + + private final BloomFilter tenantsLoaded; + private final ReentrantReadWriteLock viewCreationLock = new ReentrantReadWriteLock(); + + public TenantOperationFactory(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + this.phoenixUtil = phoenixUtil; + this.model = model; + this.scenario = scenario; + this.parser = null; + this.rulesApplier = new RulesApplier(model); + this.loadProfile = this.scenario.getLoadProfile(); + this.tenantsLoaded = createTenantsLoadedFilter(loadProfile); + + // Read the scenario definition and load the various operations. 
+ // Case : Operation.OperationType.PRE_RUN + if (scenario.getPreScenarioDdls() != null && scenario.getPreScenarioDdls().size() > 0) { + operationSuppliers.put(Operation.OperationType.PRE_RUN, + new PreScenarioOperationSupplier(phoenixUtil, model, scenario)); + } - // Case : Operation.OperationType.SELECT - List queryOperations = getQueryOperationsForScenario(scenario); - if (queryOperations.size() > 0) { - operationList.addAll(queryOperations); - operationSuppliers.put(Operation.OperationType.SELECT, - new QueryOperationSupplier(phoenixUtil, model, scenario)); - } + // Case : Operation.OperationType.UPSERT + List upsertOperations = getUpsertOperationsForScenario(scenario); + if (upsertOperations.size() > 0) { + operationList.addAll(upsertOperations); + operationSuppliers.put(Operation.OperationType.UPSERT, + new UpsertOperationSupplier(phoenixUtil, model, scenario)); + } - // Case : Operation.OperationType.IDLE_TIME - List idleOperations = getIdleTimeOperationsForScenario(scenario); - if (idleOperations.size() > 0) { - operationList.addAll(idleOperations); - operationSuppliers.put(Operation.OperationType.IDLE_TIME, - new IdleTimeOperationSupplier(phoenixUtil, model, scenario)); - } + // Case : Operation.OperationType.SELECT + List queryOperations = getQueryOperationsForScenario(scenario); + if (queryOperations.size() > 0) { + operationList.addAll(queryOperations); + operationSuppliers.put(Operation.OperationType.SELECT, + new QueryOperationSupplier(phoenixUtil, model, scenario)); + } - // Case : Operation.OperationType.USER_DEFINED - List udfOperations = getUDFOperationsForScenario(scenario); - if (udfOperations.size() > 0) { - operationList.addAll(udfOperations); - operationSuppliers.put(Operation.OperationType.USER_DEFINED, - new UserDefinedOperationSupplier(phoenixUtil, model, scenario)); - } + // Case : Operation.OperationType.IDLE_TIME + List idleOperations = getIdleTimeOperationsForScenario(scenario); + if (idleOperations.size() > 0) { + operationList.addAll(idleOperations); + operationSuppliers.put(Operation.OperationType.IDLE_TIME, + new IdleTimeOperationSupplier(phoenixUtil, model, scenario)); } - private BloomFilter createTenantsLoadedFilter(LoadProfile loadProfile) { - Funnel tenantViewFunnel = new Funnel() { - @Override - public void funnel(TenantView tenantView, PrimitiveSink into) { - into.putString(tenantView.getTenantId(), Charsets.UTF_8) - .putString(tenantView.getViewName(), Charsets.UTF_8); - } - }; + // Case : Operation.OperationType.USER_DEFINED + List udfOperations = getUDFOperationsForScenario(scenario); + if (udfOperations.size() > 0) { + operationList.addAll(udfOperations); + operationSuppliers.put(Operation.OperationType.USER_DEFINED, + new UserDefinedOperationSupplier(phoenixUtil, model, scenario)); + } + } + + private BloomFilter createTenantsLoadedFilter(LoadProfile loadProfile) { + Funnel tenantViewFunnel = new Funnel() { + @Override + public void funnel(TenantView tenantView, PrimitiveSink into) { + into.putString(tenantView.getTenantId(), Charsets.UTF_8).putString(tenantView.getViewName(), + Charsets.UTF_8); + } + }; + + int numTenants = 0; + for (TenantGroup tg : loadProfile.getTenantDistribution()) { + numTenants += tg.getNumTenants(); + } - int numTenants = 0; - for (TenantGroup tg : loadProfile.getTenantDistribution()) { - numTenants += tg.getNumTenants(); + // This holds the info whether the tenant view was created (initialized) or not. 
+ return BloomFilter.create(tenantViewFunnel, numTenants, 0.0000001); + } + + private List getUpsertOperationsForScenario(Scenario scenario) { + List opList = Lists.newArrayList(); + for (final Upsert upsert : scenario.getUpserts()) { + final Operation upsertOp = new UpsertOperation() { + @Override + public Upsert getUpsert() { + return upsert; } - // This holds the info whether the tenant view was created (initialized) or not. - return BloomFilter.create(tenantViewFunnel, numTenants, 0.0000001); - } + @Override + public String getId() { + return upsert.getId(); + } - private List getUpsertOperationsForScenario(Scenario scenario) { - List opList = Lists.newArrayList(); - for (final Upsert upsert : scenario.getUpserts()) { - final Operation upsertOp = new UpsertOperation() { - @Override public Upsert getUpsert() { - return upsert; - } - - @Override public String getId() { - return upsert.getId(); - } - - @Override public OperationType getType() { - return OperationType.UPSERT; - } - }; - opList.add(upsertOp); + @Override + public OperationType getType() { + return OperationType.UPSERT; } - return opList; + }; + opList.add(upsertOp); + } + return opList; + } + + private List getQueryOperationsForScenario(Scenario scenario) { + List opList = Lists.newArrayList(); + for (final QuerySet querySet : scenario.getQuerySet()) { + for (final Query query : querySet.getQuery()) { + Operation queryOp = new QueryOperation() { + @Override + public Query getQuery() { + return query; + } + + @Override + public String getId() { + return query.getId(); + } + + @Override + public OperationType getType() { + return OperationType.SELECT; + } + }; + opList.add(queryOp); + } } + return opList; + } + + private List getIdleTimeOperationsForScenario(Scenario scenario) { + List opList = Lists.newArrayList(); + for (final IdleTime idleTime : scenario.getIdleTimes()) { + Operation idleTimeOperation = new IdleTimeOperation() { + @Override + public IdleTime getIdleTime() { + return idleTime; + } - private List getQueryOperationsForScenario(Scenario scenario) { - List opList = Lists.newArrayList(); - for (final QuerySet querySet : scenario.getQuerySet()) { - for (final Query query : querySet.getQuery()) { - Operation queryOp = new QueryOperation() { - @Override public Query getQuery() { - return query; - } - - @Override public String getId() { - return query.getId(); - } - - @Override public OperationType getType() { - return OperationType.SELECT; - } - }; - opList.add(queryOp); - } + @Override + public String getId() { + return idleTime.getId(); } - return opList; - } - private List getIdleTimeOperationsForScenario(Scenario scenario) { - List opList = Lists.newArrayList(); - for (final IdleTime idleTime : scenario.getIdleTimes()) { - Operation idleTimeOperation = new IdleTimeOperation() { - @Override public IdleTime getIdleTime() { - return idleTime; - } - @Override public String getId() { - return idleTime.getId(); - } - - @Override public OperationType getType() { - return OperationType.IDLE_TIME; - } - }; - opList.add(idleTimeOperation); + @Override + public OperationType getType() { + return OperationType.IDLE_TIME; } - return opList; + }; + opList.add(idleTimeOperation); } + return opList; + } + + private List getUDFOperationsForScenario(Scenario scenario) { + List opList = Lists.newArrayList(); + for (final UserDefined udf : scenario.getUdfs()) { + Operation udfOperation = new UserDefinedOperation() { + @Override + public UserDefined getUserFunction() { + return udf; + } - private List 
getUDFOperationsForScenario(Scenario scenario) { - List opList = Lists.newArrayList(); - for (final UserDefined udf : scenario.getUdfs()) { - Operation udfOperation = new UserDefinedOperation() { - @Override public UserDefined getUserFunction() { - return udf; - } - - @Override public String getId() { - return udf.getId(); - } - - @Override public OperationType getType() { - return OperationType.USER_DEFINED; - } - }; - opList.add(udfOperation); + @Override + public String getId() { + return udf.getId(); } - return opList; - } - public PhoenixUtil getPhoenixUtil() { - return phoenixUtil; + @Override + public OperationType getType() { + return OperationType.USER_DEFINED; + } + }; + opList.add(udfOperation); } - - public DataModel getModel() { - return model; + return opList; + } + + public PhoenixUtil getPhoenixUtil() { + return phoenixUtil; + } + + public DataModel getModel() { + return model; + } + + public Scenario getScenario() { + return scenario; + } + + public List getOperations() { + return operationList; + } + + public void initializeTenant(TenantOperationInfo input) throws Exception { + TenantView tenantView = new TenantView(input.getTenantId(), scenario.getTableName()); + + // Check if pre run ddls are needed. + viewCreationLock.writeLock().lock(); + try { + if (!tenantsLoaded.mightContain(tenantView)) { + executePreRunOpsForTenant(tenantView, input); + boolean updated = tenantsLoaded.put(tenantView); + if (updated) { + LOGGER.info(String.format("Successfully initialized tenant. [%s, %s] ", + tenantView.tenantId, tenantView.viewName)); + } + } + } finally { + viewCreationLock.writeLock().unlock(); } + } - public Scenario getScenario() { - return scenario; - } + public Supplier> + getOperationSupplier(final TenantOperationInfo input) throws Exception { - public List getOperations() { - return operationList; + Supplier> opSupplier = + operationSuppliers.get(input.getOperation().getType()); + if (opSupplier == null) { + throw new IllegalArgumentException("Unknown operation type"); } - - public void initializeTenant(TenantOperationInfo input) throws Exception { - TenantView tenantView = new TenantView(input.getTenantId(), scenario.getTableName()); - - // Check if pre run ddls are needed. - viewCreationLock.writeLock().lock(); - try { - if (!tenantsLoaded.mightContain(tenantView)) { - executePreRunOpsForTenant(tenantView, input); - boolean updated = tenantsLoaded.put(tenantView); - if (updated) { - LOGGER.info(String.format("Successfully initialized tenant. [%s, %s] ", - tenantView.tenantId, - tenantView.viewName)); - } - } - } finally { - viewCreationLock.writeLock().unlock(); + return opSupplier; + } + + private void executePreRunOpsForTenant(TenantView tenantView, TenantOperationInfo input) + throws Exception { + + Supplier> preRunOpSupplier = + operationSuppliers.get(Operation.OperationType.PRE_RUN); + // Check if the scenario has a PRE_RUN operation. + if (preRunOpSupplier != null) { + // Initialize the tenant using the pre scenario ddls. + final PreScenarioOperation operation = new PreScenarioOperation() { + @Override + public List getPreScenarioDdls() { + List ddls = scenario.getPreScenarioDdls(); + return ddls == null ? Lists. 
newArrayList() : ddls; } - } - - public Supplier> getOperationSupplier( - final TenantOperationInfo input) throws Exception { - - Supplier> opSupplier = - operationSuppliers.get(input.getOperation().getType()); - if (opSupplier == null) { - throw new IllegalArgumentException("Unknown operation type"); + @Override + public String getId() { + return OperationType.PRE_RUN.name(); } - return opSupplier; - } - private void executePreRunOpsForTenant(TenantView tenantView, TenantOperationInfo input) throws Exception { - - Supplier> preRunOpSupplier = - operationSuppliers.get(Operation.OperationType.PRE_RUN); - // Check if the scenario has a PRE_RUN operation. - if (preRunOpSupplier != null) { - // Initialize the tenant using the pre scenario ddls. - final PreScenarioOperation - operation = new PreScenarioOperation() { - @Override public List getPreScenarioDdls() { - List ddls = scenario.getPreScenarioDdls(); - return ddls == null ? Lists.newArrayList() : ddls; - } - - @Override public String getId() { - return OperationType.PRE_RUN.name(); - } - - @Override public OperationType getType() { - return OperationType.PRE_RUN; - } - }; - // Initialize with the pre run operation. - TenantOperationInfo preRunSample = new TenantOperationInfo( - input.getModelName(), - input.getScenarioName(), - input.getTableName(), - input.getTenantGroupId(), - Operation.OperationType.PRE_RUN.name(), - input.getTenantId(), operation); - - try { - // Run the initialization operation. - OperationStats stats = preRunOpSupplier.get().apply(preRunSample); - LOGGER.info(phoenixUtil.getGSON().toJson(stats)); - } catch (Exception e) { - LOGGER.error(String.format("Failed to initialize tenant. [%s, %s] ", - tenantView.tenantId, - tenantView.viewName), e); - if (e.getClass().isAssignableFrom(SQLException.class)) { - SQLException sqlException = (SQLException) e; - if (SQLExceptionCode.CONCURRENT_TABLE_MUTATION.getErrorCode() != sqlException.getErrorCode()) { - throw e; - } - } else { - throw e; - } - } + @Override + public OperationType getType() { + return OperationType.PRE_RUN; + } + }; + // Initialize with the pre run operation. + TenantOperationInfo preRunSample = new TenantOperationInfo(input.getModelName(), + input.getScenarioName(), input.getTableName(), input.getTenantGroupId(), + Operation.OperationType.PRE_RUN.name(), input.getTenantId(), operation); + + try { + // Run the initialization operation. + OperationStats stats = preRunOpSupplier.get().apply(preRunSample); + LOGGER.info(phoenixUtil.getGSON().toJson(stats)); + } catch (Exception e) { + LOGGER.error(String.format("Failed to initialize tenant. [%s, %s] ", tenantView.tenantId, + tenantView.viewName), e); + if (e.getClass().isAssignableFrom(SQLException.class)) { + SQLException sqlException = (SQLException) e; + if ( + SQLExceptionCode.CONCURRENT_TABLE_MUTATION.getErrorCode() != sqlException.getErrorCode() + ) { + throw e; + } + } else { + throw e; } + } } + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperation.java index 9c7d59b76f3..0c643fe62fa 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.operations; import org.apache.phoenix.pherf.configuration.Upsert; @@ -25,5 +24,5 @@ * @see {@link OperationType#UPSERT} */ public interface UpsertOperation extends Operation { - Upsert getUpsert(); + Upsert getUpsert(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperationSupplier.java index 6a85411ef34..2bd8ffc35ba 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UpsertOperationSupplier.java @@ -15,151 +15,152 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.configuration.TenantGroup; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.text.SimpleDateFormat; +import java.util.List; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.configuration.TenantGroup; import org.apache.phoenix.pherf.configuration.Upsert; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.List; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - /** * A supplier of {@link Function} that takes {@link UpsertOperation} as an input */ public class UpsertOperationSupplier extends BaseOperationSupplier { - private static final Logger LOGGER = LoggerFactory.getLogger(UpsertOperationSupplier.class); - private ReadWriteLock rwLock = new ReentrantReadWriteLock(); + private static final Logger LOGGER = LoggerFactory.getLogger(UpsertOperationSupplier.class); + private ReadWriteLock rwLock = new ReentrantReadWriteLock(); - public UpsertOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - super(phoenixUtil, model, scenario); - } - @Override - public Function get() { - return new Function() { + public UpsertOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + super(phoenixUtil, model, scenario); + } - @Override - public OperationStats apply(final TenantOperationInfo input) { - Preconditions.checkNotNull(input); - final int batchSize = loadProfile.getBatchSize(); - final boolean useBatchApi = batchSize != 0; - final int rowCount = useBatchApi ? 
batchSize : 1; + @Override + public Function get() { + return new Function() { - final UpsertOperation operation = (UpsertOperation) input.getOperation(); - final Upsert upsert = operation.getUpsert(); - final String tenantGroup = input.getTenantGroupId(); - final String opGroup = input.getOperationGroupId(); - final String tableName = input.getTableName(); - final String scenarioName = input.getScenarioName(); + @Override + public OperationStats apply(final TenantOperationInfo input) { + Preconditions.checkNotNull(input); + final int batchSize = loadProfile.getBatchSize(); + final boolean useBatchApi = batchSize != 0; + final int rowCount = useBatchApi ? batchSize : 1; - // TODO: - // Ideally the fact that the op needs to executed using global connection - // needs to be built into the framework and injected during event generation. - // For now a special tenant whose id = "TGLOBAL00000001" will be logged. + final UpsertOperation operation = (UpsertOperation) input.getOperation(); + final Upsert upsert = operation.getUpsert(); + final String tenantGroup = input.getTenantGroupId(); + final String opGroup = input.getOperationGroupId(); + final String tableName = input.getTableName(); + final String scenarioName = input.getScenarioName(); - final boolean isTenantGroupGlobal = (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); - final String tenantId = isTenantGroupGlobal || upsert.isUseGlobalConnection() ? null : input.getTenantId(); - final String opName = String.format("%s:%s:%s:%s:%s", - scenarioName, tableName, opGroup, tenantGroup, input.getTenantId()); - long rowsCreated = 0; - long startTime = 0, duration, totalDuration; - int status = 0; - SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + // TODO: + // Ideally the fact that the op needs to executed using global connection + // needs to be built into the framework and injected during event generation. + // For now a special tenant whose id = "TGLOBAL00000001" will be logged. - try (Connection connection = phoenixUtil.getConnection(tenantId)) { - // If list of columns has not been not provided or lazy loaded - // then use the metadata call to get the column list. - if (upsert.getColumn().isEmpty()) { - rwLock.writeLock().lock(); - try { - if (upsert.getColumn().isEmpty()) { - LOGGER.info("Fetching columns metadata from db for operation : " + opName); - List allCols = phoenixUtil.getColumnsFromPhoenix(scenario.getSchemaName(), - scenario.getTableNameWithoutSchemaName(), - connection); - upsert.setColumn(allCols); - } - } finally { - rwLock.writeLock().unlock(); - } - } + final boolean isTenantGroupGlobal = + (tenantGroup.compareTo(TenantGroup.DEFAULT_GLOBAL_ID) == 0); + final String tenantId = + isTenantGroupGlobal || upsert.isUseGlobalConnection() ? 
null : input.getTenantId(); + final String opName = String.format("%s:%s:%s:%s:%s", scenarioName, tableName, opGroup, + tenantGroup, input.getTenantId()); + long rowsCreated = 0; + long startTime = 0, duration, totalDuration; + int status = 0; + SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - String sql = phoenixUtil.buildSql(upsert.getColumn(), tableName); - LOGGER.info("Operation " + opName + " executing " + sql); - startTime = EnvironmentEdgeManager.currentTimeMillis(); - PreparedStatement stmt = null; - try { - stmt = connection.prepareStatement(sql); - for (long i = rowCount; i > 0; i--) { - stmt = phoenixUtil.buildStatement(rulesApplier, scenario, upsert.getColumn(), stmt, simpleDateFormat); - if (useBatchApi) { - stmt.addBatch(); - } else { - rowsCreated += stmt.executeUpdate(); - } - } - } catch (SQLException e) { - throw e; - } finally { - // Need to keep the statement open to send the remaining batch of updates - if (!useBatchApi && stmt != null) { - stmt.close(); - } - if (connection != null) { - if (useBatchApi && stmt != null) { - int[] results = stmt.executeBatch(); - for (int x = 0; x < results.length; x++) { - int result = results[x]; - if (result < 1) { - final String msg = - "Failed to write update in batch (update count=" - + result + ")"; - throw new RuntimeException(msg); - } - rowsCreated += result; - } - // Close the statement after our last batch execution. - stmt.close(); - } + try (Connection connection = phoenixUtil.getConnection(tenantId)) { + // If list of columns has not been not provided or lazy loaded + // then use the metadata call to get the column list. + if (upsert.getColumn().isEmpty()) { + rwLock.writeLock().lock(); + try { + if (upsert.getColumn().isEmpty()) { + LOGGER.info("Fetching columns metadata from db for operation : " + opName); + List allCols = phoenixUtil.getColumnsFromPhoenix(scenario.getSchemaName(), + scenario.getTableNameWithoutSchemaName(), connection); + upsert.setColumn(allCols); + } + } finally { + rwLock.writeLock().unlock(); + } + } - try { - connection.commit(); - duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; - LOGGER.info("Writer ( " + Thread.currentThread().getName() - + ") committed Final Batch. 
Duration (" + duration + ") Ms"); - connection.close(); - } catch (SQLException e) { - // Swallow since we are closing anyway - LOGGER.error("Error when closing/committing", e); - } - } - } - } catch (SQLException sqle) { - LOGGER.error("Operation " + opName + " failed with exception ", sqle); - status = -1; - } catch (Exception e) { - LOGGER.error("Operation " + opName + " failed with exception ", e); - status = -1; + String sql = phoenixUtil.buildSql(upsert.getColumn(), tableName); + LOGGER.info("Operation " + opName + " executing " + sql); + startTime = EnvironmentEdgeManager.currentTimeMillis(); + PreparedStatement stmt = null; + try { + stmt = connection.prepareStatement(sql); + for (long i = rowCount; i > 0; i--) { + stmt = phoenixUtil.buildStatement(rulesApplier, scenario, upsert.getColumn(), stmt, + simpleDateFormat); + if (useBatchApi) { + stmt.addBatch(); + } else { + rowsCreated += stmt.executeUpdate(); + } + } + } catch (SQLException e) { + throw e; + } finally { + // Need to keep the statement open to send the remaining batch of updates + if (!useBatchApi && stmt != null) { + stmt.close(); + } + if (connection != null) { + if (useBatchApi && stmt != null) { + int[] results = stmt.executeBatch(); + for (int x = 0; x < results.length; x++) { + int result = results[x]; + if (result < 1) { + final String msg = + "Failed to write update in batch (update count=" + result + ")"; + throw new RuntimeException(msg); + } + rowsCreated += result; } + // Close the statement after our last batch execution. + stmt.close(); + } - totalDuration = EnvironmentEdgeManager.currentTimeMillis() - startTime; - return new OperationStats(input, startTime, status, rowsCreated, totalDuration); + try { + connection.commit(); + duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; + LOGGER.info("Writer ( " + Thread.currentThread().getName() + + ") committed Final Batch. Duration (" + duration + ") Ms"); + connection.close(); + } catch (SQLException e) { + // Swallow since we are closing anyway + LOGGER.error("Error when closing/committing", e); + } } - }; - } + } + } catch (SQLException sqle) { + LOGGER.error("Operation " + opName + " failed with exception ", sqle); + status = -1; + } catch (Exception e) { + LOGGER.error("Operation " + opName + " failed with exception ", e); + status = -1; + } + + totalDuration = EnvironmentEdgeManager.currentTimeMillis() - startTime; + return new OperationStats(input, startTime, status, rowsCreated, totalDuration); + } + }; + } } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperation.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperation.java index 838666c508f..6545dee04e9 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperation.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt.operations; import org.apache.phoenix.pherf.configuration.UserDefined; @@ -25,5 +24,5 @@ * @see {@link OperationType#USER_DEFINED} */ public interface UserDefinedOperation extends Operation { - UserDefined getUserFunction(); + UserDefined getUserFunction(); } diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperationSupplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperationSupplier.java index 9cb552a2bdd..9db565895b0 100644 --- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperationSupplier.java +++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/mt/operations/UserDefinedOperationSupplier.java @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.phoenix.pherf.workload.mt.operations; -import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; -import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.util.PhoenixUtil; +import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; +import org.apache.phoenix.thirdparty.com.google.common.base.Function; import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; import org.apache.phoenix.util.EnvironmentEdgeManager; @@ -31,21 +30,21 @@ */ public class UserDefinedOperationSupplier extends BaseOperationSupplier { - public UserDefinedOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { - super(phoenixUtil, model, scenario); - } + public UserDefinedOperationSupplier(PhoenixUtil phoenixUtil, DataModel model, Scenario scenario) { + super(phoenixUtil, model, scenario); + } - @Override - public Function get() { - return new Function() { - @Override - public OperationStats apply(final TenantOperationInfo input) { - Preconditions.checkNotNull(input); - // TODO : implement user defined operation invocation. - long startTime = EnvironmentEdgeManager.currentTimeMillis(); - long duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; - return new OperationStats(input, startTime,0, 0, duration); - } - }; - } + @Override + public Function get() { + return new Function() { + @Override + public OperationStats apply(final TenantOperationInfo input) { + Preconditions.checkNotNull(input); + // TODO : implement user defined operation invocation. + long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long duration = EnvironmentEdgeManager.currentTimeMillis() - startTime; + return new OperationStats(input, startTime, 0, 0, duration); + } + }; + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java index 35e875445b8..4ab90a3ba80 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ColumnTest.java @@ -1,53 +1,49 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import static org.junit.Assert.assertTrue; import org.apache.phoenix.pherf.configuration.Column; import org.apache.phoenix.pherf.configuration.DataTypeMapping; import org.junit.Test; -import static org.junit.Assert.assertTrue; - public class ColumnTest { - @Test - public void testColumnMutate() { - Column columnA = new Column(); - Column columnB = new Column(); - Column columnC = new Column(); - columnA.setType(DataTypeMapping.VARCHAR); - columnB.setType(DataTypeMapping.VARCHAR); - columnA.setLength(15); - columnA.setMinValue(20); - columnA.setMaxValue(25); - columnB.setLength(30); - columnC.setMaxValue(45); + @Test + public void testColumnMutate() { + Column columnA = new Column(); + Column columnB = new Column(); + Column columnC = new Column(); + columnA.setType(DataTypeMapping.VARCHAR); + columnB.setType(DataTypeMapping.VARCHAR); + columnA.setLength(15); + columnA.setMinValue(20); + columnA.setMaxValue(25); + columnB.setLength(30); + columnC.setMaxValue(45); - columnA.mutate(columnB); - assertTrue("Mutation failed length", columnA.getLength() == columnB.getLength()); - columnA.mutate(columnC); - assertTrue("Mutation failed length", columnA.getLength() == columnB.getLength()); - assertTrue("Mutation failed min", columnA.getMinValue() == 20); - assertTrue("Mutation failed max", columnA.getMaxValue() == columnC.getMaxValue()); - assertTrue("Mutation failed name", columnA.getName() == null); + columnA.mutate(columnB); + assertTrue("Mutation failed length", columnA.getLength() == columnB.getLength()); + columnA.mutate(columnC); + assertTrue("Mutation failed length", columnA.getLength() == columnB.getLength()); + assertTrue("Mutation failed min", columnA.getMinValue() == 20); + assertTrue("Mutation failed max", columnA.getMaxValue() == columnC.getMaxValue()); + assertTrue("Mutation failed name", columnA.getName() == null); - } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java index b0d7340534d..c3bf4696bbd 100644 --- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java @@ -1,23 +1,24 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; +import static org.junit.Assert.*; + import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; @@ -27,311 +28,308 @@ import java.util.Map; import java.util.Set; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; + import org.apache.phoenix.pherf.configuration.Column; +import org.apache.phoenix.pherf.configuration.DataModel; import org.apache.phoenix.pherf.configuration.DataOverride; import org.apache.phoenix.pherf.configuration.DataSequence; import org.apache.phoenix.pherf.configuration.DataTypeMapping; import org.apache.phoenix.pherf.configuration.Ddl; import org.apache.phoenix.pherf.configuration.ExecutionType; +import org.apache.phoenix.pherf.configuration.LoadProfile; import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.configuration.QuerySet; -import org.apache.phoenix.pherf.configuration.WriteParams; -import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; -import org.apache.phoenix.pherf.configuration.DataModel; -import org.apache.phoenix.pherf.configuration.LoadProfile; import org.apache.phoenix.pherf.configuration.Scenario; +import org.apache.phoenix.pherf.configuration.WriteParams; import org.apache.phoenix.pherf.configuration.XMLConfigParser; import org.apache.phoenix.pherf.rules.DataValue; import org.apache.phoenix.pherf.workload.mt.generators.TenantLoadEventGeneratorFactory; +import org.apache.phoenix.thirdparty.com.google.common.collect.Sets; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.JAXBException; -import javax.xml.bind.Marshaller; - -import static org.junit.Assert.*; - public class ConfigurationParserTest extends 
ResultBaseTest { - private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationParserTest.class); - - @Test - public void testReadWriteWorkloadReader() throws Exception { - String scenarioName = "testScenarioRW"; - String testResourceName = "/scenario/test_scenario.xml"; - List scenarioList = getScenarios(testResourceName); - Scenario target = null; - for (Scenario scenario : scenarioList) { - if (scenarioName.equals(scenario.getName())) { - target = scenario; - } - } - assertNotNull("Could not find scenario: " + scenarioName, target); - WriteParams params = target.getWriteParams(); - - assertNotNull("Could not find writeParams in scenario: " + scenarioName, params); - assertNotNull("Could not find batch size: ", params.getBatchSize()); - assertNotNull("Could not find execution duration: ", params.getExecutionDurationInMs()); - assertNotNull("Could not find sleep duration: ", params.getThreadSleepDuration()); - assertNotNull("Could not find writer count: ", params.getWriterThreadCount()); + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationParserTest.class); + + @Test + public void testReadWriteWorkloadReader() throws Exception { + String scenarioName = "testScenarioRW"; + String testResourceName = "/scenario/test_scenario.xml"; + List scenarioList = getScenarios(testResourceName); + Scenario target = null; + for (Scenario scenario : scenarioList) { + if (scenarioName.equals(scenario.getName())) { + target = scenario; + } } - - @Test - // TODO Break this into multiple smaller tests. - public void testConfigReader() { - try { - String testResourceName = "/scenario/test_scenario.xml"; - LOGGER.debug("DataModel: " + writeXML()); - List scenarioList = getScenarios(testResourceName); - List dataMappingColumns = getDataModel(testResourceName).getDataMappingColumns(); - assertTrue("Could not load the data columns from xml.", - (dataMappingColumns != null) && (dataMappingColumns.size() > 0)); - assertTrue("Could not load the data DataValue list from xml.", - (dataMappingColumns.get(8).getDataValues() != null) - && (dataMappingColumns.get(8).getDataValues().size() > 0)); - - assertDateValue(dataMappingColumns); - assertCurrentDateValue(dataMappingColumns); - - // Validate column mappings - for (Column column : dataMappingColumns) { - assertNotNull("Column (" + column.getName() + ") is missing its type", - column.getType()); - } - - Scenario scenario = scenarioList.get(1); - assertNotNull(scenario); - assertEquals("PHERF.TEST_TABLE", scenario.getTableName()); - assertEquals(30, scenario.getRowCount()); - assertEquals(1, scenario.getDataOverride().getColumn().size()); - QuerySet qs = scenario.getQuerySet().get(0); - assertEquals(ExecutionType.SERIAL, qs.getExecutionType()); - assertEquals(5000, qs.getExecutionDurationInMs()); - assertEquals(2, qs.getQuery().size()); - - Query firstQuery = qs.getQuery().get(0); - assertEquals("1-3", qs.getConcurrency()); - assertEquals(1, qs.getMinConcurrency()); - assertEquals(3, qs.getMaxConcurrency()); - assertEquals(100, qs.getNumberOfExecutions()); - assertEquals("select count(*) from PHERF.TEST_TABLE", firstQuery.getStatement()); - assertEquals("123456789012345", firstQuery.getTenantId()); - assertEquals(null, firstQuery.getDdl()); - assertEquals(0, (long) firstQuery.getExpectedAggregateRowCount()); - - Query secondQuery = qs.getQuery().get(1); - assertEquals("Could not get statement.", "select sum(SOME_INT) from PHERF.TEST_TABLE", - secondQuery.getStatement()); - assertEquals("Could not get queryGroup.", "g1", 
secondQuery.getQueryGroup()); - - // Make sure anything in the overrides matches a real column in the data mappings - DataOverride override = scenario.getDataOverride(); - for (Column column : override.getColumn()) { - assertTrue("Could not lookup Column (" + column.getName() - + ") in DataMapping columns: " + dataMappingColumns, - dataMappingColumns.contains(column)); - } - - } catch (Exception e) { - e.printStackTrace(); - fail(); - } + assertNotNull("Could not find scenario: " + scenarioName, target); + WriteParams params = target.getWriteParams(); + + assertNotNull("Could not find writeParams in scenario: " + scenarioName, params); + assertNotNull("Could not find batch size: ", params.getBatchSize()); + assertNotNull("Could not find execution duration: ", params.getExecutionDurationInMs()); + assertNotNull("Could not find sleep duration: ", params.getThreadSleepDuration()); + assertNotNull("Could not find writer count: ", params.getWriterThreadCount()); + } + + @Test + // TODO Break this into multiple smaller tests. + public void testConfigReader() { + try { + String testResourceName = "/scenario/test_scenario.xml"; + LOGGER.debug("DataModel: " + writeXML()); + List scenarioList = getScenarios(testResourceName); + List dataMappingColumns = getDataModel(testResourceName).getDataMappingColumns(); + assertTrue("Could not load the data columns from xml.", + (dataMappingColumns != null) && (dataMappingColumns.size() > 0)); + assertTrue("Could not load the data DataValue list from xml.", + (dataMappingColumns.get(8).getDataValues() != null) + && (dataMappingColumns.get(8).getDataValues().size() > 0)); + + assertDateValue(dataMappingColumns); + assertCurrentDateValue(dataMappingColumns); + + // Validate column mappings + for (Column column : dataMappingColumns) { + assertNotNull("Column (" + column.getName() + ") is missing its type", column.getType()); + } + + Scenario scenario = scenarioList.get(1); + assertNotNull(scenario); + assertEquals("PHERF.TEST_TABLE", scenario.getTableName()); + assertEquals(30, scenario.getRowCount()); + assertEquals(1, scenario.getDataOverride().getColumn().size()); + QuerySet qs = scenario.getQuerySet().get(0); + assertEquals(ExecutionType.SERIAL, qs.getExecutionType()); + assertEquals(5000, qs.getExecutionDurationInMs()); + assertEquals(2, qs.getQuery().size()); + + Query firstQuery = qs.getQuery().get(0); + assertEquals("1-3", qs.getConcurrency()); + assertEquals(1, qs.getMinConcurrency()); + assertEquals(3, qs.getMaxConcurrency()); + assertEquals(100, qs.getNumberOfExecutions()); + assertEquals("select count(*) from PHERF.TEST_TABLE", firstQuery.getStatement()); + assertEquals("123456789012345", firstQuery.getTenantId()); + assertEquals(null, firstQuery.getDdl()); + assertEquals(0, (long) firstQuery.getExpectedAggregateRowCount()); + + Query secondQuery = qs.getQuery().get(1); + assertEquals("Could not get statement.", "select sum(SOME_INT) from PHERF.TEST_TABLE", + secondQuery.getStatement()); + assertEquals("Could not get queryGroup.", "g1", secondQuery.getQueryGroup()); + + // Make sure anything in the overrides matches a real column in the data mappings + DataOverride override = scenario.getDataOverride(); + for (Column column : override.getColumn()) { + assertTrue("Could not lookup Column (" + column.getName() + ") in DataMapping columns: " + + dataMappingColumns, dataMappingColumns.contains(column)); + } + + } catch (Exception e) { + e.printStackTrace(); + fail(); } - - @Test - public void testWorkloadWithLoadProfile() throws Exception { - String 
testResourceName = "/scenario/test_workload_with_load_profile.xml"; - Set scenarioNames = Sets.newHashSet("scenario_11", "scenario_12"); - List scenarioList = getScenarios(testResourceName); - Scenario target = null; - for (Scenario scenario : scenarioList) { - if (scenarioNames.contains(scenario.getName())) { - target = scenario; - } - assertNotNull("Could not find scenario: " + scenario.getName(), target); - } - - Scenario testScenarioWithLoadProfile = scenarioList.get(0); - Map props = testScenarioWithLoadProfile.getPhoenixProperties(); - assertEquals("Number of properties(size) not as expected: ", - 2, props.size()); - TenantLoadEventGeneratorFactory.GeneratorType - type = TenantLoadEventGeneratorFactory.GeneratorType.valueOf( - testScenarioWithLoadProfile.getGeneratorName()); - assertEquals("Unknown generator type: ", - TenantLoadEventGeneratorFactory.GeneratorType.UNIFORM, type); - - LoadProfile loadProfile = testScenarioWithLoadProfile.getLoadProfile(); - assertEquals("batch size not as expected: ", - 1, loadProfile.getBatchSize()); - assertEquals("num operations not as expected: ", - 1000, loadProfile.getNumOperations()); - assertEquals("tenant group size is not as expected: ", - 3, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - 5,loadProfile.getOpDistribution().size()); - assertEquals("UDFs size is not as expected ", - 1, testScenarioWithLoadProfile.getUdfs().size()); - assertNotNull("UDFs clazzName cannot be null ", - testScenarioWithLoadProfile.getUdfs().get(0).getClazzName()); - assertEquals("UDFs args size is not as expected ", - 2, testScenarioWithLoadProfile.getUdfs().get(0).getArgs().size()); - assertEquals("UpsertSet size is not as expected ", - 1, testScenarioWithLoadProfile.getUpserts().size()); - assertEquals("#Column within the first upsert is not as expected ", - 7, testScenarioWithLoadProfile.getUpserts().get(0).getColumn().size()); - assertEquals("QuerySet size is not as expected ", - 1, testScenarioWithLoadProfile.getQuerySet().size()); - assertEquals("#Queries within the first querySet is not as expected ", - 2, testScenarioWithLoadProfile.getQuerySet().get(0).getQuery().size()); - - // Test configuration for global connection - Scenario testScenarioWithGlobalConn = scenarioList.get(2); - LoadProfile loadProfileWithGlobalConn = testScenarioWithGlobalConn.getLoadProfile(); - assertEquals("batch size not as expected: ", - 1, loadProfileWithGlobalConn.getBatchSize()); - assertEquals("num operations not as expected: ", - 1000, loadProfileWithGlobalConn.getNumOperations()); - assertEquals("tenant group size is not as expected: ", - 1, loadProfileWithGlobalConn.getTenantDistribution().size()); - assertEquals("global tenant is not as expected: ", - 1, loadProfileWithGlobalConn.getTenantDistribution().get(0).getNumTenants()); - assertEquals("global tenant id is not as expected: ", - "GLOBAL", loadProfileWithGlobalConn.getTenantDistribution().get(0).getId()); - assertEquals("global tenant weight is not as expected: ", - 100, loadProfileWithGlobalConn.getTenantDistribution().get(0).getWeight()); - assertEquals("operation group size is not as expected: ", - 1,loadProfileWithGlobalConn.getOpDistribution().size()); - assertEquals("UpsertSet size is not as expected ", - 1, testScenarioWithGlobalConn.getUpserts().size()); - assertEquals("#Column within the first upsert is not as expected ", - 7, testScenarioWithGlobalConn.getUpserts().get(0).getColumn().size()); - assertEquals("Upsert operation not using global 
connection as expected ", - true, testScenarioWithGlobalConn.getUpserts().get(0).isUseGlobalConnection()); + } + + @Test + public void testWorkloadWithLoadProfile() throws Exception { + String testResourceName = "/scenario/test_workload_with_load_profile.xml"; + Set scenarioNames = Sets.newHashSet("scenario_11", "scenario_12"); + List scenarioList = getScenarios(testResourceName); + Scenario target = null; + for (Scenario scenario : scenarioList) { + if (scenarioNames.contains(scenario.getName())) { + target = scenario; + } + assertNotNull("Could not find scenario: " + scenario.getName(), target); } - private URL getResourceUrl(String resourceName) { - URL resourceUrl = getClass().getResource(resourceName); - assertNotNull("Test data XML file is missing", resourceUrl); - return resourceUrl; + Scenario testScenarioWithLoadProfile = scenarioList.get(0); + Map props = testScenarioWithLoadProfile.getPhoenixProperties(); + assertEquals("Number of properties(size) not as expected: ", 2, props.size()); + TenantLoadEventGeneratorFactory.GeneratorType type = + TenantLoadEventGeneratorFactory.GeneratorType + .valueOf(testScenarioWithLoadProfile.getGeneratorName()); + assertEquals("Unknown generator type: ", TenantLoadEventGeneratorFactory.GeneratorType.UNIFORM, + type); + + LoadProfile loadProfile = testScenarioWithLoadProfile.getLoadProfile(); + assertEquals("batch size not as expected: ", 1, loadProfile.getBatchSize()); + assertEquals("num operations not as expected: ", 1000, loadProfile.getNumOperations()); + assertEquals("tenant group size is not as expected: ", 3, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", 5, + loadProfile.getOpDistribution().size()); + assertEquals("UDFs size is not as expected ", 1, testScenarioWithLoadProfile.getUdfs().size()); + assertNotNull("UDFs clazzName cannot be null ", + testScenarioWithLoadProfile.getUdfs().get(0).getClazzName()); + assertEquals("UDFs args size is not as expected ", 2, + testScenarioWithLoadProfile.getUdfs().get(0).getArgs().size()); + assertEquals("UpsertSet size is not as expected ", 1, + testScenarioWithLoadProfile.getUpserts().size()); + assertEquals("#Column within the first upsert is not as expected ", 7, + testScenarioWithLoadProfile.getUpserts().get(0).getColumn().size()); + assertEquals("QuerySet size is not as expected ", 1, + testScenarioWithLoadProfile.getQuerySet().size()); + assertEquals("#Queries within the first querySet is not as expected ", 2, + testScenarioWithLoadProfile.getQuerySet().get(0).getQuery().size()); + + // Test configuration for global connection + Scenario testScenarioWithGlobalConn = scenarioList.get(2); + LoadProfile loadProfileWithGlobalConn = testScenarioWithGlobalConn.getLoadProfile(); + assertEquals("batch size not as expected: ", 1, loadProfileWithGlobalConn.getBatchSize()); + assertEquals("num operations not as expected: ", 1000, + loadProfileWithGlobalConn.getNumOperations()); + assertEquals("tenant group size is not as expected: ", 1, + loadProfileWithGlobalConn.getTenantDistribution().size()); + assertEquals("global tenant is not as expected: ", 1, + loadProfileWithGlobalConn.getTenantDistribution().get(0).getNumTenants()); + assertEquals("global tenant id is not as expected: ", "GLOBAL", + loadProfileWithGlobalConn.getTenantDistribution().get(0).getId()); + assertEquals("global tenant weight is not as expected: ", 100, + loadProfileWithGlobalConn.getTenantDistribution().get(0).getWeight()); + assertEquals("operation group size is not as 
expected: ", 1, + loadProfileWithGlobalConn.getOpDistribution().size()); + assertEquals("UpsertSet size is not as expected ", 1, + testScenarioWithGlobalConn.getUpserts().size()); + assertEquals("#Column within the first upsert is not as expected ", 7, + testScenarioWithGlobalConn.getUpserts().get(0).getColumn().size()); + assertEquals("Upsert operation not using global connection as expected ", true, + testScenarioWithGlobalConn.getUpserts().get(0).isUseGlobalConnection()); + } + + private URL getResourceUrl(String resourceName) { + URL resourceUrl = getClass().getResource(resourceName); + assertNotNull("Test data XML file is missing", resourceUrl); + return resourceUrl; + } + + private List getScenarios(String resourceName) throws Exception { + DataModel data = getDataModel(resourceName); + List scenarioList = data.getScenarios(); + assertTrue("Could not load the scenarios from xml.", + (scenarioList != null) && (scenarioList.size() > 0)); + return scenarioList; + } + + private DataModel getDataModel(String resourceName) throws Exception { + Path resourcePath = Paths.get(getResourceUrl(resourceName).toURI()); + return XMLConfigParser.readDataModel(resourcePath); + } + + private void assertDateValue(List dataMappingColumns) { + for (Column dataMapping : dataMappingColumns) { + if ( + (dataMapping.getType() == DataTypeMapping.DATE) + && (dataMapping.getName().equals("SOME_DATE")) + ) { + // First rule should have min/max set + assertNotNull(dataMapping.getDataValues().get(0).getMinValue()); + assertNotNull(dataMapping.getDataValues().get(0).getMaxValue()); + + // Second rule should have only value set + assertNotNull(dataMapping.getDataValues().get(1).getValue()); + + // Third rule should have min/max set + assertNotNull(dataMapping.getDataValues().get(2).getMinValue()); + assertNotNull(dataMapping.getDataValues().get(2).getMaxValue()); + return; + } } - - private List getScenarios(String resourceName) throws Exception { - DataModel data = getDataModel(resourceName); - List scenarioList = data.getScenarios(); - assertTrue("Could not load the scenarios from xml.", - (scenarioList != null) && (scenarioList.size() > 0)); - return scenarioList; + fail("We should have found a Rule value that matched."); + } + + private void assertCurrentDateValue(List dataMappingColumns) { + for (Column dataMapping : dataMappingColumns) { + if ( + (dataMapping.getType() == DataTypeMapping.DATE) + && (dataMapping.getName().equals("PRESENT_DATE")) + ) { + // First rule should have use current date value set + assertNotNull(dataMapping.getDataValues().get(0).getUseCurrentDate()); + + // Second rule should have use current date value set + assertNotNull(dataMapping.getDataValues().get(1).getUseCurrentDate()); + return; + } } - - private DataModel getDataModel(String resourceName) throws Exception { - Path resourcePath = Paths.get(getResourceUrl(resourceName).toURI()); - return XMLConfigParser.readDataModel(resourcePath); - } - - private void assertDateValue(List dataMappingColumns) { - for (Column dataMapping : dataMappingColumns) { - if ((dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getName() - .equals("SOME_DATE"))) { - // First rule should have min/max set - assertNotNull(dataMapping.getDataValues().get(0).getMinValue()); - assertNotNull(dataMapping.getDataValues().get(0).getMaxValue()); - - // Second rule should have only value set - assertNotNull(dataMapping.getDataValues().get(1).getValue()); - - // Third rule should have min/max set - 
assertNotNull(dataMapping.getDataValues().get(2).getMinValue()); - assertNotNull(dataMapping.getDataValues().get(2).getMaxValue()); - return; - } - } - fail("We should have found a Rule value that matched."); - } - - private void assertCurrentDateValue(List dataMappingColumns) { - for (Column dataMapping : dataMappingColumns) { - if ((dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getName() - .equals("PRESENT_DATE"))) { - //First rule should have use current date value set - assertNotNull(dataMapping.getDataValues().get(0).getUseCurrentDate()); - - //Second rule should have use current date value set - assertNotNull(dataMapping.getDataValues().get(1).getUseCurrentDate()); - return; - } - } - fail("We should have found a Rule value that matched."); - } - - /* - Used for debugging to dump out a simple xml filed based on the bound objects. - */ - private String writeXML() { - DataModel data = new DataModel(); - try { - DataValue dataValue = new DataValue(); - dataValue.setDistribution(20); - dataValue.setValue("jnhgGhHminwiajn"); - List dataValueList = new ArrayList<>(); - dataValueList.add(dataValue); - Column column = new Column(); - column.setLength(15); - column.setDataSequence(DataSequence.RANDOM); - column.setName("TEST_COL"); - column.setUserDefined(true); - column.setDataValues(dataValueList); - List columnList = new ArrayList<>(); - columnList.add(column); - - data.setDataMappingColumns(columnList); - - Scenario scenario = new Scenario(); - scenario.setName("scenario1"); - scenario.setTenantId("00DXXXXXX"); - List preScenarioDdls = new ArrayList(); - preScenarioDdls.add(new Ddl("CREATE INDEX IF NOT EXISTS ? ON FHA (NEWVAL_NUMBER) ASYNC", "FHAIDX_NEWVAL_NUMBER")); - preScenarioDdls.add(new Ddl("CREATE LOCAL INDEX IF NOT EXISTS ? ON FHA (NEWVAL_NUMBER)", "FHAIDX_NEWVAL_NUMBER")); - scenario.setPreScenarioDdls(preScenarioDdls); - scenario.setPhoenixProperties(new HashMap()); - scenario.getPhoenixProperties().put("phoenix.query.threadPoolSize", "200"); - scenario.setDataOverride(new DataOverride()); - scenario.setTableName("tableName"); - scenario.setRowCount(10); - QuerySet querySet = new QuerySet(); - querySet.setExecutionType(ExecutionType.PARALLEL); - querySet.setExecutionDurationInMs(10000); - scenario.getQuerySet().add(querySet); - Query query = new Query(); - querySet.getQuery().add(query); - querySet.setConcurrency("15"); - querySet.setNumberOfExecutions(20); - query.setStatement("select * from FHA"); - Scenario scenario2 = new Scenario(); - scenario2.setName("scenario2"); - scenario2.setPhoenixProperties(new HashMap()); - scenario2.setDataOverride(new DataOverride()); - scenario2.setTableName("tableName2"); - scenario2.setRowCount(500); - List scenarios = new ArrayList(); - scenarios.add(scenario); - scenarios.add(scenario2); - data.setScenarios(scenarios); - - // create JAXB context and initializing Marshaller - JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); - Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); - - // for getting nice formatted output - jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); - - // Writing to console - jaxbMarshaller.marshal(data, System.out); - } catch (JAXBException e) { - // some exception occured - e.printStackTrace(); - } - return data.toString(); + fail("We should have found a Rule value that matched."); + } + + /* + * Used for debugging to dump out a simple xml filed based on the bound objects. 
+ */ + private String writeXML() { + DataModel data = new DataModel(); + try { + DataValue dataValue = new DataValue(); + dataValue.setDistribution(20); + dataValue.setValue("jnhgGhHminwiajn"); + List dataValueList = new ArrayList<>(); + dataValueList.add(dataValue); + Column column = new Column(); + column.setLength(15); + column.setDataSequence(DataSequence.RANDOM); + column.setName("TEST_COL"); + column.setUserDefined(true); + column.setDataValues(dataValueList); + List columnList = new ArrayList<>(); + columnList.add(column); + + data.setDataMappingColumns(columnList); + + Scenario scenario = new Scenario(); + scenario.setName("scenario1"); + scenario.setTenantId("00DXXXXXX"); + List preScenarioDdls = new ArrayList(); + preScenarioDdls.add(new Ddl("CREATE INDEX IF NOT EXISTS ? ON FHA (NEWVAL_NUMBER) ASYNC", + "FHAIDX_NEWVAL_NUMBER")); + preScenarioDdls.add(new Ddl("CREATE LOCAL INDEX IF NOT EXISTS ? ON FHA (NEWVAL_NUMBER)", + "FHAIDX_NEWVAL_NUMBER")); + scenario.setPreScenarioDdls(preScenarioDdls); + scenario.setPhoenixProperties(new HashMap()); + scenario.getPhoenixProperties().put("phoenix.query.threadPoolSize", "200"); + scenario.setDataOverride(new DataOverride()); + scenario.setTableName("tableName"); + scenario.setRowCount(10); + QuerySet querySet = new QuerySet(); + querySet.setExecutionType(ExecutionType.PARALLEL); + querySet.setExecutionDurationInMs(10000); + scenario.getQuerySet().add(querySet); + Query query = new Query(); + querySet.getQuery().add(query); + querySet.setConcurrency("15"); + querySet.setNumberOfExecutions(20); + query.setStatement("select * from FHA"); + Scenario scenario2 = new Scenario(); + scenario2.setName("scenario2"); + scenario2.setPhoenixProperties(new HashMap()); + scenario2.setDataOverride(new DataOverride()); + scenario2.setTableName("tableName2"); + scenario2.setRowCount(500); + List scenarios = new ArrayList(); + scenarios.add(scenario); + scenarios.add(scenario2); + data.setScenarios(scenarios); + + // create JAXB context and initializing Marshaller + JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class); + Marshaller jaxbMarshaller = jaxbContext.createMarshaller(); + + // for getting nice formatted output + jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); + + // Writing to console + jaxbMarshaller.marshal(data, System.out); + } catch (JAXBException e) { + // some exception occured + e.printStackTrace(); } + return data.toString(); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/PherfTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/PherfTest.java index 34951d42cfa..6367105f9a3 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/PherfTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/PherfTest.java @@ -1,101 +1,104 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; -import org.junit.Rule; -import org.junit.Test; -import org.junit.contrib.java.lang.system.ExpectedSystemExit; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import java.util.Properties; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import org.junit.Rule; +import org.junit.Test; +import org.junit.contrib.java.lang.system.ExpectedSystemExit; public class PherfTest { - @Rule - public final ExpectedSystemExit exit = ExpectedSystemExit.none(); - - @Test - public void testListArgument() { - String[] args = {"-listFiles"}; - Pherf.main(args); - } - - @Test - public void testUnknownOption() { - String[] args = {"-drop", "all", "-q", "-m","-unknownOption"}; - - // Makes sure that System.exit(1) is called. 
- exit.expectSystemExitWithStatus(1); - Pherf.main(args); - } - - @Test - public void testLongOptions() throws Exception{ - String extension = ".sql"; - String args = "testArgs"; - Long numericArg = 15l; - - String[] longOptionArgs = {"--schemaFile",PherfConstants.SCHEMA_ROOT_PATTERN + extension,"--disableSchemaApply","--disableRuntimeResult","--listFiles","--scenarioFile",args,"--scenarioName",args,"--useAverageCompareType"}; - //Asset that No Exception is thrown, ParseException is thrown in case of invalid option - assertNotNull(new Pherf(longOptionArgs)); - - String[] otherLongOptionArgs = {"--drop",args,"--monitorFrequency",args,"--rowCountOverride",numericArg.toString(),"--hint",args,"--log_per_nrows",numericArg.toString(),"--diff","--export","--writerThreadSize",args,"--stats","--label",args,"--compare",args}; - //Asset that No Exception is thrown, ParseException is thrown in case of invalid option - assertNotNull(new Pherf(otherLongOptionArgs)); - } - - @Test - public void testDefaultLogPerNRowsArgument() throws Exception { - String[] args = {"-listFiles"}; - assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), - getLogPerNRowsValue(new Pherf(args).getProperties())); - } - - @Test - public void testCustomizedLogPerNRowsArgument() throws Exception { - Long customizedPerNRows = 15l; - String[] args = {"-listFiles", "-log_per_nrows", customizedPerNRows.toString()}; - assertEquals(customizedPerNRows, - getLogPerNRowsValue(new Pherf(args).getProperties())); - } - - @Test - public void testInvalidLogPerNRowsArgument() throws Exception { - Long zero = 0l; - Long negativeOne = -1l; - String invaildNum = "abc"; - - String[] args = {"-listFiles", "-log_per_nrows", zero.toString()}; - assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), - getLogPerNRowsValue(new Pherf(args).getProperties())); - - String[] args2 = {"-listFiles", "-log_per_nrows", negativeOne.toString()}; - assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), - getLogPerNRowsValue(new Pherf(args2).getProperties())); - - String[] args3 = {"-listFiles", "-log_per_nrows", invaildNum}; - assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), - getLogPerNRowsValue(new Pherf(args3).getProperties())); - } - - private Long getLogPerNRowsValue(Properties prop) { - return Long.valueOf(prop.getProperty(PherfConstants.LOG_PER_NROWS_NAME)); - } + @Rule + public final ExpectedSystemExit exit = ExpectedSystemExit.none(); + + @Test + public void testListArgument() { + String[] args = { "-listFiles" }; + Pherf.main(args); + } + + @Test + public void testUnknownOption() { + String[] args = { "-drop", "all", "-q", "-m", "-unknownOption" }; + + // Makes sure that System.exit(1) is called. 
+ exit.expectSystemExitWithStatus(1); + Pherf.main(args); + } + + @Test + public void testLongOptions() throws Exception { + String extension = ".sql"; + String args = "testArgs"; + Long numericArg = 15l; + + String[] longOptionArgs = { "--schemaFile", PherfConstants.SCHEMA_ROOT_PATTERN + extension, + "--disableSchemaApply", "--disableRuntimeResult", "--listFiles", "--scenarioFile", args, + "--scenarioName", args, "--useAverageCompareType" }; + // Asset that No Exception is thrown, ParseException is thrown in case of invalid option + assertNotNull(new Pherf(longOptionArgs)); + + String[] otherLongOptionArgs = + { "--drop", args, "--monitorFrequency", args, "--rowCountOverride", numericArg.toString(), + "--hint", args, "--log_per_nrows", numericArg.toString(), "--diff", "--export", + "--writerThreadSize", args, "--stats", "--label", args, "--compare", args }; + // Asset that No Exception is thrown, ParseException is thrown in case of invalid option + assertNotNull(new Pherf(otherLongOptionArgs)); + } + + @Test + public void testDefaultLogPerNRowsArgument() throws Exception { + String[] args = { "-listFiles" }; + assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), + getLogPerNRowsValue(new Pherf(args).getProperties())); + } + + @Test + public void testCustomizedLogPerNRowsArgument() throws Exception { + Long customizedPerNRows = 15l; + String[] args = { "-listFiles", "-log_per_nrows", customizedPerNRows.toString() }; + assertEquals(customizedPerNRows, getLogPerNRowsValue(new Pherf(args).getProperties())); + } + + @Test + public void testInvalidLogPerNRowsArgument() throws Exception { + Long zero = 0l; + Long negativeOne = -1l; + String invaildNum = "abc"; + + String[] args = { "-listFiles", "-log_per_nrows", zero.toString() }; + assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), + getLogPerNRowsValue(new Pherf(args).getProperties())); + + String[] args2 = { "-listFiles", "-log_per_nrows", negativeOne.toString() }; + assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), + getLogPerNRowsValue(new Pherf(args2).getProperties())); + + String[] args3 = { "-listFiles", "-log_per_nrows", invaildNum }; + assertEquals(Long.valueOf(PherfConstants.LOG_PER_NROWS), + getLogPerNRowsValue(new Pherf(args3).getProperties())); + } + + private Long getLogPerNRowsValue(Properties prop) { + return Long.valueOf(prop.getProperty(PherfConstants.LOG_PER_NROWS_NAME)); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java index 5f5a53bab6b..5f8f1f54867 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResourceTest.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; import static org.junit.Assert.assertNotNull; @@ -28,34 +27,36 @@ import org.junit.Test; public class ResourceTest { - @Test - public void testSchemaResourceList() throws Exception{ - String extension = ".sql"; - assertResources(PherfConstants.SCHEMA_ROOT_PATTERN + extension, PherfConstants.RESOURCE_DATAMODEL, extension); - } - - @Test - public void testScenarioResourceList() throws Exception { - String extension = ".xml"; - assertResources(PherfConstants.TEST_SCENARIO_ROOT_PATTERN + extension, PherfConstants.RESOURCE_SCENARIO, extension); - } - - @Test - public void testResourceListPropertyDirectory() throws Exception { - PherfConstants constants = PherfConstants.create(); - assertNotNull(constants.getProperty("pherf.default.dataloader.threadpool")); - assertNotNull(constants.getProperty("pherf.default.results.dir")); - assertNotNull(constants.getProperty(PherfConstants.LOG_PER_NROWS_NAME)); - } - - private Collection assertResources(String pattern, String rootDir, String assertStr) throws Exception { - ResourceList list = new ResourceList(rootDir); - Collection paths = - list.getResourceList(pattern); - assertTrue("Resource file list was empty", paths.size() > 0); - for (Path path : paths) { - assertTrue(path.toString().contains(assertStr)); - } - return paths; + @Test + public void testSchemaResourceList() throws Exception { + String extension = ".sql"; + assertResources(PherfConstants.SCHEMA_ROOT_PATTERN + extension, + PherfConstants.RESOURCE_DATAMODEL, extension); + } + + @Test + public void testScenarioResourceList() throws Exception { + String extension = ".xml"; + assertResources(PherfConstants.TEST_SCENARIO_ROOT_PATTERN + extension, + PherfConstants.RESOURCE_SCENARIO, extension); + } + + @Test + public void testResourceListPropertyDirectory() throws Exception { + PherfConstants constants = PherfConstants.create(); + assertNotNull(constants.getProperty("pherf.default.dataloader.threadpool")); + assertNotNull(constants.getProperty("pherf.default.results.dir")); + assertNotNull(constants.getProperty(PherfConstants.LOG_PER_NROWS_NAME)); + } + + private Collection assertResources(String pattern, String rootDir, String assertStr) + throws Exception { + ResourceList list = new ResourceList(rootDir); + Collection paths = list.getResourceList(pattern); + assertTrue("Resource file list was empty", paths.size() > 0); + for (Path path : paths) { + assertTrue(path.toString().contains(assertStr)); } -} \ No newline at end of file + return paths; + } +} diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java index a4b7648cfad..f7a4872f318 100644 --- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultBaseTest.java @@ -1,51 +1,51 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; +import java.util.Properties; + import org.apache.phoenix.pherf.result.ResultUtil; import org.junit.AfterClass; import org.junit.BeforeClass; -import java.util.Properties; - public class ResultBaseTest { - private static PherfConstants constants; - private static boolean isSetUpDone = false; - private static Properties properties; - - @BeforeClass - public static synchronized void setUp() throws Exception { - if (isSetUpDone) { - return; - } + private static PherfConstants constants; + private static boolean isSetUpDone = false; + private static Properties properties; - constants = PherfConstants.create(); - properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES, false); - new ResultUtil().ensureBaseDirExists(properties.getProperty("pherf.default.results.dir")); - isSetUpDone = true; + @BeforeClass + public static synchronized void setUp() throws Exception { + if (isSetUpDone) { + return; } - - @AfterClass public static synchronized void tearDown() throws Exception { - try { - new ResultUtil().deleteDir(properties.getProperty("pherf.default.results.dir")); - } catch (Exception e) { - // swallow - } + + constants = PherfConstants.create(); + properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES, false); + new ResultUtil().ensureBaseDirExists(properties.getProperty("pherf.default.results.dir")); + isSetUpDone = true; + } + + @AfterClass + public static synchronized void tearDown() throws Exception { + try { + new ResultUtil().deleteDir(properties.getProperty("pherf.default.results.dir")); + } catch (Exception e) { + // swallow } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java index 9cad1f19d5b..88f17e8f42e 100644 --- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; import static org.junit.Assert.*; @@ -25,182 +24,185 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.jmx.MonitorManager; +import org.apache.phoenix.pherf.result.*; import org.apache.phoenix.pherf.result.file.Extension; import org.apache.phoenix.pherf.result.file.ResultFileDetails; import org.apache.phoenix.pherf.result.impl.CSVFileResultHandler; import org.apache.phoenix.pherf.result.impl.XMLResultHandler; -import org.apache.phoenix.pherf.result.*; import org.junit.Test; -import org.apache.phoenix.pherf.configuration.Query; public class ResultTest extends ResultBaseTest { - @Test - public void testMonitorWriter() throws Exception { - String[] row = "org.apache.phoenix.pherf:type=PherfWriteThreads,6,Mon Jan 05 15:14:00 PST 2015".split(PherfConstants.RESULT_FILE_DELIMETER); - ResultHandler resultMonitorWriter = null; - List resultValues = new ArrayList<>(); - for (String val : row) { - resultValues.add(new ResultValue(val)); - } - - try { - resultMonitorWriter = new CSVFileResultHandler(); - resultMonitorWriter.setResultFileDetails(ResultFileDetails.CSV_MONITOR); - resultMonitorWriter.setResultFileName(PherfConstants.MONITOR_FILE_NAME); - Result - result = new Result(ResultFileDetails.CSV_MONITOR, ResultFileDetails.CSV_MONITOR.getHeader().toString(), resultValues); - resultMonitorWriter.write(result); - resultMonitorWriter.write(result); - resultMonitorWriter.write(result); - resultMonitorWriter.close(); - List results = resultMonitorWriter.read(); - assertEquals("Results did not contain row.", results.size(), 3); - - } finally { - if (resultMonitorWriter != null) { - resultMonitorWriter.flush(); - resultMonitorWriter.close(); - } - } + @Test + public void 
testMonitorWriter() throws Exception { + String[] row = "org.apache.phoenix.pherf:type=PherfWriteThreads,6,Mon Jan 05 15:14:00 PST 2015" + .split(PherfConstants.RESULT_FILE_DELIMETER); + ResultHandler resultMonitorWriter = null; + List resultValues = new ArrayList<>(); + for (String val : row) { + resultValues.add(new ResultValue(val)); } - @Test - public void testMonitorResult() throws Exception { - ExecutorService executorService = Executors.newFixedThreadPool(1); - MonitorManager monitor = new MonitorManager(100); - Future future = executorService.submit(monitor.execute()); - List records; - final int TIMEOUT = 30; - - int ct = 0; - int max = 30; - // Wait while we write some rows. - while (!future.isDone()) { - Thread.sleep(100); - if (ct == max) { - int timer = 0; - monitor.complete(); - while (monitor.isRunning() && (timer < TIMEOUT)) { - System.out.println("Waiting for monitor to finish. Seconds Waited :" + timer); - Thread.sleep(1000); - timer++; - } - } - - ct++; - } - executorService.shutdown(); - records = monitor.readResults(); - - assertNotNull("Could not retrieve records", records); - assertTrue("Failed to get correct CSV records.", records.size() > 0); - assertFalse("Monitor was not stopped correctly.", monitor.isRunning()); - } - - @Test - public void testExtensionEnum() { - assertEquals("Extension did not match", Extension.CSV.toString(), ".csv"); - assertEquals("Extension did not match", Extension.DETAILED_CSV.toString(), "_detail.csv"); - } - - @Test - public void testResult() throws Exception { - String filename = "testresult"; - ResultHandler xmlResultHandler = new XMLResultHandler(); - xmlResultHandler.setResultFileDetails(ResultFileDetails.XML); - xmlResultHandler.setResultFileName(filename); - - ResultManager resultManager = new ResultManager(filename, Arrays.asList(xmlResultHandler)); - assertTrue("Default Handlers were not initialized.", resultManager.getResultHandlers().size() > 0); - - // write result to file - DataModelResult dataModelResult = setUpDataModelResult(); - resultManager.write(dataModelResult); - - // Put some stuff in a combined file - List modelResults = new ArrayList<>(); - modelResults.add(dataModelResult); - modelResults.add(dataModelResult); - resultManager.write(modelResults); - resultManager.flush(); - - // read result from file - List resultList = xmlResultHandler.read(); - ResultValue resultValue = resultList.get(0).getResultValues().get(0); - DataModelResult dataModelResultFromFile = resultValue.getResultValue(); - - ScenarioResult scenarioResultFromFile = dataModelResultFromFile.getScenarioResult().get(0); - QuerySetResult querySetResultFromFile = scenarioResultFromFile.getQuerySetResult().get(0); - QueryResult queryResultFromFile = querySetResultFromFile.getQueryResults().get(0); - ThreadTime ttFromFile = queryResultFromFile.getThreadTimes().get(0); - - // thread level verification - assertEquals(new Long(10), ttFromFile.getMinTimeInMs().getElapsedDurationInMs()); - assertEquals(new Long(30), ttFromFile.getMaxTimeInMs().getElapsedDurationInMs()); - assertEquals(20, (int) ttFromFile.getAvgTimeInMs()); - - // 3rd runtime has the earliest start time, therefore that's what's expected. 
- QueryResult - qr = - dataModelResult.getScenarioResult().get(0).getQuerySetResult().get(0) - .getQueryResults().get(0); - List runTimes = qr.getThreadTimes().get(0).getRunTimesInMs(); - assertEquals(runTimes.get(2).getStartTime(), ttFromFile.getStartTime()); - assertEquals(runTimes.get(0).getResultRowCount(), ttFromFile.getRunTimesInMs().get(0).getResultRowCount()); - assertEquals(runTimes.get(1).getResultRowCount(), ttFromFile.getRunTimesInMs().get(1).getResultRowCount()); - assertEquals(runTimes.get(2).getResultRowCount(), ttFromFile.getRunTimesInMs().get(2).getResultRowCount()); - - // query result level verification - assertEquals(10, queryResultFromFile.getAvgMinRunTimeInMs()); - assertEquals(30, queryResultFromFile.getAvgMaxRunTimeInMs()); - assertEquals(20, queryResultFromFile.getAvgRunTimeInMs()); + try { + resultMonitorWriter = new CSVFileResultHandler(); + resultMonitorWriter.setResultFileDetails(ResultFileDetails.CSV_MONITOR); + resultMonitorWriter.setResultFileName(PherfConstants.MONITOR_FILE_NAME); + Result result = new Result(ResultFileDetails.CSV_MONITOR, + ResultFileDetails.CSV_MONITOR.getHeader().toString(), resultValues); + resultMonitorWriter.write(result); + resultMonitorWriter.write(result); + resultMonitorWriter.write(result); + resultMonitorWriter.close(); + List results = resultMonitorWriter.read(); + assertEquals("Results did not contain row.", results.size(), 3); + + } finally { + if (resultMonitorWriter != null) { + resultMonitorWriter.flush(); + resultMonitorWriter.close(); + } } + } + + @Test + public void testMonitorResult() throws Exception { + ExecutorService executorService = Executors.newFixedThreadPool(1); + MonitorManager monitor = new MonitorManager(100); + Future future = executorService.submit(monitor.execute()); + List records; + final int TIMEOUT = 30; + + int ct = 0; + int max = 30; + // Wait while we write some rows. + while (!future.isDone()) { + Thread.sleep(100); + if (ct == max) { + int timer = 0; + monitor.complete(); + while (monitor.isRunning() && (timer < TIMEOUT)) { + System.out.println("Waiting for monitor to finish. 
Seconds Waited :" + timer); + Thread.sleep(1000); + timer++; + } + } - private DataModelResult setUpDataModelResult() { - DataModelResult dataModelResult = new DataModelResult(); - dataModelResult.setZookeeper("mytestzk"); - ScenarioResult scenarioResult = new ScenarioResult(); - scenarioResult.setTableName("MY_TABLE_NAME"); - scenarioResult.setName("MY_TEST_SCENARIO"); - - dataModelResult.getScenarioResult().add(scenarioResult); - scenarioResult.setRowCount(999); - QuerySetResult querySetResult = new QuerySetResult(); - querySetResult.setConcurrency("50"); - scenarioResult.getQuerySetResult().add(querySetResult); - Query query = new Query(); - Query query2 = new Query(); - - // add some spaces so we test query gets normalized - query.setQueryGroup("g123"); - query.setTenantId("tennantID123"); - query.setStatement("Select * \n" + "from FHA"); - query2.setStatement("Select a, b, c * \n" + "from FHA2"); - assertEquals("Expected consecutive spaces to be normalized", "Select * from FHA", - query.getStatement()); - - QueryResult queryResult = new QueryResult(query); - QueryResult queryResult2 = new QueryResult(query2); - querySetResult.getQueryResults().add(queryResult); - querySetResult.getQueryResults().add(queryResult2); - - ThreadTime tt = new ThreadTime(); - tt.setThreadName("thread1"); - Calendar calendar = Calendar.getInstance(); - Date startTime1 = calendar.getTime(); - RunTime runtime1 = new RunTime(startTime1, 1000L, new Long(10)); - tt.getRunTimesInMs().add(runtime1); - calendar.add(Calendar.MINUTE, -1); - RunTime runtime2 = new RunTime(calendar.getTime(), 2000L, new Long(20)); - tt.getRunTimesInMs().add(runtime2); - calendar.add(Calendar.MINUTE, -1); - RunTime runtime3 = new RunTime(calendar.getTime(), 3000L, new Long(30)); - tt.getRunTimesInMs().add(runtime3); - queryResult.getThreadTimes().add(tt); - queryResult2.getThreadTimes().add(tt); - - return dataModelResult; + ct++; } -} \ No newline at end of file + executorService.shutdown(); + records = monitor.readResults(); + + assertNotNull("Could not retrieve records", records); + assertTrue("Failed to get correct CSV records.", records.size() > 0); + assertFalse("Monitor was not stopped correctly.", monitor.isRunning()); + } + + @Test + public void testExtensionEnum() { + assertEquals("Extension did not match", Extension.CSV.toString(), ".csv"); + assertEquals("Extension did not match", Extension.DETAILED_CSV.toString(), "_detail.csv"); + } + + @Test + public void testResult() throws Exception { + String filename = "testresult"; + ResultHandler xmlResultHandler = new XMLResultHandler(); + xmlResultHandler.setResultFileDetails(ResultFileDetails.XML); + xmlResultHandler.setResultFileName(filename); + + ResultManager resultManager = new ResultManager(filename, Arrays.asList(xmlResultHandler)); + assertTrue("Default Handlers were not initialized.", + resultManager.getResultHandlers().size() > 0); + + // write result to file + DataModelResult dataModelResult = setUpDataModelResult(); + resultManager.write(dataModelResult); + + // Put some stuff in a combined file + List modelResults = new ArrayList<>(); + modelResults.add(dataModelResult); + modelResults.add(dataModelResult); + resultManager.write(modelResults); + resultManager.flush(); + + // read result from file + List resultList = xmlResultHandler.read(); + ResultValue resultValue = resultList.get(0).getResultValues().get(0); + DataModelResult dataModelResultFromFile = resultValue.getResultValue(); + + ScenarioResult scenarioResultFromFile = 
dataModelResultFromFile.getScenarioResult().get(0); + QuerySetResult querySetResultFromFile = scenarioResultFromFile.getQuerySetResult().get(0); + QueryResult queryResultFromFile = querySetResultFromFile.getQueryResults().get(0); + ThreadTime ttFromFile = queryResultFromFile.getThreadTimes().get(0); + + // thread level verification + assertEquals(new Long(10), ttFromFile.getMinTimeInMs().getElapsedDurationInMs()); + assertEquals(new Long(30), ttFromFile.getMaxTimeInMs().getElapsedDurationInMs()); + assertEquals(20, (int) ttFromFile.getAvgTimeInMs()); + + // 3rd runtime has the earliest start time, therefore that's what's expected. + QueryResult qr = dataModelResult.getScenarioResult().get(0).getQuerySetResult().get(0) + .getQueryResults().get(0); + List runTimes = qr.getThreadTimes().get(0).getRunTimesInMs(); + assertEquals(runTimes.get(2).getStartTime(), ttFromFile.getStartTime()); + assertEquals(runTimes.get(0).getResultRowCount(), + ttFromFile.getRunTimesInMs().get(0).getResultRowCount()); + assertEquals(runTimes.get(1).getResultRowCount(), + ttFromFile.getRunTimesInMs().get(1).getResultRowCount()); + assertEquals(runTimes.get(2).getResultRowCount(), + ttFromFile.getRunTimesInMs().get(2).getResultRowCount()); + + // query result level verification + assertEquals(10, queryResultFromFile.getAvgMinRunTimeInMs()); + assertEquals(30, queryResultFromFile.getAvgMaxRunTimeInMs()); + assertEquals(20, queryResultFromFile.getAvgRunTimeInMs()); + } + + private DataModelResult setUpDataModelResult() { + DataModelResult dataModelResult = new DataModelResult(); + dataModelResult.setZookeeper("mytestzk"); + ScenarioResult scenarioResult = new ScenarioResult(); + scenarioResult.setTableName("MY_TABLE_NAME"); + scenarioResult.setName("MY_TEST_SCENARIO"); + + dataModelResult.getScenarioResult().add(scenarioResult); + scenarioResult.setRowCount(999); + QuerySetResult querySetResult = new QuerySetResult(); + querySetResult.setConcurrency("50"); + scenarioResult.getQuerySetResult().add(querySetResult); + Query query = new Query(); + Query query2 = new Query(); + + // add some spaces so we test query gets normalized + query.setQueryGroup("g123"); + query.setTenantId("tennantID123"); + query.setStatement("Select * \n" + "from FHA"); + query2.setStatement("Select a, b, c * \n" + "from FHA2"); + assertEquals("Expected consecutive spaces to be normalized", "Select * from FHA", + query.getStatement()); + + QueryResult queryResult = new QueryResult(query); + QueryResult queryResult2 = new QueryResult(query2); + querySetResult.getQueryResults().add(queryResult); + querySetResult.getQueryResults().add(queryResult2); + + ThreadTime tt = new ThreadTime(); + tt.setThreadName("thread1"); + Calendar calendar = Calendar.getInstance(); + Date startTime1 = calendar.getTime(); + RunTime runtime1 = new RunTime(startTime1, 1000L, new Long(10)); + tt.getRunTimesInMs().add(runtime1); + calendar.add(Calendar.MINUTE, -1); + RunTime runtime2 = new RunTime(calendar.getTime(), 2000L, new Long(20)); + tt.getRunTimesInMs().add(runtime2); + calendar.add(Calendar.MINUTE, -1); + RunTime runtime3 = new RunTime(calendar.getTime(), 3000L, new Long(30)); + tt.getRunTimesInMs().add(runtime3); + queryResult.getThreadTimes().add(tt); + queryResult2.getThreadTimes().add(tt); + + return dataModelResult; + } +} diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RowCalculatorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RowCalculatorTest.java index 884028976a7..9597b89f055 100644 --- 
a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RowCalculatorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RowCalculatorTest.java @@ -1,88 +1,80 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf; +import static junit.framework.Assert.assertEquals; + import org.apache.phoenix.pherf.util.RowCalculator; import org.junit.Test; -import static junit.framework.Assert.assertEquals; - public class RowCalculatorTest { - /** - * Test rows divide evenly with large rows and small threadpool - * @throws Exception - */ - @Test - public void testRowsEvenDivide() throws Exception { - int threadPoolSize = 10; - int tableRowCount = 100; - assertRowsSum(threadPoolSize, tableRowCount); - } + /** + * Test rows divide evenly with large rows and small threadpool + */ + @Test + public void testRowsEvenDivide() throws Exception { + int threadPoolSize = 10; + int tableRowCount = 100; + assertRowsSum(threadPoolSize, tableRowCount); + } - /** - * Test rows add up when not divided evenly with large rows and small threadpool - * - * @throws Exception - */ - @Test - public void testRowsNotEvenDivide() throws Exception { - int threadPoolSize = 9; - int tableRowCount = 100; - assertRowsSum(threadPoolSize, tableRowCount); - } + /** + * Test rows add up when not divided evenly with large rows and small threadpool + */ + @Test + public void testRowsNotEvenDivide() throws Exception { + int threadPoolSize = 9; + int tableRowCount = 100; + assertRowsSum(threadPoolSize, tableRowCount); + } - /** - * Test rows add up when not divided evenly with large threadpool and small rowcount - * - * @throws Exception - */ - @Test - public void testRowsNotEvenDivideSmallRC() throws Exception { - int threadPoolSize = 50; - int tableRowCount = 21; - assertRowsSum(threadPoolSize, tableRowCount); - } + /** + * Test rows add up when not divided evenly with large threadpool and small rowcount + */ + @Test + public void testRowsNotEvenDivideSmallRC() throws Exception { + int threadPoolSize = 50; + int tableRowCount = 
21; + assertRowsSum(threadPoolSize, tableRowCount); + } - /** - * Test rows count equal to thread pool - * - * @throws Exception - */ - @Test - public void testRowsEqualToPool() throws Exception { - int threadPoolSize = 50; - int tableRowCount = 50; - assertRowsSum(threadPoolSize, tableRowCount); - } + /** + * Test rows count equal to thread pool + */ + @Test + public void testRowsEqualToPool() throws Exception { + int threadPoolSize = 50; + int tableRowCount = 50; + assertRowsSum(threadPoolSize, tableRowCount); + } - private void assertRowsSum(int threadPoolSize, int tableRowCount) { - int sum = 0; - RowCalculator rc = new RowCalculator(threadPoolSize, tableRowCount); - assertEquals("Rows generated did not match expected count! ", threadPoolSize, rc.size()); + private void assertRowsSum(int threadPoolSize, int tableRowCount) { + int sum = 0; + RowCalculator rc = new RowCalculator(threadPoolSize, tableRowCount); + assertEquals("Rows generated did not match expected count! ", threadPoolSize, rc.size()); - // Sum of all rows should equal expected row count - for (int i = 0; i < threadPoolSize; i++) { - sum += rc.getNext(); - } - assertEquals("Rows did not sum up correctly", tableRowCount, sum); - - // Ensure rows were removed from list - assertEquals(rc.size(), 0); + // Sum of all rows should equal expected row count + for (int i = 0; i < threadPoolSize; i++) { + sum += rc.getNext(); } + assertEquals("Rows did not sum up correctly", tableRowCount, sum); + + // Ensure rows were removed from list + assertEquals(rc.size(), 0); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java index eb3ee18cd13..85692d7b35c 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf; import static org.junit.Assert.assertEquals; @@ -49,410 +48,419 @@ import org.junit.Test; public class RuleGeneratorTest { - private static final String matcherScenario = PherfConstants.TEST_SCENARIO_ROOT_PATTERN + ".xml"; - - @Test - public void testDateGenerator() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - DataModel model = parser.getDataModels().get(0); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - for (Column dataMapping : model.getDataMappingColumns()) { - if ((dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getName().equals("SOME_DATE"))) { - // Test directly through generator method and that it converts to Phoenix type - assertRandomDateValue(dataMapping, rulesApplier); - - // Test through data value method, which is normal path - // Do this 20 times and we should hit each possibility at least once. - for (int i = 0; i < 20; i++) { - DataValue value = rulesApplier.getDataValue(dataMapping); - assertNotNull("Could not retrieve DataValue for random DATE.", value); - assertNotNull("Could not retrieve a value in DataValue for random DATE.", value.getValue()); - if (value.getMinValue() != null) { - // Check that dates are between min/max - assertDateBetween(value); - } - } - } + private static final String matcherScenario = PherfConstants.TEST_SCENARIO_ROOT_PATTERN + ".xml"; + + @Test + public void testDateGenerator() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + DataModel model = parser.getDataModels().get(0); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + + for (Column dataMapping : model.getDataMappingColumns()) { + if ( + (dataMapping.getType() == DataTypeMapping.DATE) + && (dataMapping.getName().equals("SOME_DATE")) + ) { + // Test directly through generator method and that it converts to Phoenix type + assertRandomDateValue(dataMapping, rulesApplier); + + // Test through data value method, which is normal path + // Do this 20 times and we should hit each possibility at least once. 
+ for (int i = 0; i < 20; i++) { + DataValue value = rulesApplier.getDataValue(dataMapping); + assertNotNull("Could not retrieve DataValue for random DATE.", value); + assertNotNull("Could not retrieve a value in DataValue for random DATE.", + value.getValue()); + if (value.getMinValue() != null) { + // Check that dates are between min/max + assertDateBetween(value); + } } + } } - - //Test to check the current date is generated correctly between the timestamps at column level and datavalue level - @Test - public void testCurrentDateGenerator() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - DataModel model = parser.getDataModels().get(0); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - // Time before generating the date - String timeStamp1 = rulesApplier.getCurrentDate(); - sleep(2); //sleep for few mili-sec - - for (Column dataMapping : model.getDataMappingColumns()) { - if ((dataMapping.getType() == DataTypeMapping.DATE) - && (dataMapping.getUseCurrentDate() == true) - && (dataMapping.getDataSequence() != DataSequence.SEQUENTIAL)) { - - // Generate the date using rules - DataValue value = rulesApplier.getDataValue(dataMapping); - assertNotNull("Could not retrieve DataValue for random DATE.", value); - assertNotNull("Could not retrieve a value in DataValue for random DATE.", - value.getValue()); - - sleep(2); - // Time after generating the date - String timeStamp2 = rulesApplier.getCurrentDate(); - - // Check that dates are between timestamp1 & timestamp2 - value.setMinValue(timeStamp1); - value.setMaxValue(timeStamp2); - assertDateBetween(value); - } - - // Check at list level - if ((dataMapping.getType() == DataTypeMapping.DATE) - && (dataMapping.getName().equals("PRESENT_DATE"))) { - // Do this 20 times and we should and every time generated data should be between - // timestamps - for (int i = 0; i < 1; i++) { - DataValue value = rulesApplier.getDataValue(dataMapping); - assertNotNull("Could not retrieve DataValue for random DATE.", value); - assertNotNull("Could not retrieve a value in DataValue for random DATE.", - value.getValue()); - - sleep(2); - // Time after generating the date - String timeStamp2 = rulesApplier.getCurrentDate(); - - // Check generated date is between timestamp1 & timestamp2 - value.setMinValue(timeStamp1); - value.setMaxValue(timeStamp2); - assertDateBetween(value); + } + + // Test to check the current date is generated correctly between the timestamps at column level + // and datavalue level + @Test + public void testCurrentDateGenerator() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + DataModel model = parser.getDataModels().get(0); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + + // Time before generating the date + String timeStamp1 = rulesApplier.getCurrentDate(); + sleep(2); // sleep for few mili-sec + + for (Column dataMapping : model.getDataMappingColumns()) { + if ( + (dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getUseCurrentDate() == true) + && (dataMapping.getDataSequence() != DataSequence.SEQUENTIAL) + ) { + + // Generate the date using rules + DataValue value = rulesApplier.getDataValue(dataMapping); + assertNotNull("Could not retrieve DataValue for random DATE.", value); + assertNotNull("Could not retrieve a value in DataValue for random DATE.", value.getValue()); + + sleep(2); + // Time after generating the date + String 
timeStamp2 = rulesApplier.getCurrentDate(); + + // Check that dates are between timestamp1 & timestamp2 + value.setMinValue(timeStamp1); + value.setMaxValue(timeStamp2); + assertDateBetween(value); + } + + // Check at list level + if ( + (dataMapping.getType() == DataTypeMapping.DATE) + && (dataMapping.getName().equals("PRESENT_DATE")) + ) { + // Do this 20 times and we should and every time generated data should be between + // timestamps + for (int i = 0; i < 1; i++) { + DataValue value = rulesApplier.getDataValue(dataMapping); + assertNotNull("Could not retrieve DataValue for random DATE.", value); + assertNotNull("Could not retrieve a value in DataValue for random DATE.", + value.getValue()); + + sleep(2); + // Time after generating the date + String timeStamp2 = rulesApplier.getCurrentDate(); + + // Check generated date is between timestamp1 & timestamp2 + value.setMinValue(timeStamp1); + value.setMaxValue(timeStamp2); + assertDateBetween(value); - } - } } - + } } - @Test - public void testNullChance() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - DataModel model = parser.getDataModels().get(0); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - int sampleSize = 100; - List values = new ArrayList<>(sampleSize); - - for (Column dataMapping : model.getDataMappingColumns()) { - DataValue value = rulesApplier.getDataValue(dataMapping); - if (dataMapping.getNullChance() == 0) { - // 0 chance of getting null means we should never have an empty string returned - assertFalse("", value.getValue().equals("")); - } else if (dataMapping.getNullChance() == 100) { - // 100 chance of getting null means we should always have an empty string returned - assertTrue("", value.getValue().equals("")); - } else if ((dataMapping.getNullChance() == 90)) { - // You can't really test for this, but you can eyeball it on debugging. - for (int i = 0; i < sampleSize; i++) { - DataValue tVal = rulesApplier.getDataValue(dataMapping); - values.add(tVal.getValue()); - } - Collections.sort(values); - } + } + + @Test + public void testNullChance() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + DataModel model = parser.getDataModels().get(0); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + int sampleSize = 100; + List values = new ArrayList<>(sampleSize); + + for (Column dataMapping : model.getDataMappingColumns()) { + DataValue value = rulesApplier.getDataValue(dataMapping); + if (dataMapping.getNullChance() == 0) { + // 0 chance of getting null means we should never have an empty string returned + assertFalse("", value.getValue().equals("")); + } else if (dataMapping.getNullChance() == 100) { + // 100 chance of getting null means we should always have an empty string returned + assertTrue("", value.getValue().equals("")); + } else if ((dataMapping.getNullChance() == 90)) { + // You can't really test for this, but you can eyeball it on debugging. 
+ for (int i = 0; i < sampleSize; i++) { + DataValue tVal = rulesApplier.getDataValue(dataMapping); + values.add(tVal.getValue()); } + Collections.sort(values); + } } - - @Test - public void testSequentialDataSequence() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - DataModel model = parser.getDataModels().get(0); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - Column targetColumn = null; - for (Column column : model.getDataMappingColumns()) { - DataSequence sequence = column.getDataSequence(); - if (!DataTypeMapping.INTEGER.equals(column.getType()) && sequence == DataSequence.SEQUENTIAL) { - targetColumn = column; - break; + } + + @Test + public void testSequentialDataSequence() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + DataModel model = parser.getDataModels().get(0); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + + Column targetColumn = null; + for (Column column : model.getDataMappingColumns()) { + DataSequence sequence = column.getDataSequence(); + if ( + !DataTypeMapping.INTEGER.equals(column.getType()) && sequence == DataSequence.SEQUENTIAL + ) { + targetColumn = column; + break; + } + } + assertNotNull("Could not find a DataSequence.SEQENTIAL rule.", targetColumn); + assertMultiThreadedIncrementValue(targetColumn, rulesApplier); + } + + @Test + public void testSequentialIntegerDataSequence() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + DataModel model = parser.getDataModels().get(0); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + + Column targetColumn = null; + for (Column column : model.getDataMappingColumns()) { + DataSequence sequence = column.getDataSequence(); + if (DataTypeMapping.INTEGER.equals(column.getType()) && sequence == DataSequence.SEQUENTIAL) { + targetColumn = column; + break; + } + } + assertNotNull("Could not find a DataSequence.SEQENTIAL rule.", targetColumn); + assertMultiThreadedIncrementValue(targetColumn, rulesApplier); + } + + /** + * Verifies that we can generate a date between to specific dates. + */ + private void assertRandomDateValue(Column dataMapping, RulesApplier rulesApplier) + throws Exception { + List dataValues = dataMapping.getDataValues(); + DataValue ruleValue = dataValues.get(2); + String dt = rulesApplier.generateRandomDate(ruleValue.getMinValue(), ruleValue.getMaxValue()); + ruleValue.setValue(dt); + assertDateBetween(ruleValue); + } + + /** + * This method will test {@link org.apache.phoenix.pherf.configuration.DataSequence} SEQUENTIAL It + * ensures values returned always increase uniquely. RulesApplier will be accessed by multiple + * writer so we must ensure increment is thread safe. 
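The multi-threaded increment check that follows asserts that concurrent writers never receive a duplicate value and, for INTEGER columns, that values count up one by one. A minimal sketch of a generator meeting that contract, assuming a single shared atomic counter (illustrative only; the actual RulesApplier internals are not shown in this patch):

  import java.util.concurrent.atomic.AtomicLong;

  // Illustrative only: every caller, on any thread, receives a distinct value 1, 2, 3, ...
  public final class AtomicSequentialCounter {
    private final AtomicLong next = new AtomicLong(1);

    public long nextValue() {
      return next.getAndIncrement();
    }
  }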
+ */ + private void assertMultiThreadedIncrementValue(final Column column, + final RulesApplier rulesApplier) throws Exception { + final int threadCount = 30; + final int increments = 100; + final Set testSet = new TreeSet(); + List threadList = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + Thread t = new Thread() { + + @Override + public void run() { + for (int i = 0; i < increments; i++) { + try { + synchronized (testSet) { + DataValue value = rulesApplier.getDataValue(column); + String strValue = value.getValue(); + assertFalse("Incrementer gave a duplicate value: " + strValue, + testSet.contains(strValue)); + if (DataTypeMapping.INTEGER.equals(column.getType())) { + assertEquals(testSet.size() + 1, (long) Long.valueOf(strValue)); + } else { + assertTrue("Length did not equal expected.", + strValue.length() == column.getLength()); + } + testSet.add(strValue); + } + } catch (Exception e) { + fail("Caught an exception during test: " + e.getMessage()); } + } } - assertNotNull("Could not find a DataSequence.SEQENTIAL rule.", targetColumn); - assertMultiThreadedIncrementValue(targetColumn, rulesApplier); + }; + t.start(); + threadList.add(t); } - @Test - public void testSequentialIntegerDataSequence() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - DataModel model = parser.getDataModels().get(0); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - Column targetColumn = null; - for (Column column : model.getDataMappingColumns()) { - DataSequence sequence = column.getDataSequence(); - if (DataTypeMapping.INTEGER.equals(column.getType()) && sequence == DataSequence.SEQUENTIAL) { - targetColumn = column; - break; - } - } - assertNotNull("Could not find a DataSequence.SEQENTIAL rule.", targetColumn); - assertMultiThreadedIncrementValue(targetColumn, rulesApplier); + // Wait for threads to finish + for (Thread t : threadList) { + try { + t.join(); + } catch (InterruptedException e) { + fail("There was a problem reading thread: " + e.getMessage()); + } } - /** - * Verifies that we can generate a date between to specific dates. 
- * - * @param dataMapping - * @param rulesApplier - * @throws Exception - */ - private void assertRandomDateValue(Column dataMapping, RulesApplier rulesApplier) throws Exception { - List dataValues = dataMapping.getDataValues(); - DataValue ruleValue = dataValues.get(2); - String dt = rulesApplier.generateRandomDate(ruleValue.getMinValue(), ruleValue.getMaxValue()); - ruleValue.setValue(dt); - assertDateBetween(ruleValue); + assertTrue("Expected count in increments did not match expected", + testSet.size() == (threadCount * increments)); + } + + @Test + public void testTimestampRule() throws Exception { + SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + SimpleDateFormat df = new SimpleDateFormat("yyyy"); + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + Scenario scenario = parser.getScenarios().get(0); + + Column simPhxCol = new Column(); + simPhxCol.setName("TS_DATE"); + simPhxCol.setType(DataTypeMapping.TIMESTAMP); + + // Run this 10 times gives a reasonable chance that all the values will appear at least once + for (int i = 0; i < 10; i++) { + DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); + Date dt = simpleDateFormat.parse(value.getValue()); + int year = Integer.parseInt(df.format(dt)); + assertTrue("Got unexpected TS value" + value.getValue(), year >= 2020 && year <= 2025); } + } - /** - * This method will test {@link org.apache.phoenix.pherf.configuration.DataSequence} SEQUENTIAL - * It ensures values returned always increase uniquely. RulesApplier will be accessed by multiple writer - * so we must ensure increment is thread safe. - */ - private void assertMultiThreadedIncrementValue(final Column column, final RulesApplier rulesApplier) throws Exception { - final int threadCount = 30; - final int increments = 100; - final Set testSet = new TreeSet(); - List threadList = new ArrayList<>(); - for (int i = 0; i < threadCount; i++) { - Thread t = new Thread() { - - @Override - public void run() { - for (int i = 0; i < increments; i++) { - try { - synchronized (testSet) { - DataValue value = rulesApplier.getDataValue(column); - String strValue = value.getValue(); - assertFalse("Incrementer gave a duplicate value: " + strValue, testSet.contains(strValue)); - if(DataTypeMapping.INTEGER.equals(column.getType())) { - assertEquals(testSet.size() + 1,(long) Long.valueOf(strValue)); - } else { - assertTrue("Length did not equal expected.", - strValue.length() == column.getLength()); - } - testSet.add(strValue); - } - } catch (Exception e) { - fail("Caught an exception during test: " + e.getMessage()); - } - } - } - }; - t.start(); - threadList.add(t); - } + @Test + public void testVarcharArray() throws Exception { - // Wait for threads to finish - for (Thread t : threadList) { - try { - t.join(); - } catch (InterruptedException e) { - fail("There was a problem reading thread: " + e.getMessage()); - } - } + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); - assertTrue("Expected count in increments did not match expected", - testSet.size() == (threadCount * increments)); + // Run this 15 times gives a reasonable chance that all the values will appear at least once + for (int i = 0; i < 15; i++) { + Column c = rulesApplier.getRule("VAR_ARRAY"); + DataValue value = rulesApplier.getDataValue(c); + 
assertTrue("Got a value not in the list for the rule. :" + value.getValue(), + value.getValue().equals("Foo,Bar")); } + } - @Test - public void testTimestampRule() throws Exception { - SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - SimpleDateFormat df = new SimpleDateFormat("yyyy"); - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - Scenario scenario = parser.getScenarios().get(0); - - Column simPhxCol = new Column(); - simPhxCol.setName("TS_DATE"); - simPhxCol.setType(DataTypeMapping.TIMESTAMP); - - // Run this 10 times gives a reasonable chance that all the values will appear at least once - for (int i = 0; i < 10; i++) { - DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); - Date dt = simpleDateFormat.parse(value.getValue()); - int year = Integer.parseInt(df.format(dt)); - assertTrue("Got unexpected TS value" + value.getValue(), year >= 2020 && year <= 2025); - } + @Test + public void testVarBinary() throws Exception { + List expectedValues = new ArrayList(); + for (int i = 0; i < 10; i++) { + expectedValues.add("VBOxx00" + i); } - - @Test - public void testVarcharArray() throws Exception { - - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - // Run this 15 times gives a reasonable chance that all the values will appear at least once - for (int i = 0; i < 15; i++) { - Column c = rulesApplier.getRule("VAR_ARRAY"); - DataValue value = rulesApplier.getDataValue(c); - assertTrue("Got a value not in the list for the rule. :" + value.getValue(), value.getValue().equals("Foo,Bar")); - } - } - - @Test - public void testVarBinary() throws Exception { - List expectedValues = new ArrayList(); - for (int i=0; i<10; i++) { - expectedValues.add("VBOxx00" + i); - } - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - for (int i = 0; i < 5; i++) { - Column c = rulesApplier.getRule("VAR_BIN"); - DataValue value = rulesApplier.getDataValue(c); - System.out.println(value.getValue()); - assertTrue("Got a value not in the list for the rule. :" + value.getValue(), expectedValues.contains(value.getValue())); - } - } - - @Test - public void testPrefixSequence() throws Exception { - List expectedValues = new ArrayList(); - expectedValues.add("0F90000000000X0"); - expectedValues.add("0F90000000000X1"); - expectedValues.add("0F90000000000X2"); - expectedValues.add("0F90000000000X3"); - expectedValues.add("0F90000000000X4"); - expectedValues.add("0F90000000000X5"); - expectedValues.add("0F90000000000X6"); - expectedValues.add("0F90000000000X7"); - expectedValues.add("0F90000000000X8"); - expectedValues.add("0F90000000000X9"); - - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - - // Run this 15 times gives a reasonable chance that all the values will appear at least once - for (int i = 0; i < 15; i++) { - DataValue value = rulesApplier.getDataValue(rulesApplier.getRule("NEWVAL_STRING")); - assertTrue("Got a value not in the list for the rule. 
:" + value.getValue(), expectedValues.contains(value.getValue())); - } - } - - @Test - public void testValueListRule() throws Exception { - List expectedValues = new ArrayList(); - expectedValues.add("aAAyYhnNbBs9kWk"); - expectedValues.add("bBByYhnNbBs9kWu"); - expectedValues.add("cCCyYhnNbBs9kWr"); - - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - Scenario scenario = parser.getScenarios().get(0); - - Column simPhxCol = new Column(); - simPhxCol.setName("PARENT_ID"); - simPhxCol.setType(DataTypeMapping.CHAR); - - // Run this 10 times gives a reasonable chance that all the values will appear at least once - for (int i = 0; i < 10; i++) { - DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); - assertTrue("Got a value not in the list for the rule. :" + value.getValue(), expectedValues.contains(value.getValue())); - } - } + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); - @Test - public void testRuleOverrides() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - Scenario scenario = parser.getScenarios().get(0); - - // We should be able to find the correct rule based only on Type and Name combination - // Test CHAR - Column simPhxCol = new Column(); - simPhxCol.setName("OTHER_ID"); - simPhxCol.setType(DataTypeMapping.CHAR); - - // Get the rule we expect to match - Column rule = rulesApplier.getRule(simPhxCol); - assertEquals("Did not find the correct rule.", rule.getName(), simPhxCol.getName()); - assertEquals("Did not find the matching rule type.", rule.getType(), simPhxCol.getType()); - assertEquals("Rule contains incorrect length.", rule.getLength(), 8); - assertEquals("Rule contains incorrect prefix.", rule.getPrefix(), "z0Oxx00"); - - DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); - assertEquals("Value returned does not match rule.", value.getValue().length(), 8); - - // Test VARCHAR with RANDOM and prefix - simPhxCol.setName("OLDVAL_STRING"); - simPhxCol.setType(DataTypeMapping.VARCHAR); - - // Get the rule we expect to match - rule = rulesApplier.getRule(simPhxCol); - assertEquals("Did not find the correct rule.", rule.getName(), simPhxCol.getName()); - assertEquals("Did not find the matching rule type.", rule.getType(), simPhxCol.getType()); - assertEquals("Rule contains incorrect length.", rule.getLength(), 10); - assertEquals("Rule contains incorrect prefix.", rule.getPrefix(), "MYPRFX"); - - value = rulesApplier.getDataForRule(scenario, simPhxCol); - assertEquals("Value returned does not match rule.", 10, value.getValue().length()); - assertTrue("Value returned start with prefix. " + value.getValue(), - StringUtils.startsWith(value.getValue(), rule.getPrefix())); - + for (int i = 0; i < 5; i++) { + Column c = rulesApplier.getRule("VAR_BIN"); + DataValue value = rulesApplier.getDataValue(c); + System.out.println(value.getValue()); + assertTrue("Got a value not in the list for the rule. 
:" + value.getValue(), + expectedValues.contains(value.getValue())); } - - - @Test - public void testScenarioLevelRuleOverride() throws Exception { - XMLConfigParser parser = new XMLConfigParser(matcherScenario); - WriteWorkload loader = new WriteWorkload(parser); - RulesApplier rulesApplier = loader.getRulesApplier(); - Scenario scenario = parser.getScenarios().get(0); - - // Test scenario level overridden rule - Column simPhxCol = new Column(); - simPhxCol.setName("FIELD"); - simPhxCol.setType(DataTypeMapping.VARCHAR); - DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); - assertEquals("Override rule should contain field length of 5", 5, value.getValue().length()); + } + + @Test + public void testPrefixSequence() throws Exception { + List expectedValues = new ArrayList(); + expectedValues.add("0F90000000000X0"); + expectedValues.add("0F90000000000X1"); + expectedValues.add("0F90000000000X2"); + expectedValues.add("0F90000000000X3"); + expectedValues.add("0F90000000000X4"); + expectedValues.add("0F90000000000X5"); + expectedValues.add("0F90000000000X6"); + expectedValues.add("0F90000000000X7"); + expectedValues.add("0F90000000000X8"); + expectedValues.add("0F90000000000X9"); + + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + + // Run this 15 times gives a reasonable chance that all the values will appear at least once + for (int i = 0; i < 15; i++) { + DataValue value = rulesApplier.getDataValue(rulesApplier.getRule("NEWVAL_STRING")); + assertTrue("Got a value not in the list for the rule. :" + value.getValue(), + expectedValues.contains(value.getValue())); } - - - /** - * Asserts that the value field is between the min/max value fields - * - * @param value - */ - private void assertDateBetween(DataValue value) { - DateTimeFormatter fmtr = DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN).withZone(ZoneId.of("UTC")); - - Instant dt = ZonedDateTime.parse(value.getValue(), fmtr).toInstant(); - Instant min = ZonedDateTime.parse(value.getMinValue(), fmtr).toInstant(); - Instant max = ZonedDateTime.parse(value.getMaxValue(), fmtr).toInstant(); - - assertTrue("Value " + dt + " is not after minValue", dt.isAfter(min)); - assertTrue("Value " + dt + " is not before maxValue", dt.isBefore(max)); + } + + @Test + public void testValueListRule() throws Exception { + List expectedValues = new ArrayList(); + expectedValues.add("aAAyYhnNbBs9kWk"); + expectedValues.add("bBByYhnNbBs9kWu"); + expectedValues.add("cCCyYhnNbBs9kWr"); + + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + Scenario scenario = parser.getScenarios().get(0); + + Column simPhxCol = new Column(); + simPhxCol.setName("PARENT_ID"); + simPhxCol.setType(DataTypeMapping.CHAR); + + // Run this 10 times gives a reasonable chance that all the values will appear at least once + for (int i = 0; i < 10; i++) { + DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); + assertTrue("Got a value not in the list for the rule. 
:" + value.getValue(), + expectedValues.contains(value.getValue())); } - - private void sleep(int time) { - try { - Thread.sleep(time); - } catch(InterruptedException ex) { - Thread.currentThread().interrupt(); - } + } + + @Test + public void testRuleOverrides() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + Scenario scenario = parser.getScenarios().get(0); + + // We should be able to find the correct rule based only on Type and Name combination + // Test CHAR + Column simPhxCol = new Column(); + simPhxCol.setName("OTHER_ID"); + simPhxCol.setType(DataTypeMapping.CHAR); + + // Get the rule we expect to match + Column rule = rulesApplier.getRule(simPhxCol); + assertEquals("Did not find the correct rule.", rule.getName(), simPhxCol.getName()); + assertEquals("Did not find the matching rule type.", rule.getType(), simPhxCol.getType()); + assertEquals("Rule contains incorrect length.", rule.getLength(), 8); + assertEquals("Rule contains incorrect prefix.", rule.getPrefix(), "z0Oxx00"); + + DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); + assertEquals("Value returned does not match rule.", value.getValue().length(), 8); + + // Test VARCHAR with RANDOM and prefix + simPhxCol.setName("OLDVAL_STRING"); + simPhxCol.setType(DataTypeMapping.VARCHAR); + + // Get the rule we expect to match + rule = rulesApplier.getRule(simPhxCol); + assertEquals("Did not find the correct rule.", rule.getName(), simPhxCol.getName()); + assertEquals("Did not find the matching rule type.", rule.getType(), simPhxCol.getType()); + assertEquals("Rule contains incorrect length.", rule.getLength(), 10); + assertEquals("Rule contains incorrect prefix.", rule.getPrefix(), "MYPRFX"); + + value = rulesApplier.getDataForRule(scenario, simPhxCol); + assertEquals("Value returned does not match rule.", 10, value.getValue().length()); + assertTrue("Value returned start with prefix. 
" + value.getValue(), + StringUtils.startsWith(value.getValue(), rule.getPrefix())); + + } + + @Test + public void testScenarioLevelRuleOverride() throws Exception { + XMLConfigParser parser = new XMLConfigParser(matcherScenario); + WriteWorkload loader = new WriteWorkload(parser); + RulesApplier rulesApplier = loader.getRulesApplier(); + Scenario scenario = parser.getScenarios().get(0); + + // Test scenario level overridden rule + Column simPhxCol = new Column(); + simPhxCol.setName("FIELD"); + simPhxCol.setType(DataTypeMapping.VARCHAR); + DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol); + assertEquals("Override rule should contain field length of 5", 5, value.getValue().length()); + } + + /** + * Asserts that the value field is between the min/max value fields + */ + private void assertDateBetween(DataValue value) { + DateTimeFormatter fmtr = + DateTimeFormatter.ofPattern(PherfConstants.DEFAULT_DATE_PATTERN).withZone(ZoneId.of("UTC")); + + Instant dt = ZonedDateTime.parse(value.getValue(), fmtr).toInstant(); + Instant min = ZonedDateTime.parse(value.getMinValue(), fmtr).toInstant(); + Instant max = ZonedDateTime.parse(value.getMaxValue(), fmtr).toInstant(); + + assertTrue("Value " + dt + " is not after minValue", dt.isAfter(min)); + assertTrue("Value " + dt + " is not before maxValue", dt.isBefore(max)); + } + + private void sleep(int time) { + try { + Thread.sleep(time); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java index b2712c4d1ae..308068ccc3d 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/TestHBaseProps.java @@ -1,21 +1,20 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ - package org.apache.phoenix.pherf; import static org.junit.Assert.*; @@ -25,10 +24,11 @@ import org.junit.Test; public class TestHBaseProps { - - @Test - public void testCheckHBaseProps(){ - Configuration conf = HBaseConfiguration.create(); - assertTrue("did not get correct threadpool size", conf.get("phoenix.query.threadPoolSize").equals("128")); - } + + @Test + public void testCheckHBaseProps() { + Configuration conf = HBaseConfiguration.create(); + assertTrue("did not get correct threadpool size", + conf.get("phoenix.query.threadPoolSize").equals("128")); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java index a4285f44baa..ff683231d58 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/XMLConfigParserTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,21 +34,22 @@ import org.slf4j.LoggerFactory; public class XMLConfigParserTest { - private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParserTest.class); - - @Test - public void testDTDInScenario() throws Exception { - URL scenarioUrl = XMLConfigParserTest.class.getResource("/scenario/malicious_scenario_with_dtd.xml"); - assertNotNull(scenarioUrl); - Path p = Paths.get(scenarioUrl.toURI()); - try { - XMLConfigParser.readDataModel(p); - fail("The scenario should have failed to parse because it contains a DTD"); - } catch (UnmarshalException e) { - // If we don't parse the DTD, the variable 'name' won't be defined in the XML - LOGGER.warn("Caught expected exception", e); - Throwable cause = e.getLinkedException(); - assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException); - } + private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParserTest.class); + + @Test + public void testDTDInScenario() throws Exception { + URL scenarioUrl = + XMLConfigParserTest.class.getResource("/scenario/malicious_scenario_with_dtd.xml"); + assertNotNull(scenarioUrl); + Path p = Paths.get(scenarioUrl.toURI()); + try { + XMLConfigParser.readDataModel(p); + fail("The scenario should have failed to parse because it contains a DTD"); + } catch (UnmarshalException e) { + // If we don't parse the DTD, the variable 'name' won't be defined in the XML + LOGGER.warn("Caught expected exception", e); + Throwable cause = e.getLinkedException(); + assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException); } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java index 83a28e0e9de..fd721763ddd 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/result/impl/XMLResultHandlerTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,22 +33,22 @@ import org.slf4j.LoggerFactory; public class XMLResultHandlerTest { - private static final Logger LOGGER = LoggerFactory.getLogger(XMLResultHandlerTest.class); + private static final Logger LOGGER = LoggerFactory.getLogger(XMLResultHandlerTest.class); - @Test - public void testDTDInResults() throws Exception { - URL resultsUrl = XMLConfigParserTest.class.getResource("/malicious_results_with_dtd.xml"); - assertNotNull(resultsUrl); - File resultsFile = new File(resultsUrl.getFile()); - XMLResultHandler handler = new XMLResultHandler(); - try { - handler.readFromResultFile(resultsFile); - fail("Expected to see an exception parsing the results with a DTD"); - } catch (UnmarshalException e) { - // If we don't parse the DTD, the variable 'name' won't be defined in the XML - LOGGER.debug("Caught expected exception", e); - Throwable cause = e.getLinkedException(); - assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException); - } + @Test + public void testDTDInResults() throws Exception { + URL resultsUrl = XMLConfigParserTest.class.getResource("/malicious_results_with_dtd.xml"); + assertNotNull(resultsUrl); + File resultsFile = new File(resultsUrl.getFile()); + XMLResultHandler handler = new XMLResultHandler(); + try { + handler.readFromResultFile(resultsFile); + fail("Expected to see an exception parsing the results with a DTD"); + } catch (UnmarshalException e) { + // If we don't parse the DTD, the variable 'name' won't be defined in the XML + LOGGER.debug("Caught expected exception", e); + Throwable cause = e.getLinkedException(); + assertTrue("Cause was a " + cause.getClass(), cause instanceof XMLStreamException); } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialDateDataGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialDateDataGeneratorTest.java index 1c351fbe117..0fbd9ff52e9 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialDateDataGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialDateDataGeneratorTest.java @@ -1,19 +1,19 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
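Both DTD tests above expect an UnmarshalException whose linked exception is an XMLStreamException, which is how a StAX reader behaves when it is configured to refuse DTDs. A generic sketch of that configuration using standard JAXP properties (whether Pherf wires its parser exactly this way is not shown in this patch):

  import javax.xml.stream.XMLInputFactory;

  // Illustrative only: a StAX factory hardened so documents carrying a DTD fail to parse.
  public final class SecureStaxFactorySketch {
    public static XMLInputFactory newSecureFactory() {
      XMLInputFactory factory = XMLInputFactory.newInstance();
      // Refuse inline DTDs entirely.
      factory.setProperty(XMLInputFactory.SUPPORT_DTD, false);
      // Do not resolve external entities either.
      factory.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
      return factory;
    }
  }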
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.pherf.rules; @@ -30,50 +30,50 @@ import org.junit.Test; public class SequentialDateDataGeneratorTest { - SequentialDateDataGenerator generator; + SequentialDateDataGenerator generator; - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonSequential() { - Column columnA = new Column(); - columnA.setType(DATE); - columnA.setDataSequence(DataSequence.RANDOM); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonSequential() { + Column columnA = new Column(); + columnA.setType(DATE); + columnA.setDataSequence(DataSequence.RANDOM); - //should reject this Column - generator = new SequentialDateDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialDateDataGenerator(columnA); + } - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonDate() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setDataSequence(DataSequence.SEQUENTIAL); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonDate() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setDataSequence(DataSequence.SEQUENTIAL); - //should reject this Column - generator = new SequentialDateDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialDateDataGenerator(columnA); + } - @Test - public void testGetDataValue() { - DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); - Column columnA = new Column(); - columnA.setType(DATE); - columnA.setDataSequence(DataSequence.SEQUENTIAL); + @Test + public void testGetDataValue() { + DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); + Column columnA = new Column(); + columnA.setType(DATE); + columnA.setDataSequence(DataSequence.SEQUENTIAL); - // The increments are the of 1 sec units - generator = new SequentialDateDataGenerator(columnA); - LocalDateTime startDateTime = generator.getStartDateTime(); + // The increments are the of 1 sec units + generator = new SequentialDateDataGenerator(columnA); + LocalDateTime startDateTime = generator.getStartDateTime(); - DataValue result1 = generator.getDataValue(); - LocalDateTime result1LocalTime = LocalDateTime.parse(result1.getValue(), FMT); - assertFalse(result1LocalTime.isBefore(startDateTime)); - DataValue result2 = generator.getDataValue(); - LocalDateTime result2LocalTime = LocalDateTime.parse(result2.getValue(), FMT); - assertEquals(result2LocalTime.minusSeconds(1), result1LocalTime); - DataValue result3 = generator.getDataValue(); - LocalDateTime result3LocalTime = LocalDateTime.parse(result3.getValue(), FMT); - assertEquals(result3LocalTime.minusSeconds(1), result2LocalTime); - DataValue result4 = generator.getDataValue(); - LocalDateTime result4LocalTime = LocalDateTime.parse(result4.getValue(), FMT); - assertEquals(result4LocalTime.minusSeconds(1), result3LocalTime); - } + DataValue result1 = generator.getDataValue(); + LocalDateTime result1LocalTime = LocalDateTime.parse(result1.getValue(), FMT); + 
assertFalse(result1LocalTime.isBefore(startDateTime)); + DataValue result2 = generator.getDataValue(); + LocalDateTime result2LocalTime = LocalDateTime.parse(result2.getValue(), FMT); + assertEquals(result2LocalTime.minusSeconds(1), result1LocalTime); + DataValue result3 = generator.getDataValue(); + LocalDateTime result3LocalTime = LocalDateTime.parse(result3.getValue(), FMT); + assertEquals(result3LocalTime.minusSeconds(1), result2LocalTime); + DataValue result4 = generator.getDataValue(); + LocalDateTime result4LocalTime = LocalDateTime.parse(result4.getValue(), FMT); + assertEquals(result4LocalTime.minusSeconds(1), result3LocalTime); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGeneratorTest.java index edc1e650a66..a5d704bfb7a 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGeneratorTest.java @@ -1,69 +1,69 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
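SequentialDateDataGeneratorTest above verifies that the first value is not before the generator's start time and that each subsequent value is exactly one second later than the previous one. A standalone sketch of a generator with that behaviour (illustrative; not the Pherf implementation):

  import java.time.LocalDateTime;
  import java.time.format.DateTimeFormatter;
  import java.util.concurrent.atomic.AtomicLong;

  // Illustrative only: emits timestamps one second apart in the test's expected format.
  public final class OneSecondStepSketch {
    private static final DateTimeFormatter FMT =
      DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
    private final LocalDateTime start = LocalDateTime.now();
    private final AtomicLong step = new AtomicLong(0);

    public String next() {
      // Each call advances exactly one second from the fixed starting point.
      return start.plusSeconds(step.getAndIncrement()).format(FMT);
    }
  }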
*/ package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.pherf.configuration.Column; -import org.apache.phoenix.pherf.configuration.DataSequence; -import org.junit.Test; - import static org.apache.phoenix.pherf.configuration.DataTypeMapping.INTEGER; import static org.apache.phoenix.pherf.configuration.DataTypeMapping.VARCHAR; import static org.junit.Assert.assertEquals; +import org.apache.phoenix.pherf.configuration.Column; +import org.apache.phoenix.pherf.configuration.DataSequence; +import org.junit.Test; + public class SequentialIntegerDataGeneratorTest { - SequentialIntegerDataGenerator generator; + SequentialIntegerDataGenerator generator; - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonSequential() { - Column columnA = new Column(); - columnA.setType(INTEGER); - columnA.setDataSequence(DataSequence.RANDOM); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonSequential() { + Column columnA = new Column(); + columnA.setType(INTEGER); + columnA.setDataSequence(DataSequence.RANDOM); - //should reject this Column - generator = new SequentialIntegerDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialIntegerDataGenerator(columnA); + } - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonInteger() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setDataSequence(DataSequence.SEQUENTIAL); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonInteger() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setDataSequence(DataSequence.SEQUENTIAL); - //should reject this Column - generator = new SequentialIntegerDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialIntegerDataGenerator(columnA); + } - @Test - public void testGetDataValue() { - Column columnA = new Column(); - columnA.setType(INTEGER); - columnA.setDataSequence(DataSequence.SEQUENTIAL); - columnA.setMinValue(1); - columnA.setMaxValue(3); + @Test + public void testGetDataValue() { + Column columnA = new Column(); + columnA.setType(INTEGER); + columnA.setDataSequence(DataSequence.SEQUENTIAL); + columnA.setMinValue(1); + columnA.setMaxValue(3); - generator = new SequentialIntegerDataGenerator(columnA); - DataValue result1 = generator.getDataValue(); - assertEquals("1", result1.getValue()); - DataValue result2 = generator.getDataValue(); - assertEquals("2", result2.getValue()); - DataValue result3 = generator.getDataValue(); - assertEquals("3", result3.getValue()); - DataValue result4 = generator.getDataValue(); - assertEquals("1", result4.getValue()); - } + generator = new SequentialIntegerDataGenerator(columnA); + DataValue result1 = generator.getDataValue(); + assertEquals("1", result1.getValue()); + DataValue result2 = generator.getDataValue(); + assertEquals("2", result2.getValue()); + DataValue result3 = generator.getDataValue(); + assertEquals("3", result3.getValue()); + DataValue result4 = generator.getDataValue(); + assertEquals("1", result4.getValue()); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialListDataGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialListDataGeneratorTest.java index a258b83a2ac..a0783252709 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialListDataGeneratorTest.java +++ 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialListDataGeneratorTest.java @@ -1,19 +1,19 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.pherf.rules; @@ -31,55 +31,55 @@ import org.junit.Test; public class SequentialListDataGeneratorTest { - SequentialListDataGenerator generator; + SequentialListDataGenerator generator; - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonSequential() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setDataSequence(DataSequence.RANDOM); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonSequential() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setDataSequence(DataSequence.RANDOM); - //should reject this Column - generator = new SequentialListDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialListDataGenerator(columnA); + } - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonVarchar() { - DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); - LocalDateTime startDateTime = LocalDateTime.now(); - String formattedDateTime = startDateTime.format(FMT); - Column columnA = new Column(); - columnA.setType(DATE); - columnA.setDataSequence(DataSequence.SEQUENTIAL); - List values = new ArrayList<>(); - values.add(new DataValue(DATE, formattedDateTime)); - values.add(new DataValue(DATE, formattedDateTime)); - values.add(new DataValue(DATE, formattedDateTime)); - columnA.setDataValues(values); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonVarchar() { + DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); + LocalDateTime startDateTime = LocalDateTime.now(); + String formattedDateTime = startDateTime.format(FMT); + Column columnA = new Column(); + columnA.setType(DATE); + columnA.setDataSequence(DataSequence.SEQUENTIAL); + List values = new ArrayList<>(); + values.add(new DataValue(DATE, 
formattedDateTime)); + values.add(new DataValue(DATE, formattedDateTime)); + values.add(new DataValue(DATE, formattedDateTime)); + columnA.setDataValues(values); - //should reject this Column - generator = new SequentialListDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialListDataGenerator(columnA); + } - @Test - public void testGetDataValue() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setDataSequence(DataSequence.SEQUENTIAL); - List values = new ArrayList<>(); - values.add(new DataValue(VARCHAR, "A")); - values.add(new DataValue(VARCHAR, "B")); - values.add(new DataValue(VARCHAR, "C")); - columnA.setDataValues(values); + @Test + public void testGetDataValue() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setDataSequence(DataSequence.SEQUENTIAL); + List values = new ArrayList<>(); + values.add(new DataValue(VARCHAR, "A")); + values.add(new DataValue(VARCHAR, "B")); + values.add(new DataValue(VARCHAR, "C")); + columnA.setDataValues(values); - generator = new SequentialListDataGenerator(columnA); - DataValue result1 = generator.getDataValue(); - assertEquals("A", result1.getValue()); - DataValue result2 = generator.getDataValue(); - assertEquals("B", result2.getValue()); - DataValue result3 = generator.getDataValue(); - assertEquals("C", result3.getValue()); - DataValue result4 = generator.getDataValue(); - assertEquals("A", result4.getValue()); - } + generator = new SequentialListDataGenerator(columnA); + DataValue result1 = generator.getDataValue(); + assertEquals("A", result1.getValue()); + DataValue result2 = generator.getDataValue(); + assertEquals("B", result2.getValue()); + DataValue result3 = generator.getDataValue(); + assertEquals("C", result3.getValue()); + DataValue result4 = generator.getDataValue(); + assertEquals("A", result4.getValue()); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGeneratorTest.java index 0157721cabf..2f680fe0db2 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/rules/SequentialVarcharDataGeneratorTest.java @@ -1,68 +1,68 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.phoenix.pherf.rules; -import org.apache.phoenix.pherf.configuration.Column; -import org.apache.phoenix.pherf.configuration.DataSequence; -import org.junit.Test; - import static org.apache.phoenix.pherf.configuration.DataTypeMapping.INTEGER; import static org.apache.phoenix.pherf.configuration.DataTypeMapping.VARCHAR; import static org.junit.Assert.assertEquals; +import org.apache.phoenix.pherf.configuration.Column; +import org.apache.phoenix.pherf.configuration.DataSequence; +import org.junit.Test; + public class SequentialVarcharDataGeneratorTest { - SequentialVarcharDataGenerator generator; + SequentialVarcharDataGenerator generator; - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonSequential() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setDataSequence(DataSequence.RANDOM); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonSequential() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setDataSequence(DataSequence.RANDOM); - //should reject this Column - generator = new SequentialVarcharDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialVarcharDataGenerator(columnA); + } - @Test(expected = IllegalArgumentException.class) - public void testRejectsNonVarchar() { - Column columnA = new Column(); - columnA.setType(INTEGER); - columnA.setDataSequence(DataSequence.SEQUENTIAL); + @Test(expected = IllegalArgumentException.class) + public void testRejectsNonVarchar() { + Column columnA = new Column(); + columnA.setType(INTEGER); + columnA.setDataSequence(DataSequence.SEQUENTIAL); - //should reject this Column - generator = new SequentialVarcharDataGenerator(columnA); - } + // should reject this Column + generator = new SequentialVarcharDataGenerator(columnA); + } - @Test - public void testGetDataValue() { - Column columnA = new Column(); - columnA.setType(VARCHAR); - columnA.setLength(15); - columnA.setDataSequence(DataSequence.SEQUENTIAL); + @Test + public void testGetDataValue() { + Column columnA = new Column(); + columnA.setType(VARCHAR); + columnA.setLength(15); + columnA.setDataSequence(DataSequence.SEQUENTIAL); - generator = new SequentialVarcharDataGenerator(columnA); - DataValue result1 = generator.getDataValue(); - assertEquals("xxxxxxxxxxxxxx0", result1.getValue()); - DataValue result2 = generator.getDataValue(); - assertEquals("xxxxxxxxxxxxxx1", result2.getValue()); - DataValue result3 = generator.getDataValue(); - assertEquals("xxxxxxxxxxxxxx2", result3.getValue()); - DataValue result4 = generator.getDataValue(); - assertEquals("xxxxxxxxxxxxxx3", result4.getValue()); - } + generator = new SequentialVarcharDataGenerator(columnA); + DataValue result1 = generator.getDataValue(); + assertEquals("xxxxxxxxxxxxxx0", result1.getValue()); + DataValue result2 = generator.getDataValue(); + assertEquals("xxxxxxxxxxxxxx1", result2.getValue()); + DataValue result3 = generator.getDataValue(); + assertEquals("xxxxxxxxxxxxxx2", result3.getValue()); + DataValue result4 = generator.getDataValue(); + 
assertEquals("xxxxxxxxxxxxxx3", result4.getValue()); + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/util/ResourceListTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/util/ResourceListTest.java index c77cb821758..cbd8910c42d 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/util/ResourceListTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/util/ResourceListTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,15 +17,15 @@ */ package org.apache.phoenix.pherf.util; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.File; import java.util.Collections; import java.util.regex.Pattern; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - public class ResourceListTest { @Test @@ -32,7 +33,8 @@ public void testMissingJarFileReturnsGracefully() { ResourceList rl = new ResourceList("foo"); File missingFile = new File("abracadabraphoenix.txt"); assertFalse("Did not expect a fake test file to actually exist", missingFile.exists()); - assertEquals(Collections.emptyList(), rl.getResourcesFromJarFile(missingFile, Pattern.compile("pattern"))); + assertEquals(Collections.emptyList(), + rl.getResourcesFromJarFile(missingFile, Pattern.compile("pattern"))); } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java index 66f65b370c6..0b1f1446bf9 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/MultiThreadedRunnerTest.java @@ -1,23 +1,28 @@ /* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.phoenix.pherf.workload; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.ResultSet; + import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.pherf.configuration.Query; import org.apache.phoenix.pherf.configuration.Scenario; @@ -33,96 +38,81 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.sql.ResultSet; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - public class MultiThreadedRunnerTest { - @Mock - private static XMLConfigParser mockParser; - @Mock - private static DataModelResult mockDMR; - @Mock - private static RulesApplier mockRA; - @Mock - private static ThreadTime mockTT; - @Mock - private static Scenario mockScenario; - @Mock - private static WorkloadExecutor mockWE; - @Mock - private static Query mockQuery; - @Mock - private static ResultSet mockRS; - - @Before - public void init() { - MockitoAnnotations.initMocks(this); - } + @Mock + private static XMLConfigParser mockParser; + @Mock + private static DataModelResult mockDMR; + @Mock + private static RulesApplier mockRA; + @Mock + private static ThreadTime mockTT; + @Mock + private static Scenario mockScenario; + @Mock + private static WorkloadExecutor mockWE; + @Mock + private static Query mockQuery; + @Mock + private static ResultSet mockRS; - @Test - public void testExpectedRowsMismatch() throws Exception { - Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); - MultiThreadedRunner mtr = new MultiThreadedRunner("test", - mockQuery, mockDMR, mockTT, - 10L, 1000L, - true, mockRA, - mockScenario, mockWE, mockParser); - Mockito.when(mockRS.next()).thenReturn(true); - Mockito.when(mockRS.getLong(1)).thenReturn(2L); - try { - mtr.getResults(mockRS, "test_iteration", false,0L); - fail(); - } catch (RuntimeException e) { - //pass; - } + @Before + public void init() { + MockitoAnnotations.initMocks(this); + } + @Test + public void testExpectedRowsMismatch() throws Exception { + Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); + MultiThreadedRunner mtr = new MultiThreadedRunner("test", mockQuery, mockDMR, mockTT, 10L, + 1000L, true, mockRA, mockScenario, mockWE, mockParser); + Mockito.when(mockRS.next()).thenReturn(true); + Mockito.when(mockRS.getLong(1)).thenReturn(2L); + try { + mtr.getResults(mockRS, "test_iteration", false, 0L); + fail(); + } catch (RuntimeException e) { + // pass; } - @Test - public void 
testTimeout() throws Exception { - Mockito.when(mockQuery.getTimeoutDuration()).thenReturn(1000L); - Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); - MultiThreadedRunner mtr = new MultiThreadedRunner("test", - mockQuery, mockDMR, mockTT, - 10L, 1000L, - true, mockRA, - mockScenario, mockWE, mockParser); - DefaultEnvironmentEdge myClock = Mockito.mock(DefaultEnvironmentEdge.class); - Mockito.when(myClock.currentTime()).thenReturn(0L, 5000L); - EnvironmentEdgeManager.injectEdge(myClock); - try { - Mockito.when(mockRS.next()).thenReturn(true); - Mockito.when(mockRS.getLong(1)).thenReturn(1L); - Pair results = mtr.getResults(mockRS, "test_iteration", false, 0L); - assertTrue(results.getSecond() > mockQuery.getTimeoutDuration()); - } finally { - EnvironmentEdgeManager.reset(); - } + } + + @Test + public void testTimeout() throws Exception { + Mockito.when(mockQuery.getTimeoutDuration()).thenReturn(1000L); + Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); + MultiThreadedRunner mtr = new MultiThreadedRunner("test", mockQuery, mockDMR, mockTT, 10L, + 1000L, true, mockRA, mockScenario, mockWE, mockParser); + DefaultEnvironmentEdge myClock = Mockito.mock(DefaultEnvironmentEdge.class); + Mockito.when(myClock.currentTime()).thenReturn(0L, 5000L); + EnvironmentEdgeManager.injectEdge(myClock); + try { + Mockito.when(mockRS.next()).thenReturn(true); + Mockito.when(mockRS.getLong(1)).thenReturn(1L); + Pair results = mtr.getResults(mockRS, "test_iteration", false, 0L); + assertTrue(results.getSecond() > mockQuery.getTimeoutDuration()); + } finally { + EnvironmentEdgeManager.reset(); } + } - @Test - public void testFinishWithoutTimeout() throws Exception { - DefaultEnvironmentEdge myClock = Mockito.mock(DefaultEnvironmentEdge.class); - Mockito.when(myClock.currentTime()).thenReturn(0L); - EnvironmentEdgeManager.injectEdge(myClock); - try { - Mockito.when(mockQuery.getTimeoutDuration()).thenReturn(1000L); - Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); - MultiThreadedRunner mtr = new MultiThreadedRunner("test", - mockQuery, mockDMR, mockTT, - 10L, 1000L, - true, mockRA, - mockScenario, mockWE, mockParser); - Mockito.when(mockRS.next()).thenReturn(true, false); - Mockito.when(mockRS.getLong(1)).thenReturn(1L); - Pair results = mtr.getResults(mockRS, "test_iteration", false, 0L); - assertFalse(results.getSecond() > mockQuery.getTimeoutDuration()); - } finally { - EnvironmentEdgeManager.reset(); - } + @Test + public void testFinishWithoutTimeout() throws Exception { + DefaultEnvironmentEdge myClock = Mockito.mock(DefaultEnvironmentEdge.class); + Mockito.when(myClock.currentTime()).thenReturn(0L); + EnvironmentEdgeManager.injectEdge(myClock); + try { + Mockito.when(mockQuery.getTimeoutDuration()).thenReturn(1000L); + Mockito.when(mockQuery.getExpectedAggregateRowCount()).thenReturn(1L); + MultiThreadedRunner mtr = new MultiThreadedRunner("test", mockQuery, mockDMR, mockTT, 10L, + 1000L, true, mockRA, mockScenario, mockWE, mockParser); + Mockito.when(mockRS.next()).thenReturn(true, false); + Mockito.when(mockRS.getLong(1)).thenReturn(1L); + Pair results = mtr.getResults(mockRS, "test_iteration", false, 0L); + assertFalse(results.getSecond() > mockQuery.getTimeoutDuration()); + } finally { + EnvironmentEdgeManager.reset(); } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/SequentialLoadEventGeneratorTest.java 
b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/SequentialLoadEventGeneratorTest.java index b8c4368861a..66023df9cba 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/SequentialLoadEventGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/SequentialLoadEventGeneratorTest.java @@ -15,9 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.phoenix.pherf.workload.mt; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; -package org.apache.phoenix.pherf.workload.mt; +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.XMLConfigParserTest; @@ -32,115 +39,117 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - /** - * Tests the various sequential event generation outcomes based on scenario, model - * execution type and iterations + * Tests the various sequential event generation outcomes based on scenario, model execution type + * and iterations */ public class SequentialLoadEventGeneratorTest { - private static final Logger LOGGER = LoggerFactory.getLogger( - SequentialLoadEventGeneratorTest.class); - - private enum TestOperationGroup { - upsertOp, queryOp1, queryOp2, queryOp3, queryOp4, queryOp5, queryOp6, queryOp7, idleOp, udfOp - } - - private enum TestTenantGroup { - tg1 - } - - public DataModel readTestDataModel(String resourceName) throws Exception { - URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); - assertNotNull(scenarioUrl); - Path p = Paths.get(scenarioUrl.toURI()); - return XMLConfigParser.readDataModel(p); - } - - @Test - public void testParallelExecutionWithOneHandler() throws Exception { - sequentialEventGeneration(1, true); - } - - @Test - public void testParallelExecutionWithManyHandler() throws Exception { - sequentialEventGeneration(5, true); - } - - @Test - public void testSerialExecutionWithOneHandler() throws Exception { - sequentialEventGeneration(1, false); - } - - @Test - public void testSerialExecutionWithManyHandler() throws Exception { - sequentialEventGeneration(5, false); - } + private static final Logger LOGGER = + LoggerFactory.getLogger(SequentialLoadEventGeneratorTest.class); + + private enum TestOperationGroup { + upsertOp, + queryOp1, + queryOp2, + queryOp3, + queryOp4, + queryOp5, + queryOp6, + queryOp7, + idleOp, + udfOp + } + + private enum TestTenantGroup { + tg1 + } + + public DataModel readTestDataModel(String resourceName) throws Exception { + URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); + assertNotNull(scenarioUrl); + Path p = Paths.get(scenarioUrl.toURI()); + return XMLConfigParser.readDataModel(p); + } + + @Test + public void testParallelExecutionWithOneHandler() throws Exception { + sequentialEventGeneration(1, true); + } + + @Test + public void testParallelExecutionWithManyHandler() throws Exception { + sequentialEventGeneration(5, true); + } + + @Test + public void testSerialExecutionWithOneHandler() throws Exception { + sequentialEventGeneration(1, false); + } + + 
@Test + public void testSerialExecutionWithManyHandler() throws Exception { + sequentialEventGeneration(5, false); + } + + public void sequentialEventGeneration(int numIterations, boolean parallel) throws Exception { + int numTenantGroups = 1; + int numOpGroups = 10; + double variancePercent = 0.00f; // 0 percent + + PhoenixUtil pUtil = PhoenixUtil.create(); + Properties properties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + properties.setProperty(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY, + String.valueOf(numIterations)); + properties.setProperty(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY, + parallel ? "PARALLEL" : "SERIAL"); + + DataModel model = readTestDataModel("/scenario/test_evt_gen4.xml"); + for (Scenario scenario : model.getScenarios()) { + LOGGER.debug(String.format("Testing %s", scenario.getName())); + LoadProfile loadProfile = scenario.getLoadProfile(); + assertEquals("tenant group size is not as expected: ", numTenantGroups, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", numOpGroups, + loadProfile.getOpDistribution().size()); + // Calculate the expected distribution. + double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + expectedDistribution[r][c] = numIterations; + LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); + } + } + + SequentialLoadEventGenerator evtGen = + new SequentialLoadEventGenerator(pUtil, model, scenario, properties); + + // Calculate the actual distribution. + double[][] distribution = new double[numOpGroups][numTenantGroups]; + for (int i = 0; i < numIterations; i++) { + for (int r = 0; r < numOpGroups; r++) { + TenantOperationInfo info = evtGen.next(); + int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); + int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); + distribution[row][col]++; + } + } + + // Validate that the expected and actual distribution + // is within the margin of allowed variance. + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + double allowedVariance = expectedDistribution[r][c] * variancePercent; + double diff = Math.abs(expectedDistribution[r][c] - distribution[r][c]); + boolean isAllowed = diff == allowedVariance; + LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", r, c, distribution[r][c], diff, + allowedVariance)); + assertTrue(String.format( + "Difference is outside the allowed variance " + "[expected = %f, actual = %f]", + allowedVariance, diff), isAllowed); - public void sequentialEventGeneration(int numIterations, boolean parallel) throws Exception { - int numTenantGroups = 1; - int numOpGroups = 10; - double variancePercent = 0.00f; // 0 percent - - PhoenixUtil pUtil = PhoenixUtil.create(); - Properties properties = PherfConstants - .create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - properties.setProperty(PherfConstants.NUM_SEQUENTIAL_ITERATIONS_PROP_KEY, String.valueOf(numIterations)); - properties.setProperty(PherfConstants.NUM_SEQUENTIAL_EXECUTION_TYPE_PROP_KEY, - parallel ? 
"PARALLEL" : "SERIAL"); - - DataModel model = readTestDataModel("/scenario/test_evt_gen4.xml"); - for (Scenario scenario : model.getScenarios()) { - LOGGER.debug(String.format("Testing %s", scenario.getName())); - LoadProfile loadProfile = scenario.getLoadProfile(); - assertEquals("tenant group size is not as expected: ", - numTenantGroups, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - numOpGroups, loadProfile.getOpDistribution().size()); - // Calculate the expected distribution. - double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - expectedDistribution[r][c] = numIterations; - LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); - } - } - - SequentialLoadEventGenerator evtGen = new SequentialLoadEventGenerator( - pUtil, model, scenario, properties); - - // Calculate the actual distribution. - double[][] distribution = new double[numOpGroups][numTenantGroups]; - for (int i = 0; i < numIterations; i++) { - for (int r = 0; r < numOpGroups; r++) { - TenantOperationInfo info = evtGen.next(); - int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); - int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); - distribution[row][col]++; - } - } - - // Validate that the expected and actual distribution - // is within the margin of allowed variance. - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - double allowedVariance = expectedDistribution[r][c] * variancePercent; - double diff = Math.abs(expectedDistribution[r][c] - distribution[r][c]); - boolean isAllowed = diff == allowedVariance; - LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", - r, c, distribution[r][c], diff, allowedVariance)); - assertTrue(String.format("Difference is outside the allowed variance " - + "[expected = %f, actual = %f]", allowedVariance, diff), isAllowed); - - } - } } + } } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/TenantOperationFactoryTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/TenantOperationFactoryTest.java index 5dd39357354..28f4c35d4b5 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/TenantOperationFactoryTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/TenantOperationFactoryTest.java @@ -15,9 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.phoenix.pherf.workload.mt; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; + import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.XMLConfigParserTest; import org.apache.phoenix.pherf.configuration.DataModel; @@ -25,7 +33,6 @@ import org.apache.phoenix.pherf.configuration.Scenario; import org.apache.phoenix.pherf.configuration.XMLConfigParser; import org.apache.phoenix.pherf.util.PhoenixUtil; - import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo; import org.apache.phoenix.pherf.workload.mt.generators.WeightedRandomLoadEventGenerator; import org.apache.phoenix.pherf.workload.mt.operations.IdleTimeOperationSupplier; @@ -38,91 +45,88 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - - /** * Tests the various operation supplier outcomes based on scenario, model and load profile. */ public class TenantOperationFactoryTest { - private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationFactoryTest.class); - - private enum TestOperationGroup { - upsertOp, queryOp1, queryOp2, idleOp, udfOp - } - - private enum TestTenantGroup { - tg1, tg2, tg3 - } - - public DataModel readTestDataModel(String resourceName) throws Exception { - URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); - assertNotNull(scenarioUrl); - Path p = Paths.get(scenarioUrl.toURI()); - return XMLConfigParser.readDataModel(p); - } - - @Test public void testVariousOperations() throws Exception { - int numTenantGroups = 3; - int numOpGroups = 5; - int numRuns = 10; - int numOperations = 10; - - PhoenixUtil pUtil = PhoenixUtil.create(); - Properties properties = PherfConstants - .create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - - DataModel model = readTestDataModel("/scenario/test_evt_gen1.xml"); - for (Scenario scenario : model.getScenarios()) { - LOGGER.debug(String.format("Testing %s", scenario.getName())); - LoadProfile loadProfile = scenario.getLoadProfile(); - assertEquals("tenant group size is not as expected: ", - numTenantGroups, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - numOpGroups, loadProfile.getOpDistribution().size()); - - WeightedRandomLoadEventGenerator evtGen = new WeightedRandomLoadEventGenerator( - pUtil, model, scenario, properties); - TenantOperationFactory opFactory = evtGen.getOperationFactory(); - assertEquals("operation group size from the factory is not as expected: ", - numOpGroups, opFactory.getOperations().size()); - - for (int i = 0; i < numRuns; i++) { - int ops = numOperations; - loadProfile.setNumOperations(ops); - while (ops-- > 0) { - TenantOperationInfo info = evtGen.next(); - switch (TestOperationGroup.valueOf(info.getOperationGroupId())) { - case upsertOp: - assertTrue(opFactory.getOperationSupplier(info).getClass() - .isAssignableFrom(UpsertOperationSupplier.class)); - break; - case queryOp1: - case queryOp2: - assertTrue(opFactory.getOperationSupplier(info).getClass() - .isAssignableFrom(QueryOperationSupplier.class)); - break; - case idleOp: - 
assertTrue(opFactory.getOperationSupplier(info).getClass() - .isAssignableFrom(IdleTimeOperationSupplier.class)); - break; - case udfOp: - assertTrue(opFactory.getOperationSupplier(info).getClass() - .isAssignableFrom(UserDefinedOperationSupplier.class)); - break; - default: - Assert.fail(); - - } - } - } + private static final Logger LOGGER = LoggerFactory.getLogger(TenantOperationFactoryTest.class); + + private enum TestOperationGroup { + upsertOp, + queryOp1, + queryOp2, + idleOp, + udfOp + } + + private enum TestTenantGroup { + tg1, + tg2, + tg3 + } + + public DataModel readTestDataModel(String resourceName) throws Exception { + URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); + assertNotNull(scenarioUrl); + Path p = Paths.get(scenarioUrl.toURI()); + return XMLConfigParser.readDataModel(p); + } + + @Test + public void testVariousOperations() throws Exception { + int numTenantGroups = 3; + int numOpGroups = 5; + int numRuns = 10; + int numOperations = 10; + + PhoenixUtil pUtil = PhoenixUtil.create(); + Properties properties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + + DataModel model = readTestDataModel("/scenario/test_evt_gen1.xml"); + for (Scenario scenario : model.getScenarios()) { + LOGGER.debug(String.format("Testing %s", scenario.getName())); + LoadProfile loadProfile = scenario.getLoadProfile(); + assertEquals("tenant group size is not as expected: ", numTenantGroups, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", numOpGroups, + loadProfile.getOpDistribution().size()); + + WeightedRandomLoadEventGenerator evtGen = + new WeightedRandomLoadEventGenerator(pUtil, model, scenario, properties); + TenantOperationFactory opFactory = evtGen.getOperationFactory(); + assertEquals("operation group size from the factory is not as expected: ", numOpGroups, + opFactory.getOperations().size()); + + for (int i = 0; i < numRuns; i++) { + int ops = numOperations; + loadProfile.setNumOperations(ops); + while (ops-- > 0) { + TenantOperationInfo info = evtGen.next(); + switch (TestOperationGroup.valueOf(info.getOperationGroupId())) { + case upsertOp: + assertTrue(opFactory.getOperationSupplier(info).getClass() + .isAssignableFrom(UpsertOperationSupplier.class)); + break; + case queryOp1: + case queryOp2: + assertTrue(opFactory.getOperationSupplier(info).getClass() + .isAssignableFrom(QueryOperationSupplier.class)); + break; + case idleOp: + assertTrue(opFactory.getOperationSupplier(info).getClass() + .isAssignableFrom(IdleTimeOperationSupplier.class)); + break; + case udfOp: + assertTrue(opFactory.getOperationSupplier(info).getClass() + .isAssignableFrom(UserDefinedOperationSupplier.class)); + break; + default: + Assert.fail(); + + } } + } } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/UniformDistributionLoadEventGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/UniformDistributionLoadEventGeneratorTest.java index f3c048a04e8..d4cd8447723 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/UniformDistributionLoadEventGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/UniformDistributionLoadEventGeneratorTest.java @@ -15,9 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +package org.apache.phoenix.pherf.workload.mt; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; -package org.apache.phoenix.pherf.workload.mt; +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.XMLConfigParserTest; @@ -32,105 +39,103 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - /** * Tests the various event generation outcomes based on scenario, model and load profile. */ public class UniformDistributionLoadEventGeneratorTest { - private static final Logger LOGGER = LoggerFactory.getLogger( - UniformDistributionLoadEventGeneratorTest.class); - - private enum TestOperationGroup { - upsertOp, queryOp1, queryOp2, queryOp3, queryOp4, queryOp5, queryOp6, queryOp7, idleOp, udfOp - } - - private enum TestTenantGroup { - tg1 - } - - public DataModel readTestDataModel(String resourceName) throws Exception { - URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); - assertNotNull(scenarioUrl); - Path p = Paths.get(scenarioUrl.toURI()); - return XMLConfigParser.readDataModel(p); - } - - /** - * Case : where no operations and tenant groups have zero weight - * - * @throws Exception - */ - @Test - public void testVariousEventGeneration() throws Exception { - int numRuns = 100; - int numOperations = 1000; - double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; - int numTenantGroups = 1; - int numOpGroups = 10; - double variancePercent = 0.05f; // 5 percent - - PhoenixUtil pUtil = PhoenixUtil.create(); - Properties properties = PherfConstants - .create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - - DataModel model = readTestDataModel("/scenario/test_evt_gen3.xml"); - for (Scenario scenario : model.getScenarios()) { - LOGGER.debug(String.format("Testing %s", scenario.getName())); - LoadProfile loadProfile = scenario.getLoadProfile(); - assertEquals("tenant group size is not as expected: ", - numTenantGroups, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - numOpGroups, loadProfile.getOpDistribution().size()); - // Calculate the expected distribution. - double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; - int tenantWeight = 100; - int opWeight = 10; - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - expectedDistribution[r][c] = normalizedOperations * (tenantWeight * opWeight); - LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); - } - } - - UniformDistributionLoadEventGenerator - evtGen = new UniformDistributionLoadEventGenerator( - pUtil, model, scenario, properties); - - // Calculate the actual distribution. 
- double[][] distribution = new double[numOpGroups][numTenantGroups]; - for (int i = 0; i < numRuns; i++) { - int ops = numOperations; - loadProfile.setNumOperations(ops); - while (ops-- > 0) { - TenantOperationInfo info = evtGen.next(); - int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); - int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); - distribution[row][col]++; - } - } - - // Validate that the expected and actual distribution - // is within the margin of allowed variance. - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - double allowedVariance = expectedDistribution[r][c] * variancePercent; - double diff = Math.abs(expectedDistribution[r][c] - distribution[r][c]); - boolean isAllowed = diff < allowedVariance; - LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", - r, c, distribution[r][c], diff, allowedVariance)); - assertTrue(String.format("Difference is outside the allowed variance " - + "[expected = %f, actual = %f]", allowedVariance, diff), isAllowed); + private static final Logger LOGGER = + LoggerFactory.getLogger(UniformDistributionLoadEventGeneratorTest.class); + + private enum TestOperationGroup { + upsertOp, + queryOp1, + queryOp2, + queryOp3, + queryOp4, + queryOp5, + queryOp6, + queryOp7, + idleOp, + udfOp + } + + private enum TestTenantGroup { + tg1 + } + + public DataModel readTestDataModel(String resourceName) throws Exception { + URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); + assertNotNull(scenarioUrl); + Path p = Paths.get(scenarioUrl.toURI()); + return XMLConfigParser.readDataModel(p); + } + + /** + * Case : where no operations and tenant groups have zero weight + */ + @Test + public void testVariousEventGeneration() throws Exception { + int numRuns = 100; + int numOperations = 1000; + double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; + int numTenantGroups = 1; + int numOpGroups = 10; + double variancePercent = 0.05f; // 5 percent + + PhoenixUtil pUtil = PhoenixUtil.create(); + Properties properties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + + DataModel model = readTestDataModel("/scenario/test_evt_gen3.xml"); + for (Scenario scenario : model.getScenarios()) { + LOGGER.debug(String.format("Testing %s", scenario.getName())); + LoadProfile loadProfile = scenario.getLoadProfile(); + assertEquals("tenant group size is not as expected: ", numTenantGroups, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", numOpGroups, + loadProfile.getOpDistribution().size()); + // Calculate the expected distribution. + double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; + int tenantWeight = 100; + int opWeight = 10; + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + expectedDistribution[r][c] = normalizedOperations * (tenantWeight * opWeight); + LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); + } + } + + UniformDistributionLoadEventGenerator evtGen = + new UniformDistributionLoadEventGenerator(pUtil, model, scenario, properties); + + // Calculate the actual distribution. 
+ double[][] distribution = new double[numOpGroups][numTenantGroups]; + for (int i = 0; i < numRuns; i++) { + int ops = numOperations; + loadProfile.setNumOperations(ops); + while (ops-- > 0) { + TenantOperationInfo info = evtGen.next(); + int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); + int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); + distribution[row][col]++; + } + } + + // Validate that the expected and actual distribution + // is within the margin of allowed variance. + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + double allowedVariance = expectedDistribution[r][c] * variancePercent; + double diff = Math.abs(expectedDistribution[r][c] - distribution[r][c]); + boolean isAllowed = diff < allowedVariance; + LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", r, c, distribution[r][c], diff, + allowedVariance)); + assertTrue(String.format( + "Difference is outside the allowed variance " + "[expected = %f, actual = %f]", + allowedVariance, diff), isAllowed); - } - } } + } } + } } diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/WeightedRandomLoadEventGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/WeightedRandomLoadEventGeneratorTest.java index ddc01b3b32f..405cd7e5a47 100644 --- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/WeightedRandomLoadEventGeneratorTest.java +++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/WeightedRandomLoadEventGeneratorTest.java @@ -15,9 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.phoenix.pherf.workload.mt; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; -package org.apache.phoenix.pherf.workload.mt; +import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; import org.apache.phoenix.pherf.PherfConstants; import org.apache.phoenix.pherf.XMLConfigParserTest; @@ -32,188 +39,196 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.URL; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - /** * Tests the various event generation outcomes based on scenario, model and load profile. 
*/ public class WeightedRandomLoadEventGeneratorTest { - private static final Logger LOGGER = LoggerFactory.getLogger( - WeightedRandomLoadEventGeneratorTest.class); - private enum TestOperationGroup { - upsertOp, queryOp1, queryOp2, idleOp, udfOp - } - - private enum TestOperationGroup2 { - upsertOp, queryOp1, queryOp2, queryOp3, queryOp4, queryOp5, queryOp6, queryOp7, queryOp8, idleOp, udfOp - } - - private enum TestTenantGroup { - tg1, tg2, tg3 - } - - public DataModel readTestDataModel(String resourceName) throws Exception { - URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); - assertNotNull(scenarioUrl); - Path p = Paths.get(scenarioUrl.toURI()); - return XMLConfigParser.readDataModel(p); - } - - /** - * Case : where no operations and tenant groups have zero weight - * @throws Exception - */ - @Test - public void testVariousEventGeneration() throws Exception { - int numRuns = 10; - int numOperations = 100000; - double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; - int numTenantGroups = 3; - int numOpGroups = 5; - - PhoenixUtil pUtil = PhoenixUtil.create(); - Properties properties = PherfConstants - .create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - - DataModel model = readTestDataModel("/scenario/test_evt_gen1.xml"); - for (Scenario scenario : model.getScenarios()) { - LOGGER.debug(String.format("Testing %s", scenario.getName())); - LoadProfile loadProfile = scenario.getLoadProfile(); - assertEquals("tenant group size is not as expected: ", - numTenantGroups, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - numOpGroups, loadProfile.getOpDistribution().size()); - // Calculate the expected distribution. - double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - int tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight(); - int opWeight = loadProfile.getOpDistribution().get(r).getWeight(); - expectedDistribution[r][c] = normalizedOperations * (tenantWeight * opWeight); - LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); - } - } - - WeightedRandomLoadEventGenerator evtGen = new WeightedRandomLoadEventGenerator( - pUtil, model, scenario, properties); - - // Calculate the actual distribution. 
- double[][] distribution = new double[numOpGroups][numTenantGroups]; - for (int i = 0; i < numRuns; i++) { - int ops = numOperations; - loadProfile.setNumOperations(ops); - while (ops-- > 0) { - TenantOperationInfo info = evtGen.next(); - int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); - int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); - distribution[row][col]++; - } - } - validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution); + private static final Logger LOGGER = + LoggerFactory.getLogger(WeightedRandomLoadEventGeneratorTest.class); + + private enum TestOperationGroup { + upsertOp, + queryOp1, + queryOp2, + idleOp, + udfOp + } + + private enum TestOperationGroup2 { + upsertOp, + queryOp1, + queryOp2, + queryOp3, + queryOp4, + queryOp5, + queryOp6, + queryOp7, + queryOp8, + idleOp, + udfOp + } + + private enum TestTenantGroup { + tg1, + tg2, + tg3 + } + + public DataModel readTestDataModel(String resourceName) throws Exception { + URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName); + assertNotNull(scenarioUrl); + Path p = Paths.get(scenarioUrl.toURI()); + return XMLConfigParser.readDataModel(p); + } + + /** + * Case : where no operations and tenant groups have zero weight + */ + @Test + public void testVariousEventGeneration() throws Exception { + int numRuns = 10; + int numOperations = 100000; + double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; + int numTenantGroups = 3; + int numOpGroups = 5; + + PhoenixUtil pUtil = PhoenixUtil.create(); + Properties properties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + + DataModel model = readTestDataModel("/scenario/test_evt_gen1.xml"); + for (Scenario scenario : model.getScenarios()) { + LOGGER.debug(String.format("Testing %s", scenario.getName())); + LoadProfile loadProfile = scenario.getLoadProfile(); + assertEquals("tenant group size is not as expected: ", numTenantGroups, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", numOpGroups, + loadProfile.getOpDistribution().size()); + // Calculate the expected distribution. 
+ double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + int tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight(); + int opWeight = loadProfile.getOpDistribution().get(r).getWeight(); + expectedDistribution[r][c] = normalizedOperations * (tenantWeight * opWeight); + LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); } - } - - /** - * Case : where some operations have zero weight - */ - @Test - public void testAutoAssignedPMFs() throws Exception { - int numRuns = 50; - int numOperations = 100000; - double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; - int numTenantGroups = 3; - int numOpGroups = 11; - - PhoenixUtil pUtil = PhoenixUtil.create(); - Properties properties = PherfConstants - .create().getProperties(PherfConstants.PHERF_PROPERTIES, false); - - DataModel model = readTestDataModel("/scenario/test_evt_gen2.xml"); - for (Scenario scenario : model.getScenarios()) { - LOGGER.debug(String.format("Testing %s", scenario.getName())); - LoadProfile loadProfile = scenario.getLoadProfile(); - assertEquals("tenant group size is not as expected: ", - numTenantGroups, loadProfile.getTenantDistribution().size()); - assertEquals("operation group size is not as expected: ", - numOpGroups, loadProfile.getOpDistribution().size()); - - float totalOperationWeight = 0.0f; - float autoAssignedOperationWeight = 0.0f; - float remainingOperationWeight = 0.0f; - int numAutoWeightedOperations = 0; - for (int r = 0; r < numOpGroups; r++) { - int opWeight = loadProfile.getOpDistribution().get(r).getWeight(); - if (opWeight > 0.0f) { - totalOperationWeight += opWeight; - } else { - numAutoWeightedOperations++; - } - } - remainingOperationWeight = 100.0f - totalOperationWeight; - if (numAutoWeightedOperations > 0) { - autoAssignedOperationWeight = remainingOperationWeight/((float) numAutoWeightedOperations); - } - LOGGER.debug(String.format("Auto [%d,%f] = %f", numAutoWeightedOperations, - remainingOperationWeight, autoAssignedOperationWeight )); - - // Calculate the expected distribution. - double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - float tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight(); - float opWeight = loadProfile.getOpDistribution().get(r).getWeight(); - if (opWeight <= 0.0f) { - opWeight = autoAssignedOperationWeight; - } - expectedDistribution[r][c] = Math.round(normalizedOperations * (tenantWeight * opWeight)); - LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); - } - } - - WeightedRandomLoadEventGenerator evtGen = new WeightedRandomLoadEventGenerator( - pUtil, model, scenario, properties); - - // Calculate the actual distribution. 
- double[][] distribution = new double[numOpGroups][numTenantGroups]; - for (int i = 0; i < numRuns; i++) { - int ops = numOperations; - loadProfile.setNumOperations(ops); - while (ops-- > 0) { - TenantOperationInfo info = evtGen.next(); - int row = TestOperationGroup2.valueOf(info.getOperationGroupId()).ordinal(); - int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); - distribution[row][col]++; - } - } - validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution); + } + + WeightedRandomLoadEventGenerator evtGen = + new WeightedRandomLoadEventGenerator(pUtil, model, scenario, properties); + + // Calculate the actual distribution. + double[][] distribution = new double[numOpGroups][numTenantGroups]; + for (int i = 0; i < numRuns; i++) { + int ops = numOperations; + loadProfile.setNumOperations(ops); + while (ops-- > 0) { + TenantOperationInfo info = evtGen.next(); + int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal(); + int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); + distribution[row][col]++; } + } + validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution); } - - private void validateResults(int numOpGroups, int numTenantGroups, - double[][] expectedDistribution, - double[][] actualDistribution) throws Exception { - - double variancePercent = 0.05f; // 5 percent - - // Validate that the expected and actual distribution - // is within the margin of allowed variance. - for (int r = 0; r < numOpGroups; r++) { - for (int c = 0; c < numTenantGroups; c++) { - double allowedVariance = expectedDistribution[r][c] * variancePercent; - double diff = Math.abs(expectedDistribution[r][c] - actualDistribution[r][c]); - boolean isAllowed = diff < allowedVariance; - LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", - r, c, actualDistribution[r][c], diff, allowedVariance)); - assertTrue(String.format("Difference is outside the allowed variance " - + "[expected = %f, actual = %f]", allowedVariance, diff), isAllowed); - } + } + + /** + * Case : where some operations have zero weight + */ + @Test + public void testAutoAssignedPMFs() throws Exception { + int numRuns = 50; + int numOperations = 100000; + double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f; + int numTenantGroups = 3; + int numOpGroups = 11; + + PhoenixUtil pUtil = PhoenixUtil.create(); + Properties properties = + PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, false); + + DataModel model = readTestDataModel("/scenario/test_evt_gen2.xml"); + for (Scenario scenario : model.getScenarios()) { + LOGGER.debug(String.format("Testing %s", scenario.getName())); + LoadProfile loadProfile = scenario.getLoadProfile(); + assertEquals("tenant group size is not as expected: ", numTenantGroups, + loadProfile.getTenantDistribution().size()); + assertEquals("operation group size is not as expected: ", numOpGroups, + loadProfile.getOpDistribution().size()); + + float totalOperationWeight = 0.0f; + float autoAssignedOperationWeight = 0.0f; + float remainingOperationWeight = 0.0f; + int numAutoWeightedOperations = 0; + for (int r = 0; r < numOpGroups; r++) { + int opWeight = loadProfile.getOpDistribution().get(r).getWeight(); + if (opWeight > 0.0f) { + totalOperationWeight += opWeight; + } else { + numAutoWeightedOperations++; + } + } + remainingOperationWeight = 100.0f - totalOperationWeight; + if (numAutoWeightedOperations > 0) { + autoAssignedOperationWeight = + remainingOperationWeight / ((float) 
numAutoWeightedOperations); + } + LOGGER.debug(String.format("Auto [%d,%f] = %f", numAutoWeightedOperations, + remainingOperationWeight, autoAssignedOperationWeight)); + + // Calculate the expected distribution. + double[][] expectedDistribution = new double[numOpGroups][numTenantGroups]; + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + float tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight(); + float opWeight = loadProfile.getOpDistribution().get(r).getWeight(); + if (opWeight <= 0.0f) { + opWeight = autoAssignedOperationWeight; + } + expectedDistribution[r][c] = Math.round(normalizedOperations * (tenantWeight * opWeight)); + LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c])); } + } + + WeightedRandomLoadEventGenerator evtGen = + new WeightedRandomLoadEventGenerator(pUtil, model, scenario, properties); + + // Calculate the actual distribution. + double[][] distribution = new double[numOpGroups][numTenantGroups]; + for (int i = 0; i < numRuns; i++) { + int ops = numOperations; + loadProfile.setNumOperations(ops); + while (ops-- > 0) { + TenantOperationInfo info = evtGen.next(); + int row = TestOperationGroup2.valueOf(info.getOperationGroupId()).ordinal(); + int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal(); + distribution[row][col]++; + } + } + validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution); + } + } + + private void validateResults(int numOpGroups, int numTenantGroups, + double[][] expectedDistribution, double[][] actualDistribution) throws Exception { + + double variancePercent = 0.05f; // 5 percent + + // Validate that the expected and actual distribution + // is within the margin of allowed variance. + for (int r = 0; r < numOpGroups; r++) { + for (int c = 0; c < numTenantGroups; c++) { + double allowedVariance = expectedDistribution[r][c] * variancePercent; + double diff = Math.abs(expectedDistribution[r][c] - actualDistribution[r][c]); + boolean isAllowed = diff < allowedVariance; + LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f", r, c, actualDistribution[r][c], + diff, allowedVariance)); + assertTrue(String.format( + "Difference is outside the allowed variance " + "[expected = %f, actual = %f]", + allowedVariance, diff), isAllowed); + } } + } } diff --git a/phoenix-pherf/src/test/resources/scenario/malicious_scenario_with_dtd.xml b/phoenix-pherf/src/test/resources/scenario/malicious_scenario_with_dtd.xml index cc453958fca..e9e3d3ca404 100644 --- a/phoenix-pherf/src/test/resources/scenario/malicious_scenario_with_dtd.xml +++ b/phoenix-pherf/src/test/resources/scenario/malicious_scenario_with_dtd.xml @@ -37,12 +37,12 @@ tightly bound to the scenario. In such cases you can't create the view through the data model flow. The value of the tableName attribute is name of the view that is dynamically created based on the DDL in the ddl attribute. Queries accessing the View will need to manually make sure Pherf was run with the -l option at - least once. + least once. --> - - + diff --git a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml index db3caaca8d4..89331976832 100644 --- a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml +++ b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml @@ -211,7 +211,7 @@ LMN - + CHAR @@ -298,8 +298,8 @@ FIELD - -
    - + - + - + - + @@ -354,14 +354,14 @@ tightly bound to the scenario. In such cases you can't create the view through the data model flow. The value of the tableName attribute is name of the view that is dynamically created based on the DDL in the ddl attribute. Queries accessing the View will need to manually make sure Pherf was run with the -l option at - least once. + least once. --> - - +
    diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml index e6eb646633e..5181b79cbf4 100644 --- a/phoenix-server/pom.xml +++ b/phoenix-server/pom.xml @@ -17,9 +17,7 @@ specific language governing permissions and limitations under the License. --> - + 4.0.0 org.apache.phoenix @@ -27,9 +25,9 @@ 5.3.0-SNAPSHOT phoenix-server-${hbase.suffix} + jar Phoenix Server JAR Phoenix HBase Server Side JAR - jar true @@ -37,6 +35,253 @@ true true + + + + + org.apache.phoenix + phoenix-core-server + + + + org.slf4j + slf4j-reload4j + + + ch.qos.reload4j + reload4j + + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + + + org.apache.phoenix + phoenix-hbase-compat-${hbase.compat.version} + false + + + + + org.eclipse.jetty + jetty-server + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-util + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-util-ajax + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-servlet + ${jetty.version} + provided + + + org.eclipse.jetty + jetty-webapp + ${jetty.version} + provided + + + javax.servlet + javax.servlet-api + ${javax.servlet-api.version} + provided + + + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.hadoop + hadoop-auth + provided + + + org.apache.hadoop + hadoop-yarn-api + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-hdfs-client + provided + + + org.apache.hadoop + hadoop-distcp + provided + + + org.apache.hadoop + hadoop-client + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-mapreduce-client-common + provided + + + + + org.apache.hbase + hbase-client + provided + + + org.apache.hbase + hbase-common + provided + + + org.apache.hbase + hbase-mapreduce + provided + + + org.apache.hbase + hbase-replication + provided + + + org.apache.hbase + hbase-endpoint + provided + + + org.apache.hbase + hbase-metrics-api + provided + + + org.apache.hbase + hbase-metrics + provided + + + org.apache.hbase + hbase-protocol + provided + + + org.apache.hbase + hbase-protocol-shaded + provided + + + org.apache.hbase + hbase-server + provided + + + org.apache.hbase + hbase-hadoop-compat + provided + + + org.apache.hbase + hbase-hadoop2-compat + provided + + + org.apache.hbase + hbase-zookeeper + provided + + + org.apache.hbase.thirdparty + hbase-shaded-netty + ${hbase-thirdparty.excludeonly.version} + provided + + + org.apache.hbase.thirdparty + hbase-shaded-miscellaneous + ${hbase-thirdparty.excludeonly.version} + provided + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + ${hbase-thirdparty.excludeonly.version} + provided + + + + org.apache.commons + commons-configuration2 + ${commons-configuration2.excludeonly.version} + provided + + + + + + org.slf4j + slf4j-api + provided + + + com.google.guava + guava + + 11.0.2 + provided + + + com.github.stephenc.findbugs + findbugs-annotations + provided + + + com.google.protobuf + protobuf-java + provided + + @@ -68,7 +313,7 @@ * -
    + org.apache.maven.plugins @@ -90,7 +335,7 @@ README* - + org.apache.hadoop:hadoop-yarn-common org/apache/hadoop/yarn/factories/package-info.class @@ -124,27 +369,20 @@ - - + + csv-bulk-load-config.properties - - ${project.basedir}/../config/csv-bulk-load-config.properties - + ${project.basedir}/../config/csv-bulk-load-config.properties - + README.md ${project.basedir}/../README.md - + LICENSE.txt ${project.basedir}/../LICENSE - + NOTICE ${project.basedir}/../NOTICE @@ -178,7 +416,7 @@ - org/apache/commons/configuration2/** + org/apache/commons/configuration2/** - - - org.apache.phoenix - phoenix-core-server - - - - org.slf4j - slf4j-reload4j - - - ch.qos.reload4j - reload4j - - - - org.slf4j - slf4j-log4j12 - - - log4j - log4j - - - - - org.apache.phoenix - phoenix-hbase-compat-${hbase.compat.version} - false - - - - - org.eclipse.jetty - jetty-server - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-util - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-util-ajax - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-servlet - provided - ${jetty.version} - - - org.eclipse.jetty - jetty-webapp - provided - ${jetty.version} - - - javax.servlet - javax.servlet-api - provided - ${javax.servlet-api.version} - - - - - org.apache.hadoop - hadoop-common - provided - - - org.apache.hadoop - hadoop-mapreduce-client-core - provided - - - org.apache.hadoop - hadoop-annotations - provided - - - org.apache.hadoop - hadoop-auth - provided - - - org.apache.hadoop - hadoop-yarn-api - provided - - - org.apache.hadoop - hadoop-hdfs - provided - - - org.apache.hadoop - hadoop-hdfs-client - provided - - - org.apache.hadoop - hadoop-distcp - provided - - - org.apache.hadoop - hadoop-client - provided - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - provided - - - org.apache.hadoop - hadoop-mapreduce-client-common - provided - - - - - org.apache.hbase - hbase-client - provided - - - org.apache.hbase - hbase-common - provided - - - org.apache.hbase - hbase-mapreduce - provided - - - org.apache.hbase - hbase-replication - provided - - - org.apache.hbase - hbase-endpoint - provided - - - org.apache.hbase - hbase-metrics-api - provided - - - org.apache.hbase - hbase-metrics - provided - - - org.apache.hbase - hbase-protocol - provided - - - org.apache.hbase - hbase-protocol-shaded - provided - - - org.apache.hbase - hbase-server - provided - - - org.apache.hbase - hbase-hadoop-compat - provided - - - org.apache.hbase - hbase-hadoop2-compat - provided - - - org.apache.hbase - hbase-zookeeper - provided - - - org.apache.hbase.thirdparty - hbase-shaded-netty - ${hbase-thirdparty.excludeonly.version} - provided - - - org.apache.hbase.thirdparty - hbase-shaded-miscellaneous - ${hbase-thirdparty.excludeonly.version} - provided - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - ${hbase-thirdparty.excludeonly.version} - provided - - - - org.apache.commons - commons-configuration2 - ${commons-configuration2.excludeonly.version} - provided - - - - - - org.slf4j - slf4j-api - provided - - - com.google.guava - guava - - 11.0.2 - provided - - - com.github.stephenc.findbugs - findbugs-annotations - provided - - - com.google.protobuf - protobuf-java - provided - -
    diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml index 2a7bfb9399f..3dd2cecadc8 100755 --- a/phoenix-tracing-webapp/pom.xml +++ b/phoenix-tracing-webapp/pom.xml @@ -15,193 +15,191 @@ See the License for the specific language governing permissions and limitations under the License. --> - - 4.0.0 + + 4.0.0 - - org.apache.phoenix - phoenix - 5.3.0-SNAPSHOT - - - phoenix-tracing-webapp - Phoenix - Tracing Web Application - Tracing web application will visualize the phoenix traces + + org.apache.phoenix + phoenix + 5.3.0-SNAPSHOT + - - - 3.1.0 - + phoenix-tracing-webapp + Phoenix - Tracing Web Application + Tracing web application will visualize the phoenix traces + + + 3.1.0 + - - - javax.servlet - javax.servlet-api - ${servlet.api.version} - - - org.eclipse.jetty - jetty-server - ${jetty.version} - - - org.eclipse.jetty - jetty-util - ${jetty.version} - - - org.eclipse.jetty - jetty-webapp - ${jetty.version} - - - org.slf4j - slf4j-api - - - org.apache.logging.log4j - log4j-api - - - org.apache.logging.log4j - log4j-core - - - org.apache.logging.log4j - log4j-slf4j-impl - - - org.apache.logging.log4j - log4j-1.2-api - - - org.apache.phoenix - phoenix-core - provided - - - org.apache.hadoop - hadoop-common - - - org.apache.hbase - hbase-common - - + + + javax.servlet + javax.servlet-api + ${servlet.api.version} + + + org.eclipse.jetty + jetty-server + ${jetty.version} + + + org.eclipse.jetty + jetty-util + ${jetty.version} + + + org.eclipse.jetty + jetty-webapp + ${jetty.version} + + + org.slf4j + slf4j-api + + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-slf4j-impl + + + org.apache.logging.log4j + log4j-1.2-api + + + org.apache.phoenix + phoenix-core + provided + + + org.apache.hadoop + hadoop-common + + + org.apache.hbase + hbase-common + + - - - - org.apache.maven.plugins - maven-failsafe-plugin - - true - - - - org.apache.maven.plugins - maven-surefire-plugin + + + + org.apache.maven.plugins + maven-failsafe-plugin + + true + + + + org.apache.maven.plugins + maven-surefire-plugin + + true + + + + org.codehaus.mojo + build-helper-maven-plugin + + + maven-assembly-plugin + + + runnable + + single + + package - true + true + + + true + org.apache.phoenix.tracingwebapp.http.Main + + + ${project.artifactId}-${project.version} + + src/build/trace-server-runnable.xml + - - - org.codehaus.mojo - build-helper-maven-plugin - - - maven-assembly-plugin - - - runnable - package - - single - - - true - - - true - org.apache.phoenix.tracingwebapp.http.Main - - - ${project.artifactId}-${project.version} - - src/build/trace-server-runnable.xml - - - - - - - org.apache.rat - apache-rat-plugin - - - **/webapp/** - **/*.xml - **/README.md - - - - - - - - - jasmin-tests - - - jasmine-tests - - - - - - com.github.searls - jasmine-maven-plugin - - - - test - - - - - - 2.1.1 - - - - ${project.basedir}/src/main/webapp/js/lib/jquery.min.js - ${project.basedir}/src/main/webapp/js/lib/angular.js - ${project.basedir}/src/main/webapp/js/lib/angular-route.js - ${project.basedir}/src/main/webapp/js/lib/angular-mocks.js - ${project.basedir}/src/main/webapp/js/lib/ng-google-chart.js - ${project.basedir}/src/main/webapp/js/lib/bootstrap.js - ${project.basedir}/src/main/webapp/js/lib/ui-bootstrap-tpls.js - ${project.basedir}/src/main/webapp/js/controllers/accordion-controllers.js - ${project.basedir}/src/main/webapp/js/controllers/timeline-controllers.js - 
${project.basedir}/src/main/webapp/js/controllers/search-controllers.js - ${project.basedir}/src/main/webapp/js/controllers/dependency-tree-controllers.js - ${project.basedir}/src/main/webapp/js/app.js - ${project.basedir}/src/main/webapp/js/controllers/list-controllers.js - ${project.basedir}/src/main/webapp/js/controllers/trace-count-controllers.js - ${project.basedir}/src/main/webapp/js/controllers/trace-distribution-controllers.js - - - ${basedir}/src/test/webapp/js/specs - - - - - - - + jasmin-tests + + + jasmine-tests + + + + + + com.github.searls + jasmine-maven-plugin + + + 2.1.1 + + + + ${project.basedir}/src/main/webapp/js/lib/jquery.min.js + ${project.basedir}/src/main/webapp/js/lib/angular.js + ${project.basedir}/src/main/webapp/js/lib/angular-route.js + ${project.basedir}/src/main/webapp/js/lib/angular-mocks.js + ${project.basedir}/src/main/webapp/js/lib/ng-google-chart.js + ${project.basedir}/src/main/webapp/js/lib/bootstrap.js + ${project.basedir}/src/main/webapp/js/lib/ui-bootstrap-tpls.js + ${project.basedir}/src/main/webapp/js/controllers/accordion-controllers.js + ${project.basedir}/src/main/webapp/js/controllers/timeline-controllers.js + ${project.basedir}/src/main/webapp/js/controllers/search-controllers.js + ${project.basedir}/src/main/webapp/js/controllers/dependency-tree-controllers.js + ${project.basedir}/src/main/webapp/js/app.js + ${project.basedir}/src/main/webapp/js/controllers/list-controllers.js + ${project.basedir}/src/main/webapp/js/controllers/trace-count-controllers.js + ${project.basedir}/src/main/webapp/js/controllers/trace-distribution-controllers.js + + + ${basedir}/src/test/webapp/js/specs + + + + + test + + + + + + + + + diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/ConnectionFactory.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/ConnectionFactory.java index b7a1df1adcf..987a210be47 100644 --- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/ConnectionFactory.java +++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/ConnectionFactory.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,21 +22,19 @@ import java.sql.SQLException; /** -* -* ConnectionFactory is to handle database connection -* -*/ + * ConnectionFactory is to handle database connection + */ public class ConnectionFactory { private static Connection con; - //TODO : need to get port and host from configuration + // TODO : need to get port and host from configuration protected static String PHOENIX_HOST = "localhost"; protected static int PHOENIX_PORT = 2181; public static Connection getConnection() throws SQLException, ClassNotFoundException { if (con == null || con.isClosed()) { Class.forName("org.apache.phoenix.jdbc.PhoenixDriver"); - con = DriverManager.getConnection("jdbc:phoenix:"+PHOENIX_HOST+":"+PHOENIX_PORT); + con = DriverManager.getConnection("jdbc:phoenix:" + PHOENIX_HOST + ":" + PHOENIX_PORT); } return con; } diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java index a17630de739..1e2a2ae4bd3 100644 --- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java +++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,8 +40,7 @@ public EntityFactory(Connection connection, String queryString) { this.connection = connection; } - public List> findMultiple() - throws SQLException { + public List> findMultiple() throws SQLException { ResultSet rs = null; PreparedStatement ps = null; try { @@ -59,8 +59,8 @@ public List> findMultiple() } } - protected static List> getEntitiesFromResultSet( - ResultSet resultSet) throws SQLException { + protected static List> getEntitiesFromResultSet(ResultSet resultSet) + throws SQLException { ArrayList> entities = new ArrayList<>(); while (resultSet.next()) { entities.add(getEntityFromResultSet(resultSet)); @@ -69,7 +69,7 @@ protected static List> getEntitiesFromResultSet( } protected static Map getEntityFromResultSet(ResultSet resultSet) - throws SQLException { + throws SQLException { ResultSetMetaData metaData = resultSet.getMetaData(); int columnCount = metaData.getColumnCount(); Map resultsMap = new HashMap<>(); diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java index f420feda30d..38d0f706509 100755 --- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java +++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/Main.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,69 +18,63 @@ package org.apache.phoenix.tracingwebapp.http; import java.net.URL; -import java.security.ProtectionDomain; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.BasicConfigurator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.eclipse.jetty.server.Server; import org.eclipse.jetty.webapp.WebAppContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * tracing web app runner */ public final class Main extends Configured implements Tool { - protected static final Logger LOGGER = LoggerFactory.getLogger(Main.class); - public static final String PHONIX_DBSERVER_PORT_KEY = - "phoenix.dbserver.port"; - public static final int DEFAULT_DBSERVER_PORT = 2181; - public static final String PHONIX_DBSERVER_HOST_KEY = - "phoenix.dbserver.host"; - public static final String DEFAULT_DBSERVER_HOST = "localhost"; - public static final String TRACE_SERVER_HTTP_PORT_KEY = - "phoenix.traceserver.http.port"; - public static final int DEFAULT_HTTP_PORT = 8864; - public static final String TRACE_SERVER_HTTP_JETTY_HOME_KEY = - "phoenix.traceserver.http.home"; - public static final String DEFAULT_HTTP_HOME = "/"; - public static final String DEFAULT_WEBAPP_DIR_LOCATION = "src/main/webapp"; + protected static final Logger LOGGER = LoggerFactory.getLogger(Main.class); + public static final String PHONIX_DBSERVER_PORT_KEY = "phoenix.dbserver.port"; + public static final int DEFAULT_DBSERVER_PORT = 2181; + public static final String PHONIX_DBSERVER_HOST_KEY = "phoenix.dbserver.host"; + public static final String DEFAULT_DBSERVER_HOST = "localhost"; + public static final String TRACE_SERVER_HTTP_PORT_KEY = "phoenix.traceserver.http.port"; + public static final int DEFAULT_HTTP_PORT = 8864; + public static final String TRACE_SERVER_HTTP_JETTY_HOME_KEY = "phoenix.traceserver.http.home"; + public static final String DEFAULT_HTTP_HOME = "/"; + public static final String DEFAULT_WEBAPP_DIR_LOCATION = "src/main/webapp"; - public static void main(String[] args) throws Exception { - int ret = ToolRunner.run(HBaseConfiguration.create(), new Main(), args); - System.exit(ret); - } + public static void main(String[] args) throws Exception { + int ret = ToolRunner.run(HBaseConfiguration.create(), new Main(), args); + System.exit(ret); + } - @Override - public int run(String[] arg0) throws Exception { - // logProcessInfo(getConf()); - final int port = getConf().getInt(TRACE_SERVER_HTTP_PORT_KEY, - DEFAULT_HTTP_PORT); - BasicConfigurator.configure(); - final String home = getConf().get(TRACE_SERVER_HTTP_JETTY_HOME_KEY, - DEFAULT_HTTP_HOME); - //setting up the embedded server - Server server = new Server(port); - WebAppContext root = new WebAppContext(); + @Override + public int run(String[] arg0) throws Exception { + // logProcessInfo(getConf()); + final int port = getConf().getInt(TRACE_SERVER_HTTP_PORT_KEY, DEFAULT_HTTP_PORT); + BasicConfigurator.configure(); + final String home = getConf().get(TRACE_SERVER_HTTP_JETTY_HOME_KEY, DEFAULT_HTTP_HOME); + // setting up the embedded server + Server server = new Server(port); + WebAppContext root = new WebAppContext(); - 
URL webAppDir = Thread.currentThread().getContextClassLoader().getResource(DEFAULT_WEBAPP_DIR_LOCATION); - if (webAppDir == null) { - throw new RuntimeException(String.format("No %s directory was found into the JAR file", DEFAULT_WEBAPP_DIR_LOCATION)); - } + URL webAppDir = + Thread.currentThread().getContextClassLoader().getResource(DEFAULT_WEBAPP_DIR_LOCATION); + if (webAppDir == null) { + throw new RuntimeException( + String.format("No %s directory was found into the JAR file", DEFAULT_WEBAPP_DIR_LOCATION)); + } - root.setContextPath(home); - root.setDescriptor(DEFAULT_WEBAPP_DIR_LOCATION + "/WEB-INF/web.xml"); - root.setResourceBase(webAppDir.toURI().toString()); - root.setParentLoaderPriority(true); - server.setHandler(root); + root.setContextPath(home); + root.setDescriptor(DEFAULT_WEBAPP_DIR_LOCATION + "/WEB-INF/web.xml"); + root.setResourceBase(webAppDir.toURI().toString()); + root.setParentLoaderPriority(true); + server.setHandler(root); - server.start(); - server.join(); - return 0; - } + server.start(); + server.join(); + return 0; + } } diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java index db31d83a28a..6802bcbac42 100755 --- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java +++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +19,10 @@ import java.io.IOException; import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; @@ -30,15 +35,8 @@ import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.util.JacksonUtil; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.List; -import java.util.Map; - /** - * * Server to show trace information - * */ public class TraceServlet extends HttpServlet { @@ -54,15 +52,14 @@ public class TraceServlet extends HttpServlet { @Override public void init() { Configuration conf = HBaseConfiguration.create(); - TRACING_TABLE = - conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, - QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); + TRACING_TABLE = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); } protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { - //reading url params + // reading url params String action = request.getParameter("action"); String limit = request.getParameter("limit"); String traceid = request.getParameter("traceid"); @@ -79,7 +76,7 @@ protected void doGet(HttpServletRequest request, HttpServletResponse response) } else { jsonObject = "{ \"Server\": \"Phoenix Tracing Web App\", \"API version\": 0.1 }"; } - //response send as json + // response send as json response.setContentType("application/json"); String output = jsonObject; PrintWriter out = response.getWriter(); @@ -87,34 +84,35 @@ protected void doGet(HttpServletRequest request, HttpServletResponse response) out.flush(); } - //get all trace results with limit count + // get all trace results with limit count protected String getAll(String limit) { String json = null; - if(limit == null) { + if (limit == null) { limit = DEFAULT_LIMIT; } - try{ - Long.parseLong(limit); + try { + Long.parseLong(limit); } catch (NumberFormatException e) { - throw new RuntimeException("The LIMIT passed to the query is not a number.", e); + throw new RuntimeException("The LIMIT passed to the query is not a number.", e); } - String sqlQuery = "SELECT * FROM " + TRACING_TABLE + " LIMIT "+limit; + String sqlQuery = "SELECT * FROM " + TRACING_TABLE + " LIMIT " + limit; json = getResults(sqlQuery); return getJson(json); } - //get count on traces can pick on param to count + // get count on traces can pick on param to count protected String getCount(String countby) { String json = null; - if(countby == null) { + if (countby == null) { countby = DEFAULT_COUNTBY; } - String sqlQuery = "SELECT "+countby+", COUNT(*) AS count FROM " + TRACING_TABLE + " GROUP BY "+countby+" HAVING COUNT(*) > 1 "; + String sqlQuery = "SELECT " + countby + ", COUNT(*) AS count FROM " + TRACING_TABLE + + " GROUP BY " + countby + " HAVING COUNT(*) > 1 "; json = getResults(sqlQuery); return json; } - //search the trace over parent id or trace id + // search the trace over parent id or trace id protected String searchTrace(String parentId, String traceId, String logic) { String json = null; @@ -131,10 
+129,12 @@ protected String searchTrace(String parentId, String traceId, String logic) { throw new RuntimeException("The passed parentId/traceId is not a number.", e); } if (logic != null && !logic.equals(LOGIC_AND) && !logic.equals(LOGIC_OR)) { - throw new RuntimeException("Wrong logical operator passed to the query. Only " + LOGIC_AND + "," + LOGIC_OR + " are allowed."); + throw new RuntimeException("Wrong logical operator passed to the query. Only " + LOGIC_AND + + "," + LOGIC_OR + " are allowed."); } if (parentId != null && traceId != null) { - query = "SELECT * FROM " + TRACING_TABLE + " WHERE parent_id=" + parentId + " " + logic + " trace_id=" + traceId; + query = "SELECT * FROM " + TRACING_TABLE + " WHERE parent_id=" + parentId + " " + logic + + " trace_id=" + traceId; } else if (parentId != null && traceId == null) { query = "SELECT * FROM " + TRACING_TABLE + " WHERE parent_id=" + parentId; } else if (parentId == null && traceId != null) { @@ -144,16 +144,15 @@ protected String searchTrace(String parentId, String traceId, String logic) { return getJson(json); } - //return json string + // return json string protected String getJson(String json) { - String output = json.toString().replace("_id\":", "_id\":\"") - .replace(",\"hostname", "\",\"hostname") - .replace(",\"parent", "\",\"parent") - .replace(",\"end", "\",\"end"); + String output = + json.toString().replace("_id\":", "_id\":\"").replace(",\"hostname", "\",\"hostname") + .replace(",\"parent", "\",\"parent").replace(",\"end", "\",\"end"); return output; } - //get results with passing sql query + // get results with passing sql query protected String getResults(String sqlQuery) { String json = null; if (sqlQuery == null) { @@ -162,8 +161,7 @@ protected String getResults(String sqlQuery) { try { con = ConnectionFactory.getConnection(); EntityFactory nutrientEntityFactory = new EntityFactory(con, sqlQuery); - List> nutrients = nutrientEntityFactory - .findMultiple(); + List> nutrients = nutrientEntityFactory.findMultiple(); json = JacksonUtil.getObjectWriter().writeValueAsString(nutrients); } catch (Exception e) { json = "{error:true,msg:'Server Error:" + e.getMessage() + "'}"; diff --git a/pom.xml b/pom.xml index 36baee073fb..e0675df31c2 100644 --- a/pom.xml +++ b/pom.xml @@ -15,9 +15,14 @@ See the License for the specific language governing permissions and limitations under the License. 
--> - + 4.0.0 + + + org.apache + apache + 30 + org.apache.phoenix phoenix 5.3.0-SNAPSHOT @@ -25,6 +30,11 @@ Apache Phoenix A SQL layer over HBase + + Apache Software Foundation + https://www.apache.org + + The Apache Software License, Version 2.0 @@ -34,11 +44,6 @@ - - Apache Software Foundation - https://www.apache.org - - phoenix-hbase-compat-2.6.0 phoenix-hbase-compat-2.5.4 @@ -52,23 +57,10 @@ - - - apache release - https://repository.apache.org/content/repositories/releases/ - - - - - org.apache - apache - 30 - - scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git - https://git-wip-us.apache.org/repos/asf/phoenix.git scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git + https://git-wip-us.apache.org/repos/asf/phoenix.git @@ -179,7 +171,7 @@ false false - + 2200m @@ -191,13 +183,10 @@ -Dorg.apache.hbase.thirdparty.io.netty.leakDetection.level=advanced -Dio.netty.eventLoopThreads=3 -Duser.timezone="America/Los_Angeles" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/ - "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" - - - -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC + "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" + -XX:NewRatio=4 -XX:SurvivorRatio=8 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+DisableExplicitGC -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled - -XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 - + -XX:+CMSScavengeBeforeRemark -XX:CMSInitiatingOccupancyFraction=68 -Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true --add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED @@ -209,11 +198,9 @@ --add-opens java.base/java.util.concurrent=ALL-UNNAMED --add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/sun.security.krb5=ALL-UNNAMED - - ${phoenix-surefire.jdk8.tuning.flags} - + ${phoenix-surefire.jdk8.tuning.flags} --add-opens java.base/jdk.internal.util.random=ALL-UNNAMED - + -Djava.security.manager=allow @@ -238,726 +225,147 @@ org.apache.hadoop.hbase.shaded - - - - kr.motd.maven - os-maven-plugin - ${os.maven.version} - - - - - - - org.apache.maven.plugins - maven-project-info-reports-plugin - - - org.apache.maven.plugins - maven-compiler-plugin - - - org.antlr - antlr3-maven-plugin - ${maven-antlr-eclipse-plugin.version} - - - com.github.searls - jasmine-maven-plugin - ${jasmine-maven-plugin.version} - - - com.github.spotbugs - spotbugs-maven-plugin - ${spotbugs-maven-plugin.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - - - Max - 2048 - ${top.dir}/src/main/config/spotbugs/spotbugs-exclude.xml - - - - - org.eclipse.m2e - lifecycle-mapping - ${lifecycle-mapping.version} - - - - - - org.antlr - antlr3-maven-plugin - [3.5,) - - antlr - - - - - - - - - - - - org.apache.maven.plugins - maven-install-plugin - - - org.apache.maven.plugins - maven-eclipse-plugin - ${maven-eclipse-plugin.version} - - - maven-assembly-plugin - - - org.apache.rat - apache-rat-plugin - - - - - org.codehaus.mojo - build-helper-maven-plugin - ${maven-build-helper-plugin.version} - - - add-test-source - validate - - add-test-source - - - - ${basedir}/src/it/java - - - - - add-test-resource - validate - - add-test-resource - - - - - ${basedir}/src/it/resources - - - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - UTF-8 - ${numForkedIT} - alphabetical - ${test.output.tofile} - 180 - exit - ${basedir}/src/it/java - false - - 
false - - - ${test.tmp.dir} - - - - - ParallelStatsEnabledTest - - ${skipParallelStatsEnabledTests} - true - org.apache.phoenix.end2end.ParallelStatsEnabledTest - - - integration-test - verify - - - - ParallelStatsDisabledTest - - true - ${skipParallelStatsDisabledTests} - org.apache.phoenix.end2end.ParallelStatsDisabledTest - - - integration-test - verify - - - - NeedTheirOwnClusterTests - - false - ${skipNeedsOwnMiniClusterTests} - org.apache.phoenix.end2end.NeedsOwnMiniClusterTest - - - integration-test - verify - - - - - - maven-dependency-plugin - 3.1.1 - - - - org.apache.omid:* - - - org.apache.hbase:hbase-testing-util - - - org.apache.logging.log4j:log4j-api - - - org.apache.logging.log4j:log4j-core - - - org.apache.logging.log4j:log4j-slf4j-impl - - - org.apache.logging.log4j:log4j-1.2-api - - - - - - dnsjava:dnsjava - - - - org.apache.hbase.thirdparty:* - - - - org.apache.commons:commons-configuration2 - - - - - - create-mrapp-generated-classpath - generate-test-resources - - build-classpath - - - ${project.build.directory}/classes/mrapp-generated-classpath - - - - - enforce-dependencies - - analyze-only - - - true - - - - - - org.apache.maven.plugins - maven-shade-plugin - ${maven-shade-plugin.version} - - - - org.apache.felix - maven-bundle-plugin - ${maven-bundle-plugin.version} - - - org.codehaus.mojo - exec-maven-plugin - ${exec-maven-plugin.version} - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf.plugin.version} - - - org.apache.maven.plugins - maven-enforcer-plugin - - - de.skuzzle.enforcer - restrict-imports-enforcer-rule - ${restrict-imports.enforcer.version} - - - - - banned-illegal-imports - process-sources - - enforce - - - - - true - Use SLF4j for logging - - org.apache.commons.logging.Log - org.apache.commons.logging.LogFactory - - - - true - Use shaded version in phoenix-thirdparty - - com.google.common.** - - - - true - Use shaded version in phoenix-thirdparty - - org.apache.commons.cli.** - org.apache.hbase.thirdparty.org.apache.commons.cli.** - - - - true - Use commons lang 3 - - org.apache.commons.lang.** - - - - true - Use edu.umd.cs.findbugs.annotations - - com.sun.istack.** - - - - - - - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - ${maven-checkstyle-plugin.version} - - ${top.dir}/src/main/config/checkstyle/checker.xml - ${top.dir}/src/main/config/checkstyle/suppressions.xml - true - ${top.dir}/src/main/config/checkstyle/header.txt - false - false - - - - validate - validate - - true - - - check - - - - - - org.apache.maven.plugins - maven-source-plugin - - - attach-sources - prepare-package - - jar-no-fork - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - - true - - https://hbase.apache.org/apidocs/ - - - - - attach-javadocs - - - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${numForkedUT} - true - ${test.output.tofile} - exit - false - - - ${test.tmp.dir} - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - prepare-package - - - test-jar - - - - - - org.apache.rat - apache-rat-plugin - - - - CHANGES - - dev/create-release/README.txt - dev/create-release/vote.tmpl - - dev/phoenix.importorder - - dev/release_files/LICENSE - dev/release_files/NOTICE - - docs/*.csv - examples/*.csv - - examples/*.sql - - **/patchprocess/** - - bin/argparse-1.4.0/argparse.py - - dev/work/** - dev/artifacts/** - - - - - - org.apache.felix - maven-bundle-plugin - true - true - - - com.diffplug.spotless - spotless-maven-plugin - ${spotless.version} - - - - - **/generated/* - **/package-info.java - - 
- - Remove unhelpful javadoc stubs - (?m)^ *\* *@(?:param|throws|return) *\w* *\n - - - - - Purge single returns tag multi line - (?m)^ */\*\*\n *\* *@return *(.*) *\n *\*/$ - /** Returns $1 */ - - - Purge single returns tag single line - ^ */\*\* *@return *(.*) *\*/$ - /** Returns $1 */ - - - - ${session.executionRootDirectory}/dev/PhoenixCodeTemplate.xml - - - ${session.executionRootDirectory}/dev/phoenix.importorder - - - - - - - - false - - - - - - - - **/*.xml - **/*.sh - **/*.py - **/Jenkinsfile* - **/Dockerfile* - **/*.md - *.md - **/*.txt - *.txt - - - **/target/** - **/dependency-reduced-pom.xml - - - - - - - - - src/main/java/**/*.java - src/test/java/**/*.java - - - **/generated/* - **/package-info.java - - - ${session.executionRootDirectory}/src/main/config/checkstyle/header.txt - package - - - - - - false - - - - - - - - - - - org.apache.phoenix - phoenix-core-client - ${project.version} - - - org.apache.phoenix - phoenix-core-server - ${project.version} - - - org.apache.phoenix - phoenix-core - ${project.version} - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.4 - ${project.version} - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.5.0 - ${project.version} - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.5 - ${project.version} - - - org.apache.phoenix - phoenix-client-embedded-hbase-2.6 - ${project.version} - - - org.apache.phoenix - phoenix-client-lite-hbase-2.4 - ${project.version} - - - org.apache.phoenix - phoenix-client-lite-hbase-2.5.0 - ${project.version} - - - org.apache.phoenix - phoenix-client-lite-hbase-2.5 - ${project.version} - - - org.apache.phoenix - phoenix-client-lite-hbase-2.6 - ${project.version} - - - org.apache.phoenix - phoenix-server-hbase-2.4 - ${project.version} - - - org.apache.phoenix - phoenix-server-hbase-2.5.0 - ${project.version} - - - org.apache.phoenix - phoenix-server-hbase-2.5 - ${project.version} - - - org.apache.phoenix - phoenix-server-hbase-2.6 - ${project.version} - - - org.apache.phoenix - phoenix-mapreduce-byo-shaded-hbase-hbase-2.4 - ${project.version} - - - org.apache.phoenix - phoenix-mapreduce-byo-shaded-hbase-hbase-2.5.0 - ${project.version} - - - org.apache.phoenix - phoenix-mapreduce-byo-shaded-hbase-hbase-2.5 - ${project.version} - - - org.apache.phoenix - phoenix-mapreduce-byo-shaded-hbase-hbase-2.6 - ${project.version} - - - org.apache.phoenix - phoenix-pherf - ${project.version} - - - org.apache.phoenix - phoenix-tracing-webapp - ${project.version} - - - org.apache.phoenix.thirdparty - phoenix-shaded-guava - ${phoenix.thirdparty.version} - - - org.apache.phoenix - phoenix-hbase-compat-2.4.1 - ${project.version} - - - org.apache.phoenix - phoenix-hbase-compat-2.5.0 - ${project.version} - - - org.apache.phoenix - phoenix-hbase-compat-2.5.4 - ${project.version} - - - org.apache.phoenix - phoenix-hbase-compat-2.6.0 - ${project.version} - - - - org.apache.phoenix - phoenix-core - ${project.version} - test-jar - test - + + + + + org.apache.phoenix + phoenix-core-client + ${project.version} + + + org.apache.phoenix + phoenix-core-server + ${project.version} + + + org.apache.phoenix + phoenix-core + ${project.version} + + + org.apache.phoenix + phoenix-client-embedded-hbase-2.4 + ${project.version} + + + org.apache.phoenix + phoenix-client-embedded-hbase-2.5.0 + ${project.version} + + + org.apache.phoenix + phoenix-client-embedded-hbase-2.5 + ${project.version} + + + org.apache.phoenix + phoenix-client-embedded-hbase-2.6 + ${project.version} + + + org.apache.phoenix + phoenix-client-lite-hbase-2.4 + 
${project.version} + + + org.apache.phoenix + phoenix-client-lite-hbase-2.5.0 + ${project.version} + + + org.apache.phoenix + phoenix-client-lite-hbase-2.5 + ${project.version} + + + org.apache.phoenix + phoenix-client-lite-hbase-2.6 + ${project.version} + + + org.apache.phoenix + phoenix-server-hbase-2.4 + ${project.version} + + + org.apache.phoenix + phoenix-server-hbase-2.5.0 + ${project.version} + + + org.apache.phoenix + phoenix-server-hbase-2.5 + ${project.version} + + + org.apache.phoenix + phoenix-server-hbase-2.6 + ${project.version} + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-hbase-2.4 + ${project.version} + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-hbase-2.5.0 + ${project.version} + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-hbase-2.5 + ${project.version} + + + org.apache.phoenix + phoenix-mapreduce-byo-shaded-hbase-hbase-2.6 + ${project.version} + + + org.apache.phoenix + phoenix-pherf + ${project.version} + + + org.apache.phoenix + phoenix-tracing-webapp + ${project.version} + + + org.apache.phoenix.thirdparty + phoenix-shaded-guava + ${phoenix.thirdparty.version} + + + org.apache.phoenix + phoenix-hbase-compat-2.4.1 + ${project.version} + + + org.apache.phoenix + phoenix-hbase-compat-2.5.0 + ${project.version} + + + org.apache.phoenix + phoenix-hbase-compat-2.5.4 + ${project.version} + + + org.apache.phoenix + phoenix-hbase-compat-2.6.0 + ${project.version} + + + + org.apache.phoenix + phoenix-core + ${project.version} + test-jar + test + @@ -995,8 +403,8 @@ org.apache.hadoop hadoop-common - test-jar ${hadoop.version} + test-jar org.apache.htrace @@ -1119,6 +527,7 @@ org.apache.hadoop hadoop-auth + ${hadoop.version} net.minidev @@ -1149,7 +558,6 @@ slf4j-reload4j - ${hadoop.version} org.apache.hadoop @@ -1176,6 +584,7 @@ hadoop-minicluster ${hadoop.version} test + true log4j @@ -1194,7 +603,6 @@ slf4j-reload4j - true org.apache.hadoop @@ -1223,9 +631,9 @@ org.apache.hadoop hadoop-hdfs - test-jar - tests ${hadoop.version} + tests + test-jar test @@ -1508,10 +916,10 @@ omid-hbase-client ${omid.version} - - org.testng - testng - + + org.testng + testng + @@ -1765,8 +1173,8 @@ com.fasterxml.jackson jackson-bom ${jackson-bom.version} - import pom + import - com.fasterxml.woodstox - woodstox-core - 5.4.0 + com.fasterxml.woodstox + woodstox-core + 5.4.0 org.apache.avro @@ -1906,50 +1314,668 @@ import - - - ch.qos.reload4j - reload4j - ${reload4j.version} - - - org.slf4j - slf4j-reload4j - ${slf4j.version} - - - org.apache.logging.log4j - log4j-api - ${log4j2.version} - - - org.apache.logging.log4j - log4j-core - ${log4j2.version} - - - org.apache.logging.log4j - log4j-slf4j-impl - ${log4j2.version} - - - org.apache.logging.log4j - log4j-1.2-api - ${log4j2.version} - - - sqlline - sqlline - ${sqlline.version} - runtime - - - org.glassfish - javax.el - ${glassfish.el.version} - - - + + + ch.qos.reload4j + reload4j + ${reload4j.version} + + + org.slf4j + slf4j-reload4j + ${slf4j.version} + + + org.apache.logging.log4j + log4j-api + ${log4j2.version} + + + org.apache.logging.log4j + log4j-core + ${log4j2.version} + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + org.apache.logging.log4j + log4j-1.2-api + ${log4j2.version} + + + sqlline + sqlline + ${sqlline.version} + runtime + + + org.glassfish + javax.el + ${glassfish.el.version} + + + + + + + apache release + https://repository.apache.org/content/repositories/releases/ + + + + + + + + + org.apache.maven.plugins + maven-project-info-reports-plugin + + + 
org.apache.maven.plugins + maven-compiler-plugin + + + org.antlr + antlr3-maven-plugin + ${maven-antlr-eclipse-plugin.version} + + + com.github.searls + jasmine-maven-plugin + ${jasmine-maven-plugin.version} + + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + Max + 2048 + ${top.dir}/src/main/config/spotbugs/spotbugs-exclude.xml + + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + + + + + org.eclipse.m2e + lifecycle-mapping + ${lifecycle-mapping.version} + + + + + + org.antlr + antlr3-maven-plugin + [3.5,) + + antlr + + + + + + + + + + + + org.apache.maven.plugins + maven-install-plugin + + + org.apache.maven.plugins + maven-eclipse-plugin + ${maven-eclipse-plugin.version} + + + maven-assembly-plugin + + + org.apache.rat + apache-rat-plugin + + + + + org.codehaus.mojo + build-helper-maven-plugin + ${maven-build-helper-plugin.version} + + + add-test-source + + add-test-source + + validate + + + ${basedir}/src/it/java + + + + + add-test-resource + + add-test-resource + + validate + + + + ${basedir}/src/it/resources + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + UTF-8 + ${numForkedIT} + alphabetical + ${test.output.tofile} + 180 + exit + ${basedir}/src/it/java + false + + false + + + ${test.tmp.dir} + + + + + ParallelStatsEnabledTest + + integration-test + verify + + + ${skipParallelStatsEnabledTests} + true + org.apache.phoenix.end2end.ParallelStatsEnabledTest + + + + ParallelStatsDisabledTest + + integration-test + verify + + + true + ${skipParallelStatsDisabledTests} + org.apache.phoenix.end2end.ParallelStatsDisabledTest + + + + NeedTheirOwnClusterTests + + integration-test + verify + + + false + ${skipNeedsOwnMiniClusterTests} + org.apache.phoenix.end2end.NeedsOwnMiniClusterTest + + + + + + maven-dependency-plugin + 3.1.1 + + + org.apache.omid:* + org.apache.hbase:hbase-testing-util + org.apache.logging.log4j:log4j-api + org.apache.logging.log4j:log4j-core + org.apache.logging.log4j:log4j-slf4j-impl + org.apache.logging.log4j:log4j-1.2-api + + + + + dnsjava:dnsjava + + + + org.apache.hbase.thirdparty:* + + + + org.apache.commons:commons-configuration2 + + + + + + create-mrapp-generated-classpath + + build-classpath + + generate-test-resources + + ${project.build.directory}/classes/mrapp-generated-classpath + + + + enforce-dependencies + + analyze-only + + + true + + + + + + org.apache.maven.plugins + maven-shade-plugin + ${maven-shade-plugin.version} + + + + org.apache.felix + maven-bundle-plugin + ${maven-bundle-plugin.version} + + + org.codehaus.mojo + exec-maven-plugin + ${exec-maven-plugin.version} + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf.plugin.version} + + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + + enforce + + process-sources + + + + true + Use SLF4j for logging + + org.apache.commons.logging.Log + org.apache.commons.logging.LogFactory + + + + true + Use shaded version in phoenix-thirdparty + + com.google.common.** + + + + true + Use shaded version in phoenix-thirdparty + + org.apache.commons.cli.** + org.apache.hbase.thirdparty.org.apache.commons.cli.** + + + + true + Use commons lang 3 + + org.apache.commons.lang.** + + + + true + Use edu.umd.cs.findbugs.annotations + + com.sun.istack.** + + + + + + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + ${maven-checkstyle-plugin.version} + + 
${top.dir}/src/main/config/checkstyle/checker.xml + ${top.dir}/src/main/config/checkstyle/suppressions.xml + true + ${top.dir}/src/main/config/checkstyle/header.txt + + + false + + + + false + + + + + validate + + check + + validate + + true + + + + + + org.apache.maven.plugins + maven-source-plugin + + + attach-sources + + jar-no-fork + + prepare-package + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + true + + https://hbase.apache.org/apidocs/ + + + + + attach-javadocs + + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${numForkedUT} + true + ${test.output.tofile} + exit + false + + + ${test.tmp.dir} + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + prepare-package + + + + + org.apache.rat + apache-rat-plugin + + + + CHANGES + + dev/create-release/README.txt + dev/create-release/vote.tmpl + + dev/phoenix.importorder + + dev/release_files/LICENSE + dev/release_files/NOTICE + + docs/*.csv + examples/*.csv + + examples/*.sql + + **/patchprocess/** + + bin/argparse-1.4.0/argparse.py + + dev/work/** + dev/artifacts/** + + + + + + org.apache.felix + maven-bundle-plugin + true + true + + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + + **/generated/* + **/package-info.java + + + + Remove unhelpful javadoc stubs + (?m)^ *\* *@(?:param|throws|return) *\w* *\n + + + + + Purge single returns tag multi line + (?m)^ */\*\*\n *\* *@return *(.*) *\n *\*/$ + /** Returns $1 */ + + + Purge single returns tag single line + ^ */\*\* *@return *(.*) *\*/$ + /** Returns $1 */ + + + + ${session.executionRootDirectory}/dev/PhoenixCodeTemplate.xml + + + ${session.executionRootDirectory}/dev/phoenix.importorder + + + + + + + + false + + + + + + + + **/*.xml + **/*.sh + **/*.py + **/Jenkinsfile* + **/Dockerfile* + **/*.md + *.md + **/*.txt + *.txt + + + **/target/** + **/dependency-reduced-pom.xml + + + + + + + + + src/main/java/**/*.java + src/test/java/**/*.java + + + **/generated/* + **/package-info.java + + + ${session.executionRootDirectory}/src/main/config/checkstyle/header.txt + package + + + + + + false + + + + + + + kr.motd.maven + os-maven-plugin + ${os.maven.version} + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + + org.owasp + dependency-check-maven + ${maven-owasp-plugin.version} + + true + true + true + + + + + aggregate + + + + + + org.jacoco + jacoco-maven-plugin + + + + report + + + + + + org.apache.rat + apache-rat-plugin + + + @@ -1978,10 +2004,10 @@ apache-rat-plugin - package check + package @@ -2100,7 +2126,7 @@ codecoverage - !skip.code-coverage + !skip.code-coverage @@ -2121,17 +2147,17 @@ report - post-integration-test report + post-integration-test check - verify - check + check + verify @@ -2168,9 +2194,7 @@ . 
${project.build.directory}/surefire-reports - - ${sonar.projectBaseDir}/phoenix-assembly/target/code-coverage/jacoco-reports/jacoco.xml - + ${sonar.projectBaseDir}/phoenix-assembly/target/code-coverage/jacoco-reports/jacoco.xml ${project.basedir} ${main.basedir}/phoenix-assembly/target/code-coverage @@ -2186,20 +2210,20 @@ prepare-agent - initialize prepare-agent + initialize jacocoArgLine report - post-integration-test report + post-integration-test @@ -2215,7 +2239,7 @@ spotbugs-site - !spotbugs.site + !spotbugs.site @@ -2305,44 +2329,4 @@ - - - - com.github.spotbugs - spotbugs-maven-plugin - - - org.owasp - dependency-check-maven - ${maven-owasp-plugin.version} - - true - true - true - - - - - aggregate - - - - - - org.jacoco - jacoco-maven-plugin - - - - report - - - - - - org.apache.rat - apache-rat-plugin - - - diff --git a/src/main/config/checkstyle/header.txt b/src/main/config/checkstyle/header.txt index d5519133edc..2379ddac12c 100644 --- a/src/main/config/checkstyle/header.txt +++ b/src/main/config/checkstyle/header.txt @@ -14,4 +14,4 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - */ \ No newline at end of file + */ diff --git a/src/main/config/spotbugs/spotbugs-exclude.xml b/src/main/config/spotbugs/spotbugs-exclude.xml index 76104b1a97b..9692bb7d7c2 100644 --- a/src/main/config/spotbugs/spotbugs-exclude.xml +++ b/src/main/config/spotbugs/spotbugs-exclude.xml @@ -27,4 +27,4 @@ limitations under the License. - \ No newline at end of file +
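The root pom.xml and phoenix-tracing-webapp/pom.xml hunks show the same pattern at a larger scale: whole top-level blocks (<parent>, <organization>, <repositories>, <build>/<pluginManagement>, <reporting>) are relocated rather than edited, and inside plugin <execution> entries <goals> now precedes <phase>. A rough skeleton of the top-level ordering the reordered POMs end up with; this is inferred from the diffs above rather than taken from any formatter documentation, so treat it as an approximation:

  <!-- Approximate top-level order after formatting, inferred from this patch.
       Sections not touched by the patch (modules, scm, etc.) are omitted. -->
  <project>
    <modelVersion/>
    <parent/>
    <groupId/> <artifactId/> <version/>
    <packaging/>
    <name/>
    <description/>
    <organization/>
    <licenses/>
    <properties/>
    <dependencyManagement/>
    <dependencies/>
    <repositories/>
    <build/>
    <reporting/>
    <profiles/>
  </project>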